-rw-r--r--  Documentation/00-INDEX  428
-rw-r--r--  Documentation/ABI/stable/sysfs-driver-usb-usbtmc  35
-rw-r--r--  Documentation/ABI/testing/configfs-usb-gadget-uvc  24
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-pci  24
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-usb  19
-rw-r--r--  Documentation/ABI/testing/sysfs-class-lcd-s6e63m0  27
-rw-r--r--  Documentation/ABI/testing/sysfs-class-led-driver-sc27xx  22
-rw-r--r--  Documentation/ABI/testing/sysfs-class-led-trigger-pattern  82
-rw-r--r--  Documentation/ABI/testing/sysfs-class-net  22
-rw-r--r--  Documentation/ABI/testing/sysfs-class-net-dsa  7
-rw-r--r--  Documentation/ABI/testing/sysfs-fs-f2fs  17
-rw-r--r--  Documentation/ABI/testing/sysfs-power  2
-rw-r--r--  Documentation/PCI/00-INDEX  26
-rw-r--r--  Documentation/PCI/endpoint/pci-test-howto.txt  19
-rw-r--r--  Documentation/PCI/pci-error-recovery.txt  35
-rw-r--r--  Documentation/RCU/00-INDEX  34
-rw-r--r--  Documentation/RCU/Design/Data-Structures/Data-Structures.html  31
-rw-r--r--  Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html  9
-rw-r--r--  Documentation/RCU/Design/Requirements/Requirements.html  214
-rw-r--r--  Documentation/RCU/rcu.txt  4
-rw-r--r--  Documentation/RCU/stallwarn.txt  13
-rw-r--r--  Documentation/RCU/whatisRCU.txt  3
-rw-r--r--  Documentation/admin-guide/LSM/Yama.rst  4
-rw-r--r--  Documentation/admin-guide/README.rst  3
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst  8
-rw-r--r--  Documentation/admin-guide/ext4.rst  574
-rw-r--r--  Documentation/admin-guide/index.rst  1
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt  48
-rw-r--r--  Documentation/admin-guide/l1tf.rst  2
-rw-r--r--  Documentation/admin-guide/mm/index.rst  1
-rw-r--r--  Documentation/admin-guide/mm/memory-hotplug.rst (renamed from Documentation/memory-hotplug.txt)  169
-rw-r--r--  Documentation/admin-guide/pm/intel_pstate.rst  7
-rw-r--r--  Documentation/arm/00-INDEX  50
-rw-r--r--  Documentation/arm64/elf_hwcaps.txt  12
-rw-r--r--  Documentation/arm64/hugetlbpage.txt  38
-rw-r--r--  Documentation/arm64/silicon-errata.txt  1
-rw-r--r--  Documentation/block/00-INDEX  34
-rw-r--r--  Documentation/blockdev/00-INDEX  18
-rw-r--r--  Documentation/blockdev/README.DAC960  756
-rw-r--r--  Documentation/blockdev/zram.txt  2
-rw-r--r--  Documentation/cdrom/00-INDEX  11
-rw-r--r--  Documentation/cgroup-v1/00-INDEX  26
-rw-r--r--  Documentation/cgroup-v1/rdma.txt  2
-rw-r--r--  Documentation/conf.py  10
-rw-r--r--  Documentation/core-api/boot-time-mm.rst  4
-rw-r--r--  Documentation/core-api/gfp_mask-from-fs-io.rst  2
-rw-r--r--  Documentation/core-api/idr.rst  2
-rw-r--r--  Documentation/core-api/index.rst  3
-rw-r--r--  Documentation/core-api/memory-allocation.rst  122
-rw-r--r--  Documentation/core-api/memory-hotplug.rst  125
-rw-r--r--  Documentation/core-api/mm-api.rst  2
-rw-r--r--  Documentation/core-api/printk-formats.rst  11
-rw-r--r--  Documentation/dev-tools/coccinelle.rst  23
-rw-r--r--  Documentation/dev-tools/kselftest.rst  2
-rw-r--r--  Documentation/device-mapper/dm-flakey.txt  4
-rw-r--r--  Documentation/device-mapper/log-writes.txt  2
-rw-r--r--  Documentation/devicetree/00-INDEX  12
-rw-r--r--  Documentation/devicetree/bindings/ata/ahci-platform.txt  6
-rw-r--r--  Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt  1
-rw-r--r--  Documentation/devicetree/bindings/connector/usb-connector.txt  8
-rw-r--r--  Documentation/devicetree/bindings/dma/jz4780-dma.txt  14
-rw-r--r--  Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt  1
-rw-r--r--  Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt  1
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio.txt  142
-rw-r--r--  Documentation/devicetree/bindings/gpio/ingenic,gpio.txt  46
-rw-r--r--  Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt  65
-rw-r--r--  Documentation/devicetree/bindings/gpio/snps,creg-gpio.txt  21
-rw-r--r--  Documentation/devicetree/bindings/hwmon/ina3221.txt  44
-rw-r--r--  Documentation/devicetree/bindings/hwmon/ltc2978.txt  2
-rw-r--r--  Documentation/devicetree/bindings/input/gpio-keys.txt  2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt  85
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt  36
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt  5
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-an30259a.txt  43
-rw-r--r--  Documentation/devicetree/bindings/mfd/atmel-usart.txt (renamed from Documentation/devicetree/bindings/serial/atmel-usart.txt)  25
-rw-r--r--  Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt  17
-rw-r--r--  Documentation/devicetree/bindings/mips/mscc.txt  16
-rw-r--r--  Documentation/devicetree/bindings/mmc/arasan,sdhci.txt  1
-rw-r--r--  Documentation/devicetree/bindings/mmc/jz4740.txt  1
-rw-r--r--  Documentation/devicetree/bindings/mmc/mmci.txt  11
-rw-r--r--  Documentation/devicetree/bindings/mmc/mtk-sd.txt  2
-rw-r--r--  Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt  72
-rw-r--r--  Documentation/devicetree/bindings/mmc/renesas,mmcif.txt  4
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-sprd.txt  41
-rw-r--r--  Documentation/devicetree/bindings/mmc/tmio_mmc.txt  10
-rw-r--r--  Documentation/devicetree/bindings/mmc/uniphier-sd.txt  55
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt  3
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt  143
-rw-r--r--  Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt  21
-rw-r--r--  Documentation/devicetree/bindings/net/macb.txt  1
-rw-r--r--  Documentation/devicetree/bindings/net/marvell-pp2.txt  45
-rw-r--r--  Documentation/devicetree/bindings/net/micrel-ksz90x1.txt  28
-rw-r--r--  Documentation/devicetree/bindings/net/mscc-ocelot.txt  9
-rw-r--r--  Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt  21
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,ravb.txt  1
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt  6
-rw-r--r--  Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt  1
-rw-r--r--  Documentation/devicetree/bindings/pci/pci-keystone.txt  3
-rw-r--r--  Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt  1
-rw-r--r--  Documentation/devicetree/bindings/pci/rcar-pci.txt  2
-rw-r--r--  Documentation/devicetree/bindings/pci/ti-pci.txt  5
-rw-r--r--  Documentation/devicetree/bindings/phy/brcm-sata-phy.txt  1
-rw-r--r--  Documentation/devicetree/bindings/phy/phy-cadence-dp.txt  30
-rw-r--r--  Documentation/devicetree/bindings/phy/phy-ocelot-serdes.txt  43
-rw-r--r--  Documentation/devicetree/bindings/phy/phy-rockchip-inno-hdmi.txt  43
-rw-r--r--  Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt  23
-rw-r--r--  Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt  11
-rw-r--r--  Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt  10
-rw-r--r--  Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt  31
-rw-r--r--  Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt  45
-rw-r--r--  Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt  69
-rw-r--r--  Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt  57
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/brcm,bcm4708-pinmux.txt  57
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt  39
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt  2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/nuvoton,npcm7xx-pinctrl.txt  216
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt  2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,qcs404-pinctrl.txt  199
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,sdm660-pinctrl.txt  191
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt  3
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.txt  153
-rw-r--r--  Documentation/devicetree/bindings/power/reset/qcom,pon.txt  5
-rw-r--r--  Documentation/devicetree/bindings/power/supply/bq25890.txt  3
-rw-r--r--  Documentation/devicetree/bindings/power/supply/bq27xxx.txt  1
-rw-r--r--  Documentation/devicetree/bindings/power/supply/sc2731_charger.txt  40
-rw-r--r--  Documentation/devicetree/bindings/regulator/pfuze100.txt  5
-rw-r--r--  Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt  23
-rw-r--r--  Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt  12
-rw-r--r--  Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt  68
-rw-r--r--  Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt  6
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt  29
-rw-r--r--  Documentation/devicetree/bindings/sound/adi,adau1977.txt  54
-rw-r--r--  Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt  24
-rw-r--r--  Documentation/devicetree/bindings/sound/cs42l51.txt  17
-rw-r--r--  Documentation/devicetree/bindings/sound/maxim,max98088.txt  23
-rw-r--r--  Documentation/devicetree/bindings/sound/mikroe,mikroe-proto.txt  23
-rw-r--r--  Documentation/devicetree/bindings/sound/nau8822.txt  16
-rw-r--r--  Documentation/devicetree/bindings/sound/pcm3060.txt  17
-rw-r--r--  Documentation/devicetree/bindings/sound/qcom,q6afe.txt  18
-rw-r--r--  Documentation/devicetree/bindings/sound/renesas,rsnd.txt  5
-rw-r--r--  Documentation/devicetree/bindings/sound/st,sta32x.txt  9
-rw-r--r--  Documentation/devicetree/bindings/sound/st,stm32-sai.txt  7
-rw-r--r--  Documentation/devicetree/bindings/sound/sun4i-i2s.txt  2
-rw-r--r--  Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt  12
-rw-r--r--  Documentation/devicetree/bindings/sound/ts3a227e.txt  2
-rw-r--r--  Documentation/devicetree/bindings/sound/wm8782.txt  17
-rw-r--r--  Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt  39
-rw-r--r--  Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.txt  36
-rw-r--r--  Documentation/devicetree/bindings/spi/sh-msiof.txt  8
-rw-r--r--  Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt  2
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt  1
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-pxa2xx.txt  24
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-rspi.txt  4
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-slave-mt27xx.txt  32
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-sprd.txt  26
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-stm32-qspi.txt  44
-rw-r--r--  Documentation/devicetree/bindings/timer/renesas,cmt.txt  7
-rw-r--r--  Documentation/devicetree/bindings/trivial-devices.txt  1
-rw-r--r--  Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt  2
-rw-r--r--  Documentation/devicetree/bindings/usb/dwc3.txt  1
-rw-r--r--  Documentation/devicetree/bindings/usb/ehci-mv.txt  23
-rw-r--r--  Documentation/devicetree/bindings/usb/exynos-usb.txt  2
-rw-r--r--  Documentation/devicetree/bindings/usb/faraday,fotg210.txt  35
-rw-r--r--  Documentation/devicetree/bindings/usb/fcs,fusb302.txt  32
-rw-r--r--  Documentation/devicetree/bindings/usb/renesas_usb3.txt  6
-rw-r--r--  Documentation/devicetree/bindings/usb/renesas_usbhs.txt  11
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-ehci.txt  6
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-ohci.txt  6
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-xhci.txt  5
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt  1
-rw-r--r--  Documentation/driver-api/basics.rst  3
-rw-r--r--  Documentation/driver-api/firewire.rst  48
-rw-r--r--  Documentation/driver-api/fpga/fpga-mgr.rst  5
-rw-r--r--  Documentation/driver-api/gpio/board.rst  24
-rw-r--r--  Documentation/driver-api/gpio/consumer.rst  64
-rw-r--r--  Documentation/driver-api/gpio/driver.rst  23
-rw-r--r--  Documentation/driver-api/gpio/index.rst  2
-rw-r--r--  Documentation/driver-api/index.rst  3
-rw-r--r--  Documentation/driver-api/mtdnand.rst  34
-rw-r--r--  Documentation/driver-api/pci/index.rst  22
-rw-r--r--  Documentation/driver-api/pci/p2pdma.rst  145
-rw-r--r--  Documentation/driver-api/pci/pci.rst (renamed from Documentation/driver-api/pci.rst)  0
-rw-r--r--  Documentation/efi-stub.txt  17
-rw-r--r--  Documentation/fb/00-INDEX  75
-rw-r--r--  Documentation/fb/uvesafb.txt  5
-rw-r--r--  Documentation/fb/vesafb.txt  4
-rw-r--r--  Documentation/filesystems/00-INDEX  153
-rw-r--r--  Documentation/filesystems/dax.txt  2
-rw-r--r--  Documentation/filesystems/ext2.txt  2
-rw-r--r--  Documentation/filesystems/ext4/about.rst (renamed from Documentation/filesystems/ext4/ondisk/about.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/allocators.rst (renamed from Documentation/filesystems/ext4/ondisk/allocators.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/attributes.rst (renamed from Documentation/filesystems/ext4/ondisk/attributes.rst)  8
-rw-r--r--  Documentation/filesystems/ext4/bigalloc.rst (renamed from Documentation/filesystems/ext4/ondisk/bigalloc.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/bitmaps.rst (renamed from Documentation/filesystems/ext4/ondisk/bitmaps.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/blockgroup.rst (renamed from Documentation/filesystems/ext4/ondisk/blockgroup.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/blockmap.rst (renamed from Documentation/filesystems/ext4/ondisk/blockmap.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/blocks.rst (renamed from Documentation/filesystems/ext4/ondisk/blocks.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/checksums.rst (renamed from Documentation/filesystems/ext4/ondisk/checksums.rst)  2
-rw-r--r--  Documentation/filesystems/ext4/directory.rst (renamed from Documentation/filesystems/ext4/ondisk/directory.rst)  18
-rw-r--r--  Documentation/filesystems/ext4/dynamic.rst (renamed from Documentation/filesystems/ext4/ondisk/dynamic.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/eainode.rst (renamed from Documentation/filesystems/ext4/ondisk/eainode.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/ext4.rst  613
-rw-r--r--  Documentation/filesystems/ext4/globals.rst (renamed from Documentation/filesystems/ext4/ondisk/globals.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/group_descr.rst (renamed from Documentation/filesystems/ext4/ondisk/group_descr.rst)  4
-rw-r--r--  Documentation/filesystems/ext4/ifork.rst (renamed from Documentation/filesystems/ext4/ondisk/ifork.rst)  8
-rw-r--r--  Documentation/filesystems/ext4/index.rst  19
-rw-r--r--  Documentation/filesystems/ext4/inlinedata.rst (renamed from Documentation/filesystems/ext4/ondisk/inlinedata.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/inodes.rst (renamed from Documentation/filesystems/ext4/ondisk/inodes.rst)  19
-rw-r--r--  Documentation/filesystems/ext4/journal.rst (renamed from Documentation/filesystems/ext4/ondisk/journal.rst)  32
-rw-r--r--  Documentation/filesystems/ext4/mmp.rst (renamed from Documentation/filesystems/ext4/ondisk/mmp.rst)  2
-rw-r--r--  Documentation/filesystems/ext4/ondisk/index.rst  9
-rw-r--r--  Documentation/filesystems/ext4/overview.rst (renamed from Documentation/filesystems/ext4/ondisk/overview.rst)  0
-rw-r--r--  Documentation/filesystems/ext4/special_inodes.rst (renamed from Documentation/filesystems/ext4/ondisk/special_inodes.rst)  2
-rw-r--r--  Documentation/filesystems/ext4/super.rst (renamed from Documentation/filesystems/ext4/ondisk/super.rst)  24
-rw-r--r--  Documentation/filesystems/f2fs.txt  8
-rw-r--r--  Documentation/filesystems/fscrypt.rst  10
-rw-r--r--  Documentation/filesystems/nfs/00-INDEX  26
-rw-r--r--  Documentation/filesystems/porting  11
-rw-r--r--  Documentation/fmc/00-INDEX  38
-rw-r--r--  Documentation/gpio/00-INDEX  4
-rw-r--r--  Documentation/hwmon/ina3221  2
-rw-r--r--  Documentation/hwmon/lm75  6
-rw-r--r--  Documentation/hwmon/ltc2978  5
-rw-r--r--  Documentation/hwmon/mc13783-adc  8
-rw-r--r--  Documentation/ide/00-INDEX  14
-rw-r--r--  Documentation/index.rst  5
-rw-r--r--  Documentation/input/event-codes.rst  11
-rw-r--r--  Documentation/ioctl/00-INDEX  12
-rw-r--r--  Documentation/ioctl/ioctl-number.txt  2
-rw-r--r--  Documentation/isdn/00-INDEX  42
-rw-r--r--  Documentation/kbuild/00-INDEX  14
-rw-r--r--  Documentation/kernel-per-CPU-kthreads.txt  2
-rw-r--r--  Documentation/laptops/00-INDEX  16
-rw-r--r--  Documentation/leds/00-INDEX  32
-rw-r--r--  Documentation/locking/00-INDEX  16
-rw-r--r--  Documentation/locking/lockstat.txt  2
-rw-r--r--  Documentation/m68k/00-INDEX  7
-rw-r--r--  Documentation/media/uapi/dvb/video_function_calls.rst  1
-rw-r--r--  Documentation/memory-barriers.txt  3
-rw-r--r--  Documentation/mips/00-INDEX  4
-rw-r--r--  Documentation/mmc/00-INDEX  10
-rw-r--r--  Documentation/mtd/nand/pxa3xx-nand.txt  113
-rw-r--r--  Documentation/netlabel/00-INDEX  10
-rw-r--r--  Documentation/netlabel/cipso_ipv4.txt  11
-rw-r--r--  Documentation/netlabel/introduction.txt  2
-rw-r--r--  Documentation/networking/00-INDEX  234
-rw-r--r--  Documentation/networking/af_xdp.rst  4
-rw-r--r--  Documentation/networking/defza.txt  57
-rw-r--r--  Documentation/networking/devlink-params-bnxt.txt  18
-rw-r--r--  Documentation/networking/devlink-params.txt  42
-rw-r--r--  Documentation/networking/dpaa2/ethernet-driver.rst (renamed from drivers/staging/fsl-dpaa2/ethernet/ethernet-driver.rst)  0
-rw-r--r--  Documentation/networking/dpaa2/index.rst  1
-rw-r--r--  Documentation/networking/e100.rst  3
-rw-r--r--  Documentation/networking/e1000.rst  3
-rw-r--r--  Documentation/networking/e1000e.rst  382
-rw-r--r--  Documentation/networking/e1000e.txt  312
-rw-r--r--  Documentation/networking/filter.txt  94
-rw-r--r--  Documentation/networking/fm10k.rst  141
-rw-r--r--  Documentation/networking/i40e.rst  770
-rw-r--r--  Documentation/networking/i40e.txt  190
-rw-r--r--  Documentation/networking/i40evf.txt  54
-rw-r--r--  Documentation/networking/iavf.rst  281
-rw-r--r--  Documentation/networking/ice.rst  45
-rw-r--r--  Documentation/networking/ice.txt  39
-rw-r--r--  Documentation/networking/igb.rst  193
-rw-r--r--  Documentation/networking/igb.txt  129
-rw-r--r--  Documentation/networking/igbvf.rst  64
-rw-r--r--  Documentation/networking/igbvf.txt  80
-rw-r--r--  Documentation/networking/index.rst  10
-rw-r--r--  Documentation/networking/ip-sysctl.txt  10
-rw-r--r--  Documentation/networking/ixgb.rst  467
-rw-r--r--  Documentation/networking/ixgb.txt  433
-rw-r--r--  Documentation/networking/ixgbe.rst  527
-rw-r--r--  Documentation/networking/ixgbe.txt  349
-rw-r--r--  Documentation/networking/ixgbevf.rst  66
-rw-r--r--  Documentation/networking/ixgbevf.txt  52
-rw-r--r--  Documentation/networking/netvsc.txt  9
-rw-r--r--  Documentation/networking/rxrpc.txt  25
-rw-r--r--  Documentation/networking/tcp.txt  101
-rw-r--r--  Documentation/networking/xfrm_device.txt  4
-rw-r--r--  Documentation/parisc/00-INDEX  6
-rw-r--r--  Documentation/power/00-INDEX  44
-rw-r--r--  Documentation/power/swsusp.txt  2
-rw-r--r--  Documentation/powerpc/00-INDEX  34
-rw-r--r--  Documentation/preempt-locking.txt  12
-rw-r--r--  Documentation/process/2.Process.rst  2
-rw-r--r--  Documentation/process/adding-syscalls.rst  2
-rw-r--r--  Documentation/process/code-of-conduct-interpretation.rst  156
-rw-r--r--  Documentation/process/code-of-conduct.rst  25
-rw-r--r--  Documentation/process/deprecated.rst  119
-rw-r--r--  Documentation/process/howto.rst  13
-rw-r--r--  Documentation/process/index.rst  3
-rw-r--r--  Documentation/process/license-rules.rst  2
-rw-r--r--  Documentation/s390/00-INDEX  28
-rw-r--r--  Documentation/s390/vfio-ap.txt  837
-rw-r--r--  Documentation/scheduler/00-INDEX  18
-rw-r--r--  Documentation/scheduler/completion.txt  262
-rw-r--r--  Documentation/scsi/00-INDEX  108
-rw-r--r--  Documentation/scsi/ufs.txt  20
-rw-r--r--  Documentation/security/LSM.rst  2
-rw-r--r--  Documentation/security/keys/ecryptfs.rst  8
-rw-r--r--  Documentation/serial/00-INDEX  16
-rw-r--r--  Documentation/sound/hd-audio/models.rst  2
-rw-r--r--  Documentation/sound/kernel-api/writing-an-alsa-driver.rst  307
-rw-r--r--  Documentation/sphinx-static/theme_overrides.css  38
-rw-r--r--  Documentation/spi/00-INDEX  16
-rw-r--r--  Documentation/switchtec.txt  30
-rw-r--r--  Documentation/sysctl/00-INDEX  16
-rw-r--r--  Documentation/timers/00-INDEX  16
-rw-r--r--  Documentation/trace/ftrace.rst  3
-rw-r--r--  Documentation/trace/histogram.rst  6
-rw-r--r--  Documentation/virtual/00-INDEX  11
-rw-r--r--  Documentation/virtual/kvm/00-INDEX  35
-rw-r--r--  Documentation/virtual/kvm/api.txt  145
-rw-r--r--  Documentation/vm/00-INDEX  50
-rw-r--r--  Documentation/vm/hmm.rst  14
-rw-r--r--  Documentation/w1/00-INDEX  10
-rw-r--r--  Documentation/w1/masters/00-INDEX  12
-rw-r--r--  Documentation/w1/slaves/00-INDEX  14
-rw-r--r--  Documentation/x86/00-INDEX  20
-rw-r--r--  Documentation/x86/boot.txt  32
-rw-r--r--  Documentation/x86/intel_rdt_ui.txt  22
-rw-r--r--  Documentation/x86/x86_64/00-INDEX  16
-rw-r--r--  Documentation/x86/x86_64/mm.txt  171
-rw-r--r--  LICENSES/other/CC-BY-SA-4.0  397
-rw-r--r--  LICENSES/other/CDDL-1.0  6
-rw-r--r--  LICENSES/other/ISC  24
-rw-r--r--  MAINTAINERS  319
-rw-r--r--  Makefile  35
-rw-r--r--  README  1
-rw-r--r--  arch/Kconfig  3
-rw-r--r--  arch/alpha/include/asm/unistd.h  2
-rw-r--r--  arch/alpha/include/uapi/asm/siginfo.h  1
-rw-r--r--  arch/alpha/kernel/entry.S  53
-rw-r--r--  arch/alpha/kernel/systbls.S  318
-rw-r--r--  arch/arc/Kconfig  6
-rw-r--r--  arch/arc/Makefile  26
-rw-r--r--  arch/arc/include/uapi/asm/unistd.h  1
-rw-r--r--  arch/arc/kernel/process.c  20
-rw-r--r--  arch/arc/kernel/traps.c  22
-rw-r--r--  arch/arc/kernel/vmlinux.lds.S  1
-rw-r--r--  arch/arc/mm/dma.c  41
-rw-r--r--  arch/arc/mm/fault.c  20
-rw-r--r--  arch/arm/Kconfig.debug  45
-rw-r--r--  arch/arm/Makefile  6
-rw-r--r--  arch/arm/boot/compressed/head.S  43
-rw-r--r--  arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts  2
-rw-r--r--  arch/arm/boot/dts/bcm63138.dtsi  14
-rw-r--r--  arch/arm/boot/dts/imx53-qsb-common.dtsi  11
-rw-r--r--  arch/arm/boot/dts/imx7d.dtsi  5
-rw-r--r--  arch/arm/boot/dts/sama5d3_emac.dtsi  2
-rw-r--r--  arch/arm/boot/dts/stm32mp157c.dtsi  4
-rw-r--r--  arch/arm/boot/dts/sun8i-r40.dtsi  3
-rw-r--r--  arch/arm/crypto/Kconfig  7
-rw-r--r--  arch/arm/crypto/Makefile  2
-rw-r--r--  arch/arm/crypto/chacha20-neon-core.S  277
-rw-r--r--  arch/arm/crypto/crc32-ce-glue.c  2
-rw-r--r--  arch/arm/crypto/ghash-ce-core.S  108
-rw-r--r--  arch/arm/crypto/ghash-ce-glue.c  38
-rw-r--r--  arch/arm/crypto/speck-neon-core.S  434
-rw-r--r--  arch/arm/crypto/speck-neon-glue.c  288
-rw-r--r--  arch/arm/include/asm/assembler.h  11
-rw-r--r--  arch/arm/include/asm/bug.h  4
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  2
-rw-r--r--  arch/arm/include/asm/ftrace.h  3
-rw-r--r--  arch/arm/include/asm/io.h  15
-rw-r--r--  arch/arm/include/asm/kvm_arm.h  4
-rw-r--r--  arch/arm/include/asm/kvm_host.h  13
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h  20
-rw-r--r--  arch/arm/include/asm/paravirt.h  9
-rw-r--r--  arch/arm/include/asm/stage2_pgtable.h  54
-rw-r--r--  arch/arm/include/asm/thread_info.h  4
-rw-r--r--  arch/arm/include/asm/topology.h  3
-rw-r--r--  arch/arm/include/asm/uaccess.h  49
-rw-r--r--  arch/arm/include/asm/unistd.h  4
-rw-r--r--  arch/arm/kernel/armksyms.c  3
-rw-r--r--  arch/arm/kernel/entry-common.S  9
-rw-r--r--  arch/arm/kernel/entry-ftrace.S  75
-rw-r--r--  arch/arm/kernel/ftrace.c  51
-rw-r--r--  arch/arm/kernel/paravirt.c  4
-rw-r--r--  arch/arm/kernel/ptrace.c  11
-rw-r--r--  arch/arm/kernel/signal.c  80
-rw-r--r--  arch/arm/kernel/swp_emulate.c  16
-rw-r--r--  arch/arm/kernel/sys_oabi-compat.c  8
-rw-r--r--  arch/arm/kernel/traps.c  63
-rw-r--r--  arch/arm/kernel/vmlinux-xip.lds.S  1
-rw-r--r--  arch/arm/kernel/vmlinux.lds.h  2
-rw-r--r--  arch/arm/kvm/coproc.c  8
-rw-r--r--  arch/arm/lib/copy_from_user.S  6
-rw-r--r--  arch/arm/lib/copy_to_user.S  6
-rw-r--r--  arch/arm/lib/uaccess_with_memcpy.c  3
-rw-r--r--  arch/arm/mach-at91/pm_suspend.S  8
-rw-r--r--  arch/arm/mach-davinci/board-neuros-osd2.c  8
-rw-r--r--  arch/arm/mach-ep93xx/core.c  9
-rw-r--r--  arch/arm/mach-ep93xx/snappercl15.c  15
-rw-r--r--  arch/arm/mach-ep93xx/ts72xx.c  16
-rw-r--r--  arch/arm/mach-imx/mach-mx21ads.c  12
-rw-r--r--  arch/arm/mach-imx/mach-mx27ads.c  12
-rw-r--r--  arch/arm/mach-imx/mach-qong.c  17
-rw-r--r--  arch/arm/mach-integrator/integrator_cp.c  2
-rw-r--r--  arch/arm/mach-ixp4xx/ixdp425-setup.c  6
-rw-r--r--  arch/arm/mach-mmp/brownstone.c  12
-rw-r--r--  arch/arm/mach-mmp/devices.c  11
-rw-r--r--  arch/arm/mach-omap1/board-ams-delta.c  12
-rw-r--r--  arch/arm/mach-omap1/board-fsample.c  5
-rw-r--r--  arch/arm/mach-omap1/board-h2.c  5
-rw-r--r--  arch/arm/mach-omap1/board-h3.c  4
-rw-r--r--  arch/arm/mach-omap1/board-nand.c  5
-rw-r--r--  arch/arm/mach-omap1/board-perseus2.c  5
-rw-r--r--  arch/arm/mach-omap1/common.h  4
-rw-r--r--  arch/arm/mach-omap2/hsmmc.h  2
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c  18
-rw-r--r--  arch/arm/mach-omap2/pm24xx.c  7
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c  14
-rw-r--r--  arch/arm/mach-orion5x/ts78xx-setup.c  27
-rw-r--r--  arch/arm/mach-pxa/balloon3.c  13
-rw-r--r--  arch/arm/mach-pxa/em-x270.c  15
-rw-r--r--  arch/arm/mach-pxa/ezx.c  33
-rw-r--r--  arch/arm/mach-pxa/magician.c  2
-rw-r--r--  arch/arm/mach-pxa/palmtreo.c  31
-rw-r--r--  arch/arm/mach-pxa/palmtx.c  10
-rw-r--r--  arch/arm/mach-pxa/raumfeld.c  12
-rw-r--r--  arch/arm/mach-pxa/zeus.c  23
-rw-r--r--  arch/arm/mach-s3c64xx/mach-crag6410.c  1
-rw-r--r--  arch/arm/mach-s3c64xx/mach-smdk6410.c  1
-rw-r--r--  arch/arm/mach-sa1100/assabet.c  21
-rw-r--r--  arch/arm/mach-sa1100/generic.c  5
-rw-r--r--  arch/arm/mach-sa1100/generic.h  3
-rw-r--r--  arch/arm/mach-sa1100/shannon.c  4
-rw-r--r--  arch/arm/mach-versatile/versatile_dt.c  4
-rw-r--r--  arch/arm/mm/alignment.c  10
-rw-r--r--  arch/arm/mm/dma-mapping-nommu.c  11
-rw-r--r--  arch/arm/mm/fault.c  28
-rw-r--r--  arch/arm/mm/ioremap.c  2
-rw-r--r--  arch/arm/tools/syscall.tbl  1
-rw-r--r--  arch/arm/vfp/vfpmodule.c  31
-rw-r--r--  arch/arm/xen/enlighten.c  34
-rw-r--r--  arch/arm64/Kconfig  35
-rw-r--r--  arch/arm64/Kconfig.platforms  1
-rw-r--r--  arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi  43
-rw-r--r--  arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts  2
-rw-r--r--  arch/arm64/configs/defconfig  2
-rw-r--r--  arch/arm64/crypto/Kconfig  11
-rw-r--r--  arch/arm64/crypto/Makefile  6
-rw-r--r--  arch/arm64/crypto/aes-ce.S  5
-rw-r--r--  arch/arm64/crypto/aes-glue.c  217
-rw-r--r--  arch/arm64/crypto/aes-modes.S  416
-rw-r--r--  arch/arm64/crypto/aes-neon.S  6
-rw-r--r--  arch/arm64/crypto/crc32-ce-core.S  287
-rw-r--r--  arch/arm64/crypto/crc32-ce-glue.c  244
-rw-r--r--  arch/arm64/crypto/crct10dif-ce-core.S  314
-rw-r--r--  arch/arm64/crypto/crct10dif-ce-glue.c  14
-rw-r--r--  arch/arm64/crypto/speck-neon-core.S  352
-rw-r--r--  arch/arm64/crypto/speck-neon-glue.c  282
-rw-r--r--  arch/arm64/include/asm/assembler.h  7
-rw-r--r--  arch/arm64/include/asm/cache.h  40
-rw-r--r--  arch/arm64/include/asm/compat.h  27
-rw-r--r--  arch/arm64/include/asm/compiler.h  30
-rw-r--r--  arch/arm64/include/asm/cpucaps.h  7
-rw-r--r--  arch/arm64/include/asm/cpufeature.h  30
-rw-r--r--  arch/arm64/include/asm/cputype.h  2
-rw-r--r--  arch/arm64/include/asm/daifflags.h  15
-rw-r--r--  arch/arm64/include/asm/esr.h  77
-rw-r--r--  arch/arm64/include/asm/io.h  9
-rw-r--r--  arch/arm64/include/asm/jump_label.h  38
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h  2
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h  156
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h  3
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h  2
-rw-r--r--  arch/arm64/include/asm/kvm_host.h  29
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h  10
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h  47
-rw-r--r--  arch/arm64/include/asm/mmu.h  3
-rw-r--r--  arch/arm64/include/asm/mmu_context.h  17
-rw-r--r--  arch/arm64/include/asm/page.h  2
-rw-r--r--  arch/arm64/include/asm/paravirt.h  9
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h  2
-rw-r--r--  arch/arm64/include/asm/pgtable.h  48
-rw-r--r--  arch/arm64/include/asm/processor.h  11
-rw-r--r--  arch/arm64/include/asm/ptrace.h  4
-rw-r--r--  arch/arm64/include/asm/stage2_pgtable-nopmd.h  42
-rw-r--r--  arch/arm64/include/asm/stage2_pgtable-nopud.h  39
-rw-r--r--  arch/arm64/include/asm/stage2_pgtable.h  236
-rw-r--r--  arch/arm64/include/asm/stat.h  2
-rw-r--r--  arch/arm64/include/asm/sysreg.h  44
-rw-r--r--  arch/arm64/include/asm/system_misc.h  3
-rw-r--r--  arch/arm64/include/asm/tlb.h  34
-rw-r--r--  arch/arm64/include/asm/tlbflush.h  112
-rw-r--r--  arch/arm64/include/asm/topology.h  3
-rw-r--r--  arch/arm64/include/asm/traps.h  5
-rw-r--r--  arch/arm64/include/asm/uaccess.h  1
-rw-r--r--  arch/arm64/include/asm/unistd.h  2
-rw-r--r--  arch/arm64/include/asm/xen/events.h  2
-rw-r--r--  arch/arm64/include/uapi/asm/Kbuild  1
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h  1
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h  1
-rw-r--r--  arch/arm64/include/uapi/asm/siginfo.h  24
-rw-r--r--  arch/arm64/include/uapi/asm/unistd.h  1
-rw-r--r--  arch/arm64/kernel/cpu_errata.c  96
-rw-r--r--  arch/arm64/kernel/cpufeature.c  195
-rw-r--r--  arch/arm64/kernel/cpuinfo.c  11
-rw-r--r--  arch/arm64/kernel/debug-monitors.c  11
-rw-r--r--  arch/arm64/kernel/entry.S  18
-rw-r--r--  arch/arm64/kernel/fpsimd.c  10
-rw-r--r--  arch/arm64/kernel/head.S  40
-rw-r--r--  arch/arm64/kernel/jump_label.c  6
-rw-r--r--  arch/arm64/kernel/paravirt.c  4
-rw-r--r--  arch/arm64/kernel/pci.c  5
-rw-r--r--  arch/arm64/kernel/perf_event.c  7
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c  2
-rw-r--r--  arch/arm64/kernel/process.c  4
-rw-r--r--  arch/arm64/kernel/psci.c  1
-rw-r--r--  arch/arm64/kernel/ptrace.c  16
-rw-r--r--  arch/arm64/kernel/setup.c  60
-rw-r--r--  arch/arm64/kernel/sleep.S  1
-rw-r--r--  arch/arm64/kernel/ssbd.c  24
-rw-r--r--  arch/arm64/kernel/suspend.c  4
-rw-r--r--  arch/arm64/kernel/sys_compat.c  13
-rw-r--r--  arch/arm64/kernel/traps.c  276
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S  36
-rw-r--r--  arch/arm64/kvm/guest.c  61
-rw-r--r--  arch/arm64/kvm/handle_exit.c  7
-rw-r--r--  arch/arm64/kvm/hyp-init.S  3
-rw-r--r--  arch/arm64/kvm/hyp/Makefile  1
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S  16
-rw-r--r--  arch/arm64/kvm/hyp/s2-setup.c  90
-rw-r--r--  arch/arm64/kvm/hyp/switch.c  4
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c  30
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c  4
-rw-r--r--  arch/arm64/kvm/reset.c  108
-rw-r--r--  arch/arm64/lib/Makefile  4
-rw-r--r--  arch/arm64/lib/crc32.S  60
-rw-r--r--  arch/arm64/mm/context.c  11
-rw-r--r--  arch/arm64/mm/dump.c  6
-rw-r--r--  arch/arm64/mm/fault.c  132
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  50
-rw-r--r--  arch/arm64/mm/init.c  2
-rw-r--r--  arch/arm64/mm/kasan_init.c  2
-rw-r--r--  arch/arm64/mm/mmu.c  46
-rw-r--r--  arch/arm64/mm/numa.c  13
-rw-r--r--  arch/arm64/mm/proc.S  11
-rw-r--r--  arch/c6x/Kconfig  2
-rw-r--r--  arch/c6x/include/uapi/asm/unistd.h  1
-rw-r--r--  arch/h8300/include/uapi/asm/unistd.h  1
-rw-r--r--  arch/h8300/kernel/vmlinux.lds.S  1
-rw-r--r--  arch/hexagon/Kconfig  2
-rw-r--r--  arch/hexagon/include/asm/Kbuild  1
-rw-r--r--  arch/hexagon/include/asm/dma-mapping.h  40
-rw-r--r--  arch/hexagon/include/uapi/asm/unistd.h  1
-rw-r--r--  arch/hexagon/kernel/dma.c  143
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c  4
-rw-r--r--  arch/ia64/hp/sim/simserial.c  24
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h  7
-rw-r--r--  arch/ia64/include/asm/iommu.h  2
-rw-r--r--  arch/ia64/include/asm/machvec.h  7
-rw-r--r--  arch/ia64/include/asm/machvec_init.h  1
-rw-r--r--  arch/ia64/include/asm/machvec_sn2.h  2
-rw-r--r--  arch/ia64/include/asm/unistd.h  3
-rw-r--r--  arch/ia64/include/uapi/asm/siginfo.h  2
-rw-r--r--  arch/ia64/kernel/brl_emu.c  31
-rw-r--r--  arch/ia64/kernel/efi.c  1
-rw-r--r--  arch/ia64/kernel/machvec.c  16
-rw-r--r--  arch/ia64/kernel/pci-dma.c  55
-rw-r--r--  arch/ia64/kernel/signal.c  60
-rw-r--r--  arch/ia64/kernel/traps.c  144
-rw-r--r--  arch/ia64/kernel/unaligned.c  12
-rw-r--r--  arch/ia64/mm/fault.c  12
-rw-r--r--  arch/ia64/pci/pci.c  26
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c  33
-rw-r--r--  arch/m68k/Kconfig  2
-rw-r--r--  arch/m68k/configs/amiga_defconfig  2
-rw-r--r--  arch/m68k/configs/apollo_defconfig  2
-rw-r--r--  arch/m68k/configs/atari_defconfig  2
-rw-r--r--  arch/m68k/configs/bvme6000_defconfig  2
-rw-r--r--  arch/m68k/configs/hp300_defconfig  2
-rw-r--r--  arch/m68k/configs/mac_defconfig  2
-rw-r--r--  arch/m68k/configs/multi_defconfig  2
-rw-r--r--  arch/m68k/configs/mvme147_defconfig  2
-rw-r--r--  arch/m68k/configs/mvme16x_defconfig  2
-rw-r--r--  arch/m68k/configs/q40_defconfig  2
-rw-r--r--  arch/m68k/configs/sun3_defconfig  2
-rw-r--r--  arch/m68k/configs/sun3x_defconfig  2
-rw-r--r--  arch/m68k/emu/nfblock.c  2
-rw-r--r--  arch/m68k/include/asm/atafd.h  13
-rw-r--r--  arch/m68k/include/asm/atafdreg.h  80
-rw-r--r--  arch/m68k/include/asm/unistd.h  2
-rw-r--r--  arch/microblaze/Kconfig  4
-rw-r--r--  arch/microblaze/include/asm/pgtable.h  2
-rw-r--r--  arch/microblaze/include/asm/unistd.h  2
-rw-r--r--  arch/microblaze/kernel/dma.c  22
-rw-r--r--  arch/microblaze/kernel/vmlinux.lds.S  2
-rw-r--r--  arch/microblaze/mm/consistent.c  3
-rw-r--r--  arch/mips/Kconfig  7
-rw-r--r--  arch/mips/alchemy/devboards/db1200.c  14
-rw-r--r--  arch/mips/alchemy/devboards/db1300.c  14
-rw-r--r--  arch/mips/alchemy/devboards/db1550.c  14
-rw-r--r--  arch/mips/boot/dts/ingenic/jz4740.dtsi  15
-rw-r--r--  arch/mips/boot/dts/ingenic/jz4770.dtsi  30
-rw-r--r--  arch/mips/boot/dts/ingenic/jz4780.dtsi  3
-rw-r--r--  arch/mips/boot/dts/mscc/ocelot.dtsi  19
-rw-r--r--  arch/mips/include/asm/Kbuild  1
-rw-r--r--  arch/mips/include/asm/compat.h  28
-rw-r--r--  arch/mips/include/asm/device.h  19
-rw-r--r--  arch/mips/include/asm/dma-coherence.h  6
-rw-r--r--  arch/mips/include/asm/dma-mapping.h  4
-rw-r--r--  arch/mips/include/asm/processor.h  10
-rw-r--r--  arch/mips/include/asm/unistd.h  3
-rw-r--r--  arch/mips/include/asm/vr41xx/giu.h  8
-rw-r--r--  arch/mips/include/uapi/asm/siginfo.h  11
-rw-r--r--  arch/mips/jazz/jazzdma.c  7
-rw-r--r--  arch/mips/kernel/binfmt_elfn32.c  14
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c  14
-rw-r--r--  arch/mips/kernel/process.c  25
-rw-r--r--  arch/mips/kernel/setup.c  50
-rw-r--r--  arch/mips/kernel/vdso.c  18
-rw-r--r--  arch/mips/lantiq/xway/dma.c  1
-rw-r--r--  arch/mips/lantiq/xway/sysctrl.c  14
-rw-r--r--  arch/mips/lib/memset.S  4
-rw-r--r--  arch/mips/mm/c-r4k.c  17
-rw-r--r--  arch/mips/mm/dma-noncoherent.c  79
-rw-r--r--  arch/mips/netlogic/xlr/platform-flash.c  7
-rw-r--r--  arch/mips/pnx833x/common/platform.c  8
-rw-r--r--  arch/mips/rb532/devices.c  10
-rw-r--r--  arch/nds32/Kconfig  2
-rw-r--r--  arch/nds32/include/uapi/asm/unistd.h  1
-rw-r--r--  arch/nios2/Kconfig  2
-rw-r--r--  arch/nios2/include/uapi/asm/unistd.h  1
-rw-r--r--  arch/openrisc/Kconfig  2
-rw-r--r--  arch/openrisc/include/uapi/asm/unistd.h  1
-rw-r--r--  arch/parisc/Kconfig  2
-rw-r--r--  arch/parisc/Makefile  9
-rw-r--r--  arch/parisc/boot/compressed/Makefile  4
-rw-r--r--  arch/parisc/boot/compressed/misc.c  95
-rw-r--r--  arch/parisc/boot/compressed/vmlinux.lds.S  10
-rw-r--r--  arch/parisc/include/asm/alternative.h  47
-rw-r--r--  arch/parisc/include/asm/assembly.h  9
-rw-r--r--  arch/parisc/include/asm/cache.h  22
-rw-r--r--  arch/parisc/include/asm/compat.h  24
-rw-r--r--  arch/parisc/include/asm/page.h  12
-rw-r--r--  arch/parisc/include/asm/pdc.h  1
-rw-r--r--  arch/parisc/include/asm/pdcpat.h  62
-rw-r--r--  arch/parisc/include/asm/pgtable.h  33
-rw-r--r--  arch/parisc/include/asm/sections.h  2
-rw-r--r--  arch/parisc/include/asm/spinlock.h  4
-rw-r--r--  arch/parisc/include/asm/tlbflush.h  3
-rw-r--r--  arch/parisc/include/asm/unistd.h  3
-rw-r--r--  arch/parisc/include/uapi/asm/Kbuild  1
-rw-r--r--  arch/parisc/include/uapi/asm/siginfo.h  11
-rw-r--r--  arch/parisc/kernel/cache.c  63
-rw-r--r--  arch/parisc/kernel/entry.S  34
-rw-r--r--  arch/parisc/kernel/firmware.c  57
-rw-r--r--  arch/parisc/kernel/hpmc.S  3
-rw-r--r--  arch/parisc/kernel/inventory.c  10
-rw-r--r--  arch/parisc/kernel/pacache.S  280
-rw-r--r--  arch/parisc/kernel/setup.c  83
-rw-r--r--  arch/parisc/kernel/signal.c  1
-rw-r--r--  arch/parisc/kernel/syscall.S  12
-rw-r--r--  arch/parisc/kernel/traps.c  7
-rw-r--r--  arch/parisc/kernel/unwind.c  2
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S  6
-rw-r--r--  arch/parisc/mm/init.c  23
-rw-r--r--  arch/powerpc/include/asm/asm-prototypes.h  21
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu-hash.h  12
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h  5
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush-radix.h  1
-rw-r--r--  arch/powerpc/include/asm/bug.h  2
-rw-r--r--  arch/powerpc/include/asm/compat.h  24
-rw-r--r--  arch/powerpc/include/asm/hvcall.h  41
-rw-r--r--  arch/powerpc/include/asm/iommu.h  4
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h  4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h  45
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h  118
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h  3
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h  4
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h  16
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h  8
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h  1
-rw-r--r--  arch/powerpc/include/asm/pnv-pci.h  2
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h  1
-rw-r--r--  arch/powerpc/include/asm/ptrace.h  2
-rw-r--r--  arch/powerpc/include/asm/reg.h  2
-rw-r--r--  arch/powerpc/include/asm/setup.h  1
-rw-r--r--  arch/powerpc/include/asm/unistd.h  3
-rw-r--r--  arch/powerpc/include/uapi/asm/Kbuild  1
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h  1
-rw-r--r--  arch/powerpc/include/uapi/asm/siginfo.h  18
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c  13
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power.S  4
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S  4
-rw-r--r--  arch/powerpc/kernel/iommu.c  25
-rw-r--r--  arch/powerpc/kernel/process.c  19
-rw-r--r--  arch/powerpc/kernel/tm.S  20
-rw-r--r--  arch/powerpc/kernel/traps.c  34
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S  2
-rw-r--r--  arch/powerpc/kvm/Makefile  3
-rw-r--r--  arch/powerpc/kvm/book3s.c  46
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c  7
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c  797
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c  94
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio_hv.c  120
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c  13
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c  873
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c  92
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S  95
-rw-r--r--  arch/powerpc/kvm/book3s_hv_nested.c  1291
-rw-r--r--  arch/powerpc/kvm/book3s_hv_ras.c  10
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_xics.c  13
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  809
-rw-r--r--  arch/powerpc/kvm/book3s_hv_tm.c  6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_tm_builtin.c  5
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c  5
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c  14
-rw-r--r--  arch/powerpc/kvm/book3s_xive.c  63
-rw-r--r--  arch/powerpc/kvm/book3s_xive_template.c  8
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S  8
-rw-r--r--  arch/powerpc/kvm/emulate_loadstore.c  1
-rw-r--r--  arch/powerpc/kvm/powerpc.c  15
-rw-r--r--  arch/powerpc/kvm/tm.S  250
-rw-r--r--  arch/powerpc/kvm/trace_book3s.h  1
-rw-r--r--  arch/powerpc/lib/checksum_64.S  3
-rw-r--r--  arch/powerpc/lib/code-patching.c  14
-rw-r--r--  arch/powerpc/mm/fault.c  55
-rw-r--r--  arch/powerpc/mm/init_64.c  49
-rw-r--r--  arch/powerpc/mm/mem.c  2
-rw-r--r--  arch/powerpc/mm/mmu_context_iommu.c  34
-rw-r--r--  arch/powerpc/mm/numa.c  12
-rw-r--r--  arch/powerpc/mm/pkeys.c  2
-rw-r--r--  arch/powerpc/mm/tlb-radix.c  9
-rw-r--r--  arch/powerpc/oprofile/backtrace.c  2
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c  4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/fault.c  26
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda-tce.c  2
-rw-r--r--  arch/riscv/Kconfig  52
-rw-r--r--  arch/riscv/Kconfig.debug  35
-rw-r--r--  arch/riscv/Makefile  21
-rw-r--r--  arch/riscv/include/asm/Kbuild  1
-rw-r--r--  arch/riscv/include/asm/asm-prototypes.h  7
-rw-r--r--  arch/riscv/include/asm/futex.h  128
-rw-r--r--  arch/riscv/include/asm/processor.h  2
-rw-r--r--  arch/riscv/include/asm/smp.h  47
-rw-r--r--  arch/riscv/include/asm/switch_to.h  12
-rw-r--r--  arch/riscv/include/asm/tlbflush.h  16
-rw-r--r--  arch/riscv/include/asm/unistd.h  1
-rw-r--r--  arch/riscv/include/uapi/asm/Kbuild  1
-rw-r--r--  arch/riscv/include/uapi/asm/elf.h  3
-rw-r--r--  arch/riscv/include/uapi/asm/siginfo.h  24
-rw-r--r--  arch/riscv/kernel/Makefile  1
-rw-r--r--  arch/riscv/kernel/cacheinfo.c  7
-rw-r--r--  arch/riscv/kernel/cpu.c  87
-rw-r--r--  arch/riscv/kernel/cpufeature.c  15
-rw-r--r--  arch/riscv/kernel/entry.S  88
-rw-r--r--  arch/riscv/kernel/fpu.S  106
-rw-r--r--  arch/riscv/kernel/head.S  4
-rw-r--r--  arch/riscv/kernel/irq.c  12
-rw-r--r--  arch/riscv/kernel/mcount.S  1
-rw-r--r--  arch/riscv/kernel/process.c  6
-rw-r--r--  arch/riscv/kernel/ptrace.c  52
-rw-r--r--  arch/riscv/kernel/setup.c  15
-rw-r--r--  arch/riscv/kernel/signal.c  75
-rw-r--r--  arch/riscv/kernel/smp.c  82
-rw-r--r--  arch/riscv/kernel/smpboot.c  46
-rw-r--r--  arch/riscv/lib/Makefile  3
-rw-r--r--  arch/riscv/mm/ioremap.c  2
-rw-r--r--  arch/s390/Kconfig  21
-rw-r--r--  arch/s390/Makefile  2
-rw-r--r--  arch/s390/appldata/appldata_base.c  33
-rw-r--r--  arch/s390/boot/.gitignore  1
-rw-r--r--  arch/s390/boot/Makefile  24
-rw-r--r--  arch/s390/boot/boot.h  11
-rw-r--r--  arch/s390/boot/cmdline.c  2
-rw-r--r--  arch/s390/boot/compressed/Makefile  37
-rw-r--r--  arch/s390/boot/compressed/decompressor.c  85
-rw-r--r--  arch/s390/boot/compressed/decompressor.h  25
-rw-r--r--  arch/s390/boot/compressed/head.S  52
-rw-r--r--  arch/s390/boot/compressed/misc.c  116
-rw-r--r--  arch/s390/boot/compressed/vmlinux.lds.S  24
-rw-r--r--  arch/s390/boot/compressed/vmlinux.scr.lds.S  15
-rw-r--r--  arch/s390/boot/ctype.c  2
-rw-r--r--  arch/s390/boot/head.S  12
-rw-r--r--  arch/s390/boot/ipl_parm.c  182
-rw-r--r--  arch/s390/boot/ipl_vmparm.c  2
-rw-r--r--  arch/s390/boot/mem_detect.c  182
-rw-r--r--  arch/s390/boot/startup.c  64
-rw-r--r--  arch/s390/boot/string.c  138
-rw-r--r--  arch/s390/configs/debug_defconfig  1
-rw-r--r--  arch/s390/configs/performance_defconfig  1
-rw-r--r--  arch/s390/crypto/aes_s390.c  48
-rw-r--r--  arch/s390/crypto/paes_s390.c  63
-rw-r--r--  arch/s390/defconfig  2
-rw-r--r--  arch/s390/hypfs/hypfs_sprp.c  42
-rw-r--r--  arch/s390/include/asm/appldata.h  19
-rw-r--r--  arch/s390/include/asm/boot_data.h  11
-rw-r--r--  arch/s390/include/asm/ccwgroup.h  2
-rw-r--r--  arch/s390/include/asm/compat.h  18
-rw-r--r--  arch/s390/include/asm/facility.h  9
-rw-r--r--  arch/s390/include/asm/ipl.h  4
-rw-r--r--  arch/s390/include/asm/jump_label.h  40
-rw-r--r--  arch/s390/include/asm/kasan.h  30
-rw-r--r--  arch/s390/include/asm/kvm_host.h  15
-rw-r--r--  arch/s390/include/asm/lowcore.h  4
-rw-r--r--  arch/s390/include/asm/mem_detect.h  82
-rw-r--r--  arch/s390/include/asm/mmu.h  2
-rw-r--r--  arch/s390/include/asm/mmu_context.h  1
-rw-r--r--  arch/s390/include/asm/page.h  1
-rw-r--r--  arch/s390/include/asm/pgtable.h  20
-rw-r--r--  arch/s390/include/asm/pkey.h  26
-rw-r--r--  arch/s390/include/asm/processor.h  53
-rw-r--r--  arch/s390/include/asm/qdio.h  2
-rw-r--r--  arch/s390/include/asm/sclp.h  8
-rw-r--r--  arch/s390/include/asm/sections.h  12
-rw-r--r--  arch/s390/include/asm/setup.h  3
-rw-r--r--  arch/s390/include/asm/string.h  21
-rw-r--r--  arch/s390/include/asm/thread_info.h  13
-rw-r--r--  arch/s390/include/asm/unistd.h  3
-rw-r--r--  arch/s390/include/asm/vmlinux.lds.h  20
-rw-r--r--  arch/s390/include/uapi/asm/Kbuild  1
-rw-r--r--  arch/s390/include/uapi/asm/kvm.h  2
-rw-r--r--  arch/s390/include/uapi/asm/pkey.h  34
-rw-r--r--  arch/s390/include/uapi/asm/siginfo.h  17
-rw-r--r--  arch/s390/include/uapi/asm/zcrypt.h  19
-rw-r--r--  arch/s390/kernel/Makefile  6
-rw-r--r--  arch/s390/kernel/asm-offsets.c  2
-rw-r--r--  arch/s390/kernel/base.S  2
-rw-r--r--  arch/s390/kernel/dumpstack.c  10
-rw-r--r--  arch/s390/kernel/early.c  47
-rw-r--r--  arch/s390/kernel/early_nobss.c  24
-rw-r--r--  arch/s390/kernel/early_printk.c  2
-rw-r--r--  arch/s390/kernel/entry.S  53
-rw-r--r--  arch/s390/kernel/entry.h  3
-rw-r--r--  arch/s390/kernel/head64.S  6
-rw-r--r--  arch/s390/kernel/ipl.c  119
-rw-r--r--  arch/s390/kernel/ipl_vmparm.c  36
-rw-r--r--  arch/s390/kernel/irq.c  10
-rw-r--r--  arch/s390/kernel/jump_label.c  11
-rw-r--r--  arch/s390/kernel/machine_kexec.c  17
-rw-r--r--  arch/s390/kernel/module.c  15
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c  6
-rw-r--r--  arch/s390/kernel/setup.c  210
-rw-r--r--  arch/s390/kernel/smp.c  87
-rw-r--r--  arch/s390/kernel/sthyi.c  8
-rw-r--r--  arch/s390/kernel/swsusp.S  15
-rw-r--r--  arch/s390/kernel/vdso.c  8
-rw-r--r--  arch/s390/kernel/vdso32/Makefile  3
-rw-r--r--  arch/s390/kernel/vdso32/clock_gettime.S  19
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S  3
-rw-r--r--  arch/s390/kernel/vdso64/Makefile  3
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S  25
-rw-r--r--  arch/s390/kernel/vdso64/gettimeofday.S  3
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S  17
-rw-r--r--  arch/s390/kvm/kvm-s390.c  188
-rw-r--r--  arch/s390/kvm/kvm-s390.h  1
-rw-r--r--  arch/s390/kvm/vsie.c  210
-rw-r--r--  arch/s390/lib/Makefile  4
-rw-r--r--  arch/s390/lib/mem.S  12
-rw-r--r--  arch/s390/mm/Makefile  6
-rw-r--r--  arch/s390/mm/dump_pagetables.c  58
-rw-r--r--  arch/s390/mm/fault.c  38
-rw-r--r--  arch/s390/mm/gmap.c  14
-rw-r--r--  arch/s390/mm/init.c  5
-rw-r--r--  arch/s390/mm/kasan_init.c  387
-rw-r--r--  arch/s390/mm/maccess.c  25
-rw-r--r--  arch/s390/mm/mem_detect.c  62
-rw-r--r--  arch/s390/purgatory/head.S  4
-rw-r--r--  arch/s390/tools/gen_facilities.c  2
-rw-r--r--  arch/sh/Kconfig  3
-rw-r--r--  arch/sh/boards/mach-ecovec24/setup.c  27
-rw-r--r--  arch/sh/boards/mach-migor/setup.c  14
-rw-r--r--  arch/sh/include/asm/unistd.h  2
-rw-r--r--  arch/sparc/Kconfig  2
-rw-r--r--  arch/sparc/include/asm/compat.h  25
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h  2
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h  4
-rw-r--r--  arch/sparc/include/asm/parport.h  2
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h  4
-rw-r--r--  arch/sparc/include/asm/unistd.h  3
-rw-r--r--  arch/sparc/include/asm/vdso.h  4
-rw-r--r--  arch/sparc/include/uapi/asm/siginfo.h  7
-rw-r--r--  arch/sparc/include/uapi/asm/unistd.h  3
-rw-r--r--  arch/sparc/kernel/kgdb_32.c  2
-rw-r--r--  arch/sparc/kernel/kgdb_64.c  2
-rw-r--r--  arch/sparc/kernel/perf_event.c  26
-rw-r--r--  arch/sparc/kernel/rtrap_64.S  3
-rw-r--r--  arch/sparc/kernel/systbls_32.S  2
-rw-r--r--  arch/sparc/kernel/systbls_64.S  4
-rw-r--r--  arch/sparc/kernel/time_64.c  3
-rw-r--r--  arch/sparc/kernel/viohs.c  12
-rw-r--r--  arch/sparc/vdso/Makefile  41
-rw-r--r--  arch/sparc/vdso/checkundef.sh  10
-rw-r--r--  arch/sparc/vdso/vclock_gettime.c  221
-rw-r--r--  arch/sparc/vdso/vdso-layout.lds.S  6
-rw-r--r--  arch/sparc/vdso/vdso.lds.S  2
-rw-r--r--  arch/sparc/vdso/vdso2c.c  6
-rw-r--r--  arch/sparc/vdso/vdso2c.h  1
-rw-r--r--  arch/sparc/vdso/vdso32/vdso32.lds.S  2
-rw-r--r--  arch/sparc/vdso/vma.c  237
-rw-r--r--  arch/um/drivers/ubd_kern.c  236
-rw-r--r--  arch/um/include/asm/common.lds.S  2
-rw-r--r--  arch/unicore32/Kconfig  2
-rw-r--r--  arch/unicore32/include/asm/Kbuild  1
-rw-r--r--  arch/unicore32/include/asm/bug.h  3
-rw-r--r--  arch/unicore32/include/asm/dma-mapping.h  22
-rw-r--r--  arch/unicore32/include/uapi/asm/unistd.h  1
-rw-r--r--  arch/unicore32/kernel/fpu-ucf64.c  12
-rw-r--r--  arch/unicore32/kernel/traps.c  5
-rw-r--r--  arch/unicore32/mm/fault.c  27
-rw-r--r--  arch/unicore32/mm/init.c  3
-rw-r--r--  arch/x86/Kconfig  26
-rw-r--r--  arch/x86/Kconfig.cpu  14
-rw-r--r--  arch/x86/Kconfig.debug  1
-rw-r--r--  arch/x86/Makefile  8
-rw-r--r--  arch/x86/boot/compressed/Makefile  1
-rw-r--r--  arch/x86/boot/compressed/eboot.c  10
-rw-r--r--  arch/x86/boot/compressed/kaslr.c  18
-rw-r--r--  arch/x86/boot/compressed/mem_encrypt.S  19
-rw-r--r--  arch/x86/boot/compressed/misc.h  1
-rw-r--r--  arch/x86/boot/header.S  6
-rw-r--r--  arch/x86/boot/tools/build.c  7
-rw-r--r--  arch/x86/configs/i386_defconfig  1
-rw-r--r--  arch/x86/configs/x86_64_defconfig  1
-rw-r--r--  arch/x86/crypto/Makefile  5
-rw-r--r--  arch/x86/crypto/aegis128-aesni-glue.c  1
-rw-r--r--  arch/x86/crypto/aegis128l-aesni-glue.c  1
-rw-r--r--  arch/x86/crypto/aegis256-aesni-glue.c  1
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c  47
-rw-r--r--  arch/x86/crypto/fpu.c  207
-rw-r--r--  arch/x86/crypto/morus1280-sse2-glue.c  1
-rw-r--r--  arch/x86/crypto/morus640-sse2-glue.c  1
-rw-r--r--  arch/x86/crypto/sha1-mb/Makefile  14
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb.c  1011
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_ctx.h  134
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_mgr.h  110
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S  287
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S  304
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c  64
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S  209
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_x8_avx2.S  492
-rw-r--r--  arch/x86/crypto/sha256-mb/Makefile  14
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb.c  1013
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_ctx.h  134
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_mgr.h  108
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S  304
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S  307
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c  65
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S  214
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_x8_avx2.S  598
-rw-r--r--  arch/x86/crypto/sha512-mb/Makefile  12
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb.c  1047
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb_ctx.h  128
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb_mgr.h  104
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S  281
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S  297
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S  224
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_x4_avx2.S  531
-rw-r--r--  arch/x86/entry/calling.h  2
-rw-r--r--  arch/x86/entry/entry_32.S  21
-rw-r--r--  arch/x86/entry/entry_64.S  138
-rw-r--r--  arch/x86/entry/vdso/Makefile  16
-rw-r--r--  arch/x86/entry/vdso/vclock_gettime.c  220
-rw-r--r--  arch/x86/entry/vdso/vgetcpu.c  8
-rw-r--r--  arch/x86/entry/vdso/vma.c  38
-rw-r--r--  arch/x86/entry/vsyscall/vsyscall_64.c  9
-rw-r--r--  arch/x86/entry/vsyscall/vsyscall_gtod.c  51
-rw-r--r--  arch/x86/events/amd/core.c  4
-rw-r--r--  arch/x86/events/amd/uncore.c  30
-rw-r--r--  arch/x86/events/core.c  41
-rw-r--r--  arch/x86/events/intel/core.c  346
-rw-r--r--  arch/x86/events/intel/cstate.c  8
-rw-r--r--  arch/x86/events/intel/pt.c  2
-rw-r--r--  arch/x86/events/intel/rapl.c  4
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c  14
-rw-r--r--  arch/x86/events/msr.c  8
-rw-r--r--  arch/x86/events/perf_event.h  4
-rw-r--r--  arch/x86/hyperv/Makefile  4
-rw-r--r--  arch/x86/hyperv/hv_apic.c  9
-rw-r--r--  arch/x86/hyperv/hv_init.c  19
-rw-r--r--  arch/x86/hyperv/hv_spinlock.c  88
-rw-r--r--  arch/x86/hyperv/mmu.c  4
-rw-r--r--  arch/x86/include/asm/acpi.h  7
-rw-r--r--  arch/x86/include/asm/alternative-asm.h  20
-rw-r--r--  arch/x86/include/asm/alternative.h  11
-rw-r--r--  arch/x86/include/asm/amd_nb.h  3
-rw-r--r--  arch/x86/include/asm/asm.h  57
-rw-r--r--  arch/x86/include/asm/atomic.h  8
-rw-r--r--  arch/x86/include/asm/atomic64_64.h  8
-rw-r--r--  arch/x86/include/asm/bitops.h  9
-rw-r--r--  arch/x86/include/asm/bug.h  98
-rw-r--r--  arch/x86/include/asm/cacheinfo.h  1
-rw-r--r--  arch/x86/include/asm/cmpxchg.h  10
-rw-r--r--  arch/x86/include/asm/compat.h  21
-rw-r--r--  arch/x86/include/asm/cpu_entry_area.h  2
-rw-r--r--  arch/x86/include/asm/cpufeature.h  82
-rw-r--r--  arch/x86/include/asm/debugreg.h  2
-rw-r--r--  arch/x86/include/asm/desc.h  4
-rw-r--r--  arch/x86/include/asm/efi.h  1
-rw-r--r--  arch/x86/include/asm/elf.h  9
-rw-r--r--  arch/x86/include/asm/extable.h  3
-rw-r--r--  arch/x86/include/asm/fixmap.h  12
-rw-r--r--  arch/x86/include/asm/fpu/internal.h  4
-rw-r--r--  arch/x86/include/asm/fsgsbase.h  49
-rw-r--r--  arch/x86/include/asm/futex.h  6
-rw-r--r--  arch/x86/include/asm/hyperv-tlfs.h  21
-rw-r--r--  arch/x86/include/asm/intel-family.h  33
-rw-r--r--  arch/x86/include/asm/io.h  15
-rw-r--r--  arch/x86/include/asm/irqflags.h  16
-rw-r--r--  arch/x86/include/asm/jump_label.h  80
-rw-r--r--  arch/x86/include/asm/kexec.h  2
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h  4
-rw-r--r--  arch/x86/include/asm/kvm_host.h  75
-rw-r--r--  arch/x86/include/asm/local.h  8
-rw-r--r--  arch/x86/include/asm/mce.h  55
-rw-r--r--  arch/x86/include/asm/mem_encrypt.h  7
-rw-r--r--  arch/x86/include/asm/mmu_context.h  4
-rw-r--r--  arch/x86/include/asm/mpx.h  12
-rw-r--r--  arch/x86/include/asm/mshyperv.h  2
-rw-r--r--  arch/x86/include/asm/msr-index.h  1
-rw-r--r--  arch/x86/include/asm/msr.h  4
-rw-r--r--  arch/x86/include/asm/nospec-branch.h  17
-rw-r--r--  arch/x86/include/asm/page_64_types.h  15
-rw-r--r--  arch/x86/include/asm/paravirt.h  415
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  138
-rw-r--r--  arch/x86/include/asm/percpu.h  8
-rw-r--r--  arch/x86/include/asm/perf_event.h  9
-rw-r--r--  arch/x86/include/asm/pgalloc.h  2
-rw-r--r--  arch/x86/include/asm/pgtable-3level_types.h  2
-rw-r--r--  arch/x86/include/asm/pgtable.h  7
-rw-r--r--  arch/x86/include/asm/pgtable_64.h  3
-rw-r--r--  arch/x86/include/asm/pgtable_types.h  2
-rw-r--r--  arch/x86/include/asm/preempt.h  2
-rw-r--r--  arch/x86/include/asm/processor.h  13
-rw-r--r--  arch/x86/include/asm/ptrace.h  48
-rw-r--r--  arch/x86/include/asm/qspinlock.h  15
-rw-r--r--  arch/x86/include/asm/refcount.h  79
-rw-r--r--  arch/x86/include/asm/rmwcc.h  69
-rw-r--r--  arch/x86/include/asm/sections.h  1
-rw-r--r--  arch/x86/include/asm/segment.h  48
-rw-r--r--  arch/x86/include/asm/special_insns.h  4
-rw-r--r--  arch/x86/include/asm/string_64.h  20
-rw-r--r--  arch/x86/include/asm/suspend.h  8
-rw-r--r--  arch/x86/include/asm/suspend_32.h  4
-rw-r--r--  arch/x86/include/asm/tlb.h  21
-rw-r--r--  arch/x86/include/asm/tlbflush.h  33
-rw-r--r--  arch/x86/include/asm/trace/mpx.h  4
-rw-r--r--  arch/x86/include/asm/uaccess.h  22
-rw-r--r--  arch/x86/include/asm/unistd.h  3
-rw-r--r--  arch/x86/include/asm/uv/uv.h  6
-rw-r--r--  arch/x86/include/asm/vgtod.h  79
-rw-r--r--  arch/x86/include/asm/virtext.h  7
-rw-r--r--  arch/x86/include/asm/vmx.h  13
-rw-r--r--  arch/x86/include/asm/x86_init.h  2
-rw-r--r--  arch/x86/include/asm/xen/events.h  2
-rw-r--r--  arch/x86/include/uapi/asm/bootparam.h  4
-rw-r--r--  arch/x86/include/uapi/asm/kvm.h  9
-rw-r--r--  arch/x86/include/uapi/asm/siginfo.h  2
-rw-r--r--  arch/x86/kernel/acpi/boot.c  6
-rw-r--r--  arch/x86/kernel/alternative.c  6
-rw-r--r--  arch/x86/kernel/amd_gart_64.c  6
-rw-r--r--  arch/x86/kernel/amd_nb.c  49
-rw-r--r--  arch/x86/kernel/apic/apic.c  7
-rw-r--r--  arch/x86/kernel/apic/probe_32.c  1
-rw-r--r--  arch/x86/kernel/apic/vector.c  9
-rw-r--r--  arch/x86/kernel/asm-offsets.c  18
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c  9
-rw-r--r--  arch/x86/kernel/check.c  28
-rw-r--r--  arch/x86/kernel/cpu/Makefile  1
-rw-r--r--  arch/x86/kernel/cpu/amd.c  2
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  62
-rw-r--r--  arch/x86/kernel/cpu/cacheinfo.c  31
-rw-r--r--  arch/x86/kernel/cpu/common.c  77
-rw-r--r--  arch/x86/kernel/cpu/cpu.h  1
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c  2
-rw-r--r--  arch/x86/kernel/cpu/hygon.c  408
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt.c  17
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt.h  23
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c  39
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c  405
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_rdtgroup.c  263
-rw-r--r--  arch/x86/kernel/cpu/mcheck/dev-mcelog.c  3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-inject.c  6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-severity.c  3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c  22
-rw-r--r--  arch/x86/kernel/cpu/mshyperv.c  14
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cleanup.c  3
-rw-r--r--  arch/x86/kernel/cpu/mtrr/mtrr.c  2
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c  2
-rw-r--r--  arch/x86/kernel/cpu/vmware.c  4
-rw-r--r--  arch/x86/kernel/crash_dump_64.c  60
-rw-r--r--  arch/x86/kernel/fpu/signal.c  1
-rw-r--r--  arch/x86/kernel/head32.c  1
-rw-r--r--  arch/x86/kernel/head64.c  22
-rw-r--r--  arch/x86/kernel/head_64.S  18
-rw-r--r--  arch/x86/kernel/jump_label.c  62
-rw-r--r--  arch/x86/kernel/kprobes/core.c  48
-rw-r--r--  arch/x86/kernel/kprobes/opt.c  2
-rw-r--r--  arch/x86/kernel/kvm.c  19
-rw-r--r--  arch/x86/kernel/kvmclock.c  56
-rw-r--r--  arch/x86/kernel/ldt.c  2
-rw-r--r--  arch/x86/kernel/macros.S  16
-rw-r--r--  arch/x86/kernel/module.c  6
-rw-r--r--  arch/x86/kernel/paravirt-spinlocks.c  15
-rw-r--r--  arch/x86/kernel/paravirt.c  320
-rw-r--r--  arch/x86/kernel/paravirt_patch_32.c  87
-rw-r--r--  arch/x86/kernel/paravirt_patch_64.c  97
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c  2
-rw-r--r--  arch/x86/kernel/process_64.c  185
-rw-r--r--  arch/x86/kernel/ptrace.c  57
-rw-r--r--  arch/x86/kernel/setup.c  19
-rw-r--r--  arch/x86/kernel/smpboot.c  4
-rw-r--r--  arch/x86/kernel/time.c  24
-rw-r--r--  arch/x86/kernel/traps.c  196
-rw-r--r--  arch/x86/kernel/tsc.c  14
-rw-r--r--  arch/x86/kernel/tsc_msr.c  10
-rw-r--r--  arch/x86/kernel/umip.c  8
-rw-r--r--  arch/x86/kernel/uprobes.c  2
-rw-r--r--  arch/x86/kernel/vm86_32.c  2
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S  29
-rw-r--r--  arch/x86/kernel/vsmp_64.c  26
-rw-r--r--  arch/x86/kernel/x86_init.c  3
-rw-r--r--  arch/x86/kvm/emulate.c  11
-rw-r--r--  arch/x86/kvm/hyperv.c  280
-rw-r--r--  arch/x86/kvm/hyperv.h  4
-rw-r--r--  arch/x86/kvm/lapic.c  67
-rw-r--r--  arch/x86/kvm/lapic.h  2
-rw-r--r--  arch/x86/kvm/mmu.c  425
-rw-r--r--  arch/x86/kvm/mmu.h  13
-rw-r--r--  arch/x86/kvm/mmu_audit.c  12
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  15
-rw-r--r--  arch/x86/kvm/svm.c  77
-rw-r--r--  arch/x86/kvm/trace.h  42
-rw-r--r--  arch/x86/kvm/vmx.c  2544
-rw-r--r--  arch/x86/kvm/vmx_shadow_fields.h  5
-rw-r--r--  arch/x86/kvm/x86.c  347
-rw-r--r--  arch/x86/kvm/x86.h  2
-rw-r--r--  arch/x86/lib/checksum_32.S  4
-rw-r--r--  arch/x86/lib/copy_user_64.S  90
-rw-r--r--  arch/x86/lib/csum-copy_64.S  8
-rw-r--r--  arch/x86/lib/getuser.S  12
-rw-r--r--  arch/x86/lib/putuser.S  10
-rw-r--r--  arch/x86/lib/usercopy_32.c  126
-rw-r--r--  arch/x86/lib/usercopy_64.c  8
-rw-r--r--  arch/x86/mm/cpu_entry_area.c  36
-rw-r--r--  arch/x86/mm/dump_pagetables.c  35
-rw-r--r--  arch/x86/mm/extable.c  114
-rw-r--r--  arch/x86/mm/fault.c  478
-rw-r--r--  arch/x86/mm/init.c  4
-rw-r--r--  arch/x86/mm/init_32.c  23
-rw-r--r--  arch/x86/mm/ioremap.c  24
-rw-r--r--  arch/x86/mm/mem_encrypt.c  24
-rw-r--r--  arch/x86/mm/mem_encrypt_identity.c  1
-rw-r--r--  arch/x86/mm/mpx.c  30
-rw-r--r--  arch/x86/mm/pageattr.c  627
-rw-r--r--  arch/x86/mm/pgtable.c  19
-rw-r--r--  arch/x86/mm/pti.c  33
-rw-r--r--  arch/x86/mm/tlb.c  198
-rw-r--r--  arch/x86/pci/acpi.c  2
-rw-r--r--  arch/x86/pci/amd_bus.c  6
-rw-r--r--  arch/x86/pci/fixup.c  12
-rw-r--r--  arch/x86/platform/atom/punit_atom_debug.c  6
-rw-r--r--  arch/x86/platform/efi/early_printk.c  8
-rw-r--r--  arch/x86/platform/efi/efi_64.c  10
-rw-r--r--  arch/x86/platform/efi/quirks.c  78
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c  17
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_bt.c  2
-rw-r--r--  arch/x86/platform/olpc/olpc-xo1-rtc.c  3
-rw-r--r--  arch/x86/platform/ts5500/ts5500.c  1
-rw-r--r--  arch/x86/power/Makefile  2
-rw-r--r--  arch/x86/power/hibernate.c  248
-rw-r--r--  arch/x86/power/hibernate_32.c  52
-rw-r--r--  arch/x86/power/hibernate_64.c  224
-rw-r--r--  arch/x86/power/hibernate_asm_32.S  37
-rw-r--r--  arch/x86/power/hibernate_asm_64.S  2
-rw-r--r--  arch/x86/tools/relocs.c  10
-rw-r--r--  arch/x86/um/asm/elf.h  3
-rw-r--r--  arch/x86/xen/Kconfig  2
-rw-r--r--  arch/x86/xen/Makefile  41
-rw-r--r--  arch/x86/xen/efi.c  14
-rw-r--r--  arch/x86/xen/enlighten.c  3
-rw-r--r--  arch/x86/xen/enlighten_hvm.c  2
-rw-r--r--  arch/x86/xen/enlighten_pv.c  31
-rw-r--r--  arch/x86/xen/enlighten_pvh.c  3
-rw-r--r--  arch/x86/xen/grant-table.c  25
-rw-r--r--  arch/x86/xen/irq.c  2
-rw-r--r--  arch/x86/xen/mmu.c  188
-rw-r--r--  arch/x86/xen/mmu_hvm.c  2
-rw-r--r--  arch/x86/xen/mmu_pv.c  176
-rw-r--r--  arch/x86/xen/p2m.c  2
-rw-r--r--  arch/x86/xen/pci-swiotlb-xen.c  2
-rw-r--r--  arch/x86/xen/platform-pci-unplug.c  19
-rw-r--r--  arch/x86/xen/pmu.c  15
-rw-r--r--  arch/x86/xen/smp_pv.c  2
-rw-r--r--  arch/x86/xen/spinlock.c  11
-rw-r--r--  arch/x86/xen/time.c  4
-rw-r--r--  arch/x86/xen/vdso.h  2
-rw-r--r--  arch/x86/xen/xen-asm_64.S  8
-rw-r--r--  arch/x86/xen/xen-pvh.S  15
-rw-r--r--  arch/xtensa/Kconfig  2
-rw-r--r--  arch/xtensa/include/asm/unistd.h  2
-rw-r--r--  arch/xtensa/kernel/Makefile  4
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S  1
-rw-r--r--  block/Kconfig  10
-rw-r--r--  block/Kconfig.iosched  3
-rw-r--r--  block/Makefile  1
-rw-r--r--  block/bfq-cgroup.c  4
-rw-r--r--  block/bfq-iosched.c  291
-rw-r--r--  block/bfq-iosched.h  53
-rw-r--r--  block/bfq-wf2q.c  49
-rw-r--r--  block/bio-integrity.c  12
-rw-r--r--  block/bio.c  220
-rw-r--r--  block/blk-cgroup.c  123
-rw-r--r--  block/blk-core.c  280
-rw-r--r--  block/blk-flush.c  6
-rw-r--r--  block/blk-integrity.c  12
-rw-r--r--  block/blk-iolatency.c  230
-rw-r--r--  block/blk-lib.c  28
-rw-r--r--  block/blk-merge.c  88
-rw-r--r--  block/blk-mq-debugfs.c  13
-rw-r--r--  block/blk-mq-sched.h  4
-rw-r--r--  block/blk-mq-tag.c  78
-rw-r--r--  block/blk-mq.c  215
-rw-r--r--  block/blk-pm.c  216
-rw-r--r--  block/blk-pm.h  69
-rw-r--r--  block/blk-softirq.c  5
-rw-r--r--  block/blk-stat.c  1
-rw-r--r--  block/blk-throttle.c  54
-rw-r--r--  block/blk-wbt.c  2
-rw-r--r--  block/blk.h  73
-rw-r--r--  block/bounce.c  41
-rw-r--r--  block/cfq-iosched.c  16
-rw-r--r--  block/elevator.c  24
-rw-r--r--  block/genhd.c  25
-rw-r--r--  block/kyber-iosched.c  547
-rw-r--r--  block/partition-generic.c  6
-rw-r--r--crypto/Kconfig101
-rw-r--r--crypto/Makefile4
-rw-r--r--crypto/aegis.h20
-rw-r--r--crypto/af_alg.c2
-rw-r--r--crypto/ahash.c25
-rw-r--r--crypto/algapi.c17
-rw-r--r--crypto/algboss.c2
-rw-r--r--crypto/algif_aead.c12
-rw-r--r--crypto/algif_hash.c2
-rw-r--r--crypto/authenc.c8
-rw-r--r--crypto/authencesn.c8
-rw-r--r--crypto/ccm.c9
-rw-r--r--crypto/chacha20_generic.c7
-rw-r--r--crypto/cryptd.c32
-rw-r--r--crypto/crypto_null.c11
-rw-r--r--crypto/crypto_user_base.c (renamed from crypto/crypto_user.c)9
-rw-r--r--crypto/crypto_user_stat.c463
-rw-r--r--crypto/echainiv.c4
-rw-r--r--crypto/gcm.c8
-rw-r--r--crypto/internal.h8
-rw-r--r--crypto/lrw.c339
-rw-r--r--crypto/mcryptd.c675
-rw-r--r--crypto/morus1280.c7
-rw-r--r--crypto/morus640.c16
-rw-r--r--crypto/ofb.c225
-rw-r--r--crypto/rng.c1
-rw-r--r--crypto/rsa-pkcs1pad.c9
-rw-r--r--crypto/seqiv.c4
-rw-r--r--crypto/shash.c33
-rw-r--r--crypto/skcipher.c24
-rw-r--r--crypto/speck.c307
-rw-r--r--crypto/tcrypt.c27
-rw-r--r--crypto/tcrypt.h1
-rw-r--r--crypto/testmgr.c42
-rw-r--r--crypto/testmgr.h863
-rw-r--r--crypto/xcbc.c8
-rw-r--r--crypto/xts.c269
-rw-r--r--drivers/acpi/Kconfig9
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpi_adxl.c192
-rw-r--r--drivers/acpi/acpi_ipmi.c4
-rw-r--r--drivers/acpi/acpi_lpit.c6
-rw-r--r--drivers/acpi/acpi_lpss.c120
-rw-r--r--drivers/acpi/acpi_pad.c1
-rw-r--r--drivers/acpi/acpi_processor.c7
-rw-r--r--drivers/acpi/acpi_tad.c201
-rw-r--r--drivers/acpi/acpica/Makefile1
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h23
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h10
-rw-r--r--drivers/acpi/acpica/dsopcode.c4
-rw-r--r--drivers/acpi/acpica/evregion.c17
-rw-r--r--drivers/acpi/acpica/evrgnini.c6
-rw-r--r--drivers/acpi/acpica/evxfregn.c1
-rw-r--r--drivers/acpi/acpica/exfield.c326
-rw-r--r--drivers/acpi/acpica/exserial.c360
-rw-r--r--drivers/acpi/acpica/psloop.c16
-rw-r--r--drivers/acpi/acpica/tbxfload.c3
-rw-r--r--drivers/acpi/arm64/iort.c2
-rw-r--r--drivers/acpi/bus.c44
-rw-r--r--drivers/acpi/cppc_acpi.c8
-rw-r--r--drivers/acpi/custom_method.c3
-rw-r--r--drivers/acpi/glue.c2
-rw-r--r--drivers/acpi/nfit/core.c297
-rw-r--r--drivers/acpi/nfit/intel.h38
-rw-r--r--drivers/acpi/nfit/nfit.h21
-rw-r--r--drivers/acpi/osl.c16
-rw-r--r--drivers/acpi/pci_root.c17
-rw-r--r--drivers/acpi/pmic/intel_pmic_bxtwc.c12
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c1
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtwc.c10
-rw-r--r--drivers/acpi/pmic/intel_pmic_crc.c16
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c16
-rw-r--r--drivers/acpi/pmic/tps68470_pmic.c2
-rw-r--r--drivers/acpi/pptt.c33
-rw-r--r--drivers/acpi/processor_idle.c1
-rw-r--r--drivers/acpi/property.c97
-rw-r--r--drivers/acpi/sbs.c8
-rw-r--r--drivers/acpi/sbshc.c2
-rw-r--r--drivers/acpi/scan.c11
-rw-r--r--drivers/acpi/x86/apple.c6
-rw-r--r--drivers/acpi/x86/utils.c2
-rw-r--r--drivers/ata/Kconfig5
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_brcm.c8
-rw-r--r--drivers/ata/ahci_platform.c15
-rw-r--r--drivers/ata/ahci_sunxi.c3
-rw-r--r--drivers/ata/libahci_platform.c54
-rw-r--r--drivers/ata/libata-core.c14
-rw-r--r--drivers/ata/libata-scsi.c4
-rw-r--r--drivers/ata/pata_atiixp.c2
-rw-r--r--drivers/ata/pata_ep93xx.c8
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/atm/eni.c3
-rw-r--r--drivers/atm/fore200e.c401
-rw-r--r--drivers/atm/fore200e.h8
-rw-r--r--drivers/atm/nicstar.c3
-rw-r--r--drivers/atm/zatm.c42
-rw-r--r--drivers/auxdisplay/hd44780.c61
-rw-r--r--drivers/base/arch_topology.c26
-rw-r--r--drivers/base/cacheinfo.c2
-rw-r--r--drivers/base/dd.c12
-rw-r--r--drivers/base/firmware_loader/main.c7
-rw-r--r--drivers/base/platform-msi.c14
-rw-r--r--drivers/base/platform.c11
-rw-r--r--drivers/base/power/domain.c20
-rw-r--r--drivers/base/power/main.c5
-rw-r--r--drivers/base/regmap/internal.h5
-rw-r--r--drivers/base/regmap/regmap.c99
-rw-r--r--drivers/block/DAC960.c7229
-rw-r--r--drivers/block/DAC960.h4414
-rw-r--r--drivers/block/Kconfig13
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/amiflop.c318
-rw-r--r--drivers/block/aoe/aoe.h5
-rw-r--r--drivers/block/aoe/aoeblk.c70
-rw-r--r--drivers/block/aoe/aoecmd.c19
-rw-r--r--drivers/block/aoe/aoedev.c15
-rw-r--r--drivers/block/ataflop.c273
-rw-r--r--drivers/block/cryptoloop.c22
-rw-r--r--drivers/block/drbd/Kconfig1
-rw-r--r--drivers/block/drbd/drbd_int.h15
-rw-r--r--drivers/block/drbd/drbd_main.c16
-rw-r--r--drivers/block/drbd/drbd_nl.c39
-rw-r--r--drivers/block/drbd/drbd_protocol.h4
-rw-r--r--drivers/block/drbd/drbd_receiver.c35
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/drbd/drbd_worker.c65
-rw-r--r--drivers/block/floppy.c71
-rw-r--r--drivers/block/loop.c5
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c49
-rw-r--r--drivers/block/null_blk_main.c111
-rw-r--r--drivers/block/paride/pcd.c88
-rw-r--r--drivers/block/paride/pd.c94
-rw-r--r--drivers/block/paride/pf.c56
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/ps3disk.c88
-rw-r--r--drivers/block/ps3vram.c2
-rw-r--r--drivers/block/rsxx/core.c4
-rw-r--r--drivers/block/rsxx/cregs.c2
-rw-r--r--drivers/block/rsxx/dev.c2
-rw-r--r--drivers/block/rsxx/dma.c52
-rw-r--r--drivers/block/skd_main.c69
-rw-r--r--drivers/block/sunvdc.c16
-rw-r--r--drivers/block/swim.c106
-rw-r--r--drivers/block/swim3.c211
-rw-r--r--drivers/block/sx8.c166
-rw-r--r--drivers/block/umem.c42
-rw-r--r--drivers/block/virtio_blk.c68
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/block/xsysace.c80
-rw-r--r--drivers/block/z2ram.c87
-rw-r--r--drivers/block/zram/Kconfig2
-rw-r--r--drivers/block/zram/zram_drv.c28
-rw-r--r--drivers/bluetooth/ath3k.c11
-rw-r--r--drivers/bluetooth/bt3c_cs.c9
-rw-r--r--drivers/bluetooth/btbcm.c1
-rw-r--r--drivers/bluetooth/btrsi.c13
-rw-r--r--drivers/bluetooth/btrtl.c10
-rw-r--r--drivers/bluetooth/btsdio.c14
-rw-r--r--drivers/bluetooth/btusb.c2
-rw-r--r--drivers/bluetooth/hci_ldisc.c3
-rw-r--r--drivers/bluetooth/hci_qca.c71
-rw-r--r--drivers/bluetooth/hci_serdev.c10
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c5
-rw-r--r--drivers/bus/ts-nbus.c20
-rw-r--r--drivers/cdrom/cdrom.c29
-rw-r--r--drivers/cdrom/gdrom.c174
-rw-r--r--drivers/char/hw_random/core.c4
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c66
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c11
-rw-r--r--drivers/char/ipmi/ipmi_dmi.c31
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c4
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c53
-rw-r--r--drivers/char/ipmi/ipmi_powernv.c4
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c67
-rw-r--r--drivers/char/ipmi/ipmi_si_hardcode.c9
-rw-r--r--drivers/char/ipmi/ipmi_si_hotmod.c17
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c28
-rw-r--r--drivers/char/ipmi/ipmi_si_mem_io.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_pci.c21
-rw-r--r--drivers/char/ipmi/ipmi_si_platform.c26
-rw-r--r--drivers/char/ipmi/ipmi_smic_sm.c26
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c307
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c52
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c4
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c2
-rw-r--r--drivers/char/pcmcia/synclink_cs.c3
-rw-r--r--drivers/char/random.c24
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/tpm-dev-common.c147
-rw-r--r--drivers/char/tpm/tpm-dev.c11
-rw-r--r--drivers/char/tpm/tpm-dev.h18
-rw-r--r--drivers/char/tpm/tpm-interface.c30
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm2-cmd.c4
-rw-r--r--drivers/char/tpm/tpmrm-dev.c15
-rw-r--r--drivers/char/tpm/xen-tpmfront.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.c10
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c18
-rw-r--r--drivers/clocksource/Makefile26
-rw-r--r--drivers/clocksource/arm_arch_timer.c15
-rw-r--r--drivers/clocksource/asm9260_timer.c2
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c20
-rw-r--r--drivers/clocksource/pxa_timer.c6
-rw-r--r--drivers/clocksource/renesas-ostm.c11
-rw-r--r--drivers/clocksource/riscv_timer.c12
-rw-r--r--drivers/clocksource/sh_cmt.c106
-rw-r--r--drivers/clocksource/sh_mtu2.c10
-rw-r--r--drivers/clocksource/sh_tmu.c10
-rw-r--r--drivers/clocksource/timer-armada-370-xp.c (renamed from drivers/clocksource/time-armada-370-xp.c)0
-rw-r--r--drivers/clocksource/timer-atmel-pit.c20
-rw-r--r--drivers/clocksource/timer-cadence-ttc.c (renamed from drivers/clocksource/cadence_ttc_timer.c)2
-rw-r--r--drivers/clocksource/timer-efm32.c (renamed from drivers/clocksource/time-efm32.c)0
-rw-r--r--drivers/clocksource/timer-fsl-ftm.c (renamed from drivers/clocksource/fsl_ftm_timer.c)0
-rw-r--r--drivers/clocksource/timer-fttmr010.c18
-rw-r--r--drivers/clocksource/timer-integrator-ap.c2
-rw-r--r--drivers/clocksource/timer-lpc32xx.c (renamed from drivers/clocksource/time-lpc32xx.c)0
-rw-r--r--drivers/clocksource/timer-orion.c (renamed from drivers/clocksource/time-orion.c)8
-rw-r--r--drivers/clocksource/timer-owl.c (renamed from drivers/clocksource/owl-timer.c)0
-rw-r--r--drivers/clocksource/timer-pistachio.c (renamed from drivers/clocksource/time-pistachio.c)0
-rw-r--r--drivers/clocksource/timer-qcom.c (renamed from drivers/clocksource/qcom-timer.c)0
-rw-r--r--drivers/clocksource/timer-sp804.c2
-rw-r--r--drivers/clocksource/timer-ti-32k.c3
-rw-r--r--drivers/clocksource/timer-versatile.c (renamed from drivers/clocksource/versatile.c)0
-rw-r--r--drivers/clocksource/timer-vf-pit.c (renamed from drivers/clocksource/vf_pit_timer.c)0
-rw-r--r--drivers/clocksource/timer-vt8500.c (renamed from drivers/clocksource/vt8500_timer.c)0
-rw-r--r--drivers/clocksource/timer-zevio.c (renamed from drivers/clocksource/zevio-timer.c)8
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c5
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c9
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c6
-rw-r--r--drivers/cpufreq/cpufreq-dt.c34
-rw-r--r--drivers/cpufreq/cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c6
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c52
-rw-r--r--drivers/cpufreq/intel_pstate.c42
-rw-r--r--drivers/cpufreq/mvebu-cpufreq.c9
-rw-r--r--drivers/cpufreq/qcom-cpufreq-kryo.c4
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c4
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle.c16
-rw-r--r--drivers/cpuidle/governors/ladder.c2
-rw-r--r--drivers/cpuidle/governors/menu.c114
-rw-r--r--drivers/cpuidle/poll_state.c8
-rw-r--r--drivers/crypto/Kconfig11
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/atmel-aes.c5
-rw-r--r--drivers/crypto/atmel-authenc.h13
-rw-r--r--drivers/crypto/atmel-ecc.c11
-rw-r--r--drivers/crypto/atmel-ecc.h14
-rw-r--r--drivers/crypto/atmel-sha.c5
-rw-r--r--drivers/crypto/atmel-tdes.c5
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c20
-rw-r--r--drivers/crypto/caam/Kconfig57
-rw-r--r--drivers/crypto/caam/Makefile10
-rw-r--r--drivers/crypto/caam/caamalg.c732
-rw-r--r--drivers/crypto/caam/caamalg_desc.c143
-rw-r--r--drivers/crypto/caam/caamalg_desc.h28
-rw-r--r--drivers/crypto/caam/caamalg_qi.c627
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c5165
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h223
-rw-r--r--drivers/crypto/caam/caamhash.c80
-rw-r--r--drivers/crypto/caam/caamhash_desc.c80
-rw-r--r--drivers/crypto/caam/caamhash_desc.h21
-rw-r--r--drivers/crypto/caam/caampkc.c1
-rw-r--r--drivers/crypto/caam/caamrng.c1
-rw-r--r--drivers/crypto/caam/compat.h2
-rw-r--r--drivers/crypto/caam/ctrl.c1
-rw-r--r--drivers/crypto/caam/dpseci.c426
-rw-r--r--drivers/crypto/caam/dpseci.h333
-rw-r--r--drivers/crypto/caam/dpseci_cmd.h149
-rw-r--r--drivers/crypto/caam/error.c79
-rw-r--r--drivers/crypto/caam/error.h6
-rw-r--r--drivers/crypto/caam/jr.c1
-rw-r--r--drivers/crypto/caam/qi.c43
-rw-r--r--drivers/crypto/caam/qi.h3
-rw-r--r--drivers/crypto/caam/regs.h30
-rw-r--r--drivers/crypto/caam/sg_sw_qm.h29
-rw-r--r--drivers/crypto/caam/sg_sw_qm2.h30
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c20
-rw-r--r--drivers/crypto/cavium/nitrox/Makefile3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_common.h19
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_csr.h111
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_debugfs.c115
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_dev.h162
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.c71
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.h23
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.c337
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.h10
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c98
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c203
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c49
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_sriov.c151
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c13
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h2
-rw-r--r--drivers/crypto/ccp/psp-dev.c93
-rw-r--r--drivers/crypto/ccp/sp-platform.c53
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h6
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c62
-rw-r--r--drivers/crypto/chelsio/chcr_core.c6
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h4
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c7
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c3
-rw-r--r--drivers/crypto/inside-secure/safexcel.c8
-rw-r--r--drivers/crypto/mxs-dcp.c195
-rw-r--r--drivers/crypto/omap-aes.c17
-rw-r--r--drivers/crypto/omap-aes.h2
-rw-r--r--drivers/crypto/picoxcell_crypto.c21
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_drv.c6
-rw-r--r--drivers/crypto/qat/qat_c3xxxvf/adf_drv.c6
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_drv.c6
-rw-r--r--drivers/crypto/qat/qat_c62xvf/adf_drv.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c1
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c60
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_drv.c6
-rw-r--r--drivers/crypto/qat/qat_dh895xccvf/adf_drv.c6
-rw-r--r--drivers/crypto/qce/ablkcipher.c13
-rw-r--r--drivers/crypto/qce/cipher.h2
-rw-r--r--drivers/crypto/s5p-sss.c113
-rw-r--r--drivers/crypto/sahara.c31
-rw-r--r--drivers/crypto/vmx/aes_cbc.c22
-rw-r--r--drivers/crypto/vmx/aes_ctr.c18
-rw-r--r--drivers/crypto/vmx/aes_xts.c18
-rw-r--r--drivers/dax/device.c6
-rw-r--r--drivers/devfreq/devfreq.c118
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c4
-rw-r--r--drivers/devfreq/governor.h6
-rw-r--r--drivers/devfreq/governor_performance.c5
-rw-r--r--drivers/devfreq/governor_powersave.c2
-rw-r--r--drivers/devfreq/governor_simpleondemand.c12
-rw-r--r--drivers/devfreq/governor_userspace.c16
-rw-r--r--drivers/dma/Kconfig13
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/at_hdmac.c2
-rw-r--r--drivers/dma/at_xdmac.c4
-rw-r--r--drivers/dma/bcm2835-dma.c8
-rw-r--r--drivers/dma/coh901318.c28
-rw-r--r--drivers/dma/dma-jz4740.c21
-rw-r--r--drivers/dma/dma-jz4780.c289
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c4
-rw-r--r--drivers/dma/dw/core.c5
-rw-r--r--drivers/dma/dw/platform.c2
-rw-r--r--drivers/dma/ep93xx_dma.c21
-rw-r--r--drivers/dma/fsl-edma-common.c626
-rw-r--r--drivers/dma/fsl-edma-common.h233
-rw-r--r--drivers/dma/fsl-edma.c729
-rw-r--r--drivers/dma/fsldma.c4
-rw-r--r--drivers/dma/hsu/hsu.c4
-rw-r--r--drivers/dma/idma64.c9
-rw-r--r--drivers/dma/imx-dma.c20
-rw-r--r--drivers/dma/ioat/init.c23
-rw-r--r--drivers/dma/k3dma.c36
-rw-r--r--drivers/dma/mcf-edma.c317
-rw-r--r--drivers/dma/mmp_tdma.c29
-rw-r--r--drivers/dma/mv_xor.c4
-rw-r--r--drivers/dma/mxs-dma.c3
-rw-r--r--drivers/dma/nbpfaxi.c9
-rw-r--r--drivers/dma/owl-dma.c283
-rw-r--r--drivers/dma/ppc4xx/adma.c2
-rw-r--r--drivers/dma/pxa_dma.c5
-rw-r--r--drivers/dma/sh/rcar-dmac.c3
-rw-r--r--drivers/dma/sh/shdma-arm.h5
-rw-r--r--drivers/dma/sh/shdma-base.c5
-rw-r--r--drivers/dma/sh/shdma-of.c5
-rw-r--r--drivers/dma/sh/shdma-r8a73a4.c5
-rw-r--r--drivers/dma/sh/shdma.h6
-rw-r--r--drivers/dma/sh/shdmac.c6
-rw-r--r--drivers/dma/sh/sudmac.c5
-rw-r--r--drivers/dma/sh/usb-dmac.c5
-rw-r--r--drivers/dma/sprd-dma.c81
-rw-r--r--drivers/dma/st_fdma.c7
-rw-r--r--drivers/dma/ste_dma40.c14
-rw-r--r--drivers/dma/stm32-dma.c20
-rw-r--r--drivers/dma/stm32-mdma.c4
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/edac/altera_edac.c670
-rw-r--r--drivers/edac/altera_edac.h73
-rw-r--r--drivers/edac/amd64_edac.c24
-rw-r--r--drivers/edac/amd64_edac.h3
-rw-r--r--drivers/edac/ghes_edac.c23
-rw-r--r--drivers/edac/i3200_edac.c2
-rw-r--r--drivers/edac/i7core_edac.c5
-rw-r--r--drivers/edac/mce_amd.c4
-rw-r--r--drivers/edac/pnd2_edac.c2
-rw-r--r--drivers/edac/sb_edac.c204
-rw-r--r--drivers/edac/skx_edac.c7
-rw-r--r--drivers/edac/thunderx_edac.c4
-rw-r--r--drivers/firewire/core-iso.c7
-rw-r--r--drivers/firewire/core-transaction.c10
-rw-r--r--drivers/firmware/efi/Kconfig9
-rw-r--r--drivers/firmware/efi/efi.c59
-rw-r--r--drivers/firmware/efi/libstub/Makefile3
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c27
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c61
-rw-r--r--drivers/firmware/efi/test/efi_test.c27
-rw-r--r--drivers/firmware/efi/test/efi_test.h10
-rw-r--r--drivers/fpga/dfl-fme-region.c4
-rw-r--r--drivers/fpga/fpga-bridge.c2
-rw-r--r--drivers/fpga/of-fpga-region.c3
-rw-r--r--drivers/gpio/Kconfig31
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/gpio-adp5520.c2
-rw-r--r--drivers/gpio/gpio-adp5588.c2
-rw-r--r--drivers/gpio/gpio-bcm-kona.c14
-rw-r--r--drivers/gpio/gpio-brcmstb.c15
-rw-r--r--drivers/gpio/gpio-creg-snps.c191
-rw-r--r--drivers/gpio/gpio-davinci.c60
-rw-r--r--drivers/gpio/gpio-ep93xx.c297
-rw-r--r--drivers/gpio/gpio-ftgpio010.c115
-rw-r--r--drivers/gpio/gpio-htc-egpio.c10
-rw-r--r--drivers/gpio/gpio-ingenic.c392
-rw-r--r--drivers/gpio/gpio-max3191x.c16
-rw-r--r--drivers/gpio/gpio-mmio.c6
-rw-r--r--drivers/gpio/gpio-mockup.c82
-rw-r--r--drivers/gpio/gpio-mxs.c4
-rw-r--r--drivers/gpio/gpio-omap.c356
-rw-r--r--drivers/gpio/gpio-pxa.c6
-rw-r--r--drivers/gpio/gpio-rcar.c6
-rw-r--r--drivers/gpio/gpio-siox.c293
-rw-r--r--drivers/gpio/gpio-syscon.c2
-rw-r--r--drivers/gpio/gpio-tb10x.c123
-rw-r--r--drivers/gpio/gpio-tps65086.c12
-rw-r--r--drivers/gpio/gpio-tps6586x.c15
-rw-r--r--drivers/gpio/gpio-tps65910.c9
-rw-r--r--drivers/gpio/gpio-tps65912.c16
-rw-r--r--drivers/gpio/gpio-ts5500.c13
-rw-r--r--drivers/gpio/gpio-twl4030.c59
-rw-r--r--drivers/gpio/gpio-twl6040.c31
-rw-r--r--drivers/gpio/gpio-uniphier.c2
-rw-r--r--drivers/gpio/gpio-vf610.c13
-rw-r--r--drivers/gpio/gpio-viperboard.c10
-rw-r--r--drivers/gpio/gpio-vr41xx.c55
-rw-r--r--drivers/gpio/gpio-vx855.c20
-rw-r--r--drivers/gpio/gpio-wm831x.c8
-rw-r--r--drivers/gpio/gpio-wm8350.c8
-rw-r--r--drivers/gpio/gpio-wm8994.c8
-rw-r--r--drivers/gpio/gpio-xlp.c12
-rw-r--r--drivers/gpio/gpio-xtensa.c7
-rw-r--r--drivers/gpio/gpio-zevio.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c8
-rw-r--r--drivers/gpio/gpiolib-devprop.c26
-rw-r--r--drivers/gpio/gpiolib-devres.c (renamed from drivers/gpio/devres.c)12
-rw-r--r--drivers/gpio/gpiolib-legacy.c1
-rw-r--r--drivers/gpio/gpiolib-of.c60
-rw-r--r--drivers/gpio/gpiolib-sysfs.c11
-rw-r--r--drivers/gpio/gpiolib.c643
-rw-r--r--drivers/gpio/gpiolib.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c37
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c149
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c12
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h2
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c1
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c25
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h3
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c25
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h2
-rw-r--r--drivers/gpu/drm/drm_atomic.c7
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c12
-rw-r--r--drivers/gpu/drm/drm_client.c35
-rw-r--r--drivers/gpu/drm/drm_crtc.c10
-rw-r--r--drivers/gpu/drm/drm_debugfs.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c5
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c98
-rw-r--r--drivers/gpu/drm/drm_lease.c6
-rw-r--r--drivers/gpu/drm/drm_panel.c10
-rw-r--r--drivers/gpu/drm/drm_syncobj.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c27
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h34
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c88
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h1
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c33
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c15
-rw-r--r--drivers/gpu/drm/pl111/pl111_vexpress.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_dotclock.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c24
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c1
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c42
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c24
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c2
-rw-r--r--drivers/hid/Kconfig16
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-bigbenff.c414
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-cougar.c66
-rw-r--r--drivers/hid/hid-elan.c2
-rw-r--r--drivers/hid/hid-google-hammer.c413
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-input.c64
-rw-r--r--drivers/hid/hid-logitech-hidpp.c309
-rw-r--r--drivers/hid/hid-magicmouse.c142
-rw-r--r--drivers/hid/hid-microsoft.c141
-rw-r--r--drivers/hid/hid-multitouch.c72
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/i2c-hid/Makefile3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c (renamed from drivers/hid/i2c-hid/i2c-hid.c)87
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c376
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h20
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c32
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c76
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c41
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c52
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.h5
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client-buffers.c49
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c24
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h5
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h2
-rw-r--r--drivers/hid/wacom_wac.c19
-rw-r--r--drivers/hv/connection.c8
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c2
-rw-r--r--drivers/hwmon/asus_atk0110.c4
-rw-r--r--drivers/hwmon/hwmon.c28
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/ibmpex.c2
-rw-r--r--drivers/hwmon/ibmpowernv.c10
-rw-r--r--drivers/hwmon/iio_hwmon.c16
-rw-r--r--drivers/hwmon/ina3221.c592
-rw-r--r--drivers/hwmon/k10temp.c5
-rw-r--r--drivers/hwmon/lm75.c21
-rw-r--r--drivers/hwmon/lm92.c14
-rw-r--r--drivers/hwmon/lm95245.c3
-rw-r--r--drivers/hwmon/mc13783-adc.c14
-rw-r--r--drivers/hwmon/nct6775.c372
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c6
-rw-r--r--drivers/hwmon/pmbus/Kconfig18
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c9
-rw-r--r--drivers/hwmon/pmbus/pmbus.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c5
-rw-r--r--drivers/hwmon/pwm-fan.c20
-rw-r--r--drivers/hwmon/scmi-hwmon.c2
-rw-r--r--drivers/hwmon/scpi-hwmon.c4
-rw-r--r--drivers/hwmon/sht15.c5
-rw-r--r--drivers/hwmon/tmp102.c3
-rw-r--r--drivers/hwmon/tmp108.c3
-rw-r--r--drivers/hwmon/tmp421.c6
-rw-r--r--drivers/hwtracing/intel_th/core.c16
-rw-r--r--drivers/hwtracing/intel_th/pci.c5
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c4
-rw-r--r--drivers/i2c/busses/i2c-isch.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c22
-rw-r--r--drivers/i2c/busses/i2c-rcar.c6
-rw-r--r--drivers/i2c/busses/i2c-scmi.c1
-rw-r--r--drivers/i2c/i2c-core-base.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c14
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/ide/ide-gd.c2
-rw-r--r--drivers/idle/intel_idle.c77
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c12
-rw-r--r--drivers/iio/light/apds9960.c3
-rw-r--r--drivers/iio/light/max44000.c23
-rw-r--r--drivers/iio/temperature/mlx90632.c3
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/addr.c406
-rw-r--r--drivers/infiniband/core/cache.c141
-rw-r--r--drivers/infiniband/core/cm.c9
-rw-r--r--drivers/infiniband/core/cma.c251
-rw-r--r--drivers/infiniband/core/cma_configfs.c2
-rw-r--r--drivers/infiniband/core/core_priv.h12
-rw-r--r--drivers/infiniband/core/cq.c10
-rw-r--r--drivers/infiniband/core/device.c264
-rw-r--r--drivers/infiniband/core/fmr_pool.c5
-rw-r--r--drivers/infiniband/core/iwcm.c2
-rw-r--r--drivers/infiniband/core/mad.c80
-rw-r--r--drivers/infiniband/core/mad_priv.h2
-rw-r--r--drivers/infiniband/core/netlink.c4
-rw-r--r--drivers/infiniband/core/nldev.c37
-rw-r--r--drivers/infiniband/core/rdma_core.c56
-rw-r--r--drivers/infiniband/core/rdma_core.h1
-rw-r--r--drivers/infiniband/core/restrack.c30
-rw-r--r--drivers/infiniband/core/rw.c11
-rw-r--r--drivers/infiniband/core/sa.h8
-rw-r--r--drivers/infiniband/core/sa_query.c70
-rw-r--r--drivers/infiniband/core/security.c7
-rw-r--r--drivers/infiniband/core/sysfs.c101
-rw-r--r--drivers/infiniband/core/ucm.c3
-rw-r--r--drivers/infiniband/core/ucma.c5
-rw-r--r--drivers/infiniband/core/umem.c125
-rw-r--r--drivers/infiniband/core/umem_odp.c621
-rw-r--r--drivers/infiniband/core/user_mad.c13
-rw-r--r--drivers/infiniband/core/uverbs.h15
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c111
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c140
-rw-r--r--drivers/infiniband/core/uverbs_main.c341
-rw-r--r--drivers/infiniband/core/uverbs_std_types_flow_action.c7
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c13
-rw-r--r--drivers/infiniband/core/verbs.c65
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c11
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c216
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c134
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c88
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c29
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c77
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h5
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c55
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c50
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c20
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h2
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile42
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c492
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h71
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h4
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h48
-rw-r--r--drivers/infiniband/hw/hfi1/init.c113
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.c94
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h192
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c4
-rw-r--r--drivers/infiniband/hw/hfi1/msix.c363
-rw-r--r--drivers/infiniband/hw/hfi1/msix.h (renamed from arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c)55
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c75
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c59
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h2
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c100
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h31
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c24
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c382
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c56
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h21
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c69
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h3
-rw-r--r--drivers/infiniband/hw/hfi1/trace_iowait.h54
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c14
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c22
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c139
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h20
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c259
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h35
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h11
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c12
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_sdma.c21
-rw-r--r--drivers/infiniband/hw/hns/Kconfig1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h45
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c629
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h96
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c123
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c212
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c41
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c73
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig1
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c20
-rw-r--r--drivers/infiniband/hw/mlx4/main.c182
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h5
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c8
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c129
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h14
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c34
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c369
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c393
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c546
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c9
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h99
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c26
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c123
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c491
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c44
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c4
-rw-r--r--drivers/infiniband/hw/nes/nes.c3
-rw-r--r--drivers/infiniband/hw/nes/nes.h9
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c63
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c74
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c3
-rw-r--r--drivers/infiniband/hw/qedr/main.c73
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h2
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c4
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c5
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c17
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c342
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c101
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c17
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c47
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h15
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.c3
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c39
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c74
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c16
-rw-r--r--drivers/infiniband/hw/usnic/usnic_transport.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c91
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c46
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/Kconfig2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c677
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_tx.h42
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c15
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c39
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c35
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c49
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c55
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c17
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c168
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c23
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c19
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c18
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c9
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c3
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c25
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c28
-rw-r--r--drivers/input/evdev.c1
-rw-r--r--drivers/input/joystick/xpad.c3
-rw-r--r--drivers/input/keyboard/atakbd.c74
-rw-r--r--drivers/input/misc/uinput.c3
-rw-r--r--drivers/input/mouse/elan_i2c_core.c1
-rw-r--r--drivers/input/mouse/elantech.c2
-rw-r--r--drivers/input/mousedev.c1
-rw-r--r--drivers/input/serio/i8042.c29
-rw-r--r--drivers/input/serio/serport.c2
-rw-r--r--drivers/input/touchscreen/egalax_ts.c6
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c22
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c3
-rw-r--r--drivers/iommu/Kconfig8
-rw-r--r--drivers/iommu/amd_iommu.c8
-rw-r--r--drivers/iommu/amd_iommu_init.c14
-rw-r--r--drivers/iommu/intel-iommu.c10
-rw-r--r--drivers/iommu/intel-pasid.h2
-rw-r--r--drivers/iommu/rockchip-iommu.c6
-rw-r--r--drivers/irqchip/Kconfig3
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c249
-rw-r--r--drivers/irqchip/irq-gic-v3.c85
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c253
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c507
-rw-r--r--drivers/irqchip/irq-sifive-plic.c10
-rw-r--r--drivers/irqchip/qcom-pdc.c1
-rw-r--r--drivers/isdn/capi/capi.c7
-rw-r--r--drivers/isdn/gigaset/asyncdata.c4
-rw-r--r--drivers/isdn/gigaset/ev-layer.c4
-rw-r--r--drivers/isdn/gigaset/interface.c13
-rw-r--r--drivers/isdn/gigaset/isocdata.c2
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c2
-rw-r--r--drivers/isdn/hisax/hfc_pci.c11
-rw-r--r--drivers/isdn/hisax/hfc_pci.h4
-rw-r--r--drivers/isdn/hisax/hfc_sx.c6
-rw-r--r--drivers/isdn/hisax/hisax.h2
-rw-r--r--drivers/isdn/hisax/w6692.c4
-rw-r--r--drivers/isdn/i4l/isdn_tty.c19
-rw-r--r--drivers/isdn/mISDN/socket.c3
-rw-r--r--drivers/isdn/mISDN/tei.c7
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-an30259a.c368
-rw-r--r--drivers/leds/leds-as3645a.c4
-rw-r--r--drivers/leds/leds-gpio.c92
-rw-r--r--drivers/leds/leds-pwm.c5
-rw-r--r--drivers/leds/leds-sc27xx-bltc.c121
-rw-r--r--drivers/leds/trigger/Kconfig7
-rw-r--r--drivers/leds/trigger/Makefile1
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c411
-rw-r--r--drivers/lightnvm/Kconfig3
-rw-r--r--drivers/lightnvm/core.c334
-rw-r--r--drivers/lightnvm/pblk-cache.c1
-rw-r--r--drivers/lightnvm/pblk-core.c587
-rw-r--r--drivers/lightnvm/pblk-gc.c11
-rw-r--r--drivers/lightnvm/pblk-init.c321
-rw-r--r--drivers/lightnvm/pblk-map.c13
-rw-r--r--drivers/lightnvm/pblk-rb.c110
-rw-r--r--drivers/lightnvm/pblk-read.c86
-rw-r--r--drivers/lightnvm/pblk-recovery.c471
-rw-r--r--drivers/lightnvm/pblk-rl.c5
-rw-r--r--drivers/lightnvm/pblk-sysfs.c12
-rw-r--r--drivers/lightnvm/pblk-trace.h145
-rw-r--r--drivers/lightnvm/pblk-write.c90
-rw-r--r--drivers/lightnvm/pblk.h221
-rw-r--r--drivers/mailbox/pcc.c7
-rw-r--r--drivers/md/bcache/alloc.c2
-rw-r--r--drivers/md/bcache/bcache.h3
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/bcache/closure.h3
-rw-r--r--drivers/md/bcache/debug.c2
-rw-r--r--drivers/md/bcache/extents.c2
-rw-r--r--drivers/md/bcache/journal.c6
-rw-r--r--drivers/md/bcache/request.c9
-rw-r--r--drivers/md/bcache/request.h2
-rw-r--r--drivers/md/bcache/super.c121
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/dm-cache-metadata.c4
-rw-r--r--drivers/md/dm-cache-target.c14
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-integrity.c26
-rw-r--r--drivers/md/dm-linear.c8
-rw-r--r--drivers/md/dm-mpath.c14
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-thin-metadata.c6
-rw-r--r--drivers/md/dm-verity-fec.c5
-rw-r--r--drivers/md/dm.c27
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/media/i2c/mt9v111.c41
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c5
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss.c15
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c5
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c5
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c38
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c2
-rw-r--r--drivers/memstick/core/ms_block.c2
-rw-r--r--drivers/memstick/core/mspro_block.c2
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h2
-rw-r--r--drivers/message/fusion/mptbase.c12
-rw-r--r--drivers/message/fusion/mptsas.c8
-rw-r--r--drivers/mfd/Kconfig26
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/adp5520.c2
-rw-r--r--drivers/mfd/altera-a10sr.c3
-rw-r--r--drivers/mfd/arizona-core.c10
-rw-r--r--drivers/mfd/at91-usart.c72
-rw-r--r--drivers/mfd/cros_ec.c3
-rw-r--r--drivers/mfd/cros_ec_dev.c1
-rw-r--r--drivers/mfd/da9052-spi.c3
-rw-r--r--drivers/mfd/intel_msic.c49
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c56
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c5
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c5
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c25
-rw-r--r--drivers/mfd/intel_soc_pmic_core.h12
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c78
-rw-r--r--drivers/mfd/madera-core.c33
-rw-r--r--drivers/mfd/max14577.c28
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/max77686.c32
-rw-r--r--drivers/mfd/max77693.c34
-rw-r--r--drivers/mfd/max77843.c19
-rw-r--r--drivers/mfd/max8997-irq.c30
-rw-r--r--drivers/mfd/max8997.c40
-rw-r--r--drivers/mfd/max8998-irq.c18
-rw-r--r--drivers/mfd/max8998.c28
-rw-r--r--drivers/mfd/mc13xxx-core.c3
-rw-r--r--drivers/mfd/mc13xxx-spi.c3
-rw-r--r--drivers/mfd/menelaus.c13
-rw-r--r--drivers/mfd/motorola-cpcap.c51
-rw-r--r--drivers/mfd/omap-usb-host.c11
-rw-r--r--drivers/mfd/rohm-bd718x7.c162
-rw-r--r--drivers/mfd/sec-core.c16
-rw-r--r--drivers/mfd/sec-irq.c24
-rw-r--r--drivers/mfd/ti-lmu.c91
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c14
-rw-r--r--drivers/mfd/twl6040.c3
-rw-r--r--drivers/misc/genwqe/card_base.h2
-rw-r--r--drivers/misc/genwqe/card_dev.c9
-rw-r--r--drivers/misc/lkdtm/core.c1
-rw-r--r--drivers/misc/lkdtm/lkdtm.h1
-rw-r--r--drivers/misc/lkdtm/usercopy.c13
-rw-r--r--drivers/mmc/core/Kconfig2
-rw-r--r--drivers/mmc/core/block.c12
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/core/pwrseq_simple.c14
-rw-r--r--drivers/mmc/core/slot-gpio.c2
-rw-r--r--drivers/mmc/host/Kconfig40
-rw-r--r--drivers/mmc/host/Makefile3
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c11
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c8
-rw-r--r--drivers/mmc/host/jz4740_mmc.c5
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c2
-rw-r--r--drivers/mmc/host/mmci.c698
-rw-r--r--drivers/mmc/host/mmci.h174
-rw-r--r--drivers/mmc/host/mmci_qcom_dml.c17
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c282
-rw-r--r--drivers/mmc/host/mtk-sd.c298
-rw-r--r--drivers/mmc/host/mxcmmc.c5
-rw-r--r--drivers/mmc/host/omap_hsmmc.c171
-rw-r--r--drivers/mmc/host/renesas_sdhi.h5
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c66
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c30
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c18
-rw-r--r--drivers/mmc/host/sdhci-acpi.c70
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h1
-rw-r--r--drivers/mmc/host/sdhci-iproc.c59
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c96
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c39
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c44
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c3
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c68
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h7
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c14
-rw-r--r--drivers/mmc/host/sdhci-sirf.c28
-rw-r--r--drivers/mmc/host/sdhci-spear.c33
-rw-r--r--drivers/mmc/host/sdhci-sprd.c498
-rw-r--r--drivers/mmc/host/sdhci-tegra.c675
-rw-r--r--drivers/mmc/host/sdhci-xenon-phy.c4
-rw-r--r--drivers/mmc/host/sdhci.c289
-rw-r--r--drivers/mmc/host/sdhci.h37
-rw-r--r--drivers/mmc/host/sh_mmcif.c7
-rw-r--r--drivers/mmc/host/sunxi-mmc.c24
-rw-r--r--drivers/mmc/host/tifm_sd.c3
-rw-r--r--drivers/mmc/host/tmio_mmc.c80
-rw-r--r--drivers/mmc/host/tmio_mmc.h16
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c129
-rw-r--r--drivers/mmc/host/uniphier-sd.c698
-rw-r--r--drivers/mmc/host/usdhi6rol0.c5
-rw-r--r--drivers/mtd/devices/m25p80.c49
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c146
-rw-r--r--drivers/mtd/maps/physmap_of_core.c27
-rw-r--r--drivers/mtd/maps/physmap_of_gemini.c5
-rw-r--r--drivers/mtd/mtd_blkdevs.c102
-rw-r--r--drivers/mtd/mtdpart.c5
-rw-r--r--drivers/mtd/nand/raw/Kconfig20
-rw-r--r--drivers/mtd/nand/raw/Makefile6
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c249
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c111
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c124
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/main.c2
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c58
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c93
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c74
-rw-r--r--drivers/mtd/nand/raw/cmx270_nand.c48
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c75
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c93
-rw-r--r--drivers/mtd/nand/raw/denali.c157
-rw-r--r--drivers/mtd/nand/raw/denali.h10
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c12
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c10
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c156
-rw-r--r--drivers/mtd/nand/raw/docg4.c1442
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c70
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c106
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c66
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c44
-rw-r--r--drivers/mtd/nand/raw/gpio.c29
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c3
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c111
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h2
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c68
-rw-r--r--drivers/mtd/nand/raw/internals.h115
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c53
-rw-r--r--drivers/mtd/nand/raw/jz4780_nand.c42
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c59
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c83
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c279
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c65
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c93
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c114
-rw-r--r--drivers/mtd/nand/raw/nand_amd.c2
-rw-r--r--drivers/mtd/nand/raw/nand_base.c1721
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c24
-rw-r--r--drivers/mtd/nand/raw/nand_bch.c10
-rw-r--r--drivers/mtd/nand/raw/nand_ecc.c99
-rw-r--r--drivers/mtd/nand/raw/nand_esmt.c47
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c15
-rw-r--r--drivers/mtd/nand/raw/nand_ids.c26
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c113
-rw-r--r--drivers/mtd/nand/raw/nand_legacy.c642
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c2
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c19
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c305
-rw-r--r--drivers/mtd/nand/raw/nand_samsung.c2
-rw-r--r--drivers/mtd/nand/raw/nand_timings.c18
-rw-r--r--drivers/mtd/nand/raw/nand_toshiba.c88
-rw-r--r--drivers/mtd/nand/raw/nandsim.c50
-rw-r--r--drivers/mtd/nand/raw/ndfc.c43
-rw-r--r--drivers/mtd/nand/raw/nuc900_nand.c47
-rw-r--r--drivers/mtd/nand/raw/omap2.c200
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c26
-rw-r--r--drivers/mtd/nand/raw/oxnas_nand.c29
-rw-r--r--drivers/mtd/nand/raw/pasemi_nand.c51
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c23
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c92
-rw-r--r--drivers/mtd/nand/raw/r852.c80
-rw-r--r--drivers/mtd/nand/raw/r852.h2
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c106
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c68
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c36
-rw-r--r--drivers/mtd/nand/raw/sm_common.c7
-rw-r--r--drivers/mtd/nand/raw/socrates_nand.c60
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c96
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c77
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c36
-rw-r--r--drivers/mtd/nand/raw/tmio_nand.c78
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c53
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c43
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c39
-rw-r--r--drivers/mtd/sm_ftl.c20
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c4
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c15
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c1
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c925
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c21
-rw-r--r--drivers/mux/adgs1408.c2
-rw-r--r--drivers/mux/gpio.c12
-rw-r--r--drivers/net/appletalk/ipddp.c8
-rw-r--r--drivers/net/bonding/bond_main.c79
-rw-r--r--drivers/net/can/rx-offload.c8
-rw-r--r--drivers/net/dsa/Kconfig8
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/Kconfig10
-rw-r--r--drivers/net/dsa/b53/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c252
-rw-r--r--drivers/net/dsa/b53/b53_priv.h36
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c214
-rw-r--r--drivers/net/dsa/b53/b53_serdes.h128
-rw-r--r--drivers/net/dsa/b53/b53_srab.c215
-rw-r--r--drivers/net/dsa/bcm_sf2.c23
-rw-r--r--drivers/net/dsa/lantiq_gswip.c1167
-rw-r--r--drivers/net/dsa/lantiq_pce.h153
-rw-r--r--drivers/net/dsa/mt7530.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c28
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c25
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c105
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h16
-rw-r--r--drivers/net/dsa/qca8k.c6
-rw-r--r--drivers/net/ethernet/8390/ax88796.c4
-rw-r--r--drivers/net/ethernet/8390/etherh.c35
-rw-r--r--drivers/net/ethernet/Kconfig7
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c4
-rw-r--r--drivers/net/ethernet/agere/et131x.c12
-rw-r--r--drivers/net/ethernet/alacritech/slic.h1
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c3
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c5
-rw-r--r--drivers/net/ethernet/amazon/Kconfig2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h425
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c302
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h72
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_common_defs.h4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c285
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h72
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h229
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c505
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h42
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h206
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c2
-rw-r--r--drivers/net/ethernet/amd/atarilance.c6
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c12
-rw-r--r--drivers/net/ethernet/amd/declance.c12
-rw-r--r--drivers/net/ethernet/amd/ni65.c4
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c6
-rw-r--r--drivers/net/ethernet/amd/sunlance.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c19
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c30
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c10
-rw-r--r--drivers/net/ethernet/apple/bmac.c4
-rw-r--r--drivers/net/ethernet/apple/mace.c4
-rw-r--r--drivers/net/ethernet/apple/macmace.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c113
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h13
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c24
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c36
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c41
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c35
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h13
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c163
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h130
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c231
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c22
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c9
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c22
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c95
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c35
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c81
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c1726
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h250
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c89
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c112
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h310
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c35
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c16
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c20
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c71
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c24
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c547
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c342
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c404
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c226
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c53
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h31
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c122
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h17
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h98
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.c61
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h9
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c125
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c82
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.h4
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c5
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h42
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c238
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h106
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c31
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c114
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c66
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c68
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h2
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c4
-rw-r--r--drivers/net/ethernet/cortina/gemini.c5
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c6
-rw-r--r--drivers/net/ethernet/dnet.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c12
-rw-r--r--drivers/net/ethernet/ethoc.c5
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c24
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c7
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c16
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c28
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig16
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile13
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h (renamed from drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h)0
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c (renamed from drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c)390
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h (renamed from drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h)36
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c630
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c (renamed from drivers/staging/fsl-dpaa2/rtc/rtc.c)57
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h (renamed from drivers/staging/fsl-dpaa2/rtc/rtc.h)0
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpkg.h (renamed from drivers/staging/fsl-dpaa2/ethernet/dpkg.h)0
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h (renamed from drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h)51
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c (renamed from drivers/staging/fsl-dpaa2/ethernet/dpni.c)152
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h (renamed from drivers/staging/fsl-dpaa2/ethernet/dpni.h)97
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h40
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc.c194
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dprtc.h45
-rw-r--r--drivers/net/ethernet/freescale/fec.h4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c47
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c6
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c55
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c48
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h59
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c609
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c216
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h147
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c1088
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h83
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c2480
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h377
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c58
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c621
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h25
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c121
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h27
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c14
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h97
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c43
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c32
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h18
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c295
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c5
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c4
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c19
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c28
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h3
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.h2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c149
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h9
-rw-r--r--drivers/net/ethernet/intel/Kconfig57
-rw-r--r--drivers/net/ethernet/intel/Makefile3
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c5
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c24
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c437
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c398
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c201
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h20
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx_common.h94
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c445
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c967
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h25
-rw-r--r--drivers/net/ethernet/intel/i40evf/Makefile16
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2717
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_alloc.h35
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c1320
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_devids.h34
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_hmc.h215
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h158
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h130
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h313
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h1496
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h427
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c820
-rw-r--r--drivers/net/ethernet/intel/iavf/Makefile15
-rw-r--r--drivers/net/ethernet/intel/iavf/i40e_adminq.c (renamed from drivers/net/ethernet/intel/i40evf/i40e_adminq.c)311
-rw-r--r--drivers/net/ethernet/intel/iavf/i40e_adminq.h (renamed from drivers/net/ethernet/intel/i40evf/i40e_adminq.h)35
-rw-r--r--drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h530
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h418
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_alloc.h31
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.c (renamed from drivers/net/ethernet/intel/i40evf/i40evf_client.c)224
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.h (renamed from drivers/net/ethernet/intel/i40evf/i40evf_client.h)30
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c955
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_devids.h12
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c1036
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c (renamed from drivers/net/ethernet/intel/i40evf/i40evf_main.c)1760
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_osdep.h (renamed from drivers/net/ethernet/intel/i40evf/i40e_osdep.h)28
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_prototype.h67
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_register.h68
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_status.h (renamed from drivers/net/ethernet/intel/i40evf/i40e_status.h)8
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_trace.h (renamed from drivers/net/ethernet/intel/i40evf/i40e_trace.h)86
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c (renamed from drivers/net/ethernet/intel/i40evf/i40e_txrx.c)812
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h (renamed from drivers/net/ethernet/intel/i40evf/i40e_txrx.h)359
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h688
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c (renamed from drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c)530
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h72
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h124
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c892
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h41
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c89
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c818
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h530
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c2619
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h77
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c3567
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c379
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c127
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c1686
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h91
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h111
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c2668
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h173
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c41
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile10
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h443
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c541
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h107
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h389
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h321
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c490
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.h13
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c806
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.h41
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c3901
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c215
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.h14
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c791
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.h21
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h221
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h48
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c282
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c405
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h50
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c801
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c670
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.h66
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h33
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c101
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/lantiq_etop.c12
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c567
-rw-r--r--drivers/net/ethernet/marvell/Kconfig3
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c56
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h28
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c481
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Makefile6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c721
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h111
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h186
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h211
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c303
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h525
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h262
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h5709
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c1772
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h368
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c515
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c1959
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c472
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c816
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c71
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h502
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h917
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c208
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c181
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c290
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c254
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c740
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c484
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c188
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h603
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c151
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h106
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c488
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c225
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c982
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c249
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c573
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c13
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c4
-rw-r--r--drivers/net/ethernet/mscc/Kconfig2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c24
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h79
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c75
-rw-r--r--drivers/net/ethernet/mscc/ocelot_dev_gmii.h154
-rw-r--r--drivers/net/ethernet/mscc/ocelot_regs.c93
-rw-r--r--drivers/net/ethernet/neterion/s2io.c7
-rw-r--r--drivers/net/ethernet/neterion/s2io.h22
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c1
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h20
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/ctrl.c67
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c69
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.h35
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c105
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h45
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c415
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c91
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h107
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c77
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c234
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c85
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c49
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h57
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c179
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c65
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c37
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_abi.h35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c39
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h39
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app_nic.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c54
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c78
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c129
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c41
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c84
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c42
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c84
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h46
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c70
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c46
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c66
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h72
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c364
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h46
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c34
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c303
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c46
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.c34
-rw-r--r--drivers/net/ethernet/ni/Kconfig3
-rw-r--r--drivers/net/ethernet/ni/nixge.c168
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c78
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c29
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c50
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c248
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c29
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h362
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c258
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c346
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h51
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c102
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c46
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k.c34
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k.h2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c28
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c7
-rw-r--r--drivers/net/ethernet/rdc/r6040.c12
-rw-r--r--drivers/net/ethernet/realtek/r8169.c245
-rw-r--r--drivers/net/ethernet/renesas/ravb.h11
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c161
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c5
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c4
-rw-r--r--drivers/net/ethernet/seeq/ether3.c5
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c3
-rw-r--r--drivers/net/ethernet/sfc/efx.c34
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c34
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c3
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c9
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c6
-rw-r--r--drivers/net/ethernet/socionext/netsec.c45
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c253
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c3
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c14
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.h7
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c72
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h8
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c6
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c22
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c42
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c9
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c8
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c8
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c12
-rw-r--r--drivers/net/fddi/Kconfig11
-rw-r--r--drivers/net/fddi/Makefile1
-rw-r--r--drivers/net/fddi/defza.c1564
-rw-r--r--drivers/net/fddi/defza.h791
-rw-r--r--drivers/net/fddi/skfp/ecm.c3
-rw-r--r--drivers/net/fddi/skfp/h/cmtdef.h9
-rw-r--r--drivers/net/fddi/skfp/pcmplc.c11
-rw-r--r--drivers/net/geneve.c61
-rw-r--r--drivers/net/hamradio/6pack.c21
-rw-r--r--drivers/net/hamradio/mkiss.c21
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hyperv/hyperv_net.h51
-rw-r--r--drivers/net/hyperv/netvsc.c21
-rw-r--r--drivers/net/hyperv/netvsc_drv.c156
-rw-r--r--drivers/net/hyperv/rndis_filter.c97
-rw-r--r--drivers/net/ieee802154/adf7242.c3
-rw-r--r--drivers/net/ieee802154/ca8210.c6
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c8
-rw-r--r--drivers/net/ieee802154/mcr20a.c72
-rw-r--r--drivers/net/loopback.c10
-rw-r--r--drivers/net/macsec.c18
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/net_failover.c8
-rw-r--r--drivers/net/netdevsim/bpf.c8
-rw-r--r--drivers/net/nlmon.c6
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/aquantia.c12
-rw-r--r--drivers/net/phy/at803x.c2
-rw-r--r--drivers/net/phy/bcm63xx.c9
-rw-r--r--drivers/net/phy/dp83640.c18
-rw-r--r--drivers/net/phy/et1011c.c3
-rw-r--r--drivers/net/phy/marvell.c67
-rw-r--r--drivers/net/phy/marvell10g.c17
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c83
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c6
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c11
-rw-r--r--drivers/net/phy/mdio-thunder.c4
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/micrel.c130
-rw-r--r--drivers/net/phy/microchip.c33
-rw-r--r--drivers/net/phy/microchip_t1.c2
-rw-r--r--drivers/net/phy/mscc.c1520
-rw-r--r--drivers/net/phy/phy.c132
-rw-r--r--drivers/net/phy/phy_device.c301
-rw-r--r--drivers/net/phy/phylink.c78
-rw-r--r--drivers/net/phy/sfp-bus.c4
-rw-r--r--drivers/net/phy/sfp.c9
-rw-r--r--drivers/net/phy/ste10Xp.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/ppp_mppe.c27
-rw-r--r--drivers/net/ppp/pppoe.c3
-rw-r--r--drivers/net/slip/slip.c25
-rw-r--r--drivers/net/tap.c94
-rw-r--r--drivers/net/team/team.c11
-rw-r--r--drivers/net/thunderbolt.c5
-rw-r--r--drivers/net/tun.c384
-rw-r--r--drivers/net/usb/asix_common.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c3
-rw-r--r--drivers/net/usb/cdc_ncm.c6
-rw-r--r--drivers/net/usb/lan78xx.c66
-rw-r--r--drivers/net/usb/lan78xx.h14
-rw-r--r--drivers/net/usb/qmi_wwan.c15
-rw-r--r--drivers/net/usb/r8152.c3
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c3
-rw-r--r--drivers/net/usb/sr9800.c3
-rw-r--r--drivers/net/usb/usbnet.c13
-rw-r--r--drivers/net/veth.c204
-rw-r--r--drivers/net/virtio_net.c68
-rw-r--r--drivers/net/vrf.c11
-rw-r--r--drivers/net/vsockmon.c14
-rw-r--r--drivers/net/vxlan.c203
-rw-r--r--drivers/net/wan/c101.c1
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c77
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.h1
-rw-r--r--drivers/net/wan/pci200syn.c1
-rw-r--r--drivers/net/wan/x25_asy.c19
-rw-r--r--drivers/net/wimax/i2400m/control.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig4
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c23
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h36
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c42
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h11
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c766
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h69
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c144
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c130
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c82
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h52
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c297
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c155
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c191
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h37
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c154
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c101
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c1019
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h129
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c2072
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h677
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c281
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h21
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c198
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h254
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c84
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h100
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c170
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c83
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h20
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c18
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c19
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c333
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c63
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c13
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c1
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c98
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h19
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c350
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h2
-rw-r--r--drivers/net/wireless/broadcom/b43/b43.h2
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c8
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c10
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c32
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c34
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h189
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c649
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h141
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c188
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-scd.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c207
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c108
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c782
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c867
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/testmode.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c211
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c453
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h109
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c269
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c212
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c108
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c6
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c65
-rw-r--r--drivers/net/wireless/marvell/libertas/if_cs.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c8
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c7
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c17
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig32
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile27
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h51
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h136
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Makefile13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/core.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/dma.c522
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/dma.h126
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c567
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h135
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c570
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h406
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.c489
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.h154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c349
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c656
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h69
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h315
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c221
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c147
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c538
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/regs.h651
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/tx.c270
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c382
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c175
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/util.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h215
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h)40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dma.h77
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c153
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h)144
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c)680
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_mac.h)140
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c224
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h113
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c260
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c258
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_regs.h)78
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_trace.c)2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_trace.h)33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c202
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c359
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c446
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2.h330
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Makefile16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c)35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_dma.h)21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c)346
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h94
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c)85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.c54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c123
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h)79
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h105
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_pci.c)8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c)145
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_init.c)160
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_mac.c)111
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_main.c)130
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c188
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_phy.c)164
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_tx.c)48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c)199
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2_usb.c)25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2u_init.c)81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c)42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2u_main.c)70
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c)235
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c)121
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_common.c350
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_core.c88
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.c163
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c427
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u.h83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_core.c108
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c129
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_mcu.c204
-rw-r--r--drivers/net/wireless/quantenna/Kconfig2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Kconfig2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Makefile3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c196
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c535
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c39
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c18
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c392
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h85
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c1249
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h (renamed from drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h)80
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h121
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c1494
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h91
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h356
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h5
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qlink_util.h | 8
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h | 14
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c | 13
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 154
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.h | 3
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800mmio.c | 277
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800mmio.h | 1
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800pci.c | 2
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 143
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00.h | 3
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00debug.c | 18
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c | 13
-rw-r--r--  drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 71
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/wifi.h | 4
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_hal.c | 4
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mac80211.c | 2
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_usb.c | 11
-rw-r--r--  drivers/net/wireless/rsi/rsi_common.h | 1
-rw-r--r--  drivers/net/wireless/st/cw1200/txrx.c | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 48
-rw-r--r--  drivers/net/wireless/ti/wlcore/sdio.c | 17
-rw-r--r--  drivers/net/wireless/ti/wlcore/wlcore.h | 2
-rw-r--r--  drivers/net/wireless/zydas/zd1211rw/zd_mac.c | 2
-rw-r--r--  drivers/net/xen-netback/common.h | 3
-rw-r--r--  drivers/net/xen-netback/hash.c | 51
-rw-r--r--  drivers/net/xen-netback/interface.c | 6
-rw-r--r--  drivers/net/xen-netback/netback.c | 3
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 3
-rw-r--r--  drivers/net/xen-netfront.c | 8
-rw-r--r--  drivers/nvdimm/blk.c | 2
-rw-r--r--  drivers/nvdimm/btt.c | 2
-rw-r--r--  drivers/nvdimm/bus.c | 20
-rw-r--r--  drivers/nvdimm/dimm.c | 6
-rw-r--r--  drivers/nvdimm/dimm_devs.c | 60
-rw-r--r--  drivers/nvdimm/label.c | 144
-rw-r--r--  drivers/nvdimm/label.h | 4
-rw-r--r--  drivers/nvdimm/namespace_devs.c | 1
-rw-r--r--  drivers/nvdimm/nd-core.h | 1
-rw-r--r--  drivers/nvdimm/nd.h | 2
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 61
-rw-r--r--  drivers/nvdimm/pmem.c | 6
-rw-r--r--  drivers/nvdimm/region_devs.c | 11
-rw-r--r--  drivers/nvme/host/core.c | 53
-rw-r--r--  drivers/nvme/host/fabrics.c | 37
-rw-r--r--  drivers/nvme/host/fabrics.h | 2
-rw-r--r--  drivers/nvme/host/fc.c | 153
-rw-r--r--  drivers/nvme/host/lightnvm.c | 137
-rw-r--r--  drivers/nvme/host/multipath.c | 85
-rw-r--r--  drivers/nvme/host/nvme.h | 36
-rw-r--r--  drivers/nvme/host/pci.c | 107
-rw-r--r--  drivers/nvme/host/rdma.c | 78
-rw-r--r--  drivers/nvme/host/trace.h | 28
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 8
-rw-r--r--  drivers/nvme/target/configfs.c | 47
-rw-r--r--  drivers/nvme/target/core.c | 183
-rw-r--r--  drivers/nvme/target/discovery.c | 6
-rw-r--r--  drivers/nvme/target/fc.c | 136
-rw-r--r--  drivers/nvme/target/fcloop.c | 1
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 12
-rw-r--r--  drivers/nvme/target/io-cmd-file.c | 3
-rw-r--r--  drivers/nvme/target/nvmet.h | 18
-rw-r--r--  drivers/nvme/target/rdma.c | 41
-rw-r--r--  drivers/of/device.c | 12
-rw-r--r--  drivers/of/unittest.c | 26
-rw-r--r--  drivers/opp/core.c | 149
-rw-r--r--  drivers/opp/cpu.c | 15
-rw-r--r--  drivers/opp/of.c | 237
-rw-r--r--  drivers/opp/opp.h | 19
-rw-r--r--  drivers/parisc/Makefile | 3
-rw-r--r--  drivers/parisc/ccio-dma.c | 12
-rw-r--r--  drivers/parisc/ccio-rm-dma.c | 202
-rw-r--r--  drivers/parisc/dino.c | 5
-rw-r--r--  drivers/parisc/sba_iommu.c | 17
-rw-r--r--  drivers/pci/Kconfig | 20
-rw-r--r--  drivers/pci/Makefile | 2
-rw-r--r--  drivers/pci/access.c | 4
-rw-r--r--  drivers/pci/controller/Kconfig | 4
-rw-r--r--  drivers/pci/controller/dwc/Makefile | 2
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c | 11
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 176
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone-dw.c | 484
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c | 788
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.h | 57
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 8
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 7
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c | 4
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 56
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 129
-rw-r--r--  drivers/pci/controller/pci-host-common.c | 8
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 39
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 436
-rw-r--r--  drivers/pci/controller/pcie-cadence-ep.c | 13
-rw-r--r--  drivers/pci/controller/pcie-cadence-host.c | 7
-rw-r--r--  drivers/pci/controller/pcie-cadence.c | 24
-rw-r--r--  drivers/pci/controller/pcie-iproc.c | 8
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c | 321
-rw-r--r--  drivers/pci/controller/pcie-mobiveil.c | 7
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c | 9
-rw-r--r--  drivers/pci/controller/pcie-xilinx.c | 7
-rw-r--r--  drivers/pci/controller/vmd.c | 6
-rw-r--r--  drivers/pci/hotplug/TODO | 74
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 10
-rw-r--r--  drivers/pci/hotplug/acpiphp_core.c | 36
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 11
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c | 2
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug.h | 11
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c | 105
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_pci.c | 6
-rw-r--r--  drivers/pci/hotplug/cpqphp.h | 9
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c | 61
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c | 31
-rw-r--r--  drivers/pci/hotplug/ibmphp.h | 9
-rw-r--r--  drivers/pci/hotplug/ibmphp_core.c | 121
-rw-r--r--  drivers/pci/hotplug/ibmphp_ebda.c | 70
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c | 53
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 133
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 168
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 263
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 184
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c | 41
-rw-r--r--  drivers/pci/hotplug/pnv_php.c | 38
-rw-r--r--  drivers/pci/hotplug/rpaphp.h | 10
-rw-r--r--  drivers/pci/hotplug/rpaphp_core.c | 20
-rw-r--r--  drivers/pci/hotplug/rpaphp_pci.c | 11
-rw-r--r--  drivers/pci/hotplug/rpaphp_slot.c | 22
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 44
-rw-r--r--  drivers/pci/hotplug/sgi_hotplug.c | 63
-rw-r--r--  drivers/pci/hotplug/shpchp.h | 8
-rw-r--r--  drivers/pci/hotplug/shpchp_core.c | 48
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c | 21
-rw-r--r--  drivers/pci/iov.c | 3
-rw-r--r--  drivers/pci/msi.c | 9
-rw-r--r--  drivers/pci/p2pdma.c | 805
-rw-r--r--  drivers/pci/pci-acpi.c | 63
-rw-r--r--  drivers/pci/pci-bridge-emul.c | 408
-rw-r--r--  drivers/pci/pci-bridge-emul.h | 124
-rw-r--r--  drivers/pci/pci-mid.c | 4
-rw-r--r--  drivers/pci/pci.c | 139
-rw-r--r--  drivers/pci/pci.h | 78
-rw-r--r--  drivers/pci/pcie/Kconfig | 4
-rw-r--r--  drivers/pci/pcie/aer.c | 239
-rw-r--r--  drivers/pci/pcie/aer_inject.c | 96
-rw-r--r--  drivers/pci/pcie/aspm.c | 4
-rw-r--r--  drivers/pci/pcie/dpc.c | 72
-rw-r--r--  drivers/pci/pcie/err.c | 281
-rw-r--r--  drivers/pci/pcie/pme.c | 30
-rw-r--r--  drivers/pci/pcie/portdrv.h | 32
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 21
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 31
-rw-r--r--  drivers/pci/probe.c | 24
-rw-r--r--  drivers/pci/quirks.c | 96
-rw-r--r--  drivers/pci/remove.c | 4
-rw-r--r--  drivers/pci/setup-bus.c | 28
-rw-r--r--  drivers/pci/slot.c | 3
-rw-r--r--  drivers/pcmcia/pcmcia_resource.c | 4
-rw-r--r--  drivers/pcmcia/ricoh.h | 35
-rw-r--r--  drivers/pcmcia/soc_common.c | 9
-rw-r--r--  drivers/pcmcia/yenta_socket.c | 3
-rw-r--r--  drivers/perf/arm_pmu.c | 8
-rw-r--r--  drivers/perf/arm_pmu_platform.c | 6
-rw-r--r--  drivers/phy/Kconfig | 3
-rw-r--r--  drivers/phy/Makefile | 3
-rw-r--r--  drivers/phy/broadcom/Kconfig | 3
-rw-r--r--  drivers/phy/broadcom/phy-bcm-cygnus-pcie.c | 4
-rw-r--r--  drivers/phy/broadcom/phy-brcm-sata.c | 74
-rw-r--r--  drivers/phy/broadcom/phy-brcm-usb.c | 4
-rw-r--r--  drivers/phy/cadence/Kconfig | 10
-rw-r--r--  drivers/phy/cadence/Makefile | 1
-rw-r--r--  drivers/phy/cadence/phy-cadence-dp.c | 541
-rw-r--r--  drivers/phy/lantiq/phy-lantiq-rcu-usb2.c | 5
-rw-r--r--  drivers/phy/marvell/Kconfig | 11
-rw-r--r--  drivers/phy/marvell/Makefile | 1
-rw-r--r--  drivers/phy/marvell/phy-berlin-sata.c | 6
-rw-r--r--  drivers/phy/marvell/phy-pxa-usb.c | 345
-rw-r--r--  drivers/phy/motorola/phy-mapphone-mdm6600.c | 19
-rw-r--r--  drivers/phy/mscc/Kconfig | 11
-rw-r--r--  drivers/phy/mscc/Makefile | 5
-rw-r--r--  drivers/phy/mscc/phy-ocelot-serdes.c | 295
-rw-r--r--  drivers/phy/qualcomm/Kconfig | 17
-rw-r--r--  drivers/phy/qualcomm/Makefile | 4
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp.c | 222
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp.h | 15
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qusb2.c | 4
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-ufs-i.h | 2
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-ufs.c | 50
-rw-r--r--  drivers/phy/renesas/Kconfig | 1
-rw-r--r--  drivers/phy/renesas/Makefile | 1
-rw-r--r--  drivers/phy/renesas/phy-rcar-gen2.c | 5
-rw-r--r--  drivers/phy/renesas/phy-rcar-gen3-usb2.c | 86
-rw-r--r--  drivers/phy/renesas/phy-rcar-gen3-usb3.c | 5
-rw-r--r--  drivers/phy/rockchip/Kconfig | 8
-rw-r--r--  drivers/phy/rockchip/Makefile | 1
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-emmc.c | 4
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-inno-hdmi.c | 1277
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-inno-usb2.c | 8
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-typec.c | 8
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-usb.c | 145
-rw-r--r--  drivers/phy/socionext/Kconfig | 34
-rw-r--r--  drivers/phy/socionext/Makefile | 8
-rw-r--r--  drivers/phy/socionext/phy-uniphier-pcie.c | 240
-rw-r--r--  drivers/phy/socionext/phy-uniphier-usb2.c | 244
-rw-r--r--  drivers/phy/socionext/phy-uniphier-usb3hs.c | 422
-rw-r--r--  drivers/phy/socionext/phy-uniphier-usb3ss.c | 349
-rw-r--r--  drivers/phy/tegra/xusb.c | 4
-rw-r--r--  drivers/phy/ti/phy-twl4030-usb.c | 29
-rw-r--r--  drivers/pinctrl/Kconfig | 17
-rw-r--r--  drivers/pinctrl/Makefile | 2
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed.c | 2
-rw-r--r--  drivers/pinctrl/bcm/Kconfig | 13
-rw-r--r--  drivers/pinctrl/bcm/Makefile | 1
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-ns.c | 372
-rw-r--r--  drivers/pinctrl/berlin/berlin.c | 6
-rw-r--r--  drivers/pinctrl/cirrus/pinctrl-madera-core.c | 10
-rw-r--r--  drivers/pinctrl/core.c | 4
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx.c | 7
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-imx1-core.c | 12
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-mxs.c | 1
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 41
-rw-r--r--  drivers/pinctrl/intel/pinctrl-broxton.c | 154
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cannonlake.c | 54
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cedarfork.c | 7
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 147
-rw-r--r--  drivers/pinctrl/intel/pinctrl-denverton.c | 7
-rw-r--r--  drivers/pinctrl/intel/pinctrl-geminilake.c | 75
-rw-r--r--  drivers/pinctrl/intel/pinctrl-icelake.c | 7
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 289
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.h | 42
-rw-r--r--  drivers/pinctrl/intel/pinctrl-lewisburg.c | 7
-rw-r--r--  drivers/pinctrl/intel/pinctrl-merrifield.c | 56
-rw-r--r--  drivers/pinctrl/intel/pinctrl-sunrisepoint.c | 19
-rw-r--r--  drivers/pinctrl/mediatek/Kconfig | 49
-rw-r--r--  drivers/pinctrl/mediatek/Makefile | 5
-rw-r--r--  drivers/pinctrl/mediatek/mtk-eint.c | 2
-rw-r--r--  drivers/pinctrl/mediatek/mtk-eint.h | 4
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-moore.c | 690
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-moore.h | 51
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt6765.c | 1108
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt7622.c | 1407
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt7623.c | 1441
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt8183.c | 544
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c | 670
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h | 291
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 4
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-mt6765.h | 1754
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mtk-mt8183.h | 1916
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-paris.c | 907
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-paris.h | 63
-rw-r--r--  drivers/pinctrl/meson/Kconfig | 6
-rw-r--r--  drivers/pinctrl/meson/Makefile | 1
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-g12a.c | 1404
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson.c | 4
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson.h | 2
-rw-r--r--  drivers/pinctrl/mvebu/pinctrl-mvebu.c | 6
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-ab8500.c | 2
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-ab8505.c | 2
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-abx500.c | 2
-rw-r--r--  drivers/pinctrl/nomadik/pinctrl-nomadik.c | 12
-rw-r--r--  drivers/pinctrl/nuvoton/Kconfig | 12
-rw-r--r--  drivers/pinctrl/nuvoton/Makefile | 4
-rw-r--r--  drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c | 2072
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 35
-rw-r--r--  drivers/pinctrl/pinctrl-as3722.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-at91-pio4.c | 29
-rw-r--r--  drivers/pinctrl/pinctrl-at91.c | 40
-rw-r--r--  drivers/pinctrl/pinctrl-coh901.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-digicolor.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-falcon.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-gemini.c | 79
-rw-r--r--  drivers/pinctrl/pinctrl-ingenic.c | 462
-rw-r--r--  drivers/pinctrl/pinctrl-lantiq.c | 8
-rw-r--r--  drivers/pinctrl/pinctrl-lpc18xx.c | 6
-rw-r--r--  drivers/pinctrl/pinctrl-mcp23s08.c | 15
-rw-r--r--  drivers/pinctrl/pinctrl-pistachio.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-rockchip.c | 10
-rw-r--r--  drivers/pinctrl/pinctrl-rza1.c | 21
-rw-r--r--  drivers/pinctrl/pinctrl-rzn1.c | 947
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 32
-rw-r--r--  drivers/pinctrl/pinctrl-st.c | 6
-rw-r--r--  drivers/pinctrl/qcom/Kconfig | 17
-rw-r--r--  drivers/pinctrl/qcom/Makefile | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 250
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.h | 4
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-qcs404.c | 1697
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-sdm660.c | 1455
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-spmi-gpio.c | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-spmi-mpp.c | 29
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c | 30
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c | 2
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-samsung.c | 2
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-samsung.h | 2
-rw-r--r--  drivers/pinctrl/sh-pfc/Kconfig | 16
-rw-r--r--  drivers/pinctrl/sh-pfc/Makefile | 3
-rw-r--r--  drivers/pinctrl/sh-pfc/core.c | 23
-rw-r--r--  drivers/pinctrl/sh-pfc/core.h | 7
-rw-r--r--  drivers/pinctrl/sh-pfc/gpio.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-emev2.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a73a4.c | 15
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7740.c | 15
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a77470.c | 408
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7778.c | 10
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7779.c | 14
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7790.c | 15
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7791.c | 43
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7792.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7794.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7795.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7796.c | 842
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a77965.c | 419
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a77970.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a77990.c | 981
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a77995.c | 370
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh7203.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh7264.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh7269.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh73a0.c | 15
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh7720.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh7723.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh7724.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh7734.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh7757.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh7785.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh7786.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-shx3.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/pinctrl.c | 5
-rw-r--r--  drivers/pinctrl/sh-pfc/sh_pfc.h | 10
-rw-r--r--  drivers/pinctrl/sirf/pinctrl-atlas7.c | 11
-rw-r--r--  drivers/pinctrl/sirf/pinctrl-sirf.c | 2
-rw-r--r--  drivers/pinctrl/spear/pinctrl-spear.h | 2
-rw-r--r--  drivers/pinctrl/sprd/pinctrl-sprd.c | 13
-rw-r--r--  drivers/pinctrl/stm32/pinctrl-stm32.c | 4
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.c | 34
-rw-r--r--  drivers/pinctrl/tegra/pinctrl-tegra.c | 1
-rw-r--r--  drivers/pinctrl/ti/pinctrl-ti-iodelay.c | 8
-rw-r--r--  drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c | 5
-rw-r--r--  drivers/pinctrl/uniphier/pinctrl-uniphier.h | 2
-rw-r--r--  drivers/pinctrl/vt8500/pinctrl-wmt.c | 6
-rw-r--r--  drivers/pinctrl/vt8500/pinctrl-wmt.h | 2
-rw-r--r--  drivers/platform/chrome/cros_ec_proto.c | 2
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/platform/x86/alienware-wmi.c | 1
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 39
-rw-r--r--  drivers/platform/x86/dell-smbios-wmi.c | 1
-rw-r--r--  drivers/platform/x86/eeepc-laptop.c | 43
-rw-r--r--  drivers/platform/x86/intel_cht_int33fe.c | 51
-rw-r--r--  drivers/platform/x86/intel_int0002_vgpio.c | 2
-rw-r--r--  drivers/platform/x86/intel_mid_powerbtn.c | 4
-rw-r--r--  drivers/platform/x86/intel_telemetry_debugfs.c | 2
-rw-r--r--  drivers/platform/x86/intel_telemetry_pltdrv.c | 2
-rw-r--r--  drivers/power/reset/at91-sama5d2_shdwc.c | 119
-rw-r--r--  drivers/power/reset/qcom-pon.c | 1
-rw-r--r--  drivers/power/reset/rmobile-reset.c | 5
-rw-r--r--  drivers/power/supply/Kconfig | 7
-rw-r--r--  drivers/power/supply/Makefile | 1
-rw-r--r--  drivers/power/supply/ab8500_fg.c | 52
-rw-r--r--  drivers/power/supply/bq25890_charger.c | 62
-rw-r--r--  drivers/power/supply/bq27xxx_battery.c | 9
-rw-r--r--  drivers/power/supply/bq27xxx_battery_i2c.c | 2
-rw-r--r--  drivers/power/supply/cros_usbpd-charger.c | 117
-rw-r--r--  drivers/power/supply/ds2780_battery.c | 2
-rw-r--r--  drivers/power/supply/ds2781_battery.c | 2
-rw-r--r--  drivers/power/supply/ds2782_battery.c | 2
-rw-r--r--  drivers/power/supply/max14577_charger.c | 22
-rw-r--r--  drivers/power/supply/max17040_battery.c | 18
-rw-r--r--  drivers/power/supply/max17042_battery.c | 32
-rw-r--r--  drivers/power/supply/max77693_charger.c | 22
-rw-r--r--  drivers/power/supply/max8925_power.c | 1
-rw-r--r--  drivers/power/supply/max8997_charger.c | 26
-rw-r--r--  drivers/power/supply/max8998_charger.c | 28
-rw-r--r--  drivers/power/supply/power_supply_sysfs.c | 3
-rw-r--r--  drivers/power/supply/sc2731_charger.c | 504
-rw-r--r--  drivers/power/supply/twl4030_charger.c | 30
-rw-r--r--  drivers/powercap/intel_rapl.c | 73
-rw-r--r--  drivers/ptp/ptp_chardev.c | 4
-rw-r--r--  drivers/ptp/ptp_dte.c | 6
-rw-r--r--  drivers/regulator/Kconfig | 21
-rw-r--r--  drivers/regulator/Makefile | 4
-rw-r--r--  drivers/regulator/arizona-ldo1.c | 2
-rw-r--r--  drivers/regulator/axp20x-regulator.c | 3
-rw-r--r--  drivers/regulator/bd71837-regulator.c | 626
-rw-r--r--  drivers/regulator/bd718x7-regulator.c | 1119
-rw-r--r--  drivers/regulator/core.c | 128
-rw-r--r--  drivers/regulator/da9052-regulator.c | 2
-rw-r--r--  drivers/regulator/da9055-regulator.c | 2
-rw-r--r--  drivers/regulator/da9211-regulator.c | 10
-rw-r--r--  drivers/regulator/fixed-helper.c | 1
-rw-r--r--  drivers/regulator/fixed.c | 57
-rw-r--r--  drivers/regulator/helpers.c | 243
-rw-r--r--  drivers/regulator/isl9305.c | 4
-rw-r--r--  drivers/regulator/lm363x-regulator.c | 6
-rw-r--r--  drivers/regulator/lochnagar-regulator.c | 254
-rw-r--r--  drivers/regulator/lp8788-ldo.c | 7
-rw-r--r--  drivers/regulator/ltc3589.c | 3
-rw-r--r--  drivers/regulator/ltc3676.c | 3
-rw-r--r--  drivers/regulator/max8952.c | 1
-rw-r--r--  drivers/regulator/max8973-regulator.c | 1
-rw-r--r--  drivers/regulator/max8997-regulator.c | 4
-rw-r--r--  drivers/regulator/mc13xxx-regulator-core.c | 2
-rw-r--r--  drivers/regulator/of_regulator.c | 46
-rw-r--r--  drivers/regulator/pfuze100-regulator.c | 91
-rw-r--r--  drivers/regulator/qcom-rpmh-regulator.c | 17
-rw-r--r--  drivers/regulator/qcom_smd-regulator.c | 77
-rw-r--r--  drivers/regulator/s5m8767.c | 25
-rw-r--r--  drivers/regulator/stpmic1_regulator.c | 674
-rw-r--r--  drivers/regulator/tps65090-regulator.c | 1
-rw-r--r--  drivers/reset/reset-imx7.c | 1
-rw-r--r--  drivers/s390/block/dasd.c | 6
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 2
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/block/scm_blk.c | 2
-rw-r--r--  drivers/s390/char/Makefile | 1
-rw-r--r--  drivers/s390/char/monwriter.c | 33
-rw-r--r--  drivers/s390/char/sclp.h | 52
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 11
-rw-r--r--  drivers/s390/char/sclp_early.c | 123
-rw-r--r--  drivers/s390/char/sclp_early_core.c | 127
-rw-r--r--  drivers/s390/char/sclp_pci.c | 10
-rw-r--r--  drivers/s390/char/tape_3590.c | 2
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 2
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 30
-rw-r--r--  drivers/s390/cio/qdio_main.c | 15
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 1
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c | 2
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c | 24
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c | 6
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c | 4
-rw-r--r--  drivers/s390/cio/vfio_ccw_private.h | 2
-rw-r--r--  drivers/s390/crypto/Makefile | 6
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 79
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 25
-rw-r--r--  drivers/s390/crypto/pkey_api.c | 521
-rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c | 157
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c | 939
-rw-r--r--  drivers/s390/crypto/vfio_ap_private.h | 88
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 627
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 15
-rw-r--r--  drivers/s390/crypto/zcrypt_card.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_cca_key.h | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c | 6
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.h | 6
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2c.c (renamed from drivers/s390/crypto/zcrypt_pcixcc.c) | 125
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2c.h (renamed from drivers/s390/crypto/zcrypt_pcixcc.h) | 14
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c | 20
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h | 24
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c | 24
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.h | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c | 74
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.h | 15
-rw-r--r--  drivers/s390/crypto/zcrypt_queue.c | 2
-rw-r--r--  drivers/s390/net/ism_drv.c | 4
-rw-r--r--  drivers/s390/net/qeth_core.h | 43
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 833
-rw-r--r--  drivers/s390/net/qeth_core_mpc.c | 33
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 30
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 15
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 150
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 363
-rw-r--r--  drivers/sbus/char/openprom.c | 11
-rw-r--r--  drivers/sbus/char/oradax.c | 3
-rw-r--r--  drivers/scsi/3w-9xxx.c | 50
-rw-r--r--  drivers/scsi/3w-sas.c | 38
-rw-r--r--  drivers/scsi/3w-xxxx.c | 20
-rw-r--r--  drivers/scsi/3w-xxxx.h | 1
-rw-r--r--  drivers/scsi/53c700.h | 2
-rw-r--r--  drivers/scsi/BusLogic.c | 36
-rw-r--r--  drivers/scsi/FlashPoint.c | 6
-rw-r--r--  drivers/scsi/Kconfig | 35
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/NCR5380.c | 167
-rw-r--r--  drivers/scsi/NCR5380.h | 2
-rw-r--r--  drivers/scsi/a100u2w.c | 20
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 7
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 2
-rw-r--r--  drivers/scsi/aacraid/linit.c | 4
-rw-r--r--  drivers/scsi/advansys.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7770.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx.h | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 44
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 8
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.h | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_93cx6.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 41
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_pci.c | 7
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm.h | 4
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_gram.y | 4
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y | 4
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l | 4
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_scan.l | 4
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h | 4
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 9
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_scb.c | 5
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c | 46
-rw-r--r--  drivers/scsi/am53c974.c | 54
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 29
-rw-r--r--  drivers/scsi/atp870u.c | 6
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 10
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 15
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 75
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 27
-rw-r--r--  drivers/scsi/bfa/bfa_defs_svc.h | 2
-rw-r--r--  drivers/scsi/bfa/bfa_fcbuild.c | 108
-rw-r--r--  drivers/scsi/bfa/bfa_fcbuild.h | 9
-rw-r--r--  drivers/scsi/bfa/bfad.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad_im.h | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 9
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 8
-rw-r--r--  drivers/scsi/csiostor/csio_lnode.c | 6
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 12
-rw-r--r--  drivers/scsi/csiostor/csio_wr.c | 17
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 154
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 5
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 3
-rw-r--r--  drivers/scsi/dc395x.c | 191
-rw-r--r--  drivers/scsi/esp_scsi.c | 286
-rw-r--r--  drivers/scsi/esp_scsi.h | 38
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 41
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 19
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 99
-rw-r--r--  drivers/scsi/fnic/vnic_dev.c | 26
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 3
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 161
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 1
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 15
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 15
-rw-r--r--  drivers/scsi/hpsa.c | 148
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 6
-rw-r--r--  drivers/scsi/ipr.c | 106
-rw-r--r--  drivers/scsi/ipr.h | 1
-rw-r--r--  drivers/scsi/ips.c | 81
-rw-r--r--  drivers/scsi/isci/host.c | 8
-rw-r--r--  drivers/scsi/isci/host.h | 2
-rw-r--r--  drivers/scsi/isci/request.c | 4
-rw-r--r--  drivers/scsi/isci/task.c | 4
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 3
-rw-r--r--  drivers/scsi/jazz_esp.c | 30
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 6
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 22
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 29
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 126
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 344
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 36
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 7
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 45
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 111
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 30
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 34
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 310
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 14
-rw-r--r--  drivers/scsi/mac_esp.c | 217
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 117
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 153
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 16
-rw-r--r--  drivers/scsi/mesh.c | 8
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 1189
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 9
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 89
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 527
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 1488
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 355
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 101
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_warpdrive.c | 70
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 21
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 12
-rw-r--r--  drivers/scsi/mvumi.c | 89
-rw-r--r--  drivers/scsi/myrb.c | 3656
-rw-r--r--  drivers/scsi/myrb.h | 958
-rw-r--r--  drivers/scsi/myrs.c | 3268
-rw-r--r--  drivers/scsi/myrs.h | 1134
-rw-r--r--  drivers/scsi/nsp32.c | 18
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 2
-rw-r--r--  drivers/scsi/pcmcia/aha152x_stub.c | 14
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 15
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.h | 4
-rw-r--r--  drivers/scsi/pcmcia/qlogic_stub.c | 19
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 16
-rw-r--r--  drivers/scsi/pm8001/pm8001_defs.h | 8
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 31
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.h | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 31
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 49
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 3
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 119
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.h | 9
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 8
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 9
-rw-r--r--  drivers/scsi/qla1280.c | 26
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 587
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 32
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 536
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 412
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 23
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 43
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 84
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 52
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 319
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 542
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 51
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 10
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 26
-rw-r--r--  drivers/scsi/raid_class.c | 4
-rw-r--r--  drivers/scsi/scsi_error.c | 3
-rw-r--r--  drivers/scsi/scsi_lib.c | 16
-rw-r--r--  drivers/scsi/scsi_pm.c | 1
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 2
-rw-r--r--  drivers/scsi/sd.c | 9
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 100
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.c | 11
-rw-r--r--  drivers/scsi/snic/snic_disc.c | 7
-rw-r--r--  drivers/scsi/snic/snic_io.c | 25
-rw-r--r--  drivers/scsi/snic/snic_main.c | 24
-rw-r--r--  drivers/scsi/snic/snic_scsi.c | 15
-rw-r--r--  drivers/scsi/snic/vnic_dev.c | 29
-rw-r--r--  drivers/scsi/sr.c | 3
-rw-r--r--  drivers/scsi/sun3x_esp.c | 30
-rw-r--r--  drivers/scsi/sun_esp.c | 61
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c | 15
-rw-r--r--  drivers/scsi/ufs/Kconfig | 19
-rw-r--r--  drivers/scsi/ufs/Makefile | 3
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 82
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h | 5
-rw-r--r--  drivers/scsi/ufs/ufs.h | 94
-rw-r--r--  drivers/scsi/ufs/ufs_bsg.c | 210
-rw-r--r--  drivers/scsi/ufs/ufs_bsg.h | 23
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 438
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 12
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 25
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 77
-rw-r--r--  drivers/scsi/zorro_esp.c | 290
-rw-r--r--  drivers/soc/fsl/dpio/dpio-service.c | 58
-rw-r--r--  drivers/soc/fsl/qbman/bman_ccsr.c | 11
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 3
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c | 11
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c | 8
-rw-r--r--  drivers/soc/fsl/qe/ucc.c | 2
-rw-r--r--  drivers/soc/qcom/Kconfig | 2
-rw-r--r--  drivers/soc/qcom/qcom-geni-se.c | 41
-rw-r--r--  drivers/soundwire/stream.c | 23
-rw-r--r--  drivers/spi/Kconfig | 53
-rw-r--r--  drivers/spi/Makefile | 6
-rw-r--r--  drivers/spi/spi-at91-usart.c | 432
-rw-r--r--  drivers/spi/spi-atmel.c | 10
-rw-r--r--  drivers/spi/spi-bcm-qspi.c | 4
-rw-r--r--  drivers/spi/spi-bcm63xx-hsspi.c | 20
-rw-r--r--  drivers/spi/spi-davinci.c | 19
-rw-r--r--  drivers/spi/spi-dw-mmio.c | 38
-rw-r--r--  drivers/spi/spi-dw.c | 28
-rw-r--r--  drivers/spi/spi-dw.h | 2
-rw-r--r--  drivers/spi/spi-ep93xx.c | 36
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 6
-rw-r--r--  drivers/spi/spi-fsl-espi.c | 4
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c | 2
-rw-r--r--  drivers/spi/spi-geni-qcom.c | 703
-rw-r--r--  drivers/spi/spi-gpio.c | 16
-rw-r--r--  drivers/spi/spi-imx.c | 53
-rw-r--r--  drivers/spi/spi-mem.c | 69
-rw-r--r--  drivers/spi/spi-mt65xx.c | 37
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c | 177
-rw-r--r--  drivers/spi/spi-orion.c | 22
-rw-r--r--  drivers/spi/spi-pic32-sqi.c | 4
-rw-r--r--  drivers/spi/spi-pic32.c | 6
-rw-r--r--  drivers/spi/spi-pl022.c | 10
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 100
-rw-r--r--  drivers/spi/spi-qcom-qspi.c | 581
-rw-r--r--  drivers/spi/spi-rb4xx.c | 2
-rw-r--r--  drivers/spi/spi-rockchip.c | 62
-rw-r--r--  drivers/spi/spi-rspi.c | 44
-rw-r--r--  drivers/spi/spi-sh-hspi.c | 12
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 38
-rw-r--r--  drivers/spi/spi-sh.c | 12
-rw-r--r--  drivers/spi/spi-slave-mt27xx.c | 554
-rw-r--r--  drivers/spi/spi-slave-system-control.c | 1
-rw-r--r--  drivers/spi/spi-sprd.c | 745
-rw-r--r--  drivers/spi/spi-stm32-qspi.c | 512
-rw-r--r--  drivers/spi/spi-tegra20-slink.c | 31
-rw-r--r--  drivers/spi/spi.c | 119
-rw-r--r--  drivers/spi/spidev.c | 9
-rw-r--r--  drivers/ssb/driver_chipcommon.c | 2
-rw-r--r--  drivers/staging/dgnc/dgnc_tty.c | 218
-rw-r--r--  drivers/staging/erofs/namei.c | 19
-rw-r--r--  drivers/staging/fsl-dpaa2/Kconfig | 16
-rw-r--r--  drivers/staging/fsl-dpaa2/Makefile | 2
-rw-r--r--  drivers/staging/fsl-dpaa2/ethernet/Makefile | 11
-rw-r--r--  drivers/staging/fsl-dpaa2/ethernet/TODO | 18
-rw-r--r--  drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 280
-rw-r--r--  drivers/staging/fsl-dpaa2/rtc/Makefile | 7
-rw-r--r--  drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h | 137
-rw-r--r--  drivers/staging/fsl-dpaa2/rtc/dprtc.c | 701
-rw-r--r--  drivers/staging/fsl-dpaa2/rtc/dprtc.h | 164
-rw-r--r--  drivers/staging/fwserial/fwserial.c | 66
-rw-r--r--  drivers/staging/greybus/uart.c | 47
-rw-r--r--  drivers/staging/iio/adc/ad7606.c | 8
-rw-r--r--  drivers/staging/media/mt9t031/Kconfig | 6
-rw-r--r--  drivers/staging/mt29f_spinand/mt29f_spinand.c | 47
-rw-r--r--  drivers/staging/mt7621-eth/mdio.c | 2
-rw-r--r--  drivers/staging/rtl8192e/rtl8192e/rtl_core.c | 2
-rw-r--r--  drivers/staging/rtl8192e/rtllib_crypt_tkip.c | 34
-rw-r--r--  drivers/staging/rtl8192e/rtllib_crypt_wep.c | 28
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c | 34
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c | 26
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_cm.c | 23
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 24
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 45
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 15
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 17
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.h | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_stat.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 44
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 1
-rw-r--r--  drivers/target/target_core_iblock.c | 58
-rw-r--r--  drivers/target/target_core_iblock.h | 1
-rw-r--r--  drivers/target/target_core_sbc.c | 23
-rw-r--r--  drivers/target/target_core_spc.c | 6
-rw-r--r--  drivers/target/target_core_transport.c | 19
-rw-r--r--  drivers/target/target_core_xcopy.c | 3
-rw-r--r--  drivers/thermal/intel_soc_dts_thermal.c | 2
-rw-r--r--  drivers/thunderbolt/icm.c | 49
-rw-r--r--  drivers/thunderbolt/nhi.c | 2
-rw-r--r--  drivers/tty/amiserial.c | 90
-rw-r--r--  drivers/tty/cyclades.c | 77
-rw-r--r--  drivers/tty/ipwireless/tty.c | 36
-rw-r--r--  drivers/tty/isicom.c | 72
-rw-r--r--  drivers/tty/moxa.c | 79
-rw-r--r--  drivers/tty/mxser.c | 97
-rw-r--r--  drivers/tty/n_gsm.c | 11
-rw-r--r--  drivers/tty/n_r3964.c | 22
-rw-r--r--  drivers/tty/pty.c | 14
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 4
-rw-r--r--  drivers/tty/serial/Kconfig | 1
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 42
-rw-r--r--  drivers/tty/serial/cpm_uart/cpm_uart_core.c | 10
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 3
-rw-r--r--  drivers/tty/serial/imx.c | 8
-rw-r--r--  drivers/tty/serial/mvebu-uart.c | 1
-rw-r--r--  drivers/tty/serial/qcom_geni_serial.c | 4
-rw-r--r--  drivers/tty/serial/serial_core.c | 43
-rw-r--r--  drivers/tty/serial/serial_mctrl_gpio.c | 7
-rw-r--r--  drivers/tty/serial/sh-sci.c | 56
-rw-r--r--  drivers/tty/synclink.c | 3
-rw-r--r--  drivers/tty/synclink_gt.c | 19
-rw-r--r--  drivers/tty/synclinkmp.c | 3
-rw-r--r--  drivers/tty/sysrq.c | 2
-rw-r--r--  drivers/tty/tty_io.c | 246
-rw-r--r--  drivers/tty/tty_ioctl.c | 16
-rw-r--r--  drivers/tty/vt/vt_ioctl.c | 35
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 19
-rw-r--r--  drivers/usb/chipidea/core.c | 19
-rw-r--r--  drivers/usb/chipidea/host.c | 9
-rw-r--r--  drivers/usb/chipidea/otg.c | 9
-rw-r--r--  drivers/usb/chipidea/otg.h | 3
-rw-r--r--  drivers/usb/chipidea/udc.c | 9
-rw-r--r--  drivers/usb/chipidea/usbmisc_imx.c | 4
-rw-r--r--  drivers/usb/class/cdc-acm.c | 63
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 2
-rw-r--r--  drivers/usb/class/usbtmc.c | 1583
-rw-r--r--  drivers/usb/common/roles.c | 15
-rw-r--r--  drivers/usb/core/buffer.c | 8
-rw-r--r--  drivers/usb/core/devio.c | 28
-rw-r--r--  drivers/usb/core/driver.c | 31
-rw-r--r--  drivers/usb/core/generic.c | 27
-rw-r--r--  drivers/usb/core/hcd.c | 14
-rw-r--r--  drivers/usb/core/hub.c | 42
-rw-r--r--  drivers/usb/core/phy.c | 7
-rw-r--r--  drivers/usb/core/port.c | 10
-rw-r--r--  drivers/usb/core/quirks.c | 3
-rw-r--r--  drivers/usb/core/usb.c | 2
-rw-r--r--  drivers/usb/dwc2/core.h | 29
-rw-r--r--  drivers/usb/dwc2/debugfs.c | 1
-rw-r--r--  drivers/usb/dwc2/gadget.c | 121
-rw-r--r--  drivers/usb/dwc2/hcd.c | 48
-rw-r--r--  drivers/usb/dwc2/hw.h | 15
-rw-r--r--  drivers/usb/dwc2/params.c | 7
-rw-r--r--  drivers/usb/dwc2/platform.c | 8
-rw-r--r--  drivers/usb/dwc3/Kconfig | 2
-rw-r--r--  drivers/usb/dwc3/core.c | 2
-rw-r--r--  drivers/usb/dwc3/dwc3-exynos.c | 212
-rw-r--r--  drivers/usb/dwc3/gadget.c | 29
-rw-r--r--  drivers/usb/early/xhci-dbc.c | 3
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 7
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 216
-rw-r--r--  drivers/usb/gadget/function/f_uvc.c | 57
-rw-r--r--  drivers/usb/gadget/function/u_uvc.h | 3
-rw-r--r--  drivers/usb/gadget/function/uvc.h | 16
-rw-r--r--  drivers/usb/gadget/function/uvc_configfs.c | 1168
-rw-r--r--  drivers/usb/gadget/function/uvc_v4l2.c | 4
-rw-r--r--  drivers/usb/gadget/function/uvc_video.c | 48
-rw-r--r--  drivers/usb/gadget/function/uvc_video.h | 2
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/epn.c | 2
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.c | 8
-rw-r--r--  drivers/usb/gadget/udc/core.c | 9
-rw-r--r--  drivers/usb/gadget/udc/fotg210-udc.c | 2
-rw-r--r--  drivers/usb/gadget/udc/fsl_udc_core.c | 36
-rw-r--r--  drivers/usb/gadget/udc/mv_udc_core.c | 2
-rw-r--r--  drivers/usb/gadget/udc/net2280.c | 3
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 14
-rw-r--r--  drivers/usb/gadget/udc/udc-xilinx.c | 2
-rw-r--r--  drivers/usb/host/Kconfig | 2
-rw-r--r--  drivers/usb/host/Makefile | 1
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 11
-rw-r--r--  drivers/usb/host/ehci-mv.c | 181
-rw-r--r--  drivers/usb/host/ehci-q.c | 4
-rw-r--r--  drivers/usb/host/ehci-timer.c | 2
-rw-r--r--  drivers/usb/host/ehci.h | 4
-rw-r--r--  drivers/usb/host/fotg210-hcd.c | 50
-rw-r--r--  drivers/usb/host/fotg210.h | 7
-rw-r--r--  drivers/usb/host/ohci-at91.c | 2
-rw-r--r--  drivers/usb/host/pci-quirks.c | 12
-rw-r--r--  drivers/usb/host/xhci-hub.c | 5
-rw-r--r--  drivers/usb/host/xhci-mtk-sch.c | 429
-rw-r--r--  drivers/usb/host/xhci-mtk.c | 4
-rw-r--r--  drivers/usb/host/xhci-mtk.h | 23
-rw-r--r--  drivers/usb/host/xhci-pci.c | 32
-rw-r--r--  drivers/usb/host/xhci-plat.c | 3
-rw-r--r--  drivers/usb/host/xhci-ring.c | 20
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 144
-rw-r--r--  drivers/usb/host/xhci.h | 3
-rw-r--r--  drivers/usb/misc/appledisplay.c | 7
-rw-r--r--  drivers/usb/misc/iowarrior.c | 4
-rw-r--r--  drivers/usb/misc/trancevibrator.c | 4
-rw-r--r--  drivers/usb/mtu3/mtu3_core.c | 4
-rw-r--r--  drivers/usb/mtu3/mtu3_gadget.c | 22
-rw-r--r--  drivers/usb/musb/musb_dsps.c | 12
-rw-r--r--  drivers/usb/phy/phy-ab8500-usb.c | 8
-rw-r--r--  drivers/usb/phy/phy-mxs-usb.c | 2
-rw-r--r--  drivers/usb/renesas_usbhs/common.c | 113
-rw-r--r--  drivers/usb/renesas_usbhs/common.h | 5
-rw-r--r--  drivers/usb/renesas_usbhs/rcar3.c | 27
-rw-r--r--  drivers/usb/roles/intel-xhci-usb-role-switch.c | 2
-rw-r--r--  drivers/usb/serial/ark3116.c | 38
-rw-r--r--  drivers/usb/serial/cypress_m8.c | 7
-rw-r--r--  drivers/usb/serial/f81232.c | 36
-rw-r--r--  drivers/usb/serial/f81534.c | 38
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 439
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 28
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 37
-rw-r--r--  drivers/usb/serial/io_ti.c | 47
-rw-r--r--  drivers/usb/serial/mos7720.c | 86
-rw-r--r--  drivers/usb/serial/mos7840.c | 39
-rw-r--r--  drivers/usb/serial/opticon.c | 43
-rw-r--r--  drivers/usb/serial/option.c | 18
-rw-r--r--  drivers/usb/serial/pl2303.c | 29
-rw-r--r--  drivers/usb/serial/quatech2.c | 42
-rw-r--r--  drivers/usb/serial/ssu100.c | 42
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 74
-rw-r--r--  drivers/usb/serial/usb-serial-simple.c | 3
-rw-r--r--  drivers/usb/serial/usb-serial.c | 20
-rw-r--r--  drivers/usb/serial/usb-wwan.h | 6
-rw-r--r--  drivers/usb/serial/usb_wwan.c | 63
-rw-r--r--  drivers/usb/serial/whiteheat.c | 42
-rw-r--r--  drivers/usb/storage/Kconfig | 23
-rw-r--r--  drivers/usb/storage/isd200.c | 2
-rw-r--r--  drivers/usb/typec/Kconfig | 45
-rw-r--r--  drivers/usb/typec/Makefile | 6
-rw-r--r--  drivers/usb/typec/class.c | 40
-rw-r--r--  drivers/usb/typec/fusb302/Kconfig | 7
-rw-r--r--  drivers/usb/typec/fusb302/Makefile | 2
-rw-r--r--  drivers/usb/typec/mux.c | 17
-rw-r--r--  drivers/usb/typec/tcpm/Kconfig | 52
-rw-r--r--  drivers/usb/typec/tcpm/Makefile | 7
-rw-r--r--  drivers/usb/typec/tcpm/fusb302.c (renamed from drivers/usb/typec/fusb302/fusb302.c) | 75
-rw-r--r--  drivers/usb/typec/tcpm/fusb302_reg.h (renamed from drivers/usb/typec/fusb302/fusb302_reg.h) | 0
-rw-r--r--  drivers/usb/typec/tcpm/tcpci.c (renamed from drivers/usb/typec/tcpci.c) | 0
-rw-r--r--  drivers/usb/typec/tcpm/tcpci.h (renamed from drivers/usb/typec/tcpci.h) | 0
-rw-r--r--  drivers/usb/typec/tcpm/tcpci_rt1711h.c (renamed from drivers/usb/typec/tcpci_rt1711h.c) | 0
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c (renamed from drivers/usb/typec/tcpm.c) | 17
-rw-r--r--  drivers/usb/typec/tcpm/wcove.c (renamed from drivers/usb/typec/typec_wcove.c) | 0
-rw-r--r--  drivers/usb/usbip/vhci_hcd.c | 57
-rw-r--r--  drivers/usb/usbip/vudc_main.c | 10
-rw-r--r--  drivers/usb/wusbcore/crypto.c | 16
-rw-r--r--  drivers/usb/wusbcore/wa-rpipe.c | 6
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 23
-rw-r--r--  drivers/vhost/net.c | 326
-rw-r--r--  drivers/vhost/vhost.c | 24
-rw-r--r--  drivers/video/backlight/Kconfig | 16
-rw-r--r--  drivers/video/backlight/Makefile | 2
-rw-r--r--  drivers/video/backlight/adp5520_bl.c | 2
-rw-r--r--  drivers/video/backlight/adp8860_bl.c | 2
-rw-r--r--  drivers/video/backlight/adp8870_bl.c | 2
-rw-r--r--  drivers/video/backlight/ld9040.c | 811
-rw-r--r--  drivers/video/backlight/ld9040_gamma.h | 202
-rw-r--r--  drivers/video/backlight/lm3639_bl.c | 6
-rw-r--r--  drivers/video/backlight/pwm_bl.c | 81
-rw-r--r--  drivers/video/backlight/s6e63m0.c | 857
-rw-r--r--  drivers/video/backlight/s6e63m0_gamma.h | 266
-rw-r--r--  drivers/video/fbdev/aty/atyfb.h | 3
-rw-r--r--  drivers/video/fbdev/aty/atyfb_base.c | 7
-rw-r--r--  drivers/video/fbdev/aty/mach64_ct.c | 10
-rw-r--r--  drivers/video/fbdev/efifb.c | 6
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | 5
-rw-r--r--  drivers/video/fbdev/pxa168fb.c | 6
-rw-r--r--  drivers/video/fbdev/stifb.c | 2
-rw-r--r--  drivers/xen/biomerge.c | 3
-rw-r--r--  drivers/xen/grant-table.c | 27
-rw-r--r--  drivers/xen/swiotlb-xen.c | 10
-rw-r--r--  drivers/xen/time.c | 2
-rw-r--r--  drivers/xen/xen-acpi-pad.c | 1
-rw-r--r--  fs/afs/addr_list.c | 101
-rw-r--r--  fs/afs/cell.c | 17
-rw-r--r--  fs/afs/dynroot.c | 2
-rw-r--r--  fs/afs/internal.h | 12
-rw-r--r--  fs/afs/main.c | 2
-rw-r--r--  fs/afs/proc.c | 7
-rw-r--r--  fs/afs/rxrpc.c | 2
-rw-r--r--  fs/aio.c | 8
-rw-r--r--  fs/binfmt_elf.c | 6
-rw-r--r--  fs/btrfs/backref.c | 39
-rw-r--r--  fs/btrfs/btrfs_inode.h | 8
-rw-r--r--  fs/btrfs/check-integrity.c | 6
-rw-r--r--  fs/btrfs/compression.c | 2
-rw-r--r--  fs/btrfs/ctree.c | 68
-rw-r--r--  fs/btrfs/ctree.h | 56
-rw-r--r--  fs/btrfs/delayed-inode.c | 41
-rw-r--r--  fs/btrfs/delayed-inode.h | 4
-rw-r--r--  fs/btrfs/delayed-ref.c | 69
-rw-r--r--  fs/btrfs/delayed-ref.h | 10
-rw-r--r--  fs/btrfs/dev-replace.c | 64
-rw-r--r--  fs/btrfs/dev-replace.h | 8
-rw-r--r--  fs/btrfs/dir-item.c | 8
-rw-r--r--  fs/btrfs/disk-io.c | 24
-rw-r--r--  fs/btrfs/export.c | 4
-rw-r--r--  fs/btrfs/extent-tree.c | 424
-rw-r--r--  fs/btrfs/extent_io.c | 33
-rw-r--r--  fs/btrfs/extent_io.h | 4
-rw-r--r--  fs/btrfs/extent_map.c | 32
-rw-r--r--  fs/btrfs/extent_map.h | 4
-rw-r--r--  fs/btrfs/file.c | 33
-rw-r--r--  fs/btrfs/free-space-cache.c | 16
-rw-r--r--  fs/btrfs/inode.c | 132
-rw-r--r--  fs/btrfs/ioctl.c | 18
-rw-r--r--  fs/btrfs/qgroup.c | 455
-rw-r--r--  fs/btrfs/qgroup.h | 8
-rw-r--r--  fs/btrfs/ref-verify.c | 8
-rw-r--r--  fs/btrfs/relocation.c | 74
-rw-r--r--  fs/btrfs/scrub.c | 34
-rw-r--r--  fs/btrfs/send.c | 24
-rw-r--r--  fs/btrfs/super.c | 6
-rw-r--r--  fs/btrfs/tests/extent-io-tests.c | 10
-rw-r--r--  fs/btrfs/tests/extent-map-tests.c | 4
-rw-r--r--  fs/btrfs/transaction.c | 31
-rw-r--r--  fs/btrfs/tree-checker.c | 14
-rw-r--r--  fs/btrfs/tree-log.c | 86
-rw-r--r--  fs/btrfs/tree-log.h | 2
-rw-r--r--  fs/btrfs/volumes.c | 117
-rw-r--r--  fs/btrfs/volumes.h | 9
-rw-r--r--  fs/buffer.c | 10
-rw-r--r--  fs/cachefiles/namei.c | 2
-rw-r--r--  fs/cifs/cifsglob.h | 1
-rw-r--r--  fs/cifs/connect.c | 13
-rw-r--r--  fs/cifs/smb2ops.c | 2
-rw-r--r--  fs/cifs/transport.c | 21
-rw-r--r--  fs/compat_binfmt_elf.c | 2
-rw-r--r--  fs/compat_ioctl.c | 238
-rw-r--r--  fs/coredump.c | 2
-rw-r--r--  fs/crypto/fscrypt_private.h | 4
-rw-r--r--  fs/crypto/keyinfo.c | 10
-rw-r--r--  fs/dax.c | 27
-rw-r--r--  fs/ecryptfs/inode.c | 11
-rw-r--r--  fs/exec.c | 8
-rw-r--r--  fs/ext2/inode.c | 2
-rw-r--r--  fs/ext4/acl.c | 4
-rw-r--r--  fs/ext4/dir.c | 20
-rw-r--r--  fs/ext4/ext4.h | 37
-rw-r--r--  fs/ext4/ext4_extents.h | 13
-rw-r--r--  fs/ext4/extents.c | 595
-rw-r--r--  fs/ext4/extents_status.c | 654
-rw-r--r--  fs/ext4/extents_status.h | 80
-rw-r--r--  fs/ext4/inline.c | 6
-rw-r--r--  fs/ext4/inode.c | 162
-rw-r--r--  fs/ext4/ioctl.c | 97
-rw-r--r--  fs/ext4/mballoc.c | 14
-rw-r--r--  fs/ext4/mmp.c | 1
-rw-r--r--  fs/ext4/move_extent.c | 8
-rw-r--r--  fs/ext4/namei.c | 8
-rw-r--r--  fs/ext4/page-io.c | 2
-rw-r--r--  fs/ext4/resize.c | 23
-rw-r--r--  fs/ext4/super.c | 85
-rw-r--r--  fs/f2fs/acl.c | 12
-rw-r--r--  fs/f2fs/acl.h | 5
-rw-r--r--  fs/f2fs/checkpoint.c | 94
-rw-r--r--  fs/f2fs/data.c | 220
-rw-r--r--  fs/f2fs/debug.c | 35
-rw-r--r--  fs/f2fs/dir.c | 30
-rw-r--r--  fs/f2fs/extent_cache.c | 134
-rw-r--r--  fs/f2fs/f2fs.h | 253
-rw-r--r--  fs/f2fs/file.c | 196
-rw-r--r--  fs/f2fs/gc.c | 115
-rw-r--r--  fs/f2fs/gc.h | 5
-rw-r--r--  fs/f2fs/hash.c | 5
-rw-r--r--  fs/f2fs/inline.c | 8
-rw-r--r--  fs/f2fs/inode.c | 28
-rw-r--r--  fs/f2fs/namei.c | 57
-rw-r--r--  fs/f2fs/node.c | 83
-rw-r--r--  fs/f2fs/node.h | 5
-rw-r--r--  fs/f2fs/recovery.c | 125
-rw-r--r--  fs/f2fs/segment.c | 240
-rw-r--r--  fs/f2fs/segment.h | 20
-rw-r--r--  fs/f2fs/shrinker.c | 5
-rw-r--r--  fs/f2fs/super.c | 415
-rw-r--r--  fs/f2fs/sysfs.c | 17
-rw-r--r--  fs/f2fs/trace.c | 5
-rw-r--r--  fs/f2fs/trace.h | 5
-rw-r--r--  fs/f2fs/xattr.c | 5
-rw-r--r--  fs/f2fs/xattr.h | 5
-rw-r--r--  fs/fat/fatent.c | 1
-rw-r--r--  fs/fcntl.c | 2
-rw-r--r--  fs/fscache/cookie.c | 31
-rw-r--r--  fs/fscache/internal.h | 1
-rw-r--r--  fs/fscache/main.c | 4
-rw-r--r--  fs/gfs2/bmap.c | 6
-rw-r--r--  fs/gfs2/dir.c | 28
-rw-r--r--  fs/gfs2/file.c | 18
-rw-r--r--  fs/gfs2/glock.c | 17
-rw-r--r--  fs/gfs2/incore.h | 9
-rw-r--r--  fs/gfs2/lock_dlm.c | 10
-rw-r--r--  fs/gfs2/log.c | 11
-rw-r--r--  fs/gfs2/lops.c | 2
-rw-r--r--  fs/gfs2/main.c | 2
-rw-r--r--  fs/gfs2/ops_fstype.c | 5
-rw-r--r--  fs/gfs2/quota.c | 2
-rw-r--r--  fs/gfs2/rgrp.c | 201
-rw-r--r--  fs/gfs2/rgrp.h | 11
-rw-r--r--  fs/gfs2/super.c | 4
-rw-r--r--  fs/gfs2/trans.c | 15
-rw-r--r--  fs/gfs2/util.c | 16
-rw-r--r--  fs/gfs2/util.h | 2
-rw-r--r--  fs/gfs2/xattr.c | 18
-rw-r--r--  fs/ioctl.c | 2
-rw-r--r--  fs/iomap.c | 2
-rw-r--r--  fs/jbd2/checkpoint.c | 4
-rw-r--r--  fs/jffs2/background.c | 2
-rw-r--r--  fs/jffs2/super.c | 4
-rw-r--r--  fs/jfs/acl.c | 4
-rw-r--r--  fs/jfs/inode.c | 1
-rw-r--r--  fs/jfs/super.c | 2
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/nfsd/vfs.c | 3
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 1
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 4
-rw-r--r--  fs/ocfs2/dlmglue.c | 2
-rw-r--r--  fs/ocfs2/refcounttree.c | 16
-rw-r--r--  fs/orangefs/acl.c | 4
-rw-r--r--  fs/orangefs/inode.c | 8
-rw-r--r--  fs/orangefs/namei.c | 8
-rw-r--r--  fs/orangefs/orangefs-sysfs.c | 2
-rw-r--r--  fs/overlayfs/copy_up.c | 2
-rw-r--r--  fs/overlayfs/file.c | 2
-rw-r--r--  fs/overlayfs/inode.c | 2
-rw-r--r--  fs/overlayfs/namei.c | 2
-rw-r--r--  fs/overlayfs/overlayfs.h | 4
-rw-r--r--  fs/overlayfs/util.c | 3
-rw-r--r--  fs/proc/base.c | 14
-rw-r--r--  fs/proc/kcore.c | 1
-rw-r--r--  fs/proc/vmcore.c | 34
-rw-r--r--  fs/pstore/inode.c | 11
-rw-r--r--  fs/pstore/internal.h | 5
-rw-r--r--  fs/pstore/platform.c | 75
-rw-r--r--  fs/pstore/ram.c | 47
-rw-r--r--  fs/pstore/ram_core.c | 11
-rw-r--r--  fs/read_write.c | 19
-rw-r--r--  fs/select.c | 20
-rw-r--r--  fs/signalfd.c | 6
-rw-r--r--  fs/stat.c | 3
-rw-r--r--  fs/timerfd.c | 12
-rw-r--r--  fs/ubifs/super.c | 11
-rw-r--r--  fs/ubifs/xattr.c | 24
-rw-r--r--  fs/utimes.c | 73
-rw-r--r--  fs/xattr.c | 24
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c | 264
-rw-r--r--  fs/xfs/libxfs/xfs_attr.h (renamed from fs/xfs/xfs_attr.h) | 2
-rw-r--r--  fs/xfs/libxfs/xfs_attr_remote.c | 10
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 94
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h | 1
-rw-r--r--  fs/xfs/libxfs/xfs_format.h | 10
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c | 30
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c | 5
-rw-r--r--  fs/xfs/scrub/alloc.c | 1
-rw-r--r--  fs/xfs/scrub/inode.c | 4
-rw-r--r--  fs/xfs/scrub/repair.c | 128
-rw-r--r--  fs/xfs/scrub/scrub.c | 13
-rw-r--r--  fs/xfs/xfs_aops.c | 4
-rw-r--r--  fs/xfs/xfs_aops.h | 14
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 81
-rw-r--r--  fs/xfs/xfs_buf.c | 109
-rw-r--r--  fs/xfs/xfs_buf.h | 2
-rw-r--r--  fs/xfs/xfs_buf_item.c | 119
-rw-r--r--  fs/xfs/xfs_buf_item.h | 1
-rw-r--r--  fs/xfs/xfs_fsops.c | 50
-rw-r--r--  fs/xfs/xfs_inode.c | 10
-rw-r--r--  fs/xfs/xfs_ioctl.c | 8
-rw-r--r--  fs/xfs/xfs_iomap.c | 53
-rw-r--r--  fs/xfs/xfs_iops.c | 12
-rw-r--r--  fs/xfs/xfs_log_recover.c | 10
-rw-r--r--  fs/xfs/xfs_reflink.c | 362
-rw-r--r--  fs/xfs/xfs_reflink.h | 4
-rw-r--r--  fs/xfs/xfs_stats.c | 52
-rw-r--r--  fs/xfs/xfs_stats.h | 28
-rw-r--r--  fs/xfs/xfs_super.c | 38
-rw-r--r--  fs/xfs/xfs_trace.h | 1
-rw-r--r--  fs/xfs/xfs_trans.c | 10
-rw-r--r--  fs/xfs/xfs_trans.h | 1
-rw-r--r--  fs/xfs/xfs_trans_ail.c | 28
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 141
-rw-r--r--  include/acpi/acconfig.h | 17
-rw-r--r--  include/acpi/acexcep.h | 9
-rw-r--r--  include/acpi/acpi_bus.h | 9
-rw-r--r--  include/acpi/acpixf.h | 9
-rw-r--r--  include/acpi/cppc_acpi.h | 1
-rw-r--r--  include/asm-generic/bug.h | 8
-rw-r--r--  include/asm-generic/compat.h | 24
-rw-r--r--  include/asm-generic/dma-mapping.h | 9
-rw-r--r--  include/asm-generic/qrwlock.h | 7
-rw-r--r--  include/asm-generic/qspinlock.h | 16
-rw-r--r--  include/asm-generic/tlb.h | 86
-rw-r--r--  include/asm-generic/unistd.h | 13
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 42
-rw-r--r--  include/crypto/acompress.h | 38
-rw-r--r--  include/crypto/aead.h | 51
-rw-r--r--  include/crypto/akcipher.h | 76
-rw-r--r--  include/crypto/algapi.h | 14
-rw-r--r--  include/crypto/cbc.h | 2
-rw-r--r--  include/crypto/chacha20.h | 3
-rw-r--r--  include/crypto/hash.h | 38
-rw-r--r--  include/crypto/internal/cryptouser.h | 8
-rw-r--r--  include/crypto/internal/geniv.h | 2
-rw-r--r--  include/crypto/kpp.h | 51
-rw-r--r--  include/crypto/mcryptd.h | 114
-rw-r--r--  include/crypto/morus1280_glue.h | 2
-rw-r--r--  include/crypto/morus640_glue.h | 2
-rw-r--r--  include/crypto/null.h | 2
-rw-r--r--  include/crypto/rng.h | 29
-rw-r--r--  include/crypto/skcipher.h | 118
-rw-r--r--  include/crypto/speck.h | 62
-rw-r--r--  include/drm/drm_atomic.h | 11
-rw-r--r--  include/drm/drm_client.h | 5
-rw-r--r--  include/drm/drm_drv.h | 2
-rw-r--r--  include/drm/drm_edid.h | 6
-rw-r--r--  include/drm/drm_panel.h | 1
-rw-r--r--  include/dt-bindings/gpio/meson-g12a-gpio.h | 114
-rw-r--r--  include/dt-bindings/mfd/at91-usart.h | 17
-rw-r--r--  include/dt-bindings/net/mscc-phy-vsc8531.h | 2
-rw-r--r--  include/dt-bindings/phy/phy-ocelot-serdes.h | 12
-rw-r--r--  include/dt-bindings/pinctrl/rzn1-pinctrl.h | 141
-rw-r--r--  include/dt-bindings/reset/imx7-reset.h | 4
-rw-r--r--  include/dt-bindings/usb/pd.h | 26
-rw-r--r--  include/linux/acpi.h | 11
-rw-r--r--  include/linux/adxl.h | 13
-rw-r--r--  include/linux/amba/mmci.h | 11
-rw-r--r--  include/linux/amifd.h | 63
-rw-r--r--  include/linux/amifdreg.h | 82
-rw-r--r--  include/linux/arch_topology.h | 1
-rw-r--r--  include/linux/avf/virtchnl.h | 17
-rw-r--r--  include/linux/binfmts.h | 2
-rw-r--r--  include/linux/bio.h | 78
-rw-r--r--  include/linux/blk-cgroup.h | 145
-rw-r--r--  include/linux/blk-mq.h | 4
-rw-r--r--  include/linux/blk-pm.h | 24
-rw-r--r--  include/linux/blk_types.h | 1
-rw-r--r--  include/linux/blkdev.h | 167
-rw-r--r--  include/linux/bpf-cgroup.h | 55
-rw-r--r--  include/linux/bpf.h | 88
-rw-r--r--  include/linux/bpf_types.h | 8
-rw-r--r--  include/linux/bpf_verifier.h | 46
-rw-r--r--  include/linux/buffer_head.h | 2
-rw-r--r--  include/linux/bvec.h | 3
-rw-r--r--  include/linux/cgroup-defs.h | 1
-rw-r--r--  include/linux/cgroup.h | 17
-rw-r--r--  include/linux/clocksource.h | 8
-rw-r--r--  include/linux/compat.h | 108
-rw-r--r--  include/linux/compat_time.h | 32
-rw-r--r--  include/linux/compiler-gcc.h | 21
-rw-r--r--  include/linux/compiler.h | 56
-rw-r--r--  include/linux/compiler_types.h | 9
-rw-r--r--  include/linux/coredump.h | 4
-rw-r--r--  include/linux/cpufeature.h | 2
-rw-r--r--  include/linux/cpuidle.h | 11
-rw-r--r--  include/linux/crash_dump.h | 4
-rw-r--r--  include/linux/crc-t10dif.h | 1
-rw-r--r--  include/linux/crypto.h | 110
-rw-r--r--  include/linux/debug_locks.h | 4
-rw-r--r--  include/linux/devfreq.h | 8
-rw-r--r--  include/linux/device.h | 31
-rw-r--r--  include/linux/dma-debug.h | 8
-rw-r--r--  include/linux/dma-direct.h | 8
-rw-r--r--  include/linux/dma-mapping.h | 42
-rw-r--r--  include/linux/dma-noncoherent.h | 27
-rw-r--r--  include/linux/dma/sprd-dma.h | 69
-rw-r--r--  include/linux/dns_resolver.h | 4
-rw-r--r--  include/linux/edac.h | 5
-rw-r--r--  include/linux/efi.h | 51
-rw-r--r--  include/linux/elevator.h | 2
-rw-r--r--  include/linux/elfcore-compat.h | 8
-rw-r--r--  include/linux/ethtool.h | 33
-rw-r--r--  include/linux/f2fs_fs.h | 10
-rw-r--r--  include/linux/filter.h | 42
-rw-r--r--  include/linux/fpga/fpga-mgr.h | 20
-rw-r--r--  include/linux/fs.h | 17
-rw-r--r--  include/linux/fsl/mc.h | 6
-rw-r--r--  include/linux/fsl_ifc.h | 2
-rw-r--r--  include/linux/genhd.h | 10
-rw-r--r--  include/linux/gpio/consumer.h | 80
-rw-r--r--  include/linux/gpio/driver.h | 46
-rw-r--r--  include/linux/hid.h | 28
-rw-r--r--  include/linux/huge_mm.h | 2
-rw-r--r--  include/linux/hugetlb.h | 14
-rw-r--r--  include/linux/hw_random.h | 3
-rw-r--r--  include/linux/hwmon.h | 2
-rw-r--r--  include/linux/ieee80211.h | 112
-rw-r--r--  include/linux/if_tun.h | 14
-rw-r--r--  include/linux/init.h | 2
-rw-r--r--  include/linux/interrupt.h | 2
-rw-r--r--  include/linux/ipmi.h | 2
-rw-r--r--  include/linux/ipmi_smi.h | 2
-rw-r--r--  include/linux/ipv6.h | 3
-rw-r--r--  include/linux/irq.h | 3
-rw-r--r--  include/linux/irqchip/arm-gic-common.h | 6
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 9
-rw-r--r--  include/linux/irqchip/arm-gic.h | 5
-rw-r--r--  include/linux/irqdomain.h | 1
-rw-r--r--  include/linux/jump_label.h | 65
-rw-r--r--  include/linux/kvm_host.h | 2
-rw-r--r--  include/linux/leds.h | 15
-rw-r--r--  include/linux/lightnvm.h | 166
-rw-r--r--  include/linux/linkmode.h | 76
-rw-r--r--  include/linux/lockdep.h | 7
-rw-r--r--  include/linux/lsm_hooks.h | 16
-rw-r--r--  include/linux/memremap.h | 6
-rw-r--r--  include/linux/mfd/cros_ec_commands.h | 11
-rw-r--r--  include/linux/mfd/da9063/pdata.h | 16
-rw-r--r--  include/linux/mfd/ingenic-tcu.h | 56
-rw-r--r--  include/linux/mfd/intel_msic.h | 7
-rw-r--r--  include/linux/mfd/intel_soc_pmic.h | 13
-rw-r--r--  include/linux/mfd/intel_soc_pmic_bxtwc.h | 10
-rw-r--r--  include/linux/mfd/madera/core.h | 2
-rw-r--r--  include/linux/mfd/madera/pdata.h | 1
-rw-r--r--  include/linux/mfd/max14577-private.h | 11
-rw-r--r--  include/linux/mfd/max14577.h | 11
-rw-r--r--  include/linux/mfd/max77686-private.h | 15
-rw-r--r--  include/linux/mfd/max77686.h | 15
-rw-r--r--  include/linux/mfd/max77693-common.h | 6
-rw-r--r--  include/linux/mfd/max77693-private.h | 15
-rw-r--r--  include/linux/mfd/max77693.h | 15
-rw-r--r--  include/linux/mfd/max77843-private.h | 6
-rw-r--r--  include/linux/mfd/max8997-private.h | 15
-rw-r--r--  include/linux/mfd/max8997.h | 16
-rw-r--r--  include/linux/mfd/max8998-private.h | 15
-rw-r--r--  include/linux/mfd/max8998.h | 15
-rw-r--r--  include/linux/mfd/mc13xxx.h | 1
-rw-r--r--  include/linux/mfd/rohm-bd718x7.h | 372
-rw-r--r--  include/linux/mfd/samsung/core.h | 11
-rw-r--r--  include/linux/mfd/samsung/irq.h | 10
-rw-r--r--  include/linux/mfd/samsung/rtc.h | 15
-rw-r--r--  include/linux/mfd/samsung/s2mpa01.h | 7
-rw-r--r--  include/linux/mfd/samsung/s2mps11.h | 9
-rw-r--r--  include/linux/mfd/samsung/s2mps13.h | 14
-rw-r--r--  include/linux/mfd/samsung/s2mps14.h | 14
-rw-r--r--  include/linux/mfd/samsung/s2mps15.h | 11
-rw-r--r--  include/linux/mfd/samsung/s2mpu02.h | 14
-rw-r--r--  include/linux/mfd/samsung/s5m8763.h | 10
-rw-r--r--  include/linux/mfd/samsung/s5m8767.h | 10
-rw-r--r--  include/linux/mfd/ti-lmu.h | 3
-rw-r--r--  include/linux/mfd/tmio.h | 7
-rw-r--r--  include/linux/micrel_phy.h | 1
-rw-r--r--  include/linux/mii.h | 101
-rw-r--r--  include/linux/mlx5/cq.h | 1
-rw-r--r--  include/linux/mlx5/device.h | 10
-rw-r--r--  include/linux/mlx5/driver.h | 84
-rw-r--r--  include/linux/mlx5/fs.h | 40
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 264
-rw-r--r--  include/linux/mlx5/qp.h | 1
-rw-r--r--  include/linux/mlx5/srq.h | 1
-rw-r--r--  include/linux/mlx5/transobj.h | 2
-rw-r--r--  include/linux/mlx5/vport.h | 2
-rw-r--r--  include/linux/mm.h | 24
-rw-r--r--  include/linux/mmc/host.h | 5
-rw-r--r--  include/linux/mmzone.h | 10
-rw-r--r--  include/linux/module.h | 4
-rw-r--r--  include/linux/mroute_base.h | 11
-rw-r--r--  include/linux/msi.h | 17
-rw-r--r--  include/linux/mtd/blktrans.h | 5
-rw-r--r--  include/linux/mtd/jedec.h | 91
-rw-r--r--  include/linux/mtd/nand_bch.h | 11
-rw-r--r--  include/linux/mtd/nand_ecc.h | 12
-rw-r--r--  include/linux/mtd/onfi.h | 178
-rw-r--r--  include/linux/mtd/platnand.h | 74
-rw-r--r--  include/linux/mtd/rawnand.h | 635
-rw-r--r--  include/linux/mtd/spi-nor.h | 119
-rw-r--r--  include/linux/ndctl.h | 22
-rw-r--r--  include/linux/netdevice.h | 51
-rw-r--r--  include/linux/netfilter.h | 2
-rw-r--r--  include/linux/netfilter/nf_conntrack_common.h | 3
-rw-r--r--  include/linux/netfilter/nfnetlink_osf.h | 3
-rw-r--r--  include/linux/netlink.h | 3
-rw-r--r--  include/linux/netpoll.h | 9
-rw-r--r--  include/linux/nvme.h | 1
-rw-r--r--  include/linux/of_device.h | 3
-rw-r--r--  include/linux/pci-dma-compat.h | 18
-rw-r--r--  include/linux/pci-dma.h | 12
-rw-r--r--  include/linux/pci-p2pdma.h | 114
-rw-r--r--  include/linux/pci.h | 11
-rw-r--r--  include/linux/pci_hotplug.h | 43
-rw-r--r--  include/linux/pci_ids.h | 8
-rw-r--r--  include/linux/percpu-refcount.h | 1
-rw-r--r--  include/linux/perf/arm_pmu.h | 1
-rw-r--r--  include/linux/phy.h | 41
-rw-r--r--  include/linux/phy/phy-qcom-ufs.h | 38
-rw-r--r--  include/linux/phy/phy.h | 2
-rw-r--r--  include/linux/platform_data/dma-ep93xx.h | 2
-rw-r--r--  include/linux/platform_data/dma-mcf-edma.h | 38
-rw-r--r--  include/linux/platform_data/ehci-sh.h | 16
-rw-r--r--  include/linux/platform_data/gpio-davinci.h | 34
-rw-r--r--  include/linux/platform_data/gpio-omap.h | 15
-rw-r--r--  include/linux/platform_data/gpio-ts5500.h | 27
-rw-r--r--  include/linux/platform_data/hsmmc-omap.h | 3
-rw-r--r--  include/linux/platform_data/mv_usb.h | 1
-rw-r--r--  include/linux/platform_data/pxa_sdhci.h | 4
-rw-r--r--  include/linux/platform_data/spi-davinci.h | 4
-rw-r--r--  include/linux/platform_device.h | 1
-rw-r--r--  include/linux/pm_domain.h | 35
-rw-r--r--  include/linux/pm_opp.h | 6
-rw-r--r--  include/linux/posix-timers.h | 2
-rw-r--r--  include/linux/power/bq27xxx_battery.h | 1
-rw-r--r--  include/linux/pstore_ram.h | 3
-rw-r--r--  include/linux/ptrace.h | 38
-rw-r--r--  include/linux/pxa2xx_ssp.h | 3
-rw-r--r--  include/linux/qcom-geni-se.h | 13
-rw-r--r--  include/linux/qcom_scm.h | 4
-rw-r--r--  include/linux/qed/common_hsi.h | 10
-rw-r--r--  include/linux/qed/iscsi_common.h | 2
-rw-r--r--  include/linux/qed/qed_if.h | 35
-rw-r--r--  include/linux/qed/qed_rdma_if.h | 11
-rw-r--r--  include/linux/rculist.h | 32
-rw-r--r--  include/linux/rcupdate.h | 154
-rw-r--r--  include/linux/rcupdate_wait.h | 14
-rw-r--r--  include/linux/rcutiny.h | 53
-rw-r--r--  include/linux/rcutree.h | 31
-rw-r--r--  include/linux/regmap.h | 31
-rw-r--r--  include/linux/regulator/driver.h | 20
-rw-r--r--  include/linux/regulator/fixed.h | 3
-rw-r--r--  include/linux/regulator/machine.h | 6
-rw-r--r--  include/linux/restart_block.h | 4
-rw-r--r--  include/linux/rtnetlink.h | 7
-rw-r--r--  include/linux/rwsem.h | 4
-rw-r--r--  include/linux/sched.h | 14
-rw-r--r--  include/linux/sched/signal.h | 23
-rw-r--r--  include/linux/sched/topology.h | 6
-rw-r--r--  include/linux/security.h | 6
-rw-r--r--  include/linux/serial_sci.h | 1
-rw-r--r--  include/linux/signal.h | 18
-rw-r--r--  include/linux/signal_types.h | 8
-rw-r--r--  include/linux/skbuff.h | 55
-rw-r--r--  include/linux/skmsg.h | 434
-rw-r--r--  include/linux/smp.h | 4
-rw-r--r--  include/linux/socket.h | 4
-rw-r--r--  include/linux/spi/spi-mem.h | 7
-rw-r--r--  include/linux/spi/spi.h | 36
-rw-r--r--  include/linux/srcutree.h | 13
-rw-r--r--  include/linux/start_kernel.h | 2
-rw-r--r--  include/linux/stmmac.h | 1
-rw-r--r--  include/linux/sunrpc/gss_krb5.h | 30
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  include/linux/syscalls.h | 21
-rw-r--r--  include/linux/tcp.h | 3
-rw-r--r--  include/linux/time32.h | 78
-rw-r--r--  include/linux/timekeeping.h | 12
-rw-r--r--  include/linux/timekeeping32.h | 53
-rw-r--r--  include/linux/torture.h | 2
-rw-r--r--  include/linux/tracehook.h | 13
-rw-r--r--  include/linux/tracepoint-defs.h | 6
-rw-r--r--  include/linux/tracepoint.h | 36
-rw-r--r--  include/linux/tty.h | 2
-rw-r--r--  include/linux/tty_driver.h | 3
-rw-r--r--  include/linux/tty_ldisc.h | 10
-rw-r--r--  include/linux/uio.h | 2
-rw-r--r--  include/linux/umh.h | 1
-rw-r--r--  include/linux/usb/chipidea.h | 6
-rw-r--r--  include/linux/usb/serial.h | 2
-rw-r--r--  include/linux/usb/usbnet.h | 2
-rw-r--r--  include/linux/vga_switcheroo.h | 3
-rw-r--r--  include/linux/virtio_net.h | 18
-rw-r--r--  include/linux/wait.h | 20
-rw-r--r--  include/linux/writeback.h | 5
-rw-r--r--  include/media/v4l2-fh.h | 4
-rw-r--r--  include/net/act_api.h | 15
-rw-r--r--  include/net/addrconf.h | 5
-rw-r--r--  include/net/af_rxrpc.h | 4
-rw-r--r--  include/net/af_unix.h | 4
-rw-r--r--  include/net/bluetooth/hci.h | 14
-rw-r--r--  include/net/bluetooth/hci_core.h | 17
-rw-r--r--  include/net/bluetooth/l2cap.h | 22
-rw-r--r--  include/net/bonding.h | 7
-rw-r--r--  include/net/cfg80211.h | 79
-rw-r--r--  include/net/devlink.h | 33
-rw-r--r--  include/net/dsa.h | 1
-rw-r--r--  include/net/dst.h | 10
-rw-r--r--  include/net/gen_stats.h | 4
-rw-r--r--  include/net/genetlink.h | 2
-rw-r--r--  include/net/ieee80211_radiotap.h | 21
-rw-r--r--  include/net/inet_ecn.h | 18
-rw-r--r--  include/net/inet_sock.h | 6
-rw-r--r--  include/net/ip.h | 31
-rw-r--r--  include/net/ip6_fib.h | 27
-rw-r--r--  include/net/ip6_route.h | 4
-rw-r--r--  include/net/ip_fib.h | 17
-rw-r--r--  include/net/ipv6.h | 2
-rw-r--r--  include/net/iucv/af_iucv.h | 5
-rw-r--r--  include/net/llc.h | 1
-rw-r--r--  include/net/mac80211.h | 127
-rw-r--r--  include/net/neighbour.h | 16
-rw-r--r--  include/net/net_namespace.h | 3
-rw-r--r--  include/net/netfilter/ipv4/nf_conntrack_ipv4.h | 13
-rw-r--r--  include/net/netfilter/ipv6/nf_conntrack_ipv6.h | 13
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h | 3
-rw-r--r--  include/net/netfilter/nf_conntrack_l4proto.h | 36
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 2
-rw-r--r--  include/net/netfilter/nf_tables.h | 9
-rw-r--r--  include/net/netfilter/nf_tables_core.h | 4
-rw-r--r--  include/net/netfilter/nfnetlink_log.h | 1
-rw-r--r--  include/net/netlink.h | 161
-rw-r--r--  include/net/netns/ipv6.h | 1
-rw-r--r--  include/net/nfc/hci.h | 2
-rw-r--r--  include/net/pkt_cls.h | 12
-rw-r--r--  include/net/pkt_sched.h | 1
-rw-r--r--  include/net/route.h | 5
-rw-r--r--  include/net/rtnetlink.h | 1
-rw-r--r--  include/net/sch_generic.h | 54
-rw-r--r--  include/net/sctp/constants.h | 5
-rw-r--r--  include/net/sctp/sm.h | 2
-rw-r--r--  include/net/sctp/structs.h | 2
-rw-r--r--  include/net/sock.h | 22
-rw-r--r--  include/net/switchdev.h | 7
-rw-r--r--  include/net/tcp.h | 91
-rw-r--r--  include/net/tls.h | 98
-rw-r--r--  include/net/udp.h | 2
-rw-r--r--  include/net/vxlan.h | 65
-rw-r--r--  include/net/xdp.h | 6
-rw-r--r--  include/net/xdp_sock.h | 127
-rw-r--r--  include/rdma/ib_addr.h | 11
-rw-r--r--  include/rdma/ib_cm.h | 2
-rw-r--r--  include/rdma/ib_sa.h | 38
-rw-r--r--  include/rdma/ib_umem.h | 9
-rw-r--r--  include/rdma/ib_umem_odp.h | 75
-rw-r--r--  include/rdma/ib_verbs.h | 179
-rw-r--r--  include/rdma/rdma_cm.h | 11
-rw-r--r--  include/rdma/rdma_netlink.h | 4
-rw-r--r--  include/rdma/rdma_vt.h | 51
-rw-r--r--  include/rdma/rdmavt_qp.h | 7
-rw-r--r--  include/rdma/restrack.h | 12
-rw-r--r--  include/rdma/uverbs_ioctl.h | 111
-rw-r--r--  include/rdma/uverbs_std_types.h | 51
-rw-r--r--  include/soc/fsl/bman.h | 8
-rw-r--r--  include/soc/fsl/dpaa2-fd.h | 242
-rw-r--r--  include/soc/fsl/dpaa2-global.h | 15
-rw-r--r--  include/soc/fsl/dpaa2-io.h | 4
-rw-r--r--  include/soc/fsl/qe/ucc_fast.h | 8
-rw-r--r--  include/soc/fsl/qman.h | 8
-rw-r--r--  include/soc/mscc/ocelot_hsio.h (renamed from drivers/net/ethernet/mscc/ocelot_hsio.h) | 74
-rw-r--r--  include/sound/hda_codec.h (renamed from sound/pci/hda/hda_codec.h) | 0
-rw-r--r--  include/sound/hdaudio.h | 1
-rw-r--r--  include/sound/memalloc.h | 3
-rw-r--r--  include/sound/rawmidi.h | 1
-rw-r--r--  include/sound/simple_card_utils.h | 27
-rw-r--r--  include/sound/soc-acpi-intel-match.h | 6
-rw-r--r--  include/sound/soc-dapm.h | 8
-rw-r--r--  include/sound/soc-dpcm.h | 10
-rw-r--r--  include/sound/soc.h | 45
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 6
-rw-r--r--  include/target/iscsi/iscsi_target_stat.h | 4
-rw-r--r--  include/target/target_core_base.h | 1
-rw-r--r--include/trace/events/btrfs.h36
-rw-r--r--include/trace/events/ext4.h99
-rw-r--r--include/trace/events/hwmon.h71
-rw-r--r--include/trace/events/kyber.h96
-rw-r--r--include/trace/events/migrate.h27
-rw-r--r--include/trace/events/rcu.h25
-rw-r--r--include/trace/events/rxrpc.h5
-rw-r--r--include/trace/events/sched.h11
-rw-r--r--include/trace/events/signal.h7
-rw-r--r--include/trace/events/tcp.h7
-rw-r--r--include/uapi/asm-generic/hugetlb_encode.h2
-rw-r--r--include/uapi/asm-generic/siginfo.h193
-rw-r--r--include/uapi/asm-generic/unistd.h2
-rw-r--r--include/uapi/linux/bpf.h168
-rw-r--r--include/uapi/linux/cryptouser.h52
-rw-r--r--include/uapi/linux/dns_resolver.h116
-rw-r--r--include/uapi/linux/ethtool.h15
-rw-r--r--include/uapi/linux/firewire-cdev.h22
-rw-r--r--include/uapi/linux/fs.h4
-rw-r--r--include/uapi/linux/gen_stats.h1
-rw-r--r--include/uapi/linux/gpio.h2
-rw-r--r--include/uapi/linux/if_addr.h1
-rw-r--r--include/uapi/linux/if_arp.h18
-rw-r--r--include/uapi/linux/if_fddi.h21
-rw-r--r--include/uapi/linux/if_link.h3
-rw-r--r--include/uapi/linux/if_packet.h1
-rw-r--r--include/uapi/linux/in6.h1
-rw-r--r--include/uapi/linux/input-event-codes.h18
-rw-r--r--include/uapi/linux/keyctl.h7
-rw-r--r--include/uapi/linux/kvm.h27
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/memfd.h2
-rw-r--r--include/uapi/linux/mman.h2
-rw-r--r--include/uapi/linux/ncsi.h6
-rw-r--r--include/uapi/linux/ndctl.h52
-rw-r--r--include/uapi/linux/neighbour.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h56
-rw-r--r--include/uapi/linux/netfilter/xt_cgroup.h16
-rw-r--r--include/uapi/linux/netlink.h1
-rw-r--r--include/uapi/linux/nl80211.h124
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/pkt_cls.h2
-rw-r--r--include/uapi/linux/pkt_sched.h52
-rw-r--r--include/uapi/linux/sctp.h1
-rw-r--r--include/uapi/linux/shm.h2
-rw-r--r--include/uapi/linux/smc_diag.h25
-rw-r--r--include/uapi/linux/udp.h1
-rw-r--r--include/uapi/linux/usb/tmc.h41
-rw-r--r--include/uapi/linux/usb/video.h304
-rw-r--r--include/uapi/linux/vfio.h2
-rw-r--r--include/uapi/rdma/ib_user_verbs.h20
-rw-r--r--include/uapi/rdma/mlx5-abi.h16
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h21
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h12
-rw-r--r--include/uapi/rdma/rdma_netlink.h3
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h7
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h106
-rw-r--r--include/uapi/sound/asound.h2
-rw-r--r--include/uapi/sound/skl-tplg-interface.h106
-rw-r--r--include/xen/events.h2
-rw-r--r--include/xen/interface/memory.h6
-rw-r--r--include/xen/xen-ops.h133
-rw-r--r--include/xen/xen.h4
-rw-r--r--init/Kconfig5
-rw-r--r--init/main.c10
-rw-r--r--ipc/mqueue.c10
-rw-r--r--ipc/msg.c6
-rw-r--r--ipc/sem.c10
-rw-r--r--ipc/shm.c8
-rw-r--r--ipc/syscall.c2
-rw-r--r--ipc/util.h2
-rw-r--r--kernel/bpf/Makefile7
-rw-r--r--kernel/bpf/arraymap.c51
-rw-r--r--kernel/bpf/btf.c5
-rw-r--r--kernel/bpf/cgroup.c82
-rw-r--r--kernel/bpf/core.c5
-rw-r--r--kernel/bpf/hashtab.c31
-rw-r--r--kernel/bpf/helpers.c68
-rw-r--r--kernel/bpf/local_storage.c174
-rw-r--r--kernel/bpf/map_in_map.c3
-rw-r--r--kernel/bpf/offload.c18
-rw-r--r--kernel/bpf/queue_stack_maps.c288
-rw-r--r--kernel/bpf/sockmap.c2580
-rw-r--r--kernel/bpf/stackmap.c4
-rw-r--r--kernel/bpf/syscall.c148
-rw-r--r--kernel/bpf/verifier.c985
-rw-r--r--kernel/bpf/xskmap.c12
-rw-r--r--kernel/cgroup/cgroup.c73
-rw-r--r--kernel/compat.c8
-rw-r--r--kernel/cpu.c40
-rw-r--r--kernel/dma/Kconfig19
-rw-r--r--kernel/dma/Makefile1
-rw-r--r--kernel/dma/contiguous.c6
-rw-r--r--kernel/dma/debug.c16
-rw-r--r--kernel/dma/direct.c222
-rw-r--r--kernel/dma/mapping.c71
-rw-r--r--kernel/dma/noncoherent.c106
-rw-r--r--kernel/events/core.c17
-rw-r--r--kernel/events/ring_buffer.c14
-rw-r--r--kernel/events/uprobes.c4
-rw-r--r--kernel/futex.c4
-rw-r--r--kernel/futex_compat.c2
-rw-r--r--kernel/irq/irqdomain.c5
-rw-r--r--kernel/irq/manage.c8
-rw-r--r--kernel/irq/matrix.c82
-rw-r--r--kernel/jump_label.c107
-rw-r--r--kernel/kexec_core.c6
-rw-r--r--kernel/kprobes.c39
-rw-r--r--kernel/locking/lockdep.c116
-rw-r--r--kernel/locking/lockdep_internals.h27
-rw-r--r--kernel/locking/lockdep_proc.c2
-rw-r--r--kernel/locking/qspinlock.c143
-rw-r--r--kernel/locking/qspinlock_paravirt.h4
-rw-r--r--kernel/locking/qspinlock_stat.h6
-rw-r--r--kernel/locking/rtmutex.c4
-rw-r--r--kernel/locking/rwsem-xadd.c15
-rw-r--r--kernel/locking/rwsem.c7
-rw-r--r--kernel/locking/rwsem.h95
-rw-r--r--kernel/locking/test-ww_mutex.c10
-rw-r--r--kernel/module.c9
-rw-r--r--kernel/pid.c2
-rw-r--r--kernel/pid_namespace.c2
-rw-r--r--kernel/power/process.c2
-rw-r--r--kernel/power/suspend.c6
-rw-r--r--kernel/printk/printk.c86
-rw-r--r--kernel/ptrace.c36
-rw-r--r--kernel/rcu/Kconfig14
-rw-r--r--kernel/rcu/rcu.h67
-rw-r--r--kernel/rcu/rcuperf.c66
-rw-r--r--kernel/rcu/rcutorture.c397
-rw-r--r--kernel/rcu/srcutiny.c29
-rw-r--r--kernel/rcu/srcutree.c31
-rw-r--r--kernel/rcu/tiny.c154
-rw-r--r--kernel/rcu/tree.c2213
-rw-r--r--kernel/rcu/tree.h132
-rw-r--r--kernel/rcu/tree_exp.h426
-rw-r--r--kernel/rcu/tree_plugin.h790
-rw-r--r--kernel/rcu/update.c70
-rw-r--r--kernel/reboot.c1
-rw-r--r--kernel/resource.c141
-rw-r--r--kernel/sched/core.c27
-rw-r--r--kernel/sched/deadline.c2
-rw-r--r--kernel/sched/fair.c315
-rw-r--r--kernel/sched/features.h2
-rw-r--r--kernel/sched/idle.c15
-rw-r--r--kernel/sched/pelt.c8
-rw-r--r--kernel/sched/pelt.h2
-rw-r--r--kernel/sched/sched.h29
-rw-r--r--kernel/sched/topology.c106
-rw-r--r--kernel/seccomp.c10
-rw-r--r--kernel/signal.c454
-rw-r--r--kernel/smp.c19
-rw-r--r--kernel/softirq.c9
-rw-r--r--kernel/sys.c3
-rw-r--r--kernel/time/Kconfig4
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--kernel/time/hrtimer.c8
-rw-r--r--kernel/time/posix-stubs.c18
-rw-r--r--kernel/time/posix-timers.c32
-rw-r--r--kernel/time/tick-broadcast.c2
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/time.c97
-rw-r--r--kernel/time/timekeeping.c24
-rw-r--r--kernel/torture.c3
-rw-r--r--kernel/trace/blktrace.c4
-rw-r--r--kernel/trace/preemptirq_delay_test.c10
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/trace/trace_events_hist.c32
-rw-r--r--kernel/tracepoint.c24
-rw-r--r--kernel/umh.c16
-rw-r--r--kernel/up.c14
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Kconfig.debug6
-rw-r--r--lib/Kconfig.kasan9
-rw-r--r--lib/Makefile2
-rw-r--r--lib/bch.c17
-rw-r--r--lib/chacha20.c6
-rw-r--r--lib/crc-t10dif.c57
-rw-r--r--lib/crc32.c11
-rw-r--r--lib/debug_locks.c6
-rw-r--r--lib/nlattr.c269
-rw-r--r--lib/percpu-refcount.c28
-rw-r--r--lib/test_bpf.c1
-rw-r--r--lib/test_ida.c4
-rw-r--r--lib/udivmoddi4.c310
-rw-r--r--lib/umoddi3.c32
-rw-r--r--lib/vsprintf.c238
-rw-r--r--lib/xz/xz_crc32.c1
-rw-r--r--lib/xz/xz_private.h4
-rw-r--r--mm/Kconfig1
-rw-r--r--mm/Makefile6
-rw-r--r--mm/gup_benchmark.c3
-rw-r--r--mm/huge_memory.c18
-rw-r--r--mm/hugetlb.c90
-rw-r--r--mm/maccess.c6
-rw-r--r--mm/madvise.c2
-rw-r--r--mm/memory.c247
-rw-r--r--mm/migrate.c62
-rw-r--r--mm/mmap.c2
-rw-r--r--mm/mmu_gather.c261
-rw-r--r--mm/mremap.c30
-rw-r--r--mm/oom_kill.c4
-rw-r--r--mm/page_alloc.c12
-rw-r--r--mm/page_io.c2
-rw-r--r--mm/percpu.c1
-rw-r--r--mm/pgtable-generic.c1
-rw-r--r--mm/rmap.c42
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/vmscan.c18
-rw-r--r--mm/vmstat.c4
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/Kconfig11
-rw-r--r--net/atm/common.c2
-rw-r--r--net/batman-adv/Kconfig11
-rw-r--r--net/batman-adv/Makefile3
-rw-r--r--net/batman-adv/bat_iv_ogm.c330
-rw-r--r--net/batman-adv/bat_v_elp.c10
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c10
-rw-r--r--net/batman-adv/debugfs.c37
-rw-r--r--net/batman-adv/debugfs.h6
-rw-r--r--net/batman-adv/gateway_client.c11
-rw-r--r--net/batman-adv/hard-interface.c47
-rw-r--r--net/batman-adv/icmp_socket.c3
-rw-r--r--net/batman-adv/log.c20
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/network-coding.c27
-rw-r--r--net/batman-adv/originator.c107
-rw-r--r--net/batman-adv/originator.h4
-rw-r--r--net/batman-adv/soft-interface.c27
-rw-r--r--net/batman-adv/sysfs.c30
-rw-r--r--net/batman-adv/trace.c22
-rw-r--r--net/batman-adv/trace.h78
-rw-r--r--net/batman-adv/translation-table.c6
-rw-r--r--net/batman-adv/tvlv.c8
-rw-r--r--net/batman-adv/types.h62
-rw-r--r--net/bluetooth/bnep/core.c7
-rw-r--r--net/bluetooth/bnep/sock.c19
-rw-r--r--net/bluetooth/cmtp/core.c14
-rw-r--r--net/bluetooth/cmtp/sock.c19
-rw-r--r--net/bluetooth/hci_core.c65
-rw-r--r--net/bluetooth/hci_event.c85
-rw-r--r--net/bluetooth/hidp/core.c23
-rw-r--r--net/bluetooth/hidp/hidp.h2
-rw-r--r--net/bluetooth/hidp/sock.c79
-rw-r--r--net/bluetooth/l2cap_core.c102
-rw-r--r--net/bluetooth/mgmt.c7
-rw-r--r--net/bluetooth/rfcomm/tty.c12
-rw-r--r--net/bluetooth/smp.c68
-rw-r--r--net/bluetooth/smp.h3
-rw-r--r--net/bpf/test_run.c35
-rw-r--r--net/bpfilter/bpfilter_kern.c11
-rw-r--r--net/bridge/Kconfig2
-rw-r--r--net/bridge/br.c20
-rw-r--r--net/bridge/br_arp_nd_proxy.c15
-rw-r--r--net/bridge/br_device.c8
-rw-r--r--net/bridge/br_fdb.c24
-rw-r--r--net/bridge/br_if.c9
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_mdb.c36
-rw-r--r--net/bridge/br_multicast.c55
-rw-r--r--net/bridge/br_netfilter_hooks.c10
-rw-r--r--net/bridge/br_netlink.c43
-rw-r--r--net/bridge/br_private.h71
-rw-r--r--net/bridge/br_switchdev.c9
-rw-r--r--net/bridge/br_sysfs_br.c49
-rw-r--r--net/bridge/br_vlan.c88
-rw-r--r--net/caif/caif_socket.c2
-rw-r--r--net/caif/cfrfml.c3
-rw-r--r--net/ceph/crypto.c12
-rw-r--r--net/ceph/crypto.h2
-rw-r--r--net/compat.c10
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c68
-rw-r--r--net/core/devlink.c71
-rw-r--r--net/core/ethtool.c202
-rw-r--r--net/core/fib_rules.c36
-rw-r--r--net/core/filter.c806
-rw-r--r--net/core/flow_dissector.c150
-rw-r--r--net/core/gen_stats.c73
-rw-r--r--net/core/link_watch.c2
-rw-r--r--net/core/neighbour.c222
-rw-r--r--net/core/net_namespace.c6
-rw-r--r--net/core/netclassid_cgroup.c1
-rw-r--r--net/core/netpoll.c60
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/core/rtnetlink.c378
-rw-r--r--net/core/skbuff.c75
-rw-r--r--net/core/skmsg.c802
-rw-r--r--net/core/sock.c74
-rw-r--r--net/core/sock_map.c1003
-rw-r--r--net/core/xdp.c53
-rw-r--r--net/dccp/input.c4
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/decnet/dn_dev.c2
-rw-r--r--net/dns_resolver/dns_key.c67
-rw-r--r--net/dns_resolver/dns_query.c5
-rw-r--r--net/dsa/Kconfig3
-rw-r--r--net/dsa/Makefile1
-rw-r--r--net/dsa/dsa.c49
-rw-r--r--net/dsa/dsa_priv.h4
-rw-r--r--net/dsa/legacy.c9
-rw-r--r--net/dsa/slave.c31
-rw-r--r--net/dsa/tag_gswip.c109
-rw-r--r--net/ieee802154/6lowpan/reassembly.c3
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/ah4.c4
-rw-r--r--net/ipv4/arp.c2
-rw-r--r--net/ipv4/cipso_ipv4.c11
-rw-r--r--net/ipv4/datagram.c2
-rw-r--r--net/ipv4/devinet.c202
-rw-r--r--net/ipv4/esp4.c11
-rw-r--r--net/ipv4/fib_frontend.c154
-rw-r--r--net/ipv4/fib_semantics.c87
-rw-r--r--net/ipv4/fib_trie.c37
-rw-r--r--net/ipv4/gre_demux.c7
-rw-r--r--net/ipv4/icmp.c4
-rw-r--r--net/ipv4/inet_connection_sock.c5
-rw-r--r--net/ipv4/ip_fragment.c27
-rw-r--r--net/ipv4/ip_gre.c15
-rw-r--r--net/ipv4/ip_input.c6
-rw-r--r--net/ipv4/ip_output.c4
-rw-r--r--net/ipv4/ip_sockglue.c3
-rw-r--r--net/ipv4/ip_tunnel.c9
-rw-r--r--net/ipv4/ip_vti.c4
-rw-r--r--net/ipv4/ipcomp.c4
-rw-r--r--net/ipv4/ipip.c5
-rw-r--r--net/ipv4/ipmr.c57
-rw-r--r--net/ipv4/ipmr_base.c125
-rw-r--r--net/ipv4/metrics.c30
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c17
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_masquerade_ipv4.c22
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic_main.c1
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c27
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c55
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c6
-rw-r--r--net/ipv4/tcp.c56
-rw-r--r--net/ipv4/tcp_bbr.c90
-rw-r--r--net/ipv4/tcp_bpf.c668
-rw-r--r--net/ipv4/tcp_cdg.c2
-rw-r--r--net/ipv4/tcp_dctcp.c55
-rw-r--r--net/ipv4/tcp_dctcp.h40
-rw-r--r--net/ipv4/tcp_input.c63
-rw-r--r--net/ipv4/tcp_ipv4.c8
-rw-r--r--net/ipv4/tcp_output.c162
-rw-r--r--net/ipv4/tcp_rate.c15
-rw-r--r--net/ipv4/tcp_recovery.c5
-rw-r--r--net/ipv4/tcp_timer.c2
-rw-r--r--net/ipv4/tcp_ulp.c75
-rw-r--r--net/ipv4/udp.c55
-rw-r--r--net/ipv4/udp_offload.c2
-rw-r--r--net/ipv4/xfrm4_input.c1
-rw-r--r--net/ipv4/xfrm4_mode_transport.c4
-rw-r--r--net/ipv6/addrconf.c291
-rw-r--r--net/ipv6/addrlabel.c34
-rw-r--r--net/ipv6/af_inet6.c8
-rw-r--r--net/ipv6/esp6.c7
-rw-r--r--net/ipv6/ip6_fib.c70
-rw-r--r--net/ipv6/ip6_gre.c26
-rw-r--r--net/ipv6/ip6_input.c3
-rw-r--r--net/ipv6/ip6_offload.c1
-rw-r--r--net/ipv6/ip6_output.c8
-rw-r--r--net/ipv6/ip6_tunnel.c23
-rw-r--r--net/ipv6/ip6mr.c74
-rw-r--r--net/ipv6/ipv6_sockglue.c11
-rw-r--r--net/ipv6/mcast.c18
-rw-r--r--net/ipv6/ndisc.c4
-rw-r--r--net/ipv6/netfilter/ip6t_ipv6header.c5
-rw-r--r--net/ipv6/netfilter/ip6t_rt.c10
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/netfilter/nf_nat_masquerade_ipv6.c19
-rw-r--r--net/ipv6/raw.c29
-rw-r--r--net/ipv6/reassembly.c14
-rw-r--r--net/ipv6/route.c303
-rw-r--r--net/ipv6/sit.c6
-rw-r--r--net/ipv6/udp.c65
-rw-r--r--net/ipv6/udp_offload.c2
-rw-r--r--net/ipv6/xfrm6_input.c1
-rw-r--r--net/ipv6/xfrm6_mode_transport.c4
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/ipv6/xfrm6_policy.c4
-rw-r--r--net/iucv/af_iucv.c46
-rw-r--r--net/llc/af_llc.c11
-rw-r--r--net/llc/llc_conn.c1
-rw-r--r--net/llc/llc_core.c4
-rw-r--r--net/mac80211/Kconfig17
-rw-r--r--net/mac80211/Makefile11
-rw-r--r--net/mac80211/cfg.c144
-rw-r--r--net/mac80211/debugfs.c4
-rw-r--r--net/mac80211/debugfs_sta.c364
-rw-r--r--net/mac80211/driver-ops.h26
-rw-r--r--net/mac80211/ibss.c4
-rw-r--r--net/mac80211/ieee80211_i.h11
-rw-r--r--net/mac80211/iface.c3
-rw-r--r--net/mac80211/key.c111
-rw-r--r--net/mac80211/main.c78
-rw-r--r--net/mac80211/mesh.c5
-rw-r--r--net/mac80211/mesh.h3
-rw-r--r--net/mac80211/mesh_hwmp.c9
-rw-r--r--net/mac80211/mlme.c130
-rw-r--r--net/mac80211/rate.h13
-rw-r--r--net/mac80211/rc80211_minstrel.c162
-rw-r--r--net/mac80211/rc80211_minstrel.h35
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c68
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c298
-rw-r--r--net/mac80211/rc80211_minstrel_ht.h20
-rw-r--r--net/mac80211/rc80211_minstrel_ht_debugfs.c58
-rw-r--r--net/mac80211/rx.c55
-rw-r--r--net/mac80211/spectmgmt.c5
-rw-r--r--net/mac80211/sta_info.c27
-rw-r--r--net/mac80211/status.c30
-rw-r--r--net/mac80211/tdls.c8
-rw-r--r--net/mac80211/trace.h23
-rw-r--r--net/mac80211/tx.c81
-rw-r--r--net/mac80211/util.c166
-rw-r--r--net/mac80211/vht.c20
-rw-r--r--net/mac802154/llsec.c16
-rw-r--r--net/mac802154/llsec.h2
-rw-r--r--net/mpls/af_mpls.c138
-rw-r--r--net/ncsi/Kconfig6
-rw-r--r--net/ncsi/internal.h21
-rw-r--r--net/ncsi/ncsi-cmd.c38
-rw-r--r--net/ncsi/ncsi-manage.c98
-rw-r--r--net/ncsi/ncsi-netlink.c205
-rw-r--r--net/ncsi/ncsi-netlink.h12
-rw-r--r--net/ncsi/ncsi-pkt.h22
-rw-r--r--net/ncsi/ncsi-rsp.c150
-rw-r--r--net/netfilter/Kconfig7
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h2
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c105
-rw-r--r--net/netfilter/nf_conntrack_expect.c3
-rw-r--r--net/netfilter/nf_conntrack_netlink.c73
-rw-r--r--net/netfilter/nf_conntrack_proto.c117
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c155
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c28
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c44
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c78
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c80
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c253
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c255
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c236
-rw-r--r--net/netfilter/nf_conntrack_standalone.c9
-rw-r--r--net/netfilter/nf_flow_table_core.c52
-rw-r--r--net/netfilter/nf_flow_table_ip.c6
-rw-r--r--net/netfilter/nf_nat_helper.c4
-rw-r--r--net/netfilter/nf_nat_redirect.c4
-rw-r--r--net/netfilter/nf_tables_api.c123
-rw-r--r--net/netfilter/nf_tables_core.c28
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c61
-rw-r--r--net/netfilter/nfnetlink_osf.c46
-rw-r--r--net/netfilter/nfnetlink_queue.c2
-rw-r--r--net/netfilter/nft_cmp.c6
-rw-r--r--net/netfilter/nft_compat.c24
-rw-r--r--net/netfilter/nft_ct.c22
-rw-r--r--net/netfilter/nft_dup_netdev.c2
-rw-r--r--net/netfilter/nft_dynset.c21
-rw-r--r--net/netfilter/nft_flow_offload.c2
-rw-r--r--net/netfilter/nft_fwd_netdev.c4
-rw-r--r--net/netfilter/nft_lookup.c20
-rw-r--r--net/netfilter/nft_meta.c116
-rw-r--r--net/netfilter/nft_objref.c20
-rw-r--r--net/netfilter/nft_osf.c27
-rw-r--r--net/netfilter/nft_reject.c6
-rw-r--r--net/netfilter/nft_rt.c11
-rw-r--r--net/netfilter/nft_set_hash.c38
-rw-r--r--net/netfilter/nft_set_rbtree.c38
-rw-r--r--net/netfilter/nft_xfrm.c294
-rw-r--r--net/netfilter/xt_CT.c2
-rw-r--r--net/netfilter/xt_IDLETIMER.c4
-rw-r--r--net/netfilter/xt_SECMARK.c2
-rw-r--r--net/netfilter/xt_TEE.c76
-rw-r--r--net/netfilter/xt_cgroup.c72
-rw-r--r--net/netfilter/xt_nat.c2
-rw-r--r--net/netfilter/xt_osf.c8
-rw-r--r--net/netfilter/xt_socket.c4
-rw-r--r--net/netlabel/netlabel_unlabeled.c3
-rw-r--r--net/netlink/af_netlink.c47
-rw-r--r--net/netlink/af_netlink.h1
-rw-r--r--net/nfc/hci/core.c10
-rw-r--r--net/nfc/llcp_sock.c2
-rw-r--r--net/nfc/nci/uart.c7
-rw-r--r--net/openvswitch/conntrack.c18
-rw-r--r--net/openvswitch/datapath.c20
-rw-r--r--net/openvswitch/flow.c22
-rw-r--r--net/openvswitch/vport-internal_dev.c5
-rw-r--r--net/packet/af_packet.c28
-rw-r--r--net/rds/ib.h2
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rds/recv.c19
-rw-r--r--net/rds/send.c13
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/rxrpc/af_rxrpc.c19
-rw-r--r--net/rxrpc/ar-internal.h62
-rw-r--r--net/rxrpc/call_accept.c68
-rw-r--r--net/rxrpc/call_object.c7
-rw-r--r--net/rxrpc/conn_client.c14
-rw-r--r--net/rxrpc/conn_event.c26
-rw-r--r--net/rxrpc/conn_object.c21
-rw-r--r--net/rxrpc/input.c331
-rw-r--r--net/rxrpc/local_event.c2
-rw-r--r--net/rxrpc/local_object.c62
-rw-r--r--net/rxrpc/net_ns.c3
-rw-r--r--net/rxrpc/output.c67
-rw-r--r--net/rxrpc/peer_event.c64
-rw-r--r--net/rxrpc/peer_object.c77
-rw-r--r--net/rxrpc/proc.c126
-rw-r--r--net/rxrpc/protocol.h15
-rw-r--r--net/rxrpc/recvmsg.c43
-rw-r--r--net/rxrpc/rxkad.c44
-rw-r--r--net/rxrpc/skbuff.c15
-rw-r--r--net/rxrpc/utils.c23
-rw-r--r--net/sched/Kconfig11
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/act_api.c80
-rw-r--r--net/sched/act_bpf.c3
-rw-r--r--net/sched/act_connmark.c14
-rw-r--r--net/sched/act_csum.c3
-rw-r--r--net/sched/act_gact.c14
-rw-r--r--net/sched/act_ife.c3
-rw-r--r--net/sched/act_ipt.c8
-rw-r--r--net/sched/act_mirred.c8
-rw-r--r--net/sched/act_nat.c18
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c199
-rw-r--r--net/sched/act_sample.c5
-rw-r--r--net/sched/act_simple.c3
-rw-r--r--net/sched/act_skbedit.c26
-rw-r--r--net/sched/act_skbmod.c3
-rw-r--r--net/sched/act_tunnel_key.c3
-rw-r--r--net/sched/act_vlan.c3
-rw-r--r--net/sched/cls_api.c262
-rw-r--r--net/sched/cls_flower.c7
-rw-r--r--net/sched/cls_u32.c127
-rw-r--r--net/sched/sch_api.c58
-rw-r--r--net/sched/sch_atm.c2
-rw-r--r--net/sched/sch_cake.c8
-rw-r--r--net/sched/sch_cbq.c2
-rw-r--r--net/sched/sch_cbs.c2
-rw-r--r--net/sched/sch_drr.c4
-rw-r--r--net/sched/sch_dsmark.c2
-rw-r--r--net/sched/sch_fifo.c2
-rw-r--r--net/sched/sch_fq.c103
-rw-r--r--net/sched/sch_fq_codel.c2
-rw-r--r--net/sched/sch_generic.c66
-rw-r--r--net/sched/sch_hfsc.c2
-rw-r--r--net/sched/sch_hhf.c2
-rw-r--r--net/sched/sch_htb.c116
-rw-r--r--net/sched/sch_mq.c4
-rw-r--r--net/sched/sch_mqprio.c4
-rw-r--r--net/sched/sch_multiq.c6
-rw-r--r--net/sched/sch_netem.c16
-rw-r--r--net/sched/sch_pie.c36
-rw-r--r--net/sched/sch_prio.c6
-rw-r--r--net/sched/sch_qfq.c4
-rw-r--r--net/sched/sch_red.c4
-rw-r--r--net/sched/sch_sfb.c4
-rw-r--r--net/sched/sch_taprio.c962
-rw-r--r--net/sched/sch_tbf.c6
-rw-r--r--net/sctp/associola.c3
-rw-r--r--net/sctp/input.c1
-rw-r--r--net/sctp/output.c6
-rw-r--r--net/sctp/outqueue.c10
-rw-r--r--net/sctp/socket.c76
-rw-r--r--net/sctp/transport.c12
-rw-r--r--net/sctp/ulpqueue.c2
-rw-r--r--net/smc/af_smc.c28
-rw-r--r--net/smc/smc_clc.c14
-rw-r--r--net/smc/smc_close.c14
-rw-r--r--net/smc/smc_pnet.c2
-rw-r--r--net/socket.c53
-rw-r--r--net/strparser/Kconfig4
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c87
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_keys.c9
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c53
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c18
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c20
-rw-r--r--net/tipc/bearer.c14
-rw-r--r--net/tipc/group.c1
-rw-r--r--net/tipc/link.c69
-rw-r--r--net/tipc/link.h3
-rw-r--r--net/tipc/msg.c78
-rw-r--r--net/tipc/msg.h11
-rw-r--r--net/tipc/name_distr.c14
-rw-r--r--net/tipc/name_table.c1
-rw-r--r--net/tipc/name_table.h1
-rw-r--r--net/tipc/node.c14
-rw-r--r--net/tipc/node.h12
-rw-r--r--net/tipc/socket.c227
-rw-r--r--net/tipc/topsrv.c12
-rw-r--r--net/tipc/udp_media.c18
-rw-r--r--net/tls/Kconfig1
-rw-r--r--net/tls/tls_device.c8
-rw-r--r--net/tls/tls_device_fallback.c2
-rw-r--r--net/tls/tls_main.c89
-rw-r--r--net/tls/tls_sw.c1397
-rw-r--r--net/unix/af_unix.c6
-rw-r--r--net/wireless/core.c83
-rw-r--r--net/wireless/core.h14
-rw-r--r--net/wireless/lib80211_crypt_tkip.c59
-rw-r--r--net/wireless/lib80211_crypt_wep.c52
-rw-r--r--net/wireless/nl80211.c860
-rw-r--r--net/wireless/rdev-ops.h15
-rw-r--r--net/wireless/reg.c129
-rw-r--r--net/wireless/scan.c58
-rw-r--r--net/wireless/trace.h235
-rw-r--r--net/wireless/util.c160
-rw-r--r--net/wireless/wext-compat.c14
-rw-r--r--net/xdp/xdp_umem.c106
-rw-r--r--net/xdp/xdp_umem.h12
-rw-r--r--net/xdp/xdp_umem_props.h14
-rw-r--r--net/xdp/xsk.c56
-rw-r--r--net/xdp/xsk_queue.c60
-rw-r--r--net/xdp/xsk_queue.h16
-rw-r--r--net/xfrm/xfrm_device.c8
-rw-r--r--net/xfrm/xfrm_hash.h5
-rw-r--r--net/xfrm/xfrm_input.c3
-rw-r--r--net/xfrm/xfrm_interface.c12
-rw-r--r--net/xfrm/xfrm_output.c6
-rw-r--r--net/xfrm/xfrm_policy.c12
-rw-r--r--net/xfrm/xfrm_user.c17
-rw-r--r--samples/Kconfig1
-rw-r--r--samples/bpf/Makefile1
-rw-r--r--samples/bpf/bpf_load.c1
-rw-r--r--samples/bpf/sampleip_user.c1
-rw-r--r--samples/bpf/sockex2_kern.c11
-rw-r--r--samples/bpf/sockex3_kern.c8
-rw-r--r--samples/bpf/sockex3_user.c4
-rw-r--r--samples/bpf/tcp_tos_reflect_kern.c87
-rw-r--r--samples/bpf/test_cgrp2_attach2.c19
-rw-r--r--samples/bpf/test_current_task_under_cgroup_user.c1
-rw-r--r--samples/bpf/tracex3_user.c2
-rw-r--r--samples/bpf/xdpsock_kern.c2
-rw-r--r--samples/bpf/xdpsock_user.c15
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/Makefile.build2
-rwxr-xr-xscripts/check_00index.sh67
-rwxr-xr-xscripts/extract-vmlinux6
-rwxr-xr-xscripts/kernel-doc8
-rw-r--r--scripts/mod/Makefile2
-rw-r--r--scripts/subarch.include13
-rw-r--r--security/apparmor/lsm.c7
-rw-r--r--security/commoncap.c3
-rw-r--r--security/integrity/digsig.c10
-rw-r--r--security/integrity/evm/evm_crypto.c4
-rw-r--r--security/integrity/iint.c6
-rw-r--r--security/integrity/ima/ima.h2
-rw-r--r--security/integrity/ima/ima_api.c3
-rw-r--r--security/integrity/ima/ima_crypto.c54
-rw-r--r--security/integrity/ima/ima_fs.c9
-rw-r--r--security/integrity/ima/ima_init.c2
-rw-r--r--security/integrity/ima/ima_main.c2
-rw-r--r--security/integrity/ima/ima_template.c11
-rw-r--r--security/keys/dh.c2
-rw-r--r--security/loadpin/Kconfig4
-rw-r--r--security/loadpin/loadpin.c26
-rw-r--r--security/security.c45
-rw-r--r--security/selinux/hooks.c12
-rw-r--r--security/selinux/ss/mls.c178
-rw-r--r--security/selinux/ss/mls.h2
-rw-r--r--security/selinux/ss/policydb.c2
-rw-r--r--security/selinux/ss/services.c12
-rw-r--r--security/smack/smack_lsm.c22
-rw-r--r--security/smack/smackfs.c3
-rw-r--r--security/tomoyo/common.c3
-rw-r--r--security/tomoyo/tomoyo.c5
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c15
-rw-r--r--sound/arm/Kconfig1
-rw-r--r--sound/core/memalloc.c41
-rw-r--r--sound/core/oss/pcm_plugin.c4
-rw-r--r--sound/core/pcm_lib.c21
-rw-r--r--sound/core/rawmidi.c22
-rw-r--r--sound/core/seq/oss/seq_oss_timer.c2
-rw-r--r--sound/core/seq/seq_system.c22
-rw-r--r--sound/core/seq/seq_virmidi.c4
-rw-r--r--sound/core/sgbuf.c15
-rw-r--r--sound/firewire/Kconfig2
-rw-r--r--sound/firewire/amdtp-stream.c78
-rw-r--r--sound/firewire/bebob/bebob.c56
-rw-r--r--sound/firewire/bebob/bebob_maudio.c33
-rw-r--r--sound/firewire/dice/dice.c41
-rw-r--r--sound/firewire/digi00x/digi00x.c34
-rw-r--r--sound/firewire/fireface/ff-protocol-ff400.c9
-rw-r--r--sound/firewire/fireface/ff.c36
-rw-r--r--sound/firewire/fireworks/fireworks.c67
-rw-r--r--sound/firewire/isight.c18
-rw-r--r--sound/firewire/motu/motu.c47
-rw-r--r--sound/firewire/oxfw/oxfw-scs1x.c5
-rw-r--r--sound/firewire/oxfw/oxfw-spkr.c5
-rw-r--r--sound/firewire/oxfw/oxfw-stream.c13
-rw-r--r--sound/firewire/oxfw/oxfw.c53
-rw-r--r--sound/firewire/tascam/tascam.c39
-rw-r--r--sound/hda/ext/hdac_ext_controller.c22
-rw-r--r--sound/hda/hdac_controller.c15
-rw-r--r--sound/hda/hdac_i915.c4
-rw-r--r--sound/hda/hdac_regmap.c3
-rw-r--r--sound/i2c/cs8427.c2
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c6
-rw-r--r--sound/isa/sb/sb8_main.c10
-rw-r--r--sound/mips/hal2.c13
-rw-r--r--sound/pci/asihpi/hpios.c2
-rw-r--r--sound/pci/atiixp.c6
-rw-r--r--sound/pci/au88x0/au88x0_core.c6
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c6
-rw-r--r--sound/pci/emu10k1/emufx.c2
-rw-r--r--sound/pci/emu10k1/emupcm.c3
-rw-r--r--sound/pci/hda/hda_auto_parser.c2
-rw-r--r--sound/pci/hda/hda_beep.h2
-rw-r--r--sound/pci/hda/hda_bind.c14
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_controller.c36
-rw-r--r--sound/pci/hda/hda_controller.h20
-rw-r--r--sound/pci/hda/hda_eld.c2
-rw-r--r--sound/pci/hda/hda_generic.c2
-rw-r--r--sound/pci/hda/hda_hwdep.c2
-rw-r--r--sound/pci/hda/hda_intel.c198
-rw-r--r--sound/pci/hda/hda_intel.h1
-rw-r--r--sound/pci/hda/hda_jack.c2
-rw-r--r--sound/pci/hda/hda_proc.c2
-rw-r--r--sound/pci/hda/hda_sysfs.c2
-rw-r--r--sound/pci/hda/hda_tegra.c20
-rw-r--r--sound/pci/hda/patch_analog.c2
-rw-r--r--sound/pci/hda/patch_ca0110.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c1617
-rw-r--r--sound/pci/hda/patch_cirrus.c2
-rw-r--r--sound/pci/hda/patch_cmedia.c2
-rw-r--r--sound/pci/hda/patch_conexant.c3
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c30
-rw-r--r--sound/pci/hda/patch_si3054.c2
-rw-r--r--sound/pci/hda/patch_sigmatel.c22
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/pci/intel8x0.c97
-rw-r--r--sound/pci/intel8x0m.c20
-rw-r--r--sound/pci/rme32.c22
-rw-r--r--sound/pci/rme9652/hdspm.c2
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c77
-rw-r--r--sound/soc/amd/acp-pcm-dma.c51
-rw-r--r--sound/soc/amd/acp.h3
-rw-r--r--sound/soc/atmel/Kconfig12
-rw-r--r--sound/soc/atmel/Makefile2
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c13
-rw-r--r--sound/soc/atmel/mikroe-proto.c165
-rw-r--r--sound/soc/atmel/tse850-pcm5142.c78
-rw-r--r--sound/soc/bcm/cygnus-ssp.c13
-rw-r--r--sound/soc/codecs/Kconfig36
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/adau1761.c3
-rw-r--r--sound/soc/codecs/adau17x1.c86
-rw-r--r--sound/soc/codecs/adau17x1.h4
-rw-r--r--sound/soc/codecs/cs35l33.c3
-rw-r--r--sound/soc/codecs/cs35l35.c3
-rw-r--r--sound/soc/codecs/cs4265.c12
-rw-r--r--sound/soc/codecs/cs42l51.c21
-rw-r--r--sound/soc/codecs/cs43130.c4
-rw-r--r--sound/soc/codecs/dmic.c1
-rw-r--r--sound/soc/codecs/es8328.c7
-rw-r--r--sound/soc/codecs/hdac_hda.c483
-rw-r--r--sound/soc/codecs/hdac_hda.h24
-rw-r--r--sound/soc/codecs/hdac_hdmi.c11
-rw-r--r--sound/soc/codecs/max98088.c36
-rw-r--r--sound/soc/codecs/max98373.c50
-rw-r--r--sound/soc/codecs/nau8822.c1136
-rw-r--r--sound/soc/codecs/nau8822.h204
-rw-r--r--sound/soc/codecs/pcm186x.c3
-rw-r--r--sound/soc/codecs/pcm3060-i2c.c60
-rw-r--r--sound/soc/codecs/pcm3060-spi.c59
-rw-r--r--sound/soc/codecs/pcm3060.c295
-rw-r--r--sound/soc/codecs/pcm3060.h88
-rw-r--r--sound/soc/codecs/pcm3168a.c82
-rw-r--r--sound/soc/codecs/rt1305.c3
-rw-r--r--sound/soc/codecs/rt274.c2
-rw-r--r--sound/soc/codecs/rt5514-spi.c14
-rw-r--r--sound/soc/codecs/rt5514.c11
-rw-r--r--sound/soc/codecs/rt5616.c3
-rw-r--r--sound/soc/codecs/rt5640.c3
-rw-r--r--sound/soc/codecs/rt5645.c9
-rw-r--r--sound/soc/codecs/rt5651.c4
-rw-r--r--sound/soc/codecs/rt5660.c3
-rw-r--r--sound/soc/codecs/rt5663.c16
-rw-r--r--sound/soc/codecs/rt5665.c3
-rw-r--r--sound/soc/codecs/rt5668.c13
-rw-r--r--sound/soc/codecs/rt5670.c15
-rw-r--r--sound/soc/codecs/rt5677-spi.c1
-rw-r--r--sound/soc/codecs/rt5682.c97
-rw-r--r--sound/soc/codecs/rt5682.h14
-rw-r--r--sound/soc/codecs/sgtl5000.c2
-rw-r--r--sound/soc/codecs/sigmadsp.c3
-rw-r--r--sound/soc/codecs/sta32x.c30
-rw-r--r--sound/soc/codecs/tas5720.c103
-rw-r--r--sound/soc/codecs/tas6424.c70
-rw-r--r--sound/soc/codecs/tas6424.h10
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c85
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h23
-rw-r--r--sound/soc/codecs/tscs454.c2
-rw-r--r--sound/soc/codecs/wm2000.c54
-rw-r--r--sound/soc/codecs/wm8782.c63
-rw-r--r--sound/soc/codecs/wm8804-i2c.c15
-rw-r--r--sound/soc/codecs/wm8904.c1
-rw-r--r--sound/soc/codecs/wm8974.c1
-rw-r--r--sound/soc/codecs/wm9712.c5
-rw-r--r--sound/soc/codecs/wm_adsp.c26
-rw-r--r--sound/soc/davinci/davinci-mcasp.c37
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c2
-rw-r--r--sound/soc/fsl/fsl_esai.c2
-rw-r--r--sound/soc/fsl/fsl_utils.c4
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c5
-rw-r--r--sound/soc/generic/audio-graph-card.c21
-rw-r--r--sound/soc/generic/audio-graph-scu-card.c55
-rw-r--r--sound/soc/generic/simple-card-utils.c53
-rw-r--r--sound/soc/generic/simple-card.c30
-rw-r--r--sound/soc/generic/simple-scu-card.c54
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.c4
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c4
-rw-r--r--sound/soc/intel/boards/Kconfig22
-rw-r--r--sound/soc/intel/boards/Makefile4
-rw-r--r--sound/soc/intel/boards/broadwell.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c30
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c6
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c9
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c983
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c5
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c5
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.c127
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.h38
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c183
-rw-r--r--sound/soc/intel/common/Makefile3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-byt-match.c7
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hda-match.c40
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-kbl-match.c13
-rw-r--r--sound/soc/intel/common/sst-firmware.c2
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c71
-rw-r--r--sound/soc/intel/skylake/skl-topology.c4
-rw-r--r--sound/soc/intel/skylake/skl.c98
-rw-r--r--sound/soc/intel/skylake/skl.h12
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-cs42448.c13
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-wm8960.c14
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-mt6351.c14
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-max98090.c13
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c12
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c12
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c12
-rw-r--r--sound/soc/meson/Kconfig13
-rw-r--r--sound/soc/meson/Makefile2
-rw-r--r--sound/soc/meson/axg-card.c16
-rw-r--r--sound/soc/meson/axg-fifo.c2
-rw-r--r--sound/soc/meson/axg-pdm.c654
-rw-r--r--sound/soc/meson/axg-tdm-interface.c50
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c4
-rw-r--r--sound/soc/omap/omap-hdmi-audio.c4
-rw-r--r--sound/soc/pxa/Kconfig13
-rw-r--r--sound/soc/pxa/pxa-ssp.c6
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c48
-rw-r--r--sound/soc/qcom/apq8096.c7
-rw-r--r--sound/soc/qcom/qdsp6/q6adm.c17
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c8
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c1
-rw-r--r--sound/soc/qcom/qdsp6/q6core.c9
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c4
-rw-r--r--sound/soc/qcom/sdm845.c7
-rw-r--r--sound/soc/rockchip/rk3288_hdmi_analog.c1
-rw-r--r--sound/soc/rockchip/rockchip_pcm.c3
-rw-r--r--sound/soc/samsung/tm2_wm5110.c13
-rw-r--r--sound/soc/sh/hac.c3
-rw-r--r--sound/soc/sh/rcar/adg.c9
-rw-r--r--sound/soc/sh/rcar/core.c145
-rw-r--r--sound/soc/sh/rcar/ctu.c2
-rw-r--r--sound/soc/sh/rcar/dma.c113
-rw-r--r--sound/soc/sh/rcar/gen.c33
-rw-r--r--sound/soc/sh/rcar/rsnd.h66
-rw-r--r--sound/soc/sh/rcar/src.c2
-rw-r--r--sound/soc/sh/rcar/ssi.c128
-rw-r--r--sound/soc/sh/rcar/ssiu.c92
-rw-r--r--sound/soc/soc-compress.c4
-rw-r--r--sound/soc/soc-core.c582
-rw-r--r--sound/soc/soc-dapm.c439
-rw-r--r--sound/soc/soc-ops.c4
-rw-r--r--sound/soc/soc-pcm.c253
-rw-r--r--sound/soc/soc-topology.c15
-rw-r--r--sound/soc/soc-utils.c4
-rw-r--r--sound/soc/stm/Kconfig1
-rw-r--r--sound/soc/stm/stm32_sai.c2
-rw-r--r--sound/soc/stm/stm32_sai.h3
-rw-r--r--sound/soc/stm/stm32_sai_sub.c281
-rw-r--r--sound/soc/sunxi/Kconfig17
-rw-r--r--sound/soc/sunxi/Makefile2
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c82
-rw-r--r--sound/soc/sunxi/sun50i-codec-analog.c444
-rw-r--r--sound/soc/sunxi/sun8i-adda-pr-regmap.c102
-rw-r--r--sound/soc/sunxi/sun8i-adda-pr-regmap.h7
-rw-r--r--sound/soc/sunxi/sun8i-codec-analog.c79
-rw-r--r--sound/soc/sunxi/sun8i-codec.c22
-rw-r--r--sound/soc/tegra/tegra_sgtl5000.c17
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c3
-rw-r--r--sound/usb/caiaq/device.c1
-rw-r--r--sound/usb/midi.c3
-rw-r--r--sound/usb/mixer_quirks.c381
-rw-r--r--sound/usb/quirks-table.h9
-rw-r--r--sound/x86/intel_hdmi_audio.c29
-rw-r--r--sound/xen/xen_snd_front_alsa.c46
-rw-r--r--tools/Makefile13
-rw-r--r--tools/arch/arm64/include/asm/barrier.h70
-rw-r--r--tools/arch/ia64/include/asm/barrier.h13
-rw-r--r--tools/arch/powerpc/include/asm/barrier.h16
-rw-r--r--tools/arch/s390/include/asm/barrier.h13
-rw-r--r--tools/arch/sparc/include/asm/barrier_64.h13
-rw-r--r--tools/arch/x86/include/asm/barrier.h14
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h11
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst19
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst139
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst19
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst16
-rw-r--r--tools/bpf/bpftool/Makefile9
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool78
-rw-r--r--tools/bpf/bpftool/common.c26
-rw-r--r--tools/bpf/bpftool/jit_disasm.c4
-rw-r--r--tools/bpf/bpftool/main.c13
-rw-r--r--tools/bpf/bpftool/main.h18
-rw-r--r--tools/bpf/bpftool/map.c255
-rw-r--r--tools/bpf/bpftool/map_perf_ring.c10
-rw-r--r--tools/bpf/bpftool/net.c275
-rw-r--r--tools/bpf/bpftool/netlink_dumper.c178
-rw-r--r--tools/bpf/bpftool/netlink_dumper.h95
-rw-r--r--tools/bpf/bpftool/prog.c116
-rw-r--r--tools/crypto/getstat.c294
-rw-r--r--tools/hv/hv_fcopy_daemon.c1
-rw-r--r--tools/include/asm/barrier.h35
-rw-r--r--tools/include/linux/bitops.h7
-rw-r--r--tools/include/linux/bits.h26
-rw-r--r--tools/include/linux/err.h7
-rw-r--r--tools/include/linux/ring_buffer.h73
-rw-r--r--tools/include/tools/libc_compat.h2
-rw-r--r--tools/include/uapi/linux/bpf.h168
-rw-r--r--tools/include/uapi/linux/if_link.h2
-rw-r--r--tools/include/uapi/linux/kvm.h6
-rw-r--r--tools/include/uapi/linux/tls.h78
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat2
-rw-r--r--tools/lib/api/fs/tracing_path.c4
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/Makefile5
-rw-r--r--tools/lib/bpf/bpf.c143
-rw-r--r--tools/lib/bpf/bpf.h131
-rw-r--r--tools/lib/bpf/btf.c2
-rw-r--r--tools/lib/bpf/btf.h30
-rw-r--r--tools/lib/bpf/libbpf.c284
-rw-r--r--tools/lib/bpf/libbpf.h222
-rw-r--r--tools/lib/bpf/libbpf_errno.c16
-rw-r--r--tools/lib/bpf/netlink.c337
-rw-r--r--tools/lib/bpf/nlattr.c90
-rw-r--r--tools/lib/bpf/nlattr.h82
-rw-r--r--tools/lib/bpf/str_error.c18
-rw-r--r--tools/lib/bpf/str_error.h6
-rw-r--r--tools/lib/subcmd/pager.c11
-rw-r--r--tools/lib/subcmd/pager.h1
-rw-r--r--tools/lib/traceevent/Build2
-rw-r--r--tools/lib/traceevent/event-parse-api.c275
-rw-r--r--tools/lib/traceevent/event-parse-local.h92
-rw-r--r--tools/lib/traceevent/event-parse.c1217
-rw-r--r--tools/lib/traceevent/event-parse.h782
-rw-r--r--tools/lib/traceevent/event-plugin.c20
-rw-r--r--tools/lib/traceevent/parse-filter.c647
-rw-r--r--tools/lib/traceevent/plugin_function.c3
-rw-r--r--tools/lib/traceevent/plugin_hrtimer.c5
-rw-r--r--tools/lib/traceevent/plugin_jbd2.c1
-rw-r--r--tools/lib/traceevent/plugin_kmem.c5
-rw-r--r--tools/lib/traceevent/plugin_kvm.c15
-rw-r--r--tools/lib/traceevent/plugin_mac80211.c7
-rw-r--r--tools/lib/traceevent/plugin_sched_switch.c11
-rw-r--r--tools/lib/traceevent/plugin_scsi.c1
-rw-r--r--tools/lib/traceevent/plugin_xen.c1
-rw-r--r--tools/lib/traceevent/tep_strerror.c53
-rw-r--r--tools/lib/traceevent/trace-seq.c2
-rw-r--r--tools/lib/traceevent/trace-seq.h55
-rw-r--r--tools/memory-model/Documentation/explanation.txt186
-rw-r--r--tools/memory-model/Documentation/recipes.txt2
-rw-r--r--tools/memory-model/README39
-rw-r--r--tools/memory-model/linux-kernel.cat8
-rw-r--r--tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus7
-rw-r--r--tools/memory-model/litmus-tests/README104
-rw-r--r--tools/objtool/special.c4
-rw-r--r--tools/pci/Build1
-rw-r--r--tools/pci/Makefile53
-rw-r--r--tools/pci/pcitest.c7
-rw-r--r--tools/perf/Documentation/Makefile2
-rw-r--r--tools/perf/Makefile.config2
-rw-r--r--tools/perf/Makefile.perf6
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c59
-rw-r--r--tools/perf/arch/powerpc/util/book3s_hv_exits.h1
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c2
-rw-r--r--tools/perf/builtin-annotate.c7
-rw-r--r--tools/perf/builtin-inject.c40
-rw-r--r--tools/perf/builtin-record.c53
-rw-r--r--tools/perf/builtin-report.c12
-rw-r--r--tools/perf/builtin-script.c110
-rw-r--r--tools/perf/builtin-stat.c1463
-rw-r--r--tools/perf/builtin-trace.c179
-rwxr-xr-xtools/perf/check-headers.sh1
-rw-r--r--tools/perf/command-list.txt1
-rw-r--r--tools/perf/examples/bpf/augmented_syscalls.c154
-rw-r--r--tools/perf/examples/bpf/etcsnoop.c80
-rw-r--r--tools/perf/include/bpf/bpf.h3
-rw-r--r--tools/perf/include/bpf/linux/socket.h24
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json26
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json191
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/core-imp-def.json32
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json50
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json89
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/intrinsic.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json29
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/pipeline.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json16
-rw-r--r--tools/perf/scripts/python/export-to-postgresql.py9
-rw-r--r--tools/perf/scripts/python/export-to-sqlite.py6
-rw-r--r--tools/perf/tests/Build1
-rw-r--r--tools/perf/tests/builtin-test.c10
-rw-r--r--tools/perf/tests/evsel-tp-sched.c4
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh2
-rw-r--r--tools/perf/tests/tests.h4
-rw-r--r--tools/perf/tests/wp.c241
-rw-r--r--tools/perf/trace/beauty/Build1
-rw-r--r--tools/perf/trace/beauty/beauty.h34
-rw-r--r--tools/perf/trace/beauty/sockaddr.c76
-rw-r--r--tools/perf/util/Build1
-rw-r--r--tools/perf/util/auxtrace.c30
-rw-r--r--tools/perf/util/auxtrace.h41
-rw-r--r--tools/perf/util/bpf-loader.c2
-rw-r--r--tools/perf/util/data-convert-bt.c58
-rw-r--r--tools/perf/util/db-export.c22
-rw-r--r--tools/perf/util/event.c22
-rw-r--r--tools/perf/util/evsel.c42
-rw-r--r--tools/perf/util/evsel.h11
-rw-r--r--tools/perf/util/evsel_fprintf.c2
-rw-r--r--tools/perf/util/header.c24
-rw-r--r--tools/perf/util/header.h17
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c34
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h2
-rw-r--r--tools/perf/util/intel-pt.c5
-rw-r--r--tools/perf/util/llvm-utils.c2
-rw-r--r--tools/perf/util/machine.c8
-rw-r--r--tools/perf/util/map.c15
-rw-r--r--tools/perf/util/mmap.c6
-rw-r--r--tools/perf/util/mmap.h17
-rw-r--r--tools/perf/util/ordered-events.c87
-rw-r--r--tools/perf/util/ordered-events.h37
-rw-r--r--tools/perf/util/pmu.c13
-rw-r--r--tools/perf/util/python.c20
-rw-r--r--tools/perf/util/s390-cpumsf.c94
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c66
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c100
-rw-r--r--tools/perf/util/session.c75
-rw-r--r--tools/perf/util/session.h5
-rw-r--r--tools/perf/util/setup.py18
-rw-r--r--tools/perf/util/sort.c22
-rw-r--r--tools/perf/util/srcline.c3
-rw-r--r--tools/perf/util/stat-display.c1166
-rw-r--r--tools/perf/util/stat-shadow.c147
-rw-r--r--tools/perf/util/stat.c100
-rw-r--r--tools/perf/util/stat.h77
-rw-r--r--tools/perf/util/strbuf.c10
-rw-r--r--tools/perf/util/thread-stack.c51
-rw-r--r--tools/perf/util/tool.h7
-rw-r--r--tools/perf/util/trace-event-info.c2
-rw-r--r--tools/perf/util/trace-event-parse.c44
-rw-r--r--tools/perf/util/trace-event-read.c15
-rw-r--r--tools/perf/util/trace-event.c8
-rw-r--r--tools/perf/util/trace-event.h21
-rw-r--r--tools/perf/util/util.c2
-rw-r--r--tools/perf/util/util.h2
-rw-r--r--tools/power/cpupower/bench/parse.c2
-rw-r--r--tools/power/cpupower/utils/cpufreq-info.c8
-rw-r--r--tools/power/cpupower/utils/helpers/amd.c11
-rw-r--r--tools/power/cpupower/utils/helpers/cpuid.c8
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h2
-rw-r--r--tools/power/cpupower/utils/helpers/misc.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/mperf_monitor.c3
-rw-r--r--tools/power/pm-graph/Makefile4
-rwxr-xr-xtools/power/pm-graph/bootgraph.py125
-rw-r--r--tools/power/pm-graph/config/cgskip.txt1
-rw-r--r--tools/power/pm-graph/config/custom-timeline-functions.cfg4
-rw-r--r--tools/power/pm-graph/sleepgraph.813
-rwxr-xr-xtools/power/pm-graph/sleepgraph.py1836
-rw-r--r--tools/power/x86/turbostat/turbostat.c46
-rw-r--r--tools/spi/spidev_test.c6
-rw-r--r--tools/testing/nvdimm/Kbuild1
-rw-r--r--tools/testing/nvdimm/acpi_nfit_test.c8
-rw-r--r--tools/testing/nvdimm/test/nfit.c4
-rw-r--r--tools/testing/nvdimm/test/nfit_test.h24
-rw-r--r--tools/testing/selftests/android/Makefile2
-rw-r--r--tools/testing/selftests/android/config (renamed from tools/testing/selftests/android/ion/config)0
-rw-r--r--tools/testing/selftests/android/ion/Makefile2
-rw-r--r--tools/testing/selftests/bpf/.gitignore8
-rw-r--r--tools/testing/selftests/bpf/Makefile19
-rw-r--r--tools/testing/selftests/bpf/bpf_flow.c373
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h25
-rw-r--r--tools/testing/selftests/bpf/config2
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.c140
-rw-r--r--tools/testing/selftests/bpf/netcnt_common.h24
-rw-r--r--tools/testing/selftests/bpf/netcnt_prog.c71
-rw-r--r--tools/testing/selftests/bpf/test_btf.c179
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c60
-rw-r--r--tools/testing/selftests/bpf/test_flow_dissector.c782
-rwxr-xr-xtools/testing/selftests/bpf/test_flow_dissector.sh115
-rwxr-xr-xtools/testing/selftests/bpf/test_libbpf.sh2
-rw-r--r--tools/testing/selftests/bpf/test_maps.c132
-rw-r--r--tools/testing/selftests/bpf/test_netcnt.c158
-rw-r--r--tools/testing/selftests/bpf/test_progs.c157
-rw-r--r--tools/testing/selftests/bpf/test_queue_map.c4
-rw-r--r--tools/testing/selftests/bpf/test_queue_stack_map.h59
-rw-r--r--tools/testing/selftests/bpf/test_section_names.c208
-rw-r--r--tools/testing/selftests/bpf/test_sk_lookup_kern.c180
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c6
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c373
-rw-r--r--tools/testing/selftests/bpf/test_sockmap_kern.h97
-rw-r--r--tools/testing/selftests/bpf/test_stack_map.c4
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_kern.c38
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_user.c31
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c1623
-rw-r--r--tools/testing/selftests/bpf/test_xdp_vlan.c292
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan.sh195
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c8
-rwxr-xr-xtools/testing/selftests/bpf/with_addr.sh54
-rwxr-xr-xtools/testing/selftests/bpf/with_tunnels.sh36
-rw-r--r--tools/testing/selftests/cgroup/.gitignore1
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c38
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h1
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c205
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh347
-rwxr-xr-xtools/testing/selftests/drivers/usb/usbip/usbip_test.sh4
-rw-r--r--tools/testing/selftests/efivarfs/config1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc80
-rw-r--r--tools/testing/selftests/futex/functional/Makefile1
-rw-r--r--tools/testing/selftests/gpio/Makefile7
-rw-r--r--tools/testing/selftests/kselftest.h1
-rw-r--r--tools/testing/selftests/kvm/.gitignore13
-rw-r--r--tools/testing/selftests/kvm/Makefile47
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c374
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/processor.h55
-rw-r--r--tools/testing/selftests/kvm/include/evmcs.h1098
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h167
-rw-r--r--tools/testing/selftests/kvm/include/sparsebit.h6
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h6
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h (renamed from tools/testing/selftests/kvm/include/x86.h)28
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/vmx.h (renamed from tools/testing/selftests/kvm/include/vmx.h)35
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c311
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c2
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c521
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util_internal.h33
-rw-r--r--tools/testing/selftests/kvm/lib/ucall.c144
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c (renamed from tools/testing/selftests/kvm/lib/x86.c)263
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c (renamed from tools/testing/selftests/kvm/lib/vmx.c)55
-rw-r--r--tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c (renamed from tools/testing/selftests/kvm/cr4_cpuid_sync_test.c)14
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c160
-rw-r--r--tools/testing/selftests/kvm/x86_64/platform_info_test.c110
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_sregs_test.c (renamed from tools/testing/selftests/kvm/set_sregs_test.c)2
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c (renamed from tools/testing/selftests/kvm/state_test.c)47
-rw-r--r--tools/testing/selftests/kvm/x86_64/sync_regs_test.c (renamed from tools/testing/selftests/kvm/sync_regs_test.c)2
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c (renamed from tools/testing/selftests/kvm/vmx_tsc_adjust_test.c)24
-rw-r--r--tools/testing/selftests/lib.mk12
-rw-r--r--tools/testing/selftests/memory-hotplug/config1
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile5
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh167
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_sticky_fdb.sh69
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh27
-rw-r--r--tools/testing/selftests/net/ip_defrag.c393
-rwxr-xr-xtools/testing/selftests/net/ip_defrag.sh39
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh327
-rw-r--r--tools/testing/selftests/net/reuseport_bpf.c13
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh2
-rw-r--r--tools/testing/selftests/net/tls.c96
-rwxr-xr-xtools/testing/selftests/net/udpgso_bench.sh2
-rw-r--r--tools/testing/selftests/networking/timestamping/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/alignment/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/dscr/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/math/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/primitives/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/stringloops/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/syscalls/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/vphn/Makefile1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFLIST2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot3
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot2
-rw-r--r--tools/testing/selftests/rseq/param_test.c19
-rw-r--r--tools/testing/selftests/tc-testing/README2
-rw-r--r--tools/testing/selftests/tc-testing/bpf/Makefile29
-rw-r--r--tools/testing/selftests/tc-testing/bpf/action.c23
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py66
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json16
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/gact.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json24
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config.py4
-rw-r--r--tools/testing/selftests/vm/Makefile4
-rw-r--r--tools/testing/selftests/x86/test_vdso.c172
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_common.c6
-rw-r--r--tools/usb/usbip/libsrc/vhci_driver.c2
-rw-r--r--virt/kvm/arm/arm.c30
-rw-r--r--virt/kvm/arm/mmu.c142
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c36
-rw-r--r--virt/kvm/arm/vgic/vgic-kvm-device.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c2
-rw-r--r--virt/kvm/coalesced_mmio.c12
-rw-r--r--virt/kvm/kvm_main.c39
6084 files changed, 305135 insertions, 160536 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
deleted file mode 100644
index 2754fe83f0d4..000000000000
--- a/Documentation/00-INDEX
+++ /dev/null
@@ -1,428 +0,0 @@
-
-This is a brief list of all the files in ./linux/Documentation and what
-they contain. If you add a documentation file, please list it here in
-alphabetical order as well, or risk being hunted down like a rabid dog.
-Please keep the descriptions small enough to fit on one line.
- Thanks -- Paul G.
-
-The following translations are available on the WWW:
-
- - Japanese, maintained by the JF Project (jf@listserv.linux.or.jp), at
- http://linuxjf.sourceforge.jp/
-
-00-INDEX
- - this file.
-ABI/
- - info on kernel <-> userspace ABI and relative interface stability.
-CodingStyle
- - nothing here, just a pointer to process/coding-style.rst.
-DMA-API.txt
- - DMA API, pci_ API & extensions for non-consistent memory machines.
-DMA-API-HOWTO.txt
- - Dynamic DMA mapping Guide
-DMA-ISA-LPC.txt
- - How to do DMA with ISA (and LPC) devices.
-DMA-attributes.txt
- - listing of the various possible attributes a DMA region can have
-EDID/
- - directory with info on customizing EDID for broken gfx/displays.
-IPMI.txt
- - info on Linux Intelligent Platform Management Interface (IPMI) Driver.
-IRQ-affinity.txt
- - how to select which CPU(s) handle which interrupt events on SMP.
-IRQ-domain.txt
- - info on interrupt numbering and setting up IRQ domains.
-IRQ.txt
- - description of what an IRQ is.
-Intel-IOMMU.txt
- - basic info on the Intel IOMMU virtualization support.
-Makefile
- - It's not of interest for those who aren't touching the build system.
-PCI/
- - info related to PCI drivers.
-RCU/
- - directory with info on RCU (read-copy update).
-SAK.txt
- - info on Secure Attention Keys.
-SM501.txt
- - Silicon Motion SM501 multimedia companion chip
-SubmittingPatches
- - nothing here, just a pointer to process/submitting-patches.rst.
-accounting/
- - documentation on accounting and taskstats.
-acpi/
- - info on ACPI-specific hooks in the kernel.
-admin-guide/
- - info related to Linux users and system admins.
-aoe/
- - description of AoE (ATA over Ethernet) along with config examples.
-arm/
- - directory with info about Linux on the ARM architecture.
-arm64/
- - directory with info about Linux on the 64 bit ARM architecture.
-auxdisplay/
- - misc. LCD driver documentation (cfag12864b, ks0108).
-backlight/
- - directory with info on controlling backlights in flat panel displays
-block/
- - info on the Block I/O (BIO) layer.
-blockdev/
- - info on block devices & drivers
-bt8xxgpio.txt
- - info on how to modify a bt8xx video card for GPIO usage.
-btmrvl.txt
- - info on Marvell Bluetooth driver usage.
-bus-devices/
- - directory with info on TI GPMC (General Purpose Memory Controller)
-bus-virt-phys-mapping.txt
- - how to access I/O mapped memory from within device drivers.
-cdrom/
- - directory with information on the CD-ROM drivers that Linux has.
-cgroup-v1/
- - cgroups v1 features, including cpusets and memory controller.
-cma/
- - Contiguous Memory Allocator (CMA) debugfs interface.
-conf.py
- - It's not of interest for those who aren't touching the build system.
-connector/
- - docs on the netlink based userspace<->kernel space communication mod.
-console/
- - documentation on Linux console drivers.
-core-api/
- - documentation on kernel core components.
-cpu-freq/
- - info on CPU frequency and voltage scaling.
-cpu-hotplug.txt
- - document describing CPU hotplug support in the Linux kernel.
-cpu-load.txt
- - document describing how CPU load statistics are collected.
-cpuidle/
- - info on CPU_IDLE, CPU idle state management subsystem.
-cputopology.txt
- - documentation on how CPU topology info is exported via sysfs.
-crc32.txt
- - brief tutorial on CRC computation
-crypto/
- - directory with info on the Crypto API.
-dcdbas.txt
- - information on the Dell Systems Management Base Driver.
-debugging-modules.txt
- - some notes on debugging modules after Linux 2.6.3.
-debugging-via-ohci1394.txt
- - how to use firewire like a hardware debugger memory reader.
-dell_rbu.txt
- - document demonstrating the use of the Dell Remote BIOS Update driver.
-dev-tools/
- - directory with info on development tools for the kernel.
-device-mapper/
- - directory with info on Device Mapper.
-dmaengine/
- - the DMA engine and controller API guides.
-devicetree/
- - directory with info on device tree files used by OF/PowerPC/ARM
-digsig.txt
- - info on the Digital Signature Verification API
-dma-buf-sharing.txt
- - the DMA Buffer Sharing API Guide
-docutils.conf
- - nothing here. Just a configuration file for docutils.
-dontdiff
- - file containing a list of files that should never be diff'ed.
-driver-api/
- - the Linux driver implementer's API guide.
-driver-model/
- - directory with info about Linux driver model.
-early-userspace/
- - info about initramfs, klibc, and userspace early during boot.
-efi-stub.txt
- - How to use the EFI boot stub to bypass GRUB or elilo on EFI systems.
-eisa.txt
- - info on EISA bus support.
-extcon/
- - directory with porting guide for Android kernel switch driver.
-isa.txt
- - info on ISA bus support.
-fault-injection/
- - dir with docs about the fault injection capabilities infrastructure.
-fb/
- - directory with info on the frame buffer graphics abstraction layer.
-features/
- - status of feature implementation on different architectures.
-filesystems/
- - info on the vfs and the various filesystems that Linux supports.
-firmware_class/
- - request_firmware() hotplug interface info.
-flexible-arrays.txt
- - how to make use of flexible sized arrays in linux
-fmc/
- - information about the FMC bus abstraction
-fpga/
- - FPGA Manager Core.
-futex-requeue-pi.txt
- - info on requeueing of tasks from a non-PI futex to a PI futex
-gcc-plugins.txt
- - GCC plugin infrastructure.
-gpio/
- - gpio related documentation
-gpu/
- - directory with information on GPU driver developer's guide.
-hid/
- - directory with information on human interface devices
-highuid.txt
- - notes on the change from 16 bit to 32 bit user/group IDs.
-hwspinlock.txt
- - hardware spinlock provides hardware assistance for synchronization
-timers/
- - info on the timer related topics
-hw_random.txt
- - info on Linux support for random number generator in i8xx chipsets.
-hwmon/
- - directory with docs on various hardware monitoring drivers.
-i2c/
- - directory with info about the I2C bus/protocol (2 wire, kHz speed).
-x86/i386/
- - directory with info about Linux on Intel 32 bit architecture.
-ia64/
- - directory with info about Linux on the Intel IA-64 (Itanium) architecture.
-ide/
- - Information regarding the Enhanced IDE drive.
-iio/
- - info on industrial IIO configfs support.
-index.rst
- - main index for the documentation at ReST format.
-infiniband/
- - directory with documents concerning Linux InfiniBand support.
-input/
- - info on Linux input device support.
-intel_txt.txt
- - info on intel Trusted Execution Technology (intel TXT).
-io-mapping.txt
- - description of io_mapping functions in linux/io-mapping.h
-io_ordering.txt
- - info on ordering I/O writes to memory-mapped addresses.
-ioctl/
- - directory with documents describing various IOCTL calls.
-iostats.txt
- - info on I/O statistics Linux kernel provides.
-irqflags-tracing.txt
- - how to use the irq-flags tracing feature.
-isapnp.txt
- - info on Linux ISA Plug & Play support.
-isdn/
- - directory with info on the Linux ISDN support, and supported cards.
-kbuild/
- - directory with info about the kernel build process.
-kdump/
- - directory with mini HowTo on getting the crash dump code to work.
-doc-guide/
- - how to write and format reStructuredText kernel documentation
-kernel-per-CPU-kthreads.txt
- - List of all per-CPU kthreads and how they introduce jitter.
-kobject.txt
- - info of the kobject infrastructure of the Linux kernel.
-kprobes.txt
- - documents the kernel probes debugging feature.
-kref.txt
- - docs on adding reference counters (krefs) to kernel objects.
-laptops/
- - directory with laptop related info and laptop driver documentation.
-ldm.txt
- - a brief description of LDM (Windows Dynamic Disks).
-leds/
- - directory with info about LED handling under Linux.
-livepatch/
- - info on kernel live patching.
-locking/
- - directory with info about kernel locking primitives
-lockup-watchdogs.txt
- - info on soft and hard lockup detectors (aka nmi_watchdog).
-logo.gif
- - full colour GIF image of Linux logo (penguin - Tux).
-logo.txt
- - info on creator of above logo & site to get additional images from.
-lsm.txt
- - Linux Security Modules: General Security Hooks for Linux
-lzo.txt
- - kernel LZO decompressor input formats
-m68k/
- - directory with info about Linux on Motorola 68k architecture.
-mailbox.txt
- - How to write drivers for the common mailbox framework (IPC).
-md/
- - directory with info about Linux Software RAID
-media/
- - info on media drivers: uAPI, kAPI and driver documentation.
-memory-barriers.txt
- - info on Linux kernel memory barriers.
-memory-devices/
- - directory with info on parts like the Texas Instruments EMIF driver
-memory-hotplug.txt
- - Hotpluggable memory support, how to use and current status.
-men-chameleon-bus.txt
- - info on MEN chameleon bus.
-mic/
- - Intel Many Integrated Core (MIC) architecture device driver.
-mips/
- - directory with info about Linux on MIPS architecture.
-misc-devices/
- - directory with info about devices using the misc dev subsystem
-mmc/
- - directory with info about the MMC subsystem
-mtd/
- - directory with info about memory technology devices (flash)
-namespaces/
- - directory with various information about namespaces
-netlabel/
- - directory with information on the NetLabel subsystem.
-networking/
- - directory with info on various aspects of networking with Linux.
-nfc/
- - directory relating info about Near Field Communications support.
-nios2/
- - Linux on the Nios II architecture.
-nommu-mmap.txt
- - documentation about no-mmu memory mapping support.
-numastat.txt
- - info on how to read Numa policy hit/miss statistics in sysfs.
-ntb.txt
- - info on Non-Transparent Bridge (NTB) drivers.
-nvdimm/
- - info on non-volatile devices.
-nvmem/
- - info on non volatile memory framework.
-output/
- - default directory where html/LaTeX/pdf files will be written.
-padata.txt
- - An introduction to the "padata" parallel execution API
-parisc/
- - directory with info on using Linux on PA-RISC architecture.
-parport-lowlevel.txt
- - description and usage of the low level parallel port functions.
-pcmcia/
- - info on the Linux PCMCIA driver.
-percpu-rw-semaphore.txt
- - RCU based read-write semaphore optimized for locking for reading
-perf/
- - info about the APM X-Gene SoC Performance Monitoring Unit (PMU).
-phy/
- - info on Samsung USB 2.0 PHY adaptation layer.
-phy.txt
- - Description of the generic PHY framework.
-pi-futex.txt
- - documentation on lightweight priority inheritance futexes.
-pinctrl.txt
- - info on pinctrl subsystem and the PINMUX/PINCONF and drivers
-platform/
- - list of hardware supported by the Compal and Dell laptop drivers.
-pnp.txt
- - Linux Plug and Play documentation.
-power/
- - directory with info on Linux PCI power management.
-powerpc/
- - directory with info on using Linux with the PowerPC.
-prctl/
- - directory with info on the privilege control subsystem
-preempt-locking.txt
- - info on locking under a preemptive kernel.
-process/
- - how to work with the mainline kernel development process.
-pps/
- - directory with information on the pulse-per-second support
-pti/
- - directory with info on Intel MID PTI.
-ptp/
- - directory with info on support for IEEE 1588 PTP clocks in Linux.
-pwm.txt
- - info on the pulse width modulation driver subsystem
-rapidio/
- - directory with info on RapidIO packet-based fabric interconnect
-rbtree.txt
- - info on what red-black trees are and what they are for.
-remoteproc.txt
- - info on how to handle remote processor (e.g. AMP) offloads/usage.
-rfkill.txt
- - info on the radio frequency kill switch subsystem/support.
-robust-futex-ABI.txt
- - documentation of the robust futex ABI.
-robust-futexes.txt
- - a description of what robust futexes are.
-rpmsg.txt
- - info on the Remote Processor Messaging (rpmsg) Framework
-rtc.txt
- - notes on how to use the Real Time Clock (aka CMOS clock) driver.
-s390/
- - directory with info on using Linux on the IBM S390.
-scheduler/
- - directory with info on the scheduler.
-scsi/
- - directory with info on Linux scsi support.
-security/
- - directory that contains security-related info
-serial/
- - directory with info on the low level serial API.
-sgi-ioc4.txt
- - description of the SGI IOC4 PCI (multi function) device.
-sh/
- - directory with info on porting Linux to a new architecture.
-smsc_ece1099.txt
- - info on the SMSC Keyboard Scan Expansion/GPIO Expansion device.
-sound/
- - directory with info on sound card support.
-spi/
- - overview of Linux kernel Serial Peripheral Interface (SPI) support.
-sphinx/
- - no documentation here, just files required by Sphinx toolchain.
-sphinx-static/
- - no documentation here, just files required by Sphinx toolchain.
-static-keys.txt
- - info on how static keys allow debug code in hotpaths via patching
-svga.txt
- - short guide on selecting video modes at boot via VGA BIOS.
-sync_file.txt
- - Sync file API guide.
-sysctl/
- - directory with info on the /proc/sys/* files.
-target/
- - directory with info on generating TCM v4 fabric .ko modules
-tee.txt
- - info on the TEE subsystem and drivers
-this_cpu_ops.txt
- - rationale behind this_cpu operations and how to use them.
-thermal/
- - directory with information on managing thermal issues (CPU/temp)
-trace/
- - directory with info on tracing technologies within linux
-translations/
- - translations of this document from English to another language
-unaligned-memory-access.txt
- - info on how to avoid arch breaking unaligned memory access in code.
-unshare.txt
- - description of the Linux unshare system call.
-usb/
- - directory with info regarding the Universal Serial Bus.
-vfio.txt
- - info on Virtual Function I/O used in guest/hypervisor instances.
-video-output.txt
- - sysfs class driver interface to enable/disable a video output device.
-virtual/
- - directory with information on the various linux virtualizations.
-vm/
- - directory with info on the Linux vm code.
-w1/
- - directory with documents regarding the 1-wire (w1) subsystem.
-watchdog/
- - how to auto-reboot Linux if it has "fallen and can't get up". ;-)
-wimax/
- - directory with info about Intel Wireless Wimax Connections
-core-api/workqueue.rst
- - information on the Concurrency Managed Workqueue implementation
-x86/x86_64/
- - directory with info on Linux support for AMD x86-64 (Hammer) machines.
-xillybus.txt
- - overview and basic use of the xillybus driver
-xtensa/
- - directory with documents relating to arch/xtensa port/implementation
-xz.txt
- - how to make use of the XZ data compression within linux kernel
-zorro.txt
- - info on writing drivers for Zorro bus devices found on Amigas.
diff --git a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
index e960cd027e1e..a9e123ba32cd 100644
--- a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
+++ b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
@@ -25,38 +25,3 @@ Description:
4.2.2.
The files are read only.
-
-
-What: /sys/bus/usb/drivers/usbtmc/*/TermChar
-Date: August 2008
-Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Description:
- This file is the TermChar value to be sent to the USB TMC
- device as described by the document, "Universal Serial Bus Test
- and Measurement Class Specification
- (USBTMC) Revision 1.0" as published by the USB-IF.
-
- Note that the TermCharEnabled file determines if this value is
- sent to the device or not.
-
-
-What: /sys/bus/usb/drivers/usbtmc/*/TermCharEnabled
-Date: August 2008
-Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Description:
- This file determines if the TermChar is to be sent to the
- device on every transaction or not. For more details about
- this, please see the document, "Universal Serial Bus Test and
- Measurement Class Specification (USBTMC) Revision 1.0" as
- published by the USB-IF.
-
-
-What: /sys/bus/usb/drivers/usbtmc/*/auto_abort
-Date: August 2008
-Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Description:
- This file determines if the transaction of the USB TMC
- device is to be automatically aborted if there is any error.
- For more details about this, please see the document,
- "Universal Serial Bus Test and Measurement Class Specification
- (USBTMC) Revision 1.0" as published by the USB-IF.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc
index 9281e2aa38df..809765bd9573 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uvc
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
@@ -12,6 +12,10 @@ Date: Dec 2014
KernelVersion: 4.0
Description: Control descriptors
+ All attributes read only:
+ bInterfaceNumber - USB interface number for this
+ control interface
+
What: /config/usb-gadget/gadget/functions/uvc.name/control/class
Date: Dec 2014
KernelVersion: 4.0
@@ -109,6 +113,10 @@ Date: Dec 2014
KernelVersion: 4.0
Description: Streaming descriptors
+ All attributes read only:
+ bInterfaceNumber - USB interface number for this
+ streaming interface
+
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class
Date: Dec 2014
KernelVersion: 4.0
@@ -160,6 +168,10 @@ Description: Specific MJPEG format descriptors
All attributes read only,
except bmaControls and bDefaultFrameIndex:
+ bFormatIndex - unique id for this format descriptor;
+ only defined after parent header is
+ linked into the streaming class;
+ read-only
bmaControls - this format's data for bmaControls in
the streaming header
bmInterfaceFlags - specifies interlace information,
@@ -177,6 +189,10 @@ Date: Dec 2014
KernelVersion: 4.0
Description: Specific MJPEG frame descriptors
+ bFrameIndex - unique id for this frame descriptor;
+ only defined after parent format is
+ linked into the streaming header;
+ read-only
dwFrameInterval - indicates how frame interval can be
programmed; a number of values
separated by newline can be specified
@@ -204,6 +220,10 @@ Date: Dec 2014
KernelVersion: 4.0
Description: Specific uncompressed format descriptors
+ bFormatIndex - unique id for this format descriptor;
+ only defined after parent header is
+ linked into the streaming class;
+ read-only
bmaControls - this format's data for bmaControls in
the streaming header
bmInterfaceFlags - specifies interlace information,
@@ -224,6 +244,10 @@ Date: Dec 2014
KernelVersion: 4.0
Description: Specific uncompressed frame descriptors
+ bFrameIndex - unique id for this frame descriptor;
+ only defined after parent format is
+ linked into the streaming header;
+ read-only
dwFrameInterval - indicates how frame interval can be
programmed; a number of values
separated by newline can be specified
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 44d4b2be92fd..8bfee557e50e 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -323,3 +323,27 @@ Description:
This is similar to /sys/bus/pci/drivers_autoprobe, but
affects only the VFs associated with a specific PF.
+
+What: /sys/bus/pci/devices/.../p2pmem/size
+Date: November 2017
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description:
+ If the device has any Peer-to-Peer memory registered, this
+ file contains the total amount of memory that the device
+ provides (in decimal).
+
+What: /sys/bus/pci/devices/.../p2pmem/available
+Date: November 2017
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description:
+ If the device has any Peer-to-Peer memory registered, this
+ file contains the amount of memory that has not been
+ allocated (in decimal).
+
+What: /sys/bus/pci/devices/.../p2pmem/published
+Date: November 2017
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description:
+ If the device has any Peer-to-Peer memory registered, this
+ file contains a '1' if the memory has been published for
+ use outside the driver that owns the device.
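+
+ As an illustrative sketch (the PCI address and the values shown are
+ hypothetical), all three attributes can be read with plain shell tools:
+
+   # cat /sys/bus/pci/devices/0000:01:00.0/p2pmem/size
+   16777216
+   # cat /sys/bus/pci/devices/0000:01:00.0/p2pmem/available
+   16777216
+   # cat /sys/bus/pci/devices/0000:01:00.0/p2pmem/published
+   1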
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 08d456e07b53..559baa5c418c 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -189,6 +189,16 @@ Description:
The file will read "hotplug", "wired" and "not used" if the
information is available, and "unknown" otherwise.
+What: /sys/bus/usb/devices/.../(hub interface)/portX/location
+Date: October 2018
+Contact: Bjørn Mork <bjorn@mork.no>
+Description:
+ Some platforms provide the USB port's physical location through
+ firmware. The kernel uses this to pair up logical ports that
+ map to the same physical connector. The attribute exposes the
+ raw location value as a hex integer.
+
+
What: /sys/bus/usb/devices/.../(hub interface)/portX/quirks
Date: May 2018
Contact: Nicolas Boichat <drinkcat@chromium.org>
@@ -219,7 +229,14 @@ Description:
ports and report them to the kernel. This attribute is to expose
the number of over-current situation occurred on a specific port
to user space. This file will contain an unsigned 32 bit value
- which wraps to 0 after its maximum is reached.
+ which wraps to 0 after its maximum is reached. This file supports
+ poll() for monitoring changes to this value in user space.
+
+ Any time this value changes, the corresponding hub device will send a
+ udev event with the following attributes:
+
+ OVER_CURRENT_PORT=/sys/bus/usb/devices/.../(hub interface)/portX
+ OVER_CURRENT_COUNT=[current value of this sysfs attribute]
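+
+ A hedged way to observe these events from user space is udevadm
+ (assumed to be available; the port path and count below are
+ hypothetical, and other event properties are omitted):
+
+   # udevadm monitor --kernel --property --subsystem-match=usb
+   OVER_CURRENT_PORT=/sys/bus/usb/devices/usb1/1-0:1.0/usb1-port1
+   OVER_CURRENT_COUNT=1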
What: /sys/bus/usb/devices/.../(hub interface)/portX/usb3_lpm_permit
Date: November 2015
diff --git a/Documentation/ABI/testing/sysfs-class-lcd-s6e63m0 b/Documentation/ABI/testing/sysfs-class-lcd-s6e63m0
deleted file mode 100644
index ae0a2d3dcc07..000000000000
--- a/Documentation/ABI/testing/sysfs-class-lcd-s6e63m0
+++ /dev/null
@@ -1,27 +0,0 @@
-sysfs interface for the S6E63M0 AMOLED LCD panel driver
--------------------------------------------------------
-
-What: /sys/class/lcd/<lcd>/gamma_mode
-Date: May, 2010
-KernelVersion: v2.6.35
-Contact: dri-devel@lists.freedesktop.org
-Description:
- (RW) Read or write the gamma mode. Following three modes are
- supported:
- 0 - gamma value 2.2,
- 1 - gamma value 1.9 and
- 2 - gamma value 1.7.
-
-
-What: /sys/class/lcd/<lcd>/gamma_table
-Date: May, 2010
-KernelVersion: v2.6.35
-Contact: dri-devel@lists.freedesktop.org
-Description:
- (RO) Displays the size of the gamma table i.e. the number of
- gamma modes available.
-
-This is a backlight lcd driver. These interfaces are an extension to the API
-documented in Documentation/ABI/testing/sysfs-class-lcd and in
-Documentation/ABI/stable/sysfs-class-backlight (under
-/sys/class/backlight/<backlight>/).
diff --git a/Documentation/ABI/testing/sysfs-class-led-driver-sc27xx b/Documentation/ABI/testing/sysfs-class-led-driver-sc27xx
new file mode 100644
index 000000000000..45b1e605d355
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-driver-sc27xx
@@ -0,0 +1,22 @@
+What: /sys/class/leds/<led>/hw_pattern
+Date: September 2018
+KernelVersion: 4.20
+Description:
+ Specify a hardware pattern for the SC27XX LED. The SC27XX LED
+ controller supports only 4 stages per hardware pattern, which
+ are used to configure the rise time, high time, fall time and
+ low time for the breathing mode.
+
+ For the breathing mode, the SC27XX LED expects only one brightness
+ value, for the high stage. To stay compatible with the hardware
+ pattern format, the brightness should be set to 0 for the rise,
+ fall and low stages.
+
+ Min stage duration: 125 ms
+ Max stage duration: 31875 ms
+
+ Since the stage duration step is 125 ms, each duration should be
+ a multiple of 125, i.e. 125ms, 250ms, 375ms, 500ms ... 31875ms.
+
+ Thus the format of the hardware pattern values should be:
+ "0 rise_duration brightness high_duration 0 fall_duration 0 low_duration".
diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
new file mode 100644
index 000000000000..fb3d1e03b881
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern
@@ -0,0 +1,82 @@
+What: /sys/class/leds/<led>/pattern
+Date: September 2018
+KernelVersion: 4.20
+Description:
+ Specify a software pattern for the LED, which supports altering
+ the brightness for the specified duration with one software
+ timer. It can do gradual dimming and step changes of brightness.
+
+ The pattern is given by a series of tuples of brightness and
+ duration (ms). The LED is expected to traverse the series,
+ holding each brightness value for the specified duration. A
+ duration of 0 means the brightness should immediately change to
+ the new value, and writing a malformed pattern deactivates any
+ active one.
+
+ 1. For gradual dimming, the dimming interval is currently set
+ to 50 milliseconds, so a tuple with a duration shorter than the
+ dimming interval (50ms) is treated as a step change of
+ brightness, i.e. the subsequent brightness will be applied
+ without adding intervening dimming intervals.
+
+ The gradual dimming format of the software pattern values should be:
+ "brightness_1 duration_1 brightness_2 duration_2 brightness_3
+ duration_3 ...". For example:
+
+ echo 0 1000 255 2000 > pattern
+
+ It will make the LED go gradually from zero-intensity to max (255)
+ intensity in 1000 milliseconds, then back to zero intensity in 2000
+ milliseconds:
+
+ LED brightness
+ ^
+ 255-| / \ / \ /
+ | / \ / \ /
+ | / \ / \ /
+ | / \ / \ /
+ 0-| / \/ \/
+ +---0----1----2----3----4----5----6------------> time (s)
+
+ 2. To make the LED go instantly from one brightness value to another,
+ use zero-time lengths (the brightness must be the same as
+ the previous tuple's). So the format should be:
+ "brightness_1 duration_1 brightness_1 0 brightness_2 duration_2
+ brightness_2 0 ...". For example:
+
+ echo 0 1000 0 0 255 2000 255 0 > pattern
+
+ It will make the LED stay off for one second, then stay at max brightness
+ for two seconds:
+
+ LED brightness
+ ^
+ 255-| +---------+ +---------+
+ | | | | |
+ | | | | |
+ | | | | |
+ 0-| -----+ +----+ +----
+ +---0----1----2----3----4----5----6------------> time (s)
+
+What: /sys/class/leds/<led>/hw_pattern
+Date: September 2018
+KernelVersion: 4.20
+Description:
+ Specify a hardware pattern for the LED, for LED hardware that
+ supports autonomously controlling brightness over time, according
+ to some preprogrammed hardware patterns. It deactivates any active
+ software pattern.
+
+ Since different LED hardware can have different semantics of
+ hardware patterns, each driver is expected to provide its own
+ description for the hardware patterns in their ABI documentation
+ file.
+
+What: /sys/class/leds/<led>/repeat
+Date: September 2018
+KernelVersion: 4.20
+Description:
+ Specify a pattern repeat number. -1 means repeat indefinitely;
+ other negative numbers and 0 are invalid.
+
+ This file will always return the originally written repeat
+ number.
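+
+ A minimal usage sketch combining these attributes (the LED name and
+ the values are hypothetical; the standard LED class 'trigger'
+ attribute is used to activate the pattern trigger):
+
+   cd /sys/class/leds/<led>
+   echo pattern > trigger            # activate the pattern trigger
+   echo "0 1000 255 2000" > pattern  # ramp up in 1s, back down in 2s
+   echo 2 > repeat                   # run the pattern twice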
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index 2f1788111cd9..664a8f6a634f 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -91,6 +91,24 @@ Description:
stacked (e.g: VLAN interfaces) but still have the same MAC
address as their parent device.
+What: /sys/class/net/<iface>/dev_port
+Date: February 2014
+KernelVersion: 3.15
+Contact: netdev@vger.kernel.org
+Description:
+ Indicates the port number of this network device, formatted
+ as a decimal value. Some NICs have multiple independent ports
+ on the same PCI bus, device and function. This attribute allows
+ userspace to distinguish the respective interfaces.
+
+ Note: some device drivers began using 'dev_id' for this
+ purpose long before 3.15 and have not adopted the new
+ attribute since. To query the port number, some tools look
+ exclusively at 'dev_port', while others only consult 'dev_id'.
+ If a network device has multiple client adapter ports as
+ described in the previous paragraph and does not set this
+ attribute to its port number, it's a kernel bug.
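+
+ A minimal sketch of telling two such ports apart (the interface
+ names and values are hypothetical):
+
+   # cat /sys/class/net/eth0/dev_port
+   0
+   # cat /sys/class/net/eth1/dev_port
+   1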
+
What: /sys/class/net/<iface>/dormant
Date: March 2006
KernelVersion: 2.6.17
@@ -117,7 +135,7 @@ Description:
full: full duplex
Note: This attribute is only valid for interfaces that implement
- the ethtool get_settings method (mostly Ethernet).
+ the ethtool get_link_ksettings method (mostly Ethernet).
What: /sys/class/net/<iface>/flags
Date: April 2005
@@ -224,7 +242,7 @@ Description:
an integer representing the link speed in Mbits/sec.
Note: this attribute is only valid for interfaces that implement
- the ethtool get_settings method (mostly Ethernet ).
+ the ethtool get_link_ksettings method (mostly Ethernet).
What: /sys/class/net/<iface>/tx_queue_len
Date: April 2005
diff --git a/Documentation/ABI/testing/sysfs-class-net-dsa b/Documentation/ABI/testing/sysfs-class-net-dsa
new file mode 100644
index 000000000000..f240221e071e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-net-dsa
@@ -0,0 +1,7 @@
+What: /sys/class/net/<iface>/tagging
+Date: August 2018
+KernelVersion: 4.20
+Contact: netdev@vger.kernel.org
+Description:
+ String indicating the type of tagging protocol used by the
+ DSA slave network device.
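+
+ For example (the interface name and the protocol shown are
+ hypothetical):
+
+   # cat /sys/class/net/lan0/tagging
+   edsa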
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 94a24aedcdb2..3ac41774ad3c 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -121,7 +121,22 @@ What: /sys/fs/f2fs/<disk>/idle_interval
Date: January 2016
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
- Controls the idle timing.
+ Controls the idle timing for all paths other than
+ the discard and gc paths.
+
+What: /sys/fs/f2fs/<disk>/discard_idle_interval
+Date: September 2018
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Contact: "Sahitya Tummala" <stummala@codeaurora.org>
+Description:
+ Controls the idle timing for the discard path.
+
+What: /sys/fs/f2fs/<disk>/gc_idle_interval
+Date: September 2018
+Contact: "Chao Yu" <yuchao0@huawei.com>
+Contact: "Sahitya Tummala" <stummala@codeaurora.org>
+Description:
+ Controls the idle timing for the gc path.
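+
+ As an illustration (the device name and the values are assumed),
+ the discard and gc intervals can be tuned independently of the
+ generic idle interval:
+
+   # echo 5 > /sys/fs/f2fs/sda1/discard_idle_interval
+   # echo 10 > /sys/fs/f2fs/sda1/gc_idle_interval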
What: /sys/fs/f2fs/<disk>/iostat_enable
Date: August 2017
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index 2f813d644c69..18b7dc929234 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -99,7 +99,7 @@ Description:
this file, the suspend image will be as small as possible.
Reading from this file will display the current image size
- limit, which is set to 500 MB by default.
+ limit, which is set to around 2/5 of available RAM by default.
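+
+ A usage sketch (the size value, in bytes, is arbitrary):
+
+   # echo 2147483648 > /sys/power/image_size
+   # cat /sys/power/image_size
+   2147483648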
What: /sys/power/pm_trace
Date: August 2006
diff --git a/Documentation/PCI/00-INDEX b/Documentation/PCI/00-INDEX
deleted file mode 100644
index 206b1d5c1e71..000000000000
--- a/Documentation/PCI/00-INDEX
+++ /dev/null
@@ -1,26 +0,0 @@
-00-INDEX
- - this file
-acpi-info.txt
- - info on how PCI host bridges are represented in ACPI
-MSI-HOWTO.txt
- - the Message Signaled Interrupts (MSI) Driver Guide HOWTO and FAQ.
-PCIEBUS-HOWTO.txt
- - a guide describing the PCI Express Port Bus driver
-pci-error-recovery.txt
- - info on PCI error recovery
-pci-iov-howto.txt
- - the PCI Express I/O Virtualization HOWTO
-pci.txt
- - info on the PCI subsystem for device driver authors
-pcieaer-howto.txt
- - the PCI Express Advanced Error Reporting Driver Guide HOWTO
-endpoint/pci-endpoint.txt
- - guide to add endpoint controller driver and endpoint function driver.
-endpoint/pci-endpoint-cfs.txt
- - guide to use configfs to configure the PCI endpoint function.
-endpoint/pci-test-function.txt
- - specification of *PCI test* function device.
-endpoint/pci-test-howto.txt
- - userguide for PCI endpoint test function.
-endpoint/function/binding/
- - binding documentation for PCI endpoint function
diff --git a/Documentation/PCI/endpoint/pci-test-howto.txt b/Documentation/PCI/endpoint/pci-test-howto.txt
index e40cf0fb58d7..040479f437a5 100644
--- a/Documentation/PCI/endpoint/pci-test-howto.txt
+++ b/Documentation/PCI/endpoint/pci-test-howto.txt
@@ -99,17 +99,20 @@ Note that the devices listed here correspond to the value populated in 1.4 above
2.2 Using Endpoint Test function Device
pcitest.sh added in tools/pci/ can be used to run all the default PCI endpoint
-tests. Before pcitest.sh can be used pcitest.c should be compiled using the
-following commands.
+tests. To compile this tool, run the following commands:
- cd <kernel-dir>
- make headers_install ARCH=arm
- arm-linux-gnueabihf-gcc -Iusr/include tools/pci/pcitest.c -o pcitest
- cp pcitest <rootfs>/usr/sbin/
- cp tools/pci/pcitest.sh <rootfs>
+ # cd <kernel-dir>
+ # make -C tools/pci
+
+or, to compile and install it on your system:
+
+ # cd <kernel-dir>
+ # make -C tools/pci install
+
+The tool and script will be located in <rootfs>/usr/bin/
2.2.1 pcitest.sh Output
- # ./pcitest.sh
+ # pcitest.sh
BAR tests
BAR0: OKAY
diff --git a/Documentation/PCI/pci-error-recovery.txt b/Documentation/PCI/pci-error-recovery.txt
index 688b69121e82..0b6bb3ef449e 100644
--- a/Documentation/PCI/pci-error-recovery.txt
+++ b/Documentation/PCI/pci-error-recovery.txt
@@ -110,7 +110,7 @@ The actual steps taken by a platform to recover from a PCI error
event will be platform-dependent, but will follow the general
sequence described below.
-STEP 0: Error Event: ERR_NONFATAL
+STEP 0: Error Event
-------------------
A PCI bus error is detected by the PCI hardware. On powerpc, the slot
is isolated, in that all I/O is blocked: all reads return 0xffffffff,
proceeds to either STEP 3 (Link Reset) or to STEP 5 (Resume Operations).
If any driver returned PCI_ERS_RESULT_NEED_RESET, then the platform
proceeds to STEP 4 (Slot Reset)
-STEP 3: Slot Reset
+STEP 3: Link Reset
+------------------
+The platform resets the link. This is a PCI-Express specific step
+and is done whenever a fatal error has been detected that can be
+"solved" by resetting the link.
+
+STEP 4: Slot Reset
------------------
In response to a return value of PCI_ERS_RESULT_NEED_RESET, the
@@ -314,7 +320,7 @@ Failure).
>>> However, it probably should.
-STEP 4: Resume Operations
+STEP 5: Resume Operations
-------------------------
The platform will call the resume() callback on all affected device
drivers if all drivers on the segment have returned
@@ -326,7 +332,7 @@ a result code.
At this point, if a new error happens, the platform will restart
a new error recovery sequence.
-STEP 5: Permanent Failure
+STEP 6: Permanent Failure
-------------------------
A "permanent failure" has occurred, and the platform cannot recover
the device. The platform will call error_detected() with a
@@ -349,27 +355,6 @@ errors. See the discussion in powerpc/eeh-pci-error-recovery.txt
for additional detail on real-life experience of the causes of
software errors.
-STEP 0: Error Event: ERR_FATAL
--------------------
-PCI bus error is detected by the PCI hardware. On powerpc, the slot is
-isolated, in that all I/O is blocked: all reads return 0xffffffff, all
-writes are ignored.
-
-STEP 1: Remove devices
---------------------
-Platform removes the devices depending on the error agent, it could be
-this port for all subordinates or upstream component (likely downstream
-port)
-
-STEP 2: Reset link
---------------------
-The platform resets the link. This is a PCI-Express specific step and is
-done whenever a fatal error has been detected that can be "solved" by
-resetting the link.
-
-STEP 3: Re-enumerate the devices
---------------------
-Initiates the re-enumeration.
Conclusion; General Remarks
---------------------------
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
deleted file mode 100644
index f46980c060aa..000000000000
--- a/Documentation/RCU/00-INDEX
+++ /dev/null
@@ -1,34 +0,0 @@
-00-INDEX
- - This file
-arrayRCU.txt
- - Using RCU to Protect Read-Mostly Arrays
-checklist.txt
- - Review Checklist for RCU Patches
-listRCU.txt
- - Using RCU to Protect Read-Mostly Linked Lists
-lockdep.txt
- - RCU and lockdep checking
-lockdep-splat.txt
- - RCU Lockdep splats explained.
-NMI-RCU.txt
- - Using RCU to Protect Dynamic NMI Handlers
-rcu_dereference.txt
- - Proper care and feeding of return values from rcu_dereference()
-rcubarrier.txt
- - RCU and Unloadable Modules
-rculist_nulls.txt
- - RCU list primitives for use with SLAB_TYPESAFE_BY_RCU
-rcuref.txt
- - Reference-count design for elements of lists/arrays protected by RCU
-rcu.txt
- - RCU Concepts
-RTFP.txt
- - List of RCU papers (bibliography) going back to 1980.
-stallwarn.txt
- - RCU CPU stall warnings (module parameter rcu_cpu_stall_suppress)
-torture.txt
- - RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST)
-UP.txt
- - RCU on Uniprocessor Systems
-whatisRCU.txt
- - What is RCU?
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index f5120a00f511..1d2051c0c3fc 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -1227,9 +1227,11 @@ to overflow the counter, this approach corrects the
CPU enters the idle loop from process context.
</p><p>The <tt>-&gt;dynticks</tt> field counts the corresponding
-CPU's transitions to and from dyntick-idle mode, so that this counter
-has an even value when the CPU is in dyntick-idle mode and an odd
-value otherwise.
+CPU's transitions to and from either dyntick-idle or user mode, so
+that this counter has an even value when the CPU is in dyntick-idle
+mode or user mode and an odd value otherwise. The transitions to/from
+user mode need to be counted for user mode adaptive-ticks support
+(see timers/NO_HZ.txt).
</p><p>The <tt>-&gt;rcu_need_heavy_qs</tt> field is used
to record the fact that the RCU core code would really like to
@@ -1372,8 +1374,7 @@ that is, if the CPU is currently idle.
Accessor Functions</a></h3>
<p>The following listing shows the
-<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first</tt>,
-<tt>rcu_for_each_nonleaf_node_breadth_first()</tt>, and
+<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first</tt> and
<tt>rcu_for_each_leaf_node()</tt> function and macros:
<pre>
@@ -1386,13 +1387,9 @@ Accessor Functions</a></h3>
7 for ((rnp) = &amp;(rsp)-&gt;node[0]; \
8 (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
9
- 10 #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
- 11 for ((rnp) = &amp;(rsp)-&gt;node[0]; \
- 12 (rnp) &lt; (rsp)-&gt;level[NUM_RCU_LVLS - 1]; (rnp)++)
- 13
- 14 #define rcu_for_each_leaf_node(rsp, rnp) \
- 15 for ((rnp) = (rsp)-&gt;level[NUM_RCU_LVLS - 1]; \
- 16 (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+ 10 #define rcu_for_each_leaf_node(rsp, rnp) \
+ 11 for ((rnp) = (rsp)-&gt;level[NUM_RCU_LVLS - 1]; \
+ 12 (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
</pre>
<p>The <tt>rcu_get_root()</tt> simply returns a pointer to the
@@ -1405,10 +1402,7 @@ macro takes advantage of the layout of the <tt>rcu_node</tt>
structures in the <tt>rcu_state</tt> structure's
<tt>-&gt;node[]</tt> array, performing a breadth-first traversal by
simply traversing the array in order.
-The <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> macro operates
-similarly, but traverses only the first part of the array, thus excluding
-the leaf <tt>rcu_node</tt> structures.
-Finally, the <tt>rcu_for_each_leaf_node()</tt> macro traverses only
+Similarly, the <tt>rcu_for_each_leaf_node()</tt> macro traverses only
the last part of the array, thus traversing only the leaf
<tt>rcu_node</tt> structures.
@@ -1416,15 +1410,14 @@ the last part of the array, thus traversing only the leaf
<tr><th>&nbsp;</th></tr>
<tr><th align="left">Quick Quiz:</th></tr>
<tr><td>
- What do <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> and
+ What does
<tt>rcu_for_each_leaf_node()</tt> do if the <tt>rcu_node</tt> tree
contains only a single node?
</td></tr>
<tr><th align="left">Answer:</th></tr>
<tr><td bgcolor="#ffffff"><font color="ffffff">
In the single-node case,
- <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> is a no-op
- and <tt>rcu_for_each_leaf_node()</tt> traverses the single node.
+ <tt>rcu_for_each_leaf_node()</tt> traverses the single node.
</font></td></tr>
<tr><td>&nbsp;</td></tr>
</table>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
index 7394f034be65..e62c7c34a369 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
@@ -12,10 +12,9 @@ high efficiency and minimal disturbance, expedited grace periods accept
lower efficiency and significant disturbance to attain shorter latencies.
<p>
-There are three flavors of RCU (RCU-bh, RCU-preempt, and RCU-sched),
-but only two flavors of expedited grace periods because the RCU-bh
-expedited grace period maps onto the RCU-sched expedited grace period.
-Each of the remaining two implementations is covered in its own section.
+There are two flavors of RCU (RCU-preempt and RCU-sched), with an earlier
+third RCU-bh flavor having been implemented in terms of the other two.
+Each of the two implementations is covered in its own section.
<ol>
<li> <a href="#Expedited Grace Period Design">
@@ -158,7 +157,7 @@ whether or not the current CPU is in an RCU read-side critical section.
The best that <tt>sync_sched_exp_handler()</tt> can do is to check
for idle, on the off-chance that the CPU went idle while the IPI
was in flight.
-If the CPU is idle, then tt>sync_sched_exp_handler()</tt> reports
+If the CPU is idle, then <tt>sync_sched_exp_handler()</tt> reports
the quiescent state.
<p>
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index 49690228b1c6..43c4e2f05f40 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -1306,8 +1306,6 @@ doing so would degrade real-time response.
<p>
This non-requirement appeared with preemptible RCU.
-If you need a grace period that waits on non-preemptible code regions, use
-<a href="#Sched Flavor">RCU-sched</a>.
<h2><a name="Parallelism Facts of Life">Parallelism Facts of Life</a></h2>
@@ -2165,14 +2163,9 @@ however, this is not a panacea because there would be severe restrictions
on what operations those callbacks could invoke.
<p>
-Perhaps surprisingly, <tt>synchronize_rcu()</tt>,
-<a href="#Bottom-Half Flavor"><tt>synchronize_rcu_bh()</tt></a>
-(<a href="#Bottom-Half Flavor">discussed below</a>),
-<a href="#Sched Flavor"><tt>synchronize_sched()</tt></a>,
+Perhaps surprisingly, <tt>synchronize_rcu()</tt> and
<tt>synchronize_rcu_expedited()</tt>,
-<tt>synchronize_rcu_bh_expedited()</tt>, and
-<tt>synchronize_sched_expedited()</tt>
-will all operate normally
+will operate normally
during very early boot, the reason being that there is only one CPU
and preemption is disabled.
This means that the call <tt>synchronize_rcu()</tt> (or friends)
@@ -2269,12 +2262,23 @@ Thankfully, RCU update-side primitives, including
The name notwithstanding, some Linux-kernel architectures
can have nested NMIs, which RCU must handle correctly.
Andy Lutomirski
-<a href="https://lkml.kernel.org/g/CALCETrXLq1y7e_dKFPgou-FKHB6Pu-r8+t-6Ds+8=va7anBWDA@mail.gmail.com">surprised me</a>
+<a href="https://lkml.kernel.org/r/CALCETrXLq1y7e_dKFPgou-FKHB6Pu-r8+t-6Ds+8=va7anBWDA@mail.gmail.com">surprised me</a>
with this requirement;
he also kindly surprised me with
-<a href="https://lkml.kernel.org/g/CALCETrXSY9JpW3uE6H8WYk81sg56qasA2aqmjMPsq5dOtzso=g@mail.gmail.com">an algorithm</a>
+<a href="https://lkml.kernel.org/r/CALCETrXSY9JpW3uE6H8WYk81sg56qasA2aqmjMPsq5dOtzso=g@mail.gmail.com">an algorithm</a>
that meets this requirement.
+<p>
+Furthermore, NMI handlers can be interrupted by what appear to RCU
+to be normal interrupts.
+One way that this can happen is for code that directly invokes
+<tt>rcu_irq_enter()</tt> and <tt>rcu_irq_exit()</tt> to be called
+from an NMI handler.
+This astonishing fact of life prompted the current code structure,
+which has <tt>rcu_irq_enter()</tt> invoking <tt>rcu_nmi_enter()</tt>
+and <tt>rcu_irq_exit()</tt> invoking <tt>rcu_nmi_exit()</tt>.
+And yes, I also learned of this requirement the hard way.
+
<h3><a name="Loadable Modules">Loadable Modules</a></h3>
<p>
@@ -2394,30 +2398,9 @@ when invoked from a CPU-hotplug notifier.
<p>
RCU depends on the scheduler, and the scheduler uses RCU to
protect some of its data structures.
-This means the scheduler is forbidden from acquiring
-the runqueue locks and the priority-inheritance locks
-in the middle of an outermost RCU read-side critical section unless either
-(1)&nbsp;it releases them before exiting that same
-RCU read-side critical section, or
-(2)&nbsp;interrupts are disabled across
-that entire RCU read-side critical section.
-This same prohibition also applies (recursively!) to any lock that is acquired
-while holding any lock to which this prohibition applies.
-Adhering to this rule prevents preemptible RCU from invoking
-<tt>rcu_read_unlock_special()</tt> while either runqueue or
-priority-inheritance locks are held, thus avoiding deadlock.
-
-<p>
-Prior to v4.4, it was only necessary to disable preemption across
-RCU read-side critical sections that acquired scheduler locks.
-In v4.4, expedited grace periods started using IPIs, and these
-IPIs could force a <tt>rcu_read_unlock()</tt> to take the slowpath.
-Therefore, this expedited-grace-period change required disabling of
-interrupts, not just preemption.
-
-<p>
-For RCU's part, the preemptible-RCU <tt>rcu_read_unlock()</tt>
-implementation must be written carefully to avoid similar deadlocks.
+The preemptible-RCU <tt>rcu_read_unlock()</tt>
+implementation must therefore be written carefully to avoid deadlocks
+involving the scheduler's runqueue and priority-inheritance locks.
In particular, <tt>rcu_read_unlock()</tt> must tolerate an
interrupt where the interrupt handler invokes both
<tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>.
@@ -2426,7 +2409,7 @@ negative nesting levels to avoid destructive recursion via
interrupt handler's use of RCU.
<p>
-This pair of mutual scheduler-RCU requirements came as a
+This scheduler-RCU requirement came as a
<a href="https://lwn.net/Articles/453002/">complete surprise</a>.
<p>
@@ -2437,9 +2420,28 @@ when running context-switch-heavy workloads when built with
<tt>CONFIG_NO_HZ_FULL=y</tt>
<a href="http://www.rdrop.com/users/paulmck/scalability/paper/BareMetal.2015.01.15b.pdf">did come as a surprise [PDF]</a>.
RCU has made good progress towards meeting this requirement, even
-for context-switch-have <tt>CONFIG_NO_HZ_FULL=y</tt> workloads,
+for context-switch-heavy <tt>CONFIG_NO_HZ_FULL=y</tt> workloads,
but there is room for further improvement.
+<p>
+In the past, it was forbidden to disable interrupts across an
+<tt>rcu_read_unlock()</tt> unless that interrupt-disabled region
+of code also included the matching <tt>rcu_read_lock()</tt>.
+Violating this restriction could result in deadlocks involving the
+scheduler's runqueue and priority-inheritance spinlocks.
+This restriction was lifted when interrupt-disabled calls to
+<tt>rcu_read_unlock()</tt> started deferring the reporting of
+the resulting RCU-preempt quiescent state until the end of that
+interrupts-disabled region.
+This deferred reporting means that the scheduler's runqueue and
+priority-inheritance locks cannot be held while reporting an RCU-preempt
+quiescent state, which lifts the earlier restriction, at least from
+a deadlock perspective.
+Unfortunately, real-time systems using RCU priority boosting may
+need this restriction to remain in effect because deferred
+quiescent-state reporting also defers deboosting, which in turn
+degrades real-time latencies.
+
<h3><a name="Tracing and RCU">Tracing and RCU</a></h3>
<p>
@@ -2850,15 +2852,22 @@ The other four flavors are listed below, with requirements for each
described in a separate section.
<ol>
-<li> <a href="#Bottom-Half Flavor">Bottom-Half Flavor</a>
-<li> <a href="#Sched Flavor">Sched Flavor</a>
+<li> <a href="#Bottom-Half Flavor">Bottom-Half Flavor (Historical)</a>
+<li> <a href="#Sched Flavor">Sched Flavor (Historical)</a>
<li> <a href="#Sleepable RCU">Sleepable RCU</a>
<li> <a href="#Tasks RCU">Tasks RCU</a>
-<li> <a href="#Waiting for Multiple Grace Periods">
- Waiting for Multiple Grace Periods</a>
</ol>
-<h3><a name="Bottom-Half Flavor">Bottom-Half Flavor</a></h3>
+<h3><a name="Bottom-Half Flavor">Bottom-Half Flavor (Historical)</a></h3>
+
+<p>
+The RCU-bh flavor of RCU has since been expressed in terms of
+the other RCU flavors as part of a consolidation of the three
+flavors into a single flavor.
+The read-side API remains, and continues to disable softirq and to
+be accounted for by lockdep.
+Much of the material in this section is therefore strictly historical
+in nature.
<p>
The softirq-disable (AKA &ldquo;bottom-half&rdquo;,
@@ -2918,8 +2927,20 @@ includes
<tt>call_rcu_bh()</tt>,
<tt>rcu_barrier_bh()</tt>, and
<tt>rcu_read_lock_bh_held()</tt>.
+However, the update-side APIs are now simple wrappers for other RCU
+flavors, namely RCU-sched in CONFIG_PREEMPT=n kernels and RCU-preempt
+otherwise.
+
+<h3><a name="Sched Flavor">Sched Flavor (Historical)</a></h3>
-<h3><a name="Sched Flavor">Sched Flavor</a></h3>
+<p>
+The RCU-sched flavor of RCU has since been expressed in terms of
+the other RCU flavors as part of a consolidation of the three
+flavors into a single flavor.
+The read-side API remains, and continues to disable preemption and to
+be accounted for by lockdep.
+Much of the material in this section is therefore strictly historical
+in nature.
<p>
Before preemptible RCU, waiting for an RCU grace period had the
@@ -3139,94 +3160,14 @@ The tasks-RCU API is quite compact, consisting only of
<tt>call_rcu_tasks()</tt>,
<tt>synchronize_rcu_tasks()</tt>, and
<tt>rcu_barrier_tasks()</tt>.
-
-<h3><a name="Waiting for Multiple Grace Periods">
-Waiting for Multiple Grace Periods</a></h3>
-
-<p>
-Perhaps you have an RCU protected data structure that is accessed from
-RCU read-side critical sections, from softirq handlers, and from
-hardware interrupt handlers.
-That is three flavors of RCU, the normal flavor, the bottom-half flavor,
-and the sched flavor.
-How to wait for a compound grace period?
-
-<p>
-The best approach is usually to &ldquo;just say no!&rdquo; and
-insert <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>
-around each RCU read-side critical section, regardless of what
-environment it happens to be in.
-But suppose that some of the RCU read-side critical sections are
-on extremely hot code paths, and that use of <tt>CONFIG_PREEMPT=n</tt>
-is not a viable option, so that <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> are not free.
-What then?
-
-<p>
-You <i>could</i> wait on all three grace periods in succession, as follows:
-
-<blockquote>
-<pre>
- 1 synchronize_rcu();
- 2 synchronize_rcu_bh();
- 3 synchronize_sched();
-</pre>
-</blockquote>
-
-<p>
-This works, but triples the update-side latency penalty.
-In cases where this is not acceptable, <tt>synchronize_rcu_mult()</tt>
-may be used to wait on all three flavors of grace period concurrently:
-
-<blockquote>
-<pre>
- 1 synchronize_rcu_mult(call_rcu, call_rcu_bh, call_rcu_sched);
-</pre>
-</blockquote>
-
-<p>
-But what if it is necessary to also wait on SRCU?
-This can be done as follows:
-
-<blockquote>
-<pre>
- 1 static void call_my_srcu(struct rcu_head *head,
- 2 void (*func)(struct rcu_head *head))
- 3 {
- 4 call_srcu(&amp;my_srcu, head, func);
- 5 }
- 6
- 7 synchronize_rcu_mult(call_rcu, call_rcu_bh, call_rcu_sched, call_my_srcu);
-</pre>
-</blockquote>
-
-<p>
-If you needed to wait on multiple different flavors of SRCU
-(but why???), you would need to create a wrapper function resembling
-<tt>call_my_srcu()</tt> for each SRCU flavor.
-
-<table>
-<tr><th>&nbsp;</th></tr>
-<tr><th align="left">Quick Quiz:</th></tr>
-<tr><td>
- But what if I need to wait for multiple RCU flavors, but I also need
- the grace periods to be expedited?
-</td></tr>
-<tr><th align="left">Answer:</th></tr>
-<tr><td bgcolor="#ffffff"><font color="ffffff">
- If you are using expedited grace periods, there should be less penalty
- for waiting on them in succession.
- But if that is nevertheless a problem, you can use workqueues
- or multiple kthreads to wait on the various expedited grace
- periods concurrently.
-</font></td></tr>
-<tr><td>&nbsp;</td></tr>
-</table>
-
-<p>
-Again, it is usually better to adjust the RCU read-side critical sections
-to use a single flavor of RCU, but when this is not feasible, you can use
-<tt>synchronize_rcu_mult()</tt>.
+In <tt>CONFIG_PREEMPT=n</tt> kernels, trampolines cannot be preempted,
+so these APIs map to
+<tt>call_rcu()</tt>,
+<tt>synchronize_rcu()</tt>, and
+<tt>rcu_barrier()</tt>, respectively.
+In <tt>CONFIG_PREEMPT=y</tt> kernels, trampolines can be preempted,
+and these three APIs are therefore implemented by separate functions
+that check for voluntary context switches.
<h2><a name="Possible Future Changes">Possible Future Changes</a></h2>
@@ -3238,12 +3179,6 @@ grace-period state machine so as to avoid the need for the additional
latency.
<p>
-Expedited grace periods scan the CPUs, so their latency and overhead
-increases with increasing numbers of CPUs.
-If this becomes a serious problem on large systems, it will be necessary
-to do some redesign to avoid this scalability problem.
-
-<p>
RCU disables CPU hotplug in a few places, perhaps most notably in the
<tt>rcu_barrier()</tt> operations.
If there is a strong reason to use <tt>rcu_barrier()</tt> in CPU-hotplug
@@ -3288,11 +3223,6 @@ require extremely good demonstration of need and full exploration of
alternatives.
<p>
-There is an embarrassingly large number of flavors of RCU, and this
-number has been increasing over time.
-Perhaps it will be possible to combine some at some future date.
-
-<p>
RCU's various kthreads are reasonably recent additions.
It is quite likely that adjustments will be required to more gracefully
handle extreme loads.
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index 7d4ae110c2c9..721b3e426515 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -87,7 +87,3 @@ o Where can I find more information on RCU?
See the RTFP.txt file in this directory.
Or point your browser at http://www.rdrop.com/users/paulmck/RCU/.
-
-o What are all these files in this directory?
-
- See 00-INDEX for the list.
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index f99cf11b314b..491043fd976f 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -16,12 +16,9 @@ o A CPU looping in an RCU read-side critical section.
o A CPU looping with interrupts disabled.
-o A CPU looping with preemption disabled. This condition can
- result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh
- stalls.
+o A CPU looping with preemption disabled.
-o A CPU looping with bottom halves disabled. This condition can
- result in RCU-sched and RCU-bh stalls.
+o A CPU looping with bottom halves disabled.
o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
without invoking schedule(). If the looping in the kernel is
@@ -87,9 +84,9 @@ o A hardware failure. This is quite unlikely, but has occurred
This resulted in a series of RCU CPU stall warnings, eventually
leading the realization that the CPU had failed.
-The RCU, RCU-sched, RCU-bh, and RCU-tasks implementations have CPU stall
-warning. Note that SRCU does -not- have CPU stall warnings. Please note
-that RCU only detects CPU stalls when there is a grace period in progress.
+The RCU, RCU-sched, and RCU-tasks implementations have CPU stall warnings.
+Note that SRCU does -not- have CPU stall warnings. Please note that
+RCU only detects CPU stalls when there is a grace period in progress.
No grace period, no CPU stall warnings.
To diagnose the cause of the stall, inspect the stack traces.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index c2a7facf7ff9..86d82f7f3500 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -934,7 +934,8 @@ c. Do you need to treat NMI handlers, hardirq handlers,
d. Do you need RCU grace periods to complete even in the face
of softirq monopolization of one or more of the CPUs? For
example, is your code subject to network-based denial-of-service
- attacks? If so, you need RCU-bh.
+ attacks? If so, you should disable softirq across your readers,
+ for example, by using rcu_read_lock_bh().
e. Is your workload too update-intensive for normal use of
RCU, but inappropriate for other synchronization mechanisms?
diff --git a/Documentation/admin-guide/LSM/Yama.rst b/Documentation/admin-guide/LSM/Yama.rst
index 13468ea696b7..d0a060de3973 100644
--- a/Documentation/admin-guide/LSM/Yama.rst
+++ b/Documentation/admin-guide/LSM/Yama.rst
@@ -64,8 +64,8 @@ The sysctl settings (writable only with ``CAP_SYS_PTRACE``) are:
Using ``PTRACE_TRACEME`` is unchanged.
2 - admin-only attach:
- only processes with ``CAP_SYS_PTRACE`` may use ptrace
- with ``PTRACE_ATTACH``, or through children calling ``PTRACE_TRACEME``.
+ only processes with ``CAP_SYS_PTRACE`` may use ptrace, either with
+ ``PTRACE_ATTACH`` or through children calling ``PTRACE_TRACEME``.
3 - no attach:
no processes may use ptrace with ``PTRACE_ATTACH`` nor via
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 15ea785b2dfa..0797eec76be1 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -51,8 +51,7 @@ Documentation
- There are various README files in the Documentation/ subdirectory:
these typically contain kernel-specific installation notes for some
- drivers for example. See Documentation/00-INDEX for a list of what
- is contained in each file. Please read the
+ drivers for example. Please read the
:ref:`Documentation/process/changes.rst <changes>` file, as it
contains information about problems that may result from upgrading
your kernel.
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 184193bcb262..caf36105a1c7 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1857,8 +1857,10 @@ following two functions.
wbc_init_bio(@wbc, @bio)
Should be called for each bio carrying writeback data and
- associates the bio with the inode's owner cgroup. Can be
- called anytime between bio allocation and submission.
+ associates the bio with the inode's owner cgroup and the
+ corresponding request queue. This must be called after
+ a queue (device) has been associated with the bio and
+ before submission.
wbc_account_io(@wbc, @page, @bytes)
Should be called for each data segment being written out.
@@ -1877,7 +1879,7 @@ the configuration, the bio may be executed at a lower priority and if
the writeback session is holding shared resources, e.g. a journal
entry, may lead to priority inversion. There is no one easy solution
for the problem. Filesystems can try to work around specific problem
-cases by skipping wbc_init_bio() or using bio_associate_blkcg()
+cases by skipping wbc_init_bio() or using bio_associate_create_blkg()
directly.
diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst
new file mode 100644
index 000000000000..e506d3dae510
--- /dev/null
+++ b/Documentation/admin-guide/ext4.rst
@@ -0,0 +1,574 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========================
+ext4 General Information
+========================
+
+Ext4 is an evolution of the ext3 filesystem that incorporates scalability
+and reliability enhancements to support large (64 bit) filesystems, in
+keeping with increasing disk capacities and state-of-the-art feature
+requirements.
+
+Mailing list: linux-ext4@vger.kernel.org
+Web site: http://ext4.wiki.kernel.org
+
+
+Quick usage instructions
+========================
+
+Note: More extensive information for getting started with ext4 can be
+found at the ext4 wiki site at the URL:
+http://ext4.wiki.kernel.org/index.php/Ext4_Howto
+
+ - The latest version of e2fsprogs can be found at:
+
+ https://www.kernel.org/pub/linux/kernel/people/tytso/e2fsprogs/
+
+ or
+
+ http://sourceforge.net/project/showfiles.php?group_id=2406
+
+ or grab the latest git repository from:
+
+ https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git
+
+ - Create a new filesystem using the ext4 filesystem type:
+
+ # mke2fs -t ext4 /dev/hda1
+
+ Or to configure an existing ext3 filesystem to support extents:
+
+ # tune2fs -O extents /dev/hda1
+
+ If the filesystem was created with 128 byte inodes, it can be
+ converted to use 256 byte inodes for greater efficiency via:
+
+ # tune2fs -I 256 /dev/hda1
+
+ - Mounting:
+
+ # mount -t ext4 /dev/hda1 /wherever
+
+ - When comparing performance with other filesystems, it's always
+ important to try multiple workloads; very often a subtle change in a
+ workload parameter can completely change the ranking of which
+ filesystems do well compared to others. When comparing versus ext3,
+ note that ext4 enables write barriers by default, while ext3 does
+ not enable write barriers by default. So for a fair comparison it is
+ useful to explicitly specify whether barriers are enabled via the
+ '-o barrier=[0|1]' mount option for both ext3 and ext4 filesystems,
+ as shown below. When tuning ext3 for best benchmark numbers,
+ it is often worthwhile to try changing the data journaling mode; '-o
+ data=writeback' can be faster for some workloads. (Note however that
+ running mounted with data=writeback can potentially leave stale data
+ exposed in recently written files in case of an unclean shutdown,
+ which could be a security exposure in some situations.) Configuring
+ the filesystem with a large journal can also be helpful for
+ metadata-intensive workloads.
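+
+ For example, a fair barrier-for-barrier comparison might mount both
+ filesystems with barriers explicitly enabled (the device names and
+ mount points here are purely illustrative):
+
+   # mount -t ext3 -o barrier=1 /dev/hda1 /ext3
+   # mount -t ext4 -o barrier=1 /dev/hda2 /ext4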
+
+Features
+========
+
+Currently Available
+-------------------
+
+* ability to use filesystems > 16TB (e2fsprogs support not available yet)
+* extent format reduces metadata overhead (RAM, IO for access, transactions)
+* extent format more robust in the face of on-disk corruption, due to
+  magic numbers and internal redundancy in the tree
+* improved file allocation (multi-block alloc)
+* lift 32000 subdirectory limit imposed by i_links_count[1]
+* nsec timestamps for mtime, atime, ctime, create time
+* inode version field on disk (NFSv4, Lustre)
+* reduced e2fsck time via uninit_bg feature
+* journal checksumming for robustness, performance
+* persistent file preallocation (e.g for streaming media, databases)
+* ability to pack bitmaps and inode tables into larger virtual groups via the
+ flex_bg feature
+* large file support
+* inode allocation using large virtual block groups via flex_bg
+* delayed allocation
+* large block (up to pagesize) support
+* efficient new ordered mode in JBD2 and ext4 (avoids using buffer heads
+  to force the ordering)
+
+[1] Filesystems with a block size of 1k may see a limit imposed by the
+directory hash tree having a maximum depth of two.
+
+Options
+=======
+
+When mounting an ext4 filesystem, the following options are accepted;
+a combined mount example follows the list. (*) == default
+
+ ro
+ Mount filesystem read only. Note that ext4 will replay the journal (and
+ thus write to the partition) even when mounted "read only". The mount
+ options "ro,noload" can be used to prevent writes to the filesystem.
+
+ journal_checksum
+ Enable checksumming of the journal transactions. This will allow the
+ recovery code in e2fsck and the kernel to detect corruption in the
+ journal. It is a compatible change and will be ignored by older
+ kernels.
+
+ journal_async_commit
+ The commit block can be written to disk without waiting for the
+ descriptor blocks. If enabled, older kernels cannot mount the device.
+ This will enable 'journal_checksum' internally.
+
+ journal_path=path, journal_dev=devnum
+ When the external journal device's major/minor numbers have changed,
+ these options allow the user to specify the new journal location. The
+ journal device is identified through either its new major/minor numbers
+ encoded in devnum, or via a path to the device.
+
+ norecovery, noload
+ Don't load the journal on mounting. Note that if the filesystem was
+ not unmounted cleanly, skipping the journal replay will lead to the
+ filesystem containing inconsistencies that can lead to any number of
+ problems.
+
+ data=journal
+ All data are committed into the journal prior to being written into the
+ main file system. Enabling this mode will disable delayed allocation
+ and O_DIRECT support.
+
+ data=ordered (*)
+ All data are forced directly out to the main file system prior to its
+ metadata being committed to the journal.
+
+ data=writeback
+ Data ordering is not preserved, data may be written into the main file
+ system after its metadata has been committed to the journal.
+
+ commit=nrsec (*)
+ Ext4 can be told to sync all its data and metadata every 'nrsec'
+ seconds. The default value is 5 seconds. This means that if you lose
+ your power, you will lose as much as the latest 5 seconds of work (your
+ filesystem will not be damaged though, thanks to the journaling). This
+ default value (or any low value) will hurt performance, but it's good
+ for data-safety. Setting it to 0 will have the same effect as leaving
+ it at the default (5 seconds). Setting it to very large values will
+ improve performance.
+
+ barrier=<0|1(*)>, barrier(*), nobarrier
+ This enables/disables the use of write barriers in the jbd code.
+ barrier=0 disables, barrier=1 enables. This also requires an IO stack
+ which can support barriers, and if jbd gets an error on a barrier
+ write, it will disable barriers again with a warning. Write barriers enforce
+ proper on-disk ordering of journal commits, making volatile disk write
+ caches safe to use, at some performance penalty. If your disks are
+ battery-backed in one way or another, disabling barriers may safely
+ improve performance. The mount options "barrier" and "nobarrier" can
+ also be used to enable or disable barriers, for consistency with other
+ ext4 mount options.
+
+ inode_readahead_blks=n
+ This tuning parameter controls the maximum number of inode table blocks
+ that ext4's inode table readahead algorithm will pre-read into the
+ buffer cache. The default value is 32 blocks.
+
+ nouser_xattr
+ Disables Extended User Attributes. See the attr(5) manual page for
+ more information about extended attributes.
+
+ noacl
+ This option disables POSIX Access Control List support. If ACL support
+ is enabled in the kernel configuration (CONFIG_EXT4_FS_POSIX_ACL), ACL
+ is enabled by default on mount. See the acl(5) manual page for more
+ information about ACLs.
+
+ bsddf (*)
+ Make 'df' act like BSD.
+
+ minixdf
+ Make 'df' act like Minix.
+
+ debug
+ Extra debugging information is sent to syslog.
+
+ abort
+ Simulate the effects of calling ext4_abort() for debugging purposes.
+ This is normally used while remounting a filesystem which is already
+ mounted.
+
+ errors=remount-ro
+ Remount the filesystem read-only on an error.
+
+ errors=continue
+ Keep going on a filesystem error.
+
+ errors=panic
+ Panic and halt the machine if an error occurs. (These mount options
+ override the errors behavior specified in the superblock, which can be
+ configured using tune2fs)
+
+ data_err=ignore(*)
+ Just print an error message if an error occurs in a file data buffer in
+ ordered mode.
+
+ data_err=abort
+ Abort the journal if an error occurs in a file data buffer in ordered
+ mode.
+
+ grpid | bsdgroups
+ New objects have the group ID of their parent.
+
+ nogrpid (*) | sysvgroups
+ New objects have the group ID of their creator.
+
+ resgid=n
+ The group ID which may use the reserved blocks.
+
+ resuid=n
+ The user ID which may use the reserved blocks.
+
+ sb=
+ Use alternate superblock at this location.
+
+ quota, noquota, grpquota, usrquota
+ These options are ignored by the filesystem. They are used only by
+ quota tools to recognize volumes where quota should be turned on. See
+ documentation in the quota-tools package for more details
+ (http://sourceforge.net/projects/linuxquota).
+
+ jqfmt=<quota type>, usrjquota=<file>, grpjquota=<file>
+ These options tell filesystem details about quota so that quota
+ information can be properly updated during journal replay. They replace
+ the above quota options. See documentation in the quota-tools package
+ for more details (http://sourceforge.net/projects/linuxquota).
+
+ stripe=n
+ Number of filesystem blocks that mballoc will try to use for allocation
+ size and alignment. For RAID5/6 systems this should be the number of
+ data disks * RAID chunk size in file system blocks.
+
+ delalloc (*)
+ Defer block allocation until just before ext4 writes out the block(s)
+ in question. This allows ext4 to make better allocation decisions more
+ efficiently.
+
+ nodelalloc
+ Disable delayed allocation. Blocks are allocated when the data is
+ copied from userspace to the page cache, either via the write(2) system
+ call or when an mmap'ed page which was previously unallocated is
+ written for the first time.
+
+ max_batch_time=usec
+ Maximum amount of time ext4 should wait for additional filesystem
+ operations to be batched together with a synchronous write operation.
+ Since a synchronous write operation is going to force a commit and then
+ wait for the I/O to complete, it doesn't cost much, and can be a huge
+ throughput win, to wait a short time to see if any other
+ transactions can piggyback on the synchronous write. The algorithm
+ used is designed to automatically tune for the speed of the disk, by
+ measuring the amount of time (on average) that it takes to finish
+ committing a transaction. Call this time the "commit time". If the
+ time that the transaction has been running is less than the commit
+ time, ext4 will try sleeping for the commit time to see if other
+ operations will join the transaction. The commit time is capped by
+ the max_batch_time, which defaults to 15000us (15ms). This
+ optimization can be turned off entirely by setting max_batch_time to 0.
+
+ min_batch_time=usec
+ This parameter sets the commit time (as described above) to be at least
+ min_batch_time. It defaults to zero microseconds. Increasing this
+ parameter may improve the throughput of multi-threaded, synchronous
+ workloads on very fast disks, at the cost of increasing latency.
+
+ journal_ioprio=prio
+ The I/O priority (from 0 to 7, where 0 is the highest priority) which
+ should be used for I/O operations submitted by kjournald2 during a
+ commit operation. This defaults to 3, which is a slightly higher
+ priority than the default I/O priority.
+
+ auto_da_alloc(*), noauto_da_alloc
+ Many broken applications don't use fsync() when replacing existing
+ files via patterns such as fd = open("foo.new")/write(fd,..)/close(fd)/
+ rename("foo.new", "foo"), or worse yet, fd = open("foo",
+ O_TRUNC)/write(fd,..)/close(fd). If auto_da_alloc is enabled, ext4
+ will detect the replace-via-rename and replace-via-truncate patterns
+ and force any delayed allocation blocks to be allocated such that at
+ the next journal commit, in the default data=ordered mode, the data
+ blocks of the new file are forced to disk before the rename() operation
+ is committed. This provides roughly the same level of guarantees as
+ ext3, and avoids the "zero-length" problem that can happen when a
+ system crashes before the delayed allocation blocks are forced to disk.
+
+ noinit_itable
+ Do not initialize any uninitialized inode table blocks in the
+ background. This feature may be used by installation CDs so that the
+ install process can complete as quickly as possible; the inode table
+ initialization process would then be deferred until the next time the
+ file system is unmounted.
+
+ init_itable=n
+ The lazy itable init code will wait n times the number of milliseconds
+ it took to zero out the previous block group's inode table. This
+ minimizes the impact on system performance while the file system's
+ inode table is being initialized.
+
+ discard, nodiscard(*)
+ Controls whether ext4 should issue discard/TRIM commands to the
+ underlying block device when blocks are freed. This is useful for SSD
+ devices and sparse/thinly-provisioned LUNs, but it is off by default
+ until sufficient testing has been done.
+
+ nouid32
+ Disables 32-bit UIDs and GIDs. This is for interoperability with
+ older kernels which only store and expect 16-bit values.
+
+ block_validity(*), noblock_validity
+ These options enable or disable the in-kernel facility for tracking
+ filesystem metadata blocks within internal data structures. This
+ allows the multi-block allocator and other routines to notice bugs or
+ corrupted allocation bitmaps which cause blocks to be allocated that
+ overlap with filesystem metadata blocks.
+
+ dioread_lock, dioread_nolock
+ Controls whether or not ext4 should use DIO read locking. If the
+ dioread_nolock option is specified, ext4 will allocate an uninitialized
+ extent before buffered writes and convert the extent to initialized
+ after the IO completes. This approach allows the ext4 code to avoid
+ using the inode mutex, which improves scalability on high speed
+ storage. However, this does not work with data journaling, and the
+ dioread_nolock option will be ignored with a kernel warning. Note that
+ the dioread_nolock code path is only used for extent-based files.
+ Because of the restrictions this option imposes, it is off by default
+ (i.e. dioread_lock).
+
+ max_dir_size_kb=n
+ This limits the size of directories so that any attempt to expand them
+ beyond the specified limit in kilobytes will cause an ENOSPC error.
+ This is useful in memory constrained environments, where a very large
+ directory can cause severe performance problems or even provoke the Out
+ Of Memory killer. (For example, if there is only 512MB of memory
+ available, a 176MB directory may seriously cramp the system's style.)
+
+ i_version
+ Enable 64-bit inode version support. This option is off by default.
+
+ dax
+ Use direct access (no page cache). See
+ Documentation/filesystems/dax.txt. Note that this option is
+ incompatible with data=journal.
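+
+As a combined illustration, several of the options above can be given in
+a single mount invocation; the device, mount point, and tuning values
+below are examples only::
+
+  # mount -t ext4 -o barrier=1,data=ordered,commit=30,errors=remount-ro /dev/sda1 /mnt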
+
+Data Mode
+=========
+There are 3 different data modes:
+
+* writeback mode
+
+ In data=writeback mode, ext4 does not journal data at all. This mode provides
+ a similar level of journaling as that of XFS, JFS, and ReiserFS in its default
+ mode - metadata journaling. A crash+recovery can cause incorrect data to
+ appear in files which were written shortly before the crash. This mode will
+ typically provide the best ext4 performance.
+
+* ordered mode
+
+ In data=ordered mode, ext4 only officially journals metadata, but it logically
+ groups metadata information related to data changes with the data blocks into
+ a single unit called a transaction. When it's time to write the new metadata
+ out to disk, the associated data blocks are written first. In general, this
+ mode performs slightly slower than writeback but significantly faster than
+ journal mode.
+
+* journal mode
+
+ data=journal mode provides full data and metadata journaling. All new data is
+ written to the journal first, and then to its final location. In the event of
+ a crash, the journal can be replayed, bringing both data and metadata into a
+ consistent state. This mode is the slowest except when data needs to be read
+ from and written to disk at the same time, where it outperforms all
+ other modes. Enabling this mode will disable delayed allocation and O_DIRECT
+ support.
+
+/proc entries
+=============
+
+Information about mounted ext4 file systems can be found in
+/proc/fs/ext4. Each mounted filesystem will have a directory in
+/proc/fs/ext4 based on its device name (e.g., /proc/fs/ext4/hdc or
+/proc/fs/ext4/dm-0). The files in each per-device directory are shown
+in the table below.
+
+Files in /proc/fs/ext4/<devname>
+
+ mb_groups
+ details of multiblock allocator buddy cache of free blocks
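+
+For example, for a filesystem on a (hypothetical) device sda1::
+
+  # cat /proc/fs/ext4/sda1/mb_groups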
+
+/sys entries
+============
+
+Information about mounted ext4 file systems can be found in
+/sys/fs/ext4. Each mounted filesystem will have a directory in
+/sys/fs/ext4 based on its device name (e.g., /sys/fs/ext4/hdc or
+/sys/fs/ext4/dm-0). The files in each per-device directory are shown
+in the table below.
+
+Files in /sys/fs/ext4/<devname>:
+
+(see also Documentation/ABI/testing/sysfs-fs-ext4)
+
+ delayed_allocation_blocks
+ This file is read-only and shows the number of blocks that are dirty in
+ the page cache, but which do not have their location in the filesystem
+ allocated yet.
+
+ inode_goal
+ Tuning parameter which (if non-zero) controls the goal inode used by
+ the inode allocator in preference to all other allocation heuristics.
+ This is intended for debugging use only, and should be 0 on production
+ systems.
+
+ inode_readahead_blks
+ Tuning parameter which controls the maximum number of inode table
+ blocks that ext4's inode table readahead algorithm will pre-read into
+ the buffer cache.
+
+ lifetime_write_kbytes
+ This file is read-only and shows the number of kilobytes of data that
+ have been written to this filesystem since it was created.
+
+ max_writeback_mb_bump
+ The maximum number of megabytes the writeback code will try to write
+ out before moving on to another inode.
+
+ mb_group_prealloc
+ The multiblock allocator will round up allocation requests to a
+ multiple of this tuning parameter if the stripe size is not set in the
+ ext4 superblock.
+
+ mb_max_to_scan
+ The maximum number of extents the multiblock allocator will search to
+ find the best extent.
+
+ mb_min_to_scan
+ The minimum number of extents the multiblock allocator will search to
+ find the best extent.
+
+ mb_order2_req
+ Tuning parameter which controls the minimum size for requests (as a
+ power of 2) where the buddy cache is used.
+
+ mb_stats
+ Controls whether the multiblock allocator should collect statistics,
+ which are shown during the unmount. 1 means to collect statistics, 0
+ means not to collect statistics.
+
+ mb_stream_req
+ Files which have fewer blocks than this tunable parameter will have
+ their blocks allocated out of a block group specific preallocation
+ pool, so that small files are packed closely together. Each large file
+ will have its blocks allocated out of its own unique preallocation
+ pool.
+
+ session_write_kbytes
+ This file is read-only and shows the number of kilobytes of data that
+ have been written to this filesystem since it was mounted.
+
+ reserved_clusters
+ This is a read-write file and contains the number of reserved clusters
+ in the file system which will be used in specific situations to avoid
+ costly zeroout, unexpected ENOSPC, or possible data loss. The default
+ is 2% or 4096 clusters, whichever is smaller, and this can be changed;
+ however, it can never exceed the number of clusters in the file system.
+ If there is not enough space for the reserved space when mounting, the
+ mount will _not_ fail.
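+
+The tunables above are plain sysfs attributes, so they can be inspected
+and changed with ordinary reads and writes; the device name and value
+below are examples only::
+
+  # cat /sys/fs/ext4/sda1/lifetime_write_kbytes
+  # echo 64 > /sys/fs/ext4/sda1/inode_readahead_blks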
+
+Ioctls
+======
+
+There is some Ext4 specific functionality which can be accessed by applications
+through the system call interfaces. The list of all Ext4 specific ioctls is
+shown in the table below.
+
+Table of Ext4 specific ioctls
+
+ EXT4_IOC_GETFLAGS
+ Get additional attributes associated with the inode. The ioctl argument is
+ an integer bitfield, with bit values described in ext4.h. This ioctl is
+ an alias for FS_IOC_GETFLAGS.
+
+ EXT4_IOC_SETFLAGS
+ Set additional attributes associated with the inode. The ioctl argument is
+ an integer bitfield, with bit values described in ext4.h. This ioctl is
+ an alias for FS_IOC_SETFLAGS.
+
+ EXT4_IOC_GETVERSION, EXT4_IOC_GETVERSION_OLD
+ Get the inode i_generation number stored for each inode. The
+ i_generation number is normally changed only when a new inode is
+ created and it is particularly useful for network filesystems. The '_OLD'
+ version of this ioctl is an alias for FS_IOC_GETVERSION.
+
+ EXT4_IOC_SETVERSION, EXT4_IOC_SETVERSION_OLD
+ Set the inode i_generation number stored for each inode. The '_OLD'
+ version of this ioctl is an alias for FS_IOC_SETVERSION.
+
+ EXT4_IOC_GROUP_EXTEND
+ This ioctl has the same purpose as the resize mount option. It allows
+ resizing the filesystem to the end of the last existing block group;
+ further resizing has to be done with resize2fs, either online or
+ offline. The argument points to the unsigned long number representing
+ the new block count of the filesystem.
+
+ EXT4_IOC_MOVE_EXT
+ Move the block extents from orig_fd (the one this ioctl is pointing to)
+ to the donor_fd (the one specified in move_extent structure passed as
+ an argument to this ioctl). Then, exchange inode metadata between
+ orig_fd and donor_fd. This is especially useful for online
+ defragmentation, because the allocator has the opportunity to allocate
+ moved blocks better, ideally into one contiguous extent.
+
+ EXT4_IOC_GROUP_ADD
+ Add a new group descriptor to an existing or new group descriptor
+ block. The new group descriptor is described by ext4_new_group_input
+ structure, which is passed as an argument to this ioctl. This is
+ especially useful in conjunction with EXT4_IOC_GROUP_EXTEND, which
+ allows online resize of the filesystem to the end of the last existing
+ block group. Those two ioctls combined are used by the userspace
+ online resize tool (e.g. resize2fs).
+
+ EXT4_IOC_MIGRATE
+ This ioctl operates on the filesystem itself. It converts (migrates)
+ ext3 indirect block mapped inode to ext4 extent mapped inode by walking
+ through indirect block mapping of the original inode and converting
+ contiguous block ranges into ext4 extents of the temporary inode. Then,
+ inodes are swapped. This ioctl might help when migrating from an ext3
+ to an ext4 filesystem, however the suggestion is to create a fresh
+ ext4 filesystem and copy the data from a backup. Note that the
+ filesystem has to support extents for this ioctl to work.
+
+ EXT4_IOC_ALLOC_DA_BLKS
+ Force all of the delayed allocation blocks to be allocated to preserve
+ application-expected ext3 behaviour. Note that this will also start
+ triggering a write of the data blocks, but this behaviour may change in
+ the future as it is not necessary and has been done this way only for
+ the sake of simplicity.
+
+ EXT4_IOC_RESIZE_FS
+ Resize the filesystem to a new size. The number of blocks of the
+ resized filesystem is passed in via a 64 bit integer argument. The
+ kernel allocates the bitmaps and inode table; the userspace tool thus
+ just passes the new number of blocks.
+
+ EXT4_IOC_SWAP_BOOT
+ Swap i_blocks and associated attributes (like i_size, i_flags, ...)
+ from the specified inode with inode EXT4_BOOT_LOADER_INO
+ (#5). This is typically used to store a boot loader in a secure part of
+ the filesystem, where it can't be changed by a normal user by accident.
+ The data blocks of the previous boot loader will be associated with the
+ given inode.
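+
+In practice, most of these ioctls are exercised through userspace tools
+from e2fsprogs rather than called directly; for example, resize2fs uses
+EXT4_IOC_RESIZE_FS for online resizing and e4defrag uses
+EXT4_IOC_MOVE_EXT for online defragmentation. The device and file below
+are examples only::
+
+  # resize2fs /dev/sda1 4T
+  # e4defrag /mnt/somefile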
+
+References
+==========
+
+kernel source: <file:fs/ext4/>
+ <file:fs/jbd2/>
+
+programs: http://e2fsprogs.sourceforge.net/
+
+useful links: http://fedoraproject.org/wiki/ext3-devel
+ http://www.bullopensource.org/ext4/
+ http://ext4.wiki.kernel.org/index.php/Main_Page
+ http://fedoraproject.org/wiki/Features/Ext4
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 0873685bab0f..965745d5fb9a 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -71,6 +71,7 @@ configure specific aspects of kernel behavior to your liking.
java
ras
bcache
+ ext4
pm/index
thunderbolt
LSM/index
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 92eb1f42240d..8022d902e770 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -856,6 +856,11 @@
causing system reset or hang due to sending
INIT from AP to BSP.
+ disable_counter_freezing [HW]
+ Disable Intel PMU counter freezing feature.
+ The feature only exists starting from
+ Arch Perfmon v4 (Skylake and newer).
+
disable_ddw [PPC/PSERIES]
Disable Dynamic DMA Window support. Use this if
to workaround buggy firmware.
@@ -1385,6 +1390,11 @@
hvc_iucv_allow= [S390] Comma-separated list of z/VM user IDs.
If specified, z/VM IUCV HVC accepts connections
from listed z/VM user IDs only.
+
+ hv_nopvspin [X86,HYPER_V] Disables the paravirt spinlock optimizations
+ which allow the hypervisor to 'idle' the
+ guest on lock contention.
+
keep_bootcon [KNL]
Do not unregister boot console at start. This is only
useful for debugging when something happens in the window
@@ -1754,7 +1764,7 @@
Format: { "0" | "1" }
0 - Use IOMMU translation for DMA.
1 - Bypass the IOMMU for DMA.
- unset - Use IOMMU translation for DMA.
+ unset - Use value of CONFIG_IOMMU_DEFAULT_PASSTHROUGH.
io7= [HW] IO7 for Marvel based alpha systems
See comment before marvel_specify_io7 in
@@ -2274,6 +2284,8 @@
ltpc= [NET]
Format: <io>,<irq>,<dma>
+ lsm.debug [SECURITY] Enable LSM initialization debugging output.
+
machvec= [IA-64] Force the use of a particular machine-vector
(machvec) in a generic kernel.
Example: machvec=hpzx1_swiotlb
@@ -3540,14 +3552,14 @@
In kernels built with CONFIG_RCU_NOCB_CPU=y, set
the specified list of CPUs to be no-callback CPUs.
- Invocation of these CPUs' RCU callbacks will
- be offloaded to "rcuox/N" kthreads created for
- that purpose, where "x" is "b" for RCU-bh, "p"
- for RCU-preempt, and "s" for RCU-sched, and "N"
- is the CPU number. This reduces OS jitter on the
- offloaded CPUs, which can be useful for HPC and
- real-time workloads. It can also improve energy
- efficiency for asymmetric multiprocessors.
+ Invocation of these CPUs' RCU callbacks will be
+ offloaded to "rcuox/N" kthreads created for that
+ purpose, where "x" is "p" for RCU-preempt, and
+ "s" for RCU-sched, and "N" is the CPU number.
+ This reduces OS jitter on the offloaded CPUs,
+ which can be useful for HPC and real-time
+ workloads. It can also improve energy efficiency
+ for asymmetric multiprocessors.
rcu_nocb_poll [KNL]
Rather than requiring that offloaded CPUs
@@ -3601,7 +3613,14 @@
Set required age in jiffies for a
given grace period before RCU starts
soliciting quiescent-state help from
- rcu_note_context_switch().
+ rcu_note_context_switch(). If not specified, the
+ kernel will calculate a value based on the most
+ recent settings of rcutree.jiffies_till_first_fqs
+ and rcutree.jiffies_till_next_fqs.
+ This calculated value may be viewed in
+ rcutree.jiffies_to_sched_qs. Any attempt to
+ set rcutree.jiffies_to_sched_qs will be
+ cheerfully overwritten.
rcutree.jiffies_till_first_fqs= [KNL]
Set delay from grace-period initialization to
@@ -3869,12 +3888,6 @@
rcupdate.rcu_self_test= [KNL]
Run the RCU early boot self tests
- rcupdate.rcu_self_test_bh= [KNL]
- Run the RCU bh early boot self tests
-
- rcupdate.rcu_self_test_sched= [KNL]
- Run the RCU sched early boot self tests
-
rdinit= [KNL]
Format: <full_path>
Run specified binary instead of /init from the ramdisk,
@@ -4610,7 +4623,8 @@
usbcore.old_scheme_first=
[USB] Start with the old device initialization
- scheme (default 0 = off).
+ scheme, applies only to low and full-speed devices
+ (default 0 = off).
usbcore.usbfs_memory_mb=
[USB] Memory limit (in MB) for buffers allocated by
diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
index bae52b845de0..b85dd80510b0 100644
--- a/Documentation/admin-guide/l1tf.rst
+++ b/Documentation/admin-guide/l1tf.rst
@@ -553,7 +553,7 @@ When nested virtualization is in use, three operating systems are involved:
the bare metal hypervisor, the nested hypervisor and the nested virtual
machine. VMENTER operations from the nested hypervisor into the nested
guest will always be processed by the bare metal hypervisor. If KVM is the
-bare metal hypervisor it wiil:
+bare metal hypervisor it will:
- Flush the L1D cache on every switch from the nested hypervisor to the
nested virtual machine, so that the nested hypervisor's secrets are not
diff --git a/Documentation/admin-guide/mm/index.rst b/Documentation/admin-guide/mm/index.rst
index ceead68c2df7..8edb35f11317 100644
--- a/Documentation/admin-guide/mm/index.rst
+++ b/Documentation/admin-guide/mm/index.rst
@@ -29,6 +29,7 @@ the Linux memory management.
hugetlbpage
idle_page_tracking
ksm
+ memory-hotplug
numa_memory_policy
pagemap
soft-dirty
diff --git a/Documentation/memory-hotplug.txt b/Documentation/admin-guide/mm/memory-hotplug.rst
index 7f49ebf3ddb2..25157aec5b31 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/admin-guide/mm/memory-hotplug.rst
@@ -1,3 +1,5 @@
+.. _admin_guide_memory_hotplug:
+
==============
Memory Hotplug
==============
@@ -9,39 +11,19 @@ This document is about memory hotplug including how-to-use and current status.
Because Memory Hotplug is still under development, contents of this text will
be changed often.
-.. CONTENTS
-
- 1. Introduction
- 1.1 purpose of memory hotplug
- 1.2. Phases of memory hotplug
- 1.3. Unit of Memory online/offline operation
- 2. Kernel Configuration
- 3. sysfs files for memory hotplug
- 4. Physical memory hot-add phase
- 4.1 Hardware(Firmware) Support
- 4.2 Notify memory hot-add event by hand
- 5. Logical Memory hot-add phase
- 5.1. State of memory
- 5.2. How to online memory
- 6. Logical memory remove
- 6.1 Memory offline and ZONE_MOVABLE
- 6.2. How to offline memory
- 7. Physical memory remove
- 8. Memory hotplug event notifier
- 9. Future Work List
-
+.. contents:: :local:
.. note::
(1) x86_64's has special implementation for memory hotplug.
This text does not describe it.
- (2) This text assumes that sysfs is mounted at /sys.
+ (2) This text assumes that sysfs is mounted at ``/sys``.
Introduction
============
-purpose of memory hotplug
+Purpose of memory hotplug
-------------------------
Memory Hotplug allows users to increase/decrease the amount of memory.
@@ -57,7 +39,6 @@ hardware which supports memory power management.
Linux memory hotplug is designed for both purpose.
-
Phases of memory hotplug
------------------------
@@ -92,7 +73,6 @@ phase by hand.
(However, if you writes udev's hotplug scripts for memory hotplug, these
phases can be execute in seamless way.)
-
Unit of Memory online/offline operation
---------------------------------------
@@ -107,10 +87,9 @@ unit upon which memory online/offline operations are to be performed. The
default size of a memory block is the same as memory section size unless an
architecture specifies otherwise. (see :ref:`memory_hotplug_sysfs_files`.)
-To determine the size (in bytes) of a memory block please read this file:
-
-/sys/devices/system/memory/block_size_bytes
+To determine the size (in bytes) of a memory block please read this file::
+ /sys/devices/system/memory/block_size_bytes
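+
+For example, on a system with 128MiB memory blocks the value (printed
+in hex) might read as follows; the output shown is purely illustrative::
+
+  # cat /sys/devices/system/memory/block_size_bytes
+  8000000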
Kernel Configuration
====================
@@ -119,22 +98,22 @@ To use memory hotplug feature, kernel must be compiled with following
config options.
- For all memory hotplug:
- - Memory model -> Sparse Memory (CONFIG_SPARSEMEM)
- - Allow for memory hot-add (CONFIG_MEMORY_HOTPLUG)
+ - Memory model -> Sparse Memory (``CONFIG_SPARSEMEM``)
+ - Allow for memory hot-add (``CONFIG_MEMORY_HOTPLUG``)
- To enable memory removal, the following are also necessary:
- - Allow for memory hot remove (CONFIG_MEMORY_HOTREMOVE)
- - Page Migration (CONFIG_MIGRATION)
+ - Allow for memory hot remove (``CONFIG_MEMORY_HOTREMOVE``)
+ - Page Migration (``CONFIG_MIGRATION``)
- For ACPI memory hotplug, the following are also necessary:
- - Memory hotplug (under ACPI Support menu) (CONFIG_ACPI_HOTPLUG_MEMORY)
+ - Memory hotplug (under ACPI Support menu) (``CONFIG_ACPI_HOTPLUG_MEMORY``)
- This option can be kernel module.
- As a related configuration, if your box has a feature of NUMA-node hotplug
via ACPI, then this option is necessary too.
- ACPI0004,PNP0A05 and PNP0A06 Container Driver (under ACPI Support menu)
- (CONFIG_ACPI_CONTAINER).
+ (``CONFIG_ACPI_CONTAINER``).
This option can be kernel module too.
@@ -145,10 +124,11 @@ sysfs files for memory hotplug
==============================
All memory blocks have their device information in sysfs. Each memory block
-is described under /sys/devices/system/memory as:
+is described under ``/sys/devices/system/memory`` as::
/sys/devices/system/memory/memoryXXX
- (XXX is the memory block id.)
+
+where XXX is the memory block id.
For the memory block covered by the sysfs directory. It is expected that all
memory sections in this range are present and no memory holes exist in the
@@ -157,7 +137,7 @@ the existence of one should not affect the hotplug capabilities of the memory
block.
For example, assume 1GiB memory block size. A device for a memory starting at
-0x100000000 is /sys/device/system/memory/memory4::
+0x100000000 is ``/sys/device/system/memory/memory4``::
(0x100000000 / 1Gib = 4)
@@ -165,11 +145,11 @@ This device covers address range [0x100000000 ... 0x140000000)
Under each memory block, you can see 5 files:
-- /sys/devices/system/memory/memoryXXX/phys_index
-- /sys/devices/system/memory/memoryXXX/phys_device
-- /sys/devices/system/memory/memoryXXX/state
-- /sys/devices/system/memory/memoryXXX/removable
-- /sys/devices/system/memory/memoryXXX/valid_zones
+- ``/sys/devices/system/memory/memoryXXX/phys_index``
+- ``/sys/devices/system/memory/memoryXXX/phys_device``
+- ``/sys/devices/system/memory/memoryXXX/state``
+- ``/sys/devices/system/memory/memoryXXX/removable``
+- ``/sys/devices/system/memory/memoryXXX/valid_zones``
=================== ============================================================
``phys_index`` read-only and contains memory block id, same as XXX.
@@ -207,13 +187,15 @@ Under each memory block, you can see 5 files:
These directories/files appear after physical memory hotplug phase.
If CONFIG_NUMA is enabled the memoryXXX/ directories can also be accessed
-via symbolic links located in the /sys/devices/system/node/node* directories.
+via symbolic links located in the ``/sys/devices/system/node/node*`` directories.
+
+For example::
-For example:
-/sys/devices/system/node/node0/memory9 -> ../../memory/memory9
+ /sys/devices/system/node/node0/memory9 -> ../../memory/memory9
-A backlink will also be created:
-/sys/devices/system/memory/memory9/node0 -> ../../node/node0
+A backlink will also be created::
+
+ /sys/devices/system/memory/memory9/node0 -> ../../node/node0
.. _memory_hotplug_physical_mem:
@@ -240,7 +222,6 @@ If firmware supports NUMA-node hotplug, and defines an object _HID "ACPI0004",
calls hotplug code for all of objects which are defined in it.
If memory device is found, memory hotplug code will be called.
-
Notify memory hot-add event by hand
-----------------------------------
@@ -251,8 +232,9 @@ CONFIG_ARCH_MEMORY_PROBE and can be configured on powerpc, sh, and x86
if hotplug is supported, although for x86 this should be handled by ACPI
notification.
-Probe interface is located at
-/sys/devices/system/memory/probe
+The probe interface is located at::
+
+ /sys/devices/system/memory/probe
You can tell the physical address of new memory to the kernel by::
@@ -263,7 +245,6 @@ memory_block_size] memory range is hot-added. In this case, hotplug script is
not called (in current implementation). You'll have to online memory by
yourself. Please see :ref:`memory_hotplug_how_to_online_memory`.
-
Logical Memory hot-add phase
============================
@@ -301,7 +282,7 @@ This sets a global policy and impacts all memory blocks that will subsequently
be hotplugged. Currently offline blocks keep their state. It is possible, under
certain circumstances, that some memory blocks will be added but will fail to
online. User space tools can check their "state" files
-(/sys/devices/system/memory/memoryXXX/state) and try to online them manually.
+(``/sys/devices/system/memory/memoryXXX/state``) and try to online them manually.
If the automatic onlining wasn't requested, failed, or some memory block was
offlined it is possible to change the individual block's state by writing to the
@@ -334,8 +315,6 @@ available memory will be increased.
This may be changed in future.
-
-
Logical memory remove
=====================
@@ -413,88 +392,6 @@ Need more implementation yet....
- Notification completion of remove works by OS to firmware.
- Guard from remove if not yet.
-Memory hotplug event notifier
-=============================
-
-Hotplugging events are sent to a notification queue.
-
-There are six types of notification defined in include/linux/memory.h:
-
-MEM_GOING_ONLINE
- Generated before new memory becomes available in order to be able to
- prepare subsystems to handle memory. The page allocator is still unable
- to allocate from the new memory.
-
-MEM_CANCEL_ONLINE
- Generated if MEMORY_GOING_ONLINE fails.
-
-MEM_ONLINE
- Generated when memory has successfully brought online. The callback may
- allocate pages from the new memory.
-
-MEM_GOING_OFFLINE
- Generated to begin the process of offlining memory. Allocations are no
- longer possible from the memory but some of the memory to be offlined
- is still in use. The callback can be used to free memory known to a
- subsystem from the indicated memory block.
-
-MEM_CANCEL_OFFLINE
- Generated if MEMORY_GOING_OFFLINE fails. Memory is available again from
- the memory block that we attempted to offline.
-
-MEM_OFFLINE
- Generated after offlining memory is complete.
-
-A callback routine can be registered by calling::
-
- hotplug_memory_notifier(callback_func, priority)
-
-Callback functions with higher values of priority are called before callback
-functions with lower values.
-
-A callback function must have the following prototype::
-
- int callback_func(
- struct notifier_block *self, unsigned long action, void *arg);
-
-The first argument of the callback function (self) is a pointer to the block
-of the notifier chain that points to the callback function itself.
-The second argument (action) is one of the event types described above.
-The third argument (arg) passes a pointer of struct memory_notify::
-
- struct memory_notify {
- unsigned long start_pfn;
- unsigned long nr_pages;
- int status_change_nid_normal;
- int status_change_nid_high;
- int status_change_nid;
- }
-
-- start_pfn is start_pfn of online/offline memory.
-- nr_pages is # of pages of online/offline memory.
-- status_change_nid_normal is set node id when N_NORMAL_MEMORY of nodemask
- is (will be) set/clear, if this is -1, then nodemask status is not changed.
-- status_change_nid_high is set node id when N_HIGH_MEMORY of nodemask
- is (will be) set/clear, if this is -1, then nodemask status is not changed.
-- status_change_nid is set node id when N_MEMORY of nodemask is (will be)
- set/clear. It means a new(memoryless) node gets new memory by online and a
- node loses all memory. If this is -1, then nodemask status is not changed.
-
- If status_changed_nid* >= 0, callback should create/discard structures for the
- node if necessary.
-
-The callback routine shall return one of the values
-NOTIFY_DONE, NOTIFY_OK, NOTIFY_BAD, NOTIFY_STOP
-defined in include/linux/notifier.h
-
-NOTIFY_DONE and NOTIFY_OK have no effect on the further processing.
-
-NOTIFY_BAD is used as response to the MEM_GOING_ONLINE, MEM_GOING_OFFLINE,
-MEM_ONLINE, or MEM_OFFLINE action to cancel hotplugging. It stops
-further processing of the notification queue.
-
-NOTIFY_STOP stops further processing of the notification queue.
-
Future Work
===========
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index 8f1d3de449b5..ac6f5c597a56 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -465,6 +465,13 @@ Next, the following policy attributes have special meaning if
policy for the time interval between the last two invocations of the
driver's utilization update callback by the CPU scheduler for that CPU.
+One more policy attribute is present if the `HWP feature is enabled in the
+processor <Active Mode With HWP_>`_:
+
+``base_frequency``
+ Shows the base frequency of the CPU. Any frequency above this will be
+ in the turbo frequency range.
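+
+ For example (the policy directory and the value, in kHz, are
+ illustrative only)::
+
+   # cat /sys/devices/system/cpu/cpufreq/policy0/base_frequency
+   2100000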
+
The meaning of these attributes in the `passive mode <Passive Mode_>`_ is the
same as for other scaling drivers.
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
deleted file mode 100644
index b6e69fd371c4..000000000000
--- a/Documentation/arm/00-INDEX
+++ /dev/null
@@ -1,50 +0,0 @@
-00-INDEX
- - this file
-Booting
- - requirements for booting
-CCN.txt
- - Cache Coherent Network ring-bus and perf PMU driver.
-Interrupts
- - ARM Interrupt subsystem documentation
-IXP4xx
- - Intel IXP4xx Network processor.
-Netwinder
- - Netwinder specific documentation
-Porting
- - Symbol definitions for porting Linux to a new ARM machine.
-Setup
- - Kernel initialization parameters on ARM Linux
-README
- - General ARM documentation
-SA1100/
- - SA1100 documentation
-Samsung-S3C24XX/
- - S3C24XX ARM Linux Overview
-SPEAr/
- - ST SPEAr platform Linux Overview
-VFP/
- - Release notes for Linux Kernel Vector Floating Point support code
-cluster-pm-race-avoidance.txt
- - Algorithm for CPU and Cluster setup/teardown
-empeg/
- - Ltd's Empeg MP3 Car Audio Player
-firmware.txt
- - Secure firmware registration and calling.
-kernel_mode_neon.txt
- - How to use NEON instructions in kernel mode
-kernel_user_helpers.txt
- - Helper functions in kernel space made available for userspace.
-mem_alignment
- - alignment abort handler documentation
-memory.txt
- - description of the virtual memory layout
-nwfpe/
- - NWFPE floating point emulator documentation
-swp_emulation
- - SWP/SWPB emulation handler/logging description
-tcm.txt
- - ARM Tightly Coupled Memory
-uefi.txt
- - [U]EFI configuration and runtime services documentation
-vlocks.txt
- - Voting locks, low-level mechanism relying on memory system atomic writes.
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
index d6aff2c5e9e2..ea819ae024dd 100644
--- a/Documentation/arm64/elf_hwcaps.txt
+++ b/Documentation/arm64/elf_hwcaps.txt
@@ -78,11 +78,11 @@ HWCAP_EVTSTRM
HWCAP_AES
- Functionality implied by ID_AA64ISAR1_EL1.AES == 0b0001.
+ Functionality implied by ID_AA64ISAR0_EL1.AES == 0b0001.
HWCAP_PMULL
- Functionality implied by ID_AA64ISAR1_EL1.AES == 0b0010.
+ Functionality implied by ID_AA64ISAR0_EL1.AES == 0b0010.
HWCAP_SHA1
@@ -153,7 +153,7 @@ HWCAP_ASIMDDP
HWCAP_SHA512
- Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0002.
+ Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0010.
HWCAP_SVE
@@ -173,8 +173,12 @@ HWCAP_USCAT
HWCAP_ILRCPC
- Functionality implied by ID_AA64ISR1_EL1.LRCPC == 0b0002.
+ Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0010.
HWCAP_FLAGM
Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
+
+HWCAP_SSBS
+
+ Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010.
diff --git a/Documentation/arm64/hugetlbpage.txt b/Documentation/arm64/hugetlbpage.txt
new file mode 100644
index 000000000000..cfae87dc653b
--- /dev/null
+++ b/Documentation/arm64/hugetlbpage.txt
@@ -0,0 +1,38 @@
+HugeTLBpage on ARM64
+====================
+
+Hugepages rely on making efficient use of TLBs to improve the
+performance of address translations. The benefit depends on both -
+
+ - the size of hugepages
+ - the size of entries supported by the TLBs
+
+The ARM64 port supports two flavours of hugepages.
+
+1) Block mappings at the pud/pmd level
+--------------------------------------
+
+These are regular hugepages where a pmd or a pud page table entry points to a
+block of memory. Regardless of the supported size of entries in TLB, block
+mappings reduce the depth of page table walk needed to translate hugepage
+addresses.
+
+2) Using the Contiguous bit
+---------------------------
+
+The architecture provides a contiguous bit in the translation table entries
+(D4.5.3, ARM DDI 0487C.a) which hints to the MMU that the entry is one of a
+contiguous set of entries that can be cached in a single TLB entry.
+
+The contiguous bit is used in Linux to increase the mapping size at the pmd and
+pte (last) level. The number of supported contiguous entries varies by page size
+and level of the page table.
+
+The following hugepage sizes are supported -
+
+ CONT PTE PMD CONT PMD PUD
+ -------- --- -------- ---
+ 4K: 64K 2M 32M 1G
+ 16K: 2M 32M 1G
+ 64K: 2M 512M 16G
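+
+As a quick illustration, hugepages of a supported size can be reserved
+through the generic hugetlb sysfs interface. Assuming a 4K granule
+kernel, where the table above gives 2M (PMD) and 32M (CONT PMD) sizes,
+pools might be reserved as follows (illustrative values):
+
+  echo 16 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+  echo 4 > /sys/kernel/mm/hugepages/hugepages-32768kB/nr_hugepages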
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 3b2f2dd82225..76ccded8b74c 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -56,6 +56,7 @@ stable kernels.
| ARM | Cortex-A72 | #853709 | N/A |
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
+| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
deleted file mode 100644
index 8d55b4bbb5e2..000000000000
--- a/Documentation/block/00-INDEX
+++ /dev/null
@@ -1,34 +0,0 @@
-00-INDEX
- - This file
-bfq-iosched.txt
- - BFQ IO scheduler and its tunables
-biodoc.txt
- - Notes on the Generic Block Layer Rewrite in Linux 2.5
-biovecs.txt
- - Immutable biovecs and biovec iterators
-capability.txt
- - Generic Block Device Capability (/sys/block/<device>/capability)
-cfq-iosched.txt
- - CFQ IO scheduler tunables
-cmdline-partition.txt
- - how to specify block device partitions on kernel command line
-data-integrity.txt
- - Block data integrity
-deadline-iosched.txt
- - Deadline IO scheduler tunables
-ioprio.txt
- - Block io priorities (in CFQ scheduler)
-pr.txt
- - Block layer support for Persistent Reservations
-null_blk.txt
- - Null block for block-layer benchmarking.
-queue-sysfs.txt
- - Queue's sysfs entries
-request.txt
- - The members of struct request (in include/linux/blkdev.h)
-stat.txt
- - Block layer statistics in /sys/block/<device>/stat
-switching-sched.txt
- - Switching I/O schedulers at runtime
-writeback_cache_control.txt
- - Control of volatile write back caches
diff --git a/Documentation/blockdev/00-INDEX b/Documentation/blockdev/00-INDEX
deleted file mode 100644
index c08df56dd91b..000000000000
--- a/Documentation/blockdev/00-INDEX
+++ /dev/null
@@ -1,18 +0,0 @@
-00-INDEX
- - this file
-README.DAC960
- - info on Mylex DAC960/DAC1100 PCI RAID Controller Driver for Linux.
-cciss.txt
- - info, major/minor #'s for Compaq's SMART Array Controllers.
-cpqarray.txt
- - info on using Compaq's SMART2 Intelligent Disk Array Controllers.
-floppy.txt
- - notes and driver options for the floppy disk driver.
-mflash.txt
- - info on mGine m(g)flash driver for linux.
-nbd.txt
- - info on a TCP implementation of a network block device.
-paride.txt
- - information about the parallel port IDE subsystem.
-ramdisk.txt
- - short guide on how to set up and use the RAM disk.
diff --git a/Documentation/blockdev/README.DAC960 b/Documentation/blockdev/README.DAC960
deleted file mode 100644
index bd85fb9dc6e5..000000000000
--- a/Documentation/blockdev/README.DAC960
+++ /dev/null
@@ -1,756 +0,0 @@
- Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
-
- Version 2.2.11 for Linux 2.2.19
- Version 2.4.11 for Linux 2.4.12
-
- PRODUCTION RELEASE
-
- 11 October 2001
-
- Leonard N. Zubkoff
- Dandelion Digital
- lnz@dandelion.com
-
- Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
-
-
- INTRODUCTION
-
-Mylex, Inc. designs and manufactures a variety of high performance PCI RAID
-controllers. Mylex Corporation is located at 34551 Ardenwood Blvd., Fremont,
-California 94555, USA and can be reached at 510.796.6100 or on the World Wide
-Web at http://www.mylex.com. Mylex Technical Support can be reached by
-electronic mail at mylexsup@us.ibm.com, by voice at 510.608.2400, or by FAX at
-510.745.7715. Contact information for offices in Europe and Japan is available
-on their Web site.
-
-The latest information on Linux support for DAC960 PCI RAID Controllers, as
-well as the most recent release of this driver, will always be available from
-my Linux Home Page at URL "http://www.dandelion.com/Linux/". The Linux DAC960
-driver supports all current Mylex PCI RAID controllers including the new
-eXtremeRAID 2000/3000 and AcceleRAID 352/170/160 models which have an entirely
-new firmware interface from the older eXtremeRAID 1100, AcceleRAID 150/200/250,
-and DAC960PJ/PG/PU/PD/PL. See below for a complete controller list as well as
-minimum firmware version requirements. For simplicity, in most places this
-documentation refers to DAC960 generically rather than explicitly listing all
-the supported models.
-
-Driver bug reports should be sent via electronic mail to "lnz@dandelion.com".
-Please include with the bug report the complete configuration messages reported
-by the driver at startup, along with any subsequent system messages relevant to
-the controller's operation, and a detailed description of your system's
-hardware configuration. Driver bugs are actually quite rare; if you encounter
-problems with disks being marked offline, for example, please contact Mylex
-Technical Support as the problem is related to the hardware configuration
-rather than the Linux driver.
-
-Please consult the RAID controller documentation for detailed information
-regarding installation and configuration of the controllers. This document
-primarily provides information specific to the Linux support.
-
-
- DRIVER FEATURES
-
-The DAC960 RAID controllers are supported solely as high performance RAID
-controllers, not as interfaces to arbitrary SCSI devices. The Linux DAC960
-driver operates at the block device level, the same level as the SCSI and IDE
-drivers. Unlike other RAID controllers currently supported on Linux, the
-DAC960 driver is not dependent on the SCSI subsystem, and hence avoids all the
-complexity and unnecessary code that would be associated with an implementation
-as a SCSI driver. The DAC960 driver is designed for as high a performance as
-possible with no compromises or extra code for compatibility with lower
-performance devices. The DAC960 driver includes extensive error logging and
-online configuration management capabilities. Except for initial configuration
-of the controller and adding new disk drives, most everything can be handled
-from Linux while the system is operational.
-
-The DAC960 driver is architected to support up to 8 controllers per system.
-Each DAC960 parallel SCSI controller can support up to 15 disk drives per
-channel, for a maximum of 60 drives on a four channel controller; the fibre
-channel eXtremeRAID 3000 controller supports up to 125 disk drives per loop for
-a total of 250 drives. The drives installed on a controller are divided into
-one or more "Drive Groups", and then each Drive Group is subdivided further
-into 1 to 32 "Logical Drives". Each Logical Drive has a specific RAID Level
-and caching policy associated with it, and it appears to Linux as a single
-block device. Logical Drives are further subdivided into up to 7 partitions
-through the normal Linux and PC disk partitioning schemes. Logical Drives are
-also known as "System Drives", and Drive Groups are also called "Packs". Both
-terms are in use in the Mylex documentation; I have chosen to standardize on
-the more generic "Logical Drive" and "Drive Group".
-
-DAC960 RAID disk devices are named in the style of the obsolete Device File
-System (DEVFS). The device corresponding to Logical Drive D on Controller C
-is referred to as /dev/rd/cCdD, and the partitions are called /dev/rd/cCdDp1
-through /dev/rd/cCdDp7. For example, partition 3 of Logical Drive 5 on
-Controller 2 is referred to as /dev/rd/c2d5p3. Note that unlike with SCSI
-disks the device names will not change in the event of a disk drive failure.
-The DAC960 driver is assigned major numbers 48 - 55 with one major number per
-controller. The 8 bits of minor number are divided into 5 bits for the Logical
-Drive and 3 bits for the partition.
-
-
- SUPPORTED DAC960/AcceleRAID/eXtremeRAID PCI RAID CONTROLLERS
-
-The following list comprises the supported DAC960, AcceleRAID, and eXtremeRAID
-PCI RAID Controllers as of the date of this document. It is recommended that
-anyone purchasing a Mylex PCI RAID Controller not in the following table
-contact the author beforehand to verify that it is or will be supported.
-
-eXtremeRAID 3000
- 1 Wide Ultra-2/LVD SCSI channel
- 2 External Fibre FC-AL channels
- 233MHz StrongARM SA 110 Processor
- 64 Bit 33MHz PCI (backward compatible with 32 Bit PCI slots)
- 32MB/64MB ECC SDRAM Memory
-
-eXtremeRAID 2000
- 4 Wide Ultra-160 LVD SCSI channels
- 233MHz StrongARM SA 110 Processor
- 64 Bit 33MHz PCI (backward compatible with 32 Bit PCI slots)
- 32MB/64MB ECC SDRAM Memory
-
-AcceleRAID 352
- 2 Wide Ultra-160 LVD SCSI channels
- 100MHz Intel i960RN RISC Processor
- 64 Bit 33MHz PCI (backward compatible with 32 Bit PCI slots)
- 32MB/64MB ECC SDRAM Memory
-
-AcceleRAID 170
- 1 Wide Ultra-160 LVD SCSI channel
- 100MHz Intel i960RM RISC Processor
- 16MB/32MB/64MB ECC SDRAM Memory
-
-AcceleRAID 160 (AcceleRAID 170LP)
- 1 Wide Ultra-160 LVD SCSI channel
- 100MHz Intel i960RS RISC Processor
- Built in 16M ECC SDRAM Memory
- PCI Low Profile Form Factor - fit for 2U height
-
-eXtremeRAID 1100 (DAC1164P)
- 3 Wide Ultra-2/LVD SCSI channels
- 233MHz StrongARM SA 110 Processor
- 64 Bit 33MHz PCI (backward compatible with 32 Bit PCI slots)
- 16MB/32MB/64MB Parity SDRAM Memory with Battery Backup
-
-AcceleRAID 250 (DAC960PTL1)
- Uses onboard Symbios SCSI chips on certain motherboards
- Also includes one onboard Wide Ultra-2/LVD SCSI Channel
- 66MHz Intel i960RD RISC Processor
- 4MB/8MB/16MB/32MB/64MB/128MB ECC EDO Memory
-
-AcceleRAID 200 (DAC960PTL0)
- Uses onboard Symbios SCSI chips on certain motherboards
- Includes no onboard SCSI Channels
- 66MHz Intel i960RD RISC Processor
- 4MB/8MB/16MB/32MB/64MB/128MB ECC EDO Memory
-
-AcceleRAID 150 (DAC960PRL)
- Uses onboard Symbios SCSI chips on certain motherboards
- Also includes one onboard Wide Ultra-2/LVD SCSI Channel
- 33MHz Intel i960RP RISC Processor
- 4MB Parity EDO Memory
-
-DAC960PJ 1/2/3 Wide Ultra SCSI-3 Channels
- 66MHz Intel i960RD RISC Processor
- 4MB/8MB/16MB/32MB/64MB/128MB ECC EDO Memory
-
-DAC960PG 1/2/3 Wide Ultra SCSI-3 Channels
- 33MHz Intel i960RP RISC Processor
- 4MB/8MB ECC EDO Memory
-
-DAC960PU 1/2/3 Wide Ultra SCSI-3 Channels
- Intel i960CF RISC Processor
- 4MB/8MB EDRAM or 2MB/4MB/8MB/16MB/32MB DRAM Memory
-
-DAC960PD 1/2/3 Wide Fast SCSI-2 Channels
- Intel i960CF RISC Processor
- 4MB/8MB EDRAM or 2MB/4MB/8MB/16MB/32MB DRAM Memory
-
-DAC960PL 1/2/3 Wide Fast SCSI-2 Channels
- Intel i960 RISC Processor
- 2MB/4MB/8MB/16MB/32MB DRAM Memory
-
-DAC960P 1/2/3 Wide Fast SCSI-2 Channels
- Intel i960 RISC Processor
- 2MB/4MB/8MB/16MB/32MB DRAM Memory
-
-For the eXtremeRAID 2000/3000 and AcceleRAID 352/170/160, firmware version
-6.00-01 or above is required.
-
-For the eXtremeRAID 1100, firmware version 5.06-0-52 or above is required.
-
-For the AcceleRAID 250, 200, and 150, firmware version 4.06-0-57 or above is
-required.
-
-For the DAC960PJ and DAC960PG, firmware version 4.06-0-00 or above is required.
-
-For the DAC960PU, DAC960PD, DAC960PL, and DAC960P, either firmware version
-3.51-0-04 or above is required (for dual Flash ROM controllers), or firmware
-version 2.73-0-00 or above is required (for single Flash ROM controllers)
-
-Please note that not all SCSI disk drives are suitable for use with DAC960
-controllers, and only particular firmware versions of any given model may
-actually function correctly. Similarly, not all motherboards have a BIOS that
-properly initializes the AcceleRAID 250, AcceleRAID 200, AcceleRAID 150,
-DAC960PJ, and DAC960PG because the Intel i960RD/RP is a multi-function device.
-If in doubt, contact Mylex RAID Technical Support (mylexsup@us.ibm.com) to
-verify compatibility. Mylex makes available a hard disk compatibility list at
-http://www.mylex.com/support/hdcomp/hd-lists.html.
-
-
- DRIVER INSTALLATION
-
-This distribution was prepared for Linux kernel version 2.2.19 or 2.4.12.
-
-To install the DAC960 RAID driver, you may use the following commands,
-replacing "/usr/src" with wherever you keep your Linux kernel source tree:
-
- cd /usr/src
- tar -xvzf DAC960-2.2.11.tar.gz (or DAC960-2.4.11.tar.gz)
- mv README.DAC960 linux/Documentation
- mv DAC960.[ch] linux/drivers/block
- patch -p0 < DAC960.patch (if DAC960.patch is included)
- cd linux
- make config
- make bzImage (or zImage)
-
-Then install "arch/x86/boot/bzImage" or "arch/x86/boot/zImage" as your
-standard kernel, run lilo if appropriate, and reboot.
-
-To create the necessary devices in /dev, the "make_rd" script included in
-"DAC960-Utilities.tar.gz" from http://www.dandelion.com/Linux/ may be used.
-LILO 21 and FDISK v2.9 include DAC960 support; also included in this archive
-are patches to LILO 20 and FDISK v2.8 that add DAC960 support, along with
-statically linked executables of LILO and FDISK. This modified version of LILO
-will allow booting from a DAC960 controller and/or mounting the root file
-system from a DAC960.
-
-Red Hat Linux 6.0 and SuSE Linux 6.1 include support for Mylex PCI RAID
-controllers. Installing directly onto a DAC960 may be problematic from other
-Linux distributions until their installation utilities are updated.
-
-
- INSTALLATION NOTES
-
-Before installing Linux or adding DAC960 logical drives to an existing Linux
-system, the controller must first be configured to provide one or more logical
-drives using the BIOS Configuration Utility or DACCF. Please note that since
-there are only at most 6 usable partitions on each logical drive, systems
-requiring more partitions should subdivide a drive group into multiple logical
-drives, each of which can have up to 6 usable partitions. Also, note that with
-large disk arrays it is advisable to enable the 8GB BIOS Geometry (255/63)
-rather than accepting the default 2GB BIOS Geometry (128/32); failing to so do
-will cause the logical drive geometry to have more than 65535 cylinders which
-will make it impossible for FDISK to be used properly. The 8GB BIOS Geometry
-can be enabled by configuring the DAC960 BIOS, which is accessible via Alt-M
-during the BIOS initialization sequence.
-
-For maximum performance and the most efficient E2FSCK operation, it is
-recommended that EXT2 file systems be built with a 4KB block size and 16 block
-stride to match the DAC960 controller's 64KB default stripe size. The command
-"mke2fs -b 4096 -R stride=16 <device>" is appropriate. Unless there will be a
-large number of small files on the file systems, it is also beneficial to add
-the "-i 16384" option to increase the bytes per inode parameter thereby
-reducing the file system metadata. Finally, on systems that will only be run
-with Linux 2.2 or later kernels it is beneficial to enable sparse superblocks
-with the "-s 1" option.
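-
-For instance, for the first logical drive /dev/rd/c0d0, the options above
-combine into the single command:
-
- mke2fs -b 4096 -R stride=16 -i 16384 -s 1 /dev/rd/c0d0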
-
-
- DAC960 ANNOUNCEMENTS MAILING LIST
-
-The DAC960 Announcements Mailing List provides a forum for informing Linux
-users of new driver releases and other announcements regarding Linux support
-for DAC960 PCI RAID Controllers. To join the mailing list, send a message to
-"dac960-announce-request@dandelion.com" with the line "subscribe" in the
-message body.
-
-
- CONTROLLER CONFIGURATION AND STATUS MONITORING
-
-The DAC960 RAID controllers running firmware 4.06 or above include a Background
-Initialization facility so that system downtime is minimized both for initial
-installation and subsequent configuration of additional storage. The BIOS
-Configuration Utility (accessible via Alt-R during the BIOS initialization
-sequence) is used to quickly configure the controller, and then the logical
-drives that have been created are available for immediate use even while they
-are still being initialized by the controller. The primary need for online
-configuration and status monitoring is then to avoid system downtime when disk
-drives fail and must be replaced. Mylex's online monitoring and configuration
-utilities are being ported to Linux and will become available at some point in
-the future. Note that with a SAF-TE (SCSI Accessed Fault-Tolerant Enclosure)
-enclosure, the controller is able to rebuild failed drives automatically as
-soon as a drive replacement is made available.
-
-The primary interfaces for controller configuration and status monitoring are
-special files created in the /proc/rd/... hierarchy along with the normal
-system console logging mechanism. Whenever the system is operating, the DAC960
-driver queries each controller for status information every 10 seconds, and
-checks for additional conditions every 60 seconds. The initial status of each
-controller is always available for controller N in /proc/rd/cN/initial_status,
-and the current status as of the last status monitoring query is available in
-/proc/rd/cN/current_status. In addition, status changes are also logged by the
-driver to the system console and will appear in the log files maintained by
-syslog. The progress of asynchronous rebuild or consistency check operations
-is also available in /proc/rd/cN/current_status, and progress messages are
-logged to the system console at most every 60 seconds.
-
-Starting with the 2.2.3/2.0.3 versions of the driver, the status information
-available in /proc/rd/cN/initial_status and /proc/rd/cN/current_status has been
-augmented to include the vendor, model, revision, and serial number (if
-available) for each physical device found connected to the controller:
-
-***** DAC960 RAID Driver Version 2.2.3 of 19 August 1999 *****
-Copyright 1998-1999 by Leonard N. Zubkoff <lnz@dandelion.com>
-Configuring Mylex DAC960PRL PCI RAID Controller
- Firmware Version: 4.07-0-07, Channels: 1, Memory Size: 16MB
- PCI Bus: 1, Device: 4, Function: 1, I/O Address: Unassigned
- PCI Address: 0xFE300000 mapped at 0xA0800000, IRQ Channel: 21
- Controller Queue Depth: 128, Maximum Blocks per Command: 128
- Driver Queue Depth: 127, Maximum Scatter/Gather Segments: 33
- Stripe Size: 64KB, Segment Size: 8KB, BIOS Geometry: 255/63
- SAF-TE Enclosure Management Enabled
- Physical Devices:
- 0:0 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 68016775HA
- Disk Status: Online, 17928192 blocks
- 0:1 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 68004E53HA
- Disk Status: Online, 17928192 blocks
- 0:2 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 13013935HA
- Disk Status: Online, 17928192 blocks
- 0:3 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 13016897HA
- Disk Status: Online, 17928192 blocks
- 0:4 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 68019905HA
- Disk Status: Online, 17928192 blocks
- 0:5 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 68012753HA
- Disk Status: Online, 17928192 blocks
- 0:6 Vendor: ESG-SHV Model: SCA HSBP M6 Revision: 0.61
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 89640960 blocks, Write Thru
- No Rebuild or Consistency Check in Progress
-
-To simplify the monitoring process for custom software, the special file
-/proc/rd/status returns "OK" when all DAC960 controllers in the system are
-operating normally and no failures have occurred, or "ALERT" if any logical
-drives are offline or critical or any non-standby physical drives are dead.
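-
-As a minimal illustration (the alert action is hypothetical), such custom
-software could simply poll this file periodically:
-
- while true; do
-   if [ "`cat /proc/rd/status`" != "OK" ]; then
-     echo "DAC960 controller problem detected" | mail -s ALERT root
-   fi
-   sleep 60
- done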
-
-Configuration commands for controller N are available via the special file
-/proc/rd/cN/user_command. A human readable command can be written to this
-special file to initiate a configuration operation, and the results of the
-operation can then be read back from the special file in addition to being
-logged to the system console. The shell command sequence
-
- echo "<configuration-command>" > /proc/rd/c0/user_command
- cat /proc/rd/c0/user_command
-
-is typically used to execute configuration commands. The configuration
-commands are:
-
- flush-cache
-
- The "flush-cache" command flushes the controller's cache. The system
- automatically flushes the cache at shutdown or if the driver module is
- unloaded, so this command is needed only to be certain a write-back cache
- is flushed to disk before the system is powered off by a command to a UPS.
- Note that the flush-cache command also stops an asynchronous rebuild or
- consistency check, so it should not be used except when the system is being
- halted.
-
- kill <channel>:<target-id>
-
- The "kill" command marks the physical drive <channel>:<target-id> as DEAD.
- This command is provided primarily for testing, and should not be used
- during normal system operation.
-
- make-online <channel>:<target-id>
-
- The "make-online" command changes the physical drive <channel>:<target-id>
- from status DEAD to status ONLINE. In cases where multiple physical drives
- have been killed simultaneously, this command may be used to bring all but
- one of them back online, after which a rebuild to the final drive is
- necessary.
-
- Warning: make-online should only be used on a dead physical drive that is
- an active part of a drive group, never on a standby drive. The command
- should never be used on a dead drive that is part of a critical logical
- drive; rebuild should be used if only a single drive is dead.
-
- make-standby <channel>:<target-id>
-
- The "make-standby" command changes physical drive <channel>:<target-id>
- from status DEAD to status STANDBY. It should only be used in cases where
- a dead drive was replaced after an automatic rebuild was performed onto a
- standby drive. It cannot be used to add a standby drive to the controller
- configuration if one was not created initially; the BIOS Configuration
- Utility must be used for that currently.
-
- rebuild <channel>:<target-id>
-
- The "rebuild" command initiates an asynchronous rebuild onto physical drive
- <channel>:<target-id>. It should only be used when a dead drive has been
- replaced.
-
- check-consistency <logical-drive-number>
-
- The "check-consistency" command initiates an asynchronous consistency check
- of <logical-drive-number> with automatic restoration. It can be used
- whenever it is desired to verify the consistency of the redundancy
- information.
-
- cancel-rebuild
- cancel-consistency-check
-
- The "cancel-rebuild" and "cancel-consistency-check" commands cancel any
- rebuild or consistency check operations previously initiated.
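-
-For example, to verify the redundancy information of Logical Drive 0 on
-Controller 0, the following command sequence could be used:
-
- echo "check-consistency 0" > /proc/rd/c0/user_command
- cat /proc/rd/c0/user_command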
-
-
- EXAMPLE I - DRIVE FAILURE WITHOUT A STANDBY DRIVE
-
-The following annotated logs demonstrate the controller configuration and
-online status monitoring capabilities of the Linux DAC960 Driver. The test
-configuration comprises 6 1GB Quantum Atlas I disk drives on two channels of a
-DAC960PJ controller. The physical drives are configured into a single drive
-group without a standby drive, and the drive group has been configured into two
-logical drives, one RAID-5 and one RAID-6. Note that these logs are from an
-earlier version of the driver and the messages have changed somewhat with newer
-releases, but the functionality remains similar. First, here is the current
-status of the RAID configuration:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
-***** DAC960 RAID Driver Version 2.0.0 of 23 March 1999 *****
-Copyright 1998-1999 by Leonard N. Zubkoff <lnz@dandelion.com>
-Configuring Mylex DAC960PJ PCI RAID Controller
- Firmware Version: 4.06-0-08, Channels: 3, Memory Size: 8MB
- PCI Bus: 0, Device: 19, Function: 1, I/O Address: Unassigned
- PCI Address: 0xFD4FC000 mapped at 0x8807000, IRQ Channel: 9
- Controller Queue Depth: 128, Maximum Blocks per Command: 128
- Driver Queue Depth: 127, Maximum Scatter/Gather Segments: 33
- Stripe Size: 64KB, Segment Size: 8KB, BIOS Geometry: 255/63
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 5498880 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Online, 3305472 blocks, Write Thru
- No Rebuild or Consistency Check in Progress
-
-gwynedd:/u/lnz# cat /proc/rd/status
-OK
-
-The above messages indicate that everything is healthy, and /proc/rd/status
-returns "OK" indicating that there are no problems with any DAC960 controller
-in the system. For demonstration purposes, while I/O is active Physical Drive
-1:1 is now disconnected, simulating a drive failure. The failure is noted by
-the driver within 10 seconds of the controller's having detected it, and the
-driver logs the following console status messages indicating that Logical
-Drives 0 and 1 are now CRITICAL as a result of Physical Drive 1:1 being DEAD:
-
-DAC960#0: Physical Drive 1:2 Error Log: Sense Key = 6, ASC = 29, ASCQ = 02
-DAC960#0: Physical Drive 1:3 Error Log: Sense Key = 6, ASC = 29, ASCQ = 02
-DAC960#0: Physical Drive 1:1 killed because of timeout on SCSI command
-DAC960#0: Physical Drive 1:1 is now DEAD
-DAC960#0: Logical Drive 0 (/dev/rd/c0d0) is now CRITICAL
-DAC960#0: Logical Drive 1 (/dev/rd/c0d1) is now CRITICAL
-
-The Sense Keys logged here are just Check Condition / Unit Attention conditions
-arising from a SCSI bus reset that is forced by the controller during its error
-recovery procedures. Concurrently with the above, the driver status available
-from /proc/rd also reflects the drive failure. The status message in
-/proc/rd/status has changed from "OK" to "ALERT":
-
-gwynedd:/u/lnz# cat /proc/rd/status
-ALERT
-
-and /proc/rd/c0/current_status has been updated:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Dead, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Critical, 5498880 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Critical, 3305472 blocks, Write Thru
- No Rebuild or Consistency Check in Progress
-
-Since there are no standby drives configured, the system can continue to access
-the logical drives in a performance degraded mode until the failed drive is
-replaced and a rebuild operation completed to restore the redundancy of the
-logical drives. Once Physical Drive 1:1 is replaced with a properly
-functioning drive, or if the physical drive was killed without having failed
-(e.g., due to electrical problems on the SCSI bus), the user can instruct the
-controller to initiate a rebuild operation onto the newly replaced drive:
-
-gwynedd:/u/lnz# echo "rebuild 1:1" > /proc/rd/c0/user_command
-gwynedd:/u/lnz# cat /proc/rd/c0/user_command
-Rebuild of Physical Drive 1:1 Initiated
-
-The echo command instructs the controller to initiate an asynchronous rebuild
-operation onto Physical Drive 1:1, and the status message that results from the
-operation is then available for reading from /proc/rd/c0/user_command, as well
-as being logged to the console by the driver.
-
-Within 10 seconds of this command the driver logs the initiation of the
-asynchronous rebuild operation:
-
-DAC960#0: Rebuild of Physical Drive 1:1 Initiated
-DAC960#0: Physical Drive 1:1 Error Log: Sense Key = 6, ASC = 29, ASCQ = 01
-DAC960#0: Physical Drive 1:1 is now WRITE-ONLY
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 1% completed
-
-and /proc/rd/c0/current_status is updated:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Write-Only, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Critical, 5498880 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Critical, 3305472 blocks, Write Thru
- Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 6% completed
-
-As the rebuild progresses, the current status in /proc/rd/c0/current_status is
-updated every 10 seconds:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Write-Only, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Critical, 5498880 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Critical, 3305472 blocks, Write Thru
- Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 15% completed
-
-and every minute a progress message is logged to the console by the driver:
-
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 32% completed
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 63% completed
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 94% completed
-DAC960#0: Rebuild in Progress: Logical Drive 1 (/dev/rd/c0d1) 94% completed
-
-Finally, the rebuild completes successfully. The driver logs the status of the
-logical and physical drives and the rebuild completion:
-
-DAC960#0: Rebuild Completed Successfully
-DAC960#0: Physical Drive 1:1 is now ONLINE
-DAC960#0: Logical Drive 0 (/dev/rd/c0d0) is now ONLINE
-DAC960#0: Logical Drive 1 (/dev/rd/c0d1) is now ONLINE
-
-/proc/rd/c0/current_status is updated:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 5498880 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Online, 3305472 blocks, Write Thru
- Rebuild Completed Successfully
-
-and /proc/rd/status indicates that everything is healthy once again:
-
-gwynedd:/u/lnz# cat /proc/rd/status
-OK
-
-
- EXAMPLE II - DRIVE FAILURE WITH A STANDBY DRIVE
-
-The following annotated logs demonstrate the controller configuration and
-online status monitoring capabilities of the Linux DAC960 Driver. The test
-configuration comprises 6 1GB Quantum Atlas I disk drives on two channels of a
-DAC960PJ controller. The physical drives are configured into a single drive
-group with a standby drive, and the drive group has been configured into two
-logical drives, one RAID-5 and one RAID-6. Note that these logs are from an
-earlier version of the driver and the messages have changed somewhat with newer
-releases, but the functionality remains similar. First, here is the current
-status of the RAID configuration:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
-***** DAC960 RAID Driver Version 2.0.0 of 23 March 1999 *****
-Copyright 1998-1999 by Leonard N. Zubkoff <lnz@dandelion.com>
-Configuring Mylex DAC960PJ PCI RAID Controller
- Firmware Version: 4.06-0-08, Channels: 3, Memory Size: 8MB
- PCI Bus: 0, Device: 19, Function: 1, I/O Address: Unassigned
- PCI Address: 0xFD4FC000 mapped at 0x8807000, IRQ Channel: 9
- Controller Queue Depth: 128, Maximum Blocks per Command: 128
- Driver Queue Depth: 127, Maximum Scatter/Gather Segments: 33
- Stripe Size: 64KB, Segment Size: 8KB, BIOS Geometry: 255/63
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Standby, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 4399104 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Online, 2754560 blocks, Write Thru
- No Rebuild or Consistency Check in Progress
-
-gwynedd:/u/lnz# cat /proc/rd/status
-OK
-
-The above messages indicate that everything is healthy, and /proc/rd/status
-returns "OK" indicating that there are no problems with any DAC960 controller
-in the system. For demonstration purposes, while I/O is active Physical Drive
-1:2 is now disconnected, simulating a drive failure. The failure is noted by
-the driver within 10 seconds of the controller's having detected it, and the
-driver logs the following console status messages:
-
-DAC960#0: Physical Drive 1:1 Error Log: Sense Key = 6, ASC = 29, ASCQ = 02
-DAC960#0: Physical Drive 1:3 Error Log: Sense Key = 6, ASC = 29, ASCQ = 02
-DAC960#0: Physical Drive 1:2 killed because of timeout on SCSI command
-DAC960#0: Physical Drive 1:2 is now DEAD
-DAC960#0: Physical Drive 1:2 killed because it was removed
-DAC960#0: Logical Drive 0 (/dev/rd/c0d0) is now CRITICAL
-DAC960#0: Logical Drive 1 (/dev/rd/c0d1) is now CRITICAL
-
-Since a standby drive is configured, the controller automatically begins
-rebuilding onto the standby drive:
-
-DAC960#0: Physical Drive 1:3 is now WRITE-ONLY
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 4% completed
-
-Concurrently with the above, the driver status available from /proc/rd also
-reflects the drive failure and automatic rebuild. The status message in
-/proc/rd/status has changed from "OK" to "ALERT":
-
-gwynedd:/u/lnz# cat /proc/rd/status
-ALERT
-
-and /proc/rd/c0/current_status has been updated:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Dead, 2201600 blocks
- 1:3 - Disk: Write-Only, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Critical, 4399104 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Critical, 2754560 blocks, Write Thru
- Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 4% completed
-
-As the rebuild progresses, the current status in /proc/rd/c0/current_status is
-updated every 10 seconds:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Dead, 2201600 blocks
- 1:3 - Disk: Write-Only, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Critical, 4399104 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Critical, 2754560 blocks, Write Thru
- Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 40% completed
-
-and every minute a progress message is logged on the console by the driver:
-
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 40% completed
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 76% completed
-DAC960#0: Rebuild in Progress: Logical Drive 1 (/dev/rd/c0d1) 66% completed
-DAC960#0: Rebuild in Progress: Logical Drive 1 (/dev/rd/c0d1) 84% completed
-
-Finally, the rebuild completes successfully. The driver logs the status of the
-logical and physical drives and the rebuild completion:
-
-DAC960#0: Rebuild Completed Successfully
-DAC960#0: Physical Drive 1:3 is now ONLINE
-DAC960#0: Logical Drive 0 (/dev/rd/c0d0) is now ONLINE
-DAC960#0: Logical Drive 1 (/dev/rd/c0d1) is now ONLINE
-
-/proc/rd/c0/current_status is updated:
-
-***** DAC960 RAID Driver Version 2.0.0 of 23 March 1999 *****
-Copyright 1998-1999 by Leonard N. Zubkoff <lnz@dandelion.com>
-Configuring Mylex DAC960PJ PCI RAID Controller
- Firmware Version: 4.06-0-08, Channels: 3, Memory Size: 8MB
- PCI Bus: 0, Device: 19, Function: 1, I/O Address: Unassigned
- PCI Address: 0xFD4FC000 mapped at 0x8807000, IRQ Channel: 9
- Controller Queue Depth: 128, Maximum Blocks per Command: 128
- Driver Queue Depth: 127, Maximum Scatter/Gather Segments: 33
- Stripe Size: 64KB, Segment Size: 8KB, BIOS Geometry: 255/63
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Dead, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 4399104 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Online, 2754560 blocks, Write Thru
- Rebuild Completed Successfully
-
-and /proc/rd/status indicates that everything is healthy once again:
-
-gwynedd:/u/lnz# cat /proc/rd/status
-OK
-
-Note that the absence of a viable standby drive does not create an "ALERT"
-status. Once dead Physical Drive 1:2 has been replaced, the controller must be
-told that this has occurred and that the newly replaced drive should become the
-new standby drive:
-
-gwynedd:/u/lnz# echo "make-standby 1:2" > /proc/rd/c0/user_command
-gwynedd:/u/lnz# cat /proc/rd/c0/user_command
-Make Standby of Physical Drive 1:2 Succeeded
-
-The echo command instructs the controller to make Physical Drive 1:2 into a
-standby drive, and the status message that results from the operation is then
-available for reading from /proc/rd/c0/user_command, as well as being logged to
-the console by the driver. Within 60 seconds of this command the driver logs:
-
-DAC960#0: Physical Drive 1:2 Error Log: Sense Key = 6, ASC = 29, ASCQ = 01
-DAC960#0: Physical Drive 1:2 is now STANDBY
-DAC960#0: Make Standby of Physical Drive 1:2 Succeeded
-
-and /proc/rd/c0/current_status is updated:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Standby, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 4399104 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Online, 2754560 blocks, Write Thru
- Rebuild Completed Successfully
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 875b2b56b87f..3c1b5ab54bc0 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -190,7 +190,7 @@ whitespace:
notify_free Depending on device usage scenario it may account
a) the number of pages freed because of swap slot free
notifications or b) the number of pages freed because of
- REQ_DISCARD requests sent by bio. The former ones are
+ REQ_OP_DISCARD requests sent by bio. The former ones are
sent to a swap block device when a swap slot is freed,
which implies that this disk is being used as a swap disk.
The latter ones are sent by filesystem mounted with
diff --git a/Documentation/cdrom/00-INDEX b/Documentation/cdrom/00-INDEX
deleted file mode 100644
index 433edf23dc49..000000000000
--- a/Documentation/cdrom/00-INDEX
+++ /dev/null
@@ -1,11 +0,0 @@
-00-INDEX
- - this file (info on CD-ROMs and Linux)
-Makefile
- - only used to generate TeX output from the documentation.
-cdrom-standard.tex
- - LaTeX document on standardizing the CD-ROM programming interface.
-ide-cd
- - info on setting up and using ATAPI (aka IDE) CD-ROMs.
-packet-writing.txt
- - Info on the CDRW packet writing module
-
diff --git a/Documentation/cgroup-v1/00-INDEX b/Documentation/cgroup-v1/00-INDEX
deleted file mode 100644
index 13e0c85e7b35..000000000000
--- a/Documentation/cgroup-v1/00-INDEX
+++ /dev/null
@@ -1,26 +0,0 @@
-00-INDEX
- - this file
-blkio-controller.txt
- - Description for Block IO Controller, implementation and usage details.
-cgroups.txt
- - Control Groups definition, implementation details, examples and API.
-cpuacct.txt
- - CPU Accounting Controller; account CPU usage for groups of tasks.
-cpusets.txt
- - documents the cpusets feature; assign CPUs and Mem to a set of tasks.
-admin-guide/devices.rst
- - Device Whitelist Controller; description, interface and security.
-freezer-subsystem.txt
- - checkpointing; rationale to not use signals, interface.
-hugetlb.txt
- - HugeTLB Controller implementation and usage details.
-memcg_test.txt
- - Memory Resource Controller; implementation details.
-memory.txt
- - Memory Resource Controller; design, accounting, interface, testing.
-net_cls.txt
- - Network classifier cgroups details and usages.
-net_prio.txt
- - Network priority cgroups details and usages.
-pids.txt
- - Process number cgroups details and usages.
diff --git a/Documentation/cgroup-v1/rdma.txt b/Documentation/cgroup-v1/rdma.txt
index af618171e0eb..9bdb7fd03f83 100644
--- a/Documentation/cgroup-v1/rdma.txt
+++ b/Documentation/cgroup-v1/rdma.txt
@@ -27,7 +27,7 @@ cgroup.
Currently user space applications can easily take away all the rdma verb
specific resources such as AH, CQ, QP, MR etc. Due to which other applications
in other cgroup or kernel space ULPs may not even get chance to allocate any
-rdma resources. This can leads to service unavailability.
+rdma resources. This can lead to service unavailability.
Therefore RDMA controller is needed through which resource consumption
of processes can be limited. Through this controller different rdma
diff --git a/Documentation/conf.py b/Documentation/conf.py
index b691af4831fa..72647a38b5c2 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -259,7 +259,7 @@ latex_elements = {
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
-'pointsize': '8pt',
+'pointsize': '11pt',
# Latex figure (float) alignment
#'figure_align': 'htbp',
@@ -272,8 +272,8 @@ latex_elements = {
'preamble': '''
% Use some font with UTF-8 support with XeLaTeX
\\usepackage{fontspec}
- \\setsansfont{DejaVu Serif}
- \\setromanfont{DejaVu Sans}
+ \\setsansfont{DejaVu Sans}
+ \\setromanfont{DejaVu Serif}
\\setmonofont{DejaVu Sans Mono}
'''
@@ -383,6 +383,10 @@ latex_documents = [
'The kernel development community', 'manual'),
('filesystems/index', 'filesystems.tex', 'Linux Filesystems API',
'The kernel development community', 'manual'),
+ ('admin-guide/ext4', 'ext4-admin-guide.tex', 'ext4 Administration Guide',
+ 'ext4 Community', 'manual'),
+ ('filesystems/ext4/index', 'ext4-data-structures.tex',
+ 'ext4 Data Structures and Algorithms', 'ext4 Community', 'manual'),
('gpu/index', 'gpu.tex', 'Linux GPU Driver Developer\'s Guide',
'The kernel development community', 'manual'),
('input/index', 'linux-input.tex', 'The Linux input driver subsystem',
diff --git a/Documentation/core-api/boot-time-mm.rst b/Documentation/core-api/boot-time-mm.rst
index 03cb1643f46f..6e12e89a03e0 100644
--- a/Documentation/core-api/boot-time-mm.rst
+++ b/Documentation/core-api/boot-time-mm.rst
@@ -76,7 +76,7 @@ These interfaces available only with bootmem, i.e when ``CONFIG_NO_BOOTMEM=n``
.. kernel-doc:: include/linux/bootmem.h
.. kernel-doc:: mm/bootmem.c
- :nodocs:
+ :functions:
Memblock specific API
---------------------
@@ -89,4 +89,4 @@ really happens under the hood.
.. kernel-doc:: include/linux/memblock.h
.. kernel-doc:: mm/memblock.c
- :nodocs:
+ :functions:
diff --git a/Documentation/core-api/gfp_mask-from-fs-io.rst b/Documentation/core-api/gfp_mask-from-fs-io.rst
index e0df8f416582..e7c32a8de126 100644
--- a/Documentation/core-api/gfp_mask-from-fs-io.rst
+++ b/Documentation/core-api/gfp_mask-from-fs-io.rst
@@ -1,3 +1,5 @@
+.. _gfp_mask_from_fs_io:
+
=================================
GFP masks used from FS/IO context
=================================
diff --git a/Documentation/core-api/idr.rst b/Documentation/core-api/idr.rst
index d351e880a2f6..a2738050c4f0 100644
--- a/Documentation/core-api/idr.rst
+++ b/Documentation/core-api/idr.rst
@@ -1,4 +1,4 @@
-.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. SPDX-License-Identifier: GPL-2.0+
=============
ID Allocation
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 26b735cefb93..29c790f571a5 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -27,10 +27,13 @@ Core utilities
errseq
printk-formats
circular-buffers
+ memory-allocation
mm-api
gfp_mask-from-fs-io
timekeeping
boot-time-mm
+ memory-hotplug
+
Interfaces for kernel debugging
===============================
diff --git a/Documentation/core-api/memory-allocation.rst b/Documentation/core-api/memory-allocation.rst
new file mode 100644
index 000000000000..f8bb9aa120c4
--- /dev/null
+++ b/Documentation/core-api/memory-allocation.rst
@@ -0,0 +1,122 @@
+=======================
+Memory Allocation Guide
+=======================
+
+Linux provides a variety of APIs for memory allocation. You can
+allocate small chunks using the `kmalloc` or `kmem_cache_alloc` families,
+large virtually contiguous areas using `vmalloc` and its derivatives,
+or you can directly request pages from the page allocator with
+`alloc_pages`. It is also possible to use more specialized allocators,
+for instance `cma_alloc` or `zs_malloc`.
+
+Most of the memory allocation APIs use GFP flags to express how that
+memory should be allocated. The GFP acronym stands for "get free
+pages", the underlying memory allocation function.
+
+The diversity of the allocation APIs, combined with the numerous GFP
+flags, makes the question "How should I allocate memory?" not that easy
+to answer, although very likely you should use
+
+::
+
+ kzalloc(<size>, GFP_KERNEL);
+
+Of course there are cases when other allocation APIs and different GFP
+flags must be used.
+
+Get Free Page flags
+===================
+
+The GFP flags control the allocator's behavior. They tell which memory
+zones can be used, how hard the allocator should try to find free
+memory, whether the memory can be accessed by userspace, etc. The
+:ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>` provides
+reference documentation for the GFP flags and their combinations, and
+here we briefly outline their recommended usage:
+
+ * Most of the time ``GFP_KERNEL`` is what you need. Memory for the
+ kernel data structures, DMAable memory, inode cache, all these and
+ many other allocation types can use ``GFP_KERNEL``. Note that
+ using ``GFP_KERNEL`` implies ``__GFP_RECLAIM``, which means that
+ direct reclaim may be triggered under memory pressure; the calling
+ context must be allowed to sleep.
+ * If the allocation is performed from an atomic context, e.g. an
+ interrupt handler, use ``GFP_NOWAIT`` (see the sketch after this
+ list). This flag prevents direct reclaim and IO or filesystem
+ operations. Consequently, under memory pressure a ``GFP_NOWAIT``
+ allocation is likely to fail. Allocations which have a reasonable
+ fallback should be using ``__GFP_NOWARN``.
+ * If you think that accessing memory reserves is justified and the kernel
+ will be stressed unless allocation succeeds, you may use ``GFP_ATOMIC``.
+ * Untrusted allocations triggered from userspace should be subject
+ to kmem accounting and must have the ``__GFP_ACCOUNT`` bit set. There
+ is the handy ``GFP_KERNEL_ACCOUNT`` shortcut for ``GFP_KERNEL``
+ allocations that should be accounted.
+ * Userspace allocations should use either of the ``GFP_USER``,
+ ``GFP_HIGHUSER`` or ``GFP_HIGHUSER_MOVABLE`` flags. The longer
+ the flag name the less restrictive it is.
+
+ ``GFP_HIGHUSER_MOVABLE`` does not require that allocated memory
+ will be directly accessible by the kernel and implies that the
+ data is movable.
+
+ ``GFP_HIGHUSER`` means that the allocated memory is not movable,
+ but it is not required to be directly accessible by the kernel. An
+ example may be a hardware allocation that maps data directly into
+ userspace but has no addressing limitations.
+
+ ``GFP_USER`` means that the allocated memory is not movable and it
+ must be directly accessible by the kernel.
+
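+As an illustration, a minimal sketch (``struct foo_dev``, ``struct foo_evt``
+and the surrounding driver code are hypothetical) contrasting process and
+atomic context::
+
+  /* Process context: sleeping is allowed, so GFP_KERNEL fits. */
+  struct foo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);
+
+  /* Atomic context, e.g. an interrupt handler: no sleeping, so use
+   * GFP_NOWAIT and be prepared for failure under memory pressure. */
+  struct foo_evt *e = kzalloc(sizeof(*e), GFP_NOWAIT | __GFP_NOWARN);
+  if (!e)
+    return; /* a reasonable fallback: drop the event */
+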
+You may notice that quite a few allocations in the existing code
+specify ``GFP_NOIO`` or ``GFP_NOFS``. Historically, they were used to
+prevent recursion deadlocks caused by direct memory reclaim calling
+back into the FS or IO paths and blocking on already held
+resources. Since 4.12 the preferred way to address this issue is to
+use new scope APIs described in
+:ref:`Documentation/core-api/gfp_mask-from-fs-io.rst <gfp_mask_from_fs_io>`.
+
+Other legacy GFP flags are ``GFP_DMA`` and ``GFP_DMA32``. They are
+used to ensure that the allocated memory is accessible by hardware
+with limited addressing capabilities. So unless you are writing a
+driver for a device with such restrictions, avoid using these flags.
+Even for hardware with such restrictions it is preferable to use the
+`dma_alloc*` APIs.
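+
+For instance, a minimal sketch of the preferred approach (``dev`` and
+``size`` are assumed to come from the surrounding driver code)::
+
+  dma_addr_t dma_handle;
+  void *cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
+  if (!cpu_addr)
+    return -ENOMEM;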
+
+Selecting memory allocator
+==========================
+
+The most straightforward way to allocate memory is to use a function
+from the :c:func:`kmalloc` family. And, to be on the safe side it's
+best to use routines that set memory to zero, like
+:c:func:`kzalloc`. If you need to allocate memory for an array, there
+are :c:func:`kmalloc_array` and :c:func:`kcalloc` helpers.
+
+The maximal size of a chunk that can be allocated with `kmalloc` is
+limited. The actual limit depends on the hardware and the kernel
+configuration, but it is a good practice to use `kmalloc` for objects
+smaller than page size.
+
+For large allocations you can use :c:func:`vmalloc` and
+:c:func:`vzalloc`, or directly request pages from the page
+allocator. The memory allocated by `vmalloc` and related functions is
+not physically contiguous.
+
+If you are not sure whether the allocation size is too large for
+`kmalloc`, it is possible to use :c:func:`kvmalloc` and its
+derivatives. It will try to allocate memory with `kmalloc` and if the
+allocation fails it will be retried with `vmalloc`. There are
+restrictions on which GFP flags can be used with `kvmalloc`; please
+see :c:func:`kvmalloc_node` reference documentation. Note that
+`kvmalloc` may return memory that is not physically contiguous.
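+
+A minimal sketch (``nr`` and ``entry_size`` are illustrative)::
+
+  void *buf = kvmalloc(nr * entry_size, GFP_KERNEL);
+  if (!buf)
+    return -ENOMEM;
+  /* ... use buf ... */
+  kvfree(buf);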
+
+If you need to allocate many identical objects you can use the slab
+cache allocator. The cache should be set up with
+:c:func:`kmem_cache_create` before it can be used. Afterwards
+:c:func:`kmem_cache_alloc` and its convenience wrappers can allocate
+memory from that cache.
+
+When the allocated memory is no longer needed it must be freed. You
+can use :c:func:`kvfree` for the memory allocated with `kmalloc`,
+`vmalloc` and `kvmalloc`. Memory allocated from a slab cache should be freed
+with :c:func:`kmem_cache_free`. And don't forget to destroy the cache with
+:c:func:`kmem_cache_destroy`.
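+
+As an illustration, here is a hedged sketch of the whole slab cache
+lifecycle for a hypothetical ``struct foo``::
+
+  static struct kmem_cache *foo_cache;
+
+  foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
+                                SLAB_HWCACHE_ALIGN, NULL);
+  if (!foo_cache)
+    return -ENOMEM;
+
+  /* allocate and later release one object from the cache */
+  struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
+
+  kmem_cache_free(foo_cache, f);
+  kmem_cache_destroy(foo_cache);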
diff --git a/Documentation/core-api/memory-hotplug.rst b/Documentation/core-api/memory-hotplug.rst
new file mode 100644
index 000000000000..de7467e48067
--- /dev/null
+++ b/Documentation/core-api/memory-hotplug.rst
@@ -0,0 +1,125 @@
+.. _memory_hotplug:
+
+==============
+Memory hotplug
+==============
+
+Memory hotplug event notifier
+=============================
+
+Hotplugging events are sent to a notification queue.
+
+There are six types of notification defined in ``include/linux/memory.h``:
+
+MEM_GOING_ONLINE
+ Generated before new memory becomes available, in order to allow
+ subsystems to prepare to handle the new memory. The page allocator is
+ still unable to allocate from the new memory.
+
+MEM_CANCEL_ONLINE
+ Generated if MEM_GOING_ONLINE fails.
+
+MEM_ONLINE
+ Generated when memory has been successfully brought online. The callback may
+ allocate pages from the new memory.
+
+MEM_GOING_OFFLINE
+ Generated to begin the process of offlining memory. Allocations are no
+ longer possible from the memory but some of the memory to be offlined
+ is still in use. The callback can be used to free memory known to a
+ subsystem from the indicated memory block.
+
+MEM_CANCEL_OFFLINE
+ Generated if MEM_GOING_OFFLINE fails. Memory is available again from
+ the memory block that we attempted to offline.
+
+MEM_OFFLINE
+ Generated after offlining memory is complete.
+
+A callback routine can be registered by calling::
+
+ hotplug_memory_notifier(callback_func, priority)
+
+Callback functions with higher values of priority are called before callback
+functions with lower values.
+
+A callback function must have the following prototype::
+
+ int callback_func(
+ struct notifier_block *self, unsigned long action, void *arg);
+
+The first argument of the callback function (self) is a pointer to the block
+of the notifier chain that points to the callback function itself.
+The second argument (action) is one of the event types described above.
+The third argument (arg) passes a pointer to struct memory_notify::
+
+ struct memory_notify {
+ unsigned long start_pfn;
+ unsigned long nr_pages;
+ int status_change_nid_normal;
+ int status_change_nid_high;
+ int status_change_nid;
+ }
+
+- start_pfn is the start_pfn of the memory being onlined/offlined.
+- nr_pages is the number of pages of the memory being onlined/offlined.
+- status_change_nid_normal is the node id whose N_NORMAL_MEMORY nodemask
+ bit is (or will be) set/cleared; if this is -1, the nodemask status is
+ not changed.
+- status_change_nid_high is the node id whose N_HIGH_MEMORY nodemask bit
+ is (or will be) set/cleared; if this is -1, the nodemask status is not
+ changed.
+- status_change_nid is the node id whose N_MEMORY nodemask bit is (or
+ will be) set/cleared. It means that a new (memoryless) node gains memory
+ when it is onlined, or that a node loses all of its memory when it is
+ offlined. If this is -1, the nodemask status is not changed.
+
+If status_change_nid* >= 0, the callback should create/discard structures
+for the node if necessary.
+
+The callback routine shall return one of the values
+NOTIFY_DONE, NOTIFY_OK, NOTIFY_BAD, or NOTIFY_STOP
+defined in ``include/linux/notifier.h``.
+
+NOTIFY_DONE and NOTIFY_OK have no effect on further processing.
+
+NOTIFY_BAD is used as response to the MEM_GOING_ONLINE, MEM_GOING_OFFLINE,
+MEM_ONLINE, or MEM_OFFLINE action to cancel hotplugging. It stops
+further processing of the notification queue.
+
+NOTIFY_STOP stops further processing of the notification queue.
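+
+As an illustration, a minimal callback sketch (the function name and the
+per-node bookkeeping it alludes to are hypothetical)::
+
+  static int example_mem_notify(struct notifier_block *self,
+                                unsigned long action, void *arg)
+  {
+      struct memory_notify *mn = arg;
+
+      switch (action) {
+      case MEM_GOING_ONLINE:
+          /* Pages in the new range are not yet available from the
+           * page allocator; prepare per-node structures if needed. */
+          if (mn->status_change_nid >= 0)
+              ; /* create node-local bookkeeping here */
+          break;
+      case MEM_OFFLINE:
+          /* [start_pfn, start_pfn + nr_pages) has been offlined. */
+          break;
+      }
+      return NOTIFY_OK;
+  }
+
+  /* registration, e.g. from an init function */
+  hotplug_memory_notifier(example_mem_notify, 0);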
+
+Locking Internals
+=================
+
+When adding/removing memory that uses memory block devices (i.e. ordinary RAM),
+the device_hotplug_lock should be held to:
+
+- synchronize against online/offline requests (e.g. via sysfs). This way, memory
+ block devices can only be accessed (.online/.state attributes) by user
+ space once memory has been fully added. And when removing memory, we
+ know nobody is in critical sections.
+- synchronize against CPU hotplug and similar (e.g. relevant for ACPI and PPC)
+
+In particular, there is a possible lock inversion that is avoided by using
+device_hotplug_lock when adding memory while user space tries to online that
+memory faster than expected:
+
+- device_online() will first take the device_lock(), followed by
+ mem_hotplug_lock
+- add_memory_resource() will first take the mem_hotplug_lock, followed by
+ the device_lock() (while creating the devices, during bus_add_device()).
+
+As the device is visible to user space before taking the device_lock(), this
+can result in a lock inversion.
+
+Onlining/offlining of memory should be done via device_online()/
+device_offline() to make sure it is properly synchronized with actions
+via sysfs. Holding device_hotplug_lock is advised (e.g. to protect online_type).
+
+When adding/removing/onlining/offlining memory or adding/removing
+heterogeneous/device memory, we should always hold the mem_hotplug_lock in
+write mode to serialise memory hotplug (e.g. access to global/zone
+variables).
+
+In addition, mem_hotplug_lock (in contrast to device_hotplug_lock) in read
+mode allows for a quite efficient get_online_mems/put_online_mems
+implementation, so code accessing memory can protect from that memory
+vanishing.
diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index 46ae3537fb12..5ce1ec1dd066 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -14,6 +14,8 @@ User Space Memory Access
.. kernel-doc:: mm/util.c
:functions: get_user_pages_fast
+.. _mm-api-gfp-flags:
+
Memory Allocation Controls
==========================
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index 25dc591cb110..ff48b55040ef 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -376,15 +376,15 @@ correctness of the format string and va_list arguments.
Passed by reference.
-kobjects
---------
+Device tree nodes
+-----------------
::
%pOF[fnpPcCF]
-For printing kobject based structs (device nodes). Default behaviour is
+For printing device tree node structures. Default behaviour is
equivalent to %pOFf.
- f - device node full_name
@@ -420,9 +420,8 @@ struct clk
%pC pll1
%pCn pll1
-For printing struct clk structures. %pC and %pCn print the name
-(Common Clock Framework) or address (legacy clock framework) of the
-structure.
+For printing struct clk structures. %pC and %pCn print the name of the clock
+(Common Clock Framework) or a unique 32-bit ID (legacy clock framework).
Passed by reference.
diff --git a/Documentation/dev-tools/coccinelle.rst b/Documentation/dev-tools/coccinelle.rst
index 94f41c290bfc..aa14f05cabb1 100644
--- a/Documentation/dev-tools/coccinelle.rst
+++ b/Documentation/dev-tools/coccinelle.rst
@@ -30,18 +30,29 @@ of many distributions, e.g. :
- NetBSD
- FreeBSD
-You can get the latest version released from the Coccinelle homepage at
+Some distribution packages are obsolete and it is recommended
+to use the latest version released from the Coccinelle homepage at
http://coccinelle.lip6.fr/
-Once you have it, run the following command::
+or from GitHub at:
- ./configure
+https://github.com/coccinelle/coccinelle
+
+Once you have it, run the following commands::
+
+ ./autogen
+ ./configure
make
as a regular user, and install it with::
sudo make install
+More detailed installation instructions to build from source can be
+found at:
+
+https://github.com/coccinelle/coccinelle/blob/master/install.txt
+
Supplemental documentation
---------------------------
@@ -51,6 +62,10 @@ https://bottest.wiki.kernel.org/coccicheck
The wiki documentation always refers to the linux-next version of the script.
+For Semantic Patch Language (SmPL) grammar documentation, refer to:
+
+http://coccinelle.lip6.fr/documentation.php
+
Using Coccinelle on the Linux kernel
------------------------------------
@@ -223,7 +238,7 @@ Since coccicheck runs through make, it naturally runs from the kernel
proper dir, as such the second rule above would be implied for picking up a
.cocciconfig when using ``make coccicheck``.
-``make coccicheck`` also supports using M= targets.If you do not supply
+``make coccicheck`` also supports using M= targets. If you do not supply
any M= target, it is assumed you want to target the entire kernel.
The kernel coccicheck script has::
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index 6f653acea248..dad1bb8711e2 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -159,7 +159,7 @@ Contributing new tests (details)
* If a test needs specific kernel config options enabled, add a config file in
the test directory to enable them.
- e.g: tools/testing/selftests/android/ion/config
+ e.g: tools/testing/selftests/android/config
Test Harness
============
diff --git a/Documentation/device-mapper/dm-flakey.txt b/Documentation/device-mapper/dm-flakey.txt
index c43030718cef..9f0e247d0877 100644
--- a/Documentation/device-mapper/dm-flakey.txt
+++ b/Documentation/device-mapper/dm-flakey.txt
@@ -33,6 +33,10 @@ Optional feature parameters:
All write I/O is silently ignored.
Read I/O is handled correctly.
+ error_writes:
+ All write I/O is failed with an error signalled.
+ Read I/O is handled correctly.
+
corrupt_bio_byte <Nth_byte> <direction> <value> <flags>:
During <down interval>, replace <Nth_byte> of the data of
each matching bio with <value>.
diff --git a/Documentation/device-mapper/log-writes.txt b/Documentation/device-mapper/log-writes.txt
index f4ebcbaf50f3..b638d124be6a 100644
--- a/Documentation/device-mapper/log-writes.txt
+++ b/Documentation/device-mapper/log-writes.txt
@@ -38,7 +38,7 @@ inconsistent file system.
Any REQ_FUA requests bypass this flushing mechanism and are logged as soon as
they complete as those requests will obviously bypass the device cache.
-Any REQ_DISCARD requests are treated like WRITE requests. Otherwise we would
+Any REQ_OP_DISCARD requests are treated like WRITE requests. Otherwise we would
have all the DISCARD requests, and then the WRITE requests and then the FLUSH
request. Consider the following example:
diff --git a/Documentation/devicetree/00-INDEX b/Documentation/devicetree/00-INDEX
deleted file mode 100644
index 8c4102c6a5e7..000000000000
--- a/Documentation/devicetree/00-INDEX
+++ /dev/null
@@ -1,12 +0,0 @@
-Documentation for device trees, a data structure by which bootloaders pass
-hardware layout to Linux in a device-independent manner, simplifying hardware
-probing. This subsystem is maintained by Grant Likely
-<grant.likely@secretlab.ca> and has a mailing list at
-https://lists.ozlabs.org/listinfo/devicetree-discuss
-
-00-INDEX
- - this file
-booting-without-of.txt
- - Booting Linux without Open Firmware, describes history and format of device trees.
-usage-model.txt
- - How Linux uses DT and what DT aims to solve. \ No newline at end of file
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index 5d5bd456d9d9..e30fd106df4f 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -10,6 +10,7 @@ PHYs.
Required properties:
- compatible : compatible string, one of:
- "allwinner,sun4i-a10-ahci"
+ - "allwinner,sun8i-r40-ahci"
- "brcm,iproc-ahci"
- "hisilicon,hisi-ahci"
- "cavium,octeon-7130-ahci"
@@ -31,8 +32,10 @@ Optional properties:
- clocks : a list of phandle + clock specifier pairs
- resets : a list of phandle + reset specifier pairs
- target-supply : regulator for SATA target power
+- phy-supply : regulator for PHY power
- phys : reference to the SATA PHY node
- phy-names : must be "sata-phy"
+- ahci-supply : regulator for AHCI controller
- ports-implemented : Mask that indicates which ports that the HBA supports
are available for software to use. Useful if PORTS_IMPL
is not programmed by the BIOS, which is true with
@@ -42,12 +45,13 @@ Required properties when using sub-nodes:
- #address-cells : number of cells to encode an address
- #size-cells : number of cells representing the size of an address
+For allwinner,sun8i-r40-ahci, the resets property must be present.
Sub-nodes required properties:
- reg : the port number
And at least one of the following properties:
- phys : reference to the SATA PHY node
-- target-supply : regulator for SATA target power
+- target-supply : regulator for SATA target power
Examples:
sata@ffe08000 {
diff --git a/Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt b/Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt
index 0a5b3b47f217..7713a413c6a7 100644
--- a/Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt
+++ b/Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt
@@ -9,6 +9,7 @@ Required properties:
"brcm,bcm7445-ahci"
"brcm,bcm-nsp-ahci"
"brcm,sata3-ahci"
+ "brcm,bcm63138-ahci"
- reg : register mappings for AHCI and SATA_TOP_CTRL
- reg-names : "ahci" and "top-ctrl"
- interrupts : interrupt mapping for SATA IRQ
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.txt b/Documentation/devicetree/bindings/connector/usb-connector.txt
index 8855bfcfd778..d90e17e2428b 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.txt
+++ b/Documentation/devicetree/bindings/connector/usb-connector.txt
@@ -29,15 +29,15 @@ Required properties for usb-c-connector with power delivery support:
in "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.2
Source_Capabilities Message, the order of each entry(PDO) should follow
the PD spec chapter 6.4.1. Required for power source and power dual role.
- User can specify the source PDO array via PDO_FIXED/BATT/VAR() defined in
- dt-bindings/usb/pd.h.
+ User can specify the source PDO array via PDO_FIXED/BATT/VAR/PPS_APDO()
+ defined in dt-bindings/usb/pd.h.
- sink-pdos: An array of u32 with each entry providing supported power
sink data object(PDO), the detailed bit definitions of PDO can be found
in "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.3
Sink Capabilities Message, the order of each entry(PDO) should follow
the PD spec chapter 6.4.1. Required for power sink and power dual role.
- User can specify the sink PDO array via PDO_FIXED/BATT/VAR() defined in
- dt-bindings/usb/pd.h.
+ User can specify the sink PDO array via PDO_FIXED/BATT/VAR/PPS_APDO() defined
+ in dt-bindings/usb/pd.h.
- op-sink-microwatt: Sink required operating power in microwatt, if source
can't offer the power, Capability Mismatch is set. Required for power
sink and power dual role.
diff --git a/Documentation/devicetree/bindings/dma/jz4780-dma.txt b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
index 03e9cf7b42e0..636fcb26b164 100644
--- a/Documentation/devicetree/bindings/dma/jz4780-dma.txt
+++ b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
@@ -2,8 +2,13 @@
Required properties:
-- compatible: Should be "ingenic,jz4780-dma"
-- reg: Should contain the DMA controller registers location and length.
+- compatible: Should be one of:
+ * ingenic,jz4740-dma
+ * ingenic,jz4725b-dma
+ * ingenic,jz4770-dma
+ * ingenic,jz4780-dma
+- reg: Should contain the DMA channel registers location and length, followed
+ by the DMA controller registers location and length.
- interrupts: Should contain the interrupt specifier of the DMA controller.
- clocks: Should contain a clock specifier for the JZ4780 PDMA clock.
- #dma-cells: Must be <2>. Number of integer cells in the dmas property of
@@ -19,9 +24,10 @@ Optional properties:
Example:
-dma: dma@13420000 {
+dma: dma-controller@13420000 {
compatible = "ingenic,jz4780-dma";
- reg = <0x13420000 0x10000>;
+ reg = <0x13420000 0x400
+ 0x13421000 0x40>;
interrupt-parent = <&intc>;
interrupts = <10>;
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 946229c48657..a5a7c3f5a1e3 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -17,6 +17,7 @@ Required Properties:
- compatible: "renesas,dmac-<soctype>", "renesas,rcar-dmac" as fallback.
Examples with soctypes are:
- "renesas,dmac-r8a7743" (RZ/G1M)
+ - "renesas,dmac-r8a7744" (RZ/G1N)
- "renesas,dmac-r8a7745" (RZ/G1E)
- "renesas,dmac-r8a77470" (RZ/G1C)
- "renesas,dmac-r8a7790" (R-Car H2)
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
index 482e54362d3e..1743017bd948 100644
--- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -4,6 +4,7 @@ Required Properties:
-compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback.
Examples with soctypes are:
- "renesas,r8a7743-usb-dmac" (RZ/G1M)
+ - "renesas,r8a7744-usb-dmac" (RZ/G1N)
- "renesas,r8a7745-usb-dmac" (RZ/G1E)
- "renesas,r8a7790-usb-dmac" (R-Car H2)
- "renesas,r8a7791-usb-dmac" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index a7c31de29362..f0ba154b5723 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -1,18 +1,9 @@
Specifying GPIO information for devices
-============================================
+=======================================
1) gpios property
-----------------
-Nodes that makes use of GPIOs should specify them using one or more
-properties, each containing a 'gpio-list':
-
- gpio-list ::= <single-gpio> [gpio-list]
- single-gpio ::= <gpio-phandle> <gpio-specifier>
- gpio-phandle : phandle to gpio controller node
- gpio-specifier : Array of #gpio-cells specifying specific gpio
- (controller specific)
-
GPIO properties should be named "[<name>-]gpios", with <name> being the purpose
of this GPIO for the device. While a non-existent <name> is considered valid
for compatibility reasons (resolving to the "gpios" property), it is not allowed
@@ -33,33 +24,27 @@ The following example could be used to describe GPIO pins used as device enable
and bit-banged data signals:
gpio1: gpio1 {
- gpio-controller
- #gpio-cells = <2>;
- };
- gpio2: gpio2 {
- gpio-controller
- #gpio-cells = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
};
[...]
- enable-gpios = <&gpio2 2>;
data-gpios = <&gpio1 12 0>,
<&gpio1 13 0>,
<&gpio1 14 0>,
<&gpio1 15 0>;
-Note that gpio-specifier length is controller dependent. In the
-above example, &gpio1 uses 2 cells to specify a gpio, while &gpio2
-only uses one.
+In the above example, &gpio1 uses 2 cells to specify a gpio. The first cell is
+a local offset to the GPIO line and the second cell represents consumer flags,
+such as whether the consumer desires the line to be active low (inverted) or
+open drain. This is the recommended practice.
-gpio-specifier may encode: bank, pin position inside the bank,
-whether pin is open-drain and whether pin is logically inverted.
+The exact meaning of each specifier cell is controller specific, and must be
+documented in the device tree binding for the device, but it is strongly
+recommended to use the two-cell approach.
-Exact meaning of each specifier cell is controller specific, and must
-be documented in the device tree binding for the device.
-
-Most controllers are however specifying a generic flag bitfield
-in the last cell, so for these, use the macros defined in
+Most controllers specify a generic flag bitfield in the last cell, so
+for these, use the macros defined in
include/dt-bindings/gpio/gpio.h whenever possible:
Example of a node using GPIOs:
@@ -236,46 +221,40 @@ Example of two SOC GPIO banks defined as gpio-controller nodes:
Some or all of the GPIOs provided by a GPIO controller may be routed to pins
on the package via a pin controller. This allows muxing those pins between
-GPIO and other functions.
+GPIO and other functions. It is a fairly common practice among silicon
+engineers.
+
+2.2) Ordinary (numerical) GPIO ranges
+-------------------------------------
It is useful to represent which GPIOs correspond to which pins on which pin
-controllers. The gpio-ranges property described below represents this, and
-contains information structures as follows:
-
- gpio-range-list ::= <single-gpio-range> [gpio-range-list]
- single-gpio-range ::= <numeric-gpio-range> | <named-gpio-range>
- numeric-gpio-range ::=
- <pinctrl-phandle> <gpio-base> <pinctrl-base> <count>
- named-gpio-range ::= <pinctrl-phandle> <gpio-base> '<0 0>'
- pinctrl-phandle : phandle to pin controller node
- gpio-base : Base GPIO ID in the GPIO controller
- pinctrl-base : Base pinctrl pin ID in the pin controller
- count : The number of GPIOs/pins in this range
-
-The "pin controller node" mentioned above must conform to the bindings
-described in ../pinctrl/pinctrl-bindings.txt.
-
-In case named gpio ranges are used (ranges with both <pinctrl-base> and
-<count> set to 0), the property gpio-ranges-group-names contains one string
-for every single-gpio-range in gpio-ranges:
- gpiorange-names-list ::= <gpiorange-name> [gpiorange-names-list]
- gpiorange-name : Name of the pingroup associated to the GPIO range in
- the respective pin controller.
-
-Elements of gpiorange-names-list corresponding to numeric ranges contain
-the empty string. Elements of gpiorange-names-list corresponding to named
-ranges contain the name of a pin group defined in the respective pin
-controller. The number of pins/GPIOs in the range is the number of pins in
-that pin group.
+controllers. The gpio-ranges property described below represents this with
+a discrete set of ranges mapping pins from the pin controller local number space
+to pins in the GPIO controller local number space.
-Previous versions of this binding required all pin controller nodes that
-were referenced by any gpio-ranges property to contain a property named
-#gpio-range-cells with value <3>. This requirement is now deprecated.
-However, that property may still exist in older device trees for
-compatibility reasons, and would still be required even in new device
-trees that need to be compatible with older software.
+The format is: <[pin controller phandle], [GPIO controller offset],
+ [pin controller offset], [number of pins]>;
+
+The GPIO controller offset pertains to the GPIO controller node containing the
+range definition.
+
+The pin controller node referenced by the phandle must conform to the bindings
+described in pinctrl/pinctrl-bindings.txt.
+
+Each offset runs from 0 to N. It is perfectly fine to list any number of
+ranges, each mapping just a single pin to a GPIO line, but in practice these
+mappings are often lumped into discrete sets.
+
+Example:
+
+ gpio-ranges = <&foo 0 20 10>, <&bar 10 50 20>;
-Example 1:
+This means:
+- pins 20..29 on pin controller "foo" are mapped to GPIO lines 0..9 and
+- pins 50..69 on pin controller "bar" are mapped to GPIO lines 10..29
+
+
+Verbose example:
qe_pio_e: gpio-controller@1460 {
#gpio-cells = <2>;
@@ -289,7 +268,28 @@ Here, a single GPIO controller has GPIOs 0..9 routed to pin controller
pinctrl1's pins 20..29, and GPIOs 10..29 routed to pin controller pinctrl2's
pins 50..69.
-Example 2:
+
+2.3) GPIO ranges from named pin groups
+--------------------------------------
+
+It is also possible to use pin groups for gpio ranges when pin groups are the
+easiest and most convenient mapping.
+
+Both <pinctrl-base> and <count> must be set to 0 when using named pin
+groups.
+
+The property gpio-ranges-group-names must contain exactly one string for each
+range.
+
+Elements of gpio-ranges-group-names must contain the name of a pin group
+defined in the respective pin controller. The number of pins/GPIO lines in the
+range is the number of pins in that pin group. The number of pins in that
+group is defined by the implementation and not in the device tree.
+
+If numerical and named pin groups are mixed, the string corresponding to a
+numerical pin range in gpio-ranges-group-names must be empty.
+
+Example:
gpio_pio_i: gpio-controller@14b0 {
#gpio-cells = <2>;
@@ -306,6 +306,14 @@ Example 2:
"bar";
};
-Here, three GPIO ranges are defined wrt. two pin controllers. pinctrl1 GPIO
-ranges are defined using pin numbers whereas the GPIO ranges wrt. pinctrl2
-are named "foo" and "bar".
+Here, three GPIO ranges are defined referring to two pin controllers.
+
+pinctrl1 GPIO ranges are defined using pin numbers whereas the GPIO ranges
+in pinctrl2 are defined using the pin groups named "foo" and "bar".
+
+Previous versions of this binding required all pin controller nodes that
+were referenced by any gpio-ranges property to contain a property named
+#gpio-range-cells with value <3>. This requirement is now deprecated.
+However, that property may still exist in older device trees for
+compatibility reasons, and would still be required even in new device
+trees that need to be compatible with older software.
diff --git a/Documentation/devicetree/bindings/gpio/ingenic,gpio.txt b/Documentation/devicetree/bindings/gpio/ingenic,gpio.txt
deleted file mode 100644
index 7988aeb725f4..000000000000
--- a/Documentation/devicetree/bindings/gpio/ingenic,gpio.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-Ingenic jz47xx GPIO controller
-
-That the Ingenic GPIO driver node must be a sub-node of the Ingenic pinctrl
-driver node.
-
-Required properties:
---------------------
-
- - compatible: Must contain one of:
- - "ingenic,jz4740-gpio"
- - "ingenic,jz4770-gpio"
- - "ingenic,jz4780-gpio"
- - reg: The GPIO bank number.
- - interrupt-controller: Marks the device node as an interrupt controller.
- - interrupts: Interrupt specifier for the controllers interrupt.
- - #interrupt-cells: Should be 2. Refer to
- ../interrupt-controller/interrupts.txt for more details.
- - gpio-controller: Marks the device node as a GPIO controller.
- - #gpio-cells: Should be 2. The first cell is the GPIO number and the second
- cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>. Only the
- GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
- - gpio-ranges: Range of pins managed by the GPIO controller. Refer to
- 'gpio.txt' in this directory for more details.
-
-Example:
---------
-
-&pinctrl {
- #address-cells = <1>;
- #size-cells = <0>;
-
- gpa: gpio@0 {
- compatible = "ingenic,jz4740-gpio";
- reg = <0>;
-
- gpio-controller;
- gpio-ranges = <&pinctrl 0 0 32>;
- #gpio-cells = <2>;
-
- interrupt-controller;
- #interrupt-cells = <2>;
-
- interrupt-parent = <&intc>;
- interrupts = <28>;
- };
-};
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 4018ee57a6af..2889bbcd7416 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -4,8 +4,10 @@ Required Properties:
- compatible: should contain one or more of the following:
- "renesas,gpio-r8a7743": for R8A7743 (RZ/G1M) compatible GPIO controller.
+ - "renesas,gpio-r8a7744": for R8A7744 (RZ/G1N) compatible GPIO controller.
- "renesas,gpio-r8a7745": for R8A7745 (RZ/G1E) compatible GPIO controller.
- "renesas,gpio-r8a77470": for R8A77470 (RZ/G1C) compatible GPIO controller.
+ - "renesas,gpio-r8a774a1": for R8A774A1 (RZ/G2M) compatible GPIO controller.
- "renesas,gpio-r8a7778": for R8A7778 (R-Car M1) compatible GPIO controller.
- "renesas,gpio-r8a7779": for R8A7779 (R-Car H1) compatible GPIO controller.
- "renesas,gpio-r8a7790": for R8A7790 (R-Car H2) compatible GPIO controller.
@@ -22,7 +24,7 @@ Required Properties:
- "renesas,gpio-r8a77995": for R8A77995 (R-Car D3) compatible GPIO controller.
- "renesas,rcar-gen1-gpio": for a generic R-Car Gen1 GPIO controller.
- "renesas,rcar-gen2-gpio": for a generic R-Car Gen2 or RZ/G1 GPIO controller.
- - "renesas,rcar-gen3-gpio": for a generic R-Car Gen3 GPIO controller.
+ - "renesas,rcar-gen3-gpio": for a generic R-Car Gen3 or RZ/G2 GPIO controller.
- "renesas,gpio-rcar": deprecated.
When compatible with the generic version nodes must list the
@@ -38,7 +40,7 @@ Required Properties:
- #gpio-cells: Should be 2. The first cell is the GPIO number and the second
cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>. Only the
GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
- - gpio-ranges: Range of pins managed by the GPIO controller.
+ - gpio-ranges: See gpio.txt.
Optional properties:
@@ -46,35 +48,44 @@ Optional properties:
mandatory if the hardware implements a controllable functional clock for
the GPIO instance.
-Please refer to gpio.txt in this directory for details of gpio-ranges property
-and the common GPIO bindings used by client devices.
+ - gpio-reserved-ranges: See gpio.txt.
+
+Please refer to gpio.txt in this directory for the common GPIO bindings used by
+client devices.
The GPIO controller also acts as an interrupt controller. It uses the default
two cells specifier as described in Documentation/devicetree/bindings/
interrupt-controller/interrupts.txt.
-Example: R8A7779 (R-Car H1) GPIO controller nodes
+Example: R8A77470 (RZ/G1C) GPIO controller nodes
- gpio0: gpio@ffc40000 {
- compatible = "renesas,gpio-r8a7779", "renesas,rcar-gen1-gpio";
- reg = <0xffc40000 0x2c>;
- interrupt-parent = <&gic>;
- interrupts = <0 141 0x4>;
- #gpio-cells = <2>;
- gpio-controller;
- gpio-ranges = <&pfc 0 0 32>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
+ gpio0: gpio@e6050000 {
+ compatible = "renesas,gpio-r8a77470",
+ "renesas,rcar-gen2-gpio";
+ reg = <0 0xe6050000 0 0x50>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 0 23>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 912>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 912>;
+ };
...
- gpio6: gpio@ffc46000 {
- compatible = "renesas,gpio-r8a7779", "renesas,rcar-gen1-gpio";
- reg = <0xffc46000 0x2c>;
- interrupt-parent = <&gic>;
- interrupts = <0 147 0x4>;
- #gpio-cells = <2>;
- gpio-controller;
- gpio-ranges = <&pfc 0 192 9>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
+ gpio3: gpio@e6053000 {
+ compatible = "renesas,gpio-r8a77470",
+ "renesas,rcar-gen2-gpio";
+ reg = <0 0xe6053000 0 0x50>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 96 30>;
+ gpio-reserved-ranges = <17 10>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 909>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 909>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/snps,creg-gpio.txt b/Documentation/devicetree/bindings/gpio/snps,creg-gpio.txt
new file mode 100644
index 000000000000..1b30812b015b
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/snps,creg-gpio.txt
@@ -0,0 +1,21 @@
+Synopsys GPIO via CREG (Control REGisters) driver
+
+Required properties:
+- compatible : "snps,creg-gpio-hsdk" or "snps,creg-gpio-axs10x".
+- reg : Exactly one register range with length 0x4.
+- #gpio-cells : Since the generic GPIO binding is used, the
+ amount of cells must be specified as 2. The first cell is the
+ pin number, the second cell is used to specify optional parameters:
+ See "gpio-specifier" in .../devicetree/bindings/gpio/gpio.txt.
+- gpio-controller : Marks the device node as a GPIO controller.
+- ngpios: Number of GPIO pins.
+
+Example:
+
+gpio: gpio@f00014b0 {
+ compatible = "snps,creg-gpio-hsdk";
+ reg = <0xf00014b0 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+};
diff --git a/Documentation/devicetree/bindings/hwmon/ina3221.txt b/Documentation/devicetree/bindings/hwmon/ina3221.txt
new file mode 100644
index 000000000000..a7b25caa2b8e
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/ina3221.txt
@@ -0,0 +1,44 @@
+Texas Instruments INA3221 Device Tree Bindings
+
+1) ina3221 node
+ Required properties:
+ - compatible: Must be "ti,ina3221"
+ - reg: I2C address
+
+ Optional properties:
+ = The node contains optional child nodes for three channels =
+ = Each child node describes the information of input source =
+
+ - #address-cells: Required only if a child node is present. Must be 1.
+ - #size-cells: Required only if a child node is present. Must be 0.
+
+2) child nodes
+ Required properties:
+ - reg: Must be 0, 1 or 2, corresponding to IN1, IN2 or IN3 port of INA3221
+
+ Optional properties:
+ - label: Name of the input source
+ - shunt-resistor-micro-ohms: Shunt resistor value in micro-Ohm
+
+Example:
+
+ina3221@40 {
+ compatible = "ti,ina3221";
+ reg = <0x40>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ input@0 {
+ reg = <0x0>;
+ status = "disabled";
+ };
+ input@1 {
+ reg = <0x1>;
+ shunt-resistor-micro-ohms = <5000>;
+ };
+ input@2 {
+ reg = <0x2>;
+ label = "VDD_5V";
+ shunt-resistor-micro-ohms = <5000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/hwmon/ltc2978.txt b/Documentation/devicetree/bindings/hwmon/ltc2978.txt
index bf2a47bbdc58..b428a70a7cc0 100644
--- a/Documentation/devicetree/bindings/hwmon/ltc2978.txt
+++ b/Documentation/devicetree/bindings/hwmon/ltc2978.txt
@@ -15,6 +15,7 @@ Required properties:
* "lltc,ltm2987"
* "lltc,ltm4675"
* "lltc,ltm4676"
+ * "lltc,ltm4686"
- reg: I2C slave address
Optional properties:
@@ -30,6 +31,7 @@ Valid names of regulators depend on number of supplies supported per device:
* ltc3880, ltc3882, ltc3886 : vout0 - vout1
* ltc3883 : vout0
* ltm4676 : vout0 - vout1
+ * ltm4686 : vout0 - vout1
Example:
ltc2978@5e {
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 996ce84352cb..7cccc49b6bea 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver
Required properties:
- compatible = "gpio-keys";
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt
index aa8bf2ec8905..1c94a57a661e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt
@@ -5,6 +5,8 @@ The Marvell ICU (Interrupt Consolidation Unit) controller is
responsible for collecting all wired-interrupt sources in the CP and
communicating them to the GIC in the AP, the unit translates interrupt
requests on input wires to MSG memory mapped transactions to the GIC.
+These messages will access a different GIC memory area depending on
+their type (NSR, SR, SEI, REI, etc.).
Required properties:
@@ -12,20 +14,23 @@ Required properties:
- reg: Should contain ICU registers location and length.
-- #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The value shall be 3.
+Subnodes: Each group of interrupts is declared as a subnode of the ICU,
+with its own compatible.
+
+Required properties for the icu_nsr/icu_sei subnodes:
- The 1st cell is the group type of the ICU interrupt. Possible group
- types are:
+- compatible: Should be one of:
+ * "marvell,cp110-icu-nsr"
+ * "marvell,cp110-icu-sr"
+ * "marvell,cp110-icu-sei"
+ * "marvell,cp110-icu-rei"
- ICU_GRP_NSR (0x0) : Shared peripheral interrupt, non-secure
- ICU_GRP_SR (0x1) : Shared peripheral interrupt, secure
- ICU_GRP_SEI (0x4) : System error interrupt
- ICU_GRP_REI (0x5) : RAM error interrupt
+- #interrupt-cells: Specifies the number of cells needed to encode an
+ interrupt source. The value shall be 2.
- The 2nd cell is the index of the interrupt in the ICU unit.
+ The 1st cell is the index of the interrupt in the ICU unit.
- The 3rd cell is the type of the interrupt. See arm,gic.txt for
+ The 2nd cell is the type of the interrupt. See arm,gic.txt for
details.
- interrupt-controller: Identifies the node as an interrupt
@@ -35,17 +40,73 @@ Required properties:
that allows to trigger interrupts using MSG memory mapped
transactions.
+Note: each 'interrupts' property referring to any 'icu_xxx' node shall
+ have a different number within [0:206].
+
Example:
icu: interrupt-controller@1e0000 {
compatible = "marvell,cp110-icu";
- reg = <0x1e0000 0x10>;
+ reg = <0x1e0000 0x440>;
+
+ CP110_LABEL(icu_nsr): interrupt-controller@10 {
+ compatible = "marvell,cp110-icu-nsr";
+ reg = <0x10 0x20>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ msi-parent = <&gicp>;
+ };
+
+ CP110_LABEL(icu_sei): interrupt-controller@50 {
+ compatible = "marvell,cp110-icu-sei";
+ reg = <0x50 0x10>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ msi-parent = <&sei>;
+ };
+};
+
+node1 {
+ interrupt-parent = <&icu_nsr>;
+ interrupts = <106 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+node2 {
+ interrupt-parent = <&icu_sei>;
+ interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+/* Would not work with the above nodes */
+node3 {
+ interrupt-parent = <&icu_nsr>;
+ interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+The legacy bindings were different in this way:
+
+- #interrupt-cells: The value was 3.
+ The 1st cell was the group type of the ICU interrupt. Possible
+ group types were:
+ ICU_GRP_NSR (0x0) : Shared peripheral interrupt, non-secure
+ ICU_GRP_SR (0x1) : Shared peripheral interrupt, secure
+ ICU_GRP_SEI (0x4) : System error interrupt
+ ICU_GRP_REI (0x5) : RAM error interrupt
+ The 2nd cell was the index of the interrupt in the ICU unit.
+ The 3rd cell was the type of the interrupt. See arm,gic.txt for
+ details.
+
+Example:
+
+icu: interrupt-controller@1e0000 {
+ compatible = "marvell,cp110-icu";
+ reg = <0x1e0000 0x440>;
+
#interrupt-cells = <3>;
interrupt-controller;
msi-parent = <&gicp>;
};
-usb3h0: usb3@500000 {
+node1 {
interrupt-parent = <&icu>;
interrupts = <ICU_GRP_NSR 106 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt
new file mode 100644
index 000000000000..0beafed502f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt
@@ -0,0 +1,36 @@
+Marvell SEI (System Error Interrupt) Controller
+-----------------------------------------------
+
+Marvell SEI (System Error Interrupt) controller is an interrupt
+aggregator. It receives interrupts from several sources and aggregates
+them to a single interrupt line (an SPI) on the parent interrupt
+controller.
+
+This interrupt controller can handle up to 64 SEIs: one set comes from the
+AP and is wired, while a second set comes from the CPs by means of
+MSIs.
+
+Required properties:
+
+- compatible: should be one of:
+ * "marvell,ap806-sei"
+- reg: SEI registers location and length.
+- interrupts: identifies the parent IRQ that will be triggered.
+- #interrupt-cells: number of cells to define an SEI wired interrupt
+ coming from the AP, should be 1. The cell is the IRQ
+ number.
+- interrupt-controller: identifies the node as an interrupt controller
+ for AP interrupts.
+- msi-controller: identifies the node as an MSI controller for the CPs
+ interrupts.
+
+Example:
+
+ sei: interrupt-controller@3f0200 {
+ compatible = "marvell,ap806-sei";
+ reg = <0x3f0200 0x40>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ msi-controller;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index a046ed374d80..8de96a4fb2d5 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -2,10 +2,12 @@ DT bindings for the R-Mobile/R-Car/RZ/G interrupt controller
Required properties:
-- compatible: has to be "renesas,irqc-<soctype>", "renesas,irqc" as fallback.
+- compatible: must be "renesas,irqc-<soctype>" or "renesas,intc-ex-<soctype>",
+ and "renesas,irqc" as fallback.
Examples with soctypes are:
- "renesas,irqc-r8a73a4" (R-Mobile APE6)
- "renesas,irqc-r8a7743" (RZ/G1M)
+ - "renesas,irqc-r8a7744" (RZ/G1N)
- "renesas,irqc-r8a7745" (RZ/G1E)
- "renesas,irqc-r8a77470" (RZ/G1C)
- "renesas,irqc-r8a7790" (R-Car H2)
@@ -19,6 +21,7 @@ Required properties:
- "renesas,intc-ex-r8a77965" (R-Car M3-N)
- "renesas,intc-ex-r8a77970" (R-Car V3M)
- "renesas,intc-ex-r8a77980" (R-Car V3H)
+ - "renesas,intc-ex-r8a77990" (R-Car E3)
- "renesas,intc-ex-r8a77995" (R-Car D3)
- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
interrupts.txt in this directory
diff --git a/Documentation/devicetree/bindings/leds/leds-an30259a.txt b/Documentation/devicetree/bindings/leds/leds-an30259a.txt
new file mode 100644
index 000000000000..6ffb861083c0
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-an30259a.txt
@@ -0,0 +1,43 @@
+* Panasonic AN30259A 3-channel LED driver
+
+The AN30259A is a LED controller capable of driving three LEDs independently. It supports
+constant current output and sloping current output modes. The chip is connected over I2C.
+
+Required properties:
+ - compatible: Must be "panasonic,an30259a".
+ - reg: I2C slave address.
+ - #address-cells: Must be 1.
+ - #size-cells: Must be 0.
+
+Each LED is represented as a sub-node of the panasonic,an30259a node.
+
+Required sub-node properties:
+ - reg: Pin that the LED is connected to. Must be 1, 2, or 3.
+
+Optional sub-node properties:
+ - label: see Documentation/devicetree/bindings/leds/common.txt
+ - linux,default-trigger: see Documentation/devicetree/bindings/leds/common.txt
+
+Example:
+led-controller@30 {
+ compatible = "panasonic,an30259a";
+ reg = <0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ linux,default-trigger = "heartbeat";
+ label = "red:indicator";
+ };
+
+ led@2 {
+ reg = <2>;
+ label = "green:indicator";
+ };
+
+ led@3 {
+ reg = <3>;
+ label = "blue:indicator";
+ };
+};
diff --git a/Documentation/devicetree/bindings/serial/atmel-usart.txt b/Documentation/devicetree/bindings/mfd/atmel-usart.txt
index 7c0d6b2f53e4..7f0cd72f47d2 100644
--- a/Documentation/devicetree/bindings/serial/atmel-usart.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-usart.txt
@@ -1,6 +1,6 @@
* Atmel Universal Synchronous Asynchronous Receiver/Transmitter (USART)
-Required properties:
+Required properties for USART:
- compatible: Should be "atmel,<chip>-usart" or "atmel,<chip>-dbgu"
The compatible <chip> indicated will be the first SoC to support an
 additional mode or a new USART feature.
@@ -11,7 +11,13 @@ Required properties:
Required elements: "usart"
- clocks: phandles to input clocks.
-Optional properties:
+Required properties for USART in SPI mode:
+- #size-cells : Must be <0>
+- #address-cells : Must be <1>
+- cs-gpios: chipselects (internal cs not supported)
+- atmel,usart-mode : Must be <AT91_USART_MODE_SPI> (found in dt-bindings/mfd/at91-usart.h)
+
+Optional properties in serial mode:
- atmel,use-dma-rx: use of PDC or DMA for receiving data
- atmel,use-dma-tx: use of PDC or DMA for transmitting data
- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD line respectively.
@@ -62,3 +68,18 @@ Example:
dma-names = "tx", "rx";
atmel,fifo-size = <32>;
};
+
+- SPI mode:
+ #include <dt-bindings/mfd/at91-usart.h>
+
+ spi0: spi@f001c000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-usart", "atmel,at91sam9260-usart";
+ atmel,usart-mode = <AT91_USART_MODE_SPI>;
+ reg = <0xf001c000 0x100>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH 5>;
+ clocks = <&usart0_clk>;
+ clock-names = "usart";
+ cs-gpios = <&pioB 3 0>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt
index 3ca56fdb5ffe..a4b056761eaa 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt
@@ -1,16 +1,17 @@
-* ROHM BD71837 Power Management Integrated Circuit bindings
+* ROHM BD71837 and BD71847 Power Management Integrated Circuit bindings
-BD71837MWV is a programmable Power Management IC for powering single-core,
-dual-core, and quad-core SoCs such as NXP-i.MX 8M. It is optimized for
-low BOM cost and compact solution footprint. It integrates 8 Buck
-egulators and 7 LDOs to provide all the power rails required by the SoC and
-the commonly used peripherals.
+BD71837MWV and BD71847MWV are programmable Power Management ICs for powering
+single-core, dual-core, and quad-core SoCs such as NXP-i.MX 8M. They are
+optimized for low BOM cost and compact solution footprint. BD71837MWV
+integrates 8 Buck regulators and 7 LDOs. BD71847MWV contains 6 Buck regulators
+and 6 LDOs.
-Datasheet for PMIC is available at:
+Datasheet for BD71837 is available at:
https://www.rohm.com/datasheet/BD71837MWV/bd71837mwv-e
Required properties:
- - compatible : Should be "rohm,bd71837".
+ - compatible : Should be "rohm,bd71837" for bd71837
+ "rohm,bd71847" for bd71847.
- reg : I2C slave address.
- interrupt-parent : Phandle to the parent interrupt controller.
- interrupts : The interrupt line the device is connected to.
diff --git a/Documentation/devicetree/bindings/mips/mscc.txt b/Documentation/devicetree/bindings/mips/mscc.txt
index ae15ec333542..bc817e984628 100644
--- a/Documentation/devicetree/bindings/mips/mscc.txt
+++ b/Documentation/devicetree/bindings/mips/mscc.txt
@@ -41,3 +41,19 @@ Example:
compatible = "mscc,ocelot-cpu-syscon", "syscon";
reg = <0x70000000 0x2c>;
};
+
+o HSIO regs:
+
+The SoC has a few registers (HSIO) handling miscellaneous functionalities:
+configuration and status of PLL5, RCOMP, SyncE, SerDes configurations and
+status, SerDes muxing and a thermal sensor.
+
+Required properties:
+- compatible: Should be "mscc,ocelot-hsio", "syscon", "simple-mfd"
+- reg : Should contain registers location and length
+
+Example:
+ syscon@10d0000 {
+ compatible = "mscc,ocelot-hsio", "syscon", "simple-mfd";
+ reg = <0x10d0000 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index f6ddba31cb73..e2effe17f05e 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -15,6 +15,7 @@ Required Properties:
- "arasan,sdhci-5.1": generic Arasan SDHCI 5.1 PHY
- "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1": rk3399 eMMC PHY
For this device it is strongly suggested to include arasan,soc-ctl-syscon.
+ - "ti,am654-sdhci-5.1", "arasan,sdhci-5.1": TI AM654 MMC PHY
- reg: From mmc bindings: Register location and length.
- clocks: From clock bindings: Handles to clock inputs.
- clock-names: From clock bindings: Tuple including "clk_xin" and "clk_ahb"
diff --git a/Documentation/devicetree/bindings/mmc/jz4740.txt b/Documentation/devicetree/bindings/mmc/jz4740.txt
index 7cd8c432d7c8..8a6f87f13114 100644
--- a/Documentation/devicetree/bindings/mmc/jz4740.txt
+++ b/Documentation/devicetree/bindings/mmc/jz4740.txt
@@ -7,6 +7,7 @@ described in mmc.txt.
Required properties:
- compatible: Should be one of the following:
- "ingenic,jz4740-mmc" for the JZ4740
+ - "ingenic,jz4725b-mmc" for the JZ4725B
- "ingenic,jz4780-mmc" for the JZ4780
- reg: Should contain the MMC controller registers location and length.
- interrupts: Should contain the interrupt specifier of the MMC controller.
diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt
index 03796cf2d3e7..6d3c626e017d 100644
--- a/Documentation/devicetree/bindings/mmc/mmci.txt
+++ b/Documentation/devicetree/bindings/mmc/mmci.txt
@@ -15,8 +15,11 @@ Required properties:
Optional properties:
- arm,primecell-periphid : contains the PrimeCell Peripheral ID, it overrides
the ID provided by the HW
+- resets : phandle to internal reset line.
+ Should be defined for sdmmc variant.
- vqmmc-supply : phandle to the regulator device tree node, mentioned
as the VCCQ/VDD_IO supply in the eMMC/SD specs.
+specific for ux500 variant:
- st,sig-dir-dat0 : bus signal direction pin used for DAT[0].
- st,sig-dir-dat2 : bus signal direction pin used for DAT[2].
- st,sig-dir-dat31 : bus signal direction pin used for DAT[3] and DAT[1].
@@ -24,6 +27,14 @@ Optional properties:
- st,sig-dir-cmd : cmd signal direction pin used for CMD.
- st,sig-pin-fbclk : feedback clock signal pin used.
+specific for sdmmc variant:
+- st,sig-dir : signal direction polarity used for cmd, dat0 and dat123.
+- st,neg-edge : data & command phase relation, generated on
+ sd clock falling edge.
+- st,use-ckin : use ckin pin from an external driver to sample
+ the receive data (example: with voltage
+ switch transceiver).
+
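Taken together, a hypothetical sdmmc-variant node could look like this (the
address, interrupt number and the clock/reset specifiers are placeholders,
not values from a real SoC):

	sdmmc1: sdmmc@58005000 {
		compatible = "arm,pl18x", "arm,primecell";
		reg = <0x58005000 0x1000>;
		interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&rcc SDMMC1_K>;
		clock-names = "apb_pclk";
		resets = <&rcc SDMMC1_R>;	/* internal reset line */
		st,neg-edge;			/* phases on sd clock falling edge */
		st,use-ckin;			/* sample rx data on the ckin pin */
		bus-width = <4>;
	};
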
Deprecated properties:
- mmc-cap-mmc-highspeed : indicates whether MMC is high speed capable.
- mmc-cap-sd-highspeed : indicates whether SD is high speed capable.
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
index f33467a54a05..f5bcda3980cc 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
@@ -10,6 +10,7 @@ Required properties:
- compatible: value should be either of the following.
"mediatek,mt8135-mmc": for mmc host ip compatible with mt8135
"mediatek,mt8173-mmc": for mmc host ip compatible with mt8173
+ "mediatek,mt8183-mmc": for mmc host ip compatible with mt8183
"mediatek,mt2701-mmc": for mmc host ip compatible with mt2701
"mediatek,mt2712-mmc": for mmc host ip compatible with mt2712
"mediatek,mt7622-mmc": for MT7622 SoC
@@ -22,6 +23,7 @@ Required properties:
"source" - source clock (required)
"hclk" - HCLK which used for host (required)
"source_cg" - independent source clock gate (required for MT2712)
+ "bus_clk" - bus clock used for internal register access (required for MT2712 MSDC0/3)
- pinctrl-names: should be "default", "state_uhs"
- pinctrl-0: should contain default/high speed pin ctrl
- pinctrl-1: should contain uhs mode pin ctrl
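
For an MT2712 MSDC0/3 instance the clock wiring could then look roughly like
this (the clock provider phandles and macro names are assumptions made for
illustration only):

	mmc0: mmc@11230000 {
		compatible = "mediatek,mt2712-mmc";
		reg = <0 0x11230000 0 0x1000>;
		interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_LOW>;
		clocks = <&topckgen CLK_TOP_MSDC0_SEL>,		/* "source" */
			 <&pericfg CLK_PERI_MSDC30_0_HCLK>,	/* "hclk" */
			 <&pericfg CLK_PERI_MSDC50_0_CG>,	/* "source_cg" */
			 <&pericfg CLK_PERI_MSDC50_0_HCLK>;	/* "bus_clk" */
		clock-names = "source", "hclk", "source_cg", "bus_clk";
	};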
diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
index 9bce57862ed6..32b4b4e41923 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
@@ -38,3 +38,75 @@ sdhci@c8000200 {
power-gpios = <&gpio 155 0>; /* gpio PT3 */
bus-width = <8>;
};
+
+Optional properties for Tegra210 and Tegra186:
+- pinctrl-names, pinctrl-0, pinctrl-1 : Specify pad voltage
+ configurations. Valid pinctrl-names are "sdmmc-3v3" and "sdmmc-1v8"
+ for controllers supporting multiple voltage levels. The order of names
+ should correspond to the pin configuration states in pinctrl-0 and
+ pinctrl-1.
+- nvidia,only-1-8-v : The presence of this property indicates that the
+ controller operates at a 1.8 V fixed I/O voltage.
+- nvidia,pad-autocal-pull-up-offset-3v3,
+ nvidia,pad-autocal-pull-down-offset-3v3 : Specify drive strength
+ calibration offsets for 3.3 V signaling modes.
+- nvidia,pad-autocal-pull-up-offset-1v8,
+ nvidia,pad-autocal-pull-down-offset-1v8 : Specify drive strength
+ calibration offsets for 1.8 V signaling modes.
+- nvidia,pad-autocal-pull-up-offset-3v3-timeout,
+ nvidia,pad-autocal-pull-down-offset-3v3-timeout : Specify drive
+ strength used as a fallback in case the automatic calibration times
+ out on a 3.3 V signaling mode.
+- nvidia,pad-autocal-pull-up-offset-1v8-timeout,
+ nvidia,pad-autocal-pull-down-offset-1v8-timeout : Specify drive
+ strength used as a fallback in case the automatic calibration times
+ out on a 1.8 V signaling mode.
+- nvidia,pad-autocal-pull-up-offset-sdr104,
+ nvidia,pad-autocal-pull-down-offset-sdr104 : Specify drive strength
+ calibration offsets for SDR104 mode.
+- nvidia,pad-autocal-pull-up-offset-hs400,
+ nvidia,pad-autocal-pull-down-offset-hs400 : Specify drive strength
+ calibration offsets for HS400 mode.
+- nvidia,default-tap : Specify the default inbound sampling clock
+ trimmer value for non-tunable modes.
+- nvidia,default-trim : Specify the default outbound clock trimmer
+ value.
+- nvidia,dqs-trim : Specify DQS trim value for HS400 timing
+
+ Notes on the pad calibration pull up and pulldown offset values:
+ - The property values are drive codes which are programmed into the
+ PD_OFFSET and PU_OFFSET sections of the
+ SDHCI_TEGRA_AUTO_CAL_CONFIG register.
+ - A higher value corresponds to higher drive strength. Please refer
+ to the reference manual of the SoC for correct values.
+ - The SDR104 and HS400 timing specific values are used in
+ corresponding modes if specified.
+
+ Notes on tap and trim values:
+ - The values are used for compensating trace length differences
+ by adjusting the sampling point.
+ - The values are programmed to the Vendor Clock Control Register.
+ Please refer to the reference manual of the SoC for correct
+ values.
+ - The DQS trim values are only used on controllers which support
+ HS400 timing. Only SDMMC4 on Tegra210 and Tegra186 supports
+ HS400.
+
+Example:
+sdhci@700b0000 {
+ compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci";
+ reg = <0x0 0x700b0000 0x0 0x200>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC1>;
+ clock-names = "sdhci";
+ resets = <&tegra_car 14>;
+ reset-names = "sdhci";
+ pinctrl-names = "sdmmc-3v3", "sdmmc-1v8";
+ pinctrl-0 = <&sdmmc1_3v3>;
+ pinctrl-1 = <&sdmmc1_1v8>;
+ nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>;
+ nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>;
+ nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>;
+ nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
index 5ff1e12c655a..c064af5838aa 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
+++ b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
@@ -12,6 +12,7 @@ Required properties:
- "renesas,mmcif-r8a73a4" for the MMCIF found in r8a73a4 SoCs
- "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs
- "renesas,mmcif-r8a7743" for the MMCIF found in r8a7743 SoCs
+ - "renesas,mmcif-r8a7744" for the MMCIF found in r8a7744 SoCs
- "renesas,mmcif-r8a7745" for the MMCIF found in r8a7745 SoCs
- "renesas,mmcif-r8a7778" for the MMCIF found in r8a7778 SoCs
- "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs
@@ -23,7 +24,8 @@ Required properties:
- interrupts: Some SoCs have only 1 shared interrupt, while others have either
2 or 3 individual interrupts (error, int, card detect). Below is the number
of interrupts for each SoC:
- 1: r8a73a4, r8a7743, r8a7745, r8a7778, r8a7790, r8a7791, r8a7793, r8a7794
+ 1: r8a73a4, r8a7743, r8a7744, r8a7745, r8a7778, r8a7790, r8a7791, r8a7793,
+ r8a7794
2: r8a7740, sh73a0
3: r7s72100
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-sprd.txt b/Documentation/devicetree/bindings/mmc/sdhci-sprd.txt
new file mode 100644
index 000000000000..45c9978aad7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sdhci-sprd.txt
@@ -0,0 +1,41 @@
+* Spreadtrum SDHCI controller (sdhci-sprd)
+
+The Secure Digital (SD) Host controller on Spreadtrum SoCs provides an interface
+for MMC, SD and SDIO types of cards.
+
+This file documents differences between the core properties in mmc.txt
+and the properties used by the sdhci-sprd driver.
+
+Required properties:
+- compatible: Should contain "sprd,sdhci-r11".
+- reg: physical base address of the controller and length.
+- interrupts: Interrupts used by the SDHCI controller.
+- clocks: Should contain phandle for the clock feeding the SDHCI controller
+- clock-names: Should contain the following:
+ "sdio" - SDIO source clock (required)
+ "enable" - gate clock used for enabling/disabling the device (required)
+
+Optional properties:
+- assigned-clocks: the same as the "sdio" clock
+- assigned-clock-parents: the default parent of "sdio" clock
+
+Examples:
+
+sdio0: sdio@20600000 {
+ compatible = "sprd,sdhci-r11";
+ reg = <0 0x20600000 0 0x1000>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+
+ clock-names = "sdio", "enable";
+ clocks = <&ap_clk CLK_EMMC_2X>,
+ <&apahb_gate CLK_EMMC_EB>;
+ assigned-clocks = <&ap_clk CLK_EMMC_2X>;
+ assigned-clock-parents = <&rpll CLK_RPLL_390M>;
+
+ bus-width = <8>;
+ non-removable;
+ no-sdio;
+ no-sd;
+ cap-mmc-hw-reset;
+ status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index c434200d19d5..27f2eab2981d 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -16,7 +16,11 @@ Required properties:
"renesas,sdhi-r8a73a4" - SDHI IP on R8A73A4 SoC
"renesas,sdhi-r8a7740" - SDHI IP on R8A7740 SoC
"renesas,sdhi-r8a7743" - SDHI IP on R8A7743 SoC
+ "renesas,sdhi-r8a7744" - SDHI IP on R8A7744 SoC
"renesas,sdhi-r8a7745" - SDHI IP on R8A7745 SoC
+ "renesas,sdhi-r8a774a1" - SDHI IP on R8A774A1 SoC
+ "renesas,sdhi-r8a77470" - SDHI IP on R8A77470 SoC
+ "renesas,sdhi-mmc-r8a77470" - SDHI/MMC IP on R8A77470 SoC
"renesas,sdhi-r8a7778" - SDHI IP on R8A7778 SoC
"renesas,sdhi-r8a7779" - SDHI IP on R8A7779 SoC
"renesas,sdhi-r8a7790" - SDHI IP on R8A7790 SoC
@@ -27,14 +31,16 @@ Required properties:
"renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC
"renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC
"renesas,sdhi-r8a77965" - SDHI IP on R8A77965 SoC
+ "renesas,sdhi-r8a77970" - SDHI IP on R8A77970 SoC
"renesas,sdhi-r8a77980" - SDHI IP on R8A77980 SoC
"renesas,sdhi-r8a77990" - SDHI IP on R8A77990 SoC
"renesas,sdhi-r8a77995" - SDHI IP on R8A77995 SoC
"renesas,sdhi-shmobile" - a generic sh-mobile SDHI controller
"renesas,rcar-gen1-sdhi" - a generic R-Car Gen1 SDHI controller
- "renesas,rcar-gen2-sdhi" - a generic R-Car Gen2 or RZ/G1
+ "renesas,rcar-gen2-sdhi" - a generic R-Car Gen2 and RZ/G1 SDHI
+ (not SDHI/MMC) controller
+ "renesas,rcar-gen3-sdhi" - a generic R-Car Gen3 or RZ/G2
SDHI controller
- "renesas,rcar-gen3-sdhi" - a generic R-Car Gen3 SDHI controller
When compatible with the generic version, nodes must list
diff --git a/Documentation/devicetree/bindings/mmc/uniphier-sd.txt b/Documentation/devicetree/bindings/mmc/uniphier-sd.txt
new file mode 100644
index 000000000000..e1d658755722
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/uniphier-sd.txt
@@ -0,0 +1,55 @@
+UniPhier SD/eMMC controller
+
+Required properties:
+- compatible: should be one of the following:
+ "socionext,uniphier-sd-v2.91" - IP version 2.91
+ "socionext,uniphier-sd-v3.1" - IP version 3.1
+ "socionext,uniphier-sd-v3.1.1" - IP version 3.1.1
+- reg: offset and length of the register set for the device.
+- interrupts: a single interrupt specifier.
+- clocks: a single clock specifier of the controller clock.
+- reset-names: should contain the following:
+ "host" - mandatory for all versions
+ "bridge" - should exist only for "socionext,uniphier-sd-v2.91"
+ "hw" - should exist if eMMC hw reset line is available
+- resets: a list of reset specifiers, corresponding to the reset-names
+
+Optional properties:
+- pinctrl-names: if present, should contain the following:
+ "default" - should exist for all instances
+ "uhs" - should exist for SD instance with UHS support
+- pinctrl-0: pin control state for the default mode
+- pinctrl-1: pin control state for the UHS mode
+- dma-names: should be "rx-tx" if present.
+ This property can exist only for "socionext,uniphier-sd-v2.91".
+- dmas: a single DMA channel specifier
+ This property can exist only for "socionext,uniphier-sd-v2.91".
+- bus-width: see mmc.txt
+- cap-sd-highspeed: see mmc.txt
+- cap-mmc-highspeed: see mmc.txt
+- sd-uhs-sdr12: see mmc.txt
+- sd-uhs-sdr25: see mmc.txt
+- sd-uhs-sdr50: see mmc.txt
+- cap-mmc-hw-reset: should exist if reset-names contains "hw". see mmc.txt
+- non-removable: see mmc.txt
+
+Example:
+
+ sd: sdhc@5a400000 {
+ compatible = "socionext,uniphier-sd-v2.91";
+ reg = <0x5a400000 0x200>;
+ interrupts = <0 76 4>;
+ pinctrl-names = "default", "uhs";
+ pinctrl-0 = <&pinctrl_sd>;
+ pinctrl-1 = <&pinctrl_sd_uhs>;
+ clocks = <&mio_clk 0>;
+ reset-names = "host", "bridge";
+ resets = <&mio_rst 0>, <&mio_rst 3>;
+ dma-names = "rx-tx";
+ dmas = <&dmac 4>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ };
diff --git a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
index 4648948f7c3b..e15589f47787 100644
--- a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
+++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
@@ -19,6 +19,9 @@ Optional properties:
- interrupt-names: must be "mdio_done_error" when there is a shared interrupt fed
to this hardware block, or must be "mdio_done" for the first interrupt and
"mdio_error" for the second when there are separate interrupts
+- clocks: A reference to the clock supplying the MDIO bus controller
+- clock-frequency: the MDIO bus clock that must be output by the MDIO bus
+ hardware, if absent, the default hardware values are used
Child nodes of this MDIO bus controller node are standard Ethernet PHY device
nodes as described in Documentation/devicetree/bindings/net/phy.txt
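
A sketch of a bus node using the new clock properties (the clock provider
phandle is an assumption; 2.5 MHz is simply the classic MDC rate):

	mdio@403c0 {
		compatible = "brcm,unimac-mdio";
		reg = <0x403c0 0x8>, <0x40300 0x18>;
		reg-names = "mdio", "mdio_indir_rw";
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&scb_clk>;		/* assumed clock provider */
		clock-frequency = <2500000>;	/* request a 2.5 MHz MDC clock */

		ethernet-phy@0 {
			reg = <0>;
		};
	};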
diff --git a/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt b/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt
new file mode 100644
index 000000000000..886cbe8ffb38
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt
@@ -0,0 +1,143 @@
+Lantiq GSWIP Ethernet switches
+==============================
+
+Required properties for GSWIP core:
+
+- compatible : "lantiq,xrx200-gswip" for the embedded GSWIP in the
+ xRX200 SoC
+- reg : memory range of the GSWIP core registers
+ : memory range of the GSWIP MDIO registers
+ : memory range of the GSWIP MII registers
+
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of
+additional required and optional properties.
+
+
+Required properties for MDIO bus:
+- compatible : "lantiq,xrx200-mdio" for the MDIO bus inside the GSWIP
+ core of the xRX200 SoC and the PHYs connected to it.
+
+See Documentation/devicetree/bindings/net/mdio.txt for a list of additional
+required and optional properties.
+
+
+Required properties for GPHY firmware loading:
+- compatible : "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw"
+ "lantiq,xrx300-gphy-fw", "lantiq,gphy-fw"
+ "lantiq,xrx330-gphy-fw", "lantiq,gphy-fw"
+ for the loading of the firmware into the embedded
+ GPHY core of the SoC.
+- lantiq,rcu : reference to the rcu syscon
+
+The GPHY firmware loader has a list of GPHY entries, one for each
+embedded GPHY.
+
+- reg : Offset of the GPHY firmware register in the RCU
+ register range
+- resets : list of resets of the embedded GPHY
+- reset-names : list of names of the resets
+
+Example:
+
+Ethernet switch on the VRX200 SoC:
+
+switch@e108000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "lantiq,xrx200-gswip";
+ reg = < 0xe108000 0x3100 /* switch */
+ 0xe10b100 0xd8 /* mdio */
+ 0xe10b1d8 0x130 /* mii */
+ >;
+ dsa,member = <0 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan3";
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan4";
+ phy-mode = "rgmii";
+ phy-handle = <&phy1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-mode = "internal";
+ phy-handle = <&phy11>;
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan1";
+ phy-mode = "internal";
+ phy-handle = <&phy13>;
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "wan";
+ phy-mode = "rgmii";
+ phy-handle = <&phy5>;
+ };
+
+ port@6 {
+ reg = <0x6>;
+ label = "cpu";
+ ethernet = <&eth0>;
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "lantiq,xrx200-mdio";
+ reg = <0>;
+
+ phy0: ethernet-phy@0 {
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ reg = <0x1>;
+ };
+ phy5: ethernet-phy@5 {
+ reg = <0x5>;
+ };
+ phy11: ethernet-phy@11 {
+ reg = <0x11>;
+ };
+ phy13: ethernet-phy@13 {
+ reg = <0x13>;
+ };
+ };
+
+ gphy-fw {
+ compatible = "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw";
+ lantiq,rcu = <&rcu0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gphy@20 {
+ reg = <0x20>;
+
+ resets = <&reset0 31 30>;
+ reset-names = "gphy";
+ };
+
+ gphy@68 {
+ reg = <0x68>;
+
+ resets = <&reset0 29 28>;
+ reset-names = "gphy";
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt
new file mode 100644
index 000000000000..5ff5e68bbbb6
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt
@@ -0,0 +1,21 @@
+Lantiq xRX200 GSWIP PMAC Ethernet driver
+========================================
+
+Required properties:
+
+- compatible : "lantiq,xrx200-net" for the PMAC of the embedded
+ : GSWIP in the xRX200
+- reg : memory range of the PMAC core inside of the GSWIP core
+- interrupts : TX and RX DMA interrupts. Use interrupt-names "tx" for
+ : the TX interrupt and "rx" for the RX interrupt.
+
+Example:
+
+ethernet@e10b308 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "lantiq,xrx200-net";
+ reg = <0xe10b308 0xcf8>;
+ interrupts = <73>, <72>;
+ interrupt-names = "tx", "rx";
+};
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 457d5ae16f23..3e17ac1d5d58 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -10,6 +10,7 @@ Required properties:
Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
the Cadence GEM, or the generic form: "cdns,gem".
Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+ Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
index fc019df0d863..b78397669320 100644
--- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
@@ -31,7 +31,7 @@ required.
Required properties (port):
-- interrupts: interrupt for the port
+- interrupts: interrupt(s) for the port
- port-id: ID of the port from the MAC point of view
- gop-port-id: only for marvell,armada-7k-pp2, ID of the port from the
GOP (Group Of Ports) point of view. This ID is used to index the
@@ -43,10 +43,12 @@ Optional properties (port):
- marvell,loopback: port is loopback mode
- phy: a phandle to a phy node defining the PHY address (as the reg
property, a single integer).
-- interrupt-names: if more than a single interrupt for rx is given, must
- be the name associated to the interrupts listed. Valid
- names are: "tx-cpu0", "tx-cpu1", "tx-cpu2", "tx-cpu3",
- "rx-shared", "link".
+- interrupt-names: if more than a single interrupt is given, must be the
+ names associated with the interrupts listed. Valid names are:
+ "hifX", with X in [0..8], and "link". The names "tx-cpu0",
+ "tx-cpu1", "tx-cpu2", "tx-cpu3" and "rx-shared" are supported
+ for backward compatibility but shouldn't be used for new
+ additions.
- marvell,system-controller: a phandle to the system controller.
Example for marvell,armada-375-pp2:
@@ -89,9 +91,14 @@ cpm_ethernet: ethernet@0 {
<ICU_GRP_NSR 43 IRQ_TYPE_LEVEL_HIGH>,
<ICU_GRP_NSR 47 IRQ_TYPE_LEVEL_HIGH>,
<ICU_GRP_NSR 51 IRQ_TYPE_LEVEL_HIGH>,
- <ICU_GRP_NSR 55 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2",
- "tx-cpu3", "rx-shared";
+ <ICU_GRP_NSR 55 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 59 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 63 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 67 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 71 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 129 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4",
+ "hif5", "hif6", "hif7", "hif8", "link";
port-id = <0>;
gop-port-id = <0>;
};
@@ -101,9 +108,14 @@ cpm_ethernet: ethernet@0 {
<ICU_GRP_NSR 44 IRQ_TYPE_LEVEL_HIGH>,
<ICU_GRP_NSR 48 IRQ_TYPE_LEVEL_HIGH>,
<ICU_GRP_NSR 52 IRQ_TYPE_LEVEL_HIGH>,
- <ICU_GRP_NSR 56 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2",
- "tx-cpu3", "rx-shared";
+ <ICU_GRP_NSR 56 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 60 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 64 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 68 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 72 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 128 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4",
+ "hif5", "hif6", "hif7", "hif8", "link";
port-id = <1>;
gop-port-id = <2>;
};
@@ -113,9 +125,14 @@ cpm_ethernet: ethernet@0 {
<ICU_GRP_NSR 45 IRQ_TYPE_LEVEL_HIGH>,
<ICU_GRP_NSR 49 IRQ_TYPE_LEVEL_HIGH>,
<ICU_GRP_NSR 53 IRQ_TYPE_LEVEL_HIGH>,
- <ICU_GRP_NSR 57 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2",
- "tx-cpu3", "rx-shared";
+ <ICU_GRP_NSR 57 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 61 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 65 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 69 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 73 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 127 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4",
+ "hif5", "hif6", "hif7", "hif8", "link";
port-id = <2>;
gop-port-id = <3>;
};
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
index e22d8cfea687..5100358177c9 100644
--- a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
@@ -1,4 +1,4 @@
-Micrel KSZ9021/KSZ9031 Gigabit Ethernet PHY
+Micrel KSZ9021/KSZ9031/KSZ9131 Gigabit Ethernet PHY
Some boards require special tuning values, particularly when it comes
to clock delays. You can specify clock delay values in the PHY OF
@@ -64,6 +64,32 @@ KSZ9031:
Attention: The link partner must be configurable as slave otherwise
no link will be established.
+KSZ9131:
+
+ All skew control options are specified in picoseconds. The increment
+ step is 100ps. Unlike KSZ9031, the values represent picosecond delays.
+ A negative value can be assigned as rxc-skew-psec = <(-100)>;.
+
+ Optional properties:
+
+ Range of the value -700 to 2400, default value 0:
+
+ - rxc-skew-psec : Skew control of RX clock pad
+ - txc-skew-psec : Skew control of TX clock pad
+
+ Range of the value -700 to 800, default value 0:
+
+ - rxdv-skew-psec : Skew control of RX CTL pad
+ - txen-skew-psec : Skew control of TX CTL pad
+ - rxd0-skew-psec : Skew control of RX data 0 pad
+ - rxd1-skew-psec : Skew control of RX data 1 pad
+ - rxd2-skew-psec : Skew control of RX data 2 pad
+ - rxd3-skew-psec : Skew control of RX data 3 pad
+ - txd0-skew-psec : Skew control of TX data 0 pad
+ - txd1-skew-psec : Skew control of TX data 1 pad
+ - txd2-skew-psec : Skew control of TX data 2 pad
+ - txd3-skew-psec : Skew control of TX data 3 pad
+
Examples:
mdio {
diff --git a/Documentation/devicetree/bindings/net/mscc-ocelot.txt b/Documentation/devicetree/bindings/net/mscc-ocelot.txt
index 0a84711abece..9e5c17d426ce 100644
--- a/Documentation/devicetree/bindings/net/mscc-ocelot.txt
+++ b/Documentation/devicetree/bindings/net/mscc-ocelot.txt
@@ -12,7 +12,6 @@ Required properties:
- "sys"
- "rew"
- "qs"
- - "hsio"
- "qsys"
- "ana"
- "portX" with X from 0 to the number of last port index available on that
@@ -45,7 +44,6 @@ Example:
reg = <0x1010000 0x10000>,
<0x1030000 0x10000>,
<0x1080000 0x100>,
- <0x10d0000 0x10000>,
<0x11e0000 0x100>,
<0x11f0000 0x100>,
<0x1200000 0x100>,
@@ -59,10 +57,9 @@ Example:
<0x1280000 0x100>,
<0x1800000 0x80000>,
<0x1880000 0x10000>;
- reg-names = "sys", "rew", "qs", "hsio", "port0",
- "port1", "port2", "port3", "port4", "port5",
- "port6", "port7", "port8", "port9", "port10",
- "qsys", "ana";
+ reg-names = "sys", "rew", "qs", "port0", "port1", "port2",
+ "port3", "port4", "port5", "port6", "port7",
+ "port8", "port9", "port10", "qsys", "ana";
interrupts = <21 22>;
interrupt-names = "xtr", "inj";
diff --git a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
index 0eedabe22cc3..5ff37c68c941 100644
--- a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
+++ b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
@@ -1,10 +1,5 @@
* Microsemi - vsc8531 Giga bit ethernet phy
-Required properties:
-- compatible : Should contain phy id as "ethernet-phy-idAAAA.BBBB"
- The PHY device uses the binding described in
- Documentation/devicetree/bindings/net/phy.txt
-
Optional properties:
- vsc8531,vddmac : The vddmac in mV. Allowed values are listed
in the first row of Table 1 (below).
@@ -27,14 +22,16 @@ Optional properties:
'vddmac'.
Default value is 0%.
Ref: Table:1 - Edge rate change (below).
-- vsc8531,led-0-mode : LED mode. Specify how the LED[0] should behave.
- Allowed values are define in
- "include/dt-bindings/net/mscc-phy-vsc8531.h".
- Default value is VSC8531_LINK_1000_ACTIVITY (1).
-- vsc8531,led-1-mode : LED mode. Specify how the LED[1] should behave.
- Allowed values are define in
+- vsc8531,led-[N]-mode : LED mode. Specify how the LED[N] should behave.
+ N depends on the number of LEDs supported by a
+ PHY.
+ Allowed values are defined in
"include/dt-bindings/net/mscc-phy-vsc8531.h".
- Default value is VSC8531_LINK_100_ACTIVITY (2).
+ Default values are VSC8531_LINK_1000_ACTIVITY (1),
+ VSC8531_LINK_100_ACTIVITY (2),
+ VSC8531_LINK_ACTIVITY (0) and
+ VSC8531_DUPLEX_COLLISION (8).
+
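A hypothetical PHY node sketch using these properties (the PHY address and
the surrounding bus are made up; only the vsc8531,* values matter here):

	#include <dt-bindings/net/mscc-phy-vsc8531.h>

	vsc8531_0: ethernet-phy@0 {
		reg = <0>;
		vsc8531,vddmac = <3300>;
		vsc8531,led-0-mode = <VSC8531_LINK_1000_ACTIVITY>;
		vsc8531,led-1-mode = <VSC8531_LINK_100_ACTIVITY>;
	};
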
Table: 1 - Edge rate change
----------------------------------------------------------------|
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index da249b7c406c..3530256a879c 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -6,6 +6,7 @@ interface contains.
Required properties:
- compatible: Must contain one or more of the following:
- "renesas,etheravb-r8a7743" for the R8A7743 SoC.
+ - "renesas,etheravb-r8a7744" for the R8A7744 SoC.
- "renesas,etheravb-r8a7745" for the R8A7745 SoC.
- "renesas,etheravb-r8a77470" for the R8A77470 SoC.
- "renesas,etheravb-r8a7790" for the R8A7790 SoC.
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index 7fd4e8ce4149..2196d1ab3c8c 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -56,6 +56,11 @@ Optional properties:
the length can vary between hw versions.
- <supply-name>-supply: handle to the regulator device tree node
optional "supply-name" is "vdd-0.8-cx-mx".
+- memory-region:
+ Usage: optional
+ Value type: <phandle>
+ Definition: reference to the reserved-memory for the msa region
+ used by the wifi firmware running in Q6.
Example (to supply the calibration data alone):
@@ -149,4 +154,5 @@ wifi@18000000 {
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+ memory-region = <&wifi_msa_mem>;
};
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index cb33421184a0..f37494d5a7be 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -50,6 +50,7 @@ Additional required properties for imx7d-pcie:
 - reset-names: Must contain the following entries:
- "pciephy"
- "apps"
+ - "turnoff"
Example:
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt
index 4dd17de549a7..2030ee0dc4f9 100644
--- a/Documentation/devicetree/bindings/pci/pci-keystone.txt
+++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt
@@ -19,6 +19,9 @@ pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
interrupt-cells: should be set to 1
interrupts: GIC interrupt lines connected to PCI MSI interrupt lines
+ti,syscon-pcie-id : phandle to the device control module required to set device
+ id and vendor id.
+
Example:
pcie_msi_intc: msi-interrupt-controller {
interrupt-controller;
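
The new ti,syscon-pcie-id property itself lives in the PCIe controller node;
a hypothetical sketch (the phandle label and register values are assumptions
for illustration):

	pcie0: pcie@21800000 {
		compatible = "ti,keystone-pcie";
		reg = <0x21800000 0x1000>, <0x21801000 0x2000>;
		/* device control module used to program device/vendor id */
		ti,syscon-pcie-id = <&devctrl>;
	};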
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
index 9fe7e12a7bf3..b94078f58d8e 100644
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
@@ -7,6 +7,7 @@ OHCI and EHCI controllers.
Required properties:
- compatible: "renesas,pci-r8a7743" for the R8A7743 SoC;
+ "renesas,pci-r8a7744" for the R8A7744 SoC;
"renesas,pci-r8a7745" for the R8A7745 SoC;
"renesas,pci-r8a7790" for the R8A7790 SoC;
"renesas,pci-r8a7791" for the R8A7791 SoC;
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index a5f7fc62d10e..976ef7bfff93 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -2,6 +2,7 @@
Required properties:
compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
+ "renesas,pcie-r8a7744" for the R8A7744 SoC;
"renesas,pcie-r8a7779" for the R8A7779 SoC;
"renesas,pcie-r8a7790" for the R8A7790 SoC;
"renesas,pcie-r8a7791" for the R8A7791 SoC;
@@ -9,6 +10,7 @@ compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
"renesas,pcie-r8a7795" for the R8A7795 SoC;
"renesas,pcie-r8a7796" for the R8A7796 SoC;
"renesas,pcie-r8a77980" for the R8A77980 SoC;
+ "renesas,pcie-r8a77990" for the R8A77990 SoC;
"renesas,pcie-rcar-gen2" for a generic R-Car Gen2 or
RZ/G1 compatible device.
"renesas,pcie-rcar-gen3" for a generic R-Car Gen3 compatible device.
diff --git a/Documentation/devicetree/bindings/pci/ti-pci.txt b/Documentation/devicetree/bindings/pci/ti-pci.txt
index 7f7af3044016..452fe48c4fdd 100644
--- a/Documentation/devicetree/bindings/pci/ti-pci.txt
+++ b/Documentation/devicetree/bindings/pci/ti-pci.txt
@@ -26,6 +26,11 @@ HOST MODE
ranges,
interrupt-map-mask,
interrupt-map : as specified in ../designware-pcie.txt
+ - ti,syscon-unaligned-access: phandle to the syscon DT node. The first
+ argument should contain the register offset within the syscon
+ and the second argument should contain the bit position
+ used to enable unaligned access. See the sketch below.
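
A hedged sketch of the property in a host-mode PCIe node; the syscon label,
register offset and bit position are placeholders, not taken from a real SoC:

	pcie@51000000 {
		/* ... existing required properties ... */
		/* <syscon phandle, register offset, bit position> - values assumed */
		ti,syscon-unaligned-access = <&scm_conf 0x14 1>;
	};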
DEVICE MODE
===========
diff --git a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt b/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
index 0aced97d8092..b640845fec67 100644
--- a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
+++ b/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
@@ -8,6 +8,7 @@ Required properties:
"brcm,iproc-nsp-sata-phy"
"brcm,phy-sata3"
"brcm,iproc-sr-sata-phy"
+ "brcm,bcm63138-sata-phy"
- address-cells: should be 1
- size-cells: should be 0
- reg: register ranges for the PHY PCB interface
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-dp.txt b/Documentation/devicetree/bindings/phy/phy-cadence-dp.txt
new file mode 100644
index 000000000000..7f49fd54ebc1
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-cadence-dp.txt
@@ -0,0 +1,30 @@
+Cadence MHDP DisplayPort SD0801 PHY binding
+===========================================
+
+This binding describes the Cadence SD0801 PHY hardware included with
+the Cadence MHDP DisplayPort controller.
+
+-------------------------------------------------------------------------------
+Required properties (controller (parent) node):
+- compatible : Should be "cdns,dp-phy"
+- reg : Defines the following sets of registers in the parent
+ mhdp device:
+ - Offset of the DPTX PHY configuration registers
+ - Offset of the SD0801 PHY configuration registers
+- #phy-cells : from the generic PHY bindings, must be 0.
+
+Optional properties:
+- num_lanes : Number of DisplayPort lanes to use (1, 2 or 4)
+- max_bit_rate : Maximum DisplayPort link bit rate to use, in Mbps (2160,
+ 2430, 2700, 3240, 4320, 5400 or 8100)
+-------------------------------------------------------------------------------
+
+Example:
+ dp_phy: phy@f0fb030a00 {
+ compatible = "cdns,dp-phy";
+ reg = <0xf0 0xfb030a00 0x0 0x00000040>,
+ <0xf0 0xfb500000 0x0 0x00100000>;
+ num_lanes = <4>;
+ max_bit_rate = <8100>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-ocelot-serdes.txt b/Documentation/devicetree/bindings/phy/phy-ocelot-serdes.txt
new file mode 100644
index 000000000000..332219860187
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-ocelot-serdes.txt
@@ -0,0 +1,43 @@
+Microsemi Ocelot SerDes muxing driver
+-------------------------------------
+
+On Microsemi Ocelot, there is a handful of registers in HSIO address
+space for setting up the SerDes to switch port muxing.
+
+A SerDes X can be "muxed" to work with switch port Y or Z for example.
+One specific SerDes can also be used as a PCIe interface.
+
+Hence, a SerDes represents an interface, be it an Ethernet or a PCIe one.
+
+There are two kinds of SerDes: SERDES1G supports 10/100Mbps in
+half/full-duplex and 1000Mbps in full-duplex mode while SERDES6G supports
+10/100Mbps in half/full-duplex and 1000/2500Mbps in full-duplex mode.
+
+Also, SERDES6G number (aka "macro") 0 is the only interface supporting
+QSGMII.
+
+This is a child of the HSIO syscon ("mscc,ocelot-hsio", see
+Documentation/devicetree/bindings/mips/mscc.txt) on the Microsemi Ocelot.
+
+Required properties:
+
+- compatible: should be "mscc,vsc7514-serdes"
+- #phy-cells : from the generic phy bindings, must be 2.
+ The first number defines the input port to use for a given
+ SerDes macro. The second defines the macro to use. They are
+ defined in dt-bindings/phy/phy-ocelot-serdes.h
+
+Example:
+
+ serdes: serdes {
+ compatible = "mscc,vsc7514-serdes";
+ #phy-cells = <2>;
+ };
+
+ ethernet {
+ port1 {
+ phy-handle = <&phy_foo>;
+ /* Link SERDES1G_5 to port1 */
+ phys = <&serdes 1 SERDES1G_5>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-hdmi.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-hdmi.txt
new file mode 100644
index 000000000000..710cccd5ee56
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-hdmi.txt
@@ -0,0 +1,43 @@
+ROCKCHIP HDMI PHY WITH INNO IP BLOCK
+
+Required properties:
+ - compatible : should be one of the listed compatibles:
+ * "rockchip,rk3228-hdmi-phy",
+ * "rockchip,rk3328-hdmi-phy";
+ - reg : Address and length of the hdmi phy control register set
+ - clocks : phandle + clock specifier for the phy clocks
+ - clock-names : string, clock name, must contain "sysclk" for system
+ control and register configuration, "refoclk" for crystal-
+ oscillator reference PLL clock input and "refpclk" for pclk-
+ based reference PLL clock input.
+ - #clock-cells: should be 0.
+ - clock-output-names : shall be the name for the output clock.
+ - interrupts : phandle + interrupt specifier for the hdmiphy interrupt
+ - #phy-cells : must be 0. See ./phy-bindings.txt for details.
+
+Optional properties for rk3328-hdmi-phy:
+ - nvmem-cells : phandle + nvmem specifier for the cpu-version efuse
+ - nvmem-cell-names : "cpu-version" to read the chip version, required
+ for adjustment to some frequency settings
+
+Example:
+ hdmi_phy: hdmi-phy@12030000 {
+ compatible = "rockchip,rk3228-hdmi-phy";
+ reg = <0x12030000 0x10000>;
+ #phy-cells = <0>;
+ clocks = <&cru PCLK_HDMI_PHY>, <&xin24m>, <&cru DCLK_HDMIPHY>;
+ clock-names = "sysclk", "refoclk", "refpclk";
+ #clock-cells = <0>;
+ clock-output-names = "hdmi_phy";
+ status = "disabled";
+ };
+
+Then the PHY can be used in other nodes such as:
+
+ hdmi: hdmi@200a0000 {
+ compatible = "rockchip,rk3228-dw-hdmi";
+ ...
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi";
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
index 0c7629e88bf3..adf20b2bdf71 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -10,16 +10,20 @@ Required properties:
"qcom,msm8996-qmp-pcie-phy" for 14nm PCIe phy on msm8996,
"qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996,
"qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
- "qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845.
+ "qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845,
+ "qcom,sdm845-qmp-ufs-phy" for UFS QMP phy on sdm845.
- - reg:
- - For "qcom,sdm845-qmp-usb3-phy":
- - index 0: address and length of register set for PHY's common serdes
- block.
- - named register "dp_com" (using reg-names): address and length of the
- DP_COM control block.
- - For all others:
- - offset and length of register set for PHY's common serdes block.
+- reg:
+ - index 0: address and length of register set for PHY's common
+ serdes block.
+ - index 1: address and length of the DP_COM control block (for
+ "qcom,sdm845-qmp-usb3-phy" only).
+
+- reg-names:
+ - For "qcom,sdm845-qmp-usb3-phy":
+ - Should be: "reg-base", "dp_com"
+ - For all others:
+ - The reg-names property shouldn't be defined.
- #clock-cells: must be 1
- Phy pll outputs a bunch of clocks for Tx, Rx and Pipe
@@ -35,6 +39,7 @@ Required properties:
"aux" for phy aux clock,
"ref" for 19.2 MHz ref clk,
"com_aux" for phy common block aux clock,
+ "ref_aux" for phy reference aux clock,
For "qcom,msm8996-qmp-pcie-phy" must contain:
"aux", "cfg_ahb", "ref".
For "qcom,msm8996-qmp-usb3-phy" must contain:
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
index fb4a204da2bf..de7b5393c163 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
@@ -1,10 +1,12 @@
* Renesas R-Car generation 3 USB 2.0 PHY
This file provides information on what the device node for the R-Car generation
-3 USB 2.0 PHY contains.
+3 and RZ/G2 USB 2.0 PHY contain.
Required properties:
-- compatible: "renesas,usb2-phy-r8a7795" if the device is a part of an R8A7795
+- compatible: "renesas,usb2-phy-r8a774a1" if the device is a part of an R8A774A1
+ SoC.
+ "renesas,usb2-phy-r8a7795" if the device is a part of an R8A7795
SoC.
"renesas,usb2-phy-r8a7796" if the device is a part of an R8A7796
SoC.
@@ -14,7 +16,8 @@ Required properties:
R8A77990 SoC.
"renesas,usb2-phy-r8a77995" if the device is a part of an
R8A77995 SoC.
- "renesas,rcar-gen3-usb2-phy" for a generic R-Car Gen3 compatible device.
+ "renesas,rcar-gen3-usb2-phy" for a generic R-Car Gen3 or RZ/G2
+ compatible device.
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first
@@ -31,6 +34,8 @@ channel as USB OTG:
- interrupts: interrupt specifier for the PHY.
- vbus-supply: Phandle to a regulator that provides power to the VBUS. This
regulator will be managed during the PHY power on/off sequence.
+- renesas,no-otg-pins: boolean, to be set when a board does not provide proper
+ otg pins (see the sketch below).
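
A hedged sketch of a channel node using the new flag; the register address,
interrupt and clock specifiers are assumed rather than copied from a real
board:

	usb2_phy0: usb-phy@ee080200 {
		compatible = "renesas,usb2-phy-r8a7795", "renesas,rcar-gen3-usb2-phy";
		reg = <0 0xee080200 0 0x700>;		/* address assumed */
		interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&cpg CPG_MOD 703>;		/* clock specifier assumed */
		#phy-cells = <0>;
		renesas,no-otg-pins;
	};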
Example (R-Car H3):
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
index 47dd296ecead..9d9826609c2f 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
@@ -1,20 +1,22 @@
* Renesas R-Car generation 3 USB 3.0 PHY
This file provides information on what the device node for the R-Car generation
-3 USB 3.0 PHY contains.
+3 and RZ/G2 USB 3.0 PHY contain.
If you want to enable spread spectrum clock (ssc), you should use USB_EXTAL
instead of USB3_CLK. However, if you don't want to use these features, you don't
need this driver.
Required properties:
-- compatible: "renesas,r8a7795-usb3-phy" if the device is a part of an R8A7795
+- compatible: "renesas,r8a774a1-usb3-phy" if the device is a part of an R8A774A1
+ SoC.
+ "renesas,r8a7795-usb3-phy" if the device is a part of an R8A7795
SoC.
"renesas,r8a7796-usb3-phy" if the device is a part of an R8A7796
SoC.
"renesas,r8a77965-usb3-phy" if the device is a part of an
R8A77965 SoC.
- "renesas,rcar-gen3-usb3-phy" for a generic R-Car Gen3 compatible
- device.
+ "renesas,rcar-gen3-usb3-phy" for a generic R-Car Gen3 or RZ/G2
+ compatible device.
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first
diff --git a/Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt b/Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt
new file mode 100644
index 000000000000..1889d3b89d68
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt
@@ -0,0 +1,31 @@
+Socionext UniPhier PCIe PHY bindings
+
+This describes the devicetree bindings for the PHY interface built into
+the PCIe controller implemented on Socionext UniPhier SoCs.
+
+Required properties:
+- compatible: Should contain one of the following:
+ "socionext,uniphier-ld20-pcie-phy" - for LD20 PHY
+ "socionext,uniphier-pxs3-pcie-phy" - for PXs3 PHY
+- reg: Specifies offset and length of the register set for the device.
+- #phy-cells: Must be zero.
+- clocks: A phandle to the clock gate for PCIe glue layer including
+ this phy.
+- resets: A phandle to the reset line for PCIe glue layer including
+ this phy.
+
+Optional properties:
+- socionext,syscon: A phandle to system control to set configurations
+ for phy.
+
+Refer to phy/phy-bindings.txt for the generic PHY binding properties.
+
+Example:
+ pcie_phy: phy@66038000 {
+ compatible = "socionext,uniphier-ld20-pcie-phy";
+ reg = <0x66038000 0x4000>;
+ #phy-cells = <0>;
+ clocks = <&sys_clk 24>;
+ resets = <&sys_rst 24>;
+ socionext,syscon = <&soc_glue>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt b/Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt
new file mode 100644
index 000000000000..b43b28250cc0
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt
@@ -0,0 +1,45 @@
+Socionext UniPhier USB2 PHY
+
+This describes the devicetree bindings for the PHY interface built into
+the USB2 controller implemented on Socionext UniPhier SoCs.
+
+The Pro4 SoC has both USB2 and USB3 host controllers; however, its USB3
+controller doesn't include its own High-Speed PHY, so the USB2 PHY must be
+specified instead of a USB3 HS-PHY.
+
+Required properties:
+- compatible: Should contain one of the following:
+ "socionext,uniphier-pro4-usb2-phy" - for Pro4 SoC
+ "socionext,uniphier-ld11-usb2-phy" - for LD11 SoC
+
+Sub-nodes:
+Each PHY should be represented as a sub-node.
+
+Sub-nodes required properties:
+- #phy-cells: Should be 0.
+- reg: The number of the PHY.
+
+Sub-nodes optional properties:
+- vbus-supply: A phandle to the regulator for USB VBUS.
+
+Refer to phy/phy-bindings.txt for the generic PHY binding properties.
+
+Example:
+ soc-glue@5f800000 {
+ ...
+ usb-phy {
+ compatible = "socionext,uniphier-ld11-usb2-phy";
+ usb_phy0: phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+ ...
+ };
+ };
+
+ usb@5a800100 {
+ compatible = "socionext,uniphier-ehci", "generic-ehci";
+ ...
+ phy-names = "usb";
+ phys = <&usb_phy0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt b/Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt
new file mode 100644
index 000000000000..e8d8086a7ae9
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt
@@ -0,0 +1,69 @@
+Socionext UniPhier USB3 High-Speed (HS) PHY
+
+This describes the devicetree bindings for the PHY interfaces built into
+the USB3 controller implemented on Socionext UniPhier SoCs.
+Although the controller includes both a High-Speed PHY and a Super-Speed
+PHY, this document describes the High-Speed PHY.
+
+Required properties:
+- compatible: Should contain one of the following:
+ "socionext,uniphier-pro4-usb3-hsphy" - for Pro4 SoC
+ "socionext,uniphier-pxs2-usb3-hsphy" - for PXs2 SoC
+ "socionext,uniphier-ld20-usb3-hsphy" - for LD20 SoC
+ "socionext,uniphier-pxs3-usb3-hsphy" - for PXs3 SoC
+- reg: Specifies offset and length of the register set for the device.
+- #phy-cells: Should be 0.
+- clocks: A list of phandles to the clock gate for USB3 glue layer.
+ According to the clock-names, appropriate clocks are required.
+- clock-names: Should contain the following:
+ "gio", "link" - for Pro4 SoC
+ "phy", "phy-ext", "link" - for PXs3 SoC, "phy-ext" is optional.
+ "phy", "link" - for others
+- resets: A list of phandles to the reset control for USB3 glue layer.
+ According to the reset-names, appropriate resets are required.
+- reset-names: Should contain the following:
+ "gio", "link" - for Pro4 SoC
+ "phy", "link" - for others
+
+Optional properties:
+- vbus-supply: A phandle to the regulator for USB VBUS.
+- nvmem-cells: Phandles to the nvmem cells that contain the trimming data.
+ Available only for the HS-PHY implemented on LD20 and PXs3;
+ if unspecified, the default value is used.
+- nvmem-cell-names: Should be the following names, which correspond to
+ each of the nvmem-cells.
+ All 3 parameters associated with the following names are
+ required for each port; if any one is omitted, the trimming data
+ of the port will not be set at all.
+ "rterm", "sel_t", "hs_i" - Each cell name for phy parameters
+
+Refer to phy/phy-bindings.txt for the generic PHY binding properties.
+
+Example:
+
+ usb-glue@65b00000 {
+ compatible = "socionext,uniphier-ld20-dwc3-glue",
+ "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x65b00000 0x400>;
+
+ usb_vbus0: regulator {
+ ...
+ };
+
+ usb_hsphy0: hs-phy@200 {
+ compatible = "socionext,uniphier-ld20-usb3-hsphy";
+ reg = <0x200 0x10>;
+ #phy-cells = <0>;
+ clock-names = "link", "phy";
+ clocks = <&sys_clk 14>, <&sys_clk 16>;
+ reset-names = "link", "phy";
+ resets = <&sys_rst 14>, <&sys_rst 16>;
+ vbus-supply = <&usb_vbus0>;
+ nvmem-cell-names = "rterm", "sel_t", "hs_i";
+ nvmem-cells = <&usb_rterm0>, <&usb_sel_t0>,
+ <&usb_hs_i0>;
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt b/Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt
new file mode 100644
index 000000000000..490b815445e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt
@@ -0,0 +1,57 @@
+Socionext UniPhier USB3 Super-Speed (SS) PHY
+
+This describes the devicetree bindings for the PHY interfaces built into
+the USB3 controller implemented on Socionext UniPhier SoCs.
+Although the controller includes both a High-Speed PHY and a Super-Speed
+PHY, this document describes the Super-Speed PHY.
+
+Required properties:
+- compatible: Should contain one of the following:
+ "socionext,uniphier-pro4-usb3-ssphy" - for Pro4 SoC
+ "socionext,uniphier-pxs2-usb3-ssphy" - for PXs2 SoC
+ "socionext,uniphier-ld20-usb3-ssphy" - for LD20 SoC
+ "socionext,uniphier-pxs3-usb3-ssphy" - for PXs3 SoC
+- reg: Specifies offset and length of the register set for the device.
+- #phy-cells: Should be 0.
+- clocks: A list of phandles to the clock gate for USB3 glue layer.
+ According to the clock-names, appropriate clocks are required.
+- clock-names:
+ "gio", "link" - for Pro4 SoC
+ "phy", "phy-ext", "link" - for PXs3 SoC, "phy-ext" is optional.
+ "phy", "link" - for others
+- resets: A list of phandles to the reset control for USB3 glue layer.
+ According to the reset-names, appropriate resets are required.
+- reset-names:
+ "gio", "link" - for Pro4 SoC
+ "phy", "link" - for others
+
+Optional properties:
+- vbus-supply: A phandle to the regulator for USB VBUS.
+
+Refer to phy/phy-bindings.txt for the generic PHY binding properties.
+
+Example:
+
+ usb-glue@65b00000 {
+ compatible = "socionext,uniphier-ld20-dwc3-glue",
+ "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x65b00000 0x400>;
+
+ usb_vbus0: regulator {
+ ...
+ };
+
+ usb_ssphy0: ss-phy@300 {
+ compatible = "socionext,uniphier-ld20-usb3-ssphy";
+ reg = <0x300 0x10>;
+ #phy-cells = <0>;
+ clock-names = "link", "phy";
+ clocks = <&sys_clk 14>, <&sys_clk 16>;
+ reset-names = "link", "phy";
+ resets = <&sys_rst 14>, <&sys_rst 16>;
+ vbus-supply = <&usb_vbus0>;
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm4708-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/brcm,bcm4708-pinmux.txt
new file mode 100644
index 000000000000..4fa9539070cb
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm4708-pinmux.txt
@@ -0,0 +1,57 @@
+Broadcom Northstar pins mux controller
+
+Some of the Northstar SoCs' pins can be used for various purposes thanks to the
+mux controller. This binding allows describing the mux controller and listing
+the available functions, which can be referenced later by other bindings to let
+the system configure the controller correctly.
+
+The list of pins varies across chipsets, so a few bindings are available.
+
+Required properties:
+- compatible: must be one of:
+ "brcm,bcm4708-pinmux"
+ "brcm,bcm4709-pinmux"
+ "brcm,bcm53012-pinmux"
+- reg: iomem address range of CRU (Central Resource Unit) pin registers
+- reg-names: "cru_gpio_control" - the only needed & supported reg right now
+
+Functions and their groups available for all chipsets:
+- "spi": "spi_grp"
+- "i2c": "i2c_grp"
+- "pwm": "pwm0_grp", "pwm1_grp", "pwm2_grp", "pwm3_grp"
+- "uart1": "uart1_grp"
+
+Additionally available on BCM4709 and BCM53012:
+- "mdio": "mdio_grp"
+- "uart2": "uart2_grp"
+- "sdio": "sdio_pwr_grp", "sdio_1p8v_grp"
+
+For documentation of subnodes see:
+Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example:
+ dmu@1800c000 {
+ compatible = "simple-bus";
+ ranges = <0 0x1800c000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cru@100 {
+ compatible = "simple-bus";
+ reg = <0x100 0x1a4>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pin-controller@1c0 {
+ compatible = "brcm,bcm4708-pinmux";
+ reg = <0x1c0 0x24>;
+ reg-names = "cru_gpio_control";
+
+ spi-pins {
+ function = "spi";
+ groups = "spi_grp";
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt
index ca313a7aeaff..af20b0ec715c 100644
--- a/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt
@@ -20,16 +20,30 @@ Required properties:
- compatible: One of:
- "ingenic,jz4740-pinctrl"
+ - "ingenic,jz4725b-pinctrl"
- "ingenic,jz4770-pinctrl"
- "ingenic,jz4780-pinctrl"
- reg: Address range of the pinctrl registers.
-GPIO sub-nodes
---------------
+Required properties for sub-nodes (GPIO chips):
+-----------------------------------------------
-The pinctrl node can have optional sub-nodes for the Ingenic GPIO driver;
-please refer to ../gpio/ingenic,gpio.txt.
+ - compatible: Must contain one of:
+ - "ingenic,jz4740-gpio"
+ - "ingenic,jz4770-gpio"
+ - "ingenic,jz4780-gpio"
+ - reg: The GPIO bank number.
+ - interrupt-controller: Marks the device node as an interrupt controller.
+ - interrupts: Interrupt specifier for the controller's interrupt.
+ - #interrupt-cells: Should be 2. Refer to
+ ../interrupt-controller/interrupts.txt for more details.
+ - gpio-controller: Marks the device node as a GPIO controller.
+ - #gpio-cells: Should be 2. The first cell is the GPIO number and the second
+ cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>. Only the
+ GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
+ - gpio-ranges: Range of pins managed by the GPIO controller. Refer to
+ ../gpio/gpio.txt for more details.
Example:
@@ -38,4 +52,21 @@ Example:
pinctrl: pin-controller@10010000 {
compatible = "ingenic,jz4740-pinctrl";
reg = <0x10010000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpa: gpio@0 {
+ compatible = "ingenic,jz4740-gpio";
+ reg = <0>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <28>;
+ };
};
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index 54ecb8ab7788..82ead40311f6 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -13,6 +13,8 @@ Required properties for the root node:
"amlogic,meson-gxl-aobus-pinctrl"
"amlogic,meson-axg-periphs-pinctrl"
"amlogic,meson-axg-aobus-pinctrl"
+ "amlogic,meson-g12a-periphs-pinctrl"
+ "amlogic,meson-g12a-aobus-pinctrl"
- reg: address and size of registers controlling irq functionality
=== GPIO sub-nodes ===
diff --git a/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm7xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm7xx-pinctrl.txt
new file mode 100644
index 000000000000..83f4bbac94bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/nuvoton,npcm7xx-pinctrl.txt
@@ -0,0 +1,216 @@
+Nuvoton NPCM7XX Pin Controllers
+
+The Nuvoton BMC NPCM7XX pin controller provides multi-function pins routed
+through the multiplexing block. Each pin supports GPIO functionality (GPIOx)
+and multiple functions that directly connect the pin to different
+hardware blocks.
+
+Required properties:
+- #address-cells : should be 1.
+- #size-cells : should be 1.
+- compatible : "nuvoton,npcm750-pinctrl" for Poleg NPCM7XX.
+- ranges : defines mapping ranges between pin controller node (parent)
+ to GPIO bank node (children).
+
+=== GPIO Bank Subnode ===
+
+The NPCM7XX has 8 GPIO banks; each GPIO bank supports 32 GPIOs.
+
+Required GPIO Bank subnode-properties:
+- reg : specifies physical base address and size of the GPIO
+ bank registers.
+- gpio-controller : Marks the device node as a GPIO controller.
+- #gpio-cells : Must be <2>. The first cell is the gpio pin number
+ and the second cell is used for optional parameters.
+- interrupts : contain the GPIO bank interrupt with flags for falling edge.
+- gpio-ranges : defines the range of pins managed by the GPIO bank controller.
+
+For example, GPIO bank subnodes like the following:
+ gpio0: gpio@f0010000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x0 0x80>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ };
+
+=== Pin Mux Subnode ===
+
+- pins: A string containing the name of a pin, or an array of strings,
+ each containing the name of a pin.
+ These pins are used for selecting the pin configuration.
+
+The following is the list of available pins:
+ "GPIO0/IOX1DI", "GPIO1/IOX1LD", "GPIO2/IOX1CK", "GPIO3/IOX1D0",
+ "GPIO4/IOX2DI/SMB1DSDA", "GPIO5/IOX2LD/SMB1DSCL", "GPIO6/IOX2CK/SMB2DSDA",
+ "GPIO7/IOX2D0/SMB2DSCL", "GPIO8/LKGPO1", "GPIO9/LKGPO2", "GPIO10/IOXHLD",
+ "GPIO11/IOXHCK", "GPIO12/GSPICK/SMB5BSCL", "GPIO13/GSPIDO/SMB5BSDA",
+ "GPIO14/GSPIDI/SMB5CSCL", "GPIO15/GSPICS/SMB5CSDA", "GPIO16/LKGPO0",
+ "GPIO17/PSPI2DI/SMB4DEN","GPIO18/PSPI2D0/SMB4BSDA", "GPIO19/PSPI2CK/SMB4BSCL",
+ "GPIO20/SMB4CSDA/SMB15SDA", "GPIO21/SMB4CSCL/SMB15SCL", "GPIO22/SMB4DSDA/SMB14SDA",
+ "GPIO23/SMB4DSCL/SMB14SCL", "GPIO24/IOXHDO", "GPIO25/IOXHDI", "GPIO26/SMB5SDA",
+ "GPIO27/SMB5SCL", "GPIO28/SMB4SDA", "GPIO29/SMB4SCL", "GPIO30/SMB3SDA",
+ "GPIO31/SMB3SCL", "GPIO32/nSPI0CS1","SPI0D2", "SPI0D3", "GPIO37/SMB3CSDA",
+ "GPIO38/SMB3CSCL", "GPIO39/SMB3BSDA", "GPIO40/SMB3BSCL", "GPIO41/BSPRXD",
+ "GPO42/BSPTXD/STRAP11", "GPIO43/RXD1/JTMS2/BU1RXD", "GPIO44/nCTS1/JTDI2/BU1CTS",
+ "GPIO45/nDCD1/JTDO2", "GPIO46/nDSR1/JTCK2", "GPIO47/nRI1/JCP_RDY2",
+ "GPIO48/TXD2/BSPTXD", "GPIO49/RXD2/BSPRXD", "GPIO50/nCTS2", "GPO51/nRTS2/STRAP2",
+ "GPIO52/nDCD2", "GPO53/nDTR2_BOUT2/STRAP1", "GPIO54/nDSR2", "GPIO55/nRI2",
+ "GPIO56/R1RXERR", "GPIO57/R1MDC", "GPIO58/R1MDIO", "GPIO59/SMB3DSDA",
+ "GPIO60/SMB3DSCL", "GPO61/nDTR1_BOUT1/STRAP6", "GPO62/nRTST1/STRAP5",
+ "GPO63/TXD1/STRAP4", "GPIO64/FANIN0", "GPIO65/FANIN1", "GPIO66/FANIN2",
+ "GPIO67/FANIN3", "GPIO68/FANIN4", "GPIO69/FANIN5", "GPIO70/FANIN6", "GPIO71/FANIN7",
+ "GPIO72/FANIN8", "GPIO73/FANIN9", "GPIO74/FANIN10", "GPIO75/FANIN11",
+ "GPIO76/FANIN12", "GPIO77/FANIN13","GPIO78/FANIN14", "GPIO79/FANIN15",
+ "GPIO80/PWM0", "GPIO81/PWM1", "GPIO82/PWM2", "GPIO83/PWM3", "GPIO84/R2TXD0",
+ "GPIO85/R2TXD1", "GPIO86/R2TXEN", "GPIO87/R2RXD0", "GPIO88/R2RXD1", "GPIO89/R2CRSDV",
+ "GPIO90/R2RXERR", "GPIO91/R2MDC", "GPIO92/R2MDIO", "GPIO93/GA20/SMB5DSCL",
+ "GPIO94/nKBRST/SMB5DSDA", "GPIO95/nLRESET/nESPIRST", "GPIO96/RG1TXD0",
+ "GPIO97/RG1TXD1", "GPIO98/RG1TXD2", "GPIO99/RG1TXD3","GPIO100/RG1TXC",
+ "GPIO101/RG1TXCTL", "GPIO102/RG1RXD0", "GPIO103/RG1RXD1", "GPIO104/RG1RXD2",
+ "GPIO105/RG1RXD3", "GPIO106/RG1RXC", "GPIO107/RG1RXCTL", "GPIO108/RG1MDC",
+ "GPIO109/RG1MDIO", "GPIO110/RG2TXD0/DDRV0", "GPIO111/RG2TXD1/DDRV1",
+ "GPIO112/RG2TXD2/DDRV2", "GPIO113/RG2TXD3/DDRV3", "GPIO114/SMB0SCL",
+ "GPIO115/SMB0SDA", "GPIO116/SMB1SCL", "GPIO117/SMB1SDA", "GPIO118/SMB2SCL",
+ "GPIO119/SMB2SDA", "GPIO120/SMB2CSDA", "GPIO121/SMB2CSCL", "GPIO122/SMB2BSDA",
+ "GPIO123/SMB2BSCL", "GPIO124/SMB1CSDA", "GPIO125/SMB1CSCL","GPIO126/SMB1BSDA",
+ "GPIO127/SMB1BSCL", "GPIO128/SMB8SCL", "GPIO129/SMB8SDA", "GPIO130/SMB9SCL",
+ "GPIO131/SMB9SDA", "GPIO132/SMB10SCL", "GPIO133/SMB10SDA","GPIO134/SMB11SCL",
+ "GPIO135/SMB11SDA", "GPIO136/SD1DT0", "GPIO137/SD1DT1", "GPIO138/SD1DT2",
+ "GPIO139/SD1DT3", "GPIO140/SD1CLK", "GPIO141/SD1WP", "GPIO142/SD1CMD",
+ "GPIO143/SD1CD/SD1PWR", "GPIO144/PWM4", "GPIO145/PWM5", "GPIO146/PWM6",
+ "GPIO147/PWM7", "GPIO148/MMCDT4", "GPIO149/MMCDT5", "GPIO150/MMCDT6",
+ "GPIO151/MMCDT7", "GPIO152/MMCCLK", "GPIO153/MMCWP", "GPIO154/MMCCMD",
+ "GPIO155/nMMCCD/nMMCRST", "GPIO156/MMCDT0", "GPIO157/MMCDT1", "GPIO158/MMCDT2",
+ "GPIO159/MMCDT3", "GPIO160/CLKOUT/RNGOSCOUT", "GPIO161/nLFRAME/nESPICS",
+ "GPIO162/SERIRQ", "GPIO163/LCLK/ESPICLK", "GPIO164/LAD0/ESPI_IO0",
+ "GPIO165/LAD1/ESPI_IO1", "GPIO166/LAD2/ESPI_IO2", "GPIO167/LAD3/ESPI_IO3",
+ "GPIO168/nCLKRUN/nESPIALERT", "GPIO169/nSCIPME", "GPIO170/nSMI", "GPIO171/SMB6SCL",
+ "GPIO172/SMB6SDA", "GPIO173/SMB7SCL", "GPIO174/SMB7SDA", "GPIO175/PSPI1CK/FANIN19",
+ "GPIO176/PSPI1DO/FANIN18", "GPIO177/PSPI1DI/FANIN17", "GPIO178/R1TXD0",
+ "GPIO179/R1TXD1", "GPIO180/R1TXEN", "GPIO181/R1RXD0", "GPIO182/R1RXD1",
+ "GPIO183/SPI3CK", "GPO184/SPI3D0/STRAP9", "GPO185/SPI3D1/STRAP10",
+ "GPIO186/nSPI3CS0", "GPIO187/nSPI3CS1", "GPIO188/SPI3D2/nSPI3CS2",
+ "GPIO189/SPI3D3/nSPI3CS3", "GPIO190/nPRD_SMI", "GPIO191", "GPIO192", "GPIO193/R1CRSDV",
+ "GPIO194/SMB0BSCL", "GPIO195/SMB0BSDA", "GPIO196/SMB0CSCL", "GPIO197/SMB0DEN",
+ "GPIO198/SMB0DSDA", "GPIO199/SMB0DSCL", "GPIO200/R2CK", "GPIO201/R1CK",
+ "GPIO202/SMB0CSDA", "GPIO203/FANIN16", "GPIO204/DDC2SCL", "GPIO205/DDC2SDA",
+ "GPIO206/HSYNC2", "GPIO207/VSYNC2", "GPIO208/RG2TXC/DVCK", "GPIO209/RG2TXCTL/DDRV4",
+ "GPIO210/RG2RXD0/DDRV5", "GPIO211/RG2RXD1/DDRV6", "GPIO212/RG2RXD2/DDRV7",
+ "GPIO213/RG2RXD3/DDRV8", "GPIO214/RG2RXC/DDRV9", "GPIO215/RG2RXCTL/DDRV10",
+ "GPIO216/RG2MDC/DDRV11", "GPIO217/RG2MDIO/DVHSYNC", "GPIO218/nWDO1",
+ "GPIO219/nWDO2", "GPIO220/SMB12SCL", "GPIO221/SMB12SDA", "GPIO222/SMB13SCL",
+ "GPIO223/SMB13SDA", "GPIO224/SPIXCK", "GPO225/SPIXD0/STRAP12", "GPO226/SPIXD1/STRAP13",
+ "GPIO227/nSPIXCS0", "GPIO228/nSPIXCS1", "GPO229/SPIXD2/STRAP3", "GPIO230/SPIXD3",
+ "GPIO231/nCLKREQ", "GPI255/DACOSEL"
+
+Optional Properties:
+ bias-disable, bias-pull-down, bias-pull-up, input-enable,
+ input-disable, output-high, output-low, drive-push-pull,
+ drive-open-drain, input-debounce, slew-rate, drive-strength
+
+ slew-rate valid arguments are:
+ <0> - slow
+ <1> - fast
+ drive-strength valid arguments are:
+ <2> - 2mA
+ <4> - 4mA
+ <8> - 8mA
+ <12> - 12mA
+ <16> - 16mA
+ <24> - 24mA
+
+For example, pinctrl might have pinmux subnodes like the following:
+
+ gpio0_iox1d1_pin: gpio0-iox1d1-pin {
+ pins = "GPIO0/IOX1DI";
+ output-high;
+ };
+ gpio0_iox1ck_pin: gpio0-iox1ck-pin {
+ pins = "GPIO2/IOX1CK";
+ output-high;
+ };
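
A further hedged sketch showing the argument-taking properties from the
lists above; the pin choice is arbitrary:

	gpio8_drive_pin: gpio8-drive-pin {
		pins = "GPIO8/LKGPO1";	/* arbitrary pin from the list above */
		drive-strength = <8>;
		slew-rate = <1>;
	};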
+
+=== Pin Group Subnode ===
+
+Required pin group subnode-properties:
+- groups : A string containing the name of the group to mux.
+- function: A string containing the name of the function to mux to the
+ group.
+
+The following is the list of the available groups and functions:
+ smb0, smb0b, smb0c, smb0d, smb0den, smb1, smb1b, smb1c, smb1d,
+ smb2, smb2b, smb2c, smb2d, smb3, smb3b, smb3c, smb3d, smb4, smb4b,
+ smb4c, smb4d, smb4den, smb5, smb5b, smb5c, smb5d, ga20kbc, smb6,
+ smb7, smb8, smb9, smb10, smb11, smb12, smb13, smb14, smb15, fanin0,
+ fanin1, fanin2, fanin3, fanin4, fanin5, fanin6, fanin7, fanin8,
+ fanin9, fanin10, fanin11, fanin12, fanin13, fanin14, fanin15, faninx,
+ pwm0, pwm1, pwm2, pwm3, pwm4, pwm5, pwm6, pwm7, rg1, rg1mdio, rg2,
+ rg2mdio, ddr, uart1, uart2, bmcuart0a, bmcuart0b, bmcuart1, iox1,
+ iox2, ioxh, gspi, mmc, mmcwp, mmccd, mmcrst, mmc8, r1, r1err, r1md,
+ r2, r2err, r2md, sd1, sd1pwr, wdog1, wdog2, scipme, sci, serirq,
+ jtag2, spix, spixcs1, pspi1, pspi2, ddc, clkreq, clkout, spi3, spi3cs1,
+ spi3quad, spi3cs2, spi3cs3, spi0cs1, lpc, lpcclk, espi, lkgpo0, lkgpo1,
+ lkgpo2, nprd_smi
+
+For example, pinctrl might have group subnodes like the following:
+ r1err_pins: r1err-pins {
+ groups = "r1err";
+ function = "r1err";
+ };
+ r1md_pins: r1md-pins {
+ groups = "r1md";
+ function = "r1md";
+ };
+ r1_pins: r1-pins {
+ groups = "r1";
+ function = "r1";
+ };
+
+Examples
+========
+pinctrl: pinctrl@f0800000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "nuvoton,npcm750-pinctrl";
+ ranges = <0 0xf0010000 0x8000>;
+
+ gpio0: gpio@f0010000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x0 0x80>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ };
+
+ ....
+
+ gpio7: gpio@f0017000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x7000 0x80>;
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-ranges = <&pinctrl 0 224 32>;
+ };
+
+ gpio0_iox1d1_pin: gpio0-iox1d1-pin {
+ pins = "GPIO0/IOX1DI";
+ output-high;
+ };
+
+ iox1_pins: iox1-pins {
+ groups = "iox1";
+ function = "iox1";
+ };
+ iox2_pins: iox2-pins {
+ groups = "iox2";
+ function = "iox2";
+ };
+
+ ....
+
+ clkreq_pins: clkreq-pins {
+ groups = "clkreq";
+ function = "clkreq";
+ };
};
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index ffd4345415f3..ab4000eab07d 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -19,6 +19,7 @@ PMIC's from Qualcomm.
"qcom,pm8998-gpio"
"qcom,pma8084-gpio"
"qcom,pmi8994-gpio"
+ "qcom,pms405-gpio"
And must contain either "qcom,spmi-gpio" or "qcom,ssbi-gpio"
if the device is on an spmi bus or an ssbi bus respectively
@@ -91,6 +92,7 @@ to specify in a pin configuration subnode:
gpio1-gpio26 for pm8998
gpio1-gpio22 for pma8084
gpio1-gpio10 for pmi8994
+ gpio1-gpio11 for pms405
- function:
Usage: required
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,qcs404-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,qcs404-pinctrl.txt
new file mode 100644
index 000000000000..2b8f77762edc
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,qcs404-pinctrl.txt
@@ -0,0 +1,199 @@
+Qualcomm QCS404 TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+QCS404 platform.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,qcs404-pinctrl"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: the base address and size of the north, south and east TLMM
+ tiles.
+
+- reg-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: names for the cells of reg, must contain "north", "south"
+ and "east".
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as a gpio controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/gpio/gpio.h>
+
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode.
+
+ Valid pins are:
+ gpio0-gpio119
+ Supports mux, bias and drive-strength
+
+ sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd,
+ sdc2_data
+ Supports bias and drive-strength
+
+ ufs_reset
+ Supports bias and drive-strength
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Functions are only valid for gpio pins.
+ Valid values are:
+
+ gpio, hdmi_tx, hdmi_ddc, blsp_uart_tx_a2, blsp_spi2, m_voc,
+ qdss_cti_trig_in_a0, blsp_uart_rx_a2, qdss_tracectl_a,
+ blsp_uart2, aud_cdc, blsp_i2c_sda_a2, qdss_tracedata_a,
+ blsp_i2c_scl_a2, qdss_tracectl_b, qdss_cti_trig_in_b0,
+ blsp_uart1, blsp_spi_mosi_a1, blsp_spi_miso_a1,
+ qdss_tracedata_b, blsp_i2c1, blsp_spi_cs_n_a1, gcc_plltest,
+ blsp_spi_clk_a1, rgb_data0, blsp_uart5, blsp_spi5,
+ adsp_ext, rgb_data1, prng_rosc, rgb_data2, blsp_i2c5,
+ gcc_gp1_clk_b, rgb_data3, gcc_gp2_clk_b, blsp_spi0,
+ blsp_uart0, gcc_gp3_clk_b, blsp_i2c0, qdss_traceclk_b,
+ pcie_clk, nfc_irq, blsp_spi4, nfc_dwl, audio_ts, rgb_data4,
+ spi_lcd, blsp_uart_tx_b2, gcc_gp3_clk_a, rgb_data5,
+ blsp_uart_rx_b2, blsp_i2c_sda_b2, blsp_i2c_scl_b2,
+ pwm_led11, i2s_3_data0_a, ebi2_lcd, i2s_3_data1_a,
+ i2s_3_data2_a, atest_char, pwm_led3, i2s_3_data3_a,
+ pwm_led4, i2s_4, ebi2_a, dsd_clk_b, pwm_led5, pwm_led6,
+ pwm_led7, pwm_led8, pwm_led24, spkr_dac0, blsp_i2c4,
+ pwm_led9, pwm_led10, spdifrx_opt, pwm_led12, pwm_led13,
+ pwm_led14, wlan1_adc1, rgb_data_b0, pwm_led15,
+ blsp_spi_mosi_b1, wlan1_adc0, rgb_data_b1, pwm_led16,
+ blsp_spi_miso_b1, qdss_cti_trig_out_b0, wlan2_adc1,
+ rgb_data_b2, pwm_led17, blsp_spi_cs_n_b1, wlan2_adc0,
+ rgb_data_b3, pwm_led18, blsp_spi_clk_b1, rgb_data_b4,
+ pwm_led19, ext_mclk1_b, qdss_traceclk_a, rgb_data_b5,
+ pwm_led20, atest_char3, i2s_3_sck_b, ldo_update, bimc_dte0,
+ rgb_hsync, pwm_led21, i2s_3_ws_b, dbg_out, rgb_vsync,
+ i2s_3_data0_b, ldo_en, hdmi_dtest, rgb_de, i2s_3_data1_b,
+ hdmi_lbk9, rgb_clk, atest_char1, i2s_3_data2_b, ebi_cdc,
+ hdmi_lbk8, rgb_mdp, atest_char0, i2s_3_data3_b, hdmi_lbk7,
+ rgb_data_b6, rgb_data_b7, hdmi_lbk6, rgmii_int, cri_trng1,
+ rgmii_wol, cri_trng0, gcc_tlmm, rgmii_ck, rgmii_tx,
+ hdmi_lbk5, hdmi_pixel, hdmi_rcv, hdmi_lbk4, rgmii_ctl,
+ ext_lpass, rgmii_rx, cri_trng, hdmi_lbk3, hdmi_lbk2,
+ qdss_cti_trig_out_b1, rgmii_mdio, hdmi_lbk1, rgmii_mdc,
+ hdmi_lbk0, ir_in, wsa_en, rgb_data6, rgb_data7,
+ atest_char2, ebi_ch0, blsp_uart3, blsp_spi3, sd_write,
+ blsp_i2c3, gcc_gp1_clk_a, qdss_cti_trig_in_b1,
+ gcc_gp2_clk_a, ext_mclk0, mclk_in1, i2s_1, dsd_clk_a,
+ qdss_cti_trig_in_a1, rgmi_dll1, pwm_led22, pwm_led23,
+ qdss_cti_trig_out_a0, rgmi_dll2, pwm_led1,
+ qdss_cti_trig_out_a1, pwm_led2, i2s_2, pll_bist,
+ ext_mclk1_a, mclk_in2, bimc_dte1, i2s_3_sck_a, i2s_3_ws_a
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+ Not valid for sdc pins.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+ Not valid for sdc pins.
+
+- drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,qcs404-pinctrl";
+ reg = <0x01000000 0x200000>,
+ <0x01300000 0x200000>,
+ <0x07b00000 0x200000>;
+ reg-names = "south", "north", "east";
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 120>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
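
The example above only shows the controller node itself. A hedged sketch of
a pin configuration subnode; the pin numbers picked for blsp_uart2 are
placeholders:

	blsp1_uart2_default: blsp1-uart2-default {
		pins = "gpio17", "gpio18";	/* pin numbers assumed */
		function = "blsp_uart2";
		drive-strength = <2>;
		bias-disable;
	};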
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdm660-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,sdm660-pinctrl.txt
new file mode 100644
index 000000000000..769ca83bb40d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm660-pinctrl.txt
@@ -0,0 +1,191 @@
+Qualcomm Technologies, Inc. SDM660 TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+SDM660 platform.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,sdm660-pinctrl" or
+ "qcom,sdm630-pinctrl".
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: the base address and size of the north, center and south
+ TLMM tiles.
+
+- reg-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: names for the cells of reg, must contain "north", "center"
+ and "south".
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as a gpio controller
+
+- gpio-ranges:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Specifies the mapping between gpio controller and
+ pin-controller pins.
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/gpio/gpio.h>
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode. Valid pins are:
+ gpio0-gpio113,
+ Supports mux, bias and drive-strength
+ sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd, sdc2_data, sdc1_rclk,
+ Supports bias and drive-strength
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Functions are only valid for gpio pins.
+ Valid values are:
+ adsp_ext, agera_pll, atest_char, atest_char0, atest_char1,
+ atest_char2, atest_char3, atest_gpsadc0, atest_gpsadc1,
+ atest_tsens, atest_tsens2, atest_usb1, atest_usb10,
+ atest_usb11, atest_usb12, atest_usb13, atest_usb2,
+ atest_usb20, atest_usb21, atest_usb22, atest_usb23,
+ audio_ref, bimc_dte0, bimc_dte1, blsp_i2c1, blsp_i2c2,
+ blsp_i2c3, blsp_i2c4, blsp_i2c5, blsp_i2c6, blsp_i2c7,
+ blsp_i2c8_a, blsp_i2c8_b, blsp_spi1, blsp_spi2, blsp_spi3,
+ blsp_spi3_cs1, blsp_spi3_cs2, blsp_spi4, blsp_spi5,
+ blsp_spi6, blsp_spi7, blsp_spi8_a, blsp_spi8_b,
+ blsp_spi8_cs1, blsp_spi8_cs2, blsp_uart1, blsp_uart2,
+ blsp_uart5, blsp_uart6_a, blsp_uart6_b, blsp_uim1,
+ blsp_uim2, blsp_uim5, blsp_uim6, cam_mclk, cci_async,
+ cci_i2c, cri_trng, cri_trng0, cri_trng1, dbg_out, ddr_bist,
+ gcc_gp1, gcc_gp2, gcc_gp3, gpio, gps_tx_a, gps_tx_b, gps_tx_c,
+ isense_dbg, jitter_bist, ldo_en, ldo_update, m_voc, mdp_vsync,
+ mdss_vsync0, mdss_vsync1, mdss_vsync2, mdss_vsync3, mss_lte,
+ nav_pps_a, nav_pps_b, nav_pps_c, pa_indicator, phase_flag0,
+ phase_flag1, phase_flag10, phase_flag11, phase_flag12,
+ phase_flag13, phase_flag14, phase_flag15, phase_flag16,
+ phase_flag17, phase_flag18, phase_flag19, phase_flag2,
+ phase_flag20, phase_flag21, phase_flag22, phase_flag23,
+ phase_flag24, phase_flag25, phase_flag26, phase_flag27,
+ phase_flag28, phase_flag29, phase_flag3, phase_flag30,
+ phase_flag31, phase_flag4, phase_flag5, phase_flag6,
+ phase_flag7, phase_flag8, phase_flag9, pll_bypassnl,
+ pll_reset, pri_mi2s, pri_mi2s_ws, prng_rosc, pwr_crypto,
+ pwr_modem, pwr_nav, qdss_cti0_a, qdss_cti0_b, qdss_cti1_a,
+ qdss_cti1_b, qdss_gpio, qdss_gpio0, qdss_gpio1, qdss_gpio10,
+ qdss_gpio11, qdss_gpio12, qdss_gpio13, qdss_gpio14, qdss_gpio15,
+ qdss_gpio2, qdss_gpio3, qdss_gpio4, qdss_gpio5, qdss_gpio6,
+ qdss_gpio7, qdss_gpio8, qdss_gpio9, qlink_enable, qlink_request,
+ qspi_clk, qspi_cs, qspi_data0, qspi_data1, qspi_data2,
+ qspi_data3, qspi_resetn, sec_mi2s, sndwire_clk, sndwire_data,
+ sp_cmu, ssc_irq, tgu_ch0, tgu_ch1, tsense_pwm1, tsense_pwm2,
+ uim1_clk, uim1_data, uim1_present, uim1_reset, uim2_clk,
+ uim2_data, uim2_present, uim2_reset, uim_batt, vfr_1,
+ vsense_clkout, vsense_data0, vsense_data1, vsense_mode,
+ wlan1_adc0, wlan1_adc1, wlan2_adc0, wlan2_adc1
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+ Not valid for sdc pins.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+ Not valid for sdc pins.
+
+- drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+ tlmm: pinctrl@3100000 {
+ compatible = "qcom,sdm660-pinctrl";
+ reg = <0x3100000 0x200000>,
+ <0x3500000 0x200000>,
+ <0x3900000 0x200000>;
+ reg-names = "south", "center", "north";
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ gpio-ranges = <&tlmm 0 0 114>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
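
As with the controller node above, a hedged sketch of a pin configuration
subnode; the pin numbers picked for blsp_uart2 are placeholders:

	blsp1_uart2_default: blsp1-uart2-default {
		pins = "gpio4", "gpio5";	/* pin numbers assumed */
		function = "blsp_uart2";
		drive-strength = <2>;
		bias-disable;
	};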
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index abd8fbcf1e62..3902efa18fd0 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -14,8 +14,11 @@ Required Properties:
- "renesas,pfc-r8a73a4": for R8A73A4 (R-Mobile APE6) compatible pin-controller.
- "renesas,pfc-r8a7740": for R8A7740 (R-Mobile A1) compatible pin-controller.
- "renesas,pfc-r8a7743": for R8A7743 (RZ/G1M) compatible pin-controller.
+ - "renesas,pfc-r8a7744": for R8A7744 (RZ/G1N) compatible pin-controller.
- "renesas,pfc-r8a7745": for R8A7745 (RZ/G1E) compatible pin-controller.
- "renesas,pfc-r8a77470": for R8A77470 (RZ/G1C) compatible pin-controller.
+ - "renesas,pfc-r8a774a1": for R8A774A1 (RZ/G2M) compatible pin-controller.
+ - "renesas,pfc-r8a774c0": for R8A774C0 (RZ/G2E) compatible pin-controller.
- "renesas,pfc-r8a7778": for R8A7778 (R-Car M1) compatible pin-controller.
- "renesas,pfc-r8a7779": for R8A7779 (R-Car H1) compatible pin-controller.
- "renesas,pfc-r8a7790": for R8A7790 (R-Car H2) compatible pin-controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.txt
new file mode 100644
index 000000000000..25e53acd523e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.txt
@@ -0,0 +1,153 @@
+Renesas RZ/N1 SoC Pinctrl node description.
+
+Pin controller node
+-------------------
+Required properties:
+- compatible: SoC-specific compatible string "renesas,<soc-specific>-pinctrl"
+ followed by "renesas,rzn1-pinctrl" as fallback. The SoC-specific compatible
+ strings must be one of:
+ "renesas,r9a06g032-pinctrl" for RZ/N1D
+ "renesas,r9a06g033-pinctrl" for RZ/N1S
+- reg: Address base and length of the memory area where the pin controller
+ hardware is mapped to.
+- clocks: phandle for the clock, see the description of clock-names below.
+- clock-names: Contains the name of the clock:
+ "bus", the bus clock, sometimes described as pclk, for register accesses.
+
+Example:
+ pinctrl: pin-controller@40067000 {
+ compatible = "renesas,r9a06g032-pinctrl", "renesas,rzn1-pinctrl";
+ reg = <0x40067000 0x1000>, <0x51000000 0x480>;
+ clocks = <&sysctrl R9A06G032_HCLK_PINCONFIG>;
+ clock-names = "bus";
+ };
+
+Sub-nodes
+---------
+
+The child nodes of the pin controller node describe a pin multiplexing
+function.
+
+- Pin multiplexing sub-nodes:
+ A pin multiplexing sub-node describes how to configure a set of
+ (or a single) pin in some desired alternate function mode.
+ A single sub-node may define several pin configurations.
+ Please refer to pinctrl-bindings.txt to get to know more on generic
+ pin properties usage.
+
+ The allowed generic formats for a pin multiplexing sub-node are the
+ following ones:
+
+ node-1 {
+ pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ... ;
+ GENERIC_PINCONFIG;
+ };
+
+ node-2 {
+ sub-node-1 {
+ pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ... ;
+ GENERIC_PINCONFIG;
+ };
+
+ sub-node-2 {
+ pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ... ;
+ GENERIC_PINCONFIG;
+ };
+
+ ...
+
+ sub-node-n {
+ pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ... ;
+ GENERIC_PINCONFIG;
+ };
+ };
+
+ node-3 {
+ pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ... ;
+ GENERIC_PINCONFIG;
+
+ sub-node-1 {
+ pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ... ;
+ GENERIC_PINCONFIG;
+ };
+
+ ...
+
+ sub-node-n {
+ pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ... ;
+ GENERIC_PINCONFIG;
+ };
+ };
+
+ Use the latter two formats when pins that are part of the same logical group
+ need to have different generic pin configuration flags applied. Note that the
+ generic pinconfig in node-3 does not apply to the sub-nodes.
+
+ Client sub-nodes shall refer to pin multiplexing sub-nodes using the phandle
+ of the most external one.
+
+ Eg.
+
+ client-1 {
+ ...
+ pinctrl-0 = <&node-1>;
+ ...
+ };
+
+ client-2 {
+ ...
+ pinctrl-0 = <&node-2>;
+ ...
+ };
+
+ Required properties:
+ - pinmux:
+ integer array representing pin number and pin multiplexing configuration.
+ When a pin has to be configured in alternate function mode, use this
+ property to identify the pin by its global index, and provide its
+ alternate function configuration number along with it.
+ When multiple pins are required to be configured as part of the same
+ alternate function they shall be specified as members of the same
+ argument list of a single "pinmux" property.
+ Integer values in the "pinmux" argument list are assembled as:
+ (PIN | MUX_FUNC << 8)
+ where PIN directly corresponds to the pl_gpio pin number and MUX_FUNC is
+ one of the alternate function identifiers defined in:
+ <include/dt-bindings/pinctrl/rzn1-pinctrl.h>
+ These identifiers collapse the IO Multiplex Configuration Level 1 and
+ Level 2 numbers that are detailed in the hardware reference manual into a
+ single number. The identifiers for Level 2 are simply offset by 10.
+ Additional identifiers are provided to specify the MDIO source peripheral.
+
+ Optional generic pinconf properties:
+ - bias-disable - disable any pin bias
+ - bias-pull-up - pull up the pin with 50 KOhm
+ - bias-pull-down - pull down the pin with 50 KOhm
+ - bias-high-impedance - high impedance mode
+ - drive-strength - sink or source at most 4, 6, 8 or 12 mA
+
+ Example:
+ A serial communication interface with a TX output pin and an RX input pin.
+
+ &pinctrl {
+ pins_uart0: pins_uart0 {
+ pinmux = <
+ RZN1_PINMUX(103, RZN1_FUNC_UART0_I) /* UART0_TXD */
+ RZN1_PINMUX(104, RZN1_FUNC_UART0_I) /* UART0_RXD */
+ >;
+ };
+ };
+
+ Example 2:
+ Here we set the pull up on the RXD pin of the UART.
+
+ &pinctrl {
+ pins_uart0: pins_uart0 {
+ pinmux = <RZN1_PINMUX(103, RZN1_FUNC_UART0_I)>; /* TXD */
+
+ pins_uart0_rx {
+ pinmux = <RZN1_PINMUX(104, RZN1_FUNC_UART0_I)>; /* RXD */
+ bias-pull-up;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pon.txt b/Documentation/devicetree/bindings/power/reset/qcom,pon.txt
index 651491bb63b7..5705f575862d 100644
--- a/Documentation/devicetree/bindings/power/reset/qcom,pon.txt
+++ b/Documentation/devicetree/bindings/power/reset/qcom,pon.txt
@@ -6,7 +6,10 @@ and resin along with the Android reboot-mode.
This DT node has pwrkey and resin as sub nodes.
Required Properties:
--compatible: "qcom,pm8916-pon"
+-compatible: Must be one of:
+ "qcom,pm8916-pon"
+ "qcom,pms405-pon"
+
-reg: Specifies the physical address of the pon register
Optional subnode:
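
A hedged sketch of a pon node for the new compatible; the register offset,
sub-node compatible and interrupt specifier are assumptions:

	pon@800 {
		compatible = "qcom,pms405-pon";
		reg = <0x0800>;				/* offset assumed */

		pwrkey {
			compatible = "qcom,pm8941-pwrkey";	/* sub-node compatible assumed */
			interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
			linux,code = <KEY_POWER>;
		};
	};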
diff --git a/Documentation/devicetree/bindings/power/supply/bq25890.txt b/Documentation/devicetree/bindings/power/supply/bq25890.txt
index c9dd17d142ad..dc0568933359 100644
--- a/Documentation/devicetree/bindings/power/supply/bq25890.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq25890.txt
@@ -1,5 +1,8 @@
Binding for TI bq25890 Li-Ion Charger
+This driver supports the bq25896 and the bq25890. There are other ICs
+in the same family, but those have not been tested.
+
Required properties:
- compatible: Should contain one of the following:
* "ti,bq25890"
diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
index 37994fdb18ca..4fa8e08df2b6 100644
--- a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
@@ -23,6 +23,7 @@ Required properties:
* "ti,bq27546" - BQ27546
* "ti,bq27742" - BQ27742
* "ti,bq27545" - BQ27545
+ * "ti,bq27411" - BQ27411
* "ti,bq27421" - BQ27421
* "ti,bq27425" - BQ27425
* "ti,bq27426" - BQ27426
diff --git a/Documentation/devicetree/bindings/power/supply/sc2731_charger.txt b/Documentation/devicetree/bindings/power/supply/sc2731_charger.txt
new file mode 100644
index 000000000000..5266fab16575
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/sc2731_charger.txt
@@ -0,0 +1,40 @@
+Spreadtrum SC2731 PMIC battery charger binding
+
+Required properties:
+ - compatible: Should be "sprd,sc2731-charger".
+ - reg: Address offset of charger register.
+ - phys: Contains a phandle to the USB phy.
+
+Optional Properties:
+- monitored-battery: phandle of battery characteristics devicetree node.
+ The charger uses the following battery properties:
+- charge-term-current-microamp: current for charge termination phase.
+- constant-charge-voltage-max-microvolt: maximum constant input voltage.
+ See Documentation/devicetree/bindings/power/supply/battery.txt
+
+Example:
+
+ bat: battery {
+ compatible = "simple-battery";
+ charge-term-current-microamp = <120000>;
+ constant-charge-voltage-max-microvolt = <4350000>;
+ ......
+ };
+
+ sc2731_pmic: pmic@0 {
+ compatible = "sprd,sc2731";
+ reg = <0>;
+ spi-max-frequency = <26000000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ charger@0 {
+ compatible = "sprd,sc2731-charger";
+ reg = <0x0>;
+ phys = <&ssphy>;
+ monitored-battery = <&bat>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
index c7610718adff..f9be1acf891c 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.txt
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -12,6 +12,11 @@ Optional properties:
disabled. This binding is a workaround to keep backward compatibility with
old dtb's which rely on the fact that the switched regulators are always on
and don't mark them explicit as "regulator-always-on".
+- fsl,pmic-stby-poweroff: if present, configure the PMIC to shut down all
+ power rails when the PMIC_STBY_REQ line is asserted during the power off
+ sequence. Use this option if the SoC should be powered off by the external
+ power management IC (PMIC) via the PMIC_STBY_REQ signal.
+ As an alternative to PMIC_STBY_REQ, boards can implement the PMIC_ON_REQ
+ signal.
Required child node:
- regulators: This is the list of child nodes that specify the regulator
diff --git a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt
index 58a1d97972f5..45025b5b67f6 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.txt
@@ -26,6 +26,7 @@ Regulator nodes are identified by their compatible:
"qcom,rpm-pm8998-regulators"
"qcom,rpm-pma8084-regulators"
"qcom,rpm-pmi8998-regulators"
+ "qcom,rpm-pms405-regulators"
- vdd_s1-supply:
- vdd_s2-supply:
@@ -188,6 +189,24 @@ Regulator nodes are identified by their compatible:
Definition: reference to regulator supplying the input pin, as
described in the data sheet
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_s4-supply:
+- vdd_s5-supply:
+- vdd_l1_l2-supply:
+- vdd_l3_l8-supply:
+- vdd_l4-supply:
+- vdd_l5_l6-supply:
+- vdd_l7-supply:
+- vdd_l9-supply:
+- vdd_l10_l11_l12_l13-supply:
+ Usage: optional (pms405 only)
+ Value type: <phandle>
+ Definition: reference to regulator supplying the input pin, as
+ described in the data sheet
+
The regulator node houses sub-nodes for each regulator within the device. Each
sub-node is identified using the node's name, with valid values listed for each
of the pmics below.
@@ -222,6 +241,10 @@ pma8084:
pmi8998:
bob
+pms405:
+ s1, s2, s3, s4, s5, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
+ l13
+
The content of each sub-node is defined by the standard binding for regulators -
see regulator.txt.
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
index 76ead07072b1..4b98ca26e61a 100644
--- a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt
@@ -1,7 +1,9 @@
-ROHM BD71837 Power Management Integrated Circuit (PMIC) regulator bindings
+ROHM BD71837 and BD71847 Power Management Integrated Circuit regulator bindings
Required properties:
- - regulator-name: should be "buck1", ..., "buck8" and "ldo1", ..., "ldo7"
+ - regulator-name: should be "buck1", ..., "buck8" and "ldo1", ..., "ldo7" for
+ BD71837. For BD71847 names should be "buck1", ..., "buck6"
+ and "ldo1", ..., "ldo6"
List of regulators provided by this controller. BD71837 regulators node
should be sub node of the BD71837 MFD node. See BD71837 MFD bindings at
@@ -16,10 +18,14 @@ disabled by driver at startup. LDO5 and LDO6 are supplied by those and
if they are disabled at startup the voltage monitoring for LDO5/LDO6 will
cause PMIC to reset.
-The valid names for regulator nodes are:
+The valid names for BD71837 regulator nodes are:
BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6, BUCK7, BUCK8
LDO1, LDO2, LDO3, LDO4, LDO5, LDO6, LDO7
+The valid names for BD71847 regulator nodes are:
+BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6
+LDO1, LDO2, LDO3, LDO4, LDO5, LDO6
+
Optional properties:
- Any optional property defined in bindings/regulator/regulator.txt
diff --git a/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt b/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt
new file mode 100644
index 000000000000..a3f476240565
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt
@@ -0,0 +1,68 @@
+STMicroelectronics STPMIC1 Voltage regulators
+
+Regulator nodes are optional, depending on needs.
+
+Available Regulators in STPMIC1 device are:
+ - buck1 for Buck BUCK1
+ - buck2 for Buck BUCK2
+ - buck3 for Buck BUCK3
+ - buck4 for Buck BUCK4
+ - ldo1 for LDO LDO1
+ - ldo2 for LDO LDO2
+ - ldo3 for LDO LDO3
+ - ldo4 for LDO LDO4
+ - ldo5 for LDO LDO5
+ - ldo6 for LDO LDO6
+ - vref_ddr for LDO Vref DDR
+ - boost for Buck BOOST
+ - pwr_sw1 for VBUS_OTG switch
+ - pwr_sw2 for SW_OUT switch
+
+Switches are fixed voltage regulators with only enable/disable capability.
+
+Optional properties:
+- st,mask-reset: mask reset for this regulator: the regulator configuration
+ is maintained during pmic reset.
+- regulator-pull-down: enable high pull down.
+ If not specified, light pull down is used.
+- regulator-over-current-protection:
+ if set, all regulators are switched off in case of over-current detection
+ on this regulator;
+ if not set, the driver only sends an over-current event.
+- interrupt-parent: phandle to the parent interrupt controller
+- interrupts: index of the current limit detection interrupt
+- <regulator>-supply: phandle to the parent supply/regulator node.
+ Each regulator supply can be described except vref_ddr.
+
+Example:
+regulators {
+ compatible = "st,stpmic1-regulators";
+
+ ldo6-supply = <&v3v3>;
+
+ vdd_core: buck1 {
+ regulator-name = "vdd_core";
+ interrupts = <IT_CURLIM_BUCK1 0>;
+ interrupt-parent = <&pmic>;
+ st,mask-reset;
+ regulator-pull-down;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ v3v3: buck4 {
+ regulator-name = "v3v3";
+ interrupts = <IT_CURLIM_BUCK4 0>;
+ interrupt-parent = <&pmic>;
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ v1v8: ldo6 {
+ regulator-name = "v1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-over-current-protection;
+ };
+};
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt
index 03c741602c6d..6d2dd8a31482 100644
--- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt
@@ -98,6 +98,12 @@ The property below is dependent on fsl,tdm-interface:
usage: optional for tdm interface
value type: <empty>
Definition : Internal loopback connecting on TDM layer.
+- fsl,hmask
+ usage: optional
+ Value type: <u16>
+ Definition: HDLC address recognition. Set to zero to disable
+ address filtering of packets:
+ fsl,hmask = /bits/ 16 <0x0000>;
Example for tdm interface:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
index ff92e5a41bed..dab7ca9f250c 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
@@ -53,20 +53,8 @@ Required properties:
- clocks: Serial engine core clock needed by the device.
Qualcomm Technologies Inc. GENI Serial Engine based SPI Controller
-
-Required properties:
-- compatible: Must contain "qcom,geni-spi".
-- reg: Must contain SPI register location and length.
-- interrupts: Must contain SPI controller interrupts.
-- clock-names: Must contain "se".
-- clocks: Serial engine core clock needed by the device.
-- spi-max-frequency: Specifies maximum SPI clock frequency, units - Hz.
-- #address-cells: Must be <1> to define a chip select address on
- the SPI bus.
-- #size-cells: Must be <0>.
-
-SPI slave nodes must be children of the SPI master node and conform to SPI bus
-binding as described in Documentation/devicetree/bindings/spi/spi-bus.txt.
+node binding is described in
+Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt.
Example:
geniqup@8c0000 {
@@ -103,17 +91,4 @@ Example:
pinctrl-1 = <&qup_1_uart_3_sleep>;
};
- spi0: spi@a84000 {
- compatible = "qcom,geni-spi";
- reg = <0xa84000 0x4000>;
- interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "se";
- clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&qup_1_spi_2_active>;
- pinctrl-1 = <&qup_1_spi_2_sleep>;
- spi-max-frequency = <19200000>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
}
diff --git a/Documentation/devicetree/bindings/sound/adi,adau1977.txt b/Documentation/devicetree/bindings/sound/adi,adau1977.txt
new file mode 100644
index 000000000000..e79aeef73f28
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/adi,adau1977.txt
@@ -0,0 +1,54 @@
+Analog Devices ADAU1977/ADAU1978/ADAU1979
+
+Datasheets:
+http://www.analog.com/media/en/technical-documentation/data-sheets/ADAU1977.pdf
+http://www.analog.com/media/en/technical-documentation/data-sheets/ADAU1978.pdf
+http://www.analog.com/media/en/technical-documentation/data-sheets/ADAU1979.pdf
+
+This driver supports both the I2C and SPI buses.
+
+Required properties:
+ - compatible: Should contain one of the following:
+ "adi,adau1977"
+ "adi,adau1978"
+ "adi,adau1979"
+
+ - AVDD-supply: analog power supply for the device, please consult
+ Documentation/devicetree/bindings/regulator/regulator.txt
+
+Optional properties:
+ - reset-gpio: the reset pin for the chip, for more details consult
+ Documentation/devicetree/bindings/gpio/gpio.txt
+
+ - DVDD-supply: supply voltage for the digital core, please consult
+ Documentation/devicetree/bindings/regulator/regulator.txt
+
+For required properties on SPI, please consult
+Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Required properties on I2C:
+
+ - reg: The i2c address. Value depends on the state of ADDR0
+ and ADDR1, as wired in hardware.
+
+Examples:
+
+ adau1977_spi: adau1977@0 {
+ compatible = "adi,adau1977";
+ spi-max-frequency = <600000>;
+
+ AVDD-supply = <&regulator>;
+ DVDD-supply = <&regulator_digital>;
+
+ reset-gpio = <&gpio 10 GPIO_ACTIVE_LOW>;
+ };
+
+ adau1977_i2c: adau1977@11 {
+ compatible = "adi,adau1977";
+ reg = <0x11>;
+
+ AVDD-supply = <&regulator>;
+ DVDD-supply = <&regulator_digital>;
+
+ reset-gpio = <&gpio 10 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt
new file mode 100644
index 000000000000..5672d0bc5b16
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt
@@ -0,0 +1,24 @@
+* Amlogic Audio PDM input
+
+Required properties:
+- compatible: "amlogic,axg-pdm"
+- reg: physical base address of the controller and length of memory
+ mapped region.
+- clocks: list of clock phandles, one for each entry in clock-names.
+- clock-names: should contain the following:
+ * "pclk" : peripheral clock.
+ * "dclk" : pdm digital clock
+ * "sysclk" : dsp system clock
+- #sound-dai-cells: must be 0.
+
+Example of PDM on the A113 SoC:
+
+pdm: audio-controller@ff632000 {
+ compatible = "amlogic,axg-pdm";
+ reg = <0x0 0xff632000 0x0 0x34>;
+ #sound-dai-cells = <0>;
+ clocks = <&clkc_audio AUD_CLKID_PDM>,
+ <&clkc_audio AUD_CLKID_PDM_DCLK>,
+ <&clkc_audio AUD_CLKID_PDM_SYSCLK>;
+ clock-names = "pclk", "dclk", "sysclk";
+};
diff --git a/Documentation/devicetree/bindings/sound/cs42l51.txt b/Documentation/devicetree/bindings/sound/cs42l51.txt
new file mode 100644
index 000000000000..4b5de33ce377
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs42l51.txt
@@ -0,0 +1,17 @@
+CS42L51 audio CODEC
+
+Optional properties:
+
+ - clocks : a list of phandles + clock-specifiers, one for each entry in
+ clock-names
+
+ - clock-names : must contain "MCLK"
+
+Example:
+
+cs42l51: cs42l51@4a {
+ compatible = "cirrus,cs42l51";
+ reg = <0x4a>;
+ clocks = <&mclk_prov>;
+ clock-names = "MCLK";
+};
diff --git a/Documentation/devicetree/bindings/sound/maxim,max98088.txt b/Documentation/devicetree/bindings/sound/maxim,max98088.txt
new file mode 100644
index 000000000000..da764d913319
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/maxim,max98088.txt
@@ -0,0 +1,23 @@
+MAX98088 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible: "maxim,max98088" or "maxim,max98089".
+- reg: The I2C address of the device.
+
+Optional properties:
+
+- clocks: the clock provider of MCLK, see ../clock/clock-bindings.txt section
+ "consumer" for more information.
+- clock-names: must be set to "mclk"
+
+Example:
+
+max98089: codec@10 {
+ compatible = "maxim,max98089";
+ reg = <0x10>;
+ clocks = <&clks IMX6QDL_CLK_CKO2>;
+ clock-names = "mclk";
+};
diff --git a/Documentation/devicetree/bindings/sound/mikroe,mikroe-proto.txt b/Documentation/devicetree/bindings/sound/mikroe,mikroe-proto.txt
new file mode 100644
index 000000000000..912f8fae11c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mikroe,mikroe-proto.txt
@@ -0,0 +1,23 @@
+Mikroe-PROTO audio board
+
+Required properties:
+ - compatible: "mikroe,mikroe-proto"
+ - dai-format: Must be "i2s".
+ - i2s-controller: The phandle of the I2S controller.
+ - audio-codec: The phandle of the WM8731 audio codec.
+Optional properties:
+ - model: The user-visible name of this sound complex.
+ - bitclock-master: Indicates dai-link bit clock master; for details see simple-card.txt (1).
+ - frame-master: Indicates dai-link frame master; for details see simple-card.txt (1).
+
+(1): The same master must be used for both bit and frame clocks.
+
+Example:
+ sound {
+ compatible = "mikroe,mikroe-proto";
+ model = "wm8731 @ sama5d2_xplained";
+ i2s-controller = <&i2s0>;
+ audio-codec = <&wm8731>;
+ dai-format = "i2s";
+ };
diff --git a/Documentation/devicetree/bindings/sound/nau8822.txt b/Documentation/devicetree/bindings/sound/nau8822.txt
new file mode 100644
index 000000000000..a471d162d4e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nau8822.txt
@@ -0,0 +1,16 @@
+NAU8822 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+ - compatible : "nuvoton,nau8822"
+
+ - reg : the I2C address of the device.
+
+Example:
+
+codec: nau8822@1a {
+ compatible = "nuvoton,nau8822";
+ reg = <0x1a>;
+};
diff --git a/Documentation/devicetree/bindings/sound/pcm3060.txt b/Documentation/devicetree/bindings/sound/pcm3060.txt
new file mode 100644
index 000000000000..90fcb8523099
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/pcm3060.txt
@@ -0,0 +1,17 @@
+PCM3060 audio CODEC
+
+This driver supports both I2C and SPI.
+
+Required properties:
+
+- compatible: "ti,pcm3060"
+
+- reg : the I2C address of the device for I2C, the chip select
+ number for SPI.
+
+Examples:
+
+ pcm3060: pcm3060@46 {
+ compatible = "ti,pcm3060";
+ reg = <0x46>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6afe.txt b/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
index a8179409c194..d74888b9f1bb 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
@@ -49,7 +49,7 @@ configuration of each dai. Must contain the following properties.
Usage: required for mi2s interface
Value type: <prop-encoded-array>
Definition: Must be list of serial data lines used by this dai.
- should be one or more of the 1-4 sd lines.
+ should be one or more of the 0-3 sd lines.
- qcom,tdm-sync-mode:
Usage: required for tdm interface
@@ -137,42 +137,42 @@ q6afe@4 {
prim-mi2s-rx@16 {
reg = <16>;
- qcom,sd-lines = <1 3>;
+ qcom,sd-lines = <0 2>;
};
prim-mi2s-tx@17 {
reg = <17>;
- qcom,sd-lines = <2>;
+ qcom,sd-lines = <1>;
};
sec-mi2s-rx@18 {
reg = <18>;
- qcom,sd-lines = <1 4>;
+ qcom,sd-lines = <0 3>;
};
sec-mi2s-tx@19 {
reg = <19>;
- qcom,sd-lines = <2>;
+ qcom,sd-lines = <1>;
};
tert-mi2s-rx@20 {
reg = <20>;
- qcom,sd-lines = <2 4>;
+ qcom,sd-lines = <1 3>;
};
tert-mi2s-tx@21 {
reg = <21>;
- qcom,sd-lines = <1>;
+ qcom,sd-lines = <0>;
};
quat-mi2s-rx@22 {
reg = <22>;
- qcom,sd-lines = <1>;
+ qcom,sd-lines = <0>;
};
quat-mi2s-tx@23 {
reg = <23>;
- qcom,sd-lines = <2>;
+ qcom,sd-lines = <1>;
};
};
};
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 9e764270c36b..d92b705e7917 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -340,10 +340,12 @@ Required properties:
- compatible : "renesas,rcar_sound-<soctype>", fallbacks
"renesas,rcar_sound-gen1" if generation1, and
"renesas,rcar_sound-gen2" if generation2 (or RZ/G1)
- "renesas,rcar_sound-gen3" if generation3
+ "renesas,rcar_sound-gen3" if generation3 (or RZ/G2)
Examples with soctypes are:
- "renesas,rcar_sound-r8a7743" (RZ/G1M)
+ - "renesas,rcar_sound-r8a7744" (RZ/G1N)
- "renesas,rcar_sound-r8a7745" (RZ/G1E)
+ - "renesas,rcar_sound-r8a774a1" (RZ/G2M)
- "renesas,rcar_sound-r8a7778" (R-Car M1A)
- "renesas,rcar_sound-r8a7779" (R-Car H1)
- "renesas,rcar_sound-r8a7790" (R-Car H2)
@@ -353,6 +355,7 @@ Required properties:
- "renesas,rcar_sound-r8a7795" (R-Car H3)
- "renesas,rcar_sound-r8a7796" (R-Car M3-W)
- "renesas,rcar_sound-r8a77965" (R-Car M3-N)
+ - "renesas,rcar_sound-r8a77990" (R-Car E3)
- reg : Should contain the register physical address.
required register is
SRU/ADG/SSI if generation1
diff --git a/Documentation/devicetree/bindings/sound/st,sta32x.txt b/Documentation/devicetree/bindings/sound/st,sta32x.txt
index 255de3ae5b2f..52265fb757c5 100644
--- a/Documentation/devicetree/bindings/sound/st,sta32x.txt
+++ b/Documentation/devicetree/bindings/sound/st,sta32x.txt
@@ -19,6 +19,10 @@ Required properties:
Optional properties:
+ - clocks, clock-names: Clock specifier for XTI input clock.
+ If specified, the clock will be enabled when the codec is probed,
+ and disabled when it is removed. The 'clock-names' must be set to 'xti'.
+
- st,output-conf: number, Selects the output configuration:
0: 2-channel (full-bridge) power, 2-channel data-out
1: 2 (half-bridge). 1 (full-bridge) on-board power
@@ -39,6 +43,9 @@ Optional properties:
- st,thermal-warning-recover:
If present, thermal warning recovery is enabled.
+ - st,fault-detect-recovery:
+ If present, fault detect recovery is enabled.
+
- st,thermal-warning-adjustment:
If present, thermal warning adjustment is enabled.
@@ -76,6 +83,8 @@ Example:
codec: sta32x@38 {
compatible = "st,sta32x";
reg = <0x1c>;
+ clocks = <&clock>;
+ clock-names = "xti";
reset-gpios = <&gpio1 19 0>;
power-down-gpios = <&gpio1 16 0>;
st,output-conf = /bits/ 8 <0x3>; // set output to 2-channel
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
index 3a3fc506e43a..3f4467ff0aa2 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
@@ -31,7 +31,11 @@ SAI subnodes required properties:
- reg: Base address and size of SAI sub-block register set.
- clocks: Must contain one phandle and clock specifier pair
for sai_ck which feeds the internal clock generator.
+ If the SAI shares a master clock with another SAI set as MCLK
+ clock provider, the provider SAI phandle must also be specified here.
- clock-names: Must contain "sai_ck".
+ Must also contain "MCLK" if the SAI shares a master clock
+ with a SAI set as MCLK clock provider.
- dmas: see Documentation/devicetree/bindings/dma/stm32-dma.txt
- dma-names: identifier string for each DMA request line
"tx": if sai sub-block is configured as playback DAI
@@ -51,6 +55,9 @@ SAI subnodes Optional properties:
configured according to protocol defined in related DAI link node,
such as i2s, left justified, right justified, dsp and pdm protocols.
Note: ac97 protocol is not supported by SAI driver
+ - #clock-cells: should be 0. This property must be present if the SAI device
+ is a master clock provider, according to the clock bindings described in
+ Documentation/devicetree/bindings/clock/clock-bindings.txt.
The device node should contain one 'port' child node with one child 'endpoint'
node, according to the bindings defined in Documentation/devicetree/bindings/
diff --git a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
index b9d50d6cdef3..61e71c1729e0 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
@@ -10,6 +10,7 @@ Required properties:
- "allwinner,sun6i-a31-i2s"
- "allwinner,sun8i-a83t-i2s"
- "allwinner,sun8i-h3-i2s"
+ - "allwinner,sun50i-a64-codec-i2s"
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: should contain the I2S interrupt.
@@ -26,6 +27,7 @@ Required properties for the following compatibles:
- "allwinner,sun6i-a31-i2s"
- "allwinner,sun8i-a83t-i2s"
- "allwinner,sun8i-h3-i2s"
+ - "allwinner,sun50i-a64-codec-i2s"
- resets: phandle to the reset line for this codec
Example:
diff --git a/Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt b/Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt
new file mode 100644
index 000000000000..4f8ad0e04d20
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt
@@ -0,0 +1,12 @@
+* Allwinner A64 Codec Analog Controls
+
+Required properties:
+- compatible: must be one of the following compatibles:
+ - "allwinner,sun50i-a64-codec-analog"
+- reg: must contain the registers location and length
+
+Example:
+ codec_analog: codec-analog@1f015c0 {
+ compatible = "allwinner,sun50i-a64-codec-analog";
+ reg = <0x01f015c0 0x4>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/ts3a227e.txt b/Documentation/devicetree/bindings/sound/ts3a227e.txt
index 3ed8359144d3..21ab45bc7e8f 100644
--- a/Documentation/devicetree/bindings/sound/ts3a227e.txt
+++ b/Documentation/devicetree/bindings/sound/ts3a227e.txt
@@ -14,7 +14,7 @@ Required properties:
Optional properies:
- ti,micbias: Intended MICBIAS voltage (datasheet section 9.6.7).
- Select 0/1/2/3/4/5/6/7 to specify MACBIAS voltage
+ Select 0/1/2/3/4/5/6/7 to specify MICBIAS voltage
2.1V/2.2V/2.3V/2.4V/2.5V/2.6V/2.7V/2.8V
Default value is "1" (2.2V).
diff --git a/Documentation/devicetree/bindings/sound/wm8782.txt b/Documentation/devicetree/bindings/sound/wm8782.txt
new file mode 100644
index 000000000000..256cdec6ec4d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wm8782.txt
@@ -0,0 +1,17 @@
+WM8782 stereo ADC
+
+This device does not have any control interface or reset pins.
+
+Required properties:
+
+ - compatible : "wlf,wm8782"
+ - Vdda-supply : phandle to a regulator for the analog power supply (2.7V - 5.5V)
+ - Vdd-supply : phandle to a regulator for the digital power supply (2.7V - 3.6V)
+
+Example:
+
+wm8782: stereo-adc {
+ compatible = "wlf,wm8782";
+ Vdda-supply = <&vdda_supply>;
+ Vdd-supply = <&vdd_supply>;
+};
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
new file mode 100644
index 000000000000..790311a42bf1
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
@@ -0,0 +1,39 @@
+GENI based Qualcomm Universal Peripheral (QUP) Serial Peripheral Interface (SPI)
+
+The QUP v3 core is a GENI based AHB slave that provides a common data path
+(an output FIFO and an input FIFO) for serial peripheral interface (SPI)
+mini-core.
+
+SPI in master mode supports up to 50MHz, up to four chip selects, programmable
+data path from 4 bits to 32 bits and numerous protocol variants.
+
+Required properties:
+- compatible: Must contain "qcom,geni-spi".
+- reg: Must contain SPI register location and length.
+- interrupts: Must contain SPI controller interrupts.
+- clock-names: Must contain "se".
+- clocks: Serial engine core clock needed by the device.
+- #address-cells: Must be <1> to define a chip select address on
+ the SPI bus.
+- #size-cells: Must be <0>.
+
+SPI controller nodes must be children of the GENI based Qualcomm Universal
+Peripheral. Please refer to the GENI based QUP wrapper controller node
+bindings described in
+Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt.
+
+SPI slave nodes must be children of the SPI master node and conform to SPI bus
+binding as described in Documentation/devicetree/bindings/spi/spi-bus.txt.
+
+Example:
+ spi0: spi@a84000 {
+ compatible = "qcom,geni-spi";
+ reg = <0xa84000 0x4000>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "se";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qup_1_spi_2_active>;
+ pinctrl-1 = <&qup_1_spi_2_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.txt b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.txt
new file mode 100644
index 000000000000..1d64b61f5171
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.txt
@@ -0,0 +1,36 @@
+Qualcomm Quad Serial Peripheral Interface (QSPI)
+
+The QSPI controller allows SPI protocol communication in single, dual, or quad
+wire transmission modes for read/write access to slaves such as NOR flash.
+
+Required properties:
+- compatible: An SoC specific identifier followed by "qcom,qspi-v1", such as
+ "qcom,sdm845-qspi", "qcom,qspi-v1"
+- reg: Should contain the base register location and length.
+- interrupts: Interrupt number used by the controller.
+- clocks: Should contain the core and AHB clock.
+- clock-names: Should be "core" for core clock and "iface" for AHB clock.
+
+SPI slave nodes must be children of the SPI master node and can contain
+properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+ qspi: spi@88df000 {
+ compatible = "qcom,sdm845-qspi", "qcom,qspi-v1";
+ reg = <0x88df000 0x600>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "iface", "core";
+ clocks = <&gcc GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <&gcc GCC_QSPI_CORE_CLK>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+ spi-tx-bus-width = <2>;
+ spi-rx-bus-width = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index bfbc2035fb6b..4b836ad17b19 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -2,7 +2,9 @@ Renesas MSIOF spi controller
Required properties:
- compatible : "renesas,msiof-r8a7743" (RZ/G1M)
+ "renesas,msiof-r8a7744" (RZ/G1N)
"renesas,msiof-r8a7745" (RZ/G1E)
+ "renesas,msiof-r8a774a1" (RZ/G2M)
"renesas,msiof-r8a7790" (R-Car H2)
"renesas,msiof-r8a7791" (R-Car M2-W)
"renesas,msiof-r8a7792" (R-Car V2H)
@@ -11,10 +13,14 @@ Required properties:
"renesas,msiof-r8a7795" (R-Car H3)
"renesas,msiof-r8a7796" (R-Car M3-W)
"renesas,msiof-r8a77965" (R-Car M3-N)
+ "renesas,msiof-r8a77970" (R-Car V3M)
+ "renesas,msiof-r8a77980" (R-Car V3H)
+ "renesas,msiof-r8a77990" (R-Car E3)
+ "renesas,msiof-r8a77995" (R-Car D3)
"renesas,msiof-sh73a0" (SH-Mobile AG5)
"renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
"renesas,rcar-gen2-msiof" (generic R-Car Gen2 and RZ/G1 compatible device)
- "renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
+ "renesas,rcar-gen3-msiof" (generic R-Car Gen3 and RZ/G2 compatible device)
"renesas,sh-msiof" (deprecated)
When compatible with the generic version, nodes
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
index 642d3fb1ef85..2864bc6b659c 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
@@ -2,7 +2,7 @@ Synopsys DesignWare AMBA 2.0 Synchronous Serial Interface.
Required properties:
- compatible : "snps,dw-apb-ssi" or "mscc,<soc>-spi", where soc is "ocelot" or
- "jaguar2"
+ "jaguar2", or "amazon,alpine-dw-apb-ssi"
- reg : The register base for the controller. For "mscc,<soc>-spi", a second
register set is required (named ICPU_CFG:SPI_MST)
- interrupts : One interrupt, used by the controller.
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
index 4af132606b37..8d178a4503cf 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
@@ -3,6 +3,7 @@
Required properties:
- compatible :
- "fsl,imx7ulp-spi" for LPSPI compatible with the one integrated on i.MX7ULP soc
+ - "fsl,imx8qxp-spi" for LPSPI compatible with the one integrated on i.MX8QXP soc
- reg : address and length of the lpspi master registers
- interrupts : lpspi interrupt
- clocks : lpspi clock specifier
diff --git a/Documentation/devicetree/bindings/spi/spi-pxa2xx.txt b/Documentation/devicetree/bindings/spi/spi-pxa2xx.txt
new file mode 100644
index 000000000000..0335a9bd2e8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-pxa2xx.txt
@@ -0,0 +1,24 @@
+PXA2xx SSP SPI Controller
+
+Required properties:
+- compatible: Must be "marvell,mmp2-ssp".
+- reg: Offset and length of the device's register set.
+- interrupts: Should be the interrupt number.
+- clocks: Should contain a single entry describing the clock input.
+- #address-cells: Number of cells required to define a chip select address.
+- #size-cells: Should be zero.
+
+Optional properties:
+- cs-gpios: list of GPIO chip selects. See the SPI bus bindings,
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Child nodes represent devices on the SPI bus.
+ See ../spi/spi-bus.txt.
+
+Example:
+ ssp1: spi@d4035000 {
+ compatible = "marvell,mmp2-ssp";
+ reg = <0xd4035000 0x1000>;
+ clocks = <&soc_clocks MMP2_CLK_SSP0>;
+ interrupts = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-rspi.txt b/Documentation/devicetree/bindings/spi/spi-rspi.txt
index 96fd58548f69..fc97ad64fbf2 100644
--- a/Documentation/devicetree/bindings/spi/spi-rspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rspi.txt
@@ -3,7 +3,7 @@ Device tree configuration for Renesas RSPI/QSPI driver
Required properties:
- compatible : For Renesas Serial Peripheral Interface on legacy SH:
"renesas,rspi-<soctype>", "renesas,rspi" as fallback.
- For Renesas Serial Peripheral Interface on RZ/A1H:
+ For Renesas Serial Peripheral Interface on RZ/A:
"renesas,rspi-<soctype>", "renesas,rspi-rz" as fallback.
For Quad Serial Peripheral Interface on R-Car Gen2 and
RZ/G1 devices:
@@ -11,7 +11,9 @@ Required properties:
Examples with soctypes are:
- "renesas,rspi-sh7757" (SH)
- "renesas,rspi-r7s72100" (RZ/A1H)
+ - "renesas,rspi-r7s9210" (RZ/A2)
- "renesas,qspi-r8a7743" (RZ/G1M)
+ - "renesas,qspi-r8a7744" (RZ/G1N)
- "renesas,qspi-r8a7745" (RZ/G1E)
- "renesas,qspi-r8a7790" (R-Car H2)
- "renesas,qspi-r8a7791" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/spi/spi-slave-mt27xx.txt b/Documentation/devicetree/bindings/spi/spi-slave-mt27xx.txt
new file mode 100644
index 000000000000..c37e5a179b21
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-slave-mt27xx.txt
@@ -0,0 +1,32 @@
+Binding for MTK SPI Slave controller
+
+Required properties:
+- compatible: should be one of the following.
+ - mediatek,mt2712-spi-slave: for mt2712 platforms
+- reg: Address and length of the register set for the device.
+- interrupts: Should contain spi interrupt.
+- clocks: phandles to input clocks.
+ It is a clock gate and should be <&infracfg CLK_INFRA_AO_SPI1>.
+- clock-names: should be "spi" for the clock gate.
+
+Optional properties:
+- assigned-clocks: the mux clock; should be <&topckgen CLK_TOP_SPISLV_SEL>.
+- assigned-clock-parents: parent of the mux clock.
+ It is a PLL and should be one of the following:
+ - <&topckgen CLK_TOP_UNIVPLL1_D2>: specify parent clock 312MHz.
+ It is the default one.
+ - <&topckgen CLK_TOP_UNIVPLL1_D4>: specify parent clock 156MHz.
+ - <&topckgen CLK_TOP_UNIVPLL2_D4>: specify parent clock 104MHz.
+ - <&topckgen CLK_TOP_UNIVPLL1_D8>: specify parent clock 78MHz.
+
+Example:
+- SoC Specific Portion:
+spis1: spi@10013000 {
+ compatible = "mediatek,mt2712-spi-slave";
+ reg = <0 0x10013000 0 0x100>;
+ interrupts = <GIC_SPI 283 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg CLK_INFRA_AO_SPI1>;
+ clock-names = "spi";
+ assigned-clocks = <&topckgen CLK_TOP_SPISLV_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL1_D2>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-sprd.txt b/Documentation/devicetree/bindings/spi/spi-sprd.txt
new file mode 100644
index 000000000000..bad211a19da4
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-sprd.txt
@@ -0,0 +1,26 @@
+Spreadtrum SPI Controller
+
+Required properties:
+- compatible: Should be "sprd,sc9860-spi".
+- reg: Offset and length of SPI controller register space.
+- interrupts: Should contain SPI interrupt.
+- clock-names: Should contain the following entries:
+ "spi" for the SPI clock,
+ "source" for the SPI source (parent) clock,
+ "enable" for the SPI module enable clock.
+- clocks: List of clock inputs, sorted in the same order as the
+ clock-names property.
+- #address-cells: The number of cells required to define a chip select
+ address on the SPI bus. Should be set to 1.
+- #size-cells: Should be set to 0.
+
+Example:
+spi0: spi@70a00000 {
+ compatible = "sprd,sc9860-spi";
+ reg = <0 0x70a00000 0 0x1000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "spi", "source","enable";
+ clocks = <&clk_spi0>, <&ext_26m>, <&clk_ap_apb_gates 5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-stm32-qspi.txt b/Documentation/devicetree/bindings/spi/spi-stm32-qspi.txt
new file mode 100644
index 000000000000..adeeb63e84b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-stm32-qspi.txt
@@ -0,0 +1,44 @@
+* STMicroelectronics Quad Serial Peripheral Interface(QSPI)
+
+Required properties:
+- compatible: should be "st,stm32f469-qspi"
+- reg: the first entry contains the register location and length,
+ the second entry contains the memory mapping address and length.
+- reg-names: should contain the reg names "qspi" and "qspi_mm".
+- interrupts: should contain the interrupt for the device
+- clocks: the phandle of the clock needed by the QSPI controller
+- A pinctrl must be defined to set pins in mode of operation for QSPI transfer
+
+Optional properties:
+- resets: must contain the phandle to the reset controller.
+
+A SPI flash (NOR/NAND) must be a child of the spi node and can have
+additional properties. Also see jedec,spi-nor.txt.
+
+Required properties:
+- reg: chip select number (the QSPI controller may connect 2 flashes)
+- spi-max-frequency: max frequency of spi bus
+
+Optional property:
+- spi-rx-bus-width: see ./spi-bus.txt for the description
+
+Example:
+
+qspi: spi@a0001000 {
+ compatible = "st,stm32f469-qspi";
+ reg = <0xa0001000 0x1000>, <0x90000000 0x10000000>;
+ reg-names = "qspi", "qspi_mm";
+ interrupts = <91>;
+ resets = <&rcc STM32F4_AHB3_RESET(QSPI)>;
+ clocks = <&rcc 0 STM32F4_AHB3_CLOCK(QSPI)>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_qspi0>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>;
+ ...
+ };
+};
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
index b40add2d9bb4..49948fcc0631 100644
--- a/Documentation/devicetree/bindings/timer/renesas,cmt.txt
+++ b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
@@ -34,6 +34,10 @@ Required Properties:
- "renesas,r8a7793-cmt1" for the 48-bit CMT1 device included in r8a7793.
- "renesas,r8a7794-cmt0" for the 32-bit CMT0 device included in r8a7794.
- "renesas,r8a7794-cmt1" for the 48-bit CMT1 device included in r8a7794.
+ - "renesas,r8a77970-cmt0" for the 32-bit CMT0 device included in r8a77970.
+ - "renesas,r8a77970-cmt1" for the 48-bit CMT1 device included in r8a77970.
+ - "renesas,r8a77980-cmt0" for the 32-bit CMT0 device included in r8a77980.
+ - "renesas,r8a77980-cmt1" for the 48-bit CMT1 device included in r8a77980.
- "renesas,rcar-gen2-cmt0" for 32-bit CMT0 devices included in R-Car Gen2
and RZ/G1.
@@ -41,6 +45,9 @@ Required Properties:
and RZ/G1.
These are fallbacks for r8a73a4, R-Car Gen2 and RZ/G1 entries
listed above.
+ - "renesas,rcar-gen3-cmt0" for 32-bit CMT0 devices included in R-Car Gen3.
+ - "renesas,rcar-gen3-cmt1" for 48-bit CMT1 devices included in R-Car Gen3.
+ These are fallbacks for R-Car Gen3 entries listed above.
- reg: base address and length of the registers block for the timer module.
- interrupts: interrupt-specifier for the timer, one per channel.
diff --git a/Documentation/devicetree/bindings/trivial-devices.txt b/Documentation/devicetree/bindings/trivial-devices.txt
index 763a2808a95c..69c934aec13b 100644
--- a/Documentation/devicetree/bindings/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/trivial-devices.txt
@@ -35,7 +35,6 @@ at,24c08 i2c serial eeprom (24cxx)
atmel,at97sc3204t i2c trusted platform module (TPM)
capella,cm32181 CM32181: Ambient Light Sensor
capella,cm3232 CM3232: Ambient Light Sensor
-cirrus,cs42l51 Cirrus Logic CS42L51 audio codec
dallas,ds1374 I2C, 32-Bit Binary Counter Watchdog RTC with Trickle Charger and Reset Input/Output
dallas,ds1631 High-Precision Digital Thermometer
dallas,ds1672 Dallas DS1672 Real-time Clock
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
index 2e9318151df7..529e51879fb2 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -80,6 +80,8 @@ Optional properties:
controller. It's expected that a mux state of 0 indicates device mode and a
mux state of 1 indicates host mode.
- mux-control-names: Shall be "usb_switch" if mux-controls is specified.
+- pinctrl-names: Names for optional pin modes: "default", "host", "device"
+- pinctrl-n: alternate pin modes
i.mx specific properties
- fsl,usbmisc: phandler of non-core register device, with one
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 3e4c38b806ac..636630fb92d7 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -19,6 +19,7 @@ Exception for clocks:
"cavium,octeon-7130-usb-uctl"
"qcom,dwc3"
"samsung,exynos5250-dwusb3"
+ "samsung,exynos5433-dwusb3"
"samsung,exynos7-dwusb3"
"sprd,sc9860-dwc3"
"st,stih407-dwc3"
diff --git a/Documentation/devicetree/bindings/usb/ehci-mv.txt b/Documentation/devicetree/bindings/usb/ehci-mv.txt
new file mode 100644
index 000000000000..335589895763
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ehci-mv.txt
@@ -0,0 +1,23 @@
+* Marvell PXA/MMP EHCI controller.
+
+Required properties:
+
+- compatible: must be "marvell,pxau2o-ehci"
+- reg: physical base address of the controller and length of the memory mapped region
+- interrupts: one EHCI controller interrupt should be described here
+- clocks: phandle list of usb clocks
+- clock-names: should be "USBCLK"
+- phys: phandle for the PHY device
+- phy-names: should be "usb"
+
+Example:
+
+ ehci0: usb-ehci@d4208000 {
+ compatible = "marvell,pxau2o-ehci";
+ reg = <0xd4208000 0x200>;
+ interrupts = <44>;
+ clocks = <&soc_clocks MMP2_CLK_USB>;
+ clock-names = "USBCLK";
+ phys = <&usb_otg_phy>;
+ phy-names = "usb";
+ };
diff --git a/Documentation/devicetree/bindings/usb/exynos-usb.txt b/Documentation/devicetree/bindings/usb/exynos-usb.txt
index c97374315049..b7111f43fa59 100644
--- a/Documentation/devicetree/bindings/usb/exynos-usb.txt
+++ b/Documentation/devicetree/bindings/usb/exynos-usb.txt
@@ -83,6 +83,8 @@ Required properties:
- compatible: should be one of the following -
"samsung,exynos5250-dwusb3": for USB 3.0 DWC3 controller on
Exynos5250/5420.
+ "samsung,exynos5433-dwusb3": for USB 3.0 DWC3 controller on
+ Exynos5433.
"samsung,exynos7-dwusb3": for USB 3.0 DWC3 controller on Exynos7.
- #address-cells, #size-cells : should be '1' if the device has sub-nodes
with 'reg' property.
diff --git a/Documentation/devicetree/bindings/usb/faraday,fotg210.txt b/Documentation/devicetree/bindings/usb/faraday,fotg210.txt
new file mode 100644
index 000000000000..06a2286e2054
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/faraday,fotg210.txt
@@ -0,0 +1,35 @@
+Faraday FOTG210 host controller
+
+This OTG-capable USB host controller is found in Cortina Systems
+Gemini and other SoC products.
+
+Required properties:
+- compatible: should be one of:
+ "faraday,fotg210"
+ "cortina,gemini-usb", "faraday,fotg210"
+- reg: should contain one register range i.e. start and length
+- interrupts: description of the interrupt line
+
+Optional properties:
+- clocks: should contain the IP block clock
+- clock-names: should be "PCLK" for the IP block clock
+
+Required properties for "cortina,gemini-usb" compatible:
+- syscon: a phandle to the system controller to access PHY registers
+
+Optional properties for "cortina,gemini-usb" compatible:
+- cortina,gemini-mini-b: boolean property that indicates that a Mini-B
+ OTG connector is in use
+- wakeup-source: see power/wakeup-source.txt
+
+Example for Gemini:
+
+usb@68000000 {
+ compatible = "cortina,gemini-usb", "faraday,fotg210";
+ reg = <0x68000000 0x1000>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cc 12>;
+ clock-names = "PCLK";
+ syscon = <&syscon>;
+ wakeup-source;
+};
diff --git a/Documentation/devicetree/bindings/usb/fcs,fusb302.txt b/Documentation/devicetree/bindings/usb/fcs,fusb302.txt
index 6087dc7f209e..a5d011d2efc8 100644
--- a/Documentation/devicetree/bindings/usb/fcs,fusb302.txt
+++ b/Documentation/devicetree/bindings/usb/fcs,fusb302.txt
@@ -5,10 +5,19 @@ Required properties :
- reg : I2C slave address
- interrupts : Interrupt specifier
-Optional properties :
-- fcs,operating-sink-microwatt :
- Minimum amount of power accepted from a sink
- when negotiating
+Required sub-node:
+- connector : The "usb-c-connector" attached to the FUSB302 IC. The bindings
+ of the connector node are specified in:
+
+ Documentation/devicetree/bindings/connector/usb-connector.txt
+
+Deprecated properties :
+- fcs,max-sink-microvolt : Maximum sink voltage accepted by port controller
+- fcs,max-sink-microamp : Maximum sink current accepted by port controller
+- fcs,max-sink-microwatt : Maximum sink power accepted by port controller
+- fcs,operating-sink-microwatt : Minimum amount of power accepted from a sink
+ when negotiating
+
Example:
@@ -17,7 +26,16 @@ fusb302: typec-portc@54 {
reg = <0x54>;
interrupt-parent = <&nmi_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
- fcs,max-sink-microvolt = <12000000>;
- fcs,max-sink-microamp = <3000000>;
- fcs,max-sink-microwatt = <36000000>;
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
+ PDO_VAR(3000, 12000, 3000)
+ PDO_PPS_APDO(3000, 11000, 3000)>;
+ op-sink-microwatt = <10000000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/usb/renesas_usb3.txt b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
index 2c071bb5801e..d366555166d0 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usb3.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
@@ -2,11 +2,13 @@ Renesas Electronics USB3.0 Peripheral driver
Required properties:
- compatible: Must contain one of the following:
+ - "renesas,r8a774a1-usb3-peri"
- "renesas,r8a7795-usb3-peri"
- "renesas,r8a7796-usb3-peri"
- "renesas,r8a77965-usb3-peri"
- - "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 compatible
- device
+ - "renesas,r8a77990-usb3-peri"
+ - "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 or RZ/G2
+ compatible device
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index 43960faf5a88..90719f501852 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -4,7 +4,9 @@ Required properties:
- compatible: Must contain one or more of the following:
- "renesas,usbhs-r8a7743" for r8a7743 (RZ/G1M) compatible device
+ - "renesas,usbhs-r8a7744" for r8a7744 (RZ/G1N) compatible device
- "renesas,usbhs-r8a7745" for r8a7745 (RZ/G1E) compatible device
+ - "renesas,usbhs-r8a774a1" for r8a774a1 (RZ/G2M) compatible device
- "renesas,usbhs-r8a7790" for r8a7790 (R-Car H2) compatible device
- "renesas,usbhs-r8a7791" for r8a7791 (R-Car M2-W) compatible device
- "renesas,usbhs-r8a7792" for r8a7792 (R-Car V2H) compatible device
@@ -13,10 +15,11 @@ Required properties:
- "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
- "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
- "renesas,usbhs-r8a77965" for r8a77965 (R-Car M3-N) compatible device
+ - "renesas,usbhs-r8a77990" for r8a77990 (R-Car E3) compatible device
- "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device
- "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device
- "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices
- - "renesas,rcar-gen3-usbhs" for R-Car Gen3 compatible device
+ - "renesas,rcar-gen3-usbhs" for R-Car Gen3 or RZ/G2 compatible devices
- "renesas,rza1-usbhs" for RZ/A1 compatible device
When compatible with the generic version, nodes must list the
@@ -25,7 +28,11 @@ Required properties:
- reg: Base address and length of the register for the USBHS
- interrupts: Interrupt specifier for the USBHS
- - clocks: A list of phandle + clock specifier pairs
+ - clocks: A list of phandle + clock specifier pairs.
+ - In case of "renesas,rcar-gen3-usbhs", two clocks are required:
+ the first clock should be the peripheral clock and the second one
+ the host clock.
+ - In all other cases, one clock is required: the peripheral clock.
Optional properties:
- renesas,buswait: Integer to use BUSWAIT register
diff --git a/Documentation/devicetree/bindings/usb/usb-ehci.txt b/Documentation/devicetree/bindings/usb/usb-ehci.txt
index 0f1b75386207..406252d14c6b 100644
--- a/Documentation/devicetree/bindings/usb/usb-ehci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-ehci.txt
@@ -15,7 +15,11 @@ Optional properties:
- needs-reset-on-resume : boolean, set this to force EHCI reset after resume
- has-transaction-translator : boolean, set this if EHCI have a Transaction
Translator built into the root hub.
- - clocks : a list of phandle + clock specifier pairs
+ - clocks : a list of phandle + clock specifier pairs. For Renesas
+ R-Car Gen3 SoCs:
+ - for a host-only channel: the first clock should be the host clock.
+ - for a USB DRD channel: the first clock should be the host clock and
+ the second one the peripheral clock.
- phys : see usb-hcd.txt in the current directory
- resets : phandle + reset specifier pair
diff --git a/Documentation/devicetree/bindings/usb/usb-ohci.txt b/Documentation/devicetree/bindings/usb/usb-ohci.txt
index a8d2103d1f3d..aaaa5255c972 100644
--- a/Documentation/devicetree/bindings/usb/usb-ohci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-ohci.txt
@@ -12,7 +12,11 @@ Optional properties:
- no-big-frame-no : boolean, set if frame_no lives in bits [15:0] of HCCA
- remote-wakeup-connected: remote wakeup is wired on the platform
- num-ports : u32, to override the detected port count
-- clocks : a list of phandle + clock specifier pairs
+- clocks : a list of phandle + clock specifier pairs. For Renesas
+ R-Car Gen3 SoCs:
+ - for a host-only channel: the first clock should be the host clock.
+ - for a USB DRD channel: the first clock should be the host clock and
+ the second one the peripheral clock.
- phys : see usb-hcd.txt in the current directory
- resets : a list of phandle + reset specifier pairs
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index ac4cd0d6195a..fea8b1545751 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -8,6 +8,8 @@ Required properties:
- "marvell,armada-375-xhci" for Armada 375 SoCs
- "marvell,armada-380-xhci" for Armada 38x SoCs
- "renesas,xhci-r8a7743" for r8a7743 SoC
+ - "renesas,xhci-r8a7744" for r8a7744 SoC
+ - "renesas,xhci-r8a774a1" for r8a774a1 SoC
- "renesas,xhci-r8a7790" for r8a7790 SoC
- "renesas,xhci-r8a7791" for r8a7791 SoC
- "renesas,xhci-r8a7793" for r8a7793 SoC
@@ -17,7 +19,8 @@ Required properties:
- "renesas,xhci-r8a77990" for r8a77990 SoC
- "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible
device
- - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device
+ - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 or RZ/G2 compatible
+ device
- "xhci-platform" (deprecated)
When compatible with the generic version, nodes must list the
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 2c3fc512e746..f26bf667e530 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -235,6 +235,7 @@ micrel Micrel Inc.
microchip Microchip Technology Inc.
microcrystal Micro Crystal AG
micron Micron Technology Inc.
+mikroe MikroElektronika d.o.o.
minix MINIX Technology Ltd.
miramems MiraMEMS Sensing Technology Co., Ltd.
mitsubishi Mitsubishi Electric Corporation
diff --git a/Documentation/driver-api/basics.rst b/Documentation/driver-api/basics.rst
index 826e85d50a16..e970fadf4d1a 100644
--- a/Documentation/driver-api/basics.rst
+++ b/Documentation/driver-api/basics.rst
@@ -121,6 +121,9 @@ Kernel utility functions
.. kernel-doc:: kernel/rcu/update.c
:export:
+.. kernel-doc:: include/linux/overflow.h
+ :internal:
+
Device Resource Management
--------------------------
diff --git a/Documentation/driver-api/firewire.rst b/Documentation/driver-api/firewire.rst
new file mode 100644
index 000000000000..94a2d7f01d99
--- /dev/null
+++ b/Documentation/driver-api/firewire.rst
@@ -0,0 +1,48 @@
+===========================================
+Firewire (IEEE 1394) Driver Interface Guide
+===========================================
+
+Introduction and Overview
+=========================
+
+The Linux FireWire subsystem adds some interfaces into the Linux system to
+use/maintain any resource on the IEEE 1394 bus.
+
+The main purpose of these interfaces is to access the address space on each
+node of the IEEE 1394 bus following the ISO/IEC 13213 (IEEE 1212) procedure,
+and to control isochronous resources on the bus following the IEEE 1394
+procedure.
+
+Two types of interfaces are added, according to the consumers of the
+interface. A set of userspace interfaces is available via `firewire character
+devices`. A set of kernel interfaces is available via exported symbols in the
+`firewire-core` module.
+
+Firewire char device data structures
+====================================
+
+.. include:: /ABI/stable/firewire-cdev
+ :literal:
+
+.. kernel-doc:: include/uapi/linux/firewire-cdev.h
+ :internal:
+
+Firewire device probing and sysfs interfaces
+============================================
+
+.. include:: /ABI/stable/sysfs-bus-firewire
+ :literal:
+
+.. kernel-doc:: drivers/firewire/core-device.c
+ :export:
+
+Firewire core transaction interfaces
+====================================
+
+.. kernel-doc:: drivers/firewire/core-transaction.c
+ :export:
+
+Firewire Isochronous I/O interfaces
+===================================
+
+.. kernel-doc:: drivers/firewire/core-iso.c
+ :export:
+
diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
index 4b3825da48d9..82b6dbbd31cd 100644
--- a/Documentation/driver-api/fpga/fpga-mgr.rst
+++ b/Documentation/driver-api/fpga/fpga-mgr.rst
@@ -184,6 +184,11 @@ API for implementing a new FPGA Manager driver
API for programming an FPGA
---------------------------
+FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+ :doc: FPGA Manager flags
+
.. kernel-doc:: include/linux/fpga/fpga-mgr.h
:functions: fpga_image_info
diff --git a/Documentation/driver-api/gpio/board.rst b/Documentation/driver-api/gpio/board.rst
index 2c112553df84..a0f294e2e250 100644
--- a/Documentation/driver-api/gpio/board.rst
+++ b/Documentation/driver-api/gpio/board.rst
@@ -193,3 +193,27 @@ And the table can be added to the board code as follows::
The line will be hogged as soon as the gpiochip is created or - in case the
chip was created earlier - when the hog table is registered.
+
+Arrays of pins
+--------------
+In addition to requesting pins belonging to a function one by one, a device may
+also request an array of pins assigned to the function. The way those pins are
+mapped to the device determines if the array qualifies for fast bitmap
+processing. If so, a bitmap is passed through the get/set array functions
+directly between a caller and the respective .get/set_multiple() callback of
+a GPIO chip.
+
+In order to qualify for fast bitmap processing, the array must meet the
+following requirements:
+- pin hardware number of array member 0 must also be 0,
+- pin hardware numbers of consecutive array members which belong to the same
+ chip as member 0 does must also match their array indexes.
+
+Otherwise, the fast bitmap processing path is not used; this avoids
+processing consecutive pins which belong to the same chip but are not in
+hardware order separately.
+
+If the array qualifies for the fast bitmap processing path, pins which belong to
+different chips than member 0 does, as well as those with indexes different from
+their hardware pin numbers, are excluded from the fast path, both input and
+output. Moreover, open drain and open source pins are excluded from fast bitmap
+output processing.
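+
+As an illustration, a minimal sketch of a board mapping that meets the above
+requirements (the chip label "gpiochip0", the "port" function name and the
+"example-device" name are assumptions made up for this example)::
+
+	static struct gpiod_lookup_table example_port_gpios = {
+		.dev_id = "example-device",
+		.table = {
+			/* array index n maps to hardware line n of a single
+			 * chip, starting at hardware number 0, so the array
+			 * qualifies for fast bitmap processing */
+			GPIO_LOOKUP_IDX("gpiochip0", 0, "port", 0, GPIO_ACTIVE_HIGH),
+			GPIO_LOOKUP_IDX("gpiochip0", 1, "port", 1, GPIO_ACTIVE_HIGH),
+			GPIO_LOOKUP_IDX("gpiochip0", 2, "port", 2, GPIO_ACTIVE_HIGH),
+			GPIO_LOOKUP_IDX("gpiochip0", 3, "port", 3, GPIO_ACTIVE_HIGH),
+			{ },
+		},
+	};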
diff --git a/Documentation/driver-api/gpio/consumer.rst b/Documentation/driver-api/gpio/consumer.rst
index aa03f389d41d..5e4d8aa68913 100644
--- a/Documentation/driver-api/gpio/consumer.rst
+++ b/Documentation/driver-api/gpio/consumer.rst
@@ -109,9 +109,11 @@ For a function using multiple GPIOs all of those can be obtained with one call::
enum gpiod_flags flags)
This function returns a struct gpio_descs which contains an array of
-descriptors::
+descriptors. It also contains a pointer to a gpiolib private structure which,
+if passed back to get/set array functions, may speed up I/O processing::
struct gpio_descs {
+ struct gpio_array *info;
unsigned int ndescs;
struct gpio_desc *desc[];
}
@@ -323,29 +325,37 @@ The following functions get or set the values of an array of GPIOs::
int gpiod_get_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array);
-
- void gpiod_set_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
- void gpiod_set_raw_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
- void gpiod_set_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
- void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
+
+ int gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
+ int gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
+ int gpiod_set_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
+ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
The array can be an arbitrary set of GPIOs. The functions will try to access
GPIOs belonging to the same bank or chip simultaneously if supported by the
@@ -356,8 +366,9 @@ accessed sequentially.
The functions take three arguments:
* array_size - the number of array elements
* desc_array - an array of GPIO descriptors
- * value_array - an array to store the GPIOs' values (get) or
- an array of values to assign to the GPIOs (set)
+ * array_info - optional information obtained from gpiod_get_array()
+ * value_bitmap - a bitmap to store the GPIOs' values (get) or
+ a bitmap of values to assign to the GPIOs (set)
The descriptor array can be obtained using the gpiod_get_array() function
or one of its variants. If the group of descriptors returned by that function
@@ -366,16 +377,25 @@ the struct gpio_descs returned by gpiod_get_array()::
struct gpio_descs *my_gpio_descs = gpiod_get_array(...);
gpiod_set_array_value(my_gpio_descs->ndescs, my_gpio_descs->desc,
- my_gpio_values);
+ my_gpio_descs->info, my_gpio_value_bitmap);
It is also possible to access a completely arbitrary array of descriptors. The
descriptors may be obtained using any combination of gpiod_get() and
gpiod_get_array(). Afterwards the array of descriptors has to be setup
-manually before it can be passed to one of the above functions.
+manually before it can be passed to one of the above functions. In that case,
+array_info should be set to NULL.
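+
+As a minimal sketch (error handling omitted; the device handle and the
+"status" GPIO name are hypothetical, and my_gpio_descs is the array obtained
+in the previous example), such a hand-built array could be used like this::
+
+	struct gpio_desc *desc[2];
+	DECLARE_BITMAP(values, 2);
+
+	desc[0] = gpiod_get(dev, "status", GPIOD_OUT_LOW);
+	desc[1] = my_gpio_descs->desc[0];
+
+	bitmap_zero(values, 2);
+	__set_bit(0, values);	/* drive the first descriptor high */
+
+	/* array_info must be NULL for a hand-built array */
+	gpiod_set_array_value(2, desc, NULL, values);
+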
Note that for optimal performance GPIOs belonging to the same chip should be
contiguous within the array of descriptors.
+Even better performance may be achieved if the array indexes of the
+descriptors match the hardware pin numbers of a single chip. If the array
+passed to a get/set array function matches the one obtained from
+gpiod_get_array(), and the array_info associated with it is also passed, the
+function may take a fast bitmap processing path, passing the value_bitmap
+argument directly to the respective .get/set_multiple() callback of the chip.
+That allows GPIO banks to be used as data I/O ports without much loss of
+performance.
+
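+For instance (a minimal sketch, assuming an 8-line "data" port that maps onto
+a single GPIO bank in hardware order; error handling omitted)::
+
+	struct gpio_descs *port = gpiod_get_array(dev, "data", GPIOD_OUT_LOW);
+	DECLARE_BITMAP(value, 8);
+
+	bitmap_zero(value, 8);
+	bitmap_set(value, 0, 4);	/* write 0x0f to the port */
+
+	gpiod_set_array_value(port->ndescs, port->desc, port->info, value);
+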
The return value of gpiod_get_array_value() and its variants is 0 on success
or negative on error. Note the difference to gpiod_get_value(), which returns
0 or 1 on success to convey the GPIO value. With the array functions, the GPIO
diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst
index cbe0242842d1..a6c14ff0c54f 100644
--- a/Documentation/driver-api/gpio/driver.rst
+++ b/Documentation/driver-api/gpio/driver.rst
@@ -374,7 +374,28 @@ When implementing an irqchip inside a GPIO driver, these two functions should
typically be called in the .startup() and .shutdown() callbacks from the
irqchip.
-When using the gpiolib irqchip helpers, these callback are automatically
+When using the gpiolib irqchip helpers, these callbacks are automatically
+assigned.
+
+
+Disabling and enabling IRQs
+---------------------------
+When a GPIO is used as an IRQ signal, gpiolib also needs to know whether
+the IRQ is enabled or disabled. In order to inform gpiolib about this,
+a driver should call::
+
+ void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset)
+
+This allows drivers to drive the GPIO as an output while the IRQ is
+disabled. When the IRQ is enabled again, a driver should call::
+
+ void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset)
+
+When implementing an irqchip inside a GPIO driver, these two functions should
+typically be called in the .irq_disable() and .irq_enable() callbacks from the
+irqchip.
+
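+A hedged sketch (function names hypothetical) of an irqchip wiring these
+calls into its callbacks::
+
+	static void my_gpio_irq_disable(struct irq_data *d)
+	{
+		struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+		/* ... mask the interrupt in hardware here ... */
+		gpiochip_disable_irq(chip, irqd_to_hwirq(d));
+	}
+
+	static void my_gpio_irq_enable(struct irq_data *d)
+	{
+		struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+		gpiochip_enable_irq(chip, irqd_to_hwirq(d));
+		/* ... then unmask the interrupt in hardware ... */
+	}
+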
+When using the gpiolib irqchip helpers, these callbacks are automatically
assigned.
Real-Time compliance for GPIO IRQ chips
diff --git a/Documentation/driver-api/gpio/index.rst b/Documentation/driver-api/gpio/index.rst
index 6a374ded1287..c5b8467f9104 100644
--- a/Documentation/driver-api/gpio/index.rst
+++ b/Documentation/driver-api/gpio/index.rst
@@ -38,7 +38,7 @@ Device tree support
Device-managed API
==================
-.. kernel-doc:: drivers/gpio/devres.c
+.. kernel-doc:: drivers/gpio/gpiolib-devres.c
:export:
sysfs helpers
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 6d9f2f9fe20e..909f991b4c0d 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -29,7 +29,8 @@ available subsections can be seen below.
iio/index
input
usb/index
- pci
+ firewire
+ pci/index
spi
i2c
hsi
diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst
index c55a6034c397..55447659b81f 100644
--- a/Documentation/driver-api/mtdnand.rst
+++ b/Documentation/driver-api/mtdnand.rst
@@ -180,10 +180,10 @@ by a chip select decoder.
{
struct nand_chip *this = mtd_to_nand(mtd);
switch(cmd){
- case NAND_CTL_SETCLE: this->IO_ADDR_W |= CLE_ADRR_BIT; break;
- case NAND_CTL_CLRCLE: this->IO_ADDR_W &= ~CLE_ADRR_BIT; break;
- case NAND_CTL_SETALE: this->IO_ADDR_W |= ALE_ADRR_BIT; break;
- case NAND_CTL_CLRALE: this->IO_ADDR_W &= ~ALE_ADRR_BIT; break;
+ case NAND_CTL_SETCLE: this->legacy.IO_ADDR_W |= CLE_ADRR_BIT; break;
+ case NAND_CTL_CLRCLE: this->legacy.IO_ADDR_W &= ~CLE_ADRR_BIT; break;
+ case NAND_CTL_SETALE: this->legacy.IO_ADDR_W |= ALE_ADRR_BIT; break;
+ case NAND_CTL_CLRALE: this->legacy.IO_ADDR_W &= ~ALE_ADRR_BIT; break;
}
}
@@ -197,7 +197,7 @@ to read back the state of the pin. The function has no arguments and
should return 0, if the device is busy (R/B pin is low) and 1, if the
device is ready (R/B pin is high). If the hardware interface does not
give access to the ready busy pin, then the function must not be defined
-and the function pointer this->dev_ready is set to NULL.
+and the function pointer this->legacy.dev_ready is set to NULL.
Init function
-------------
@@ -235,18 +235,18 @@ necessary information about the device.
}
/* Set address of NAND IO lines */
- this->IO_ADDR_R = baseaddr;
- this->IO_ADDR_W = baseaddr;
+ this->legacy.IO_ADDR_R = baseaddr;
+ this->legacy.IO_ADDR_W = baseaddr;
/* Reference hardware control function */
this->hwcontrol = board_hwcontrol;
/* Set command delay time, see datasheet for correct value */
- this->chip_delay = CHIP_DEPENDEND_COMMAND_DELAY;
+ this->legacy.chip_delay = CHIP_DEPENDENT_COMMAND_DELAY;
/* Assign the device ready function, if available */
- this->dev_ready = board_dev_ready;
+ this->legacy.dev_ready = board_dev_ready;
this->eccmode = NAND_ECC_SOFT;
/* Scan to find existence of the device */
- if (nand_scan (board_mtd, 1)) {
+ if (nand_scan (this, 1)) {
err = -ENXIO;
goto out_ior;
}
@@ -277,7 +277,7 @@ unregisters the partitions in the MTD layer.
static void __exit board_cleanup (void)
{
/* Release resources, unregister device */
- nand_release (board_mtd);
+ nand_release (mtd_to_nand(board_mtd));
/* unmap physical address */
iounmap(baseaddr);
@@ -336,17 +336,17 @@ connected to an address decoder.
struct nand_chip *this = mtd_to_nand(mtd);
/* Deselect all chips */
- this->IO_ADDR_R &= ~BOARD_NAND_ADDR_MASK;
- this->IO_ADDR_W &= ~BOARD_NAND_ADDR_MASK;
+ this->legacy.IO_ADDR_R &= ~BOARD_NAND_ADDR_MASK;
+ this->legacy.IO_ADDR_W &= ~BOARD_NAND_ADDR_MASK;
switch (chip) {
case 0:
- this->IO_ADDR_R |= BOARD_NAND_ADDR_CHIP0;
- this->IO_ADDR_W |= BOARD_NAND_ADDR_CHIP0;
+ this->legacy.IO_ADDR_R |= BOARD_NAND_ADDR_CHIP0;
+ this->legacy.IO_ADDR_W |= BOARD_NAND_ADDR_CHIP0;
break;
....
case n:
- this->IO_ADDR_R |= BOARD_NAND_ADDR_CHIPn;
- this->IO_ADDR_W |= BOARD_NAND_ADDR_CHIPn;
+ this->legacy.IO_ADDR_R |= BOARD_NAND_ADDR_CHIPn;
+ this->legacy.IO_ADDR_W |= BOARD_NAND_ADDR_CHIPn;
break;
}
}
diff --git a/Documentation/driver-api/pci/index.rst b/Documentation/driver-api/pci/index.rst
new file mode 100644
index 000000000000..c6cf1fef61ce
--- /dev/null
+++ b/Documentation/driver-api/pci/index.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================================
+The Linux PCI driver implementer's API guide
+============================================
+
+.. class:: toc-title
+
+ Table of contents
+
+.. toctree::
+ :maxdepth: 2
+
+ pci
+ p2pdma
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/driver-api/pci/p2pdma.rst b/Documentation/driver-api/pci/p2pdma.rst
new file mode 100644
index 000000000000..4c577fa7bef9
--- /dev/null
+++ b/Documentation/driver-api/pci/p2pdma.rst
@@ -0,0 +1,145 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================
+PCI Peer-to-Peer DMA Support
+============================
+
+The PCI bus has pretty decent support for performing DMA transfers
+between two devices on the bus. This type of transaction is henceforth
+called Peer-to-Peer (or P2P). However, there are a number of issues that
+make P2P transactions tricky to do in a perfectly safe way.
+
+One of the biggest issues is that PCI doesn't require forwarding
+transactions between hierarchy domains, and in PCIe, each Root Port
+defines a separate hierarchy domain. To make things worse, there is no
+simple way to determine if a given Root Complex supports this or not
+(see PCIe r4.0, sec 1.3.1). Therefore, as of this writing, the kernel
+only supports doing P2P when the endpoints involved are all behind the
+same PCI bridge. Such devices are all in the same PCI hierarchy domain,
+and the spec guarantees that all transactions within the hierarchy will
+be routable; it does not, however, require routing between hierarchies.
+
+The second issue is that to make use of existing interfaces in Linux,
+memory that is used for P2P transactions needs to be backed by struct
+pages. However, PCI BARs are not typically cache coherent, so there are
+a few corner-case gotchas with these pages and developers need to be
+careful about what they do with them.
+
+
+Driver Writer's Guide
+=====================
+
+In a given P2P implementation there may be three or more different
+types of kernel drivers in play:
+
+* Provider - A driver which provides or publishes P2P resources like
+ memory or doorbell registers to other drivers.
+* Client - A driver which makes use of a resource by setting up a
+ DMA transaction to or from it.
+* Orchestrator - A driver which orchestrates the flow of data between
+ clients and providers.
+
+In many cases there could be overlap between these three types (e.g.,
+it may be typical for a driver to be both a provider and a client).
+
+For example, in the NVMe Target Copy Offload implementation:
+
+* The NVMe PCI driver is a client, a provider and an orchestrator
+  in that it exposes any CMB (Controller Memory Buffer) as a P2P memory
+  resource (provider), it accepts P2P memory pages as buffers in requests
+  to be used directly (client) and it can also make use of the CMB as
+  submission queue entries (orchestrator).
+* The RDMA driver is a client in this arrangement so that an RNIC
+ can DMA directly to the memory exposed by the NVMe device.
+* The NVMe Target driver (nvmet) can orchestrate the data from the RNIC
+ to the P2P memory (CMB) and then to the NVMe device (and vice versa).
+
+This is currently the only arrangement supported by the kernel but
+one could imagine slight tweaks to this that would allow for the same
+functionality. For example, if a specific RNIC added a BAR with some
+memory behind it, its driver could add support as a P2P provider and
+then the NVMe Target could use the RNIC's memory instead of the CMB
+in cases where the NVMe cards in use do not have CMB support.
+
+
+Provider Drivers
+----------------
+
+A provider simply needs to register a BAR (or a portion of a BAR)
+as a P2P DMA resource using :c:func:`pci_p2pdma_add_resource()`.
+This will register struct pages for all the specified memory.
+
+After that it may optionally publish all of its resources as
+P2P memory using :c:func:`pci_p2pmem_publish()`. This will allow
+any orchestrator drivers to find and use the memory. When marked in
+this way, the resource must be regular memory with no side effects.
+
+For the time being this is fairly rudimentary in that all resources
+are typically going to be P2P memory. Future work will likely expand
+this to include other types of resources like doorbells.
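+
+A rough sketch of a provider (the BAR number and size are hypothetical;
+error handling abbreviated)::
+
+	/* expose 64KB of BAR 4 as P2P DMA memory and publish it */
+	int rc = pci_p2pdma_add_resource(pdev, 4, SZ_64K, 0);
+
+	if (rc)
+		return rc;
+
+	pci_p2pmem_publish(pdev, true);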
+
+
+Client Drivers
+--------------
+
+A client driver typically only has to conditionally change its DMA map
+routine to use the mapping function :c:func:`pci_p2pdma_map_sg()` instead
+of the usual :c:func:`dma_map_sg()` function. Memory mapped in this
+way does not need to be unmapped.
+
+The client may also, optionally, make use of
+:c:func:`is_pci_p2pdma_page()` to determine when to use the P2P mapping
+functions and when to use the regular mapping functions. In some
+situations, it may be more appropriate to use a flag to indicate that a
+given request is P2P memory and map it appropriately. It is important to
+ensure that struct pages that back P2P memory stay out of code that
+does not have support for them, as other code may treat the pages as
+regular memory, which may not be appropriate.
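+
+A conditional mapping for a scatterlist ``sgl`` might look roughly like this
+(a sketch only; the real decision logic is driver specific)::
+
+	if (is_pci_p2pdma_page(sg_page(sgl)))
+		nents = pci_p2pdma_map_sg(dev, sgl, nents, dir);
+	else
+		nents = dma_map_sg(dev, sgl, nents, dir);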
+
+
+Orchestrator Drivers
+--------------------
+
+The first task an orchestrator driver must do is compile a list of
+all client devices that will be involved in a given transaction. For
+example, the NVMe Target driver creates a list including the namespace
+block device and the RNIC in use. If the orchestrator has access to
+a specific P2P provider to use, it may check compatibility using
+:c:func:`pci_p2pdma_distance()`; otherwise it may find a memory provider
+that's compatible with all clients using :c:func:`pci_p2pmem_find()`.
+If more than one provider is supported, the one nearest to all the clients
+will be chosen first. If more than one provider is an equal distance away,
+the one returned will be chosen at random (the choice is not arbitrary but
+truly random). This function returns the PCI device to use for the provider
+with a reference taken; therefore, when it is no longer needed, it should be
+returned with pci_dev_put().
+
+Once a provider is selected, the orchestrator can then use
+:c:func:`pci_alloc_p2pmem()` and :c:func:`pci_free_p2pmem()` to
+allocate P2P memory from the provider. :c:func:`pci_p2pmem_alloc_sgl()`
+and :c:func:`pci_p2pmem_free_sgl()` are convenience functions for
+allocating scatter-gather lists with P2P memory.
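+
+Continuing as a sketch (with ``provider`` found as described above and a
+hypothetical ``size``)::
+
+	void *buf = pci_alloc_p2pmem(provider, size);
+
+	if (!buf)
+		return -ENOMEM;
+
+	/* ... perform the P2P transaction ... */
+
+	pci_free_p2pmem(provider, buf, size);
+	pci_dev_put(provider);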
+
+Struct Page Caveats
+-------------------
+
+Driver writers should be very careful not to pass these special
+struct pages to code that isn't prepared for them. At this time, the kernel
+interfaces do not have any checks for ensuring this. This obviously
+precludes passing these pages to userspace.
+
+P2P memory is also technically IO memory but should never have any side
+effects behind it. Thus, the order of loads and stores should not be important
+and ioreadX(), iowriteX() and friends should not be necessary.
+However, as the memory is not cache coherent, if access ever needs to
+be protected by a spinlock then :c:func:`mmiowb()` must be used before
+unlocking the lock. (See ACQUIRES VS I/O ACCESSES in
+Documentation/memory-barriers.txt)
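+
+A minimal sketch of that locking pattern (lock name hypothetical)::
+
+	spin_lock(&mylock);
+
+	/* ... stores to P2P memory ... */
+
+	mmiowb();
+	spin_unlock(&mylock);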
+
+
+P2P DMA Support Library
+=======================
+
+.. kernel-doc:: drivers/pci/p2pdma.c
+ :export:
diff --git a/Documentation/driver-api/pci.rst b/Documentation/driver-api/pci/pci.rst
index ca85e5e78b2c..ca85e5e78b2c 100644
--- a/Documentation/driver-api/pci.rst
+++ b/Documentation/driver-api/pci/pci.rst
diff --git a/Documentation/efi-stub.txt b/Documentation/efi-stub.txt
index 41df801f9a50..833edb0d0bc4 100644
--- a/Documentation/efi-stub.txt
+++ b/Documentation/efi-stub.txt
@@ -83,7 +83,18 @@ is passed to bzImage.efi.
The "dtb=" option
-----------------
-For the ARM and arm64 architectures, we also need to be able to provide a
-device tree to the kernel. This is done with the "dtb=" command line option,
-and is processed in the same manner as the "initrd=" option that is
+For the ARM and arm64 architectures, a device tree must be provided to
+the kernel. Normally, the firmware supplies the device tree via the
+EFI CONFIGURATION TABLE. However, the "dtb=" command line option can
+be used to override the firmware-supplied device tree, or to supply
+one when the firmware is unable to.
+
+Please note: Firmware adds runtime configuration information to the
+device tree before booting the kernel. If dtb= is used to override
+the device tree, then any runtime data provided by firmware will be
+lost. The dtb= option should only be used either as a debug tool, or
+as a last resort when a device tree is not provided in the EFI
+CONFIGURATION TABLE.
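+
+For example (the file name here is hypothetical):
+
+	fs0:> bzImage.efi dtb=my-board.dtb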
+
+"dtb=" is processed in the same manner as the "initrd=" option that is
described above.
diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
deleted file mode 100644
index fe85e7c5907a..000000000000
--- a/Documentation/fb/00-INDEX
+++ /dev/null
@@ -1,75 +0,0 @@
-Index of files in Documentation/fb. If you think something about frame
-buffer devices needs an entry here, needs correction or you've written one
-please mail me.
- Geert Uytterhoeven <geert@linux-m68k.org>
-
-00-INDEX
- - this file.
-api.txt
- - The frame buffer API between applications and buffer devices.
-arkfb.txt
- - info on the fbdev driver for ARK Logic chips.
-aty128fb.txt
- - info on the ATI Rage128 frame buffer driver.
-cirrusfb.txt
- - info on the driver for Cirrus Logic chipsets.
-cmap_xfbdev.txt
- - an introduction to fbdev's cmap structures.
-deferred_io.txt
- - an introduction to deferred IO.
-efifb.txt
- - info on the EFI platform driver for Intel based Apple computers.
-ep93xx-fb.txt
- - info on the driver for EP93xx LCD controller.
-fbcon.txt
- - intro to and usage guide for the framebuffer console (fbcon).
-framebuffer.txt
- - introduction to frame buffer devices.
-gxfb.txt
- - info on the framebuffer driver for AMD Geode GX2 based processors.
-intel810.txt
- - documentation for the Intel 810/815 framebuffer driver.
-intelfb.txt
- - docs for Intel 830M/845G/852GM/855GM/865G/915G/945G fb driver.
-internals.txt
- - quick overview of frame buffer device internals.
-lxfb.txt
- - info on the framebuffer driver for AMD Geode LX based processors.
-matroxfb.txt
- - info on the Matrox framebuffer driver for Alpha, Intel and PPC.
-metronomefb.txt
- - info on the driver for the Metronome display controller.
-modedb.txt
- - info on the video mode database.
-pvr2fb.txt
- - info on the PowerVR 2 frame buffer driver.
-pxafb.txt
- - info on the driver for the PXA25x LCD controller.
-s3fb.txt
- - info on the fbdev driver for S3 Trio/Virge chips.
-sa1100fb.txt
- - information about the driver for the SA-1100 LCD controller.
-sh7760fb.txt
- - info on the SH7760/SH7763 integrated LCDC Framebuffer driver.
-sisfb.txt
- - info on the framebuffer device driver for various SiS chips.
-sm501.txt
- - info on the framebuffer device driver for sm501 videoframebuffer.
-sstfb.txt
- - info on the frame buffer driver for 3dfx' Voodoo Graphics boards.
-tgafb.txt
- - info on the TGA (DECChip 21030) frame buffer driver.
-tridentfb.txt
- info on the framebuffer driver for some Trident chip based cards.
-udlfb.txt
- - Driver for DisplayLink USB 2.0 chips.
-uvesafb.txt
- - info on the userspace VESA (VBE2+ compliant) frame buffer device.
-vesafb.txt
- - info on the VESA frame buffer device.
-viafb.modes
- - list of modes for VIA Integration Graphic Chip.
-viafb.txt
- - info on the VIA Integration Graphic Chip console framebuffer driver.
-vt8623fb.txt
- - info on the fb driver for the graphics core in VIA VT8623 chipsets.
diff --git a/Documentation/fb/uvesafb.txt b/Documentation/fb/uvesafb.txt
index f6362d88763b..aa924196c366 100644
--- a/Documentation/fb/uvesafb.txt
+++ b/Documentation/fb/uvesafb.txt
@@ -15,7 +15,8 @@ than x86. Check the v86d documentation for a list of currently supported
arches.
v86d source code can be downloaded from the following website:
- http://dev.gentoo.org/~spock/projects/uvesafb
+
+ https://github.com/mjanusz/v86d
Please refer to the v86d documentation for detailed configuration and
installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
--
Michal Januszewski <spock@gentoo.org>
- Last updated: 2009-03-30
+ Last updated: 2017-10-10
Documentation of the uvesafb options is loosely based on vesafb.txt.
diff --git a/Documentation/fb/vesafb.txt b/Documentation/fb/vesafb.txt
index 950d5a658cb3..413bb73235be 100644
--- a/Documentation/fb/vesafb.txt
+++ b/Documentation/fb/vesafb.txt
@@ -114,11 +114,11 @@ to turn it on.
You can pass options to vesafb using "video=vesafb:option" on
the kernel command line. Multiple options should be separated
-by comma, like this: "video=vesafb:ypan,invers"
+by comma, like this: "video=vesafb:ypan,inverse"
Accepted options:
-invers no comment...
+inverse use inverse color map
ypan enable display panning using the VESA protected mode
interface. The visible screen is just a window of the
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
deleted file mode 100644
index 0937bade1099..000000000000
--- a/Documentation/filesystems/00-INDEX
+++ /dev/null
@@ -1,153 +0,0 @@
-00-INDEX
- - this file (info on some of the filesystems supported by linux).
-Locking
- - info on locking rules as they pertain to Linux VFS.
-9p.txt
- - 9p (v9fs) is an implementation of the Plan 9 remote fs protocol.
-adfs.txt
- - info and mount options for the Acorn Advanced Disc Filing System.
-afs.txt
- - info and examples for the distributed AFS (Andrew File System) fs.
-affs.txt
- - info and mount options for the Amiga Fast File System.
-autofs-mount-control.txt
- - info on device control operations for autofs module.
-automount-support.txt
- - information about filesystem automount support.
-befs.txt
- - information about the BeOS filesystem for Linux.
-bfs.txt
- - info for the SCO UnixWare Boot Filesystem (BFS).
-btrfs.txt
- - info for the BTRFS filesystem.
-caching/
- - directory containing filesystem cache documentation.
-ceph.txt
- - info for the Ceph Distributed File System.
-cifs/
- - directory containing CIFS filesystem documentation and example code.
-coda.txt
- - description of the CODA filesystem.
-configfs/
- - directory containing configfs documentation and example code.
-cramfs.txt
- - info on the cram filesystem for small storage (ROMs etc).
-dax.txt
- - info on avoiding the page cache for files stored on CPU-addressable
- storage devices.
-debugfs.txt
- - info on the debugfs filesystem.
-devpts.txt
- - info on the devpts filesystem.
-directory-locking
- - info about the locking scheme used for directory operations.
-dlmfs.txt
- - info on the userspace interface to the OCFS2 DLM.
-dnotify.txt
- - info about directory notification in Linux.
-dnotify_test.c
- - example program for dnotify.
-ecryptfs.txt
- - docs on eCryptfs: stacked cryptographic filesystem for Linux.
-efivarfs.txt
- - info for the efivarfs filesystem.
-exofs.txt
- - info, usage, mount options, design about EXOFS.
-ext2.txt
- - info, mount options and specifications for the Ext2 filesystem.
-ext3.txt
- - info, mount options and specifications for the Ext3 filesystem.
-ext4.txt
- - info, mount options and specifications for the Ext4 filesystem.
-f2fs.txt
- - info and mount options for the F2FS filesystem.
-fiemap.txt
- - info on fiemap ioctl.
-files.txt
- - info on file management in the Linux kernel.
-fuse.txt
- - info on the Filesystem in User SpacE including mount options.
-gfs2-glocks.txt
- - info on the Global File System 2 - Glock internal locking rules.
-gfs2-uevents.txt
- - info on the Global File System 2 - uevents.
-gfs2.txt
- - info on the Global File System 2.
-hfs.txt
- - info on the Macintosh HFS Filesystem for Linux.
-hfsplus.txt
- - info on the Macintosh HFSPlus Filesystem for Linux.
-hpfs.txt
- - info and mount options for the OS/2 HPFS.
-inotify.txt
- - info on the powerful yet simple file change notification system.
-isofs.txt
- - info and mount options for the ISO 9660 (CDROM) filesystem.
-jfs.txt
- - info and mount options for the JFS filesystem.
-locks.txt
- - info on file locking implementations, flock() vs. fcntl(), etc.
-mandatory-locking.txt
- - info on the Linux implementation of Sys V mandatory file locking.
-nfs/
- - nfs-related documentation.
-nilfs2.txt
- - info and mount options for the NILFS2 filesystem.
-ntfs.txt
- - info and mount options for the NTFS filesystem (Windows NT).
-ocfs2.txt
- - info and mount options for the OCFS2 clustered filesystem.
-omfs.txt
- - info on the Optimized MPEG FileSystem.
-path-lookup.txt
- - info on path walking and name lookup locking.
-pohmelfs/
- - directory containing pohmelfs filesystem documentation.
-porting
- - various information on filesystem porting.
-proc.txt
- - info on Linux's /proc filesystem.
-qnx6.txt
- - info on the QNX6 filesystem.
-quota.txt
- - info on Quota subsystem.
-ramfs-rootfs-initramfs.txt
- - info on the 'in memory' filesystems ramfs, rootfs and initramfs.
-relay.txt
- - info on relay, for efficient streaming from kernel to user space.
-romfs.txt
- - description of the ROMFS filesystem.
-seq_file.txt
- - how to use the seq_file API.
-sharedsubtree.txt
- - a description of shared subtrees for namespaces.
-spufs.txt
- - info and mount options for the SPU filesystem used on Cell.
-squashfs.txt
- - info on the squashfs filesystem.
-sysfs-pci.txt
- - info on accessing PCI device resources through sysfs.
-sysfs-tagging.txt
- - info on sysfs tagging to avoid duplicates.
-sysfs.txt
- - info on sysfs, a ram-based filesystem for exporting kernel objects.
-sysv-fs.txt
- - info on the SystemV/V7/Xenix/Coherent filesystem.
-tmpfs.txt
- - info on tmpfs, a filesystem that holds all files in virtual memory.
-ubifs.txt
- - info on the Unsorted Block Images FileSystem.
-udf.txt
- - info and mount options for the UDF filesystem.
-ufs.txt
- - info on the ufs filesystem.
-vfat.txt
- - info on using the VFAT filesystem used in Windows NT and Windows 95.
-vfs.txt
- - overview of the Virtual File System.
-xfs-delayed-logging-design.txt
- - info on the XFS Delayed Logging Design.
-xfs-self-describing-metadata.txt
- - info on XFS Self Describing Metadata.
-xfs.txt
- - info and mount options for the XFS filesystem.
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
index 70cb68bed2e8..bc393e0a22b8 100644
--- a/Documentation/filesystems/dax.txt
+++ b/Documentation/filesystems/dax.txt
@@ -75,7 +75,7 @@ exposure of uninitialized data through mmap.
These filesystems may be used for inspiration:
- ext2: see Documentation/filesystems/ext2.txt
-- ext4: see Documentation/filesystems/ext4.txt
+- ext4: see Documentation/filesystems/ext4/ext4.rst
- xfs: see Documentation/filesystems/xfs.txt
diff --git a/Documentation/filesystems/ext2.txt b/Documentation/filesystems/ext2.txt
index 81c0becab225..a45c9fc0747b 100644
--- a/Documentation/filesystems/ext2.txt
+++ b/Documentation/filesystems/ext2.txt
@@ -358,7 +358,7 @@ and are copied into the filesystem. If a transaction is incomplete at
the time of the crash, then there is no guarantee of consistency for
the blocks in that transaction so they are discarded (which means any
filesystem changes they represent are also lost).
-Check Documentation/filesystems/ext4.txt if you want to read more about
+Check Documentation/filesystems/ext4/ext4.rst if you want to read more about
ext4 and journaling.
References
diff --git a/Documentation/filesystems/ext4/ondisk/about.rst b/Documentation/filesystems/ext4/about.rst
index 0aadba052264..0aadba052264 100644
--- a/Documentation/filesystems/ext4/ondisk/about.rst
+++ b/Documentation/filesystems/ext4/about.rst
diff --git a/Documentation/filesystems/ext4/ondisk/allocators.rst b/Documentation/filesystems/ext4/allocators.rst
index 7aa85152ace3..7aa85152ace3 100644
--- a/Documentation/filesystems/ext4/ondisk/allocators.rst
+++ b/Documentation/filesystems/ext4/allocators.rst
diff --git a/Documentation/filesystems/ext4/ondisk/attributes.rst b/Documentation/filesystems/ext4/attributes.rst
index 0b01b67b81fe..54386a010a8d 100644
--- a/Documentation/filesystems/ext4/ondisk/attributes.rst
+++ b/Documentation/filesystems/ext4/attributes.rst
@@ -30,7 +30,7 @@ Extended attributes, when stored after the inode, have a header
``ext4_xattr_ibody_header`` that is 4 bytes long:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -47,7 +47,7 @@ The beginning of an extended attribute block is in
``struct ext4_xattr_header``, which is 32 bytes long:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -92,7 +92,7 @@ entries must be stored in sorted order. The sort order is
Attributes stored inside an inode do not need be stored in sorted order.
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -157,7 +157,7 @@ attribute name index field is set, and matching string is removed from
the key name. Here is a map of name index values to key prefixes:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Name Index
diff --git a/Documentation/filesystems/ext4/ondisk/bigalloc.rst b/Documentation/filesystems/ext4/bigalloc.rst
index c6d88557553c..c6d88557553c 100644
--- a/Documentation/filesystems/ext4/ondisk/bigalloc.rst
+++ b/Documentation/filesystems/ext4/bigalloc.rst
diff --git a/Documentation/filesystems/ext4/ondisk/bitmaps.rst b/Documentation/filesystems/ext4/bitmaps.rst
index c7546dbc197a..c7546dbc197a 100644
--- a/Documentation/filesystems/ext4/ondisk/bitmaps.rst
+++ b/Documentation/filesystems/ext4/bitmaps.rst
diff --git a/Documentation/filesystems/ext4/ondisk/blockgroup.rst b/Documentation/filesystems/ext4/blockgroup.rst
index baf888e4c06a..baf888e4c06a 100644
--- a/Documentation/filesystems/ext4/ondisk/blockgroup.rst
+++ b/Documentation/filesystems/ext4/blockgroup.rst
diff --git a/Documentation/filesystems/ext4/ondisk/blockmap.rst b/Documentation/filesystems/ext4/blockmap.rst
index 30e25750d88a..30e25750d88a 100644
--- a/Documentation/filesystems/ext4/ondisk/blockmap.rst
+++ b/Documentation/filesystems/ext4/blockmap.rst
diff --git a/Documentation/filesystems/ext4/ondisk/blocks.rst b/Documentation/filesystems/ext4/blocks.rst
index 73d4dc0f7bda..73d4dc0f7bda 100644
--- a/Documentation/filesystems/ext4/ondisk/blocks.rst
+++ b/Documentation/filesystems/ext4/blocks.rst
diff --git a/Documentation/filesystems/ext4/ondisk/checksums.rst b/Documentation/filesystems/ext4/checksums.rst
index 9d6a793b2e03..5519e253810d 100644
--- a/Documentation/filesystems/ext4/ondisk/checksums.rst
+++ b/Documentation/filesystems/ext4/checksums.rst
@@ -28,7 +28,7 @@ of checksum. The checksum function is whatever the superblock describes
(crc32c as of October 2013) unless noted otherwise.
.. list-table::
- :widths: 1 1 4
+ :widths: 20 8 50
:header-rows: 1
* - Metadata
diff --git a/Documentation/filesystems/ext4/ondisk/directory.rst b/Documentation/filesystems/ext4/directory.rst
index 8fcba68c2884..614034e24669 100644
--- a/Documentation/filesystems/ext4/ondisk/directory.rst
+++ b/Documentation/filesystems/ext4/directory.rst
@@ -34,7 +34,7 @@ is at most 263 bytes long, though on disk you'll need to reference
``dirent.rec_len`` to know for sure.
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -66,7 +66,7 @@ tree traversal. This format is ``ext4_dir_entry_2``, which is at most
``dirent.rec_len`` to know for sure.
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -99,7 +99,7 @@ tree traversal. This format is ``ext4_dir_entry_2``, which is at most
The directory file type is one of the following values:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -130,7 +130,7 @@ in the place where the name normally goes. The structure is
``struct ext4_dir_entry_tail``:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -212,7 +212,7 @@ The root of the htree is in ``struct dx_root``, which is the full length
of a data block:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -305,7 +305,7 @@ of a data block:
The directory hash is one of the following values:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -327,7 +327,7 @@ Interior nodes of an htree are recorded as ``struct dx_node``, which is
also the full length of a data block:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -375,7 +375,7 @@ The hash maps that exist in both ``struct dx_root`` and
long:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -405,7 +405,7 @@ directory index (which will ensure that there's space for the checksum.
The dx\_tail structure is 8 bytes long and looks like this:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
diff --git a/Documentation/filesystems/ext4/ondisk/dynamic.rst b/Documentation/filesystems/ext4/dynamic.rst
index bb0c84333341..bb0c84333341 100644
--- a/Documentation/filesystems/ext4/ondisk/dynamic.rst
+++ b/Documentation/filesystems/ext4/dynamic.rst
diff --git a/Documentation/filesystems/ext4/ondisk/eainode.rst b/Documentation/filesystems/ext4/eainode.rst
index ecc0d01a0a72..ecc0d01a0a72 100644
--- a/Documentation/filesystems/ext4/ondisk/eainode.rst
+++ b/Documentation/filesystems/ext4/eainode.rst
diff --git a/Documentation/filesystems/ext4/ext4.rst b/Documentation/filesystems/ext4/ext4.rst
deleted file mode 100644
index 9d4368d591fa..000000000000
--- a/Documentation/filesystems/ext4/ext4.rst
+++ /dev/null
@@ -1,613 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-========================
-General Information
-========================
-
-Ext4 is an advanced level of the ext3 filesystem which incorporates
-scalability and reliability enhancements for supporting large filesystems
-(64 bit) in keeping with increasing disk capacities and state-of-the-art
-feature requirements.
-
-Mailing list: linux-ext4@vger.kernel.org
-Web site: http://ext4.wiki.kernel.org
-
-
-Quick usage instructions
-========================
-
-Note: More extensive information for getting started with ext4 can be
-found at the ext4 wiki site at the URL:
-http://ext4.wiki.kernel.org/index.php/Ext4_Howto
-
- - The latest version of e2fsprogs can be found at:
-
- https://www.kernel.org/pub/linux/kernel/people/tytso/e2fsprogs/
-
- or
-
- http://sourceforge.net/project/showfiles.php?group_id=2406
-
- or grab the latest git repository from:
-
- https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git
-
- - Create a new filesystem using the ext4 filesystem type:
-
- # mke2fs -t ext4 /dev/hda1
-
- Or to configure an existing ext3 filesystem to support extents:
-
- # tune2fs -O extents /dev/hda1
-
- If the filesystem was created with 128 byte inodes, it can be
- converted to use 256 byte for greater efficiency via:
-
- # tune2fs -I 256 /dev/hda1
-
- - Mounting:
-
- # mount -t ext4 /dev/hda1 /wherever
-
- - When comparing performance with other filesystems, it's always
- important to try multiple workloads; very often a subtle change in a
- workload parameter can completely change the ranking of which
- filesystems do well compared to others. When comparing versus ext3,
- note that ext4 enables write barriers by default, while ext3 does
- not enable write barriers by default. So it is useful to use
- explicitly specify whether barriers are enabled or not when via the
- '-o barriers=[0|1]' mount option for both ext3 and ext4 filesystems
- for a fair comparison. When tuning ext3 for best benchmark numbers,
- it is often worthwhile to try changing the data journaling mode; '-o
- data=writeback' can be faster for some workloads. (Note however that
- running mounted with data=writeback can potentially leave stale data
- exposed in recently written files in case of an unclean shutdown,
- which could be a security exposure in some situations.) Configuring
- the filesystem with a large journal can also be helpful for
- metadata-intensive workloads.
-
-Features
-========
-
-Currently Available
--------------------
-
-* ability to use filesystems > 16TB (e2fsprogs support not available yet)
-* extent format reduces metadata overhead (RAM, IO for access, transactions)
-* extent format more robust in face of on-disk corruption due to magics,
-* internal redundancy in tree
-* improved file allocation (multi-block alloc)
-* lift 32000 subdirectory limit imposed by i_links_count[1]
-* nsec timestamps for mtime, atime, ctime, create time
-* inode version field on disk (NFSv4, Lustre)
-* reduced e2fsck time via uninit_bg feature
-* journal checksumming for robustness, performance
-* persistent file preallocation (e.g for streaming media, databases)
-* ability to pack bitmaps and inode tables into larger virtual groups via the
- flex_bg feature
-* large file support
-* inode allocation using large virtual block groups via flex_bg
-* delayed allocation
-* large block (up to pagesize) support
-* efficient new ordered mode in JBD2 and ext4 (avoid using buffer head to force
- the ordering)
-
-[1] Filesystems with a block size of 1k may see a limit imposed by the
-directory hash tree having a maximum depth of two.
-
-Options
-=======
-
-When mounting an ext4 filesystem, the following option are accepted:
-(*) == default
-
-======================= =======================================================
-Mount Option Description
-======================= =======================================================
-ro Mount filesystem read only. Note that ext4 will
- replay the journal (and thus write to the
- partition) even when mounted "read only". The
- mount options "ro,noload" can be used to prevent
- writes to the filesystem.
-
-journal_checksum Enable checksumming of the journal transactions.
- This will allow the recovery code in e2fsck and the
- kernel to detect corruption in the kernel. It is a
- compatible change and will be ignored by older kernels.
-
-journal_async_commit Commit block can be written to disk without waiting
- for descriptor blocks. If enabled older kernels cannot
- mount the device. This will enable 'journal_checksum'
- internally.
-
-journal_path=path
-journal_dev=devnum When the external journal device's major/minor numbers
- have changed, these options allow the user to specify
- the new journal location. The journal device is
- identified through either its new major/minor numbers
- encoded in devnum, or via a path to the device.
-
-norecovery Don't load the journal on mounting. Note that
-noload if the filesystem was not unmounted cleanly,
- skipping the journal replay will lead to the
- filesystem containing inconsistencies that can
- lead to any number of problems.
-
-data=journal All data are committed into the journal prior to being
- written into the main file system. Enabling
- this mode will disable delayed allocation and
- O_DIRECT support.
-
-data=ordered (*) All data are forced directly out to the main file
- system prior to its metadata being committed to the
- journal.
-
-data=writeback Data ordering is not preserved, data may be written
- into the main file system after its metadata has been
- committed to the journal.
-
-commit=nrsec (*) Ext4 can be told to sync all its data and metadata
- every 'nrsec' seconds. The default value is 5 seconds.
- This means that if you lose your power, you will lose
- as much as the latest 5 seconds of work (your
- filesystem will not be damaged though, thanks to the
- journaling). This default value (or any low value)
- will hurt performance, but it's good for data-safety.
- Setting it to 0 will have the same effect as leaving
- it at the default (5 seconds).
- Setting it to very large values will improve
- performance.
-
-barrier=<0|1(*)> This enables/disables the use of write barriers in
-barrier(*) the jbd code. barrier=0 disables, barrier=1 enables.
-nobarrier This also requires an IO stack which can support
- barriers, and if jbd gets an error on a barrier
- write, it will disable again with a warning.
- Write barriers enforce proper on-disk ordering
- of journal commits, making volatile disk write caches
- safe to use, at some performance penalty. If
- your disks are battery-backed in one way or another,
- disabling barriers may safely improve performance.
- The mount options "barrier" and "nobarrier" can
- also be used to enable or disable barriers, for
- consistency with other ext4 mount options.
-
-inode_readahead_blks=n This tuning parameter controls the maximum
- number of inode table blocks that ext4's inode
- table readahead algorithm will pre-read into
- the buffer cache. The default value is 32 blocks.
-
-nouser_xattr Disables Extended User Attributes. See the
- attr(5) manual page for more information about
- extended attributes.
-
-noacl This option disables POSIX Access Control List
- support. If ACL support is enabled in the kernel
- configuration (CONFIG_EXT4_FS_POSIX_ACL), ACL is
- enabled by default on mount. See the acl(5) manual
- page for more information about acl.
-
-bsddf (*) Make 'df' act like BSD.
-minixdf Make 'df' act like Minix.
-
-debug Extra debugging information is sent to syslog.
-
-abort Simulate the effects of calling ext4_abort() for
- debugging purposes. This is normally used while
- remounting a filesystem which is already mounted.
-
-errors=remount-ro Remount the filesystem read-only on an error.
-errors=continue Keep going on a filesystem error.
-errors=panic Panic and halt the machine if an error occurs.
- (These mount options override the errors behavior
- specified in the superblock, which can be configured
- using tune2fs)
-
-data_err=ignore(*) Just print an error message if an error occurs
- in a file data buffer in ordered mode.
-data_err=abort Abort the journal if an error occurs in a file
- data buffer in ordered mode.
-
-grpid New objects have the group ID of their parent.
-bsdgroups
-
-nogrpid (*) New objects have the group ID of their creator.
-sysvgroups
-
-resgid=n The group ID which may use the reserved blocks.
-
-resuid=n The user ID which may use the reserved blocks.
-
-sb=n Use alternate superblock at this location.
-
-quota These options are ignored by the filesystem. They
-noquota are used only by quota tools to recognize volumes
-grpquota where quota should be turned on. See documentation
-usrquota in the quota-tools package for more details
- (http://sourceforge.net/projects/linuxquota).
-
-jqfmt=<quota type> These options tell filesystem details about quota
-usrjquota=<file> so that quota information can be properly updated
-grpjquota=<file> during journal replay. They replace the above
- quota options. See documentation in the quota-tools
- package for more details
- (http://sourceforge.net/projects/linuxquota).
-
-stripe=n Number of filesystem blocks that mballoc will try
- to use for allocation size and alignment. For RAID5/6
- systems this should be the number of data
- disks * RAID chunk size in file system blocks.
-
-delalloc (*) Defer block allocation until just before ext4
- writes out the block(s) in question. This
- allows ext4 to better allocation decisions
- more efficiently.
-nodelalloc Disable delayed allocation. Blocks are allocated
- when the data is copied from userspace to the
- page cache, either via the write(2) system call
- or when an mmap'ed page which was previously
- unallocated is written for the first time.
-
-max_batch_time=usec Maximum amount of time ext4 should wait for
- additional filesystem operations to be batch
- together with a synchronous write operation.
- Since a synchronous write operation is going to
- force a commit and then a wait for the I/O
- complete, it doesn't cost much, and can be a
- huge throughput win, we wait for a small amount
- of time to see if any other transactions can
- piggyback on the synchronous write. The
- algorithm used is designed to automatically tune
- for the speed of the disk, by measuring the
- amount of time (on average) that it takes to
- finish committing a transaction. Call this time
- the "commit time". If the time that the
- transaction has been running is less than the
- commit time, ext4 will try sleeping for the
- commit time to see if other operations will join
- the transaction. The commit time is capped by
- the max_batch_time, which defaults to 15000us
- (15ms). This optimization can be turned off
- entirely by setting max_batch_time to 0.
-
-min_batch_time=usec This parameter sets the commit time (as
- described above) to be at least min_batch_time.
- It defaults to zero microseconds. Increasing
- this parameter may improve the throughput of
- multi-threaded, synchronous workloads on very
- fast disks, at the cost of increasing latency.
-
-journal_ioprio=prio The I/O priority (from 0 to 7, where 0 is the
- highest priority) which should be used for I/O
- operations submitted by kjournald2 during a
- commit operation. This defaults to 3, which is
- a slightly higher priority than the default I/O
- priority.
-
-auto_da_alloc(*) Many broken applications don't use fsync() when
-noauto_da_alloc replacing existing files via patterns such as
- fd = open("foo.new")/write(fd,..)/close(fd)/
- rename("foo.new", "foo"), or worse yet,
- fd = open("foo", O_TRUNC)/write(fd,..)/close(fd).
- If auto_da_alloc is enabled, ext4 will detect
- the replace-via-rename and replace-via-truncate
- patterns and force that any delayed allocation
- blocks are allocated such that at the next
- journal commit, in the default data=ordered
- mode, the data blocks of the new file are forced
- to disk before the rename() operation is
- committed. This provides roughly the same level
- of guarantees as ext3, and avoids the
- "zero-length" problem that can happen when a
- system crashes before the delayed allocation
- blocks are forced to disk.
-
-noinit_itable Do not initialize any uninitialized inode table
- blocks in the background. This feature may be
- used by installation CD's so that the install
- process can complete as quickly as possible; the
- inode table initialization process would then be
- deferred until the next time the file system
- is unmounted.
-
-init_itable=n The lazy itable init code will wait n times the
- number of milliseconds it took to zero out the
- previous block group's inode table. This
- minimizes the impact on the system performance
- while file system's inode table is being initialized.
-
-discard Controls whether ext4 should issue discard/TRIM
-nodiscard(*) commands to the underlying block device when
- blocks are freed. This is useful for SSD devices
- and sparse/thinly-provisioned LUNs, but it is off
- by default until sufficient testing has been done.
-
-nouid32 Disables 32-bit UIDs and GIDs. This is for
- interoperability with older kernels which only
- store and expect 16-bit values.
-
-block_validity(*) These options enable or disable the in-kernel
-noblock_validity facility for tracking filesystem metadata blocks
- within internal data structures. This allows multi-
- block allocator and other routines to notice
- bugs or corrupted allocation bitmaps which cause
- blocks to be allocated which overlap with
- filesystem metadata blocks.
-
-dioread_lock Controls whether or not ext4 should use the DIO read
-dioread_nolock locking. If the dioread_nolock option is specified
- ext4 will allocate uninitialized extent before buffer
- write and convert the extent to initialized after IO
- completes. This approach allows ext4 code to avoid
- using inode mutex, which improves scalability on high
- speed storages. However this does not work with
- data journaling and dioread_nolock option will be
- ignored with kernel warning. Note that dioread_nolock
- code path is only used for extent-based files.
- Because of the restrictions this options comprises
- it is off by default (e.g. dioread_lock).
-
-max_dir_size_kb=n This limits the size of directories so that any
- attempt to expand them beyond the specified
- limit in kilobytes will cause an ENOSPC error.
- This is useful in memory constrained
- environments, where a very large directory can
- cause severe performance problems or even
- provoke the Out Of Memory killer. (For example,
- if there is only 512mb memory available, a 176mb
- directory may seriously cramp the system's style.)
-
-i_version Enable 64-bit inode version support. This option is
- off by default.
-
-dax Use direct access (no page cache). See
- Documentation/filesystems/dax.txt. Note that
- this option is incompatible with data=journal.
-======================= =======================================================
-
-Data Mode
-=========
-There are 3 different data modes:
-
-* writeback mode
-
- In data=writeback mode, ext4 does not journal data at all. This mode provides
- a similar level of journaling as that of XFS, JFS, and ReiserFS in its default
- mode - metadata journaling. A crash+recovery can cause incorrect data to
- appear in files which were written shortly before the crash. This mode will
- typically provide the best ext4 performance.
-
-* ordered mode
-
- In data=ordered mode, ext4 only officially journals metadata, but it logically
- groups metadata information related to data changes with the data blocks into
- a single unit called a transaction. When it's time to write the new metadata
- out to disk, the associated data blocks are written first. In general, this
- mode performs slightly slower than writeback but significantly faster than
- journal mode.
-
-* journal mode
-
- data=journal mode provides full data and metadata journaling. All new data is
- written to the journal first, and then to its final location. In the event of
- a crash, the journal can be replayed, bringing both data and metadata into a
- consistent state. This mode is the slowest except when data needs to be read
- from and written to disk at the same time where it outperforms all others
- modes. Enabling this mode will disable delayed allocation and O_DIRECT
- support.
-
-/proc entries
-=============
-
-Information about mounted ext4 file systems can be found in
-/proc/fs/ext4. Each mounted filesystem will have a directory in
-/proc/fs/ext4 based on its device name (i.e., /proc/fs/ext4/hdc or
-/proc/fs/ext4/dm-0). The files in each per-device directory are shown
-in table below.
-
-Files in /proc/fs/ext4/<devname>
-
-================ =======
- File Content
-================ =======
- mb_groups details of multiblock allocator buddy cache of free blocks
-================ =======
-
-/sys entries
-============
-
-Information about mounted ext4 file systems can be found in
-/sys/fs/ext4. Each mounted filesystem will have a directory in
-/sys/fs/ext4 based on its device name (i.e., /sys/fs/ext4/hdc or
-/sys/fs/ext4/dm-0). The files in each per-device directory are shown
-in table below.
-
-Files in /sys/fs/ext4/<devname>:
-
-(see also Documentation/ABI/testing/sysfs-fs-ext4)
-
-============================= =================================================
-File Content
-============================= =================================================
- delayed_allocation_blocks This file is read-only and shows the number of
- blocks that are dirty in the page cache, but
- which do not have their location in the
- filesystem allocated yet.
-
-inode_goal Tuning parameter which (if non-zero) controls
- the goal inode used by the inode allocator in
- preference to all other allocation heuristics.
- This is intended for debugging use only, and
- should be 0 on production systems.
-
-inode_readahead_blks Tuning parameter which controls the maximum
- number of inode table blocks that ext4's inode
- table readahead algorithm will pre-read into
- the buffer cache
-
-lifetime_write_kbytes This file is read-only and shows the number of
- kilobytes of data that have been written to this
- filesystem since it was created.
-
- max_writeback_mb_bump The maximum number of megabytes the writeback
- code will try to write out before move on to
- another inode.
-
- mb_group_prealloc The multiblock allocator will round up allocation
- requests to a multiple of this tuning parameter if
- the stripe size is not set in the ext4 superblock
-
- mb_max_to_scan The maximum number of extents the multiblock
- allocator will search to find the best extent
-
- mb_min_to_scan The minimum number of extents the multiblock
- allocator will search to find the best extent
-
- mb_order2_req Tuning parameter which controls the minimum size
- for requests (as a power of 2) where the buddy
- cache is used
-
- mb_stats Controls whether the multiblock allocator should
- collect statistics, which are shown during the
- unmount. 1 means to collect statistics, 0 means
- not to collect statistics
-
- mb_stream_req Files which have fewer blocks than this tunable
- parameter will have their blocks allocated out
- of a block group specific preallocation pool, so
- that small files are packed closely together.
- Each large file will have its blocks allocated
- out of its own unique preallocation pool.
-
- session_write_kbytes This file is read-only and shows the number of
- kilobytes of data that have been written to this
- filesystem since it was mounted.
-
- reserved_clusters This is RW file and contains number of reserved
- clusters in the file system which will be used
- in the specific situations to avoid costly
- zeroout, unexpected ENOSPC, or possible data
- loss. The default is 2% or 4096 clusters,
- whichever is smaller and this can be changed
- however it can never exceed number of clusters
- in the file system. If there is not enough space
- for the reserved space when mounting the file
- mount will _not_ fail.
-============================= =================================================
-
-Ioctls
-======
-
-There is some Ext4 specific functionality which can be accessed by applications
-through the system call interfaces. The list of all Ext4 specific ioctls are
-shown in the table below.
-
-Table of Ext4 specific ioctls
-
-============================= =================================================
-Ioctl Description
-============================= =================================================
- EXT4_IOC_GETFLAGS Get additional attributes associated with inode.
- The ioctl argument is an integer bitfield, with
- bit values described in ext4.h. This ioctl is an
- alias for FS_IOC_GETFLAGS.
-
- EXT4_IOC_SETFLAGS Set additional attributes associated with inode.
- The ioctl argument is an integer bitfield, with
- bit values described in ext4.h. This ioctl is an
- alias for FS_IOC_SETFLAGS.
-
- EXT4_IOC_GETVERSION
- EXT4_IOC_GETVERSION_OLD
- Get the inode i_generation number stored for
- each inode. The i_generation number is normally
- changed only when new inode is created and it is
- particularly useful for network filesystems. The
- '_OLD' version of this ioctl is an alias for
- FS_IOC_GETVERSION.
-
- EXT4_IOC_SETVERSION
- EXT4_IOC_SETVERSION_OLD
- Set the inode i_generation number stored for
- each inode. The '_OLD' version of this ioctl
- is an alias for FS_IOC_SETVERSION.
-
- EXT4_IOC_GROUP_EXTEND This ioctl has the same purpose as the resize
- mount option. It allows to resize filesystem
- to the end of the last existing block group,
- further resize has to be done with resize2fs,
- either online, or offline. The argument points
- to the unsigned logn number representing the
- filesystem new block count.
-
- EXT4_IOC_MOVE_EXT Move the block extents from orig_fd (the one
- this ioctl is pointing to) to the donor_fd (the
- one specified in move_extent structure passed
- as an argument to this ioctl). Then, exchange
- inode metadata between orig_fd and donor_fd.
- This is especially useful for online
- defragmentation, because the allocator has the
- opportunity to allocate moved blocks better,
- ideally into one contiguous extent.
-
- EXT4_IOC_GROUP_ADD Add a new group descriptor to an existing or
- new group descriptor block. The new group
- descriptor is described by ext4_new_group_input
- structure, which is passed as an argument to
- this ioctl. This is especially useful in
- conjunction with EXT4_IOC_GROUP_EXTEND,
- which allows online resize of the filesystem
- to the end of the last existing block group.
- Those two ioctls combined is used in userspace
- online resize tool (e.g. resize2fs).
-
- EXT4_IOC_MIGRATE This ioctl operates on the filesystem itself.
- It converts (migrates) ext3 indirect block mapped
- inode to ext4 extent mapped inode by walking
- through indirect block mapping of the original
- inode and converting contiguous block ranges
- into ext4 extents of the temporary inode. Then,
- inodes are swapped. This ioctl might help, when
- migrating from ext3 to ext4 filesystem, however
- suggestion is to create fresh ext4 filesystem
- and copy data from the backup. Note, that
- filesystem has to support extents for this ioctl
- to work.
-
- EXT4_IOC_ALLOC_DA_BLKS Force all of the delay allocated blocks to be
- allocated to preserve application-expected ext3
- behaviour. Note that this will also start
- triggering a write of the data blocks, but this
- behaviour may change in the future as it is
- not necessary and has been done this way only
- for sake of simplicity.
-
- EXT4_IOC_RESIZE_FS Resize the filesystem to a new size. The number
- of blocks of resized filesystem is passed in via
- 64 bit integer argument. The kernel allocates
- bitmaps and inode table, the userspace tool thus
- just passes the new number of blocks.
-
- EXT4_IOC_SWAP_BOOT Swap i_blocks and associated attributes
- (like i_blocks, i_size, i_flags, ...) from
- the specified inode with inode
- EXT4_BOOT_LOADER_INO (#5). This is typically
- used to store a boot loader in a secure part of
- the filesystem, where it can't be changed by a
- normal user by accident.
- The data blocks of the previous boot loader
- will be associated with the given inode.
-============================= =================================================
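
For illustration, a minimal userspace sketch of driving the flag ioctls
described above (using the generic FS_IOC_* aliases from <linux/fs.h>;
FS_IMMUTABLE_FL is chosen arbitrarily, and error handling is reduced to
exit codes):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	int main(int argc, char **argv)
	{
		int fd, flags;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0)
			return 1;
		/* EXT4_IOC_GETFLAGS is an alias for FS_IOC_GETFLAGS. */
		if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
			return 1;
		printf("flags: %#x\n", flags);
		/* Setting FS_IMMUTABLE_FL requires CAP_LINUX_IMMUTABLE. */
		flags |= FS_IMMUTABLE_FL;
		if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
			return 1;
		close(fd);
		return 0;
	}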
-
-References
-==========
-
-kernel source: <file:fs/ext4/>
- <file:fs/jbd2/>
-
-programs: http://e2fsprogs.sourceforge.net/
-
-useful links: http://fedoraproject.org/wiki/ext3-devel
- http://www.bullopensource.org/ext4/
- http://ext4.wiki.kernel.org/index.php/Main_Page
- http://fedoraproject.org/wiki/Features/Ext4
diff --git a/Documentation/filesystems/ext4/ondisk/globals.rst b/Documentation/filesystems/ext4/globals.rst
index 368bf7662b96..368bf7662b96 100644
--- a/Documentation/filesystems/ext4/ondisk/globals.rst
+++ b/Documentation/filesystems/ext4/globals.rst
diff --git a/Documentation/filesystems/ext4/ondisk/group_descr.rst b/Documentation/filesystems/ext4/group_descr.rst
index 759827e5d2cf..0f783ed88592 100644
--- a/Documentation/filesystems/ext4/ondisk/group_descr.rst
+++ b/Documentation/filesystems/ext4/group_descr.rst
@@ -43,7 +43,7 @@ entire bitmap.
The block group descriptor is laid out in ``struct ext4_group_desc``.
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -157,7 +157,7 @@ The block group descriptor is laid out in ``struct ext4_group_desc``.
Block group flags can be any combination of the following:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
diff --git a/Documentation/filesystems/ext4/ondisk/ifork.rst b/Documentation/filesystems/ext4/ifork.rst
index 5dbe3b2b121a..b9816d5a896b 100644
--- a/Documentation/filesystems/ext4/ondisk/ifork.rst
+++ b/Documentation/filesystems/ext4/ifork.rst
@@ -68,7 +68,7 @@ The extent tree header is recorded in ``struct ext4_extent_header``,
which is 12 bytes long:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -104,7 +104,7 @@ Internal nodes of the extent tree, also known as index nodes, are
recorded as ``struct ext4_extent_idx``, and are 12 bytes long:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -134,7 +134,7 @@ Leaf nodes of the extent tree are recorded as ``struct ext4_extent``,
and are also 12 bytes long:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -174,7 +174,7 @@ including) the checksum itself.
``struct ext4_extent_tail`` is 4 bytes long:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
diff --git a/Documentation/filesystems/ext4/index.rst b/Documentation/filesystems/ext4/index.rst
index 71121605558c..3be3e54d480d 100644
--- a/Documentation/filesystems/ext4/index.rst
+++ b/Documentation/filesystems/ext4/index.rst
@@ -1,17 +1,14 @@
.. SPDX-License-Identifier: GPL-2.0
-===============
-ext4 Filesystem
-===============
-
-General usage and on-disk artifacts written by ext4. More documentation may
-be ported from the wiki as time permits. This should be considered the
-canonical source of information as the details here have been reviewed by
-the ext4 community.
+===================================
+ext4 Data Structures and Algorithms
+===================================
.. toctree::
- :maxdepth: 5
+ :maxdepth: 6
:numbered:
- ext4
- ondisk/index
+ about.rst
+ overview.rst
+ globals.rst
+ dynamic.rst
diff --git a/Documentation/filesystems/ext4/ondisk/inlinedata.rst b/Documentation/filesystems/ext4/inlinedata.rst
index d1075178ce0b..d1075178ce0b 100644
--- a/Documentation/filesystems/ext4/ondisk/inlinedata.rst
+++ b/Documentation/filesystems/ext4/inlinedata.rst
diff --git a/Documentation/filesystems/ext4/ondisk/inodes.rst b/Documentation/filesystems/ext4/inodes.rst
index 655ce898f3f5..6bd35e506b6f 100644
--- a/Documentation/filesystems/ext4/ondisk/inodes.rst
+++ b/Documentation/filesystems/ext4/inodes.rst
@@ -29,8 +29,9 @@ and the inode structure itself.
The inode table entry is laid out in ``struct ext4_inode``.
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
+ :class: longtable
* - Offset
- Size
@@ -176,7 +177,7 @@ The inode table entry is laid out in ``struct ext4_inode``.
The ``i_mode`` value is a combination of the following flags:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -227,7 +228,7 @@ The ``i_mode`` value is a combination of the following flags:
The ``i_flags`` field is a combination of these values:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -314,7 +315,7 @@ The ``osd1`` field has multiple meanings depending on the creator:
Linux:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -331,7 +332,7 @@ Linux:
Hurd:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -346,7 +347,7 @@ Hurd:
Masix:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -365,7 +366,7 @@ The ``osd2`` field has multiple meanings depending on the filesystem creator:
Linux:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -402,7 +403,7 @@ Linux:
Hurd:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -433,7 +434,7 @@ Hurd:
Masix:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
diff --git a/Documentation/filesystems/ext4/ondisk/journal.rst b/Documentation/filesystems/ext4/journal.rst
index e7031af86876..ea613ee701f5 100644
--- a/Documentation/filesystems/ext4/ondisk/journal.rst
+++ b/Documentation/filesystems/ext4/journal.rst
@@ -48,7 +48,7 @@ Layout
Generally speaking, the journal has this format:
.. list-table::
- :widths: 1 1 78
+ :widths: 16 48 16
:header-rows: 1
* - Superblock
@@ -76,7 +76,7 @@ The journal superblock will be in the next full block after the
superblock.
.. list-table::
- :widths: 1 1 1 1 76
+ :widths: 12 12 12 32 12
:header-rows: 1
* - 1024 bytes of padding
@@ -98,7 +98,7 @@ Every block in the journal starts with a common 12-byte header
``struct journal_header_s``:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -124,7 +124,7 @@ Every block in the journal starts with a common 12-byte header
The journal block type can be any one of:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -154,7 +154,7 @@ The journal superblock is recorded as ``struct journal_superblock_s``,
which is 1024 bytes long:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -264,7 +264,7 @@ which is 1024 bytes long:
The journal compat features are any combination of the following:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -278,7 +278,7 @@ The journal compat features are any combination of the following:
The journal incompat features are any combination of the following:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -306,7 +306,7 @@ Journal checksum type codes are one of the following. crc32 or crc32c are the
most likely choices.
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -330,7 +330,7 @@ described by a data structure, but here is the block structure anyway.
Descriptor blocks consume at least 36 bytes, but use a full block:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -355,7 +355,7 @@ defined as ``struct journal_block_tag3_s``, which looks like the
following. The size is 16 or 32 bytes.
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -400,7 +400,7 @@ following. The size is 16 or 32 bytes.
The journal tag flags are any combination of the following:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -421,7 +421,7 @@ is defined as ``struct journal_block_tag_s``, which looks like the
following. The size is 8, 12, 24, or 28 bytes:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -471,7 +471,7 @@ JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the block is a
``struct jbd2_journal_block_tail``, which looks like this:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -513,7 +513,7 @@ Revocation blocks are described in
length, but use a full block:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -543,7 +543,7 @@ JBD2\_FEATURE\_INCOMPAT\_CSUM\_V3 are set, the end of the revocation
block is a ``struct jbd2_journal_revoke_tail``, which has this format:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -567,7 +567,7 @@ The commit block is described by ``struct commit_header``, which is 32
bytes long (but uses a full block):
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
diff --git a/Documentation/filesystems/ext4/ondisk/mmp.rst b/Documentation/filesystems/ext4/mmp.rst
index b7d7a3137f80..25660981d93c 100644
--- a/Documentation/filesystems/ext4/ondisk/mmp.rst
+++ b/Documentation/filesystems/ext4/mmp.rst
@@ -32,7 +32,7 @@ The checksum is calculated against the FS UUID and the MMP structure.
The MMP structure (``struct mmp_struct``) is as follows:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 12 20 40
:header-rows: 1
* - Offset
diff --git a/Documentation/filesystems/ext4/ondisk/index.rst b/Documentation/filesystems/ext4/ondisk/index.rst
deleted file mode 100644
index f7d082c3a435..000000000000
--- a/Documentation/filesystems/ext4/ondisk/index.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-==============================
-Data Structures and Algorithms
-==============================
-.. include:: about.rst
-.. include:: overview.rst
-.. include:: globals.rst
-.. include:: dynamic.rst
diff --git a/Documentation/filesystems/ext4/ondisk/overview.rst b/Documentation/filesystems/ext4/overview.rst
index cbab18baba12..cbab18baba12 100644
--- a/Documentation/filesystems/ext4/ondisk/overview.rst
+++ b/Documentation/filesystems/ext4/overview.rst
diff --git a/Documentation/filesystems/ext4/ondisk/special_inodes.rst b/Documentation/filesystems/ext4/special_inodes.rst
index a82f70c9baeb..9061aabba827 100644
--- a/Documentation/filesystems/ext4/ondisk/special_inodes.rst
+++ b/Documentation/filesystems/ext4/special_inodes.rst
@@ -6,7 +6,7 @@ Special inodes
ext4 reserves some inode for special features, as follows:
.. list-table::
- :widths: 1 79
+ :widths: 6 70
:header-rows: 1
* - inode Number
diff --git a/Documentation/filesystems/ext4/ondisk/super.rst b/Documentation/filesystems/ext4/super.rst
index 5f81dd87e0b9..04ff079a2acf 100644
--- a/Documentation/filesystems/ext4/ondisk/super.rst
+++ b/Documentation/filesystems/ext4/super.rst
@@ -19,7 +19,7 @@ The ext4 superblock is laid out as follows in
``struct ext4_super_block``:
.. list-table::
- :widths: 1 1 1 77
+ :widths: 8 8 24 40
:header-rows: 1
* - Offset
@@ -483,7 +483,7 @@ The ext4 superblock is laid out as follows in
The superblock state is some combination of the following:
.. list-table::
- :widths: 1 79
+ :widths: 8 72
:header-rows: 1
* - Value
@@ -500,7 +500,7 @@ The superblock state is some combination of the following:
The superblock error policy is one of the following:
.. list-table::
- :widths: 1 79
+ :widths: 8 72
:header-rows: 1
* - Value
@@ -517,7 +517,7 @@ The superblock error policy is one of the following:
The filesystem creator is one of the following:
.. list-table::
- :widths: 1 79
+ :widths: 8 72
:header-rows: 1
* - Value
@@ -538,7 +538,7 @@ The filesystem creator is one of the following:
The superblock revision is one of the following:
.. list-table::
- :widths: 1 79
+ :widths: 8 72
:header-rows: 1
* - Value
@@ -556,7 +556,7 @@ The superblock compatible features field is a combination of any of the
following:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -595,7 +595,7 @@ The superblock incompatible features field is a combination of any of the
following:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -647,7 +647,7 @@ The superblock read-only compatible features field is a combination of any of
the following:
.. list-table::
- :widths: 1 79
+ :widths: 16 64
:header-rows: 1
* - Value
@@ -702,7 +702,7 @@ the following:
The ``s_def_hash_version`` field is one of the following:
.. list-table::
- :widths: 1 79
+ :widths: 8 72
:header-rows: 1
* - Value
@@ -725,7 +725,7 @@ The ``s_def_hash_version`` field is one of the following:
The ``s_default_mount_opts`` field is any combination of the following:
.. list-table::
- :widths: 1 79
+ :widths: 8 72
:header-rows: 1
* - Value
@@ -767,7 +767,7 @@ The ``s_default_mount_opts`` field is any combination of the following:
The ``s_flags`` field is any combination of the following:
.. list-table::
- :widths: 1 79
+ :widths: 8 72
:header-rows: 1
* - Value
@@ -784,7 +784,7 @@ The ``s_flags`` field is any combination of the following:
The ``s_encrypt_algos`` list can contain any of the following:
.. list-table::
- :widths: 1 79
+ :widths: 8 72
:header-rows: 1
* - Value
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index e5edd29687b5..e46c2147ddf8 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -172,9 +172,10 @@ fault_type=%d Support configuring fault injection type, should be
FAULT_DIR_DEPTH 0x000000100
FAULT_EVICT_INODE 0x000000200
FAULT_TRUNCATE 0x000000400
- FAULT_IO 0x000000800
+ FAULT_READ_IO 0x000000800
FAULT_CHECKPOINT 0x000001000
FAULT_DISCARD 0x000002000
+ FAULT_WRITE_IO 0x000004000
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
writes towards main area.
@@ -211,6 +212,11 @@ fsync_mode=%s Control the policy of fsync. Currently supports "posix",
non-atomic files likewise "nobarrier" mount option.
test_dummy_encryption Enable dummy encryption, which provides a fake fscrypt
context. The fake fscrypt context is used by xfstests.
+checkpoint=%s Set to "disable" to turn off checkpointing, or to "enable"
+ to re-enable it. Checkpointing is enabled by default. While
+ it is disabled, any unmount or unexpected shutdown will cause
+ the filesystem contents to appear as they did when the
+ filesystem was mounted with that option.
================================================================================
DEBUGFS ENTRIES
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 48b424de85bb..cfbc18f0d9c9 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -191,21 +191,11 @@ Currently, the following pairs of encryption modes are supported:
- AES-256-XTS for contents and AES-256-CTS-CBC for filenames
- AES-128-CBC for contents and AES-128-CTS-CBC for filenames
-- Speck128/256-XTS for contents and Speck128/256-CTS-CBC for filenames
It is strongly recommended to use AES-256-XTS for contents encryption.
AES-128-CBC was added only for low-powered embedded devices with
crypto accelerators such as CAAM or CESA that do not support XTS.
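
A minimal sketch of requesting the recommended pair from userspace via
the fscrypt policy ioctl (assuming the FS_IOC_SET_ENCRYPTION_POLICY
definitions from <linux/fs.h>; the zeroed key descriptor is a
placeholder, and dirfd must refer to an empty directory):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	static int set_recommended_policy(int dirfd)
	{
		struct fscrypt_policy p;

		memset(&p, 0, sizeof(p));
		p.version = 0;
		/* AES-256-XTS for contents, AES-256-CTS-CBC for filenames. */
		p.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
		p.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
		p.flags = FS_POLICY_FLAGS_PAD_32;
		/* p.master_key_descriptor left zeroed as a placeholder. */
		return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &p);
	}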
-Similarly, Speck128/256 support was only added for older or low-end
-CPUs which cannot do AES fast enough -- especially ARM CPUs which have
-NEON instructions but not the Cryptography Extensions -- and for which
-it would not otherwise be feasible to use encryption at all. It is
-not recommended to use Speck on CPUs that have AES instructions.
-Speck support is only available if it has been enabled in the crypto
-API via CONFIG_CRYPTO_SPECK. Also, on ARM platforms, to get
-acceptable performance CONFIG_CRYPTO_SPECK_NEON must be enabled.
-
New encryption modes can be added relatively easily, without changes
to individual filesystems. However, authenticated encryption (AE)
modes are not currently supported because of the difficulty of dealing
diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX
deleted file mode 100644
index 53f3b596ac0d..000000000000
--- a/Documentation/filesystems/nfs/00-INDEX
+++ /dev/null
@@ -1,26 +0,0 @@
-00-INDEX
- - this file (nfs-related documentation).
-Exporting
- - explanation of how to make filesystems exportable.
-fault_injection.txt
- - information for using fault injection on the server
-knfsd-stats.txt
- - statistics which the NFS server makes available to user space.
-nfs.txt
- - nfs client, and DNS resolution for fs_locations.
-nfs41-server.txt
- - info on the Linux server implementation of NFSv4 minor version 1.
-nfs-rdma.txt
- - how to install and setup the Linux NFS/RDMA client and server software
-nfsd-admin-interfaces.txt
- - Administrative interfaces for nfsd.
-nfsroot.txt
- - short guide on setting up a diskless box with NFS root filesystem.
-pnfs.txt
- - short explanation of some of the internals of the pnfs client code
-rpc-cache.txt
- - introduction to the caching mechanisms in the sunrpc layer.
-idmapper.txt
- - information for configuring request-keys to be used by idmapper
-rpc-server-gss.txt
- - Information on GSS authentication support in the NFS Server
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 7b7b845c490a..321d74b73937 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -622,3 +622,14 @@ in your dentry operations instead.
alloc_file_clone(file, flags, ops) does not affect any caller's references.
On success you get a new struct file sharing the mount/dentry with the
original, on failure - ERR_PTR().
+--
+[recommended]
+ ->lookup() instances doing an equivalent of
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ return d_splice_alias(inode, dentry);
+ don't need to bother with the check - d_splice_alias() will do the
+ right thing when given ERR_PTR(...) as inode. Moreover, passing NULL
+ inode to d_splice_alias() will also do the right thing (equivalent of
+ d_add(dentry, NULL); return NULL;), so such special cases do not
+ need separate treatment either.
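
A minimal sketch of the pattern described above (foo_lookup() and
foo_iget() are hypothetical; the point is that the result of the inode
lookup can be handed straight to d_splice_alias()):

	static struct dentry *foo_lookup(struct inode *dir,
					 struct dentry *dentry,
					 unsigned int flags)
	{
		/* May return a valid inode, NULL, or ERR_PTR(). */
		struct inode *inode = foo_iget(dir, &dentry->d_name);

		/* No IS_ERR()/NULL special-casing needed;
		   d_splice_alias() handles both. */
		return d_splice_alias(inode, dentry);
	}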
diff --git a/Documentation/fmc/00-INDEX b/Documentation/fmc/00-INDEX
deleted file mode 100644
index 431c69570f43..000000000000
--- a/Documentation/fmc/00-INDEX
+++ /dev/null
@@ -1,38 +0,0 @@
-
-Documentation in this directory comes from sections of the manual we
-wrote for the externally-developed fmc-bus package. The complete
-manual as of today (2013-02) is available in PDF format at
-http://www.ohwr.org/projects/fmc-bus/files
-
-00-INDEX
- - this file.
-
-FMC-and-SDB.txt
- - What are FMC and SDB, basic concepts for this framework
-
-API.txt
- - The functions that are exported by the bus driver
-
-parameters.txt
- - The module parameters
-
-carrier.txt
- - writing a carrier (a device)
-
-mezzanine.txt
- - writing code for your mezzanine (a driver)
-
-identifiers.txt
- - how identification and matching works
-
-fmc-fakedev.txt
- - about drivers/fmc/fmc-fakedev.ko
-
-fmc-trivial.txt
- - about drivers/fmc/fmc-trivial.ko
-
-fmc-write-eeprom.txt
- - about drivers/fmc/fmc-write-eeprom.ko
-
-fmc-chardev.txt
- - about drivers/fmc/fmc-chardev.ko
diff --git a/Documentation/gpio/00-INDEX b/Documentation/gpio/00-INDEX
deleted file mode 100644
index 17e19a68058f..000000000000
--- a/Documentation/gpio/00-INDEX
+++ /dev/null
@@ -1,4 +0,0 @@
-00-INDEX
- - This file
-sysfs.txt
- - Information about the GPIO sysfs interface
diff --git a/Documentation/hwmon/ina3221 b/Documentation/hwmon/ina3221
index 0ff74854cb2e..4b82cbfb551c 100644
--- a/Documentation/hwmon/ina3221
+++ b/Documentation/hwmon/ina3221
@@ -21,6 +21,8 @@ and power are calculated host-side from these.
Sysfs entries
-------------
+in[123]_label Voltage channel labels
+in[123]_enable Voltage channel enable controls
in[123]_input Bus voltage(mV) channels
curr[123]_input Current(mA) measurement channels
shunt[123]_resistor Shunt resistance(uOhm) channels
diff --git a/Documentation/hwmon/lm75 b/Documentation/hwmon/lm75
index ac95edfcd907..2f1120f88c16 100644
--- a/Documentation/hwmon/lm75
+++ b/Documentation/hwmon/lm75
@@ -17,8 +17,8 @@ Supported chips:
Addresses scanned: none
Datasheet: Publicly available at the Maxim website
http://www.maximintegrated.com/
- * Maxim MAX6625, MAX6626
- Prefixes: 'max6625', 'max6626'
+ * Maxim MAX6625, MAX6626, MAX31725, MAX31726
+ Prefixes: 'max6625', 'max6626', 'max31725', 'max31726'
Addresses scanned: none
Datasheet: Publicly available at the Maxim website
http://www.maxim-ic.com/
@@ -86,7 +86,7 @@ The LM75 is essentially an industry standard; there may be other
LM75 clones not listed here, with or without various enhancements,
that are supported. The clones are not detected by the driver, unless
they reproduce the exact register tricks of the original LM75, and must
-therefore be instantiated explicitly. Higher resolution up to 12-bit
+therefore be instantiated explicitly. Higher resolution up to 16-bit
is supported by this driver; other specific enhancements are not.
The LM77 is not supported, contrary to what we pretended for a long time.
diff --git a/Documentation/hwmon/ltc2978 b/Documentation/hwmon/ltc2978
index 9a49d3c90cd1..dfb2caa401d9 100644
--- a/Documentation/hwmon/ltc2978
+++ b/Documentation/hwmon/ltc2978
@@ -55,6 +55,10 @@ Supported chips:
Prefix: 'ltm4676'
Addresses scanned: -
Datasheet: http://www.linear.com/product/ltm4676
+ * Analog Devices LTM4686
+ Prefix: 'ltm4686'
+ Addresses scanned: -
+ Datasheet: http://www.analog.com/ltm4686
Author: Guenter Roeck <linux@roeck-us.net>
@@ -76,6 +80,7 @@ additional components on a single die. The chip is instantiated and reported
as two separate chips on two different I2C bus addresses.
LTM4675 is a dual 9A or single 18A μModule regulator
LTM4676 is a dual 13A or single 26A uModule regulator.
+LTM4686 is a dual 10A or single 20A uModule regulator.
Usage Notes
diff --git a/Documentation/hwmon/mc13783-adc b/Documentation/hwmon/mc13783-adc
index d0e7b3fa9e75..05ccc9f159f1 100644
--- a/Documentation/hwmon/mc13783-adc
+++ b/Documentation/hwmon/mc13783-adc
@@ -2,12 +2,12 @@ Kernel driver mc13783-adc
=========================
Supported chips:
- * Freescale Atlas MC13783
+ * Freescale MC13783
Prefix: 'mc13783'
- Datasheet: http://www.freescale.com/files/rf_if/doc/data_sheet/MC13783.pdf?fsrch=1
- * Freescale Atlas MC13892
+ Datasheet: https://www.nxp.com/docs/en/data-sheet/MC13783.pdf
+ * Freescale MC13892
Prefix: 'mc13892'
- Datasheet: http://cache.freescale.com/files/analog/doc/data_sheet/MC13892.pdf?fsrch=1&sr=1
+ Datasheet: https://www.nxp.com/docs/en/data-sheet/MC13892.pdf
Authors:
Sascha Hauer <s.hauer@pengutronix.de>
diff --git a/Documentation/ide/00-INDEX b/Documentation/ide/00-INDEX
deleted file mode 100644
index 22f98ca79539..000000000000
--- a/Documentation/ide/00-INDEX
+++ /dev/null
@@ -1,14 +0,0 @@
-00-INDEX
- - this file
-ChangeLog.ide-cd.1994-2004
- - ide-cd changelog
-ChangeLog.ide-floppy.1996-2002
- - ide-floppy changelog
-ChangeLog.ide-tape.1995-2002
- - ide-tape changelog
-ide-tape.txt
- - info on the IDE ATAPI streaming tape driver
-ide.txt
- - important info for users of ATA devices (IDE/EIDE disks and CD-ROMS).
-warm-plug-howto.txt
- - using sysfs to remove and add IDE devices. \ No newline at end of file
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 5db7e87c7cb1..c858c2e66e36 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -22,10 +22,7 @@ The following describes the license of the Linux kernel source code
(GPLv2), how to properly mark the license of individual files in the source
tree, as well as links to the full license text.
-.. toctree::
- :maxdepth: 2
-
- process/license-rules.rst
+* :ref:`kernel_licensing`
User-oriented documentation
---------------------------
diff --git a/Documentation/input/event-codes.rst b/Documentation/input/event-codes.rst
index a8c0873beb95..cef220c176a4 100644
--- a/Documentation/input/event-codes.rst
+++ b/Documentation/input/event-codes.rst
@@ -190,7 +190,16 @@ A few EV_REL codes have special meanings:
* REL_WHEEL, REL_HWHEEL:
- These codes are used for vertical and horizontal scroll wheels,
- respectively.
+ respectively. The value is the number of "notches" moved on the wheel, the
+ physical size of which varies by device. For high-resolution wheels (which
+ report multiple events for each notch of movement, or do not have notches)
+ this may be an approximation based on the high-resolution scroll events.
+
+* REL_WHEEL_HI_RES:
+
+ - If a vertical scroll wheel supports high-resolution scrolling, this code
+ will be emitted in addition to REL_WHEEL. The value is the (approximate)
+ distance travelled by the user's finger, in microns.
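
A minimal sketch of consuming both codes from an evdev node (the device
path is a placeholder, and REL_WHEEL_HI_RES is assumed to be defined by
the installed kernel headers):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		struct input_event ev;
		/* Placeholder path; pick the wheel's event node. */
		int fd = open("/dev/input/event0", O_RDONLY);

		if (fd < 0)
			return 1;
		while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
			if (ev.type != EV_REL)
				continue;
			if (ev.code == REL_WHEEL)
				printf("wheel moved %d notch(es)\n", ev.value);
			else if (ev.code == REL_WHEEL_HI_RES)
				printf("wheel moved %d micron(s)\n", ev.value);
		}
		return 0;
	}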
EV_ABS
------
diff --git a/Documentation/ioctl/00-INDEX b/Documentation/ioctl/00-INDEX
deleted file mode 100644
index c1a925787950..000000000000
--- a/Documentation/ioctl/00-INDEX
+++ /dev/null
@@ -1,12 +0,0 @@
-00-INDEX
- - this file
-botching-up-ioctls.txt
- - how to avoid botching up ioctls
-cdrom.txt
- - summary of CDROM ioctl calls
-hdio.txt
- - summary of HDIO_ ioctl calls
-ioctl-decoding.txt
- - how to decode the bits of an IOCTL code
-ioctl-number.txt
- - how to implement and register device/driver ioctl calls
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 13a7c999c04a..d05d93761653 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -201,7 +201,7 @@ Code Seq#(hex) Include File Comments
'X' 01 linux/pktcdvd.h conflict!
'Y' all linux/cyclades.h
'Z' 14-15 drivers/message/fusion/mptctl.h
-'[' 00-07 linux/usb/tmc.h USB Test and Measurement Devices
+'[' 00-3F linux/usb/tmc.h USB Test and Measurement Devices
<mailto:gregkh@linuxfoundation.org>
'a' all linux/atm*.h, linux/sonet.h ATM on linux
<http://lrcwww.epfl.ch/>
diff --git a/Documentation/isdn/00-INDEX b/Documentation/isdn/00-INDEX
deleted file mode 100644
index 2d1889b6c1fa..000000000000
--- a/Documentation/isdn/00-INDEX
+++ /dev/null
@@ -1,42 +0,0 @@
-00-INDEX
- - this file (info on ISDN implementation for Linux)
-CREDITS
- - list of the kind folks that brought you this stuff.
-HiSax.cert
- - information about the ITU approval certification of the HiSax driver.
-INTERFACE
- - description of isdn4linux Link Level and Hardware Level interfaces.
-INTERFACE.fax
- - description of the fax subinterface of isdn4linux.
-INTERFACE.CAPI
- - description of kernel CAPI Link Level to Hardware Level interface.
-README
- - general info on what you need and what to do for Linux ISDN.
-README.FAQ
- - general info for FAQ.
-README.HiSax
- - info on the HiSax driver which replaces the old teles.
-README.audio
- - info for running audio over ISDN.
-README.avmb1
- - info on driver for AVM-B1 ISDN card.
-README.concap
- - info on "CONCAP" encapsulation protocol interface used for X.25.
-README.diversion
- - info on module for isdn diversion services.
-README.fax
- - info for using Fax over ISDN.
-README.gigaset
- - info on the drivers for Siemens Gigaset ISDN adapters
-README.hfc-pci
- - info on hfc-pci based cards.
-README.hysdn
- - info on driver for Hypercope active HYSDN cards
-README.mISDN
- - info on the Modular ISDN subsystem (mISDN)
-README.syncppp
- - info on running Sync PPP over ISDN.
-README.x25
- - info for running X.25 over ISDN.
-syncPPP.FAQ
- - frequently asked questions about running PPP over ISDN.
diff --git a/Documentation/kbuild/00-INDEX b/Documentation/kbuild/00-INDEX
deleted file mode 100644
index 8c5e6aa78004..000000000000
--- a/Documentation/kbuild/00-INDEX
+++ /dev/null
@@ -1,14 +0,0 @@
-00-INDEX
- - this file: info on the kernel build process
-headers_install.txt
- - how to export Linux headers for use by userspace
-kbuild.txt
- - developer information on kbuild
-kconfig.txt
- - usage help for make *config
-kconfig-language.txt
- - specification of Config Language, the language in Kconfig files
-makefiles.txt
- - developer information for linux kernel makefiles
-modules.txt
- - how to build modules and to install them
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
index 0f00f9c164ac..23b0c8b20cd1 100644
--- a/Documentation/kernel-per-CPU-kthreads.txt
+++ b/Documentation/kernel-per-CPU-kthreads.txt
@@ -321,7 +321,7 @@ To reduce its OS jitter, do at least one of the following:
to do.
Name:
- rcuob/%d, rcuop/%d, and rcuos/%d
+ rcuop/%d and rcuos/%d
Purpose:
Offload RCU callbacks from the corresponding CPU.
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
deleted file mode 100644
index 86169dc766f7..000000000000
--- a/Documentation/laptops/00-INDEX
+++ /dev/null
@@ -1,16 +0,0 @@
-00-INDEX
- - This file
-asus-laptop.txt
- - information on the Asus Laptop Extras driver.
-disk-shock-protection.txt
- - information on hard disk shock protection.
-laptop-mode.txt
- - how to conserve battery power using laptop-mode.
-sony-laptop.txt
- - Sony Notebook Control Driver (SNC) Readme.
-sonypi.txt
- - info on Linux Sony Programmable I/O Device support.
-thinkpad-acpi.txt
- - information on the (IBM and Lenovo) ThinkPad ACPI Extras driver.
-toshiba_haps.txt
- - information on the Toshiba HDD Active Protection Sensor driver.
diff --git a/Documentation/leds/00-INDEX b/Documentation/leds/00-INDEX
deleted file mode 100644
index ae626b29a740..000000000000
--- a/Documentation/leds/00-INDEX
+++ /dev/null
@@ -1,32 +0,0 @@
-00-INDEX
- - This file
-leds-blinkm.txt
- - Driver for BlinkM LED-devices.
-leds-class.txt
- - documents LED handling under Linux.
-leds-class-flash.txt
- - documents flash LED handling under Linux.
-leds-lm3556.txt
- - notes on how to use the leds-lm3556 driver.
-leds-lp3944.txt
- - notes on how to use the leds-lp3944 driver.
-leds-lp5521.txt
- - notes on how to use the leds-lp5521 driver.
-leds-lp5523.txt
- - notes on how to use the leds-lp5523 driver.
-leds-lp5562.txt
- - notes on how to use the leds-lp5562 driver.
-leds-lp55xx.txt
- - description about lp55xx common driver.
-leds-lm3556.txt
- - notes on how to use the leds-lm3556 driver.
-leds-mlxcpld.txt
- - notes on how to use the leds-mlxcpld driver.
-ledtrig-oneshot.txt
- - One-shot LED trigger for both sporadic and dense events.
-ledtrig-transient.txt
- - LED Transient Trigger, one shot timer activation.
-ledtrig-usbport.txt
- - notes on how to use the drivers/usb/core/ledtrig-usbport.c trigger.
-uleds.txt
- - notes on how to use the uleds driver.
diff --git a/Documentation/locking/00-INDEX b/Documentation/locking/00-INDEX
deleted file mode 100644
index c256c9bee2a4..000000000000
--- a/Documentation/locking/00-INDEX
+++ /dev/null
@@ -1,16 +0,0 @@
-00-INDEX
- - this file.
-lockdep-design.txt
- - documentation on the runtime locking correctness validator.
-lockstat.txt
- - info on collecting statistics on locks (and contention).
-mutex-design.txt
- - info on the generic mutex subsystem.
-rt-mutex-design.txt
- - description of the RealTime mutex implementation design.
-rt-mutex.txt
- - desc. of RT-mutex subsystem with PI (Priority Inheritance) support.
-spinlocks.txt
- - info on using spinlocks to provide exclusive access in kernel.
-ww-mutex-design.txt
- - Intro to Mutex wait/wound deadlock handling.
diff --git a/Documentation/locking/lockstat.txt b/Documentation/locking/lockstat.txt
index 5786ad2cd5e6..fdbeb0c45ef3 100644
--- a/Documentation/locking/lockstat.txt
+++ b/Documentation/locking/lockstat.txt
@@ -91,7 +91,7 @@ Look at the current lock statistics:
07 &mm->mmap_sem-R: 37 100 1.31 299502.61 325629.52 3256.30 212344 34316685 0.10 7744.91 95016910.20 2.77
08 ---------------
09 &mm->mmap_sem 1 [<ffffffff811502a7>] khugepaged_scan_mm_slot+0x57/0x280
-19 &mm->mmap_sem 96 [<ffffffff815351c4>] __do_page_fault+0x1d4/0x510
+10 &mm->mmap_sem 96 [<ffffffff815351c4>] __do_page_fault+0x1d4/0x510
11 &mm->mmap_sem 34 [<ffffffff81113d77>] vm_mmap_pgoff+0x87/0xd0
12 &mm->mmap_sem 17 [<ffffffff81127e71>] vm_munmap+0x41/0x80
13 ---------------
diff --git a/Documentation/m68k/00-INDEX b/Documentation/m68k/00-INDEX
deleted file mode 100644
index 2be8c6b00e74..000000000000
--- a/Documentation/m68k/00-INDEX
+++ /dev/null
@@ -1,7 +0,0 @@
-00-INDEX
- - this file
-README.buddha
- - Amiga Buddha and Catweasel IDE Driver
-kernel-options.txt
- - command line options for Linux/m68k
-
diff --git a/Documentation/media/uapi/dvb/video_function_calls.rst b/Documentation/media/uapi/dvb/video_function_calls.rst
index 3f4f6c9ffad7..a4222b6cd2d3 100644
--- a/Documentation/media/uapi/dvb/video_function_calls.rst
+++ b/Documentation/media/uapi/dvb/video_function_calls.rst
@@ -33,4 +33,3 @@ Video Function Calls
video-clear-buffer
video-set-streamtype
video-set-format
- video-set-attributes
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 0d8d7ef131e9..c1d913944ad8 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -471,8 +471,7 @@ And a couple of implicit varieties:
operations after the ACQUIRE operation will appear to happen after the
ACQUIRE operation with respect to the other components of the system.
ACQUIRE operations include LOCK operations and both smp_load_acquire()
- and smp_cond_acquire() operations. The later builds the necessary ACQUIRE
- semantics from relying on a control dependency and smp_rmb().
+ and smp_cond_load_acquire() operations.
Memory operations that occur before an ACQUIRE operation may appear to
happen after it completes.
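
A minimal sketch of the message-passing idiom these primitives support
(shared variables data and flag assumed initially zero):

	/* CPU 1: publish data, then release flag. */
	WRITE_ONCE(data, 42);
	smp_store_release(&flag, 1);

	/* CPU 2: spin until the loaded value (VAL) is non-zero; the
	 * load carries ACQUIRE semantics, so the subsequent read of
	 * data cannot be reordered before it. */
	smp_cond_load_acquire(&flag, VAL != 0);
	r1 = READ_ONCE(data);	/* guaranteed to observe 42 */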
diff --git a/Documentation/mips/00-INDEX b/Documentation/mips/00-INDEX
deleted file mode 100644
index 8ae9cffc2262..000000000000
--- a/Documentation/mips/00-INDEX
+++ /dev/null
@@ -1,4 +0,0 @@
-00-INDEX
- - this file.
-AU1xxx_IDE.README
- - README for MIPS AU1XXX IDE driver.
diff --git a/Documentation/mmc/00-INDEX b/Documentation/mmc/00-INDEX
deleted file mode 100644
index 4623bc0aa0bb..000000000000
--- a/Documentation/mmc/00-INDEX
+++ /dev/null
@@ -1,10 +0,0 @@
-00-INDEX
- - this file
-mmc-dev-attrs.txt
- - info on SD and MMC device attributes
-mmc-dev-parts.txt
- - info on SD and MMC device partitions
-mmc-async-req.txt
- - info on mmc asynchronous requests
-mmc-tools.txt
- - info on mmc-utils tools
diff --git a/Documentation/mtd/nand/pxa3xx-nand.txt b/Documentation/mtd/nand/pxa3xx-nand.txt
deleted file mode 100644
index 1074cbc67ec6..000000000000
--- a/Documentation/mtd/nand/pxa3xx-nand.txt
+++ /dev/null
@@ -1,113 +0,0 @@
-
-About this document
-===================
-
-Some notes about Marvell's NAND controller available in PXA and Armada 370/XP
-SoC (aka NFCv1 and NFCv2), with an emphasis on the latter.
-
-NFCv2 controller background
-===========================
-
-The controller has a 2176 bytes FIFO buffer. Therefore, in order to support
-larger pages, I/O operations on 4 KiB and 8 KiB pages are done with a set of
-chunked transfers.
-
-For instance, if we choose a 2048 data chunk and set "BCH" ECC (see below)
-we'll have this layout in the pages:
-
- ------------------------------------------------------------------------------
- | 2048B data | 32B spare | 30B ECC || 2048B data | 32B spare | 30B ECC | ... |
- ------------------------------------------------------------------------------
-
-The driver reads the data and spare portions independently and builds an internal
-buffer with this layout (in the 4 KiB page case):
-
- ------------------------------------------
- | 4096B data | 64B spare |
- ------------------------------------------
-
-Also, for the READOOB command the driver disables the ECC and reads a 'spare + ECC'
-OOB, one per chunk read.
-
- -------------------------------------------------------------------
- | 4096B data | 32B spare | 30B ECC | 32B spare | 30B ECC |
- -------------------------------------------------------------------
-
-So, in order to achieve reading (for instance), we issue several READ0 commands
-(with some additional controller-specific magic) and read two chunks of 2080B
-(2048 data + 32 spare) each.
-The driver accommodates this data to expose to the NAND core a contiguous
-buffer
-(4096 data + spare) or (4096 + spare + ECC + spare + ECC).
-
-ECC
-===
-
-The controller has built-in hardware ECC capabilities. In addition it is
-configurable between two modes: 1) Hamming, 2) BCH.
-
-Note that the actual BCH mode: BCH-4 or BCH-8 will depend on the way
-the controller is configured to transfer the data.
-
-In the BCH mode the ECC code will be calculated for each transferred chunk
-and expected to be located (when reading/programming) right after the spare
-bytes as the figure above shows.
-
-So, repeating the above scheme, a 2048B data chunk will be followed by 32B
-spare, and then the ECC controller will read/write the ECC code (30B in
-this case):
-
- ------------------------------------
- | 2048B data | 32B spare | 30B ECC |
- ------------------------------------
-
-If the ECC mode is 'BCH' then the ECC is *always* 30 bytes long.
-If the ECC mode is 'Hamming' the ECC is 6 bytes long, for each 512B block.
-So in Hamming mode, a 2048B page will have a 24B ECC.
-
-Despite all of the above, the controller requires the driver to only read or
-write in multiples of 8-bytes, because the data buffer is 64-bits.
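
A minimal sketch of the resulting ECC sizes (following the figures
above; BCH is fixed at 30 bytes per chunk, Hamming at 6 bytes per 512B
block):

	/* Hamming: 6 ECC bytes per 512-byte block, so 24B for a 2048B page. */
	static unsigned int hamming_ecc_bytes(unsigned int page_size)
	{
		return (page_size / 512) * 6;
	}

	/* BCH: always 30 ECC bytes per transferred chunk. */
	static unsigned int bch_ecc_bytes(void)
	{
		return 30;
	}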
-
-OOB
-===
-
-Because of the above scheme, and because the "spare" OOB is really located in
-the middle of a page, spare OOB cannot be read or written independently of the
-data area. In other words, in order to read the OOB (aka READOOB), the entire
-page (aka READ0) has to be read.
-
-In the same sense, in order to write to the spare OOB the driver has to write
-an *entire* page.
-
-Factory bad blocks handling
-===========================
-
-Given that BCH ECC requires laying out the device's pages in a split
-data/OOB/data/OOB way, the controller has a view of the flash page that's
-different from the specified (aka the manufacturer's) view. In other words,
-
-Factory view:
-
- -----------------------------------------------
- | Data |x OOB |
- -----------------------------------------------
-
-Driver's view:
-
- -----------------------------------------------
- | Data | OOB | Data x | OOB |
- -----------------------------------------------
-
-It can be seen from the above, that the factory bad block marker must be
-searched within the 'data' region, and not in the usual OOB region.
-
-In addition, this means that under regular usage the driver will write to
-that position (since it belongs to the data region) and every used block is
-likely to be marked as bad.
-
-For this reason, marking the block as bad in the OOB is explicitly
-disabled by using the NAND_BBT_NO_OOB_BBM option in the driver. The rationale
-for this is that there's no point in marking a block as bad, because good
-blocks are also 'marked as bad' (in the OOB BBM sense) under normal usage.
-
-Instead, the driver relies on the bad block table alone, and should only perform
-the bad block scan on the very first time (when the device hasn't been used).
diff --git a/Documentation/netlabel/00-INDEX b/Documentation/netlabel/00-INDEX
deleted file mode 100644
index 837bf35990e2..000000000000
--- a/Documentation/netlabel/00-INDEX
+++ /dev/null
@@ -1,10 +0,0 @@
-00-INDEX
- - this file.
-cipso_ipv4.txt
- - documentation on the IPv4 CIPSO protocol engine.
-draft-ietf-cipso-ipsecurity-01.txt
- - IETF draft of the CIPSO protocol, dated 16 July 1992.
-introduction.txt
- - NetLabel introduction, READ THIS FIRST.
-lsm_interface.txt
- - documentation on the NetLabel kernel security module API.
diff --git a/Documentation/netlabel/cipso_ipv4.txt b/Documentation/netlabel/cipso_ipv4.txt
index 93dacb132c3c..a6075481fd60 100644
--- a/Documentation/netlabel/cipso_ipv4.txt
+++ b/Documentation/netlabel/cipso_ipv4.txt
@@ -6,11 +6,12 @@ May 17, 2006
* Overview
-The NetLabel CIPSO/IPv4 protocol engine is based on the IETF Commercial IP
-Security Option (CIPSO) draft from July 16, 1992. A copy of this draft can be
-found in this directory, consult '00-INDEX' for the filename. While the IETF
-draft never made it to an RFC standard it has become a de-facto standard for
-labeled networking and is used in many trusted operating systems.
+The NetLabel CIPSO/IPv4 protocol engine is based on the IETF Commercial
+IP Security Option (CIPSO) draft from July 16, 1992. A copy of this
+draft can be found in this directory
+(draft-ietf-cipso-ipsecurity-01.txt). While the IETF draft never made
+it to an RFC standard it has become a de-facto standard for labeled
+networking and is used in many trusted operating systems.
* Outbound Packet Processing
diff --git a/Documentation/netlabel/introduction.txt b/Documentation/netlabel/introduction.txt
index 5ecd8d1dcf4e..3caf77bcff0f 100644
--- a/Documentation/netlabel/introduction.txt
+++ b/Documentation/netlabel/introduction.txt
@@ -22,7 +22,7 @@ refrain from calling the protocol engines directly, instead they should use
the NetLabel kernel security module API described below.
Detailed information about each NetLabel protocol engine can be found in this
-directory, consult '00-INDEX' for filenames.
+directory.
* Communication Layer
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
deleted file mode 100644
index 02a323c43261..000000000000
--- a/Documentation/networking/00-INDEX
+++ /dev/null
@@ -1,234 +0,0 @@
-00-INDEX
- - this file
-3c509.txt
- - information on the 3Com Etherlink III Series Ethernet cards.
-6pack.txt
- - info on the 6pack protocol, an alternative to KISS for AX.25
-LICENSE.qla3xxx
- - GPLv2 for QLogic Linux Networking HBA Driver
-LICENSE.qlge
- - GPLv2 for QLogic Linux qlge NIC Driver
-LICENSE.qlcnic
- - GPLv2 for QLogic Linux qlcnic NIC Driver
-PLIP.txt
- - PLIP: The Parallel Line Internet Protocol device driver
-README.ipw2100
- - README for the Intel PRO/Wireless 2100 driver.
-README.ipw2200
- - README for the Intel PRO/Wireless 2915ABG and 2200BG driver.
-README.sb1000
- - info on General Instrument/NextLevel SURFboard1000 cable modem.
-altera_tse.txt
- - Altera Triple-Speed Ethernet controller.
-arcnet-hardware.txt
- - tons of info on ARCnet, hubs, jumper settings for ARCnet cards, etc.
-arcnet.txt
- - info on using the ARCnet driver itself.
-atm.txt
- - info on where to get ATM programs and support for Linux.
-ax25.txt
- - info on using AX.25 and NET/ROM code for Linux
-baycom.txt
- - info on the driver for Baycom style amateur radio modems
-bonding.txt
- - Linux Ethernet Bonding Driver HOWTO: link aggregation in Linux.
-bridge.txt
- - where to get user space programs for ethernet bridging with Linux.
-cdc_mbim.txt
- - 3G/LTE USB modem (Mobile Broadband Interface Model)
-checksum-offloads.txt
- - Explanation of checksum offloads; LCO, RCO
-cops.txt
- - info on the COPS LocalTalk Linux driver
-cs89x0.txt
- - the Crystal LAN (CS8900/20-based) Ethernet ISA adapter driver
-cxacru.txt
- - Conexant AccessRunner USB ADSL Modem
-cxacru-cf.py
- - Conexant AccessRunner USB ADSL Modem configuration file parser
-cxgb.txt
- - Release Notes for the Chelsio N210 Linux device driver.
-dccp.txt
- - the Datagram Congestion Control Protocol (DCCP) (RFC 4340..42).
-dctcp.txt
- - DataCenter TCP congestion control
-de4x5.txt
- - the Digital EtherWORKS DE4?? and DE5?? PCI Ethernet driver
-decnet.txt
- - info on using the DECnet networking layer in Linux.
-dl2k.txt
- - README for D-Link DL2000-based Gigabit Ethernet Adapters (dl2k.ko).
-dm9000.txt
- - README for the Simtec DM9000 Network driver.
-dmfe.txt
- - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
-dns_resolver.txt
- - The DNS resolver module allows kernel services to make DNS queries.
-driver.txt
- - Softnet driver issues.
-ena.txt
- - info on Amazon's Elastic Network Adapter (ENA)
-e100.txt
- - info on Intel's EtherExpress PRO/100 line of 10/100 boards
-e1000.txt
- - info on Intel's E1000 line of gigabit ethernet boards
-e1000e.txt
- - README for the Intel Gigabit Ethernet Driver (e1000e).
-eql.txt
- - serial IP load balancing
-fib_trie.txt
- - Level Compressed Trie (LC-trie) notes: a structure for routing.
-filter.txt
- - Linux Socket Filtering
-fore200e.txt
- - FORE Systems PCA-200E/SBA-200E ATM NIC driver info.
-framerelay.txt
- - info on using Frame Relay/Data Link Connection Identifier (DLCI).
-gen_stats.txt
- - Generic networking statistics for netlink users.
-generic-hdlc.txt
- - The generic High Level Data Link Control (HDLC) layer.
-generic_netlink.txt
- - info on Generic Netlink
-gianfar.txt
- - Gianfar Ethernet Driver.
-i40e.txt
- - README for the Intel Ethernet Controller XL710 Driver (i40e).
-i40evf.txt
- - Short note on the Driver for the Intel(R) XL710 X710 Virtual Function
-ieee802154.txt
- - Linux IEEE 802.15.4 implementation, API and drivers
-igb.txt
- - README for the Intel Gigabit Ethernet Driver (igb).
-igbvf.txt
- - README for the Intel Gigabit Ethernet Driver (igbvf).
-ip-sysctl.txt
- - /proc/sys/net/ipv4/* variables
-ip_dynaddr.txt
- - IP dynamic address hack e.g. for auto-dialup links
-ipddp.txt
- - AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation
-iphase.txt
- - Interphase PCI ATM (i)Chip IA Linux driver info.
-ipsec.txt
- - Note on not compressing IPSec payload and resulting failed policy check.
-ipv6.txt
- - Options to the ipv6 kernel module.
-ipvs-sysctl.txt
- - Per-inode explanation of the /proc/sys/net/ipv4/vs interface.
-irda.txt
- - where to get IrDA (infrared) utilities and info for Linux.
-ixgb.txt
- - README for the Intel 10 Gigabit Ethernet Driver (ixgb).
-ixgbe.txt
- - README for the Intel 10 Gigabit Ethernet Driver (ixgbe).
-ixgbevf.txt
- - README for the Intel Virtual Function (VF) Driver (ixgbevf).
-l2tp.txt
- - User guide to the L2TP tunnel protocol.
-lapb-module.txt
- - programming information of the LAPB module.
-ltpc.txt
- - the Apple or Farallon LocalTalk PC card driver
-mac80211-auth-assoc-deauth.txt
- - authentication and association / deauth-disassoc with mac80211
-mac80211-injection.txt
- - HOWTO use packet injection with mac80211
-multiqueue.txt
- - HOWTO for multiqueue network device support.
-netconsole.txt
- - The network console module netconsole.ko: configuration and notes.
-netdev-features.txt
- - Network interface features API description.
-netdevices.txt
- - info on network device driver functions exported to the kernel.
-netif-msg.txt
- - Design of the network interface message level setting (NETIF_MSG_*).
-netlink_mmap.txt
- - memory mapped I/O with netlink
-nf_conntrack-sysctl.txt
- - list of netfilter-sysctl knobs.
-nfc.txt
- - The Linux Near Field Communication (NFC) subsystem.
-openvswitch.txt
- - Open vSwitch developer documentation.
-operstates.txt
- - Overview of network interface operational states.
-packet_mmap.txt
- - User guide to memory mapped packet socket rings (PACKET_[RT]X_RING).
-phonet.txt
- - The Phonet packet protocol used in Nokia cellular modems.
-phy.txt
- - The PHY abstraction layer.
-pktgen.txt
- - User guide to the kernel packet generator (pktgen.ko).
-policy-routing.txt
- - IP policy-based routing
-ppp_generic.txt
- - Information about the generic PPP driver.
-proc_net_tcp.txt
- - Per inode overview of the /proc/net/tcp and /proc/net/tcp6 interfaces.
-radiotap-headers.txt
- - Background on radiotap headers.
-ray_cs.txt
- - Raylink Wireless LAN card driver info.
-rds.txt
- - Background on the reliable, ordered datagram delivery method RDS.
-regulatory.txt
- - Overview of the Linux wireless regulatory infrastructure.
-rxrpc.txt
- - Guide to the RxRPC protocol.
-s2io.txt
- - Release notes for Neterion Xframe I/II 10GbE driver.
-scaling.txt
- - Explanation of network scaling techniques: RSS, RPS, RFS, aRFS, XPS.
-sctp.txt
- - Notes on the Linux kernel implementation of the SCTP protocol.
-secid.txt
- - Explanation of the secid member in flow structures.
-skfp.txt
- - SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info.
-smc9.txt
- - the driver for SMC's 9000 series of Ethernet cards
-spider_net.txt
- - README for the Spidernet Driver (as found in PS3 / Cell BE).
-stmmac.txt
- - README for the STMicro Synopsys Ethernet driver.
-tc-actions-env-rules.txt
- - rules for traffic control (tc) actions.
-timestamping.txt
- - overview of network packet timestamping variants.
-tcp.txt
- - short blurb on how TCP output takes place.
-tcp-thin.txt
- - kernel tuning options for low rate 'thin' TCP streams.
-team.txt
- - pointer to information for ethernet teaming devices.
-tlan.txt
- - ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info.
-tproxy.txt
- - Transparent proxy support user guide.
-tuntap.txt
- - TUN/TAP device driver, allowing user space Rx/Tx of packets.
-udplite.txt
- - UDP-Lite protocol (RFC 3828) introduction.
-vortex.txt
- - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-vxge.txt
- - README for the Neterion X3100 PCIe Server Adapter.
-vxlan.txt
- - Virtual extensible LAN overview
-x25.txt
- - general info on X.25 development.
-x25-iface.txt
- - description of the X.25 Packet Layer to LAPB device interface.
-xfrm_device.txt
- - description of XFRM offload API
-xfrm_proc.txt
- - description of the statistics package for XFRM.
-xfrm_sync.txt
- - sync patches for XFRM enable migration of an SA between hosts.
-xfrm_sysctl.txt
- - description of the XFRM configuration options.
-z8530drv.txt
- - info about Linux driver for Z8530 based HDLC cards for AX.25
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index ff929cfab4f4..4ae4f9d8f8fe 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -159,8 +159,8 @@ log2(2048) LSB of the addr will be masked off, meaning that 2048, 2050
and 3000 refer to the same chunk.
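
A minimal sketch of that masking (chunk size assumed to be a power of
two, 2048 here):

	#include <assert.h>
	#include <stdint.h>

	/* Drop the log2(chunk_size) least significant bits of addr. */
	static uint64_t chunk_base(uint64_t addr, uint64_t chunk_size)
	{
		return addr & ~(chunk_size - 1);
	}

	int main(void)
	{
		/* 2048, 2050 and 3000 all resolve to the same chunk. */
		assert(chunk_base(2048, 2048) == 2048);
		assert(chunk_base(2050, 2048) == 2048);
		assert(chunk_base(3000, 2048) == 2048);
		return 0;
	}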
-UMEM Completetion Ring
-~~~~~~~~~~~~~~~~~~~~~~
+UMEM Completion Ring
+~~~~~~~~~~~~~~~~~~~~
The Completion Ring is used to transfer ownership of UMEM frames from
kernel-space to user-space. Just like the Fill ring, UMEM indices are
diff --git a/Documentation/networking/defza.txt b/Documentation/networking/defza.txt
new file mode 100644
index 000000000000..663e4a906751
--- /dev/null
+++ b/Documentation/networking/defza.txt
@@ -0,0 +1,57 @@
+Notes on the DEC FDDIcontroller 700 (DEFZA-xx) driver v.1.1.4.
+
+
+DEC FDDIcontroller 700 is DEC's first-generation TURBOchannel FDDI
+network card, designed in 1990 specifically for the DECstation 5000
+model 200 workstation. The board is a single attachment station and
+it was manufactured in two variations, both of which are supported.
+
+First is the SAS MMF DEFZA-AA option, the original design implementing
+the standard MMF-PMD, however with a pair of ST connectors rather than
+the usual MIC connector. The other one is the SAS ThinWire/STP DEFZA-CA
+option, denoted 700-C, with the network medium selectable by a switch
+between the DEC proprietary ThinWire-PMD using a BNC connector and the
+standard STP-PMD using a DE-9F connector. This option can interface to
+a DECconcentrator 500 device and, in the case of the STP-PMD, also other
+FDDI equipment and was designed to make it easier to transition from
+existing IEEE 802.3 10BASE2 Ethernet and IEEE 802.5 Token Ring networks
+by providing means to reuse existing cabling.
+
+This driver handles any number of cards installed in a single system.
+They get fddi0, fddi1, etc. interface names assigned in the order of
+increasing TURBOchannel slot numbers.
+
+The board only supports DMA on the receive side. Transmission involves
+the use of PIO. As a result, under a heavy transmission load there will
+be a significant impact on system performance.
+
+The board supports a 64-entry CAM for matching destination addresses.
+Two entries are preoccupied by the Directed Beacon and Ring Purger
+multicast addresses and the rest is used as a multicast filter. An
+all-multi mode is also supported for LLC frames and it is used if
+requested explicitly or if the CAM overflows. The promiscuous mode
+supports separate enables for LLC and SMT frames, but this driver
+doesn't support changing them individually.
+
+
+Known problems:
+
+None.
+
+
+To do:
+
+5. MAC address change. The card does not support changing the Media
+ Access Controller's address registers but a similar effect can be
+ achieved by adding an alias to the CAM. There is no way to disable
+ matching against the original address though.
+
+7. Queueing incoming/outgoing SMT frames in the driver if the SMT
+ receive/RMC transmit ring is full. (?)
+
+8. Retrieving/reporting FDDI/SNMP stats.
+
+
+Both success and failure reports are welcome.
+
+Maciej W. Rozycki <macro@linux-mips.org>
diff --git a/Documentation/networking/devlink-params-bnxt.txt b/Documentation/networking/devlink-params-bnxt.txt
new file mode 100644
index 000000000000..481aa303d5b4
--- /dev/null
+++ b/Documentation/networking/devlink-params-bnxt.txt
@@ -0,0 +1,18 @@
+enable_sriov [DEVICE, GENERIC]
+ Configuration mode: Permanent
+
+ignore_ari [DEVICE, GENERIC]
+ Configuration mode: Permanent
+
+msix_vec_per_pf_max [DEVICE, GENERIC]
+ Configuration mode: Permanent
+
+msix_vec_per_pf_min [DEVICE, GENERIC]
+ Configuration mode: Permanent
+
+gre_ver_check [DEVICE, DRIVER-SPECIFIC]
+ Generic Routing Encapsulation (GRE) version check will
+ be enabled in the device. If disabled, the device skips
+ version checking for incoming packets.
+ Type: Boolean
+ Configuration mode: Permanent
diff --git a/Documentation/networking/devlink-params.txt b/Documentation/networking/devlink-params.txt
new file mode 100644
index 000000000000..ae444ffe73ac
--- /dev/null
+++ b/Documentation/networking/devlink-params.txt
@@ -0,0 +1,42 @@
+Devlink configuration parameters
+================================
+The following is the list of device-level configuration parameters
+exposed via the devlink interface. Each parameter can be either
+generic or driver-specific.
+
+Note that the driver-specific files should also list the generic
+params they support, along with the supported configuration modes.
+
+Each parameter can be set in different configuration modes:
+ runtime - set while driver is running, no reset required.
+ driverinit - applied while driver initializes, requires restart
+ driver by devlink reload command.
+ permanent - written to device's non-volatile memory, hard reset
+ required.
+
+Following is the list of parameters:
+====================================
+enable_sriov [DEVICE, GENERIC]
+ Enable Single Root I/O Virtualisation (SRIOV) in
+ the device.
+ Type: Boolean
+
+ignore_ari [DEVICE, GENERIC]
+ Ignore the Alternative Routing-ID Interpretation (ARI)
+ capability. If enabled, the adapter will ignore the ARI
+ capability even when the platform supports it, and will
+ create the same number of partitions as when the
+ platform does not support ARI.
+ Type: Boolean
+
+msix_vec_per_pf_max [DEVICE, GENERIC]
+ Provides the maximum number of MSIX interrupts that
+ a device can create. Value is same across all
+ physical functions (PFs) in the device.
+ Type: u32
+
+msix_vec_per_pf_min [DEVICE, GENERIC]
+ Provides the minimum number of MSIX interrupts required
+ for the device initialization. Value is same across all
+ physical functions (PFs) in the device.
+ Type: u32
diff --git a/drivers/staging/fsl-dpaa2/ethernet/ethernet-driver.rst b/Documentation/networking/dpaa2/ethernet-driver.rst
index 90ec940749e8..90ec940749e8 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/ethernet-driver.rst
+++ b/Documentation/networking/dpaa2/ethernet-driver.rst
diff --git a/Documentation/networking/dpaa2/index.rst b/Documentation/networking/dpaa2/index.rst
index 10bea113a7bc..67bd87fe6c53 100644
--- a/Documentation/networking/dpaa2/index.rst
+++ b/Documentation/networking/dpaa2/index.rst
@@ -7,3 +7,4 @@ DPAA2 Documentation
overview
dpio-driver
+ ethernet-driver
diff --git a/Documentation/networking/e100.rst b/Documentation/networking/e100.rst
index f81111eba9c5..5e2839b4ec92 100644
--- a/Documentation/networking/e100.rst
+++ b/Documentation/networking/e100.rst
@@ -1,4 +1,5 @@
-==============================================================
+.. SPDX-License-Identifier: GPL-2.0+
+
Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
==============================================================
diff --git a/Documentation/networking/e1000.rst b/Documentation/networking/e1000.rst
index f10dd4086921..6379d4d20771 100644
--- a/Documentation/networking/e1000.rst
+++ b/Documentation/networking/e1000.rst
@@ -1,4 +1,5 @@
-===========================================================
+.. SPDX-License-Identifier: GPL-2.0+
+
Linux* Base Driver for Intel(R) Ethernet Network Connection
===========================================================
diff --git a/Documentation/networking/e1000e.rst b/Documentation/networking/e1000e.rst
new file mode 100644
index 000000000000..33554e5416c5
--- /dev/null
+++ b/Documentation/networking/e1000e.rst
@@ -0,0 +1,382 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Linux* Driver for Intel(R) Ethernet Network Connection
+======================================================
+
+Intel Gigabit Linux driver.
+Copyright(c) 2008-2018 Intel Corporation.
+
+Contents
+========
+
+- Identifying Your Adapter
+- Command Line Parameters
+- Additional Configurations
+- Support
+
+
+Identifying Your Adapter
+========================
+For information on how to identify your adapter, and for the latest Intel
+network drivers, refer to the Intel Support website:
+https://www.intel.com/support
+
+
+Command Line Parameters
+=======================
+If the driver is built as a module, the following optional parameters can be
+set by supplying them on the modprobe command line using this syntax::
+
+ modprobe e1000e [<option>=<VAL1>,<VAL2>,...]
+
+There needs to be a <VAL#> for each network port in the system supported by
+this driver. The values will be applied to each instance, in function order.
+For example::
+
+ modprobe e1000e InterruptThrottleRate=16000,16000
+
+In this case, there are two network ports supported by e1000e in the system.
+The default value for each parameter is generally the recommended setting,
+unless otherwise noted.
+
+NOTE: A descriptor describes a data buffer and attributes related to the data
+buffer. This information is accessed by the hardware.
+
+InterruptThrottleRate
+---------------------
+:Valid Range: 0,1,3,4,100-100000
+:Default Value: 3
+
+Interrupt Throttle Rate controls the number of interrupts each interrupt
+vector can generate per second. Increasing ITR lowers latency at the cost of
+increased CPU utilization, though it may help throughput in some circumstances.
+
+Setting InterruptThrottleRate to a value greater or equal to 100
+will program the adapter to send out a maximum of that many interrupts
+per second, even if more packets have come in. This reduces interrupt
+load on the system and can lower CPU utilization under heavy load,
+but will increase latency as packets are not processed as quickly.
+
+The default behaviour of the driver previously assumed a static
+InterruptThrottleRate value of 8000, providing a good fallback value for
+all traffic types, but lacking in small packet performance and latency.
+The hardware can handle many more small packets per second however, and
+for this reason an adaptive interrupt moderation algorithm was implemented.
+
+The driver has two adaptive modes (setting 1 or 3) in which
+it dynamically adjusts the InterruptThrottleRate value based on the traffic
+that it receives. After determining the type of incoming traffic in the last
+timeframe, it will adjust the InterruptThrottleRate to an appropriate value
+for that traffic.
+
+The algorithm classifies the incoming traffic every interval into
+classes. Once the class is determined, the InterruptThrottleRate value is
+adjusted to suit that traffic type the best. There are three classes defined:
+"Bulk traffic", for large amounts of packets of normal size; "Low latency",
+for small amounts of traffic and/or a significant percentage of small
+packets; and "Lowest latency", for almost completely small packets or
+minimal traffic.
+
+ - 0: Off
+ Turns off any interrupt moderation and may improve small packet latency.
+ However, this is generally not suitable for bulk throughput traffic due
+ to the increased CPU utilization of the higher interrupt rate.
+ - 1: Dynamic mode
+   This mode attempts to moderate interrupts per vector while maintaining
+   very low latency. This can sometimes cause extra CPU utilization.
+   Consider this mode if you are deploying e1000e in a latency-sensitive
+   environment.
+ - 3: Dynamic Conservative mode (default)
+ In dynamic conservative mode, the InterruptThrottleRate value is set to
+ 4000 for traffic that falls in class "Bulk traffic". If traffic falls in
+ the "Low latency" or "Lowest latency" class, the InterruptThrottleRate is
+ increased stepwise to 20000. This default mode is suitable for most
+ applications.
+ - 4: Simplified Balancing mode
+ In simplified mode the interrupt rate is based on the ratio of TX and
+ RX traffic. If the bytes per second rate is approximately equal, the
+ interrupt rate will drop as low as 2000 interrupts per second. If the
+ traffic is mostly transmit or mostly receive, the interrupt rate could
+ be as high as 8000.
+ - 100-100000:
+   As described above, programs the adapter to send at most that many
+   interrupts per second, even if more packets have come in. This reduces
+   interrupt load on the system and can lower CPU utilization under heavy
+   load, but will increase latency as packets are not processed as quickly.
+
+NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and
+RxAbsIntDelay parameters. In other words, minimizing the receive and/or
+transmit absolute delays does not force the controller to generate more
+interrupts than what the Interrupt Throttle Rate allows.
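+
+For example, a fixed throttle rate can be made persistent across module
+reloads with a modprobe configuration file (the file name below is
+illustrative)::
+
+ # /etc/modprobe.d/e1000e.conf
+ options e1000e InterruptThrottleRate=3000,3000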
+
+RxIntDelay
+----------
+:Valid Range: 0-65535 (0=off)
+:Default Value: 0
+
+This value delays the generation of receive interrupts in units of 1.024
+microseconds. Receive interrupt reduction can improve CPU efficiency if
+properly tuned for specific network traffic. Increasing this value adds extra
+latency to frame reception and can end up decreasing the throughput of TCP
+traffic. If the system is reporting dropped receives, this value may be set
+too high, causing the driver to run out of available receive descriptors.
+
+CAUTION: When setting RxIntDelay to a value other than 0, adapters may hang
+(stop transmitting) under certain network conditions. If this occurs a NETDEV
+WATCHDOG message is logged in the system event log. In addition, the
+controller is automatically reset, restoring the network connection. To
+eliminate the potential for the hang, ensure that RxIntDelay is set to 0.
+
+RxAbsIntDelay
+-------------
+:Valid Range: 0-65535 (0=off)
+:Default Value: 8
+
+This value, in units of 1.024 microseconds, limits the maximum delay in
+which a receive interrupt is generated. It ensures that an interrupt is
+generated after the initial packet is received within the set amount of
+time, and is useful only if RxIntDelay is non-zero. Proper tuning, along
+with RxIntDelay, may improve traffic throughput in specific network
+conditions.
+
+TxIntDelay
+----------
+:Valid Range: 0-65535 (0=off)
+:Default Value: 8
+
+This value delays the generation of transmit interrupts in units of 1.024
+microseconds. Transmit interrupt reduction can improve CPU efficiency if
+properly tuned for specific network traffic. If the system is reporting
+dropped transmits, this value may be set too high causing the driver to run
+out of available transmit descriptors.
+
+TxAbsIntDelay
+-------------
+:Valid Range: 0-65535 (0=off)
+:Default Value: 32
+
+This value, in units of 1.024 microseconds, limits the maximum delay in
+which a transmit interrupt is generated. It is useful only if TxIntDelay is
+non-zero. It ensures that an interrupt is generated after the initial packet
+is sent on the wire within the set amount of time. Proper tuning, along with
+TxIntDelay, may improve traffic throughput in specific network conditions.
+
+copybreak
+---------
+:Valid Range: 0-xxxxxxx (0=off)
+:Default Value: 256
+
+The driver copies all packets at or below this size to a fresh receive
+buffer before handing them up the stack.
+This parameter differs from the other parameters in that it is a single
+value (not 1,1,1 etc.) applied to all driver instances, and it is also
+available at runtime via /sys/module/e1000e/parameters/copybreak.
+
+To use copybreak, type::
+
+ modprobe e1000e copybreak=128
+
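+Because the parameter is exposed in sysfs, it can also be changed at
+runtime; for example (a sketch, assuming the module is already loaded)::
+
+ echo 128 > /sys/module/e1000e/parameters/copybreak
+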
+SmartPowerDownEnable
+--------------------
+:Valid Range: 0,1
+:Default Value: 0 (disabled)
+
+Allows the PHY to turn off in lower power states. The user can turn this
+feature on or off in supported chipsets.
+
+KumeranLockLoss
+---------------
+:Valid Range: 0,1
+:Default Value: 1 (enabled)
+
+This workaround skips resetting the PHY at shutdown for the initial silicon
+releases of ICH8 systems.
+
+IntMode
+-------
+:Valid Range: 0-2
+:Default Value: 0
+
+ +-------+----------------+
+ | Value | Interrupt Mode |
+ +=======+================+
+ | 0 | Legacy |
+ +-------+----------------+
+ | 1 | MSI |
+ +-------+----------------+
+ | 2 | MSI-X |
+ +-------+----------------+
+
+IntMode allows load-time control over the type of interrupt registered by
+the driver. MSI-X is required for multiple queue support, and some kernels
+and combinations of kernel .config options will force a lower level of
+interrupt support.
+
+This command will show different values for each type of interrupt::
+
+ cat /proc/interrupts
+
+CrcStripping
+------------
+:Valid Range: 0,1
+:Default Value: 1 (enabled)
+
+Strip the CRC from received packets before sending up the network stack. If
+you have a machine with a BMC enabled but cannot receive IPMI traffic after
+loading or enabling the driver, try disabling this feature.
+
+WriteProtectNVM
+---------------
+:Valid Range: 0,1
+:Default Value: 1 (enabled)
+
+If set to 1, configure the hardware to ignore all write/erase cycles to the
+GbE region in the ICHx NVM (in order to prevent accidental corruption of the
+NVM). This feature can be disabled by setting the parameter to 0 during initial
+driver load.
+
+NOTE: The machine must be power cycled (full off/on) when enabling NVM writes
+via setting the parameter to zero. Once the NVM has been locked (via the
+parameter at 1 when the driver loads) it cannot be unlocked except via power
+cycle.
+
+Debug
+-----
+:Valid Range: 0-16 (0=none,...,16=all)
+:Default Value: 0
+
+This parameter adjusts the level of debug messages displayed in the system logs.
+
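+For example, to load the driver with all debug messages enabled::
+
+ modprobe e1000e Debug=16
+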
+
+Additional Features and Configurations
+======================================
+
+Jumbo Frames
+------------
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit (MTU)
+to a value larger than the default value of 1500.
+
+Use the ifconfig command to increase the MTU size. For example, enter the
+following where <x> is the interface number::
+
+ ifconfig eth<x> mtu 9000 up
+
+Alternatively, you can use the ip command as follows::
+
+ ip link set mtu 9000 dev eth<x>
+ ip link set up dev eth<x>
+
+This setting is not saved across reboots. The setting change can be made
+permanent by adding 'MTU=9000' to the file:
+
+- For RHEL: /etc/sysconfig/network-scripts/ifcfg-eth<x>
+- For SLES: /etc/sysconfig/network/<config_file>
+
+NOTE: The maximum MTU setting for Jumbo Frames is 8996. This value coincides
+with the maximum Jumbo Frames size of 9018 bytes.
+
+NOTE: Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
+poor performance or loss of link.
+
+NOTE: The following adapters limit Jumbo Frames sized packets to a maximum of
+4088 bytes:
+
+ - Intel(R) 82578DM Gigabit Network Connection
+ - Intel(R) 82577LM Gigabit Network Connection
+
+The following adapters do not support Jumbo Frames:
+
+ - Intel(R) PRO/1000 Gigabit Server Adapter
+ - Intel(R) PRO/1000 PM Network Connection
+ - Intel(R) 82562G 10/100 Network Connection
+ - Intel(R) 82562G-2 10/100 Network Connection
+ - Intel(R) 82562GT 10/100 Network Connection
+ - Intel(R) 82562GT-2 10/100 Network Connection
+ - Intel(R) 82562V 10/100 Network Connection
+ - Intel(R) 82562V-2 10/100 Network Connection
+ - Intel(R) 82566DC Gigabit Network Connection
+ - Intel(R) 82566DC-2 Gigabit Network Connection
+ - Intel(R) 82566DM Gigabit Network Connection
+ - Intel(R) 82566MC Gigabit Network Connection
+ - Intel(R) 82566MM Gigabit Network Connection
+ - Intel(R) 82567V-3 Gigabit Network Connection
+ - Intel(R) 82577LC Gigabit Network Connection
+ - Intel(R) 82578DC Gigabit Network Connection
+
+NOTE: Jumbo Frames cannot be configured on an 82579-based Network device if
+MACSec is enabled on the system.
+
+
+ethtool
+-------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information. The latest ethtool
+version is required for this functionality. Download it at:
+
+https://www.kernel.org/pub/software/network/ethtool/
+
+NOTE: When validating enable/disable tests on some parts (for example, 82578),
+it is necessary to add a few seconds between tests when working with ethtool.
+
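+For example, ethtool can report the driver and firmware versions and dump
+per-device statistics (eth<x> is the interface name)::
+
+ ethtool -i eth<x>
+ ethtool -S eth<x>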
+
+Speed and Duplex Configuration
+------------------------------
+In addressing speed and duplex configuration issues, you need to distinguish
+between copper-based adapters and fiber-based adapters.
+
+In the default mode, an Intel(R) Ethernet Network Adapter using copper
+connections will attempt to auto-negotiate with its link partner to determine
+the best setting. If the adapter cannot establish link with the link partner
+using auto-negotiation, you may need to manually configure the adapter and link
+partner to identical settings to establish link and pass packets. This should
+only be needed when attempting to link with an older switch that does not
+support auto-negotiation or one that has been forced to a specific speed or
+duplex mode. Your link partner must match the setting you choose. 1 Gbps speeds
+and higher cannot be forced. Use the autonegotiation advertising setting to
+manually set devices for 1 Gbps and higher.
+
+Speed, duplex, and autonegotiation advertising are configured through the
+ethtool* utility.
+
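+For example, 100 Mbps full duplex could be forced, or 1 Gbps advertised for
+autonegotiation, as follows (eth<x> is illustrative; 0x020 corresponds to
+1000baseT/Full)::
+
+ ethtool -s eth<x> speed 100 duplex full autoneg off
+ ethtool -s eth<x> advertise 0x020
+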
+Caution: Only experienced network administrators should force speed and duplex
+or change autonegotiation advertising manually. The settings at the switch must
+always match the adapter settings. Adapter performance may suffer or your
+adapter may not operate if you configure the adapter differently from your
+switch.
+
+An Intel(R) Ethernet Network Adapter using fiber-based connections, however,
+will not attempt to auto-negotiate with its link partner since those adapters
+operate only in full duplex and only at their native speed.
+
+
+Enabling Wake on LAN* (WoL)
+---------------------------
+WoL is configured through the ethtool* utility.
+
+WoL will be enabled on the system during the next shut down or reboot. For
+this driver version, in order to enable WoL, the e1000e driver must be loaded
+prior to shutting down or suspending the system.
+
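+For example, to enable wake on magic packet and then verify the setting
+(eth<x> is illustrative; 'g' selects magic-packet wake)::
+
+ ethtool -s eth<x> wol g
+ ethtool eth<x>
+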
+NOTE: Wake on LAN is only supported on port A for the following devices:
+
+- Intel(R) PRO/1000 PT Dual Port Network Connection
+- Intel(R) PRO/1000 PT Dual Port Server Connection
+- Intel(R) PRO/1000 PT Dual Port Server Adapter
+- Intel(R) PRO/1000 PF Dual Port Server Adapter
+- Intel(R) PRO/1000 PT Quad Port Server Adapter
+- Intel(R) Gigabit PT Quad Port Server ExpressModule
+
+
+Support
+=======
+For general information, go to the Intel support website at:
+
+https://www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+https://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on a supported kernel
+with a supported adapter, email the specific information related to the issue
+to e1000-devel@lists.sf.net.
diff --git a/Documentation/networking/e1000e.txt b/Documentation/networking/e1000e.txt
deleted file mode 100644
index 12089547baed..000000000000
--- a/Documentation/networking/e1000e.txt
+++ /dev/null
@@ -1,312 +0,0 @@
-Linux* Driver for Intel(R) Ethernet Network Connection
-======================================================
-
-Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2013 Intel Corporation.
-
-Contents
-========
-
-- Identifying Your Adapter
-- Command Line Parameters
-- Additional Configurations
-- Support
-
-Identifying Your Adapter
-========================
-
-The e1000e driver supports all PCI Express Intel(R) Gigabit Network
-Connections, except those that are 82575, 82576 and 82580-based*.
-
-* NOTE: The Intel(R) PRO/1000 P Dual Port Server Adapter is supported by
- the e1000 driver, not the e1000e driver due to the 82546 part being used
- behind a PCI Express bridge.
-
-For more information on how to identify your adapter, go to the Adapter &
-Driver ID Guide at:
-
- http://support.intel.com/support/go/network/adapter/idguide.htm
-
-For the latest Intel network drivers for Linux, refer to the following
-website. In the search field, enter your adapter name or type, or use the
-networking link on the left to search for your adapter:
-
- http://support.intel.com/support/go/network/adapter/home.htm
-
-Command Line Parameters
-=======================
-
-The default value for each parameter is generally the recommended setting,
-unless otherwise noted.
-
-NOTES: For more information about the InterruptThrottleRate,
- RxIntDelay, TxIntDelay, RxAbsIntDelay, and TxAbsIntDelay
- parameters, see the application note at:
- http://www.intel.com/design/network/applnots/ap450.htm
-
-InterruptThrottleRate
----------------------
-Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
- 4=simplified balancing)
-Default Value: 3
-
-The driver can limit the amount of interrupts per second that the adapter
-will generate for incoming packets. It does this by writing a value to the
-adapter that is based on the maximum amount of interrupts that the adapter
-will generate per second.
-
-Setting InterruptThrottleRate to a value greater or equal to 100
-will program the adapter to send out a maximum of that many interrupts
-per second, even if more packets have come in. This reduces interrupt
-load on the system and can lower CPU utilization under heavy load,
-but will increase latency as packets are not processed as quickly.
-
-The default behaviour of the driver previously assumed a static
-InterruptThrottleRate value of 8000, providing a good fallback value for
-all traffic types, but lacking in small packet performance and latency.
-The hardware can handle many more small packets per second however, and
-for this reason an adaptive interrupt moderation algorithm was implemented.
-
-The driver has two adaptive modes (setting 1 or 3) in which
-it dynamically adjusts the InterruptThrottleRate value based on the traffic
-that it receives. After determining the type of incoming traffic in the last
-timeframe, it will adjust the InterruptThrottleRate to an appropriate value
-for that traffic.
-
-The algorithm classifies the incoming traffic every interval into
-classes. Once the class is determined, the InterruptThrottleRate value is
-adjusted to suit that traffic type the best. There are three classes defined:
-"Bulk traffic", for large amounts of packets of normal size; "Low latency",
-for small amounts of traffic and/or a significant percentage of small
-packets; and "Lowest latency", for almost completely small packets or
-minimal traffic.
-
-In dynamic conservative mode, the InterruptThrottleRate value is set to 4000
-for traffic that falls in class "Bulk traffic". If traffic falls in the "Low
-latency" or "Lowest latency" class, the InterruptThrottleRate is increased
-stepwise to 20000. This default mode is suitable for most applications.
-
-For situations where low latency is vital such as cluster or
-grid computing, the algorithm can reduce latency even more when
-InterruptThrottleRate is set to mode 1. In this mode, which operates
-the same as mode 3, the InterruptThrottleRate will be increased stepwise to
-70000 for traffic in class "Lowest latency".
-
-In simplified mode the interrupt rate is based on the ratio of TX and
-RX traffic. If the bytes per second rate is approximately equal, the
-interrupt rate will drop as low as 2000 interrupts per second. If the
-traffic is mostly transmit or mostly receive, the interrupt rate could
-be as high as 8000.
-
-Setting InterruptThrottleRate to 0 turns off any interrupt moderation
-and may improve small packet latency, but is generally not suitable
-for bulk throughput traffic.
-
-NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and
- RxAbsIntDelay parameters. In other words, minimizing the receive
- and/or transmit absolute delays does not force the controller to
- generate more interrupts than what the Interrupt Throttle Rate
- allows.
-
-NOTE: When e1000e is loaded with default settings and multiple adapters
- are in use simultaneously, the CPU utilization may increase non-
- linearly. In order to limit the CPU utilization without impacting
- the overall throughput, we recommend that you load the driver as
- follows:
-
- modprobe e1000e InterruptThrottleRate=3000,3000,3000
-
- This sets the InterruptThrottleRate to 3000 interrupts/sec for
- the first, second, and third instances of the driver. The range
- of 2000 to 3000 interrupts per second works on a majority of
- systems and is a good starting point, but the optimal value will
- be platform-specific. If CPU utilization is not a concern, use
- RX_POLLING (NAPI) and default driver settings.
-
-RxIntDelay
-----------
-Valid Range: 0-65535 (0=off)
-Default Value: 0
-
-This value delays the generation of receive interrupts in units of 1.024
-microseconds. Receive interrupt reduction can improve CPU efficiency if
-properly tuned for specific network traffic. Increasing this value adds
-extra latency to frame reception and can end up decreasing the throughput
-of TCP traffic. If the system is reporting dropped receives, this value
-may be set too high, causing the driver to run out of available receive
-descriptors.
-
-CAUTION: When setting RxIntDelay to a value other than 0, adapters may
- hang (stop transmitting) under certain network conditions. If
- this occurs a NETDEV WATCHDOG message is logged in the system
- event log. In addition, the controller is automatically reset,
- restoring the network connection. To eliminate the potential
- for the hang ensure that RxIntDelay is set to 0.
-
-RxAbsIntDelay
--------------
-Valid Range: 0-65535 (0=off)
-Default Value: 8
-
-This value, in units of 1.024 microseconds, limits the delay in which a
-receive interrupt is generated. Useful only if RxIntDelay is non-zero,
-this value ensures that an interrupt is generated after the initial
-packet is received within the set amount of time. Proper tuning,
-along with RxIntDelay, may improve traffic throughput in specific network
-conditions.
-
-TxIntDelay
-----------
-Valid Range: 0-65535 (0=off)
-Default Value: 8
-
-This value delays the generation of transmit interrupts in units of
-1.024 microseconds. Transmit interrupt reduction can improve CPU
-efficiency if properly tuned for specific network traffic. If the
-system is reporting dropped transmits, this value may be set too high
-causing the driver to run out of available transmit descriptors.
-
-TxAbsIntDelay
--------------
-Valid Range: 0-65535 (0=off)
-Default Value: 32
-
-This value, in units of 1.024 microseconds, limits the delay in which a
-transmit interrupt is generated. Useful only if TxIntDelay is non-zero,
-this value ensures that an interrupt is generated after the initial
-packet is sent on the wire within the set amount of time. Proper tuning,
-along with TxIntDelay, may improve traffic throughput in specific
-network conditions.
-
-Copybreak
----------
-Valid Range: 0-xxxxxxx (0=off)
-Default Value: 256
-
-Driver copies all packets below or equaling this size to a fresh RX
-buffer before handing it up the stack.
-
-This parameter is different than other parameters, in that it is a
-single (not 1,1,1 etc.) parameter applied to all driver instances and
-it is also available during runtime at
-/sys/module/e1000e/parameters/copybreak
-
-SmartPowerDownEnable
---------------------
-Valid Range: 0-1
-Default Value: 0 (disabled)
-
-Allows PHY to turn off in lower power states. The user can set this parameter
-in supported chipsets.
-
-KumeranLockLoss
----------------
-Valid Range: 0-1
-Default Value: 1 (enabled)
-
-This workaround skips resetting the PHY at shutdown for the initial
-silicon releases of ICH8 systems.
-
-IntMode
--------
-Valid Range: 0-2 (0=legacy, 1=MSI, 2=MSI-X)
-Default Value: 2
-
-Allows changing the interrupt mode at module load time, without requiring a
-recompile. If the driver load fails to enable a specific interrupt mode, the
-driver will try other interrupt modes, from least to most compatible. The
-interrupt order is MSI-X, MSI, Legacy. If specifying MSI (IntMode=1)
-interrupts, only MSI and Legacy will be attempted.
-
-CrcStripping
-------------
-Valid Range: 0-1
-Default Value: 1 (enabled)
-
-Strip the CRC from received packets before sending up the network stack. If
-you have a machine with a BMC enabled but cannot receive IPMI traffic after
-loading or enabling the driver, try disabling this feature.
-
-WriteProtectNVM
----------------
-Valid Range: 0,1
-Default Value: 1
-
-If set to 1, configure the hardware to ignore all write/erase cycles to the
-GbE region in the ICHx NVM (in order to prevent accidental corruption of the
-NVM). This feature can be disabled by setting the parameter to 0 during initial
-driver load.
-NOTE: The machine must be power cycled (full off/on) when enabling NVM writes
-via setting the parameter to zero. Once the NVM has been locked (via the
-parameter at 1 when the driver loads) it cannot be unlocked except via power
-cycle.
-
-Additional Configurations
-=========================
-
- Jumbo Frames
- ------------
- Jumbo Frames support is enabled by changing the MTU to a value larger than
- the default of 1500. Use the ifconfig command to increase the MTU size.
- For example:
-
- ifconfig eth<x> mtu 9000 up
-
- This setting is not saved across reboots.
-
- Notes:
-
- - The maximum MTU setting for Jumbo Frames is 9216. This value coincides
- with the maximum Jumbo Frames size of 9234 bytes.
-
- - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
- poor performance or loss of link.
-
- - Some adapters limit Jumbo Frames sized packets to a maximum of
- 4096 bytes and some adapters do not support Jumbo Frames.
-
- - Jumbo Frames cannot be configured on an 82579-based Network device, if
- MACSec is enabled on the system.
-
- ethtool
- -------
- The driver utilizes the ethtool interface for driver configuration and
- diagnostics, as well as displaying statistical information. We
- strongly recommend downloading the latest version of ethtool at:
-
- https://kernel.org/pub/software/network/ethtool/
-
- NOTE: When validating enable/disable tests on some parts (82578, for example)
- you need to add a few seconds between tests when working with ethtool.
-
- Speed and Duplex
- ----------------
- Speed and Duplex are configured through the ethtool* utility. For
- instructions, refer to the ethtool man page.
-
- Enabling Wake on LAN* (WoL)
- ---------------------------
- WoL is configured through the ethtool* utility. For instructions on
- enabling WoL with ethtool, refer to the ethtool man page.
-
- WoL will be enabled on the system during the next shut down or reboot.
- For this driver version, in order to enable WoL, the e1000e driver must be
- loaded when shutting down or rebooting the system.
-
- In most cases Wake On LAN is only supported on port A for multiple port
- adapters. To verify if a port supports Wake on Lan run ethtool eth<X>.
-
-Support
-=======
-
-For general information, go to the Intel support website at:
-
- www.intel.com/support/
-
-or the Intel Wired Networking project hosted by Sourceforge at:
-
- http://sourceforge.net/projects/e1000
-
-If an issue is identified with the released source code on the supported
-kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index e6b4ebb2b243..2196b824e96c 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -203,11 +203,11 @@ opcodes as defined in linux/filter.h stand for:
Instruction Addressing mode Description
- ld 1, 2, 3, 4, 10 Load word into A
+ ld 1, 2, 3, 4, 12 Load word into A
ldi 4 Load word into A
ldh 1, 2 Load half-word into A
ldb 1, 2 Load byte into A
- ldx 3, 4, 5, 10 Load word into X
+ ldx 3, 4, 5, 12 Load word into X
ldxi 4 Load word into X
ldxb 5 Load byte into X
@@ -216,14 +216,14 @@ opcodes as defined in linux/filter.h stand for:
jmp 6 Jump to label
ja 6 Jump to label
- jeq 7, 8 Jump on A == k
- jneq 8 Jump on A != k
- jne 8 Jump on A != k
- jlt 8 Jump on A < k
- jle 8 Jump on A <= k
- jgt 7, 8 Jump on A > k
- jge 7, 8 Jump on A >= k
- jset 7, 8 Jump on A & k
+ jeq 7, 8, 9, 10 Jump on A == <x>
+ jneq 9, 10 Jump on A != <x>
+ jne 9, 10 Jump on A != <x>
+ jlt 9, 10 Jump on A < <x>
+ jle 9, 10 Jump on A <= <x>
+ jgt 7, 8, 9, 10 Jump on A > <x>
+ jge 7, 8, 9, 10 Jump on A >= <x>
+ jset 7, 8, 9, 10 Jump on A & <x>
add 0, 4 A + <x>
sub 0, 4 A - <x>
@@ -240,7 +240,7 @@ opcodes as defined in linux/filter.h stand for:
tax Copy A into X
txa Copy X into A
- ret 4, 9 Return
+ ret 4, 11 Return
The next table shows addressing formats from the 2nd column:
@@ -254,9 +254,11 @@ The next table shows addressing formats from the 2nd column:
5 4*([k]&0xf) Lower nibble * 4 at byte offset k in the packet
6 L Jump label L
7 #k,Lt,Lf Jump to Lt if true, otherwise jump to Lf
- 8 #k,Lt Jump to Lt if predicate is true
- 9 a/%a Accumulator A
- 10 extension BPF extension
+ 8 x/%x,Lt,Lf Jump to Lt if true, otherwise jump to Lf
+ 9 #k,Lt Jump to Lt if predicate is true
+ 10 x/%x,Lt Jump to Lt if predicate is true
+ 11 a/%a Accumulator A
+ 12 extension BPF extension
The Linux kernel also has a couple of BPF extensions that are used along
with the class of load instructions by "overloading" the k argument with
@@ -1125,6 +1127,14 @@ pointer type. The types of pointers describe their base, as follows:
PTR_TO_STACK Frame pointer.
PTR_TO_PACKET skb->data.
PTR_TO_PACKET_END skb->data + headlen; arithmetic forbidden.
+ PTR_TO_SOCKET Pointer to struct bpf_sock_ops, implicitly refcounted.
+ PTR_TO_SOCKET_OR_NULL
+ Either a pointer to a socket, or NULL; socket lookup
+ returns this type, which becomes a PTR_TO_SOCKET when
+ checked != NULL. PTR_TO_SOCKET is reference-counted,
+ so programs must release the reference through the
+ socket release function before the end of the program.
+ Arithmetic on these pointers is forbidden.
However, a pointer may be offset from this base (as a result of pointer
arithmetic), and this is tracked in two parts: the 'fixed offset' and 'variable
offset'. The former is used when an exactly-known value (e.g. an immediate
@@ -1171,6 +1181,13 @@ over the Ethernet header, then reads IHL and addes (IHL * 4), the resulting
pointer will have a variable offset known to be 4n+2 for some n, so adding the 2
bytes (NET_IP_ALIGN) gives a 4-byte alignment and so word-sized accesses through
that pointer are safe.
+The 'id' field is also used on PTR_TO_SOCKET and PTR_TO_SOCKET_OR_NULL, common
+to all copies of the pointer returned from a socket lookup. This has similar
+behaviour to the handling for PTR_TO_MAP_VALUE_OR_NULL->PTR_TO_MAP_VALUE, but
+it also handles reference tracking for the pointer. PTR_TO_SOCKET implicitly
+represents a reference to the corresponding 'struct sock'. To ensure that the
+reference is not leaked, it is imperative to NULL-check the reference and,
+in the non-NULL case, pass the valid reference to the socket release function.
Direct packet access
--------------------
@@ -1444,6 +1461,55 @@ Error:
8: (7a) *(u64 *)(r0 +0) = 1
R0 invalid mem access 'imm'
+Program that performs a socket lookup, then sets the pointer to NULL without
+checking it:
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+Error:
+ 0: (b7) r2 = 0
+ 1: (63) *(u32 *)(r10 -8) = r2
+ 2: (bf) r2 = r10
+ 3: (07) r2 += -8
+ 4: (b7) r3 = 4
+ 5: (b7) r4 = 0
+ 6: (b7) r5 = 0
+ 7: (85) call bpf_sk_lookup_tcp#65
+ 8: (b7) r0 = 0
+ 9: (95) exit
+ Unreleased reference id=1, alloc_insn=7
+
+Program that performs a socket lookup but does not NULL-check the returned
+value:
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
+ BPF_EXIT_INSN(),
+Error:
+ 0: (b7) r2 = 0
+ 1: (63) *(u32 *)(r10 -8) = r2
+ 2: (bf) r2 = r10
+ 3: (07) r2 += -8
+ 4: (b7) r3 = 4
+ 5: (b7) r4 = 0
+ 6: (b7) r5 = 0
+ 7: (85) call bpf_sk_lookup_tcp#65
+ 8: (95) exit
+ Unreleased reference id=1, alloc_insn=7
+
Testing
-------
diff --git a/Documentation/networking/fm10k.rst b/Documentation/networking/fm10k.rst
new file mode 100644
index 000000000000..bf5e5942f28d
--- /dev/null
+++ b/Documentation/networking/fm10k.rst
@@ -0,0 +1,141 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Linux* Base Driver for Intel(R) Ethernet Multi-host Controller
+==============================================================
+
+August 20, 2018
+Copyright(c) 2015-2018 Intel Corporation.
+
+Contents
+========
+- Identifying Your Adapter
+- Additional Configurations
+- Performance Tuning
+- Known Issues
+- Support
+
+Identifying Your Adapter
+========================
+The driver in this release is compatible with devices based on the Intel(R)
+Ethernet Multi-host Controller.
+
+For information on how to identify your adapter, and for the latest Intel
+network drivers, refer to the Intel Support website:
+http://www.intel.com/support
+
+
+Flow Control
+------------
+The Intel(R) Ethernet Switch Host Interface Driver does not support Flow
+Control. It will not send pause frames. This may result in dropped frames.
+
+
+Virtual Functions (VFs)
+-----------------------
+Use sysfs to enable VFs.
+Valid Range: 0-64
+
+For example::
+
+ echo $num_vf_enabled > /sys/class/net/$dev/device/sriov_numvfs //enable VFs
+ echo 0 > /sys/class/net/$dev/device/sriov_numvfs //disable VFs
+
+NOTE: Neither the device nor the driver control how VFs are mapped into config
+space. Bus layout will vary by operating system. On operating systems that
+support it, you can check sysfs to find the mapping.
+
+NOTE: When SR-IOV mode is enabled, hardware VLAN filtering and VLAN tag
+stripping/insertion will remain enabled. Please remove the old VLAN filter
+before the new VLAN filter is added. For example::
+
+ ip link set eth0 vf 0 vlan 100 // set vlan 100 for VF 0
+ ip link set eth0 vf 0 vlan 0 // Delete vlan 100
+ ip link set eth0 vf 0 vlan 200 // set a new vlan 200 for VF 0
+
+
+Additional Features and Configurations
+======================================
+
+Jumbo Frames
+------------
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit (MTU)
+to a value larger than the default value of 1500.
+
+Use the ifconfig command to increase the MTU size. For example, enter the
+following where <x> is the interface number::
+
+ ifconfig eth<x> mtu 9000 up
+
+Alternatively, you can use the ip command as follows::
+
+ ip link set mtu 9000 dev eth<x>
+ ip link set up dev eth<x>
+
+This setting is not saved across reboots. The setting change can be made
+permanent by adding 'MTU=9000' to the file:
+
+- For RHEL: /etc/sysconfig/network-scripts/ifcfg-eth<x>
+- For SLES: /etc/sysconfig/network/<config_file>
+
+NOTE: The maximum MTU setting for Jumbo Frames is 15342. This value coincides
+with the maximum Jumbo Frames size of 15364 bytes.
+
+NOTE: This driver will attempt to use multiple page sized buffers to receive
+each jumbo packet. This should help to avoid buffer starvation issues when
+allocating receive packets.
+
+
+Generic Receive Offload, aka GRO
+--------------------------------
+The driver supports the in-kernel software implementation of GRO. GRO has
+shown that by coalescing Rx traffic into larger chunks of data, CPU
+utilization can be significantly reduced when under large Rx load. GRO is an
+evolution of the previously-used LRO interface. GRO is able to coalesce
+other protocols besides TCP. It's also safe to use with configurations that
+are problematic for LRO, namely bridging and iSCSI.
+
+
+
+Supported ethtool Commands and Options for Filtering
+----------------------------------------------------
+-n --show-nfc
+ Retrieves the receive network flow classification configurations.
+
+rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6
+ Retrieves the hash options for the specified network traffic type.
+
+-N --config-nfc
+ Configures the receive network flow classification.
+
+rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6 m|v|t|s|d|f|n|r
+ Configures the hash options for the specified network traffic type.
+
+- udp4: UDP over IPv4
+- udp6: UDP over IPv6
+- f Hash on bytes 0 and 1 of the Layer 4 header of the rx packet.
+- n Hash on bytes 2 and 3 of the Layer 4 header of the rx packet.
+
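+As an example of the options above, the following hashes IPv4 UDP traffic on
+the source/destination IP addresses and both halves of the UDP ports
+(eth<x> is illustrative)::
+
+ ethtool -N eth<x> rx-flow-hash udp4 sdfn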
+
+Known Issues/Troubleshooting
+============================
+
+Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS under Linux KVM
+---------------------------------------------------------------------------------------
+KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
+includes traditional PCIe devices, as well as SR-IOV-capable devices based on
+the Intel Ethernet Controller XL710.
+
+
+Support
+=======
+For general information, go to the Intel support website at:
+
+https://www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+https://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on a supported kernel
+with a supported adapter, email the specific information related to the issue
+to e1000-devel@lists.sf.net.
diff --git a/Documentation/networking/i40e.rst b/Documentation/networking/i40e.rst
new file mode 100644
index 000000000000..0cc16c525d10
--- /dev/null
+++ b/Documentation/networking/i40e.rst
@@ -0,0 +1,770 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Linux* Base Driver for the Intel(R) Ethernet Controller 700 Series
+==================================================================
+
+Intel 40 Gigabit Linux driver.
+Copyright(c) 1999-2018 Intel Corporation.
+
+Contents
+========
+
+- Overview
+- Identifying Your Adapter
+- Intel(R) Ethernet Flow Director
+- Additional Configurations
+- Known Issues
+- Support
+
+
+Driver information can be obtained using ethtool, lspci, and ifconfig.
+Instructions on updating ethtool can be found in the section Additional
+Configurations later in this document.
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel adapter. All hardware requirements listed apply to use
+with Linux.
+
+
+Identifying Your Adapter
+========================
+The driver is compatible with devices based on the following:
+
+ * Intel(R) Ethernet Controller X710
+ * Intel(R) Ethernet Controller XL710
+ * Intel(R) Ethernet Network Connection X722
+ * Intel(R) Ethernet Controller XXV710
+
+For the best performance, make sure the latest NVM/FW is installed on your
+device.
+
+For information on how to identify your adapter, and for the latest NVM/FW
+images and Intel network drivers, refer to the Intel Support website:
+https://www.intel.com/support
+
+SFP+ and QSFP+ Devices
+----------------------
+For information about supported media, refer to this document:
+https://www.intel.com/content/dam/www/public/us/en/documents/release-notes/xl710-ethernet-controller-feature-matrix.pdf
+
+NOTE: Some adapters based on the Intel(R) Ethernet Controller 700 Series only
+support Intel Ethernet Optics modules. On these adapters, other modules are not
+supported and will not function. In all cases Intel recommends using Intel
+Ethernet Optics; other modules may function but are not validated by Intel.
+Contact Intel for supported media types.
+
+NOTE: For connections based on Intel(R) Ethernet Controller 700 Series, support
+is dependent on your system board. Please see your vendor for details.
+
+NOTE: In systems that do not have adequate airflow to cool the adapter and
+optical modules, you must use high temperature optical modules.
+
+Virtual Functions (VFs)
+-----------------------
+Use sysfs to enable VFs. For example::
+
+ #echo $num_vf_enabled > /sys/class/net/$dev/device/sriov_numvfs #enable VFs
+ #echo 0 > /sys/class/net/$dev/device/sriov_numvfs #disable VFs
+
+For example, the following instructions will configure PF eth0 and the first VF
+on VLAN 10::
+
+ $ ip link set dev eth0 vf 0 vlan 10
+
+VLAN Tag Packet Steering
+------------------------
+Allows you to send all packets with a specific VLAN tag to a particular SR-IOV
+virtual function (VF). Further, this feature allows you to designate a
+particular VF as trusted, and allows that trusted VF to request selective
+promiscuous mode on the Physical Function (PF).
+
+To set a VF as trusted or untrusted, enter the following command in the
+Hypervisor::
+
+ # ip link set dev eth0 vf 1 trust [on|off]
+
+Once the VF is designated as trusted, use the following commands in the VM to
+set the VF to promiscuous mode.
+
+::
+
+ For promiscuous all:
+ #ip link set eth2 promisc on
+ Where eth2 is a VF interface in the VM
+
+ For promiscuous Multicast:
+ #ip link set eth2 allmulticast on
+ Where eth2 is a VF interface in the VM
+
+NOTE: By default, the ethtool priv-flag vf-true-promisc-support is set to
+"off",meaning that promiscuous mode for the VF will be limited. To set the
+promiscuous mode for the VF to true promiscuous and allow the VF to see all
+ingress traffic, use the following command::
+
+ #ethtool --set-priv-flags p261p1 vf-true-promisc-support on
+
+The vf-true-promisc-support priv-flag does not enable promiscuous mode; rather,
+it designates which type of promiscuous mode (limited or true) you will get
+when you enable promiscuous mode using the ip link commands above. Note that
+this is a global setting that affects the entire device. However, the
+vf-true-promisc-support priv-flag is only exposed to the first PF of the
+device. The PF remains in limited promiscuous mode (unless it is in MFP mode)
+regardless of the vf-true-promisc-support setting.
+
+Now add a VLAN interface on the VF interface::
+
+ #ip link add link eth2 name eth2.100 type vlan id 100
+
+Note that the order in which you set the VF to promiscuous mode and add the
+VLAN interface does not matter (you can do either first). The end result in
+this example is that the VF will get all traffic that is tagged with VLAN 100.
+
+Intel(R) Ethernet Flow Director
+-------------------------------
+The Intel Ethernet Flow Director performs the following tasks:
+
+- Directs receive packets according to their flows to different queues.
+- Enables tight control on routing a flow in the platform.
+- Matches flows and CPU cores for flow affinity.
+- Supports multiple parameters for flexible flow classification and load
+ balancing (in SFP mode only).
+
+NOTE: The Linux i40e driver supports the following flow types: IPv4, TCPv4, and
+UDPv4. For a given flow type, it supports valid combinations of IP addresses
+(source or destination) and UDP/TCP ports (source and destination). For
+example, you can supply only a source IP address, a source IP address and a
+destination port, or any combination of one or more of these four parameters.
+
+NOTE: The Linux i40e driver allows you to filter traffic based on a
+user-defined flexible two-byte pattern and offset by using the ethtool user-def
+and mask fields. Only L3 and L4 flow types are supported for user-defined
+flexible filters. For a given flow type, you must clear all Intel Ethernet Flow
+Director filters before changing the input set (for that flow type).
+
+To enable or disable the Intel Ethernet Flow Director::
+
+ # ethtool -K ethX ntuple <on|off>
+
+When disabling ntuple filters, all the user programmed filters are flushed from
+the driver cache and hardware. All needed filters must be re-added when ntuple
+is re-enabled.
+
+To add a filter that directs packets to queue 2, use the -U or -N switch::
+
+ # ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
+ 192.168.10.2 src-port 2000 dst-port 2001 action 2 [loc 1]
+
+To set a filter using only the source and destination IP address::
+
+ # ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
+ 192.168.10.2 action 2 [loc 1]
+
+To see the list of filters currently present::
+
+ # ethtool <-u|-n> ethX
+
+Application Targeted Routing (ATR) Perfect Filters
+--------------------------------------------------
+ATR is enabled by default when the kernel is in multiple transmit queue mode.
+An ATR Intel Ethernet Flow Director filter rule is added when a TCP-IP flow
+starts and is deleted when the flow ends. When a TCP-IP Intel Ethernet Flow
+Director rule is added from ethtool (Sideband filter), ATR is turned off by the
+driver. To re-enable ATR, the sideband can be disabled with the ethtool -K
+option. For example::
+
+ ethtool -K [adapter] ntuple [off|on]
+
+If sideband is re-enabled after ATR is re-enabled, ATR remains enabled until a
+TCP-IP flow is added. When all TCP-IP sideband rules are deleted, ATR is
+automatically re-enabled.
+
+Packets that match the ATR rules are counted in fdir_atr_match stats in
+ethtool, which also can be used to verify whether ATR rules still exist.
+
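+For example (ethX is illustrative)::
+
+ ethtool -S ethX | grep fdir_atr_match
+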
+Sideband Perfect Filters
+------------------------
+Sideband Perfect Filters are used to direct traffic that matches specified
+characteristics. They are enabled through ethtool's ntuple interface. To add a
+new filter use the following command::
+
+ ethtool -U <device> flow-type <type> src-ip <ip> dst-ip <ip> src-port <port> \
+ dst-port <port> action <queue>
+
+Where:
+ <device> - the ethernet device to program
+ <type> - can be ip4, tcp4, udp4, or sctp4
+ <ip> - the ip address to match on
+ <port> - the port number to match on
+ <queue> - the queue to direct traffic towards (-1 discards matching traffic)
+
+Use the following command to display all of the active filters::
+
+ ethtool -u <device>
+
+Use the following command to delete a filter::
+
+ ethtool -U <device> delete <N>
+
+Where <N> is the filter id displayed when printing all the active filters, and
+may also have been specified using "loc <N>" when adding the filter.
+
+The following example matches TCP traffic sent from 192.168.0.1, port 5300,
+directed to 192.168.0.5, port 80, and sends it to queue 7::
+
+ ethtool -U enp130s0 flow-type tcp4 src-ip 192.168.0.1 dst-ip 192.168.0.5 \
+ src-port 5300 dst-port 80 action 7
+
+For each flow-type, the programmed filters must all have the same matching
+input set. For example, issuing the following two commands is acceptable::
+
+ ethtool -U enp130s0 flow-type ip4 src-ip 192.168.0.1 src-port 5300 action 7
+ ethtool -U enp130s0 flow-type ip4 src-ip 192.168.0.5 src-port 55 action 10
+
+Issuing the next two commands, however, is not acceptable, since the first
+specifies src-ip and the second specifies dst-ip::
+
+ ethtool -U enp130s0 flow-type ip4 src-ip 192.168.0.1 src-port 5300 action 7
+ ethtool -U enp130s0 flow-type ip4 dst-ip 192.168.0.5 src-port 55 action 10
+
+The second command will fail with an error. You may program multiple filters
+with the same fields, using different values, but, on one device, you may not
+program two tcp4 filters with different matching fields.
+
+Matching on a sub-portion of a field is not supported by the i40e driver, thus
+partial mask fields are not supported.
+
+The driver also supports matching user-defined data within the packet payload.
+This flexible data is specified using the "user-def" field of the ethtool
+command in the following way:
+
++----------------------------+--------------------------+
+| 31 28 24 20 16 | 15 12 8 4 0 |
++----------------------------+--------------------------+
+| offset into packet payload | 2 bytes of flexible data |
++----------------------------+--------------------------+
+
+For example,
+
+::
+
+ ... user-def 0x4FFFF ...
+
+tells the filter to look 4 bytes into the payload and match that value against
+0xFFFF. The offset is based on the beginning of the payload, and not the
+beginning of the packet. Thus
+
+::
+
+ flow-type tcp4 ... user-def 0x8BEAF ...
+
+would match TCP/IPv4 packets which have the value 0xBEAF 8 bytes into the
+TCP/IPv4 payload.
+
+Note that ICMP headers are parsed as 4 bytes of header and 4 bytes of payload.
+Thus to match the first byte of the payload, you must actually add 4 bytes to
+the offset. Also note that ip4 filters match both ICMP frames as well as raw
+(unknown) ip4 frames, where the payload will be the L3 payload of the IP4 frame.
+
+The maximum offset is 64. The hardware will only read up to 64 bytes of data
+from the payload. The offset must be even because the flexible data is 2 bytes
+long and must be aligned to byte 0 of the packet payload.
+
+The user-defined flexible offset is also considered part of the input set and
+cannot be programmed separately for multiple filters of the same type. However,
+the flexible data is not part of the input set and multiple filters may use the
+same offset but match against different data.
+
+To create filters that direct traffic to a specific Virtual Function, use the
+"action" parameter. Specify the action as a 64 bit value, where the lower 32
+bits represents the queue number, while the next 8 bits represent which VF.
+Note that 0 is the PF, so the VF identifier is offset by 1. For example::
+
+ ... action 0x800000002 ...
+
+specifies to direct traffic to Virtual Function 7 (8 minus 1) into queue 2 of
+that VF.
+
+Note that these filters will not break internal routing rules, and will not
+route traffic that otherwise would not have been sent to the specified Virtual
+Function.
+
+Setting the link-down-on-close Private Flag
+-------------------------------------------
+When the link-down-on-close private flag is set to "on", the port's link will
+go down when the interface is brought down using the ifconfig ethX down command.
+
+Use ethtool to view and set link-down-on-close, as follows::
+
+ ethtool --show-priv-flags ethX
+ ethtool --set-priv-flags ethX link-down-on-close [on|off]
+
+Viewing Link Messages
+---------------------
+Link messages will not be displayed to the console if the distribution is
+restricting system messages. In order to see network driver link messages on
+your console, set dmesg to eight by entering the following::
+
+ dmesg -n 8
+
+NOTE: This setting is not saved across reboots.
+
+Jumbo Frames
+------------
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit (MTU)
+to a value larger than the default value of 1500.
+
+Use the ifconfig command to increase the MTU size. For example, enter the
+following where <x> is the interface number::
+
+ ifconfig eth<x> mtu 9000 up
+
+Alternatively, you can use the ip command as follows::
+
+ ip link set mtu 9000 dev eth<x>
+ ip link set up dev eth<x>
+
+This setting is not saved across reboots. The setting change can be made
+permanent by adding 'MTU=9000' to the file::
+
+ /etc/sysconfig/network-scripts/ifcfg-eth<x> // for RHEL
+ /etc/sysconfig/network/<config_file> // for SLES
+
+NOTE: The maximum MTU setting for Jumbo Frames is 9702. This value coincides
+with the maximum Jumbo Frames size of 9728 bytes.
+
+NOTE: This driver will attempt to use multiple page sized buffers to receive
+each jumbo packet. This should help to avoid buffer starvation issues when
+allocating receive packets.
+
+ethtool
+-------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information. The latest ethtool
+version is required for this functionality. Download it at:
+https://www.kernel.org/pub/software/network/ethtool/
+
+Supported ethtool Commands and Options for Filtering
+----------------------------------------------------
+-n --show-nfc
+ Retrieves the receive network flow classification configurations.
+
+rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6
+ Retrieves the hash options for the specified network traffic type.
+
+-N --config-nfc
+ Configures the receive network flow classification.
+
+rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6 m|v|t|s|d|f|n|r...
+ Configures the hash options for the specified network traffic type.
+
+udp4 UDP over IPv4
+udp6 UDP over IPv6
+
+f Hash on bytes 0 and 1 of the Layer 4 header of the Rx packet.
+n Hash on bytes 2 and 3 of the Layer 4 header of the Rx packet.
+
+Speed and Duplex Configuration
+------------------------------
+In addressing speed and duplex configuration issues, you need to distinguish
+between copper-based adapters and fiber-based adapters.
+
+In the default mode, an Intel(R) Ethernet Network Adapter using copper
+connections will attempt to auto-negotiate with its link partner to determine
+the best setting. If the adapter cannot establish link with the link partner
+using auto-negotiation, you may need to manually configure the adapter and link
+partner to identical settings to establish link and pass packets. This should
+only be needed when attempting to link with an older switch that does not
+support auto-negotiation or one that has been forced to a specific speed or
+duplex mode. Your link partner must match the setting you choose. 1 Gbps speeds
+and higher cannot be forced. Use the autonegotiation advertising setting to
+manually set devices for 1 Gbps and higher.
+
+NOTE: You cannot set the speed on devices based on the Intel(R) Ethernet
+Network Adapter XXV710.
+
+Speed, duplex, and autonegotiation advertising are configured through the
+ethtool* utility.
+
+Caution: Only experienced network administrators should force speed and duplex
+or change autonegotiation advertising manually. The settings at the switch must
+always match the adapter settings. Adapter performance may suffer or your
+adapter may not operate if you configure the adapter differently from your
+switch.
+
+An Intel(R) Ethernet Network Adapter using fiber-based connections, however,
+will not attempt to auto-negotiate with its link partner since those adapters
+operate only in full duplex and only at their native speed.
+
+NAPI
+----
+NAPI (Rx polling mode) is supported in the i40e driver.
+For more information on NAPI, see
+https://wiki.linuxfoundation.org/networking/napi
+
+Flow Control
+------------
+Ethernet Flow Control (IEEE 802.3x) can be configured with ethtool to enable
+receiving and transmitting pause frames for i40e. When transmit is enabled,
+pause frames are generated when the receive packet buffer crosses a predefined
+threshold. When receive is enabled, the transmit unit will halt for the time
+delay specified when a pause frame is received.
+
+NOTE: You must have a flow control capable link partner.
+
+Flow Control is on by default.
+
+Use ethtool to change the flow control settings.
+
+To enable or disable Rx or Tx Flow Control::
+
+ ethtool -A eth? rx <on|off> tx <on|off>
+
+Note: This command only enables or disables Flow Control if auto-negotiation is
+disabled. If auto-negotiation is enabled, this command changes the parameters
+used for auto-negotiation with the link partner.
+
+To enable or disable auto-negotiation::
+
+ ethtool -s eth? autoneg <on|off>
+
+Note: Flow Control auto-negotiation is part of link auto-negotiation. Depending
+on your device, you may not be able to change the auto-negotiation setting.
+
+RSS Hash Flow
+-------------
+Allows you to set the hash bytes per flow type and any combination of one or
+more options for Receive Side Scaling (RSS) hash byte configuration.
+
+::
+
+ # ethtool -N <dev> rx-flow-hash <type> <option>
+
+Where <type> is:
+ tcp4 signifying TCP over IPv4
+ udp4 signifying UDP over IPv4
+ tcp6 signifying TCP over IPv6
+ udp6 signifying UDP over IPv6
+And <option> is one or more of:
+ s Hash on the IP source address of the Rx packet.
+ d Hash on the IP destination address of the Rx packet.
+ f Hash on bytes 0 and 1 of the Layer 4 header of the Rx packet.
+ n Hash on bytes 2 and 3 of the Layer 4 header of the Rx packet.
+
+MAC and VLAN anti-spoofing feature
+----------------------------------
+When a malicious driver attempts to send a spoofed packet, it is dropped by the
+hardware and not transmitted.
+NOTE: This feature can be disabled for a specific Virtual Function (VF)::
+
+ ip link set <pf dev> vf <vf id> spoofchk {off|on}
+
+IEEE 1588 Precision Time Protocol (PTP) Hardware Clock (PHC)
+------------------------------------------------------------
+Precision Time Protocol (PTP) is used to synchronize clocks in a computer
+network. PTP support varies among Intel devices that support this driver. Use
+"ethtool -T <netdev name>" to get a definitive list of PTP capabilities
+supported by the device.
+
+IEEE 802.1ad (QinQ) Support
+---------------------------
+The IEEE 802.1ad standard, informally known as QinQ, allows for multiple VLAN
+IDs within a single Ethernet frame. VLAN IDs are sometimes referred to as
+"tags," and multiple VLAN IDs are thus referred to as a "tag stack." Tag stacks
+allow L2 tunneling and the ability to segregate traffic within a particular
+VLAN ID, among other uses.
+
+The following are examples of how to configure 802.1ad (QinQ)::
+
+ ip link add link eth0 eth0.24 type vlan proto 802.1ad id 24
+ ip link add link eth0.24 eth0.24.371 type vlan proto 802.1Q id 371
+
+Where "24" and "371" are example VLAN IDs.
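+
+The resulting stacked configuration can be inspected with iproute2's detailed
+link output; the interface name below follows the example above::
+
+ ip -d link show eth0.24.371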
+
+NOTES:
+ Receive checksum offloads, cloud filters, and VLAN acceleration are not
+ supported for 802.1ad (QinQ) packets.
+
+VXLAN and GENEVE Overlay HW Offloading
+--------------------------------------
+Virtual Extensible LAN (VXLAN) allows you to extend an L2 network over an L3
+network, which may be useful in a virtualized or cloud environment. Some
+Intel(R) Ethernet Network devices perform VXLAN processing, offloading it from
+the operating system. This reduces CPU utilization.
+
+VXLAN offloading is controlled by the Tx and Rx checksum offload options
+provided by ethtool. That is, if Tx checksum offload is enabled, and the
+adapter has the capability, VXLAN offloading is also enabled.
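+
+Consequently, the current checksum offload state, and with it whether VXLAN
+offloading is in effect, can be inspected with ethtool; eth0 is a
+placeholder::
+
+ # ethtool -k eth0 | grep checksum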
+
+Support for VXLAN and GENEVE HW offloading is dependent on kernel support of
+the HW offloading features.
+
+Multiple Functions per Port
+---------------------------
+Some adapters based on the Intel Ethernet Controller X710/XL710 support
+multiple functions on a single physical port. Configure these functions through
+the System Setup/BIOS.
+
+Minimum TX Bandwidth is the guaranteed minimum data transmission bandwidth, as
+a percentage of the full physical port link speed, that the partition will
+receive. The bandwidth the partition is awarded will never fall below the level
+you specify.
+
+The range for the minimum bandwidth values is:
+1 to ((100 minus # of partitions on the physical port) plus 1)
+For example, if a physical port has 4 partitions, the range would be:
+1 to ((100 - 4) + 1), that is, 1 to 97.
+
+The Maximum Bandwidth percentage represents the maximum transmit bandwidth
+allocated to the partition as a percentage of the full physical port link
+speed. The accepted range of values is 1-100. The value acts as a limiter,
+should you choose that any one particular function not be able to consume 100%
+of a port's bandwidth (should it be available). The sum of all the Maximum
+Bandwidth values is not restricted, because no more than 100% of a port's
+bandwidth can ever be used.
+
+NOTE: X710/XXV710 devices fail to enable Max VFs (64) when Multiple Functions
+per Port (MFP) and SR-IOV are enabled. An error from i40e is logged that says
+"add vsi failed for VF N, aq_err 16". To work around the issue, enable fewer
+than 64 virtual functions (VFs).
+
+Data Center Bridging (DCB)
+--------------------------
+DCB is a Quality of Service (QoS) configuration implemented in hardware. It
+uses the VLAN priority tag (802.1p) to filter traffic. That means that there
+are 8
+different priorities that traffic can be filtered into. It also enables
+priority flow control (802.1Qbb) which can limit or eliminate the number of
+dropped packets during network stress. Bandwidth can be allocated to each of
+these priorities, which is enforced at the hardware level (802.1Qaz).
+
+Adapter firmware implements LLDP and DCBX protocol agents as per 802.1AB and
+802.1Qaz respectively. The firmware-based DCBX agent runs in willing mode only
+and can accept settings from a DCBX-capable peer. Software configuration of
+DCBX parameters via dcbtool/lldptool is not supported.
+
+NOTE: Firmware LLDP can be disabled by setting the private flag disable-fw-lldp.
+
+The i40e driver implements the DCB netlink interface layer to allow user-space
+to communicate with the driver and query DCB configuration for the port.
+
+NOTE:
+The kernel assumes that TC0 is available, and will disable Priority Flow
+Control (PFC) on the device if TC0 is not available. To fix this, ensure TC0 is
+enabled when setting up DCB on your switch.
+
+Interrupt Rate Limiting
+-----------------------
+:Valid Range: 0-235 (0=no limit)
+
+The Intel(R) Ethernet Controller XL710 family supports an interrupt rate
+limiting mechanism. The user can control, via ethtool, the number of
+microseconds between interrupts.
+
+Syntax::
+
+ # ethtool -C ethX rx-usecs-high N
+
+The range of 0-235 microseconds provides an effective range of 4,310 to 250,000
+interrupts per second. The value of rx-usecs-high can be set independently of
+rx-usecs and tx-usecs in the same ethtool command, and is also independent of
+the adaptive interrupt moderation algorithm. The underlying hardware supports
+granularity in 4-microsecond intervals, so adjacent values may result in the
+same interrupt rate.
+
+One possible use case is the following::
+
+ # ethtool -C ethX adaptive-rx off adaptive-tx off rx-usecs-high 20 rx-usecs \
+ 5 tx-usecs 5
+
+The above command would disable adaptive interrupt moderation, and allow a
+maximum of 5 microseconds before indicating a receive or transmit was complete.
+However, instead of resulting in as many as 200,000 interrupts per second, it
+limits total interrupts per second to 50,000 via the rx-usecs-high parameter.
+
+Performance Optimization
+========================
+Driver defaults are meant to fit a wide variety of workloads, but if further
+optimization is required we recommend experimenting with the following settings.
+
+NOTE: For better performance when processing small (64B) frame sizes, try
+enabling Hyper-Threading in the BIOS to increase the number of logical cores
+in the system and subsequently increase the number of queues available to the
+adapter.
+
+Virtualized Environments
+------------------------
+1. Disable XPS on both ends by using the included virt_perf_default script
+or by running the following command as root::
+
+ for file in /sys/class/net/<ethX>/queues/tx-*/xps_cpus;
+ do echo 0 > $file; done
+
+2. Using the appropriate mechanism (vcpupin) in the VM, pin the vCPUs to
+individual logical CPUs, making sure to use a set of CPUs included in the
+device's local_cpulist (/sys/class/net/<ethX>/device/local_cpulist); a sketch
+follows this list.
+
+3. Configure as many Rx/Tx queues in the VM as available. Do not rely on
+the default setting of 1.
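+
+As a sketch of step 2, the pinning could be done with libvirt's virsh; the
+domain name guest1 and the host CPU numbers below are illustrative, and the
+host CPUs should come from the device's local_cpulist::
+
+ # virsh vcpupin guest1 0 8
+ # virsh vcpupin guest1 1 9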
+
+
+Non-virtualized Environments
+----------------------------
+Pin the adapter's IRQs to specific cores by disabling the irqbalance service
+and using the included set_irq_affinity script. Please see the script's help
+text for further options.
+
+- The following settings will distribute the IRQs across all the cores evenly::
+
+ # scripts/set_irq_affinity -x all <interface1> [, <interface2>, ... ]
+
+- The following settings will distribute the IRQs across all the cores that are
+ local to the adapter (same NUMA node)::
+
+ # scripts/set_irq_affinity -x local <interface1> [, <interface2>, ... ]
+
+For very CPU intensive workloads, we recommend pinning the IRQs to all cores.
+
+For IP Forwarding: Disable Adaptive ITR and lower Rx and Tx interrupts per
+queue using ethtool.
+
+- Setting rx-usecs and tx-usecs to 125 will limit interrupts to about 8000
+ interrupts per second per queue.
+
+::
+
+ # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 125 \
+ tx-usecs 125
+
+For lower CPU utilization: Disable Adaptive ITR and lower Rx and Tx interrupts
+per queue using ethtool.
+
+- Setting rx-usecs and tx-usecs to 250 will limit interrupts to about 4000
+ interrupts per second per queue.
+
+::
+
+ # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 250 \
+ tx-usecs 250
+
+For lower latency: Disable Adaptive ITR and ITR by setting Rx and Tx to 0 using
+ethtool.
+
+::
+
+ # ethtool -C <interface> adaptive-rx off adaptive-tx off rx-usecs 0 \
+ tx-usecs 0
+
+Application Device Queues (ADq)
+-------------------------------
+Application Device Queues (ADq) allows you to dedicate one or more queues to a
+specific application. This can reduce latency for the specified application,
+and allow Tx traffic to be rate limited per application. Follow the steps below
+to set up ADq.
+
+1. Create traffic classes (TCs). A maximum of 8 TCs can be created per
+interface. The shaper bw_rlimit parameter is optional.
+
+Example: The following sets up two TCs, tc0 and tc1, with 16 queues each and
+the max Tx rate set to 1Gbit for tc0 and 3Gbit for tc1.
+
+::
+
+ # tc qdisc add dev <interface> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
+ queues 16@0 16@16 hw 1 mode channel shaper bw_rlimit min_rate 1Gbit 2Gbit \
+ max_rate 1Gbit 3Gbit
+
+map: priority mapping for up to 16 priorities to tcs (e.g. map 0 0 0 0 1 1 1 1
+sets priorities 0-3 to use tc0 and 4-7 to use tc1)
+
+queues: for each tc, <num queues>@<offset> (e.g. queues 16@0 16@16 assigns
+16 queues to tc0 at offset 0 and 16 queues to tc1 at offset 16. Max total
+number of queues for all tcs is 64 or number of cores, whichever is lower.)
+
+hw 1 mode channel: 'channel' with 'hw' set to 1 is a new hardware
+offload mode in mqprio that makes full use of the mqprio options, the
+TCs, the queue configurations, and the QoS parameters.
+
+shaper bw_rlimit: for each tc, sets minimum and maximum bandwidth rates.
+Totals must be equal or less than port speed.
+
+For example: min_rate 1Gbit 3Gbit. Verify the bandwidth limit using network
+monitoring tools such as ifstat or sar -n DEV [interval] [number of samples].
+
+2. Enable HW TC offload on interface::
+
+ # ethtool -K <interface> hw-tc-offload on
+
+3. Apply TCs to ingress (RX) flow of interface::
+
+ # tc qdisc add dev <interface> ingress
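+
+Traffic can then be steered into a given TC with a flower filter on the
+ingress qdisc added above. The following is only a sketch; the IP address,
+port, and TC number are illustrative::
+
+ # tc filter add dev <interface> protocol ip parent ffff: prio 1 flower \
+ dst_ip 192.168.1.1/32 ip_proto tcp dst_port 80 skip_sw hw_tc 1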
+
+NOTES:
+ - Run all tc commands from the iproute2 <pathtoiproute2>/tc/ directory.
+ - ADq is not compatible with cloud filters.
+ - Setting up channels via ethtool (ethtool -L) is not supported when the
+ TCs are configured using mqprio.
+ - You must have the latest version of iproute2.
+ - NVM version 6.01 or later is required.
+ - ADq cannot be enabled when any of the following features are enabled: Data
+   Center Bridging (DCB), Multiple Functions per Port (MFP), or Sideband
+   Filters.
+ - If another driver (for example, DPDK) has set cloud filters, you cannot
+ enable ADq.
+ - Tunnel filters are not supported in ADq. If encapsulated packets do
+   arrive in non-tunnel mode, filtering will be done on the inner headers.
+   For example, for VXLAN traffic in non-tunnel mode, the PCTYPE is
+   identified as a VXLAN encapsulated packet, the outer headers are ignored,
+   and therefore the inner headers are matched.
+ - If a TC filter on a PF matches traffic over a VF (on the PF), that
+ traffic will be routed to the appropriate queue of the PF, and will
+ not be passed on the VF. Such traffic will end up getting dropped higher
+ up in the TCP/IP stack as it does not match PF address data.
+ - If traffic matches multiple TC filters that point to different TCs,
+ that traffic will be duplicated and sent to all matching TC queues.
+ The hardware switch mirrors the packet to a VSI list when multiple
+ filters are matched.
+
+
+Known Issues/Troubleshooting
+============================
+
+NOTE: 1 Gb devices based on the Intel(R) Ethernet Network Connection X722 do
+not support the following features:
+
+ * Data Center Bridging (DCB)
+ * QoS
+ * VMQ
+ * SR-IOV
+ * Task Encapsulation offload (VXLAN, NVGRE)
+ * Energy Efficient Ethernet (EEE)
+ * Auto-media detect
+
+Unexpected Issues when the device driver and DPDK share a device
+----------------------------------------------------------------
+Unexpected issues may result when an i40e device is in multi-driver mode and
+the kernel driver and DPDK driver are sharing the device. This is because
+access to the global NIC resources is not synchronized between multiple
+drivers. Any change to the global NIC configuration (writing to a global
+register, setting global configuration by AQ, or changing switch modes) will
+affect all ports and drivers on the device. Loading DPDK with the
+"multi-driver" module parameter may mitigate some of the issues.
+
+TC0 must be enabled when setting up DCB on a switch
+---------------------------------------------------
+The kernel assumes that TC0 is available, and will disable Priority Flow
+Control (PFC) on the device if TC0 is not available. To fix this, ensure TC0 is
+enabled when setting up DCB on your switch.
+
+
+Support
+=======
+For general information, go to the Intel support website at:
+
+https://www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+https://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on a supported kernel
+with a supported adapter, email the specific information related to the issue
+to e1000-devel@lists.sf.net.
diff --git a/Documentation/networking/i40e.txt b/Documentation/networking/i40e.txt
deleted file mode 100644
index c2d6e1824b29..000000000000
--- a/Documentation/networking/i40e.txt
+++ /dev/null
@@ -1,190 +0,0 @@
-Linux Base Driver for the Intel(R) Ethernet Controller XL710 Family
-===================================================================
-
-Intel i40e Linux driver.
-Copyright(c) 2013 Intel Corporation.
-
-Contents
-========
-
-- Identifying Your Adapter
-- Additional Configurations
-- Performance Tuning
-- Known Issues
-- Support
-
-
-Identifying Your Adapter
-========================
-
-The driver in this release is compatible with the Intel Ethernet
-Controller XL710 Family.
-
-For more information on how to identify your adapter, go to the Adapter &
-Driver ID Guide at:
-
- http://support.intel.com/support/network/sb/CS-012904.htm
-
-
-Enabling the driver
-===================
-
-The driver is enabled via the standard kernel configuration system,
-using the make command:
-
- make config/oldconfig/menuconfig/etc.
-
-The driver is located in the menu structure at:
-
- -> Device Drivers
- -> Network device support (NETDEVICES [=y])
- -> Ethernet driver support
- -> Intel devices
- -> Intel(R) Ethernet Controller XL710 Family
-
-Additional Configurations
-=========================
-
- Generic Receive Offload (GRO)
- -----------------------------
- The driver supports the in-kernel software implementation of GRO. GRO has
- shown that by coalescing Rx traffic into larger chunks of data, CPU
- utilization can be significantly reduced when under large Rx load. GRO is
- an evolution of the previously-used LRO interface. GRO is able to coalesce
- other protocols besides TCP. It's also safe to use with configurations that
- are problematic for LRO, namely bridging and iSCSI.
-
- Ethtool
- -------
- The driver utilizes the ethtool interface for driver configuration and
- diagnostics, as well as displaying statistical information. The latest
- ethtool version is required for this functionality.
-
- The latest release of ethtool can be found from
- https://www.kernel.org/pub/software/network/ethtool
-
-
- Flow Director n-ntuple traffic filters (FDir)
- ---------------------------------------------
- The driver utilizes the ethtool interface for configuring ntuple filters,
- via "ethtool -N <device> <filter>".
-
- The sctp4, ip4, udp4, and tcp4 flow types are supported with the standard
- fields including src-ip, dst-ip, src-port and dst-port. The driver only
- supports fully enabling or fully masking the fields, so use of the mask
- fields for partial matches is not supported.
-
- Additionally, the driver supports using the action to specify filters for a
- Virtual Function. You can specify the action as a 64bit value, where the
- lower 32 bits represents the queue number, while the next 8 bits represent
- which VF. Note that 0 is the PF, so the VF identifier is offset by 1. For
- example:
-
- ... action 0x800000002 ...
-
- Would indicate to direct traffic for Virtual Function 7 (8 minus 1) on queue
- 2 of that VF.
-
- The driver also supports using the user-defined field to specify 2 bytes of
- arbitrary data to match within the packet payload in addition to the regular
- fields. The data is specified in the lower 32bits of the user-def field in
- the following way:
-
- +----------------------------+---------------------------+
- | 31 28 24 20 16 | 15 12 8 4 0|
- +----------------------------+---------------------------+
- | offset into packet payload | 2 bytes of flexible data |
- +----------------------------+---------------------------+
-
- As an example,
-
- ... user-def 0x4FFFF ....
-
- means to match the value 0xFFFF 4 bytes into the packet payload. Note that
- the offset is based on the beginning of the payload, and not the beginning
- of the packet. Thus
-
- flow-type tcp4 ... user-def 0x8BEAF ....
-
- would match TCP/IPv4 packets which have the value 0xBEAF 8bytes into the
- TCP/IPv4 payload.
-
- For ICMP, the hardware parses the ICMP header as 4 bytes of header and 4
- bytes of payload, so if you want to match an ICMP frames payload you may need
- to add 4 to the offset in order to match the data.
-
- Furthermore, the offset can only be up to a value of 64, as the hardware
- will only read up to 64 bytes of data from the payload. It must also be even
- as the flexible data is 2 bytes long and must be aligned to byte 0 of the
- packet payload.
-
- When programming filters, the hardware is limited to using a single input
- set for each flow type. This means that it is an error to program two
- different filters with the same type that don't match on the same fields.
- Thus the second of the following two commands will fail:
-
- ethtool -N <device> flow-type tcp4 src-ip 192.168.0.7 action 5
- ethtool -N <device> flow-type tcp4 dst-ip 192.168.15.18 action 1
-
- This is because the first filter will be accepted and reprogram the input
- set for TCPv4 filters, but the second filter will be unable to reprogram the
- input set until all the conflicting TCPv4 filters are first removed.
-
- Note that the user-defined flexible offset is also considered part of the
- input set and cannot be programmed separately for multiple filters of the
- same type. However, the flexible data is not part of the input set and
- multiple filters may use the same offset but match against different data.
-
- Data Center Bridging (DCB)
- --------------------------
- DCB configuration is not currently supported.
-
- FCoE
- ----
- The driver supports Fiber Channel over Ethernet (FCoE) and Data Center
- Bridging (DCB) functionality. Configuring DCB and FCoE is outside the scope
- of this driver doc. Refer to http://www.open-fcoe.org/ for FCoE project
- information and http://www.open-lldp.org/ or email list
- e1000-eedc@lists.sourceforge.net for DCB information.
-
- MAC and VLAN anti-spoofing feature
- ----------------------------------
- When a malicious driver attempts to send a spoofed packet, it is dropped by
- the hardware and not transmitted. An interrupt is sent to the PF driver
- notifying it of the spoof attempt.
-
- When a spoofed packet is detected the PF driver will send the following
- message to the system log (displayed by the "dmesg" command):
-
- Spoof event(s) detected on VF (n)
-
- Where n=the VF that attempted to do the spoofing.
-
-
-Performance Tuning
-==================
-
-An excellent article on performance tuning can be found at:
-
-http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
-
-
-Known Issues
-============
-
-
-Support
-=======
-
-For general information, go to the Intel support website at:
-
- http://support.intel.com
-
-or the Intel Wired Networking project hosted by Sourceforge at:
-
- http://e1000.sourceforge.net
-
-If an issue is identified with the released source code on the supported
-kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@lists.sourceforge.net and copy
-netdev@vger.kernel.org.
diff --git a/Documentation/networking/i40evf.txt b/Documentation/networking/i40evf.txt
deleted file mode 100644
index e9b3035b95d0..000000000000
--- a/Documentation/networking/i40evf.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-Linux* Base Driver for Intel(R) Network Connection
-==================================================
-
-Intel Ethernet Adaptive Virtual Function Linux driver.
-Copyright(c) 2013-2017 Intel Corporation.
-
-Contents
-========
-
-- Identifying Your Adapter
-- Known Issues/Troubleshooting
-- Support
-
-This file describes the i40evf Linux* Base Driver.
-
-The i40evf driver supports the below mentioned virtual function
-devices and can only be activated on kernels running the i40e or
-newer Physical Function (PF) driver compiled with CONFIG_PCI_IOV.
-The i40evf driver requires CONFIG_PCI_MSI to be enabled.
-
-The guest OS loading the i40evf driver must support MSI-X interrupts.
-
-Supported Hardware
-==================
-Intel XL710 X710 Virtual Function
-Intel Ethernet Adaptive Virtual Function
-Intel X722 Virtual Function
-
-Identifying Your Adapter
-========================
-
-For more information on how to identify your adapter, go to the
-Adapter & Driver ID Guide at:
-
- http://support.intel.com/support/go/network/adapter/idguide.htm
-
-Known Issues/Troubleshooting
-============================
-
-
-Support
-=======
-
-For general information, go to the Intel support website at:
-
- http://support.intel.com
-
-or the Intel Wired Networking project hosted by Sourceforge at:
-
- http://sourceforge.net/projects/e1000
-
-If an issue is identified with the released source code on the supported
-kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/iavf.rst b/Documentation/networking/iavf.rst
new file mode 100644
index 000000000000..f8b42b64eb28
--- /dev/null
+++ b/Documentation/networking/iavf.rst
@@ -0,0 +1,281 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Linux* Base Driver for Intel(R) Ethernet Adaptive Virtual Function
+==================================================================
+
+Intel Ethernet Adaptive Virtual Function Linux driver.
+Copyright(c) 2013-2018 Intel Corporation.
+
+Contents
+========
+
+- Identifying Your Adapter
+- Additional Configurations
+- Known Issues/Troubleshooting
+- Support
+
+This file describes the iavf Linux* Base Driver. This driver was formerly
+called i40evf.
+
+The iavf driver supports the virtual function devices listed below and
+can only be activated on kernels running the i40e or newer Physical Function
+(PF) driver compiled with CONFIG_PCI_IOV. The iavf driver requires
+CONFIG_PCI_MSI to be enabled.
+
+The guest OS loading the iavf driver must support MSI-X interrupts.
+
+Identifying Your Adapter
+========================
+The driver in this kernel is compatible with devices based on the following:
+ * Intel(R) XL710 X710 Virtual Function
+ * Intel(R) X722 Virtual Function
+ * Intel(R) XXV710 Virtual Function
+ * Intel(R) Ethernet Adaptive Virtual Function
+
+For the best performance, make sure the latest NVM/FW is installed on your
+device.
+
+For information on how to identify your adapter, and for the latest NVM/FW
+images and Intel network drivers, refer to the Intel Support website:
+http://www.intel.com/support
+
+
+Additional Features and Configurations
+======================================
+
+Viewing Link Messages
+---------------------
+Link messages will not be displayed to the console if the distribution is
+restricting system messages. In order to see network driver link messages on
+your console, set dmesg to eight by entering the following::
+
+ dmesg -n 8
+
+NOTE: This setting is not saved across reboots.
+
+ethtool
+-------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information. The latest ethtool
+version is required for this functionality. Download it at:
+https://www.kernel.org/pub/software/network/ethtool/
+
+Setting VLAN Tag Stripping
+--------------------------
+If you have applications that require Virtual Functions (VFs) to receive
+packets with VLAN tags, you can disable VLAN tag stripping for the VF. The
+Physical Function (PF) processes requests issued from the VF to enable or
+disable VLAN tag stripping. Note that if the PF has assigned a VLAN to a VF,
+then requests from that VF to set VLAN tag stripping will be ignored.
+
+To enable/disable VLAN tag stripping for a VF, issue the following command
+from inside the VM in which you are running the VF::
+
+ ethtool -K <if_name> rxvlan on/off
+
+or alternatively::
+
+ ethtool --offload <if_name> rxvlan on/off
+
+Adaptive Virtual Function
+-------------------------
+Adaptive Virtual Function (AVF) allows the virtual function driver, or VF, to
+adapt to changing feature sets of the physical function driver (PF) with which
+it is associated. This allows system administrators to update a PF without
+having to update all the VFs associated with it. All AVFs have a single common
+device ID and branding string.
+
+AVFs have a minimum set of features known as "base mode," but may provide
+additional features depending on what features are available in the PF with
+which the AVF is associated. The following are base mode features:
+
+- 4 Queue Pairs (QP) and associated Configuration Status Registers (CSRs)
+ for Tx/Rx.
+- i40e descriptors and ring format.
+- Descriptor write-back completion.
+- 1 control queue, with i40e descriptors, CSRs and ring format.
+- 5 MSI-X interrupt vectors and corresponding i40e CSRs.
+- 1 Interrupt Throttle Rate (ITR) index.
+- 1 Virtual Station Interface (VSI) per VF.
+- 1 Traffic Class (TC), TC0
+- Receive Side Scaling (RSS) with 64 entry indirection table and key,
+ configured through the PF.
+- 1 unicast MAC address reserved per VF.
+- 16 MAC address filters for each VF.
+- Stateless offloads - non-tunneled checksums.
+- AVF device ID.
+- HW mailbox is used for VF to PF communications (including on Windows).
+
+IEEE 802.1ad (QinQ) Support
+---------------------------
+The IEEE 802.1ad standard, informally known as QinQ, allows for multiple VLAN
+IDs within a single Ethernet frame. VLAN IDs are sometimes referred to as
+"tags," and multiple VLAN IDs are thus referred to as a "tag stack." Tag stacks
+allow L2 tunneling and the ability to segregate traffic within a particular
+VLAN ID, among other uses.
+
+The following are examples of how to configure 802.1ad (QinQ)::
+
+ ip link add link eth0 eth0.24 type vlan proto 802.1ad id 24
+ ip link add link eth0.24 eth0.24.371 type vlan proto 802.1Q id 371
+
+Where "24" and "371" are example VLAN IDs.
+
+NOTES:
+ Receive checksum offloads, cloud filters, and VLAN acceleration are not
+ supported for 802.1ad (QinQ) packets.
+
+Application Device Queues (ADq)
+-------------------------------
+Application Device Queues (ADq) allows you to dedicate one or more queues to a
+specific application. This can reduce latency for the specified application,
+and allow Tx traffic to be rate limited per application. Follow the steps below
+to set up ADq.
+
+1. Create traffic classes (TCs). A maximum of 8 TCs can be created per
+interface. The shaper bw_rlimit parameter is optional.
+
+Example: The following sets up two TCs, tc0 and tc1, with 16 queues each and
+the max Tx rate set to 1Gbit for tc0 and 3Gbit for tc1.
+
+::
+
+ # tc qdisc add dev <interface> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
+ queues 16@0 16@16 hw 1 mode channel shaper bw_rlimit min_rate 1Gbit 2Gbit \
+ max_rate 1Gbit 3Gbit
+
+map: priority mapping for up to 16 priorities to tcs (e.g. map 0 0 0 0 1 1 1 1
+sets priorities 0-3 to use tc0 and 4-7 to use tc1)
+
+queues: for each tc, <num queues>@<offset> (e.g. queues 16@0 16@16 assigns
+16 queues to tc0 at offset 0 and 16 queues to tc1 at offset 16. Max total
+number of queues for all tcs is 64 or number of cores, whichever is lower.)
+
+hw 1 mode channel: 'channel' with 'hw' set to 1 is a new hardware
+offload mode in mqprio that makes full use of the mqprio options, the
+TCs, the queue configurations, and the QoS parameters.
+
+shaper bw_rlimit: for each tc, sets minimum and maximum bandwidth rates.
+Totals must be equal or less than port speed.
+
+For example: min_rate 1Gbit 3Gbit. Verify the bandwidth limit using network
+monitoring tools such as ifstat or sar -n DEV [interval] [number of samples].
+
+2. Enable HW TC offload on interface::
+
+ # ethtool -K <interface> hw-tc-offload on
+
+3. Apply TCs to ingress (RX) flow of interface::
+
+ # tc qdisc add dev <interface> ingress
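+
+One way to sanity-check the resulting queue and TC layout is to list the
+qdiscs on the interface (shown here only as a suggestion)::
+
+ # tc qdisc show dev <interface>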
+
+NOTES:
+ - Run all tc commands from the iproute2 <pathtoiproute2>/tc/ directory.
+ - ADq is not compatible with cloud filters.
+ - Setting up channels via ethtool (ethtool -L) is not supported when the TCs
+ are configured using mqprio.
+ - You must have the latest version of iproute2.
+ - NVM version 6.01 or later is required.
+ - ADq cannot be enabled when any of the following features are enabled: Data
+   Center Bridging (DCB), Multiple Functions per Port (MFP), or Sideband
+   Filters.
+ - If another driver (for example, DPDK) has set cloud filters, you cannot
+   enable ADq.
+ - Tunnel filters are not supported in ADq. If encapsulated packets do arrive
+   in non-tunnel mode, filtering will be done on the inner headers. For
+   example, for VXLAN traffic in non-tunnel mode, the PCTYPE is identified as
+   a VXLAN encapsulated packet, the outer headers are ignored, and therefore
+   the inner headers are matched.
+ - If a TC filter on a PF matches traffic over a VF (on the PF), that traffic
+ will be routed to the appropriate queue of the PF, and will not be passed on
+ the VF. Such traffic will end up getting dropped higher up in the TCP/IP
+ stack as it does not match PF address data.
+ - If traffic matches multiple TC filters that point to different TCs, that
+ traffic will be duplicated and sent to all matching TC queues. The hardware
+ switch mirrors the packet to a VSI list when multiple filters are matched.
+
+
+Known Issues/Troubleshooting
+============================
+
+Traffic Is Not Being Passed Between VM and Client
+-------------------------------------------------
+You may not be able to pass traffic between a client system and a
+Virtual Machine (VM) running on a separate host if the Virtual Function
+(VF, or Virtual NIC) is not in trusted mode and spoof checking is enabled
+on the VF. Note that this situation can occur in any combination of client,
+host, and guest operating system. For information on how to set the VF to
+trusted mode, refer to the section "VLAN Tag Packet Steering" in this
+readme document. For information on setting spoof checking, refer to the
+section "MAC and VLAN anti-spoofing feature" in this readme document.
+
+Do not unload port driver if VF with active VM is bound to it
+-------------------------------------------------------------
+Do not unload a port's driver if a Virtual Function (VF) with an active Virtual
+Machine (VM) is bound to it. Doing so will cause the port to appear to hang.
+Once the VM shuts down, or otherwise releases the VF, the command will complete.
+
+Virtual machine does not get link
+---------------------------------
+If the virtual machine has more than one virtual port assigned to it, and those
+virtual ports are bound to different physical ports, you may not get link on
+all of the virtual ports. The following command may work around the issue::
+
+ ethtool -r <PF>
+
+Where <PF> is the PF interface in the host, for example: p5p1. You may need to
+run the command more than once to get link on all virtual ports.
+
+MAC address of Virtual Function changes unexpectedly
+----------------------------------------------------
+If a Virtual Function's MAC address is not assigned in the host, then the VF
+(virtual function) driver will use a random MAC address. This random MAC
+address may change each time the VF driver is reloaded. You can assign a static
+MAC address in the host machine. This static MAC address will survive
+a VF driver reload.
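+
+As a sketch, a static MAC address can be assigned from the host with
+iproute2; the PF name, VF number, and address below are illustrative::
+
+ # ip link set <PF> vf 0 mac 02:aa:bb:cc:dd:ee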
+
+Driver Buffer Overflow Fix
+--------------------------
+The fix to resolve CVE-2016-8105, referenced in Intel SA-00069
+https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00069.html
+is included in this and future versions of the driver.
+
+Multiple Interfaces on Same Ethernet Broadcast Network
+------------------------------------------------------
+Due to the default ARP behavior on Linux, it is not possible to have one system
+on two IP networks in the same Ethernet broadcast domain (non-partitioned
+switch) behave as expected. All Ethernet interfaces will respond to IP traffic
+for any IP address assigned to the system. This results in unbalanced receive
+traffic.
+
+If you have multiple interfaces in a server, turn on ARP filtering by
+entering::
+
+ echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
+
+NOTE: This setting is not saved across reboots. The configuration change can be
+made permanent by adding the following line to the file /etc/sysctl.conf::
+
+ net.ipv4.conf.all.arp_filter = 1
+
+Alternatively, you can install the interfaces in separate broadcast domains
+(either in different switches or in a switch partitioned to VLANs).
+
+Rx Page Allocation Errors
+-------------------------
+'Page allocation failure. order:0' errors may occur under stress.
+This is caused by the way the Linux kernel reports this stressed condition.
+
+
+Support
+=======
+For general information, go to the Intel support website at:
+
+https://support.intel.com
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+https://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on the supported kernel
+with a supported adapter, email the specific information related to the issue
+to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/ice.rst b/Documentation/networking/ice.rst
new file mode 100644
index 000000000000..1e4948c9e989
--- /dev/null
+++ b/Documentation/networking/ice.rst
@@ -0,0 +1,45 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Linux* Base Driver for the Intel(R) Ethernet Connection E800 Series
+===================================================================
+
+Intel ice Linux driver.
+Copyright(c) 2018 Intel Corporation.
+
+Contents
+========
+
+- Enabling the driver
+- Support
+
+The driver in this release supports Intel's E800 Series of products. For
+more information, visit Intel's support page at https://support.intel.com.
+
+Enabling the driver
+===================
+The driver is enabled via the standard kernel configuration system,
+using the make command::
+
+ make oldconfig/silentoldconfig/menuconfig/etc.
+
+The driver is located in the menu structure at:
+
+ -> Device Drivers
+ -> Network device support (NETDEVICES [=y])
+ -> Ethernet driver support
+ -> Intel devices
+ -> Intel(R) Ethernet Connection E800 Series Support
+
+Support
+=======
+For general information, go to the Intel support website at:
+
+https://www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+https://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on a supported kernel
+with a supported adapter, email the specific information related to the issue
+to e1000-devel@lists.sf.net.
diff --git a/Documentation/networking/ice.txt b/Documentation/networking/ice.txt
deleted file mode 100644
index 6261c46378e1..000000000000
--- a/Documentation/networking/ice.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Intel(R) Ethernet Connection E800 Series Linux Driver
-===================================================================
-
-Intel ice Linux driver.
-Copyright(c) 2018 Intel Corporation.
-
-Contents
-========
-- Enabling the driver
-- Support
-
-The driver in this release supports Intel's E800 Series of products. For
-more information, visit Intel's support page at http://support.intel.com.
-
-Enabling the driver
-===================
-
-The driver is enabled via the standard kernel configuration system,
-using the make command:
-
- Make oldconfig/silentoldconfig/menuconfig/etc.
-
-The driver is located in the menu structure at:
-
- -> Device Drivers
- -> Network device support (NETDEVICES [=y])
- -> Ethernet driver support
- -> Intel devices
- -> Intel(R) Ethernet Connection E800 Series Support
-
-Support
-=======
-
-For general information, go to the Intel support website at:
-
- http://support.intel.com
-
-If an issue is identified with the released source code, please email
-the maintainer listed in the MAINTAINERS file.
diff --git a/Documentation/networking/igb.rst b/Documentation/networking/igb.rst
new file mode 100644
index 000000000000..ba16b86d5593
--- /dev/null
+++ b/Documentation/networking/igb.rst
@@ -0,0 +1,193 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================
+
+Intel Gigabit Linux driver.
+Copyright(c) 1999-2018 Intel Corporation.
+
+Contents
+========
+
+- Identifying Your Adapter
+- Command Line Parameters
+- Additional Configurations
+- Support
+
+
+Identifying Your Adapter
+========================
+For information on how to identify your adapter, and for the latest Intel
+network drivers, refer to the Intel Support website:
+http://www.intel.com/support
+
+
+Command Line Parameters
+========================
+If the driver is built as a module, the following optional parameters are used
+by entering them on the command line with the modprobe command using this
+syntax::
+
+ modprobe igb [<option>=<VAL1>,<VAL2>,...]
+
+There needs to be a <VAL#> for each network port in the system supported by
+this driver. The values will be applied to each instance, in function order.
+For example::
+
+ modprobe igb max_vfs=2,4
+
+In this case, there are two network ports supported by igb in the system.
+
+NOTE: A descriptor describes a data buffer and attributes related to the data
+buffer. This information is accessed by the hardware.
+
+max_vfs
+-------
+:Valid Range: 0-7
+
+This parameter adds support for SR-IOV. It causes the driver to spawn up to
+max_vfs worth of virtual functions. If the value is greater than 0 it will
+also force the VMDq parameter to be 1 or more.
+
+The parameters for the driver are referenced by position. Thus, if you have a
+dual port adapter, or more than one adapter in your system, and want N virtual
+functions per port, you must specify a number for each port with each parameter
+separated by a comma. For example::
+
+ modprobe igb max_vfs=4
+
+This will spawn 4 VFs on the first port.
+
+::
+
+ modprobe igb max_vfs=2,4
+
+This will spawn 2 VFs on the first port and 4 VFs on the second port.
+
+NOTE: Caution must be used in loading the driver with these parameters.
+Depending on your system configuration, number of slots, etc., it is not
+always possible to predict which command line position maps to which port.
+
+NOTE: Neither the device nor the driver control how VFs are mapped into config
+space. Bus layout will vary by operating system. On operating systems that
+support it, you can check sysfs to find the mapping.
+
+NOTE: When either SR-IOV mode or VMDq mode is enabled, hardware VLAN filtering
+and VLAN tag stripping/insertion will remain enabled. Please remove the old
+VLAN filter before the new VLAN filter is added. For example::
+
+ ip link set eth0 vf 0 vlan 100 // set vlan 100 for VF 0
+ ip link set eth0 vf 0 vlan 0 // Delete vlan 100
+ ip link set eth0 vf 0 vlan 200 // set a new vlan 200 for VF 0
+
+Debug
+-----
+:Valid Range: 0-16 (0=none,...,16=all)
+:Default Value: 0
+
+This parameter adjusts the level of debug messages displayed in the system
+logs.
+
+
+Additional Features and Configurations
+======================================
+
+Jumbo Frames
+------------
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit (MTU)
+to a value larger than the default value of 1500.
+
+Use the ifconfig command to increase the MTU size. For example, enter the
+following where <x> is the interface number::
+
+ ifconfig eth<x> mtu 9000 up
+
+Alternatively, you can use the ip command as follows::
+
+ ip link set mtu 9000 dev eth<x>
+ ip link set up dev eth<x>
+
+This setting is not saved across reboots. The setting change can be made
+permanent by adding 'MTU=9000' to the file:
+
+- For RHEL: /etc/sysconfig/network-scripts/ifcfg-eth<x>
+- For SLES: /etc/sysconfig/network/<config_file>
+
+NOTE: The maximum MTU setting for Jumbo Frames is 9216. This value coincides
+with the maximum Jumbo Frames size of 9234 bytes.
+
+NOTE: Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
+poor performance or loss of link.
+
+
+ethtool
+-------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information. The latest ethtool
+version is required for this functionality. Download it at:
+
+https://www.kernel.org/pub/software/network/ethtool/
+
+
+Enabling Wake on LAN* (WoL)
+---------------------------
+WoL is configured through the ethtool* utility.
+
+WoL will be enabled on the system during the next shut down or reboot. For
+this driver version, in order to enable WoL, the igb driver must be loaded
+prior to shutting down or suspending the system.
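+
+For instance, magic-packet wake could be enabled with ethtool's -s option;
+the interface name here is illustrative::
+
+ ethtool -s eth0 wol g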
+
+NOTE: Wake on LAN is only supported on port A of multi-port devices. Also
+Wake On LAN is not supported for the following device:
+- Intel(R) Gigabit VT Quad Port Server Adapter
+
+
+Multiqueue
+----------
+In this mode, a separate MSI-X vector is allocated for each queue and one for
+"other" interrupts such as link status change and errors. All interrupts are
+throttled via interrupt moderation. Interrupt moderation must be used to avoid
+interrupt storms while the driver is processing one interrupt. The moderation
+value should be at least as large as the expected time for the driver to
+process an interrupt. Multiqueue is off by default.
+
+REQUIREMENTS: MSI-X support is required for Multiqueue. If MSI-X is not found,
+the system will fall back to MSI or to Legacy interrupts. This driver supports
+receive multiqueue on all kernels that support MSI-X.
+
+NOTE: On some kernels a reboot is required to switch between single queue mode
+and multiqueue mode or vice-versa.
+
+
+MAC and VLAN anti-spoofing feature
+----------------------------------
+When a malicious driver attempts to send a spoofed packet, it is dropped by the
+hardware and not transmitted.
+
+An interrupt is sent to the PF driver notifying it of the spoof attempt. When a
+spoofed packet is detected, the PF driver will send the following message to
+the system log (displayed by the "dmesg" command)::
+
+ Spoof event(s) detected on VF(n)
+
+Where n is the VF that attempted the spoofing.
+
+
+Setting MAC Address, VLAN and Rate Limit Using IProute2 Tool
+------------------------------------------------------------
+You can set a MAC address of a Virtual Function (VF), a default VLAN and the
+rate limit using the IProute2 tool. Download the latest version of the
+IProute2 tool from Sourceforge if your version does not have all the features
+you require.
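+
+As an illustrative sketch (the interface, VF number, and values below are
+placeholders), the following iproute2 commands set a VF MAC address, a
+default VLAN, and a Tx rate limit in Mbps::
+
+ # ip link set eth0 vf 0 mac 02:aa:bb:cc:dd:ee
+ # ip link set eth0 vf 0 vlan 100
+ # ip link set eth0 vf 0 rate 1000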
+
+
+Support
+=======
+For general information, go to the Intel support website at:
+
+https://www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+https://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on a supported kernel
+with a supported adapter, email the specific information related to the issue
+to e1000-devel@lists.sf.net.
diff --git a/Documentation/networking/igb.txt b/Documentation/networking/igb.txt
deleted file mode 100644
index f90643ef39c9..000000000000
--- a/Documentation/networking/igb.txt
+++ /dev/null
@@ -1,129 +0,0 @@
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
-
-Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2013 Intel Corporation.
-
-Contents
-========
-
-- Identifying Your Adapter
-- Additional Configurations
-- Support
-
-Identifying Your Adapter
-========================
-
-This driver supports all 82575, 82576 and 82580-based Intel (R) gigabit network
-connections.
-
-For specific information on how to identify your adapter, go to the Adapter &
-Driver ID Guide at:
-
- http://support.intel.com/support/go/network/adapter/idguide.htm
-
-Command Line Parameters
-=======================
-
-The default value for each parameter is generally the recommended setting,
-unless otherwise noted.
-
-max_vfs
--------
-Valid Range: 0-7
-Default Value: 0
-
-This parameter adds support for SR-IOV. It causes the driver to spawn up to
-max_vfs worth of virtual function.
-
-Additional Configurations
-=========================
-
- Jumbo Frames
- ------------
- Jumbo Frames support is enabled by changing the MTU to a value larger than
- the default of 1500. Use the ip command to increase the MTU size.
- For example:
-
- ip link set dev eth<x> mtu 9000
-
- This setting is not saved across reboots.
-
- Notes:
-
- - The maximum MTU setting for Jumbo Frames is 9216. This value coincides
- with the maximum Jumbo Frames size of 9234 bytes.
-
- - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
- poor performance or loss of link.
-
- ethtool
- -------
- The driver utilizes the ethtool interface for driver configuration and
- diagnostics, as well as displaying statistical information. The latest
- version of ethtool can be found at:
-
- https://www.kernel.org/pub/software/network/ethtool/
-
- Enabling Wake on LAN* (WoL)
- ---------------------------
- WoL is configured through the ethtool* utility.
-
- For instructions on enabling WoL with ethtool, refer to the ethtool man page.
-
- WoL will be enabled on the system during the next shut down or reboot.
- For this driver version, in order to enable WoL, the igb driver must be
- loaded when shutting down or rebooting the system.
-
- Wake On LAN is only supported on port A of multi-port adapters.
-
- Wake On LAN is not supported for the Intel(R) Gigabit VT Quad Port Server
- Adapter.
-
- Multiqueue
- ----------
- In this mode, a separate MSI-X vector is allocated for each queue and one
- for "other" interrupts such as link status change and errors. All
- interrupts are throttled via interrupt moderation. Interrupt moderation
- must be used to avoid interrupt storms while the driver is processing one
- interrupt. The moderation value should be at least as large as the expected
- time for the driver to process an interrupt. Multiqueue is off by default.
-
- REQUIREMENTS: MSI-X support is required for Multiqueue. If MSI-X is not
- found, the system will fallback to MSI or to Legacy interrupts.
-
- MAC and VLAN anti-spoofing feature
- ----------------------------------
- When a malicious driver attempts to send a spoofed packet, it is dropped by
- the hardware and not transmitted. An interrupt is sent to the PF driver
- notifying it of the spoof attempt.
-
- When a spoofed packet is detected the PF driver will send the following
- message to the system log (displayed by the "dmesg" command):
-
- Spoof event(s) detected on VF(n)
-
- Where n=the VF that attempted to do the spoofing.
-
- Setting MAC Address, VLAN and Rate Limit Using IProute2 Tool
- ------------------------------------------------------------
- You can set a MAC address of a Virtual Function (VF), a default VLAN and the
- rate limit using the IProute2 tool. Download the latest version of the
- iproute2 tool from Sourceforge if your version does not have all the
- features you require.
-
-
-Support
-=======
-
-For general information, go to the Intel support website at:
-
- www.intel.com/support/
-
-or the Intel Wired Networking project hosted by Sourceforge at:
-
- http://sourceforge.net/projects/e1000
-
-If an issue is identified with the released source code on the supported
-kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/igbvf.rst b/Documentation/networking/igbvf.rst
new file mode 100644
index 000000000000..a8a9ffa4f8d3
--- /dev/null
+++ b/Documentation/networking/igbvf.rst
@@ -0,0 +1,64 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Linux* Base Virtual Function Driver for Intel(R) 1G Ethernet
+============================================================
+
+Intel Gigabit Virtual Function Linux driver.
+Copyright(c) 1999-2018 Intel Corporation.
+
+Contents
+========
+- Identifying Your Adapter
+- Additional Configurations
+- Support
+
+This driver supports Intel 82576-based virtual function devices that can only
+be activated on kernels that support SR-IOV.
+
+SR-IOV requires the correct platform and OS support.
+
+The guest OS loading this driver must support MSI-X interrupts.
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel adapter. All hardware requirements listed apply to use
+with Linux.
+
+Driver information can be obtained using ethtool, lspci, and ifconfig.
+Instructions on updating ethtool can be found in the section Additional
+Configurations later in this document.
+
+NOTE: A total of 32 shared VLANs can be assigned to one or more VFs.
+
+
+Identifying Your Adapter
+========================
+For information on how to identify your adapter, and for the latest Intel
+network drivers, refer to the Intel Support website:
+http://www.intel.com/support
+
+
+Additional Features and Configurations
+======================================
+
+ethtool
+-------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information. The latest ethtool
+version is required for this functionality. Download it at:
+
+https://www.kernel.org/pub/software/network/ethtool/
+
+
+Support
+=======
+For general information, go to the Intel support website at:
+
+https://www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+https://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on a supported kernel
+with a supported adapter, email the specific information related to the issue
+to e1000-devel@lists.sf.net.
diff --git a/Documentation/networking/igbvf.txt b/Documentation/networking/igbvf.txt
deleted file mode 100644
index bd404735fb46..000000000000
--- a/Documentation/networking/igbvf.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
-
-Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2013 Intel Corporation.
-
-Contents
-========
-
-- Identifying Your Adapter
-- Additional Configurations
-- Support
-
-This file describes the igbvf Linux* Base Driver for Intel Network Connection.
-
-The igbvf driver supports 82576-based virtual function devices that can only
-be activated on kernels that support SR-IOV. SR-IOV requires the correct
-platform and OS support.
-
-The igbvf driver requires the igb driver, version 2.0 or later. The igbvf
-driver supports virtual functions generated by the igb driver with a max_vfs
-value of 1 or greater. For more information on the max_vfs parameter refer
-to the README included with the igb driver.
-
-The guest OS loading the igbvf driver must support MSI-X interrupts.
-
-This driver is only supported as a loadable module at this time. Intel is
-not supplying patches against the kernel source to allow for static linking
-of the driver. For questions related to hardware requirements, refer to the
-documentation supplied with your Intel Gigabit adapter. All hardware
-requirements listed apply to use with Linux.
-
-Instructions on updating ethtool can be found in the section "Additional
-Configurations" later in this document.
-
-VLANs: There is a limit of a total of 32 shared VLANs to 1 or more VFs.
-
-Identifying Your Adapter
-========================
-
-The igbvf driver supports 82576-based virtual function devices that can only
-be activated on kernels that support SR-IOV.
-
-For more information on how to identify your adapter, go to the Adapter &
-Driver ID Guide at:
-
- http://support.intel.com/support/go/network/adapter/idguide.htm
-
-For the latest Intel network drivers for Linux, refer to the following
-website. In the search field, enter your adapter name or type, or use the
-networking link on the left to search for your adapter:
-
- http://downloadcenter.intel.com/scripts-df-external/Support_Intel.aspx
-
-Additional Configurations
-=========================
-
- ethtool
- -------
- The driver utilizes the ethtool interface for driver configuration and
- diagnostics, as well as displaying statistical information. The ethtool
- version 3.0 or later is required for this functionality, although we
- strongly recommend downloading the latest version at:
-
- https://www.kernel.org/pub/software/network/ethtool/
-
-Support
-=======
-
-For general information, go to the Intel support website at:
-
- http://support.intel.com
-
-or the Intel Wired Networking project hosted by Sourceforge at:
-
- http://sourceforge.net/projects/e1000
-
-If an issue is identified with the released source code on the supported
-kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index fcd710f2cc7a..bd89dae8d578 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -14,6 +14,16 @@ Contents:
dpaa2/index
e100
e1000
+ e1000e
+ fm10k
+ igb
+ igbvf
+ ixgb
+ ixgbe
+ ixgbevf
+ i40e
+ iavf
+ ice
kapi
z8530book
msg_zerocopy
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 8313a636dd53..163b5ff1073c 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
1 - Disabled by default, enabled when an ICMP black hole detected
2 - Always enabled, use initial MSS of tcp_base_mss.
-tcp_probe_interval - INTEGER
+tcp_probe_interval - UNSIGNED INTEGER
Controls how often to start TCP Packetization-Layer Path MTU
Discovery reprobe. The default is reprobing every 10 minutes as
per RFC4821.
@@ -1442,6 +1442,14 @@ max_hbh_length - INTEGER
header.
Default: INT_MAX (unlimited)
+skip_notify_on_dev_down - BOOLEAN
+ Controls whether an RTM_DELROUTE message is generated for routes
+ removed when a device is taken down or deleted. IPv4 does not
+ generate this message; IPv6 does by default. Setting this sysctl
+ to true skips the message, making IPv4 and IPv6 on par in relying
+ on userspace caches to track link events and evict routes.
+ Default: false (generate message)
+
IPv6 Fragmentation:
ip6frag_high_thresh - INTEGER
diff --git a/Documentation/networking/ixgb.rst b/Documentation/networking/ixgb.rst
new file mode 100644
index 000000000000..8bd80e27843d
--- /dev/null
+++ b/Documentation/networking/ixgb.rst
@@ -0,0 +1,467 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Linux Base Driver for 10 Gigabit Intel(R) Ethernet Network Connection
+=====================================================================
+
+October 1, 2018
+
+
+Contents
+========
+
+- In This Release
+- Identifying Your Adapter
+- Command Line Parameters
+- Improving Performance
+- Additional Configurations
+- Known Issues/Troubleshooting
+- Support
+
+
+
+In This Release
+===============
+
+This file describes the ixgb Linux Base Driver for the 10 Gigabit Intel(R)
+Network Connection. This driver includes support for Itanium(R)2-based
+systems.
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your 10 Gigabit adapter. All hardware requirements listed apply
+to use with Linux.
+
+The following features are available in this kernel:
+ - Native VLANs
+ - Channel Bonding (teaming)
+ - SNMP
+
+Channel Bonding documentation can be found in the Linux kernel source:
+/Documentation/networking/bonding.txt
+
+The driver information previously displayed in the /proc filesystem is not
+supported in this release. Alternatively, you can use ethtool (version 1.6
+or later), lspci, and iproute2 to obtain the same information.
+
+Instructions on updating ethtool can be found in the section "Additional
+Configurations" later in this document.
+
+
+Identifying Your Adapter
+========================
+
+The following Intel network adapters are compatible with the drivers in this
+release:
+
++------------+------------------------------+----------------------------------+
+| Controller | Adapter Name | Physical Layer |
++============+==============================+==================================+
+| 82597EX | Intel(R) PRO/10GbE LR/SR/CX4 | - 10G Base-LR (fiber) |
+| | Server Adapters | - 10G Base-SR (fiber) |
+| | | - 10G Base-CX4 (copper) |
++------------+------------------------------+----------------------------------+
+
+For more information on how to identify your adapter, go to the Adapter &
+Driver ID Guide at:
+
+ https://support.intel.com
+
+
+Command Line Parameters
+=======================
+
+If the driver is built as a module, the following optional parameters are
+used by entering them on the command line with the modprobe command using
+this syntax::
+
+ modprobe ixgb [<option>=<VAL1>,<VAL2>,...]
+
+For example, with two 10GbE PCI adapters, entering::
+
+ modprobe ixgb TxDescriptors=80,128
+
+loads the ixgb driver with 80 TX resources for the first adapter and 128 TX
+resources for the second adapter.
+
+The default value for each parameter is generally the recommended setting,
+unless otherwise noted.
+
+Copybreak
+---------
+:Valid Range: 0-XXXX
+:Default Value: 256
+
+ This is the maximum size of packet that is copied to a new buffer on
+ receive.
+
+Debug
+-----
+:Valid Range: 0-16 (0=none,...,16=all)
+:Default Value: 0
+
+ This parameter adjusts the level of debug messages displayed in the
+ system logs.
+
+FlowControl
+-----------
+:Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
+:Default Value: 1 if no EEPROM, otherwise read from EEPROM
+
+ This parameter controls the automatic generation (Tx) of, and response
+ (Rx) to, Ethernet PAUSE frames. Beware that there are hardware bugs
+ associated with enabling Tx flow control.
+
+RxDescriptors
+-------------
+:Valid Range: 64-4096
+:Default Value: 1024
+
+ This value is the number of receive descriptors allocated by the driver.
+ Increasing this value allows the driver to buffer more incoming packets.
+ Each descriptor is 16 bytes. A receive buffer is also allocated for
+ each descriptor and can be either 2048, 4056, 8192, or 16384 bytes,
+ depending on the MTU setting. When the MTU size is 1500 or less, the
+ receive buffer size is 2048 bytes. When the MTU is greater than 1500 the
+ receive buffer size will be either 4056, 8192, or 16384 bytes. The
+ maximum MTU size is 16114.
+
+TxDescriptors
+-------------
+:Valid Range: 64-4096
+:Default Value: 256
+
+ This value is the number of transmit descriptors allocated by the driver.
+ Increasing this value allows the driver to queue more transmits. Each
+ descriptor is 16 bytes.
+
+RxIntDelay
+----------
+:Valid Range: 0-65535 (0=off)
+:Default Value: 72
+
+ This value delays the generation of receive interrupts in units of
+ 0.8192 microseconds. Receive interrupt reduction can improve CPU
+ efficiency if properly tuned for specific network traffic. Increasing
+ this value adds extra latency to frame reception and can end up
+ decreasing the throughput of TCP traffic. If the system is reporting
+ dropped receives, this value may be set too high, causing the driver to
+ run out of available receive descriptors.
+
+TxIntDelay
+----------
+:Valid Range: 0-65535 (0=off)
+:Default Value: 32
+
+ This value delays the generation of transmit interrupts in units of
+ 0.8192 microseconds. Transmit interrupt reduction can improve CPU
+ efficiency if properly tuned for specific network traffic. Increasing
+ this value adds extra latency to frame transmission and can end up
+ decreasing the throughput of TCP traffic. If this value is set too high,
+ it will cause the driver to run out of available transmit descriptors.
+
+XsumRX
+------
+:Valid Range: 0-1
+:Default Value: 1
+
+ A value of '1' indicates that the driver should enable IP checksum
+ offload for received packets (both UDP and TCP) to the adapter hardware.
+
+RxFCHighThresh
+--------------
+:Valid Range: 1,536-262,136 (0x600 - 0x3FFF8, 8 byte granularity)
+:Default Value: 196,608 (0x30000)
+
+ Receive Flow control high threshold (when we send a pause frame)
+
+RxFCLowThresh
+-------------
+:Valid Range: 64-262,136 (0x40 - 0x3FFF8, 8 byte granularity)
+:Default Value: 163,840 (0x28000)
+
+ Receive Flow control low threshold (when we send a resume frame)
+
+FCReqTimeout
+------------
+:Valid Range: 1-65535
+:Default Value: 65535
+
+ Flow control request timeout (how long to pause the link partner's tx)
+
+IntDelayEnable
+--------------
+:Value Range: 0,1
+:Default Value: 1
+
+ Interrupt Delay, 0 disables transmit interrupt delay and 1 enables it.
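+
+As a combined example, a hypothetical invocation that disables the receive
+and transmit interrupt delays described above::
+
+ modprobe ixgb IntDelayEnable=0 RxIntDelay=0 TxIntDelay=0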
+
+
+Improving Performance
+=====================
+
+With the 10 Gigabit server adapters, the default Linux configuration will
+very likely limit the total available throughput artificially. There is a set
+of configuration changes that, when applied together, will increase the ability
+of Linux to transmit and receive data. The following enhancements were
+originally acquired from settings published at http://www.spec.org/web99/ for
+various submitted results using Linux.
+
+NOTE:
+ These changes are only suggestions, and serve as a starting point for
+ tuning your network performance.
+
+The changes are made in three major ways, listed in order of greatest effect:
+
+- Use ip link to modify the mtu (maximum transmission unit) and the txqueuelen
+ parameter.
+- Use sysctl to modify /proc parameters (essentially kernel tuning)
+- Use setpci to modify the MMRBC field in PCI-X configuration space to increase
+ transmit burst lengths on the bus.
+
+NOTE:
+ setpci modifies the adapter's configuration registers to allow it to read
+ up to 4k bytes at a time (for transmits). However, for some systems the
+ behavior after modifying this register may be undefined (possibly errors of
+ some kind). A power-cycle, hard reset or explicitly setting the e6 register
+ back to 22 (setpci -d 8086:1a48 e6.b=22) may be required to get back to a
+ stable configuration.
+
+- COPY these lines and paste them into ixgb_perf.sh:
+
+::
+
+ #!/bin/bash
+ echo "configuring network performance, edit this file to change the interface
+ or device ID of 10GbE card"
+ # set mmrbc to 4k reads, modify only Intel 10GbE device IDs
+ # replace 1a48 with appropriate 10GbE device's ID installed on the system,
+ # if needed.
+ setpci -d 8086:1a48 e6.b=2e
+ # set the MTU (max transmission unit) - it requires your switch and clients
+ # to change as well.
+ # set the txqueuelen
+ # your ixgb adapter should be loaded as eth1 for this to work, change if needed
+ ip li set dev eth1 mtu 9000 txqueuelen 1000 up
+ # call the sysctl utility to modify /proc/sys entries
+ sysctl -p ./sysctl_ixgb.conf
+
+- COPY these lines and paste them into sysctl_ixgb.conf:
+
+::
+
+ # some of the defaults may be different for your kernel
+ # call this file with sysctl -p <this file>
+ # these are just suggested values that worked well to increase throughput in
+ # several network benchmark tests, your mileage may vary
+
+ ### IPV4 specific settings
+ # turn TCP timestamp support off, default 1, reduces CPU use
+ net.ipv4.tcp_timestamps = 0
+ # turn SACK support off, default on
+ # on systems with a VERY fast bus -> memory interface this is the big gainer
+ net.ipv4.tcp_sack = 0
+ # set min/default/max TCP read buffer, default 4096 87380 174760
+ net.ipv4.tcp_rmem = 10000000 10000000 10000000
+ # set min/pressure/max TCP write buffer, default 4096 16384 131072
+ net.ipv4.tcp_wmem = 10000000 10000000 10000000
+ # set min/pressure/max TCP buffer space, default 31744 32256 32768
+ net.ipv4.tcp_mem = 10000000 10000000 10000000
+
+ ### CORE settings (mostly for socket and UDP effect)
+ # set maximum receive socket buffer size, default 131071
+ net.core.rmem_max = 524287
+ # set maximum send socket buffer size, default 131071
+ net.core.wmem_max = 524287
+ # set default receive socket buffer size, default 65535
+ net.core.rmem_default = 524287
+ # set default send socket buffer size, default 65535
+ net.core.wmem_default = 524287
+ # set maximum amount of option memory buffers, default 10240
+ net.core.optmem_max = 524287
+ # set number of unprocessed input packets before kernel starts dropping them; default 300
+ net.core.netdev_max_backlog = 300000
+
+Edit the ixgb_perf.sh script if necessary to change eth1 to the interface
+your ixgb driver is using, and/or replace '1a48' with the device ID of the
+10GbE adapter installed on the system.
+
+NOTE:
+ Unless these scripts are added to the boot process, these changes will
+ only last until the next system reboot.
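+
+For example, assuming a distribution that still runs /etc/rc.local at boot,
+the script could be invoked from there (the path is illustrative)::
+
+ /usr/local/sbin/ixgb_perf.sh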
+
+
+Resolving Slow UDP Traffic
+--------------------------
+If your server does not seem to be able to receive UDP traffic as fast as it
+can receive TCP traffic, it could be because Linux, by default, does not set
+the network stack buffers as large as they need to be to support high UDP
+transfer rates. One way to alleviate this problem is to allow more memory to
+be used by the IP stack to store incoming data.
+
+For instance, use the commands::
+
+ sysctl -w net.core.rmem_max=262143
+
+and::
+
+ sysctl -w net.core.rmem_default=262143
+
+to increase the read buffer memory max and default to 262143 (256k - 1) from
+defaults of max=131071 (128k - 1) and default=65535 (64k - 1). These variables
+will increase the amount of memory used by the network stack for receives, and
+can be increased significantly more if necessary for your application.
+
+
+Additional Configurations
+=========================
+
+Configuring the Driver on Different Distributions
+-------------------------------------------------
+Configuring a network driver to load properly when the system is started is
+distribution dependent. Typically, the configuration process involves adding
+an alias line to /etc/modprobe.conf as well as editing other system startup
+scripts and/or configuration files. Many popular Linux distributions ship
+with tools to make these changes for you. To learn the proper way to
+configure a network device for your system, refer to your distribution
+documentation. If during this process you are asked for the driver or module
+name, the name for the Linux Base Driver for the Intel 10GbE Family of
+Adapters is ixgb.
+
+Viewing Link Messages
+---------------------
+Link messages will not be displayed to the console if the distribution is
+restricting system messages. In order to see network driver link messages on
+your console, set the dmesg log level to eight by entering the following::
+
+ dmesg -n 8
+
+NOTE: This setting is not saved across reboots.
+
+Jumbo Frames
+------------
+The driver supports Jumbo Frames for all adapters. Jumbo Frames support is
+enabled by changing the MTU to a value larger than the default of 1500.
+The maximum value for the MTU is 16114. Use the ip command to
+increase the MTU size. For example::
+
+ ip li set dev ethx mtu 9000
+
+The maximum MTU setting for Jumbo Frames is 16114. This value coincides
+with the maximum Jumbo Frames size of 16128.
+
+Ethtool
+-------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information. The ethtool
+version 1.6 or later is required for this functionality.
+
+The latest release of ethtool can be found from
+https://www.kernel.org/pub/software/network/ethtool/
+
+NOTE:
+ The ethtool version 1.6 only supports a limited set of ethtool options.
+ Support for a more complete ethtool feature set can be enabled by
+ upgrading to the latest version.
+
+NAPI
+----
+NAPI (Rx polling mode) is supported in the ixgb driver.
+
+See https://wiki.linuxfoundation.org/networking/napi for more information on
+NAPI.
+
+
+Known Issues/Troubleshooting
+============================
+
+NOTE:
+ After installing the driver, if your Intel Network Connection is not
+ working, verify in the "In This Release" section of the readme that you have
+ installed the correct driver.
+
+Cable Interoperability Issue with Fujitsu XENPAK Module in SmartBits Chassis
+----------------------------------------------------------------------------
+Excessive CRC errors may be observed if the Intel(R) PRO/10GbE CX4
+Server adapter is connected to a Fujitsu XENPAK CX4 module in a SmartBits
+chassis using 15 m/24AWG cable assemblies manufactured by Fujitsu or Leoni.
+The CRC errors may be received either by the Intel(R) PRO/10GbE CX4
+Server adapter or the SmartBits. If this situation occurs, using a different
+cable assembly may resolve the issue.
+
+Cable Interoperability Issues with HP Procurve 3400cl Switch Port
+-----------------------------------------------------------------
+Excessive CRC errors may be observed if the Intel(R) PRO/10GbE CX4 Server
+adapter is connected to an HP Procurve 3400cl switch port using short cables
+(1 m or shorter). If this situation occurs, using a longer cable may resolve
+the issue.
+
+Excessive CRC errors may be observed using Fujitsu 24AWG cable assemblies that
+are 10 m or longer, or when using a Leoni 15 m/24AWG cable assembly. The CRC
+errors may be received either by the CX4 Server adapter or at the switch. If
+this situation occurs, using a different cable assembly may resolve the issue.
+
+Jumbo Frames System Requirement
+-------------------------------
+Memory allocation failures have been observed on Linux systems with 64 MB
+of RAM or less that are running Jumbo Frames. If you are using Jumbo
+Frames, your system may require more than the advertised minimum
+requirement of 64 MB of system memory.
+
+Performance Degradation with Jumbo Frames
+-----------------------------------------
+Degradation in throughput performance may be observed in some Jumbo frames
+environments. If this is observed, increasing the application's socket buffer
+size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
+See the specific application manual and /usr/src/linux*/Documentation/
+networking/ip-sysctl.txt for more details.
+
+Allocating Rx Buffers when Using Jumbo Frames
+---------------------------------------------
+Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if
+the available memory is heavily fragmented. This issue may be seen with PCI-X
+adapters or with packet split disabled. This can be reduced or eliminated
+by changing the amount of available memory for receive buffer allocation, by
+increasing /proc/sys/vm/min_free_kbytes.
+
+Multiple Interfaces on Same Ethernet Broadcast Network
+------------------------------------------------------
+Due to the default ARP behavior on Linux, it is not possible to have
+one system on two IP networks in the same Ethernet broadcast domain
+(non-partitioned switch) behave as expected. All Ethernet interfaces
+will respond to IP traffic for any IP address assigned to the system.
+This results in unbalanced receive traffic.
+
+If you have multiple interfaces in a server, do either of the following:
+
+ - Turn on ARP filtering by entering::
+
+ echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
+
+ - Install the interfaces in separate broadcast domains - either in
+ different switches or in a switch partitioned to VLANs.
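+
+The ARP filter option above can also be applied through sysctl, which
+toggles the same underlying knob::
+
+ sysctl -w net.ipv4.conf.all.arp_filter=1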
+
+UDP Stress Test Dropped Packet Issue
+--------------------------------------
+Under a small-packet UDP stress test with the 10GbE driver, the Linux system
+may drop UDP packets when the socket buffers fill up. You may want to change
+the driver's Flow Control variables to their minimum values to control
+packet reception.
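+
+For example, a hypothetical reload using the minimum flow control
+thresholds documented in the Command Line Parameters section above::
+
+ modprobe ixgb RxFCHighThresh=1536 RxFCLowThresh=64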
+
+Tx Hangs Possible Under Stress
+------------------------------
+If Tx hangs occur under stress conditions, turning off TSO
+("ethtool -K eth0 tso off") may resolve the problem.
+
+
+Support
+=======
+For general information, go to the Intel support website at:
+
+https://www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+https://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on a supported kernel
+with a supported adapter, email the specific information related to the issue
+to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/ixgb.txt b/Documentation/networking/ixgb.txt
deleted file mode 100644
index 09f71d71920a..000000000000
--- a/Documentation/networking/ixgb.txt
+++ /dev/null
@@ -1,433 +0,0 @@
-Linux Base Driver for 10 Gigabit Intel(R) Ethernet Network Connection
-=====================================================================
-
-March 14, 2011
-
-
-Contents
-========
-
-- In This Release
-- Identifying Your Adapter
-- Building and Installation
-- Command Line Parameters
-- Improving Performance
-- Additional Configurations
-- Known Issues/Troubleshooting
-- Support
-
-
-
-In This Release
-===============
-
-This file describes the ixgb Linux Base Driver for the 10 Gigabit Intel(R)
-Network Connection. This driver includes support for Itanium(R)2-based
-systems.
-
-For questions related to hardware requirements, refer to the documentation
-supplied with your 10 Gigabit adapter. All hardware requirements listed apply
-to use with Linux.
-
-The following features are available in this kernel:
- - Native VLANs
- - Channel Bonding (teaming)
- - SNMP
-
-Channel Bonding documentation can be found in the Linux kernel source:
-/Documentation/networking/bonding.txt
-
-The driver information previously displayed in the /proc filesystem is not
-supported in this release. Alternatively, you can use ethtool (version 1.6
-or later), lspci, and iproute2 to obtain the same information.
-
-Instructions on updating ethtool can be found in the section "Additional
-Configurations" later in this document.
-
-
-Identifying Your Adapter
-========================
-
-The following Intel network adapters are compatible with the drivers in this
-release:
-
-Controller Adapter Name Physical Layer
----------- ------------ --------------
-82597EX Intel(R) PRO/10GbE LR/SR/CX4 10G Base-LR (1310 nm optical fiber)
- Server Adapters 10G Base-SR (850 nm optical fiber)
- 10G Base-CX4(twin-axial copper cabling)
-
-For more information on how to identify your adapter, go to the Adapter &
-Driver ID Guide at:
-
- http://support.intel.com/support/network/sb/CS-012904.htm
-
-
-Building and Installation
-=========================
-
-select m for "Intel(R) PRO/10GbE support" located at:
- Location:
- -> Device Drivers
- -> Network device support (NETDEVICES [=y])
- -> Ethernet (10000 Mbit) (NETDEV_10000 [=y])
-1. make modules && make modules_install
-
-2. Load the module:
-
-    modprobe ixgb <parameter>=<value>
-
- The insmod command can be used if the full
- path to the driver module is specified. For example:
-
- insmod /lib/modules/<KERNEL VERSION>/kernel/drivers/net/ixgb/ixgb.ko
-
- With 2.6 based kernels also make sure that older ixgb drivers are
- removed from the kernel, before loading the new module:
-
- rmmod ixgb; modprobe ixgb
-
-3. Assign an IP address to the interface by entering the following, where
- x is the interface number:
-
- ip addr add ethx <IP_address>
-
-4. Verify that the interface works. Enter the following, where <IP_address>
- is the IP address for another machine on the same subnet as the interface
- that is being tested:
-
- ping <IP_address>
-
-
-Command Line Parameters
-=======================
-
-If the driver is built as a module, the following optional parameters are
-used by entering them on the command line with the modprobe command using
-this syntax:
-
- modprobe ixgb [<option>=<VAL1>,<VAL2>,...]
-
-For example, with two 10GbE PCI adapters, entering:
-
- modprobe ixgb TxDescriptors=80,128
-
-loads the ixgb driver with 80 TX resources for the first adapter and 128 TX
-resources for the second adapter.
-
-The default value for each parameter is generally the recommended setting,
-unless otherwise noted.
-
-FlowControl
-Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
-Default: Read from the EEPROM
- If EEPROM is not detected, default is 1
- This parameter controls the automatic generation(Tx) and response(Rx) to
- Ethernet PAUSE frames. There are hardware bugs associated with enabling
- Tx flow control so beware.
-
-RxDescriptors
-Valid Range: 64-512
-Default Value: 512
- This value is the number of receive descriptors allocated by the driver.
- Increasing this value allows the driver to buffer more incoming packets.
- Each descriptor is 16 bytes. A receive buffer is also allocated for
- each descriptor and can be either 2048, 4056, 8192, or 16384 bytes,
- depending on the MTU setting. When the MTU size is 1500 or less, the
- receive buffer size is 2048 bytes. When the MTU is greater than 1500 the
- receive buffer size will be either 4056, 8192, or 16384 bytes. The
- maximum MTU size is 16114.
-
-RxIntDelay
-Valid Range: 0-65535 (0=off)
-Default Value: 72
- This value delays the generation of receive interrupts in units of
- 0.8192 microseconds. Receive interrupt reduction can improve CPU
- efficiency if properly tuned for specific network traffic. Increasing
- this value adds extra latency to frame reception and can end up
- decreasing the throughput of TCP traffic. If the system is reporting
- dropped receives, this value may be set too high, causing the driver to
- run out of available receive descriptors.
-
-TxDescriptors
-Valid Range: 64-4096
-Default Value: 256
- This value is the number of transmit descriptors allocated by the driver.
- Increasing this value allows the driver to queue more transmits. Each
- descriptor is 16 bytes.
-
-XsumRX
-Valid Range: 0-1
-Default Value: 1
- A value of '1' indicates that the driver should enable IP checksum
- offload for received packets (both UDP and TCP) to the adapter hardware.
-
-
-Improving Performance
-=====================
-
-With the 10 Gigabit server adapters, the default Linux configuration will
-very likely limit the total available throughput artificially. There is a set
-of configuration changes that, when applied together, will increase the ability
-of Linux to transmit and receive data. The following enhancements were
-originally acquired from settings published at http://www.spec.org/web99/ for
-various submitted results using Linux.
-
-NOTE: These changes are only suggestions, and serve as a starting point for
- tuning your network performance.
-
-The changes are made in three major ways, listed in order of greatest effect:
-- Use ip link to modify the mtu (maximum transmission unit) and the txqueuelen
- parameter.
-- Use sysctl to modify /proc parameters (essentially kernel tuning)
-- Use setpci to modify the MMRBC field in PCI-X configuration space to increase
- transmit burst lengths on the bus.
-
-NOTE: setpci modifies the adapter's configuration registers to allow it to read
-up to 4k bytes at a time (for transmits). However, for some systems the
-behavior after modifying this register may be undefined (possibly errors of
-some kind). A power-cycle, hard reset or explicitly setting the e6 register
-back to 22 (setpci -d 8086:1a48 e6.b=22) may be required to get back to a
-stable configuration.
-
-- COPY these lines and paste them into ixgb_perf.sh:
-#!/bin/bash
-echo "configuring network performance , edit this file to change the interface
-or device ID of 10GbE card"
-# set mmrbc to 4k reads, modify only Intel 10GbE device IDs
-# replace 1a48 with appropriate 10GbE device's ID installed on the system,
-# if needed.
-setpci -d 8086:1a48 e6.b=2e
-# set the MTU (max transmission unit) - it requires your switch and clients
-# to change as well.
-# set the txqueuelen
-# your ixgb adapter should be loaded as eth1 for this to work, change if needed
-ip li set dev eth1 mtu 9000 txqueuelen 1000 up
-# call the sysctl utility to modify /proc/sys entries
-sysctl -p ./sysctl_ixgb.conf
-- END ixgb_perf.sh
-
-- COPY these lines and paste them into sysctl_ixgb.conf:
-# some of the defaults may be different for your kernel
-# call this file with sysctl -p <this file>
-# these are just suggested values that worked well to increase throughput in
-# several network benchmark tests, your mileage may vary
-
-### IPV4 specific settings
-# turn TCP timestamp support off, default 1, reduces CPU use
-net.ipv4.tcp_timestamps = 0
-# turn SACK support off, default on
-# on systems with a VERY fast bus -> memory interface this is the big gainer
-net.ipv4.tcp_sack = 0
-# set min/default/max TCP read buffer, default 4096 87380 174760
-net.ipv4.tcp_rmem = 10000000 10000000 10000000
-# set min/pressure/max TCP write buffer, default 4096 16384 131072
-net.ipv4.tcp_wmem = 10000000 10000000 10000000
-# set min/pressure/max TCP buffer space, default 31744 32256 32768
-net.ipv4.tcp_mem = 10000000 10000000 10000000
-
-### CORE settings (mostly for socket and UDP effect)
-# set maximum receive socket buffer size, default 131071
-net.core.rmem_max = 524287
-# set maximum send socket buffer size, default 131071
-net.core.wmem_max = 524287
-# set default receive socket buffer size, default 65535
-net.core.rmem_default = 524287
-# set default send socket buffer size, default 65535
-net.core.wmem_default = 524287
-# set maximum amount of option memory buffers, default 10240
-net.core.optmem_max = 524287
-# set number of unprocessed input packets before kernel starts dropping them; default 300
-net.core.netdev_max_backlog = 300000
-- END sysctl_ixgb.conf
-
-Edit the ixgb_perf.sh script if necessary to change eth1 to whatever interface
-your ixgb driver is using and/or replace '1a48' with appropriate 10GbE device's
-ID installed on the system.
-
-NOTE: Unless these scripts are added to the boot process, these changes will
- only last only until the next system reboot.
-
-
-Resolving Slow UDP Traffic
---------------------------
-If your server does not seem to be able to receive UDP traffic as fast as it
-can receive TCP traffic, it could be because Linux, by default, does not set
-the network stack buffers as large as they need to be to support high UDP
-transfer rates. One way to alleviate this problem is to allow more memory to
-be used by the IP stack to store incoming data.
-
-For instance, use the commands:
- sysctl -w net.core.rmem_max=262143
-and
- sysctl -w net.core.rmem_default=262143
-to increase the read buffer memory max and default to 262143 (256k - 1) from
-defaults of max=131071 (128k - 1) and default=65535 (64k - 1). These variables
-will increase the amount of memory used by the network stack for receives, and
-can be increased significantly more if necessary for your application.
-
-
-Additional Configurations
-=========================
-
- Configuring the Driver on Different Distributions
- -------------------------------------------------
- Configuring a network driver to load properly when the system is started is
- distribution dependent. Typically, the configuration process involves adding
- an alias line to /etc/modprobe.conf as well as editing other system startup
- scripts and/or configuration files. Many popular Linux distributions ship
- with tools to make these changes for you. To learn the proper way to
- configure a network device for your system, refer to your distribution
- documentation. If during this process you are asked for the driver or module
- name, the name for the Linux Base Driver for the Intel 10GbE Family of
- Adapters is ixgb.
-
- Viewing Link Messages
- ---------------------
- Link messages will not be displayed to the console if the distribution is
- restricting system messages. In order to see network driver link messages on
- your console, set dmesg to eight by entering the following:
-
- dmesg -n 8
-
- NOTE: This setting is not saved across reboots.
-
-
- Jumbo Frames
- ------------
- The driver supports Jumbo Frames for all adapters. Jumbo Frames support is
- enabled by changing the MTU to a value larger than the default of 1500.
- The maximum value for the MTU is 16114. Use the ip command to
- increase the MTU size. For example:
-
- ip li set dev ethx mtu 9000
-
- The maximum MTU setting for Jumbo Frames is 16114. This value coincides
- with the maximum Jumbo Frames size of 16128.
-
-
- ethtool
- -------
- The driver utilizes the ethtool interface for driver configuration and
- diagnostics, as well as displaying statistical information. The ethtool
- version 1.6 or later is required for this functionality.
-
- The latest release of ethtool can be found from
- https://www.kernel.org/pub/software/network/ethtool/
-
- NOTE: The ethtool version 1.6 only supports a limited set of ethtool options.
- Support for a more complete ethtool feature set can be enabled by
- upgrading to the latest version.
-
-
- NAPI
- ----
-
- NAPI (Rx polling mode) is supported in the ixgb driver. NAPI is enabled
- or disabled based on the configuration of the kernel. see CONFIG_IXGB_NAPI
-
- See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI.
-
-
-Known Issues/Troubleshooting
-============================
-
- NOTE: After installing the driver, if your Intel Network Connection is not
- working, verify in the "In This Release" section of the readme that you have
- installed the correct driver.
-
- Intel(R) PRO/10GbE CX4 Server Adapter Cable Interoperability Issue with
- Fujitsu XENPAK Module in SmartBits Chassis
- ---------------------------------------------------------------------
- Excessive CRC errors may be observed if the Intel(R) PRO/10GbE CX4
- Server adapter is connected to a Fujitsu XENPAK CX4 module in a SmartBits
- chassis using 15 m/24AWG cable assemblies manufactured by Fujitsu or Leoni.
- The CRC errors may be received either by the Intel(R) PRO/10GbE CX4
- Server adapter or the SmartBits. If this situation occurs using a different
- cable assembly may resolve the issue.
-
- CX4 Server Adapter Cable Interoperability Issues with HP Procurve 3400cl
- Switch Port
- ------------------------------------------------------------------------
- Excessive CRC errors may be observed if the Intel(R) PRO/10GbE CX4 Server
- adapter is connected to an HP Procurve 3400cl switch port using short cables
- (1 m or shorter). If this situation occurs, using a longer cable may resolve
- the issue.
-
- Excessive CRC errors may be observed using Fujitsu 24AWG cable assemblies that
- Are 10 m or longer or where using a Leoni 15 m/24AWG cable assembly. The CRC
- errors may be received either by the CX4 Server adapter or at the switch. If
- this situation occurs, using a different cable assembly may resolve the issue.
-
-
- Jumbo Frames System Requirement
- -------------------------------
- Memory allocation failures have been observed on Linux systems with 64 MB
- of RAM or less that are running Jumbo Frames. If you are using Jumbo
- Frames, your system may require more than the advertised minimum
- requirement of 64 MB of system memory.
-
-
- Performance Degradation with Jumbo Frames
- -----------------------------------------
- Degradation in throughput performance may be observed in some Jumbo frames
- environments. If this is observed, increasing the application's socket buffer
- size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
- See the specific application manual and /usr/src/linux*/Documentation/
- networking/ip-sysctl.txt for more details.
-
-
- Allocating Rx Buffers when Using Jumbo Frames
- ---------------------------------------------
- Allocating Rx buffers when using Jumbo Frames on 2.6.x kernels may fail if
- the available memory is heavily fragmented. This issue may be seen with PCI-X
- adapters or with packet split disabled. This can be reduced or eliminated
- by changing the amount of available memory for receive buffer allocation, by
- increasing /proc/sys/vm/min_free_kbytes.
-
-
- Multiple Interfaces on Same Ethernet Broadcast Network
- ------------------------------------------------------
- Due to the default ARP behavior on Linux, it is not possible to have
- one system on two IP networks in the same Ethernet broadcast domain
- (non-partitioned switch) behave as expected. All Ethernet interfaces
- will respond to IP traffic for any IP address assigned to the system.
- This results in unbalanced receive traffic.
-
- If you have multiple interfaces in a server, do either of the following:
-
- - Turn on ARP filtering by entering:
- echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
-
- - Install the interfaces in separate broadcast domains - either in
- different switches or in a switch partitioned to VLANs.
-
-
- UDP Stress Test Dropped Packet Issue
- --------------------------------------
- Under small packets UDP stress test with 10GbE driver, the Linux system
- may drop UDP packets due to the fullness of socket buffers. You may want
- to change the driver's Flow Control variables to the minimum value for
- controlling packet reception.
-
-
- Tx Hangs Possible Under Stress
- ------------------------------
- Under stress conditions, if TX hangs occur, turning off TSO
- "ethtool -K eth0 tso off" may resolve the problem.
-
-
-Support
-=======
-
-For general information, go to the Intel support website at:
-
- http://support.intel.com
-
-or the Intel Wired Networking project hosted by Sourceforge at:
-
- http://sourceforge.net/projects/e1000
-
-If an issue is identified with the released source code on the supported
-kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/ixgbe.rst b/Documentation/networking/ixgbe.rst
new file mode 100644
index 000000000000..725fc697fd8f
--- /dev/null
+++ b/Documentation/networking/ixgbe.rst
@@ -0,0 +1,527 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
+=============================================================================
+
+Intel 10 Gigabit Linux driver.
+Copyright(c) 1999-2018 Intel Corporation.
+
+Contents
+========
+
+- Identifying Your Adapter
+- Command Line Parameters
+- Additional Configurations
+- Known Issues
+- Support
+
+Identifying Your Adapter
+========================
+The driver is compatible with devices based on the following:
+
+ * Intel(R) Ethernet Controller 82598
+ * Intel(R) Ethernet Controller 82599
+ * Intel(R) Ethernet Controller X520
+ * Intel(R) Ethernet Controller X540
+ * Intel(R) Ethernet Controller X550
+ * Intel(R) Ethernet Controller X552
+ * Intel(R) Ethernet Controller X553
+
+For information on how to identify your adapter, and for the latest Intel
+network drivers, refer to the Intel Support website:
+https://www.intel.com/support
+
+SFP+ Devices with Pluggable Optics
+----------------------------------
+
+82599-BASED ADAPTERS
+~~~~~~~~~~~~~~~~~~~~
+NOTES:
+
+- If your 82599-based Intel(R) Network Adapter came with Intel optics or is
+  an Intel(R) Ethernet Server Adapter X520-2, then it only supports Intel
+  optics and/or the direct attach cables listed below.
+- When 82599-based SFP+ devices are connected back to back, they should be
+  set to the same Speed setting via ethtool. Results may vary if you mix
+  speed settings.
+
++---------------+---------------------------------------+------------------+
+| Supplier      | Type                                  | Part Numbers     |
++===============+=======================================+==================+
+| SR Modules                                                               |
++---------------+---------------------------------------+------------------+
+| Intel         | DUAL RATE 1G/10G SFP+ SR (bailed)     | FTLX8571D3BCV-IT |
++---------------+---------------------------------------+------------------+
+| Intel         | DUAL RATE 1G/10G SFP+ SR (bailed)     | AFBR-703SDZ-IN2  |
++---------------+---------------------------------------+------------------+
+| Intel         | DUAL RATE 1G/10G SFP+ SR (bailed)     | AFBR-703SDDZ-IN1 |
++---------------+---------------------------------------+------------------+
+| LR Modules                                                               |
++---------------+---------------------------------------+------------------+
+| Intel         | DUAL RATE 1G/10G SFP+ LR (bailed)     | FTLX1471D3BCV-IT |
++---------------+---------------------------------------+------------------+
+| Intel         | DUAL RATE 1G/10G SFP+ LR (bailed)     | AFCT-701SDZ-IN2  |
++---------------+---------------------------------------+------------------+
+| Intel         | DUAL RATE 1G/10G SFP+ LR (bailed)     | AFCT-701SDDZ-IN1 |
++---------------+---------------------------------------+------------------+
+
+The following is a list of 3rd party SFP+ modules that have received some
+testing. Not all modules are applicable to all devices.
+
++---------------+---------------------------------------+------------------+
+| Supplier      | Type                                  | Part Numbers     |
++===============+=======================================+==================+
+| Finisar       | SFP+ SR bailed, 10g single rate       | FTLX8571D3BCL    |
++---------------+---------------------------------------+------------------+
+| Avago         | SFP+ SR bailed, 10g single rate       | AFBR-700SDZ      |
++---------------+---------------------------------------+------------------+
+| Finisar       | SFP+ LR bailed, 10g single rate       | FTLX1471D3BCL    |
++---------------+---------------------------------------+------------------+
+| Finisar       | DUAL RATE 1G/10G SFP+ SR (No Bail)    | FTLX8571D3QCV-IT |
++---------------+---------------------------------------+------------------+
+| Avago         | DUAL RATE 1G/10G SFP+ SR (No Bail)    | AFBR-703SDZ-IN1  |
++---------------+---------------------------------------+------------------+
+| Finisar       | DUAL RATE 1G/10G SFP+ LR (No Bail)    | FTLX1471D3QCV-IT |
++---------------+---------------------------------------+------------------+
+| Avago         | DUAL RATE 1G/10G SFP+ LR (No Bail)    | AFCT-701SDZ-IN1  |
++---------------+---------------------------------------+------------------+
+| Finisar       | 1000BASE-T SFP                        | FCLF8522P2BTL    |
++---------------+---------------------------------------+------------------+
+| Avago         | 1000BASE-T                            | ABCU-5710RZ      |
++---------------+---------------------------------------+------------------+
+| HP            | 1000BASE-SX SFP                       | 453153-001       |
++---------------+---------------------------------------+------------------+
+
+82599-based adapters support all passive and active limiting direct attach
+cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
+
+Laser turns off for SFP+ when ifconfig ethX down
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"ifconfig ethX down" turns off the laser for 82599-based SFP+ fiber adapters.
+"ifconfig ethX up" turns on the laser.
+Alternatively, you can use "ip link set [down/up] dev ethX" to turn the
+laser off and on.
+
+
+82599-based QSFP+ Adapters
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+NOTES:
+
+- If your 82599-based Intel(R) Network Adapter came with Intel optics, it
+  only supports Intel optics.
+- 82599-based QSFP+ adapters only support 4x10 Gbps connections. 1x40 Gbps
+  connections are not supported. QSFP+ link partners must be configured for
+  4x10 Gbps.
+- 82599-based QSFP+ adapters do not support automatic link speed detection.
+  The link speed must be configured to either 10 Gbps or 1 Gbps to match the
+  link partner's speed capabilities. Incorrect speed configurations will
+  result in failure to link.
+- Intel(R) Ethernet Converged Network Adapter X520-Q1 only supports the
+  optics and direct attach cables listed below.
+
++---------------+---------------------------------------+------------------+
+| Supplier      | Type                                  | Part Numbers     |
++===============+=======================================+==================+
+| Intel         | DUAL RATE 1G/10G QSFP+ SRL (bailed)   | E10GQSFPSR       |
++---------------+---------------------------------------+------------------+
+
+82599-based QSFP+ adapters support all passive and active limiting QSFP+
+direct attach cables that comply with SFF-8436 v4.1 specifications.
+
+82598-BASED ADAPTERS
+~~~~~~~~~~~~~~~~~~~~
+NOTES:
+
+- Intel(R) Ethernet Network Adapters that support removable optical modules
+  only support their original module type (for example, the Intel(R) 10
+  Gigabit SR Dual Port Express Module only supports SR optical modules). If
+  you plug in a different type of module, the driver will not load.
+- Hot swapping/hot plugging optical modules is not supported.
+- Only single speed, 10 gigabit modules are supported.
+- LAN on Motherboard (LOMs) may support DA, SR, or LR modules. Other module
+  types are not supported. Please see your system documentation for details.
+
+The following is a list of SFP+ modules and direct attach cables that have
+received some testing. Not all modules are applicable to all devices.
+
++---------------+---------------------------------------+------------------+
+| Supplier      | Type                                  | Part Numbers     |
++===============+=======================================+==================+
+| Finisar       | SFP+ SR bailed, 10g single rate       | FTLX8571D3BCL    |
++---------------+---------------------------------------+------------------+
+| Avago         | SFP+ SR bailed, 10g single rate       | AFBR-700SDZ      |
++---------------+---------------------------------------+------------------+
+| Finisar       | SFP+ LR bailed, 10g single rate       | FTLX1471D3BCL    |
++---------------+---------------------------------------+------------------+
+
+82598-based adapters support all passive direct attach cables that comply with
+SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Active direct attach cables
+are not supported.
+
+Third party optic modules and cables referred to above are listed only for the
+purpose of highlighting third party specifications and potential
+compatibility, and are not recommendations or endorsements or sponsorship of
+any third party's product by Intel. Intel is not endorsing or promoting
+products made by any third party and the third party reference is provided
+only to share information regarding certain optic modules and cables with the
+above specifications. There may be other manufacturers or suppliers, producing
+or supplying optic modules and cables with similar or matching descriptions.
+Customers must use their own discretion and diligence to purchase optic
+modules and cables from any third party of their choice. Customers are solely
+responsible for assessing the suitability of the product and/or devices and
+for the selection of the vendor for purchasing any product. THE OPTIC MODULES
+AND CABLES REFERRED TO ABOVE ARE NOT WARRANTED OR SUPPORTED BY INTEL. INTEL
+ASSUMES NO LIABILITY WHATSOEVER, AND INTEL DISCLAIMS ANY EXPRESS OR IMPLIED
+WARRANTY, RELATING TO SALE AND/OR USE OF SUCH THIRD PARTY PRODUCTS OR
+SELECTION OF VENDOR BY CUSTOMERS.
+
+Command Line Parameters
+=======================
+
+max_vfs
+-------
+:Valid Range: 1-63
+
+This parameter adds support for SR-IOV. It causes the driver to spawn up to
+max_vfs worth of virtual functions.
+If the value is greater than 0 it will also force the VMDq parameter to be 1 or
+more.
+
+NOTE: This parameter is only used on kernel 3.7.x and below. On kernel 3.8.x
+and above, use sysfs to enable VFs. Also, for Red Hat distributions, this
+parameter is only used on version 6.6 and older. For version 6.7 and newer, use
+sysfs. For example::
+
+ echo $num_vf_enabled > /sys/class/net/$dev/device/sriov_numvfs  # enable VFs
+ echo 0 > /sys/class/net/$dev/device/sriov_numvfs                # disable VFs
+
+The parameters for the driver are referenced by position. Thus, if you have a
+dual port adapter, or more than one adapter in your system, and want N virtual
+functions per port, you must specify a number for each port with each parameter
+separated by a comma. For example::
+
+ modprobe ixgbe max_vfs=4
+
+This will spawn 4 VFs on the first port.
+
+::
+
+ modprobe ixgbe max_vfs=2,4
+
+This will spawn 2 VFs on the first port and 4 VFs on the second port.
+
+NOTE: Use caution when loading the driver with these parameters. Depending on
+your system configuration, number of slots, etc., it is not always possible
+to predict which command line position maps to which port.
+
+NOTE: Neither the device nor the driver control how VFs are mapped into config
+space. Bus layout will vary by operating system. On operating systems that
+support it, you can check sysfs to find the mapping.
+
+NOTE: When either SR-IOV mode or VMDq mode is enabled, hardware VLAN filtering
+and VLAN tag stripping/insertion will remain enabled. Please remove the old
+VLAN filter before the new VLAN filter is added. For example,
+
+::
+
+ ip link set eth0 vf 0 vlan 100  # set VLAN 100 for VF 0
+ ip link set eth0 vf 0 vlan 0    # delete VLAN 100
+ ip link set eth0 vf 0 vlan 200  # set a new VLAN 200 for VF 0
+
+With kernel 3.6, the driver supports the simultaneous usage of max_vfs and DCB
+features, subject to the constraints described below. Prior to kernel 3.6, the
+driver did not support the simultaneous operation of max_vfs greater than 0 and
+the DCB features (multiple traffic classes utilizing Priority Flow Control and
+Extended Transmission Selection).
+
+When DCB is enabled, network traffic is transmitted and received through
+multiple traffic classes (packet buffers in the NIC). The traffic is associated
+with a specific class based on priority, which has a value of 0 through 7 used
+in the VLAN tag. When SR-IOV is not enabled, each traffic class is associated
+with a set of receive/transmit descriptor queue pairs. The number of queue
+pairs for a given traffic class depends on the hardware configuration. When
+SR-IOV is enabled, the descriptor queue pairs are grouped into pools. The
+Physical Function (PF) and each Virtual Function (VF) is allocated a pool of
+receive/transmit descriptor queue pairs. When multiple traffic classes are
+configured (for example, DCB is enabled), each pool contains a queue pair from
+each traffic class. When a single traffic class is configured in the hardware,
+the pools contain multiple queue pairs from the single traffic class.
+
+The number of VFs that can be allocated depends on the number of traffic
+classes that can be enabled. The configurable number of traffic classes for
+each enabled VF is as follows:
+
+- 0 - 15 VFs = Up to 8 traffic classes, depending on device support
+- 16 - 31 VFs = Up to 4 traffic classes
+- 32 - 63 VFs = 1 traffic class
+
+When VFs are configured, the PF is allocated one pool as well. The PF supports
+the DCB features with the constraint that each traffic class will only use a
+single queue pair. When zero VFs are configured, the PF can support multiple
+queue pairs per traffic class.
+
+allow_unsupported_sfp
+---------------------
+:Valid Range: 0,1
+:Default Value: 0 (disabled)
+
+This parameter allows unsupported and untested SFP+ modules on 82599-based
+adapters, as long as the type of module is known to the driver.
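+
+For example, a hypothetical invocation following the modprobe syntax used
+for the other parameters in this section::
+
+ modprobe ixgbe allow_unsupported_sfp=1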
+
+debug
+-----
+:Valid Range: 0-16 (0=none,...,16=all)
+:Default Value: 0
+
+This parameter adjusts the level of debug messages displayed in the system
+logs.
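+
+For example, to load the driver with the most verbose logging (same
+modprobe form as above)::
+
+ modprobe ixgbe debug=16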
+
+
+Additional Features and Configurations
+======================================
+
+Flow Control
+------------
+Ethernet Flow Control (IEEE 802.3x) can be configured with ethtool to enable
+receiving and transmitting pause frames for ixgbe. When transmit is enabled,
+pause frames are generated when the receive packet buffer crosses a predefined
+threshold. When receive is enabled, the transmit unit will halt for the time
+delay specified when a pause frame is received.
+
+NOTE: You must have a flow control capable link partner.
+
+Flow Control is enabled by default.
+
+Use ethtool to change the flow control settings. To enable or disable Rx or
+Tx Flow Control::
+
+ ethtool -A eth? rx <on|off> tx <on|off>
+
+Note: This command only enables or disables Flow Control if auto-negotiation is
+disabled. If auto-negotiation is enabled, this command changes the parameters
+used for auto-negotiation with the link partner.
+
+To enable or disable auto-negotiation::
+
+ ethtool -s eth? autoneg <on|off>
+
+Note: Flow Control auto-negotiation is part of link auto-negotiation. Depending
+on your device, you may not be able to change the auto-negotiation setting.
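+
+To display the flow control settings currently in effect, the standard
+ethtool query form can be used::
+
+ ethtool -a eth?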
+
+NOTE: For 82598 backplane cards entering 1 gigabit mode, flow control default
+behavior is changed to off. Flow control in 1 gigabit mode on these devices can
+lead to transmit hangs.
+
+Intel(R) Ethernet Flow Director
+-------------------------------
+The Intel Ethernet Flow Director performs the following tasks:
+
+- Directs receive packets according to their flows to different queues.
+- Enables tight control on routing a flow in the platform.
+- Matches flows and CPU cores for flow affinity.
+- Supports multiple parameters for flexible flow classification and load
+ balancing (in SFP mode only).
+
+NOTE: Intel Ethernet Flow Director masking works in the opposite manner from
+subnet masking. In the following command::
+
+ # ethtool -N eth11 flow-type ip4 src-ip 172.4.1.2 m 255.0.0.0 dst-ip \
+   172.21.1.1 m 255.128.0.0 action 31
+
+The src-ip value that is written to the filter will be 0.4.1.2, not 172.0.0.0
+as might be expected. Similarly, the dst-ip value written to the filter will be
+0.21.1.1, not 172.0.0.0.
+
+To enable or disable the Intel Ethernet Flow Director::
+
+ # ethtool -K ethX ntuple <on|off>
+
+When disabling ntuple filters, all the user programmed filters are flushed from
+the driver cache and hardware. All needed filters must be re-added when ntuple
+is re-enabled.
+
+To add a filter that directs packet to queue 2, use -U or -N switch::
+
+ # ethtool -N ethX flow-type tcp4 src-ip 192.168.10.1 dst-ip \
+ 192.168.10.2 src-port 2000 dst-port 2001 action 2 [loc 1]
+
+To see the list of filters currently present::
+
+ # ethtool <-u|-n> ethX
+
+Sideband Perfect Filters
+------------------------
+Sideband Perfect Filters are used to direct traffic that matches specified
+characteristics. They are enabled through ethtool's ntuple interface. To add a
+new filter use the following command::
+
+ ethtool -U <device> flow-type <type> src-ip <ip> dst-ip <ip> src-port <port> \
+ dst-port <port> action <queue>
+
+Where:
+
+- <device> - the ethernet device to program
+- <type> - can be ip4, tcp4, udp4, or sctp4
+- <ip> - the IP address to match on
+- <port> - the port number to match on
+- <queue> - the queue to direct traffic towards (-1 discards the matched
+  traffic)
+
+Use the following command to delete a filter::
+
+ ethtool -U <device> delete <N>
+
+Where <N> is the filter id displayed when printing all the active filters, and
+may also have been specified using "loc <N>" when adding the filter.
+
+The following example matches TCP traffic sent from 192.168.0.1, port 5300,
+directed to 192.168.0.5, port 80, and sends it to queue 7::
+
+ ethtool -U enp130s0 flow-type tcp4 src-ip 192.168.0.1 dst-ip 192.168.0.5 \
+ src-port 5300 dst-port 80 action 7
+
+For each flow-type, the programmed filters must all have the same matching
+input set. For example, issuing the following two commands is acceptable::
+
+ ethtool -U enp130s0 flow-type ip4 src-ip 192.168.0.1 src-port 5300 action 7
+ ethtool -U enp130s0 flow-type ip4 src-ip 192.168.0.5 src-port 55 action 10
+
+Issuing the next two commands, however, is not acceptable, since the first
+specifies src-ip and the second specifies dst-ip::
+
+ ethtool -U enp130s0 flow-type ip4 src-ip 192.168.0.1 src-port 5300 action 7
+ ethtool -U enp130s0 flow-type ip4 dst-ip 192.168.0.5 src-port 55 action 10
+
+The second command will fail with an error. You may program multiple filters
+with the same fields, using different values, but, on one device, you may not
+program two TCP4 filters with different matching fields.
+
+Matching on a sub-portion of a field is not supported by the ixgbe driver, thus
+partial mask fields are not supported.
+
+To create filters that direct traffic to a specific Virtual Function, use the
+"user-def" parameter. Specify the user-def as a 64 bit value, where the lower 32
+bits represent the queue number, while the next 8 bits identify which VF.
+Note that 0 is the PF, so the VF identifier is offset by 1. For example::
+
+ ... user-def 0x800000002 ...
+
+specifies to direct traffic to Virtual Function 7 (8 minus 1) into queue 2 of
+that VF.
+
+Note that these filters will not break internal routing rules, and will not
+route traffic that otherwise would not have been sent to the specified Virtual
+Function.
+
+Jumbo Frames
+------------
+Jumbo Frames support is enabled by changing the Maximum Transmission Unit (MTU)
+to a value larger than the default value of 1500.
+
+Use the ifconfig command to increase the MTU size. For example, enter the
+following where <x> is the interface number::
+
+ ifconfig eth<x> mtu 9000 up
+
+Alternatively, you can use the ip command as follows::
+
+ ip link set mtu 9000 dev eth<x>
+ ip link set up dev eth<x>
+
+This setting is not saved across reboots. The setting change can be made
+permanent by adding 'MTU=9000' to the file::
+
+ /etc/sysconfig/network-scripts/ifcfg-eth<x> // for RHEL
+ /etc/sysconfig/network/<config_file> // for SLES
+
+NOTE: The maximum MTU setting for Jumbo Frames is 9710. This value coincides
+with the maximum Jumbo Frames size of 9728 bytes.
+
+NOTE: This driver will attempt to use multiple page sized buffers to receive
+each jumbo packet. This should help to avoid buffer starvation issues when
+allocating receive packets.
+
+NOTE: For 82599-based network connections, if you are enabling jumbo frames in
+a virtual function (VF), jumbo frames must first be enabled in the physical
+function (PF). The VF MTU setting cannot be larger than the PF MTU.
+
+Generic Receive Offload, aka GRO
+--------------------------------
+The driver supports the in-kernel software implementation of GRO. GRO has
+shown that by coalescing Rx traffic into larger chunks of data, CPU
+utilization can be significantly reduced when under large Rx load. GRO is an
+evolution of the previously-used LRO interface. GRO is able to coalesce
+other protocols besides TCP. It's also safe to use with configurations that
+are problematic for LRO, namely bridging and iSCSI.
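+
+GRO is toggled through the kernel's generic offload interface rather than a
+driver-private setting; for example::
+
+ ethtool -K ethX gro on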
+
+Data Center Bridging (DCB)
+--------------------------
+NOTE:
+The kernel assumes that TC0 is available, and will disable Priority Flow
+Control (PFC) on the device if TC0 is not available. To fix this, ensure TC0 is
+enabled when setting up DCB on your switch.
+
+DCB is a configuration Quality of Service implementation in hardware. It uses
+the VLAN priority tag (802.1p) to filter traffic. That means that there are 8
+different priorities that traffic can be filtered into. It also enables
+priority flow control (802.1Qbb) which can limit or eliminate the number of
+dropped packets during network stress. Bandwidth can be allocated to each of
+these priorities, which is enforced at the hardware level (802.1Qaz).
+
+Adapter firmware implements LLDP and DCBX protocol agents as per 802.1AB and
+802.1Qaz respectively. The firmware based DCBX agent runs in willing mode only
+and can accept settings from a DCBX capable peer. Software configuration of
+DCBX parameters via dcbtool/lldptool are not supported.
+
+The ixgbe driver implements the DCB netlink interface layer to allow user-space
+to communicate with the driver and query DCB configuration for the port.
+
+ethtool
+-------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information. The latest ethtool
+version is required for this functionality. Download it at:
+https://www.kernel.org/pub/software/network/ethtool/
+
+FCoE
+----
+The ixgbe driver supports Fibre Channel over Ethernet (FCoE) and Data Center
+Bridging (DCB). This code has no default effect on the regular driver
+operation. Configuring DCB and FCoE is outside the scope of this README. Refer
+to http://www.open-fcoe.org/ for FCoE project information and contact
+ixgbe-eedc@lists.sourceforge.net for DCB information.
+
+MAC and VLAN anti-spoofing feature
+----------------------------------
+When a malicious driver attempts to send a spoofed packet, it is dropped by the
+hardware and not transmitted.
+
+An interrupt is sent to the PF driver notifying it of the spoof attempt. When a
+spoofed packet is detected, the PF driver will send the following message to
+the system log (displayed by the "dmesg" command)::
+
+ ixgbe ethX: ixgbe_spoof_check: n spoofed packets detected
+
+where "X" is the PF interface number and "n" is the number of spoofed packets.
+
+NOTE: This feature can be disabled for a specific Virtual Function (VF)::
+
+ ip link set <pf dev> vf <vf id> spoofchk {off|on}
+
+
+Known Issues/Troubleshooting
+============================
+
+Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS
+-----------------------------------------------------------------------
+Linux KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM.
+This includes traditional PCIe devices, as well as SR-IOV-capable devices based
+on the Intel Ethernet Controller XL710.
+
+
+Support
+=======
+For general information, go to the Intel support website at:
+
+https://www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+https://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on a supported kernel
+with a supported adapter, email the specific information related to the issue
+to e1000-devel@lists.sf.net.
diff --git a/Documentation/networking/ixgbe.txt b/Documentation/networking/ixgbe.txt
deleted file mode 100644
index 687835415707..000000000000
--- a/Documentation/networking/ixgbe.txt
+++ /dev/null
@@ -1,349 +0,0 @@
-Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Family of
-Adapters
-=============================================================================
-
-Intel 10 Gigabit Linux driver.
-Copyright(c) 1999 - 2013 Intel Corporation.
-
-Contents
-========
-
-- Identifying Your Adapter
-- Additional Configurations
-- Performance Tuning
-- Known Issues
-- Support
-
-Identifying Your Adapter
-========================
-
-The driver in this release is compatible with 82598, 82599 and X540-based
-Intel Network Connections.
-
-For more information on how to identify your adapter, go to the Adapter &
-Driver ID Guide at:
-
- http://support.intel.com/support/network/sb/CS-012904.htm
-
-SFP+ Devices with Pluggable Optics
-----------------------------------
-
-82599-BASED ADAPTERS
-
-NOTES: If your 82599-based Intel(R) Network Adapter came with Intel optics, or
-is an Intel(R) Ethernet Server Adapter X520-2, then it only supports Intel
-optics and/or the direct attach cables listed below.
-
-When 82599-based SFP+ devices are connected back to back, they should be set to
-the same Speed setting via ethtool. Results may vary if you mix speed settings.
-82598-based adapters support all passive direct attach cables that comply
-with SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Active direct attach
-cables are not supported.
-
-Supplier Type Part Numbers
-
-SR Modules
-Intel DUAL RATE 1G/10G SFP+ SR (bailed) FTLX8571D3BCV-IT
-Intel DUAL RATE 1G/10G SFP+ SR (bailed) AFBR-703SDDZ-IN1
-Intel DUAL RATE 1G/10G SFP+ SR (bailed) AFBR-703SDZ-IN2
-LR Modules
-Intel DUAL RATE 1G/10G SFP+ LR (bailed) FTLX1471D3BCV-IT
-Intel DUAL RATE 1G/10G SFP+ LR (bailed) AFCT-701SDDZ-IN1
-Intel DUAL RATE 1G/10G SFP+ LR (bailed) AFCT-701SDZ-IN2
-
-The following is a list of 3rd party SFP+ modules and direct attach cables that
-have received some testing. Not all modules are applicable to all devices.
-
-Supplier Type Part Numbers
-
-Finisar SFP+ SR bailed, 10g single rate FTLX8571D3BCL
-Avago SFP+ SR bailed, 10g single rate AFBR-700SDZ
-Finisar SFP+ LR bailed, 10g single rate FTLX1471D3BCL
-
-Finisar DUAL RATE 1G/10G SFP+ SR (No Bail) FTLX8571D3QCV-IT
-Avago DUAL RATE 1G/10G SFP+ SR (No Bail) AFBR-703SDZ-IN1
-Finisar DUAL RATE 1G/10G SFP+ LR (No Bail) FTLX1471D3QCV-IT
-Avago DUAL RATE 1G/10G SFP+ LR (No Bail) AFCT-701SDZ-IN1
-Finistar 1000BASE-T SFP FCLF8522P2BTL
-Avago 1000BASE-T SFP ABCU-5710RZ
-
-82599-based adapters support all passive and active limiting direct attach
-cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
-
-Laser turns off for SFP+ when device is down
--------------------------------------------
-"ip link set down" turns off the laser for 82599-based SFP+ fiber adapters.
-"ip link set up" turns on the laser.
-
-
-82598-BASED ADAPTERS
-
-NOTES for 82598-Based Adapters:
-- Intel(R) Network Adapters that support removable optical modules only support
- their original module type (i.e., the Intel(R) 10 Gigabit SR Dual Port
- Express Module only supports SR optical modules). If you plug in a different
- type of module, the driver will not load.
-- Hot Swapping/hot plugging optical modules is not supported.
-- Only single speed, 10 gigabit modules are supported.
-- LAN on Motherboard (LOMs) may support DA, SR, or LR modules. Other module
- types are not supported. Please see your system documentation for details.
-
-The following is a list of 3rd party SFP+ modules and direct attach cables that
-have received some testing. Not all modules are applicable to all devices.
-
-Supplier Type Part Numbers
-
-Finisar SFP+ SR bailed, 10g single rate FTLX8571D3BCL
-Avago SFP+ SR bailed, 10g single rate AFBR-700SDZ
-Finisar SFP+ LR bailed, 10g single rate FTLX1471D3BCL
-
-82598-based adapters support all passive direct attach cables that comply
-with SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Active direct attach
-cables are not supported.
-
-
-Flow Control
-------------
-Ethernet Flow Control (IEEE 802.3x) can be configured with ethtool to enable
-receiving and transmitting pause frames for ixgbe. When TX is enabled, PAUSE
-frames are generated when the receive packet buffer crosses a predefined
-threshold. When rx is enabled, the transmit unit will halt for the time delay
-specified when a PAUSE frame is received.
-
-Flow Control is enabled by default. If you want to disable a flow control
-capable link partner, use ethtool:
-
- ethtool -A eth? autoneg off RX off TX off
-
-NOTE: For 82598 backplane cards entering 1 gig mode, flow control default
-behavior is changed to off. Flow control in 1 gig mode on these devices can
-lead to Tx hangs.
-
-Intel(R) Ethernet Flow Director
--------------------------------
-Supports advanced filters that direct receive packets by their flows to
-different queues. Enables tight control on routing a flow in the platform.
-Matches flows and CPU cores for flow affinity. Supports multiple parameters
-for flexible flow classification and load balancing.
-
-Flow director is enabled only if the kernel is multiple TX queue capable.
-
-An included script (set_irq_affinity.sh) automates setting the IRQ to CPU
-affinity.
-
-You can verify that the driver is using Flow Director by looking at the counter
-in ethtool: fdir_miss and fdir_match.
-
-Other ethtool Commands:
-To enable Flow Director
- ethtool -K ethX ntuple on
-To add a filter
- Use -U switch. e.g., ethtool -U ethX flow-type tcp4 src-ip 10.0.128.23
- action 1
-To see the list of filters currently present:
- ethtool -u ethX
-
-Perfect Filter: Perfect filter is an interface to load the filter table that
-funnels all flow into queue_0 unless an alternative queue is specified using
-"action". In that case, any flow that matches the filter criteria will be
-directed to the appropriate queue.
-
-If the queue is defined as -1, filter will drop matching packets.
-
-To account for filter matches and misses, there are two stats in ethtool:
-fdir_match and fdir_miss. In addition, rx_queue_N_packets shows the number of
-packets processed by the Nth queue.
-
-NOTE: Receive Packet Steering (RPS) and Receive Flow Steering (RFS) are not
-compatible with Flow Director. IF Flow Director is enabled, these will be
-disabled.
-
-The following three parameters impact Flow Director.
-
-FdirMode
---------
-Valid Range: 0-2 (0=off, 1=ATR, 2=Perfect filter mode)
-Default Value: 1
-
- Flow Director filtering modes.
-
-FdirPballoc
------------
-Valid Range: 0-2 (0=64k, 1=128k, 2=256k)
-Default Value: 0
-
- Flow Director allocated packet buffer size.
-
-AtrSampleRate
---------------
-Valid Range: 1-100
-Default Value: 20
-
- Software ATR Tx packet sample rate. For example, when set to 20, every 20th
- packet, looks to see if the packet will create a new flow.
-
-Node
-----
-Valid Range: 0-n
-Default Value: 1 (off)
-
- 0 - n: where n is the number of NUMA nodes (i.e. 0 - 3) currently online in
- your system
- 1: turns this option off
-
- The Node parameter will allow you to pick which NUMA node you want to have
- the adapter allocate memory on.
-
-max_vfs
--------
-Valid Range: 1-63
-Default Value: 0
-
- If the value is greater than 0 it will also force the VMDq parameter to be 1
- or more.
-
- This parameter adds support for SR-IOV. It causes the driver to spawn up to
- max_vfs worth of virtual function.
-
-
-Additional Configurations
-=========================
-
- Jumbo Frames
- ------------
- The driver supports Jumbo Frames for all adapters. Jumbo Frames support is
- enabled by changing the MTU to a value larger than the default of 1500.
- The maximum value for the MTU is 16110. Use the ip command to
- increase the MTU size. For example:
-
- ip link set dev ethx mtu 9000
-
- The maximum MTU setting for Jumbo Frames is 9710. This value coincides
- with the maximum Jumbo Frames size of 9728.
-
- Generic Receive Offload, aka GRO
- --------------------------------
- The driver supports the in-kernel software implementation of GRO. GRO has
- shown that by coalescing Rx traffic into larger chunks of data, CPU
- utilization can be significantly reduced when under large Rx load. GRO is an
- evolution of the previously-used LRO interface. GRO is able to coalesce
- other protocols besides TCP. It's also safe to use with configurations that
- are problematic for LRO, namely bridging and iSCSI.
-
- Data Center Bridging, aka DCB
- -----------------------------
- DCB is a configuration Quality of Service implementation in hardware.
- It uses the VLAN priority tag (802.1p) to filter traffic. That means
- that there are 8 different priorities that traffic can be filtered into.
- It also enables priority flow control which can limit or eliminate the
- number of dropped packets during network stress. Bandwidth can be
- allocated to each of these priorities, which is enforced at the hardware
- level.
-
- To enable DCB support in ixgbe, you must enable the DCB netlink layer to
- allow the userspace tools (see below) to communicate with the driver.
- This can be found in the kernel configuration here:
-
- -> Networking support
- -> Networking options
- -> Data Center Bridging support
-
- Once this is selected, DCB support must be selected for ixgbe. This can
- be found here:
-
- -> Device Drivers
- -> Network device support (NETDEVICES [=y])
- -> Ethernet (10000 Mbit) (NETDEV_10000 [=y])
- -> Intel(R) 10GbE PCI Express adapters support
- -> Data Center Bridging (DCB) Support
-
- After these options are selected, you must rebuild your kernel and your
- modules.
-
- In order to use DCB, userspace tools must be downloaded and installed.
- The dcbd tools can be found at:
-
- http://e1000.sf.net
-
- Ethtool
- -------
- The driver utilizes the ethtool interface for driver configuration and
- diagnostics, as well as displaying statistical information. The latest
- ethtool version is required for this functionality.
-
- The latest release of ethtool can be found from
- https://www.kernel.org/pub/software/network/ethtool/
-
- FCoE
- ----
- This release of the ixgbe driver contains new code to enable users to use
- Fiber Channel over Ethernet (FCoE) and Data Center Bridging (DCB)
- functionality that is supported by the 82598-based hardware. This code has
- no default effect on the regular driver operation, and configuring DCB and
- FCoE is outside the scope of this driver README. Refer to
- http://www.open-fcoe.org/ for FCoE project information and contact
- e1000-eedc@lists.sourceforge.net for DCB information.
-
- MAC and VLAN anti-spoofing feature
- ----------------------------------
- When a malicious driver attempts to send a spoofed packet, it is dropped by
- the hardware and not transmitted. An interrupt is sent to the PF driver
- notifying it of the spoof attempt.
-
- When a spoofed packet is detected the PF driver will send the following
- message to the system log (displayed by the "dmesg" command):
-
- Spoof event(s) detected on VF (n)
-
- Where n=the VF that attempted to do the spoofing.
-
-
-Performance Tuning
-==================
-
-An excellent article on performance tuning can be found at:
-
-http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
-
-
-Known Issues
-============
-
- Enabling SR-IOV in a 32-bit or 64-bit Microsoft* Windows* Server 2008/R2
- Guest OS using Intel (R) 82576-based GbE or Intel (R) 82599-based 10GbE
- controller under KVM
- ------------------------------------------------------------------------
- KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
- includes traditional PCIe devices, as well as SR-IOV-capable devices using
- Intel 82576-based and 82599-based controllers.
-
- While direct assignment of a PCIe device or an SR-IOV Virtual Function (VF)
- to a Linux-based VM running 2.6.32 or later kernel works fine, there is a
- known issue with Microsoft Windows Server 2008 VM that results in a "yellow
- bang" error. This problem is within the KVM VMM itself, not the Intel driver,
- or the SR-IOV logic of the VMM, but rather that KVM emulates an older CPU
- model for the guests, and this older CPU model does not support MSI-X
- interrupts, which is a requirement for Intel SR-IOV.
-
- If you wish to use the Intel 82576 or 82599-based controllers in SR-IOV mode
- with KVM and a Microsoft Windows Server 2008 guest try the following
- workaround. The workaround is to tell KVM to emulate a different model of CPU
- when using qemu to create the KVM guest:
-
- "-cpu qemu64,model=13"
-
-
-Support
-=======
-
-For general information, go to the Intel support website at:
-
- http://support.intel.com
-
-or the Intel Wired Networking project hosted by Sourceforge at:
-
- http://e1000.sourceforge.net
-
-If an issue is identified with the released source code on the supported
-kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/ixgbevf.rst b/Documentation/networking/ixgbevf.rst
new file mode 100644
index 000000000000..56cde6366c2f
--- /dev/null
+++ b/Documentation/networking/ixgbevf.rst
@@ -0,0 +1,66 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Linux* Base Virtual Function Driver for Intel(R) 10G Ethernet
+=============================================================
+
+Intel 10 Gigabit Virtual Function Linux driver.
+Copyright(c) 1999-2018 Intel Corporation.
+
+Contents
+========
+
+- Identifying Your Adapter
+- Known Issues
+- Support
+
+This driver supports 82599, X540, X550, and X552-based virtual function devices
+that can only be activated on kernels that support SR-IOV.
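+
+As a brief sketch (the PF interface name and VF count here are only examples),
+virtual functions are typically created through the PF driver's standard
+SR-IOV sysfs interface::
+
+ echo 2 > /sys/class/net/<pf ifname>/device/sriov_numvfs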
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel adapter. All hardware requirements listed apply to use
+with Linux.
+
+
+Identifying Your Adapter
+========================
+The driver is compatible with devices based on the following:
+
+ * Intel(R) Ethernet Controller 82598
+ * Intel(R) Ethernet Controller 82599
+ * Intel(R) Ethernet Controller X520
+ * Intel(R) Ethernet Controller X540
+ * Intel(R) Ethernet Controller X550
+ * Intel(R) Ethernet Controller X552
+ * Intel(R) Ethernet Controller X553
+
+For information on how to identify your adapter, and for the latest Intel
+network drivers, refer to the Intel Support website:
+https://www.intel.com/support
+
+Known Issues/Troubleshooting
+============================
+
+SR-IOV requires the correct platform and OS support.
+
+The guest OS loading this driver must support MSI-X interrupts.
+
+This driver is only supported as a loadable module at this time. Intel is not
+supplying patches against the kernel source to allow for static linking of the
+drivers.
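+
+For example, the module would normally be loaded with::
+
+ modprobe ixgbevf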
+
+VLANs: There is a limit of a total of 64 shared VLANs to 1 or more VFs.
+
+
+Support
+=======
+For general information, go to the Intel support website at:
+
+https://www.intel.com/support/
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+https://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on a supported kernel
+with a supported adapter, email the specific information related to the issue
+to e1000-devel@lists.sf.net.
diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt
deleted file mode 100644
index 53d8d2a5a6a3..000000000000
--- a/Documentation/networking/ixgbevf.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
-
-Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2013 Intel Corporation.
-
-Contents
-========
-
-- Identifying Your Adapter
-- Known Issues/Troubleshooting
-- Support
-
-This file describes the ixgbevf Linux* Base Driver for Intel Network
-Connection.
-
-The ixgbevf driver supports 82599-based virtual function devices that can only
-be activated on kernels with CONFIG_PCI_IOV enabled.
-
-The ixgbevf driver supports virtual functions generated by the ixgbe driver
-with a max_vfs value of 1 or greater.
-
-The guest OS loading the ixgbevf driver must support MSI-X interrupts.
-
-VLANs: There is a limit of a total of 32 shared VLANs to 1 or more VFs.
-
-Identifying Your Adapter
-========================
-
-For more information on how to identify your adapter, go to the Adapter &
-Driver ID Guide at:
-
- http://support.intel.com/support/go/network/adapter/idguide.htm
-
-Known Issues/Troubleshooting
-============================
-
-
-Support
-=======
-
-For general information, go to the Intel support website at:
-
- http://support.intel.com
-
-or the Intel Wired Networking project hosted by Sourceforge at:
-
- http://sourceforge.net/projects/e1000
-
-If an issue is identified with the released source code on the supported
-kernel with a supported adapter, email the specific information related
-to the issue to e1000-devel@lists.sf.net
diff --git a/Documentation/networking/netvsc.txt b/Documentation/networking/netvsc.txt
index 92f5b31392fa..3bfa635bbbd5 100644
--- a/Documentation/networking/netvsc.txt
+++ b/Documentation/networking/netvsc.txt
@@ -45,6 +45,15 @@ Features
like packets and significantly reduces CPU usage under heavy Rx
load.
+ Large Receive Offload (LRO), or Receive Side Coalescing (RSC)
+ -------------------------------------------------------------
+ The driver supports LRO/RSC in the vSwitch feature. It reduces the per packet
+ processing overhead by coalescing multiple TCP segments when possible. The
+ feature is enabled by default on VMs running on Windows Server 2019 and
+ later. It may be changed with the following ethtool commands:
+ ethtool -K eth0 lro on
+ ethtool -K eth0 lro off
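+ The current state of the offload can be verified with, for example:
+ ethtool -k eth0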
+
SR-IOV support
--------------
Hyper-V supports SR-IOV as a hardware acceleration option. If SR-IOV
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index b5407163d53b..605e00cdd6be 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1069,6 +1069,31 @@ The kernel interface functions are as follows:
This function may transmit a PING ACK.
+ (*) Get reply timestamp.
+
+ bool rxrpc_kernel_get_reply_time(struct socket *sock,
+ struct rxrpc_call *call,
+ ktime_t *_ts)
+
+ This allows the timestamp on the first DATA packet of the reply of a
+ client call to be queried, provided that it is still in the Rx ring. If
+ successful, the timestamp will be stored into *_ts and true will be
+ returned; false will be returned otherwise.
+
+ (*) Get remote client epoch.
+
+ u32 rxrpc_kernel_get_epoch(struct socket *sock,
+ struct rxrpc_call *call)
+
+ This allows the epoch that's contained in packets of an incoming client
+ call to be queried. This value is returned. The function is always
+ successful if the call is still in progress. It shouldn't be called once
+ the call has expired. Note that calling this on a local client call only
+ returns the local epoch.
+
+ This value can be used to determine if the remote client has been
+ restarted as it shouldn't change otherwise.
+
=======================
CONFIGURABLE PARAMETERS
diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt
deleted file mode 100644
index 9c7139d57e57..000000000000
--- a/Documentation/networking/tcp.txt
+++ /dev/null
@@ -1,101 +0,0 @@
-TCP protocol
-============
-
-Last updated: 3 June 2017
-
-Contents
-========
-
-- Congestion control
-- How the new TCP output machine [nyi] works
-
-Congestion control
-==================
-
-The following variables are used in the tcp_sock for congestion control:
-snd_cwnd The size of the congestion window
-snd_ssthresh Slow start threshold. We are in slow start if
- snd_cwnd is less than this.
-snd_cwnd_cnt A counter used to slow down the rate of increase
- once we exceed slow start threshold.
-snd_cwnd_clamp This is the maximum size that snd_cwnd can grow to.
-snd_cwnd_stamp Timestamp for when congestion window last validated.
-snd_cwnd_used Used as a highwater mark for how much of the
- congestion window is in use. It is used to adjust
- snd_cwnd down when the link is limited by the
- application rather than the network.
-
-As of 2.6.13, Linux supports pluggable congestion control algorithms.
-A congestion control mechanism can be registered through functions in
-tcp_cong.c. The functions used by the congestion control mechanism are
-registered via passing a tcp_congestion_ops struct to
-tcp_register_congestion_control. As a minimum, the congestion control
-mechanism must provide a valid name and must implement either ssthresh,
-cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
-
-Private data for a congestion control mechanism is stored in tp->ca_priv.
-tcp_ca(tp) returns a pointer to this space. This is preallocated space - it
-is important to check the size of your private data will fit this space, or
-alternatively, space could be allocated elsewhere and a pointer to it could
-be stored here.
-
-There are three kinds of congestion control algorithms currently: The
-simplest ones are derived from TCP reno (highspeed, scalable) and just
-provide an alternative congestion window calculation. More complex
-ones like BIC try to look at other events to provide better
-heuristics. There are also round trip time based algorithms like
-Vegas and Westwood+.
-
-Good TCP congestion control is a complex problem because the algorithm
-needs to maintain fairness and performance. Please review current
-research and RFC's before developing new modules.
-
-The default congestion control mechanism is chosen based on the
-DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
-value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
-module will be autoloaded if needed and you will get the expected protocol. If
-you ask for an unknown congestion method, then the sysctl attempt will fail.
-
-If you remove a TCP congestion control module, then you will get the next
-available one. Since reno cannot be built as a module, and cannot be
-removed, it will always be available.
-
-How the new TCP output machine [nyi] works.
-===========================================
-
-Data is kept on a single queue. The skb->users flag tells us if the frame is
-one that has been queued already. To add a frame we throw it on the end. Ack
-walks down the list from the start.
-
-We keep a set of control flags
-
-
- sk->tcp_pend_event
-
- TCP_PEND_ACK Ack needed
- TCP_ACK_NOW Needed now
- TCP_WINDOW Window update check
- TCP_WINZERO Zero probing
-
-
- sk->transmit_queue The transmission frame begin
- sk->transmit_new First new frame pointer
- sk->transmit_end Where to add frames
-
- sk->tcp_last_tx_ack Last ack seen
- sk->tcp_dup_ack Dup ack count for fast retransmit
-
-
-Frames are queued for output by tcp_write. We do our best to send the frames
-off immediately if possible, but otherwise queue and compute the body
-checksum in the copy.
-
-When a write is done we try to clear any pending events and piggy back them.
-If the window is full we queue full sized frames. On the first timeout in
-zero window we split this.
-
-On a timer we walk the retransmit list to send any retransmits, update the
-backoff timers etc. A change of route table stamp causes a change of header
-and recompute. We add any new tcp level headers and refinish the checksum
-before sending.
-
diff --git a/Documentation/networking/xfrm_device.txt b/Documentation/networking/xfrm_device.txt
index 50c34ca65efe..267f55b5f54a 100644
--- a/Documentation/networking/xfrm_device.txt
+++ b/Documentation/networking/xfrm_device.txt
@@ -68,6 +68,10 @@ and an indication of whether it is for Rx or Tx. The driver should
- verify the algorithm is supported for offloads
- store the SA information (key, salt, target-ip, protocol, etc)
- enable the HW offload of the SA
+ - return status value:
+ 0 success
+ -EOPNOTSUPP offload not supported, try SW IPsec
+ other fail the request
The driver can also set an offload_handle in the SA, an opaque void pointer
that can be used to convey context into the fast-path offload requests.
diff --git a/Documentation/parisc/00-INDEX b/Documentation/parisc/00-INDEX
deleted file mode 100644
index cbd060961f43..000000000000
--- a/Documentation/parisc/00-INDEX
+++ /dev/null
@@ -1,6 +0,0 @@
-00-INDEX
- - this file.
-debugging
- - some debugging hints for real-mode code
-registers
- - current/planned usage of registers
diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX
deleted file mode 100644
index 7f3c2def2cac..000000000000
--- a/Documentation/power/00-INDEX
+++ /dev/null
@@ -1,44 +0,0 @@
-00-INDEX
- - This file
-apm-acpi.txt
- - basic info about the APM and ACPI support.
-basic-pm-debugging.txt
- - Debugging suspend and resume
-charger-manager.txt
- - Battery charger management.
-admin-guide/devices.rst
- - How drivers interact with system-wide power management
-drivers-testing.txt
- - Testing suspend and resume support in device drivers
-freezing-of-tasks.txt
- - How processes and controlled during suspend
-interface.txt
- - Power management user interface in /sys/power
-opp.txt
- - Operating Performance Point library
-pci.txt
- - How the PCI Subsystem Does Power Management
-pm_qos_interface.txt
- - info on Linux PM Quality of Service interface
-power_supply_class.txt
- - Tells userspace about battery, UPS, AC or DC power supply properties
-runtime_pm.txt
- - Power management framework for I/O devices.
-s2ram.txt
- - How to get suspend to ram working (and debug it when it isn't)
-states.txt
- - System power management states
-suspend-and-cpuhotplug.txt
- - Explains the interaction between Suspend-to-RAM (S3) and CPU hotplug
-swsusp-and-swap-files.txt
- - Using swap files with software suspend (to disk)
-swsusp-dmcrypt.txt
- - How to use dm-crypt and software suspend (to disk) together
-swsusp.txt
- - Goals, implementation, and usage of software suspend (ACPI S3)
-tricks.txt
- - How to trick software suspend (to disk) into working when it isn't
-userland-swsusp.txt
- - Experimental implementation of software suspend in userspace
-video.txt
- - Video issues during resume from suspend
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index cc87adf44c0a..236d1fb13640 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -56,7 +56,7 @@ If you want to limit the suspend image size to N bytes, do
echo N > /sys/power/image_size
-before suspend (it is limited to 500 MB by default).
+before suspend (it is limited to around 2/5 of available RAM by default).
. The resume process checks for the presence of the resume device,
if found, it then checks the contents for the hibernation image signature.
diff --git a/Documentation/powerpc/00-INDEX b/Documentation/powerpc/00-INDEX
deleted file mode 100644
index 9dc845cf7d88..000000000000
--- a/Documentation/powerpc/00-INDEX
+++ /dev/null
@@ -1,34 +0,0 @@
-Index of files in Documentation/powerpc. If you think something about
-Linux/PPC needs an entry here, needs correction or you've written one
-please mail me.
- Cort Dougan (cort@fsmlabs.com)
-
-00-INDEX
- - this file
-bootwrapper.txt
- - Information on how the powerpc kernel is wrapped for boot on various
- different platforms.
-cpu_features.txt
- - info on how we support a variety of CPUs with minimal compile-time
- options.
-cxl.txt
- - Overview of the CXL driver.
-eeh-pci-error-recovery.txt
- - info on PCI Bus EEH Error Recovery
-firmware-assisted-dump.txt
- - Documentation on the firmware assisted dump mechanism "fadump".
-hvcs.txt
- - IBM "Hypervisor Virtual Console Server" Installation Guide
-mpc52xx.txt
- - Linux 2.6.x on MPC52xx family
-pmu-ebb.txt
- - Description of the API for using the PMU with Event Based Branches.
-qe_firmware.txt
- - describes the layout of firmware binaries for the Freescale QUICC
- Engine and the code that parses and uploads the microcode therein.
-ptrace.txt
- - Information on the ptrace interfaces for hardware debug registers.
-transactional_memory.txt
- - Overview of the Power8 transactional memory support.
-dscr.txt
- - Overview DSCR (Data Stream Control Register) support.
diff --git a/Documentation/preempt-locking.txt b/Documentation/preempt-locking.txt
index c945062be66c..509f5a422d57 100644
--- a/Documentation/preempt-locking.txt
+++ b/Documentation/preempt-locking.txt
@@ -3,7 +3,6 @@ Proper Locking Under a Preemptible Kernel: Keeping Kernel Code Preempt-Safe
===========================================================================
:Author: Robert Love <rml@tech9.net>
-:Last Updated: 28 Aug 2002
Introduction
@@ -92,11 +91,12 @@ any locks or interrupts are disabled, since preemption is implicitly disabled
in those cases.
But keep in mind that 'irqs disabled' is a fundamentally unsafe way of
-disabling preemption - any spin_unlock() decreasing the preemption count
-to 0 might trigger a reschedule. A simple printk() might trigger a reschedule.
-So use this implicit preemption-disabling property only if you know that the
-affected codepath does not do any of this. Best policy is to use this only for
-small, atomic code that you wrote and which calls no complex functions.
+disabling preemption - any cond_resched() or cond_resched_lock() might trigger
+a reschedule if the preempt count is 0. A simple printk() might trigger a
+reschedule. So use this implicit preemption-disabling property only if you
+know that the affected codepath does not do any of this. Best policy is to use
+this only for small, atomic code that you wrote and which calls no complex
+functions.
Example::
diff --git a/Documentation/process/2.Process.rst b/Documentation/process/2.Process.rst
index 51d0349c7809..ae020d84d7c4 100644
--- a/Documentation/process/2.Process.rst
+++ b/Documentation/process/2.Process.rst
@@ -82,7 +82,7 @@ As an example, here is how the 4.16 development cycle went (all dates in
March 11 4.16-rc5
March 18 4.16-rc6
March 25 4.16-rc7
- April 1 4.17 stable release
+ April 1 4.16 stable release
============== ===============================
How do the developers decide when to close the development cycle and create
diff --git a/Documentation/process/adding-syscalls.rst b/Documentation/process/adding-syscalls.rst
index 0d4f29bc798b..88a7d5c8bb2f 100644
--- a/Documentation/process/adding-syscalls.rst
+++ b/Documentation/process/adding-syscalls.rst
@@ -232,7 +232,7 @@ normally be optional, so add a ``CONFIG`` option (typically to
by the option.
- Make the option depend on EXPERT if it should be hidden from normal users.
- Make any new source files implementing the function dependent on the CONFIG
- option in the Makefile (e.g. ``obj-$(CONFIG_XYZZY_SYSCALL) += xyzzy.c``).
+ option in the Makefile (e.g. ``obj-$(CONFIG_XYZZY_SYSCALL) += xyzzy.o``).
- Double check that the kernel still builds with the new CONFIG option turned
off.
diff --git a/Documentation/process/code-of-conduct-interpretation.rst b/Documentation/process/code-of-conduct-interpretation.rst
new file mode 100644
index 000000000000..e899f14a4ba2
--- /dev/null
+++ b/Documentation/process/code-of-conduct-interpretation.rst
@@ -0,0 +1,156 @@
+.. _code_of_conduct_interpretation:
+
+Linux Kernel Contributor Covenant Code of Conduct Interpretation
+================================================================
+
+The :ref:`code_of_conduct` is a general document meant to
+provide a set of rules for almost any open source community. Every
+open-source community is unique and the Linux kernel is no exception.
+Because of this, this document describes how we in the Linux kernel
+community will interpret it. We also do not expect this interpretation
+to be static over time, and will adjust it as needed.
+
+The Linux kernel development effort is a very personal process compared
+to "traditional" ways of developing software. Your contributions and
+ideas behind them will be carefully reviewed, often resulting in
+critique and criticism. The review will almost always require
+improvements before the material can be included in the
+kernel. Know that this happens because everyone involved wants to see
+the best possible solution for the overall success of Linux. This
+development process has been proven to create the most robust operating
+system kernel ever, and we do not want to do anything to cause the
+quality of submission and eventual result to ever decrease.
+
+Maintainers
+-----------
+
+The Code of Conduct uses the term "maintainers" numerous times. In the
+kernel community, a "maintainer" is anyone who is responsible for a
+subsystem, driver, or file, and is listed in the MAINTAINERS file in the
+kernel source tree.
+
+Responsibilities
+----------------
+
+The Code of Conduct mentions rights and responsibilities for
+maintainers, and this needs some further clarifications.
+
+First and foremost, it is a reasonable expectation to have maintainers
+lead by example.
+
+That being said, our community is vast and broad, and there is no new
+requirement for maintainers to unilaterally handle how other people
+behave in the parts of the community where they are active. That
+responsibility is upon all of us, and ultimately the Code of Conduct
+documents final escalation paths in case of unresolved concerns
+regarding conduct issues.
+
+Maintainers should be willing to help when problems occur, and work with
+others in the community when needed. Do not be afraid to reach out to
+the Technical Advisory Board (TAB) or other maintainers if you're
+uncertain how to handle situations that come up. It will not be
+considered a violation report unless you want it to be. If you are
+uncertain about approaching the TAB or any other maintainers, please
+reach out to our conflict mediator, Mishi Choudhary <mishi@linux.com>.
+
+In the end, "be kind to each other" is really what the end goal is for
+everybody. We know everyone is human and we all fail at times, but the
+primary goal for all of us should be to work toward amicable resolutions
+of problems. Enforcement of the code of conduct will only be a last
+resort option.
+
+Our goal of creating a robust and technically advanced operating system
+and the technical complexity involved naturally require expertise and
+decision-making.
+
+The required expertise varies depending on the area of contribution. It
+is determined mainly by context and technical complexity and only
+secondarily by the expectations of contributors and maintainers.
+
+Both the expertise expectations and decision-making are subject to
+discussion, but at the very end there is a basic necessity to be able to
+make decisions in order to make progress. This prerogative is in the
+hands of maintainers and the project's leadership and is expected to be used
+in good faith.
+
+As a consequence, setting expertise expectations, making decisions and
+rejecting unsuitable contributions are not viewed as a violation of the
+Code of Conduct.
+
+While maintainers are in general welcoming to newcomers, their capacity
+of helping contributors overcome the entry hurdles is limited, so they
+have to set priorities. This, also, is not to be seen as a violation of
+the Code of Conduct. The kernel community is aware of that and provides
+entry level programs in various forms like kernelnewbies.org.
+
+Scope
+-----
+
+The Linux kernel community primarily interacts on a set of public email
+lists distributed around a number of different servers controlled by a
+number of different companies or individuals. All of these lists are
+defined in the MAINTAINERS file in the kernel source tree. Any emails
+sent to those mailing lists are considered covered by the Code of
+Conduct.
+
+Developers who use the kernel.org bugzilla, and other subsystem bugzilla
+or bug tracking tools should follow the guidelines of the Code of
+Conduct. The Linux kernel community does not have an "official" project
+email address, or "official" social media address. Any activity
+performed using a kernel.org email account must follow the Code of
+Conduct as published for kernel.org, just as any individual using a
+corporate email account must follow the specific rules of that
+corporation.
+
+The Code of Conduct does not prohibit continuing to include names, email
+addresses, and associated comments in mailing list messages, kernel
+change log messages, or code comments.
+
+Interaction in other forums is covered by whatever rules apply to said
+forums and is in general not covered by the Code of Conduct. Exceptions
+may be considered for extreme circumstances.
+
+Contributions submitted for the kernel should use appropriate language.
+Content that already exists predating the Code of Conduct will not be
+addressed now as a violation. Inappropriate language can be seen as a
+bug, though; such bugs will be fixed more quickly if any interested
+parties submit patches to that effect. Expressions that are currently
+part of the user/kernel API, or reflect terminology used in published
+standards or specifications, are not considered bugs.
+
+Enforcement
+-----------
+
+The address listed in the Code of Conduct goes to the Code of Conduct
+Committee. The exact members receiving these emails at any given time
+are listed at https://kernel.org/code-of-conduct.html. Members cannot
+access reports made before they joined or after they have left the
+committee.
+
+The initial Code of Conduct Committee consists of volunteer members of
+the TAB, as well as a professional mediator acting as a neutral third
+party. The first task of the committee is to establish documented
+processes, which will be made public.
+
+Any member of the committee, including the mediator, can be contacted
+directly if a reporter does not wish to include the full committee in a
+complaint or concern.
+
+The Code of Conduct Committee reviews the cases according to the
+processes (see above) and consults with the TAB as needed and
+appropriate, for instance to request and receive information about the
+kernel community.
+
+Any decisions by the committee will be brought to the TAB, for
+implementation of enforcement with the relevant maintainers if needed.
+A decision by the Code of Conduct Committee can be overturned by the TAB
+by a two-thirds vote.
+
+At quarterly intervals, the Code of Conduct Committee and TAB will
+provide a report summarizing the anonymised reports that the Code of
+Conduct Committee has received and their status, as well as details of any
+overridden decisions including complete and identifiable voting details.
+
+We expect to establish a different process for Code of Conduct Committee
+staffing beyond the bootstrap period. This document will be updated
+with that information when this occurs.
diff --git a/Documentation/process/code-of-conduct.rst b/Documentation/process/code-of-conduct.rst
index ab7c24b5478c..be50294aebd5 100644
--- a/Documentation/process/code-of-conduct.rst
+++ b/Documentation/process/code-of-conduct.rst
@@ -1,3 +1,5 @@
+.. _code_of_conduct:
+
Contributor Covenant Code of Conduct
++++++++++++++++++++++++++++++++++++
@@ -63,19 +65,22 @@ Enforcement
===========
Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting the Technical Advisory Board (TAB) at
-<tab@lists.linux-foundation.org>. All complaints will be reviewed and
-investigated and will result in a response that is deemed necessary and
-appropriate to the circumstances. The TAB is obligated to maintain
-confidentiality with regard to the reporter of an incident. Further details of
-specific enforcement policies may be posted separately.
-
-Maintainers who do not follow or enforce the Code of Conduct in good faith may
-face temporary or permanent repercussions as determined by other members of the
-project’s leadership.
+reported by contacting the Code of Conduct Committee at
+<conduct@kernel.org>. All complaints will be reviewed and investigated
+and will result in a response that is deemed necessary and appropriate
+to the circumstances. The Code of Conduct Committee is obligated to
+maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted
+separately.
Attribution
===========
This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+Interpretation
+==============
+
+See the :ref:`code_of_conduct_interpretation` document for how the Linux
+kernel community will be interpreting this document.
diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst
new file mode 100644
index 000000000000..0ef5a63c06ba
--- /dev/null
+++ b/Documentation/process/deprecated.rst
@@ -0,0 +1,119 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================================================
+Deprecated Interfaces, Language Features, Attributes, and Conventions
+=====================================================================
+
+In a perfect world, it would be possible to convert all instances of
+some deprecated API into the new API and entirely remove the old API in
+a single development cycle. However, due to the size of the kernel, the
+maintainership hierarchy, and timing, it's not always feasible to do these
+kinds of conversions at once. This means that new instances may sneak into
+the kernel while old ones are being removed, only making the amount of
+work to remove the API grow. In order to educate developers about what
+has been deprecated and why, this list has been created as a place to
+point when uses of deprecated things are proposed for inclusion in the
+kernel.
+
+__deprecated
+------------
+While this attribute does visually mark an interface as deprecated,
+it `does not produce warnings during builds any more
+<https://git.kernel.org/linus/771c035372a036f83353eef46dbb829780330234>`_
+because one of the standing goals of the kernel is to build without
+warnings and no one was actually doing anything to remove these deprecated
+interfaces. While using `__deprecated` is nice to note an old API in
+a header file, it isn't the full solution. Such interfaces must either
+be fully removed from the kernel, or added to this file to discourage
+others from using them in the future.
+
+open-coded arithmetic in allocator arguments
+--------------------------------------------
+Dynamic size calculations (especially multiplication) should not be
+performed in memory allocator (or similar) function arguments due to the
+risk of them overflowing. This could lead to values wrapping around and a
+smaller allocation being made than the caller was expecting. Using those
+allocations could lead to linear overflows of heap memory and other
+misbehaviors. (One exception to this is literal values where the compiler
+can warn if they might overflow. Though using literals for arguments as
+suggested below is also harmless.)
+
+For example, do not use ``count * size`` as an argument, as in::
+
+ foo = kmalloc(count * size, GFP_KERNEL);
+
+Instead, the 2-factor form of the allocator should be used::
+
+ foo = kmalloc_array(count, size, GFP_KERNEL);
+
+If no 2-factor form is available, the saturate-on-overflow helpers should
+be used::
+
+ bar = vmalloc(array_size(count, size));
+
+Another common case to avoid is calculating the size of a structure with
+a trailing array of other structures, as in::
+
+ header = kzalloc(sizeof(*header) + count * sizeof(*header->item),
+ GFP_KERNEL);
+
+Instead, use the helper::
+
+ header = kzalloc(struct_size(header, item, count), GFP_KERNEL);
+
+See :c:func:`array_size`, :c:func:`array3_size`, and :c:func:`struct_size`,
+for more details as well as the related :c:func:`check_add_overflow` and
+:c:func:`check_mul_overflow` family of functions.
+
+simple_strtol(), simple_strtoll(), simple_strtoul(), simple_strtoull()
+----------------------------------------------------------------------
+The :c:func:`simple_strtol`, :c:func:`simple_strtoll`,
+:c:func:`simple_strtoul`, and :c:func:`simple_strtoull` functions
+explicitly ignore overflows, which may lead to unexpected results
+in callers. The respective :c:func:`kstrtol`, :c:func:`kstrtoll`,
+:c:func:`kstrtoul`, and :c:func:`kstrtoull` functions tend to be the
+correct replacements, though note that those require the string to be
+NUL or newline terminated.
+
+strcpy()
+--------
+:c:func:`strcpy` performs no bounds checking on the destination
+buffer. This could result in linear overflows beyond the
+end of the buffer, leading to all kinds of misbehaviors. While
+`CONFIG_FORTIFY_SOURCE=y` and various compiler flags help reduce the
+risk of using this function, there is no good reason to add new uses of
+this function. The safe replacement is :c:func:`strscpy`.
+
+strncpy() on NUL-terminated strings
+-----------------------------------
+Use of :c:func:`strncpy` does not guarantee that the destination buffer
+will be NUL terminated. This can lead to various linear read overflows
+and other misbehavior due to the missing termination. It also NUL-pads the
+destination buffer if the source contents are shorter than the destination
+buffer size, which may be a needless performance penalty for callers using
+only NUL-terminated strings. The safe replacement is :c:func:`strscpy`.
+(Users of :c:func:`strscpy` still needing NUL-padding will need an
+explicit :c:func:`memset` added.)
+
+If a caller is using non-NUL-terminated strings, :c:func:`strncpy` can
+still be used, but destinations should be marked with the `__nonstring
+<https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html>`_
+attribute to avoid future compiler warnings.
+
+strlcpy()
+---------
+:c:func:`strlcpy` reads the entire source buffer first, possibly exceeding
+the given limit of bytes to copy. This is inefficient and can lead to
+linear read overflows if a source string is not NUL-terminated. The
+safe replacement is :c:func:`strscpy`.
+
+Variable Length Arrays (VLAs)
+-----------------------------
+Using stack VLAs produces much worse machine code than statically
+sized stack arrays. While these non-trivial `performance issues
+<https://git.kernel.org/linus/02361bc77888>`_ are reason enough to
+eliminate VLAs, they are also a security risk. Dynamic growth of a stack
+array may exceed the remaining memory in the stack segment. This could
+lead to a crash, possibly overwriting sensitive contents at the end of the
+stack (when built without `CONFIG_THREAD_INFO_IN_TASK=y`), or overwriting
+memory adjacent to the stack (when built without `CONFIG_VMAP_STACK=y`).
diff --git a/Documentation/process/howto.rst b/Documentation/process/howto.rst
index 130bf0f48875..dcb25f94188e 100644
--- a/Documentation/process/howto.rst
+++ b/Documentation/process/howto.rst
@@ -57,12 +57,13 @@ of doing things.
Legal Issues
------------
-The Linux kernel source code is released under the GPL. Please see the
-file, COPYING, in the main directory of the source tree, for details on
-the license. If you have further questions about the license, please
-contact a lawyer, and do not ask on the Linux kernel mailing list. The
-people on the mailing lists are not lawyers, and you should not rely on
-their statements on legal matters.
+The Linux kernel source code is released under the GPL. Please see the file
+COPYING in the main directory of the source tree. The Linux kernel licensing
+rules and how to use `SPDX <https://spdx.org/>`_ identifiers in source code are
+described in :ref:`Documentation/process/license-rules.rst <kernel_licensing>`.
+If you have further questions about the license, please contact a lawyer, and do
+not ask on the Linux kernel mailing list. The people on the mailing lists are
+not lawyers, and you should not rely on their statements on legal matters.
For common questions and answers about the GPL, please see:
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 9ae3e317bddf..757808526d9a 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -19,8 +19,10 @@ Below are the essential guides that every developer should read.
.. toctree::
:maxdepth: 1
+ license-rules
howto
code-of-conduct
+ code-of-conduct-interpretation
development-process
submitting-patches
coding-style
@@ -41,6 +43,7 @@ Other guides to the community that are of interest to most developers are:
stable-kernel-rules
submit-checklist
kernel-docs
+ deprecated
These are some overall technical guides that have been put here for now for
lack of a better place.
diff --git a/Documentation/process/license-rules.rst b/Documentation/process/license-rules.rst
index 8ea26325fe3f..2bb8c0fc2238 100644
--- a/Documentation/process/license-rules.rst
+++ b/Documentation/process/license-rules.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
+.. _kernel_licensing:
+
Linux kernel licensing rules
============================
diff --git a/Documentation/s390/00-INDEX b/Documentation/s390/00-INDEX
deleted file mode 100644
index 317f0378ae01..000000000000
--- a/Documentation/s390/00-INDEX
+++ /dev/null
@@ -1,28 +0,0 @@
-00-INDEX
- - this file.
-3270.ChangeLog
- - ChangeLog for the UTS Global 3270-support patch (outdated).
-3270.txt
- - how to use the IBM 3270 display system support.
-cds.txt
- - s390 common device support (common I/O layer).
-CommonIO
- - common I/O layer command line parameters, procfs and debugfs entries
-config3270.sh
- - example configuration for 3270 devices.
-DASD
- - information on the DASD disk device driver.
-Debugging390.txt
- - hints for debugging on s390 systems.
-driver-model.txt
- - information on s390 devices and the driver model.
-monreader.txt
- - information on accessing the z/VM monitor stream from Linux.
-qeth.txt
- - HiperSockets Bridge Port Support.
-s390dbf.txt
- - information on using the s390 debug feature.
-vfio-ccw.txt
- information on the vfio-ccw I/O subchannel driver.
-zfcpdump.txt
- - information on the s390 SCSI dump tool.
diff --git a/Documentation/s390/vfio-ap.txt b/Documentation/s390/vfio-ap.txt
new file mode 100644
index 000000000000..65167cfe4485
--- /dev/null
+++ b/Documentation/s390/vfio-ap.txt
@@ -0,0 +1,837 @@
+Introduction:
+============
+The Adjunct Processor (AP) facility is an IBM Z cryptographic facility comprised
+of three AP instructions and from 1 up to 256 PCIe cryptographic adapter cards.
+The AP devices provide cryptographic functions to all CPUs assigned to a
+linux system running in an IBM Z system LPAR.
+
+The AP adapter cards are exposed via the AP bus. The motivation for vfio-ap
+is to make AP cards available to KVM guests using the VFIO mediated device
+framework. This implementation relies considerably on the s390 virtualization
+facilities which do most of the hard work of providing direct access to AP
+devices.
+
+AP Architectural Overview:
+=========================
+To facilitate the comprehension of the design, let's start with some
+definitions:
+
+* AP adapter
+
+ An AP adapter is an IBM Z adapter card that can perform cryptographic
+ functions. There can be from 0 to 256 adapters assigned to an LPAR. Adapters
+ assigned to the LPAR in which a linux host is running will be available to
+ the linux host. Each adapter is identified by a number from 0 to 255; however,
+ the maximum adapter number is determined by machine model and/or adapter type.
+ When installed, an AP adapter is accessed by AP instructions executed by any
+ CPU.
+
+ The AP adapter cards are assigned to a given LPAR via the system's Activation
+ Profile which can be edited via the HMC. When the linux host system is IPL'd
+ in the LPAR, the AP bus detects the AP adapter cards assigned to the LPAR and
+ creates a sysfs device for each assigned adapter. For example, if AP adapters
+ 4 and 10 (0x0a) are assigned to the LPAR, the AP bus will create the following
+ sysfs device entries:
+
+ /sys/devices/ap/card04
+ /sys/devices/ap/card0a
+
+ Symbolic links to these devices will also be created in the AP bus devices
+ sub-directory:
+
+ /sys/bus/ap/devices/[card04]
+ /sys/bus/ap/devices/[card0a]
+
+* AP domain
+
+ An adapter is partitioned into domains. An adapter can hold up to 256 domains
+ depending upon the adapter type and hardware configuration. A domain is
+ identified by a number from 0 to 255; however, the maximum domain number is
+ determined by machine model and/or adapter type. A domain can be thought of
+ as a set of hardware registers and memory used for processing AP commands. A
+ domain can be configured with a secure private key used for clear key
+ encryption. A domain is classified in one of two ways depending upon how it
+ may be accessed:
+
+ * Usage domains are domains that are targeted by an AP instruction to
+ process an AP command.
+
+ * Control domains are domains that are changed by an AP command sent to a
+ usage domain; for example, to set the secure private key for the control
+ domain.
+
+ The AP usage and control domains are assigned to a given LPAR via the system's
+ Activation Profile which can be edited via the HMC. When a linux host system
+ is IPL'd in the LPAR, the AP bus module detects the AP usage and control
+ domains assigned to the LPAR. The domain number of each usage domain and
+ adapter number of each AP adapter are combined to create AP queue devices
+ (see AP Queue section below). The domain number of each control domain will be
+ represented in a bitmask and stored in a sysfs file
+ /sys/bus/ap/ap_control_domain_mask. The bits in the mask, from most to least
+ significant bit, correspond to domains 0-255.
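+
+ For example, the resulting bitmask can be inspected with:
+
+ cat /sys/bus/ap/ap_control_domain_mask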
+
+* AP Queue
+
+ An AP queue is the means by which an AP command is sent to a usage domain
+ inside a specific adapter. An AP queue is identified by a tuple
+ comprised of an AP adapter ID (APID) and an AP queue index (APQI). The
+ APQI corresponds to a given usage domain number within the adapter. This tuple
+ forms an AP Queue Number (APQN) uniquely identifying an AP queue. AP
+ instructions include a field containing the APQN to identify the AP queue to
+ which the AP command is to be sent for processing.
+
+ The AP bus will create a sysfs device for each APQN that can be derived from
+ the cross product of the AP adapter and usage domain numbers detected when the
+ AP bus module is loaded. For example, if adapters 4 and 10 (0x0a) and usage
+ domains 6 and 71 (0x47) are assigned to the LPAR, the AP bus will create the
+ following sysfs entries:
+
+ /sys/devices/ap/card04/04.0006
+ /sys/devices/ap/card04/04.0047
+ /sys/devices/ap/card0a/0a.0006
+ /sys/devices/ap/card0a/0a.0047
+
+ The following symbolic links to these devices will be created in the AP bus
+ devices subdirectory:
+
+ /sys/bus/ap/devices/[04.0006]
+ /sys/bus/ap/devices/[04.0047]
+ /sys/bus/ap/devices/[0a.0006]
+ /sys/bus/ap/devices/[0a.0047]
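+
+ The queue devices visible on a given host can thus be enumerated with, for
+ example:
+
+ ls /sys/bus/ap/devices/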
+
+* AP Instructions:
+
+ There are three AP instructions:
+
+ * NQAP: to enqueue an AP command-request message to a queue
+ * DQAP: to dequeue an AP command-reply message from a queue
+ * PQAP: to administer the queues
+
+ AP instructions identify the domain that is targeted to process the AP
+ command; this must be one of the usage domains. An AP command may modify a
+ domain that is not one of the usage domains, but the modified domain
+ must be one of the control domains.
+
+AP and SIE:
+==========
+Let's now take a look at how AP instructions executed on a guest are interpreted
+by the hardware.
+
+A satellite control block called the Crypto Control Block (CRYCB) is attached to
+our main hardware virtualization control block. The CRYCB contains three fields
+to identify the adapters, usage domains and control domains assigned to the KVM
+guest:
+
+* The AP Mask (APM) field is a bit mask that identifies the AP adapters assigned
+ to the KVM guest. Each bit in the mask, from left to right (i.e. from most
+ significant to least significant bit in big endian order), corresponds to
+ an APID from 0-255. If a bit is set, the corresponding adapter is valid for
+ use by the KVM guest.
+
+* The AP Queue Mask (AQM) field is a bit mask identifying the AP usage domains
+ assigned to the KVM guest. Each bit in the mask, from left to right (i.e. from
+ most significant to least significant bit in big endian order), corresponds to
+ an AP queue index (APQI) from 0-255. If a bit is set, the corresponding queue
+ is valid for use by the KVM guest.
+
+* The AP Domain Mask field is a bit mask that identifies the AP control domains
+ assigned to the KVM guest. The ADM bit mask controls which domains can be
+ changed by an AP command-request message sent to a usage domain from the
+ guest. Each bit in the mask, from left to right (i.e. from most significant to
+ least significant bit in big endian order), corresponds to a domain from
+ 0-255. If a bit is set, the corresponding domain can be modified by an AP
+ command-request message sent to a usage domain.
+
+If you recall from the description of an AP Queue, AP instructions include
+an APQN to identify the AP queue to which an AP command-request message is to be
+sent (NQAP and PQAP instructions), or from which a command-reply message is to
+be received (DQAP instruction). The validity of an APQN is defined by the matrix
+calculated from the APM and AQM; it is the cross product of all assigned adapter
+numbers (APM) with all assigned queue indexes (AQM). For example, if adapters 1
+and 2 and usage domains 5 and 6 are assigned to a guest, the APQNs (1,5), (1,6),
+(2,5) and (2,6) will be valid for the guest.
+
+The APQNs can provide secure key functionality - i.e., a private key is stored
+on the adapter card for each of its domains - so each APQN must be assigned to
+at most one guest or to the linux host.
+
+ Example 1: Valid configuration:
+ ------------------------------
+ Guest1: adapters 1,2 domains 5,6
+  Guest2: adapters 1,2 domain 7
+
+ This is valid because both guests have a unique set of APQNs:
+ Guest1 has APQNs (1,5), (1,6), (2,5), (2,6);
+ Guest2 has APQNs (1,7), (2,7)
+
+ Example 2: Valid configuration:
+ ------------------------------
+ Guest1: adapters 1,2 domains 5,6
+ Guest2: adapters 3,4 domains 5,6
+
+ This is also valid because both guests have a unique set of APQNs:
+ Guest1 has APQNs (1,5), (1,6), (2,5), (2,6);
+ Guest2 has APQNs (3,5), (3,6), (4,5), (4,6)
+
+ Example 3: Invalid configuration:
+ --------------------------------
+ Guest1: adapters 1,2 domains 5,6
+ Guest2: adapter 1 domains 6,7
+
+ This is an invalid configuration because both guests have access to
+ APQN (1,6).
+
+The Design:
+===========
+The design introduces three new objects:
+
+1. AP matrix device
+2. VFIO AP device driver (vfio_ap.ko)
+3. VFIO AP mediated matrix pass-through device
+
+The VFIO AP device driver
+-------------------------
+The VFIO AP (vfio_ap) device driver serves the following purposes:
+
+1. Provides the interfaces to secure APQNs for exclusive use of KVM guests.
+
+2. Sets up the VFIO mediated device interfaces to manage a mediated matrix
+ device and creates the sysfs interfaces for assigning adapters, usage
+ domains, and control domains comprising the matrix for a KVM guest.
+
+3. Configures the APM, AQM and ADM in the CRYCB referenced by a KVM guest's
+   SIE state description to grant the guest access to a matrix of AP devices.
+
+Reserve APQNs for exclusive use of KVM guests
+---------------------------------------------
+The following block diagram illustrates the mechanism by which APQNs are
+reserved:
+
+ +------------------+
+ 7 remove | |
+ +--------------------> cex4queue driver |
+ | | |
+ | +------------------+
+ |
+ |
+ | +------------------+ +-----------------+
+ | 5 register driver | | 3 create | |
+ | +----------------> Device core +----------> matrix device |
+ | | | | | |
+ | | +--------^---------+ +-----------------+
+ | | |
+ | | +-------------------+
+ | | +-----------------------------------+ |
+ | | | 4 register AP driver | | 2 register device
+ | | | | |
++--------+---+-v---+ +--------+-------+-+
+| | | |
+| ap_bus +--------------------- > vfio_ap driver |
+| | 8 probe | |
++--------^---------+ +--^--^------------+
+6 edit | | |
+ apmask | +-----------------------------+ | 9 mdev create
+ aqmask | | 1 modprobe |
++--------+-----+---+ +----------------+-+ +------------------+
+| | | |8 create | mediated |
+| admin | | VFIO device core |---------> matrix |
+| + | | | device |
++------+-+---------+ +--------^---------+ +--------^---------+
+ | | | |
+ | | 9 create vfio_ap-passthrough | |
+ | +------------------------------+ |
+ +-------------------------------------------------------------+
+ 10 assign adapter/domain/control domain
+
+The process for reserving an AP queue for use by a KVM guest is:
+
+1. The administrator loads the vfio_ap device driver
+2. During its initialization, the vfio_ap driver registers a single 'matrix'
+ device with the device core. This will serve as the parent device for
+ all mediated matrix devices used to configure an AP matrix for a guest.
+3. The /sys/devices/vfio_ap/matrix device is created by the device core
+4. The vfio_ap device driver registers with the AP bus for AP queue devices
+   of type 10 and higher (CEX4 and newer), providing the driver's probe and
+   remove callback interfaces. Devices older than CEX4 are not supported;
+   this keeps the implementation simple by omitting devices that will go out
+   of service in the relatively near future and for which few systems remain
+   on which to test.
+5. The AP bus registers the vfio_ap device driver with the device core
+6. The administrator edits the AP adapter and queue masks to reserve AP queues
+ for use by the vfio_ap device driver.
+7. The AP bus removes the AP queues reserved for the vfio_ap driver from the
+ default zcrypt cex4queue driver.
+8. The AP bus probes the vfio_ap device driver to bind the queues reserved for
+ it.
+9. The administrator creates a passthrough type mediated matrix device to be
+ used by a guest
+10. The administrator assigns the adapters, usage domains and control domains
+    to be used exclusively by a guest.
+
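+The administrator-driven steps (1, 6 and 9) map onto shell commands along
+these lines - a sketch only, using adapter 5 and domain 6 as illustrative
+values; the mask syntax and mdev paths are covered in detail in the example
+at the end of this document:
+
+   modprobe vfio_ap                              # step 1
+   echo -5 > /sys/bus/ap/apmask                  # step 6
+   echo -6 > /sys/bus/ap/aqmask                  # step 6
+   uuidgen > /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/create   # step 9
+
+Step 10's assignments are then made via the mediated device's assign_*
+sysfs attributes, as shown in step 4 of the example.
+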
+Set up the VFIO mediated device interfaces
+------------------------------------------
+The VFIO AP device driver utilizes the common interface of the VFIO mediated
+device core driver to:
+* Register an AP mediated bus driver to add a mediated matrix device to and
+ remove it from a VFIO group.
+* Create and destroy a mediated matrix device
+* Add a mediated matrix device to and remove it from the AP mediated bus driver
+* Add a mediated matrix device to and remove it from an IOMMU group
+
+The following high-level block diagram shows the main components and interfaces
+of the VFIO AP mediated matrix device driver:
+
+ +-------------+
+ | |
+ | +---------+ | mdev_register_driver() +--------------+
+ | | Mdev | +<-----------------------+ |
+ | | bus | | | vfio_mdev.ko |
+ | | driver | +----------------------->+ |<-> VFIO user
+ | +---------+ | probe()/remove() +--------------+ APIs
+ | |
+ | MDEV CORE |
+ | MODULE |
+ | mdev.ko |
+ | +---------+ | mdev_register_device() +--------------+
+ | |Physical | +<-----------------------+ |
+ | | device | | | vfio_ap.ko |<-> matrix
+ | |interface| +----------------------->+ | device
+ | +---------+ | callback +--------------+
+ +-------------+
+
+During initialization of the vfio_ap module, the matrix device is registered
+with an 'mdev_parent_ops' structure that provides the sysfs attribute
+structures, mdev functions and callback interfaces for managing the mediated
+matrix device.
+
+* sysfs attribute structures:
+ * supported_type_groups
+ The VFIO mediated device framework supports creation of user-defined
+ mediated device types. These mediated device types are specified
+ via the 'supported_type_groups' structure when a device is registered
+ with the mediated device framework. The registration process creates the
+ sysfs structures for each mediated device type specified in the
+ 'mdev_supported_types' sub-directory of the device being registered. Along
+ with the device type, the sysfs attributes of the mediated device type are
+ provided.
+
+ The VFIO AP device driver will register one mediated device type for
+ passthrough devices:
+ /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough
+ Only the read-only attributes required by the VFIO mdev framework will
+ be provided:
+      ... name
+      ... device_api
+      ... available_instances
+    Where:
+    * name: specifies the name of the mediated device type
+    * device_api: specifies the VFIO device API supported by the mediated
+      device type
+    * available_instances: the number of mediated matrix passthrough devices
+      that can be created
+ * mdev_attr_groups
+ This attribute group identifies the user-defined sysfs attributes of the
+ mediated device. When a device is registered with the VFIO mediated device
+ framework, the sysfs attribute files identified in the 'mdev_attr_groups'
+ structure will be created in the mediated matrix device's directory. The
+ sysfs attributes for a mediated matrix device are:
+ * assign_adapter:
+ * unassign_adapter:
+ Write-only attributes for assigning/unassigning an AP adapter to/from the
+ mediated matrix device. To assign/unassign an adapter, the APID of the
+ adapter is echoed to the respective attribute file.
+ * assign_domain:
+ * unassign_domain:
+ Write-only attributes for assigning/unassigning an AP usage domain to/from
+ the mediated matrix device. To assign/unassign a domain, the domain
+     number of the usage domain is echoed to the respective attribute
+ file.
+ * matrix:
+ A read-only file for displaying the APQNs derived from the cross product
+ of the adapter and domain numbers assigned to the mediated matrix device.
+ * assign_control_domain:
+ * unassign_control_domain:
+ Write-only attributes for assigning/unassigning an AP control domain
+ to/from the mediated matrix device. To assign/unassign a control domain,
+ the ID of the domain to be assigned/unassigned is echoed to the respective
+ attribute file.
+ * control_domains:
+ A read-only file for displaying the control domain numbers assigned to the
+ mediated matrix device.
+
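+ For example, assuming the current working directory is a mediated matrix
+ device's sysfs directory, an adapter and a domain could be assigned and the
+ resulting APQNs displayed with (adapter 5 and domain 6 are illustrative
+ values):
+
+   echo 5 > assign_adapter
+   echo 6 > assign_domain
+   cat matrix
+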
+* functions:
+ * create:
+ allocates the ap_matrix_mdev structure used by the vfio_ap driver to:
+ * Store the reference to the KVM structure for the guest using the mdev
+ * Store the AP matrix configuration for the adapters, domains, and control
+ domains assigned via the corresponding sysfs attributes files
+ * remove:
+    deallocates the mediated matrix device's ap_matrix_mdev structure. This is
+    allowed only if no running guest is using the mdev.
+
+* callback interfaces
+ * open:
+ The vfio_ap driver uses this callback to register a
+ VFIO_GROUP_NOTIFY_SET_KVM notifier callback function for the mdev matrix
+ device. The open is invoked when QEMU connects the VFIO iommu group
+ for the mdev matrix device to the MDEV bus. Access to the KVM structure used
+    to configure the KVM guest is provided via this callback. The KVM structure
+ is used to configure the guest's access to the AP matrix defined via the
+ mediated matrix device's sysfs attribute files.
+ * release:
+ unregisters the VFIO_GROUP_NOTIFY_SET_KVM notifier callback function for the
+ mdev matrix device and deconfigures the guest's AP matrix.
+
+Configure the APM, AQM and ADM in the CRYCB:
+-------------------------------------------
+Configuring the AP matrix for a KVM guest will be performed when the
+VFIO_GROUP_NOTIFY_SET_KVM notifier callback is invoked. The notifier
+function is called when QEMU connects to KVM. The guest's AP matrix is
+configured via its CRYCB by:
+* Setting the bits in the APM corresponding to the APIDs assigned to the
+ mediated matrix device via its 'assign_adapter' interface.
+* Setting the bits in the AQM corresponding to the domains assigned to the
+ mediated matrix device via its 'assign_domain' interface.
+* Setting the bits in the ADM corresponding to the domain IDs assigned to the
+  mediated matrix device via its 'assign_control_domain' interface.
+
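+As a minimal illustration of the left-to-right bit numbering used by all
+three masks - a sketch only, not the kernel's actual implementation - a
+256-bit mask stored as four 64-bit words could be manipulated like this:
+
+   /* Set bit 'nr' (0 = most significant) in a 256-bit CRYCB-style mask. */
+   static void set_msb_first_bit(unsigned long mask[4], unsigned int nr)
+   {
+           mask[nr / 64] |= 1UL << (63 - (nr % 64));
+   }
+
+   /* e.g. set_msb_first_bit(apm, 5) marks APID 5 as usable by the guest. */
+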
+The CPU model features for AP
+-----------------------------
+The AP stack relies on the presence of the AP instructions as well as two
+facilities: The AP Facilities Test (APFT) facility; and the AP Query
+Configuration Information (QCI) facility. These features/facilities are made
+available to a KVM guest via the following CPU model features:
+
+1. ap: Indicates whether the AP instructions are installed on the guest. This
+ feature will be enabled by KVM only if the AP instructions are installed
+ on the host.
+
+2. apft: Indicates the APFT facility is available on the guest. This facility
+ can be made available to the guest only if it is available on the host (i.e.,
+ facility bit 15 is set).
+
+3. apqci: Indicates the AP QCI facility is available on the guest. This facility
+ can be made available to the guest only if it is available on the host (i.e.,
+ facility bit 12 is set).
+
+Note: If the user chooses to specify a CPU model different than the 'host'
+model to QEMU, the CPU model features and facilities need to be turned on
+explicitly; for example:
+
+ /usr/bin/qemu-system-s390x ... -cpu z13,ap=on,apqci=on,apft=on
+
+A guest can be precluded from using AP features/facilities by turning them off
+explicitly; for example:
+
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=off,apqci=off,apft=off
+
+Note: If the APFT facility is turned off (apft=off) for the guest, the guest
+will not see any AP devices. The zcrypt device drivers that register for type 10
+and newer AP devices - i.e., the cex4card and cex4queue device drivers - need
+the APFT facility to ascertain the facilities installed on a given AP device. If
+the APFT facility is not installed on the guest, then the probe of device
+drivers will fail since only type 10 and newer devices can be configured for
+guest use.
+
+Example:
+=======
+Let's now provide an example to illustrate how KVM guests may be given
+access to AP facilities. For this example, we will show how to configure
+three guests such that the output of the lszcrypt command on each guest
+looks like this:
+
+Guest1
+------
+CARD.DOMAIN TYPE MODE
+------------------------------
+05 CEX5C CCA-Coproc
+05.0004 CEX5C CCA-Coproc
+05.00ab CEX5C CCA-Coproc
+06 CEX5A Accelerator
+06.0004 CEX5A Accelerator
+06.00ab CEX5A Accelerator
+
+Guest2
+------
+CARD.DOMAIN TYPE MODE
+------------------------------
+05 CEX5A Accelerator
+05.0047 CEX5A Accelerator
+05.00ff CEX5A Accelerator
+
+Guest3
+------
+CARD.DOMAIN TYPE MODE
+------------------------------
+06 CEX5A Accelerator
+06.0047 CEX5A Accelerator
+06.00ff CEX5A Accelerator
+
+These are the steps:
+
+1. Install the vfio_ap module on the linux host. The dependency chain for the
+ vfio_ap module is:
+ * iommu
+ * s390
+ * zcrypt
+ * vfio
+ * vfio_mdev
+ * vfio_mdev_device
+ * KVM
+
+ To build the vfio_ap module, the kernel build must be configured with the
+ following Kconfig elements selected:
+ * IOMMU_SUPPORT
+ * S390
+ * ZCRYPT
+ * S390_AP_IOMMU
+ * VFIO
+ * VFIO_MDEV
+ * VFIO_MDEV_DEVICE
+ * KVM
+
+   If using make menuconfig, select the following to build the vfio_ap module:
+ -> Device Drivers
+ -> IOMMU Hardware Support
+ select S390 AP IOMMU Support
+ -> VFIO Non-Privileged userspace driver framework
+      -> Mediated device driver framework
+ -> VFIO driver for Mediated devices
+ -> I/O subsystem
+ -> VFIO support for AP devices
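+
+   The resulting .config fragment might look like this (whether each option
+   is built in or modular is a configuration choice; the values below are
+   only one possibility):
+
+      CONFIG_IOMMU_SUPPORT=y
+      CONFIG_S390=y
+      CONFIG_ZCRYPT=m
+      CONFIG_S390_AP_IOMMU=y
+      CONFIG_VFIO=m
+      CONFIG_VFIO_MDEV=m
+      CONFIG_VFIO_MDEV_DEVICE=m
+      CONFIG_KVM=m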
+
+2. Secure the AP queues to be used by the three guests so that the host cannot
+   access them. Securing them is done via two sysfs files that specify
+   bitmasks marking a subset of the APQN range as 'usable by the default AP
+   queue device drivers' or 'not usable by the default device drivers', and
+   thus available for use by the vfio_ap device driver. The sysfs files
+   containing the masks are located at:
+
+ /sys/bus/ap/apmask
+ /sys/bus/ap/aqmask
+
+ The 'apmask' is a 256-bit mask that identifies a set of AP adapter IDs
+ (APID). Each bit in the mask, from left to right (i.e., from most significant
+ to least significant bit in big endian order), corresponds to an APID from
+ 0-255. If a bit is set, the APID is marked as usable only by the default AP
+ queue device drivers; otherwise, the APID is usable by the vfio_ap
+ device driver.
+
+ The 'aqmask' is a 256-bit mask that identifies a set of AP queue indexes
+ (APQI). Each bit in the mask, from left to right (i.e., from most significant
+ to least significant bit in big endian order), corresponds to an APQI from
+ 0-255. If a bit is set, the APQI is marked as usable only by the default AP
+ queue device drivers; otherwise, the APQI is usable by the vfio_ap device
+ driver.
+
+ Take, for example, the following mask:
+
+ 0x7dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+
+ It indicates:
+
+   Bits 1, 2, 3, 4, 5, and 7-255 are set, so the corresponding IDs belong to
+   the default drivers' pool, while IDs 0 and 6 belong to the vfio_ap device
+   driver's pool.
+
+ The APQN of each AP queue device assigned to the linux host is checked by the
+ AP bus against the set of APQNs derived from the cross product of APIDs
+ and APQIs marked as usable only by the default AP queue device drivers. If a
+ match is detected, only the default AP queue device drivers will be probed;
+ otherwise, the vfio_ap device driver will be probed.
+
+ By default, the two masks are set to reserve all APQNs for use by the default
+ AP queue device drivers. There are two ways the default masks can be changed:
+
+ 1. The sysfs mask files can be edited by echoing a string into the
+ respective sysfs mask file in one of two formats:
+
+ * An absolute hex string starting with 0x - like "0x12345678" - sets
+ the mask. If the given string is shorter than the mask, it is padded
+ with 0s on the right; for example, specifying a mask value of 0x41 is
+ the same as specifying:
+
+ 0x4100000000000000000000000000000000000000000000000000000000000000
+
+ Keep in mind that the mask reads from left to right (i.e., most
+ significant to least significant bit in big endian order), so the mask
+ above identifies device numbers 1 and 7 (01000001).
+
+ If the string is longer than the mask, the operation is terminated with
+ an error (EINVAL).
+
+ * Individual bits in the mask can be switched on and off by specifying
+ each bit number to be switched in a comma separated list. Each bit
+      number string must be prepended with a plus ('+') or minus ('-') to indicate
+ the corresponding bit is to be switched on ('+') or off ('-'). Some
+ valid values are:
+
+ "+0" switches bit 0 on
+ "-13" switches bit 13 off
+ "+0x41" switches bit 65 on
+ "-0xff" switches bit 255 off
+
+ The following example:
+ +0,-6,+0x47,-0xf0
+
+ Switches bits 0 and 71 (0x47) on
+ Switches bits 6 and 240 (0xf0) off
+
+ Note that the bits not specified in the list remain as they were before
+ the operation.
+
+ 2. The masks can also be changed at boot time via parameters on the kernel
+ command line like this:
+
+ ap.apmask=0xffff ap.aqmask=0x40
+
+ This would create the following masks:
+
+ apmask:
+ 0xffff000000000000000000000000000000000000000000000000000000000000
+
+ aqmask:
+ 0x4000000000000000000000000000000000000000000000000000000000000000
+
+ Resulting in these two pools:
+
+   default drivers pool:    adapters 0-15, domain 1
+   alternate drivers pool:  adapters 16-255, domains 0, 2-255
+
+ Securing the APQNs for our example:
+ ----------------------------------
+ To secure the AP queues 05.0004, 05.0047, 05.00ab, 05.00ff, 06.0004, 06.0047,
+ 06.00ab, and 06.00ff for use by the vfio_ap device driver, the corresponding
+ APQNs can either be removed from the default masks:
+
+ echo -5,-6 > /sys/bus/ap/apmask
+
+ echo -4,-0x47,-0xab,-0xff > /sys/bus/ap/aqmask
+
+ Or the masks can be set as follows:
+
+ echo 0xf9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff \
+ > apmask
+
+ echo 0xf7fffffffffffffffeffffffffffffffffffffffffeffffffffffffffffffffe \
+ > aqmask
+
+ This will result in AP queues 05.0004, 05.0047, 05.00ab, 05.00ff, 06.0004,
+ 06.0047, 06.00ab, and 06.00ff getting bound to the vfio_ap device driver. The
+ sysfs directory for the vfio_ap device driver will now contain symbolic links
+ to the AP queue devices bound to it:
+
+ /sys/bus/ap
+ ... [drivers]
+ ...... [vfio_ap]
+ ......... [05.0004]
+ ......... [05.0047]
+ ......... [05.00ab]
+ ......... [05.00ff]
+ ......... [06.0004]
+ ......... [06.0047]
+ ......... [06.00ab]
+ ......... [06.00ff]
+
+   Keep in mind that only type 10 and newer adapters (i.e., CEX4 and later)
+   can be bound to the vfio_ap device driver. This restriction keeps the
+   implementation simple by omitting older devices that will go out of
+   service in the relatively near future and for which few systems remain
+   on which to test.
+
+ The administrator, therefore, must take care to secure only AP queues that
+ can be bound to the vfio_ap device driver. The device type for a given AP
+ queue device can be read from the parent card's sysfs directory. For example,
+ to see the hardware type of the queue 05.0004:
+
+ cat /sys/bus/ap/devices/card05/hwtype
+
+ The hwtype must be 10 or higher (CEX4 or newer) in order to be bound to the
+ vfio_ap device driver.
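+
+   A quick scripted check along these lines (illustrative only, not part of
+   the driver) could guard a mask update:
+
+      if [ "$(cat /sys/bus/ap/devices/card05/hwtype)" -ge 10 ]; then
+          echo "card05 queues can be bound to the vfio_ap driver"
+      fi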
+
+3. Create the mediated devices needed to configure the AP matrices for the
+ three guests and to provide an interface to the vfio_ap driver for
+ use by the guests:
+
+ /sys/devices/vfio_ap/matrix/
+ --- [mdev_supported_types]
+ ------ [vfio_ap-passthrough] (passthrough mediated matrix device type)
+ --------- create
+ --------- [devices]
+
+ To create the mediated devices for the three guests:
+
+ uuidgen > create
+ uuidgen > create
+ uuidgen > create
+
+ or
+
+ echo $uuid1 > create
+ echo $uuid2 > create
+ echo $uuid3 > create
+
+ This will create three mediated devices in the [devices] subdirectory named
+ after the UUID written to the create attribute file. We call them $uuid1,
+ $uuid2 and $uuid3 and this is the sysfs directory structure after creation:
+
+ /sys/devices/vfio_ap/matrix/
+ --- [mdev_supported_types]
+ ------ [vfio_ap-passthrough]
+ --------- [devices]
+ ------------ [$uuid1]
+ --------------- assign_adapter
+ --------------- assign_control_domain
+ --------------- assign_domain
+ --------------- matrix
+ --------------- unassign_adapter
+ --------------- unassign_control_domain
+ --------------- unassign_domain
+
+ ------------ [$uuid2]
+ --------------- assign_adapter
+ --------------- assign_control_domain
+ --------------- assign_domain
+ --------------- matrix
+ --------------- unassign_adapter
+               --------------- unassign_control_domain
+               --------------- unassign_domain
+
+ ------------ [$uuid3]
+ --------------- assign_adapter
+ --------------- assign_control_domain
+ --------------- assign_domain
+ --------------- matrix
+ --------------- unassign_adapter
+               --------------- unassign_control_domain
+               --------------- unassign_domain
+
+4. The administrator now needs to configure the matrices for the mediated
+ devices $uuid1 (for Guest1), $uuid2 (for Guest2) and $uuid3 (for Guest3).
+
+ This is how the matrix is configured for Guest1:
+
+ echo 5 > assign_adapter
+ echo 6 > assign_adapter
+ echo 4 > assign_domain
+ echo 0xab > assign_domain
+
+ Control domains can similarly be assigned using the assign_control_domain
+ sysfs file.
+
+ If a mistake is made configuring an adapter, domain or control domain,
+ you can use the unassign_xxx files to unassign the adapter, domain or
+ control domain.
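+
+   For example, to back out the domain 0xab assignment made above for
+   Guest1:
+
+      echo 0xab > unassign_domain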
+
+ To display the matrix configuration for Guest1:
+
+ cat matrix
+
+ This is how the matrix is configured for Guest2:
+
+ echo 5 > assign_adapter
+ echo 0x47 > assign_domain
+ echo 0xff > assign_domain
+
+ This is how the matrix is configured for Guest3:
+
+ echo 6 > assign_adapter
+ echo 0x47 > assign_domain
+ echo 0xff > assign_domain
+
+ In order to successfully assign an adapter:
+
+ * The adapter number specified must represent a value from 0 up to the
+ maximum adapter number configured for the system. If an adapter number
+ higher than the maximum is specified, the operation will terminate with
+ an error (ENODEV).
+
+ * All APQNs that can be derived from the adapter ID and the IDs of
+ the previously assigned domains must be bound to the vfio_ap device
+ driver. If no domains have yet been assigned, then there must be at least
+ one APQN with the specified APID bound to the vfio_ap driver. If no such
+ APQNs are bound to the driver, the operation will terminate with an
+ error (EADDRNOTAVAIL).
+
+ No APQN that can be derived from the adapter ID and the IDs of the
+ previously assigned domains can be assigned to another mediated matrix
+ device. If an APQN is assigned to another mediated matrix device, the
+ operation will terminate with an error (EADDRINUSE).
+
+ In order to successfully assign a domain:
+
+ * The domain number specified must represent a value from 0 up to the
+ maximum domain number configured for the system. If a domain number
+ higher than the maximum is specified, the operation will terminate with
+ an error (ENODEV).
+
+ * All APQNs that can be derived from the domain ID and the IDs of
+ the previously assigned adapters must be bound to the vfio_ap device
+ driver. If no domains have yet been assigned, then there must be at least
+ one APQN with the specified APQI bound to the vfio_ap driver. If no such
+ APQNs are bound to the driver, the operation will terminate with an
+ error (EADDRNOTAVAIL).
+
+ No APQN that can be derived from the domain ID and the IDs of the
+ previously assigned adapters can be assigned to another mediated matrix
+ device. If an APQN is assigned to another mediated matrix device, the
+ operation will terminate with an error (EADDRINUSE).
+
+ In order to successfully assign a control domain, the domain number
+ specified must represent a value from 0 up to the maximum domain number
+ configured for the system. If a control domain number higher than the maximum
+ is specified, the operation will terminate with an error (ENODEV).
+
+5. Start Guest1:
+
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid1 ...
+
+6. Start Guest2:
+
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid2 ...
+
+7. Start Guest3:
+
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid3 ...
+
+When the guest is shut down, the mediated matrix devices may be removed.
+
+Using our example again, to remove the mediated matrix device $uuid1:
+
+ /sys/devices/vfio_ap/matrix/
+ --- [mdev_supported_types]
+ ------ [vfio_ap-passthrough]
+ --------- [devices]
+ ------------ [$uuid1]
+ --------------- remove
+
+
+ echo 1 > remove
+
+ This will remove all of the mdev matrix device's sysfs structures including
+ the mdev device itself. To recreate and reconfigure the mdev matrix device,
+ all of the steps starting with step 3 will have to be performed again. Note
+ that the remove will fail if a guest using the mdev is still running.
+
+ It is not necessary to remove an mdev matrix device, but one may want to
+ remove it if no guest will use it during the remaining lifetime of the linux
+ host. If the mdev matrix device is removed, one may want to also reconfigure
+ the pool of adapters and queues reserved for use by the default drivers.
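+
+   For example, the APQNs secured for vfio_ap earlier could be returned to
+   the default drivers' pool by switching the corresponding bits back on:
+
+      echo +5,+6 > /sys/bus/ap/apmask
+      echo +4,+0x47,+0xab,+0xff > /sys/bus/ap/aqmask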
+
+Limitations
+===========
+* The KVM/kernel interfaces do not provide a way to prevent an APQN from being
+  restored to the default drivers pool while its queue is still assigned to a
+  mediated device in use by a guest. It is incumbent upon the administrator to
+  ensure the APQN is not assigned to any mediated device in use by a guest;
+  otherwise the host may be given access to the private data of the AP queue
+  device, such as a private key configured specifically for the guest.
+
+* Dynamically modifying the AP matrix for a running guest (which would amount to
+  hot(un)plug of AP devices for the guest) is currently not supported.
+
+* Live guest migration is not supported for guests using AP devices.
diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX
deleted file mode 100644
index eccf7ad2e7f9..000000000000
--- a/Documentation/scheduler/00-INDEX
+++ /dev/null
@@ -1,18 +0,0 @@
-00-INDEX
- - this file.
-sched-arch.txt
- - CPU Scheduler implementation hints for architecture specific code.
-sched-bwc.txt
- - CFS bandwidth control overview.
-sched-design-CFS.txt
- - goals, design and implementation of the Completely Fair Scheduler.
-sched-domains.txt
- - information on scheduling domains.
-sched-nice-design.txt
- - How and why the scheduler's nice levels are implemented.
-sched-rt-group.txt
- - real-time group scheduling.
-sched-deadline.txt
- - deadline scheduling.
-sched-stats.txt
- - information on schedstats (Linux Scheduler Statistics).
diff --git a/Documentation/scheduler/completion.txt b/Documentation/scheduler/completion.txt
index 656cf803c006..e5b9df4d8078 100644
--- a/Documentation/scheduler/completion.txt
+++ b/Documentation/scheduler/completion.txt
@@ -1,146 +1,187 @@
-completions - wait for completion handling
-==========================================
-
-This document was originally written based on 3.18.0 (linux-next)
+Completions - "wait for completion" barrier APIs
+================================================
Introduction:
-------------
-If you have one or more threads of execution that must wait for some process
+If you have one or more threads that must wait for some kernel activity
to have reached a point or a specific state, completions can provide a
race-free solution to this problem. Semantically they are somewhat like a
-pthread_barrier and have similar use-cases.
+pthread_barrier() and have similar use-cases.
Completions are a code synchronization mechanism which is preferable to any
-misuse of locks. Any time you think of using yield() or some quirky
-msleep(1) loop to allow something else to proceed, you probably want to
-look into using one of the wait_for_completion*() calls instead. The
-advantage of using completions is clear intent of the code, but also more
-efficient code as both threads can continue until the result is actually
-needed.
-
-Completions are built on top of the generic event infrastructure in Linux,
-with the event reduced to a simple flag (appropriately called "done") in
-struct completion that tells the waiting threads of execution if they
-can continue safely.
-
-As completions are scheduling related, the code is found in
+misuse of locks/semaphores and busy-loops. Any time you think of using
+yield() or some quirky msleep(1) loop to allow something else to proceed,
+you probably want to look into using one of the wait_for_completion*()
+calls and complete() instead.
+
+The advantage of using completions is that they have a well defined, focused
+purpose which makes it very easy to see the intent of the code, but they
+also result in more efficient code as all threads can continue execution
+until the result is actually needed, and both the waiting and the signalling
+is highly efficient using low level scheduler sleep/wakeup facilities.
+
+Completions are built on top of the waitqueue and wakeup infrastructure of
+the Linux scheduler. The event the threads on the waitqueue are waiting for
+is reduced to a simple flag in 'struct completion', appropriately called "done".
+
+As completions are scheduling related, the code can be found in
kernel/sched/completion.c.
Usage:
------
-There are three parts to using completions, the initialization of the
-struct completion, the waiting part through a call to one of the variants of
-wait_for_completion() and the signaling side through a call to complete()
-or complete_all(). Further there are some helper functions for checking the
-state of completions.
+There are three main parts to using completions:
+
+ - the initialization of the 'struct completion' synchronization object
+ - the waiting part through a call to one of the variants of wait_for_completion(),
+ - the signaling side through a call to complete() or complete_all().
+
+There are also some helper functions for checking the state of completions.
+Note that while initialization must happen first, the waiting and signaling
+part can happen in any order. I.e. it's entirely normal for a thread
+to have marked a completion as 'done' before another thread checks whether
+it has to wait for it.
-To use completions one needs to include <linux/completion.h> and
-create a variable of type struct completion. The structure used for
-handling of completions is:
+To use completions you need to #include <linux/completion.h> and
+create a static or dynamic variable of type 'struct completion',
+which has only two fields:
struct completion {
unsigned int done;
wait_queue_head_t wait;
};
-providing the wait queue to place tasks on for waiting and the flag for
-indicating the state of affairs.
+This provides the ->wait waitqueue to place tasks on for waiting (if any), and
+the ->done completion flag for indicating whether it's completed or not.
-Completions should be named to convey the intent of the waiter. A good
-example is:
+Completions should be named to refer to the event that is being synchronized on.
+A good example is:
wait_for_completion(&early_console_added);
complete(&early_console_added);
-Good naming (as always) helps code readability.
+Good, intuitive naming (as always) helps code readability. Naming a completion
+'complete' is not helpful unless the purpose is super obvious...
Initializing completions:
-------------------------
-Initialization of dynamically allocated completions, often embedded in
-other structures, is done with:
+Dynamically allocated completion objects should preferably be embedded in data
+structures that are assured to be alive for the life-time of the function/driver,
+to prevent races with asynchronous complete() calls from occurring.
+
+Particular care should be taken when using the _timeout() or _killable()/_interruptible()
+variants of wait_for_completion(), as it must be assured that memory de-allocation
+does not happen until all related activities (complete() or reinit_completion())
+have taken place, even if these wait functions return prematurely due to a timeout
+or a signal triggering.
+
+Initializing of dynamically allocated completion objects is done via a call to
+init_completion():
- void init_completion(&done);
+ init_completion(&dynamic_object->done);
-Initialization is accomplished by initializing the wait queue and setting
-the default state to "not available", that is, "done" is set to 0.
+In this call we initialize the waitqueue and set ->done to 0, i.e. "not completed"
+or "not done".
The re-initialization function, reinit_completion(), simply resets the
-done element to "not available", thus again to 0, without touching the
-wait queue. Calling init_completion() twice on the same completion object is
+->done field to 0 ("not done"), without touching the waitqueue.
+Callers of this function must make sure that there are no racy
+wait_for_completion() calls going on in parallel.
+
+Calling init_completion() on the same completion object twice is
most likely a bug as it re-initializes the queue to an empty queue and
-enqueued tasks could get "lost" - use reinit_completion() in that case.
+enqueued tasks could get "lost" - use reinit_completion() in that case,
+but be aware of other races.
+
+For static declaration and initialization, macros are available.
+
+For static (or global) declarations in file scope you can use DECLARE_COMPLETION():
-For static declaration and initialization, macros are available. These are:
+ static DECLARE_COMPLETION(setup_done);
+ DECLARE_COMPLETION(setup_done);
- static DECLARE_COMPLETION(setup_done)
+Note that in this case the completion is boot time (or module load time)
+initialized to 'not done' and doesn't require an init_completion() call.
-used for static declarations in file scope. Within functions the static
-initialization should always use:
+When a completion is declared as a local variable within a function,
+then the initialization should always use DECLARE_COMPLETION_ONSTACK()
+explicitly, not just to make lockdep happy, but also to make it clear
+that limited scope had been considered and is intentional:
DECLARE_COMPLETION_ONSTACK(setup_done)
-suitable for automatic/local variables on the stack and will make lockdep
-happy. Note also that one needs to make *sure* the completion passed to
-work threads remains in-scope, and no references remain to on-stack data
-when the initiating function returns.
+Note that when using completion objects as local variables you must be
+acutely aware of the short life time of the function stack: the function
+must not return to a calling context until all activities (such as waiting
+threads) have ceased and the completion object is completely unused.
-Using on-stack completions for code that calls any of the _timeout or
-_interruptible/_killable variants is not advisable as they will require
-additional synchronization to prevent the on-stack completion object in
-the timeout/signal cases from going out of scope. Consider using dynamically
-allocated completions when intending to use the _interruptible/_killable
-or _timeout variants of wait_for_completion().
+To emphasise this again: in particular when using some of the waiting API variants
+with more complex outcomes, such as the timeout or signalling (_timeout(),
+_killable() and _interruptible()) variants, the wait might complete
+prematurely while the object might still be in use by another thread - and a return
+from the wait_for_completion*() caller function will deallocate the function
+stack and cause subtle data corruption if a complete() is done in some
+other thread. Simple testing might not trigger these kinds of races.
+If unsure, use dynamically allocated completion objects, preferably embedded
+in some other long lived object that has a boringly long life time which
+exceeds the life time of any helper threads using the completion object,
+or has a lock or other synchronization mechanism to make sure complete()
+is not called on a freed object.
+
+A naive DECLARE_COMPLETION() on the stack triggers a lockdep warning.
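+
+As a minimal sketch - with 'struct my_device_data' being a hypothetical,
+suitably long-lived driver object - embedding the completion avoids the
+on-stack pitfalls described above:
+
+	struct my_device_data {
+		struct completion setup_done;
+		/* ... state that outlives any helper threads ... */
+	};
+
+	/* 'data' points to a long-lived, dynamically allocated object: */
+	init_completion(&data->setup_done);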
Waiting for completions:
------------------------
-For a thread of execution to wait for some concurrent work to finish, it
-calls wait_for_completion() on the initialized completion structure.
+For a thread to wait for some concurrent activity to finish, it
+calls wait_for_completion() on the initialized completion structure:
+
+ void wait_for_completion(struct completion *done)
+
A typical usage scenario is:
+ CPU#1 CPU#2
+
struct completion setup_done;
+
init_completion(&setup_done);
- initialize_work(...,&setup_done,...)
+ initialize_work(...,&setup_done,...);
- /* run non-dependent code */ /* do setup */
+ /* run non-dependent code */ /* do setup */
- wait_for_completion(&setup_done); complete(setup_done)
+	wait_for_completion(&setup_done);	complete(&setup_done);
-This is not implying any temporal order on wait_for_completion() and the
-call to complete() - if the call to complete() happened before the call
+This is not implying any particular order between wait_for_completion() and
+the call to complete() - if the call to complete() happened before the call
to wait_for_completion() then the waiting side simply will continue
-immediately as all dependencies are satisfied if not it will block until
+immediately as all dependencies are satisfied; if not, it will block until
completion is signaled by complete().
Note that wait_for_completion() is calling spin_lock_irq()/spin_unlock_irq(),
so it can only be called safely when you know that interrupts are enabled.
-Calling it from hard-irq or irqs-off atomic contexts will result in
-hard-to-detect spurious enabling of interrupts.
-
-wait_for_completion():
-
- void wait_for_completion(struct completion *done):
+Calling it from IRQs-off atomic contexts will result in hard-to-detect
+spurious enabling of interrupts.
The default behavior is to wait without a timeout and to mark the task as
uninterruptible. wait_for_completion() and its variants are only safe
in process context (as they can sleep) but not in atomic context,
-interrupt context, with disabled irqs. or preemption is disabled - see also
+interrupt context, with disabled IRQs, or preemption is disabled - see also
try_wait_for_completion() below for handling completion in atomic/interrupt
context.
As all variants of wait_for_completion() can (obviously) block for a long
-time, you probably don't want to call this with held mutexes.
+time, depending on the nature of the activity they are waiting for, in
+most cases you probably don't want to call this while holding mutexes.
-Variants available:
--------------------
+wait_for_completion*() variants available:
+------------------------------------------
The below variants all return status and this status should be checked in
most(/all) cases - in cases where the status is deliberately not checked you
@@ -148,51 +189,53 @@ probably want to make a note explaining this (e.g. see
arch/arm/kernel/smp.c:__cpu_up()).
A common problem that occurs is to have unclean assignment of return types,
-so care should be taken with assigning return-values to variables of proper
-type. Checking for the specific meaning of return values also has been found
-to be quite inaccurate e.g. constructs like
-if (!wait_for_completion_interruptible_timeout(...)) would execute the same
-code path for successful completion and for the interrupted case - which is
-probably not what you want.
+so take care to assign return-values to variables of the proper type.
+
+Checking for the specific meaning of return values also has been found
+to be quite inaccurate, e.g. constructs like:
+
+ if (!wait_for_completion_interruptible_timeout(...))
+
+... would execute the same code path for successful completion and for the
+interrupted case - which is probably not what you want.
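+
+A sketch that distinguishes all three outcomes (assuming 'done' is an
+initialized completion):
+
+	long ret = wait_for_completion_interruptible_timeout(&done, msecs_to_jiffies(100));
+
+	if (ret < 0)		/* -ERESTARTSYS: interrupted by a signal */
+		return ret;
+	if (!ret)		/* timed out */
+		return -ETIMEDOUT;
+	/* ret > 0: completed; 'ret' is the remaining time in jiffies */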
int wait_for_completion_interruptible(struct completion *done)
-This function marks the task TASK_INTERRUPTIBLE. If a signal was received
-while waiting it will return -ERESTARTSYS; 0 otherwise.
+This function marks the task TASK_INTERRUPTIBLE while it is waiting.
+If a signal was received while waiting it will return -ERESTARTSYS; 0 otherwise.
- unsigned long wait_for_completion_timeout(struct completion *done,
- unsigned long timeout)
+ unsigned long wait_for_completion_timeout(struct completion *done, unsigned long timeout)
The task is marked as TASK_UNINTERRUPTIBLE and will wait at most 'timeout'
-(in jiffies). If timeout occurs it returns 0 else the remaining time in
-jiffies (but at least 1). Timeouts are preferably calculated with
-msecs_to_jiffies() or usecs_to_jiffies(). If the returned timeout value is
-deliberately ignored a comment should probably explain why (e.g. see
-drivers/mfd/wm8350-core.c wm8350_read_auxadc())
+jiffies. If a timeout occurs it returns 0, else the remaining time in
+jiffies (but at least 1).
+
+Timeouts are preferably calculated with msecs_to_jiffies() or usecs_to_jiffies(),
+to make the code largely HZ-invariant.
+
+If the returned timeout value is deliberately ignored a comment should probably explain
+why (e.g. see drivers/mfd/wm8350-core.c wm8350_read_auxadc()).
- long wait_for_completion_interruptible_timeout(
- struct completion *done, unsigned long timeout)
+ long wait_for_completion_interruptible_timeout(struct completion *done, unsigned long timeout)
This function passes a timeout in jiffies and marks the task as
TASK_INTERRUPTIBLE. If a signal was received it will return -ERESTARTSYS;
-otherwise it returns 0 if the completion timed out or the remaining time in
+otherwise it returns 0 if the completion timed out, or the remaining time in
jiffies if completion occurred.
Further variants include _killable which uses TASK_KILLABLE as the
-designated tasks state and will return -ERESTARTSYS if it is interrupted or
-else 0 if completion was achieved. There is a _timeout variant as well:
+designated task state and will return -ERESTARTSYS if it is interrupted,
+or 0 if completion was achieved. There is a _timeout variant as well:
long wait_for_completion_killable(struct completion *done)
- long wait_for_completion_killable_timeout(struct completion *done,
- unsigned long timeout)
+ long wait_for_completion_killable_timeout(struct completion *done, unsigned long timeout)
The _io variants wait_for_completion_io() behave the same as the non-_io
-variants, except for accounting waiting time as waiting on IO, which has
-an impact on how the task is accounted in scheduling stats.
+variants, except for accounting waiting time as 'waiting on IO', which has
+an impact on how the task is accounted in scheduling/IO stats:
void wait_for_completion_io(struct completion *done)
- unsigned long wait_for_completion_io_timeout(struct completion *done
- unsigned long timeout)
+ unsigned long wait_for_completion_io_timeout(struct completion *done, unsigned long timeout)
Signaling completions:
@@ -200,31 +243,32 @@ Signaling completions:
A thread that wants to signal that the conditions for continuation have been
achieved calls complete() to signal exactly one of the waiters that it can
-continue.
+continue:
void complete(struct completion *done)
-or calls complete_all() to signal all current and future waiters.
+... or calls complete_all() to signal all current and future waiters:
void complete_all(struct completion *done)
The signaling will work as expected even if completions are signaled before
a thread starts waiting. This is achieved by the waiter "consuming"
-(decrementing) the done element of struct completion. Waiting threads
+(decrementing) the done field of 'struct completion'. Waiting threads
wakeup order is the same in which they were enqueued (FIFO order).
If complete() is called multiple times then this will allow for that number
of waiters to continue - each call to complete() will simply increment the
-done element. Calling complete_all() multiple times is a bug though. Both
-complete() and complete_all() can be called in hard-irq/atomic context safely.
+done field. Calling complete_all() multiple times is a bug though. Both
+complete() and complete_all() can be called in IRQ/atomic context safely.
-There only can be one thread calling complete() or complete_all() on a
-particular struct completion at any time - serialized through the wait
+There can only be one thread calling complete() or complete_all() on a
+particular 'struct completion' at any time - serialized through the wait
queue spinlock. Any such concurrent calls to complete() or complete_all()
probably are a design bug.
-Signaling completion from hard-irq context is fine as it will appropriately
-lock with spin_lock_irqsave/spin_unlock_irqrestore and it will never sleep.
+Signaling completion from IRQ context is fine as it will appropriately
+lock with spin_lock_irqsave()/spin_unlock_irqrestore() and it will never
+sleep.
try_wait_for_completion()/completion_done():
@@ -236,7 +280,7 @@ else it consumes one posted completion and returns true.
bool try_wait_for_completion(struct completion *done)
-Finally, to check the state of a completion without changing it in any way,
+Finally, to check the state of a completion without changing it in any way,
call completion_done(), which returns false if there are no posted
completions that were not yet consumed by waiters (implying that there are
waiters) and true otherwise;
@@ -244,4 +288,4 @@ waiters) and true otherwise;
bool completion_done(struct completion *done)
Both try_wait_for_completion() and completion_done() are safe to be called in
-hard-irq or atomic context.
+IRQ or atomic context.
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
deleted file mode 100644
index bb4a76f823e1..000000000000
--- a/Documentation/scsi/00-INDEX
+++ /dev/null
@@ -1,108 +0,0 @@
-00-INDEX
- - this file
-53c700.txt
- - info on driver for 53c700 based adapters
-BusLogic.txt
- - info on driver for adapters with BusLogic chips
-ChangeLog.1992-1997
- - Changes to scsi files, if not listed elsewhere
-ChangeLog.arcmsr
- - Changes to driver for ARECA's SATA RAID controller cards
-ChangeLog.ips
- - IBM ServeRAID driver Changelog
-ChangeLog.lpfc
- - Changes to lpfc driver
-ChangeLog.megaraid
- - Changes to LSI megaraid controller.
-ChangeLog.megaraid_sas
- - Changes to serial attached scsi version of LSI megaraid controller.
-ChangeLog.ncr53c8xx
- - Changes to ncr53c8xx driver
-ChangeLog.sym53c8xx
- - Changes to sym53c8xx driver
-ChangeLog.sym53c8xx_2
- - Changes to second generation of sym53c8xx driver
-FlashPoint.txt
- - info on driver for BusLogic FlashPoint adapters
-LICENSE.FlashPoint
- - Licence of the Flashpoint driver
-LICENSE.qla2xxx
- - License for QLogic Linux Fibre Channel HBA Driver firmware.
-LICENSE.qla4xxx
- - License for QLogic Linux iSCSI HBA Driver.
-Mylex.txt
- - info on driver for Mylex adapters
-NinjaSCSI.txt
- - info on WorkBiT NinjaSCSI-32/32Bi driver
-aacraid.txt
- - Driver supporting Adaptec RAID controllers
-advansys.txt
- - List of Advansys Host Adapters
-aha152x.txt
- - info on driver for Adaptec AHA152x based adapters
-aic79xx.txt
- - Adaptec Ultra320 SCSI host adapters
-aic7xxx.txt
- - info on driver for Adaptec controllers
-arcmsr_spec.txt
- - ARECA FIRMWARE SPEC (for IOP331 adapter)
-bfa.txt
- - Brocade FC/FCOE adapter driver.
-bnx2fc.txt
- - FCoE hardware offload for Broadcom network interfaces.
-cxgb3i.txt
- - Chelsio iSCSI Linux Driver
-dc395x.txt
- - README file for the dc395x SCSI driver
-dpti.txt
- - info on driver for DPT SmartRAID and Adaptec I2O RAID based adapters
-dtc3x80.txt
- - info on driver for DTC 2x80 based adapters
-g_NCR5380.txt
- - info on driver for NCR5380 and NCR53c400 based adapters
-hpsa.txt
- - HP Smart Array Controller SCSI driver.
-hptiop.txt
- - HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
-libsas.txt
- - Serial Attached SCSI management layer.
-link_power_management_policy.txt
- - Link power management options.
-lpfc.txt
- - LPFC driver release notes
-megaraid.txt
- - Common Management Module, shared code handling ioctls for LSI drivers
-ncr53c8xx.txt
- - info on driver for NCR53c8xx based adapters
-osd.txt
- Object-Based Storage Device, command set introduction.
-osst.txt
- - info on driver for OnStream SC-x0 SCSI tape
-ppa.txt
- - info on driver for IOmega zip drive
-qlogicfas.txt
- - info on driver for QLogic FASxxx based adapters
-scsi-changer.txt
- - README for the SCSI media changer driver
-scsi-generic.txt
- - info on the sg driver for generic (non-disk/CD/tape) SCSI devices.
-scsi-parameters.txt
- - List of SCSI-parameters to pass to the kernel at module load-time.
-scsi.txt
- - short blurb on using SCSI support as a module.
-scsi_mid_low_api.txt
- - info on API between SCSI layer and low level drivers
-scsi_eh.txt
- - info on SCSI midlayer error handling infrastructure
-scsi_fc_transport.txt
- - SCSI Fiber Channel Tansport
-st.txt
- - info on scsi tape driver
-sym53c500_cs.txt
- - info on PCMCIA driver for Symbios Logic 53c500 based adapters
-sym53c8xx_2.txt
- - info on second generation driver for sym53c8xx based adapters
-tmscsim.txt
- - info on driver for AM53c974 based adapters
-ufs.txt
- - info on Universal Flash Storage(UFS) and UFS host controller driver.
diff --git a/Documentation/scsi/ufs.txt b/Documentation/scsi/ufs.txt
index 41a6164592aa..520b5b033256 100644
--- a/Documentation/scsi/ufs.txt
+++ b/Documentation/scsi/ufs.txt
@@ -128,6 +128,26 @@ The current UFSHCD implementation supports following functionality,
In this version of UFSHCD Query requests and power management
functionality are not implemented.
+4. BSG Support
+------------------
+
+This transport driver supports exchanging UFS protocol information units
+(UPIUs) with a UFS device. Typically, user space will allocate
+struct ufs_bsg_request and struct ufs_bsg_reply (see ufs_bsg.h) as
+request_upiu and reply_upiu respectively. Filling those UPIUs should
+be done in accordance with JEDEC spec UFS2.1 paragraph 10.7.
+*Caveat emptor*: The driver performs no further input validation and sends the
+UPIU to the device as-is. Open the bsg device at /dev/ufs-bsg and
+send SG_IO with the applicable sg_io_v4:
+
+ io_hdr_v4.guard = 'Q';
+ io_hdr_v4.protocol = BSG_PROTOCOL_SCSI;
+ io_hdr_v4.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
+ io_hdr_v4.response = (__u64)reply_upiu;
+ io_hdr_v4.max_response_len = reply_len;
+ io_hdr_v4.request_len = request_len;
+ io_hdr_v4.request = (__u64)request_upiu;
+
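+The request can then be issued with something like the following sketch
+(error handling omitted):
+
+	int fd = open("/dev/ufs-bsg", O_RDWR);
+
+	ioctl(fd, SG_IO, &io_hdr_v4);
+	close(fd);
+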
UFS Specifications can be found at,
UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
diff --git a/Documentation/security/LSM.rst b/Documentation/security/LSM.rst
index 98522e0e1ee2..8b9ee597e9d0 100644
--- a/Documentation/security/LSM.rst
+++ b/Documentation/security/LSM.rst
@@ -5,7 +5,7 @@ Linux Security Module Development
Based on https://lkml.org/lkml/2007/10/26/215,
a new LSM is accepted into the kernel when its intent (a description of
what it tries to protect against and in what cases one would expect to
-use it) has been appropriately documented in ``Documentation/security/LSM.rst``.
+use it) has been appropriately documented in ``Documentation/admin-guide/LSM/``.
This allows an LSM's code to be easily compared to its goals, and so
that end users and distros can make a more informed decision about which
LSMs suit their requirements.
diff --git a/Documentation/security/keys/ecryptfs.rst b/Documentation/security/keys/ecryptfs.rst
index 4920f3a8ea75..0e2be0a6bb6a 100644
--- a/Documentation/security/keys/ecryptfs.rst
+++ b/Documentation/security/keys/ecryptfs.rst
@@ -5,10 +5,10 @@ Encrypted keys for the eCryptfs filesystem
ECryptfs is a stacked filesystem which transparently encrypts and decrypts each
file using a randomly generated File Encryption Key (FEK).
-Each FEK is in turn encrypted with a File Encryption Key Encryption Key (FEFEK)
+Each FEK is in turn encrypted with a File Encryption Key Encryption Key (FEKEK)
either in kernel space or in user space with a daemon called 'ecryptfsd'. In
the former case the operation is performed directly by the kernel CryptoAPI
-using a key, the FEFEK, derived from a user prompted passphrase; in the latter
+using a key, the FEKEK, derived from a user prompted passphrase; in the latter
the FEK is encrypted by 'ecryptfsd' with the help of external libraries in order
to support other mechanisms like public key cryptography, PKCS#11 and TPM based
operations.
@@ -22,12 +22,12 @@ by the userspace utility 'mount.ecryptfs' shipped with the package
The 'encrypted' key type has been extended with the introduction of the new
format 'ecryptfs' in order to be used in conjunction with the eCryptfs
filesystem. Encrypted keys of the newly introduced format store an
-authentication token in its payload with a FEFEK randomly generated by the
+authentication token in its payload with a FEKEK randomly generated by the
kernel and protected by the parent master key.
In order to avoid known-plaintext attacks, the datablob obtained through
commands 'keyctl print' or 'keyctl pipe' does not contain the overall
-authentication token, which content is well known, but only the FEFEK in
+authentication token, which content is well known, but only the FEKEK in
encrypted form.
The eCryptfs filesystem may really benefit from using encrypted keys in that the
diff --git a/Documentation/serial/00-INDEX b/Documentation/serial/00-INDEX
deleted file mode 100644
index 8021a9f29fc5..000000000000
--- a/Documentation/serial/00-INDEX
+++ /dev/null
@@ -1,16 +0,0 @@
-00-INDEX
- - this file.
-README.cycladesZ
- - info on Cyclades-Z firmware loading.
-driver
- - intro to the low level serial driver.
-moxa-smartio
- - file with info on installing/using Moxa multiport serial driver.
-n_gsm.txt
- - GSM 0710 tty multiplexer howto.
-rocket.txt
- - info on the Comtrol RocketPort multiport serial driver.
-serial-rs485.txt
- - info about RS485 structures and support in the kernel.
-tty.txt
- - guide to the locking policies of the tty layer.
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index e06238131f77..368a07a165f5 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -309,6 +309,8 @@ asus-nx50
ASUS Nx50 fixups
asus-nx51
ASUS Nx51 fixups
+asus-g751
+ ASUS G751 fixups
alc891-headset
Headset mode support on ALC891
alc891-headset-multi
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index a0b268466cb1..b37234afdfa1 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -3,8 +3,6 @@ Writing an ALSA Driver
======================
:Author: Takashi Iwai <tiwai@suse.de>
-:Date: Oct 15, 2007
-:Edition: 0.3.7
Preface
=======
@@ -21,11 +19,6 @@ explain the general topic of linux kernel coding and doesn't cover
low-level driver implementation details. It only describes the standard
way to write a PCI sound driver on ALSA.
-If you are already familiar with the older ALSA ver.0.5.x API, you can
-check the drivers such as ``sound/pci/es1938.c`` or
-``sound/pci/maestro3.c`` which have also almost the same code-base in
-the ALSA 0.5.x tree, so you can compare the differences.
-
This document is still a draft version. Any feedback and corrections,
please!!
@@ -35,24 +28,7 @@ File Tree Structure
General
-------
-The ALSA drivers are provided in two ways.
-
-One is the trees provided as a tarball or via cvs from the ALSA's ftp
-site, and another is the 2.6 (or later) Linux kernel tree. To
-synchronize both, the ALSA driver tree is split into two different
-trees: alsa-kernel and alsa-driver. The former contains purely the
-source code for the Linux 2.6 (or later) tree. This tree is designed
-only for compilation on 2.6 or later environment. The latter,
-alsa-driver, contains many subtle files for compiling ALSA drivers
-outside of the Linux kernel tree, wrapper functions for older 2.2 and
-2.4 kernels, to adapt the latest kernel API, and additional drivers
-which are still in development or in tests. The drivers in alsa-driver
-tree will be moved to alsa-kernel (and eventually to the 2.6 kernel
-tree) when they are finished and confirmed to work fine.
-
-The file tree structure of ALSA driver is depicted below. Both
-alsa-kernel and alsa-driver have almost the same file structure, except
-for “core” directory. It's named as “acore” in alsa-driver tree.
+The file tree structure of ALSA driver is depicted below.
::
@@ -61,14 +37,11 @@ for “core” directory. It's named as “acore” in alsa-driver tree.
/oss
/seq
/oss
- /instr
- /ioctl32
/include
/drivers
/mpu401
/opl3
/i2c
- /l3
/synth
/emux
/pci
@@ -80,6 +53,7 @@ for “core” directory. It's named as “acore” in alsa-driver tree.
/sparc
/usb
/pcmcia /(cards)
+ /soc
/oss
@@ -99,13 +73,6 @@ directory. The rawmidi OSS emulation is included in the ALSA rawmidi
code since it's quite small. The sequencer code is stored in
``core/seq/oss`` directory (see `below <#core-seq-oss>`__).
-core/ioctl32
-~~~~~~~~~~~~
-
-This directory contains the 32bit-ioctl wrappers for 64bit architectures
-such like x86-64, ppc64 and sparc64. For 32bit and alpha architectures,
-these are not compiled.
-
core/seq
~~~~~~~~
@@ -119,11 +86,6 @@ core/seq/oss
This contains the OSS sequencer emulation codes.
-core/seq/instr
-~~~~~~~~~~~~~~
-
-This directory contains the modules for the sequencer instrument layer.
-
include directory
-----------------
@@ -161,11 +123,6 @@ Although there is a standard i2c layer on Linux, ALSA has its own i2c
code for some cards, because the soundcard needs only a simple operation
and the standard i2c API is too complicated for such a purpose.
-i2c/l3
-~~~~~~
-
-This is a sub-directory for ARM L3 i2c.
-
synth directory
---------------
@@ -209,11 +166,19 @@ The PCMCIA, especially PCCard drivers will go here. CardBus drivers will
be in the pci directory, because their API is identical to that of
standard PCI cards.
+soc directory
+-------------
+
+This directory contains the code for the ASoC (ALSA System on Chip)
+layer, including the ASoC core, codec and machine drivers.
+
oss directory
-------------
-The OSS/Lite source files are stored here in Linux 2.6 (or later) tree.
-In the ALSA driver tarball, this directory is empty, of course :)
+This directory contains the OSS/Lite code.
+All of it has been deprecated except for dmasound on m68k as of
+this writing.
+
Basic Flow for PCI Drivers
==========================
@@ -352,10 +317,8 @@ to details explained in the following section.
/* (3) */
err = snd_mychip_create(card, pci, &chip);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto error;
/* (4) */
strcpy(card->driver, "My Chip");
@@ -368,22 +331,23 @@ to details explained in the following section.
/* (6) */
err = snd_card_register(card);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto error;
/* (7) */
pci_set_drvdata(pci, card);
dev++;
return 0;
+
+ error:
+ snd_card_free(card);
+ return err;
}
/* destructor -- see the "Destructor" sub-section */
static void snd_mychip_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
- pci_set_drvdata(pci, NULL);
}
@@ -445,14 +409,26 @@ In this part, the PCI resources are allocated.
struct mychip *chip;
....
err = snd_mychip_create(card, pci, &chip);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto error;
The details will be explained in the section `PCI Resource
Management`_.
+When something goes wrong, the probe function needs to deal with the
+error. In this example, we have a single error handling path placed
+at the end of the function.
+
+::
+
+ error:
+ snd_card_free(card);
+ return err;
+
+Since each component can be properly freed, the single
+:c:func:`snd_card_free()` call should suffice in most cases.
+
+
4) Set the driver ID and name strings.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -486,10 +462,8 @@ too.
::
err = snd_card_register(card);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto error;
Will be explained in the section `Management of Cards and
Components`_, too.
@@ -513,14 +487,13 @@ The destructor, remove callback, simply releases the card instance. Then
the ALSA middle layer will release all the attached components
automatically.
-It would be typically like the following:
+It would typically be just calling :c:func:`snd_card_free()`:
::
static void snd_mychip_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
- pci_set_drvdata(pci, NULL);
}
@@ -546,7 +519,7 @@ in the source file. If the code is split into several files, the files
without module options don't need them.
In addition to these headers, you'll need ``<linux/interrupt.h>`` for
-interrupt handling, and ``<asm/io.h>`` for I/O access. If you use the
+interrupt handling, and ``<linux/io.h>`` for I/O access. If you use the
:c:func:`mdelay()` or :c:func:`udelay()` functions, you'll need
to include ``<linux/delay.h>`` too.
@@ -720,6 +693,13 @@ function, which will call the real destructor.
where :c:func:`snd_mychip_free()` is the real destructor.
+The demerit of this method is obviously the larger amount of code.
+The merit is, however, that you can trigger your own callbacks at
+registration and disconnection of the card via the hooks in snd_device_ops.
+About the registration and disconnection of the card, see the subsections
+below.
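+
+For illustration only, a sketch of such an ops table might look like
+the following (the ``snd_mychip_dev_*`` handlers are hypothetical):
+
+::
+
+      static struct snd_device_ops ops = {
+              .dev_free = snd_mychip_dev_free,
+              /* optional hooks called at card registration / disconnection */
+              .dev_register = snd_mychip_dev_register,
+              .dev_disconnect = snd_mychip_dev_disconnect,
+      };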
+
+
Registration and Release
------------------------
@@ -905,10 +885,8 @@ Resource Allocation
-------------------
The allocation of I/O ports and irqs is done via standard kernel
-functions. Unlike ALSA ver.0.5.x., there are no helpers for that. And
-these resources must be released in the destructor function (see below).
-Also, on ALSA 0.9.x, you don't need to allocate (pseudo-)DMA for PCI
-like in ALSA 0.5.x.
+functions. These resources must be released in the destructor
+function (see below).
Now assume that the PCI device has an I/O port with 8 bytes and an
interrupt. Then :c:type:`struct mychip <mychip>` will have the
@@ -1064,7 +1042,8 @@ and the allocation would be like below:
::
- if ((err = pci_request_regions(pci, "My Chip")) < 0) {
+ err = pci_request_regions(pci, "My Chip");
+ if (err < 0) {
kfree(chip);
return err;
}
@@ -1086,6 +1065,21 @@ and the corresponding destructor would be:
....
}
+Of course, the modern way using :c:func:`pci_iomap()` will make things
+a bit easier, too.
+
+::
+
+ err = pci_request_regions(pci, "My Chip");
+ if (err < 0) {
+ kfree(chip);
+ return err;
+ }
+ chip->iobase_virt = pci_iomap(pci, 0, 0);
+
+which is paired with :c:func:`pci_iounmap()` in the destructor.
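+
+For example, a destructor sketch (assuming the usual ``chip->pci``
+field holding the pci_dev pointer) might look like:
+
+::
+
+      static int snd_mychip_free(struct mychip *chip)
+      {
+              ....
+              pci_iounmap(chip->pci, chip->iobase_virt);
+              pci_release_regions(chip->pci);
+              ....
+      }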
+
+
PCI Entries
-----------
@@ -1154,13 +1148,6 @@ And at last, the module entries:
Note that these module entries are tagged with ``__init`` and ``__exit``
prefixes.
-Oh, one thing was forgotten. If you have no exported symbols, you need
-to declare it in 2.2 or 2.4 kernels (it's not necessary in 2.6 kernels).
-
-::
-
- EXPORT_NO_SYMBOLS;
-
That's all!
PCM Interface
@@ -2113,6 +2100,16 @@ non-contiguous buffers. The mmap calls this callback to get the page
address. Some examples will be explained in the later section `Buffer
and Memory Management`_, too.
+mmap callback
+~~~~~~~~~~~~~~
+
+This is another optional callback for controlling mmap behavior.
+When defined, the PCM core calls this callback when a page is
+memory-mapped, instead of using the standard helper.
+If you need special handling (due to some architecture or
+device-specific issues), implement everything here as you like.
+
+
PCM Interrupt Handler
---------------------
@@ -2370,6 +2367,27 @@ to define the inverse rule:
hw_rule_format_by_channels, NULL,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+One typical usage of the hw constraints is to align the buffer size
+with the period size. By default, the ALSA PCM core doesn't enforce
+that the buffer size is aligned with the period size. For example, it'd be
+possible to have a combination like 256 period bytes with 999 buffer
+bytes.
+
+Many device chips, however, require the buffer to be a multiple of
+the period size. In such a case, call
+:c:func:`snd_pcm_hw_constraint_integer()` for
+``SNDRV_PCM_HW_PARAM_PERIODS``.
+
+::
+
+ snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+
+This assures that the number of periods is an integer, hence the buffer
+size is aligned with the period size.
+
+The hw constraint is a very powerful mechanism for defining the
+preferred PCM configuration, and there are many relevant helpers.
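+
+For example, :c:func:`snd_pcm_hw_constraint_minmax()` restricts a single
+parameter to a given range; the numbers below are made up:
+
+::
+
+      /* limit the buffer size between 32 kB and 256 kB */
+      snd_pcm_hw_constraint_minmax(substream->runtime,
+                                   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+                                   32 * 1024, 256 * 1024);
+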
I won't give more details here, rather I would like to say, “Luke, use
the source.”
@@ -3712,7 +3730,14 @@ example, for an intermediate buffer. Since the allocated pages are not
contiguous, you need to set the ``page`` callback to obtain the physical
address at every offset.
-The implementation of ``page`` callback would be like this:
+The easiest way to achieve it would be to use
+:c:func:`snd_pcm_lib_alloc_vmalloc_buffer()` for allocating the buffer
+via :c:func:`vmalloc()`, and set :c:func:`snd_pcm_sgbuf_ops_page()` to
+the ``page`` callback. At release, you need to call
+:c:func:`snd_pcm_lib_free_vmalloc_buffer()`.
+
+If you want to implement the ``page`` callback manually, it would be
+like this:
::
@@ -3848,7 +3873,9 @@ Power Management
If the chip is supposed to work with suspend/resume functions, you need
to add power-management code to the driver. The additional code for
-power-management should be ifdef-ed with ``CONFIG_PM``.
+power-management should be ifdef-ed with ``CONFIG_PM``, or annotated
+with the ``__maybe_unused`` attribute; otherwise the compiler will
+complain about unused functions.
If the driver *fully* supports suspend/resume that is, the device can be
properly resumed to its state when suspend was called, you can set the
@@ -3879,18 +3906,16 @@ the case of PCI drivers, the callbacks look like below:
::
- #ifdef CONFIG_PM
- static int snd_my_suspend(struct pci_dev *pci, pm_message_t state)
+ static int __maybe_unused snd_my_suspend(struct device *dev)
{
.... /* do things for suspend */
return 0;
}
- static int snd_my_resume(struct pci_dev *pci)
+ static int __maybe_unused snd_my_resume(struct device *dev)
{
.... /* do things for resume */
return 0;
}
- #endif
The scheme of the real suspend job is as follows.
@@ -3909,18 +3934,14 @@ The scheme of the real suspend job is as follows.
6. Stop the hardware if necessary.
-7. Disable the PCI device by calling
- :c:func:`pci_disable_device()`. Then, call
- :c:func:`pci_save_state()` at last.
-
A typical code would be like:
::
- static int mychip_suspend(struct pci_dev *pci, pm_message_t state)
+ static int __maybe_unused mychip_suspend(struct device *dev)
{
/* (1) */
- struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_card *card = dev_get_drvdata(dev);
struct mychip *chip = card->private_data;
/* (2) */
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -3932,9 +3953,6 @@ A typical code would be like:
snd_mychip_save_registers(chip);
/* (6) */
snd_mychip_stop_hardware(chip);
- /* (7) */
- pci_disable_device(pci);
- pci_save_state(pci);
return 0;
}
@@ -3943,44 +3961,35 @@ The scheme of the real resume job is as follows.
1. Retrieve the card and the chip data.
-2. Set up PCI. First, call :c:func:`pci_restore_state()`. Then
- enable the pci device again by calling
- :c:func:`pci_enable_device()`. Call
- :c:func:`pci_set_master()` if necessary, too.
+2. Re-initialize the chip.
-3. Re-initialize the chip.
+3. Restore the saved registers if necessary.
-4. Restore the saved registers if necessary.
+4. Resume the mixer, e.g. calling :c:func:`snd_ac97_resume()`.
-5. Resume the mixer, e.g. calling :c:func:`snd_ac97_resume()`.
+5. Restart the hardware (if any).
-6. Restart the hardware (if any).
-
-7. Call :c:func:`snd_power_change_state()` with
+6. Call :c:func:`snd_power_change_state()` with
``SNDRV_CTL_POWER_D0`` to notify the processes.
A typical code would be like:
::
- static int mychip_resume(struct pci_dev *pci)
+ static int __maybe_unused mychip_resume(struct device *dev)
{
/* (1) */
- struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_card *card = dev_get_drvdata(dev);
struct mychip *chip = card->private_data;
/* (2) */
- pci_restore_state(pci);
- pci_enable_device(pci);
- pci_set_master(pci);
- /* (3) */
snd_mychip_reinit_chip(chip);
- /* (4) */
+ /* (3) */
snd_mychip_restore_registers(chip);
- /* (5) */
+ /* (4) */
snd_ac97_resume(chip->ac97);
- /* (6) */
+ /* (5) */
snd_mychip_restart_chip(chip);
- /* (7) */
+ /* (6) */
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
@@ -4046,15 +4055,14 @@ And next, set suspend/resume callbacks to the pci_driver.
::
+ static SIMPLE_DEV_PM_OPS(snd_my_pm_ops, mychip_suspend, mychip_resume);
+
static struct pci_driver driver = {
.name = KBUILD_MODNAME,
.id_table = snd_my_ids,
.probe = snd_my_probe,
.remove = snd_my_remove,
- #ifdef CONFIG_PM
- .suspend = snd_my_suspend,
- .resume = snd_my_resume,
- #endif
+ .driver.pm = &snd_my_pm_ops,
};
Module Parameters
@@ -4078,7 +4086,7 @@ variables, instead. ``enable`` option is not always necessary in this
case, but it would be better to have a dummy option for compatibility.
The module parameters must be declared with the standard
-``module_param()()``, ``module_param_array()()`` and
+``module_param()``, ``module_param_array()`` and
:c:func:`MODULE_PARM_DESC()` macros.
The typical coding would be like below:
@@ -4094,15 +4102,14 @@ The typical coding would be like below:
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard.");
-Also, don't forget to define the module description, classes, license
-and devices. Especially, the recent modprobe requires to define the
+Also, don't forget to define the module description and the license.
+In particular, the recent modprobe requires you to define the
module license as GPL, etc., otherwise the system is shown as “tainted”.
::
- MODULE_DESCRIPTION("My Chip");
+ MODULE_DESCRIPTION("Sound driver for My Chip");
MODULE_LICENSE("GPL");
- MODULE_SUPPORTED_DEVICE("{{Vendor,My Chip Name}}");
How To Put Your Driver Into ALSA Tree
@@ -4117,21 +4124,17 @@ a question now: how to put my own driver into the ALSA driver tree? Here
Suppose that you create a new PCI driver for the card “xyz”. The card
module name would be snd-xyz. The new driver is usually put into the
-alsa-driver tree, ``alsa-driver/pci`` directory in the case of PCI
-cards. Then the driver is evaluated, audited and tested by developers
-and users. After a certain time, the driver will go to the alsa-kernel
-tree (to the corresponding directory, such as ``alsa-kernel/pci``) and
-eventually will be integrated into the Linux 2.6 tree (the directory
-would be ``linux/sound/pci``).
+kernel tree, ``sound/pci`` directory in the case of PCI
+cards.
In the following sections, the driver code is supposed to be put into
-alsa-driver tree. The two cases are covered: a driver consisting of a
+the Linux kernel tree. Two cases are covered: a driver consisting of a
single source file and one consisting of several source files.
Driver with A Single Source File
--------------------------------
-1. Modify alsa-driver/pci/Makefile
+1. Modify sound/pci/Makefile
Suppose you have a file xyz.c. Add the following two lines
@@ -4160,52 +4163,43 @@ Driver with A Single Source File
For the details of Kconfig script, refer to the kbuild documentation.
-3. Run cvscompile script to re-generate the configure script and build
- the whole stuff again.
-
Drivers with Several Source Files
---------------------------------
Suppose that the driver snd-xyz have several source files. They are
-located in the new subdirectory, pci/xyz.
+located in the new subdirectory, sound/pci/xyz.
-1. Add a new directory (``xyz``) in ``alsa-driver/pci/Makefile`` as
- below
+1. Add a new directory (``sound/pci/xyz``) in ``sound/pci/Makefile``
+ as below
::
- obj-$(CONFIG_SND) += xyz/
+ obj-$(CONFIG_SND) += sound/pci/xyz/
-2. Under the directory ``xyz``, create a Makefile
+2. Under the directory ``sound/pci/xyz``, create a Makefile
::
- ifndef SND_TOPDIR
- SND_TOPDIR=../..
- endif
-
- include $(SND_TOPDIR)/toplevel.config
- include $(SND_TOPDIR)/Makefile.conf
-
snd-xyz-objs := xyz.o abc.o def.o
-
obj-$(CONFIG_SND_XYZ) += snd-xyz.o
- include $(SND_TOPDIR)/Rules.make
-
3. Create the Kconfig entry
This procedure is the same as in the last section.
-4. Run cvscompile script to re-generate the configure script and build
- the whole stuff again.
Useful Functions
================
:c:func:`snd_printk()` and friends
----------------------------------------
+----------------------------------
+
+.. note:: This subsection describes a few helper functions that
+   decorate the standard :c:func:`printk()` and friends.
+   However, in general, the use of such helpers is no longer recommended.
+   If possible, try to stick with the standard functions like
+   :c:func:`dev_err()` or :c:func:`pr_err()`.
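+
+   For instance, a typical call would look like the following (the
+   message is, of course, made up):
+
+   ::
+
+     dev_err(card->dev, "cannot allocate my buffer\n");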
ALSA provides a verbose version of the :c:func:`printk()` function.
If a kernel config ``CONFIG_SND_VERBOSE_PRINTK`` is set, this function
@@ -4221,13 +4215,10 @@ just like :c:func:`snd_printk()`. If the ALSA is compiled without
the debugging flag, it's ignored.
:c:func:`snd_printdd()` is compiled in only when
-``CONFIG_SND_DEBUG_VERBOSE`` is set. Please note that
-``CONFIG_SND_DEBUG_VERBOSE`` is not set as default even if you configure
-the alsa-driver with ``--with-debug=full`` option. You need to give
-explicitly ``--with-debug=detect`` option instead.
+``CONFIG_SND_DEBUG_VERBOSE`` is set.
:c:func:`snd_BUG()`
-------------------------
+-------------------
It shows the ``BUG?`` message and stack trace as well as
:c:func:`snd_BUG_ON()` at the point. It's useful to show that a
@@ -4236,7 +4227,7 @@ fatal error happens there.
When no debug flag is set, this macro is ignored.
:c:func:`snd_BUG_ON()`
-----------------------------
+----------------------
:c:func:`snd_BUG_ON()` macro is similar to
:c:func:`WARN_ON()` macro. For example, snd_BUG_ON(!pointer); or
diff --git a/Documentation/sphinx-static/theme_overrides.css b/Documentation/sphinx-static/theme_overrides.css
index 522b6d4c49d4..e21e36cd6761 100644
--- a/Documentation/sphinx-static/theme_overrides.css
+++ b/Documentation/sphinx-static/theme_overrides.css
@@ -4,6 +4,44 @@
*
*/
+/* Improve contrast and increase size for easier reading. */
+
+body {
+ font-family: serif;
+ color: black;
+ font-size: 100%;
+}
+
+h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend {
+ font-family: sans-serif;
+}
+
+.wy-menu-vertical li.current a {
+ color: #505050;
+}
+
+.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
+ color: #303030;
+}
+
+div[class^="highlight"] pre {
+ font-family: monospace;
+ color: black;
+ font-size: 100%;
+}
+
+.wy-menu-vertical {
+ font-family: sans-serif;
+}
+
+.c {
+ font-style: normal;
+}
+
+p {
+ font-size: 100%;
+}
+
/* Interim: Code-blocks with line nos - lines and line numbers don't line up.
* see: https://github.com/rtfd/sphinx_rtd_theme/issues/419
*/
diff --git a/Documentation/spi/00-INDEX b/Documentation/spi/00-INDEX
deleted file mode 100644
index 8e4bb17d70eb..000000000000
--- a/Documentation/spi/00-INDEX
+++ /dev/null
@@ -1,16 +0,0 @@
-00-INDEX
- - this file.
-butterfly
- - AVR Butterfly SPI driver overview and pin configuration.
-ep93xx_spi
- - Basic EP93xx SPI driver configuration.
-pxa2xx
- - PXA2xx SPI master controller build by spi_message fifo wq
-spidev
- - Intro to the userspace API for spi devices
-spi-lm70llp
- - Connecting an LM70-LLP sensor to the kernel via the SPI subsys.
-spi-sc18is602
- - NXP SC18IS602/603 I2C-bus to SPI bridge
-spi-summary
- - (Linux) SPI overview. If unsure about SPI or SPI in Linux, start here.
diff --git a/Documentation/switchtec.txt b/Documentation/switchtec.txt
index f788264921ff..30d6a64e53f7 100644
--- a/Documentation/switchtec.txt
+++ b/Documentation/switchtec.txt
@@ -23,7 +23,7 @@ The primary means of communicating with the Switchtec management firmware is
through the Memory-mapped Remote Procedure Call (MRPC) interface.
Commands are submitted to the interface with a 4-byte command
identifier and up to 1KB of command specific data. The firmware will
-respond with a 4 bytes return code and up to 1KB of command specific
+respond with a 4-byte return code and up to 1KB of command-specific
data. The interface only processes a single command at a time.
@@ -36,8 +36,8 @@ device: /dev/switchtec#, one for each management endpoint in the system.
The char device has the following semantics:
* A write must consist of at least 4 bytes and no more than 1028 bytes.
- The first four bytes will be interpreted as the command to run and
- the remainder will be used as the input data. A write will send the
+ The first 4 bytes will be interpreted as the Command ID and the
+ remainder will be used as the input data. A write will send the
command to the firmware to begin processing.
* Each write must be followed by exactly one read. Any double write will
@@ -45,9 +45,9 @@ The char device has the following semantics:
produce an error.
* A read will block until the firmware completes the command and return
- the four bytes of status plus up to 1024 bytes of output data. (The
- length will be specified by the size parameter of the read call --
- reading less than 4 bytes will produce an error.
+ the 4-byte Command Return Value plus up to 1024 bytes of output
+ data. (The length will be specified by the size parameter of the read
+ call -- reading less than 4 bytes will produce an error.)
* The poll call will also be supported for userspace applications that
need to do other things while waiting for the command to complete.
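+
+For illustration, a minimal user-space sequence could look like the
+following sketch (error handling omitted; the command ID and lengths
+are made up)::
+
+    uint32_t cmd = 0x41;               /* hypothetical MRPC command ID */
+    uint8_t buf[1028];
+
+    memcpy(buf, &cmd, 4);              /* first 4 bytes: command ID */
+    memcpy(buf + 4, in_data, in_len);  /* up to 1KB of input data */
+    write(fd, buf, 4 + in_len);
+
+    read(fd, buf, 4 + out_len);        /* 4-byte return value + output */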
@@ -83,10 +83,20 @@ The following IOCTLs are also supported by the device:
Non-Transparent Bridge (NTB) Driver
===================================
-An NTB driver is provided for the switchtec hardware in switchtec_ntb.
-Currently, it only supports switches configured with exactly 2
-partitions. It also requires the following configuration settings:
+An NTB hardware driver is provided for the Switchtec hardware in
+ntb_hw_switchtec. Currently, it only supports switches configured with
+exactly 2 NT partitions and zero or more non-NT partitions. It also requires
+the following configuration settings:
-* Both partitions must be able to access each other's GAS spaces.
+* Both NT partitions must be able to access each other's GAS spaces.
Thus, the bits in the GAS Access Vector under Management Settings
must be set to support this.
+* Kernel configuration MUST include support for NTB (CONFIG_NTB needs
+ to be set)
+
+NT EP BAR 2 will be dynamically configured as a Direct Window, and
+the configuration file does not need to configure it explicitly.
+
+Please refer to Documentation/ntb.txt in the Linux source tree for an
+overall understanding of the Linux NTB stack. ntb_hw_switchtec works as
+an NTB Hardware Driver in this stack.
diff --git a/Documentation/sysctl/00-INDEX b/Documentation/sysctl/00-INDEX
deleted file mode 100644
index 8cf5d493fd03..000000000000
--- a/Documentation/sysctl/00-INDEX
+++ /dev/null
@@ -1,16 +0,0 @@
-00-INDEX
- - this file.
-README
- - general information about /proc/sys/ sysctl files.
-abi.txt
- - documentation for /proc/sys/abi/*.
-fs.txt
- - documentation for /proc/sys/fs/*.
-kernel.txt
- - documentation for /proc/sys/kernel/*.
-net.txt
- - documentation for /proc/sys/net/*.
-sunrpc.txt
- - documentation for /proc/sys/sunrpc/*.
-vm.txt
- - documentation for /proc/sys/vm/*.
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
deleted file mode 100644
index 3be05fe0f1f9..000000000000
--- a/Documentation/timers/00-INDEX
+++ /dev/null
@@ -1,16 +0,0 @@
-00-INDEX
- - this file
-highres.txt
- - High resolution timers and dynamic ticks design notes
-hpet.txt
- - High Precision Event Timer Driver for Linux
-hrtimers.txt
- - subsystem for high-resolution kernel timers
-NO_HZ.txt
- - Summary of the different methods for the scheduler clock-interrupts management.
-timekeeping.txt
- - Clock sources, clock events, sched_clock() and delay timer notes
-timers-howto.txt
- - how to insert delays in the kernel the right (tm) way.
-timer_stats.txt
- - timer usage statistics
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 7ea16a0ceffc..f82434f2795e 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -2987,6 +2987,9 @@ The following commands are supported:
command, it only prints out the contents of the ring buffer for the
CPU that executed the function that triggered the dump.
+- stacktrace:
+ When the function is hit, a stack trace is recorded.
+
trace_pipe
----------
diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
index 5ac724baea7d..7dda76503127 100644
--- a/Documentation/trace/histogram.rst
+++ b/Documentation/trace/histogram.rst
@@ -1765,7 +1765,7 @@ For example, here's how a latency can be calculated::
# echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger
# echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger
-In the first line above, the event's timetamp is saved into the
+In the first line above, the event's timestamp is saved into the
variable ts0. In the next line, ts0 is subtracted from the second
event's timestamp to produce the latency, which is then assigned into
yet another variable, 'wakeup_lat'. The hist trigger below in turn
@@ -1811,7 +1811,7 @@ the command that defined it with a '!'::
/sys/kernel/debug/tracing/synthetic_events
At this point, there isn't yet an actual 'wakeup_latency' event
-instantiated in the event subsytem - for this to happen, a 'hist
+instantiated in the event subsystem - for this to happen, a 'hist
trigger action' needs to be instantiated and bound to actual fields
and variables defined on other events (see Section 2.2.3 below on
how that is done using hist trigger 'onmatch' action). Once that is
@@ -1837,7 +1837,7 @@ output can be displayed by reading the event's 'hist' file.
A hist trigger 'action' is a function that's executed whenever a
histogram entry is added or updated.
-The default 'action' if no special function is explicity specified is
+The default 'action' if no special function is explicitly specified is
as it always has been, to simply update the set of values associated
with an entry. Some applications, however, may want to perform
additional actions at that point, such as generate another event, or
diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX
deleted file mode 100644
index af0d23968ee7..000000000000
--- a/Documentation/virtual/00-INDEX
+++ /dev/null
@@ -1,11 +0,0 @@
-Virtualization support in the Linux kernel.
-
-00-INDEX
- - this file.
-
-paravirt_ops.txt
- - Describes the Linux kernel pv_ops to support different hypervisors
-kvm/
- - Kernel Virtual Machine. See also http://linux-kvm.org
-uml/
- - User Mode Linux, builds/runs Linux kernel as a userspace program.
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
deleted file mode 100644
index 3492458a4ae8..000000000000
--- a/Documentation/virtual/kvm/00-INDEX
+++ /dev/null
@@ -1,35 +0,0 @@
-00-INDEX
- - this file.
-amd-memory-encryption.rst
- - notes on AMD Secure Encrypted Virtualization feature and SEV firmware
- command description
-api.txt
- - KVM userspace API.
-arm
- - internal ABI between the kernel and HYP (for arm/arm64)
-cpuid.txt
- - KVM-specific cpuid leaves (x86).
-devices/
- - KVM_CAP_DEVICE_CTRL userspace API.
-halt-polling.txt
- - notes on halt-polling
-hypercalls.txt
- - KVM hypercalls.
-locking.txt
- - notes on KVM locks.
-mmu.txt
- - the x86 kvm shadow mmu.
-msr.txt
- - KVM-specific MSRs (x86).
-nested-vmx.txt
- - notes on nested virtualization for Intel x86 processors.
-ppc-pv.txt
- - the paravirtualization interface on PowerPC.
-review-checklist.txt
- - review checklist for KVM patches.
-s390-diag.txt
- - Diagnose hypercall description (for IBM S/390)
-timekeeping.txt
- - timekeeping virtualization for x86-based architectures.
-vcpu-requests.rst
- - internal VCPU request API
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index c664064f76fb..cd209f7730af 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -123,6 +123,37 @@ memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
flag KVM_VM_MIPS_VZ.
+On arm64, the physical address size for a VM (IPA Size limit) is limited
+to 40 bits by default. The limit can be configured if the host supports the
+extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use
+KVM_VM_TYPE_ARM_IPA_SIZE(IPA_Bits) to set the size in the machine type
+identifier, where IPA_Bits is the maximum width of any physical
+address used by the VM. IPA_Bits is encoded in bits [7-0] of the
+machine type identifier.
+
+e.g., to configure a guest to use a 48-bit physical address size:
+
+ vm_fd = ioctl(dev_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(48));
+
+The requested size (IPA_Bits) must be:
+  0 - Implies default size, 40 bits (for backward compatibility)
+
+ or
+
+  N - Implies N bits, where N is a positive integer such that
+ 32 <= N <= Host_IPA_Limit
+
+Host_IPA_Limit is the maximum possible value for IPA_Bits on the host and
+is dependent on the CPU capability and the kernel configuration. The limit can
+be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION
+ioctl() at run-time.
+
+Please note that configuring the IPA size does not affect the capability
+exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects
+the size of the address translated at the stage2 level (guest physical to
+host physical address translations).
+
+
4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST
Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST
@@ -850,7 +881,7 @@ struct kvm_vcpu_events {
__u8 injected;
__u8 nr;
__u8 has_error_code;
- __u8 pad;
+ __u8 pending;
__u32 error_code;
} exception;
struct {
@@ -873,15 +904,23 @@ struct kvm_vcpu_events {
__u8 smm_inside_nmi;
__u8 latched_init;
} smi;
+ __u8 reserved[27];
+ __u8 exception_has_payload;
+ __u64 exception_payload;
};
-Only two fields are defined in the flags field:
+The following bits are defined in the flags field:
-- KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
+- KVM_VCPUEVENT_VALID_SHADOW may be set to signal that
interrupt.shadow contains a valid state.
-- KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that
- smi contains a valid state.
+- KVM_VCPUEVENT_VALID_SMM may be set to signal that smi contains a
+ valid state.
+
+- KVM_VCPUEVENT_VALID_PAYLOAD may be set to signal that the
+ exception_has_payload, exception_payload, and exception.pending
+ fields contain a valid state. This bit will be set whenever
+ KVM_CAP_EXCEPTION_PAYLOAD is enabled.
ARM/ARM64:
@@ -961,6 +1000,11 @@ shall be written into the VCPU.
KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
+If KVM_CAP_EXCEPTION_PAYLOAD is enabled, KVM_VCPUEVENT_VALID_PAYLOAD
+can be set in the flags field to signal that the
+exception_has_payload, exception_payload, and exception.pending fields
+contain a valid state and shall be written into the VCPU.
+
ARM/ARM64:
Set the pending SError exception state for this VCPU. It is not possible to
@@ -1922,6 +1966,7 @@ registers, find a list below:
PPC | KVM_REG_PPC_TIDR | 64
PPC | KVM_REG_PPC_PSSCR | 64
PPC | KVM_REG_PPC_DEC_EXPIRY | 64
+ PPC | KVM_REG_PPC_PTCR | 64
PPC | KVM_REG_PPC_TM_GPR0 | 64
...
PPC | KVM_REG_PPC_TM_GPR31 | 64
@@ -2269,6 +2314,10 @@ The supported flags are:
The emulated MMU supports 1T segments in addition to the
standard 256M ones.
+ - KVM_PPC_NO_HASH
+ This flag indicates that HPT guests are not supported by KVM,
+ thus all guests must use radix MMU mode.
+
The "slb_size" field indicates how many SLB entries are supported
The "sps" array contains 8 entries indicating the supported base
@@ -3676,6 +3725,34 @@ Returns: 0 on success, -1 on error
This copies the vcpu's kvm_nested_state struct from userspace to the kernel. For
the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE.
+4.116 KVM_(UN)REGISTER_COALESCED_MMIO
+
+Capability: KVM_CAP_COALESCED_MMIO (for coalesced mmio)
+ KVM_CAP_COALESCED_PIO (for coalesced pio)
+Architectures: all
+Type: vm ioctl
+Parameters: struct kvm_coalesced_mmio_zone
+Returns: 0 on success, < 0 on error
+
+Coalesced I/O is a performance optimization that defers hardware
+register write emulation so that userspace exits are avoided. It is
+typically used to reduce the overhead of emulating frequently accessed
+hardware registers.
+
+When a hardware register is configured for coalesced I/O, write accesses
+do not exit to userspace and their value is recorded in a ring buffer
+that is shared between kernel and userspace.
+
+Coalesced I/O is used if one or more write accesses to a hardware
+register can be deferred until a read or a write to another hardware
+register on the same device. This last access will cause a vmexit and
+userspace will process the accesses from the ring buffer before emulating
+it. That avoids exiting to userspace on repeated writes.
+
+Coalesced pio is based on coalesced mmio. There is little difference
+between coalesced mmio and pio except that coalesced pio records accesses
+to I/O ports.
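+
+For illustration, registering a zone could look like the following
+sketch (the address and size are made up):
+
+    struct kvm_coalesced_mmio_zone zone = {
+        .addr = 0xfed00000,   /* hypothetical register address */
+        .size = 4096,
+    };
+
+    ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);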
+
5. The kvm_run structure
------------------------
@@ -4510,7 +4587,8 @@ Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
Architectures: s390
Parameters: none
Returns: 0 on success, -EINVAL if hpage module parameter was not set
- or cmma is enabled
+ or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
+ flag set
With this capability the KVM support for memory backing with 1m pages
through hugetlbfs can be enabled for a VM. After the capability is
@@ -4521,6 +4599,54 @@ hpage module parameter is not set to 1, -EINVAL is returned.
While it is generally possible to create a huge page backed VM without
this capability, the VM will not be able to run.
+7.15 KVM_CAP_MSR_PLATFORM_INFO
+
+Architectures: x86
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
+a #GP would be raised when the guest tries to access it. Currently, this
+capability does not enable write permissions of this MSR for the guest.
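+
+For illustration, userspace could enable it with a sketch like:
+
+    struct kvm_enable_cap cap = {
+        .cap = KVM_CAP_MSR_PLATFORM_INFO,
+        .args = { 1 },   /* 1 = enable, 0 = disable */
+    };
+
+    ioctl(vm_fd, KVM_ENABLE_CAP, &cap);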
+
+7.16 KVM_CAP_PPC_NESTED_HV
+
+Architectures: ppc
+Parameters: none
+Returns: 0 on success, -EINVAL when the implementation doesn't support
+ nested-HV virtualization.
+
+HV-KVM on POWER9 and later systems allows for "nested-HV"
+virtualization, which provides a way for a guest VM to run guests that
+can run using the CPU's supervisor mode (privileged non-hypervisor
+state). Enabling this capability on a VM depends on the CPU having
+the necessary functionality and on the facility being enabled with a
+kvm-hv module parameter.
+
+7.17 KVM_CAP_EXCEPTION_PAYLOAD
+
+Architectures: x86
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability enabled, CR2 will not be modified prior to the
+emulated VM-exit when L1 intercepts a #PF exception that occurs in
+L2. Similarly, for kvm-intel only, DR6 will not be modified prior to
+the emulated VM-exit when L1 intercepts a #DB exception that occurs in
+L2. As a result, when KVM_GET_VCPU_EVENTS reports a pending #PF (or
+#DB) exception for L2, exception.has_payload will be set and the
+faulting address (or the new DR6 bits*) will be reported in the
+exception_payload field. Similarly, when userspace injects a #PF (or
+#DB) into L2 using KVM_SET_VCPU_EVENTS, it is expected to set
+exception.has_payload and to put the faulting address (or the new DR6
+bits*) in the exception_payload field.
+
+This capability also enables exception.pending in struct
+kvm_vcpu_events, which allows userspace to distinguish between pending
+and injected exceptions.
+
+
+* For the new DR6 bits, note that bit 16 is set iff the #DB exception
+ will clear DR6.RTM.
+
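+For illustration, injecting a #PF with its faulting address into L2
+could look like the following sketch (``ec`` and ``fault_address`` are
+made-up variables):
+
+    struct kvm_vcpu_events events = {};
+
+    events.flags = KVM_VCPUEVENT_VALID_PAYLOAD;
+    events.exception.pending = 1;
+    events.exception.nr = 14;                 /* #PF */
+    events.exception.has_error_code = 1;
+    events.exception.error_code = ec;
+    events.exception_has_payload = 1;
+    events.exception_payload = fault_address; /* becomes CR2 */
+
+    ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
+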
8. Other capabilities.
----------------------
@@ -4762,3 +4888,10 @@ CPU when the exception is taken. If this virtual SError is taken to EL1 using
AArch64, this value will be reported in the ISS field of ESR_ELx.
See KVM_CAP_VCPU_EVENTS for more details.
+8.20 KVM_CAP_HYPERV_SEND_IPI
+
+Architectures: x86
+
+This capability indicates that KVM supports paravirtualized Hyper-V IPI send
+hypercalls:
+HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx.
diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
deleted file mode 100644
index f4a4f3e884cf..000000000000
--- a/Documentation/vm/00-INDEX
+++ /dev/null
@@ -1,50 +0,0 @@
-00-INDEX
- - this file.
-active_mm.rst
- - An explanation from Linus about tsk->active_mm vs tsk->mm.
-balance.rst
- - various information on memory balancing.
-cleancache.rst
- - Intro to cleancache and page-granularity victim cache.
-frontswap.rst
- - Outline frontswap, part of the transcendent memory frontend.
-highmem.rst
- - Outline of highmem and common issues.
-hmm.rst
- - Documentation of heterogeneous memory management
-hugetlbfs_reserv.rst
- - A brief overview of hugetlbfs reservation design/implementation.
-hwpoison.rst
- - explains what hwpoison is
-ksm.rst
- - how to use the Kernel Samepage Merging feature.
-mmu_notifier.rst
- - a note about clearing pte/pmd and mmu notifications
-numa.rst
- - information about NUMA specific code in the Linux vm.
-overcommit-accounting.rst
- - description of the Linux kernels overcommit handling modes.
-page_frags.rst
- - description of page fragments allocator
-page_migration.rst
- - description of page migration in NUMA systems.
-page_owner.rst
- - tracking about who allocated each page
-remap_file_pages.rst
- - a note about remap_file_pages() system call
-slub.rst
- - a short users guide for SLUB.
-split_page_table_lock.rst
- - Separate per-table lock to improve scalability of the old page_table_lock.
-swap_numa.rst
- - automatic binding of swap device to numa node
-transhuge.rst
- - Transparent Hugepage Support, alternative way of using hugepages.
-unevictable-lru.rst
- - Unevictable LRU infrastructure
-z3fold.txt
- - outline of z3fold allocator for storing compressed pages
-zsmalloc.rst
- - outline of zsmalloc allocator for storing compressed pages
-zswap.rst
- - Intro to compressed cache for swap pages
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index cdf3911582c8..44205f0b671f 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -194,13 +194,13 @@ use either::
unsigned long start,
unsigned long end,
hmm_pfn_t *pfns);
- int hmm_vma_fault(struct vm_area_struct *vma,
- struct hmm_range *range,
- unsigned long start,
- unsigned long end,
- hmm_pfn_t *pfns,
- bool write,
- bool block);
+ int hmm_vma_fault(struct vm_area_struct *vma,
+ struct hmm_range *range,
+ unsigned long start,
+ unsigned long end,
+ hmm_pfn_t *pfns,
+ bool write,
+ bool block);
The first one (hmm_vma_get_pfns()) will only fetch present CPU page table
entries and will not trigger a page fault on missing or non-present entries.
diff --git a/Documentation/w1/00-INDEX b/Documentation/w1/00-INDEX
deleted file mode 100644
index cb49802745dc..000000000000
--- a/Documentation/w1/00-INDEX
+++ /dev/null
@@ -1,10 +0,0 @@
-00-INDEX
- - This file
-slaves/
- - Drivers that provide support for specific family codes.
-masters/
- - Individual chips providing 1-wire busses.
-w1.generic
- - The 1-wire (w1) bus
-w1.netlink
- - Userspace communication protocol over connector [1].
diff --git a/Documentation/w1/masters/00-INDEX b/Documentation/w1/masters/00-INDEX
deleted file mode 100644
index 8330cf9325f0..000000000000
--- a/Documentation/w1/masters/00-INDEX
+++ /dev/null
@@ -1,12 +0,0 @@
-00-INDEX
- - This file
-ds2482
- - The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses.
-ds2490
- - The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges.
-mxc-w1
- - W1 master controller driver found on Freescale MX2/MX3 SoCs
-omap-hdq
- - HDQ/1-wire module of TI OMAP 2430/3430.
-w1-gpio
- - GPIO 1-wire bus master driver.
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
deleted file mode 100644
index 68946f83e579..000000000000
--- a/Documentation/w1/slaves/00-INDEX
+++ /dev/null
@@ -1,14 +0,0 @@
-00-INDEX
- - This file
-w1_therm
- - The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
-w1_ds2413
- - The Maxim/Dallas Semiconductor ds2413 dual channel addressable switch.
-w1_ds2423
- - The Maxim/Dallas Semiconductor ds2423 counter device.
-w1_ds2438
- - The Maxim/Dallas Semiconductor ds2438 smart battery monitor.
-w1_ds28e04
- - The Maxim/Dallas Semiconductor ds28e04 eeprom.
-w1_ds28e17
- - The Maxim/Dallas Semiconductor ds28e17 1-Wire-to-I2C Master Bridge.
diff --git a/Documentation/x86/00-INDEX b/Documentation/x86/00-INDEX
deleted file mode 100644
index 3bb2ee3edcd1..000000000000
--- a/Documentation/x86/00-INDEX
+++ /dev/null
@@ -1,20 +0,0 @@
-00-INDEX
- - this file
-boot.txt
- - List of boot protocol versions
-earlyprintk.txt
- - Using earlyprintk with a USB2 debug port key.
-entry_64.txt
- - Describe (some of the) kernel entry points for x86.
-exception-tables.txt
- - why and how Linux kernel uses exception tables on x86
-microcode.txt
- - How to load microcode from an initrd-CPIO archive early to fix CPU issues.
-mtrr.txt
- - how to use x86 Memory Type Range Registers to increase performance
-pat.txt
- - Page Attribute Table intro and API
-usb-legacy-support.txt
- - how to fix/avoid quirks when using emulated PS/2 mouse/keyboard.
-zero-page.txt
- - layout of the first page of memory.
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 5e9b826b5f62..7727db8f94bc 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -61,6 +61,18 @@ Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
to struct boot_params for loading bzImage and ramdisk
above 4G in 64bit.
+Protocol 2.13: (Kernel 3.14) Support 32- and 64-bit flags being set in
+ xloadflags to support booting a 64-bit kernel from 32-bit
+ EFI
+
+Protocol 2.14: (Kernel 4.20) Added acpi_rsdp_addr holding the physical
+ address of the ACPI RSDP table.
+ The bootloader updates version with:
+ 0x8000 | min(kernel-version, bootloader-version)
+ kernel-version being the protocol version supported by
+ the kernel and bootloader-version the protocol version
+ supported by the bootloader.
+
**** MEMORY LAYOUT
The traditional memory map for the kernel loader, used for Image or
@@ -197,6 +209,7 @@ Offset Proto Name Meaning
0258/8 2.10+ pref_address Preferred loading address
0260/4 2.10+ init_size Linear memory required during initialization
0264/4 2.11+ handover_offset Offset of handover entry point
+0268/8 2.14+ acpi_rsdp_addr Physical address of RSDP table
(1) For backwards compatibility, if the setup_sects field contains 0, the
real value is 4.
@@ -309,7 +322,7 @@ Protocol: 2.00+
Contains the magic number "HdrS" (0x53726448).
Field name: version
-Type: read
+Type: modify
Offset/size: 0x206/2
Protocol: 2.00+
@@ -317,6 +330,12 @@ Protocol: 2.00+
e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
10.17.
+ Up to protocol version 2.13 this information is only read by the
+ bootloader. From protocol version 2.14 onwards the bootloader will
+ write the used protocol version or-ed with 0x8000 to the field. The
+ used protocol version will be the minimum of the supported protocol
+ versions of the bootloader and the kernel.
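+
+  For illustration, a bootloader that itself supports protocol 2.14
+  might do the following (a sketch; ``hdr`` is a made-up pointer to
+  the setup header):
+
+	if (hdr->version >= 0x020e)
+		hdr->version = 0x8000 | 0x020e;	/* min() is the bootloader's */
+	else
+		hdr->version |= 0x8000;		/* min() is the kernel's */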
+
Field name: realmode_swtch
Type: modify (optional)
Offset/size: 0x208/4
@@ -744,6 +763,17 @@ Offset/size: 0x264/4
See EFI HANDOVER PROTOCOL below for more details.
+Field name: acpi_rsdp_addr
+Type: write
+Offset/size: 0x268/8
+Protocol: 2.14+
+
+ This field can be set by the boot loader to tell the kernel the
+ physical address of the ACPI RSDP table.
+
+ A value of 0 indicates the kernel should fall back to the standard
+ methods to locate the RSDP.
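+
+  For illustration (``hdr`` and ``rsdp_phys`` are made-up names):
+
+	hdr->acpi_rsdp_addr = rsdp_phys;	/* 0 = standard RSDP search */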
+
**** THE IMAGE CHECKSUM
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
index f662d3c530e5..52b10945ff75 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -520,18 +520,24 @@ the pseudo-locked region:
2) Cache hit and miss measurements using model specific precision counters if
available. Depending on the levels of cache on the system the pseudo_lock_l2
and pseudo_lock_l3 tracepoints are available.
- WARNING: triggering this measurement uses from two (for just L2
- measurements) to four (for L2 and L3 measurements) precision counters on
- the system, if any other measurements are in progress the counters and
- their corresponding event registers will be clobbered.
When a pseudo-locked region is created a new debugfs directory is created for
it in debugfs as /sys/kernel/debug/resctrl/<newdir>. A single
write-only file, pseudo_lock_measure, is present in this directory. The
-measurement on the pseudo-locked region depends on the number, 1 or 2,
-written to this debugfs file. Since the measurements are recorded with the
-tracing infrastructure the relevant tracepoints need to be enabled before the
-measurement is triggered.
+measurement of the pseudo-locked region depends on the number written to this
+debugfs file:
+1 - writing "1" to the pseudo_lock_measure file will trigger the latency
+ measurement captured in the pseudo_lock_mem_latency tracepoint. See
+ example below.
+2 - writing "2" to the pseudo_lock_measure file will trigger the L2 cache
+ residency (cache hits and misses) measurement captured in the
+ pseudo_lock_l2 tracepoint. See example below.
+3 - writing "3" to the pseudo_lock_measure file will trigger the L3 cache
+ residency (cache hits and misses) measurement captured in the
+ pseudo_lock_l3 tracepoint.
+
+All measurements are recorded with the tracing infrastructure. This requires
+the relevant tracepoints to be enabled before the measurement is triggered.
Example of latency debugging interface:
In this example a pseudo-locked region named "newlock" was created. Here is
diff --git a/Documentation/x86/x86_64/00-INDEX b/Documentation/x86/x86_64/00-INDEX
deleted file mode 100644
index 92fc20ab5f0e..000000000000
--- a/Documentation/x86/x86_64/00-INDEX
+++ /dev/null
@@ -1,16 +0,0 @@
-00-INDEX
- - This file
-boot-options.txt
- - AMD64-specific boot options.
-cpu-hotplug-spec
- - Firmware support for CPU hotplug under Linux/x86-64
-fake-numa-for-cpusets
- - Using numa=fake and CPUSets for Resource Management
-kernel-stacks
- - Context-specific per-processor interrupt stacks.
-machinecheck
- - Configurable sysfs parameters for the x86-64 machine check code.
-mm.txt
- - Memory layout of x86-64 (4 level page tables, 46 bits physical).
-uefi.txt
- - Booting Linux via Unified Extensible Firmware Interface.
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 5432a96d31ff..702898633b00 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -1,55 +1,124 @@
+====================================================
+Complete virtual memory map with 4-level page tables
+====================================================
-Virtual memory map with 4 level page tables:
-
-0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
-hole caused by [47:63] sign extension
-ffff800000000000 - ffff87ffffffffff (=43 bits) guard hole, reserved for hypervisor
-ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory
-ffffc80000000000 - ffffc8ffffffffff (=40 bits) hole
-ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
-ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
-ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
-... unused hole ...
-ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
-... unused hole ...
- vaddr_end for KASLR
-fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
-fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
-ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
-... unused hole ...
-ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
-... unused hole ...
-ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
-ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
-[fixmap start] - ffffffffff5fffff kernel-internal fixmap range
-ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
-ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
-
-Virtual memory map with 5 level page tables:
-
-0000000000000000 - 00ffffffffffffff (=56 bits) user space, different per mm
-hole caused by [56:63] sign extension
-ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor
-ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory
-ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI
-ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB)
-ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
-ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
-... unused hole ...
-ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
-... unused hole ...
- vaddr_end for KASLR
-fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
-... unused hole ...
-ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
-... unused hole ...
-ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
-... unused hole ...
-ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
-ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space
-[fixmap start] - ffffffffff5fffff kernel-internal fixmap range
-ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
-ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
+Notes:
+
+ - Negative addresses such as "-23 TB" are absolute addresses in bytes, counted down
+ from the top of the 64-bit address space. It's easier to understand the layout
+ when seen both in absolute addresses and in distance-from-top notation.
+
+ For example 0xffffe90000000000 == -23 TB, it's 23 TB lower than the top of the
+ 64-bit address space (ffffffffffffffff).
+
+ Note that as we get closer to the top of the address space, the notation changes
+ from TB to GB and then MB/KB.
+
+ - "16M TB" might look weird at first sight, but it's an easier to visualize size
+ notation than "16 EB", which few will recognize at first sight as 16 exabytes.
+ It also shows it nicely how incredibly large 64-bit address space is.
+
+========================================================================================================================
+ Start addr | Offset | End addr | Size | VM area description
+========================================================================================================================
+ | | | |
+ 0000000000000000 | 0 | 00007fffffffffff | 128 TB | user-space virtual memory, different per mm
+__________________|____________|__________________|_________|___________________________________________________________
+ | | | |
+ 0000800000000000 | +128 TB | ffff7fffffffffff | ~16M TB | ... huge, almost 64 bits wide hole of non-canonical
+ | | | | virtual memory addresses up to the -128 TB
+ | | | | starting offset of kernel mappings.
+__________________|____________|__________________|_________|___________________________________________________________
+ |
+ | Kernel-space virtual memory, shared between all processes:
+____________________________________________________________|___________________________________________________________
+ | | | |
+ ffff800000000000 | -128 TB | ffff87ffffffffff | 8 TB | ... guard hole, also reserved for hypervisor
+ ffff880000000000 | -120 TB | ffffc7ffffffffff | 64 TB | direct mapping of all physical memory (page_offset_base)
+ ffffc80000000000 | -56 TB | ffffc8ffffffffff | 1 TB | ... unused hole
+ ffffc90000000000 | -55 TB | ffffe8ffffffffff | 32 TB | vmalloc/ioremap space (vmalloc_base)
+ ffffe90000000000 | -23 TB | ffffe9ffffffffff | 1 TB | ... unused hole
+ ffffea0000000000 | -22 TB | ffffeaffffffffff | 1 TB | virtual memory map (vmemmap_base)
+ ffffeb0000000000 | -21 TB | ffffebffffffffff | 1 TB | ... unused hole
+ ffffec0000000000 | -20 TB | fffffbffffffffff | 16 TB | KASAN shadow memory
+ fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
+ | | | | vaddr_end for KASLR
+ fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | LDT remap for PTI
+ ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
+__________________|____________|__________________|_________|____________________________________________________________
+ |
+ | Identical layout to the 47-bit one from here on:
+____________________________________________________________|____________________________________________________________
+ | | | |
+ ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
+ ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
+ ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
+ ffffffff80000000 | -2 GB | ffffffff9fffffff | 512 MB | kernel text mapping, mapped to physical address 0
+ ffffffff80000000 |-2048 MB | | |
+ ffffffffa0000000 |-1536 MB | fffffffffeffffff | 1520 MB | module mapping space
+ ffffffffff000000 | -16 MB | | |
+ FIXADDR_START | ~-11 MB | ffffffffff5fffff | ~0.5 MB | kernel-internal fixmap range, variable size and offset
+ ffffffffff600000 | -10 MB | ffffffffff600fff | 4 kB | legacy vsyscall ABI
+ ffffffffffe00000 | -2 MB | ffffffffffffffff | 2 MB | ... unused hole
+__________________|____________|__________________|_________|___________________________________________________________
+
+
+====================================================
+Complete virtual memory map with 5-level page tables
+====================================================
+
+Notes:
+
+ - With 56-bit addresses, user-space memory gets expanded by a factor of 512x,
+ from 0.125 PB to 64 PB. All kernel mappings shift down to the -64 PT starting
+   from 0.125 PB to 64 PB. All kernel mappings shift down to the -64 PB starting
+ memory supported.
+
+========================================================================================================================
+ Start addr | Offset | End addr | Size | VM area description
+========================================================================================================================
+ | | | |
+ 0000000000000000 | 0 | 00ffffffffffffff | 64 PB | user-space virtual memory, different per mm
+__________________|____________|__________________|_________|___________________________________________________________
+ | | | |
+ 0000800000000000 | +64 PB | ffff7fffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
+ | | | | virtual memory addresses up to the -128 TB
+ | | | | starting offset of kernel mappings.
+__________________|____________|__________________|_________|___________________________________________________________
+ |
+ | Kernel-space virtual memory, shared between all processes:
+____________________________________________________________|___________________________________________________________
+ | | | |
+ ff00000000000000 | -64 PB | ff0fffffffffffff | 4 PB | ... guard hole, also reserved for hypervisor
+ ff10000000000000 | -60 PB | ff8fffffffffffff | 32 PB | direct mapping of all physical memory (page_offset_base)
+ ff90000000000000 | -28 PB | ff9fffffffffffff | 4 PB | LDT remap for PTI
+ ffa0000000000000 | -24 PB | ffd1ffffffffffff | 12.5 PB | vmalloc/ioremap space (vmalloc_base)
+ ffd2000000000000 | -11.5 PB | ffd3ffffffffffff | 0.5 PB | ... unused hole
+ ffd4000000000000 | -11 PB | ffd5ffffffffffff | 0.5 PB | virtual memory map (vmemmap_base)
+ ffd6000000000000 | -10.5 PB | ffdeffffffffffff | 2.25 PB | ... unused hole
+ ffdf000000000000 | -8.25 PB | fffffdffffffffff | ~8 PB | KASAN shadow memory
+ fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
+ | | | | vaddr_end for KASLR
+ fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
+ ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
+__________________|____________|__________________|_________|____________________________________________________________
+ |
+ | Identical layout to the 47-bit one from here on:
+____________________________________________________________|____________________________________________________________
+ | | | |
+ ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
+ ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
+ ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
+ ffffffff80000000 | -2 GB | ffffffff9fffffff | 512 MB | kernel text mapping, mapped to physical address 0
+ ffffffff80000000 |-2048 MB | | |
+ ffffffffa0000000 |-1536 MB | fffffffffeffffff | 1520 MB | module mapping space
+ ffffffffff000000 | -16 MB | | |
+ FIXADDR_START | ~-11 MB | ffffffffff5fffff | ~0.5 MB | kernel-internal fixmap range, variable size and offset
+ ffffffffff600000 | -10 MB | ffffffffff600fff | 4 kB | legacy vsyscall ABI
+ ffffffffffe00000 | -2 MB | ffffffffffffffff | 2 MB | ... unused hole
+__________________|____________|__________________|_________|___________________________________________________________
Architecture defines a 64-bit virtual address. Implementations can support
less. Currently supported are 48- and 57-bit virtual addresses. Bits 63
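As a sanity check on the 5-level table above, the key boundaries fall out of
sign extension of bit 56. The following user-space sketch is illustrative
only and is not part of the patch:

	/*
	 * Illustrative sketch (not from the patch): derive the 57-bit
	 * (5-level) boundaries in the table above from bit 56.
	 */
	#include <stdio.h>
	#include <stdint.h>

	/* A 57-bit VA is canonical iff bits 63:57 all mirror bit 56,
	 * i.e. iff sign-extending it from bit 56 reproduces the value. */
	static int canonical57(uint64_t va)
	{
		return (uint64_t)((int64_t)(va << 7) >> 7) == va;
	}

	int main(void)
	{
		uint64_t user_end     = (1ULL << 56) - 1; /* 00ffffffffffffff, top of 64 PB user space */
		uint64_t kernel_start = -(1ULL << 56);    /* ff00000000000000, the -64 PB kernel offset */

		printf("user end      %016llx  canonical=%d\n",
		       (unsigned long long)user_end, canonical57(user_end));
		printf("hole start    %016llx  canonical=%d\n",
		       (unsigned long long)(user_end + 1), canonical57(user_end + 1));
		printf("kernel start  %016llx  canonical=%d\n",
		       (unsigned long long)kernel_start, canonical57(kernel_start));
		return 0;
	}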
diff --git a/LICENSES/other/CC-BY-SA-4.0 b/LICENSES/other/CC-BY-SA-4.0
deleted file mode 100644
index f9158e831e79..000000000000
--- a/LICENSES/other/CC-BY-SA-4.0
+++ /dev/null
@@ -1,397 +0,0 @@
-Valid-License-Identifier: CC-BY-SA-4.0
-SPDX-URL: https://spdx.org/licenses/CC-BY-SA-4.0
-Usage-Guide:
- To use the Creative Commons Attribution Share Alike 4.0 International
- license put the following SPDX tag/value pair into a comment according to
- the placement guidelines in the licensing rules documentation:
- SPDX-License-Identifier: CC-BY-SA-4.0
-License-Text:
-
-Creative Commons Attribution-ShareAlike 4.0 International
-
-Creative Commons Corporation ("Creative Commons") is not a law firm and
-does not provide legal services or legal advice. Distribution of Creative
-Commons public licenses does not create a lawyer-client or other
-relationship. Creative Commons makes its licenses and related information
-available on an "as-is" basis. Creative Commons gives no warranties
-regarding its licenses, any material licensed under their terms and
-conditions, or any related information. Creative Commons disclaims all
-liability for damages resulting from their use to the fullest extent
-possible.
-
-Using Creative Commons Public Licenses
-
-Creative Commons public licenses provide a standard set of terms and
-conditions that creators and other rights holders may use to share original
-works of authorship and other material subject to copyright and certain
-other rights specified in the public license below. The following
-considerations are for informational purposes only, are not exhaustive, and
-do not form part of our licenses.
-
-Considerations for licensors: Our public licenses are intended for use by
-those authorized to give the public permission to use material in ways
-otherwise restricted by copyright and certain other rights. Our licenses
-are irrevocable. Licensors should read and understand the terms and
-conditions of the license they choose before applying it. Licensors should
-also secure all rights necessary before applying our licenses so that the
-public can reuse the material as expected. Licensors should clearly mark
-any material not subject to the license. This includes other CC-licensed
-material, or material used under an exception or limitation to
-copyright. More considerations for licensors:
-wiki.creativecommons.org/Considerations_for_licensors
-
-Considerations for the public: By using one of our public licenses, a
-licensor grants the public permission to use the licensed material under
-specified terms and conditions. If the licensor's permission is not
-necessary for any reason - for example, because of any applicable exception
-or limitation to copyright - then that use is not regulated by the
-license. Our licenses grant only permissions under copyright and certain
-other rights that a licensor has authority to grant. Use of the licensed
-material may still be restricted for other reasons, including because
-others have copyright or other rights in the material. A licensor may make
-special requests, such as asking that all changes be marked or described.
-
-Although not required by our licenses, you are encouraged to respect those
-requests where reasonable. More considerations for the public:
-wiki.creativecommons.org/Considerations_for_licensees
-
-Creative Commons Attribution-ShareAlike 4.0 International Public License
-
-By exercising the Licensed Rights (defined below), You accept and agree to
-be bound by the terms and conditions of this Creative Commons
-Attribution-ShareAlike 4.0 International Public License ("Public
-License"). To the extent this Public License may be interpreted as a
-contract, You are granted the Licensed Rights in consideration of Your
-acceptance of these terms and conditions, and the Licensor grants You such
-rights in consideration of benefits the Licensor receives from making the
-Licensed Material available under these terms and conditions.
-
-Section 1 - Definitions.
-
- a. Adapted Material means material subject to Copyright and Similar
- Rights that is derived from or based upon the Licensed Material and
- in which the Licensed Material is translated, altered, arranged,
- transformed, or otherwise modified in a manner requiring permission
- under the Copyright and Similar Rights held by the Licensor. For
- purposes of this Public License, where the Licensed Material is a
- musical work, performance, or sound recording, Adapted Material is
- always produced where the Licensed Material is synched in timed
- relation with a moving image.
-
- b. Adapter's License means the license You apply to Your Copyright and
- Similar Rights in Your contributions to Adapted Material in
- accordance with the terms and conditions of this Public License.
-
- c. BY-SA Compatible License means a license listed at
- creativecommons.org/compatiblelicenses, approved by Creative Commons
- as essentially the equivalent of this Public License.
-
- d. Copyright and Similar Rights means copyright and/or similar rights
- closely related to copyright including, without limitation,
- performance, broadcast, sound recording, and Sui Generis Database
- Rights, without regard to how the rights are labeled or
- categorized. For purposes of this Public License, the rights
- specified in Section 2(b)(1)-(2) are not Copyright and Similar
- Rights.
-
- e. Effective Technological Measures means those measures that, in the
- absence of proper authority, may not be circumvented under laws
- fulfilling obligations under Article 11 of the WIPO Copyright Treaty
- adopted on December 20, 1996, and/or similar international
- agreements.
-
- f. Exceptions and Limitations means fair use, fair dealing, and/or any
- other exception or limitation to Copyright and Similar Rights that
- applies to Your use of the Licensed Material.
-
- g. License Elements means the license attributes listed in the name of
- a Creative Commons Public License. The License Elements of this
- Public License are Attribution and ShareAlike.
-
- h. Licensed Material means the artistic or literary work, database, or
- other material to which the Licensor applied this Public License.
-
- i. Licensed Rights means the rights granted to You subject to the terms
- and conditions of this Public License, which are limited to all
- Copyright and Similar Rights that apply to Your use of the Licensed
- Material and that the Licensor has authority to license.
-
- j. Licensor means the individual(s) or entity(ies) granting rights
- under this Public License.
-
- k. Share means to provide material to the public by any means or
- process that requires permission under the Licensed Rights, such as
- reproduction, public display, public performance, distribution,
- dissemination, communication, or importation, and to make material
- available to the public including in ways that members of the public
- may access the material from a place and at a time individually
- chosen by them.
-
- l. Sui Generis Database Rights means rights other than copyright
- resulting from Directive 96/9/EC of the European Parliament and of
- the Council of 11 March 1996 on the legal protection of databases,
- as amended and/or succeeded, as well as other essentially equivalent
- rights anywhere in the world.
-
- m. You means the individual or entity exercising the Licensed Rights
- under this Public License. Your has a corresponding meaning.
-
-Section 2 - Scope.
-
- a. License grant.
-
- 1. Subject to the terms and conditions of this Public License, the
- Licensor hereby grants You a worldwide, royalty-free,
- non-sublicensable, non-exclusive, irrevocable license to
- exercise the Licensed Rights in the Licensed Material to:
-
- A. reproduce and Share the Licensed Material, in whole or in part; and
-
- B. produce, reproduce, and Share Adapted Material.
-
- 2. Exceptions and Limitations. For the avoidance of doubt, where
- Exceptions and Limitations apply to Your use, this Public
- License does not apply, and You do not need to comply with its
- terms and conditions.
-
- 3. Term. The term of this Public License is specified in Section 6(a).
-
- 4. Media and formats; technical modifications allowed. The Licensor
- authorizes You to exercise the Licensed Rights in all media and
- formats whether now known or hereafter created, and to make
- technical modifications necessary to do so. The Licensor waives
- and/or agrees not to assert any right or authority to forbid You
- from making technical modifications necessary to exercise the
- Licensed Rights, including technical modifications necessary to
- circumvent Effective Technological Measures. For purposes of
- this Public License, simply making modifications authorized by
- this Section 2(a)(4) never produces Adapted Material.
-
- 5. Downstream recipients.
-
- A. Offer from the Licensor - Licensed Material. Every recipient
- of the Licensed Material automatically receives an offer
- from the Licensor to exercise the Licensed Rights under the
- terms and conditions of this Public License.
-
- B. Additional offer from the Licensor - Adapted Material. Every
- recipient of Adapted Material from You automatically
- receives an offer from the Licensor to exercise the Licensed
- Rights in the Adapted Material under the conditions of the
- Adapter's License You apply.
-
- C. No downstream restrictions. You may not offer or impose any
- additional or different terms or conditions on, or apply any
- Effective Technological Measures to, the Licensed Material
- if doing so restricts exercise of the Licensed Rights by any
- recipient of the Licensed Material.
-
- 6. No endorsement. Nothing in this Public License constitutes or
- may be construed as permission to assert or imply that You are,
- or that Your use of the Licensed Material is, connected with, or
- sponsored, endorsed, or granted official status by, the Licensor
- or others designated to receive attribution as provided in
- Section 3(a)(1)(A)(i).
-
- b. Other rights.
-
- 1. Moral rights, such as the right of integrity, are not licensed
- under this Public License, nor are publicity, privacy, and/or
- other similar personality rights; however, to the extent
- possible, the Licensor waives and/or agrees not to assert any
- such rights held by the Licensor to the limited extent necessary
- to allow You to exercise the Licensed Rights, but not otherwise.
-
- 2. Patent and trademark rights are not licensed under this Public
- License.
-
- 3. To the extent possible, the Licensor waives any right to collect
- royalties from You for the exercise of the Licensed Rights,
- whether directly or through a collecting society under any
- voluntary or waivable statutory or compulsory licensing
- scheme. In all other cases the Licensor expressly reserves any
- right to collect such royalties.
-
-Section 3 - License Conditions.
-
-Your exercise of the Licensed Rights is expressly made subject to the
-following conditions.
-
- a. Attribution.
-
- 1. If You Share the Licensed Material (including in modified form),
- You must:
-
- A. retain the following if it is supplied by the Licensor with
- the Licensed Material:
-
- i. identification of the creator(s) of the Licensed
- Material and any others designated to receive
- attribution, in any reasonable manner requested by the
- Licensor (including by pseudonym if designated);
-
- ii. a copyright notice;
-
- iii. a notice that refers to this Public License;
-
- iv. a notice that refers to the disclaimer of warranties;
-
- v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
-
- B. indicate if You modified the Licensed Material and retain an
- indication of any previous modifications; and
-
- C. indicate the Licensed Material is licensed under this Public
- License, and include the text of, or the URI or hyperlink to,
- this Public License.
-
- 2. You may satisfy the conditions in Section 3(a)(1) in any
- reasonable manner based on the medium, means, and context in
- which You Share the Licensed Material. For example, it may be
- reasonable to satisfy the conditions by providing a URI or
- hyperlink to a resource that includes the required information.
-
- 3. If requested by the Licensor, You must remove any of the
- information required by Section 3(a)(1)(A) to the extent
- reasonably practicable.
-
- b. ShareAlike. In addition to the conditions in Section 3(a), if You
- Share Adapted Material You produce, the following conditions also
- apply.
-
- 1. The Adapter's License You apply must be a Creative Commons
- license with the same License Elements, this version or
- later, or a BY-SA Compatible License.
-
- 2. You must include the text of, or the URI or hyperlink to, the
- Adapter's License You apply. You may satisfy this condition
- in any reasonable manner based on the medium, means, and
- context in which You Share Adapted Material.
-
- 3. You may not offer or impose any additional or different terms
- or conditions on, or apply any Effective Technological
- Measures to, Adapted Material that restrict exercise of the
- rights granted under the Adapter's License You apply.
-
-Section 4 - Sui Generis Database Rights.
-
-Where the Licensed Rights include Sui Generis Database Rights that apply to
-Your use of the Licensed Material:
-
- a. for the avoidance of doubt, Section 2(a)(1) grants You the right to
- extract, reuse, reproduce, and Share all or a substantial portion of
- the contents of the database;
-
- b. if You include all or a substantial portion of the database contents
- in a database in which You have Sui Generis Database Rights, then
- the database in which You have Sui Generis Database Rights (but not
- its individual contents) is Adapted Material, including for purposes
- of Section 3(b); and
-
- c. You must comply with the conditions in Section 3(a) if You Share all
- or a substantial portion of the contents of the database.
-
- For the avoidance of doubt, this Section 4 supplements and does not
- replace Your obligations under this Public License where the Licensed
- Rights include other Copyright and Similar Rights.
-
-Section 5 - Disclaimer of Warranties and Limitation of Liability.
-
- a. Unless otherwise separately undertaken by the Licensor, to the
- extent possible, the Licensor offers the Licensed Material as-is and
- as-available, and makes no representations or warranties of any kind
- concerning the Licensed Material, whether express, implied,
- statutory, or other. This includes, without limitation, warranties
- of title, merchantability, fitness for a particular purpose,
- non-infringement, absence of latent or other defects, accuracy, or
- the presence or absence of errors, whether or not known or
- discoverable. Where disclaimers of warranties are not allowed in
- full or in part, this disclaimer may not apply to You.
-
- b. To the extent possible, in no event will the Licensor be liable to
- You on any legal theory (including, without limitation, negligence)
- or otherwise for any direct, special, indirect, incidental,
- consequential, punitive, exemplary, or other losses, costs,
- expenses, or damages arising out of this Public License or use of
- the Licensed Material, even if the Licensor has been advised of the
- possibility of such losses, costs, expenses, or damages. Where a
- limitation of liability is not allowed in full or in part, this
- limitation may not apply to You.
-
- c. The disclaimer of warranties and limitation of liability provided
- above shall be interpreted in a manner that, to the extent possible,
- most closely approximates an absolute disclaimer and waiver of all
- liability.
-
-Section 6 - Term and Termination.
-
- a. This Public License applies for the term of the Copyright and
- Similar Rights licensed here. However, if You fail to comply with
- this Public License, then Your rights under this Public License
- terminate automatically.
-
- b. Where Your right to use the Licensed Material has terminated under
- Section 6(a), it reinstates:
-
- 1. automatically as of the date the violation is cured, provided it
- is cured within 30 days of Your discovery of the violation; or
-
- 2. upon express reinstatement by the Licensor.
-
- c. For the avoidance of doubt, this Section 6(b) does not affect any
- right the Licensor may have to seek remedies for Your violations of
- this Public License.
-
- d. For the avoidance of doubt, the Licensor may also offer the Licensed
- Material under separate terms or conditions or stop distributing the
- Licensed Material at any time; however, doing so will not terminate
- this Public License.
-
- e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
-
-Section 7 - Other Terms and Conditions.
-
- a. The Licensor shall not be bound by any additional or different terms
- or conditions communicated by You unless expressly agreed.
-
- b. Any arrangements, understandings, or agreements regarding the
- Licensed Material not stated herein are separate from and
- independent of the terms and conditions of this Public License.
-
-Section 8 - Interpretation.
-
- a. For the avoidance of doubt, this Public License does not, and shall
- not be interpreted to, reduce, limit, restrict, or impose conditions
- on any use of the Licensed Material that could lawfully be made
- without permission under this Public License.
-
- b. To the extent possible, if any provision of this Public License is
- deemed unenforceable, it shall be automatically reformed to the
- minimum extent necessary to make it enforceable. If the provision
- cannot be reformed, it shall be severed from this Public License
- without affecting the enforceability of the remaining terms and
- conditions.
-
- c. No term or condition of this Public License will be waived and no
- failure to comply consented to unless expressly agreed to by the
- Licensor.
-
- d. Nothing in this Public License constitutes or may be interpreted as
- a limitation upon, or waiver of, any privileges and immunities that
- apply to the Licensor or You, including from the legal processes of
- any jurisdiction or authority.
-
-Creative Commons is not a party to its public licenses. Notwithstanding,
-Creative Commons may elect to apply one of its public licenses to material
-it publishes and in those instances will be considered the "Licensor." The
-text of the Creative Commons public licenses is dedicated to the public
-domain under the CC0 Public Domain Dedication. Except for the limited
-purpose of indicating that material is shared under a Creative Commons
-public license or as otherwise permitted by the Creative Commons policies
-published at creativecommons.org/policies, Creative Commons does not
-authorize the use of the trademark "Creative Commons" or any other
-trademark or logo of Creative Commons without its prior written consent
-including, without limitation, in connection with any unauthorized
-modifications to any of its public licenses or any other arrangements,
-understandings, or agreements concerning use of licensed material. For the
-avoidance of doubt, this paragraph does not form part of the public
-licenses.
-
-Creative Commons may be contacted at creativecommons.org.
diff --git a/LICENSES/other/CDDL-1.0 b/LICENSES/other/CDDL-1.0
index 195a1687930a..25f614276ddd 100644
--- a/LICENSES/other/CDDL-1.0
+++ b/LICENSES/other/CDDL-1.0
@@ -1,10 +1,14 @@
Valid-License-Identifier: CDDL-1.0
SPDX-URL: https://spdx.org/licenses/CDDL-1.0.html
Usage-Guide:
+ Do NOT use. The CDDL-1.0 is not GPL compatible. It may only be used for
+ dual-licensed files where the other license is GPL compatible.
+ If you end up using this, it MUST be used together with a GPL2 compatible
+ license using "OR".
To use the Common Development and Distribution License 1.0 put the
following SPDX tag/value pair into a comment according to the placement
guidelines in the licensing rules documentation:
- SPDX-License-Identifier: CDDL-1.0
+ SPDX-License-Identifier: ($GPL-COMPATIBLE-ID OR CDDL-1.0)
License-Text:
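For illustration (not part of the patch), a dual-licensed .c file would
then carry a tag such as the following, with GPL-2.0 standing in for the
$GPL-COMPATIBLE-ID placeholder:

	// SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0)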
diff --git a/LICENSES/other/ISC b/LICENSES/other/ISC
new file mode 100644
index 000000000000..8953c3142079
--- /dev/null
+++ b/LICENSES/other/ISC
@@ -0,0 +1,24 @@
+Valid-License-Identifier: ISC
+SPDX-URL: https://spdx.org/licenses/ISC.html
+Usage-Guide:
+ To use the ISC License put the following SPDX tag/value pair into a
+ comment according to the placement guidelines in the licensing rules
+ documentation:
+ SPDX-License-Identifier: ISC
+License-Text:
+
+ISC License
+
+Copyright (c) <year> <copyright holders>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
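As a hedged example (not part of the patch, file names hypothetical), the
tag goes at the top of a .c file in line-comment form and at the top of a
header in block-comment form:

	// SPDX-License-Identifier: ISC      (in foo.c)
	/* SPDX-License-Identifier: ISC */   (in foo.h)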
diff --git a/MAINTAINERS b/MAINTAINERS
index 4ece30f15777..554941e05171 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -324,7 +324,6 @@ F: Documentation/ABI/testing/sysfs-bus-acpi
F: Documentation/ABI/testing/configfs-acpi
F: drivers/pci/*acpi*
F: drivers/pci/*/*acpi*
-F: drivers/pci/*/*/*acpi*
F: tools/power/acpi/
ACPI APEI
@@ -840,7 +839,7 @@ ANALOG DEVICES INC ADGS1408 DRIVER
M: Mircea Caprioru <mircea.caprioru@analog.com>
S: Supported
F: drivers/mux/adgs1408.c
-F: Documentation/devicetree/bindings/mux/adgs1408.txt
+F: Documentation/devicetree/bindings/mux/adi,adgs1408.txt
ANALOG DEVICES INC ADP5061 DRIVER
M: Stefan Popa <stefan.popa@analog.com>
@@ -1181,7 +1180,7 @@ N: owl
F: arch/arm/mach-actions/
F: arch/arm/boot/dts/owl-*
F: arch/arm64/boot/dts/actions/
-F: drivers/clocksource/owl-*
+F: drivers/clocksource/timer-owl*
F: drivers/pinctrl/actions/*
F: drivers/soc/actions/
F: include/dt-bindings/power/owl-*
@@ -1251,7 +1250,7 @@ N: meson
ARM/Annapurna Labs ALPINE ARCHITECTURE
M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
-M: Antoine Tenart <antoine.tenart@free-electrons.com>
+M: Antoine Tenart <antoine.tenart@bootlin.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-alpine/
@@ -1604,7 +1603,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/lpc43*
F: drivers/clk/nxp/clk-lpc18xx*
-F: drivers/clocksource/time-lpc32xx.c
+F: drivers/clocksource/timer-lpc32xx.c
F: drivers/i2c/busses/i2c-lpc2k.c
F: drivers/memory/pl172.c
F: drivers/mtd/spi-nor/nxp-spifi.c
@@ -2196,6 +2195,7 @@ F: drivers/clk/uniphier/
F: drivers/gpio/gpio-uniphier.c
F: drivers/i2c/busses/i2c-uniphier*
F: drivers/irqchip/irq-uniphier-aidet.c
+F: drivers/mmc/host/uniphier-sd.c
F: drivers/pinctrl/uniphier/
F: drivers/reset/reset-uniphier.c
F: drivers/tty/serial/8250/8250_uniphier.c
@@ -2220,7 +2220,7 @@ F: arch/arm/mach-vexpress/
F: */*/vexpress*
F: */*/*/vexpress*
F: drivers/clk/versatile/clk-vexpress-osc.c
-F: drivers/clocksource/versatile.c
+F: drivers/clocksource/timer-versatile.c
N: mps2
ARM/VFP SUPPORT
@@ -2242,7 +2242,7 @@ M: Tony Prisk <linux@prisktech.co.nz>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-vt8500/
-F: drivers/clocksource/vt8500_timer.c
+F: drivers/clocksource/timer-vt8500.c
F: drivers/i2c/busses/i2c-wmt.c
F: drivers/mmc/host/wmt-sdmmc.c
F: drivers/pwm/pwm-vt8500.c
@@ -2307,7 +2307,7 @@ F: drivers/cpuidle/cpuidle-zynq.c
F: drivers/block/xsysace.c
N: zynq
N: xilinx
-F: drivers/clocksource/cadence_ttc_timer.c
+F: drivers/clocksource/timer-cadence-ttc.c
F: drivers/i2c/busses/i2c-cadence.c
F: drivers/mmc/host/sdhci-of-arasan.c
F: drivers/edac/synopsys_edac.c
@@ -2956,7 +2956,6 @@ F: include/linux/bcm963xx_tag.h
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
M: Rasesh Mody <rasesh.mody@cavium.com>
-M: Harish Patil <harish.patil@cavium.com>
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
@@ -2977,6 +2976,7 @@ F: drivers/scsi/bnx2i/
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
M: Ariel Elior <ariel.elior@cavium.com>
+M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
@@ -3007,6 +3007,14 @@ S: Supported
F: drivers/gpio/gpio-brcmstb.c
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+BROADCOM BRCMSTB I2C DRIVER
+M: Kamal Dasu <kdasu.kdev@gmail.com>
+L: linux-i2c@vger.kernel.org
+L: bcm-kernel-feedback-list@broadcom.com
+S: Supported
+F: drivers/i2c/busses/i2c-brcmstb.c
+F: Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
+
BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
M: Al Cooper <alcooperx@gmail.com>
L: linux-kernel@vger.kernel.org
@@ -3114,6 +3122,15 @@ S: Maintained
F: Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.txt
F: drivers/memory/brcmstb_dpfe.c
+BROADCOM SPI DRIVER
+M: Kamal Dasu <kdasu.kdev@gmail.com>
+M: bcm-kernel-feedback-list@broadcom.com
+S: Maintained
+F: Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
+F: drivers/spi/spi-bcm-qspi.*
+F: drivers/spi/spi-brcmstb-qspi.c
+F: drivers/spi/spi-iproc-qspi.c
+
BROADCOM SYSTEMPORT ETHERNET DRIVER
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
@@ -3674,6 +3691,12 @@ S: Maintained
F: Documentation/devicetree/bindings/media/coda.txt
F: drivers/media/platform/coda/
+CODE OF CONDUCT
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+S: Supported
+F: Documentation/process/code-of-conduct.rst
+F: Documentation/process/code-of-conduct-interpretation.rst
+
COMMON CLK FRAMEWORK
M: Michael Turquette <mturquette@baylibre.com>
M: Stephen Boyd <sboyd@kernel.org>
@@ -4032,7 +4055,7 @@ M: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/cxlflash/
-F: include/uapi/scsi/cxlflash_ioctls.h
+F: include/uapi/scsi/cxlflash_ioctl.h
F: Documentation/powerpc/cxlflash.txt
CYBERPRO FB DRIVER
@@ -4171,6 +4194,11 @@ S: Maintained
F: drivers/platform/x86/dell-smbios-wmi.c
F: tools/wmi/dell-smbios-example.c
+DEFZA FDDI NETWORK DRIVER
+M: "Maciej W. Rozycki" <macro@linux-mips.org>
+S: Maintained
+F: drivers/net/fddi/defza.*
+
DELL LAPTOP DRIVER
M: Matthew Garrett <mjg59@srcf.ucam.org>
M: Pali Rohár <pali.rohar@gmail.com>
@@ -4486,11 +4514,12 @@ S: Maintained
F: Documentation/
F: scripts/kernel-doc
X: Documentation/ABI/
+X: Documentation/acpi/
X: Documentation/devicetree/
-X: Documentation/acpi
-X: Documentation/power
-X: Documentation/spi
-X: Documentation/media
+X: Documentation/i2c/
+X: Documentation/media/
+X: Documentation/power/
+X: Documentation/spi/
T: git git://git.lwn.net/linux.git docs-next
DOCUMENTATION/ITALIAN
@@ -4528,9 +4557,13 @@ F: drivers/soc/fsl/dpio
DPAA2 ETHERNET DRIVER
M: Ioana Radulescu <ruxandra.radulescu@nxp.com>
-L: linux-kernel@vger.kernel.org
+L: netdev@vger.kernel.org
S: Maintained
-F: drivers/staging/fsl-dpaa2/ethernet
+F: drivers/net/ethernet/freescale/dpaa2/dpaa2-eth*
+F: drivers/net/ethernet/freescale/dpaa2/dpni*
+F: drivers/net/ethernet/freescale/dpaa2/dpkg.h
+F: drivers/net/ethernet/freescale/dpaa2/Makefile
+F: drivers/net/ethernet/freescale/dpaa2/Kconfig
DPAA2 ETHERNET SWITCH DRIVER
M: Ioana Radulescu <ruxandra.radulescu@nxp.com>
@@ -4541,9 +4574,10 @@ F: drivers/staging/fsl-dpaa2/ethsw
DPAA2 PTP CLOCK DRIVER
M: Yangbo Lu <yangbo.lu@nxp.com>
-L: linux-kernel@vger.kernel.org
+L: netdev@vger.kernel.org
S: Maintained
-F: drivers/staging/fsl-dpaa2/rtc
+F: drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp*
+F: drivers/net/ethernet/freescale/dpaa2/dprtc*
DPT_I2O SCSI RAID DRIVER
M: Adaptec OEM Raid Solutions <aacraid@microsemi.com>
@@ -5330,7 +5364,8 @@ S: Maintained
F: drivers/edac/r82600_edac.c
EDAC-SBRIDGE
-M: Mauro Carvalho Chehab <mchehab@kernel.org>
+M: Tony Luck <tony.luck@intel.com>
+R: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/sb_edac.c
@@ -5470,7 +5505,8 @@ S: Odd Fixes
F: drivers/net/ethernet/agere/
ETHERNET BRIDGE
-M: Stephen Hemminger <stephen@networkplumber.org>
+M: Roopa Prabhu <roopa@cumulusnetworks.com>
+M: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -5514,7 +5550,7 @@ W: http://ext4.wiki.kernel.org
Q: http://patchwork.ozlabs.org/project/linux-ext4/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4.git
S: Maintained
-F: Documentation/filesystems/ext4.txt
+F: Documentation/filesystems/ext4/ext4.rst
F: fs/ext4/
Extended Verification Module (EVM)
@@ -6454,6 +6490,7 @@ F: Documentation/devicetree/bindings/hwmon/
F: Documentation/hwmon/
F: drivers/hwmon/
F: include/linux/hwmon*.h
+F: include/trace/events/hwmon*.h
HARDWARE RANDOM NUMBER GENERATOR CORE
M: Matt Mackall <mpm@selenic.com>
@@ -6762,6 +6799,12 @@ S: Maintained
F: mm/memory-failure.c
F: mm/hwpoison-inject.c
+HYGON PROCESSOR SUPPORT
+M: Pu Wen <puwen@hygon.cn>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: arch/x86/kernel/cpu/hygon.c
+
Hyper-V CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
@@ -7341,15 +7384,16 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
S: Supported
F: Documentation/networking/e100.rst
F: Documentation/networking/e1000.rst
-F: Documentation/networking/e1000e.txt
-F: Documentation/networking/igb.txt
-F: Documentation/networking/igbvf.txt
-F: Documentation/networking/ixgb.txt
-F: Documentation/networking/ixgbe.txt
-F: Documentation/networking/ixgbevf.txt
-F: Documentation/networking/i40e.txt
-F: Documentation/networking/i40evf.txt
-F: Documentation/networking/ice.txt
+F: Documentation/networking/e1000e.rst
+F: Documentation/networking/fm10k.rst
+F: Documentation/networking/igb.rst
+F: Documentation/networking/igbvf.rst
+F: Documentation/networking/ixgb.rst
+F: Documentation/networking/ixgbe.rst
+F: Documentation/networking/ixgbevf.rst
+F: Documentation/networking/i40e.rst
+F: Documentation/networking/iavf.rst
+F: Documentation/networking/ice.rst
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
F: include/linux/avf/virtchnl.h
@@ -7371,6 +7415,12 @@ T: git https://github.com/intel/gvt-linux.git
S: Supported
F: drivers/gpu/drm/i915/gvt/
+INTEL PMIC GPIO DRIVER
+R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+S: Maintained
+F: drivers/gpio/gpio-*cove.c
+F: drivers/gpio/gpio-msic.c
+
INTEL HID EVENT DRIVER
M: Alex Hung <alex.hung@canonical.com>
L: platform-driver-x86@vger.kernel.org
@@ -7497,6 +7547,14 @@ F: drivers/platform/x86/intel_punit_ipc.c
F: arch/x86/include/asm/intel_pmc_ipc.h
F: arch/x86/include/asm/intel_punit_ipc.h
+INTEL MULTIFUNCTION PMIC DEVICE DRIVERS
+R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+S: Maintained
+F: drivers/mfd/intel_msic.c
+F: drivers/mfd/intel_soc_pmic*
+F: include/linux/mfd/intel_msic.h
+F: include/linux/mfd/intel_soc_pmic*
+
INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
M: Stanislav Yakovlev <stas.yakovlev@gmail.com>
L: linux-wireless@vger.kernel.org
@@ -7520,14 +7578,6 @@ S: Supported
F: drivers/infiniband/hw/i40iw/
F: include/uapi/rdma/i40iw-abi.h
-INTEL SHA MULTIBUFFER DRIVER
-M: Megha Dey <megha.dey@linux.intel.com>
-R: Tim Chen <tim.c.chen@linux.intel.com>
-L: linux-crypto@vger.kernel.org
-S: Supported
-F: arch/x86/crypto/sha*-mb/
-F: crypto/mcryptd.c
-
INTEL TELEMETRY DRIVER
M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>
L: platform-driver-x86@vger.kernel.org
@@ -7635,6 +7685,7 @@ M: Corey Minyard <minyard@acm.org>
L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
W: http://openipmi.sourceforge.net/
S: Supported
+F: Documentation/devicetree/bindings/ipmi/
F: Documentation/IPMI.txt
F: drivers/char/ipmi/
F: include/linux/ipmi*
@@ -8106,6 +8157,7 @@ F: security/keys/encrypted-keys/
KEYS-TRUSTED
M: James Bottomley <jejb@linux.vnet.ibm.com>
+M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
@@ -8183,6 +8235,25 @@ S: Maintained
F: net/l3mdev
F: include/net/l3mdev.h
+L7 BPF FRAMEWORK
+M: John Fastabend <john.fastabend@gmail.com>
+M: Daniel Borkmann <daniel@iogearbox.net>
+L: netdev@vger.kernel.org
+S: Maintained
+F: include/linux/skmsg.h
+F: net/core/skmsg.c
+F: net/core/sock_map.c
+F: net/ipv4/tcp_bpf.c
+
+LANTIQ / INTEL Ethernet drivers
+M: Hauke Mehrtens <hauke@hauke-m.de>
+L: netdev@vger.kernel.org
+S: Maintained
+F: net/dsa/tag_gswip.c
+F: drivers/net/ethernet/lantiq_xrx200.c
+F: drivers/net/dsa/lantiq_pce.h
+F: drivers/net/dsa/lantiq_gswip.c
+
LANTIQ MIPS ARCHITECTURE
M: John Crispin <john@phrozen.org>
L: linux-mips@linux-mips.org
@@ -8598,7 +8669,6 @@ F: include/linux/spinlock*.h
F: arch/*/include/asm/spinlock*.h
F: include/linux/rwlock*.h
F: include/linux/mutex*.h
-F: arch/*/include/asm/mutex*.h
F: include/linux/rwsem*.h
F: arch/*/include/asm/rwsem.h
F: include/linux/seqlock.h
@@ -8744,7 +8814,7 @@ M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/dsa/mv88e6xxx/
-F: linux/platform_data/mv88e6xxx.h
+F: include/linux/platform_data/mv88e6xxx.h
F: Documentation/devicetree/bindings/net/dsa/marvell.txt
MARVELL ARMADA DRM SUPPORT
@@ -8834,6 +8904,15 @@ S: Supported
F: drivers/mmc/host/sdhci-xenon*
F: Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
+MARVELL OCTEONTX2 RVU ADMIN FUNCTION DRIVER
+M: Sunil Goutham <sgoutham@marvell.com>
+M: Linu Cherian <lcherian@marvell.com>
+M: Geetha sowjanya <gakula@marvell.com>
+M: Jerin Jacob <jerinj@marvell.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/marvell/octeontx2/af/
+
MATROX FRAMEBUFFER DRIVER
L: linux-fbdev@vger.kernel.org
S: Orphan
@@ -8847,13 +8926,6 @@ S: Maintained
F: Documentation/hwmon/max16065
F: drivers/hwmon/max16065.c
-MAX20751 HARDWARE MONITOR DRIVER
-M: Guenter Roeck <linux@roeck-us.net>
-L: linux-hwmon@vger.kernel.org
-S: Maintained
-F: Documentation/hwmon/max20751
-F: drivers/hwmon/max20751.c
-
MAX2175 SDR TUNER DRIVER
M: Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>
L: linux-media@vger.kernel.org
@@ -9519,6 +9591,7 @@ M: Richard Genoud <richard.genoud@gmail.com>
S: Maintained
F: drivers/tty/serial/atmel_serial.c
F: drivers/tty/serial/atmel_serial.h
+F: Documentation/devicetree/bindings/mfd/atmel-usart.txt
MICROCHIP / ATMEL DMA DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
@@ -9550,6 +9623,21 @@ S: Supported
F: drivers/mtd/nand/raw/atmel/*
F: Documentation/devicetree/bindings/mtd/atmel-nand.txt
+MICROCHIP AT91 USART MFD DRIVER
+M: Radu Pirea <radu_nicolae.pirea@upb.ro>
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: drivers/mfd/at91-usart.c
+F: include/dt-bindings/mfd/at91-usart.h
+F: Documentation/devicetree/bindings/mfd/atmel-usart.txt
+
+MICROCHIP AT91 USART SPI DRIVER
+M: Radu Pirea <radu_nicolae.pirea@upb.ro>
+L: linux-spi@vger.kernel.org
+S: Supported
+F: drivers/spi/spi-at91-usart.c
+F: Documentation/devicetree/bindings/mfd/atmel-usart.txt
+
MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
M: Woojung Huh <Woojung.Huh@microchip.com>
M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
@@ -9658,7 +9746,8 @@ MIPS/LOONGSON2 ARCHITECTURE
M: Jiaxun Yang <jiaxun.yang@flygoat.com>
L: linux-mips@linux-mips.org
S: Maintained
-F: arch/mips/loongson64/*{2e/2f}*
+F: arch/mips/loongson64/fuloong-2e/
+F: arch/mips/loongson64/lemote-2f/
F: arch/mips/include/asm/mach-loongson64/
F: drivers/*/*loongson2*
F: drivers/*/*/*loongson2*
@@ -9698,6 +9787,19 @@ S: Maintained
F: arch/arm/boot/dts/mmp*
F: arch/arm/mach-mmp/
+MMU GATHER AND TLB INVALIDATION
+M: Will Deacon <will.deacon@arm.com>
+M: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Nick Piggin <npiggin@gmail.com>
+M: Peter Zijlstra <peterz@infradead.org>
+L: linux-arch@vger.kernel.org
+L: linux-mm@kvack.org
+S: Maintained
+F: arch/*/include/asm/tlb.h
+F: include/asm-generic/tlb.h
+F: mm/mmu_gather.c
+
MN88472 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -9716,13 +9818,6 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
F: drivers/media/dvb-frontends/mn88473*
-PCI DRIVER FOR MOBIVEIL PCIE IP
-M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L: linux-pci@vger.kernel.org
-S: Supported
-F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F: drivers/pci/controller/pcie-mobiveil.c
-
MODULE SUPPORT
M: Jessica Yu <jeyu@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -9872,7 +9967,7 @@ M: Peter Rosin <peda@axentia.se>
S: Maintained
F: Documentation/ABI/testing/sysfs-class-mux*
F: Documentation/devicetree/bindings/mux/
-F: include/linux/dt-bindings/mux/
+F: include/dt-bindings/mux/
F: include/linux/mux/
F: drivers/mux/
@@ -9909,6 +10004,13 @@ S: Supported
F: drivers/gpu/drm/mxsfb/
F: Documentation/devicetree/bindings/display/mxsfb.txt
+MYLEX DAC960 PCI RAID Controller
+M: Hannes Reinecke <hare@kernel.org>
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/myrb.*
+F: drivers/scsi/myrs.*
+
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
M: Chris Lee <christopher.lee@cspi.com>
L: netdev@vger.kernel.org
@@ -10129,7 +10231,6 @@ L: netdev@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
S: Maintained
-F: net/core/flow.c
F: net/xfrm/
F: net/key/
F: net/ipv4/xfrm*
@@ -10192,6 +10293,8 @@ NETWORKING [TLS]
M: Boris Pismenny <borisp@mellanox.com>
M: Aviad Yehezkel <aviadye@mellanox.com>
M: Dave Watson <davejwatson@fb.com>
+M: John Fastabend <john.fastabend@gmail.com>
+M: Daniel Borkmann <daniel@iogearbox.net>
L: netdev@vger.kernel.org
S: Maintained
F: net/tls/*
@@ -10949,7 +11052,7 @@ M: Willy Tarreau <willy@haproxy.com>
M: Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
S: Odd Fixes
F: Documentation/auxdisplay/lcd-panel-cgram.txt
-F: drivers/misc/panel.c
+F: drivers/auxdisplay/panel.c
PARALLEL PORT SUBSYSTEM
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11137,6 +11240,13 @@ F: include/uapi/linux/switchtec_ioctl.h
F: include/linux/switchtec.h
F: drivers/ntb/hw/mscc/
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L: linux-pci@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F: drivers/pci/controller/pcie-mobiveil.c
+
PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
M: Jason Cooper <jason@lakedaemon.net>
@@ -11189,7 +11299,7 @@ M: Murali Karicheri <m-karicheri2@ti.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: drivers/pci/controller/dwc/*keystone*
+F: drivers/pci/controller/dwc/pci-keystone.c
PCI ENDPOINT SUBSYSTEM
M: Kishon Vijay Abraham I <kishon@ti.com>
@@ -11203,8 +11313,14 @@ F: tools/pci/
PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
M: Russell Currey <ruscur@russell.cc>
+M: Sam Bobroff <sbobroff@linux.ibm.com>
+M: Oliver O'Halloran <oohall@gmail.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
+F: Documentation/PCI/pci-error-recovery.txt
+F: drivers/pci/pcie/aer.c
+F: drivers/pci/pcie/dpc.c
+F: drivers/pci/pcie/err.c
F: Documentation/powerpc/eeh-pci-error-recovery.txt
F: arch/powerpc/kernel/eeh*.c
F: arch/powerpc/platforms/*/eeh*.c
@@ -11483,15 +11599,12 @@ S: Maintained
F: drivers/pinctrl/intel/
PIN CONTROLLER - MEDIATEK
-M: Sean Wang <sean.wang@mediatek.com>
+M: Sean Wang <sean.wang@kernel.org>
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
F: Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
-F: drivers/pinctrl/mediatek/mtk-eint.*
-F: drivers/pinctrl/mediatek/pinctrl-mtk-common.*
-F: drivers/pinctrl/mediatek/pinctrl-mt2701.c
-F: drivers/pinctrl/mediatek/pinctrl-mt7622.c
+F: drivers/pinctrl/mediatek/
PIN CONTROLLER - QUALCOMM
M: Bjorn Andersson <bjorn.andersson@linaro.org>
@@ -11569,7 +11682,26 @@ W: http://hwmon.wiki.kernel.org/
W: http://www.roeck-us.net/linux/drivers/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
+F: Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
+F: Documentation/devicetree/bindings/hwmon/max31785.txt
+F: Documentation/devicetree/bindings/hwmon/ltc2978.txt
+F: Documentation/hwmon/adm1275
+F: Documentation/hwmon/ibm-cffps
+F: Documentation/hwmon/ir35221
+F: Documentation/hwmon/lm25066
+F: Documentation/hwmon/ltc2978
+F: Documentation/hwmon/ltc3815
+F: Documentation/hwmon/max16064
+F: Documentation/hwmon/max20751
+F: Documentation/hwmon/max31785
+F: Documentation/hwmon/max34440
+F: Documentation/hwmon/max8688
F: Documentation/hwmon/pmbus
+F: Documentation/hwmon/pmbus-core
+F: Documentation/hwmon/tps40422
+F: Documentation/hwmon/ucd9000
+F: Documentation/hwmon/ucd9200
+F: Documentation/hwmon/zl6100
F: drivers/hwmon/pmbus/
F: include/linux/pmbus.h
@@ -11973,7 +12105,7 @@ F: Documentation/scsi/LICENSE.qla4xxx
F: drivers/scsi/qla4xxx/
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@cavium.com>
+M: Shahed Shaikh <Shahed.Shaikh@cavium.com>
M: Manish Chopra <manish.chopra@cavium.com>
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
@@ -11981,7 +12113,6 @@ S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@cavium.com>
M: Manish Chopra <manish.chopra@cavium.com>
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
@@ -12260,6 +12391,7 @@ F: Documentation/networking/rds.txt
RDT - RESOURCE ALLOCATION
M: Fenghua Yu <fenghua.yu@intel.com>
+M: Reinette Chatre <reinette.chatre@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: arch/x86/kernel/cpu/intel_rdt*
@@ -12668,6 +12800,18 @@ W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: drivers/s390/crypto/
+S390 VFIO AP DRIVER
+M: Tony Krowiak <akrowiak@linux.ibm.com>
+M: Pierre Morel <pmorel@linux.ibm.com>
+M: Halil Pasic <pasic@linux.ibm.com>
+L: linux-s390@vger.kernel.org
+W: http://www.ibm.com/developerworks/linux/linux390/
+S: Supported
+F: drivers/s390/crypto/vfio_ap_drv.c
+F: drivers/s390/crypto/vfio_ap_private.h
+F: drivers/s390/crypto/vfio_ap_ops.c
+F: Documentation/s390/vfio-ap.txt
+
S390 ZFCP DRIVER
M: Steffen Maier <maier@linux.ibm.com>
M: Benjamin Block <bblock@linux.ibm.com>
@@ -13056,7 +13200,7 @@ SELINUX SECURITY MODULE
M: Paul Moore <paul@paul-moore.com>
M: Stephen Smalley <sds@tycho.nsa.gov>
M: Eric Paris <eparis@parisplace.org>
-L: selinux@tycho.nsa.gov (moderated for non-subscribers)
+L: selinux@vger.kernel.org
W: https://selinuxproject.org
W: https://github.com/SELinuxProject
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git
@@ -13300,6 +13444,7 @@ M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
S: Supported
F: drivers/siox/*
+F: drivers/gpio/gpio-siox.c
F: include/trace/events/siox.h
SIS 190 ETHERNET DRIVER
@@ -13449,9 +13594,8 @@ F: drivers/i2c/busses/i2c-synquacer.c
F: Documentation/devicetree/bindings/i2c/i2c-synquacer.txt
SOCIONEXT UNIPHIER SOUND DRIVER
-M: Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-S: Maintained
+S: Orphan
F: sound/soc/uniphier/
SOEKRIS NET48XX LED SUPPORT
@@ -13484,8 +13628,8 @@ L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/arm/firmware/sdei.txt
F: drivers/firmware/arm_sdei.c
-F: include/linux/sdei.h
-F: include/uapi/linux/sdei.h
+F: include/linux/arm_sdei.h
+F: include/uapi/linux/arm_sdei.h
SOFTWARE RAID (Multiple Disks) SUPPORT
M: Shaohua Li <shli@kernel.org>
@@ -14028,6 +14172,12 @@ S: Supported
F: drivers/reset/reset-axs10x.c
F: Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt
+SYNOPSYS CREG GPIO DRIVER
+M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+S: Maintained
+F: drivers/gpio/gpio-creg-snps.c
+F: Documentation/devicetree/bindings/gpio/snps,creg-gpio.txt
+
SYNOPSYS DESIGNWARE 8250 UART DRIVER
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
S: Maintained
@@ -14614,6 +14764,13 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ti/netcp*
+TI PCM3060 ASoC CODEC DRIVER
+M: Kirill Marinushkin <kmarinushkin@birdec.tech>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/pcm3060.txt
+F: sound/soc/codecs/pcm3060*
+
TI TAS571X FAMILY ASoC CODEC DRIVER
M: Kevin Cernekee <cernekee@chromium.org>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -15286,6 +15443,12 @@ F: Documentation/driver-api/usb/typec_bus.rst
F: drivers/usb/typec/altmodes/
F: include/linux/usb/typec_altmode.h
+USB TYPEC PORT CONTROLLER DRIVERS
+M: Guenter Roeck <linux@roeck-us.net>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: drivers/usb/typec/tcpm/
+
USB UHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
@@ -15389,7 +15552,7 @@ S: Maintained
UVESAFB DRIVER
M: Michal Januszewski <spock@gentoo.org>
L: linux-fbdev@vger.kernel.org
-W: http://dev.gentoo.org/~spock/projects/uvesafb/
+W: https://github.com/mjanusz/v86d
S: Maintained
F: Documentation/fb/uvesafb.txt
F: drivers/video/fbdev/uvesafb.*
@@ -15702,7 +15865,7 @@ F: include/linux/regulator/
VRF
M: David Ahern <dsa@cumulusnetworks.com>
-M: Shrijeet Mukherjee <shm@cumulusnetworks.com>
+M: Shrijeet Mukherjee <shrijeet@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/vrf.c
@@ -15913,6 +16076,7 @@ F: net/x25/
X86 ARCHITECTURE (32-BIT AND 64-BIT)
M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
+M: Borislav Petkov <bp@alien8.de>
R: "H. Peter Anvin" <hpa@zytor.com>
M: x86@kernel.org
L: linux-kernel@vger.kernel.org
@@ -15941,6 +16105,15 @@ M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/*
+X86 MM
+M: Dave Hansen <dave.hansen@linux.intel.com>
+M: Andy Lutomirski <luto@kernel.org>
+M: Peter Zijlstra <peterz@infradead.org>
+L: linux-kernel@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
+S: Maintained
+F: arch/x86/mm/
+
X86 PLATFORM DRIVERS
M: Darren Hart <dvhart@infradead.org>
M: Andy Shevchenko <andy@infradead.org>
diff --git a/Makefile b/Makefile
index 83a03facb5ba..2fc5732a4f9e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc4
-NAME = Merciless Moray
+EXTRAVERSION =
+NAME = "People's Front"
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -299,19 +299,7 @@ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
-# SUBARCH tells the usermode build what the underlying arch is. That is set
-# first, and if a usermode build is happening, the "ARCH=um" on the command
-# line overrides the setting of ARCH below. If a native build is happening,
-# then ARCH is assigned, getting whatever value it gets normally, and
-# SUBARCH is subsequently ignored.
-
-SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
- -e s/sun4u/sparc64/ \
- -e s/arm.*/arm/ -e s/sa110/arm/ \
- -e s/s390x/s390/ -e s/parisc64/parisc/ \
- -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
- -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
- -e s/riscv.*/riscv/)
+include scripts/subarch.include
# Cross compiling and selecting different set of gcc/bin-utils
# ---------------------------------------------------------------------------
@@ -495,13 +483,15 @@ endif
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR)
+GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
endif
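To make the effect of the new --prefix handling concrete, here is an
illustrative expansion only, with a made-up toolchain path: if
CROSS_COMPILE=aarch64-linux-gnu- and `which $(LD)` resolves to
/opt/cross/bin/aarch64-linux-gnu-ld, the block above evaluates to roughly:

	# Hypothetical values, shown as comments; /opt/cross is not a real path:
	#   CLANG_TARGET      := --target=aarch64-linux-gnu
	#   GCC_TOOLCHAIN_DIR := /opt/cross/bin/
	#   CLANG_PREFIX      := --prefix=/opt/cross/bin/
	#   GCC_TOOLCHAIN     := /opt/cross
	# so clang is pointed at both the target triple and the directory
	# holding the GNU assembler/linker that match the cross ld.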
@@ -1083,7 +1073,7 @@ scripts: scripts_basic asm-generic gcc-plugins $(autoksyms_h)
# version.h and scripts_basic is processed / created.
# Listed in dependency order
-PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
+PHONY += prepare archprepare macroprepare prepare0 prepare1 prepare2 prepare3
# prepare3 is used to check if we are building in a separate output directory,
# and if so do:
@@ -1106,7 +1096,9 @@ prepare2: prepare3 outputmakefile asm-generic
prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h
$(cmd_crmodverdir)
-archprepare: archheaders archscripts prepare1 scripts_basic
+macroprepare: prepare1 archmacros
+
+archprepare: archheaders archscripts macroprepare scripts_basic
prepare0: archprepare gcc-plugins
$(Q)$(MAKE) $(build)=.
@@ -1174,6 +1166,9 @@ archheaders:
PHONY += archscripts
archscripts:
+PHONY += archmacros
+archmacros:
+
PHONY += __headers
__headers: $(version_h) scripts_basic uapi-asm-generic archheaders archscripts
$(Q)$(MAKE) $(build)=scripts build_unifdef
diff --git a/README b/README
index 2c927ccbd970..669ac7c32292 100644
--- a/README
+++ b/README
@@ -12,7 +12,6 @@ In order to build the documentation, use ``make htmldocs`` or
There are various text files in the Documentation/ subdirectory,
several of them using the Restructured Text markup notation.
-See Documentation/00-INDEX for a list of what is contained in each file.
Please read the Documentation/process/changes.rst file, as it contains the
requirements for building and running the kernel, and information about
diff --git a/arch/Kconfig b/arch/Kconfig
index 6801123932a5..9d329608913e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -359,6 +359,9 @@ config HAVE_PERF_USER_STACK_DUMP
config HAVE_ARCH_JUMP_LABEL
bool
+config HAVE_ARCH_JUMP_LABEL_RELATIVE
+ bool
+
config HAVE_RCU_TABLE_FREE
bool
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index d6e29a1de4cc..9ff37aa1165f 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -6,6 +6,7 @@
#define NR_SYSCALLS 523
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
@@ -13,6 +14,7 @@
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/alpha/include/uapi/asm/siginfo.h b/arch/alpha/include/uapi/asm/siginfo.h
index db3f0138536f..6e1a2af2f962 100644
--- a/arch/alpha/include/uapi/asm/siginfo.h
+++ b/arch/alpha/include/uapi/asm/siginfo.h
@@ -2,7 +2,6 @@
#ifndef _ALPHA_SIGINFO_H
#define _ALPHA_SIGINFO_H
-#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
#define __ARCH_SI_TRAPNO
#include <asm-generic/siginfo.h>
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index c64806a2daf5..2e09248f8324 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -473,7 +473,7 @@ entSys:
bne $3, strace
beq $4, 1f
ldq $27, 0($5)
-1: jsr $26, ($27), alpha_ni_syscall
+1: jsr $26, ($27), sys_ni_syscall
ldgp $gp, 0($26)
blt $0, $syscall_error /* the call failed */
stq $0, 0($sp)
@@ -587,7 +587,7 @@ strace:
/* get the system call pointer.. */
lda $1, NR_SYSCALLS($31)
lda $2, sys_call_table
- lda $27, alpha_ni_syscall
+ lda $27, sys_ni_syscall
cmpult $0, $1, $1
s8addq $0, $2, $2
beq $1, 1f
@@ -791,7 +791,7 @@ ret_from_kernel_thread:
/*
* Special system calls. Most of these are special in that they either
- * have to play switch_stack games or in some way use the pt_regs struct.
+ * have to play switch_stack games.
*/
.macro fork_like name
@@ -812,46 +812,41 @@ fork_like fork
fork_like vfork
fork_like clone
+.macro sigreturn_like name
.align 4
- .globl sys_sigreturn
- .ent sys_sigreturn
-sys_sigreturn:
+ .globl sys_\name
+ .ent sys_\name
+sys_\name:
.prologue 0
lda $9, ret_from_straced
cmpult $26, $9, $9
lda $sp, -SWITCH_STACK_SIZE($sp)
- jsr $26, do_sigreturn
+ jsr $26, do_\name
bne $9, 1f
jsr $26, syscall_trace_leave
1: br $1, undo_switch_stack
br ret_from_sys_call
-.end sys_sigreturn
+.end sys_\name
+.endm
- .align 4
- .globl sys_rt_sigreturn
- .ent sys_rt_sigreturn
-sys_rt_sigreturn:
- .prologue 0
- lda $9, ret_from_straced
- cmpult $26, $9, $9
- lda $sp, -SWITCH_STACK_SIZE($sp)
- jsr $26, do_rt_sigreturn
- bne $9, 1f
- jsr $26, syscall_trace_leave
-1: br $1, undo_switch_stack
- br ret_from_sys_call
-.end sys_rt_sigreturn
+sigreturn_like sigreturn
+sigreturn_like rt_sigreturn
.align 4
- .globl alpha_ni_syscall
- .ent alpha_ni_syscall
-alpha_ni_syscall:
+ .globl alpha_syscall_zero
+ .ent alpha_syscall_zero
+alpha_syscall_zero:
.prologue 0
- /* Special because it also implements overflow handling via
- syscall number 0. And if you recall, zero is a special
- trigger for "not an error". Store large non-zero there. */
+ /* Special because it needs to do something opposite to
+ force_successful_syscall_return(). We use the saved
+ syscall number for that, zero meaning "not an error".
+ That works nicely, but for real syscall 0 we need to
+ make sure that this logic doesn't get confused.
+ Store a non-zero value there - the -ENOSYS we need in
+ a register for our return value will do just fine.
+ */
lda $0, -ENOSYS
unop
stq $0, 0($sp)
ret
-.end alpha_ni_syscall
+.end alpha_syscall_zero
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 1374e591511f..5b2e8ecb7ce3 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -11,93 +11,93 @@
.align 3
.globl sys_call_table
sys_call_table:
- .quad alpha_ni_syscall /* 0 */
+ .quad alpha_syscall_zero /* 0 */
.quad sys_exit
.quad alpha_fork
.quad sys_read
.quad sys_write
- .quad alpha_ni_syscall /* 5 */
+ .quad sys_ni_syscall /* 5 */
.quad sys_close
.quad sys_osf_wait4
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_link
.quad sys_unlink /* 10 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_chdir
.quad sys_fchdir
.quad sys_mknod
.quad sys_chmod /* 15 */
.quad sys_chown
.quad sys_osf_brk
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_lseek
.quad sys_getxpid /* 20 */
.quad sys_osf_mount
.quad sys_umount
.quad sys_setuid
.quad sys_getxuid
- .quad alpha_ni_syscall /* 25 */
+ .quad sys_ni_syscall /* 25 */
.quad sys_ptrace
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 30 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 30 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_access
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 35 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 35 */
.quad sys_sync
.quad sys_kill
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_setpgid
- .quad alpha_ni_syscall /* 40 */
+ .quad sys_ni_syscall /* 40 */
.quad sys_dup
.quad sys_alpha_pipe
.quad sys_osf_set_program_attributes
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_open /* 45 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getxgid
.quad sys_osf_sigprocmask
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 50 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 50 */
.quad sys_acct
.quad sys_sigpending
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_ioctl
- .quad alpha_ni_syscall /* 55 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall /* 55 */
+ .quad sys_ni_syscall
.quad sys_symlink
.quad sys_readlink
.quad sys_execve
.quad sys_umask /* 60 */
.quad sys_chroot
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getpgrp
.quad sys_getpagesize
- .quad alpha_ni_syscall /* 65 */
+ .quad sys_ni_syscall /* 65 */
.quad alpha_vfork
.quad sys_newstat
.quad sys_newlstat
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 70 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 70 */
.quad sys_osf_mmap
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_munmap
.quad sys_mprotect
.quad sys_madvise /* 75 */
.quad sys_vhangup
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getgroups
/* map BSD's setpgrp to sys_setpgid for binary compatibility: */
.quad sys_setgroups /* 80 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_setpgid
.quad sys_osf_setitimer
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 85 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 85 */
.quad sys_osf_getitimer
.quad sys_gethostname
.quad sys_sethostname
@@ -119,19 +119,19 @@ sys_call_table:
.quad sys_bind
.quad sys_setsockopt /* 105 */
.quad sys_listen
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 110 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 110 */
.quad sys_sigsuspend
.quad sys_osf_sigstack
.quad sys_recvmsg
.quad sys_sendmsg
- .quad alpha_ni_syscall /* 115 */
+ .quad sys_ni_syscall /* 115 */
.quad sys_osf_gettimeofday
.quad sys_osf_getrusage
.quad sys_getsockopt
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
#ifdef CONFIG_OSF4_COMPAT
.quad sys_osf_readv /* 120 */
.quad sys_osf_writev
@@ -156,66 +156,66 @@ sys_call_table:
.quad sys_mkdir
.quad sys_rmdir
.quad sys_osf_utimes
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 140 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 140 */
.quad sys_getpeername
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getrlimit
.quad sys_setrlimit /* 145 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_setsid
.quad sys_quotactl
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getsockname /* 150 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 155 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 155 */
.quad sys_osf_sigaction
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_osf_getdirentries
.quad sys_osf_statfs /* 160 */
.quad sys_osf_fstatfs
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_osf_getdomainname /* 165 */
.quad sys_setdomainname
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 170 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 175 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 180 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 185 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 190 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 195 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 170 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 175 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 180 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 185 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 190 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 195 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
/* The OSF swapon has two extra arguments, but we ignore them. */
.quad sys_swapon
.quad sys_msgctl /* 200 */
@@ -231,93 +231,93 @@ sys_call_table:
.quad sys_shmctl /* 210 */
.quad sys_shmdt
.quad sys_shmget
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 215 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 215 */
+ .quad sys_ni_syscall
.quad sys_msync
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 220 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 220 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_osf_stat
.quad sys_osf_lstat /* 225 */
.quad sys_osf_fstat
.quad sys_osf_statfs64
.quad sys_osf_fstatfs64
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 230 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 230 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getpgid
.quad sys_getsid
.quad sys_sigaltstack /* 235 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 240 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 240 */
.quad sys_osf_sysinfo
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_osf_proplist_syscall
- .quad alpha_ni_syscall /* 245 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 250 */
+ .quad sys_ni_syscall /* 245 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 250 */
.quad sys_osf_usleep_thread
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_sysfs
- .quad alpha_ni_syscall /* 255 */
+ .quad sys_ni_syscall /* 255 */
.quad sys_osf_getsysinfo
.quad sys_osf_setsysinfo
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 260 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 265 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 270 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 275 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 280 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 285 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 290 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 295 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 260 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 265 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 270 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 275 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 280 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 285 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 290 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 295 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
/* linux-specific system calls start at 300 */
.quad sys_bdflush /* 300 */
.quad sys_sethae
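
With slot 0 handled by the special alpha_syscall_zero stub above, every other unimplemented slot can share the kernel's generic stub instead of an alpha-private one. The generic stub (kernel/sys_ni.c) is essentially:

        asmlinkage long sys_ni_syscall(void)
        {
                return -ENOSYS;
        }
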
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index b4441b0764d7..e98c6b8e6186 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -9,6 +9,7 @@
config ARC
def_bool y
select ARC_TIMERS
+ select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -17,8 +18,7 @@ config ARC
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
- select DMA_NONCOHERENT_OPS
- select DMA_NONCOHERENT_MMAP
+ select DMA_DIRECT_OPS
select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
select GENERIC_CLOCKEVENTS
select GENERIC_FIND_FIRST_BIT
@@ -149,7 +149,7 @@ config ARC_CPU_770
Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
This core has a bunch of cool new features:
-MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
- Shared Address Spaces (for sharing TLB entires in MMU)
+ Shared Address Spaces (for sharing TLB entries in MMU)
-Caches: New Prog Model, Region Flush
-Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 99cce77ab98f..644815c0516e 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -6,33 +6,11 @@
# published by the Free Software Foundation.
#
-ifeq ($(CROSS_COMPILE),)
-ifndef CONFIG_CPU_BIG_ENDIAN
-CROSS_COMPILE := arc-linux-
-else
-CROSS_COMPILE := arceb-linux-
-endif
-endif
-
KBUILD_DEFCONFIG := nsim_700_defconfig
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
-cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
-
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
- $(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
- $(error Toolchain not configured for ARCv2 builds)
-endif
-endif
+cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
@@ -79,7 +57,7 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
-LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
+LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel
KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
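
A note on the LIBGCC hunk above: switching from ':=' to '=' replaces make's immediately-expanded assignment with a recursively-expanded one, so the $(shell $(CC) $(cflags-y) --print-libgcc-file-name) invocation is deferred until LIBGCC is actually referenced, by which point cflags-y has reached its final value (including the -mcpu selection above), and the compiler is not run at all while the Makefile is merely being parsed.
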
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 517178b1daef..3b3543fd151c 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -17,6 +17,7 @@
#define _UAPI_ASM_ARC_UNISTD_H
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 4674541eba3f..8ce6e7235915 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags,
task_thread_info(current)->thr_ptr;
}
+
+ /*
+ * set up usermode thread pointer #1:
+ * when the child is picked by the scheduler, __switch_to() uses
+ * @c_callee to populate the usermode callee regs: this works (despite
+ * being in a kernel function) since the special return path for the
+ * child, @ret_from_fork(), ensures those regs are not clobbered all
+ * the way to the RTIE back to usermode
+ */
+ c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /*
+ * set up usermode thread pointer #2:
+ * however, with this special kernel use of r25, __switch_to() sets
+ * r25 for kernel needs, and the usermode r25 is only restored in the
+ * final return path, from pt_regs->user_r25. So set that up as well
+ */
+ c_regs->user_r25 = c_callee->r25;
+#endif
+
return 0;
}
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index b123558bf0bb..a7fcbc0d3943 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -42,21 +42,22 @@ void die(const char *str, struct pt_regs *regs, unsigned long address)
* -for kernel, chk if due to copy_(to|from)_user, otherwise die()
*/
static noinline int
-unhandled_exception(const char *str, struct pt_regs *regs, siginfo_t *info)
+unhandled_exception(const char *str, struct pt_regs *regs,
+ int signo, int si_code, void __user *addr)
{
if (user_mode(regs)) {
struct task_struct *tsk = current;
- tsk->thread.fault_address = (__force unsigned int)info->si_addr;
+ tsk->thread.fault_address = (__force unsigned int)addr;
- force_sig_info(info->si_signo, info, tsk);
+ force_sig_fault(signo, si_code, addr, tsk);
} else {
/* If not due to copy_(to|from)_user, we are doomed */
if (fixup_exception(regs))
return 0;
- die(str, regs, (unsigned long)info->si_addr);
+ die(str, regs, (unsigned long)addr);
}
return 1;
@@ -64,16 +65,9 @@ unhandled_exception(const char *str, struct pt_regs *regs, siginfo_t *info)
#define DO_ERROR_INFO(signr, str, name, sicode) \
int name(unsigned long address, struct pt_regs *regs) \
-{ \
- siginfo_t info; \
- \
- clear_siginfo(&info); \
- info.si_signo = signr; \
- info.si_errno = 0; \
- info.si_code = sicode; \
- info.si_addr = (void __user *)address; \
- \
- return unhandled_exception(str, regs, &info);\
+{ \
+ return unhandled_exception(str, regs, signr, sicode, \
+ (void __user *)address); \
}
/*
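
Both this conversion and the fault-handler one below lean on the force_sig_fault() helper from the kernel-wide siginfo cleanup, which fills in si_signo, si_code and si_addr itself. As called above (and modulo optional arch-specific trapno arguments that most architectures compile out), its signature at this point is roughly:

        int force_sig_fault(int sig, int code, void __user *addr,
                            struct task_struct *t);
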
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index f35ed578e007..8fb16bdabdcf 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -71,7 +71,6 @@ SECTIONS
INIT_SETUP(L1_CACHE_BYTES)
INIT_CALLS
CON_INITCALL
- SECURITY_INITCALL
}
.init.arch.info : {
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index c75d5c3470e3..db203ff69ccf 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -84,29 +84,10 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
__free_pages(page, get_order(size));
}
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr)
{
- unsigned long user_count = vma_pages(vma);
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long pfn = __phys_to_pfn(dma_addr);
- unsigned long off = vma->vm_pgoff;
- int ret = -ENXIO;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
- if (off < count && user_count <= (count - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
- user_count << PAGE_SHIFT,
- vma->vm_page_prot);
- }
-
- return ret;
+ return __phys_to_pfn(dma_addr);
}
/*
@@ -167,7 +148,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
}
/*
- * Plug in coherent or noncoherent dma ops
+ * Plug in direct dma map ops.
*/
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
@@ -175,13 +156,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
/*
* IOC hardware snoops all DMA traffic keeping the caches consistent
* with memory - eliding need for any explicit cache maintenance of
- * DMA buffers - so we can use dma_direct cache ops.
+ * DMA buffers.
*/
- if (is_isa_arcv2() && ioc_enable && coherent) {
- set_dma_ops(dev, &dma_direct_ops);
- dev_info(dev, "use dma_direct_ops cache ops\n");
- } else {
- set_dma_ops(dev, &dma_noncoherent_ops);
- dev_info(dev, "use dma_noncoherent_ops cache ops\n");
- }
+ if (is_isa_arcv2() && ioc_enable && coherent)
+ dev->dma_coherent = true;
+
+ dev_info(dev, "use %sncoherent DMA ops\n",
+ dev->dma_coherent ? "" : "non");
}
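
The removed arch_dma_mmap() is not lost functionality: with ARCH_HAS_DMA_COHERENT_TO_PFN selected in the Kconfig hunk earlier, the generic DMA-direct mmap path can perform the same remap through the new hook. Conceptually, reusing the names from the deleted function (a sketch of the generic side, not its exact code):

        unsigned long pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                              user_count << PAGE_SHIFT, vma->vm_page_prot);
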
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index db6913094be3..c9da6102eb4f 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -66,14 +66,12 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- siginfo_t info;
+ int si_code;
int ret;
vm_fault_t fault;
int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
- clear_siginfo(&info);
-
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
@@ -91,7 +89,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
return;
}
- info.si_code = SEGV_MAPERR;
+ si_code = SEGV_MAPERR;
/*
* If we're in an interrupt or have no user
@@ -119,7 +117,7 @@ retry:
* we can handle it..
*/
good_area:
- info.si_code = SEGV_ACCERR;
+ si_code = SEGV_ACCERR;
/* Handle protection violation, execute on heap or stack */
@@ -199,11 +197,7 @@ bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.fault_address = address;
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- /* info.si_code has been set above */
- info.si_addr = (void __user *)address;
- force_sig_info(SIGSEGV, &info, tsk);
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
return;
}
@@ -238,9 +232,5 @@ do_sigbus:
goto no_context;
tsk->thread.fault_address = address;
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGBUS, &info, tsk);
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
}
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f6fcb8a79889..a810fa8ba404 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -45,35 +45,42 @@ config DEBUG_WX
If in doubt, say "Y".
-# RMK wants arm kernels compiled with frame pointers or stack unwinding.
-# If you know what you are doing and are willing to live without stack
-# traces, you can get a slightly smaller kernel by setting this option to
-# n, but then RMK will have to kill you ;).
-config FRAME_POINTER
- bool
- depends on !THUMB2_KERNEL
- default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
+choice
+ prompt "Choose kernel unwinder"
+ default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
+ default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
help
- If you say N here, the resulting kernel will be slightly smaller and
- faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
- when a problem occurs with the kernel, the information that is
- reported is severely limited.
+ This determines which method will be used for unwinding kernel stack
+ traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
+ livepatch, lockdep, and more.
+
+config UNWINDER_FRAME_POINTER
+ bool "Frame pointer unwinder"
+ depends on !THUMB2_KERNEL && !CC_IS_CLANG
+ select ARCH_WANT_FRAME_POINTERS
+ select FRAME_POINTER
+ help
+ This option enables the frame pointer unwinder for unwinding
+ kernel stack traces.
-config ARM_UNWIND
- bool "Enable stack unwinding support (EXPERIMENTAL)"
+config UNWINDER_ARM
+ bool "ARM EABI stack unwinder"
depends on AEABI
- default y
+ select ARM_UNWIND
help
This option enables stack unwinding support in the kernel
using the information automatically generated by the
compiler. The resulting kernel image is slightly bigger but
the performance is not affected. Currently, this feature
- only works with EABI compilers. If unsure say Y.
+ only works with EABI compilers.
-config OLD_MCOUNT
+endchoice
+
+config ARM_UNWIND
+ bool
+
+config FRAME_POINTER
bool
- depends on FUNCTION_TRACER && FRAME_POINTER
- default y
config DEBUG_USER
bool "Verbose user fault messages"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index d1516f85f25d..5c91e0093ee8 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -74,7 +74,7 @@ endif
arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t)
arch-$(CONFIG_CPU_32v4T) =-D__LINUX_ARM_ARCH__=4 -march=armv4t
arch-$(CONFIG_CPU_32v4) =-D__LINUX_ARM_ARCH__=4 -march=armv4
-arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3
+arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3m
# Evaluate arch cc-option calls now
arch-y := $(arch-y)
@@ -264,13 +264,9 @@ platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y)))
ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y)
ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y)
-ifeq ($(KBUILD_SRC),)
-KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs))
-else
KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs))
endif
endif
-endif
export TEXT_OFFSET GZFLAGS MMUEXT
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 517e0e18f0b8..6c7ccb428c07 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -114,6 +114,35 @@
#endif
.endm
+ /*
+ * Debug kernel copy by printing the memory addresses involved
+ */
+ .macro dbgkc, begin, end, cbegin, cend
+#ifdef DEBUG
+ kputc #'\n'
+ kputc #'C'
+ kputc #':'
+ kputc #'0'
+ kputc #'x'
+ kphex \begin, 8 /* Start of compressed kernel */
+ kputc #'-'
+ kputc #'0'
+ kputc #'x'
+ kphex \end, 8 /* End of compressed kernel */
+ kputc #'-'
+ kputc #'>'
+ kputc #'0'
+ kputc #'x'
+ kphex \cbegin, 8 /* Start of kernel copy */
+ kputc #'-'
+ kputc #'0'
+ kputc #'x'
+ kphex \cend, 8 /* End of kernel copy */
+ kputc #'\n'
+ kputc #'\r'
+#endif
+ .endm
+
.section ".start", #alloc, #execinstr
/*
* sort out different calling conventions
@@ -450,6 +479,20 @@ dtb_check_done:
add r6, r9, r5
add r9, r9, r10
+#ifdef DEBUG
+ sub r10, r6, r5
+ sub r10, r9, r10
+ /*
+ * We are about to copy the kernel to a new memory area.
+ * The boundaries of the new memory area are in r10 and
+ * r9, whilst r5 and r6 contain the boundaries of the
+ * memory we are going to copy.
+ * Call dbgkc to print this information.
+ */
+ dbgkc r5, r6, r10, r9
+#endif
+
1: ldmdb r6!, {r0 - r3, r10 - r12, lr}
cmp r6, r5
stmdb r9!, {r0 - r3, r10 - r12, lr}
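
When DEBUG is defined, the dbgkc invocation above emits one line on the low-level debug console per kernel copy; following the kputc/kphex sequence in the macro, it has the form (addresses purely illustrative):

        C:0x00500000-0x00700000->0x04000000-0x04200000

that is, the start and end of the compressed kernel, followed by the start and end of the area it is being copied to.
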
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index b10dccd0958f..3b1baa8605a7 100644
--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
@@ -11,6 +11,7 @@
#include "sama5d2-pinfunc.h"
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/at91.h>
/ {
model = "Atmel SAMA5D2 PTC EK";
@@ -299,6 +300,7 @@
<PIN_PA30__NWE_NANDWE>,
<PIN_PB2__NRD_NANDOE>;
bias-pull-up;
+ atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
};
ale_cle_rdy_cs {
diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
index 43ee992ccdcf..6df61518776f 100644
--- a/arch/arm/boot/dts/bcm63138.dtsi
+++ b/arch/arm/boot/dts/bcm63138.dtsi
@@ -106,21 +106,23 @@
global_timer: timer@1e200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x1e200 0x20>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
clocks = <&axi_clk>;
};
local_timer: local-timer@1e600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x1e600 0x20>;
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+ IRQ_TYPE_EDGE_RISING)>;
clocks = <&axi_clk>;
};
twd_watchdog: watchdog@1e620 {
compatible = "arm,cortex-a9-twd-wdt";
reg = <0x1e620 0x20>;
- interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+ IRQ_TYPE_LEVEL_HIGH)>;
};
armpll: armpll {
@@ -158,7 +160,7 @@
serial0: serial@600 {
compatible = "brcm,bcm6345-uart";
reg = <0x600 0x1b>;
- interrupts = <GIC_SPI 32 0>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&periph_clk>;
clock-names = "periph";
status = "disabled";
@@ -167,7 +169,7 @@
serial1: serial@620 {
compatible = "brcm,bcm6345-uart";
reg = <0x620 0x1b>;
- interrupts = <GIC_SPI 33 0>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&periph_clk>;
clock-names = "periph";
status = "disabled";
@@ -180,7 +182,7 @@
reg = <0x2000 0x600>, <0xf0 0x10>;
reg-names = "nand", "nand-int-base";
status = "disabled";
- interrupts = <GIC_SPI 38 0>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "nand";
};
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 7423d462d1e4..50dde84b72ed 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -123,6 +123,17 @@
};
};
+&cpu0 {
+ /* CPU rated to 1GHz, not 1.2GHz as per the default settings */
+ operating-points = <
+ /* kHz uV */
+ 166666 850000
+ 400000 900000
+ 800000 1050000
+ 1000000 1200000
+ >;
+};
+
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 7234e8330a57..efbdeaaa8dcd 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -146,8 +146,9 @@
fsl,max-link-speed = <2>;
power-domains = <&pgc_pcie_phy>;
resets = <&src IMX7_RESET_PCIEPHY>,
- <&src IMX7_RESET_PCIE_CTRL_APPS_EN>;
- reset-names = "pciephy", "apps";
+ <&src IMX7_RESET_PCIE_CTRL_APPS_EN>,
+ <&src IMX7_RESET_PCIE_CTRL_APPS_TURNOFF>;
+ reset-names = "pciephy", "apps", "turnoff";
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
index 7cb235ef0fb6..6e9e1c2f9def 100644
--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
@@ -41,7 +41,7 @@
};
macb1: ethernet@f802c000 {
- compatible = "cdns,at91sam9260-macb", "cdns,macb";
+ compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi
index 661be948ab74..185541a5b69f 100644
--- a/arch/arm/boot/dts/stm32mp157c.dtsi
+++ b/arch/arm/boot/dts/stm32mp157c.dtsi
@@ -1078,8 +1078,8 @@
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc SPI6_K>;
resets = <&rcc SPI6_R>;
- dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
- <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
+ dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+ <&mdma1 35 0x0 0x40002 0x0 0x0>;
dma-names = "rx", "tx";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index ffd9f00f74a4..5f547c161baf 100644
--- a/arch/arm/boot/dts/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/sun8i-r40.dtsi
@@ -800,8 +800,7 @@
};
hdmi_phy: hdmi-phy@1ef0000 {
- compatible = "allwinner,sun8i-r40-hdmi-phy",
- "allwinner,sun50i-a64-hdmi-phy";
+ compatible = "allwinner,sun8i-r40-hdmi-phy";
reg = <0x01ef0000 0x10000>;
clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
<&ccu 7>, <&ccu 16>;
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 925d1364727a..ef0c7feea6e2 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -99,6 +99,7 @@ config CRYPTO_GHASH_ARM_CE
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
select CRYPTO_CRYPTD
+ select CRYPTO_GF128MUL
help
Use an implementation of GHASH (used by the GCM AEAD chaining mode)
that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
@@ -121,10 +122,4 @@ config CRYPTO_CHACHA20_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_CHACHA20
-config CRYPTO_SPECK_NEON
- tristate "NEON accelerated Speck cipher algorithms"
- depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
- select CRYPTO_SPECK
-
endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 8de542c48ade..bd5bceef0605 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
-obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -54,7 +53,6 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
-speck-neon-y := speck-neon-core.o speck-neon-glue.o
ifdef REGENERATE_ARM_CRYPTO
quiet_cmd_perl = PERL $@
diff --git a/arch/arm/crypto/chacha20-neon-core.S b/arch/arm/crypto/chacha20-neon-core.S
index 451a849ad518..50e7b9896818 100644
--- a/arch/arm/crypto/chacha20-neon-core.S
+++ b/arch/arm/crypto/chacha20-neon-core.S
@@ -18,6 +18,34 @@
* (at your option) any later version.
*/
+ /*
+ * NEON doesn't have a rotate instruction. The alternatives are, more or less:
+ *
+ * (a) vshl.u32 + vsri.u32 (needs temporary register)
+ * (b) vshl.u32 + vshr.u32 + vorr (needs temporary register)
+ * (c) vrev32.16 (16-bit rotations only)
+ * (d) vtbl.8 + vtbl.8 (multiple of 8 bits rotations only,
+ * needs index vector)
+ *
+ * ChaCha20 has 16, 12, 8, and 7-bit rotations. For the 12 and 7-bit
+ * rotations, the only choices are (a) and (b). We use (a) since it takes
+ * two-thirds the cycles of (b) on both Cortex-A7 and Cortex-A53.
+ *
+ * For the 16-bit rotation, we use vrev32.16 since it's consistently fastest
+ * and doesn't need a temporary register.
+ *
+ * For the 8-bit rotation, we use vtbl.8 + vtbl.8. On Cortex-A7, this sequence
+ * is twice as fast as (a), even when doing (a) on multiple registers
+ * simultaneously to eliminate the stall between vshl and vsri. Also, it
+ * parallelizes better when temporary registers are scarce.
+ *
+ * A disadvantage is that on Cortex-A53, the vtbl sequence is the same speed as
+ * (a), so the need to load the rotation table actually makes the vtbl method
+ * slightly slower overall on that CPU (~1.3% slower ChaCha20). Still, it
+ * seems to be a good compromise to get a more significant speed boost on some
+ * CPUs, e.g. ~4.8% faster ChaCha20 on Cortex-A7.
+ */
+
#include <linux/linkage.h>
.text
@@ -46,7 +74,9 @@ ENTRY(chacha20_block_xor_neon)
vmov q10, q2
vmov q11, q3
+ adr ip, .Lrol8_table
mov r3, #10
+ vld1.8 {d10}, [ip, :64]
.Ldoubleround:
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
@@ -62,9 +92,9 @@ ENTRY(chacha20_block_xor_neon)
// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
vadd.i32 q0, q0, q1
- veor q4, q3, q0
- vshl.u32 q3, q4, #8
- vsri.u32 q3, q4, #24
+ veor q3, q3, q0
+ vtbl.8 d6, {d6}, d10
+ vtbl.8 d7, {d7}, d10
// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
vadd.i32 q2, q2, q3
@@ -92,9 +122,9 @@ ENTRY(chacha20_block_xor_neon)
// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
vadd.i32 q0, q0, q1
- veor q4, q3, q0
- vshl.u32 q3, q4, #8
- vsri.u32 q3, q4, #24
+ veor q3, q3, q0
+ vtbl.8 d6, {d6}, d10
+ vtbl.8 d7, {d7}, d10
// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
vadd.i32 q2, q2, q3
@@ -139,13 +169,17 @@ ENTRY(chacha20_block_xor_neon)
bx lr
ENDPROC(chacha20_block_xor_neon)
+ .align 4
+.Lctrinc: .word 0, 1, 2, 3
+.Lrol8_table: .byte 3, 0, 1, 2, 7, 4, 5, 6
+
.align 5
ENTRY(chacha20_4block_xor_neon)
- push {r4-r6, lr}
- mov ip, sp // preserve the stack pointer
- sub r3, sp, #0x20 // allocate a 32 byte buffer
- bic r3, r3, #0x1f // aligned to 32 bytes
- mov sp, r3
+ push {r4-r5}
+ mov r4, sp // preserve the stack pointer
+ sub ip, sp, #0x20 // allocate a 32 byte buffer
+ bic ip, ip, #0x1f // aligned to 32 bytes
+ mov sp, ip
// r0: Input state matrix, s
// r1: 4 data blocks output, o
@@ -155,25 +189,24 @@ ENTRY(chacha20_4block_xor_neon)
// This function encrypts four consecutive ChaCha20 blocks by loading
// the state matrix in NEON registers four times. The algorithm performs
// each operation on the corresponding word of each state matrix, hence
- // requires no word shuffling. For final XORing step we transpose the
- // matrix by interleaving 32- and then 64-bit words, which allows us to
- // do XOR in NEON registers.
+ // requires no word shuffling. The words are re-interleaved before the
+ // final addition of the original state and the XORing step.
//
- // x0..15[0-3] = s0..3[0..3]
- add r3, r0, #0x20
+ // x0..15[0-3] = s0..15[0-3]
+ add ip, r0, #0x20
vld1.32 {q0-q1}, [r0]
- vld1.32 {q2-q3}, [r3]
+ vld1.32 {q2-q3}, [ip]
- adr r3, CTRINC
+ adr r5, .Lctrinc
vdup.32 q15, d7[1]
vdup.32 q14, d7[0]
- vld1.32 {q11}, [r3, :128]
+ vld1.32 {q4}, [r5, :128]
vdup.32 q13, d6[1]
vdup.32 q12, d6[0]
- vadd.i32 q12, q12, q11 // x12 += counter values 0-3
vdup.32 q11, d5[1]
vdup.32 q10, d5[0]
+ vadd.u32 q12, q12, q4 // x12 += counter values 0-3
vdup.32 q9, d4[1]
vdup.32 q8, d4[0]
vdup.32 q7, d3[1]
@@ -185,9 +218,13 @@ ENTRY(chacha20_4block_xor_neon)
vdup.32 q1, d0[1]
vdup.32 q0, d0[0]
+ adr ip, .Lrol8_table
mov r3, #10
+ b 1f
.Ldoubleround4:
+ vld1.32 {q8-q9}, [sp, :256]
+1:
// x0 += x4, x12 = rotl32(x12 ^ x0, 16)
// x1 += x5, x13 = rotl32(x13 ^ x1, 16)
// x2 += x6, x14 = rotl32(x14 ^ x2, 16)
@@ -236,24 +273,25 @@ ENTRY(chacha20_4block_xor_neon)
// x1 += x5, x13 = rotl32(x13 ^ x1, 8)
// x2 += x6, x14 = rotl32(x14 ^ x2, 8)
// x3 += x7, x15 = rotl32(x15 ^ x3, 8)
+ vld1.8 {d16}, [ip, :64]
vadd.i32 q0, q0, q4
vadd.i32 q1, q1, q5
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
- veor q8, q12, q0
- veor q9, q13, q1
- vshl.u32 q12, q8, #8
- vshl.u32 q13, q9, #8
- vsri.u32 q12, q8, #24
- vsri.u32 q13, q9, #24
+ veor q12, q12, q0
+ veor q13, q13, q1
+ veor q14, q14, q2
+ veor q15, q15, q3
- veor q8, q14, q2
- veor q9, q15, q3
- vshl.u32 q14, q8, #8
- vshl.u32 q15, q9, #8
- vsri.u32 q14, q8, #24
- vsri.u32 q15, q9, #24
+ vtbl.8 d24, {d24}, d16
+ vtbl.8 d25, {d25}, d16
+ vtbl.8 d26, {d26}, d16
+ vtbl.8 d27, {d27}, d16
+ vtbl.8 d28, {d28}, d16
+ vtbl.8 d29, {d29}, d16
+ vtbl.8 d30, {d30}, d16
+ vtbl.8 d31, {d31}, d16
vld1.32 {q8-q9}, [sp, :256]
@@ -332,24 +370,25 @@ ENTRY(chacha20_4block_xor_neon)
// x1 += x6, x12 = rotl32(x12 ^ x1, 8)
// x2 += x7, x13 = rotl32(x13 ^ x2, 8)
// x3 += x4, x14 = rotl32(x14 ^ x3, 8)
+ vld1.8 {d16}, [ip, :64]
vadd.i32 q0, q0, q5
vadd.i32 q1, q1, q6
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q4
- veor q8, q15, q0
- veor q9, q12, q1
- vshl.u32 q15, q8, #8
- vshl.u32 q12, q9, #8
- vsri.u32 q15, q8, #24
- vsri.u32 q12, q9, #24
+ veor q15, q15, q0
+ veor q12, q12, q1
+ veor q13, q13, q2
+ veor q14, q14, q3
- veor q8, q13, q2
- veor q9, q14, q3
- vshl.u32 q13, q8, #8
- vshl.u32 q14, q9, #8
- vsri.u32 q13, q8, #24
- vsri.u32 q14, q9, #24
+ vtbl.8 d30, {d30}, d16
+ vtbl.8 d31, {d31}, d16
+ vtbl.8 d24, {d24}, d16
+ vtbl.8 d25, {d25}, d16
+ vtbl.8 d26, {d26}, d16
+ vtbl.8 d27, {d27}, d16
+ vtbl.8 d28, {d28}, d16
+ vtbl.8 d29, {d29}, d16
vld1.32 {q8-q9}, [sp, :256]
@@ -379,104 +418,76 @@ ENTRY(chacha20_4block_xor_neon)
vsri.u32 q6, q9, #25
subs r3, r3, #1
- beq 0f
-
- vld1.32 {q8-q9}, [sp, :256]
- b .Ldoubleround4
-
- // x0[0-3] += s0[0]
- // x1[0-3] += s0[1]
- // x2[0-3] += s0[2]
- // x3[0-3] += s0[3]
-0: ldmia r0!, {r3-r6}
- vdup.32 q8, r3
- vdup.32 q9, r4
- vadd.i32 q0, q0, q8
- vadd.i32 q1, q1, q9
- vdup.32 q8, r5
- vdup.32 q9, r6
- vadd.i32 q2, q2, q8
- vadd.i32 q3, q3, q9
-
- // x4[0-3] += s1[0]
- // x5[0-3] += s1[1]
- // x6[0-3] += s1[2]
- // x7[0-3] += s1[3]
- ldmia r0!, {r3-r6}
- vdup.32 q8, r3
- vdup.32 q9, r4
- vadd.i32 q4, q4, q8
- vadd.i32 q5, q5, q9
- vdup.32 q8, r5
- vdup.32 q9, r6
- vadd.i32 q6, q6, q8
- vadd.i32 q7, q7, q9
-
- // interleave 32-bit words in state n, n+1
- vzip.32 q0, q1
- vzip.32 q2, q3
- vzip.32 q4, q5
- vzip.32 q6, q7
-
- // interleave 64-bit words in state n, n+2
+ bne .Ldoubleround4
+
+ // x0..7[0-3] are in q0-q7, x10..15[0-3] are in q10-q15.
+ // x8..9[0-3] are on the stack.
+
+ // Re-interleave the words in the first two rows of each block (x0..7).
+ // Also add the counter values 0-3 to x12[0-3].
+ vld1.32 {q8}, [r5, :128] // load counter values 0-3
+ vzip.32 q0, q1 // => (0 1 0 1) (0 1 0 1)
+ vzip.32 q2, q3 // => (2 3 2 3) (2 3 2 3)
+ vzip.32 q4, q5 // => (4 5 4 5) (4 5 4 5)
+ vzip.32 q6, q7 // => (6 7 6 7) (6 7 6 7)
+ vadd.u32 q12, q8 // x12 += counter values 0-3
vswp d1, d4
vswp d3, d6
+ vld1.32 {q8-q9}, [r0]! // load s0..7
vswp d9, d12
vswp d11, d14
- // xor with corresponding input, write to output
+ // Swap q1 and q4 so that we'll free up consecutive registers (q0-q1)
+ // after XORing the first 32 bytes.
+ vswp q1, q4
+
+ // First two rows of each block are (q0 q1) (q2 q6) (q4 q5) (q3 q7)
+
+ // x0..3[0-3] += s0..3[0-3] (add orig state to 1st row of each block)
+ vadd.u32 q0, q0, q8
+ vadd.u32 q2, q2, q8
+ vadd.u32 q4, q4, q8
+ vadd.u32 q3, q3, q8
+
+ // x4..7[0-3] += s4..7[0-3] (add orig state to 2nd row of each block)
+ vadd.u32 q1, q1, q9
+ vadd.u32 q6, q6, q9
+ vadd.u32 q5, q5, q9
+ vadd.u32 q7, q7, q9
+
+ // XOR first 32 bytes using keystream from first two rows of first block
vld1.8 {q8-q9}, [r2]!
veor q8, q8, q0
- veor q9, q9, q4
+ veor q9, q9, q1
vst1.8 {q8-q9}, [r1]!
+ // Re-interleave the words in the last two rows of each block (x8..15).
vld1.32 {q8-q9}, [sp, :256]
-
- // x8[0-3] += s2[0]
- // x9[0-3] += s2[1]
- // x10[0-3] += s2[2]
- // x11[0-3] += s2[3]
- ldmia r0!, {r3-r6}
- vdup.32 q0, r3
- vdup.32 q4, r4
- vadd.i32 q8, q8, q0
- vadd.i32 q9, q9, q4
- vdup.32 q0, r5
- vdup.32 q4, r6
- vadd.i32 q10, q10, q0
- vadd.i32 q11, q11, q4
-
- // x12[0-3] += s3[0]
- // x13[0-3] += s3[1]
- // x14[0-3] += s3[2]
- // x15[0-3] += s3[3]
- ldmia r0!, {r3-r6}
- vdup.32 q0, r3
- vdup.32 q4, r4
- adr r3, CTRINC
- vadd.i32 q12, q12, q0
- vld1.32 {q0}, [r3, :128]
- vadd.i32 q13, q13, q4
- vadd.i32 q12, q12, q0 // x12 += counter values 0-3
-
- vdup.32 q0, r5
- vdup.32 q4, r6
- vadd.i32 q14, q14, q0
- vadd.i32 q15, q15, q4
-
- // interleave 32-bit words in state n, n+1
- vzip.32 q8, q9
- vzip.32 q10, q11
- vzip.32 q12, q13
- vzip.32 q14, q15
-
- // interleave 64-bit words in state n, n+2
- vswp d17, d20
- vswp d19, d22
+ vzip.32 q12, q13 // => (12 13 12 13) (12 13 12 13)
+ vzip.32 q14, q15 // => (14 15 14 15) (14 15 14 15)
+ vzip.32 q8, q9 // => (8 9 8 9) (8 9 8 9)
+ vzip.32 q10, q11 // => (10 11 10 11) (10 11 10 11)
+ vld1.32 {q0-q1}, [r0] // load s8..15
vswp d25, d28
vswp d27, d30
+ vswp d17, d20
+ vswp d19, d22
+
+ // Last two rows of each block are (q8 q12) (q10 q14) (q9 q13) (q11 q15)
+
+ // x8..11[0-3] += s8..11[0-3] (add orig state to 3rd row of each block)
+ vadd.u32 q8, q8, q0
+ vadd.u32 q10, q10, q0
+ vadd.u32 q9, q9, q0
+ vadd.u32 q11, q11, q0
+
+ // x12..15[0-3] += s12..15[0-3] (add orig state to 4th row of each block)
+ vadd.u32 q12, q12, q1
+ vadd.u32 q14, q14, q1
+ vadd.u32 q13, q13, q1
+ vadd.u32 q15, q15, q1
- vmov q4, q1
+ // XOR the rest of the data with the keystream
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q8
@@ -509,13 +520,11 @@ ENTRY(chacha20_4block_xor_neon)
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]
+ mov sp, r4 // restore original stack pointer
veor q0, q0, q11
veor q1, q1, q15
vst1.8 {q0-q1}, [r1]
- mov sp, ip
- pop {r4-r6, pc}
+ pop {r4-r5}
+ bx lr
ENDPROC(chacha20_4block_xor_neon)
-
- .align 4
-CTRINC: .word 0, 1, 2, 3
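
The rotation trade-offs discussed at the top of this file are easier to see in scalar form: the vshl.u32 + vsri.u32 pair computes an ordinary rotate, while the 8-bit case is a pure byte permutation, which is exactly what the new .Lrol8_table encodes (per 64-bit half: bytes 3,0,1,2 and 7,4,5,6). A small C sketch of the equivalence, illustrative only:

        #include <stdint.h>
        #include <string.h>

        /* what the vshl.u32 #n + vsri.u32 #(32-n) pair computes per lane */
        static inline uint32_t rotl32(uint32_t x, unsigned int n) /* 0 < n < 32 */
        {
                return (x << n) | (x >> (32 - n));
        }

        /* rotl32(x, 8) as the byte shuffle done by vtbl.8 with table
         * {3, 0, 1, 2} (little-endian lanes, as the d-registers hold them) */
        static inline uint32_t rotl8_by_shuffle(uint32_t x)
        {
                static const uint8_t tbl[4] = { 3, 0, 1, 2 };
                uint8_t b[4], r[4];
                int i;

                memcpy(b, &x, 4);
                for (i = 0; i < 4; i++)
                        r[i] = b[tbl[i]];
                memcpy(&x, r, 4);
                return x;
        }
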
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index 96e62ec105d0..cd9e93b46c2d 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -236,7 +236,7 @@ static void __exit crc32_pmull_mod_exit(void)
ARRAY_SIZE(crc32_pmull_algs));
}
-static const struct cpu_feature crc32_cpu_feature[] = {
+static const struct cpu_feature __maybe_unused crc32_cpu_feature[] = {
{ cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { }
};
MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature);
diff --git a/arch/arm/crypto/ghash-ce-core.S b/arch/arm/crypto/ghash-ce-core.S
index 2f78c10b1881..406009afa9cf 100644
--- a/arch/arm/crypto/ghash-ce-core.S
+++ b/arch/arm/crypto/ghash-ce-core.S
@@ -63,6 +63,33 @@
k48 .req d31
SHASH2_p64 .req d31
+ HH .req q10
+ HH3 .req q11
+ HH4 .req q12
+ HH34 .req q13
+
+ HH_L .req d20
+ HH_H .req d21
+ HH3_L .req d22
+ HH3_H .req d23
+ HH4_L .req d24
+ HH4_H .req d25
+ HH34_L .req d26
+ HH34_H .req d27
+ SHASH2_H .req d29
+
+ XL2 .req q5
+ XM2 .req q6
+ XH2 .req q7
+ T3 .req q8
+
+ XL2_L .req d10
+ XL2_H .req d11
+ XM2_L .req d12
+ XM2_H .req d13
+ T3_L .req d16
+ T3_H .req d17
+
.text
.fpu crypto-neon-fp-armv8
@@ -175,12 +202,77 @@
beq 0f
vld1.64 {T1}, [ip]
teq r0, #0
- b 1f
+ b 3f
+
+0: .ifc \pn, p64
+ tst r0, #3 // skip until #blocks is a
+ bne 2f // round multiple of 4
+
+ vld1.8 {XL2-XM2}, [r2]!
+1: vld1.8 {T3-T2}, [r2]!
+ vrev64.8 XL2, XL2
+ vrev64.8 XM2, XM2
+
+ subs r0, r0, #4
+
+ vext.8 T1, XL2, XL2, #8
+ veor XL2_H, XL2_H, XL_L
+ veor XL, XL, T1
+
+ vrev64.8 T3, T3
+ vrev64.8 T1, T2
+
+ vmull.p64 XH, HH4_H, XL_H // a1 * b1
+ veor XL2_H, XL2_H, XL_H
+ vmull.p64 XL, HH4_L, XL_L // a0 * b0
+ vmull.p64 XM, HH34_H, XL2_H // (a1 + a0)(b1 + b0)
+
+ vmull.p64 XH2, HH3_H, XM2_L // a1 * b1
+ veor XM2_L, XM2_L, XM2_H
+ vmull.p64 XL2, HH3_L, XM2_H // a0 * b0
+ vmull.p64 XM2, HH34_L, XM2_L // (a1 + a0)(b1 + b0)
+
+ veor XH, XH, XH2
+ veor XL, XL, XL2
+ veor XM, XM, XM2
+
+ vmull.p64 XH2, HH_H, T3_L // a1 * b1
+ veor T3_L, T3_L, T3_H
+ vmull.p64 XL2, HH_L, T3_H // a0 * b0
+ vmull.p64 XM2, SHASH2_H, T3_L // (a1 + a0)(b1 + b0)
+
+ veor XH, XH, XH2
+ veor XL, XL, XL2
+ veor XM, XM, XM2
+
+ vmull.p64 XH2, SHASH_H, T1_L // a1 * b1
+ veor T1_L, T1_L, T1_H
+ vmull.p64 XL2, SHASH_L, T1_H // a0 * b0
+ vmull.p64 XM2, SHASH2_p64, T1_L // (a1 + a0)(b1 + b0)
+
+ veor XH, XH, XH2
+ veor XL, XL, XL2
+ veor XM, XM, XM2
-0: vld1.64 {T1}, [r2]!
+ beq 4f
+
+ vld1.8 {XL2-XM2}, [r2]!
+
+ veor T1, XL, XH
+ veor XM, XM, T1
+
+ __pmull_reduce_p64
+
+ veor T1, T1, XH
+ veor XL, XL, T1
+
+ b 1b
+ .endif
+
+2: vld1.64 {T1}, [r2]!
subs r0, r0, #1
-1: /* multiply XL by SHASH in GF(2^128) */
+3: /* multiply XL by SHASH in GF(2^128) */
#ifndef CONFIG_CPU_BIG_ENDIAN
vrev64.8 T1, T1
#endif
@@ -193,7 +285,7 @@
__pmull_\pn XL, XL_L, SHASH_L, s1l, s2l, s3l, s4l @ a0 * b0
__pmull_\pn XM, T1_L, SHASH2_\pn @ (a1+a0)(b1+b0)
- veor T1, XL, XH
+4: veor T1, XL, XH
veor XM, XM, T1
__pmull_reduce_\pn
@@ -212,8 +304,14 @@
* struct ghash_key const *k, const char *head)
*/
ENTRY(pmull_ghash_update_p64)
- vld1.64 {SHASH}, [r3]
+ vld1.64 {SHASH}, [r3]!
+ vld1.64 {HH}, [r3]!
+ vld1.64 {HH3-HH4}, [r3]
+
veor SHASH2_p64, SHASH_L, SHASH_H
+ veor SHASH2_H, HH_L, HH_H
+ veor HH34_L, HH3_L, HH3_H
+ veor HH34_H, HH4_L, HH4_H
vmov.i8 MASK, #0xe1
vshl.u64 MASK, MASK, #57
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 8930fc4e7c22..b7d30b6cf49c 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 vmull.p64 instructions.
*
- * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2015 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -28,8 +28,10 @@ MODULE_ALIAS_CRYPTO("ghash");
#define GHASH_DIGEST_SIZE 16
struct ghash_key {
- u64 a;
- u64 b;
+ u64 h[2];
+ u64 h2[2];
+ u64 h3[2];
+ u64 h4[2];
};
struct ghash_desc_ctx {
@@ -117,26 +119,40 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
return 0;
}
+static void ghash_reflect(u64 h[], const be128 *k)
+{
+ u64 carry = be64_to_cpu(k->a) >> 63;
+
+ h[0] = (be64_to_cpu(k->b) << 1) | carry;
+ h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);
+
+ if (carry)
+ h[1] ^= 0xc200000000000000UL;
+}
+
static int ghash_setkey(struct crypto_shash *tfm,
const u8 *inkey, unsigned int keylen)
{
struct ghash_key *key = crypto_shash_ctx(tfm);
- u64 a, b;
+ be128 h, k;
if (keylen != GHASH_BLOCK_SIZE) {
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- /* perform multiplication by 'x' in GF(2^128) */
- b = get_unaligned_be64(inkey);
- a = get_unaligned_be64(inkey + 8);
+ memcpy(&k, inkey, GHASH_BLOCK_SIZE);
+ ghash_reflect(key->h, &k);
+
+ h = k;
+ gf128mul_lle(&h, &k);
+ ghash_reflect(key->h2, &h);
- key->a = (a << 1) | (b >> 63);
- key->b = (b << 1) | (a >> 63);
+ gf128mul_lle(&h, &k);
+ ghash_reflect(key->h3, &h);
- if (b >> 63)
- key->b ^= 0xc200000000000000UL;
+ gf128mul_lle(&h, &k);
+ ghash_reflect(key->h4, &h);
return 0;
}
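
The extra key powers computed above are what the 4-way loop added to ghash-ce-core.S consumes. With H, H^2, H^3 and H^4 precomputed via gf128mul_lle(), the per-block Horner update

        X <- (X ^ C_i) * H

aggregates over four ciphertext blocks C_0..C_3 (addition in GF(2^128) is XOR) as

        X <- ((X ^ C_0) * H^4) ^ (C_1 * H^3) ^ (C_2 * H^2) ^ (C_3 * H)

so a single modular reduction serves four blocks - the standard aggregated-reduction trick. ghash_reflect() just stores each power bit-reflected and pre-shifted into the form the vmull.p64-based reduction in the assembly expects.
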
diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S
deleted file mode 100644
index 57caa742016e..000000000000
--- a/arch/arm/crypto/speck-neon-core.S
+++ /dev/null
@@ -1,434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Author: Eric Biggers <ebiggers@google.com>
- */
-
-#include <linux/linkage.h>
-
- .text
- .fpu neon
-
- // arguments
- ROUND_KEYS .req r0 // const {u64,u32} *round_keys
- NROUNDS .req r1 // int nrounds
- DST .req r2 // void *dst
- SRC .req r3 // const void *src
- NBYTES .req r4 // unsigned int nbytes
- TWEAK .req r5 // void *tweak
-
- // registers which hold the data being encrypted/decrypted
- X0 .req q0
- X0_L .req d0
- X0_H .req d1
- Y0 .req q1
- Y0_H .req d3
- X1 .req q2
- X1_L .req d4
- X1_H .req d5
- Y1 .req q3
- Y1_H .req d7
- X2 .req q4
- X2_L .req d8
- X2_H .req d9
- Y2 .req q5
- Y2_H .req d11
- X3 .req q6
- X3_L .req d12
- X3_H .req d13
- Y3 .req q7
- Y3_H .req d15
-
- // the round key, duplicated in all lanes
- ROUND_KEY .req q8
- ROUND_KEY_L .req d16
- ROUND_KEY_H .req d17
-
- // index vector for vtbl-based 8-bit rotates
- ROTATE_TABLE .req d18
-
- // multiplication table for updating XTS tweaks
- GF128MUL_TABLE .req d19
- GF64MUL_TABLE .req d19
-
- // current XTS tweak value(s)
- TWEAKV .req q10
- TWEAKV_L .req d20
- TWEAKV_H .req d21
-
- TMP0 .req q12
- TMP0_L .req d24
- TMP0_H .req d25
- TMP1 .req q13
- TMP2 .req q14
- TMP3 .req q15
-
- .align 4
-.Lror64_8_table:
- .byte 1, 2, 3, 4, 5, 6, 7, 0
-.Lror32_8_table:
- .byte 1, 2, 3, 0, 5, 6, 7, 4
-.Lrol64_8_table:
- .byte 7, 0, 1, 2, 3, 4, 5, 6
-.Lrol32_8_table:
- .byte 3, 0, 1, 2, 7, 4, 5, 6
-.Lgf128mul_table:
- .byte 0, 0x87
- .fill 14
-.Lgf64mul_table:
- .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b
- .fill 12
-
-/*
- * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
- *
- * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
- * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
- * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
- *
- * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because
- * the vtbl approach is faster on some processors and the same speed on others.
- */
-.macro _speck_round_128bytes n
-
- // x = ror(x, 8)
- vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
- vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
- vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
- vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
- vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
- vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
- vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
- vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
-
- // x += y
- vadd.u\n X0, Y0
- vadd.u\n X1, Y1
- vadd.u\n X2, Y2
- vadd.u\n X3, Y3
-
- // x ^= k
- veor X0, ROUND_KEY
- veor X1, ROUND_KEY
- veor X2, ROUND_KEY
- veor X3, ROUND_KEY
-
- // y = rol(y, 3)
- vshl.u\n TMP0, Y0, #3
- vshl.u\n TMP1, Y1, #3
- vshl.u\n TMP2, Y2, #3
- vshl.u\n TMP3, Y3, #3
- vsri.u\n TMP0, Y0, #(\n - 3)
- vsri.u\n TMP1, Y1, #(\n - 3)
- vsri.u\n TMP2, Y2, #(\n - 3)
- vsri.u\n TMP3, Y3, #(\n - 3)
-
- // y ^= x
- veor Y0, TMP0, X0
- veor Y1, TMP1, X1
- veor Y2, TMP2, X2
- veor Y3, TMP3, X3
-.endm
-
-/*
- * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
- *
- * This is the inverse of _speck_round_128bytes().
- */
-.macro _speck_unround_128bytes n
-
- // y ^= x
- veor TMP0, Y0, X0
- veor TMP1, Y1, X1
- veor TMP2, Y2, X2
- veor TMP3, Y3, X3
-
- // y = ror(y, 3)
- vshr.u\n Y0, TMP0, #3
- vshr.u\n Y1, TMP1, #3
- vshr.u\n Y2, TMP2, #3
- vshr.u\n Y3, TMP3, #3
- vsli.u\n Y0, TMP0, #(\n - 3)
- vsli.u\n Y1, TMP1, #(\n - 3)
- vsli.u\n Y2, TMP2, #(\n - 3)
- vsli.u\n Y3, TMP3, #(\n - 3)
-
- // x ^= k
- veor X0, ROUND_KEY
- veor X1, ROUND_KEY
- veor X2, ROUND_KEY
- veor X3, ROUND_KEY
-
- // x -= y
- vsub.u\n X0, Y0
- vsub.u\n X1, Y1
- vsub.u\n X2, Y2
- vsub.u\n X3, Y3
-
- // x = rol(x, 8);
- vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
- vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
- vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
- vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
- vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
- vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
- vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
- vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
-.endm
-
-.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp
-
- // Load the next source block
- vld1.8 {\dst_reg}, [SRC]!
-
- // Save the current tweak in the tweak buffer
- vst1.8 {TWEAKV}, [\tweak_buf:128]!
-
- // XOR the next source block with the current tweak
- veor \dst_reg, TWEAKV
-
- /*
- * Calculate the next tweak by multiplying the current one by x,
- * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
- */
- vshr.u64 \tmp, TWEAKV, #63
- vshl.u64 TWEAKV, #1
- veor TWEAKV_H, \tmp\()_L
- vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H
- veor TWEAKV_L, \tmp\()_H
-.endm
-
-.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp
-
- // Load the next two source blocks
- vld1.8 {\dst_reg}, [SRC]!
-
- // Save the current two tweaks in the tweak buffer
- vst1.8 {TWEAKV}, [\tweak_buf:128]!
-
- // XOR the next two source blocks with the current two tweaks
- veor \dst_reg, TWEAKV
-
- /*
- * Calculate the next two tweaks by multiplying the current ones by x^2,
- * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
- */
- vshr.u64 \tmp, TWEAKV, #62
- vshl.u64 TWEAKV, #2
- vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L
- vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H
- veor TWEAKV, \tmp
-.endm
-
-/*
- * _speck_xts_crypt() - Speck-XTS encryption/decryption
- *
- * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
- * using Speck-XTS, specifically the variant with a block size of '2n' and round
- * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
- * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
- * nonzero multiple of 128.
- */
-.macro _speck_xts_crypt n, decrypting
- push {r4-r7}
- mov r7, sp
-
- /*
- * The first four parameters were passed in registers r0-r3. Load the
- * additional parameters, which were passed on the stack.
- */
- ldr NBYTES, [sp, #16]
- ldr TWEAK, [sp, #20]
-
- /*
- * If decrypting, modify the ROUND_KEYS parameter to point to the last
- * round key rather than the first, since for decryption the round keys
- * are used in reverse order.
- */
-.if \decrypting
-.if \n == 64
- add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3
- sub ROUND_KEYS, #8
-.else
- add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2
- sub ROUND_KEYS, #4
-.endif
-.endif
-
- // Load the index vector for vtbl-based 8-bit rotates
-.if \decrypting
- ldr r12, =.Lrol\n\()_8_table
-.else
- ldr r12, =.Lror\n\()_8_table
-.endif
- vld1.8 {ROTATE_TABLE}, [r12:64]
-
- // One-time XTS preparation
-
- /*
- * Allocate stack space to store 128 bytes worth of tweaks. For
- * performance, this space is aligned to a 16-byte boundary so that we
- * can use the load/store instructions that declare 16-byte alignment.
- * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
- */
- sub r12, sp, #128
- bic r12, #0xf
- mov sp, r12
-
-.if \n == 64
- // Load first tweak
- vld1.8 {TWEAKV}, [TWEAK]
-
- // Load GF(2^128) multiplication table
- ldr r12, =.Lgf128mul_table
- vld1.8 {GF128MUL_TABLE}, [r12:64]
-.else
- // Load first tweak
- vld1.8 {TWEAKV_L}, [TWEAK]
-
- // Load GF(2^64) multiplication table
- ldr r12, =.Lgf64mul_table
- vld1.8 {GF64MUL_TABLE}, [r12:64]
-
- // Calculate second tweak, packing it together with the first
- vshr.u64 TMP0_L, TWEAKV_L, #63
- vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L
- vshl.u64 TWEAKV_H, TWEAKV_L, #1
- veor TWEAKV_H, TMP0_L
-.endif
-
-.Lnext_128bytes_\@:
-
- /*
- * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak
- * values, and save the tweaks on the stack for later. Then
- * de-interleave the 'x' and 'y' elements of each block, i.e. make it so
- * that the X[0-3] registers contain only the second halves of blocks,
- * and the Y[0-3] registers contain only the first halves of blocks.
- * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
- */
- mov r12, sp
-.if \n == 64
- _xts128_precrypt_one X0, r12, TMP0
- _xts128_precrypt_one Y0, r12, TMP0
- _xts128_precrypt_one X1, r12, TMP0
- _xts128_precrypt_one Y1, r12, TMP0
- _xts128_precrypt_one X2, r12, TMP0
- _xts128_precrypt_one Y2, r12, TMP0
- _xts128_precrypt_one X3, r12, TMP0
- _xts128_precrypt_one Y3, r12, TMP0
- vswp X0_L, Y0_H
- vswp X1_L, Y1_H
- vswp X2_L, Y2_H
- vswp X3_L, Y3_H
-.else
- _xts64_precrypt_two X0, r12, TMP0
- _xts64_precrypt_two Y0, r12, TMP0
- _xts64_precrypt_two X1, r12, TMP0
- _xts64_precrypt_two Y1, r12, TMP0
- _xts64_precrypt_two X2, r12, TMP0
- _xts64_precrypt_two Y2, r12, TMP0
- _xts64_precrypt_two X3, r12, TMP0
- _xts64_precrypt_two Y3, r12, TMP0
- vuzp.32 Y0, X0
- vuzp.32 Y1, X1
- vuzp.32 Y2, X2
- vuzp.32 Y3, X3
-.endif
-
- // Do the cipher rounds
-
- mov r12, ROUND_KEYS
- mov r6, NROUNDS
-
-.Lnext_round_\@:
-.if \decrypting
-.if \n == 64
- vld1.64 ROUND_KEY_L, [r12]
- sub r12, #8
- vmov ROUND_KEY_H, ROUND_KEY_L
-.else
- vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]
- sub r12, #4
-.endif
- _speck_unround_128bytes \n
-.else
-.if \n == 64
- vld1.64 ROUND_KEY_L, [r12]!
- vmov ROUND_KEY_H, ROUND_KEY_L
-.else
- vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]!
-.endif
- _speck_round_128bytes \n
-.endif
- subs r6, r6, #1
- bne .Lnext_round_\@
-
- // Re-interleave the 'x' and 'y' elements of each block
-.if \n == 64
- vswp X0_L, Y0_H
- vswp X1_L, Y1_H
- vswp X2_L, Y2_H
- vswp X3_L, Y3_H
-.else
- vzip.32 Y0, X0
- vzip.32 Y1, X1
- vzip.32 Y2, X2
- vzip.32 Y3, X3
-.endif
-
- // XOR the encrypted/decrypted blocks with the tweaks we saved earlier
- mov r12, sp
- vld1.8 {TMP0, TMP1}, [r12:128]!
- vld1.8 {TMP2, TMP3}, [r12:128]!
- veor X0, TMP0
- veor Y0, TMP1
- veor X1, TMP2
- veor Y1, TMP3
- vld1.8 {TMP0, TMP1}, [r12:128]!
- vld1.8 {TMP2, TMP3}, [r12:128]!
- veor X2, TMP0
- veor Y2, TMP1
- veor X3, TMP2
- veor Y3, TMP3
-
- // Store the ciphertext in the destination buffer
- vst1.8 {X0, Y0}, [DST]!
- vst1.8 {X1, Y1}, [DST]!
- vst1.8 {X2, Y2}, [DST]!
- vst1.8 {X3, Y3}, [DST]!
-
- // Continue if there are more 128-byte chunks remaining, else return
- subs NBYTES, #128
- bne .Lnext_128bytes_\@
-
- // Store the next tweak
-.if \n == 64
- vst1.8 {TWEAKV}, [TWEAK]
-.else
- vst1.8 {TWEAKV_L}, [TWEAK]
-.endif
-
- mov sp, r7
- pop {r4-r7}
- bx lr
-.endm
-
-ENTRY(speck128_xts_encrypt_neon)
- _speck_xts_crypt n=64, decrypting=0
-ENDPROC(speck128_xts_encrypt_neon)
-
-ENTRY(speck128_xts_decrypt_neon)
- _speck_xts_crypt n=64, decrypting=1
-ENDPROC(speck128_xts_decrypt_neon)
-
-ENTRY(speck64_xts_encrypt_neon)
- _speck_xts_crypt n=32, decrypting=0
-ENDPROC(speck64_xts_encrypt_neon)
-
-ENTRY(speck64_xts_decrypt_neon)
- _speck_xts_crypt n=32, decrypting=1
-ENDPROC(speck64_xts_decrypt_neon)
diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c
deleted file mode 100644
index f012c3ea998f..000000000000
--- a/arch/arm/crypto/speck-neon-glue.c
+++ /dev/null
@@ -1,288 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Note: the NIST recommendation for XTS only specifies a 128-bit block size,
- * but a 64-bit version (needed for Speck64) is fairly straightforward; the math
- * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial
- * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004:
- * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes
- * OCB and PMAC"), represented as 0x1B.
- */
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <crypto/algapi.h>
-#include <crypto/gf128mul.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/speck.h>
-#include <crypto/xts.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-/* The assembly functions only handle multiples of 128 bytes */
-#define SPECK_NEON_CHUNK_SIZE 128
-
-/* Speck128 */
-
-struct speck128_xts_tfm_ctx {
- struct speck128_tfm_ctx main_key;
- struct speck128_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck128_xts_crypt(struct skcipher_request *req,
- speck128_crypt_one_t crypt_one,
- speck128_xts_crypt_many_t crypt_many)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- le128 tweak;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
-
- crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- le128_xor((le128 *)dst, (const le128 *)src, &tweak);
- (*crypt_one)(&ctx->main_key, dst, dst);
- le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
- gf128mul_x_ble(&tweak, &tweak);
-
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static int speck128_xts_encrypt(struct skcipher_request *req)
-{
- return __speck128_xts_crypt(req, crypto_speck128_encrypt,
- speck128_xts_encrypt_neon);
-}
-
-static int speck128_xts_decrypt(struct skcipher_request *req)
-{
- return __speck128_xts_crypt(req, crypto_speck128_decrypt,
- speck128_xts_decrypt_neon);
-}
-
-static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- keylen /= 2;
-
- err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-/* Speck64 */
-
-struct speck64_xts_tfm_ctx {
- struct speck64_tfm_ctx main_key;
- struct speck64_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
- speck64_xts_crypt_many_t crypt_many)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- __le64 tweak;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
-
- crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- *(__le64 *)dst = *(__le64 *)src ^ tweak;
- (*crypt_one)(&ctx->main_key, dst, dst);
- *(__le64 *)dst ^= tweak;
- tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
- ((tweak & cpu_to_le64(1ULL << 63)) ?
- 0x1B : 0));
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static int speck64_xts_encrypt(struct skcipher_request *req)
-{
- return __speck64_xts_crypt(req, crypto_speck64_encrypt,
- speck64_xts_encrypt_neon);
-}
-
-static int speck64_xts_decrypt(struct skcipher_request *req)
-{
- return __speck64_xts_crypt(req, crypto_speck64_decrypt,
- speck64_xts_decrypt_neon);
-}
-
-static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- keylen /= 2;
-
- err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-static struct skcipher_alg speck_algs[] = {
- {
- .base.cra_name = "xts(speck128)",
- .base.cra_driver_name = "xts-speck128-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = SPECK128_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * SPECK128_128_KEY_SIZE,
- .max_keysize = 2 * SPECK128_256_KEY_SIZE,
- .ivsize = SPECK128_BLOCK_SIZE,
- .walksize = SPECK_NEON_CHUNK_SIZE,
- .setkey = speck128_xts_setkey,
- .encrypt = speck128_xts_encrypt,
- .decrypt = speck128_xts_decrypt,
- }, {
- .base.cra_name = "xts(speck64)",
- .base.cra_driver_name = "xts-speck64-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = SPECK64_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * SPECK64_96_KEY_SIZE,
- .max_keysize = 2 * SPECK64_128_KEY_SIZE,
- .ivsize = SPECK64_BLOCK_SIZE,
- .walksize = SPECK_NEON_CHUNK_SIZE,
- .setkey = speck64_xts_setkey,
- .encrypt = speck64_xts_encrypt,
- .decrypt = speck64_xts_decrypt,
- }
-};
-
-static int __init speck_neon_module_init(void)
-{
- if (!(elf_hwcap & HWCAP_NEON))
- return -ENODEV;
- return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-static void __exit speck_neon_module_exit(void)
-{
- crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-module_init(speck_neon_module_init);
-module_exit(speck_neon_module_exit);
-
-MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
-MODULE_ALIAS_CRYPTO("xts(speck128)");
-MODULE_ALIAS_CRYPTO("xts-speck128-neon");
-MODULE_ALIAS_CRYPTO("xts(speck64)");
-MODULE_ALIAS_CRYPTO("xts-speck64-neon");
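The remainder loop in the deleted glue advances the XTS tweak by multiplying it by x in GF(2^64), using the 0x1B reducing polynomial named in the file's header comment. That step in isolation, minus the little-endian conversions — illustration only:

#include <stdint.h>

/*
 * Multiply a GF(2^64) element by x, reducing modulo
 * x^64 + x^4 + x^3 + x + 1 (low terms 0x1B).  This mirrors the
 * tweak update in the deleted __speck64_xts_crypt() remainder
 * loop, with the cpu_to_le64/le64_to_cpu wrapping dropped.
 */
static uint64_t gf2_64_mul_x(uint64_t t)
{
	return (t << 1) ^ ((t >> 63) ? 0x1B : 0);
}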
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index b17ee03d280b..88286dd483ff 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -467,6 +467,17 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
+ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+ sub \tmp, \limit, #1
+ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
+ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
+ subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) }
+ movlo \addr, #0 @ if (tmp < 0) addr = NULL
+ csdb
+#endif
+ .endm
+
.macro uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 237aa52d8733..36c951dd23b8 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -62,8 +62,8 @@ do { \
struct pt_regs;
void die(const char *msg, struct pt_regs *regs, int err);
-struct siginfo;
-void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+void arm_notify_die(const char *str, struct pt_regs *regs,
+ int signo, int si_code, void __user *addr,
unsigned long err, unsigned long trap);
#ifdef CONFIG_ARM_LPAE
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 8436f6ade57d..965b7c846ecb 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -100,8 +100,10 @@ static inline unsigned long dma_max_pfn(struct device *dev)
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent);
+#ifdef CONFIG_MMU
#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);
+#endif
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 9e842ff41768..18b0197f2384 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -16,9 +16,6 @@ extern void __gnu_mcount_nc(void);
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
-#ifdef CONFIG_OLD_MCOUNT
- bool old_mcount;
-#endif
};
static inline unsigned long ftrace_call_adjust(unsigned long addr)
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 2cfbc531f63b..6b51826ab3d1 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -28,7 +28,6 @@
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
-#include <xen/xen.h>
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -459,20 +458,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
#include <asm-generic/io.h>
-/*
- * can the hardware map this into one segment or not, given no other
- * constraints.
- */
-#define BIOVEC_MERGEABLE(vec1, vec2) \
- ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-struct bio_vec;
-extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
- (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
-
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 3ab8b3781bfe..b95f8d0d9f17 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -133,8 +133,7 @@
* space.
*/
#define KVM_PHYS_SHIFT (40)
-#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL))
+
#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
/* Virtualization Translation Control Register (VTCR) bits */
@@ -161,6 +160,7 @@
#else
#define VTTBR_X (5 - KVM_T0SZ)
#endif
+#define VTTBR_CNP_BIT _AC(1, UL)
#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
#define VTTBR_VMID_SHIFT _AC(48, ULL)
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 3ad482d2f1eb..5ca5d9af0c26 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -273,7 +273,7 @@ static inline void __cpu_init_stage2(void)
kvm_call_hyp(__init_stage2_translation);
}
-static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
+static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
return 0;
}
@@ -354,4 +354,15 @@ static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);
+static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
+{
+ /*
+ * On 32bit ARM, VMs get a static 40bit IPA stage2 setup,
+ * so any non-zero value used as type is illegal.
+ */
+ if (type)
+ return -EINVAL;
+ return 0;
+}
+
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 265ea9cf7df7..1098ffc3d54b 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -35,16 +35,12 @@
addr; \
})
-/*
- * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
- */
-#define KVM_MMU_CACHE_MIN_PAGES 2
-
#ifndef __ASSEMBLY__
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>
@@ -52,6 +48,13 @@
/* Ensure compatibility with arm64 */
#define VA_BITS 32
+#define kvm_phys_shift(kvm) KVM_PHYS_SHIFT
+#define kvm_phys_size(kvm) (1ULL << kvm_phys_shift(kvm))
+#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - 1ULL)
+#define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK
+
+#define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t))
+
int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **kaddr,
@@ -355,6 +358,13 @@ static inline int hyp_map_aux_data(void)
#define kvm_phys_to_vttbr(addr) (addr)
+static inline void kvm_set_ipa_limit(void) {}
+
+static inline bool kvm_cpu_has_cnp(void)
+{
+ return false;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/paravirt.h b/arch/arm/include/asm/paravirt.h
index d51e5cd31d01..cdbf02d9c1d4 100644
--- a/arch/arm/include/asm/paravirt.h
+++ b/arch/arm/include/asm/paravirt.h
@@ -10,11 +10,16 @@ extern struct static_key paravirt_steal_rq_enabled;
struct pv_time_ops {
unsigned long long (*steal_clock)(int cpu);
};
-extern struct pv_time_ops pv_time_ops;
+
+struct paravirt_patch_template {
+ struct pv_time_ops time;
+};
+
+extern struct paravirt_patch_template pv_ops;
static inline u64 paravirt_steal_clock(int cpu)
{
- return pv_time_ops.steal_clock(cpu);
+ return pv_ops.time.steal_clock(cpu);
}
#endif
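With pv_time_ops folded into a paravirt_patch_template instance named pv_ops, callers index by group and then slot, mirroring the x86 layout. A hedged sketch of how a time backend would now install its hook — my_steal_clock is hypothetical; the pv_ops.time.steal_clock slot is the one from this hunk:

/*
 * Sketch of a backend wiring up steal_clock under the new layout.
 * A real backend would read per-cpu stolen time from its hypervisor
 * interface here.
 */
static unsigned long long my_steal_clock(int cpu)
{
	return 0;
}

static void register_my_pv_time(void)
{
	pv_ops.time.steal_clock = my_steal_clock;
}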
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index 460d616bb2d6..f6a7ea805232 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -19,43 +19,53 @@
#ifndef __ARM_S2_PGTABLE_H_
#define __ARM_S2_PGTABLE_H_
-#define stage2_pgd_none(pgd) pgd_none(pgd)
-#define stage2_pgd_clear(pgd) pgd_clear(pgd)
-#define stage2_pgd_present(pgd) pgd_present(pgd)
-#define stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud)
-#define stage2_pud_offset(pgd, address) pud_offset(pgd, address)
-#define stage2_pud_free(pud) pud_free(NULL, pud)
-
-#define stage2_pud_none(pud) pud_none(pud)
-#define stage2_pud_clear(pud) pud_clear(pud)
-#define stage2_pud_present(pud) pud_present(pud)
-#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd)
-#define stage2_pmd_offset(pud, address) pmd_offset(pud, address)
-#define stage2_pmd_free(pmd) pmd_free(NULL, pmd)
-
-#define stage2_pud_huge(pud) pud_huge(pud)
+/*
+ * kvm_mmu_cache_min_pages() is the number of pages required
+ * to install a stage-2 translation. We pre-allocate the entry
+ * level table at VM creation. Since we have a 3 level page-table,
+ * we need only two pages to add a new mapping.
+ */
+#define kvm_mmu_cache_min_pages(kvm) 2
+
+#define stage2_pgd_none(kvm, pgd) pgd_none(pgd)
+#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd)
+#define stage2_pgd_present(kvm, pgd) pgd_present(pgd)
+#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud)
+#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address)
+#define stage2_pud_free(kvm, pud) pud_free(NULL, pud)
+
+#define stage2_pud_none(kvm, pud) pud_none(pud)
+#define stage2_pud_clear(kvm, pud) pud_clear(pud)
+#define stage2_pud_present(kvm, pud) pud_present(pud)
+#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd)
+#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address)
+#define stage2_pmd_free(kvm, pmd) pmd_free(NULL, pmd)
+
+#define stage2_pud_huge(kvm, pud) pud_huge(pud)
/* Open coded p*d_addr_end that can deal with 64bit addresses */
-static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline phys_addr_t
+stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
return (boundary - 1 < end - 1) ? boundary : end;
}
-#define stage2_pud_addr_end(addr, end) (end)
+#define stage2_pud_addr_end(kvm, addr, end) (end)
-static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline phys_addr_t
+stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;
return (boundary - 1 < end - 1) ? boundary : end;
}
-#define stage2_pgd_index(addr) pgd_index(addr)
+#define stage2_pgd_index(kvm, addr) pgd_index(addr)
-#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep)
-#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#define stage2_pud_table_empty(pudp) false
+#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
+#define stage2_pud_table_empty(kvm, pudp) false
#endif /* __ARM_S2_PGTABLE_H_ */
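Every stage-2 helper now takes the struct kvm pointer even though 32-bit ARM ignores it; the point is a single calling convention shared with arm64, where the IPA size — and hence the table geometry — is per-VM. A minimal sketch of a range walk under the new convention (not a kernel function):

/*
 * Sketch of a stage-2 range walk using the reworked helpers above.
 * The kvm argument is unused on 32-bit ARM but keeps the signature
 * identical to arm64's; illustration only.
 */
static void walk_stage2_pgds(struct kvm *kvm, phys_addr_t addr,
			     phys_addr_t end)
{
	phys_addr_t next;

	do {
		next = stage2_pgd_addr_end(kvm, addr, end);
		/* ... descend via stage2_pud_offset(kvm, ...) here ... */
	} while (addr = next, addr != end);
}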
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 9b37b6ab27fe..8f55dc520a3e 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -121,8 +121,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
struct user_vfp;
struct user_vfp_exc;
-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
- struct user_vfp_exc __user *);
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
extern int vfp_restore_user_hwstate(struct user_vfp *,
struct user_vfp_exc *);
#endif
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 5d88d2f22b2c..2a786f54d8b8 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -33,6 +33,9 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
/* Replace task scheduler's default cpu-invariant accounting */
#define arch_scale_cpu_capacity topology_get_cpu_scale
+/* Enable topology flag updates */
+#define arch_update_cpu_topology topology_update_cpu_topology
+
#else
static inline void init_cpu_topology(void) { }
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 5451e1f05a19..c136eef8f690 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -69,6 +69,14 @@ extern int __put_user_bad(void);
static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
+
+ /*
+ * Prevent a mispredicted conditional call to set_fs from forwarding
+ * the wrong address limit to access_ok under speculation.
+ */
+ dsb(nsh);
+ isb();
+
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
@@ -92,6 +100,32 @@ static inline void set_fs(mm_segment_t fs)
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
/*
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
+ * is above the current addr_limit.
+ */
+#define uaccess_mask_range_ptr(ptr, size) \
+ ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
+ size_t size)
+{
+ void __user *safe_ptr = (void __user *)ptr;
+ unsigned long tmp;
+
+ asm volatile(
+ " sub %1, %3, #1\n"
+ " subs %1, %1, %0\n"
+ " addhs %1, %1, #1\n"
+ " subhss %1, %1, %2\n"
+ " movlo %0, #0\n"
+ : "+r" (safe_ptr), "=&r" (tmp)
+ : "r" (size), "r" (current_thread_info()->addr_limit)
+ : "cc");
+
+ csdb();
+ return safe_ptr;
+}
+
+/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
* which read from user space (*get_*) need to take care not to leak
@@ -362,6 +396,14 @@ do { \
__pu_err; \
})
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1.1, all accessors need to include
+ * verification of the address space.
+ */
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#else
#define __put_user(x, ptr) \
({ \
long __pu_err = 0; \
@@ -369,12 +411,6 @@ do { \
__pu_err; \
})
-#define __put_user_error(x, ptr, err) \
-({ \
- __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
- (void) 0; \
-})
-
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
do { \
unsigned long __pu_addr = (unsigned long)__pu_ptr; \
@@ -454,6 +490,7 @@ do { \
: "r" (x), "i" (-EFAULT) \
: "cc")
+#endif /* !CONFIG_CPU_SPECTRE */
#ifdef CONFIG_MMU
extern unsigned long __must_check
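The uaccess_mask_range_ptr assembler macro added earlier and the C helper above compute the same predicate branchlessly: limit - 1 - addr borrows when addr is already past the limit; otherwise the adjusted limit - addr minus size borrows exactly when addr + size > limit, and movlo zeroes the pointer before the csdb barrier. The predicate restated with branches, for readability only — the kernel version must stay branchless (addhs/subhss/movlo + csdb) so the outcome cannot be bypassed under speculation:

#include <stddef.h>
#include <stdint.h>

/*
 * Model of what the conditional-arithmetic sequence computes.
 * Not speculation-safe as written; only the branchless form with
 * the csdb barrier is.
 */
static const void *mask_range_ptr_model(const void *ptr, size_t size,
					uintptr_t limit)
{
	uintptr_t addr = (uintptr_t)ptr;

	if (addr > limit - 1)		/* first subtraction borrows */
		return NULL;
	if (size > limit - addr)	/* second subtraction borrows */
		return NULL;
	return ptr;
}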
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 076090d2dbf5..88ef2ce1f69a 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -16,23 +16,23 @@
#include <uapi/asm/unistd.h>
#include <asm/unistd-nr.h>
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_OLD_MMAP
#define __ARCH_WANT_SYS_OLD_SELECT
+#define __ARCH_WANT_SYS_UTIME
#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_SOCKETCALL
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 783fbb4de5f9..8fa2dc21d332 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -167,9 +167,6 @@ EXPORT_SYMBOL(_find_next_bit_be);
#endif
#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_OLD_MCOUNT
-EXPORT_SYMBOL(mcount);
-#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 746565a876dc..0465d65d23de 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -296,16 +296,15 @@ __sys_trace:
cmp scno, #-1 @ skip the syscall?
bne 2b
add sp, sp, #S_OFF @ restore stack
- b ret_slow_syscall
-__sys_trace_return:
- str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+__sys_trace_return_nosave:
+ enable_irq_notrace
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
-__sys_trace_return_nosave:
- enable_irq_notrace
+__sys_trace_return:
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index efcd9f25a14b..0be69e551a64 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -15,23 +15,8 @@
* start of every function. In mcount, apart from the function's address (in
* lr), we need to get hold of the function's caller's address.
*
- * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
- *
- * bl mcount
- *
- * These versions have the limitation that in order for the mcount routine to
- * be able to determine the function's caller's address, an APCS-style frame
- * pointer (which is set up with something like the code below) is required.
- *
- * mov ip, sp
- * push {fp, ip, lr, pc}
- * sub fp, ip, #4
- *
- * With EABI, these frame pointers are not available unless -mapcs-frame is
- * specified, and if building as Thumb-2, not even then.
- *
- * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
- * with call sites like:
+ * Newer GCCs (4.4+) solve this problem by using a version of mcount with call
+ * sites like:
*
* push {lr}
* bl __gnu_mcount_nc
@@ -46,17 +31,10 @@
* allows it to be clobbered in subroutines and doesn't use it to hold
* parameters.)
*
- * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
- * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
- * arch/arm/kernel/ftrace.c).
+ * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}"
+ * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
*/
-#ifndef CONFIG_OLD_MCOUNT
-#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
-#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
-#endif
-#endif
-
.macro mcount_adjust_addr rd, rn
bic \rd, \rn, #1 @ clear the Thumb bit if present
sub \rd, \rd, #MCOUNT_INSN_SIZE
@@ -209,51 +187,6 @@ ftrace_graph_call\suffix:
mcount_exit
.endm
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * mcount
- */
-
-.macro mcount_enter
- stmdb sp!, {r0-r3, lr}
-.endm
-
-.macro mcount_get_lr reg
- ldr \reg, [fp, #-4]
-.endm
-
-.macro mcount_exit
- ldr lr, [fp, #-4]
- ldmia sp!, {r0-r3, pc}
-.endm
-
-ENTRY(mcount)
-#ifdef CONFIG_DYNAMIC_FTRACE
- stmdb sp!, {lr}
- ldr lr, [fp, #-4]
- ldmia sp!, {pc}
-#else
- __mcount _old
-#endif
-ENDPROC(mcount)
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(ftrace_caller_old)
- __ftrace_caller _old
-ENDPROC(ftrace_caller_old)
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller_old)
- __ftrace_graph_caller
-ENDPROC(ftrace_graph_caller_old)
-#endif
-
-.purgem mcount_enter
-.purgem mcount_get_lr
-.purgem mcount_exit
-#endif
-
/*
* __gnu_mcount_nc
*/
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 5617932a83df..0142fcfcc3d3 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -47,30 +47,6 @@ void arch_ftrace_update_code(int command)
stop_machine(__ftrace_modify_code, &command, NULL);
}
-#ifdef CONFIG_OLD_MCOUNT
-#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
-#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
-
-#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
-
-static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
-{
- return rec->arch.old_mcount ? OLD_NOP : NOP;
-}
-
-static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
-{
- if (!rec->arch.old_mcount)
- return addr;
-
- if (addr == MCOUNT_ADDR)
- addr = OLD_MCOUNT_ADDR;
- else if (addr == FTRACE_ADDR)
- addr = OLD_FTRACE_ADDR;
-
- return addr;
-}
-#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
return NOP;
@@ -80,7 +56,6 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
return addr;
}
-#endif
int ftrace_arch_code_modify_prepare(void)
{
@@ -150,15 +125,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
}
#endif
-#ifdef CONFIG_OLD_MCOUNT
- if (!ret) {
- pc = (unsigned long)&ftrace_call_old;
- new = ftrace_call_replace(pc, (unsigned long)func);
-
- ret = ftrace_modify_code(pc, 0, new, false);
- }
-#endif
-
return ret;
}
@@ -203,16 +169,6 @@ int ftrace_make_nop(struct module *mod,
new = ftrace_nop_replace(rec);
ret = ftrace_modify_code(ip, old, new, true);
-#ifdef CONFIG_OLD_MCOUNT
- if (ret == -EINVAL && addr == MCOUNT_ADDR) {
- rec->arch.old_mcount = true;
-
- old = ftrace_call_replace(ip, adjust_address(rec, addr));
- new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new, true);
- }
-#endif
-
return ret;
}
@@ -290,13 +246,6 @@ static int ftrace_modify_graph_caller(bool enable)
#endif
-#ifdef CONFIG_OLD_MCOUNT
- if (!ret)
- ret = __ftrace_modify_caller(&ftrace_graph_call_old,
- ftrace_graph_caller_old,
- enable);
-#endif
-
return ret;
}
diff --git a/arch/arm/kernel/paravirt.c b/arch/arm/kernel/paravirt.c
index 53f371ed4568..75c158b0353f 100644
--- a/arch/arm/kernel/paravirt.c
+++ b/arch/arm/kernel/paravirt.c
@@ -21,5 +21,5 @@
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
-struct pv_time_ops pv_time_ops;
-EXPORT_SYMBOL_GPL(pv_time_ops);
+struct paravirt_patch_template pv_ops;
+EXPORT_SYMBOL_GPL(pv_ops);
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 36718a424358..6fa5b6387556 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -203,15 +203,8 @@ void ptrace_disable(struct task_struct *child)
*/
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = TRAP_BRKPT;
- info.si_addr = (void __user *)instruction_pointer(regs);
-
- force_sig_info(SIGTRAP, &info, tsk);
+ force_sig_fault(SIGTRAP, TRAP_BRKPT,
+ (void __user *)instruction_pointer(regs), tsk);
}
static int break_trap(struct pt_regs *regs, unsigned int instr)
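The siginfo conversions in this series collapse the fill-and-send boilerplate into a single call; at this point in the tree force_sig_fault() still takes the target task explicitly. The shape of the conversion, reusing the SIGTRAP/TRAP_BRKPT case from the hunk above — a sketch, not new kernel code:

/*
 * One call replaces clear_siginfo() plus five field assignments
 * plus force_sig_info(); si_errno is implicitly zero.
 */
static void report_breakpoint(struct pt_regs *regs,
			      struct task_struct *tsk)
{
	force_sig_fault(SIGTRAP, TRAP_BRKPT,
			(void __user *)instruction_pointer(regs), tsk);
}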
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index b8f766cf3a90..b908382b69ff 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -77,8 +77,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
kframe->magic = IWMMXT_MAGIC;
kframe->size = IWMMXT_STORAGE_SIZE;
iwmmxt_task_copy(current_thread_info(), &kframe->storage);
-
- err = __copy_to_user(frame, kframe, sizeof(*frame));
} else {
/*
* For bug-compatibility with older kernels, some space
@@ -86,10 +84,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
* Set the magic and size appropriately so that properly
* written userspace can skip it reliably:
*/
- __put_user_error(DUMMY_MAGIC, &frame->magic, err);
- __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
+ *kframe = (struct iwmmxt_sigframe) {
+ .magic = DUMMY_MAGIC,
+ .size = IWMMXT_STORAGE_SIZE,
+ };
}
+ err = __copy_to_user(frame, kframe, sizeof(*kframe));
+
return err;
}
@@ -135,17 +137,18 @@ static int restore_iwmmxt_context(char __user **auxp)
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
- const unsigned long magic = VFP_MAGIC;
- const unsigned long size = VFP_STORAGE_SIZE;
+ struct vfp_sigframe kframe;
int err = 0;
- __put_user_error(magic, &frame->magic, err);
- __put_user_error(size, &frame->size, err);
+ memset(&kframe, 0, sizeof(kframe));
+ kframe.magic = VFP_MAGIC;
+ kframe.size = VFP_STORAGE_SIZE;
+ err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
if (err)
- return -EFAULT;
+ return err;
- return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
+ return __copy_to_user(frame, &kframe, sizeof(kframe));
}
static int restore_vfp_context(char __user **auxp)
@@ -288,30 +291,35 @@ static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
struct aux_sigframe __user *aux;
+ struct sigcontext context;
int err = 0;
- __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
- __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
- __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
- __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
- __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
- __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
- __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
- __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
- __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
- __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
- __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
- __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
- __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
- __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
- __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
- __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
- __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
-
- __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
- __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
- __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
- __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+ context = (struct sigcontext) {
+ .arm_r0 = regs->ARM_r0,
+ .arm_r1 = regs->ARM_r1,
+ .arm_r2 = regs->ARM_r2,
+ .arm_r3 = regs->ARM_r3,
+ .arm_r4 = regs->ARM_r4,
+ .arm_r5 = regs->ARM_r5,
+ .arm_r6 = regs->ARM_r6,
+ .arm_r7 = regs->ARM_r7,
+ .arm_r8 = regs->ARM_r8,
+ .arm_r9 = regs->ARM_r9,
+ .arm_r10 = regs->ARM_r10,
+ .arm_fp = regs->ARM_fp,
+ .arm_ip = regs->ARM_ip,
+ .arm_sp = regs->ARM_sp,
+ .arm_lr = regs->ARM_lr,
+ .arm_pc = regs->ARM_pc,
+ .arm_cpsr = regs->ARM_cpsr,
+
+ .trap_no = current->thread.trap_no,
+ .error_code = current->thread.error_code,
+ .fault_address = current->thread.address,
+ .oldmask = set->sig[0],
+ };
+
+ err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
@@ -328,7 +336,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
if (err == 0)
err |= preserve_vfp_context(&aux->vfp);
#endif
- __put_user_error(0, &aux->end_magic, err);
+ err |= __put_user(0, &aux->end_magic);
return err;
}
@@ -491,7 +499,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
/*
* Set uc.uc_flags to a value which sc.trap_no would never have.
*/
- __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+ err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
err |= setup_sigframe(frame, regs, set);
if (err == 0)
@@ -511,8 +519,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
- __put_user_error(0, &frame->sig.uc.uc_flags, err);
- __put_user_error(NULL, &frame->sig.uc.uc_link, err);
+ err |= __put_user(0, &frame->sig.uc.uc_flags);
+ err |= __put_user(NULL, &frame->sig.uc.uc_link);
err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
err |= setup_sigframe(&frame->sig, regs, set);
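The signal.c hunks all apply one pattern: assemble the frame in a kernel-side struct, then issue a single __copy_to_user(), so the remaining user access is one bulk copy instead of many __put_user_error() stores that the Spectre-hardened put_user path would otherwise have to cover piecemeal. The pattern with a hypothetical frame — struct demo_frame and its fields are stand-ins; the magic value is borrowed from the setup_frame() hunk above:

/*
 * Populate a kernel-side copy, then one bulk copy-out.  Mapping a
 * nonzero __copy_to_user() return to -EFAULT is this sketch's
 * choice; preserve_vfp_context() above returns it directly.
 */
struct demo_frame {
	unsigned long magic;
	unsigned long size;
};

static int preserve_demo_context(struct demo_frame __user *frame)
{
	struct demo_frame kframe = {
		.magic = 0x5ac3c35a,
		.size  = sizeof(kframe),
	};

	return __copy_to_user(frame, &kframe, sizeof(kframe)) ? -EFAULT : 0;
}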
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 80517f293eb9..a188d5e8ab7f 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -98,22 +98,20 @@ static int proc_status_show(struct seq_file *m, void *v)
*/
static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
- siginfo_t info;
+ int si_code;
- clear_siginfo(&info);
down_read(&current->mm->mmap_sem);
if (find_vma(current->mm, addr) == NULL)
- info.si_code = SEGV_MAPERR;
+ si_code = SEGV_MAPERR;
else
- info.si_code = SEGV_ACCERR;
+ si_code = SEGV_ACCERR;
up_read(&current->mm->mmap_sem);
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_addr = (void *) instruction_pointer(regs);
-
pr_debug("SWP{B} emulation: access caused memory abort!\n");
- arm_notify_die("Illegal memory access", regs, &info, 0, 0);
+ arm_notify_die("Illegal memory access", regs,
+ SIGSEGV, si_code,
+ (void __user *)instruction_pointer(regs),
+ 0, 0);
abtcounter++;
}
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index f0dd4b6ebb63..40da0872170f 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
int maxevents, int timeout)
{
struct epoll_event *kbuf;
+ struct oabi_epoll_event e;
mm_segment_t fs;
long ret, err, i;
@@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
set_fs(fs);
err = 0;
for (i = 0; i < ret; i++) {
- __put_user_error(kbuf[i].events, &events->events, err);
- __put_user_error(kbuf[i].data, &events->data, err);
+ e.events = kbuf[i].events;
+ e.data = kbuf[i].data;
+ err = __copy_to_user(events, &e, sizeof(e));
+ if (err)
+ break;
events++;
}
kfree(kbuf);
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index badf02ca3693..2d668cff8ef4 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -365,13 +365,14 @@ void die(const char *str, struct pt_regs *regs, int err)
}
void arm_notify_die(const char *str, struct pt_regs *regs,
- struct siginfo *info, unsigned long err, unsigned long trap)
+ int signo, int si_code, void __user *addr,
+ unsigned long err, unsigned long trap)
{
if (user_mode(regs)) {
current->thread.error_code = err;
current->thread.trap_no = trap;
- force_sig_info(info->si_signo, info, current);
+ force_sig_fault(signo, si_code, addr, current);
} else {
die(str, regs, err);
}
@@ -438,10 +439,8 @@ int call_undef_hook(struct pt_regs *regs, unsigned int instr)
asmlinkage void do_undefinstr(struct pt_regs *regs)
{
unsigned int instr;
- siginfo_t info;
void __user *pc;
- clear_siginfo(&info);
pc = (void __user *)instruction_pointer(regs);
if (processor_mode(regs) == SVC_MODE) {
@@ -485,13 +484,8 @@ die_sig:
dump_instr(KERN_INFO, regs);
}
#endif
-
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = pc;
-
- arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
+ arm_notify_die("Oops - undefined instruction", regs,
+ SIGILL, ILL_ILLOPC, pc, 0, 6);
}
NOKPROBE_SYMBOL(do_undefinstr)
@@ -539,9 +533,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason)
static int bad_syscall(int n, struct pt_regs *regs)
{
- siginfo_t info;
-
- clear_siginfo(&info);
if ((current->personality & PER_MASK) != PER_LINUX) {
send_sig(SIGSEGV, current, 1);
return regs->ARM_r0;
@@ -555,13 +546,10 @@ static int bad_syscall(int n, struct pt_regs *regs)
}
#endif
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
- (thumb_mode(regs) ? 2 : 4);
-
- arm_notify_die("Oops - bad syscall", regs, &info, n, 0);
+ arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
+ (void __user *)instruction_pointer(regs) -
+ (thumb_mode(regs) ? 2 : 4),
+ n, 0);
return regs->ARM_r0;
}
@@ -607,20 +595,13 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
- siginfo_t info;
-
- clear_siginfo(&info);
if ((no >> 16) != (__ARM_NR_BASE>> 16))
return bad_syscall(no, regs);
switch (no & 0xffff) {
case 0: /* branch through 0 */
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = NULL;
-
- arm_notify_die("branch through zero", regs, &info, 0, 0);
+ arm_notify_die("branch through zero", regs,
+ SIGSEGV, SEGV_MAPERR, NULL, 0, 0);
return 0;
case NR(breakpoint): /* SWI BREAK_POINT */
@@ -688,13 +669,10 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
}
}
#endif
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
- (thumb_mode(regs) ? 2 : 4);
-
- arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
+ arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
+ (void __user *)instruction_pointer(regs) -
+ (thumb_mode(regs) ? 2 : 4),
+ no, 0);
return 0;
}
@@ -744,9 +722,6 @@ asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
- siginfo_t info;
-
- clear_siginfo(&info);
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_BADABORT) {
@@ -757,12 +732,8 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
}
#endif
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = (void __user *)addr;
-
- arm_notify_die("unknown data abort code", regs, &info, instr, 0);
+ arm_notify_die("unknown data abort code", regs,
+ SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0);
}
void __readwrite_bug(const char *fn)
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 3593d5c1acd2..8c74037ade22 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -96,7 +96,6 @@ SECTIONS
INIT_SETUP(16)
INIT_CALLS
CON_INITCALL
- SECURITY_INITCALL
INIT_RAM_FS
}
diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h
index ae5fdff18406..8247bc15addc 100644
--- a/arch/arm/kernel/vmlinux.lds.h
+++ b/arch/arm/kernel/vmlinux.lds.h
@@ -49,6 +49,8 @@
#define ARM_DISCARD \
*(.ARM.exidx.exit.text) \
*(.ARM.extab.exit.text) \
+ *(.ARM.exidx.text.exit) \
+ *(.ARM.extab.text.exit) \
ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
ARM_EXIT_DISCARD(EXIT_TEXT) \
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 450c7a4fbc8a..cb094e55dc5f 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -478,15 +478,15 @@ static const struct coproc_reg cp15_regs[] = {
/* ICC_SGI1R */
{ CRm64(12), Op1( 0), is64, access_gic_sgi},
- /* ICC_ASGI1R */
- { CRm64(12), Op1( 1), is64, access_gic_sgi},
- /* ICC_SGI0R */
- { CRm64(12), Op1( 2), is64, access_gic_sgi},
/* VBAR: swapped by interrupt.S. */
{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
NULL, reset_val, c12_VBAR, 0x00000000 },
+ /* ICC_ASGI1R */
+ { CRm64(12), Op1( 1), is64, access_gic_sgi},
+ /* ICC_SGI0R */
+ { CRm64(12), Op1( 2), is64, access_gic_sgi},
/* ICC_SRE */
{ CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index a826df3d3814..6709a8d33963 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user)
#ifdef CONFIG_CPU_SPECTRE
get_thread_info r3
ldr r3, [r3, #TI_ADDR_LIMIT]
- adds ip, r1, r2 @ ip=addr+size
- sub r3, r3, #1 @ addr_limit - 1
- cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
- movcs r1, #0 @ addr = NULL
- csdb
+ uaccess_mask_range_ptr r1, r2, r3, ip
#endif
#include "copy_template.S"
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index caf5019d8161..970abe521197 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -94,6 +94,11 @@
ENTRY(__copy_to_user_std)
WEAK(arm_copy_to_user)
+#ifdef CONFIG_CPU_SPECTRE
+ get_thread_info r3
+ ldr r3, [r3, #TI_ADDR_LIMIT]
+ uaccess_mask_range_ptr r0, r2, r3, ip
+#endif
#include "copy_template.S"
@@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std)
rsb r0, r0, r2
copy_abort_end
.popsection
-
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 9b4ed1728616..73dc7360cbdd 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
n = __copy_to_user_std(to, from, n);
uaccess_restore(ua_flags);
} else {
- n = __copy_to_user_memcpy(to, from, n);
+ n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
+ from, n);
}
return n;
}
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index a7c6ae13c945..bfe1c4d06901 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -149,6 +149,14 @@ exit_suspend:
ENDPROC(at91_pm_suspend_in_sram)
ENTRY(at91_backup_mode)
+ /* Switch the master clock source to slow clock. */
+ ldr pmc, .pmc_base
+ ldr tmp1, [pmc, #AT91_PMC_MCKR]
+ bic tmp1, tmp1, #AT91_PMC_CSS
+ str tmp1, [pmc, #AT91_PMC_MCKR]
+
+ wait_mckrdy
+
/*BUMEN*/
ldr r0, .sfr
mov tmp1, #0x1
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 353f9e5a1454..efdaa27241c5 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -130,10 +130,10 @@ static struct platform_device davinci_fb_device = {
};
static const struct gpio_led ntosd2_leds[] = {
- { .name = "led1_green", .gpio = GPIO(10), },
- { .name = "led1_red", .gpio = GPIO(11), },
- { .name = "led2_green", .gpio = GPIO(12), },
- { .name = "led2_red", .gpio = GPIO(13), },
+ { .name = "led1_green", .gpio = 10, },
+ { .name = "led1_red", .gpio = 11, },
+ { .name = "led2_green", .gpio = 12, },
+ { .name = "led2_red", .gpio = 13, },
};
static struct gpio_led_platform_data ntosd2_leds_data = {
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index faf48a3b1fea..706515faee06 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -141,6 +141,15 @@ EXPORT_SYMBOL_GPL(ep93xx_chip_revision);
*************************************************************************/
static struct resource ep93xx_gpio_resource[] = {
DEFINE_RES_MEM(EP93XX_GPIO_PHYS_BASE, 0xcc),
+ DEFINE_RES_IRQ(IRQ_EP93XX_GPIO_AB),
+ DEFINE_RES_IRQ(IRQ_EP93XX_GPIO0MUX),
+ DEFINE_RES_IRQ(IRQ_EP93XX_GPIO1MUX),
+ DEFINE_RES_IRQ(IRQ_EP93XX_GPIO2MUX),
+ DEFINE_RES_IRQ(IRQ_EP93XX_GPIO3MUX),
+ DEFINE_RES_IRQ(IRQ_EP93XX_GPIO4MUX),
+ DEFINE_RES_IRQ(IRQ_EP93XX_GPIO5MUX),
+ DEFINE_RES_IRQ(IRQ_EP93XX_GPIO6MUX),
+ DEFINE_RES_IRQ(IRQ_EP93XX_GPIO7MUX),
};
static struct platform_device ep93xx_gpio_device = {
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index 45940c1d7787..cf0cb58b3454 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -23,8 +23,7 @@
#include <linux/i2c.h>
#include <linux/fb.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/rawnand.h>
+#include <linux/mtd/platnand.h>
#include <mach/hardware.h>
#include <linux/platform_data/video-ep93xx.h>
@@ -43,12 +42,11 @@
#define SNAPPERCL15_NAND_CEN (1 << 11) /* Chip enable (active low) */
#define SNAPPERCL15_NAND_RDY (1 << 14) /* Device ready */
-#define NAND_CTRL_ADDR(chip) (chip->IO_ADDR_W + 0x40)
+#define NAND_CTRL_ADDR(chip) (chip->legacy.IO_ADDR_W + 0x40)
-static void snappercl15_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+static void snappercl15_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
static u16 nand_state = SNAPPERCL15_NAND_WPN;
u16 set;
@@ -70,13 +68,12 @@ static void snappercl15_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
}
if (cmd != NAND_CMD_NONE)
- __raw_writew((cmd & 0xff) | nand_state, chip->IO_ADDR_W);
+ __raw_writew((cmd & 0xff) | nand_state,
+ chip->legacy.IO_ADDR_W);
}
-static int snappercl15_nand_dev_ready(struct mtd_info *mtd)
+static int snappercl15_nand_dev_ready(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
return !!(__raw_readw(NAND_CTRL_ADDR(chip)) & SNAPPERCL15_NAND_RDY);
}
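This is the first of several boards converted to the reworked raw-NAND API: controller callbacks receive struct nand_chip * directly instead of struct mtd_info *, and the MMIO window pointers move under chip->legacy. The same mechanical change repeats in the ts72xx, qong, ixdp425 and omap1 hunks below. The new callback shape in isolation — the 0x40 control-latch offset and ready bit follow the snappercl15 hunk above; demo_nand_dev_ready is a hedged sketch, not a driver:

/*
 * Converted callback shape: no mtd_to_nand() indirection, and the
 * I/O window comes from chip->legacy.
 */
#define DEMO_NAND_RDY	(1 << 14)

static int demo_nand_dev_ready(struct nand_chip *chip)
{
	return !!(__raw_readw(chip->legacy.IO_ADDR_W + 0x40) &
		  DEMO_NAND_RDY);
}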
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index c089a2a4fe30..c6a533699b00 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -16,8 +16,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/spi/mmc_spi.h>
@@ -76,13 +75,11 @@ static void __init ts72xx_map_io(void)
#define TS72XX_NAND_CONTROL_ADDR_LINE 22 /* 0xN0400000 */
#define TS72XX_NAND_BUSY_ADDR_LINE 23 /* 0xN0800000 */
-static void ts72xx_nand_hwcontrol(struct mtd_info *mtd,
+static void ts72xx_nand_hwcontrol(struct nand_chip *chip,
int cmd, unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
if (ctrl & NAND_CTRL_CHANGE) {
- void __iomem *addr = chip->IO_ADDR_R;
+ void __iomem *addr = chip->legacy.IO_ADDR_R;
unsigned char bits;
addr += (1 << TS72XX_NAND_CONTROL_ADDR_LINE);
@@ -96,13 +93,12 @@ static void ts72xx_nand_hwcontrol(struct mtd_info *mtd,
}
if (cmd != NAND_CMD_NONE)
- __raw_writeb(cmd, chip->IO_ADDR_W);
+ __raw_writeb(cmd, chip->legacy.IO_ADDR_W);
}
-static int ts72xx_nand_device_ready(struct mtd_info *mtd)
+static int ts72xx_nand_device_ready(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- void __iomem *addr = chip->IO_ADDR_R;
+ void __iomem *addr = chip->legacy.IO_ADDR_R;
addr += (1 << TS72XX_NAND_BUSY_ADDR_LINE);
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index 5e366824814f..2e1e540f2e5a 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -18,6 +18,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
#include <linux/gpio.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
@@ -175,6 +176,7 @@ static struct resource mx21ads_mmgpio_resource =
DEFINE_RES_MEM_NAMED(MX21ADS_IO_REG, SZ_2, "dat");
static struct bgpio_pdata mx21ads_mmgpio_pdata = {
+ .label = "mx21ads-mmgpio",
.base = MX21ADS_MMGPIO_BASE,
.ngpio = 16,
};
@@ -203,7 +205,6 @@ static struct regulator_init_data mx21ads_lcd_regulator_init_data = {
static struct fixed_voltage_config mx21ads_lcd_regulator_pdata = {
.supply_name = "LCD",
.microvolts = 3300000,
- .gpio = MX21ADS_IO_LCDON,
.enable_high = 1,
.init_data = &mx21ads_lcd_regulator_init_data,
};
@@ -216,6 +217,14 @@ static struct platform_device mx21ads_lcd_regulator = {
},
};
+static struct gpiod_lookup_table mx21ads_lcd_regulator_gpiod_table = {
+ .dev_id = "reg-fixed-voltage.0", /* Let's hope ID 0 is what we get */
+ .table = {
+ GPIO_LOOKUP("mx21ads-mmgpio", 9, NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
/*
* Connected is a portrait Sharp-QVGA display
* of type: LQ035Q7DB02
@@ -311,6 +320,7 @@ static void __init mx21ads_late_init(void)
{
imx21_add_mxc_mmc(0, &mx21ads_sdhc_pdata);
+ gpiod_add_lookup_table(&mx21ads_lcd_regulator_gpiod_table);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
mx21ads_cs8900_resources[1].start =
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index a04bb094ded1..f5e04047ed13 100644
--- a/arch/arm/mach-imx/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -16,6 +16,7 @@
#include <linux/gpio/driver.h>
/* Needed for gpio_to_irq() */
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -230,10 +231,17 @@ static struct regulator_init_data mx27ads_lcd_regulator_init_data = {
static struct fixed_voltage_config mx27ads_lcd_regulator_pdata = {
.supply_name = "LCD",
.microvolts = 3300000,
- .gpio = MX27ADS_LCD_GPIO,
.init_data = &mx27ads_lcd_regulator_init_data,
};
+static struct gpiod_lookup_table mx27ads_lcd_regulator_gpiod_table = {
+ .dev_id = "reg-fixed-voltage.0", /* Let's hope ID 0 is what we get */
+ .table = {
+ GPIO_LOOKUP("LCD", 0, NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static void __init mx27ads_regulator_init(void)
{
struct gpio_chip *vchip;
@@ -247,6 +255,8 @@ static void __init mx27ads_regulator_init(void)
vchip->set = vgpio_set;
gpiochip_add_data(vchip, NULL);
+ gpiod_add_lookup_table(&mx27ads_lcd_regulator_gpiod_table);
+
platform_device_register_data(NULL, "reg-fixed-voltage",
PLATFORM_DEVID_AUTO,
&mx27ads_lcd_regulator_pdata,
diff --git a/arch/arm/mach-imx/mach-qong.c b/arch/arm/mach-imx/mach-qong.c
index 42a700053103..5c5df8ca38dd 100644
--- a/arch/arm/mach-imx/mach-qong.c
+++ b/arch/arm/mach-imx/mach-qong.c
@@ -18,7 +18,7 @@
#include <linux/memory.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
-#include <linux/mtd/rawnand.h>
+#include <linux/mtd/platnand.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
@@ -129,30 +129,29 @@ static void qong_init_nor_mtd(void)
/*
* Hardware specific access to control-lines
*/
-static void qong_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+static void qong_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
+ unsigned int ctrl)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
-
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
- writeb(cmd, nand_chip->IO_ADDR_W + (1 << 24));
+ writeb(cmd, nand_chip->legacy.IO_ADDR_W + (1 << 24));
else
- writeb(cmd, nand_chip->IO_ADDR_W + (1 << 23));
+ writeb(cmd, nand_chip->legacy.IO_ADDR_W + (1 << 23));
}
/*
* Read the Device Ready pin.
*/
-static int qong_nand_device_ready(struct mtd_info *mtd)
+static int qong_nand_device_ready(struct nand_chip *chip)
{
return gpio_get_value(IOMUX_TO_GPIO(MX31_PIN_NFRB));
}
-static void qong_nand_select_chip(struct mtd_info *mtd, int chip)
+static void qong_nand_select_chip(struct nand_chip *chip, int cs)
{
- if (chip >= 0)
+ if (cs >= 0)
gpio_set_value(IOMUX_TO_GPIO(MX31_PIN_NFCE_B), 0);
else
gpio_set_value(IOMUX_TO_GPIO(MX31_PIN_NFCE_B), 1);
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 772a7cf2010e..976ded5c5916 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -80,8 +80,6 @@ static unsigned int mmc_status(struct device *dev)
static struct mmci_platform_data mmc_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.status = mmc_status,
- .gpio_wp = -1,
- .gpio_cd = -1,
};
static u64 notrace intcp_read_sched_clock(void)
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 3ec829d52cdd..57d7df79d838 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -20,6 +20,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <asm/types.h>
@@ -75,9 +76,8 @@ static struct mtd_partition ixdp425_partitions[] = {
};
static void
-ixdp425_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+ixdp425_flash_nand_cmd_ctrl(struct nand_chip *this, int cmd, unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
int offset = (int)nand_get_controller_data(this);
if (ctrl & NAND_CTRL_CHANGE) {
@@ -93,7 +93,7 @@ ixdp425_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
}
if (cmd != NAND_CMD_NONE)
- writeb(cmd, this->IO_ADDR_W + offset);
+ writeb(cmd, this->legacy.IO_ADDR_W + offset);
}
static struct platform_nand_data ixdp425_flash_nand_data = {
diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c
index d1613b954926..a04e249c654b 100644
--- a/arch/arm/mach-mmp/brownstone.c
+++ b/arch/arm/mach-mmp/brownstone.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio-pxa.h>
+#include <linux/gpio/machine.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/max8649.h>
#include <linux/regulator/fixed.h>
@@ -148,7 +149,6 @@ static struct regulator_init_data brownstone_v_5vp_data = {
static struct fixed_voltage_config brownstone_v_5vp = {
.supply_name = "v_5vp",
.microvolts = 5000000,
- .gpio = GPIO_5V_ENABLE,
.enable_high = 1,
.enabled_at_boot = 1,
.init_data = &brownstone_v_5vp_data,
@@ -162,6 +162,15 @@ static struct platform_device brownstone_v_5vp_device = {
},
};
+static struct gpiod_lookup_table brownstone_v_5vp_gpiod_table = {
+ .dev_id = "reg-fixed-voltage.1", /* .id set to 1 above */
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO_5V_ENABLE,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static struct max8925_platform_data brownstone_max8925_info = {
.irq_base = MMP_NR_IRQS,
};
@@ -217,6 +226,7 @@ static void __init brownstone_init(void)
mmp2_add_isram(&mmp2_isram_platdata);
/* enable 5v regulator */
+ gpiod_add_lookup_table(&brownstone_v_5vp_gpiod_table);
platform_device_register(&brownstone_v_5vp_device);
}
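The fixed-regulator conversions in these board files follow one recipe: drop the .gpio field from fixed_voltage_config and register a gpiod_lookup_table keyed to the consumer device's name, so the driver looks the enable line up as a GPIO descriptor. The dev_id must match the platform device that will actually be created — which is what the "Let's hope ID 0" comments are nervous about. The recipe in isolation, with a hypothetical chip label and offset:

/*
 * Map the consumer device to a GPIO by chip label and offset, as
 * the mx21ads, mx27ads and brownstone hunks do.  "demo-gpio-chip"
 * and offset 5 are stand-ins.
 */
static struct gpiod_lookup_table demo_regulator_gpiod_table = {
	.dev_id = "reg-fixed-voltage.0",  /* must match the real device */
	.table = {
		GPIO_LOOKUP("demo-gpio-chip", 5, NULL, GPIO_ACTIVE_HIGH),
		{ },
	},
};

static void __init demo_board_init(void)
{
	gpiod_add_lookup_table(&demo_regulator_gpiod_table);
	/* ...then register the reg-fixed-voltage platform device... */
}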
diff --git a/arch/arm/mach-mmp/devices.c b/arch/arm/mach-mmp/devices.c
index 671c7a09ab3d..0fca63c80e1a 100644
--- a/arch/arm/mach-mmp/devices.c
+++ b/arch/arm/mach-mmp/devices.c
@@ -277,21 +277,12 @@ struct platform_device pxa168_device_u2o = {
#if IS_ENABLED(CONFIG_USB_EHCI_MV_U2O)
struct resource pxa168_u2oehci_resources[] = {
- /* regbase */
[0] = {
- .start = PXA168_U2O_REGBASE + U2x_CAPREGS_OFFSET,
+ .start = PXA168_U2O_REGBASE,
.end = PXA168_U2O_REGBASE + USB_REG_RANGE,
.flags = IORESOURCE_MEM,
- .name = "capregs",
},
- /* phybase */
[1] = {
- .start = PXA168_U2O_PHYBASE,
- .end = PXA168_U2O_PHYBASE + USB_PHY_RANGE,
- .flags = IORESOURCE_MEM,
- .name = "phyregs",
- },
- [2] = {
.start = IRQ_PXA168_USB1,
.end = IRQ_PXA168_USB1,
.flags = IORESOURCE_IRQ,
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index dd28d2614d7f..f226973f3d8c 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -300,7 +300,6 @@ static struct regulator_init_data modem_nreset_data = {
static struct fixed_voltage_config modem_nreset_config = {
.supply_name = "modem_nreset",
.microvolts = 3300000,
- .gpio = AMS_DELTA_GPIO_PIN_MODEM_NRESET,
.startup_delay = 25000,
.enable_high = 1,
.enabled_at_boot = 1,
@@ -315,6 +314,15 @@ static struct platform_device modem_nreset_device = {
},
};
+static struct gpiod_lookup_table ams_delta_nreset_gpiod_table = {
+ .dev_id = "reg-fixed-voltage",
+ .table = {
+ GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_MODEM_NRESET,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
struct modem_private_data {
struct regulator *regulator;
};
@@ -568,7 +576,6 @@ static struct regulator_init_data keybrd_pwr_initdata = {
static struct fixed_voltage_config keybrd_pwr_config = {
.supply_name = "keybrd_pwr",
.microvolts = 5000000,
- .gpio = AMS_DELTA_GPIO_PIN_KEYBRD_PWR,
.enable_high = 1,
.init_data = &keybrd_pwr_initdata,
};
@@ -602,6 +609,7 @@ static struct platform_device *ams_delta_devices[] __initdata = {
};
static struct gpiod_lookup_table *ams_delta_gpio_tables[] __initdata = {
+ &ams_delta_nreset_gpiod_table,
&ams_delta_audio_gpio_table,
&keybrd_pwr_gpio_table,
&ams_delta_lcd_gpio_table,
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 69bd601feb83..4a0a66815ca0 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -16,8 +16,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <linux/mtd/physmap.h>
#include <linux/input.h>
#include <linux/smc91x.h>
@@ -186,7 +185,7 @@ static struct platform_device nor_device = {
#define FSAMPLE_NAND_RB_GPIO_PIN 62
-static int nand_dev_ready(struct mtd_info *mtd)
+static int nand_dev_ready(struct nand_chip *chip)
{
return gpio_get_value(FSAMPLE_NAND_RB_GPIO_PIN);
}
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 9aeb8ad8c327..9d9a6ca15df0 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -24,8 +24,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <linux/mtd/physmap.h>
#include <linux/input.h>
#include <linux/mfd/tps65010.h>
@@ -182,7 +181,7 @@ static struct mtd_partition h2_nand_partitions[] = {
#define H2_NAND_RB_GPIO_PIN 62
-static int h2_nand_dev_ready(struct mtd_info *mtd)
+static int h2_nand_dev_ready(struct nand_chip *chip)
{
return gpio_get_value(H2_NAND_RB_GPIO_PIN);
}
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 2edcd6356f2d..cd6e02c5c01a 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -23,7 +23,7 @@
#include <linux/workqueue.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
+#include <linux/mtd/platnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/input.h>
@@ -185,7 +185,7 @@ static struct mtd_partition nand_partitions[] = {
#define H3_NAND_RB_GPIO_PIN 10
-static int nand_dev_ready(struct mtd_info *mtd)
+static int nand_dev_ready(struct nand_chip *chip)
{
return gpio_get_value(H3_NAND_RB_GPIO_PIN);
}
diff --git a/arch/arm/mach-omap1/board-nand.c b/arch/arm/mach-omap1/board-nand.c
index 1bffbb4e050f..20923eb2d9b6 100644
--- a/arch/arm/mach-omap1/board-nand.c
+++ b/arch/arm/mach-omap1/board-nand.c
@@ -20,9 +20,8 @@
#include "common.h"
-void omap1_nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+void omap1_nand_cmd_ctl(struct nand_chip *this, int cmd, unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
unsigned long mask;
if (cmd == NAND_CMD_NONE)
@@ -32,6 +31,6 @@ void omap1_nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
if (ctrl & NAND_ALE)
mask |= 0x04;
- writeb(cmd, this->IO_ADDR_W + mask);
+ writeb(cmd, this->legacy.IO_ADDR_W + mask);
}
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index b4951eb82898..06a584fef5b8 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -16,8 +16,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <linux/mtd/physmap.h>
#include <linux/input.h>
#include <linux/smc91x.h>
@@ -144,7 +143,7 @@ static struct platform_device nor_device = {
#define P2_NAND_RB_GPIO_PIN 62
-static int nand_dev_ready(struct mtd_info *mtd)
+static int nand_dev_ready(struct nand_chip *chip)
{
return gpio_get_value(P2_NAND_RB_GPIO_PIN);
}
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
index c6537d2c2859..504b959ba5cf 100644
--- a/arch/arm/mach-omap1/common.h
+++ b/arch/arm/mach-omap1/common.h
@@ -26,7 +26,6 @@
#ifndef __ARCH_ARM_MACH_OMAP1_COMMON_H
#define __ARCH_ARM_MACH_OMAP1_COMMON_H
-#include <linux/mtd/mtd.h>
#include <linux/platform_data/i2c-omap.h>
#include <linux/reboot.h>
@@ -82,7 +81,8 @@ void omap1_restart(enum reboot_mode, const char *);
extern void __init omap_check_revision(void);
-extern void omap1_nand_cmd_ctl(struct mtd_info *mtd, int cmd,
+struct nand_chip;
+extern void omap1_nand_cmd_ctl(struct nand_chip *this, int cmd,
unsigned int ctrl);
extern void omap1_timer_init(void);
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index af9af5094ec3..bf99aec5a155 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -12,8 +12,6 @@ struct omap2_hsmmc_info {
u8 mmc; /* controller 1/2/3 */
u32 caps; /* 4/8 wires and any additional host
* capabilities OR'd (ref. linux/mmc/host.h) */
- int gpio_cd; /* or -EINVAL */
- int gpio_wp; /* or -EINVAL */
struct platform_device *pdev; /* mmc controller instance */
/* init some special card */
void (*init_card)(struct mmc_card *card);
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 7f02743edbe4..9fec5f84bf77 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/davinci_emac.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_platform.h>
@@ -328,7 +329,6 @@ static struct regulator_init_data pandora_vmmc3 = {
static struct fixed_voltage_config pandora_vwlan = {
.supply_name = "vwlan",
.microvolts = 1800000, /* 1.8V */
- .gpio = PANDORA_WIFI_NRESET_GPIO,
.startup_delay = 50000, /* 50ms */
.enable_high = 1,
.init_data = &pandora_vmmc3,
@@ -342,6 +342,19 @@ static struct platform_device pandora_vwlan_device = {
},
};
+static struct gpiod_lookup_table pandora_vwlan_gpiod_table = {
+ .dev_id = "reg-fixed-voltage.1",
+ .table = {
+ /*
+ * As this is a low GPIO number, it should be in the first
+ * GPIO bank.
+ */
+ GPIO_LOOKUP("gpio-0-31", PANDORA_WIFI_NRESET_GPIO,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static void pandora_wl1251_init_card(struct mmc_card *card)
{
/*
@@ -363,8 +376,6 @@ static struct omap2_hsmmc_info pandora_mmc3[] = {
{
.mmc = 3,
.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
- .gpio_cd = -EINVAL,
- .gpio_wp = -EINVAL,
.init_card = pandora_wl1251_init_card,
},
{} /* Terminator */
@@ -403,6 +414,7 @@ fail:
static void __init omap3_pandora_legacy_init(void)
{
platform_device_register(&pandora_backlight);
+ gpiod_add_lookup_table(&pandora_vwlan_gpiod_table);
platform_device_register(&pandora_vwlan_device);
omap_hsmmc_init(pandora_mmc3);
omap_hsmmc_late_init(pandora_mmc3);
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index 2a1a4180d5d0..1298b53ac263 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -18,6 +18,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/cpu_pm.h>
#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
@@ -29,8 +30,6 @@
#include <linux/clk-provider.h>
#include <linux/irq.h>
#include <linux/time.h>
-#include <linux/gpio.h>
-#include <linux/platform_data/gpio-omap.h>
#include <asm/fncpy.h>
@@ -87,7 +86,7 @@ static int omap2_enter_full_retention(void)
l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);
- omap2_gpio_prepare_for_idle(0);
+ cpu_cluster_pm_enter();
/* One last check for pending IRQs to avoid extra latency due
* to sleeping unnecessarily. */
@@ -100,7 +99,7 @@ static int omap2_enter_full_retention(void)
OMAP_SDRC_REGADDR(SDRC_POWER));
no_sleep:
- omap2_gpio_resume_after_idle();
+ cpu_cluster_pm_exit();
clk_enable(osc_ck);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 36c55547137c..1a90050361f1 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -18,19 +18,18 @@
* published by the Free Software Foundation.
*/
+#include <linux/cpu_pm.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/omap-dma.h>
#include <linux/omap-gpmc.h>
-#include <linux/platform_data/gpio-omap.h>
#include <trace/events/power.h>
@@ -197,7 +196,6 @@ void omap_sram_idle(void)
int mpu_next_state = PWRDM_POWER_ON;
int per_next_state = PWRDM_POWER_ON;
int core_next_state = PWRDM_POWER_ON;
- int per_going_off;
u32 sdrc_pwr = 0;
mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
@@ -227,10 +225,8 @@ void omap_sram_idle(void)
pwrdm_pre_transition(NULL);
/* PER */
- if (per_next_state < PWRDM_POWER_ON) {
- per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
- omap2_gpio_prepare_for_idle(per_going_off);
- }
+ if (per_next_state == PWRDM_POWER_OFF)
+ cpu_cluster_pm_enter();
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
@@ -295,8 +291,8 @@ void omap_sram_idle(void)
pwrdm_post_transition(NULL);
/* PER */
- if (per_next_state < PWRDM_POWER_ON)
- omap2_gpio_resume_after_idle();
+ if (per_next_state == PWRDM_POWER_OFF)
+ cpu_cluster_pm_exit();
}
static void omap3_pm_idle(void)
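[Editor's note] Both pm24xx.c and this pm34xx.c hunk replace direct calls into the OMAP GPIO driver with the generic CPU PM notifier chain: the idle path now broadcasts CPU_CLUSTER_PM_ENTER/EXIT, and any driver that loses context in deep idle is expected to subscribe. A sketch of such a listener (the handler body is illustrative; the real one lives in the OMAP GPIO driver):

        #include <linux/cpu_pm.h>
        #include <linux/notifier.h>

        static int my_cpu_pm_notify(struct notifier_block *nb,
                                    unsigned long action, void *data)
        {
                switch (action) {
                case CPU_CLUSTER_PM_ENTER:
                        /* save controller context, arm wakeups */
                        break;
                case CPU_CLUSTER_PM_EXIT:
                        /* restore controller context */
                        break;
                }
                return NOTIFY_OK;
        }

        static struct notifier_block my_cpu_pm_nb = {
                .notifier_call = my_cpu_pm_notify,
        };

        /* at probe time: cpu_pm_register_notifier(&my_cpu_pm_nb); */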
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 94778739e38f..fda9b75c3a33 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -16,8 +16,7 @@
#include <linux/platform_device.h>
#include <linux/mv643xx_eth.h>
#include <linux/ata_platform.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <linux/timeriomem-rng.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -131,11 +130,9 @@ static void ts78xx_ts_rtc_unload(void)
* NAND_CLE: bit 1 -> bit 1
* NAND_ALE: bit 2 -> bit 0
*/
-static void ts78xx_ts_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
+static void ts78xx_ts_nand_cmd_ctrl(struct nand_chip *this, int cmd,
+ unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
-
if (ctrl & NAND_CTRL_CHANGE) {
unsigned char bits;
@@ -147,19 +144,18 @@ static void ts78xx_ts_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
}
if (cmd != NAND_CMD_NONE)
- writeb(cmd, this->IO_ADDR_W);
+ writeb(cmd, this->legacy.IO_ADDR_W);
}
-static int ts78xx_ts_nand_dev_ready(struct mtd_info *mtd)
+static int ts78xx_ts_nand_dev_ready(struct nand_chip *chip)
{
return readb(TS_NAND_CTRL) & 0x20;
}
-static void ts78xx_ts_nand_write_buf(struct mtd_info *mtd,
- const uint8_t *buf, int len)
+static void ts78xx_ts_nand_write_buf(struct nand_chip *chip,
+ const uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- void __iomem *io_base = chip->IO_ADDR_W;
+ void __iomem *io_base = chip->legacy.IO_ADDR_W;
unsigned long off = ((unsigned long)buf & 3);
int sz;
@@ -182,11 +178,10 @@ static void ts78xx_ts_nand_write_buf(struct mtd_info *mtd,
writesb(io_base, buf, len);
}
-static void ts78xx_ts_nand_read_buf(struct mtd_info *mtd,
- uint8_t *buf, int len)
+static void ts78xx_ts_nand_read_buf(struct nand_chip *chip,
+ uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- void __iomem *io_base = chip->IO_ADDR_R;
+ void __iomem *io_base = chip->legacy.IO_ADDR_R;
unsigned long off = ((unsigned long)buf & 3);
int sz;
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index af46d2182533..c52c081eb6d9 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -25,11 +25,10 @@
#include <linux/ioport.h>
#include <linux/ucb1400.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
#include <linux/types.h>
#include <linux/platform_data/pcf857x.h>
#include <linux/platform_data/i2c-pxa.h>
-#include <linux/mtd/rawnand.h>
+#include <linux/mtd/platnand.h>
#include <linux/mtd/physmap.h>
#include <linux/regulator/max1586.h>
@@ -571,9 +570,9 @@ static inline void balloon3_i2c_init(void) {}
* NAND
******************************************************************************/
#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
-static void balloon3_nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+static void balloon3_nand_cmd_ctl(struct nand_chip *this, int cmd,
+ unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
uint8_t balloon3_ctl_set = 0, balloon3_ctl_clr = 0;
if (ctrl & NAND_CTRL_CHANGE) {
@@ -597,10 +596,10 @@ static void balloon3_nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ct
}
if (cmd != NAND_CMD_NONE)
- writeb(cmd, this->IO_ADDR_W);
+ writeb(cmd, this->legacy.IO_ADDR_W);
}
-static void balloon3_nand_select_chip(struct mtd_info *mtd, int chip)
+static void balloon3_nand_select_chip(struct nand_chip *this, int chip)
{
if (chip < 0 || chip > 3)
return;
@@ -616,7 +615,7 @@ static void balloon3_nand_select_chip(struct mtd_info *mtd, int chip)
BALLOON3_NAND_CONTROL_REG);
}
-static int balloon3_nand_dev_ready(struct mtd_info *mtd)
+static int balloon3_nand_dev_ready(struct nand_chip *this)
{
return __raw_readl(BALLOON3_NAND_STAT_REG) & BALLOON3_NAND_STAT_RNB;
}
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 29be04c6cc48..67e37df637f5 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -15,8 +15,7 @@
#include <linux/dm9000.h>
#include <linux/platform_data/rtc-v3020.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <linux/mtd/physmap.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
@@ -285,11 +284,10 @@ static void nand_cs_off(void)
}
/* hardware specific access to control-lines */
-static void em_x270_nand_cmd_ctl(struct mtd_info *mtd, int dat,
+static void em_x270_nand_cmd_ctl(struct nand_chip *this, int dat,
unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- unsigned long nandaddr = (unsigned long)this->IO_ADDR_W;
+ unsigned long nandaddr = (unsigned long)this->legacy.IO_ADDR_W;
dsb();
@@ -309,15 +307,15 @@ static void em_x270_nand_cmd_ctl(struct mtd_info *mtd, int dat,
}
dsb();
- this->IO_ADDR_W = (void __iomem *)nandaddr;
+ this->legacy.IO_ADDR_W = (void __iomem *)nandaddr;
if (dat != NAND_CMD_NONE)
- writel(dat, this->IO_ADDR_W);
+ writel(dat, this->legacy.IO_ADDR_W);
dsb();
}
/* read device ready pin */
-static int em_x270_nand_device_ready(struct mtd_info *mtd)
+static int em_x270_nand_device_ready(struct nand_chip *this)
{
dsb();
@@ -986,7 +984,6 @@ static struct fixed_voltage_config camera_dummy_config = {
.supply_name = "camera_vdd",
.input_supply = "vcc cam",
.microvolts = 2800000,
- .gpio = -1,
.enable_high = 0,
.init_data = &camera_dummy_initdata,
};
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 2c90b58f347d..565965e9acc7 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -21,6 +21,7 @@
#include <linux/regulator/fixed.h>
#include <linux/input.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/gpio_keys.h>
#include <linux/leds-lp3944.h>
#include <linux/platform_data/i2c-pxa.h>
@@ -698,31 +699,39 @@ static struct pxa27x_keypad_platform_data e2_keypad_platform_data = {
#if defined(CONFIG_MACH_EZX_A780) || defined(CONFIG_MACH_EZX_A910)
/* camera */
-static struct regulator_consumer_supply camera_dummy_supplies[] = {
+static struct regulator_consumer_supply camera_regulator_supplies[] = {
REGULATOR_SUPPLY("vdd", "0-005d"),
};
-static struct regulator_init_data camera_dummy_initdata = {
- .consumer_supplies = camera_dummy_supplies,
- .num_consumer_supplies = ARRAY_SIZE(camera_dummy_supplies),
+static struct regulator_init_data camera_regulator_initdata = {
+ .consumer_supplies = camera_regulator_supplies,
+ .num_consumer_supplies = ARRAY_SIZE(camera_regulator_supplies),
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
};
-static struct fixed_voltage_config camera_dummy_config = {
+static struct fixed_voltage_config camera_regulator_config = {
.supply_name = "camera_vdd",
.microvolts = 2800000,
- .gpio = GPIO50_nCAM_EN,
.enable_high = 0,
- .init_data = &camera_dummy_initdata,
+ .init_data = &camera_regulator_initdata,
};
-static struct platform_device camera_supply_dummy_device = {
+static struct platform_device camera_supply_regulator_device = {
.name = "reg-fixed-voltage",
.id = 1,
.dev = {
- .platform_data = &camera_dummy_config,
+ .platform_data = &camera_regulator_config,
+ },
+};
+
+static struct gpiod_lookup_table camera_supply_gpiod_table = {
+ .dev_id = "reg-fixed-voltage.1",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO50_nCAM_EN,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
},
};
#endif
@@ -800,7 +809,7 @@ static struct i2c_board_info a780_i2c_board_info[] = {
static struct platform_device *a780_devices[] __initdata = {
&a780_gpio_keys,
- &camera_supply_dummy_device,
+ &camera_supply_regulator_device,
};
static void __init a780_init(void)
@@ -823,6 +832,7 @@ static void __init a780_init(void)
if (a780_camera_init() == 0)
pxa_set_camera_info(&a780_pxacamera_platform_data);
+ gpiod_add_lookup_table(&camera_supply_gpiod_table);
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(a780_devices));
@@ -1098,7 +1108,7 @@ static struct i2c_board_info __initdata a910_i2c_board_info[] = {
static struct platform_device *a910_devices[] __initdata = {
&a910_gpio_keys,
- &camera_supply_dummy_device,
+ &camera_supply_regulator_device,
};
static void __init a910_init(void)
@@ -1121,6 +1131,7 @@ static void __init a910_init(void)
if (a910_camera_init() == 0)
pxa_set_camera_info(&a910_pxacamera_platform_data);
+ gpiod_add_lookup_table(&camera_supply_gpiod_table);
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(a910_devices));
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index c5325d1ae77b..14c0f80bc9e7 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/mfd/htc-pasic3.h>
@@ -696,7 +697,6 @@ static struct regulator_init_data vads7846_regulator = {
static struct fixed_voltage_config vads7846 = {
.supply_name = "vads7846",
.microvolts = 3300000, /* probably */
- .gpio = -EINVAL,
.startup_delay = 0,
.init_data = &vads7846_regulator,
};
diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c
index 4cc05ecce618..b66b0b11d717 100644
--- a/arch/arm/mach-pxa/palmtreo.c
+++ b/arch/arm/mach-pxa/palmtreo.c
@@ -404,36 +404,6 @@ static void __init palmtreo_leds_init(void)
}
/******************************************************************************
- * diskonchip docg4 flash
- ******************************************************************************/
-#if defined(CONFIG_MACH_TREO680)
-/* REVISIT: does the centro have this device also? */
-#if IS_ENABLED(CONFIG_MTD_NAND_DOCG4)
-static struct resource docg4_resources[] = {
- {
- .start = 0x00000000,
- .end = 0x00001FFF,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device treo680_docg4_flash = {
- .name = "docg4",
- .id = -1,
- .resource = docg4_resources,
- .num_resources = ARRAY_SIZE(docg4_resources),
-};
-
-static void __init treo680_docg4_flash_init(void)
-{
- platform_device_register(&treo680_docg4_flash);
-}
-#else
-static inline void treo680_docg4_flash_init(void) {}
-#endif
-#endif
-
-/******************************************************************************
* Machine init
******************************************************************************/
static void __init treo_reserve(void)
@@ -517,7 +487,6 @@ static void __init treo680_init(void)
treo680_gpio_init();
palm27x_mmc_init(GPIO_NR_TREO_SD_DETECT_N, GPIO_NR_TREO680_SD_READONLY,
GPIO_NR_TREO680_SD_POWER, 0);
- treo680_docg4_flash_init();
}
#endif
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 47e3e38e9bec..1d06a8e91d8f 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -28,8 +28,7 @@
#include <linux/wm97xx.h>
#include <linux/power_supply.h>
#include <linux/usb/gpio_vbus.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/physmap.h>
@@ -247,11 +246,10 @@ static inline void palmtx_keys_init(void) {}
******************************************************************************/
#if defined(CONFIG_MTD_NAND_PLATFORM) || \
defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
-static void palmtx_nand_cmd_ctl(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
+static void palmtx_nand_cmd_ctl(struct nand_chip *this, int cmd,
+ unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- char __iomem *nandaddr = this->IO_ADDR_W;
+ char __iomem *nandaddr = this->legacy.IO_ADDR_W;
if (cmd == NAND_CMD_NONE)
return;
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 034345546f84..bd3c23ad6ce6 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -886,7 +886,6 @@ static struct regulator_init_data audio_va_initdata = {
static struct fixed_voltage_config audio_va_config = {
.supply_name = "audio_va",
.microvolts = 5000000,
- .gpio = GPIO_AUDIO_VA_ENABLE,
.enable_high = 1,
.enabled_at_boot = 0,
.init_data = &audio_va_initdata,
@@ -900,6 +899,15 @@ static struct platform_device audio_va_device = {
},
};
+static struct gpiod_lookup_table audio_va_gpiod_table = {
+ .dev_id = "reg-fixed-voltage.0",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO_AUDIO_VA_ENABLE,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
/* Dummy supplies for Codec's VD/VLC */
static struct regulator_consumer_supply audio_dummy_supplies[] = {
@@ -918,7 +926,6 @@ static struct regulator_init_data audio_dummy_initdata = {
static struct fixed_voltage_config audio_dummy_config = {
.supply_name = "audio_vd",
.microvolts = 3300000,
- .gpio = -1,
.init_data = &audio_dummy_initdata,
};
@@ -1033,6 +1040,7 @@ static void __init raumfeld_audio_init(void)
else
gpio_direction_output(GPIO_MCLK_RESET, 1);
+ gpiod_add_lookup_table(&audio_va_gpiod_table);
platform_add_devices(ARRAY_AND_SIZE(audio_regulator_devices));
}
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index e3851795d6d7..d53ea12fc766 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/serial_8250.h>
#include <linux/dm9000.h>
#include <linux/mmc/host.h>
@@ -410,7 +411,6 @@ static struct regulator_init_data can_regulator_init_data = {
static struct fixed_voltage_config can_regulator_pdata = {
.supply_name = "CAN_SHDN",
.microvolts = 3300000,
- .gpio = ZEUS_CAN_SHDN_GPIO,
.init_data = &can_regulator_init_data,
};
@@ -422,6 +422,15 @@ static struct platform_device can_regulator_device = {
},
};
+static struct gpiod_lookup_table can_regulator_gpiod_table = {
+ .dev_id = "reg-fixed-voltage.0",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", ZEUS_CAN_SHDN_GPIO,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static struct mcp251x_platform_data zeus_mcp2515_pdata = {
.oscillator_frequency = 16*1000*1000,
};
@@ -538,7 +547,6 @@ static struct regulator_init_data zeus_ohci_regulator_data = {
static struct fixed_voltage_config zeus_ohci_regulator_config = {
.supply_name = "vbus2",
.microvolts = 5000000, /* 5.0V */
- .gpio = ZEUS_USB2_PWREN_GPIO,
.enable_high = 1,
.startup_delay = 0,
.init_data = &zeus_ohci_regulator_data,
@@ -552,6 +560,15 @@ static struct platform_device zeus_ohci_regulator_device = {
},
};
+static struct gpiod_lookup_table zeus_ohci_regulator_gpiod_table = {
+ .dev_id = "reg-fixed-voltage.0",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", ZEUS_USB2_PWREN_GPIO,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static struct pxaohci_platform_data zeus_ohci_platform_data = {
.port_mode = PMM_NPS_MODE,
/* Clear Power Control Polarity Low and set Power Sense
@@ -855,6 +872,8 @@ static void __init zeus_init(void)
pxa2xx_mfp_config(ARRAY_AND_SIZE(zeus_pin_config));
+ gpiod_add_lookup_table(&can_regulator_gpiod_table);
+ gpiod_add_lookup_table(&zeus_ohci_regulator_gpiod_table);
platform_add_devices(zeus_devices, ARRAY_SIZE(zeus_devices));
zeus_register_ohci();
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index f04650297487..379424d72ae7 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -352,7 +352,6 @@ static struct fixed_voltage_config wallvdd_pdata = {
.supply_name = "WALLVDD",
.microvolts = 5000000,
.init_data = &wallvdd_data,
- .gpio = -EINVAL,
};
static struct platform_device wallvdd_device = {
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index c46fa5dfd2e0..908e5aa831c8 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -222,7 +222,6 @@ static struct fixed_voltage_config smdk6410_b_pwr_5v_pdata = {
.supply_name = "B_PWR_5V",
.microvolts = 5000000,
.init_data = &smdk6410_b_pwr_5v_data,
- .gpio = -EINVAL,
};
static struct platform_device smdk6410_b_pwr_5v = {
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 575ec085cffa..3e8c0948abcc 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -101,7 +101,7 @@ static int __init assabet_init_gpio(void __iomem *reg, u32 def_val)
assabet_bcr_gc = gc;
- return gc->base;
+ return 0;
}
/*
@@ -471,6 +471,14 @@ static struct fixed_voltage_config assabet_cf_vcc_pdata __initdata = {
.enable_high = 1,
};
+static struct gpiod_lookup_table assabet_cf_vcc_gpio_table = {
+ .dev_id = "reg-fixed-voltage.0",
+ .table = {
+ GPIO_LOOKUP("assabet", 0, NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static void __init assabet_init(void)
{
/*
@@ -517,9 +525,11 @@ static void __init assabet_init(void)
neponset_resources, ARRAY_SIZE(neponset_resources));
#endif
} else {
+ gpiod_add_lookup_table(&assabet_cf_vcc_gpio_table);
sa11x0_register_fixed_regulator(0, &assabet_cf_vcc_pdata,
- assabet_cf_vcc_consumers,
- ARRAY_SIZE(assabet_cf_vcc_consumers));
+ assabet_cf_vcc_consumers,
+ ARRAY_SIZE(assabet_cf_vcc_consumers),
+ true);
}
@@ -802,7 +812,6 @@ fs_initcall(assabet_leds_init);
void __init assabet_init_irq(void)
{
- unsigned int assabet_gpio_base;
u32 def_val;
sa1100_init_irq();
@@ -817,9 +826,7 @@ void __init assabet_init_irq(void)
*
* This must precede any driver calls to BCR_set() or BCR_clear().
*/
- assabet_gpio_base = assabet_init_gpio((void *)&ASSABET_BCR, def_val);
-
- assabet_cf_vcc_pdata.gpio = assabet_gpio_base + 0;
+ assabet_init_gpio((void *)&ASSABET_BCR, def_val);
}
MACHINE_START(ASSABET, "Intel-Assabet")
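[Editor's note] Note how assabet drops its dependency on the dynamic GPIO base: assabet_init_gpio() now returns plain success, because the GPIO_LOOKUP above matches the BCR chip by its label rather than by global number. On the producer side that only requires a stable label; roughly (field values illustrative, the real chip is set up inside assabet_init_gpio()):

        static struct gpio_chip assabet_bcr_chip = {
                .label = "assabet", /* matched by GPIO_LOOKUP("assabet", 0, ...) */
                .base  = -1,        /* dynamic base is fine now */
                /* ...ngpio and ops elided... */
        };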
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 7167ddf84a0e..800321c6cbd8 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -348,7 +348,8 @@ void __init sa11x0_init_late(void)
int __init sa11x0_register_fixed_regulator(int n,
struct fixed_voltage_config *cfg,
- struct regulator_consumer_supply *supplies, unsigned num_supplies)
+ struct regulator_consumer_supply *supplies, unsigned num_supplies,
+ bool uses_gpio)
{
struct regulator_init_data *id;
@@ -356,7 +357,7 @@ int __init sa11x0_register_fixed_regulator(int n,
if (!cfg->init_data)
return -ENOMEM;
- if (cfg->gpio < 0)
+ if (!uses_gpio)
id->constraints.always_on = 1;
id->constraints.name = cfg->supply_name;
id->constraints.min_uV = cfg->microvolts;
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index 5f3cb52fa6ab..158a4fd5ca24 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -54,4 +54,5 @@ void sa11x0_register_pcmcia(int socket, struct gpiod_lookup_table *);
struct fixed_voltage_config;
struct regulator_consumer_supply;
int sa11x0_register_fixed_regulator(int n, struct fixed_voltage_config *cfg,
- struct regulator_consumer_supply *supplies, unsigned num_supplies);
+ struct regulator_consumer_supply *supplies, unsigned num_supplies,
+ bool uses_gpio);
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c
index 22f7fe0b809f..5bc82e2671c6 100644
--- a/arch/arm/mach-sa1100/shannon.c
+++ b/arch/arm/mach-sa1100/shannon.c
@@ -102,14 +102,14 @@ static struct fixed_voltage_config shannon_cf_vcc_pdata __initdata = {
.supply_name = "cf-power",
.microvolts = 3300000,
.enabled_at_boot = 1,
- .gpio = -EINVAL,
};
static void __init shannon_init(void)
{
sa11x0_register_fixed_regulator(0, &shannon_cf_vcc_pdata,
shannon_cf_vcc_consumers,
- ARRAY_SIZE(shannon_cf_vcc_consumers));
+ ARRAY_SIZE(shannon_cf_vcc_consumers),
+ false);
sa11x0_register_pcmcia(0, &shannon_pcmcia0_gpio_table);
sa11x0_register_pcmcia(1, &shannon_pcmcia1_gpio_table);
sa11x0_ppc_configure_mcp();
diff --git a/arch/arm/mach-versatile/versatile_dt.c b/arch/arm/mach-versatile/versatile_dt.c
index 3c8d39c12909..e9d60687e416 100644
--- a/arch/arm/mach-versatile/versatile_dt.c
+++ b/arch/arm/mach-versatile/versatile_dt.c
@@ -89,15 +89,11 @@ unsigned int mmc_status(struct device *dev)
static struct mmci_platform_data mmc0_plat_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.status = mmc_status,
- .gpio_wp = -1,
- .gpio_cd = -1,
};
static struct mmci_platform_data mmc1_plat_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.status = mmc_status,
- .gpio_wp = -1,
- .gpio_cd = -1,
};
/*
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index bd2c739d8083..b54f8f8def36 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -948,15 +948,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
goto fixup;
if (ai_usermode & UM_SIGNAL) {
- siginfo_t si;
-
- clear_siginfo(&si);
- si.si_signo = SIGBUS;
- si.si_errno = 0;
- si.si_code = BUS_ADRALN;
- si.si_addr = (void __user *)addr;
-
- force_sig_info(si.si_signo, &si, current);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr, current);
} else {
/*
* We're about to disable the alignment trap and return to
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index f448a0663b10..712416ecd8e6 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -47,7 +47,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
*/
if (attrs & DMA_ATTR_NON_CONSISTENT)
- return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ return dma_direct_alloc_pages(dev, size, dma_handle, gfp,
+ attrs);
ret = dma_alloc_from_global_coherent(size, dma_handle);
@@ -70,7 +71,7 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
unsigned long attrs)
{
if (attrs & DMA_ATTR_NON_CONSISTENT) {
- dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+ dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
} else {
int ret = dma_release_from_global_coherent(get_order(size),
cpu_addr);
@@ -90,7 +91,7 @@ static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
return ret;
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
@@ -237,7 +238,3 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
set_dma_ops(dev, dma_ops);
}
-
-void arch_teardown_dma_ops(struct device *dev)
-{
-}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 3232afb6fdc0..f4ea4c62c613 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -161,13 +161,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
unsigned int fsr, unsigned int sig, int code,
struct pt_regs *regs)
{
- struct siginfo si;
-
if (addr > TASK_SIZE)
harden_branch_predictor();
- clear_siginfo(&si);
-
#ifdef CONFIG_DEBUG_USER
if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
@@ -181,11 +177,7 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
tsk->thread.address = addr;
tsk->thread.error_code = fsr;
tsk->thread.trap_no = 14;
- si.si_signo = sig;
- si.si_errno = 0;
- si.si_code = code;
- si.si_addr = (void __user *)addr;
- force_sig_info(sig, &si, tsk);
+ force_sig_fault(sig, code, (void __user *)addr, tsk);
}
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
@@ -554,7 +546,6 @@ asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
- struct siginfo info;
if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
@@ -563,12 +554,8 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
inf->name, fsr, addr);
show_pte(current->mm, addr);
- clear_siginfo(&info);
- info.si_signo = inf->sig;
- info.si_errno = 0;
- info.si_code = inf->code;
- info.si_addr = (void __user *)addr;
- arm_notify_die("", regs, &info, fsr, 0);
+ arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
+ fsr, 0);
}
void __init
@@ -588,7 +575,6 @@ asmlinkage void
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
- struct siginfo info;
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
@@ -596,12 +582,8 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);
- clear_siginfo(&info);
- info.si_signo = inf->sig;
- info.si_errno = 0;
- info.si_code = inf->code;
- info.si_addr = (void __user *)addr;
- arm_notify_die("", regs, &info, ifsr, 0);
+ arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
+ ifsr, 0);
}
/*
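[Editor's note] Both abort paths above shed the open-coded siginfo setup in favour of the generic fault-signal helpers. At this point in the series the helpers still take an explicit task argument; roughly:

        /* prototypes as used here (from <linux/sched/signal.h>) */
        int force_sig_fault(int sig, int code, void __user *addr,
                            struct task_struct *t);
        int send_sig_fault(int sig, int code, void __user *addr,
                           struct task_struct *t);

        /* so a typical user-fault path reduces to a single call: */
        force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)addr, current);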
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index fc91205ff46c..5bf9443cfbaa 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
- BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+ BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
PCI_IO_VIRT_BASE + offset + SZ_64K,
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index fbc74b5fa3ed..8edf93b4490f 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -413,3 +413,4 @@
396 common pkey_free sys_pkey_free
397 common statx sys_statx
398 common rseq sys_rseq
+399 common io_pgetevents sys_io_pgetevents
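[Editor's note] Syscall 399 wires up io_pgetevents for 32-bit ARM. Until the C library grows a wrapper it can be invoked by number; a hedged userspace sketch (signature per io_pgetevents(2), with the optional timeout and signal-mask descriptor both left NULL):

        #include <stddef.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/aio_abi.h>

        #ifndef __NR_io_pgetevents
        #define __NR_io_pgetevents 399  /* from the table entry above */
        #endif

        static long my_io_pgetevents(aio_context_t ctx, long min_nr, long nr,
                                     struct io_event *events)
        {
                return syscall(__NR_io_pgetevents, ctx, min_nr, nr,
                               events, NULL, NULL);
        }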
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index dc7e6b50ef67..aff6e6eadc70 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -216,13 +216,6 @@ static struct notifier_block vfp_notifier_block = {
*/
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGFPE;
- info.si_code = sicode;
- info.si_addr = (void __user *)(instruction_pointer(regs) - 4);
-
/*
* This is the same as NWFPE, because it's not clear what
* this is used for
@@ -230,7 +223,9 @@ static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
current->thread.error_code = 0;
current->thread.trap_no = 6;
- send_sig_info(SIGFPE, &info, current);
+ send_sig_fault(SIGFPE, sicode,
+ (void __user *)(instruction_pointer(regs) - 4),
+ current);
}
static void vfp_panic(char *reason, u32 inst)
@@ -553,12 +548,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
* Save the current VFP state into the provided structures and prepare
* for entry into a new function (signal handler).
*/
-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
- struct user_vfp_exc __user *ufp_exc)
+int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
+ struct user_vfp_exc *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
- int err = 0;
/* Ensure that the saved hwstate is up-to-date. */
vfp_sync_hwstate(thread);
@@ -567,22 +561,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
* Copy the floating point registers. There can be unused
* registers; see asm/hwcap.h for details.
*/
- err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
- sizeof(hwstate->fpregs));
+ memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
+
/*
* Copy the status and control register.
*/
- __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+ ufp->fpscr = hwstate->fpscr;
/*
* Copy the exception registers.
*/
- __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
- __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
- __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
-
- if (err)
- return -EFAULT;
+ ufp_exc->fpexc = hwstate->fpexc;
+ ufp_exc->fpinst = hwstate->fpinst;
+ ufp_exc->fpinst2 = hwstate->fpinst2;
/* Ensure that VFP is disabled. */
vfp_flush_hwstate(thread);
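[Editor's note] With the __user annotations gone, vfp_preserve_user_clear_hwstate() fills plain kernel-space structures and can no longer fault, which lets the signal code assemble the whole VFP frame in kernel memory and copy it out in one go. A hedged sketch of the caller pattern this enables (the matching signal.c change lives elsewhere in the series; uframe is a hypothetical user pointer):

        struct vfp_sigframe kframe; /* magic, size, ufp, ufp_exc */

        memset(&kframe, 0, sizeof(kframe));
        kframe.magic = VFP_MAGIC;
        kframe.size = VFP_STORAGE_SIZE;
        vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);

        if (copy_to_user(uframe, &kframe, sizeof(kframe)))
                return -EFAULT;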
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 07060e5b5864..17e478928276 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -62,29 +62,6 @@ static __read_mostly unsigned int xen_events_irq;
uint32_t xen_start_flags;
EXPORT_SYMBOL(xen_start_flags);
-int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *gfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned domid,
- struct page **pages)
-{
- return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
- prot, domid, pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
-
-/* Not used by XENFEAT_auto_translated guests. */
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t gfn, int nr,
- pgprot_t prot, unsigned domid,
- struct page **pages)
-{
- return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
-
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int nr, struct page **pages)
{
@@ -92,17 +69,6 @@ int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
-/* Not used by XENFEAT_auto_translated guests. */
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *mfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned int domid, struct page **pages)
-{
- return -ENOSYS;
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
-
static void xen_read_wallclock(struct timespec64 *ts)
{
u32 version;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1b1a0e95c751..c03cd0d765d3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -75,6 +75,7 @@ config ARM64
select CLONE_BACKWARDS
select COMMON_CLK
select CPU_PM if (SUSPEND || CPU_IDLE)
+ select CRC32
select DCACHE_WORD_ACCESS
select DMA_DIRECT_OPS
select EDAC_SUPPORT
@@ -104,6 +105,7 @@ config ARM64
select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
@@ -142,6 +144,7 @@ config ARM64
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RCU_TABLE_FREE
+ select HAVE_RCU_TABLE_INVALIDATE
select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
@@ -479,6 +482,19 @@ config ARM64_ERRATUM_1024718
If unsure, say Y.
+config ARM64_ERRATUM_1188873
+ bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
+ default y
+ select ARM_ARCH_TIMER_OOL_WORKAROUND
+ help
+ This option adds a workaround for ARM Cortex-A76 erratum 1188873.
+
+ Affected Cortex-A76 cores (r0p0, r1p0, r2p0) can return corrupted
+ register values when the Generic Timer registers are read from
+ AArch32 userspace.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -769,9 +785,6 @@ source kernel/Kconfig.hz
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y
-config ARCH_HAS_HOLES_MEMORYMODEL
- def_bool y if SPARSEMEM
-
config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
@@ -786,7 +799,7 @@ config ARCH_FLATMEM_ENABLE
def_bool !NUMA
config HAVE_ARCH_PFN_VALID
- def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
+ def_bool y
config HW_PERF_EVENTS
def_bool y
@@ -1132,6 +1145,20 @@ config ARM64_RAS_EXTN
and access the new registers if the system supports the extension.
Platform RAS features may additionally depend on firmware support.
+config ARM64_CNP
+ bool "Enable support for Common Not Private (CNP) translations"
+ default y
+ depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
+ help
+ Common Not Private (CNP) allows translation table entries to
+ be shared between different PEs in the same inner shareable
+ domain, so the hardware can use this fact to optimise the
+ caching of such entries in the TLB.
+
+ Selecting this option allows the CNP feature to be detected
+ at runtime, and does not affect PEs that do not implement
+ this feature.
+
endmenu
config ARM64_SVE
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 393d2b524284..5a89a957641b 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -128,6 +128,7 @@ config ARCH_MVEBU
select MVEBU_ICU
select MVEBU_ODMI
select MVEBU_PIC
+ select MVEBU_SEI
select OF_GPIO
select PINCTRL
select PINCTRL_ARMADA_37XX
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index d033da401c26..fb3d2ee77c56 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -137,6 +137,8 @@
reset-names = "stmmaceth", "stmmaceth-ocp";
clocks = <&clkmgr STRATIX10_EMAC0_CLK>;
clock-names = "stmmaceth";
+ tx-fifo-depth = <16384>;
+ rx-fifo-depth = <16384>;
status = "disabled";
};
@@ -150,6 +152,8 @@
reset-names = "stmmaceth", "stmmaceth-ocp";
clocks = <&clkmgr STRATIX10_EMAC1_CLK>;
clock-names = "stmmaceth";
+ tx-fifo-depth = <16384>;
+ rx-fifo-depth = <16384>;
status = "disabled";
};
@@ -163,6 +167,8 @@
reset-names = "stmmaceth", "stmmaceth-ocp";
clocks = <&clkmgr STRATIX10_EMAC2_CLK>;
clock-names = "stmmaceth";
+ tx-fifo-depth = <16384>;
+ rx-fifo-depth = <16384>;
status = "disabled";
};
@@ -467,16 +473,51 @@
status = "disabled";
};
+ sdr: sdr@f8011100 {
+ compatible = "altr,sdr-ctl", "syscon";
+ reg = <0xf8011100 0xc0>;
+ };
+
eccmgr {
- compatible = "altr,socfpga-s10-ecc-manager";
+ compatible = "altr,socfpga-a10-ecc-manager";
+ altr,sysmgr-syscon = <&sysmgr>;
+ #address-cells = <1>;
+ #size-cells = <1>;
interrupts = <0 15 4>, <0 95 4>;
interrupt-controller;
#interrupt-cells = <2>;
+ ranges;
sdramedac {
compatible = "altr,sdram-edac-s10";
+ altr,sdr-syscon = <&sdr>;
interrupts = <16 4>, <48 4>;
};
+
+ usb0-ecc@ff8c4000 {
+ compatible = "altr,socfpga-usb-ecc";
+ reg = <0xff8c4000 0x100>;
+ altr,ecc-parent = <&usb0>;
+ interrupts = <2 4>,
+ <34 4>;
+ };
+
+ emac0-rx-ecc@ff8c0000 {
+ compatible = "altr,socfpga-eth-mac-ecc";
+ reg = <0xff8c0000 0x100>;
+ altr,ecc-parent = <&gmac0>;
+ interrupts = <4 4>,
+ <36 4>;
+ };
+
+ emac0-tx-ecc@ff8c0400 {
+ compatible = "altr,socfpga-eth-mac-ecc";
+ reg = <0xff8c0400 0x100>;
+ altr,ecc-parent = <&gmac0>;
+ interrupts = <5 4>,
+ <37 4>;
+ };
+
};
qspi: spi@ff8d2000 {
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
index 6edc4fa9fd42..7c661753bfaf 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
@@ -76,7 +76,7 @@
phy-mode = "rgmii";
phy-handle = <&phy0>;
- max-frame-size = <3800>;
+ max-frame-size = <9000>;
mdio0 {
#address-cells = <1>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index db8d364f8476..3d165b4cdd2a 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -698,6 +698,7 @@ CONFIG_MEMTEST=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
@@ -706,7 +707,6 @@ CONFIG_CRYPTO_SHA3_ARM64=m
CONFIG_CRYPTO_SM3_ARM64_CE=m
CONFIG_CRYPTO_GHASH_ARM64_CE=y
CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
-CONFIG_CRYPTO_CRC32_ARM64_CE=m
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_CHACHA20_NEON=m
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index e3fdb0fd6f70..a5606823ed4d 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -66,11 +66,6 @@ config CRYPTO_CRCT10DIF_ARM64_CE
depends on KERNEL_MODE_NEON && CRC_T10DIF
select CRYPTO_HASH
-config CRYPTO_CRC32_ARM64_CE
- tristate "CRC32 and CRC32C digest algorithms using ARMv8 extensions"
- depends on CRC32
- select CRYPTO_HASH
-
config CRYPTO_AES_ARM64
tristate "AES core cipher using scalar instructions"
select CRYPTO_AES
@@ -119,10 +114,4 @@ config CRYPTO_AES_ARM64_BS
select CRYPTO_AES_ARM64
select CRYPTO_SIMD
-config CRYPTO_SPECK_NEON
- tristate "NEON accelerated Speck cipher algorithms"
- depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
- select CRYPTO_SPECK
-
endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index bcafd016618e..f476fede09ba 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -32,9 +32,6 @@ ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM64_CE) += crct10dif-ce.o
crct10dif-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
-obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o
-crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o
-
obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o
@@ -56,9 +53,6 @@ sha512-arm64-y := sha512-glue.o sha512-core.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
-obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
-speck-neon-y := speck-neon-core.o speck-neon-glue.o
-
obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o
aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 623e74ed1c67..143070510809 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -17,6 +17,11 @@
.arch armv8-a+crypto
+ xtsmask .req v16
+
+ .macro xts_reload_mask, tmp
+ .endm
+
/* preload all round keys */
.macro load_round_keys, rounds, rk
cmp \rounds, #12
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index adcb83eb683c..1e676625ef33 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -15,6 +15,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/xts.h>
@@ -31,6 +32,8 @@
#define aes_ecb_decrypt ce_aes_ecb_decrypt
#define aes_cbc_encrypt ce_aes_cbc_encrypt
#define aes_cbc_decrypt ce_aes_cbc_decrypt
+#define aes_cbc_cts_encrypt ce_aes_cbc_cts_encrypt
+#define aes_cbc_cts_decrypt ce_aes_cbc_cts_decrypt
#define aes_ctr_encrypt ce_aes_ctr_encrypt
#define aes_xts_encrypt ce_aes_xts_encrypt
#define aes_xts_decrypt ce_aes_xts_decrypt
@@ -45,6 +48,8 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#define aes_ecb_decrypt neon_aes_ecb_decrypt
#define aes_cbc_encrypt neon_aes_cbc_encrypt
#define aes_cbc_decrypt neon_aes_cbc_decrypt
+#define aes_cbc_cts_encrypt neon_aes_cbc_cts_encrypt
+#define aes_cbc_cts_decrypt neon_aes_cbc_cts_decrypt
#define aes_ctr_encrypt neon_aes_ctr_encrypt
#define aes_xts_encrypt neon_aes_xts_encrypt
#define aes_xts_decrypt neon_aes_xts_decrypt
@@ -63,30 +68,41 @@ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
/* defined in aes-modes.S */
-asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks);
-asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks);
-asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks, u8 iv[]);
-asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks, u8 iv[]);
-asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int bytes, u8 const iv[]);
+asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int bytes, u8 const iv[]);
+
+asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks, u8 ctr[]);
-asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
- int rounds, int blocks, u8 const rk2[], u8 iv[],
+asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
+ int rounds, int blocks, u32 const rk2[], u8 iv[],
int first);
-asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
- int rounds, int blocks, u8 const rk2[], u8 iv[],
+asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
+ int rounds, int blocks, u32 const rk2[], u8 iv[],
int first);
asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
int blocks, u8 dg[], int enc_before,
int enc_after);
+struct cts_cbc_req_ctx {
+ struct scatterlist sg_src[2];
+ struct scatterlist sg_dst[2];
+ struct skcipher_request subreq;
+};
+
struct crypto_aes_xts_ctx {
struct crypto_aes_ctx key1;
struct crypto_aes_ctx __aligned(8) key2;
@@ -142,7 +158,7 @@ static int ecb_encrypt(struct skcipher_request *req)
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
kernel_neon_begin();
aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_enc, rounds, blocks);
+ ctx->key_enc, rounds, blocks);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -162,7 +178,7 @@ static int ecb_decrypt(struct skcipher_request *req)
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
kernel_neon_begin();
aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_dec, rounds, blocks);
+ ctx->key_dec, rounds, blocks);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -182,7 +198,7 @@ static int cbc_encrypt(struct skcipher_request *req)
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
kernel_neon_begin();
aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
+ ctx->key_enc, rounds, blocks, walk.iv);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -202,13 +218,149 @@ static int cbc_decrypt(struct skcipher_request *req)
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
kernel_neon_begin();
aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_dec, rounds, blocks, walk.iv);
+ ctx->key_dec, rounds, blocks, walk.iv);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
+static int cts_cbc_init_tfm(struct crypto_skcipher *tfm)
+{
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct cts_cbc_req_ctx));
+ return 0;
+}
+
+static int cts_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
+ int err, rounds = 6 + ctx->key_length / 4;
+ int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+ struct scatterlist *src = req->src, *dst = req->dst;
+ struct skcipher_walk walk;
+
+ skcipher_request_set_tfm(&rctx->subreq, tfm);
+
+ if (req->cryptlen <= AES_BLOCK_SIZE) {
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+ cbc_blocks = 1;
+ }
+
+ if (cbc_blocks > 0) {
+ unsigned int blocks;
+
+ skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
+ cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &rctx->subreq, false);
+
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ kernel_neon_begin();
+ aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_enc, rounds, blocks, walk.iv);
+ kernel_neon_end();
+ err = skcipher_walk_done(&walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ if (err)
+ return err;
+
+ if (req->cryptlen == AES_BLOCK_SIZE)
+ return 0;
+
+ dst = src = scatterwalk_ffwd(rctx->sg_src, req->src,
+ rctx->subreq.cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
+ rctx->subreq.cryptlen);
+ }
+
+ /* handle ciphertext stealing */
+ skcipher_request_set_crypt(&rctx->subreq, src, dst,
+ req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &rctx->subreq, false);
+ if (err)
+ return err;
+
+ kernel_neon_begin();
+ aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_enc, rounds, walk.nbytes, walk.iv);
+ kernel_neon_end();
+
+ return skcipher_walk_done(&walk, 0);
+}
+
+static int cts_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
+ int err, rounds = 6 + ctx->key_length / 4;
+ int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+ struct scatterlist *src = req->src, *dst = req->dst;
+ struct skcipher_walk walk;
+
+ skcipher_request_set_tfm(&rctx->subreq, tfm);
+
+ if (req->cryptlen <= AES_BLOCK_SIZE) {
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+ cbc_blocks = 1;
+ }
+
+ if (cbc_blocks > 0) {
+ unsigned int blocks;
+
+ skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
+ cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &rctx->subreq, false);
+
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ kernel_neon_begin();
+ aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_dec, rounds, blocks, walk.iv);
+ kernel_neon_end();
+ err = skcipher_walk_done(&walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ if (err)
+ return err;
+
+ if (req->cryptlen == AES_BLOCK_SIZE)
+ return 0;
+
+ dst = src = scatterwalk_ffwd(rctx->sg_src, req->src,
+ rctx->subreq.cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
+ rctx->subreq.cryptlen);
+ }
+
+ /* handle ciphertext stealing */
+ skcipher_request_set_crypt(&rctx->subreq, src, dst,
+ req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &rctx->subreq, false);
+ if (err)
+ return err;
+
+ kernel_neon_begin();
+ aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_dec, rounds, walk.nbytes, walk.iv);
+ kernel_neon_end();
+
+ return skcipher_walk_done(&walk, 0);
+}
+
static int ctr_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -222,7 +374,7 @@ static int ctr_encrypt(struct skcipher_request *req)
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
kernel_neon_begin();
aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
+ ctx->key_enc, rounds, blocks, walk.iv);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -238,7 +390,7 @@ static int ctr_encrypt(struct skcipher_request *req)
blocks = -1;
kernel_neon_begin();
- aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds,
+ aes_ctr_encrypt(tail, NULL, ctx->key_enc, rounds,
blocks, walk.iv);
kernel_neon_end();
crypto_xor_cpy(tdst, tsrc, tail, nbytes);
@@ -272,8 +424,8 @@ static int xts_encrypt(struct skcipher_request *req)
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
kernel_neon_begin();
aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key1.key_enc, rounds, blocks,
- (u8 *)ctx->key2.key_enc, walk.iv, first);
+ ctx->key1.key_enc, rounds, blocks,
+ ctx->key2.key_enc, walk.iv, first);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -294,8 +446,8 @@ static int xts_decrypt(struct skcipher_request *req)
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
kernel_neon_begin();
aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key1.key_dec, rounds, blocks,
- (u8 *)ctx->key2.key_enc, walk.iv, first);
+ ctx->key1.key_dec, rounds, blocks,
+ ctx->key2.key_enc, walk.iv, first);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -336,6 +488,24 @@ static struct skcipher_alg aes_algs[] = { {
.decrypt = cbc_decrypt,
}, {
.base = {
+ .cra_name = "__cts(cbc(aes))",
+ .cra_driver_name = "__cts-cbc-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .walksize = 2 * AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
+ .encrypt = cts_cbc_encrypt,
+ .decrypt = cts_cbc_decrypt,
+ .init = cts_cbc_init_tfm,
+}, {
+ .base = {
.cra_name = "__ctr(aes)",
.cra_driver_name = "__ctr-aes-" MODE,
.cra_priority = PRIO,
@@ -412,7 +582,6 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
{
struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
be128 *consts = (be128 *)ctx->consts;
- u8 *rk = (u8 *)ctx->key.key_enc;
int rounds = 6 + key_len / 4;
int err;
@@ -422,7 +591,8 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
/* encrypt the zero vector */
kernel_neon_begin();
- aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1);
+ aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, ctx->key.key_enc,
+ rounds, 1);
kernel_neon_end();
cmac_gf128_mul_by_x(consts, consts);
@@ -441,7 +611,6 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
};
struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
- u8 *rk = (u8 *)ctx->key.key_enc;
int rounds = 6 + key_len / 4;
u8 key[AES_BLOCK_SIZE];
int err;
@@ -451,8 +620,8 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
return err;
kernel_neon_begin();
- aes_ecb_encrypt(key, ks[0], rk, rounds, 1);
- aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2);
+ aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
+ aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
kernel_neon_end();
return cbcmac_setkey(tfm, key, sizeof(key));
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 483a7130cf0e..67700045a0e0 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -14,12 +14,12 @@
.align 4
aes_encrypt_block4x:
- encrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7
+ encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
ret
ENDPROC(aes_encrypt_block4x)
aes_decrypt_block4x:
- decrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7
+ decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
ret
ENDPROC(aes_decrypt_block4x)
@@ -31,71 +31,57 @@ ENDPROC(aes_decrypt_block4x)
*/
AES_ENTRY(aes_ecb_encrypt)
- frame_push 5
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
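+
+ // with the NEON yield handling gone, a plain frame record is all
+ // the prologue needs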
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
-
-.Lecbencrestart:
- enc_prepare w22, x21, x5
+ enc_prepare w3, x2, x5
.LecbencloopNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lecbenc1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
bl aes_encrypt_block4x
- st1 {v0.16b-v3.16b}, [x19], #64
- cond_yield_neon .Lecbencrestart
+ st1 {v0.16b-v3.16b}, [x0], #64
b .LecbencloopNx
.Lecbenc1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lecbencout
.Lecbencloop:
- ld1 {v0.16b}, [x20], #16 /* get next pt block */
- encrypt_block v0, w22, x21, x5, w6
- st1 {v0.16b}, [x19], #16
- subs w23, w23, #1
+ ld1 {v0.16b}, [x1], #16 /* get next pt block */
+ encrypt_block v0, w3, x2, x5, w6
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
bne .Lecbencloop
.Lecbencout:
- frame_pop
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_ecb_encrypt)
AES_ENTRY(aes_ecb_decrypt)
- frame_push 5
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
-
-.Lecbdecrestart:
- dec_prepare w22, x21, x5
+ dec_prepare w3, x2, x5
.LecbdecloopNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lecbdec1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
bl aes_decrypt_block4x
- st1 {v0.16b-v3.16b}, [x19], #64
- cond_yield_neon .Lecbdecrestart
+ st1 {v0.16b-v3.16b}, [x0], #64
b .LecbdecloopNx
.Lecbdec1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lecbdecout
.Lecbdecloop:
- ld1 {v0.16b}, [x20], #16 /* get next ct block */
- decrypt_block v0, w22, x21, x5, w6
- st1 {v0.16b}, [x19], #16
- subs w23, w23, #1
+ ld1 {v0.16b}, [x1], #16 /* get next ct block */
+ decrypt_block v0, w3, x2, x5, w6
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
bne .Lecbdecloop
.Lecbdecout:
- frame_pop
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_ecb_decrypt)
@@ -108,162 +94,211 @@ AES_ENDPROC(aes_ecb_decrypt)
*/
AES_ENTRY(aes_cbc_encrypt)
- frame_push 6
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
-
-.Lcbcencrestart:
- ld1 {v4.16b}, [x24] /* get iv */
- enc_prepare w22, x21, x6
+ ld1 {v4.16b}, [x5] /* get iv */
+ enc_prepare w3, x2, x6
.Lcbcencloop4x:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lcbcenc1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */
- encrypt_block v0, w22, x21, x6, w7
+ encrypt_block v0, w3, x2, x6, w7
eor v1.16b, v1.16b, v0.16b
- encrypt_block v1, w22, x21, x6, w7
+ encrypt_block v1, w3, x2, x6, w7
eor v2.16b, v2.16b, v1.16b
- encrypt_block v2, w22, x21, x6, w7
+ encrypt_block v2, w3, x2, x6, w7
eor v3.16b, v3.16b, v2.16b
- encrypt_block v3, w22, x21, x6, w7
- st1 {v0.16b-v3.16b}, [x19], #64
+ encrypt_block v3, w3, x2, x6, w7
+ st1 {v0.16b-v3.16b}, [x0], #64
mov v4.16b, v3.16b
- st1 {v4.16b}, [x24] /* return iv */
- cond_yield_neon .Lcbcencrestart
b .Lcbcencloop4x
.Lcbcenc1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lcbcencout
.Lcbcencloop:
- ld1 {v0.16b}, [x20], #16 /* get next pt block */
+ ld1 {v0.16b}, [x1], #16 /* get next pt block */
eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */
- encrypt_block v4, w22, x21, x6, w7
- st1 {v4.16b}, [x19], #16
- subs w23, w23, #1
+ encrypt_block v4, w3, x2, x6, w7
+ st1 {v4.16b}, [x0], #16
+ subs w4, w4, #1
bne .Lcbcencloop
.Lcbcencout:
- st1 {v4.16b}, [x24] /* return iv */
- frame_pop
+ st1 {v4.16b}, [x5] /* return iv */
ret
AES_ENDPROC(aes_cbc_encrypt)
AES_ENTRY(aes_cbc_decrypt)
- frame_push 6
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
-.Lcbcdecrestart:
- ld1 {v7.16b}, [x24] /* get iv */
- dec_prepare w22, x21, x6
+ ld1 {v7.16b}, [x5] /* get iv */
+ dec_prepare w3, x2, x6
.LcbcdecloopNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lcbcdec1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
mov v4.16b, v0.16b
mov v5.16b, v1.16b
mov v6.16b, v2.16b
bl aes_decrypt_block4x
- sub x20, x20, #16
+ sub x1, x1, #16
eor v0.16b, v0.16b, v7.16b
eor v1.16b, v1.16b, v4.16b
- ld1 {v7.16b}, [x20], #16 /* reload 1 ct block */
+ ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */
eor v2.16b, v2.16b, v5.16b
eor v3.16b, v3.16b, v6.16b
- st1 {v0.16b-v3.16b}, [x19], #64
- st1 {v7.16b}, [x24] /* return iv */
- cond_yield_neon .Lcbcdecrestart
+ st1 {v0.16b-v3.16b}, [x0], #64
b .LcbcdecloopNx
.Lcbcdec1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lcbcdecout
.Lcbcdecloop:
- ld1 {v1.16b}, [x20], #16 /* get next ct block */
+ ld1 {v1.16b}, [x1], #16 /* get next ct block */
mov v0.16b, v1.16b /* ...and copy to v0 */
- decrypt_block v0, w22, x21, x6, w7
+ decrypt_block v0, w3, x2, x6, w7
eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
mov v7.16b, v1.16b /* ct is next iv */
- st1 {v0.16b}, [x19], #16
- subs w23, w23, #1
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
bne .Lcbcdecloop
.Lcbcdecout:
- st1 {v7.16b}, [x24] /* return iv */
- frame_pop
+ st1 {v7.16b}, [x5] /* return iv */
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_cbc_decrypt)
/*
+ * aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ * int rounds, int bytes, u8 const iv[])
+ * aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
+ * int rounds, int bytes, u8 const iv[])
+ */
+
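+/*
+ * The final 16 < bytes <= 32 bytes are processed entirely in registers:
+ * the last two blocks are fetched with overlapping 16 byte loads,
+ * spliced and swapped via the permute table below, and written back
+ * with overlapping stores.
+ */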
+AES_ENTRY(aes_cbc_cts_encrypt)
+ adr_l x8, .Lcts_permute_table
+ sub x4, x4, #16
+ add x9, x8, #32
+ add x8, x8, x4
+ sub x9, x9, x4
+ ld1 {v3.16b}, [x8]
+ ld1 {v4.16b}, [x9]
+
+ ld1 {v0.16b}, [x1], x4 /* overlapping loads */
+ ld1 {v1.16b}, [x1]
+
+ ld1 {v5.16b}, [x5] /* get iv */
+ enc_prepare w3, x2, x6
+
+ eor v0.16b, v0.16b, v5.16b /* xor with iv */
+ tbl v1.16b, {v1.16b}, v4.16b
+ encrypt_block v0, w3, x2, x6, w7
+
+ eor v1.16b, v1.16b, v0.16b
+ tbl v0.16b, {v0.16b}, v3.16b
+ encrypt_block v1, w3, x2, x6, w7
+
+ add x4, x0, x4
+ st1 {v0.16b}, [x4] /* overlapping stores */
+ st1 {v1.16b}, [x0]
+ ret
+AES_ENDPROC(aes_cbc_cts_encrypt)
+
+AES_ENTRY(aes_cbc_cts_decrypt)
+ adr_l x8, .Lcts_permute_table
+ sub x4, x4, #16
+ add x9, x8, #32
+ add x8, x8, x4
+ sub x9, x9, x4
+ ld1 {v3.16b}, [x8]
+ ld1 {v4.16b}, [x9]
+
+ ld1 {v0.16b}, [x1], x4 /* overlapping loads */
+ ld1 {v1.16b}, [x1]
+
+ ld1 {v5.16b}, [x5] /* get iv */
+ dec_prepare w3, x2, x6
+
+ tbl v2.16b, {v1.16b}, v4.16b
+ decrypt_block v0, w3, x2, x6, w7
+ eor v2.16b, v2.16b, v0.16b
+
+ tbx v0.16b, {v1.16b}, v4.16b
+ tbl v2.16b, {v2.16b}, v3.16b
+ decrypt_block v0, w3, x2, x6, w7
+ eor v0.16b, v0.16b, v5.16b /* xor with iv */
+
+ add x4, x0, x4
+ st1 {v2.16b}, [x4] /* overlapping stores */
+ st1 {v0.16b}, [x0]
+ ret
+AES_ENDPROC(aes_cbc_cts_decrypt)
+
+ .section ".rodata", "a"
+ .align 6
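+ // 0xff entries produce zero bytes with tbl (or preserve the
+ // destination byte with tbx); indexing into the table by the tail
+ // length yields the byte-shift masks used above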
+.Lcts_permute_table:
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+ .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .previous
+
+
+ /*
* aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 ctr[])
*/
AES_ENTRY(aes_ctr_encrypt)
- frame_push 6
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
-
-.Lctrrestart:
- enc_prepare w22, x21, x6
- ld1 {v4.16b}, [x24]
+ enc_prepare w3, x2, x6
+ ld1 {v4.16b}, [x5]
umov x6, v4.d[1] /* keep swabbed ctr in reg */
rev x6, x6
+ cmn w6, w4 /* 32 bit overflow? */
+ bcs .Lctrloop
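+ // if the low 32 counter bits would wrap during this call, use the
+ // single-block loop, which propagates the carry into the upper half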
.LctrloopNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lctr1x
- cmn w6, #4 /* 32 bit overflow? */
- bcs .Lctr1x
- ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
- dup v7.4s, w6
+ add w7, w6, #1
mov v0.16b, v4.16b
- add v7.4s, v7.4s, v8.4s
+ add w8, w6, #2
mov v1.16b, v4.16b
- rev32 v8.16b, v7.16b
+ add w9, w6, #3
mov v2.16b, v4.16b
+ rev w7, w7
mov v3.16b, v4.16b
- mov v1.s[3], v8.s[0]
- mov v2.s[3], v8.s[1]
- mov v3.s[3], v8.s[2]
- ld1 {v5.16b-v7.16b}, [x20], #48 /* get 3 input blocks */
+ rev w8, w8
+ mov v1.s[3], w7
+ rev w9, w9
+ mov v2.s[3], w8
+ mov v3.s[3], w9
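+ // v1-v3 now hold the counter block with lane 3 patched to the
+ // big-endian values ctr+1..ctr+3, computed in scalar registers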
+ ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
bl aes_encrypt_block4x
eor v0.16b, v5.16b, v0.16b
- ld1 {v5.16b}, [x20], #16 /* get 1 input block */
+ ld1 {v5.16b}, [x1], #16 /* get 1 input block */
eor v1.16b, v6.16b, v1.16b
eor v2.16b, v7.16b, v2.16b
eor v3.16b, v5.16b, v3.16b
- st1 {v0.16b-v3.16b}, [x19], #64
+ st1 {v0.16b-v3.16b}, [x0], #64
add x6, x6, #4
rev x7, x6
ins v4.d[1], x7
- cbz w23, .Lctrout
- st1 {v4.16b}, [x24] /* return next CTR value */
- cond_yield_neon .Lctrrestart
+ cbz w4, .Lctrout
b .LctrloopNx
.Lctr1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lctrout
.Lctrloop:
mov v0.16b, v4.16b
- encrypt_block v0, w22, x21, x8, w7
+ encrypt_block v0, w3, x2, x8, w7
adds x6, x6, #1 /* increment BE ctr */
rev x7, x6
@@ -271,22 +306,22 @@ AES_ENTRY(aes_ctr_encrypt)
bcs .Lctrcarry /* overflow? */
.Lctrcarrydone:
- subs w23, w23, #1
+ subs w4, w4, #1
bmi .Lctrtailblock /* blocks <0 means tail block */
- ld1 {v3.16b}, [x20], #16
+ ld1 {v3.16b}, [x1], #16
eor v3.16b, v0.16b, v3.16b
- st1 {v3.16b}, [x19], #16
+ st1 {v3.16b}, [x0], #16
bne .Lctrloop
.Lctrout:
- st1 {v4.16b}, [x24] /* return next CTR value */
-.Lctrret:
- frame_pop
+ st1 {v4.16b}, [x5] /* return next CTR value */
+ ldp x29, x30, [sp], #16
ret
.Lctrtailblock:
- st1 {v0.16b}, [x19]
- b .Lctrret
+ st1 {v0.16b}, [x0]
+ ldp x29, x30, [sp], #16
+ ret
.Lctrcarry:
umov x7, v4.d[0] /* load upper word of ctr */
@@ -296,7 +331,6 @@ AES_ENTRY(aes_ctr_encrypt)
ins v4.d[0], x7
b .Lctrcarrydone
AES_ENDPROC(aes_ctr_encrypt)
- .ltorg
/*
@@ -306,150 +340,132 @@ AES_ENDPROC(aes_ctr_encrypt)
* int blocks, u8 const rk2[], u8 iv[], int first)
*/
- .macro next_tweak, out, in, const, tmp
+ .macro next_tweak, out, in, tmp
sshr \tmp\().2d, \in\().2d, #63
- and \tmp\().16b, \tmp\().16b, \const\().16b
+ and \tmp\().16b, \tmp\().16b, xtsmask.16b
add \out\().2d, \in\().2d, \in\().2d
ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
eor \out\().16b, \out\().16b, \tmp\().16b
.endm
-.Lxts_mul_x:
-CPU_LE( .quad 1, 0x87 )
-CPU_BE( .quad 0x87, 1 )
+ .macro xts_load_mask, tmp
+ movi xtsmask.2s, #0x1
+ movi \tmp\().2s, #0x87
+ uzp1 xtsmask.4s, xtsmask.4s, \tmp\().4s
+ .endm
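+
+ // xtsmask ends up as { 0x1, 0x87 } across the two 64-bit lanes,
+ // i.e. the GF(2^128) reduction constant used by next_tweak, built
+ // from immediates rather than loaded from a literal pool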
AES_ENTRY(aes_xts_encrypt)
- frame_push 6
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x6
-
- ld1 {v4.16b}, [x24]
+ ld1 {v4.16b}, [x6]
+ xts_load_mask v8
cbz w7, .Lxtsencnotfirst
enc_prepare w3, x5, x8
encrypt_block v4, w3, x5, x8, w7 /* first tweak */
enc_switch_key w3, x2, x8
- ldr q7, .Lxts_mul_x
b .LxtsencNx
-.Lxtsencrestart:
- ld1 {v4.16b}, [x24]
.Lxtsencnotfirst:
- enc_prepare w22, x21, x8
+ enc_prepare w3, x2, x8
.LxtsencloopNx:
- ldr q7, .Lxts_mul_x
- next_tweak v4, v4, v7, v8
+ next_tweak v4, v4, v8
.LxtsencNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lxtsenc1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */
- next_tweak v5, v4, v7, v8
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
+ next_tweak v5, v4, v8
eor v0.16b, v0.16b, v4.16b
- next_tweak v6, v5, v7, v8
+ next_tweak v6, v5, v8
eor v1.16b, v1.16b, v5.16b
eor v2.16b, v2.16b, v6.16b
- next_tweak v7, v6, v7, v8
+ next_tweak v7, v6, v8
eor v3.16b, v3.16b, v7.16b
bl aes_encrypt_block4x
eor v3.16b, v3.16b, v7.16b
eor v0.16b, v0.16b, v4.16b
eor v1.16b, v1.16b, v5.16b
eor v2.16b, v2.16b, v6.16b
- st1 {v0.16b-v3.16b}, [x19], #64
+ st1 {v0.16b-v3.16b}, [x0], #64
mov v4.16b, v7.16b
- cbz w23, .Lxtsencout
- st1 {v4.16b}, [x24]
- cond_yield_neon .Lxtsencrestart
+ cbz w4, .Lxtsencout
+ xts_reload_mask v8
b .LxtsencloopNx
.Lxtsenc1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lxtsencout
.Lxtsencloop:
- ld1 {v1.16b}, [x20], #16
+ ld1 {v1.16b}, [x1], #16
eor v0.16b, v1.16b, v4.16b
- encrypt_block v0, w22, x21, x8, w7
+ encrypt_block v0, w3, x2, x8, w7
eor v0.16b, v0.16b, v4.16b
- st1 {v0.16b}, [x19], #16
- subs w23, w23, #1
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
beq .Lxtsencout
- next_tweak v4, v4, v7, v8
+ next_tweak v4, v4, v8
b .Lxtsencloop
.Lxtsencout:
- st1 {v4.16b}, [x24]
- frame_pop
+ st1 {v4.16b}, [x6]
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_xts_encrypt)
AES_ENTRY(aes_xts_decrypt)
- frame_push 6
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x6
-
- ld1 {v4.16b}, [x24]
+ ld1 {v4.16b}, [x6]
+ xts_load_mask v8
cbz w7, .Lxtsdecnotfirst
enc_prepare w3, x5, x8
encrypt_block v4, w3, x5, x8, w7 /* first tweak */
dec_prepare w3, x2, x8
- ldr q7, .Lxts_mul_x
b .LxtsdecNx
-.Lxtsdecrestart:
- ld1 {v4.16b}, [x24]
.Lxtsdecnotfirst:
- dec_prepare w22, x21, x8
+ dec_prepare w3, x2, x8
.LxtsdecloopNx:
- ldr q7, .Lxts_mul_x
- next_tweak v4, v4, v7, v8
+ next_tweak v4, v4, v8
.LxtsdecNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lxtsdec1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */
- next_tweak v5, v4, v7, v8
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
+ next_tweak v5, v4, v8
eor v0.16b, v0.16b, v4.16b
- next_tweak v6, v5, v7, v8
+ next_tweak v6, v5, v8
eor v1.16b, v1.16b, v5.16b
eor v2.16b, v2.16b, v6.16b
- next_tweak v7, v6, v7, v8
+ next_tweak v7, v6, v8
eor v3.16b, v3.16b, v7.16b
bl aes_decrypt_block4x
eor v3.16b, v3.16b, v7.16b
eor v0.16b, v0.16b, v4.16b
eor v1.16b, v1.16b, v5.16b
eor v2.16b, v2.16b, v6.16b
- st1 {v0.16b-v3.16b}, [x19], #64
+ st1 {v0.16b-v3.16b}, [x0], #64
mov v4.16b, v7.16b
- cbz w23, .Lxtsdecout
- st1 {v4.16b}, [x24]
- cond_yield_neon .Lxtsdecrestart
+ cbz w4, .Lxtsdecout
+ xts_reload_mask v8
b .LxtsdecloopNx
.Lxtsdec1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lxtsdecout
.Lxtsdecloop:
- ld1 {v1.16b}, [x20], #16
+ ld1 {v1.16b}, [x1], #16
eor v0.16b, v1.16b, v4.16b
- decrypt_block v0, w22, x21, x8, w7
+ decrypt_block v0, w3, x2, x8, w7
eor v0.16b, v0.16b, v4.16b
- st1 {v0.16b}, [x19], #16
- subs w23, w23, #1
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
beq .Lxtsdecout
- next_tweak v4, v4, v7, v8
+ next_tweak v4, v4, v8
b .Lxtsdecloop
.Lxtsdecout:
- st1 {v4.16b}, [x24]
- frame_pop
+ st1 {v4.16b}, [x6]
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_xts_decrypt)
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index 1c7b45b7268e..29100f692e8a 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -14,6 +14,12 @@
#define AES_ENTRY(func) ENTRY(neon_ ## func)
#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
+ xtsmask .req v7
+
+ .macro xts_reload_mask, tmp
+ xts_load_mask \tmp
+ .endm
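+
+ // v7 is clobbered as a tweak register in the four-way XTS path, so
+ // the NEON variant must rebuild the mask after each 4-block round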
+
/* multiply by polynomial 'x' in GF(2^8) */
.macro mul_by_x, out, in, temp, const
sshr \temp, \in, #7
diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S
deleted file mode 100644
index 8061bf0f9c66..000000000000
--- a/arch/arm64/crypto/crc32-ce-core.S
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Accelerated CRC32(C) using arm64 CRC, NEON and Crypto Extensions instructions
- *
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright 2012 Xyratex Technology Limited
- *
- * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
- * calculation.
- * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
- * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
- * at:
- * http://www.intel.com/products/processor/manuals/
- * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
- * Volume 2B: Instruction Set Reference, N-Z
- *
- * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com>
- * Alexander Boyko <Alexander_Boyko@xyratex.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
- .section ".rodata", "a"
- .align 6
- .cpu generic+crypto+crc
-
-.Lcrc32_constants:
- /*
- * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
- * #define CONSTANT_R1 0x154442bd4LL
- *
- * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596
- * #define CONSTANT_R2 0x1c6e41596LL
- */
- .octa 0x00000001c6e415960000000154442bd4
-
- /*
- * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0
- * #define CONSTANT_R3 0x1751997d0LL
- *
- * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e
- * #define CONSTANT_R4 0x0ccaa009eLL
- */
- .octa 0x00000000ccaa009e00000001751997d0
-
- /*
- * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124
- * #define CONSTANT_R5 0x163cd6124LL
- */
- .quad 0x0000000163cd6124
- .quad 0x00000000FFFFFFFF
-
- /*
- * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
- *
- * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
- * = 0x1F7011641LL
- * #define CONSTANT_RU 0x1F7011641LL
- */
- .octa 0x00000001F701164100000001DB710641
-
-.Lcrc32c_constants:
- .octa 0x000000009e4addf800000000740eef02
- .octa 0x000000014cd00bd600000000f20c0dfe
- .quad 0x00000000dd45aab8
- .quad 0x00000000FFFFFFFF
- .octa 0x00000000dea713f10000000105ec76f0
-
- vCONSTANT .req v0
- dCONSTANT .req d0
- qCONSTANT .req q0
-
- BUF .req x19
- LEN .req x20
- CRC .req x21
- CONST .req x22
-
- vzr .req v9
-
- /**
- * Calculate crc32
- * BUF - buffer
- * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
- * CRC - initial crc32
- * return %eax crc32
- * uint crc32_pmull_le(unsigned char const *buffer,
- * size_t len, uint crc32)
- */
- .text
-ENTRY(crc32_pmull_le)
- adr_l x3, .Lcrc32_constants
- b 0f
-
-ENTRY(crc32c_pmull_le)
- adr_l x3, .Lcrc32c_constants
-
-0: frame_push 4, 64
-
- mov BUF, x0
- mov LEN, x1
- mov CRC, x2
- mov CONST, x3
-
- bic LEN, LEN, #15
- ld1 {v1.16b-v4.16b}, [BUF], #0x40
- movi vzr.16b, #0
- fmov dCONSTANT, CRC
- eor v1.16b, v1.16b, vCONSTANT.16b
- sub LEN, LEN, #0x40
- cmp LEN, #0x40
- b.lt less_64
-
- ldr qCONSTANT, [CONST]
-
-loop_64: /* 64 bytes Full cache line folding */
- sub LEN, LEN, #0x40
-
- pmull2 v5.1q, v1.2d, vCONSTANT.2d
- pmull2 v6.1q, v2.2d, vCONSTANT.2d
- pmull2 v7.1q, v3.2d, vCONSTANT.2d
- pmull2 v8.1q, v4.2d, vCONSTANT.2d
-
- pmull v1.1q, v1.1d, vCONSTANT.1d
- pmull v2.1q, v2.1d, vCONSTANT.1d
- pmull v3.1q, v3.1d, vCONSTANT.1d
- pmull v4.1q, v4.1d, vCONSTANT.1d
-
- eor v1.16b, v1.16b, v5.16b
- ld1 {v5.16b}, [BUF], #0x10
- eor v2.16b, v2.16b, v6.16b
- ld1 {v6.16b}, [BUF], #0x10
- eor v3.16b, v3.16b, v7.16b
- ld1 {v7.16b}, [BUF], #0x10
- eor v4.16b, v4.16b, v8.16b
- ld1 {v8.16b}, [BUF], #0x10
-
- eor v1.16b, v1.16b, v5.16b
- eor v2.16b, v2.16b, v6.16b
- eor v3.16b, v3.16b, v7.16b
- eor v4.16b, v4.16b, v8.16b
-
- cmp LEN, #0x40
- b.lt less_64
-
- if_will_cond_yield_neon
- stp q1, q2, [sp, #.Lframe_local_offset]
- stp q3, q4, [sp, #.Lframe_local_offset + 32]
- do_cond_yield_neon
- ldp q1, q2, [sp, #.Lframe_local_offset]
- ldp q3, q4, [sp, #.Lframe_local_offset + 32]
- ldr qCONSTANT, [CONST]
- movi vzr.16b, #0
- endif_yield_neon
- b loop_64
-
-less_64: /* Folding cache line into 128bit */
- ldr qCONSTANT, [CONST, #16]
-
- pmull2 v5.1q, v1.2d, vCONSTANT.2d
- pmull v1.1q, v1.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v5.16b
- eor v1.16b, v1.16b, v2.16b
-
- pmull2 v5.1q, v1.2d, vCONSTANT.2d
- pmull v1.1q, v1.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v5.16b
- eor v1.16b, v1.16b, v3.16b
-
- pmull2 v5.1q, v1.2d, vCONSTANT.2d
- pmull v1.1q, v1.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v5.16b
- eor v1.16b, v1.16b, v4.16b
-
- cbz LEN, fold_64
-
-loop_16: /* Folding rest buffer into 128bit */
- subs LEN, LEN, #0x10
-
- ld1 {v2.16b}, [BUF], #0x10
- pmull2 v5.1q, v1.2d, vCONSTANT.2d
- pmull v1.1q, v1.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v5.16b
- eor v1.16b, v1.16b, v2.16b
-
- b.ne loop_16
-
-fold_64:
- /* perform the last 64 bit fold, also adds 32 zeroes
- * to the input stream */
- ext v2.16b, v1.16b, v1.16b, #8
- pmull2 v2.1q, v2.2d, vCONSTANT.2d
- ext v1.16b, v1.16b, vzr.16b, #8
- eor v1.16b, v1.16b, v2.16b
-
- /* final 32-bit fold */
- ldr dCONSTANT, [CONST, #32]
- ldr d3, [CONST, #40]
-
- ext v2.16b, v1.16b, vzr.16b, #4
- and v1.16b, v1.16b, v3.16b
- pmull v1.1q, v1.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v2.16b
-
- /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
- ldr qCONSTANT, [CONST, #48]
-
- and v2.16b, v1.16b, v3.16b
- ext v2.16b, vzr.16b, v2.16b, #8
- pmull2 v2.1q, v2.2d, vCONSTANT.2d
- and v2.16b, v2.16b, v3.16b
- pmull v2.1q, v2.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v2.16b
- mov w0, v1.s[1]
-
- frame_pop
- ret
-ENDPROC(crc32_pmull_le)
-ENDPROC(crc32c_pmull_le)
-
- .macro __crc32, c
-0: subs x2, x2, #16
- b.mi 8f
- ldp x3, x4, [x1], #16
-CPU_BE( rev x3, x3 )
-CPU_BE( rev x4, x4 )
- crc32\c\()x w0, w0, x3
- crc32\c\()x w0, w0, x4
- b.ne 0b
- ret
-
-8: tbz x2, #3, 4f
- ldr x3, [x1], #8
-CPU_BE( rev x3, x3 )
- crc32\c\()x w0, w0, x3
-4: tbz x2, #2, 2f
- ldr w3, [x1], #4
-CPU_BE( rev w3, w3 )
- crc32\c\()w w0, w0, w3
-2: tbz x2, #1, 1f
- ldrh w3, [x1], #2
-CPU_BE( rev16 w3, w3 )
- crc32\c\()h w0, w0, w3
-1: tbz x2, #0, 0f
- ldrb w3, [x1]
- crc32\c\()b w0, w0, w3
-0: ret
- .endm
-
- .align 5
-ENTRY(crc32_armv8_le)
- __crc32
-ENDPROC(crc32_armv8_le)
-
- .align 5
-ENTRY(crc32c_armv8_le)
- __crc32 c
-ENDPROC(crc32c_armv8_le)
diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
deleted file mode 100644
index 34b4e3d46aab..000000000000
--- a/arch/arm64/crypto/crc32-ce-glue.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Accelerated CRC32(C) using arm64 NEON and Crypto Extensions instructions
- *
- * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/cpufeature.h>
-#include <linux/crc32.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <crypto/internal/hash.h>
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <asm/unaligned.h>
-
-#define PMULL_MIN_LEN 64L /* minimum size of buffer
- * for crc32_pmull_le_16 */
-#define SCALE_F 16L /* size of NEON register */
-
-asmlinkage u32 crc32_pmull_le(const u8 buf[], u64 len, u32 init_crc);
-asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], size_t len);
-
-asmlinkage u32 crc32c_pmull_le(const u8 buf[], u64 len, u32 init_crc);
-asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], size_t len);
-
-static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], size_t len);
-static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], size_t len);
-
-static int crc32_pmull_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = 0;
- return 0;
-}
-
-static int crc32c_pmull_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = ~0;
- return 0;
-}
-
-static int crc32_pmull_setkey(struct crypto_shash *hash, const u8 *key,
- unsigned int keylen)
-{
- u32 *mctx = crypto_shash_ctx(hash);
-
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- *mctx = le32_to_cpup((__le32 *)key);
- return 0;
-}
-
-static int crc32_pmull_init(struct shash_desc *desc)
-{
- u32 *mctx = crypto_shash_ctx(desc->tfm);
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = *mctx;
- return 0;
-}
-
-static int crc32_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = crc32_armv8_le(*crc, data, length);
- return 0;
-}
-
-static int crc32c_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = crc32c_armv8_le(*crc, data, length);
- return 0;
-}
-
-static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
- unsigned int l;
-
- if ((u64)data % SCALE_F) {
- l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F));
-
- *crc = fallback_crc32(*crc, data, l);
-
- data += l;
- length -= l;
- }
-
- if (length >= PMULL_MIN_LEN && may_use_simd()) {
- l = round_down(length, SCALE_F);
-
- kernel_neon_begin();
- *crc = crc32_pmull_le(data, l, *crc);
- kernel_neon_end();
-
- data += l;
- length -= l;
- }
-
- if (length > 0)
- *crc = fallback_crc32(*crc, data, length);
-
- return 0;
-}
-
-static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
- unsigned int l;
-
- if ((u64)data % SCALE_F) {
- l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F));
-
- *crc = fallback_crc32c(*crc, data, l);
-
- data += l;
- length -= l;
- }
-
- if (length >= PMULL_MIN_LEN && may_use_simd()) {
- l = round_down(length, SCALE_F);
-
- kernel_neon_begin();
- *crc = crc32c_pmull_le(data, l, *crc);
- kernel_neon_end();
-
- data += l;
- length -= l;
- }
-
- if (length > 0) {
- *crc = fallback_crc32c(*crc, data, length);
- }
-
- return 0;
-}
-
-static int crc32_pmull_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le32(*crc, out);
- return 0;
-}
-
-static int crc32c_pmull_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le32(~*crc, out);
- return 0;
-}
-
-static struct shash_alg crc32_pmull_algs[] = { {
- .setkey = crc32_pmull_setkey,
- .init = crc32_pmull_init,
- .update = crc32_update,
- .final = crc32_pmull_final,
- .descsize = sizeof(u32),
- .digestsize = sizeof(u32),
-
- .base.cra_ctxsize = sizeof(u32),
- .base.cra_init = crc32_pmull_cra_init,
- .base.cra_name = "crc32",
- .base.cra_driver_name = "crc32-arm64-ce",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = 1,
- .base.cra_module = THIS_MODULE,
-}, {
- .setkey = crc32_pmull_setkey,
- .init = crc32_pmull_init,
- .update = crc32c_update,
- .final = crc32c_pmull_final,
- .descsize = sizeof(u32),
- .digestsize = sizeof(u32),
-
- .base.cra_ctxsize = sizeof(u32),
- .base.cra_init = crc32c_pmull_cra_init,
- .base.cra_name = "crc32c",
- .base.cra_driver_name = "crc32c-arm64-ce",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = 1,
- .base.cra_module = THIS_MODULE,
-} };
-
-static int __init crc32_pmull_mod_init(void)
-{
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_PMULL)) {
- crc32_pmull_algs[0].update = crc32_pmull_update;
- crc32_pmull_algs[1].update = crc32c_pmull_update;
-
- if (elf_hwcap & HWCAP_CRC32) {
- fallback_crc32 = crc32_armv8_le;
- fallback_crc32c = crc32c_armv8_le;
- } else {
- fallback_crc32 = crc32_le;
- fallback_crc32c = __crc32c_le;
- }
- } else if (!(elf_hwcap & HWCAP_CRC32)) {
- return -ENODEV;
- }
- return crypto_register_shashes(crc32_pmull_algs,
- ARRAY_SIZE(crc32_pmull_algs));
-}
-
-static void __exit crc32_pmull_mod_exit(void)
-{
- crypto_unregister_shashes(crc32_pmull_algs,
- ARRAY_SIZE(crc32_pmull_algs));
-}
-
-static const struct cpu_feature crc32_cpu_feature[] = {
- { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { }
-};
-MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature);
-
-module_init(crc32_pmull_mod_init);
-module_exit(crc32_pmull_mod_exit);
-
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
index 663ea71cdb38..9e82e8e8ed05 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -80,7 +80,186 @@
vzr .req v13
-ENTRY(crc_t10dif_pmull)
+ ad .req v14
+ bd .req v10
+
+ k00_16 .req v15
+ k32_48 .req v16
+
+ t3 .req v17
+ t4 .req v18
+ t5 .req v19
+ t6 .req v20
+ t7 .req v21
+ t8 .req v22
+ t9 .req v23
+
+ perm1 .req v24
+ perm2 .req v25
+ perm3 .req v26
+ perm4 .req v27
+
+ bd1 .req v28
+ bd2 .req v29
+ bd3 .req v30
+ bd4 .req v31
+
+ .macro __pmull_init_p64
+ .endm
+
+ .macro __pmull_pre_p64, bd
+ .endm
+
+ .macro __pmull_init_p8
+ // k00_16 := 0x0000000000000000_000000000000ffff
+ // k32_48 := 0x00000000ffffffff_0000ffffffffffff
+ movi k32_48.2d, #0xffffffff
+ mov k32_48.h[2], k32_48.h[0]
+ ushr k00_16.2d, k32_48.2d, #32
+
+ // prepare the permutation vectors
+ mov_q x5, 0x080f0e0d0c0b0a09
+ movi perm4.8b, #8
+ dup perm1.2d, x5
+ eor perm1.16b, perm1.16b, perm4.16b
+ ushr perm2.2d, perm1.2d, #8
+ ushr perm3.2d, perm1.2d, #16
+ ushr perm4.2d, perm1.2d, #24
+ sli perm2.2d, perm1.2d, #56
+ sli perm3.2d, perm1.2d, #48
+ sli perm4.2d, perm1.2d, #40
+ .endm
+
+ .macro __pmull_pre_p8, bd
+ tbl bd1.16b, {\bd\().16b}, perm1.16b
+ tbl bd2.16b, {\bd\().16b}, perm2.16b
+ tbl bd3.16b, {\bd\().16b}, perm3.16b
+ tbl bd4.16b, {\bd\().16b}, perm4.16b
+ .endm
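+
+ // The p8 code path emulates the 64x64 -> 128 bit polynomial multiply
+ // using only 8-bit pmull: byte-rotated copies of one operand
+ // (prepared above) are multiplied and the partial products are
+ // recombined in __pmull_p8_core below.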
+
+__pmull_p8_core:
+.L__pmull_p8_core:
+ ext t4.8b, ad.8b, ad.8b, #1 // A1
+ ext t5.8b, ad.8b, ad.8b, #2 // A2
+ ext t6.8b, ad.8b, ad.8b, #3 // A3
+
+ pmull t4.8h, t4.8b, bd.8b // F = A1*B
+ pmull t8.8h, ad.8b, bd1.8b // E = A*B1
+ pmull t5.8h, t5.8b, bd.8b // H = A2*B
+ pmull t7.8h, ad.8b, bd2.8b // G = A*B2
+ pmull t6.8h, t6.8b, bd.8b // J = A3*B
+ pmull t9.8h, ad.8b, bd3.8b // I = A*B3
+ pmull t3.8h, ad.8b, bd4.8b // K = A*B4
+ b 0f
+
+.L__pmull_p8_core2:
+ tbl t4.16b, {ad.16b}, perm1.16b // A1
+ tbl t5.16b, {ad.16b}, perm2.16b // A2
+ tbl t6.16b, {ad.16b}, perm3.16b // A3
+
+ pmull2 t4.8h, t4.16b, bd.16b // F = A1*B
+ pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1
+ pmull2 t5.8h, t5.16b, bd.16b // H = A2*B
+ pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2
+ pmull2 t6.8h, t6.16b, bd.16b // J = A3*B
+ pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3
+ pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4
+
+0: eor t4.16b, t4.16b, t8.16b // L = E + F
+ eor t5.16b, t5.16b, t7.16b // M = G + H
+ eor t6.16b, t6.16b, t9.16b // N = I + J
+
+ uzp1 t8.2d, t4.2d, t5.2d
+ uzp2 t4.2d, t4.2d, t5.2d
+ uzp1 t7.2d, t6.2d, t3.2d
+ uzp2 t6.2d, t6.2d, t3.2d
+
+ // t4 = (L) (P0 + P1) << 8
+ // t5 = (M) (P2 + P3) << 16
+ eor t8.16b, t8.16b, t4.16b
+ and t4.16b, t4.16b, k32_48.16b
+
+ // t6 = (N) (P4 + P5) << 24
+ // t7 = (K) (P6 + P7) << 32
+ eor t7.16b, t7.16b, t6.16b
+ and t6.16b, t6.16b, k00_16.16b
+
+ eor t8.16b, t8.16b, t4.16b
+ eor t7.16b, t7.16b, t6.16b
+
+ zip2 t5.2d, t8.2d, t4.2d
+ zip1 t4.2d, t8.2d, t4.2d
+ zip2 t3.2d, t7.2d, t6.2d
+ zip1 t6.2d, t7.2d, t6.2d
+
+ ext t4.16b, t4.16b, t4.16b, #15
+ ext t5.16b, t5.16b, t5.16b, #14
+ ext t6.16b, t6.16b, t6.16b, #13
+ ext t3.16b, t3.16b, t3.16b, #12
+
+ eor t4.16b, t4.16b, t5.16b
+ eor t6.16b, t6.16b, t3.16b
+ ret
+ENDPROC(__pmull_p8_core)
+
+ .macro __pmull_p8, rq, ad, bd, i
+ .ifnc \bd, v10
+ .err
+ .endif
+ mov ad.16b, \ad\().16b
+ .ifb \i
+ pmull \rq\().8h, \ad\().8b, bd.8b // D = A*B
+ .else
+ pmull2 \rq\().8h, \ad\().16b, bd.16b // D = A*B
+ .endif
+
+ bl .L__pmull_p8_core\i
+
+ eor \rq\().16b, \rq\().16b, t4.16b
+ eor \rq\().16b, \rq\().16b, t6.16b
+ .endm
+
+ .macro fold64, p, reg1, reg2
+ ldp q11, q12, [arg2], #0x20
+
+ __pmull_\p v8, \reg1, v10, 2
+ __pmull_\p \reg1, \reg1, v10
+
+CPU_LE( rev64 v11.16b, v11.16b )
+CPU_LE( rev64 v12.16b, v12.16b )
+
+ __pmull_\p v9, \reg2, v10, 2
+ __pmull_\p \reg2, \reg2, v10
+
+CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
+CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
+
+ eor \reg1\().16b, \reg1\().16b, v8.16b
+ eor \reg2\().16b, \reg2\().16b, v9.16b
+ eor \reg1\().16b, \reg1\().16b, v11.16b
+ eor \reg2\().16b, \reg2\().16b, v12.16b
+ .endm
+
+ .macro fold16, p, reg, rk
+ __pmull_\p v8, \reg, v10
+ __pmull_\p \reg, \reg, v10, 2
+ .ifnb \rk
+ ldr_l q10, \rk, x8
+ __pmull_pre_\p v10
+ .endif
+ eor v7.16b, v7.16b, v8.16b
+ eor v7.16b, v7.16b, \reg\().16b
+ .endm
+
+ .macro __pmull_p64, rd, rn, rm, n
+ .ifb \n
+ pmull \rd\().1q, \rn\().1d, \rm\().1d
+ .else
+ pmull2 \rd\().1q, \rn\().2d, \rm\().2d
+ .endif
+ .endm
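+
+ // with the Crypto Extensions, the same operation is a single
+ // 64-bit polynomial multiply instruction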
+
+ .macro crc_t10dif_pmull, p
frame_push 3, 128
mov arg1_low32, w0
@@ -89,6 +268,8 @@ ENTRY(crc_t10dif_pmull)
movi vzr.16b, #0 // init zero register
+ __pmull_init_\p
+
// adjust the 16-bit initial_crc value, scale it to 32 bits
lsl arg1_low32, arg1_low32, #16
@@ -96,7 +277,7 @@ ENTRY(crc_t10dif_pmull)
cmp arg3, #256
// for sizes less than 128, we can't fold 64B at a time...
- b.lt _less_than_128
+ b.lt .L_less_than_128_\@
// load the initial crc value
// crc value does not need to be byte-reflected, but it needs
@@ -137,6 +318,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4
// type of pmull instruction
// will determine which constant to use
+ __pmull_pre_\p v10
//
// we subtract 256 instead of 128 to save one instruction from the loop
@@ -147,41 +329,19 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
// buffer. The _fold_64_B_loop will fold 64B at a time
// until we have 64+y Bytes of buffer
-
// fold 64B at a time. This section of the code folds 4 vector
// registers in parallel
-_fold_64_B_loop:
+.L_fold_64_B_loop_\@:
- .macro fold64, reg1, reg2
- ldp q11, q12, [arg2], #0x20
-
- pmull2 v8.1q, \reg1\().2d, v10.2d
- pmull \reg1\().1q, \reg1\().1d, v10.1d
-
-CPU_LE( rev64 v11.16b, v11.16b )
-CPU_LE( rev64 v12.16b, v12.16b )
-
- pmull2 v9.1q, \reg2\().2d, v10.2d
- pmull \reg2\().1q, \reg2\().1d, v10.1d
-
-CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
-CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
-
- eor \reg1\().16b, \reg1\().16b, v8.16b
- eor \reg2\().16b, \reg2\().16b, v9.16b
- eor \reg1\().16b, \reg1\().16b, v11.16b
- eor \reg2\().16b, \reg2\().16b, v12.16b
- .endm
-
- fold64 v0, v1
- fold64 v2, v3
- fold64 v4, v5
- fold64 v6, v7
+ fold64 \p, v0, v1
+ fold64 \p, v2, v3
+ fold64 \p, v4, v5
+ fold64 \p, v6, v7
subs arg3, arg3, #128
// check if there is another 64B in the buffer to be able to fold
- b.lt _fold_64_B_end
+ b.lt .L_fold_64_B_end_\@
if_will_cond_yield_neon
stp q0, q1, [sp, #.Lframe_local_offset]
@@ -195,11 +355,13 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
ldp q6, q7, [sp, #.Lframe_local_offset + 96]
ldr_l q10, rk3, x8
movi vzr.16b, #0 // init zero register
+ __pmull_init_\p
+ __pmull_pre_\p v10
endif_yield_neon
- b _fold_64_B_loop
+ b .L_fold_64_B_loop_\@
-_fold_64_B_end:
+.L_fold_64_B_end_\@:
// at this point, the buffer pointer is pointing at the last y Bytes
// of the buffer the 64B of folded data is in 4 of the vector
// registers: v0, v1, v2, v3
@@ -208,38 +370,29 @@ _fold_64_B_end:
// constants
ldr_l q10, rk9, x8
+ __pmull_pre_\p v10
- .macro fold16, reg, rk
- pmull v8.1q, \reg\().1d, v10.1d
- pmull2 \reg\().1q, \reg\().2d, v10.2d
- .ifnb \rk
- ldr_l q10, \rk, x8
- .endif
- eor v7.16b, v7.16b, v8.16b
- eor v7.16b, v7.16b, \reg\().16b
- .endm
-
- fold16 v0, rk11
- fold16 v1, rk13
- fold16 v2, rk15
- fold16 v3, rk17
- fold16 v4, rk19
- fold16 v5, rk1
- fold16 v6
+ fold16 \p, v0, rk11
+ fold16 \p, v1, rk13
+ fold16 \p, v2, rk15
+ fold16 \p, v3, rk17
+ fold16 \p, v4, rk19
+ fold16 \p, v5, rk1
+ fold16 \p, v6
// instead of 64, we add 48 to the loop counter to save 1 instruction
// from the loop; instead of a cmp instruction, we use the negative
// flag with the b.lt instruction
adds arg3, arg3, #(128-16)
- b.lt _final_reduction_for_128
+ b.lt .L_final_reduction_for_128_\@
// now we have 16+y bytes left to reduce. 16 Bytes is in register v7
// and the rest is in memory. We can fold 16 bytes at a time if y>=16
// continue folding 16B at a time
-_16B_reduction_loop:
- pmull v8.1q, v7.1d, v10.1d
- pmull2 v7.1q, v7.2d, v10.2d
+.L_16B_reduction_loop_\@:
+ __pmull_\p v8, v7, v10
+ __pmull_\p v7, v7, v10, 2
eor v7.16b, v7.16b, v8.16b
ldr q0, [arg2], #16
@@ -251,22 +404,22 @@ CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
// instead of a cmp instruction, we utilize the flags with the
// b.ge instruction, the equivalent of: cmp arg3, 16-16
// check if there is any more 16B in the buffer to be able to fold
- b.ge _16B_reduction_loop
+ b.ge .L_16B_reduction_loop_\@
// now we have 16+z bytes left to reduce, where 0<= z < 16.
// first, we reduce the data in the xmm7 register
-_final_reduction_for_128:
+.L_final_reduction_for_128_\@:
// check if any more data to fold. If not, compute the CRC of
// the final 128 bits
adds arg3, arg3, #16
- b.eq _128_done
+ b.eq .L_128_done_\@
// here we are handling data of less than 16 bytes.
// since we know there was data before the pointer, we can move
// the input pointer back so that exactly 16 bytes are loaded, and
// adjust the registers afterwards.
-_get_last_two_regs:
+.L_get_last_two_regs_\@:
add arg2, arg2, arg3
ldr q1, [arg2, #-16]
CPU_LE( rev64 v1.16b, v1.16b )
@@ -291,47 +444,48 @@ CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
bsl v0.16b, v2.16b, v1.16b
// fold 16 Bytes
- pmull v8.1q, v7.1d, v10.1d
- pmull2 v7.1q, v7.2d, v10.2d
+ __pmull_\p v8, v7, v10
+ __pmull_\p v7, v7, v10, 2
eor v7.16b, v7.16b, v8.16b
eor v7.16b, v7.16b, v0.16b
-_128_done:
+.L_128_done_\@:
// compute crc of a 128-bit value
ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10
+ __pmull_pre_\p v10
// 64b fold
ext v0.16b, vzr.16b, v7.16b, #8
mov v7.d[0], v7.d[1]
- pmull v7.1q, v7.1d, v10.1d
+ __pmull_\p v7, v7, v10
eor v7.16b, v7.16b, v0.16b
// 32b fold
ext v0.16b, v7.16b, vzr.16b, #4
mov v7.s[3], vzr.s[0]
- pmull2 v0.1q, v0.2d, v10.2d
+ __pmull_\p v0, v0, v10, 2
eor v7.16b, v7.16b, v0.16b
// barrett reduction
-_barrett:
ldr_l q10, rk7, x8
+ __pmull_pre_\p v10
mov v0.d[0], v7.d[1]
- pmull v0.1q, v0.1d, v10.1d
+ __pmull_\p v0, v0, v10
ext v0.16b, vzr.16b, v0.16b, #12
- pmull2 v0.1q, v0.2d, v10.2d
+ __pmull_\p v0, v0, v10, 2
ext v0.16b, vzr.16b, v0.16b, #12
eor v7.16b, v7.16b, v0.16b
mov w0, v7.s[1]
-_cleanup:
+.L_cleanup_\@:
// scale the result back to 16 bits
lsr x0, x0, #16
frame_pop
ret
-_less_than_128:
- cbz arg3, _cleanup
+.L_less_than_128_\@:
+ cbz arg3, .L_cleanup_\@
movi v0.16b, #0
mov v0.s[3], arg1_low32 // get the initial crc value
@@ -342,20 +496,21 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
eor v7.16b, v7.16b, v0.16b // xor the initial crc value
cmp arg3, #16
- b.eq _128_done // exactly 16 left
- b.lt _less_than_16_left
+ b.eq .L_128_done_\@ // exactly 16 left
+ b.lt .L_less_than_16_left_\@
ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10
+ __pmull_pre_\p v10
// update the counter. subtract 32 instead of 16 to save one
// instruction from the loop
subs arg3, arg3, #32
- b.ge _16B_reduction_loop
+ b.ge .L_16B_reduction_loop_\@
add arg3, arg3, #16
- b _get_last_two_regs
+ b .L_get_last_two_regs_\@
-_less_than_16_left:
+.L_less_than_16_left_\@:
// shl r9, 4
adr_l x0, tbl_shf_table + 16
sub x0, x0, arg3
@@ -363,8 +518,17 @@ _less_than_16_left:
movi v9.16b, #0x80
eor v0.16b, v0.16b, v9.16b
tbl v7.16b, {v7.16b}, v0.16b
- b _128_done
-ENDPROC(crc_t10dif_pmull)
+ b .L_128_done_\@
+ .endm
+
+ENTRY(crc_t10dif_pmull_p8)
+ crc_t10dif_pmull p8
+ENDPROC(crc_t10dif_pmull_p8)
+
+ .align 5
+ENTRY(crc_t10dif_pmull_p64)
+ crc_t10dif_pmull p64
+ENDPROC(crc_t10dif_pmull_p64)
// precomputed constants
// these constants are precomputed from the poly:
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index 96f0cae4a022..b461d62023f2 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -22,7 +22,10 @@
#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
-asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u64 len);
+asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 buf[], u64 len);
+asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 buf[], u64 len);
+
+static u16 (*crc_t10dif_pmull)(u16 init_crc, const u8 buf[], u64 len);
static int crct10dif_init(struct shash_desc *desc)
{
@@ -85,6 +88,11 @@ static struct shash_alg crc_t10dif_alg = {
static int __init crc_t10dif_mod_init(void)
{
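+ /*
+ * The p64 variant needs the optional PMULL extension; the p8
+ * variant only needs the baseline 8-bit polynomial multiply,
+ * which is why the module now matches on ASIMD instead.
+ */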
+ if (elf_hwcap & HWCAP_PMULL)
+ crc_t10dif_pmull = crc_t10dif_pmull_p64;
+ else
+ crc_t10dif_pmull = crc_t10dif_pmull_p8;
+
return crypto_register_shash(&crc_t10dif_alg);
}
@@ -93,8 +101,10 @@ static void __exit crc_t10dif_mod_exit(void)
crypto_unregister_shash(&crc_t10dif_alg);
}
-module_cpu_feature_match(PMULL, crc_t10dif_mod_init);
+module_cpu_feature_match(ASIMD, crc_t10dif_mod_init);
module_exit(crc_t10dif_mod_exit);
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("crct10dif");
+MODULE_ALIAS_CRYPTO("crct10dif-arm64-ce");
diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S
deleted file mode 100644
index b14463438b09..000000000000
--- a/arch/arm64/crypto/speck-neon-core.S
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Author: Eric Biggers <ebiggers@google.com>
- */
-
-#include <linux/linkage.h>
-
- .text
-
- // arguments
- ROUND_KEYS .req x0 // const {u64,u32} *round_keys
- NROUNDS .req w1 // int nrounds
- NROUNDS_X .req x1
- DST .req x2 // void *dst
- SRC .req x3 // const void *src
- NBYTES .req w4 // unsigned int nbytes
- TWEAK .req x5 // void *tweak
-
- // registers which hold the data being encrypted/decrypted
- // (underscores avoid a naming collision with ARM64 registers x0-x3)
- X_0 .req v0
- Y_0 .req v1
- X_1 .req v2
- Y_1 .req v3
- X_2 .req v4
- Y_2 .req v5
- X_3 .req v6
- Y_3 .req v7
-
- // the round key, duplicated in all lanes
- ROUND_KEY .req v8
-
- // index vector for tbl-based 8-bit rotates
- ROTATE_TABLE .req v9
- ROTATE_TABLE_Q .req q9
-
- // temporary registers
- TMP0 .req v10
- TMP1 .req v11
- TMP2 .req v12
- TMP3 .req v13
-
- // multiplication table for updating XTS tweaks
- GFMUL_TABLE .req v14
- GFMUL_TABLE_Q .req q14
-
- // next XTS tweak value(s)
- TWEAKV_NEXT .req v15
-
- // XTS tweaks for the blocks currently being encrypted/decrypted
- TWEAKV0 .req v16
- TWEAKV1 .req v17
- TWEAKV2 .req v18
- TWEAKV3 .req v19
- TWEAKV4 .req v20
- TWEAKV5 .req v21
- TWEAKV6 .req v22
- TWEAKV7 .req v23
-
- .align 4
-.Lror64_8_table:
- .octa 0x080f0e0d0c0b0a090007060504030201
-.Lror32_8_table:
- .octa 0x0c0f0e0d080b0a090407060500030201
-.Lrol64_8_table:
- .octa 0x0e0d0c0b0a09080f0605040302010007
-.Lrol32_8_table:
- .octa 0x0e0d0c0f0a09080b0605040702010003
-.Lgf128mul_table:
- .octa 0x00000000000000870000000000000001
-.Lgf64mul_table:
- .octa 0x0000000000000000000000002d361b00
-
-/*
- * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
- *
- * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
- * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
- * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
- * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
- */
-.macro _speck_round_128bytes n, lanes
-
- // x = ror(x, 8)
- tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
- tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
- tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
- tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
-
- // x += y
- add X_0.\lanes, X_0.\lanes, Y_0.\lanes
- add X_1.\lanes, X_1.\lanes, Y_1.\lanes
- add X_2.\lanes, X_2.\lanes, Y_2.\lanes
- add X_3.\lanes, X_3.\lanes, Y_3.\lanes
-
- // x ^= k
- eor X_0.16b, X_0.16b, ROUND_KEY.16b
- eor X_1.16b, X_1.16b, ROUND_KEY.16b
- eor X_2.16b, X_2.16b, ROUND_KEY.16b
- eor X_3.16b, X_3.16b, ROUND_KEY.16b
-
- // y = rol(y, 3)
- shl TMP0.\lanes, Y_0.\lanes, #3
- shl TMP1.\lanes, Y_1.\lanes, #3
- shl TMP2.\lanes, Y_2.\lanes, #3
- shl TMP3.\lanes, Y_3.\lanes, #3
- sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
- sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
- sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
- sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
-
- // y ^= x
- eor Y_0.16b, TMP0.16b, X_0.16b
- eor Y_1.16b, TMP1.16b, X_1.16b
- eor Y_2.16b, TMP2.16b, X_2.16b
- eor Y_3.16b, TMP3.16b, X_3.16b
-.endm
-
-/*
- * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
- *
- * This is the inverse of _speck_round_128bytes().
- */
-.macro _speck_unround_128bytes n, lanes
-
- // y ^= x
- eor TMP0.16b, Y_0.16b, X_0.16b
- eor TMP1.16b, Y_1.16b, X_1.16b
- eor TMP2.16b, Y_2.16b, X_2.16b
- eor TMP3.16b, Y_3.16b, X_3.16b
-
- // y = ror(y, 3)
- ushr Y_0.\lanes, TMP0.\lanes, #3
- ushr Y_1.\lanes, TMP1.\lanes, #3
- ushr Y_2.\lanes, TMP2.\lanes, #3
- ushr Y_3.\lanes, TMP3.\lanes, #3
- sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
- sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
- sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
- sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
-
- // x ^= k
- eor X_0.16b, X_0.16b, ROUND_KEY.16b
- eor X_1.16b, X_1.16b, ROUND_KEY.16b
- eor X_2.16b, X_2.16b, ROUND_KEY.16b
- eor X_3.16b, X_3.16b, ROUND_KEY.16b
-
- // x -= y
- sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
- sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
- sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
- sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
-
- // x = rol(x, 8)
- tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
- tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
- tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
- tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
-.endm
-
-.macro _next_xts_tweak next, cur, tmp, n
-.if \n == 64
- /*
- * Calculate the next tweak by multiplying the current one by x,
- * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
- */
- sshr \tmp\().2d, \cur\().2d, #63
- and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
- shl \next\().2d, \cur\().2d, #1
- ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
- eor \next\().16b, \next\().16b, \tmp\().16b
-.else
- /*
- * Calculate the next two tweaks by multiplying the current ones by x^2,
- * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
- */
- ushr \tmp\().2d, \cur\().2d, #62
- shl \next\().2d, \cur\().2d, #2
- tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
- eor \next\().16b, \next\().16b, \tmp\().16b
-.endif
-.endm
-
-/*
- * _speck_xts_crypt() - Speck-XTS encryption/decryption
- *
- * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
- * using Speck-XTS, specifically the variant with a block size of '2n' and round
- * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
- * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
- * nonzero multiple of 128.
- */
-.macro _speck_xts_crypt n, lanes, decrypting
-
- /*
- * If decrypting, modify the ROUND_KEYS parameter to point to the last
- * round key rather than the first, since for decryption the round keys
- * are used in reverse order.
- */
-.if \decrypting
- mov NROUNDS, NROUNDS /* zero the high 32 bits */
-.if \n == 64
- add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
- sub ROUND_KEYS, ROUND_KEYS, #8
-.else
- add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
- sub ROUND_KEYS, ROUND_KEYS, #4
-.endif
-.endif
-
- // Load the index vector for tbl-based 8-bit rotates
-.if \decrypting
- ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
-.else
- ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
-.endif
-
- // One-time XTS preparation
-.if \n == 64
- // Load first tweak
- ld1 {TWEAKV0.16b}, [TWEAK]
-
- // Load GF(2^128) multiplication table
- ldr GFMUL_TABLE_Q, .Lgf128mul_table
-.else
- // Load first tweak
- ld1 {TWEAKV0.8b}, [TWEAK]
-
- // Load GF(2^64) multiplication table
- ldr GFMUL_TABLE_Q, .Lgf64mul_table
-
- // Calculate second tweak, packing it together with the first
- ushr TMP0.2d, TWEAKV0.2d, #63
- shl TMP1.2d, TWEAKV0.2d, #1
- tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
- eor TMP0.8b, TMP0.8b, TMP1.8b
- mov TWEAKV0.d[1], TMP0.d[0]
-.endif
-
-.Lnext_128bytes_\@:
-
- // Calculate XTS tweaks for next 128 bytes
- _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
- _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
- _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
- _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
- _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
- _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
- _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
- _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
-
- // Load the next source blocks into {X,Y}[0-3]
- ld1 {X_0.16b-Y_1.16b}, [SRC], #64
- ld1 {X_2.16b-Y_3.16b}, [SRC], #64
-
- // XOR the source blocks with their XTS tweaks
- eor TMP0.16b, X_0.16b, TWEAKV0.16b
- eor Y_0.16b, Y_0.16b, TWEAKV1.16b
- eor TMP1.16b, X_1.16b, TWEAKV2.16b
- eor Y_1.16b, Y_1.16b, TWEAKV3.16b
- eor TMP2.16b, X_2.16b, TWEAKV4.16b
- eor Y_2.16b, Y_2.16b, TWEAKV5.16b
- eor TMP3.16b, X_3.16b, TWEAKV6.16b
- eor Y_3.16b, Y_3.16b, TWEAKV7.16b
-
- /*
- * De-interleave the 'x' and 'y' elements of each block, i.e. make it so
- * that the X[0-3] registers contain only the second halves of blocks,
- * and the Y[0-3] registers contain only the first halves of blocks.
- * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
- */
- uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
- uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
- uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
- uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
- uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
- uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
- uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
- uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
-
- // Do the cipher rounds
- mov x6, ROUND_KEYS
- mov w7, NROUNDS
-.Lnext_round_\@:
-.if \decrypting
- ld1r {ROUND_KEY.\lanes}, [x6]
- sub x6, x6, #( \n / 8 )
- _speck_unround_128bytes \n, \lanes
-.else
- ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
- _speck_round_128bytes \n, \lanes
-.endif
- subs w7, w7, #1
- bne .Lnext_round_\@
-
- // Re-interleave the 'x' and 'y' elements of each block
- zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
- zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
- zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
- zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
- zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
- zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
- zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
- zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
-
- // XOR the encrypted/decrypted blocks with the tweaks calculated earlier
- eor X_0.16b, TMP0.16b, TWEAKV0.16b
- eor Y_0.16b, Y_0.16b, TWEAKV1.16b
- eor X_1.16b, TMP1.16b, TWEAKV2.16b
- eor Y_1.16b, Y_1.16b, TWEAKV3.16b
- eor X_2.16b, TMP2.16b, TWEAKV4.16b
- eor Y_2.16b, Y_2.16b, TWEAKV5.16b
- eor X_3.16b, TMP3.16b, TWEAKV6.16b
- eor Y_3.16b, Y_3.16b, TWEAKV7.16b
- mov TWEAKV0.16b, TWEAKV_NEXT.16b
-
- // Store the ciphertext in the destination buffer
- st1 {X_0.16b-Y_1.16b}, [DST], #64
- st1 {X_2.16b-Y_3.16b}, [DST], #64
-
- // Continue if there are more 128-byte chunks remaining
- subs NBYTES, NBYTES, #128
- bne .Lnext_128bytes_\@
-
- // Store the next tweak and return
-.if \n == 64
- st1 {TWEAKV_NEXT.16b}, [TWEAK]
-.else
- st1 {TWEAKV_NEXT.8b}, [TWEAK]
-.endif
- ret
-.endm
-
-ENTRY(speck128_xts_encrypt_neon)
- _speck_xts_crypt n=64, lanes=2d, decrypting=0
-ENDPROC(speck128_xts_encrypt_neon)
-
-ENTRY(speck128_xts_decrypt_neon)
- _speck_xts_crypt n=64, lanes=2d, decrypting=1
-ENDPROC(speck128_xts_decrypt_neon)
-
-ENTRY(speck64_xts_encrypt_neon)
- _speck_xts_crypt n=32, lanes=4s, decrypting=0
-ENDPROC(speck64_xts_encrypt_neon)
-
-ENTRY(speck64_xts_decrypt_neon)
- _speck_xts_crypt n=32, lanes=4s, decrypting=1
-ENDPROC(speck64_xts_decrypt_neon)
diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c
deleted file mode 100644
index 6e233aeb4ff4..000000000000
--- a/arch/arm64/crypto/speck-neon-glue.c
+++ /dev/null
@@ -1,282 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- * (64-bit version; based on the 32-bit version)
- *
- * Copyright (c) 2018 Google, Inc
- */
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <crypto/algapi.h>
-#include <crypto/gf128mul.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/speck.h>
-#include <crypto/xts.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-/* The assembly functions only handle multiples of 128 bytes */
-#define SPECK_NEON_CHUNK_SIZE 128
-
-/* Speck128 */
-
-struct speck128_xts_tfm_ctx {
- struct speck128_tfm_ctx main_key;
- struct speck128_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck128_xts_crypt(struct skcipher_request *req,
- speck128_crypt_one_t crypt_one,
- speck128_xts_crypt_many_t crypt_many)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- le128 tweak;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
-
- crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- le128_xor((le128 *)dst, (const le128 *)src, &tweak);
- (*crypt_one)(&ctx->main_key, dst, dst);
- le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
- gf128mul_x_ble(&tweak, &tweak);
-
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static int speck128_xts_encrypt(struct skcipher_request *req)
-{
- return __speck128_xts_crypt(req, crypto_speck128_encrypt,
- speck128_xts_encrypt_neon);
-}
-
-static int speck128_xts_decrypt(struct skcipher_request *req)
-{
- return __speck128_xts_crypt(req, crypto_speck128_decrypt,
- speck128_xts_decrypt_neon);
-}
-
-static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- keylen /= 2;
-
- err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-/* Speck64 */
-
-struct speck64_xts_tfm_ctx {
- struct speck64_tfm_ctx main_key;
- struct speck64_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
- speck64_xts_crypt_many_t crypt_many)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- __le64 tweak;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
-
- crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- *(__le64 *)dst = *(__le64 *)src ^ tweak;
- (*crypt_one)(&ctx->main_key, dst, dst);
- *(__le64 *)dst ^= tweak;
- tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
- ((tweak & cpu_to_le64(1ULL << 63)) ?
- 0x1B : 0));
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static int speck64_xts_encrypt(struct skcipher_request *req)
-{
- return __speck64_xts_crypt(req, crypto_speck64_encrypt,
- speck64_xts_encrypt_neon);
-}
-
-static int speck64_xts_decrypt(struct skcipher_request *req)
-{
- return __speck64_xts_crypt(req, crypto_speck64_decrypt,
- speck64_xts_decrypt_neon);
-}
-
-static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- keylen /= 2;
-
- err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-static struct skcipher_alg speck_algs[] = {
- {
- .base.cra_name = "xts(speck128)",
- .base.cra_driver_name = "xts-speck128-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = SPECK128_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * SPECK128_128_KEY_SIZE,
- .max_keysize = 2 * SPECK128_256_KEY_SIZE,
- .ivsize = SPECK128_BLOCK_SIZE,
- .walksize = SPECK_NEON_CHUNK_SIZE,
- .setkey = speck128_xts_setkey,
- .encrypt = speck128_xts_encrypt,
- .decrypt = speck128_xts_decrypt,
- }, {
- .base.cra_name = "xts(speck64)",
- .base.cra_driver_name = "xts-speck64-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = SPECK64_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * SPECK64_96_KEY_SIZE,
- .max_keysize = 2 * SPECK64_128_KEY_SIZE,
- .ivsize = SPECK64_BLOCK_SIZE,
- .walksize = SPECK_NEON_CHUNK_SIZE,
- .setkey = speck64_xts_setkey,
- .encrypt = speck64_xts_encrypt,
- .decrypt = speck64_xts_decrypt,
- }
-};
-
-static int __init speck_neon_module_init(void)
-{
- if (!(elf_hwcap & HWCAP_ASIMD))
- return -ENODEV;
- return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-static void __exit speck_neon_module_exit(void)
-{
- crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-module_init(speck_neon_module_init);
-module_exit(speck_neon_module_exit);
-
-MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
-MODULE_ALIAS_CRYPTO("xts(speck128)");
-MODULE_ALIAS_CRYPTO("xts-speck128-neon");
-MODULE_ALIAS_CRYPTO("xts(speck64)");
-MODULE_ALIAS_CRYPTO("xts-speck64-neon");
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 0bcc98dbba56..6142402c2eb4 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -286,12 +286,11 @@ alternative_endif
ldr \rd, [\rn, #MM_CONTEXT_ID]
.endm
/*
- * read_ctr - read CTR_EL0. If the system has mismatched
- * cache line sizes, provide the system wide safe value
- * from arm64_ftr_reg_ctrel0.sys_val
+ * read_ctr - read CTR_EL0. If the system has mismatched register fields,
+ * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
*/
.macro read_ctr, reg
-alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
+alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
mrs \reg, ctr_el0 // read CTR
nop
alternative_else
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5ee5bca8c24b..13dd42c3ad4e 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -40,6 +40,15 @@
#define L1_CACHE_SHIFT (6)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define CLIDR_LOUU_SHIFT 27
+#define CLIDR_LOC_SHIFT 24
+#define CLIDR_LOUIS_SHIFT 21
+
+#define CLIDR_LOUU(clidr) (((clidr) >> CLIDR_LOUU_SHIFT) & 0x7)
+#define CLIDR_LOC(clidr) (((clidr) >> CLIDR_LOC_SHIFT) & 0x7)
+#define CLIDR_LOUIS(clidr) (((clidr) >> CLIDR_LOUIS_SHIFT) & 0x7)
+
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
* sure that all such allocations are cache aligned. Otherwise,
@@ -84,6 +93,37 @@ static inline int cache_line_size(void)
return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
}
+/*
+ * Read the effective value of CTR_EL0.
+ *
+ * According to ARM ARM for ARMv8-A (ARM DDI 0487C.a),
+ * section D10.2.33 "CTR_EL0, Cache Type Register":
+ *
+ * CTR_EL0.IDC reports the data cache clean requirements for
+ * instruction to data coherence.
+ *
+ * 0 - dcache clean to PoU is required unless:
+ * (CLIDR_EL1.LoC == 0) || (CLIDR_EL1.LoUIS == 0 && CLIDR_EL1.LoUU == 0)
+ * 1 - dcache clean to PoU is not required for i-to-d coherence.
+ *
+ * This routine provides the CTR_EL0 with the IDC field updated to the
+ * effective state.
+ */
+static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
+{
+ u32 ctr = read_cpuid_cachetype();
+
+ if (!(ctr & BIT(CTR_IDC_SHIFT))) {
+ u64 clidr = read_sysreg(clidr_el1);
+
+ if (CLIDR_LOC(clidr) == 0 ||
+ (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0))
+ ctr |= BIT(CTR_IDC_SHIFT);
+ }
+
+ return ctr;
+}
+
#endif /* __ASSEMBLY__ */
#endif
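A quick user-space illustration of the CLIDR_EL1 fallback above, using a hypothetical CLIDR value (the shifts mirror the CLIDR_* macros added in this hunk; this is a sketch, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

#define CLIDR_LOUU(clidr)	(((clidr) >> 27) & 0x7)
#define CLIDR_LOC(clidr)	(((clidr) >> 24) & 0x7)
#define CLIDR_LOUIS(clidr)	(((clidr) >> 21) & 0x7)

/* Returns 1 if IDC may be treated as set despite CTR_EL0.IDC == 0 */
static int idc_effectively_set(uint64_t clidr)
{
	return CLIDR_LOC(clidr) == 0 ||
	       (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0);
}

int main(void)
{
	/* Hypothetical CLIDR: LoC = 2, LoUIS = 0, LoUU = 0 */
	uint64_t clidr = 2ULL << 24;

	printf("effective IDC: %d\n", idc_effectively_set(clidr));
	return 0;
}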
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 1a037b94eba1..93ce86d5dae1 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -25,6 +25,8 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#ifdef __AARCH64EB__
#define COMPAT_UTS_MACHINE "armv8b\0\0"
@@ -32,10 +34,6 @@
#define COMPAT_UTS_MACHINE "armv8l\0\0"
#endif
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u16 __compat_uid16_t;
@@ -43,27 +41,13 @@ typedef u16 __compat_gid16_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u32 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef s32 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_key_t;
-typedef s32 compat_timer_t;
-
-typedef s16 compat_short_t;
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u16 compat_ushort_t;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
struct compat_stat {
#ifdef __AARCH64EB__
@@ -86,11 +70,11 @@ struct compat_stat {
compat_off_t st_size;
compat_off_t st_blksize;
compat_off_t st_blocks;
- compat_time_t st_atime;
+ old_time32_t st_atime;
compat_ulong_t st_atime_nsec;
- compat_time_t st_mtime;
+ old_time32_t st_mtime;
compat_ulong_t st_mtime_nsec;
- compat_time_t st_ctime;
+ old_time32_t st_ctime;
compat_ulong_t st_ctime_nsec;
compat_ulong_t __unused4[2];
};
@@ -159,6 +143,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
}
#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
+#define COMPAT_MINSIGSTKSZ 2048
static inline void __user *arch_compat_alloc_user_space(long len)
{
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
deleted file mode 100644
index ee35fd0f2236..000000000000
--- a/arch/arm64/include/asm/compiler.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Based on arch/arm/include/asm/compiler.h
- *
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_COMPILER_H
-#define __ASM_COMPILER_H
-
-/*
- * This is used to ensure the compiler did actually allocate the register we
- * asked it for some inline assembly sequences. Apparently we can't trust the
- * compiler from one version to another so a bit of paranoia won't hurt. This
- * string is meant to be concatenated with the inline asm string and will
- * cause compilation to stop on mismatch. (for details, see gcc PR 15089)
- */
-#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
-
-#endif /* __ASM_COMPILER_H */
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index ae1f70450fb2..6e2d254c09eb 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -33,7 +33,7 @@
#define ARM64_WORKAROUND_CAVIUM_27456 12
#define ARM64_HAS_32BIT_EL0 13
#define ARM64_HARDEN_EL2_VECTORS 14
-#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
+#define ARM64_HAS_CNP 15
#define ARM64_HAS_NO_FPSIMD 16
#define ARM64_WORKAROUND_REPEAT_TLBI 17
#define ARM64_WORKAROUND_QCOM_FALKOR_E1003 18
@@ -51,7 +51,10 @@
#define ARM64_SSBD 30
#define ARM64_MISMATCHED_CACHE_TYPE 31
#define ARM64_HAS_STAGE2_FWB 32
+#define ARM64_HAS_CRC32 33
+#define ARM64_SSBS 34
+#define ARM64_WORKAROUND_1188873 35
-#define ARM64_NCAPS 33
+#define ARM64_NCAPS 36
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 1717ba1db35d..7e2ec64aa414 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -262,7 +262,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
/*
* CPU feature detected at boot time based on system-wide value of a
* feature. It is safe for a late CPU to have this feature even though
- * the system hasn't enabled it, although the featuer will not be used
+ * the system hasn't enabled it, although the feature will not be used
* by Linux in this case. If the system has enabled this feature already,
* then every late CPU must have it.
*/
@@ -508,6 +508,12 @@ static inline bool system_supports_sve(void)
cpus_have_const_cap(ARM64_SVE);
}
+static inline bool system_supports_cnp(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_CNP) &&
+ cpus_have_const_cap(ARM64_HAS_CNP);
+}
+
#define ARM64_SSBD_UNKNOWN -1
#define ARM64_SSBD_FORCE_DISABLE 0
#define ARM64_SSBD_KERNEL 1
@@ -530,6 +536,28 @@ void arm64_set_ssbd_mitigation(bool state);
static inline void arm64_set_ssbd_mitigation(bool state) {}
#endif
+extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
+
+static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
+{
+ switch (parange) {
+ case 0: return 32;
+ case 1: return 36;
+ case 2: return 40;
+ case 3: return 42;
+ case 4: return 44;
+ case 5: return 48;
+ case 6: return 52;
+ /*
+ * A future PE could use a value unknown to the kernel.
+ * However, by the "D10.1.4 Principles of the ID scheme
+ * for fields in ID registers", ARM DDI 0487C.a, any new
+ * value is guaranteed to be higher than what we know already.
+ * As a safe limit, we return the limit supported by the kernel.
+ */
+ default: return CONFIG_ARM64_PA_BITS;
+ }
+}
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index ea690b3562af..12f93e4d2452 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -86,6 +86,7 @@
#define ARM_CPU_PART_CORTEX_A75 0xD0A
#define ARM_CPU_PART_CORTEX_A35 0xD04
#define ARM_CPU_PART_CORTEX_A55 0xD05
+#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define APM_CPU_PART_POTENZA 0x000
@@ -110,6 +111,7 @@
#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
+#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 22e4c83de5a5..8d91f2233135 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -36,11 +36,8 @@ static inline unsigned long local_daif_save(void)
{
unsigned long flags;
- asm volatile(
- "mrs %0, daif // local_daif_save\n"
- : "=r" (flags)
- :
- : "memory");
+ flags = arch_local_save_flags();
+
local_daif_mask();
return flags;
@@ -60,11 +57,9 @@ static inline void local_daif_restore(unsigned long flags)
{
if (!arch_irqs_disabled_flags(flags))
trace_hardirqs_on();
- asm volatile(
- "msr daif, %0 // local_daif_restore"
- :
- : "r" (flags)
- : "memory");
+
+ arch_local_irq_restore(flags);
+
if (arch_irqs_disabled_flags(flags))
trace_hardirqs_off();
}
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index ce70c3ffb993..676de2ec1762 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -137,6 +137,8 @@
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
+#define ESR_ELx_WFx_ISS_TI (UL(1) << 0)
+#define ESR_ELx_WFx_ISS_WFI (UL(0) << 0)
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
@@ -148,6 +150,9 @@
#define DISR_EL1_ESR_MASK (ESR_ELx_AET | ESR_ELx_EA | ESR_ELx_FSC)
/* ESR value templates for specific events */
+#define ESR_ELx_WFx_MASK (ESR_ELx_EC_MASK | ESR_ELx_WFx_ISS_TI)
+#define ESR_ELx_WFx_WFI_VAL ((ESR_ELx_EC_WFx << ESR_ELx_EC_SHIFT) | \
+ ESR_ELx_WFx_ISS_WFI)
/* BRK instruction trap from AArch64 state */
#define ESR_ELx_VAL_BRK64(imm) \
@@ -187,6 +192,8 @@
#define ESR_ELx_SYS64_ISS_SYS_OP_MASK (ESR_ELx_SYS64_ISS_SYS_MASK | \
ESR_ELx_SYS64_ISS_DIR_MASK)
+#define ESR_ELx_SYS64_ISS_RT(esr) \
+ (((esr) & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT)
/*
* User space cache operations have the following sysreg encoding
* in System instructions.
@@ -206,6 +213,18 @@
#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL \
(ESR_ELx_SYS64_ISS_SYS_VAL(1, 3, 1, 7, 0) | \
ESR_ELx_SYS64_ISS_DIR_WRITE)
+/*
+ * User space MRS operations which are supported for emulation
+ * have the following sysreg encoding in System instructions.
+ * op0 = 3, op1 = 0, crn = 0, {crm = 0, 4-7}, READ (L = 1)
+ */
+#define ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK (ESR_ELx_SYS64_ISS_OP0_MASK | \
+ ESR_ELx_SYS64_ISS_OP1_MASK | \
+ ESR_ELx_SYS64_ISS_CRN_MASK | \
+ ESR_ELx_SYS64_ISS_DIR_MASK)
+#define ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL \
+ (ESR_ELx_SYS64_ISS_SYS_VAL(3, 0, 0, 0, 0) | \
+ ESR_ELx_SYS64_ISS_DIR_READ)
#define ESR_ELx_SYS64_ISS_SYS_CTR ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 1, 0, 0)
#define ESR_ELx_SYS64_ISS_SYS_CTR_READ (ESR_ELx_SYS64_ISS_SYS_CTR | \
@@ -249,6 +268,64 @@
#define ESR_ELx_FP_EXC_TFV (UL(1) << 23)
+/*
+ * ISS field definitions for CP15 accesses
+ */
+#define ESR_ELx_CP15_32_ISS_DIR_MASK 0x1
+#define ESR_ELx_CP15_32_ISS_DIR_READ 0x1
+#define ESR_ELx_CP15_32_ISS_DIR_WRITE 0x0
+
+#define ESR_ELx_CP15_32_ISS_RT_SHIFT 5
+#define ESR_ELx_CP15_32_ISS_RT_MASK (UL(0x1f) << ESR_ELx_CP15_32_ISS_RT_SHIFT)
+#define ESR_ELx_CP15_32_ISS_CRM_SHIFT 1
+#define ESR_ELx_CP15_32_ISS_CRM_MASK (UL(0xf) << ESR_ELx_CP15_32_ISS_CRM_SHIFT)
+#define ESR_ELx_CP15_32_ISS_CRN_SHIFT 10
+#define ESR_ELx_CP15_32_ISS_CRN_MASK (UL(0xf) << ESR_ELx_CP15_32_ISS_CRN_SHIFT)
+#define ESR_ELx_CP15_32_ISS_OP1_SHIFT 14
+#define ESR_ELx_CP15_32_ISS_OP1_MASK (UL(0x7) << ESR_ELx_CP15_32_ISS_OP1_SHIFT)
+#define ESR_ELx_CP15_32_ISS_OP2_SHIFT 17
+#define ESR_ELx_CP15_32_ISS_OP2_MASK (UL(0x7) << ESR_ELx_CP15_32_ISS_OP2_SHIFT)
+
+#define ESR_ELx_CP15_32_ISS_SYS_MASK (ESR_ELx_CP15_32_ISS_OP1_MASK | \
+ ESR_ELx_CP15_32_ISS_OP2_MASK | \
+ ESR_ELx_CP15_32_ISS_CRN_MASK | \
+ ESR_ELx_CP15_32_ISS_CRM_MASK | \
+ ESR_ELx_CP15_32_ISS_DIR_MASK)
+#define ESR_ELx_CP15_32_ISS_SYS_VAL(op1, op2, crn, crm) \
+ (((op1) << ESR_ELx_CP15_32_ISS_OP1_SHIFT) | \
+ ((op2) << ESR_ELx_CP15_32_ISS_OP2_SHIFT) | \
+ ((crn) << ESR_ELx_CP15_32_ISS_CRN_SHIFT) | \
+ ((crm) << ESR_ELx_CP15_32_ISS_CRM_SHIFT))
+
+#define ESR_ELx_CP15_64_ISS_DIR_MASK 0x1
+#define ESR_ELx_CP15_64_ISS_DIR_READ 0x1
+#define ESR_ELx_CP15_64_ISS_DIR_WRITE 0x0
+
+#define ESR_ELx_CP15_64_ISS_RT_SHIFT 5
+#define ESR_ELx_CP15_64_ISS_RT_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT_SHIFT)
+
+#define ESR_ELx_CP15_64_ISS_RT2_SHIFT 10
+#define ESR_ELx_CP15_64_ISS_RT2_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT2_SHIFT)
+
+#define ESR_ELx_CP15_64_ISS_OP1_SHIFT 16
+#define ESR_ELx_CP15_64_ISS_OP1_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_OP1_SHIFT)
+#define ESR_ELx_CP15_64_ISS_CRM_SHIFT 1
+#define ESR_ELx_CP15_64_ISS_CRM_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_CRM_SHIFT)
+
+#define ESR_ELx_CP15_64_ISS_SYS_VAL(op1, crm) \
+ (((op1) << ESR_ELx_CP15_64_ISS_OP1_SHIFT) | \
+ ((crm) << ESR_ELx_CP15_64_ISS_CRM_SHIFT))
+
+#define ESR_ELx_CP15_64_ISS_SYS_MASK (ESR_ELx_CP15_64_ISS_OP1_MASK | \
+ ESR_ELx_CP15_64_ISS_CRM_MASK | \
+ ESR_ELx_CP15_64_ISS_DIR_MASK)
+
+#define ESR_ELx_CP15_64_ISS_SYS_CNTVCT (ESR_ELx_CP15_64_ISS_SYS_VAL(1, 14) | \
+ ESR_ELx_CP15_64_ISS_DIR_READ)
+
+#define ESR_ELx_CP15_32_ISS_SYS_CNTFRQ (ESR_ELx_CP15_32_ISS_SYS_VAL(0, 0, 14, 0) |\
+ ESR_ELx_CP15_32_ISS_DIR_READ)
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 35b2e50f17fb..9f8b915af3a7 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -31,8 +31,6 @@
#include <asm/alternative.h>
#include <asm/cpufeature.h>
-#include <xen/xen.h>
-
/*
* Generic IO read/write. These perform native-endian accesses.
*/
@@ -205,12 +203,5 @@ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);
-struct bio_vec;
-extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
- (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
-
#endif /* __KERNEL__ */
#endif /* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 7e2b3e360086..472023498d71 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -26,13 +26,16 @@
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
-static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
{
- asm_volatile_goto("1: nop\n\t"
- ".pushsection __jump_table, \"aw\"\n\t"
- ".align 3\n\t"
- ".quad 1b, %l[l_yes], %c0\n\t"
- ".popsection\n\t"
+ asm_volatile_goto(
+ "1: nop \n\t"
+ " .pushsection __jump_table, \"aw\" \n\t"
+ " .align 3 \n\t"
+ " .long 1b - ., %l[l_yes] - . \n\t"
+ " .quad %c0 - . \n\t"
+ " .popsection \n\t"
: : "i"(&((char *)key)[branch]) : : l_yes);
return false;
@@ -40,13 +43,16 @@ l_yes:
return true;
}
-static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
{
- asm_volatile_goto("1: b %l[l_yes]\n\t"
- ".pushsection __jump_table, \"aw\"\n\t"
- ".align 3\n\t"
- ".quad 1b, %l[l_yes], %c0\n\t"
- ".popsection\n\t"
+ asm_volatile_goto(
+ "1: b %l[l_yes] \n\t"
+ " .pushsection __jump_table, \"aw\" \n\t"
+ " .align 3 \n\t"
+ " .long 1b - ., %l[l_yes] - . \n\t"
+ " .quad %c0 - . \n\t"
+ " .popsection \n\t"
: : "i"(&((char *)key)[branch]) : : l_yes);
return false;
@@ -54,13 +60,5 @@ l_yes:
return true;
}
-typedef u64 jump_label_t;
-
-struct jump_entry {
- jump_label_t code;
- jump_label_t target;
- jump_label_t key;
-};
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_JUMP_LABEL_H */
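The rewritten entries are position-independent: the two .long fields store 32-bit offsets of the branch site and target relative to the entry itself, and the .quad stores the static_key address the same way. A sketch of decoding such an entry back to absolute addresses, assuming the generic relative jump_entry layout (the kernel's real accessors live in the generic jump_label code; the key's low bits also carry flags, ignored here):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout matching the relative entries emitted above */
struct jump_entry {
	int32_t code;	/* "1b - ."        : branch site   */
	int32_t target;	/* "%l[l_yes] - ." : branch target */
	int64_t key;	/* "%c0 - ."       : static_key    */
};

static unsigned long jump_entry_code(const struct jump_entry *e)
{
	return (unsigned long)&e->code + e->code;
}

static unsigned long jump_entry_target(const struct jump_entry *e)
{
	return (unsigned long)&e->target + e->target;
}

int main(void)
{
	static char insn[4];	/* stand-in for the patched nop/b site */
	static char label;	/* stand-in for l_yes */
	static struct jump_entry e;

	/* Build the offsets the assembler would have emitted */
	e.code = (int32_t)((intptr_t)insn - (intptr_t)&e.code);
	e.target = (int32_t)((intptr_t)&label - (intptr_t)&e.target);

	printf("code ok: %d, target ok: %d\n",
	       jump_entry_code(&e) == (unsigned long)insn,
	       jump_entry_target(&e) == (unsigned long)&label);
	return 0;
}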
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index a780f6714b44..850e2122d53f 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -97,7 +97,7 @@
+ EARLY_PGDS((vstart), (vend)) /* each PGDIR needs a next level page table */ \
+ EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \
+ EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
-#define SWAPPER_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end))
+#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end))
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index aa45df752a16..6f602af5263c 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -107,6 +107,7 @@
#define VTCR_EL2_RES1 (1 << 31)
#define VTCR_EL2_HD (1 << 22)
#define VTCR_EL2_HA (1 << 21)
+#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT
#define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK
#define VTCR_EL2_TG0_MASK TCR_TG0_MASK
#define VTCR_EL2_TG0_4K TCR_TG0_4K
@@ -120,62 +121,150 @@
#define VTCR_EL2_IRGN0_WBWA TCR_IRGN0_WBWA
#define VTCR_EL2_SL0_SHIFT 6
#define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT)
-#define VTCR_EL2_SL0_LVL1 (1 << VTCR_EL2_SL0_SHIFT)
#define VTCR_EL2_T0SZ_MASK 0x3f
-#define VTCR_EL2_T0SZ_40B 24
#define VTCR_EL2_VS_SHIFT 19
#define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT)
#define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT)
+#define VTCR_EL2_T0SZ(x) TCR_T0SZ(x)
+
/*
* We configure the Stage-2 page tables to always restrict the IPA space to be
* 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
* not known to exist and will break with this configuration.
*
- * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time
- * (see hyp-init.S).
+ * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_setup_stage2().
*
* Note that when using 4K pages, we concatenate two first level page tables
* together. With 16K pages, we concatenate 16 first level page tables.
*
- * The magic numbers used for VTTBR_X in this patch can be found in Tables
- * D4-23 and D4-25 in ARM DDI 0487A.b.
*/
-#define VTCR_EL2_T0SZ_IPA VTCR_EL2_T0SZ_40B
#define VTCR_EL2_COMMON_BITS (VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1)
-#ifdef CONFIG_ARM64_64K_PAGES
/*
- * Stage2 translation configuration:
- * 64kB pages (TG0 = 1)
- * 2 level page tables (SL = 1)
+ * VTCR_EL2:SL0 indicates the entry level for Stage2 translation.
+ * Interestingly, it depends on the page size.
+ * See D.10.2.121, VTCR_EL2, in ARM DDI 0487C.a
+ *
+ * ------------------------------------------
+ * | Entry level | 4K | 16K/64K |
+ * ------------------------------------------
+ * | Level: 0 | 2 | - |
+ * ------------------------------------------
+ * | Level: 1 | 1 | 2 |
+ * ------------------------------------------
+ * | Level: 2 | 0 | 1 |
+ * ------------------------------------------
+ * | Level: 3 | - | 0 |
+ * ------------------------------------------
+ *
+ * The table roughly translates to:
+ *
+ * SL0(PAGE_SIZE, Entry_Level) = TGRAN_SL0_BASE - Entry_Level
+ *
+ * Where TGRAN_SL0_BASE is a magic number depending on the page size:
+ * TGRAN_SL0_BASE(4K) = 2
+ * TGRAN_SL0_BASE(16K) = 3
+ * TGRAN_SL0_BASE(64K) = 3
+ * provided we take care of ruling out the unsupported cases and
+ * Entry_Level = 4 - Number_of_levels.
+ *
*/
-#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SL0_LVL1)
-#define VTTBR_X_TGRAN_MAGIC 38
+#ifdef CONFIG_ARM64_64K_PAGES
+
+#define VTCR_EL2_TGRAN VTCR_EL2_TG0_64K
+#define VTCR_EL2_TGRAN_SL0_BASE 3UL
+
#elif defined(CONFIG_ARM64_16K_PAGES)
-/*
- * Stage2 translation configuration:
- * 16kB pages (TG0 = 2)
- * 2 level page tables (SL = 1)
- */
-#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_16K | VTCR_EL2_SL0_LVL1)
-#define VTTBR_X_TGRAN_MAGIC 42
+
+#define VTCR_EL2_TGRAN VTCR_EL2_TG0_16K
+#define VTCR_EL2_TGRAN_SL0_BASE 3UL
+
#else /* 4K */
-/*
- * Stage2 translation configuration:
- * 4kB pages (TG0 = 0)
- * 3 level page tables (SL = 1)
- */
-#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SL0_LVL1)
-#define VTTBR_X_TGRAN_MAGIC 37
+
+#define VTCR_EL2_TGRAN VTCR_EL2_TG0_4K
+#define VTCR_EL2_TGRAN_SL0_BASE 2UL
+
#endif
-#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
-#define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
+#define VTCR_EL2_LVLS_TO_SL0(levels) \
+ ((VTCR_EL2_TGRAN_SL0_BASE - (4 - (levels))) << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_SL0_TO_LVLS(sl0) \
+ ((sl0) + 4 - VTCR_EL2_TGRAN_SL0_BASE)
+#define VTCR_EL2_LVLS(vtcr) \
+ VTCR_EL2_SL0_TO_LVLS(((vtcr) & VTCR_EL2_SL0_MASK) >> VTCR_EL2_SL0_SHIFT)
-#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
+#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN)
+#define VTCR_EL2_IPA(vtcr) (64 - ((vtcr) & VTCR_EL2_T0SZ_MASK))
+
+/*
+ * ARM VMSAv8-64 defines an algorithm for finding the translation table
+ * descriptors in section D4.2.8 in ARM DDI 0487C.a.
+ *
+ * The algorithm defines the expectations on the translation table
+ * addresses for each level, based on PAGE_SIZE, entry level
+ * and the translation table size (T0SZ). The variable "x" in the
+ * algorithm determines the alignment of a table base address at a given
+ * level and thus determines the alignment of VTTBR:BADDR for the stage2
+ * entry-level page table.
+ * Since the number of bits resolved at the entry level could vary
+ * depending on the T0SZ, the value of "x" is defined based on a
+ * Magic constant for a given PAGE_SIZE and Entry Level. The
+ * intermediate levels must always be aligned to the PAGE_SIZE (i.e.,
+ * x = PAGE_SHIFT).
+ *
+ * The value of "x" for entry level is calculated as :
+ * x = Magic_N - T0SZ
+ *
+ * where Magic_N is an integer depending on the page size and the entry
+ * level of the page table as below:
+ *
+ * --------------------------------------------
+ * | Entry level | 4K 16K 64K |
+ * --------------------------------------------
+ * | Level: 0 (4 levels) | 28 | - | - |
+ * --------------------------------------------
+ * | Level: 1 (3 levels) | 37 | 31 | 25 |
+ * --------------------------------------------
+ * | Level: 2 (2 levels) | 46 | 42 | 38 |
+ * --------------------------------------------
+ * | Level: 3 (1 level) | - | 53 | 51 |
+ * --------------------------------------------
+ *
+ * We have a magic formula for the Magic_N below:
+ *
+ * Magic_N(PAGE_SIZE, Level) = 64 - ((PAGE_SHIFT - 3) * Number_of_levels)
+ *
+ * where Number_of_levels = (4 - Level). We are only interested in the
+ * value for Entry_Level for the stage2 page table.
+ *
+ * So, given that T0SZ = (64 - IPA_SHIFT), we can compute 'x' as follows:
+ *
+ * x = (64 - ((PAGE_SHIFT - 3) * Number_of_levels)) - (64 - IPA_SHIFT)
+ * = IPA_SHIFT - ((PAGE_SHIFT - 3) * Number_of_levels)
+ *
+ * Here is one way to explain the Magic Formula:
+ *
+ * x = log2(Size_of_Entry_Level_Table)
+ *
+ * Since we can resolve (PAGE_SHIFT - 3) bits at each level, and another
+ * PAGE_SHIFT bits in the PTE, we have:
+ *
+ * Bits_Entry_Level = IPA_SHIFT - ((PAGE_SHIFT - 3) * (n - 1) + PAGE_SHIFT)
+ * = IPA_SHIFT - (PAGE_SHIFT - 3) * n - 3
+ * where n = number of levels, and since each pointer is 8 bytes, we have:
+ *
+ * x = Bits_Entry_Level + 3
+ * = IPA_SHIFT - (PAGE_SHIFT - 3) * n
+ *
+ * The only constraint here is that we have to find the number of page table
+ * levels for a given IPA size (which we do, see stage2_pgtable_levels())
+ */
+#define ARM64_VTTBR_X(ipa, levels) ((ipa) - ((levels) * (PAGE_SHIFT - 3)))
+
+#define VTTBR_CNP_BIT (UL(1))
#define VTTBR_VMID_SHIFT (UL(48))
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
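As a sanity check, the formulas above reproduce the magic numbers they replace for the old fixed configuration (4K pages, 40-bit IPA, 3 levels): x = 37 - T0SZ(24) = 13 and SL0 = 1. A small illustrative program:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4K pages */

/* ARM64_VTTBR_X(): x = IPA - levels * (PAGE_SHIFT - 3) */
static int vttbr_x(int ipa, int levels)
{
	return ipa - levels * (PAGE_SHIFT - 3);
}

/* SL0 = TGRAN_SL0_BASE - (4 - levels); the base is 2 for 4K pages */
static int sl0_4k(int levels)
{
	return 2 - (4 - levels);
}

int main(void)
{
	printf("x   = %d (old: VTTBR_X_TGRAN_MAGIC(37) - T0SZ(24) = 13)\n",
	       vttbr_x(40, 3));
	printf("SL0 = %d (old: VTCR_EL2_SL0_LVL1)\n", sl0_4k(3));
	return 0;
}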
@@ -223,6 +312,13 @@
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
+/*
+ * We have
+ * PAR [PA_Shift - 1 : 12] = PA [PA_Shift - 1 : 12]
+ * HPFAR [PA_Shift - 9 : 4] = FIPA [PA_Shift - 1 : 12]
+ */
+#define PAR_TO_HPFAR(par) \
+ (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8)
#define kvm_arm_exception_type \
{0, "IRQ" }, \
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 102b5a5c47b6..aea01a09eb94 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -30,6 +30,7 @@
#define ARM_EXCEPTION_IRQ 0
#define ARM_EXCEPTION_EL1_SERROR 1
#define ARM_EXCEPTION_TRAP 2
+#define ARM_EXCEPTION_IL 3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
@@ -72,8 +73,6 @@ extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void);
-extern u32 __init_stage2_translation(void);
-
/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
#define __hyp_this_cpu_ptr(sym) \
({ \
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 6106a85ae0be..21247870def7 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -335,7 +335,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
- return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ return ESR_ELx_SYS64_ISS_RT(esr);
}
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3d6d7336f871..52fbc823ff8c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -53,7 +53,7 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
-int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext);
+int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
struct kvm_arch {
@@ -61,11 +61,13 @@ struct kvm_arch {
u64 vmid_gen;
u32 vmid;
- /* 1-level 2nd stage table, protected by kvm->mmu_lock */
+ /* stage2 entry level table */
pgd_t *pgd;
/* VTTBR value associated with above pgd and vmid */
u64 vttbr;
+ /* VTCR_EL2 value for this VM */
+ u64 vtcr;
/* The last vcpu id that ran on each physical CPU */
int __percpu *last_vcpu_ran;
@@ -387,6 +389,8 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+void __kvm_enable_ssbs(void);
+
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
@@ -407,6 +411,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
*/
BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
+
+ /*
+ * Disabling SSBD on a non-VHE system requires us to enable SSBS
+ * at EL2.
+ */
+ if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
+ arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+ kvm_call_hyp(__kvm_enable_ssbs);
+ }
}
static inline bool kvm_arch_check_sve_has_vhe(void)
@@ -440,13 +453,7 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
-static inline void __cpu_init_stage2(void)
-{
- u32 parange = kvm_call_hyp(__init_stage2_translation);
-
- WARN_ONCE(parange < 40,
- "PARange is %d bits, unsupported configuration!", parange);
-}
+static inline void __cpu_init_stage2(void) {}
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
@@ -509,8 +516,12 @@ static inline int kvm_arm_have_ssbd(void)
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
+void kvm_set_ipa_limit(void);
+
#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);
+int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 384c34397619..23aca66767f9 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -155,5 +155,15 @@ void deactivate_traps_vhe_put(void);
u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);
+/*
+ * Must be called from hyp code running at EL2 with an updated VTTBR
+ * and interrupts disabled.
+ */
+static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
+{
+ write_sysreg(kvm->arch.vtcr, vtcr_el2);
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+}
+
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index d6fff7de5539..658657367f2f 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -141,8 +141,16 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
* We currently only support a 40bit IPA.
*/
#define KVM_PHYS_SHIFT (40)
-#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
+
+#define kvm_phys_shift(kvm) VTCR_EL2_IPA(kvm->arch.vtcr)
+#define kvm_phys_size(kvm) (_AC(1, ULL) << kvm_phys_shift(kvm))
+#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL))
+
+static inline bool kvm_page_empty(void *ptr)
+{
+ struct page *ptr_page = virt_to_page(ptr);
+ return page_count(ptr_page) == 1;
+}
#include <asm/stage2_pgtable.h>
@@ -238,12 +246,6 @@ static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}
-static inline bool kvm_page_empty(void *ptr)
-{
- struct page *ptr_page = virt_to_page(ptr);
- return page_count(ptr_page) == 1;
-}
-
#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
#ifdef __PAGETABLE_PMD_FOLDED
@@ -517,5 +519,34 @@ static inline int hyp_map_aux_data(void)
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
+/*
+ * Get the magic number 'x' for VTTBR:BADDR of this KVM instance.
+ * With the v8.2 LPA extension, 'x' should be a minimum of 6 with
+ * 52-bit IPS.
+ */
+static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels)
+{
+ int x = ARM64_VTTBR_X(ipa_shift, levels);
+
+ return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x;
+}
+
+static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels)
+{
+ unsigned int x = arm64_vttbr_x(ipa_shift, levels);
+
+ return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
+}
+
+static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
+{
+ return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
+}
+
+static inline bool kvm_cpu_has_cnp(void)
+{
+ return system_supports_cnp();
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
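For the 40-bit IPA, 4K, 3-level case, arm64_vttbr_x() gives x = 13, so the BADDR mask covers bits [47:13], matching the old fixed VTTBR_BADDR_MASK. An illustrative check, assuming PHYS_MASK_SHIFT = 48 and ignoring the 52-bit PA clamp:

#include <stdint.h>
#include <stdio.h>

#define PHYS_MASK_SHIFT	48	/* assumed PA size */
#define PAGE_SHIFT	12	/* 4K pages */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static uint64_t baddr_mask(unsigned int ipa, unsigned int levels)
{
	unsigned int x = ipa - levels * (PAGE_SHIFT - 3);

	/* With CONFIG_ARM64_PA_BITS_52, x would be clamped to >= 6 */
	return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
}

int main(void)
{
	/* 40-bit IPA, 3 levels: x = 13, expect 0xffffffffe000 */
	printf("mask = %#llx\n", (unsigned long long)baddr_mask(40, 3));
	return 0;
}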
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index dd320df0d026..7689c7aa1d77 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -95,5 +95,8 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
extern void mark_linear_text_alias_ro(void);
+#define INIT_MM_CONTEXT(name) \
+ .pgd = init_pg_dir,
+
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 39ec0b8a689e..1e58bf58c22b 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -147,12 +147,25 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
extern ttbr_replace_func idmap_cpu_replace_ttbr1;
ttbr_replace_func *replace_phys;
- phys_addr_t pgd_phys = virt_to_phys(pgdp);
+ /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
+ phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
+
+ if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
+ /*
+ * cpu_replace_ttbr1() is used when there's a boot CPU
+ * up (i.e. the cpufeature framework is not up yet) and
+ * later only when we enable CNP via the cpufeature
+ * enable() callback.
+ * Also we rely on the cpu_hwcap bit being set before
+ * calling the enable() function.
+ */
+ ttbr1 |= TTBR_CNP_BIT;
+ }
replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
cpu_install_idmap();
- replace_phys(pgd_phys);
+ replace_phys(ttbr1);
cpu_uninstall_idmap();
}
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 60d02c81a3a2..c88a3cb117a1 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -37,9 +37,7 @@ extern void clear_page(void *to);
typedef struct page *pgtable_t;
-#ifdef CONFIG_HAVE_ARCH_PFN_VALID
extern int pfn_valid(unsigned long);
-#endif
#include <asm/memory.h>
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index bb5dcea42003..799d9dd6f7cc 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -10,11 +10,16 @@ extern struct static_key paravirt_steal_rq_enabled;
struct pv_time_ops {
unsigned long long (*steal_clock)(int cpu);
};
-extern struct pv_time_ops pv_time_ops;
+
+struct paravirt_patch_template {
+ struct pv_time_ops time;
+};
+
+extern struct paravirt_patch_template pv_ops;
static inline u64 paravirt_steal_clock(int cpu)
{
- return pv_time_ops.steal_clock(cpu);
+ return pv_ops.time.steal_clock(cpu);
}
#endif
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index fd208eac9f2a..1d7d8da2ef9b 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -211,6 +211,8 @@
#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
+#define TTBR_CNP_BIT (UL(1) << 0)
+
/*
* TCR flags.
*/
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 1bdeca8918a6..50b1ef8584c0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -360,6 +360,7 @@ static inline int pmd_protnone(pmd_t pmd)
#define pmd_present(pmd) pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_valid(pmd) pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
@@ -428,10 +429,33 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PUD_TYPE_TABLE)
#endif
+extern pgd_t init_pg_dir[PTRS_PER_PGD];
+extern pgd_t init_pg_end[];
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
+
+extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
+
+static inline bool in_swapper_pgdir(void *addr)
+{
+ return ((unsigned long)addr & PAGE_MASK) ==
+ ((unsigned long)swapper_pg_dir & PAGE_MASK);
+}
+
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
+#ifdef __PAGETABLE_PMD_FOLDED
+ if (in_swapper_pgdir(pmdp)) {
+ set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
+ return;
+ }
+#endif /* __PAGETABLE_PMD_FOLDED */
+
WRITE_ONCE(*pmdp, pmd);
- dsb(ishst);
+
+ if (pmd_valid(pmd))
+ dsb(ishst);
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -477,11 +501,21 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud) pte_present(pud_pte(pud))
+#define pud_valid(pud) pte_valid(pud_pte(pud))
static inline void set_pud(pud_t *pudp, pud_t pud)
{
+#ifdef __PAGETABLE_PUD_FOLDED
+ if (in_swapper_pgdir(pudp)) {
+ set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
+ return;
+ }
+#endif /* __PAGETABLE_PUD_FOLDED */
+
WRITE_ONCE(*pudp, pud);
- dsb(ishst);
+
+ if (pud_valid(pud))
+ dsb(ishst);
}
static inline void pud_clear(pud_t *pudp)
@@ -532,6 +566,11 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
+ if (in_swapper_pgdir(pgdp)) {
+ set_swapper_pgd(pgdp, pgd);
+ return;
+ }
+
WRITE_ONCE(*pgdp, pgd);
dsb(ishst);
}
@@ -712,11 +751,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
}
#endif
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern pgd_t swapper_pg_end[];
-extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
-
/*
* Encode and decode a swap entry:
* bits 0-1: present (must be zero)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 79657ad91397..2bf6691371c2 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -174,6 +174,10 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
{
start_thread_common(regs, pc);
regs->pstate = PSR_MODE_EL0t;
+
+ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+ regs->pstate |= PSR_SSBS_BIT;
+
regs->sp = sp;
}
@@ -190,6 +194,9 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate |= PSR_AA32_E_BIT;
#endif
+ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+ regs->pstate |= PSR_AA32_SSBS_BIT;
+
regs->compat_sp = sp;
}
#endif
@@ -244,10 +251,6 @@ static inline void spin_lock_prefetch(const void *ptr)
#endif
-void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
-void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
-void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
-
extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 177b851ca6d9..fce22c4b2f73 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -25,6 +25,9 @@
#define CurrentEL_EL1 (1 << 2)
#define CurrentEL_EL2 (2 << 2)
+/* Additional SPSR bits not exposed in the UABI */
+#define PSR_IL_BIT (1 << 20)
+
/* AArch32-specific ptrace requests */
#define COMPAT_PTRACE_GETREGS 12
#define COMPAT_PTRACE_SETREGS 13
@@ -50,6 +53,7 @@
#define PSR_AA32_I_BIT 0x00000080
#define PSR_AA32_A_BIT 0x00000100
#define PSR_AA32_E_BIT 0x00000200
+#define PSR_AA32_SSBS_BIT 0x00800000
#define PSR_AA32_DIT_BIT 0x01000000
#define PSR_AA32_Q_BIT 0x08000000
#define PSR_AA32_V_BIT 0x10000000
diff --git a/arch/arm64/include/asm/stage2_pgtable-nopmd.h b/arch/arm64/include/asm/stage2_pgtable-nopmd.h
deleted file mode 100644
index 2656a0fd05a6..000000000000
--- a/arch/arm64/include/asm/stage2_pgtable-nopmd.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2016 - ARM Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM64_S2_PGTABLE_NOPMD_H_
-#define __ARM64_S2_PGTABLE_NOPMD_H_
-
-#include <asm/stage2_pgtable-nopud.h>
-
-#define __S2_PGTABLE_PMD_FOLDED
-
-#define S2_PMD_SHIFT S2_PUD_SHIFT
-#define S2_PTRS_PER_PMD 1
-#define S2_PMD_SIZE (1UL << S2_PMD_SHIFT)
-#define S2_PMD_MASK (~(S2_PMD_SIZE-1))
-
-#define stage2_pud_none(pud) (0)
-#define stage2_pud_present(pud) (1)
-#define stage2_pud_clear(pud) do { } while (0)
-#define stage2_pud_populate(pud, pmd) do { } while (0)
-#define stage2_pmd_offset(pud, address) ((pmd_t *)(pud))
-
-#define stage2_pmd_free(pmd) do { } while (0)
-
-#define stage2_pmd_addr_end(addr, end) (end)
-
-#define stage2_pud_huge(pud) (0)
-#define stage2_pmd_table_empty(pmdp) (0)
-
-#endif
diff --git a/arch/arm64/include/asm/stage2_pgtable-nopud.h b/arch/arm64/include/asm/stage2_pgtable-nopud.h
deleted file mode 100644
index 5ee87b54ebf3..000000000000
--- a/arch/arm64/include/asm/stage2_pgtable-nopud.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2016 - ARM Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM64_S2_PGTABLE_NOPUD_H_
-#define __ARM64_S2_PGTABLE_NOPUD_H_
-
-#define __S2_PGTABLE_PUD_FOLDED
-
-#define S2_PUD_SHIFT S2_PGDIR_SHIFT
-#define S2_PTRS_PER_PUD 1
-#define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT)
-#define S2_PUD_MASK (~(S2_PUD_SIZE-1))
-
-#define stage2_pgd_none(pgd) (0)
-#define stage2_pgd_present(pgd) (1)
-#define stage2_pgd_clear(pgd) do { } while (0)
-#define stage2_pgd_populate(pgd, pud) do { } while (0)
-
-#define stage2_pud_offset(pgd, address) ((pud_t *)(pgd))
-
-#define stage2_pud_free(x) do { } while (0)
-
-#define stage2_pud_addr_end(addr, end) (end)
-#define stage2_pud_table_empty(pmdp) (0)
-
-#endif
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
index 8b68099348e5..d352f6df8d2c 100644
--- a/arch/arm64/include/asm/stage2_pgtable.h
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -19,9 +19,17 @@
#ifndef __ARM64_S2_PGTABLE_H_
#define __ARM64_S2_PGTABLE_H_
+#include <linux/hugetlb.h>
#include <asm/pgtable.h>
/*
+ * PGDIR_SHIFT determines the size a top-level page table entry can map
+ * and depends on the number of levels in the page table. Compute the
+ * PGDIR_SHIFT for a given number of levels.
+ */
+#define pt_levels_pgdir_shift(lvls) ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))
+
+/*
* The hardware supports concatenation of up to 16 tables at stage2 entry level
* and we use the feature whenever possible.
*
@@ -29,112 +37,208 @@
* On arm64, the smallest PAGE_SIZE supported is 4k, which means
* (PAGE_SHIFT - 3) > 4 holds for all page sizes.
 * This implies that the total number of page table levels at stage2 expected
- * by the hardware is actually the number of levels required for (KVM_PHYS_SHIFT - 4)
+ * by the hardware is actually the number of levels required for (IPA_SHIFT - 4)
 * in normal translations (e.g., stage1), since we cannot have another level in
- * the range (KVM_PHYS_SHIFT, KVM_PHYS_SHIFT - 4).
+ * the range (IPA_SHIFT, IPA_SHIFT - 4).
*/
-#define STAGE2_PGTABLE_LEVELS ARM64_HW_PGTABLE_LEVELS(KVM_PHYS_SHIFT - 4)
+#define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
+#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr)
-/*
- * With all the supported VA_BITs and 40bit guest IPA, the following condition
- * is always true:
- *
- * STAGE2_PGTABLE_LEVELS <= CONFIG_PGTABLE_LEVELS
- *
- * We base our stage-2 page table walker helpers on this assumption and
- * fall back to using the host version of the helper wherever possible.
- * i.e, if a particular level is not folded (e.g, PUD) at stage2, we fall back
- * to using the host version, since it is guaranteed it is not folded at host.
- *
- * If the condition breaks in the future, we can rearrange the host level
- * definitions and reuse them for stage2. Till then...
- */
-#if STAGE2_PGTABLE_LEVELS > CONFIG_PGTABLE_LEVELS
-#error "Unsupported combination of guest IPA and host VA_BITS."
-#endif
-
-/* S2_PGDIR_SHIFT is the size mapped by top-level stage2 entry */
-#define S2_PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - STAGE2_PGTABLE_LEVELS)
-#define S2_PGDIR_SIZE (_AC(1, UL) << S2_PGDIR_SHIFT)
-#define S2_PGDIR_MASK (~(S2_PGDIR_SIZE - 1))
+/* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */
+#define stage2_pgdir_shift(kvm) pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
+#define stage2_pgdir_size(kvm) (1ULL << stage2_pgdir_shift(kvm))
+#define stage2_pgdir_mask(kvm) ~(stage2_pgdir_size(kvm) - 1)
/*
 * The number of PTRS across all concatenated stage2 tables is given by the
* number of bits resolved at the initial level.
+ * If we force more levels than necessary, we may have (stage2_pgdir_shift > IPA),
+ * in which case stage2_pgd_ptrs will have one entry.
*/
-#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT))
+#define pgd_ptrs_shift(ipa, pgdir_shift) \
+ ((ipa) > (pgdir_shift) ? ((ipa) - (pgdir_shift)) : 0)
+#define __s2_pgd_ptrs(ipa, lvls) \
+ (1 << (pgd_ptrs_shift((ipa), pt_levels_pgdir_shift(lvls))))
+#define __s2_pgd_size(ipa, lvls) (__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t))
+
+#define stage2_pgd_ptrs(kvm) __s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
+#define stage2_pgd_size(kvm) __s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
/*
- * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
- * levels in addition to the PGD.
+ * kvm_mmu_cache_min_pages() is the number of pages required to install
+ * a stage-2 translation. We pre-allocate the entry level page table at
+ * VM creation.
*/
-#define KVM_MMU_CACHE_MIN_PAGES (STAGE2_PGTABLE_LEVELS - 1)
+#define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1)
-
-#if STAGE2_PGTABLE_LEVELS > 3
+/* Stage2 PUD definitions when the level is present */
+static inline bool kvm_stage2_has_pud(struct kvm *kvm)
+{
+ return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3);
+}
#define S2_PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
-#define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT)
+#define S2_PUD_SIZE (1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK (~(S2_PUD_SIZE - 1))
-#define stage2_pgd_none(pgd) pgd_none(pgd)
-#define stage2_pgd_clear(pgd) pgd_clear(pgd)
-#define stage2_pgd_present(pgd) pgd_present(pgd)
-#define stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud)
-#define stage2_pud_offset(pgd, address) pud_offset(pgd, address)
-#define stage2_pud_free(pud) pud_free(NULL, pud)
+static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
+{
+ if (kvm_stage2_has_pud(kvm))
+ return pgd_none(pgd);
+ else
+ return 0;
+}
-#define stage2_pud_table_empty(pudp) kvm_page_empty(pudp)
+static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp)
+{
+ if (kvm_stage2_has_pud(kvm))
+ pgd_clear(pgdp);
+}
-static inline phys_addr_t stage2_pud_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
{
- phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;
+ if (kvm_stage2_has_pud(kvm))
+ return pgd_present(pgd);
+ else
+ return 1;
+}
- return (boundary - 1 < end - 1) ? boundary : end;
+static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
+{
+ if (kvm_stage2_has_pud(kvm))
+ pgd_populate(NULL, pgd, pud);
+}
+
+static inline pud_t *stage2_pud_offset(struct kvm *kvm,
+ pgd_t *pgd, unsigned long address)
+{
+ if (kvm_stage2_has_pud(kvm))
+ return pud_offset(pgd, address);
+ else
+ return (pud_t *)pgd;
}
-#endif /* STAGE2_PGTABLE_LEVELS > 3 */
+static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
+{
+ if (kvm_stage2_has_pud(kvm))
+ pud_free(NULL, pud);
+}
+static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
+{
+ if (kvm_stage2_has_pud(kvm))
+ return kvm_page_empty(pudp);
+ else
+ return false;
+}
-#if STAGE2_PGTABLE_LEVELS > 2
+static inline phys_addr_t
+stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
+{
+ if (kvm_stage2_has_pud(kvm)) {
+ phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;
+
+ return (boundary - 1 < end - 1) ? boundary : end;
+ } else {
+ return end;
+ }
+}
+
+/* Stage2 PMD definitions when the level is present */
+static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
+{
+ return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2);
+}
#define S2_PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
-#define S2_PMD_SIZE (_AC(1, UL) << S2_PMD_SHIFT)
+#define S2_PMD_SIZE (1UL << S2_PMD_SHIFT)
#define S2_PMD_MASK (~(S2_PMD_SIZE - 1))
-#define stage2_pud_none(pud) pud_none(pud)
-#define stage2_pud_clear(pud) pud_clear(pud)
-#define stage2_pud_present(pud) pud_present(pud)
-#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd)
-#define stage2_pmd_offset(pud, address) pmd_offset(pud, address)
-#define stage2_pmd_free(pmd) pmd_free(NULL, pmd)
+static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ return pud_none(pud);
+ else
+ return 0;
+}
+
+static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ pud_clear(pud);
+}
-#define stage2_pud_huge(pud) pud_huge(pud)
-#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ return pud_present(pud);
+ else
+ return 1;
+}
-static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
{
- phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;
+ if (kvm_stage2_has_pmd(kvm))
+ pud_populate(NULL, pud, pmd);
+}
- return (boundary - 1 < end - 1) ? boundary : end;
+static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
+ pud_t *pud, unsigned long address)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ return pmd_offset(pud, address);
+ else
+ return (pmd_t *)pud;
}
-#endif /* STAGE2_PGTABLE_LEVELS > 2 */
+static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ pmd_free(NULL, pmd);
+}
+
+static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ return pud_huge(pud);
+ else
+ return 0;
+}
+
+static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ return kvm_page_empty(pmdp);
+ else
+ return 0;
+}
-#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep)
+static inline phys_addr_t
+stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
+{
+ if (kvm_stage2_has_pmd(kvm)) {
+ phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;
-#if STAGE2_PGTABLE_LEVELS == 2
-#include <asm/stage2_pgtable-nopmd.h>
-#elif STAGE2_PGTABLE_LEVELS == 3
-#include <asm/stage2_pgtable-nopud.h>
-#endif
+ return (boundary - 1 < end - 1) ? boundary : end;
+ } else {
+ return end;
+ }
+}
+static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep)
+{
+ return kvm_page_empty(ptep);
+}
-#define stage2_pgd_index(addr) (((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr)
+{
+ return (((addr) >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1));
+}
-static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline phys_addr_t
+stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
- phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK;
+ phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);
return (boundary - 1 < end - 1) ? boundary : end;
}
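
The helpers above replace the old compile-time STAGE2_PGTABLE_LEVELS #ifdefs with runtime folding: when kvm_stage2_has_pud()/kvm_stage2_has_pmd() is false, the offset helpers simply reinterpret the upper-level entry as the lower-level table, so a single walker works for every VM configuration. A minimal user-space model of that folding, with made-up types (only the stage2_* shape comes from the patch):

/* Hedged sketch: models how a missing level "folds" into the one above.
 * All types and values here are illustrative, not kernel definitions. */
#include <stdio.h>
#include <stdbool.h>

typedef unsigned long pgd_t;
typedef unsigned long pud_t;

struct kvm_model { int levels; };      /* stand-in for kvm_stage2_levels() */

static bool model_has_pud(struct kvm_model *kvm)
{
	return kvm->levels > 3;        /* mirrors kvm_stage2_has_pud() */
}

static pud_t *model_pud_offset(struct kvm_model *kvm, pgd_t *pgd)
{
	if (model_has_pud(kvm))
		return (pud_t *)(*pgd); /* would be pud_offset(pgd, addr) */
	return (pud_t *)pgd;            /* folded: the PGD entry acts as PUD */
}

int main(void)
{
	struct kvm_model vm_4lvl = { .levels = 4 }, vm_3lvl = { .levels = 3 };
	pud_t table = 0;
	pgd_t pgd = (pgd_t)&table;

	printf("4-level: pud=%p\n", (void *)model_pud_offset(&vm_4lvl, &pgd));
	printf("3-level: pud=%p (same as &pgd)\n",
	       (void *)model_pud_offset(&vm_3lvl, &pgd));
	return 0;
}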
diff --git a/arch/arm64/include/asm/stat.h b/arch/arm64/include/asm/stat.h
index eab738019707..397c6ccd04e7 100644
--- a/arch/arm64/include/asm/stat.h
+++ b/arch/arm64/include/asm/stat.h
@@ -20,7 +20,7 @@
#ifdef CONFIG_COMPAT
-#include <linux/compat_time.h>
+#include <linux/time.h>
#include <asm/compat.h>
/*
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index c1470931b897..0c909c4a932f 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,7 +20,6 @@
#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H
-#include <asm/compiler.h>
#include <linux/stringify.h>
/*
@@ -84,13 +83,26 @@
#endif /* CONFIG_BROKEN_GAS_INST */
-#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
-#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
+/*
+ * Instructions for modifying PSTATE fields.
+ * As per Arm ARM for v8-A, Section "C.5.1.3 op0 == 0b00, architectural hints,
+ * barriers and CLREX, and PSTATE access", ARM DDI 0487 C.a, system instructions
+ * for accessing PSTATE fields have the following encoding:
+ * Op0 = 0, CRn = 4
+ * Op1, Op2 encode the PSTATE field modified and define the constraints.
+ * CRm = Imm4 for the instruction.
+ * Rt = 0x1f
+ */
+#define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift)
+#define PSTATE_Imm_shift CRm_shift
+
+#define PSTATE_PAN pstate_field(0, 4)
+#define PSTATE_UAO pstate_field(0, 3)
+#define PSTATE_SSBS pstate_field(3, 1)
-#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
- (!!x)<<8 | 0x1f)
-#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
- (!!x)<<8 | 0x1f)
+#define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
@@ -419,6 +431,7 @@
#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_DSSBS (1UL << 44)
#define SCTLR_ELx_EE (1 << 25)
#define SCTLR_ELx_IESB (1 << 21)
#define SCTLR_ELx_WXN (1 << 19)
@@ -439,7 +452,7 @@
(1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
(1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
(1 << 27) | (1 << 30) | (1 << 31) | \
- (0xffffffffUL << 32))
+ (0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
@@ -453,7 +466,7 @@
#define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
- ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
+ SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
#error "Inconsistent SCTLR_EL2 set/clear bits"
@@ -477,7 +490,7 @@
(1 << 29))
#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
(1 << 27) | (1 << 30) | (1 << 31) | \
- (0xffffffffUL << 32))
+ (0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
@@ -489,12 +502,12 @@
#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
- SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_NTWI |\
+ SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
- SCTLR_EL1_RES0)
+ SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0)
#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
#error "Inconsistent SCTLR_EL1 set/clear bits"
@@ -544,6 +557,13 @@
#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
+/* id_aa64pfr1 */
+#define ID_AA64PFR1_SSBS_SHIFT 4
+
+#define ID_AA64PFR1_SSBS_PSTATE_NI 0
+#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
+#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
+
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
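
The pstate_field()/SET_PSTATE_*() macros earlier in this hunk assemble MSR-immediate encodings by OR-ing the Op1/Op2 field selector and the CRm immediate into the fixed template 0xd500401f. A standalone sketch that reproduces the arithmetic; the shift values 16/8/5 are assumed here from the Op1/CRm/Op2 layout the comment cites:

/* Hedged sketch: recomputes the SET_PSTATE_*() encodings in user space.
 * Shift values follow the Op1/CRm/Op2 layout described in the comment. */
#include <stdio.h>
#include <stdint.h>

#define OP1_SHIFT 16
#define CRM_SHIFT 8                     /* CRm carries the immediate */
#define OP2_SHIFT 5

#define PSTATE_FIELD(op1, op2) (((uint32_t)(op1) << OP1_SHIFT) | \
				((uint32_t)(op2) << OP2_SHIFT))
#define SET_PSTATE(field, x)   (0xd500401fu | (field) | ((!!(x)) << CRM_SHIFT))

int main(void)
{
	printf("SET_PSTATE_PAN(1)  = 0x%08x\n", SET_PSTATE(PSTATE_FIELD(0, 4), 1));
	printf("SET_PSTATE_UAO(1)  = 0x%08x\n", SET_PSTATE(PSTATE_FIELD(0, 3), 1));
	printf("SET_PSTATE_SSBS(1) = 0x%08x\n", SET_PSTATE(PSTATE_FIELD(3, 1), 1));
	return 0;
}

For PAN this prints 0xd500419f, i.e. the encoding of 'msr pan, #1'.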
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 28893a0b141d..0e2a0ecaf484 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -33,7 +33,8 @@ void die(const char *msg, struct pt_regs *regs, int err);
struct siginfo;
void arm64_notify_die(const char *str, struct pt_regs *regs,
- struct siginfo *info, int err);
+ int signo, int sicode, void __user *addr,
+ int err);
void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index a3233167be60..106fdc951b6e 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -22,16 +22,10 @@
#include <linux/pagemap.h>
#include <linux/swap.h>
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
-#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry)
static inline void __tlb_remove_table(void *_table)
{
free_page_and_swap_cache((struct page *)_table);
}
-#else
-#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry)
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
static void tlb_flush(struct mmu_gather *tlb);
@@ -40,36 +34,35 @@ static void tlb_flush(struct mmu_gather *tlb);
static inline void tlb_flush(struct mmu_gather *tlb)
{
struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
+ bool last_level = !tlb->freed_tables;
+ unsigned long stride = tlb_get_unmap_size(tlb);
/*
- * The ASID allocator will either invalidate the ASID or mark
- * it as used.
+ * If we're tearing down the address space then we only care about
+ * invalidating the walk-cache, since the ASID allocator won't
+ * reallocate our ASID without invalidating the entire TLB.
*/
- if (tlb->fullmm)
+ if (tlb->fullmm) {
+ if (!last_level)
+ flush_tlb_mm(tlb->mm);
return;
+ }
- /*
- * The intermediate page table levels are already handled by
- * the __(pte|pmd|pud)_free_tlb() functions, so last level
- * TLBI is sufficient here.
- */
- __flush_tlb_range(&vma, tlb->start, tlb->end, true);
+ __flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
- __flush_tlb_pgtable(tlb->mm, addr);
pgtable_page_dtor(pte);
- tlb_remove_entry(tlb, pte);
+ tlb_remove_table(tlb, pte);
}
#if CONFIG_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long addr)
{
- __flush_tlb_pgtable(tlb->mm, addr);
- tlb_remove_entry(tlb, virt_to_page(pmdp));
+ tlb_remove_table(tlb, virt_to_page(pmdp));
}
#endif
@@ -77,8 +70,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
unsigned long addr)
{
- __flush_tlb_pgtable(tlb->mm, addr);
- tlb_remove_entry(tlb, virt_to_page(pudp));
+ tlb_remove_table(tlb, virt_to_page(pudp));
}
#endif
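
The new tlb_flush() above derives everything from two mmu_gather facts: whether intermediate table pages were freed (which forces walk-cache invalidation) and the unmap granule used as the TLBI stride. A hedged model of that decision, using a stand-in struct rather than the real mmu_gather:

/* Hedged sketch: models the tlb_flush() decisions above with a stand-in
 * struct; the real mmu_gather fields and helpers live in the kernel. */
#include <stdio.h>
#include <stdbool.h>

struct gather_model {
	bool fullmm;          /* tearing down the whole address space? */
	bool freed_tables;    /* did we free intermediate table pages? */
	unsigned long stride; /* granule of the unmapped entries */
};

static void model_tlb_flush(const struct gather_model *tlb)
{
	bool last_level = !tlb->freed_tables;

	if (tlb->fullmm) {
		/* Walk-cache only matters if tables were freed. */
		if (!last_level)
			printf("flush_tlb_mm() (invalidate walk-cache)\n");
		else
			printf("nothing: ASID won't be reused uninvalidated\n");
		return;
	}
	printf("ranged flush, stride=%#lx, last_level=%d\n",
	       tlb->stride, last_level);
}

int main(void)
{
	struct gather_model munmap_ptes = { false, false, 0x1000 };
	struct gather_model exit_mm     = { true,  true,  0x1000 };

	model_tlb_flush(&munmap_ptes);
	model_tlb_flush(&exit_mm);
	return 0;
}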
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index a4a1901140ee..c3c0387aee18 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -70,43 +70,73 @@
})
/*
- * TLB Management
- * ==============
+ * TLB Invalidation
+ * ================
*
- * The TLB specific code is expected to perform whatever tests it needs
- * to determine if it should invalidate the TLB for each call. Start
- * addresses are inclusive and end addresses are exclusive; it is safe to
- * round these addresses down.
+ * This header file implements the low-level TLB invalidation routines
+ * (sometimes referred to as "flushing" in the kernel) for arm64.
*
- * flush_tlb_all()
+ * Every invalidation operation uses the following template:
+ *
+ * DSB ISHST // Ensure prior page-table updates have completed
+ * TLBI ... // Invalidate the TLB
+ * DSB ISH // Ensure the TLB invalidation has completed
+ * if (invalidated kernel mappings)
+ * ISB // Discard any instructions fetched from the old mapping
+ *
+ *
+ * The following functions form part of the "core" TLB invalidation API,
+ * as documented in Documentation/core-api/cachetlb.rst:
*
- * Invalidate the entire TLB.
+ * flush_tlb_all()
+ * Invalidate the entire TLB (kernel + user) on all CPUs
*
* flush_tlb_mm(mm)
+ * Invalidate an entire user address space on all CPUs.
+ * The 'mm' argument identifies the ASID to invalidate.
+ *
+ * flush_tlb_range(vma, start, end)
+ * Invalidate the virtual-address range '[start, end)' on all
+ * CPUs for the user address space corresponding to 'vma->mm'.
+ * Note that this operation also invalidates any walk-cache
+ * entries associated with translations for the specified address
+ * range.
+ *
+ * flush_tlb_kernel_range(start, end)
+ * Same as flush_tlb_range(..., start, end), but applies to
+ * kernel mappings rather than a particular user address space.
+ * Whilst not explicitly documented, this function is used when
+ * unmapping pages from vmalloc/io space.
+ *
+ * flush_tlb_page(vma, addr)
+ * Invalidate a single user mapping for address 'addr' in the
+ * address space corresponding to 'vma->mm'. Note that this
+ * operation only invalidates a single, last-level page-table
+ * entry and therefore does not affect any walk-caches.
*
- * Invalidate all TLB entries in a particular address space.
- * - mm - mm_struct describing address space
*
- * flush_tlb_range(mm,start,end)
+ * Next, we have some undocumented invalidation routines that you probably
+ * don't want to call unless you know what you're doing:
*
- * Invalidate a range of TLB entries in the specified address
- * space.
- * - mm - mm_struct describing address space
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
+ * local_flush_tlb_all()
+ * Same as flush_tlb_all(), but only applies to the calling CPU.
*
- * flush_tlb_page(vaddr,vma)
+ * __flush_tlb_kernel_pgtable(addr)
+ * Invalidate a single kernel mapping for address 'addr' on all
+ * CPUs, ensuring that any walk-cache entries associated with the
+ * translation are also invalidated.
*
- * Invalidate the specified page in the specified address range.
- * - vaddr - virtual address (may not be aligned)
- * - vma - vma_struct describing address range
+ * __flush_tlb_range(vma, start, end, stride, last_level)
+ * Invalidate the virtual-address range '[start, end)' on all
+ * CPUs for the user address space corresponding to 'vma->mm'.
+ * The invalidation operations are issued at a granularity
+ * determined by 'stride' and only affect walk-cache entries
+ * if 'last_level' is false.
*
- * flush_kern_tlb_page(kaddr)
*
- * Invalidate the TLB entry for the specified page. The address
- * will be in the kernels virtual memory space. Current uses
- * only require the D-TLB to be invalidated.
- * - kaddr - Kernel virtual memory address
+ * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
+ * on top of these routines, since that is our interface to the mmu_gather
+ * API as used by munmap() and friends.
*/
static inline void local_flush_tlb_all(void)
{
@@ -149,25 +179,28 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
* This is meant to avoid soft lock-ups on large TLB flushing ranges and not
* necessarily a performance improvement.
*/
-#define MAX_TLB_RANGE (1024UL << PAGE_SHIFT)
+#define MAX_TLBI_OPS 1024UL
static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- bool last_level)
+ unsigned long stride, bool last_level)
{
unsigned long asid = ASID(vma->vm_mm);
unsigned long addr;
- if ((end - start) > MAX_TLB_RANGE) {
+ if ((end - start) > (MAX_TLBI_OPS * stride)) {
flush_tlb_mm(vma->vm_mm);
return;
}
+ /* Convert the stride into units of 4k */
+ stride >>= 12;
+
start = __TLBI_VADDR(start, asid);
end = __TLBI_VADDR(end, asid);
dsb(ishst);
- for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+ for (addr = start; addr < end; addr += stride) {
if (last_level) {
__tlbi(vale1is, addr);
__tlbi_user(vale1is, addr);
@@ -182,14 +215,18 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- __flush_tlb_range(vma, start, end, false);
+ /*
+ * We cannot use leaf-only invalidation here, since we may be invalidating
+ * table entries as part of collapsing hugepages or moving page tables.
+ */
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, false);
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long addr;
- if ((end - start) > MAX_TLB_RANGE) {
+ if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
flush_tlb_all();
return;
}
@@ -199,7 +236,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
- __tlbi(vaae1is, addr);
+ __tlbi(vaale1is, addr);
dsb(ish);
isb();
}
@@ -208,20 +245,11 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
* Used to invalidate the TLB (walk caches) corresponding to intermediate page
* table levels (pgd/pud/pmd).
*/
-static inline void __flush_tlb_pgtable(struct mm_struct *mm,
- unsigned long uaddr)
-{
- unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));
-
- __tlbi(vae1is, addr);
- __tlbi_user(vae1is, addr);
- dsb(ish);
-}
-
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
unsigned long addr = __TLBI_VADDR(kaddr, 0);
+ dsb(ishst);
__tlbi(vaae1is, addr);
dsb(ish);
}
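
With the stride parameter, the MAX_TLBI_OPS cut-off above now scales with the mapping granule: a 2MB-stride range tolerates 512 times more bytes than a 4KB-stride one before falling back to a full-ASID flush. A quick arithmetic sketch (MAX_TLBI_OPS = 1024 as defined above):

/* Hedged sketch: how many TLBI operations a range costs at a given
 * stride, and when __flush_tlb_range() would fall back to flush_tlb_mm(). */
#include <stdio.h>

#define MAX_TLBI_OPS 1024UL

static void cost(unsigned long range, unsigned long stride)
{
	if (range > MAX_TLBI_OPS * stride)
		printf("range %#lx @ stride %#lx -> full ASID flush\n",
		       range, stride);
	else
		printf("range %#lx @ stride %#lx -> %lu TLBI ops\n",
		       range, stride, range / stride);
}

int main(void)
{
	cost(8UL << 20, 0x1000);    /* 8MB of 4KB pages: over the limit */
	cost(8UL << 20, 0x200000);  /* 8MB of 2MB blocks: only 4 ops */
	return 0;
}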
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 49a0fee4f89b..0524f2438649 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -45,6 +45,9 @@ int pcibus_to_node(struct pci_bus *bus);
/* Replace task scheduler's default cpu-invariant accounting */
#define arch_scale_cpu_capacity topology_get_cpu_scale
+/* Enable topology flag updates */
+#define arch_update_cpu_topology topology_update_cpu_topology
+
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index c320f3bf6c57..f9c1aa6167d2 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -37,8 +37,9 @@ void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
void force_signal_inject(int signal, int code, unsigned long address);
void arm64_notify_segfault(unsigned long addr);
-void arm64_force_sig_info(struct siginfo *info, const char *str,
- struct task_struct *tsk);
+void arm64_force_sig_fault(int signo, int code, void __user *addr, const char *str);
+void arm64_force_sig_mceerr(int code, void __user *addr, short lsb, const char *str);
+void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr, const char *str);
/*
* Move regs->pc to next instruction and do necessary setup before it
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index e66b0fca99c2..07c34087bd5e 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -32,7 +32,6 @@
#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
-#include <asm/compiler.h>
#include <asm/extable.h>
#define get_ds() (KERNEL_DS)
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e0d0f5b856e7..b13ca091f833 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -18,11 +18,11 @@
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h
index 4e22b7a8c038..2788e95d0ff0 100644
--- a/arch/arm64/include/asm/xen/events.h
+++ b/arch/arm64/include/asm/xen/events.h
@@ -14,7 +14,7 @@ enum ipi_vector {
static inline int xen_irqs_disabled(struct pt_regs *regs)
{
- return raw_irqs_disabled_flags((unsigned long) regs->pstate);
+ return !interrupts_enabled(regs);
}
#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 198afbf0688f..6c5adf458690 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -19,3 +19,4 @@ generic-y += swab.h
generic-y += termbits.h
generic-y += termios.h
generic-y += types.h
+generic-y += siginfo.h
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 17c65c8f33cb..2bcd6e4f3474 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -48,5 +48,6 @@
#define HWCAP_USCAT (1 << 25)
#define HWCAP_ILRCPC (1 << 26)
#define HWCAP_FLAGM (1 << 27)
+#define HWCAP_SSBS (1 << 28)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 98c4ce55d9c3..a36227fdb084 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -46,6 +46,7 @@
#define PSR_I_BIT 0x00000080
#define PSR_A_BIT 0x00000100
#define PSR_D_BIT 0x00000200
+#define PSR_SSBS_BIT 0x00001000
#define PSR_PAN_BIT 0x00400000
#define PSR_UAO_BIT 0x00800000
#define PSR_V_BIT 0x10000000
diff --git a/arch/arm64/include/uapi/asm/siginfo.h b/arch/arm64/include/uapi/asm/siginfo.h
deleted file mode 100644
index 574d12f86039..000000000000
--- a/arch/arm64/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_SIGINFO_H
-#define __ASM_SIGINFO_H
-
-#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h
index 5072cbd15c82..dae1584cf017 100644
--- a/arch/arm64/include/uapi/asm/unistd.h
+++ b/arch/arm64/include/uapi/asm/unistd.h
@@ -16,5 +16,6 @@
*/
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_NEW_STAT
#include <asm-generic/unistd.h>
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index dec10898d688..a509e35132d2 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -68,21 +68,43 @@ static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
int scope)
{
- u64 mask = CTR_CACHE_MINLINE_MASK;
-
- /* Skip matching the min line sizes for cache type check */
- if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
- mask ^= arm64_ftr_reg_ctrel0.strict_mask;
+ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+ u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
+ u64 ctr_raw, ctr_real;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return (read_cpuid_cachetype() & mask) !=
- (arm64_ftr_reg_ctrel0.sys_val & mask);
+
+ /*
+ * We want to make sure that all the CPUs in the system expose
+ * a consistent CTR_EL0 so that applications behave
+ * correctly with migration.
+ *
+ * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
+ *
+ * 1) It is safe if the system doesn't support IDC, as the CPU anyway
+ * reports IDC = 0, consistent with the rest.
+ *
+ * 2) If the system has IDC, it is still safe as we trap CTR_EL0
+ * access on this CPU via the ARM64_HAS_CACHE_IDC capability.
+ *
+ * So, we need to make sure either the raw CTR_EL0 or the effective
+ * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
+ */
+ ctr_raw = read_cpuid_cachetype() & mask;
+ ctr_real = read_cpuid_effective_cachetype() & mask;
+
+ return (ctr_real != sys) && (ctr_raw != sys);
}
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
- sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
+ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+
+ /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
+ if ((read_cpuid_cachetype() & mask) !=
+ (arm64_ftr_reg_ctrel0.sys_val & mask))
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
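
The rewritten matcher above lets a secondary CPU boot if either its raw or its effective CTR_EL0 matches the sanitised system value; only when both views differ is the CPU flagged. A compact model of that predicate, with invented register values:

/* Hedged sketch: the boot-time CTR_EL0 consistency predicate above,
 * with invented register values (bit 28 is CTR_EL0.IDC). */
#include <stdio.h>
#include <stdbool.h>

static bool ctr_mismatched(unsigned long sys, unsigned long raw,
			   unsigned long real)
{
	/* Safe if either the raw or the effective view matches. */
	return (real != sys) && (raw != sys);
}

int main(void)
{
	unsigned long sys = 0x94448004;  /* sanitised system copy, IDC=1 */

	/* Raw IDC=0 but effective IDC=1 (derived from CLIDR_EL1):
	 * allowed to boot; CTR_EL0 accesses are trapped instead. */
	printf("mismatch=%d\n", ctr_mismatched(sys, 0x84448004, 0x94448004));

	/* Both views disagree with the system copy: flag the CPU. */
	printf("mismatch=%d\n", ctr_mismatched(sys, 0x84448004, 0x84448004));
	return 0;
}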
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
@@ -116,6 +138,15 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
static DEFINE_SPINLOCK(bp_lock);
int cpu, slot = -1;
+ /*
+ * enable_smccc_arch_workaround_1() passes NULL for the hyp_vecs
+ * start/end if we're a guest. Skip the hyp-vectors work.
+ */
+ if (!hyp_vecs_start) {
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ return;
+ }
+
spin_lock(&bp_lock);
for_each_possible_cpu(cpu) {
if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
@@ -312,6 +343,14 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
void arm64_set_ssbd_mitigation(bool state)
{
+ if (this_cpu_has_cap(ARM64_SSBS)) {
+ if (state)
+ asm volatile(SET_PSTATE_SSBS(0));
+ else
+ asm volatile(SET_PSTATE_SSBS(1));
+ return;
+ }
+
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
@@ -336,6 +375,11 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ if (this_cpu_has_cap(ARM64_SSBS)) {
+ required = false;
+ goto out_printmsg;
+ }
+
if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
ssbd_state = ARM64_SSBD_UNKNOWN;
return false;
@@ -384,7 +428,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
switch (ssbd_state) {
case ARM64_SSBD_FORCE_DISABLE:
- pr_info_once("%s disabled from command-line\n", entry->desc);
arm64_set_ssbd_mitigation(false);
required = false;
break;
@@ -397,7 +440,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
break;
case ARM64_SSBD_FORCE_ENABLE:
- pr_info_once("%s forced from command-line\n", entry->desc);
arm64_set_ssbd_mitigation(true);
required = true;
break;
@@ -407,10 +449,27 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
break;
}
+out_printmsg:
+ switch (ssbd_state) {
+ case ARM64_SSBD_FORCE_DISABLE:
+ pr_info_once("%s disabled from command-line\n", entry->desc);
+ break;
+
+ case ARM64_SSBD_FORCE_ENABLE:
+ pr_info_once("%s forced from command-line\n", entry->desc);
+ break;
+ }
+
return required;
}
#endif /* CONFIG_ARM64_SSBD */
+static void __maybe_unused
+cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+{
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
+}
+
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \
.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
@@ -616,14 +675,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
#endif
{
- .desc = "Mismatched cache line size",
- .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
- .matches = has_mismatched_cache_type,
- .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
- .cpu_enable = cpu_enable_trap_ctr_access,
- },
- {
- .desc = "Mismatched cache type",
+ .desc = "Mismatched cache type (CTR_EL0)",
.capability = ARM64_MISMATCHED_CACHE_TYPE,
.matches = has_mismatched_cache_type,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
@@ -680,6 +732,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = has_ssbd_mitigation,
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+ {
+ /* Cortex-A76 r0p0 to r2p0 */
+ .desc = "ARM erratum 1188873",
+ .capability = ARM64_WORKAROUND_1188873,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e238b7932096..af50064dea51 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -20,6 +20,7 @@
#include <linux/bsearch.h>
#include <linux/cpumask.h>
+#include <linux/crash_dump.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
@@ -117,6 +118,7 @@ EXPORT_SYMBOL(cpu_hwcap_keys);
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
+static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
/*
* NOTE: Any changes to the visibility of features should be kept in
@@ -164,6 +166,11 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
@@ -371,7 +378,7 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 4 */
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
- ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
+ ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
/* Op1 = 0, CRn = 0, CRm = 5 */
@@ -657,7 +664,6 @@ void update_cpu_features(int cpu,
/*
* EL3 is not our concern.
- * ID_AA64PFR1 is currently RES0.
*/
taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
@@ -848,15 +854,55 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
}
static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
- int __unused)
+ int scope)
{
- return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_IDC_SHIFT);
+ u64 ctr;
+
+ if (scope == SCOPE_SYSTEM)
+ ctr = arm64_ftr_reg_ctrel0.sys_val;
+ else
+ ctr = read_cpuid_effective_cachetype();
+
+ return ctr & BIT(CTR_IDC_SHIFT);
+}
+
+static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
+ * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
+ * to the CTR_EL0 on this CPU and emulate it with the real/safe
+ * value.
+ */
+ if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
- int __unused)
+ int scope)
{
- return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT);
+ u64 ctr;
+
+ if (scope == SCOPE_SYSTEM)
+ ctr = arm64_ftr_reg_ctrel0.sys_val;
+ else
+ ctr = read_cpuid_cachetype();
+
+ return ctr & BIT(CTR_DIC_SHIFT);
+}
+
+static bool __maybe_unused
+has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ /*
+ * Kdump isn't guaranteed to power-off all secondary CPUs, so with
+ * CNP we may share TLB entries with a CPU stuck in the crashed
+ * kernel.
+ */
+ if (is_kdump_kernel())
+ return false;
+
+ return has_cpuid_feature(entry, scope);
}
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
@@ -1035,6 +1081,70 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
WARN_ON(val & (7 << 27 | 7 << 21));
}
+#ifdef CONFIG_ARM64_SSBD
+static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
+{
+ if (user_mode(regs))
+ return 1;
+
+ if (instr & BIT(PSTATE_Imm_shift))
+ regs->pstate |= PSR_SSBS_BIT;
+ else
+ regs->pstate &= ~PSR_SSBS_BIT;
+
+ arm64_skip_faulting_instruction(regs, 4);
+ return 0;
+}
+
+static struct undef_hook ssbs_emulation_hook = {
+ .instr_mask = ~(1U << PSTATE_Imm_shift),
+ .instr_val = 0xd500401f | PSTATE_SSBS,
+ .fn = ssbs_emulation_handler,
+};
+
+static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
+{
+ static bool undef_hook_registered = false;
+ static DEFINE_SPINLOCK(hook_lock);
+
+ spin_lock(&hook_lock);
+ if (!undef_hook_registered) {
+ register_undef_hook(&ssbs_emulation_hook);
+ undef_hook_registered = true;
+ }
+ spin_unlock(&hook_lock);
+
+ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
+ arm64_set_ssbd_mitigation(false);
+ } else {
+ arm64_set_ssbd_mitigation(true);
+ }
+}
+#endif /* CONFIG_ARM64_SSBD */
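
The undef hook above matches both MSR SSBS, #0 and MSR SSBS, #1 with a single mask/value pair: the only bit excluded from instr_mask is the immediate bit itself. A sketch of that match plus the handler's immediate extraction, with the encodings computed from the macros earlier in this patch:

/* Hedged sketch: how the SSBS undef hook's mask/value pair matches both
 * immediates, and how the handler recovers the immediate bit. */
#include <stdio.h>
#include <stdint.h>

#define PSTATE_IMM_SHIFT 8                           /* CRm_shift */
#define PSTATE_SSBS      ((3u << 16) | (1u << 5))
#define HOOK_MASK        (~(1u << PSTATE_IMM_SHIFT)) /* instr_mask */
#define HOOK_VAL         (0xd500401fu | PSTATE_SSBS) /* instr_val */

int main(void)
{
	uint32_t msr_ssbs[2] = {
		HOOK_VAL,                            /* msr ssbs, #0 */
		HOOK_VAL | (1u << PSTATE_IMM_SHIFT), /* msr ssbs, #1 */
	};

	for (int i = 0; i < 2; i++) {
		uint32_t insn = msr_ssbs[i];
		int hit = (insn & HOOK_MASK) == HOOK_VAL;
		int imm = !!(insn & (1u << PSTATE_IMM_SHIFT));

		printf("insn=0x%08x hit=%d imm=%d\n", insn, hit, imm);
	}
	return 0;
}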
+
+#ifdef CONFIG_ARM64_PAN
+static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * We modify PSTATE. This won't work from irq context as the PSTATE
+ * is discarded once we return from the exception.
+ */
+ WARN_ON_ONCE(in_interrupt());
+
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
+ asm(SET_PSTATE_PAN(1));
+}
+#endif /* CONFIG_ARM64_PAN */
+
+#ifdef CONFIG_ARM64_RAS_EXTN
+static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
+{
+ /* Firmware may have left a deferred SError in this register. */
+ write_sysreg_s(0, SYS_DISR_EL1);
+}
+#endif /* CONFIG_ARM64_RAS_EXTN */
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -1184,6 +1294,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_CACHE_IDC,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cache_idc,
+ .cpu_enable = cpu_emulate_effective_ctr,
},
{
.desc = "Instruction cache invalidation not required for I/D coherence",
@@ -1222,6 +1333,41 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_hw_dbm,
},
#endif
+#ifdef CONFIG_ARM64_SSBD
+ {
+ .desc = "CRC32 instructions",
+ .capability = ARM64_HAS_CRC32,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
+ .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
+ .min_field_value = 1,
+ },
+ {
+ .desc = "Speculative Store Bypassing Safe (SSBS)",
+ .capability = ARM64_SSBS,
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .field_pos = ID_AA64PFR1_SSBS_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
+ .cpu_enable = cpu_enable_ssbs,
+ },
+#endif
+#ifdef CONFIG_ARM64_CNP
+ {
+ .desc = "Common not Private translations",
+ .capability = ARM64_HAS_CNP,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_useable_cnp,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_CNP_SHIFT,
+ .min_field_value = 1,
+ .cpu_enable = cpu_enable_cnp,
+ },
+#endif
{},
};
@@ -1267,6 +1413,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
#ifdef CONFIG_ARM64_SVE
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
#endif
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
{},
};
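
With the HWCAP_CAP() entry above, userspace can discover SSBS through the auxiliary vector. A minimal sketch, assuming glibc's getauxval() and the HWCAP_SSBS value (1 << 28) defined in the uapi hunk earlier in this patch:

/* Hedged sketch: probing the new "ssbs" hwcap from userspace. */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SSBS
#define HWCAP_SSBS (1 << 28)    /* matches the uapi/asm/hwcap.h hunk */
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	printf("SSBS %s\n", (hwcaps & HWCAP_SSBS) ? "present" : "absent");
	return 0;
}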
@@ -1658,6 +1805,11 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}
+static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
+{
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+}
+
/*
* We emulate only the following system register space.
* Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
@@ -1719,27 +1871,32 @@ static int emulate_sys_reg(u32 id, u64 *valp)
return 0;
}
-static int emulate_mrs(struct pt_regs *regs, u32 insn)
+int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
{
int rc;
- u32 sys_reg, dst;
u64 val;
- /*
- * sys_reg values are defined as used in mrs/msr instruction.
- * shift the imm value to get the encoding.
- */
- sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
rc = emulate_sys_reg(sys_reg, &val);
if (!rc) {
- dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
- pt_regs_write_reg(regs, dst, val);
+ pt_regs_write_reg(regs, rt, val);
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
-
return rc;
}
+static int emulate_mrs(struct pt_regs *regs, u32 insn)
+{
+ u32 sys_reg, rt;
+
+ /*
+ * sys_reg values are defined as used in mrs/msr instruction.
+ * shift the imm value to get the encoding.
+ */
+ sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
+ rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
+ return do_emulate_mrs(regs, sys_reg, rt);
+}
+
static struct undef_hook mrs_hook = {
.instr_mask = 0xfff00000,
.instr_val = 0xd5300000,
@@ -1755,9 +1912,3 @@ static int __init enable_mrs_emulation(void)
}
core_initcall(enable_mrs_emulation);
-
-void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
-{
- /* Firmware may have left a deferred SError in this register. */
- write_sysreg_s(0, SYS_DISR_EL1);
-}
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index e9ab7b3ed317..bcc2831399cb 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -81,6 +81,7 @@ static const char *const hwcap_str[] = {
"uscat",
"ilrcpc",
"flagm",
+ "ssbs",
NULL
};
@@ -324,7 +325,15 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
info->reg_cntfrq = arch_timer_get_cntfrq();
- info->reg_ctr = read_cpuid_cachetype();
+ /*
+ * Use the effective value of CTR_EL0 rather than the raw value
+ * exposed by the CPU. The CTR_EL0.IDC field value must be interpreted
+ * along with the CLIDR_EL1 fields to avoid triggering false warnings
+ * when there is a mismatch across the CPUs. Keep track of the
+ * effective value of CTR_EL0 in our internal records for an
+ * accurate sanity check and feature enablement.
+ */
+ info->reg_ctr = read_cpuid_effective_cachetype();
info->reg_dczid = read_cpuid(DCZID_EL0);
info->reg_midr = read_cpuid_id();
info->reg_revidr = read_cpuid(REVIDR_EL1);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 06ca574495af..d7bb6aefae0a 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -210,13 +210,6 @@ NOKPROBE_SYMBOL(call_step_hook);
static void send_user_sigtrap(int si_code)
{
struct pt_regs *regs = current_pt_regs();
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = si_code;
- info.si_addr = (void __user *)instruction_pointer(regs);
if (WARN_ON(!user_mode(regs)))
return;
@@ -224,7 +217,9 @@ static void send_user_sigtrap(int si_code)
if (interrupts_enabled(regs))
local_irq_enable();
- arm64_force_sig_info(&info, "User debug trap", current);
+ arm64_force_sig_fault(SIGTRAP, si_code,
+ (void __user *)instruction_pointer(regs),
+ "User debug trap");
}
static int single_step_handler(unsigned long addr, unsigned int esr,
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 09dbea221a27..039144ecbcb2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -589,7 +589,7 @@ el1_undef:
inherit_daif pstate=x23, tmp=x2
mov x0, sp
bl do_undefinstr
- ASM_BUG()
+ kernel_exit 1
el1_dbg:
/*
* Debug exception handling
@@ -665,6 +665,7 @@ el0_sync:
cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
+ ccmp x24, #ESR_ELx_EC_WFx, #4, ne
b.eq el0_sys
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el0_sp_pc
@@ -697,9 +698,9 @@ el0_sync_compat:
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
- b.eq el0_undef
+ b.eq el0_cp15
cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
- b.eq el0_undef
+ b.eq el0_cp15
cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
@@ -722,6 +723,17 @@ el0_irq_compat:
el0_error_compat:
kernel_entry 0, 32
b el0_error_naked
+
+el0_cp15:
+ /*
+ * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
+ */
+ enable_daif
+ ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_cp15instr
+ b ret_to_user
#endif
el0_da:
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 58c53bc96928..5ebe73b69961 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -842,7 +842,6 @@ asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
*/
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
- siginfo_t info;
unsigned int si_code = FPE_FLTUNK;
if (esr & ESR_ELx_FP_EXC_TFV) {
@@ -858,12 +857,9 @@ asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
si_code = FPE_FLTRES;
}
- clear_siginfo(&info);
- info.si_signo = SIGFPE;
- info.si_code = si_code;
- info.si_addr = (void __user *)instruction_pointer(regs);
-
- send_sig_info(SIGFPE, &info, current);
+ send_sig_fault(SIGFPE, si_code,
+ (void __user *)instruction_pointer(regs),
+ current);
}
void fpsimd_thread_switch(struct task_struct *next)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b0853069702f..4471f570a295 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -287,19 +287,21 @@ __create_page_tables:
mov x28, lr
/*
- * Invalidate the idmap and swapper page tables to avoid potential
- * dirty cache lines being evicted.
+ * Invalidate the init page tables to avoid potential dirty cache lines
+ * being evicted. Other page tables are allocated in rodata as part of
+ * the kernel image, and thus are clean to the PoC per the boot
+ * protocol.
*/
- adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
+ adrp x0, init_pg_dir
+ adrp x1, init_pg_end
sub x1, x1, x0
bl __inval_dcache_area
/*
- * Clear the idmap and swapper page tables.
+ * Clear the init page tables.
*/
- adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
+ adrp x0, init_pg_dir
+ adrp x1, init_pg_end
sub x1, x1, x0
1: stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
@@ -373,7 +375,7 @@ __create_page_tables:
/*
* Map the kernel image (starting with PHYS_OFFSET).
*/
- adrp x0, swapper_pg_dir
+ adrp x0, init_pg_dir
mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text)
add x5, x5, x23 // add KASLR displacement
mov x4, PTRS_PER_PGD
@@ -390,7 +392,7 @@ __create_page_tables:
* tables again to remove any speculatively loaded cache lines.
*/
adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
+ adrp x1, init_pg_end
sub x1, x1, x0
dmb sy
bl __inval_dcache_area
@@ -706,6 +708,7 @@ secondary_startup:
* Common entry point for secondary CPUs.
*/
bl __cpu_setup // initialise processor
+ adrp x1, swapper_pg_dir
bl __enable_mmu
ldr x8, =__secondary_switched
br x8
@@ -748,6 +751,7 @@ ENDPROC(__secondary_switched)
* Enable the MMU.
*
* x0 = SCTLR_EL1 value for turning on the MMU.
+ * x1 = TTBR1_EL1 value
*
* Returns to the caller via x30/lr. This requires the caller to be covered
* by the .idmap.text section.
@@ -756,17 +760,16 @@ ENDPROC(__secondary_switched)
* If it isn't, park the CPU
*/
ENTRY(__enable_mmu)
- mrs x1, ID_AA64MMFR0_EL1
- ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
+ mrs x2, ID_AA64MMFR0_EL1
+ ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
b.ne __no_granule_support
- update_early_cpu_boot_status 0, x1, x2
- adrp x1, idmap_pg_dir
- adrp x2, swapper_pg_dir
- phys_to_ttbr x3, x1
- phys_to_ttbr x4, x2
- msr ttbr0_el1, x3 // load TTBR0
- msr ttbr1_el1, x4 // load TTBR1
+ update_early_cpu_boot_status 0, x2, x3
+ adrp x2, idmap_pg_dir
+ phys_to_ttbr x1, x1
+ phys_to_ttbr x2, x2
+ msr ttbr0_el1, x2 // load TTBR0
+ msr ttbr1_el1, x1 // load TTBR1
isb
msr sctlr_el1, x0
isb
@@ -823,6 +826,7 @@ __primary_switch:
mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value
#endif
+ adrp x1, init_pg_dir
bl __enable_mmu
#ifdef CONFIG_RELOCATABLE
bl __relocate_kernel
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index e0756416e567..646b9562ee64 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -25,12 +25,12 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- void *addr = (void *)entry->code;
+ void *addr = (void *)jump_entry_code(entry);
u32 insn;
if (type == JUMP_LABEL_JMP) {
- insn = aarch64_insn_gen_branch_imm(entry->code,
- entry->target,
+ insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry),
+ jump_entry_target(entry),
AARCH64_INSN_BRANCH_NOLINK);
} else {
insn = aarch64_insn_gen_nop();
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 53f371ed4568..75c158b0353f 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -21,5 +21,5 @@
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
-struct pv_time_ops pv_time_ops;
-EXPORT_SYMBOL_GPL(pv_time_ops);
+struct paravirt_patch_template pv_ops;
+EXPORT_SYMBOL_GPL(pv_ops);
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 0e2ea1c78542..bb85e2f4603f 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -165,16 +165,15 @@ static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
/* Interface called from ACPI code to setup PCI host controller */
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
- int node = acpi_get_node(root->device->handle);
struct acpi_pci_generic_root_info *ri;
struct pci_bus *bus, *child;
struct acpi_pci_root_ops *root_ops;
- ri = kzalloc_node(sizeof(*ri), GFP_KERNEL, node);
+ ri = kzalloc(sizeof(*ri), GFP_KERNEL);
if (!ri)
return NULL;
- root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
+ root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
if (!root_ops) {
kfree(ri);
return NULL;
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 8e38d5267f22..e213f8e867f6 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -966,6 +966,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
return 0;
}
+static int armv8pmu_filter_match(struct perf_event *event)
+{
+ unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
+ return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
+}
+
static void armv8pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
@@ -1114,6 +1120,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->stop = armv8pmu_stop,
cpu_pmu->reset = armv8pmu_reset,
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
+ cpu_pmu->filter_match = armv8pmu_filter_match;
return 0;
}
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index e78c3ef04d95..9b65132e789a 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -107,7 +107,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if (!p->ainsn.api.insn)
return -ENOMEM;
break;
- };
+ }
/* prepare the instruction */
if (p->ainsn.api.insn)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7f1628effe6d..ce99c58cd1f1 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -358,6 +358,10 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
cpus_have_const_cap(ARM64_HAS_UAO))
childregs->pstate |= PSR_UAO_BIT;
+
+ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+ childregs->pstate |= PSR_SSBS_BIT;
+
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index e8edbf13302a..8cdaf25e99cd 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -24,7 +24,6 @@
#include <uapi/linux/psci.h>
-#include <asm/compiler.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/smp_plat.h>
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6219486fa25f..1710a2d01669 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -182,13 +182,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
struct pt_regs *regs)
{
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = TRAP_HWBKPT;
- info.si_addr = (void __user *)(bkpt->trigger);
+ const char *desc = "Hardware breakpoint trap (ptrace)";
#ifdef CONFIG_COMPAT
if (is_compat_task()) {
@@ -208,10 +202,14 @@ static void ptrace_hbptriggered(struct perf_event *bp,
break;
}
}
- force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger);
+ arm64_force_sig_ptrace_errno_trap(si_errno,
+ (void __user *)bkpt->trigger,
+ desc);
}
#endif
- arm64_force_sig_info(&info, "Hardware breakpoint trap (ptrace)", current);
+ arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
+ (void __user *)(bkpt->trigger),
+ desc);
}
/*
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 5b4fac434c84..d0f62dd24c90 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -64,6 +64,9 @@
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>
+static int num_standard_resources;
+static struct resource *standard_resources;
+
phys_addr_t __fdt_pointer __initdata;
/*
@@ -206,14 +209,19 @@ static void __init request_standard_resources(void)
{
struct memblock_region *region;
struct resource *res;
+ unsigned long i = 0;
kernel_code.start = __pa_symbol(_text);
kernel_code.end = __pa_symbol(__init_begin - 1);
kernel_data.start = __pa_symbol(_sdata);
kernel_data.end = __pa_symbol(_end - 1);
+ num_standard_resources = memblock.memory.cnt;
+ standard_resources = alloc_bootmem_low(num_standard_resources *
+ sizeof(*standard_resources));
+
for_each_memblock(memory, region) {
- res = alloc_bootmem_low(sizeof(*res));
+ res = &standard_resources[i++];
if (memblock_is_nomap(region)) {
res->name = "reserved";
res->flags = IORESOURCE_MEM;
@@ -243,36 +251,26 @@ static void __init request_standard_resources(void)
static int __init reserve_memblock_reserved_regions(void)
{
- phys_addr_t start, end, roundup_end = 0;
- struct resource *mem, *res;
- u64 i;
-
- for_each_reserved_mem_region(i, &start, &end) {
- if (end <= roundup_end)
- continue; /* done already */
-
- start = __pfn_to_phys(PFN_DOWN(start));
- end = __pfn_to_phys(PFN_UP(end)) - 1;
- roundup_end = end;
-
- res = kzalloc(sizeof(*res), GFP_ATOMIC);
- if (WARN_ON(!res))
- return -ENOMEM;
- res->start = start;
- res->end = end;
- res->name = "reserved";
- res->flags = IORESOURCE_MEM;
-
- mem = request_resource_conflict(&iomem_resource, res);
- /*
- * We expected memblock_reserve() regions to conflict with
- * memory created by request_standard_resources().
- */
- if (WARN_ON_ONCE(!mem))
+ u64 i, j;
+
+ for (i = 0; i < num_standard_resources; ++i) {
+ struct resource *mem = &standard_resources[i];
+ phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+ if (!memblock_is_region_reserved(mem->start, mem_size))
continue;
- kfree(res);
- reserve_region_with_split(mem, start, end, "reserved");
+ for_each_reserved_mem_region(j, &r_start, &r_end) {
+ resource_size_t start, end;
+
+ start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+ end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+ if (start > mem->end || end < mem->start)
+ continue;
+
+ reserve_region_with_split(mem, start, end, "reserved");
+ }
}
return 0;
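
The replacement loop above intersects each reserved memblock region with the standard resource that contains it, clamping to page boundaries before splitting. A tiny model of the clamp-and-skip step, with invented addresses:

/* Hedged sketch: the clamp/intersect step from the loop above, with
 * invented addresses; PFN rounding is modelled with 4KB pages. */
#include <stdio.h>

#define PAGE_SIZE 0x1000UL
#define DOWN(x) ((x) & ~(PAGE_SIZE - 1))
#define UP(x)   DOWN((x) + PAGE_SIZE - 1)

int main(void)
{
	unsigned long mem_start = 0x80000000, mem_end = 0x8fffffff;
	unsigned long r_start = 0x8ffff800, r_end = 0x90000800;

	unsigned long start = DOWN(r_start) > mem_start ? DOWN(r_start) : mem_start;
	unsigned long end   = UP(r_end) - 1 < mem_end ? UP(r_end) - 1 : mem_end;

	if (start > mem_end || end < mem_start)
		printf("no overlap, skip\n");
	else
		printf("reserve [%#lx, %#lx] within the resource\n", start, end);
	return 0;
}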
@@ -351,12 +349,8 @@ void __init setup_arch(char **cmdline_p)
#endif
#ifdef CONFIG_VT
-#if defined(CONFIG_VGA_CONSOLE)
- conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
#endif
-#endif
if (boot_args[1] || boot_args[2] || boot_args[3]) {
pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index bebec8ef9372..3e53ffa07994 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,6 +101,7 @@ ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
+ adrp x1, swapper_pg_dir
bl __enable_mmu
ldr x8, =_cpu_resume
br x8
diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
index 3432e5ef9f41..885f13e58708 100644
--- a/arch/arm64/kernel/ssbd.c
+++ b/arch/arm64/kernel/ssbd.c
@@ -3,17 +3,33 @@
* Copyright (C) 2018 ARM Ltd, All Rights Reserved.
*/
+#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <asm/cpufeature.h>
+static void ssbd_ssbs_enable(struct task_struct *task)
+{
+ u64 val = is_compat_thread(task_thread_info(task)) ?
+ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+ task_pt_regs(task)->pstate |= val;
+}
+
+static void ssbd_ssbs_disable(struct task_struct *task)
+{
+ u64 val = is_compat_thread(task_thread_info(task)) ?
+ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+ task_pt_regs(task)->pstate &= ~val;
+}
+
/*
* prctl interface for SSBD
- * FIXME: Drop the below ifdefery once merged in 4.18.
*/
-#ifdef PR_SPEC_STORE_BYPASS
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
int state = arm64_get_ssbd_state();
@@ -46,12 +62,14 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
return -EPERM;
task_clear_spec_ssb_disable(task);
clear_tsk_thread_flag(task, TIF_SSBD);
+ ssbd_ssbs_enable(task);
break;
case PR_SPEC_DISABLE:
if (state == ARM64_SSBD_FORCE_DISABLE)
return -EPERM;
task_set_spec_ssb_disable(task);
set_tsk_thread_flag(task, TIF_SSBD);
+ ssbd_ssbs_disable(task);
break;
case PR_SPEC_FORCE_DISABLE:
if (state == ARM64_SSBD_FORCE_DISABLE)
@@ -59,6 +77,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
task_set_spec_ssb_disable(task);
task_set_spec_ssb_force_disable(task);
set_tsk_thread_flag(task, TIF_SSBD);
+ ssbd_ssbs_disable(task);
break;
default:
return -ERANGE;
@@ -107,4 +126,3 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
return -ENODEV;
}
}
-#endif /* PR_SPEC_STORE_BYPASS */
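
On the userspace side, the prctl path above is the standard speculation-control interface. A minimal sketch of querying and disabling store-bypass speculation for the current task, using the PR_* constants from linux/prctl.h:

/* Hedged sketch: driving the SSBD prctl path above from userspace. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	printf("initial state: %#x\n", state);

	/* On an SSBS CPU this now also clears PSR_SSBS_BIT for the task. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("prctl");
	return 0;
}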
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 70c283368b64..9405d1b7f4b0 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -48,6 +48,10 @@ void notrace __cpu_suspend_exit(void)
*/
cpu_uninstall_idmap();
+ /* Restore CnP bit in TTBR1_EL1 */
+ if (system_supports_cnp())
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+
/*
* PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly.
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index a6109825eeb9..32653d156747 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -68,8 +68,8 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
*/
long compat_arm_syscall(struct pt_regs *regs)
{
- siginfo_t info;
unsigned int no = regs->regs[7];
+ void __user *addr;
switch (no) {
/*
@@ -112,13 +112,10 @@ long compat_arm_syscall(struct pt_regs *regs)
break;
}
- clear_siginfo(&info);
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
- (compat_thumb_mode(regs) ? 2 : 4);
+ addr = (void __user *)instruction_pointer(regs) -
+ (compat_thumb_mode(regs) ? 2 : 4);
- arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no);
+ arm64_notify_die("Oops - bad compat syscall(2)", regs,
+ SIGILL, ILL_ILLTRP, addr, no);
return 0;
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 039e9ff379cc..5f4d9acb32f5 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -224,24 +224,19 @@ void die(const char *str, struct pt_regs *regs, int err)
do_exit(SIGSEGV);
}
-static bool show_unhandled_signals_ratelimited(void)
+static void arm64_show_signal(int signo, const char *str)
{
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- return show_unhandled_signals && __ratelimit(&rs);
-}
-
-void arm64_force_sig_info(struct siginfo *info, const char *str,
- struct task_struct *tsk)
-{
+ struct task_struct *tsk = current;
unsigned int esr = tsk->thread.fault_code;
struct pt_regs *regs = task_pt_regs(tsk);
- if (!unhandled_signal(tsk, info->si_signo))
- goto send_sig;
-
- if (!show_unhandled_signals_ratelimited())
- goto send_sig;
+ /* Leave if the signal won't be shown */
+ if (!show_unhandled_signals ||
+ !unhandled_signal(tsk, signo) ||
+ !__ratelimit(&rs))
+ return;
pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
if (esr)
@@ -251,19 +246,39 @@ void arm64_force_sig_info(struct siginfo *info, const char *str,
print_vma_addr(KERN_CONT " in ", regs->pc);
pr_cont("\n");
__show_regs(regs);
+}
-send_sig:
- force_sig_info(info->si_signo, info, tsk);
+void arm64_force_sig_fault(int signo, int code, void __user *addr,
+ const char *str)
+{
+ arm64_show_signal(signo, str);
+ force_sig_fault(signo, code, addr, current);
+}
+
+void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
+ const char *str)
+{
+ arm64_show_signal(SIGBUS, str);
+ force_sig_mceerr(code, addr, lsb, current);
+}
+
+void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
+ const char *str)
+{
+ arm64_show_signal(SIGTRAP, str);
+ force_sig_ptrace_errno_trap(errno, addr);
}
void arm64_notify_die(const char *str, struct pt_regs *regs,
- struct siginfo *info, int err)
+ int signo, int sicode, void __user *addr,
+ int err)
{
if (user_mode(regs)) {
WARN_ON(regs != current_pt_regs());
current->thread.fault_address = 0;
current->thread.fault_code = err;
- arm64_force_sig_info(info, str, current);
+
+ arm64_force_sig_fault(signo, sicode, addr, str);
} else {
die(str, regs, err);
}
@@ -310,10 +325,12 @@ static int call_undef_hook(struct pt_regs *regs)
int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
void __user *pc = (void __user *)instruction_pointer(regs);
- if (!user_mode(regs))
- return 1;
-
- if (compat_thumb_mode(regs)) {
+ if (!user_mode(regs)) {
+ __le32 instr_le;
+ if (probe_kernel_address((__force __le32 *)pc, instr_le))
+ goto exit;
+ instr = le32_to_cpu(instr_le);
+ } else if (compat_thumb_mode(regs)) {
/* 16-bit Thumb instruction */
__le16 instr_le;
if (get_user(instr_le, (__le16 __user *)pc))
@@ -348,11 +365,11 @@ exit:
void force_signal_inject(int signal, int code, unsigned long address)
{
- siginfo_t info;
const char *desc;
struct pt_regs *regs = current_pt_regs();
- clear_siginfo(&info);
+ if (WARN_ON(!user_mode(regs)))
+ return;
switch (signal) {
case SIGILL:
@@ -372,12 +389,7 @@ void force_signal_inject(int signal, int code, unsigned long address)
signal = SIGKILL;
}
- info.si_signo = signal;
- info.si_errno = 0;
- info.si_code = code;
- info.si_addr = (void __user *)address;
-
- arm64_notify_die(desc, regs, &info, 0);
+ arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0);
}
/*
@@ -406,14 +418,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0)
return;
+ BUG_ON(!user_mode(regs));
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}
-void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
-{
- sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
-}
-
#define __user_cache_maint(insn, address, res) \
if (address >= user_addr_max()) { \
res = -EFAULT; \
@@ -437,7 +445,7 @@ void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
unsigned long address;
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
int ret = 0;
@@ -472,7 +480,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
pt_regs_write_reg(regs, rt, val);
@@ -482,7 +490,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
@@ -490,12 +498,28 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
pt_regs_write_reg(regs, rt, arch_timer_get_rate());
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
+static void mrs_handler(unsigned int esr, struct pt_regs *regs)
+{
+ u32 sysreg, rt;
+
+ rt = ESR_ELx_SYS64_ISS_RT(esr);
+ sysreg = esr_sys64_to_sysreg(esr);
+
+ if (do_emulate_mrs(regs, sysreg, rt) != 0)
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+}
+
+static void wfi_handler(unsigned int esr, struct pt_regs *regs)
+{
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+}
+
struct sys64_hook {
unsigned int esr_mask;
unsigned int esr_val;
@@ -526,9 +550,176 @@ static struct sys64_hook sys64_hooks[] = {
.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
.handler = cntfrq_read_handler,
},
+ {
+ /* Trap read access to CPUID registers */
+ .esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
+ .esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
+ .handler = mrs_handler,
+ },
+ {
+ /* Trap WFI instructions executed in userspace */
+ .esr_mask = ESR_ELx_WFx_MASK,
+ .esr_val = ESR_ELx_WFx_WFI_VAL,
+ .handler = wfi_handler,
+ },
{},
};
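With the MRS hook in place, EL0 reads of the CPU ID registers trap and are
emulated rather than raising SIGILL, and a WFI executed in userspace is now
simply skipped. A minimal userspace sketch of an access the MRS hook covers
(illustrative only, not part of this patch):

#include <stdint.h>

/* EL0 read of an ID register: the access traps to EL1 and, with the
 * hook above in place, is emulated by mrs_handler() instead of
 * delivering SIGILL. */
static inline uint64_t read_midr(void)
{
        uint64_t midr;

        asm volatile("mrs %0, MIDR_EL1" : "=r"(midr));
        return midr;
}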
+
+#ifdef CONFIG_COMPAT
+#define PSTATE_IT_1_0_SHIFT 25
+#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT)
+#define PSTATE_IT_7_2_SHIFT 10
+#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT)
+
+static u32 compat_get_it_state(struct pt_regs *regs)
+{
+ u32 it, pstate = regs->pstate;
+
+ it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
+ it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;
+
+ return it;
+}
+
+static void compat_set_it_state(struct pt_regs *regs, u32 it)
+{
+ u32 pstate_it;
+
+ pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
+ pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;
+
+ regs->pstate &= ~PSR_AA32_IT_MASK;
+ regs->pstate |= pstate_it;
+}
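The IT bits are not contiguous in PSTATE: IT[1:0] sit at bits [26:25] and
IT[7:2] at bits [15:10], which is exactly the split the two helpers above
encode. A standalone round-trip sketch of the same packing, with hypothetical
helper names:

#include <assert.h>
#include <stdint.h>

static uint32_t pack_it(uint32_t it)
{
        return ((it & 0x3) << 25) | (((it >> 2) & 0x3f) << 10);
}

static uint32_t unpack_it(uint32_t pstate)
{
        return ((pstate >> 25) & 0x3) | (((pstate >> 10) & 0x3f) << 2);
}

int main(void)
{
        /* Every 8-bit IT value survives the pack/unpack round trip. */
        for (uint32_t it = 0; it < 0x100; it++)
                assert(unpack_it(pack_it(it)) == it);
        return 0;
}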
+
+static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
+{
+ int cond;
+
+ /* Only a T32 instruction can trap without CV being set */
+ if (!(esr & ESR_ELx_CV)) {
+ u32 it;
+
+ it = compat_get_it_state(regs);
+ if (!it)
+ return true;
+
+ cond = it >> 4;
+ } else {
+ cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
+ }
+
+ return aarch32_opcode_cond_checks[cond](regs->pstate);
+}
+
+static void advance_itstate(struct pt_regs *regs)
+{
+ u32 it;
+
+ /* ARM mode */
+ if (!(regs->pstate & PSR_AA32_T_BIT) ||
+ !(regs->pstate & PSR_AA32_IT_MASK))
+ return;
+
+ it = compat_get_it_state(regs);
+
+ /*
+ * If this is the last instruction of the block, wipe the IT
+ * state. Otherwise advance it.
+ */
+ if (!(it & 7))
+ it = 0;
+ else
+ it = (it & 0xe0) | ((it << 1) & 0x1f);
+
+ compat_set_it_state(regs, it);
+}
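The advance rule keeps the top three bits (the base condition) and shifts the
five mask bits left by one, wiping the state once the low three bits are
clear. A small standalone trace of that rule on an example value:

#include <stdio.h>
#include <stdint.h>

/* Same update rule as advance_itstate() above. */
static uint32_t advance(uint32_t it)
{
        if (!(it & 7))
                return 0;
        return (it & 0xe0) | ((it << 1) & 0x1f);
}

int main(void)
{
        uint32_t it = 0x0c;     /* example state: mask bits 0b01100 */

        while (it) {
                printf("it = %#04x\n", it);     /* 0x0c, then 0x18 */
                it = advance(it);
        }
        return 0;
}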
+
+static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
+ unsigned int sz)
+{
+ advance_itstate(regs);
+ arm64_skip_faulting_instruction(regs, sz);
+}
+
+static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
+
+ pt_regs_write_reg(regs, reg, arch_timer_get_rate());
+ arm64_compat_skip_faulting_instruction(regs, 4);
+}
+
+static struct sys64_hook cp15_32_hooks[] = {
+ {
+ .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
+ .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
+ .handler = compat_cntfrq_read_handler,
+ },
+ {},
+};
+
+static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
+ int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
+ u64 val = arch_counter_get_cntvct();
+
+ pt_regs_write_reg(regs, rt, lower_32_bits(val));
+ pt_regs_write_reg(regs, rt2, upper_32_bits(val));
+ arm64_compat_skip_faulting_instruction(regs, 4);
+}
+
+static struct sys64_hook cp15_64_hooks[] = {
+ {
+ .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
+ .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
+ .handler = compat_cntvct_read_handler,
+ },
+ {},
+};
+
+asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
+{
+ struct sys64_hook *hook, *hook_base;
+
+ if (!cp15_cond_valid(esr, regs)) {
+ /*
+ * There is no T16 variant of a CP access, so we
+ * always advance PC by 4 bytes.
+ */
+ arm64_compat_skip_faulting_instruction(regs, 4);
+ return;
+ }
+
+ switch (ESR_ELx_EC(esr)) {
+ case ESR_ELx_EC_CP15_32:
+ hook_base = cp15_32_hooks;
+ break;
+ case ESR_ELx_EC_CP15_64:
+ hook_base = cp15_64_hooks;
+ break;
+ default:
+ do_undefinstr(regs);
+ return;
+ }
+
+ for (hook = hook_base; hook->handler; hook++)
+ if ((hook->esr_mask & esr) == hook->esr_val) {
+ hook->handler(esr, regs);
+ return;
+ }
+
+ /*
+ * New cp15 instructions may previously have been undefined at
+ * EL0. Fall back to our usual undefined instruction handler
+ * so that we handle these consistently.
+ */
+ do_undefinstr(regs);
+}
+#endif
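For reference, the 64-bit CP15 access that cp15_64_hooks emulates is the
AArch32 virtual counter read. A compat userspace sketch of that access
(illustrative only):

#include <stdint.h>

/* AArch32 (compat) userspace: read CNTVCT through the CP15 encoding
 * that do_cp15instr() now handles when the access traps to EL1. */
static inline uint64_t read_cntvct_aarch32(void)
{
        uint32_t lo, hi;

        asm volatile("mrrc p15, 1, %0, %1, c14" : "=r"(lo), "=r"(hi));
        return ((uint64_t)hi << 32) | lo;
}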
+
asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
struct sys64_hook *hook;
@@ -605,7 +796,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
handler[reason], smp_processor_id(), esr,
esr_get_class_string(esr));
- die("Oops - bad mode", regs, 0);
local_daif_mask();
panic("bad mode");
}
@@ -616,19 +806,13 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
*/
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
- siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
- clear_siginfo(&info);
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = pc;
-
current->thread.fault_address = 0;
current->thread.fault_code = esr;
- arm64_force_sig_info(&info, "Bad EL0 synchronous exception", current);
+ arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
+ "Bad EL0 synchronous exception");
}
#ifdef CONFIG_VMAP_STACK
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 605d1b60469c..03b00007553d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -138,6 +138,23 @@ SECTIONS
EXCEPTION_TABLE(8) /* __init_begin will be marked RO NX */
NOTES
+ . = ALIGN(PAGE_SIZE);
+ idmap_pg_dir = .;
+ . += IDMAP_DIR_SIZE;
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ tramp_pg_dir = .;
+ . += PAGE_SIZE;
+#endif
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ reserved_ttbr0 = .;
+ . += RESERVED_TTBR0_SIZE;
+#endif
+ swapper_pg_dir = .;
+ . += PAGE_SIZE;
+ swapper_pg_end = .;
+
. = ALIGN(SEGMENT_ALIGN);
__init_begin = .;
__inittext_begin = .;
@@ -166,7 +183,6 @@ SECTIONS
INIT_SETUP(16)
INIT_CALLS
CON_INITCALL
- SECURITY_INITCALL
INIT_RAM_FS
*(.init.rodata.* .init.bss) /* from the EFI stub */
}
@@ -216,21 +232,9 @@ SECTIONS
BSS_SECTION(0, 0, 0)
. = ALIGN(PAGE_SIZE);
- idmap_pg_dir = .;
- . += IDMAP_DIR_SIZE;
-
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- tramp_pg_dir = .;
- . += PAGE_SIZE;
-#endif
-
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- reserved_ttbr0 = .;
- . += RESERVED_TTBR0_SIZE;
-#endif
- swapper_pg_dir = .;
- . += SWAPPER_DIR_SIZE;
- swapper_pg_end = .;
+ init_pg_dir = .;
+ . += INIT_DIR_SIZE;
+ init_pg_end = .;
__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
_end = .;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 07256b08226c..dd436a50fce7 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+ u64 off = core_reg_offset_from_id(reg->id);
+ int size;
+
+ switch (off) {
+ case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+ KVM_REG_ARM_CORE_REG(regs.regs[30]):
+ case KVM_REG_ARM_CORE_REG(regs.sp):
+ case KVM_REG_ARM_CORE_REG(regs.pc):
+ case KVM_REG_ARM_CORE_REG(regs.pstate):
+ case KVM_REG_ARM_CORE_REG(sp_el1):
+ case KVM_REG_ARM_CORE_REG(elr_el1):
+ case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+ KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+ size = sizeof(__u64);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+ KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+ size = sizeof(__uint128_t);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+ size = sizeof(__u32);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (KVM_REG_SIZE(reg->id) == size &&
+ IS_ALIGNED(off, size / sizeof(__u32)))
+ return 0;
+
+ return -EINVAL;
+}
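validate_core_offset() closes a hole where the size encoded in the register
id could disagree with the real width of the targeted core register. A hedged
userspace sketch of an access that is now rejected, using the arm64 KVM uapi
macros (error handling omitted):

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Ask for regs.pc (a 64-bit register) with a 32-bit sized id: the
 * ioctl now fails with errno == EINVAL instead of performing a
 * mis-sized copy from the kvm_regs array. */
int get_pc_with_wrong_size(int vcpu_fd, uint32_t *val)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE |
                        KVM_REG_ARM_CORE_REG(regs.pc),
                .addr = (uintptr_t)val,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}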
+
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
+ if (validate_core_offset(reg))
+ return -EINVAL;
+
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
return -EFAULT;
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
+ if (validate_core_offset(reg))
+ return -EINVAL;
+
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
return -EINVAL;
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
- u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
+ u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
case PSR_AA32_MODE_USR:
+ if (!system_supports_32bit_el0())
+ return -EINVAL;
+ break;
case PSR_AA32_MODE_FIQ:
case PSR_AA32_MODE_IRQ:
case PSR_AA32_MODE_SVC:
case PSR_AA32_MODE_ABT:
case PSR_AA32_MODE_UND:
+ if (!vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
+ break;
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
+ if (vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
break;
default:
err = -EINVAL;
@@ -338,15 +391,15 @@ int __attribute_const__ kvm_target_cpu(void)
return KVM_ARM_TARGET_CORTEX_A53;
case ARM_CPU_PART_CORTEX_A57:
return KVM_ARM_TARGET_CORTEX_A57;
- };
+ }
break;
case ARM_CPU_IMP_APM:
switch (part_number) {
case APM_CPU_PART_POTENZA:
return KVM_ARM_TARGET_XGENE_POTENZA;
- };
+ }
break;
- };
+ }
/* Return a default generic target */
return KVM_ARM_TARGET_GENERIC_V8;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index e5e741bfffe1..35a81bebd02b 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -284,6 +284,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
*/
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
return 0;
+ case ARM_EXCEPTION_IL:
+ /*
+ * We attempted an illegal exception return. Guest state must
+ * have been corrupted somehow. Give up.
+ */
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ return -EINVAL;
default:
kvm_pr_unimpl("Unsupported exception type: %d",
exception_index);
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index ea9225160786..4576b86a5579 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -65,6 +65,9 @@ __do_hyp_init:
b.lo __kvm_handle_stub_hvc
phys_to_ttbr x4, x0
+alternative_if ARM64_HAS_CNP
+ orr x4, x4, #TTBR_CNP_BIT
+alternative_else_nop_endif
msr ttbr0_el2, x4
mrs x4, tcr_el1
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 2fabc2dc1966..82d1904328ad 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o
obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
-obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
# KVM code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 24b4fbafe3e4..b1f14f736962 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -162,6 +162,20 @@ el1_error:
mov x0, #ARM_EXCEPTION_EL1_SERROR
b __guest_exit
+el2_sync:
+ /* Check for illegal exception return, otherwise panic */
+ mrs x0, spsr_el2
+
+ /* if this was something else, then panic! */
+ tst x0, #PSR_IL_BIT
+ b.eq __hyp_panic
+
+ /* Let's attempt a recovery from the illegal exception return */
+ get_vcpu_ptr x1, x0
+ mov x0, #ARM_EXCEPTION_IL
+ b __guest_exit
+
el2_error:
ldp x0, x1, [sp], #16
@@ -240,7 +254,7 @@ ENTRY(__kvm_hyp_vector)
invalid_vect el2t_fiq_invalid // FIQ EL2t
invalid_vect el2t_error_invalid // Error EL2t
- invalid_vect el2h_sync_invalid // Synchronous EL2h
+ valid_vect el2_sync // Synchronous EL2h
invalid_vect el2h_irq_invalid // IRQ EL2h
invalid_vect el2h_fiq_invalid // FIQ EL2h
valid_vect el2_error // Error EL2h
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
deleted file mode 100644
index 603e1ee83e89..000000000000
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2016 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/types.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_hyp.h>
-
-u32 __hyp_text __init_stage2_translation(void)
-{
- u64 val = VTCR_EL2_FLAGS;
- u64 parange;
- u64 tmp;
-
- /*
- * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS
- * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
- * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
- */
- parange = read_sysreg(id_aa64mmfr0_el1) & 7;
- if (parange > ID_AA64MMFR0_PARANGE_MAX)
- parange = ID_AA64MMFR0_PARANGE_MAX;
- val |= parange << 16;
-
- /* Compute the actual PARange... */
- switch (parange) {
- case 0:
- parange = 32;
- break;
- case 1:
- parange = 36;
- break;
- case 2:
- parange = 40;
- break;
- case 3:
- parange = 42;
- break;
- case 4:
- parange = 44;
- break;
- case 5:
- default:
- parange = 48;
- break;
- }
-
- /*
- * ... and clamp it to 40 bits, unless we have some braindead
- * HW that implements less than that. In all cases, we'll
- * return that value for the rest of the kernel to decide what
- * to do.
- */
- val |= 64 - (parange > 40 ? 40 : parange);
-
- /*
- * Check the availability of Hardware Access Flag / Dirty Bit
- * Management in ID_AA64MMFR1_EL1 and enable the feature in VTCR_EL2.
- */
- tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_HADBS_SHIFT) & 0xf;
- if (tmp)
- val |= VTCR_EL2_HA;
-
- /*
- * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
- * bit in VTCR_EL2.
- */
- tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
- val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ?
- VTCR_EL2_VS_16BIT :
- VTCR_EL2_VS_8BIT;
-
- write_sysreg(val, vtcr_el2);
-
- return parange;
-}
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ca46153d7915..7cc175c88a37 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -198,7 +198,7 @@ void deactivate_traps_vhe_put(void)
static void __hyp_text __activate_vm(struct kvm *kvm)
{
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ __load_guest_stage2(kvm);
}
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
@@ -263,7 +263,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
return false; /* Translation failed, back to guest */
/* Convert PAR to HPFAR format */
- *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
+ *hpfar = PAR_TO_HPFAR(tmp);
return true;
}
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 9ce223944983..68d6f7c3b237 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -152,8 +152,25 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
static void __hyp_text
__sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
+ u64 pstate = ctxt->gp_regs.regs.pstate;
+ u64 mode = pstate & PSR_AA32_MODE_MASK;
+
+ /*
+ * Safety check to ensure we're setting the CPU up to enter the guest
+ * in a less privileged mode.
+ *
+ * If we are attempting a return to EL2 or higher in AArch64 state,
+ * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
+ * we'll take an illegal exception state exception immediately after
+ * the ERET to the guest. Attempts to return to AArch32 Hyp will
+ * result in an illegal exception return because EL2's execution state
+ * is determined by SCR_EL3.RW.
+ */
+ if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
+ pstate = PSR_MODE_EL2h | PSR_IL_BIT;
+
write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
- write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+ write_sysreg_el2(pstate, spsr);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
@@ -288,3 +305,14 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
vcpu->arch.sysregs_loaded_on_cpu = false;
}
+
+void __hyp_text __kvm_enable_ssbs(void)
+{
+ u64 tmp;
+
+ asm volatile(
+ "mrs %0, sctlr_el2\n"
+ "orr %0, %0, %1\n"
+ "msr sctlr_el2, %0"
+ : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
+}
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 131c7772703c..4dbd9c69a96d 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -30,7 +30,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
* bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
* let's flip TGE before executing the TLB operation.
*/
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ __load_guest_stage2(kvm);
val = read_sysreg(hcr_el2);
val &= ~HCR_TGE;
write_sysreg(val, hcr_el2);
@@ -39,7 +39,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
{
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ __load_guest_stage2(kvm);
isb();
}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index e37c78bbe1ca..b72a3dd56204 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -26,6 +26,7 @@
#include <kvm/arm_arch_timer.h>
+#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
@@ -33,6 +34,9 @@
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
+/* Maximum phys_shift supported for any VM on this host */
+static u32 kvm_ipa_limit;
+
/*
* ARMv8 Reset Values
*/
@@ -55,12 +59,12 @@ static bool cpu_has_32bit_el1(void)
}
/**
- * kvm_arch_dev_ioctl_check_extension
+ * kvm_arch_vm_ioctl_check_extension
*
* We currently assume that the number of HW registers is uniform
* across all CPUs (see cpuinfo_sanity_check).
*/
-int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
+int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
@@ -82,9 +86,11 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_SET_GUEST_DEBUG:
case KVM_CAP_VCPU_ATTRIBUTES:
- case KVM_CAP_VCPU_EVENTS:
r = 1;
break;
+ case KVM_CAP_ARM_VM_IPA_SIZE:
+ r = kvm_ipa_limit;
+ break;
default:
r = 0;
}
@@ -133,3 +139,99 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset timer */
return kvm_timer_vcpu_reset(vcpu);
}
+
+void kvm_set_ipa_limit(void)
+{
+ unsigned int ipa_max, pa_max, va_max, parange;
+
+ parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7;
+ pa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
+
+ /* Clamp the IPA limit to the PA size supported by the kernel */
+ ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max;
+ /*
+ * Since our stage2 table is dependent on the stage1 page table code,
+ * we must always honor the following condition:
+ *
+ * Number of levels in Stage1 >= Number of levels in Stage2.
+ *
+ * So clamp the ipa limit further down to limit the number of levels.
+ * Since we can concatenate up to 16 tables at entry level, we could
+ * go up to 4 bits above the maximum VA addressable with the current
+ * number of levels.
+ */
+ va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;
+ va_max += 4;
+
+ if (va_max < ipa_max)
+ ipa_max = va_max;
+
+ /*
+ * If the final limit is lower than the real physical address
+ * limit of the CPUs, report the reason.
+ */
+ if (ipa_max < pa_max)
+ pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n",
+ (va_max < pa_max) ? "Virtual" : "Physical");
+
+ WARN(ipa_max < KVM_PHYS_SHIFT,
+ "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max);
+ kvm_ipa_limit = ipa_max;
+ kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit);
+}
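As a worked example, assuming a 4K-page kernel with 48-bit VAs: PGDIR_SHIFT
is 39, so va_max = 39 + 12 - 3 = 48, and the 4 bits of headroom for 16-way
table concatenation raise it to 52. With PHYS_MASK_SHIFT = 48, the reported
kvm_ipa_limit on such a host is min(pa_max, 48, 52) = 48 bits.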
+
+/*
+ * Configure the VTCR_EL2 for this VM. The VTCR value is common
+ * across all the physical CPUs on the system. We use system-wide
+ * sanitised values to fill in different fields, except for Hardware
+ * Management of Access Flags. The HA flag is set unconditionally on
+ * all CPUs, as it is safe to run with or without the feature and
+ * the bit is RES0 on CPUs that don't support it.
+ */
+int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
+{
+ u64 vtcr = VTCR_EL2_FLAGS;
+ u32 parange, phys_shift;
+ u8 lvls;
+
+ if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+ return -EINVAL;
+
+ phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
+ if (phys_shift) {
+ if (phys_shift > kvm_ipa_limit ||
+ phys_shift < 32)
+ return -EINVAL;
+ } else {
+ phys_shift = KVM_PHYS_SHIFT;
+ }
+
+ parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7;
+ if (parange > ID_AA64MMFR0_PARANGE_MAX)
+ parange = ID_AA64MMFR0_PARANGE_MAX;
+ vtcr |= parange << VTCR_EL2_PS_SHIFT;
+
+ vtcr |= VTCR_EL2_T0SZ(phys_shift);
+ /*
+ * Use at least a 2-level page table to prevent splitting
+ * host PMD huge pages at stage2.
+ */
+ lvls = stage2_pgtable_levels(phys_shift);
+ if (lvls < 2)
+ lvls = 2;
+ vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
+
+ /*
+ * Enable the Hardware Access Flag management, unconditionally
+ * on all CPUs. The feature is RES0 on CPUs without the support
+ * and is ignored by those CPUs.
+ */
+ vtcr |= VTCR_EL2_HA;
+
+ /* Set the vmid bits */
+ vtcr |= (kvm_get_vmid_bits() == 16) ?
+ VTCR_EL2_VS_16BIT :
+ VTCR_EL2_VS_8BIT;
+ kvm->arch.vtcr = vtcr;
+ return 0;
+}
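The IPA size is chosen at VM creation time through the machine-type argument
to KVM_CREATE_VM. A minimal userspace sketch, assuming the host advertises
KVM_CAP_ARM_VM_IPA_SIZE (error handling omitted):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Create a VM with a 40-bit guest physical address space. Passing 0
 * keeps the default KVM_PHYS_SHIFT-sized IPA space. */
int create_vm_with_40bit_ipa(void)
{
        int sys_fd = open("/dev/kvm", O_RDWR);

        return ioctl(sys_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(40));
}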
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 68755fd70dcf..69ff9887f724 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := clear_user.o delay.o copy_from_user.o \
# when supported by the CPU. Result and argument registers are handled
# correctly, based on the function prototype.
lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
-CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
+CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \
-ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \
-ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \
-fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
@@ -25,3 +25,5 @@ KCOV_INSTRUMENT_atomic_ll_sc.o := n
UBSAN_SANITIZE_atomic_ll_sc.o := n
lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
+
+obj-$(CONFIG_CRC32) += crc32.o
diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S
new file mode 100644
index 000000000000..5bc1e85b4e1c
--- /dev/null
+++ b/arch/arm64/lib/crc32.S
@@ -0,0 +1,60 @@
+/*
+ * Accelerated CRC32(C) using AArch64 CRC instructions
+ *
+ * Copyright (C) 2016 - 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/alternative.h>
+#include <asm/assembler.h>
+
+ .cpu generic+crc
+
+ .macro __crc32, c
+0: subs x2, x2, #16
+ b.mi 8f
+ ldp x3, x4, [x1], #16
+CPU_BE( rev x3, x3 )
+CPU_BE( rev x4, x4 )
+ crc32\c\()x w0, w0, x3
+ crc32\c\()x w0, w0, x4
+ b.ne 0b
+ ret
+
+8: tbz x2, #3, 4f
+ ldr x3, [x1], #8
+CPU_BE( rev x3, x3 )
+ crc32\c\()x w0, w0, x3
+4: tbz x2, #2, 2f
+ ldr w3, [x1], #4
+CPU_BE( rev w3, w3 )
+ crc32\c\()w w0, w0, w3
+2: tbz x2, #1, 1f
+ ldrh w3, [x1], #2
+CPU_BE( rev16 w3, w3 )
+ crc32\c\()h w0, w0, w3
+1: tbz x2, #0, 0f
+ ldrb w3, [x1]
+ crc32\c\()b w0, w0, w3
+0: ret
+ .endm
+
+ .align 5
+ENTRY(crc32_le)
+alternative_if_not ARM64_HAS_CRC32
+ b crc32_le_base
+alternative_else_nop_endif
+ __crc32
+ENDPROC(crc32_le)
+
+ .align 5
+ENTRY(__crc32c_le)
+alternative_if_not ARM64_HAS_CRC32
+ b __crc32c_le_base
+alternative_else_nop_endif
+ __crc32 c
+ENDPROC(__crc32c_le)
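The macro consumes 16 bytes per loop iteration, then uses the low bits of the
remaining length (the tbz ladder) to mop up 8-, 4-, 2- and 1-byte tails. The
same flow in C, as a little-endian sketch built on the ACLE CRC intrinsics
(compile with -march=armv8-a+crc; the function name is hypothetical):

#include <arm_acle.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

uint32_t crc32_le_sketch(uint32_t crc, const unsigned char *p, size_t len)
{
        uint64_t d;
        uint32_t w;
        uint16_t h;

        while (len >= 16) {                     /* main loop: 16 B/iter */
                memcpy(&d, p, 8);
                crc = __crc32d(crc, d);
                memcpy(&d, p + 8, 8);
                crc = __crc32d(crc, d);
                p += 16;
                len -= 16;
        }
        if (len & 8) {                          /* tbz x2, #3 */
                memcpy(&d, p, 8);
                crc = __crc32d(crc, d);
                p += 8;
        }
        if (len & 4) {                          /* tbz x2, #2 */
                memcpy(&w, p, 4);
                crc = __crc32w(crc, w);
                p += 4;
        }
        if (len & 2) {                          /* tbz x2, #1 */
                memcpy(&h, p, 2);
                crc = __crc32h(crc, h);
                p += 2;
        }
        if (len & 1)                            /* tbz x2, #0 */
                crc = __crc32b(crc, *p);
        return crc;
}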
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index c127f94da8e2..1f0ea2facf24 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -88,7 +88,7 @@ void verify_cpu_asid_bits(void)
}
}
-static void flush_context(unsigned int cpu)
+static void flush_context(void)
{
int i;
u64 asid;
@@ -142,7 +142,7 @@ static bool check_update_reserved_asid(u64 asid, u64 newasid)
return hit;
}
-static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm)
{
static u32 cur_idx = 1;
u64 asid = atomic64_read(&mm->context.id);
@@ -180,7 +180,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
/* We're out of ASIDs, so increment the global generation count */
generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
&asid_generation);
- flush_context(cpu);
+ flush_context();
/* We have more ASIDs than CPUs, so this will always succeed */
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
@@ -196,6 +196,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
unsigned long flags;
u64 asid, old_active_asid;
+ if (system_supports_cnp())
+ cpu_set_reserved_ttbr0();
+
asid = atomic64_read(&mm->context.id);
/*
@@ -223,7 +226,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
/* Check that our ASID belongs to the current generation. */
asid = atomic64_read(&mm->context.id);
if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
- asid = new_context(mm, cpu);
+ asid = new_context(mm);
atomic64_set(&mm->context.id, asid);
}
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 65dfc8571bf8..fcb1f2a6d7c6 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -36,8 +36,8 @@ static const struct addr_marker address_markers[] = {
#endif
{ MODULES_VADDR, "Modules start" },
{ MODULES_END, "Modules end" },
- { VMALLOC_START, "vmalloc() Area" },
- { VMALLOC_END, "vmalloc() End" },
+ { VMALLOC_START, "vmalloc() area" },
+ { VMALLOC_END, "vmalloc() end" },
{ FIXADDR_START, "Fixmap start" },
{ FIXADDR_TOP, "Fixmap end" },
{ PCI_IO_START, "PCI I/O start" },
@@ -46,7 +46,7 @@ static const struct addr_marker address_markers[] = {
{ VMEMMAP_START, "vmemmap start" },
{ VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
#endif
- { PAGE_OFFSET, "Linear Mapping" },
+ { PAGE_OFFSET, "Linear mapping" },
{ -1, NULL },
};
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 50b30ff30de4..7d9571f4ae3d 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -37,6 +37,7 @@
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
+#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/sysreg.h>
@@ -56,10 +57,16 @@ struct fault_info {
};
static const struct fault_info fault_info[];
+static struct fault_info debug_fault_info[];
static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
{
- return fault_info + (esr & 63);
+ return fault_info + (esr & ESR_ELx_FSC);
+}
+
+static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
+{
+ return debug_fault_info + DBG_ESR_EVT(esr);
}
#ifdef CONFIG_KPROBES
@@ -235,9 +242,8 @@ static bool is_el1_instruction_abort(unsigned int esr)
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
-static inline bool is_el1_permission_fault(unsigned int esr,
- struct pt_regs *regs,
- unsigned long addr)
+static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
{
unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
@@ -283,7 +289,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
return;
- if (is_el1_permission_fault(esr, regs, addr)) {
+ if (is_el1_permission_fault(addr, esr, regs)) {
if (esr & ESR_ELx_WNR)
msg = "write to read-only memory";
else
@@ -297,9 +303,9 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
die_kernel_fault(msg, addr, esr, regs);
}
-static void __do_user_fault(struct siginfo *info, unsigned int esr)
+static void set_thread_esr(unsigned long address, unsigned int esr)
{
- current->thread.fault_address = (unsigned long)info->si_addr;
+ current->thread.fault_address = address;
/*
* If the faulting address is in the kernel, we must sanitize the ESR.
@@ -352,7 +358,6 @@ static void __do_user_fault(struct siginfo *info, unsigned int esr)
}
current->thread.fault_code = esr;
- arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current);
}
static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
@@ -363,14 +368,10 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
*/
if (user_mode(regs)) {
const struct fault_info *inf = esr_to_fault_info(esr);
- struct siginfo si;
-
- clear_siginfo(&si);
- si.si_signo = inf->sig;
- si.si_code = inf->code;
- si.si_addr = (void __user *)addr;
- __do_user_fault(&si, esr);
+ set_thread_esr(addr, esr);
+ arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr,
+ inf->name);
} else {
__do_kernel_fault(addr, esr, regs);
}
@@ -424,9 +425,9 @@ static bool is_el0_instruction_abort(unsigned int esr)
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
+ const struct fault_info *inf;
struct task_struct *tsk;
struct mm_struct *mm;
- struct siginfo si;
vm_fault_t fault, major = 0;
unsigned long vm_flags = VM_READ | VM_WRITE;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -454,7 +455,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < TASK_SIZE && is_el1_permission_fault(esr, regs, addr)) {
+ if (addr < TASK_SIZE && is_el1_permission_fault(addr, esr, regs)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die_kernel_fault("access to user memory with fs=KERNEL_DS",
@@ -562,37 +563,35 @@ retry:
return 0;
}
- clear_siginfo(&si);
- si.si_addr = (void __user *)addr;
-
+ inf = esr_to_fault_info(esr);
+ set_thread_esr(addr, esr);
if (fault & VM_FAULT_SIGBUS) {
/*
* We had some memory, but were unable to successfully fix up
* this page fault.
*/
- si.si_signo = SIGBUS;
- si.si_code = BUS_ADRERR;
- } else if (fault & VM_FAULT_HWPOISON_LARGE) {
- unsigned int hindex = VM_FAULT_GET_HINDEX(fault);
-
- si.si_signo = SIGBUS;
- si.si_code = BUS_MCEERR_AR;
- si.si_addr_lsb = hstate_index_to_shift(hindex);
- } else if (fault & VM_FAULT_HWPOISON) {
- si.si_signo = SIGBUS;
- si.si_code = BUS_MCEERR_AR;
- si.si_addr_lsb = PAGE_SHIFT;
+ arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr,
+ inf->name);
+ } else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
+ unsigned int lsb;
+
+ lsb = PAGE_SHIFT;
+ if (fault & VM_FAULT_HWPOISON_LARGE)
+ lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+
+ arm64_force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, lsb,
+ inf->name);
} else {
/*
* Something tried to access memory that isn't in our memory
* map.
*/
- si.si_signo = SIGSEGV;
- si.si_code = fault == VM_FAULT_BADACCESS ?
- SEGV_ACCERR : SEGV_MAPERR;
+ arm64_force_sig_fault(SIGSEGV,
+ fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
+ (void __user *)addr,
+ inf->name);
}
- __do_user_fault(&si, esr);
return 0;
no_context:
@@ -625,8 +624,8 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
- struct siginfo info;
const struct fault_info *inf;
+ void __user *siaddr;
inf = esr_to_fault_info(esr);
@@ -645,15 +644,11 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
nmi_exit();
}
- clear_siginfo(&info);
- info.si_signo = inf->sig;
- info.si_errno = 0;
- info.si_code = inf->code;
if (esr & ESR_ELx_FnV)
- info.si_addr = NULL;
+ siaddr = NULL;
else
- info.si_addr = (void __user *)addr;
- arm64_notify_die(inf->name, regs, &info, esr);
+ siaddr = (void __user *)addr;
+ arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
return 0;
}
@@ -734,7 +729,6 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_fault_info(esr);
- struct siginfo info;
if (!inf->fn(addr, esr, regs))
return;
@@ -745,12 +739,8 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
show_pte(addr);
}
- clear_siginfo(&info);
- info.si_signo = inf->sig;
- info.si_errno = 0;
- info.si_code = inf->code;
- info.si_addr = (void __user *)addr;
- arm64_notify_die(inf->name, regs, &info, esr);
+ arm64_notify_die(inf->name, regs,
+ inf->sig, inf->code, (void __user *)addr, esr);
}
asmlinkage void __exception do_el0_irq_bp_hardening(void)
@@ -771,7 +761,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
if (addr > TASK_SIZE)
arm64_apply_bp_hardening();
- local_irq_enable();
+ local_daif_restore(DAIF_PROCCTX);
do_mem_abort(addr, esr, regs);
}
@@ -780,20 +770,14 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
{
- struct siginfo info;
-
if (user_mode(regs)) {
if (instruction_pointer(regs) > TASK_SIZE)
arm64_apply_bp_hardening();
- local_irq_enable();
+ local_daif_restore(DAIF_PROCCTX);
}
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRALN;
- info.si_addr = (void __user *)addr;
- arm64_notify_die("SP/PC alignment exception", regs, &info, esr);
+ arm64_notify_die("SP/PC alignment exception", regs,
+ SIGBUS, BUS_ADRALN, (void __user *)addr, esr);
}
int __init early_brk64(unsigned long addr, unsigned int esr,
@@ -831,7 +815,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
{
- const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
+ const struct fault_info *inf = esr_to_debug_fault_info(esr);
int rv;
/*
@@ -847,14 +831,8 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
if (!inf->fn(addr, esr, regs)) {
rv = 1;
} else {
- struct siginfo info;
-
- clear_siginfo(&info);
- info.si_signo = inf->sig;
- info.si_errno = 0;
- info.si_code = inf->code;
- info.si_addr = (void __user *)addr;
- arm64_notify_die(inf->name, regs, &info, esr);
+ arm64_notify_die(inf->name, regs,
+ inf->sig, inf->code, (void __user *)addr, esr);
rv = 0;
}
@@ -864,17 +842,3 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
return rv;
}
NOKPROBE_SYMBOL(do_debug_exception);
-
-#ifdef CONFIG_ARM64_PAN
-void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
-{
- /*
- * We modify PSTATE. This won't work from irq context as the PSTATE
- * is discarded once we return from the exception.
- */
- WARN_ON_ONCE(in_interrupt());
-
- sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
- asm(SET_PSTATE_PAN(1));
-}
-#endif /* CONFIG_ARM64_PAN */
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 192b3ba07075..f58ea503ad01 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
/*
* If HW_AFDBM is enabled, then the HW could turn on
- * the dirty bit for any page in the set, so check
- * them all. All hugetlb entries are already young.
+ * the dirty or accessed bit for any page in the set,
+ * so check them all.
*/
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
+
+ if (pte_young(pte))
+ orig_pte = pte_mkyoung(orig_pte);
}
if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}
+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accessed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range, a change in write permission can be
+ * detected by checking only the first pte in the set. For dirty and
+ * young, every pte in the set must be checked for a discrepancy.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+ int i;
+
+ if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+ return 1;
+
+ for (i = 0; i < ncontig; i++) {
+ pte_t orig_pte = huge_ptep_get(ptep + i);
+
+ if (pte_dirty(pte) != pte_dirty(orig_pte))
+ return 1;
+
+ if (pte_young(pte) != pte_young(orig_pte))
+ return 1;
+ }
+
+ return 0;
+}
+
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- int ncontig, i, changed = 0;
+ int ncontig, i;
size_t pgsize = 0;
unsigned long pfn = pte_pfn(pte), dpfn;
pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
dpfn = pgsize >> PAGE_SHIFT;
+ if (!__cont_access_flags_changed(ptep, pte, ncontig))
+ return 0;
+
orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
- if (!pte_same(orig_pte, pte))
- changed = 1;
- /* Make sure we don't lose the dirty state */
+ /* Make sure we don't lose the dirty or young state */
if (pte_dirty(orig_pte))
pte = pte_mkdirty(pte);
+ if (pte_young(orig_pte))
+ pte = pte_mkyoung(pte);
+
hugeprot = pte_pgprot(pte);
for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
- return changed;
+ return 1;
}
void huge_ptep_set_wrprotect(struct mm_struct *mm,
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 787e27964ab9..3cf87341859f 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -284,7 +284,6 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
#endif /* CONFIG_NUMA */
-#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
phys_addr_t addr = pfn << PAGE_SHIFT;
@@ -294,7 +293,6 @@ int pfn_valid(unsigned long pfn)
return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
-#endif
#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 12145874c02b..fccb1a6f8c6f 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -192,7 +192,7 @@ void __init kasan_init(void)
/*
* We are going to perform proper setup of shadow memory.
- * At first we should unmap early shadow (clear_pgds() call bellow).
+ * At first we should unmap early shadow (clear_pgds() call below).
* However, instrumented code couldn't execute without shadow memory.
* tmp_pg_dir used to keep early shadow mapped until full shadow
* setup will be finished.
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 8080c9f489c3..9498c15b847b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -67,6 +67,24 @@ static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+static DEFINE_SPINLOCK(swapper_pgdir_lock);
+
+void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+ pgd_t *fixmap_pgdp;
+
+ spin_lock(&swapper_pgdir_lock);
+ fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
+ WRITE_ONCE(*fixmap_pgdp, pgd);
+ /*
+ * We need dsb(ishst) here to ensure the page-table-walker sees
+ * our new entry before set_p?d() returns. The fixmap's
+ * flush_tlb_kernel_range() via clear_fixmap() does this for us.
+ */
+ pgd_clear_fixmap();
+ spin_unlock(&swapper_pgdir_lock);
+}
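With swapper_pg_dir written only through the fixmap, top-level setters need
to detect writes that land inside it. A rough sketch of the caller side,
assuming an in_swapper_pgdir() predicate that matches addresses within
swapper_pg_dir:

/* Sketch: route live kernel pgd updates through the fixmap alias;
 * all other page tables are still written directly. */
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        if (in_swapper_pgdir(pgdp)) {           /* assumed helper */
                set_swapper_pgd(pgdp, pgd);
                return;
        }

        WRITE_ONCE(*pgdp, pgd);
        dsb(ishst);
}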
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -629,34 +647,18 @@ static void __init map_kernel(pgd_t *pgdp)
*/
void __init paging_init(void)
{
- phys_addr_t pgd_phys = early_pgtable_alloc();
- pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
+ pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
map_kernel(pgdp);
map_mem(pgdp);
- /*
- * We want to reuse the original swapper_pg_dir so we don't have to
- * communicate the new address to non-coherent secondaries in
- * secondary_entry, and so cpu_switch_mm can generate the address with
- * adrp+add rather than a load from some global variable.
- *
- * To do this we need to go via a temporary pgd.
- */
- cpu_replace_ttbr1(__va(pgd_phys));
- memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
- cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
-
pgd_clear_fixmap();
- memblock_free(pgd_phys, PAGE_SIZE);
- /*
- * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
- * allocated with it.
- */
- memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
- __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir)
- - PAGE_SIZE);
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+ init_mm.pgd = swapper_pg_dir;
+
+ memblock_free(__pa_symbol(init_pg_dir),
+ __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
}
/*
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 146c04ceaa51..d7b66fc5e1c5 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -391,7 +391,6 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(numa_nodes_parsed);
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
- numa_free_distance();
ret = numa_alloc_distance();
if (ret < 0)
@@ -399,20 +398,24 @@ static int __init numa_init(int (*init_func)(void))
ret = init_func();
if (ret < 0)
- return ret;
+ goto out_free_distance;
if (nodes_empty(numa_nodes_parsed)) {
pr_info("No NUMA configuration found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free_distance;
}
ret = numa_register_nodes();
if (ret < 0)
- return ret;
+ goto out_free_distance;
setup_node_to_cpumask_map();
return 0;
+out_free_distance:
+ numa_free_distance();
+ return ret;
}
/**
@@ -432,7 +435,7 @@ static int __init dummy_numa_init(void)
if (numa_off)
pr_info("NUMA disabled\n"); /* Forced off on command line. */
pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n",
- 0LLU, PFN_PHYS(max_pfn) - 1);
+ memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1);
for_each_memblock(memory, mblk) {
ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 03646e6a2ef4..2c75b0b903ae 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -160,6 +160,12 @@ ENTRY(cpu_do_switch_mm)
mrs x2, ttbr1_el1
mmid x1, x1 // get mm->context.id
phys_to_ttbr x3, x0
+
+alternative_if ARM64_HAS_CNP
+ cbz x1, 1f // skip CNP for reserved ASID
+ orr x3, x3, #TTBR_CNP_BIT
+1:
+alternative_else_nop_endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
bfi x3, x1, #48, #16 // set the ASID field in TTBR0
#endif
@@ -184,7 +190,7 @@ ENDPROC(cpu_do_switch_mm)
.endm
/*
- * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
+ * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
*
* This is the low-level counterpart to cpu_replace_ttbr1, and should not be
* called by anything else. It can only be executed from a TTBR0 mapping.
@@ -194,8 +200,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
__idmap_cpu_set_reserved_ttbr1 x1, x3
- phys_to_ttbr x3, x0
- msr ttbr1_el1, x3
+ msr ttbr1_el1, x0
isb
restore_daif x2
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index a641b0bf1611..f65a084607fd 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -9,7 +9,7 @@ config C6X
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select CLKDEV_LOOKUP
- select DMA_NONCOHERENT_OPS
+ select DMA_DIRECT_OPS
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select HAVE_ARCH_TRACEHOOK
diff --git a/arch/c6x/include/uapi/asm/unistd.h b/arch/c6x/include/uapi/asm/unistd.h
index 0d2daf7f9809..6b2fe792de9d 100644
--- a/arch/c6x/include/uapi/asm/unistd.h
+++ b/arch/c6x/include/uapi/asm/unistd.h
@@ -16,6 +16,7 @@
*/
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_CLONE
/* Use the standard ABI for syscalls. */
diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h
index 7dd20ef7625a..628195823816 100644
--- a/arch/h8300/include/uapi/asm/unistd.h
+++ b/arch/h8300/include/uapi/asm/unistd.h
@@ -1,5 +1,6 @@
#define __ARCH_NOMMU
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
#include <asm-generic/unistd.h>
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index 35716a3048de..49f716c0a1df 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -56,7 +56,6 @@ SECTIONS
__init_begin = .;
INIT_TEXT_SECTION(4)
INIT_DATA_SECTION(4)
- SECURITY_INIT
__init_end = .;
_edata = . ;
_begin_data = LOADADDR(.data);
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 89a4b22f34d9..3ef46522e89f 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -4,6 +4,7 @@ comment "Linux Kernel Configuration for Hexagon"
config HEXAGON
def_bool y
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_PREEMPT
select HAVE_OPROFILE
# Other pending projects/to-do items.
@@ -29,6 +30,7 @@ config HEXAGON
select GENERIC_CLOCKEVENTS_BROADCAST
select MODULES_USE_ELF_RELA
select GENERIC_CPU_DEVICES
+ select DMA_DIRECT_OPS
---help---
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index dd2fd9c0d292..47c4da3d64a4 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += compat.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += extable.h
generic-y += fb.h
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
deleted file mode 100644
index 263f6acbfb0f..000000000000
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * DMA operations for the Hexagon architecture
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <linux/types.h>
-#include <linux/cache.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <asm/io.h>
-
-struct device;
-
-extern const struct dma_map_ops *dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return dma_ops;
-}
-
-#endif
diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
index ea181e79162e..c91ca7d02461 100644
--- a/arch/hexagon/include/uapi/asm/unistd.h
+++ b/arch/hexagon/include/uapi/asm/unistd.h
@@ -29,6 +29,7 @@
#define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 7ebe7ad19d15..706699374444 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -18,32 +18,19 @@
* 02110-1301, USA.
*/
-#include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
-#include <asm/dma-mapping.h>
#include <linux/module.h>
#include <asm/page.h>
-#define HEXAGON_MAPPING_ERROR 0
-
-const struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
-static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
-{
- return phys_to_virt((unsigned long) dma_addr);
-}
-
static struct gen_pool *coherent_pool;
/* Allocates from a pool of uncached memory that was reserved at boot time */
-static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag,
- unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_addr,
+ gfp_t flag, unsigned long attrs)
{
void *ret;
@@ -75,58 +62,17 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, unsigned long attrs)
{
gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
-static int check_addr(const char *name, struct device *hwdev,
- dma_addr_t bus, size_t size)
-{
- if (hwdev && hwdev->dma_mask && !dma_capable(hwdev, bus, size)) {
- if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
- printk(KERN_ERR
- "%s: overflow %Lx+%zu of device mask %Lx\n",
- name, (long long)bus, size,
- (long long)*hwdev->dma_mask);
- return 0;
- }
- return 1;
-}
-
-static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- struct scatterlist *s;
- int i;
-
- WARN_ON(nents == 0 || sg[0].length == 0);
-
- for_each_sg(sg, s, nents, i) {
- s->dma_address = sg_phys(s);
- if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
- return 0;
-
- s->dma_length = s->length;
+ void *addr = phys_to_virt(paddr);
- if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
- continue;
-
- flush_dcache_range(dma_addr_to_virt(s->dma_address),
- dma_addr_to_virt(s->dma_address + s->length));
- }
-
- return nents;
-}
-
-/*
- * address is virtual
- */
-static inline void dma_sync(void *addr, size_t size,
- enum dma_data_direction dir)
-{
switch (dir) {
case DMA_TO_DEVICE:
hexagon_clean_dcache_range((unsigned long) addr,
@@ -144,76 +90,3 @@ static inline void dma_sync(void *addr, size_t size,
BUG();
}
}
-
-/**
- * hexagon_map_page() - maps an address for device DMA
- * @dev: pointer to DMA device
- * @page: pointer to page struct of DMA memory
- * @offset: offset within page
- * @size: size of memory to map
- * @dir: transfer direction
- * @attrs: pointer to DMA attrs (not used)
- *
- * Called to map a memory address to a DMA address prior
- * to accesses to/from device.
- *
- * We don't particularly have many hoops to jump through
- * so far. Straight translation between phys and virtual.
- *
- * DMA is not cache coherent so sync is necessary; this
- * seems to be a convenient place to do it.
- *
- */
-static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t bus = page_to_phys(page) + offset;
- WARN_ON(size == 0);
-
- if (!check_addr("map_single", dev, bus, size))
- return HEXAGON_MAPPING_ERROR;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_sync(dma_addr_to_virt(bus), size, dir);
-
- return bus;
-}
-
-static void hexagon_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
-{
- dma_sync(dma_addr_to_virt(dma_handle), size, dir);
-}
-
-static void hexagon_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
-{
- dma_sync(dma_addr_to_virt(dma_handle), size, dir);
-}
-
-static int hexagon_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == HEXAGON_MAPPING_ERROR;
-}
-
-const struct dma_map_ops hexagon_dma_ops = {
- .alloc = hexagon_dma_alloc_coherent,
- .free = hexagon_free_coherent,
- .map_sg = hexagon_map_sg,
- .map_page = hexagon_map_page,
- .sync_single_for_cpu = hexagon_sync_single_for_cpu,
- .sync_single_for_device = hexagon_sync_single_for_device,
- .mapping_error = hexagon_mapping_error,
-};
-
-void __init hexagon_dma_init(void)
-{
- if (dma_ops)
- return;
-
- dma_ops = &hexagon_dma_ops;
-}
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 671ce1e3f6f2..e8a93b07283e 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2207,10 +2207,6 @@ const struct dma_map_ops sba_dma_ops = {
.unmap_page = sba_unmap_page,
.map_sg = sba_map_sg_attrs,
.unmap_sg = sba_unmap_sg_attrs,
- .sync_single_for_cpu = machvec_dma_sync_single,
- .sync_sg_for_cpu = machvec_dma_sync_sg,
- .sync_single_for_device = machvec_dma_sync_single,
- .sync_sg_for_device = machvec_dma_sync_sg,
.dma_supported = sba_dma_supported,
.mapping_error = sba_dma_mapping_error,
};
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 663388a73d4e..7aeb48a18576 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -297,29 +297,29 @@ static void rs_unthrottle(struct tty_struct * tty)
printk(KERN_INFO "simrs_unthrottle called\n");
}
+static int rs_setserial(struct tty_struct *tty, struct serial_struct *ss)
+{
+ return 0;
+}
+
+static int rs_getserial(struct tty_struct *tty, struct serial_struct *ss)
+{
+ return 0;
+}
+
static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
- (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
- (cmd != TIOCMIWAIT)) {
+ if ((cmd != TIOCSERCONFIG) && (cmd != TIOCMIWAIT)) {
if (tty_io_error(tty))
return -EIO;
}
switch (cmd) {
- case TIOCGSERIAL:
- case TIOCSSERIAL:
- case TIOCSERGSTRUCT:
case TIOCMIWAIT:
return 0;
case TIOCSERCONFIG:
case TIOCSERGETLSR: /* Get line status register */
return -EINVAL;
- case TIOCSERGWILD:
- case TIOCSERSWILD:
- /* "setserial -W" is called in Debian boot */
- printk (KERN_INFO "TIOCSER?WILD ioctl obsolete, ignored.\n");
- return 0;
}
return -ENOIOCTLCMD;
}
@@ -448,6 +448,8 @@ static const struct tty_operations hp_ops = {
.throttle = rs_throttle,
.unthrottle = rs_unthrottle,
.send_xchar = rs_send_xchar,
+ .set_serial = rs_setserial,
+ .get_serial = rs_getserial,
.hangup = rs_hangup,
.proc_show = rs_proc_show,
};
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 76e4d6632d68..f7ec71e4001e 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -10,17 +10,10 @@
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
extern const struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
-extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
- enum dma_data_direction);
-extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
return platform_dma_get_ops(NULL);
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 156b9d8e1932..7429a72f3f92 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -5,7 +5,6 @@
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
-extern void pci_iommu_shutdown(void);
extern void no_iommu_init(void);
#ifdef CONFIG_INTEL_IOMMU
extern int force_iommu, no_iommu;
@@ -16,7 +15,6 @@ extern int iommu_detected;
#define no_iommu (1)
#define iommu_detected (0)
#endif
-extern void iommu_dma_init(void);
extern void machvec_init(const char *name);
#endif
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 267f4f170191..5133739966bc 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -44,7 +44,6 @@ typedef void ia64_mv_kernel_launch_event_t(void);
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
-typedef u64 ia64_mv_dma_get_required_mask (struct device *);
typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
/*
@@ -127,7 +126,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# define platform_global_tlb_purge ia64_mv.global_tlb_purge
# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
# define platform_dma_init ia64_mv.dma_init
-# define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
# define platform_dma_get_ops ia64_mv.dma_get_ops
# define platform_irq_to_vector ia64_mv.irq_to_vector
# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
@@ -171,7 +169,6 @@ struct ia64_machine_vector {
ia64_mv_global_tlb_purge_t *global_tlb_purge;
ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
ia64_mv_dma_init *dma_init;
- ia64_mv_dma_get_required_mask *dma_get_required_mask;
ia64_mv_dma_get_ops *dma_get_ops;
ia64_mv_irq_to_vector *irq_to_vector;
ia64_mv_local_vector_to_irq *local_vector_to_irq;
@@ -211,7 +208,6 @@ struct ia64_machine_vector {
platform_global_tlb_purge, \
platform_tlb_migrate_finish, \
platform_dma_init, \
- platform_dma_get_required_mask, \
platform_dma_get_ops, \
platform_irq_to_vector, \
platform_local_vector_to_irq, \
@@ -286,9 +282,6 @@ extern const struct dma_map_ops *dma_get_ops(struct device *);
#ifndef platform_dma_get_ops
# define platform_dma_get_ops dma_get_ops
#endif
-#ifndef platform_dma_get_required_mask
-# define platform_dma_get_required_mask ia64_dma_get_required_mask
-#endif
#ifndef platform_irq_to_vector
# define platform_irq_to_vector __ia64_irq_to_vector
#endif
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
index 2b32fd06b7c6..2aafb69a3787 100644
--- a/arch/ia64/include/asm/machvec_init.h
+++ b/arch/ia64/include/asm/machvec_init.h
@@ -4,7 +4,6 @@
extern ia64_mv_send_ipi_t ia64_send_ipi;
extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
-extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask;
extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index ece9fa85be88..b5153d300289 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -55,7 +55,6 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
extern ia64_mv_readw_t __sn_readw_relaxed;
extern ia64_mv_readl_t __sn_readl_relaxed;
extern ia64_mv_readq_t __sn_readq_relaxed;
-extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask;
extern ia64_mv_dma_init sn_dma_init;
extern ia64_mv_migrate_t sn_migrate;
extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
@@ -100,7 +99,6 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
#define platform_pci_legacy_read sn_pci_legacy_read
#define platform_pci_legacy_write sn_pci_legacy_write
-#define platform_dma_get_required_mask sn_dma_get_required_mask
#define platform_dma_init sn_dma_init
#define platform_migrate sn_migrate
#define platform_kernel_launch_event sn_kernel_launch_event
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index ffb705dc9c13..49e34db2529c 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -28,6 +28,9 @@
#define __IGNORE_vfork /* clone() */
#define __IGNORE_umount2 /* umount() */
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SYS_UTIME
+
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
#include <linux/types.h>
diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h
index 52b5af424511..796af1ccaa7e 100644
--- a/arch/ia64/include/uapi/asm/siginfo.h
+++ b/arch/ia64/include/uapi/asm/siginfo.h
@@ -9,8 +9,6 @@
#define _UAPI_ASM_IA64_SIGINFO_H
-#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-
#include <asm-generic/siginfo.h>
#define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */
diff --git a/arch/ia64/kernel/brl_emu.c b/arch/ia64/kernel/brl_emu.c
index a61f6c6a36f8..c0239bf77a09 100644
--- a/arch/ia64/kernel/brl_emu.c
+++ b/arch/ia64/kernel/brl_emu.c
@@ -58,11 +58,9 @@ ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
unsigned long bundle[2];
unsigned long opcode, btype, qp, offset, cpl;
unsigned long next_ip;
- struct siginfo siginfo;
struct illegal_op_return rv;
long tmp_taken, unimplemented_address;
- clear_siginfo(&siginfo);
rv.fkt = (unsigned long) -1;
/*
@@ -198,39 +196,22 @@ ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
* The target address contains unimplemented bits.
*/
printk(KERN_DEBUG "Woah! Unimplemented Instruction Address Trap!\n");
- siginfo.si_signo = SIGILL;
- siginfo.si_errno = 0;
- siginfo.si_flags = 0;
- siginfo.si_isr = 0;
- siginfo.si_imm = 0;
- siginfo.si_code = ILL_BADIADDR;
- force_sig_info(SIGILL, &siginfo, current);
+ force_sig_fault(SIGILL, ILL_BADIADDR, (void __user *)NULL,
+ 0, 0, 0, current);
} else if (ia64_psr(regs)->tb) {
/*
* Branch Tracing is enabled.
* Force a taken branch signal.
*/
- siginfo.si_signo = SIGTRAP;
- siginfo.si_errno = 0;
- siginfo.si_code = TRAP_BRANCH;
- siginfo.si_flags = 0;
- siginfo.si_isr = 0;
- siginfo.si_addr = 0;
- siginfo.si_imm = 0;
- force_sig_info(SIGTRAP, &siginfo, current);
+ force_sig_fault(SIGTRAP, TRAP_BRANCH, (void __user *)NULL,
+ 0, 0, 0, current);
} else if (ia64_psr(regs)->ss) {
/*
* Single Step is enabled.
* Force a trace signal.
*/
- siginfo.si_signo = SIGTRAP;
- siginfo.si_errno = 0;
- siginfo.si_code = TRAP_TRACE;
- siginfo.si_flags = 0;
- siginfo.si_isr = 0;
- siginfo.si_addr = 0;
- siginfo.si_imm = 0;
- force_sig_info(SIGTRAP, &siginfo, current);
+ force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)NULL,
+ 0, 0, 0, current);
}
return rv;
}
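
Every conversion in this file follows the same rule: force_sig_fault() zero-fills the siginfo and sets only the fault fields, so the clear_siginfo()-plus-assignments sequences collapse into one call; the trailing three arguments carry the ia64-specific imm/flags/isr members. Illustrative expansion (a sketch, not the kernel's exact implementation):

static void force_sig_fault_sketch(int sig, int code, void __user *addr,
		unsigned long imm, unsigned int flags, unsigned long isr,
		struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_imm   = imm;	/* ia64-only trailing fields */
	info.si_flags = flags;
	info.si_isr   = isr;
	force_sig_info(sig, &info, t);
}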
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 9c09bf390cce..f77d80edddfe 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -842,7 +842,6 @@ kern_mem_attribute (unsigned long phys_addr, unsigned long size)
} while (md);
return 0; /* never reached */
}
-EXPORT_SYMBOL(kern_mem_attribute);
int
valid_phys_addr_range (phys_addr_t phys_addr, unsigned long size)
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 7bfe98859911..1b604d02250b 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -73,19 +73,3 @@ machvec_timer_interrupt (int irq, void *dev_id)
{
}
EXPORT_SYMBOL(machvec_timer_interrupt);
-
-void
-machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction dir)
-{
- mb();
-}
-EXPORT_SYMBOL(machvec_dma_sync_single);
-
-void
-machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
- enum dma_data_direction dir)
-{
- mb();
-}
-EXPORT_SYMBOL(machvec_dma_sync_sg);
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index b5df084c0af4..fe988c49f01c 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -15,11 +15,6 @@
#include <linux/kernel.h>
#include <asm/page.h>
-dma_addr_t bad_dma_address __read_mostly;
-EXPORT_SYMBOL(bad_dma_address);
-
-static int iommu_sac_force __read_mostly;
-
int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int force_iommu __read_mostly = 1;
@@ -29,8 +24,6 @@ int force_iommu __read_mostly;
int iommu_pass_through;
-extern struct dma_map_ops intel_dma_ops;
-
static int __init pci_iommu_init(void)
{
if (iommu_detected)
@@ -42,56 +35,8 @@ static int __init pci_iommu_init(void)
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
-void pci_iommu_shutdown(void)
-{
- return;
-}
-
-void __init
-iommu_dma_init(void)
-{
- return;
-}
-
-int iommu_dma_supported(struct device *dev, u64 mask)
-{
- /* Copied from i386. Doesn't make much sense, because it will
- only work for pci_alloc_coherent.
- The caller just has to use GFP_DMA in this case. */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- /* Tell the device to use SAC when IOMMU force is on. This
- allows the driver to use cheaper accesses in some cases.
-
- Problem with this is that if we overflow the IOMMU area and
- return DAC as fallback address the device may not handle it
- correctly.
-
- As a special case some controllers have a 39bit address
- mode that is as efficient as 32bit (aic79xx). Don't force
- SAC for these. Assume all masks <= 40 bits are of this
- type. Normally this doesn't make any difference, but gives
- more gentle handling of IOMMU overflow. */
- if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
- dev_info(dev, "Force SAC with mask %llx\n", mask);
- return 0;
- }
-
- return 1;
-}
-EXPORT_SYMBOL(iommu_dma_supported);
-
void __init pci_iommu_alloc(void)
{
- dma_ops = &intel_dma_ops;
-
- intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single;
- intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg;
- intel_dma_ops.sync_single_for_device = machvec_dma_sync_single;
- intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg;
- intel_dma_ops.dma_supported = iommu_dma_supported;
-
/*
* The order of these functions is important for
* fall-back/fail-over reasons
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index d1234a5ba4c5..9a960829a01d 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -110,7 +110,6 @@ ia64_rt_sigreturn (struct sigscratch *scr)
{
extern char ia64_strace_leave_kernel, ia64_leave_kernel;
struct sigcontext __user *sc;
- struct siginfo si;
sigset_t set;
long retval;
@@ -153,14 +152,7 @@ ia64_rt_sigreturn (struct sigscratch *scr)
return retval;
give_sigsegv:
- clear_siginfo(&si);
- si.si_signo = SIGSEGV;
- si.si_errno = 0;
- si.si_code = SI_KERNEL;
- si.si_pid = task_pid_vnr(current);
- si.si_uid = from_kuid_munged(current_user_ns(), current_uid());
- si.si_addr = sc;
- force_sig_info(SIGSEGV, &si, current);
+ force_sig(SIGSEGV, current);
return retval;
}
@@ -232,37 +224,6 @@ rbs_on_sig_stack (unsigned long bsp)
}
static long
-force_sigsegv_info (int sig, void __user *addr)
-{
- unsigned long flags;
- struct siginfo si;
-
- clear_siginfo(&si);
- if (sig == SIGSEGV) {
- /*
- * Acquiring siglock around the sa_handler-update is almost
- * certainly overkill, but this isn't a
- * performance-critical path and I'd rather play it safe
- * here than having to debug a nasty race if and when
- * something changes in kernel/signal.c that would make it
- * no longer safe to modify sa_handler without holding the
- * lock.
- */
- spin_lock_irqsave(&current->sighand->siglock, flags);
- current->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
- }
- si.si_signo = SIGSEGV;
- si.si_errno = 0;
- si.si_code = SI_KERNEL;
- si.si_pid = task_pid_vnr(current);
- si.si_uid = from_kuid_munged(current_user_ns(), current_uid());
- si.si_addr = addr;
- force_sig_info(SIGSEGV, &si, current);
- return 1;
-}
-
-static long
setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr)
{
extern char __kernel_sigtramp[];
@@ -295,15 +256,18 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr)
* instead so we will die with SIGSEGV.
*/
check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
- if (!likely(on_sig_stack(check_sp)))
- return force_sigsegv_info(ksig->sig, (void __user *)
- check_sp);
+ if (!likely(on_sig_stack(check_sp))) {
+ force_sigsegv(ksig->sig, current);
+ return 1;
+ }
}
}
frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
- return force_sigsegv_info(ksig->sig, frame);
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) {
+ force_sigsegv(ksig->sig, current);
+ return 1;
+ }
err = __put_user(ksig->sig, &frame->arg0);
err |= __put_user(&frame->info, &frame->arg1);
@@ -317,8 +281,10 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr)
err |= __save_altstack(&frame->sc.sc_stack, scr->pt.r12);
err |= setup_sigcontext(&frame->sc, set, scr);
- if (unlikely(err))
- return force_sigsegv_info(ksig->sig, frame);
+ if (unlikely(err)) {
+ force_sigsegv(ksig->sig, current);
+ return 1;
+ }
scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */
scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index c6f4932073a1..85d8616ac4f6 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -100,16 +100,8 @@ die_if_kernel (char *str, struct pt_regs *regs, long err)
void
__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
{
- siginfo_t siginfo;
int sig, code;
- /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
- clear_siginfo(&siginfo);
- siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
- siginfo.si_imm = break_num;
- siginfo.si_flags = 0; /* clear __ISR_VALID */
- siginfo.si_isr = 0;
-
switch (break_num) {
case 0: /* unknown error (used by GCC for __builtin_abort()) */
if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
@@ -182,10 +174,9 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
sig = SIGTRAP; code = TRAP_BRKPT;
}
}
- siginfo.si_signo = sig;
- siginfo.si_errno = 0;
- siginfo.si_code = code;
- force_sig_info(sig, &siginfo, current);
+ force_sig_fault(sig, code,
+ (void __user *) (regs->cr_iip + ia64_psr(regs)->ri),
+ break_num, 0 /* clear __ISR_VALID */, 0, current);
}
/*
@@ -344,30 +335,25 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
return -1;
} else {
- struct siginfo siginfo;
-
/* is next instruction a trap? */
+ int si_code;
+
if (exception & 2) {
ia64_increment_ip(regs);
}
- clear_siginfo(&siginfo);
- siginfo.si_signo = SIGFPE;
- siginfo.si_errno = 0;
- siginfo.si_code = FPE_FLTUNK; /* default code */
- siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
+ si_code = FPE_FLTUNK; /* default code */
if (isr & 0x11) {
- siginfo.si_code = FPE_FLTINV;
+ si_code = FPE_FLTINV;
} else if (isr & 0x22) {
/* denormal operand gets the same si_code as underflow
* see arch/i386/kernel/traps.c:math_error() */
- siginfo.si_code = FPE_FLTUND;
+ si_code = FPE_FLTUND;
} else if (isr & 0x44) {
- siginfo.si_code = FPE_FLTDIV;
+ si_code = FPE_FLTDIV;
}
- siginfo.si_isr = isr;
- siginfo.si_flags = __ISR_VALID;
- siginfo.si_imm = 0;
- force_sig_info(SIGFPE, &siginfo, current);
+ force_sig_fault(SIGFPE, si_code,
+ (void __user *) (regs->cr_iip + ia64_psr(regs)->ri),
+ 0, __ISR_VALID, isr, current);
}
} else {
if (exception == -1) {
@@ -375,24 +361,19 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
return -1;
} else if (exception != 0) {
/* raise exception */
- struct siginfo siginfo;
+ int si_code;
- clear_siginfo(&siginfo);
- siginfo.si_signo = SIGFPE;
- siginfo.si_errno = 0;
- siginfo.si_code = FPE_FLTUNK; /* default code */
- siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
+ si_code = FPE_FLTUNK; /* default code */
if (isr & 0x880) {
- siginfo.si_code = FPE_FLTOVF;
+ si_code = FPE_FLTOVF;
} else if (isr & 0x1100) {
- siginfo.si_code = FPE_FLTUND;
+ si_code = FPE_FLTUND;
} else if (isr & 0x2200) {
- siginfo.si_code = FPE_FLTRES;
+ si_code = FPE_FLTRES;
}
- siginfo.si_isr = isr;
- siginfo.si_flags = __ISR_VALID;
- siginfo.si_imm = 0;
- force_sig_info(SIGFPE, &siginfo, current);
+ force_sig_fault(SIGFPE, si_code,
+ (void __user *) (regs->cr_iip + ia64_psr(regs)->ri),
+ 0, __ISR_VALID, isr, current);
}
}
return 0;
@@ -408,7 +389,6 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
struct pt_regs regs)
{
struct illegal_op_return rv;
- struct siginfo si;
char buf[128];
#ifdef CONFIG_IA64_BRL_EMU
@@ -426,11 +406,9 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
if (die_if_kernel(buf, &regs, 0))
return rv;
- clear_siginfo(&si);
- si.si_signo = SIGILL;
- si.si_code = ILL_ILLOPC;
- si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri);
- force_sig_info(SIGILL, &si, current);
+ force_sig_fault(SIGILL, ILL_ILLOPC,
+ (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri),
+ 0, 0, 0, current);
return rv;
}
@@ -441,7 +419,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
{
unsigned long code, error = isr, iip;
char buf[128];
- int result, sig;
+ int result, sig, si_code;
static const char *reason[] = {
"IA-64 Illegal Operation fault",
"IA-64 Privileged Operation fault",
@@ -490,7 +468,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
case 26: /* NaT Consumption */
if (user_mode(&regs)) {
- struct siginfo siginfo;
void __user *addr;
if (((isr >> 4) & 0xf) == 2) {
@@ -505,15 +482,8 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
addr = (void __user *) (regs.cr_iip
+ ia64_psr(&regs)->ri);
}
- clear_siginfo(&siginfo);
- siginfo.si_signo = sig;
- siginfo.si_code = code;
- siginfo.si_errno = 0;
- siginfo.si_addr = addr;
- siginfo.si_imm = vector;
- siginfo.si_flags = __ISR_VALID;
- siginfo.si_isr = isr;
- force_sig_info(sig, &siginfo, current);
+ force_sig_fault(sig, code, addr,
+ vector, __ISR_VALID, isr, current);
return;
} else if (ia64_done_with_exception(&regs))
return;
@@ -522,17 +492,8 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
case 31: /* Unsupported Data Reference */
if (user_mode(&regs)) {
- struct siginfo siginfo;
-
- clear_siginfo(&siginfo);
- siginfo.si_signo = SIGILL;
- siginfo.si_code = ILL_ILLOPN;
- siginfo.si_errno = 0;
- siginfo.si_addr = (void __user *) iip;
- siginfo.si_imm = vector;
- siginfo.si_flags = __ISR_VALID;
- siginfo.si_isr = isr;
- force_sig_info(SIGILL, &siginfo, current);
+ force_sig_fault(SIGILL, ILL_ILLOPN, (void __user *) iip,
+ vector, __ISR_VALID, isr, current);
return;
}
sprintf(buf, "Unsupported data reference");
@@ -541,10 +502,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
case 29: /* Debug */
case 35: /* Taken Branch Trap */
case 36: /* Single Step Trap */
- {
- struct siginfo siginfo;
-
- clear_siginfo(&siginfo);
if (fsys_mode(current, &regs)) {
extern char __kernel_syscall_via_break[];
/*
@@ -568,7 +525,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
switch (vector) {
default:
case 29:
- siginfo.si_code = TRAP_HWBKPT;
+ si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
/*
* Erratum 10 (IFA may contain incorrect address) now has
@@ -578,37 +535,22 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
ifa = regs.cr_iip;
#endif
break;
- case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
- case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
+ case 35: si_code = TRAP_BRANCH; ifa = 0; break;
+ case 36: si_code = TRAP_TRACE; ifa = 0; break;
}
- if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, siginfo.si_code, SIGTRAP)
+ if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, si_code, SIGTRAP)
== NOTIFY_STOP)
return;
- siginfo.si_signo = SIGTRAP;
- siginfo.si_errno = 0;
- siginfo.si_addr = (void __user *) ifa;
- siginfo.si_imm = 0;
- siginfo.si_flags = __ISR_VALID;
- siginfo.si_isr = isr;
- force_sig_info(SIGTRAP, &siginfo, current);
+ force_sig_fault(SIGTRAP, si_code, (void __user *) ifa,
+ 0, __ISR_VALID, isr, current);
return;
- }
case 32: /* fp fault */
case 33: /* fp trap */
result = handle_fpu_swa((vector == 32) ? 1 : 0, &regs, isr);
if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
- struct siginfo siginfo;
-
- clear_siginfo(&siginfo);
- siginfo.si_signo = SIGFPE;
- siginfo.si_errno = 0;
- siginfo.si_code = FPE_FLTINV;
- siginfo.si_addr = (void __user *) iip;
- siginfo.si_flags = __ISR_VALID;
- siginfo.si_isr = isr;
- siginfo.si_imm = 0;
- force_sig_info(SIGFPE, &siginfo, current);
+ force_sig_fault(SIGFPE, FPE_FLTINV, (void __user *) iip,
+ 0, __ISR_VALID, isr, current);
}
return;
@@ -634,17 +576,9 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
} else {
/* Unimplemented Instr. Address Trap */
if (user_mode(&regs)) {
- struct siginfo siginfo;
-
- clear_siginfo(&siginfo);
- siginfo.si_signo = SIGILL;
- siginfo.si_code = ILL_BADIADDR;
- siginfo.si_errno = 0;
- siginfo.si_flags = 0;
- siginfo.si_isr = 0;
- siginfo.si_imm = 0;
- siginfo.si_addr = (void __user *) iip;
- force_sig_info(SIGILL, &siginfo, current);
+ force_sig_fault(SIGILL, ILL_BADIADDR,
+ (void __user *) iip,
+ 0, 0, 0, current);
return;
}
sprintf(buf, "Unimplemented Instruction Address fault");
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index e309f9859acc..a167a3824b35 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -1298,7 +1298,6 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
mm_segment_t old_fs = get_fs();
unsigned long bundle[2];
unsigned long opcode;
- struct siginfo si;
const struct exception_table_entry *eh = NULL;
union {
unsigned long l;
@@ -1537,14 +1536,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
/* NOT_REACHED */
}
force_sigbus:
- clear_siginfo(&si);
- si.si_signo = SIGBUS;
- si.si_errno = 0;
- si.si_code = BUS_ADRALN;
- si.si_addr = (void __user *) ifa;
- si.si_flags = 0;
- si.si_isr = 0;
- si.si_imm = 0;
- force_sig_info(SIGBUS, &si, current);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) ifa,
+ 0, 0, 0, current);
goto done;
}
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index a9d55ad8d67b..5baeb022f474 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -248,16 +248,8 @@ retry:
return;
}
if (user_mode(regs)) {
- struct siginfo si;
-
- clear_siginfo(&si);
- si.si_signo = signal;
- si.si_errno = 0;
- si.si_code = code;
- si.si_addr = (void __user *) address;
- si.si_isr = isr;
- si.si_flags = __ISR_VALID;
- force_sig_info(signal, &si, current);
+ force_sig_fault(signal, code, (void __user *) address,
+ 0, __ISR_VALID, isr, current);
return;
}
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 7ccc64d5fe3e..5d71800df431 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -568,32 +568,6 @@ static void __init set_pci_dfl_cacheline_size(void)
pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
-u64 ia64_dma_get_required_mask(struct device *dev)
-{
- u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
- u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
- u64 mask;
-
- if (!high_totalram) {
- /* convert to mask just covering totalram */
- low_totalram = (1 << (fls(low_totalram) - 1));
- low_totalram += low_totalram - 1;
- mask = low_totalram;
- } else {
- high_totalram = (1 << (fls(high_totalram) - 1));
- high_totalram += high_totalram - 1;
- mask = (((u64)high_totalram) << 32) + 0xffffffff;
- }
- return mask;
-}
-EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
-
-u64 dma_get_required_mask(struct device *dev)
-{
- return platform_dma_get_required_mask(dev);
-}
-EXPORT_SYMBOL_GPL(dma_get_required_mask);
-
static int __init pcibios_init(void)
{
set_pci_dfl_cacheline_size();
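
The deleted helper computes the smallest all-ones mask covering the top of RAM; the generic dma_get_required_mask() performs the same arithmetic, so the ia64 override can go. Condensed sketch with a worked example: assuming 8 GiB of RAM, the top byte address 0x1ffffffff has fls64() == 33, giving a mask of 2^33 - 1 = 0x1ffffffff.

static u64 required_mask_sketch(void)
{
	u64 top = ((u64)max_pfn - 1) << PAGE_SHIFT;

	/* sketch ignores the degenerate full-64-bit edge case */
	return (1ULL << fls64(top)) - 1;
}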
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 74c934a997bb..4ce4ee4ef9f2 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -314,41 +314,15 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
return nhwentries;
}
-static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
-{
- BUG_ON(!dev_is_pci(dev));
-}
-
-static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!dev_is_pci(dev));
-}
-
-static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- BUG_ON(!dev_is_pci(dev));
-}
-
-static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- BUG_ON(!dev_is_pci(dev));
-}
-
static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
-u64 sn_dma_get_required_mask(struct device *dev)
+static u64 sn_dma_get_required_mask(struct device *dev)
{
return DMA_BIT_MASK(64);
}
-EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
@@ -467,12 +441,9 @@ static struct dma_map_ops sn_dma_ops = {
.unmap_page = sn_dma_unmap_page,
.map_sg = sn_dma_map_sg,
.unmap_sg = sn_dma_unmap_sg,
- .sync_single_for_cpu = sn_dma_sync_single_for_cpu,
- .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
- .sync_single_for_device = sn_dma_sync_single_for_device,
- .sync_sg_for_device = sn_dma_sync_sg_for_device,
.mapping_error = sn_dma_mapping_error,
.dma_supported = sn_dma_supported,
+ .get_required_mask = sn_dma_get_required_mask,
};
void sn_dma_init(void)
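
Making sn_dma_get_required_mask() a static dma_map_ops member lets the generic dma_get_required_mask() reach it without the machvec indirection removed from arch/ia64/pci/pci.c above. Sketch of the assumed dispatch shape (the fallback name is hypothetical):

u64 get_required_mask_sketch(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->get_required_mask)
		return ops->get_required_mask(dev);
	/* else fall back to the generic max_pfn-based calculation */
	return required_mask_sketch();
}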
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 070553791e97..c7b2a8d60a41 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -26,7 +26,7 @@ config M68K
select MODULES_USE_ELF_RELA
select OLD_SIGSUSPEND3
select OLD_SIGACTION
- select DMA_NONCOHERENT_OPS if HAS_DMA
+ select DMA_DIRECT_OPS if HAS_DMA
select HAVE_MEMBLOCK
select ARCH_DISCARD_MEMBLOCK
select NO_BOOTMEM
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 1d5483f6e457..85904b73e261 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -621,7 +621,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -657,7 +656,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 52a0af127951..9b3818bbb68b 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -578,7 +578,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -614,7 +613,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index b3103e51268a..769677809945 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -599,7 +599,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -635,7 +634,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index fb7d651a4cab..7dd264ddf2ea 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 6b37f5537c39..515f7439c755 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -580,7 +580,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -616,7 +615,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index c717bf879449..8e1038ceb407 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -602,7 +602,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -638,7 +637,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 226c994ce794..62c8aaa15cc7 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -684,7 +684,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -720,7 +719,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index b383327fd77a..733973f91297 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 9783d3deb9e9..fee30cc9ac16 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index a35d10ee10cb..eebf9c9088e7 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -593,7 +593,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -629,7 +628,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 573bf922d448..dabc54318c09 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -571,7 +571,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -607,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index efb27a7fcc55..0d9a5c2a311a 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -572,7 +572,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -608,7 +607,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index e9110b9b8bcd..38049357d6d3 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -73,7 +73,7 @@ static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio)
len = bvec.bv_len;
len >>= 9;
nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
- bvec_to_phys(&bvec));
+ page_to_phys(bvec.bv_page) + bvec.bv_offset);
sec += len;
}
bio_endio(bio);
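
bvec_to_phys() is being retired; the open-coded replacement is one line. Equivalent helper, for reference:

static inline phys_addr_t bvec_phys_sketch(const struct bio_vec *bv)
{
	return page_to_phys(bv->bv_page) + bv->bv_offset;
}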
diff --git a/arch/m68k/include/asm/atafd.h b/arch/m68k/include/asm/atafd.h
deleted file mode 100644
index ad7014cad633..000000000000
--- a/arch/m68k/include/asm/atafd.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M68K_FD_H
-#define _ASM_M68K_FD_H
-
-/* Definitions for the Atari Floppy driver */
-
-struct atari_format_descr {
- int track; /* to be formatted */
- int head; /* "" "" */
- int sect_offset; /* offset of first sector */
-};
-
-#endif
diff --git a/arch/m68k/include/asm/atafdreg.h b/arch/m68k/include/asm/atafdreg.h
deleted file mode 100644
index c31b4919ed2d..000000000000
--- a/arch/m68k/include/asm/atafdreg.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_FDREG_H
-#define _LINUX_FDREG_H
-
-/*
-** WD1772 stuff
- */
-
-/* register codes */
-
-#define FDCSELREG_STP (0x80) /* command/status register */
-#define FDCSELREG_TRA (0x82) /* track register */
-#define FDCSELREG_SEC (0x84) /* sector register */
-#define FDCSELREG_DTA (0x86) /* data register */
-
-/* register names for FDC_READ/WRITE macros */
-
-#define FDCREG_CMD 0
-#define FDCREG_STATUS 0
-#define FDCREG_TRACK 2
-#define FDCREG_SECTOR 4
-#define FDCREG_DATA 6
-
-/* command opcodes */
-
-#define FDCCMD_RESTORE (0x00) /* - */
-#define FDCCMD_SEEK (0x10) /* | */
-#define FDCCMD_STEP (0x20) /* | TYP 1 Commands */
-#define FDCCMD_STIN (0x40) /* | */
-#define FDCCMD_STOT (0x60) /* - */
-#define FDCCMD_RDSEC (0x80) /* - TYP 2 Commands */
-#define FDCCMD_WRSEC (0xa0) /* - " */
-#define FDCCMD_RDADR (0xc0) /* - */
-#define FDCCMD_RDTRA (0xe0) /* | TYP 3 Commands */
-#define FDCCMD_WRTRA (0xf0) /* - */
-#define FDCCMD_FORCI (0xd0) /* - TYP 4 Command */
-
-/* command modifier bits */
-
-#define FDCCMDADD_SR6 (0x00) /* step rate settings */
-#define FDCCMDADD_SR12 (0x01)
-#define FDCCMDADD_SR2 (0x02)
-#define FDCCMDADD_SR3 (0x03)
-#define FDCCMDADD_V (0x04) /* verify */
-#define FDCCMDADD_H (0x08) /* wait for spin-up */
-#define FDCCMDADD_U (0x10) /* update track register */
-#define FDCCMDADD_M (0x10) /* multiple sector access */
-#define FDCCMDADD_E (0x04) /* head settling flag */
-#define FDCCMDADD_P (0x02) /* precompensation off */
-#define FDCCMDADD_A0 (0x01) /* DAM flag */
-
-/* status register bits */
-
-#define FDCSTAT_MOTORON (0x80) /* motor on */
-#define FDCSTAT_WPROT (0x40) /* write protected (FDCCMD_WR*) */
-#define FDCSTAT_SPINUP (0x20) /* motor speed stable (Type I) */
-#define FDCSTAT_DELDAM (0x20) /* sector has deleted DAM (Type II+III) */
-#define FDCSTAT_RECNF (0x10) /* record not found */
-#define FDCSTAT_CRC (0x08) /* CRC error */
-#define FDCSTAT_TR00 (0x04) /* Track 00 flag (Type I) */
-#define FDCSTAT_LOST (0x04) /* Lost Data (Type II+III) */
-#define FDCSTAT_IDX (0x02) /* Index status (Type I) */
-#define FDCSTAT_DRQ (0x02) /* DRQ status (Type II+III) */
-#define FDCSTAT_BUSY (0x01) /* FDC is busy */
-
-
-/* PSG Port A Bit Nr 0 .. Side Sel .. 0 -> Side 1 1 -> Side 2 */
-#define DSKSIDE (0x01)
-
-#define DSKDRVNONE (0x06)
-#define DSKDRV0 (0x02)
-#define DSKDRV1 (0x04)
-
-/* step rates */
-#define FDCSTEP_6 0x00
-#define FDCSTEP_12 0x01
-#define FDCSTEP_2 0x02
-#define FDCSTEP_3 0x03
-
-#endif
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 30d0d3fbd4ef..e680031bda7b 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -7,6 +7,7 @@
#define NR_syscalls 380
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
#define __ARCH_WANT_STAT64
@@ -21,7 +22,6 @@
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLD_MMAP
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index ace5c5bf1836..164a4857737a 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -1,6 +1,7 @@
config MICROBLAZE
def_bool y
select ARCH_NO_SWAP
+ select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -11,8 +12,7 @@ config MICROBLAZE
select TIMER_OF
select CLONE_BACKWARDS3
select COMMON_CLK
- select DMA_NONCOHERENT_OPS
- select DMA_NONCOHERENT_MMAP
+ select DMA_DIRECT_OPS
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 7b650ab14fa0..f64ebb9c9a41 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -553,8 +553,6 @@ void __init *early_get_page(void);
extern unsigned long ioremap_bot, ioremap_base;
-unsigned long consistent_virt_to_pfn(void *vaddr);
-
void setup_memory(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index a62d09420a47..f42c40f5001b 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -15,6 +15,7 @@
/* #define __ARCH_WANT_OLD_READDIR */
/* #define __ARCH_WANT_OLD_STAT */
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
@@ -26,7 +27,6 @@
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
/* #define __ARCH_WANT_SYS_OLD_GETRLIMIT */
#define __ARCH_WANT_SYS_OLDUMOUNT
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 71032cf64669..a89c2d4ed5ff 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -42,25 +42,3 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
{
__dma_sync(dev, paddr, size, dir);
}
-
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs)
-{
-#ifdef CONFIG_MMU
- unsigned long user_count = vma_pages(vma);
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long off = vma->vm_pgoff;
- unsigned long pfn;
-
- if (off >= count || user_count > (count - off))
- return -ENXIO;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pfn = consistent_virt_to_pfn(cpu_addr);
- return remap_pfn_range(vma, vma->vm_start, pfn + off,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
-#else
- return -ENXIO;
-#endif
-}
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 289d0e7f3e3a..e1f3e8741292 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -117,8 +117,6 @@ SECTIONS {
CON_INITCALL
}
- SECURITY_INIT
-
__init_end_before_initramfs = .;
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index c9a278ac795a..d801cc5f5b95 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -165,7 +165,8 @@ static pte_t *consistent_virt_to_pte(void *vaddr)
return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
}
-unsigned long consistent_virt_to_pfn(void *vaddr)
+long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
+ dma_addr_t dma_addr)
{
pte_t *ptep = consistent_virt_to_pte(vaddr);
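
Renaming consistent_virt_to_pfn() into the generic arch_dma_coherent_to_pfn() hook lets common code do what the deleted arch_dma_mmap() did by hand. A sketch of that common mmap path, under the same assumptions as the removed microblaze code:

int dma_mmap_sketch(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t handle, size_t size)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	long pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, handle);

	if (off >= count || vma_pages(vma) > count - off)
		return -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}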
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 35511999156a..77c022e56e6e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1106,21 +1106,22 @@ config ARCH_SUPPORTS_UPROBES
bool
config DMA_MAYBE_COHERENT
+ select ARCH_HAS_DMA_COHERENCE_H
select DMA_NONCOHERENT
bool
config DMA_PERDEV_COHERENT
bool
- select DMA_MAYBE_COHERENT
+ select DMA_NONCOHERENT
config DMA_NONCOHERENT
bool
+ select ARCH_HAS_DMA_MMAP_PGPROT
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_SYNC_DMA_FOR_CPU
select NEED_DMA_MAP_STATE
- select DMA_NONCOHERENT_MMAP
+ select ARCH_HAS_DMA_COHERENT_TO_PFN
select DMA_NONCOHERENT_CACHE_SYNC
- select DMA_NONCOHERENT_OPS
config SYS_HAS_EARLY_PRINTK
bool
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index da7663770425..4bf02f96ab7f 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -29,8 +29,7 @@
#include <linux/leds.h>
#include <linux/mmc/host.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/spi/spi.h>
@@ -197,11 +196,10 @@ static struct i2c_board_info db1200_i2c_devs[] __initdata = {
/**********************************************************************/
-static void au1200_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+static void au1200_nand_cmd_ctrl(struct nand_chip *this, int cmd,
unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- unsigned long ioaddr = (unsigned long)this->IO_ADDR_W;
+ unsigned long ioaddr = (unsigned long)this->legacy.IO_ADDR_W;
ioaddr &= 0xffffff00;
@@ -213,14 +211,14 @@ static void au1200_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
/* assume we want to r/w real data by default */
ioaddr += MEM_STNAND_DATA;
}
- this->IO_ADDR_R = this->IO_ADDR_W = (void __iomem *)ioaddr;
+ this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = (void __iomem *)ioaddr;
if (cmd != NAND_CMD_NONE) {
- __raw_writeb(cmd, this->IO_ADDR_W);
+ __raw_writeb(cmd, this->legacy.IO_ADDR_W);
wmb();
}
}
-static int au1200_nand_device_ready(struct mtd_info *mtd)
+static int au1200_nand_device_ready(struct nand_chip *this)
{
return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1;
}
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index efb318e03e0a..ad7dd8e89598 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -19,8 +19,7 @@
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <linux/platform_device.h>
#include <linux/smsc911x.h>
#include <linux/wm97xx.h>
@@ -149,11 +148,10 @@ static void __init db1300_gpio_config(void)
/**********************************************************************/
-static void au1300_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+static void au1300_nand_cmd_ctrl(struct nand_chip *this, int cmd,
unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- unsigned long ioaddr = (unsigned long)this->IO_ADDR_W;
+ unsigned long ioaddr = (unsigned long)this->legacy.IO_ADDR_W;
ioaddr &= 0xffffff00;
@@ -165,14 +163,14 @@ static void au1300_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
/* assume we want to r/w real data by default */
ioaddr += MEM_STNAND_DATA;
}
- this->IO_ADDR_R = this->IO_ADDR_W = (void __iomem *)ioaddr;
+ this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = (void __iomem *)ioaddr;
if (cmd != NAND_CMD_NONE) {
- __raw_writeb(cmd, this->IO_ADDR_W);
+ __raw_writeb(cmd, this->legacy.IO_ADDR_W);
wmb();
}
}
-static int au1300_nand_device_ready(struct mtd_info *mtd)
+static int au1300_nand_device_ready(struct nand_chip *this)
{
return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1;
}
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index 7d3dfaa10231..7700ad0b93b4 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -13,8 +13,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
@@ -126,11 +125,10 @@ static struct i2c_board_info db1550_i2c_devs[] __initdata = {
/**********************************************************************/
-static void au1550_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+static void au1550_nand_cmd_ctrl(struct nand_chip *this, int cmd,
unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- unsigned long ioaddr = (unsigned long)this->IO_ADDR_W;
+ unsigned long ioaddr = (unsigned long)this->legacy.IO_ADDR_W;
ioaddr &= 0xffffff00;
@@ -142,14 +140,14 @@ static void au1550_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
/* assume we want to r/w real data by default */
ioaddr += MEM_STNAND_DATA;
}
- this->IO_ADDR_R = this->IO_ADDR_W = (void __iomem *)ioaddr;
+ this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = (void __iomem *)ioaddr;
if (cmd != NAND_CMD_NONE) {
- __raw_writeb(cmd, this->IO_ADDR_W);
+ __raw_writeb(cmd, this->legacy.IO_ADDR_W);
wmb();
}
}
-static int au1550_nand_device_ready(struct mtd_info *mtd)
+static int au1550_nand_device_ready(struct nand_chip *this)
{
return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1;
}
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 26c6b561d6f7..6fb16fd24035 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -154,6 +154,21 @@
clock-names = "baud", "module";
};
+ dmac: dma-controller@13020000 {
+ compatible = "ingenic,jz4740-dma";
+ reg = <0x13020000 0xbc
+ 0x13020300 0x14>;
+ #dma-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <29>;
+
+ clocks = <&cgu JZ4740_CLK_DMA>;
+
+ /* Disable dmac until we have something that uses it */
+ status = "disabled";
+ };
+
uhc: uhc@13030000 {
compatible = "ingenic,jz4740-ohci", "generic-ohci";
reg = <0x13030000 0x1000>;
diff --git a/arch/mips/boot/dts/ingenic/jz4770.dtsi b/arch/mips/boot/dts/ingenic/jz4770.dtsi
index 7c2804f3f5f1..49ede6c14ff3 100644
--- a/arch/mips/boot/dts/ingenic/jz4770.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4770.dtsi
@@ -196,6 +196,36 @@
status = "disabled";
};
+ dmac0: dma-controller@13420000 {
+ compatible = "ingenic,jz4770-dma";
+ reg = <0x13420000 0xC0
+ 0x13420300 0x20>;
+
+ #dma-cells = <1>;
+
+ clocks = <&cgu JZ4770_CLK_DMA>;
+ interrupt-parent = <&intc>;
+ interrupts = <24>;
+
+ /* Disable dmac0 until we have something that uses it */
+ status = "disabled";
+ };
+
+ dmac1: dma-controller@13420100 {
+ compatible = "ingenic,jz4770-dma";
+ reg = <0x13420100 0xC0
+ 0x13420400 0x20>;
+
+ #dma-cells = <1>;
+
+ clocks = <&cgu JZ4770_CLK_DMA>;
+ interrupt-parent = <&intc>;
+ interrupts = <23>;
+
+ /* Disable dmac1 until we have something that uses it */
+ status = "disabled";
+ };
+
uhc: uhc@13430000 {
compatible = "generic-ohci";
reg = <0x13430000 0x1000>;
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index ce93d57f1b4d..b03cdec56de9 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -266,7 +266,8 @@
dma: dma@13420000 {
compatible = "ingenic,jz4780-dma";
- reg = <0x13420000 0x10000>;
+ reg = <0x13420000 0x400
+ 0x13421000 0x40>;
#dma-cells = <2>;
interrupt-parent = <&intc>;
diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
index f7eb612b46ba..8ce317c5b9ed 100644
--- a/arch/mips/boot/dts/mscc/ocelot.dtsi
+++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
@@ -107,7 +107,6 @@
reg = <0x1010000 0x10000>,
<0x1030000 0x10000>,
<0x1080000 0x100>,
- <0x10d0000 0x10000>,
<0x11e0000 0x100>,
<0x11f0000 0x100>,
<0x1200000 0x100>,
@@ -121,10 +120,10 @@
<0x1280000 0x100>,
<0x1800000 0x80000>,
<0x1880000 0x10000>;
- reg-names = "sys", "rew", "qs", "hsio", "port0",
- "port1", "port2", "port3", "port4", "port5",
- "port6", "port7", "port8", "port9", "port10",
- "qsys", "ana";
+ reg-names = "sys", "rew", "qs", "port0", "port1",
+ "port2", "port3", "port4", "port5", "port6",
+ "port7", "port8", "port9", "port10", "qsys",
+ "ana";
interrupts = <21 22>;
interrupt-names = "xtr", "inj";
@@ -231,5 +230,15 @@
pinctrl-0 = <&miim1>;
status = "disabled";
};
+
+ hsio: syscon@10d0000 {
+ compatible = "mscc,ocelot-hsio", "syscon", "simple-mfd";
+ reg = <0x10d0000 0x10000>;
+
+ serdes: serdes {
+ compatible = "mscc,vsc7514-serdes";
+ #phy-cells = <2>;
+ };
+ };
};
};
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 58351e48421e..9a81e72119da 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,6 +1,7 @@
# MIPS headers
generic-$(CONFIG_GENERIC_CSUM) += checksum.h
generic-y += current.h
+generic-y += device.h
generic-y += dma-contiguous.h
generic-y += emergency-restart.h
generic-y += export.h
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 78675f19440f..c99166eadbde 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -9,43 +9,25 @@
#include <asm/page.h>
#include <asm/ptrace.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "mips\0\0\0"
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_suseconds_t;
-
-typedef s32 compat_pid_t;
typedef s32 __compat_uid_t;
typedef s32 __compat_gid_t;
typedef __compat_uid_t __compat_uid32_t;
typedef __compat_gid_t __compat_gid32_t;
typedef u32 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u32 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef u32 compat_nlink_t;
typedef s32 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef s32 compat_caddr_t;
typedef struct {
s32 val[2];
} compat_fsid_t;
-typedef s32 compat_timer_t;
-typedef s32 compat_key_t;
-
-typedef s16 compat_short_t;
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u16 compat_ushort_t;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
struct compat_stat {
compat_dev_t st_dev;
@@ -59,11 +41,11 @@ struct compat_stat {
s32 st_pad2[2];
compat_off_t st_size;
s32 st_pad3;
- compat_time_t st_atime;
+ old_time32_t st_atime;
s32 st_atime_nsec;
- compat_time_t st_mtime;
+ old_time32_t st_mtime;
s32 st_mtime_nsec;
- compat_time_t st_ctime;
+ old_time32_t st_ctime;
s32 st_ctime_nsec;
s32 st_blksize;
s32 st_blocks;
diff --git a/arch/mips/include/asm/device.h b/arch/mips/include/asm/device.h
deleted file mode 100644
index 6aa796f1081a..000000000000
--- a/arch/mips/include/asm/device.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#ifndef _ASM_MIPS_DEVICE_H
-#define _ASM_MIPS_DEVICE_H
-
-struct dev_archdata {
-#ifdef CONFIG_DMA_PERDEV_COHERENT
- /* Non-zero if DMA is coherent with CPU caches */
- bool dma_coherent;
-#endif
-};
-
-struct pdev_archdata {
-};
-
-#endif /* _ASM_MIPS_DEVICE_H*/
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
index 8eda48748ed5..5eaa1fcc878a 100644
--- a/arch/mips/include/asm/dma-coherence.h
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -20,6 +20,12 @@ enum coherent_io_user_state {
#elif defined(CONFIG_DMA_MAYBE_COHERENT)
extern enum coherent_io_user_state coherentio;
extern int hw_coherentio;
+
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return coherentio == IO_COHERENCE_ENABLED ||
+ (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio);
+}
#else
#ifdef CONFIG_DMA_NONCOHERENT
#define coherentio IO_COHERENCE_DISABLED
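
The new dev_is_dma_coherent() folds the global coherentio policy and the hardware capability into one per-device test, so noncoherent DMA paths can short-circuit cache maintenance. Usage sketch:

static void sync_for_device_sketch(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	if (dev_is_dma_coherent(dev))
		return;		/* coherent I/O: no cache flush needed */
	arch_sync_dma_for_device(dev, paddr, size, dir);
}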
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index e81c4e97ff1a..b4c477eb46ce 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -12,8 +12,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &jazz_dma_ops;
#elif defined(CONFIG_SWIOTLB)
return &swiotlb_dma_ops;
-#elif defined(CONFIG_DMA_NONCOHERENT_OPS)
- return &dma_noncoherent_ops;
#else
return &dma_direct_ops;
#endif
@@ -25,7 +23,7 @@ static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
bool coherent)
{
#ifdef CONFIG_DMA_PERDEV_COHERENT
- dev->archdata.dma_coherent = coherent;
+ dev->dma_coherent = coherent;
#endif
}
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index b2fa62922d88..49d6046ca1d0 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -13,6 +13,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
+#include <linux/sizes.h>
#include <linux/threads.h>
#include <asm/cachectl.h>
@@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count;
#endif
-/*
- * One page above the stack is used for branch delay slot "emulation".
- * See dsemul.c for details.
- */
-#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
+#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
+
+extern unsigned long mips_stack_top(void);
+#define STACK_TOP mips_stack_top()
/*
* This decides where the kernel will search for a free chunk of vm
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 3c09450908aa..c68b8ae3efcb 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -24,16 +24,17 @@
#ifndef __ASSEMBLY__
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_WAITPID
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLD_UNAME
#define __ARCH_WANT_SYS_OLDUMOUNT
diff --git a/arch/mips/include/asm/vr41xx/giu.h b/arch/mips/include/asm/vr41xx/giu.h
index 6a90bc1d916b..ecda4cf300de 100644
--- a/arch/mips/include/asm/vr41xx/giu.h
+++ b/arch/mips/include/asm/vr41xx/giu.h
@@ -51,12 +51,4 @@ typedef enum {
extern void vr41xx_set_irq_level(unsigned int pin, irq_level_t level);
-typedef enum {
- GPIO_PULL_DOWN,
- GPIO_PULL_UP,
- GPIO_PULL_DISABLE,
-} gpio_pull_t;
-
-extern int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull);
-
#endif /* __NEC_VR41XX_GIU_H */
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index 262504bd59a5..c34c7eef0a1c 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -14,17 +14,6 @@
#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int))
#undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */
-/*
- * Careful to keep union _sifields from shifting ...
- */
-#if _MIPS_SZLONG == 32
-#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
-#elif _MIPS_SZLONG == 64
-#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-#else
-#error _MIPS_SZLONG neither 32 nor 64
-#endif
-
#define __ARCH_HAS_SWAPPED_SIGINFO
#include <asm-generic/siginfo.h>
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index d31bc2f01208..0a0aaf39fd16 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -564,13 +564,13 @@ static void *jazz_dma_alloc(struct device *dev, size_t size,
{
void *ret;
- ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
if (!ret)
return NULL;
*dma_handle = vdma_alloc(virt_to_phys(ret), size);
if (*dma_handle == VDMA_ERROR) {
- dma_direct_free(dev, size, ret, *dma_handle, attrs);
+ dma_direct_free_pages(dev, size, ret, *dma_handle, attrs);
return NULL;
}
@@ -587,7 +587,7 @@ static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
vdma_free(dma_handle);
if (!(attrs & DMA_ATTR_NON_CONSISTENT))
vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
- return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+ dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs);
}
static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
@@ -682,7 +682,6 @@ static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
const struct dma_map_ops jazz_dma_ops = {
.alloc = jazz_dma_alloc,
.free = jazz_dma_free,
- .mmap = arch_dma_mmap,
.map_page = jazz_dma_map_page,
.unmap_page = jazz_dma_unmap_page,
.map_sg = jazz_dma_map_sg,
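Note that jazz_dma_free() above converts the uncached alias back to a
cached address with CAC_ADDR() before freeing. On 32-bit MIPS the cached
and uncached views are the fixed KSEG0/KSEG1 windows over the same
physical memory; a sketch of that aliasing, with names suffixed _SKETCH to
avoid clashing with the kernel's real macros:

#define KSEG0_BASE 0x80000000UL /* cached, unmapped window */
#define KSEG1_BASE 0xa0000000UL /* uncached, unmapped window */

/* Same 512 MB of physical memory, different cacheability. */
#define UNCAC_ADDR_SKETCH(a) (((unsigned long)(a) & 0x1fffffffUL) | KSEG1_BASE)
#define CAC_ADDR_SKETCH(a)   (((unsigned long)(a) & 0x1fffffffUL) | KSEG0_BASE)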
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 89b234844534..7a12763d553a 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -54,10 +54,10 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
- struct compat_timeval pr_cutime;/* Cumulative user time */
- struct compat_timeval pr_cstime;/* Cumulative system time */
+ struct old_timeval32 pr_utime; /* User time */
+ struct old_timeval32 pr_stime; /* System time */
+ struct old_timeval32 pr_cutime;/* Cumulative user time */
+ struct old_timeval32 pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
@@ -81,9 +81,9 @@ struct elf_prpsinfo32
#define elf_caddr_t u32
#define init_elf_binfmt init_elfn32_binfmt
-#define jiffies_to_timeval jiffies_to_compat_timeval
+#define jiffies_to_timeval jiffies_to_old_timeval32
static __inline__ void
-jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
+jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
{
/*
* Convert jiffies to nanoseconds and separate with
@@ -101,6 +101,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
#define TASK_SIZE TASK_SIZE32
#undef ns_to_timeval
-#define ns_to_timeval ns_to_compat_timeval
+#define ns_to_timeval ns_to_old_timeval32
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index a88c59db3d48..e6db06a1d31a 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -59,10 +59,10 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
- struct compat_timeval pr_cutime;/* Cumulative user time */
- struct compat_timeval pr_cstime;/* Cumulative system time */
+ struct old_timeval32 pr_utime; /* User time */
+ struct old_timeval32 pr_stime; /* System time */
+ struct old_timeval32 pr_cutime;/* Cumulative user time */
+ struct old_timeval32 pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
@@ -86,9 +86,9 @@ struct elf_prpsinfo32
#define elf_caddr_t u32
#define init_elf_binfmt init_elf32_binfmt
-#define jiffies_to_timeval jiffies_to_compat_timeval
+#define jiffies_to_timeval jiffies_to_old_timeval32
static inline void
-jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
+jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
{
/*
* Convert jiffies to nanoseconds and separate with
@@ -104,6 +104,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
#define TASK_SIZE TASK_SIZE32
#undef ns_to_timeval
-#define ns_to_timeval ns_to_compat_timeval
+#define ns_to_timeval ns_to_old_timeval32
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8fc69891e117..d4f7fd4550e1 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -32,6 +32,7 @@
#include <linux/nmi.h>
#include <linux/cpu.h>
+#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
@@ -39,6 +40,7 @@
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/irq.h>
+#include <asm/mips-cps.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
@@ -645,6 +647,29 @@ out:
return pc;
}
+unsigned long mips_stack_top(void)
+{
+ unsigned long top = TASK_SIZE & PAGE_MASK;
+
+ /* One page for branch delay slot "emulation" */
+ top -= PAGE_SIZE;
+
+ /* Space for the VDSO, data page & GIC user page */
+ top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+ top -= PAGE_SIZE;
+ top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+ /* Space for cache colour alignment */
+ if (cpu_has_dc_aliases)
+ top -= shm_align_mask + 1;
+
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+
+ return top;
+}
+
/*
* Don't forget that the stack pointer must be aligned on an 8-byte
* boundary for the 32-bit ABI and a 16-byte boundary for the 64-bit ABI.
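For illustration, a user-space sketch of the reservation arithmetic that
mips_stack_top() performs above. The page size, VDSO image size and the
randomization window are assumptions for the 32-bit case; the real values
come from the ABI's VDSO image and the cache configuration:

#include <stdio.h>

#define PAGE_SIZE     0x1000UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long task_size = 0x80000000UL;  /* assumed 32-bit layout */
        unsigned long vdso_size = 2 * PAGE_SIZE; /* hypothetical image size */
        unsigned long top = task_size & PAGE_MASK;

        top -= PAGE_SIZE;             /* branch delay slot emulation page */
        top -= PAGE_ALIGN(vdso_size); /* VDSO image */
        top -= PAGE_SIZE;             /* VDSO data page (no GIC page assumed) */
        top -= 0x100000UL;            /* VDSO_RANDOMIZE_SIZE, 32-bit: 1 MiB */

        printf("stack top: %#lx\n", top);
        return 0;
}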
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c71d1eb7da59..e64b9e8bb002 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -846,6 +846,34 @@ static void __init arch_mem_init(char **cmdline_p)
struct memblock_region *reg;
extern void plat_mem_setup(void);
+ /*
+ * Initialize boot_command_line to an innocuous but non-empty string in
+ * order to prevent early_init_dt_scan_chosen() from copying
+ * CONFIG_CMDLINE into it without our knowledge. We handle
+ * CONFIG_CMDLINE ourselves below & don't want to duplicate its
+ * content because repeating arguments can be problematic.
+ */
+ strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);
+
+ /* call board setup routine */
+ plat_mem_setup();
+
+ /*
+ * Make sure all kernel memory is in the maps. The "UP" and
+ * "DOWN" are opposite for initdata since if it crosses over
+ * into another memory section you don't want that to be
+ * freed when the initdata is freed.
+ */
+ arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
+ PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
+ BOOT_MEM_RAM);
+ arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
+ PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
+ BOOT_MEM_INIT_RAM);
+
+ pr_info("Determined physical RAM map:\n");
+ print_memory_map();
+
#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
@@ -873,26 +901,6 @@ static void __init arch_mem_init(char **cmdline_p)
}
#endif
#endif
-
- /* call board setup routine */
- plat_mem_setup();
-
- /*
- * Make sure all kernel memory is in the maps. The "UP" and
- * "DOWN" are opposite for initdata since if it crosses over
- * into another memory section you don't want that to be
- * freed when the initdata is freed.
- */
- arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
- PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
- BOOT_MEM_RAM);
- arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
- PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
- BOOT_MEM_INIT_RAM);
-
- pr_info("Determined physical RAM map:\n");
- print_memory_map();
-
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
@@ -1067,7 +1075,7 @@ static int __init debugfs_mips(void)
arch_initcall(debugfs_mips);
#endif
-#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
+#ifdef CONFIG_DMA_MAYBE_COHERENT
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 8f845f6e5f42..48a9c6b90e07 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -15,6 +15,7 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
@@ -97,6 +98,21 @@ void update_vsyscall_tz(void)
}
}
+static unsigned long vdso_base(void)
+{
+ unsigned long base;
+
+ /* Skip the delay slot emulation page */
+ base = STACK_TOP + PAGE_SIZE;
+
+ if (current->flags & PF_RANDOMIZE) {
+ base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+ base = PAGE_ALIGN(base);
+ }
+
+ return base;
+}
+
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
@@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (cpu_has_dc_aliases)
size += shm_align_mask + 1;
- base = get_unmapped_area(NULL, 0, size, 0, 0);
+ base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
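The randomization itself is small enough to sketch standalone: add up to
VDSO_RANDOMIZE_SIZE of entropy above the delay-slot page, then round up to
a page boundary. rand() stands in for get_random_int() and the helper name
is hypothetical:

#include <stdlib.h>

#define PAGE_SIZE           0x1000UL
#define PAGE_ALIGN(x)       (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define VDSO_RANDOMIZE_SIZE 0x100000UL /* 1 MiB, the 32-bit case */

static unsigned long vdso_base_sketch(unsigned long stack_top, int randomize)
{
        /* Skip the delay slot emulation page, as vdso_base() does above. */
        unsigned long base = stack_top + PAGE_SIZE;

        if (randomize) {
                base += (unsigned long)rand() & (VDSO_RANDOMIZE_SIZE - 1);
                base = PAGE_ALIGN(base);
        }
        return base;
}

Because mips_stack_top() already reserved VDSO_RANDOMIZE_SIZE below the old
top when PF_RANDOMIZE is set, the randomized base cannot push the VDSO past
the end of the usable address space.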
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 664f2f7f55c1..982859f2b2a3 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -106,7 +106,6 @@ ltq_dma_open(struct ltq_dma_channel *ch)
spin_lock_irqsave(&ltq_dma_lock, flag);
ltq_dma_w32(ch->nr, LTQ_DMA_CS);
ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
- ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
spin_unlock_irqrestore(&ltq_dma_lock, flag);
}
EXPORT_SYMBOL_GPL(ltq_dma_open);
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index e0af39b33e28..fe25c99089b7 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -505,7 +505,7 @@ void __init ltq_soc_init(void)
clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI);
clkdev_add_pmu("1a800000.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI);
clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL);
- clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP);
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP);
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
} else if (of_machine_is_compatible("lantiq,ar10")) {
@@ -513,11 +513,11 @@ void __init ltq_soc_init(void)
ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz());
clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
- clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH |
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
PMU_PPE_DP | PMU_PPE_TC);
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
- clkdev_add_pmu("1f203020.gphy", NULL, 1, 0, PMU_GPHY);
- clkdev_add_pmu("1f203068.gphy", NULL, 1, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
@@ -536,12 +536,12 @@ void __init ltq_soc_init(void)
clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS);
clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
- clkdev_add_pmu("1e108000.eth", NULL, 0, 0,
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0,
PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
PMU_PPE_QSB | PMU_PPE_TOP);
- clkdev_add_pmu("1f203020.gphy", NULL, 0, 0, PMU_GPHY);
- clkdev_add_pmu("1f203068.gphy", NULL, 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 3a6f34ef5ffc..069acec3df9f 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -280,9 +280,11 @@
* unset_bytes = end_addr - current_addr + 1
* a2 = t1 - a0 + 1
*/
+ .set reorder
PTR_SUBU a2, t1, a0
+ PTR_ADDIU a2, 1
jr ra
- PTR_ADDIU a2, 1
+ .set noreorder
.endm
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index a9ef057c79fe..05bd77727fb9 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1955,22 +1955,21 @@ void r4k_cache_init(void)
__flush_icache_user_range = r4k_flush_icache_user_range;
__local_flush_icache_user_range = local_r4k_flush_icache_user_range;
-#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
-# if defined(CONFIG_DMA_PERDEV_COHERENT)
- if (0) {
-# else
- if ((coherentio == IO_COHERENCE_ENABLED) ||
- ((coherentio == IO_COHERENCE_DEFAULT) && hw_coherentio)) {
-# endif
+#ifdef CONFIG_DMA_NONCOHERENT
+#ifdef CONFIG_DMA_MAYBE_COHERENT
+ if (coherentio == IO_COHERENCE_ENABLED ||
+ (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
_dma_cache_wback_inv = (void *)cache_noop;
_dma_cache_wback = (void *)cache_noop;
_dma_cache_inv = (void *)cache_noop;
- } else {
+ } else
+#endif /* CONFIG_DMA_MAYBE_COHERENT */
+ {
_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
_dma_cache_wback = r4k_dma_cache_wback_inv;
_dma_cache_inv = r4k_dma_cache_inv;
}
-#endif
+#endif /* CONFIG_DMA_NONCOHERENT */
build_clear_page();
build_copy_page();
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 2aca1236af36..e6c9485cadcf 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -14,26 +14,6 @@
#include <asm/dma-coherence.h>
#include <asm/io.h>
-#ifdef CONFIG_DMA_PERDEV_COHERENT
-static inline int dev_is_coherent(struct device *dev)
-{
- return dev->archdata.dma_coherent;
-}
-#else
-static inline int dev_is_coherent(struct device *dev)
-{
- switch (coherentio) {
- default:
- case IO_COHERENCE_DEFAULT:
- return hw_coherentio;
- case IO_COHERENCE_ENABLED:
- return 1;
- case IO_COHERENCE_DISABLED:
- return 0;
- }
-}
-#endif /* CONFIG_DMA_PERDEV_COHERENT */
-
/*
* The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
* fill random cachelines with stale data at any time, requiring an extra
@@ -49,9 +29,6 @@ static inline int dev_is_coherent(struct device *dev)
*/
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
- if (dev_is_coherent(dev))
- return false;
-
switch (boot_cpu_type()) {
case CPU_R10000:
case CPU_R12000:
@@ -72,11 +49,8 @@ void *arch_dma_alloc(struct device *dev, size_t size,
{
void *ret;
- ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
- if (!ret)
- return NULL;
-
- if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+ ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+ if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
dma_cache_wback_inv((unsigned long) ret, size);
ret = (void *)UNCAC_ADDR(ret);
}
@@ -87,43 +61,24 @@ void *arch_dma_alloc(struct device *dev, size_t size,
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
- if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev))
+ if (!(attrs & DMA_ATTR_NON_CONSISTENT))
cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
- dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+ dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr)
{
- unsigned long user_count = vma_pages(vma);
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long addr = (unsigned long)cpu_addr;
- unsigned long off = vma->vm_pgoff;
- unsigned long pfn;
- int ret = -ENXIO;
-
- if (!dev_is_coherent(dev))
- addr = CAC_ADDR(addr);
-
- pfn = page_to_pfn(virt_to_page((void *)addr));
+ unsigned long addr = CAC_ADDR((unsigned long)cpu_addr);
+ return page_to_pfn(virt_to_page((void *)addr));
+}
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+ unsigned long attrs)
+{
if (attrs & DMA_ATTR_WRITE_COMBINE)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
- if (off < count && user_count <= (count - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
- user_count << PAGE_SHIFT,
- vma->vm_page_prot);
- }
-
- return ret;
+ return pgprot_writecombine(prot);
+ return pgprot_noncached(prot);
}
static inline void dma_sync_virt(void *addr, size_t size,
@@ -187,8 +142,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
- if (!dev_is_coherent(dev))
- dma_sync_phys(paddr, size, dir);
+ dma_sync_phys(paddr, size, dir);
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
@@ -203,6 +157,5 @@ void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
BUG_ON(direction == DMA_NONE);
- if (!dev_is_coherent(dev))
- dma_sync_virt(vaddr, size, direction);
+ dma_sync_virt(vaddr, size, direction);
}
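With the per-device coherence test gone, the arch no longer implements the
mmap path itself; it only answers two questions (which pfn, which pgprot).
A kernel-style sketch, as an assumption about how generic dma-mapping code
can rebuild mmap from the two hooks kept above (dma_mmap_sketch is not this
patch's code):

static int dma_mmap_sketch(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

        /* Same bounds check the deleted arch_dma_mmap() performed. */
        if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
                return -ENXIO;

        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                               user_count << PAGE_SHIFT, vma->vm_page_prot);
}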
diff --git a/arch/mips/netlogic/xlr/platform-flash.c b/arch/mips/netlogic/xlr/platform-flash.c
index 4d1b4c003376..cf9162284b07 100644
--- a/arch/mips/netlogic/xlr/platform-flash.c
+++ b/arch/mips/netlogic/xlr/platform-flash.c
@@ -19,8 +19,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/physmap.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/xlr/iomap.h>
@@ -92,8 +91,8 @@ struct xlr_nand_flash_priv {
static struct xlr_nand_flash_priv nand_priv;
-static void xlr_nand_ctrl(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
+static void xlr_nand_ctrl(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
{
if (ctrl & NAND_CLE)
nlm_write_reg(nand_priv.flash_mmio,
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index a7a4e9f5146d..dafbf027fad0 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -30,8 +30,7 @@
#include <linux/resource.h>
#include <linux/serial.h>
#include <linux/serial_pnx8xxx.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
#include <irq.h>
#include <irq-mapping.h>
@@ -178,10 +177,9 @@ static struct platform_device pnx833x_sata_device = {
};
static void
-pnx833x_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+pnx833x_flash_nand_cmd_ctrl(struct nand_chip *this, int cmd, unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- unsigned long nandaddr = (unsigned long)this->IO_ADDR_W;
+ unsigned long nandaddr = (unsigned long)this->legacy.IO_ADDR_W;
if (cmd == NAND_CMD_NONE)
return;
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 354d258396ff..2b23ad640f39 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -20,9 +20,8 @@
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/platform_device.h>
-#include <linux/mtd/rawnand.h>
+#include <linux/mtd/platnand.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
@@ -141,14 +140,13 @@ static struct platform_device cf_slot0 = {
};
/* Resources and device for NAND */
-static int rb532_dev_ready(struct mtd_info *mtd)
+static int rb532_dev_ready(struct nand_chip *chip)
{
return gpio_get_value(GPIO_RDY);
}
-static void rb532_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+static void rb532_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
unsigned char orbits, nandbits;
if (ctrl & NAND_CTRL_CHANGE) {
@@ -161,7 +159,7 @@ static void rb532_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
set_latch_u5(orbits, nandbits);
}
if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
+ writeb(cmd, chip->legacy.IO_ADDR_W);
}
static struct resource nand_slot0_res[] = {
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 7068f341133d..56992330026a 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -11,7 +11,7 @@ config NDS32
select CLKSRC_MMIO
select CLONE_BACKWARDS
select COMMON_CLK
- select DMA_NONCOHERENT_OPS
+ select DMA_DIRECT_OPS
select GENERIC_ATOMIC64
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h
index 6e95901cabe3..603e826e0449 100644
--- a/arch/nds32/include/uapi/asm/unistd.h
+++ b/arch/nds32/include/uapi/asm/unistd.h
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYNC_FILE_RANGE2
/* Use the standard ABI for syscalls */
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index f4ad1138e6b9..03965692fbfe 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -4,7 +4,7 @@ config NIOS2
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_SWAP
- select DMA_NONCOHERENT_OPS
+ select DMA_DIRECT_OPS
select TIMER_OF
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
diff --git a/arch/nios2/include/uapi/asm/unistd.h b/arch/nios2/include/uapi/asm/unistd.h
index b6bdae04bc84..d9948d88790b 100644
--- a/arch/nios2/include/uapi/asm/unistd.h
+++ b/arch/nios2/include/uapi/asm/unistd.h
@@ -19,6 +19,7 @@
#define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
/* Use the standard ABI for syscalls */
#include <asm-generic/unistd.h>
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index e0081e734827..a655ae280637 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -7,7 +7,7 @@
config OPENRISC
def_bool y
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select DMA_NONCOHERENT_OPS
+ select DMA_DIRECT_OPS
select OF
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h
index 11c5a58ab333..ec37df18d8ed 100644
--- a/arch/openrisc/include/uapi/asm/unistd.h
+++ b/arch/openrisc/include/uapi/asm/unistd.h
@@ -20,6 +20,7 @@
#define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 8e6d83f79e72..f1cd12afd943 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -186,7 +186,7 @@ config PA11
depends on PA7000 || PA7100LC || PA7200 || PA7300LC
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select DMA_NONCOHERENT_OPS
+ select DMA_DIRECT_OPS
select DMA_NONCOHERENT_CACHE_SYNC
config PREFETCH
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 5ce030266e7d..d047a09d660f 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -156,12 +156,3 @@ define archhelp
@echo ' copy to $$(INSTALL_PATH)'
@echo ' zinstall - Install compressed vmlinuz kernel'
endef
-
-# we require gcc 3.3 or above to compile the kernel
-archprepare: checkbin
-checkbin:
- @if test "$(cc-version)" -lt "0303"; then \
- echo -n "Sorry, GCC v3.3 or above is required to build " ; \
- echo "the kernel." ; \
- false ; \
- fi
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
index 7d7e594bda36..777533cdea31 100644
--- a/arch/parisc/boot/compressed/Makefile
+++ b/arch/parisc/boot/compressed/Makefile
@@ -14,7 +14,7 @@ targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -fno-builtin-printf
KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os
ifndef CONFIG_64BIT
KBUILD_CFLAGS += -mfast-indirect-calls
@@ -22,7 +22,6 @@ endif
OBJECTS += $(obj)/head.o $(obj)/real2.o $(obj)/firmware.o $(obj)/misc.o $(obj)/piggy.o
-# LDFLAGS_vmlinux := -X --whole-archive -e startup -T
LDFLAGS_vmlinux := -X -e startup --as-needed -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(LIBGCC)
$(call if_changed,ld)
@@ -55,7 +54,6 @@ $(obj)/misc.o: $(obj)/sizes.h
CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
$(obj)/vmlinux.lds: $(obj)/sizes.h
-OBJCOPYFLAGS_vmlinux.bin := -O binary -R .comment -S
$(obj)/vmlinux.bin: vmlinux
$(call if_changed,objcopy)
diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c
index f57118e1f6b4..2556bb181813 100644
--- a/arch/parisc/boot/compressed/misc.c
+++ b/arch/parisc/boot/compressed/misc.c
@@ -5,6 +5,7 @@
*/
#include <linux/uaccess.h>
+#include <linux/elf.h>
#include <asm/unaligned.h>
#include <asm/page.h>
#include "sizes.h"
@@ -227,13 +228,62 @@ static void flush_data_cache(char *start, unsigned long length)
asm ("sync");
}
+static void parse_elf(void *output)
+{
+#ifdef CONFIG_64BIT
+ Elf64_Ehdr ehdr;
+ Elf64_Phdr *phdrs, *phdr;
+#else
+ Elf32_Ehdr ehdr;
+ Elf32_Phdr *phdrs, *phdr;
+#endif
+ void *dest;
+ int i;
+
+ memcpy(&ehdr, output, sizeof(ehdr));
+ if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
+ ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
+ ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
+ ehdr.e_ident[EI_MAG3] != ELFMAG3) {
+ error("Kernel is not a valid ELF file");
+ return;
+ }
+
+#ifdef DEBUG
+ printf("Parsing ELF... ");
+#endif
+
+ phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum);
+ if (!phdrs)
+ error("Failed to allocate space for phdrs");
+
+ memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum);
+
+ for (i = 0; i < ehdr.e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ switch (phdr->p_type) {
+ case PT_LOAD:
+ dest = (void *)((unsigned long) phdr->p_paddr &
+ (__PAGE_OFFSET_DEFAULT-1));
+ memmove(dest, output + phdr->p_offset, phdr->p_filesz);
+ break;
+ default:
+ break;
+ }
+ }
+
+ free(phdrs);
+}
+
unsigned long decompress_kernel(unsigned int started_wide,
unsigned int command_line,
const unsigned int rd_start,
const unsigned int rd_end)
{
char *output;
- unsigned long len, len_all;
+ unsigned long vmlinux_addr, vmlinux_len;
+ unsigned long kernel_addr, kernel_len;
#ifdef CONFIG_64BIT
parisc_narrow_firmware = 0;
@@ -241,27 +291,29 @@ unsigned long decompress_kernel(unsigned int started_wide,
set_firmware_width_unlocked();
- putchar('U'); /* if you get this p and no more, string storage */
+ putchar('D'); /* if you get this D and no more, string storage */
/* in $GLOBAL$ is wrong or %dp is wrong */
- puts("ncompressing ...\n");
-
- output = (char *) KERNEL_BINARY_TEXT_START;
- len_all = __pa(SZ_end) - __pa(SZparisc_kernel_start);
+ puts("ecompressing Linux... ");
- if ((unsigned long) &_startcode_end > (unsigned long) output)
+ /* where the final bits are stored */
+ kernel_addr = KERNEL_BINARY_TEXT_START;
+ kernel_len = __pa(SZ_end) - __pa(SZparisc_kernel_start);
+ if ((unsigned long) &_startcode_end > kernel_addr)
error("Bootcode overlaps kernel code");
- len = get_unaligned_le32(&output_len);
- if (len > len_all)
- error("Output len too big.");
- else
- memset(&output[len], 0, len_all - len);
+ /*
+ * Calculate addr to where the vmlinux ELF file shall be decompressed.
+ * Assembly code in head.S positioned the stack directly behind bss, so
+ * leave 2 MB for the stack.
+ */
+ vmlinux_addr = (unsigned long) &_ebss + 2*1024*1024;
+ vmlinux_len = get_unaligned_le32(&output_len);
+ output = (char *) vmlinux_addr;
/*
* Initialize free_mem_ptr and free_mem_end_ptr.
*/
- free_mem_ptr = (unsigned long) &_ebss;
- free_mem_ptr += 2*1024*1024; /* leave 2 MB for stack */
+ free_mem_ptr = vmlinux_addr + vmlinux_len;
/* Limit memory for bootloader to 1GB */
#define ARTIFICIAL_LIMIT (1*1024*1024*1024)
@@ -275,7 +327,11 @@ unsigned long decompress_kernel(unsigned int started_wide,
free_mem_end_ptr = rd_start;
#endif
+ if (free_mem_ptr >= free_mem_end_ptr)
+ error("Kernel too big for machine.");
+
#ifdef DEBUG
+ printf("\n");
printf("startcode_end = %x\n", &_startcode_end);
printf("commandline = %x\n", command_line);
printf("rd_start = %x\n", rd_start);
@@ -287,16 +343,19 @@ unsigned long decompress_kernel(unsigned int started_wide,
printf("input_data = %x\n", input_data);
printf("input_len = %x\n", input_len);
printf("output = %x\n", output);
- printf("output_len = %x\n", len);
- printf("output_max = %x\n", len_all);
+ printf("output_len = %x\n", vmlinux_len);
+ printf("kernel_addr = %x\n", kernel_addr);
+ printf("kernel_len = %x\n", kernel_len);
#endif
__decompress(input_data, input_len, NULL, NULL,
output, 0, NULL, error);
+ parse_elf(output);
- flush_data_cache(output, len);
+ output = (char *) kernel_addr;
+ flush_data_cache(output, kernel_len);
- printf("Booting kernel ...\n\n");
+ printf("done.\nBooting the kernel.\n");
return (unsigned long) output;
}
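parse_elf() relocates each PT_LOAD segment to its physical address: the
bootloader runs unmapped, so the destination is the segment's p_paddr with
the kernel's page offset masked off. A small worked example with a
hypothetical segment address:

#include <stdio.h>

#define __PAGE_OFFSET_DEFAULT 0x10000000UL /* 32-bit kernels: 256 MB */

int main(void)
{
        unsigned long p_paddr = 0x10200000UL; /* hypothetical PT_LOAD addr */
        void *dest = (void *)(p_paddr & (__PAGE_OFFSET_DEFAULT - 1));

        printf("copy segment to %p\n", dest); /* 0x200000 */
        return 0;
}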
diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S
index 4ebd4e65524c..bfd7872739a3 100644
--- a/arch/parisc/boot/compressed/vmlinux.lds.S
+++ b/arch/parisc/boot/compressed/vmlinux.lds.S
@@ -42,6 +42,12 @@ SECTIONS
#endif
_startcode_end = .;
+ /* vmlinux.bin.gz is here */
+ . = ALIGN(8);
+ .rodata.compressed : {
+ *(.rodata.compressed)
+ }
+
/* bootloader code and data starts behind area of extracted kernel */
. = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
@@ -68,10 +74,6 @@ SECTIONS
_erodata = . ;
}
. = ALIGN(8);
- .rodata.compressed : {
- *(.rodata.compressed)
- }
- . = ALIGN(8);
.bss : {
_bss = . ;
*(.bss)
diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h
new file mode 100644
index 000000000000..bf485a94d0b4
--- /dev/null
+++ b/arch/parisc/include/asm/alternative.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PARISC_ALTERNATIVE_H
+#define __ASM_PARISC_ALTERNATIVE_H
+
+#define ALT_COND_NO_SMP 0x01 /* when running UP instead of SMP */
+#define ALT_COND_NO_DCACHE 0x02 /* if system has no d-cache */
+#define ALT_COND_NO_ICACHE 0x04 /* if system has no i-cache */
+#define ALT_COND_NO_SPLIT_TLB 0x08 /* if split_tlb == 0 */
+#define ALT_COND_NO_IOC_FDC 0x10 /* if I/O cache does not need flushes */
+
+#define INSN_PxTLB 0x02 /* modify pdtlb, pitlb */
+#define INSN_NOP 0x08000240 /* nop */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+ s32 orig_offset; /* offset to original instructions */
+ u32 len; /* length of original instructions, in words */
+ u32 cond; /* see ALT_COND_XXX */
+ u32 replacement; /* replacement instruction or code */
+};
+
+void set_kernel_text_rw(int enable_read_write);
+
+/* Alternative SMP implementation. */
+#define ALTERNATIVE(cond, replacement) "!0:" \
+ ".section .altinstructions, \"aw\" !" \
+ ".word (0b-4-.), 1, " __stringify(cond) "," \
+ __stringify(replacement) " !" \
+ ".previous"
+
+#else
+
+#define ALTERNATIVE(from, to, cond, replacement)\
+ .section .altinstructions, "aw" ! \
+ .word (from - .), (to - from)/4 ! \
+ .word cond, replacement ! \
+ .previous
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_PARISC_ALTERNATIVE_H */
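Each ALTERNATIVE() site emits a struct alt_instr record into the
.altinstructions section; at boot a patcher can walk that table and
overwrite the covered instructions when the condition applies. A heavily
simplified sketch of such a walker (the function and the every-word-gets-
the-same-replacement rule are assumptions, not this patch's patcher):

#include <stdint.h>

#define INSN_NOP 0x08000240u

struct alt_instr_sketch {
        int32_t  orig_offset; /* offset from this record to the insn */
        uint32_t len;         /* patched range, in 4-byte words */
        uint32_t cond;        /* ALT_COND_* bits to test */
        uint32_t replacement; /* instruction to write, e.g. INSN_NOP */
};

static void apply_alternatives_sketch(struct alt_instr_sketch *start,
                                      struct alt_instr_sketch *end,
                                      uint32_t active_conds)
{
        for (struct alt_instr_sketch *a = start; a < end; a++) {
                uint32_t *insn = (uint32_t *)((char *)&a->orig_offset +
                                              a->orig_offset);
                if (!(a->cond & active_conds))
                        continue;
                for (uint32_t i = 0; i < a->len; i++)
                        insn[i] = a->replacement;
        }
}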
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index e9c6385ef0d1..c17ec0ee6e7c 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -129,15 +129,8 @@
.macro debug value
.endm
-
- /* Shift Left - note the r and t can NOT be the same! */
- .macro shl r, sa, t
- dep,z \r, 31-(\sa), 32-(\sa), \t
- .endm
-
- /* The PA 2.0 shift left */
.macro shlw r, sa, t
- depw,z \r, 31-(\sa), 32-(\sa), \t
+ zdep \r, 31-(\sa), 32-(\sa), \t
.endm
/* And the PA 2.0W shift left */
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 150b7f30ea90..006fb939cac8 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -6,6 +6,7 @@
#ifndef __ARCH_PARISC_CACHE_H
#define __ARCH_PARISC_CACHE_H
+#include <asm/alternative.h>
/*
* PA 2.0 processors have 64 and 128-byte L2 cachelines; PA 1.1 processors
@@ -41,9 +42,24 @@ extern int icache_stride;
extern struct pdc_cache_info cache_info;
void parisc_setup_cache_timing(void);
-#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr));
-#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr));
-#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" : : "r" (addr));
+#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+ : : "r" (addr))
+#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+ ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \
+ : : "r" (addr))
+#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" \
+ ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
+ : : "r" (addr))
+
+#define asm_io_fdc(addr) asm volatile("fdc %%r0(%0)" \
+ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
+ ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \
+ : : "r" (addr))
+#define asm_io_sync() asm volatile("sync" \
+ ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
+ ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: )
#endif /* ! __ASSEMBLY__ */
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index ab8a54771507..e03e3c849f40 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -8,36 +8,22 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "parisc\0\0"
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u32 __compat_uid_t;
typedef u32 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u32 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
-typedef s32 compat_key_t;
-typedef s32 compat_timer_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
struct compat_stat {
compat_dev_t st_dev; /* dev_t is 32 bits on parisc */
@@ -48,11 +34,11 @@ struct compat_stat {
u16 st_reserved2; /* old st_gid */
compat_dev_t st_rdev;
compat_off_t st_size;
- compat_time_t st_atime;
+ old_time32_t st_atime;
u32 st_atime_nsec;
- compat_time_t st_mtime;
+ old_time32_t st_mtime;
u32 st_mtime_nsec;
- compat_time_t st_ctime;
+ old_time32_t st_ctime;
u32 st_ctime_nsec;
s32 st_blksize;
s32 st_blocks;
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index af00fe9bf846..b77f49ce6220 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -117,14 +117,16 @@ extern int npmem_ranges;
/* This governs the relationship between virtual and physical addresses.
* If you alter it, make sure to take care of our various fixed mapping
* segments in fixmap.h */
-#if defined(BOOTLOADER)
-#define __PAGE_OFFSET (0) /* bootloader uses physical addresses */
-#else
#ifdef CONFIG_64BIT
-#define __PAGE_OFFSET (0x40000000) /* 1GB */
+#define __PAGE_OFFSET_DEFAULT (0x40000000) /* 1GB */
#else
-#define __PAGE_OFFSET (0x10000000) /* 256MB */
+#define __PAGE_OFFSET_DEFAULT (0x10000000) /* 256MB */
#endif
+
+#if defined(BOOTLOADER)
+#define __PAGE_OFFSET (0) /* bootloader uses physical addresses */
+#else
+#define __PAGE_OFFSET __PAGE_OFFSET_DEFAULT
#endif /* BOOTLOADER */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index 339e83ddb39e..5b187d40d604 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -11,6 +11,7 @@ extern int parisc_narrow_firmware;
extern int pdc_type;
extern unsigned long parisc_cell_num; /* cell number the CPU runs on (PAT) */
extern unsigned long parisc_cell_loc; /* cell location of CPU (PAT) */
+extern unsigned long parisc_pat_pdc_cap; /* PDC capabilities (PAT) */
/* Values for pdc_type */
#define PDC_TYPE_ILLEGAL -1
diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h
index a468a172ee33..bce9ee1c1c99 100644
--- a/arch/parisc/include/asm/pdcpat.h
+++ b/arch/parisc/include/asm/pdcpat.h
@@ -173,6 +173,16 @@
/* PDC PAT PD */
#define PDC_PAT_PD 74L /* Protection Domain Info */
#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
+#define PDC_PAT_PD_GET_PDC_INTERF_REV 1L /* Get PDC Interface Revisions */
+
+#define PDC_PAT_CAPABILITY_BIT_PDC_SERIALIZE (1UL << 0)
+#define PDC_PAT_CAPABILITY_BIT_PDC_POLLING (1UL << 1)
+#define PDC_PAT_CAPABILITY_BIT_PDC_NBC (1UL << 2) /* non-blocking calls */
+#define PDC_PAT_CAPABILITY_BIT_PDC_UFO (1UL << 3)
+#define PDC_PAT_CAPABILITY_BIT_PDC_IODC_32 (1UL << 4)
+#define PDC_PAT_CAPABILITY_BIT_PDC_IODC_64 (1UL << 5)
+#define PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ (1UL << 6)
+#define PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB (1UL << 7)
/* PDC_PAT_PD_GET_ADDR_MAP entry types */
#define PAT_MEMORY_DESCRIPTOR 1
@@ -186,6 +196,14 @@
#define PAT_MEMUSE_GI 128
#define PAT_MEMUSE_GNI 129
+/* PDC PAT REGISTER TOC */
+#define PDC_PAT_REGISTER_TOC 75L
+#define PDC_PAT_TOC_REGISTER_VECTOR 0L /* Register TOC Vector */
+#define PDC_PAT_TOC_READ_VECTOR 1L /* Read TOC Vector */
+
+/* PDC PAT SYSTEM_INFO */
+#define PDC_PAT_SYSTEM_INFO 76L
+/* PDC_PAT_SYSTEM_INFO uses the same options as PDC_SYSTEM_INFO function. */
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -297,18 +315,29 @@ struct pdc_pat_pd_addr_map_entry {
** PDC_PAT_CELL_GET_INFO return block
*/
typedef struct pdc_pat_cell_info_rtn_block {
- unsigned long cpu_info;
- unsigned long cell_info;
- unsigned long cell_location;
- unsigned long reo_location;
- unsigned long mem_size;
- unsigned long dimm_status;
unsigned long pdc_rev;
- unsigned long fabric_info0;
- unsigned long fabric_info1;
- unsigned long fabric_info2;
- unsigned long fabric_info3;
- unsigned long reserved[21];
+ unsigned long capabilities; /* see PDC_PAT_CAPABILITY_BIT_* */
+ unsigned long reserved0[2];
+ unsigned long cell_info; /* 0x20 */
+ unsigned long cell_phys_location;
+ unsigned long cpu_info;
+ unsigned long cpu_speed;
+ unsigned long io_chassis_phys_location;
+ unsigned long cell_io_information;
+ unsigned long reserved1[2];
+ unsigned long io_slot_info_size; /* 0x60 */
+ struct {
+ unsigned long header, info0, info1;
+ unsigned long phys_loc, hw_path;
+ } io_slot[16];
+ unsigned long cell_mem_size; /* 0x2e8 */
+ unsigned long cell_dimm_info_size;
+ unsigned long dimm_info[16];
+ unsigned long fabric_info_size; /* 0x3f8 */
+ struct { /* 0x380 */
+ unsigned long fabric_info_xbc_port;
+ unsigned long rc_attached_to_xbc;
+ } xbc[8*4];
} pdc_pat_cell_info_rtn_block_t;
@@ -326,12 +355,19 @@ typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
-extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr);
+extern int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info,
+ unsigned long *actcnt, unsigned long offset,
+ unsigned long cell_number);
+extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc,
+ unsigned long mod, unsigned long view_type, void *mem_addr);
extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa);
-extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
+extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
+ unsigned long count, unsigned long offset);
+extern int pdc_pat_pd_get_pdc_revisions(unsigned long *legacy_rev,
+ unsigned long *pat_rev, unsigned long *pdc_cap);
extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val);
extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val);
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index fa6b7c78f18a..b941ac7d4e70 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -43,8 +43,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
mtsp(mm->context, 1);
pdtlb(addr);
- if (unlikely(split_tlb))
- pitlb(addr);
+ pitlb(addr);
}
/* Certain architectures need to do special things when PTEs
@@ -56,19 +55,14 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
*(pteptr) = (pteval); \
} while(0)
-#define pte_inserted(x) \
- ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \
- == (_PAGE_PRESENT|_PAGE_ACCESSED))
-
#define set_pte_at(mm, addr, ptep, pteval) \
do { \
pte_t old_pte; \
unsigned long flags; \
spin_lock_irqsave(&pa_tlb_lock, flags); \
old_pte = *ptep; \
- if (pte_inserted(old_pte)) \
- purge_tlb_entries(mm, addr); \
set_pte(ptep, pteval); \
+ purge_tlb_entries(mm, addr); \
spin_unlock_irqrestore(&pa_tlb_lock, flags); \
} while (0)
@@ -202,7 +196,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXEC)
@@ -227,22 +221,22 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#ifndef __ASSEMBLY__
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable, I don't know if that's correct
or not. The stack is mapped this way though so this is necessary
in the short term - dhd@linuxcare.com, 2000-08-08 */
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
-#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
-#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
+#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
+#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY PAGE_EXECREAD
-#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
-#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
+#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY| _PAGE_READ)
/*
@@ -479,8 +473,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 0;
}
- purge_tlb_entries(vma->vm_mm, addr);
set_pte(ptep, pte_mkold(pte));
+ purge_tlb_entries(vma->vm_mm, addr);
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 1;
}
@@ -493,9 +487,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
spin_lock_irqsave(&pa_tlb_lock, flags);
old_pte = *ptep;
- if (pte_inserted(old_pte))
- purge_tlb_entries(mm, addr);
set_pte(ptep, __pte(0));
+ purge_tlb_entries(mm, addr);
spin_unlock_irqrestore(&pa_tlb_lock, flags);
return old_pte;
@@ -505,8 +498,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
{
unsigned long flags;
spin_lock_irqsave(&pa_tlb_lock, flags);
- purge_tlb_entries(mm, addr);
set_pte(ptep, pte_wrprotect(*ptep));
+ purge_tlb_entries(mm, addr);
spin_unlock_irqrestore(&pa_tlb_lock, flags);
}
diff --git a/arch/parisc/include/asm/sections.h b/arch/parisc/include/asm/sections.h
index 5a40b51df80c..bb52aea0cb21 100644
--- a/arch/parisc/include/asm/sections.h
+++ b/arch/parisc/include/asm/sections.h
@@ -5,6 +5,8 @@
/* nothing to see, move along */
#include <asm-generic/sections.h>
+extern char __alt_instructions[], __alt_instructions_end[];
+
#ifdef CONFIG_64BIT
#define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 8a63515f03bf..16aec9ba2580 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
volatile unsigned int *a;
a = __ldcw_align(x);
- mb();
- *a = 1;
+ /* Release with ordered store. */
+ __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
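The unlock above replaces the mb(); plain-store pair with a single ordered
store ("stw,ma" plus a memory clobber), i.e. release semantics. The
portable equivalent, as an analogy rather than the kernel's code:

#include <stdatomic.h>

static void spin_unlock_sketch(atomic_uint *lock_word)
{
        /* Orders all earlier critical-section accesses before the store. */
        atomic_store_explicit(lock_word, 1, memory_order_release);
}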
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 14668bd52d60..6804374efa66 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -85,8 +85,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
purge_tlb_start(flags);
mtsp(sid, 1);
pdtlb(addr);
- if (unlikely(split_tlb))
- pitlb(addr);
+ pitlb(addr);
purge_tlb_end(flags);
}
#endif
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 3d507d04eb4c..bc37a4953eaa 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -141,6 +141,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
return K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5); \
}
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
@@ -151,11 +152,11 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_COMPAT_SYS_TIME
#define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_WAITPID
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index 286ef5a5904b..adb5c64831c7 100644
--- a/arch/parisc/include/uapi/asm/Kbuild
+++ b/arch/parisc/include/uapi/asm/Kbuild
@@ -7,3 +7,4 @@ generic-y += kvm_para.h
generic-y += param.h
generic-y += poll.h
generic-y += resource.h
+generic-y += siginfo.h
diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h
deleted file mode 100644
index 4a1062e05aaf..000000000000
--- a/arch/parisc/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _PARISC_SIGINFO_H
-#define _PARISC_SIGINFO_H
-
-#if defined(__LP64__)
-#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-#endif
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index bddd2acebdcc..804880efa11e 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -36,6 +36,7 @@ EXPORT_SYMBOL(dcache_stride);
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
+void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
@@ -303,6 +304,17 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}
+static inline void
+__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ unsigned long physaddr)
+{
+ preempt_disable();
+ purge_dcache_page_asm(physaddr, vmaddr);
+ if (vma->vm_flags & VM_EXEC)
+ flush_icache_page_asm(physaddr, vmaddr);
+ preempt_enable();
+}
+
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping_file(page);
@@ -364,7 +376,7 @@ EXPORT_SYMBOL(flush_kernel_icache_range_asm);
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
-#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
+#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
void __init parisc_setup_cache_timing(void)
@@ -404,10 +416,6 @@ void __init parisc_setup_cache_timing(void)
goto set_tlb_threshold;
}
- alltime = mfctl(16);
- flush_tlb_all();
- alltime = mfctl(16) - alltime;
-
size = 0;
start = (unsigned long) _text;
rangetime = mfctl(16);
@@ -418,13 +426,19 @@ void __init parisc_setup_cache_timing(void)
}
rangetime = mfctl(16) - rangetime;
- printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
+ alltime = mfctl(16);
+ flush_tlb_all();
+ alltime = mfctl(16) - alltime;
+
+ printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
alltime, size, rangetime);
- threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
+ threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
+ printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
+ threshold/1024);
set_tlb_threshold:
- if (threshold)
+ if (threshold > parisc_tlb_flush_threshold)
parisc_tlb_flush_threshold = threshold;
printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
parisc_tlb_flush_threshold/1024);
@@ -477,18 +491,6 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
/* Purge TLB entries for small ranges using the pdtlb and
pitlb instructions. These instructions execute locally
but cause a purge request to be broadcast to other TLBs. */
- if (likely(!split_tlb)) {
- while (start < end) {
- purge_tlb_start(flags);
- mtsp(sid, 1);
- pdtlb(start);
- purge_tlb_end(flags);
- start += PAGE_SIZE;
- }
- return 0;
- }
-
- /* split TLB case */
while (start < end) {
purge_tlb_start(flags);
mtsp(sid, 1);
@@ -573,9 +575,12 @@ void flush_cache_mm(struct mm_struct *mm)
pfn = pte_pfn(*ptep);
if (!pfn_valid(pfn))
continue;
- if (unlikely(mm->context))
+ if (unlikely(mm->context)) {
flush_tlb_page(vma, addr);
- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ } else {
+ __purge_cache_page(vma, addr, PFN_PHYS(pfn));
+ }
}
}
}
@@ -610,9 +615,12 @@ void flush_cache_range(struct vm_area_struct *vma,
continue;
pfn = pte_pfn(*ptep);
if (pfn_valid(pfn)) {
- if (unlikely(vma->vm_mm->context))
+ if (unlikely(vma->vm_mm->context)) {
flush_tlb_page(vma, addr);
- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ } else {
+ __purge_cache_page(vma, addr, PFN_PHYS(pfn));
+ }
}
}
}
@@ -621,9 +629,12 @@ void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
if (pfn_valid(pfn)) {
- if (likely(vma->vm_mm->context))
+ if (likely(vma->vm_mm->context)) {
flush_tlb_page(vma, vmaddr);
- __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ } else {
+ __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
}
}
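The reordered timing code above measures a range flush over the kernel text
first and a whole-TLB flush second, then scales the break-even point by the
CPU count. A worked example with made-up cycle counts:

#include <stdio.h>

int main(void)
{
        unsigned long cpus = 4;
        unsigned long size = 0x200000UL;  /* bytes range-flushed (2 MiB) */
        unsigned long alltime = 2500;     /* cycles for one whole-TLB flush */
        unsigned long rangetime = 100000; /* cycles to range-flush 'size' */
        unsigned long threshold = cpus * size * alltime / rangetime;

        /* Ranges below this are flushed page by page, larger ones wholesale. */
        printf("TLB flush threshold ~%lu KiB\n", threshold / 1024);
        return 0;
}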
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 242c5ab65611..1c60408a64ad 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -38,6 +38,7 @@
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
+#include <asm/alternative.h>
#include <linux/linkage.h>
@@ -186,7 +187,7 @@
bv,n 0(%r3)
nop
.word 0 /* checksum (will be patched) */
- .word PA(os_hpmc) /* address of handler */
+ .word 0 /* address of handler */
.word 0 /* length of handler */
.endm
@@ -426,13 +427,10 @@
ldw,s \index(\pmd),\pmd
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
- copy \pmd,%r9
- SHLREG %r9,PxD_VALUE_SHIFT,\pmd
+ SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
- LDREG %r0(\pmd),\pte
- bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
.endm
/* Look up PTE in a 3-Level scheme.
@@ -448,7 +446,6 @@
.macro L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
- copy %r0,\pte
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
ldw,s \index(\pgd),\pgd
extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
@@ -463,36 +460,39 @@
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
- /* Acquire pa_tlb_lock lock and recheck page is still present. */
+ /* Acquire pa_tlb_lock lock and check page is present. */
.macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
- cmpib,COND(=),n 0,\spc,2f
+98: cmpib,COND(=),n 0,\spc,2f
load_pa_tlb_lock \tmp
1: LDCW 0(\tmp),\tmp1
cmpib,COND(=) 0,\tmp1,1b
nop
LDREG 0(\ptp),\pte
- bb,<,n \pte,_PAGE_PRESENT_BIT,2f
+ bb,<,n \pte,_PAGE_PRESENT_BIT,3f
b \fault
- stw \spc,0(\tmp)
-2:
+ stw,ma \spc,0(\tmp)
+99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
+2: LDREG 0(\ptp),\pte
+ bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
+3:
.endm
/* Release pa_tlb_lock lock without reloading lock address. */
.macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
- or,COND(=) %r0,\spc,%r0
- sync
- or,COND(=) %r0,\spc,%r0
- stw \spc,0(\tmp)
+98: or,COND(=) %r0,\spc,%r0
+ stw,ma \spc,0(\tmp)
+99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
/* Release pa_tlb_lock lock. */
.macro tlb_unlock1 spc,tmp
#ifdef CONFIG_SMP
- load_pa_tlb_lock \tmp
+98: load_pa_tlb_lock \tmp
+99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
tlb_unlock0 \spc,\tmp
#endif
.endm
@@ -1658,7 +1658,7 @@ dbit_fault:
itlb_fault:
b intr_save
- ldi 6,%r8
+ ldi PARISC_ITLB_TRAP,%r8
nadtlb_fault:
b intr_save
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 6d471c00c71a..e6f3b49f2fd7 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -1326,6 +1326,36 @@ int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long
}
/**
+ * pdc_pat_cell_info - Retrieve the cell's information.
+ * @info: The pointer to a struct pdc_pat_cell_info_rtn_block.
+ * @actcnt: The number of bytes which should be written to info.
+ * @offset: offset of the structure.
+ * @cell_number: The cell number to query, or -1 for the current cell.
+ *
+ * This PDC call returns information about the given cell (or all cells).
+ */
+int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info,
+ unsigned long *actcnt, unsigned long offset,
+ unsigned long cell_number)
+{
+ int retval;
+ unsigned long flags;
+ struct pdc_pat_cell_info_rtn_block result;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_GET_INFO,
+ __pa(pdc_result), __pa(&result), *actcnt,
+ offset, cell_number);
+ if (!retval) {
+ *actcnt = pdc_result[0];
+ memcpy(info, &result, *actcnt);
+ }
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
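A hypothetical caller, to illustrate the in/out convention of @actcnt (the struct and PDC_OK are real; the surrounding lines are a sketch, not code from this patch):

	struct pdc_pat_cell_info_rtn_block info;
	unsigned long len = sizeof(info);

	if (pdc_pat_cell_info(&info, &len, 0, -1UL) == PDC_OK)
		pr_info("PAT: received %lu bytes of cell info\n", len);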
+
+/**
* pdc_pat_cpu_get_number - Retrieve the cpu number.
* @cpu_info: The return buffer.
* @hpa: The Hard Physical Address of the CPU.
@@ -1413,6 +1443,33 @@ int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
}
/**
+ * pdc_pat_pd_get_pdc_revisions - Retrieve PDC interface revisions.
+ * @legacy_rev: The legacy PDC revision.
+ * @pat_rev: The PAT PDC revision.
+ * @pdc_cap: The PDC capabilities.
+ *
+ * This PDC call returns the revision levels and capabilities of the
+ * PAT PDC firmware.
+ */
+int pdc_pat_pd_get_pdc_revisions(unsigned long *legacy_rev,
+ unsigned long *pat_rev, unsigned long *pdc_cap)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_PD, PDC_PAT_PD_GET_PDC_INTERF_REV,
+ __pa(pdc_result));
+ if (retval == PDC_OK) {
+ *legacy_rev = pdc_result[0];
+ *pat_rev = pdc_result[1];
+ *pdc_cap = pdc_result[2];
+ }
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
* pdc_pat_io_pci_cfg_read - Read PCI configuration space.
* @pci_addr: PCI configuration space address for which the read request is being made.
* @pci_size: Size of read in bytes. Valid values are 1, 2, and 4.
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 781c3b9a3e46..fde654115564 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -85,7 +85,7 @@ END(hpmc_pim_data)
.import intr_save, code
.align 16
-ENTRY_CFI(os_hpmc)
+ENTRY(os_hpmc)
.os_hpmc:
/*
@@ -302,7 +302,6 @@ os_hpmc_6:
b .
nop
.align 16 /* make function length multiple of 16 bytes */
-ENDPROC_CFI(os_hpmc)
.os_hpmc_end:
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index b0fe19ac4d78..35d05fdd7483 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -43,6 +43,7 @@ int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;
/* cell number and location (PAT firmware only) */
unsigned long parisc_cell_num __read_mostly;
unsigned long parisc_cell_loc __read_mostly;
+unsigned long parisc_pat_pdc_cap __read_mostly;
void __init setup_pdc(void)
@@ -81,12 +82,21 @@ void __init setup_pdc(void)
#ifdef CONFIG_64BIT
status = pdc_pat_cell_get_number(&cell_info);
if (status == PDC_OK) {
+ unsigned long legacy_rev, pat_rev;
pdc_type = PDC_TYPE_PAT;
pr_cont("64 bit PAT.\n");
parisc_cell_num = cell_info.cell_num;
parisc_cell_loc = cell_info.cell_loc;
pr_info("PAT: Running on cell %lu and location %lu.\n",
parisc_cell_num, parisc_cell_loc);
+ status = pdc_pat_pd_get_pdc_revisions(&legacy_rev,
+ &pat_rev, &parisc_pat_pdc_cap);
+ pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n",
+ legacy_rev, pat_rev, parisc_pat_pdc_cap,
+ parisc_pat_pdc_cap
+ & PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0,
+ parisc_pat_pdc_cap
+ & PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ ? 1:0);
return;
}
#endif
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index f33bf2d306d6..187f032c9dd8 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -37,6 +37,7 @@
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ldcw.h>
+#include <asm/alternative.h>
#include <linux/linkage.h>
#include <linux/init.h>
@@ -190,7 +191,7 @@ ENDPROC_CFI(flush_tlb_all_local)
.import cache_info,data
ENTRY_CFI(flush_instruction_cache_local)
- load32 cache_info, %r1
+88: load32 cache_info, %r1
/* Flush Instruction Cache */
@@ -243,6 +244,7 @@ fioneloop2:
fisync:
sync
mtsm %r22 /* restore I-bit */
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
bv %r0(%r2)
nop
ENDPROC_CFI(flush_instruction_cache_local)
@@ -250,7 +252,7 @@ ENDPROC_CFI(flush_instruction_cache_local)
.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
- load32 cache_info, %r1
+88: load32 cache_info, %r1
/* Flush Data Cache */
@@ -304,6 +306,7 @@ fdsync:
syncdma
sync
mtsm %r22 /* restore I-bit */
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
bv %r0(%r2)
nop
ENDPROC_CFI(flush_data_cache_local)
@@ -312,6 +315,7 @@ ENDPROC_CFI(flush_data_cache_local)
.macro tlb_lock la,flags,tmp
#ifdef CONFIG_SMP
+98:
#if __PA_LDCW_ALIGNMENT > 4
load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
@@ -326,15 +330,17 @@ ENDPROC_CFI(flush_data_cache_local)
nop
b,n 2b
3:
+99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
.macro tlb_unlock la,flags,tmp
#ifdef CONFIG_SMP
- ldi 1,\tmp
+98: ldi 1,\tmp
sync
stw \tmp,0(\la)
mtsm \flags
+99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
@@ -596,9 +602,11 @@ ENTRY_CFI(copy_user_page_asm)
pdtlb,l %r0(%r29)
#else
tlb_lock %r20,%r21,%r22
- pdtlb %r0(%r28)
- pdtlb %r0(%r29)
+0: pdtlb %r0(%r28)
+1: pdtlb %r0(%r29)
tlb_unlock %r20,%r21,%r22
+ ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
+ ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
#ifdef CONFIG_64BIT
@@ -736,8 +744,9 @@ ENTRY_CFI(clear_user_page_asm)
pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
- pdtlb %r0(%r28)
+0: pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
+ ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
#ifdef CONFIG_64BIT
@@ -813,11 +822,12 @@ ENTRY_CFI(flush_dcache_page_asm)
pdtlb,l %r0(%r28)
#else
tlb_lock %r20,%r21,%r22
- pdtlb %r0(%r28)
+0: pdtlb %r0(%r28)
tlb_unlock %r20,%r21,%r22
+ ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
- ldil L%dcache_stride, %r1
+88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), r31
#ifdef CONFIG_64BIT
@@ -828,8 +838,7 @@ ENTRY_CFI(flush_dcache_page_asm)
add %r28, %r25, %r25
sub %r25, r31, %r25
-
-1: fdc,m r31(%r28)
+1: fdc,m r31(%r28)
fdc,m r31(%r28)
fdc,m r31(%r28)
fdc,m r31(%r28)
@@ -844,14 +853,76 @@ ENTRY_CFI(flush_dcache_page_asm)
fdc,m r31(%r28)
fdc,m r31(%r28)
fdc,m r31(%r28)
- cmpb,COND(<<) %r28, %r25,1b
+ cmpb,COND(>>) %r25, %r28, 1b /* predict taken */
fdc,m r31(%r28)
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_dcache_page_asm)
+ENTRY_CFI(purge_dcache_page_asm)
+ ldil L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+ depdi 0, 31,32, %r28 /* clear any sign extension */
+#endif
+ convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
+ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
+#else
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
+#endif
+
+ /* Purge any old translation */
+
+#ifdef CONFIG_PA20
+ pdtlb,l %r0(%r28)
+#else
+ tlb_lock %r20,%r21,%r22
+0: pdtlb %r0(%r28)
+ tlb_unlock %r20,%r21,%r22
+ ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
+#endif
+
+88: ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), r31
+
+#ifdef CONFIG_64BIT
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r28, %r25, %r25
+ sub %r25, r31, %r25
+
+1: pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ pdc,m r31(%r28)
+ cmpb,COND(>>) %r25, %r28, 1b /* predict taken */
+ pdc,m r31(%r28)
+
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
+ sync
+ bv %r0(%r2)
+ nop
+ENDPROC_CFI(purge_dcache_page_asm)
+
ENTRY_CFI(flush_icache_page_asm)
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
@@ -874,15 +945,19 @@ ENTRY_CFI(flush_icache_page_asm)
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
- pitlb,l %r0(%sr4,%r28)
+1: pitlb,l %r0(%sr4,%r28)
+ ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
#else
tlb_lock %r20,%r21,%r22
- pdtlb %r0(%r28)
- pitlb %r0(%sr4,%r28)
+0: pdtlb %r0(%r28)
+1: pitlb %r0(%sr4,%r28)
tlb_unlock %r20,%r21,%r22
+ ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
+ ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB)
+ ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
#endif
- ldil L%icache_stride, %r1
+88: ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r31
#ifdef CONFIG_64BIT
@@ -893,7 +968,6 @@ ENTRY_CFI(flush_icache_page_asm)
add %r28, %r25, %r25
sub %r25, %r31, %r25
-
/* fic only has the type 26 form on PA1.1, requiring an
* explicit space specification, so use %sr4 */
1: fic,m %r31(%sr4,%r28)
@@ -911,16 +985,17 @@ ENTRY_CFI(flush_icache_page_asm)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
fic,m %r31(%sr4,%r28)
- cmpb,COND(<<) %r28, %r25,1b
+ cmpb,COND(>>) %r25, %r28, 1b /* predict taken */
fic,m %r31(%sr4,%r28)
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
- ldil L%dcache_stride, %r1
+88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
#ifdef CONFIG_64BIT
@@ -931,7 +1006,6 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
add %r26, %r25, %r25
sub %r25, %r23, %r25
-
1: fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
@@ -947,16 +1021,17 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
- cmpb,COND(<<) %r26, %r25,1b
+ cmpb,COND(>>) %r25, %r26, 1b /* predict taken */
fdc,m %r23(%r26)
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
- ldil L%dcache_stride, %r1
+88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
#ifdef CONFIG_64BIT
@@ -982,74 +1057,183 @@ ENTRY_CFI(purge_kernel_dcache_page_asm)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
- cmpb,COND(<<) %r26, %r25, 1b
+ cmpb,COND(>>) %r25, %r26, 1b /* predict taken */
pdc,m %r23(%r26)
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(purge_kernel_dcache_page_asm)
ENTRY_CFI(flush_user_dcache_range_asm)
- ldil L%dcache_stride, %r1
+88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
-1: cmpb,COND(<<),n %r26, %r25, 1b
+#ifdef CONFIG_64BIT
+ depd,z %r23, 59, 60, %r21
+#else
+ depw,z %r23, 27, 28, %r21
+#endif
+ add %r26, %r21, %r22
+ cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */
+1: add %r22, %r21, %r22
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */
fdc,m %r23(%sr3, %r26)
+2: cmpb,COND(>>),n %r25, %r26, 2b
+ fdc,m %r23(%sr3, %r26)
+
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_user_dcache_range_asm)
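The restructured range loops (this one and those that follow) all share one shape: a 16-way unrolled main loop that runs while a full block of cache lines still fits below the end address, followed by a one-line-at-a-time tail. A C sketch of that structure, where flush_dcache_line() is an assumed stand-in for the fdc,m instruction and delay-slot scheduling is ignored:

	static void flush_range(unsigned long addr, unsigned long end,
				unsigned long stride)
	{
		unsigned long next = addr + 16 * stride;	/* lookahead, as in %r22 */

		while (next <= end) {		/* main loop: 16 lines per pass */
			int i;

			for (i = 0; i < 16; i++, addr += stride)
				flush_dcache_line(addr);	/* fdc,m */
			next += 16 * stride;
		}
		while (end > addr) {		/* tail: one line at a time */
			flush_dcache_line(addr);
			addr += stride;
		}
	}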
ENTRY_CFI(flush_kernel_dcache_range_asm)
- ldil L%dcache_stride, %r1
+88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
-1: cmpb,COND(<<),n %r26, %r25,1b
+#ifdef CONFIG_64BIT
+ depd,z %r23, 59, 60, %r21
+#else
+ depw,z %r23, 27, 28, %r21
+#endif
+ add %r26, %r21, %r22
+ cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */
+1: add %r22, %r21, %r22
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */
+ fdc,m %r23(%r26)
+
+2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */
fdc,m %r23(%r26)
sync
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
syncdma
bv %r0(%r2)
nop
ENDPROC_CFI(flush_kernel_dcache_range_asm)
ENTRY_CFI(purge_kernel_dcache_range_asm)
- ldil L%dcache_stride, %r1
+88: ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
-1: cmpb,COND(<<),n %r26, %r25,1b
+#ifdef CONFIG_64BIT
+ depd,z %r23, 59, 60, %r21
+#else
+ depw,z %r23, 27, 28, %r21
+#endif
+ add %r26, %r21, %r22
+ cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */
+1: add %r22, %r21, %r22
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */
+ pdc,m %r23(%r26)
+
+2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */
pdc,m %r23(%r26)
sync
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
syncdma
bv %r0(%r2)
nop
ENDPROC_CFI(purge_kernel_dcache_range_asm)
ENTRY_CFI(flush_user_icache_range_asm)
- ldil L%icache_stride, %r1
+88: ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
-1: cmpb,COND(<<),n %r26, %r25,1b
+#ifdef CONFIG_64BIT
+ depd,z %r23, 59, 60, %r21
+#else
+ depw,z %r23, 27, 28, %r21
+#endif
+ add %r26, %r21, %r22
+ cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */
+1: add %r22, %r21, %r22
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */
+ fic,m %r23(%sr3, %r26)
+
+2: cmpb,COND(>>),n %r25, %r26, 2b
fic,m %r23(%sr3, %r26)
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_user_icache_range_asm)
ENTRY_CFI(flush_kernel_icache_page)
- ldil L%icache_stride, %r1
+88: ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
#ifdef CONFIG_64BIT
@@ -1076,23 +1260,51 @@ ENTRY_CFI(flush_kernel_icache_page)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
- cmpb,COND(<<) %r26, %r25, 1b
+ cmpb,COND(>>) %r25, %r26, 1b /* predict taken */
fic,m %r23(%sr4, %r26)
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
ENDPROC_CFI(flush_kernel_icache_page)
ENTRY_CFI(flush_kernel_icache_range_asm)
- ldil L%icache_stride, %r1
+88: ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r23
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
-1: cmpb,COND(<<),n %r26, %r25, 1b
+#ifdef CONFIG_64BIT
+ depd,z %r23, 59, 60, %r21
+#else
+ depw,z %r23, 27, 28, %r21
+#endif
+ add %r26, %r21, %r22
+ cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */
+1: add %r22, %r21, %r22
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ fic,m %r23(%sr4, %r26)
+ cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */
+ fic,m %r23(%sr4, %r26)
+
+2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */
fic,m %r23(%sr4, %r26)
+89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP)
sync
bv %r0(%r2)
nop
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 4e87c35c22b7..cd227f1cf629 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -102,7 +102,7 @@ void __init dma_ops_init(void)
case pcxl: /* falls through */
case pcxs:
case pcxt:
- hppa_dma_ops = &dma_noncoherent_ops;
+ hppa_dma_ops = &dma_direct_ops;
break;
default:
break;
@@ -305,6 +305,86 @@ static int __init parisc_init_resources(void)
return 0;
}
+static int no_alternatives __initdata;
+static int __init setup_no_alternatives(char *str)
+{
+ no_alternatives = 1;
+ return 1;
+}
+__setup("no-alternatives", setup_no_alternatives);
+
+static void __init apply_alternatives_all(void)
+{
+ struct alt_instr *entry;
+ int index = 0, applied = 0;
+
+ pr_info("alternatives: %spatching kernel code\n",
+ no_alternatives ? "NOT " : "");
+ if (no_alternatives)
+ return;
+
+ set_kernel_text_rw(1);
+
+ for (entry = (struct alt_instr *) &__alt_instructions;
+ entry < (struct alt_instr *) &__alt_instructions_end;
+ entry++, index++) {
+
+ u32 *from, len, cond, replacement;
+
+ from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset);
+ len = entry->len;
+ cond = entry->cond;
+ replacement = entry->replacement;
+
+ WARN_ON(!cond);
+ pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
+ index, cond, len, from, replacement);
+
+ if ((cond & ALT_COND_NO_SMP) && (num_online_cpus() != 1))
+ continue;
+ if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0))
+ continue;
+ if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0))
+ continue;
+
+ /*
+ * If the PDC_MODEL capabilities word has the Non-coherent IO-PDIR
+ * bit set (bit #61, big endian), we have to flush and sync every
+ * time the IO-PDIR is changed in Ike/Astro.
+ */
+ if ((cond & ALT_COND_NO_IOC_FDC) &&
+ (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC))
+ continue;
+
+ /* Should a pdtlb be replaced with a pdtlb,l instruction? */
+ if (replacement == INSN_PxTLB) {
+ replacement = *from;
+ if (boot_cpu_data.cpu_type >= pcxu) /* >= pa2.0 ? */
+ replacement |= (1 << 10); /* set el bit */
+ }
+
+ /*
+ * Replace the instruction with NOPs?
+ * For longer spans, insert a branch over the region instead.
+ */
+ if (replacement == INSN_NOP && len > 1)
+ replacement = 0xe8000002 + (len-2)*8; /* "b,n .+8" */
+
+ pr_debug("Do %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
+ index, cond, len, from, replacement);
+
+ /* Replace instruction */
+ *from = replacement;
+ applied++;
+ }
+
+ pr_info("alternatives: applied %d out of %d patches\n", applied, index);
+
+ set_kernel_text_rw(0);
+}
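A worked example of the two transforms above, runnable as plain C; the pdtlb word is a placeholder value, and only the arithmetic mirrors the kernel code:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t insn = 0x04001200;	/* placeholder, not a real pdtlb encoding */
		uint32_t len = 5;		/* instructions in the patched region */

		/* INSN_PxTLB: keep the original word and set the "l" bit */
		uint32_t local = insn | (1 << 10);

		/* INSN_NOP with len > 1: branch over the region instead;
		 * 0xe8000002 is "b,n .+8" and each extra slot adds 8 */
		uint32_t branch = 0xe8000002 + (len - 2) * 8;

		printf("pdtlb,l = 0x%08x, branch = 0x%08x\n", local, branch);
		return 0;
	}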
+
extern void gsc_init(void);
extern void processor_init(void);
extern void ccio_init(void);
@@ -346,6 +426,7 @@ static int __init parisc_init(void)
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
+ apply_alternatives_all();
parisc_setup_cache_timing();
/* These are in a non-obvious order, will fix when we have an iotree */
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 342073f44d3f..848c1934680b 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -65,7 +65,6 @@
#define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */
#define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */
#define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */
-#define INSN_NOP 0x08000240 /* nop */
/* For debugging */
#define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f453997a7b8f..f5f22ea9b97e 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -640,8 +640,7 @@ cas_action:
sub,<> %r28, %r25, %r0
2: stw %r24, 0(%r26)
/* Free lock */
- sync
- stw %r20, 0(%sr2,%r20)
+ stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
@@ -655,8 +654,7 @@ cas_action:
3:
/* Error occurred on load or store */
/* Free lock */
- sync
- stw %r20, 0(%sr2,%r20)
+ stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
#endif
@@ -857,8 +855,7 @@ cas2_action:
cas2_end:
/* Free lock */
- sync
- stw %r20, 0(%sr2,%r20)
+ stw,ma %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
@@ -868,8 +865,7 @@ cas2_end:
22:
/* Error occurred on load or store */
/* Free lock */
- sync
- stw %r20, 0(%sr2,%r20)
+ stw,ma %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
b lws_exit
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 68f10f87073d..472a818e8c17 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -430,8 +430,8 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
}
printk("\n");
- pr_crit("%s: Code=%d (%s) regs=%p (Addr=" RFMT ")\n",
- msg, code, trap_name(code), regs, offset);
+ pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
+ msg, code, trap_name(code), offset);
show_regs(regs);
spin_unlock(&terminate_lock);
@@ -802,7 +802,8 @@ void __init initialize_ivt(const void *iva)
* the Length/4 words starting at Address is zero.
*/
- /* Compute Checksum for HPMC handler */
+ /* Set up the IVA and compute the checksum for the HPMC handler */
+ ivap[6] = (u32)__pa(os_hpmc);
length = os_hpmc_size;
ivap[7] = length;
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index f329b466e68f..2d14f17838d2 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -426,7 +426,7 @@ void unwind_frame_init_task(struct unwind_frame_info *info,
r.gr[30] = get_parisc_stackpointer();
regs = &r;
}
- unwind_frame_init(info, task, &r);
+ unwind_frame_init(info, task, regs);
} else {
unwind_frame_init_from_blocked_task(info, task);
}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index da2e31190efa..c3b1b9c24ede 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -61,6 +61,12 @@ SECTIONS
EXIT_DATA
}
PERCPU_SECTION(8)
+ . = ALIGN(4);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
. = ALIGN(HUGEPAGE_SIZE);
__init_end = .;
/* freed after init ends here */
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 74842d28a7a1..e7e626bcd0be 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -494,12 +494,8 @@ static void __init map_pages(unsigned long start_vaddr,
pte = pte_mkhuge(pte);
}
- if (address >= end_paddr) {
- if (force)
- break;
- else
- pte_val(pte) = 0;
- }
+ if (address >= end_paddr)
+ break;
set_pte(pg_table, pte);
@@ -515,6 +511,21 @@ static void __init map_pages(unsigned long start_vaddr,
}
}
+void __init set_kernel_text_rw(int enable_read_write)
+{
+ unsigned long start = (unsigned long)_stext;
+ unsigned long end = (unsigned long)_etext;
+
+ map_pages(start, __pa(start), end-start,
+ PAGE_KERNEL_RWX, enable_read_write ? 1:0);
+
+ /* force the kernel to see the new TLB entries */
+ __flush_tlb_range(0, start, end);
+
+ /* discard stale cached instructions */
+ flush_icache_range(start, end);
+}
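The intended pairing, exactly as exercised by apply_alternatives_all() in setup.c above, shown here only to make the call order explicit:

	set_kernel_text_rw(1);	/* remap kernel text read-write */
	/* ... patch instruction words in place ... */
	set_kernel_text_rw(0);	/* restore the read-only mapping */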
+
void __ref free_initmem(void)
{
unsigned long init_begin = (unsigned long)__init_begin;
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 1f4691ce4126..c55ba3b4873b 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -150,4 +150,25 @@ extern s32 patch__memset_nocache, patch__memcpy_nocache;
extern long flush_count_cache;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+#else
+static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
+ bool preserve_nv) { }
+static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
+ bool preserve_nv) { }
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+void kvmhv_save_host_pmu(void);
+void kvmhv_load_host_pmu(void);
+void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
+void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
+
+int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
+
+long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
+long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
+ unsigned long dabrx);
+
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index b3520b549cba..66db23e2f4dc 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -203,6 +203,18 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
BUG();
}
+static inline unsigned int ap_to_shift(unsigned long ap)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+ if (mmu_psize_defs[psize].ap == ap)
+ return mmu_psize_defs[psize].shift;
+ }
+
+ return -1;
+}
+
static inline unsigned long get_sllp_encoding(int psize)
{
unsigned long sllp;
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 13a688fc8cd0..2a2486526d1f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -114,7 +114,7 @@
*/
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
_PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
- _PAGE_SOFT_DIRTY)
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
/*
* user access blocked by key
*/
@@ -132,7 +132,7 @@
*/
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
_PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
- _PAGE_SOFT_DIRTY)
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
#define H_PTE_PKEY (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
@@ -1051,7 +1051,6 @@ static inline void vmemmap_remove_mapping(unsigned long start,
return hash__vmemmap_remove_mapping(start, page_size);
}
#endif
-struct page *realmode_pfn_to_page(unsigned long pfn);
static inline pte_t pmd_pte(pmd_t pmd)
{
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 1154a6dc6d26..671316f9e95d 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -53,6 +53,7 @@ extern void radix__flush_tlb_lpid_page(unsigned int lpid,
unsigned long addr,
unsigned long page_size);
extern void radix__flush_pwc_lpid(unsigned int lpid);
+extern void radix__flush_tlb_lpid(unsigned int lpid);
extern void radix__local_flush_tlb_lpid(unsigned int lpid);
extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid);
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index fd06dbe7d7d3..fed7e6241349 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -133,7 +133,7 @@ struct pt_regs;
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern void _exception(int, struct pt_regs *, int, unsigned long);
-extern void _exception_pkey(int, struct pt_regs *, int, unsigned long, int);
+extern void _exception_pkey(struct pt_regs *, unsigned long, int);
extern void die(const char *, struct pt_regs *, long);
extern bool die_will_crash(void);
extern void panic_flush_kmsg_start(void);
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 85c8af2bb272..74d0db511099 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
#include <linux/sched.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#ifdef __BIG_ENDIAN__
#define COMPAT_UTS_MACHINE "ppc\0\0"
@@ -15,34 +17,18 @@
#define COMPAT_UTS_MACHINE "ppcle\0\0"
#endif
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u32 __compat_uid_t;
typedef u32 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u32 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u32 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef s16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_key_t;
-typedef s32 compat_timer_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
struct compat_stat {
compat_dev_t st_dev;
@@ -55,11 +41,11 @@ struct compat_stat {
compat_off_t st_size;
compat_off_t st_blksize;
compat_off_t st_blocks;
- compat_time_t st_atime;
+ old_time32_t st_atime;
u32 st_atime_nsec;
- compat_time_t st_mtime;
+ old_time32_t st_mtime;
u32 st_mtime_nsec;
- compat_time_t st_ctime;
+ old_time32_t st_ctime;
u32 st_ctime_nsec;
u32 __unused4[2];
};
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index a0b17f9f1ea4..45e8789bb770 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -322,6 +322,11 @@
#define H_GET_24X7_DATA 0xF07C
#define H_GET_PERF_COUNTER_INFO 0xF080
+/* Platform-specific hcalls used for nested HV KVM */
+#define H_SET_PARTITION_TABLE 0xF800
+#define H_ENTER_NESTED 0xF804
+#define H_TLB_INVALIDATE 0xF808
+
/* Values for 2nd argument to H_SET_MODE */
#define H_SET_MODE_RESOURCE_SET_CIABR 1
#define H_SET_MODE_RESOURCE_SET_DAWR 2
@@ -461,6 +466,42 @@ struct h_cpu_char_result {
u64 behaviour;
};
+/* Register state for entering a nested guest with H_ENTER_NESTED */
+struct hv_guest_state {
+ u64 version; /* version of this structure layout */
+ u32 lpid;
+ u32 vcpu_token;
+ /* These registers are hypervisor privileged (at least for writing) */
+ u64 lpcr;
+ u64 pcr;
+ u64 amor;
+ u64 dpdes;
+ u64 hfscr;
+ s64 tb_offset;
+ u64 dawr0;
+ u64 dawrx0;
+ u64 ciabr;
+ u64 hdec_expiry;
+ u64 purr;
+ u64 spurr;
+ u64 ic;
+ u64 vtb;
+ u64 hdar;
+ u64 hdsisr;
+ u64 heir;
+ u64 asdr;
+ /* These are OS privileged but need to be set late in guest entry */
+ u64 srr0;
+ u64 srr1;
+ u64 sprg[4];
+ u64 pidr;
+ u64 cfar;
+ u64 ppr;
+};
+
+/* Latest version of hv_guest_state structure */
+#define HV_GUEST_STATE_VERSION 1
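The version field lets the L0 hypervisor refuse a state-block layout newer than it understands; a sketch of that check (returning H_P2 here is an assumption, not taken from this patch):

	if (hr->version > HV_GUEST_STATE_VERSION)
		return H_P2;	/* layout too new for this hypervisor */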
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index ab3a4fba38e3..35db0cbc9222 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -126,7 +126,7 @@ struct iommu_table {
int it_nid;
};
-#define IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry) \
+#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
((tbl)->it_ops->useraddrptr((tbl), (entry), true))
@@ -220,8 +220,6 @@ extern void iommu_del_device(struct device *dev);
extern int __init tce_iommu_bus_notifier_init(void);
extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
unsigned long *hpa, enum dma_data_direction *direction);
-extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
- unsigned long *hpa, enum dma_data_direction *direction);
#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number,
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index a790d5cf6ea3..1f321914676d 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -84,7 +84,6 @@
#define BOOK3S_INTERRUPT_INST_STORAGE 0x400
#define BOOK3S_INTERRUPT_INST_SEGMENT 0x480
#define BOOK3S_INTERRUPT_EXTERNAL 0x500
-#define BOOK3S_INTERRUPT_EXTERNAL_LEVEL 0x501
#define BOOK3S_INTERRUPT_EXTERNAL_HV 0x502
#define BOOK3S_INTERRUPT_ALIGNMENT 0x600
#define BOOK3S_INTERRUPT_PROGRAM 0x700
@@ -134,8 +133,7 @@
#define BOOK3S_IRQPRIO_EXTERNAL 14
#define BOOK3S_IRQPRIO_DECREMENTER 15
#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 16
-#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 17
-#define BOOK3S_IRQPRIO_MAX 18
+#define BOOK3S_IRQPRIO_MAX 17
#define BOOK3S_HFLAG_DCBZ32 0x1
#define BOOK3S_HFLAG_SLB 0x2
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 83a9aa3cf689..09f8e9ba69bc 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -188,14 +188,37 @@ extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr);
+extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 root,
+ u64 *pte_ret_p);
+extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 table,
+ int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, bool data, bool iswrite);
+extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
+ unsigned int shift, struct kvm_memory_slot *memslot,
+ unsigned int lpid);
+extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
+ bool writing, unsigned long gpa,
+ unsigned int lpid);
+extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
+ unsigned long gpa,
+ struct kvm_memory_slot *memslot,
+ bool writing, bool kvm_ro,
+ pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
+extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
+ unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
@@ -271,6 +294,21 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif
+long kvmhv_nested_init(void);
+void kvmhv_nested_exit(void);
+void kvmhv_vm_nested_init(struct kvm *kvm);
+long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
+void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
+void kvmhv_release_all_nested(struct kvm *kvm);
+long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
+long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
+int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
+ u64 time_limit, unsigned long lpcr);
+void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
+void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
+ struct hv_guest_state *hr);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
+
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
extern int kvm_irq_bypass;
@@ -301,12 +339,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
- vcpu->arch.cr = val;
+ vcpu->arch.regs.ccr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cr;
+ return vcpu->arch.regs.ccr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
@@ -384,9 +422,6 @@ extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
/* TO = 31 for unconditional trap */
#define INS_TW 0x7fe00008
-/* LPIDs we support with this build -- runtime limit may be lower */
-#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
-
#define SPLIT_HACK_MASK 0xff000000
#define SPLIT_HACK_OFFS 0xfb000000
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index dc435a5af7d6..6d298145d564 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -23,6 +23,108 @@
#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
+#include <asm/cpu_has_feature.h>
+#include <asm/ppc-opcode.h>
+
+#ifdef CONFIG_PPC_PSERIES
+static inline bool kvmhv_on_pseries(void)
+{
+ return !cpu_has_feature(CPU_FTR_HVMODE);
+}
+#else
+static inline bool kvmhv_on_pseries(void)
+{
+ return false;
+}
+#endif
+
+/*
+ * Structure for a nested guest, that is, for a guest that is managed by
+ * one of our guests.
+ */
+struct kvm_nested_guest {
+ struct kvm *l1_host; /* L1 VM that owns this nested guest */
+ int l1_lpid; /* lpid L1 guest thinks this guest is */
+ int shadow_lpid; /* real lpid of this nested guest */
+ pgd_t *shadow_pgtable; /* our page table for this guest */
+ u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */
+ u64 process_table; /* process table entry for this guest */
+ long refcnt; /* number of pointers to this struct */
+ struct mutex tlb_lock; /* serialize page faults and tlbies */
+ struct kvm_nested_guest *next;
+ cpumask_t need_tlb_flush;
+ cpumask_t cpu_in_guest;
+ short prev_cpu[NR_CPUS];
+};
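The refcnt field pairs with the kvmhv_get_nested()/kvmhv_put_nested() helpers declared below; a hypothetical lookup showing the intended discipline (only the two calls come from this patch, the rest is illustrative):

	struct kvm_nested_guest *gp;

	gp = kvmhv_get_nested(kvm, l1_lpid, false);	/* takes a reference */
	if (gp) {
		/* ... operate on gp, e.g. under gp->tlb_lock ... */
		kvmhv_put_nested(gp);			/* drops the reference */
	}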
+
+/*
+ * We define a nested rmap entry as a single 64-bit quantity
+ * 0xFFF0000000000000 12-bit lpid field
+ * 0x000FFFFFFFFFF000 40-bit guest 4k page frame number
+ * 0x0000000000000001 1-bit single entry flag
+ */
+#define RMAP_NESTED_LPID_MASK 0xFFF0000000000000UL
+#define RMAP_NESTED_LPID_SHIFT (52)
+#define RMAP_NESTED_GPA_MASK 0x000FFFFFFFFFF000UL
+#define RMAP_NESTED_IS_SINGLE_ENTRY 0x0000000000000001UL
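Pack/unpack helpers implied by this layout, with hypothetical names, shown only to make the bit fields concrete:

	static inline u64 nest_rmap_entry(unsigned int lpid, u64 gpa)
	{
		return ((u64)lpid << RMAP_NESTED_LPID_SHIFT) |
		       (gpa & RMAP_NESTED_GPA_MASK);
	}

	static inline unsigned int nest_rmap_lpid(u64 rmap)
	{
		return (rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
	}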
+
+/* Structure for a nested guest rmap entry */
+struct rmap_nested {
+ struct llist_node list;
+ u64 rmap;
+};
+
+/*
+ * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
+ * safe against removal of the list entry or NULL list
+ * @pos: a (struct rmap_nested *) to use as a loop cursor
+ * @node: pointer to the first entry
+ * NOTE: this can be NULL
+ * @rmapp: an (unsigned long *) in which to return the rmap entries on each
+ * iteration
+ * NOTE: this must point to already allocated memory
+ *
+ * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
+ * rmap entry in the memslot. The list is always terminated by a "single entry"
+ * stored in the list element of the final entry of the llist. If there is ONLY
+ * a single entry then this is itself in the rmap entry of the memslot, not a
+ * llist head pointer.
+ *
+ * Note that the iterator below assumes that a nested rmap entry is always
+ * non-zero. This is true for our usage because the LPID field is always
+ * non-zero (zero is reserved for the host).
+ *
+ * This should be used to iterate over the list of rmap_nested entries with
+ * processing done on the u64 rmap value given by each iteration. This is safe
+ * against removal of list entries and it is always safe to call free on (pos).
+ *
+ * e.g.
+ * struct rmap_nested *cursor;
+ * struct llist_node *first;
+ * unsigned long rmap;
+ * for_each_nest_rmap_safe(cursor, first, &rmap) {
+ * do_something(rmap);
+ * free(cursor);
+ * }
+ */
+#define for_each_nest_rmap_safe(pos, node, rmapp) \
+ for ((pos) = llist_entry((node), typeof(*(pos)), list); \
+ (node) && \
+ (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \
+ ((u64) (node)) : ((pos)->rmap))) && \
+ (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \
+ ((struct llist_node *) ((pos) = NULL)) : \
+ (pos)->list.next)), true); \
+ (pos) = llist_entry((node), typeof(*(pos)), list))
+
+struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
+ bool create);
+void kvmhv_put_nested(struct kvm_nested_guest *gp);
+int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);
+
+/* Encoding of first parameter for H_TLB_INVALIDATE */
+#define H_TLBIE_P1_ENC(ric, prs, r) (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+ ___PPC_R(r))
/* Power architecture requires HPT is at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER 18
@@ -435,6 +537,7 @@ static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
}
extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
+extern void kvmhv_radix_debugfs_init(struct kvm *kvm);
extern void kvmhv_rm_send_ipi(int cpu);
@@ -482,7 +585,7 @@ static inline u64 sanitize_msr(u64 msr)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
- vcpu->arch.cr = vcpu->arch.cr_tm;
+ vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
vcpu->arch.regs.xer = vcpu->arch.xer_tm;
vcpu->arch.regs.link = vcpu->arch.lr_tm;
vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
@@ -499,7 +602,7 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
- vcpu->arch.cr_tm = vcpu->arch.cr;
+ vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
vcpu->arch.xer_tm = vcpu->arch.regs.xer;
vcpu->arch.lr_tm = vcpu->arch.regs.link;
vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
@@ -515,6 +618,17 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
+ unsigned long gpa, unsigned int level,
+ unsigned long mmu_seq, unsigned int lpid,
+ unsigned long *rmapp, struct rmap_nested **n_rmap);
+extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
+ struct rmap_nested **n_rmap);
+extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ unsigned long gpa, unsigned long hpa,
+ unsigned long nbytes);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index d978fdf698af..eb3ba6390108 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -25,6 +25,9 @@
#define XICS_MFRR 0xc
#define XICS_IPI 2 /* interrupt source # for IPIs */
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
+
/* Maximum number of threads per physical core */
#define MAX_SMT_THREADS 8
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index d513e3ed1c65..f0cef625f17c 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -46,12 +46,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
- vcpu->arch.cr = val;
+ vcpu->arch.regs.ccr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cr;
+ return vcpu->arch.regs.ccr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 906bcbdfd2a1..fac6f631ed29 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -46,6 +46,7 @@
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#include <asm/kvm_book3s_asm.h> /* for MAX_SMT_THREADS */
#define KVM_MAX_VCPU_ID (MAX_SMT_THREADS * KVM_MAX_VCORES)
+#define KVM_MAX_NESTED_GUESTS KVMPPC_NR_LPIDS
#else
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
@@ -94,6 +95,7 @@ struct dtl_entry;
struct kvmppc_vcpu_book3s;
struct kvmppc_book3s_shadow_vcpu;
+struct kvm_nested_guest;
struct kvm_vm_stat {
ulong remote_tlb_flush;
@@ -287,10 +289,12 @@ struct kvm_arch {
u8 radix;
u8 fwnmi_enabled;
bool threads_indep;
+ bool nested_enable;
pgd_t *pgtable;
u64 process_table;
struct dentry *debugfs_dir;
struct dentry *htab_dentry;
+ struct dentry *radix_dentry;
struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -311,6 +315,9 @@ struct kvm_arch {
#endif
struct kvmppc_ops *kvm_ops;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ u64 l1_ptcr;
+ int max_nested_lpid;
+ struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
/* This array can grow quite large, keep it at the end */
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
#endif
@@ -360,7 +367,9 @@ struct kvmppc_pte {
bool may_write : 1;
bool may_execute : 1;
unsigned long wimg;
+ unsigned long rc;
u8 page_size; /* MMU_PAGE_xxx */
+ u8 page_shift;
};
struct kvmppc_mmu {
@@ -537,8 +546,6 @@ struct kvm_vcpu_arch {
ulong tar;
#endif
- u32 cr;
-
#ifdef CONFIG_PPC_BOOK3S
ulong hflags;
ulong guest_owned_ext;
@@ -707,6 +714,7 @@ struct kvm_vcpu_arch {
u8 hcall_needed;
u8 epr_flags; /* KVMPPC_EPR_xxx */
u8 epr_needed;
+ u8 external_oneshot; /* clear external irq after delivery */
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
@@ -781,6 +789,10 @@ struct kvm_vcpu_arch {
u32 emul_inst;
u32 online;
+
+ /* For support of nested guests */
+ struct kvm_nested_guest *nested;
+ u32 nested_vcpu_id;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index e991821dd7fa..9b89b1918dfc 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -194,9 +194,7 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
(stt)->size, (ioba), (npages)) ? \
H_PARAMETER : H_SUCCESS)
-extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
- unsigned long tce);
-extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
unsigned long idx, unsigned long tce);
@@ -327,6 +325,7 @@ struct kvmppc_ops {
int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
unsigned long flags);
void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
+ int (*enable_nested)(struct kvm *kvm);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
@@ -585,6 +584,7 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status);
+extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
u32 priority) { return -1; }
@@ -607,6 +607,7 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur
static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status) { return -ENODEV; }
+static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
#endif /* CONFIG_KVM_XIVE */
/*
@@ -652,6 +653,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
/*
* Host-side operations we want to set up while running in real
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b2f89b621b15..b694d6af1150 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -38,6 +38,7 @@ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index 7f627e3f4da4..630eb8b1b7ed 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -54,7 +54,6 @@ void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
struct pnv_php_slot {
struct hotplug_slot slot;
- struct hotplug_slot_info slot_info;
uint64_t id;
char *name;
int slot_no;
@@ -72,6 +71,7 @@ struct pnv_php_slot {
struct pci_dev *pdev;
struct pci_bus *bus;
bool power_state_check;
+ u8 attention_state;
void *fdt;
void *dt;
struct of_changeset ocs;
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 665af14850e4..6093bc8f74e5 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -104,6 +104,7 @@
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MSGSNDP 142
#define OP_31_XOP_MSGCLRP 174
+#define OP_31_XOP_TLBIE 306
#define OP_31_XOP_MFSPR 339
#define OP_31_XOP_LWAX 341
#define OP_31_XOP_LHAX 343
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 447cbd1bee99..5b480e1d5909 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -149,7 +149,7 @@ do { \
#define arch_has_single_step() (1)
#define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601))
-#define ARCH_HAS_USER_SINGLE_STEP_INFO
+#define ARCH_HAS_USER_SINGLE_STEP_REPORT
/*
* kprobe-based event tracer support
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e5b314ed054e..c90698972f42 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -415,6 +415,7 @@
#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
#define HFSCR_FP __MASK(FSCR_FP_LG)
+#define HFSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 ASM_CONST(0x8000000000000000)
@@ -766,6 +767,7 @@
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
#define HSRR1_DENORM 0x00100000 /* Denorm exception */
+#define HSRR1_HISI_WRITE 0x00010000 /* HISI because memory could not be updated */
#define SPRN_TBCTL 0x35f /* PA6T Timebase control register */
#define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 1a951b00465d..1fffbba8d6a5 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index c19379f0a32e..b0de85b477e1 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -22,6 +22,7 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
@@ -35,7 +36,6 @@
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLD_UNAME
@@ -47,6 +47,7 @@
#endif
#ifdef CONFIG_PPC64
#define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_NEWFSTATAT
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index 1a6ed5919ffd..a658091a19f9 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -7,3 +7,4 @@ generic-y += poll.h
generic-y += resource.h
generic-y += sockios.h
generic-y += statfs.h
+generic-y += siginfo.h
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 1b32b56a03d3..8c876c166ef2 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -634,6 +634,7 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
+#define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
diff --git a/arch/powerpc/include/uapi/asm/siginfo.h b/arch/powerpc/include/uapi/asm/siginfo.h
deleted file mode 100644
index 1d51d9b88221..000000000000
--- a/arch/powerpc/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-#ifndef _ASM_POWERPC_SIGINFO_H
-#define _ASM_POWERPC_SIGINFO_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifdef __powerpc64__
-# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-#endif
-
-#include <asm-generic/siginfo.h>
-
-#endif /* _ASM_POWERPC_SIGINFO_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 89cf15566c4e..d68b9ef38328 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -387,12 +387,12 @@ int main(void)
OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64);
OFFSET(TVAL64_TV_SEC, timeval, tv_sec);
OFFSET(TVAL64_TV_USEC, timeval, tv_usec);
- OFFSET(TVAL32_TV_SEC, compat_timeval, tv_sec);
- OFFSET(TVAL32_TV_USEC, compat_timeval, tv_usec);
+ OFFSET(TVAL32_TV_SEC, old_timeval32, tv_sec);
+ OFFSET(TVAL32_TV_USEC, old_timeval32, tv_usec);
OFFSET(TSPC64_TV_SEC, timespec, tv_sec);
OFFSET(TSPC64_TV_NSEC, timespec, tv_nsec);
- OFFSET(TSPC32_TV_SEC, compat_timespec, tv_sec);
- OFFSET(TSPC32_TV_NSEC, compat_timespec, tv_nsec);
+ OFFSET(TSPC32_TV_SEC, old_timespec32, tv_sec);
+ OFFSET(TSPC32_TV_NSEC, old_timespec32, tv_nsec);
#else
OFFSET(TVAL32_TV_SEC, timeval, tv_sec);
OFFSET(TVAL32_TV_USEC, timeval, tv_usec);
@@ -438,7 +438,7 @@ int main(void)
#ifdef CONFIG_PPC_BOOK3S
OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
#endif
- OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+ OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
@@ -503,6 +503,7 @@ int main(void)
OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty);
OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst);
+ OFFSET(VCPU_NESTED, kvm_vcpu, arch.nested);
OFFSET(VCPU_CPU, kvm_vcpu, cpu);
OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu);
#endif
@@ -695,7 +696,7 @@ int main(void)
#endif /* CONFIG_PPC_BOOK3S_64 */
#else /* CONFIG_PPC_BOOK3S */
- OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+ OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 458b928dbd84..c317080db771 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -147,8 +147,8 @@ __init_hvmode_206:
rldicl. r0,r3,4,63
bnelr
ld r5,CPU_SPEC_FEATURES(r4)
- LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
- xor r5,r5,r6
+ LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST)
+ andc r5,r5,r6
std r5,CPU_SPEC_FEATURES(r4)
blr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ea04dfb8c092..2d8fc8c9da7a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
mfspr r10,SPRN_HSRR1
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
- addi r11,r11,-4 /* HSRR0 is next instruction */
bne+ denorm_assist
#endif
@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
*/
XVCPSGNDP32(32)
denorm_done:
+ mfspr r11,SPRN_HSRR0
+ subi r11,r11,4
mtspr SPRN_HSRR0,r11
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index af7a20dc6e09..19b4c628f3be 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1013,31 +1013,6 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);
-#ifdef CONFIG_PPC_BOOK3S_64
-long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
- unsigned long *hpa, enum dma_data_direction *direction)
-{
- long ret;
-
- ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
-
- if (!ret && ((*direction == DMA_FROM_DEVICE) ||
- (*direction == DMA_BIDIRECTIONAL))) {
- struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
-
- if (likely(pg)) {
- SetPageDirty(pg);
- } else {
- tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
- ret = -EFAULT;
- }
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
-#endif
-
int iommu_take_ownership(struct iommu_table *tbl)
{
unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 913c5725cdb2..5d983d8bac27 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -620,8 +620,6 @@ void do_send_trap(struct pt_regs *regs, unsigned long address,
void do_break (struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
- siginfo_t info;
-
current->thread.trap_nr = TRAP_HWBKPT;
if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
@@ -634,12 +632,7 @@ void do_break (struct pt_regs *regs, unsigned long address,
hw_breakpoint_disable();
/* Deliver the signal to userspace */
- clear_siginfo(&info);
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = TRAP_HWBKPT;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGTRAP, &info, current);
+ force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address, current);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
@@ -1306,6 +1299,16 @@ void show_user_instructions(struct pt_regs *regs)
pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
+ /*
+ * Make sure the NIP points at userspace, not kernel text/data or
+ * elsewhere.
+ */
+ if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
+ pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
+ current->comm, current->pid);
+ return;
+ }
+
pr_info("%s[%d]: code: ", current->comm, current->pid);
for (i = 0; i < instructions_to_print; i++) {
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 6bffbc5affe7..7716374786bd 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
std r1, PACATMSCRATCH(r13)
ld r1, PACAR1(r13)
- /* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+ /*
+ * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+ * clobbered by an exception once we turn on MSR_RI below.
+ */
+ ld r11, PACATMSCRATCH(r13)
+ std r11, GPR1(r1)
+
+ /*
+ * Store r13 away so we can free up the scratch SPR for the SLB fault
+ * handler (needed once we start accessing the thread_struct).
+ */
+ GET_SCRATCH0(r11)
+ std r11, GPR13(r1)
+
/* Reset MSR RI so we can take SLB faults again */
li r11, MSR_RI
mtmsrd r11, 1
+ /* Store the PPR in r11 and reset to decent value */
mfspr r11, SPRN_PPR
HMT_MEDIUM
@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
SAVE_GPR(8, r7) /* user r8 */
SAVE_GPR(9, r7) /* user r9 */
SAVE_GPR(10, r7) /* user r10 */
- ld r3, PACATMSCRATCH(r13) /* user r1 */
+ ld r3, GPR1(r1) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
ld r5, GPR11(r1) /* user r11 */
ld r6, GPR12(r1) /* user r12 */
- GET_SCRATCH0(8) /* user r13 */
+ ld r8, GPR13(r1) /* user r13 */
std r3, GPR1(r7)
std r4, GPR7(r7)
std r5, GPR11(r7)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index c85adb858271..ab1bd06d7c44 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -307,12 +307,9 @@ void die(const char *str, struct pt_regs *regs, long err)
}
NOKPROBE_SYMBOL(die);
-void user_single_step_siginfo(struct task_struct *tsk,
- struct pt_regs *regs, siginfo_t *info)
+void user_single_step_report(struct pt_regs *regs)
{
- info->si_signo = SIGTRAP;
- info->si_code = TRAP_TRACE;
- info->si_addr = (void __user *)regs->nip;
+ force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip, current);
}
static void show_signal_msg(int signr, struct pt_regs *regs, int code,
@@ -341,14 +338,12 @@ static void show_signal_msg(int signr, struct pt_regs *regs, int code,
show_user_instructions(regs);
}
-void _exception_pkey(int signr, struct pt_regs *regs, int code,
- unsigned long addr, int key)
+static bool exception_common(int signr, struct pt_regs *regs, int code,
+ unsigned long addr)
{
- siginfo_t info;
-
if (!user_mode(regs)) {
die("Exception in kernel mode", regs, signr);
- return;
+ return false;
}
show_signal_msg(signr, regs, code, addr);
@@ -364,18 +359,23 @@ void _exception_pkey(int signr, struct pt_regs *regs, int code,
*/
thread_pkey_regs_save(&current->thread);
- clear_siginfo(&info);
- info.si_signo = signr;
- info.si_code = code;
- info.si_addr = (void __user *) addr;
- info.si_pkey = key;
+ return true;
+}
+
+void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key)
+{
+ if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr))
+ return;
- force_sig_info(signr, &info, current);
+ force_sig_pkuerr((void __user *) addr, key);
}
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
- _exception_pkey(signr, regs, code, addr, 0);
+ if (!exception_common(signr, regs, code, addr))
+ return;
+
+ force_sig_fault(signr, code, (void __user *)addr, current);
}
void system_reset_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 07ae018e550e..105a976323aa 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -212,8 +212,6 @@ SECTIONS
CON_INITCALL
}
- SECURITY_INIT
-
. = ALIGN(8);
__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
__start___ftr_fixup = .;
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index f872c04bb5b1..e814f40ab836 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -75,7 +75,8 @@ kvm-hv-y += \
book3s_hv.o \
book3s_hv_interrupts.o \
book3s_64_mmu_hv.o \
- book3s_64_mmu_radix.o
+ book3s_64_mmu_radix.o \
+ book3s_hv_nested.o
kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
book3s_hv_tm.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 87348e498c89..fd9893bc7aa1 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -78,8 +78,11 @@ void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
ulong pc = kvmppc_get_pc(vcpu);
+ ulong lr = kvmppc_get_lr(vcpu);
if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
+ if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
+ kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
}
}
@@ -150,7 +153,6 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
- case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
@@ -236,18 +238,35 @@ EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
- unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;
-
- if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
- vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;
+ /*
+ * This case (KVM_INTERRUPT_SET) should never actually arise for
+ * a pseries guest (because pseries guests expect their interrupt
+ * controllers to continue asserting an external interrupt request
+ * until it is acknowledged at the interrupt controller), but is
+ * included to avoid ABI breakage and potentially for other
+ * sorts of guest.
+ *
+ * There is a subtlety here: HV KVM does not test the
+ * external_oneshot flag in the code that synthesizes
+ * external interrupts for the guest just before entering
+ * the guest. That is OK even if userspace did do a
+ * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
+ * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
+ * which ends up doing a smp_send_reschedule(), which will
+ * pull the guest all the way out to the host, meaning that
+ * we will call kvmppc_core_prepare_to_enter() before entering
+ * the guest again, and that will handle the external_oneshot
+ * flag correctly.
+ */
+ if (irq->irq == KVM_INTERRUPT_SET)
+ vcpu->arch.external_oneshot = 1;
- kvmppc_book3s_queue_irqprio(vcpu, vec);
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}
void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
- kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}
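Both queue and dequeue are ultimately driven by the KVM_INTERRUPT vcpu ioctl. A hedged userspace sketch of the two assertion styles (the ioctl and the KVM_INTERRUPT_* values are the standard PPC KVM UAPI; the helper itself is hypothetical):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: assert an external interrupt on a vcpu fd. */
    static int assert_external(int vcpu_fd, int level_triggered)
    {
            struct kvm_interrupt irq = {
                    /* SET is now one-shot (see external_oneshot above);
                     * SET_LEVEL stays pending until KVM_INTERRUPT_UNSET. */
                    .irq = level_triggered ? KVM_INTERRUPT_SET_LEVEL
                                           : KVM_INTERRUPT_SET,
            };

            return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
    }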
void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
@@ -278,7 +297,6 @@ static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
vec = BOOK3S_INTERRUPT_DECREMENTER;
break;
case BOOK3S_IRQPRIO_EXTERNAL:
- case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vec = BOOK3S_INTERRUPT_EXTERNAL;
break;
@@ -352,8 +370,16 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
case BOOK3S_IRQPRIO_DECREMENTER:
/* DEC interrupts get cleared by mtdec */
return false;
- case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
- /* External interrupts get cleared by userspace */
+ case BOOK3S_IRQPRIO_EXTERNAL:
+ /*
+ * External interrupts get cleared by userspace
+ * except when set by the KVM_INTERRUPT ioctl with
+ * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
+ */
+ if (vcpu->arch.external_oneshot) {
+ vcpu->arch.external_oneshot = 0;
+ return true;
+ }
return false;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 68e14afecac8..c615617e78ac 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -268,14 +268,13 @@ int kvmppc_mmu_hv_init(void)
{
unsigned long host_lpid, rsvd_lpid;
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return -EINVAL;
-
if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
return -EINVAL;
/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
- host_lpid = mfspr(SPRN_LPID);
+ host_lpid = 0;
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ host_lpid = mfspr(SPRN_LPID);
rsvd_lpid = LPID_RSVD;
kvmppc_init_lpid(rsvd_lpid + 1);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index fd6e8c13685f..d68162ee159b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -10,6 +10,9 @@
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/debugfs.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -26,87 +29,74 @@
*/
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
-int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *gpte, bool data, bool iswrite)
+int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 root,
+ u64 *pte_ret_p)
{
struct kvm *kvm = vcpu->kvm;
- u32 pid;
int ret, level, ps;
- __be64 prte, rpte;
- unsigned long ptbl;
- unsigned long root, pte, index;
- unsigned long rts, bits, offset;
- unsigned long gpa;
- unsigned long proc_tbl_size;
-
- /* Work out effective PID */
- switch (eaddr >> 62) {
- case 0:
- pid = vcpu->arch.pid;
- break;
- case 3:
- pid = 0;
- break;
- default:
- return -EINVAL;
- }
- proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
- if (pid * 16 >= proc_tbl_size)
- return -EINVAL;
-
- /* Read partition table to find root of tree for effective PID */
- ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
- ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
- if (ret)
- return ret;
+ unsigned long rts, bits, offset, index;
+ u64 pte, base, gpa;
+ __be64 rpte;
- root = be64_to_cpu(prte);
rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
((root & RTS2_MASK) >> RTS2_SHIFT);
bits = root & RPDS_MASK;
- root = root & RPDB_MASK;
+ base = root & RPDB_MASK;
offset = rts + 31;
- /* current implementations only support 52-bit space */
+ /* Current implementations only support 52-bit space */
if (offset != 52)
return -EINVAL;
+ /* Walk each level of the radix tree */
for (level = 3; level >= 0; --level) {
+ u64 addr;
+ /* Check for a valid size */
if (level && bits != p9_supported_radix_bits[level])
return -EINVAL;
if (level == 0 && !(bits == 5 || bits == 9))
return -EINVAL;
offset -= bits;
index = (eaddr >> offset) & ((1UL << bits) - 1);
- /* check that low bits of page table base are zero */
- if (root & ((1UL << (bits + 3)) - 1))
+ /* Check that low bits of page table base are zero */
+ if (base & ((1UL << (bits + 3)) - 1))
return -EINVAL;
- ret = kvm_read_guest(kvm, root + index * 8,
- &rpte, sizeof(rpte));
- if (ret)
+ /* Read the entry from guest memory */
+ addr = base + (index * sizeof(rpte));
+ ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
+ if (ret) {
+ if (pte_ret_p)
+ *pte_ret_p = addr;
return ret;
+ }
pte = __be64_to_cpu(rpte);
if (!(pte & _PAGE_PRESENT))
return -ENOENT;
+ /* Check if a leaf entry */
if (pte & _PAGE_PTE)
break;
- bits = pte & 0x1f;
- root = pte & 0x0fffffffffffff00ul;
+ /* Get ready to walk the next level */
+ base = pte & RPDB_MASK;
+ bits = pte & RPDS_MASK;
}
- /* need a leaf at lowest level; 512GB pages not supported */
+
+ /* Need a leaf at lowest level; 512GB pages not supported */
if (level < 0 || level == 3)
return -EINVAL;
- /* offset is now log base 2 of the page size */
+ /* We found a valid leaf PTE */
+ /* Offset is now log base 2 of the page size */
gpa = pte & 0x01fffffffffff000ul;
if (gpa & ((1ul << offset) - 1))
return -EINVAL;
- gpa += eaddr & ((1ul << offset) - 1);
+ gpa |= eaddr & ((1ul << offset) - 1);
for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
if (offset == mmu_psize_defs[ps].shift)
break;
gpte->page_size = ps;
+ gpte->page_shift = offset;
gpte->eaddr = eaddr;
gpte->raddr = gpa;
@@ -115,6 +105,77 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
gpte->may_read = !!(pte & _PAGE_READ);
gpte->may_write = !!(pte & _PAGE_WRITE);
gpte->may_execute = !!(pte & _PAGE_EXEC);
+
+ gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);
+
+ if (pte_ret_p)
+ *pte_ret_p = pte;
+
+ return 0;
+}
+
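As a worked example of the geometry this walker accepts: with the usual POWER9 13/9/9/{9|5} split under a 52-bit space, the running offset after each level is exactly the page shift a leaf found at that level would have. A standalone sketch (not kernel code) replaying the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            int bits[] = { 13, 9, 9, 9 };   /* levels 3..0; level 0 may be 5 */
            int offset = 52;                /* rts + 31 for a 52-bit space */

            for (int level = 3; level >= 0; --level) {
                    offset -= bits[3 - level];
                    /* 39 = 512G (rejected), 30 = 1G, 21 = 2M, 12 = 4K;
                     * a 5-bit level 0 would end at 16, i.e. 64K pages. */
                    printf("leaf at level %d -> page shift %d\n",
                           level, offset);
            }
            return 0;
    }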
+/*
+ * Used to walk a partition or process table radix tree in guest memory
+ * Note: We exploit the fact that a partition table and a process
+ * table have the same layout, a partition-scoped page table and a
+ * process-scoped page table have the same layout, and the 2nd
+ * doubleword of a partition table entry has the same layout as
+ * the PTCR register.
+ */
+int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 table,
+ int table_index, u64 *pte_ret_p)
+{
+ struct kvm *kvm = vcpu->kvm;
+ int ret;
+ unsigned long size, ptbl, root;
+ struct prtb_entry entry;
+
+ if ((table & PRTS_MASK) > 24)
+ return -EINVAL;
+ size = 1ul << ((table & PRTS_MASK) + 12);
+
+ /* Is the table big enough to contain this entry? */
+ if ((table_index * sizeof(entry)) >= size)
+ return -EINVAL;
+
+ /* Read the table to find the root of the radix tree */
+ ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
+ ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
+ if (ret)
+ return ret;
+
+ /* Root is stored in the first double word */
+ root = be64_to_cpu(entry.prtb0);
+
+ return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
+}
+
+int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, bool data, bool iswrite)
+{
+ u32 pid;
+ u64 pte;
+ int ret;
+
+ /* Work out effective PID */
+ switch (eaddr >> 62) {
+ case 0:
+ pid = vcpu->arch.pid;
+ break;
+ case 3:
+ pid = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
+ vcpu->kvm->arch.process_table, pid, &pte);
+ if (ret)
+ return ret;
+
+ /* Check privilege (applies only to process scoped translations) */
if (kvmppc_get_msr(vcpu) & MSR_PR) {
if (pte & _PAGE_PRIVILEGED) {
gpte->may_read = 0;
@@ -137,20 +198,46 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
}
static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
- unsigned int pshift)
+ unsigned int pshift, unsigned int lpid)
{
unsigned long psize = PAGE_SIZE;
+ int psi;
+ long rc;
+ unsigned long rb;
if (pshift)
psize = 1UL << pshift;
+ else
+ pshift = PAGE_SHIFT;
addr &= ~(psize - 1);
- radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize);
+
+ if (!kvmhv_on_pseries()) {
+ radix__flush_tlb_lpid_page(lpid, addr, psize);
+ return;
+ }
+
+ psi = shift_to_mmu_psize(pshift);
+ rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+ lpid, rb);
+ if (rc)
+ pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
-static void kvmppc_radix_flush_pwc(struct kvm *kvm)
+static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
- radix__flush_pwc_lpid(kvm->arch.lpid);
+ long rc;
+
+ if (!kvmhv_on_pseries()) {
+ radix__flush_pwc_lpid(lpid);
+ return;
+ }
+
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ if (rc)
+ pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
@@ -195,23 +282,38 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
kmem_cache_free(kvm_pmd_cache, pmdp);
}
-static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
- unsigned long gpa, unsigned int shift)
+/* Called with kvm->mmu_lock held */
+void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
+ unsigned int shift, struct kvm_memory_slot *memslot,
+ unsigned int lpid)
{
- unsigned long page_size = 1ul << shift;
unsigned long old;
+ unsigned long gfn = gpa >> PAGE_SHIFT;
+ unsigned long page_size = PAGE_SIZE;
+ unsigned long hpa;
old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
- kvmppc_radix_tlbie_page(kvm, gpa, shift);
- if (old & _PAGE_DIRTY) {
- unsigned long gfn = gpa >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot;
+ kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
+
+ /* The following only applies to L1 entries */
+ if (lpid != kvm->arch.lpid)
+ return;
+ if (!memslot) {
memslot = gfn_to_memslot(kvm, gfn);
- if (memslot && memslot->dirty_bitmap)
- kvmppc_update_dirty_map(memslot, gfn, page_size);
+ if (!memslot)
+ return;
}
+ if (shift)
+ page_size = 1ul << shift;
+
+ gpa &= ~(page_size - 1);
+ hpa = old & PTE_RPN_MASK;
+ kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
+
+ if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
+ kvmppc_update_dirty_map(memslot, gfn, page_size);
}
/*
@@ -224,7 +326,8 @@ static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
* and emit a warning if encountered, but there may already be data
* corruption due to the unexpected mappings.
*/
-static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
+static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
+ unsigned int lpid)
{
if (full) {
memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
@@ -238,14 +341,15 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
WARN_ON_ONCE(1);
kvmppc_unmap_pte(kvm, p,
pte_pfn(*p) << PAGE_SHIFT,
- PAGE_SHIFT);
+ PAGE_SHIFT, NULL, lpid);
}
}
kvmppc_pte_free(pte);
}
-static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
+static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
+ unsigned int lpid)
{
unsigned long im;
pmd_t *p = pmd;
@@ -260,20 +364,21 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
WARN_ON_ONCE(1);
kvmppc_unmap_pte(kvm, (pte_t *)p,
pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
- PMD_SHIFT);
+ PMD_SHIFT, NULL, lpid);
}
} else {
pte_t *pte;
pte = pte_offset_map(p, 0);
- kvmppc_unmap_free_pte(kvm, pte, full);
+ kvmppc_unmap_free_pte(kvm, pte, full, lpid);
pmd_clear(p);
}
}
kvmppc_pmd_free(pmd);
}
-static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
+static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
+ unsigned int lpid)
{
unsigned long iu;
pud_t *p = pud;
@@ -287,36 +392,40 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
pmd_t *pmd;
pmd = pmd_offset(p, 0);
- kvmppc_unmap_free_pmd(kvm, pmd, true);
+ kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
pud_clear(p);
}
}
pud_free(kvm->mm, pud);
}
-void kvmppc_free_radix(struct kvm *kvm)
+void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
{
unsigned long ig;
- pgd_t *pgd;
- if (!kvm->arch.pgtable)
- return;
- pgd = kvm->arch.pgtable;
for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
pud_t *pud;
if (!pgd_present(*pgd))
continue;
pud = pud_offset(pgd, 0);
- kvmppc_unmap_free_pud(kvm, pud);
+ kvmppc_unmap_free_pud(kvm, pud, lpid);
pgd_clear(pgd);
}
- pgd_free(kvm->mm, kvm->arch.pgtable);
- kvm->arch.pgtable = NULL;
+}
+
+void kvmppc_free_radix(struct kvm *kvm)
+{
+ if (kvm->arch.pgtable) {
+ kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
+ kvm->arch.lpid);
+ pgd_free(kvm->mm, kvm->arch.pgtable);
+ kvm->arch.pgtable = NULL;
+ }
}
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
- unsigned long gpa)
+ unsigned long gpa, unsigned int lpid)
{
pte_t *pte = pte_offset_kernel(pmd, 0);
@@ -326,13 +435,13 @@ static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
* flushing the PWC again.
*/
pmd_clear(pmd);
- kvmppc_radix_flush_pwc(kvm);
+ kvmppc_radix_flush_pwc(kvm, lpid);
- kvmppc_unmap_free_pte(kvm, pte, false);
+ kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}
static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
- unsigned long gpa)
+ unsigned long gpa, unsigned int lpid)
{
pmd_t *pmd = pmd_offset(pud, 0);
@@ -342,9 +451,9 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
* so can be freed without flushing the PWC again.
*/
pud_clear(pud);
- kvmppc_radix_flush_pwc(kvm);
+ kvmppc_radix_flush_pwc(kvm, lpid);
- kvmppc_unmap_free_pmd(kvm, pmd, false);
+ kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}
/*
@@ -356,8 +465,10 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
*/
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
-static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
- unsigned int level, unsigned long mmu_seq)
+int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
+ unsigned long gpa, unsigned int level,
+ unsigned long mmu_seq, unsigned int lpid,
+ unsigned long *rmapp, struct rmap_nested **n_rmap)
{
pgd_t *pgd;
pud_t *pud, *new_pud = NULL;
@@ -366,7 +477,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
int ret;
/* Traverse the guest's 2nd-level tree, allocate new levels needed */
- pgd = kvm->arch.pgtable + pgd_index(gpa);
+ pgd = pgtable + pgd_index(gpa);
pud = NULL;
if (pgd_present(*pgd))
pud = pud_offset(pgd, gpa);
@@ -423,7 +534,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
goto out_unlock;
}
/* Valid 1GB page here already, remove it */
- kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT);
+ kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
+ lpid);
}
if (level == 2) {
if (!pud_none(*pud)) {
@@ -432,9 +544,11 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
* install a large page, so remove and free the page
* table page.
*/
- kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa);
+ kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
}
kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
+ if (rmapp && n_rmap)
+ kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
ret = 0;
goto out_unlock;
}
@@ -458,7 +572,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
PTE_BITS_MUST_MATCH);
kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
- 0, pte_val(pte), lgpa, PMD_SHIFT);
+ 0, pte_val(pte), lgpa, PMD_SHIFT);
ret = 0;
goto out_unlock;
}
@@ -472,7 +586,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
goto out_unlock;
}
/* Valid 2MB page here already, remove it */
- kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT);
+ kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
+ lpid);
}
if (level == 1) {
if (!pmd_none(*pmd)) {
@@ -481,9 +596,11 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
* install a large page, so remove and free the page
* table page.
*/
- kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa);
+ kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
}
kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
+ if (rmapp && n_rmap)
+ kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
ret = 0;
goto out_unlock;
}
@@ -508,6 +625,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
goto out_unlock;
}
kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
+ if (rmapp && n_rmap)
+ kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
ret = 0;
out_unlock:
@@ -521,21 +640,154 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
return ret;
}
-int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned long ea, unsigned long dsisr)
+bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
+ unsigned long gpa, unsigned int lpid)
+{
+ unsigned long pgflags;
+ unsigned int shift;
+ pte_t *ptep;
+
+ /*
+ * Need to set an R or C bit in the 2nd-level tables;
+ * since we are just helping out the hardware here,
+ * it is sufficient to do what the hardware does.
+ */
+ pgflags = _PAGE_ACCESSED;
+ if (writing)
+ pgflags |= _PAGE_DIRTY;
+ /*
+ * We are walking the secondary (partition-scoped) page table here.
+ * We can do this without disabling irq because the Linux MM
+ * subsystem doesn't do THP splits and collapses on this tree.
+ */
+ ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
+ if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
+ kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
+ return true;
+ }
+ return false;
+}
+
+int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
+ unsigned long gpa,
+ struct kvm_memory_slot *memslot,
+ bool writing, bool kvm_ro,
+ pte_t *inserted_pte, unsigned int *levelp)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long mmu_seq, pte_size;
- unsigned long gpa, gfn, hva, pfn;
- struct kvm_memory_slot *memslot;
struct page *page = NULL;
- long ret;
- bool writing;
+ unsigned long mmu_seq;
+ unsigned long hva, gfn = gpa >> PAGE_SHIFT;
bool upgrade_write = false;
bool *upgrade_p = &upgrade_write;
pte_t pte, *ptep;
- unsigned long pgflags;
unsigned int shift, level;
+ int ret;
+
+ /* used to check for invalidations in progress */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
+
+ /*
+ * Do a fast check first, since __gfn_to_pfn_memslot doesn't
+ * do it with !atomic && !async, which is how we call it.
+ * We always ask for write permission since the common case
+ * is that the page is writable.
+ */
+ hva = gfn_to_hva_memslot(memslot, gfn);
+ if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
+ upgrade_write = true;
+ } else {
+ unsigned long pfn;
+
+ /* Call KVM generic code to do the slow-path check */
+ pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+ writing, upgrade_p);
+ if (is_error_noslot_pfn(pfn))
+ return -EFAULT;
+ page = NULL;
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (PageReserved(page))
+ page = NULL;
+ }
+ }
+
+ /*
+ * Read the PTE from the process' radix tree and use that
+ * so we get the shift and attribute bits.
+ */
+ local_irq_disable();
+ ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ /*
+ * If the PTE disappeared temporarily due to a THP
+ * collapse, just return and let the guest try again.
+ */
+ if (!ptep) {
+ local_irq_enable();
+ if (page)
+ put_page(page);
+ return RESUME_GUEST;
+ }
+ pte = *ptep;
+ local_irq_enable();
+
+ /* Get pte level from shift/size */
+ if (shift == PUD_SHIFT &&
+ (gpa & (PUD_SIZE - PAGE_SIZE)) ==
+ (hva & (PUD_SIZE - PAGE_SIZE))) {
+ level = 2;
+ } else if (shift == PMD_SHIFT &&
+ (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+ (hva & (PMD_SIZE - PAGE_SIZE))) {
+ level = 1;
+ } else {
+ level = 0;
+ if (shift > PAGE_SHIFT) {
+ /*
+ * If the pte maps more than one page, bring over
+ * bits from the virtual address to get the real
+ * address of the specific single page we want.
+ */
+ unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
+ pte = __pte(pte_val(pte) | (hva & rpnmask));
+ }
+ }
+
+ pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
+ if (writing || upgrade_write) {
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte = __pte(pte_val(pte) | _PAGE_DIRTY);
+ } else {
+ pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
+ }
+
+ /* Allocate space in the tree and write the PTE */
+ ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
+ mmu_seq, kvm->arch.lpid, NULL, NULL);
+ if (inserted_pte)
+ *inserted_pte = pte;
+ if (levelp)
+ *levelp = level;
+
+ if (page) {
+ if (!ret && (pte_val(pte) & _PAGE_WRITE))
+ set_page_dirty_lock(page);
+ put_page(page);
+ }
+
+ return ret;
+}
+
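The level selection above hinges on gpa/hva congruence: a 2MB or 1GB host-side mapping can back a guest large page only if the guest-physical and host-virtual addresses agree in all the bits between the base-page and large-page boundaries. A sketch of that test, assuming 4K base pages:

    #include <stdbool.h>

    #define BASE_PAGE_SIZE 4096UL   /* assumption: 4K base pages */

    /* Mirrors the (gpa & (SIZE - PAGE_SIZE)) == (hva & ...) checks above. */
    static bool congruent(unsigned long gpa, unsigned long hva,
                          unsigned long large_size)
    {
            unsigned long mask = large_size - BASE_PAGE_SIZE;

            return (gpa & mask) == (hva & mask);
    }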
+int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned long ea, unsigned long dsisr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long gpa, gfn;
+ struct kvm_memory_slot *memslot;
+ long ret;
+ bool writing = !!(dsisr & DSISR_ISSTORE);
+ bool kvm_ro = false;
/* Check for unusual errors */
if (dsisr & DSISR_UNSUPP_MMU) {
@@ -549,12 +801,14 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
return RESUME_GUEST;
}
- /* Translate the logical address and get the page */
+ /* Translate the logical address */
gpa = vcpu->arch.fault_gpa & ~0xfffUL;
gpa &= ~0xF000000000000000ul;
gfn = gpa >> PAGE_SHIFT;
if (!(dsisr & DSISR_PRTABLE_FAULT))
gpa |= ea & 0xfff;
+
+ /* Get the corresponding memslot */
memslot = gfn_to_memslot(kvm, gfn);
/* No memslot means it's an emulated MMIO region */
@@ -568,142 +822,35 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
return RESUME_GUEST;
}
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
- dsisr & DSISR_ISSTORE);
+ return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
}
- writing = (dsisr & DSISR_ISSTORE) != 0;
if (memslot->flags & KVM_MEM_READONLY) {
if (writing) {
/* give the guest a DSI */
- dsisr = DSISR_ISSTORE | DSISR_PROTFAULT;
- kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+ kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
+ DSISR_PROTFAULT);
return RESUME_GUEST;
}
- upgrade_p = NULL;
+ kvm_ro = true;
}
+ /* Failed to set the reference/change bits */
if (dsisr & DSISR_SET_RC) {
- /*
- * Need to set an R or C bit in the 2nd-level tables;
- * since we are just helping out the hardware here,
- * it is sufficient to do what the hardware does.
- */
- pgflags = _PAGE_ACCESSED;
- if (writing)
- pgflags |= _PAGE_DIRTY;
- /*
- * We are walking the secondary page table here. We can do this
- * without disabling irq.
- */
spin_lock(&kvm->mmu_lock);
- ptep = __find_linux_pte(kvm->arch.pgtable,
- gpa, NULL, &shift);
- if (ptep && pte_present(*ptep) &&
- (!writing || pte_write(*ptep))) {
- kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
- gpa, shift);
+ if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
+ writing, gpa, kvm->arch.lpid))
dsisr &= ~DSISR_SET_RC;
- }
spin_unlock(&kvm->mmu_lock);
+
if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
DSISR_PROTFAULT | DSISR_SET_RC)))
return RESUME_GUEST;
}
- /* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
- smp_rmb();
-
- /*
- * Do a fast check first, since __gfn_to_pfn_memslot doesn't
- * do it with !atomic && !async, which is how we call it.
- * We always ask for write permission since the common case
- * is that the page is writable.
- */
- hva = gfn_to_hva_memslot(memslot, gfn);
- if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
- pfn = page_to_pfn(page);
- upgrade_write = true;
- } else {
- /* Call KVM generic code to do the slow-path check */
- pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
- writing, upgrade_p);
- if (is_error_noslot_pfn(pfn))
- return -EFAULT;
- page = NULL;
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (PageReserved(page))
- page = NULL;
- }
- }
-
- /* See if we can insert a 1GB or 2MB large PTE here */
- level = 0;
- if (page && PageCompound(page)) {
- pte_size = PAGE_SIZE << compound_order(compound_head(page));
- if (pte_size >= PUD_SIZE &&
- (gpa & (PUD_SIZE - PAGE_SIZE)) ==
- (hva & (PUD_SIZE - PAGE_SIZE))) {
- level = 2;
- pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1);
- } else if (pte_size >= PMD_SIZE &&
- (gpa & (PMD_SIZE - PAGE_SIZE)) ==
- (hva & (PMD_SIZE - PAGE_SIZE))) {
- level = 1;
- pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
- }
- }
-
- /*
- * Compute the PTE value that we need to insert.
- */
- if (page) {
- pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE |
- _PAGE_ACCESSED;
- if (writing || upgrade_write)
- pgflags |= _PAGE_WRITE | _PAGE_DIRTY;
- pte = pfn_pte(pfn, __pgprot(pgflags));
- } else {
- /*
- * Read the PTE from the process' radix tree and use that
- * so we get the attribute bits.
- */
- local_irq_disable();
- ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
- pte = *ptep;
- local_irq_enable();
- if (shift == PUD_SHIFT &&
- (gpa & (PUD_SIZE - PAGE_SIZE)) ==
- (hva & (PUD_SIZE - PAGE_SIZE))) {
- level = 2;
- } else if (shift == PMD_SHIFT &&
- (gpa & (PMD_SIZE - PAGE_SIZE)) ==
- (hva & (PMD_SIZE - PAGE_SIZE))) {
- level = 1;
- } else if (shift && shift != PAGE_SHIFT) {
- /* Adjust PFN */
- unsigned long mask = (1ul << shift) - PAGE_SIZE;
- pte = __pte(pte_val(pte) | (hva & mask));
- }
- pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
- if (writing || upgrade_write) {
- if (pte_val(pte) & _PAGE_WRITE)
- pte = __pte(pte_val(pte) | _PAGE_DIRTY);
- } else {
- pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
- }
- }
-
- /* Allocate space in the tree and write the PTE */
- ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
-
- if (page) {
- if (!ret && (pte_val(pte) & _PAGE_WRITE))
- set_page_dirty_lock(page);
- put_page(page);
- }
+ /* Try to insert a pte */
+ ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
+ kvm_ro, NULL, NULL);
if (ret == 0 || ret == -EAGAIN)
ret = RESUME_GUEST;
@@ -717,20 +864,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
pte_t *ptep;
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;
- unsigned long old;
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
- if (ptep && pte_present(*ptep)) {
- old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
- gpa, shift);
- kvmppc_radix_tlbie_page(kvm, gpa, shift);
- if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
- unsigned long psize = PAGE_SIZE;
- if (shift)
- psize = 1ul << shift;
- kvmppc_update_dirty_map(memslot, gfn, psize);
- }
- }
+ if (ptep && pte_present(*ptep))
+ kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
+ kvm->arch.lpid);
return 0;
}
@@ -785,7 +923,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
ret = 1 << (shift - PAGE_SHIFT);
kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
gpa, shift);
- kvmppc_radix_tlbie_page(kvm, gpa, shift);
+ kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
}
return ret;
}
@@ -870,6 +1008,215 @@ static void pmd_ctor(void *addr)
memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}
+struct debugfs_radix_state {
+ struct kvm *kvm;
+ struct mutex mutex;
+ unsigned long gpa;
+ int lpid;
+ int chars_left;
+ int buf_index;
+ char buf[128];
+ u8 hdr;
+};
+
+static int debugfs_radix_open(struct inode *inode, struct file *file)
+{
+ struct kvm *kvm = inode->i_private;
+ struct debugfs_radix_state *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ kvm_get_kvm(kvm);
+ p->kvm = kvm;
+ mutex_init(&p->mutex);
+ file->private_data = p;
+
+ return nonseekable_open(inode, file);
+}
+
+static int debugfs_radix_release(struct inode *inode, struct file *file)
+{
+ struct debugfs_radix_state *p = file->private_data;
+
+ kvm_put_kvm(p->kvm);
+ kfree(p);
+ return 0;
+}
+
+static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct debugfs_radix_state *p = file->private_data;
+ ssize_t ret, r;
+ unsigned long n;
+ struct kvm *kvm;
+ unsigned long gpa;
+ pgd_t *pgt;
+ struct kvm_nested_guest *nested;
+ pgd_t pgd, *pgdp;
+ pud_t pud, *pudp;
+ pmd_t pmd, *pmdp;
+ pte_t *ptep;
+ int shift;
+ unsigned long pte;
+
+ kvm = p->kvm;
+ if (!kvm_is_radix(kvm))
+ return 0;
+
+ ret = mutex_lock_interruptible(&p->mutex);
+ if (ret)
+ return ret;
+
+ if (p->chars_left) {
+ n = p->chars_left;
+ if (n > len)
+ n = len;
+ r = copy_to_user(buf, p->buf + p->buf_index, n);
+ n -= r;
+ p->chars_left -= n;
+ p->buf_index += n;
+ buf += n;
+ len -= n;
+ ret = n;
+ if (r) {
+ if (!n)
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ gpa = p->gpa;
+ nested = NULL;
+ pgt = NULL;
+ while (len != 0 && p->lpid >= 0) {
+ if (gpa >= RADIX_PGTABLE_RANGE) {
+ gpa = 0;
+ pgt = NULL;
+ if (nested) {
+ kvmhv_put_nested(nested);
+ nested = NULL;
+ }
+ p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
+ p->hdr = 0;
+ if (p->lpid < 0)
+ break;
+ }
+ if (!pgt) {
+ if (p->lpid == 0) {
+ pgt = kvm->arch.pgtable;
+ } else {
+ nested = kvmhv_get_nested(kvm, p->lpid, false);
+ if (!nested) {
+ gpa = RADIX_PGTABLE_RANGE;
+ continue;
+ }
+ pgt = nested->shadow_pgtable;
+ }
+ }
+ n = 0;
+ if (!p->hdr) {
+ if (p->lpid > 0)
+ n = scnprintf(p->buf, sizeof(p->buf),
+ "\nNested LPID %d: ", p->lpid);
+ n += scnprintf(p->buf + n, sizeof(p->buf) - n,
+ "pgdir: %lx\n", (unsigned long)pgt);
+ p->hdr = 1;
+ goto copy;
+ }
+
+ pgdp = pgt + pgd_index(gpa);
+ pgd = READ_ONCE(*pgdp);
+ if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
+ gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
+ continue;
+ }
+
+ pudp = pud_offset(&pgd, gpa);
+ pud = READ_ONCE(*pudp);
+ if (!(pud_val(pud) & _PAGE_PRESENT)) {
+ gpa = (gpa & PUD_MASK) + PUD_SIZE;
+ continue;
+ }
+ if (pud_val(pud) & _PAGE_PTE) {
+ pte = pud_val(pud);
+ shift = PUD_SHIFT;
+ goto leaf;
+ }
+
+ pmdp = pmd_offset(&pud, gpa);
+ pmd = READ_ONCE(*pmdp);
+ if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
+ gpa = (gpa & PMD_MASK) + PMD_SIZE;
+ continue;
+ }
+ if (pmd_val(pmd) & _PAGE_PTE) {
+ pte = pmd_val(pmd);
+ shift = PMD_SHIFT;
+ goto leaf;
+ }
+
+ ptep = pte_offset_kernel(&pmd, gpa);
+ pte = pte_val(READ_ONCE(*ptep));
+ if (!(pte & _PAGE_PRESENT)) {
+ gpa += PAGE_SIZE;
+ continue;
+ }
+ shift = PAGE_SHIFT;
+ leaf:
+ n = scnprintf(p->buf, sizeof(p->buf),
+ " %lx: %lx %d\n", gpa, pte, shift);
+ gpa += 1ul << shift;
+ copy:
+ p->chars_left = n;
+ if (n > len)
+ n = len;
+ r = copy_to_user(buf, p->buf, n);
+ n -= r;
+ p->chars_left -= n;
+ p->buf_index = n;
+ buf += n;
+ len -= n;
+ ret += n;
+ if (r) {
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
+ }
+ p->gpa = gpa;
+ if (nested)
+ kvmhv_put_nested(nested);
+
+ out:
+ mutex_unlock(&p->mutex);
+ return ret;
+}
+
+static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -EACCES;
+}
+
+static const struct file_operations debugfs_radix_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_radix_open,
+ .release = debugfs_radix_release,
+ .read = debugfs_radix_read,
+ .write = debugfs_radix_write,
+ .llseek = generic_file_llseek,
+};
+
+void kvmhv_radix_debugfs_init(struct kvm *kvm)
+{
+ kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
+ kvm->arch.debugfs_dir, kvm,
+ &debugfs_radix_fops);
+}
+
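Once a VM is running, the new file reads like any other debugfs file. A hedged userspace sketch (the "<pid>-<fd>" directory naming for per-VM debugfs directories and a mounted debugfs are assumptions about the environment):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Usage: ./dump /sys/kernel/debug/kvm/<pid>-<fd>/radix */
    int main(int argc, char **argv)
    {
            char buf[4096];
            ssize_t n;
            int fd;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);  /* " gpa: pte shift" lines */
            close(fd);
            return 0;
    }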
int kvmppc_radix_init(void)
{
unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 9a3f2646ecc7..62a8d03ba7e9 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -363,6 +363,40 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
return ret;
}
+static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
+ unsigned long tce)
+{
+ unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+ enum dma_data_direction dir = iommu_tce_direction(tce);
+ struct kvmppc_spapr_tce_iommu_table *stit;
+ unsigned long ua = 0;
+
+ /* Allow userspace to poison TCE table */
+ if (dir == DMA_NONE)
+ return H_SUCCESS;
+
+ if (iommu_tce_check_gpa(stt->page_shift, gpa))
+ return H_TOO_HARD;
+
+ if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+ return H_TOO_HARD;
+
+ list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
+ unsigned long hpa = 0;
+ struct mm_iommu_table_group_mem_t *mem;
+ long shift = stit->tbl->it_page_shift;
+
+ mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
+ if (!mem)
+ return H_TOO_HARD;
+
+ if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
+ return H_TOO_HARD;
+ }
+
+ return H_SUCCESS;
+}
+
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
unsigned long hpa = 0;
@@ -376,11 +410,10 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
{
struct mm_iommu_table_group_mem_t *mem = NULL;
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
if (!pua)
- /* it_userspace allocation might be delayed */
- return H_TOO_HARD;
+ return H_SUCCESS;
mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
if (!mem)
@@ -401,7 +434,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
long ret;
if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
- return H_HARDWARE;
+ return H_TOO_HARD;
if (dir == DMA_NONE)
return H_SUCCESS;
@@ -449,15 +482,15 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
return H_TOO_HARD;
if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
- return H_HARDWARE;
+ return H_TOO_HARD;
if (mm_iommu_mapped_inc(mem))
- return H_CLOSED;
+ return H_TOO_HARD;
ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
if (WARN_ON_ONCE(ret)) {
mm_iommu_mapped_dec(mem);
- return H_HARDWARE;
+ return H_TOO_HARD;
}
if (dir != DMA_NONE)
@@ -517,8 +550,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
- tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
+ if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
ret = H_PARAMETER;
goto unlock_exit;
}
@@ -533,14 +565,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
entry, ua, dir);
- if (ret == H_SUCCESS)
- continue;
-
- if (ret == H_TOO_HARD)
+ if (ret != H_SUCCESS) {
+ kvmppc_clear_tce(stit->tbl, entry);
goto unlock_exit;
-
- WARN_ON_ONCE(1);
- kvmppc_clear_tce(stit->tbl, entry);
+ }
}
kvmppc_tce_put(stt, entry, tce);
@@ -583,7 +611,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
return ret;
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
ret = H_TOO_HARD;
goto unlock_exit;
}
@@ -599,10 +627,26 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
ret = kvmppc_tce_validate(stt, tce);
if (ret != H_SUCCESS)
goto unlock_exit;
+ }
+
+ for (i = 0; i < npages; ++i) {
+ /*
+ * This looks unsafe, because we validate, then regrab
+ * the TCE from userspace which could have been changed by
+ * another thread.
+ *
+ * But it actually is safe, because the relevant checks will be
+ * re-executed in the following code. If userspace tries to
+ * change this dodgily it will result in a messier failure mode
+ * but won't threaten the host.
+ */
+ if (get_user(tce, tces + i)) {
+ ret = H_TOO_HARD;
+ goto unlock_exit;
+ }
+ tce = be64_to_cpu(tce);
- if (kvmppc_gpa_to_ua(vcpu->kvm,
- tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
- &ua, NULL))
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -610,14 +654,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
stit->tbl, entry + i, ua,
iommu_tce_direction(tce));
- if (ret == H_SUCCESS)
- continue;
-
- if (ret == H_TOO_HARD)
+ if (ret != H_SUCCESS) {
+ kvmppc_clear_tce(stit->tbl, entry);
goto unlock_exit;
-
- WARN_ON_ONCE(1);
- kvmppc_clear_tce(stit->tbl, entry);
+ }
}
kvmppc_tce_put(stt, entry + i, tce);
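Reduced to a sketch with toy stand-ins for the real checks, the two-pass shape is: validate every entry first, then re-read each one and apply it, relying on the apply path to reject anything validation would have rejected, so a racing writer can only make the hcall fail:

    #include <stdbool.h>
    #include <stddef.h>

    /* Toy stand-ins (hypothetical) for the TCE checks in the hunk above. */
    static bool validate(unsigned long tce) { (void)tce; return true; }
    static bool apply(unsigned long tce) { (void)tce; return true; }
    static unsigned long read_entry(const unsigned long *p) { return *p; }

    static int put_indirect(const unsigned long *utces, size_t npages)
    {
            size_t i;

            for (i = 0; i < npages; ++i)
                    if (!validate(read_entry(&utces[i])))
                            return -1;
            for (i = 0; i < npages; ++i) {
                    /* May differ from what pass 1 saw; apply() re-checks. */
                    unsigned long tce = read_entry(&utces[i]);

                    if (!apply(tce))
                            return -1;
            }
            return 0;
    }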
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 506a4d400458..2206bc729b9a 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -87,6 +87,7 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
* Validates TCE address.
* At the moment flags and page mask are validated.
@@ -94,14 +95,14 @@ EXPORT_SYMBOL_GPL(kvmppc_find_table);
* to the table and user space is supposed to process them), we can skip
* checking other things (such as TCE is a guest RAM address or the page
* was actually allocated).
- *
- * WARNING: This will be called in real-mode on HV KVM and virtual
- * mode on PR KVM
*/
-long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
+static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
+ unsigned long tce)
{
unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
enum dma_data_direction dir = iommu_tce_direction(tce);
+ struct kvmppc_spapr_tce_iommu_table *stit;
+ unsigned long ua = 0;
/* Allow userspace to poison TCE table */
if (dir == DMA_NONE)
@@ -110,9 +111,25 @@ long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
if (iommu_tce_check_gpa(stt->page_shift, gpa))
return H_PARAMETER;
+ if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+ return H_TOO_HARD;
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ unsigned long hpa = 0;
+ struct mm_iommu_table_group_mem_t *mem;
+ long shift = stit->tbl->it_page_shift;
+
+ mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
+ if (!mem)
+ return H_TOO_HARD;
+
+ if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
+ return H_TOO_HARD;
+ }
+
return H_SUCCESS;
}
-EXPORT_SYMBOL_GPL(kvmppc_tce_validate);
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
/* Note on the use of page_address() in real mode,
*
@@ -164,10 +181,10 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);
-long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
unsigned long *ua, unsigned long **prmap)
{
- unsigned long gfn = gpa >> PAGE_SHIFT;
+ unsigned long gfn = tce >> PAGE_SHIFT;
struct kvm_memory_slot *memslot;
memslot = search_memslots(kvm_memslots(kvm), gfn);
@@ -175,7 +192,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
return -EINVAL;
*ua = __gfn_to_hva_memslot(memslot, gfn) |
- (gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+ (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
if (prmap)
@@ -184,15 +201,38 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
return 0;
}
-EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
+EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
+static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
+ unsigned long entry, unsigned long *hpa,
+ enum dma_data_direction *direction)
+{
+ long ret;
+
+ ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+
+ if (!ret && ((*direction == DMA_FROM_DEVICE) ||
+ (*direction == DMA_BIDIRECTIONAL))) {
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
+ /*
+ * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
+ * calling this so we still get here a valid UA.
+ */
+ if (pua && *pua)
+ mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
+ }
+
+ return ret;
+}
+
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
+ unsigned long entry)
{
unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
- iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
}
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -200,7 +240,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
{
struct mm_iommu_table_group_mem_t *mem = NULL;
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
if (!pua)
/* it_userspace allocation might be delayed */
@@ -224,7 +264,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
unsigned long hpa = 0;
long ret;
- if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
+ if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
/*
* real mode xchg can fail if struct page crosses
* a page boundary
@@ -236,7 +276,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
if (ret)
- iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
return ret;
}
@@ -264,7 +304,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
{
long ret;
unsigned long hpa = 0;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
struct mm_iommu_table_group_mem_t *mem;
if (!pua)
@@ -277,12 +317,12 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
&hpa)))
- return H_HARDWARE;
+ return H_TOO_HARD;
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
- return H_CLOSED;
+ return H_TOO_HARD;
- ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+ ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
if (ret) {
mm_iommu_mapped_dec(mem);
/*
@@ -345,13 +385,12 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
if (ret != H_SUCCESS)
return ret;
- ret = kvmppc_tce_validate(stt, tce);
+ ret = kvmppc_rm_tce_validate(stt, tce);
if (ret != H_SUCCESS)
return ret;
dir = iommu_tce_direction(tce);
- if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
- tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
+ if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
return H_PARAMETER;
entry = ioba >> stt->page_shift;
@@ -364,14 +403,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry, ua, dir);
- if (ret == H_SUCCESS)
- continue;
-
- if (ret == H_TOO_HARD)
+ if (ret != H_SUCCESS) {
+ kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
return ret;
-
- WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(stit->tbl, entry);
+ }
}
kvmppc_tce_put(stt, entry, tce);
@@ -457,7 +492,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
*/
struct mm_iommu_table_group_mem_t *mem;
- if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
return H_TOO_HARD;
mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -473,12 +508,12 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
* We do not require memory to be preregistered in this case
* so lock rmap and do __find_linux_pte_or_hugepte().
*/
- if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
return H_TOO_HARD;
rmap = (void *) vmalloc_to_phys(rmap);
if (WARN_ON_ONCE_RM(!rmap))
- return H_HARDWARE;
+ return H_TOO_HARD;
/*
* Synchronize with the MMU notifier callbacks in
@@ -498,14 +533,16 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
for (i = 0; i < npages; ++i) {
unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
- ret = kvmppc_tce_validate(stt, tce);
+ ret = kvmppc_rm_tce_validate(stt, tce);
if (ret != H_SUCCESS)
goto unlock_exit;
+ }
+
+ for (i = 0; i < npages; ++i) {
+ unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
ua = 0;
- if (kvmppc_gpa_to_ua(vcpu->kvm,
- tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
- &ua, NULL))
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -513,14 +550,11 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
stit->tbl, entry + i, ua,
iommu_tce_direction(tce));
- if (ret == H_SUCCESS)
- continue;
-
- if (ret == H_TOO_HARD)
+ if (ret != H_SUCCESS) {
+ kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
+ entry);
goto unlock_exit;
-
- WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(stit->tbl, entry);
+ }
}
kvmppc_tce_put(stt, entry + i, tce);
@@ -571,7 +605,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
return ret;
WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(stit->tbl, entry);
+ kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
}
}
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 36b11c5a0dbb..8c7e933e942e 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -36,7 +36,6 @@
#define OP_31_XOP_MTSR 210
#define OP_31_XOP_MTSRIN 242
#define OP_31_XOP_TLBIEL 274
-#define OP_31_XOP_TLBIE 306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1 308
#define OP_31_XOP_SLBMTE 402
@@ -110,7 +109,7 @@ static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
vcpu->arch.tar_tm = vcpu->arch.tar;
vcpu->arch.lr_tm = vcpu->arch.regs.link;
- vcpu->arch.cr_tm = vcpu->arch.cr;
+ vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
vcpu->arch.xer_tm = vcpu->arch.regs.xer;
vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
@@ -129,7 +128,7 @@ static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
vcpu->arch.tar = vcpu->arch.tar_tm;
vcpu->arch.regs.link = vcpu->arch.lr_tm;
- vcpu->arch.cr = vcpu->arch.cr_tm;
+ vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
vcpu->arch.regs.xer = vcpu->arch.xer_tm;
vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}
@@ -141,7 +140,7 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
uint64_t texasr;
/* CR0 = 0 | MSR[TS] | 0 */
- vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
<< CR0_SHIFT);
@@ -220,7 +219,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
tm_abort(ra_val);
/* CR0 = 0 | MSR[TS] | 0 */
- vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
<< CR0_SHIFT);
@@ -494,8 +493,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
preempt_disable();
- vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
- (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));
+ vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
+ (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));
vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3e3a71594e63..bf8def2159c3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -50,6 +50,7 @@
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-prototypes.h>
+#include <asm/archrandom.h>
#include <asm/debug.h>
#include <asm/disassemble.h>
#include <asm/cputable.h>
@@ -104,6 +105,10 @@ static bool indep_threads_mode = true;
module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");
+static bool one_vm_per_core;
+module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires indep_threads_mode=N)");
+
#ifdef CONFIG_KVM_XICS
static struct kernel_param_ops module_param_ops = {
.set = param_set_int,
@@ -117,6 +122,16 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif
+/* If set, guests are allowed to create and control nested guests */
+static bool nested = true;
+module_param(nested, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");
+
+static inline bool nesting_enabled(struct kvm *kvm)
+{
+ return kvm->arch.nested_enable && kvm_is_radix(kvm);
+}
+
/* If set, the threads on each CPU core have to be in the same MMU mode */
static bool no_mixing_hpt_and_radix;
@@ -173,6 +188,10 @@ static bool kvmppc_ipi_thread(int cpu)
{
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+ /* If we're a nested hypervisor, fall back to ordinary IPIs for now */
+ if (kvmhv_on_pseries())
+ return false;
+
/* On POWER9 we can use msgsnd to IPI any cpu */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
msg |= get_hard_smp_processor_id(cpu);
@@ -410,8 +429,8 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
- pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
- vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
+ pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n",
+ vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
pr_err("fault dar = %.16lx dsisr = %.8x\n",
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
@@ -730,8 +749,7 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
/*
* Ensure that the read of vcore->dpdes comes after the read
* of vcpu->doorbell_request. This barrier matches the
- * lwsync in book3s_hv_rmhandlers.S just before the
- * fast_guest_return label.
+ * smp_wmb() in kvmppc_guest_entry_inject_int().
*/
smp_rmb();
vc = vcpu->arch.vcore;
@@ -912,6 +930,19 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
break;
}
return RESUME_HOST;
+ case H_SET_DABR:
+ ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4));
+ break;
+ case H_SET_XDABR:
+ ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ break;
+ case H_GET_TCE:
+ ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
case H_PUT_TCE:
ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
@@ -935,6 +966,32 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
+ case H_RANDOM:
+ if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
+ ret = H_HARDWARE;
+ break;
+
+ case H_SET_PARTITION_TABLE:
+ ret = H_FUNCTION;
+ if (nesting_enabled(vcpu->kvm))
+ ret = kvmhv_set_partition_table(vcpu);
+ break;
+ case H_ENTER_NESTED:
+ ret = H_FUNCTION;
+ if (!nesting_enabled(vcpu->kvm))
+ break;
+ ret = kvmhv_enter_nested_guest(vcpu);
+ if (ret == H_INTERRUPT) {
+ kvmppc_set_gpr(vcpu, 3, 0);
+ return -EINTR;
+ }
+ break;
+ case H_TLB_INVALIDATE:
+ ret = H_FUNCTION;
+ if (nesting_enabled(vcpu->kvm))
+ ret = kvmhv_do_nested_tlbie(vcpu);
+ break;
+
default:
return RESUME_HOST;
}
@@ -943,6 +1000,24 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
+/*
+ * Handle H_CEDE in the nested virtualization case where we haven't
+ * called the real-mode hcall handlers in book3s_hv_rmhandlers.S.
+ * This has to be done early, not in kvmppc_pseries_do_hcall(), so
+ * that the cede logic in kvmhv_run_single_vcpu() works properly.
+ */
+static void kvmppc_nested_cede(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.shregs.msr |= MSR_EE;
+ vcpu->arch.ceded = 1;
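+	/*
+	 * Order the store to ceded before the test of prodded below;
+	 * this pairs with the barrier after prodded is set in the
+	 * H_PROD handler.
+	 */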
+ smp_mb();
+ if (vcpu->arch.prodded) {
+ vcpu->arch.prodded = 0;
+ smp_mb();
+ vcpu->arch.ceded = 0;
+ }
+}
+
static int kvmppc_hcall_impl_hv(unsigned long cmd)
{
switch (cmd) {
@@ -1085,7 +1160,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-/* Called with vcpu->arch.vcore->lock held */
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
@@ -1190,7 +1264,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOK3S_INTERRUPT_H_INST_STORAGE:
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
- vcpu->arch.fault_dsisr = 0;
+ vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
+ DSISR_SRR1_MATCH_64S;
+ if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
r = RESUME_PAGE_FAULT;
break;
/*
@@ -1206,10 +1283,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
swab32(vcpu->arch.emul_inst) :
vcpu->arch.emul_inst;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
- /* Need vcore unlocked to call kvmppc_get_last_inst */
- spin_unlock(&vcpu->arch.vcore->lock);
r = kvmppc_emulate_debug_inst(run, vcpu);
- spin_lock(&vcpu->arch.vcore->lock);
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -1225,12 +1299,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
r = EMULATE_FAIL;
if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
- cpu_has_feature(CPU_FTR_ARCH_300)) {
- /* Need vcore unlocked to call kvmppc_get_last_inst */
- spin_unlock(&vcpu->arch.vcore->lock);
+ cpu_has_feature(CPU_FTR_ARCH_300))
r = kvmppc_emulate_doorbell_instr(vcpu);
- spin_lock(&vcpu->arch.vcore->lock);
- }
if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -1265,6 +1335,104 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
return r;
}
+static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
+{
+ int r;
+ int srcu_idx;
+
+ vcpu->stat.sum_exits++;
+
+ /*
+ * This can happen if an interrupt occurs in the last stages
+ * of guest entry or the first stages of guest exit (i.e. after
+ * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
+ * and before setting it to KVM_GUEST_MODE_HOST_HV).
+ * That can happen due to a bug, or due to a machine check
+ * occurring at just the wrong time.
+ */
+ if (vcpu->arch.shregs.msr & MSR_HV) {
+ pr_emerg("KVM trap in HV mode while nested!\n");
+ pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+ vcpu->arch.trap, kvmppc_get_pc(vcpu),
+ vcpu->arch.shregs.msr);
+ kvmppc_dump_regs(vcpu);
+ return RESUME_HOST;
+ }
+ switch (vcpu->arch.trap) {
+ /* We're good on these - the host merely wanted to get our attention */
+ case BOOK3S_INTERRUPT_HV_DECREMENTER:
+ vcpu->stat.dec_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_EXTERNAL:
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_HOST;
+ break;
+ case BOOK3S_INTERRUPT_H_DOORBELL:
+ case BOOK3S_INTERRUPT_H_VIRT:
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+	/* SR/HMI/PMI are HV interrupts the host has already handled; resume the guest. */
+ case BOOK3S_INTERRUPT_HMI:
+ case BOOK3S_INTERRUPT_PERFMON:
+ case BOOK3S_INTERRUPT_SYSTEM_RESET:
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_MACHINE_CHECK:
+ /* Pass the machine check to the L1 guest */
+ r = RESUME_HOST;
+ /* Print the MCE event to host console. */
+ machine_check_print_event_info(&vcpu->arch.mce_evt, false);
+ break;
+ /*
+ * We get these next two if the guest accesses a page which it thinks
+ * it has mapped but which is not actually present, either because
+	 * it is for an emulated I/O device or because the corresponding
+ * host page has been paged out.
+ */
+ case BOOK3S_INTERRUPT_H_DATA_STORAGE:
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ r = kvmhv_nested_page_fault(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+ break;
+ case BOOK3S_INTERRUPT_H_INST_STORAGE:
+ vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
+ vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
+ DSISR_SRR1_MATCH_64S;
+ if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ r = kvmhv_nested_page_fault(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+ break;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ case BOOK3S_INTERRUPT_HV_SOFTPATCH:
+ /*
+ * This occurs for various TM-related instructions that
+ * we need to emulate on POWER9 DD2.2. We have already
+ * handled the cases where the guest was in real-suspend
+ * mode and was transitioning to transactional state.
+ */
+ r = kvmhv_p9_tm_emulation(vcpu);
+ break;
+#endif
+
+ case BOOK3S_INTERRUPT_HV_RM_HARD:
+ vcpu->arch.trap = 0;
+ r = RESUME_GUEST;
+ if (!xive_enabled())
+ kvmppc_xics_rm_complete(vcpu, 0);
+ break;
+ default:
+ r = RESUME_HOST;
+ break;
+ }
+
+ return r;
+}
+
static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
@@ -1555,6 +1723,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_ONLINE:
*val = get_reg_val(id, vcpu->arch.online);
break;
+ case KVM_REG_PPC_PTCR:
+ *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
+ break;
default:
r = -EINVAL;
break;
@@ -1786,6 +1957,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
atomic_dec(&vcpu->arch.vcore->online_count);
vcpu->arch.online = i;
break;
+ case KVM_REG_PPC_PTCR:
+ vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
+ break;
default:
r = -EINVAL;
break;
@@ -2019,15 +2193,18 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
* Set the default HFSCR for the guest from the host value.
* This value is only used on POWER9.
* On POWER9, we want to virtualize the doorbell facility, so we
- * turn off the HFSCR bit, which causes those instructions to trap.
+ * don't set the HFSCR_MSGP bit, and that causes those instructions
+ * to trap and then we emulate them.
*/
- vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
- if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
+ HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
+ if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ vcpu->arch.hfscr |= HFSCR_TM;
+ }
+ if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;
- else if (!cpu_has_feature(CPU_FTR_TM_COMP))
- vcpu->arch.hfscr &= ~HFSCR_TM;
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- vcpu->arch.hfscr &= ~HFSCR_MSGP;
kvmppc_mmu_book3s_hv_init(vcpu);
@@ -2242,10 +2419,18 @@ static void kvmppc_release_hwthread(int cpu)
static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
{
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ cpumask_t *cpu_in_guest;
int i;
cpu = cpu_first_thread_sibling(cpu);
- cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
+ if (nested) {
+ cpumask_set_cpu(cpu, &nested->need_tlb_flush);
+ cpu_in_guest = &nested->cpu_in_guest;
+ } else {
+ cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
+ cpu_in_guest = &kvm->arch.cpu_in_guest;
+ }
/*
* Make sure setting of bit in need_tlb_flush precedes
* testing of cpu_in_guest bits. The matching barrier on
@@ -2253,13 +2438,23 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
*/
smp_mb();
for (i = 0; i < threads_per_core; ++i)
- if (cpumask_test_cpu(cpu + i, &kvm->arch.cpu_in_guest))
+ if (cpumask_test_cpu(cpu + i, cpu_in_guest))
smp_call_function_single(cpu + i, do_nothing, NULL, 1);
}
static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
{
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
struct kvm *kvm = vcpu->kvm;
+ int prev_cpu;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return;
+
+ if (nested)
+ prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
+ else
+ prev_cpu = vcpu->arch.prev_cpu;
/*
* With radix, the guest can do TLB invalidations itself,
@@ -2273,12 +2468,46 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
* ran to flush the TLB. The TLB is shared between threads,
* so we use a single bit in .need_tlb_flush for all 4 threads.
*/
- if (vcpu->arch.prev_cpu != pcpu) {
- if (vcpu->arch.prev_cpu >= 0 &&
- cpu_first_thread_sibling(vcpu->arch.prev_cpu) !=
+ if (prev_cpu != pcpu) {
+ if (prev_cpu >= 0 &&
+ cpu_first_thread_sibling(prev_cpu) !=
cpu_first_thread_sibling(pcpu))
- radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu);
- vcpu->arch.prev_cpu = pcpu;
+ radix_flush_cpu(kvm, prev_cpu, vcpu);
+ if (nested)
+ nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
+ else
+ vcpu->arch.prev_cpu = pcpu;
+ }
+}
+
+static void kvmppc_radix_check_need_tlb_flush(struct kvm *kvm, int pcpu,
+ struct kvm_nested_guest *nested)
+{
+ cpumask_t *need_tlb_flush;
+ int lpid;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return;
+
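+	/*
+	 * The threads of a POWER9 core share one TLB, so the flush flag
+	 * is tracked on the first thread of the core.
+	 */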
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ pcpu &= ~0x3UL;
+
+ if (nested) {
+ lpid = nested->shadow_lpid;
+ need_tlb_flush = &nested->need_tlb_flush;
+ } else {
+ lpid = kvm->arch.lpid;
+ need_tlb_flush = &kvm->arch.need_tlb_flush;
+ }
+
+ mtspr(SPRN_LPID, lpid);
+ isync();
+ smp_mb();
+
+ if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
+ radix__local_flush_tlb_lpid_guest(lpid);
+ /* Clear the bit after the TLB flush */
+ cpumask_clear_cpu(pcpu, need_tlb_flush);
}
}
@@ -2493,6 +2722,10 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return false;
+ /* In one_vm_per_core mode, require all vcores to be from the same vm */
+ if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
+ return false;
+
/* Some POWER9 chips require all threads to be in the same MMU mode */
if (no_mixing_hpt_and_radix &&
kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm))
@@ -2600,6 +2833,14 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
spin_lock(&vc->lock);
now = get_tb();
for_each_runnable_thread(i, vcpu, vc) {
+ /*
+ * It's safe to unlock the vcore in the loop here, because
+ * for_each_runnable_thread() is safe against removal of
+ * the vcpu, and the vcore state is VCORE_EXITING here,
+ * so any vcpus becoming runnable will have their arch.trap
+ * set to zero and can't actually run in the guest.
+ */
+ spin_unlock(&vc->lock);
/* cancel pending dec exception if dec is positive */
if (now < vcpu->arch.dec_expires &&
kvmppc_core_pending_dec(vcpu))
@@ -2615,6 +2856,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
vcpu->arch.ret = ret;
vcpu->arch.trap = 0;
+ spin_lock(&vc->lock);
if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
if (vcpu->arch.pending_exceptions)
kvmppc_core_prepare_to_enter(vcpu);
@@ -2963,8 +3205,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
spin_unlock(&core_info.vc[sub]->lock);
if (kvm_is_radix(vc->kvm)) {
- int tmp = pcpu;
-
/*
* Do we need to flush the process scoped TLB for the LPAR?
*
@@ -2975,17 +3215,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
*
* Hash must be flushed in realmode in order to use tlbiel.
*/
- mtspr(SPRN_LPID, vc->kvm->arch.lpid);
- isync();
-
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- tmp &= ~0x3UL;
-
- if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) {
- radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid);
- /* Clear the bit after the TLB flush */
- cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush);
- }
+ kvmppc_radix_check_need_tlb_flush(vc->kvm, pcpu, NULL);
}
/*
@@ -3080,6 +3310,300 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
}
/*
+ * Load up hypervisor-mode registers on P9.
+ */
+static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ s64 hdec;
+ u64 tb, purr, spurr;
+ int trap;
+ unsigned long host_hfscr = mfspr(SPRN_HFSCR);
+ unsigned long host_ciabr = mfspr(SPRN_CIABR);
+ unsigned long host_dawr = mfspr(SPRN_DAWR);
+ unsigned long host_dawrx = mfspr(SPRN_DAWRX);
+ unsigned long host_psscr = mfspr(SPRN_PSSCR);
+ unsigned long host_pidr = mfspr(SPRN_PID);
+
+ hdec = time_limit - mftb();
+ if (hdec < 0)
+ return BOOK3S_INTERRUPT_HV_DECREMENTER;
+ mtspr(SPRN_HDEC, hdec);
+
+ if (vc->tb_offset) {
+ u64 new_tb = mftb() + vc->tb_offset;
+ mtspr(SPRN_TBU40, new_tb);
+ tb = mftb();
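+		/*
+		 * TBU40 sets only the upper 40 bits of the timebase; if the
+		 * low 24 bits are now behind those of new_tb, bump the upper
+		 * bits by one unit of the low field to compensate.
+		 */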
+ if ((tb & 0xffffff) < (new_tb & 0xffffff))
+ mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ vc->tb_offset_applied = vc->tb_offset;
+ }
+
+ if (vc->pcr)
+ mtspr(SPRN_PCR, vc->pcr);
+ mtspr(SPRN_DPDES, vc->dpdes);
+ mtspr(SPRN_VTB, vc->vtb);
+
+ local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
+ local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
+ mtspr(SPRN_PURR, vcpu->arch.purr);
+ mtspr(SPRN_SPURR, vcpu->arch.spurr);
+
+ if (cpu_has_feature(CPU_FTR_DAWR)) {
+ mtspr(SPRN_DAWR, vcpu->arch.dawr);
+ mtspr(SPRN_DAWRX, vcpu->arch.dawrx);
+ }
+ mtspr(SPRN_CIABR, vcpu->arch.ciabr);
+ mtspr(SPRN_IC, vcpu->arch.ic);
+ mtspr(SPRN_PID, vcpu->arch.pid);
+
+ mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+
+ mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
+
+ mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
+ mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
+ mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
+ mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
+
+ mtspr(SPRN_AMOR, ~0UL);
+
+ mtspr(SPRN_LPCR, lpcr);
+ isync();
+
+ kvmppc_xive_push_vcpu(vcpu);
+
+ mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
+ mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
+
+ trap = __kvmhv_vcpu_entry_p9(vcpu);
+
+ /* Advance host PURR/SPURR by the amount used by guest */
+ purr = mfspr(SPRN_PURR);
+ spurr = mfspr(SPRN_SPURR);
+ mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
+ purr - vcpu->arch.purr);
+ mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
+ spurr - vcpu->arch.spurr);
+ vcpu->arch.purr = purr;
+ vcpu->arch.spurr = spurr;
+
+ vcpu->arch.ic = mfspr(SPRN_IC);
+ vcpu->arch.pid = mfspr(SPRN_PID);
+ vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
+
+ vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
+ vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
+ vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
+ vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+
+ mtspr(SPRN_PSSCR, host_psscr);
+ mtspr(SPRN_HFSCR, host_hfscr);
+ mtspr(SPRN_CIABR, host_ciabr);
+ mtspr(SPRN_DAWR, host_dawr);
+ mtspr(SPRN_DAWRX, host_dawrx);
+ mtspr(SPRN_PID, host_pidr);
+
+ /*
+	 * Since this is radix, do an eieio; tlbsync; ptesync sequence in
+ * case we interrupted the guest between a tlbie and a ptesync.
+ */
+ asm volatile("eieio; tlbsync; ptesync");
+
+ mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */
+ isync();
+
+ vc->dpdes = mfspr(SPRN_DPDES);
+ vc->vtb = mfspr(SPRN_VTB);
+ mtspr(SPRN_DPDES, 0);
+ if (vc->pcr)
+ mtspr(SPRN_PCR, 0);
+
+ if (vc->tb_offset_applied) {
+ u64 new_tb = mftb() - vc->tb_offset_applied;
+ mtspr(SPRN_TBU40, new_tb);
+ tb = mftb();
+ if ((tb & 0xffffff) < (new_tb & 0xffffff))
+ mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ vc->tb_offset_applied = 0;
+ }
+
+ mtspr(SPRN_HDEC, 0x7fffffff);
+ mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+
+ return trap;
+}
+
+/*
+ * Virtual-mode guest entry for POWER9 and later when the host and
+ * guest are both using the radix MMU. The LPIDR has already been set.
+ */
+int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ unsigned long host_dscr = mfspr(SPRN_DSCR);
+ unsigned long host_tidr = mfspr(SPRN_TIDR);
+ unsigned long host_iamr = mfspr(SPRN_IAMR);
+ s64 dec;
+ u64 tb;
+ int trap, save_pmu;
+
+ dec = mfspr(SPRN_DEC);
+ tb = mftb();
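+	/*
+	 * Don't bother entering the guest if the host decrementer is about
+	 * to fire; 512 ticks leaves some margin for entry/exit overhead.
+	 */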
+ if (dec < 512)
+ return BOOK3S_INTERRUPT_HV_DECREMENTER;
+ local_paca->kvm_hstate.dec_expires = dec + tb;
+ if (local_paca->kvm_hstate.dec_expires < time_limit)
+ time_limit = local_paca->kvm_hstate.dec_expires;
+
+ vcpu->arch.ceded = 0;
+
+ kvmhv_save_host_pmu(); /* saves it to PACA kvm_hstate */
+
+ kvmppc_subcore_enter_guest();
+
+ vc->entry_exit_map = 1;
+ vc->in_guest = 1;
+
+ if (vcpu->arch.vpa.pinned_addr) {
+ struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
+ u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
+ lp->yield_count = cpu_to_be32(yield_count);
+ vcpu->arch.vpa.dirty = 1;
+ }
+
+ if (cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+
+ kvmhv_load_guest_pmu(vcpu);
+
+ msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
+ load_fp_state(&vcpu->arch.fp);
+#ifdef CONFIG_ALTIVEC
+ load_vr_state(&vcpu->arch.vr);
+#endif
+
+ mtspr(SPRN_DSCR, vcpu->arch.dscr);
+ mtspr(SPRN_IAMR, vcpu->arch.iamr);
+ mtspr(SPRN_PSPB, vcpu->arch.pspb);
+ mtspr(SPRN_FSCR, vcpu->arch.fscr);
+ mtspr(SPRN_TAR, vcpu->arch.tar);
+ mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+ mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+ mtspr(SPRN_BESCR, vcpu->arch.bescr);
+ mtspr(SPRN_WORT, vcpu->arch.wort);
+ mtspr(SPRN_TIDR, vcpu->arch.tid);
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+ mtspr(SPRN_AMR, vcpu->arch.amr);
+ mtspr(SPRN_UAMOR, vcpu->arch.uamor);
+
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+
+ mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
+
+ if (kvmhv_on_pseries()) {
+ /* call our hypervisor to load up HV regs and go */
+ struct hv_guest_state hvregs;
+
+ kvmhv_save_hv_regs(vcpu, &hvregs);
+ hvregs.lpcr = lpcr;
+ vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
+ hvregs.version = HV_GUEST_STATE_VERSION;
+ if (vcpu->arch.nested) {
+ hvregs.lpid = vcpu->arch.nested->shadow_lpid;
+ hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
+ } else {
+ hvregs.lpid = vcpu->kvm->arch.lpid;
+ hvregs.vcpu_token = vcpu->vcpu_id;
+ }
+ hvregs.hdec_expiry = time_limit;
+ trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
+ __pa(&vcpu->arch.regs));
+ kvmhv_restore_hv_return_state(vcpu, &hvregs);
+ vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
+ vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+ vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+
+ /* H_CEDE has to be handled now, not later */
+ if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+ kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
+ kvmppc_nested_cede(vcpu);
+ trap = 0;
+ }
+ } else {
+ trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
+ }
+
+ vcpu->arch.slb_max = 0;
+ dec = mfspr(SPRN_DEC);
+ tb = mftb();
+ vcpu->arch.dec_expires = dec + tb;
+ vcpu->cpu = -1;
+ vcpu->arch.thread_cpu = -1;
+ vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
+
+ vcpu->arch.iamr = mfspr(SPRN_IAMR);
+ vcpu->arch.pspb = mfspr(SPRN_PSPB);
+ vcpu->arch.fscr = mfspr(SPRN_FSCR);
+ vcpu->arch.tar = mfspr(SPRN_TAR);
+ vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+ vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+ vcpu->arch.bescr = mfspr(SPRN_BESCR);
+ vcpu->arch.wort = mfspr(SPRN_WORT);
+ vcpu->arch.tid = mfspr(SPRN_TIDR);
+ vcpu->arch.amr = mfspr(SPRN_AMR);
+ vcpu->arch.uamor = mfspr(SPRN_UAMOR);
+ vcpu->arch.dscr = mfspr(SPRN_DSCR);
+
+ mtspr(SPRN_PSPB, 0);
+ mtspr(SPRN_WORT, 0);
+ mtspr(SPRN_AMR, 0);
+ mtspr(SPRN_UAMOR, 0);
+ mtspr(SPRN_DSCR, host_dscr);
+ mtspr(SPRN_TIDR, host_tidr);
+ mtspr(SPRN_IAMR, host_iamr);
+ mtspr(SPRN_PSPB, 0);
+
+ msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
+ store_fp_state(&vcpu->arch.fp);
+#ifdef CONFIG_ALTIVEC
+ store_vr_state(&vcpu->arch.vr);
+#endif
+
+ if (cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+
+ save_pmu = 1;
+ if (vcpu->arch.vpa.pinned_addr) {
+ struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
+ u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
+ lp->yield_count = cpu_to_be32(yield_count);
+ vcpu->arch.vpa.dirty = 1;
+ save_pmu = lp->pmcregs_in_use;
+ }
+
+ kvmhv_save_guest_pmu(vcpu, save_pmu);
+
+ vc->entry_exit_map = 0x101;
+ vc->in_guest = 0;
+
+ mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
+
+ kvmhv_load_host_pmu();
+
+ kvmppc_subcore_exit_guest();
+
+ return trap;
+}
+
+/*
* Wait for some other vcpu thread to execute us, and
* wake us up when we need to handle something in the host.
*/
@@ -3256,6 +3780,11 @@ out:
trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
}
+/*
+ * This never fails for a radix guest, as none of the operations it
+ * performs for radix can fail or report failure.
+ * kvmhv_run_single_vcpu() relies on this fact.
+ */
static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
{
int r = 0;
@@ -3405,6 +3934,171 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
return vcpu->arch.ret;
}
+int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
+ struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr)
+{
+ int trap, r, pcpu;
+ int srcu_idx;
+ struct kvmppc_vcore *vc;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+
+ trace_kvmppc_run_vcpu_enter(vcpu);
+
+ kvm_run->exit_reason = 0;
+ vcpu->arch.ret = RESUME_GUEST;
+ vcpu->arch.trap = 0;
+
+ vc = vcpu->arch.vcore;
+ vcpu->arch.ceded = 0;
+ vcpu->arch.run_task = current;
+ vcpu->arch.kvm_run = kvm_run;
+ vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
+ vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
+ vcpu->arch.busy_preempt = TB_NIL;
+ vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
+ vc->runnable_threads[0] = vcpu;
+ vc->n_runnable = 1;
+ vc->runner = vcpu;
+
+ /* See if the MMU is ready to go */
+ if (!kvm->arch.mmu_ready)
+ kvmhv_setup_mmu(vcpu);
+
+ if (need_resched())
+ cond_resched();
+
+ kvmppc_update_vpas(vcpu);
+
+ init_vcore_to_run(vc);
+ vc->preempt_tb = TB_NIL;
+
+ preempt_disable();
+ pcpu = smp_processor_id();
+ vc->pcpu = pcpu;
+ kvmppc_prepare_radix_vcpu(vcpu, pcpu);
+
+ local_irq_disable();
+ hard_irq_disable();
+ if (signal_pending(current))
+ goto sigpend;
+ if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
+ goto out;
+
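+	/*
+	 * For a nested vcpu, pending exceptions, doorbells or interrupts
+	 * have to be delivered by the L1 hypervisor, so return to L1
+	 * instead of injecting them here.
+	 */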
+ if (!nested) {
+ kvmppc_core_prepare_to_enter(vcpu);
+ if (vcpu->arch.doorbell_request) {
+ vc->dpdes = 1;
+ smp_wmb();
+ vcpu->arch.doorbell_request = 0;
+ }
+ if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
+ &vcpu->arch.pending_exceptions))
+ lpcr |= LPCR_MER;
+ } else if (vcpu->arch.pending_exceptions ||
+ vcpu->arch.doorbell_request ||
+ xive_interrupt_pending(vcpu)) {
+ vcpu->arch.ret = RESUME_HOST;
+ goto out;
+ }
+
+ kvmppc_clear_host_core(pcpu);
+
+ local_paca->kvm_hstate.tid = 0;
+ local_paca->kvm_hstate.napping = 0;
+ local_paca->kvm_hstate.kvm_split_mode = NULL;
+ kvmppc_start_thread(vcpu, vc);
+ kvmppc_create_dtl_entry(vcpu, vc);
+ trace_kvm_guest_enter(vcpu);
+
+ vc->vcore_state = VCORE_RUNNING;
+ trace_kvmppc_run_core(vc, 0);
+
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ kvmppc_radix_check_need_tlb_flush(kvm, pcpu, nested);
+
+ trace_hardirqs_on();
+ guest_enter_irqoff();
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+
+ this_cpu_disable_ftrace();
+
+ trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
+ vcpu->arch.trap = trap;
+
+ this_cpu_enable_ftrace();
+
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ mtspr(SPRN_LPID, kvm->arch.host_lpid);
+ isync();
+ }
+
+ trace_hardirqs_off();
+ set_irq_happened(trap);
+
+ kvmppc_set_host_core(pcpu);
+
+ local_irq_enable();
+ guest_exit();
+
+ cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
+
+ preempt_enable();
+
+ /* cancel pending decrementer exception if DEC is now positive */
+ if (get_tb() < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu))
+ kvmppc_core_dequeue_dec(vcpu);
+
+ trace_kvm_guest_exit(vcpu);
+ r = RESUME_GUEST;
+ if (trap) {
+ if (!nested)
+ r = kvmppc_handle_exit_hv(kvm_run, vcpu, current);
+ else
+ r = kvmppc_handle_nested_exit(vcpu);
+ }
+ vcpu->arch.ret = r;
+
+ if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded &&
+ !kvmppc_vcpu_woken(vcpu)) {
+ kvmppc_set_timer(vcpu);
+ while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
+ if (signal_pending(current)) {
+ vcpu->stat.signal_exits++;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ vcpu->arch.ret = -EINTR;
+ break;
+ }
+ spin_lock(&vc->lock);
+ kvmppc_vcore_blocked(vc);
+ spin_unlock(&vc->lock);
+ }
+ }
+ vcpu->arch.ceded = 0;
+
+ vc->vcore_state = VCORE_INACTIVE;
+ trace_kvmppc_run_core(vc, 1);
+
+ done:
+ kvmppc_remove_runnable(vc, vcpu);
+ trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
+
+ return vcpu->arch.ret;
+
+ sigpend:
+ vcpu->stat.signal_exits++;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ vcpu->arch.ret = -EINTR;
+ out:
+ local_irq_enable();
+ preempt_enable();
+ goto done;
+}
+
static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int r;
@@ -3480,7 +4174,20 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
do {
- r = kvmppc_run_vcpu(run, vcpu);
+ /*
+ * The early POWER9 chips that can't mix radix and HPT threads
+ * on the same core also need the workaround for the problem
+ * where the TLB would prefetch entries in the guest exit path
+ * for radix guests using the guest PIDR value and LPID 0.
+ * The workaround is in the old path (kvmppc_run_vcpu())
+ * but not the new path (kvmhv_run_single_vcpu()).
+ */
+ if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
+ !no_mixing_hpt_and_radix)
+ r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
+ vcpu->arch.vcore->lpcr);
+ else
+ r = kvmppc_run_vcpu(run, vcpu);
if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
!(vcpu->arch.shregs.msr & MSR_PR)) {
@@ -3559,6 +4266,10 @@ static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01);
kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L);
+ /* If running as a nested hypervisor, we don't support HPT guests */
+ if (kvmhv_on_pseries())
+ info->flags |= KVM_PPC_NO_HASH;
+
return 0;
}
@@ -3723,8 +4434,7 @@ void kvmppc_setup_partition_table(struct kvm *kvm)
__pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
dw1 = PATB_GR | kvm->arch.process_table;
}
-
- mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1);
+ kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1);
}
/*
@@ -3820,6 +4530,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
{
+ if (nesting_enabled(kvm))
+ kvmhv_release_all_nested(kvm);
kvmppc_free_radix(kvm);
kvmppc_update_lpcr(kvm, LPCR_VPM1,
LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
@@ -3841,6 +4553,7 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
kvmppc_free_hpt(&kvm->arch.hpt);
kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+ kvmppc_rmap_reset(kvm);
kvm->arch.radix = 1;
return 0;
}
@@ -3940,6 +4653,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvmppc_alloc_host_rm_ops();
+ kvmhv_vm_nested_init(kvm);
+
/*
* Since we don't flush the TLB when tearing down a VM,
* and this lpid might have previously been used,
@@ -3958,9 +4673,13 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
/* Init LPCR for virtual RMA mode */
- kvm->arch.host_lpid = mfspr(SPRN_LPID);
- kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
- lpcr &= LPCR_PECE | LPCR_LPES;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ kvm->arch.host_lpid = mfspr(SPRN_LPID);
+ kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+ lpcr &= LPCR_PECE | LPCR_LPES;
+ } else {
+ lpcr = 0;
+ }
lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
LPCR_VPM0 | LPCR_VPM1;
kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
@@ -4027,8 +4746,14 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
* On POWER9, we only need to do this if the "indep_threads_mode"
* module parameter has been set to N.
*/
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- kvm->arch.threads_indep = indep_threads_mode;
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) {
+ pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n");
+ kvm->arch.threads_indep = true;
+ } else {
+ kvm->arch.threads_indep = indep_threads_mode;
+ }
+ }
if (!kvm->arch.threads_indep)
kvm_hv_vm_activated();
@@ -4051,6 +4776,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
snprintf(buf, sizeof(buf), "vm%d", current->pid);
kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
kvmppc_mmu_debugfs_init(kvm);
+ if (radix_enabled())
+ kvmhv_radix_debugfs_init(kvm);
return 0;
}
@@ -4073,13 +4800,21 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvmppc_free_vcores(kvm);
- kvmppc_free_lpid(kvm->arch.lpid);
if (kvm_is_radix(kvm))
kvmppc_free_radix(kvm);
else
kvmppc_free_hpt(&kvm->arch.hpt);
+ /* Perform global invalidation and return lpid to the pool */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (nesting_enabled(kvm))
+ kvmhv_release_all_nested(kvm);
+ kvm->arch.process_table = 0;
+ kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
+ }
+ kvmppc_free_lpid(kvm->arch.lpid);
+
kvmppc_free_pimap(kvm);
}
@@ -4104,11 +4839,15 @@ static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
static int kvmppc_core_check_processor_compat_hv(void)
{
- if (!cpu_has_feature(CPU_FTR_HVMODE) ||
- !cpu_has_feature(CPU_FTR_ARCH_206))
- return -EIO;
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_ARCH_206))
+ return 0;
- return 0;
+ /* POWER9 in radix mode is capable of being a nested hypervisor. */
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
+ return 0;
+
+ return -EIO;
}
#ifdef CONFIG_KVM_XICS
@@ -4426,6 +5165,10 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
if (radix && !radix_enabled())
return -EINVAL;
+ /* If we're a nested hypervisor, we currently only support radix */
+ if (kvmhv_on_pseries() && !radix)
+ return -EINVAL;
+
mutex_lock(&kvm->lock);
if (radix != kvm_is_radix(kvm)) {
if (kvm->arch.mmu_ready) {
@@ -4458,6 +5201,19 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
return err;
}
+static int kvmhv_enable_nested(struct kvm *kvm)
+{
+ if (!nested)
+ return -EPERM;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
+ return -ENODEV;
+
+ /* kvm == NULL means the caller is testing if the capability exists */
+ if (kvm)
+ kvm->arch.nested_enable = true;
+ return 0;
+}
+
static struct kvmppc_ops kvm_ops_hv = {
.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
@@ -4497,6 +5253,7 @@ static struct kvmppc_ops kvm_ops_hv = {
.configure_mmu = kvmhv_configure_mmu,
.get_rmmu_info = kvmhv_get_rmmu_info,
.set_smt_mode = kvmhv_set_smt_mode,
+ .enable_nested = kvmhv_enable_nested,
};
static int kvm_init_subcore_bitmap(void)
@@ -4547,6 +5304,10 @@ static int kvmppc_book3s_init_hv(void)
if (r < 0)
return -ENODEV;
+ r = kvmhv_nested_init();
+ if (r)
+ return r;
+
r = kvm_init_subcore_bitmap();
if (r)
return r;
@@ -4557,7 +5318,8 @@ static int kvmppc_book3s_init_hv(void)
* indirectly, via OPAL.
*/
#ifdef CONFIG_SMP
- if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) {
+ if (!xive_enabled() && !kvmhv_on_pseries() &&
+ !local_paca->kvm_hstate.xics_phys) {
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
@@ -4605,6 +5367,7 @@ static void kvmppc_book3s_exit_hv(void)
if (kvmppc_radix_possible())
kvmppc_radix_exit();
kvmppc_hv_ops = NULL;
+ kvmhv_nested_exit();
}
module_init(kvmppc_book3s_init_hv);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index fc6bb9630a9c..a71e2fc00a4e 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -231,6 +231,15 @@ void kvmhv_rm_send_ipi(int cpu)
void __iomem *xics_phys;
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+ /* For a nested hypervisor, use the XICS via hcall */
+ if (kvmhv_on_pseries()) {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
+ IPI_PRIORITY);
+ return;
+ }
+
/* On POWER9 we can use msgsnd for any destination cpu. */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
msg |= get_hard_smp_processor_id(cpu);
@@ -460,12 +469,19 @@ static long kvmppc_read_one_intr(bool *again)
return 1;
/* Now read the interrupt from the ICP */
- xics_phys = local_paca->kvm_hstate.xics_phys;
- rc = 0;
- if (!xics_phys)
- rc = opal_int_get_xirr(&xirr, false);
- else
- xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
+ if (kvmhv_on_pseries()) {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
+ xirr = cpu_to_be32(retbuf[0]);
+ } else {
+ xics_phys = local_paca->kvm_hstate.xics_phys;
+ rc = 0;
+ if (!xics_phys)
+ rc = opal_int_get_xirr(&xirr, false);
+ else
+ xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
+ }
if (rc < 0)
return 1;
@@ -494,7 +510,13 @@ static long kvmppc_read_one_intr(bool *again)
*/
if (xisr == XICS_IPI) {
rc = 0;
- if (xics_phys) {
+ if (kvmhv_on_pseries()) {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ plpar_hcall_raw(H_IPI, retbuf,
+ hard_smp_processor_id(), 0xff);
+ plpar_hcall_raw(H_EOI, retbuf, h_xirr);
+ } else if (xics_phys) {
__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
} else {
@@ -520,7 +542,13 @@ static long kvmppc_read_one_intr(bool *again)
/* We raced with the host,
* we need to resend that IPI, bummer
*/
- if (xics_phys)
+ if (kvmhv_on_pseries()) {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ plpar_hcall_raw(H_IPI, retbuf,
+ hard_smp_processor_id(),
+ IPI_PRIORITY);
+ } else if (xics_phys)
__raw_rm_writeb(IPI_PRIORITY,
xics_phys + XICS_MFRR);
else
@@ -729,3 +757,51 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
smp_mb();
local_paca->kvm_hstate.kvm_split_mode = NULL;
}
+
+/*
+ * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
+ * Can we inject a Decrementer or an External interrupt?
+ */
+void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
+{
+ int ext;
+ unsigned long vec = 0;
+ unsigned long lpcr;
+
+ /* Insert EXTERNAL bit into LPCR at the MER bit position */
+ ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= ext << LPCR_MER_SH;
+ mtspr(SPRN_LPCR, lpcr);
+ isync();
+
+ if (vcpu->arch.shregs.msr & MSR_EE) {
+ if (ext) {
+ vec = BOOK3S_INTERRUPT_EXTERNAL;
+ } else {
+ long int dec = mfspr(SPRN_DEC);
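+			/*
+			 * With LPCR[LD] clear the decrementer is only 32 bits
+			 * wide, so sign-extend it before the sign test below.
+			 */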
+ if (!(lpcr & LPCR_LD))
+ dec = (int) dec;
+ if (dec < 0)
+ vec = BOOK3S_INTERRUPT_DECREMENTER;
+ }
+ }
+ if (vec) {
+ unsigned long msr, old_msr = vcpu->arch.shregs.msr;
+
+ kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
+ kvmppc_set_srr1(vcpu, old_msr);
+ kvmppc_set_pc(vcpu, vec);
+ msr = vcpu->arch.intr_msr;
+ if (MSR_TM_ACTIVE(old_msr))
+ msr |= MSR_TS_S;
+ vcpu->arch.shregs.msr = msr;
+ }
+
+ if (vcpu->arch.doorbell_request) {
+ mtspr(SPRN_DPDES, 1);
+ vcpu->arch.vcore->dpdes = 1;
+ smp_wmb();
+ vcpu->arch.doorbell_request = 0;
+ }
+}
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 666b91c79eb4..a6d10010d9e8 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -64,52 +64,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Save host PMU registers */
-BEGIN_FTR_SECTION
- /* Work around P8 PMAE bug */
- li r3, -1
- clrrdi r3, r3, 10
- mfspr r8, SPRN_MMCR2
- mtspr SPRN_MMCR2, r3 /* freeze all counters using MMCR2 */
- isync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- li r3, 1
- sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
- mfspr r7, SPRN_MMCR0 /* save MMCR0 */
- mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
- mfspr r6, SPRN_MMCRA
- /* Clear MMCRA in order to disable SDAR updates */
- li r5, 0
- mtspr SPRN_MMCRA, r5
- isync
- lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? */
- cmpwi r5, 0
- beq 31f /* skip if not */
- mfspr r5, SPRN_MMCR1
- mfspr r9, SPRN_SIAR
- mfspr r10, SPRN_SDAR
- std r7, HSTATE_MMCR0(r13)
- std r5, HSTATE_MMCR1(r13)
- std r6, HSTATE_MMCRA(r13)
- std r9, HSTATE_SIAR(r13)
- std r10, HSTATE_SDAR(r13)
-BEGIN_FTR_SECTION
- mfspr r9, SPRN_SIER
- std r8, HSTATE_MMCR2(r13)
- std r9, HSTATE_SIER(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- mfspr r3, SPRN_PMC1
- mfspr r5, SPRN_PMC2
- mfspr r6, SPRN_PMC3
- mfspr r7, SPRN_PMC4
- mfspr r8, SPRN_PMC5
- mfspr r9, SPRN_PMC6
- stw r3, HSTATE_PMC1(r13)
- stw r5, HSTATE_PMC2(r13)
- stw r6, HSTATE_PMC3(r13)
- stw r7, HSTATE_PMC4(r13)
- stw r8, HSTATE_PMC5(r13)
- stw r9, HSTATE_PMC6(r13)
-31:
+ bl kvmhv_save_host_pmu
/*
* Put whatever is in the decrementer into the
@@ -161,3 +116,51 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr
+
+_GLOBAL(kvmhv_save_host_pmu)
+BEGIN_FTR_SECTION
+ /* Work around P8 PMAE bug */
+ li r3, -1
+ clrrdi r3, r3, 10
+ mfspr r8, SPRN_MMCR2
+ mtspr SPRN_MMCR2, r3 /* freeze all counters using MMCR2 */
+ isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mfspr r7, SPRN_MMCR0 /* save MMCR0 */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
+ mfspr r6, SPRN_MMCRA
+ /* Clear MMCRA in order to disable SDAR updates */
+ li r5, 0
+ mtspr SPRN_MMCRA, r5
+ isync
+ lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? */
+ cmpwi r5, 0
+ beq 31f /* skip if not */
+ mfspr r5, SPRN_MMCR1
+ mfspr r9, SPRN_SIAR
+ mfspr r10, SPRN_SDAR
+ std r7, HSTATE_MMCR0(r13)
+ std r5, HSTATE_MMCR1(r13)
+ std r6, HSTATE_MMCRA(r13)
+ std r9, HSTATE_SIAR(r13)
+ std r10, HSTATE_SDAR(r13)
+BEGIN_FTR_SECTION
+ mfspr r9, SPRN_SIER
+ std r8, HSTATE_MMCR2(r13)
+ std r9, HSTATE_SIER(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ mfspr r3, SPRN_PMC1
+ mfspr r5, SPRN_PMC2
+ mfspr r6, SPRN_PMC3
+ mfspr r7, SPRN_PMC4
+ mfspr r8, SPRN_PMC5
+ mfspr r9, SPRN_PMC6
+ stw r3, HSTATE_PMC1(r13)
+ stw r5, HSTATE_PMC2(r13)
+ stw r6, HSTATE_PMC3(r13)
+ stw r7, HSTATE_PMC4(r13)
+ stw r8, HSTATE_PMC5(r13)
+ stw r9, HSTATE_PMC6(r13)
+31: blr
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
new file mode 100644
index 000000000000..401d2ecbebc5
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -0,0 +1,1291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corporation, 2018
+ * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
+ * Paul Mackerras <paulus@ozlabs.org>
+ *
+ * Description: KVM functions specific to running nested KVM-HV guests
+ * on Book3S processors (specifically POWER9 and later).
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/llist.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/pte-walk.h>
+#include <asm/reg.h>
+
+static struct patb_entry *pseries_partition_tb;
+
+static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
+static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);
+
+void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ hr->pcr = vc->pcr;
+ hr->dpdes = vc->dpdes;
+ hr->hfscr = vcpu->arch.hfscr;
+ hr->tb_offset = vc->tb_offset;
+ hr->dawr0 = vcpu->arch.dawr;
+ hr->dawrx0 = vcpu->arch.dawrx;
+ hr->ciabr = vcpu->arch.ciabr;
+ hr->purr = vcpu->arch.purr;
+ hr->spurr = vcpu->arch.spurr;
+ hr->ic = vcpu->arch.ic;
+ hr->vtb = vc->vtb;
+ hr->srr0 = vcpu->arch.shregs.srr0;
+ hr->srr1 = vcpu->arch.shregs.srr1;
+ hr->sprg[0] = vcpu->arch.shregs.sprg0;
+ hr->sprg[1] = vcpu->arch.shregs.sprg1;
+ hr->sprg[2] = vcpu->arch.shregs.sprg2;
+ hr->sprg[3] = vcpu->arch.shregs.sprg3;
+ hr->pidr = vcpu->arch.pid;
+ hr->cfar = vcpu->arch.cfar;
+ hr->ppr = vcpu->arch.ppr;
+}
+
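+/* Every field of pt_regs is 64 bits wide, so byteswap it as an array of u64s. */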
+static void byteswap_pt_regs(struct pt_regs *regs)
+{
+ unsigned long *addr = (unsigned long *) regs;
+
+ for (; addr < ((unsigned long *) (regs + 1)); addr++)
+ *addr = swab64(*addr);
+}
+
+static void byteswap_hv_regs(struct hv_guest_state *hr)
+{
+ hr->version = swab64(hr->version);
+ hr->lpid = swab32(hr->lpid);
+ hr->vcpu_token = swab32(hr->vcpu_token);
+ hr->lpcr = swab64(hr->lpcr);
+ hr->pcr = swab64(hr->pcr);
+ hr->amor = swab64(hr->amor);
+ hr->dpdes = swab64(hr->dpdes);
+ hr->hfscr = swab64(hr->hfscr);
+ hr->tb_offset = swab64(hr->tb_offset);
+ hr->dawr0 = swab64(hr->dawr0);
+ hr->dawrx0 = swab64(hr->dawrx0);
+ hr->ciabr = swab64(hr->ciabr);
+ hr->hdec_expiry = swab64(hr->hdec_expiry);
+ hr->purr = swab64(hr->purr);
+ hr->spurr = swab64(hr->spurr);
+ hr->ic = swab64(hr->ic);
+ hr->vtb = swab64(hr->vtb);
+ hr->hdar = swab64(hr->hdar);
+ hr->hdsisr = swab64(hr->hdsisr);
+ hr->heir = swab64(hr->heir);
+ hr->asdr = swab64(hr->asdr);
+ hr->srr0 = swab64(hr->srr0);
+ hr->srr1 = swab64(hr->srr1);
+ hr->sprg[0] = swab64(hr->sprg[0]);
+ hr->sprg[1] = swab64(hr->sprg[1]);
+ hr->sprg[2] = swab64(hr->sprg[2]);
+ hr->sprg[3] = swab64(hr->sprg[3]);
+ hr->pidr = swab64(hr->pidr);
+ hr->cfar = swab64(hr->cfar);
+ hr->ppr = swab64(hr->ppr);
+}
+
+static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
+ struct hv_guest_state *hr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ hr->dpdes = vc->dpdes;
+ hr->hfscr = vcpu->arch.hfscr;
+ hr->purr = vcpu->arch.purr;
+ hr->spurr = vcpu->arch.spurr;
+ hr->ic = vcpu->arch.ic;
+ hr->vtb = vc->vtb;
+ hr->srr0 = vcpu->arch.shregs.srr0;
+ hr->srr1 = vcpu->arch.shregs.srr1;
+ hr->sprg[0] = vcpu->arch.shregs.sprg0;
+ hr->sprg[1] = vcpu->arch.shregs.sprg1;
+ hr->sprg[2] = vcpu->arch.shregs.sprg2;
+ hr->sprg[3] = vcpu->arch.shregs.sprg3;
+ hr->pidr = vcpu->arch.pid;
+ hr->cfar = vcpu->arch.cfar;
+ hr->ppr = vcpu->arch.ppr;
+ switch (trap) {
+ case BOOK3S_INTERRUPT_H_DATA_STORAGE:
+ hr->hdar = vcpu->arch.fault_dar;
+ hr->hdsisr = vcpu->arch.fault_dsisr;
+ hr->asdr = vcpu->arch.fault_gpa;
+ break;
+ case BOOK3S_INTERRUPT_H_INST_STORAGE:
+ hr->asdr = vcpu->arch.fault_gpa;
+ break;
+ case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
+ hr->heir = vcpu->arch.emul_inst;
+ break;
+ }
+}
+
+static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
+{
+ /*
+ * Don't let L1 enable features for L2 which we've disabled for L1,
+ * but preserve the interrupt cause field.
+ */
+ hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
+
+ /* Don't let data address watchpoint match in hypervisor state */
+ hr->dawrx0 &= ~DAWRX_HYP;
+
+ /* Don't let completed instruction address breakpt match in HV state */
+ if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
+ hr->ciabr &= ~CIABR_PRIV;
+}
+
+static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ vc->pcr = hr->pcr;
+ vc->dpdes = hr->dpdes;
+ vcpu->arch.hfscr = hr->hfscr;
+ vcpu->arch.dawr = hr->dawr0;
+ vcpu->arch.dawrx = hr->dawrx0;
+ vcpu->arch.ciabr = hr->ciabr;
+ vcpu->arch.purr = hr->purr;
+ vcpu->arch.spurr = hr->spurr;
+ vcpu->arch.ic = hr->ic;
+ vc->vtb = hr->vtb;
+ vcpu->arch.shregs.srr0 = hr->srr0;
+ vcpu->arch.shregs.srr1 = hr->srr1;
+ vcpu->arch.shregs.sprg0 = hr->sprg[0];
+ vcpu->arch.shregs.sprg1 = hr->sprg[1];
+ vcpu->arch.shregs.sprg2 = hr->sprg[2];
+ vcpu->arch.shregs.sprg3 = hr->sprg[3];
+ vcpu->arch.pid = hr->pidr;
+ vcpu->arch.cfar = hr->cfar;
+ vcpu->arch.ppr = hr->ppr;
+}
+
+void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
+ struct hv_guest_state *hr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ vc->dpdes = hr->dpdes;
+ vcpu->arch.hfscr = hr->hfscr;
+ vcpu->arch.purr = hr->purr;
+ vcpu->arch.spurr = hr->spurr;
+ vcpu->arch.ic = hr->ic;
+ vc->vtb = hr->vtb;
+ vcpu->arch.fault_dar = hr->hdar;
+ vcpu->arch.fault_dsisr = hr->hdsisr;
+ vcpu->arch.fault_gpa = hr->asdr;
+ vcpu->arch.emul_inst = hr->heir;
+ vcpu->arch.shregs.srr0 = hr->srr0;
+ vcpu->arch.shregs.srr1 = hr->srr1;
+ vcpu->arch.shregs.sprg0 = hr->sprg[0];
+ vcpu->arch.shregs.sprg1 = hr->sprg[1];
+ vcpu->arch.shregs.sprg2 = hr->sprg[2];
+ vcpu->arch.shregs.sprg3 = hr->sprg[3];
+ vcpu->arch.pid = hr->pidr;
+ vcpu->arch.cfar = hr->cfar;
+ vcpu->arch.ppr = hr->ppr;
+}
+
+long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
+{
+ long int err, r;
+ struct kvm_nested_guest *l2;
+ struct pt_regs l2_regs, saved_l1_regs;
+ struct hv_guest_state l2_hv, saved_l1_hv;
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 hv_ptr, regs_ptr;
+ u64 hdec_exp;
+ s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
+ u64 mask;
+ unsigned long lpcr;
+
+ if (vcpu->kvm->arch.l1_ptcr == 0)
+ return H_NOT_AVAILABLE;
+
+ /* copy parameters in */
+ hv_ptr = kvmppc_get_gpr(vcpu, 4);
+ err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
+ sizeof(struct hv_guest_state));
+ if (err)
+ return H_PARAMETER;
+ if (kvmppc_need_byteswap(vcpu))
+ byteswap_hv_regs(&l2_hv);
+ if (l2_hv.version != HV_GUEST_STATE_VERSION)
+ return H_P2;
+
+ regs_ptr = kvmppc_get_gpr(vcpu, 5);
+ err = kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
+ sizeof(struct pt_regs));
+ if (err)
+ return H_PARAMETER;
+ if (kvmppc_need_byteswap(vcpu))
+ byteswap_pt_regs(&l2_regs);
+ if (l2_hv.vcpu_token >= NR_CPUS)
+ return H_PARAMETER;
+
+ /* translate lpid */
+ l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
+ if (!l2)
+ return H_PARAMETER;
+ if (!l2->l1_gr_to_hr) {
+ mutex_lock(&l2->tlb_lock);
+ kvmhv_update_ptbl_cache(l2);
+ mutex_unlock(&l2->tlb_lock);
+ }
+
+ /* save l1 values of things */
+ vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
+ saved_l1_regs = vcpu->arch.regs;
+ kvmhv_save_hv_regs(vcpu, &saved_l1_hv);
+
+ /* convert TB values/offsets to host (L0) values */
+ hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
+ vc->tb_offset += l2_hv.tb_offset;
+
+ /* set L1 state to L2 state */
+ vcpu->arch.nested = l2;
+ vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
+ vcpu->arch.regs = l2_regs;
+ vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
+ LPCR_LPES | LPCR_MER;
+ lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask);
+ sanitise_hv_regs(vcpu, &l2_hv);
+ restore_hv_regs(vcpu, &l2_hv);
+
+ vcpu->arch.ret = RESUME_GUEST;
+ vcpu->arch.trap = 0;
+ do {
+ if (mftb() >= hdec_exp) {
+ vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
+ r = RESUME_HOST;
+ break;
+ }
+ r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
+ lpcr);
+ } while (is_kvmppc_resume_guest(r));
+
+ /* save L2 state for return */
+ l2_regs = vcpu->arch.regs;
+ l2_regs.msr = vcpu->arch.shregs.msr;
+ delta_purr = vcpu->arch.purr - l2_hv.purr;
+ delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
+ delta_ic = vcpu->arch.ic - l2_hv.ic;
+ delta_vtb = vc->vtb - l2_hv.vtb;
+ save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);
+
+ /* restore L1 state */
+ vcpu->arch.nested = NULL;
+ vcpu->arch.regs = saved_l1_regs;
+ vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
+ /* set L1 MSR TS field according to L2 transaction state */
+ if (l2_regs.msr & MSR_TS_MASK)
+ vcpu->arch.shregs.msr |= MSR_TS_S;
+ vc->tb_offset = saved_l1_hv.tb_offset;
+ restore_hv_regs(vcpu, &saved_l1_hv);
+ vcpu->arch.purr += delta_purr;
+ vcpu->arch.spurr += delta_spurr;
+ vcpu->arch.ic += delta_ic;
+ vc->vtb += delta_vtb;
+
+ kvmhv_put_nested(l2);
+
+	/* copy l2_hv and l2_regs back to the guest */
+ if (kvmppc_need_byteswap(vcpu)) {
+ byteswap_hv_regs(&l2_hv);
+ byteswap_pt_regs(&l2_regs);
+ }
+ err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
+ sizeof(struct hv_guest_state));
+ if (err)
+ return H_AUTHORITY;
+ err = kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
+ sizeof(struct pt_regs));
+ if (err)
+ return H_AUTHORITY;
+
+ if (r == -EINTR)
+ return H_INTERRUPT;
+
+ return vcpu->arch.trap;
+}
+
+long kvmhv_nested_init(void)
+{
+ long int ptb_order;
+ unsigned long ptcr;
+ long rc;
+
+ if (!kvmhv_on_pseries())
+ return 0;
+ if (!radix_enabled())
+ return -ENODEV;
+
+ /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
+ ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
+ if (ptb_order < 8)
+ ptb_order = 8;
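+	/*
+	 * The PTCR size field set below encodes log2(table size) - 12, so
+	 * with 16-byte entries, order 8 (a 4kB table) is the minimum that
+	 * can be encoded.
+	 */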
+ pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
+ GFP_KERNEL);
+ if (!pseries_partition_tb) {
+ pr_err("kvm-hv: failed to allocated nested partition table\n");
+ return -ENOMEM;
+ }
+
+ ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
+ rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
+ if (rc != H_SUCCESS) {
+ pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
+ rc);
+ kfree(pseries_partition_tb);
+ pseries_partition_tb = NULL;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void kvmhv_nested_exit(void)
+{
+ /*
+ * N.B. the kvmhv_on_pseries() test is there because it enables
+ * the compiler to remove the call to plpar_hcall_norets()
+ * when CONFIG_PPC_PSERIES=n.
+ */
+ if (kvmhv_on_pseries() && pseries_partition_tb) {
+ plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
+ kfree(pseries_partition_tb);
+ pseries_partition_tb = NULL;
+ }
+}
+
+static void kvmhv_flush_lpid(unsigned int lpid)
+{
+ long rc;
+
+ if (!kvmhv_on_pseries()) {
+ radix__flush_tlb_lpid(lpid);
+ return;
+ }
+
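+	/*
+	 * RIC=2 (flush all), PRS=0 (partition-scoped), R=1 (radix):
+	 * invalidate all TLB and caching entries for this LPID.
+	 */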
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ if (rc)
+ pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
+}
+
+void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
+{
+ if (!kvmhv_on_pseries()) {
+ mmu_partition_table_set_entry(lpid, dw0, dw1);
+ return;
+ }
+
+ pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+ pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+ /* L0 will do the necessary barriers */
+ kvmhv_flush_lpid(lpid);
+}
+
+static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
+{
+ unsigned long dw0;
+
+ dw0 = PATB_HR | radix__get_tree_size() |
+ __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
+ kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
+}
+
+void kvmhv_vm_nested_init(struct kvm *kvm)
+{
+ kvm->arch.max_nested_lpid = -1;
+}
+
+/*
+ * Handle the H_SET_PARTITION_TABLE hcall.
+ * r4 = guest real address of partition table + log_2(size) - 12
+ * (formatted as for the PTCR).
+ */
+long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
+ int srcu_idx;
+ long ret = H_SUCCESS;
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ /*
+ * Limit the partition table to 4096 entries (because that's what
+ * hardware supports), and check the base address.
+ */
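+	/*
+	 * PRTS encodes log2(table size in bytes) - 12; 4096 16-byte entries
+	 * is 64kB, i.e. a maximum PRTS value of 4 (= 12 - 8).
+	 */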
+ if ((ptcr & PRTS_MASK) > 12 - 8 ||
+ !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
+ ret = H_PARAMETER;
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ if (ret == H_SUCCESS)
+ kvm->arch.l1_ptcr = ptcr;
+ return ret;
+}
+
+/*
+ * Reload the partition table entry for a guest.
+ * Caller must hold gp->tlb_lock.
+ */
+static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
+{
+ int ret;
+ struct patb_entry ptbl_entry;
+ unsigned long ptbl_addr;
+ struct kvm *kvm = gp->l1_host;
+
+ ret = -EFAULT;
+ ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
+ if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8)))
+ ret = kvm_read_guest(kvm, ptbl_addr,
+ &ptbl_entry, sizeof(ptbl_entry));
+ if (ret) {
+ gp->l1_gr_to_hr = 0;
+ gp->process_table = 0;
+ } else {
+ gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
+ gp->process_table = be64_to_cpu(ptbl_entry.patb1);
+ }
+ kvmhv_set_nested_ptbl(gp);
+}
+
+struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
+{
+ struct kvm_nested_guest *gp;
+ long shadow_lpid;
+
+ gp = kzalloc(sizeof(*gp), GFP_KERNEL);
+ if (!gp)
+ return NULL;
+ gp->l1_host = kvm;
+ gp->l1_lpid = lpid;
+ mutex_init(&gp->tlb_lock);
+ gp->shadow_pgtable = pgd_alloc(kvm->mm);
+ if (!gp->shadow_pgtable)
+ goto out_free;
+ shadow_lpid = kvmppc_alloc_lpid();
+ if (shadow_lpid < 0)
+ goto out_free2;
+ gp->shadow_lpid = shadow_lpid;
+
+ memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
+
+ return gp;
+
+ out_free2:
+ pgd_free(kvm->mm, gp->shadow_pgtable);
+ out_free:
+ kfree(gp);
+ return NULL;
+}
+
+/*
+ * Free up any resources allocated for a nested guest.
+ */
+static void kvmhv_release_nested(struct kvm_nested_guest *gp)
+{
+ struct kvm *kvm = gp->l1_host;
+
+ if (gp->shadow_pgtable) {
+ /*
+ * No vcpu is using this struct and no call to
+ * kvmhv_get_nested can find this struct,
+ * so we don't need to hold kvm->mmu_lock.
+ */
+ kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
+ gp->shadow_lpid);
+ pgd_free(kvm->mm, gp->shadow_pgtable);
+ }
+ kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
+ kvmppc_free_lpid(gp->shadow_lpid);
+ kfree(gp);
+}
+
+static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
+{
+ struct kvm *kvm = gp->l1_host;
+ int lpid = gp->l1_lpid;
+ long ref;
+
+ spin_lock(&kvm->mmu_lock);
+ if (gp == kvm->arch.nested_guests[lpid]) {
+ kvm->arch.nested_guests[lpid] = NULL;
+ if (lpid == kvm->arch.max_nested_lpid) {
+ while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
+ ;
+ kvm->arch.max_nested_lpid = lpid;
+ }
+ --gp->refcnt;
+ }
+ ref = gp->refcnt;
+ spin_unlock(&kvm->mmu_lock);
+ if (ref == 0)
+ kvmhv_release_nested(gp);
+}
+
+/*
+ * Free up all nested resources allocated for this guest.
+ * This is called with no vcpus of the guest running, when
+ * switching the guest to HPT mode or when destroying the
+ * guest.
+ */
+void kvmhv_release_all_nested(struct kvm *kvm)
+{
+ int i;
+ struct kvm_nested_guest *gp;
+ struct kvm_nested_guest *freelist = NULL;
+ struct kvm_memory_slot *memslot;
+ int srcu_idx;
+
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
+ gp = kvm->arch.nested_guests[i];
+ if (!gp)
+ continue;
+ kvm->arch.nested_guests[i] = NULL;
+ if (--gp->refcnt == 0) {
+ gp->next = freelist;
+ freelist = gp;
+ }
+ }
+ kvm->arch.max_nested_lpid = -1;
+ spin_unlock(&kvm->mmu_lock);
+ while ((gp = freelist) != NULL) {
+ freelist = gp->next;
+ kvmhv_release_nested(gp);
+ }
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ kvm_for_each_memslot(memslot, kvm_memslots(kvm))
+ kvmhv_free_memslot_nest_rmap(memslot);
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+}
+
+/* caller must hold gp->tlb_lock */
+static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
+{
+ struct kvm *kvm = gp->l1_host;
+
+ spin_lock(&kvm->mmu_lock);
+ kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
+ spin_unlock(&kvm->mmu_lock);
+ kvmhv_flush_lpid(gp->shadow_lpid);
+ kvmhv_update_ptbl_cache(gp);
+ if (gp->l1_gr_to_hr == 0)
+ kvmhv_remove_nested(gp);
+}
+
+struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
+ bool create)
+{
+ struct kvm_nested_guest *gp, *newgp;
+
+ if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
+ l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
+ return NULL;
+
+ spin_lock(&kvm->mmu_lock);
+ gp = kvm->arch.nested_guests[l1_lpid];
+ if (gp)
+ ++gp->refcnt;
+ spin_unlock(&kvm->mmu_lock);
+
+ if (gp || !create)
+ return gp;
+
+ newgp = kvmhv_alloc_nested(kvm, l1_lpid);
+ if (!newgp)
+ return NULL;
+ spin_lock(&kvm->mmu_lock);
+ if (kvm->arch.nested_guests[l1_lpid]) {
+ /* someone else beat us to it */
+ gp = kvm->arch.nested_guests[l1_lpid];
+ } else {
+ kvm->arch.nested_guests[l1_lpid] = newgp;
+ ++newgp->refcnt;
+ gp = newgp;
+ newgp = NULL;
+ if (l1_lpid > kvm->arch.max_nested_lpid)
+ kvm->arch.max_nested_lpid = l1_lpid;
+ }
+ ++gp->refcnt;
+ spin_unlock(&kvm->mmu_lock);
+
+ if (newgp)
+ kvmhv_release_nested(newgp);
+
+ return gp;
+}
+
+void kvmhv_put_nested(struct kvm_nested_guest *gp)
+{
+ struct kvm *kvm = gp->l1_host;
+ long ref;
+
+ spin_lock(&kvm->mmu_lock);
+ ref = --gp->refcnt;
+ spin_unlock(&kvm->mmu_lock);
+ if (ref == 0)
+ kvmhv_release_nested(gp);
+}
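/*
 * Editorial sketch (not part of the patch): the expected caller pattern
 * for the get/put API above, mirroring how the tlbie emulation further
 * down uses it.  kvmhv_get_nested() returns with a reference held;
 * kvmhv_put_nested() drops it and frees the structure on the last
 * reference.
 *
 *	struct kvm_nested_guest *gp;
 *
 *	gp = kvmhv_get_nested(kvm, l1_lpid, false);
 *	if (gp) {
 *		mutex_lock(&gp->tlb_lock);
 *		... operate on gp->shadow_pgtable ...
 *		mutex_unlock(&gp->tlb_lock);
 *		kvmhv_put_nested(gp);
 *	}
 */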
+
+static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
+{
+ if (lpid > kvm->arch.max_nested_lpid)
+ return NULL;
+ return kvm->arch.nested_guests[lpid];
+}
+
+static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
+{
+ return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
+ RMAP_NESTED_GPA_MASK));
+}
+
+void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
+ struct rmap_nested **n_rmap)
+{
+ struct llist_node *entry = ((struct llist_head *) rmapp)->first;
+ struct rmap_nested *cursor;
+ u64 rmap, new_rmap = (*n_rmap)->rmap;
+
+ /* Are there any existing entries? */
+ if (!(*rmapp)) {
+ /* No -> use the rmap as a single entry */
+ *rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
+ return;
+ }
+
+ /* Do any entries match what we're trying to insert? */
+ for_each_nest_rmap_safe(cursor, entry, &rmap) {
+ if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
+ return;
+ }
+
+ /* Do we need to create a list or just add the new entry? */
+ rmap = *rmapp;
+ if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
+ *rmapp = 0UL;
+ llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
+ if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
+ (*n_rmap)->list.next = (struct llist_node *) rmap;
+
+ /* Set to NULL so the caller does not free it */
+ *n_rmap = NULL;
+}
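/*
 * Editorial note (not part of the patch): *rmapp above is overloaded and
 * can be in one of three states:
 *
 *	0					no entries
 *	rmap | RMAP_NESTED_IS_SINGLE_ENTRY	one entry, stored inline
 *	llist_head				two or more rmap_nested entries
 *
 * The inline encoding avoids allocating a struct rmap_nested for the
 * common case of a single nested mapping per gfn.  On the second insert
 * the old inline value, bit and all, is stashed as the next pointer of
 * the new node; the RMAP_NESTED_IS_SINGLE_ENTRY bit presumably serves as
 * the end-of-list marker for for_each_nest_rmap_safe(), which is defined
 * elsewhere.
 */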
+
+static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
+ unsigned long hpa, unsigned long mask)
+{
+ struct kvm_nested_guest *gp;
+ unsigned long gpa;
+ unsigned int shift, lpid;
+ pte_t *ptep;
+
+ gpa = n_rmap & RMAP_NESTED_GPA_MASK;
+ lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
+ gp = kvmhv_find_nested(kvm, lpid);
+ if (!gp)
+ return;
+
+ /* Find and invalidate the pte */
+ ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ /* Don't spuriously invalidate ptes if the pfn has changed */
+ if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
+ kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
+}
+
+static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
+ unsigned long hpa, unsigned long mask)
+{
+ struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
+ struct rmap_nested *cursor;
+ unsigned long rmap;
+
+ for_each_nest_rmap_safe(cursor, entry, &rmap) {
+ kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
+ kfree(cursor);
+ }
+}
+
+/* called with kvm->mmu_lock held */
+void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ unsigned long gpa, unsigned long hpa,
+ unsigned long nbytes)
+{
+ unsigned long gfn, end_gfn;
+ unsigned long addr_mask;
+
+ if (!memslot)
+ return;
+ gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
+ end_gfn = gfn + (nbytes >> PAGE_SHIFT);
+
+ addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
+ hpa &= addr_mask;
+
+ for (; gfn < end_gfn; gfn++) {
+ unsigned long *rmap = &memslot->arch.rmap[gfn];
+ kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
+ }
+}
+
+static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
+{
+ unsigned long page;
+
+ for (page = 0; page < free->npages; page++) {
+ unsigned long rmap, *rmapp = &free->arch.rmap[page];
+ struct rmap_nested *cursor;
+ struct llist_node *entry;
+
+ entry = llist_del_all((struct llist_head *) rmapp);
+ for_each_nest_rmap_safe(cursor, entry, &rmap)
+ kfree(cursor);
+ }
+}
+
+static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
+ struct kvm_nested_guest *gp,
+ long gpa, int *shift_ret)
+{
+ struct kvm *kvm = vcpu->kvm;
+ bool ret = false;
+ pte_t *ptep;
+ int shift;
+
+ spin_lock(&kvm->mmu_lock);
+ ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ if (!shift)
+ shift = PAGE_SHIFT;
+ if (ptep && pte_present(*ptep)) {
+ kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
+ ret = true;
+ }
+ spin_unlock(&kvm->mmu_lock);
+
+ if (shift_ret)
+ *shift_ret = shift;
+ return ret;
+}
+
+static inline int get_ric(unsigned int instr)
+{
+ return (instr >> 18) & 0x3;
+}
+
+static inline int get_prs(unsigned int instr)
+{
+ return (instr >> 17) & 0x1;
+}
+
+static inline int get_r(unsigned int instr)
+{
+ return (instr >> 16) & 0x1;
+}
+
+static inline int get_lpid(unsigned long r_val)
+{
+ return r_val & 0xffffffff;
+}
+
+static inline int get_is(unsigned long r_val)
+{
+ return (r_val >> 10) & 0x3;
+}
+
+static inline int get_ap(unsigned long r_val)
+{
+ return (r_val >> 5) & 0x7;
+}
+
+static inline long get_epn(unsigned long r_val)
+{
+ return r_val >> 12;
+}
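/*
 * Editorial sketch (not part of the patch): a worked decode using the
 * helpers above, with made-up field values.
 *
 *	instr = (2 << 18) | (0 << 17) | (1 << 16);
 *	rsval = 4;
 *	rbval = (0x1234UL << 12) | (2UL << 10);
 *
 *	get_ric(instr)  == 2	(invalidate TLB, PWC and partition-table cache)
 *	get_prs(instr)  == 0	(partition scoped)
 *	get_r(instr)    == 1	(radix)
 *	get_lpid(rsval) == 4	(the L1 lpid to act on)
 *	get_is(rbval)   == 2	(invalidate matching LPID)
 *	get_ap(rbval)   == 0
 *	get_epn(rbval)  == 0x1234
 */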
+
+static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
+ int ap, long epn)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *gp;
+ long npages;
+ int shift, shadow_shift;
+ unsigned long addr;
+
+ shift = ap_to_shift(ap);
+ addr = epn << 12;
+ if (shift < 0)
+ /* Invalid ap encoding */
+ return -EINVAL;
+
+ addr &= ~((1UL << shift) - 1);
+ npages = 1UL << (shift - PAGE_SHIFT);
+
+ gp = kvmhv_get_nested(kvm, lpid, false);
+ if (!gp) /* No such guest -> nothing to do */
+ return 0;
+ mutex_lock(&gp->tlb_lock);
+
+ /* There may be more than one host page backing this single guest pte */
+ do {
+ kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);
+
+ npages -= 1UL << (shadow_shift - PAGE_SHIFT);
+ addr += 1UL << shadow_shift;
+ } while (npages > 0);
+
+ mutex_unlock(&gp->tlb_lock);
+ kvmhv_put_nested(gp);
+ return 0;
+}
+
+static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
+ struct kvm_nested_guest *gp, int ric)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&gp->tlb_lock);
+ switch (ric) {
+ case 0:
+ /* Invalidate TLB */
+ spin_lock(&kvm->mmu_lock);
+ kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
+ gp->shadow_lpid);
+ kvmhv_flush_lpid(gp->shadow_lpid);
+ spin_unlock(&kvm->mmu_lock);
+ break;
+ case 1:
+ /*
+ * Invalidate PWC
+ * We don't cache this -> nothing to do
+ */
+ break;
+ case 2:
+ /* Invalidate TLB, PWC and caching of partition table entries */
+ kvmhv_flush_nested(gp);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&gp->tlb_lock);
+}
+
+static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *gp;
+ int i;
+
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
+ gp = kvm->arch.nested_guests[i];
+ if (gp) {
+ spin_unlock(&kvm->mmu_lock);
+ kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+ spin_lock(&kvm->mmu_lock);
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+}
+
+static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
+ unsigned long rsval, unsigned long rbval)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *gp;
+ int r, ric, prs, is, ap;
+ int lpid;
+ long epn;
+ int ret = 0;
+
+ ric = get_ric(instr);
+ prs = get_prs(instr);
+ r = get_r(instr);
+ lpid = get_lpid(rsval);
+ is = get_is(rbval);
+
+ /*
+ * These cases are invalid and are not handled:
+ * r != 1 -> Only radix supported
+ * prs == 1 -> Not HV privileged
+ * ric == 3 -> No cluster bombs for radix
+ * is == 1 -> Partition scoped translations not associated with pid
+ * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
+ */
+ if ((!r) || (prs) || (ric == 3) || (is == 1) ||
+ ((!is) && (ric == 1 || ric == 2)))
+ return -EINVAL;
+
+ switch (is) {
+ case 0:
+ /*
+ * We know ric == 0
+ * Invalidate TLB for a given target address
+ */
+ epn = get_epn(rbval);
+ ap = get_ap(rbval);
+ ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
+ break;
+ case 2:
+ /* Invalidate matching LPID */
+ gp = kvmhv_get_nested(kvm, lpid, false);
+ if (gp) {
+ kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+ kvmhv_put_nested(gp);
+ }
+ break;
+ case 3:
+ /* Invalidate ALL LPIDs */
+ kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * This handles the H_TLB_INVALIDATE hcall.
+ * Parameters are (r4) tlbie instruction code, (r5) rS contents,
+ * (r6) rB contents.
+ */
+long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
+ if (ret)
+ return H_PARAMETER;
+ return H_SUCCESS;
+}
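/*
 * Editorial sketch (not part of the patch): how an L1 hypervisor might
 * invoke this hcall, assuming the usual pseries hcall plumbing and that
 * H_TLB_INVALIDATE takes the three parameters described above.
 *
 *	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 *	long rc;
 *
 *	rc = plpar_hcall_raw(H_TLB_INVALIDATE, retbuf,
 *			     tlbie_instr, rs_val, rb_val);
 *	if (rc != H_SUCCESS)
 *		... invalid parameters ...
 */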
+
+/* Used to convert a nested guest real address to an L1 guest real address */
+static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
+ struct kvm_nested_guest *gp,
+ unsigned long n_gpa, unsigned long dsisr,
+ struct kvmppc_pte *gpte_p)
+{
+ u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
+ int ret;
+
+ ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
+ &fault_addr);
+
+ if (ret) {
+ /* We didn't find a pte */
+ if (ret == -EINVAL) {
+ /* Unsupported mmu config */
+ flags |= DSISR_UNSUPP_MMU;
+ } else if (ret == -ENOENT) {
+ /* No translation found */
+ flags |= DSISR_NOHPTE;
+ } else if (ret == -EFAULT) {
+ /* Couldn't access L1 real address */
+ flags |= DSISR_PRTABLE_FAULT;
+ vcpu->arch.fault_gpa = fault_addr;
+ } else {
+ /* Unknown error */
+ return ret;
+ }
+ goto forward_to_l1;
+ } else {
+ /* We found a pte -> check permissions */
+ if (dsisr & DSISR_ISSTORE) {
+ /* Can we write? */
+ if (!gpte_p->may_write) {
+ flags |= DSISR_PROTFAULT;
+ goto forward_to_l1;
+ }
+ } else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
+ /* Can we execute? */
+ if (!gpte_p->may_execute) {
+ flags |= SRR1_ISI_N_OR_G;
+ goto forward_to_l1;
+ }
+ } else {
+ /* Can we read? */
+ if (!gpte_p->may_read && !gpte_p->may_write) {
+ flags |= DSISR_PROTFAULT;
+ goto forward_to_l1;
+ }
+ }
+ }
+
+ return 0;
+
+forward_to_l1:
+ vcpu->arch.fault_dsisr = flags;
+ if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
+ vcpu->arch.shregs.msr &= ~0x783f0000ul;
+ vcpu->arch.shregs.msr |= flags;
+ }
+ return RESUME_HOST;
+}
+
+static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
+ struct kvm_nested_guest *gp,
+ unsigned long n_gpa,
+ struct kvmppc_pte gpte,
+ unsigned long dsisr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ bool writing = !!(dsisr & DSISR_ISSTORE);
+ u64 pgflags;
+ bool ret;
+
+ /* Are the rc bits set in the L1 partition scoped pte? */
+ pgflags = _PAGE_ACCESSED;
+ if (writing)
+ pgflags |= _PAGE_DIRTY;
+ if (pgflags & ~gpte.rc)
+ return RESUME_HOST;
+
+ spin_lock(&kvm->mmu_lock);
+ /* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
+ ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing,
+ gpte.raddr, kvm->arch.lpid);
+ spin_unlock(&kvm->mmu_lock);
+ if (!ret)
+ return -EINVAL;
+
+ /* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
+ ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa,
+ gp->shadow_lpid);
+ if (!ret)
+ return -EINVAL;
+ return 0;
+}
+
+static inline int kvmppc_radix_level_to_shift(int level)
+{
+ switch (level) {
+ case 2:
+ return PUD_SHIFT;
+ case 1:
+ return PMD_SHIFT;
+ default:
+ return PAGE_SHIFT;
+ }
+}
+
+static inline int kvmppc_radix_shift_to_level(int shift)
+{
+ if (shift == PUD_SHIFT)
+ return 2;
+ if (shift == PMD_SHIFT)
+ return 1;
+ if (shift == PAGE_SHIFT)
+ return 0;
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+/* called with gp->tlb_lock held */
+static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
+ struct kvm_nested_guest *gp)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_memory_slot *memslot;
+ struct rmap_nested *n_rmap;
+ struct kvmppc_pte gpte;
+ pte_t pte, *pte_p;
+ unsigned long mmu_seq;
+ unsigned long dsisr = vcpu->arch.fault_dsisr;
+ unsigned long ea = vcpu->arch.fault_dar;
+ unsigned long *rmapp;
+ unsigned long n_gpa, gpa, gfn, perm = 0UL;
+ unsigned int shift, l1_shift, level;
+ bool writing = !!(dsisr & DSISR_ISSTORE);
+ bool kvm_ro = false;
+ long int ret;
+
+ if (!gp->l1_gr_to_hr) {
+ kvmhv_update_ptbl_cache(gp);
+ if (!gp->l1_gr_to_hr)
+ return RESUME_HOST;
+ }
+
+ /* Convert the nested guest real address into an L1 guest real address */
+
+ n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
+ if (!(dsisr & DSISR_PRTABLE_FAULT))
+ n_gpa |= ea & 0xFFF;
+ ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);
+
+ /*
+ * If the hardware found a translation but we don't now have a usable
+ * translation in the l1 partition-scoped tree, remove the shadow pte
+ * and let the guest retry.
+ */
+ if (ret == RESUME_HOST &&
+ (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
+ DSISR_BAD_COPYPASTE)))
+ goto inval;
+ if (ret)
+ return ret;
+
+ /* Failed to set the reference/change bits */
+ if (dsisr & DSISR_SET_RC) {
+ ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
+ if (ret == RESUME_HOST)
+ return ret;
+ if (ret)
+ goto inval;
+ dsisr &= ~DSISR_SET_RC;
+ if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
+ DSISR_PROTFAULT)))
+ return RESUME_GUEST;
+ }
+
+ /*
+ * We took an HISI or HDSI while we were running a nested guest, which
+ * means we have no partition-scoped translation for that address, so
+ * we need to insert a pte for the mapping into our shadow_pgtable.
+ */
+
+ l1_shift = gpte.page_shift;
+ if (l1_shift < PAGE_SHIFT) {
+ /* We don't support l1 using a page size smaller than our own */
+ pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
+ l1_shift, PAGE_SHIFT);
+ return -EINVAL;
+ }
+ gpa = gpte.raddr;
+ gfn = gpa >> PAGE_SHIFT;
+
+ /* 1. Get the corresponding host memslot */
+
+ memslot = gfn_to_memslot(kvm, gfn);
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
+ if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
+ /* unusual error -> reflect to the guest as a DSI */
+ kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+ return RESUME_GUEST;
+ }
+ /* passthrough of emulated MMIO case... */
+ pr_err("emulated MMIO passthrough?\n");
+ return -EINVAL;
+ }
+ if (memslot->flags & KVM_MEM_READONLY) {
+ if (writing) {
+ /* Give the guest a DSI */
+ kvmppc_core_queue_data_storage(vcpu, ea,
+ DSISR_ISSTORE | DSISR_PROTFAULT);
+ return RESUME_GUEST;
+ }
+ kvm_ro = true;
+ }
+
+ /* 2. Find the host pte for this L1 guest real address */
+
+ /* Used to check for invalidations in progress */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
+
+ /* See if we can find a translation in our partition-scoped tables for L1 */
+ pte = __pte(0);
+ spin_lock(&kvm->mmu_lock);
+ pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ if (!shift)
+ shift = PAGE_SHIFT;
+ if (pte_p)
+ pte = *pte_p;
+ spin_unlock(&kvm->mmu_lock);
+
+ if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
+ /* No suitable pte found -> try to insert a mapping */
+ ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
+ writing, kvm_ro, &pte, &level);
+ if (ret == -EAGAIN)
+ return RESUME_GUEST;
+ else if (ret)
+ return ret;
+ shift = kvmppc_radix_level_to_shift(level);
+ }
+
+ /* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */
+
+ /* The permissions are the combination of the host and l1 guest ptes */
+ perm |= gpte.may_read ? 0UL : _PAGE_READ;
+ perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
+ perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
+ pte = __pte(pte_val(pte) & ~perm);
+
+ /* What size pte can we insert? */
+ if (shift > l1_shift) {
+ u64 mask;
+ unsigned int actual_shift = PAGE_SHIFT;
+ if (PMD_SHIFT < l1_shift)
+ actual_shift = PMD_SHIFT;
+ mask = (1UL << shift) - (1UL << actual_shift);
+ pte = __pte(pte_val(pte) | (gpa & mask));
+ shift = actual_shift;
+ }
+ level = kvmppc_radix_shift_to_level(shift);
+ n_gpa &= ~((1UL << shift) - 1);
+
+ /* 4. Insert the pte into our shadow_pgtable */
+
+ n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
+ if (!n_rmap)
+ return RESUME_GUEST; /* Let the guest try again */
+ n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
+ (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
+ rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
+ mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
+ if (n_rmap)
+ kfree(n_rmap);
+ if (ret == -EAGAIN)
+ ret = RESUME_GUEST; /* Let the guest try again */
+
+ return ret;
+
+ inval:
+ kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
+ return RESUME_GUEST;
+}
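/*
 * Editorial note (not part of the patch): a worked example of the
 * "what size pte can we insert?" clamp above, assuming PAGE_SHIFT = 12
 * and PMD_SHIFT = 21.  Say the host maps the L1 address with a 2MB pte
 * (shift = 21) but the L1 guest mapped the nested address with a 4K pte
 * (l1_shift = 12):
 *
 *	actual_shift = PAGE_SHIFT = 12		(PMD_SHIFT is not < l1_shift)
 *	mask = (1UL << 21) - (1UL << 12)	= 0x1ff000
 *
 * The offset of gpa within the 2MB host page is folded into the pte's
 * RPN and a 4K (level 0) shadow pte is inserted, so the nested guest is
 * never given a larger mapping than the L1 pte allows.
 */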
+
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
+{
+ struct kvm_nested_guest *gp = vcpu->arch.nested;
+ long int ret;
+
+ mutex_lock(&gp->tlb_lock);
+ ret = __kvmhv_nested_page_fault(vcpu, gp);
+ mutex_unlock(&gp->tlb_lock);
+ return ret;
+}
+
+int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
+{
+ int ret = -1;
+
+ spin_lock(&kvm->mmu_lock);
+ while (++lpid <= kvm->arch.max_nested_lpid) {
+ if (kvm->arch.nested_guests[lpid]) {
+ ret = lpid;
+ break;
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+ return ret;
+}
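/*
 * Editorial sketch (not part of the patch): kvmhv_nested_next_lpid() is
 * an iterator, returning the next in-use L1 lpid above the one passed
 * in, or -1 when there are no more.  Walking every live nested guest:
 *
 *	int lpid = -1;
 *
 *	while ((lpid = kvmhv_nested_next_lpid(kvm, lpid)) >= 0)
 *		pr_info("nested guest with L1 lpid %d\n", lpid);
 */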
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index b11043b23c18..0787f12c1a1b 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -177,6 +177,7 @@ void kvmppc_subcore_enter_guest(void)
local_paca->sibling_subcore_state->in_guest[subcore_id] = 1;
}
+EXPORT_SYMBOL_GPL(kvmppc_subcore_enter_guest);
void kvmppc_subcore_exit_guest(void)
{
@@ -187,6 +188,7 @@ void kvmppc_subcore_exit_guest(void)
local_paca->sibling_subcore_state->in_guest[subcore_id] = 0;
}
+EXPORT_SYMBOL_GPL(kvmppc_subcore_exit_guest);
static bool kvmppc_tb_resync_required(void)
{
@@ -331,5 +333,13 @@ long kvmppc_realmode_hmi_handler(void)
} else {
wait_for_tb_resync();
}
+
+ /*
+ * Reset tb_offset_applied so the guest exit code won't try
+ * to subtract the previous timebase offset from the timebase.
+ */
+ if (local_paca->kvm_hstate.kvm_vcore)
+ local_paca->kvm_hstate.kvm_vcore->tb_offset_applied = 0;
+
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 758d1d23215e..b3f5786b20dc 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -136,7 +136,7 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
/* Mark the target VCPU as having an interrupt pending */
vcpu->stat.queue_intr++;
- set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
+ set_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
/* Kick self ? Just set MER and return */
if (vcpu == this_vcpu) {
@@ -170,8 +170,7 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
/* Note: Only called on self ! */
- clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
- &vcpu->arch.pending_exceptions);
+ clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}
@@ -768,6 +767,14 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
void __iomem *xics_phys;
int64_t rc;
+ if (kvmhv_on_pseries()) {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ iosync();
+ plpar_hcall_raw(H_EOI, retbuf, hwirq);
+ return;
+ }
+
rc = pnv_opal_pci_msi_eoi(c, hwirq);
if (rc)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 1d14046124a0..9b8d50a7cbaf 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -28,6 +28,7 @@
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
+#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
@@ -46,8 +47,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define NAPPING_NOVCPU 2
/* Stack frame offsets for kvmppc_hv_entry */
-#define SFS 160
+#define SFS 208
#define STACK_SLOT_TRAP (SFS-4)
+#define STACK_SLOT_SHORT_PATH (SFS-8)
#define STACK_SLOT_TID (SFS-16)
#define STACK_SLOT_PSSCR (SFS-24)
#define STACK_SLOT_PID (SFS-32)
@@ -56,6 +58,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define STACK_SLOT_DAWR (SFS-56)
#define STACK_SLOT_DAWRX (SFS-64)
#define STACK_SLOT_HFSCR (SFS-72)
+/* the following is used by the P9 short path */
+#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
/*
* Call kvmppc_hv_entry in real mode.
@@ -113,45 +117,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_SPRG_VDSO_WRITE,r3
/* Reload the host's PMU registers */
- lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
- cmpwi r4, 0
- beq 23f /* skip if not */
-BEGIN_FTR_SECTION
- ld r3, HSTATE_MMCR0(r13)
- andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
- cmpwi r4, MMCR0_PMAO
- beql kvmppc_fix_pmao
-END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
- lwz r3, HSTATE_PMC1(r13)
- lwz r4, HSTATE_PMC2(r13)
- lwz r5, HSTATE_PMC3(r13)
- lwz r6, HSTATE_PMC4(r13)
- lwz r8, HSTATE_PMC5(r13)
- lwz r9, HSTATE_PMC6(r13)
- mtspr SPRN_PMC1, r3
- mtspr SPRN_PMC2, r4
- mtspr SPRN_PMC3, r5
- mtspr SPRN_PMC4, r6
- mtspr SPRN_PMC5, r8
- mtspr SPRN_PMC6, r9
- ld r3, HSTATE_MMCR0(r13)
- ld r4, HSTATE_MMCR1(r13)
- ld r5, HSTATE_MMCRA(r13)
- ld r6, HSTATE_SIAR(r13)
- ld r7, HSTATE_SDAR(r13)
- mtspr SPRN_MMCR1, r4
- mtspr SPRN_MMCRA, r5
- mtspr SPRN_SIAR, r6
- mtspr SPRN_SDAR, r7
-BEGIN_FTR_SECTION
- ld r8, HSTATE_MMCR2(r13)
- ld r9, HSTATE_SIER(r13)
- mtspr SPRN_MMCR2, r8
- mtspr SPRN_SIER, r9
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- mtspr SPRN_MMCR0, r3
- isync
-23:
+ bl kvmhv_load_host_pmu
/*
* Reload DEC. HDEC interrupts were disabled when
@@ -796,66 +762,23 @@ BEGIN_FTR_SECTION
b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/*
- * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
mr r3, r4
ld r4, VCPU_MSR(r3)
+ li r5, 0 /* don't preserve non-vol regs */
bl kvmppc_restore_tm_hv
+ nop
ld r4, HSTATE_KVM_VCPU(r13)
91:
#endif
- /* Load guest PMU registers */
- /* R4 is live here (vcpu pointer) */
- li r3, 1
- sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
- mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
- isync
-BEGIN_FTR_SECTION
- ld r3, VCPU_MMCR(r4)
- andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
- cmpwi r5, MMCR0_PMAO
- beql kvmppc_fix_pmao
-END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
- lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
- lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
- lwz r6, VCPU_PMC + 8(r4)
- lwz r7, VCPU_PMC + 12(r4)
- lwz r8, VCPU_PMC + 16(r4)
- lwz r9, VCPU_PMC + 20(r4)
- mtspr SPRN_PMC1, r3
- mtspr SPRN_PMC2, r5
- mtspr SPRN_PMC3, r6
- mtspr SPRN_PMC4, r7
- mtspr SPRN_PMC5, r8
- mtspr SPRN_PMC6, r9
- ld r3, VCPU_MMCR(r4)
- ld r5, VCPU_MMCR + 8(r4)
- ld r6, VCPU_MMCR + 16(r4)
- ld r7, VCPU_SIAR(r4)
- ld r8, VCPU_SDAR(r4)
- mtspr SPRN_MMCR1, r5
- mtspr SPRN_MMCRA, r6
- mtspr SPRN_SIAR, r7
- mtspr SPRN_SDAR, r8
-BEGIN_FTR_SECTION
- ld r5, VCPU_MMCR + 24(r4)
- ld r6, VCPU_SIER(r4)
- mtspr SPRN_MMCR2, r5
- mtspr SPRN_SIER, r6
-BEGIN_FTR_SECTION_NESTED(96)
- lwz r7, VCPU_PMC + 24(r4)
- lwz r8, VCPU_PMC + 28(r4)
- ld r9, VCPU_MMCR + 32(r4)
- mtspr SPRN_SPMC1, r7
- mtspr SPRN_SPMC2, r8
- mtspr SPRN_MMCRS, r9
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- mtspr SPRN_MMCR0, r3
- isync
+ /* Load guest PMU registers; r4 = vcpu pointer here */
+ mr r3, r4
+ bl kvmhv_load_guest_pmu
/* Load up FP, VMX and VSX registers */
+ ld r4, HSTATE_KVM_VCPU(r13)
bl kvmppc_load_fp
ld r14, VCPU_GPR(R14)(r4)
@@ -1100,73 +1023,40 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
no_xive:
#endif /* CONFIG_KVM_XICS */
-deliver_guest_interrupt:
- ld r6, VCPU_CTR(r4)
- ld r7, VCPU_XER(r4)
-
- mtctr r6
- mtxer r7
+ li r0, 0
+ stw r0, STACK_SLOT_SHORT_PATH(r1)
-kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
- ld r10, VCPU_PC(r4)
- ld r11, VCPU_MSR(r4)
+deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */
+ /* Check if we can deliver an external or decrementer interrupt now */
+ ld r0, VCPU_PENDING_EXC(r4)
+BEGIN_FTR_SECTION
+ /* On POWER9, also check for emulated doorbell interrupt */
+ lbz r3, VCPU_DBELL_REQ(r4)
+ or r0, r0, r3
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ cmpdi r0, 0
+ beq 71f
+ mr r3, r4
+ bl kvmppc_guest_entry_inject_int
+ ld r4, HSTATE_KVM_VCPU(r13)
+71:
ld r6, VCPU_SRR0(r4)
ld r7, VCPU_SRR1(r4)
mtspr SPRN_SRR0, r6
mtspr SPRN_SRR1, r7
+fast_guest_entry_c:
+ ld r10, VCPU_PC(r4)
+ ld r11, VCPU_MSR(r4)
/* r11 = vcpu->arch.msr & ~MSR_HV */
rldicl r11, r11, 63 - MSR_HV_LG, 1
rotldi r11, r11, 1 + MSR_HV_LG
ori r11, r11, MSR_ME
- /* Check if we can deliver an external or decrementer interrupt now */
- ld r0, VCPU_PENDING_EXC(r4)
- rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
- cmpdi cr1, r0, 0
- andi. r8, r11, MSR_EE
- mfspr r8, SPRN_LPCR
- /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
- rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
- mtspr SPRN_LPCR, r8
- isync
- beq 5f
- li r0, BOOK3S_INTERRUPT_EXTERNAL
- bne cr1, 12f
- mfspr r0, SPRN_DEC
-BEGIN_FTR_SECTION
- /* On POWER9 check whether the guest has large decrementer enabled */
- andis. r8, r8, LPCR_LD@h
- bne 15f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- extsw r0, r0
-15: cmpdi r0, 0
- li r0, BOOK3S_INTERRUPT_DECREMENTER
- bge 5f
-
-12: mtspr SPRN_SRR0, r10
- mr r10,r0
- mtspr SPRN_SRR1, r11
- mr r9, r4
- bl kvmppc_msr_interrupt
-5:
-BEGIN_FTR_SECTION
- b fast_guest_return
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
- /* On POWER9, check for pending doorbell requests */
- lbz r0, VCPU_DBELL_REQ(r4)
- cmpwi r0, 0
- beq fast_guest_return
- ld r5, HSTATE_KVM_VCORE(r13)
- /* Set DPDES register so the CPU will take a doorbell interrupt */
- li r0, 1
- mtspr SPRN_DPDES, r0
- std r0, VCORE_DPDES(r5)
- /* Make sure other cpus see vcore->dpdes set before dbell req clear */
- lwsync
- /* Clear the pending doorbell request */
- li r0, 0
- stb r0, VCPU_DBELL_REQ(r4)
+ ld r6, VCPU_CTR(r4)
+ ld r7, VCPU_XER(r4)
+ mtctr r6
+ mtxer r7
/*
* Required state:
@@ -1202,7 +1092,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r5, VCPU_LR(r4)
- lwz r6, VCPU_CR(r4)
+ ld r6, VCPU_CR(r4)
mtlr r5
mtcr r6
@@ -1234,6 +1124,83 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
HRFI_TO_GUEST
b .
+/*
+ * Enter the guest on a P9 or later system where we have exactly
+ * one vcpu per vcore and we don't need to go to real mode
+ * (which implies that host and guest are both using radix MMU mode).
+ * r3 = vcpu pointer
+ * Most SPRs and all the VSRs have been loaded already.
+ */
+_GLOBAL(__kvmhv_vcpu_entry_p9)
+EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+ stdu r1, -SFS(r1)
+
+ li r0, 1
+ stw r0, STACK_SLOT_SHORT_PATH(r1)
+
+ std r3, HSTATE_KVM_VCPU(r13)
+ mfcr r4
+ stw r4, SFS+8(r1)
+
+ std r1, HSTATE_HOST_R1(r13)
+
+ reg = 14
+ .rept 18
+ std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ reg = 14
+ .rept 18
+ ld reg, __VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ mfmsr r10
+ std r10, HSTATE_HOST_MSR(r13)
+
+ mr r4, r3
+ b fast_guest_entry_c
+guest_exit_short_path:
+
+ li r0, KVM_GUEST_MODE_NONE
+ stb r0, HSTATE_IN_GUEST(r13)
+
+ reg = 14
+ .rept 18
+ std reg, __VCPU_GPR(reg)(r9)
+ reg = reg + 1
+ .endr
+
+ reg = 14
+ .rept 18
+ ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ lwz r4, SFS+8(r1)
+ mtcr r4
+
+ mr r3, r12 /* trap number */
+
+ addi r1, r1, SFS
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+
+ /* If we are in real mode, do a rfid to get back to the caller */
+ mfmsr r4
+ andi. r5, r4, MSR_IR
+ bnelr
+ rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */
+ mtspr SPRN_SRR0, r0
+ ld r10, HSTATE_HOST_MSR(r13)
+ rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+ mtspr SPRN_SRR1, r10
+ RFI_TO_KERNEL
+ b .
+
secondary_too_late:
li r12, 0
stw r12, STACK_SLOT_TRAP(r1)
@@ -1313,7 +1280,7 @@ kvmppc_interrupt_hv:
std r3, VCPU_GPR(R12)(r9)
/* CR is in the high half of r12 */
srdi r4, r12, 32
- stw r4, VCPU_CR(r9)
+ std r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
ld r3, HSTATE_CFAR(r13)
std r3, VCPU_CFAR(r9)
@@ -1387,18 +1354,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
std r3, VCPU_CTR(r9)
std r4, VCPU_XER(r9)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* For softpatch interrupt, go off and do TM instruction emulation */
- cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
- beq kvmppc_tm_emul
-#endif
+ /* Save more register state */
+ mfdar r3
+ mfdsisr r4
+ std r3, VCPU_DAR(r9)
+ stw r4, VCPU_DSISR(r9)
/* If this is a page table miss then see if it's theirs or ours */
cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
beq kvmppc_hdsi
+ std r3, VCPU_FAULT_DAR(r9)
+ stw r4, VCPU_FAULT_DSISR(r9)
cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
beq kvmppc_hisi
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* For softpatch interrupt, go off and do TM instruction emulation */
+ cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
+ beq kvmppc_tm_emul
+#endif
+
/* See if this is a leftover HDEC interrupt */
cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
bne 2f
@@ -1418,10 +1393,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
PPC_MSGSYNC
lwsync
+ /* always exit if we're running a nested guest */
+ ld r0, VCPU_NESTED(r9)
+ cmpdi r0, 0
+ bne guest_exit_cont
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
- beq 4f
+ beq maybe_reenter_guest
b guest_exit_cont
3:
/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
@@ -1433,82 +1412,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
14:
/* External interrupt ? */
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
- bne+ guest_exit_cont
-
- /* External interrupt, first check for host_ipi. If this is
- * set, we know the host wants us out so let's do it now
- */
- bl kvmppc_read_intr
-
- /*
- * Restore the active volatile registers after returning from
- * a C function.
- */
- ld r9, HSTATE_KVM_VCPU(r13)
- li r12, BOOK3S_INTERRUPT_EXTERNAL
-
- /*
- * kvmppc_read_intr return codes:
- *
- * Exit to host (r3 > 0)
- * 1 An interrupt is pending that needs to be handled by the host
- * Exit guest and return to host by branching to guest_exit_cont
- *
- * 2 Passthrough that needs completion in the host
- * Exit guest and return to host by branching to guest_exit_cont
- * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
- * to indicate to the host to complete handling the interrupt
- *
- * Before returning to guest, we check if any CPU is heading out
- * to the host and if so, we head out also. If no CPUs are heading
- * check return values <= 0.
- *
- * Return to guest (r3 <= 0)
- * 0 No external interrupt is pending
- * -1 A guest wakeup IPI (which has now been cleared)
- * In either case, we return to guest to deliver any pending
- * guest interrupts.
- *
- * -2 A PCI passthrough external interrupt was handled
- * (interrupt was delivered directly to guest)
- * Return to guest to deliver any pending guest interrupts.
- */
-
- cmpdi r3, 1
- ble 1f
-
- /* Return code = 2 */
- li r12, BOOK3S_INTERRUPT_HV_RM_HARD
- stw r12, VCPU_TRAP(r9)
- b guest_exit_cont
-
-1: /* Return code <= 1 */
- cmpdi r3, 0
- bgt guest_exit_cont
-
- /* Return code <= 0 */
-4: ld r5, HSTATE_KVM_VCORE(r13)
- lwz r0, VCORE_ENTRY_EXIT(r5)
- cmpwi r0, 0x100
- mr r4, r9
- blt deliver_guest_interrupt
-
-guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
- /* Save more register state */
- mfdar r6
- mfdsisr r7
- std r6, VCPU_DAR(r9)
- stw r7, VCPU_DSISR(r9)
- /* don't overwrite fault_dar/fault_dsisr if HDSI */
- cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
- beq mc_cont
- std r6, VCPU_FAULT_DAR(r9)
- stw r7, VCPU_FAULT_DSISR(r9)
-
+ beq kvmppc_guest_external
/* See if it is a machine check */
cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
beq machine_check_realmode
-mc_cont:
+ /* Or a hypervisor maintenance interrupt */
+ cmpwi r12, BOOK3S_INTERRUPT_HMI
+ beq hmi_realmode
+
+guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
+
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
addi r3, r9, VCPU_TB_RMEXIT
mr r4, r9
@@ -1552,6 +1465,11 @@ mc_cont:
1:
#endif /* CONFIG_KVM_XICS */
+ /* If we came in through the P9 short path, go back out to C now */
+ lwz r0, STACK_SLOT_SHORT_PATH(r1)
+ cmpwi r0, 0
+ bne guest_exit_short_path
+
/* For hash guest, read the guest SLB and save it away */
ld r5, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r5)
@@ -1780,11 +1698,13 @@ BEGIN_FTR_SECTION
b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/*
- * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
mr r3, r9
ld r4, VCPU_MSR(r3)
+ li r5, 0 /* don't preserve non-vol regs */
bl kvmppc_save_tm_hv
+ nop
ld r9, HSTATE_KVM_VCPU(r13)
91:
#endif
@@ -1802,83 +1722,12 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
25:
/* Save PMU registers if requested */
/* r8 and cr0.eq are live here */
-BEGIN_FTR_SECTION
- /*
- * POWER8 seems to have a hardware bug where setting
- * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
- * when some counters are already negative doesn't seem
- * to cause a performance monitor alert (and hence interrupt).
- * The effect of this is that when saving the PMU state,
- * if there is no PMU alert pending when we read MMCR0
- * before freezing the counters, but one becomes pending
- * before we read the counters, we lose it.
- * To work around this, we need a way to freeze the counters
- * before reading MMCR0. Normally, freezing the counters
- * is done by writing MMCR0 (to set MMCR0[FC]) which
- * unavoidably writes MMCR0[PMA0] as well. On POWER8,
- * we can also freeze the counters using MMCR2, by writing
- * 1s to all the counter freeze condition bits (there are
- * 9 bits each for 6 counters).
- */
- li r3, -1 /* set all freeze bits */
- clrrdi r3, r3, 10
- mfspr r10, SPRN_MMCR2
- mtspr SPRN_MMCR2, r3
- isync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- li r3, 1
- sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
- mfspr r4, SPRN_MMCR0 /* save MMCR0 */
- mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
- mfspr r6, SPRN_MMCRA
- /* Clear MMCRA in order to disable SDAR updates */
- li r7, 0
- mtspr SPRN_MMCRA, r7
- isync
+ mr r3, r9
+ li r4, 1
beq 21f /* if no VPA, save PMU stuff anyway */
- lbz r7, LPPACA_PMCINUSE(r8)
- cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
- bne 21f
- std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
- b 22f
-21: mfspr r5, SPRN_MMCR1
- mfspr r7, SPRN_SIAR
- mfspr r8, SPRN_SDAR
- std r4, VCPU_MMCR(r9)
- std r5, VCPU_MMCR + 8(r9)
- std r6, VCPU_MMCR + 16(r9)
-BEGIN_FTR_SECTION
- std r10, VCPU_MMCR + 24(r9)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- std r7, VCPU_SIAR(r9)
- std r8, VCPU_SDAR(r9)
- mfspr r3, SPRN_PMC1
- mfspr r4, SPRN_PMC2
- mfspr r5, SPRN_PMC3
- mfspr r6, SPRN_PMC4
- mfspr r7, SPRN_PMC5
- mfspr r8, SPRN_PMC6
- stw r3, VCPU_PMC(r9)
- stw r4, VCPU_PMC + 4(r9)
- stw r5, VCPU_PMC + 8(r9)
- stw r6, VCPU_PMC + 12(r9)
- stw r7, VCPU_PMC + 16(r9)
- stw r8, VCPU_PMC + 20(r9)
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_SIER
- std r5, VCPU_SIER(r9)
-BEGIN_FTR_SECTION_NESTED(96)
- mfspr r6, SPRN_SPMC1
- mfspr r7, SPRN_SPMC2
- mfspr r8, SPRN_MMCRS
- stw r6, VCPU_PMC + 24(r9)
- stw r7, VCPU_PMC + 28(r9)
- std r8, VCPU_MMCR + 32(r9)
- lis r4, 0x8000
- mtspr SPRN_MMCRS, r4
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-22:
+ lbz r4, LPPACA_PMCINUSE(r8)
+21: bl kvmhv_save_guest_pmu
+ ld r9, HSTATE_KVM_VCPU(r13)
/* Restore host values of some registers */
BEGIN_FTR_SECTION
@@ -2010,24 +1859,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- /* If HMI, call kvmppc_realmode_hmi_handler() */
- lwz r12, STACK_SLOT_TRAP(r1)
- cmpwi r12, BOOK3S_INTERRUPT_HMI
- bne 27f
- bl kvmppc_realmode_hmi_handler
- nop
- cmpdi r3, 0
- /*
- * At this point kvmppc_realmode_hmi_handler may have resync-ed
- * the TB, and if it has, we must not subtract the guest timebase
- * offset from the timebase. So, skip it.
- *
- * Also, do not call kvmppc_subcore_exit_guest() because it has
- * been invoked as part of kvmppc_realmode_hmi_handler().
- */
- beq 30f
-
-27:
/* Subtract timebase offset from timebase */
ld r8, VCORE_TB_OFFSET_APPL(r5)
cmpdi r8,0
@@ -2045,7 +1876,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
addis r8,r8,0x100 /* if so, increment upper 40 bits */
mtspr SPRN_TBU40,r8
-17: bl kvmppc_subcore_exit_guest
+17:
+ /*
+ * If this is an HMI, we called kvmppc_realmode_hmi_handler
+ * above, which may or may not have already called
+ * kvmppc_subcore_exit_guest. Fortunately, all that
+ * kvmppc_subcore_exit_guest does is clear a flag, so calling
+ * it again here is benign even if kvmppc_realmode_hmi_handler
+ * has already called it.
+ */
+ bl kvmppc_subcore_exit_guest
nop
30: ld r5,HSTATE_KVM_VCORE(r13)
ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
@@ -2099,6 +1939,67 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtlr r0
blr
+kvmppc_guest_external:
+ /* External interrupt, first check for host_ipi. If this is
+ * set, we know the host wants us out so let's do it now
+ */
+ bl kvmppc_read_intr
+
+ /*
+ * Restore the active volatile registers after returning from
+ * a C function.
+ */
+ ld r9, HSTATE_KVM_VCPU(r13)
+ li r12, BOOK3S_INTERRUPT_EXTERNAL
+
+ /*
+ * kvmppc_read_intr return codes:
+ *
+ * Exit to host (r3 > 0)
+ * 1 An interrupt is pending that needs to be handled by the host
+ * Exit guest and return to host by branching to guest_exit_cont
+ *
+ * 2 Passthrough that needs completion in the host
+ * Exit guest and return to host by branching to guest_exit_cont
+ * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
+ * to indicate to the host to complete handling the interrupt
+ *
+ * Before returning to the guest, we check if any CPU is heading out
+ * to the host and if so, we head out also. If no CPUs are heading
+ * out, see the return values <= 0 below.
+ *
+ * Return to guest (r3 <= 0)
+ * 0 No external interrupt is pending
+ * -1 A guest wakeup IPI (which has now been cleared)
+ * In either case, we return to guest to deliver any pending
+ * guest interrupts.
+ *
+ * -2 A PCI passthrough external interrupt was handled
+ * (interrupt was delivered directly to guest)
+ * Return to guest to deliver any pending guest interrupts.
+ */
+
+ cmpdi r3, 1
+ ble 1f
+
+ /* Return code = 2 */
+ li r12, BOOK3S_INTERRUPT_HV_RM_HARD
+ stw r12, VCPU_TRAP(r9)
+ b guest_exit_cont
+
+1: /* Return code <= 1 */
+ cmpdi r3, 0
+ bgt guest_exit_cont
+
+ /* Return code <= 0 */
+maybe_reenter_guest:
+ ld r5, HSTATE_KVM_VCORE(r13)
+ lwz r0, VCORE_ENTRY_EXIT(r5)
+ cmpwi r0, 0x100
+ mr r4, r9
+ blt deliver_guest_interrupt
+ b guest_exit_cont
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Softpatch interrupt for transactional memory emulation cases
@@ -2302,6 +2203,10 @@ hcall_try_real_mode:
andi. r0,r11,MSR_PR
/* sc 1 from userspace - reflect to guest syscall */
bne sc_1_fast_return
+ /* sc 1 from nested guest - give it to L1 to handle */
+ ld r0, VCPU_NESTED(r9)
+ cmpdi r0, 0
+ bne guest_exit_cont
clrrdi r3,r3,2
cmpldi r3,hcall_real_table_end - hcall_real_table
bge guest_exit_cont
@@ -2561,6 +2466,7 @@ hcall_real_table:
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_xdabr)
+EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
andi. r0, r5, DABRX_USER | DABRX_KERNEL
beq 6f
li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
@@ -2570,6 +2476,7 @@ _GLOBAL(kvmppc_h_set_xdabr)
blr
_GLOBAL(kvmppc_h_set_dabr)
+EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
li r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
@@ -2682,11 +2589,13 @@ BEGIN_FTR_SECTION
b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/*
- * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
ld r3, HSTATE_KVM_VCPU(r13)
ld r4, VCPU_MSR(r3)
+ li r5, 0 /* don't preserve non-vol regs */
bl kvmppc_save_tm_hv
+ nop
91:
#endif
@@ -2802,11 +2711,13 @@ BEGIN_FTR_SECTION
b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/*
- * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
mr r3, r4
ld r4, VCPU_MSR(r3)
+ li r5, 0 /* don't preserve non-vol regs */
bl kvmppc_restore_tm_hv
+ nop
ld r4, HSTATE_KVM_VCPU(r13)
91:
#endif
@@ -2874,13 +2785,7 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
mr r9, r4
cmpdi r3, 0
bgt guest_exit_cont
-
- /* see if any other thread is already exiting */
- lwz r0,VCORE_ENTRY_EXIT(r5)
- cmpwi r0,0x100
- bge guest_exit_cont
-
- b kvmppc_cede_reentry /* if not go back to guest */
+ b maybe_reenter_guest
/* cede when already previously prodded case */
kvm_cede_prodded:
@@ -2947,12 +2852,12 @@ machine_check_realmode:
*/
ld r11, VCPU_MSR(r9)
rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
- bne mc_cont /* if so, exit to host */
+ bne guest_exit_cont /* if so, exit to host */
/* Check if guest is capable of handling NMI exit */
ld r10, VCPU_KVM(r9)
lbz r10, KVM_FWNMI(r10)
cmpdi r10, 1 /* FWNMI capable? */
- beq mc_cont /* if so, exit with KVM_EXIT_NMI. */
+ beq guest_exit_cont /* if so, exit with KVM_EXIT_NMI. */
/* if not, fall through for backward compatibility. */
andi. r10, r11, MSR_RI /* check for unrecoverable exception */
@@ -2966,6 +2871,21 @@ machine_check_realmode:
2: b fast_interrupt_c_return
/*
+ * Call C code to handle an HMI in real mode.
+ * Only the primary thread does the call, secondary threads are handled
+ * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
+ * r9 points to the vcpu on entry
+ */
+hmi_realmode:
+ lbz r0, HSTATE_PTID(r13)
+ cmpwi r0, 0
+ bne guest_exit_cont
+ bl kvmppc_realmode_hmi_handler
+ ld r9, HSTATE_KVM_VCPU(r13)
+ li r12, BOOK3S_INTERRUPT_HMI
+ b guest_exit_cont
+
+/*
* Check the reason we woke from nap, and take appropriate action.
* Returns (in r3):
* 0 if nothing needs to be done
@@ -3130,10 +3050,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* Save transactional state and TM-related registers.
* Called with r3 pointing to the vcpu struct and r4 containing
* the guest MSR value.
- * This can modify all checkpointed registers, but
+ * r5 is non-zero iff non-volatile register state needs to be maintained.
+ * If r5 == 0, this can modify all checkpointed registers, but
* restores r1 and r2 before exit.
*/
-kvmppc_save_tm_hv:
+_GLOBAL_TOC(kvmppc_save_tm_hv)
+EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
b __kvmppc_save_tm
@@ -3161,12 +3083,6 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
nop
- std r1, HSTATE_HOST_R1(r13)
-
- /* Clear the MSR RI since r1, r13 may be foobar. */
- li r5, 0
- mtmsrd r5, 1
-
/* We have to treclaim here because that's the only way to do S->N */
li r3, TM_CAUSE_KVM_RESCHED
TRECLAIM(R3)
@@ -3175,22 +3091,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
* We were in fake suspend, so we are not going to save the
* register state as the guest checkpointed state (since
* we already have it), therefore we can now use any volatile GPR.
+ * In fact, treclaim in fake suspend state doesn't modify
+ * any registers.
*/
- /* Reload PACA pointer, stack pointer and TOC. */
- GET_PACA(r13)
- ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATOC(r13)
- /* Set MSR RI now we have r1 and r13 back. */
- li r5, MSR_RI
- mtmsrd r5, 1
-
- HMT_MEDIUM
- ld r6, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r6
-BEGIN_FTR_SECTION_NESTED(96)
+BEGIN_FTR_SECTION
bl pnv_power9_force_smt4_release
-END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
+END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
nop
4:
@@ -3216,10 +3123,12 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
* Restore transactional state and TM-related registers.
* Called with r3 pointing to the vcpu struct
* and r4 containing the guest MSR value.
+ * r5 is non-zero iff non-volatile register state needs to be maintained.
* This potentially modifies all checkpointed registers.
* It restores r1 and r2 from the PACA.
*/
-kvmppc_restore_tm_hv:
+_GLOBAL_TOC(kvmppc_restore_tm_hv)
+EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
/*
* If we are doing TM emulation for the guest on a POWER9 DD2,
* then we don't actually do a trechkpt -- we either set up
@@ -3424,6 +3333,194 @@ kvmppc_msr_interrupt:
blr
/*
+ * Load up guest PMU state. R3 points to the vcpu struct.
+ */
+_GLOBAL(kvmhv_load_guest_pmu)
+EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
+ mr r4, r3
+ mflr r0
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ isync
+BEGIN_FTR_SECTION
+ ld r3, VCPU_MMCR(r4)
+ andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
+ cmpwi r5, MMCR0_PMAO
+ beql kvmppc_fix_pmao
+END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
+ lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
+ lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
+ lwz r6, VCPU_PMC + 8(r4)
+ lwz r7, VCPU_PMC + 12(r4)
+ lwz r8, VCPU_PMC + 16(r4)
+ lwz r9, VCPU_PMC + 20(r4)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r5
+ mtspr SPRN_PMC3, r6
+ mtspr SPRN_PMC4, r7
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+ ld r3, VCPU_MMCR(r4)
+ ld r5, VCPU_MMCR + 8(r4)
+ ld r6, VCPU_MMCR + 16(r4)
+ ld r7, VCPU_SIAR(r4)
+ ld r8, VCPU_SDAR(r4)
+ mtspr SPRN_MMCR1, r5
+ mtspr SPRN_MMCRA, r6
+ mtspr SPRN_SIAR, r7
+ mtspr SPRN_SDAR, r8
+BEGIN_FTR_SECTION
+ ld r5, VCPU_MMCR + 24(r4)
+ ld r6, VCPU_SIER(r4)
+ mtspr SPRN_MMCR2, r5
+ mtspr SPRN_SIER, r6
+BEGIN_FTR_SECTION_NESTED(96)
+ lwz r7, VCPU_PMC + 24(r4)
+ lwz r8, VCPU_PMC + 28(r4)
+ ld r9, VCPU_MMCR + 32(r4)
+ mtspr SPRN_SPMC1, r7
+ mtspr SPRN_SPMC2, r8
+ mtspr SPRN_MMCRS, r9
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ mtspr SPRN_MMCR0, r3
+ isync
+ mtlr r0
+ blr
+
+/*
+ * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
+ */
+_GLOBAL(kvmhv_load_host_pmu)
+EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
+ mflr r0
+ lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
+ cmpwi r4, 0
+ beq 23f /* skip if not */
+BEGIN_FTR_SECTION
+ ld r3, HSTATE_MMCR0(r13)
+ andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
+ cmpwi r4, MMCR0_PMAO
+ beql kvmppc_fix_pmao
+END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
+ lwz r3, HSTATE_PMC1(r13)
+ lwz r4, HSTATE_PMC2(r13)
+ lwz r5, HSTATE_PMC3(r13)
+ lwz r6, HSTATE_PMC4(r13)
+ lwz r8, HSTATE_PMC5(r13)
+ lwz r9, HSTATE_PMC6(r13)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r4
+ mtspr SPRN_PMC3, r5
+ mtspr SPRN_PMC4, r6
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+ ld r3, HSTATE_MMCR0(r13)
+ ld r4, HSTATE_MMCR1(r13)
+ ld r5, HSTATE_MMCRA(r13)
+ ld r6, HSTATE_SIAR(r13)
+ ld r7, HSTATE_SDAR(r13)
+ mtspr SPRN_MMCR1, r4
+ mtspr SPRN_MMCRA, r5
+ mtspr SPRN_SIAR, r6
+ mtspr SPRN_SDAR, r7
+BEGIN_FTR_SECTION
+ ld r8, HSTATE_MMCR2(r13)
+ ld r9, HSTATE_SIER(r13)
+ mtspr SPRN_MMCR2, r8
+ mtspr SPRN_SIER, r9
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ mtspr SPRN_MMCR0, r3
+ isync
+ mtlr r0
+23: blr
+
+/*
+ * Save guest PMU state into the vcpu struct.
+ * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
+ */
+_GLOBAL(kvmhv_save_guest_pmu)
+EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
+ mr r9, r3
+ mr r8, r4
+BEGIN_FTR_SECTION
+ /*
+ * POWER8 seems to have a hardware bug where setting
+ * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
+ * when some counters are already negative doesn't seem
+ * to cause a performance monitor alert (and hence interrupt).
+ * The effect of this is that when saving the PMU state,
+ * if there is no PMU alert pending when we read MMCR0
+ * before freezing the counters, but one becomes pending
+ * before we read the counters, we lose it.
+ * To work around this, we need a way to freeze the counters
+ * before reading MMCR0. Normally, freezing the counters
+ * is done by writing MMCR0 (to set MMCR0[FC]) which
+ * unavoidably writes MMCR0[PMAO] as well. On POWER8,
+ * we can also freeze the counters using MMCR2, by writing
+ * 1s to all the counter freeze condition bits (there are
+ * 9 bits each for 6 counters).
+ */
+ li r3, -1 /* set all freeze bits */
+ clrrdi r3, r3, 10
+ mfspr r10, SPRN_MMCR2
+ mtspr SPRN_MMCR2, r3
+ isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mfspr r4, SPRN_MMCR0 /* save MMCR0 */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ mfspr r6, SPRN_MMCRA
+ /* Clear MMCRA in order to disable SDAR updates */
+ li r7, 0
+ mtspr SPRN_MMCRA, r7
+ isync
+ cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */
+ bne 21f
+ std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
+ b 22f
+21: mfspr r5, SPRN_MMCR1
+ mfspr r7, SPRN_SIAR
+ mfspr r8, SPRN_SDAR
+ std r4, VCPU_MMCR(r9)
+ std r5, VCPU_MMCR + 8(r9)
+ std r6, VCPU_MMCR + 16(r9)
+BEGIN_FTR_SECTION
+ std r10, VCPU_MMCR + 24(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ std r7, VCPU_SIAR(r9)
+ std r8, VCPU_SDAR(r9)
+ mfspr r3, SPRN_PMC1
+ mfspr r4, SPRN_PMC2
+ mfspr r5, SPRN_PMC3
+ mfspr r6, SPRN_PMC4
+ mfspr r7, SPRN_PMC5
+ mfspr r8, SPRN_PMC6
+ stw r3, VCPU_PMC(r9)
+ stw r4, VCPU_PMC + 4(r9)
+ stw r5, VCPU_PMC + 8(r9)
+ stw r6, VCPU_PMC + 12(r9)
+ stw r7, VCPU_PMC + 16(r9)
+ stw r8, VCPU_PMC + 20(r9)
+BEGIN_FTR_SECTION
+ mfspr r5, SPRN_SIER
+ std r5, VCPU_SIER(r9)
+BEGIN_FTR_SECTION_NESTED(96)
+ mfspr r6, SPRN_SPMC1
+ mfspr r7, SPRN_SPMC2
+ mfspr r8, SPRN_MMCRS
+ stw r6, VCPU_PMC + 24(r9)
+ stw r7, VCPU_PMC + 28(r9)
+ std r8, VCPU_MMCR + 32(r9)
+ lis r4, 0x8000
+ mtspr SPRN_MMCRS, r4
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+22: blr
+
+/*
* This works around a hardware bug on POWER8E processors, where
* writing a 1 to the MMCR0[PMAO] bit doesn't generate a
* performance monitor interrupt. Instead, when we need to have
diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
index 008285058f9b..888e2609e3f1 100644
--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -130,7 +130,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
/* Set CR0 to indicate previous transactional state */
- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
/* L=1 => tresume, L=0 => tsuspend */
if (instr & (1 << 21)) {
@@ -174,7 +174,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
copy_from_checkpoint(vcpu);
/* Set CR0 to indicate previous transactional state */
- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
return RESUME_GUEST;
@@ -204,7 +204,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
copy_to_checkpoint(vcpu);
/* Set CR0 to indicate previous transactional state */
- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
vcpu->arch.shregs.msr = msr | MSR_TS_S;
return RESUME_GUEST;
diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
index b2c7c6fca4f9..3cf5863bc06e 100644
--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
@@ -89,7 +89,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
if (instr & (1 << 21))
vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
/* Set CR0 to 0b0010 */
- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000;
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+ 0x20000000;
return 1;
}
@@ -105,5 +106,5 @@ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */
vcpu->arch.regs.nip = vcpu->arch.tfhar;
copy_from_checkpoint(vcpu);
- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 614ebb4261f7..4efd65d9e828 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -167,7 +167,7 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
- svcpu->cr = vcpu->arch.cr;
+ svcpu->cr = vcpu->arch.regs.ccr;
svcpu->xer = vcpu->arch.regs.xer;
svcpu->ctr = vcpu->arch.regs.ctr;
svcpu->lr = vcpu->arch.regs.link;
@@ -249,7 +249,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
- vcpu->arch.cr = svcpu->cr;
+ vcpu->arch.regs.ccr = svcpu->cr;
vcpu->arch.regs.xer = svcpu->xer;
vcpu->arch.regs.ctr = svcpu->ctr;
vcpu->arch.regs.link = svcpu->lr;
@@ -1246,7 +1246,6 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_EXTERNAL:
- case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
case BOOK3S_INTERRUPT_EXTERNAL_HV:
case BOOK3S_INTERRUPT_H_VIRT:
vcpu->stat.ext_intr_exits++;
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index b8356cdc0c04..b0b2bfc2ff51 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -310,7 +310,7 @@ static inline bool icp_try_update(struct kvmppc_icp *icp,
*/
if (new.out_ee) {
kvmppc_book3s_queue_irqprio(icp->vcpu,
- BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+ BOOK3S_INTERRUPT_EXTERNAL);
if (!change_self)
kvmppc_fast_vcpu_kick(icp->vcpu);
}
@@ -593,8 +593,7 @@ static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
u32 xirr;
/* First, remove EE from the processor */
- kvmppc_book3s_dequeue_irqprio(icp->vcpu,
- BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
/*
* ICP State: Accept_Interrupt
@@ -754,8 +753,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
* We can remove EE from the current processor, the update
* transaction will set it again if needed
*/
- kvmppc_book3s_dequeue_irqprio(icp->vcpu,
- BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
do {
old_state = new_state = READ_ONCE(icp->state);
@@ -1167,8 +1165,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
* Deassert the CPU interrupt request.
* icp_try_update will reassert it if necessary.
*/
- kvmppc_book3s_dequeue_irqprio(icp->vcpu,
- BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
/*
* Note that if we displace an interrupt from old_state.xisr,
@@ -1393,7 +1390,8 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ cpu_has_feature(CPU_FTR_HVMODE)) {
/* Enable real mode support */
xics->real_mode = ENABLE_REALMODE;
xics->real_mode_dbg = DEBUG_REALMODE;
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 30c2eb766954..ad4a370703d3 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -62,6 +62,69 @@
#define XIVE_Q_GAP 2
/*
+ * Push a vcpu's context to the XIVE on guest entry.
+ * This assumes we are in virtual mode (MMU on)
+ */
+void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
+{
+ void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
+ u64 pq;
+
+ if (!tima)
+ return;
+ eieio();
+ __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
+ __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
+ vcpu->arch.xive_pushed = 1;
+ eieio();
+
+ /*
+ * We clear the irq_pending flag. There is a small chance of a
+ * race vs. the escalation interrupt happening on another
+ * processor setting it again, but the only consequence is to
+ * cause a spurious wakeup on the next H_CEDE, which is not an
+ * issue.
+ */
+ vcpu->arch.irq_pending = 0;
+
+ /*
+ * In single escalation mode, if the escalation interrupt is
+ * on, we mask it.
+ */
+ if (vcpu->arch.xive_esc_on) {
+ pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
+ XIVE_ESB_SET_PQ_01));
+ mb();
+
+ /*
+ * We have a possible subtle race here: The escalation
+ * interrupt might have fired and be on its way to the
+ * host queue while we mask it, and if we unmask it
+ * early enough (re-cede right away), there is a
+ * theoretical possibility that it fires again, thus
+ * landing in the target queue more than once which is
+ * a big no-no.
+ *
+ * Fortunately, solving this is rather easy. If the
+ * above load setting PQ to 01 returns a previous
+ * value where P is set, then we know the escalation
+ * interrupt is somewhere on its way to the host. In
+ * that case we simply don't clear the xive_esc_on
+ * flag below. It will be eventually cleared by the
+ * handler for the escalation interrupt.
+ *
+ * Then, when doing a cede, we check that flag again
+ * before re-enabling the escalation interrupt, and if
+ * set, we abort the cede.
+ */
+ if (!(pq & XIVE_ESB_VAL_P))
+ /* Now P is 0, we can clear the flag */
+ vcpu->arch.xive_esc_on = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
+
+/*
* This is a simple trigger for a generic XIVE IRQ. This must
* only be called for interrupts that support a trigger page
*/
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 4171ede8722b..033363d6e764 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -280,14 +280,6 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
/* First collect pending bits from HW */
GLUE(X_PFX,ack_pending)(xc);
- /*
- * Cleanup the old-style bits if needed (they may have been
- * set by pull or an escalation interrupts).
- */
- if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
- clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
- &vcpu->arch.pending_exceptions);
-
pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
xc->pending, xc->hw_cppr, xc->cppr);
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 81bd8a07aa51..051af7d97327 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -182,7 +182,7 @@
*/
PPC_LL r4, PACACURRENT(r13)
PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4)
- stw r10, VCPU_CR(r4)
+ PPC_STL r10, VCPU_CR(r4)
PPC_STL r11, VCPU_GPR(R4)(r4)
PPC_STL r5, VCPU_GPR(R5)(r4)
PPC_STL r6, VCPU_GPR(R6)(r4)
@@ -292,7 +292,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
PPC_STL r4, VCPU_GPR(R4)(r11)
PPC_LL r4, THREAD_NORMSAVE(0)(r10)
PPC_STL r5, VCPU_GPR(R5)(r11)
- stw r13, VCPU_CR(r11)
+ PPC_STL r13, VCPU_CR(r11)
mfspr r5, \srr0
PPC_STL r3, VCPU_GPR(R10)(r11)
PPC_LL r3, THREAD_NORMSAVE(2)(r10)
@@ -319,7 +319,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
PPC_STL r4, VCPU_GPR(R4)(r11)
PPC_LL r4, GPR9(r8)
PPC_STL r5, VCPU_GPR(R5)(r11)
- stw r9, VCPU_CR(r11)
+ PPC_STL r9, VCPU_CR(r11)
mfspr r5, \srr0
PPC_STL r3, VCPU_GPR(R8)(r11)
PPC_LL r3, GPR10(r8)
@@ -643,7 +643,7 @@ lightweight_exit:
PPC_LL r3, VCPU_LR(r4)
PPC_LL r5, VCPU_XER(r4)
PPC_LL r6, VCPU_CTR(r4)
- lwz r7, VCPU_CR(r4)
+ PPC_LL r7, VCPU_CR(r4)
PPC_LL r8, VCPU_PC(r4)
PPC_LD(r9, VCPU_SHARED_MSR, r11)
PPC_LL r0, VCPU_GPR(R0)(r4)
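The stw/lwz to PPC_STL/PPC_LL conversions above track the guest CR image moving into an unsigned long field (see the emulate_loadstore.c hunk below, which drops the separate vcpu->arch.cr): per asm/asm-compat.h, PPC_STL/PPC_LL expand to the register-width std/ld on 64-bit and stw/lwz on 32-bit, whereas a hard-coded stw updates only 32 bits of an 8-byte slot. A schematic of the bug class, with a made-up struct standing in for the vcpu register file:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct regs_model {             /* hypothetical; the kernel field is pt_regs.ccr */
        unsigned long ccr;      /* 8 bytes on 64-bit */
};

int main(void)
{
        struct regs_model r = { .ccr = 0xdeadbeefcafef00dUL };
        uint32_t cr = 0x12345678;

        memcpy(&r.ccr, &cr, sizeof(cr));   /* what a fixed 32-bit "stw" does */
        printf("ccr after 32-bit store: %#lx\n", r.ccr); /* half is stale */

        r.ccr = cr;                        /* full-width store, i.e. PPC_STL */
        printf("ccr after full store:   %#lx\n", r.ccr);
        return 0;
}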
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 75dce1ef3bc8..f91b1309a0a8 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -117,7 +117,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = EMULATE_FAIL;
vcpu->arch.regs.msr = vcpu->arch.shared->msr;
- vcpu->arch.regs.ccr = vcpu->arch.cr;
if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index eba5756d5b41..2869a299c4ed 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -594,7 +594,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = !!(hv_enabled && radix_enabled());
break;
case KVM_CAP_PPC_MMU_HASH_V3:
- r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
+ r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
+ cpu_has_feature(CPU_FTR_HVMODE));
+ break;
+ case KVM_CAP_PPC_NESTED_HV:
+ r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
+ !kvmppc_hv_ops->enable_nested(NULL));
break;
#endif
case KVM_CAP_SYNC_MMU:
@@ -2114,6 +2119,14 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
break;
}
+
+ case KVM_CAP_PPC_NESTED_HV:
+ r = -EINVAL;
+ if (!is_kvmppc_hv_enabled(kvm) ||
+ !kvm->arch.kvm_ops->enable_nested)
+ break;
+ r = kvm->arch.kvm_ops->enable_nested(kvm);
+ break;
#endif
default:
r = -EINVAL;
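The new KVM_CAP_PPC_NESTED_HV plumbing follows the standard two-step capability pattern: userspace probes with KVM_CHECK_EXTENSION and then opts in per VM with KVM_ENABLE_CAP, which lands in the enable_cap handler above. A sketch of the userspace side, assuming vm_fd is an already-created KVM VM descriptor and UAPI headers that define the new capability:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int enable_nested_hv(int vm_fd)
{
        struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_NESTED_HV };

        if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_NESTED_HV) <= 0)
                return -1;      /* old kernel, or no HV/enable_nested op */

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);      /* 0 on success */
}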
diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S
index 90e330f21356..0531a1492fdf 100644
--- a/arch/powerpc/kvm/tm.S
+++ b/arch/powerpc/kvm/tm.S
@@ -28,17 +28,25 @@
* Save transactional state and TM-related registers.
* Called with:
* - r3 pointing to the vcpu struct
- * - r4 points to the MSR with current TS bits:
+ * - r4 containing the MSR with current TS bits:
* (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR).
- * This can modify all checkpointed registers, but
- * restores r1, r2 before exit.
+ * - r5 containing a flag indicating that non-volatile registers
+ * must be preserved.
+ * If r5 == 0, this can modify all checkpointed registers, but
+ * restores r1, r2 before exit. If r5 != 0, this restores the
+ * MSR TM/FP/VEC/VSX bits to their state on entry.
*/
_GLOBAL(__kvmppc_save_tm)
mflr r0
std r0, PPC_LR_STKOFF(r1)
+ stdu r1, -SWITCH_FRAME_SIZE(r1)
+
+ mr r9, r3
+ cmpdi cr7, r5, 0
/* Turn on TM. */
mfmsr r8
+ mr r10, r8
li r0, 1
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
ori r8, r8, MSR_FP
@@ -51,6 +59,27 @@ _GLOBAL(__kvmppc_save_tm)
std r1, HSTATE_SCRATCH2(r13)
std r3, HSTATE_SCRATCH1(r13)
+ /* Save CR on the stack - even if r5 == 0 we need to get cr7 back. */
+ mfcr r6
+ SAVE_GPR(6, r1)
+
+ /* Save DSCR so we can restore it to avoid running with user value */
+ mfspr r7, SPRN_DSCR
+ SAVE_GPR(7, r1)
+
+ /*
+ * We are going to do treclaim., which will modify all checkpointed
+ * registers. Save the non-volatile registers on the stack if
+ * preservation of non-volatile state has been requested.
+ */
+ beq cr7, 3f
+ SAVE_NVGPRS(r1)
+
+ /* MSR[TS] will be 0 (non-transactional) once we do treclaim. */
+ li r0, 0
+ rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+ SAVE_GPR(10, r1) /* final MSR value */
+3:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
/* Emulation of the treclaim instruction needs TEXASR before treclaim */
@@ -74,22 +103,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
std r9, PACATMSCRATCH(r13)
ld r9, HSTATE_SCRATCH1(r13)
- /* Get a few more GPRs free. */
- std r29, VCPU_GPRS_TM(29)(r9)
- std r30, VCPU_GPRS_TM(30)(r9)
- std r31, VCPU_GPRS_TM(31)(r9)
-
- /* Save away PPR and DSCR soon so don't run with user values. */
- mfspr r31, SPRN_PPR
+ /* Save away PPR soon so we don't run with user value. */
+ std r0, VCPU_GPRS_TM(0)(r9)
+ mfspr r0, SPRN_PPR
HMT_MEDIUM
- mfspr r30, SPRN_DSCR
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
-#endif
- /* Save all but r9, r13 & r29-r31 */
- reg = 0
+ /* Reload stack pointer. */
+ std r1, VCPU_GPRS_TM(1)(r9)
+ ld r1, HSTATE_SCRATCH2(r13)
+
+ /* Set MSR RI now we have r1 and r13 back. */
+ std r2, VCPU_GPRS_TM(2)(r9)
+ li r2, MSR_RI
+ mtmsrd r2, 1
+
+ /* Reload TOC pointer. */
+ ld r2, PACATOC(r13)
+
+ /* Save all but r0-r2, r9 & r13 */
+ reg = 3
.rept 29
.if (reg != 9) && (reg != 13)
std reg, VCPU_GPRS_TM(reg)(r9)
@@ -103,33 +135,29 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
ld r4, PACATMSCRATCH(r13)
std r4, VCPU_GPRS_TM(9)(r9)
- /* Reload stack pointer and TOC. */
- ld r1, HSTATE_SCRATCH2(r13)
- ld r2, PACATOC(r13)
-
- /* Set MSR RI now we have r1 and r13 back. */
- li r5, MSR_RI
- mtmsrd r5, 1
+ /* Restore host DSCR and CR values, after saving guest values */
+ mfcr r6
+ mfspr r7, SPRN_DSCR
+ stw r6, VCPU_CR_TM(r9)
+ std r7, VCPU_DSCR_TM(r9)
+ REST_GPR(6, r1)
+ REST_GPR(7, r1)
+ mtcr r6
+ mtspr SPRN_DSCR, r7
- /* Save away checkpinted SPRs. */
- std r31, VCPU_PPR_TM(r9)
- std r30, VCPU_DSCR_TM(r9)
+ /* Save away checkpointed SPRs. */
+ std r0, VCPU_PPR_TM(r9)
mflr r5
- mfcr r6
mfctr r7
mfspr r8, SPRN_AMR
mfspr r10, SPRN_TAR
mfxer r11
std r5, VCPU_LR_TM(r9)
- stw r6, VCPU_CR_TM(r9)
std r7, VCPU_CTR_TM(r9)
std r8, VCPU_AMR_TM(r9)
std r10, VCPU_TAR_TM(r9)
std r11, VCPU_XER_TM(r9)
- /* Restore r12 as trap number. */
- lwz r12, VCPU_TRAP(r9)
-
/* Save FP/VSX. */
addi r3, r9, VCPU_FPRS_TM
bl store_fp_state
@@ -137,6 +165,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
bl store_vr_state
mfspr r6, SPRN_VRSAVE
stw r6, VCPU_VRSAVE_TM(r9)
+
+ /* Restore non-volatile registers if requested to */
+ beq cr7, 1f
+ REST_NVGPRS(r1)
+ REST_GPR(10, r1)
1:
/*
* We need to save these SPRs after the treclaim so that the software
@@ -146,12 +179,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
*/
mfspr r7, SPRN_TEXASR
std r7, VCPU_TEXASR(r9)
-11:
mfspr r5, SPRN_TFHAR
mfspr r6, SPRN_TFIAR
std r5, VCPU_TFHAR(r9)
std r6, VCPU_TFIAR(r9)
+ /* Restore MSR state if requested */
+ beq cr7, 2f
+ mtmsrd r10, 0
+2:
+ addi r1, r1, SWITCH_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr
@@ -161,49 +198,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
* be invoked from C function by PR KVM only.
*/
_GLOBAL(_kvmppc_save_tm_pr)
- mflr r5
- std r5, PPC_LR_STKOFF(r1)
- stdu r1, -SWITCH_FRAME_SIZE(r1)
- SAVE_NVGPRS(r1)
-
- /* save MSR since TM/math bits might be impacted
- * by __kvmppc_save_tm().
- */
- mfmsr r5
- SAVE_GPR(5, r1)
-
- /* also save DSCR/CR/TAR so that it can be recovered later */
- mfspr r6, SPRN_DSCR
- SAVE_GPR(6, r1)
-
- mfcr r7
- stw r7, _CCR(r1)
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+ stdu r1, -PPC_MIN_STKFRM(r1)
mfspr r8, SPRN_TAR
- SAVE_GPR(8, r1)
+ std r8, PPC_MIN_STKFRM-8(r1)
+ li r5, 1 /* preserve non-volatile registers */
bl __kvmppc_save_tm
- REST_GPR(8, r1)
+ ld r8, PPC_MIN_STKFRM-8(r1)
mtspr SPRN_TAR, r8
- ld r7, _CCR(r1)
- mtcr r7
-
- REST_GPR(6, r1)
- mtspr SPRN_DSCR, r6
-
- /* need preserve current MSR's MSR_TS bits */
- REST_GPR(5, r1)
- mfmsr r6
- rldicl r6, r6, 64 - MSR_TS_S_LG, 62
- rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
- mtmsrd r5
-
- REST_NVGPRS(r1)
- addi r1, r1, SWITCH_FRAME_SIZE
- ld r5, PPC_LR_STKOFF(r1)
- mtlr r5
+ addi r1, r1, PPC_MIN_STKFRM
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
blr
EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);
@@ -215,15 +225,21 @@ EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);
* - r4 is the guest MSR with desired TS bits:
* For HV KVM, it is VCPU_MSR
* For PR KVM, it is provided by caller
- * This potentially modifies all checkpointed registers.
- * It restores r1, r2 from the PACA.
+ * - r5 containing a flag indicating that non-volatile registers
+ * must be preserved.
+ * If r5 == 0, this potentially modifies all checkpointed registers, but
+ * restores r1, r2 from the PACA before exit.
+ * If r5 != 0, this restores the MSR TM/FP/VEC/VSX bits to their state on entry.
*/
_GLOBAL(__kvmppc_restore_tm)
mflr r0
std r0, PPC_LR_STKOFF(r1)
+ cmpdi cr7, r5, 0
+
/* Turn on TM/FP/VSX/VMX so we can restore them. */
mfmsr r5
+ mr r10, r5
li r6, MSR_TM >> 32
sldi r6, r6, 32
or r5, r5, r6
@@ -244,8 +260,7 @@ _GLOBAL(__kvmppc_restore_tm)
mr r5, r4
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
- beqlr /* TM not active in guest */
- std r1, HSTATE_SCRATCH2(r13)
+ beq 9f /* TM not active in guest */
/* Make sure the failure summary is set, otherwise we'll program check
* when we trechkpt. It's possible that this might have been not set
@@ -256,6 +271,26 @@ _GLOBAL(__kvmppc_restore_tm)
mtspr SPRN_TEXASR, r7
/*
+ * Make a stack frame and save non-volatile registers if requested.
+ */
+ stdu r1, -SWITCH_FRAME_SIZE(r1)
+ std r1, HSTATE_SCRATCH2(r13)
+
+ mfcr r6
+ mfspr r7, SPRN_DSCR
+ SAVE_GPR(2, r1)
+ SAVE_GPR(6, r1)
+ SAVE_GPR(7, r1)
+
+ beq cr7, 4f
+ SAVE_NVGPRS(r1)
+
+ /* MSR[TS] will be 1 (suspended) once we do trechkpt */
+ li r0, 1
+ rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+ SAVE_GPR(10, r1) /* final MSR value */
+4:
+ /*
* We need to load up the checkpointed state for the guest.
* We need to do this early as it will blow away any GPRs, VSRs and
* some SPRs.
@@ -291,8 +326,6 @@ _GLOBAL(__kvmppc_restore_tm)
ld r29, VCPU_DSCR_TM(r3)
ld r30, VCPU_PPR_TM(r3)
- std r2, PACATMSCRATCH(r13) /* Save TOC */
-
/* Clear the MSR RI since r1, r13 are all going to be foobar. */
li r5, 0
mtmsrd r5, 1
@@ -318,18 +351,31 @@ _GLOBAL(__kvmppc_restore_tm)
/* Now let's get back the state we need. */
HMT_MEDIUM
GET_PACA(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
-#endif
ld r1, HSTATE_SCRATCH2(r13)
- ld r2, PACATMSCRATCH(r13)
+ REST_GPR(7, r1)
+ mtspr SPRN_DSCR, r7
/* Set the MSR RI since we have our registers back. */
li r5, MSR_RI
mtmsrd r5, 1
+
+ /* Restore TOC pointer and CR */
+ REST_GPR(2, r1)
+ REST_GPR(6, r1)
+ mtcr r6
+
+ /* Restore non-volatile registers if requested to. */
+ beq cr7, 5f
+ REST_GPR(10, r1)
+ REST_NVGPRS(r1)
+
+5: addi r1, r1, SWITCH_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
+
+9: /* Restore MSR bits if requested */
+ beqlr cr7
+ mtmsrd r10, 0
blr
/*
@@ -337,47 +383,23 @@ _GLOBAL(__kvmppc_restore_tm)
* can be invoked from C function by PR KVM only.
*/
_GLOBAL(_kvmppc_restore_tm_pr)
- mflr r5
- std r5, PPC_LR_STKOFF(r1)
- stdu r1, -SWITCH_FRAME_SIZE(r1)
- SAVE_NVGPRS(r1)
-
- /* save MSR to avoid TM/math bits change */
- mfmsr r5
- SAVE_GPR(5, r1)
-
- /* also save DSCR/CR/TAR so that it can be recovered later */
- mfspr r6, SPRN_DSCR
- SAVE_GPR(6, r1)
-
- mfcr r7
- stw r7, _CCR(r1)
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+ stdu r1, -PPC_MIN_STKFRM(r1)
+ /* save TAR so that it can be recovered later */
mfspr r8, SPRN_TAR
- SAVE_GPR(8, r1)
+ std r8, PPC_MIN_STKFRM-8(r1)
+ li r5, 1
bl __kvmppc_restore_tm
- REST_GPR(8, r1)
+ ld r8, PPC_MIN_STKFRM-8(r1)
mtspr SPRN_TAR, r8
- ld r7, _CCR(r1)
- mtcr r7
-
- REST_GPR(6, r1)
- mtspr SPRN_DSCR, r6
-
- /* need preserve current MSR's MSR_TS bits */
- REST_GPR(5, r1)
- mfmsr r6
- rldicl r6, r6, 64 - MSR_TS_S_LG, 62
- rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
- mtmsrd r5
-
- REST_NVGPRS(r1)
- addi r1, r1, SWITCH_FRAME_SIZE
- ld r5, PPC_LR_STKOFF(r1)
- mtlr r5
+ addi r1, r1, PPC_MIN_STKFRM
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
blr
EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);
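After this rework, the register contract documented in the comments maps onto the C prototypes PR KVM calls through (declared in arch/powerpc/include/asm/asm-prototypes.h; reproduced here as a sketch for orientation). The wrappers take the vcpu and guest MSR, and now pass the new r5 "preserve non-volatiles" flag themselves instead of spilling NVGPRS in the wrapper:

#include <linux/types.h>

struct kvm_vcpu;

/* r3 = vcpu, r4 = guest MSR; the _pr wrappers set r5 = 1 so that
 * __kvmppc_save_tm/__kvmppc_restore_tm preserve the non-volatile
 * GPRs and restore the MSR TM/FP/VEC/VSX bits on exit. */
void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);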
diff --git a/arch/powerpc/kvm/trace_book3s.h b/arch/powerpc/kvm/trace_book3s.h
index f3b23759e017..372a82fa2de3 100644
--- a/arch/powerpc/kvm/trace_book3s.h
+++ b/arch/powerpc/kvm/trace_book3s.h
@@ -14,7 +14,6 @@
{0x400, "INST_STORAGE"}, \
{0x480, "INST_SEGMENT"}, \
{0x500, "EXTERNAL"}, \
- {0x501, "EXTERNAL_LEVEL"}, \
{0x502, "EXTERNAL_HV"}, \
{0x600, "ALIGNMENT"}, \
{0x700, "PROGRAM"}, \
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 886ed94b9c13..d05c8af4ac51 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
addc r0, r8, r9
ld r10, 0(r4)
ld r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ rotldi r5, r5, 8
+#endif
adde r0, r0, r10
add r5, r5, r7
adde r0, r0, r11
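The CONFIG_CPU_LITTLE_ENDIAN rotation above exists because the IPv6 pseudo-header sum mixes words loaded from memory in network byte order with a field held in host byte order, and an end-around-carry checksum only folds correctly when every addend uses the same byte order. Schematically, in a userspace model (fold32() is a local helper, not the kernel's csum_fold()):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fold32(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

int main(void)
{
        uint32_t net_word = 0x11223344; /* stand-in for address data */
        uint32_t len = 40;              /* host order, as passed in */

        /* On LE hosts the two sums differ: len's bytes must be brought
         * into the same order as the network data before adding. */
        printf("mixed orders: %#x\n", fold32(net_word + len));
        printf("matched:      %#x\n", fold32(net_word + htonl(len)));
        return 0;
}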
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 850f3b8f4da5..5ffee298745f 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -142,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr)
return 0;
}
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;
unsigned int *patch_addr = NULL;
@@ -182,12 +182,22 @@ out:
}
#else /* !CONFIG_STRICT_KERNEL_RWX */
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
return raw_patch_instruction(addr, instr);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+ /* Make sure we aren't patching a freed init section */
+ if (init_mem_is_free && init_section_contains(addr, 4)) {
+ pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+ return 0;
+ }
+ return do_patch_instruction(addr, instr);
+}
NOKPROBE_SYMBOL(patch_instruction);
int patch_branch(unsigned int *addr, unsigned long target, int flags)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d51cf5f4e45e..1697e903bbf2 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -103,8 +103,7 @@ static bool store_updates_sp(unsigned int inst)
*/
static int
-__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code,
- int pkey)
+__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
/*
* If we are in kernel mode, bail out with a SEGV, this will
@@ -114,18 +113,17 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code,
if (!user_mode(regs))
return SIGSEGV;
- _exception_pkey(SIGSEGV, regs, si_code, address, pkey);
+ _exception(SIGSEGV, regs, si_code, address);
return 0;
}
static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
- return __bad_area_nosemaphore(regs, address, SEGV_MAPERR, 0);
+ return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}
-static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code,
- int pkey)
+static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
{
struct mm_struct *mm = current->mm;
@@ -135,54 +133,61 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code,
*/
up_read(&mm->mmap_sem);
- return __bad_area_nosemaphore(regs, address, si_code, pkey);
+ return __bad_area_nosemaphore(regs, address, si_code);
}
static noinline int bad_area(struct pt_regs *regs, unsigned long address)
{
- return __bad_area(regs, address, SEGV_MAPERR, 0);
+ return __bad_area(regs, address, SEGV_MAPERR);
}
static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address,
int pkey)
{
- return __bad_area_nosemaphore(regs, address, SEGV_PKUERR, pkey);
+ /*
+ * If we are in kernel mode, bail out with a SEGV, this will
+ * be caught by the assembly which will restore the non-volatile
+ * registers before calling bad_page_fault()
+ */
+ if (!user_mode(regs))
+ return SIGSEGV;
+
+ _exception_pkey(regs, address, pkey);
+
+ return 0;
}
static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
- return __bad_area(regs, address, SEGV_ACCERR, 0);
+ return __bad_area(regs, address, SEGV_ACCERR);
}
static int do_sigbus(struct pt_regs *regs, unsigned long address,
vm_fault_t fault)
{
- siginfo_t info;
- unsigned int lsb = 0;
-
if (!user_mode(regs))
return SIGBUS;
current->thread.trap_nr = BUS_ADRERR;
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *)address;
#ifdef CONFIG_MEMORY_FAILURE
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+ unsigned int lsb = 0; /* shutup gcc */
+
pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
current->comm, current->pid, address);
- info.si_code = BUS_MCEERR_AR;
+
+ if (fault & VM_FAULT_HWPOISON_LARGE)
+ lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+ if (fault & VM_FAULT_HWPOISON)
+ lsb = PAGE_SHIFT;
+
+ force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb,
+ current);
+ return 0;
}
- if (fault & VM_FAULT_HWPOISON_LARGE)
- lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
- if (fault & VM_FAULT_HWPOISON)
- lsb = PAGE_SHIFT;
#endif
- info.si_addr_lsb = lsb;
- force_sig_info(SIGBUS, &info, current);
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current);
return 0;
}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 51ce091914f9..7a9886f98b0c 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -308,55 +308,6 @@ void register_page_bootmem_memmap(unsigned long section_nr,
{
}
-/*
- * We do not have access to the sparsemem vmemmap, so we fallback to
- * walking the list of sparsemem blocks which we already maintain for
- * the sake of crashdump. In the long run, we might want to maintain
- * a tree if performance of that linear walk becomes a problem.
- *
- * realmode_pfn_to_page functions can fail due to:
- * 1) As real sparsemem blocks do not lay in RAM continously (they
- * are in virtual address space which is not available in the real mode),
- * the requested page struct can be split between blocks so get_page/put_page
- * may fail.
- * 2) When huge pages are used, the get_page/put_page API will fail
- * in real mode as the linked addresses in the page struct are virtual
- * too.
- */
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
- struct vmemmap_backing *vmem_back;
- struct page *page;
- unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
- unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
-
- for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
- if (pg_va < vmem_back->virt_addr)
- continue;
-
- /* After vmemmap_list entry free is possible, need check all */
- if ((pg_va + sizeof(struct page)) <=
- (vmem_back->virt_addr + page_size)) {
- page = (struct page *) (vmem_back->phys + pg_va -
- vmem_back->virt_addr);
- return page;
- }
- }
-
- /* Probably that page struct is split between real pages */
- return NULL;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
-#else
-
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
- struct page *page = pfn_to_page(pfn);
- return page;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5c8530d0c611..04ccb274a620 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -63,6 +63,7 @@
#endif
unsigned long long memory_limit;
+bool init_mem_is_free;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
{
ppc_md.progress = ppc_printk_progress;
mark_initmem_nx();
+ init_mem_is_free = true;
free_initmem_default(POISON_FREE_INITMEM);
}
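Together with the init_mem_is_free flag set in free_initmem() above, the code-patching hunk earlier in this series turns late patch_instruction() calls into a no-op for addresses inside the already-freed init sections. The ordering it enforces, as a standalone model (not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool init_mem_is_free;

static int patch(const char *what, bool in_init_section)
{
        if (init_mem_is_free && in_init_section) {
                printf("skip %s: init memory already freed\n", what);
                return 0;
        }
        printf("patch %s\n", what);
        return 0;
}

int main(void)
{
        patch("init-section site", true);  /* ok: before free_initmem() */
        init_mem_is_free = true;           /* free_initmem() analogue */
        patch("init-section site", true);  /* now skipped */
        return 0;
}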
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index c9ee9e23845f..56c2234cc6ae 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -18,11 +18,15 @@
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
+#include <linux/sizes.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>
static DEFINE_MUTEX(mem_list_mutex);
+#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY 0x1
+#define MM_IOMMU_TABLE_GROUP_PAGE_MASK ~(SZ_4K - 1)
+
struct mm_iommu_table_group_mem_t {
struct list_head next;
struct rcu_head rcu;
@@ -263,6 +267,9 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
if (!page)
continue;
+ if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
+ SetPageDirty(page);
+
put_page(page);
mem->hpas[i] = 0;
}
@@ -360,7 +367,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
return ret;
}
-EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
unsigned long ua, unsigned long entries)
@@ -390,7 +396,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
if (pageshift > mem->pageshift)
return -EFAULT;
- *hpa = *va | (ua & ~PAGE_MASK);
+ *hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
return 0;
}
@@ -413,11 +419,31 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
if (!pa)
return -EFAULT;
- *hpa = *pa | (ua & ~PAGE_MASK);
+ *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
return 0;
}
-EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
+
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
+{
+ struct mm_iommu_table_group_mem_t *mem;
+ long entry;
+ void *va;
+ unsigned long *pa;
+
+ mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
+ if (!mem)
+ return;
+
+ entry = (ua - mem->ua) >> PAGE_SHIFT;
+ va = &mem->hpas[entry];
+
+ pa = (void *) vmalloc_to_phys(va);
+ if (!pa)
+ return;
+
+ *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
+}
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
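The new MM_IOMMU_TABLE_GROUP_PAGE_DIRTY encoding works because every hpas[] entry is the host physical address of a page at least 4K aligned, leaving the low bits free for in-band flags; readers must therefore mask with MM_IOMMU_TABLE_GROUP_PAGE_MASK, which is exactly what the two ua_to_hpa hunks add. A standalone model of the trick:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K       0x1000UL
#define PAGE_DIRTY  0x1UL
#define PAGE_MASK4K (~(SZ_4K - 1))

int main(void)
{
        uint64_t hpa = 0x123456000UL;   /* 4K-aligned host physical addr */
        uint64_t entry = hpa;

        entry |= PAGE_DIRTY;            /* mm_iommu_ua_mark_dirty_rm() */
        printf("stored  %#lx\n", (unsigned long)entry);
        printf("address %#lx dirty=%lu\n",
               (unsigned long)(entry & PAGE_MASK4K),
               (unsigned long)(entry & PAGE_DIRTY));
        return 0;
}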
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 35ac5422903a..055b211b7126 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
int new_nid;
/* Use associativity from first thread for all siblings */
- vphn_get_associativity(cpu, associativity);
+ if (vphn_get_associativity(cpu, associativity))
+ return cpu_to_node(cpu);
+
new_nid = associativity_to_nid(associativity);
if (new_nid < 0 || !node_possible(new_nid))
new_nid = first_online_node;
@@ -1215,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
* Need to ensure that NODE_DATA is initialized for a node from
* available memory (see memblock_alloc_try_nid). If unable to
* init the node, then default to nearest node that has memory
- * installed.
+ * installed. Skip onlining a node if the subsystems are not
+ * yet initialized.
*/
- if (try_online_node(new_nid))
+ if (!topology_inited || try_online_node(new_nid))
new_nid = first_online_node;
#else
/*
@@ -1452,7 +1455,8 @@ static struct timer_list topology_timer;
static void reset_topology_timer(void)
{
- mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+ if (vphn_enabled)
+ mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
}
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index 333b1f80c435..b271b283c785 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/pkeys.c
@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
* Since any pkey can be used for data or execute, we will just treat
* all keys as equal and track them as one entity.
*/
- pkeys_total = be32_to_cpu(vals[0]);
+ pkeys_total = vals[0];
pkeys_devtree_defined = true;
}
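The pkeys fix drops a double byte-swap: device-tree cells are stored big-endian, but the of_property_read_u32* helpers already convert to CPU endianness, so applying be32_to_cpu() on top corrupts the count on little-endian hosts. A userspace model of the mistake:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Converts one big-endian cell, like of_property_read_u32() does. */
static uint32_t read_u32_cell(const void *raw)
{
        uint32_t be;

        memcpy(&be, raw, sizeof(be));
        return ntohl(be);
}

int main(void)
{
        uint8_t cell[4] = { 0x00, 0x00, 0x00, 0x20 };   /* 32 keys, BE */
        uint32_t val = read_u32_cell(cell);

        printf("correct: %u\n", val);                /* 32 */
        printf("double-swapped: %u\n", ntohl(val));  /* bogus on LE */
        return 0;
}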
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index fef3e1eb3a19..4c4dfc473800 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -833,6 +833,15 @@ EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid);
/*
* Flush partition scoped translations from LPID (=LPIDR)
*/
+void radix__flush_tlb_lpid(unsigned int lpid)
+{
+ _tlbie_lpid(lpid, RIC_FLUSH_ALL);
+}
+EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid);
+
+/*
+ * Flush partition scoped translations from LPID (=LPIDR)
+ */
void radix__local_flush_tlb_lpid(unsigned int lpid)
{
_tlbiel_lpid(lpid, RIC_FLUSH_ALL);
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
index ad054dd0d666..5df6290d1ccc 100644
--- a/arch/powerpc/oprofile/backtrace.c
+++ b/arch/powerpc/oprofile/backtrace.c
@@ -7,7 +7,7 @@
* 2 of the License, or (at your option) any later version.
**/
-#include <linux/compat_time.h>
+#include <linux/time.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 0c45cdbac4cf..7f12c7b78c0f 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -50,11 +50,11 @@ struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);
/*
- * The spufs fault-handling code needs to call force_sig_info to raise signals
+ * The spufs fault-handling code needs to call force_sig_fault to raise signals
* on DMA errors. Export it here to avoid general kernel-wide access to this
* function
*/
-EXPORT_SYMBOL_GPL(force_sig_info);
+EXPORT_SYMBOL_GPL(force_sig_fault);
/*
* Protects cbe_spu_info and spu->number.
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index 83cf58daaa79..971ac43b5d60 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -36,42 +36,32 @@
static void spufs_handle_event(struct spu_context *ctx,
unsigned long ea, int type)
{
- siginfo_t info;
-
if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
ctx->event_return |= type;
wake_up_all(&ctx->stop_wq);
return;
}
- clear_siginfo(&info);
-
switch (type) {
case SPE_EVENT_INVALID_DMA:
- info.si_signo = SIGBUS;
- info.si_code = BUS_OBJERR;
+ force_sig_fault(SIGBUS, BUS_OBJERR, NULL, current);
break;
case SPE_EVENT_SPE_DATA_STORAGE:
- info.si_signo = SIGSEGV;
- info.si_addr = (void __user *)ea;
- info.si_code = SEGV_ACCERR;
ctx->ops->restart_dma(ctx);
+ force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *)ea,
+ current);
break;
case SPE_EVENT_DMA_ALIGNMENT:
- info.si_signo = SIGBUS;
/* DAR isn't set for an alignment fault :( */
- info.si_code = BUS_ADRALN;
+ force_sig_fault(SIGBUS, BUS_ADRALN, NULL, current);
break;
case SPE_EVENT_SPE_ERROR:
- info.si_signo = SIGILL;
- info.si_addr = (void __user *)(unsigned long)
- ctx->ops->npc_read(ctx) - 4;
- info.si_code = ILL_ILLOPC;
+ force_sig_fault(
+ SIGILL, ILL_ILLOPC,
+ (void __user *)(unsigned long)
+ ctx->ops->npc_read(ctx) - 4, current);
break;
}
-
- if (info.si_signo)
- force_sig_info(info.si_signo, &info, current);
}
int spufs_handle_class0(struct spu_context *ctx)
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index 6c5db1acbe8d..fe9691040f54 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
level_shift = entries_shift + 3;
level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);
- if ((level_shift - 3) * levels + page_shift >= 60)
+ if ((level_shift - 3) * levels + page_shift >= 55)
return -EINVAL;
/* Allocate TCE table */
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index a344980287a5..fe451348ae57 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -31,6 +31,7 @@ config RISCV
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_CONTIGUOUS
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GENERIC_DMA_COHERENT
select HAVE_PERF_EVENTS
select IRQ_DOMAIN
@@ -108,10 +109,12 @@ config ARCH_RV32I
select GENERIC_LIB_ASHRDI3
select GENERIC_LIB_LSHRDI3
select GENERIC_LIB_UCMPDI2
+ select GENERIC_LIB_UMODDI3
config ARCH_RV64I
bool "RV64I"
select 64BIT
+ select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FTRACE_MCOUNT_RECORD
@@ -208,14 +211,61 @@ config RISCV_BASE_PMU
endmenu
+config FPU
+ bool "FPU support"
+ default y
+ help
+ Say N here if you want to disable all floating-point related procedures
+ in the kernel.
+
+ If you don't know what to do here, say Y.
+
endmenu
-menu "Kernel type"
+menu "Kernel features"
source "kernel/Kconfig.hz"
endmenu
+menu "Boot options"
+
+config CMDLINE_BOOL
+ bool "Built-in kernel command line"
+ help
+ For most platforms, it is the firmware or a second-stage bootloader
+ that specifies the default kernel command line options.
+ However, it might be necessary or advantageous to either override
+ the default kernel command line or add a few extra options to it.
+ For such cases, this option allows hardcoding command line options
+ directly into the kernel.
+
+ For that, choose 'Y' here and fill in the extra boot parameters
+ in CONFIG_CMDLINE.
+
+ The built-in options will be concatenated to the default command
+ line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
+ command line will be ignored and replaced by the built-in string.
+
+config CMDLINE
+ string "Built-in kernel command string"
+ depends on CMDLINE_BOOL
+ default ""
+ help
+ Supply command-line options at build time by entering them here.
+
+config CMDLINE_FORCE
+ bool "Built-in command line overrides bootloader arguments"
+ depends on CMDLINE_BOOL
+ help
+ Set this option to 'Y' to have the kernel ignore the bootloader
+ or firmware command line. Instead, the built-in command line
+ will be used exclusively.
+
+ If you don't know what to do here, say N.
+
+endmenu
+
menu "Bus support"
config PCI
diff --git a/arch/riscv/Kconfig.debug b/arch/riscv/Kconfig.debug
index 3224ff6ecf6e..c5a72f17c469 100644
--- a/arch/riscv/Kconfig.debug
+++ b/arch/riscv/Kconfig.debug
@@ -1,37 +1,2 @@
-
-config CMDLINE_BOOL
- bool "Built-in kernel command line"
- help
- For most platforms, it is firmware or second stage bootloader
- that by default specifies the kernel command line options.
- However, it might be necessary or advantageous to either override
- the default kernel command line or add a few extra options to it.
- For such cases, this option allows hardcoding command line options
- directly into the kernel.
-
- For that, choose 'Y' here and fill in the extra boot parameters
- in CONFIG_CMDLINE.
-
- The built-in options will be concatenated to the default command
- line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
- command line will be ignored and replaced by the built-in string.
-
-config CMDLINE
- string "Built-in kernel command string"
- depends on CMDLINE_BOOL
- default ""
- help
- Supply command-line options at build time by entering them here.
-
-config CMDLINE_FORCE
- bool "Built-in command line overrides bootloader arguments"
- depends on CMDLINE_BOOL
- help
- Set this option to 'Y' to have the kernel ignore the bootloader
- or firmware command line. Instead, the built-in command line
- will be used exclusively.
-
- If you don't know what to do here, say N.
-
config EARLY_PRINTK
def_bool y
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 61ec42405ec9..d10146197533 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -25,10 +25,7 @@ ifeq ($(CONFIG_ARCH_RV64I),y)
KBUILD_CFLAGS += -mabi=lp64
KBUILD_AFLAGS += -mabi=lp64
-
- KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
- KBUILD_MARCH = rv64im
KBUILD_LDFLAGS += -melf64lriscv
else
BITS := 32
@@ -36,22 +33,20 @@ else
KBUILD_CFLAGS += -mabi=ilp32
KBUILD_AFLAGS += -mabi=ilp32
- KBUILD_MARCH = rv32im
KBUILD_LDFLAGS += -melf32lriscv
endif
KBUILD_CFLAGS += -Wall
-ifeq ($(CONFIG_RISCV_ISA_A),y)
- KBUILD_ARCH_A = a
-endif
-ifeq ($(CONFIG_RISCV_ISA_C),y)
- KBUILD_ARCH_C = c
-endif
-
-KBUILD_AFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_ARCH_A)fd$(KBUILD_ARCH_C)
+# ISA string setting
+riscv-march-$(CONFIG_ARCH_RV32I) := rv32im
+riscv-march-$(CONFIG_ARCH_RV64I) := rv64im
+riscv-march-$(CONFIG_RISCV_ISA_A) := $(riscv-march-y)a
+riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
+riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
+KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
+KBUILD_AFLAGS += -march=$(riscv-march-y)
-KBUILD_CFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_ARCH_A)$(KBUILD_ARCH_C)
KBUILD_CFLAGS += -mno-save-restore
KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index efdbe311e936..6a646d9ea780 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -13,7 +13,6 @@ generic-y += errno.h
generic-y += exec.h
generic-y += fb.h
generic-y += fcntl.h
-generic-y += futex.h
generic-y += hardirq.h
generic-y += hash.h
generic-y += hw_irq.h
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..c9fecd120d18
--- /dev/null
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
new file mode 100644
index 000000000000..3b19eba1bc8e
--- /dev/null
+++ b/arch/riscv/include/asm/futex.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 2018 Jim Wilson (jimw@sifive.com)
+ */
+
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#ifndef CONFIG_RISCV_ISA_A
+/*
+ * Use the generic interrupt disabling versions if the A extension
+ * is not supported.
+ */
+#ifdef CONFIG_SMP
+#error "Can't support generic futex calls without A extension on SMP"
+#endif
+#include <asm-generic/futex.h>
+
+#else /* CONFIG_RISCV_ISA_A */
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <asm/asm.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ uintptr_t tmp; \
+ __enable_user_access(); \
+ __asm__ __volatile__ ( \
+ "1: " insn " \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .balign 4 \n" \
+ "3: li %[r],%[e] \n" \
+ " jump 2b,%[t] \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .balign " RISCV_SZPTR " \n" \
+ " " RISCV_PTR " 1b, 3b \n" \
+ " .previous \n" \
+ : [r] "+r" (ret), [ov] "=&r" (oldval), \
+ [u] "+m" (*uaddr), [t] "=&r" (tmp) \
+ : [op] "Jr" (oparg), [e] "i" (-EFAULT) \
+ : "memory"); \
+ __disable_user_access(); \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret = 0;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("amoswap.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("amoadd.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("amoor.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("amoand.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("amoxor.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val;
+ uintptr_t tmp;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __enable_user_access();
+ __asm__ __volatile__ (
+ "1: lr.w.aqrl %[v],%[u] \n"
+ " bne %[v],%z[ov],3f \n"
+ "2: sc.w.aqrl %[t],%z[nv],%[u] \n"
+ " bnez %[t],1b \n"
+ "3: \n"
+ " .section .fixup,\"ax\" \n"
+ " .balign 4 \n"
+ "4: li %[r],%[e] \n"
+ " jump 3b,%[t] \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " .balign " RISCV_SZPTR " \n"
+ " " RISCV_PTR " 1b, 4b \n"
+ " " RISCV_PTR " 2b, 4b \n"
+ " .previous \n"
+ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
+ : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "i" (-EFAULT)
+ : "memory");
+ __disable_user_access();
+
+ *uval = val;
+ return ret;
+}
+
+#endif /* CONFIG_RISCV_ISA_A */
+#endif /* _ASM_FUTEX_H */
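The inline-asm sequence above is a classic LR/SC compare-and-exchange: load-reserve the word, bail out if it does not match oldval, otherwise store-conditional newval and retry on reservation loss, with the exception table turning a faulting user access into -EFAULT. Its C-level meaning, minus the atomicity and fault handling that only the assembly provides:

#include <stdint.h>
#include <stdio.h>

/* Single-threaded model of the lr.w.aqrl/sc.w.aqrl loop above. */
static int cmpxchg_model(uint32_t *uval, uint32_t *uaddr,
                         uint32_t oldval, uint32_t newval)
{
        uint32_t val = *uaddr;          /* 1: lr.w.aqrl */

        if (val == oldval)              /*    bne ... 3f */
                *uaddr = newval;        /* 2: sc.w.aqrl (retries at 1) */
        *uval = val;
        return 0;                       /* -EFAULT comes from the fixup */
}

int main(void)
{
        uint32_t futex = 0, old;

        cmpxchg_model(&old, &futex, 0, 1);
        printf("futex=%u old=%u\n", futex, old);
        return 0;
}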
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 3fe4af8147d2..50de774d827a 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -88,7 +88,7 @@ static inline void wait_for_interrupt(void)
}
struct device_node;
-extern int riscv_of_processor_hart(struct device_node *node);
+int riscv_of_processor_hartid(struct device_node *node);
extern void riscv_fill_hwcap(void);
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index 36016845461d..41aa73b476f4 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -14,16 +14,24 @@
#ifndef _ASM_RISCV_SMP_H
#define _ASM_RISCV_SMP_H
-/* This both needs asm-offsets.h and is used when generating it. */
-#ifndef GENERATING_ASM_OFFSETS
-#include <asm/asm-offsets.h>
-#endif
-
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
+#include <linux/thread_info.h>
+
+#define INVALID_HARTID ULONG_MAX
+/*
+ * Mapping between linux logical cpu index and hartid.
+ */
+extern unsigned long __cpuid_to_hartid_map[NR_CPUS];
+#define cpuid_to_hartid_map(cpu) __cpuid_to_hartid_map[cpu]
+
+struct seq_file;
#ifdef CONFIG_SMP
+/* print IPI stats */
+void show_ipi_stats(struct seq_file *p, int prec);
+
/* SMP initialization hook for setup_arch */
void __init setup_smp(void);
@@ -33,14 +41,31 @@ void arch_send_call_function_ipi_mask(struct cpumask *mask);
/* Hook for the generic smp_call_function_single() routine. */
void arch_send_call_function_single_ipi(int cpu);
+int riscv_hartid_to_cpuid(int hartid);
+void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
+
/*
- * This is particularly ugly: it appears we can't actually get the definition
- * of task_struct here, but we need access to the CPU this task is running on.
- * Instead of using C we're using asm-offsets.h to get the current processor
- * ID.
+ * Obtains the logical CPU ID of the currently executing task. This relies on
+ * THREAD_INFO_IN_TASK, but we define that unconditionally.
*/
-#define raw_smp_processor_id() (*((int*)((char*)get_current() + TASK_TI_CPU)))
+#define raw_smp_processor_id() (current_thread_info()->cpu)
-#endif /* CONFIG_SMP */
+#else
+
+static inline void show_ipi_stats(struct seq_file *p, int prec)
+{
+}
+static inline int riscv_hartid_to_cpuid(int hartid)
+{
+ return 0;
+}
+
+static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
+ struct cpumask *out)
+{
+ cpumask_set_cpu(cpuid_to_hartid_map(0), out);
+}
+
+#endif /* CONFIG_SMP */
#endif /* _ASM_RISCV_SMP_H */
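The __cpuid_to_hartid_map indirection exists because Linux needs dense logical CPU numbers (0..NR_CPUS-1) while device trees may hand out sparse or non-zero-based hart IDs; the boot hart is always mapped to logical CPU 0. A toy illustration (the hart IDs here are made up):

#include <stdio.h>

#define NR_CPUS 4

/* Dense logical CPUs on the left, possibly sparse hart IDs on the right. */
static unsigned long cpuid_to_hartid[NR_CPUS] = { 0, 2, 3, 5 };

int main(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu %d -> hart %lu\n", cpu, cpuid_to_hartid[cpu]);
        return 0;
}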
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index dd6b05bff75b..733559083f24 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -18,6 +18,7 @@
#include <asm/ptrace.h>
#include <asm/csr.h>
+#ifdef CONFIG_FPU
extern void __fstate_save(struct task_struct *save_to);
extern void __fstate_restore(struct task_struct *restore_from);
@@ -55,6 +56,14 @@ static inline void __switch_to_aux(struct task_struct *prev,
fstate_restore(next, task_pt_regs(next));
}
+extern bool has_fpu;
+#else
+#define has_fpu false
+#define fstate_save(task, regs) do { } while (0)
+#define fstate_restore(task, regs) do { } while (0)
+#define __switch_to_aux(__prev, __next) do { } while (0)
+#endif
+
extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
@@ -62,7 +71,8 @@ extern struct task_struct *__switch_to(struct task_struct *,
do { \
struct task_struct *__prev = (prev); \
struct task_struct *__next = (next); \
- __switch_to_aux(__prev, __next); \
+ if (has_fpu) \
+ __switch_to_aux(__prev, __next); \
((last) = __switch_to(__prev, __next)); \
} while (0)
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 85c2d8bae957..54fee0cadb1e 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -16,6 +16,7 @@
#define _ASM_RISCV_TLBFLUSH_H
#include <linux/mm_types.h>
+#include <asm/smp.h>
/*
* Flush entire local TLB. 'sfence.vma' implicitly fences with the instruction
@@ -49,13 +50,22 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
#include <asm/sbi.h>
+static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
+ unsigned long size)
+{
+ struct cpumask hmask;
+
+ cpumask_clear(&hmask);
+ riscv_cpuid_to_hartid_mask(cmask, &hmask);
+ sbi_remote_sfence_vma(hmask.bits, start, size);
+}
+
#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
#define flush_tlb_range(vma, start, end) \
- sbi_remote_sfence_vma(mm_cpumask((vma)->vm_mm)->bits, \
- start, (end) - (start))
+ remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
#define flush_tlb_mm(mm) \
- sbi_remote_sfence_vma(mm_cpumask(mm)->bits, 0, -1)
+ remote_sfence_vma(mm_cpumask(mm), 0, -1)
#endif /* CONFIG_SMP */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index 0caea01d5cca..eff7aa9aa163 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -16,6 +16,7 @@
* be included multiple times. See uapi/asm/syscalls.h for more info.
*/
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild
index 7e91f4850475..5511b9918131 100644
--- a/arch/riscv/include/uapi/asm/Kbuild
+++ b/arch/riscv/include/uapi/asm/Kbuild
@@ -26,3 +26,4 @@ generic-y += swab.h
generic-y += termbits.h
generic-y += termios.h
generic-y += types.h
+generic-y += siginfo.h
diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h
index 1e0dfc36aab9..644a00ce6e2e 100644
--- a/arch/riscv/include/uapi/asm/elf.h
+++ b/arch/riscv/include/uapi/asm/elf.h
@@ -19,7 +19,10 @@ typedef unsigned long elf_greg_t;
typedef struct user_regs_struct elf_gregset_t;
#define ELF_NGREG (sizeof(elf_gregset_t) / sizeof(elf_greg_t))
+/* We don't support f without d, or q. */
+typedef __u64 elf_fpreg_t;
typedef union __riscv_fp_state elf_fpregset_t;
+#define ELF_NFPREG (sizeof(struct __riscv_d_ext_state) / sizeof(elf_fpreg_t))
#if __riscv_xlen == 64
#define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info)
diff --git a/arch/riscv/include/uapi/asm/siginfo.h b/arch/riscv/include/uapi/asm/siginfo.h
deleted file mode 100644
index f96849aac662..000000000000
--- a/arch/riscv/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- * Copyright (C) 2016 SiFive, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_SIGINFO_H
-#define __ASM_SIGINFO_H
-
-#define __ARCH_SI_PREAMBLE_SIZE (__SIZEOF_POINTER__ == 4 ? 12 : 16)
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index e1274fc03af4..f13f7f276639 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -31,6 +31,7 @@ obj-y += vdso/
CFLAGS_setup.o := -mcmodel=medany
+obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index 0bc86e5f8f3f..cb35ffd8ec6b 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -22,13 +22,6 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
{
this_leaf->level = level;
this_leaf->type = type;
- /* not a sector cache */
- this_leaf->physical_line_partition = 1;
- /* TODO: Add to DTS */
- this_leaf->attributes =
- CACHE_WRITE_BACK
- | CACHE_READ_ALLOCATE
- | CACHE_WRITE_ALLOCATE;
}
static int __init_cache_level(unsigned int cpu)
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index ca6c81e54e37..3a5a2ee31547 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -14,9 +14,13 @@
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/of.h>
+#include <asm/smp.h>
-/* Return -1 if not a valid hart */
-int riscv_of_processor_hart(struct device_node *node)
+/*
+ * Returns the hart ID of the given device tree node, or -1 if the device tree
+ * node isn't a RISC-V hart.
+ */
+int riscv_of_processor_hartid(struct device_node *node)
{
const char *isa, *status;
u32 hart;
@@ -58,6 +62,64 @@ int riscv_of_processor_hart(struct device_node *node)
#ifdef CONFIG_PROC_FS
+static void print_isa(struct seq_file *f, const char *orig_isa)
+{
+ static const char *ext = "mafdc";
+ const char *isa = orig_isa;
+ const char *e;
+
+ /*
+ * Linux doesn't support rv32e or rv128i, and we only support booting
+ * kernels on harts with the same ISA as the kernel is compiled for.
+ */
+#if defined(CONFIG_32BIT)
+ if (strncmp(isa, "rv32i", 5) != 0)
+ return;
+#elif defined(CONFIG_64BIT)
+ if (strncmp(isa, "rv64i", 5) != 0)
+ return;
+#endif
+
+ /* Print the base ISA, as we already know it's legal. */
+ seq_puts(f, "isa\t\t: ");
+ seq_write(f, isa, 5);
+ isa += 5;
+
+ /*
+ * Check the rest of the ISA string for valid extensions, printing those
+ * we find. RISC-V ISA strings define an order, so we only print the
+ * extension bits when they're in order.
+ */
+ for (e = ext; *e != '\0'; ++e) {
+ if (isa[0] == e[0]) {
+ seq_write(f, isa, 1);
+ isa++;
+ }
+ }
+ seq_puts(f, "\n");
+
+ /*
+ * If we were given an unsupported ISA in the device tree then print
+ * a bit of info describing what went wrong.
+ */
+ if (isa[0] != '\0')
+ pr_info("unsupported ISA \"%s\" in device tree\n", orig_isa);
+}
+
+static void print_mmu(struct seq_file *f, const char *mmu_type)
+{
+#if defined(CONFIG_32BIT)
+ if (strcmp(mmu_type, "riscv,sv32") != 0)
+ return;
+#elif defined(CONFIG_64BIT)
+ if (strcmp(mmu_type, "riscv,sv39") != 0 &&
+ strcmp(mmu_type, "riscv,sv48") != 0)
+ return;
+#endif
+
+ seq_printf(f, "mmu\t\t: %s\n", mmu_type+6);
+}
+
static void *c_start(struct seq_file *m, loff_t *pos)
{
*pos = cpumask_next(*pos - 1, cpu_online_mask);
@@ -78,21 +140,20 @@ static void c_stop(struct seq_file *m, void *v)
static int c_show(struct seq_file *m, void *v)
{
- unsigned long hart_id = (unsigned long)v - 1;
- struct device_node *node = of_get_cpu_node(hart_id, NULL);
+ unsigned long cpu_id = (unsigned long)v - 1;
+ struct device_node *node = of_get_cpu_node(cpuid_to_hartid_map(cpu_id),
+ NULL);
const char *compat, *isa, *mmu;
- seq_printf(m, "hart\t: %lu\n", hart_id);
- if (!of_property_read_string(node, "riscv,isa", &isa)
- && isa[0] == 'r'
- && isa[1] == 'v')
- seq_printf(m, "isa\t: %s\n", isa);
- if (!of_property_read_string(node, "mmu-type", &mmu)
- && !strncmp(mmu, "riscv,", 6))
- seq_printf(m, "mmu\t: %s\n", mmu+6);
+ seq_printf(m, "processor\t: %lu\n", cpu_id);
+ seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
+ if (!of_property_read_string(node, "riscv,isa", &isa))
+ print_isa(m, isa);
+ if (!of_property_read_string(node, "mmu-type", &mmu))
+ print_mmu(m, mmu);
if (!of_property_read_string(node, "compatible", &compat)
&& strcmp(compat, "riscv"))
- seq_printf(m, "uarch\t: %s\n", compat);
+ seq_printf(m, "uarch\t\t: %s\n", compat);
seq_puts(m, "\n");
return 0;
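With print_isa() and print_mmu() in place, each /proc/cpuinfo stanza gains a dense logical index alongside the physical hart ID. On a hypothetical rv64imafdc hart with Sv39 translation, the seq_printf format strings above would produce roughly:

processor	: 0
hart		: 0
isa		: rv64imafdc
mmu		: sv39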
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 17011a870044..5493f3228704 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -22,6 +22,9 @@
#include <asm/hwcap.h>
unsigned long elf_hwcap __read_mostly;
+#ifdef CONFIG_FPU
+bool has_fpu __read_mostly;
+#endif
void riscv_fill_hwcap(void)
{
@@ -57,5 +60,17 @@ void riscv_fill_hwcap(void)
for (i = 0; i < strlen(isa); ++i)
elf_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
+ /*
+ * We don't support systems with F but without D, so mask those
+ * out here.
+ */
+ if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
+ pr_info("This kernel does not support systems with F but without D\n");
+ elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
+ }
+
pr_info("elf_hwcap is 0x%lx", elf_hwcap);
+
+#ifdef CONFIG_FPU
+ if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
+ has_fpu = true;
+#endif
}
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index fa2c08e3c05e..13d4826ab2a1 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -168,7 +168,6 @@ ENTRY(handle_exception)
/* Handle interrupts */
move a0, sp /* pt_regs */
- move a1, s4 /* scause */
tail do_IRQ
1:
/* Exceptions run with interrupts enabled */
@@ -357,93 +356,6 @@ ENTRY(__switch_to)
ret
ENDPROC(__switch_to)
-ENTRY(__fstate_save)
- li a2, TASK_THREAD_F0
- add a0, a0, a2
- li t1, SR_FS
- csrs sstatus, t1
- frcsr t0
- fsd f0, TASK_THREAD_F0_F0(a0)
- fsd f1, TASK_THREAD_F1_F0(a0)
- fsd f2, TASK_THREAD_F2_F0(a0)
- fsd f3, TASK_THREAD_F3_F0(a0)
- fsd f4, TASK_THREAD_F4_F0(a0)
- fsd f5, TASK_THREAD_F5_F0(a0)
- fsd f6, TASK_THREAD_F6_F0(a0)
- fsd f7, TASK_THREAD_F7_F0(a0)
- fsd f8, TASK_THREAD_F8_F0(a0)
- fsd f9, TASK_THREAD_F9_F0(a0)
- fsd f10, TASK_THREAD_F10_F0(a0)
- fsd f11, TASK_THREAD_F11_F0(a0)
- fsd f12, TASK_THREAD_F12_F0(a0)
- fsd f13, TASK_THREAD_F13_F0(a0)
- fsd f14, TASK_THREAD_F14_F0(a0)
- fsd f15, TASK_THREAD_F15_F0(a0)
- fsd f16, TASK_THREAD_F16_F0(a0)
- fsd f17, TASK_THREAD_F17_F0(a0)
- fsd f18, TASK_THREAD_F18_F0(a0)
- fsd f19, TASK_THREAD_F19_F0(a0)
- fsd f20, TASK_THREAD_F20_F0(a0)
- fsd f21, TASK_THREAD_F21_F0(a0)
- fsd f22, TASK_THREAD_F22_F0(a0)
- fsd f23, TASK_THREAD_F23_F0(a0)
- fsd f24, TASK_THREAD_F24_F0(a0)
- fsd f25, TASK_THREAD_F25_F0(a0)
- fsd f26, TASK_THREAD_F26_F0(a0)
- fsd f27, TASK_THREAD_F27_F0(a0)
- fsd f28, TASK_THREAD_F28_F0(a0)
- fsd f29, TASK_THREAD_F29_F0(a0)
- fsd f30, TASK_THREAD_F30_F0(a0)
- fsd f31, TASK_THREAD_F31_F0(a0)
- sw t0, TASK_THREAD_FCSR_F0(a0)
- csrc sstatus, t1
- ret
-ENDPROC(__fstate_save)
-
-ENTRY(__fstate_restore)
- li a2, TASK_THREAD_F0
- add a0, a0, a2
- li t1, SR_FS
- lw t0, TASK_THREAD_FCSR_F0(a0)
- csrs sstatus, t1
- fld f0, TASK_THREAD_F0_F0(a0)
- fld f1, TASK_THREAD_F1_F0(a0)
- fld f2, TASK_THREAD_F2_F0(a0)
- fld f3, TASK_THREAD_F3_F0(a0)
- fld f4, TASK_THREAD_F4_F0(a0)
- fld f5, TASK_THREAD_F5_F0(a0)
- fld f6, TASK_THREAD_F6_F0(a0)
- fld f7, TASK_THREAD_F7_F0(a0)
- fld f8, TASK_THREAD_F8_F0(a0)
- fld f9, TASK_THREAD_F9_F0(a0)
- fld f10, TASK_THREAD_F10_F0(a0)
- fld f11, TASK_THREAD_F11_F0(a0)
- fld f12, TASK_THREAD_F12_F0(a0)
- fld f13, TASK_THREAD_F13_F0(a0)
- fld f14, TASK_THREAD_F14_F0(a0)
- fld f15, TASK_THREAD_F15_F0(a0)
- fld f16, TASK_THREAD_F16_F0(a0)
- fld f17, TASK_THREAD_F17_F0(a0)
- fld f18, TASK_THREAD_F18_F0(a0)
- fld f19, TASK_THREAD_F19_F0(a0)
- fld f20, TASK_THREAD_F20_F0(a0)
- fld f21, TASK_THREAD_F21_F0(a0)
- fld f22, TASK_THREAD_F22_F0(a0)
- fld f23, TASK_THREAD_F23_F0(a0)
- fld f24, TASK_THREAD_F24_F0(a0)
- fld f25, TASK_THREAD_F25_F0(a0)
- fld f26, TASK_THREAD_F26_F0(a0)
- fld f27, TASK_THREAD_F27_F0(a0)
- fld f28, TASK_THREAD_F28_F0(a0)
- fld f29, TASK_THREAD_F29_F0(a0)
- fld f30, TASK_THREAD_F30_F0(a0)
- fld f31, TASK_THREAD_F31_F0(a0)
- fscsr t0
- csrc sstatus, t1
- ret
-ENDPROC(__fstate_restore)
-
-
.section ".rodata"
/* Exception vector table */
ENTRY(excp_vect_table)
diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S
new file mode 100644
index 000000000000..1defb0618aff
--- /dev/null
+++ b/arch/riscv/kernel/fpu.S
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/asm-offsets.h>
+
+ENTRY(__fstate_save)
+ li a2, TASK_THREAD_F0
+ add a0, a0, a2
+ li t1, SR_FS
+ csrs sstatus, t1
+ frcsr t0
+ fsd f0, TASK_THREAD_F0_F0(a0)
+ fsd f1, TASK_THREAD_F1_F0(a0)
+ fsd f2, TASK_THREAD_F2_F0(a0)
+ fsd f3, TASK_THREAD_F3_F0(a0)
+ fsd f4, TASK_THREAD_F4_F0(a0)
+ fsd f5, TASK_THREAD_F5_F0(a0)
+ fsd f6, TASK_THREAD_F6_F0(a0)
+ fsd f7, TASK_THREAD_F7_F0(a0)
+ fsd f8, TASK_THREAD_F8_F0(a0)
+ fsd f9, TASK_THREAD_F9_F0(a0)
+ fsd f10, TASK_THREAD_F10_F0(a0)
+ fsd f11, TASK_THREAD_F11_F0(a0)
+ fsd f12, TASK_THREAD_F12_F0(a0)
+ fsd f13, TASK_THREAD_F13_F0(a0)
+ fsd f14, TASK_THREAD_F14_F0(a0)
+ fsd f15, TASK_THREAD_F15_F0(a0)
+ fsd f16, TASK_THREAD_F16_F0(a0)
+ fsd f17, TASK_THREAD_F17_F0(a0)
+ fsd f18, TASK_THREAD_F18_F0(a0)
+ fsd f19, TASK_THREAD_F19_F0(a0)
+ fsd f20, TASK_THREAD_F20_F0(a0)
+ fsd f21, TASK_THREAD_F21_F0(a0)
+ fsd f22, TASK_THREAD_F22_F0(a0)
+ fsd f23, TASK_THREAD_F23_F0(a0)
+ fsd f24, TASK_THREAD_F24_F0(a0)
+ fsd f25, TASK_THREAD_F25_F0(a0)
+ fsd f26, TASK_THREAD_F26_F0(a0)
+ fsd f27, TASK_THREAD_F27_F0(a0)
+ fsd f28, TASK_THREAD_F28_F0(a0)
+ fsd f29, TASK_THREAD_F29_F0(a0)
+ fsd f30, TASK_THREAD_F30_F0(a0)
+ fsd f31, TASK_THREAD_F31_F0(a0)
+ sw t0, TASK_THREAD_FCSR_F0(a0)
+ csrc sstatus, t1
+ ret
+ENDPROC(__fstate_save)
+
+ENTRY(__fstate_restore)
+ li a2, TASK_THREAD_F0
+ add a0, a0, a2
+ li t1, SR_FS
+ lw t0, TASK_THREAD_FCSR_F0(a0)
+ csrs sstatus, t1
+ fld f0, TASK_THREAD_F0_F0(a0)
+ fld f1, TASK_THREAD_F1_F0(a0)
+ fld f2, TASK_THREAD_F2_F0(a0)
+ fld f3, TASK_THREAD_F3_F0(a0)
+ fld f4, TASK_THREAD_F4_F0(a0)
+ fld f5, TASK_THREAD_F5_F0(a0)
+ fld f6, TASK_THREAD_F6_F0(a0)
+ fld f7, TASK_THREAD_F7_F0(a0)
+ fld f8, TASK_THREAD_F8_F0(a0)
+ fld f9, TASK_THREAD_F9_F0(a0)
+ fld f10, TASK_THREAD_F10_F0(a0)
+ fld f11, TASK_THREAD_F11_F0(a0)
+ fld f12, TASK_THREAD_F12_F0(a0)
+ fld f13, TASK_THREAD_F13_F0(a0)
+ fld f14, TASK_THREAD_F14_F0(a0)
+ fld f15, TASK_THREAD_F15_F0(a0)
+ fld f16, TASK_THREAD_F16_F0(a0)
+ fld f17, TASK_THREAD_F17_F0(a0)
+ fld f18, TASK_THREAD_F18_F0(a0)
+ fld f19, TASK_THREAD_F19_F0(a0)
+ fld f20, TASK_THREAD_F20_F0(a0)
+ fld f21, TASK_THREAD_F21_F0(a0)
+ fld f22, TASK_THREAD_F22_F0(a0)
+ fld f23, TASK_THREAD_F23_F0(a0)
+ fld f24, TASK_THREAD_F24_F0(a0)
+ fld f25, TASK_THREAD_F25_F0(a0)
+ fld f26, TASK_THREAD_F26_F0(a0)
+ fld f27, TASK_THREAD_F27_F0(a0)
+ fld f28, TASK_THREAD_F28_F0(a0)
+ fld f29, TASK_THREAD_F29_F0(a0)
+ fld f30, TASK_THREAD_F30_F0(a0)
+ fld f31, TASK_THREAD_F31_F0(a0)
+ fscsr t0
+ csrc sstatus, t1
+ ret
+ENDPROC(__fstate_restore)
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index c4d2c63f9a29..711190d473d4 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -47,6 +47,8 @@ ENTRY(_start)
/* Save hart ID and DTB physical address */
mv s0, a0
mv s1, a1
+ la a2, boot_cpu_hartid
+ REG_S a0, (a2)
/* Initialize page tables and relocate to virtual addresses */
la sp, init_thread_union + THREAD_SIZE
@@ -55,7 +57,7 @@ ENTRY(_start)
/* Restore C environment */
la tp, init_task
- sw s0, TASK_TI_CPU(tp)
+ sw zero, TASK_TI_CPU(tp)
la sp, init_thread_union
li a0, ASM_THREAD_SIZE
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 0cfac48a1272..48e6b7db83a1 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -8,6 +8,8 @@
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
+#include <linux/seq_file.h>
+#include <asm/smp.h>
/*
* Possible interrupt causes:
@@ -24,12 +26,18 @@
*/
#define INTERRUPT_CAUSE_FLAG (1UL << (__riscv_xlen - 1))
-asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs, unsigned long cause)
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ show_ipi_stats(p, prec);
+ return 0;
+}
+
+asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
- switch (cause & ~INTERRUPT_CAUSE_FLAG) {
+ switch (regs->scause & ~INTERRUPT_CAUSE_FLAG) {
case INTERRUPT_CAUSE_TIMER:
riscv_timer_interrupt();
break;
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 5721624886a1..8a5593ff9ff3 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -75,7 +75,6 @@ ENTRY(return_to_handler)
RESTORE_RET_ABI_STATE
jalr a1
ENDPROC(return_to_handler)
-EXPORT_SYMBOL(return_to_handler)
#endif
#ifndef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index d7c6ca7c95ae..bef19993ea92 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -76,7 +76,9 @@ void show_regs(struct pt_regs *regs)
void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
- regs->sstatus = SR_SPIE /* User mode, irqs on */ | SR_FS_INITIAL;
+ regs->sstatus = SR_SPIE;
+ if (has_fpu)
+ regs->sstatus |= SR_FS_INITIAL;
regs->sepc = pc;
regs->sp = sp;
set_fs(USER_DS);
@@ -84,12 +86,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
void flush_thread(void)
{
+#ifdef CONFIG_FPU
/*
* Reset FPU context
* frm: round to nearest, ties to even (IEEE default)
* fflags: accrued exceptions cleared
*/
memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
+#endif
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 9f82a7e34c64..60f1e02eed36 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -28,6 +28,9 @@
enum riscv_regset {
REGSET_X,
+#ifdef CONFIG_FPU
+ REGSET_F,
+#endif
};
static int riscv_gpr_get(struct task_struct *target,
@@ -54,6 +57,45 @@ static int riscv_gpr_set(struct task_struct *target,
return ret;
}
+#ifdef CONFIG_FPU
+static int riscv_fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+ struct __riscv_d_ext_state *fstate = &target->thread.fstate;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fstate, 0,
+ offsetof(struct __riscv_d_ext_state, fcsr));
+ if (!ret) {
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fstate, 0,
+ offsetof(struct __riscv_d_ext_state, fcsr) +
+ sizeof(fstate->fcsr));
+ }
+
+ return ret;
+}
+
+static int riscv_fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct __riscv_d_ext_state *fstate = &target->thread.fstate;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0,
+ offsetof(struct __riscv_d_ext_state, fcsr));
+ if (!ret) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0,
+ offsetof(struct __riscv_d_ext_state, fcsr) +
+ sizeof(fstate->fcsr));
+ }
+
+ return ret;
+}
+#endif
static const struct user_regset riscv_user_regset[] = {
[REGSET_X] = {
@@ -64,6 +106,16 @@ static const struct user_regset riscv_user_regset[] = {
.get = &riscv_gpr_get,
.set = &riscv_gpr_set,
},
+#ifdef CONFIG_FPU
+ [REGSET_F] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .get = &riscv_fpr_get,
+ .set = &riscv_fpr_set,
+ },
+#endif
};
static const struct user_regset_view riscv_user_native_view = {
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index aee603123030..2c290e6aaa6e 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -81,6 +81,16 @@ EXPORT_SYMBOL(empty_zero_page);
/* The lucky hart to first increment this variable will boot the other cores */
atomic_t hart_lottery;
+unsigned long boot_cpu_hartid;
+
+unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
+ [0 ... NR_CPUS-1] = INVALID_HARTID
+};
+
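+/* The boot hart becomes logical CPU 0, whatever its hartid happens to be. */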
+void __init smp_setup_processor_id(void)
+{
+ cpuid_to_hartid_map(0) = boot_cpu_hartid;
+}
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
@@ -186,7 +196,7 @@ static void __init setup_bootmem(void)
BUG_ON(mem_size == 0);
set_max_mapnr(PFN_DOWN(mem_size));
- max_low_pfn = pfn_base + PFN_DOWN(mem_size);
+ max_low_pfn = memblock_end_of_DRAM();
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
@@ -227,7 +237,10 @@ void __init setup_arch(char **cmdline_p)
setup_bootmem();
paging_init();
unflatten_device_tree();
+
+#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
+#endif
#ifdef CONFIG_SMP
setup_smp();
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 718d0c984ef0..f9b5e7e352ef 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -37,45 +37,69 @@ struct rt_sigframe {
struct ucontext uc;
};
-static long restore_d_state(struct pt_regs *regs,
- struct __riscv_d_ext_state __user *state)
+#ifdef CONFIG_FPU
+static long restore_fp_state(struct pt_regs *regs,
+ union __riscv_fp_state *sc_fpregs)
{
long err;
+ struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+ size_t i;
+
err = __copy_from_user(&current->thread.fstate, state, sizeof(*state));
- if (likely(!err))
- fstate_restore(current, regs);
+ if (unlikely(err))
+ return err;
+
+ fstate_restore(current, regs);
+
+ /* We support no other extension state at this time. */
+ for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+ u32 value;
+
+ err = __get_user(value, &sc_fpregs->q.reserved[i]);
+ if (unlikely(err))
+ break;
+ if (value != 0)
+ return -EINVAL;
+ }
+
return err;
}
-static long save_d_state(struct pt_regs *regs,
- struct __riscv_d_ext_state __user *state)
+static long save_fp_state(struct pt_regs *regs,
+ union __riscv_fp_state *sc_fpregs)
{
+ long err;
+ struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+ size_t i;
+
fstate_save(current, regs);
- return __copy_to_user(state, &current->thread.fstate, sizeof(*state));
+ err = __copy_to_user(state, &current->thread.fstate, sizeof(*state));
+ if (unlikely(err))
+ return err;
+
+ /* We support no other extension state at this time. */
+ for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+ err = __put_user(0, &sc_fpregs->q.reserved[i]);
+ if (unlikely(err))
+ break;
+ }
+
+ return err;
}
+#else
+#define save_fp_state(task, regs) (0)
+#define restore_fp_state(task, regs) (0)
+#endif
static long restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
long err;
- size_t i;
/* sc_regs is structured the same as the start of pt_regs */
err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs));
- if (unlikely(err))
- return err;
/* Restore the floating-point state. */
- err = restore_d_state(regs, &sc->sc_fpregs.d);
- if (unlikely(err))
- return err;
- /* We support no other extension state at this time. */
- for (i = 0; i < ARRAY_SIZE(sc->sc_fpregs.q.reserved); i++) {
- u32 value;
- err = __get_user(value, &sc->sc_fpregs.q.reserved[i]);
- if (unlikely(err))
- break;
- if (value != 0)
- return -EINVAL;
- }
+ if (has_fpu)
+ err |= restore_fp_state(regs, &sc->sc_fpregs);
return err;
}
@@ -124,14 +148,11 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
{
struct sigcontext __user *sc = &frame->uc.uc_mcontext;
long err;
- size_t i;
/* sc_regs is structured the same as the start of pt_regs */
err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
/* Save the floating-point state. */
- err |= save_d_state(regs, &sc->sc_fpregs.d);
- /* We support no other extension state at this time. */
- for (i = 0; i < ARRAY_SIZE(sc->sc_fpregs.q.reserved); i++)
- err |= __put_user(0, &sc->sc_fpregs.q.reserved[i]);
+ if (has_fpu)
+ err |= save_fp_state(regs, &sc->sc_fpregs);
return err;
}
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 906fe21ea21b..57b1383e5ef7 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -22,23 +22,44 @@
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
-/* A collection of single bit ipi messages. */
-static struct {
- unsigned long bits ____cacheline_aligned;
-} ipi_data[NR_CPUS] __cacheline_aligned;
-
enum ipi_message_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_MAX
};
+/* A collection of single bit ipi messages. */
+static struct {
+ unsigned long stats[IPI_MAX] ____cacheline_aligned;
+ unsigned long bits ____cacheline_aligned;
+} ipi_data[NR_CPUS] __cacheline_aligned;
+
+int riscv_hartid_to_cpuid(int hartid)
+{
+ int i = -1;
+
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpuid_to_hartid_map(i) == hartid)
+ return i;
+ pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
+ BUG();
+ return i;
+}
+
+void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
+{
+ int cpu;
+
+ for_each_cpu(cpu, in)
+ cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
+}
/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
@@ -48,6 +69,7 @@ int setup_profiling_timer(unsigned int multiplier)
void riscv_software_interrupt(void)
{
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
+ unsigned long *stats = ipi_data[smp_processor_id()].stats;
/* Clear pending IPI */
csr_clear(sip, SIE_SSIE);
@@ -62,11 +84,15 @@ void riscv_software_interrupt(void)
if (ops == 0)
return;
- if (ops & (1 << IPI_RESCHEDULE))
+ if (ops & (1 << IPI_RESCHEDULE)) {
+ stats[IPI_RESCHEDULE]++;
scheduler_ipi();
+ }
- if (ops & (1 << IPI_CALL_FUNC))
+ if (ops & (1 << IPI_CALL_FUNC)) {
+ stats[IPI_CALL_FUNC]++;
generic_smp_call_function_interrupt();
+ }
BUG_ON((ops >> IPI_MAX) != 0);
@@ -78,14 +104,36 @@ void riscv_software_interrupt(void)
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
- int i;
+ int cpuid, hartid;
+ struct cpumask hartid_mask;
+ cpumask_clear(&hartid_mask);
mb();
- for_each_cpu(i, to_whom)
- set_bit(operation, &ipi_data[i].bits);
-
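+ /* The SBI addresses harts by hartid, so translate the Linux cpu mask. */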
+ for_each_cpu(cpuid, to_whom) {
+ set_bit(operation, &ipi_data[cpuid].bits);
+ hartid = cpuid_to_hartid_map(cpuid);
+ cpumask_set_cpu(hartid, &hartid_mask);
+ }
mb();
- sbi_send_ipi(cpumask_bits(to_whom));
+ sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static const char * const ipi_names[] = {
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNC] = "Function call interrupts",
+};
+
+void show_ipi_stats(struct seq_file *p, int prec)
+{
+ unsigned int cpu, i;
+
+ for (i = 0; i < IPI_MAX; i++) {
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
+ seq_printf(p, " %s\n", ipi_names[i]);
+ }
}
void arch_send_call_function_ipi_mask(struct cpumask *mask)
@@ -127,7 +175,7 @@ void smp_send_reschedule(int cpu)
void flush_icache_mm(struct mm_struct *mm, bool local)
{
unsigned int cpu;
- cpumask_t others, *mask;
+ cpumask_t others, hmask, *mask;
preempt_disable();
@@ -145,9 +193,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
*/
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
local |= cpumask_empty(&others);
- if (mm != current->active_mm || !local)
- sbi_remote_fence_i(others.bits);
- else {
+ if (mm != current->active_mm || !local) {
+ cpumask_clear(&hmask);
+ riscv_cpuid_to_hartid_mask(&others, &hmask);
+ sbi_remote_fence_i(hmask.bits);
+ } else {
/*
* It's assumed that at least one strongly ordered operation is
* performed on this hart between setting a hart's cpumask bit
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 56abab6a9812..18cda0e8cf94 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
+#include <linux/sched/mm.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@@ -50,25 +51,33 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
void __init setup_smp(void)
{
struct device_node *dn = NULL;
- int hart, im_okay_therefore_i_am = 0;
+ int hart;
+ bool found_boot_cpu = false;
+ int cpuid = 1;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
- hart = riscv_of_processor_hart(dn);
- if (hart >= 0) {
- set_cpu_possible(hart, true);
- set_cpu_present(hart, true);
- if (hart == smp_processor_id()) {
- BUG_ON(im_okay_therefore_i_am);
- im_okay_therefore_i_am = 1;
- }
+ hart = riscv_of_processor_hartid(dn);
+ if (hart < 0)
+ continue;
+
+ if (hart == cpuid_to_hartid_map(0)) {
+ BUG_ON(found_boot_cpu);
+ found_boot_cpu = 1;
+ continue;
}
+
+ cpuid_to_hartid_map(cpuid) = hart;
+ set_cpu_possible(cpuid, true);
+ set_cpu_present(cpuid, true);
+ cpuid++;
}
- BUG_ON(!im_okay_therefore_i_am);
+ BUG_ON(!found_boot_cpu);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
+ int hartid = cpuid_to_hartid_map(cpu);
tidle->thread_info.cpu = cpu;
/*
@@ -79,8 +88,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
* the spinning harts that they can continue the boot process.
*/
smp_mb();
- __cpu_up_stack_pointer[cpu] = task_stack_page(tidle) + THREAD_SIZE;
- __cpu_up_task_pointer[cpu] = tidle;
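+ /* The spinning hart indexes these arrays by its own hartid. */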
+ WRITE_ONCE(__cpu_up_stack_pointer[hartid],
+ task_stack_page(tidle) + THREAD_SIZE);
+ WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
while (!cpu_online(cpu))
cpu_relax();
@@ -100,14 +110,22 @@ asmlinkage void __init smp_callin(void)
struct mm_struct *mm = &init_mm;
/* All kernel threads share the same mm context. */
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
current->active_mm = mm;
trap_init();
notify_cpu_starting(smp_processor_id());
set_cpu_online(smp_processor_id(), 1);
+ /*
+ * Remote TLB flushes are ignored while the CPU is offline, so emit
+ * a local TLB flush right now just in case.
+ */
local_flush_tlb_all();
- local_irq_enable();
+ /*
+ * Disable preemption before enabling interrupts, so we don't try to
+ * schedule a CPU that hasn't actually started yet.
+ */
preempt_disable();
+ local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 445ec84f9a47..5739bd05d289 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -2,6 +2,7 @@ lib-y += delay.o
lib-y += memcpy.o
lib-y += memset.o
lib-y += uaccess.o
-lib-y += tishift.o
+
+lib-$(CONFIG_64BIT) += tishift.o
lib-$(CONFIG_32BIT) += udivdi3.o
diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c
index 70ef2724cdf6..bd2f2db557cc 100644
--- a/arch/riscv/mm/ioremap.c
+++ b/arch/riscv/mm/ioremap.c
@@ -42,7 +42,7 @@ static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
/* Page-align mappings */
offset = addr & (~PAGE_MASK);
- addr &= PAGE_MASK;
+ addr -= offset;
size = PAGE_ALIGN(size + offset);
area = get_vm_area_caller(size, VM_IOREMAP, caller);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9a9c7a6fe925..8b25e1f45b27 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -56,6 +56,12 @@ config PCI_QUIRKS
config ARCH_SUPPORTS_UPROBES
def_bool y
+config KASAN_SHADOW_OFFSET
+ hex
+ depends on KASAN
+ default 0x18000000000000 if KASAN_S390_4_LEVEL_PAGING
+ default 0x30000000000
+
config S390
def_bool y
select ARCH_BINFMT_ELF_STATE
@@ -120,11 +126,14 @@ config S390
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ select HAVE_ARCH_KASAN
select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_SOFT_DIRTY
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ARCH_VMAP_STACK
select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
@@ -649,6 +658,7 @@ config PACK_STACK
config CHECK_STACK
def_bool y
+ depends on !VMAP_STACK
prompt "Detect kernel stack overflow"
help
This option enables the compiler option -mstack-guard and
@@ -773,6 +783,17 @@ config VFIO_CCW
To compile this driver as a module, choose M here: the
module will be called vfio_ccw.
+config VFIO_AP
+ def_tristate n
+ prompt "VFIO support for AP devices"
+ depends on S390_AP_IOMMU && VFIO_MDEV_DEVICE && KVM
+ help
+ This driver grants access to Adjunct Processor (AP) devices
+ via the VFIO mediated device interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vfio_ap.
+
endmenu
menu "Dump support"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index ee65185bbc80..0b33577932c3 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -27,7 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
UTS_MACHINE := s390x
-STACK_SIZE := 16384
+STACK_SIZE := $(if $(CONFIG_KASAN),32768,16384)
CHECKFLAGS += -D__s390__ -D__s390x__
export LD_BFD
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 9bf8489df6e6..e4b58240ec53 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -137,6 +137,14 @@ static void appldata_work_fn(struct work_struct *work)
mutex_unlock(&appldata_ops_mutex);
}
+static struct appldata_product_id appldata_id = {
+ .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
+ 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
+ .prod_fn = 0xD5D3, /* "NL" */
+ .version_nr = 0xF2F6, /* "26" */
+ .release_nr = 0xF0F1, /* "01" */
+};
+
/*
* appldata_diag()
*
@@ -145,17 +153,22 @@ static void appldata_work_fn(struct work_struct *work)
int appldata_diag(char record_nr, u16 function, unsigned long buffer,
u16 length, char *mod_lvl)
{
- struct appldata_product_id id = {
- .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
- 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
- .prod_fn = 0xD5D3, /* "NL" */
- .version_nr = 0xF2F6, /* "26" */
- .release_nr = 0xF0F1, /* "01" */
- };
+ struct appldata_parameter_list *parm_list;
+ struct appldata_product_id *id;
+ int rc;
- id.record_nr = record_nr;
- id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
- return appldata_asm(&id, function, (void *) buffer, length);
+ parm_list = kmalloc(sizeof(*parm_list), GFP_KERNEL);
+ id = kmemdup(&appldata_id, sizeof(appldata_id), GFP_KERNEL);
+ rc = -ENOMEM;
+ if (parm_list && id) {
+ id->record_nr = record_nr;
+ id->mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
+ rc = appldata_asm(parm_list, id, function,
+ (void *) buffer, length);
+ }
+ kfree(id);
+ kfree(parm_list);
+ return rc;
}
/************************ timer, work, DIAG <END> ****************************/
diff --git a/arch/s390/boot/.gitignore b/arch/s390/boot/.gitignore
index 017d5912ad2d..16ff906e4610 100644
--- a/arch/s390/boot/.gitignore
+++ b/arch/s390/boot/.gitignore
@@ -1,2 +1,3 @@
image
bzImage
+section_cmp.*
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 9e6668ee93de..d5ad724f5c96 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -6,6 +6,7 @@
KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
@@ -27,15 +28,32 @@ endif
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
-obj-y := head.o als.o ebcdic.o sclp_early_core.o mem.o
-targets := bzImage startup.a $(obj-y)
+obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o string.o ebcdic.o
+obj-y += sclp_early_core.o mem.o ipl_vmparm.o cmdline.o ctype.o
+targets := bzImage startup.a section_cmp.boot.data $(obj-y)
subdir- := compressed
OBJECTS := $(addprefix $(obj)/,$(obj-y))
-$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
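+# Sanity check: .boot.data must be laid out identically in vmlinux and in
+# the decompressor image, since the decompressor fills it in and
+# copy_bootdata() later copies it into the uncompressed kernel.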
+quiet_cmd_section_cmp = SECTCMP $*
+define cmd_section_cmp
+ s1=`$(OBJDUMP) -t -j "$*" "$<" | sort | \
+ sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \
+ s2=`$(OBJDUMP) -t -j "$*" "$(word 2,$^)" | sort | \
+ sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \
+ if [ "$$s1" != "$$s2" ]; then \
+ echo "error: section $* differs between $< and $(word 2,$^)" >&2; \
+ exit 1; \
+ fi; \
+ touch $@
+endef
+
+$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data FORCE
$(call if_changed,objcopy)
+$(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE
+ $(call if_changed,section_cmp)
+
$(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
new file mode 100644
index 000000000000..fc41e2277ea8
--- /dev/null
+++ b/arch/s390/boot/boot.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BOOT_BOOT_H
+#define BOOT_BOOT_H
+
+void startup_kernel(void);
+void detect_memory(void);
+void store_ipl_parmblock(void);
+void setup_boot_command_line(void);
+void setup_memory_end(void);
+
+#endif /* BOOT_BOOT_H */
diff --git a/arch/s390/boot/cmdline.c b/arch/s390/boot/cmdline.c
new file mode 100644
index 000000000000..73d826cdbdeb
--- /dev/null
+++ b/arch/s390/boot/cmdline.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../lib/cmdline.c"
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 04609478d18b..593039620487 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -8,14 +8,16 @@
KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
-obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o
+obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) piggy.o info.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += vmlinux.scr.lds $(obj-y) $(if $(CONFIG_KERNEL_UNCOMPRESSED),,sizes.h)
+targets += info.bin $(obj-y)
KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
+OBJCOPYFLAGS :=
OBJECTS := $(addprefix $(obj)/,$(obj-y))
@@ -23,23 +25,16 @@ LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
$(call if_changed,ld)
-# extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin
-sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - 0x100000)/p'
-
-quiet_cmd_sizes = GEN $@
- cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
-
-$(obj)/sizes.h: vmlinux
- $(call if_changed,sizes)
-
-AFLAGS_head.o += -I$(objtree)/$(obj)
-$(obj)/head.o: $(obj)/sizes.h
+OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info
+$(obj)/info.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
-CFLAGS_misc.o += -I$(objtree)/$(obj)
-$(obj)/misc.o: $(obj)/sizes.h
+OBJCOPYFLAGS_info.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.info
+$(obj)/info.o: $(obj)/info.bin FORCE
+ $(call if_changed,objcopy)
-OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
-$(obj)/vmlinux.bin: vmlinux
+OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section=.vmlinux.info -S
+$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
vmlinux.bin.all-y := $(obj)/vmlinux.bin
@@ -64,10 +59,10 @@ $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
$(call if_changed,xzkern)
-LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
-$(obj)/piggy.o: $(obj)/vmlinux.scr.lds $(obj)/vmlinux.bin$(suffix-y)
- $(call if_changed,ld)
+OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
+$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
+ $(call if_changed,objcopy)
-chkbss := $(filter-out $(obj)/misc.o $(obj)/piggy.o,$(OBJECTS))
+chkbss := $(filter-out $(obj)/piggy.o $(obj)/info.o,$(OBJECTS))
chkbss-target := $(obj)/vmlinux.bin
include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c
new file mode 100644
index 000000000000..45046630c56a
--- /dev/null
+++ b/arch/s390/boot/compressed/decompressor.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Definitions and wrapper functions for kernel decompressor
+ *
+ * Copyright IBM Corp. 2010
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include "decompressor.h"
+
+/*
+ * gzip declarations
+ */
+#define STATIC static
+#define STATIC_RW_DATA static __section(.data)
+
+#undef memset
+#undef memcpy
+#undef memmove
+#define memmove memmove
+#define memzero(s, n) memset((s), 0, (n))
+
+/* Symbols defined by linker scripts */
+extern char _end[];
+extern unsigned char _compressed_start[];
+extern unsigned char _compressed_end[];
+
+#ifdef CONFIG_HAVE_KERNEL_BZIP2
+#define HEAP_SIZE 0x400000
+#else
+#define HEAP_SIZE 0x10000
+#endif
+
+static unsigned long free_mem_ptr = (unsigned long) _end;
+static unsigned long free_mem_end_ptr = (unsigned long) _end + HEAP_SIZE;
+
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
+#define decompress_offset ALIGN((unsigned long)_end + HEAP_SIZE, PAGE_SIZE)
+
+unsigned long mem_safe_offset(void)
+{
+ /*
+ * Due to the 4MB HEAP_SIZE for bzip2,
+ * 'decompress_offset + vmlinux.image_size' could be larger than the
+ * kernel at its final position plus its .bss, so take the larger of the two.
+ */
+ return max(decompress_offset + vmlinux.image_size,
+ vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size);
+}
+
+void *decompress_kernel(void)
+{
+ void *output = (void *)decompress_offset;
+
+ __decompress(_compressed_start, _compressed_end - _compressed_start,
+ NULL, NULL, output, 0, NULL, error);
+ return output;
+}
diff --git a/arch/s390/boot/compressed/decompressor.h b/arch/s390/boot/compressed/decompressor.h
new file mode 100644
index 000000000000..e1c1f2ec60f4
--- /dev/null
+++ b/arch/s390/boot/compressed/decompressor.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BOOT_COMPRESSED_DECOMPRESSOR_H
+#define BOOT_COMPRESSED_DECOMPRESSOR_H
+
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
+static inline void *decompress_kernel(void) {}
+#else
+void *decompress_kernel(void);
+#endif
+unsigned long mem_safe_offset(void);
+void error(char *m);
+
+struct vmlinux_info {
+ unsigned long default_lma;
+ void (*entry)(void);
+ unsigned long image_size; /* does not include .bss */
+ unsigned long bss_size; /* uncompressed image .bss size */
+ unsigned long bootdata_off;
+ unsigned long bootdata_size;
+};
+
+extern char _vmlinux_info[];
+#define vmlinux (*(struct vmlinux_info *)_vmlinux_info)
+
+#endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
deleted file mode 100644
index df8dbbc17bcc..000000000000
--- a/arch/s390/boot/compressed/head.S
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Startup glue code to uncompress the kernel
- *
- * Copyright IBM Corp. 2010
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-#include "sizes.h"
-
-__HEAD
-ENTRY(startup_decompressor)
- basr %r13,0 # get base
-.LPG1:
- # setup stack
- lg %r15,.Lstack-.LPG1(%r13)
- aghi %r15,-160
- brasl %r14,decompress_kernel
- # Set up registers for memory mover. We move the decompressed image to
- # 0x100000, where startup_continue of the decompressed image is supposed
- # to be.
- lgr %r4,%r2
- lg %r2,.Loffset-.LPG1(%r13)
- lg %r3,.Lmvsize-.LPG1(%r13)
- lgr %r5,%r3
- # Move the memory mover someplace safe so it doesn't overwrite itself.
- la %r1,0x200
- mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
- # When the memory mover is done we pass control to
- # arch/s390/kernel/head64.S:startup_continue which lives at 0x100000 in
- # the decompressed image.
- lgr %r6,%r2
- br %r1
-mover:
- mvcle %r2,%r4,0
- jo mover
- br %r6
-mover_end:
-
- .align 8
-.Lstack:
- .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
-.Loffset:
- .quad 0x100000
-.Lmvsize:
- .quad SZ__bss_start
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
deleted file mode 100644
index f66ad73c205b..000000000000
--- a/arch/s390/boot/compressed/misc.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Definitions and wrapper functions for kernel decompressor
- *
- * Copyright IBM Corp. 2010
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/uaccess.h>
-#include <asm/page.h>
-#include <asm/sclp.h>
-#include <asm/ipl.h>
-#include "sizes.h"
-
-/*
- * gzip declarations
- */
-#define STATIC static
-
-#undef memset
-#undef memcpy
-#undef memmove
-#define memmove memmove
-#define memzero(s, n) memset((s), 0, (n))
-
-/* Symbols defined by linker scripts */
-extern char input_data[];
-extern int input_len;
-extern char _end[];
-extern char _bss[], _ebss[];
-
-static void error(char *m);
-
-static unsigned long free_mem_ptr;
-static unsigned long free_mem_end_ptr;
-
-#ifdef CONFIG_HAVE_KERNEL_BZIP2
-#define HEAP_SIZE 0x400000
-#else
-#define HEAP_SIZE 0x10000
-#endif
-
-#ifdef CONFIG_KERNEL_GZIP
-#include "../../../../lib/decompress_inflate.c"
-#endif
-
-#ifdef CONFIG_KERNEL_BZIP2
-#include "../../../../lib/decompress_bunzip2.c"
-#endif
-
-#ifdef CONFIG_KERNEL_LZ4
-#include "../../../../lib/decompress_unlz4.c"
-#endif
-
-#ifdef CONFIG_KERNEL_LZMA
-#include "../../../../lib/decompress_unlzma.c"
-#endif
-
-#ifdef CONFIG_KERNEL_LZO
-#include "../../../../lib/decompress_unlzo.c"
-#endif
-
-#ifdef CONFIG_KERNEL_XZ
-#include "../../../../lib/decompress_unxz.c"
-#endif
-
-static int puts(const char *s)
-{
- sclp_early_printk(s);
- return 0;
-}
-
-static void error(char *x)
-{
- unsigned long long psw = 0x000a0000deadbeefULL;
-
- puts("\n\n");
- puts(x);
- puts("\n\n -- System halted");
-
- asm volatile("lpsw %0" : : "Q" (psw));
-}
-
-unsigned long decompress_kernel(void)
-{
- void *output, *kernel_end;
-
- output = (void *) ALIGN((unsigned long) _end + HEAP_SIZE, PAGE_SIZE);
- kernel_end = output + SZ__bss_start;
-
-#ifdef CONFIG_BLK_DEV_INITRD
- /*
- * Move the initrd right behind the end of the decompressed
- * kernel image. This also prevents initrd corruption caused by
- * bss clearing since kernel_end will always be located behind the
- * current bss section..
- */
- if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
- memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
- INITRD_START = (unsigned long) kernel_end;
- }
-#endif
-
- /*
- * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
- * initialized afterwards since they reside in bss.
- */
- memset(_bss, 0, _ebss - _bss);
- free_mem_ptr = (unsigned long) _end;
- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-
- __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
- return (unsigned long) output;
-}
-
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index b16ac8b3c439..7efc3938f595 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
@@ -8,9 +9,6 @@ ENTRY(startup)
SECTIONS
{
- /* Be careful parts of head_64.S assume startup_32 is at
- * address 0.
- */
. = 0;
.head.text : {
_head = . ;
@@ -26,7 +24,7 @@ SECTIONS
.rodata : {
_rodata = . ;
*(.rodata) /* read-only data */
- *(EXCLUDE_FILE (*piggy.o) .rodata.compressed)
+ *(.rodata.*)
_erodata = . ;
}
.data : {
@@ -35,14 +33,28 @@ SECTIONS
*(.data.*)
_edata = . ;
}
- startup_continue = 0x100000;
+ BOOT_DATA
+
+ /*
+ * Uncompressed image info used by the decompressor; it must match
+ * struct vmlinux_info. It comes from the .vmlinux.info section of the
+ * uncompressed vmlinux, linked in as info.o.
+ */
+ . = ALIGN(8);
+ .vmlinux.info : {
+ _vmlinux_info = .;
+ *(.vmlinux.info)
+ }
+
#ifdef CONFIG_KERNEL_UNCOMPRESSED
. = 0x100000;
#else
. = ALIGN(8);
#endif
.rodata.compressed : {
- *(.rodata.compressed)
+ _compressed_start = .;
+ *(.vmlinux.bin.compressed)
+ _compressed_end = .;
}
. = ALIGN(256);
.bss : {
diff --git a/arch/s390/boot/compressed/vmlinux.scr.lds.S b/arch/s390/boot/compressed/vmlinux.scr.lds.S
deleted file mode 100644
index ff01d18c9222..000000000000
--- a/arch/s390/boot/compressed/vmlinux.scr.lds.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-SECTIONS
-{
- .rodata.compressed : {
-#ifndef CONFIG_KERNEL_UNCOMPRESSED
- input_len = .;
- LONG(input_data_end - input_data) input_data = .;
-#endif
- *(.data)
-#ifndef CONFIG_KERNEL_UNCOMPRESSED
- output_len = . - 4;
- input_data_end = .;
-#endif
- }
-}
diff --git a/arch/s390/boot/ctype.c b/arch/s390/boot/ctype.c
new file mode 100644
index 000000000000..2495810b47e3
--- /dev/null
+++ b/arch/s390/boot/ctype.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../lib/ctype.c"
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index f721913b73f1..ce2cbbc41742 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -60,6 +60,9 @@ __HEAD
.long 0x02000690,0x60000050
.long 0x020006e0,0x20000050
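+
+# restart new PSW (lowcore 0x1a0): a CPU restart re-enters at iplstart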
+ .org 0x1a0
+ .quad 0,iplstart
+
.org 0x200
#
@@ -308,16 +311,11 @@ ENTRY(startup_kdump)
spt 6f-.LPG0(%r13)
mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
l %r15,.Lstack-.LPG0(%r13)
- ahi %r15,-STACK_FRAME_OVERHEAD
brasl %r14,verify_facilities
-#ifdef CONFIG_KERNEL_UNCOMPRESSED
- jg startup_continue
-#else
- jg startup_decompressor
-#endif
+ brasl %r14,startup_kernel
.Lstack:
- .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
+ .long 0x8000 + (1<<(PAGE_SHIFT+BOOT_STACK_ORDER)) - STACK_FRAME_OVERHEAD
.align 8
6: .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
new file mode 100644
index 000000000000..9dab596be98e
--- /dev/null
+++ b/arch/s390/boot/ipl_parm.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+#include <asm/sections.h>
+#include <asm/boot_data.h>
+#include "boot.h"
+
+char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
+struct ipl_parameter_block __bootdata(early_ipl_block);
+int __bootdata(early_ipl_block_valid);
+
+unsigned long __bootdata(memory_end);
+int __bootdata(memory_end_set);
+int __bootdata(noexec_disabled);
+
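+/*
+ * Execute diagnose 0x308 with a temporary program-check handler (the
+ * local label 1f) installed, so that an unsupported subcode cannot
+ * kill the early boot; the old program-check PSW is restored afterwards.
+ */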
+static inline int __diag308(unsigned long subcode, void *addr)
+{
+ register unsigned long _addr asm("0") = (unsigned long)addr;
+ register unsigned long _rc asm("1") = 0;
+ unsigned long reg1, reg2;
+ psw_t old = S390_lowcore.program_new_psw;
+
+ asm volatile(
+ " epsw %0,%1\n"
+ " st %0,%[psw_pgm]\n"
+ " st %1,%[psw_pgm]+4\n"
+ " larl %0,1f\n"
+ " stg %0,%[psw_pgm]+8\n"
+ " diag %[addr],%[subcode],0x308\n"
+ "1: nopr %%r7\n"
+ : "=&d" (reg1), "=&a" (reg2),
+ [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
+ [addr] "+d" (_addr), "+d" (_rc)
+ : [subcode] "d" (subcode)
+ : "cc", "memory");
+ S390_lowcore.program_new_psw = old;
+ return _rc;
+}
+
+void store_ipl_parmblock(void)
+{
+ int rc;
+
+ rc = __diag308(DIAG308_STORE, &early_ipl_block);
+ if (rc == DIAG308_RC_OK &&
+ early_ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
+ early_ipl_block_valid = 1;
+}
+
+static size_t scpdata_length(const char *buf, size_t count)
+{
+ while (count) {
+ if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
+ break;
+ count--;
+ }
+ return count;
+}
+
+static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size,
+ const struct ipl_parameter_block *ipb)
+{
+ size_t count;
+ size_t i;
+ int has_lowercase;
+
+ count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
+ ipb->ipl_info.fcp.scp_data_len));
+ if (!count)
+ goto out;
+
+ has_lowercase = 0;
+ for (i = 0; i < count; i++) {
+ if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
+ count = 0;
+ goto out;
+ }
+ if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
+ has_lowercase = 1;
+ }
+
+ if (has_lowercase)
+ memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
+ else
+ for (i = 0; i < count; i++)
+ dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
+out:
+ dest[count] = '\0';
+ return count;
+}
+
+static void append_ipl_block_parm(void)
+{
+ char *parm, *delim;
+ size_t len, rc = 0;
+
+ len = strlen(early_command_line);
+
+ delim = early_command_line + len; /* '\0' character position */
+ parm = early_command_line + len + 1; /* append right after '\0' */
+
+ switch (early_ipl_block.hdr.pbt) {
+ case DIAG308_IPL_TYPE_CCW:
+ rc = ipl_block_get_ascii_vmparm(
+ parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
+ break;
+ case DIAG308_IPL_TYPE_FCP:
+ rc = ipl_block_get_ascii_scpdata(
+ parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
+ break;
+ }
+ if (rc) {
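+ /* An ipl parameter that starts with '=' replaces the entire command line. */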
+ if (*parm == '=')
+ memmove(early_command_line, parm + 1, rc);
+ else
+ *delim = ' '; /* replace '\0' with space */
+ }
+}
+
+static inline int has_ebcdic_char(const char *str)
+{
+ int i;
+
+ for (i = 0; str[i]; i++)
+ if (str[i] & 0x80)
+ return 1;
+ return 0;
+}
+
+void setup_boot_command_line(void)
+{
+ COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+ /* convert arch command line to ascii if necessary */
+ if (has_ebcdic_char(COMMAND_LINE))
+ EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
+ /* copy arch command line */
+ strcpy(early_command_line, strim(COMMAND_LINE));
+
+ /* append IPL PARM data to the boot command line */
+ if (early_ipl_block_valid)
+ append_ipl_block_parm();
+}
+
+static char command_line_buf[COMMAND_LINE_SIZE] __section(.data);
+static void parse_mem_opt(void)
+{
+ char *param, *val;
+ bool enabled;
+ char *args;
+ int rc;
+
+ args = strcpy(command_line_buf, early_command_line);
+ while (*args) {
+ args = next_arg(args, &param, &val);
+
+ if (!strcmp(param, "mem")) {
+ memory_end = memparse(val, NULL);
+ memory_end_set = 1;
+ }
+
+ if (!strcmp(param, "noexec")) {
+ rc = kstrtobool(val, &enabled);
+ if (!rc && !enabled)
+ noexec_disabled = 1;
+ }
+ }
+}
+
+void setup_memory_end(void)
+{
+ parse_mem_opt();
+#ifdef CONFIG_CRASH_DUMP
+ if (!OLDMEM_BASE && early_ipl_block_valid &&
+ early_ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP &&
+ early_ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) {
+ if (!sclp_early_get_hsa_size(&memory_end) && memory_end)
+ memory_end_set = 1;
+ }
+#endif
+}
diff --git a/arch/s390/boot/ipl_vmparm.c b/arch/s390/boot/ipl_vmparm.c
new file mode 100644
index 000000000000..8dacd5fadfd7
--- /dev/null
+++ b/arch/s390/boot/ipl_vmparm.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kernel/ipl_vmparm.c"
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
new file mode 100644
index 000000000000..4cb771ba13fa
--- /dev/null
+++ b/arch/s390/boot/mem_detect.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/sclp.h>
+#include <asm/sections.h>
+#include <asm/mem_detect.h>
+#include <asm/sparsemem.h>
+#include "compressed/decompressor.h"
+#include "boot.h"
+
+unsigned long __bootdata(max_physmem_end);
+struct mem_detect_info __bootdata(mem_detect);
+
+/* up to 256 storage elements, 1020 subincrements each */
+#define ENTRIES_EXTENDED_MAX \
+ (256 * (1020 / 2) * sizeof(struct mem_detect_block))
+
+/*
+ * To avoid corrupting old kernel memory during dump, find the lowest
+ * possible memory chunk: either right after the end of the decompressed
+ * kernel, or after the initrd (if it is present and there is no hole
+ * between the kernel end and the initrd).
+ */
+static void *mem_detect_alloc_extended(void)
+{
+ unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
+
+ if (IS_ENABLED(BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
+ INITRD_START < offset + ENTRIES_EXTENDED_MAX)
+ offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
+
+ return (void *)offset;
+}
+
+static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
+{
+ if (n < MEM_INLINED_ENTRIES)
+ return &mem_detect.entries[n];
+ if (unlikely(!mem_detect.entries_extended))
+ mem_detect.entries_extended = mem_detect_alloc_extended();
+ return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
+}
+
+/*
+ * Sequential calls to add_mem_detect_block() with adjacent memory areas
+ * are merged together into a single memory block.
+ */
+void add_mem_detect_block(u64 start, u64 end)
+{
+ struct mem_detect_block *block;
+
+ if (mem_detect.count) {
+ block = __get_mem_detect_block_ptr(mem_detect.count - 1);
+ if (block->end == start) {
+ block->end = end;
+ return;
+ }
+ }
+
+ block = __get_mem_detect_block_ptr(mem_detect.count);
+ block->start = start;
+ block->end = end;
+ mem_detect.count++;
+}
+
+static unsigned long get_mem_detect_end(void)
+{
+ if (mem_detect.count)
+ return __get_mem_detect_block_ptr(mem_detect.count - 1)->end;
+ return 0;
+}
+
+static int __diag260(unsigned long rx1, unsigned long rx2)
+{
+ register unsigned long _rx1 asm("2") = rx1;
+ register unsigned long _rx2 asm("3") = rx2;
+ register unsigned long _ry asm("4") = 0x10; /* storage configuration */
+ int rc = -1; /* fail */
+ unsigned long reg1, reg2;
+ psw_t old = S390_lowcore.program_new_psw;
+
+ asm volatile(
+ " epsw %0,%1\n"
+ " st %0,%[psw_pgm]\n"
+ " st %1,%[psw_pgm]+4\n"
+ " larl %0,1f\n"
+ " stg %0,%[psw_pgm]+8\n"
+ " diag %[rx],%[ry],0x260\n"
+ " ipm %[rc]\n"
+ " srl %[rc],28\n"
+ "1:\n"
+ : "=&d" (reg1), "=&a" (reg2),
+ [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
+ [rc] "+&d" (rc), [ry] "+d" (_ry)
+ : [rx] "d" (_rx1), "d" (_rx2)
+ : "cc", "memory");
+ S390_lowcore.program_new_psw = old;
+ return rc == 0 ? _ry : -1;
+}
+
+static int diag260(void)
+{
+ int rc, i;
+
+ struct {
+ unsigned long start;
+ unsigned long end;
+ } storage_extents[8] __aligned(16); /* VM supports up to 8 extents */
+
+ memset(storage_extents, 0, sizeof(storage_extents));
+ rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
+ if (rc == -1)
+ return -1;
+
+ for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
+ add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
+ return 0;
+}
+
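+/*
+ * Probe an address for accessibility via TPROT: returns the condition
+ * code (0..3), or -EFAULT if the access raised a program check, i.e.
+ * the address is not backed by memory.
+ */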
+static int tprot(unsigned long addr)
+{
+ unsigned long pgm_addr;
+ int rc = -EFAULT;
+ psw_t old = S390_lowcore.program_new_psw;
+
+ S390_lowcore.program_new_psw.mask = __extract_psw();
+ asm volatile(
+ " larl %[pgm_addr],1f\n"
+ " stg %[pgm_addr],%[psw_pgm_addr]\n"
+ " tprot 0(%[addr]),0\n"
+ " ipm %[rc]\n"
+ " srl %[rc],28\n"
+ "1:\n"
+ : [pgm_addr] "=&d"(pgm_addr),
+ [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
+ [rc] "+&d"(rc)
+ : [addr] "a"(addr)
+ : "cc", "memory");
+ S390_lowcore.program_new_psw = old;
+ return rc;
+}
+
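+/*
+ * Binary search for the end of memory in 1MB steps: each tprot() probe
+ * halves the remaining range, so the end is found in at most
+ * MAX_PHYSMEM_BITS - 20 probes.
+ */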
+static void search_mem_end(void)
+{
+ unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
+ unsigned long offset = 0;
+ unsigned long pivot;
+
+ while (range > 1) {
+ range >>= 1;
+ pivot = offset + range;
+ if (!tprot(pivot << 20))
+ offset = pivot;
+ }
+
+ add_mem_detect_block(0, (offset + 1) << 20);
+}
+
+void detect_memory(void)
+{
+ sclp_early_get_memsize(&max_physmem_end);
+
+ if (!sclp_early_read_storage_info()) {
+ mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
+ return;
+ }
+
+ if (!diag260()) {
+ mem_detect.info_source = MEM_DETECT_DIAG260;
+ return;
+ }
+
+ if (max_physmem_end) {
+ add_mem_detect_block(0, max_physmem_end);
+ mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
+ return;
+ }
+
+ search_mem_end();
+ mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
+ max_physmem_end = get_mem_detect_end();
+}
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
new file mode 100644
index 000000000000..4d441317cdeb
--- /dev/null
+++ b/arch/s390/boot/startup.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/string.h>
+#include <asm/setup.h>
+#include <asm/sclp.h>
+#include "compressed/decompressor.h"
+#include "boot.h"
+
+extern char __boot_data_start[], __boot_data_end[];
+
+void error(char *x)
+{
+ sclp_early_printk("\n\n");
+ sclp_early_printk(x);
+ sclp_early_printk("\n\n -- System halted");
+
+ disabled_wait(0xdeadbeef);
+}
+
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
+unsigned long mem_safe_offset(void)
+{
+ return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
+}
+#endif
+
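+/*
+ * The initrd must not lie inside the area the decompressed kernel and
+ * the decompressor heap will occupy; move it upwards if it does.
+ */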
+static void rescue_initrd(void)
+{
+ unsigned long min_initrd_addr;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
+ return;
+ if (!INITRD_START || !INITRD_SIZE)
+ return;
+ min_initrd_addr = mem_safe_offset();
+ if (min_initrd_addr <= INITRD_START)
+ return;
+ memmove((void *)min_initrd_addr, (void *)INITRD_START, INITRD_SIZE);
+ INITRD_START = min_initrd_addr;
+}
+
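+/*
+ * Data collected during early boot (command line, ipl block, memory
+ * info) lives in the decompressor's .boot.data section; copy it into
+ * the uncompressed image at the offset recorded in struct vmlinux_info.
+ */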
+static void copy_bootdata(void)
+{
+ if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
+ error(".boot.data section size mismatch");
+ memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
+}
+
+void startup_kernel(void)
+{
+ void *img;
+
+ rescue_initrd();
+ sclp_early_read_info();
+ store_ipl_parmblock();
+ setup_boot_command_line();
+ setup_memory_end();
+ detect_memory();
+ if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
+ img = decompress_kernel();
+ memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
+ }
+ copy_bootdata();
+ vmlinux.entry();
+}
diff --git a/arch/s390/boot/string.c b/arch/s390/boot/string.c
new file mode 100644
index 000000000000..25aca07898ba
--- /dev/null
+++ b/arch/s390/boot/string.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include "../lib/string.c"
+
+int strncmp(const char *cs, const char *ct, size_t count)
+{
+ unsigned char c1, c2;
+
+ while (count) {
+ c1 = *cs++;
+ c2 = *ct++;
+ if (c1 != c2)
+ return c1 < c2 ? -1 : 1;
+ if (!c1)
+ break;
+ count--;
+ }
+ return 0;
+}
+
+char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
+
+char *strim(char *s)
+{
+ size_t size;
+ char *end;
+
+ size = strlen(s);
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end >= s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ return skip_spaces(s);
+}
+
+/* Works only for digits and letters, but small and fast */
+#define TOLOWER(x) ((x) | 0x20)
+
+static unsigned int simple_guess_base(const char *cp)
+{
+ if (cp[0] == '0') {
+ if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2]))
+ return 16;
+ else
+ return 8;
+ } else {
+ return 10;
+ }
+}
+
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+
+unsigned long long simple_strtoull(const char *cp, char **endp,
+ unsigned int base)
+{
+ unsigned long long result = 0;
+
+ if (!base)
+ base = simple_guess_base(cp);
+
+ if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
+ cp += 2;
+
+ while (isxdigit(*cp)) {
+ unsigned int value;
+
+ value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
+ if (value >= base)
+ break;
+ result = result * base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *)cp;
+
+ return result;
+}
+
+long simple_strtol(const char *cp, char **endp, unsigned int base)
+{
+ if (*cp == '-')
+ return -simple_strtoull(cp + 1, endp, base);
+
+ return simple_strtoull(cp, endp, base);
+}
+
+int kstrtobool(const char *s, bool *res)
+{
+ if (!s)
+ return -EINVAL;
+
+ switch (s[0]) {
+ case 'y':
+ case 'Y':
+ case '1':
+ *res = true;
+ return 0;
+ case 'n':
+ case 'N':
+ case '0':
+ *res = false;
+ return 0;
+ case 'o':
+ case 'O':
+ switch (s[1]) {
+ case 'n':
+ case 'N':
+ *res = true;
+ return 0;
+ case 'f':
+ case 'F':
+ *res = false;
+ return 0;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 941d8cc6c9f5..259d1698ac50 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -668,7 +668,6 @@ CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index eb6f75f24208..37fd60c20e22 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -610,7 +610,6 @@ CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index c54cb26eb7f5..812d9498d97b 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -44,7 +44,7 @@ struct s390_aes_ctx {
int key_len;
unsigned long fc;
union {
- struct crypto_skcipher *blk;
+ struct crypto_sync_skcipher *blk;
struct crypto_cipher *cip;
} fallback;
};
@@ -54,7 +54,7 @@ struct s390_xts_ctx {
u8 pcc_key[32];
int key_len;
unsigned long fc;
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
struct gcm_sg_walk {
@@ -184,14 +184,15 @@ static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned int ret;
- crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
+ crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);
+ ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
+ tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
CRYPTO_TFM_RES_MASK;
return ret;
@@ -204,9 +205,9 @@ static int fallback_blk_dec(struct blkcipher_desc *desc,
unsigned int ret;
struct crypto_blkcipher *tfm = desc->tfm;
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
- skcipher_request_set_tfm(req, sctx->fallback.blk);
+ skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
@@ -223,9 +224,9 @@ static int fallback_blk_enc(struct blkcipher_desc *desc,
unsigned int ret;
struct crypto_blkcipher *tfm = desc->tfm;
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
- skcipher_request_set_tfm(req, sctx->fallback.blk);
+ skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
@@ -306,8 +307,7 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
const char *name = tfm->__crt_alg->cra_name;
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
- CRYPTO_ALG_ASYNC |
+ sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(sctx->fallback.blk)) {
@@ -323,7 +323,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(sctx->fallback.blk);
+ crypto_free_sync_skcipher(sctx->fallback.blk);
}
static struct crypto_alg ecb_aes_alg = {
@@ -453,14 +453,15 @@ static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
unsigned int ret;
- crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
+ crypto_sync_skcipher_clear_flags(xts_ctx->fallback,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);
+ ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
+ tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) &
CRYPTO_TFM_RES_MASK;
return ret;
@@ -472,10 +473,10 @@ static int xts_fallback_decrypt(struct blkcipher_desc *desc,
{
struct crypto_blkcipher *tfm = desc->tfm;
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
unsigned int ret;
- skcipher_request_set_tfm(req, xts_ctx->fallback);
+ skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
@@ -491,10 +492,10 @@ static int xts_fallback_encrypt(struct blkcipher_desc *desc,
{
struct crypto_blkcipher *tfm = desc->tfm;
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
unsigned int ret;
- skcipher_request_set_tfm(req, xts_ctx->fallback);
+ skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
@@ -611,8 +612,7 @@ static int xts_fallback_init(struct crypto_tfm *tfm)
const char *name = tfm->__crt_alg->cra_name;
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
- xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
- CRYPTO_ALG_ASYNC |
+ xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(xts_ctx->fallback)) {
@@ -627,7 +627,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
{
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(xts_ctx->fallback);
+ crypto_free_sync_skcipher(xts_ctx->fallback);
}
static struct crypto_alg xts_aes_alg = {
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index ab9a0ebecc19..e8d9fa54569c 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -30,26 +30,31 @@ static DEFINE_SPINLOCK(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+struct key_blob {
+ __u8 key[MAXKEYBLOBSIZE];
+ unsigned int keylen;
+};
+
struct s390_paes_ctx {
- struct pkey_seckey sk;
+ struct key_blob kb;
struct pkey_protkey pk;
unsigned long fc;
};
struct s390_pxts_ctx {
- struct pkey_seckey sk[2];
+ struct key_blob kb[2];
struct pkey_protkey pk[2];
unsigned long fc;
};
-static inline int __paes_convert_key(struct pkey_seckey *sk,
+static inline int __paes_convert_key(struct key_blob *kb,
struct pkey_protkey *pk)
{
int i, ret;
/* try three times in case of failure */
for (i = 0; i < 3; i++) {
- ret = pkey_skey2pkey(sk, pk);
+ ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk);
if (ret == 0)
break;
}
@@ -61,7 +66,7 @@ static int __paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
- if (__paes_convert_key(&ctx->sk, &ctx->pk))
+ if (__paes_convert_key(&ctx->kb, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
@@ -80,10 +85,8 @@ static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (key_len != SECKEYBLOBSIZE)
- return -EINVAL;
-
- memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
+ memcpy(ctx->kb.key, in_key, key_len);
+ ctx->kb.keylen = key_len;
if (__paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
@@ -147,8 +150,8 @@ static struct crypto_alg ecb_paes_alg = {
.cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
- .min_keysize = SECKEYBLOBSIZE,
- .max_keysize = SECKEYBLOBSIZE,
+ .min_keysize = MINKEYBLOBSIZE,
+ .max_keysize = MAXKEYBLOBSIZE,
.setkey = ecb_paes_set_key,
.encrypt = ecb_paes_encrypt,
.decrypt = ecb_paes_decrypt,
@@ -160,7 +163,7 @@ static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
- if (__paes_convert_key(&ctx->sk, &ctx->pk))
+ if (__paes_convert_key(&ctx->kb, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
@@ -179,7 +182,8 @@ static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
- memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
+ memcpy(ctx->kb.key, in_key, key_len);
+ ctx->kb.keylen = key_len;
if (__cbc_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
@@ -250,8 +254,8 @@ static struct crypto_alg cbc_paes_alg = {
.cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
- .min_keysize = SECKEYBLOBSIZE,
- .max_keysize = SECKEYBLOBSIZE,
+ .min_keysize = MINKEYBLOBSIZE,
+ .max_keysize = MAXKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = cbc_paes_set_key,
.encrypt = cbc_paes_encrypt,
@@ -264,8 +268,8 @@ static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
unsigned long fc;
- if (__paes_convert_key(&ctx->sk[0], &ctx->pk[0]) ||
- __paes_convert_key(&ctx->sk[1], &ctx->pk[1]))
+ if (__paes_convert_key(&ctx->kb[0], &ctx->pk[0]) ||
+ __paes_convert_key(&ctx->kb[1], &ctx->pk[1]))
return -EINVAL;
if (ctx->pk[0].type != ctx->pk[1].type)
@@ -287,10 +291,16 @@ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
u8 ckey[2 * AES_MAX_KEY_SIZE];
- unsigned int ckey_len;
+ unsigned int ckey_len, keytok_len;
+
+ if (key_len % 2)
+ return -EINVAL;
- memcpy(ctx->sk[0].seckey, in_key, SECKEYBLOBSIZE);
- memcpy(ctx->sk[1].seckey, in_key + SECKEYBLOBSIZE, SECKEYBLOBSIZE);
+ keytok_len = key_len / 2;
+ memcpy(ctx->kb[0].key, in_key, keytok_len);
+ ctx->kb[0].keylen = keytok_len;
+ memcpy(ctx->kb[1].key, in_key + keytok_len, keytok_len);
+ ctx->kb[1].keylen = keytok_len;
if (__xts_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
@@ -386,8 +396,8 @@ static struct crypto_alg xts_paes_alg = {
.cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
- .min_keysize = 2 * SECKEYBLOBSIZE,
- .max_keysize = 2 * SECKEYBLOBSIZE,
+ .min_keysize = 2 * MINKEYBLOBSIZE,
+ .max_keysize = 2 * MAXKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = xts_paes_set_key,
.encrypt = xts_paes_encrypt,
@@ -400,7 +410,7 @@ static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
- if (__paes_convert_key(&ctx->sk, &ctx->pk))
+ if (__paes_convert_key(&ctx->kb, &ctx->pk))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
@@ -420,7 +430,8 @@ static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
{
struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
- memcpy(ctx->sk.seckey, in_key, key_len);
+ memcpy(ctx->kb.key, in_key, key_len);
+ ctx->kb.keylen = key_len;
if (__ctr_paes_set_key(ctx)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
@@ -532,8 +543,8 @@ static struct crypto_alg ctr_paes_alg = {
.cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list),
.cra_u = {
.blkcipher = {
- .min_keysize = SECKEYBLOBSIZE,
- .max_keysize = SECKEYBLOBSIZE,
+ .min_keysize = MINKEYBLOBSIZE,
+ .max_keysize = MAXKEYBLOBSIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ctr_paes_set_key,
.encrypt = ctr_paes_encrypt,
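
With the key_blob change above, paes setkey no longer insists on a 64-byte secure key: any blob between MINKEYBLOBSIZE (== SECKEYBLOBSIZE, 64 bytes) and MAXKEYBLOBSIZE (== PROTKEYBLOBSIZE, 80 bytes) is copied into the context and handed to pkey_keyblob2pkey() for conversion, retried up to three times to ride out transient failures. A hedged sketch of an in-kernel caller (the blob contents are whatever the pkey layer accepts, e.g. a CCA secure key or a protected-key blob):

        __u8 blob[MAXKEYBLOBSIZE];      /* 64..80 bytes, filled by the caller */
        struct pkey_protkey pk;
        int rc;

        rc = pkey_keyblob2pkey(blob, sizeof(blob), &pk);
        if (rc)
                return rc;              /* e.g. wrong master key */
        /* pk.type, pk.len and pk.protkey now describe the protected key */
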
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f40600eb1762..7cb6a52f727d 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -221,7 +221,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
@@ -232,6 +231,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_ZCRYPT=m
+CONFIG_ZCRYPT_MULTIDEVNODES=y
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c
index 5d85a039391c..601b70786dc8 100644
--- a/arch/s390/hypfs/hypfs_sprp.c
+++ b/arch/s390/hypfs/hypfs_sprp.c
@@ -68,40 +68,44 @@ static int hypfs_sprp_create(void **data_ptr, void **free_ptr, size_t *size)
static int __hypfs_sprp_ioctl(void __user *user_area)
{
- struct hypfs_diag304 diag304;
+ struct hypfs_diag304 *diag304;
unsigned long cmd;
void __user *udata;
void *data;
int rc;
- if (copy_from_user(&diag304, user_area, sizeof(diag304)))
- return -EFAULT;
- if ((diag304.args[0] >> 8) != 0 || diag304.args[1] > DIAG304_CMD_MAX)
- return -EINVAL;
-
+ rc = -ENOMEM;
data = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!data)
- return -ENOMEM;
-
- udata = (void __user *)(unsigned long) diag304.data;
- if (diag304.args[1] == DIAG304_SET_WEIGHTS ||
- diag304.args[1] == DIAG304_SET_CAPPING)
- if (copy_from_user(data, udata, PAGE_SIZE)) {
- rc = -EFAULT;
+ diag304 = kzalloc(sizeof(*diag304), GFP_KERNEL);
+ if (!data || !diag304)
+ goto out;
+
+ rc = -EFAULT;
+ if (copy_from_user(diag304, user_area, sizeof(*diag304)))
+ goto out;
+ rc = -EINVAL;
+ if ((diag304->args[0] >> 8) != 0 || diag304->args[1] > DIAG304_CMD_MAX)
+ goto out;
+
+ rc = -EFAULT;
+ udata = (void __user *)(unsigned long) diag304->data;
+ if (diag304->args[1] == DIAG304_SET_WEIGHTS ||
+ diag304->args[1] == DIAG304_SET_CAPPING)
+ if (copy_from_user(data, udata, PAGE_SIZE))
goto out;
- }
- cmd = *(unsigned long *) &diag304.args[0];
- diag304.rc = hypfs_sprp_diag304(data, cmd);
+ cmd = *(unsigned long *) &diag304->args[0];
+ diag304->rc = hypfs_sprp_diag304(data, cmd);
- if (diag304.args[1] == DIAG304_QUERY_PRP)
+ if (diag304->args[1] == DIAG304_QUERY_PRP)
if (copy_to_user(udata, data, PAGE_SIZE)) {
rc = -EFAULT;
goto out;
}
- rc = copy_to_user(user_area, &diag304, sizeof(diag304)) ? -EFAULT : 0;
+ rc = copy_to_user(user_area, diag304, sizeof(*diag304)) ? -EFAULT : 0;
out:
+ kfree(diag304);
free_page((unsigned long) data);
return rc;
}
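
__hypfs_sprp_ioctl() above is reshaped into the usual error ladder: both buffers are allocated up front, rc is preset before each step that can fail, and every failure path funnels through the single out label. The shape in isolation (a generic sketch, not the hypfs code; kfree(NULL) is harmless, which is what makes the single label work):

        static int example_ioctl(void __user *uptr, size_t len)
        {
                void *buf;
                int rc;

                rc = -ENOMEM;
                buf = kzalloc(len, GFP_KERNEL);
                if (!buf)
                        goto out;

                rc = -EFAULT;
                if (copy_from_user(buf, uptr, len))
                        goto out;

                rc = 0;                 /* ... real work here ... */
        out:
                kfree(buf);             /* kfree(NULL) is a no-op */
                return rc;
        }
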
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index 4afbb5938726..c5bd9f4437e5 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -40,26 +40,27 @@ struct appldata_product_id {
u16 mod_lvl; /* modification level */
} __attribute__ ((packed));
-static inline int appldata_asm(struct appldata_product_id *id,
+
+static inline int appldata_asm(struct appldata_parameter_list *parm_list,
+ struct appldata_product_id *id,
unsigned short fn, void *buffer,
unsigned short length)
{
- struct appldata_parameter_list parm_list;
int ry;
if (!MACHINE_IS_VM)
return -EOPNOTSUPP;
- parm_list.diag = 0xdc;
- parm_list.function = fn;
- parm_list.parlist_length = sizeof(parm_list);
- parm_list.buffer_length = length;
- parm_list.product_id_addr = (unsigned long) id;
- parm_list.buffer_addr = virt_to_phys(buffer);
+ parm_list->diag = 0xdc;
+ parm_list->function = fn;
+ parm_list->parlist_length = sizeof(*parm_list);
+ parm_list->buffer_length = length;
+ parm_list->product_id_addr = (unsigned long) id;
+ parm_list->buffer_addr = virt_to_phys(buffer);
diag_stat_inc(DIAG_STAT_X0DC);
asm volatile(
" diag %1,%0,0xdc"
: "=d" (ry)
- : "d" (&parm_list), "m" (parm_list), "m" (*id)
+ : "d" (parm_list), "m" (*parm_list), "m" (*id)
: "cc");
return ry;
}
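
appldata_asm() above loses its on-stack struct appldata_parameter_list: with virtually mapped kernel stacks (added later in this series) a stack-local buffer is no longer identity-mapped, so virt_to_phys()/real addressing of it breaks, and callers now own the buffer. A hedged sketch of an adjusted call site (APPLDATA_START_INTERVAL_REC is assumed to be one of the fn codes defined in this header; static storage is identity-mapped):

        static struct appldata_parameter_list parm_list;

        static int start_interval_rec(struct appldata_product_id *id,
                                      void *buffer, unsigned short len)
        {
                return appldata_asm(&parm_list, id, APPLDATA_START_INTERVAL_REC,
                                    buffer, len);
        }
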
diff --git a/arch/s390/include/asm/boot_data.h b/arch/s390/include/asm/boot_data.h
new file mode 100644
index 000000000000..2d999ccb977a
--- /dev/null
+++ b/arch/s390/include/asm/boot_data.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_BOOT_DATA_H
+#define _ASM_S390_BOOT_DATA_H
+
+#include <asm/setup.h>
+#include <asm/ipl.h>
+
+extern char early_command_line[COMMAND_LINE_SIZE];
+extern struct ipl_parameter_block early_ipl_block;
+extern int early_ipl_block_valid;
+
+#endif /* _ASM_S390_BOOT_DATA_H */
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 860cab7479c3..7293c139dd79 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -64,6 +64,8 @@ extern int ccwgroup_driver_register (struct ccwgroup_driver *cdriver);
extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
int num_devices, const char *buf);
+struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
+ char *bus_id);
extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 97db2fba546a..63b46e30b2c3 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -9,6 +9,8 @@
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
+#include <asm-generic/compat.h>
+
#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \
typeof(0?(__force t)0:0ULL), u64))
@@ -51,34 +53,18 @@
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_key_t;
-typedef s32 compat_timer_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
typedef struct {
u32 mask;
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 99c8ce30b3cd..e78cda94456b 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -64,11 +64,10 @@ static inline int test_facility(unsigned long nr)
* @stfle_fac_list: array where facility list can be stored
* @size: size of passed in array in double words
*/
-static inline void stfle(u64 *stfle_fac_list, int size)
+static inline void __stfle(u64 *stfle_fac_list, int size)
{
unsigned long nr;
- preempt_disable();
asm volatile(
" stfl 0(0)\n"
: "=m" (S390_lowcore.stfl_fac_list));
@@ -85,6 +84,12 @@ static inline void stfle(u64 *stfle_fac_list, int size)
nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
}
memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+}
+
+static inline void stfle(u64 *stfle_fac_list, int size)
+{
+ preempt_disable();
+ __stfle(stfle_fac_list, size);
preempt_enable();
}
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index ae5135704616..a8389e2d2f03 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -89,8 +89,8 @@ void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
extern void s390_reset_system(void);
extern void ipl_store_parameters(void);
-extern size_t append_ipl_vmparm(char *, size_t);
-extern size_t append_ipl_scpdata(char *, size_t);
+extern size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
+ const struct ipl_parameter_block *ipb);
enum ipl_type {
IPL_TYPE_UNKNOWN = 1,
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 40f651292aa7..e2d3e6c43395 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -14,41 +14,33 @@
* We use a brcl 0,2 instruction for jump labels at compile time so it
* can be easily distinguished from a hotpatch generated instruction.
*/
-static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+static inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
- ".pushsection __jump_table, \"aw\"\n"
- ".balign 8\n"
- ".quad 0b, %l[label], %0\n"
- ".popsection\n"
- : : "X" (&((char *)key)[branch]) : : label);
-
+ asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
+ ".pushsection __jump_table,\"aw\"\n"
+ ".balign 8\n"
+ ".long 0b-.,%l[label]-.\n"
+ ".quad %0-.\n"
+ ".popsection\n"
+ : : "X" (&((char *)key)[branch]) : : label);
return false;
label:
return true;
}
-static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("0: brcl 15, %l[label]\n"
- ".pushsection __jump_table, \"aw\"\n"
- ".balign 8\n"
- ".quad 0b, %l[label], %0\n"
- ".popsection\n"
- : : "X" (&((char *)key)[branch]) : : label);
-
+ asm_volatile_goto("0: brcl 15,%l[label]\n"
+ ".pushsection __jump_table,\"aw\"\n"
+ ".balign 8\n"
+ ".long 0b-.,%l[label]-.\n"
+ ".quad %0-.\n"
+ ".popsection\n"
+ : : "X" (&((char *)key)[branch]) : : label);
return false;
label:
return true;
}
-typedef unsigned long jump_label_t;
-
-struct jump_entry {
- jump_label_t code;
- jump_label_t target;
- jump_label_t key;
-};
-
#endif /* __ASSEMBLY__ */
#endif
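
The new inline asm above emits self-relative jump table entries: two .long offsets for the branch location and target plus one .quad offset for the key, 16 bytes instead of the previous three absolute .quads (24 bytes), and nothing in the table needs relocating at boot. The arch-private struct jump_entry goes away because the generic relative layout applies; roughly (a sketch of that generic layout and accessor, as in include/linux/jump_label.h):

        struct jump_entry {
                s32 code;       /* branch location, relative to &code  */
                s32 target;     /* branch target, relative to &target  */
                long key;       /* static_key + branch flag, rel. &key */
        };

        static inline unsigned long jump_entry_code(const struct jump_entry *entry)
        {
                return (unsigned long)&entry->code + entry->code;
        }
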
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
new file mode 100644
index 000000000000..70930fe5c496
--- /dev/null
+++ b/arch/s390/include/asm/kasan.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_KASAN
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#ifdef CONFIG_KASAN_S390_4_LEVEL_PAGING
+#define KASAN_SHADOW_SIZE \
+ (_AC(1, UL) << (_REGION1_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
+#else
+#define KASAN_SHADOW_SIZE \
+ (_AC(1, UL) << (_REGION2_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
+#endif
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_START KASAN_SHADOW_OFFSET
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+extern void kasan_early_init(void);
+extern void kasan_copy_shadow(pgd_t *dst);
+extern void kasan_free_early_identity(void);
+#else
+static inline void kasan_early_init(void) { }
+static inline void kasan_copy_shadow(pgd_t *dst) { }
+static inline void kasan_free_early_identity(void) { }
+#endif
+
+#endif
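
KASAN_SHADOW_SCALE_SHIFT = 3 means one shadow byte tracks 8 bytes of address space, so the shadow is 1/8th of the region it covers. Assuming the usual s390 region shifts (_REGION2_SHIFT = 42, _REGION1_SHIFT = 53), that works out to KASAN_SHADOW_SIZE = 2^(42-3) = 512 GiB of shadow for the 4 TiB three-level address space, and 2^(53-3) = 1 PiB of shadow for the 8 PiB case selected by CONFIG_KASAN_S390_4_LEVEL_PAGING.
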
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 29c940bf8506..d5d24889c3bc 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
#define KVM_REQ_ICPT_OPEREXC KVM_ARCH_REQ(2)
#define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3)
#define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4)
+#define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5)
#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
@@ -186,6 +187,7 @@ struct kvm_s390_sie_block {
#define ECA_AIV 0x00200000
#define ECA_VX 0x00020000
#define ECA_PROTEXCI 0x00002000
+#define ECA_APIE 0x00000008
#define ECA_SII 0x00000001
__u32 eca; /* 0x004c */
#define ICPT_INST 0x04
@@ -237,7 +239,11 @@ struct kvm_s390_sie_block {
psw_t gpsw; /* 0x0090 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
- __u8 reservedb0[20]; /* 0x00b0 */
+ __u8 reservedb0[8]; /* 0x00b0 */
+#define HPID_KVM 0x4
+#define HPID_VSIE 0x5
+ __u8 hpid; /* 0x00b8 */
+ __u8 reservedb9[11]; /* 0x00b9 */
__u16 extcpuaddr; /* 0x00c4 */
__u16 eic; /* 0x00c6 */
__u32 reservedc8; /* 0x00c8 */
@@ -255,6 +261,8 @@ struct kvm_s390_sie_block {
__u8 reservede4[4]; /* 0x00e4 */
__u64 tecmc; /* 0x00e8 */
__u8 reservedf0[12]; /* 0x00f0 */
+#define CRYCB_FORMAT_MASK 0x00000003
+#define CRYCB_FORMAT0 0x00000000
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
__u32 crycbd; /* 0x00fc */
@@ -715,6 +723,7 @@ struct kvm_s390_crypto {
__u32 crycbd;
__u8 aes_kw;
__u8 dea_kw;
+ __u8 apie;
};
#define APCB0_MASK_SIZE 1
@@ -855,6 +864,10 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
+void kvm_arch_crypto_clear_masks(struct kvm *kvm);
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+ unsigned long *aqm, unsigned long *adm);
+
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 406d940173ab..cc0947e08b6f 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -102,9 +102,9 @@ struct lowcore {
__u64 current_task; /* 0x0338 */
__u64 kernel_stack; /* 0x0340 */
- /* Interrupt, panic and restart stack. */
+ /* Interrupt, DAT-off and restart stack. */
__u64 async_stack; /* 0x0348 */
- __u64 panic_stack; /* 0x0350 */
+ __u64 nodat_stack; /* 0x0350 */
__u64 restart_stack; /* 0x0358 */
/* Restart function and parameter. */
diff --git a/arch/s390/include/asm/mem_detect.h b/arch/s390/include/asm/mem_detect.h
new file mode 100644
index 000000000000..6114b92ab667
--- /dev/null
+++ b/arch/s390/include/asm/mem_detect.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_MEM_DETECT_H
+#define _ASM_S390_MEM_DETECT_H
+
+#include <linux/types.h>
+
+enum mem_info_source {
+ MEM_DETECT_NONE = 0,
+ MEM_DETECT_SCLP_STOR_INFO,
+ MEM_DETECT_DIAG260,
+ MEM_DETECT_SCLP_READ_INFO,
+ MEM_DETECT_BIN_SEARCH
+};
+
+struct mem_detect_block {
+ u64 start;
+ u64 end;
+};
+
+/*
+ * Storage element id is defined as 1 byte (up to 256 storage elements).
+ * In practice only storage element ids 0 and 1 are used.
+ * According to the architecture one storage element can have as many as
+ * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
+ * If more mem_detect_blocks are required, a block of memory from an already
+ * known mem_detect_block is taken (entries_extended points to it).
+ */
+#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
+
+struct mem_detect_info {
+ u32 count;
+ u8 info_source;
+ struct mem_detect_block entries[MEM_INLINED_ENTRIES];
+ struct mem_detect_block *entries_extended;
+};
+extern struct mem_detect_info mem_detect;
+
+void add_mem_detect_block(u64 start, u64 end);
+
+static inline int __get_mem_detect_block(u32 n, unsigned long *start,
+ unsigned long *end)
+{
+ if (n >= mem_detect.count) {
+ *start = 0;
+ *end = 0;
+ return -1;
+ }
+
+ if (n < MEM_INLINED_ENTRIES) {
+ *start = (unsigned long)mem_detect.entries[n].start;
+ *end = (unsigned long)mem_detect.entries[n].end;
+ } else {
+ *start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
+ *end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
+ }
+ return 0;
+}
+
+/**
+ * for_each_mem_detect_block - early online memory range iterator
+ * @i: an integer used as loop variable
+ * @p_start: ptr to unsigned long for start address of the range
+ * @p_end: ptr to unsigned long for end address of the range
+ *
+ * Walks over detected online memory ranges.
+ */
+#define for_each_mem_detect_block(i, p_start, p_end) \
+ for (i = 0, __get_mem_detect_block(i, p_start, p_end); \
+ i < mem_detect.count; \
+ i++, __get_mem_detect_block(i, p_start, p_end))
+
+static inline void get_mem_detect_reserved(unsigned long *start,
+ unsigned long *size)
+{
+ *start = (unsigned long)mem_detect.entries_extended;
+ if (mem_detect.count > MEM_INLINED_ENTRIES)
+ *size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
+ else
+ *size = 0;
+}
+
+#endif
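
A minimal sketch of consuming the detected ranges with the iterator above (memblock_add() serves as an illustrative consumer; any early-boot walker follows the same shape):

        static void __init add_detected_memory(void)
        {
                unsigned long start, end;
                int i;

                /* Walks entries[] first, then the entries_extended[] spill-over. */
                for_each_mem_detect_block(i, &start, &end)
                        memblock_add(start, end - start);
        }
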
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index a8418e1379eb..bcfb6371086f 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -32,6 +32,8 @@ typedef struct {
unsigned int uses_cmm:1;
/* The gmaps associated with this context are allowed to use huge pages. */
unsigned int allow_gmap_hpage_1m:1;
+ /* The mmu context is for compat task */
+ unsigned int compat_mm:1;
} mm_context_t;
#define INIT_MM_CONTEXT(name) \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 0717ee76885d..dbd689d556ce 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -25,6 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
atomic_set(&mm->context.flush_count, 0);
mm->context.gmap_asce = 0;
mm->context.flush_mm = 0;
+ mm->context.compat_mm = 0;
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste ||
test_thread_flag(TIF_PGSTE) ||
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 41e3908b397f..a4d38092530a 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -161,6 +161,7 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+#define pfn_to_kaddr(pfn) pfn_to_virt(pfn)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0e7cb0dc9c33..411d435e7a7d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -341,6 +341,8 @@ static inline int is_module_addr(void *addr)
#define PTRS_PER_P4D _CRST_ENTRIES
#define PTRS_PER_PGD _CRST_ENTRIES
+#define MAX_PTRS_PER_P4D PTRS_PER_P4D
+
/*
* Segment table and region3 table entry encoding
* (R = read-only, I = invalid, y = young bit):
@@ -466,6 +468,12 @@ static inline int is_module_addr(void *addr)
_SEGMENT_ENTRY_YOUNG | \
_SEGMENT_ENTRY_PROTECT | \
_SEGMENT_ENTRY_NOEXEC)
+#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
+ _SEGMENT_ENTRY_LARGE | \
+ _SEGMENT_ENTRY_READ | \
+ _SEGMENT_ENTRY_WRITE | \
+ _SEGMENT_ENTRY_YOUNG | \
+ _SEGMENT_ENTRY_DIRTY)
/*
* Region3 entry (large page) protection definitions.
@@ -599,6 +607,14 @@ static inline int pgd_bad(pgd_t pgd)
return (pgd_val(pgd) & mask) != 0;
}
+static inline unsigned long pgd_pfn(pgd_t pgd)
+{
+ unsigned long origin_mask;
+
+ origin_mask = _REGION_ENTRY_ORIGIN;
+ return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
+}
+
static inline int p4d_folded(p4d_t p4d)
{
return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
@@ -1171,6 +1187,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
@@ -1210,7 +1227,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
-#define p4d_page(pud) pfn_to_page(p4d_pfn(p4d))
+#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
+#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h
index 053117ba7328..9b6e79077866 100644
--- a/arch/s390/include/asm/pkey.h
+++ b/arch/s390/include/asm/pkey.h
@@ -109,4 +109,30 @@ int pkey_verifykey(const struct pkey_seckey *seckey,
u16 *pcardnr, u16 *pdomain,
u16 *pkeysize, u32 *pattributes);
+/*
+ * In-kernel API: Generate (AES) random protected key.
+ * @param keytype one of the PKEY_KEYTYPE values
+ * @param protkey pointer to buffer receiving the protected key
+ * @return 0 on success, negative errno value on failure
+ */
+int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey);
+
+/*
+ * In-kernel API: Verify an (AES) protected key.
+ * @param protkey pointer to buffer containing the protected key to verify
+ * @return 0 on success, negative errno value on failure. In case the protected
+ * key is not valid, -EKEYREJECTED is returned.
+ */
+int pkey_verifyprotkey(const struct pkey_protkey *protkey);
+
+/*
+ * In-kernel API: Transform a key blob (of any type) into a protected key.
+ * @param key pointer to a buffer containing the key blob
+ * @param keylen size of the key blob in bytes
+ * @param protkey pointer to buffer receiving the protected key
+ * @return 0 on success, negative errno value on failure
+ */
+int pkey_keyblob2pkey(const __u8 *key, __u32 keylen,
+ struct pkey_protkey *protkey);
+
#endif /* _KAPI_PKEY_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 7f2953c15c37..34768e6ef4fb 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -242,7 +242,7 @@ static inline unsigned long current_stack_pointer(void)
return sp;
}
-static inline unsigned short stap(void)
+static __no_sanitize_address_or_inline unsigned short stap(void)
{
unsigned short cpu_address;
@@ -250,6 +250,55 @@ static inline unsigned short stap(void)
return cpu_address;
}
+#define CALL_ARGS_0() \
+ register unsigned long r2 asm("2")
+#define CALL_ARGS_1(arg1) \
+ register unsigned long r2 asm("2") = (unsigned long)(arg1)
+#define CALL_ARGS_2(arg1, arg2) \
+ CALL_ARGS_1(arg1); \
+ register unsigned long r3 asm("3") = (unsigned long)(arg2)
+#define CALL_ARGS_3(arg1, arg2, arg3) \
+ CALL_ARGS_2(arg1, arg2); \
+ register unsigned long r4 asm("4") = (unsigned long)(arg3)
+#define CALL_ARGS_4(arg1, arg2, arg3, arg4) \
+ CALL_ARGS_3(arg1, arg2, arg3); \
+ register unsigned long r4 asm("5") = (unsigned long)(arg4)
+#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \
+ CALL_ARGS_4(arg1, arg2, arg3, arg4); \
+ register unsigned long r4 asm("6") = (unsigned long)(arg5)
+
+#define CALL_FMT_0
+#define CALL_FMT_1 CALL_FMT_0, "0" (r2)
+#define CALL_FMT_2 CALL_FMT_1, "d" (r3)
+#define CALL_FMT_3 CALL_FMT_2, "d" (r4)
+#define CALL_FMT_4 CALL_FMT_3, "d" (r5)
+#define CALL_FMT_5 CALL_FMT_4, "d" (r6)
+
+#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
+#define CALL_CLOBBER_4 CALL_CLOBBER_5
+#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
+#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
+#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
+#define CALL_CLOBBER_0 CALL_CLOBBER_1
+
+#define CALL_ON_STACK(fn, stack, nr, args...) \
+({ \
+ CALL_ARGS_##nr(args); \
+ unsigned long prev; \
+ \
+ asm volatile( \
+ " la %[_prev],0(15)\n" \
+ " la 15,0(%[_stack])\n" \
+ " stg %[_prev],%[_bc](15)\n" \
+ " brasl 14,%[_fn]\n" \
+ " la 15,0(%[_prev])\n" \
+ : "+&d" (r2), [_prev] "=&a" (prev) \
+ : [_stack] "a" (stack), \
+ [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
+ [_fn] "X" (fn) CALL_FMT_##nr : CALL_CLOBBER_##nr); \
+ r2; \
+})
+
/*
* Give up the time slice of the virtual PU.
*/
@@ -287,7 +336,7 @@ static inline void __load_psw(psw_t psw)
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
-static inline void __load_psw_mask(unsigned long mask)
+static __no_sanitize_address_or_inline void __load_psw_mask(unsigned long mask)
{
unsigned long addr;
psw_t psw;
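
CALL_ON_STACK() loads up to five arguments into %r2-%r6 via the CALL_ARGS_n helpers, switches %r15 to the target stack, stores the old stack pointer into the new frame's back chain (so the unwinder can walk across the switch), and brasl's to fn; the return value comes back in r2. The irq.c hunk later in this patch is the first user:

        /* From the do_softirq_own_stack() conversion below: run
         * __do_softirq on the async stack, with zero arguments. */
        new = S390_lowcore.async_stack;
        CALL_ON_STACK(__do_softirq, new, 0);
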
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 9c9970a5dfb1..d46edde7e458 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -252,13 +252,11 @@ struct slsb {
* (for communication with upper layer programs)
* (only required for use with completion queues)
* @flags: flags indicating state of buffer
- * @aob: pointer to QAOB used for the particular SBAL
* @user: pointer to upper layer program's state information related to SBAL
* (stored in user1 data of QAOB)
*/
struct qdio_outbuf_state {
u8 flags;
- struct qaob *aob;
void *user;
};
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 3cae9168f63c..0cd4bda85eb1 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -95,6 +95,7 @@ extern struct sclp_info sclp;
struct zpci_report_error_header {
u8 version; /* Interface version byte */
u8 action; /* Action qualifier byte
+ * 0: Adapter Reset Request
* 1: Deconfigure and repair action requested
* (OpenCrypto Problem Call Home)
* 2: Informational Report
@@ -104,12 +105,17 @@ struct zpci_report_error_header {
u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */
} __packed;
+int sclp_early_read_info(void);
+int sclp_early_read_storage_info(void);
int sclp_early_get_core_info(struct sclp_core_info *info);
void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
void sclp_early_detect(void);
void sclp_early_printk(const char *s);
-void __sclp_early_printk(const char *s, unsigned int len);
+void sclp_early_printk_force(const char *s);
+void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
+int sclp_early_get_memsize(unsigned long *mem);
+int sclp_early_get_hsa_size(unsigned long *hsa_size);
int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
int sclp_core_deconfigure(u8 core);
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 724faede8ac5..7afe4620685c 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,4 +4,16 @@
#include <asm-generic/sections.h>
+/*
+ * .boot.data section contains variables "shared" between the decompressor and
+ * the decompressed kernel. The decompressor will store values in them, and
+ * copy over to the decompressed image before starting it.
+ *
+ * Each variable ends up in its own intermediate section .boot.data.<var name>;
+ * those sections are later sorted by alignment + name and merged together into
+ * the final .boot.data section, which should be identical in the decompressor
+ * and the decompressed kernel (this is checked during the build).
+ */
+#define __bootdata(var) __section(.boot.data.var) var
+
#endif
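
For example, the declaration in early.c later in this patch, char __bootdata(early_command_line)[COMMAND_LINE_SIZE];, expands to a definition placed in its own named section (sketch of the expansion, with __section stringifying its argument):

        char __section(.boot.data.early_command_line)
                early_command_line[COMMAND_LINE_SIZE];
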
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 1d66016f4170..efda97804aa4 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -65,12 +65,11 @@
#define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET))
#define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET))
+extern int noexec_disabled;
extern int memory_end_set;
extern unsigned long memory_end;
extern unsigned long max_physmem_end;
-extern void detect_memory_memblock(void);
-
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 50f26fc9acb2..116cc15a4b8a 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -53,6 +53,27 @@ char *strstr(const char *s1, const char *s2);
#undef __HAVE_ARCH_STRSEP
#undef __HAVE_ARCH_STRSPN
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+extern void *__memcpy(void *dest, const void *src, size_t n);
+extern void *__memset(void *s, int c, size_t n);
+extern void *__memmove(void *dest, const void *src, size_t n);
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */
+
void *__memset16(uint16_t *s, uint16_t v, size_t count);
void *__memset32(uint32_t *s, uint32_t v, size_t count);
void *__memset64(uint64_t *s, uint64_t v, size_t count);
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 3c883c368eb0..27248f42a03c 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -11,19 +11,24 @@
#include <linux/const.h>
/*
- * Size of kernel stack for each process
+ * General size of kernel stacks
*/
+#ifdef CONFIG_KASAN
+#define THREAD_SIZE_ORDER 3
+#else
#define THREAD_SIZE_ORDER 2
-#define ASYNC_ORDER 2
-
+#endif
+#define BOOT_STACK_ORDER 2
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
-#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
#ifndef __ASSEMBLY__
#include <asm/lowcore.h>
#include <asm/page.h>
#include <asm/processor.h>
+#define STACK_INIT_OFFSET \
+ (THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))
+
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
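
With 4 KiB pages this means THREAD_SIZE = 4 KiB << 2 = 16 KiB normally and 4 KiB << 3 = 32 KiB under KASAN, whose redzones and bigger stack frames need the extra headroom; the separate ASYNC_ORDER/ASYNC_SIZE definitions disappear because all kernel stacks now share the one THREAD_SIZE.
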
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index fd79c0d35dc4..a1fbf15d53aa 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -15,6 +15,7 @@
#define __IGNORE_pkey_alloc
#define __IGNORE_pkey_free
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
@@ -25,7 +26,6 @@
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLD_MMAP
@@ -34,6 +34,7 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
# ifdef CONFIG_COMPAT
# define __ARCH_WANT_COMPAT_SYS_TIME
+# define __ARCH_WANT_SYS_UTIME32
# endif
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
diff --git a/arch/s390/include/asm/vmlinux.lds.h b/arch/s390/include/asm/vmlinux.lds.h
new file mode 100644
index 000000000000..2d127f900352
--- /dev/null
+++ b/arch/s390/include/asm/vmlinux.lds.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/page.h>
+
+/*
+ * .boot.data section is shared between the decompressor code and the
+ * decompressed kernel. The decompressor will store values in it, and copy
+ * over to the decompressed image before starting it.
+ *
+ * .boot.data variables are kept in separate .boot.data.<var name> sections,
+ * which are sorted by alignment first, then by name, before being merged
+ * into the single .boot.data section. This way big holes caused by page
+ * aligned structs are avoided and the linker produces a consistent result.
+ */
+#define BOOT_DATA \
+ . = ALIGN(PAGE_SIZE); \
+ .boot.data : { \
+ __boot_data_start = .; \
+ *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.data*))) \
+ __boot_data_end = .; \
+ }
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index e364873e0d10..dc38a90cf091 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -18,3 +18,4 @@ generic-y += shmbuf.h
generic-y += sockios.h
generic-y += swab.h
generic-y += termbits.h
+generic-y += siginfo.h
\ No newline at end of file
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 9a50f02b9894..16511d97e8dc 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -160,6 +160,8 @@ struct kvm_s390_vm_cpu_subfunc {
#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2
#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3
+#define KVM_S390_VM_CRYPTO_ENABLE_APIE 4
+#define KVM_S390_VM_CRYPTO_DISABLE_APIE 5
/* kvm attributes for migration mode */
#define KVM_S390_VM_MIGRATION_STOP 0
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index 6f84a53c3270..c0e86ce4a00b 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -21,9 +21,13 @@
#define PKEY_IOCTL_MAGIC 'p'
#define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */
+#define PROTKEYBLOBSIZE 80 /* protected key blob size is always 80 bytes */
#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */
#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
+#define MINKEYBLOBSIZE SECKEYBLOBSIZE /* Minimum size of a key blob */
+#define MAXKEYBLOBSIZE PROTKEYBLOBSIZE /* Maximum size of a key blob */
+
/* defines for the type field within the pkey_protkey struct */
#define PKEY_KEYTYPE_AES_128 1
#define PKEY_KEYTYPE_AES_192 2
@@ -129,4 +133,34 @@ struct pkey_verifykey {
#define PKEY_VERIFY_ATTR_AES 0x00000001 /* key is an AES key */
#define PKEY_VERIFY_ATTR_OLD_MKVP 0x00000100 /* key has old MKVP value */
+/*
+ * Generate (AES) random protected key.
+ */
+struct pkey_genprotk {
+ __u32 keytype; /* in: key type to generate */
+ struct pkey_protkey protkey; /* out: the protected key */
+};
+
+#define PKEY_GENPROTK _IOWR(PKEY_IOCTL_MAGIC, 0x08, struct pkey_genprotk)
+
+/*
+ * Verify an (AES) protected key.
+ */
+struct pkey_verifyprotk {
+ struct pkey_protkey protkey; /* in: the protected key to verify */
+};
+
+#define PKEY_VERIFYPROTK _IOW(PKEY_IOCTL_MAGIC, 0x09, struct pkey_verifyprotk)
+
+/*
+ * Transform a key blob (of any type) into a protected key
+ */
+struct pkey_kblob2pkey {
+ __u8 __user *key; /* in: the key blob */
+ __u32 keylen; /* in: the key blob length */
+ struct pkey_protkey protkey; /* out: the protected key */
+};
+
+#define PKEY_KBLOB2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x0A, struct pkey_kblob2pkey)
+
#endif /* _UAPI_PKEY_H */
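
A user-space sketch of the new PKEY_KBLOB2PROTK ioctl (assuming the pkey misc device node at /dev/pkey; error handling trimmed to the essentials):

        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <asm/pkey.h>

        int kblob2protkey(const __u8 *blob, __u32 bloblen, struct pkey_protkey *pk)
        {
                struct pkey_kblob2pkey k2p;
                int fd, rc;

                fd = open("/dev/pkey", O_RDWR);
                if (fd < 0)
                        return -1;
                k2p.key = (__u8 *)blob; /* uapi struct takes a non-const pointer */
                k2p.keylen = bloblen;
                rc = ioctl(fd, PKEY_KBLOB2PROTK, &k2p);
                if (rc == 0)
                        *pk = k2p.protkey;
                close(fd);
                return rc;
        }
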
diff --git a/arch/s390/include/uapi/asm/siginfo.h b/arch/s390/include/uapi/asm/siginfo.h
deleted file mode 100644
index 6984820f2f1c..000000000000
--- a/arch/s390/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * S390 version
- *
- * Derived from "include/asm-i386/siginfo.h"
- */
-
-#ifndef _S390_SIGINFO_H
-#define _S390_SIGINFO_H
-
-#ifdef __s390x__
-#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-#endif
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index 2bb1f3bb98ac..42c81a95e97b 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -2,9 +2,9 @@
/*
* include/asm-s390/zcrypt.h
*
- * zcrypt 2.1.0 (user-visible header)
+ * zcrypt 2.2.1 (user-visible header)
*
- * Copyright IBM Corp. 2001, 2006
+ * Copyright IBM Corp. 2001, 2018
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -15,12 +15,15 @@
#define __ASM_S390_ZCRYPT_H
#define ZCRYPT_VERSION 2
-#define ZCRYPT_RELEASE 1
+#define ZCRYPT_RELEASE 2
#define ZCRYPT_VARIANT 1
#include <linux/ioctl.h>
#include <linux/compiler.h>
+/* Name of the zcrypt device driver. */
+#define ZCRYPT_NAME "zcrypt"
+
/**
* struct ica_rsa_modexpo
*
@@ -310,6 +313,16 @@ struct zcrypt_device_matrix_ext {
#define ZCRYPT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x5a, int[MAX_ZDEV_CARDIDS_EXT])
/*
+ * Support for multiple zcrypt device nodes.
+ */
+
+/* Nr of minor device node numbers to allocate. */
+#define ZCRYPT_MAX_MINOR_NODES 256
+
+/* Max number of possible ioctls */
+#define MAX_ZDEV_IOCTLS (1 << _IOC_NRBITS)
+
+/*
* Only deprecated defines, structs and ioctls below this line.
*/
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index dbfd1730e631..386b1abb217b 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -23,6 +23,10 @@ KCOV_INSTRUMENT_early_nobss.o := n
UBSAN_SANITIZE_early.o := n
UBSAN_SANITIZE_early_nobss.o := n
+KASAN_SANITIZE_early_nobss.o := n
+KASAN_SANITIZE_ipl.o := n
+KASAN_SANITIZE_machine_kexec.o := n
+
#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
@@ -47,7 +51,7 @@ obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
-obj-y += nospec-branch.o
+obj-y += nospec-branch.o ipl_vmparm.o
extra-y += head64.o vmlinux.lds
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 66e830f1c7bf..164bec175628 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -159,7 +159,7 @@ int main(void)
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
- OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
+ OFFSET(__LC_NODAT_STACK, lowcore, nodat_stack);
OFFSET(__LC_RESTART_STACK, lowcore, restart_stack);
OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index b65874b0b412..f268fca67e82 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -18,7 +18,7 @@
ENTRY(s390_base_mcck_handler)
basr %r13,0
-0: lg %r15,__LC_PANIC_STACK # load panic stack
+0: lg %r15,__LC_NODAT_STACK # load panic (nodat) stack
aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_mcck_handler_fn
lg %r9,0(%r1)
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 5b23c4f6e50c..cb7f55bbe06e 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -30,7 +30,7 @@
* The stack trace can start at any of the three stacks and can potentially
* touch all of them. The order is: panic stack, async stack, sync stack.
*/
-static unsigned long
+static unsigned long __no_sanitize_address
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
unsigned long low, unsigned long high)
{
@@ -77,11 +77,11 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
sp = __dump_trace(func, data, sp,
- S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
- S390_lowcore.panic_stack + frame_size);
+ S390_lowcore.nodat_stack + frame_size - THREAD_SIZE,
+ S390_lowcore.nodat_stack + frame_size);
#endif
sp = __dump_trace(func, data, sp,
- S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+ S390_lowcore.async_stack + frame_size - THREAD_SIZE,
S390_lowcore.async_stack + frame_size);
task = task ?: current;
__dump_trace(func, data, sp,
@@ -124,7 +124,7 @@ void show_registers(struct pt_regs *regs)
char *mode;
mode = user_mode(regs) ? "User" : "Krnl";
- printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
+ printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
if (!user_mode(regs))
pr_cont(" (%pSR)", (void *)regs->psw.addr);
pr_cont("\n");
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 5b28b434f8a1..af5c2b3f7065 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -29,10 +29,9 @@
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
+#include <asm/boot_data.h>
#include "entry.h"
-static void __init setup_boot_command_line(void);
-
/*
* Initialize storage key for kernel pages
*/
@@ -284,51 +283,11 @@ static int __init cad_setup(char *str)
}
early_param("cad", cad_setup);
-/* Set up boot command line */
-static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
-{
- char *parm, *delim;
- size_t rc, len;
-
- len = strlen(boot_command_line);
-
- delim = boot_command_line + len; /* '\0' character position */
- parm = boot_command_line + len + 1; /* append right after '\0' */
-
- rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
- if (rc) {
- if (*parm == '=')
- memmove(boot_command_line, parm + 1, rc);
- else
- *delim = ' '; /* replace '\0' with space */
- }
-}
-
-static inline int has_ebcdic_char(const char *str)
-{
- int i;
-
- for (i = 0; str[i]; i++)
- if (str[i] & 0x80)
- return 1;
- return 0;
-}
-
+char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
static void __init setup_boot_command_line(void)
{
- COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
- /* convert arch command line to ascii if necessary */
- if (has_ebcdic_char(COMMAND_LINE))
- EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
/* copy arch command line */
- strlcpy(boot_command_line, strstrip(COMMAND_LINE),
- ARCH_COMMAND_LINE_SIZE);
-
- /* append IPL PARM data to the boot command line */
- if (MACHINE_IS_VM)
- append_to_cmdline(append_ipl_vmparm);
-
- append_to_cmdline(append_ipl_scpdata);
+ strlcpy(boot_command_line, early_command_line, ARCH_COMMAND_LINE_SIZE);
}
static void __init check_image_bootable(void)
diff --git a/arch/s390/kernel/early_nobss.c b/arch/s390/kernel/early_nobss.c
index 2d84fc48df3a..8d73f7fae16e 100644
--- a/arch/s390/kernel/early_nobss.c
+++ b/arch/s390/kernel/early_nobss.c
@@ -13,8 +13,8 @@
#include <linux/string.h>
#include <asm/sections.h>
#include <asm/lowcore.h>
-#include <asm/setup.h>
#include <asm/timex.h>
+#include <asm/kasan.h>
#include "entry.h"
static void __init reset_tod_clock(void)
@@ -32,26 +32,6 @@ static void __init reset_tod_clock(void)
S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}
-static void __init rescue_initrd(void)
-{
- unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
-
- /*
- * Just like in case of IPL from VM reader we make sure there is a
- * gap of 4MB between end of kernel and start of initrd.
- * That way we can also be sure that saving an NSS will succeed,
- * which however only requires different segments.
- */
- if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
- return;
- if (!INITRD_START || !INITRD_SIZE)
- return;
- if (INITRD_START >= min_initrd_addr)
- return;
- memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
- INITRD_START = min_initrd_addr;
-}
-
static void __init clear_bss_section(void)
{
memset(__bss_start, 0, __bss_stop - __bss_start);
@@ -60,6 +40,6 @@ static void __init clear_bss_section(void)
void __init startup_init_nobss(void)
{
reset_tod_clock();
- rescue_initrd();
clear_bss_section();
+ kasan_early_init();
}
diff --git a/arch/s390/kernel/early_printk.c b/arch/s390/kernel/early_printk.c
index 9431784d7796..40c1dfec944e 100644
--- a/arch/s390/kernel/early_printk.c
+++ b/arch/s390/kernel/early_printk.c
@@ -10,7 +10,7 @@
static void sclp_early_write(struct console *con, const char *s, unsigned int len)
{
- __sclp_early_printk(s, len);
+ __sclp_early_printk(s, len, 0);
}
static struct console sclp_early_console = {
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 150130c897c3..724fba4d09d2 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -85,14 +85,34 @@ _LPP_OFFSET = __LC_LPP
#endif
.endm
- .macro CHECK_STACK stacksize,savearea
+ .macro CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
- tml %r15,\stacksize - CONFIG_STACK_GUARD
+ tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
lghi %r14,\savearea
jz stack_overflow
#endif
.endm
+ .macro CHECK_VMAP_STACK savearea,oklabel
+#ifdef CONFIG_VMAP_STACK
+ lgr %r14,%r15
+ nill %r14,0x10000 - STACK_SIZE
+ oill %r14,STACK_INIT
+ clg %r14,__LC_KERNEL_STACK
+ je \oklabel
+ clg %r14,__LC_ASYNC_STACK
+ je \oklabel
+ clg %r14,__LC_NODAT_STACK
+ je \oklabel
+ clg %r14,__LC_RESTART_STACK
+ je \oklabel
+ lghi %r14,\savearea
+ j stack_overflow
+#else
+ j \oklabel
+#endif
+ .endm
+
.macro SWITCH_ASYNC savearea,timer
tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
@@ -104,11 +124,11 @@ _LPP_OFFSET = __LC_LPP
brasl %r14,cleanup_critical
tmhh %r8,0x0001 # retest problem state after cleanup
jnz 1f
-0: lg %r14,__LC_ASYNC_STACK # are we already on the async stack?
+0: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
slgr %r14,%r15
srag %r14,%r14,STACK_SHIFT
jnz 2f
- CHECK_STACK 1<<STACK_SHIFT,\savearea
+ CHECK_STACK \savearea
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 3f
1: UPDATE_VTIME %r14,%r15,\timer
@@ -600,9 +620,10 @@ ENTRY(pgm_check_handler)
jnz 1f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
-1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+1: CHECK_STACK __LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 4f
+ # CHECK_VMAP_STACK branches to stack_overflow or 4f
+ CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lg %r15,__LC_KERNEL_STACK
@@ -1136,7 +1157,8 @@ ENTRY(mcck_int_handler)
jnz 4f
TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
jno .Lmcck_panic
-4: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
+4: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
+ SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
@@ -1163,7 +1185,6 @@ ENTRY(mcck_int_handler)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
- ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
jno .Lmcck_return
TRACE_IRQS_OFF
@@ -1182,7 +1203,7 @@ ENTRY(mcck_int_handler)
lpswe __LC_RETURN_MCCK_PSW
.Lmcck_panic:
- lg %r15,__LC_PANIC_STACK
+ lg %r15,__LC_NODAT_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15)
j .Lmcck_skip
@@ -1193,12 +1214,10 @@ ENTRY(restart_int_handler)
ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
stg %r15,__LC_SAVE_AREA_RESTART
lg %r15,__LC_RESTART_STACK
- aghi %r15,-__PT_SIZE # create pt_regs on stack
- xc 0(__PT_SIZE,%r15),0(%r15)
- stmg %r0,%r14,__PT_R0(%r15)
- mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
- mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
- aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
+ xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
+ stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
+ mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
+ mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
lg %r2,__LC_RESTART_DATA
@@ -1216,14 +1235,14 @@ ENTRY(restart_int_handler)
.section .kprobes.text, "ax"
-#ifdef CONFIG_CHECK_STACK
+#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
* The synchronous or the asynchronous stack overflowed. We are dead.
* No need to properly save the registers, we are going to panic anyway.
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
- lg %r15,__LC_PANIC_STACK # change to panic stack
+ lg %r15,__LC_NODAT_STACK # change to panic (nodat) stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
stmg %r8,%r9,__PT_PSW(%r11)
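
With CONFIG_VMAP_STACK the old CHECK_STACK trick (testing address bits against the guard zone) is not enough, since a corrupted %r15 may point anywhere in the vmalloc area; CHECK_VMAP_STACK instead rounds %r15 down to its STACK_SIZE-aligned base and compares against the four stacks the lowcore knows about. A C rendering of that test (a sketch, not kernel code; STACK_INIT stands for the topmost-frame offset carried by the lowcore stack pointers):

        static bool sp_on_known_stack(unsigned long sp)
        {
                /* Round down to the stack base, then point at the top frame. */
                unsigned long top = (sp & ~(STACK_SIZE - 1)) + STACK_INIT;

                return top == S390_lowcore.kernel_stack ||
                       top == S390_lowcore.async_stack ||
                       top == S390_lowcore.nodat_stack ||
                       top == S390_lowcore.restart_stack;
        }
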
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 472fa2f1a4a5..c3816ae108b0 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -86,4 +86,7 @@ DECLARE_PER_CPU(u64, mt_cycles[8]);
void gs_load_bc_cb(struct pt_regs *regs);
void set_fs_fixup(void);
+unsigned long stack_alloc(void);
+void stack_free(unsigned long stack);
+
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 6d14ad42ba88..57bba24b1c27 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -14,6 +14,7 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
+#include <asm/ptrace.h>
__HEAD
ENTRY(startup_continue)
@@ -35,10 +36,7 @@ ENTRY(startup_continue)
#
larl %r14,init_task
stg %r14,__LC_CURRENT
- larl %r15,init_thread_union
- aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE
- stg %r15,__LC_KERNEL_STACK # set end of kernel stack
- aghi %r15,-160
+ larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD
#
# Early setup functions that may not rely on an initialized bss section,
# like moving the initrd. Returns with an initialized bss section.
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 4296d7e61fb6..18a5d6317acc 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -29,6 +29,8 @@
#include <asm/checksum.h>
#include <asm/debug.h>
#include <asm/os_info.h>
+#include <asm/sections.h>
+#include <asm/boot_data.h>
#include "entry.h"
#define IPL_PARM_BLOCK_VERSION 0
@@ -117,6 +119,9 @@ static char *dump_type_str(enum dump_type type)
}
}
+struct ipl_parameter_block __bootdata(early_ipl_block);
+int __bootdata(early_ipl_block_valid);
+
static int ipl_block_valid;
static struct ipl_parameter_block ipl_block;
@@ -151,6 +156,8 @@ static inline int __diag308(unsigned long subcode, void *addr)
int diag308(unsigned long subcode, void *addr)
{
+ if (IS_ENABLED(CONFIG_KASAN))
+ __arch_local_irq_stosm(0x04); /* enable DAT */
diag_stat_inc(DIAG_STAT_X308);
return __diag308(subcode, addr);
}
@@ -262,115 +269,16 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
-/* VM IPL PARM routines */
-static size_t reipl_get_ascii_vmparm(char *dest, size_t size,
- const struct ipl_parameter_block *ipb)
-{
- int i;
- size_t len;
- char has_lowercase = 0;
-
- len = 0;
- if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
- (ipb->ipl_info.ccw.vm_parm_len > 0)) {
-
- len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
- memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
- /* If at least one character is lowercase, we assume mixed
- * case; otherwise we convert everything to lowercase.
- */
- for (i = 0; i < len; i++)
- if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
- (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
- (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
- has_lowercase = 1;
- break;
- }
- if (!has_lowercase)
- EBC_TOLOWER(dest, len);
- EBCASC(dest, len);
- }
- dest[len] = 0;
-
- return len;
-}
-
-size_t append_ipl_vmparm(char *dest, size_t size)
-{
- size_t rc;
-
- rc = 0;
- if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)
- rc = reipl_get_ascii_vmparm(dest, size, &ipl_block);
- else
- dest[0] = 0;
- return rc;
-}
-
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char parm[DIAG308_VMPARM_SIZE + 1] = {};
- append_ipl_vmparm(parm, sizeof(parm));
+ if (ipl_block_valid && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
+ ipl_block_get_ascii_vmparm(parm, sizeof(parm), &ipl_block);
return sprintf(page, "%s\n", parm);
}
-static size_t scpdata_length(const char* buf, size_t count)
-{
- while (count) {
- if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
- break;
- count--;
- }
- return count;
-}
-
-static size_t reipl_append_ascii_scpdata(char *dest, size_t size,
- const struct ipl_parameter_block *ipb)
-{
- size_t count;
- size_t i;
- int has_lowercase;
-
- count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
- ipb->ipl_info.fcp.scp_data_len));
- if (!count)
- goto out;
-
- has_lowercase = 0;
- for (i = 0; i < count; i++) {
- if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
- count = 0;
- goto out;
- }
- if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
- has_lowercase = 1;
- }
-
- if (has_lowercase)
- memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
- else
- for (i = 0; i < count; i++)
- dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
-out:
- dest[count] = '\0';
- return count;
-}
-
-size_t append_ipl_scpdata(char *dest, size_t len)
-{
- size_t rc;
-
- rc = 0;
- if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP)
- rc = reipl_append_ascii_scpdata(dest, len, &ipl_block);
- else
- dest[0] = 0;
- return rc;
-}
-
-
static struct kobj_attribute sys_ipl_vm_parm_attr =
__ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
@@ -564,7 +472,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
{
char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
- reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
+ ipl_block_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
return sprintf(page, "%s\n", vmparm);
}
@@ -1769,11 +1677,10 @@ void __init setup_ipl(void)
void __init ipl_store_parameters(void)
{
- int rc;
-
- rc = diag308(DIAG308_STORE, &ipl_block);
- if (rc == DIAG308_RC_OK && ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
+ if (early_ipl_block_valid) {
+ memcpy(&ipl_block, &early_ipl_block, sizeof(ipl_block));
ipl_block_valid = 1;
+ }
}
void s390_reset_system(void)
diff --git a/arch/s390/kernel/ipl_vmparm.c b/arch/s390/kernel/ipl_vmparm.c
new file mode 100644
index 000000000000..411838c0a0af
--- /dev/null
+++ b/arch/s390/kernel/ipl_vmparm.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <asm/ebcdic.h>
+#include <asm/ipl.h>
+
+/* VM IPL PARM routines */
+size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
+ const struct ipl_parameter_block *ipb)
+{
+ int i;
+ size_t len;
+ char has_lowercase = 0;
+
+ len = 0;
+ if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
+ (ipb->ipl_info.ccw.vm_parm_len > 0)) {
+
+ len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
+ memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
+ /* If at least one character is lowercase, we assume mixed
+ * case; otherwise we convert everything to lowercase.
+ */
+ for (i = 0; i < len; i++)
+ if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
+ (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
+ (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
+ has_lowercase = 1;
+ break;
+ }
+ if (!has_lowercase)
+ EBC_TOLOWER(dest, len);
+ EBCASC(dest, len);
+ }
+ dest[len] = 0;
+
+ return len;
+}
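
A minimal sketch of the lowercase test the loop above performs, pulled out
for readability. The range constants come straight from the loop; the
standalone helper name is hypothetical:

	#include <stdbool.h>

	/* EBCDIC lowercase letters occupy three disjoint byte ranges. */
	static bool ebcdic_is_lower(unsigned char c)
	{
		return (c >= 0x81 && c <= 0x89) ||	/* a-i */
		       (c >= 0x91 && c <= 0x99) ||	/* j-r */
		       (c >= 0xa2 && c <= 0xa9);	/* s-z */
	}

A single lowercase byte is enough to assume mixed case; an all-uppercase
VM parameter is folded with EBC_TOLOWER() before EBCASC() converts the
buffer from EBCDIC to ASCII.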
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 3d17c41074ca..0e8d68bac82c 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -172,15 +172,7 @@ void do_softirq_own_stack(void)
/* Check against async. stack address range. */
new = S390_lowcore.async_stack;
if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
- /* Need to switch to the async. stack. */
- new -= STACK_FRAME_OVERHEAD;
- ((struct stack_frame *) new)->back_chain = old;
- asm volatile(" la 15,0(%0)\n"
- " brasl 14,__do_softirq\n"
- " la 15,0(%1)\n"
- : : "a" (new), "a" (old)
- : "0", "1", "2", "3", "4", "5", "14",
- "cc", "memory" );
+ CALL_ON_STACK(__do_softirq, new, 0);
} else {
/* We are already on the async stack. */
__do_softirq();
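
Judging by the call sites this series introduces (zero, one and two extra
arguments), CALL_ON_STACK(fn, stack, nr_args, ...) packages the stack
switch that the removed inline assembly spelled out by hand. A hedged
sketch of the pattern it abstracts, modeled on that removed code; the
register usage and clobber list are illustrative, not the macro's actual
expansion:

	/* Sketch: run fn on "stack", back-chaining the new frame to the
	 * old stack pointer so the unwinder can cross the switch. */
	static inline void call_on_stack_sketch(void (*fn)(void),
						unsigned long stack,
						unsigned long old_sp)
	{
		unsigned long new_sp = stack - STACK_FRAME_OVERHEAD;

		((struct stack_frame *) new_sp)->back_chain = old_sp;
		asm volatile(
			"	la	15,0(%0)\n"	/* switch stacks */
			"	basr	14,%2\n"	/* call fn */
			"	la	15,0(%1)\n"	/* switch back */
			: : "a" (new_sp), "a" (old_sp), "a" (fn)
			: "14", "cc", "memory");
	}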
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index 43f8430fb67d..50a1798604a8 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -33,13 +33,13 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
{
/* brcl 15,offset */
insn->opcode = 0xc0f4;
- insn->offset = (entry->target - entry->code) >> 1;
+ insn->offset = (jump_entry_target(entry) - jump_entry_code(entry)) >> 1;
}
static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
struct insn *new)
{
- unsigned char *ipc = (unsigned char *)entry->code;
+ unsigned char *ipc = (unsigned char *)jump_entry_code(entry);
unsigned char *ipe = (unsigned char *)expected;
unsigned char *ipn = (unsigned char *)new;
@@ -59,6 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
int init)
{
+ void *code = (void *)jump_entry_code(entry);
struct insn old, new;
if (type == JUMP_LABEL_JMP) {
@@ -69,13 +70,13 @@ static void __jump_label_transform(struct jump_entry *entry,
jump_label_make_nop(entry, &new);
}
if (init) {
- if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
+ if (memcmp(code, &orignop, sizeof(orignop)))
jump_label_bug(entry, &orignop, &new);
} else {
- if (memcmp((void *)entry->code, &old, sizeof(old)))
+ if (memcmp(code, &old, sizeof(old)))
jump_label_bug(entry, &old, &new);
}
- s390_kernel_write((void *)entry->code, &new, sizeof(new));
+ s390_kernel_write(code, &new, sizeof(new));
}
static int __sm_arch_jump_label_transform(void *data)
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b7020e721ae3..cb582649aba6 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -142,18 +142,27 @@ static noinline void __machine_kdump(void *image)
}
#endif
+static unsigned long do_start_kdump(unsigned long addr)
+{
+ struct kimage *image = (struct kimage *) addr;
+ int (*start_kdump)(int) = (void *)image->start;
+ int rc;
+
+ __arch_local_irq_stnsm(0xfb); /* disable DAT */
+ rc = start_kdump(0);
+ __arch_local_irq_stosm(0x04); /* enable DAT */
+ return rc;
+}
+
/*
* Check if kdump checksums are valid: We call purgatory with parameter "0"
*/
static bool kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
- int (*start_kdump)(int) = (void *)image->start;
int rc;
- __arch_local_irq_stnsm(0xfb); /* disable DAT */
- rc = start_kdump(0);
- __arch_local_irq_stosm(0x04); /* enable DAT */
+ rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
return rc == 0;
#else
return false;
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index d298d3cb46d0..31889db609e9 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
#include <asm/alternative.h>
@@ -32,12 +33,18 @@
void *module_alloc(unsigned long size)
{
+ void *p;
+
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC,
- 0, NUMA_NO_NODE,
- __builtin_return_address(0));
+ p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ if (p && (kasan_module_alloc(p, size) < 0)) {
+ vfree(p);
+ return NULL;
+ }
+ return p;
}
void module_arch_freeing_init(struct module *mod)
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 5c53e977be62..7bf604ff50a1 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -2045,14 +2045,17 @@ static int __init init_cpum_sampling_pmu(void)
}
sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
- if (!sfdbg)
+ if (!sfdbg) {
pr_err("Registering for s390dbf failed\n");
+ return -ENOMEM;
+ }
debug_register_view(sfdbg, &debug_sprintf_view);
err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
if (err) {
pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
+ debug_unregister(sfdbg);
goto out;
}
@@ -2061,6 +2064,7 @@ static int __init init_cpum_sampling_pmu(void)
pr_cpumsf_err(RS_INIT_FAILURE_PERF);
unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
+ debug_unregister(sfdbg);
goto out;
}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c637c12f9e37..a2e952b66248 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -49,6 +49,7 @@
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
+#include <linux/start_kernel.h>
#include <asm/ipl.h>
#include <asm/facility.h>
@@ -69,6 +70,7 @@
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
+#include <asm/mem_detect.h>
#include "entry.h"
/*
@@ -88,9 +90,11 @@ char elf_platform[ELF_PLATFORM_SIZE];
unsigned long int_hwcap = 0;
-int __initdata memory_end_set;
-unsigned long __initdata memory_end;
-unsigned long __initdata max_physmem_end;
+int __bootdata(noexec_disabled);
+int __bootdata(memory_end_set);
+unsigned long __bootdata(memory_end);
+unsigned long __bootdata(max_physmem_end);
+struct mem_detect_info __bootdata(mem_detect);
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
@@ -283,15 +287,6 @@ void machine_power_off(void)
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);
-static int __init early_parse_mem(char *p)
-{
- memory_end = memparse(p, &p);
- memory_end &= PAGE_MASK;
- memory_end_set = 1;
- return 0;
-}
-early_param("mem", early_parse_mem);
-
static int __init parse_vmalloc(char *arg)
{
if (!arg)
@@ -303,6 +298,78 @@ early_param("vmalloc", parse_vmalloc);
void *restart_stack __section(.data);
+unsigned long stack_alloc(void)
+{
+#ifdef CONFIG_VMAP_STACK
+ return (unsigned long)
+ __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
+ VMALLOC_START, VMALLOC_END,
+ THREADINFO_GFP,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+#else
+ return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+#endif
+}
+
+void stack_free(unsigned long stack)
+{
+#ifdef CONFIG_VMAP_STACK
+ vfree((void *) stack);
+#else
+ free_pages(stack, THREAD_SIZE_ORDER);
+#endif
+}
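
Throughout this patch, STACK_INIT_OFFSET locates the initial frame inside
a freshly allocated stack. Its definition is not part of this hunk; going
by the setup_lowcore() change below, it plausibly expands to the usual
s390 layout - an assumption, shown only to make the arithmetic readable:

	/* Assumed definition: leave room for one stack frame plus saved
	 * pt_regs at the top of the THREAD_SIZE-sized stack. */
	#define STACK_INIT_OFFSET \
		(THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs))

	/* Typical use, as in arch_early_irq_init() above:
	 *	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
	 */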
+
+int __init arch_early_irq_init(void)
+{
+ unsigned long stack;
+
+ stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+ if (!stack)
+ panic("Couldn't allocate async stack");
+ S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
+ return 0;
+}
+
+static int __init async_stack_realloc(void)
+{
+ unsigned long old, new;
+
+ old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
+ new = stack_alloc();
+ if (!new)
+ panic("Couldn't allocate async stack");
+ S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
+ free_pages(old, THREAD_SIZE_ORDER);
+ return 0;
+}
+early_initcall(async_stack_realloc);
+
+void __init arch_call_rest_init(void)
+{
+ struct stack_frame *frame;
+ unsigned long stack;
+
+ stack = stack_alloc();
+ if (!stack)
+ panic("Couldn't allocate kernel stack");
+ current->stack = (void *) stack;
+#ifdef CONFIG_VMAP_STACK
+ current->stack_vm_area = (void *) stack;
+#endif
+ set_task_stack_end_magic(current);
+ stack += STACK_INIT_OFFSET;
+ S390_lowcore.kernel_stack = stack;
+ frame = (struct stack_frame *) stack;
+ memset(frame, 0, sizeof(*frame));
+ /* Branch to rest_init on the new stack, never returns */
+ asm volatile(
+ " la 15,0(%[_frame])\n"
+ " jg rest_init\n"
+ : : [_frame] "a" (frame));
+}
+
static void __init setup_lowcore(void)
{
struct lowcore *lc;
@@ -329,14 +396,8 @@ static void __init setup_lowcore(void)
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->io_new_psw.addr = (unsigned long) io_int_handler;
lc->clock_comparator = clock_comparator_max;
- lc->kernel_stack = ((unsigned long) &init_thread_union)
+ lc->nodat_stack = ((unsigned long) &init_thread_union)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- lc->async_stack = (unsigned long)
- memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
- + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
- lc->panic_stack = (unsigned long)
- memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
- + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
@@ -357,8 +418,12 @@ static void __init setup_lowcore(void)
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;
- restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
- restart_stack += ASYNC_SIZE;
+ /*
+ * Allocate the global restart stack which is the same for
+ * all CPUs in case *one* of them does a PSW restart.
+ */
+ restart_stack = memblock_virt_alloc(THREAD_SIZE, THREAD_SIZE);
+ restart_stack += STACK_INIT_OFFSET;
/*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
@@ -467,19 +532,26 @@ static void __init setup_memory_end(void)
{
unsigned long vmax, vmalloc_size, tmp;
- /* Choose kernel address space layout: 2, 3, or 4 levels. */
+ /* Choose kernel address space layout: 3 or 4 levels. */
vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
- tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
- tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
- if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
- vmax = _REGION2_SIZE; /* 3-level kernel page table */
- else
- vmax = _REGION1_SIZE; /* 4-level kernel page table */
+ if (IS_ENABLED(CONFIG_KASAN)) {
+ vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)
+ ? _REGION1_SIZE
+ : _REGION2_SIZE;
+ } else {
+ tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
+ tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
+ if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
+ vmax = _REGION2_SIZE; /* 3-level kernel page table */
+ else
+ vmax = _REGION1_SIZE; /* 4-level kernel page table */
+ }
+
/* module area is at the end of the kernel address space. */
MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
- VMALLOC_START = vmax - vmalloc_size;
+ VMALLOC_START = VMALLOC_END - vmalloc_size;
/* Split remaining virtual space between 1:1 mapping & vmemmap array */
tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
@@ -491,7 +563,12 @@ static void __init setup_memory_end(void)
vmemmap = (struct page *) tmp;
/* Take care that memory_end is set and <= vmemmap */
- memory_end = min(memory_end ?: max_physmem_end, tmp);
+ memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
+#ifdef CONFIG_KASAN
+ /* fit in kasan shadow memory region between 1:1 and vmemmap */
+ memory_end = min(memory_end, KASAN_SHADOW_START);
+ vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
+#endif
max_pfn = max_low_pfn = PFN_DOWN(memory_end);
memblock_remove(memory_end, ULONG_MAX);
@@ -532,17 +609,8 @@ static struct notifier_block kdump_mem_nb = {
*/
static void reserve_memory_end(void)
{
-#ifdef CONFIG_CRASH_DUMP
- if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
- !OLDMEM_BASE && sclp.hsa_size) {
- memory_end = sclp.hsa_size;
- memory_end &= PAGE_MASK;
- memory_end_set = 1;
- }
-#endif
- if (!memory_end_set)
- return;
- memblock_reserve(memory_end, ULONG_MAX);
+ if (memory_end_set)
+ memblock_reserve(memory_end, ULONG_MAX);
}
/*
@@ -649,6 +717,62 @@ static void __init reserve_initrd(void)
#endif
}
+static void __init reserve_mem_detect_info(void)
+{
+ unsigned long start, size;
+
+ get_mem_detect_reserved(&start, &size);
+ if (size)
+ memblock_reserve(start, size);
+}
+
+static void __init free_mem_detect_info(void)
+{
+ unsigned long start, size;
+
+ get_mem_detect_reserved(&start, &size);
+ if (size)
+ memblock_free(start, size);
+}
+
+static void __init memblock_physmem_add(phys_addr_t start, phys_addr_t size)
+{
+ memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
+ start, start + size - 1);
+ memblock_add_range(&memblock.memory, start, size, 0, 0);
+ memblock_add_range(&memblock.physmem, start, size, 0, 0);
+}
+
+static const char * __init get_mem_info_source(void)
+{
+ switch (mem_detect.info_source) {
+ case MEM_DETECT_SCLP_STOR_INFO:
+ return "sclp storage info";
+ case MEM_DETECT_DIAG260:
+ return "diag260";
+ case MEM_DETECT_SCLP_READ_INFO:
+ return "sclp read info";
+ case MEM_DETECT_BIN_SEARCH:
+ return "binary search";
+ }
+ return "none";
+}
+
+static void __init memblock_add_mem_detect_info(void)
+{
+ unsigned long start, end;
+ int i;
+
+ memblock_dbg("physmem info source: %s (%hhd)\n",
+ get_mem_info_source(), mem_detect.info_source);
+ /* keep memblock lists close to the kernel */
+ memblock_set_bottom_up(true);
+ for_each_mem_detect_block(i, &start, &end)
+ memblock_physmem_add(start, end - start);
+ memblock_set_bottom_up(false);
+ memblock_dump_all();
+}
+
/*
* Check for initrd being in usable memory
*/
@@ -913,11 +1037,13 @@ void __init setup_arch(char **cmdline_p)
reserve_oldmem();
reserve_kernel();
reserve_initrd();
+ reserve_mem_detect_info();
memblock_allow_resize();
/* Get information about *all* installed memory */
- detect_memory_memblock();
+ memblock_add_mem_detect_info();
+ free_mem_detect_info();
remove_oldmem();
/*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2f8f7d7dd9a8..1b3188f57b58 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -186,36 +186,34 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
pcpu_sigp_retry(pcpu, order, 0);
}
-#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
-#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
-
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
- unsigned long async_stack, panic_stack;
+ unsigned long async_stack, nodat_stack;
struct lowcore *lc;
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
- async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
- panic_stack = __get_free_page(GFP_KERNEL);
- if (!pcpu->lowcore || !panic_stack || !async_stack)
+ nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+ if (!pcpu->lowcore || !nodat_stack)
goto out;
} else {
- async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
- panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
+ nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
}
+ async_stack = stack_alloc();
+ if (!async_stack)
+ goto out;
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
- lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
- lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
+ lc->async_stack = async_stack + STACK_INIT_OFFSET;
+ lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
if (nmi_alloc_per_cpu(lc))
- goto out;
+ goto out_async;
if (vdso_alloc_per_cpu(lc))
goto out_mcesa;
lowcore_ptr[cpu] = lc;
@@ -224,10 +222,11 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
out_mcesa:
nmi_free_per_cpu(lc);
+out_async:
+ stack_free(async_stack);
out:
if (pcpu != &pcpu_devices[0]) {
- free_page(panic_stack);
- free_pages(async_stack, ASYNC_ORDER);
+ free_pages(nodat_stack, THREAD_SIZE_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}
return -ENOMEM;
@@ -237,15 +236,21 @@ out:
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
+ unsigned long async_stack, nodat_stack, lowcore;
+
+ nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
+ async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
+ lowcore = (unsigned long) pcpu->lowcore;
+
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
vdso_free_per_cpu(pcpu->lowcore);
nmi_free_per_cpu(pcpu->lowcore);
+ stack_free(async_stack);
if (pcpu == &pcpu_devices[0])
return;
- free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
- free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
- free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
+ free_pages(nodat_stack, THREAD_SIZE_ORDER);
+ free_pages(lowcore, LC_ORDER);
}
#endif /* CONFIG_HOTPLUG_CPU */
@@ -293,7 +298,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
struct lowcore *lc = pcpu->lowcore;
- lc->restart_stack = lc->kernel_stack;
+ lc->restart_stack = lc->nodat_stack;
lc->restart_fn = (unsigned long) func;
lc->restart_data = (unsigned long) data;
lc->restart_source = -1UL;
@@ -303,15 +308,21 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
/*
* Call function via PSW restart on pcpu and stop the current cpu.
*/
-static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
- void *data, unsigned long stack)
+static void __pcpu_delegate(void (*func)(void*), void *data)
+{
+ func(data); /* should not return */
+}
+
+static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
+ void (*func)(void *),
+ void *data, unsigned long stack)
{
struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
unsigned long source_cpu = stap();
- __load_psw_mask(PSW_KERNEL_BITS);
+ __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
if (pcpu->address == source_cpu)
- func(data); /* should not return */
+ CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
/* Stop target cpu (if func returns this stops the current cpu). */
pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
/* Restart func on the target cpu and stop the current cpu. */
@@ -372,8 +383,7 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
pcpu_delegate(&pcpu_devices[0], func, data,
- pcpu_devices->lowcore->panic_stack -
- PANIC_FRAME_OFFSET + PAGE_SIZE);
+ pcpu_devices->lowcore->nodat_stack);
}
int smp_find_processor_id(u16 address)
@@ -791,37 +801,42 @@ void __init smp_detect_cpus(void)
memblock_free_early((unsigned long)info, sizeof(*info));
}
-/*
- * Activate a secondary processor.
- */
-static void smp_start_secondary(void *cpuvoid)
+static void smp_init_secondary(void)
{
int cpu = smp_processor_id();
S390_lowcore.last_update_clock = get_tod_clock();
- S390_lowcore.restart_stack = (unsigned long) restart_stack;
- S390_lowcore.restart_fn = (unsigned long) do_restart;
- S390_lowcore.restart_data = 0;
- S390_lowcore.restart_source = -1UL;
restore_access_regs(S390_lowcore.access_regs_save_area);
- __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
- __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
cpu_init();
preempt_disable();
init_cpu_timer();
vtime_init();
pfault_init();
- notify_cpu_starting(cpu);
+ notify_cpu_starting(smp_processor_id());
if (topology_cpu_dedicated(cpu))
set_cpu_flag(CIF_DEDICATED_CPU);
else
clear_cpu_flag(CIF_DEDICATED_CPU);
- set_cpu_online(cpu, true);
+ set_cpu_online(smp_processor_id(), true);
inc_irq_stat(CPU_RST);
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
+/*
+ * Activate a secondary processor.
+ */
+static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
+{
+ S390_lowcore.restart_stack = (unsigned long) restart_stack;
+ S390_lowcore.restart_fn = (unsigned long) do_restart;
+ S390_lowcore.restart_data = 0;
+ S390_lowcore.restart_source = -1UL;
+ __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
+ __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
+ CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
+}
+
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
index 0859cde36f75..888cc2f166db 100644
--- a/arch/s390/kernel/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -183,17 +183,19 @@ static void fill_hdr(struct sthyi_sctns *sctns)
static void fill_stsi_mac(struct sthyi_sctns *sctns,
struct sysinfo_1_1_1 *sysinfo)
{
+ sclp_ocf_cpc_name_copy(sctns->mac.infmname);
+ if (*(u64 *)sctns->mac.infmname != 0)
+ sctns->mac.infmval1 |= MAC_NAME_VLD;
+
if (stsi(sysinfo, 1, 1, 1))
return;
- sclp_ocf_cpc_name_copy(sctns->mac.infmname);
-
memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype));
memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu));
memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman));
memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq));
- sctns->mac.infmval1 |= MAC_ID_VLD | MAC_NAME_VLD;
+ sctns->mac.infmval1 |= MAC_ID_VLD;
}
static void fill_stsi_par(struct sthyi_sctns *sctns,
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index a049a7b9d6e8..537f97fde37f 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -29,10 +29,11 @@
.section .text
ENTRY(swsusp_arch_suspend)
- stmg %r6,%r15,__SF_GPRS(%r15)
+ lg %r1,__LC_NODAT_STACK
+ aghi %r1,-STACK_FRAME_OVERHEAD
+ stmg %r6,%r15,__SF_GPRS(%r1)
+ stg %r15,__SF_BACKCHAIN(%r1)
lgr %r1,%r15
- aghi %r15,-STACK_FRAME_OVERHEAD
- stg %r1,__SF_BACKCHAIN(%r15)
/* Store FPU registers */
brasl %r14,save_fpu_regs
@@ -197,13 +198,9 @@ pgm_check_entry:
brc 2,3b /* busy, try again */
/* Suspend CPU not available -> panic */
- larl %r15,init_thread_union
- ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+ larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD
larl %r2,.Lpanic_string
- lghi %r1,0
- sam31
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE
- brasl %r14,sclp_early_printk
+ brasl %r14,sclp_early_printk_force
larl %r3,.Ldisabled_wait_31
lpsw 0(%r3)
4:
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 3031cc6dd0ab..ec31b48a42a5 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -56,7 +56,7 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
vdso_pagelist = vdso64_pagelist;
vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
- if (is_compat_task()) {
+ if (vma->vm_mm->context.compat_mm) {
vdso_pagelist = vdso32_pagelist;
vdso_pages = vdso32_pages;
}
@@ -77,7 +77,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
- if (is_compat_task())
+ if (vma->vm_mm->context.compat_mm)
vdso_pages = vdso32_pages;
#endif
@@ -224,8 +224,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
- if (is_compat_task())
+ if (is_compat_task()) {
vdso_pages = vdso32_pages;
+ mm->context.compat_mm = 1;
+ }
#endif
/*
* vDSO has a problem and was disabled, just don't "enable" it for
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index c5c856f320bc..eb8aebea3ea7 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -28,9 +28,10 @@ obj-y += vdso32_wrapper.o
extra-y += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
-# Disable gcov profiling and ubsan for VDSO code
+# Disable gcov profiling, ubsan and kasan for VDSO code
GCOV_PROFILE := n
UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index a9418bf975db..ada5c11a16e5 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -10,6 +10,7 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
+#include <asm/ptrace.h>
.text
.align 4
@@ -18,8 +19,8 @@
__kernel_clock_gettime:
CFI_STARTPROC
ahi %r15,-16
- CFI_DEF_CFA_OFFSET 176
- CFI_VAL_OFFSET 15, -160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+ CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
basr %r5,0
0: al %r5,21f-0b(%r5) /* get &_vdso_data */
chi %r2,__CLOCK_REALTIME_COARSE
@@ -72,13 +73,13 @@ __kernel_clock_gettime:
st %r1,4(%r3) /* store tp->tv_nsec */
lhi %r2,0
ahi %r15,16
- CFI_DEF_CFA_OFFSET 160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* CLOCK_MONOTONIC_COARSE */
- CFI_DEF_CFA_OFFSET 176
- CFI_VAL_OFFSET 15, -160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+ CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
tml %r4,0x0001 /* pending update ? loop */
jnz 9b
@@ -158,17 +159,17 @@ __kernel_clock_gettime:
st %r1,4(%r3) /* store tp->tv_nsec */
lhi %r2,0
ahi %r15,16
- CFI_DEF_CFA_OFFSET 160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* Fallback to system call */
- CFI_DEF_CFA_OFFSET 176
- CFI_VAL_OFFSET 15, -160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+ CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
19: lhi %r1,__NR_clock_gettime
svc 0
ahi %r15,16
- CFI_DEF_CFA_OFFSET 160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
CFI_ENDPROC
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 3c0db0fa6ad9..b23063fbc892 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -10,6 +10,7 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
+#include <asm/ptrace.h>
.text
.align 4
@@ -19,7 +20,7 @@ __kernel_gettimeofday:
CFI_STARTPROC
ahi %r15,-16
CFI_ADJUST_CFA_OFFSET 16
- CFI_VAL_OFFSET 15, -160
+ CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
basr %r5,0
0: al %r5,13f-0b(%r5) /* get &_vdso_data */
1: ltr %r3,%r3 /* check if tz is NULL */
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 15b1ceafc4c1..a22b2cf86eec 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -28,9 +28,10 @@ obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
-# Disable gcov profiling and ubsan for VDSO code
+# Disable gcov profiling, ubsan and kasan for VDSO code
GCOV_PROFILE := n
UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index fac3ab5ec83a..9d2ee79b90f2 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -10,6 +10,7 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
+#include <asm/ptrace.h>
.text
.align 4
@@ -18,8 +19,8 @@
__kernel_clock_gettime:
CFI_STARTPROC
aghi %r15,-16
- CFI_DEF_CFA_OFFSET 176
- CFI_VAL_OFFSET 15, -160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+ CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME_COARSE
je 4f
@@ -56,13 +57,13 @@ __kernel_clock_gettime:
stg %r1,8(%r3) /* store tp->tv_nsec */
lghi %r2,0
aghi %r15,16
- CFI_DEF_CFA_OFFSET 160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* CLOCK_MONOTONIC_COARSE */
- CFI_DEF_CFA_OFFSET 176
- CFI_VAL_OFFSET 15, -160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+ CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
tmll %r4,0x0001 /* pending update ? loop */
jnz 3b
@@ -115,13 +116,13 @@ __kernel_clock_gettime:
stg %r1,8(%r3) /* store tp->tv_nsec */
lghi %r2,0
aghi %r15,16
- CFI_DEF_CFA_OFFSET 160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* CPUCLOCK_VIRT for this thread */
- CFI_DEF_CFA_OFFSET 176
- CFI_VAL_OFFSET 15, -160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+ CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
9: lghi %r4,0
icm %r0,15,__VDSO_ECTG_OK(%r5)
jz 12f
@@ -142,17 +143,17 @@ __kernel_clock_gettime:
stg %r4,8(%r3)
lghi %r2,0
aghi %r15,16
- CFI_DEF_CFA_OFFSET 160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
/* Fallback to system call */
- CFI_DEF_CFA_OFFSET 176
- CFI_VAL_OFFSET 15, -160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
+ CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
12: lghi %r1,__NR_clock_gettime
svc 0
aghi %r15,16
- CFI_DEF_CFA_OFFSET 160
+ CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
br %r14
CFI_ENDPROC
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 6e1f0b421695..aebe10dc7c99 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -10,6 +10,7 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
+#include <asm/ptrace.h>
.text
.align 4
@@ -19,7 +20,7 @@ __kernel_gettimeofday:
CFI_STARTPROC
aghi %r15,-16
CFI_ADJUST_CFA_OFFSET 16
- CFI_VAL_OFFSET 15, -160
+ CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
larl %r5,_vdso_data
0: ltgr %r3,%r3 /* check if tz is NULL */
je 1f
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index b43f8d33a369..21eb7407d51b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -16,6 +16,7 @@
#define RO_AFTER_INIT_DATA
#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
@@ -64,6 +65,7 @@ SECTIONS
__start_ro_after_init = .;
.data..ro_after_init : {
*(.data..ro_after_init)
+ JUMP_TABLE_DATA
}
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
@@ -134,6 +136,8 @@ SECTIONS
__nospec_return_end = . ;
}
+ BOOT_DATA
+
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
@@ -146,6 +150,19 @@ SECTIONS
_end = . ;
+ /*
+ * uncompressed image info used by the decompressor
+ * it should match struct vmlinux_info
+ */
+ .vmlinux.info 0 : {
+ QUAD(_stext) /* default_lma */
+ QUAD(startup_continue) /* entry */
+ QUAD(__bss_start - _stext) /* image_size */
+ QUAD(__bss_stop - __bss_start) /* bss_size */
+ QUAD(__boot_data_start) /* bootdata_off */
+ QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */
+ }
+
/* Debugging sections. */
STABS_DEBUG
DWARF_DEBUG
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f69333fd2fa3..fe24150ff666 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -40,6 +40,7 @@
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
+#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -481,7 +482,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_S390_HPAGE_1M:
r = 0;
- if (hpage)
+ if (hpage && !kvm_is_ucontrol(kvm))
r = 1;
break;
case KVM_CAP_S390_MEM_OP:
@@ -691,7 +692,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
mutex_lock(&kvm->lock);
if (kvm->created_vcpus)
r = -EBUSY;
- else if (!hpage || kvm->arch.use_cmma)
+ else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
r = -EINVAL;
else {
r = 0;
@@ -844,20 +845,24 @@ void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
kvm_s390_vcpu_block_all(kvm);
- kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_s390_vcpu_crypto_setup(vcpu);
+ /* recreate the shadow crycb by leaving the VSIE handler */
+ kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
+ }
kvm_s390_vcpu_unblock_all(kvm);
}
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
- if (!test_kvm_facility(kvm, 76))
- return -EINVAL;
-
mutex_lock(&kvm->lock);
switch (attr->attr) {
case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
get_random_bytes(
kvm->arch.crypto.crycb->aes_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
@@ -865,6 +870,10 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
get_random_bytes(
kvm->arch.crypto.crycb->dea_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
@@ -872,17 +881,39 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
kvm->arch.crypto.aes_kw = 0;
memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
kvm->arch.crypto.dea_kw = 0;
memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
break;
+ case KVM_S390_VM_CRYPTO_ENABLE_APIE:
+ if (!ap_instructions_available()) {
+ mutex_unlock(&kvm->lock);
+ return -EOPNOTSUPP;
+ }
+ kvm->arch.crypto.apie = 1;
+ break;
+ case KVM_S390_VM_CRYPTO_DISABLE_APIE:
+ if (!ap_instructions_available()) {
+ mutex_unlock(&kvm->lock);
+ return -EOPNOTSUPP;
+ }
+ kvm->arch.crypto.apie = 0;
+ break;
default:
mutex_unlock(&kvm->lock);
return -ENXIO;
@@ -1491,6 +1522,10 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
ret = 0;
break;
+ case KVM_S390_VM_CRYPTO_ENABLE_APIE:
+ case KVM_S390_VM_CRYPTO_DISABLE_APIE:
+ ret = ap_instructions_available() ? 0 : -ENXIO;
+ break;
default:
ret = -ENXIO;
break;
@@ -1992,55 +2027,101 @@ long kvm_arch_vm_ioctl(struct file *filp,
return r;
}
-static int kvm_s390_query_ap_config(u8 *config)
-{
- u32 fcn_code = 0x04000000UL;
- u32 cc = 0;
-
- memset(config, 0, 128);
- asm volatile(
- "lgr 0,%1\n"
- "lgr 2,%2\n"
- ".long 0xb2af0000\n" /* PQAP(QCI) */
- "0: ipm %0\n"
- "srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+r" (cc)
- : "r" (fcn_code), "r" (config)
- : "cc", "0", "2", "memory"
- );
-
- return cc;
-}
-
static int kvm_s390_apxa_installed(void)
{
- u8 config[128];
- int cc;
+ struct ap_config_info info;
- if (test_facility(12)) {
- cc = kvm_s390_query_ap_config(config);
-
- if (cc)
- pr_err("PQAP(QCI) failed with cc=%d", cc);
- else
- return config[0] & 0x40;
+ if (ap_instructions_available()) {
+ if (ap_qci(&info) == 0)
+ return info.apxa;
}
return 0;
}
+/*
+ * The format of the crypto control block (CRYCB) is specified in the 3 low
+ * order bits of the CRYCB designation (CRYCBD) field as follows:
+ * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
+ * AP extended addressing (APXA) facility is installed.
+ * Format 1: The APXA facility is not installed but the MSAX3 facility is.
+ * Format 2: Both the APXA and MSAX3 facilities are installed.
+ */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
+ /* Clear the CRYCB format bits - i.e., set format 0 by default */
+ kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
+
+ /* Check whether MSAX3 is installed */
+ if (!test_kvm_facility(kvm, 76))
+ return;
+
if (kvm_s390_apxa_installed())
kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
else
kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
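
A small sketch of reading the format back out of a CRYCBD value, following
the comment block above. CRYCB_FORMAT_MASK and the three format constants
appear elsewhere in this patch; the helper itself is hypothetical:

	static const char *crycb_format_name(u32 crycbd)
	{
		switch (crycbd & CRYCB_FORMAT_MASK) {
		case CRYCB_FORMAT2:
			return "format-2 (MSAX3 and APXA)";
		case CRYCB_FORMAT1:
			return "format-1 (MSAX3 only)";
		case CRYCB_FORMAT0:
			return "format-0 (neither facility)";
		default:
			return "reserved";
		}
	}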
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+ unsigned long *aqm, unsigned long *adm)
+{
+ struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
+
+ mutex_lock(&kvm->lock);
+ kvm_s390_vcpu_block_all(kvm);
+
+ switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
+ case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
+ memcpy(crycb->apcb1.apm, apm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
+ apm[0], apm[1], apm[2], apm[3]);
+ memcpy(crycb->apcb1.aqm, aqm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
+ aqm[0], aqm[1], aqm[2], aqm[3]);
+ memcpy(crycb->apcb1.adm, adm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
+ adm[0], adm[1], adm[2], adm[3]);
+ break;
+ case CRYCB_FORMAT1:
+ case CRYCB_FORMAT0: /* Fall through, both use APCB0 */
+ memcpy(crycb->apcb0.apm, apm, 8);
+ memcpy(crycb->apcb0.aqm, aqm, 2);
+ memcpy(crycb->apcb0.adm, adm, 2);
+ VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
+ apm[0], *((unsigned short *)aqm),
+ *((unsigned short *)adm));
+ break;
+ default: /* Cannot happen */
+ break;
+ }
+
+ /* recreate the shadow crycb for each vcpu */
+ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+ kvm_s390_vcpu_unblock_all(kvm);
+ mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
+
+void kvm_arch_crypto_clear_masks(struct kvm *kvm)
+{
+ mutex_lock(&kvm->lock);
+ kvm_s390_vcpu_block_all(kvm);
+
+ memset(&kvm->arch.crypto.crycb->apcb0, 0,
+ sizeof(kvm->arch.crypto.crycb->apcb0));
+ memset(&kvm->arch.crypto.crycb->apcb1, 0,
+ sizeof(kvm->arch.crypto.crycb->apcb1));
+
+ VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
+ /* recreate the shadow crycb for each vcpu */
+ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+ kvm_s390_vcpu_unblock_all(kvm);
+ mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
+
static u64 kvm_s390_get_initial_cpuid(void)
{
struct cpuid cpuid;
@@ -2052,12 +2133,12 @@ static u64 kvm_s390_get_initial_cpuid(void)
static void kvm_s390_crypto_init(struct kvm *kvm)
{
- if (!test_kvm_facility(kvm, 76))
- return;
-
kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
kvm_s390_set_crycb_format(kvm);
+ if (!test_kvm_facility(kvm, 76))
+ return;
+
/* Enable AES/DEA protected key functions by default */
kvm->arch.crypto.aes_kw = 1;
kvm->arch.crypto.dea_kw = 1;
@@ -2583,17 +2664,25 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
- if (!test_kvm_facility(vcpu->kvm, 76))
+ /*
+ * If the AP instructions are not being interpreted and the MSAX3
+ * facility is not configured for the guest, there is nothing to set up.
+ */
+ if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
return;
+ vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
+ vcpu->arch.sie_block->eca &= ~ECA_APIE;
+
+ if (vcpu->kvm->arch.crypto.apie)
+ vcpu->arch.sie_block->eca |= ECA_APIE;
+ /* Set up protected key support */
if (vcpu->kvm->arch.crypto.aes_kw)
vcpu->arch.sie_block->ecb3 |= ECB3_AES;
if (vcpu->kvm->arch.crypto.dea_kw)
vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
-
- vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
@@ -2685,6 +2774,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
+ vcpu->arch.sie_block->hpid = HPID_KVM;
+
kvm_s390_vcpu_crypto_setup(vcpu);
return rc;
@@ -2768,18 +2859,25 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
exit_sie(vcpu);
}
+bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
+{
+ return atomic_read(&vcpu->arch.sie_block->prog20) &
+ (PROG_BLOCK_SIE | PROG_REQUEST);
+}
+
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
- * Kick a guest cpu out of SIE and wait until SIE is not running.
+ * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
* If the CPU is not running (e.g. waiting as idle) the function will
* return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
+ kvm_s390_vsie_kick(vcpu);
while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
cpu_relax();
}
@@ -3196,6 +3294,8 @@ retry:
/* nothing to do, just clear the request */
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ /* we left the vsie handler, nothing to do, just clear the request */
+ kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
return 0;
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 981e3ba97461..1f6e36cdce0d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -290,6 +290,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a2b28cd1e3fe..a153257bf7d9 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -135,14 +135,148 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
atomic_set(&scb_s->cpuflags, newflags);
return 0;
}
+/* Copy to APCB FORMAT1 from APCB FORMAT0 */
+static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
+ unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
+{
+ struct kvm_s390_apcb0 tmp;
-/*
+ if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
+ return -EFAULT;
+
+ apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
+ apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
+ apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
+
+ return 0;
+
+}
+
+/**
+ * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
+ * @vcpu: pointer to the virtual CPU
+ * @apcb_s: pointer to start of apcb in the shadow crycb
+ * @apcb_o: pointer to start of original apcb in the guest2
+ * @apcb_h: pointer to start of apcb in the guest1
+ *
+ * Returns 0 on success and -EFAULT on error reading the guest apcb
+ */
+static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
+ unsigned long apcb_o, unsigned long *apcb_h)
+{
+ if (read_guest_real(vcpu, apcb_o, apcb_s,
+ sizeof(struct kvm_s390_apcb0)))
+ return -EFAULT;
+
+ bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0));
+
+ return 0;
+}
+
+/**
+ * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
+ * @vcpu: pointer to the virtual CPU
+ * @apcb_s: pointer to start of apcb in the shadow crycb
+ * @apcb_o: pointer to start of original guest apcb
+ * @apcb_h: pointer to start of apcb in the host
+ *
+ * Returns 0 on success and -EFAULT on error reading the guest apcb
+ */
+static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
+ unsigned long apcb_o,
+ unsigned long *apcb_h)
+{
+ if (read_guest_real(vcpu, apcb_o, apcb_s,
+ sizeof(struct kvm_s390_apcb1)))
+ return -EFAULT;
+
+ bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1));
+
+ return 0;
+}
+
+/**
+ * setup_apcb - Create a shadow copy of the apcb.
+ * @vcpu: pointer to the virtual CPU
+ * @crycb_s: pointer to shadow crycb
+ * @crycb_o: pointer to original guest crycb
+ * @crycb_h: pointer to the host crycb
+ * @fmt_o: format of the original guest crycb.
+ * @fmt_h: format of the host crycb.
+ *
+ * Checks the compatibility between the guest and host crycb and calls the
+ * appropriate copy function.
+ *
+ * Returns 0 on success, or an error number if the guest and host crycbs are incompatible.
+ */
+static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
+ const u32 crycb_o,
+ struct kvm_s390_crypto_cb *crycb_h,
+ int fmt_o, int fmt_h)
+{
+ struct kvm_s390_crypto_cb *crycb;
+
+ crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;
+
+ switch (fmt_o) {
+ case CRYCB_FORMAT2:
+ if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
+ return -EACCES;
+ if (fmt_h != CRYCB_FORMAT2)
+ return -EINVAL;
+ return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
+ (unsigned long) &crycb->apcb1,
+ (unsigned long *)&crycb_h->apcb1);
+ case CRYCB_FORMAT1:
+ switch (fmt_h) {
+ case CRYCB_FORMAT2:
+ return setup_apcb10(vcpu, &crycb_s->apcb1,
+ (unsigned long) &crycb->apcb0,
+ &crycb_h->apcb1);
+ case CRYCB_FORMAT1:
+ return setup_apcb00(vcpu,
+ (unsigned long *) &crycb_s->apcb0,
+ (unsigned long) &crycb->apcb0,
+ (unsigned long *) &crycb_h->apcb0);
+ }
+ break;
+ case CRYCB_FORMAT0:
+ if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
+ return -EACCES;
+
+ switch (fmt_h) {
+ case CRYCB_FORMAT2:
+ return setup_apcb10(vcpu, &crycb_s->apcb1,
+ (unsigned long) &crycb->apcb0,
+ &crycb_h->apcb1);
+ case CRYCB_FORMAT1:
+ case CRYCB_FORMAT0:
+ return setup_apcb00(vcpu,
+ (unsigned long *) &crycb_s->apcb0,
+ (unsigned long) &crycb->apcb0,
+ (unsigned long *) &crycb_h->apcb0);
+ }
+ }
+ return -EINVAL;
+}
+
+/**
+ * shadow_crycb - Create a shadow copy of the crycb block
+ * @vcpu: a pointer to the virtual CPU
+ * @vsie_page: a pointer to internal data used for the vSIE
+ *
* Create a shadow copy of the crycb block and setup key wrapping, if
* requested for guest 3 and enabled for guest 2.
*
- * We only accept format-1 (no AP in g2), but convert it into format-2
+ * We accept format-1 or format-2, but we convert format-1 into format-2
+ * in the shadow CRYCB.
+ * Using format-2 enables the firmware to choose the right format when
+ * scheduling the SIE.
* There is nothing to do for format-0.
*
+ * This function centralizes the issuing of set_validity_icpt() for all
+ * the subfunctions working on the crycb.
+ *
* Returns: - 0 if shadowed or nothing to do
* - > 0 if control has to be given to guest 2
*/
@@ -154,23 +288,40 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
unsigned long *b1, *b2;
u8 ecb3_flags;
+ int apie_h;
+ int key_msk = test_kvm_facility(vcpu->kvm, 76);
+ int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
+ int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
+ int ret = 0;
scb_s->crycbd = 0;
- if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
- return 0;
- /* format-1 is supported with message-security-assist extension 3 */
- if (!test_kvm_facility(vcpu->kvm, 76))
+
+ apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
+ if (!apie_h && !key_msk)
return 0;
+
+ if (!crycb_addr)
+ return set_validity_icpt(scb_s, 0x0039U);
+
+ if (fmt_o == CRYCB_FORMAT1)
+ if ((crycb_addr & PAGE_MASK) !=
+ ((crycb_addr + 128) & PAGE_MASK))
+ return set_validity_icpt(scb_s, 0x003CU);
+
+ if (apie_h && (scb_o->eca & ECA_APIE)) {
+ ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
+ vcpu->kvm->arch.crypto.crycb,
+ fmt_o, fmt_h);
+ if (ret)
+ goto end;
+ scb_s->eca |= scb_o->eca & ECA_APIE;
+ }
+
/* we may only allow it if enabled for guest 2 */
ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
(ECB3_AES | ECB3_DEA);
if (!ecb3_flags)
- return 0;
-
- if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK))
- return set_validity_icpt(scb_s, 0x003CU);
- else if (!crycb_addr)
- return set_validity_icpt(scb_s, 0x0039U);
+ goto end;
/* copy only the wrapping keys */
if (read_guest_real(vcpu, crycb_addr + 72,
@@ -178,8 +329,6 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0035U);
scb_s->ecb3 |= ecb3_flags;
- scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 |
- CRYCB_FORMAT2;
/* xor both blocks in one run */
b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
@@ -187,6 +336,16 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
/* as 56%8 == 0, bitmap_xor won't overwrite any data */
bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
+end:
+ switch (ret) {
+ case -EINVAL:
+ return set_validity_icpt(scb_s, 0x0020U);
+ case -EFAULT:
+ return set_validity_icpt(scb_s, 0x0035U);
+ case -EACCES:
+ return set_validity_icpt(scb_s, 0x003CU);
+ }
+ scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
return 0;
}
@@ -383,6 +542,8 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (test_kvm_facility(vcpu->kvm, 156))
scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
+ scb_s->hpid = HPID_VSIE;
+
prepare_ibc(vcpu, vsie_page);
rc = shadow_crycb(vcpu, vsie_page);
out:
@@ -830,7 +991,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
int guest_bp_isolation;
- int rc;
+ int rc = 0;
handle_last_fault(vcpu, vsie_page);
@@ -858,7 +1019,18 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
guest_enter_irqoff();
local_irq_enable();
- rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
+ /*
+ * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
+ * and VCPU requests also hinder the vSIE from running and lead
+ * to an immediate exit. kvm_s390_vsie_kick() has to be used to
+ * also kick the vSIE.
+ */
+ vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
+ barrier();
+ if (!kvm_s390_vcpu_sie_inhibited(vcpu))
+ rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
+ barrier();
+ vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
local_irq_disable();
guest_exit_irqoff();
@@ -1005,7 +1177,8 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (rc == -EAGAIN)
rc = 0;
if (rc || scb_s->icptcode || signal_pending(current) ||
- kvm_s390_vcpu_has_irq(vcpu, 0))
+ kvm_s390_vcpu_has_irq(vcpu, 0) ||
+ kvm_s390_vcpu_sie_inhibited(vcpu))
break;
}
@@ -1122,7 +1295,8 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
if (unlikely(scb_addr & 0x1ffUL))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0))
+ if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
+ kvm_s390_vcpu_sie_inhibited(vcpu))
return 0;
vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 57ab40188d4b..5418d10dc2a8 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -9,5 +9,9 @@ lib-$(CONFIG_SMP) += spinlock.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
+# Instrumenting memory accesses to __user data (in a different address space)
+# produces false positives.
+KASAN_SANITIZE_uaccess.o := n
+
chkbss := mem.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index 40c4d59c926e..53008da05190 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -14,7 +14,8 @@
/*
* void *memmove(void *dest, const void *src, size_t n)
*/
-ENTRY(memmove)
+WEAK(memmove)
+ENTRY(__memmove)
ltgr %r4,%r4
lgr %r1,%r2
jz .Lmemmove_exit
@@ -47,6 +48,7 @@ ENTRY(memmove)
BR_EX %r14
.Lmemmove_mvc:
mvc 0(1,%r1),0(%r3)
+ENDPROC(__memmove)
EXPORT_SYMBOL(memmove)
/*
@@ -64,7 +66,8 @@ EXPORT_SYMBOL(memmove)
* return __builtin_memset(s, c, n);
* }
*/
-ENTRY(memset)
+WEAK(memset)
+ENTRY(__memset)
ltgr %r4,%r4
jz .Lmemset_exit
ltgr %r3,%r3
@@ -108,6 +111,7 @@ ENTRY(memset)
xc 0(1,%r1),0(%r1)
.Lmemset_mvc:
mvc 1(1,%r1),0(%r1)
+ENDPROC(__memset)
EXPORT_SYMBOL(memset)
/*
@@ -115,7 +119,8 @@ EXPORT_SYMBOL(memset)
*
* void *memcpy(void *dest, const void *src, size_t n)
*/
-ENTRY(memcpy)
+WEAK(memcpy)
+ENTRY(__memcpy)
ltgr %r4,%r4
jz .Lmemcpy_exit
aghi %r4,-1
@@ -136,6 +141,7 @@ ENTRY(memcpy)
j .Lmemcpy_remainder
.Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3)
+ENDPROC(__memcpy)
EXPORT_SYMBOL(memcpy)
/*
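
The WEAK()/ENTRY(__name) split above lets an instrumented C implementation
(such as KASAN's) override the plain symbol at link time, while the
double-underscore variant stays available to code that must not be
instrumented. A sketch of the same pattern in C, with hypothetical
*_sketch names; the assembly macros above are the real mechanism:

	#include <stddef.h>

	/* The raw copy, always reachable under the __ prefixed name. */
	void *__memcpy_sketch(void *dest, const void *src, size_t n)
	{
		char *d = dest;
		const char *s = src;

		while (n--)
			*d++ = *s++;
		return dest;
	}

	/* Weak alias: a sanitizer-provided memcpy can override it, just
	 * as WEAK(memcpy)/ENTRY(__memcpy) allow in the assembly above. */
	void *memcpy_sketch(void *dest, const void *src, size_t n)
		__attribute__((weak, alias("__memcpy_sketch")));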
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 33fe418506bc..f5880bfd1b0c 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -4,10 +4,12 @@
#
obj-y := init.o fault.o extmem.o mmap.o vmem.o maccess.o
-obj-y += page-states.o gup.o pageattr.o mem_detect.o
-obj-y += pgtable.o pgalloc.o
+obj-y += page-states.o gup.o pageattr.o pgtable.o pgalloc.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o
+
+KASAN_SANITIZE_kasan_init.o := n
+obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 7cdea2ec51e9..363f6470d742 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -3,6 +3,8 @@
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/kasan.h>
+#include <asm/kasan.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
@@ -17,18 +19,26 @@ enum address_markers_idx {
IDENTITY_NR = 0,
KERNEL_START_NR,
KERNEL_END_NR,
+#ifdef CONFIG_KASAN
+ KASAN_SHADOW_START_NR,
+ KASAN_SHADOW_END_NR,
+#endif
VMEMMAP_NR,
VMALLOC_NR,
MODULES_NR,
};
static struct addr_marker address_markers[] = {
- [IDENTITY_NR] = {0, "Identity Mapping"},
- [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
- [KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
- [VMEMMAP_NR] = {0, "vmemmap Area"},
- [VMALLOC_NR] = {0, "vmalloc Area"},
- [MODULES_NR] = {0, "Modules Area"},
+ [IDENTITY_NR] = {0, "Identity Mapping"},
+ [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
+ [KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
+#ifdef CONFIG_KASAN
+ [KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
+ [KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"},
+#endif
+ [VMEMMAP_NR] = {0, "vmemmap Area"},
+ [VMALLOC_NR] = {0, "vmalloc Area"},
+ [MODULES_NR] = {0, "Modules Area"},
{ -1, NULL }
};
@@ -80,7 +90,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
} else if (prot != cur || level != st->level ||
st->current_address >= st->marker[1].start_address) {
/* Print the actual finished series */
- seq_printf(m, "0x%0*lx-0x%0*lx",
+ seq_printf(m, "0x%0*lx-0x%0*lx ",
width, st->start_address,
width, st->current_address);
delta = (st->current_address - st->start_address) >> 10;
@@ -90,7 +100,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
}
seq_printf(m, "%9lu%c ", delta, *unit);
print_prot(m, st->current_prot, st->level);
- if (st->current_address >= st->marker[1].start_address) {
+ while (st->current_address >= st->marker[1].start_address) {
st->marker++;
seq_printf(m, "---[ %s ]---\n", st->marker->name);
}
@@ -100,6 +110,17 @@ static void note_page(struct seq_file *m, struct pg_state *st,
}
}
+#ifdef CONFIG_KASAN
+static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st)
+{
+ unsigned int prot;
+
+ prot = pte_val(*kasan_zero_pte) &
+ (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
+ note_page(m, st, prot, 4);
+}
+#endif
+
/*
* The actual page table walker functions. In order to keep the
* implementation of print_prot() short, we only check and pass
@@ -132,6 +153,13 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pmd_t *pmd;
int i;
+#ifdef CONFIG_KASAN
+ if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) {
+ note_kasan_zero_page(m, st);
+ return;
+ }
+#endif
+
for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
st->current_address = addr;
pmd = pmd_offset(pud, addr);
@@ -156,6 +184,13 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pud_t *pud;
int i;
+#ifdef CONFIG_KASAN
+ if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) {
+ note_kasan_zero_page(m, st);
+ return;
+ }
+#endif
+
for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
st->current_address = addr;
pud = pud_offset(p4d, addr);
@@ -179,6 +214,13 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
p4d_t *p4d;
int i;
+#ifdef CONFIG_KASAN
+ if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) {
+ note_kasan_zero_page(m, st);
+ return;
+ }
+#endif
+
for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
st->current_address = addr;
p4d = p4d_offset(pgd, addr);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 72af23bacbb5..2b8f32f56e0c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -636,17 +636,19 @@ struct pfault_refbk {
u64 reserved;
} __attribute__ ((packed, aligned(8)));
+static struct pfault_refbk pfault_init_refbk = {
+ .refdiagc = 0x258,
+ .reffcode = 0,
+ .refdwlen = 5,
+ .refversn = 2,
+ .refgaddr = __LC_LPP,
+ .refselmk = 1ULL << 48,
+ .refcmpmk = 1ULL << 48,
+ .reserved = __PF_RES_FIELD
+};
+
int pfault_init(void)
{
- struct pfault_refbk refbk = {
- .refdiagc = 0x258,
- .reffcode = 0,
- .refdwlen = 5,
- .refversn = 2,
- .refgaddr = __LC_LPP,
- .refselmk = 1ULL << 48,
- .refcmpmk = 1ULL << 48,
- .reserved = __PF_RES_FIELD };
int rc;
if (pfault_disable)
@@ -658,18 +660,20 @@ int pfault_init(void)
"1: la %0,8\n"
"2:\n"
EX_TABLE(0b,1b)
- : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
+ : "=d" (rc)
+ : "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
return rc;
}
+static struct pfault_refbk pfault_fini_refbk = {
+ .refdiagc = 0x258,
+ .reffcode = 1,
+ .refdwlen = 5,
+ .refversn = 2,
+};
+
void pfault_fini(void)
{
- struct pfault_refbk refbk = {
- .refdiagc = 0x258,
- .reffcode = 1,
- .refdwlen = 5,
- .refversn = 2,
- };
if (pfault_disable)
return;
@@ -678,7 +682,7 @@ void pfault_fini(void)
" diag %0,0,0x258\n"
"0: nopr %%r7\n"
EX_TABLE(0b,0b)
- : : "a" (&refbk), "m" (refbk) : "cc");
+ : : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}
static DEFINE_SPINLOCK(pfault_lock);
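The hunks do not say why the refbk blocks move to static storage; a plausible reading - an inference, nothing more - is that DIAG 0x258 consumes the parameter block by address, and a constant static object gives it one fixed, identity-mapped home instead of a stack slot rebuilt on every call. The pattern, with a hypothetical type:

    /* constant hardware parameter block, built once at compile time;
     * the instruction always sees the same address */
    static struct refbk_like {
            u16 diagcode;
            u16 fcode;
    } init_blk = {
            .diagcode = 0x258,
    };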
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index bb44990c8212..1e668b95e0c6 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -708,11 +708,13 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
vmaddr |= gaddr & ~PMD_MASK;
/* Find vma in the parent mm */
vma = find_vma(gmap->mm, vmaddr);
+ if (!vma)
+ continue;
/*
* We do not discard pages that are backed by
* hugetlbfs, so we don't have to refault them.
*/
- if (vma && is_vm_hugetlb_page(vma))
+ if (is_vm_hugetlb_page(vma))
continue;
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size);
@@ -905,10 +907,16 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
pmd_t *pmdp;
BUG_ON(gmap_is_shadow(gmap));
- spin_lock(&gmap->guest_table_lock);
pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
+ if (!pmdp)
+ return NULL;
+
+ /* without huge pages, there is no need to take the table lock */
+ if (!gmap->mm->context.allow_gmap_hpage_1m)
+ return pmd_none(*pmdp) ? NULL : pmdp;
- if (!pmdp || pmd_none(*pmdp)) {
+ spin_lock(&gmap->guest_table_lock);
+ if (pmd_none(*pmdp)) {
spin_unlock(&gmap->guest_table_lock);
return NULL;
}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3fa3e5323612..92d7a153e72a 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,6 +42,7 @@
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
+#include <asm/kasan.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
@@ -98,8 +99,9 @@ void __init paging_init(void)
S390_lowcore.user_asce = S390_lowcore.kernel_asce;
crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
vmem_map_init();
+ kasan_copy_shadow(init_mm.pgd);
- /* enable virtual mapping in kernel mode */
+ /* enable virtual mapping in kernel mode */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
@@ -107,6 +109,7 @@ void __init paging_init(void)
psw_bits(psw).dat = 1;
psw_bits(psw).as = PSW_BITS_AS_HOME;
__load_psw_mask(psw.mask);
+ kasan_free_early_identity();
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
new file mode 100644
index 000000000000..acb9645b762b
--- /dev/null
+++ b/arch/s390/mm/kasan_init.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kasan.h>
+#include <linux/sched/task.h>
+#include <linux/memblock.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/kasan.h>
+#include <asm/mem_detect.h>
+#include <asm/processor.h>
+#include <asm/sclp.h>
+#include <asm/facility.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+static unsigned long segment_pos __initdata;
+static unsigned long segment_low __initdata;
+static unsigned long pgalloc_pos __initdata;
+static unsigned long pgalloc_low __initdata;
+static unsigned long pgalloc_freeable __initdata;
+static bool has_edat __initdata;
+static bool has_nx __initdata;
+
+#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
+
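__sha() is the standard KASAN address transform: with KASAN_SHADOW_SCALE_SHIFT == 3, one shadow byte tracks eight bytes of address space, so e.g. a 4 GB identity mapping needs 512 MB of shadow. The generic helper it wraps is defined in include/linux/kasan.h as:

    static inline void *kasan_mem_to_shadow(const void *addr)
    {
            return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                    + KASAN_SHADOW_OFFSET;
    }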
+static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+
+static void __init kasan_early_panic(const char *reason)
+{
+ sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
+ sclp_early_printk(reason);
+ disabled_wait(0);
+}
+
+static void * __init kasan_early_alloc_segment(void)
+{
+ segment_pos -= _SEGMENT_SIZE;
+
+ if (segment_pos < segment_low)
+ kasan_early_panic("out of memory during initialisation\n");
+
+ return (void *)segment_pos;
+}
+
+static void * __init kasan_early_alloc_pages(unsigned int order)
+{
+ pgalloc_pos -= (PAGE_SIZE << order);
+
+ if (pgalloc_pos < pgalloc_low)
+ kasan_early_panic("out of memory during initialisation\n");
+
+ return (void *)pgalloc_pos;
+}
+
+static void * __init kasan_early_crst_alloc(unsigned long val)
+{
+ unsigned long *table;
+
+ table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
+ if (table)
+ crst_table_init(table, val);
+ return table;
+}
+
+static pte_t * __init kasan_early_pte_alloc(void)
+{
+ static void *pte_leftover;
+ pte_t *pte;
+
+ BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
+
+ if (!pte_leftover) {
+ pte_leftover = kasan_early_alloc_pages(0);
+ pte = pte_leftover + _PAGE_TABLE_SIZE;
+ } else {
+ pte = pte_leftover;
+ pte_leftover = NULL;
+ }
+ memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
+ return pte;
+}
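The BUILD_BUG_ON pins down why the leftover game is safe: s390 pte tables are half a page (_PAGE_TABLE_SIZE * 2 == PAGE_SIZE), so each 4 KB allocation yields two tables and the unused half is parked for the next caller. The carving pattern in generic form, assuming a hypothetical page-granular backing allocator:

    static void *leftover;

    static void *alloc_half(void)
    {
            void *p;

            if (!leftover) {
                    leftover = alloc_backing_page();   /* hypothetical */
                    p = leftover + PAGE_SIZE / 2;      /* hand out top half */
            } else {
                    p = leftover;                      /* recycle bottom half */
                    leftover = NULL;
            }
            return p;
    }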
+
+enum populate_mode {
+ POPULATE_ONE2ONE,
+ POPULATE_MAP,
+ POPULATE_ZERO_SHADOW
+};
+static void __init kasan_early_vmemmap_populate(unsigned long address,
+ unsigned long end,
+ enum populate_mode mode)
+{
+ unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
+ pgd_t *pg_dir;
+ p4d_t *p4_dir;
+ pud_t *pu_dir;
+ pmd_t *pm_dir;
+ pte_t *pt_dir;
+
+ pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
+ if (!has_nx)
+ pgt_prot_zero &= ~_PAGE_NOEXEC;
+ pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
+ sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);
+
+ while (address < end) {
+ pg_dir = pgd_offset_k(address);
+ if (pgd_none(*pg_dir)) {
+ if (mode == POPULATE_ZERO_SHADOW &&
+ IS_ALIGNED(address, PGDIR_SIZE) &&
+ end - address >= PGDIR_SIZE) {
+ pgd_populate(&init_mm, pg_dir, kasan_zero_p4d);
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ continue;
+ }
+ p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
+ pgd_populate(&init_mm, pg_dir, p4_dir);
+ }
+
+ p4_dir = p4d_offset(pg_dir, address);
+ if (p4d_none(*p4_dir)) {
+ if (mode == POPULATE_ZERO_SHADOW &&
+ IS_ALIGNED(address, P4D_SIZE) &&
+ end - address >= P4D_SIZE) {
+ p4d_populate(&init_mm, p4_dir, kasan_zero_pud);
+ address = (address + P4D_SIZE) & P4D_MASK;
+ continue;
+ }
+ pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
+ p4d_populate(&init_mm, p4_dir, pu_dir);
+ }
+
+ pu_dir = pud_offset(p4_dir, address);
+ if (pud_none(*pu_dir)) {
+ if (mode == POPULATE_ZERO_SHADOW &&
+ IS_ALIGNED(address, PUD_SIZE) &&
+ end - address >= PUD_SIZE) {
+ pud_populate(&init_mm, pu_dir, kasan_zero_pmd);
+ address = (address + PUD_SIZE) & PUD_MASK;
+ continue;
+ }
+ pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
+ pud_populate(&init_mm, pu_dir, pm_dir);
+ }
+
+ pm_dir = pmd_offset(pu_dir, address);
+ if (pmd_none(*pm_dir)) {
+ if (mode == POPULATE_ZERO_SHADOW &&
+ IS_ALIGNED(address, PMD_SIZE) &&
+ end - address >= PMD_SIZE) {
+ pmd_populate(&init_mm, pm_dir, kasan_zero_pte);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+ /* the first megabyte of 1:1 is mapped with 4k pages */
+ if (has_edat && address && end - address >= PMD_SIZE &&
+ mode != POPULATE_ZERO_SHADOW) {
+ void *page;
+
+ if (mode == POPULATE_ONE2ONE) {
+ page = (void *)address;
+ } else {
+ page = kasan_early_alloc_segment();
+ memset(page, 0, _SEGMENT_SIZE);
+ }
+ pmd_val(*pm_dir) = __pa(page) | sgt_prot;
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+
+ pt_dir = kasan_early_pte_alloc();
+ pmd_populate(&init_mm, pm_dir, pt_dir);
+ } else if (pmd_large(*pm_dir)) {
+ address = (address + PMD_SIZE) & PMD_MASK;
+ continue;
+ }
+
+ pt_dir = pte_offset_kernel(pm_dir, address);
+ if (pte_none(*pt_dir)) {
+ void *page;
+
+ switch (mode) {
+ case POPULATE_ONE2ONE:
+ page = (void *)address;
+ pte_val(*pt_dir) = __pa(page) | pgt_prot;
+ break;
+ case POPULATE_MAP:
+ page = kasan_early_alloc_pages(0);
+ memset(page, 0, PAGE_SIZE);
+ pte_val(*pt_dir) = __pa(page) | pgt_prot;
+ break;
+ case POPULATE_ZERO_SHADOW:
+ page = kasan_zero_page;
+ pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
+ break;
+ }
+ }
+ address += PAGE_SIZE;
+ }
+}
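The walker's key economy: in POPULATE_ZERO_SHADOW mode, any naturally aligned range that covers a whole pgd/p4d/pud/pmd entry is pointed at the shared kasan_zero_* tables, so arbitrarily large untracked regions cost a handful of read-only tables, and only the unaligned edges fall through to private allocations. The recurring guard, isolated at the pmd level:

    /* only a fully covered, naturally aligned entry may alias the
     * shared zero tables; partial coverage gets a private pte table */
    if (mode == POPULATE_ZERO_SHADOW &&
        IS_ALIGNED(address, PMD_SIZE) && end - address >= PMD_SIZE) {
            pmd_populate(&init_mm, pm_dir, kasan_zero_pte);
            address = (address + PMD_SIZE) & PMD_MASK;
            continue;
    }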
+
+static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
+{
+ unsigned long asce_bits;
+
+ asce_bits = asce_type | _ASCE_TABLE_LENGTH;
+ S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
+ S390_lowcore.user_asce = S390_lowcore.kernel_asce;
+
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+ __ctl_load(S390_lowcore.kernel_asce, 13, 13);
+}
+
+static void __init kasan_enable_dat(void)
+{
+ psw_t psw;
+
+ psw.mask = __extract_psw();
+ psw_bits(psw).dat = 1;
+ psw_bits(psw).as = PSW_BITS_AS_HOME;
+ __load_psw_mask(psw.mask);
+}
+
+static void __init kasan_early_detect_facilities(void)
+{
+ __stfle(S390_lowcore.stfle_fac_list,
+ ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+ if (test_facility(8)) {
+ has_edat = true;
+ __ctl_set_bit(0, 23);
+ }
+ if (!noexec_disabled && test_facility(130)) {
+ has_nx = true;
+ __ctl_set_bit(0, 20);
+ }
+}
+
+static unsigned long __init get_mem_detect_end(void)
+{
+ unsigned long start;
+ unsigned long end;
+
+ if (mem_detect.count) {
+ __get_mem_detect_block(mem_detect.count - 1, &start, &end);
+ return end;
+ }
+ return 0;
+}
+
+void __init kasan_early_init(void)
+{
+ unsigned long untracked_mem_end;
+ unsigned long shadow_alloc_size;
+ unsigned long initrd_end;
+ unsigned long asce_type;
+ unsigned long memsize;
+ unsigned long vmax;
+ unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
+ pte_t pte_z;
+ pmd_t pmd_z = __pmd(__pa(kasan_zero_pte) | _SEGMENT_ENTRY);
+ pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY);
+ p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY);
+
+ kasan_early_detect_facilities();
+ if (!has_nx)
+ pgt_prot &= ~_PAGE_NOEXEC;
+ pte_z = __pte(__pa(kasan_zero_page) | pgt_prot);
+
+ memsize = get_mem_detect_end();
+ if (!memsize)
+ kasan_early_panic("cannot detect physical memory size\n");
+ /* respect mem= cmdline parameter */
+ if (memory_end_set && memsize > memory_end)
+ memsize = memory_end;
+ memsize = min(memsize, KASAN_SHADOW_START);
+
+ if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
+ /* 4 level paging */
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
+ crst_table_init((unsigned long *)early_pg_dir,
+ _REGION2_ENTRY_EMPTY);
+ untracked_mem_end = vmax = _REGION1_SIZE;
+ asce_type = _ASCE_TYPE_REGION2;
+ } else {
+ /* 3 level paging */
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
+ crst_table_init((unsigned long *)early_pg_dir,
+ _REGION3_ENTRY_EMPTY);
+ untracked_mem_end = vmax = _REGION2_SIZE;
+ asce_type = _ASCE_TYPE_REGION3;
+ }
+
+ /* init kasan zero shadow */
+ crst_table_init((unsigned long *)kasan_zero_p4d, p4d_val(p4d_z));
+ crst_table_init((unsigned long *)kasan_zero_pud, pud_val(pud_z));
+ crst_table_init((unsigned long *)kasan_zero_pmd, pmd_val(pmd_z));
+ memset64((u64 *)kasan_zero_pte, pte_val(pte_z), PTRS_PER_PTE);
+
+ shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
+ pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
+ initrd_end =
+ round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
+ pgalloc_low = max(pgalloc_low, initrd_end);
+ }
+
+ if (pgalloc_low + shadow_alloc_size > memsize)
+ kasan_early_panic("out of memory during initialisation\n");
+
+ if (has_edat) {
+ segment_pos = round_down(memsize, _SEGMENT_SIZE);
+ segment_low = segment_pos - shadow_alloc_size;
+ pgalloc_pos = segment_low;
+ } else {
+ pgalloc_pos = memsize;
+ }
+ init_mm.pgd = early_pg_dir;
+	/*
+	 * Current memory layout:
+	 * +- 0 -------------+   +- shadow start -+
+	 * | 1:1 ram mapping |  /| 1/8 ram        |
+	 * +- end of ram ----+ / +----------------+
+	 * | ... gap ...     |/  |     kasan      |
+	 * +- shadow start --+   |     zero       |
+	 * | 1/8 addr space  |   |     page       |
+	 * +- shadow end ----+   |    mapping     |
+	 * | ... gap ...     |\  |  (untracked)   |
+	 * +- modules vaddr -+ \ +----------------+
+	 * | 2Gb             |  \|    unmapped    | allocated per module
+	 * +-----------------+   +- shadow end ---+
+	 */
+ /* populate kasan shadow (for identity mapping and zero page mapping) */
+ kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
+ if (IS_ENABLED(CONFIG_MODULES))
+ untracked_mem_end = vmax - MODULES_LEN;
+ kasan_early_vmemmap_populate(__sha(max_physmem_end),
+ __sha(untracked_mem_end),
+ POPULATE_ZERO_SHADOW);
+	/* memory allocated for identity mapping page tables will be freed later */
+ pgalloc_freeable = pgalloc_pos;
+ /* populate identity mapping */
+ kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
+ kasan_set_pgd(early_pg_dir, asce_type);
+ kasan_enable_dat();
+ /* enable kasan */
+ init_task.kasan_depth = 0;
+ memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
+ sclp_early_printk("KernelAddressSanitizer initialized\n");
+}
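To make the bookkeeping concrete, the resulting split for a hypothetical 8 GB machine with EDAT (illustrative numbers only):

    /*
     * memsize           = 8 GB
     * shadow_alloc_size = 8 GB >> 3              = 1 GB
     * segment_pos       = round_down(8 GB, 1 MB) = 8 GB
     * segment_low       = segment_pos - 1 GB     = 7 GB
     * pgalloc_pos       = segment_low            = 7 GB
     *
     * i.e. shadow segments are carved top-down out of the highest
     * eighth of memory, page tables grow down from below them, and
     * everything from the final pgalloc_pos up to memsize is handed
     * to memblock_reserve() at the end of kasan_early_init().
     */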
+
+void __init kasan_copy_shadow(pgd_t *pg_dir)
+{
+ /*
+	 * At this point we are still running on the early page tables set up
+	 * in early_pg_dir, while swapper_pg_dir has just been initialized
+	 * with the identity mapping.
+ * Carry over shadow memory region from early_pg_dir to swapper_pg_dir.
+ */
+
+ pgd_t *pg_dir_src;
+ pgd_t *pg_dir_dst;
+ p4d_t *p4_dir_src;
+ p4d_t *p4_dir_dst;
+ pud_t *pu_dir_src;
+ pud_t *pu_dir_dst;
+
+ pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
+ pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
+ p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
+ p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
+ if (!p4d_folded(*p4_dir_src)) {
+ /* 4 level paging */
+ memcpy(p4_dir_dst, p4_dir_src,
+ (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
+ return;
+ }
+ /* 3 level paging */
+ pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
+ pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
+ memcpy(pu_dir_dst, pu_dir_src,
+ (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
+}
+
+void __init kasan_free_early_identity(void)
+{
+ memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
+}
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 7be06475809b..97b3ee53852b 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -89,10 +89,8 @@ static int __memcpy_real(void *dest, void *src, size_t count)
return rc;
}
-/*
- * Copy memory in real mode (kernel to kernel)
- */
-int memcpy_real(void *dest, void *src, size_t count)
+static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
+ unsigned long count)
{
int irqs_disabled, rc;
unsigned long flags;
@@ -103,7 +101,7 @@ int memcpy_real(void *dest, void *src, size_t count)
irqs_disabled = arch_irqs_disabled_flags(flags);
if (!irqs_disabled)
trace_hardirqs_off();
- rc = __memcpy_real(dest, src, count);
+ rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
if (!irqs_disabled)
trace_hardirqs_on();
__arch_local_irq_ssm(flags);
@@ -111,6 +109,23 @@ int memcpy_real(void *dest, void *src, size_t count)
}
/*
+ * Copy memory in real mode (kernel to kernel)
+ */
+int memcpy_real(void *dest, void *src, size_t count)
+{
+ if (S390_lowcore.nodat_stack != 0)
+ return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
+ 3, dest, src, count);
+	/*
+	 * This is a really early memcpy_real call; the stacks are
+	 * not set up yet. Just call _memcpy_real on the early boot
+	 * stack.
+	 */
+	return _memcpy_real((unsigned long) dest, (unsigned long) src,
+ (unsigned long) count);
+}
+
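The stack switch is the interesting part: _memcpy_real() runs with DAT off, so it must execute on a stack reachable through real addresses, and S390_lowcore.nodat_stack is set up for exactly that - an inference from the name, not spelled out in the hunk. The call shape, with the three arguments travelling as unsigned longs to match the new _memcpy_real() prototype:

    rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3,
                       dest, src, count);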
+/*
* Copy memory in absolute mode (kernel to kernel)
*/
void memcpy_absolute(void *dest, void *src, size_t count)
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
deleted file mode 100644
index 21f6c82c8296..000000000000
--- a/arch/s390/mm/mem_detect.c
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright IBM Corp. 2008, 2009
- *
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/memblock.h>
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <asm/ipl.h>
-#include <asm/sclp.h>
-#include <asm/setup.h>
-
-#define CHUNK_READ_WRITE 0
-#define CHUNK_READ_ONLY 1
-
-static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
-{
- memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
- start, start + size - 1);
- memblock_add_range(&memblock.memory, start, size, 0, 0);
- memblock_add_range(&memblock.physmem, start, size, 0, 0);
-}
-
-void __init detect_memory_memblock(void)
-{
- unsigned long memsize, rnmax, rzm, addr, size;
- int type;
-
- rzm = sclp.rzm;
- rnmax = sclp.rnmax;
- memsize = rzm * rnmax;
- if (!rzm)
- rzm = 1UL << 17;
- max_physmem_end = memsize;
- addr = 0;
- /* keep memblock lists close to the kernel */
- memblock_set_bottom_up(true);
- do {
- size = 0;
- /* assume lowcore is writable */
- type = addr ? tprot(addr) : CHUNK_READ_WRITE;
- do {
- size += rzm;
- if (max_physmem_end && addr + size >= max_physmem_end)
- break;
- } while (type == tprot(addr + size));
- if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
- if (max_physmem_end && (addr + size > max_physmem_end))
- size = max_physmem_end - addr;
- memblock_physmem_add(addr, size);
- }
- addr += size;
- } while (addr < max_physmem_end);
- memblock_set_bottom_up(false);
- if (!max_physmem_end)
- max_physmem_end = memblock_end_of_DRAM();
- memblock_dump_all();
-}
diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
index 2e3707b12edd..5a10ce34b95d 100644
--- a/arch/s390/purgatory/head.S
+++ b/arch/s390/purgatory/head.S
@@ -11,6 +11,7 @@
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/sigp.h>
+#include <asm/ptrace.h>
/* The purgatory is the code running between two kernels. Its main purpose
* is to verify that the next kernel was not corrupted after load and to
@@ -88,8 +89,7 @@ ENTRY(purgatory_start)
.base_crash:
/* Setup stack */
- larl %r15,purgatory_end
- aghi %r15,-160
+ larl %r15,purgatory_end-STACK_FRAME_OVERHEAD
/* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called
* directly with a flag passed in %r2 whether the purgatory shall do
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 0c85aedcf9b3..fd788e0f2e5b 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -106,6 +106,8 @@ static struct facility_def facility_defs[] = {
.name = "FACILITIES_KVM_CPUMODEL",
.bits = (int[]){
+ 12, /* AP Query Configuration Information */
+ 15, /* AP Facilities Test */
156, /* etoken facility */
-1 /* END */
}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 1fb7b6d72baf..475d786a65b0 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -7,6 +7,7 @@ config SUPERH
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select HAVE_PATA_PLATFORM
select CLKDEV_LOOKUP
+ select DMA_DIRECT_OPS
select HAVE_IDE if HAS_IOPORT_MAP
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
@@ -158,13 +159,11 @@ config SWAP_IO_SPACE
bool
config DMA_COHERENT
- select DMA_DIRECT_OPS
bool
config DMA_NONCOHERENT
def_bool !DMA_COHERENT
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
- select DMA_NONCOHERENT_OPS
config PGTABLE_LEVELS
default 3 if X2TLB
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index adc61d14172c..06a894526a0b 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -633,7 +633,6 @@ static struct regulator_init_data cn12_power_init_data = {
static struct fixed_voltage_config cn12_power_info = {
.supply_name = "CN12 SD/MMC Vdd",
.microvolts = 3300000,
- .gpio = GPIO_PTB7,
.enable_high = 1,
.init_data = &cn12_power_init_data,
};
@@ -646,6 +645,16 @@ static struct platform_device cn12_power = {
},
};
+static struct gpiod_lookup_table cn12_power_gpiod_table = {
+ .dev_id = "reg-fixed-voltage.0",
+ .table = {
+ /* Offset 7 on port B */
+ GPIO_LOOKUP("sh7724_pfc", GPIO_PTB7,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
/* SDHI0 */
static struct regulator_consumer_supply sdhi0_power_consumers[] =
@@ -665,7 +674,6 @@ static struct regulator_init_data sdhi0_power_init_data = {
static struct fixed_voltage_config sdhi0_power_info = {
.supply_name = "CN11 SD/MMC Vdd",
.microvolts = 3300000,
- .gpio = GPIO_PTB6,
.enable_high = 1,
.init_data = &sdhi0_power_init_data,
};
@@ -678,6 +686,16 @@ static struct platform_device sdhi0_power = {
},
};
+static struct gpiod_lookup_table sdhi0_power_gpiod_table = {
+ .dev_id = "reg-fixed-voltage.1",
+ .table = {
+ /* Offset 6 on port B */
+ GPIO_LOOKUP("sh7724_pfc", GPIO_PTB6,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static struct tmio_mmc_data sdhi0_info = {
.chan_priv_tx = (void *)SHDMA_SLAVE_SDHI0_TX,
.chan_priv_rx = (void *)SHDMA_SLAVE_SDHI0_RX,
@@ -1413,6 +1431,11 @@ static int __init arch_setup(void)
DMA_MEMORY_EXCLUSIVE);
platform_device_add(ecovec_ceu_devices[1]);
+ gpiod_add_lookup_table(&cn12_power_gpiod_table);
+#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
+ gpiod_add_lookup_table(&sdhi0_power_gpiod_table);
+#endif
+
return platform_add_devices(ecovec_devices,
ARRAY_SIZE(ecovec_devices));
}
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 254f2c662703..f4ad33c6d2aa 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -14,7 +14,7 @@
#include <linux/mmc/host.h>
#include <linux/mtd/physmap.h>
#include <linux/mfd/tmio.h>
-#include <linux/mtd/rawnand.h>
+#include <linux/mtd/platnand.h>
#include <linux/i2c.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
@@ -165,23 +165,21 @@ static struct mtd_partition migor_nand_flash_partitions[] = {
},
};
-static void migor_nand_flash_cmd_ctl(struct mtd_info *mtd, int cmd,
+static void migor_nand_flash_cmd_ctl(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
- writeb(cmd, chip->IO_ADDR_W + 0x00400000);
+ writeb(cmd, chip->legacy.IO_ADDR_W + 0x00400000);
else if (ctrl & NAND_ALE)
- writeb(cmd, chip->IO_ADDR_W + 0x00800000);
+ writeb(cmd, chip->legacy.IO_ADDR_W + 0x00800000);
else
- writeb(cmd, chip->IO_ADDR_W);
+ writeb(cmd, chip->legacy.IO_ADDR_W);
}
-static int migor_nand_flash_ready(struct mtd_info *mtd)
+static int migor_nand_flash_ready(struct nand_chip *chip)
{
return gpio_get_value(GPIO_PTA1); /* NAND_RBn */
}
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index b36200af9ce7..a99234b61051 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -5,6 +5,7 @@
# include <asm/unistd_64.h>
# endif
+# define __ARCH_WANT_NEW_STAT
# define __ARCH_WANT_OLD_READDIR
# define __ARCH_WANT_OLD_STAT
# define __ARCH_WANT_STAT64
@@ -19,7 +20,6 @@
# define __ARCH_WANT_SYS_SOCKETCALL
# define __ARCH_WANT_SYS_FADVISE64
# define __ARCH_WANT_SYS_GETPGRP
-# define __ARCH_WANT_SYS_LLSEEK
# define __ARCH_WANT_SYS_NICE
# define __ARCH_WANT_SYS_OLD_GETRLIMIT
# define __ARCH_WANT_SYS_OLD_UNAME
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index e6f2a38d2e61..7e2aa59fcc29 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -51,7 +51,7 @@ config SPARC
config SPARC32
def_bool !64BIT
select ARCH_HAS_SYNC_DMA_FOR_CPU
- select DMA_NONCOHERENT_OPS
+ select DMA_DIRECT_OPS
select GENERIC_ATOMIC64
select CLZ_TAB
select HAVE_UID16
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 4eb51d2dae98..30b1763580b1 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -6,38 +6,23 @@
*/
#include <linux/types.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "sparc\0\0"
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef s16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_key_t;
-typedef s32 compat_timer_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
-
struct compat_stat {
compat_dev_t st_dev;
compat_ino_t st_ino;
@@ -47,11 +32,11 @@ struct compat_stat {
__compat_gid_t st_gid;
compat_dev_t st_rdev;
compat_off_t st_size;
- compat_time_t st_atime;
+ old_time32_t st_atime;
compat_ulong_t st_atime_nsec;
- compat_time_t st_mtime;
+ old_time32_t st_mtime;
compat_ulong_t st_mtime_nsec;
- compat_time_t st_ctime;
+ old_time32_t st_ctime;
compat_ulong_t st_ctime_nsec;
compat_off_t st_blksize;
compat_off_t st_blocks;
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 666d6b5c0440..9c3fc03abe9a 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -28,7 +28,7 @@ typedef struct {
unsigned short sock_id; /* physical package */
unsigned short core_id;
unsigned short max_cache_id; /* groupings of highest shared cache */
- unsigned short proc_id; /* strand (aka HW thread) id */
+ signed short proc_id; /* strand (aka HW thread) id */
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index e17566376934..b0bb2fcaf1c9 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -14,11 +14,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#ifdef CONFIG_SPARC_LEON
if (sparc_cpu_model == sparc_leon)
- return &dma_noncoherent_ops;
+ return &dma_direct_ops;
#endif
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
if (bus == &pci_bus_type)
- return &dma_noncoherent_ops;
+ return &dma_direct_ops;
#endif
return dma_ops;
}
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
index 05df5f043053..3c5a1c620f0f 100644
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -21,6 +21,7 @@
*/
#define HAS_DMA
+#ifdef CONFIG_PARPORT_PC_FIFO
static DEFINE_SPINLOCK(dma_spin_lock);
#define claim_dma_lock() \
@@ -31,6 +32,7 @@ static DEFINE_SPINLOCK(dma_spin_lock);
#define release_dma_lock(__flags) \
spin_unlock_irqrestore(&dma_spin_lock, __flags);
+#endif
static struct sparc_ebus_info {
struct ebus_dma_info info;
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 7fb676360928..20255471e653 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -121,8 +121,12 @@ struct thread_info {
}
/* how to get the thread information struct from C */
+#ifndef BUILD_VDSO
register struct thread_info *current_thread_info_reg asm("g6");
#define current_thread_info() (current_thread_info_reg)
+#else
+extern struct thread_info *current_thread_info(void);
+#endif
/* thread information allocation */
#if PAGE_SHIFT == 13
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index b2a6a955113e..00f87dbd0b17 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -21,6 +21,7 @@
#else
#define __NR_time 231 /* Linux sparc32 */
#endif
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
@@ -33,7 +34,6 @@
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
@@ -42,6 +42,7 @@
#define __ARCH_WANT_SYS_IPC
#else
#define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
diff --git a/arch/sparc/include/asm/vdso.h b/arch/sparc/include/asm/vdso.h
index 93b628731a5e..59e79d35cd73 100644
--- a/arch/sparc/include/asm/vdso.h
+++ b/arch/sparc/include/asm/vdso.h
@@ -8,10 +8,8 @@
struct vdso_image {
void *data;
unsigned long size; /* Always a multiple of PAGE_SIZE */
+
long sym_vvar_start; /* Negative offset to the vvar area */
- long sym_vread_tick; /* Start of vread_tick section */
- long sym_vread_tick_patch_start; /* Start of tick read */
- long sym_vread_tick_patch_end; /* End of tick read */
};
#ifdef CONFIG_SPARC64
diff --git a/arch/sparc/include/uapi/asm/siginfo.h b/arch/sparc/include/uapi/asm/siginfo.h
index e7049550ac82..68bdde4c2a2e 100644
--- a/arch/sparc/include/uapi/asm/siginfo.h
+++ b/arch/sparc/include/uapi/asm/siginfo.h
@@ -4,7 +4,6 @@
#if defined(__sparc__) && defined(__arch64__)
-#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
#define __ARCH_SI_BAND_T int
#endif /* defined(__sparc__) && defined(__arch64__) */
@@ -17,10 +16,4 @@
#define SI_NOINFO 32767 /* no information in siginfo_t */
-/*
- * SIGEMT si_codes
- */
-#define EMT_TAGOVF 1 /* tag overflow */
-#define NSIGEMT 1
-
#endif /* _UAPI__SPARC_SIGINFO_H */
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index 09acf0ddec10..45b4bf1875e6 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -427,8 +427,9 @@
#define __NR_preadv2 358
#define __NR_pwritev2 359
#define __NR_statx 360
+#define __NR_io_pgetevents 361
-#define NR_syscalls 361
+#define NR_syscalls 362
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 5868fc333ea8..639c8e54530a 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->pc = addr;
linux_regs->npc = addr + 4;
}
- /* fallthru */
+ /* fall through */
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index d5f7dc6323d5..a68bbddbdba4 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->tpc = addr;
linux_regs->tnpc = addr + 4;
}
- /* fallthru */
+ /* fall through */
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index d3149baaa33c..67b3e6b3ce5d 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -24,6 +24,7 @@
#include <asm/cpudata.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include <linux/sched/clock.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/cacheflush.h>
@@ -927,6 +928,8 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc)
sparc_perf_event_update(cp, &cp->hw,
cpuc->current_idx[i]);
cpuc->current_idx[i] = PIC_NO_INDEX;
+ if (cp->hw.state & PERF_HES_STOPPED)
+ cp->hw.state |= PERF_HES_ARCH;
}
}
}
@@ -959,10 +962,12 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc)
enc = perf_event_get_enc(cpuc->events[i]);
cpuc->pcr[0] &= ~mask_for_index(idx);
- if (hwc->state & PERF_HES_STOPPED)
+ if (hwc->state & PERF_HES_ARCH) {
cpuc->pcr[0] |= nop_for_index(idx);
- else
+ } else {
cpuc->pcr[0] |= event_encoding(enc, idx);
+ hwc->state = 0;
+ }
}
out:
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
@@ -988,6 +993,9 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
cpuc->current_idx[i] = idx;
+ if (cp->hw.state & PERF_HES_ARCH)
+ continue;
+
sparc_pmu_start(cp, PERF_EF_RELOAD);
}
out:
@@ -1079,6 +1087,8 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
event->hw.state = 0;
sparc_pmu_enable_event(cpuc, &event->hw, idx);
+
+ perf_event_update_userpage(event);
}
static void sparc_pmu_stop(struct perf_event *event, int flags)
@@ -1371,9 +1381,9 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
- event->hw.state = PERF_HES_UPTODATE;
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (!(ef_flags & PERF_EF_START))
- event->hw.state |= PERF_HES_STOPPED;
+ event->hw.state |= PERF_HES_ARCH;
/*
* If group events scheduling transaction was started,
@@ -1603,6 +1613,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
+ u64 finish_clock;
+ u64 start_clock;
int i;
if (!atomic_read(&active_events))
@@ -1616,6 +1628,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
return NOTIFY_DONE;
}
+ start_clock = sched_clock();
+
regs = args->regs;
cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1654,6 +1668,10 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
sparc_pmu_stop(event, 0);
}
+ finish_clock = sched_clock();
+
+ perf_sample_event_took(finish_clock - start_clock);
+
return NOTIFY_STOP;
}
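Bracketing the handler with sched_clock() and reporting the delta through perf_sample_event_took() opts sparc into perf's dynamic throttling: the core lowers the permitted sample rate when NMI handlers consume too much CPU time. The minimal shape, matching what other architectures do:

    u64 start = sched_clock();

    /* ... consume counter overflows, emit samples ... */

    perf_sample_event_took(sched_clock() - start);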
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index f6528884a2c8..4073e2b87dd0 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -84,8 +84,9 @@ __handle_signal:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
+ andn %l1, %l4, %l1
ba,pt %xcc, __handle_preemption_continue
- andn %l1, %l4, %l1
+ srl %l4, 20, %l4
/* When returning from a NMI (%pil==15) interrupt we want to
* avoid running softirqs, doing IRQ tracing, preempting, etc.
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 12bee14b552c..621a363098ec 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -90,4 +90,4 @@ sys_call_table:
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
-/*360*/ .long sys_statx
+/*360*/ .long sys_statx, sys_io_pgetevents
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 387ef993880a..bb68c805b891 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -91,7 +91,7 @@ sys_call_table32:
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
.word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
-/*360*/ .word sys_statx
+/*360*/ .word sys_statx, compat_sys_io_pgetevents
#endif /* CONFIG_COMPAT */
@@ -173,4 +173,4 @@ sys_call_table:
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
.word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
-/*360*/ .word sys_statx
+/*360*/ .word sys_statx, sys_io_pgetevents
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index f0eba72aa1ad..5f356dc8e178 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -53,8 +53,6 @@
DEFINE_SPINLOCK(rtc_lock);
-unsigned int __read_mostly vdso_fix_stick;
-
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
@@ -838,7 +836,6 @@ void __init time_init_early(void)
} else {
init_tick_ops(&tick_operations);
clocksource_tick.archdata.vclock_mode = VCLOCK_TICK;
- vdso_fix_stick = 1;
}
} else {
init_tick_ops(&stick_operations);
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index 635d67ffc9a3..7db5aabe9708 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -180,11 +180,17 @@ static int send_dreg(struct vio_driver_state *vio)
struct vio_dring_register pkt;
char all[sizeof(struct vio_dring_register) +
(sizeof(struct ldc_trans_cookie) *
- dr->ncookies)];
+ VIO_MAX_RING_COOKIES)];
} u;
+ size_t bytes = sizeof(struct vio_dring_register) +
+ (sizeof(struct ldc_trans_cookie) *
+ dr->ncookies);
int i;
- memset(&u, 0, sizeof(u));
+ if (WARN_ON(bytes > sizeof(u)))
+ return -EINVAL;
+
+ memset(&u, 0, bytes);
init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
u.pkt.dring_ident = 0;
u.pkt.num_descr = dr->num_entries;
@@ -206,7 +212,7 @@ static int send_dreg(struct vio_driver_state *vio)
(unsigned long long) u.pkt.cookies[i].cookie_size);
}
- return send_ctrl(vio, &u.pkt.tag, sizeof(u));
+ return send_ctrl(vio, &u.pkt.tag, bytes);
}
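This is the stock VLA-removal pattern: size the on-stack union for the compile-time worst case (VIO_MAX_RING_COOKIES), compute the real packet length separately, bound-check it, and use it for both the memset() and the send. In outline (MAX_PACKET, hdr and cookie are placeholders):

    char buf[MAX_PACKET];                   /* compile-time maximum */
    size_t bytes = sizeof(struct hdr) + n * sizeof(struct cookie);

    if (WARN_ON(bytes > sizeof(buf)))       /* never trust n */
            return -EINVAL;
    memset(buf, 0, bytes);                  /* touch only what is sent */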
static int send_rdx(struct vio_driver_state *vio)
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index dd0b5a92ffd0..a6e18ca4cc18 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -31,23 +31,19 @@ obj-y += $(vdso_img_objs)
targets += $(vdso_img_cfiles)
targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
-export CPPFLAGS_vdso.lds += -P -C
+CPPFLAGS_vdso.lds += -P -C
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
- -Wl,--no-undefined \
- -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
- $(DISABLE_LTO)
+VDSO_LDFLAGS_vdso.lds = -m elf64_sparc -soname linux-vdso.so.1 --no-undefined \
+ -z max-page-size=8192 -z common-page-size=8192
-$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs-y += vdso2c
quiet_cmd_vdso2c = VDSO2C $@
-define cmd_vdso2c
- $(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
+ cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
$(call if_changed,vdso2c)
@@ -56,13 +52,14 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
# Don't omit frame pointers for ease of userspace debugging, but do
# optimize sibling calls.
#
-CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables \
- -m64 -ffixed-g2 -ffixed-g3 -fcall-used-g4 -fcall-used-g5 -ffixed-g6 \
- -ffixed-g7 $(filter -g%,$(KBUILD_CFLAGS)) \
- $(call cc-option, -fno-stack-protector) -fno-omit-frame-pointer \
- -foptimize-sibling-calls -DBUILD_VDSO
+CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables -m64 \
+ $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+ -fno-omit-frame-pointer -foptimize-sibling-calls \
+ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+SPARC_REG_CFLAGS = -ffixed-g4 -ffixed-g5 -fcall-used-g5 -fcall-used-g7
-$(vobjs): KBUILD_CFLAGS += $(CFL)
+$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -75,7 +72,7 @@ $(obj)/%.so: $(obj)/%.so.dbg
$(call if_changed,objcopy)
CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf32_sparc,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m elf32_sparc -soname linux-gate.so.1
#This makes sure the $(obj) subdirectory exists even though vdso32/
#is not a kbuild sub-make subdirectory
@@ -93,7 +90,8 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 := $(filter-out -mcmodel=medlow,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic -mno-app-regs -ffixed-g7
+KBUILD_CFLAGS_32 := $(filter-out $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic
KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
@@ -111,12 +109,13 @@ $(obj)/vdso32.so.dbg: FORCE \
# The DSO images are built using a special linker script.
#
quiet_cmd_vdso = VDSO $@
- cmd_vdso = $(CC) -nostdlib -o $@ \
+ cmd_vdso = $(LD) -nostdlib -o $@ \
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
+ -T $(filter %.lds,$^) $(filter %.o,$^) && \
+ sh $(srctree)/$(src)/checkundef.sh '$(OBJDUMP)' '$@'
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
- $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic
+VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
+ $(call ld-option, --build-id) -Bsymbolic
GCOV_PROFILE := n
#
diff --git a/arch/sparc/vdso/checkundef.sh b/arch/sparc/vdso/checkundef.sh
new file mode 100644
index 000000000000..2d85876ffc32
--- /dev/null
+++ b/arch/sparc/vdso/checkundef.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+objdump="$1"
+file="$2"
+# pass only when objdump reports no undefined (*UND*) symbols besides
+# sparc's '#scratch' register markers
+$objdump -t "$file" | grep '*UND*' | grep -v '#scratch' > /dev/null 2>&1
+if [ $? -eq 1 ]; then
+ exit 0
+else
+ echo "$file: undefined symbols found" >&2
+ exit 1
+fi
diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
index 3feb3d960ca5..55662c3b4513 100644
--- a/arch/sparc/vdso/vclock_gettime.c
+++ b/arch/sparc/vdso/vclock_gettime.c
@@ -12,11 +12,6 @@
* Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
*/
-/* Disable profiling for userspace code: */
-#ifndef DISABLE_BRANCH_PROFILING
-#define DISABLE_BRANCH_PROFILING
-#endif
-
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/string.h>
@@ -26,16 +21,19 @@
#include <asm/clocksource.h>
#include <asm/vvar.h>
-#undef TICK_PRIV_BIT
#ifdef CONFIG_SPARC64
-#define TICK_PRIV_BIT (1UL << 63)
-#else
-#define TICK_PRIV_BIT (1ULL << 63)
-#endif
-
#define SYSCALL_STRING \
"ta 0x6d;" \
- "sub %%g0, %%o0, %%o0;" \
+ "bcs,a 1f;" \
+ " sub %%g0, %%o0, %%o0;" \
+ "1:"
+#else
+#define SYSCALL_STRING \
+ "ta 0x10;" \
+ "bcs,a 1f;" \
+ " sub %%g0, %%o0, %%o0;" \
+ "1:"
+#endif
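The old stubs negated %o0 unconditionally, which corrupted successful returns; sparc syscall traps report failure in the carry flag, so the fix negates only on error. bcs,a with an annulled delay slot does it in two instructions: the sub executes only when the branch is taken. As a C model (o0 and carry_set are stand-ins for the trap's outputs):

    long ret = o0;

    if (carry_set)          /* bcs,a: the annulled sub runs only here */
            ret = -ret;     /* kernel errno -> libc-style negative value */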
#define SYSCALL_CLOBBERS \
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
@@ -50,24 +48,22 @@
* Compute the vvar page's address in the process address space, and return it
* as a pointer to the vvar_data.
*/
-static notrace noinline struct vvar_data *
-get_vvar_data(void)
+notrace static __always_inline struct vvar_data *get_vvar_data(void)
{
unsigned long ret;
/*
- * vdso data page is the first vDSO page so grab the return address
+	 * vdso data page is the first vDSO page, so grab the PC
* and move up a page to get to the data page.
*/
- ret = (unsigned long)__builtin_return_address(0);
+ __asm__("rd %%pc, %0" : "=r" (ret));
ret &= ~(8192 - 1);
ret -= 8192;
return (struct vvar_data *) ret;
}
-static notrace long
-vdso_fallback_gettime(long clock, struct timespec *ts)
+notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
register long num __asm__("g1") = __NR_clock_gettime;
register long o0 __asm__("o0") = clock;
@@ -78,8 +74,7 @@ vdso_fallback_gettime(long clock, struct timespec *ts)
return o0;
}
-static notrace __always_inline long
-vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
+notrace static long vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
{
register long num __asm__("g1") = __NR_gettimeofday;
register long o0 __asm__("o0") = (long) tv;
@@ -91,38 +86,44 @@ vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
}
#ifdef CONFIG_SPARC64
-static notrace noinline u64
-vread_tick(void) {
+notrace static __always_inline u64 vread_tick(void)
+{
+ u64 ret;
+
+ __asm__ __volatile__("rd %%tick, %0" : "=r" (ret));
+ return ret;
+}
+
+notrace static __always_inline u64 vread_tick_stick(void)
+{
u64 ret;
- __asm__ __volatile__("rd %%asr24, %0 \n"
- ".section .vread_tick_patch, \"ax\" \n"
- "rd %%tick, %0 \n"
- ".previous \n"
- : "=&r" (ret));
- return ret & ~TICK_PRIV_BIT;
+ __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret));
+ return ret;
}
#else
-static notrace noinline u64
-vread_tick(void)
+notrace static __always_inline u64 vread_tick(void)
{
- unsigned int lo, hi;
-
- __asm__ __volatile__("rd %%asr24, %%g1\n\t"
- "srlx %%g1, 32, %1\n\t"
- "srl %%g1, 0, %0\n"
- ".section .vread_tick_patch, \"ax\" \n"
- "rd %%tick, %%g1\n"
- ".previous \n"
- : "=&r" (lo), "=&r" (hi)
- :
- : "g1");
- return lo | ((u64)hi << 32);
+ register unsigned long long ret asm("o4");
+
+ __asm__ __volatile__("rd %%tick, %L0\n\t"
+ "srlx %L0, 32, %H0"
+ : "=r" (ret));
+ return ret;
+}
+
+notrace static __always_inline u64 vread_tick_stick(void)
+{
+ register unsigned long long ret asm("o4");
+
+ __asm__ __volatile__("rd %%asr24, %L0\n\t"
+ "srlx %L0, 32, %H0"
+ : "=r" (ret));
+ return ret;
}
#endif
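The 32-bit variants rely on two details: under v8plus the full 64-bit tick value lands in a single 64-bit-capable register, and the %L0/%H0 operand modifiers address the low and high words of a long long placed in an even/odd register pair (forced with asm("o4")). One rd plus one srlx therefore fills both halves without the scratch %g1 and clobber list the deleted version needed:

    register unsigned long long v asm("o4");    /* pair %o4/%o5 */

    __asm__ __volatile__("rd   %%tick, %L0\n\t" /* 64 bits into low reg */
                         "srlx %L0, 32, %H0"    /* high word into %H0 */
                         : "=r" (v));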
-static notrace inline u64
-vgetsns(struct vvar_data *vvar)
+notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
{
u64 v;
u64 cycles;
@@ -132,13 +133,22 @@ vgetsns(struct vvar_data *vvar)
return v * vvar->clock.mult;
}
-static notrace noinline int
-do_realtime(struct vvar_data *vvar, struct timespec *ts)
+notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar)
+{
+ u64 v;
+ u64 cycles;
+
+ cycles = vread_tick_stick();
+ v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
+ return v * vvar->clock.mult;
+}
+
+notrace static __always_inline int do_realtime(struct vvar_data *vvar,
+ struct timespec *ts)
{
unsigned long seq;
u64 ns;
- ts->tv_nsec = 0;
do {
seq = vvar_read_begin(vvar);
ts->tv_sec = vvar->wall_time_sec;
@@ -147,18 +157,38 @@ do_realtime(struct vvar_data *vvar, struct timespec *ts)
ns >>= vvar->clock.shift;
} while (unlikely(vvar_read_retry(vvar, seq)));
- timespec_add_ns(ts, ns);
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
return 0;
}
-static notrace noinline int
-do_monotonic(struct vvar_data *vvar, struct timespec *ts)
+notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
+ struct timespec *ts)
+{
+ unsigned long seq;
+ u64 ns;
+
+ do {
+ seq = vvar_read_begin(vvar);
+ ts->tv_sec = vvar->wall_time_sec;
+ ns = vvar->wall_time_snsec;
+ ns += vgetsns_stick(vvar);
+ ns >>= vvar->clock.shift;
+ } while (unlikely(vvar_read_retry(vvar, seq)));
+
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
+notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
+ struct timespec *ts)
{
unsigned long seq;
u64 ns;
- ts->tv_nsec = 0;
do {
seq = vvar_read_begin(vvar);
ts->tv_sec = vvar->monotonic_time_sec;
@@ -167,13 +197,34 @@ do_monotonic(struct vvar_data *vvar, struct timespec *ts)
ns >>= vvar->clock.shift;
} while (unlikely(vvar_read_retry(vvar, seq)));
- timespec_add_ns(ts, ns);
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
+notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
+ struct timespec *ts)
+{
+ unsigned long seq;
+ u64 ns;
+
+ do {
+ seq = vvar_read_begin(vvar);
+ ts->tv_sec = vvar->monotonic_time_sec;
+ ns = vvar->monotonic_time_snsec;
+ ns += vgetsns_stick(vvar);
+ ns >>= vvar->clock.shift;
+ } while (unlikely(vvar_read_retry(vvar, seq)));
+
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
return 0;
}
-static notrace noinline int
-do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
+notrace static int do_realtime_coarse(struct vvar_data *vvar,
+ struct timespec *ts)
{
unsigned long seq;
@@ -185,8 +236,8 @@ do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
return 0;
}
-static notrace noinline int
-do_monotonic_coarse(struct vvar_data *vvar, struct timespec *ts)
+notrace static int do_monotonic_coarse(struct vvar_data *vvar,
+ struct timespec *ts)
{
unsigned long seq;
@@ -228,6 +279,31 @@ clock_gettime(clockid_t, struct timespec *)
__attribute__((weak, alias("__vdso_clock_gettime")));
notrace int
+__vdso_clock_gettime_stick(clockid_t clock, struct timespec *ts)
+{
+ struct vvar_data *vvd = get_vvar_data();
+
+ switch (clock) {
+ case CLOCK_REALTIME:
+ if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+ break;
+ return do_realtime_stick(vvd, ts);
+ case CLOCK_MONOTONIC:
+ if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+ break;
+ return do_monotonic_stick(vvd, ts);
+ case CLOCK_REALTIME_COARSE:
+ return do_realtime_coarse(vvd, ts);
+ case CLOCK_MONOTONIC_COARSE:
+ return do_monotonic_coarse(vvd, ts);
+ }
+ /*
+	 * Unknown clock ID? Fall back to the syscall.
+ */
+ return vdso_fallback_gettime(clock, ts);
+}
+
+notrace int
__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
struct vvar_data *vvd = get_vvar_data();
@@ -262,3 +338,36 @@ __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
int
gettimeofday(struct timeval *, struct timezone *)
__attribute__((weak, alias("__vdso_gettimeofday")));
+
+notrace int
+__vdso_gettimeofday_stick(struct timeval *tv, struct timezone *tz)
+{
+ struct vvar_data *vvd = get_vvar_data();
+
+ if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
+ if (likely(tv != NULL)) {
+ union tstv_t {
+ struct timespec ts;
+ struct timeval tv;
+ } *tstv = (union tstv_t *) tv;
+ do_realtime_stick(vvd, &tstv->ts);
+ /*
+ * Assign before dividing to ensure that the division is
+ * done in the type of tv_usec, not tv_nsec.
+ *
+			 * There cannot be > 1 billion nsec in a second:
+ * do_realtime() has already distributed such overflow
+ * into tv_sec. So we can assign it to an int safely.
+ */
+ tstv->tv.tv_usec = tstv->ts.tv_nsec;
+ tstv->tv.tv_usec /= 1000;
+ }
+ if (unlikely(tz != NULL)) {
+ /* Avoid memcpy. Some old compilers fail to inline it */
+ tz->tz_minuteswest = vvd->tz_minuteswest;
+ tz->tz_dsttime = vvd->tz_dsttime;
+ }
+ return 0;
+ }
+ return vdso_fallback_gettimeofday(tv, tz);
+}
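Both exported entry points can be exercised from ordinary userspace; with a working vDSO, neither call below should appear as a syscall under strace. A plain test program, nothing patch-specific assumed:

    #include <stdio.h>
    #include <sys/time.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;
            struct timeval tv;

            clock_gettime(CLOCK_MONOTONIC, &ts);    /* __vdso_clock_gettime */
            gettimeofday(&tv, NULL);                /* __vdso_gettimeofday */
            printf("%lld.%09ld / %lld.%06ld\n",
                   (long long)ts.tv_sec, ts.tv_nsec,
                   (long long)tv.tv_sec, (long)tv.tv_usec);
            return 0;
    }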
diff --git a/arch/sparc/vdso/vdso-layout.lds.S b/arch/sparc/vdso/vdso-layout.lds.S
index f2c83abaca12..d31e57e8a3bb 100644
--- a/arch/sparc/vdso/vdso-layout.lds.S
+++ b/arch/sparc/vdso/vdso-layout.lds.S
@@ -73,12 +73,6 @@ SECTIONS
.text : { *(.text*) } :text =0x90909090,
- .vread_tick_patch : {
- vread_tick_patch_start = .;
- *(.vread_tick_patch)
- vread_tick_patch_end = .;
- }
-
/DISCARD/ : {
*(.discard)
*(.discard.*)
diff --git a/arch/sparc/vdso/vdso.lds.S b/arch/sparc/vdso/vdso.lds.S
index f3caa29a331c..629ab6900df7 100644
--- a/arch/sparc/vdso/vdso.lds.S
+++ b/arch/sparc/vdso/vdso.lds.S
@@ -18,8 +18,10 @@ VERSION {
global:
clock_gettime;
__vdso_clock_gettime;
+ __vdso_clock_gettime_stick;
gettimeofday;
__vdso_gettimeofday;
+ __vdso_gettimeofday_stick;
local: *;
};
}
diff --git a/arch/sparc/vdso/vdso2c.c b/arch/sparc/vdso/vdso2c.c
index 9f5b1cd6d51d..ab7504176a7f 100644
--- a/arch/sparc/vdso/vdso2c.c
+++ b/arch/sparc/vdso/vdso2c.c
@@ -63,9 +63,6 @@ enum {
sym_vvar_start,
sym_VDSO_FAKE_SECTION_TABLE_START,
sym_VDSO_FAKE_SECTION_TABLE_END,
- sym_vread_tick,
- sym_vread_tick_patch_start,
- sym_vread_tick_patch_end
};
struct vdso_sym {
@@ -81,9 +78,6 @@ struct vdso_sym required_syms[] = {
[sym_VDSO_FAKE_SECTION_TABLE_END] = {
"VDSO_FAKE_SECTION_TABLE_END", 0
},
- [sym_vread_tick] = {"vread_tick", 1},
- [sym_vread_tick_patch_start] = {"vread_tick_patch_start", 1},
- [sym_vread_tick_patch_end] = {"vread_tick_patch_end", 1}
};
__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
diff --git a/arch/sparc/vdso/vdso2c.h b/arch/sparc/vdso/vdso2c.h
index 808decb0f7be..60d69acc748f 100644
--- a/arch/sparc/vdso/vdso2c.h
+++ b/arch/sparc/vdso/vdso2c.h
@@ -17,7 +17,6 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
unsigned long mapping_size;
int i;
unsigned long j;
-
ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr;
ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
ELF(Dyn) *dyn = 0, *dyn_end = 0;
diff --git a/arch/sparc/vdso/vdso32/vdso32.lds.S b/arch/sparc/vdso/vdso32/vdso32.lds.S
index 53575ee154c4..218930fdff03 100644
--- a/arch/sparc/vdso/vdso32/vdso32.lds.S
+++ b/arch/sparc/vdso/vdso32/vdso32.lds.S
@@ -17,8 +17,10 @@ VERSION {
global:
clock_gettime;
__vdso_clock_gettime;
+ __vdso_clock_gettime_stick;
gettimeofday;
__vdso_gettimeofday;
+ __vdso_gettimeofday_stick;
local: *;
};
}
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
index f51595f861b8..154fe8adc090 100644
--- a/arch/sparc/vdso/vma.c
+++ b/arch/sparc/vdso/vma.c
@@ -16,6 +16,8 @@
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
+#include <asm/cacheflush.h>
+#include <asm/spitfire.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
@@ -40,20 +42,221 @@ static struct vm_special_mapping vdso_mapping32 = {
struct vvar_data *vvar_data;
-#define SAVE_INSTR_SIZE 4
+struct vdso_elfinfo32 {
+ Elf32_Ehdr *hdr;
+ Elf32_Sym *dynsym;
+ unsigned long dynsymsize;
+ const char *dynstr;
+ unsigned long text;
+};
+
+struct vdso_elfinfo64 {
+ Elf64_Ehdr *hdr;
+ Elf64_Sym *dynsym;
+ unsigned long dynsymsize;
+ const char *dynstr;
+ unsigned long text;
+};
+
+struct vdso_elfinfo {
+ union {
+ struct vdso_elfinfo32 elf32;
+ struct vdso_elfinfo64 elf64;
+ } u;
+};
+
+static void *one_section64(struct vdso_elfinfo64 *e, const char *name,
+ unsigned long *size)
+{
+ const char *snames;
+ Elf64_Shdr *shdrs;
+ unsigned int i;
+
+ shdrs = (void *)e->hdr + e->hdr->e_shoff;
+ snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < e->hdr->e_shnum; i++) {
+ if (!strcmp(snames+shdrs[i].sh_name, name)) {
+ if (size)
+ *size = shdrs[i].sh_size;
+ return (void *)e->hdr + shdrs[i].sh_offset;
+ }
+ }
+ return NULL;
+}
+
+static int find_sections64(const struct vdso_image *image, struct vdso_elfinfo *_e)
+{
+ struct vdso_elfinfo64 *e = &_e->u.elf64;
+
+ e->hdr = image->data;
+ e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize);
+ e->dynstr = one_section64(e, ".dynstr", NULL);
+
+ if (!e->dynsym || !e->dynstr) {
+ pr_err("VDSO64: Missing symbol sections.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static Elf64_Sym *find_sym64(const struct vdso_elfinfo64 *e, const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; i < (e->dynsymsize / sizeof(Elf64_Sym)); i++) {
+ Elf64_Sym *s = &e->dynsym[i];
+ if (s->st_name == 0)
+ continue;
+ if (!strcmp(e->dynstr + s->st_name, name))
+ return s;
+ }
+ return NULL;
+}
+
+static int patchsym64(struct vdso_elfinfo *_e, const char *orig,
+ const char *new)
+{
+ struct vdso_elfinfo64 *e = &_e->u.elf64;
+ Elf64_Sym *osym = find_sym64(e, orig);
+ Elf64_Sym *nsym = find_sym64(e, new);
+
+ if (!nsym || !osym) {
+ pr_err("VDSO64: Missing symbols.\n");
+ return -ENODEV;
+ }
+ osym->st_value = nsym->st_value;
+ osym->st_size = nsym->st_size;
+ osym->st_info = nsym->st_info;
+ osym->st_other = nsym->st_other;
+ osym->st_shndx = nsym->st_shndx;
+
+ return 0;
+}
+
+static void *one_section32(struct vdso_elfinfo32 *e, const char *name,
+ unsigned long *size)
+{
+ const char *snames;
+ Elf32_Shdr *shdrs;
+ unsigned int i;
+
+ shdrs = (void *)e->hdr + e->hdr->e_shoff;
+ snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < e->hdr->e_shnum; i++) {
+ if (!strcmp(snames+shdrs[i].sh_name, name)) {
+ if (size)
+ *size = shdrs[i].sh_size;
+ return (void *)e->hdr + shdrs[i].sh_offset;
+ }
+ }
+ return NULL;
+}
+
+static int find_sections32(const struct vdso_image *image, struct vdso_elfinfo *_e)
+{
+ struct vdso_elfinfo32 *e = &_e->u.elf32;
+
+ e->hdr = image->data;
+ e->dynsym = one_section32(e, ".dynsym", &e->dynsymsize);
+ e->dynstr = one_section32(e, ".dynstr", NULL);
+
+ if (!e->dynsym || !e->dynstr) {
+ pr_err("VDSO32: Missing symbol sections.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static Elf32_Sym *find_sym32(const struct vdso_elfinfo32 *e, const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; i < (e->dynsymsize / sizeof(Elf32_Sym)); i++) {
+ Elf32_Sym *s = &e->dynsym[i];
+ if (s->st_name == 0)
+ continue;
+ if (!strcmp(e->dynstr + s->st_name, name))
+ return s;
+ }
+ return NULL;
+}
+
+static int patchsym32(struct vdso_elfinfo *_e, const char *orig,
+ const char *new)
+{
+ struct vdso_elfinfo32 *e = &_e->u.elf32;
+ Elf32_Sym *osym = find_sym32(e, orig);
+ Elf32_Sym *nsym = find_sym32(e, new);
+
+ if (!nsym || !osym) {
+ pr_err("VDSO32: Missing symbols.\n");
+ return -ENODEV;
+ }
+ osym->st_value = nsym->st_value;
+ osym->st_size = nsym->st_size;
+ osym->st_info = nsym->st_info;
+ osym->st_other = nsym->st_other;
+ osym->st_shndx = nsym->st_shndx;
+
+ return 0;
+}
+
+static int find_sections(const struct vdso_image *image, struct vdso_elfinfo *e,
+ bool elf64)
+{
+ if (elf64)
+ return find_sections64(image, e);
+ else
+ return find_sections32(image, e);
+}
+
+static int patch_one_symbol(struct vdso_elfinfo *e, const char *orig,
+ const char *new_target, bool elf64)
+{
+ if (elf64)
+ return patchsym64(e, orig, new_target);
+ else
+ return patchsym32(e, orig, new_target);
+}
+
+static int stick_patch(const struct vdso_image *image, struct vdso_elfinfo *e, bool elf64)
+{
+ int err;
+
+ err = find_sections(image, e, elf64);
+ if (err)
+ return err;
+
+ err = patch_one_symbol(e,
+ "__vdso_gettimeofday",
+ "__vdso_gettimeofday_stick", elf64);
+ if (err)
+ return err;
+
+ return patch_one_symbol(e,
+ "__vdso_clock_gettime",
+ "__vdso_clock_gettime_stick", elf64);
+}
/*
* Allocate pages for the vdso and vvar, and copy in the vdso text from the
* kernel image.
*/
int __init init_vdso_image(const struct vdso_image *image,
- struct vm_special_mapping *vdso_mapping)
+ struct vm_special_mapping *vdso_mapping, bool elf64)
{
- int i;
+ int cnpages = (image->size) / PAGE_SIZE;
struct page *dp, **dpp = NULL;
- int dnpages = 0;
struct page *cp, **cpp = NULL;
- int cnpages = (image->size) / PAGE_SIZE;
+ struct vdso_elfinfo ei;
+ int i, dnpages = 0;
+
+ if (tlb_type != spitfire) {
+ int err = stick_patch(image, &ei, elf64);
+ if (err)
+ return err;
+ }
/*
 * First, the vdso text. This is initialized data, an integral number of
@@ -68,22 +271,6 @@ int __init init_vdso_image(const struct vdso_image *image,
if (!cpp)
goto oom;
- if (vdso_fix_stick) {
- /*
- * If the system uses %tick instead of %stick, patch the VDSO
- * with instruction reading %tick instead of %stick.
- */
- unsigned int j, k = SAVE_INSTR_SIZE;
- unsigned char *data = image->data;
-
- for (j = image->sym_vread_tick_patch_start;
- j < image->sym_vread_tick_patch_end; j++) {
-
- data[image->sym_vread_tick + k] = data[j];
- k++;
- }
- }
-
for (i = 0; i < cnpages; i++) {
cp = alloc_page(GFP_KERNEL);
if (!cp)
@@ -146,13 +333,13 @@ static int __init init_vdso(void)
{
int err = 0;
#ifdef CONFIG_SPARC64
- err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64);
+ err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64, true);
if (err)
return err;
#endif
#ifdef CONFIG_COMPAT
- err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32);
+ err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32, false);
#endif
return err;
@@ -262,7 +449,9 @@ static __init int vdso_setup(char *s)
unsigned long val;
err = kstrtoul(s, 10, &val);
+ if (err)
+ return err;
vdso_enabled = val;
- return err;
+ return 0;
}
__setup("vdso=", vdso_setup);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 83c470364dfb..74c002ddc0ce 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
@@ -142,7 +143,6 @@ struct cow {
#define MAX_SG 64
struct ubd {
- struct list_head restart;
/* name (and fd, below) of the file opened for writing, either the
* backing or the cow file. */
char *file;
@@ -156,11 +156,8 @@ struct ubd {
struct cow cow;
struct platform_device pdev;
struct request_queue *queue;
+ struct blk_mq_tag_set tag_set;
spinlock_t lock;
- struct scatterlist sg[MAX_SG];
- struct request *request;
- int start_sg, end_sg;
- sector_t rq_pos;
};
#define DEFAULT_COW { \
@@ -182,10 +179,6 @@ struct ubd {
.shared = 0, \
.cow = DEFAULT_COW, \
.lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
- .request = NULL, \
- .start_sg = 0, \
- .end_sg = 0, \
- .rq_pos = 0, \
}
/* Protected by ubd_lock */
@@ -196,6 +189,9 @@ static int fake_ide = 0;
static struct proc_dir_entry *proc_ide_root = NULL;
static struct proc_dir_entry *proc_ide = NULL;
+static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd);
+
static void make_proc_ide(void)
{
proc_ide_root = proc_mkdir("ide", NULL);
@@ -436,11 +432,8 @@ __uml_help(udb_setup,
" in the boot output.\n\n"
);
-static void do_ubd_request(struct request_queue * q);
-
/* Only changed by ubd_init, which is an initcall. */
static int thread_fd = -1;
-static LIST_HEAD(restart);
/* Function to read several request pointers at a time
* handling fractional reads if (and as) needed
@@ -498,9 +491,6 @@ static int bulk_req_safe_read(
/* Called without dev->lock held, and only in interrupt context. */
static void ubd_handler(void)
{
- struct ubd *ubd;
- struct list_head *list, *next_ele;
- unsigned long flags;
int n;
int count;
@@ -520,23 +510,17 @@ static void ubd_handler(void)
return;
}
for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
- blk_end_request(
- (*irq_req_buffer)[count]->req,
- BLK_STS_OK,
- (*irq_req_buffer)[count]->length
- );
- kfree((*irq_req_buffer)[count]);
+ struct io_thread_req *io_req = (*irq_req_buffer)[count];
+ int err = io_req->error ? BLK_STS_IOERR : BLK_STS_OK;
+
+ if (!blk_update_request(io_req->req, err, io_req->length))
+ __blk_mq_end_request(io_req->req, err);
+
+ kfree(io_req);
}
}
- reactivate_fd(thread_fd, UBD_IRQ);
- list_for_each_safe(list, next_ele, &restart){
- ubd = container_of(list, struct ubd, restart);
- list_del_init(&ubd->restart);
- spin_lock_irqsave(&ubd->lock, flags);
- do_ubd_request(ubd->queue);
- spin_unlock_irqrestore(&ubd->lock, flags);
- }
+ reactivate_fd(thread_fd, UBD_IRQ);
}
static irqreturn_t ubd_intr(int irq, void *dev)
@@ -857,6 +841,7 @@ static void ubd_device_release(struct device *dev)
struct ubd *ubd_dev = dev_get_drvdata(dev);
blk_cleanup_queue(ubd_dev->queue);
+ blk_mq_free_tag_set(&ubd_dev->tag_set);
*ubd_dev = ((struct ubd) DEFAULT_UBD);
}
@@ -891,7 +876,7 @@ static int ubd_disk_register(int major, u64 size, int unit,
disk->private_data = &ubd_devs[unit];
disk->queue = ubd_devs[unit].queue;
- device_add_disk(parent, disk);
+ device_add_disk(parent, disk, NULL);
*disk_out = disk;
return 0;
@@ -899,6 +884,10 @@ static int ubd_disk_register(int major, u64 size, int unit,
#define ROUND_BLOCK(n) ((n + ((1 << 9) - 1)) & (-1 << 9))
+static const struct blk_mq_ops ubd_mq_ops = {
+ .queue_rq = ubd_queue_rq,
+};
+
static int ubd_add(int n, char **error_out)
{
struct ubd *ubd_dev = &ubd_devs[n];
@@ -915,15 +904,23 @@ static int ubd_add(int n, char **error_out)
ubd_dev->size = ROUND_BLOCK(ubd_dev->size);
- INIT_LIST_HEAD(&ubd_dev->restart);
- sg_init_table(ubd_dev->sg, MAX_SG);
+ ubd_dev->tag_set.ops = &ubd_mq_ops;
+ ubd_dev->tag_set.queue_depth = 64;
+ ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
+ ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ubd_dev->tag_set.driver_data = ubd_dev;
+ ubd_dev->tag_set.nr_hw_queues = 1;
- err = -ENOMEM;
- ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock);
- if (ubd_dev->queue == NULL) {
- *error_out = "Failed to initialize device queue";
+ err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
+ if (err)
goto out;
+
+ ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);
+ if (IS_ERR(ubd_dev->queue)) {
+ err = PTR_ERR(ubd_dev->queue);
+ goto out_cleanup;
}
+
ubd_dev->queue->queuedata = ubd_dev;
blk_queue_write_cache(ubd_dev->queue, true, false);
@@ -931,7 +928,7 @@ static int ubd_add(int n, char **error_out)
err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
if(err){
*error_out = "Failed to register device";
- goto out_cleanup;
+ goto out_cleanup_tags;
}
if (fake_major != UBD_MAJOR)
@@ -949,6 +946,8 @@ static int ubd_add(int n, char **error_out)
out:
return err;
+out_cleanup_tags:
+ blk_mq_free_tag_set(&ubd_dev->tag_set);
out_cleanup:
blk_cleanup_queue(ubd_dev->queue);
goto out;
@@ -1290,123 +1289,82 @@ static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
req->bitmap_words, bitmap_len);
}
-/* Called with dev->lock held */
-static void prepare_request(struct request *req, struct io_thread_req *io_req,
- unsigned long long offset, int page_offset,
- int len, struct page *page)
+static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
+ u64 off, struct bio_vec *bvec)
{
- struct gendisk *disk = req->rq_disk;
- struct ubd *ubd_dev = disk->private_data;
-
- io_req->req = req;
- io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
- ubd_dev->fd;
- io_req->fds[1] = ubd_dev->fd;
- io_req->cow_offset = -1;
- io_req->offset = offset;
- io_req->length = len;
- io_req->error = 0;
- io_req->sector_mask = 0;
-
- io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
- io_req->offsets[0] = 0;
- io_req->offsets[1] = ubd_dev->cow.data_offset;
- io_req->buffer = page_address(page) + page_offset;
- io_req->sectorsize = 1 << 9;
-
- if(ubd_dev->cow.file != NULL)
- cowify_req(io_req, ubd_dev->cow.bitmap,
- ubd_dev->cow.bitmap_offset, ubd_dev->cow.bitmap_len);
-
-}
+ struct ubd *dev = hctx->queue->queuedata;
+ struct io_thread_req *io_req;
+ int ret;
-/* Called with dev->lock held */
-static void prepare_flush_request(struct request *req,
- struct io_thread_req *io_req)
-{
- struct gendisk *disk = req->rq_disk;
- struct ubd *ubd_dev = disk->private_data;
+ io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
+ if (!io_req)
+ return -ENOMEM;
io_req->req = req;
- io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
- ubd_dev->fd;
- io_req->op = UBD_FLUSH;
-}
+ if (dev->cow.file)
+ io_req->fds[0] = dev->cow.fd;
+ else
+ io_req->fds[0] = dev->fd;
-static bool submit_request(struct io_thread_req *io_req, struct ubd *dev)
-{
- int n = os_write_file(thread_fd, &io_req,
- sizeof(io_req));
- if (n != sizeof(io_req)) {
- if (n != -EAGAIN)
- printk("write to io thread failed, "
- "errno = %d\n", -n);
- else if (list_empty(&dev->restart))
- list_add(&dev->restart, &restart);
+ if (req_op(req) == REQ_OP_FLUSH) {
+ io_req->op = UBD_FLUSH;
+ } else {
+ io_req->fds[1] = dev->fd;
+ io_req->cow_offset = -1;
+ io_req->offset = off;
+ io_req->length = bvec->bv_len;
+ io_req->error = 0;
+ io_req->sector_mask = 0;
+
+ io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
+ io_req->offsets[0] = 0;
+ io_req->offsets[1] = dev->cow.data_offset;
+ io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
+ io_req->sectorsize = 1 << 9;
+
+ if (dev->cow.file) {
+ cowify_req(io_req, dev->cow.bitmap,
+ dev->cow.bitmap_offset, dev->cow.bitmap_len);
+ }
+ }
+ ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
+ if (ret != sizeof(io_req)) {
+ if (ret != -EAGAIN)
+ pr_err("write to io thread failed: %d\n", -ret);
kfree(io_req);
- return false;
}
- return true;
+
+ return ret;
}
-/* Called with dev->lock held */
-static void do_ubd_request(struct request_queue *q)
+static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct io_thread_req *io_req;
- struct request *req;
-
- while(1){
- struct ubd *dev = q->queuedata;
- if(dev->request == NULL){
- struct request *req = blk_fetch_request(q);
- if(req == NULL)
- return;
-
- dev->request = req;
- dev->rq_pos = blk_rq_pos(req);
- dev->start_sg = 0;
- dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
- }
-
- req = dev->request;
+ struct request *req = bd->rq;
+ int ret = 0;
- if (req_op(req) == REQ_OP_FLUSH) {
- io_req = kmalloc(sizeof(struct io_thread_req),
- GFP_ATOMIC);
- if (io_req == NULL) {
- if (list_empty(&dev->restart))
- list_add(&dev->restart, &restart);
- return;
- }
- prepare_flush_request(req, io_req);
- if (submit_request(io_req, dev) == false)
- return;
- }
+ blk_mq_start_request(req);
- while(dev->start_sg < dev->end_sg){
- struct scatterlist *sg = &dev->sg[dev->start_sg];
-
- io_req = kmalloc(sizeof(struct io_thread_req),
- GFP_ATOMIC);
- if(io_req == NULL){
- if(list_empty(&dev->restart))
- list_add(&dev->restart, &restart);
- return;
- }
- prepare_request(req, io_req,
- (unsigned long long)dev->rq_pos << 9,
- sg->offset, sg->length, sg_page(sg));
-
- if (submit_request(io_req, dev) == false)
- return;
-
- dev->rq_pos += sg->length >> 9;
- dev->start_sg++;
+ if (req_op(req) == REQ_OP_FLUSH) {
+ ret = ubd_queue_one_vec(hctx, req, 0, NULL);
+ } else {
+ struct req_iterator iter;
+ struct bio_vec bvec;
+ u64 off = (u64)blk_rq_pos(req) << 9;
+
+ rq_for_each_segment(bvec, req, iter) {
+ ret = ubd_queue_one_vec(hctx, req, off, &bvec);
+ if (ret < 0)
+ goto out;
+ off += bvec.bv_len;
}
- dev->end_sg = 0;
- dev->request = NULL;
}
+out:
+ if (ret < 0)
+ blk_mq_requeue_request(req, true);
+ return BLK_STS_OK;
}
static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
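The ubd changes above are an instance of the standard legacy-request-to-blk-mq conversion: the driver allocates a tag set, derives the request queue from it, and services requests from .queue_rq instead of a request_fn. A condensed sketch of that pattern, with a hypothetical demo_dev driver (the blk-mq calls match the ones used in this hunk; the demo_* names and the synchronous completion are illustrative only):

        #include <linux/blk-mq.h>
        #include <linux/blkdev.h>
        #include <linux/err.h>

        struct demo_dev {
                struct blk_mq_tag_set tag_set;
                struct request_queue *queue;
        };

        static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                          const struct blk_mq_queue_data *bd)
        {
                struct request *req = bd->rq;

                blk_mq_start_request(req);
                /* Hand the request to the backend here. A real driver
                 * usually completes asynchronously, as ubd does from
                 * ubd_handler() via blk_update_request() and
                 * __blk_mq_end_request(). */
                blk_mq_end_request(req, BLK_STS_OK);
                return BLK_STS_OK;
        }

        static const struct blk_mq_ops demo_mq_ops = {
                .queue_rq = demo_queue_rq,
        };

        static int demo_init_queue(struct demo_dev *dev)
        {
                int err;

                dev->tag_set.ops = &demo_mq_ops;
                dev->tag_set.nr_hw_queues = 1;
                dev->tag_set.queue_depth = 64;
                dev->tag_set.numa_node = NUMA_NO_NODE;
                dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
                dev->tag_set.driver_data = dev;

                err = blk_mq_alloc_tag_set(&dev->tag_set);
                if (err)
                        return err;

                dev->queue = blk_mq_init_queue(&dev->tag_set);
                if (IS_ERR(dev->queue)) {
                        blk_mq_free_tag_set(&dev->tag_set);
                        return PTR_ERR(dev->queue);
                }
                dev->queue->queuedata = dev;
                return 0;
        }

Teardown mirrors ubd_device_release() above: blk_cleanup_queue() on the queue, then blk_mq_free_tag_set() on the tag set.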
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index 7adb4e6b658a..4049f2c46387 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -53,8 +53,6 @@
CON_INITCALL
}
- SECURITY_INIT
-
.exitcall : {
__exitcall_begin = .;
*(.exitcall.exit)
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 60eae744d8fd..3a3b40f79558 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -4,6 +4,7 @@ config UNICORE32
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select DMA_DIRECT_OPS
select HAVE_MEMBLOCK
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
@@ -20,7 +21,6 @@ config UNICORE32
select GENERIC_IOMAP
select MODULES_USE_ELF_REL
select NEED_DMA_MAP_STATE
- select SWIOTLB
help
UniCore-32 is a 32-bit Instruction Set Architecture,
including a series of low-power-consumption RISC chip
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index bfc7abe77905..1372553dc0a9 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += compat.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/unicore32/include/asm/bug.h b/arch/unicore32/include/asm/bug.h
index 93a56f3e2344..83c7687a0e61 100644
--- a/arch/unicore32/include/asm/bug.h
+++ b/arch/unicore32/include/asm/bug.h
@@ -17,6 +17,7 @@ struct siginfo;
extern void die(const char *msg, struct pt_regs *regs, int err);
extern void uc32_notify_die(const char *str, struct pt_regs *regs,
- struct siginfo *info, unsigned long err, unsigned long trap);
+ int sig, int code, void __user *addr,
+ unsigned long err, unsigned long trap);
#endif /* __UNICORE_BUG_H__ */
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
deleted file mode 100644
index 790bc2ef4af2..000000000000
--- a/arch/unicore32/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * linux/arch/unicore32/include/asm/dma-mapping.h
- *
- * Code specific to PKUnity SoC and UniCore ISA
- *
- * Copyright (C) 2001-2010 GUAN Xue-tao
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __UNICORE_DMA_MAPPING_H__
-#define __UNICORE_DMA_MAPPING_H__
-
-#include <linux/swiotlb.h>
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &swiotlb_dma_ops;
-}
-
-#endif
diff --git a/arch/unicore32/include/uapi/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h
index 65856eaab163..1e8fe5941b8a 100644
--- a/arch/unicore32/include/uapi/asm/unistd.h
+++ b/arch/unicore32/include/uapi/asm/unistd.h
@@ -15,4 +15,5 @@
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/unicore32/kernel/fpu-ucf64.c b/arch/unicore32/kernel/fpu-ucf64.c
index 8594b168f25e..fc5dad32a982 100644
--- a/arch/unicore32/kernel/fpu-ucf64.c
+++ b/arch/unicore32/kernel/fpu-ucf64.c
@@ -54,14 +54,6 @@
*/
void ucf64_raise_sigfpe(struct pt_regs *regs)
{
- siginfo_t info;
-
- clear_siginfo(&info);
-
- info.si_signo = SIGFPE;
- info.si_code = FPE_FLTUNK;
- info.si_addr = (void __user *)(instruction_pointer(regs) - 4);
-
/*
* This is the same as NWFPE, because it's not clear what
* this is used for
@@ -69,7 +61,9 @@ void ucf64_raise_sigfpe(struct pt_regs *regs)
current->thread.error_code = 0;
current->thread.trap_no = 6;
- send_sig_info(SIGFPE, &info, current);
+ send_sig_fault(SIGFPE, FPE_FLTUNK,
+ (void __user *)(instruction_pointer(regs) - 4),
+ current);
}
/*
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
index c4ac6043ebb0..fb376d83e043 100644
--- a/arch/unicore32/kernel/traps.c
+++ b/arch/unicore32/kernel/traps.c
@@ -241,13 +241,14 @@ void die(const char *str, struct pt_regs *regs, int err)
}
void uc32_notify_die(const char *str, struct pt_regs *regs,
- struct siginfo *info, unsigned long err, unsigned long trap)
+ int sig, int code, void __user *addr,
+ unsigned long err, unsigned long trap)
{
if (user_mode(regs)) {
current->thread.error_code = err;
current->thread.trap_no = trap;
- force_sig_info(info->si_signo, info, current);
+ force_sig_fault(sig, code, addr, current);
} else
die(str, regs, err);
}
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 8f12a5b50a42..b9a3a50644c1 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -120,17 +120,10 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
unsigned int fsr, unsigned int sig, int code,
struct pt_regs *regs)
{
- struct siginfo si;
-
tsk->thread.address = addr;
tsk->thread.error_code = fsr;
tsk->thread.trap_no = 14;
- clear_siginfo(&si);
- si.si_signo = sig;
- si.si_errno = 0;
- si.si_code = code;
- si.si_addr = (void __user *)addr;
- force_sig_info(sig, &si, tsk);
+ force_sig_fault(sig, code, (void __user *)addr, tsk);
}
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
@@ -466,7 +459,6 @@ asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
- struct siginfo info;
if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
@@ -474,19 +466,14 @@ asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr,
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
- clear_siginfo(&info);
- info.si_signo = inf->sig;
- info.si_errno = 0;
- info.si_code = inf->code;
- info.si_addr = (void __user *)addr;
- uc32_notify_die("", regs, &info, fsr, 0);
+ uc32_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
+ fsr, 0);
}
asmlinkage void do_PrefetchAbort(unsigned long addr,
unsigned int ifsr, struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + fsr_fs(ifsr);
- struct siginfo info;
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
@@ -494,10 +481,6 @@ asmlinkage void do_PrefetchAbort(unsigned long addr,
printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);
- clear_siginfo(&info);
- info.si_signo = inf->sig;
- info.si_errno = 0;
- info.si_code = inf->code;
- info.si_addr = (void __user *)addr;
- uc32_notify_die("", regs, &info, ifsr, 0);
+ uc32_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
+ ifsr, 0);
}
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index f4950fbfe574..5f72a8d1d953 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -234,9 +234,6 @@ void __init bootmem_init(void)
uc32_bootmem_init(min, max_low);
-#ifdef CONFIG_SWIOTLB
- swiotlb_init(1);
-#endif
/*
* Sparsemem tries to allocate bootmem in memory_present(),
* so must be done after the fixed reservations
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1a0be022f91d..cbd5f28ea8e2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -48,6 +48,7 @@ config X86
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ANON_INODES
select ARCH_CLOCKSOURCE_DATA
+ select ARCH_CLOCKSOURCE_INIT
select ARCH_DISCARD_MEMBLOCK
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_DEBUG_VIRTUAL
@@ -119,6 +120,7 @@ config X86
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS if MMU
@@ -447,7 +449,6 @@ config RETPOLINE
config INTEL_RDT
bool "Intel Resource Director Technology support"
- default n
depends on X86 && CPU_SUP_INTEL
select KERNFS
help
@@ -523,6 +524,7 @@ config X86_VSMP
bool "ScaleMP vSMP"
select HYPERVISOR_GUEST
select PARAVIRT
+ select PARAVIRT_XXL
depends on X86_64 && PCI
depends on X86_EXTENDED_PLATFORM
depends on SMP
@@ -701,7 +703,6 @@ config STA2X11
select SWIOTLB
select MFD_STA2X11
select GPIOLIB
- default n
---help---
This adds support for boards based on the STA2X11 IO-Hub,
a.k.a. "ConneXt". The chip is used in place of the standard
@@ -754,6 +755,9 @@ config PARAVIRT
over full virtualization. However, when run without a hypervisor
the kernel is theoretically slower and slightly larger.
+config PARAVIRT_XXL
+ bool
+
config PARAVIRT_DEBUG
bool "paravirt-ops debugging"
depends on PARAVIRT && DEBUG_KERNEL
@@ -799,7 +803,6 @@ config KVM_GUEST
config KVM_DEBUG_FS
bool "Enable debug information for KVM Guests in debugfs"
depends on KVM_GUEST && DEBUG_FS
- default n
---help---
This option enables collection of various statistics for KVM guest.
Statistics are displayed in debugfs filesystem. Enabling this option
@@ -808,7 +811,6 @@ config KVM_DEBUG_FS
config PARAVIRT_TIME_ACCOUNTING
bool "Paravirtual steal time accounting"
depends on PARAVIRT
- default n
---help---
Select this option to enable fine granularity task steal time
accounting. Time spent executing other tasks in parallel with
@@ -1168,7 +1170,6 @@ source "arch/x86/events/Kconfig"
config X86_LEGACY_VM86
bool "Legacy VM86 support"
- default n
depends on X86_32
---help---
This option allows user programs to put the CPU into V8086
@@ -1491,6 +1492,14 @@ config X86_DIRECT_GBPAGES
supports them), so don't confuse the user by printing
that we have them enabled.
+config X86_CPA_STATISTICS
+ bool "Enable statistic for Change Page Attribute"
+ depends on DEBUG_FS
+ ---help---
+ Expose statistics about the Change Page Attribute mechanism, which
+ helps to determine the effectiveness of preserving large and huge
+ page mappings when mapping protections are changed.
+
config ARCH_HAS_MEM_ENCRYPT
def_bool y
@@ -2220,7 +2229,6 @@ config HOTPLUG_CPU
config BOOTPARAM_HOTPLUG_CPU0
bool "Set default setting of cpu0_hotpluggable"
- default n
depends on HOTPLUG_CPU
---help---
Set whether default state of cpu0_hotpluggable is on or off.
@@ -2422,7 +2430,7 @@ menu "Power management and ACPI options"
config ARCH_HIBERNATION_HEADER
def_bool y
- depends on X86_64 && HIBERNATION
+ depends on HIBERNATION
source "kernel/power/Kconfig"
@@ -2742,8 +2750,7 @@ config OLPC
config OLPC_XO1_PM
bool "OLPC XO-1 Power Management"
- depends on OLPC && MFD_CS5535 && PM_SLEEP
- select MFD_CORE
+ depends on OLPC && MFD_CS5535=y && PM_SLEEP
---help---
Add support for poweroff and suspend of the OLPC XO-1 laptop.
@@ -2825,7 +2832,6 @@ source "drivers/pcmcia/Kconfig"
config RAPIDIO
tristate "RapidIO support"
depends on PCI
- default n
help
If enabled this option will include drivers and the core
infrastructure code to support RapidIO interconnect devices.
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 638411f22267..6adce15268bd 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -426,6 +426,20 @@ config CPU_SUP_AMD
If unsure, say N.
+config CPU_SUP_HYGON
+ default y
+ bool "Support Hygon processors" if PROCESSOR_SELECT
+ select CPU_SUP_AMD
+ help
+ This enables detection, tunings and quirks for Hygon processors.
+
+ You need this enabled if you want your kernel to run on a
+ Hygon CPU. Disabling this option on other types of CPUs
+ makes the kernel a tiny bit smaller. Disabling it on a Hygon
+ CPU might render the kernel unbootable.
+
+ If unsure, say N.
+
config CPU_SUP_CENTAUR
default y
bool "Support Centaur processors" if PROCESSOR_SELECT
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 7d68f0c7cfb1..0723dff17e6c 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -314,7 +314,6 @@ config DEBUG_NMI_SELFTEST
config DEBUG_IMR_SELFTEST
bool "Isolated Memory Region self test"
- default n
depends on INTEL_IMR
---help---
This option enables automated sanity testing of the IMR code.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 8f6e7eb8ae9f..5b562e464009 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -193,7 +193,6 @@ cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTI
# does binutils support specific instructions?
asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
-asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1)
@@ -237,6 +236,13 @@ archscripts: scripts_basic
archheaders:
$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
+archmacros:
+ $(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
+
+ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s -Wa,-
+export ASM_MACRO_FLAGS
+KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
+
###
# Kernel objects
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 28764dacf018..466f66c8a7f8 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -37,6 +37,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += -Wno-pointer-sign
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 1458b1700fc7..8b4c5e001157 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -738,6 +738,7 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
struct desc_struct *desc;
void *handle;
efi_system_table_t *_table;
+ unsigned long cmdline_paddr;
efi_early = c;
@@ -756,6 +757,15 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
setup_boot_services32(efi_early);
/*
+ * make_boot_params() may have been called before efi_main(), in which
+ * case this is the second time we parse the cmdline. This is ok,
+ * parsing the cmdline multiple times does not have side-effects.
+ */
+ cmdline_paddr = ((u64)hdr->cmd_line_ptr |
+ ((u64)boot_params->ext_cmd_line_ptr << 32));
+ efi_parse_options((char *)cmdline_paddr);
+
+ /*
* If the boot loader gave us a value for secure_boot then we use that,
* otherwise we ask the BIOS.
*/
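The address reassembly above is the boot protocol's usual split-pointer scheme: the low 32 bits of the command-line address sit in hdr->cmd_line_ptr and the high 32 bits in boot_params->ext_cmd_line_ptr. A self-contained illustration with hypothetical values:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t cmd_line_ptr = 0x0009f000;     /* low 32 bits */
                uint32_t ext_cmd_line_ptr = 0x1;        /* high 32 bits */
                uint64_t cmdline_paddr = (uint64_t)cmd_line_ptr |
                                         ((uint64_t)ext_cmd_line_ptr << 32);

                /* prints 0x10009f000 */
                printf("0x%llx\n", (unsigned long long)cmdline_paddr);
                return 0;
        }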
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index d1e19f358b6e..9ed9709d9947 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -241,7 +241,7 @@ static void parse_gb_huge_pages(char *param, char *val)
}
-static int handle_mem_options(void)
+static void handle_mem_options(void)
{
char *args = (char *)get_cmd_line_ptr();
size_t len = strlen((char *)args);
@@ -251,7 +251,7 @@ static int handle_mem_options(void)
if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
!strstr(args, "hugepages"))
- return 0;
+ return;
tmp_cmdline = malloc(len + 1);
if (!tmp_cmdline)
@@ -269,8 +269,7 @@ static int handle_mem_options(void)
/* Stop at -- */
if (!val && strcmp(param, "--") == 0) {
warn("Only '--' specified in cmdline");
- free(tmp_cmdline);
- return -1;
+ goto out;
}
if (!strcmp(param, "memmap")) {
@@ -283,16 +282,16 @@ static int handle_mem_options(void)
if (!strcmp(p, "nopentium"))
continue;
mem_size = memparse(p, &p);
- if (mem_size == 0) {
- free(tmp_cmdline);
- return -EINVAL;
- }
+ if (mem_size == 0)
+ goto out;
+
mem_limit = mem_size;
}
}
+out:
free(tmp_cmdline);
- return 0;
+ return;
}
/*
@@ -578,7 +577,6 @@ static void process_mem_region(struct mem_vector *entry,
unsigned long image_size)
{
struct mem_vector region, overlap;
- struct slot_area slot_area;
unsigned long start_orig, end;
struct mem_vector cur_entry;
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index eaa843a52907..a480356e0ed8 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
push %ebx
push %ecx
push %edx
- push %edi
-
- /*
- * RIP-relative addressing is needed to access the encryption bit
- * variable. Since we are running in 32-bit mode we need this call/pop
- * sequence to get the proper relative addressing.
- */
- call 1f
-1: popl %edi
- subl $1b, %edi
-
- movl enc_bit(%edi), %eax
- cmpl $0, %eax
- jge .Lsev_exit
/* Check if running under a hypervisor */
movl $1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)
movl %ebx, %eax
andl $0x3f, %eax /* Return the encryption bit location */
- movl %eax, enc_bit(%edi)
jmp .Lsev_exit
.Lno_sev:
xor %eax, %eax
- movl %eax, enc_bit(%edi)
.Lsev_exit:
- pop %edi
pop %edx
pop %ecx
pop %ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
ENDPROC(set_sev_encryption_mask)
.data
-enc_bit:
- .int 0xffffffff
#ifdef CONFIG_AMD_MEM_ENCRYPT
.balign 8
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index a423bdb42686..a1d5918765f3 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -9,6 +9,7 @@
* paravirt and debugging variants are added.)
*/
#undef CONFIG_PARAVIRT
+#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS
#undef CONFIG_KASAN
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 850b8762e889..4c881c850125 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -300,7 +300,7 @@ _start:
# Part 2 of the header, from the old setup.S
.ascii "HdrS" # header signature
- .word 0x020d # header version number (>= 0x0105)
+ .word 0x020e # header version number (>= 0x0105)
# or else old loadlin-1.5 will fail)
.globl realmode_swtch
realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
@@ -558,6 +558,10 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
init_size: .long INIT_SIZE # kernel initialization size
handover_offset: .long 0 # Filled in by build.c
+acpi_rsdp_addr: .quad 0 # 64-bit physical pointer to the
+ # ACPI RSDP table, added with
+ # version 2.14
+
# End of setup header #####################################################
.section ".entrytext", "ax"
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index d4e6cd4577e5..bf0e82400358 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -391,6 +391,13 @@ int main(int argc, char ** argv)
die("Unable to mmap '%s': %m", argv[2]);
/* Number of 16-byte paragraphs, including space for a 4-byte CRC */
sys_size = (sz + 15 + 4) / 16;
+#ifdef CONFIG_EFI_STUB
+ /*
+ * COFF requires minimum 32-byte alignment of sections, and
+ * adding a signature is problematic without that alignment.
+ */
+ sys_size = (sys_size + 1) & ~1;
+#endif
/* Patch the setup code with the appropriate size parameters */
buf[0x1f1] = setup_sectors-1;
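To make the rounding concrete: sys_size counts 16-byte paragraphs, and the EFI-stub adjustment rounds that count up to an even number, i.e. 32-byte alignment of the image end. A worked example with a hypothetical payload size:

        #include <stdio.h>

        int main(void)
        {
                unsigned long sz = 1000037;     /* hypothetical payload bytes */

                /* Number of 16-byte paragraphs, including a 4-byte CRC */
                unsigned long sys_size = (sz + 15 + 4) / 16;    /* 62503 */

                /* Round up to an even paragraph count: 32-byte alignment */
                sys_size = (sys_size + 1) & ~1UL;               /* 62504 */

                printf("%lu paragraphs = %lu bytes\n",
                       sys_size, sys_size * 16);
                return 0;
        }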
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 0eb9f92f3717..6c3ab05c231d 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -247,6 +247,7 @@ CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index e32fc1f274d8..ac9ae487cfeb 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -243,6 +243,7 @@ CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index a450ad573dcb..a4b0007a54e1 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -60,9 +60,6 @@ endif
ifeq ($(avx2_supported),yes)
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
- obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb/
- obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb/
- obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb/
obj-$(CONFIG_CRYPTO_MORUS1280_AVX2) += morus1280-avx2.o
endif
@@ -106,7 +103,7 @@ ifeq ($(avx2_supported),yes)
morus1280-avx2-y := morus1280-avx2-asm.o morus1280-avx2-glue.o
endif
-aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
+aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index acd11b3bf639..2a356b948720 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 2071c3d1ae07..dbe8bb980da1 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index b5f2a8fd5a71..8bebda2de92f 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index acbe7e8336d8..661f7daf43da 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -102,9 +102,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
-int crypto_fpu_init(void);
-void crypto_fpu_exit(void);
-
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
@@ -817,7 +814,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
/* Linearize assoc, if not already linear */
if (req->src->length >= assoclen && req->src->length &&
(!PageHighMem(sg_page(req->src)) ||
- req->src->offset + req->src->length < PAGE_SIZE)) {
+ req->src->offset + req->src->length <= PAGE_SIZE)) {
scatterwalk_start(&assoc_sg_walk, req->src);
assoc = scatterwalk_map(&assoc_sg_walk);
} else {
@@ -1253,22 +1250,6 @@ static struct skcipher_alg aesni_skciphers[] = {
static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
-static struct {
- const char *algname;
- const char *drvname;
- const char *basename;
- struct simd_skcipher_alg *simd;
-} aesni_simd_skciphers2[] = {
-#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
- IS_BUILTIN(CONFIG_CRYPTO_PCBC)
- {
- .algname = "pcbc(aes)",
- .drvname = "pcbc-aes-aesni",
- .basename = "fpu(pcbc(__aes-aesni))",
- },
-#endif
-};
-
#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
unsigned int key_len)
@@ -1422,10 +1403,6 @@ static void aesni_free_simds(void)
for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
aesni_simd_skciphers[i]; i++)
simd_skcipher_free(aesni_simd_skciphers[i]);
-
- for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
- if (aesni_simd_skciphers2[i].simd)
- simd_skcipher_free(aesni_simd_skciphers2[i].simd);
}
static int __init aesni_init(void)
@@ -1469,13 +1446,9 @@ static int __init aesni_init(void)
#endif
#endif
- err = crypto_fpu_init();
- if (err)
- return err;
-
err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
if (err)
- goto fpu_exit;
+ return err;
err = crypto_register_skciphers(aesni_skciphers,
ARRAY_SIZE(aesni_skciphers));
@@ -1499,18 +1472,6 @@ static int __init aesni_init(void)
aesni_simd_skciphers[i] = simd;
}
- for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
- algname = aesni_simd_skciphers2[i].algname;
- drvname = aesni_simd_skciphers2[i].drvname;
- basename = aesni_simd_skciphers2[i].basename;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- continue;
-
- aesni_simd_skciphers2[i].simd = simd;
- }
-
return 0;
unregister_simds:
@@ -1521,8 +1482,6 @@ unregister_skciphers:
ARRAY_SIZE(aesni_skciphers));
unregister_algs:
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
-fpu_exit:
- crypto_fpu_exit();
return err;
}
@@ -1533,8 +1492,6 @@ static void __exit aesni_exit(void)
crypto_unregister_skciphers(aesni_skciphers,
ARRAY_SIZE(aesni_skciphers));
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
-
- crypto_fpu_exit();
}
late_initcall(aesni_init);
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
deleted file mode 100644
index 406680476c52..000000000000
--- a/arch/x86/crypto/fpu.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * FPU: Wrapper for blkcipher touching fpu
- *
- * Copyright (c) Intel Corp.
- * Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/skcipher.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <asm/fpu/api.h>
-
-struct crypto_fpu_ctx {
- struct crypto_skcipher *child;
-};
-
-static int crypto_fpu_setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(parent);
- struct crypto_skcipher *child = ctx->child;
- int err;
-
- crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
-static int crypto_fpu_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *child = ctx->child;
- SKCIPHER_REQUEST_ON_STACK(subreq, child);
- int err;
-
- skcipher_request_set_tfm(subreq, child);
- skcipher_request_set_callback(subreq, 0, NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
- req->iv);
-
- kernel_fpu_begin();
- err = crypto_skcipher_encrypt(subreq);
- kernel_fpu_end();
-
- skcipher_request_zero(subreq);
- return err;
-}
-
-static int crypto_fpu_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *child = ctx->child;
- SKCIPHER_REQUEST_ON_STACK(subreq, child);
- int err;
-
- skcipher_request_set_tfm(subreq, child);
- skcipher_request_set_callback(subreq, 0, NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
- req->iv);
-
- kernel_fpu_begin();
- err = crypto_skcipher_decrypt(subreq);
- kernel_fpu_end();
-
- skcipher_request_zero(subreq);
- return err;
-}
-
-static int crypto_fpu_init_tfm(struct crypto_skcipher *tfm)
-{
- struct skcipher_instance *inst = skcipher_alg_instance(tfm);
- struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher_spawn *spawn;
- struct crypto_skcipher *cipher;
-
- spawn = skcipher_instance_ctx(inst);
- cipher = crypto_spawn_skcipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- ctx->child = cipher;
-
- return 0;
-}
-
-static void crypto_fpu_exit_tfm(struct crypto_skcipher *tfm)
-{
- struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_skcipher(ctx->child);
-}
-
-static void crypto_fpu_free(struct skcipher_instance *inst)
-{
- crypto_drop_skcipher(skcipher_instance_ctx(inst));
- kfree(inst);
-}
-
-static int crypto_fpu_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct crypto_skcipher_spawn *spawn;
- struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
- struct skcipher_alg *alg;
- const char *cipher_name;
- int err;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ (CRYPTO_ALG_INTERNAL | CRYPTO_ALG_TYPE_SKCIPHER)) &
- algt->mask)
- return -EINVAL;
-
- if (!(algt->mask & CRYPTO_ALG_INTERNAL))
- return -EINVAL;
-
- cipher_name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(cipher_name))
- return PTR_ERR(cipher_name);
-
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
-
- spawn = skcipher_instance_ctx(inst);
-
- crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
- err = crypto_grab_skcipher(spawn, cipher_name, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
- if (err)
- goto out_free_inst;
-
- alg = crypto_skcipher_spawn_alg(spawn);
-
- err = crypto_inst_setname(skcipher_crypto_instance(inst), "fpu",
- &alg->base);
- if (err)
- goto out_drop_skcipher;
-
- inst->alg.base.cra_flags = CRYPTO_ALG_INTERNAL;
- inst->alg.base.cra_priority = alg->base.cra_priority;
- inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
- inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
-
- inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
-
- inst->alg.base.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
-
- inst->alg.init = crypto_fpu_init_tfm;
- inst->alg.exit = crypto_fpu_exit_tfm;
-
- inst->alg.setkey = crypto_fpu_setkey;
- inst->alg.encrypt = crypto_fpu_encrypt;
- inst->alg.decrypt = crypto_fpu_decrypt;
-
- inst->free = crypto_fpu_free;
-
- err = skcipher_register_instance(tmpl, inst);
- if (err)
- goto out_drop_skcipher;
-
-out:
- return err;
-
-out_drop_skcipher:
- crypto_drop_skcipher(spawn);
-out_free_inst:
- kfree(inst);
- goto out;
-}
-
-static struct crypto_template crypto_fpu_tmpl = {
- .name = "fpu",
- .create = crypto_fpu_create,
- .module = THIS_MODULE,
-};
-
-int __init crypto_fpu_init(void)
-{
- return crypto_register_template(&crypto_fpu_tmpl);
-}
-
-void crypto_fpu_exit(void)
-{
- crypto_unregister_template(&crypto_fpu_tmpl);
-}
-
-MODULE_ALIAS_CRYPTO("fpu");
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index 95cf857d2cbb..f40244eaf14d 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
static int __init crypto_morus1280_sse2_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 615fb7bc9a32..9afaf8f8565a 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
static int __init crypto_morus640_sse2_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
deleted file mode 100644
index 815ded3ba90e..000000000000
--- a/arch/x86/crypto/sha1-mb/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-OBJECT_FILES_NON_STANDARD := y
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb.o
- sha1-mb-y := sha1_mb.o sha1_mb_mgr_flush_avx2.o \
- sha1_mb_mgr_init_avx2.o sha1_mb_mgr_submit_avx2.o sha1_x8_avx2.o
-endif
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
deleted file mode 100644
index b93805664c1d..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ /dev/null
@@ -1,1011 +0,0 @@
-/*
- * Multi buffer SHA1 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha1_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha1_mb_alg_state;
-
-struct sha1_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
- (struct sha1_mb_mgr *state, struct job_sha1 *job);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
- (struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
- (struct sha1_mb_mgr *state);
-
-static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA1_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA1_PADLENGTHFIELD_SIZE;
-
-#if SHA1_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA1_LOG2_BLOCK_SIZE;
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
- struct sha1_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA1_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA1_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA1_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr,
- &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha1_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha1_hash_ctx *)
- sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha1_hash_ctx
- *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user.
- * If it is not ready, resubmit the job to finish processing.
- * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
- * Otherwise, all jobs currently being managed by the hash_ctx_mgr
- * still need processing.
- */
- struct sha1_hash_ctx *ctx;
-
- ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
- return sha1_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
-{
- sha1_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
- struct sha1_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- return ctx;
- }
-
- /*
- * If we made it here, there were no errors during this call to
- * submit
- */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently
- * being processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
- /*
- * Compute how many bytes to copy from user buffer into
- * extra block
- */
- uint32_t copy_len = SHA1_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /*
- * The extra block should never contain more than 1 block
- * here
- */
- assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);
-
- /*
- * If the extra block buffer contains exactly 1 block, it can
- * be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha1_hash_ctx *)
- sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- return sha1_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
-{
- struct sha1_hash_ctx *ctx;
-
- while (1) {
- ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned 0, there are no more jobs in flight. */
- if (!ctx)
- return NULL;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha1_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
- * returned. Otherwise, all jobs currently being managed by the
- * sha1_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- return ctx;
- }
-}
-
-static int sha1_mb_init(struct ahash_request *areq)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA1_H0;
- sctx->job.result_digest[1] = SHA1_H1;
- sctx->job.result_digest[2] = SHA1_H2;
- sctx->job.result_digest[3] = SHA1_H3;
- sctx->job.result_digest[4] = SHA1_H4;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be32 *dst = (__be32 *) rctx->out;
-
- for (i = 0; i < 5; ++i)
- dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha1_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha1_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha1_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
-
- /* remove from work list */
- spin_lock(&cstate->work_lock);
- list_del(&rctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock(&cstate->work_lock);
- list_del(&req_ctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
- }
-
- return 0;
-}
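The if/else around rctx->complete() encodes a locking rule, not a fast path: completion callbacks may take locks that are also taken from softirq context, so when interrupts are still enabled the callback must run with bottom halves disabled. A hedged sketch of that discipline (the wrapper name is hypothetical; the callback type matches the crypto_async_request completion signature of this era):

	#include <linux/crypto.h>
	#include <linux/interrupt.h>

	static void complete_bh_safe(struct crypto_async_request *base,
				     void (*complete)(struct crypto_async_request *,
						      int),
				     int err)
	{
		if (irqs_disabled()) {
			/* softirqs cannot preempt us here */
			complete(base, err);
		} else {
			local_bh_disable();
			complete(base, err);
			local_bh_enable();
		}
	}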
-
-static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock(&cstate->work_lock);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock(&cstate->work_lock);
-
- mcryptd_arm_flusher(cstate, delay);
-}
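Each request is stamped on arrival and given a deadline one FLUSH_INTERVAL later; because requests are appended to the per-CPU work_list in arrival order, the list head always carries the earliest deadline, which is the invariant the flusher and its re-arm logic rely on. A small sketch of that check (the tag type and helper are hypothetical; the jiffies helpers are the real kernel ones):

	#include <linux/jiffies.h>

	struct req_tag {
		unsigned long arrival;	/* jiffies at enqueue time */
		unsigned long expire;	/* arrival + usecs_to_jiffies(FLUSH_INTERVAL) */
	};

	/* FIFO insertion order implies the head has the earliest expiry. */
	static bool head_expired(const struct req_tag *head, unsigned long now)
	{
		return !time_before(now, head->expire);
	}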
-
-static int sha1_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- sha1_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
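The return-value contract here is easy to misread: a NULL context from sha1_ctx_mgr_submit() means the job was parked in a lane and the caller reports -EINPROGRESS, while a non-NULL context means some job (not necessarily the one just submitted) completed and must be walked to completion. A hedged condensation of that dispatch (the helper is hypothetical):

	static int handle_submit_result(struct sha1_hash_ctx *done)
	{
		if (!done)
			return -EINPROGRESS;	/* batched; completes later */
		if (done->error)
			return done->error;	/* propagate the ctx error */
		return 0;			/* a job finished synchronously */
	}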
-
-static int sha1_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- sha1_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha1_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha1_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
- HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha1_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha1_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha1_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha1_hash_ctx));
-
- return 0;
-}
-
-static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha1_mb_areq_alg = {
- .init = sha1_mb_init,
- .update = sha1_mb_update,
- .final = sha1_mb_final,
- .finup = sha1_mb_finup,
- .export = sha1_mb_export,
- .import = sha1_mb_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_hash_ctx),
- .base = {
- .cra_name = "__sha1-mb",
- .cra_driver_name = "__intel_sha1-mb",
- .cra_priority = 100,
- /*
- * Use the ASYNC flag, as some buffers in the multi-buffer
- * algorithm may not have completed before the hashing
- * thread sleeps.
- */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha1_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha1_mb_areq_init_tfm,
- .cra_exit = sha1_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha1_hash_ctx),
- }
- }
-};
-
-static int sha1_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha1_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha1_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha1_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha1_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha1_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha1_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha1_mb_async_alg = {
- .init = sha1_mb_async_init,
- .update = sha1_mb_async_update,
- .final = sha1_mb_async_final,
- .finup = sha1_mb_async_finup,
- .digest = sha1_mb_async_digest,
- .export = sha1_mb_async_export,
- .import = sha1_mb_async_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_hash_ctx),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
- .cra_init = sha1_mb_async_init_tfm,
- .cra_exit = sha1_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha1_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
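Because cra_priority is only 50, a generic lookup of "sha1" will normally resolve to a faster single-buffer driver; a caller who actually wants the multi-buffer implementation has to ask for it by driver name, as the comment above says. A hedged usage sketch with the standard ahash allocation API:

	#include <crypto/hash.h>
	#include <linux/err.h>

	static int try_sha1_mb(void)
	{
		struct crypto_ahash *tfm;

		/* request this driver explicitly, bypassing priority */
		tfm = crypto_alloc_ahash("sha1_mb", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* ... build an ahash_request and hash as usual ... */

		crypto_free_ahash(tfm);
		return 0;
	}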
-
-static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha1_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha1_hash_ctx *)
- sha1_ctx_mgr_flush(cstate->mgr);
- kernel_fpu_end();
- if (!sha_ctx) {
- pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- /* take the next flush time from the oldest pending request */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha1_mb_mod_init(void)
-{
-
- int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);
-
- sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
- sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
- sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
- sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;
-
- if (!sha1_mb_alg_state.alg_cstate)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha1_mb_alg_state;
- cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
- GFP_KERNEL);
- if (!cpu_state->mgr)
- goto err2;
- sha1_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha1_mb_alg_state.flusher = &sha1_mb_flusher;
-
- err = crypto_register_ahash(&sha1_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha1_mb_async_alg);
- if (err)
- goto err1;
-
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha1_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha1_mb_alg_state.alg_cstate);
- return -ENODEV;
-}
-
-static void __exit sha1_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha1_mb_async_alg);
- crypto_unregister_ahash(&sha1_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha1_mb_alg_state.alg_cstate);
-}
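Init and exit above follow the usual per-CPU resource pattern: alloc_percpu() for the state array, a for_each_possible_cpu() loop to set up each slot, and a mirror-image teardown. A stripped-down sketch of the same pattern (the state type is hypothetical and the allocation size illustrative):

	#include <linux/percpu.h>
	#include <linux/slab.h>

	struct pcpu_state {
		void *mgr;	/* per-CPU manager, kzalloc'd */
	};

	static struct pcpu_state __percpu *states;

	static int setup_states(void)
	{
		int cpu;

		states = alloc_percpu(struct pcpu_state);
		if (!states)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct pcpu_state *st = per_cpu_ptr(states, cpu);

			st->mgr = kzalloc(64, GFP_KERNEL);
			if (!st->mgr)
				return -ENOMEM;	/* caller unwinds all CPUs,
						   as mod_init does at err2 */
		}
		return 0;
	}

	static void teardown_states(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			kfree(per_cpu_ptr(states, cpu)->mgr);
		free_percpu(states);
	}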
-
-module_init(sha1_mb_mod_init);
-module_exit(sha1_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
deleted file mode 100644
index 9454bd16f9f8..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Header file for multi buffer SHA context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha1_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-
-#ifdef HASH_CTX_DEBUG
- HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
-#endif
-};
-
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
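hash_ctx_init() is wrapped in do { } while (0) so its two assignments behave as a single statement at the call site, which matters in unbraced conditionals. A short illustration (reuse() is a hypothetical stand-in):

	if (fresh)
		hash_ctx_init(ctx);	/* expands safely, no dangling else */
	else
		reuse(ctx);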
-
-
-/* Hash Constants and Typedefs */
-#define SHA1_DIGEST_LENGTH 5
-#define SHA1_LOG2_BLOCK_SIZE 6
-
-#define SHA1_PADLENGTHFIELD_SIZE 8
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha1_ctx_mgr {
- struct sha1_mb_mgr mgr;
-};
-
-/* typedef struct sha1_ctx_mgr sha1_ctx_mgr; */
-
-struct sha1_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha1 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA1_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
deleted file mode 100644
index 08ad1a9acfd7..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Header file for multi buffer SHA1 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford@intel.com>
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-
-#include <linux/types.h>
-
-#define NUM_SHA1_DIGEST_WORDS 5
-
-enum job_sts { STS_UNKNOWN = 0,
- STS_BEING_PROCESSED = 1,
- STS_COMPLETED = 2,
- STS_INTERNAL_ERROR = 3,
- STS_ERROR = 4
-};
-
-struct job_sha1 {
- u8 *buffer;
- u32 len;
- u32 result_digest[NUM_SHA1_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-/* SHA1 out-of-order scheduler */
-
-/* typedef uint32_t sha1_digest_array[5][8]; */
-
-struct sha1_args_x8 {
- uint32_t digest[5][8];
- uint8_t *data_ptr[8];
-};
-
-struct sha1_lane_data {
- struct job_sha1 *job_in_lane;
-};
-
-struct sha1_mb_mgr {
- struct sha1_args_x8 args;
-
- uint32_t lens[8];
-
- /* each nibble is the index (0...7) of an unused lane */
- uint64_t unused_lanes;
- /* nibble 8 is an 0xF sentinel; bit 32+3 set means all lanes free */
- struct sha1_lane_data ldata[8];
-};
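unused_lanes deserves a note, since the comments above are terse: it is a stack of 4-bit lane indices packed into one u64, initialized to 0xF76543210 (lanes 0 through 7 free, topped by an 0xF sentinel that surfaces once all eight lanes have been popped). A C model of the pop/push the AVX2 manager performs with shr/shl (function names hypothetical):

	#include <stdint.h>

	static inline unsigned int lane_pop(uint64_t *lanes)
	{
		unsigned int lane = *lanes & 0xF;	/* next free lane */

		*lanes >>= 4;				/* drop it off the stack */
		return lane;
	}

	static inline void lane_push(uint64_t *lanes, unsigned int lane)
	{
		*lanes = (*lanes << 4) | lane;		/* freed lane goes on top */
	}

	/* After eight pops *lanes == 0xF: only the sentinel remains. */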
-
-
-#define SHA1_MB_MGR_NUM_LANES_AVX2 8
-
-void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state);
-struct job_sha1 *sha1_mb_mgr_submit_avx2(struct sha1_mb_mgr *state,
- struct job_sha1 *job);
-struct job_sha1 *sha1_mb_mgr_flush_avx2(struct sha1_mb_mgr *state);
-struct job_sha1 *sha1_mb_mgr_get_comp_job_avx2(struct sha1_mb_mgr *state);
-
-#endif
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
deleted file mode 100644
index 86688c6e7a25..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Header file for multi buffer SHA1 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford@intel.com>
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count is in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. Its arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# overridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
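The FIELD machinery defined below is just rounded-up offset accumulation. A C model of the arithmetic, assuming align is a power of two (as every use below guarantees; the function name is hypothetical):

	/* Hypothetical C rendering of the FIELD macro's offset math. */
	static unsigned int field(unsigned int *off, unsigned int size,
				  unsigned int align)
	{
		unsigned int pos;

		*off = (*off + align - 1) & ~(align - 1);	/* round up */
		pos = *off;					/* field offset */
		*off += size;
		return pos;
	}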
-
-#########################################################################
-
-#ifndef _SHA1_MB_MGR_DATASTRUCT_ASM_
-#define _SHA1_MB_MGR_DATASTRUCT_ASM_
-
-## START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-## FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-## END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
-
-########################################################################
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
- tmp = (_FIELD_OFFSET - %%tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endif
-.endstruc
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
- align = \p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm name size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-
-#endif
-
-########################################################################
-#### Define constants
-########################################################################
-
-########################################################################
-#### Define SHA1 Out Of Order Data Structures
-########################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
-_LANE_DATA_size = _FIELD_OFFSET
-_LANE_DATA_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # SHA1_ARGS_X8
-### name size align
-FIELD _digest, 4*5*8, 16 # transposed digest
-FIELD _data_ptr, 8*8, 8 # array of pointers to data
-END_FIELDS
-
-_SHA1_ARGS_X4_size = _FIELD_OFFSET
-_SHA1_ARGS_X4_align = _STRUCT_ALIGN
-_SHA1_ARGS_X8_size = _FIELD_OFFSET
-_SHA1_ARGS_X8_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA1_ARGS_X4_size, _SHA1_ARGS_X4_align
-FIELD _lens, 4*8, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
-END_FIELDS
-
-_MB_MGR_size = _FIELD_OFFSET
-_MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
-
-
-########################################################################
-#### Define constants
-########################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-########################################################################
-#### Define JOB_SHA1 structure
-########################################################################
-
-START_FIELDS # JOB_SHA1
-
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 4, 4 # length in bytes
-FIELD _result_digest, 5*4, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
-_JOB_SHA1_size = _FIELD_OFFSET
-_JOB_SHA1_align = _STRUCT_ALIGN
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
deleted file mode 100644
index 7cfba738f104..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Flush routine for SHA1 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford@intel.com>
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-
-.extern sha1_x8_avx2
-
-# LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-# idx must be a register not clobbered by sha1_x8_avx2
-#define idx %r8
-#define DWORD_idx %r8d
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-#define tmp2_w %ebx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 %arg1
-
-#define extra_blocks %arg2
-#define p %arg2
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rdi : state
-ENTRY(sha1_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
- # If bit (32+3) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+3, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
- offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne four(%rip), idx
- offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne five(%rip), idx
- offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne six(%rip), idx
- offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne seven(%rip), idx
-
- # copy idx to empty lanes
-copy_lane_data:
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 8
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 4*I)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- vmovdqu _lens+0*16(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqu %xmm0, _lens+0*16(state)
- vmovdqu %xmm1, _lens+1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha1_x8_avx2
- # state and idx are intact
-
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state , idx, 4) , %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), tmp2_w
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- mov tmp2_w, offset(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha1_mb_mgr_flush_avx2)
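The vpminud/vpalignr ladder above is a log2(8)-step reduction over the lens[] array, and it works because each entry packs (blocks << 4) | lane: the minimum therefore yields both the shortest job and its lane in a single value. A scalar C model of the same search (function name hypothetical):

	#include <stdint.h>

	static uint32_t find_min_job(const uint32_t lens[8], unsigned int *lane)
	{
		uint32_t min = lens[0];
		int i;

		for (i = 1; i < 8; i++)
			if (lens[i] < min)
				min = lens[i];

		*lane = min & 0xF;	/* low nibble: lane index */
		return min >> 4;	/* upper bits: blocks to hash */
	}

Idle lanes carry 0xFFFFFFFF, so they can never win the minimum; that is why copy_lane_data stuffs that value (plus a valid data pointer) into every empty lane before the search.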
-
-
-#################################################################
-
-.align 16
-ENTRY(sha1_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- ## if bit 32+3 is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $(32+3), unused_lanes
- jc .return_null
-
- # Find min length
- vmovdqu _lens(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- test $~0xF, idx
- jnz .return_null
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), tmp2_w
-
- vmovdqu %xmm0, _result_digest(job_rax)
- movl tmp2_w, _result_digest+1*16(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha1_mb_mgr_get_comp_job_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
-.octa 0x000000000000000000000000FFFFFFF0
-
-.section .rodata.cst8, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-two:
-.quad 2
-three:
-.quad 3
-four:
-.quad 4
-five:
-.quad 5
-six:
-.quad 6
-seven:
-.quad 7
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
deleted file mode 100644
index d2add0d35f43..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Initialization code for multi buffer SHA1 algorithm for AVX2
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "sha1_mb_mgr.h"
-
-void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state)
-{
- unsigned int j;
- state->unused_lanes = 0xF76543210ULL;
- for (j = 0; j < 8; j++) {
- state->lens[j] = 0xFFFFFFFF;
- state->ldata[j].job_in_lane = NULL;
- }
-}
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
deleted file mode 100644
index 7a93b1c0d69a..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA1 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford@intel.com>
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-
-.extern sha1_x8_avx2
-
-# LINUX register definitions
-arg1 = %rdi
-arg2 = %rsi
-size_offset = %rcx
-tmp2 = %rcx
-extra_blocks = %rdx
-
-# Common definitions
-#define state arg1
-#define job %rsi
-#define len2 arg2
-#define p2 arg2
-
-# idx must be a register not clobbered by sha1_x8_avx2
-idx = %r8
-DWORD_idx = %r8d
-last_len = %r8
-
-p = %r11
-start_offset = %r11
-
-unused_lanes = %rbx
-BYTE_unused_lanes = %bl
-
-job_rax = %rax
-len = %rax
-DWORD_len = %eax
-
-lane = %r12
-tmp3 = %r12
-
-tmp = %r9
-DWORD_tmp = %r9d
-
-lane_data = %r10
-
-# JOB* sha1_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job)
-# arg 1 : rdi : state
-# arg 2 : rsi : job
-ENTRY(sha1_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
- mov unused_lanes, lane
- and $0xF, lane
- shr $4, unused_lanes
- imul $_LANE_DATA_size, lane, lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- shl $4, len
- or lane, len
-
- movl DWORD_len, _lens(state , lane, 4)
-
- # Load digest words from result_digest
- vmovdqu _result_digest(job), %xmm0
- mov _result_digest+1*16(job), DWORD_tmp
- vmovd %xmm0, _args_digest(state, lane, 4)
- vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4)
- vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4)
- vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4)
- movl DWORD_tmp, _args_digest+4*32(state , lane, 4)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xF, unused_lanes
- jne return_null
-
-start_loop:
- # Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqa %xmm0, _lens + 0*16(state)
- vmovdqa %xmm1, _lens + 1*16(state)
-
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha1_x8_avx2
-
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), DWORD_tmp
-
- vmovdqu %xmm0, _result_digest(job_rax)
- movl DWORD_tmp, _result_digest+1*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-
-ENDPROC(sha1_mb_mgr_submit_avx2)
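Note the gating at "cmp $0xF, unused_lanes": submission alone does not start hashing. Jobs are parked until the lane stack is down to its sentinel, i.e. all eight lanes are occupied, and only then does the eight-wide SHA1 kernel run; earlier completion is the flusher's responsibility. As a one-line predicate (hypothetical name):

	static inline bool all_lanes_busy(uint64_t unused_lanes)
	{
		return unused_lanes == 0xF;	/* only the sentinel is left */
	}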
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
- .octa 0x000000000000000000000000FFFFFFF0
diff --git a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
deleted file mode 100644
index 20f77aa633de..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
+++ /dev/null
@@ -1,492 +0,0 @@
-/*
- * Multi-buffer SHA1 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford@intel.com>
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-## code to compute eight SHA1 digests in parallel using AVX2
-## outer calling routine takes care of save and restore of XMM registers
-
-## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
-##
-## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
-## Linux preserves: rdi rbp r8
-##
-## clobbers ymm0-15
-
-
-# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
-# "transpose" data in {r0...r7} using temps {t0...t1}
-# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
-# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
-# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
-# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
-#
-# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
-# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
-# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
-# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
-# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
-# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
-# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
-# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
-#
-
-.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
- # process top half (r0..r3) {a...d}
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
- vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
- vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
- vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
- vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
-
- # use r2 in place of t0
- # process bottom half (r4..r7) {e...h}
- vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
- vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
- vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
- vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
- vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
- vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
- vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
- vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
-
- vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
- vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
- vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
- vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
- vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
- vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
- vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
- vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
-
-.endm
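TRANSPOSE8 converts eight row-major 8-dword message chunks into column-major form, so that one YMM register holds "word i of all eight messages" and a single vector instruction advances all eight hashes at once. The scalar equivalent is a plain 8x8 in-place transpose:

	#include <stdint.h>

	/* Scalar model of TRANSPOSE8: m[i][j] and m[j][i] swap. */
	static void transpose8(uint32_t m[8][8])
	{
		int i, j;

		for (i = 0; i < 8; i++)
			for (j = i + 1; j < 8; j++) {
				uint32_t t = m[i][j];

				m[i][j] = m[j][i];
				m[j][i] = t;
			}
	}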
-##
-## Magic functions defined in FIPS 180-1
-##
-# macro MAGIC_F0 F,B,C,D,T ## F = (D ^ (B & (C ^ D)))
-.macro MAGIC_F0 regF regB regC regD regT
- vpxor \regD, \regC, \regF
- vpand \regB, \regF, \regF
- vpxor \regD, \regF, \regF
-.endm
-
-# macro MAGIC_F1 F,B,C,D,T ## F = (B ^ C ^ D)
-.macro MAGIC_F1 regF regB regC regD regT
- vpxor \regC, \regD, \regF
- vpxor \regB, \regF, \regF
-.endm
-
-# macro MAGIC_F2 F,B,C,D,T ## F = ((B & C) | (B & D) | (C & D))
-.macro MAGIC_F2 regF regB regC regD regT
- vpor \regC, \regB, \regF
- vpand \regC, \regB, \regT
- vpand \regD, \regF, \regF
- vpor \regT, \regF, \regF
-.endm
-
-# macro MAGIC_F3 F,B,C,D,T ## F = (B ^ C ^ D)
-.macro MAGIC_F3 regF regB regC regD regT
- MAGIC_F1 \regF,\regB,\regC,\regD,\regT
-.endm
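
In scalar C the round functions look as follows; MAGIC_F0 uses the d ^ (b & (c ^ d)) form of the FIPS 180-1 "Ch" function to save one operation, and MAGIC_F2 computes majority as (b & c) | ((b | c) & d), which equals the (B&C)|(B&D)|(C&D) in the comment. Helper names here are illustrative:

	#include <stdint.h>

	static uint32_t f0(uint32_t b, uint32_t c, uint32_t d)
	{
		return d ^ (b & (c ^ d));	/* == (b & c) | (~b & d) */
	}

	static uint32_t f1(uint32_t b, uint32_t c, uint32_t d)
	{
		return b ^ c ^ d;
	}

	static uint32_t f2(uint32_t b, uint32_t c, uint32_t d)
	{
		return (b & c) | ((b | c) & d);	/* majority(b, c, d) */
	}

	/* f3 (rounds 60-79) is the same XOR function as f1. */
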
-
-# PROLD reg, imm, tmp
-.macro PROLD reg imm tmp
- vpsrld $(32-\imm), \reg, \tmp
- vpslld $\imm, \reg, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-.macro PROLD_nd reg imm tmp src
- vpsrld $(32-\imm), \src, \tmp
- vpslld $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
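
AVX2 has no packed-rotate instruction, so PROLD synthesizes a rotate-left from two shifts and an OR; PROLD_nd is the non-destructive variant reading from a separate source register. The per-element scalar equivalent (valid for the shift counts 1, 5 and 30 used in this file):

	#include <stdint.h>

	/* Per-element effect of PROLD: rotate a 32-bit word left by imm,
	 * for 1 <= imm <= 31. */
	static uint32_t rol32_ref(uint32_t x, unsigned int imm)
	{
		return (x << imm) | (x >> (32 - imm));
	}
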
-
-.macro SHA1_STEP_00_15 regA regB regC regD regE regT regF memW immCNT MAGIC
- vpaddd \immCNT, \regE, \regE
- vpaddd \memW*32(%rsp), \regE, \regE
- PROLD_nd \regT, 5, \regF, \regA
- vpaddd \regT, \regE, \regE
- \MAGIC \regF, \regB, \regC, \regD, \regT
- PROLD \regB, 30, \regT
- vpaddd \regF, \regE, \regE
-.endm
-
-.macro SHA1_STEP_16_79 regA regB regC regD regE regT regF memW immCNT MAGIC
- vpaddd \immCNT, \regE, \regE
- offset = ((\memW - 14) & 15) * 32
- vmovdqu offset(%rsp), W14
- vpxor W14, W16, W16
- offset = ((\memW - 8) & 15) * 32
- vpxor offset(%rsp), W16, W16
- offset = ((\memW - 3) & 15) * 32
- vpxor offset(%rsp), W16, W16
- vpsrld $(32-1), W16, \regF
- vpslld $1, W16, W16
- vpor W16, \regF, \regF
-
- ROTATE_W
-
- offset = ((\memW - 0) & 15) * 32
- vmovdqu \regF, offset(%rsp)
- vpaddd \regF, \regE, \regE
- PROLD_nd \regT, 5, \regF, \regA
- vpaddd \regT, \regE, \regE
- \MAGIC \regF,\regB,\regC,\regD,\regT ## FUN = MAGIC_Fi(B,C,D)
- PROLD \regB,30, \regT
- vpaddd \regF, \regE, \regE
-.endm
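
SHA1_STEP_16_79 extends the message schedule over a 16-entry circular buffer (16 rows of 8 lanes on the stack, hence the (memW & 15) * 32 addressing). Reusing rol32_ref from the PROLD sketch above, the scalar recurrence it implements is:

	/* FIPS 180-1 message schedule over a circular 16-word buffer:
	 * W[i] = ROL1(W[i-3] ^ W[i-8] ^ W[i-14] ^ W[i-16]). */
	static uint32_t sha1_schedule(uint32_t w[16], unsigned int i)
	{
		uint32_t x = w[(i - 3) & 15] ^ w[(i - 8) & 15] ^
			     w[(i - 14) & 15] ^ w[(i - 16) & 15];

		x = rol32_ref(x, 1);
		w[i & 15] = x;		/* (i - 16) & 15 == i & 15 */
		return x;
	}
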
-
-########################################################################
-########################################################################
-########################################################################
-
-## FRAMESZ plus pushes must be an odd multiple of 8
-YMM_SAVE = (15-15)*32
-FRAMESZ = 32*16 + YMM_SAVE
-_YMM = FRAMESZ - YMM_SAVE
-
-#define VMOVPS vmovups
-
-IDX = %rax
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-inp4 = %r13
-inp5 = %r14
-inp6 = %r15
-inp7 = %rcx
-arg1 = %rdi
-arg2 = %rsi
-RSP_SAVE = %rdx
-
-# ymm0 A
-# ymm1 B
-# ymm2 C
-# ymm3 D
-# ymm4 E
-# ymm5 F AA
-# ymm6 T0 BB
-# ymm7 T1 CC
-# ymm8 T2 DD
-# ymm9 T3 EE
-# ymm10 T4 TMP
-# ymm11 T5 FUN
-# ymm12 T6 K
-# ymm13 T7 W14
-# ymm14 T8 W15
-# ymm15 T9 W16
-
-
-A = %ymm0
-B = %ymm1
-C = %ymm2
-D = %ymm3
-E = %ymm4
-F = %ymm5
-T0 = %ymm6
-T1 = %ymm7
-T2 = %ymm8
-T3 = %ymm9
-T4 = %ymm10
-T5 = %ymm11
-T6 = %ymm12
-T7 = %ymm13
-T8 = %ymm14
-T9 = %ymm15
-
-AA = %ymm5
-BB = %ymm6
-CC = %ymm7
-DD = %ymm8
-EE = %ymm9
-TMP = %ymm10
-FUN = %ymm11
-K = %ymm12
-W14 = %ymm13
-W15 = %ymm14
-W16 = %ymm15
-
-.macro ROTATE_ARGS
- TMP_ = E
- E = D
- D = C
- C = B
- B = A
- A = TMP_
-.endm
-
-.macro ROTATE_W
-TMP_ = W16
-W16 = W15
-W15 = W14
-W14 = TMP_
-.endm
-
-# 8 streams x 5 32bit words per digest x 4 bytes per word
-#define DIGEST_SIZE (8*5*4)
-
-.align 32
-
-# void sha1_x8_avx2(void *args, uint32_t size)
-# arg 1 : pointer to the args structure: the transposed digests followed
-#         by an array[8] of pointers to the input data (_data_ptr)
-# arg 2 : size (in blocks) ;; assumed to be >= 1
-#
-ENTRY(sha1_x8_avx2)
-
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
-	# save rsp
- mov %rsp, RSP_SAVE
- sub $FRAMESZ, %rsp
-
-	# align rsp to 32 bytes
- and $~0x1F, %rsp
-
- ## Initialize digests
- vmovdqu 0*32(arg1), A
- vmovdqu 1*32(arg1), B
- vmovdqu 2*32(arg1), C
- vmovdqu 3*32(arg1), D
- vmovdqu 4*32(arg1), E
-
- ## transpose input onto stack
- mov _data_ptr+0*8(arg1),inp0
- mov _data_ptr+1*8(arg1),inp1
- mov _data_ptr+2*8(arg1),inp2
- mov _data_ptr+3*8(arg1),inp3
- mov _data_ptr+4*8(arg1),inp4
- mov _data_ptr+5*8(arg1),inp5
- mov _data_ptr+6*8(arg1),inp6
- mov _data_ptr+7*8(arg1),inp7
-
- xor IDX, IDX
-lloop:
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), F
- I=0
-.rep 2
- VMOVPS (inp0, IDX), T0
- VMOVPS (inp1, IDX), T1
- VMOVPS (inp2, IDX), T2
- VMOVPS (inp3, IDX), T3
- VMOVPS (inp4, IDX), T4
- VMOVPS (inp5, IDX), T5
- VMOVPS (inp6, IDX), T6
- VMOVPS (inp7, IDX), T7
-
- TRANSPOSE8 T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
- vpshufb F, T0, T0
- vmovdqu T0, (I*8)*32(%rsp)
- vpshufb F, T1, T1
- vmovdqu T1, (I*8+1)*32(%rsp)
- vpshufb F, T2, T2
- vmovdqu T2, (I*8+2)*32(%rsp)
- vpshufb F, T3, T3
- vmovdqu T3, (I*8+3)*32(%rsp)
- vpshufb F, T4, T4
- vmovdqu T4, (I*8+4)*32(%rsp)
- vpshufb F, T5, T5
- vmovdqu T5, (I*8+5)*32(%rsp)
- vpshufb F, T6, T6
- vmovdqu T6, (I*8+6)*32(%rsp)
- vpshufb F, T7, T7
- vmovdqu T7, (I*8+7)*32(%rsp)
- add $32, IDX
- I = (I+1)
-.endr
- # save old digests
- vmovdqu A,AA
- vmovdqu B,BB
- vmovdqu C,CC
- vmovdqu D,DD
- vmovdqu E,EE
-
-##
-## perform 0-79 steps
-##
- vmovdqu K00_19(%rip), K
-## do rounds 0...15
- I = 0
-.rep 16
- SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 16...19
- vmovdqu ((16 - 16) & 15) * 32 (%rsp), W16
- vmovdqu ((16 - 15) & 15) * 32 (%rsp), W15
-.rep 4
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 20...39
- vmovdqu K20_39(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 40...59
- vmovdqu K40_59(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 60...79
- vmovdqu K60_79(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3
- ROTATE_ARGS
- I = (I+1)
-.endr
-
- vpaddd AA,A,A
- vpaddd BB,B,B
- vpaddd CC,C,C
- vpaddd DD,D,D
- vpaddd EE,E,E
-
- sub $1, arg2
- jne lloop
-
- # write out digests
- vmovdqu A, 0*32(arg1)
- vmovdqu B, 1*32(arg1)
- vmovdqu C, 2*32(arg1)
- vmovdqu D, 3*32(arg1)
- vmovdqu E, 4*32(arg1)
-
- # update input pointers
- add IDX, inp0
- add IDX, inp1
- add IDX, inp2
- add IDX, inp3
- add IDX, inp4
- add IDX, inp5
- add IDX, inp6
- add IDX, inp7
- mov inp0, _data_ptr (arg1)
- mov inp1, _data_ptr + 1*8(arg1)
- mov inp2, _data_ptr + 2*8(arg1)
- mov inp3, _data_ptr + 3*8(arg1)
- mov inp4, _data_ptr + 4*8(arg1)
- mov inp5, _data_ptr + 5*8(arg1)
- mov inp6, _data_ptr + 6*8(arg1)
- mov inp7, _data_ptr + 7*8(arg1)
-
- ################
- ## Postamble
-
- mov RSP_SAVE, %rsp
-
- # restore callee-saved clobbered registers
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- ret
-ENDPROC(sha1_x8_avx2)
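
The transpose loop above runs twice per 64-byte block (2 x 32 bytes per lane), leaving the message words on the stack lane-interleaved so that each round can feed all eight digests with a single 256-bit memory operand. A hypothetical C picture of that stack area:

	#include <stdint.h>

	/* Model of the transposed block addressed as (memW & 15) * 32(%rsp):
	 * row = message word, column = lane, so one vpaddd advances the same
	 * round in all eight streams at once. */
	struct sha1_x8_block {
		uint32_t w[16][8];	/* w[word][lane] */
	};
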
-
-
-.section .rodata.cst32.K00_19, "aM", @progbits, 32
-.align 32
-K00_19:
-.octa 0x5A8279995A8279995A8279995A827999
-.octa 0x5A8279995A8279995A8279995A827999
-
-.section .rodata.cst32.K20_39, "aM", @progbits, 32
-.align 32
-K20_39:
-.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
-.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
-
-.section .rodata.cst32.K40_59, "aM", @progbits, 32
-.align 32
-K40_59:
-.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
-.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
-
-.section .rodata.cst32.K60_79, "aM", @progbits, 32
-.align 32
-K60_79:
-.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
-.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
-
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK:
-.octa 0x0c0d0e0f08090a0b0405060700010203
-.octa 0x0c0d0e0f08090a0b0405060700010203
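
The four K constants are not arbitrary: FIPS 180-1 defines them as floor(2^30 * sqrt(n)) for n = 2, 3, 5 and 10, replicated here across all eight lanes, while PSHUFFLE_BYTE_FLIP_MASK byte-swaps the little-endian loads into the big-endian word order SHA-1 expects. A small stand-alone check of the constants (illustrative program, not part of the driver):

	#include <math.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		static const double n[4] = { 2, 3, 5, 10 };
		int i;

		/* Prints 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6. */
		for (i = 0; i < 4; i++)
			printf("0x%08X\n",
			       (uint32_t)(sqrt(n[i]) * 1073741824.0));
		return 0;
	}
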
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
deleted file mode 100644
index 53ad6e7db747..000000000000
--- a/arch/x86/crypto/sha256-mb/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-OBJECT_FILES_NON_STANDARD := y
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb.o
- sha256-mb-y := sha256_mb.o sha256_mb_mgr_flush_avx2.o \
- sha256_mb_mgr_init_avx2.o sha256_mb_mgr_submit_avx2.o sha256_x8_avx2.o
-endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
deleted file mode 100644
index 97c5fc43e115..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ /dev/null
@@ -1,1013 +0,0 @@
-/*
- * Multi buffer SHA256 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha256_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha256_mb_alg_state;
-
-struct sha256_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_submit)
- (struct sha256_mb_mgr *state, struct job_sha256 *job);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
- (struct sha256_mb_mgr *state);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
- (struct sha256_mb_mgr *state);
-
-static inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA256_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA256_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA256_PADLENGTHFIELD_SIZE;
-
-#if SHA256_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA256_LOG2_BLOCK_SIZE;
-}
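
In words: place the mandatory 0x80 byte after the residual bytes, zero up to the next block boundary that still leaves room for the 8-byte bit-length field, and report how many extra blocks that produced. A hypothetical sanity check of the arithmetic:

	/* Illustrative only: a 3-byte tail fits 0x80 plus the 8-byte length
	 * in one 64-byte block; a 60-byte tail does not and needs two. */
	static void sha256_pad_examples(void)
	{
		uint8_t block[SHA256_BLOCK_SIZE * 2];

		WARN_ON(sha256_pad(block, 3) != 1);	/* pads to 64 bytes */
		WARN_ON(sha256_pad(block, 60) != 2);	/* pads to 128 bytes */
	}
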
-
-static struct sha256_hash_ctx
- *sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr,
- struct sha256_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA256_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA256_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA256_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha256_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha256_hash_ctx
- *sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user. If it is not ready, resubmit the job to finish processing.
- * If sha256_ctx_mgr_resubmit returned a job, it is ready to be
- * returned. Otherwise, all jobs currently being managed by the
- * hash_ctx_mgr still need processing.
- */
- struct sha256_hash_ctx *ctx;
-
- ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr);
- return sha256_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr)
-{
- sha256_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
- struct sha256_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- return ctx;
- }
-
- /* If we made it here, there was no error during this call to submit */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently
- * being processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) {
- /*
- * Compute how many bytes to copy from user buffer into
- * extra block
- */
- uint32_t copy_len = SHA256_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy(
- &ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /* The extra block should never contain more than 1 block */
- assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);
-
- /*
- * If the extra block buffer contains exactly 1 block,
- * it can be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- return sha256_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr)
-{
- struct sha256_hash_ctx *ctx;
-
- while (1) {
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned 0, there are no more jobs in flight. */
- if (!ctx)
- return NULL;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha256_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha256_ctx_mgr_resubmit returned a job, it is ready to
- * be returned. Otherwise, all jobs currently being managed by
- * the sha256_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- return ctx;
- }
-}
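
Taken together, submit/resubmit/flush form the manager's whole life cycle: submit parks a job in a lane and may hand back some completed job, while flush forces progress when the lanes never fill. A hypothetical single-job walk-through under FPU protection (the real call sites below interleave many jobs and flush only from the timer path):

	static struct sha256_hash_ctx *
	sha256_one_shot_sketch(struct sha256_ctx_mgr *mgr,
			       struct sha256_hash_ctx *ctx,
			       const void *data, uint32_t len)
	{
		struct sha256_hash_ctx *done;

		kernel_fpu_begin();
		done = sha256_ctx_mgr_submit(mgr, ctx, data, len, HASH_LAST);
		if (!done)	/* lanes not full: force completion */
			done = sha256_ctx_mgr_flush(mgr);
		kernel_fpu_end();

		return done;	/* ->job.result_digest holds the hash */
	}
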
-
-static int sha256_mb_init(struct ahash_request *areq)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA256_H0;
- sctx->job.result_digest[1] = SHA256_H1;
- sctx->job.result_digest[2] = SHA256_H2;
- sctx->job.result_digest[3] = SHA256_H3;
- sctx->job.result_digest[4] = SHA256_H4;
- sctx->job.result_digest[5] = SHA256_H5;
- sctx->job.result_digest[6] = SHA256_H6;
- sctx->job.result_digest[7] = SHA256_H7;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be32 *dst = (__be32 *) rctx->out;
-
- for (i = 0; i < 8; ++i)
- dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha256_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha256_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha256_ctx_mgr_flush(cstate->mgr);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha256_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
-
- /* remove from work list */
- spin_lock(&cstate->work_lock);
- list_del(&rctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock(&cstate->work_lock);
- list_del(&req_ctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
- }
-
- return 0;
-}
-
-static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock(&cstate->work_lock);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock(&cstate->work_lock);
-
- mcryptd_arm_flusher(cstate, delay);
-}
-
-static int sha256_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- sha256_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- sha256_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha256_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
- HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha256_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha256_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha256_hash_ctx));
-
- return 0;
-}
-
-static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha256_mb_areq_alg = {
- .init = sha256_mb_init,
- .update = sha256_mb_update,
- .final = sha256_mb_final,
- .finup = sha256_mb_finup,
- .export = sha256_mb_export,
- .import = sha256_mb_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_hash_ctx),
- .base = {
- .cra_name = "__sha256-mb",
- .cra_driver_name = "__intel_sha256-mb",
- .cra_priority = 100,
- /*
- * use ASYNC flag as some buffers in multi-buffer
- * algo may not have completed before hashing thread
- * sleep
- */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha256_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha256_mb_areq_init_tfm,
- .cra_exit = sha256_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha256_hash_ctx),
- }
- }
-};
-
-static int sha256_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha256_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha256_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha256_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha256_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha256_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha256_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha256_mb_async_alg = {
- .init = sha256_mb_async_init,
- .update = sha256_mb_async_update,
- .final = sha256_mb_async_final,
- .finup = sha256_mb_async_finup,
- .export = sha256_mb_async_export,
- .import = sha256_mb_async_import,
- .digest = sha256_mb_async_digest,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_hash_ctx),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha256_mb_async_alg.halg.base.cra_list),
- .cra_init = sha256_mb_async_init_tfm,
- .cra_exit = sha256_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha256_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
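
Because of the low priority, plain "sha256" lookups will normally resolve to a different driver; callers who want this one must ask for "sha256_mb" by driver name. From user space that is typically done through the AF_ALG socket interface, sketched here (error handling omitted; illustrative only):

	#include <sys/socket.h>
	#include <linux/if_alg.h>

	static int sha256_mb_open(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sha256_mb",	/* cra_driver_name */
		};
		int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		/* write() data on the accepted fd, then read() the
		 * 32-byte digest. */
		return accept(tfmfd, NULL, 0);
	}
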
-
-static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha256_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha256_hash_ctx *)
- sha256_ctx_mgr_flush(cstate->mgr);
- kernel_fpu_end();
- if (!sha_ctx) {
-			pr_err("sha256_mb error: nothing got flushed for non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- /* get the hash context and then flush time */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha256_mb_mod_init(void)
-{
-	int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha256_mb_alg_state.alg_cstate = alloc_percpu
- (struct mcryptd_alg_cstate);
-
- sha256_job_mgr_init = sha256_mb_mgr_init_avx2;
- sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2;
- sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2;
- sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2;
-
- if (!sha256_mb_alg_state.alg_cstate)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha256_mb_alg_state;
-		cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr),
-					GFP_KERNEL);
-		if (!cpu_state->mgr) {
-			err = -ENOMEM;
-			goto err2;
-		}
- sha256_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha256_mb_alg_state.flusher = &sha256_mb_flusher;
-
- err = crypto_register_ahash(&sha256_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha256_mb_async_alg);
- if (err)
- goto err1;
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha256_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha256_mb_alg_state.alg_cstate);
-	return err;
-}
-
-static void __exit sha256_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha256_mb_async_alg);
- crypto_unregister_ahash(&sha256_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha256_mb_alg_state.alg_cstate);
-}
-
-module_init(sha256_mb_mod_init);
-module_exit(sha256_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS_CRYPTO("sha256");
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
deleted file mode 100644
index 7c432543dc7f..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Header file for multi buffer SHA256 context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha256_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-
-#ifdef HASH_CTX_DEBUG
- HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
-#endif
-};
-
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
-
-
-/* Hash Constants and Typedefs */
-#define SHA256_DIGEST_LENGTH 8
-#define SHA256_LOG2_BLOCK_SIZE 6
-
-#define SHA256_PADLENGTHFIELD_SIZE 8
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha256_ctx_mgr {
- struct sha256_mb_mgr mgr;
-};
-
-/* typedef struct sha256_ctx_mgr sha256_ctx_mgr; */
-
-struct sha256_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha256 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
deleted file mode 100644
index b01ae408c56d..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Header file for multi buffer SHA256 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-#include <linux/types.h>
-
-#define NUM_SHA256_DIGEST_WORDS 8
-
-enum job_sts {
-	STS_UNKNOWN = 0,
-	STS_BEING_PROCESSED = 1,
-	STS_COMPLETED = 2,
-	STS_INTERNAL_ERROR = 3,
-	STS_ERROR = 4
-};
-
-struct job_sha256 {
- u8 *buffer;
- u32 len;
- u32 result_digest[NUM_SHA256_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-/* SHA256 out-of-order scheduler */
-
-/* typedef uint32_t sha8_digest_array[8][8]; */
-
-struct sha256_args_x8 {
- uint32_t digest[8][8];
- uint8_t *data_ptr[8];
-};
-
-struct sha256_lane_data {
- struct job_sha256 *job_in_lane;
-};
-
-struct sha256_mb_mgr {
- struct sha256_args_x8 args;
-
- uint32_t lens[8];
-
-	/*
-	 * Each nibble is the index (0...7) of an unused lane; nibble 8
-	 * is set to 0xF as the "all lanes free" sentinel that the flush
-	 * path tests via bit 32+3.
-	 */
-	uint64_t unused_lanes;
- struct sha256_lane_data ldata[8];
-};
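
unused_lanes behaves as a stack of free-lane indices, one nibble each: the submit path pops the low nibble, the flush/completion path pushes it back. A scalar model of the bookkeeping done in the _avx2.S helpers (function names are illustrative):

	/* After init the word is 0xF76543210: indices 0..7 with the 0xF
	 * sentinel on top. While all lanes are free the sentinel occupies
	 * bits 32-35, which is what flush tests with bt $32+3. */
	static unsigned int lane_pop(uint64_t *lanes)	/* allocate a lane */
	{
		unsigned int idx = *lanes & 0xF;

		*lanes >>= 4;
		return idx;
	}

	static void lane_push(uint64_t *lanes, unsigned int idx) /* free it */
	{
		*lanes = (*lanes << 4) | idx;
	}
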
-
-
-#define SHA256_MB_MGR_NUM_LANES_AVX2 8
-
-void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state);
-struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state,
- struct job_sha256 *job);
-struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state);
-struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state);
-
-#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
deleted file mode 100644
index 5c377bac21d0..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Header file for multi buffer SHA256 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count is in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. Its arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# overridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
-
-#########################################################################
-
-#ifndef _DATASTRUCT_ASM_
-#define _DATASTRUCT_ASM_
-
-#define SZ8 8*SHA256_DIGEST_WORD_SIZE
-#define ROUNDS 64*SZ8
-#define PTR_SZ 8
-#define SHA256_DIGEST_WORD_SIZE 4
-#define MAX_SHA256_LANES 8
-#define SHA256_DIGEST_WORDS 8
-#define SHA256_DIGEST_ROW_SIZE (MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE)
-#define SHA256_DIGEST_SIZE (SHA256_DIGEST_ROW_SIZE * SHA256_DIGEST_WORDS)
-#define SHA256_BLK_SZ 64
-
-# START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-# FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-# END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
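
A worked example: for a field list containing one 8-byte, 8-aligned pointer, FIELD yields offset 0 and END_FIELDS a rounded size of 8, exactly what a C compiler produces for the equivalent struct. A hypothetical compile-time check of that correspondence:

	#include <stddef.h>

	/* C equivalent of the LANE_DATA layout defined further below:
	 * _job_in_lane == 0 and _LANE_DATA_size == 8. */
	struct lane_data_equiv {
		void *job_in_lane;	/* size 8, align 8 */
	};

	_Static_assert(offsetof(struct lane_data_equiv, job_in_lane) == 0,
		       "matches _job_in_lane");
	_Static_assert(sizeof(struct lane_data_equiv) == 8,
		       "matches _LANE_DATA_size");
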
-
-########################################################################
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
-	tmp = (_FIELD_OFFSET - tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endif
-.endstruc
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
-	align = \p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm	name, size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-#endif
-
-
-########################################################################
-#### Define SHA256 Out Of Order Data Structures
-########################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
- _LANE_DATA_size = _FIELD_OFFSET
- _LANE_DATA_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # SHA256_ARGS_X4
-### name size align
-FIELD _digest, 4*8*8, 4 # transposed digest
-FIELD _data_ptr, 8*8, 8 # array of pointers to data
-END_FIELDS
-
- _SHA256_ARGS_X4_size = _FIELD_OFFSET
- _SHA256_ARGS_X4_align = _STRUCT_ALIGN
- _SHA256_ARGS_X8_size = _FIELD_OFFSET
- _SHA256_ARGS_X8_align = _STRUCT_ALIGN
-
-#######################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA256_ARGS_X4_size, _SHA256_ARGS_X4_align
-FIELD _lens, 4*8, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
-END_FIELDS
-
- _MB_MGR_size = _FIELD_OFFSET
- _MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
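
These offsets must stay in lock-step with struct sha256_mb_mgr in sha256_mb_mgr.h: _args at 0 (the 8*8*4 = 256-byte digest plus 8 data pointers = 320 bytes), _lens at 320, _unused_lanes at 352 and _ldata at 360. A hypothetical compile-time cross-check, were one to add it on the C side:

	#include <stddef.h>
	#include "sha256_mb_mgr.h"

	_Static_assert(offsetof(struct sha256_mb_mgr, args) == 0, "_args");
	_Static_assert(offsetof(struct sha256_mb_mgr, lens) == 320, "_lens");
	_Static_assert(offsetof(struct sha256_mb_mgr, unused_lanes) == 352,
		       "_unused_lanes");
	_Static_assert(offsetof(struct sha256_mb_mgr, ldata) == 360, "_ldata");
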
-
-#######################################################################
-
-START_FIELDS #STACK_FRAME
-### name size align
-FIELD	_data,		16*SZ8,	1	# transposed input blocks
-FIELD	_digest,	8*SZ8,	1	# transposed digest
-FIELD _ytmp, 4*SZ8, 1
-FIELD _rsp, 8, 1
-END_FIELDS
-
- _STACK_FRAME_size = _FIELD_OFFSET
- _STACK_FRAME_align = _STRUCT_ALIGN
-
-#######################################################################
-
-########################################################################
-#### Define constants
-########################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-########################################################################
-#### Define JOB_SHA256 structure
-########################################################################
-
-START_FIELDS # JOB_SHA256
-
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 8, 8 # length in bytes
-FIELD _result_digest, 8*4, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
- _JOB_SHA256_size = _FIELD_OFFSET
- _JOB_SHA256_align = _STRUCT_ALIGN
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
deleted file mode 100644
index d2364c55bbde..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * Flush routine for SHA256 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-.extern sha256_x8_avx2
-
-#LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# Common register definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-# idx must be a register not clobbered by sha256_x8_avx2
-#define idx %r8
-#define DWORD_idx %r8d
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-#define tmp2_w %ebx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 %arg1
-
-#define extra_blocks %arg2
-#define p %arg2
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rdi : state
-ENTRY(sha256_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
- # If bit (32+3) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+3, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
- offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne four(%rip), idx
- offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne five(%rip), idx
- offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne six(%rip), idx
- offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne seven(%rip), idx
-
- # copy lane "idx" data pointer into the empty lanes (and park their lens)
-copy_lane_data:
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 8
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 4*I)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- vmovdqu _lens+0*16(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqu %xmm0, _lens+0*16(state)
- vmovdqu %xmm1, _lens+1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha256_x8_avx2
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
-
- mov unused_lanes, _unused_lanes(state)
- movl $0xFFFFFFFF, _lens(state,idx,4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
- vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- vmovdqu %xmm1, offset(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha256_mb_mgr_flush_avx2)
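The flush routine above relies on each lane's outstanding length being packed
as (len << 4) | lane before the vpminud/vpalignr horizontal minimum, so one
reduction yields both the shortest length and the lane that owns it; empty
lanes were parked at 0xFFFFFFFF and can never win. A scalar sketch of that
reduction (the helper name and types are illustrative, not the kernel's):

    #include <stdint.h>

    /* returns the lane index of the shortest job; *blocks gets its length */
    static int find_min_lane(const uint32_t packed_lens[8], uint32_t *blocks)
    {
            uint32_t min = packed_lens[0];
            int i;

            for (i = 1; i < 8; i++)         /* the vpminud/vpalignr tree */
                    if (packed_lens[i] < min)
                            min = packed_lens[i];
            *blocks = min >> 4;             /* blocks still to hash */
            return min & 0xF;               /* owning lane          */
    }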
-
-##############################################################################
-
-.align 16
-ENTRY(sha256_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- ## if bit 32+3 is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $(32+3), unused_lanes
- jc .return_null
-
- # Find min length
- vmovdqu _lens(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- test $~0xF, idx
- jnz .return_null
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
- vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- vmovdqu %xmm1, offset(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha256_mb_mgr_get_comp_job_avx2)
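The `test $~0xF, idx` in get_comp_job works because of the same packing: a
packed minimum above 0xF means even the shortest lane still has blocks
outstanding, so no job is complete. The equivalent scalar test (illustrative
helper, not the kernel's API):

    #include <stdint.h>

    static int completed_lane(uint32_t packed_min)
    {
            if (packed_min & ~0xFu)         /* "test $~0xF, idx" */
                    return -1;              /* nothing finished yet */
            return packed_min & 0xF;        /* lane whose len == 0  */
    }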
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
-.octa 0x000000000000000000000000FFFFFFF0
-
-.section .rodata.cst8, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-two:
-.quad 2
-three:
-.quad 3
-four:
-.quad 4
-five:
-.quad 5
-six:
-.quad 6
-seven:
-.quad 7
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
deleted file mode 100644
index b0c498371e67..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Initialization code for multi buffer SHA256 algorithm for AVX2
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "sha256_mb_mgr.h"
-
-void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state)
-{
- unsigned int j;
-
- state->unused_lanes = 0xF76543210ULL;
- for (j = 0; j < 8; j++) {
- state->lens[j] = 0xFFFFFFFF;
- state->ldata[j].job_in_lane = NULL;
- }
-}
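The 0xF76543210 value is a stack of 4-bit lane numbers topped with an 0xF
sentinel: submit pops the low nibble, completion pushes a lane back, and the
assembly's `bt $(32+3)` checks whether the sentinel still sits in bits 32..35,
which is only true while all eight lane numbers remain stacked below it. A
sketch of the two operations (helper names are illustrative):

    #include <stdint.h>

    static unsigned int lane_pop(uint64_t *unused_lanes)
    {
            unsigned int lane = *unused_lanes & 0xF;

            *unused_lanes >>= 4;            /* "shr $4, unused_lanes" */
            return lane;
    }

    static void lane_push(uint64_t *unused_lanes, unsigned int lane)
    {
            *unused_lanes = (*unused_lanes << 4) | lane;
    }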
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
deleted file mode 100644
index b36ae7454084..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA256 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-.extern sha256_x8_avx2
-
-# LINUX register definitions
-arg1 = %rdi
-arg2 = %rsi
-size_offset = %rcx
-tmp2 = %rcx
-extra_blocks = %rdx
-
-# Common definitions
-#define state arg1
-#define job %rsi
-#define len2 arg2
-#define p2 arg2
-
-# idx must be a register not clobbered by sha256_x8_avx2
-idx = %r8
-DWORD_idx = %r8d
-last_len = %r8
-
-p = %r11
-start_offset = %r11
-
-unused_lanes = %rbx
-BYTE_unused_lanes = %bl
-
-job_rax = %rax
-len = %rax
-DWORD_len = %eax
-
-lane = %r12
-tmp3 = %r12
-
-tmp = %r9
-DWORD_tmp = %r9d
-
-lane_data = %r10
-
-# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job)
-# arg 1 : rdi : state
-# arg 2 : rsi : job
-ENTRY(sha256_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
- mov unused_lanes, lane
- and $0xF, lane
- shr $4, unused_lanes
- imul $_LANE_DATA_size, lane, lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- shl $4, len
- or lane, len
-
- movl DWORD_len, _lens(state , lane, 4)
-
- # Load digest words from result_digest
- vmovdqu _result_digest(job), %xmm0
- vmovdqu _result_digest+1*16(job), %xmm1
- vmovd %xmm0, _args_digest(state, lane, 4)
- vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4)
- vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4)
- vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4)
- vmovd %xmm1, _args_digest+4*32(state , lane, 4)
-
- vpextrd $1, %xmm1, _args_digest+5*32(state , lane, 4)
- vpextrd $2, %xmm1, _args_digest+6*32(state , lane, 4)
- vpextrd $3, %xmm1, _args_digest+7*32(state , lane, 4)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xF, unused_lanes
- jne return_null
-
-start_loop:
- # Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqa %xmm0, _lens + 0*16(state)
- vmovdqa %xmm1, _lens + 1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha256_x8_avx2
-
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state,idx,4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
-
- vpinsrd $1, _args_digest+5*32(state , idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state , idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state , idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-
-ENDPROC(sha256_mb_mgr_submit_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
- .octa 0x000000000000000000000000FFFFFFF0
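Note the batching policy in the submit path above: after parking the job it
compares the remaining unused_lanes against the bare 0xF sentinel
(`cmp $0xF, unused_lanes` / `jne return_null`) and returns NULL unless that
was the last free lane, so the eight-wide engine only runs on a full batch or
when the flusher fires. The decision in scalar form (illustrative):

    #include <stdint.h>

    static int batch_is_full(uint64_t unused_lanes)
    {
            /* after eight pops only the sentinel nibble remains */
            return unused_lanes == 0xF;
    }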
diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
deleted file mode 100644
index 1687c80c5995..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * Multi-buffer SHA256 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-## code to compute eight-lane (oct) SHA256 using AVX2
-## outer calling routine takes care of save and restore of XMM registers
-## Logic designed/laid out by JDG
-
-## Function clobbers: rax, rcx, rdx, rbx, rsi, r9-r15; %ymm0-15
-## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
-## Linux preserves: rdi rbp r8
-##
-## clobbers %ymm0-15
-
-arg1 = %rdi
-arg2 = %rsi
-reg3 = %rcx
-reg4 = %rdx
-
-# Common definitions
-STATE = arg1
-INP_SIZE = arg2
-
-IDX = %rax
-ROUND = %rbx
-TBL = reg3
-
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-inp4 = %r13
-inp5 = %r14
-inp6 = %r15
-inp7 = reg4
-
-a = %ymm0
-b = %ymm1
-c = %ymm2
-d = %ymm3
-e = %ymm4
-f = %ymm5
-g = %ymm6
-h = %ymm7
-
-T1 = %ymm8
-
-a0 = %ymm12
-a1 = %ymm13
-a2 = %ymm14
-TMP = %ymm15
-TMP0 = %ymm6
-TMP1 = %ymm7
-
-TT0 = %ymm8
-TT1 = %ymm9
-TT2 = %ymm10
-TT3 = %ymm11
-TT4 = %ymm12
-TT5 = %ymm13
-TT6 = %ymm14
-TT7 = %ymm15
-
-# Define stack usage
-
-# Assume stack aligned to 32 bytes before call
-# Therefore FRAMESZ mod 32 must be 32-8 = 24
-
-#define FRAMESZ 0x388
-
-#define VMOVPS vmovups
-
-# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
-# "transpose" data in {r0...r7} using temps {t0...t1}
-# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
-# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
-# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
-# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
-#
-# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
-# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
-# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
-# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
-# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
-# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
-# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
-# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
-#
-
-.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
- # process top half (r0..r3) {a...d}
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
- vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
- vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
- vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
- vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
-
- # use r2 in place of t0
- # process bottom half (r4..r7) {e...h}
- vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
- vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
- vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
- vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
- vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
- vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
- vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
- vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
-
- vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
- vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
- vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
- vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
- vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
- vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
- vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
- vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
-
-.endm
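TRANSPOSE8 is an 8x8 transpose of 32-bit words built from shuffles: output
row r collects word r of every input row, which is what lets eight
independent message streams be processed one round at a time across the
lanes. A scalar reference for what the shuffle tree computes (illustrative):

    #include <stdint.h>

    static void transpose8(uint32_t out[8][8], const uint32_t in[8][8])
    {
            int r, c;

            for (r = 0; r < 8; r++)
                    for (c = 0; c < 8; c++)
                            out[r][c] = in[c][r];   /* row r = {h_r .. a_r} */
    }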
-
-.macro ROTATE_ARGS
-TMP_ = h
-h = g
-g = f
-f = e
-e = d
-d = c
-c = b
-b = a
-a = TMP_
-.endm
-
-.macro _PRORD reg imm tmp
- vpslld $(32-\imm),\reg,\tmp
- vpsrld $\imm,\reg, \reg
- vpor \tmp,\reg, \reg
-.endm
-
-# PRORD_nd reg, imm, tmp, src
-.macro _PRORD_nd reg imm tmp src
- vpslld $(32-\imm), \src, \tmp
- vpsrld $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-# PRORD dst/src, amt
-.macro PRORD reg imm
- _PRORD \reg,\imm,TMP
-.endm
-
-# PRORD_nd dst, src, amt
-.macro PRORD_nd reg tmp imm
- _PRORD_nd \reg, \imm, TMP, \tmp
-.endm
-
-# arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_00_15 _T1 i
- PRORD_nd a0,e,5 # sig1: a0 = (e >> 5)
-
- vpxor g, f, a2 # ch: a2 = f^g
- vpand e,a2, a2 # ch: a2 = (f^g)&e
- vpxor g, a2, a2 # a2 = ch
-
- PRORD_nd a1,e,25 # sig1: a1 = (e >> 25)
-
- vmovdqu \_T1,(SZ8*(\i & 0xf))(%rsp)
- vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
- vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5)
- PRORD a0, 6 # sig1: a0 = (e >> 6) ^ (e >> 11)
- vpaddd a2, h, h # h = h + ch
- PRORD_nd a2,a,11 # sig0: a2 = (a >> 11)
- vpaddd \_T1,h, h # h = h + ch + W + K
- vpxor a1, a0, a0 # a0 = sigma1
- PRORD_nd a1,a,22 # sig0: a1 = (a >> 22)
- vpxor c, a, \_T1 # maj: T1 = a^c
- add $SZ8, ROUND # ROUND++
- vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
- vpaddd a0, h, h
- vpaddd h, d, d
- vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11)
- PRORD a2,2 # sig0: a2 = (a >> 2) ^ (a >> 13)
- vpxor a1, a2, a2 # a2 = sig0
- vpand c, a, a1 # maj: a1 = a&c
- vpor \_T1, a1, a1 # a1 = maj
- vpaddd a1, h, h # h = h + ch + W + K + maj
- vpaddd a2, h, h # h = h + ch + W + K + maj + sigma0
- ROTATE_ARGS
-.endm
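Per 32-bit lane, ROUND_00_15 is the standard SHA-256 round: the PRORD chains
build Sigma1(e) and Sigma0(a), the vpand/vpxor/vpor sequences build Ch and
Maj, and ROTATE_ARGS renames the registers instead of moving data. A scalar
sketch, assuming the caller passes wk = W[i] + K[i] (which the macro forms
with the vpaddd against TBL):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned int n)
    {
            return (x >> n) | (x << (32 - n));
    }

    static void round_00_15(uint32_t s[8], uint32_t wk)
    {
            uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
            uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
            uint32_t sigma1 = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
            uint32_t ch     = (e & (f ^ g)) ^ g;
            uint32_t sigma0 = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
            uint32_t maj    = ((a ^ c) & b) | (a & c);
            uint32_t t1     = h + sigma1 + ch + wk;
            uint32_t t2     = sigma0 + maj;

            /* ROTATE_ARGS: shift the names down one position */
            s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
            s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
    }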
-
-# arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_16_XX _T1 i
- vmovdqu (SZ8*((\i-15)&0xf))(%rsp), \_T1
- vmovdqu (SZ8*((\i-2)&0xf))(%rsp), a1
- vmovdqu \_T1, a0
- PRORD \_T1,11
- vmovdqu a1, a2
- PRORD a1,2
- vpxor a0, \_T1, \_T1
- PRORD \_T1, 7
- vpxor a2, a1, a1
- PRORD a1, 17
- vpsrld $3, a0, a0
- vpxor a0, \_T1, \_T1
- vpsrld $10, a2, a2
- vpxor a2, a1, a1
- vpaddd (SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1
- vpaddd (SZ8*((\i-7)&0xf))(%rsp), a1, a1
- vpaddd a1, \_T1, \_T1
-
- ROUND_00_15 \_T1,\i
-.endm
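ROUND_16_XX keeps W[] as a 16-entry ring on the stack (hence the & 0xf
indexing) and extends it with the standard sigma functions before falling
into ROUND_00_15. The same computation in scalar form (ror32 repeated from
the previous sketch so this one stands alone):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned int n)
    {
            return (x >> n) | (x << (32 - n));
    }

    static uint32_t w_extend(const uint32_t w[16], unsigned int i)
    {
            uint32_t w15 = w[(i - 15) & 0xf], w2 = w[(i - 2) & 0xf];
            uint32_t s0 = ror32(w15, 7) ^ ror32(w15, 18) ^ (w15 >> 3);
            uint32_t s1 = ror32(w2, 17) ^ ror32(w2, 19) ^ (w2 >> 10);

            return w[(i - 16) & 0xf] + s0 + w[(i - 7) & 0xf] + s1;
    }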
-
-# SHA256_ARGS:
-# UINT256 digest[8]; // transposed digests, one row per state word
-# UINT8 *data_ptr[8]; // one input pointer per lane
-
-# void sha256_x8_avx2(SHA256_ARGS *args, UINT64 num_blocks);
-# arg 1 : STATE : pointer to args (transposed digests and data pointers)
-# arg 2 : INP_SIZE : size of input in blocks
- # general registers preserved in outer calling routine
- # outer calling routine saves all the XMM registers
- # save rsp, allocate 32-byte aligned for local variables
-ENTRY(sha256_x8_avx2)
-
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
- mov %rsp, IDX
- sub $FRAMESZ, %rsp
- and $~0x1F, %rsp
- mov IDX, _rsp(%rsp)
-
- # Load the pre-transposed incoming digest.
- vmovdqu 0*SHA256_DIGEST_ROW_SIZE(STATE),a
- vmovdqu 1*SHA256_DIGEST_ROW_SIZE(STATE),b
- vmovdqu 2*SHA256_DIGEST_ROW_SIZE(STATE),c
- vmovdqu 3*SHA256_DIGEST_ROW_SIZE(STATE),d
- vmovdqu 4*SHA256_DIGEST_ROW_SIZE(STATE),e
- vmovdqu 5*SHA256_DIGEST_ROW_SIZE(STATE),f
- vmovdqu 6*SHA256_DIGEST_ROW_SIZE(STATE),g
- vmovdqu 7*SHA256_DIGEST_ROW_SIZE(STATE),h
-
- lea K256_8(%rip),TBL
-
- # load the address of each of the 8 message lanes
- # getting ready to transpose input onto stack
- mov _args_data_ptr+0*PTR_SZ(STATE),inp0
- mov _args_data_ptr+1*PTR_SZ(STATE),inp1
- mov _args_data_ptr+2*PTR_SZ(STATE),inp2
- mov _args_data_ptr+3*PTR_SZ(STATE),inp3
- mov _args_data_ptr+4*PTR_SZ(STATE),inp4
- mov _args_data_ptr+5*PTR_SZ(STATE),inp5
- mov _args_data_ptr+6*PTR_SZ(STATE),inp6
- mov _args_data_ptr+7*PTR_SZ(STATE),inp7
-
- xor IDX, IDX
-lloop:
- xor ROUND, ROUND
-
- # save old digest
- vmovdqu a, _digest(%rsp)
- vmovdqu b, _digest+1*SZ8(%rsp)
- vmovdqu c, _digest+2*SZ8(%rsp)
- vmovdqu d, _digest+3*SZ8(%rsp)
- vmovdqu e, _digest+4*SZ8(%rsp)
- vmovdqu f, _digest+5*SZ8(%rsp)
- vmovdqu g, _digest+6*SZ8(%rsp)
- vmovdqu h, _digest+7*SZ8(%rsp)
- i = 0
-.rep 2
- VMOVPS i*32(inp0, IDX), TT0
- VMOVPS i*32(inp1, IDX), TT1
- VMOVPS i*32(inp2, IDX), TT2
- VMOVPS i*32(inp3, IDX), TT3
- VMOVPS i*32(inp4, IDX), TT4
- VMOVPS i*32(inp5, IDX), TT5
- VMOVPS i*32(inp6, IDX), TT6
- VMOVPS i*32(inp7, IDX), TT7
- vmovdqu g, _ytmp(%rsp)
- vmovdqu h, _ytmp+1*SZ8(%rsp)
- TRANSPOSE8 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, TMP0, TMP1
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP1
- vmovdqu _ytmp(%rsp), g
- vpshufb TMP1, TT0, TT0
- vpshufb TMP1, TT1, TT1
- vpshufb TMP1, TT2, TT2
- vpshufb TMP1, TT3, TT3
- vpshufb TMP1, TT4, TT4
- vpshufb TMP1, TT5, TT5
- vpshufb TMP1, TT6, TT6
- vpshufb TMP1, TT7, TT7
- vmovdqu _ytmp+1*SZ8(%rsp), h
- vmovdqu TT4, _ytmp(%rsp)
- vmovdqu TT5, _ytmp+1*SZ8(%rsp)
- vmovdqu TT6, _ytmp+2*SZ8(%rsp)
- vmovdqu TT7, _ytmp+3*SZ8(%rsp)
- ROUND_00_15 TT0,(i*8+0)
- vmovdqu _ytmp(%rsp), TT0
- ROUND_00_15 TT1,(i*8+1)
- vmovdqu _ytmp+1*SZ8(%rsp), TT1
- ROUND_00_15 TT2,(i*8+2)
- vmovdqu _ytmp+2*SZ8(%rsp), TT2
- ROUND_00_15 TT3,(i*8+3)
- vmovdqu _ytmp+3*SZ8(%rsp), TT3
- ROUND_00_15 TT0,(i*8+4)
- ROUND_00_15 TT1,(i*8+5)
- ROUND_00_15 TT2,(i*8+6)
- ROUND_00_15 TT3,(i*8+7)
- i = (i+1)
-.endr
- add $64, IDX
- i = (i*8)
-
- jmp Lrounds_16_xx
-.align 16
-Lrounds_16_xx:
-.rep 16
- ROUND_16_XX T1, i
- i = (i+1)
-.endr
-
- cmp $ROUNDS,ROUND
- jb Lrounds_16_xx
-
- # add old digest
- vpaddd _digest+0*SZ8(%rsp), a, a
- vpaddd _digest+1*SZ8(%rsp), b, b
- vpaddd _digest+2*SZ8(%rsp), c, c
- vpaddd _digest+3*SZ8(%rsp), d, d
- vpaddd _digest+4*SZ8(%rsp), e, e
- vpaddd _digest+5*SZ8(%rsp), f, f
- vpaddd _digest+6*SZ8(%rsp), g, g
- vpaddd _digest+7*SZ8(%rsp), h, h
-
- sub $1, INP_SIZE # unit is blocks
- jne lloop
-
- # write back to memory (state object) the transposed digest
- vmovdqu a, 0*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu b, 1*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu c, 2*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu d, 3*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu e, 4*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu f, 5*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu g, 6*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu h, 7*SHA256_DIGEST_ROW_SIZE(STATE)
-
- # update input pointers
- add IDX, inp0
- mov inp0, _args_data_ptr+0*8(STATE)
- add IDX, inp1
- mov inp1, _args_data_ptr+1*8(STATE)
- add IDX, inp2
- mov inp2, _args_data_ptr+2*8(STATE)
- add IDX, inp3
- mov inp3, _args_data_ptr+3*8(STATE)
- add IDX, inp4
- mov inp4, _args_data_ptr+4*8(STATE)
- add IDX, inp5
- mov inp5, _args_data_ptr+5*8(STATE)
- add IDX, inp6
- mov inp6, _args_data_ptr+6*8(STATE)
- add IDX, inp7
- mov inp7, _args_data_ptr+7*8(STATE)
-
- # Postamble
- mov _rsp(%rsp), %rsp
-
- # restore callee-saved clobbered registers
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- ret
-ENDPROC(sha256_x8_avx2)
-
-.section .rodata.K256_8, "a", @progbits
-.align 64
-K256_8:
- .octa 0x428a2f98428a2f98428a2f98428a2f98
- .octa 0x428a2f98428a2f98428a2f98428a2f98
- .octa 0x71374491713744917137449171374491
- .octa 0x71374491713744917137449171374491
- .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
- .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
- .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
- .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
- .octa 0x3956c25b3956c25b3956c25b3956c25b
- .octa 0x3956c25b3956c25b3956c25b3956c25b
- .octa 0x59f111f159f111f159f111f159f111f1
- .octa 0x59f111f159f111f159f111f159f111f1
- .octa 0x923f82a4923f82a4923f82a4923f82a4
- .octa 0x923f82a4923f82a4923f82a4923f82a4
- .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
- .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
- .octa 0xd807aa98d807aa98d807aa98d807aa98
- .octa 0xd807aa98d807aa98d807aa98d807aa98
- .octa 0x12835b0112835b0112835b0112835b01
- .octa 0x12835b0112835b0112835b0112835b01
- .octa 0x243185be243185be243185be243185be
- .octa 0x243185be243185be243185be243185be
- .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
- .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
- .octa 0x72be5d7472be5d7472be5d7472be5d74
- .octa 0x72be5d7472be5d7472be5d7472be5d74
- .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
- .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
- .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
- .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
- .octa 0xc19bf174c19bf174c19bf174c19bf174
- .octa 0xc19bf174c19bf174c19bf174c19bf174
- .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
- .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
- .octa 0xefbe4786efbe4786efbe4786efbe4786
- .octa 0xefbe4786efbe4786efbe4786efbe4786
- .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
- .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
- .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
- .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
- .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
- .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
- .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
- .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
- .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
- .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
- .octa 0x76f988da76f988da76f988da76f988da
- .octa 0x76f988da76f988da76f988da76f988da
- .octa 0x983e5152983e5152983e5152983e5152
- .octa 0x983e5152983e5152983e5152983e5152
- .octa 0xa831c66da831c66da831c66da831c66d
- .octa 0xa831c66da831c66da831c66da831c66d
- .octa 0xb00327c8b00327c8b00327c8b00327c8
- .octa 0xb00327c8b00327c8b00327c8b00327c8
- .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
- .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
- .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
- .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
- .octa 0xd5a79147d5a79147d5a79147d5a79147
- .octa 0xd5a79147d5a79147d5a79147d5a79147
- .octa 0x06ca635106ca635106ca635106ca6351
- .octa 0x06ca635106ca635106ca635106ca6351
- .octa 0x14292967142929671429296714292967
- .octa 0x14292967142929671429296714292967
- .octa 0x27b70a8527b70a8527b70a8527b70a85
- .octa 0x27b70a8527b70a8527b70a8527b70a85
- .octa 0x2e1b21382e1b21382e1b21382e1b2138
- .octa 0x2e1b21382e1b21382e1b21382e1b2138
- .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
- .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
- .octa 0x53380d1353380d1353380d1353380d13
- .octa 0x53380d1353380d1353380d1353380d13
- .octa 0x650a7354650a7354650a7354650a7354
- .octa 0x650a7354650a7354650a7354650a7354
- .octa 0x766a0abb766a0abb766a0abb766a0abb
- .octa 0x766a0abb766a0abb766a0abb766a0abb
- .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
- .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
- .octa 0x92722c8592722c8592722c8592722c85
- .octa 0x92722c8592722c8592722c8592722c85
- .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
- .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
- .octa 0xa81a664ba81a664ba81a664ba81a664b
- .octa 0xa81a664ba81a664ba81a664ba81a664b
- .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
- .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
- .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
- .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
- .octa 0xd192e819d192e819d192e819d192e819
- .octa 0xd192e819d192e819d192e819d192e819
- .octa 0xd6990624d6990624d6990624d6990624
- .octa 0xd6990624d6990624d6990624d6990624
- .octa 0xf40e3585f40e3585f40e3585f40e3585
- .octa 0xf40e3585f40e3585f40e3585f40e3585
- .octa 0x106aa070106aa070106aa070106aa070
- .octa 0x106aa070106aa070106aa070106aa070
- .octa 0x19a4c11619a4c11619a4c11619a4c116
- .octa 0x19a4c11619a4c11619a4c11619a4c116
- .octa 0x1e376c081e376c081e376c081e376c08
- .octa 0x1e376c081e376c081e376c081e376c08
- .octa 0x2748774c2748774c2748774c2748774c
- .octa 0x2748774c2748774c2748774c2748774c
- .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
- .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
- .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
- .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
- .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
- .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
- .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
- .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
- .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
- .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
- .octa 0x748f82ee748f82ee748f82ee748f82ee
- .octa 0x748f82ee748f82ee748f82ee748f82ee
- .octa 0x78a5636f78a5636f78a5636f78a5636f
- .octa 0x78a5636f78a5636f78a5636f78a5636f
- .octa 0x84c8781484c8781484c8781484c87814
- .octa 0x84c8781484c8781484c8781484c87814
- .octa 0x8cc702088cc702088cc702088cc70208
- .octa 0x8cc702088cc702088cc702088cc70208
- .octa 0x90befffa90befffa90befffa90befffa
- .octa 0x90befffa90befffa90befffa90befffa
- .octa 0xa4506ceba4506ceba4506ceba4506ceb
- .octa 0xa4506ceba4506ceba4506ceba4506ceb
- .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
- .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
- .octa 0xc67178f2c67178f2c67178f2c67178f2
- .octa 0xc67178f2c67178f2c67178f2c67178f2
-
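K256_8 above is nothing more than each K256 round constant broadcast across
the eight lanes (two .octa rows form one 256-bit ymm word per constant), so
each round's vpaddd adds the same K[i] to all lanes at once. A generator
sketch (illustrative, not kernel code):

    #include <stdint.h>

    static void make_k256_8(uint32_t k8[64][8], const uint32_t k256[64])
    {
            int i, lane;

            for (i = 0; i < 64; i++)
                    for (lane = 0; lane < 8; lane++)
                            k8[i][lane] = k256[i];
    }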
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK:
-.octa 0x0c0d0e0f08090a0b0405060700010203
-.octa 0x0c0d0e0f08090a0b0405060700010203
-
-.section .rodata.cst256.K256, "aM", @progbits, 256
-.align 64
-.global K256
-K256:
- .int 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
- .int 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
- .int 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
- .int 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
- .int 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
- .int 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
- .int 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
- .int 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
- .int 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
- .int 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
- .int 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
- .int 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
- .int 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
- .int 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
- .int 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
- .int 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
diff --git a/arch/x86/crypto/sha512-mb/Makefile b/arch/x86/crypto/sha512-mb/Makefile
deleted file mode 100644
index 90f1ef69152e..000000000000
--- a/arch/x86/crypto/sha512-mb/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb.o
- sha512-mb-y := sha512_mb.o sha512_mb_mgr_flush_avx2.o \
- sha512_mb_mgr_init_avx2.o sha512_mb_mgr_submit_avx2.o sha512_x4_avx2.o
-endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
deleted file mode 100644
index 26b85678012d..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * Multi buffer SHA512 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha512_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha512_mb_alg_state;
-
-struct sha512_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
- (struct sha512_mb_mgr *state,
- struct job_sha512 *job);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
- (struct sha512_mb_mgr *state);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
- (struct sha512_mb_mgr *state);
-
-inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA512_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA512_PADLENGTHFIELD_SIZE;
-
-#if SHA512_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA512_LOG2_BLOCK_SIZE;
-}
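A hand-checked trace of the padding arithmetic for a 200-byte message,
written as a standalone check rather than kernel code (it assumes
SHA512_BLOCK_SIZE == 128 and SHA512_PADLENGTHFIELD_SIZE == 16, as above):

    #include <assert.h>
    #include <stdint.h>

    static void sha512_pad_example(void)
    {
            uint64_t total_len = 200;
            uint32_t i = total_len & (128 - 1);     /* 72: 0x80 lands here */

            i += ((128 - 1) & (0 - (total_len + 16 + 1))) + 1 + 16;
            assert(i == 128);       /* bit count at bytes 120..127 */
            assert((i >> 7) == 1);  /* one extra block to hash     */
    }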
-
-static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
- (struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA512_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA512_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA512_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr,
- &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha512_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- if (ctx)
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha512_hash_ctx
- *sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user.
- * If it is not ready, resubmit the job to finish processing.
- * If sha512_ctx_mgr_resubmit returned a job, it is ready to be
- * returned.
- * Otherwise, all jobs currently being managed by the hash_ctx_mgr
- * still need processing.
- */
- struct sha512_ctx_mgr *mgr;
- struct sha512_hash_ctx *ctx;
- unsigned long flags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, flags);
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_get_comp_job(&mgr->mgr);
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
- return ctx;
-}
-
-static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
-{
- sha512_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha512_hash_ctx
- *sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
- struct sha512_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- struct sha512_ctx_mgr *mgr;
- unsigned long irqflags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, irqflags);
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- goto unlock;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- goto unlock;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- goto unlock;
- }
-
- /*
- * If we made it here, there were no errors during this call to
- * submit
- */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently being
- * processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
- /* Compute how many bytes to copy from user buffer into extra
- * block
- */
- uint32_t copy_len = SHA512_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy
- (&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /* The extra block should never contain more than 1 block
- * here
- */
- assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);
-
- /* If the extra block buffer contains exactly 1 block, it can
- * be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
-unlock:
- spin_unlock_irqrestore(&cstate->work_lock, irqflags);
- return ctx;
-}
-
-static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
-{
- struct sha512_ctx_mgr *mgr;
- struct sha512_hash_ctx *ctx;
- unsigned long flags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, flags);
- while (1) {
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned NULL, there are no more jobs in flight. */
- if (!ctx)
- break;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha512_ctx_mgr_resubmit returned a job, it is ready to
- * be returned. Otherwise, all jobs currently being managed by
- * the sha512_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- break;
- }
- spin_unlock_irqrestore(&cstate->work_lock, flags);
- return ctx;
-}
-
-static int sha512_mb_init(struct ahash_request *areq)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA512_H0;
- sctx->job.result_digest[1] = SHA512_H1;
- sctx->job.result_digest[2] = SHA512_H2;
- sctx->job.result_digest[3] = SHA512_H3;
- sctx->job.result_digest[4] = SHA512_H4;
- sctx->job.result_digest[5] = SHA512_H5;
- sctx->job.result_digest[6] = SHA512_H6;
- sctx->job.result_digest[7] = SHA512_H7;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be64 *dst = (__be64 *) rctx->out;
-
- for (i = 0; i < 8; ++i)
- dst[i] = cpu_to_be64(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha512_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha512_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha512_ctx_mgr_flush(cstate);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha512_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
- unsigned long flags;
-
- /* remove from work list */
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_del(&rctx->waiter);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_del(&req_ctx->waiter);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
- }
-
- return 0;
-}
-
-static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
- unsigned long flags;
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- mcryptd_arm_flusher(cstate, delay);
-}
-
-static int sha512_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- sha512_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- sha512_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha512_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha512_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha512_hash_ctx));
-
- return 0;
-}
-
-static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha512_mb_areq_alg = {
- .init = sha512_mb_init,
- .update = sha512_mb_update,
- .final = sha512_mb_final,
- .finup = sha512_mb_finup,
- .export = sha512_mb_export,
- .import = sha512_mb_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct sha512_hash_ctx),
- .base = {
- .cra_name = "__sha512-mb",
- .cra_driver_name = "__intel_sha512-mb",
- .cra_priority = 100,
- /*
- * use ASYNC flag as some buffers in multi-buffer
- * algo may not have completed before hashing thread
- * sleep
- */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha512_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha512_mb_areq_init_tfm,
- .cra_exit = sha512_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha512_hash_ctx),
- }
- }
-};
-
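-/*
- * The async entry points below all follow the same pattern: shallow-copy
- * the caller's request, re-target it at the internal mcryptd tfm, and
- * forward the operation to the corresponding crypto_ahash_* call.
- */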
-static int sha512_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha512_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha512_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha512_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha512_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha512_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha512_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
-
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha512_mb_async_alg = {
- .init = sha512_mb_async_init,
- .update = sha512_mb_async_update,
- .final = sha512_mb_async_final,
- .finup = sha512_mb_async_finup,
- .digest = sha512_mb_async_digest,
- .export = sha512_mb_async_export,
- .import = sha512_mb_async_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct sha512_hash_ctx),
- .base = {
- .cra_name = "sha512",
- .cra_driver_name = "sha512_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha512_mb_async_alg.halg.base.cra_list),
- .cra_init = sha512_mb_async_init_tfm,
- .cra_exit = sha512_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha512_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
-
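-/*
- * Walk this CPU's pending-job list and force-flush every job whose
- * flush deadline has passed, then re-arm the flush timer for the next
- * pending job, if any remain.
- */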
-static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha512_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
-		if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha512_hash_ctx *)
- sha512_ctx_mgr_flush(cstate);
- kernel_fpu_end();
- if (!sha_ctx) {
- pr_err("sha512_mb error: nothing got flushed for"
- " non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
-		/* arm the flusher to fire at the next pending job's flush time */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha512_mb_mod_init(void)
-{
-
- int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha512_mb_alg_state.alg_cstate =
- alloc_percpu(struct mcryptd_alg_cstate);
-
- sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
- sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
- sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
- sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;
-
- if (!sha512_mb_alg_state.alg_cstate)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha512_mb_alg_state;
- cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
- GFP_KERNEL);
- if (!cpu_state->mgr)
- goto err2;
- sha512_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha512_mb_alg_state.flusher = &sha512_mb_flusher;
-
- err = crypto_register_ahash(&sha512_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha512_mb_async_alg);
- if (err)
- goto err1;
-
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha512_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha512_mb_alg_state.alg_cstate);
- return -ENODEV;
-}
-
-static void __exit sha512_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha512_mb_async_alg);
- crypto_unregister_ahash(&sha512_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha512_mb_alg_state.alg_cstate);
-}
-
-module_init(sha512_mb_mod_init);
-module_exit(sha512_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS("sha512");
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
deleted file mode 100644
index e5c465bd821e..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Header file for multi buffer SHA512 context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha512_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-};
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
-
-/* Hash Constants and Typedefs */
-#define SHA512_DIGEST_LENGTH 8
-#define SHA512_LOG2_BLOCK_SIZE 7
-
-#define SHA512_PADLENGTHFIELD_SIZE 16
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha512_ctx_mgr {
- struct sha512_mb_mgr mgr;
-};
-
-/* typedef struct sha512_ctx_mgr sha512_ctx_mgr; */
-
-struct sha512_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha512 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
deleted file mode 100644
index 178f17eef382..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Header file for multi buffer SHA512 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-#include <linux/types.h>
-
-#define NUM_SHA512_DIGEST_WORDS 8
-
-enum job_sts {STS_UNKNOWN = 0,
- STS_BEING_PROCESSED = 1,
- STS_COMPLETED = 2,
- STS_INTERNAL_ERROR = 3,
- STS_ERROR = 4
-};
-
-struct job_sha512 {
- u8 *buffer;
- u64 len;
- u64 result_digest[NUM_SHA512_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-struct sha512_args_x4 {
- uint64_t digest[8][4];
- uint8_t *data_ptr[4];
-};
-
-struct sha512_lane_data {
- struct job_sha512 *job_in_lane;
-};
-
-struct sha512_mb_mgr {
- struct sha512_args_x4 args;
-
- uint64_t lens[4];
-
-	/*
-	 * each byte is the index (0...7) of an unused lane;
-	 * byte 4 is set to 0xFF as an all-lanes-free flag
-	 */
-	uint64_t unused_lanes;
-	struct sha512_lane_data ldata[4];
-};
-
-#define SHA512_MB_MGR_NUM_LANES_AVX2 4
-
-void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state);
-struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state,
- struct job_sha512 *job);
-struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state);
-struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state);
-
-#endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
deleted file mode 100644
index cf2636d4c9ba..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Header file for multi buffer SHA512 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count is in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. Its arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# overridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
-
-#########################################################################
-
-#ifndef _DATASTRUCT_ASM_
-#define _DATASTRUCT_ASM_
-
-#define PTR_SZ 8
-#define SHA512_DIGEST_WORD_SIZE 8
-#define SHA512_MB_MGR_NUM_LANES_AVX2 4
-#define NUM_SHA512_DIGEST_WORDS 8
-#define SZ4 4*SHA512_DIGEST_WORD_SIZE
-#define ROUNDS 80*SZ4
-#define SHA512_DIGEST_ROW_SIZE (SHA512_MB_MGR_NUM_LANES_AVX2 * 8)
-
-# START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-# FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-# END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
- tmp = (_FIELD_OFFSET - ##tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endif
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
-	align = \p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm name size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-#endif
-
-###################################################################
-### Define SHA512 Out Of Order Data Structures
-###################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
- _LANE_DATA_size = _FIELD_OFFSET
- _LANE_DATA_align = _STRUCT_ALIGN
-
-####################################################################
-
-START_FIELDS # SHA512_ARGS_X4
-### name size align
-FIELD _digest, 8*8*4, 4 # transposed digest
-FIELD _data_ptr, 8*4, 8 # array of pointers to data
-END_FIELDS
-
- _SHA512_ARGS_X4_size = _FIELD_OFFSET
- _SHA512_ARGS_X4_align = _STRUCT_ALIGN
-
-#####################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA512_ARGS_X4_size, _SHA512_ARGS_X4_align
-FIELD _lens, 8*4, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*4, _LANE_DATA_align
-END_FIELDS
-
- _MB_MGR_size = _FIELD_OFFSET
- _MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
-
-#######################################################################
-
-#######################################################################
-#### Define constants
-#######################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-#######################################################################
-#### Define JOB_SHA512 structure
-#######################################################################
-
-START_FIELDS # JOB_SHA512
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 8, 8 # length in bytes
-FIELD _result_digest, 8*8, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
- _JOB_SHA512_size = _FIELD_OFFSET
- _JOB_SHA512_align = _STRUCT_ALIGN
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
deleted file mode 100644
index 7c629caebc05..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Flush routine for SHA512 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-.extern sha512_x4_avx2
-
-# LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# idx needs to be other than arg1, arg2, rbx, r12
-#define idx %rdx
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 arg1
-
-#define extra_blocks arg2
-#define p arg2
-
-#define tmp4 %r8
-#define lens0 %r8
-
-#define lens1 %r9
-#define lens2 %r10
-#define lens3 %r11
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rdi : state
-ENTRY(sha512_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
-	# If bit (32+7) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+7, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
-
- # copy idx to empty lanes
-copy_lane_data:
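-	# Point every idle lane at the same valid data pointer and give it
-	# the maximum length, so the min-length search below never picks it.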
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 4
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 8*I +4)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- mov _lens + 0*8(state),lens0
- mov lens0,idx
- mov _lens + 1*8(state),lens1
- cmp idx,lens1
- cmovb lens1,idx
- mov _lens + 2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens + 3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- mov idx,len2
- and $0xF,idx
- and $~0xFF,len2
- jz len_is_0
-
- sub len2, lens0
- sub len2, lens1
- sub len2, lens2
- sub len2, lens3
- shr $32,len2
- mov lens0, _lens + 0*8(state)
- mov lens1, _lens + 1*8(state)
- mov lens2, _lens + 2*8(state)
- mov lens3, _lens + 3*8(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha512_x4_avx2
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens+4(state, idx, 8)
-
- vmovq _args_digest+0*32(state, idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state, idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state, idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state, idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
- vmovdqu %xmm2, _result_digest+2*16(job_rax)
- vmovdqu %xmm3, _result_digest+3*16(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha512_mb_mgr_flush_avx2)
-.align 16
-
-ENTRY(sha512_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- mov _unused_lanes(state), unused_lanes
- bt $(32+7), unused_lanes
- jc .return_null
-
- # Find min length
- mov _lens(state),lens0
- mov lens0,idx
- mov _lens+1*8(state),lens1
- cmp idx,lens1
- cmovb lens1,idx
- mov _lens+2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens+3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- test $~0xF,idx
- jnz .return_null
- and $0xF,idx
-
- #process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens+4(state, idx, 8)
-
- vmovq _args_digest(state, idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state, idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state, idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state, idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest+0*16(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
- vmovdqu %xmm2, _result_digest+2*16(job_rax)
- vmovdqu %xmm3, _result_digest+3*16(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha512_mb_mgr_get_comp_job_avx2)
-
-.section .rodata.cst8.one, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-
-.section .rodata.cst8.two, "aM", @progbits, 8
-.align 8
-two:
-.quad 2
-
-.section .rodata.cst8.three, "aM", @progbits, 8
-.align 8
-three:
-.quad 3
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
deleted file mode 100644
index 4ba709ba78e5..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA512 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-.extern sha512_x4_avx2
-
-#define arg1 %rdi
-#define arg2 %rsi
-
-#define idx %rdx
-#define last_len %rdx
-
-#define size_offset %rcx
-#define tmp2 %rcx
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-#define p2 arg2
-
-#define p %r11
-#define start_offset %r11
-
-#define unused_lanes %rbx
-
-#define job_rax %rax
-#define len %rax
-
-#define lane %r12
-#define tmp3 %r12
-#define lens3 %r12
-
-#define extra_blocks %r8
-#define lens0 %r8
-
-#define tmp %r9
-#define lens1 %r9
-
-#define lane_data %r10
-#define lens2 %r10
-
-#define DWORD_len %eax
-
-# JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job)
-# arg 1 : rdi : state
-# arg 2 : rsi : job
-ENTRY(sha512_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
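-	# the low byte of unused_lanes is the index of the next free lane;
-	# pop it off the byte stack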
- movzb %bl,lane
- shr $8, unused_lanes
- imul $_LANE_DATA_size, lane,lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- movl DWORD_len,_lens+4(state , lane, 8)
-
- # Load digest words from result_digest
- vmovdqu _result_digest+0*16(job), %xmm0
- vmovdqu _result_digest+1*16(job), %xmm1
- vmovdqu _result_digest+2*16(job), %xmm2
- vmovdqu _result_digest+3*16(job), %xmm3
-
- vmovq %xmm0, _args_digest(state, lane, 8)
- vpextrq $1, %xmm0, _args_digest+1*32(state , lane, 8)
- vmovq %xmm1, _args_digest+2*32(state , lane, 8)
- vpextrq $1, %xmm1, _args_digest+3*32(state , lane, 8)
- vmovq %xmm2, _args_digest+4*32(state , lane, 8)
- vpextrq $1, %xmm2, _args_digest+5*32(state , lane, 8)
- vmovq %xmm3, _args_digest+6*32(state , lane, 8)
- vpextrq $1, %xmm3, _args_digest+7*32(state , lane, 8)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xFF, unused_lanes
- jne return_null
-
-start_loop:
-
- # Find min length
- mov _lens+0*8(state),lens0
- mov lens0,idx
- mov _lens+1*8(state),lens1
- cmp idx,lens1
- cmovb lens1, idx
- mov _lens+2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens+3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- mov idx,len2
- and $0xF,idx
- and $~0xFF,len2
- jz len_is_0
-
- sub len2,lens0
- sub len2,lens1
- sub len2,lens2
- sub len2,lens3
- shr $32,len2
- mov lens0, _lens + 0*8(state)
- mov lens1, _lens + 1*8(state)
- mov lens2, _lens + 2*8(state)
- mov lens3, _lens + 3*8(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha512_x4_avx2
- # state and idx are intact
-
-len_is_0:
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF,_lens+4(state,idx,8)
- vmovq _args_digest+0*32(state , idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state , idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state , idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state , idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state , idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state , idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state , idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state , idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest + 0*16(job_rax)
- vmovdqu %xmm1, _result_digest + 1*16(job_rax)
- vmovdqu %xmm2, _result_digest + 2*16(job_rax)
- vmovdqu %xmm3, _result_digest + 3*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha512_mb_mgr_submit_avx2)
-
-/* UNUSED?
-.section .rodata.cst16, "aM", @progbits, 16
-.align 16
-H0: .int 0x6a09e667
-H1: .int 0xbb67ae85
-H2: .int 0x3c6ef372
-H3: .int 0xa54ff53a
-H4: .int 0x510e527f
-H5: .int 0x9b05688c
-H6: .int 0x1f83d9ab
-H7: .int 0x5be0cd19
-*/
diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
deleted file mode 100644
index e22e907643a6..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Multi-buffer SHA512 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# code to compute quad SHA512 using AVX2
-# use YMMs to tackle the larger digest size
-# outer calling routine takes care of save and restore of XMM registers
-# Logic designed/laid out by JDG
-
-# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
-# Stack must be aligned to 32 bytes before call
-# Linux clobbers: rax rbx rsi r8 r9 r10 r11 r12
-# Linux preserves: rcx rdx rdi rbp r13 r14 r15
-# clobbers ymm0-15
-
-#include <linux/linkage.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-arg1 = %rdi
-arg2 = %rsi
-
-# Common definitions
-STATE = arg1
-INP_SIZE = arg2
-
-IDX = %rax
-ROUND = %rbx
-TBL = %r8
-
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-
-a = %ymm0
-b = %ymm1
-c = %ymm2
-d = %ymm3
-e = %ymm4
-f = %ymm5
-g = %ymm6
-h = %ymm7
-
-a0 = %ymm8
-a1 = %ymm9
-a2 = %ymm10
-
-TT0 = %ymm14
-TT1 = %ymm13
-TT2 = %ymm12
-TT3 = %ymm11
-TT4 = %ymm10
-TT5 = %ymm9
-
-T1 = %ymm14
-TMP = %ymm15
-
-# Define stack usage
-STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24
-
-#define VMOVPD vmovupd
-_digest = SZ4*16
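-# Stack layout: 16 message-schedule slots of SZ4 bytes each at the base,
-# followed by the saved digest at _digest, plus alignment padding.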
-
-# transpose r0, r1, r2, r3, t0, t1
-# "transpose" data in {r0..r3} using temps {t0..t3}
-# Input looks like: {r0 r1 r2 r3}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-#
-# output looks like: {t0 r1 r0 r3}
-# t0 = {d1 d0 c1 c0 b1 b0 a1 a0}
-# r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
-# r0 = {d5 d4 c5 c4 b5 b4 a5 a4}
-# r3 = {d7 d6 c7 c6 b7 b6 a7 a6}
-
-.macro TRANSPOSE r0 r1 r2 r3 t0 t1
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
-
- vperm2f128 $0x20, \r2, \r0, \r1 # h6...a6
- vperm2f128 $0x31, \r2, \r0, \r3 # h2...a2
- vperm2f128 $0x31, \t1, \t0, \r0 # h5...a5
- vperm2f128 $0x20, \t1, \t0, \t0 # h1...a1
-.endm
-
-.macro ROTATE_ARGS
-TMP_ = h
-h = g
-g = f
-f = e
-e = d
-d = c
-c = b
-b = a
-a = TMP_
-.endm
-
-# PRORQ reg, imm, tmp
-# packed-rotate-right-double
-# does a rotate by doing two shifts and an or
-.macro _PRORQ reg imm tmp
- vpsllq $(64-\imm),\reg,\tmp
- vpsrlq $\imm,\reg, \reg
- vpor \tmp,\reg, \reg
-.endm
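-# e.g. _PRORQ x, 8 computes (x >> 8) | (x << 56), a 64-bit rotate right by 8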
-
-# non-destructive
-# PRORQ_nd reg, imm, tmp, src
-.macro _PRORQ_nd reg imm tmp src
- vpsllq $(64-\imm), \src, \tmp
- vpsrlq $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-# PRORQ dst/src, amt
-.macro PRORQ reg imm
- _PRORQ \reg, \imm, TMP
-.endm
-
-# PRORQ_nd dst, src, amt
-.macro PRORQ_nd reg tmp imm
- _PRORQ_nd \reg, \imm, TMP, \tmp
-.endm
-
-#; arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_00_15 _T1 i
- PRORQ_nd a0, e, (18-14) # sig1: a0 = (e >> 4)
-
- vpxor g, f, a2 # ch: a2 = f^g
- vpand e,a2, a2 # ch: a2 = (f^g)&e
- vpxor g, a2, a2 # a2 = ch
-
-	PRORQ_nd a1,e,41	# sig1: a1 = (e >> 41)
-
-	offset = SZ4*(\i & 0xf)
-	vmovdqu	\_T1,offset(%rsp)
-	vpaddq	(TBL,ROUND,1), \_T1, \_T1	# T1 = W + K
-	vpxor	e,a0, a0	# sig1: a0 = e ^ (e >> 4)
-	PRORQ	a0, 14		# sig1: a0 = (e >> 14) ^ (e >> 18)
-	vpaddq	a2, h, h	# h = h + ch
-	PRORQ_nd	a2,a,6	# sig0: a2 = (a >> 6)
-	vpaddq	\_T1,h, h	# h = h + ch + W + K
-	vpxor	a1, a0, a0	# a0 = sigma1
-	vmovdqu	a,\_T1
-	PRORQ_nd	a1,a,39	# sig0: a1 = (a >> 39)
-	vpxor	c, \_T1, \_T1	# maj: T1 = a^c
-	add	$SZ4, ROUND	# ROUND++
-	vpand	b, \_T1, \_T1	# maj: T1 = (a^c)&b
-	vpaddq	a0, h, h
-	vpaddq	h, d, d
-	vpxor	a, a2, a2	# sig0: a2 = a ^ (a >> 6)
-	PRORQ	a2,28		# sig0: a2 = (a >> 28) ^ (a >> 34)
- vpxor a1, a2, a2 # a2 = sig0
- vpand c, a, a1 # maj: a1 = a&c
- vpor \_T1, a1, a1 # a1 = maj
- vpaddq a1, h, h # h = h + ch + W + K + maj
- vpaddq a2, h, h # h = h + ch + W + K + maj + sigma0
- ROTATE_ARGS
-.endm
-
-
-#; arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_16_XX _T1 i
- vmovdqu SZ4*((\i-15)&0xf)(%rsp), \_T1
- vmovdqu SZ4*((\i-2)&0xf)(%rsp), a1
- vmovdqu \_T1, a0
- PRORQ \_T1,7
- vmovdqu a1, a2
- PRORQ a1,42
- vpxor a0, \_T1, \_T1
- PRORQ \_T1, 1
- vpxor a2, a1, a1
- PRORQ a1, 19
- vpsrlq $7, a0, a0
- vpxor a0, \_T1, \_T1
- vpsrlq $6, a2, a2
- vpxor a2, a1, a1
- vpaddq SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1
- vpaddq SZ4*((\i-7)&0xf)(%rsp), a1, a1
- vpaddq a1, \_T1, \_T1
-
- ROUND_00_15 \_T1,\i
-.endm
-
-
-# void sha512_x4_avx2(void *STATE, const int INP_SIZE)
-# arg 1 : STATE : pointer to input data
-# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
-ENTRY(sha512_x4_avx2)
- # general registers preserved in outer calling routine
- # outer calling routine saves all the XMM registers
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
- sub $STACK_SPACE1, %rsp
-
- # Load the pre-transposed incoming digest.
- vmovdqu 0*SHA512_DIGEST_ROW_SIZE(STATE),a
- vmovdqu 1*SHA512_DIGEST_ROW_SIZE(STATE),b
- vmovdqu 2*SHA512_DIGEST_ROW_SIZE(STATE),c
- vmovdqu 3*SHA512_DIGEST_ROW_SIZE(STATE),d
- vmovdqu 4*SHA512_DIGEST_ROW_SIZE(STATE),e
- vmovdqu 5*SHA512_DIGEST_ROW_SIZE(STATE),f
- vmovdqu 6*SHA512_DIGEST_ROW_SIZE(STATE),g
- vmovdqu 7*SHA512_DIGEST_ROW_SIZE(STATE),h
-
- lea K512_4(%rip),TBL
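-	# K512_4 holds each 64-bit SHA-512 round constant replicated once
-	# per lane (4 copies), so one vpaddq adds K across all four lanes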
-
- # load the address of each of the 4 message lanes
- # getting ready to transpose input onto stack
- mov _data_ptr+0*PTR_SZ(STATE),inp0
- mov _data_ptr+1*PTR_SZ(STATE),inp1
- mov _data_ptr+2*PTR_SZ(STATE),inp2
- mov _data_ptr+3*PTR_SZ(STATE),inp3
-
- xor IDX, IDX
-lloop:
- xor ROUND, ROUND
-
- # save old digest
- vmovdqu a, _digest(%rsp)
- vmovdqu b, _digest+1*SZ4(%rsp)
- vmovdqu c, _digest+2*SZ4(%rsp)
- vmovdqu d, _digest+3*SZ4(%rsp)
- vmovdqu e, _digest+4*SZ4(%rsp)
- vmovdqu f, _digest+5*SZ4(%rsp)
- vmovdqu g, _digest+6*SZ4(%rsp)
- vmovdqu h, _digest+7*SZ4(%rsp)
- i = 0
-.rep 4
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP
- VMOVPD i*32(inp0, IDX), TT2
- VMOVPD i*32(inp1, IDX), TT1
- VMOVPD i*32(inp2, IDX), TT4
- VMOVPD i*32(inp3, IDX), TT3
- TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5
- vpshufb TMP, TT0, TT0
- vpshufb TMP, TT1, TT1
- vpshufb TMP, TT2, TT2
- vpshufb TMP, TT3, TT3
- ROUND_00_15 TT0,(i*4+0)
- ROUND_00_15 TT1,(i*4+1)
- ROUND_00_15 TT2,(i*4+2)
- ROUND_00_15 TT3,(i*4+3)
- i = (i+1)
-.endr
- add $128, IDX
-
- i = (i*4)
-
- jmp Lrounds_16_xx
-.align 16
-Lrounds_16_xx:
-.rep 16
- ROUND_16_XX T1, i
- i = (i+1)
-.endr
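-	# 0xa00 = 80 rounds * SZ4 (32) bytes, i.e. all 80 SHA-512 rounds done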
- cmp $0xa00,ROUND
- jb Lrounds_16_xx
-
- # add old digest
- vpaddq _digest(%rsp), a, a
- vpaddq _digest+1*SZ4(%rsp), b, b
- vpaddq _digest+2*SZ4(%rsp), c, c
- vpaddq _digest+3*SZ4(%rsp), d, d
- vpaddq _digest+4*SZ4(%rsp), e, e
- vpaddq _digest+5*SZ4(%rsp), f, f
- vpaddq _digest+6*SZ4(%rsp), g, g
- vpaddq _digest+7*SZ4(%rsp), h, h
-
- sub $1, INP_SIZE # unit is blocks
- jne lloop
-
- # write back to memory (state object) the transposed digest
- vmovdqu a, 0*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu b, 1*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu c, 2*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu d, 3*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu e, 4*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu f, 5*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu g, 6*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu h, 7*SHA512_DIGEST_ROW_SIZE(STATE)
-
- # update input data pointers
- add IDX, inp0
- mov inp0, _data_ptr+0*PTR_SZ(STATE)
- add IDX, inp1
- mov inp1, _data_ptr+1*PTR_SZ(STATE)
- add IDX, inp2
- mov inp2, _data_ptr+2*PTR_SZ(STATE)
- add IDX, inp3
- mov inp3, _data_ptr+3*PTR_SZ(STATE)
-
- #;;;;;;;;;;;;;;;
- #; Postamble
- add $STACK_SPACE1, %rsp
- # restore callee-saved clobbered registers
-
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- # outer calling routine restores XMM and other GP registers
- ret
-ENDPROC(sha512_x4_avx2)
-
-.section .rodata.K512_4, "a", @progbits
-.align 64
-K512_4:
- .octa 0x428a2f98d728ae22428a2f98d728ae22,\
- 0x428a2f98d728ae22428a2f98d728ae22
- .octa 0x7137449123ef65cd7137449123ef65cd,\
- 0x7137449123ef65cd7137449123ef65cd
- .octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f,\
- 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f
- .octa 0xe9b5dba58189dbbce9b5dba58189dbbc,\
- 0xe9b5dba58189dbbce9b5dba58189dbbc
- .octa 0x3956c25bf348b5383956c25bf348b538,\
- 0x3956c25bf348b5383956c25bf348b538
- .octa 0x59f111f1b605d01959f111f1b605d019,\
- 0x59f111f1b605d01959f111f1b605d019
- .octa 0x923f82a4af194f9b923f82a4af194f9b,\
- 0x923f82a4af194f9b923f82a4af194f9b
- .octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118,\
- 0xab1c5ed5da6d8118ab1c5ed5da6d8118
- .octa 0xd807aa98a3030242d807aa98a3030242,\
- 0xd807aa98a3030242d807aa98a3030242
- .octa 0x12835b0145706fbe12835b0145706fbe,\
- 0x12835b0145706fbe12835b0145706fbe
- .octa 0x243185be4ee4b28c243185be4ee4b28c,\
- 0x243185be4ee4b28c243185be4ee4b28c
- .octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2,\
- 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2
- .octa 0x72be5d74f27b896f72be5d74f27b896f,\
- 0x72be5d74f27b896f72be5d74f27b896f
- .octa 0x80deb1fe3b1696b180deb1fe3b1696b1,\
- 0x80deb1fe3b1696b180deb1fe3b1696b1
- .octa 0x9bdc06a725c712359bdc06a725c71235,\
- 0x9bdc06a725c712359bdc06a725c71235
- .octa 0xc19bf174cf692694c19bf174cf692694,\
- 0xc19bf174cf692694c19bf174cf692694
- .octa 0xe49b69c19ef14ad2e49b69c19ef14ad2,\
- 0xe49b69c19ef14ad2e49b69c19ef14ad2
- .octa 0xefbe4786384f25e3efbe4786384f25e3,\
- 0xefbe4786384f25e3efbe4786384f25e3
- .octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5,\
- 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5
- .octa 0x240ca1cc77ac9c65240ca1cc77ac9c65,\
- 0x240ca1cc77ac9c65240ca1cc77ac9c65
- .octa 0x2de92c6f592b02752de92c6f592b0275,\
- 0x2de92c6f592b02752de92c6f592b0275
- .octa 0x4a7484aa6ea6e4834a7484aa6ea6e483,\
- 0x4a7484aa6ea6e4834a7484aa6ea6e483
- .octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4,\
- 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4
- .octa 0x76f988da831153b576f988da831153b5,\
- 0x76f988da831153b576f988da831153b5
- .octa 0x983e5152ee66dfab983e5152ee66dfab,\
- 0x983e5152ee66dfab983e5152ee66dfab
- .octa 0xa831c66d2db43210a831c66d2db43210,\
- 0xa831c66d2db43210a831c66d2db43210
- .octa 0xb00327c898fb213fb00327c898fb213f,\
- 0xb00327c898fb213fb00327c898fb213f
- .octa 0xbf597fc7beef0ee4bf597fc7beef0ee4,\
- 0xbf597fc7beef0ee4bf597fc7beef0ee4
- .octa 0xc6e00bf33da88fc2c6e00bf33da88fc2,\
- 0xc6e00bf33da88fc2c6e00bf33da88fc2
- .octa 0xd5a79147930aa725d5a79147930aa725,\
- 0xd5a79147930aa725d5a79147930aa725
- .octa 0x06ca6351e003826f06ca6351e003826f,\
- 0x06ca6351e003826f06ca6351e003826f
- .octa 0x142929670a0e6e70142929670a0e6e70,\
- 0x142929670a0e6e70142929670a0e6e70
- .octa 0x27b70a8546d22ffc27b70a8546d22ffc,\
- 0x27b70a8546d22ffc27b70a8546d22ffc
- .octa 0x2e1b21385c26c9262e1b21385c26c926,\
- 0x2e1b21385c26c9262e1b21385c26c926
- .octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed,\
- 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed
- .octa 0x53380d139d95b3df53380d139d95b3df,\
- 0x53380d139d95b3df53380d139d95b3df
- .octa 0x650a73548baf63de650a73548baf63de,\
- 0x650a73548baf63de650a73548baf63de
- .octa 0x766a0abb3c77b2a8766a0abb3c77b2a8,\
- 0x766a0abb3c77b2a8766a0abb3c77b2a8
- .octa 0x81c2c92e47edaee681c2c92e47edaee6,\
- 0x81c2c92e47edaee681c2c92e47edaee6
- .octa 0x92722c851482353b92722c851482353b,\
- 0x92722c851482353b92722c851482353b
- .octa 0xa2bfe8a14cf10364a2bfe8a14cf10364,\
- 0xa2bfe8a14cf10364a2bfe8a14cf10364
- .octa 0xa81a664bbc423001a81a664bbc423001,\
- 0xa81a664bbc423001a81a664bbc423001
- .octa 0xc24b8b70d0f89791c24b8b70d0f89791,\
- 0xc24b8b70d0f89791c24b8b70d0f89791
- .octa 0xc76c51a30654be30c76c51a30654be30,\
- 0xc76c51a30654be30c76c51a30654be30
- .octa 0xd192e819d6ef5218d192e819d6ef5218,\
- 0xd192e819d6ef5218d192e819d6ef5218
- .octa 0xd69906245565a910d69906245565a910,\
- 0xd69906245565a910d69906245565a910
- .octa 0xf40e35855771202af40e35855771202a,\
- 0xf40e35855771202af40e35855771202a
- .octa 0x106aa07032bbd1b8106aa07032bbd1b8,\
- 0x106aa07032bbd1b8106aa07032bbd1b8
- .octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8,\
- 0x19a4c116b8d2d0c819a4c116b8d2d0c8
- .octa 0x1e376c085141ab531e376c085141ab53,\
- 0x1e376c085141ab531e376c085141ab53
- .octa 0x2748774cdf8eeb992748774cdf8eeb99,\
- 0x2748774cdf8eeb992748774cdf8eeb99
- .octa 0x34b0bcb5e19b48a834b0bcb5e19b48a8,\
- 0x34b0bcb5e19b48a834b0bcb5e19b48a8
- .octa 0x391c0cb3c5c95a63391c0cb3c5c95a63,\
- 0x391c0cb3c5c95a63391c0cb3c5c95a63
- .octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb,\
- 0x4ed8aa4ae3418acb4ed8aa4ae3418acb
- .octa 0x5b9cca4f7763e3735b9cca4f7763e373,\
- 0x5b9cca4f7763e3735b9cca4f7763e373
- .octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3,\
- 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3
- .octa 0x748f82ee5defb2fc748f82ee5defb2fc,\
- 0x748f82ee5defb2fc748f82ee5defb2fc
- .octa 0x78a5636f43172f6078a5636f43172f60,\
- 0x78a5636f43172f6078a5636f43172f60
- .octa 0x84c87814a1f0ab7284c87814a1f0ab72,\
- 0x84c87814a1f0ab7284c87814a1f0ab72
- .octa 0x8cc702081a6439ec8cc702081a6439ec,\
- 0x8cc702081a6439ec8cc702081a6439ec
- .octa 0x90befffa23631e2890befffa23631e28,\
- 0x90befffa23631e2890befffa23631e28
- .octa 0xa4506cebde82bde9a4506cebde82bde9,\
- 0xa4506cebde82bde9a4506cebde82bde9
- .octa 0xbef9a3f7b2c67915bef9a3f7b2c67915,\
- 0xbef9a3f7b2c67915bef9a3f7b2c67915
- .octa 0xc67178f2e372532bc67178f2e372532b,\
- 0xc67178f2e372532bc67178f2e372532b
- .octa 0xca273eceea26619cca273eceea26619c,\
- 0xca273eceea26619cca273eceea26619c
- .octa 0xd186b8c721c0c207d186b8c721c0c207,\
- 0xd186b8c721c0c207d186b8c721c0c207
- .octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e,\
- 0xeada7dd6cde0eb1eeada7dd6cde0eb1e
- .octa 0xf57d4f7fee6ed178f57d4f7fee6ed178,\
- 0xf57d4f7fee6ed178f57d4f7fee6ed178
- .octa 0x06f067aa72176fba06f067aa72176fba,\
- 0x06f067aa72176fba06f067aa72176fba
- .octa 0x0a637dc5a2c898a60a637dc5a2c898a6,\
- 0x0a637dc5a2c898a60a637dc5a2c898a6
- .octa 0x113f9804bef90dae113f9804bef90dae,\
- 0x113f9804bef90dae113f9804bef90dae
- .octa 0x1b710b35131c471b1b710b35131c471b,\
- 0x1b710b35131c471b1b710b35131c471b
- .octa 0x28db77f523047d8428db77f523047d84,\
- 0x28db77f523047d8428db77f523047d84
- .octa 0x32caab7b40c7249332caab7b40c72493,\
- 0x32caab7b40c7249332caab7b40c72493
- .octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc,\
- 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc
- .octa 0x431d67c49c100d4c431d67c49c100d4c,\
- 0x431d67c49c100d4c431d67c49c100d4c
- .octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6,\
- 0x4cc5d4becb3e42b64cc5d4becb3e42b6
- .octa 0x597f299cfc657e2a597f299cfc657e2a,\
- 0x597f299cfc657e2a597f299cfc657e2a
- .octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec,\
- 0x5fcb6fab3ad6faec5fcb6fab3ad6faec
- .octa 0x6c44198c4a4758176c44198c4a475817,\
- 0x6c44198c4a4758176c44198c4a475817
-
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
- .octa 0x18191a1b1c1d1e1f1011121314151617
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 352e70cd33e8..708b46a54578 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -338,7 +338,7 @@ For 32-bit we have the following conventions - kernel is built with
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef HAVE_JUMP_LABEL
- STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
+ STATIC_BRANCH_JMP l_yes=.Lafter_call_\@, key=context_tracking_enabled, branch=1
#endif
call enter_from_user_mode
.Lafter_call_\@:
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 2767c625a52c..687e47f8a796 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -389,6 +389,13 @@
* that register for the time this macro runs
*/
+ /*
+ * The high bits of the CS dword (__csh) are used for
+ * CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3. Clear them in case
+ * hardware didn't do this for us.
+ */
+ andl $(0x0000ffff), PT_CS(%esp)
+
/* Are we on the entry stack? Bail out if not! */
movl PER_CPU_VAR(cpu_entry_area), %ecx
addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
@@ -407,12 +414,6 @@
/* Load top of task-stack into %edi */
movl TSS_entry2task_stack(%edi), %edi
- /*
- * Clear unused upper bits of the dword containing the word-sized CS
- * slot in pt_regs in case hardware didn't clear it for us.
- */
- andl $(0x0000ffff), PT_CS(%esp)
-
/* Special case - entry from kernel mode via entry stack */
#ifdef CONFIG_VM86
movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS
@@ -782,7 +783,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
* will ignore all of the single-step traps generated in this range.
*/
-#ifdef CONFIG_XEN
+#ifdef CONFIG_XEN_PV
/*
* Xen doesn't set %esp to be precisely what the normal SYSENTER
* entry point expects, so fix it up before using the normal path.
@@ -1240,7 +1241,7 @@ ENTRY(spurious_interrupt_bug)
jmp common_exception
END(spurious_interrupt_bug)
-#ifdef CONFIG_XEN
+#ifdef CONFIG_XEN_PV
ENTRY(xen_hypervisor_callback)
pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
@@ -1321,11 +1322,13 @@ ENTRY(xen_failsafe_callback)
_ASM_EXTABLE(3b, 8b)
_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)
+#endif /* CONFIG_XEN_PV */
+#ifdef CONFIG_XEN_PVHVM
BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
xen_evtchn_do_upcall)
+#endif
-#endif /* CONFIG_XEN */
#if IS_ENABLED(CONFIG_HYPERV)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 957dfb693ecc..4d7a2d9d44cf 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -142,67 +142,6 @@ END(native_usergs_sysret64)
* with them due to bugs in both AMD and Intel CPUs.
*/
- .pushsection .entry_trampoline, "ax"
-
-/*
- * The code in here gets remapped into cpu_entry_area's trampoline. This means
- * that the assembler and linker have the wrong idea as to where this code
- * lives (and, in fact, it's mapped more than once, so it's not even at a
- * fixed address). So we can't reference any symbols outside the entry
- * trampoline and expect it to work.
- *
- * Instead, we carefully abuse %rip-relative addressing.
- * _entry_trampoline(%rip) refers to the start of the remapped entry
- * trampoline. We can thus find cpu_entry_area with this macro:
- */
-
-#define CPU_ENTRY_AREA \
- _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
-
-/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
-#define RSP_SCRATCH CPU_ENTRY_AREA_entry_stack + \
- SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
-
-ENTRY(entry_SYSCALL_64_trampoline)
- UNWIND_HINT_EMPTY
- swapgs
-
- /* Stash the user RSP. */
- movq %rsp, RSP_SCRATCH
-
- /* Note: using %rsp as a scratch reg. */
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
-
- /* Load the top of the task stack into RSP */
- movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
-
- /* Start building the simulated IRET frame. */
- pushq $__USER_DS /* pt_regs->ss */
- pushq RSP_SCRATCH /* pt_regs->sp */
- pushq %r11 /* pt_regs->flags */
- pushq $__USER_CS /* pt_regs->cs */
- pushq %rcx /* pt_regs->ip */
-
- /*
- * x86 lacks a near absolute jump, and we can't jump to the real
- * entry text with a relative jump. We could push the target
- * address and then use retq, but this destroys the pipeline on
- * many CPUs (wasting over 20 cycles on Sandy Bridge). Instead,
- * spill RDI and restore it in a second-stage trampoline.
- */
- pushq %rdi
- movq $entry_SYSCALL_64_stage2, %rdi
- JMP_NOSPEC %rdi
-END(entry_SYSCALL_64_trampoline)
-
- .popsection
-
-ENTRY(entry_SYSCALL_64_stage2)
- UNWIND_HINT_EMPTY
- popq %rdi
- jmp entry_SYSCALL_64_after_hwframe
-END(entry_SYSCALL_64_stage2)
-
ENTRY(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
/*
@@ -212,21 +151,19 @@ ENTRY(entry_SYSCALL_64)
*/
swapgs
- /*
- * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
- * is not required to switch CR3.
- */
- movq %rsp, PER_CPU_VAR(rsp_scratch)
+ /* tss.sp2 is scratch space. */
+ movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
/* Construct struct pt_regs on stack */
- pushq $__USER_DS /* pt_regs->ss */
- pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
- pushq %r11 /* pt_regs->flags */
- pushq $__USER_CS /* pt_regs->cs */
- pushq %rcx /* pt_regs->ip */
+ pushq $__USER_DS /* pt_regs->ss */
+ pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
+ pushq %r11 /* pt_regs->flags */
+ pushq $__USER_CS /* pt_regs->cs */
+ pushq %rcx /* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
- pushq %rax /* pt_regs->orig_ax */
+ pushq %rax /* pt_regs->orig_ax */
PUSH_AND_CLEAR_REGS rax=$-ENOSYS
@@ -900,6 +837,42 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
*/
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
+/**
+ * idtentry - Generate an IDT entry stub
+ * @sym: Name of the generated entry point
+ * @do_sym: C function to be called
+ * @has_error_code: True if this IDT vector has an error code on the stack
+ * @paranoid: non-zero means that this vector may be invoked from
+ * kernel mode with user GSBASE and/or user CR3.
+ * 2 is special -- see below.
+ * @shift_ist: Set to an IST index if entries from kernel mode should
+ * decrement the IST stack so that nested entries get a
+ * fresh stack. (This is for #DB, which has a nasty habit
+ * of recursing.)
+ *
+ * idtentry generates an IDT stub that sets up a usable kernel context,
+ * creates struct pt_regs, and calls @do_sym. The stub has the following
+ * special behaviors:
+ *
+ * On an entry from user mode, the stub switches from the trampoline or
+ * IST stack to the normal thread stack. On an exit to user mode, the
+ * normal exit-to-usermode path is invoked.
+ *
+ * On an exit to kernel mode, if @paranoid == 0, we check for preemption,
+ * whereas we omit the preemption check if @paranoid != 0. This is purely
+ * because the implementation is simpler this way. The kernel only needs
+ * to check for asynchronous kernel preemption when IRQ handlers return.
+ *
+ * If @paranoid == 0, then the stub will handle IRET faults by pretending
+ * that the fault came from user mode. It will handle gs_change faults by
+ * pretending that the fault happened with kernel GSBASE. Since this handling
+ * is omitted for @paranoid != 0, the #GP, #SS, and #NP stubs must have
+ * @paranoid == 0. This special handling will do the wrong thing for
+ * espfix-induced #DF on IRET, so #DF must not use @paranoid == 0.
+ *
+ * @paranoid == 2 is special: the stub will never switch stacks. This is for
+ * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
+ */
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
@@ -1050,7 +1023,7 @@ ENTRY(do_softirq_own_stack)
ret
ENDPROC(do_softirq_own_stack)
-#ifdef CONFIG_XEN
+#ifdef CONFIG_XEN_PV
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
/*
@@ -1130,11 +1103,13 @@ ENTRY(xen_failsafe_callback)
ENCODE_FRAME_POINTER
jmp error_exit
END(xen_failsafe_callback)
+#endif /* CONFIG_XEN_PV */
+#ifdef CONFIG_XEN_PVHVM
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
xen_hvm_callback_vector xen_evtchn_do_upcall
+#endif
-#endif /* CONFIG_XEN */
#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
@@ -1151,7 +1126,7 @@ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry int3 do_int3 has_error_code=0
idtentry stack_segment do_stack_segment has_error_code=1
-#ifdef CONFIG_XEN
+#ifdef CONFIG_XEN_PV
idtentry xennmi do_nmi has_error_code=0
idtentry xendebug do_debug has_error_code=0
idtentry xenint3 do_int3 has_error_code=0
@@ -1187,6 +1162,16 @@ ENTRY(paranoid_entry)
xorl %ebx, %ebx
1:
+ /*
+ * Always stash CR3 in %r14. This value will be restored,
+ * verbatim, at exit. Needed if paranoid_entry interrupted
+ * another entry that already switched to the user CR3 value
+ * but has not yet returned to userspace.
+ *
+ * This is also why CS (stashed in the "iret frame" by the
+ * hardware at entry) can not be used: this may be a return
+ * to kernel code, but with a user CR3 value.
+ */
SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
ret
@@ -1211,11 +1196,13 @@ ENTRY(paranoid_exit)
testl %ebx, %ebx /* swapgs needed? */
jnz .Lparanoid_exit_no_swapgs
TRACE_IRQS_IRETQ
+ /* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
SWAPGS_UNSAFE_STACK
jmp .Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
TRACE_IRQS_IRETQ_DEBUG
+ /* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
.Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
@@ -1626,6 +1613,7 @@ end_repeat_nmi:
movq $-1, %rsi
call do_nmi
+ /* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
testl %ebx, %ebx /* swapgs needed? */
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index fa3f439f0a92..141d415a8c80 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -68,7 +68,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
$(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-fno-omit-frame-pointer -foptimize-sibling-calls \
- -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+ CFL += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
@@ -138,7 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+ KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
+
$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
$(obj)/vdso32.so.dbg: FORCE \
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index f19856d95c60..007b3fe9d727 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -43,50 +43,26 @@ extern u8 hvclock_page
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;
- asm("syscall" : "=a" (ret) :
- "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+ asm ("syscall" : "=a" (ret), "=m" (*ts) :
+ "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+ "rcx", "r11");
return ret;
}
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
- long ret;
-
- asm("syscall" : "=a" (ret) :
- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
- return ret;
-}
-
-
#else
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;
- asm(
+ asm (
"mov %%ebx, %%edx \n"
- "mov %2, %%ebx \n"
+ "mov %[clock], %%ebx \n"
"call __kernel_vsyscall \n"
"mov %%edx, %%ebx \n"
- : "=a" (ret)
- : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
- : "memory", "edx");
- return ret;
-}
-
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
- long ret;
-
- asm(
- "mov %%ebx, %%edx \n"
- "mov %2, %%ebx \n"
- "call __kernel_vsyscall \n"
- "mov %%edx, %%ebx \n"
- : "=a" (ret)
- : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
- : "memory", "edx");
+ : "=a" (ret), "=m" (*ts)
+ : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
+ : "edx");
return ret;
}
@@ -98,12 +74,11 @@ static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
}
-static notrace u64 vread_pvclock(int *mode)
+static notrace u64 vread_pvclock(void)
{
const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
- u64 ret;
- u64 last;
u32 version;
+ u64 ret;
/*
* Note: The kernel and hypervisor must guarantee that cpu ID
@@ -130,175 +105,112 @@ static notrace u64 vread_pvclock(int *mode)
do {
version = pvclock_read_begin(pvti);
- if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
- *mode = VCLOCK_NONE;
- return 0;
- }
+ if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
+ return U64_MAX;
ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
} while (pvclock_read_retry(pvti, version));
- /* refer to vread_tsc() comment for rationale */
- last = gtod->cycle_last;
-
- if (likely(ret >= last))
- return ret;
-
- return last;
+ return ret;
}
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
-static notrace u64 vread_hvclock(int *mode)
+static notrace u64 vread_hvclock(void)
{
const struct ms_hyperv_tsc_page *tsc_pg =
(const struct ms_hyperv_tsc_page *)&hvclock_page;
- u64 current_tick = hv_read_tsc_page(tsc_pg);
-
- if (current_tick != U64_MAX)
- return current_tick;
- *mode = VCLOCK_NONE;
- return 0;
+ return hv_read_tsc_page(tsc_pg);
}
#endif
-notrace static u64 vread_tsc(void)
-{
- u64 ret = (u64)rdtsc_ordered();
- u64 last = gtod->cycle_last;
-
- if (likely(ret >= last))
- return ret;
-
- /*
- * GCC likes to generate cmov here, but this branch is extremely
- * predictable (it's just a function of time and the likely is
- * very likely) and there's a data dependence, so force GCC
- * to generate a branch instead. I don't barrier() because
- * we don't actually need a barrier, and if this function
- * ever gets inlined it will generate worse code.
- */
- asm volatile ("");
- return last;
-}
-
-notrace static inline u64 vgetsns(int *mode)
+notrace static inline u64 vgetcyc(int mode)
{
- u64 v;
- cycles_t cycles;
-
- if (gtod->vclock_mode == VCLOCK_TSC)
- cycles = vread_tsc();
+ if (mode == VCLOCK_TSC)
+ return (u64)rdtsc_ordered();
#ifdef CONFIG_PARAVIRT_CLOCK
- else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
- cycles = vread_pvclock(mode);
+ else if (mode == VCLOCK_PVCLOCK)
+ return vread_pvclock();
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
- else if (gtod->vclock_mode == VCLOCK_HVCLOCK)
- cycles = vread_hvclock(mode);
+ else if (mode == VCLOCK_HVCLOCK)
+ return vread_hvclock();
#endif
- else
- return 0;
- v = (cycles - gtod->cycle_last) & gtod->mask;
- return v * gtod->mult;
+ return U64_MAX;
}
-/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
-notrace static int __always_inline do_realtime(struct timespec *ts)
+notrace static int do_hres(clockid_t clk, struct timespec *ts)
{
- unsigned long seq;
- u64 ns;
- int mode;
+ struct vgtod_ts *base = &gtod->basetime[clk];
+ u64 cycles, last, sec, ns;
+ unsigned int seq;
do {
seq = gtod_read_begin(gtod);
- mode = gtod->vclock_mode;
- ts->tv_sec = gtod->wall_time_sec;
- ns = gtod->wall_time_snsec;
- ns += vgetsns(&mode);
+ cycles = vgetcyc(gtod->vclock_mode);
+ ns = base->nsec;
+ last = gtod->cycle_last;
+ if (unlikely((s64)cycles < 0))
+ return vdso_fallback_gettime(clk, ts);
+ if (cycles > last)
+ ns += (cycles - last) * gtod->mult;
ns >>= gtod->shift;
+ sec = base->sec;
} while (unlikely(gtod_read_retry(gtod, seq)));
- ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ /*
+ * Do this outside the loop: a race inside the loop could result
+ * in __iter_div_u64_rem() being extremely slow.
+ */
+ ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
ts->tv_nsec = ns;
- return mode;
+ return 0;
}
-notrace static int __always_inline do_monotonic(struct timespec *ts)
+notrace static void do_coarse(clockid_t clk, struct timespec *ts)
{
- unsigned long seq;
- u64 ns;
- int mode;
+ struct vgtod_ts *base = &gtod->basetime[clk];
+ unsigned int seq;
do {
seq = gtod_read_begin(gtod);
- mode = gtod->vclock_mode;
- ts->tv_sec = gtod->monotonic_time_sec;
- ns = gtod->monotonic_time_snsec;
- ns += vgetsns(&mode);
- ns >>= gtod->shift;
+ ts->tv_sec = base->sec;
+ ts->tv_nsec = base->nsec;
} while (unlikely(gtod_read_retry(gtod, seq)));
-
- ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
- ts->tv_nsec = ns;
-
- return mode;
}
-notrace static void do_realtime_coarse(struct timespec *ts)
+notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
- unsigned long seq;
- do {
- seq = gtod_read_begin(gtod);
- ts->tv_sec = gtod->wall_time_coarse_sec;
- ts->tv_nsec = gtod->wall_time_coarse_nsec;
- } while (unlikely(gtod_read_retry(gtod, seq)));
-}
+ unsigned int msk;
-notrace static void do_monotonic_coarse(struct timespec *ts)
-{
- unsigned long seq;
- do {
- seq = gtod_read_begin(gtod);
- ts->tv_sec = gtod->monotonic_time_coarse_sec;
- ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
- } while (unlikely(gtod_read_retry(gtod, seq)));
-}
+ /* Sort out negative (CPU/FD) and invalid clocks */
+ if (unlikely((unsigned int) clock >= MAX_CLOCKS))
+ return vdso_fallback_gettime(clock, ts);
-notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
-{
- switch (clock) {
- case CLOCK_REALTIME:
- if (do_realtime(ts) == VCLOCK_NONE)
- goto fallback;
- break;
- case CLOCK_MONOTONIC:
- if (do_monotonic(ts) == VCLOCK_NONE)
- goto fallback;
- break;
- case CLOCK_REALTIME_COARSE:
- do_realtime_coarse(ts);
- break;
- case CLOCK_MONOTONIC_COARSE:
- do_monotonic_coarse(ts);
- break;
- default:
- goto fallback;
+ /*
+ * Convert the clockid to a bitmask and use it to check which
+ * clocks are handled in the VDSO directly.
+ */
+ msk = 1U << clock;
+ if (likely(msk & VGTOD_HRES)) {
+ return do_hres(clock, ts);
+ } else if (msk & VGTOD_COARSE) {
+ do_coarse(clock, ts);
+ return 0;
}
-
- return 0;
-fallback:
return vdso_fallback_gettime(clock, ts);
}
+
int clock_gettime(clockid_t, struct timespec *)
__attribute__((weak, alias("__vdso_clock_gettime")));
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
if (likely(tv != NULL)) {
- if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
- return vdso_fallback_gtod(tv, tz);
+ struct timespec *ts = (struct timespec *) tv;
+
+ do_hres(CLOCK_REALTIME, ts);
tv->tv_usec /= 1000;
}
if (unlikely(tz != NULL)) {
@@ -318,7 +230,7 @@ int gettimeofday(struct timeval *, struct timezone *)
notrace time_t __vdso_time(time_t *t)
{
/* This is atomic on x86 so we don't need any locks. */
- time_t result = READ_ONCE(gtod->wall_time_sec);
+ time_t result = READ_ONCE(gtod->basetime[CLOCK_REALTIME].sec);
if (t)
*t = result;
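
The bitmask dispatch in __vdso_clock_gettime() above replaces the per-clock switch: each clockid the vDSO can service contributes one bit, so a single mask test classifies the clock. Below is a minimal user-space sketch of the same idea; the VGTOD_HRES_DEMO and VGTOD_COARSE_DEMO masks and the MAX_CLOCKS_DEMO bound are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <time.h>

/* Illustrative stand-ins for the kernel's VGTOD_* masks: one bit per
 * clockid that can be handled without falling back to the syscall. */
#define VGTOD_HRES_DEMO   ((1U << CLOCK_REALTIME) | (1U << CLOCK_MONOTONIC))
#define VGTOD_COARSE_DEMO ((1U << CLOCK_REALTIME_COARSE) | \
                           (1U << CLOCK_MONOTONIC_COARSE))
#define MAX_CLOCKS_DEMO   16

static const char *classify(clockid_t clock)
{
        unsigned int msk;

        /* Negative (CPU/FD) and out-of-range clockids fall back. */
        if ((unsigned int)clock >= MAX_CLOCKS_DEMO)
                return "fallback";
        msk = 1U << clock;
        if (msk & VGTOD_HRES_DEMO)
                return "hres";
        if (msk & VGTOD_COARSE_DEMO)
                return "coarse";
        return "fallback";
}

int main(void)
{
        printf("%s\n", classify(CLOCK_MONOTONIC));       /* hres */
        printf("%s\n", classify(CLOCK_REALTIME_COARSE)); /* coarse */
        printf("%s\n", classify((clockid_t)-3));         /* fallback */
        return 0;
}
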
diff --git a/arch/x86/entry/vdso/vgetcpu.c b/arch/x86/entry/vdso/vgetcpu.c
index 8ec3d1f4ce9a..f86ab0ae1777 100644
--- a/arch/x86/entry/vdso/vgetcpu.c
+++ b/arch/x86/entry/vdso/vgetcpu.c
@@ -13,14 +13,8 @@
notrace long
__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
{
- unsigned int p;
+ vdso_read_cpunode(cpu, node);
- p = __getcpu();
-
- if (cpu)
- *cpu = p & VGETCPU_CPU_MASK;
- if (node)
- *node = p >> 12;
return 0;
}
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 5b8b556dbb12..3f9d43f26f63 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -332,40 +332,6 @@ static __init int vdso_setup(char *s)
return 0;
}
__setup("vdso=", vdso_setup);
-#endif
-
-#ifdef CONFIG_X86_64
-static void vgetcpu_cpu_init(void *arg)
-{
- int cpu = smp_processor_id();
- struct desc_struct d = { };
- unsigned long node = 0;
-#ifdef CONFIG_NUMA
- node = cpu_to_node(cpu);
-#endif
- if (static_cpu_has(X86_FEATURE_RDTSCP))
- write_rdtscp_aux((node << 12) | cpu);
-
- /*
- * Store cpu number in limit so that it can be loaded
- * quickly in user space in vgetcpu. (12 bits for the CPU
- * and 8 bits for the node)
- */
- d.limit0 = cpu | ((node & 0xf) << 12);
- d.limit1 = node >> 4;
- d.type = 5; /* RO data, expand down, accessed */
- d.dpl = 3; /* Visible to user code */
- d.s = 1; /* Not a system segment */
- d.p = 1; /* Present */
- d.d = 1; /* 32-bit */
-
- write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
-}
-
-static int vgetcpu_online(unsigned int cpu)
-{
- return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
-}
static int __init init_vdso(void)
{
@@ -375,9 +341,7 @@ static int __init init_vdso(void)
init_vdso_image(&vdso_image_x32);
#endif
- /* notifier priority > KVM */
- return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
- "x86/vdso/vma:online", vgetcpu_online, NULL);
+ return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */
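
The removed vgetcpu_cpu_init() above encoded (node << 12) | cpu into TSC_AUX and the per-CPU segment limit, which is exactly the layout the old __vdso_getcpu() decoded by hand. A small sketch of that packing; 0xfff mirrors the kernel's VGETCPU_CPU_MASK, and the demo name is ours:

#include <stdio.h>

/* The 12 low bits carry the CPU number, the bits above carry the node. */
#define VGETCPU_CPU_MASK_DEMO 0xfff

static unsigned int pack(unsigned int node, unsigned int cpu)
{
        return (node << 12) | cpu;
}

int main(void)
{
        unsigned int p = pack(2, 37);

        printf("cpu=%u node=%u\n",
               p & VGETCPU_CPU_MASK_DEMO, p >> 12);   /* cpu=37 node=2 */
        return 0;
}
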
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 82ed001e8909..85fd85d52ffd 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -100,20 +100,13 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
*/
if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
- siginfo_t info;
struct thread_struct *thread = &current->thread;
thread->error_code = 6; /* user fault, no page, write */
thread->cr2 = ptr;
thread->trap_nr = X86_TRAP_PF;
- clear_siginfo(&info);
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = (void __user *)ptr;
-
- force_sig_info(SIGSEGV, &info, current);
+ force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)ptr, current);
return false;
} else {
return true;
diff --git a/arch/x86/entry/vsyscall/vsyscall_gtod.c b/arch/x86/entry/vsyscall/vsyscall_gtod.c
index e1216dd95c04..cfcdba082feb 100644
--- a/arch/x86/entry/vsyscall/vsyscall_gtod.c
+++ b/arch/x86/entry/vsyscall/vsyscall_gtod.c
@@ -31,6 +31,8 @@ void update_vsyscall(struct timekeeper *tk)
{
int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;
+ struct vgtod_ts *base;
+ u64 nsec;
/* Mark the new vclock used. */
BUILD_BUG_ON(VCLOCK_MAX >= 32);
@@ -45,34 +47,37 @@ void update_vsyscall(struct timekeeper *tk)
vdata->mult = tk->tkr_mono.mult;
vdata->shift = tk->tkr_mono.shift;
- vdata->wall_time_sec = tk->xtime_sec;
- vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
+ base = &vdata->basetime[CLOCK_REALTIME];
+ base->sec = tk->xtime_sec;
+ base->nsec = tk->tkr_mono.xtime_nsec;
- vdata->monotonic_time_sec = tk->xtime_sec
- + tk->wall_to_monotonic.tv_sec;
- vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec
- + ((u64)tk->wall_to_monotonic.tv_nsec
- << tk->tkr_mono.shift);
- while (vdata->monotonic_time_snsec >=
- (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
- vdata->monotonic_time_snsec -=
- ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
- vdata->monotonic_time_sec++;
- }
+ base = &vdata->basetime[CLOCK_TAI];
+ base->sec = tk->xtime_sec + (s64)tk->tai_offset;
+ base->nsec = tk->tkr_mono.xtime_nsec;
- vdata->wall_time_coarse_sec = tk->xtime_sec;
- vdata->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >>
- tk->tkr_mono.shift);
+ base = &vdata->basetime[CLOCK_MONOTONIC];
+ base->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+ nsec = tk->tkr_mono.xtime_nsec;
+ nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
+ while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+ nsec -= ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
+ base->sec++;
+ }
+ base->nsec = nsec;
- vdata->monotonic_time_coarse_sec =
- vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
- vdata->monotonic_time_coarse_nsec =
- vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
+ base = &vdata->basetime[CLOCK_REALTIME_COARSE];
+ base->sec = tk->xtime_sec;
+ base->nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
- while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
- vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
- vdata->monotonic_time_coarse_sec++;
+ base = &vdata->basetime[CLOCK_MONOTONIC_COARSE];
+ base->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+ nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+ nsec += tk->wall_to_monotonic.tv_nsec;
+ while (nsec >= NSEC_PER_SEC) {
+ nsec -= NSEC_PER_SEC;
+ base->sec++;
}
+ base->nsec = nsec;
gtod_write_end(vdata);
}
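
update_vsyscall() keeps nanoseconds shifted left by tkr_mono.shift so the mult/shift clock arithmetic stays integer-only, and the while loops above carry whole seconds out of that shifted value before storing it. A toy version of the normalization, using the same shifted representation:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Carry whole seconds out of a nanosecond count that is kept shifted
 * left by 'shift', as the vsyscall update code does above. */
static void normalize(uint64_t *sec, uint64_t *nsec, unsigned int shift)
{
        while (*nsec >= (NSEC_PER_SEC << shift)) {
                *nsec -= NSEC_PER_SEC << shift;
                (*sec)++;
        }
}

int main(void)
{
        uint64_t sec = 10, nsec = (3 * NSEC_PER_SEC + 7) << 4;

        normalize(&sec, &nsec, 4);
        printf("sec=%llu nsec(shifted)=%llu\n",
               (unsigned long long)sec, (unsigned long long)nsec);
        /* prints sec=13 nsec(shifted)=112, i.e. 7 << 4 */
        return 0;
}
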
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index c84584bb9402..7d2d7c801dba 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -669,6 +669,10 @@ static int __init amd_core_pmu_init(void)
* We fallback to using default amd_get_event_constraints.
*/
break;
+ case 0x18:
+ pr_cont("Fam18h ");
+ /* Using default amd_get_event_constraints. */
+ break;
default:
pr_err("core perfctr but no constraints; unknown hardware!\n");
return -ENODEV;
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 981ba5e8241b..398df6eaa109 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -36,6 +36,7 @@
static int num_counters_llc;
static int num_counters_nb;
+static bool l3_mask;
static HLIST_HEAD(uncore_unused_list);
@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1;
+ /*
+ * SliceMask and ThreadMask need to be set for certain L3 events in
+ * Family 17h. For other events, the two fields do not affect the count.
+ */
+ if (l3_mask)
+ hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+
if (event->cpu < 0)
return -EINVAL;
@@ -507,17 +515,19 @@ static int __init amd_uncore_init(void)
{
int ret = -ENODEV;
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return -ENODEV;
if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
return -ENODEV;
- if (boot_cpu_data.x86 == 0x17) {
+ if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
/*
- * For F17h, the Northbridge counters are repurposed as Data
- * Fabric counters. Also, L3 counters are supported too. The PMUs
- * are exported based on family as either L2 or L3 and NB or DF.
+ * For F17h or F18h, the Northbridge counters are
+ * repurposed as Data Fabric counters, and L3
+ * counters are supported as well. The PMUs are exported
+ * based on family as either L2 or L3 and NB or DF.
*/
num_counters_nb = NUM_COUNTERS_NB;
num_counters_llc = NUM_COUNTERS_L3;
@@ -525,6 +535,7 @@ static int __init amd_uncore_init(void)
amd_llc_pmu.name = "amd_l3";
format_attr_event_df.show = &event_show_df;
format_attr_event_l3.show = &event_show_l3;
+ l3_mask = true;
} else {
num_counters_nb = NUM_COUNTERS_NB;
num_counters_llc = NUM_COUNTERS_L2;
@@ -532,6 +543,7 @@ static int __init amd_uncore_init(void)
amd_llc_pmu.name = "amd_l2";
format_attr_event_df = format_attr_event;
format_attr_event_l3 = format_attr_event;
+ l3_mask = false;
}
amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
@@ -547,7 +559,9 @@ static int __init amd_uncore_init(void)
if (ret)
goto fail_nb;
- pr_info("AMD NB counters detected\n");
+ pr_info("%s NB counters detected\n",
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
+ "HYGON" : "AMD");
ret = 0;
}
@@ -561,7 +575,9 @@ static int __init amd_uncore_init(void)
if (ret)
goto fail_llc;
- pr_info("AMD LLC counters detected\n");
+ pr_info("%s LLC counters detected\n",
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
+ "HYGON" : "AMD");
ret = 0;
}
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index dfb2f7c0d019..106911b603bd 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1033,6 +1033,27 @@ static inline void x86_assign_hw_event(struct perf_event *event,
}
}
+/**
+ * x86_perf_rdpmc_index - Return PMC counter used for event
+ * @event: the perf_event to which the PMC counter was assigned
+ *
+ * The counter assigned to this performance event may change if interrupts
+ * are enabled. This counter should thus never be used while interrupts are
+ * enabled. Before this function is used to obtain the assigned counter the
+ * event should be checked for validity using, for example,
+ * perf_event_read_local(), within the same interrupt disabled section in
+ * which this counter is planned to be used.
+ *
+ * Return: The index of the performance monitoring counter assigned to
+ * @perf_event.
+ */
+int x86_perf_rdpmc_index(struct perf_event *event)
+{
+ lockdep_assert_irqs_disabled();
+
+ return event->hw.event_base_rdpmc;
+}
+
static inline int match_prev_assignment(struct hw_perf_event *hwc,
struct cpu_hw_events *cpuc,
int i)
@@ -1584,7 +1605,7 @@ static void __init pmu_check_apic(void)
}
-static struct attribute_group x86_pmu_format_group = {
+static struct attribute_group x86_pmu_format_group __ro_after_init = {
.name = "format",
.attrs = NULL,
};
@@ -1631,9 +1652,9 @@ __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
struct attribute **new;
int j, i;
- for (j = 0; a[j]; j++)
+ for (j = 0; a && a[j]; j++)
;
- for (i = 0; b[i]; i++)
+ for (i = 0; b && b[i]; i++)
j++;
j++;
@@ -1642,9 +1663,9 @@ __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
return NULL;
j = 0;
- for (i = 0; a[i]; i++)
+ for (i = 0; a && a[i]; i++)
new[j++] = a[i];
- for (i = 0; b[i]; i++)
+ for (i = 0; b && b[i]; i++)
new[j++] = b[i];
new[j] = NULL;
@@ -1715,7 +1736,7 @@ static struct attribute *events_attr[] = {
NULL,
};
-static struct attribute_group x86_pmu_events_group = {
+static struct attribute_group x86_pmu_events_group __ro_after_init = {
.name = "events",
.attrs = events_attr,
};
@@ -1776,6 +1797,10 @@ static int __init init_hw_perf_events(void)
case X86_VENDOR_AMD:
err = amd_pmu_init();
break;
+ case X86_VENDOR_HYGON:
+ err = amd_pmu_init();
+ x86_pmu.name = "HYGON";
+ break;
default:
err = -ENOTSUPP;
}
@@ -2230,7 +2255,7 @@ static struct attribute *x86_pmu_attrs[] = {
NULL,
};
-static struct attribute_group x86_pmu_attr_group = {
+static struct attribute_group x86_pmu_attr_group __ro_after_init = {
.attrs = x86_pmu_attrs,
};
@@ -2248,7 +2273,7 @@ static struct attribute *x86_pmu_caps_attrs[] = {
NULL
};
-static struct attribute_group x86_pmu_caps_group = {
+static struct attribute_group x86_pmu_caps_group __ro_after_init = {
.name = "caps",
.attrs = x86_pmu_caps_attrs,
};
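
The x86_perf_rdpmc_index() helper added above exists so in-kernel users can issue RDPMC directly on the counter backing an event. A hedged kernel-context sketch of the call pattern its docblock prescribes (validate the event and read the counter inside one IRQ-disabled section); read_event_counter() and its error handling are illustrative, not part of the patch, and the snippet assumes <linux/perf_event.h> and <asm/msr.h> rather than compiling standalone:

/* Sketch only: read the hardware counter currently backing 'event'.
 * Per the docblock above, both the validity check and the RDPMC must
 * sit inside the same interrupt-disabled section. */
static u64 read_event_counter(struct perf_event *event)
{
        u64 val, enabled, running;
        unsigned long flags;
        int idx;

        local_irq_save(flags);
        if (perf_event_read_local(event, &val, &enabled, &running)) {
                local_irq_restore(flags);
                return 0;       /* event not readable on this CPU */
        }
        idx = x86_perf_rdpmc_index(event);
        val = native_read_pmc(idx);             /* RDPMC */
        local_irq_restore(flags);

        return val;
}
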
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 035c37481f57..0fb8659b20d8 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -242,7 +242,7 @@ EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
-static struct attribute *nhm_events_attrs[] = {
+static struct attribute *nhm_mem_events_attrs[] = {
EVENT_PTR(mem_ld_nhm),
NULL,
};
@@ -278,8 +278,6 @@ EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
"4", "2");
static struct attribute *snb_events_attrs[] = {
- EVENT_PTR(mem_ld_snb),
- EVENT_PTR(mem_st_snb),
EVENT_PTR(td_slots_issued),
EVENT_PTR(td_slots_retired),
EVENT_PTR(td_fetch_bubbles),
@@ -290,6 +288,12 @@ static struct attribute *snb_events_attrs[] = {
NULL,
};
+static struct attribute *snb_mem_events_attrs[] = {
+ EVENT_PTR(mem_ld_snb),
+ EVENT_PTR(mem_st_snb),
+ NULL,
+};
+
static struct event_constraint intel_hsw_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
@@ -1995,6 +1999,18 @@ static void intel_pmu_nhm_enable_all(int added)
intel_pmu_enable_all(added);
}
+static void enable_counter_freeze(void)
+{
+ update_debugctlmsr(get_debugctlmsr() |
+ DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
+}
+
+static void disable_counter_freeze(void)
+{
+ update_debugctlmsr(get_debugctlmsr() &
+ ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
+}
+
static inline u64 intel_pmu_get_status(void)
{
u64 status;
@@ -2200,59 +2216,15 @@ static void intel_pmu_reset(void)
local_irq_restore(flags);
}
-/*
- * This handler is triggered by the local APIC, so the APIC IRQ handling
- * rules apply:
- */
-static int intel_pmu_handle_irq(struct pt_regs *regs)
+static int handle_pmi_common(struct pt_regs *regs, u64 status)
{
struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
- int bit, loops;
- u64 status;
- int handled;
- int pmu_enabled;
-
- cpuc = this_cpu_ptr(&cpu_hw_events);
-
- /*
- * Save the PMU state.
- * It needs to be restored when leaving the handler.
- */
- pmu_enabled = cpuc->enabled;
- /*
- * No known reason to not always do late ACK,
- * but just in case do it opt-in.
- */
- if (!x86_pmu.late_ack)
- apic_write(APIC_LVTPC, APIC_DM_NMI);
- intel_bts_disable_local();
- cpuc->enabled = 0;
- __intel_pmu_disable_all();
- handled = intel_pmu_drain_bts_buffer();
- handled += intel_bts_interrupt();
- status = intel_pmu_get_status();
- if (!status)
- goto done;
-
- loops = 0;
-again:
- intel_pmu_lbr_read();
- intel_pmu_ack_status(status);
- if (++loops > 100) {
- static bool warned = false;
- if (!warned) {
- WARN(1, "perfevents: irq loop stuck!\n");
- perf_event_print_debug();
- warned = true;
- }
- intel_pmu_reset();
- goto done;
- }
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int bit;
+ int handled = 0;
inc_irq_stat(apic_perf_irqs);
-
/*
* Ignore a range of extra bits in status that do not indicate
* overflow by themselves.
@@ -2261,7 +2233,7 @@ again:
GLOBAL_STATUS_ASIF |
GLOBAL_STATUS_LBRS_FROZEN);
if (!status)
- goto done;
+ return 0;
/*
* In case multiple PEBS events are sampled at the same time,
* it is possible to have GLOBAL_STATUS bit 62 set indicating
@@ -2331,6 +2303,146 @@ again:
x86_pmu_stop(event, 0);
}
+ return handled;
+}
+
+static bool disable_counter_freezing;
+static int __init intel_perf_counter_freezing_setup(char *s)
+{
+ disable_counter_freezing = true;
+ pr_info("Intel PMU Counter freezing feature disabled\n");
+ return 1;
+}
+__setup("disable_counter_freezing", intel_perf_counter_freezing_setup);
+
+/*
+ * Simplified handler for Arch Perfmon v4:
+ * - We rely on counter freezing/unfreezing to enable/disable the PMU.
+ * This is done automatically on PMU ack.
+ * - Ack the PMU only after the APIC.
+ */
+
+static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int handled = 0;
+ bool bts = false;
+ u64 status;
+ int pmu_enabled = cpuc->enabled;
+ int loops = 0;
+
+ /* PMU has been disabled because of counter freezing */
+ cpuc->enabled = 0;
+ if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+ bts = true;
+ intel_bts_disable_local();
+ handled = intel_pmu_drain_bts_buffer();
+ handled += intel_bts_interrupt();
+ }
+ status = intel_pmu_get_status();
+ if (!status)
+ goto done;
+again:
+ intel_pmu_lbr_read();
+ if (++loops > 100) {
+ static bool warned;
+
+ if (!warned) {
+ WARN(1, "perfevents: irq loop stuck!\n");
+ perf_event_print_debug();
+ warned = true;
+ }
+ intel_pmu_reset();
+ goto done;
+ }
+
+
+ handled += handle_pmi_common(regs, status);
+done:
+ /* Ack the PMI in the APIC */
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+
+ /*
+ * The counters start counting again as soon as the status is acked.
+ * Keep the ack as close as possible to the IRET. This avoids bogus
+ * freezing on Skylake CPUs.
+ */
+ if (status) {
+ intel_pmu_ack_status(status);
+ } else {
+ /*
+ * The CPU may issue two PMIs very close to each other.
+ * When the PMI handler services the first one, the
+ * GLOBAL_STATUS is already updated to reflect both.
+ * When it IRETs, the second PMI is immediately
+ * handled and sees a clear status. In the meantime a
+ * third PMI may arrive, because the freeze bit has not
+ * been set again since the ack in the first PMI handler.
+ * Double check if there is more work to be done.
+ */
+ status = intel_pmu_get_status();
+ if (status)
+ goto again;
+ }
+
+ if (bts)
+ intel_bts_enable_local();
+ cpuc->enabled = pmu_enabled;
+ return handled;
+}
+
+/*
+ * This handler is triggered by the local APIC, so the APIC IRQ handling
+ * rules apply:
+ */
+static int intel_pmu_handle_irq(struct pt_regs *regs)
+{
+ struct cpu_hw_events *cpuc;
+ int loops;
+ u64 status;
+ int handled;
+ int pmu_enabled;
+
+ cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ /*
+ * Save the PMU state.
+ * It needs to be restored when leaving the handler.
+ */
+ pmu_enabled = cpuc->enabled;
+ /*
+ * No known reason to not always do late ACK,
+ * but just in case do it opt-in.
+ */
+ if (!x86_pmu.late_ack)
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+ intel_bts_disable_local();
+ cpuc->enabled = 0;
+ __intel_pmu_disable_all();
+ handled = intel_pmu_drain_bts_buffer();
+ handled += intel_bts_interrupt();
+ status = intel_pmu_get_status();
+ if (!status)
+ goto done;
+
+ loops = 0;
+again:
+ intel_pmu_lbr_read();
+ intel_pmu_ack_status(status);
+ if (++loops > 100) {
+ static bool warned;
+
+ if (!warned) {
+ WARN(1, "perfevents: irq loop stuck!\n");
+ perf_event_print_debug();
+ warned = true;
+ }
+ intel_pmu_reset();
+ goto done;
+ }
+
+ handled += handle_pmi_common(regs, status);
+
/*
* Repeat if there is more work to be done:
*/
@@ -3350,6 +3462,9 @@ static void intel_pmu_cpu_starting(int cpu)
if (x86_pmu.version > 1)
flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
+ if (x86_pmu.counter_freezing)
+ enable_counter_freeze();
+
if (!cpuc->shared_regs)
return;
@@ -3421,6 +3536,9 @@ static void intel_pmu_cpu_dying(int cpu)
free_excl_cntrs(cpu);
fini_debug_store_on_cpu(cpu);
+
+ if (x86_pmu.counter_freezing)
+ disable_counter_freeze();
}
static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3725,6 +3843,40 @@ static __init void intel_nehalem_quirk(void)
}
}
+static bool intel_glp_counter_freezing_broken(int cpu)
+{
+ u32 rev = UINT_MAX; /* default to broken for unknown stepping */
+
+ switch (cpu_data(cpu).x86_stepping) {
+ case 1:
+ rev = 0x28;
+ break;
+ case 8:
+ rev = 0x6;
+ break;
+ }
+
+ return (cpu_data(cpu).microcode < rev);
+}
+
+static __init void intel_glp_counter_freezing_quirk(void)
+{
+ /* Check if it's already disabled */
+ if (disable_counter_freezing)
+ return;
+
+ /*
+ * If the system starts with the wrong ucode, leave the
+ * counter-freezing feature permanently disabled.
+ */
+ if (intel_glp_counter_freezing_broken(raw_smp_processor_id())) {
+ pr_info("PMU counter freezing disabled due to CPU errata,"
+ "please upgrade microcode\n");
+ x86_pmu.counter_freezing = false;
+ x86_pmu.handle_irq = intel_pmu_handle_irq;
+ }
+}
+
/*
* enable software workaround for errata:
* SNB: BJ122
@@ -3764,8 +3916,6 @@ EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
static struct attribute *hsw_events_attrs[] = {
- EVENT_PTR(mem_ld_hsw),
- EVENT_PTR(mem_st_hsw),
EVENT_PTR(td_slots_issued),
EVENT_PTR(td_slots_retired),
EVENT_PTR(td_fetch_bubbles),
@@ -3776,6 +3926,12 @@ static struct attribute *hsw_events_attrs[] = {
NULL
};
+static struct attribute *hsw_mem_events_attrs[] = {
+ EVENT_PTR(mem_ld_hsw),
+ EVENT_PTR(mem_st_hsw),
+ NULL,
+};
+
static struct attribute *hsw_tsx_events_attrs[] = {
EVENT_PTR(tx_start),
EVENT_PTR(tx_commit),
@@ -3792,13 +3948,6 @@ static struct attribute *hsw_tsx_events_attrs[] = {
NULL
};
-static __init struct attribute **get_hsw_events_attrs(void)
-{
- return boot_cpu_has(X86_FEATURE_RTM) ?
- merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
- hsw_events_attrs;
-}
-
static ssize_t freeze_on_smi_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
@@ -3875,9 +4024,32 @@ static struct attribute *intel_pmu_attrs[] = {
NULL,
};
+static __init struct attribute **
+get_events_attrs(struct attribute **base,
+ struct attribute **mem,
+ struct attribute **tsx)
+{
+ struct attribute **attrs = base;
+ struct attribute **old;
+
+ if (mem && x86_pmu.pebs)
+ attrs = merge_attr(attrs, mem);
+
+ if (tsx && boot_cpu_has(X86_FEATURE_RTM)) {
+ old = attrs;
+ attrs = merge_attr(attrs, tsx);
+ if (old != base)
+ kfree(old);
+ }
+
+ return attrs;
+}
+
__init int intel_pmu_init(void)
{
struct attribute **extra_attr = NULL;
+ struct attribute **mem_attr = NULL;
+ struct attribute **tsx_attr = NULL;
struct attribute **to_free = NULL;
union cpuid10_edx edx;
union cpuid10_eax eax;
@@ -3935,6 +4107,9 @@ __init int intel_pmu_init(void)
max((int)edx.split.num_counters_fixed, assume);
}
+ if (version >= 4)
+ x86_pmu.counter_freezing = !disable_counter_freezing;
+
if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities;
@@ -3986,7 +4161,7 @@ __init int intel_pmu_init(void)
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
- x86_pmu.cpu_events = nhm_events_attrs;
+ mem_attr = nhm_mem_events_attrs;
/* UOPS_ISSUED.STALLED_CYCLES */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
@@ -4004,11 +4179,11 @@ __init int intel_pmu_init(void)
name = "nehalem";
break;
- case INTEL_FAM6_ATOM_PINEVIEW:
- case INTEL_FAM6_ATOM_LINCROFT:
- case INTEL_FAM6_ATOM_PENWELL:
- case INTEL_FAM6_ATOM_CLOVERVIEW:
- case INTEL_FAM6_ATOM_CEDARVIEW:
+ case INTEL_FAM6_ATOM_BONNELL:
+ case INTEL_FAM6_ATOM_BONNELL_MID:
+ case INTEL_FAM6_ATOM_SALTWELL:
+ case INTEL_FAM6_ATOM_SALTWELL_MID:
+ case INTEL_FAM6_ATOM_SALTWELL_TABLET:
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -4021,9 +4196,11 @@ __init int intel_pmu_init(void)
name = "bonnell";
break;
- case INTEL_FAM6_ATOM_SILVERMONT1:
- case INTEL_FAM6_ATOM_SILVERMONT2:
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_X:
+ case INTEL_FAM6_ATOM_SILVERMONT_MID:
case INTEL_FAM6_ATOM_AIRMONT:
+ case INTEL_FAM6_ATOM_AIRMONT_MID:
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -4042,7 +4219,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_DENVERTON:
+ case INTEL_FAM6_ATOM_GOLDMONT_X:
memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -4068,7 +4245,8 @@ __init int intel_pmu_init(void)
name = "goldmont";
break;
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ x86_add_quirk(intel_glp_counter_freezing_quirk);
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
@@ -4112,7 +4290,7 @@ __init int intel_pmu_init(void)
x86_pmu.extra_regs = intel_westmere_extra_regs;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
- x86_pmu.cpu_events = nhm_events_attrs;
+ mem_attr = nhm_mem_events_attrs;
/* UOPS_ISSUED.STALLED_CYCLES */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
@@ -4152,6 +4330,7 @@ __init int intel_pmu_init(void)
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu.cpu_events = snb_events_attrs;
+ mem_attr = snb_mem_events_attrs;
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
@@ -4192,6 +4371,7 @@ __init int intel_pmu_init(void)
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
x86_pmu.cpu_events = snb_events_attrs;
+ mem_attr = snb_mem_events_attrs;
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
@@ -4226,10 +4406,12 @@ __init int intel_pmu_init(void)
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
- x86_pmu.cpu_events = get_hsw_events_attrs();
+ x86_pmu.cpu_events = hsw_events_attrs;
x86_pmu.lbr_double_abort = true;
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
+ mem_attr = hsw_mem_events_attrs;
+ tsx_attr = hsw_tsx_events_attrs;
pr_cont("Haswell events, ");
name = "haswell";
break;
@@ -4265,10 +4447,12 @@ __init int intel_pmu_init(void)
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
- x86_pmu.cpu_events = get_hsw_events_attrs();
+ x86_pmu.cpu_events = hsw_events_attrs;
x86_pmu.limit_period = bdw_limit_period;
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
+ mem_attr = hsw_mem_events_attrs;
+ tsx_attr = hsw_tsx_events_attrs;
pr_cont("Broadwell events, ");
name = "broadwell";
break;
@@ -4324,7 +4508,9 @@ __init int intel_pmu_init(void)
hsw_format_attr : nhm_format_attr;
extra_attr = merge_attr(extra_attr, skl_format_attr);
to_free = extra_attr;
- x86_pmu.cpu_events = get_hsw_events_attrs();
+ x86_pmu.cpu_events = hsw_events_attrs;
+ mem_attr = hsw_mem_events_attrs;
+ tsx_attr = hsw_tsx_events_attrs;
intel_pmu_pebs_data_source_skl(
boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
pr_cont("Skylake events, ");
@@ -4357,6 +4543,9 @@ __init int intel_pmu_init(void)
WARN_ON(!x86_pmu.format_attrs);
}
+ x86_pmu.cpu_events = get_events_attrs(x86_pmu.cpu_events,
+ mem_attr, tsx_attr);
+
if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
@@ -4431,6 +4620,13 @@ __init int intel_pmu_init(void)
pr_cont("full-width counters, ");
}
+ /*
+ * For arch perfmon 4 use counter freezing to avoid
+ * several MSR accesses in the PMI.
+ */
+ if (x86_pmu.counter_freezing)
+ x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
+
kfree(to_free);
return 0;
}
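
intel_glp_counter_freezing_broken() above gates the feature on a per-stepping minimum microcode revision, defaulting unknown steppings to broken. A standalone sketch of that gate; the revisions 0x28 and 0x6 come from the patch itself, while the table layout and function names here are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct min_ucode { uint8_t stepping; uint32_t min_rev; };

/* Steppings with a known-good microcode revision (from the patch);
 * anything not listed is treated as broken. */
static const struct min_ucode glp_table[] = {
        { 1, 0x28 },
        { 8, 0x06 },
};

static bool counter_freezing_broken(uint8_t stepping, uint32_t ucode)
{
        uint32_t rev = UINT32_MAX;      /* default to broken */
        size_t i;

        for (i = 0; i < sizeof(glp_table) / sizeof(glp_table[0]); i++)
                if (glp_table[i].stepping == stepping)
                        rev = glp_table[i].min_rev;
        return ucode < rev;
}

int main(void)
{
        printf("%d\n", counter_freezing_broken(1, 0x27));  /* 1: too old */
        printf("%d\n", counter_freezing_broken(1, 0x28));  /* 0: fixed  */
        printf("%d\n", counter_freezing_broken(5, 0xff));  /* 1: unknown */
        return 0;
}
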
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 9f8084f18d58..d2e780705c5a 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -559,8 +559,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT, slm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE, snb_cstates),
@@ -581,9 +581,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_DENVERTON, glm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GEMINI_LAKE, glm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 8d016ce5b80d..3a0aa83cbd07 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -95,7 +95,7 @@ static ssize_t pt_cap_show(struct device *cdev,
return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
}
-static struct attribute_group pt_cap_group = {
+static struct attribute_group pt_cap_group __ro_after_init = {
.name = "caps",
};
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 32f3e9423e99..91039ffed633 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -777,9 +777,9 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, skl_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init),
- X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
- X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
{},
};
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 51d7c117e3c7..c07bee31abe8 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
void bdx_uncore_cpu_init(void)
{
- int pkg = topology_phys_to_logical_pkg(0);
+ int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
},
{ /* M3UPI0 Link 0 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
},
{ /* M3UPI0 Link 1 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
},
{ /* M3UPI1 Link 2 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
},
{ /* end: all zeroes */ }
};
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index b4771a6ddbc1..1b9f85abf9bc 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -69,14 +69,14 @@ static bool test_intel(int idx)
case INTEL_FAM6_BROADWELL_GT3E:
case INTEL_FAM6_BROADWELL_X:
- case INTEL_FAM6_ATOM_SILVERMONT1:
- case INTEL_FAM6_ATOM_SILVERMONT2:
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_X:
case INTEL_FAM6_ATOM_AIRMONT:
case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_DENVERTON:
+ case INTEL_FAM6_ATOM_GOLDMONT_X:
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
case INTEL_FAM6_XEON_PHI_KNL:
case INTEL_FAM6_XEON_PHI_KNM:
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 156286335351..adae087cecdd 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -560,9 +560,11 @@ struct x86_pmu {
struct event_constraint *event_constraints;
struct x86_pmu_quirk *quirks;
int perfctr_second_write;
- bool late_ack;
u64 (*limit_period)(struct perf_event *event, u64 l);
+ /* PMI handler bits */
+ unsigned int late_ack :1,
+ counter_freezing :1;
/*
* sysfs attrs
*/
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
index b21ee65c4101..1c11f9420a82 100644
--- a/arch/x86/hyperv/Makefile
+++ b/arch/x86/hyperv/Makefile
@@ -1,2 +1,6 @@
obj-y := hv_init.o mmu.o nested.o
obj-$(CONFIG_X86_64) += hv_apic.o
+
+ifdef CONFIG_X86_64
+obj-$(CONFIG_PARAVIRT_SPINLOCKS) += hv_spinlock.o
+endif
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 5b0f613428c2..8eb6fbee8e13 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -20,7 +20,6 @@
*/
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
@@ -95,8 +94,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
*/
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
{
- struct ipi_arg_ex **arg;
- struct ipi_arg_ex *ipi_arg;
+ struct hv_send_ipi_ex **arg;
+ struct hv_send_ipi_ex *ipi_arg;
unsigned long flags;
int nr_bank = 0;
int ret = 1;
@@ -105,7 +104,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
return false;
local_irq_save(flags);
- arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
+ arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
ipi_arg = *arg;
if (unlikely(!ipi_arg))
@@ -135,7 +134,7 @@ ipi_mask_ex_done:
static bool __send_ipi_mask(const struct cpumask *mask, int vector)
{
int cur_cpu, vcpu;
- struct ipi_arg_non_ex ipi_arg;
+ struct hv_send_ipi ipi_arg;
int ret = 1;
trace_hyperv_send_ipi_mask(mask, vector);
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 20c876c7c5bf..7abb09e2eeb8 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -17,6 +17,7 @@
*
*/
+#include <linux/efi.h>
#include <linux/types.h>
#include <asm/apic.h>
#include <asm/desc.h>
@@ -253,6 +254,22 @@ static int hv_cpu_die(unsigned int cpu)
return 0;
}
+static int __init hv_pci_init(void)
+{
+ int gen2vm = efi_enabled(EFI_BOOT);
+
+ /*
+ * For a Generation-2 VM, we exit from pci_arch_init() by returning 0.
+ * The purpose is to suppress the harmless warning:
+ * "PCI: Fatal: No config space access function found"
+ */
+ if (gen2vm)
+ return 0;
+
+ /* For a Generation-1 VM, we'll proceed in pci_arch_init(). */
+ return 1;
+}
+
/*
* This function is to be invoked early in the boot sequence after the
* hypervisor has been detected.
@@ -329,6 +346,8 @@ void __init hyperv_init(void)
hv_apic_init();
+ x86_init.pci.arch_init = hv_pci_init;
+
/*
* Register Hyper-V specific clocksource.
*/
diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
new file mode 100644
index 000000000000..a861b0456b1a
--- /dev/null
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Hyper-V specific spinlock code.
+ *
+ * Copyright (C) 2018, Intel, Inc.
+ *
+ * Author : Yi Sun <yi.y.sun@intel.com>
+ */
+
+#define pr_fmt(fmt) "Hyper-V: " fmt
+
+#include <linux/spinlock.h>
+
+#include <asm/mshyperv.h>
+#include <asm/paravirt.h>
+#include <asm/apic.h>
+
+static bool __initdata hv_pvspin = true;
+
+static void hv_qlock_kick(int cpu)
+{
+ apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
+}
+
+static void hv_qlock_wait(u8 *byte, u8 val)
+{
+ unsigned long msr_val;
+ unsigned long flags;
+
+ if (in_nmi())
+ return;
+
+ /*
+ * Reading the HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that the
+ * vCPU can be put into 'idle' state. This 'idle' state is
+ * terminated by an IPI, usually from hv_qlock_kick(), even if
+ * interrupts are disabled on the vCPU.
+ *
+ * To prevent a race against the unlock path it is required to
+ * disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE
+ * MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between
+ * the lock value check and the rdmsrl() then the vCPU might be put
+ * into 'idle' state by the hypervisor and kept in that state for
+ * an unspecified amount of time.
+ */
+ local_irq_save(flags);
+ /*
+ * Only issue the rdmsrl() when the lock state has not changed.
+ */
+ if (READ_ONCE(*byte) == val)
+ rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
+ local_irq_restore(flags);
+}
+
+/*
+ * Hyper-V does not support this so far.
+ */
+bool hv_vcpu_is_preempted(int vcpu)
+{
+ return false;
+}
+PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);
+
+void __init hv_init_spinlocks(void)
+{
+ if (!hv_pvspin || !apic ||
+ !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
+ !(ms_hyperv.features & HV_X64_MSR_GUEST_IDLE_AVAILABLE)) {
+ pr_info("PV spinlocks disabled\n");
+ return;
+ }
+ pr_info("PV spinlocks enabled\n");
+
+ __pv_init_lock_hash();
+ pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+ pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+ pv_ops.lock.wait = hv_qlock_wait;
+ pv_ops.lock.kick = hv_qlock_kick;
+ pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
+}
+
+static __init int hv_parse_nopvspin(char *arg)
+{
+ hv_pvspin = false;
+ return 0;
+}
+early_param("hv_nopvspin", hv_parse_nopvspin);
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index ef5f29f913d7..e65d7fe6489f 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -231,6 +231,6 @@ void hyperv_setup_mmu_ops(void)
return;
pr_info("Using hypercall for remote TLB flush\n");
- pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
- pv_mmu_ops.tlb_remove_table = tlb_remove_table;
+ pv_ops.mmu.flush_tlb_others = hyperv_flush_tlb_others;
+ pv_ops.mmu.tlb_remove_table = tlb_remove_table;
}
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index a303d7b7d763..2f01eb4d6208 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -142,6 +142,8 @@ static inline u64 acpi_arch_get_root_pointer(void)
void acpi_generic_reduced_hw_init(void);
+u64 x86_default_get_root_pointer(void);
+
#else /* !CONFIG_ACPI */
#define acpi_lapic 0
@@ -153,6 +155,11 @@ static inline void disable_acpi(void) { }
static inline void acpi_generic_reduced_hw_init(void) { }
+static inline u64 x86_default_get_root_pointer(void)
+{
+ return 0;
+}
+
#endif /* !CONFIG_ACPI */
#define ARCH_HAS_POWER_INIT 1
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 31b627b43a8e..8e4ea39e55d0 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -7,16 +7,24 @@
#include <asm/asm.h>
#ifdef CONFIG_SMP
- .macro LOCK_PREFIX
-672: lock
+.macro LOCK_PREFIX_HERE
.pushsection .smp_locks,"a"
.balign 4
- .long 672b - .
+ .long 671f - . # offset
.popsection
- .endm
+671:
+.endm
+
+.macro LOCK_PREFIX insn:vararg
+ LOCK_PREFIX_HERE
+ lock \insn
+.endm
#else
- .macro LOCK_PREFIX
- .endm
+.macro LOCK_PREFIX_HERE
+.endm
+
+.macro LOCK_PREFIX insn:vararg
+.endm
#endif
/*
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 4cd6a3b71824..d7faa16622d8 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -31,15 +31,8 @@
*/
#ifdef CONFIG_SMP
-#define LOCK_PREFIX_HERE \
- ".pushsection .smp_locks,\"a\"\n" \
- ".balign 4\n" \
- ".long 671f - .\n" /* offset */ \
- ".popsection\n" \
- "671:"
-
-#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
-
+#define LOCK_PREFIX_HERE "LOCK_PREFIX_HERE\n\t"
+#define LOCK_PREFIX "LOCK_PREFIX "
#else /* ! CONFIG_SMP */
#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index fddb6d26239f..1ae4e5791afa 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -103,6 +103,9 @@ static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
static inline bool amd_gart_present(void)
{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return false;
+
/* GART present only on Fam15h, up to model 0fh */
if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
(boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 990770f9e76b..21b086786404 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -120,16 +120,32 @@
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
- .pushsection "__ex_table","a" ; \
- .balign 4 ; \
- .long (from) - . ; \
- .long (to) - . ; \
- .long (handler) - . ; \
+ ASM_EXTABLE_HANDLE from to handler
+
+.macro ASM_EXTABLE_HANDLE from:req to:req handler:req
+ .pushsection "__ex_table","a"
+ .balign 4
+ .long (\from) - .
+ .long (\to) - .
+ .long (\handler) - .
.popsection
+.endm
+#else /* __ASSEMBLY__ */
+
+# define _ASM_EXTABLE_HANDLE(from, to, handler) \
+ "ASM_EXTABLE_HANDLE from=" #from " to=" #to \
+ " handler=\"" #handler "\"\n\t"
+
+/* For C file, we already have NOKPROBE_SYMBOL macro */
+
+#endif /* __ASSEMBLY__ */
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
+# define _ASM_EXTABLE_UA(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
@@ -145,6 +161,7 @@
_ASM_PTR (entry); \
.popsection
+#ifdef __ASSEMBLY__
.macro ALIGN_DESTINATION
/* check for bad alignment of destination */
movl %edi,%ecx
@@ -165,34 +182,10 @@
jmp copy_user_handle_tail
.previous
- _ASM_EXTABLE(100b,103b)
- _ASM_EXTABLE(101b,103b)
+ _ASM_EXTABLE_UA(100b, 103b)
+ _ASM_EXTABLE_UA(101b, 103b)
.endm
-
-#else
-# define _EXPAND_EXTABLE_HANDLE(x) #x
-# define _ASM_EXTABLE_HANDLE(from, to, handler) \
- " .pushsection \"__ex_table\",\"a\"\n" \
- " .balign 4\n" \
- " .long (" #from ") - .\n" \
- " .long (" #to ") - .\n" \
- " .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \
- " .popsection\n"
-
-# define _ASM_EXTABLE(from, to) \
- _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
-
-# define _ASM_EXTABLE_FAULT(from, to) \
- _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
-
-# define _ASM_EXTABLE_EX(from, to) \
- _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
-
-# define _ASM_EXTABLE_REFCOUNT(from, to) \
- _ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
-
-/* For C file, we already have NOKPROBE_SYMBOL macro */
-#endif
+#endif /* __ASSEMBLY__ */
#ifndef __ASSEMBLY__
/*
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index ce84388e540c..ea3d95275b43 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -82,7 +82,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
*/
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
+ return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
@@ -122,7 +122,7 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
*/
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
+ return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
@@ -136,7 +136,7 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
*/
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
+ return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
@@ -151,7 +151,7 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
*/
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
+ return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative
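/*
 * The mechanical change in every caller above and below follows from the
 * reworked GEN_*_RMWcc() interface: the condition code is now an explicit
 * argument and the macro expands to an expression yielding the bool, so
 * callers simply 'return' it. A sketch of the non-asm-goto fallback
 * expansion, with the shape inferred from the call sites:
 */
#define GEN_UNARY_RMWCC_SKETCH(op, var, cc)				\
({									\
	bool c;								\
	asm volatile (op " %[val]" CC_SET(cc)				\
		      : [val] "+m" (var), CC_OUT(cc) (c)		\
		      : : "memory");					\
	c;								\
})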
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 5f851d92eecd..dadc20adba21 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -73,7 +73,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
*/
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
+ return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
@@ -115,7 +115,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
*/
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
+ return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
@@ -129,7 +129,7 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
*/
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
+ return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
@@ -144,7 +144,7 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
*/
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
+ return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 9f645ba57dbb..124f9195eb3e 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -217,8 +217,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
*/
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts),
- *addr, "Ir", nr, "%0", c);
+ return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
/**
@@ -264,8 +263,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
*/
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr),
- *addr, "Ir", nr, "%0", c);
+ return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}
/**
@@ -318,8 +316,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
*/
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc),
- *addr, "Ir", nr, "%0", c);
+ return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 6804d6642767..5090035e6d16 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -4,6 +4,8 @@
#include <linux/stringify.h>
+#ifndef __ASSEMBLY__
+
/*
* Although some emulators terminate on UD2, we use it for WARN().
*
@@ -20,53 +22,15 @@
#define LEN_UD2 2
-#ifdef CONFIG_GENERIC_BUG
-
-#ifdef CONFIG_X86_32
-# define __BUG_REL(val) ".long " __stringify(val)
-#else
-# define __BUG_REL(val) ".long " __stringify(val) " - 2b"
-#endif
-
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-
-#define _BUG_FLAGS(ins, flags) \
-do { \
- asm volatile("1:\t" ins "\n" \
- ".pushsection __bug_table,\"aw\"\n" \
- "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
- "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
- "\t.word %c1" "\t# bug_entry::line\n" \
- "\t.word %c2" "\t# bug_entry::flags\n" \
- "\t.org 2b+%c3\n" \
- ".popsection" \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (flags), \
- "i" (sizeof(struct bug_entry))); \
-} while (0)
-
-#else /* !CONFIG_DEBUG_BUGVERBOSE */
-
#define _BUG_FLAGS(ins, flags) \
do { \
- asm volatile("1:\t" ins "\n" \
- ".pushsection __bug_table,\"aw\"\n" \
- "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
- "\t.word %c0" "\t# bug_entry::flags\n" \
- "\t.org 2b+%c1\n" \
- ".popsection" \
- : : "i" (flags), \
+ asm volatile("ASM_BUG ins=\"" ins "\" file=%c0 line=%c1 " \
+ "flags=%c2 size=%c3" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (flags), \
"i" (sizeof(struct bug_entry))); \
} while (0)
-#endif /* CONFIG_DEBUG_BUGVERBOSE */
-
-#else
-
-#define _BUG_FLAGS(ins, flags) asm volatile(ins)
-
-#endif /* CONFIG_GENERIC_BUG */
-
#define HAVE_ARCH_BUG
#define BUG() \
do { \
@@ -82,4 +46,54 @@ do { \
#include <asm-generic/bug.h>
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_GENERIC_BUG
+
+#ifdef CONFIG_X86_32
+.macro __BUG_REL val:req
+ .long \val
+.endm
+#else
+.macro __BUG_REL val:req
+ .long \val - 2b
+.endm
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+.macro ASM_BUG ins:req file:req line:req flags:req size:req
+1: \ins
+ .pushsection __bug_table,"aw"
+2: __BUG_REL val=1b # bug_entry::bug_addr
+ __BUG_REL val=\file # bug_entry::file
+ .word \line # bug_entry::line
+ .word \flags # bug_entry::flags
+ .org 2b+\size
+ .popsection
+.endm
+
+#else /* !CONFIG_DEBUG_BUGVERBOSE */
+
+.macro ASM_BUG ins:req file:req line:req flags:req size:req
+1: \ins
+ .pushsection __bug_table,"aw"
+2: __BUG_REL val=1b # bug_entry::bug_addr
+ .word \flags # bug_entry::flags
+ .org 2b+\size
+ .popsection
+.endm
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#else /* CONFIG_GENERIC_BUG */
+
+.macro ASM_BUG ins:req file:req line:req flags:req size:req
+ \ins
+.endm
+
+#endif /* CONFIG_GENERIC_BUG */
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_X86_BUG_H */
diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
index e958e28f7ab5..86b63c7feab7 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -3,5 +3,6 @@
#define _ASM_X86_CACHEINFO_H
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
+void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
#endif /* _ASM_X86_CACHEINFO_H */
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index a55d79b233d3..bfb85e5844ab 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -242,10 +242,12 @@ extern void __add_wrong_size(void)
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long))); \
VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2)); \
- asm volatile(pfx "cmpxchg%c4b %2; sete %0" \
- : "=a" (__ret), "+d" (__old2), \
- "+m" (*(p1)), "+m" (*(p2)) \
- : "i" (2 * sizeof(long)), "a" (__old1), \
+ asm volatile(pfx "cmpxchg%c5b %1" \
+ CC_SET(e) \
+ : CC_OUT(e) (__ret), \
+ "+m" (*(p1)), "+m" (*(p2)), \
+ "+a" (__old1), "+d" (__old2) \
+ : "i" (2 * sizeof(long)), \
"b" (__new1), "c" (__new2)); \
__ret; \
})
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index fb97cf7c4137..fab4df16a3c4 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -12,38 +12,23 @@
#include <asm/user32.h>
#include <asm/unistd.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "i686\0\0"
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_timer_t;
-typedef s32 compat_key_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 __attribute__((aligned(4))) compat_s64;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
-typedef u32 compat_u32;
typedef u64 __attribute__((aligned(4))) compat_u64;
-typedef u32 compat_uptr_t;
struct compat_stat {
compat_dev_t st_dev;
@@ -240,6 +225,6 @@ static inline bool in_compat_syscall(void)
struct compat_siginfo;
int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
- const siginfo_t *from, bool x32_ABI);
+ const kernel_siginfo_t *from, bool x32_ABI);
#endif /* _ASM_X86_COMPAT_H */
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 4a7884b8dca5..29c706415443 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -30,8 +30,6 @@ struct cpu_entry_area {
*/
struct tss_struct tss;
- char entry_trampoline[PAGE_SIZE];
-
#ifdef CONFIG_X86_64
/*
* Exception stacks used for IST entries.
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index aced6c9290d6..7d442722ef24 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -2,10 +2,10 @@
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H
-#include <asm/processor.h>
-
-#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
#include <asm/asm.h>
#include <linux/bitops.h>
@@ -161,37 +161,10 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
*/
static __always_inline __pure bool _static_cpu_has(u16 bit)
{
- asm_volatile_goto("1: jmp 6f\n"
- "2:\n"
- ".skip -(((5f-4f) - (2b-1b)) > 0) * "
- "((5f-4f) - (2b-1b)),0x90\n"
- "3:\n"
- ".section .altinstructions,\"a\"\n"
- " .long 1b - .\n" /* src offset */
- " .long 4f - .\n" /* repl offset */
- " .word %P[always]\n" /* always replace */
- " .byte 3b - 1b\n" /* src len */
- " .byte 5f - 4f\n" /* repl len */
- " .byte 3b - 2b\n" /* pad len */
- ".previous\n"
- ".section .altinstr_replacement,\"ax\"\n"
- "4: jmp %l[t_no]\n"
- "5:\n"
- ".previous\n"
- ".section .altinstructions,\"a\"\n"
- " .long 1b - .\n" /* src offset */
- " .long 0\n" /* no replacement */
- " .word %P[feature]\n" /* feature bit */
- " .byte 3b - 1b\n" /* src len */
- " .byte 0\n" /* repl len */
- " .byte 0\n" /* pad len */
- ".previous\n"
- ".section .altinstr_aux,\"ax\"\n"
- "6:\n"
- " testb %[bitnum],%[cap_byte]\n"
- " jnz %l[t_yes]\n"
- " jmp %l[t_no]\n"
- ".previous\n"
+ asm_volatile_goto("STATIC_CPU_HAS bitnum=%[bitnum] "
+ "cap_byte=\"%[cap_byte]\" "
+ "feature=%P[feature] t_yes=%l[t_yes] "
+ "t_no=%l[t_no] always=%P[always]"
: : [feature] "i" (bit),
[always] "i" (X86_FEATURE_ALWAYS),
[bitnum] "i" (1 << (bit & 7)),
@@ -226,5 +199,44 @@ t_no:
#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
boot_cpu_data.x86_model
-#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
+#else /* __ASSEMBLY__ */
+
+.macro STATIC_CPU_HAS bitnum:req cap_byte:req feature:req t_yes:req t_no:req always:req
+1:
+ jmp 6f
+2:
+ .skip -(((5f-4f) - (2b-1b)) > 0) * ((5f-4f) - (2b-1b)),0x90
+3:
+ .section .altinstructions,"a"
+ .long 1b - . /* src offset */
+ .long 4f - . /* repl offset */
+ .word \always /* always replace */
+ .byte 3b - 1b /* src len */
+ .byte 5f - 4f /* repl len */
+ .byte 3b - 2b /* pad len */
+ .previous
+ .section .altinstr_replacement,"ax"
+4:
+ jmp \t_no
+5:
+ .previous
+ .section .altinstructions,"a"
+ .long 1b - . /* src offset */
+ .long 0 /* no replacement */
+ .word \feature /* feature bit */
+ .byte 3b - 1b /* src len */
+ .byte 0 /* repl len */
+ .byte 0 /* pad len */
+ .previous
+ .section .altinstr_aux,"ax"
+6:
+ testb \bitnum,\cap_byte
+ jnz \t_yes
+ jmp \t_no
+ .previous
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
#endif /* _ASM_X86_CPUFEATURE_H */
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 4505ac2735ad..9e5ca30738e5 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -8,7 +8,7 @@
DECLARE_PER_CPU(unsigned long, cpu_dr7);
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_XXL
/*
* These special macros can be used to get or set a debugging register
*/
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 13c5ee878a47..68a99d2a5f33 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -108,7 +108,7 @@ static inline int desc_empty(const void *ptr)
return !(desc[0] | desc[1]);
}
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define load_TR_desc() native_load_tr_desc()
@@ -134,7 +134,7 @@ static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
-#endif /* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_XXL */
#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index cec5fae23eb3..eea40d52ca78 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -140,6 +140,7 @@ extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
extern void efi_switch_mm(struct mm_struct *mm);
+extern void efi_recover_from_page_fault(unsigned long phys_addr);
struct efi_setup_data {
u64 fw_vendor;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 0d157d2a1e2a..69c0f892e310 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -10,6 +10,7 @@
#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/auxvec.h>
+#include <asm/fsgsbase.h>
typedef unsigned long elf_greg_t;
@@ -62,8 +63,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
#define R_X86_64_8 14 /* Direct 8 bit sign extended */
#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
-
-#define R_X86_64_NUM 16
+#define R_X86_64_PC64 24 /* Place relative 64-bit signed */
/*
* These are used to set parameters in the core dumps.
@@ -205,7 +205,6 @@ void set_personality_ia32(bool);
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
- unsigned long base; \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
@@ -228,8 +227,8 @@ do { \
(pr_reg)[18] = (regs)->flags; \
(pr_reg)[19] = (regs)->sp; \
(pr_reg)[20] = (regs)->ss; \
- rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base; \
- rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base; \
+ (pr_reg)[21] = x86_fsbase_read_cpu(); \
+ (pr_reg)[22] = x86_gsbase_read_cpu_inactive(); \
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
index f9c3a5d502f4..d8c2198d543b 100644
--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -29,7 +29,8 @@ struct pt_regs;
(b)->handler = (tmp).handler - (delta); \
} while (0)
-extern int fixup_exception(struct pt_regs *regs, int trapnr);
+extern int fixup_exception(struct pt_regs *regs, int trapnr,
+ unsigned long error_code, unsigned long fault_addr);
extern int fixup_bug(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index e203169931c7..50ba74a34a37 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -14,6 +14,16 @@
#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H
+/*
+ * Exposed to assembly code for setting up initial page tables. Cannot be
+ * calculated in assembly code (fixmap entries are an enum), but is sanity
+ * checked in the actual fixmap C code to make sure that the fixmap is
+ * covered fully.
+ */
+#define FIXMAP_PMD_NUM 2
+/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
+#define FIXMAP_PMD_TOP 507
+
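/*
 * The "sanity checked in the actual fixmap C code" above presumably boils
 * down to a build-time assertion of this shape (exact home of the check
 * assumed), ensuring the enum'd fixmap slots fit in the PMDs reserved by
 * head_64.S:
 *
 *	BUILD_BUG_ON(__end_of_fixed_addresses > FIXMAP_PMD_NUM * PTRS_PER_PTE);
 */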
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
@@ -152,7 +162,7 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
void native_set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_XXL
static inline void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index a38bf5a1e37a..5f7290e6e954 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -226,7 +226,7 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
"3: movl $-2,%[err]\n\t" \
"jmp 2b\n\t" \
".popsection\n\t" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_UA(1b, 3b) \
: [err] "=r" (err) \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
@@ -528,7 +528,7 @@ static inline void fpregs_activate(struct fpu *fpu)
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
- if (old_fpu->initialized) {
+ if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else
diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h
new file mode 100644
index 000000000000..eb377b6e9eed
--- /dev/null
+++ b/arch/x86/include/asm/fsgsbase.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_FSGSBASE_H
+#define _ASM_FSGSBASE_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_X86_64
+
+#include <asm/msr-index.h>
+
+/*
+ * Read/write a task's FSBASE or GSBASE. This returns the value that
+ * the FS/GS base would have (if the task were to be resumed). These
+ * work on the current task or on a non-running (typically stopped
+ * ptrace child) task.
+ */
+extern unsigned long x86_fsbase_read_task(struct task_struct *task);
+extern unsigned long x86_gsbase_read_task(struct task_struct *task);
+extern int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
+extern int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
+
+/* Helper functions for reading/writing FS/GS base */
+
+static inline unsigned long x86_fsbase_read_cpu(void)
+{
+ unsigned long fsbase;
+
+ rdmsrl(MSR_FS_BASE, fsbase);
+
+ return fsbase;
+}
+
+static inline unsigned long x86_gsbase_read_cpu_inactive(void)
+{
+ unsigned long gsbase;
+
+ rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
+
+ return gsbase;
+}
+
+extern void x86_fsbase_write_cpu(unsigned long fsbase);
+extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase);
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_FSGSBASE_H */
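/*
 * Usage sketch for the new helpers (the *_cpu variants; the *_task variants
 * are for stopped or ptraced tasks, per the comment above). dump_bases() is
 * a hypothetical caller:
 */
static void dump_bases(void)
{
	unsigned long fsbase = x86_fsbase_read_cpu();
	/* "inactive" = the user GSBASE parked in MSR_KERNEL_GS_BASE while
	 * the kernel runs on its own GS. */
	unsigned long gsbase = x86_gsbase_read_cpu_inactive();

	pr_info("fsbase=%#lx user gsbase=%#lx\n", fsbase, gsbase);
}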
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index de4d68852d3a..13c83fe97988 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -20,7 +20,7 @@
"3:\tmov\t%3, %1\n" \
"\tjmp\t2b\n" \
"\t.previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_UA(1b, 3b) \
: "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
: "i" (-EFAULT), "0" (oparg), "1" (0))
@@ -36,8 +36,8 @@
"4:\tmov\t%5, %1\n" \
"\tjmp\t3b\n" \
"\t.previous\n" \
- _ASM_EXTABLE(1b, 4b) \
- _ASM_EXTABLE(2b, 4b) \
+ _ASM_EXTABLE_UA(1b, 4b) \
+ _ASM_EXTABLE_UA(2b, 4b) \
: "=&a" (oldval), "=&r" (ret), \
"+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index e977b6b3a538..4139f7650fe5 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -38,6 +38,8 @@
#define HV_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
/* Partition reference TSC MSR is available */
#define HV_MSR_REFERENCE_TSC_AVAILABLE (1 << 9)
+/* Partition Guest IDLE MSR is available */
+#define HV_X64_MSR_GUEST_IDLE_AVAILABLE (1 << 10)
/* A partition's reference time stamp counter (TSC) page */
#define HV_X64_MSR_REFERENCE_TSC 0x40000021
@@ -246,6 +248,9 @@
#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
+/* Hyper-V guest idle MSR */
+#define HV_X64_MSR_GUEST_IDLE 0x400000F0
+
/* Hyper-V guest crash notification MSR's */
#define HV_X64_MSR_CRASH_P0 0x40000100
#define HV_X64_MSR_CRASH_P1 0x40000101
@@ -726,19 +731,21 @@ struct hv_enlightened_vmcs {
#define HV_STIMER_AUTOENABLE (1ULL << 3)
#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F)
-struct ipi_arg_non_ex {
- u32 vector;
- u32 reserved;
- u64 cpu_mask;
-};
-
struct hv_vpset {
u64 format;
u64 valid_bank_mask;
u64 bank_contents[];
};
-struct ipi_arg_ex {
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi {
+ u32 vector;
+ u32 reserved;
+ u64 cpu_mask;
+};
+
+/* HvCallSendSyntheticClusterIpiEx hypercall */
+struct hv_send_ipi_ex {
u32 vector;
u32 reserved;
struct hv_vpset vp_set;
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 7ed08a7c3398..0dd6b0f4000e 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -8,9 +8,6 @@
* The "_X" parts are generally the EP and EX Xeons, or the
* "Extreme" ones, like Broadwell-E.
*
- * Things ending in "2" are usually because we have no better
- * name for them. There's no processor called "SILVERMONT2".
- *
* While adding a new CPUID for a new microarchitecture, add a new
* group to keep logically sorted out in chronological order. Within
* that group keep the CPUID for the variants sorted by model number.
@@ -57,19 +54,23 @@
/* "Small Core" Processors (Atom) */
-#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
-#define INTEL_FAM6_ATOM_LINCROFT 0x26
-#define INTEL_FAM6_ATOM_PENWELL 0x27
-#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
-#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
-#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
-#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
-#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
-#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */
-#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
-#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
-#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A
+#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
+#define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */
+
+#define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */
+#define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */
+#define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */
+
+#define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT_X 0x4D /* Avaton, Rangely */
+#define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merrifield */
+
+#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */
+#define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */
+
+#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
+#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
+#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
/* Xeon Phi */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 6de64840dd22..832da8229cc7 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -187,11 +187,12 @@ extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size)
#define ioremap_nocache ioremap_nocache
extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
#define ioremap_uc ioremap_uc
-
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
#define ioremap_cache ioremap_cache
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
#define ioremap_prot ioremap_prot
+extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
+#define ioremap_encrypted ioremap_encrypted
/**
* ioremap - map bus memory into CPU space
@@ -369,18 +370,6 @@ extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
extern bool is_early_ioremap_ptep(pte_t *ptep);
-#ifdef CONFIG_XEN
-#include <xen/xen.h>
-struct bio_vec;
-
-extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
-
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
- (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
-#endif /* CONFIG_XEN */
-
#define IO_SPACE_LIMIT 0xffff
#include <asm-generic/io.h>
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 15450a675031..058e40fed167 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -64,7 +64,7 @@ static inline __cpuidle void native_halt(void)
#endif
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
@@ -123,6 +123,10 @@ static inline notrace unsigned long arch_local_irq_save(void)
#define DISABLE_INTERRUPTS(x) cli
#ifdef CONFIG_X86_64
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(x) pushfq; popq %rax
+#endif
+
#define SWAPGS swapgs
/*
* Currently paravirt can't handle swapgs nicely when we
@@ -135,8 +139,6 @@ static inline notrace unsigned long arch_local_irq_save(void)
*/
#define SWAPGS_UNSAFE_STACK swapgs
-#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
-
#define INTERRUPT_RETURN jmp native_iret
#define USERGS_SYSRET64 \
swapgs; \
@@ -145,18 +147,12 @@ static inline notrace unsigned long arch_local_irq_save(void)
swapgs; \
sysretl
-#ifdef CONFIG_DEBUG_ENTRY
-#define SAVE_FLAGS(x) pushfq; popq %rax
-#endif
#else
#define INTERRUPT_RETURN iret
-#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
-#define GET_CR0_INTO_EAX movl %cr0, %eax
#endif
-
#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_XXL */
#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 8c0de4282659..a5fb34fe56a4 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -2,19 +2,6 @@
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H
-#ifndef HAVE_JUMP_LABEL
-/*
- * For better or for worse, if jump labels (the gcc extension) are missing,
- * then the entire static branch patching infrastructure is compiled out.
- * If that happens, the code in here will malfunction. Raise a compiler
- * error instead.
- *
- * In theory, jump labels and the static branch patching infrastructure
- * could be decoupled to fix this.
- */
-#error asm/jump_label.h included on a non-jump-label kernel
-#endif
-
#define JUMP_LABEL_NOP_SIZE 5
#ifdef CONFIG_X86_64
@@ -33,14 +20,9 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:"
- ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
- ".pushsection __jump_table, \"aw\" \n\t"
- _ASM_ALIGN "\n\t"
- _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
- ".popsection \n\t"
- : : "i" (key), "i" (branch) : : l_yes);
-
+ asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" "
+ "branch=\"%c1\""
+ : : "i" (key), "i" (branch) : : l_yes);
return false;
l_yes:
return true;
@@ -48,13 +30,8 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:"
- ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
- "2:\n\t"
- ".pushsection __jump_table, \"aw\" \n\t"
- _ASM_ALIGN "\n\t"
- _ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
- ".popsection \n\t"
+ asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" "
+ "branch=\"%c1\""
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -62,49 +39,28 @@ l_yes:
return true;
}
-#ifdef CONFIG_X86_64
-typedef u64 jump_label_t;
-#else
-typedef u32 jump_label_t;
-#endif
-
-struct jump_entry {
- jump_label_t code;
- jump_label_t target;
- jump_label_t key;
-};
-
#else /* __ASSEMBLY__ */
-.macro STATIC_JUMP_IF_TRUE target, key, def
-.Lstatic_jump_\@:
- .if \def
- /* Equivalent to "jmp.d32 \target" */
- .byte 0xe9
- .long \target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
- .else
- .byte STATIC_KEY_INIT_NOP
- .endif
+.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req
+.Lstatic_branch_nop_\@:
+ .byte STATIC_KEY_INIT_NOP
+.Lstatic_branch_no_after_\@:
.pushsection __jump_table, "aw"
_ASM_ALIGN
- _ASM_PTR .Lstatic_jump_\@, \target, \key
+ .long .Lstatic_branch_nop_\@ - ., \l_yes - .
+ _ASM_PTR \key + \branch - .
.popsection
.endm
-.macro STATIC_JUMP_IF_FALSE target, key, def
-.Lstatic_jump_\@:
- .if \def
- .byte STATIC_KEY_INIT_NOP
- .else
- /* Equivalent to "jmp.d32 \target" */
- .byte 0xe9
- .long \target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
- .endif
+.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
+.Lstatic_branch_jmp_\@:
+ .byte 0xe9
+ .long \l_yes - .Lstatic_branch_jmp_after_\@
+.Lstatic_branch_jmp_after_\@:
.pushsection __jump_table, "aw"
_ASM_ALIGN
- _ASM_PTR .Lstatic_jump_\@, \target, \key + 1
+ .long .Lstatic_branch_jmp_\@ - ., \l_yes - .
+ _ASM_PTR \key + \branch - .
.popsection
.endm
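/*
 * The ".long <addr> - ." pairs plus "_ASM_PTR \key + \branch - ." emitted
 * above match the relative struct jump_entry this kernel generation moves
 * to (a sketch; field names assumed from the generic jump_label header):
 */
struct jump_entry_sketch {
	s32 code;	/* instruction address, relative to this field */
	s32 target;	/* branch target, relative to this field */
	long key;	/* &static_key, relative; low bit encodes 'branch' */
};
/*
 * On 64-bit, the relative 'key' emission is also why the elf.h hunk above
 * adds R_X86_64_PC64: module objects need a 64-bit PC-relative relocation
 * for it (an inference from the emitted expression, not stated in the diff).
 */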
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index f327236f0fa7..5125fca472bb 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -67,7 +67,7 @@ struct kimage;
/* Memory to backup during crash kdump */
#define KEXEC_BACKUP_SRC_START (0UL)
-#define KEXEC_BACKUP_SRC_END (640 * 1024UL) /* 640K */
+#define KEXEC_BACKUP_SRC_END (640 * 1024UL - 1) /* 640K */
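/* With the -1 the [start, end] range reads as inclusive: 640 * 1024 - 1 =
 * 0x9ffff, the last byte of conventional memory, rather than spilling one
 * byte past it to 0xa0000. */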
/*
* CPU does not save ss and sp on stack if execution is already
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 0f82cd91cd3c..93c4bf598fb0 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -364,6 +364,10 @@ struct x86_emulate_ctxt {
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
+#define X86EMUL_CPUID_VENDOR_HygonGenuine_ebx 0x6f677948
+#define X86EMUL_CPUID_VENDOR_HygonGenuine_ecx 0x656e6975
+#define X86EMUL_CPUID_VENDOR_HygonGenuine_edx 0x6e65476e
+
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8e90488c3d56..55e51ff7e421 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -102,7 +102,15 @@
#define UNMAPPED_GVA (~(gpa_t)0)
/* KVM Hugepage definitions for x86 */
-#define KVM_NR_PAGE_SIZES 3
+enum {
+ PT_PAGE_TABLE_LEVEL = 1,
+ PT_DIRECTORY_LEVEL = 2,
+ PT_PDPE_LEVEL = 3,
+ /* set max level to the biggest one */
+ PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
+};
+#define KVM_NR_PAGE_SIZES (PT_MAX_HUGEPAGE_LEVEL - \
+ PT_PAGE_TABLE_LEVEL + 1)
#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
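/* Worked out from the defines above: KVM_NR_PAGE_SIZES = 3 - 1 + 1 = 3,
 * same as before. KVM_HPAGE_SHIFT(2) = 12 + 9 = 21 -> 2 MiB pages, and
 * KVM_HPAGE_SHIFT(3) = 12 + 18 = 30 -> 1 GiB pages. */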
@@ -177,6 +185,7 @@ enum {
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
+#define DR6_BT (1 << 15)
#define DR6_RTM (1 << 16)
#define DR6_FIXED_1 0xfffe0ff0
#define DR6_INIT 0xffff0ff0
@@ -247,7 +256,7 @@ struct kvm_mmu_memory_cache {
* @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
*/
union kvm_mmu_page_role {
- unsigned word;
+ u32 word;
struct {
unsigned level:4;
unsigned cr4_pae:1;
@@ -273,6 +282,34 @@ union kvm_mmu_page_role {
};
};
+union kvm_mmu_extended_role {
+/*
+ * This structure complements kvm_mmu_page_role, caching everything needed for
+ * MMU configuration. If nothing in either structure has changed, MMU
+ * re-configuration can be skipped. The @valid bit is set on first use so we
+ * don't treat an all-zero structure as valid data.
+ */
+ u32 word;
+ struct {
+ unsigned int valid:1;
+ unsigned int execonly:1;
+ unsigned int cr0_pg:1;
+ unsigned int cr4_pse:1;
+ unsigned int cr4_pke:1;
+ unsigned int cr4_smap:1;
+ unsigned int cr4_smep:1;
+ unsigned int cr4_la57:1;
+ };
+};
+
+union kvm_mmu_role {
+ u64 as_u64;
+ struct {
+ union kvm_mmu_page_role base;
+ union kvm_mmu_extended_role ext;
+ };
+};
+
struct kvm_rmap_head {
unsigned long val;
};
@@ -280,18 +317,18 @@ struct kvm_rmap_head {
struct kvm_mmu_page {
struct list_head link;
struct hlist_node hash_link;
+ bool unsync;
/*
* The following two entries are used to key the shadow page in the
* hash table.
*/
- gfn_t gfn;
union kvm_mmu_page_role role;
+ gfn_t gfn;
u64 *spt;
/* hold the gfn of each spte inside spt */
gfn_t *gfns;
- bool unsync;
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
@@ -360,7 +397,7 @@ struct kvm_mmu {
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte);
hpa_t root_hpa;
- union kvm_mmu_page_role base_role;
+ union kvm_mmu_role mmu_role;
u8 root_level;
u8 shadow_root_level;
u8 ept_ad;
@@ -490,7 +527,7 @@ struct kvm_vcpu_hv {
struct kvm_hyperv_exit exit;
struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
- cpumask_t tlb_lush;
+ cpumask_t tlb_flush;
};
struct kvm_vcpu_arch {
@@ -534,7 +571,13 @@ struct kvm_vcpu_arch {
* the paging mode of the l1 guest. This context is always used to
* handle faults.
*/
- struct kvm_mmu mmu;
+ struct kvm_mmu *mmu;
+
+ /* Non-nested MMU for L1 */
+ struct kvm_mmu root_mmu;
+
+ /* L1 MMU when running nested */
+ struct kvm_mmu guest_mmu;
/*
* Paging state of an L2 guest (used for nested npt)
@@ -585,6 +628,8 @@ struct kvm_vcpu_arch {
bool has_error_code;
u8 nr;
u32 error_code;
+ unsigned long payload;
+ bool has_payload;
u8 nested_apf;
} exception;
@@ -781,6 +826,9 @@ struct kvm_hv {
u64 hv_reenlightenment_control;
u64 hv_tsc_emulation_control;
u64 hv_tsc_emulation_status;
+
+ /* How many vCPUs have VP index != vCPU index */
+ atomic_t num_mismatched_vp_indexes;
};
enum kvm_irqchip_mode {
@@ -869,6 +917,9 @@ struct kvm_arch {
bool x2apic_format;
bool x2apic_broadcast_quirk_disabled;
+
+ bool guest_can_read_msr_platform_info;
+ bool exception_payload_enabled;
};
struct kvm_vm_stat {
@@ -1022,6 +1073,7 @@ struct kvm_x86_ops {
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
+ bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
@@ -1055,6 +1107,7 @@ struct kvm_x86_ops {
bool (*umip_emulated)(void);
int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
+ void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
@@ -1129,6 +1182,9 @@ struct kvm_x86_ops {
int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*get_msr_feature)(struct kvm_msr_entry *entry);
+
+ int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version);
};
struct kvm_arch_async_pf {
@@ -1166,7 +1222,6 @@ void kvm_mmu_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
-void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
@@ -1320,7 +1375,8 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free);
+void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ ulong roots_to_free);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
@@ -1482,6 +1538,7 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
int kvm_is_in_guest(void);
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index c91083c59845..349a47acaa4a 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -53,7 +53,7 @@ static inline void local_sub(long i, local_t *l)
*/
static inline bool local_sub_and_test(long i, local_t *l)
{
- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e);
+ return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i);
}
/**
@@ -66,7 +66,7 @@ static inline bool local_sub_and_test(long i, local_t *l)
*/
static inline bool local_dec_and_test(local_t *l)
{
- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e);
+ return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e);
}
/**
@@ -79,7 +79,7 @@ static inline bool local_dec_and_test(local_t *l)
*/
static inline bool local_inc_and_test(local_t *l)
{
- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e);
+ return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e);
}
/**
@@ -93,7 +93,7 @@ static inline bool local_inc_and_test(local_t *l)
*/
static inline bool local_add_negative(long i, local_t *l)
{
- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s);
+ return GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, s, "er", i);
}
/**
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 3a17107594c8..4da9b1c58d28 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -10,41 +10,44 @@
/* MCG_CAP register defines */
#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
-#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
-#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
-#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
+#define MCG_CTL_P BIT_ULL(8) /* MCG_CTL register available */
+#define MCG_EXT_P BIT_ULL(9) /* Extended registers available */
+#define MCG_CMCI_P BIT_ULL(10) /* CMCI supported */
#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT 16
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
-#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
-#define MCG_ELOG_P (1ULL<<26) /* Extended error log supported */
-#define MCG_LMCE_P (1ULL<<27) /* Local machine check supported */
+#define MCG_SER_P BIT_ULL(24) /* MCA recovery/new status bits */
+#define MCG_ELOG_P BIT_ULL(26) /* Extended error log supported */
+#define MCG_LMCE_P BIT_ULL(27) /* Local machine check supported */
/* MCG_STATUS register defines */
-#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
-#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
-#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
-#define MCG_STATUS_LMCES (1ULL<<3) /* LMCE signaled */
+#define MCG_STATUS_RIPV BIT_ULL(0) /* restart ip valid */
+#define MCG_STATUS_EIPV BIT_ULL(1) /* ip points to correct instruction */
+#define MCG_STATUS_MCIP BIT_ULL(2) /* machine check in progress */
+#define MCG_STATUS_LMCES BIT_ULL(3) /* LMCE signaled */
/* MCG_EXT_CTL register defines */
-#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Enable LMCE */
+#define MCG_EXT_CTL_LMCE_EN BIT_ULL(0) /* Enable LMCE */
/* MCi_STATUS register defines */
-#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
-#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
-#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
-#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
-#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
-#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
-#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
-#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
-#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+#define MCI_STATUS_VAL BIT_ULL(63) /* valid error */
+#define MCI_STATUS_OVER BIT_ULL(62) /* previous errors lost */
+#define MCI_STATUS_UC BIT_ULL(61) /* uncorrected error */
+#define MCI_STATUS_EN BIT_ULL(60) /* error enabled */
+#define MCI_STATUS_MISCV BIT_ULL(59) /* misc error reg. valid */
+#define MCI_STATUS_ADDRV BIT_ULL(58) /* addr reg. valid */
+#define MCI_STATUS_PCC BIT_ULL(57) /* processor context corrupt */
+#define MCI_STATUS_S BIT_ULL(56) /* Signaled machine check */
+#define MCI_STATUS_AR BIT_ULL(55) /* Action required */
+#define MCI_STATUS_CEC_SHIFT 38 /* Corrected Error Count */
+#define MCI_STATUS_CEC_MASK GENMASK_ULL(52,38)
+#define MCI_STATUS_CEC(c) (((c) & MCI_STATUS_CEC_MASK) >> MCI_STATUS_CEC_SHIFT)
/* AMD-specific bits */
-#define MCI_STATUS_TCC (1ULL<<55) /* Task context corrupt */
-#define MCI_STATUS_SYNDV (1ULL<<53) /* synd reg. valid */
-#define MCI_STATUS_DEFERRED (1ULL<<44) /* uncorrected error, deferred exception */
-#define MCI_STATUS_POISON (1ULL<<43) /* access poisonous data */
+#define MCI_STATUS_TCC BIT_ULL(55) /* Task context corrupt */
+#define MCI_STATUS_SYNDV BIT_ULL(53) /* synd reg. valid */
+#define MCI_STATUS_DEFERRED BIT_ULL(44) /* uncorrected error, deferred exception */
+#define MCI_STATUS_POISON BIT_ULL(43) /* access poisonous data */
/*
* McaX field if set indicates a given bank supports MCA extensions:
@@ -84,7 +87,7 @@
#define MCI_MISC_ADDR_GENERIC 7 /* generic */
/* CTL2 register defines */
-#define MCI_CTL2_CMCI_EN (1ULL << 30)
+#define MCI_CTL2_CMCI_EN BIT_ULL(30)
#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
#define MCJ_CTX_MASK 3
@@ -214,6 +217,8 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
#endif
+static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
+
int mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);
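/*
 * Example use of the new corrected-error-count fields; the threshold and
 * function name here are made up for illustration:
 */
static bool bank_is_noisy(u64 mci_status)
{
	/* Bits 52:38 of MCi_STATUS; MCI_STATUS_CEC() does the mask+shift. */
	return MCI_STATUS_CEC(mci_status) > 10;
}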
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index c0643831706e..616f8e637bc3 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -48,10 +48,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);
+void __init mem_encrypt_free_decrypted_mem(void);
bool sme_active(void);
bool sev_active(void);
+#define __bss_decrypted __attribute__((__section__(".bss..decrypted")))
+
#else /* !CONFIG_AMD_MEM_ENCRYPT */
#define sme_me_mask 0ULL
@@ -77,6 +80,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
static inline int __init
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+#define __bss_decrypted
+
#endif /* CONFIG_AMD_MEM_ENCRYPT */
/*
@@ -88,6 +93,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
#define __sme_pa(x) (__pa(x) | sme_me_mask)
#define __sme_pa_nodebug(x) (__pa_nodebug(x) | sme_me_mask)
+extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
+
#endif /* __ASSEMBLY__ */
#endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index eeeb9289c764..0ca50611e8ce 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -16,12 +16,12 @@
extern atomic64_t last_mm_ctx_id;
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
}
-#endif /* !CONFIG_PARAVIRT */
+#endif /* !CONFIG_PARAVIRT_XXL */
#ifdef CONFIG_PERF_EVENTS
diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h
index 61eb4b63c5ec..d0b1434fb0b6 100644
--- a/arch/x86/include/asm/mpx.h
+++ b/arch/x86/include/asm/mpx.h
@@ -57,8 +57,14 @@
#define MPX_BNDCFG_ADDR_MASK (~((1UL<<MPX_BNDCFG_TAIL)-1))
#define MPX_BNDSTA_ERROR_CODE 0x3
+struct mpx_fault_info {
+ void __user *addr;
+ void __user *lower;
+ void __user *upper;
+};
+
#ifdef CONFIG_X86_INTEL_MPX
-siginfo_t *mpx_generate_siginfo(struct pt_regs *regs);
+int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
int mpx_handle_bd_fault(void);
static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
{
@@ -78,9 +84,9 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
unsigned long flags);
#else
-static inline siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
+static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
{
- return NULL;
+ return -EINVAL;
}
static inline int mpx_handle_bd_fault(void)
{
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index f37704497d8f..0d6271cce198 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -351,6 +351,8 @@ int hyperv_flush_guest_mapping(u64 as);
#ifdef CONFIG_X86_64
void hv_apic_init(void);
+void __init hv_init_spinlocks(void);
+bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4731f0cf97c5..80f4a4f38c79 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -164,6 +164,7 @@
#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
+#define DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI (1UL << 12)
#define DEBUGCTLMSR_FREEZE_IN_SMM_BIT 14
#define DEBUGCTLMSR_FREEZE_IN_SMM (1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT)
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 04addd6e0a4a..91e4cf189914 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -242,7 +242,7 @@ static inline unsigned long long native_read_pmc(int counter)
return EAX_EDX_VAL(val, low, high);
}
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
@@ -305,7 +305,7 @@ do { \
#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
-#endif /* !CONFIG_PARAVIRT */
+#endif /* !CONFIG_PARAVIRT_XXL */
/*
* 64-bit version of wrmsr_safe():
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index fd2a8c1b88bc..80dc14422495 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -170,11 +170,15 @@
*/
# define CALL_NOSPEC \
ANNOTATE_NOSPEC_ALTERNATIVE \
- ALTERNATIVE( \
+ ALTERNATIVE_2( \
ANNOTATE_RETPOLINE_SAFE \
"call *%[thunk_target]\n", \
"call __x86_indirect_thunk_%V[thunk_target]\n", \
- X86_FEATURE_RETPOLINE)
+ X86_FEATURE_RETPOLINE, \
+ "lfence;\n" \
+ ANNOTATE_RETPOLINE_SAFE \
+ "call *%[thunk_target]\n", \
+ X86_FEATURE_RETPOLINE_AMD)
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
@@ -184,7 +188,8 @@
* here, anyway.
*/
# define CALL_NOSPEC \
- ALTERNATIVE( \
+ ANNOTATE_NOSPEC_ALTERNATIVE \
+ ALTERNATIVE_2( \
ANNOTATE_RETPOLINE_SAFE \
"call *%[thunk_target]\n", \
" jmp 904f;\n" \
@@ -199,7 +204,11 @@
" ret;\n" \
" .align 16\n" \
"904: call 901b;\n", \
- X86_FEATURE_RETPOLINE)
+ X86_FEATURE_RETPOLINE, \
+ "lfence;\n" \
+ ANNOTATE_RETPOLINE_SAFE \
+ "call *%[thunk_target]\n", \
+ X86_FEATURE_RETPOLINE_AMD)
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#else /* No retpoline for C / inline asm */
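/*
 * Usage sketch of the idiom real callers use: an indirect call routed
 * through CALL_NOSPEC, which the ALTERNATIVE_2 above patches into a plain
 * call, a retpoline thunk call, or the new "lfence; call" AMD variant
 * depending on the feature bits. Real callers also list the call-clobbered
 * registers; this sketch omits them.
 */
static void indirect_call_sketch(void (*fn)(void))
{
	asm volatile(CALL_NOSPEC : : THUNK_TARGET(fn) : "memory");
}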
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 6afac386a434..cd0cf1c568b4 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -59,13 +59,16 @@
#endif
/*
- * Kernel image size is limited to 1GiB due to the fixmap living in the
- * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use
- * 512MiB by default, leaving 1.5GiB for modules once the page tables
- * are fully set up. If kernel ASLR is configured, it can extend the
- * kernel page table mapping, reducing the size of the modules area.
+ * Maximum kernel image size is limited to 1 GiB, due to the fixmap living
+ * in the next 1 GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S).
+ *
+ * On KASLR use 1 GiB by default, leaving 1 GiB for modules once the
+ * page tables are fully set up.
+ *
+ * If KASLR is disabled we can shrink it to 0.5 GiB and increase the size
+ * of the modules area to 1.5 GiB.
*/
-#if defined(CONFIG_RANDOMIZE_BASE)
+#ifdef CONFIG_RANDOMIZE_BASE
#define KERNEL_IMAGE_SIZE (1024 * 1024 * 1024)
#else
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index e375d4266b53..4bf42f9e4eea 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -17,16 +17,73 @@
#include <linux/cpumask.h>
#include <asm/frame.h>
+static inline unsigned long long paravirt_sched_clock(void)
+{
+ return PVOP_CALL0(unsigned long long, time.sched_clock);
+}
+
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return PVOP_CALL1(u64, time.steal_clock, cpu);
+}
+
+/* The paravirtualized I/O functions */
+static inline void slow_down_io(void)
+{
+ pv_ops.cpu.io_delay();
+#ifdef REALLY_SLOW_IO
+ pv_ops.cpu.io_delay();
+ pv_ops.cpu.io_delay();
+ pv_ops.cpu.io_delay();
+#endif
+}
+
+static inline void __flush_tlb(void)
+{
+ PVOP_VCALL0(mmu.flush_tlb_user);
+}
+
+static inline void __flush_tlb_global(void)
+{
+ PVOP_VCALL0(mmu.flush_tlb_kernel);
+}
+
+static inline void __flush_tlb_one_user(unsigned long addr)
+{
+ PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
+}
+
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
+{
+ PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
+}
+
+static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
+}
+
+static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
+{
+ PVOP_VCALL1(mmu.exit_mmap, mm);
+}
+
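/*
 * The accessors here now index one consolidated pv_ops instance instead of
 * the old per-area pv_time_ops/pv_cpu_ops/... globals. A sketch of its
 * shape, with the group names taken from the call sites (exact member
 * layout assumed):
 *
 *	struct paravirt_patch_template {
 *		struct pv_init_ops init;
 *		struct pv_time_ops time;
 *		struct pv_cpu_ops  cpu;
 *		struct pv_irq_ops  irq;
 *		struct pv_mmu_ops  mmu;
 *		struct pv_lock_ops lock;
 *	};
 *	extern struct paravirt_patch_template pv_ops;
 */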
+#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
- PVOP_VCALL1(pv_cpu_ops.load_sp0, sp0);
+ PVOP_VCALL1(cpu.load_sp0, sp0);
}
/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
- PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
+ PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}
/*
@@ -34,98 +91,98 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
*/
static inline unsigned long paravirt_get_debugreg(int reg)
{
- return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
+ return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
- PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
+ PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
static inline unsigned long read_cr0(void)
{
- return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
+ return PVOP_CALL0(unsigned long, cpu.read_cr0);
}
static inline void write_cr0(unsigned long x)
{
- PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
+ PVOP_VCALL1(cpu.write_cr0, x);
}
static inline unsigned long read_cr2(void)
{
- return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
+ return PVOP_CALL0(unsigned long, mmu.read_cr2);
}
static inline void write_cr2(unsigned long x)
{
- PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
+ PVOP_VCALL1(mmu.write_cr2, x);
}
static inline unsigned long __read_cr3(void)
{
- return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
+ return PVOP_CALL0(unsigned long, mmu.read_cr3);
}
static inline void write_cr3(unsigned long x)
{
- PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
+ PVOP_VCALL1(mmu.write_cr3, x);
}
static inline void __write_cr4(unsigned long x)
{
- PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
+ PVOP_VCALL1(cpu.write_cr4, x);
}
#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
- return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
+ return PVOP_CALL0(unsigned long, cpu.read_cr8);
}
static inline void write_cr8(unsigned long x)
{
- PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
+ PVOP_VCALL1(cpu.write_cr8, x);
}
#endif
static inline void arch_safe_halt(void)
{
- PVOP_VCALL0(pv_irq_ops.safe_halt);
+ PVOP_VCALL0(irq.safe_halt);
}
static inline void halt(void)
{
- PVOP_VCALL0(pv_irq_ops.halt);
+ PVOP_VCALL0(irq.halt);
}
static inline void wbinvd(void)
{
- PVOP_VCALL0(pv_cpu_ops.wbinvd);
+ PVOP_VCALL0(cpu.wbinvd);
}
#define get_kernel_rpl() (pv_info.kernel_rpl)
static inline u64 paravirt_read_msr(unsigned msr)
{
- return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
+ return PVOP_CALL1(u64, cpu.read_msr, msr);
}
static inline void paravirt_write_msr(unsigned msr,
unsigned low, unsigned high)
{
- PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
+ PVOP_VCALL3(cpu.write_msr, msr, low, high);
}
static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
- return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
+ return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}
static inline int paravirt_write_msr_safe(unsigned msr,
unsigned low, unsigned high)
{
- return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
+ return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}
#define rdmsr(msr, val1, val2) \
@@ -170,23 +227,9 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
return err;
}
-static inline unsigned long long paravirt_sched_clock(void)
-{
- return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
-}
-
-struct static_key;
-extern struct static_key paravirt_steal_enabled;
-extern struct static_key paravirt_steal_rq_enabled;
-
-static inline u64 paravirt_steal_clock(int cpu)
-{
- return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
-}
-
static inline unsigned long long paravirt_read_pmc(int counter)
{
- return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
+ return PVOP_CALL1(u64, cpu.read_pmc, counter);
}
#define rdpmc(counter, low, high) \
@@ -200,166 +243,127 @@ do { \
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
- PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
+ PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}
static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
- PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
+ PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}
static inline void load_TR_desc(void)
{
- PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
+ PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
- PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
+ PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
- PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
+ PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
- PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
+ PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
- return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
+ return PVOP_CALL0(unsigned long, cpu.store_tr);
}
+
#define store_tr(tr) ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
- PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
+ PVOP_VCALL2(cpu.load_tls, t, cpu);
}
#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
- PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
+ PVOP_VCALL1(cpu.load_gs_index, gs);
}
#endif
static inline void write_ldt_entry(struct desc_struct *dt, int entry,
const void *desc)
{
- PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
+ PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}
static inline void write_gdt_entry(struct desc_struct *dt, int entry,
void *desc, int type)
{
- PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
+ PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}
static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
- PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
+ PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
- PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
-}
-
-/* The paravirtualized I/O functions */
-static inline void slow_down_io(void)
-{
- pv_cpu_ops.io_delay();
-#ifdef REALLY_SLOW_IO
- pv_cpu_ops.io_delay();
- pv_cpu_ops.io_delay();
- pv_cpu_ops.io_delay();
-#endif
+ PVOP_VCALL1(cpu.set_iopl_mask, mask);
}
static inline void paravirt_activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
- PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
+ PVOP_VCALL2(mmu.activate_mm, prev, next);
}
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
- PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
-}
-
-static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
-{
- PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
-}
-
-static inline void __flush_tlb(void)
-{
- PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
-}
-static inline void __flush_tlb_global(void)
-{
- PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
-}
-static inline void __flush_tlb_one_user(unsigned long addr)
-{
- PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
-}
-
-static inline void flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
-{
- PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
-}
-
-static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
- PVOP_VCALL2(pv_mmu_ops.tlb_remove_table, tlb, table);
+ PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}
static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
- return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
+ return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
+ PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
- PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
+ PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
- PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
+ PVOP_VCALL1(mmu.release_pte, pfn);
}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
- PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
+ PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
- PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
+ PVOP_VCALL1(mmu.release_pmd, pfn);
}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
- PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
+ PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
- PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
+ PVOP_VCALL1(mmu.release_pud, pfn);
}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
- PVOP_VCALL2(pv_mmu_ops.alloc_p4d, mm, pfn);
+ PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}
static inline void paravirt_release_p4d(unsigned long pfn)
{
- PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn);
+ PVOP_VCALL1(mmu.release_p4d, pfn);
}
static inline pte_t __pte(pteval_t val)
@@ -367,13 +371,9 @@ static inline pte_t __pte(pteval_t val)
pteval_t ret;
if (sizeof(pteval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pteval_t,
- pv_mmu_ops.make_pte,
- val, (u64)val >> 32);
+ ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
else
- ret = PVOP_CALLEE1(pteval_t,
- pv_mmu_ops.make_pte,
- val);
+ ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);
return (pte_t) { .pte = ret };
}
@@ -383,11 +383,10 @@ static inline pteval_t pte_val(pte_t pte)
pteval_t ret;
if (sizeof(pteval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
+ ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
pte.pte, (u64)pte.pte >> 32);
else
- ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
- pte.pte);
+ ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
return ret;
}
@@ -397,11 +396,9 @@ static inline pgd_t __pgd(pgdval_t val)
pgdval_t ret;
if (sizeof(pgdval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
- val, (u64)val >> 32);
+ ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
else
- ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
- val);
+ ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);
return (pgd_t) { ret };
}
@@ -411,11 +408,10 @@ static inline pgdval_t pgd_val(pgd_t pgd)
pgdval_t ret;
if (sizeof(pgdval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
+ ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
pgd.pgd, (u64)pgd.pgd >> 32);
else
- ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
- pgd.pgd);
+ ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
return ret;
}
@@ -426,8 +422,7 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long a
{
pteval_t ret;
- ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
- mm, addr, ptep);
+ ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, mm, addr, ptep);
return (pte_t) { .pte = ret };
}
@@ -437,20 +432,18 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long a
{
if (sizeof(pteval_t) > sizeof(long))
/* 5 arg words */
- pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
+ pv_ops.mmu.ptep_modify_prot_commit(mm, addr, ptep, pte);
else
- PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
+ PVOP_VCALL4(mmu.ptep_modify_prot_commit,
mm, addr, ptep, pte.pte);
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
if (sizeof(pteval_t) > sizeof(long))
- PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
- pte.pte, (u64)pte.pte >> 32);
+ PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
else
- PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
- pte.pte);
+ PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -458,9 +451,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
{
if (sizeof(pteval_t) > sizeof(long))
/* 5 arg words */
- pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
+ pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
else
- PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
+ PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
}
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
@@ -468,9 +461,9 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
pmdval_t val = native_pmd_val(pmd);
if (sizeof(pmdval_t) > sizeof(long))
- PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
+ PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
else
- PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
+ PVOP_VCALL2(mmu.set_pmd, pmdp, val);
}
#if CONFIG_PGTABLE_LEVELS >= 3
@@ -479,11 +472,9 @@ static inline pmd_t __pmd(pmdval_t val)
pmdval_t ret;
if (sizeof(pmdval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
- val, (u64)val >> 32);
+ ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
else
- ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
- val);
+ ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);
return (pmd_t) { ret };
}
@@ -493,11 +484,10 @@ static inline pmdval_t pmd_val(pmd_t pmd)
pmdval_t ret;
if (sizeof(pmdval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
+ ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
pmd.pmd, (u64)pmd.pmd >> 32);
else
- ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
- pmd.pmd);
+ ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
return ret;
}
@@ -507,39 +497,23 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
pudval_t val = native_pud_val(pud);
if (sizeof(pudval_t) > sizeof(long))
- PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
- val, (u64)val >> 32);
+ PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
else
- PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
- val);
+ PVOP_VCALL2(mmu.set_pud, pudp, val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
pudval_t ret;
- if (sizeof(pudval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
- val, (u64)val >> 32);
- else
- ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
- val);
+ ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);
return (pud_t) { ret };
}
static inline pudval_t pud_val(pud_t pud)
{
- pudval_t ret;
-
- if (sizeof(pudval_t) > sizeof(long))
- ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
- pud.pud, (u64)pud.pud >> 32);
- else
- ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
- pud.pud);
-
- return ret;
+ return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}
static inline void pud_clear(pud_t *pudp)
@@ -551,31 +525,26 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
p4dval_t val = native_p4d_val(p4d);
- if (sizeof(p4dval_t) > sizeof(long))
- PVOP_VCALL3(pv_mmu_ops.set_p4d, p4dp,
- val, (u64)val >> 32);
- else
- PVOP_VCALL2(pv_mmu_ops.set_p4d, p4dp,
- val);
+ PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}
#if CONFIG_PGTABLE_LEVELS >= 5
static inline p4d_t __p4d(p4dval_t val)
{
- p4dval_t ret = PVOP_CALLEE1(p4dval_t, pv_mmu_ops.make_p4d, val);
+ p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);
return (p4d_t) { ret };
}
static inline p4dval_t p4d_val(p4d_t p4d)
{
- return PVOP_CALLEE1(p4dval_t, pv_mmu_ops.p4d_val, p4d.p4d);
+ return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}
static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
- PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd));
+ PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}
#define set_pgd(pgdp, pgdval) do { \
@@ -606,19 +575,18 @@ static inline void p4d_clear(p4d_t *p4dp)
64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
- PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
- pte.pte, pte.pte >> 32);
+ PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
+ PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
}
static inline void pmd_clear(pmd_t *pmdp)
{
- PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
+ PVOP_VCALL1(mmu.pmd_clear, pmdp);
}
#else /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -641,64 +609,68 @@ static inline void pmd_clear(pmd_t *pmdp)
#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
- PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
+ PVOP_VCALL1(cpu.start_context_switch, prev);
}
static inline void arch_end_context_switch(struct task_struct *next)
{
- PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
+ PVOP_VCALL1(cpu.end_context_switch, next);
}
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
- PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
+ PVOP_VCALL0(mmu.lazy_mode.enter);
}
static inline void arch_leave_lazy_mmu_mode(void)
{
- PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
+ PVOP_VCALL0(mmu.lazy_mode.leave);
}
static inline void arch_flush_lazy_mmu_mode(void)
{
- PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+ PVOP_VCALL0(mmu.lazy_mode.flush);
}
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
{
- pv_mmu_ops.set_fixmap(idx, phys, flags);
+ pv_ops.mmu.set_fixmap(idx, phys, flags);
}
+#endif
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
u32 val)
{
- PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
+ PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}
static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
- PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
+ PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
- PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
+ PVOP_VCALL2(lock.wait, ptr, val);
}
static __always_inline void pv_kick(int cpu)
{
- PVOP_VCALL1(pv_lock_ops.kick, cpu);
+ PVOP_VCALL1(lock.kick, cpu);
}
static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
- return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
+ return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
+
#endif /* SMP && PARAVIRT_SPINLOCKS */
#ifdef CONFIG_X86_32
@@ -778,24 +750,25 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
#define __PV_IS_CALLEE_SAVE(func) \
((struct paravirt_callee_save) { func })
+#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
- return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
+ return PVOP_CALLEE0(unsigned long, irq.save_fl);
}
static inline notrace void arch_local_irq_restore(unsigned long f)
{
- PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
+ PVOP_VCALLEE1(irq.restore_fl, f);
}
static inline notrace void arch_local_irq_disable(void)
{
- PVOP_VCALLEE0(pv_irq_ops.irq_disable);
+ PVOP_VCALLEE0(irq.irq_disable);
}
static inline notrace void arch_local_irq_enable(void)
{
- PVOP_VCALLEE0(pv_irq_ops.irq_enable);
+ PVOP_VCALLEE0(irq.irq_enable);
}
static inline notrace unsigned long arch_local_irq_save(void)
@@ -806,6 +779,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
arch_local_irq_disable();
return f;
}
+#endif
/* Make sure as little as possible of this mess escapes. */
@@ -827,7 +801,7 @@ extern void default_banner(void);
#else /* __ASSEMBLY__ */
-#define _PVSITE(ptype, clobbers, ops, word, algn) \
+#define _PVSITE(ptype, ops, word, algn) \
771:; \
ops; \
772:; \
@@ -836,7 +810,6 @@ extern void default_banner(void);
word 771b; \
.byte ptype; \
.byte 772b-771b; \
- .short clobbers; \
.popsection
@@ -868,8 +841,8 @@ extern void default_banner(void);
COND_POP(set, CLBR_RCX, rcx); \
COND_POP(set, CLBR_RAX, rax)
-#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
-#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
+#define PARA_PATCH(off) ((off) / 8)
+#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr) *addr(%rip)
#else
#define PV_SAVE_REGS(set) \
@@ -883,46 +856,41 @@ extern void default_banner(void);
COND_POP(set, CLBR_EDI, edi); \
COND_POP(set, CLBR_EAX, eax)
-#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
-#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+#define PARA_PATCH(off) ((off) / 4)
+#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr) *%cs:addr
#endif
+#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN \
- PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
- ANNOTATE_RETPOLINE_SAFE; \
- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
+ PARA_SITE(PARA_PATCH(PV_CPU_iret), \
+ ANNOTATE_RETPOLINE_SAFE; \
+ jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)
#define DISABLE_INTERRUPTS(clobbers) \
- PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
+ PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable), \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
- ANNOTATE_RETPOLINE_SAFE; \
- call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
+ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable); \
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#define ENABLE_INTERRUPTS(clobbers) \
- PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
+ PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable), \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
- ANNOTATE_RETPOLINE_SAFE; \
- call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
+ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable); \
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+#endif
-#ifdef CONFIG_X86_32
-#define GET_CR0_INTO_EAX \
- push %ecx; push %edx; \
- ANNOTATE_RETPOLINE_SAFE; \
- call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
- pop %edx; pop %ecx
-#else /* !CONFIG_X86_32 */
-
+#ifdef CONFIG_X86_64
+#ifdef CONFIG_PARAVIRT_XXL
/*
* If swapgs is used while the userspace stack is still current,
* there's no way to call a pvop. The PV replacement *must* be
* inlined, or the swapgs instruction must be trapped and emulated.
*/
#define SWAPGS_UNSAFE_STACK \
- PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
- swapgs)
+ PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)
/*
* Note: swapgs is very special, and in practise is either going to be
@@ -931,44 +899,51 @@ extern void default_banner(void);
* it.
*/
#define SWAPGS \
- PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
- ANNOTATE_RETPOLINE_SAFE; \
- call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
+ PARA_SITE(PARA_PATCH(PV_CPU_swapgs), \
+ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_ops+PV_CPU_swapgs); \
)
+#endif
#define GET_CR2_INTO_RAX \
ANNOTATE_RETPOLINE_SAFE; \
- call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
+ call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);
+#ifdef CONFIG_PARAVIRT_XXL
#define USERGS_SYSRET64 \
- PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
- CLBR_NONE, \
- ANNOTATE_RETPOLINE_SAFE; \
- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
+ PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64), \
+ ANNOTATE_RETPOLINE_SAFE; \
+ jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers) \
- PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
+ PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), \
PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
- ANNOTATE_RETPOLINE_SAFE; \
- call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \
+ ANNOTATE_RETPOLINE_SAFE; \
+ call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl); \
PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
+#endif
#endif /* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
+#endif /* !CONFIG_PARAVIRT */
+
#ifndef __ASSEMBLY__
+#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
}
+#endif
+#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
+#endif
#endif /* __ASSEMBLY__ */
-#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */
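Every hunk in this file is the same mechanical conversion: the separate global pv_*_ops structures collapse into a single pv_ops instance, and the PVOP_* macros now take a group.member path. A minimal model of the new shape — the demo_* names are invented, not the kernel's types:

struct demo_mmu_ops { void (*flush_tlb_user)(void); };
struct demo_cpu_ops { void (*io_delay)(void); };

/* One top-level template replaces the per-group globals. */
struct demo_patch_template {
	struct demo_cpu_ops cpu;
	struct demo_mmu_ops mmu;
};

extern struct demo_patch_template demo_ops;

/* The PVOP_* macros reduce to a call through the single instance: */
#define DEMO_VCALL0(member) demo_ops.member()

static inline void demo_flush_tlb(void)
{
	DEMO_VCALL0(mmu.flush_tlb_user); /* demo_ops.mmu.flush_tlb_user() */
}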
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 4b75acc23b30..fba54ca23b2a 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -66,12 +66,14 @@ struct paravirt_callee_save {
/* general info */
struct pv_info {
+#ifdef CONFIG_PARAVIRT_XXL
unsigned int kernel_rpl;
int shared_kernel_pmd;
#ifdef CONFIG_X86_64
u16 extra_user_64bit_cs; /* __USER_CS if none */
#endif
+#endif
const char *name;
};
@@ -85,17 +87,18 @@ struct pv_init_ops {
* the number of bytes of code generated, as we nop pad the
* rest in generic code.
*/
- unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+ unsigned (*patch)(u8 type, void *insnbuf,
unsigned long addr, unsigned len);
} __no_randomize_layout;
-
+#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
void (*enter)(void);
void (*leave)(void);
void (*flush)(void);
} __no_randomize_layout;
+#endif
struct pv_time_ops {
unsigned long long (*sched_clock)(void);
@@ -104,6 +107,9 @@ struct pv_time_ops {
struct pv_cpu_ops {
/* hooks for various privileged instructions */
+ void (*io_delay)(void);
+
+#ifdef CONFIG_PARAVIRT_XXL
unsigned long (*get_debugreg)(int regno);
void (*set_debugreg)(int regno, unsigned long value);
@@ -141,7 +147,6 @@ struct pv_cpu_ops {
void (*set_iopl_mask)(unsigned mask);
void (*wbinvd)(void);
- void (*io_delay)(void);
/* cpuid emulation, mostly so that caps bits can be disabled */
void (*cpuid)(unsigned int *eax, unsigned int *ebx,
@@ -176,9 +181,11 @@ struct pv_cpu_ops {
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
+#endif
} __no_randomize_layout;
struct pv_irq_ops {
+#ifdef CONFIG_PARAVIRT_XXL
/*
* Get/set interrupt state. save_fl and restore_fl are only
* expected to use X86_EFLAGS_IF; all other bits
@@ -195,35 +202,34 @@ struct pv_irq_ops {
void (*safe_halt)(void);
void (*halt)(void);
-
+#endif
} __no_randomize_layout;
struct pv_mmu_ops {
+ /* TLB operations */
+ void (*flush_tlb_user)(void);
+ void (*flush_tlb_kernel)(void);
+ void (*flush_tlb_one_user)(unsigned long addr);
+ void (*flush_tlb_others)(const struct cpumask *cpus,
+ const struct flush_tlb_info *info);
+
+ void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);
+
+ /* Hook for intercepting the destruction of an mm_struct. */
+ void (*exit_mmap)(struct mm_struct *mm);
+
+#ifdef CONFIG_PARAVIRT_XXL
unsigned long (*read_cr2)(void);
void (*write_cr2)(unsigned long);
unsigned long (*read_cr3)(void);
void (*write_cr3)(unsigned long);
- /*
- * Hooks for intercepting the creation/use/destruction of an
- * mm_struct.
- */
+ /* Hooks for intercepting the creation/use of an mm_struct. */
void (*activate_mm)(struct mm_struct *prev,
struct mm_struct *next);
void (*dup_mmap)(struct mm_struct *oldmm,
struct mm_struct *mm);
- void (*exit_mmap)(struct mm_struct *mm);
-
-
- /* TLB operations */
- void (*flush_tlb_user)(void);
- void (*flush_tlb_kernel)(void);
- void (*flush_tlb_one_user)(unsigned long addr);
- void (*flush_tlb_others)(const struct cpumask *cpus,
- const struct flush_tlb_info *info);
-
- void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);
/* Hooks for allocating and freeing a pagetable top-level */
int (*pgd_alloc)(struct mm_struct *mm);
@@ -298,6 +304,7 @@ struct pv_mmu_ops {
an mfn. We can tell which is which from the index. */
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
+#endif
} __no_randomize_layout;
struct arch_spinlock;
@@ -321,48 +328,31 @@ struct pv_lock_ops {
* number for each function using the offset which we use to indicate
* what to patch. */
struct paravirt_patch_template {
- struct pv_init_ops pv_init_ops;
- struct pv_time_ops pv_time_ops;
- struct pv_cpu_ops pv_cpu_ops;
- struct pv_irq_ops pv_irq_ops;
- struct pv_mmu_ops pv_mmu_ops;
- struct pv_lock_ops pv_lock_ops;
+ struct pv_init_ops init;
+ struct pv_time_ops time;
+ struct pv_cpu_ops cpu;
+ struct pv_irq_ops irq;
+ struct pv_mmu_ops mmu;
+ struct pv_lock_ops lock;
} __no_randomize_layout;
extern struct pv_info pv_info;
-extern struct pv_init_ops pv_init_ops;
-extern struct pv_time_ops pv_time_ops;
-extern struct pv_cpu_ops pv_cpu_ops;
-extern struct pv_irq_ops pv_irq_ops;
-extern struct pv_mmu_ops pv_mmu_ops;
-extern struct pv_lock_ops pv_lock_ops;
+extern struct paravirt_patch_template pv_ops;
#define PARAVIRT_PATCH(x) \
(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
#define paravirt_type(op) \
[paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
- [paravirt_opptr] "i" (&(op))
+ [paravirt_opptr] "i" (&(pv_ops.op))
#define paravirt_clobber(clobber) \
[paravirt_clobber] "i" (clobber)
-/*
- * Generate some code, and mark it as patchable by the
- * apply_paravirt() alternate instruction patcher.
- */
-#define _paravirt_alt(insn_string, type, clobber) \
- "771:\n\t" insn_string "\n" "772:\n" \
- ".pushsection .parainstructions,\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR " 771b\n" \
- " .byte " type "\n" \
- " .byte 772b-771b\n" \
- " .short " clobber "\n" \
- ".popsection\n"
-
/* Generate patchable code, with the default asm parameters. */
-#define paravirt_alt(insn_string) \
- _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
+#define paravirt_call \
+ "PARAVIRT_CALL type=\"%c[paravirt_typenum]\"" \
+ " clobber=\"%c[paravirt_clobber]\"" \
+ " pv_opptr=\"%c[paravirt_opptr]\";"
/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
@@ -373,34 +363,17 @@ extern struct pv_lock_ops pv_lock_ops;
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_call(void *insnbuf,
- const void *target, u16 tgt_clobbers,
- unsigned long addr, u16 site_clobbers,
- unsigned len);
-unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
- unsigned long addr, unsigned len);
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+unsigned paravirt_patch_default(u8 type, void *insnbuf,
unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
const char *start, const char *end);
-unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
- unsigned long addr, unsigned len);
+unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len);
int paravirt_disable_iospace(void);
/*
- * This generates an indirect call based on the operation type number.
- * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_patch_template structure, and can therefore be
- * freely converted back into a structure offset.
- */
-#define PARAVIRT_CALL \
- ANNOTATE_RETPOLINE_SAFE \
- "call *%c[paravirt_opptr];"
-
-/*
* These macros are intended to wrap calls through one of the paravirt
* ops structs, so that they can be later identified and patched at
* runtime.
@@ -510,9 +483,9 @@ int paravirt_disable_iospace(void);
#endif /* CONFIG_X86_32 */
#ifdef CONFIG_PARAVIRT_DEBUG
-#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
+#define PVOP_TEST_NULL(op) BUG_ON(pv_ops.op == NULL)
#else
-#define PVOP_TEST_NULL(op) ((void)op)
+#define PVOP_TEST_NULL(op) ((void)pv_ops.op)
#endif
#define PVOP_RETMASK(rettype) \
@@ -537,7 +510,7 @@ int paravirt_disable_iospace(void);
/* since this condition will never hold */ \
if (sizeof(rettype) > sizeof(unsigned long)) { \
asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
+ paravirt_call \
post \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
@@ -547,7 +520,7 @@ int paravirt_disable_iospace(void);
__ret = (rettype)((((u64)__edx) << 32) | __eax); \
} else { \
asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
+ paravirt_call \
post \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
@@ -574,7 +547,7 @@ int paravirt_disable_iospace(void);
PVOP_VCALL_ARGS; \
PVOP_TEST_NULL(op); \
asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
+ paravirt_call \
post \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
@@ -688,12 +661,31 @@ struct paravirt_patch_site {
u8 *instr; /* original instructions */
u8 instrtype; /* type of this instruction */
u8 len; /* length of original instruction */
- u16 clobbers; /* what registers you may clobber */
};
extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[];
+#else /* __ASSEMBLY__ */
+
+/*
+ * This generates an indirect call based on the operation type number.
+ * The type number, computed in PARAVIRT_PATCH, is derived from the
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+.macro PARAVIRT_CALL type:req clobber:req pv_opptr:req
+771: ANNOTATE_RETPOLINE_SAFE
+ call *\pv_opptr
+772: .pushsection .parainstructions,"a"
+ _ASM_ALIGN
+ _ASM_PTR 771b
+ .byte \type
+ .byte 772b-771b
+ .short \clobber
+ .popsection
+.endm
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_TYPES_H */
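PARAVIRT_PATCH still works after the collapse because the type number is nothing more than the pointer-slot index of a member inside the template, convertible back to an offset at patch time. A self-contained illustration of that trick, with invented demo names:

#include <stddef.h>

struct demo_template { void *a; void *b; void *c; };

/* Slot index = byte offset divided by pointer size. */
#define DEMO_PATCH(member) \
	(offsetof(struct demo_template, member) / sizeof(void *))

_Static_assert(DEMO_PATCH(c) == 2, "third member lives in slot 2");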
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index e9202a0de8f0..1a19d11cfbbd 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -185,22 +185,22 @@ do { \
typeof(var) pfo_ret__; \
switch (sizeof(var)) { \
case 1: \
- asm(op "b "__percpu_arg(1)",%0" \
+ asm volatile(op "b "__percpu_arg(1)",%0"\
: "=q" (pfo_ret__) \
: "m" (var)); \
break; \
case 2: \
- asm(op "w "__percpu_arg(1)",%0" \
+ asm volatile(op "w "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
case 4: \
- asm(op "l "__percpu_arg(1)",%0" \
+ asm volatile(op "l "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
case 8: \
- asm(op "q "__percpu_arg(1)",%0" \
+ asm volatile(op "q "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
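The percpu change swaps plain asm for asm volatile so the compiler cannot merge or hoist two reads of the same per-CPU slot across a point where the task may have migrated to another CPU. A stripped-down illustration of the same constraint (x86-64 only, name invented):

/* Without "volatile", GCC may CSE two of these reads even though the
 * underlying per-CPU value can change between them. */
static inline unsigned long demo_percpu_read(const unsigned long *slot)
{
	unsigned long v;

	asm volatile("movq %1, %0" : "=r" (v) : "m" (*slot));
	return v;
}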
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 12f54082f4c8..8bdf74902293 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -46,6 +46,14 @@
#define INTEL_ARCH_EVENT_MASK \
(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+#define AMD64_L3_SLICE_SHIFT 48
+#define AMD64_L3_SLICE_MASK \
+ ((0xFULL) << AMD64_L3_SLICE_SHIFT)
+
+#define AMD64_L3_THREAD_SHIFT 56
+#define AMD64_L3_THREAD_MASK \
+ ((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+
#define X86_RAW_EVENT_MASK \
(ARCH_PERFMON_EVENTSEL_EVENT | \
ARCH_PERFMON_EVENTSEL_UMASK | \
@@ -270,6 +278,7 @@ struct perf_guest_switch_msr {
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
+extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
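The new AMD64_L3_* definitions carve slice and thread selectors out of the high bits of the event-select value. A hedged sketch of composing such a value — the helper is hypothetical; only the shifts and masks come from the header:

#include <stdint.h>

#define AMD64_L3_SLICE_SHIFT	48
#define AMD64_L3_SLICE_MASK	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_THREAD_SHIFT	56
#define AMD64_L3_THREAD_MASK	(0xFFULL << AMD64_L3_THREAD_SHIFT)

static inline uint64_t demo_l3_config(uint64_t base, unsigned int slice,
				      unsigned int thread)
{
	base &= ~(AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
	base |= ((uint64_t)slice  << AMD64_L3_SLICE_SHIFT)  & AMD64_L3_SLICE_MASK;
	base |= ((uint64_t)thread << AMD64_L3_THREAD_SHIFT) & AMD64_L3_THREAD_MASK;
	return base;
}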
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index fbd578daa66e..ec7f43327033 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -8,7 +8,7 @@
static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm)
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index 858358a82b14..33845d36897c 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -20,7 +20,7 @@ typedef union {
} pte_t;
#endif /* !__ASSEMBLY__ */
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
#define SHARED_KERNEL_PMD ((!static_cpu_has(X86_FEATURE_PTI) && \
(pv_info.shared_kernel_pmd)))
#else
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 690c0307afed..40616e805292 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -55,9 +55,9 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
extern pmdval_t early_pmd_flags;
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
-#else /* !CONFIG_PARAVIRT */
+#else /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
@@ -112,8 +112,7 @@ extern pmdval_t early_pmd_flags;
#define __pte(x) native_make_pte(x)
#define arch_end_context_switch(prev) do {} while(0)
-
-#endif /* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_XXL */
/*
* The following only work if pte_present() is true.
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index ce2b59047cb8..9c85b54bf03c 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -14,6 +14,7 @@
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
+#include <asm/fixmap.h>
extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
@@ -22,7 +23,7 @@ extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
-extern pte_t level1_fixmap_pgt[512];
+extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_top_pgt[];
#define swapper_pg_dir init_top_pgt
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index b64acb08a62b..106b7d0e2dae 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -124,7 +124,7 @@
*/
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SOFT_DIRTY)
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
/*
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 7f2dbd91fc74..90cb2f36c042 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -88,7 +88,7 @@ static __always_inline void __preempt_count_sub(int val)
*/
static __always_inline bool __preempt_count_dec_and_test(void)
{
- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+ return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
}
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d53c54b842da..617805981cce 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -155,7 +155,8 @@ enum cpuid_regs_idx {
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
-#define X86_VENDOR_NUM 9
+#define X86_VENDOR_HYGON 9
+#define X86_VENDOR_NUM 10
#define X86_VENDOR_UNKNOWN 0xff
@@ -315,7 +316,13 @@ struct x86_hw_tss {
*/
u64 sp1;
+ /*
+ * Since Linux does not use ring 2, the 'sp2' slot is unused by
+ * hardware. entry_SYSCALL_64 uses it as scratch space to stash
+ * the user RSP value.
+ */
u64 sp2;
+
u64 reserved2;
u64 ist[7];
u32 reserved3;
@@ -578,7 +585,7 @@ static inline bool on_thread_stack(void)
current_stack_pointer) < THREAD_SIZE;
}
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid
@@ -589,7 +596,7 @@ static inline void load_sp0(unsigned long sp0)
}
#define set_iopl_mask native_set_iopl_mask
-#endif /* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_XXL */
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6de1fd3d0097..143c99499531 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -37,8 +37,10 @@ struct pt_regs {
unsigned short __esh;
unsigned short fs;
unsigned short __fsh;
+ /* On interrupt, gs and __gsh store the vector number. */
unsigned short gs;
unsigned short __gsh;
+ /* On interrupt, this is the error code. */
unsigned long orig_ax;
unsigned long ip;
unsigned short cs;
@@ -144,7 +146,7 @@ static inline int v8086_mode(struct pt_regs *regs)
static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_64
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_XXL
/*
* On non-paravirt systems, this is the only long mode CPL 3
* selector. We do not allow long mode selectors in the LDT.
@@ -237,23 +239,51 @@ static inline int regs_within_kernel_stack(struct pt_regs *regs,
}
/**
+ * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth_addr() returns the address of the @n th entry
+ * of the kernel stack which is specified by @regs. If the @n th entry is
+ * NOT in the kernel stack, this returns NULL.
+ */
+static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return addr;
+ else
+ return NULL;
+}
+
+/* To avoid include hell, we can't include uaccess.h */
+extern long probe_kernel_read(void *dst, const void *src, size_t size);
+
+/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
- * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack
* this returns 0.
*/
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n)
{
- unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
- addr += n;
- if (regs_within_kernel_stack(regs, (unsigned long)addr))
- return *addr;
- else
- return 0;
+ unsigned long *addr;
+ unsigned long val;
+ long ret;
+
+ addr = regs_get_kernel_stack_nth_addr(regs, n);
+ if (addr) {
+ ret = probe_kernel_read(&val, addr, sizeof(val));
+ if (!ret)
+ return val;
+ }
+ return 0;
}
#define arch_has_single_step() (1)
@@ -263,7 +293,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
#endif
-#define ARCH_HAS_USER_SINGLE_STEP_INFO
+#define ARCH_HAS_USER_SINGLE_STEP_REPORT
/*
* When hitting ptrace_stop(), we cannot return using SYSRET because
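The reworked regs_get_kernel_stack_nth() fetches the word through probe_kernel_read(), so a read that lands outside a mapped stack page (e.g. the guard page of a vmap'd stack) yields 0 instead of faulting. A hypothetical caller:

/* Fetch the 3rd word above the saved stack pointer; returns 0 rather
 * than oopsing if the entry is out of range or unmapped. */
static unsigned long demo_peek_stack(struct pt_regs *regs)
{
	return regs_get_kernel_stack_nth(regs, 3);
}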
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 3e70bed8a978..87623c6b13db 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -6,9 +6,24 @@
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
+#include <asm/rmwcc.h>
#define _Q_PENDING_LOOPS (1 << 9)
+#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
+static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
+{
+ u32 val = 0;
+
+ if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
+ "I", _Q_PENDING_OFFSET))
+ val |= _Q_PENDING_VAL;
+
+ val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
+
+ return val;
+}
+
#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
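queued_fetch_set_pending_acquire() leans on LOCK BTS, which only reports the previous value of the single pending bit in CF; that is why the remaining bits are patched in with a separate atomic_read(). In portable C atomics the whole thing is one fetch-or — an illustrative model, not the kernel code:

#include <stdatomic.h>

static inline unsigned int demo_fetch_set_pending(_Atomic unsigned int *val,
						  unsigned int pending_bit)
{
	/* fetch_or returns the whole previous word in one shot; the asm
	 * version reconstructs it from CF plus a separate read. */
	return atomic_fetch_or_explicit(val, 1u << pending_bit,
					memory_order_acquire);
}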
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index 19b90521954c..a8b5e1e13319 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -4,6 +4,41 @@
* x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
* PaX/grsecurity.
*/
+
+#ifdef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/bug.h>
+
+.macro REFCOUNT_EXCEPTION counter:req
+ .pushsection .text..refcount
+111: lea \counter, %_ASM_CX
+112: ud2
+ ASM_UNREACHABLE
+ .popsection
+113: _ASM_EXTABLE_REFCOUNT(112b, 113b)
+.endm
+
+/* Trigger refcount exception if refcount result is negative. */
+.macro REFCOUNT_CHECK_LT_ZERO counter:req
+ js 111f
+ REFCOUNT_EXCEPTION counter="\counter"
+.endm
+
+/* Trigger refcount exception if refcount result is zero or negative. */
+.macro REFCOUNT_CHECK_LE_ZERO counter:req
+ jz 111f
+ REFCOUNT_CHECK_LT_ZERO counter="\counter"
+.endm
+
+/* Trigger refcount exception unconditionally. */
+.macro REFCOUNT_ERROR counter:req
+ jmp 111f
+ REFCOUNT_EXCEPTION counter="\counter"
+.endm
+
+#else /* __ASSEMBLY__ */
+
#include <linux/refcount.h>
#include <asm/bug.h>
@@ -15,34 +50,11 @@
* central refcount exception. The fixup address for the exception points
* back to the regular execution flow in .text.
*/
-#define _REFCOUNT_EXCEPTION \
- ".pushsection .text..refcount\n" \
- "111:\tlea %[counter], %%" _ASM_CX "\n" \
- "112:\t" ASM_UD2 "\n" \
- ASM_UNREACHABLE \
- ".popsection\n" \
- "113:\n" \
- _ASM_EXTABLE_REFCOUNT(112b, 113b)
-
-/* Trigger refcount exception if refcount result is negative. */
-#define REFCOUNT_CHECK_LT_ZERO \
- "js 111f\n\t" \
- _REFCOUNT_EXCEPTION
-
-/* Trigger refcount exception if refcount result is zero or negative. */
-#define REFCOUNT_CHECK_LE_ZERO \
- "jz 111f\n\t" \
- REFCOUNT_CHECK_LT_ZERO
-
-/* Trigger refcount exception unconditionally. */
-#define REFCOUNT_ERROR \
- "jmp 111f\n\t" \
- _REFCOUNT_EXCEPTION
static __always_inline void refcount_add(unsigned int i, refcount_t *r)
{
asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
- REFCOUNT_CHECK_LT_ZERO
+ "REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
: [counter] "+m" (r->refs.counter)
: "ir" (i)
: "cc", "cx");
@@ -51,7 +63,7 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r)
static __always_inline void refcount_inc(refcount_t *r)
{
asm volatile(LOCK_PREFIX "incl %0\n\t"
- REFCOUNT_CHECK_LT_ZERO
+ "REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
: [counter] "+m" (r->refs.counter)
: : "cc", "cx");
}
@@ -59,7 +71,7 @@ static __always_inline void refcount_inc(refcount_t *r)
static __always_inline void refcount_dec(refcount_t *r)
{
asm volatile(LOCK_PREFIX "decl %0\n\t"
- REFCOUNT_CHECK_LE_ZERO
+ "REFCOUNT_CHECK_LE_ZERO counter=\"%[counter]\""
: [counter] "+m" (r->refs.counter)
: : "cc", "cx");
}
@@ -67,14 +79,17 @@ static __always_inline void refcount_dec(refcount_t *r)
static __always_inline __must_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
- GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
- r->refs.counter, "er", i, "%0", e, "cx");
+
+ return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
+ "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
+ r->refs.counter, e, "er", i, "cx");
}
static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
- GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
- r->refs.counter, "%0", e, "cx");
+ return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
+ "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
+ r->refs.counter, e, "cx");
}
static __always_inline __must_check
@@ -91,7 +106,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
/* Did we try to increment from/to an undesirable state? */
if (unlikely(c < 0 || c == INT_MAX || result < c)) {
- asm volatile(REFCOUNT_ERROR
+ asm volatile("REFCOUNT_ERROR counter=\"%[counter]\""
: : [counter] "m" (r->refs.counter)
: "cc", "cx");
break;
@@ -107,4 +122,6 @@ static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
return refcount_add_not_zero(1, r);
}
+#endif /* __ASSEMBLY__ */
+
#endif
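The refcount conversion turns C string macros into real assembler macros that inline asm invokes by name (REFCOUNT_CHECK_LT_ZERO counter=...). A toy version of the pattern, assuming the macro definition is visible to the assembler before its first use — the kernel arranges that through its build machinery, and the demo names are invented:

/* Define a GAS macro once at file scope ... */
asm(".macro DEMO_TRAP_IF_NEG\n\t"
    "jns 2f\n\t"
    "ud2\n"
    "2:\n\t"
    ".endm");

/* ... then call it by name from inline asm instead of pasting a
 * C string macro into every site. */
static inline void demo_dec(int *counter)
{
	asm volatile("lock decl %0\n\t"
		     "DEMO_TRAP_IF_NEG"
		     : "+m" (*counter) : : "cc");
}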
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 4914a3e7c803..46ac84b506f5 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -2,56 +2,69 @@
#ifndef _ASM_X86_RMWcc
#define _ASM_X86_RMWcc
+/* This counts to 12. Any more than that and it returns the 13th argument. */
+#define __RMWcc_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
+#define RMWcc_ARGS(X...) __RMWcc_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __RMWcc_CONCAT(a, b) a ## b
+#define RMWcc_CONCAT(a, b) __RMWcc_CONCAT(a, b)
+
#define __CLOBBERS_MEM(clb...) "memory", ## clb
#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
/* Use asm goto */
-#define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \
-do { \
+#define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \
+({ \
+ bool c = false; \
asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \
- : : [counter] "m" (var), ## __VA_ARGS__ \
+ : : [var] "m" (_var), ## __VA_ARGS__ \
: clobbers : cc_label); \
- return 0; \
-cc_label: \
- return 1; \
-} while (0)
-
-#define __BINARY_RMWcc_ARG " %1, "
-
+ if (0) { \
+cc_label: c = true; \
+ } \
+ c; \
+})
#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
/* Use flags output or a set instruction */
-#define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \
-do { \
+#define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \
+({ \
bool c; \
asm volatile (fullop CC_SET(cc) \
- : [counter] "+m" (var), CC_OUT(cc) (c) \
+ : [var] "+m" (_var), CC_OUT(cc) (c) \
: __VA_ARGS__ : clobbers); \
- return c; \
-} while (0)
-
-#define __BINARY_RMWcc_ARG " %2, "
+ c; \
+})
#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
+#define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \
__GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
-#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\
- __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \
- __CLOBBERS_MEM(clobbers))
+#define GEN_UNARY_RMWcc_3(op, var, cc) \
+ GEN_UNARY_RMWcc_4(op, var, cc, "%[var]")
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
- __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \
- __CLOBBERS_MEM(), vcon (val))
+#define GEN_UNARY_RMWcc(X...) RMWcc_CONCAT(GEN_UNARY_RMWcc_, RMWcc_ARGS(X))(X)
+
+#define GEN_BINARY_RMWcc_6(op, var, cc, vcon, _val, arg0) \
+ __GEN_RMWcc(op " %[val], " arg0, var, cc, \
+ __CLOBBERS_MEM(), [val] vcon (_val))
+
+#define GEN_BINARY_RMWcc_5(op, var, cc, vcon, val) \
+ GEN_BINARY_RMWcc_6(op, var, cc, vcon, val, "%[var]")
+
+#define GEN_BINARY_RMWcc(X...) RMWcc_CONCAT(GEN_BINARY_RMWcc_, RMWcc_ARGS(X))(X)
+
+#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, cc, clobbers...) \
+ __GEN_RMWcc(op " %[var]\n\t" suffix, var, cc, \
+ __CLOBBERS_MEM(clobbers))
-#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \
- clobbers...) \
- __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \
- __CLOBBERS_MEM(clobbers), vcon (val))
+#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, cc, vcon, _val, clobbers...)\
+ __GEN_RMWcc(op " %[val], %[var]\n\t" suffix, var, cc, \
+ __CLOBBERS_MEM(clobbers), [val] vcon (_val))
#endif /* _ASM_X86_RMWcc */
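RMWcc_ARGS/RMWcc_CONCAT implement arity-based macro dispatch: count the arguments, paste the count onto the macro name, and call that variant. The same trick in isolation (C11, names invented):

#define ARGS_(_0, _1, _2, _3, _n, ...) _n
#define NARGS(...) ARGS_(, ##__VA_ARGS__, 3, 2, 1, 0)

#define CAT_(a, b) a ## b
#define CAT(a, b)  CAT_(a, b)

#define ADD_2(a, b)    ((a) + (b))
#define ADD_3(a, b, c) ((a) + (b) + (c))

/* Picks ADD_2 or ADD_3 depending on how many arguments it gets. */
#define ADD(...) CAT(ADD_, NARGS(__VA_ARGS__))(__VA_ARGS__)

_Static_assert(ADD(1, 2) == 3 && ADD(1, 2, 3) == 6, "arity dispatch");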
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 4a911a382ade..8ea1cfdbeabc 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -11,7 +11,6 @@ extern char __end_rodata_aligned[];
#if defined(CONFIG_X86_64)
extern char __end_rodata_hpage_align[];
-extern char __entry_trampoline_start[], __entry_trampoline_end[];
#endif
#endif /* _ASM_X86_SECTIONS_H */
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index e293c122d0d5..ac3892920419 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -186,8 +186,7 @@
#define GDT_ENTRY_TLS_MIN 12
#define GDT_ENTRY_TLS_MAX 14
-/* Abused to load per CPU data from limit */
-#define GDT_ENTRY_PER_CPU 15
+#define GDT_ENTRY_CPUNODE 15
/*
* Number of entries in the GDT table:
@@ -207,11 +206,11 @@
#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
#define __USER32_DS __USER_DS
#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
-#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU*8 + 3)
+#define __CPUNODE_SEG (GDT_ENTRY_CPUNODE*8 + 3)
#endif
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_XXL
# define get_kernel_rpl() 0
#endif
@@ -225,6 +224,47 @@
#define GDT_ENTRY_TLS_ENTRIES 3
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8)
+#ifdef CONFIG_X86_64
+
+/* Bit size and mask of CPU number stored in the per CPU data (and TSC_AUX) */
+#define VDSO_CPUNODE_BITS 12
+#define VDSO_CPUNODE_MASK 0xfff
+
+#ifndef __ASSEMBLY__
+
+/* Helper functions to store/load CPU and node numbers */
+
+static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node)
+{
+ return (node << VDSO_CPUNODE_BITS) | cpu;
+}
+
+static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
+{
+ unsigned int p;
+
+ /*
+ * Load CPU and node number from the GDT. LSL is faster than RDTSCP
+ * and works on all CPUs. This is volatile so that it orders
+ * correctly with respect to barrier() and to keep GCC from cleverly
+ * hoisting it out of the calling function.
+ *
+ * If RDPID is available, use it.
+ */
+ alternative_io ("lsl %[seg],%[p]",
+ ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+ X86_FEATURE_RDPID,
+ [p] "=a" (p), [seg] "r" (__CPUNODE_SEG));
+
+ if (cpu)
+ *cpu = (p & VDSO_CPUNODE_MASK);
+ if (node)
+ *node = (p >> VDSO_CPUNODE_BITS);
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* CONFIG_X86_64 */
+
#ifdef __KERNEL__
/*
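vdso_encode_cpunode()/vdso_read_cpunode() pack the node number above the low VDSO_CPUNODE_BITS that hold the CPU number. A user-space round-trip of the same packing, with the constants copied from the header above:

#include <assert.h>

#define VDSO_CPUNODE_BITS 12
#define VDSO_CPUNODE_MASK 0xfff

static unsigned long encode(unsigned int cpu, unsigned long node)
{
	return (node << VDSO_CPUNODE_BITS) | cpu;
}

int main(void)
{
	unsigned long p = encode(137, 3);

	assert((p &  VDSO_CPUNODE_MASK) == 137);	/* cpu  */
	assert((p >> VDSO_CPUNODE_BITS) == 3);		/* node */
	return 0;
}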
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 317fc59b512c..43c029cdc3fe 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -141,7 +141,7 @@ static inline unsigned long __read_cr4(void)
return native_read_cr4();
}
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
@@ -208,7 +208,7 @@ static inline void load_gs_index(unsigned selector)
#endif
-#endif/* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_XXL */
static inline void clflush(volatile void *__p)
{
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index d33f92b9fa22..7ad41bfcc16c 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -149,7 +149,25 @@ memcpy_mcsafe(void *dst, const void *src, size_t cnt)
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
-void memcpy_flushcache(void *dst, const void *src, size_t cnt);
+void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
+static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
+{
+ if (__builtin_constant_p(cnt)) {
+ switch (cnt) {
+ case 4:
+ asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
+ return;
+ case 8:
+ asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
+ return;
+ case 16:
+ asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
+ asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
+ return;
+ }
+ }
+ __memcpy_flushcache(dst, src, cnt);
+}
#endif
#endif /* __KERNEL__ */
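The memcpy_flushcache() fast paths only fire when the compiler can prove the size: a constant 4, 8, or 16 collapses to one or two MOVNTI non-temporal stores, while everything else still calls __memcpy_flushcache(). A hypothetical caller hitting the 8-byte path:

/* Constant size 8 compiles down to a single movntiq, no call. */
static inline void demo_store_flushcache(u64 *dst, u64 v)
{
	memcpy_flushcache(dst, &v, 8);
}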
diff --git a/arch/x86/include/asm/suspend.h b/arch/x86/include/asm/suspend.h
index ecffe81ff65c..a892494ca5e4 100644
--- a/arch/x86/include/asm/suspend.h
+++ b/arch/x86/include/asm/suspend.h
@@ -4,3 +4,11 @@
#else
# include <asm/suspend_64.h>
#endif
+extern unsigned long restore_jump_address __visible;
+extern unsigned long jump_address_phys;
+extern unsigned long restore_cr3 __visible;
+extern unsigned long temp_pgt __visible;
+extern unsigned long relocated_restore_code __visible;
+extern int relocate_restore_code(void);
+/* Defined in hibernate_asm_32/64.S */
+extern asmlinkage __visible int restore_image(void);
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 8be6afb58471..fdbd9d7b7bca 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -32,4 +32,8 @@ struct saved_context {
unsigned long return_address;
} __attribute__((packed));
+/* routines for saving/restoring kernel state */
+extern char core_restore_code[];
+extern char restore_registers[];
+
#endif /* _ASM_X86_SUSPEND_32_H */
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index cb0a1f470980..404b8b1d44f5 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -6,16 +6,23 @@
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) \
-{ \
- if (!tlb->fullmm && !tlb->need_flush_all) \
- flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
- else \
- flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
-}
+static inline void tlb_flush(struct mmu_gather *tlb);
#include <asm-generic/tlb.h>
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ unsigned long start = 0UL, end = TLB_FLUSH_ALL;
+ unsigned int stride_shift = tlb_get_unmap_shift(tlb);
+
+ if (!tlb->fullmm && !tlb->need_flush_all) {
+ start = tlb->start;
+ end = tlb->end;
+ }
+
+ flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
+}
+
/*
* While x86 architecture in general requires an IPI to perform TLB
* shootdown, enablement code for several hypervisors overrides
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 58ce5288878e..323a313947e0 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -148,22 +148,6 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
#endif
-static inline bool tlb_defer_switch_to_init_mm(void)
-{
- /*
- * If we have PCID, then switching to init_mm is reasonably
- * fast. If we don't have PCID, then switching to init_mm is
- * quite slow, so we try to defer it in the hopes that we can
- * avoid it entirely. The latter approach runs the risk of
- * receiving otherwise unnecessary IPIs.
- *
- * This choice is just a heuristic. The tlb code can handle this
- * function returning true or false regardless of whether we have
- * PCID.
- */
- return !static_cpu_has(X86_FEATURE_PCID);
-}
-
struct tlb_context {
u64 ctx_id;
u64 tlb_gen;
@@ -547,23 +531,30 @@ struct flush_tlb_info {
unsigned long start;
unsigned long end;
u64 new_tlb_gen;
+ unsigned int stride_shift;
+ bool freed_tables;
};
#define local_flush_tlb() __flush_tlb()
-#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+#define flush_tlb_mm(mm) \
+ flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
-#define flush_tlb_range(vma, start, end) \
- flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+#define flush_tlb_range(vma, start, end) \
+ flush_tlb_mm_range((vma)->vm_mm, start, end, \
+ ((vma)->vm_flags & VM_HUGETLB) \
+ ? huge_page_shift(hstate_vma(vma)) \
+ : PAGE_SHIFT, false)
extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
- unsigned long end, unsigned long vmflag);
+ unsigned long end, unsigned int stride_shift,
+ bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
- flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
+ flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
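
flush_tlb_mm_range() now takes a stride shift (log2 of the flushing granularity) instead of VM flags, so hugetlb ranges select huge_page_shift() and can be walked in 2M/1G steps rather than 4K. A hedged sketch of a stride-aware range flush (flush_one_entry() is hypothetical):

    static void flush_range_sketch(unsigned long start, unsigned long end,
                                   unsigned int stride_shift)
    {
        unsigned long stride = 1UL << stride_shift;  /* 4K, 2M or 1G */
        unsigned long addr;

        for (addr = start; addr < end; addr += stride)
            flush_one_entry(addr);   /* hypothetical per-entry INVLPG */
    }
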
diff --git a/arch/x86/include/asm/trace/mpx.h b/arch/x86/include/asm/trace/mpx.h
index 7bd92db09e8d..54133017267c 100644
--- a/arch/x86/include/asm/trace/mpx.h
+++ b/arch/x86/include/asm/trace/mpx.h
@@ -11,12 +11,12 @@
TRACE_EVENT(mpx_bounds_register_exception,
- TP_PROTO(void *addr_referenced,
+ TP_PROTO(void __user *addr_referenced,
const struct mpx_bndreg *bndreg),
TP_ARGS(addr_referenced, bndreg),
TP_STRUCT__entry(
- __field(void *, addr_referenced)
+ __field(void __user *, addr_referenced)
__field(u64, lower_bound)
__field(u64, upper_bound)
),
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index aae77eb8491c..b5e58cc0c5e7 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -198,8 +198,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
"4: movl %3,%0\n" \
" jmp 3b\n" \
".previous\n" \
- _ASM_EXTABLE(1b, 4b) \
- _ASM_EXTABLE(2b, 4b) \
+ _ASM_EXTABLE_UA(1b, 4b) \
+ _ASM_EXTABLE_UA(2b, 4b) \
: "=r" (err) \
: "A" (x), "r" (addr), "i" (errret), "0" (err))
@@ -340,8 +340,8 @@ do { \
" xorl %%edx,%%edx\n" \
" jmp 3b\n" \
".previous\n" \
- _ASM_EXTABLE(1b, 4b) \
- _ASM_EXTABLE(2b, 4b) \
+ _ASM_EXTABLE_UA(1b, 4b) \
+ _ASM_EXTABLE_UA(2b, 4b) \
: "=r" (retval), "=&A"(x) \
: "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
"i" (errret), "0" (retval)); \
@@ -386,7 +386,7 @@ do { \
" xor"itype" %"rtype"1,%"rtype"1\n" \
" jmp 2b\n" \
".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_UA(1b, 3b) \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
@@ -398,7 +398,7 @@ do { \
"3: mov %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_UA(1b, 3b) \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
@@ -474,7 +474,7 @@ struct __large_struct { unsigned long buf[100]; };
"3: mov %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_UA(1b, 3b) \
: "=r"(err) \
: ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
@@ -602,7 +602,7 @@ extern void __cmpxchg_wrong_size(void)
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_UA(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "q" (__new), "1" (__old) \
: "memory" \
@@ -618,7 +618,7 @@ extern void __cmpxchg_wrong_size(void)
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_UA(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
@@ -634,7 +634,7 @@ extern void __cmpxchg_wrong_size(void)
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_UA(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
@@ -653,7 +653,7 @@ extern void __cmpxchg_wrong_size(void)
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_UA(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 51c4eee00732..dc4ed8bc2382 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -24,6 +24,7 @@
# include <asm/unistd_64.h>
# include <asm/unistd_64_x32.h>
# define __ARCH_WANT_COMPAT_SYS_TIME
+# define __ARCH_WANT_SYS_UTIME32
# define __ARCH_WANT_COMPAT_SYS_PREADV64
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
# define __ARCH_WANT_COMPAT_SYS_PREADV64V2
@@ -31,13 +32,13 @@
# endif
+# define __ARCH_WANT_NEW_STAT
# define __ARCH_WANT_OLD_READDIR
# define __ARCH_WANT_OLD_STAT
# define __ARCH_WANT_SYS_ALARM
# define __ARCH_WANT_SYS_FADVISE64
# define __ARCH_WANT_SYS_GETHOSTNAME
# define __ARCH_WANT_SYS_GETPGRP
-# define __ARCH_WANT_SYS_LLSEEK
# define __ARCH_WANT_SYS_NICE
# define __ARCH_WANT_SYS_OLDUMOUNT
# define __ARCH_WANT_SYS_OLD_GETRLIMIT
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index a80c0673798f..e60c45fd3679 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -10,8 +10,13 @@ struct cpumask;
struct mm_struct;
#ifdef CONFIG_X86_UV
+#include <linux/efi.h>
extern enum uv_system_type get_uv_system_type(void);
+static inline bool is_early_uv_system(void)
+{
+ return !((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab);
+}
extern int is_uv_system(void);
extern int is_uv_hubless(void);
extern void uv_cpu_init(void);
@@ -23,6 +28,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
#else /* X86_UV */
static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline bool is_early_uv_system(void) { return 0; }
static inline int is_uv_system(void) { return 0; }
static inline int is_uv_hubless(void) { return 0; }
static inline void uv_cpu_init(void) { }
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 53748541c487..913a133f8e6f 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,33 +5,46 @@
#include <linux/compiler.h>
#include <linux/clocksource.h>
+#include <uapi/linux/time.h>
+
#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
+
+/*
+ * There is one of these objects in the vvar page for each
+ * vDSO-accelerated clockid. For high-resolution clocks, this encodes
+ * the time corresponding to vsyscall_gtod_data.cycle_last. For coarse
+ * clocks, this encodes the actual time.
+ *
+ * To confuse the reader, for high-resolution clocks, nsec is left-shifted
+ * by vsyscall_gtod_data.shift.
+ */
+struct vgtod_ts {
+ u64 sec;
+ u64 nsec;
+};
+
+#define VGTOD_BASES (CLOCK_TAI + 1)
+#define VGTOD_HRES (BIT(CLOCK_REALTIME) | BIT(CLOCK_MONOTONIC) | BIT(CLOCK_TAI))
+#define VGTOD_COARSE (BIT(CLOCK_REALTIME_COARSE) | BIT(CLOCK_MONOTONIC_COARSE))
+
/*
* vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
 * so be careful when modifying this structure.
*/
struct vsyscall_gtod_data {
- unsigned seq;
-
- int vclock_mode;
- u64 cycle_last;
- u64 mask;
- u32 mult;
- u32 shift;
-
- /* open coded 'struct timespec' */
- u64 wall_time_snsec;
- gtod_long_t wall_time_sec;
- gtod_long_t monotonic_time_sec;
- u64 monotonic_time_snsec;
- gtod_long_t wall_time_coarse_sec;
- gtod_long_t wall_time_coarse_nsec;
- gtod_long_t monotonic_time_coarse_sec;
- gtod_long_t monotonic_time_coarse_nsec;
+ unsigned int seq;
+
+ int vclock_mode;
+ u64 cycle_last;
+ u64 mask;
+ u32 mult;
+ u32 shift;
+
+ struct vgtod_ts basetime[VGTOD_BASES];
int tz_minuteswest;
int tz_dsttime;
@@ -44,9 +57,9 @@ static inline bool vclock_was_used(int vclock)
return READ_ONCE(vclocks_used) & (1 << vclock);
}
-static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
+static inline unsigned int gtod_read_begin(const struct vsyscall_gtod_data *s)
{
- unsigned ret;
+ unsigned int ret;
repeat:
ret = READ_ONCE(s->seq);
@@ -59,7 +72,7 @@ repeat:
}
static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
- unsigned start)
+ unsigned int start)
{
smp_rmb();
return unlikely(s->seq != start);
@@ -77,30 +90,4 @@ static inline void gtod_write_end(struct vsyscall_gtod_data *s)
++s->seq;
}
-#ifdef CONFIG_X86_64
-
-#define VGETCPU_CPU_MASK 0xfff
-
-static inline unsigned int __getcpu(void)
-{
- unsigned int p;
-
- /*
- * Load per CPU data from GDT. LSL is faster than RDTSCP and
- * works on all CPUs. This is volatile so that it orders
- * correctly wrt barrier() and to keep gcc from cleverly
- * hoisting it out of the calling function.
- *
- * If RDPID is available, use it.
- */
- alternative_io ("lsl %[seg],%[p]",
- ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
- X86_FEATURE_RDPID,
- [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
-
- return p;
-}
-
-#endif /* CONFIG_X86_64 */
-
#endif /* _ASM_X86_VGTOD_H */
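
For orientation, a reader of vsyscall_gtod_data follows the classic seqcount protocol: snapshot seq, copy the base time, retry if a writer intervened. A hedged sketch for a coarse clock (for high-resolution clocks the copied nsec is still left-shifted and must first be combined with (cycles - cycle_last) * mult before the final >> shift):

    static void read_coarse_sketch(const struct vsyscall_gtod_data *gtod,
                                   int clk, u64 *sec, u64 *nsec)
    {
        unsigned int seq;

        do {
            seq   = gtod_read_begin(gtod);      /* waits out odd seq */
            *sec  = gtod->basetime[clk].sec;
            *nsec = gtod->basetime[clk].nsec;   /* real ns for coarse clocks */
        } while (gtod_read_retry(gtod, seq));   /* raced with a writer: retry */
    }
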
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 0116b2ee9e64..1fc7a0d1e877 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -40,7 +40,7 @@ static inline int cpu_has_vmx(void)
*/
static inline void cpu_vmxoff(void)
{
- asm volatile (ASM_VMX_VMXOFF : : : "cc");
+ asm volatile ("vmxoff");
cr4_clear_bits(X86_CR4_VMXE);
}
@@ -83,9 +83,10 @@ static inline void cpu_emergency_vmxoff(void)
*/
static inline int cpu_has_svm(const char **msg)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
if (msg)
- *msg = "not amd";
+ *msg = "not amd or hygon";
return 0;
}
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9527ba5d62da..ade0f153947d 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -503,19 +503,6 @@ enum vmcs_field {
#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
-
-#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
-#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
-#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
-#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30"
-#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0"
-#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
-#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
-#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
-#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
-#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
-#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
-
struct vmx_msr_entry {
u32 index;
u32 reserved;
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index b85a7c54c6a1..0f842104862c 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -303,4 +303,6 @@ extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
extern bool x86_pnpbios_disabled(void);
+void x86_verify_bootdata_version(void);
+
#endif
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index d383140e1dc8..068d9b067c83 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_XEN_EVENTS_H
#define _ASM_X86_XEN_EVENTS_H
+#include <xen/xen.h>
+
enum ipi_vector {
XEN_RESCHEDULE_VECTOR,
XEN_CALL_FUNCTION_VECTOR,
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index a06cbf019744..22f89d040ddd 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -16,6 +16,9 @@
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
+/* version flags */
+#define VERSION_WRITTEN 0x8000
+
/* loadflags */
#define LOADED_HIGH (1<<0)
#define KASLR_FLAG (1<<1)
@@ -86,6 +89,7 @@ struct setup_header {
__u64 pref_address;
__u32 init_size;
__u32 handover_offset;
+ __u64 acpi_rsdp_addr;
} __attribute__((packed));
struct sys_desc_table {
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 86299efa804a..dabfcf7c3941 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -288,6 +288,7 @@ struct kvm_reinject_control {
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
#define KVM_VCPUEVENT_VALID_SMM 0x00000008
+#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010
/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
@@ -299,7 +300,7 @@ struct kvm_vcpu_events {
__u8 injected;
__u8 nr;
__u8 has_error_code;
- __u8 pad;
+ __u8 pending;
__u32 error_code;
} exception;
struct {
@@ -322,7 +323,9 @@ struct kvm_vcpu_events {
__u8 smm_inside_nmi;
__u8 latched_init;
} smi;
- __u32 reserved[9];
+ __u8 reserved[27];
+ __u8 exception_has_payload;
+ __u64 exception_payload;
};
/* for KVM_GET/SET_DEBUGREGS */
@@ -377,9 +380,11 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
+#define KVM_STATE_NESTED_EVMCS 0x00000004
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
diff --git a/arch/x86/include/uapi/asm/siginfo.h b/arch/x86/include/uapi/asm/siginfo.h
index b3d157957177..6642d8be40c4 100644
--- a/arch/x86/include/uapi/asm/siginfo.h
+++ b/arch/x86/include/uapi/asm/siginfo.h
@@ -7,8 +7,6 @@
typedef long long __kernel_si_clock_t __attribute__((aligned(4)));
# define __ARCH_SI_CLOCK_T __kernel_si_clock_t
# define __ARCH_SI_ATTRIBUTES __attribute__((aligned(8)))
-# else /* x86-64 */
-# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
# endif
#endif
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 3b20607d581b..e8fea7ffa306 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -48,6 +48,7 @@
#include <asm/mpspec.h>
#include <asm/smp.h>
#include <asm/i8259.h>
+#include <asm/setup.h>
#include "sleep.h" /* To include x86_acpi_suspend_lowlevel */
static int __initdata acpi_force = 0;
@@ -1771,3 +1772,8 @@ void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
e820__range_add(addr, size, E820_TYPE_ACPI);
e820__update_table_print();
}
+
+u64 x86_default_get_root_pointer(void)
+{
+ return boot_params.hdr.acpi_rsdp_addr;
+}
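
x86_default_get_root_pointer() simply reports whatever physical RSDP address the boot loader stored in the new setup-header field. A sketch of the intended consumption order (scan_legacy_rsdp() is hypothetical; the real fallback is ACPI's usual EFI/BIOS-area search):

    u64 find_acpi_rsdp_sketch(void)
    {
        u64 rsdp = x86_default_get_root_pointer();  /* boot_params.hdr.acpi_rsdp_addr */

        if (rsdp)
            return rsdp;            /* boot loader handed us the address */
        return scan_legacy_rsdp();  /* hypothetical: EFI table / BIOS scan */
    }
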
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index b9d5e7c9ef43..ebeac487a20c 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -222,6 +222,10 @@ void __init arch_init_ideal_nops(void)
}
break;
+ case X86_VENDOR_HYGON:
+ ideal_nops = p6_nops;
+ return;
+
case X86_VENDOR_AMD:
if (boot_cpu_data.x86 > 0xf) {
ideal_nops = p6_nops;
@@ -594,7 +598,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
BUG_ON(p->len > MAX_PATCH_LEN);
/* prep the buffer with the original instructions */
memcpy(insnbuf, p->instr, p->len);
- used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+ used = pv_ops.init.patch(p->instrtype, insnbuf,
(unsigned long)p->instr, p->len);
BUG_ON(used > p->len);
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index f299d8a479bb..3f9d1b4019bb 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -482,7 +482,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
{
void *vaddr;
- vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
+ vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
if (!vaddr ||
!force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
return vaddr;
@@ -494,7 +494,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
goto out_free;
return vaddr;
out_free:
- dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
+ dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
return NULL;
}
@@ -504,7 +504,7 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_addr, unsigned long attrs)
{
gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
- dma_direct_free(dev, size, vaddr, dma_addr, attrs);
+ dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
}
static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index b481b95bd8f6..a6eca647bc76 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -61,6 +61,21 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{}
};
+static const struct pci_device_id hygon_root_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
+ {}
+};
+
+const struct pci_device_id hygon_nb_misc_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+ {}
+};
+
+static const struct pci_device_id hygon_nb_link_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
+ {}
+};
+
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
{ 0x00, 0x18, 0x20 },
{ 0xff, 0x00, 0x20 },
@@ -194,15 +209,24 @@ EXPORT_SYMBOL_GPL(amd_df_indirect_read);
int amd_cache_northbridges(void)
{
- u16 i = 0;
- struct amd_northbridge *nb;
+ const struct pci_device_id *misc_ids = amd_nb_misc_ids;
+ const struct pci_device_id *link_ids = amd_nb_link_ids;
+ const struct pci_device_id *root_ids = amd_root_ids;
struct pci_dev *root, *misc, *link;
+ struct amd_northbridge *nb;
+ u16 i = 0;
if (amd_northbridges.num)
return 0;
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ root_ids = hygon_root_ids;
+ misc_ids = hygon_nb_misc_ids;
+ link_ids = hygon_nb_link_ids;
+ }
+
misc = NULL;
- while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
+ while ((misc = next_northbridge(misc, misc_ids)) != NULL)
i++;
if (!i)
@@ -218,11 +242,11 @@ int amd_cache_northbridges(void)
link = misc = root = NULL;
for (i = 0; i != amd_northbridges.num; i++) {
node_to_amd_nb(i)->root = root =
- next_northbridge(root, amd_root_ids);
+ next_northbridge(root, root_ids);
node_to_amd_nb(i)->misc = misc =
- next_northbridge(misc, amd_nb_misc_ids);
+ next_northbridge(misc, misc_ids);
node_to_amd_nb(i)->link = link =
- next_northbridge(link, amd_nb_link_ids);
+ next_northbridge(link, link_ids);
}
if (amd_gart_present())
@@ -261,11 +285,19 @@ EXPORT_SYMBOL_GPL(amd_cache_northbridges);
*/
bool __init early_is_amd_nb(u32 device)
{
+ const struct pci_device_id *misc_ids = amd_nb_misc_ids;
const struct pci_device_id *id;
u32 vendor = device & 0xffff;
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+ return false;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ misc_ids = hygon_nb_misc_ids;
+
device >>= 16;
- for (id = amd_nb_misc_ids; id->vendor; id++)
+ for (id = misc_ids; id->vendor; id++)
if (vendor == id->vendor && device == id->device)
return true;
return false;
@@ -277,7 +309,8 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
u64 base, msr;
unsigned int segn_busn_bits;
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return NULL;
/* assume all cpus from fam10h have mmconfig */
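
Hygon northbridges reuse the AMD family-17h device IDs under the Hygon vendor ID, so support reduces to swapping which pci_device_id table the existing walkers consume. The selection pattern, condensed (next_northbridge() semantics as used above):

    const struct pci_device_id *misc_ids =
            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
                    hygon_nb_misc_ids : amd_nb_misc_ids;
    struct pci_dev *misc = NULL;
    u16 count = 0;

    while ((misc = next_northbridge(misc, misc_ids)) != NULL)
            count++;        /* one data-fabric function-3 device per node */
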
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 84132eddb5a8..ab731ab09f06 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -224,6 +224,11 @@ static int modern_apic(void)
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86 >= 0xf)
return 1;
+
+ /* Hygon systems use modern APIC */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ return 1;
+
return lapic_get_version() >= 0x14;
}
@@ -1912,6 +1917,8 @@ static int __init detect_init_APIC(void)
(boot_cpu_data.x86 >= 15))
break;
goto no_apic;
+ case X86_VENDOR_HYGON:
+ break;
case X86_VENDOR_INTEL:
if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
(boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 02e8acb134f8..47ff2976c292 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -185,6 +185,7 @@ void __init default_setup_apic_routing(void)
break;
}
/* If P4 and above fall through */
+ case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
def_to_bigsmp = 1;
}
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 7654febd5102..652e7ffa9b9d 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
struct apic_chip_data *apicd = apic_chip_data(irqd);
int vector, cpu;
- cpumask_and(vector_searchmask, vector_searchmask, affmsk);
- cpu = cpumask_first(vector_searchmask);
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
+ cpumask_and(vector_searchmask, dest, affmsk);
+
 /* set_affinity might end up here with nothing to do */
if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
return 0;
- vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+ vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
+ &cpu);
trace_vector_alloc_managed(irqd->irq, vector, vector);
if (vector < 0)
return vector;
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 01de31db300d..72adf6c335dc 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -64,15 +64,12 @@ void common(void) {
OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
#endif
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
BLANK();
- OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
- OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
- OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
- OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
- OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
- OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
- OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
+ OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable);
+ OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable);
+ OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret);
+ OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2);
#endif
#ifdef CONFIG_XEN
@@ -99,13 +96,12 @@ void common(void) {
OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
/* Layout info for cpu_entry_area */
- OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
- OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
DEFINE(MASK_entry_stack, (~(sizeof(struct entry_stack) - 1)));
- /* Offset for sp0 and sp1 into the tss_struct */
+ /* Offset for fields in tss_struct */
OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
+ OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
}
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 3b9405e7ba2b..ddced33184b5 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -21,10 +21,13 @@ static char syscalls_ia32[] = {
int main(void)
{
#ifdef CONFIG_PARAVIRT
- OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
- OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
+#ifdef CONFIG_PARAVIRT_XXL
+ OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template,
+ cpu.usergs_sysret64);
+ OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs);
#ifdef CONFIG_DEBUG_ENTRY
- OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl);
+ OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl);
+#endif
#endif
BLANK();
#endif
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 33399426793e..1979a76bfadd 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kthread.h>
@@ -31,11 +34,17 @@ static __init int set_corruption_check(char *arg)
ssize_t ret;
unsigned long val;
+ if (!arg) {
+ pr_err("memory_corruption_check config string not provided\n");
+ return -EINVAL;
+ }
+
ret = kstrtoul(arg, 10, &val);
if (ret)
return ret;
memory_corruption_check = val;
+
return 0;
}
early_param("memory_corruption_check", set_corruption_check);
@@ -45,6 +54,11 @@ static __init int set_corruption_check_period(char *arg)
ssize_t ret;
unsigned long val;
+ if (!arg) {
+ pr_err("memory_corruption_check_period config string not provided\n");
+ return -EINVAL;
+ }
+
ret = kstrtoul(arg, 10, &val);
if (ret)
return ret;
@@ -59,6 +73,11 @@ static __init int set_corruption_check_size(char *arg)
char *end;
unsigned size;
+ if (!arg) {
+ pr_err("memory_corruption_check_size config string not provided\n");
+ return -EINVAL;
+ }
+
size = memparse(arg, &end);
if (*end == '\0')
@@ -113,7 +132,7 @@ void __init setup_bios_corruption_check(void)
}
if (num_scan_areas)
- printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
+ pr_info("Scanning %d areas for low memory corruption\n", num_scan_areas);
}
@@ -132,8 +151,7 @@ void check_for_bios_corruption(void)
for (; size; addr++, size -= sizeof(unsigned long)) {
if (!*addr)
continue;
- printk(KERN_ERR "Corrupted low memory at %p (%lx phys) = %08lx\n",
- addr, __pa(addr), *addr);
+ pr_err("Corrupted low memory at %p (%lx phys) = %08lx\n", addr, __pa(addr), *addr);
corruption = 1;
*addr = 0;
}
@@ -157,11 +175,11 @@ static int start_periodic_check_for_corruption(void)
if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
return 0;
- printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
- corruption_check_period);
+ pr_info("Scanning for low memory corruption every %d seconds\n", corruption_check_period);
/* First time we run the checks right away */
schedule_delayed_work(&bios_check_work, 0);
+
return 0;
}
device_initcall(start_periodic_check_for_corruption);
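
The three new guards all close the same hole: an early_param handler receives a NULL argument when the option is passed without "=value", and kstrtoul()/memparse() would then dereference NULL. The guarded handler shape, as a minimal sketch:

    static __init int set_example(char *arg)
    {
        unsigned long val;

        if (!arg)                       /* "example" given with no =value */
            return -EINVAL;
        if (kstrtoul(arg, 10, &val))    /* reject non-numeric input */
            return -EINVAL;
        /* ... apply val ... */
        return 0;
    }
    early_param("example", set_example);
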
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 347137e80bf5..1f5d2291c31e 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
+obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 22ab408177b2..eeea634bee0a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -922,7 +922,7 @@ static void init_amd(struct cpuinfo_x86 *c)
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
/* AMD errata T13 (order #21922) */
- if ((c->x86 == 6)) {
+ if (c->x86 == 6) {
/* Duron Rev A0 */
if (c->x86_model == 3 && c->x86_stepping == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 40bdaea97fe7..c37e66e493bf 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -35,12 +35,10 @@ static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
-/*
- * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
- * writes to SPEC_CTRL contain whatever reserved bits have been set.
- */
-u64 __ro_after_init x86_spec_ctrl_base;
+/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+static DEFINE_MUTEX(spec_ctrl_mutex);
/*
* The vendor and possibly platform specific bits which can be modified in
@@ -312,6 +310,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
}
if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
return SPECTRE_V2_CMD_AUTO;
@@ -325,6 +324,46 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return cmd;
}
+static bool stibp_needed(void)
+{
+ if (spectre_v2_enabled == SPECTRE_V2_NONE)
+ return false;
+
+ if (!boot_cpu_has(X86_FEATURE_STIBP))
+ return false;
+
+ return true;
+}
+
+static void update_stibp_msr(void *info)
+{
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+void arch_smt_update(void)
+{
+ u64 mask;
+
+ if (!stibp_needed())
+ return;
+
+ mutex_lock(&spec_ctrl_mutex);
+ mask = x86_spec_ctrl_base;
+ if (cpu_smt_control == CPU_SMT_ENABLED)
+ mask |= SPEC_CTRL_STIBP;
+ else
+ mask &= ~SPEC_CTRL_STIBP;
+
+ if (mask != x86_spec_ctrl_base) {
+ pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
+ cpu_smt_control == CPU_SMT_ENABLED ?
+ "Enabling" : "Disabling");
+ x86_spec_ctrl_base = mask;
+ on_each_cpu(update_stibp_msr, NULL, 1);
+ }
+ mutex_unlock(&spec_ctrl_mutex);
+}
+
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -371,7 +410,8 @@ static void __init spectre_v2_select_mitigation(void)
return;
retpoline_auto:
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
retpoline_amd:
if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
@@ -424,6 +464,9 @@ specv2_set_mode:
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
+
+ /* Enable STIBP if appropriate */
+ arch_smt_update();
}
#undef pr_fmt
@@ -814,6 +857,8 @@ static ssize_t l1tf_show_state(char *buf)
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
+ int ret;
+
if (!boot_cpu_has_bug(bug))
return sprintf(buf, "Not affected\n");
@@ -831,10 +876,13 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
case X86_BUG_SPECTRE_V2:
- return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
+ boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
spectre_v2_module_string());
+ return ret;
case X86_BUG_SPEC_STORE_BYPASS:
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
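
arch_smt_update() keys STIBP purely off the SMT state: with sibling threads sharing a core, STIBP constrains cross-thread branch-predictor injection; without SMT it is pure overhead. The decision, condensed with the rationale spelled out:

    /* Condensed restatement of the decision arch_smt_update() makes. */
    u64 mask = x86_spec_ctrl_base;

    if (cpu_smt_control == CPU_SMT_ENABLED)
            mask |= SPEC_CTRL_STIBP;    /* siblings share the core: arm STIBP */
    else
            mask &= ~SPEC_CTRL_STIBP;   /* no SMT: STIBP buys nothing */
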
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 0c5fcbd998cf..dc1b9342e9c4 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -602,6 +602,10 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
else
amd_cpuid4(index, &eax, &ebx, &ecx);
amd_init_l3_cache(this_leaf, index);
+ } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ cpuid_count(0x8000001d, index, &eax.full,
+ &ebx.full, &ecx.full, &edx);
+ amd_init_l3_cache(this_leaf, index);
} else {
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
}
@@ -625,7 +629,8 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
union _cpuid4_leaf_eax cache_eax;
int i = -1;
- if (c->x86_vendor == X86_VENDOR_AMD)
+ if (c->x86_vendor == X86_VENDOR_AMD ||
+ c->x86_vendor == X86_VENDOR_HYGON)
op = 0x8000001d;
else
op = 4;
@@ -678,6 +683,22 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
}
}
+void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
+{
+ /*
+ * We may have multiple LLCs if L3 caches exist, so check if we
+ * have an L3 cache by looking at the L3 cache CPUID leaf.
+ */
+ if (!cpuid_edx(0x80000006))
+ return;
+
+ /*
+ * LLC is at the core complex level.
+ * Core complex ID is ApicId[3] for these processors.
+ */
+ per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+}
+
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
@@ -691,6 +712,11 @@ void init_amd_cacheinfo(struct cpuinfo_x86 *c)
}
}
+void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
+{
+ num_cache_leaves = find_num_cache_leaves(c);
+}
+
void init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
/* Cache sizes */
@@ -913,7 +939,8 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
int index_msb, i;
struct cpuinfo_x86 *c = &cpu_data(cpu);
- if (c->x86_vendor == X86_VENDOR_AMD) {
+ if (c->x86_vendor == X86_VENDOR_AMD ||
+ c->x86_vendor == X86_VENDOR_HYGON) {
if (__cache_amd_cpumap_setup(cpu, index, base))
return;
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 44c4ef3d989b..660d0b22e962 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -949,11 +949,11 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
}
static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY },
{ X86_VENDOR_CENTAUR, 5 },
{ X86_VENDOR_INTEL, 5 },
{ X86_VENDOR_NSC, 5 },
@@ -963,15 +963,16 @@ static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
{ X86_VENDOR_AMD },
+ { X86_VENDOR_HYGON },
{}
};
/* Only list CPUs which speculate but are non susceptible to SSB */
static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
@@ -984,14 +985,14 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
/* in addition to cpu_no_speculation */
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
{}
@@ -1076,6 +1077,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
memset(&c->x86_capability, 0, sizeof c->x86_capability);
c->extended_cpuid_level = 0;
+ if (!have_cpuid_p())
+ identify_cpu_without_cpuid(c);
+
/* cyrix could have cpuid enabled via c_identify()*/
if (have_cpuid_p()) {
cpu_detect(c);
@@ -1093,7 +1097,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
if (this_cpu->c_bsp_init)
this_cpu->c_bsp_init(c);
} else {
- identify_cpu_without_cpuid(c);
setup_clear_cpu_cap(X86_FEATURE_CPUID);
}
@@ -1240,10 +1243,10 @@ static void generic_identify(struct cpuinfo_x86 *c)
* ESPFIX issue, we can change this.
*/
#ifdef CONFIG_X86_32
-# ifdef CONFIG_PARAVIRT
+# ifdef CONFIG_PARAVIRT_XXL
do {
extern void native_iret(void);
- if (pv_cpu_ops.iret == native_iret)
+ if (pv_ops.cpu.iret == native_iret)
set_cpu_bug(c, X86_BUG_ESPFIX);
} while (0);
# else
@@ -1531,19 +1534,8 @@ EXPORT_PER_CPU_SYMBOL(__preempt_count);
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
- extern char _entry_trampoline[];
- extern char entry_SYSCALL_64_trampoline[];
-
- int cpu = smp_processor_id();
- unsigned long SYSCALL64_entry_trampoline =
- (unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
- (entry_SYSCALL_64_trampoline - _entry_trampoline);
-
wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
- if (static_cpu_has(X86_FEATURE_PTI))
- wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
- else
- wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
+ wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
#ifdef CONFIG_IA32_EMULATION
wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
@@ -1554,7 +1546,8 @@ void syscall_init(void)
* AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
*/
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
- wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
+ wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
+ (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else
wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
@@ -1669,6 +1662,29 @@ static void wait_for_master_cpu(int cpu)
#endif
}
+#ifdef CONFIG_X86_64
+static void setup_getcpu(int cpu)
+{
+ unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
+ struct desc_struct d = { };
+
+ if (static_cpu_has(X86_FEATURE_RDTSCP))
+ write_rdtscp_aux(cpudata);
+
+ /* Store CPU and node number in limit. */
+ d.limit0 = cpudata;
+ d.limit1 = cpudata >> 16;
+
+ d.type = 5; /* RO data, expand down, accessed */
+ d.dpl = 3; /* Visible to user code */
+ d.s = 1; /* Not a system segment */
+ d.p = 1; /* Present */
+ d.d = 1; /* 32-bit */
+
+ write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
+}
+#endif
+
/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized (naturally) in the bootstrap process, such as the GDT
@@ -1706,6 +1722,7 @@ void cpu_init(void)
early_cpu_to_node(cpu) != NUMA_NO_NODE)
set_numa_node(early_cpu_to_node(cpu));
#endif
+ setup_getcpu(cpu);
me = current;
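
setup_getcpu() packs CPU and node into the new GDT entry's segment limit so user mode can recover both with a single LSL (or RDPID). Assuming the 12-bit split implied by vdso_encode_cpunode() and the old VGETCPU_CPU_MASK, the decode side looks like:

    #define CPUNODE_CPU_MASK   0xfff   /* low 12 bits: CPU number */
    #define CPUNODE_NODE_SHIFT 12      /* remaining bits: NUMA node */

    static inline void decode_cpunode(unsigned long limit,
                                      unsigned int *cpu, unsigned int *node)
    {
        *cpu  = limit & CPUNODE_CPU_MASK;
        *node = limit >> CPUNODE_NODE_SHIFT;
    }
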
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 7b229afa0a37..da5446acc241 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -54,6 +54,7 @@ extern u32 get_scattered_cpuid_leaf(unsigned int level,
enum cpuid_regs_idx reg);
extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
+extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c);
extern void detect_num_cpu_cores(struct cpuinfo_x86 *c);
extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 8949b7ae6d92..d12226f60168 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -437,7 +437,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c)
/* enable MAPEN */
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
/* enable cpuid */
- setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
+ setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);
/* disable MAPEN */
setCx86(CX86_CCR3, ccr3);
local_irq_restore(flags);
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
new file mode 100644
index 000000000000..cf25405444ab
--- /dev/null
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hygon Processor Support for Linux
+ *
+ * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
+ *
+ * Author: Pu Wen <puwen@hygon.cn>
+ */
+#include <linux/io.h>
+
+#include <asm/cpu.h>
+#include <asm/smp.h>
+#include <asm/cacheinfo.h>
+#include <asm/spec-ctrl.h>
+#include <asm/delay.h>
+#ifdef CONFIG_X86_64
+# include <asm/set_memory.h>
+#endif
+
+#include "cpu.h"
+
+/*
+ * nodes_per_socket: Stores the number of nodes per socket.
+ * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
+ */
+static u32 nodes_per_socket = 1;
+
+#ifdef CONFIG_NUMA
+/*
+ * Helper to work around a broken NUMA config; see the comment in
+ * srat_detect_node().
+ */
+static int nearby_node(int apicid)
+{
+ int i, node;
+
+ for (i = apicid - 1; i >= 0; i--) {
+ node = __apicid_to_node[i];
+ if (node != NUMA_NO_NODE && node_online(node))
+ return node;
+ }
+ for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
+ node = __apicid_to_node[i];
+ if (node != NUMA_NO_NODE && node_online(node))
+ return node;
+ }
+ return first_node(node_online_map); /* Shouldn't happen */
+}
+#endif
+
+static void hygon_get_topology_early(struct cpuinfo_x86 *c)
+{
+ if (cpu_has(c, X86_FEATURE_TOPOEXT))
+ smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+}
+
+/*
+ * Fixup core topology information for
+ * (1) Hygon multi-node processors
+ * Assumption: Number of cores in each internal node is the same.
+ * (2) Hygon processors supporting compute units
+ */
+static void hygon_get_topology(struct cpuinfo_x86 *c)
+{
+ u8 node_id;
+ int cpu = smp_processor_id();
+
+ /* get information required for multi-node processors */
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+ int err;
+ u32 eax, ebx, ecx, edx;
+
+ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+ node_id = ecx & 0xff;
+
+ c->cpu_core_id = ebx & 0xff;
+
+ if (smp_num_siblings > 1)
+ c->x86_max_cores /= smp_num_siblings;
+
+ /*
+ * In case leaf B is available, use it to derive
+ * topology information.
+ */
+ err = detect_extended_topology(c);
+ if (!err)
+ c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+
+ cacheinfo_hygon_init_llc_id(c, cpu, node_id);
+ } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+ u64 value;
+
+ rdmsrl(MSR_FAM10H_NODE_ID, value);
+ node_id = value & 7;
+
+ per_cpu(cpu_llc_id, cpu) = node_id;
+ } else
+ return;
+
+ if (nodes_per_socket > 1)
+ set_cpu_cap(c, X86_FEATURE_AMD_DCM);
+}
+
+/*
+ * On Hygon systems the lower bits of the APIC ID distinguish the cores.
+ * Assumes number of cores is a power of two.
+ */
+static void hygon_detect_cmp(struct cpuinfo_x86 *c)
+{
+ unsigned int bits;
+ int cpu = smp_processor_id();
+
+ bits = c->x86_coreid_bits;
+ /* Low order bits define the core id (index of core in socket) */
+ c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+ /* Convert the initial APIC ID into the socket ID */
+ c->phys_proc_id = c->initial_apicid >> bits;
+ /* use socket ID also for last level cache */
+ per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+}
+
+static void srat_detect_node(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_NUMA
+ int cpu = smp_processor_id();
+ int node;
+ unsigned int apicid = c->apicid;
+
+ node = numa_cpu_node(cpu);
+ if (node == NUMA_NO_NODE)
+ node = per_cpu(cpu_llc_id, cpu);
+
+ /*
+ * On multi-fabric platform (e.g. Numascale NumaChip) a
+ * platform-specific handler needs to be called to fixup some
+ * IDs of the CPU.
+ */
+ if (x86_cpuinit.fixup_cpu_id)
+ x86_cpuinit.fixup_cpu_id(c, node);
+
+ if (!node_online(node)) {
+ /*
+ * Two possibilities here:
+ *
+ * - The CPU is missing memory and no node was created. In
+ * that case try picking one from a nearby CPU.
+ *
+ * - The APIC IDs differ from the HyperTransport node IDs.
+ * Assume they are all increased by a constant offset, but
+ * in the same order as the HT nodeids. If that doesn't
+ * result in a usable node fall back to the path for the
+ * previous case.
+ *
+ * This workaround operates directly on the mapping between
+ * APIC ID and NUMA node, assuming certain relationship
+ * between APIC ID, HT node ID and NUMA topology. As going
+ * through CPU mapping may alter the outcome, directly
+ * access __apicid_to_node[].
+ */
+ int ht_nodeid = c->initial_apicid;
+
+ if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+ node = __apicid_to_node[ht_nodeid];
+ /* Pick a nearby node */
+ if (!node_online(node))
+ node = nearby_node(apicid);
+ }
+ numa_set_node(cpu, node);
+#endif
+}
+
+static void early_init_hygon_mc(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+ unsigned int bits, ecx;
+
+ /* Multi core CPU? */
+ if (c->extended_cpuid_level < 0x80000008)
+ return;
+
+ ecx = cpuid_ecx(0x80000008);
+
+ c->x86_max_cores = (ecx & 0xff) + 1;
+
+ /* CPU telling us the core id bits shift? */
+ bits = (ecx >> 12) & 0xF;
+
+ /* Otherwise recompute */
+ if (bits == 0) {
+ while ((1 << bits) < c->x86_max_cores)
+ bits++;
+ }
+
+ c->x86_coreid_bits = bits;
+#endif
+}
+
+static void bsp_init_hygon(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
+ unsigned long long tseg;
+
+ /*
+ * Split up direct mapping around the TSEG SMM area.
+ * Don't do it for gbpages because there seems very little
+ * benefit in doing so.
+ */
+ if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+ unsigned long pfn = tseg >> PAGE_SHIFT;
+
+ pr_debug("tseg: %010llx\n", tseg);
+ if (pfn_range_is_mapped(pfn, pfn + 1))
+ set_memory_4k((unsigned long)__va(tseg), 1);
+ }
+#endif
+
+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+ u64 val;
+
+ rdmsrl(MSR_K7_HWCR, val);
+ if (!(val & BIT(24)))
+ pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
+ }
+
+ if (cpu_has(c, X86_FEATURE_MWAITX))
+ use_mwaitx_delay();
+
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+ u32 ecx;
+
+ ecx = cpuid_ecx(0x8000001e);
+ nodes_per_socket = ((ecx >> 8) & 7) + 1;
+ } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
+ u64 value;
+
+ rdmsrl(MSR_FAM10H_NODE_ID, value);
+ nodes_per_socket = ((value >> 3) & 7) + 1;
+ }
+
+ if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
+ !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ /*
+ * Try to cache the base value so further operations can
+ * avoid RMW. If that faults, do not enable SSBD.
+ */
+ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+ setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
+ setup_force_cpu_cap(X86_FEATURE_SSBD);
+ x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
+ }
+ }
+}
+
+static void early_init_hygon(struct cpuinfo_x86 *c)
+{
+ u32 dummy;
+
+ early_init_hygon_mc(c);
+
+ set_cpu_cap(c, X86_FEATURE_K8);
+
+ rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
+
+ /*
+ * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
+ * with P/T states and does not stop in deep C-states
+ */
+ if (c->x86_power & (1 << 8)) {
+ set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+ set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+ }
+
+ /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
+ if (c->x86_power & BIT(12))
+ set_cpu_cap(c, X86_FEATURE_ACC_POWER);
+
+#ifdef CONFIG_X86_64
+ set_cpu_cap(c, X86_FEATURE_SYSCALL32);
+#endif
+
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
+ /*
+ * ApicID can always be treated as an 8-bit value for Hygon APICs, so we
+ * can safely set X86_FEATURE_EXTD_APICID unconditionally.
+ */
+ if (boot_cpu_has(X86_FEATURE_APIC))
+ set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
+#endif
+
+ /*
+ * This is only needed to tell the kernel whether to use VMCALL
+ * and VMMCALL. VMMCALL is never executed except under virt, so
+ * we can set it unconditionally.
+ */
+ set_cpu_cap(c, X86_FEATURE_VMMCALL);
+
+ hygon_get_topology_early(c);
+}
+
+static void init_hygon(struct cpuinfo_x86 *c)
+{
+ early_init_hygon(c);
+
+ /*
+ * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+ * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+ */
+ clear_cpu_cap(c, 0*32+31);
+
+ set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+ /* get apicid instead of initial apic id from cpuid */
+ c->apicid = hard_smp_processor_id();
+
+ set_cpu_cap(c, X86_FEATURE_ZEN);
+ set_cpu_cap(c, X86_FEATURE_CPB);
+
+ cpu_detect_cache_sizes(c);
+
+ hygon_detect_cmp(c);
+ hygon_get_topology(c);
+ srat_detect_node(c);
+
+ init_hygon_cacheinfo(c);
+
+ if (cpu_has(c, X86_FEATURE_XMM2)) {
+ unsigned long long val;
+ int ret;
+
+ /*
+ * A serializing LFENCE has less overhead than MFENCE, so
+ * use it for execution serialization. On families which
+ * don't have that MSR, LFENCE is already serializing.
+ * msr_set_bit() uses the safe accessors, too, even if the MSR
+ * is not present.
+ */
+ msr_set_bit(MSR_F10H_DECFG,
+ MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+
+ /*
+ * Verify that the MSR write was successful (could be running
+ * under a hypervisor) and only then assume that LFENCE is
+ * serializing.
+ */
+ ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+ if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
+ /* A serializing LFENCE stops RDTSC speculation */
+ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+ } else {
+ /* MFENCE stops RDTSC speculation */
+ set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+ }
+ }
+
+ /*
+ * Hygon processors have APIC timer running in deep C states.
+ */
+ set_cpu_cap(c, X86_FEATURE_ARAT);
+
+ /* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+}
+
+static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
+{
+ u32 ebx, eax, ecx, edx;
+ u16 mask = 0xfff;
+
+ if (c->extended_cpuid_level < 0x80000006)
+ return;
+
+ cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
+
+ tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
+ tlb_lli_4k[ENTRIES] = ebx & mask;
+
+ /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
+ if (!((eax >> 16) & mask))
+ tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
+ else
+ tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
+
+ /* a 4M entry uses two 2M entries */
+ tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
+
+ /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
+ if (!(eax & mask)) {
+ cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
+ tlb_lli_2m[ENTRIES] = eax & 0xff;
+ } else
+ tlb_lli_2m[ENTRIES] = eax & mask;
+
+ tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
+}
+
+static const struct cpu_dev hygon_cpu_dev = {
+ .c_vendor = "Hygon",
+ .c_ident = { "HygonGenuine" },
+ .c_early_init = early_init_hygon,
+ .c_detect_tlb = cpu_detect_tlb_hygon,
+ .c_bsp_init = bsp_init_hygon,
+ .c_init = init_hygon,
+ .c_x86_vendor = X86_VENDOR_HYGON,
+};
+
+cpu_dev_register(hygon_cpu_dev);
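
cpu_dev_register() ties the new driver to parts whose CPUID vendor string is "HygonGenuine" (the c_ident field). The 12-byte string comes from CPUID leaf 0 in EBX/EDX/ECX order; a small standalone probe (userspace, using GCC's <cpuid.h>, for illustration):

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char vendor[13];

        if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
            return 1;
        memcpy(vendor + 0, &ebx, 4);    /* "Hygo" */
        memcpy(vendor + 4, &edx, 4);    /* "nGen" */
        memcpy(vendor + 8, &ecx, 4);    /* "uine" */
        vendor[12] = '\0';
        printf("%s\n", vendor);         /* "HygonGenuine" on these parts */
        return 0;
    }
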
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index abb71ac70443..44272b7107ad 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -485,9 +485,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
size_t tsize;
if (is_llc_occupancy_enabled()) {
- d->rmid_busy_llc = kcalloc(BITS_TO_LONGS(r->num_rmid),
- sizeof(unsigned long),
- GFP_KERNEL);
+ d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
if (!d->rmid_busy_llc)
return -ENOMEM;
INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
@@ -496,7 +494,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
tsize = sizeof(*d->mbm_total);
d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
if (!d->mbm_total) {
- kfree(d->rmid_busy_llc);
+ bitmap_free(d->rmid_busy_llc);
return -ENOMEM;
}
}
@@ -504,7 +502,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
tsize = sizeof(*d->mbm_local);
d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
if (!d->mbm_local) {
- kfree(d->rmid_busy_llc);
+ bitmap_free(d->rmid_busy_llc);
kfree(d->mbm_total);
return -ENOMEM;
}
@@ -610,9 +608,16 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
cancel_delayed_work(&d->cqm_limbo);
}
+ /*
+ * rdt_domain "d" is going to be freed below, so clear
+ * its pointer from pseudo_lock_region struct.
+ */
+ if (d->plr)
+ d->plr->d = NULL;
+
kfree(d->ctrl_val);
kfree(d->mbps_val);
- kfree(d->rmid_busy_llc);
+ bitmap_free(d->rmid_busy_llc);
kfree(d->mbm_total);
kfree(d->mbm_local);
kfree(d);
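
bitmap_zalloc()/bitmap_free() size the allocation in bits and keep allocate/free symmetric, replacing the hand-rolled BITS_TO_LONGS arithmetic. Minimal usage, matching how rmid_busy_llc is handled above (num_rmid and rmid assumed in scope):

    unsigned long *busy = bitmap_zalloc(num_rmid, GFP_KERNEL);  /* zeroed */

    if (!busy)
            return -ENOMEM;
    set_bit(rmid, busy);            /* mark one RMID busy */
    if (test_bit(rmid, busy))
            clear_bit(rmid, busy);
    bitmap_free(busy);              /* always pairs with bitmap_zalloc() */
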
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 4e588f36228f..3736f6dc9545 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -382,6 +382,11 @@ static inline bool is_mbm_event(int e)
e <= QOS_L3_MBM_LOCAL_EVENT_ID);
}
+struct rdt_parse_data {
+ struct rdtgroup *rdtgrp;
+ char *buf;
+};
+
/**
* struct rdt_resource - attributes of an RDT resource
* @rid: The index of the resource
@@ -423,16 +428,19 @@ struct rdt_resource {
struct rdt_cache cache;
struct rdt_membw membw;
const char *format_str;
- int (*parse_ctrlval) (void *data, struct rdt_resource *r,
- struct rdt_domain *d);
+ int (*parse_ctrlval)(struct rdt_parse_data *data,
+ struct rdt_resource *r,
+ struct rdt_domain *d);
struct list_head evt_list;
int num_rmid;
unsigned int mon_scale;
unsigned long fflags;
};
-int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d);
-int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d);
+int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d);
+int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d);
extern struct mutex rdtgroup_mutex;
@@ -521,14 +529,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
int rdtgroup_schemata_show(struct kernfs_open_file *of,
struct seq_file *s, void *v);
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
- u32 _cbm, int closid, bool exclusive);
+ unsigned long cbm, int closid, bool exclusive);
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
- u32 cbm);
+ unsigned long cbm);
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
int rdtgroup_tasks_assigned(struct rdtgroup *r);
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
int rdt_pseudo_lock_init(void);
void rdt_pseudo_lock_release(void);
@@ -536,6 +544,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
int update_domains(struct rdt_resource *r, int closid);
+int closids_supported(void);
void closid_free(int closid);
int alloc_rmid(void);
void free_rmid(u32 rmid);
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index af358ca05160..27937458c231 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -64,19 +64,19 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
-int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d)
{
- unsigned long data;
- char *buf = _buf;
+ unsigned long bw_val;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id);
return -EINVAL;
}
- if (!bw_validate(buf, &data, r))
+ if (!bw_validate(data->buf, &bw_val, r))
return -EINVAL;
- d->new_ctrl = data;
+ d->new_ctrl = bw_val;
d->have_new_ctrl = true;
return 0;
@@ -123,18 +123,13 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
return true;
}
-struct rdt_cbm_parse_data {
- struct rdtgroup *rdtgrp;
- char *buf;
-};
-
/*
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
*/
-int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
+int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d)
{
- struct rdt_cbm_parse_data *data = _data;
struct rdtgroup *rdtgrp = data->rdtgrp;
u32 cbm_val;
@@ -195,11 +190,17 @@ int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
static int parse_line(char *line, struct rdt_resource *r,
struct rdtgroup *rdtgrp)
{
- struct rdt_cbm_parse_data data;
+ struct rdt_parse_data data;
char *dom = NULL, *id;
struct rdt_domain *d;
unsigned long dom_id;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
+ r->rid == RDT_RESOURCE_MBA) {
+ rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
+ return -EINVAL;
+ }
+
next:
if (!line || line[0] == '\0')
return 0;
@@ -403,8 +404,16 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
for_each_alloc_enabled_rdt_resource(r)
seq_printf(s, "%s:uninitialized\n", r->name);
} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
- seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name,
- rdtgrp->plr->d->id, rdtgrp->plr->cbm);
+ if (!rdtgrp->plr->d) {
+ rdt_last_cmd_clear();
+ rdt_last_cmd_puts("Cache domain offline\n");
+ ret = -ENODEV;
+ } else {
+ seq_printf(s, "%s:%d=%x\n",
+ rdtgrp->plr->r->name,
+ rdtgrp->plr->d->id,
+ rdtgrp->plr->cbm);
+ }
} else {
closid = rdtgrp->closid;
for_each_alloc_enabled_rdt_resource(r) {
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
index 40f3903ae5d9..815b4e92522c 100644
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/mman.h>
+#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -26,6 +27,7 @@
#include <asm/intel_rdt_sched.h>
#include <asm/perf_event.h>
+#include "../../events/perf_event.h" /* For X86_CONFIG() */
#include "intel_rdt.h"
#define CREATE_TRACE_POINTS
@@ -91,7 +93,7 @@ static u64 get_prefetch_disable_bits(void)
*/
return 0xF;
case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
/*
* SDM defines bits of MSR_MISC_FEATURE_CONTROL register
* as:
@@ -106,16 +108,6 @@ static u64 get_prefetch_disable_bits(void)
return 0;
}
-/*
- * Helper to write 64bit value to MSR without tracing. Used when
- * use of the cache should be restricted and use of registers used
- * for local variables avoided.
- */
-static inline void pseudo_wrmsrl_notrace(unsigned int msr, u64 val)
-{
- __wrmsr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
-}
-
/**
* pseudo_lock_minor_get - Obtain available minor number
* @minor: Pointer to where new minor number will be stored
@@ -797,25 +789,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
/**
* rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
* @d: RDT domain
- * @_cbm: CBM to test
+ * @cbm: CBM to test
*
- * @d represents a cache instance and @_cbm a capacity bitmask that is
- * considered for it. Determine if @_cbm overlaps with any existing
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
* pseudo-locked region on @d.
*
- * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
* otherwise.
*/
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
{
- unsigned long *cbm = (unsigned long *)&_cbm;
- unsigned long *cbm_b;
unsigned int cbm_len;
+ unsigned long cbm_b;
if (d->plr) {
cbm_len = d->plr->r->cache.cbm_len;
- cbm_b = (unsigned long *)&d->plr->cbm;
- if (bitmap_intersects(cbm, cbm_b, cbm_len))
+ cbm_b = d->plr->cbm;
+ if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
return true;
}
return false;
@@ -886,31 +880,14 @@ static int measure_cycles_lat_fn(void *_plr)
struct pseudo_lock_region *plr = _plr;
unsigned long i;
u64 start, end;
-#ifdef CONFIG_KASAN
- /*
- * The registers used for local register variables are also used
- * when KASAN is active. When KASAN is active we use a regular
- * variable to ensure we always use a valid pointer to access memory.
- * The cost is that accessing this pointer, which could be in
- * cache, will be included in the measurement of memory read latency.
- */
void *mem_r;
-#else
-#ifdef CONFIG_X86_64
- register void *mem_r asm("rbx");
-#else
- register void *mem_r asm("ebx");
-#endif /* CONFIG_X86_64 */
-#endif /* CONFIG_KASAN */
local_irq_disable();
/*
- * The wrmsr call may be reordered with the assignment below it.
- * Call wrmsr as directly as possible to avoid tracing clobbering
- * local register variable used for memory pointer.
+ * Disable hardware prefetchers.
*/
- __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
- mem_r = plr->kmem;
+ wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ mem_r = READ_ONCE(plr->kmem);
/*
* Dummy execute of the time measurement to load the needed
* instructions into the L1 instruction cache.
@@ -932,157 +909,240 @@ static int measure_cycles_lat_fn(void *_plr)
return 0;
}
-static int measure_cycles_perf_fn(void *_plr)
+/*
+ * Create a perf_event_attr for the hit and miss perf events that will
+ * be used during the performance measurement. A perf_event maintains
+ * a pointer to its perf_event_attr so a unique attribute structure is
+ * created for each perf_event.
+ *
+ * The actual configuration of the event is set right before use in order
+ * to use the X86_CONFIG macro.
+ */
+static struct perf_event_attr perf_miss_attr = {
+ .type = PERF_TYPE_RAW,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 0,
+ .exclude_user = 1,
+};
+
+static struct perf_event_attr perf_hit_attr = {
+ .type = PERF_TYPE_RAW,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 0,
+ .exclude_user = 1,
+};
+
+struct residency_counts {
+ u64 miss_before, hits_before;
+ u64 miss_after, hits_after;
+};
+
+static int measure_residency_fn(struct perf_event_attr *miss_attr,
+ struct perf_event_attr *hit_attr,
+ struct pseudo_lock_region *plr,
+ struct residency_counts *counts)
{
- unsigned long long l3_hits = 0, l3_miss = 0;
- u64 l3_hit_bits = 0, l3_miss_bits = 0;
- struct pseudo_lock_region *plr = _plr;
- unsigned long long l2_hits, l2_miss;
- u64 l2_hit_bits, l2_miss_bits;
- unsigned long i;
-#ifdef CONFIG_KASAN
- /*
- * The registers used for local register variables are also used
- * when KASAN is active. When KASAN is active we use regular variables
- * at the cost of including cache access latency to these variables
- * in the measurements.
- */
+ u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
+ struct perf_event *miss_event, *hit_event;
+ int hit_pmcnum, miss_pmcnum;
unsigned int line_size;
unsigned int size;
+ unsigned long i;
void *mem_r;
-#else
- register unsigned int line_size asm("esi");
- register unsigned int size asm("edi");
-#ifdef CONFIG_X86_64
- register void *mem_r asm("rbx");
-#else
- register void *mem_r asm("ebx");
-#endif /* CONFIG_X86_64 */
-#endif /* CONFIG_KASAN */
+ u64 tmp;
+
+ miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
+ NULL, NULL, NULL);
+ if (IS_ERR(miss_event))
+ goto out;
+
+ hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
+ NULL, NULL, NULL);
+ if (IS_ERR(hit_event))
+ goto out_miss;
+
+ local_irq_disable();
+ /*
+ * Check any possible error state of events used by performing
+ * one local read.
+ */
+ if (perf_event_read_local(miss_event, &tmp, NULL, NULL)) {
+ local_irq_enable();
+ goto out_hit;
+ }
+ if (perf_event_read_local(hit_event, &tmp, NULL, NULL)) {
+ local_irq_enable();
+ goto out_hit;
+ }
+
+ /*
+ * Disable hardware prefetchers.
+ */
+ wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+
+	/*
+	 * Initialize the rest of the local variables. The performance
+	 * events have been validated right before this with interrupts
+	 * disabled - it is thus safe to read the counter index.
+	 */
+ miss_pmcnum = x86_perf_rdpmc_index(miss_event);
+ hit_pmcnum = x86_perf_rdpmc_index(hit_event);
+ line_size = READ_ONCE(plr->line_size);
+ mem_r = READ_ONCE(plr->kmem);
+ size = READ_ONCE(plr->size);
+
+ /*
+ * Read counter variables twice - first to load the instructions
+ * used in L1 cache, second to capture accurate value that does not
+ * include cache misses incurred because of instruction loads.
+ */
+ rdpmcl(hit_pmcnum, hits_before);
+ rdpmcl(miss_pmcnum, miss_before);
+ /*
+	 * From the SDM: performing back-to-back fast reads is not
+	 * guaranteed to be monotonic.
+ * Use LFENCE to ensure all previous instructions are retired
+ * before proceeding.
+ */
+ rmb();
+ rdpmcl(hit_pmcnum, hits_before);
+ rdpmcl(miss_pmcnum, miss_before);
+ /*
+ * Use LFENCE to ensure all previous instructions are retired
+ * before proceeding.
+ */
+ rmb();
+ for (i = 0; i < size; i += line_size) {
+ /*
+ * Add a barrier to prevent speculative execution of this
+ * loop reading beyond the end of the buffer.
+ */
+ rmb();
+ asm volatile("mov (%0,%1,1), %%eax\n\t"
+ :
+ : "r" (mem_r), "r" (i)
+ : "%eax", "memory");
+ }
+ /*
+ * Use LFENCE to ensure all previous instructions are retired
+ * before proceeding.
+ */
+ rmb();
+ rdpmcl(hit_pmcnum, hits_after);
+ rdpmcl(miss_pmcnum, miss_after);
+ /*
+ * Use LFENCE to ensure all previous instructions are retired
+ * before proceeding.
+ */
+ rmb();
+ /* Re-enable hardware prefetchers */
+ wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
+ local_irq_enable();
+out_hit:
+ perf_event_release_kernel(hit_event);
+out_miss:
+ perf_event_release_kernel(miss_event);
+out:
+ /*
+ * All counts will be zero on failure.
+ */
+ counts->miss_before = miss_before;
+ counts->hits_before = hits_before;
+ counts->miss_after = miss_after;
+ counts->hits_after = hits_after;
+ return 0;
+}
+
+static int measure_l2_residency(void *_plr)
+{
+ struct pseudo_lock_region *plr = _plr;
+ struct residency_counts counts = {0};
/*
* Non-architectural event for the Goldmont Microarchitecture
* from Intel x86 Architecture Software Developer Manual (SDM):
* MEM_LOAD_UOPS_RETIRED D1H (event number)
* Umask values:
- * L1_HIT 01H
* L2_HIT 02H
- * L1_MISS 08H
* L2_MISS 10H
- *
- * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
- * has two "no fix" errata associated with it: BDM35 and BDM100. On
- * this platform we use the following events instead:
- * L2_RQSTS 24H (Documented in https://download.01.org/perfmon/BDW/)
- * REFERENCES FFH
- * MISS 3FH
- * LONGEST_LAT_CACHE 2EH (Documented in SDM)
- * REFERENCE 4FH
- * MISS 41H
*/
-
- /*
- * Start by setting flags for IA32_PERFEVTSELx:
- * OS (Operating system mode) 0x2
- * INT (APIC interrupt enable) 0x10
- * EN (Enable counter) 0x40
- *
- * Then add the Umask value and event number to select performance
- * event.
- */
-
switch (boot_cpu_data.x86_model) {
case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
- l2_hit_bits = (0x52ULL << 16) | (0x2 << 8) | 0xd1;
- l2_miss_bits = (0x52ULL << 16) | (0x10 << 8) | 0xd1;
- break;
- case INTEL_FAM6_BROADWELL_X:
- /* On BDW the l2_hit_bits count references, not hits */
- l2_hit_bits = (0x52ULL << 16) | (0xff << 8) | 0x24;
- l2_miss_bits = (0x52ULL << 16) | (0x3f << 8) | 0x24;
- /* On BDW the l3_hit_bits count references, not hits */
- l3_hit_bits = (0x52ULL << 16) | (0x4f << 8) | 0x2e;
- l3_miss_bits = (0x52ULL << 16) | (0x41 << 8) | 0x2e;
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ perf_miss_attr.config = X86_CONFIG(.event = 0xd1,
+ .umask = 0x10);
+ perf_hit_attr.config = X86_CONFIG(.event = 0xd1,
+ .umask = 0x2);
break;
default:
goto out;
}
- local_irq_disable();
+ measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
/*
- * Call wrmsr direcly to avoid the local register variables from
- * being overwritten due to reordering of their assignment with
- * the wrmsr calls.
+	 * If a failure prevented the measurements from succeeding,
+	 * tracepoints will still be written and all counts will be zero.
*/
- __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
- /* Disable events and reset counters */
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, 0x0);
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x0);
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0, 0x0);
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 1, 0x0);
- if (l3_hit_bits > 0) {
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x0);
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3, 0x0);
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 2, 0x0);
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_PERFCTR0 + 3, 0x0);
- }
- /* Set and enable the L2 counters */
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0, l2_hit_bits);
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1, l2_miss_bits);
- if (l3_hit_bits > 0) {
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
- l3_hit_bits);
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
- l3_miss_bits);
- }
- mem_r = plr->kmem;
- size = plr->size;
- line_size = plr->line_size;
- for (i = 0; i < size; i += line_size) {
- asm volatile("mov (%0,%1,1), %%eax\n\t"
- :
- : "r" (mem_r), "r" (i)
- : "%eax", "memory");
- }
+ trace_pseudo_lock_l2(counts.hits_after - counts.hits_before,
+ counts.miss_after - counts.miss_before);
+out:
+ plr->thread_done = 1;
+ wake_up_interruptible(&plr->lock_thread_wq);
+ return 0;
+}
+
+static int measure_l3_residency(void *_plr)
+{
+ struct pseudo_lock_region *plr = _plr;
+ struct residency_counts counts = {0};
+
/*
- * Call wrmsr directly (no tracing) to not influence
- * the cache access counters as they are disabled.
+ * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
+ * has two "no fix" errata associated with it: BDM35 and BDM100. On
+ * this platform the following events are used instead:
+ * LONGEST_LAT_CACHE 2EH (Documented in SDM)
+ * REFERENCE 4FH
+ * MISS 41H
*/
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0,
- l2_hit_bits & ~(0x40ULL << 16));
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 1,
- l2_miss_bits & ~(0x40ULL << 16));
- if (l3_hit_bits > 0) {
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 2,
- l3_hit_bits & ~(0x40ULL << 16));
- pseudo_wrmsrl_notrace(MSR_ARCH_PERFMON_EVENTSEL0 + 3,
- l3_miss_bits & ~(0x40ULL << 16));
- }
- l2_hits = native_read_pmc(0);
- l2_miss = native_read_pmc(1);
- if (l3_hit_bits > 0) {
- l3_hits = native_read_pmc(2);
- l3_miss = native_read_pmc(3);
+
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_BROADWELL_X:
+ /* On BDW the hit event counts references, not hits */
+ perf_hit_attr.config = X86_CONFIG(.event = 0x2e,
+ .umask = 0x4f);
+ perf_miss_attr.config = X86_CONFIG(.event = 0x2e,
+ .umask = 0x41);
+ break;
+ default:
+ goto out;
}
- wrmsr(MSR_MISC_FEATURE_CONTROL, 0x0, 0x0);
- local_irq_enable();
+
+ measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
/*
- * On BDW we count references and misses, need to adjust. Sometimes
- * the "hits" counter is a bit more than the references, for
- * example, x references but x + 1 hits. To not report invalid
- * hit values in this case we treat that as misses eaqual to
- * references.
+	 * If a failure prevented the measurements from succeeding,
+	 * tracepoints will still be written and all counts will be zero.
*/
- if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
- l2_hits -= (l2_miss > l2_hits ? l2_hits : l2_miss);
- trace_pseudo_lock_l2(l2_hits, l2_miss);
- if (l3_hit_bits > 0) {
- if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X)
- l3_hits -= (l3_miss > l3_hits ? l3_hits : l3_miss);
- trace_pseudo_lock_l3(l3_hits, l3_miss);
+
+ counts.miss_after -= counts.miss_before;
+ if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X) {
+ /*
+ * On BDW references and misses are counted, need to adjust.
+ * Sometimes the "hits" counter is a bit more than the
+ * references, for example, x references but x + 1 hits.
+ * To not report invalid hit values in this case we treat
+ * that as misses equal to references.
+ */
+ /* First compute the number of cache references measured */
+ counts.hits_after -= counts.hits_before;
+ /* Next convert references to cache hits */
+ counts.hits_after -= min(counts.miss_after, counts.hits_after);
+ } else {
+ counts.hits_after -= counts.hits_before;
}
+ trace_pseudo_lock_l3(counts.hits_after, counts.miss_after);
out:
plr->thread_done = 1;
wake_up_interruptible(&plr->lock_thread_wq);
@@ -1114,6 +1174,11 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
goto out;
}
+ if (!plr->d) {
+ ret = -ENODEV;
+ goto out;
+ }
+
plr->thread_done = 0;
cpu = cpumask_first(&plr->d->cpu_mask);
if (!cpu_online(cpu)) {
@@ -1121,13 +1186,20 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
goto out;
}
+ plr->cpu = cpu;
+
if (sel == 1)
thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
cpu_to_node(cpu),
"pseudo_lock_measure/%u",
cpu);
else if (sel == 2)
- thread = kthread_create_on_node(measure_cycles_perf_fn, plr,
+ thread = kthread_create_on_node(measure_l2_residency, plr,
+ cpu_to_node(cpu),
+ "pseudo_lock_measure/%u",
+ cpu);
+ else if (sel == 3)
+ thread = kthread_create_on_node(measure_l3_residency, plr,
cpu_to_node(cpu),
"pseudo_lock_measure/%u",
cpu);
@@ -1171,7 +1243,7 @@ static ssize_t pseudo_lock_measure_trigger(struct file *file,
buf[buf_size] = '\0';
ret = kstrtoint(buf, 10, &sel);
if (ret == 0) {
- if (sel != 1)
+ if (sel != 1 && sel != 2 && sel != 3)
return -EINVAL;
ret = debugfs_file_get(file->f_path.dentry);
if (ret)
@@ -1427,6 +1499,11 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
plr = rdtgrp->plr;
+ if (!plr->d) {
+ mutex_unlock(&rdtgroup_mutex);
+ return -ENODEV;
+ }
+
/*
* Task is required to run with affinity to the cpus associated
* with the pseudo-locked region. If this is not the case the task
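
The measurement rework above follows one pattern throughout: create a pinned kernel counter, validate it with a single local read, then sample it with rdpmcl() around the workload. A condensed sketch of the create/validate/release sequence; read_one_event is a hypothetical helper name and error handling is trimmed:

static u64 read_one_event(struct perf_event_attr *attr, int cpu)
{
	struct perf_event *event;
	u64 val = 0;

	event = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
	if (IS_ERR(event))
		return 0;

	/* One local read doubles as a check for event error state */
	if (perf_event_read_local(event, &val, NULL, NULL))
		val = 0;

	perf_event_release_kernel(event);
	return val;
}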
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index b799c00bef09..f27b8115ffa2 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -97,6 +97,12 @@ void rdt_last_cmd_printf(const char *fmt, ...)
* limited as the number of resources grows.
*/
static int closid_free_map;
+static int closid_free_map_len;
+
+int closids_supported(void)
+{
+ return closid_free_map_len;
+}
static void closid_init(void)
{
@@ -111,6 +117,7 @@ static void closid_init(void)
/* CLOSID 0 is always reserved for the default group */
closid_free_map &= ~1;
+ closid_free_map_len = rdt_min_closid;
}
static int closid_alloc(void)
@@ -261,17 +268,27 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
struct seq_file *s, void *v)
{
struct rdtgroup *rdtgrp;
+ struct cpumask *mask;
int ret = 0;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (rdtgrp) {
- if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
- seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
- cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
- else
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+ if (!rdtgrp->plr->d) {
+ rdt_last_cmd_clear();
+ rdt_last_cmd_puts("Cache domain offline\n");
+ ret = -ENODEV;
+ } else {
+ mask = &rdtgrp->plr->d->cpu_mask;
+ seq_printf(s, is_cpu_list(of) ?
+ "%*pbl\n" : "%*pb\n",
+ cpumask_pr_args(mask));
+ }
+ } else {
seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
cpumask_pr_args(&rdtgrp->cpu_mask));
+ }
} else {
ret = -ENOENT;
}
@@ -802,7 +819,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
sw_shareable = 0;
exclusive = 0;
seq_printf(seq, "%d=", dom->id);
- for (i = 0; i < r->num_closid; i++, ctrl++) {
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
if (!closid_allocated(i))
continue;
mode = rdtgroup_mode_by_closid(i);
@@ -954,7 +971,78 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
}
/**
- * rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
+ * rdt_cdp_peer_get - Retrieve CDP peer if it exists
+ * @r: RDT resource to which RDT domain @d belongs
+ * @d: Cache instance for which a CDP peer is requested
+ * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
+ * Used to return the result.
+ * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
+ * Used to return the result.
+ *
+ * RDT resources are managed independently and by extension the RDT domains
+ * (RDT resource instances) are managed independently also. The Code and
+ * Data Prioritization (CDP) RDT resources, while managed independently,
+ * could refer to the same underlying hardware. For example,
+ * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
+ *
+ * When provided with an RDT resource @r and an instance @d of that
+ * resource, rdt_cdp_peer_get() will determine whether a peer RDT
+ * resource exists and, if so, return that peer resource and the exact
+ * instance that shares the same hardware.
+ *
+ * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
+ * If a CDP peer was found, @r_cdp will point to the peer RDT resource
+ * and @d_cdp will point to the peer RDT domain.
+ */
+static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
+ struct rdt_resource **r_cdp,
+ struct rdt_domain **d_cdp)
+{
+ struct rdt_resource *_r_cdp = NULL;
+ struct rdt_domain *_d_cdp = NULL;
+ int ret = 0;
+
+ switch (r->rid) {
+ case RDT_RESOURCE_L3DATA:
+ _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
+ break;
+ case RDT_RESOURCE_L3CODE:
+ _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA];
+ break;
+ case RDT_RESOURCE_L2DATA:
+ _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE];
+ break;
+ case RDT_RESOURCE_L2CODE:
+ _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA];
+ break;
+ default:
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /*
+ * When a new CPU comes online and CDP is enabled then the new
+ * RDT domains (if any) associated with both CDP RDT resources
+ * are added in the same CPU online routine while the
+	 * rdtgroup_mutex is held. It should thus never happen that one
+	 * RDT domain exists and is associated with its RDT CDP
+	 * resource while no RDT domain is associated with the
+	 * peer RDT CDP resource. Hence the WARN.
+ */
+ _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
+ if (WARN_ON(!_d_cdp)) {
+ _r_cdp = NULL;
+ ret = -EINVAL;
+ }
+
+out:
+ *r_cdp = _r_cdp;
+ *d_cdp = _d_cdp;
+
+ return ret;
+}
+
+/**
+ * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
* @r: Resource to which domain instance @d belongs.
* @d: The domain instance for which @closid is being tested.
* @cbm: Capacity bitmask being tested.
@@ -968,33 +1056,34 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
* is false then overlaps with any resource group or hardware entities
* will be considered.
*
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
* Return: false if CBM does not overlap, true if it does.
*/
-bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
- u32 _cbm, int closid, bool exclusive)
+static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+ unsigned long cbm, int closid, bool exclusive)
{
- unsigned long *cbm = (unsigned long *)&_cbm;
- unsigned long *ctrl_b;
enum rdtgrp_mode mode;
+ unsigned long ctrl_b;
u32 *ctrl;
int i;
/* Check for any overlap with regions used by hardware directly */
if (!exclusive) {
- if (bitmap_intersects(cbm,
- (unsigned long *)&r->cache.shareable_bits,
- r->cache.cbm_len))
+ ctrl_b = r->cache.shareable_bits;
+ if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
return true;
}
/* Check for overlap with other resource groups */
ctrl = d->ctrl_val;
- for (i = 0; i < r->num_closid; i++, ctrl++) {
- ctrl_b = (unsigned long *)ctrl;
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
+ ctrl_b = *ctrl;
mode = rdtgroup_mode_by_closid(i);
if (closid_allocated(i) && i != closid &&
mode != RDT_MODE_PSEUDO_LOCKSETUP) {
- if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+ if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
if (exclusive) {
if (mode == RDT_MODE_EXCLUSIVE)
return true;
@@ -1009,6 +1098,41 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
}
/**
+ * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
+ * @r: Resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Resources that can be allocated using a CBM can use the CBM to control
+ * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
+ * for overlap. The overlap test is not limited to the specific resource
+ * for which the CBM is intended, though: when dealing with CDP resources
+ * that share the underlying hardware, the overlap check is also performed
+ * on the peer CDP resource.
+ *
+ * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
+ * overlap test.
+ *
+ * Return: true if CBM overlap detected, false if there is no overlap
+ */
+bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+ unsigned long cbm, int closid, bool exclusive)
+{
+ struct rdt_resource *r_cdp;
+ struct rdt_domain *d_cdp;
+
+ if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
+ return true;
+
+ if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
+ return false;
+
+ return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
+}
+
+/**
* rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
*
* An exclusive resource group implies that there should be no sharing of
@@ -1024,16 +1148,27 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
int closid = rdtgrp->closid;
struct rdt_resource *r;
+ bool has_cache = false;
struct rdt_domain *d;
for_each_alloc_enabled_rdt_resource(r) {
+ if (r->rid == RDT_RESOURCE_MBA)
+ continue;
+ has_cache = true;
list_for_each_entry(d, &r->domains, list) {
if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
- rdtgrp->closid, false))
+ rdtgrp->closid, false)) {
+ rdt_last_cmd_puts("schemata overlaps\n");
return false;
+ }
}
}
+ if (!has_cache) {
+ rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
+ return false;
+ }
+
return true;
}
@@ -1085,7 +1220,6 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
rdtgrp->mode = RDT_MODE_SHAREABLE;
} else if (!strcmp(buf, "exclusive")) {
if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
- rdt_last_cmd_printf("schemata overlaps\n");
ret = -EINVAL;
goto out;
}
@@ -1121,15 +1255,18 @@ out:
* computed by first dividing the total cache size by the CBM length to
* determine how many bytes each bit in the bitmask represents. The result
* is multiplied with the number of bits set in the bitmask.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
*/
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
- struct rdt_domain *d, u32 cbm)
+ struct rdt_domain *d, unsigned long cbm)
{
struct cpu_cacheinfo *ci;
unsigned int size = 0;
int num_b, i;
- num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+ num_b = bitmap_weight(&cbm, r->cache.cbm_len);
ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
for (i = 0; i < ci->num_leaves; i++) {
if (ci->info_list[i].level == r->cache_level) {
@@ -1155,8 +1292,9 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
struct rdt_resource *r;
struct rdt_domain *d;
unsigned int size;
- bool sep = false;
- u32 cbm;
+ int ret = 0;
+ bool sep;
+ u32 ctrl;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (!rdtgrp) {
@@ -1165,15 +1303,23 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
}
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
- seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
- size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
- rdtgrp->plr->d,
- rdtgrp->plr->cbm);
- seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+ if (!rdtgrp->plr->d) {
+ rdt_last_cmd_clear();
+ rdt_last_cmd_puts("Cache domain offline\n");
+ ret = -ENODEV;
+ } else {
+ seq_printf(s, "%*s:", max_name_width,
+ rdtgrp->plr->r->name);
+ size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+ rdtgrp->plr->d,
+ rdtgrp->plr->cbm);
+ seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+ }
goto out;
}
for_each_alloc_enabled_rdt_resource(r) {
+ sep = false;
seq_printf(s, "%*s:", max_name_width, r->name);
list_for_each_entry(d, &r->domains, list) {
if (sep)
@@ -1181,8 +1327,13 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
size = 0;
} else {
- cbm = d->ctrl_val[rdtgrp->closid];
- size = rdtgroup_cbm_to_size(r, d, cbm);
+ ctrl = (!is_mba_sc(r) ?
+ d->ctrl_val[rdtgrp->closid] :
+ d->mbps_val[rdtgrp->closid]);
+ if (r->rid == RDT_RESOURCE_MBA)
+ size = ctrl;
+ else
+ size = rdtgroup_cbm_to_size(r, d, ctrl);
}
seq_printf(s, "%d=%u", d->id, size);
sep = true;
@@ -1193,7 +1344,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
out:
rdtgroup_kn_unlock(of->kn);
- return 0;
+ return ret;
}
/* rdtgroup information files for one cache resource. */
@@ -2327,28 +2478,48 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
*/
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
+ struct rdt_resource *r_cdp = NULL;
+ struct rdt_domain *d_cdp = NULL;
u32 used_b = 0, unused_b = 0;
u32 closid = rdtgrp->closid;
struct rdt_resource *r;
+ unsigned long tmp_cbm;
enum rdtgrp_mode mode;
struct rdt_domain *d;
+ u32 peer_ctl, *ctrl;
int i, ret;
- u32 *ctrl;
for_each_alloc_enabled_rdt_resource(r) {
+ /*
+ * Only initialize default allocations for CBM cache
+ * resources
+ */
+ if (r->rid == RDT_RESOURCE_MBA)
+ continue;
list_for_each_entry(d, &r->domains, list) {
+ rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
d->have_new_ctrl = false;
d->new_ctrl = r->cache.shareable_bits;
used_b = r->cache.shareable_bits;
ctrl = d->ctrl_val;
- for (i = 0; i < r->num_closid; i++, ctrl++) {
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
if (closid_allocated(i) && i != closid) {
mode = rdtgroup_mode_by_closid(i);
if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
break;
- used_b |= *ctrl;
+ /*
+ * If CDP is active include peer
+ * domain's usage to ensure there
+ * is no overlap with an exclusive
+ * group.
+ */
+ if (d_cdp)
+ peer_ctl = d_cdp->ctrl_val[i];
+ else
+ peer_ctl = 0;
+ used_b |= *ctrl | peer_ctl;
if (mode == RDT_MODE_SHAREABLE)
- d->new_ctrl |= *ctrl;
+ d->new_ctrl |= *ctrl | peer_ctl;
}
}
if (d->plr && d->plr->cbm > 0)
@@ -2361,9 +2532,14 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
* modify the CBM based on system availability.
*/
cbm_ensure_valid(&d->new_ctrl, r);
- if (bitmap_weight((unsigned long *) &d->new_ctrl,
- r->cache.cbm_len) <
- r->cache.min_cbm_bits) {
+ /*
+ * Assign the u32 CBM to an unsigned long to ensure
+	 * that bitmap_weight() does not access out-of-bounds
+ * memory.
+ */
+ tmp_cbm = d->new_ctrl;
+ if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
+ r->cache.min_cbm_bits) {
rdt_last_cmd_printf("no space on %s:%d\n",
r->name, d->id);
return -ENOSPC;
@@ -2373,6 +2549,12 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
}
for_each_alloc_enabled_rdt_resource(r) {
+ /*
+ * Only initialize default allocations for CBM cache
+ * resources
+ */
+ if (r->rid == RDT_RESOURCE_MBA)
+ continue;
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("failed to initialize allocations\n");
@@ -2760,6 +2942,13 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
seq_puts(seq, ",cdp");
+
+ if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
+ seq_puts(seq, ",cdpl2");
+
+ if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
+ seq_puts(seq, ",mba_MBps");
+
return 0;
}
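
Several hunks in this file repeat one idiom: copy a u32 control value into a full unsigned long before handing its address to a bitmap helper. The bitmap API operates on arrays of unsigned long, so casting a u32's address would read 4 bytes past the variable on 64-bit kernels. A minimal sketch of the safe pattern; cbm_weight is a hypothetical name:

static int cbm_weight(u32 cbm, unsigned int cbm_len)
{
	/* Widen first: (unsigned long *)&cbm would over-read on 64-bit */
	unsigned long tmp = cbm;

	return bitmap_weight(&tmp, cbm_len);
}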
diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
index 97685a0c3175..27f394ac983f 100644
--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
@@ -38,9 +38,6 @@ static struct mce_log_buffer mcelog = {
static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
-/* User mode helper program triggered by machine check event */
-extern char mce_helper[128];
-
static int dev_mce_log(struct notifier_block *nb, unsigned long val,
void *data)
{
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index c805a06e14c3..1fc424c40a31 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -108,6 +108,9 @@ static void setup_inj_struct(struct mce *m)
memset(m, 0, sizeof(struct mce));
m->cpuvendor = boot_cpu_data.x86_vendor;
+ m->time = ktime_get_real_seconds();
+ m->cpuid = cpuid_eax(1);
+ m->microcode = boot_cpu_data.microcode;
}
/* Update fake mce registers on current CPU. */
@@ -576,6 +579,9 @@ static int inj_bank_set(void *data, u64 val)
m->bank = val;
do_inject();
+ /* Reset injection struct */
+ setup_inj_struct(&i_mce);
+
return 0;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index f34d89c01edc..44396d521987 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -336,7 +336,8 @@ int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) =
void __init mcheck_vendor_init_severity(void)
{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
mce_severity = mce_severity_amd;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 953b3ce92dcc..8cb3c02980cf 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -270,7 +270,7 @@ static void print_mce(struct mce *m)
{
__print_mce(m);
- if (m->cpuvendor != X86_VENDOR_AMD)
+ if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}
@@ -508,9 +508,9 @@ static int mce_usable_address(struct mce *m)
bool mce_is_memory_error(struct mce *m)
{
- if (m->cpuvendor == X86_VENDOR_AMD) {
+ if (m->cpuvendor == X86_VENDOR_AMD ||
+ m->cpuvendor == X86_VENDOR_HYGON) {
return amd_mce_is_memory_error(m);
-
} else if (m->cpuvendor == X86_VENDOR_INTEL) {
/*
* Intel SDM Volume 3B - 15.9.2 Compound Error Codes
@@ -539,6 +539,9 @@ static bool mce_is_correctable(struct mce *m)
if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
return false;
+ if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
+ return false;
+
if (m->status & MCI_STATUS_UC)
return false;
@@ -1315,7 +1318,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
local_irq_disable();
ist_end_non_atomic();
} else {
- if (!fixup_exception(regs, X86_TRAP_MC))
+ if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))
mce_panic("Failed kernel mode recovery", &m, NULL);
}
@@ -1705,7 +1708,7 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
*/
static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
{
- if (c->x86_vendor == X86_VENDOR_AMD) {
+ if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR);
mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA);
@@ -1746,6 +1749,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
mce_amd_feature_init(c);
break;
}
+
+ case X86_VENDOR_HYGON:
+ mce_hygon_feature_init(c);
+ break;
+
case X86_VENDOR_CENTAUR:
mce_centaur_feature_init(c);
break;
@@ -1971,12 +1979,14 @@ static void mce_disable_error_reporting(void)
static void vendor_disable_error_reporting(void)
{
/*
- * Don't clear on Intel or AMD CPUs. Some of these MSRs are socket-wide.
+ * Don't clear on Intel or AMD or Hygon CPUs. Some of these MSRs
+ * are socket-wide.
* Disabling them for just a single offlined CPU is bad, since it will
* inhibit reporting for all shared resources on the socket like the
* last level cache (LLC), the integrated memory controller (iMC), etc.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
return;
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index ad12733f6058..1c72f3819eb1 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -199,6 +199,16 @@ static unsigned long hv_get_tsc_khz(void)
return freq / 1000;
}
+#if defined(CONFIG_SMP) && IS_ENABLED(CONFIG_HYPERV)
+static void __init hv_smp_prepare_boot_cpu(void)
+{
+ native_smp_prepare_boot_cpu();
+#if defined(CONFIG_X86_64) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+ hv_init_spinlocks();
+#endif
+}
+#endif
+
static void __init ms_hyperv_init_platform(void)
{
int hv_host_info_eax;
@@ -303,6 +313,10 @@ static void __init ms_hyperv_init_platform(void)
if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE)
alloc_intr_gate(HYPERV_STIMER0_VECTOR,
hv_stimer0_callback_vector);
+
+# ifdef CONFIG_SMP
+ smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
+# endif
#endif
}
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 765afd599039..3668c5df90c6 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -831,7 +831,8 @@ int __init amd_special_default_mtrr(void)
{
u32 l, h;
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return 0;
if (boot_cpu_data.x86 < 0xf)
return 0;
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 9a19c800fe40..507039c20128 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -127,7 +127,7 @@ static void __init set_num_var_ranges(void)
if (use_intel())
rdmsr(MSR_MTRRcap, config, dummy);
- else if (is_cpu(AMD))
+ else if (is_cpu(AMD) || is_cpu(HYGON))
config = 2;
else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
config = 8;
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index d389083330c5..9556930cd8c1 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -46,6 +46,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
/* returns the bit offset of the performance counter register */
switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
if (msr >= MSR_F15H_PERF_CTR)
return (msr - MSR_F15H_PERF_CTR) >> 1;
@@ -74,6 +75,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
/* returns the bit offset of the event selection register */
switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
if (msr >= MSR_F15H_PERF_CTL)
return (msr - MSR_F15H_PERF_CTL) >> 1;
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 8e005329648b..d9ab49bed8af 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -97,14 +97,14 @@ static void __init vmware_sched_clock_setup(void)
d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
d->cyc2ns_shift);
- pv_time_ops.sched_clock = vmware_sched_clock;
+ pv_ops.time.sched_clock = vmware_sched_clock;
pr_info("using sched offset of %llu ns\n", d->cyc2ns_offset);
}
static void __init vmware_paravirt_ops_setup(void)
{
pv_info.name = "VMware hypervisor";
- pv_cpu_ops.io_delay = paravirt_nop;
+ pv_ops.cpu.io_delay = paravirt_nop;
if (vmware_tsc_khz && vmw_sched_clock)
vmware_sched_clock_setup();
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 4f2e0778feac..eb8ab3915268 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -11,40 +11,62 @@
#include <linux/uaccess.h>
#include <linux/io.h>
-/**
- * copy_oldmem_page - copy one page from "oldmem"
- * @pfn: page frame number to be copied
- * @buf: target memory address for the copy; this can be in kernel address
- * space or user address space (see @userbuf)
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page (based on pfn) to begin the copy
- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
- * otherwise @buf is in kernel address space, use memcpy().
- *
- * Copy a page from "oldmem". For this page, there is no pte mapped
- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+ unsigned long offset, int userbuf,
+ bool encrypted)
{
void *vaddr;
if (!csize)
return 0;
- vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (encrypted)
+ vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
+ else
+ vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
+
if (!vaddr)
return -ENOMEM;
if (userbuf) {
- if (copy_to_user(buf, vaddr + offset, csize)) {
- iounmap(vaddr);
+ if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
+ iounmap((void __iomem *)vaddr);
return -EFAULT;
}
} else
memcpy(buf, vaddr + offset, csize);
set_iounmap_nonlazy();
- iounmap(vaddr);
+ iounmap((void __iomem *)vaddr);
return csize;
}
+
+/**
+ * copy_oldmem_page - copy one page of memory
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from the old kernel's memory. For this page, there is no pte
+ * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+ unsigned long offset, int userbuf)
+{
+ return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false);
+}
+
+/**
+ * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
+ * memory with the encryption mask set to accommodate kdump on SME-enabled
+ * machines.
+ */
+ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
+ unsigned long offset, int userbuf)
+{
+ return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
+}
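
A caller can now pick the variant that matches how the crashed kernel's memory was mapped. The call site below is a hypothetical sketch, not part of this patch; the real consumer lives in the vmcore code:

static ssize_t read_from_oldmem_page(unsigned long pfn, char *buf,
				     size_t csize, unsigned long offset,
				     int userbuf, bool encrypted)
{
	if (encrypted)
		return copy_oldmem_page_encrypted(pfn, buf, csize,
						  offset, userbuf);
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}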
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 23f1691670b6..61a949d84dfa 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -314,7 +314,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
* thread's fpu state, reconstruct fxstate from the fsave
* header. Validate and sanitize the copied state.
*/
- struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env;
int err = 0;
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index ec6fefbfd3c0..76fa3b836598 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -37,6 +37,7 @@ asmlinkage __visible void __init i386_start_kernel(void)
cr4_init_shadow();
sanitize_boot_params(&boot_params);
+ x86_verify_bootdata_version();
x86_early_init_platform_quirks();
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 8047379e575a..5dc377dc9d7b 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -35,6 +35,7 @@
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
+#include <asm/fixmap.h>
/*
* Manage page tables very early on.
@@ -112,6 +113,7 @@ static bool __head check_la57_support(unsigned long physaddr)
unsigned long __head __startup_64(unsigned long physaddr,
struct boot_params *bp)
{
+ unsigned long vaddr, vaddr_end;
unsigned long load_delta, *p;
unsigned long pgtable_flags;
pgdval_t *pgd;
@@ -165,7 +167,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
pud[511] += load_delta;
pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
- pmd[506] += load_delta;
+ for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
+ pmd[i] += load_delta;
/*
* Set up the identity mapping for the switchover. These
@@ -235,6 +238,21 @@ unsigned long __head __startup_64(unsigned long physaddr,
sme_encrypt_kernel(bp);
/*
+ * Clear the memory encryption mask from the .bss..decrypted section.
+ * The bss section will be memset to zero later in the initialization so
+ * there is no need to zero it after changing the memory encryption
+ * attribute.
+ */
+ if (mem_encrypt_active()) {
+ vaddr = (unsigned long)__start_bss_decrypted;
+ vaddr_end = (unsigned long)__end_bss_decrypted;
+ for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
+ i = pmd_index(vaddr);
+ pmd[i] -= sme_get_me_mask();
+ }
+ }
+
+ /*
* Return the SME encryption mask (if SME is active) to be used as a
* modifier for the initial pgdir entry programmed into CR3.
*/
@@ -439,6 +457,8 @@ void __init x86_64_start_reservations(char *real_mode_data)
if (!boot_params.hdr.version)
copy_bootdata(__va(real_mode_data));
+ x86_verify_bootdata_version();
+
x86_early_init_platform_quirks();
switch (boot_params.hdr.hardware_subarch) {
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 15ebc2fc166e..747c758f67b7 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -24,8 +24,9 @@
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
+#include <asm/fixmap.h>
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
@@ -445,13 +446,20 @@ NEXT_PAGE(level2_kernel_pgt)
KERNEL_IMAGE_SIZE/PMD_SIZE)
NEXT_PAGE(level2_fixmap_pgt)
- .fill 506,8,0
- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
- .fill 5,8,0
+ .fill (512 - 4 - FIXMAP_PMD_NUM),8,0
+ pgtno = 0
+ .rept (FIXMAP_PMD_NUM)
+ .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
+ + _PAGE_TABLE_NOENC;
+ pgtno = pgtno + 1
+ .endr
+ /* 6 MB reserved space + a 2MB hole */
+ .fill 4,8,0
NEXT_PAGE(level1_fixmap_pgt)
+ .rept (FIXMAP_PMD_NUM)
.fill 512,8,0
+ .endr
#undef PMDS
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index eeea935e9bb5..aac0c1f7e354 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -42,55 +42,40 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
void *(*poker)(void *, const void *, size_t),
int init)
{
- union jump_code_union code;
+ union jump_code_union jmp;
const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
+ const void *expect, *code;
+ int line;
+
+ jmp.jump = 0xe9;
+ jmp.offset = jump_entry_target(entry) -
+ (jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
if (early_boot_irqs_disabled)
poker = text_poke_early;
if (type == JUMP_LABEL_JMP) {
if (init) {
- /*
- * Jump label is enabled for the first time.
- * So we expect a default_nop...
- */
- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
- != 0))
- bug_at((void *)entry->code, __LINE__);
+ expect = default_nop; line = __LINE__;
} else {
- /*
- * ...otherwise expect an ideal_nop. Otherwise
- * something went horribly wrong.
- */
- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
- != 0))
- bug_at((void *)entry->code, __LINE__);
+ expect = ideal_nop; line = __LINE__;
}
- code.jump = 0xe9;
- code.offset = entry->target -
- (entry->code + JUMP_LABEL_NOP_SIZE);
+ code = &jmp.code;
} else {
- /*
- * We are disabling this jump label. If it is not what
- * we think it is, then something must have gone wrong.
- * If this is the first initialization call, then we
- * are converting the default nop to the ideal nop.
- */
if (init) {
- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
- bug_at((void *)entry->code, __LINE__);
+ expect = default_nop; line = __LINE__;
} else {
- code.jump = 0xe9;
- code.offset = entry->target -
- (entry->code + JUMP_LABEL_NOP_SIZE);
- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
- bug_at((void *)entry->code, __LINE__);
+ expect = &jmp.code; line = __LINE__;
}
- memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
+
+ code = ideal_nop;
}
+ if (memcmp((void *)jump_entry_code(entry), expect, JUMP_LABEL_NOP_SIZE))
+ bug_at((void *)jump_entry_code(entry), line);
+
/*
* Make text_poke_bp() a default fallback poker.
*
@@ -99,11 +84,14 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
* always nop being the 'currently valid' instruction
*
*/
- if (poker)
- (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
- else
- text_poke_bp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE,
- (void *)entry->code + JUMP_LABEL_NOP_SIZE);
+ if (poker) {
+ (*poker)((void *)jump_entry_code(entry), code,
+ JUMP_LABEL_NOP_SIZE);
+ return;
+ }
+
+ text_poke_bp((void *)jump_entry_code(entry), code, JUMP_LABEL_NOP_SIZE,
+ (void *)jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
}
void arch_jump_label_transform(struct jump_entry *entry,
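
The jmp.jump/jmp.offset assignments at the top of the rewritten function build the 5-byte near jump: opcode 0xe9 followed by a 32-bit displacement measured from the end of the instruction. A standalone sketch of that encoding; encode_jmp and the local union are illustrative, not kernel API:

union jump_insn {
	char code[5];
	struct {
		char jump;	/* 0xe9: JMP rel32 */
		int offset;	/* target - (site + 5) */
	} __attribute__((packed));
};

static void encode_jmp(union jump_insn *jmp, unsigned long site,
		       unsigned long target)
{
	jmp->jump = 0xe9;
	/* displacement is relative to the first byte after the insn */
	jmp->offset = target - (site + 5);
}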
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index b0d1e81c96bb..c33b06f5faa4 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1020,64 +1020,18 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
-
- /*
- * In case the user-specified fault handler returned
- * zero, try to fix up.
- */
- if (fixup_exception(regs, trapnr))
- return 1;
-
- /*
- * fixup routine could not handle it,
- * Let do_page_fault() fix it.
- */
}
return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
-/*
- * Wrapper routine for handling exceptions.
- */
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
- void *data)
-{
- struct die_args *args = data;
- int ret = NOTIFY_DONE;
-
- if (args->regs && user_mode(args->regs))
- return ret;
-
- if (val == DIE_GPF) {
- /*
- * To be potentially processing a kprobe fault and to
- * trust the result from kprobe_running(), we have
- * be non-preemptible.
- */
- if (!preemptible() && kprobe_running() &&
- kprobe_fault_handler(args->regs, args->trapnr))
- ret = NOTIFY_STOP;
- }
- return ret;
-}
-NOKPROBE_SYMBOL(kprobe_exceptions_notify);
-
bool arch_within_kprobe_blacklist(unsigned long addr)
{
- bool is_in_entry_trampoline_section = false;
-
-#ifdef CONFIG_X86_64
- is_in_entry_trampoline_section =
- (addr >= (unsigned long)__entry_trampoline_start &&
- addr < (unsigned long)__entry_trampoline_end);
-#endif
return (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end) ||
(addr >= (unsigned long)__entry_text_start &&
- addr < (unsigned long)__entry_text_end) ||
- is_in_entry_trampoline_section;
+ addr < (unsigned long)__entry_text_end);
}
int __init arch_init_kprobes(void)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index eaf02f2e7300..40b16b270656 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -179,7 +179,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
opt_pre_handler(&op->kp, regs);
__this_cpu_write(current_kprobe, NULL);
}
- preempt_enable_no_resched();
+ preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d9b71924c23c..ba4bfb7f6a36 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -283,7 +283,7 @@ static void __init paravirt_ops_setup(void)
pv_info.name = "KVM";
if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
- pv_cpu_ops.io_delay = kvm_io_delay;
+ pv_ops.cpu.io_delay = kvm_io_delay;
#ifdef CONFIG_X86_IO_APIC
no_timer_check = 1;
@@ -632,14 +632,14 @@ static void __init kvm_guest_init(void)
if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
has_steal_clock = 1;
- pv_time_ops.steal_clock = kvm_steal_clock;
+ pv_ops.time.steal_clock = kvm_steal_clock;
}
if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
- pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
- pv_mmu_ops.tlb_remove_table = tlb_remove_table;
+ pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
+ pv_ops.mmu.tlb_remove_table = tlb_remove_table;
}
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -850,13 +850,14 @@ void __init kvm_spinlock_init(void)
return;
__pv_init_lock_hash();
- pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
- pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
- pv_lock_ops.wait = kvm_wait;
- pv_lock_ops.kick = kvm_kick_cpu;
+ pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+ pv_ops.lock.queued_spin_unlock =
+ PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+ pv_ops.lock.wait = kvm_wait;
+ pv_ops.lock.kick = kvm_kick_cpu;
if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
- pv_lock_ops.vcpu_is_preempted =
+ pv_ops.lock.vcpu_is_preempted =
PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
}
}
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1e6764648af3..30084ecaa20f 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -28,6 +28,7 @@
#include <linux/sched/clock.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/set_memory.h>
#include <asm/hypervisor.h>
#include <asm/mem_encrypt.h>
@@ -61,9 +62,10 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
static struct pvclock_vsyscall_time_info
- hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
-static struct pvclock_wall_clock wall_clock;
+ hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+static struct pvclock_wall_clock wall_clock __bss_decrypted;
static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+static struct pvclock_vsyscall_time_info *hvclock_mem;
static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
{
@@ -116,13 +118,13 @@ static u64 kvm_sched_clock_read(void)
static inline void kvm_sched_clock_init(bool stable)
{
if (!stable) {
- pv_time_ops.sched_clock = kvm_clock_read;
+ pv_ops.time.sched_clock = kvm_clock_read;
clear_sched_clock_stable();
return;
}
kvm_sched_clock_offset = kvm_clock_read();
- pv_time_ops.sched_clock = kvm_sched_clock_read;
+ pv_ops.time.sched_clock = kvm_sched_clock_read;
pr_info("kvm-clock: using sched offset of %llu cycles",
kvm_sched_clock_offset);
@@ -236,6 +238,45 @@ static void kvm_shutdown(void)
native_machine_shutdown();
}
+static void __init kvmclock_init_mem(void)
+{
+ unsigned long ncpus;
+ unsigned int order;
+ struct page *p;
+ int r;
+
+ if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
+ return;
+
+ ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
+ order = get_order(ncpus * sizeof(*hvclock_mem));
+
+ p = alloc_pages(GFP_KERNEL, order);
+ if (!p) {
+		pr_warn("%s: failed to alloc %d pages\n", __func__, (1U << order));
+ return;
+ }
+
+ hvclock_mem = page_address(p);
+
+ /*
+ * hvclock is shared between the guest and the hypervisor, must
+ * be mapped decrypted.
+ */
+ if (sev_active()) {
+ r = set_memory_decrypted((unsigned long) hvclock_mem,
+ 1UL << order);
+ if (r) {
+ __free_pages(p, order);
+ hvclock_mem = NULL;
+ pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n");
+ return;
+ }
+ }
+
+ memset(hvclock_mem, 0, PAGE_SIZE << order);
+}
+
static int __init kvm_setup_vsyscall_timeinfo(void)
{
#ifdef CONFIG_X86_64
@@ -250,6 +291,9 @@ static int __init kvm_setup_vsyscall_timeinfo(void)
kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
#endif
+
+ kvmclock_init_mem();
+
return 0;
}
early_initcall(kvm_setup_vsyscall_timeinfo);
@@ -269,8 +313,10 @@ static int kvmclock_setup_percpu(unsigned int cpu)
/* Use the static page for the first CPUs, allocate otherwise */
if (cpu < HVC_BOOT_ARRAY_SIZE)
p = &hv_clock_boot[cpu];
+ else if (hvclock_mem)
+ p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
else
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ return -ENOMEM;
per_cpu(hv_clock_per_cpu, cpu) = p;
return p ? 0 : -ENOMEM;
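
kvmclock_init_mem() above shows the standard lifecycle for guest memory shared with the hypervisor under SEV: allocate, clear the encryption attribute, then zero the now-shared range. A compact sketch of the same sequence for a single page; alloc_shared_page is a hypothetical helper:

static void *alloc_shared_page(void)
{
	struct page *p = alloc_pages(GFP_KERNEL, 0);

	if (!p)
		return NULL;

	if (sev_active() &&
	    set_memory_decrypted((unsigned long)page_address(p), 1)) {
		__free_pages(p, 0);
		return NULL;
	}

	/* Zero only after the mapping is shared with the host */
	memset(page_address(p), 0, PAGE_SIZE);
	return page_address(p);
}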
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 733e6ace0fa4..ab18e0884dc6 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -273,7 +273,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
map_ldt_struct_to_user(mm);
va = (unsigned long)ldt_slot_va(slot);
- flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
+ flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false);
ldt->slot = slot;
return 0;
diff --git a/arch/x86/kernel/macros.S b/arch/x86/kernel/macros.S
new file mode 100644
index 000000000000..161c95059044
--- /dev/null
+++ b/arch/x86/kernel/macros.S
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file includes headers whose assembly parts contain commonly used
+ * macros. The macros are precompiled into an assembly file which is later
+ * assembled together with each compiled file.
+ */
+
+#include <linux/compiler.h>
+#include <asm/refcount.h>
+#include <asm/alternative-asm.h>
+#include <asm/bug.h>
+#include <asm/paravirt.h>
+#include <asm/asm.h>
+#include <asm/cpufeature.h>
+#include <asm/jump_label.h>
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index f58336af095c..b052e883dd8c 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -201,6 +201,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
goto overflow;
#endif
break;
+ case R_X86_64_PC64:
+ if (*(u64 *)loc != 0)
+ goto invalid_relocation;
+ val -= (u64)loc;
+ *(u64 *)loc = val;
+ break;
default:
pr_err("%s: Unknown rela relocation: %llu\n",
me->name, ELF64_R_TYPE(rel[i].r_info));
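
The new R_X86_64_PC64 case computes the classic S + A - P relocation at 64-bit width: val arrives as symbol + addend, and the patched location is subtracted. A small sketch with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sym = 0xffffffffc0051000ULL;	/* S: symbol value (made up) */
	int64_t addend = -4;			/* A: r_addend */
	uint64_t loc = 0xffffffffc0002040ULL;	/* P: place being patched */
	uint64_t val = sym + addend;		/* what the case receives */

	val -= loc;				/* R_X86_64_PC64: S + A - P */
	printf("*(u64 *)loc = 0x%016llx\n", (unsigned long long)val);
	return 0;
}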
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 71f2d1125ec0..4f75d0cf6305 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -17,7 +17,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
bool pv_is_native_spin_unlock(void)
{
- return pv_lock_ops.queued_spin_unlock.func ==
+ return pv_ops.lock.queued_spin_unlock.func ==
__raw_callee_save___native_queued_spin_unlock;
}
@@ -29,17 +29,6 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
bool pv_is_native_vcpu_is_preempted(void)
{
- return pv_lock_ops.vcpu_is_preempted.func ==
+ return pv_ops.lock.vcpu_is_preempted.func ==
__raw_callee_save___native_vcpu_is_preempted;
}
-
-struct pv_lock_ops pv_lock_ops = {
-#ifdef CONFIG_SMP
- .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
- .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
- .wait = paravirt_nop,
- .kick = paravirt_nop,
- .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
-#endif /* SMP */
-};
-EXPORT_SYMBOL(pv_lock_ops);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index afdb303285f8..e4d4df37922a 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -81,17 +81,15 @@ struct branch {
u32 delta;
} __attribute__((packed));
-unsigned paravirt_patch_call(void *insnbuf,
- const void *target, u16 tgt_clobbers,
- unsigned long addr, u16 site_clobbers,
- unsigned len)
+static unsigned paravirt_patch_call(void *insnbuf, const void *target,
+ unsigned long addr, unsigned len)
{
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
if (len < 5) {
#ifdef CONFIG_RETPOLINE
- WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
+ WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
#endif
return len; /* call too long for patch site */
}
@@ -103,15 +101,16 @@ unsigned paravirt_patch_call(void *insnbuf,
return 5;
}
-unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
- unsigned long addr, unsigned len)
+#ifdef CONFIG_PARAVIRT_XXL
+static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
+ unsigned long addr, unsigned len)
{
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
if (len < 5) {
#ifdef CONFIG_RETPOLINE
- WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
+ WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
#endif
return len; /* jmp too long for patch site */
}
@@ -121,6 +120,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
return 5;
}
+#endif
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
@@ -130,29 +130,14 @@ void __init native_pv_lock_init(void)
static_branch_disable(&virt_spin_lock_key);
}
-/*
- * Neat trick to map patch type back to the call within the
- * corresponding structure.
- */
-static void *get_call_destination(u8 type)
-{
- struct paravirt_patch_template tmpl = {
- .pv_init_ops = pv_init_ops,
- .pv_time_ops = pv_time_ops,
- .pv_cpu_ops = pv_cpu_ops,
- .pv_irq_ops = pv_irq_ops,
- .pv_mmu_ops = pv_mmu_ops,
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
- .pv_lock_ops = pv_lock_ops,
-#endif
- };
- return *((void **)&tmpl + type);
-}
-
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+unsigned paravirt_patch_default(u8 type, void *insnbuf,
unsigned long addr, unsigned len)
{
- void *opfunc = get_call_destination(type);
+ /*
+ * Neat trick to map patch type back to the call within the
+ * corresponding structure.
+ */
+ void *opfunc = *((void **)&pv_ops + type);
unsigned ret;
if (opfunc == NULL)
@@ -167,15 +152,15 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
else if (opfunc == _paravirt_ident_64)
ret = paravirt_patch_ident_64(insnbuf, len);
- else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
- type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
+#ifdef CONFIG_PARAVIRT_XXL
+ else if (type == PARAVIRT_PATCH(cpu.iret) ||
+ type == PARAVIRT_PATCH(cpu.usergs_sysret64))
/* If operation requires a jmp, then jmp */
ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
+#endif
else
- /* Otherwise call the function; assume target could
- clobber any caller-save reg */
- ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
- addr, clobbers, len);
+ /* Otherwise call the function. */
+ ret = paravirt_patch_call(insnbuf, opfunc, addr, len);
return ret;
}
@@ -281,6 +266,7 @@ void paravirt_flush_lazy_mmu(void)
preempt_enable();
}
+#ifdef CONFIG_PARAVIRT_XXL
void paravirt_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
@@ -301,6 +287,7 @@ void paravirt_end_context_switch(struct task_struct *next)
if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
arch_enter_lazy_mmu_mode();
}
+#endif
enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
@@ -312,85 +299,16 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
struct pv_info pv_info = {
.name = "bare hardware",
+#ifdef CONFIG_PARAVIRT_XXL
.kernel_rpl = 0,
.shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
#ifdef CONFIG_X86_64
.extra_user_64bit_cs = __USER_CS,
#endif
-};
-
-struct pv_init_ops pv_init_ops = {
- .patch = native_patch,
-};
-
-struct pv_time_ops pv_time_ops = {
- .sched_clock = native_sched_clock,
- .steal_clock = native_steal_clock,
-};
-
-__visible struct pv_irq_ops pv_irq_ops = {
- .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
- .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
- .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
- .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
- .safe_halt = native_safe_halt,
- .halt = native_halt,
-};
-
-__visible struct pv_cpu_ops pv_cpu_ops = {
- .cpuid = native_cpuid,
- .get_debugreg = native_get_debugreg,
- .set_debugreg = native_set_debugreg,
- .read_cr0 = native_read_cr0,
- .write_cr0 = native_write_cr0,
- .write_cr4 = native_write_cr4,
-#ifdef CONFIG_X86_64
- .read_cr8 = native_read_cr8,
- .write_cr8 = native_write_cr8,
#endif
- .wbinvd = native_wbinvd,
- .read_msr = native_read_msr,
- .write_msr = native_write_msr,
- .read_msr_safe = native_read_msr_safe,
- .write_msr_safe = native_write_msr_safe,
- .read_pmc = native_read_pmc,
- .load_tr_desc = native_load_tr_desc,
- .set_ldt = native_set_ldt,
- .load_gdt = native_load_gdt,
- .load_idt = native_load_idt,
- .store_tr = native_store_tr,
- .load_tls = native_load_tls,
-#ifdef CONFIG_X86_64
- .load_gs_index = native_load_gs_index,
-#endif
- .write_ldt_entry = native_write_ldt_entry,
- .write_gdt_entry = native_write_gdt_entry,
- .write_idt_entry = native_write_idt_entry,
-
- .alloc_ldt = paravirt_nop,
- .free_ldt = paravirt_nop,
-
- .load_sp0 = native_load_sp0,
-
-#ifdef CONFIG_X86_64
- .usergs_sysret64 = native_usergs_sysret64,
-#endif
- .iret = native_iret,
- .swapgs = native_swapgs,
-
- .set_iopl_mask = native_set_iopl_mask,
- .io_delay = native_io_delay,
-
- .start_context_switch = paravirt_nop,
- .end_context_switch = paravirt_nop,
};
-/* At this point, native_get/set_debugreg has real function entries */
-NOKPROBE_SYMBOL(native_get_debugreg);
-NOKPROBE_SYMBOL(native_set_debugreg);
-NOKPROBE_SYMBOL(native_load_idt);
-
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
/* 32-bit pagetable entries */
#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
@@ -399,85 +317,171 @@ NOKPROBE_SYMBOL(native_load_idt);
#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
#endif
-struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
+struct paravirt_patch_template pv_ops = {
+ /* Init ops. */
+ .init.patch = native_patch,
- .read_cr2 = native_read_cr2,
- .write_cr2 = native_write_cr2,
- .read_cr3 = __native_read_cr3,
- .write_cr3 = native_write_cr3,
+ /* Time ops. */
+ .time.sched_clock = native_sched_clock,
+ .time.steal_clock = native_steal_clock,
- .flush_tlb_user = native_flush_tlb,
- .flush_tlb_kernel = native_flush_tlb_global,
- .flush_tlb_one_user = native_flush_tlb_one_user,
- .flush_tlb_others = native_flush_tlb_others,
- .tlb_remove_table = (void (*)(struct mmu_gather *, void *))tlb_remove_page,
+ /* Cpu ops. */
+ .cpu.io_delay = native_io_delay,
- .pgd_alloc = __paravirt_pgd_alloc,
- .pgd_free = paravirt_nop,
+#ifdef CONFIG_PARAVIRT_XXL
+ .cpu.cpuid = native_cpuid,
+ .cpu.get_debugreg = native_get_debugreg,
+ .cpu.set_debugreg = native_set_debugreg,
+ .cpu.read_cr0 = native_read_cr0,
+ .cpu.write_cr0 = native_write_cr0,
+ .cpu.write_cr4 = native_write_cr4,
+#ifdef CONFIG_X86_64
+ .cpu.read_cr8 = native_read_cr8,
+ .cpu.write_cr8 = native_write_cr8,
+#endif
+ .cpu.wbinvd = native_wbinvd,
+ .cpu.read_msr = native_read_msr,
+ .cpu.write_msr = native_write_msr,
+ .cpu.read_msr_safe = native_read_msr_safe,
+ .cpu.write_msr_safe = native_write_msr_safe,
+ .cpu.read_pmc = native_read_pmc,
+ .cpu.load_tr_desc = native_load_tr_desc,
+ .cpu.set_ldt = native_set_ldt,
+ .cpu.load_gdt = native_load_gdt,
+ .cpu.load_idt = native_load_idt,
+ .cpu.store_tr = native_store_tr,
+ .cpu.load_tls = native_load_tls,
+#ifdef CONFIG_X86_64
+ .cpu.load_gs_index = native_load_gs_index,
+#endif
+ .cpu.write_ldt_entry = native_write_ldt_entry,
+ .cpu.write_gdt_entry = native_write_gdt_entry,
+ .cpu.write_idt_entry = native_write_idt_entry,
- .alloc_pte = paravirt_nop,
- .alloc_pmd = paravirt_nop,
- .alloc_pud = paravirt_nop,
- .alloc_p4d = paravirt_nop,
- .release_pte = paravirt_nop,
- .release_pmd = paravirt_nop,
- .release_pud = paravirt_nop,
- .release_p4d = paravirt_nop,
+ .cpu.alloc_ldt = paravirt_nop,
+ .cpu.free_ldt = paravirt_nop,
- .set_pte = native_set_pte,
- .set_pte_at = native_set_pte_at,
- .set_pmd = native_set_pmd,
+ .cpu.load_sp0 = native_load_sp0,
- .ptep_modify_prot_start = __ptep_modify_prot_start,
- .ptep_modify_prot_commit = __ptep_modify_prot_commit,
+#ifdef CONFIG_X86_64
+ .cpu.usergs_sysret64 = native_usergs_sysret64,
+#endif
+ .cpu.iret = native_iret,
+ .cpu.swapgs = native_swapgs,
+
+ .cpu.set_iopl_mask = native_set_iopl_mask,
+
+ .cpu.start_context_switch = paravirt_nop,
+ .cpu.end_context_switch = paravirt_nop,
+
+ /* Irq ops. */
+ .irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+ .irq.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+ .irq.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+ .irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
+ .irq.safe_halt = native_safe_halt,
+ .irq.halt = native_halt,
+#endif /* CONFIG_PARAVIRT_XXL */
+
+ /* Mmu ops. */
+ .mmu.flush_tlb_user = native_flush_tlb,
+ .mmu.flush_tlb_kernel = native_flush_tlb_global,
+ .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
+ .mmu.flush_tlb_others = native_flush_tlb_others,
+ .mmu.tlb_remove_table =
+ (void (*)(struct mmu_gather *, void *))tlb_remove_page,
+
+ .mmu.exit_mmap = paravirt_nop,
+
+#ifdef CONFIG_PARAVIRT_XXL
+ .mmu.read_cr2 = native_read_cr2,
+ .mmu.write_cr2 = native_write_cr2,
+ .mmu.read_cr3 = __native_read_cr3,
+ .mmu.write_cr3 = native_write_cr3,
+
+ .mmu.pgd_alloc = __paravirt_pgd_alloc,
+ .mmu.pgd_free = paravirt_nop,
+
+ .mmu.alloc_pte = paravirt_nop,
+ .mmu.alloc_pmd = paravirt_nop,
+ .mmu.alloc_pud = paravirt_nop,
+ .mmu.alloc_p4d = paravirt_nop,
+ .mmu.release_pte = paravirt_nop,
+ .mmu.release_pmd = paravirt_nop,
+ .mmu.release_pud = paravirt_nop,
+ .mmu.release_p4d = paravirt_nop,
+
+ .mmu.set_pte = native_set_pte,
+ .mmu.set_pte_at = native_set_pte_at,
+ .mmu.set_pmd = native_set_pmd,
+
+ .mmu.ptep_modify_prot_start = __ptep_modify_prot_start,
+ .mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit,
#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
- .set_pte_atomic = native_set_pte_atomic,
- .pte_clear = native_pte_clear,
- .pmd_clear = native_pmd_clear,
+ .mmu.set_pte_atomic = native_set_pte_atomic,
+ .mmu.pte_clear = native_pte_clear,
+ .mmu.pmd_clear = native_pmd_clear,
#endif
- .set_pud = native_set_pud,
+ .mmu.set_pud = native_set_pud,
- .pmd_val = PTE_IDENT,
- .make_pmd = PTE_IDENT,
+ .mmu.pmd_val = PTE_IDENT,
+ .mmu.make_pmd = PTE_IDENT,
#if CONFIG_PGTABLE_LEVELS >= 4
- .pud_val = PTE_IDENT,
- .make_pud = PTE_IDENT,
+ .mmu.pud_val = PTE_IDENT,
+ .mmu.make_pud = PTE_IDENT,
- .set_p4d = native_set_p4d,
+ .mmu.set_p4d = native_set_p4d,
#if CONFIG_PGTABLE_LEVELS >= 5
- .p4d_val = PTE_IDENT,
- .make_p4d = PTE_IDENT,
+ .mmu.p4d_val = PTE_IDENT,
+ .mmu.make_p4d = PTE_IDENT,
- .set_pgd = native_set_pgd,
+ .mmu.set_pgd = native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
#endif /* CONFIG_PGTABLE_LEVELS >= 4 */
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
- .pte_val = PTE_IDENT,
- .pgd_val = PTE_IDENT,
+ .mmu.pte_val = PTE_IDENT,
+ .mmu.pgd_val = PTE_IDENT,
- .make_pte = PTE_IDENT,
- .make_pgd = PTE_IDENT,
+ .mmu.make_pte = PTE_IDENT,
+ .mmu.make_pgd = PTE_IDENT,
- .dup_mmap = paravirt_nop,
- .exit_mmap = paravirt_nop,
- .activate_mm = paravirt_nop,
+ .mmu.dup_mmap = paravirt_nop,
+ .mmu.activate_mm = paravirt_nop,
- .lazy_mode = {
- .enter = paravirt_nop,
- .leave = paravirt_nop,
- .flush = paravirt_nop,
+ .mmu.lazy_mode = {
+ .enter = paravirt_nop,
+ .leave = paravirt_nop,
+ .flush = paravirt_nop,
},
- .set_fixmap = native_set_fixmap,
+ .mmu.set_fixmap = native_set_fixmap,
+#endif /* CONFIG_PARAVIRT_XXL */
+
+#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+ /* Lock ops. */
+#ifdef CONFIG_SMP
+ .lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+ .lock.queued_spin_unlock =
+ PV_CALLEE_SAVE(__native_queued_spin_unlock),
+ .lock.wait = paravirt_nop,
+ .lock.kick = paravirt_nop,
+ .lock.vcpu_is_preempted =
+ PV_CALLEE_SAVE(__native_vcpu_is_preempted),
+#endif /* SMP */
+#endif
};
-EXPORT_SYMBOL_GPL(pv_time_ops);
-EXPORT_SYMBOL (pv_cpu_ops);
-EXPORT_SYMBOL (pv_mmu_ops);
+#ifdef CONFIG_PARAVIRT_XXL
+/* At this point, native_get/set_debugreg has real function entries */
+NOKPROBE_SYMBOL(native_get_debugreg);
+NOKPROBE_SYMBOL(native_set_debugreg);
+NOKPROBE_SYMBOL(native_load_idt);
+#endif
+
+EXPORT_SYMBOL_GPL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);
-EXPORT_SYMBOL (pv_irq_ops);
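
The lookup in paravirt_patch_default() works because a paravirt patch type is the index of a pointer-sized slot inside pv_ops, so the struct can be indexed as an array of void *. A toy model of the same trick (the struct layout below is a stand-in, not the real pv_ops):

#include <stddef.h>
#include <stdio.h>

struct toy_ops {
	void (*save_fl)(void);
	void (*irq_disable)(void);
	void (*irq_enable)(void);
};

/* mirrors PARAVIRT_PATCH(): slot offset measured in pointers */
#define TOY_PATCH(field) (offsetof(struct toy_ops, field) / sizeof(void *))

static void native_irq_disable(void) { puts("cli"); }

static struct toy_ops ops = {
	.irq_disable = native_irq_disable,
};

int main(void)
{
	unsigned int type = TOY_PATCH(irq_disable);
	void *opfunc = *((void **)&ops + type);	/* the same indexing trick */

	((void (*)(void))opfunc)();		/* prints "cli" */
	return 0;
}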
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 758e69d72ebf..6368c22fa1fa 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -1,18 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
#include <asm/paravirt.h>
-DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
-DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
-DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
-DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
-DEF_NATIVE(pv_cpu_ops, iret, "iret");
-DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
-DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
-DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
+#ifdef CONFIG_PARAVIRT_XXL
+DEF_NATIVE(irq, irq_disable, "cli");
+DEF_NATIVE(irq, irq_enable, "sti");
+DEF_NATIVE(irq, restore_fl, "push %eax; popf");
+DEF_NATIVE(irq, save_fl, "pushf; pop %eax");
+DEF_NATIVE(cpu, iret, "iret");
+DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
+DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
+DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
+#endif
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
-DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
+DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
+DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -30,53 +32,42 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
extern bool pv_is_native_spin_unlock(void);
extern bool pv_is_native_vcpu_is_preempted(void);
-unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
- unsigned long addr, unsigned len)
+unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
{
- const unsigned char *start, *end;
- unsigned ret;
-
#define PATCH_SITE(ops, x) \
- case PARAVIRT_PATCH(ops.x): \
- start = start_##ops##_##x; \
- end = end_##ops##_##x; \
- goto patch_site
+ case PARAVIRT_PATCH(ops.x): \
+ return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)
+
switch (type) {
- PATCH_SITE(pv_irq_ops, irq_disable);
- PATCH_SITE(pv_irq_ops, irq_enable);
- PATCH_SITE(pv_irq_ops, restore_fl);
- PATCH_SITE(pv_irq_ops, save_fl);
- PATCH_SITE(pv_cpu_ops, iret);
- PATCH_SITE(pv_mmu_ops, read_cr2);
- PATCH_SITE(pv_mmu_ops, read_cr3);
- PATCH_SITE(pv_mmu_ops, write_cr3);
+#ifdef CONFIG_PARAVIRT_XXL
+ PATCH_SITE(irq, irq_disable);
+ PATCH_SITE(irq, irq_enable);
+ PATCH_SITE(irq, restore_fl);
+ PATCH_SITE(irq, save_fl);
+ PATCH_SITE(cpu, iret);
+ PATCH_SITE(mmu, read_cr2);
+ PATCH_SITE(mmu, read_cr3);
+ PATCH_SITE(mmu, write_cr3);
+#endif
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
- case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
- if (pv_is_native_spin_unlock()) {
- start = start_pv_lock_ops_queued_spin_unlock;
- end = end_pv_lock_ops_queued_spin_unlock;
- goto patch_site;
- }
- goto patch_default;
+ case PARAVIRT_PATCH(lock.queued_spin_unlock):
+ if (pv_is_native_spin_unlock())
+ return paravirt_patch_insns(ibuf, len,
+ start_lock_queued_spin_unlock,
+ end_lock_queued_spin_unlock);
+ break;
- case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
- if (pv_is_native_vcpu_is_preempted()) {
- start = start_pv_lock_ops_vcpu_is_preempted;
- end = end_pv_lock_ops_vcpu_is_preempted;
- goto patch_site;
- }
- goto patch_default;
+ case PARAVIRT_PATCH(lock.vcpu_is_preempted):
+ if (pv_is_native_vcpu_is_preempted())
+ return paravirt_patch_insns(ibuf, len,
+ start_lock_vcpu_is_preempted,
+ end_lock_vcpu_is_preempted);
+ break;
#endif
default:
-patch_default: __maybe_unused
- ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
- break;
-
-patch_site:
- ret = paravirt_patch_insns(ibuf, len, start, end);
break;
}
#undef PATCH_SITE
- return ret;
+ return paravirt_patch_default(type, ibuf, addr, len);
}
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 9cb98f7b07c9..7ca9cb726f4d 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -3,24 +3,26 @@
#include <asm/asm-offsets.h>
#include <linux/stringify.h>
-DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
-DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
-DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
-DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
-DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
-DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
+#ifdef CONFIG_PARAVIRT_XXL
+DEF_NATIVE(irq, irq_disable, "cli");
+DEF_NATIVE(irq, irq_enable, "sti");
+DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq");
+DEF_NATIVE(irq, save_fl, "pushfq; popq %rax");
+DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax");
+DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax");
+DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3");
+DEF_NATIVE(cpu, wbinvd, "wbinvd");
-DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
-DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
+DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
+DEF_NATIVE(cpu, swapgs, "swapgs");
+#endif
DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
+DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
+DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -38,55 +40,44 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
extern bool pv_is_native_spin_unlock(void);
extern bool pv_is_native_vcpu_is_preempted(void);
-unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
- unsigned long addr, unsigned len)
+unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
{
- const unsigned char *start, *end;
- unsigned ret;
-
#define PATCH_SITE(ops, x) \
- case PARAVIRT_PATCH(ops.x): \
- start = start_##ops##_##x; \
- end = end_##ops##_##x; \
- goto patch_site
- switch(type) {
- PATCH_SITE(pv_irq_ops, restore_fl);
- PATCH_SITE(pv_irq_ops, save_fl);
- PATCH_SITE(pv_irq_ops, irq_enable);
- PATCH_SITE(pv_irq_ops, irq_disable);
- PATCH_SITE(pv_cpu_ops, usergs_sysret64);
- PATCH_SITE(pv_cpu_ops, swapgs);
- PATCH_SITE(pv_mmu_ops, read_cr2);
- PATCH_SITE(pv_mmu_ops, read_cr3);
- PATCH_SITE(pv_mmu_ops, write_cr3);
- PATCH_SITE(pv_cpu_ops, wbinvd);
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
- case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
- if (pv_is_native_spin_unlock()) {
- start = start_pv_lock_ops_queued_spin_unlock;
- end = end_pv_lock_ops_queued_spin_unlock;
- goto patch_site;
- }
- goto patch_default;
+ case PARAVIRT_PATCH(ops.x): \
+ return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)
- case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
- if (pv_is_native_vcpu_is_preempted()) {
- start = start_pv_lock_ops_vcpu_is_preempted;
- end = end_pv_lock_ops_vcpu_is_preempted;
- goto patch_site;
- }
- goto patch_default;
+ switch (type) {
+#ifdef CONFIG_PARAVIRT_XXL
+ PATCH_SITE(irq, restore_fl);
+ PATCH_SITE(irq, save_fl);
+ PATCH_SITE(irq, irq_enable);
+ PATCH_SITE(irq, irq_disable);
+ PATCH_SITE(cpu, usergs_sysret64);
+ PATCH_SITE(cpu, swapgs);
+ PATCH_SITE(cpu, wbinvd);
+ PATCH_SITE(mmu, read_cr2);
+ PATCH_SITE(mmu, read_cr3);
+ PATCH_SITE(mmu, write_cr3);
#endif
+#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+ case PARAVIRT_PATCH(lock.queued_spin_unlock):
+ if (pv_is_native_spin_unlock())
+ return paravirt_patch_insns(ibuf, len,
+ start_lock_queued_spin_unlock,
+ end_lock_queued_spin_unlock);
+ break;
- default:
-patch_default: __maybe_unused
- ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
+ case PARAVIRT_PATCH(lock.vcpu_is_preempted):
+ if (pv_is_native_vcpu_is_preempted())
+ return paravirt_patch_insns(ibuf, len,
+ start_lock_vcpu_is_preempted,
+ end_lock_vcpu_is_preempted);
break;
+#endif
-patch_site:
- ret = paravirt_patch_insns(ibuf, len, start, end);
+ default:
break;
}
#undef PATCH_SITE
- return ret;
+ return paravirt_patch_default(type, ibuf, addr, len);
}
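
Both rewritten native_patch() variants now either tail-call paravirt_patch_default() or copy native instruction bytes via paravirt_patch_insns(). A toy model of that copy step (the real helper also handles a NULL start):

#include <stdio.h>
#include <string.h>

static unsigned int patch_insns(void *ibuf, unsigned int len,
				const unsigned char *start,
				const unsigned char *end)
{
	unsigned int insn_len = end - start;

	if (insn_len > len)	/* too big: caller keeps the indirect call */
		return len;
	memcpy(ibuf, start, insn_len);
	return insn_len;
}

int main(void)
{
	static const unsigned char native_cli[] = { 0xfa };	/* "cli" */
	unsigned char site[5] = { 0x90, 0x90, 0x90, 0x90, 0x90 }; /* nops */
	unsigned int n;

	n = patch_insns(site, sizeof(site), native_cli,
			native_cli + sizeof(native_cli));
	printf("patched %u byte(s), site[0] = %#x\n", n, site[0]);
	return 0;
}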
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 661583662430..71c0b01d93b1 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -42,10 +42,8 @@ IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
int __init pci_swiotlb_detect_4gb(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
-#ifdef CONFIG_X86_64
if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
swiotlb = 1;
-#endif
/*
* If SME is active then swiotlb will be set to 1 so that bounce
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ea5ea850348d..31b4755369f0 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -54,13 +54,12 @@
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
+#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif
-__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
-
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
@@ -286,6 +285,138 @@ static __always_inline void load_seg_legacy(unsigned short prev_index,
}
}
+static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
+ struct thread_struct *next)
+{
+ load_seg_legacy(prev->fsindex, prev->fsbase,
+ next->fsindex, next->fsbase, FS);
+ load_seg_legacy(prev->gsindex, prev->gsbase,
+ next->gsindex, next->gsbase, GS);
+}
+
+static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
+ unsigned short selector)
+{
+ unsigned short idx = selector >> 3;
+ unsigned long base;
+
+ if (likely((selector & SEGMENT_TI_MASK) == 0)) {
+ if (unlikely(idx >= GDT_ENTRIES))
+ return 0;
+
+ /*
+ * There are no user segments in the GDT with nonzero bases
+ * other than the TLS segments.
+ */
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return 0;
+
+ idx -= GDT_ENTRY_TLS_MIN;
+ base = get_desc_base(&task->thread.tls_array[idx]);
+ } else {
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ struct ldt_struct *ldt;
+
+ /*
+ * If performance here mattered, we could protect the LDT
+ * with RCU. This is a slow path, though, so we can just
+ * take the mutex.
+ */
+ mutex_lock(&task->mm->context.lock);
+ ldt = task->mm->context.ldt;
+ if (unlikely(idx >= ldt->nr_entries))
+ base = 0;
+ else
+ base = get_desc_base(ldt->entries + idx);
+ mutex_unlock(&task->mm->context.lock);
+#else
+ base = 0;
+#endif
+ }
+
+ return base;
+}
+
+void x86_fsbase_write_cpu(unsigned long fsbase)
+{
+ /*
+ * Set the selector to 0 to mark that the segment base has been
+ * overwritten; the context switch code checks this to skip the
+ * segment load.
+ */
+ loadseg(FS, 0);
+ wrmsrl(MSR_FS_BASE, fsbase);
+}
+
+void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
+{
+ /* Set the selector to 0 for the same reason as %fs above. */
+ loadseg(GS, 0);
+ wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+}
+
+unsigned long x86_fsbase_read_task(struct task_struct *task)
+{
+ unsigned long fsbase;
+
+ if (task == current)
+ fsbase = x86_fsbase_read_cpu();
+ else if (task->thread.fsindex == 0)
+ fsbase = task->thread.fsbase;
+ else
+ fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);
+
+ return fsbase;
+}
+
+unsigned long x86_gsbase_read_task(struct task_struct *task)
+{
+ unsigned long gsbase;
+
+ if (task == current)
+ gsbase = x86_gsbase_read_cpu_inactive();
+ else if (task->thread.gsindex == 0)
+ gsbase = task->thread.gsbase;
+ else
+ gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);
+
+ return gsbase;
+}
+
+int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
+{
+ /*
+ * Not strictly needed for %fs, but do it for symmetry
+ * with %gs
+ */
+ if (unlikely(fsbase >= TASK_SIZE_MAX))
+ return -EPERM;
+
+ preempt_disable();
+ task->thread.fsbase = fsbase;
+ if (task == current)
+ x86_fsbase_write_cpu(fsbase);
+ task->thread.fsindex = 0;
+ preempt_enable();
+
+ return 0;
+}
+
+int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
+{
+ if (unlikely(gsbase >= TASK_SIZE_MAX))
+ return -EPERM;
+
+ preempt_disable();
+ task->thread.gsbase = gsbase;
+ if (task == current)
+ x86_gsbase_write_cpu_inactive(gsbase);
+ task->thread.gsindex = 0;
+ preempt_enable();
+
+ return 0;
+}
+
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct *p, unsigned long tls)
{
@@ -473,10 +604,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (unlikely(next->ds | prev->ds))
loadsegment(ds, next->ds);
- load_seg_legacy(prev->fsindex, prev->fsbase,
- next->fsindex, next->fsbase, FS);
- load_seg_legacy(prev->gsindex, prev->gsbase,
- next->gsindex, next->gsbase, GS);
+ x86_fsgsbase_load(prev, next);
switch_fpu_finish(next_fpu, cpu);
@@ -627,54 +755,25 @@ static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
int ret = 0;
- int doit = task == current;
- int cpu;
switch (option) {
- case ARCH_SET_GS:
- if (arg2 >= TASK_SIZE_MAX)
- return -EPERM;
- cpu = get_cpu();
- task->thread.gsindex = 0;
- task->thread.gsbase = arg2;
- if (doit) {
- load_gs_index(0);
- ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, arg2);
- }
- put_cpu();
+ case ARCH_SET_GS: {
+ ret = x86_gsbase_write_task(task, arg2);
break;
- case ARCH_SET_FS:
- /* Not strictly needed for fs, but do it for symmetry
- with gs */
- if (arg2 >= TASK_SIZE_MAX)
- return -EPERM;
- cpu = get_cpu();
- task->thread.fsindex = 0;
- task->thread.fsbase = arg2;
- if (doit) {
- /* set the selector to 0 to not confuse __switch_to */
- loadsegment(fs, 0);
- ret = wrmsrl_safe(MSR_FS_BASE, arg2);
- }
- put_cpu();
+ }
+ case ARCH_SET_FS: {
+ ret = x86_fsbase_write_task(task, arg2);
break;
+ }
case ARCH_GET_FS: {
- unsigned long base;
+ unsigned long base = x86_fsbase_read_task(task);
- if (doit)
- rdmsrl(MSR_FS_BASE, base);
- else
- base = task->thread.fsbase;
ret = put_user(base, (unsigned long __user *)arg2);
break;
}
case ARCH_GET_GS: {
- unsigned long base;
+ unsigned long base = x86_gsbase_read_task(task);
- if (doit)
- rdmsrl(MSR_KERNEL_GS_BASE, base);
- else
- base = task->thread.gsbase;
ret = put_user(base, (unsigned long __user *)arg2);
break;
}
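
These task-level helpers back the userspace arch_prctl() interface. A minimal Linux/x86-64 sketch exercising that path; GS is used here because FS carries glibc's TLS:

#include <asm/prctl.h>		/* ARCH_SET_GS, ARCH_GET_GS */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static long xprctl(int op, unsigned long arg)
{
	return syscall(SYS_arch_prctl, op, arg);
}

int main(void)
{
	static unsigned long blob[8];	/* stand-in base target */
	unsigned long base = 0;

	if (xprctl(ARCH_SET_GS, (unsigned long)blob))
		return 1;
	xprctl(ARCH_GET_GS, (unsigned long)&base);
	printf("gsbase = %#lx, expected %p\n", base, (void *)blob);
	return 0;
}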
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index e2ee403865eb..ffae9b9740fd 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -39,6 +39,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/syscall.h>
+#include <asm/fsgsbase.h>
#include "tls.h"
@@ -396,12 +397,11 @@ static int putreg(struct task_struct *child,
if (value >= TASK_SIZE_MAX)
return -EIO;
/*
- * When changing the segment base, use do_arch_prctl_64
- * to set either thread.fs or thread.fsindex and the
- * corresponding GDT slot.
+ * When changing the FS base, use the same
+ * mechanism as for do_arch_prctl_64().
*/
if (child->thread.fsbase != value)
- return do_arch_prctl_64(child, ARCH_SET_FS, value);
+ return x86_fsbase_write_task(child, value);
return 0;
case offsetof(struct user_regs_struct,gs_base):
/*
@@ -410,7 +410,7 @@ static int putreg(struct task_struct *child,
if (value >= TASK_SIZE_MAX)
return -EIO;
if (child->thread.gsbase != value)
- return do_arch_prctl_64(child, ARCH_SET_GS, value);
+ return x86_gsbase_write_task(child, value);
return 0;
#endif
}
@@ -434,20 +434,10 @@ static unsigned long getreg(struct task_struct *task, unsigned long offset)
return get_flags(task);
#ifdef CONFIG_X86_64
- case offsetof(struct user_regs_struct, fs_base): {
- /*
- * XXX: This will not behave as expected if called on
- * current or if fsindex != 0.
- */
- return task->thread.fsbase;
- }
- case offsetof(struct user_regs_struct, gs_base): {
- /*
- * XXX: This will not behave as expected if called on
- * current or if fsindex != 0.
- */
- return task->thread.gsbase;
- }
+ case offsetof(struct user_regs_struct, fs_base):
+ return x86_fsbase_read_task(task);
+ case offsetof(struct user_regs_struct, gs_base):
+ return x86_gsbase_read_task(task);
#endif
}
@@ -1369,33 +1359,18 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
}
-static void fill_sigtrap_info(struct task_struct *tsk,
- struct pt_regs *regs,
- int error_code, int si_code,
- struct siginfo *info)
+void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ int error_code, int si_code)
{
tsk->thread.trap_nr = X86_TRAP_DB;
tsk->thread.error_code = error_code;
- info->si_signo = SIGTRAP;
- info->si_code = si_code;
- info->si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
-}
-
-void user_single_step_siginfo(struct task_struct *tsk,
- struct pt_regs *regs,
- struct siginfo *info)
-{
- fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
+ /* Send us the fake SIGTRAP */
+ force_sig_fault(SIGTRAP, si_code,
+ user_mode(regs) ? (void __user *)regs->ip : NULL, tsk);
}
-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
- int error_code, int si_code)
+void user_single_step_report(struct pt_regs *regs)
{
- struct siginfo info;
-
- clear_siginfo(&info);
- fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
- /* Send us the fake SIGTRAP */
- force_sig_info(SIGTRAP, &info, tsk);
+ send_sigtrap(current, regs, 0, TRAP_BRKPT);
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b4866badb235..7005f89bf3b2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1251,7 +1251,7 @@ void __init setup_arch(char **cmdline_p)
x86_init.hyper.guest_late_init();
e820__reserve_resources();
- e820__register_nosave_regions(max_low_pfn);
+ e820__register_nosave_regions(max_pfn);
x86_init.resources.reserve_resources();
@@ -1281,6 +1281,23 @@ void __init setup_arch(char **cmdline_p)
unwind_init();
}
+/*
+ * From boot protocol 2.14 onwards we expect the bootloader to set the
+ * version to "0x8000 | <used version>". If we find a version >= 2.14
+ * without the 0x8000 flag, we assume the bootloader supports only 2.13
+ * and reset the version accordingly. The 0x8000 flag is removed in any case.
+ */
+void __init x86_verify_bootdata_version(void)
+{
+ if (boot_params.hdr.version & VERSION_WRITTEN)
+ boot_params.hdr.version &= ~VERSION_WRITTEN;
+ else if (boot_params.hdr.version >= 0x020e)
+ boot_params.hdr.version = 0x020d;
+
+ if (boot_params.hdr.version < 0x020e)
+ boot_params.hdr.acpi_rsdp_addr = 0;
+}
+
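
The flag handling reduces to a small mapping. A standalone sketch of the same logic, assuming VERSION_WRITTEN is the 0x8000 flag named in the comment:

#include <stdint.h>
#include <stdio.h>

#define VERSION_WRITTEN	0x8000	/* the flag from the comment above */

static uint16_t verify_version(uint16_t v)
{
	if (v & VERSION_WRITTEN)
		v &= ~VERSION_WRITTEN;	/* 2.14+ loader: strip the flag */
	else if (v >= 0x020e)
		v = 0x020d;		/* flag missing: assume 2.13 */
	return v;
}

int main(void)
{
	const uint16_t cases[] = { 0x820e, 0x020e, 0x020d };

	for (unsigned int i = 0; i < 3; i++)
		printf("%#06x -> %#06x\n", cases[i], verify_version(cases[i]));
	return 0;
}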
#ifdef CONFIG_X86_32
static struct resource video_ram_resource = {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f02ecaf97904..5369d7fac797 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -676,6 +676,7 @@ static void __init smp_quirk_init_udelay(void)
/* if modern processor, use no delay */
if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
+ ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
init_udelay = 0;
return;
@@ -1592,7 +1593,8 @@ static inline void mwait_play_dead(void)
void *mwait_ptr;
int i;
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
return;
if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index be01328eb755..0e14f6c0d35e 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -10,6 +10,7 @@
*
*/
+#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -25,7 +26,7 @@
#include <asm/time.h>
#ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
+__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
#endif
unsigned long profile_pc(struct pt_regs *regs)
@@ -105,3 +106,24 @@ void __init time_init(void)
{
late_time_init = x86_late_time_init;
}
+
+/*
+ * Sanity check the vdso related archdata content.
+ */
+void clocksource_arch_init(struct clocksource *cs)
+{
+ if (cs->archdata.vclock_mode == VCLOCK_NONE)
+ return;
+
+ if (cs->archdata.vclock_mode > VCLOCK_MAX) {
+ pr_warn("clocksource %s registered with invalid vclock_mode %d. Disabling vclock.\n",
+ cs->name, cs->archdata.vclock_mode);
+ cs->archdata.vclock_mode = VCLOCK_NONE;
+ }
+
+ if (cs->mask != CLOCKSOURCE_MASK(64)) {
+ pr_warn("clocksource %s registered with invalid mask %016llx. Disabling vclock.\n",
+ cs->name, cs->mask);
+ cs->archdata.vclock_mode = VCLOCK_NONE;
+ }
+}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index e6db475164ed..8f6dcd88202e 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -189,7 +189,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr)
}
static nokprobe_inline int
-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
struct pt_regs *regs, long error_code)
{
if (v8086_mode(regs)) {
@@ -202,11 +202,8 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
error_code, trapnr))
return 0;
}
- return -1;
- }
-
- if (!user_mode(regs)) {
- if (fixup_exception(regs, trapnr))
+ } else if (!user_mode(regs)) {
+ if (fixup_exception(regs, trapnr, error_code, 0))
return 0;
tsk->thread.error_code = error_code;
@@ -214,49 +211,6 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
die(str, regs, error_code);
}
- return -1;
-}
-
-static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
- siginfo_t *info)
-{
- unsigned long siaddr;
- int sicode;
-
- switch (trapnr) {
- default:
- return SEND_SIG_PRIV;
-
- case X86_TRAP_DE:
- sicode = FPE_INTDIV;
- siaddr = uprobe_get_trap_addr(regs);
- break;
- case X86_TRAP_UD:
- sicode = ILL_ILLOPN;
- siaddr = uprobe_get_trap_addr(regs);
- break;
- case X86_TRAP_AC:
- sicode = BUS_ADRALN;
- siaddr = 0;
- break;
- }
-
- info->si_signo = signr;
- info->si_errno = 0;
- info->si_code = sicode;
- info->si_addr = (void __user *)siaddr;
- return info;
-}
-
-static void
-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
- long error_code, siginfo_t *info)
-{
- struct task_struct *tsk = current;
-
-
- if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
- return;
/*
* We want error_code and trap_nr set for userspace faults and
* kernelspace faults which result in die(), but not
@@ -269,24 +223,45 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
+ return -1;
+}
+
+static void show_signal(struct task_struct *tsk, int signr,
+ const char *type, const char *desc,
+ struct pt_regs *regs, long error_code)
+{
if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
printk_ratelimit()) {
- pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
- tsk->comm, tsk->pid, str,
+ pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
+ tsk->comm, task_pid_nr(tsk), type, desc,
regs->ip, regs->sp, error_code);
print_vma_addr(KERN_CONT " in ", regs->ip);
pr_cont("\n");
}
+}
- force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
+static void
+do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+ long error_code, int sicode, void __user *addr)
+{
+ struct task_struct *tsk = current;
+
+
+ if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
+ return;
+
+ show_signal(tsk, signr, "trap ", str, regs, error_code);
+
+ if (!sicode)
+ force_sig(signr, tsk);
+ else
+ force_sig_fault(signr, sicode, addr, tsk);
}
NOKPROBE_SYMBOL(do_trap);
static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
- unsigned long trapnr, int signr)
+ unsigned long trapnr, int signr, int sicode, void __user *addr)
{
- siginfo_t info;
-
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
/*
@@ -299,26 +274,26 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
NOTIFY_STOP) {
cond_local_irq_enable(regs);
- clear_siginfo(&info);
- do_trap(trapnr, signr, str, regs, error_code,
- fill_trap_info(regs, signr, trapnr, &info));
+ do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
}
}
-#define DO_ERROR(trapnr, signr, str, name) \
-dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
-{ \
- do_error_trap(regs, error_code, str, trapnr, signr); \
+#define IP ((void __user *)uprobe_get_trap_addr(regs))
+#define DO_ERROR(trapnr, signr, sicode, addr, str, name) \
+dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
+{ \
+ do_error_trap(regs, error_code, str, trapnr, signr, sicode, addr); \
}
-DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error)
-DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
-DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
-DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun)
-DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
-DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
-DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
-DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check)
+DO_ERROR(X86_TRAP_DE, SIGFPE, FPE_INTDIV, IP, "divide error", divide_error)
+DO_ERROR(X86_TRAP_OF, SIGSEGV, 0, NULL, "overflow", overflow)
+DO_ERROR(X86_TRAP_UD, SIGILL, ILL_ILLOPN, IP, "invalid opcode", invalid_op)
+DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, 0, NULL, "coprocessor segment overrun", coprocessor_segment_overrun)
+DO_ERROR(X86_TRAP_TS, SIGSEGV, 0, NULL, "invalid TSS", invalid_TSS)
+DO_ERROR(X86_TRAP_NP, SIGBUS, 0, NULL, "segment not present", segment_not_present)
+DO_ERROR(X86_TRAP_SS, SIGBUS, 0, NULL, "stack segment", stack_segment)
+DO_ERROR(X86_TRAP_AC, SIGBUS, BUS_ADRALN, NULL, "alignment check", alignment_check)
+#undef IP
#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(const char *message,
@@ -383,6 +358,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
* we won't enable interrupts or schedule before we invoke
* general_protection, so nothing will clobber the stack
* frame we just set up.
+ *
+ * We will enter general_protection with kernel GSBASE,
+ * which is what the stub expects, given that the faulting
+ * RIP will be the IRET instruction.
*/
regs->ip = (unsigned long)general_protection;
regs->sp = (unsigned long)&gpregs->orig_ax;
@@ -455,7 +434,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
const struct mpx_bndcsr *bndcsr;
- siginfo_t *info;
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
if (notify_die(DIE_TRAP, "bounds", regs, error_code,
@@ -493,8 +471,11 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
goto exit_trap;
break; /* Success, it was handled */
case 1: /* Bound violation. */
- info = mpx_generate_siginfo(regs);
- if (IS_ERR(info)) {
+ {
+ struct task_struct *tsk = current;
+ struct mpx_fault_info mpx;
+
+ if (mpx_fault_info(&mpx, regs)) {
/*
* We failed to decode the MPX instruction. Act as if
* the exception was not caused by MPX.
@@ -503,14 +484,20 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
}
/*
* Success, we decoded the instruction and retrieved
- * an 'info' containing the address being accessed
+ * an 'mpx' containing the address being accessed
* which caused the exception. This information
* allows an application to possibly handle the
* #BR exception itself.
*/
- do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
- kfree(info);
+ if (!do_trap_no_signal(tsk, X86_TRAP_BR, "bounds", regs,
+ error_code))
+ break;
+
+ show_signal(tsk, SIGSEGV, "trap ", "bounds", regs, error_code);
+
+ force_sig_bnderr(mpx.addr, mpx.lower, mpx.upper);
break;
+ }
case 0: /* No exception caused by Intel MPX operations. */
goto exit_trap;
default:
@@ -527,12 +514,13 @@ exit_trap:
* up here if the kernel has MPX turned off at compile
* time..
*/
- do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
+ do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, 0, NULL);
}
dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
+ const char *desc = "general protection fault";
struct task_struct *tsk;
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
@@ -551,30 +539,33 @@ do_general_protection(struct pt_regs *regs, long error_code)
tsk = current;
if (!user_mode(regs)) {
- if (fixup_exception(regs, X86_TRAP_GP))
+ if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
return;
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
- if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
+
+ /*
+ * To trust the result from kprobe_running() while we may be
+ * processing a kprobe fault, we have to be non-preemptible.
+ */
+ if (!preemptible() && kprobe_running() &&
+ kprobe_fault_handler(regs, X86_TRAP_GP))
+ return;
+
+ if (notify_die(DIE_GPF, desc, regs, error_code,
X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
- die("general protection fault", regs, error_code);
+ die(desc, regs, error_code);
return;
}
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
- if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
- printk_ratelimit()) {
- pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
- tsk->comm, task_pid_nr(tsk),
- regs->ip, regs->sp, error_code);
- print_vma_addr(KERN_CONT " in ", regs->ip);
- pr_cont("\n");
- }
+ show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
- force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
+ force_sig(SIGSEGV, tsk);
}
NOKPROBE_SYMBOL(do_general_protection);
@@ -617,7 +608,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
goto exit;
cond_local_irq_enable(regs);
- do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
+ do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, 0, NULL);
cond_local_irq_disable(regs);
exit:
@@ -831,14 +822,14 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
struct task_struct *task = current;
struct fpu *fpu = &task->thread.fpu;
- siginfo_t info;
+ int si_code;
char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
"simd exception";
cond_local_irq_enable(regs);
if (!user_mode(regs)) {
- if (fixup_exception(regs, trapnr))
+ if (fixup_exception(regs, trapnr, error_code, 0))
return;
task->thread.error_code = error_code;
@@ -857,18 +848,14 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
task->thread.trap_nr = trapnr;
task->thread.error_code = error_code;
- clear_siginfo(&info);
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void __user *)uprobe_get_trap_addr(regs);
-
- info.si_code = fpu__exception_code(fpu, trapnr);
+ si_code = fpu__exception_code(fpu, trapnr);
/* Retry when we get spurious exceptions: */
- if (!info.si_code)
+ if (!si_code)
return;
- force_sig_info(SIGFPE, &info, task);
+ force_sig_fault(SIGFPE, si_code,
+ (void __user *)uprobe_get_trap_addr(regs), task);
}
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
@@ -928,20 +915,13 @@ NOKPROBE_SYMBOL(do_device_not_available);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
- siginfo_t info;
-
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
local_irq_enable();
- clear_siginfo(&info);
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_BADSTK;
- info.si_addr = NULL;
if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
- &info);
+ ILL_BADSTK, (void __user *)NULL);
}
}
#endif
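
Each DO_ERROR() line above generates a trap handler that forwards a fixed signal, si_code and fault address. A stripped-down model of the macro pattern, with kernel types dropped and numeric constants inlined for illustration:

#include <stdio.h>

#define DO_ERROR(trapnr, signr, sicode, str, name)			\
static void do_##name(long error_code)					\
{									\
	printf("trap %d -> signal %d (si_code %d): %s, error %ld\n",	\
	       trapnr, signr, sicode, str, error_code);			\
}

/* 8 ~ SIGFPE, 1 ~ FPE_INTDIV on x86-64 Linux (illustrative) */
DO_ERROR(0, 8, 1, "divide error", divide_error)

int main(void)
{
	do_divide_error(0);
	return 0;
}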
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6490f618e096..e9f777bfed40 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -26,6 +26,7 @@
#include <asm/apic.h>
#include <asm/intel-family.h>
#include <asm/i8259.h>
+#include <asm/uv/uv.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -57,7 +58,7 @@ struct cyc2ns {
static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
-void cyc2ns_read_begin(struct cyc2ns_data *data)
+void __always_inline cyc2ns_read_begin(struct cyc2ns_data *data)
{
int seq, idx;
@@ -74,7 +75,7 @@ void cyc2ns_read_begin(struct cyc2ns_data *data)
} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
}
-void cyc2ns_read_end(void)
+void __always_inline cyc2ns_read_end(void)
{
preempt_enable_notrace();
}
@@ -103,7 +104,7 @@ void cyc2ns_read_end(void)
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
struct cyc2ns_data data;
unsigned long long ns;
@@ -246,7 +247,7 @@ unsigned long long sched_clock(void)
bool using_native_sched_clock(void)
{
- return pv_time_ops.sched_clock == native_sched_clock;
+ return pv_ops.time.sched_clock == native_sched_clock;
}
#else
unsigned long long
@@ -635,7 +636,7 @@ unsigned long native_calibrate_tsc(void)
case INTEL_FAM6_KABYLAKE_DESKTOP:
crystal_khz = 24000; /* 24.0 MHz */
break;
- case INTEL_FAM6_ATOM_DENVERTON:
+ case INTEL_FAM6_ATOM_GOLDMONT_X:
crystal_khz = 25000; /* 25.0 MHz */
break;
case INTEL_FAM6_ATOM_GOLDMONT:
@@ -1433,6 +1434,9 @@ void __init tsc_early_init(void)
{
if (!boot_cpu_has(X86_FEATURE_TSC))
return;
+ /* Don't change UV TSC multi-chassis synchronization */
+ if (is_early_uv_system())
+ return;
if (!determine_cpu_tsc_frequencies(true))
return;
loops_per_jiffy = get_loops_per_jiffy();
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 27ef714d886c..3d0e9aeea7c8 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -59,12 +59,12 @@ static const struct freq_desc freq_desc_ann = {
};
static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
- INTEL_CPU_FAM6(ATOM_PENWELL, freq_desc_pnw),
- INTEL_CPU_FAM6(ATOM_CLOVERVIEW, freq_desc_clv),
- INTEL_CPU_FAM6(ATOM_SILVERMONT1, freq_desc_byt),
+ INTEL_CPU_FAM6(ATOM_SALTWELL_MID, freq_desc_pnw),
+ INTEL_CPU_FAM6(ATOM_SALTWELL_TABLET, freq_desc_clv),
+ INTEL_CPU_FAM6(ATOM_SILVERMONT, freq_desc_byt),
+ INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, freq_desc_tng),
INTEL_CPU_FAM6(ATOM_AIRMONT, freq_desc_cht),
- INTEL_CPU_FAM6(ATOM_MERRIFIELD, freq_desc_tng),
- INTEL_CPU_FAM6(ATOM_MOOREFIELD, freq_desc_ann),
+ INTEL_CPU_FAM6(ATOM_AIRMONT_MID, freq_desc_ann),
{}
};
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
index ff20b35e98dd..f8f3cfda01ae 100644
--- a/arch/x86/kernel/umip.c
+++ b/arch/x86/kernel/umip.c
@@ -271,19 +271,13 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
*/
static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs)
{
- siginfo_t info;
struct task_struct *tsk = current;
tsk->thread.cr2 = (unsigned long)addr;
tsk->thread.error_code = X86_PF_USER | X86_PF_WRITE;
tsk->thread.trap_nr = X86_TRAP_PF;
- clear_siginfo(&info);
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = addr;
- force_sig_info(SIGSEGV, &info, tsk);
+ force_sig_fault(SIGSEGV, SEGV_MAPERR, addr, tsk);
if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV)))
return;
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index deb576b23b7c..843feb94a950 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -1086,7 +1086,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
current->pid, regs->sp, regs->ip);
- force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
+ force_sig(SIGSEGV, current);
}
return -1;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 1c03e4aa6474..c2fd39752da8 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -199,7 +199,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
pte_unmap_unlock(pte, ptl);
out:
up_write(&mm->mmap_sem);
- flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
+ flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 8bde0a419f86..0d618ee634ac 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -65,6 +65,23 @@ jiffies_64 = jiffies;
#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
+/*
+ * This section contains data which will be mapped as decrypted. Memory
+ * encryption operates on a page basis. Make this section PMD-aligned
+ * to avoid splitting the pages while mapping the section early.
+ *
+ * Note: We use a separate section so that only this section gets
+ * decrypted to avoid exposing more than we wish.
+ */
+#define BSS_DECRYPTED \
+ . = ALIGN(PMD_SIZE); \
+ __start_bss_decrypted = .; \
+ *(.bss..decrypted); \
+ . = ALIGN(PAGE_SIZE); \
+ __start_bss_decrypted_unused = .; \
+ . = ALIGN(PMD_SIZE); \
+ __end_bss_decrypted = .; \
+
#else
#define X86_ALIGN_RODATA_BEGIN
@@ -74,6 +91,7 @@ jiffies_64 = jiffies;
#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
+#define BSS_DECRYPTED
#endif
@@ -118,16 +136,6 @@ SECTIONS
*(.fixup)
*(.gnu.warning)
-#ifdef CONFIG_X86_64
- . = ALIGN(PAGE_SIZE);
- __entry_trampoline_start = .;
- _entry_trampoline = .;
- *(.entry_trampoline)
- . = ALIGN(PAGE_SIZE);
- __entry_trampoline_end = .;
- ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
-#endif
-
#ifdef CONFIG_RETPOLINE
__indirect_thunk_start = .;
*(.text.__x86.indirect_thunk)
@@ -355,6 +363,7 @@ SECTIONS
__bss_start = .;
*(.bss..page_aligned)
*(.bss)
+ BSS_DECRYPTED
. = ALIGN(PAGE_SIZE);
__bss_stop = .;
}
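
A variable lands in this section via the __bss_decrypted marker used in kvmclock.c above; the attribute is assumed to look roughly like the sketch below (the exact definition lives in the x86 mem_encrypt headers):

#include <stdio.h>

/* assumed shape of the kernel marker; see the x86 mem_encrypt headers */
#define __bss_decrypted __attribute__((__section__(".bss..decrypted")))

static unsigned long shared_with_host __bss_decrypted;

int main(void)
{
	/* objdump -t on the binary shows the symbol in .bss..decrypted */
	printf("shared_with_host at %p\n", (void *)&shared_with_host);
	return 0;
}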
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 44685fb2a192..1eae5af491c2 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -26,7 +26,7 @@
#define TOPOLOGY_REGISTER_OFFSET 0x10
-#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
+#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL
/*
* Interrupt control on vSMPowered systems:
* ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
@@ -69,17 +69,17 @@ asmlinkage __visible void vsmp_irq_enable(void)
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
-static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
+static unsigned __init vsmp_patch(u8 type, void *ibuf,
unsigned long addr, unsigned len)
{
switch (type) {
- case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
- case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
- case PARAVIRT_PATCH(pv_irq_ops.save_fl):
- case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
- return paravirt_patch_default(type, clobbers, ibuf, addr, len);
+ case PARAVIRT_PATCH(irq.irq_enable):
+ case PARAVIRT_PATCH(irq.irq_disable):
+ case PARAVIRT_PATCH(irq.save_fl):
+ case PARAVIRT_PATCH(irq.restore_fl):
+ return paravirt_patch_default(type, ibuf, addr, len);
default:
- return native_patch(type, clobbers, ibuf, addr, len);
+ return native_patch(type, ibuf, addr, len);
}
}
@@ -111,11 +111,11 @@ static void __init set_vsmp_pv_ops(void)
if (cap & ctl & (1 << 4)) {
/* Setup irq ops and turn on vSMP IRQ fastpath handling */
- pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
- pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
- pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
- pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
- pv_init_ops.patch = vsmp_patch;
+ pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
+ pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
+ pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
+ pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
+ pv_ops.init.patch = vsmp_patch;
ctl &= ~(1 << 4);
}
writel(ctl, address + 4);
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 2792b5573818..50a2b492fdd6 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -31,7 +31,6 @@ static int __init iommu_init_noop(void) { return 0; }
static void iommu_shutdown_noop(void) { }
static bool __init bool_x86_init_noop(void) { return false; }
static void x86_op_int_noop(int cpu) { }
-static u64 u64_x86_init_noop(void) { return 0; }
/*
* The platform setup functions are preset with the default functions
@@ -96,7 +95,7 @@ struct x86_init_ops x86_init __initdata = {
},
.acpi = {
- .get_root_pointer = u64_x86_init_noop,
+ .get_root_pointer = x86_default_get_root_pointer,
.reduced_hw_early_init = acpi_generic_reduced_hw_init,
},
};
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 106482da6388..34edf198708f 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2711,7 +2711,16 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
return true;
- /* default: (not Intel, not AMD), apply Intel's stricter rules... */
+ /* Hygon ("HygonGenuine") */
+ if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
+ ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
+ edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
+ return true;
+
+ /*
+ * default: (not Intel, not AMD, not Hygon), apply Intel's
+ * stricter rules...
+ */
return false;
}
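
The vendor constants compared above pack the 12-byte CPUID vendor string into EBX, EDX and ECX, in that order. A sketch of the packing for "HygonGenuine":

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char vendor[12] = "HygonGenuine";	/* 12 bytes, no NUL kept */
	uint32_t ebx, edx, ecx;

	memcpy(&ebx, vendor + 0, 4);	/* "Hygo" */
	memcpy(&edx, vendor + 4, 4);	/* "nGen" */
	memcpy(&ecx, vendor + 8, 4);	/* "uine" */
	printf("ebx=%#010x edx=%#010x ecx=%#010x\n", ebx, edx, ecx);
	return 0;
}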
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 01d209ab5481..4e80080f277a 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -36,6 +36,8 @@
#include "trace.h"
+#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
+
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
return atomic64_read(&synic->sint[sint]);
@@ -132,8 +134,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
struct kvm_vcpu *vcpu = NULL;
int i;
- if (vpidx < KVM_MAX_VCPUS)
- vcpu = kvm_get_vcpu(kvm, vpidx);
+ if (vpidx >= KVM_MAX_VCPUS)
+ return NULL;
+
+ vcpu = kvm_get_vcpu(kvm, vpidx);
if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
return vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
@@ -689,6 +693,24 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
stimer_cleanup(&hv_vcpu->stimer[i]);
}
+bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
+ return false;
+ return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
+}
+EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
+
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
+ struct hv_vp_assist_page *assist_page)
+{
+ if (!kvm_hv_assist_page_enabled(vcpu))
+ return false;
+ return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+ assist_page, sizeof(*assist_page));
+}
+EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
+
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
struct hv_message *msg = &stimer->msg;
@@ -1040,21 +1062,41 @@ static u64 current_task_runtime_100ns(void)
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
- struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
switch (msr) {
- case HV_X64_MSR_VP_INDEX:
- if (!host)
+ case HV_X64_MSR_VP_INDEX: {
+ struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+ int vcpu_idx = kvm_vcpu_get_idx(vcpu);
+ u32 new_vp_index = (u32)data;
+
+ if (!host || new_vp_index >= KVM_MAX_VCPUS)
return 1;
- hv->vp_index = (u32)data;
+
+ if (new_vp_index == hv_vcpu->vp_index)
+ return 0;
+
+ /*
+ * The VP index is initialized to vcpu_index by
+ * kvm_hv_vcpu_postcreate so they initially match. Now that the
+ * VP index is changing, adjust num_mismatched_vp_indexes if
+ * it now matches or no longer matches vcpu_idx.
+ */
+ if (hv_vcpu->vp_index == vcpu_idx)
+ atomic_inc(&hv->num_mismatched_vp_indexes);
+ else if (new_vp_index == vcpu_idx)
+ atomic_dec(&hv->num_mismatched_vp_indexes);
+
+ hv_vcpu->vp_index = new_vp_index;
break;
+ }
case HV_X64_MSR_VP_ASSIST_PAGE: {
u64 gfn;
unsigned long addr;
if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
- hv->hv_vapic = data;
- if (kvm_lapic_enable_pv_eoi(vcpu, 0))
+ hv_vcpu->hv_vapic = data;
+ if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
return 1;
break;
}
@@ -1062,12 +1104,19 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
if (kvm_is_error_hva(addr))
return 1;
- if (__clear_user((void __user *)addr, PAGE_SIZE))
+
+ /*
+ * Clear apic_assist portion of struct hv_vp_assist_page
+ * only, there can be valuable data in the rest which needs
+ * to be preserved e.g. on migration.
+ */
+ if (__clear_user((void __user *)addr, sizeof(u32)))
return 1;
- hv->hv_vapic = data;
+ hv_vcpu->hv_vapic = data;
kvm_vcpu_mark_page_dirty(vcpu, gfn);
if (kvm_lapic_enable_pv_eoi(vcpu,
- gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
+ gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
+ sizeof(struct hv_vp_assist_page)))
return 1;
break;
}
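/*
 * Illustrative sketch, not part of the patch: the invariant maintained by
 * the atomic_inc/atomic_dec pair above. num_mismatched_vp_indexes must
 * always equal the number of vCPUs whose Hyper-V VP index differs from
 * their KVM vcpu index, as a (hypothetical) recount would compute:
 */
static int example_count_vp_mismatches(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i, mismatches = 0;

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->arch.hyperv.vp_index != i)
			mismatches++;
	/* == atomic_read(&kvm->arch.hyperv.num_mismatched_vp_indexes) */
	return mismatches;
}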
@@ -1080,7 +1129,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
case HV_X64_MSR_VP_RUNTIME:
if (!host)
return 1;
- hv->runtime_offset = data - current_task_runtime_100ns();
+ hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
break;
case HV_X64_MSR_SCONTROL:
case HV_X64_MSR_SVERSION:
@@ -1172,11 +1221,11 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
bool host)
{
u64 data = 0;
- struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
switch (msr) {
case HV_X64_MSR_VP_INDEX:
- data = hv->vp_index;
+ data = hv_vcpu->vp_index;
break;
case HV_X64_MSR_EOI:
return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
@@ -1185,10 +1234,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
case HV_X64_MSR_TPR:
return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
case HV_X64_MSR_VP_ASSIST_PAGE:
- data = hv->hv_vapic;
+ data = hv_vcpu->hv_vapic;
break;
case HV_X64_MSR_VP_RUNTIME:
- data = current_task_runtime_100ns() + hv->runtime_offset;
+ data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
break;
case HV_X64_MSR_SCONTROL:
case HV_X64_MSR_SVERSION:
@@ -1255,32 +1304,47 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
return kvm_hv_get_msr(vcpu, msr, pdata, host);
}
-static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)
+static __always_inline unsigned long *sparse_set_to_vcpu_mask(
+ struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
+ u64 *vp_bitmap, unsigned long *vcpu_bitmap)
{
- int i = 0, j;
+ struct kvm_hv *hv = &kvm->arch.hyperv;
+ struct kvm_vcpu *vcpu;
+ int i, bank, sbank = 0;
- if (!(valid_bank_mask & BIT_ULL(bank_no)))
- return -1;
+ memset(vp_bitmap, 0,
+ KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
+ for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
+ KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
+ vp_bitmap[bank] = sparse_banks[sbank++];
- for (j = 0; j < bank_no; j++)
- if (valid_bank_mask & BIT_ULL(j))
- i++;
+ if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
+ /* for all vcpus vp_index == vcpu_idx */
+ return (unsigned long *)vp_bitmap;
+ }
- return i;
+ bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
+ (unsigned long *)vp_bitmap))
+ __set_bit(i, vcpu_bitmap);
+ }
+ return vcpu_bitmap;
}
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
u16 rep_cnt, bool ex)
{
struct kvm *kvm = current_vcpu->kvm;
- struct kvm_vcpu_hv *hv_current = &current_vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv;
struct hv_tlb_flush_ex flush_ex;
struct hv_tlb_flush flush;
- struct kvm_vcpu *vcpu;
- unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0};
- unsigned long valid_bank_mask = 0;
+ u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
+ DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
+ unsigned long *vcpu_mask;
+ u64 valid_bank_mask;
u64 sparse_banks[64];
- int sparse_banks_len, i;
+ int sparse_banks_len;
bool all_cpus;
if (!ex) {
@@ -1290,6 +1354,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
trace_kvm_hv_flush_tlb(flush.processor_mask,
flush.address_space, flush.flags);
+ valid_bank_mask = BIT_ULL(0);
sparse_banks[0] = flush.processor_mask;
all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
} else {
@@ -1306,7 +1371,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
all_cpus = flush_ex.hv_vp_set.format !=
HV_GENERIC_SET_SPARSE_4K;
- sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
+ sparse_banks_len =
+ bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
sizeof(sparse_banks[0]);
if (!sparse_banks_len && !all_cpus)
@@ -1321,48 +1387,19 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
return HV_STATUS_INVALID_HYPERCALL_INPUT;
}
- cpumask_clear(&hv_current->tlb_lush);
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
- int bank = hv->vp_index / 64, sbank = 0;
-
- if (!all_cpus) {
- /* Banks >64 can't be represented */
- if (bank >= 64)
- continue;
-
- /* Non-ex hypercalls can only address first 64 vCPUs */
- if (!ex && bank)
- continue;
-
- if (ex) {
- /*
- * Check is the bank of this vCPU is in sparse
- * set and get the sparse bank number.
- */
- sbank = get_sparse_bank_no(valid_bank_mask,
- bank);
-
- if (sbank < 0)
- continue;
- }
-
- if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64)))
- continue;
- }
+ cpumask_clear(&hv_vcpu->tlb_flush);
- /*
- * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we
- * can't analyze it here, flush TLB regardless of the specified
- * address space.
- */
- __set_bit(i, vcpu_bitmap);
- }
+ vcpu_mask = all_cpus ? NULL :
+ sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
+ vp_bitmap, vcpu_bitmap);
+ /*
+ * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
+ * analyze it here, flush TLB regardless of the specified address space.
+ */
kvm_make_vcpus_request_mask(kvm,
KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
- vcpu_bitmap, &hv_current->tlb_lush);
+ vcpu_mask, &hv_vcpu->tlb_flush);
ret_success:
/* We always do full TLB flush, set rep_done = rep_cnt. */
@@ -1370,6 +1407,99 @@ ret_success:
((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}
+static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
+ unsigned long *vcpu_bitmap)
+{
+ struct kvm_lapic_irq irq = {
+ .delivery_mode = APIC_DM_FIXED,
+ .vector = vector
+ };
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
+ continue;
+
+ /* We fail only when APIC is disabled */
+ kvm_apic_set_irq(vcpu, &irq, NULL);
+ }
+}
+
+static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
+ bool ex, bool fast)
+{
+ struct kvm *kvm = current_vcpu->kvm;
+ struct hv_send_ipi_ex send_ipi_ex;
+ struct hv_send_ipi send_ipi;
+ u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
+ DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
+ unsigned long *vcpu_mask;
+ unsigned long valid_bank_mask;
+ u64 sparse_banks[64];
+ int sparse_banks_len;
+ u32 vector;
+ bool all_cpus;
+
+ if (!ex) {
+ if (!fast) {
+ if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
+ sizeof(send_ipi))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ sparse_banks[0] = send_ipi.cpu_mask;
+ vector = send_ipi.vector;
+ } else {
+ /* 'reserved' part of hv_send_ipi should be 0 */
+ if (unlikely(ingpa >> 32 != 0))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ sparse_banks[0] = outgpa;
+ vector = (u32)ingpa;
+ }
+ all_cpus = false;
+ valid_bank_mask = BIT_ULL(0);
+
+ trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
+ } else {
+ if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
+ sizeof(send_ipi_ex))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+
+ trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
+ send_ipi_ex.vp_set.format,
+ send_ipi_ex.vp_set.valid_bank_mask);
+
+ vector = send_ipi_ex.vector;
+ valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
+ sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
+ sizeof(sparse_banks[0]);
+
+ all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
+
+ if (!sparse_banks_len)
+ goto ret_success;
+
+ if (!all_cpus &&
+ kvm_read_guest(kvm,
+ ingpa + offsetof(struct hv_send_ipi_ex,
+ vp_set.bank_contents),
+ sparse_banks,
+ sparse_banks_len))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
+
+ if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+
+ vcpu_mask = all_cpus ? NULL :
+ sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
+ vp_bitmap, vcpu_bitmap);
+
+ kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
+
+ret_success:
+ return HV_STATUS_SUCCESS;
+}
+
bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
@@ -1539,6 +1669,20 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
}
ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
break;
+ case HVCALL_SEND_IPI:
+ if (unlikely(rep)) {
+ ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ break;
+ }
+ ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
+ break;
+ case HVCALL_SEND_IPI_EX:
+ if (unlikely(fast || rep)) {
+ ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ break;
+ }
+ ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
+ break;
default:
ret = HV_STATUS_INVALID_HYPERCALL_CODE;
break;
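/*
 * Illustrative sketch, not part of the patch: how sparse_set_to_vcpu_mask()
 * above expands a Hyper-V sparse VP set. Bank N of the flat bitmap covers
 * VP indices N*64 .. N*64+63, and sparse_banks[] supplies data only for the
 * banks present in valid_bank_mask. E.g. valid_bank_mask = 0x5 with
 * sparse_banks = { 0x1, 0x8 } selects VP 0 (bank 0) and VP 131 (bank 2).
 * The helper below is a hypothetical, simplified restatement:
 */
static void example_expand_sparse_set(u64 valid_bank_mask,
				      const u64 *sparse_banks,
				      u64 *vp_bitmap, int nbanks)
{
	int bank, sbank = 0;

	for (bank = 0; bank < nbanks; bank++)
		vp_bitmap[bank] = (valid_bank_mask & BIT_ULL(bank)) ?
				  sparse_banks[sbank++] : 0;
}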
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index d6aa969e20f1..0e66c12ed2c3 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -62,6 +62,10 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
+bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
+ struct hv_vp_assist_page *assist_page);
+
static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
int timer_index)
{
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 17c0472c5b34..3cd227ff807f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -70,6 +70,11 @@
#define APIC_BROADCAST 0xFF
#define X2APIC_BROADCAST 0xFFFFFFFFul
+static bool lapic_timer_advance_adjust_done = false;
+#define LAPIC_TIMER_ADVANCE_ADJUST_DONE 100
+/* step-by-step approximation to mitigate fluctuation */
+#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
+
static inline int apic_test_vector(int vec, void *bitmap)
{
return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -955,14 +960,14 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
map = rcu_dereference(kvm->arch.apic_map);
ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
- if (ret)
+ if (ret) {
+ *r = 0;
for_each_set_bit(i, &bitmap, 16) {
if (!dst[i])
continue;
- if (*r < 0)
- *r = 0;
*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
}
+ }
rcu_read_unlock();
return ret;
@@ -1344,9 +1349,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
- return kvm_apic_hw_enabled(apic) &&
- addr >= apic->base_address &&
- addr < apic->base_address + LAPIC_MMIO_LENGTH;
+ return addr >= apic->base_address &&
+ addr < apic->base_address + LAPIC_MMIO_LENGTH;
}
static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
@@ -1358,6 +1362,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
+ if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+ if (!kvm_check_has_quirk(vcpu->kvm,
+ KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+ return -EOPNOTSUPP;
+
+ memset(data, 0xff, len);
+ return 0;
+ }
+
kvm_lapic_reg_read(apic, offset, len, data);
return 0;
@@ -1464,7 +1477,7 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- u64 guest_tsc, tsc_deadline;
+ u64 guest_tsc, tsc_deadline, ns;
if (!lapic_in_kernel(vcpu))
return;
@@ -1484,6 +1497,24 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
if (guest_tsc < tsc_deadline)
__delay(min(tsc_deadline - guest_tsc,
nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
+
+ if (!lapic_timer_advance_adjust_done) {
+ /* too early */
+ if (guest_tsc < tsc_deadline) {
+ ns = (tsc_deadline - guest_tsc) * 1000000ULL;
+ do_div(ns, vcpu->arch.virtual_tsc_khz);
+ lapic_timer_advance_ns -= min((unsigned int)ns,
+ lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
+ } else {
+ /* too late */
+ ns = (guest_tsc - tsc_deadline) * 1000000ULL;
+ do_div(ns, vcpu->arch.virtual_tsc_khz);
+ lapic_timer_advance_ns += min((unsigned int)ns,
+ lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
+ }
+ if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
+ lapic_timer_advance_adjust_done = true;
+ }
}
static void start_sw_tscdeadline(struct kvm_lapic *apic)
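/*
 * Illustrative sketch, not part of the patch: one step of the
 * lapic_timer_advance_ns auto-tune above. With advance = 1000ns and an
 * expiry observed 500ns late, the applied step is min(500, 1000/8) = 125ns,
 * so the next advance is 1125ns; capping each step at advance/8 damps
 * fluctuation. The helper below is hypothetical:
 */
static unsigned int example_adjust_advance(unsigned int advance_ns,
					   long error_ns) /* > 0: too late */
{
	/* LAPIC_TIMER_ADVANCE_ADJUST_STEP == 8: cap each step at advance/8 */
	unsigned int step = advance_ns / 8;
	unsigned int delta = error_ns > 0 ? error_ns : -error_ns;

	if (delta > step)
		delta = step;
	return error_ns > 0 ? advance_ns + delta : advance_ns - delta;
}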
@@ -1917,6 +1948,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
+ if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+ if (!kvm_check_has_quirk(vcpu->kvm,
+ KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+ return -EOPNOTSUPP;
+
+ return 0;
+ }
+
/*
* APIC register must be aligned on 128-bits boundary.
* 32/64/128 bits registers must be accessed thru 32 bits.
@@ -2605,17 +2644,25 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
return 0;
}
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
u64 addr = data & ~KVM_MSR_ENABLED;
+ struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
+ unsigned long new_len;
+
if (!IS_ALIGNED(addr, 4))
return 1;
vcpu->arch.pv_eoi.msr_val = data;
if (!pv_eoi_enabled(vcpu))
return 0;
- return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
- addr, sizeof(u8));
+
+ if (addr == ghc->gpa && len <= ghc->len)
+ new_len = ghc->len;
+ else
+ new_len = len;
+
+ return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
}
void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
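/*
 * Illustrative sketch, not part of the patch: the length selection above.
 * The PV EOI MSR initializes the gfn_to_hva cache for a single byte, while
 * the Hyper-V VP assist page re-registers the same GPA for the whole
 * struct hv_vp_assist_page; keeping the larger cached length avoids
 * shrinking a mapping that is still needed. Hypothetical restatement:
 */
static unsigned long example_pv_eoi_cache_len(struct gfn_to_hva_cache *ghc,
					      gpa_t addr, unsigned long len)
{
	/* same GPA and the request already fits: keep the larger mapping */
	return (addr == ghc->gpa && len <= ghc->len) ? ghc->len : len;
}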
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index ed0ed39abd36..ff6ef9c3d760 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -120,7 +120,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
}
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
void kvm_lapic_init(void);
void kvm_lapic_exit(void);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e24ea7067373..cf5f572f2305 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
*/
static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts. This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
+
static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)
static gfn_t get_mmio_spte_gfn(u64 spte)
{
- u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
- shadow_nonpresent_or_rsvd_mask;
- u64 gpa = spte & ~mask;
+ u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
& shadow_nonpresent_or_rsvd_mask;
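/*
 * Illustrative sketch, not part of the patch: the high/low GFN split used
 * above, assuming 46 physical address bits and the 5-bit mask length. GFN
 * bits 41..45 would collide with the always-set L1TF defense bits, so they
 * are stored shifted left by 5 (at SPTE bits 46..50) and recombined on read:
 */
static u64 example_decode_mmio_gpa(u64 spte)
{
	u64 lo = spte & GENMASK_ULL(40, 12);	    /* GFN bits below the mask */
	u64 hi = (spte >> 5) & GENMASK_ULL(45, 41); /* parked at bits 46..50 */

	return lo | hi;
}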
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
static void kvm_mmu_reset_all_pte_masks(void)
{
+ u8 low_phys_bits;
+
shadow_user_mask = 0;
shadow_accessed_mask = 0;
shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
* appropriate mask to guard against L1TF attacks. Otherwise, it is
* assumed that the CPU is not vulnerable to L1TF.
*/
+ low_phys_bits = boot_cpu_data.x86_phys_bits;
if (boot_cpu_data.x86_phys_bits <
- 52 - shadow_nonpresent_or_rsvd_mask_len)
+ 52 - shadow_nonpresent_or_rsvd_mask_len) {
shadow_nonpresent_or_rsvd_mask =
rsvd_bits(boot_cpu_data.x86_phys_bits -
shadow_nonpresent_or_rsvd_mask_len,
boot_cpu_data.x86_phys_bits - 1);
+ low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+ }
+ shadow_nonpresent_or_rsvd_lower_gfn_mask =
+ GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}
static int is_cpuid_PSE36(void)
@@ -899,7 +915,7 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
/*
* Make sure the write to vcpu->mode is not reordered in front of
- * reads to sptes. If it does, kvm_commit_zap_page() can see us
+ * reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us
* OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
*/
smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
@@ -916,7 +932,7 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
if (!obj)
- return -ENOMEM;
+ return cache->nobjs >= min ? 0 : -ENOMEM;
cache->objects[cache->nobjs++] = obj;
}
return 0;
@@ -944,7 +960,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
if (!page)
- return -ENOMEM;
+ return cache->nobjs >= min ? 0 : -ENOMEM;
cache->objects[cache->nobjs++] = page;
}
return 0;
@@ -1249,24 +1265,24 @@ pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
mmu_free_pte_list_desc(desc);
}
-static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
+static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc;
struct pte_list_desc *prev_desc;
int i;
if (!rmap_head->val) {
- printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
+ pr_err("%s: %p 0->BUG\n", __func__, spte);
BUG();
} else if (!(rmap_head->val & 1)) {
- rmap_printk("pte_list_remove: %p 1->0\n", spte);
+ rmap_printk("%s: %p 1->0\n", __func__, spte);
if ((u64 *)rmap_head->val != spte) {
- printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
+ pr_err("%s: %p 1->BUG\n", __func__, spte);
BUG();
}
rmap_head->val = 0;
} else {
- rmap_printk("pte_list_remove: %p many->many\n", spte);
+ rmap_printk("%s: %p many->many\n", __func__, spte);
desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
prev_desc = NULL;
while (desc) {
@@ -1280,11 +1296,17 @@ static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
prev_desc = desc;
desc = desc->more;
}
- pr_err("pte_list_remove: %p many->many\n", spte);
+ pr_err("%s: %p many->many\n", __func__, spte);
BUG();
}
}
+static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
+{
+ mmu_spte_clear_track_bits(sptep);
+ __pte_list_remove(sptep, rmap_head);
+}
+
static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
struct kvm_memory_slot *slot)
{
@@ -1333,7 +1355,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
sp = page_header(__pa(spte));
gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
rmap_head = gfn_to_rmap(kvm, gfn, sp);
- pte_list_remove(spte, rmap_head);
+ __pte_list_remove(spte, rmap_head);
}
/*
@@ -1669,7 +1691,7 @@ static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
while ((sptep = rmap_get_first(rmap_head, &iter))) {
rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
- drop_spte(kvm, sptep);
+ pte_list_remove(rmap_head, sptep);
flush = true;
}
@@ -1705,7 +1727,7 @@ restart:
need_flush = 1;
if (pte_write(*ptep)) {
- drop_spte(kvm, sptep);
+ pte_list_remove(rmap_head, sptep);
goto restart;
} else {
new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
@@ -1972,7 +1994,7 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
u64 *parent_pte)
{
- pte_list_remove(parent_pte, &sp->parent_ptes);
+ __pte_list_remove(parent_pte, &sp->parent_ptes);
}
static void drop_parent_pte(struct kvm_mmu_page *sp,
@@ -2165,7 +2187,7 @@ static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
if (sp->role.cr4_pae != !!is_pae(vcpu)
- || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
+ || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return false;
}
@@ -2359,14 +2381,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
int collisions = 0;
LIST_HEAD(invalid_list);
- role = vcpu->arch.mmu.base_role;
+ role = vcpu->arch.mmu->mmu_role.base;
role.level = level;
role.direct = direct;
if (role.direct)
role.cr4_pae = 0;
role.access = access;
- if (!vcpu->arch.mmu.direct_map
- && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
+ if (!vcpu->arch.mmu->direct_map
+ && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
role.quadrant = quadrant;
@@ -2441,11 +2463,11 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
{
iterator->addr = addr;
iterator->shadow_addr = root;
- iterator->level = vcpu->arch.mmu.shadow_root_level;
+ iterator->level = vcpu->arch.mmu->shadow_root_level;
if (iterator->level == PT64_ROOT_4LEVEL &&
- vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL &&
- !vcpu->arch.mmu.direct_map)
+ vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
+ !vcpu->arch.mmu->direct_map)
--iterator->level;
if (iterator->level == PT32E_ROOT_LEVEL) {
@@ -2453,10 +2475,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
* prev_root is currently only used for 64-bit hosts. So only
* the active root_hpa is valid here.
*/
- BUG_ON(root != vcpu->arch.mmu.root_hpa);
+ BUG_ON(root != vcpu->arch.mmu->root_hpa);
iterator->shadow_addr
- = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
+ = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
--iterator->level;
if (!iterator->shadow_addr)
@@ -2467,7 +2489,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
struct kvm_vcpu *vcpu, u64 addr)
{
- shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa,
+ shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
addr);
}
@@ -3079,7 +3101,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
int emulate = 0;
gfn_t pseudo_gfn;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return 0;
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
@@ -3109,16 +3131,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
{
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_MCEERR_AR;
- info.si_addr = (void __user *)address;
- info.si_addr_lsb = PAGE_SHIFT;
-
- send_sig_info(SIGBUS, &info, tsk);
+ send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
}
static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
@@ -3294,7 +3307,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
u64 spte = 0ull;
uint retry_count = 0;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return false;
if (!page_fault_can_be_fast(error_code))
@@ -3464,11 +3477,11 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
}
/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
-void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free)
+void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ ulong roots_to_free)
{
int i;
LIST_HEAD(invalid_list);
- struct kvm_mmu *mmu = &vcpu->arch.mmu;
bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
@@ -3528,20 +3541,20 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
struct kvm_mmu_page *sp;
unsigned i;
- if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL) {
+ if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
spin_lock(&vcpu->kvm->mmu_lock);
if(make_mmu_pages_available(vcpu) < 0) {
spin_unlock(&vcpu->kvm->mmu_lock);
return -ENOSPC;
}
sp = kvm_mmu_get_page(vcpu, 0, 0,
- vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
+ vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
- vcpu->arch.mmu.root_hpa = __pa(sp->spt);
- } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
+ vcpu->arch.mmu->root_hpa = __pa(sp->spt);
+ } else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
for (i = 0; i < 4; ++i) {
- hpa_t root = vcpu->arch.mmu.pae_root[i];
+ hpa_t root = vcpu->arch.mmu->pae_root[i];
MMU_WARN_ON(VALID_PAGE(root));
spin_lock(&vcpu->kvm->mmu_lock);
@@ -3554,9 +3567,9 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
root = __pa(sp->spt);
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
- vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
+ vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
}
- vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+ vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
} else
BUG();
@@ -3570,7 +3583,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
gfn_t root_gfn;
int i;
- root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
+ root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
if (mmu_check_root(vcpu, root_gfn))
return 1;
@@ -3579,8 +3592,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
* Do we shadow a long mode page table? If so we need to
* write-protect the guests page table root.
*/
- if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
- hpa_t root = vcpu->arch.mmu.root_hpa;
+ if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+ hpa_t root = vcpu->arch.mmu->root_hpa;
MMU_WARN_ON(VALID_PAGE(root));
@@ -3590,11 +3603,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
return -ENOSPC;
}
sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
- vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
+ vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
root = __pa(sp->spt);
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
- vcpu->arch.mmu.root_hpa = root;
+ vcpu->arch.mmu->root_hpa = root;
return 0;
}
@@ -3604,17 +3617,17 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
* the shadow page table may be a PAE or a long mode page table.
*/
pm_mask = PT_PRESENT_MASK;
- if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL)
+ if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
for (i = 0; i < 4; ++i) {
- hpa_t root = vcpu->arch.mmu.pae_root[i];
+ hpa_t root = vcpu->arch.mmu->pae_root[i];
MMU_WARN_ON(VALID_PAGE(root));
- if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
- pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
+ if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
+ pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
if (!(pdptr & PT_PRESENT_MASK)) {
- vcpu->arch.mmu.pae_root[i] = 0;
+ vcpu->arch.mmu->pae_root[i] = 0;
continue;
}
root_gfn = pdptr >> PAGE_SHIFT;
@@ -3632,16 +3645,16 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
- vcpu->arch.mmu.pae_root[i] = root | pm_mask;
+ vcpu->arch.mmu->pae_root[i] = root | pm_mask;
}
- vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+ vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
/*
* If we shadow a 32 bit page table with a long mode page
* table we enter this path.
*/
- if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
- if (vcpu->arch.mmu.lm_root == NULL) {
+ if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+ if (vcpu->arch.mmu->lm_root == NULL) {
/*
* The additional page necessary for this is only
* allocated on demand.
@@ -3653,12 +3666,12 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
if (lm_root == NULL)
return 1;
- lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
+ lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
- vcpu->arch.mmu.lm_root = lm_root;
+ vcpu->arch.mmu->lm_root = lm_root;
}
- vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
+ vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
}
return 0;
@@ -3666,7 +3679,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.mmu.direct_map)
+ if (vcpu->arch.mmu->direct_map)
return mmu_alloc_direct_roots(vcpu);
else
return mmu_alloc_shadow_roots(vcpu);
@@ -3677,17 +3690,16 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
int i;
struct kvm_mmu_page *sp;
- if (vcpu->arch.mmu.direct_map)
+ if (vcpu->arch.mmu->direct_map)
return;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return;
vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
- if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
- hpa_t root = vcpu->arch.mmu.root_hpa;
-
+ if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+ hpa_t root = vcpu->arch.mmu->root_hpa;
sp = page_header(root);
/*
@@ -3718,7 +3730,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
for (i = 0; i < 4; ++i) {
- hpa_t root = vcpu->arch.mmu.pae_root[i];
+ hpa_t root = vcpu->arch.mmu->pae_root[i];
if (root && VALID_PAGE(root)) {
root &= PT64_BASE_ADDR_MASK;
@@ -3792,7 +3804,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
int root, leaf;
bool reserved = false;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
goto exit;
walk_shadow_page_lockless_begin(vcpu);
@@ -3809,7 +3821,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
if (!is_shadow_present_pte(spte))
break;
- reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
+ reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte,
iterator.level);
}
@@ -3888,7 +3900,7 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
struct kvm_shadow_walk_iterator iterator;
u64 spte;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return;
walk_shadow_page_lockless_begin(vcpu);
@@ -3915,7 +3927,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
if (r)
return r;
- MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
return nonpaging_map(vcpu, gva & PAGE_MASK,
@@ -3928,8 +3940,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
arch.gfn = gfn;
- arch.direct_map = vcpu->arch.mmu.direct_map;
- arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
+ arch.direct_map = vcpu->arch.mmu->direct_map;
+ arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}
@@ -4035,7 +4047,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;
- MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
if (page_fault_handle_page_track(vcpu, error_code, gfn))
return RET_PF_EMULATE;
@@ -4111,7 +4123,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
{
uint i;
struct kvm_mmu_root_info root;
- struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
root.cr3 = mmu->get_cr3(vcpu);
root.hpa = mmu->root_hpa;
@@ -4134,7 +4146,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
union kvm_mmu_page_role new_role,
bool skip_tlb_flush)
{
- struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
/*
* For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
@@ -4185,7 +4197,8 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
bool skip_tlb_flush)
{
if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
- kvm_mmu_free_roots(vcpu, KVM_MMU_ROOT_CURRENT);
+ kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
+ KVM_MMU_ROOT_CURRENT);
}
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
@@ -4203,7 +4216,7 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
static void inject_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
- vcpu->arch.mmu.inject_page_fault(vcpu, fault);
+ vcpu->arch.mmu->inject_page_fault(vcpu, fault);
}
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
@@ -4407,7 +4420,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
- bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+ bool uses_nx = context->nx ||
+ context->mmu_role.base.smep_andnot_wp;
struct rsvd_bits_validate *shadow_zero_check;
int i;
@@ -4546,7 +4560,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
* SMAP:kernel-mode data accesses from user-mode
* mappings should fault. A fault is considered
* as a SMAP violation if all of the following
- * conditions are ture:
+ * conditions are true:
* - X86_CR4_SMAP is set in CR4
* - A user page is accessed
* - The access is not a fetch
@@ -4707,27 +4721,65 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}
-static union kvm_mmu_page_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
+static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+{
+ union kvm_mmu_extended_role ext = {0};
+
+ ext.cr0_pg = !!is_paging(vcpu);
+ ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+ ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+ ext.cr4_pse = !!is_pse(vcpu);
+ ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
+ ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
+
+ ext.valid = 1;
+
+ return ext;
+}
+
+static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
+ bool base_only)
{
- union kvm_mmu_page_role role = {0};
+ union kvm_mmu_role role = {0};
+
+ role.base.access = ACC_ALL;
+ role.base.nxe = !!is_nx(vcpu);
+ role.base.cr4_pae = !!is_pae(vcpu);
+ role.base.cr0_wp = is_write_protection(vcpu);
+ role.base.smm = is_smm(vcpu);
+ role.base.guest_mode = is_guest_mode(vcpu);
- role.guest_mode = is_guest_mode(vcpu);
- role.smm = is_smm(vcpu);
- role.ad_disabled = (shadow_accessed_mask == 0);
- role.level = kvm_x86_ops->get_tdp_level(vcpu);
- role.direct = true;
- role.access = ACC_ALL;
+ if (base_only)
+ return role;
+
+ role.ext = kvm_calc_mmu_role_ext(vcpu);
+
+ return role;
+}
+
+static union kvm_mmu_role
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+{
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+
+ role.base.ad_disabled = (shadow_accessed_mask == 0);
+ role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
+ role.base.direct = true;
return role;
}
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
- struct kvm_mmu *context = &vcpu->arch.mmu;
+ struct kvm_mmu *context = vcpu->arch.mmu;
+ union kvm_mmu_role new_role =
+ kvm_calc_tdp_mmu_root_page_role(vcpu, false);
+
+ new_role.base.word &= mmu_base_role_mask.word;
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
- context->base_role.word = mmu_base_role_mask.word &
- kvm_calc_tdp_mmu_root_page_role(vcpu).word;
+ context->mmu_role.as_u64 = new_role.as_u64;
context->page_fault = tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
@@ -4767,36 +4819,36 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
-static union kvm_mmu_page_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
-{
- union kvm_mmu_page_role role = {0};
- bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
- bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
-
- role.nxe = is_nx(vcpu);
- role.cr4_pae = !!is_pae(vcpu);
- role.cr0_wp = is_write_protection(vcpu);
- role.smep_andnot_wp = smep && !is_write_protection(vcpu);
- role.smap_andnot_wp = smap && !is_write_protection(vcpu);
- role.guest_mode = is_guest_mode(vcpu);
- role.smm = is_smm(vcpu);
- role.direct = !is_paging(vcpu);
- role.access = ACC_ALL;
+static union kvm_mmu_role
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+{
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+
+ role.base.smep_andnot_wp = role.ext.cr4_smep &&
+ !is_write_protection(vcpu);
+ role.base.smap_andnot_wp = role.ext.cr4_smap &&
+ !is_write_protection(vcpu);
+ role.base.direct = !is_paging(vcpu);
if (!is_long_mode(vcpu))
- role.level = PT32E_ROOT_LEVEL;
+ role.base.level = PT32E_ROOT_LEVEL;
else if (is_la57_mode(vcpu))
- role.level = PT64_ROOT_5LEVEL;
+ role.base.level = PT64_ROOT_5LEVEL;
else
- role.level = PT64_ROOT_4LEVEL;
+ role.base.level = PT64_ROOT_4LEVEL;
return role;
}
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
- struct kvm_mmu *context = &vcpu->arch.mmu;
+ struct kvm_mmu *context = vcpu->arch.mmu;
+ union kvm_mmu_role new_role =
+ kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+
+ new_role.base.word &= mmu_base_role_mask.word;
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
if (!is_paging(vcpu))
nonpaging_init_context(vcpu, context);
@@ -4807,22 +4859,28 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
else
paging32_init_context(vcpu, context);
- context->base_role.word = mmu_base_role_mask.word &
- kvm_calc_shadow_mmu_root_page_role(vcpu).word;
+ context->mmu_role.as_u64 = new_role.as_u64;
reset_shadow_zero_bits_mask(vcpu, context);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
-static union kvm_mmu_page_role
-kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
+static union kvm_mmu_role
+kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
+ bool execonly)
{
- union kvm_mmu_page_role role = vcpu->arch.mmu.base_role;
+ union kvm_mmu_role role;
+
+ /* Base role is inherited from root_mmu */
+ role.base.word = vcpu->arch.root_mmu.mmu_role.base.word;
+ role.ext = kvm_calc_mmu_role_ext(vcpu);
+
+ role.base.level = PT64_ROOT_4LEVEL;
+ role.base.direct = false;
+ role.base.ad_disabled = !accessed_dirty;
+ role.base.guest_mode = true;
+ role.base.access = ACC_ALL;
- role.level = PT64_ROOT_4LEVEL;
- role.direct = false;
- role.ad_disabled = !accessed_dirty;
- role.guest_mode = true;
- role.access = ACC_ALL;
+ role.ext.execonly = execonly;
return role;
}
@@ -4830,11 +4888,17 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
bool accessed_dirty, gpa_t new_eptp)
{
- struct kvm_mmu *context = &vcpu->arch.mmu;
- union kvm_mmu_page_role root_page_role =
- kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
+ struct kvm_mmu *context = vcpu->arch.mmu;
+ union kvm_mmu_role new_role =
+ kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
+ execonly);
+
+ __kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
+
+ new_role.base.word &= mmu_base_role_mask.word;
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
- __kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false);
context->shadow_root_level = PT64_ROOT_4LEVEL;
context->nx = true;
@@ -4846,7 +4910,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->update_pte = ept_update_pte;
context->root_level = PT64_ROOT_4LEVEL;
context->direct_map = false;
- context->base_role.word = root_page_role.word & mmu_base_role_mask.word;
+ context->mmu_role.as_u64 = new_role.as_u64;
+
update_permission_bitmask(vcpu, context, true);
update_pkru_bitmask(vcpu, context, true);
update_last_nonleaf_level(vcpu, context);
@@ -4857,7 +4922,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
- struct kvm_mmu *context = &vcpu->arch.mmu;
+ struct kvm_mmu *context = vcpu->arch.mmu;
kvm_init_shadow_mmu(vcpu);
context->set_cr3 = kvm_x86_ops->set_cr3;
@@ -4868,14 +4933,20 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
+ union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
+ new_role.base.word &= mmu_base_role_mask.word;
+ if (new_role.as_u64 == g_context->mmu_role.as_u64)
+ return;
+
+ g_context->mmu_role.as_u64 = new_role.as_u64;
g_context->get_cr3 = get_cr3;
g_context->get_pdptr = kvm_pdptr_read;
g_context->inject_page_fault = kvm_inject_page_fault;
/*
- * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa using
+ * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
* L1's nested page tables (e.g. EPT12). The nested translation
* of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
* L2's page tables as the first level of translation and L1's
@@ -4914,10 +4985,10 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
if (reset_roots) {
uint i;
- vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+ vcpu->arch.mmu->root_hpa = INVALID_PAGE;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+ vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
}
if (mmu_is_nested(vcpu))
@@ -4932,10 +5003,14 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
+ union kvm_mmu_role role;
+
if (tdp_enabled)
- return kvm_calc_tdp_mmu_root_page_role(vcpu);
+ role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
else
- return kvm_calc_shadow_mmu_root_page_role(vcpu);
+ role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+
+ return role.base;
}
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
@@ -4965,8 +5040,10 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
- kvm_mmu_free_roots(vcpu, KVM_MMU_ROOTS_ALL);
- WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
+ WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
+ kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
+ WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
@@ -4980,7 +5057,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
}
++vcpu->kvm->stat.mmu_pte_updated;
- vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
+ vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
@@ -5157,10 +5234,12 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
local_flush = true;
while (npte--) {
+ u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
+
entry = *spte;
mmu_page_zap_pte(vcpu->kvm, sp, spte);
if (gentry &&
- !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
+ !((sp->role.word ^ base_role)
& mmu_base_role_mask.word) && rmap_can_add(vcpu))
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
if (need_remote_flush(entry, *spte))
@@ -5178,7 +5257,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
gpa_t gpa;
int r;
- if (vcpu->arch.mmu.direct_map)
+ if (vcpu->arch.mmu->direct_map)
return 0;
gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
@@ -5214,10 +5293,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
{
int r, emulation_type = 0;
enum emulation_result er;
- bool direct = vcpu->arch.mmu.direct_map;
+ bool direct = vcpu->arch.mmu->direct_map;
/* With shadow page tables, fault_address contains a GVA or nGPA. */
- if (vcpu->arch.mmu.direct_map) {
+ if (vcpu->arch.mmu->direct_map) {
vcpu->arch.gpa_available = true;
vcpu->arch.gpa_val = cr2;
}
@@ -5230,8 +5309,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
}
if (r == RET_PF_INVALID) {
- r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
- false);
+ r = vcpu->arch.mmu->page_fault(vcpu, cr2,
+ lower_32_bits(error_code),
+ false);
WARN_ON(r == RET_PF_INVALID);
}
@@ -5247,7 +5327,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
* paging in both guests. If true, we simply unprotect the page
* and resume the guest.
*/
- if (vcpu->arch.mmu.direct_map &&
+ if (vcpu->arch.mmu->direct_map &&
(error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
return 1;
@@ -5295,7 +5375,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
- struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
int i;
/* INVLPG on a non-canonical address is a NOP according to the SDM. */
@@ -5326,7 +5406,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{
- struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
bool tlb_flush = false;
uint i;
@@ -5370,8 +5450,8 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
- free_page((unsigned long)vcpu->arch.mmu.pae_root);
- free_page((unsigned long)vcpu->arch.mmu.lm_root);
+ free_page((unsigned long)vcpu->arch.mmu->pae_root);
+ free_page((unsigned long)vcpu->arch.mmu->lm_root);
}
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -5391,9 +5471,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
if (!page)
return -ENOMEM;
- vcpu->arch.mmu.pae_root = page_address(page);
+ vcpu->arch.mmu->pae_root = page_address(page);
for (i = 0; i < 4; ++i)
- vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
+ vcpu->arch.mmu->pae_root[i] = INVALID_PAGE;
return 0;
}
@@ -5402,22 +5482,21 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
uint i;
- vcpu->arch.walk_mmu = &vcpu->arch.mmu;
- vcpu->arch.mmu.root_hpa = INVALID_PAGE;
- vcpu->arch.mmu.translate_gpa = translate_gpa;
- vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
+ vcpu->arch.mmu = &vcpu->arch.root_mmu;
+ vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+ vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
+ vcpu->arch.root_mmu.translate_gpa = translate_gpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+ vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
- return alloc_mmu_pages(vcpu);
-}
-
-void kvm_mmu_setup(struct kvm_vcpu *vcpu)
-{
- MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
+ vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
- kvm_init_mmu(vcpu, true);
+ vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
+ return alloc_mmu_pages(vcpu);
}
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
@@ -5600,7 +5679,7 @@ restart:
if (sp->role.direct &&
!kvm_is_reserved_pfn(pfn) &&
PageTransCompoundMap(pfn_to_page(pfn))) {
- drop_spte(kvm, sptep);
+ pte_list_remove(rmap_head, sptep);
need_tlb_flush = 1;
goto restart;
}
@@ -5857,6 +5936,16 @@ int kvm_mmu_module_init(void)
{
int ret = -ENOMEM;
+ /*
+ * MMU roles use union aliasing which is, generally speaking,
+ * undefined behavior. However, we supposedly know how compilers behave
+ * and the current status quo is unlikely to change. Guardians below are
+ * supposed to let us know if the assumption becomes false.
+ */
+ BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
+ BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
+ BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
+
kvm_mmu_reset_all_pte_masks();
pte_list_desc_cache = kmem_cache_create("pte_list_desc",
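/*
 * Illustrative sketch, not part of the patch: the union aliasing the
 * BUILD_BUG_ONs above pin down. The code relies on the base and extended
 * roles packing into one 64-bit word so that a single as_u64 compare
 * covers both halves at once; schematically (hypothetical name):
 */
union example_mmu_role {
	u64 as_u64;
	struct {
		u32 base;	/* union kvm_mmu_page_role, 32 bits */
		u32 ext;	/* union kvm_mmu_extended_role, 32 bits */
	};
};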
@@ -5886,7 +5975,7 @@ out:
}
/*
- * Caculate mmu pages needed for kvm.
+ * Calculate mmu pages needed for kvm.
*/
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 1fab69c0b2f3..c7b333147c4a 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -43,11 +43,6 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
-#define PT_PDPE_LEVEL 3
-#define PT_DIRECTORY_LEVEL 2
-#define PT_PAGE_TABLE_LEVEL 1
-#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)
-
static inline u64 rsvd_bits(int s, int e)
{
if (e < s)
@@ -80,7 +75,7 @@ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
- if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
+ if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
return 0;
return kvm_mmu_load(vcpu);
@@ -102,9 +97,9 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
{
- if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
- vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa |
- kvm_get_active_pcid(vcpu));
+ if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
+ vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
+ kvm_get_active_pcid(vcpu));
}
/*
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 1272861e77b9..abac7e208853 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -59,19 +59,19 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
int i;
struct kvm_mmu_page *sp;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return;
- if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
- hpa_t root = vcpu->arch.mmu.root_hpa;
+ if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+ hpa_t root = vcpu->arch.mmu->root_hpa;
sp = page_header(root);
- __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu.root_level);
+ __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level);
return;
}
for (i = 0; i < 4; ++i) {
- hpa_t root = vcpu->arch.mmu.pae_root[i];
+ hpa_t root = vcpu->arch.mmu->pae_root[i];
if (root && VALID_PAGE(root)) {
root &= PT64_BASE_ADDR_MASK;
@@ -122,7 +122,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
hpa = pfn << PAGE_SHIFT;
if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
- "ent %llxn", vcpu->arch.mmu.root_level, pfn,
+ "ent %llxn", vcpu->arch.mmu->root_level, pfn,
hpa, *sptep);
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 14ffd973df54..7cf2185b7eb5 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -158,14 +158,15 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
u64 gpte)
{
- if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+ if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
goto no_present;
if (!FNAME(is_present_gpte)(gpte))
goto no_present;
/* if accessed bit is not supported prefetch non accessed gpte */
- if (PT_HAVE_ACCESSED_DIRTY(&vcpu->arch.mmu) && !(gpte & PT_GUEST_ACCESSED_MASK))
+ if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
+ !(gpte & PT_GUEST_ACCESSED_MASK))
goto no_present;
return false;
@@ -480,7 +481,7 @@ error:
static int FNAME(walk_addr)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
- return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
+ return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
access);
}
@@ -509,7 +510,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
gfn = gpte_to_gfn(gpte);
pte_access = sp->role.access & FNAME(gpte_access)(gpte);
- FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
+ FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
no_dirty_log && (pte_access & ACC_WRITE_MASK));
if (is_error_pfn(pfn))
@@ -604,7 +605,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
direct_access = gw->pte_access;
- top_level = vcpu->arch.mmu.root_level;
+ top_level = vcpu->arch.mmu->root_level;
if (top_level == PT32E_ROOT_LEVEL)
top_level = PT32_ROOT_LEVEL;
/*
@@ -616,7 +617,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
if (FNAME(gpte_changed)(vcpu, gw, top_level))
goto out_gpte_changed;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
goto out_gpte_changed;
for (shadow_walk_init(&it, vcpu, addr);
@@ -1004,7 +1005,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
gfn = gpte_to_gfn(gpte);
pte_access = sp->role.access;
pte_access &= FNAME(gpte_access)(gpte);
- FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
+ FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
&nr_present))
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 89c4c5aa15f1..0e21ccc46792 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -436,14 +436,18 @@ static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
static inline bool svm_sev_enabled(void)
{
- return max_sev_asid;
+ return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}
static inline bool sev_guest(struct kvm *kvm)
{
+#ifdef CONFIG_KVM_AMD_SEV
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
return sev->active;
+#else
+ return false;
+#endif
}
static inline int sev_get_asid(struct kvm *kvm)
@@ -805,6 +809,8 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu)
nested_svm_check_exception(svm, nr, has_error_code, error_code))
return;
+ kvm_deliver_exception_payload(&svm->vcpu);
+
if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
@@ -1226,8 +1232,7 @@ static __init int sev_hardware_setup(void)
min_sev_asid = cpuid_edx(0x8000001F);
/* Initialize SEV ASID bitmap */
- sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
- sizeof(unsigned long), GFP_KERNEL);
+ sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
if (!sev_asid_bitmap)
return 1;
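For reference, bitmap_zalloc()/bitmap_free() from <linux/bitmap.h> wrap exactly
the open-coded allocation being replaced; a minimal equivalence sketch:

	unsigned long *bm;

	/* old: element count computed by hand, zeroed by kcalloc() */
	bm = kcalloc(BITS_TO_LONGS(max_sev_asid), sizeof(unsigned long),
		     GFP_KERNEL);
	kfree(bm);
	/* new: same size and zero-initialization, freed with the matching helper */
	bm = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	bitmap_free(bm);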
@@ -1405,7 +1410,7 @@ static __exit void svm_hardware_unsetup(void)
int cpu;
if (svm_sev_enabled())
- kfree(sev_asid_bitmap);
+ bitmap_free(sev_asid_bitmap);
for_each_possible_cpu(cpu)
svm_cpu_uninit(cpu);
@@ -2919,18 +2924,18 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
WARN_ON(mmu_is_nested(vcpu));
kvm_init_shadow_mmu(vcpu);
- vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
- vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
- vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
- vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
- vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu);
- reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
+ vcpu->arch.mmu->set_cr3 = nested_svm_set_tdp_cr3;
+ vcpu->arch.mmu->get_cr3 = nested_svm_get_tdp_cr3;
+ vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
+ vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
+ vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
+ reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
- vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+ vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
static int nested_svm_check_permissions(struct vcpu_svm *svm)
@@ -2966,16 +2971,13 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
svm->vmcb->control.exit_info_1 = error_code;
/*
- * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception.
- * The fix is to add the ancillary datum (CR2 or DR6) to structs
- * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be
- * written only when inject_pending_event runs (DR6 would written here
- * too). This should be conditional on a new capability---if the
- * capability is disabled, kvm_multiple_exception would write the
- * ancillary information to CR2 or DR6, for backwards ABI-compatibility.
+ * EXITINFO2 is undefined for all exception intercepts other
+ * than #PF.
*/
if (svm->vcpu.arch.exception.nested_apf)
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
+ else if (svm->vcpu.arch.exception.has_payload)
+ svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
else
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
@@ -5639,26 +5641,24 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%r13, %c[r13](%[svm]) \n\t"
"mov %%r14, %c[r14](%[svm]) \n\t"
"mov %%r15, %c[r15](%[svm]) \n\t"
-#endif
/*
* Clear host registers marked as clobbered to prevent
* speculative use.
*/
- "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
- "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
- "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
- "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
- "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
-#ifdef CONFIG_X86_64
- "xor %%r8, %%r8 \n\t"
- "xor %%r9, %%r9 \n\t"
- "xor %%r10, %%r10 \n\t"
- "xor %%r11, %%r11 \n\t"
- "xor %%r12, %%r12 \n\t"
- "xor %%r13, %%r13 \n\t"
- "xor %%r14, %%r14 \n\t"
- "xor %%r15, %%r15 \n\t"
+ "xor %%r8d, %%r8d \n\t"
+ "xor %%r9d, %%r9d \n\t"
+ "xor %%r10d, %%r10d \n\t"
+ "xor %%r11d, %%r11d \n\t"
+ "xor %%r12d, %%r12d \n\t"
+ "xor %%r13d, %%r13d \n\t"
+ "xor %%r14d, %%r14d \n\t"
+ "xor %%r15d, %%r15d \n\t"
#endif
+ "xor %%ebx, %%ebx \n\t"
+ "xor %%ecx, %%ecx \n\t"
+ "xor %%edx, %%edx \n\t"
+ "xor %%esi, %%esi \n\t"
+ "xor %%edi, %%edi \n\t"
"pop %%" _ASM_BP
:
: [svm]"a"(svm),
@@ -7037,6 +7037,13 @@ failed:
return ret;
}
+static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version)
+{
+ /* Intel-only feature */
+ return -ENODEV;
+}
+
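The stub keeps the new kvm_x86_ops hook (wired up below) non-NULL on AMD so
common x86 code can call it unconditionally; roughly, from a hypothetical
caller's perspective:

	uint16_t vmcs_version;
	int r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version);

	if (r)
		return r;	/* -ENODEV here: eVMCS is Intel-only */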
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -7149,6 +7156,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.check_intercept = svm_check_intercept,
.handle_external_intr = svm_handle_external_intr,
+ .request_immediate_exit = __kvm_request_immediate_exit,
+
.sched_in = svm_sched_in,
.pmu_ops = &amd_pmu_ops,
@@ -7164,6 +7173,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.mem_enc_op = svm_mem_enc_op,
.mem_enc_reg_region = svm_register_enc_region,
.mem_enc_unreg_region = svm_unregister_enc_region,
+
+ .nested_enable_evmcs = nested_enable_evmcs,
};
static int __init svm_init(void)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 0f997683404f..0659465a745c 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1418,6 +1418,48 @@ TRACE_EVENT(kvm_hv_flush_tlb_ex,
__entry->valid_bank_mask, __entry->format,
__entry->address_space, __entry->flags)
);
+
+/*
+ * Tracepoints for kvm_hv_send_ipi.
+ */
+TRACE_EVENT(kvm_hv_send_ipi,
+ TP_PROTO(u32 vector, u64 processor_mask),
+ TP_ARGS(vector, processor_mask),
+
+ TP_STRUCT__entry(
+ __field(u32, vector)
+ __field(u64, processor_mask)
+ ),
+
+ TP_fast_assign(
+ __entry->vector = vector;
+ __entry->processor_mask = processor_mask;
+ ),
+
+ TP_printk("vector %x processor_mask 0x%llx",
+ __entry->vector, __entry->processor_mask)
+);
+
+TRACE_EVENT(kvm_hv_send_ipi_ex,
+ TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
+ TP_ARGS(vector, format, valid_bank_mask),
+
+ TP_STRUCT__entry(
+ __field(u32, vector)
+ __field(u64, format)
+ __field(u64, valid_bank_mask)
+ ),
+
+ TP_fast_assign(
+ __entry->vector = vector;
+ __entry->format = format;
+ __entry->valid_bank_mask = valid_bank_mask;
+ ),
+
+ TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
+ __entry->vector, __entry->format,
+ __entry->valid_bank_mask)
+);
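Each TRACE_EVENT(name, ...) above generates a trace_name() helper whose
signature is the TP_PROTO; the Hyper-V send-IPI hypercall handlers are expected
to emit the events roughly as follows (argument names illustrative):

	trace_kvm_hv_send_ipi(irq.vector, processor_mask);
	trace_kvm_hv_send_ipi_ex(irq.vector, send_ipi_ex.vp_set.format,
				 valid_bank_mask);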
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 533a327372c8..4555077d69ce 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -20,6 +20,7 @@
#include "mmu.h"
#include "cpuid.h"
#include "lapic.h"
+#include "hyperv.h"
#include <linux/kvm_host.h>
#include <linux/module.h>
@@ -61,7 +62,7 @@
#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
- ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
+ ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg)
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
@@ -107,9 +108,12 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
* VMX and be a hypervisor for its own guests. If nested=0, guests may not
* use VMX instructions.
*/
-static bool __read_mostly nested = 0;
+static bool __read_mostly nested = 1;
module_param(nested, bool, S_IRUGO);
+static bool __read_mostly nested_early_check = 0;
+module_param(nested_early_check, bool, S_IRUGO);
+
static u64 __read_mostly host_xss;
static bool __read_mostly enable_pml = 1;
@@ -121,7 +125,6 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
#define MSR_BITMAP_MODE_X2APIC 1
#define MSR_BITMAP_MODE_X2APIC_APICV 2
-#define MSR_BITMAP_MODE_LM 4
#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
@@ -132,7 +135,7 @@ static bool __read_mostly enable_preemption_timer = 1;
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif
-#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
+#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON \
(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \
@@ -188,6 +191,7 @@ static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);
extern const ulong vmx_return;
+extern const ulong vmx_early_consistency_check_return;
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
@@ -397,6 +401,7 @@ struct loaded_vmcs {
int cpu;
bool launched;
bool nmi_known_unmasked;
+ bool hv_timer_armed;
/* Support for vnmi-less CPUs */
int soft_vnmi_blocked;
ktime_t entry_time;
@@ -827,14 +832,28 @@ struct nested_vmx {
*/
struct vmcs12 *cached_shadow_vmcs12;
/*
- * Indicates if the shadow vmcs must be updated with the
- * data hold by vmcs12
+ * Indicates if the shadow vmcs or enlightened vmcs must be updated
+ * with the data held by struct vmcs12.
*/
- bool sync_shadow_vmcs;
+ bool need_vmcs12_sync;
bool dirty_vmcs12;
+ /*
+ * vmcs02 has been initialized, i.e. state that is constant for
+ * vmcs02 has been written to the backing VMCS. Initialization
+ * is delayed until L1 actually attempts to run a nested VM.
+ */
+ bool vmcs02_initialized;
+
bool change_vmcs01_virtual_apic_mode;
+ /*
+ * Enlightened VMCS has been enabled. It does not mean that L1 has to
+ * use it. However, VMX features available to L1 will be limited based
+ * on what the enlightened VMCS supports.
+ */
+ bool enlightened_vmcs_enabled;
+
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
@@ -856,6 +875,7 @@ struct nested_vmx {
/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
u64 vmcs01_debugctl;
+ u64 vmcs01_guest_bndcfgs;
u16 vpid02;
u16 last_vpid;
@@ -869,6 +889,10 @@ struct nested_vmx {
/* in guest mode on SMM entry? */
bool guest_mode;
} smm;
+
+ gpa_t hv_evmcs_vmptr;
+ struct page *hv_evmcs_page;
+ struct hv_enlightened_vmcs *hv_evmcs;
};
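The three new fields describe a single mapping of L1's enlightened VMCS:
hv_evmcs_vmptr is the guest-physical address L1 registered, hv_evmcs_page the
pinned page backing it, and hv_evmcs its kmap()ed kernel address. They are set
up and torn down together, so a sketch of the implied invariant (hypothetical
assertion, not in the patch):

	WARN_ON(!!vmx->nested.hv_evmcs != !!vmx->nested.hv_evmcs_page);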
#define POSTED_INTR_ON 0
@@ -1019,6 +1043,8 @@ struct vcpu_vmx {
int ple_window;
bool ple_window_dirty;
+ bool req_immediate_exit;
+
/* Support for PML */
#define PML_ENTITY_NUM 512
struct page *pml_pg;
@@ -1378,6 +1404,49 @@ DEFINE_STATIC_KEY_FALSE(enable_evmcs);
#define KVM_EVMCS_VERSION 1
+/*
+ * Enlightened VMCSv1 doesn't support these:
+ *
+ * POSTED_INTR_NV = 0x00000002,
+ * GUEST_INTR_STATUS = 0x00000810,
+ * APIC_ACCESS_ADDR = 0x00002014,
+ * POSTED_INTR_DESC_ADDR = 0x00002016,
+ * EOI_EXIT_BITMAP0 = 0x0000201c,
+ * EOI_EXIT_BITMAP1 = 0x0000201e,
+ * EOI_EXIT_BITMAP2 = 0x00002020,
+ * EOI_EXIT_BITMAP3 = 0x00002022,
+ * GUEST_PML_INDEX = 0x00000812,
+ * PML_ADDRESS = 0x0000200e,
+ * VM_FUNCTION_CONTROL = 0x00002018,
+ * EPTP_LIST_ADDRESS = 0x00002024,
+ * VMREAD_BITMAP = 0x00002026,
+ * VMWRITE_BITMAP = 0x00002028,
+ *
+ * TSC_MULTIPLIER = 0x00002032,
+ * PLE_GAP = 0x00004020,
+ * PLE_WINDOW = 0x00004022,
+ * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
+ * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
+ * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
+ *
+ * Currently unsupported in KVM:
+ * GUEST_IA32_RTIT_CTL = 0x00002814,
+ */
+#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
+ PIN_BASED_VMX_PREEMPTION_TIMER)
+#define EVMCS1_UNSUPPORTED_2NDEXEC \
+ (SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \
+ SECONDARY_EXEC_APIC_REGISTER_VIRT | \
+ SECONDARY_EXEC_ENABLE_PML | \
+ SECONDARY_EXEC_ENABLE_VMFUNC | \
+ SECONDARY_EXEC_SHADOW_VMCS | \
+ SECONDARY_EXEC_TSC_SCALING | \
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING)
+#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
+#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
+
#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);
@@ -1470,69 +1539,12 @@ static void evmcs_load(u64 phys_addr)
static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
{
- /*
- * Enlightened VMCSv1 doesn't support these:
- *
- * POSTED_INTR_NV = 0x00000002,
- * GUEST_INTR_STATUS = 0x00000810,
- * APIC_ACCESS_ADDR = 0x00002014,
- * POSTED_INTR_DESC_ADDR = 0x00002016,
- * EOI_EXIT_BITMAP0 = 0x0000201c,
- * EOI_EXIT_BITMAP1 = 0x0000201e,
- * EOI_EXIT_BITMAP2 = 0x00002020,
- * EOI_EXIT_BITMAP3 = 0x00002022,
- */
- vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
- vmcs_conf->cpu_based_2nd_exec_ctrl &=
- ~SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
- vmcs_conf->cpu_based_2nd_exec_ctrl &=
- ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
- vmcs_conf->cpu_based_2nd_exec_ctrl &=
- ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
-
- /*
- * GUEST_PML_INDEX = 0x00000812,
- * PML_ADDRESS = 0x0000200e,
- */
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_PML;
-
- /* VM_FUNCTION_CONTROL = 0x00002018, */
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
-
- /*
- * EPTP_LIST_ADDRESS = 0x00002024,
- * VMREAD_BITMAP = 0x00002026,
- * VMWRITE_BITMAP = 0x00002028,
- */
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_SHADOW_VMCS;
-
- /*
- * TSC_MULTIPLIER = 0x00002032,
- */
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_TSC_SCALING;
-
- /*
- * PLE_GAP = 0x00004020,
- * PLE_WINDOW = 0x00004022,
- */
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
-
- /*
- * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
- */
- vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+ vmcs_conf->cpu_based_2nd_exec_ctrl &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
- /*
- * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
- * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
- */
- vmcs_conf->vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
- vmcs_conf->vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ vmcs_conf->vmexit_ctrl &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
+ vmcs_conf->vmentry_ctrl &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
- /*
- * Currently unsupported in KVM:
- * GUEST_IA32_RTIT_CTL = 0x00002814,
- */
}
/* check_ept_pointer() should be under protection of ept_pointer_lock. */
@@ -1557,22 +1569,27 @@ static void check_ept_pointer_match(struct kvm *kvm)
static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
{
- int ret;
+ struct kvm_vcpu *vcpu;
+ int ret = -ENOTSUPP, i;
spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
check_ept_pointer_match(kvm);
+ /*
+ * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of the
+ * base of EPT PML4 table, strip off EPT configuration information.
+ */
if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
- ret = -ENOTSUPP;
- goto out;
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ ret |= hyperv_flush_guest_mapping(
+ to_vmx(kvm_get_vcpu(kvm, i))->ept_pointer & PAGE_MASK);
+ } else {
+ ret = hyperv_flush_guest_mapping(
+ to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK);
}
- ret = hyperv_flush_guest_mapping(
- to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
-
-out:
spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
return ret;
}
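An EPT pointer packs configuration into its low bits alongside the PML4 base,
which is why the hypercall argument is masked. A sketch built from the existing
asm/vmx.h constants:

	/* construct_eptp()-style value: memory type, walk length, A/D enable */
	u64 eptp = root_hpa | VMX_EPTP_MT_WB | VMX_EPTP_PWL_4 |
		   VMX_EPTP_AD_ENABLE_BIT;
	u64 base = eptp & PAGE_MASK;	/* the table base the hypercall wants */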
@@ -1588,6 +1605,35 @@ static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
+static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /* We don't support disabling the feature for simplicity. */
+ if (vmx->nested.enlightened_vmcs_enabled)
+ return 0;
+
+ vmx->nested.enlightened_vmcs_enabled = true;
+
+ /*
+ * vmcs_version represents the range of supported Enlightened VMCS
+ * versions: lower 8 bits is the minimal version, higher 8 bits is the
+ * maximum supported version. KVM supports versions from 1 to
+ * KVM_EVMCS_VERSION.
+ */
+ if (vmcs_version)
+ *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1;
+
+ vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+ vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
+ vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
+ vmx->nested.msrs.secondary_ctls_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
+ vmx->nested.msrs.vmfunc_controls &= ~EVMCS1_UNSUPPORTED_VMFUNC;
+
+ return 0;
+}
+
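The packed vmcs_version reported to userspace encodes a range; a decoding
sketch:

	uint16_t vmcs_version = (KVM_EVMCS_VERSION << 8) | 1;
	uint8_t min_supported = vmcs_version & 0xff;	/* == 1 */
	uint8_t max_supported = vmcs_version >> 8;	/* == KVM_EVMCS_VERSION */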
static inline bool is_exception_n(u32 intr_info, u8 vector)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -1610,11 +1656,6 @@ static inline bool is_page_fault(u32 intr_info)
return is_exception_n(intr_info, PF_VECTOR);
}
-static inline bool is_no_device(u32 intr_info)
-{
- return is_exception_n(intr_info, NM_VECTOR);
-}
-
static inline bool is_invalid_opcode(u32 intr_info)
{
return is_exception_n(intr_info, UD_VECTOR);
@@ -1625,12 +1666,6 @@ static inline bool is_gp_fault(u32 intr_info)
return is_exception_n(intr_info, GP_VECTOR);
}
-static inline bool is_external_interrupt(u32 intr_info)
-{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
- == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
-}
-
static inline bool is_machine_check(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -2056,9 +2091,6 @@ static inline bool is_nmi(u32 intr_info)
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
u32 exit_intr_info,
unsigned long exit_qualification);
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12,
- u32 reason, unsigned long qualification);
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
@@ -2070,7 +2102,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
return -1;
}
-static inline void __invvpid(int ext, u16 vpid, gva_t gva)
+static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
struct {
u64 vpid : 16;
@@ -2079,22 +2111,20 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
} operand = { vpid, 0, gva };
bool error;
- asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na)
- : CC_OUT(na) (error) : "a"(&operand), "c"(ext)
- : "memory");
+ asm volatile (__ex("invvpid %2, %1") CC_SET(na)
+ : CC_OUT(na) (error) : "r"(ext), "m"(operand));
BUG_ON(error);
}
-static inline void __invept(int ext, u64 eptp, gpa_t gpa)
+static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
struct {
u64 eptp, gpa;
} operand = {eptp, gpa};
bool error;
- asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na)
- : CC_OUT(na) (error) : "a" (&operand), "c" (ext)
- : "memory");
+ asm volatile (__ex("invept %2, %1") CC_SET(na)
+ : CC_OUT(na) (error) : "r"(ext), "m"(operand));
BUG_ON(error);
}
@@ -2113,9 +2143,8 @@ static void vmcs_clear(struct vmcs *vmcs)
u64 phys_addr = __pa(vmcs);
bool error;
- asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) CC_SET(na)
- : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
- : "memory");
+ asm volatile (__ex("vmclear %1") CC_SET(na)
+ : CC_OUT(na) (error) : "m"(phys_addr));
if (unlikely(error))
printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
vmcs, phys_addr);
@@ -2138,9 +2167,8 @@ static void vmcs_load(struct vmcs *vmcs)
if (static_branch_unlikely(&enable_evmcs))
return evmcs_load(phys_addr);
- asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na)
- : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
- : "memory");
+ asm volatile (__ex("vmptrld %1") CC_SET(na)
+ : CC_OUT(na) (error) : "m"(phys_addr));
if (unlikely(error))
printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
vmcs, phys_addr);
@@ -2316,8 +2344,8 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
unsigned long value;
- asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
- : "=a"(value) : "d"(field) : "cc");
+ asm volatile (__ex_clear("vmread %1, %0", "%k0")
+ : "=r"(value) : "r"(field));
return value;
}
@@ -2368,8 +2396,8 @@ static __always_inline void __vmcs_writel(unsigned long field, unsigned long val
{
bool error;
- asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na)
- : CC_OUT(na) (error) : "a"(value), "d"(field));
+ asm volatile (__ex("vmwrite %2, %1") CC_SET(na)
+ : CC_OUT(na) (error) : "r"(field), "rm"(value));
if (unlikely(error))
vmwrite_error(field, value);
}
@@ -2700,7 +2728,8 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
u64 guest_val, u64 host_val)
{
vmcs_write64(guest_val_vmcs, guest_val);
- vmcs_write64(host_val_vmcs, host_val);
+ if (host_val_vmcs != HOST_IA32_EFER)
+ vmcs_write64(host_val_vmcs, host_val);
vm_entry_controls_setbit(vmx, entry);
vm_exit_controls_setbit(vmx, exit);
}
@@ -2798,8 +2827,6 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
ignore_bits &= ~(u64)EFER_SCE;
#endif
- clear_atomic_switch_msr(vmx, MSR_EFER);
-
/*
* On EPT, we can't emulate NX, so we must switch EFER atomically.
* On CPUs that support "load IA32_EFER", always switch EFER
@@ -2812,8 +2839,12 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
if (guest_efer != host_efer)
add_atomic_switch_msr(vmx, MSR_EFER,
guest_efer, host_efer, false);
+ else
+ clear_atomic_switch_msr(vmx, MSR_EFER);
return false;
} else {
+ clear_atomic_switch_msr(vmx, MSR_EFER);
+
guest_efer &= ~ignore_bits;
guest_efer |= host_efer & ignore_bits;
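In this non-atomic path the guest runs with the host's EFER image in every bit
it cannot observe differing (NX, and SCE outside 64-bit mode); that is what
makes the cheap shared-MSR save/restore safe. The merge, spelled out:

	guest_efer = (guest_efer & ~ignore_bits) | (host_efer & ignore_bits);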
@@ -2864,6 +2895,8 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
u16 fs_sel, gs_sel;
int i;
+ vmx->req_immediate_exit = false;
+
if (vmx->loaded_cpu_state)
return;
@@ -2894,8 +2927,7 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
}
- if (is_long_mode(&vmx->vcpu))
- wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#else
savesegment(fs, fs_sel);
savesegment(gs, gs_sel);
@@ -2946,8 +2978,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
vmx->loaded_cpu_state = NULL;
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu))
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
kvm_load_ldt(host_state->ldt_sel);
@@ -2975,24 +3006,19 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
#ifdef CONFIG_X86_64
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
{
- if (is_long_mode(&vmx->vcpu)) {
- preempt_disable();
- if (vmx->loaded_cpu_state)
- rdmsrl(MSR_KERNEL_GS_BASE,
- vmx->msr_guest_kernel_gs_base);
- preempt_enable();
- }
+ preempt_disable();
+ if (vmx->loaded_cpu_state)
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ preempt_enable();
return vmx->msr_guest_kernel_gs_base;
}
static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
{
- if (is_long_mode(&vmx->vcpu)) {
- preempt_disable();
- if (vmx->loaded_cpu_state)
- wrmsrl(MSR_KERNEL_GS_BASE, data);
- preempt_enable();
- }
+ preempt_disable();
+ if (vmx->loaded_cpu_state)
+ wrmsrl(MSR_KERNEL_GS_BASE, data);
+ preempt_enable();
vmx->msr_guest_kernel_gs_base = data;
}
#endif
@@ -3270,34 +3296,30 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
unsigned int nr = vcpu->arch.exception.nr;
+ bool has_payload = vcpu->arch.exception.has_payload;
+ unsigned long payload = vcpu->arch.exception.payload;
if (nr == PF_VECTOR) {
if (vcpu->arch.exception.nested_apf) {
*exit_qual = vcpu->arch.apf.nested_apf_token;
return 1;
}
- /*
- * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
- * The fix is to add the ancillary datum (CR2 or DR6) to structs
- * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
- * can be written only when inject_pending_event runs. This should be
- * conditional on a new capability---if the capability is disabled,
- * kvm_multiple_exception would write the ancillary information to
- * CR2 or DR6, for backwards ABI-compatibility.
- */
if (nested_vmx_is_page_fault_vmexit(vmcs12,
vcpu->arch.exception.error_code)) {
- *exit_qual = vcpu->arch.cr2;
- return 1;
- }
- } else {
- if (vmcs12->exception_bitmap & (1u << nr)) {
- if (nr == DB_VECTOR)
- *exit_qual = vcpu->arch.dr6;
- else
- *exit_qual = 0;
+ *exit_qual = has_payload ? payload : vcpu->arch.cr2;
return 1;
}
+ } else if (vmcs12->exception_bitmap & (1u << nr)) {
+ if (nr == DB_VECTOR) {
+ if (!has_payload) {
+ payload = vcpu->arch.dr6;
+ payload &= ~(DR6_FIXED_1 | DR6_BT);
+ payload ^= DR6_RTM;
+ }
+ *exit_qual = payload;
+ } else
+ *exit_qual = 0;
+ return 1;
}
return 0;
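When no explicit payload was queued for a #DB, the exit qualification is
derived from DR6: the always-one bits (DR6_FIXED_1) and BT are not reported,
and RTM is inverted (active-low in DR6, active-high in the exit qualification).
Condensed:

	*exit_qual = (vcpu->arch.dr6 & ~(DR6_FIXED_1 | DR6_BT)) ^ DR6_RTM;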
@@ -3324,6 +3346,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
u32 error_code = vcpu->arch.exception.error_code;
u32 intr_info = nr | INTR_INFO_VALID_MASK;
+ kvm_deliver_exception_payload(vcpu);
+
if (has_error_code) {
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
@@ -3528,9 +3552,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
- if (kvm_mpx_supported())
- msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
-
/* We support free control of debug control saving. */
msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
@@ -3547,8 +3568,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
VM_ENTRY_LOAD_IA32_PAT;
msrs->entry_ctls_high |=
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
- if (kvm_mpx_supported())
- msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* We support free control of debug control loading. */
msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
@@ -3596,12 +3615,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
msrs->secondary_ctls_high);
msrs->secondary_ctls_low = 0;
msrs->secondary_ctls_high &=
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_DESC |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING;
+
/*
* We can emulate "VMCS shadowing," even if the hardware
* doesn't support it.
@@ -3658,6 +3677,10 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
msrs->secondary_ctls_high |=
SECONDARY_EXEC_UNRESTRICTED_GUEST;
+ if (flexpriority_enabled)
+ msrs->secondary_ctls_high |=
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+
/* miscellaneous data */
rdmsr(MSR_IA32_VMX_MISC,
msrs->misc_low,
@@ -4396,9 +4419,7 @@ static void kvm_cpu_vmxon(u64 addr)
cr4_set_bits(X86_CR4_VMXE);
intel_pt_handle_vmx(1);
- asm volatile (ASM_VMX_VMXON_RAX
- : : "a"(&addr), "m"(addr)
- : "memory", "cc");
+ asm volatile ("vmxon %0" : : "m"(addr));
}
static int hardware_enable(void)
@@ -4467,7 +4488,7 @@ static void vmclear_local_loaded_vmcss(void)
*/
static void kvm_cpu_vmxoff(void)
{
- asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
+ asm volatile (__ex("vmxoff"));
intel_pt_handle_vmx(0);
cr4_clear_bits(X86_CR4_VMXE);
@@ -5068,19 +5089,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
if (!msr)
return;
- /*
- * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
- * 64-bit mode as a 64-bit kernel may frequently access the
- * MSR. This means we need to manually save/restore the MSR
- * when switching between guest and host state, but only if
- * the guest is in 64-bit mode. Sync our cached value if the
- * guest is transitioning to 32-bit mode and the CPU contains
- * guest state, i.e. the cache is stale.
- */
-#ifdef CONFIG_X86_64
- if (!(efer & EFER_LMA))
- (void)vmx_read_guest_kernel_gs_base(vmx);
-#endif
vcpu->arch.efer = efer;
if (efer & EFER_LMA) {
vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
@@ -5124,9 +5132,10 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
bool invalidate_gpa)
{
if (enable_ept && (invalidate_gpa || !enable_vpid)) {
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return;
- ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
+ ept_sync_context(construct_eptp(vcpu,
+ vcpu->arch.mmu->root_hpa));
} else {
vpid_sync_context(vpid);
}
@@ -5276,7 +5285,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long hw_cr0;
- hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
+ hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
if (enable_unrestricted_guest)
hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
else {
@@ -5393,9 +5402,10 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
* To use VMXON (and later other VMX instructions), a guest
* must first be able to turn on cr4.VMXE (see handle_vmon()).
* So basically the check on whether to allow nested VMX
- * is here.
+ * is here. We operate under the default treatment of SMM,
+ * so VMX cannot be enabled under SMM.
*/
- if (!nested_vmx_allowed(vcpu))
+ if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
return 1;
}
@@ -6072,9 +6082,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
mode |= MSR_BITMAP_MODE_X2APIC_APICV;
}
- if (is_long_mode(vcpu))
- mode |= MSR_BITMAP_MODE_LM;
-
return mode;
}
@@ -6115,9 +6122,6 @@ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
if (!changed)
return;
- vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
- !(mode & MSR_BITMAP_MODE_LM));
-
if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
@@ -6183,6 +6187,32 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
nested_mark_vmcs12_pages_dirty(vcpu);
}
+static u8 vmx_get_rvi(void)
+{
+ return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+}
+
+static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ void *vapic_page;
+ u32 vppr;
+ int rvi;
+
+ if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
+ !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
+ WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
+ return false;
+
+ rvi = vmx_get_rvi();
+
+ vapic_page = kmap(vmx->nested.virtual_apic_page);
+ vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
+ kunmap(vmx->nested.virtual_apic_page);
+
+ return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
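Deliverability compares only the 4-bit priority classes, hence the 0xf0 masks
on both the RVI and the virtual PPR. As a standalone sketch:

	static inline bool intr_beats_ppr(u8 rvi, u8 vppr)
	{
		/* bits 7:4 of a vector/PPR form its priority class */
		return (rvi & 0xf0) > (vppr & 0xf0);
	}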
static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
bool nested)
{
@@ -6330,6 +6360,9 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
rdmsr(MSR_IA32_CR_PAT, low32, high32);
vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
}
+
+ if (cpu_has_load_ia32_efer)
+ vmcs_write64(HOST_IA32_EFER, host_efer);
}
static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
@@ -6657,7 +6690,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
if (enable_pml) {
- ASSERT(vmx->pml_pg);
vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
@@ -7966,6 +7998,9 @@ static __init int hardware_setup(void)
kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
}
+ if (!cpu_has_vmx_preemption_timer())
+ kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
+
if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
u64 vmx_msr;
@@ -8055,35 +8090,39 @@ static int handle_monitor(struct kvm_vcpu *vcpu)
/*
* The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
- * set the success or error code of an emulated VMX instruction, as specified
- * by Vol 2B, VMX Instruction Reference, "Conventions".
+ * set the success or error code of an emulated VMX instruction (as specified
+ * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
+ * instruction.
*/
-static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
+static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
+ return kvm_skip_emulated_instruction(vcpu);
}
-static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
+static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
X86_EFLAGS_SF | X86_EFLAGS_OF))
| X86_EFLAGS_CF);
+ return kvm_skip_emulated_instruction(vcpu);
}
-static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
- u32 vm_instruction_error)
+static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
+ u32 vm_instruction_error)
{
- if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
- /*
- * failValid writes the error number to the current VMCS, which
- * can't be done there isn't a current VMCS.
- */
- nested_vmx_failInvalid(vcpu);
- return;
- }
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * failValid writes the error number to the current VMCS, which
+ * can't be done if there isn't a current VMCS.
+ */
+ if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
+ return nested_vmx_failInvalid(vcpu);
+
vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_SF | X86_EFLAGS_OF))
@@ -8093,6 +8132,7 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
* We don't need to force a shadow sync because
* VM_INSTRUCTION_ERROR is not shadowed
*/
+ return kvm_skip_emulated_instruction(vcpu);
}
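These helpers implement the arithmetic-flag encodings from the SDM's VMX
"Conventions" section and now also consume the instruction, which is what lets
every call site collapse into a single return statement:

	/* VMsucceed:     CF = PF = AF = ZF = SF = OF = 0
	 * VMfailInvalid: CF = 1, the other five cleared (no current VMCS)
	 * VMfailValid:   ZF = 1, the other five cleared, error code stored
	 *                in the current VMCS's VM_INSTRUCTION_ERROR field
	 */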
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
@@ -8280,6 +8320,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
vmx->nested.vpid02 = allocate_vpid();
+ vmx->nested.vmcs02_initialized = false;
vmx->nested.vmxon = true;
return 0;
@@ -8333,10 +8374,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
- if (vmx->nested.vmxon) {
- nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (vmx->nested.vmxon)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
!= VMXON_NEEDED_FEATURES) {
@@ -8355,21 +8395,17 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
* Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
* which replaces physical address width with 32
*/
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ return nested_vmx_failInvalid(vcpu);
page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
- if (is_error_page(page)) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (is_error_page(page))
+ return nested_vmx_failInvalid(vcpu);
+
if (*(u32 *)kmap(page) != VMCS12_REVISION) {
kunmap(page);
kvm_release_page_clean(page);
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_failInvalid(vcpu);
}
kunmap(page);
kvm_release_page_clean(page);
@@ -8379,8 +8415,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
if (ret)
return ret;
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
/*
@@ -8411,8 +8446,24 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
vmcs_write64(VMCS_LINK_POINTER, -1ull);
}
-static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
+static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx->nested.hv_evmcs)
+ return;
+
+ kunmap(vmx->nested.hv_evmcs_page);
+ kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
+ vmx->nested.hv_evmcs_vmptr = -1ull;
+ vmx->nested.hv_evmcs_page = NULL;
+ vmx->nested.hv_evmcs = NULL;
+}
+
+static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
if (vmx->nested.current_vmptr == -1ull)
return;
@@ -8420,16 +8471,18 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
/* copy to memory all shadowed fields in case
they were modified */
copy_shadow_to_vmcs12(vmx);
- vmx->nested.sync_shadow_vmcs = false;
+ vmx->nested.need_vmcs12_sync = false;
vmx_disable_shadow_vmcs(vmx);
}
vmx->nested.posted_intr_nv = -1;
/* Flush VMCS12 to guest memory */
- kvm_vcpu_write_guest_page(&vmx->vcpu,
+ kvm_vcpu_write_guest_page(vcpu,
vmx->nested.current_vmptr >> PAGE_SHIFT,
vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
+ kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
+
vmx->nested.current_vmptr = -1ull;
}
@@ -8437,8 +8490,10 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
* Free whatever needs to be freed from vmx->nested when L1 goes down, or
* just stops using VMX.
*/
-static void free_nested(struct vcpu_vmx *vmx)
+static void free_nested(struct kvm_vcpu *vcpu)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
return;
@@ -8471,6 +8526,10 @@ static void free_nested(struct vcpu_vmx *vmx)
vmx->nested.pi_desc = NULL;
}
+ kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
+
+ nested_release_evmcs(vcpu);
+
free_loaded_vmcs(&vmx->nested.vmcs02);
}
@@ -8479,9 +8538,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
{
if (!nested_vmx_check_permission(vcpu))
return 1;
- free_nested(to_vmx(vcpu));
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ free_nested(vcpu);
+ return nested_vmx_succeed(vcpu);
}
/* Emulate the VMCLEAR instruction */
@@ -8497,25 +8555,28 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
- nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMCLEAR_INVALID_ADDRESS);
- if (vmptr == vmx->nested.vmxon_ptr) {
- nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (vmptr == vmx->nested.vmxon_ptr)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMCLEAR_VMXON_POINTER);
- if (vmptr == vmx->nested.current_vmptr)
- nested_release_vmcs12(vmx);
+ if (vmx->nested.hv_evmcs_page) {
+ if (vmptr == vmx->nested.hv_evmcs_vmptr)
+ nested_release_evmcs(vcpu);
+ } else {
+ if (vmptr == vmx->nested.current_vmptr)
+ nested_release_vmcs12(vcpu);
- kvm_vcpu_write_guest(vcpu,
- vmptr + offsetof(struct vmcs12, launch_state),
- &zero, sizeof(zero));
+ kvm_vcpu_write_guest(vcpu,
+ vmptr + offsetof(struct vmcs12,
+ launch_state),
+ &zero, sizeof(zero));
+ }
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
@@ -8598,6 +8659,395 @@ static inline int vmcs12_write_any(struct vmcs12 *vmcs12,
}
+static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
+{
+ struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
+ struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+
+ vmcs12->hdr.revision_id = evmcs->revision_id;
+
+ /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
+ vmcs12->tpr_threshold = evmcs->tpr_threshold;
+ vmcs12->guest_rip = evmcs->guest_rip;
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
+ vmcs12->guest_rsp = evmcs->guest_rsp;
+ vmcs12->guest_rflags = evmcs->guest_rflags;
+ vmcs12->guest_interruptibility_info =
+ evmcs->guest_interruptibility_info;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
+ vmcs12->cpu_based_vm_exec_control =
+ evmcs->cpu_based_vm_exec_control;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
+ vmcs12->exception_bitmap = evmcs->exception_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
+ vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
+ vmcs12->vm_entry_intr_info_field =
+ evmcs->vm_entry_intr_info_field;
+ vmcs12->vm_entry_exception_error_code =
+ evmcs->vm_entry_exception_error_code;
+ vmcs12->vm_entry_instruction_len =
+ evmcs->vm_entry_instruction_len;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
+ vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
+ vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
+ vmcs12->host_cr0 = evmcs->host_cr0;
+ vmcs12->host_cr3 = evmcs->host_cr3;
+ vmcs12->host_cr4 = evmcs->host_cr4;
+ vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
+ vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
+ vmcs12->host_rip = evmcs->host_rip;
+ vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
+ vmcs12->host_es_selector = evmcs->host_es_selector;
+ vmcs12->host_cs_selector = evmcs->host_cs_selector;
+ vmcs12->host_ss_selector = evmcs->host_ss_selector;
+ vmcs12->host_ds_selector = evmcs->host_ds_selector;
+ vmcs12->host_fs_selector = evmcs->host_fs_selector;
+ vmcs12->host_gs_selector = evmcs->host_gs_selector;
+ vmcs12->host_tr_selector = evmcs->host_tr_selector;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
+ vmcs12->pin_based_vm_exec_control =
+ evmcs->pin_based_vm_exec_control;
+ vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
+ vmcs12->secondary_vm_exec_control =
+ evmcs->secondary_vm_exec_control;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
+ vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
+ vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
+ vmcs12->msr_bitmap = evmcs->msr_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
+ vmcs12->guest_es_base = evmcs->guest_es_base;
+ vmcs12->guest_cs_base = evmcs->guest_cs_base;
+ vmcs12->guest_ss_base = evmcs->guest_ss_base;
+ vmcs12->guest_ds_base = evmcs->guest_ds_base;
+ vmcs12->guest_fs_base = evmcs->guest_fs_base;
+ vmcs12->guest_gs_base = evmcs->guest_gs_base;
+ vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
+ vmcs12->guest_tr_base = evmcs->guest_tr_base;
+ vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
+ vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
+ vmcs12->guest_es_limit = evmcs->guest_es_limit;
+ vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
+ vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
+ vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
+ vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
+ vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
+ vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
+ vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
+ vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
+ vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
+ vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
+ vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
+ vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
+ vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
+ vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
+ vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
+ vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
+ vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
+ vmcs12->guest_es_selector = evmcs->guest_es_selector;
+ vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
+ vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
+ vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
+ vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
+ vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
+ vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
+ vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
+ vmcs12->tsc_offset = evmcs->tsc_offset;
+ vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
+ vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
+ vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
+ vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
+ vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
+ vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
+ vmcs12->guest_cr0 = evmcs->guest_cr0;
+ vmcs12->guest_cr3 = evmcs->guest_cr3;
+ vmcs12->guest_cr4 = evmcs->guest_cr4;
+ vmcs12->guest_dr7 = evmcs->guest_dr7;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
+ vmcs12->host_fs_base = evmcs->host_fs_base;
+ vmcs12->host_gs_base = evmcs->host_gs_base;
+ vmcs12->host_tr_base = evmcs->host_tr_base;
+ vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
+ vmcs12->host_idtr_base = evmcs->host_idtr_base;
+ vmcs12->host_rsp = evmcs->host_rsp;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
+ vmcs12->ept_pointer = evmcs->ept_pointer;
+ vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
+ vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
+ vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
+ vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
+ vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
+ vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
+ vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
+ vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
+ vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
+ vmcs12->guest_pending_dbg_exceptions =
+ evmcs->guest_pending_dbg_exceptions;
+ vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
+ vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
+ vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
+ vmcs12->guest_activity_state = evmcs->guest_activity_state;
+ vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
+ }
+
+ /*
+ * Not used?
+ * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
+ * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
+ * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
+ * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
+ * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
+ * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
+ * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
+ * vmcs12->page_fault_error_code_mask =
+ * evmcs->page_fault_error_code_mask;
+ * vmcs12->page_fault_error_code_match =
+ * evmcs->page_fault_error_code_match;
+ * vmcs12->cr3_target_count = evmcs->cr3_target_count;
+ * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
+ * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
+ * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
+ */
+
+ /*
+ * Read only fields:
+ * vmcs12->guest_physical_address = evmcs->guest_physical_address;
+ * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
+ * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
+ * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
+ * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
+ * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
+ * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
+ * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
+ * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
+ * vmcs12->exit_qualification = evmcs->exit_qualification;
+ * vmcs12->guest_linear_address = evmcs->guest_linear_address;
+ *
+ * Not present in struct vmcs12:
+ * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
+ * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
+ * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
+ * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
+ */
+
+ return 0;
+}
+
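The hv_clean_fields tests above implement the eVMCS dirty-tracking protocol:
L1 sets a group's clean bit to promise the group is unchanged since the last
VM-entry, and L0 re-reads a group only when its bit is clear. The recurring
shape, with CLEAN_BIT_FOR_GROUP as a placeholder for one of the
HV_VMX_ENLIGHTENED_CLEAN_FIELD_* constants:

	if (unlikely(!(evmcs->hv_clean_fields & CLEAN_BIT_FOR_GROUP))) {
		/* copy every vmcs12 field in this group from the eVMCS */
	}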
+static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
+{
+ struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
+ struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+
+ /*
+ * Should not be changed by KVM:
+ *
+ * evmcs->host_es_selector = vmcs12->host_es_selector;
+ * evmcs->host_cs_selector = vmcs12->host_cs_selector;
+ * evmcs->host_ss_selector = vmcs12->host_ss_selector;
+ * evmcs->host_ds_selector = vmcs12->host_ds_selector;
+ * evmcs->host_fs_selector = vmcs12->host_fs_selector;
+ * evmcs->host_gs_selector = vmcs12->host_gs_selector;
+ * evmcs->host_tr_selector = vmcs12->host_tr_selector;
+ * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
+ * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
+ * evmcs->host_cr0 = vmcs12->host_cr0;
+ * evmcs->host_cr3 = vmcs12->host_cr3;
+ * evmcs->host_cr4 = vmcs12->host_cr4;
+ * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
+ * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
+ * evmcs->host_rip = vmcs12->host_rip;
+ * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
+ * evmcs->host_fs_base = vmcs12->host_fs_base;
+ * evmcs->host_gs_base = vmcs12->host_gs_base;
+ * evmcs->host_tr_base = vmcs12->host_tr_base;
+ * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
+ * evmcs->host_idtr_base = vmcs12->host_idtr_base;
+ * evmcs->host_rsp = vmcs12->host_rsp;
+ * sync_vmcs12() doesn't read these:
+ * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
+ * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
+ * evmcs->msr_bitmap = vmcs12->msr_bitmap;
+ * evmcs->ept_pointer = vmcs12->ept_pointer;
+ * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
+ * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
+ * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
+ * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
+ * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
+ * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
+ * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
+ * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
+ * evmcs->tpr_threshold = vmcs12->tpr_threshold;
+ * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
+ * evmcs->exception_bitmap = vmcs12->exception_bitmap;
+ * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
+ * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
+ * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
+ * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
+ * evmcs->page_fault_error_code_mask =
+ * vmcs12->page_fault_error_code_mask;
+ * evmcs->page_fault_error_code_match =
+ * vmcs12->page_fault_error_code_match;
+ * evmcs->cr3_target_count = vmcs12->cr3_target_count;
+ * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
+ * evmcs->tsc_offset = vmcs12->tsc_offset;
+ * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
+ * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
+ * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
+ * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
+ * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
+ * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
+ * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
+ * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
+ *
+ * Not present in struct vmcs12:
+ * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
+ * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
+ * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
+ * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
+ */
+
+ evmcs->guest_es_selector = vmcs12->guest_es_selector;
+ evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
+ evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
+ evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
+ evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
+ evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
+ evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
+ evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
+
+ evmcs->guest_es_limit = vmcs12->guest_es_limit;
+ evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
+ evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
+ evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
+ evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
+ evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
+ evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
+ evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
+ evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
+ evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
+
+ evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
+ evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
+ evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
+ evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
+ evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
+ evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
+ evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
+ evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
+
+ evmcs->guest_es_base = vmcs12->guest_es_base;
+ evmcs->guest_cs_base = vmcs12->guest_cs_base;
+ evmcs->guest_ss_base = vmcs12->guest_ss_base;
+ evmcs->guest_ds_base = vmcs12->guest_ds_base;
+ evmcs->guest_fs_base = vmcs12->guest_fs_base;
+ evmcs->guest_gs_base = vmcs12->guest_gs_base;
+ evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
+ evmcs->guest_tr_base = vmcs12->guest_tr_base;
+ evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
+ evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
+
+ evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
+ evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
+
+ evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
+ evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
+ evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
+ evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
+
+ evmcs->guest_pending_dbg_exceptions =
+ vmcs12->guest_pending_dbg_exceptions;
+ evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
+ evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
+
+ evmcs->guest_activity_state = vmcs12->guest_activity_state;
+ evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
+
+ evmcs->guest_cr0 = vmcs12->guest_cr0;
+ evmcs->guest_cr3 = vmcs12->guest_cr3;
+ evmcs->guest_cr4 = vmcs12->guest_cr4;
+ evmcs->guest_dr7 = vmcs12->guest_dr7;
+
+ evmcs->guest_physical_address = vmcs12->guest_physical_address;
+
+ evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
+ evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
+ evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
+ evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
+ evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
+ evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
+ evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
+ evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
+
+ evmcs->exit_qualification = vmcs12->exit_qualification;
+
+ evmcs->guest_linear_address = vmcs12->guest_linear_address;
+ evmcs->guest_rsp = vmcs12->guest_rsp;
+ evmcs->guest_rflags = vmcs12->guest_rflags;
+
+ evmcs->guest_interruptibility_info =
+ vmcs12->guest_interruptibility_info;
+ evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
+ evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
+ evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
+ evmcs->vm_entry_exception_error_code =
+ vmcs12->vm_entry_exception_error_code;
+ evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
+
+ evmcs->guest_rip = vmcs12->guest_rip;
+
+ evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
+
+ return 0;
+}
+
/*
* Copy the writable VMCS shadow fields back to the VMCS12, in case
* they have been modified by the L1 guest. Note that the "read-only"
@@ -8671,20 +9121,6 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
vmcs_load(vmx->loaded_vmcs->vmcs);
}
-/*
- * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
- * used before) all generate the same failure when it is missing.
- */
-static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vmx->nested.current_vmptr == -1ull) {
- nested_vmx_failInvalid(vcpu);
- return 0;
- }
- return 1;
-}
-
static int handle_vmread(struct kvm_vcpu *vcpu)
{
unsigned long field;
@@ -8697,8 +9133,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (!nested_vmx_check_vmcs12(vcpu))
- return kvm_skip_emulated_instruction(vcpu);
+ if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
+ return nested_vmx_failInvalid(vcpu);
if (!is_guest_mode(vcpu))
vmcs12 = get_vmcs12(vcpu);
@@ -8707,20 +9143,18 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
* When vmcs->vmcs_link_pointer is -1ull, any VMREAD
* to shadowed-field sets the ALU flags for VMfailInvalid.
*/
- if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
+ return nested_vmx_failInvalid(vcpu);
vmcs12 = get_shadow_vmcs12(vcpu);
}
/* Decode instruction info and find the field to read */
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/* Read the field, zero-extended to a u64 field_value */
- if (vmcs12_read_any(vmcs12, field, &field_value) < 0) {
- nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
/*
* Now copy part of this value to register or memory, as requested.
* Note that the number of bits actually copied is 32 or 64 depending
@@ -8738,8 +9172,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
(is_long_mode(vcpu) ? 8 : 4), NULL);
}
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
@@ -8764,8 +9197,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (!nested_vmx_check_vmcs12(vcpu))
- return kvm_skip_emulated_instruction(vcpu);
+ if (vmx->nested.current_vmptr == -1ull)
+ return nested_vmx_failInvalid(vcpu);
if (vmx_instruction_info & (1u << 10))
field_value = kvm_register_readl(vcpu,
@@ -8788,11 +9221,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
* VMCS," then the "read-only" fields are actually read/write.
*/
if (vmcs_field_readonly(field) &&
- !nested_cpu_has_vmwrite_any_field(vcpu)) {
- nested_vmx_failValid(vcpu,
+ !nested_cpu_has_vmwrite_any_field(vcpu))
+ return nested_vmx_failValid(vcpu,
VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
- return kvm_skip_emulated_instruction(vcpu);
- }
if (!is_guest_mode(vcpu))
vmcs12 = get_vmcs12(vcpu);
@@ -8801,18 +9232,14 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
* When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
* to shadowed-field sets the ALU flags for VMfailInvalid.
*/
- if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
+ return nested_vmx_failInvalid(vcpu);
vmcs12 = get_shadow_vmcs12(vcpu);
-
}
- if (vmcs12_write_any(vmcs12, field, field_value) < 0) {
- nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (vmcs12_write_any(vmcs12, field, field_value) < 0)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_UNSUPPORTED_VMCS_COMPONENT);
/*
* Do not track vmcs12 dirty-state if in guest-mode
@@ -8834,8 +9261,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
}
}
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
@@ -8846,7 +9272,7 @@ static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
SECONDARY_EXEC_SHADOW_VMCS);
vmcs_write64(VMCS_LINK_POINTER,
__pa(vmx->vmcs01.shadow_vmcs));
- vmx->nested.sync_shadow_vmcs = true;
+ vmx->nested.need_vmcs12_sync = true;
}
vmx->nested.dirty_vmcs12 = true;
}
@@ -8863,36 +9289,37 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
- nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMPTRLD_INVALID_ADDRESS);
- if (vmptr == vmx->nested.vmxon_ptr) {
- nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (vmptr == vmx->nested.vmxon_ptr)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMPTRLD_VMXON_POINTER);
+
+ /* Forbid normal VMPTRLD if Enlightened version was used */
+ if (vmx->nested.hv_evmcs)
+ return 1;
if (vmx->nested.current_vmptr != vmptr) {
struct vmcs12 *new_vmcs12;
struct page *page;
page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
- if (is_error_page(page)) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (is_error_page(page))
+ return nested_vmx_failInvalid(vcpu);
+
new_vmcs12 = kmap(page);
if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
(new_vmcs12->hdr.shadow_vmcs &&
!nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
kunmap(page);
kvm_release_page_clean(page);
- nested_vmx_failValid(vcpu,
+ return nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
- return kvm_skip_emulated_instruction(vcpu);
}
- nested_release_vmcs12(vmx);
+ nested_release_vmcs12(vcpu);
+
/*
* Load VMCS12 from guest memory since it is not already
* cached.
@@ -8904,8 +9331,71 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
set_current_vmptr(vmx, vmptr);
}
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
+}
+
+/*
+ * This is the equivalent of the nested hypervisor executing the vmptrld
+ * instruction.
+ */
+static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
+ bool from_launch)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct hv_vp_assist_page assist_page;
+
+ if (likely(!vmx->nested.enlightened_vmcs_enabled))
+ return 1;
+
+ if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
+ return 1;
+
+ if (unlikely(!assist_page.enlighten_vmentry))
+ return 1;
+
+ if (unlikely(assist_page.current_nested_vmcs !=
+ vmx->nested.hv_evmcs_vmptr)) {
+
+ if (!vmx->nested.hv_evmcs)
+ vmx->nested.current_vmptr = -1ull;
+
+ nested_release_evmcs(vcpu);
+
+ vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
+ vcpu, assist_page.current_nested_vmcs);
+
+ if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
+ return 0;
+
+ vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
+
+ if (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION) {
+ nested_release_evmcs(vcpu);
+ return 0;
+ }
+
+ vmx->nested.dirty_vmcs12 = true;
+ /*
+ * Since we keep L2 state for one guest only, the 'hv_clean_fields'
+ * mask can't be used when we switch between guests. Reset it here
+ * for simplicity.
+ */
+ vmx->nested.hv_evmcs->hv_clean_fields &=
+ ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+ vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
+
+ /*
+ * Unlike normal vmcs12, enlightened vmcs12 is not fully
+ * reloaded from guest's memory (read only fields, fields not
+ * present in struct hv_enlightened_vmcs, ...). Make sure there
+ * are no leftovers.
+ */
+ if (from_launch)
+ memset(vmx->nested.cached_vmcs12, 0,
+ sizeof(*vmx->nested.cached_vmcs12));
+
+ }
+ return 1;
}
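
/*
 * Editor's illustrative sketch (not part of this patch): the
 * "remap only when the guest-supplied pointer changed" pattern used
 * above, reduced to its essentials. All names are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

struct toy_cache {
	uint64_t cached_gpa;
	void *mapping;
};

/* trivial stand-ins for kvm_vcpu_gpa_to_page()/kmap()/kunmap() */
static void *toy_map(uint64_t gpa) { (void)gpa; return (void *)1; }
static void toy_unmap(void *mapping) { (void)mapping; }

static void toy_update(struct toy_cache *c, uint64_t new_gpa)
{
	if (new_gpa == c->cached_gpa)
		return;			/* same page: keep the mapping */
	if (c->mapping)
		toy_unmap(c->mapping);
	c->mapping = toy_map(new_gpa);
	c->cached_gpa = new_gpa;
}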
/* Emulate the VMPTRST instruction */
@@ -8920,6 +9410,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
+ if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
+ return 1;
+
if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
return 1;
/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
@@ -8928,8 +9421,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
kvm_inject_page_fault(vcpu, &e);
return 1;
}
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
/* Emulate the INVEPT instruction */
@@ -8959,11 +9451,9 @@ static int handle_invept(struct kvm_vcpu *vcpu)
types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
- if (type >= 32 || !(types & (1 << type))) {
- nested_vmx_failValid(vcpu,
+ if (type >= 32 || !(types & (1 << type)))
+ return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- return kvm_skip_emulated_instruction(vcpu);
- }
/* According to the Intel VMX instruction reference, the memory
* operand is read even if it isn't needed (e.g., for type==global)
@@ -8985,14 +9475,20 @@ static int handle_invept(struct kvm_vcpu *vcpu)
case VMX_EPT_EXTENT_CONTEXT:
kvm_mmu_sync_roots(vcpu);
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- nested_vmx_succeed(vcpu);
break;
default:
BUG_ON(1);
break;
}
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
+}
+
+static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}
static int handle_invvpid(struct kvm_vcpu *vcpu)
@@ -9006,6 +9502,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
u64 vpid;
u64 gla;
} operand;
+ u16 vpid02;
if (!(vmx->nested.msrs.secondary_ctls_high &
SECONDARY_EXEC_ENABLE_VPID) ||
@@ -9023,11 +9520,9 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
types = (vmx->nested.msrs.vpid_caps &
VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
- if (type >= 32 || !(types & (1 << type))) {
- nested_vmx_failValid(vcpu,
+ if (type >= 32 || !(types & (1 << type)))
+ return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- return kvm_skip_emulated_instruction(vcpu);
- }
/* according to the intel vmx instruction reference, the memory
* operand is read even if it isn't needed (e.g., for type==global)
@@ -9039,47 +9534,39 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
kvm_inject_page_fault(vcpu, &e);
return 1;
}
- if (operand.vpid >> 16) {
- nested_vmx_failValid(vcpu,
+ if (operand.vpid >> 16)
+ return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ vpid02 = nested_get_vpid02(vcpu);
switch (type) {
case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
if (!operand.vpid ||
- is_noncanonical_address(operand.gla, vcpu)) {
- nested_vmx_failValid(vcpu,
+ is_noncanonical_address(operand.gla, vcpu))
+ return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- return kvm_skip_emulated_instruction(vcpu);
- }
- if (cpu_has_vmx_invvpid_individual_addr() &&
- vmx->nested.vpid02) {
+ if (cpu_has_vmx_invvpid_individual_addr()) {
__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
- vmx->nested.vpid02, operand.gla);
+ vpid02, operand.gla);
} else
- __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+ __vmx_flush_tlb(vcpu, vpid02, false);
break;
case VMX_VPID_EXTENT_SINGLE_CONTEXT:
case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
- if (!operand.vpid) {
- nested_vmx_failValid(vcpu,
+ if (!operand.vpid)
+ return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- return kvm_skip_emulated_instruction(vcpu);
- }
- __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+ __vmx_flush_tlb(vcpu, vpid02, false);
break;
case VMX_VPID_EXTENT_ALL_CONTEXT:
- __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+ __vmx_flush_tlb(vcpu, vpid02, false);
break;
default:
WARN_ON_ONCE(1);
return kvm_skip_emulated_instruction(vcpu);
}
- nested_vmx_succeed(vcpu);
-
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
static int handle_invpcid(struct kvm_vcpu *vcpu)
@@ -9150,11 +9637,11 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
}
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_roots[i].cr3)
+ if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
== operand.pcid)
roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
- kvm_mmu_free_roots(vcpu, roots_to_free);
+ kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
/*
* If neither the current cr3 nor any of the prev_roots use the
* given PCID, then nothing needs to be done here because a
@@ -9208,7 +9695,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
static int handle_preemption_timer(struct kvm_vcpu *vcpu)
{
- kvm_lapic_expired_hv_timer(vcpu);
+ if (!to_vmx(vcpu)->req_immediate_exit)
+ kvm_lapic_expired_hv_timer(vcpu);
return 1;
}
@@ -9280,7 +9768,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
kvm_mmu_unload(vcpu);
mmu->ept_ad = accessed_dirty;
- mmu->base_role.ad_disabled = !accessed_dirty;
+ mmu->mmu_role.base.ad_disabled = !accessed_dirty;
vmcs12->ept_pointer = address;
/*
* TODO: Check what's the correct approach in case
@@ -9639,9 +10127,6 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
return false;
else if (is_page_fault(intr_info))
return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
- else if (is_no_device(intr_info) &&
- !(vmcs12->guest_cr0 & X86_CR0_TS))
- return false;
else if (is_debug(intr_info) &&
vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
@@ -10214,15 +10699,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
if (!lapic_in_kernel(vcpu))
return;
+ if (!flexpriority_enabled &&
+ !cpu_has_vmx_virtualize_x2apic_mode())
+ return;
+
/* Postpone execution until vmcs01 is the current VMCS. */
if (is_guest_mode(vcpu)) {
to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
return;
}
- if (!cpu_need_tpr_shadow(vcpu))
- return;
-
sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
@@ -10344,6 +10830,14 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
return max_irr;
}
+static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
+{
+ u8 rvi = vmx_get_rvi();
+ u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
+
+ return ((rvi & 0xf0) > (vppr & 0xf0));
+}
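
/*
 * Editor's illustrative sketch (not part of this patch): the RVI vs.
 * VPPR comparison above masks with 0xf0 because the APIC compares
 * interrupts per 16-vector priority class (vector bits 7:4). The
 * values in main() are made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

static int toy_has_apicv_interrupt(uint8_t rvi, uint8_t vppr)
{
	return (rvi & 0xf0) > (vppr & 0xf0);
}

int main(void)
{
	/* vector 0x51 (class 5) beats PPR 0x40 (class 4): prints 1 */
	printf("%d\n", toy_has_apicv_interrupt(0x51, 0x40));
	/* vector 0x4f (class 4) does not beat PPR 0x40 (class 4): prints 0 */
	printf("%d\n", toy_has_apicv_interrupt(0x4f, 0x40));
	return 0;
}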
+
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
if (!kvm_vcpu_apicv_active(vcpu))
@@ -10595,24 +11089,43 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
msrs[i].host, false);
}
-static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
+ if (!vmx->loaded_vmcs->hv_timer_armed)
+ vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
+ vmx->loaded_vmcs->hv_timer_armed = true;
+}
+
+static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 tscl;
u32 delta_tsc;
- if (vmx->hv_deadline_tsc == -1)
+ if (vmx->req_immediate_exit) {
+ vmx_arm_hv_timer(vmx, 0);
return;
+ }
- tscl = rdtsc();
- if (vmx->hv_deadline_tsc > tscl)
- /* sure to be 32 bit only because checked on set_hv_timer */
- delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
- cpu_preemption_timer_multi);
- else
- delta_tsc = 0;
+ if (vmx->hv_deadline_tsc != -1) {
+ tscl = rdtsc();
+ if (vmx->hv_deadline_tsc > tscl)
+ /* set_hv_timer ensures the delta fits in 32-bits */
+ delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
+ cpu_preemption_timer_multi);
+ else
+ delta_tsc = 0;
- vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
+ vmx_arm_hv_timer(vmx, delta_tsc);
+ return;
+ }
+
+ if (vmx->loaded_vmcs->hv_timer_armed)
+ vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
+ vmx->loaded_vmcs->hv_timer_armed = false;
}
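
/*
 * Editor's illustrative sketch (not part of this patch): deriving the
 * preemption timer value programmed above. The VMX preemption timer
 * counts down at the TSC rate divided by 2^N, with N taken from
 * IA32_VMX_MISC[4:0] (cpu_preemption_timer_multi above), hence the
 * right shift of the TSC delta.
 */
#include <stdint.h>

static uint32_t toy_preemption_timer_value(uint64_t deadline_tsc,
					   uint64_t now_tsc,
					   unsigned int shift)
{
	if (deadline_tsc <= now_tsc)
		return 0;	/* deadline passed: force an immediate exit */
	/* the caller is assumed to have ensured the delta fits in 32 bits */
	return (uint32_t)((deadline_tsc - now_tsc) >> shift);
}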
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -10635,9 +11148,25 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmcs_write32(PLE_WINDOW, vmx->ple_window);
}
- if (vmx->nested.sync_shadow_vmcs) {
- copy_vmcs12_to_shadow(vmx);
- vmx->nested.sync_shadow_vmcs = false;
+ if (vmx->nested.need_vmcs12_sync) {
+ /*
+ * hv_evmcs may end up being not mapped after migration (when
+ * L2 was running), map it here to make sure vmcs12 changes are
+ * properly reflected.
+ */
+ if (vmx->nested.enlightened_vmcs_enabled &&
+ !vmx->nested.hv_evmcs)
+ nested_vmx_handle_enlightened_vmptrld(vcpu, false);
+
+ if (vmx->nested.hv_evmcs) {
+ copy_vmcs12_to_enlightened(vmx);
+ /* All fields are clean */
+ vmx->nested.hv_evmcs->hv_clean_fields |=
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+ } else {
+ copy_vmcs12_to_shadow(vmx);
+ }
+ vmx->nested.need_vmcs12_sync = false;
}
if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
@@ -10672,7 +11201,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
atomic_switch_perf_msrs(vmx);
- vmx_arm_hv_timer(vcpu);
+ vmx_update_hv_timer(vcpu);
/*
* If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -10704,7 +11233,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
"jmp 1f \n\t"
"2: \n\t"
- __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
+ __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
"1: \n\t"
/* Reload cr2 if changed */
"mov %c[cr2](%0), %%" _ASM_AX " \n\t"
@@ -10736,9 +11265,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
/* Enter guest mode */
"jne 1f \n\t"
- __ex(ASM_VMX_VMLAUNCH) "\n\t"
+ __ex("vmlaunch") "\n\t"
"jmp 2f \n\t"
- "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
+ "1: " __ex("vmresume") "\n\t"
"2: "
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
@@ -10760,6 +11289,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%r13, %c[r13](%0) \n\t"
"mov %%r14, %c[r14](%0) \n\t"
"mov %%r15, %c[r15](%0) \n\t"
+ /*
+ * Clear host registers marked as clobbered to prevent
+ * speculative use.
+ */
"xor %%r8d, %%r8d \n\t"
"xor %%r9d, %%r9d \n\t"
"xor %%r10d, %%r10d \n\t"
@@ -10917,6 +11450,10 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
vmx->loaded_vmcs = vmcs;
vmx_vcpu_load(vcpu, cpu);
put_cpu();
+
+ vm_entry_controls_reset_shadow(vmx);
+ vm_exit_controls_reset_shadow(vmx);
+ vmx_segment_cache_clear(vmx);
}
/*
@@ -10925,12 +11462,10 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
*/
static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- vcpu_load(vcpu);
- vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- free_nested(vmx);
- vcpu_put(vcpu);
+ vcpu_load(vcpu);
+ vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
+ free_nested(vcpu);
+ vcpu_put(vcpu);
}
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
@@ -11214,6 +11749,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
#undef cr4_fixed1_update
}
+static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (kvm_mpx_supported()) {
+ bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
+
+ if (mpx_enabled) {
+ vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
+ vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
+ } else {
+ vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
+ vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
+ }
+ }
+}
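
/*
 * Editor's illustrative sketch (not part of this patch): the paired
 * toggle above, generalized -- one predicate drives matching entry and
 * exit control bits so they can never get out of sync. Bit values are
 * placeholders, not the BNDCFGS control encodings.
 */
#include <stdint.h>
#include <stdbool.h>

#define TOY_ENTRY_LOAD_X	(1u << 16)
#define TOY_EXIT_CLEAR_X	(1u << 23)

static void toy_update_ctls(uint32_t *entry, uint32_t *exit, bool enabled)
{
	if (enabled) {
		*entry |= TOY_ENTRY_LOAD_X;
		*exit  |= TOY_EXIT_CLEAR_X;
	} else {
		*entry &= ~TOY_ENTRY_LOAD_X;
		*exit  &= ~TOY_EXIT_CLEAR_X;
	}
}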
+
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -11230,8 +11782,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
- if (nested_vmx_allowed(vcpu))
+ if (nested_vmx_allowed(vcpu)) {
nested_vmx_cr_fixed1_bits_update(vcpu);
+ nested_vmx_entry_exit_ctls_update(vcpu);
+ }
}
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -11274,28 +11828,28 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
return get_vmcs12(vcpu)->ept_pointer;
}
-static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
WARN_ON(mmu_is_nested(vcpu));
- if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu)))
- return 1;
+ vcpu->arch.mmu = &vcpu->arch.guest_mmu;
kvm_init_shadow_ept_mmu(vcpu,
to_vmx(vcpu)->nested.msrs.ept_caps &
VMX_EPT_EXECUTE_ONLY_BIT,
nested_ept_ad_enabled(vcpu),
nested_ept_get_cr3(vcpu));
- vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
- vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
- vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
+ vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
+ vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
+ vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
+ vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
- return 0;
}
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
- vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+ vcpu->arch.mmu = &vcpu->arch.root_mmu;
+ vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
@@ -11427,16 +11981,18 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vcpu->arch.virtual_tsc_khz == 0)
- return;
-
- /* Make sure short timeouts reliably trigger an immediate vmexit.
- * hrtimer_start does not guarantee this. */
- if (preemption_timeout <= 1) {
+ /*
+ * A timer value of zero is architecturally guaranteed to cause
+ * a VMExit prior to executing any instructions in the guest.
+ */
+ if (preemption_timeout == 0) {
vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
return;
}
+ if (vcpu->arch.virtual_tsc_khz == 0)
+ return;
+
preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
preemption_timeout *= 1000000;
do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
@@ -11646,11 +12202,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
* bits 15:8 should be zero in posted_intr_nv,
 * the descriptor address has already been checked
* in nested_get_vmcs12_pages.
+ *
+ * bits 5:0 of posted_intr_desc_addr should be zero.
*/
if (nested_cpu_has_posted_intr(vmcs12) &&
(!nested_cpu_has_vid(vmcs12) ||
!nested_exit_intr_ack_set(vcpu) ||
- vmcs12->posted_intr_nv & 0xff00))
+ (vmcs12->posted_intr_nv & 0xff00) ||
+ (vmcs12->posted_intr_desc_addr & 0x3f) ||
+ (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
return -EINVAL;
/* tpr shadow is needed by all apicv features. */
@@ -11706,15 +12266,12 @@ static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
- u64 address = vmcs12->pml_address;
- int maxphyaddr = cpuid_maxphyaddr(vcpu);
+ if (!nested_cpu_has_pml(vmcs12))
+ return 0;
- if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML)) {
- if (!nested_cpu_has_ept(vmcs12) ||
- !IS_ALIGNED(address, 4096) ||
- address >> maxphyaddr)
- return -EINVAL;
- }
+ if (!nested_cpu_has_ept(vmcs12) ||
+ !page_address_valid(vcpu, vmcs12->pml_address))
+ return -EINVAL;
return 0;
}
@@ -11894,107 +12451,87 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
return 0;
}
-static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+/*
+ * Returns true if KVM is able to configure the CPU to tag TLB entries
+ * populated by L2 differently from TLB entries populated
+ * by L1.
+ *
+ * If L1 uses EPT, then TLB entries are tagged with different EPTP.
+ *
+ * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
+ * with different VPID (L1 entries are tagged with vmx->vpid
+ * while L2 entries are tagged with vmx->nested.vpid02).
+ */
+static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
- vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
- vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
- vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
- vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
- vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
- vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
- vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
- vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
- vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
- vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
- vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
- vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
- vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
- vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
- vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
- vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
- vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
- vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
- vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
- vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
- vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
- vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
- vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
- vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
- vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
- vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
- vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
- vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
- vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
- vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
-
- vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
- vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
- vmcs12->guest_pending_dbg_exceptions);
- vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
- vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
+ return nested_cpu_has_ept(vmcs12) ||
+ (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
+}
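
/*
 * Editor's illustrative sketch (not part of this patch): the decision
 * above as a truth table plus a standalone predicate. "Tagged" means L2
 * TLB entries can be distinguished from L1's without a flush.
 *
 *   L1 uses EPT | L1 uses VPID and vpid02 allocated | guest TLB tagged?
 *   ------------+-----------------------------------+------------------
 *       yes     |             don't care            |        yes
 *       no      |                yes                |        yes
 *       no      |                no                 |        no
 */
#include <stdbool.h>

static bool toy_has_guest_tlb_tag(bool l1_uses_ept, bool l1_uses_vpid,
				  bool vpid02_allocated)
{
	return l1_uses_ept || (l1_uses_vpid && vpid02_allocated);
}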
- if (nested_cpu_has_xsaves(vmcs12))
- vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
- vmcs_write64(VMCS_LINK_POINTER, -1ull);
+static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+{
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
+ return vmcs12->guest_ia32_efer;
+ else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
+ return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
+ else
+ return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
+}
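
/*
 * Editor's illustrative sketch (not part of this patch): how the EFER
 * computed above feeds the speculative entry-control setup in
 * prepare_vmcs02_early() below. Bit positions follow the Intel SDM but
 * the helper itself is hypothetical.
 */
#include <stdint.h>

#define TOY_EFER_LMA		(1ull << 10)	/* EFER.LMA */
#define TOY_ENTRY_IA32E_MODE	(1u << 9)	/* VM_ENTRY_IA32E_MODE */
#define TOY_ENTRY_LOAD_EFER	(1u << 15)	/* VM_ENTRY_LOAD_IA32_EFER */

static uint32_t toy_entry_ctls(uint64_t guest_efer, uint64_t host_efer,
			       uint32_t base_ctls)
{
	uint32_t ctls = base_ctls & ~(TOY_ENTRY_IA32E_MODE | TOY_ENTRY_LOAD_EFER);

	if (guest_efer & TOY_EFER_LMA)
		ctls |= TOY_ENTRY_IA32E_MODE;	/* guest runs in IA-32e mode */
	if (guest_efer != host_efer)
		ctls |= TOY_ENTRY_LOAD_EFER;	/* hardware loads guest EFER */
	return ctls;
}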
- if (cpu_has_vmx_posted_intr())
- vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
+static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
+{
+ /*
+ * If vmcs02 hasn't been initialized, set the constant vmcs02 state
+ * according to L0's settings (vmcs12 is irrelevant here). Host
+ * fields that come from L0 and are not constant, e.g. HOST_CR3,
+ * will be set as needed prior to VMLAUNCH/VMRESUME.
+ */
+ if (vmx->nested.vmcs02_initialized)
+ return;
+ vmx->nested.vmcs02_initialized = true;
/*
- * Whether page-faults are trapped is determined by a combination of
- * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
- * If enable_ept, L0 doesn't care about page faults and we should
- * set all of these to L1's desires. However, if !enable_ept, L0 does
- * care about (at least some) page faults, and because it is not easy
- * (if at all possible?) to merge L0 and L1's desires, we simply ask
- * to exit on each and every L2 page fault. This is done by setting
- * MASK=MATCH=0 and (see below) EB.PF=1.
- * Note that below we don't need special code to set EB.PF beyond the
- * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
- * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
- * !enable_ept, EB.PF is 1, so the "or" will always be 1.
+ * We don't care what the EPTP value is; we just need to guarantee
+ * it's valid so we don't get a false positive when doing early
+ * consistency checks.
*/
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
- enable_ept ? vmcs12->page_fault_error_code_mask : 0);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
- enable_ept ? vmcs12->page_fault_error_code_match : 0);
+ if (enable_ept && nested_early_check)
+ vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
/* All VMFUNCs are currently emulated through L0 vmexits. */
if (cpu_has_vmx_vmfunc())
vmcs_write64(VM_FUNCTION_CONTROL, 0);
- if (cpu_has_vmx_apicv()) {
- vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
- vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
- vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
- vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
- }
+ if (cpu_has_vmx_posted_intr())
+ vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
- /*
- * Set host-state according to L0's settings (vmcs12 is irrelevant here)
- * Some constant fields are set here by vmx_set_constant_host_state().
- * Other fields are different per CPU, and will be set later when
- * vmx_vcpu_load() is called, and when vmx_prepare_switch_to_guest()
- * is called.
- */
- vmx_set_constant_host_state(vmx);
+ if (cpu_has_vmx_msr_bitmap())
+ vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
+
+ if (enable_pml)
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
/*
- * Set the MSR load/store lists to match L0's settings.
+ * Set the MSR load/store lists to match L0's settings. Only the
+ * addresses are constant (for vmcs02); the counts can change based
+ * on L2's behavior, e.g. switching to/from long mode.
*/
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
- set_cr4_guest_host_mask(vmx);
+ vmx_set_constant_host_state(vmx);
+}
- if (vmx_mpx_supported())
- vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx,
+ struct vmcs12 *vmcs12)
+{
+ prepare_vmcs02_constant_state(vmx);
+
+ vmcs_write64(VMCS_LINK_POINTER, -1ull);
if (enable_vpid) {
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
@@ -12002,85 +12539,36 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
else
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
}
-
- /*
- * L1 may access the L2's PDPTR, so save them to construct vmcs12
- */
- if (enable_ept) {
- vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
- vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
- vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
- vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
- }
-
- if (cpu_has_vmx_msr_bitmap())
- vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
}
-/*
- * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
- * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
- * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
- * guest in a way that will both be appropriate to L1's requests, and our
- * needs. In addition to modifying the active vmcs (which is vmcs02), this
- * function also has additional necessary side-effects, like setting various
- * vcpu->arch fields.
- * Returns 0 on success, 1 on failure. Invalid state exit qualification code
- * is assigned to entry_failure_code on failure.
- */
-static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
- u32 *entry_failure_code)
+static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control, vmcs12_exec_ctrl;
+ u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
- if (vmx->nested.dirty_vmcs12) {
- prepare_vmcs02_full(vcpu, vmcs12);
- vmx->nested.dirty_vmcs12 = false;
- }
+ if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
+ prepare_vmcs02_early_full(vmx, vmcs12);
/*
- * First, the fields that are shadowed. This must be kept in sync
- * with vmx_shadow_fields.h.
+ * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
+ * entry, but only if the current (host) sp changed from the value
+ * we wrote last (vmx->host_rsp). This cache is no longer relevant
+ * if we switch vmcs, and rather than hold a separate cache per vmcs,
+ * here we just force the write to happen on entry. host_rsp will
+ * also be written unconditionally by nested_vmx_check_vmentry_hw()
+ * if we are doing early consistency checks via hardware.
*/
+ vmx->host_rsp = 0;
- vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
- vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
- vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
- vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
- vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
-
- if (vmx->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
- kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
- vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
- } else {
- kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
- vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
- }
- if (vmx->nested.nested_run_pending) {
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
- vmcs12->vm_entry_intr_info_field);
- vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
- vmcs12->vm_entry_exception_error_code);
- vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
- vmcs12->vm_entry_instruction_len);
- vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
- vmcs12->guest_interruptibility_info);
- vmx->loaded_vmcs->nmi_known_unmasked =
- !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
- } else {
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
- }
- vmx_set_rflags(vcpu, vmcs12->guest_rflags);
-
+ /*
+ * PIN CONTROLS
+ */
exec_control = vmcs12->pin_based_vm_exec_control;
- /* Preemption timer setting is only taken from vmcs01. */
- exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ /* Preemption timer setting is computed directly in vmx_vcpu_run. */
exec_control |= vmcs_config.pin_based_exec_ctrl;
- if (vmx->hv_deadline_tsc == -1)
- exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ vmx->loaded_vmcs->hv_timer_armed = false;
/* Posted interrupts setting is only taken from vmcs12. */
if (nested_cpu_has_posted_intr(vmcs12)) {
@@ -12089,13 +12577,43 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
} else {
exec_control &= ~PIN_BASED_POSTED_INTR;
}
-
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
- vmx->nested.preemption_timer_expired = false;
- if (nested_cpu_has_preemption_timer(vmcs12))
- vmx_start_preemption_timer(vcpu);
+ /*
+ * EXEC CONTROLS
+ */
+ exec_control = vmx_exec_control(vmx); /* L0's desires */
+ exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+ exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+ exec_control |= vmcs12->cpu_based_vm_exec_control;
+ /*
+ * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
+ * nested_get_vmcs12_pages can't fix it up, the illegal value
+ * will result in a VM entry failure.
+ */
+ if (exec_control & CPU_BASED_TPR_SHADOW) {
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
+ vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
+ } else {
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_LOAD_EXITING |
+ CPU_BASED_CR8_STORE_EXITING;
+#endif
+ }
+
+ /*
+ * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
+ * for I/O port accesses.
+ */
+ exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
+ exec_control |= CPU_BASED_UNCOND_IO_EXITING;
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
+
+ /*
+ * SECONDARY EXEC CONTROLS
+ */
if (cpu_has_secondary_exec_ctrls()) {
exec_control = vmx->secondary_exec_control;
@@ -12136,43 +12654,214 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
}
/*
- * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
- * entry, but only if the current (host) sp changed from the value
- * we wrote last (vmx->host_rsp). This cache is no longer relevant
- * if we switch vmcs, and rather than hold a separate cache per vmcs,
- * here we just force the write to happen on entry.
+ * ENTRY CONTROLS
+ *
+ * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
+ * are emulated by vmx_set_efer() in prepare_vmcs02(), but we speculate
+ * on the related bits (if supported by the CPU) in the hope that
+ * we can avoid VMWrites during vmx_set_efer().
+ */
+ exec_control = (vmcs12->vm_entry_controls | vmcs_config.vmentry_ctrl) &
+ ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
+ if (cpu_has_load_ia32_efer) {
+ if (guest_efer & EFER_LMA)
+ exec_control |= VM_ENTRY_IA32E_MODE;
+ if (guest_efer != host_efer)
+ exec_control |= VM_ENTRY_LOAD_IA32_EFER;
+ }
+ vm_entry_controls_init(vmx, exec_control);
+
+ /*
+ * EXIT CONTROLS
+ *
+ * L2->L1 exit controls are emulated - the hardware exit is to L0 so
+ * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
+ * bits may be modified by vmx_set_efer() in prepare_vmcs02().
*/
- vmx->host_rsp = 0;
+ exec_control = vmcs_config.vmexit_ctrl;
+ if (cpu_has_load_ia32_efer && guest_efer != host_efer)
+ exec_control |= VM_EXIT_LOAD_IA32_EFER;
+ vm_exit_controls_init(vmx, exec_control);
- exec_control = vmx_exec_control(vmx); /* L0's desires */
- exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
- exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
- exec_control &= ~CPU_BASED_TPR_SHADOW;
- exec_control |= vmcs12->cpu_based_vm_exec_control;
+ /*
+ * Conceptually we want to copy the PML address and index from
+ * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+ * since we always flush the log on each vmexit and never change
+ * the PML address (once set), this happens to be equivalent to
+ * simply resetting the index in vmcs02.
+ */
+ if (enable_pml)
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
/*
- * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
- * nested_get_vmcs12_pages can't fix it up, the illegal value
- * will result in a VM entry failure.
+ * Interrupt/Exception Fields
*/
- if (exec_control & CPU_BASED_TPR_SHADOW) {
- vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
- vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
+ if (vmx->nested.nested_run_pending) {
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+ vmcs12->vm_entry_intr_info_field);
+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
+ vmcs12->vm_entry_exception_error_code);
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ vmcs12->vm_entry_instruction_len);
+ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
+ vmcs12->guest_interruptibility_info);
+ vmx->loaded_vmcs->nmi_known_unmasked =
+ !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
} else {
-#ifdef CONFIG_X86_64
- exec_control |= CPU_BASED_CR8_LOAD_EXITING |
- CPU_BASED_CR8_STORE_EXITING;
-#endif
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
+ }
+}
+
+static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+{
+ struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
+
+ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
+ vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
+ vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
+ vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
+ vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
+ vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
+ vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
+ vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
+ vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
+ vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
+ vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
+ vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
+ vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
+ vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
+ vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
+ vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
+ vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
+ vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
+ vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
+ vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
+ vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
+ vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
+ vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
+ vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
+ vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
+ vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
+ vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
+ vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
+ vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
+ vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
+ vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
+ vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
+ vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
+ vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
+ vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
+ }
+
+ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
+ vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
+ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
+ vmcs12->guest_pending_dbg_exceptions);
+ vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
+ vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
+
+ /*
+ * L1 may access L2's PDPTRs, so save them to construct
+ * vmcs12.
+ */
+ if (enable_ept) {
+ vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
+ vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
+ vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
+ vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
+ }
}
+ if (nested_cpu_has_xsaves(vmcs12))
+ vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
+
/*
- * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
- * for I/O port accesses.
+ * Whether page-faults are trapped is determined by a combination of
+ * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
+ * If enable_ept, L0 doesn't care about page faults and we should
+ * set all of these to L1's desires. However, if !enable_ept, L0 does
+ * care about (at least some) page faults, and because it is not easy
+ * (if at all possible?) to merge L0 and L1's desires, we simply ask
+ * to exit on each and every L2 page fault. This is done by setting
+ * MASK=MATCH=0 and (see below) EB.PF=1.
+ * Note that below we don't need special code to set EB.PF beyond the
+ * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
+ * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
+ * !enable_ept, EB.PF is 1, so the "or" will always be 1.
*/
- exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
- exec_control |= CPU_BASED_UNCOND_IO_EXITING;
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
+ enable_ept ? vmcs12->page_fault_error_code_mask : 0);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
+ enable_ept ? vmcs12->page_fault_error_code_match : 0);
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
+ if (cpu_has_vmx_apicv()) {
+ vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
+ vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
+ vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
+ vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
+ }
+
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+
+ set_cr4_guest_host_mask(vmx);
+
+ if (kvm_mpx_supported()) {
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+ else
+ vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+ }
+}
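
/*
 * Editor's illustrative sketch (not part of this patch): the consumer
 * side of the clean-fields gating used above -- a whole group of
 * VMWRITEs is skipped when its clean bit says nothing in the group
 * changed. Names are simplified stand-ins.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#define TOY_CLEAN_GUEST_GRP2	(1u << 1)

static bool toy_group2_needs_sync(const uint32_t *hv_clean_fields)
{
	/* no eVMCS mapped (NULL): nothing can be skipped, always sync */
	if (!hv_clean_fields)
		return true;
	return !(*hv_clean_fields & TOY_CLEAN_GUEST_GRP2);
}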
+
+/*
+ * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
+ * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
+ * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
+ * guest in a way that will both be appropriate to L1's requests, and our
+ * needs. In addition to modifying the active vmcs (which is vmcs02), this
+ * function also has additional necessary side-effects, like setting various
+ * vcpu->arch fields.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
+ */
+static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ u32 *entry_failure_code)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
+
+ if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) {
+ prepare_vmcs02_full(vmx, vmcs12);
+ vmx->nested.dirty_vmcs12 = false;
+ }
+
+ /*
+ * First, the fields that are shadowed. This must be kept in sync
+ * with vmx_shadow_fields.h.
+ */
+ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
+ vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
+ vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
+ }
+
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
+ kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+ } else {
+ kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
+ }
+ vmx_set_rflags(vcpu, vmcs12->guest_rflags);
+
+ vmx->nested.preemption_timer_expired = false;
+ if (nested_cpu_has_preemption_timer(vmcs12))
+ vmx_start_preemption_timer(vcpu);
/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
* bitwise-or of what L1 wants to trap for L2, and what we want to
@@ -12182,20 +12871,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
- /* L2->L1 exit controls are emulated - the hardware exit is to L0 so
- * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
- * bits are further modified by vmx_set_efer() below.
- */
- vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
-
- /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
- * emulated by vmx_set_efer(), below.
- */
- vm_entry_controls_init(vmx,
- (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
- ~VM_ENTRY_IA32E_MODE) |
- (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
-
if (vmx->nested.nested_run_pending &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
@@ -12218,37 +12893,29 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 * influence the global bitmap (for vpid01 and vpid02 allocation)
 * even if we spawn a lot of nested vCPUs.
*/
- if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
+ if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
- __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+ __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
}
} else {
- vmx_flush_tlb(vcpu, true);
+ /*
+ * If L1 uses EPT, then L0 needs to execute INVEPT on
+ * EPTP02 instead of EPTP01. Therefore, delay TLB
+ * flush until vmcs02->eptp is fully updated by
+ * KVM_REQ_LOAD_CR3. Note that this assumes
+ * KVM_REQ_TLB_FLUSH is evaluated after
+ * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
+ */
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
}
- if (enable_pml) {
- /*
- * Conceptually we want to copy the PML address and index from
- * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
- * since we always flush the log on each vmexit, this happens
- * to be equivalent to simply resetting the fields in vmcs02.
- */
- ASSERT(vmx->pml_pg);
- vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
- vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
- }
-
- if (nested_cpu_has_ept(vmcs12)) {
- if (nested_ept_init_mmu_context(vcpu)) {
- *entry_failure_code = ENTRY_FAIL_DEFAULT;
- return 1;
- }
- } else if (nested_cpu_has2(vmcs12,
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ if (nested_cpu_has_ept(vmcs12))
+ nested_ept_init_mmu_context(vcpu);
+ else if (nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
vmx_flush_tlb(vcpu, true);
- }
/*
* This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
@@ -12264,14 +12931,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmx_set_cr4(vcpu, vmcs12->guest_cr4);
vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
- if (vmx->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
- vcpu->arch.efer = vmcs12->guest_ia32_efer;
- else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
- vcpu->arch.efer |= (EFER_LMA | EFER_LME);
- else
- vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
- /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
+ vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
+ /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
vmx_set_efer(vcpu, vcpu->arch.efer);
/*
@@ -12313,11 +12974,15 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool ia32e;
if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
@@ -12384,6 +13049,21 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
/*
+ * If the load IA32_EFER VM-exit control is 1, bits reserved in the
+ * IA32_EFER MSR must be 0 in the field for that register. In addition,
+ * the values of the LMA and LME bits in the field must each be that of
+ * the host address-space size VM-exit control.
+ */
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
+ ia32e = (vmcs12->vm_exit_controls &
+ VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
+ if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
+ ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
+ ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
+ return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
+ }
+
+ /*
* From the Intel SDM, volume 3:
* Fields relevant to VM-entry event injection must be set properly.
* These fields are the VM-entry interruption-information field, the
@@ -12439,6 +13119,10 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
}
}
+ if (nested_cpu_has_ept(vmcs12) &&
+ !valid_ept_address(vcpu, vmcs12->ept_pointer))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
return 0;
}
@@ -12504,21 +13188,6 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
return 1;
}
- /*
- * If the load IA32_EFER VM-exit control is 1, bits reserved in the
- * IA32_EFER MSR must be 0 in the field for that register. In addition,
- * the values of the LMA and LME bits in the field must each be that of
- * the host address-space size VM-exit control.
- */
- if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
- ia32e = (vmcs12->vm_exit_controls &
- VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
- if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
- ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
- ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
- return 1;
- }
-
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
(vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
@@ -12527,45 +13196,175 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
return 0;
}
+static int __noclone nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long cr3, cr4;
+
+ if (!nested_early_check)
+ return 0;
+
+ if (vmx->msr_autoload.host.nr)
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ if (vmx->msr_autoload.guest.nr)
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+
+ preempt_disable();
+
+ vmx_prepare_switch_to_guest(vcpu);
+
+ /*
+ * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
+ * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
+ * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
+ * there is no need to preserve other bits or save/restore the field.
+ */
+ vmcs_writel(GUEST_RFLAGS, 0);
+
+ vmcs_writel(HOST_RIP, vmx_early_consistency_check_return);
+
+ cr3 = __get_current_cr3_fast();
+ if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+ vmcs_writel(HOST_CR3, cr3);
+ vmx->loaded_vmcs->host_state.cr3 = cr3;
+ }
+
+ cr4 = cr4_read_shadow();
+ if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
+ vmcs_writel(HOST_CR4, cr4);
+ vmx->loaded_vmcs->host_state.cr4 = cr4;
+ }
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
+
+ asm(
+ /* Set HOST_RSP */
+ __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
+ "mov %%" _ASM_SP ", %c[host_rsp](%0)\n\t"
+
+ /* Check if vmlaunch or vmresume is needed */
+ "cmpl $0, %c[launched](%0)\n\t"
+ "je 1f\n\t"
+ __ex("vmresume") "\n\t"
+ "jmp 2f\n\t"
+ "1: " __ex("vmlaunch") "\n\t"
+ "jmp 2f\n\t"
+ "2: "
+
+ /* Set vmx->fail accordingly */
+ "setbe %c[fail](%0)\n\t"
+
+ ".pushsection .rodata\n\t"
+ ".global vmx_early_consistency_check_return\n\t"
+ "vmx_early_consistency_check_return: " _ASM_PTR " 2b\n\t"
+ ".popsection"
+ :
+ : "c"(vmx), "d"((unsigned long)HOST_RSP),
+ [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
+ [fail]"i"(offsetof(struct vcpu_vmx, fail)),
+ [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp))
+ : "rax", "cc", "memory"
+ );
+
+ vmcs_writel(HOST_RIP, vmx_return);
+
+ preempt_enable();
+
+ if (vmx->msr_autoload.host.nr)
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ if (vmx->msr_autoload.guest.nr)
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+
+ if (vmx->fail) {
+ WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ vmx->fail = 0;
+ return 1;
+ }
+
+ /*
+ * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
+ */
+ local_irq_enable();
+ if (hw_breakpoint_active())
+ set_debugreg(__this_cpu_read(cpu_dr7), 7);
+
+ /*
+ * A non-failing VMEntry means we somehow entered guest mode with
+ * an illegal RIP, and that's just the tip of the iceberg. There
+ * is no telling what memory has been modified or what state has
+ * been exposed to unknown code. Hitting this all but guarantees
+ * a (very critical) hardware issue.
+ */
+ WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
+ VMX_EXIT_REASONS_FAILED_VMENTRY));
+
+ return 0;
+}
+STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw);
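
/*
 * Editor's illustrative sketch (not part of this patch): why writing 0
 * to GUEST_RFLAGS above is guaranteed to trip the hardware's guest-state
 * checks. RFLAGS bit 1 is architecturally reserved to 1, so a value
 * with that bit clear can never pass VM-entry validation.
 */
#include <stdint.h>
#include <stdbool.h>

static bool toy_guest_rflags_valid(uint64_t rflags)
{
	return (rflags & (1ull << 1)) != 0;	/* reserved-to-1 bit set? */
}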
+
+static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12);
+
/*
- * If exit_qual is NULL, this is being called from state restore (either RSM
+ * If from_vmentry is false, this is being called from state restore (either RSM
* or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
+ *
+ * Returns:
+ *  0 - success, i.e. proceed with actual VMEnter
+ *  1 - consistency check VMExit
+ * -1 - consistency check VMFail
*/
-static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
+static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ bool from_vmentry)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- bool from_vmentry = !!exit_qual;
- u32 dummy_exit_qual;
- u32 vmcs01_cpu_exec_ctrl;
- int r = 0;
-
- vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+ bool evaluate_pending_interrupts;
+ u32 exit_reason = EXIT_REASON_INVALID_STATE;
+ u32 exit_qual;
- enter_guest_mode(vcpu);
+ evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
+ (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
+ if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
+ evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+ if (kvm_mpx_supported() &&
+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
- vmx_segment_cache_clear(vmx);
+ prepare_vmcs02_early(vmx, vmcs12);
+
+ if (from_vmentry) {
+ nested_get_vmcs12_pages(vcpu);
+
+ if (nested_vmx_check_vmentry_hw(vcpu)) {
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+ return -1;
+ }
+
+ if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+ goto vmentry_fail_vmexit;
+ }
+
+ enter_guest_mode(vcpu);
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vcpu->arch.tsc_offset += vmcs12->tsc_offset;
- r = EXIT_REASON_INVALID_STATE;
- if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual))
- goto fail;
+ if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+ goto vmentry_fail_vmexit_guest_mode;
if (from_vmentry) {
- nested_get_vmcs12_pages(vcpu);
-
- r = EXIT_REASON_MSR_LOAD_FAIL;
- *exit_qual = nested_vmx_load_msr(vcpu,
- vmcs12->vm_entry_msr_load_addr,
- vmcs12->vm_entry_msr_load_count);
- if (*exit_qual)
- goto fail;
+ exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
+ exit_qual = nested_vmx_load_msr(vcpu,
+ vmcs12->vm_entry_msr_load_addr,
+ vmcs12->vm_entry_msr_load_count);
+ if (exit_qual)
+ goto vmentry_fail_vmexit_guest_mode;
} else {
/*
* The MMU is not initialized to point at the right entities yet and
@@ -12585,16 +13384,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 * to L1 or delivered directly to L2 (e.g. in case L1 doesn't
* intercept EXTERNAL_INTERRUPT).
*
- * Usually this would be handled by L0 requesting a
- * IRQ/NMI window by setting VMCS accordingly. However,
- * this setting was done on VMCS01 and now VMCS02 is active
- * instead. Thus, we force L0 to perform pending event
- * evaluation by requesting a KVM_REQ_EVENT.
- */
- if (vmcs01_cpu_exec_ctrl &
- (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
+ * Usually this would be handled by the processor noticing an
+ * IRQ/NMI window request, or checking RVI during evaluation of
+ * pending virtual interrupts. However, this setting was done
+ * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
+ * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
+ */
+ if (unlikely(evaluate_pending_interrupts))
kvm_make_request(KVM_REQ_EVENT, vcpu);
- }
/*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -12604,12 +13401,28 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
*/
return 0;
-fail:
+ /*
+ * A failed consistency check that leads to a VMExit during L1's
+ * VMEnter to L2 is a variation of a normal VMExit, as explained in
+ * 26.7 "VM-entry failures during or after loading guest state".
+ */
+vmentry_fail_vmexit_guest_mode:
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
leave_guest_mode(vcpu);
+
+vmentry_fail_vmexit:
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- return r;
+
+ if (!from_vmentry)
+ return 1;
+
+ load_vmcs12_host_state(vcpu, vmcs12);
+ vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
+ vmcs12->exit_qualification = exit_qual;
+ if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
+ vmx->nested.need_vmcs12_sync = true;
+ return 1;
}
/*
@@ -12621,14 +13434,16 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
- u32 exit_qual;
int ret;
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (!nested_vmx_check_vmcs12(vcpu))
- goto out;
+ if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
+ return 1;
+
+ if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
+ return nested_vmx_failInvalid(vcpu);
vmcs12 = get_vmcs12(vcpu);
@@ -12638,13 +13453,16 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* rather than RFLAGS.ZF, and no error number is stored to the
* VM-instruction error field.
*/
- if (vmcs12->hdr.shadow_vmcs) {
- nested_vmx_failInvalid(vcpu);
- goto out;
- }
+ if (vmcs12->hdr.shadow_vmcs)
+ return nested_vmx_failInvalid(vcpu);
- if (enable_shadow_vmcs)
+ if (vmx->nested.hv_evmcs) {
+ copy_enlightened_to_vmcs12(vmx);
+ /* Enlightened VMCS doesn't have launch state */
+ vmcs12->launch_state = !launch;
+ } else if (enable_shadow_vmcs) {
copy_shadow_to_vmcs12(vmx);
+ }
/*
* The nested entry process starts with enforcing various prerequisites
@@ -12656,59 +13474,37 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* for misconfigurations which will anyway be caught by the processor
* when using the merged vmcs02.
*/
- if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) {
- nested_vmx_failValid(vcpu,
- VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
- goto out;
- }
+ if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
- if (vmcs12->launch_state == launch) {
- nested_vmx_failValid(vcpu,
+ if (vmcs12->launch_state == launch)
+ return nested_vmx_failValid(vcpu,
launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
: VMXERR_VMRESUME_NONLAUNCHED_VMCS);
- goto out;
- }
ret = check_vmentry_prereqs(vcpu, vmcs12);
- if (ret) {
- nested_vmx_failValid(vcpu, ret);
- goto out;
- }
-
- /*
- * After this point, the trap flag no longer triggers a singlestep trap
- * on the vm entry instructions; don't call kvm_skip_emulated_instruction.
- * This is not 100% correct; for performance reasons, we delegate most
- * of the checks on host state to the processor. If those fail,
- * the singlestep trap is missed.
- */
- skip_emulated_instruction(vcpu);
-
- ret = check_vmentry_postreqs(vcpu, vmcs12, &exit_qual);
- if (ret) {
- nested_vmx_entry_failure(vcpu, vmcs12,
- EXIT_REASON_INVALID_STATE, exit_qual);
- return 1;
- }
+ if (ret)
+ return nested_vmx_failValid(vcpu, ret);
/*
* We're finally done with prerequisite checking, and can start with
* the nested entry.
*/
-
vmx->nested.nested_run_pending = 1;
- ret = enter_vmx_non_root_mode(vcpu, &exit_qual);
- if (ret) {
- nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual);
- vmx->nested.nested_run_pending = 0;
+ ret = nested_vmx_enter_non_root_mode(vcpu, true);
+ vmx->nested.nested_run_pending = !ret;
+ if (ret > 0)
return 1;
- }
+ else if (ret)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD);
/* Hide L1D cache contents from the nested guest. */
vmx->vcpu.arch.l1tf_flush_l1d = true;
/*
- * Must happen outside of enter_vmx_non_root_mode() as it will
+ * Must happen outside of nested_vmx_enter_non_root_mode() as it will
* also be used as part of restoring nVMX state for
* snapshot restore (migration).
*
@@ -12729,9 +13525,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
return kvm_vcpu_halt(vcpu);
}
return 1;
-
-out:
- return kvm_skip_emulated_instruction(vcpu);
}
/*
@@ -12863,6 +13656,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
return 0;
}
+static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+ to_vmx(vcpu)->req_immediate_exit = true;
+}
+
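The new hook only latches vmx->req_immediate_exit; its consumer is outside this hunk. A plausible sketch of the run-loop side, assuming the VMX preemption timer is rearmed with a zero count just before VMRESUME so the first open instruction window exits immediately (the helper names and the hv_timer_armed tracking field are assumptions, not confirmed by this diff):

	static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
	{
		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
		if (!vmx->loaded_vmcs->hv_timer_armed)
			vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
				      PIN_BASED_VMX_PREEMPTION_TIMER);
		vmx->loaded_vmcs->hv_timer_armed = true;
	}

	static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);

		/* A zero count forces a VMExit before the first guest insn. */
		if (vmx->req_immediate_exit) {
			vmx_arm_hv_timer(vmx, 0);
			return;
		}
		/* ... else arm for hv_deadline_tsc, or disarm the timer ... */
	}

Arming per-run this way would also explain why vmx_set_hv_timer()/vmx_cancel_hv_timer() further down stop toggling PIN_BASED_VMX_PREEMPTION_TIMER themselves.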
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
ktime_t remaining =
@@ -13040,24 +13838,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
kvm_clear_interrupt_queue(vcpu);
}
-static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- u32 entry_failure_code;
-
- nested_ept_uninit_mmu_context(vcpu);
-
- /*
- * Only PDPTE load can fail as the value of cr3 was checked on entry and
- * couldn't have changed.
- */
- if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
- nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
-
- if (!enable_ept)
- vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
-}
-
/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent is to reset L1's guest state to the host state specified
@@ -13071,6 +13851,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct kvm_segment seg;
+ u32 entry_failure_code;
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -13083,6 +13864,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
+ vmx_set_interrupt_shadow(vcpu, 0);
+
/*
* Note that calling vmx_set_cr0 is important, even if cr0 hasn't
* actually changed, because vmx_set_cr0 refers to efer set above.
@@ -13097,23 +13880,35 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
vmx_set_cr4(vcpu, vmcs12->host_cr4);
- load_vmcs12_mmu_host_state(vcpu, vmcs12);
+ nested_ept_uninit_mmu_context(vcpu);
/*
- * If vmcs01 don't use VPID, CPU flushes TLB on every
+ * Only PDPTE load can fail as the value of cr3 was checked on entry and
+ * couldn't have changed.
+ */
+ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
+
+ if (!enable_ept)
+ vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+
+ /*
+ * If vmcs01 doesn't use VPID, CPU flushes TLB on every
* VMEntry/VMExit. Thus, no need to flush TLB.
*
- * If vmcs12 uses VPID, TLB entries populated by L2 are
- * tagged with vmx->nested.vpid02 while L1 entries are tagged
- * with vmx->vpid. Thus, no need to flush TLB.
+ * If vmcs12 doesn't use VPID, L1 expects TLB to be
+ * flushed on every VMEntry/VMExit.
+ *
+ * Otherwise, we can preserve TLB entries as long as we are
+ * able to tag L1 TLB entries differently than L2 TLB entries.
*
- * Therefore, flush TLB only in case vmcs01 uses VPID and
- * vmcs12 don't use VPID as in this case L1 & L2 TLB entries
- * are both tagged with vmx->vpid.
+ * If vmcs12 uses EPT, we need to execute this flush on EPTP01
+ * and therefore we request the TLB flush to happen only after VMCS EPTP
+ * has been set by KVM_REQ_LOAD_CR3.
*/
if (enable_vpid &&
- !(nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02)) {
- vmx_flush_tlb(vcpu, true);
+ (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
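nested_has_guest_tlb_tag() is used above but defined outside this hunk. Going by the comment, a sketch of what it has to compute (the exact body is an assumption, not part of this patch):

	/*
	 * Assumed sketch: L1 and L2 translations stay distinguishable when
	 * L0 uses EPT (entries are tagged with EPTP01 vs. EPTP02) or when
	 * L2 runs with its own vpid02.
	 */
	static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
	{
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		return enable_ept ||
		       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
	}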
@@ -13193,6 +13988,140 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}
+static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
+{
+ struct shared_msr_entry *efer_msr;
+ unsigned int i;
+
+ if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
+ return vmcs_read64(GUEST_IA32_EFER);
+
+ if (cpu_has_load_ia32_efer)
+ return host_efer;
+
+ for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
+ if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
+ return vmx->msr_autoload.guest.val[i].value;
+ }
+
+ efer_msr = find_msr_entry(vmx, MSR_EFER);
+ if (efer_msr)
+ return efer_msr->data;
+
+ return host_efer;
+}
+
+static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmx_msr_entry g, h;
+ struct msr_data msr;
+ gpa_t gpa;
+ u32 i, j;
+
+ vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
+
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
+ /*
+ * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
+ * as vmcs01.GUEST_DR7 contains a userspace defined value
+ * and vcpu->arch.dr7 is not squirreled away before the
+ * nested VMENTER (not worth adding a variable in nested_vmx).
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+ kvm_set_dr(vcpu, 7, DR7_FIXED_1);
+ else
+ WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
+ }
+
+ /*
+ * Note that calling vmx_set_{efer,cr0,cr4} is important as they
+ * handle a variety of side effects to KVM's software model.
+ */
+ vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
+
+ vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
+
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+ vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
+
+ nested_ept_uninit_mmu_context(vcpu);
+ vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+ /*
+ * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
+ * from vmcs01 (if necessary). The PDPTRs are not loaded on
+ * VMFail, like everything else we just need to ensure our
+ * software model is up-to-date.
+ */
+ ept_save_pdptrs(vcpu);
+
+ kvm_mmu_reset_context(vcpu);
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmx_update_msr_bitmap(vcpu);
+
+ /*
+ * This nasty bit of open coding is a compromise between blindly
+ * loading L1's MSRs using the exit load lists (incorrect emulation
+ * of VMFail), leaving the nested VM's MSRs in the software model
+ * (incorrect behavior) and snapshotting the modified MSRs (too
+ * expensive since the lists are unbounded by hardware). For each
+ * MSR that was (prematurely) loaded from the nested VMEntry load
+ * list, reload it from the exit load list if it exists and differs
+ * from the guest value. The intent is to stuff host state as
+ * silently as possible, not to fully process the exit load list.
+ */
+ msr.host_initiated = false;
+ for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
+ gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
+ if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
+ pr_debug_ratelimited(
+ "%s read MSR index failed (%u, 0x%08llx)\n",
+ __func__, i, gpa);
+ goto vmabort;
+ }
+
+ for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
+ gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
+ if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
+ pr_debug_ratelimited(
+ "%s read MSR failed (%u, 0x%08llx)\n",
+ __func__, j, gpa);
+ goto vmabort;
+ }
+ if (h.index != g.index)
+ continue;
+ if (h.value == g.value)
+ break;
+
+ if (nested_vmx_load_msr_check(vcpu, &h)) {
+ pr_debug_ratelimited(
+ "%s check failed (%u, 0x%x, 0x%x)\n",
+ __func__, j, h.index, h.reserved);
+ goto vmabort;
+ }
+
+ msr.index = h.index;
+ msr.data = h.value;
+ if (kvm_set_msr(vcpu, &msr)) {
+ pr_debug_ratelimited(
+ "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
+ __func__, j, h.index, h.value);
+ goto vmabort;
+ }
+ }
+ }
+
+ return;
+
+vmabort:
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
+}
+
/*
* Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
* and modify vmcs12 to make it see what it would expect to see there if
@@ -13208,14 +14137,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
- /*
- * The only expected VM-instruction error is "VM entry with
- * invalid control field(s)." Anything else indicates a
- * problem with L0.
- */
- WARN_ON_ONCE(vmx->fail && (vmcs_read32(VM_INSTRUCTION_ERROR) !=
- VMXERR_ENTRY_INVALID_CONTROL_FIELD));
-
leave_guest_mode(vcpu);
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
@@ -13242,23 +14163,25 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
vmcs12->vm_exit_msr_store_count))
nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
+ } else {
+ /*
+ * The only expected VM-instruction error is "VM entry with
+ * invalid control field(s)." Anything else indicates a
+ * problem with L0. And we should never get here with a
+ * VMFail of any type if early consistency checks are enabled.
+ */
+ WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ WARN_ON_ONCE(nested_early_check);
}
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- vm_entry_controls_reset_shadow(vmx);
- vm_exit_controls_reset_shadow(vmx);
- vmx_segment_cache_clear(vmx);
/* Update any VMCS fields that might have changed while L2 ran */
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
- if (vmx->hv_deadline_tsc == -1)
- vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
- else
- vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
+
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
@@ -13296,8 +14219,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
*/
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
- if (enable_shadow_vmcs && exit_reason != -1)
- vmx->nested.sync_shadow_vmcs = true;
+ if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
+ vmx->nested.need_vmcs12_sync = true;
/* in case we halted in L2 */
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -13332,24 +14255,24 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
return;
}
-
+
/*
* After an early L2 VM-entry failure, we're now back
* in L1 which thinks it just finished a VMLAUNCH or
* VMRESUME instruction, so we need to set the failure
* flag and the VM-instruction error field of the VMCS
- * accordingly.
+ * accordingly, and skip the emulated instruction.
*/
- nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-
- load_vmcs12_mmu_host_state(vcpu, vmcs12);
+ (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
/*
- * The emulated instruction was already skipped in
- * nested_vmx_run, but the updated RIP was never
- * written back to the vmcs01.
+ * Restore L1's host state to KVM's software model. We're here
+ * because a consistency check was caught by hardware, which
+ * means some amount of guest state has been propagated to KVM's
+ * model and needs to be unwound to the host's state.
*/
- skip_emulated_instruction(vcpu);
+ nested_vmx_restore_host_state(vcpu);
+
vmx->fail = 0;
}
@@ -13362,26 +14285,7 @@ static void vmx_leave_nested(struct kvm_vcpu *vcpu)
to_vmx(vcpu)->nested.nested_run_pending = 0;
nested_vmx_vmexit(vcpu, -1, 0, 0);
}
- free_nested(to_vmx(vcpu));
-}
-
-/*
- * L1's failure to enter L2 is a subset of a normal exit, as explained in
- * 23.7 "VM-entry failures during or after loading guest state" (this also
- * lists the acceptable exit-reason and exit-qualification parameters).
- * It should only be called before L2 actually succeeded to run, and when
- * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
- */
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12,
- u32 reason, unsigned long qualification)
-{
- load_vmcs12_host_state(vcpu, vmcs12);
- vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
- vmcs12->exit_qualification = qualification;
- nested_vmx_succeed(vcpu);
- if (enable_shadow_vmcs)
- to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
+ free_nested(vcpu);
}
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
@@ -13462,18 +14366,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
return -ERANGE;
vmx->hv_deadline_tsc = tscl + delta_tsc;
- vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
-
return delta_tsc == 0;
}
static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- vmx->hv_deadline_tsc = -1;
- vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
+ to_vmx(vcpu)->hv_deadline_tsc = -1;
}
#endif
@@ -13813,7 +14711,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
if (vmx->nested.smm.guest_mode) {
vcpu->arch.hflags &= ~HF_SMM_MASK;
- ret = enter_vmx_non_root_mode(vcpu, NULL);
+ ret = nested_vmx_enter_non_root_mode(vcpu, false);
vcpu->arch.hflags |= HF_SMM_MASK;
if (ret)
return ret;
@@ -13828,6 +14726,20 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
return 0;
}
+static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * In case we do two consecutive get/set_nested_state()s while L2 was
+ * running, hv_evmcs may end up not being mapped (we map it from
+ * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always
+ * have vmcs12 if it is true.
+ */
+ return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
+ vmx->nested.hv_evmcs;
+}
+
static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
struct kvm_nested_state __user *user_kvm_nested_state,
u32 user_data_size)
@@ -13847,12 +14759,16 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
vmx = to_vmx(vcpu);
vmcs12 = get_vmcs12(vcpu);
+
+ if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
+ kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
+
if (nested_vmx_allowed(vcpu) &&
(vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
- if (vmx->nested.current_vmptr != -1ull) {
+ if (vmx_has_valid_vmcs12(vcpu)) {
kvm_state.size += VMCS12_SIZE;
if (is_guest_mode(vcpu) &&
@@ -13881,20 +14797,24 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
return -EFAULT;
- if (vmx->nested.current_vmptr == -1ull)
+ if (!vmx_has_valid_vmcs12(vcpu))
goto out;
/*
* When running L2, the authoritative vmcs12 state is in the
* vmcs02. When running L1, the authoritative vmcs12 state is
- * in the shadow vmcs linked to vmcs01, unless
- * sync_shadow_vmcs is set, in which case, the authoritative
+ * in the shadow or enlightened vmcs linked to vmcs01, unless
+ * need_vmcs12_sync is set, in which case, the authoritative
* vmcs12 state is in the vmcs12 already.
*/
- if (is_guest_mode(vcpu))
+ if (is_guest_mode(vcpu)) {
sync_vmcs12(vcpu, vmcs12);
- else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
- copy_shadow_to_vmcs12(vmx);
+ } else if (!vmx->nested.need_vmcs12_sync) {
+ if (vmx->nested.hv_evmcs)
+ copy_enlightened_to_vmcs12(vmx);
+ else if (enable_shadow_vmcs)
+ copy_shadow_to_vmcs12(vmx);
+ }
if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
return -EFAULT;
@@ -13922,6 +14842,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (kvm_state->format != 0)
return -EINVAL;
+ if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
+ nested_enable_evmcs(vcpu, NULL);
+
if (!nested_vmx_allowed(vcpu))
return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
@@ -13939,13 +14862,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
return -EINVAL;
- if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
- return -EINVAL;
-
- if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
- !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
- return -EINVAL;
-
if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
return -EINVAL;
@@ -13954,6 +14870,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
return -EINVAL;
+ /*
+ * SMM temporarily disables VMX, so we cannot be in guest mode,
+ * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
+ * must be zero.
+ */
+ if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
+ return -EINVAL;
+
if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
!(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
return -EINVAL;
@@ -13967,7 +14891,25 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (ret)
return ret;
- set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+ /* Empty 'VMXON' state is permitted */
+ if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
+ return 0;
+
+ if (kvm_state->vmx.vmcs_pa != -1ull) {
+ if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
+ !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+ return -EINVAL;
+
+ set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+ } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
+ /*
+ * Sync eVMCS upon entry as we may not have
+ * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
+ */
+ vmx->nested.need_vmcs12_sync = true;
+ } else {
+ return -EINVAL;
+ }
if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
vmx->nested.smm.vmxon = true;
@@ -14011,7 +14953,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
vmx->nested.dirty_vmcs12 = true;
- ret = enter_vmx_non_root_mode(vcpu, NULL);
+ ret = nested_vmx_enter_non_root_mode(vcpu, false);
if (ret)
return -EINVAL;
@@ -14097,6 +15039,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.apicv_post_state_restore = vmx_apicv_post_state_restore,
.hwapic_irr_update = vmx_hwapic_irr_update,
.hwapic_isr_update = vmx_hwapic_isr_update,
+ .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
@@ -14130,6 +15073,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.umip_emulated = vmx_umip_emulated,
.check_nested_events = vmx_check_nested_events,
+ .request_immediate_exit = vmx_request_immediate_exit,
.sched_in = vmx_sched_in,
@@ -14161,6 +15105,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.pre_enter_smm = vmx_pre_enter_smm,
.pre_leave_smm = vmx_pre_leave_smm,
.enable_smi_window = enable_smi_window,
+
+ .nested_enable_evmcs = nested_enable_evmcs,
};
static void vmx_cleanup_l1d_flush(void)
diff --git a/arch/x86/kvm/vmx_shadow_fields.h b/arch/x86/kvm/vmx_shadow_fields.h
index cd0c75f6d037..132432f375c2 100644
--- a/arch/x86/kvm/vmx_shadow_fields.h
+++ b/arch/x86/kvm/vmx_shadow_fields.h
@@ -28,7 +28,6 @@
*/
/* 16-bits */
-SHADOW_FIELD_RW(GUEST_CS_SELECTOR)
SHADOW_FIELD_RW(GUEST_INTR_STATUS)
SHADOW_FIELD_RW(GUEST_PML_INDEX)
SHADOW_FIELD_RW(HOST_FS_SELECTOR)
@@ -47,8 +46,8 @@ SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE)
SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD)
SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN)
SHADOW_FIELD_RW(TPR_THRESHOLD)
-SHADOW_FIELD_RW(GUEST_CS_LIMIT)
SHADOW_FIELD_RW(GUEST_CS_AR_BYTES)
+SHADOW_FIELD_RW(GUEST_SS_AR_BYTES)
SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO)
SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE)
@@ -61,8 +60,6 @@ SHADOW_FIELD_RW(GUEST_CR0)
SHADOW_FIELD_RW(GUEST_CR3)
SHADOW_FIELD_RW(GUEST_CR4)
SHADOW_FIELD_RW(GUEST_RFLAGS)
-SHADOW_FIELD_RW(GUEST_CS_BASE)
-SHADOW_FIELD_RW(GUEST_ES_BASE)
SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK)
SHADOW_FIELD_RW(CR0_READ_SHADOW)
SHADOW_FIELD_RW(CR4_READ_SHADOW)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 542f6315444d..66d66d77caee 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -136,7 +136,7 @@ static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
/* lapic timer advance (tscdeadline mode only) in nanoseconds */
-unsigned int __read_mostly lapic_timer_advance_ns = 0;
+unsigned int __read_mostly lapic_timer_advance_ns = 1000;
module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL_GPL(lapic_timer_advance_ns);
@@ -400,9 +400,51 @@ static int exception_type(int vector)
return EXCPT_FAULT;
}
+void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
+{
+ unsigned nr = vcpu->arch.exception.nr;
+ bool has_payload = vcpu->arch.exception.has_payload;
+ unsigned long payload = vcpu->arch.exception.payload;
+
+ if (!has_payload)
+ return;
+
+ switch (nr) {
+ case DB_VECTOR:
+ /*
+ * "Certain debug exceptions may clear bit 0-3. The
+ * remaining contents of the DR6 register are never
+ * cleared by the processor".
+ */
+ vcpu->arch.dr6 &= ~DR_TRAP_BITS;
+ /*
+ * DR6.RTM is set by all #DB exceptions that don't clear it.
+ */
+ vcpu->arch.dr6 |= DR6_RTM;
+ vcpu->arch.dr6 |= payload;
+ /*
+ * Bit 16 should be set in the payload whenever the #DB
+ * exception should clear DR6.RTM. This makes the payload
+ * compatible with the pending debug exceptions under VMX.
+ * Though not currently documented in the SDM, this also
+ * makes the payload compatible with the exit qualification
+ * for #DB exceptions under VMX.
+ */
+ vcpu->arch.dr6 ^= payload & DR6_RTM;
+ break;
+ case PF_VECTOR:
+ vcpu->arch.cr2 = payload;
+ break;
+ }
+
+ vcpu->arch.exception.has_payload = false;
+ vcpu->arch.exception.payload = 0;
+}
+EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
+
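A worked example of the payload arithmetic above, using the architectural DR6 layout (DR_TRAP_BITS covers bits 0-3, DR6_BS is bit 14, DR6_RTM is bit 16):

	/*
	 * Single-step #DB, payload == DR6_BS (payload bit 16 clear):
	 *
	 *	dr6 &= ~DR_TRAP_BITS;		stale B0-B3 are cleared
	 *	dr6 |= DR6_RTM;			RTM reads as 1 outside a transaction
	 *	dr6 |= payload;			BS becomes visible
	 *	dr6 ^= payload & DR6_RTM;	bit 16 clear, so RTM stays set
	 *
	 * A #DB that must clear DR6.RTM carries bit 16 in its payload; the
	 * final XOR then flips off the RTM bit the unconditional OR just set.
	 */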
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
unsigned nr, bool has_error, u32 error_code,
- bool reinject)
+ bool has_payload, unsigned long payload, bool reinject)
{
u32 prev_nr;
int class1, class2;
@@ -424,6 +466,14 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
*/
WARN_ON_ONCE(vcpu->arch.exception.pending);
vcpu->arch.exception.injected = true;
+ if (WARN_ON_ONCE(has_payload)) {
+ /*
+ * A reinjected event has already
+ * delivered its payload.
+ */
+ has_payload = false;
+ payload = 0;
+ }
} else {
vcpu->arch.exception.pending = true;
vcpu->arch.exception.injected = false;
@@ -431,6 +481,22 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
vcpu->arch.exception.has_error_code = has_error;
vcpu->arch.exception.nr = nr;
vcpu->arch.exception.error_code = error_code;
+ vcpu->arch.exception.has_payload = has_payload;
+ vcpu->arch.exception.payload = payload;
+ /*
+ * In guest mode, payload delivery should be deferred,
+ * so that the L1 hypervisor can intercept #PF before
+ * CR2 is modified (or intercept #DB before DR6 is
+ * modified under nVMX). However, for ABI
+ * compatibility with KVM_GET_VCPU_EVENTS and
+ * KVM_SET_VCPU_EVENTS, we can't delay payload
+ * delivery unless userspace has enabled this
+ * functionality via the per-VM capability,
+ * KVM_CAP_EXCEPTION_PAYLOAD.
+ */
+ if (!vcpu->kvm->arch.exception_payload_enabled ||
+ !is_guest_mode(vcpu))
+ kvm_deliver_exception_payload(vcpu);
return;
}
@@ -455,6 +521,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
vcpu->arch.exception.has_error_code = true;
vcpu->arch.exception.nr = DF_VECTOR;
vcpu->arch.exception.error_code = 0;
+ vcpu->arch.exception.has_payload = false;
+ vcpu->arch.exception.payload = 0;
} else
/* replace previous exception with a new one in a hope
that instruction re-execution will regenerate lost
@@ -464,16 +532,29 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
- kvm_multiple_exception(vcpu, nr, false, 0, false);
+ kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
- kvm_multiple_exception(vcpu, nr, false, 0, true);
+ kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
+static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
+ unsigned long payload)
+{
+ kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
+}
+
+static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
+ u32 error_code, unsigned long payload)
+{
+ kvm_multiple_exception(vcpu, nr, true, error_code,
+ true, payload, false);
+}
+
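These payload-carrying variants are exercised later in this same patch; for example, the single-step path in kvm_vcpu_do_singlestep() below now queues:

	kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);

so the DR6.BS update travels as a payload instead of being applied to vcpu->arch.dr6 at queue time.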
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
if (err)
@@ -490,11 +571,13 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
++vcpu->stat.pf_guest;
vcpu->arch.exception.nested_apf =
is_guest_mode(vcpu) && fault->async_page_fault;
- if (vcpu->arch.exception.nested_apf)
+ if (vcpu->arch.exception.nested_apf) {
vcpu->arch.apf.nested_apf_token = fault->address;
- else
- vcpu->arch.cr2 = fault->address;
- kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
+ kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
+ } else {
+ kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
+ fault->address);
+ }
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
@@ -503,7 +586,7 @@ static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fau
if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
else
- vcpu->arch.mmu.inject_page_fault(vcpu, fault);
+ vcpu->arch.mmu->inject_page_fault(vcpu, fault);
return fault->nested_page_fault;
}
@@ -517,13 +600,13 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
- kvm_multiple_exception(vcpu, nr, true, error_code, false);
+ kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
- kvm_multiple_exception(vcpu, nr, true, error_code, true);
+ kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
@@ -602,7 +685,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
if ((pdpte[i] & PT_PRESENT_MASK) &&
(pdpte[i] &
- vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
+ vcpu->arch.mmu->guest_rsvd_check.rsvd_bits_mask[0][2])) {
ret = 0;
goto out;
}
@@ -628,7 +711,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
gfn_t gfn;
int r;
- if (is_long_mode(vcpu) || !is_pae(vcpu))
+ if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu))
return false;
if (!test_bit(VCPU_EXREG_PDPTR,
@@ -2477,7 +2560,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_PV_EOI_EN:
- if (kvm_lapic_enable_pv_eoi(vcpu, data))
+ if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
return 1;
break;
@@ -2537,7 +2620,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_PLATFORM_INFO:
if (!msr_info->host_initiated ||
- data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
(!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
cpuid_fault_enabled(vcpu)))
return 1;
@@ -2780,6 +2862,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.osvw.status;
break;
case MSR_PLATFORM_INFO:
+ if (!msr_info->host_initiated &&
+ !vcpu->kvm->arch.guest_can_read_msr_platform_info)
+ return 1;
msr_info->data = vcpu->arch.msr_platform_info;
break;
case MSR_MISC_FEATURES_ENABLES:
@@ -2910,6 +2995,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_HYPERV_VP_INDEX:
case KVM_CAP_HYPERV_EVENTFD:
case KVM_CAP_HYPERV_TLBFLUSH:
+ case KVM_CAP_HYPERV_SEND_IPI:
+ case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
@@ -2927,6 +3014,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SPLIT_IRQCHIP:
case KVM_CAP_IMMEDIATE_EXIT:
case KVM_CAP_GET_MSR_FEATURES:
+ case KVM_CAP_MSR_PLATFORM_INFO:
+ case KVM_CAP_EXCEPTION_PAYLOAD:
r = 1;
break;
case KVM_CAP_SYNC_REGS:
@@ -3359,19 +3448,33 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
process_nmi(vcpu);
+
/*
- * FIXME: pass injected and pending separately. This is only
- * needed for nested virtualization, whose state cannot be
- * migrated yet. For now we can combine them.
+ * The API doesn't provide the instruction length for software
+ * exceptions, so don't report them. As long as the guest RIP
+ * isn't advanced, we should expect to encounter the exception
+ * again.
*/
- events->exception.injected =
- (vcpu->arch.exception.pending ||
- vcpu->arch.exception.injected) &&
- !kvm_exception_is_soft(vcpu->arch.exception.nr);
+ if (kvm_exception_is_soft(vcpu->arch.exception.nr)) {
+ events->exception.injected = 0;
+ events->exception.pending = 0;
+ } else {
+ events->exception.injected = vcpu->arch.exception.injected;
+ events->exception.pending = vcpu->arch.exception.pending;
+ /*
+ * For ABI compatibility, deliberately conflate
+ * pending and injected exceptions when
+ * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
+ */
+ if (!vcpu->kvm->arch.exception_payload_enabled)
+ events->exception.injected |=
+ vcpu->arch.exception.pending;
+ }
events->exception.nr = vcpu->arch.exception.nr;
events->exception.has_error_code = vcpu->arch.exception.has_error_code;
- events->exception.pad = 0;
events->exception.error_code = vcpu->arch.exception.error_code;
+ events->exception_has_payload = vcpu->arch.exception.has_payload;
+ events->exception_payload = vcpu->arch.exception.payload;
events->interrupt.injected =
vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
@@ -3395,6 +3498,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SHADOW
| KVM_VCPUEVENT_VALID_SMM);
+ if (vcpu->kvm->arch.exception_payload_enabled)
+ events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
+
memset(&events->reserved, 0, sizeof(events->reserved));
}
@@ -3406,12 +3512,24 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
| KVM_VCPUEVENT_VALID_SHADOW
- | KVM_VCPUEVENT_VALID_SMM))
+ | KVM_VCPUEVENT_VALID_SMM
+ | KVM_VCPUEVENT_VALID_PAYLOAD))
return -EINVAL;
- if (events->exception.injected &&
- (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR ||
- is_guest_mode(vcpu)))
+ if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
+ if (!vcpu->kvm->arch.exception_payload_enabled)
+ return -EINVAL;
+ if (events->exception.pending)
+ events->exception.injected = 0;
+ else
+ events->exception_has_payload = 0;
+ } else {
+ events->exception.pending = 0;
+ events->exception_has_payload = 0;
+ }
+
+ if ((events->exception.injected || events->exception.pending) &&
+ (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
return -EINVAL;
/* INITs are latched while in SMM */
@@ -3421,11 +3539,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
return -EINVAL;
process_nmi(vcpu);
- vcpu->arch.exception.injected = false;
- vcpu->arch.exception.pending = events->exception.injected;
+ vcpu->arch.exception.injected = events->exception.injected;
+ vcpu->arch.exception.pending = events->exception.pending;
vcpu->arch.exception.nr = events->exception.nr;
vcpu->arch.exception.has_error_code = events->exception.has_error_code;
vcpu->arch.exception.error_code = events->exception.error_code;
+ vcpu->arch.exception.has_payload = events->exception_has_payload;
+ vcpu->arch.exception.payload = events->exception_payload;
vcpu->arch.interrupt.injected = events->interrupt.injected;
vcpu->arch.interrupt.nr = events->interrupt.nr;
@@ -3691,6 +3811,10 @@ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
struct kvm_enable_cap *cap)
{
+ int r;
+ uint16_t vmcs_version;
+ void __user *user_ptr;
+
if (cap->flags)
return -EINVAL;
@@ -3703,6 +3827,16 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return -EINVAL;
return kvm_hv_activate_synic(vcpu, cap->cap ==
KVM_CAP_HYPERV_SYNIC2);
+ case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
+ r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version);
+ if (!r) {
+ user_ptr = (void __user *)(uintptr_t)cap->args[0];
+ if (copy_to_user(user_ptr, &vmcs_version,
+ sizeof(vmcs_version)))
+ r = -EFAULT;
+ }
+ return r;
+
default:
return -EINVAL;
}
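For reference, a hypothetical userspace sketch of the new vCPU capability; cap->args[0] carries a user pointer that receives the supported eVMCS version (the vcpu_fd parameter and helper name are illustrative only):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* vcpu_fd: an open KVM vCPU file descriptor (hypothetical). */
	static int enable_evmcs(int vcpu_fd, uint16_t *evmcs_ver)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
			.args = { (uint64_t)(uintptr_t)evmcs_ver },
		};

		return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
	}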
@@ -4007,19 +4141,23 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
+ r = -EFAULT;
if (get_user(user_data_size, &user_kvm_nested_state->size))
- return -EFAULT;
+ break;
r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
user_data_size);
if (r < 0)
- return r;
+ break;
if (r > user_data_size) {
if (put_user(r, &user_kvm_nested_state->size))
- return -EFAULT;
- return -E2BIG;
+ r = -EFAULT;
+ else
+ r = -E2BIG;
+ break;
}
+
r = 0;
break;
}
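The -E2BIG handling above implements a two-call sizing handshake; a hypothetical userspace sketch (vcpu_fd and the error handling are illustrative):

	#include <errno.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static struct kvm_nested_state *get_nested_state(int vcpu_fd)
	{
		struct kvm_nested_state hdr = { .size = sizeof(hdr) };
		struct kvm_nested_state *st;

		if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &hdr) == 0 ||
		    errno != E2BIG)
			return NULL;	/* header-only state, or a real error */

		/* The kernel wrote the required size back into hdr.size. */
		st = calloc(1, hdr.size);
		st->size = hdr.size;
		if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, st) != 0) {
			free(st);
			return NULL;
		}
		return st;	/* st->data holds vmcs12 (+ shadow, if any) */
	}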
@@ -4031,19 +4169,23 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (!kvm_x86_ops->set_nested_state)
break;
+ r = -EFAULT;
if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
- return -EFAULT;
+ break;
+ r = -EINVAL;
if (kvm_state.size < sizeof(kvm_state))
- return -EINVAL;
+ break;
if (kvm_state.flags &
- ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE))
- return -EINVAL;
+ ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
+ | KVM_STATE_NESTED_EVMCS))
+ break;
/* nested_run_pending implies guest_mode. */
- if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING)
- return -EINVAL;
+ if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
+ && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
+ break;
r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
break;
@@ -4350,6 +4492,14 @@ split_irqchip_unlock:
kvm->arch.pause_in_guest = true;
r = 0;
break;
+ case KVM_CAP_MSR_PLATFORM_INFO:
+ kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
+ r = 0;
+ break;
+ case KVM_CAP_EXCEPTION_PAYLOAD:
+ kvm->arch.exception_payload_enabled = cap->args[0];
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
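Both new per-VM capabilities take a plain boolean in args[0]; a hypothetical userspace fragment (vm_fd is assumed to be an open KVM VM file descriptor):

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_EXCEPTION_PAYLOAD,
		.args = { 1 },	/* split pending vs. injected, expose payloads */
	};

	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);

	cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
	cap.args[0] = 0;	/* make guest reads of MSR_PLATFORM_INFO #GP */
	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);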
@@ -4685,7 +4835,7 @@ static void kvm_init_msr_list(void)
*/
switch (msrs_to_save[i]) {
case MSR_IA32_BNDCFGS:
- if (!kvm_x86_ops->mpx_supported())
+ if (!kvm_mpx_supported())
continue;
break;
case MSR_TSC_AUX:
@@ -4790,7 +4940,7 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
/* NPT walks are always user-walks */
access |= PFERR_USER_MASK;
- t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
+ t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
return t_gpa;
}
@@ -5876,7 +6026,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
if (WARN_ON_ONCE(is_guest_mode(vcpu)))
return false;
- if (!vcpu->arch.mmu.direct_map) {
+ if (!vcpu->arch.mmu->direct_map) {
/*
* Write permission should be allowed since only
* write access need to be emulated.
@@ -5909,7 +6059,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
kvm_release_pfn_clean(pfn);
/* The instructions are well-emulated on direct mmu. */
- if (vcpu->arch.mmu.direct_map) {
+ if (vcpu->arch.mmu->direct_map) {
unsigned int indirect_shadow_pages;
spin_lock(&vcpu->kvm->mmu_lock);
@@ -5976,7 +6126,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
vcpu->arch.last_retry_eip = ctxt->eip;
vcpu->arch.last_retry_addr = cr2;
- if (!vcpu->arch.mmu.direct_map)
+ if (!vcpu->arch.mmu->direct_map)
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
@@ -6036,14 +6186,7 @@ static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
kvm_run->exit_reason = KVM_EXIT_DEBUG;
*r = EMULATE_USER_EXIT;
} else {
- /*
- * "Certain debug exceptions may clear bit 0-3. The
- * remaining contents of the DR6 register are never
- * cleared by the processor".
- */
- vcpu->arch.dr6 &= ~15;
- vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
- kvm_queue_exception(vcpu, DB_VECTOR);
+ kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
}
}
@@ -6982,10 +7125,22 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
X86_EFLAGS_RF);
- if (vcpu->arch.exception.nr == DB_VECTOR &&
- (vcpu->arch.dr7 & DR7_GD)) {
- vcpu->arch.dr7 &= ~DR7_GD;
- kvm_update_dr7(vcpu);
+ if (vcpu->arch.exception.nr == DB_VECTOR) {
+ /*
+ * This code assumes that nSVM doesn't use
+ * check_nested_events(). If it does, the
+ * DR6/DR7 changes should happen before L1
+ * gets a #VMEXIT for an intercepted #DB in
+ * L2. (Under VMX, on the other hand, the
+ * DR6/DR7 changes should not happen in the
+ * event of a VM-exit to L1 for an intercepted
+ * #DB in L2.)
+ */
+ kvm_deliver_exception_payload(vcpu);
+ if (vcpu->arch.dr7 & DR7_GD) {
+ vcpu->arch.dr7 &= ~DR7_GD;
+ kvm_update_dr7(vcpu);
+ }
}
kvm_x86_ops->queue_exception(vcpu);
@@ -7361,6 +7516,12 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
+void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+ smp_send_reschedule(vcpu->cpu);
+}
+EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
+
/*
* Returns 1 to let vcpu_run() continue the guest execution loop without
* exiting to the userspace. Otherwise, the value will be returned to the
@@ -7565,7 +7726,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (req_immediate_exit) {
kvm_make_request(KVM_REQ_EVENT, vcpu);
- smp_send_reschedule(vcpu->cpu);
+ kvm_x86_ops->request_immediate_exit(vcpu);
}
trace_kvm_entry(vcpu->vcpu_id);
@@ -7829,6 +7990,29 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
return 0;
}
+/* Swap (qemu) user FPU context for the guest FPU context. */
+static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
+ /* PKRU is separately restored in kvm_x86_ops->run. */
+ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+ ~XFEATURE_MASK_PKRU);
+ preempt_enable();
+ trace_kvm_fpu(1);
+}
+
+/* When vcpu_run ends, restore user space FPU context. */
+static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
+ copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+ preempt_enable();
+ ++vcpu->stat.fpu_reload;
+ trace_kvm_fpu(0);
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;
@@ -8177,7 +8361,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
kvm_update_cpuid(vcpu);
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (!is_long_mode(vcpu) && is_pae(vcpu)) {
+ if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
mmu_reset_needed = 1;
}
@@ -8406,29 +8590,6 @@ static void fx_init(struct kvm_vcpu *vcpu)
vcpu->arch.cr0 |= X86_CR0_ET;
}
-/* Swap (qemu) user FPU context for the guest FPU context. */
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
-{
- preempt_disable();
- copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
- /* PKRU is separately restored in kvm_x86_ops->run. */
- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
- ~XFEATURE_MASK_PKRU);
- preempt_enable();
- trace_kvm_fpu(1);
-}
-
-/* When vcpu_run ends, restore user space FPU context. */
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
-{
- preempt_disable();
- copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
- copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
- preempt_enable();
- ++vcpu->stat.fpu_reload;
- trace_kvm_fpu(0);
-}
-
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
@@ -8459,7 +8620,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
kvm_vcpu_mtrr_init(vcpu);
vcpu_load(vcpu);
kvm_vcpu_reset(vcpu, false);
- kvm_mmu_setup(vcpu);
+ kvm_init_mmu(vcpu, false);
vcpu_put(vcpu);
return 0;
}
@@ -8852,6 +9013,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
pvclock_update_vm_gtod_copy(kvm);
+ kvm->arch.guest_can_read_msr_platform_info = true;
+
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
@@ -9200,6 +9363,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvm_page_track_flush_slot(kvm, slot);
}
+static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ return (is_guest_mode(vcpu) &&
+ kvm_x86_ops->guest_apic_has_interrupt &&
+ kvm_x86_ops->guest_apic_has_interrupt(vcpu));
+}
+
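The vendor hook (wired up as vmx_guest_apic_has_interrupt in the ops table earlier in this patch) is not shown here. Conceptually it has to decide whether a halted L2 vCPU has a deliverable virtual interrupt by comparing RVI against the VPPR in vmcs12's virtual-APIC page; a rough sketch under that assumption:

	/* Assumed sketch, not the verbatim implementation: */
	static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);
		void *vapic;
		u32 vppr;
		u8 rvi;

		if (!is_guest_mode(vcpu) ||
		    !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
		    !vmx->nested.virtual_apic_page)
			return false;

		rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;
		vapic = kmap(vmx->nested.virtual_apic_page);
		vppr = *((u32 *)(vapic + APIC_PROCPRI));
		kunmap(vmx->nested.virtual_apic_page);

		return (rvi & 0xf0) > (vppr & 0xf0);
	}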
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
if (!list_empty_careful(&vcpu->async_pf.done))
@@ -9224,7 +9394,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
return true;
if (kvm_arch_interrupt_allowed(vcpu) &&
- kvm_cpu_has_interrupt(vcpu))
+ (kvm_cpu_has_interrupt(vcpu) ||
+ kvm_guest_apic_has_interrupt(vcpu)))
return true;
if (kvm_hv_has_stimer_pending(vcpu))
@@ -9298,7 +9469,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
int r;
- if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
+ if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
work->wakeup_all)
return;
@@ -9306,11 +9477,11 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
if (unlikely(r))
return;
- if (!vcpu->arch.mmu.direct_map &&
- work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+ if (!vcpu->arch.mmu->direct_map &&
+ work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
return;
- vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+ vcpu->arch.mmu->page_fault(vcpu, work->gva, 0, true);
}
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
@@ -9434,6 +9605,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
vcpu->arch.exception.nr = 0;
vcpu->arch.exception.has_error_code = false;
vcpu->arch.exception.error_code = 0;
+ vcpu->arch.exception.has_payload = false;
+ vcpu->arch.exception.payload = 0;
} else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
fault.vector = PF_VECTOR;
fault.error_code_valid = true;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 67b9568613f3..224cd0a47568 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -266,6 +266,8 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
int handle_ud(struct kvm_vcpu *vcpu);
+void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);
+
void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 46e71a74e612..ad8e0906d1ea 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -273,11 +273,11 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define SRC(y...) \
9999: y; \
- _ASM_EXTABLE(9999b, 6001f)
+ _ASM_EXTABLE_UA(9999b, 6001f)
#define DST(y...) \
9999: y; \
- _ASM_EXTABLE(9999b, 6002f)
+ _ASM_EXTABLE_UA(9999b, 6002f)
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 020f75cc8cf6..db4e5aa0858b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -92,26 +92,26 @@ ENTRY(copy_user_generic_unrolled)
60: jmp copy_user_handle_tail /* ecx is zerorest also */
.previous
- _ASM_EXTABLE(1b,30b)
- _ASM_EXTABLE(2b,30b)
- _ASM_EXTABLE(3b,30b)
- _ASM_EXTABLE(4b,30b)
- _ASM_EXTABLE(5b,30b)
- _ASM_EXTABLE(6b,30b)
- _ASM_EXTABLE(7b,30b)
- _ASM_EXTABLE(8b,30b)
- _ASM_EXTABLE(9b,30b)
- _ASM_EXTABLE(10b,30b)
- _ASM_EXTABLE(11b,30b)
- _ASM_EXTABLE(12b,30b)
- _ASM_EXTABLE(13b,30b)
- _ASM_EXTABLE(14b,30b)
- _ASM_EXTABLE(15b,30b)
- _ASM_EXTABLE(16b,30b)
- _ASM_EXTABLE(18b,40b)
- _ASM_EXTABLE(19b,40b)
- _ASM_EXTABLE(21b,50b)
- _ASM_EXTABLE(22b,50b)
+ _ASM_EXTABLE_UA(1b, 30b)
+ _ASM_EXTABLE_UA(2b, 30b)
+ _ASM_EXTABLE_UA(3b, 30b)
+ _ASM_EXTABLE_UA(4b, 30b)
+ _ASM_EXTABLE_UA(5b, 30b)
+ _ASM_EXTABLE_UA(6b, 30b)
+ _ASM_EXTABLE_UA(7b, 30b)
+ _ASM_EXTABLE_UA(8b, 30b)
+ _ASM_EXTABLE_UA(9b, 30b)
+ _ASM_EXTABLE_UA(10b, 30b)
+ _ASM_EXTABLE_UA(11b, 30b)
+ _ASM_EXTABLE_UA(12b, 30b)
+ _ASM_EXTABLE_UA(13b, 30b)
+ _ASM_EXTABLE_UA(14b, 30b)
+ _ASM_EXTABLE_UA(15b, 30b)
+ _ASM_EXTABLE_UA(16b, 30b)
+ _ASM_EXTABLE_UA(18b, 40b)
+ _ASM_EXTABLE_UA(19b, 40b)
+ _ASM_EXTABLE_UA(21b, 50b)
+ _ASM_EXTABLE_UA(22b, 50b)
ENDPROC(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
@@ -156,8 +156,8 @@ ENTRY(copy_user_generic_string)
jmp copy_user_handle_tail
.previous
- _ASM_EXTABLE(1b,11b)
- _ASM_EXTABLE(3b,12b)
+ _ASM_EXTABLE_UA(1b, 11b)
+ _ASM_EXTABLE_UA(3b, 12b)
ENDPROC(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
@@ -189,7 +189,7 @@ ENTRY(copy_user_enhanced_fast_string)
jmp copy_user_handle_tail
.previous
- _ASM_EXTABLE(1b,12b)
+ _ASM_EXTABLE_UA(1b, 12b)
ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
@@ -319,27 +319,27 @@ ENTRY(__copy_user_nocache)
jmp copy_user_handle_tail
.previous
- _ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
- _ASM_EXTABLE(20b,.L_fixup_8b_copy)
- _ASM_EXTABLE(21b,.L_fixup_8b_copy)
- _ASM_EXTABLE(30b,.L_fixup_4b_copy)
- _ASM_EXTABLE(31b,.L_fixup_4b_copy)
- _ASM_EXTABLE(40b,.L_fixup_1b_copy)
- _ASM_EXTABLE(41b,.L_fixup_1b_copy)
+ _ASM_EXTABLE_UA(1b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(2b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(3b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(4b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(5b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(6b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(7b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(8b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(9b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(10b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(11b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(12b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(13b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(14b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(15b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(16b, .L_fixup_4x8b_copy)
+ _ASM_EXTABLE_UA(20b, .L_fixup_8b_copy)
+ _ASM_EXTABLE_UA(21b, .L_fixup_8b_copy)
+ _ASM_EXTABLE_UA(30b, .L_fixup_4b_copy)
+ _ASM_EXTABLE_UA(31b, .L_fixup_4b_copy)
+ _ASM_EXTABLE_UA(40b, .L_fixup_1b_copy)
+ _ASM_EXTABLE_UA(41b, .L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 45a53dfe1859..a4a379e79259 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -31,14 +31,18 @@
.macro source
10:
- _ASM_EXTABLE(10b, .Lbad_source)
+ _ASM_EXTABLE_UA(10b, .Lbad_source)
.endm
.macro dest
20:
- _ASM_EXTABLE(20b, .Lbad_dest)
+ _ASM_EXTABLE_UA(20b, .Lbad_dest)
.endm
+ /*
+ * No _ASM_EXTABLE_UA; this is used for intentional prefetch on a
+ * potentially unmapped kernel address.
+ */
.macro ignore L=.Lignore
30:
_ASM_EXTABLE(30b, \L)
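The difference presumably lives in the fixup handler recorded in the exception table entry: _ASM_EXTABLE_UA would route to a uaccess-specific handler so the fault path can warn when a "user access" fixup fires on a kernel address. Assuming the existing _ASM_EXTABLE_HANDLE() convention in <asm/asm.h>, the pair would look like:

	# define _ASM_EXTABLE(from, to)				\
		_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)

	# define _ASM_EXTABLE_UA(from, to)			\
		_ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)

That reading matches the one entry deliberately left alone above: the prefetch in the "ignore" macro may legitimately fault on a kernel address, so it stays on the default handler.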
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 49b167f73215..74fdff968ea3 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -132,12 +132,12 @@ bad_get_user_8:
END(bad_get_user_8)
#endif
- _ASM_EXTABLE(1b,bad_get_user)
- _ASM_EXTABLE(2b,bad_get_user)
- _ASM_EXTABLE(3b,bad_get_user)
+ _ASM_EXTABLE_UA(1b, bad_get_user)
+ _ASM_EXTABLE_UA(2b, bad_get_user)
+ _ASM_EXTABLE_UA(3b, bad_get_user)
#ifdef CONFIG_X86_64
- _ASM_EXTABLE(4b,bad_get_user)
+ _ASM_EXTABLE_UA(4b, bad_get_user)
#else
- _ASM_EXTABLE(4b,bad_get_user_8)
- _ASM_EXTABLE(5b,bad_get_user_8)
+ _ASM_EXTABLE_UA(4b, bad_get_user_8)
+ _ASM_EXTABLE_UA(5b, bad_get_user_8)
#endif
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 96dce5fe2a35..d2e5c9c39601 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -94,10 +94,10 @@ bad_put_user:
EXIT
END(bad_put_user)
- _ASM_EXTABLE(1b,bad_put_user)
- _ASM_EXTABLE(2b,bad_put_user)
- _ASM_EXTABLE(3b,bad_put_user)
- _ASM_EXTABLE(4b,bad_put_user)
+ _ASM_EXTABLE_UA(1b, bad_put_user)
+ _ASM_EXTABLE_UA(2b, bad_put_user)
+ _ASM_EXTABLE_UA(3b, bad_put_user)
+ _ASM_EXTABLE_UA(4b, bad_put_user)
#ifdef CONFIG_X86_32
- _ASM_EXTABLE(5b,bad_put_user)
+ _ASM_EXTABLE_UA(5b, bad_put_user)
#endif
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 7add8ba06887..71fb58d44d58 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -47,8 +47,8 @@ do { \
"3: lea 0(%2,%0,4),%0\n" \
" jmp 2b\n" \
".previous\n" \
- _ASM_EXTABLE(0b,3b) \
- _ASM_EXTABLE(1b,2b) \
+ _ASM_EXTABLE_UA(0b, 3b) \
+ _ASM_EXTABLE_UA(1b, 2b) \
: "=&c"(size), "=&D" (__d0) \
: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)
@@ -153,44 +153,44 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
"101: lea 0(%%eax,%0,4),%0\n"
" jmp 100b\n"
".previous\n"
- _ASM_EXTABLE(1b,100b)
- _ASM_EXTABLE(2b,100b)
- _ASM_EXTABLE(3b,100b)
- _ASM_EXTABLE(4b,100b)
- _ASM_EXTABLE(5b,100b)
- _ASM_EXTABLE(6b,100b)
- _ASM_EXTABLE(7b,100b)
- _ASM_EXTABLE(8b,100b)
- _ASM_EXTABLE(9b,100b)
- _ASM_EXTABLE(10b,100b)
- _ASM_EXTABLE(11b,100b)
- _ASM_EXTABLE(12b,100b)
- _ASM_EXTABLE(13b,100b)
- _ASM_EXTABLE(14b,100b)
- _ASM_EXTABLE(15b,100b)
- _ASM_EXTABLE(16b,100b)
- _ASM_EXTABLE(17b,100b)
- _ASM_EXTABLE(18b,100b)
- _ASM_EXTABLE(19b,100b)
- _ASM_EXTABLE(20b,100b)
- _ASM_EXTABLE(21b,100b)
- _ASM_EXTABLE(22b,100b)
- _ASM_EXTABLE(23b,100b)
- _ASM_EXTABLE(24b,100b)
- _ASM_EXTABLE(25b,100b)
- _ASM_EXTABLE(26b,100b)
- _ASM_EXTABLE(27b,100b)
- _ASM_EXTABLE(28b,100b)
- _ASM_EXTABLE(29b,100b)
- _ASM_EXTABLE(30b,100b)
- _ASM_EXTABLE(31b,100b)
- _ASM_EXTABLE(32b,100b)
- _ASM_EXTABLE(33b,100b)
- _ASM_EXTABLE(34b,100b)
- _ASM_EXTABLE(35b,100b)
- _ASM_EXTABLE(36b,100b)
- _ASM_EXTABLE(37b,100b)
- _ASM_EXTABLE(99b,101b)
+ _ASM_EXTABLE_UA(1b, 100b)
+ _ASM_EXTABLE_UA(2b, 100b)
+ _ASM_EXTABLE_UA(3b, 100b)
+ _ASM_EXTABLE_UA(4b, 100b)
+ _ASM_EXTABLE_UA(5b, 100b)
+ _ASM_EXTABLE_UA(6b, 100b)
+ _ASM_EXTABLE_UA(7b, 100b)
+ _ASM_EXTABLE_UA(8b, 100b)
+ _ASM_EXTABLE_UA(9b, 100b)
+ _ASM_EXTABLE_UA(10b, 100b)
+ _ASM_EXTABLE_UA(11b, 100b)
+ _ASM_EXTABLE_UA(12b, 100b)
+ _ASM_EXTABLE_UA(13b, 100b)
+ _ASM_EXTABLE_UA(14b, 100b)
+ _ASM_EXTABLE_UA(15b, 100b)
+ _ASM_EXTABLE_UA(16b, 100b)
+ _ASM_EXTABLE_UA(17b, 100b)
+ _ASM_EXTABLE_UA(18b, 100b)
+ _ASM_EXTABLE_UA(19b, 100b)
+ _ASM_EXTABLE_UA(20b, 100b)
+ _ASM_EXTABLE_UA(21b, 100b)
+ _ASM_EXTABLE_UA(22b, 100b)
+ _ASM_EXTABLE_UA(23b, 100b)
+ _ASM_EXTABLE_UA(24b, 100b)
+ _ASM_EXTABLE_UA(25b, 100b)
+ _ASM_EXTABLE_UA(26b, 100b)
+ _ASM_EXTABLE_UA(27b, 100b)
+ _ASM_EXTABLE_UA(28b, 100b)
+ _ASM_EXTABLE_UA(29b, 100b)
+ _ASM_EXTABLE_UA(30b, 100b)
+ _ASM_EXTABLE_UA(31b, 100b)
+ _ASM_EXTABLE_UA(32b, 100b)
+ _ASM_EXTABLE_UA(33b, 100b)
+ _ASM_EXTABLE_UA(34b, 100b)
+ _ASM_EXTABLE_UA(35b, 100b)
+ _ASM_EXTABLE_UA(36b, 100b)
+ _ASM_EXTABLE_UA(37b, 100b)
+ _ASM_EXTABLE_UA(99b, 101b)
: "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory");
@@ -259,26 +259,26 @@ static unsigned long __copy_user_intel_nocache(void *to,
"9: lea 0(%%eax,%0,4),%0\n"
"16: jmp 8b\n"
".previous\n"
- _ASM_EXTABLE(0b,16b)
- _ASM_EXTABLE(1b,16b)
- _ASM_EXTABLE(2b,16b)
- _ASM_EXTABLE(21b,16b)
- _ASM_EXTABLE(3b,16b)
- _ASM_EXTABLE(31b,16b)
- _ASM_EXTABLE(4b,16b)
- _ASM_EXTABLE(41b,16b)
- _ASM_EXTABLE(10b,16b)
- _ASM_EXTABLE(51b,16b)
- _ASM_EXTABLE(11b,16b)
- _ASM_EXTABLE(61b,16b)
- _ASM_EXTABLE(12b,16b)
- _ASM_EXTABLE(71b,16b)
- _ASM_EXTABLE(13b,16b)
- _ASM_EXTABLE(81b,16b)
- _ASM_EXTABLE(14b,16b)
- _ASM_EXTABLE(91b,16b)
- _ASM_EXTABLE(6b,9b)
- _ASM_EXTABLE(7b,16b)
+ _ASM_EXTABLE_UA(0b, 16b)
+ _ASM_EXTABLE_UA(1b, 16b)
+ _ASM_EXTABLE_UA(2b, 16b)
+ _ASM_EXTABLE_UA(21b, 16b)
+ _ASM_EXTABLE_UA(3b, 16b)
+ _ASM_EXTABLE_UA(31b, 16b)
+ _ASM_EXTABLE_UA(4b, 16b)
+ _ASM_EXTABLE_UA(41b, 16b)
+ _ASM_EXTABLE_UA(10b, 16b)
+ _ASM_EXTABLE_UA(51b, 16b)
+ _ASM_EXTABLE_UA(11b, 16b)
+ _ASM_EXTABLE_UA(61b, 16b)
+ _ASM_EXTABLE_UA(12b, 16b)
+ _ASM_EXTABLE_UA(71b, 16b)
+ _ASM_EXTABLE_UA(13b, 16b)
+ _ASM_EXTABLE_UA(81b, 16b)
+ _ASM_EXTABLE_UA(14b, 16b)
+ _ASM_EXTABLE_UA(91b, 16b)
+ _ASM_EXTABLE_UA(6b, 9b)
+ _ASM_EXTABLE_UA(7b, 16b)
: "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory");
@@ -321,9 +321,9 @@ do { \
"3: lea 0(%3,%0,4),%0\n" \
" jmp 2b\n" \
".previous\n" \
- _ASM_EXTABLE(4b,5b) \
- _ASM_EXTABLE(0b,3b) \
- _ASM_EXTABLE(1b,2b) \
+ _ASM_EXTABLE_UA(4b, 5b) \
+ _ASM_EXTABLE_UA(0b, 3b) \
+ _ASM_EXTABLE_UA(1b, 2b) \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
: "3"(size), "0"(size), "1"(to), "2"(from) \
: "memory"); \
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 9c5606d88f61..1bd837cdc4b1 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -37,8 +37,8 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
"3: lea 0(%[size1],%[size8],8),%[size8]\n"
" jmp 2b\n"
".previous\n"
- _ASM_EXTABLE(0b,3b)
- _ASM_EXTABLE(1b,2b)
+ _ASM_EXTABLE_UA(0b, 3b)
+ _ASM_EXTABLE_UA(1b, 2b)
: [size8] "=&c"(size), [dst] "=&D" (__d0)
: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
clac();
@@ -153,7 +153,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
return rc;
}
-void memcpy_flushcache(void *_dst, const void *_src, size_t size)
+void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
unsigned long dest = (unsigned long) _dst;
unsigned long source = (unsigned long) _src;
@@ -216,7 +216,7 @@ void memcpy_flushcache(void *_dst, const void *_src, size_t size)
clean_cache_range((void *) dest, size);
}
}
-EXPORT_SYMBOL_GPL(memcpy_flushcache);
+EXPORT_SYMBOL_GPL(__memcpy_flushcache);
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
size_t len)
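
Renaming memcpy_flushcache() to __memcpy_flushcache() makes room for an inline header wrapper that short-circuits small constant-size copies. A hedged sketch of that shape; the special-cased sizes and movnti encodings show the plausible fast path, not a definitive implementation:

    /* Sketch: inline fast path for tiny constant sizes, falling back
     * to the out-of-line __memcpy_flushcache() for everything else.
     */
    static __always_inline void memcpy_flushcache(void *dst, const void *src,
                                                  size_t cnt)
    {
            if (__builtin_constant_p(cnt)) {
                    switch (cnt) {
                    case 4:
                            asm ("movntil %1, %0" : "=m" (*(u32 *)dst)
                                                  : "r" (*(const u32 *)src));
                            return;
                    case 8:
                            asm ("movntiq %1, %0" : "=m" (*(u64 *)dst)
                                                  : "r" (*(const u64 *)src));
                            return;
                    }
            }
            __memcpy_flushcache(dst, src, cnt);
    }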
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 076ebdce9bd4..12d7e7fb4efd 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -15,7 +15,6 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-static DEFINE_PER_CPU(struct kcore_list, kcore_entry_trampoline);
#endif
struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -83,8 +82,6 @@ static void percpu_setup_debug_store(int cpu)
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
- extern char _entry_trampoline[];
-
/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
pgprot_t gdt_prot = PAGE_KERNEL_RO;
pgprot_t tss_prot = PAGE_KERNEL_RO;
@@ -146,43 +143,10 @@ static void __init setup_cpu_entry_area(int cpu)
cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
&per_cpu(exception_stacks, cpu),
sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
-
- cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
- __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
- /*
- * The cpu_entry_area alias addresses are not in the kernel binary
- * so they do not show up in /proc/kcore normally. This adds entries
- * for them manually.
- */
- kclist_add_remap(&per_cpu(kcore_entry_trampoline, cpu),
- _entry_trampoline,
- &get_cpu_entry_area(cpu)->entry_trampoline, PAGE_SIZE);
#endif
percpu_setup_debug_store(cpu);
}
-#ifdef CONFIG_X86_64
-int arch_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
- char *name)
-{
- unsigned int cpu, ncpu = 0;
-
- if (symnum >= num_possible_cpus())
- return -EINVAL;
-
- for_each_possible_cpu(cpu) {
- if (ncpu++ >= symnum)
- break;
- }
-
- *value = (unsigned long)&get_cpu_entry_area(cpu)->entry_trampoline;
- *type = 't';
- strlcpy(name, "__entry_SYSCALL_64_trampoline", KSYM_NAME_LEN);
-
- return 0;
-}
-#endif
-
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a12afff146d1..fc37bbd23eb8 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -19,7 +19,9 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <asm/e820/types.h>
#include <asm/pgtable.h>
/*
@@ -241,6 +243,29 @@ static unsigned long normalize_addr(unsigned long u)
return (signed long)(u << shift) >> shift;
}
+static void note_wx(struct pg_state *st)
+{
+ unsigned long npages;
+
+ npages = (st->current_address - st->start_address) / PAGE_SIZE;
+
+#ifdef CONFIG_PCI_BIOS
+ /*
+ * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
+ * Inform about it, but avoid the warning.
+ */
+ if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
+ st->current_address <= PAGE_OFFSET + BIOS_END) {
+ pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
+ return;
+ }
+#endif
+ /* Account the WX pages */
+ st->wx_pages += npages;
+ WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
+ (void *)st->start_address);
+}
+
/*
* This function gets called on a break in a continuous series
* of PTE entries; the next one is different so we need to
@@ -276,14 +301,8 @@ static void note_page(struct seq_file *m, struct pg_state *st,
unsigned long delta;
int width = sizeof(unsigned long) * 2;
- if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX)) {
- WARN_ONCE(1,
- "x86/mm: Found insecure W+X mapping at address %p/%pS\n",
- (void *)st->start_address,
- (void *)st->start_address);
- st->wx_pages += (st->current_address -
- st->start_address) / PAGE_SIZE;
- }
+ if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
+ note_wx(st);
/*
* Now print the actual finished series
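
The refactor above reduces the check to a single predicate: a mapping is W+X when it is writable and not marked no-execute. Restated as a self-contained helper for clarity (the name is illustrative; note_page() open-codes this test before calling note_wx()):

    /* Sketch: the W+X predicate used above. 'eff' holds the effective
     * protection bits accumulated across all page-table levels.
     */
    static inline bool is_wx_mapping(pgprotval_t eff)
    {
            return (eff & _PAGE_RW) && !(eff & _PAGE_NX);
    }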
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 45f5d6cf65ae..6521134057e8 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -8,7 +8,8 @@
#include <asm/kdebug.h>
typedef bool (*ex_handler_t)(const struct exception_table_entry *,
- struct pt_regs *, int);
+ struct pt_regs *, int, unsigned long,
+ unsigned long);
static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
@@ -22,7 +23,9 @@ ex_fixup_handler(const struct exception_table_entry *x)
}
__visible bool ex_handler_default(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
{
regs->ip = ex_fixup_addr(fixup);
return true;
@@ -30,7 +33,9 @@ __visible bool ex_handler_default(const struct exception_table_entry *fixup,
EXPORT_SYMBOL(ex_handler_default);
__visible bool ex_handler_fault(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
{
regs->ip = ex_fixup_addr(fixup);
regs->ax = trapnr;
@@ -43,7 +48,9 @@ EXPORT_SYMBOL_GPL(ex_handler_fault);
* result of a refcount inc/dec/add/sub.
*/
__visible bool ex_handler_refcount(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
{
/* First unconditionally saturate the refcount. */
*(int *)regs->cx = INT_MIN / 2;
@@ -96,7 +103,9 @@ EXPORT_SYMBOL(ex_handler_refcount);
* out all the FPU registers) if we can't restore from the task's FPU state.
*/
__visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
{
regs->ip = ex_fixup_addr(fixup);
@@ -108,9 +117,79 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
+/* Helper to check whether a uaccess fault indicates a kernel bug. */
+static bool bogus_uaccess(struct pt_regs *regs, int trapnr,
+ unsigned long fault_addr)
+{
+ /* This is the normal case: #PF with a fault address in userspace. */
+ if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX)
+ return false;
+
+ /*
+ * This code can be reached for machine checks, but only if the #MC
+ * handler has already decided that it looks like a candidate for fixup.
+ * This e.g. happens when attempting to access userspace memory which
+ * the CPU can't access because of uncorrectable bad memory.
+ */
+ if (trapnr == X86_TRAP_MC)
+ return false;
+
+ /*
+ * There are two remaining exception types we might encounter here:
+ * - #PF for faulting accesses to kernel addresses
+ * - #GP for faulting accesses to noncanonical addresses
+ * Complain about anything else.
+ */
+ if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) {
+ WARN(1, "unexpected trap %d in uaccess\n", trapnr);
+ return false;
+ }
+
+ /*
+ * This is a faulting memory access in kernel space, on a kernel
+ * address, in a usercopy function. This can e.g. be caused by improper
+ * use of helpers like __put_user and by improper attempts to access
+ * userspace addresses in KERNEL_DS regions.
+	 * The one (semi-)legitimate exception is probe_kernel_{read,write}(),
+	 * which can be invoked from places like kgdb, /dev/mem (for reading)
+	 * and privileged BPF code (for reading).
+	 * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag
+	 * to tell us that faulting on kernel addresses, and even noncanonical
+	 * addresses, in a userspace accessor does not necessarily imply a
+	 * kernel bug; root might just be doing weird stuff.
+ */
+ if (current->kernel_uaccess_faults_ok)
+ return false;
+
+ /* This is bad. Refuse the fixup so that we go into die(). */
+ if (trapnr == X86_TRAP_PF) {
+ pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n",
+ fault_addr);
+ } else {
+ pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n");
+ }
+ return true;
+}
+
+__visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
+{
+ if (bogus_uaccess(regs, trapnr, fault_addr))
+ return false;
+ regs->ip = ex_fixup_addr(fixup);
+ return true;
+}
+EXPORT_SYMBOL(ex_handler_uaccess);
+
__visible bool ex_handler_ext(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
{
+ if (bogus_uaccess(regs, trapnr, fault_addr))
+ return false;
/* Special hack for uaccess_err */
current->thread.uaccess_err = 1;
regs->ip = ex_fixup_addr(fixup);
@@ -119,7 +198,9 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup,
EXPORT_SYMBOL(ex_handler_ext);
__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
{
if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pF)\n",
(unsigned int)regs->cx, regs->ip, (void *)regs->ip))
@@ -134,7 +215,9 @@ __visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup
EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
__visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
{
if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pF)\n",
(unsigned int)regs->cx, (unsigned int)regs->dx,
@@ -148,12 +231,14 @@ __visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup
EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
__visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+ struct pt_regs *regs, int trapnr,
+ unsigned long error_code,
+ unsigned long fault_addr)
{
if (static_cpu_has(X86_BUG_NULL_SEG))
asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
asm volatile ("mov %0, %%fs" : : "rm" (0));
- return ex_handler_default(fixup, regs, trapnr);
+ return ex_handler_default(fixup, regs, trapnr, error_code, fault_addr);
}
EXPORT_SYMBOL(ex_handler_clear_fs);
@@ -170,7 +255,8 @@ __visible bool ex_has_fault_handler(unsigned long ip)
return handler == ex_handler_fault;
}
-int fixup_exception(struct pt_regs *regs, int trapnr)
+int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
+ unsigned long fault_addr)
{
const struct exception_table_entry *e;
ex_handler_t handler;
@@ -194,7 +280,7 @@ int fixup_exception(struct pt_regs *regs, int trapnr)
return 0;
handler = ex_fixup_handler(e);
- return handler(e, regs, trapnr);
+ return handler(e, regs, trapnr, error_code, fault_addr);
}
extern unsigned int early_recursion_flag;
@@ -230,9 +316,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
* result in a hard-to-debug panic.
*
* Keep in mind that not all vectors actually get here. Early
- * fage faults, for example, are special.
+ * page faults, for example, are special.
*/
- if (fixup_exception(regs, trapnr))
+ if (fixup_exception(regs, trapnr, regs->orig_ax, 0))
return;
if (fixup_bug(regs, trapnr))
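
With the widened ex_handler_t signature, every fixup handler now receives the error code and fault address even when it ignores them, and fixup_exception() passes both through unconditionally. A hedged sketch of a custom handler in the new style; the function name is illustrative:

    /* Sketch: a fixup handler with the extended signature. Handlers
     * that do not care about error_code/fault_addr simply ignore them.
     */
    static bool ex_handler_example(const struct exception_table_entry *fixup,
                                   struct pt_regs *regs, int trapnr,
                                   unsigned long error_code,
                                   unsigned long fault_addr)
    {
            regs->ip = ex_fixup_addr(fixup);        /* resume at the fixup */
            return true;                            /* fault was handled */
    }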
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 47bebfe6efa7..b24eb4eb9984 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -16,6 +16,7 @@
#include <linux/prefetch.h> /* prefetchw */
#include <linux/context_tracking.h> /* exception_enter(), ... */
#include <linux/uaccess.h> /* faulthandler_disabled() */
+#include <linux/efi.h> /* efi_recover_from_page_fault()*/
#include <linux/mm_types.h>
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
@@ -25,6 +26,7 @@
#include <asm/vsyscall.h> /* emulate_vsyscall */
#include <asm/vm86.h> /* struct vm86 */
#include <asm/mmu_context.h> /* vma_pkey() */
+#include <asm/efi.h> /* efi_recover_from_page_fault()*/
#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>
@@ -44,17 +46,19 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
- int ret = 0;
-
- /* kprobe_running() needs smp_processor_id() */
- if (kprobes_built_in() && !user_mode(regs)) {
- preempt_disable();
- if (kprobe_running() && kprobe_fault_handler(regs, 14))
- ret = 1;
- preempt_enable();
- }
-
- return ret;
+ if (!kprobes_built_in())
+ return 0;
+ if (user_mode(regs))
+ return 0;
+ /*
+ * To be potentially processing a kprobe fault and to be allowed to call
+ * kprobe_running(), we have to be non-preemptible.
+ */
+ if (preemptible())
+ return 0;
+ if (!kprobe_running())
+ return 0;
+ return kprobe_fault_handler(regs, X86_TRAP_PF);
}
/*
@@ -153,79 +157,6 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
return prefetch;
}
-/*
- * A protection key fault means that the PKRU value did not allow
- * access to some PTE. Userspace can figure out what PKRU was
- * from the XSAVE state, and this function fills out a field in
- * siginfo so userspace can discover which protection key was set
- * on the PTE.
- *
- * If we get here, we know that the hardware signaled a X86_PF_PK
- * fault and that there was a VMA once we got in the fault
- * handler. It does *not* guarantee that the VMA we find here
- * was the one that we faulted on.
- *
- * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
- * 2. T1 : set PKRU to deny access to pkey=4, touches page
- * 3. T1 : faults...
- * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
- * 5. T1 : enters fault handler, takes mmap_sem, etc...
- * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
- * faulted on a pte with its pkey=4.
- */
-static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
- u32 *pkey)
-{
- /* This is effectively an #ifdef */
- if (!boot_cpu_has(X86_FEATURE_OSPKE))
- return;
-
- /* Fault not from Protection Keys: nothing to do */
- if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
- return;
- /*
- * force_sig_info_fault() is called from a number of
- * contexts, some of which have a VMA and some of which
- * do not. The X86_PF_PK handing happens after we have a
- * valid VMA, so we should never reach this without a
- * valid VMA.
- */
- if (!pkey) {
- WARN_ONCE(1, "PKU fault with no VMA passed in");
- info->si_pkey = 0;
- return;
- }
- /*
- * si_pkey should be thought of as a strong hint, but not
- * absolutely guranteed to be 100% accurate because of
- * the race explained above.
- */
- info->si_pkey = *pkey;
-}
-
-static void
-force_sig_info_fault(int si_signo, int si_code, unsigned long address,
- struct task_struct *tsk, u32 *pkey, int fault)
-{
- unsigned lsb = 0;
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = si_signo;
- info.si_errno = 0;
- info.si_code = si_code;
- info.si_addr = (void __user *)address;
- if (fault & VM_FAULT_HWPOISON_LARGE)
- lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
- if (fault & VM_FAULT_HWPOISON)
- lsb = PAGE_SHIFT;
- info.si_addr_lsb = lsb;
-
- fill_sig_info_pkey(si_signo, si_code, &info, pkey);
-
- force_sig_info(si_signo, &info, tsk);
-}
-
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
@@ -709,7 +640,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
int sig;
/* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs, X86_TRAP_PF)) {
+ if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
/*
* Any interrupt that takes a fault gets the fixup. This makes
* the below recursive fault logic only apply to a faults from
@@ -730,8 +661,8 @@ no_context(struct pt_regs *regs, unsigned long error_code,
tsk->thread.cr2 = address;
/* XXX: hwpoison faults will set the wrong code. */
- force_sig_info_fault(signal, si_code, address,
- tsk, NULL, 0);
+ force_sig_fault(signal, si_code, (void __user *)address,
+ tsk);
}
/*
@@ -789,6 +720,13 @@ no_context(struct pt_regs *regs, unsigned long error_code,
return;
/*
+ * Buggy firmware could access regions which might page fault, try to
+ * recover from such faults.
+ */
+ if (IS_ENABLED(CONFIG_EFI))
+ efi_recover_from_page_fault(address);
+
+ /*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice:
*/
@@ -840,9 +778,18 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
show_opcodes(regs, loglvl);
}
+/*
+ * The (legacy) vsyscall page is the lone page in the kernel portion
+ * of the address space that has user-accessible permissions.
+ */
+static bool is_vsyscall_vaddr(unsigned long vaddr)
+{
+ return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
+}
+
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, u32 *pkey, int si_code)
+ unsigned long address, u32 pkey, int si_code)
{
struct task_struct *tsk = current;
@@ -863,18 +810,6 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
if (is_errata100(regs, address))
return;
-#ifdef CONFIG_X86_64
- /*
- * Instruction fetch faults in the vsyscall page might need
- * emulation.
- */
- if (unlikely((error_code & X86_PF_INSTR) &&
- ((address & ~0xfff) == VSYSCALL_ADDR))) {
- if (emulate_vsyscall(regs, address))
- return;
- }
-#endif
-
/*
* To avoid leaking information about the kernel page table
* layout, pretend that user-mode accesses to kernel addresses
@@ -890,7 +825,10 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_PF;
- force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);
+ if (si_code == SEGV_PKUERR)
+ force_sig_pkuerr((void __user *)address, pkey);
+
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
return;
}
@@ -903,35 +841,29 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, u32 *pkey)
+ unsigned long address)
{
- __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
+ __bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
}
static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, struct vm_area_struct *vma, int si_code)
+ unsigned long address, u32 pkey, int si_code)
{
struct mm_struct *mm = current->mm;
- u32 pkey;
-
- if (vma)
- pkey = vma_pkey(vma);
-
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
up_read(&mm->mmap_sem);
- __bad_area_nosemaphore(regs, error_code, address,
- (vma) ? &pkey : NULL, si_code);
+ __bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
}
static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
- __bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
+ __bad_area(regs, error_code, address, 0, SEGV_MAPERR);
}
static inline bool bad_area_access_from_pkeys(unsigned long error_code,
@@ -960,18 +892,40 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
* But, doing it this way allows compiler optimizations
* if pkeys are compiled out.
*/
- if (bad_area_access_from_pkeys(error_code, vma))
- __bad_area(regs, error_code, address, vma, SEGV_PKUERR);
- else
- __bad_area(regs, error_code, address, vma, SEGV_ACCERR);
+ if (bad_area_access_from_pkeys(error_code, vma)) {
+ /*
+ * A protection key fault means that the PKRU value did not allow
+ * access to some PTE. Userspace can figure out what PKRU was
+ * from the XSAVE state. This function captures the pkey from
+ * the vma and passes it to userspace so userspace can discover
+ * which protection key was set on the PTE.
+ *
+ * If we get here, we know that the hardware signaled a X86_PF_PK
+ * fault and that there was a VMA once we got in the fault
+ * handler. It does *not* guarantee that the VMA we find here
+ * was the one that we faulted on.
+ *
+ * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
+ * 2. T1 : set PKRU to deny access to pkey=4, touches page
+ * 3. T1 : faults...
+ * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
+ * 5. T1 : enters fault handler, takes mmap_sem, etc...
+ * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
+ * faulted on a pte with its pkey=4.
+ */
+ u32 pkey = vma_pkey(vma);
+
+ __bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
+ } else {
+ __bad_area(regs, error_code, address, 0, SEGV_ACCERR);
+ }
}
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
- u32 *pkey, unsigned int fault)
+ unsigned int fault)
{
struct task_struct *tsk = current;
- int code = BUS_ADRERR;
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & X86_PF_USER)) {
@@ -989,18 +943,25 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
#ifdef CONFIG_MEMORY_FAILURE
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
- printk(KERN_ERR
+ unsigned lsb = 0;
+
+ pr_err(
"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
tsk->comm, tsk->pid, address);
- code = BUS_MCEERR_AR;
+ if (fault & VM_FAULT_HWPOISON_LARGE)
+ lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+ if (fault & VM_FAULT_HWPOISON)
+ lsb = PAGE_SHIFT;
+ force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, tsk);
+ return;
}
#endif
- force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
}
static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, u32 *pkey, vm_fault_t fault)
+ unsigned long address, vm_fault_t fault)
{
if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
no_context(regs, error_code, address, 0, 0);
@@ -1024,27 +985,21 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
} else {
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
- do_sigbus(regs, error_code, address, pkey, fault);
+ do_sigbus(regs, error_code, address, fault);
else if (fault & VM_FAULT_SIGSEGV)
- bad_area_nosemaphore(regs, error_code, address, pkey);
+ bad_area_nosemaphore(regs, error_code, address);
else
BUG();
}
}
-static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
{
if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
return 0;
if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
return 0;
- /*
- * Note: We do not do lazy flushing on protection key
- * changes, so no spurious fault will ever set X86_PF_PK.
- */
- if ((error_code & X86_PF_PK))
- return 1;
return 1;
}
@@ -1071,7 +1026,7 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
* (Optional Invalidation).
*/
static noinline int
-spurious_fault(unsigned long error_code, unsigned long address)
+spurious_kernel_fault(unsigned long error_code, unsigned long address)
{
pgd_t *pgd;
p4d_t *p4d;
@@ -1102,27 +1057,27 @@ spurious_fault(unsigned long error_code, unsigned long address)
return 0;
if (p4d_large(*p4d))
- return spurious_fault_check(error_code, (pte_t *) p4d);
+ return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
pud = pud_offset(p4d, address);
if (!pud_present(*pud))
return 0;
if (pud_large(*pud))
- return spurious_fault_check(error_code, (pte_t *) pud);
+ return spurious_kernel_fault_check(error_code, (pte_t *) pud);
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return 0;
if (pmd_large(*pmd))
- return spurious_fault_check(error_code, (pte_t *) pmd);
+ return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
pte = pte_offset_kernel(pmd, address);
if (!pte_present(*pte))
return 0;
- ret = spurious_fault_check(error_code, pte);
+ ret = spurious_kernel_fault_check(error_code, pte);
if (!ret)
return 0;
@@ -1130,12 +1085,12 @@ spurious_fault(unsigned long error_code, unsigned long address)
* Make sure we have permissions in PMD.
* If not, then there's a bug in the page tables:
*/
- ret = spurious_fault_check(error_code, (pte_t *) pmd);
+ ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
return ret;
}
-NOKPROBE_SYMBOL(spurious_fault);
+NOKPROBE_SYMBOL(spurious_kernel_fault);
int show_unhandled_signals = 1;
@@ -1182,6 +1137,14 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
static int fault_in_kernel_space(unsigned long address)
{
+ /*
+ * On 64-bit systems, the vsyscall page is at an address above
+ * TASK_SIZE_MAX, but is not considered part of the kernel
+ * address space.
+ */
+ if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
+ return false;
+
return address >= TASK_SIZE_MAX;
}
@@ -1203,31 +1166,23 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
}
/*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
+ * Called for all faults where 'address' is part of the kernel address
+ * space. Might get called for faults that originate from *code* that
+ * ran in userspace or the kernel.
*/
-static noinline void
-__do_page_fault(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
+static void
+do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
+ unsigned long address)
{
- struct vm_area_struct *vma;
- struct task_struct *tsk;
- struct mm_struct *mm;
- vm_fault_t fault, major = 0;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
- u32 pkey;
-
- tsk = current;
- mm = tsk->mm;
-
- prefetchw(&mm->mmap_sem);
-
- if (unlikely(kmmio_fault(regs, address)))
- return;
+ /*
+	 * Protection key exceptions only happen on user pages. We
+ * have no user pages in the kernel portion of the address
+ * space, so do not expect them here.
+ */
+ WARN_ON_ONCE(hw_error_code & X86_PF_PK);
/*
- * We fault-in kernel-space virtual memory on-demand. The
+ * We can fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
@@ -1235,41 +1190,73 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
* only copy the information from the master page table,
* nothing more.
*
- * This verifies that the fault happens in kernel space
- * (error_code & 4) == 0, and that the fault was not a
- * protection error (error_code & 9) == 0.
+ * Before doing this on-demand faulting, ensure that the
+ * fault is not any of the following:
+ * 1. A fault on a PTE with a reserved bit set.
+ * 2. A fault caused by a user-mode access. (Do not demand-
+ * fault kernel memory due to user-mode accesses).
+ * 3. A fault caused by a page-level protection violation.
+ * (A demand fault would be on a non-present page which
+ * would have X86_PF_PROT==0).
*/
- if (unlikely(fault_in_kernel_space(address))) {
- if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
- if (vmalloc_fault(address) >= 0)
- return;
- }
-
- /* Can handle a stale RO->RW TLB: */
- if (spurious_fault(error_code, address))
+ if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
+ if (vmalloc_fault(address) >= 0)
return;
+ }
- /* kprobes don't want to hook the spurious faults: */
- if (kprobes_fault(regs))
- return;
- /*
- * Don't take the mm semaphore here. If we fixup a prefetch
- * fault we could otherwise deadlock:
- */
- bad_area_nosemaphore(regs, error_code, address, NULL);
+ /* Was the fault spurious, caused by lazy TLB invalidation? */
+ if (spurious_kernel_fault(hw_error_code, address))
+ return;
+ /* kprobes don't want to hook the spurious faults: */
+ if (kprobes_fault(regs))
return;
- }
+
+ /*
+ * Note, despite being a "bad area", there are quite a few
+ * acceptable reasons to get here, such as erratum fixups
+ * and handling kernel code that can fault, like get_user().
+ *
+ * Don't take the mm semaphore here. If we fixup a prefetch
+ * fault we could otherwise deadlock:
+ */
+ bad_area_nosemaphore(regs, hw_error_code, address);
+}
+NOKPROBE_SYMBOL(do_kern_addr_fault);
+
+/* Handle faults in the user portion of the address space */
+static inline
+void do_user_addr_fault(struct pt_regs *regs,
+ unsigned long hw_error_code,
+ unsigned long address)
+{
+ unsigned long sw_error_code;
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ vm_fault_t fault, major = 0;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+ tsk = current;
+ mm = tsk->mm;
/* kprobes don't want to hook the spurious faults: */
if (unlikely(kprobes_fault(regs)))
return;
- if (unlikely(error_code & X86_PF_RSVD))
- pgtable_bad(regs, error_code, address);
+ /*
+ * Reserved bits are never expected to be set on
+ * entries in the user portion of the page tables.
+ */
+ if (unlikely(hw_error_code & X86_PF_RSVD))
+ pgtable_bad(regs, hw_error_code, address);
- if (unlikely(smap_violation(error_code, regs))) {
- bad_area_nosemaphore(regs, error_code, address, NULL);
+ /*
+ * Check for invalid kernel (supervisor) access to user
+ * pages in the user address space.
+ */
+ if (unlikely(smap_violation(hw_error_code, regs))) {
+ bad_area_nosemaphore(regs, hw_error_code, address);
return;
}
@@ -1278,11 +1265,18 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
* in a region with pagefaults disabled then we must not take the fault
*/
if (unlikely(faulthandler_disabled() || !mm)) {
- bad_area_nosemaphore(regs, error_code, address, NULL);
+ bad_area_nosemaphore(regs, hw_error_code, address);
return;
}
/*
+ * hw_error_code is literally the "page fault error code" passed to
+ * the kernel directly from the hardware. But, we will shortly be
+ * modifying it in software, so give it a new name.
+ */
+ sw_error_code = hw_error_code;
+
+ /*
* It's safe to allow irq's after cr2 has been saved and the
* vmalloc fault has been handled.
*
@@ -1291,7 +1285,26 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
*/
if (user_mode(regs)) {
local_irq_enable();
- error_code |= X86_PF_USER;
+ /*
+ * Up to this point, X86_PF_USER set in hw_error_code
+ * indicated a user-mode access. But, after this,
+ * X86_PF_USER in sw_error_code will indicate either
+ * that, *or* an implicit kernel(supervisor)-mode access
+ * which originated from user mode.
+ */
+ if (!(hw_error_code & X86_PF_USER)) {
+ /*
+ * The CPU was in user mode, but the CPU says
+ * the fault was not a user-mode access.
+ * Must be an implicit kernel-mode access,
+ * which we do not expect to happen in the
+ * user address space.
+ */
+ pr_warn_once("kernel-mode error from user-mode: %lx\n",
+ hw_error_code);
+
+ sw_error_code |= X86_PF_USER;
+ }
flags |= FAULT_FLAG_USER;
} else {
if (regs->flags & X86_EFLAGS_IF)
@@ -1300,31 +1313,49 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (error_code & X86_PF_WRITE)
+ if (sw_error_code & X86_PF_WRITE)
flags |= FAULT_FLAG_WRITE;
- if (error_code & X86_PF_INSTR)
+ if (sw_error_code & X86_PF_INSTR)
flags |= FAULT_FLAG_INSTRUCTION;
+#ifdef CONFIG_X86_64
+ /*
+ * Instruction fetch faults in the vsyscall page might need
+ * emulation. The vsyscall page is at a high address
+ * (>PAGE_OFFSET), but is considered to be part of the user
+ * address space.
+ *
+ * The vsyscall page does not have a "real" VMA, so do this
+ * emulation before we go searching for VMAs.
+ */
+ if ((sw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) {
+ if (emulate_vsyscall(regs, address))
+ return;
+ }
+#endif
+
/*
- * When running in the kernel we expect faults to occur only to
- * addresses in user space. All other faults represent errors in
- * the kernel and should generate an OOPS. Unfortunately, in the
- * case of an erroneous fault occurring in a code path which already
- * holds mmap_sem we will deadlock attempting to validate the fault
- * against the address space. Luckily the kernel only validly
- * references user space from well defined areas of code, which are
- * listed in the exceptions table.
+ * Kernel-mode access to the user address space should only occur
+ * on well-defined single instructions listed in the exception
+ * tables. But, an erroneous kernel fault occurring outside one of
+ * those areas which also holds mmap_sem might deadlock attempting
+ * to validate the fault against the address space.
*
- * As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibility of a
- * deadlock. Attempt to lock the address space, if we cannot we then
- * validate the source. If this is invalid we can skip the address
- * space check, thus avoiding the deadlock:
+ * Only do the expensive exception table search when we might be at
+ * risk of a deadlock. This happens if we
+ * 1. Failed to acquire mmap_sem, and
+ * 2. The access did not originate in userspace. Note: either the
+ * hardware or earlier page fault code may set X86_PF_USER
+ * in sw_error_code.
*/
if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
- if (!(error_code & X86_PF_USER) &&
+ if (!(sw_error_code & X86_PF_USER) &&
!search_exception_tables(regs->ip)) {
- bad_area_nosemaphore(regs, error_code, address, NULL);
+ /*
+ * Fault from code in kernel from
+ * which we do not expect faults.
+ */
+ bad_area_nosemaphore(regs, sw_error_code, address);
return;
}
retry:
@@ -1340,16 +1371,16 @@ retry:
vma = find_vma(mm, address);
if (unlikely(!vma)) {
- bad_area(regs, error_code, address);
+ bad_area(regs, sw_error_code, address);
return;
}
if (likely(vma->vm_start <= address))
goto good_area;
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
- bad_area(regs, error_code, address);
+ bad_area(regs, sw_error_code, address);
return;
}
- if (error_code & X86_PF_USER) {
+ if (sw_error_code & X86_PF_USER) {
/*
* Accessing the stack below %sp is always a bug.
* The large cushion allows instructions like enter
@@ -1357,12 +1388,12 @@ retry:
* 32 pointers and then decrements %sp by 65535.)
*/
if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
- bad_area(regs, error_code, address);
+ bad_area(regs, sw_error_code, address);
return;
}
}
if (unlikely(expand_stack(vma, address))) {
- bad_area(regs, error_code, address);
+ bad_area(regs, sw_error_code, address);
return;
}
@@ -1371,8 +1402,8 @@ retry:
* we can handle it..
*/
good_area:
- if (unlikely(access_error(error_code, vma))) {
- bad_area_access_error(regs, error_code, address, vma);
+ if (unlikely(access_error(sw_error_code, vma))) {
+ bad_area_access_error(regs, sw_error_code, address, vma);
return;
}
@@ -1388,10 +1419,7 @@ good_area:
* (potentially after handling any pending signal during the return to
* userland). The return to userland is identified whenever
* FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
- * Thus we have to be careful about not touching vma after handling the
- * fault, so we read the pkey beforehand.
*/
- pkey = vma_pkey(vma);
fault = handle_mm_fault(vma, address, flags);
major |= fault & VM_FAULT_MAJOR;
@@ -1414,13 +1442,13 @@ good_area:
return;
/* Not returning to user mode? Handle exceptions or die: */
- no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
+ no_context(regs, sw_error_code, address, SIGBUS, BUS_ADRERR);
return;
}
up_read(&mm->mmap_sem);
if (unlikely(fault & VM_FAULT_ERROR)) {
- mm_fault_error(regs, error_code, address, &pkey, fault);
+ mm_fault_error(regs, sw_error_code, address, fault);
return;
}
@@ -1438,6 +1466,28 @@ good_area:
check_v8086_mode(regs, address, tsk);
}
+NOKPROBE_SYMBOL(do_user_addr_fault);
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+static noinline void
+__do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
+ unsigned long address)
+{
+ prefetchw(&current->mm->mmap_sem);
+
+ if (unlikely(kmmio_fault(regs, address)))
+ return;
+
+ /* Was the fault on kernel-controlled part of the address space? */
+ if (unlikely(fault_in_kernel_space(address)))
+ do_kern_addr_fault(regs, hw_error_code, address);
+ else
+ do_user_addr_fault(regs, hw_error_code, address);
+}
NOKPROBE_SYMBOL(__do_page_fault);
static nokprobe_inline void
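
After the split, the top-level dispatch is a pure address-space classification, with one subtlety worth calling out: the vsyscall page sits above TASK_SIZE_MAX yet is routed to the user-address handler. A hedged recap of the routing decision, folding fault_in_kernel_space() into a single helper for illustration:

    /* Sketch: how __do_page_fault() routes a fault after the refactor. */
    static bool routed_to_user_handler(unsigned long address)
    {
            /* vsyscall page: a high address, but treated as user space */
            if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
                    return true;

            return address < TASK_SIZE_MAX;
    }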
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 7a8fc26c1115..faca978ebf9d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -815,10 +815,14 @@ void free_kernel_image_pages(void *begin, void *end)
set_memory_np_noalias(begin_ul, len_pages);
}
+void __weak mem_encrypt_free_decrypted_mem(void) { }
+
void __ref free_initmem(void)
{
e820__reallocate_tables();
+ mem_encrypt_free_decrypted_mem();
+
free_kernel_image_pages(&__init_begin, &__init_end);
}
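
The __weak stub above is the usual pattern for an optional arch hook: the empty definition satisfies the linker when memory-encryption support is compiled out, and a strong definition (added to mem_encrypt.c below) overrides it otherwise. A minimal, hedged illustration of the pattern with a hypothetical hook name:

    /* File A: weak default, a no-op when nothing overrides it. */
    void __weak arch_hook(void) { }

    /* File B: strong definition; the linker picks this one when present. */
    void arch_hook(void)
    {
            /* arch-specific work */
    }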
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 979e0a02cbe1..142c7d9f89cc 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -923,34 +923,19 @@ static void mark_nxdata_nx(void)
void mark_rodata_ro(void)
{
unsigned long start = PFN_ALIGN(_text);
- unsigned long size = PFN_ALIGN(_etext) - start;
+ unsigned long size = (unsigned long)__end_rodata - start;
set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
- printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+ pr_info("Write protecting kernel text and read-only data: %luk\n",
size >> 10);
kernel_set_to_readonly = 1;
#ifdef CONFIG_CPA_DEBUG
- printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
- start, start+size);
- set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
-
- printk(KERN_INFO "Testing CPA: write protecting again\n");
- set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
-#endif
-
- start += size;
- size = (unsigned long)__end_rodata - start;
- set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
- printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
- size >> 10);
-
-#ifdef CONFIG_CPA_DEBUG
- printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
+ pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size);
set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
- printk(KERN_INFO "Testing CPA: write protecting again\n");
+ pr_info("Testing CPA: write protecting again\n");
set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
mark_nxdata_nx();
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index c63a545ec199..24e0920a9b25 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -131,7 +131,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
* caller shouldn't need to know that small detail.
*/
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
- unsigned long size, enum page_cache_mode pcm, void *caller)
+ unsigned long size, enum page_cache_mode pcm,
+ void *caller, bool encrypted)
{
unsigned long offset, vaddr;
resource_size_t last_addr;
@@ -199,7 +200,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
* resulting mapping.
*/
prot = PAGE_KERNEL_IO;
- if (sev_active() && mem_flags.desc_other)
+ if ((sev_active() && mem_flags.desc_other) || encrypted)
prot = pgprot_encrypted(prot);
switch (pcm) {
@@ -291,7 +292,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;
return __ioremap_caller(phys_addr, size, pcm,
- __builtin_return_address(0));
+ __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_nocache);
@@ -324,7 +325,7 @@ void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;
return __ioremap_caller(phys_addr, size, pcm,
- __builtin_return_address(0));
+ __builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);
@@ -341,7 +342,7 @@ EXPORT_SYMBOL_GPL(ioremap_uc);
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
- __builtin_return_address(0));
+ __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);
@@ -358,14 +359,21 @@ EXPORT_SYMBOL(ioremap_wc);
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
- __builtin_return_address(0));
+ __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);
+void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
+{
+ return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
+ __builtin_return_address(0), true);
+}
+EXPORT_SYMBOL(ioremap_encrypted);
+
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
- __builtin_return_address(0));
+ __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);
@@ -374,7 +382,7 @@ void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
{
return __ioremap_caller(phys_addr, size,
pgprot2cachemode(__pgprot(prot_val)),
- __builtin_return_address(0));
+ __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);
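
ioremap_encrypted() gives callers a way to map a physical range with the encryption attribute set regardless of the SEV heuristics, e.g. for reading memory that was written encrypted by a previous kernel. A hedged usage sketch; paddr and buf are assumed to exist in the caller:

    /* Sketch: map an encrypted region, copy it out, unmap. */
    void __iomem *va;

    va = ioremap_encrypted(paddr, PAGE_SIZE);   /* paddr: assumed phys addr */
    if (va) {
            memcpy_fromio(buf, va, PAGE_SIZE);  /* buf: assumed kernel buffer */
            iounmap(va);
    }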
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index b2de398d1fd3..006f373f54ab 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -348,6 +348,30 @@ bool sev_active(void)
EXPORT_SYMBOL(sev_active);
/* Architecture __weak replacement functions */
+void __init mem_encrypt_free_decrypted_mem(void)
+{
+ unsigned long vaddr, vaddr_end, npages;
+ int r;
+
+ vaddr = (unsigned long)__start_bss_decrypted_unused;
+ vaddr_end = (unsigned long)__end_bss_decrypted;
+ npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
+
+ /*
+ * The unused memory range was mapped decrypted, change the encryption
+ * attribute from decrypted to encrypted before freeing it.
+ */
+ if (mem_encrypt_active()) {
+ r = set_memory_encrypted(vaddr, npages);
+ if (r) {
+ pr_warn("failed to free unused decrypted pages\n");
+ return;
+ }
+ }
+
+ free_init_pages("unused decrypted", vaddr, vaddr_end);
+}
+
void __init mem_encrypt_init(void)
{
if (!sme_me_mask)
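
The __start_bss_decrypted_unused/__end_bss_decrypted symbols freed above bracket a linker section holding data that is deliberately mapped decrypted (e.g. for sharing with a hypervisor). A hedged sketch of how such a section is typically populated; the macro mirrors the kernel's __bss_decrypted convention but is shown here as an assumption:

    /* Sketch: placing a page-aligned variable in the decrypted BSS. */
    #define __bss_decrypted __attribute__((__section__(".bss..decrypted")))

    static unsigned long shared_with_hypervisor[512]
            __bss_decrypted __aligned(PAGE_SIZE);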
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 7ae36868aed2..a19ef1a416ff 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -27,6 +27,7 @@
* be extended when new paravirt and debugging variants are added.)
*/
#undef CONFIG_PARAVIRT
+#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS
#include <linux/kernel.h>
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index e500949bae24..2385538e8065 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -118,14 +118,11 @@ bad_opcode:
* anything it wants in to the instructions. We can not
* trust anything about it. They might not be valid
* instructions or might encode invalid registers, etc...
- *
- * The caller is expected to kfree() the returned siginfo_t.
*/
-siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
+int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
{
const struct mpx_bndreg_state *bndregs;
const struct mpx_bndreg *bndreg;
- siginfo_t *info = NULL;
struct insn insn;
uint8_t bndregno;
int err;
@@ -153,11 +150,6 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
/* now go select the individual register in the set of 4 */
bndreg = &bndregs->bndreg[bndregno];
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto err_out;
- }
/*
* The registers are always 64-bit, but the upper 32
* bits are ignored in 32-bit mode. Also, note that the
@@ -168,27 +160,23 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
* complains when casting from integers to different-size
* pointers.
*/
- info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
- info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
- info->si_addr_lsb = 0;
- info->si_signo = SIGSEGV;
- info->si_errno = 0;
- info->si_code = SEGV_BNDERR;
- info->si_addr = insn_get_addr_ref(&insn, regs);
+ info->lower = (void __user *)(unsigned long)bndreg->lower_bound;
+ info->upper = (void __user *)(unsigned long)~bndreg->upper_bound;
+ info->addr = insn_get_addr_ref(&insn, regs);
+
/*
* We were not able to extract an address from the instruction,
* probably because there was something invalid in it.
*/
- if (info->si_addr == (void __user *)-1) {
+ if (info->addr == (void __user *)-1) {
err = -EINVAL;
goto err_out;
}
- trace_mpx_bounds_register_exception(info->si_addr, bndreg);
- return info;
+ trace_mpx_bounds_register_exception(info->addr, bndreg);
+ return 0;
err_out:
/* info might be NULL, but kfree() handles that */
- kfree(info);
- return ERR_PTR(err);
+ return err;
}
static __user void *mpx_get_bounds_dir(void)
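
With the siginfo allocation gone, a caller fills a stack-allocated mpx_fault_info and raises the signal itself. A hedged sketch of the expected call site in the #BR trap handler; force_sig_bnderr() follows the new signal helpers used elsewhere in this series:

    /* Sketch: consuming mpx_fault_info() from the bounds trap handler. */
    struct mpx_fault_info mpx;

    if (mpx_fault_info(&mpx, regs) == 0) {
            /* bounds violation: deliver SEGV_BNDERR with the decoded range */
            force_sig_bnderr(mpx.addr, mpx.lower, mpx.upper);
    }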
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 51a5a69ecac9..62bb30b4bd2a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -37,11 +37,20 @@ struct cpa_data {
unsigned long numpages;
int flags;
unsigned long pfn;
- unsigned force_split : 1;
+ unsigned force_split : 1,
+ force_static_prot : 1;
int curpage;
struct page **pages;
};
+enum cpa_warn {
+ CPA_CONFLICT,
+ CPA_PROTECT,
+ CPA_DETECT,
+};
+
+static const int cpa_warn_level = CPA_PROTECT;
+
/*
* Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
* using cpa_lock. So that we don't allow any other cpu, with stale large tlb
@@ -94,6 +103,87 @@ void arch_report_meminfo(struct seq_file *m)
static inline void split_page_count(int level) { }
#endif
+#ifdef CONFIG_X86_CPA_STATISTICS
+
+static unsigned long cpa_1g_checked;
+static unsigned long cpa_1g_sameprot;
+static unsigned long cpa_1g_preserved;
+static unsigned long cpa_2m_checked;
+static unsigned long cpa_2m_sameprot;
+static unsigned long cpa_2m_preserved;
+static unsigned long cpa_4k_install;
+
+static inline void cpa_inc_1g_checked(void)
+{
+ cpa_1g_checked++;
+}
+
+static inline void cpa_inc_2m_checked(void)
+{
+ cpa_2m_checked++;
+}
+
+static inline void cpa_inc_4k_install(void)
+{
+ cpa_4k_install++;
+}
+
+static inline void cpa_inc_lp_sameprot(int level)
+{
+ if (level == PG_LEVEL_1G)
+ cpa_1g_sameprot++;
+ else
+ cpa_2m_sameprot++;
+}
+
+static inline void cpa_inc_lp_preserved(int level)
+{
+ if (level == PG_LEVEL_1G)
+ cpa_1g_preserved++;
+ else
+ cpa_2m_preserved++;
+}
+
+static int cpastats_show(struct seq_file *m, void *p)
+{
+ seq_printf(m, "1G pages checked: %16lu\n", cpa_1g_checked);
+ seq_printf(m, "1G pages sameprot: %16lu\n", cpa_1g_sameprot);
+ seq_printf(m, "1G pages preserved: %16lu\n", cpa_1g_preserved);
+ seq_printf(m, "2M pages checked: %16lu\n", cpa_2m_checked);
+ seq_printf(m, "2M pages sameprot: %16lu\n", cpa_2m_sameprot);
+ seq_printf(m, "2M pages preserved: %16lu\n", cpa_2m_preserved);
+ seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
+ return 0;
+}
+
+static int cpastats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cpastats_show, NULL);
+}
+
+static const struct file_operations cpastats_fops = {
+ .open = cpastats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init cpa_stats_init(void)
+{
+ debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
+ &cpastats_fops);
+ return 0;
+}
+late_initcall(cpa_stats_init);
+#else
+static inline void cpa_inc_1g_checked(void) { }
+static inline void cpa_inc_2m_checked(void) { }
+static inline void cpa_inc_4k_install(void) { }
+static inline void cpa_inc_lp_sameprot(int level) { }
+static inline void cpa_inc_lp_preserved(int level) { }
+#endif
+
+
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
@@ -195,14 +285,20 @@ static void cpa_flush_all(unsigned long cache)
on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}
-static void __cpa_flush_range(void *arg)
+static bool __cpa_flush_range(unsigned long start, int numpages, int cache)
{
- /*
- * We could optimize that further and do individual per page
- * tlb invalidates for a low number of pages. Caveat: we must
- * flush the high aliases on 64bit as well.
- */
- __flush_tlb_all();
+ BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
+
+ WARN_ON(PAGE_ALIGN(start) != start);
+
+ if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ cpa_flush_all(cache);
+ return true;
+ }
+
+ flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
+
+ return !cache;
}
static void cpa_flush_range(unsigned long start, int numpages, int cache)
@@ -210,12 +306,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
unsigned int i, level;
unsigned long addr;
- BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
- WARN_ON(PAGE_ALIGN(start) != start);
-
- on_each_cpu(__cpa_flush_range, NULL, 1);
-
- if (!cache)
+ if (__cpa_flush_range(start, numpages, cache))
return;
/*
@@ -235,30 +326,13 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
}
}
-static void cpa_flush_array(unsigned long *start, int numpages, int cache,
+static void cpa_flush_array(unsigned long baddr, unsigned long *start,
+ int numpages, int cache,
int in_flags, struct page **pages)
{
unsigned int i, level;
-#ifdef CONFIG_PREEMPT
- /*
- * Avoid wbinvd() because it causes latencies on all CPUs,
- * regardless of any CPU isolation that may be in effect.
- *
- * This should be extended for CAT enabled systems independent of
- * PREEMPT because wbinvd() does not respect the CAT partitions and
- * this is exposed to unpriviledged users through the graphics
- * subsystem.
- */
- unsigned long do_wbinvd = 0;
-#else
- unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
-#endif
-
- BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
- on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
-
- if (!cache || do_wbinvd)
+ if (__cpa_flush_range(baddr, numpages, cache))
return;
/*
@@ -286,84 +360,179 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
}
}
-/*
- * Certain areas of memory on x86 require very specific protection flags,
- * for example the BIOS area or kernel text. Callers don't always get this
- * right (again, ioremap() on BIOS memory is not uncommon) so this function
- * checks and fixes these known static required protection bits.
- */
-static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
- unsigned long pfn)
+static bool overlaps(unsigned long r1_start, unsigned long r1_end,
+ unsigned long r2_start, unsigned long r2_end)
{
- pgprot_t forbidden = __pgprot(0);
+ return (r1_start <= r2_end && r1_end >= r2_start) ||
+ (r2_start <= r1_end && r2_end >= r1_start);
+}
- /*
- * The BIOS area between 640k and 1Mb needs to be executable for
- * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
- */
#ifdef CONFIG_PCI_BIOS
- if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
- pgprot_val(forbidden) |= _PAGE_NX;
+/*
+ * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS
+ * based config access (CONFIG_PCI_GOBIOS) support.
+ */
+#define BIOS_PFN PFN_DOWN(BIOS_BEGIN)
+#define BIOS_PFN_END PFN_DOWN(BIOS_END - 1)
+
+static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
+{
+ if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
+ return _PAGE_NX;
+ return 0;
+}
+#else
+static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
+{
+ return 0;
+}
#endif
- /*
- * The kernel text needs to be executable for obvious reasons
- * Does not cover __inittext since that is gone later on. On
- * 64bit we do not enforce !NX on the low mapping
- */
- if (within(address, (unsigned long)_text, (unsigned long)_etext))
- pgprot_val(forbidden) |= _PAGE_NX;
+/*
+ * The .rodata section needs to be read-only. Using the pfn catches all
+ * aliases. This also includes __ro_after_init, so do not enforce until
+ * kernel_set_to_readonly is true.
+ */
+static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
+{
+ unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));
/*
- * The .rodata section needs to be read-only. Using the pfn
- * catches all aliases. This also includes __ro_after_init,
- * so do not enforce until kernel_set_to_readonly is true.
+	 * Note: __end_rodata is page aligned and not inclusive, so
+ * subtract 1 to get the last enforced PFN in the rodata area.
*/
- if (kernel_set_to_readonly &&
- within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
- __pa_symbol(__end_rodata) >> PAGE_SHIFT))
- pgprot_val(forbidden) |= _PAGE_RW;
+ epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;
+
+ if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
+ return _PAGE_RW;
+ return 0;
+}
+
+/*
+ * Protect kernel text against becoming non executable by forbidding
+ * _PAGE_NX. This protects only the high kernel mapping (_text -> _etext)
+ * out of which the kernel actually executes. Do not protect the low
+ * mapping.
+ *
+ * This does not cover __inittext since that is gone after boot.
+ */
+static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
+{
+ unsigned long t_end = (unsigned long)_etext - 1;
+ unsigned long t_start = (unsigned long)_text;
+
+ if (overlaps(start, end, t_start, t_end))
+ return _PAGE_NX;
+ return 0;
+}
#if defined(CONFIG_X86_64)
+/*
+ * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
+ * the kernel text mappings for the large-page-aligned text and rodata
+ * sections will always be read-only. The kernel identity mappings covering
+ * the holes caused by this alignment can be anything the user asks for.
+ *
+ * This will preserve the large page mappings for kernel text/data at no
+ * extra cost.
+ */
+static pgprotval_t protect_kernel_text_ro(unsigned long start,
+ unsigned long end)
+{
+ unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1;
+ unsigned long t_start = (unsigned long)_text;
+ unsigned int level;
+
+ if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end))
+ return 0;
/*
- * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
- * kernel text mappings for the large page aligned text, rodata sections
- * will be always read-only. For the kernel identity mappings covering
- * the holes caused by this alignment can be anything that user asks.
+	 * Don't enforce the !RW mapping for the kernel text mapping if
+	 * the current mapping is already using small pages. No
+ * need to work hard to preserve large page mappings in this case.
*
- * This will preserve the large page mappings for kernel text/data
- * at no extra cost.
+ * This also fixes the Linux Xen paravirt guest boot failure caused
+ * by unexpected read-only mappings for kernel identity
+ * mappings. In this paravirt guest case, the kernel text mapping
+ * and the kernel identity mapping share the same page-table pages,
+ * so the protections for kernel text and identity mappings have to
+ * be the same.
*/
- if (kernel_set_to_readonly &&
- within(address, (unsigned long)_text,
- (unsigned long)__end_rodata_hpage_align)) {
- unsigned int level;
-
- /*
- * Don't enforce the !RW mapping for the kernel text mapping,
- * if the current mapping is already using small page mapping.
- * No need to work hard to preserve large page mappings in this
- * case.
- *
- * This also fixes the Linux Xen paravirt guest boot failure
- * (because of unexpected read-only mappings for kernel identity
- * mappings). In this paravirt guest case, the kernel text
- * mapping and the kernel identity mapping share the same
- * page-table pages. Thus we can't really use different
- * protections for the kernel text and identity mappings. Also,
- * these shared mappings are made of small page mappings.
- * Thus this don't enforce !RW mapping for small page kernel
- * text mapping logic will help Linux Xen parvirt guest boot
- * as well.
- */
- if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
- pgprot_val(forbidden) |= _PAGE_RW;
- }
+ if (lookup_address(start, &level) && (level != PG_LEVEL_4K))
+ return _PAGE_RW;
+ return 0;
+}
+#else
+static pgprotval_t protect_kernel_text_ro(unsigned long start,
+ unsigned long end)
+{
+ return 0;
+}
#endif
- prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
+static inline bool conflicts(pgprot_t prot, pgprotval_t val)
+{
+ return (pgprot_val(prot) & ~val) != pgprot_val(prot);
+}
- return prot;
+static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
+ unsigned long start, unsigned long end,
+ unsigned long pfn, const char *txt)
+{
+ static const char *lvltxt[] = {
+ [CPA_CONFLICT] = "conflict",
+ [CPA_PROTECT] = "protect",
+ [CPA_DETECT] = "detect",
+ };
+
+ if (warnlvl > cpa_warn_level || !conflicts(prot, val))
+ return;
+
+ pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
+ lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
+ (unsigned long long)val);
+}
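
To make the conflicts() test concrete, a small illustrative example (not part of the patch): clearing the forbidden bits from the request only changes its value when a requested bit is also forbidden.

    /* Illustrative only, not part of the patch. */
    static void conflicts_example(void)
    {
            pgprot_t req = __pgprot(_PAGE_PRESENT | _PAGE_RW);

            WARN_ON(!conflicts(req, _PAGE_RW)); /* RW requested and forbidden */
            WARN_ON(conflicts(req, _PAGE_NX));  /* NX forbidden, never requested */
    }
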
+
+/*
+ * Certain areas of memory on x86 require very specific protection flags,
+ * for example the BIOS area or kernel text. Callers don't always get this
+ * right (e.g. ioremap() on BIOS memory is not uncommon), so this function
+ * checks for and fixes up the known, statically required protection bits.
+ */
+static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
+ unsigned long pfn, unsigned long npg,
+ int warnlvl)
+{
+ pgprotval_t forbidden, res;
+ unsigned long end;
+
+ /*
+ * There is no point in checking RW/NX conflicts when the requested
+ * mapping is setting the page !PRESENT.
+ */
+ if (!(pgprot_val(prot) & _PAGE_PRESENT))
+ return prot;
+
+ /* Operate on the virtual address */
+ end = start + npg * PAGE_SIZE - 1;
+
+ res = protect_kernel_text(start, end);
+ check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
+ forbidden = res;
+
+ res = protect_kernel_text_ro(start, end);
+ check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
+ forbidden |= res;
+
+ /* Check the PFN directly */
+ res = protect_pci_bios(pfn, pfn + npg - 1);
+ check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
+ forbidden |= res;
+
+ res = protect_rodata(pfn, pfn + npg - 1);
+ check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
+ forbidden |= res;
+
+ return __pgprot(pgprot_val(prot) & ~forbidden);
}
/*
@@ -421,18 +590,18 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
*/
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
- return lookup_address_in_pgd(pgd_offset_k(address), address, level);
+ return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);
static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
unsigned int *level)
{
- if (cpa->pgd)
+ if (cpa->pgd)
return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
address, level);
- return lookup_address(address, level);
+ return lookup_address(address, level);
}
/*
@@ -549,40 +718,35 @@ static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
return prot;
}
-static int
-try_preserve_large_page(pte_t *kpte, unsigned long address,
- struct cpa_data *cpa)
+static int __should_split_large_page(pte_t *kpte, unsigned long address,
+ struct cpa_data *cpa)
{
- unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn, old_pfn;
+ unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
+ pgprot_t old_prot, new_prot, req_prot, chk_prot;
pte_t new_pte, old_pte, *tmp;
- pgprot_t old_prot, new_prot, req_prot;
- int i, do_split = 1;
enum pg_level level;
- if (cpa->force_split)
- return 1;
-
- spin_lock(&pgd_lock);
/*
* Check for races, another CPU might have split this page
* up already:
*/
tmp = _lookup_address_cpa(cpa, address, &level);
if (tmp != kpte)
- goto out_unlock;
+ return 1;
switch (level) {
case PG_LEVEL_2M:
old_prot = pmd_pgprot(*(pmd_t *)kpte);
old_pfn = pmd_pfn(*(pmd_t *)kpte);
+ cpa_inc_2m_checked();
break;
case PG_LEVEL_1G:
old_prot = pud_pgprot(*(pud_t *)kpte);
old_pfn = pud_pfn(*(pud_t *)kpte);
+ cpa_inc_1g_checked();
break;
default:
- do_split = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
psize = page_level_size(level);
@@ -592,8 +756,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* Calculate the number of pages, which fit into this large
* page starting at address:
*/
- nextpage_addr = (address + psize) & pmask;
- numpages = (nextpage_addr - address) >> PAGE_SHIFT;
+ lpaddr = (address + psize) & pmask;
+ numpages = (lpaddr - address) >> PAGE_SHIFT;
if (numpages < cpa->numpages)
cpa->numpages = numpages;
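
A worked example of this clamping (all numbers made up): a request that starts three 4K pages into a 2M page can affect at most the remaining tail of that large page.

    /* Illustrative numbers, not from the patch. */
    unsigned long address = 0x40203000UL;          /* 3 pages into a 2M page */
    unsigned long psize   = 0x200000UL;            /* 2M page size */
    unsigned long pmask   = ~(psize - 1);
    unsigned long lpaddr  = (address + psize) & pmask;         /* 0x40400000 */
    unsigned long numpages = (lpaddr - address) >> PAGE_SHIFT; /* 509 pages  */
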
@@ -620,71 +784,142 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
pgprot_val(req_prot) |= _PAGE_PSE;
/*
- * old_pfn points to the large page base pfn. So we need
- * to add the offset of the virtual address:
+ * old_pfn points to the large page base pfn. So we need to add the
+ * offset of the virtual address:
*/
pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
cpa->pfn = pfn;
- new_prot = static_protections(req_prot, address, pfn);
+ /*
+ * Calculate the large page base address and the number of 4K pages
+ * in the large page
+ */
+ lpaddr = address & pmask;
+ numpages = psize >> PAGE_SHIFT;
/*
- * We need to check the full range, whether
- * static_protection() requires a different pgprot for one of
- * the pages in the range we try to preserve:
+ * Sanity check that the existing mapping is correct versus the static
+ * protections. static_protections() guards against !PRESENT, so no
+ * extra conditional is required here.
*/
- addr = address & pmask;
- pfn = old_pfn;
- for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
- pgprot_t chk_prot = static_protections(req_prot, addr, pfn);
+ chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
+ CPA_CONFLICT);
- if (pgprot_val(chk_prot) != pgprot_val(new_prot))
- goto out_unlock;
+ if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
+ /*
+ * Split the large page and tell the split code to
+ * enforce static protections.
+ */
+ cpa->force_static_prot = 1;
+ return 1;
}
/*
- * If there are no changes, return. maxpages has been updated
- * above:
+ * Optimization: If the requested pgprot is the same as the current
+ * pgprot, then the large page can be preserved and no updates are
+ * required independent of alignment and length of the requested
+ * range. The above already established that the current pgprot is
+ * correct, which in consequence makes the requested pgprot correct
+ * as well if it is the same. The static protection scan below will
+ * not come to a different conclusion.
*/
- if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
- do_split = 0;
- goto out_unlock;
+ if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
+ cpa_inc_lp_sameprot(level);
+ return 0;
}
/*
- * We need to change the attributes. Check, whether we can
- * change the large page in one go. We request a split, when
- * the address is not aligned and the number of pages is
- * smaller than the number of pages in the large page. Note
- * that we limited the number of possible pages already to
- * the number of pages in the large page.
+ * If the requested range does not cover the full page, split it up
*/
- if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
- /*
- * The address is aligned and the number of pages
- * covers the full page.
- */
- new_pte = pfn_pte(old_pfn, new_prot);
- __set_pmd_pte(kpte, address, new_pte);
- cpa->flags |= CPA_FLUSHTLB;
- do_split = 0;
- }
+ if (address != lpaddr || cpa->numpages != numpages)
+ return 1;
-out_unlock:
+ /*
+ * Check whether the requested pgprot is conflicting with a static
+ * protection requirement in the large page.
+ */
+ new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
+ CPA_DETECT);
+
+ /*
+ * If there is a conflict, split the large page.
+ *
+ * There used to be a 4K-granular evaluation trying really hard to
+ * preserve the large pages, but experimentation has shown that this
+ * does not help at all. There might be corner cases which would
+ * preserve one large page occasionally, but it's really not worth the
+ * extra code and cycles for the common case.
+ */
+ if (pgprot_val(req_prot) != pgprot_val(new_prot))
+ return 1;
+
+ /* All checks passed. Update the large page mapping. */
+ new_pte = pfn_pte(old_pfn, new_prot);
+ __set_pmd_pte(kpte, address, new_pte);
+ cpa->flags |= CPA_FLUSHTLB;
+ cpa_inc_lp_preserved(level);
+ return 0;
+}
+
+static int should_split_large_page(pte_t *kpte, unsigned long address,
+ struct cpa_data *cpa)
+{
+ int do_split;
+
+ if (cpa->force_split)
+ return 1;
+
+ spin_lock(&pgd_lock);
+ do_split = __should_split_large_page(kpte, address, cpa);
spin_unlock(&pgd_lock);
return do_split;
}
+static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
+ pgprot_t ref_prot, unsigned long address,
+ unsigned long size)
+{
+ unsigned int npg = PFN_DOWN(size);
+ pgprot_t prot;
+
+ /*
+ * If should_split_large_page() discovered an inconsistent mapping,
+ * remove the invalid protection in the split mapping.
+ */
+ if (!cpa->force_static_prot)
+ goto set;
+
+ prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);
+
+ if (pgprot_val(prot) == pgprot_val(ref_prot))
+ goto set;
+
+ /*
+ * If this is splitting a PMD, fix it up. PUD splits cannot be
+ * fixed trivially as that would require rescanning the newly
+ * installed PMD mappings after returning from split_large_page()
+ * so an eventual further split can allocate the necessary PTE
+ * pages. Warn for now and revisit it in case this actually
+ * happens.
+ */
+ if (size == PAGE_SIZE)
+ ref_prot = prot;
+ else
+ pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
+set:
+ set_pte(pte, pfn_pte(pfn, ref_prot));
+}
+
static int
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
struct page *base)
{
+ unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
pte_t *pbase = (pte_t *)page_address(base);
- unsigned long ref_pfn, pfn, pfninc = 1;
unsigned int i, level;
- pte_t *tmp;
pgprot_t ref_prot;
+ pte_t *tmp;
spin_lock(&pgd_lock);
/*
@@ -707,15 +942,17 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
* PAT bit to correct position.
*/
ref_prot = pgprot_large_2_4k(ref_prot);
-
ref_pfn = pmd_pfn(*(pmd_t *)kpte);
+ lpaddr = address & PMD_MASK;
+ lpinc = PAGE_SIZE;
break;
case PG_LEVEL_1G:
ref_prot = pud_pgprot(*(pud_t *)kpte);
ref_pfn = pud_pfn(*(pud_t *)kpte);
pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
-
+ lpaddr = address & PUD_MASK;
+ lpinc = PMD_SIZE;
/*
* Clear the PSE flags if the PRESENT flag is not set
* otherwise pmd_present/pmd_huge will return true
@@ -736,8 +973,8 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
* Get the target pfn from the original entry:
*/
pfn = ref_pfn;
- for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
- set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
+ for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
+ split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
if (virt_addr_valid(address)) {
unsigned long pfn = PFN_DOWN(__pa(address));
@@ -756,14 +993,24 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
/*
- * Intel Atom errata AAH41 workaround.
+ * Do a global flush tlb after splitting the large page
+ * and before we do the actual change page attribute in the PTE.
+ *
+ * Without this, we violate the TLB application note, that says:
+ * "The TLBs may contain both ordinary and large-page
+ * translations for a 4-KByte range of linear addresses. This
+ * may occur if software modifies the paging structures so that
+ * the page size used for the address range changes. If the two
+ * translations differ with respect to page frame or attributes
+ * (e.g., permissions), processor behavior is undefined and may
+ * be implementation-specific."
*
- * The real fix should be in hw or in a microcode update, but
- * we also probabilistically try to reduce the window of having
- * a large TLB mixed with 4K TLBs while instruction fetches are
- * going on.
+ * We do this global TLB flush inside the cpa_lock so that no other
+ * CPU with stale TLB entries can change, in parallel, the attributes
+ * of a page that falls into the just-split large page entry.
*/
- __flush_tlb_all();
+ flush_tlb_all();
spin_unlock(&pgd_lock);
return 0;
@@ -1247,7 +1494,9 @@ repeat:
pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
- new_prot = static_protections(new_prot, address, pfn);
+ cpa_inc_4k_install();
+ new_prot = static_protections(new_prot, address, pfn, 1,
+ CPA_PROTECT);
new_prot = pgprot_clear_protnone_bits(new_prot);
@@ -1273,7 +1522,7 @@ repeat:
* Check, whether we can keep the large page intact
* and just change the pte:
*/
- do_split = try_preserve_large_page(kpte, address, cpa);
+ do_split = should_split_large_page(kpte, address, cpa);
/*
* When the range fits into the existing large page,
* return. cp->numpages and cpa->tlbflush have been updated in
@@ -1286,28 +1535,8 @@ repeat:
* We have to split the large page:
*/
err = split_large_page(cpa, kpte, address);
- if (!err) {
- /*
- * Do a global flush tlb after splitting the large page
- * and before we do the actual change page attribute in the PTE.
- *
- * With out this, we violate the TLB application note, that says
- * "The TLBs may contain both ordinary and large-page
- * translations for a 4-KByte range of linear addresses. This
- * may occur if software modifies the paging structures so that
- * the page size used for the address range changes. If the two
- * translations differ with respect to page frame or attributes
- * (e.g., permissions), processor behavior is undefined and may
- * be implementation-specific."
- *
- * We do this global tlb flush inside the cpa_lock, so that we
- * don't allow any other cpu, with stale tlb entries change the
- * page attribute in parallel, that also falls into the
- * just split large page entry.
- */
- flush_tlb_all();
+ if (!err)
goto repeat;
- }
return err;
}
@@ -1529,19 +1758,19 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
cache = !!pgprot2cachemode(mask_set);
/*
- * On success we use CLFLUSH, when the CPU supports it to
- * avoid the WBINVD. If the CPU does not support it and in the
- * error case we fall back to cpa_flush_all (which uses
- * WBINVD):
+ * On error, flush everything to be sure.
*/
- if (!ret && boot_cpu_has(X86_FEATURE_CLFLUSH)) {
- if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
- cpa_flush_array(addr, numpages, cache,
- cpa.flags, pages);
- } else
- cpa_flush_range(baddr, numpages, cache);
- } else
+ if (ret) {
cpa_flush_all(cache);
+ goto out;
+ }
+
+ if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
+ cpa_flush_array(baddr, addr, numpages, cache,
+ cpa.flags, pages);
+ } else {
+ cpa_flush_range(baddr, numpages, cache);
+ }
out:
return ret;
@@ -1856,10 +2085,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
/*
* Before changing the encryption attribute, we need to flush caches.
*/
- if (static_cpu_has(X86_FEATURE_CLFLUSH))
- cpa_flush_range(start, numpages, 1);
- else
- cpa_flush_all(1);
+ cpa_flush_range(start, numpages, 1);
ret = __change_page_attr_set_clr(&cpa, 1);
@@ -1870,10 +2096,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
* in case TLB flushing gets optimized in the cpa_flush_range()
* path use the same logic as above.
*/
- if (static_cpu_has(X86_FEATURE_CLFLUSH))
- cpa_flush_range(start, numpages, 0);
- else
- cpa_flush_all(0);
+ cpa_flush_range(start, numpages, 0);
return ret;
}
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index ae394552fb94..59274e2c1ac4 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -115,6 +115,8 @@ static inline void pgd_list_del(pgd_t *pgd)
#define UNSHARED_PTRS_PER_PGD \
(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+#define MAX_UNSHARED_PTRS_PER_PGD \
+ max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
@@ -181,6 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
* and initialize the kernel pmds here.
*/
#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
+#define MAX_PREALLOCATED_PMDS MAX_UNSHARED_PTRS_PER_PGD
/*
* We allocate separate PMDs for the kernel part of the user page-table
@@ -189,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
*/
#define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \
KERNEL_PGD_PTRS : 0)
+#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
@@ -210,7 +214,9 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS 0
+#define MAX_PREALLOCATED_PMDS 0
#define PREALLOCATED_USER_PMDS 0
+#define MAX_PREALLOCATED_USER_PMDS 0
#endif /* CONFIG_X86_PAE */
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
@@ -428,8 +434,8 @@ static inline void _pgd_free(pgd_t *pgd)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
- pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
- pmd_t *pmds[PREALLOCATED_PMDS];
+ pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
+ pmd_t *pmds[MAX_PREALLOCATED_PMDS];
pgd = _pgd_alloc();
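
The MAX_* bounds are needed, presumably, because PREALLOCATED_USER_PMDS expands to static_cpu_has() and is therefore not a compile-time constant: sizing the on-stack arrays with it would make them variable-length arrays. A sketch of the difference:

    /* Sketch: runtime-dependent bound vs. compile-time maximum. */
    pmd_t *vla_pmds[PREALLOCATED_USER_PMDS];     /* VLA: bound known at runtime  */
    pmd_t *fix_pmds[MAX_PREALLOCATED_USER_PMDS]; /* fixed: KERNEL_PGD_PTRS slots */
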
@@ -637,6 +643,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
unsigned long address = __fix_to_virt(idx);
+#ifdef CONFIG_X86_64
+ /*
+ * Ensure that the static initial page tables are covering the
+ * fixmap completely.
+ */
+ BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
+ (FIXMAP_PMD_NUM * PTRS_PER_PTE));
+#endif
+
if (idx >= __end_of_fixed_addresses) {
BUG();
return;
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index c1fc1ae6b429..4fee5c3003ed 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -434,11 +434,42 @@ static void __init pti_clone_p4d(unsigned long addr)
}
/*
- * Clone the CPU_ENTRY_AREA into the user space visible page table.
+ * Clone the CPU_ENTRY_AREA and associated data into the user space visible
+ * page table.
*/
static void __init pti_clone_user_shared(void)
{
+ unsigned int cpu;
+
pti_clone_p4d(CPU_ENTRY_AREA_BASE);
+
+ for_each_possible_cpu(cpu) {
+ /*
+ * The SYSCALL64 entry code needs to be able to find the
+ * thread stack and needs one word of scratch space in which
+ * to spill a register. All of this lives in the TSS, in
+ * the sp1 and sp2 slots.
+ *
+ * This is done for all possible CPUs during boot to ensure
+ * that it's propagated to all mms. If we were to add one of
+ * these mappings during CPU hotplug, we would need to take
+ * some measure to make sure that every mm that subsequently
+ * ran on that CPU would have the relevant PGD entry in its
+ * pagetables. The usual vmalloc_fault() mechanism would not
+ * work for page faults taken in entry_SYSCALL_64 before RSP
+ * is set up.
+ */
+
+ unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
+ phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
+ pte_t *target_pte;
+
+ target_pte = pti_user_pagetable_walk_pte(va);
+ if (WARN_ON(!target_pte))
+ return;
+
+ *target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
+ }
}
#else /* CONFIG_X86_64 */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index e96b99eb800c..bddd6b3cee1d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
+#include <linux/ptrace.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
@@ -180,13 +181,29 @@ static void sync_current_stack_to_mm(struct mm_struct *mm)
}
}
+static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id)
+{
+ /*
+ * Check if the current (previous) task has access to the memory
+ * of the @tsk (next) task. If access is denied, make sure to
+ * issue an IBPB to stop user->user Spectre-v2 attacks.
+ *
+ * Note: __ptrace_may_access() returns 0 or -ERRNO.
+ */
+ return (tsk && tsk->mm && tsk->mm->context.ctx_id != last_ctx_id &&
+ ptrace_may_access_sched(tsk, PTRACE_MODE_SPEC_IBPB));
+}
+
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+ bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
unsigned cpu = smp_processor_id();
u64 next_tlb_gen;
+ bool need_flush;
+ u16 new_asid;
/*
* NB: The scheduler will call us with prev == next when switching
@@ -240,20 +257,41 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
next->context.ctx_id);
/*
- * We don't currently support having a real mm loaded without
- * our cpu set in mm_cpumask(). We have all the bookkeeping
- * in place to figure out whether we would need to flush
- * if our cpu were cleared in mm_cpumask(), but we don't
- * currently use it.
+ * Even in lazy TLB mode, the CPU should stay set in the
+ * mm_cpumask. The TLB shootdown code can figure out from
+ * cpu_tlbstate.is_lazy whether or not to send an IPI.
*/
if (WARN_ON_ONCE(real_prev != &init_mm &&
!cpumask_test_cpu(cpu, mm_cpumask(next))))
cpumask_set_cpu(cpu, mm_cpumask(next));
- return;
+ /*
+ * If the CPU is not in lazy TLB mode, we are just switching
+ * from one thread in a process to another thread in the same
+ * process. No TLB flush required.
+ */
+ if (!was_lazy)
+ return;
+
+ /*
+ * Read the tlb_gen to check whether a flush is needed.
+ * If the TLB is up to date, just use it.
+ * The barrier synchronizes with the tlb_gen increment in
+ * the TLB shootdown code.
+ */
+ smp_mb();
+ next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+ if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
+ next_tlb_gen)
+ return;
+
+ /*
+ * TLB contents went out of date while we were in lazy
+ * mode. Fall through to the TLB switching code below.
+ */
+ new_asid = prev_asid;
+ need_flush = true;
} else {
- u16 new_asid;
- bool need_flush;
u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
/*
@@ -262,18 +300,13 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* one process from doing Spectre-v2 attacks on another.
*
* As an optimization, flush indirect branches only when
- * switching into processes that disable dumping. This
- * protects high value processes like gpg, without having
- * too high performance overhead. IBPB is *expensive*!
- *
- * This will not flush branches when switching into kernel
- * threads. It will also not flush if we switch to idle
- * thread and back to the same process. It will flush if we
- * switch to a different non-dumpable process.
+ * switching into a process that can't be ptraced by the
+ * current one (in that case the attacker has a much more
+ * convenient way to tamper with the next process than
+ * branch buffer poisoning).
*/
- if (tsk && tsk->mm &&
- tsk->mm->context.ctx_id != last_ctx_id &&
- get_dumpable(tsk->mm) != SUID_DUMP_USER)
+ if (static_cpu_has(X86_FEATURE_USE_IBPB) &&
+ ibpb_needed(tsk, last_ctx_id))
indirect_branch_prediction_barrier();
if (IS_ENABLED(CONFIG_VMAP_STACK)) {
@@ -308,46 +341,48 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
/* Let nmi_uaccess_okay() know that we're changing CR3. */
this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
barrier();
+ }
- if (need_flush) {
- this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
- this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
- load_new_mm_cr3(next->pgd, new_asid, true);
-
- /*
- * NB: This gets called via leave_mm() in the idle path
- * where RCU functions differently. Tracing normally
- * uses RCU, so we need to use the _rcuidle variant.
- *
- * (There is no good reason for this. The idle code should
- * be rearranged to call this before rcu_idle_enter().)
- */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
- } else {
- /* The new ASID is already up to date. */
- load_new_mm_cr3(next->pgd, new_asid, false);
-
- /* See above wrt _rcuidle. */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
- }
+ if (need_flush) {
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
+ load_new_mm_cr3(next->pgd, new_asid, true);
/*
- * Record last user mm's context id, so we can avoid
- * flushing branch buffer with IBPB if we switch back
- * to the same user.
+ * NB: This gets called via leave_mm() in the idle path
+ * where RCU functions differently. Tracing normally
+ * uses RCU, so we need to use the _rcuidle variant.
+ *
+ * (There is no good reason for this. The idle code should
+ * be rearranged to call this before rcu_idle_enter().)
*/
- if (next != &init_mm)
- this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
-
- /* Make sure we write CR3 before loaded_mm. */
- barrier();
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ } else {
+ /* The new ASID is already up to date. */
+ load_new_mm_cr3(next->pgd, new_asid, false);
- this_cpu_write(cpu_tlbstate.loaded_mm, next);
- this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+ /* See above wrt _rcuidle. */
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
- load_mm_cr4(next);
- switch_ldt(real_prev, next);
+ /*
+ * Record last user mm's context id, so we can avoid
+ * flushing branch buffer with IBPB if we switch back
+ * to the same user.
+ */
+ if (next != &init_mm)
+ this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
+ /* Make sure we write CR3 before loaded_mm. */
+ barrier();
+
+ this_cpu_write(cpu_tlbstate.loaded_mm, next);
+ this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+
+ if (next != real_prev) {
+ load_mm_cr4(next);
+ switch_ldt(real_prev, next);
+ }
}
/*
@@ -368,20 +403,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
return;
- if (tlb_defer_switch_to_init_mm()) {
- /*
- * There's a significant optimization that may be possible
- * here. We have accurate enough TLB flush tracking that we
- * don't need to maintain coherence of TLB per se when we're
- * lazy. We do, however, need to maintain coherence of
- * paging-structure caches. We could, in principle, leave our
- * old mm loaded and only switch to init_mm when
- * tlb_remove_page() happens.
- */
- this_cpu_write(cpu_tlbstate.is_lazy, true);
- } else {
- switch_mm(NULL, &init_mm, NULL);
- }
+ this_cpu_write(cpu_tlbstate.is_lazy, true);
}
/*
@@ -468,6 +490,9 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
* paging-structure cache to avoid speculatively reading
* garbage into our TLB. Since switching to init_mm is barely
* slower than a minimal flush, just switch to init_mm.
+ *
+ * This should be rare, with native_flush_tlb_others skipping
+ * IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
return;
@@ -528,17 +553,16 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
f->new_tlb_gen == local_tlb_gen + 1 &&
f->new_tlb_gen == mm_tlb_gen) {
/* Partial flush */
- unsigned long addr;
- unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;
+ unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
+ unsigned long addr = f->start;
- addr = f->start;
while (addr < f->end) {
__flush_tlb_one_user(addr);
- addr += PAGE_SIZE;
+ addr += 1UL << f->stride_shift;
}
if (local)
- count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
- trace_tlb_flush(reason, nr_pages);
+ count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
+ trace_tlb_flush(reason, nr_invalidate);
} else {
/* Full flush. */
local_flush_tlb();
@@ -571,6 +595,11 @@ static void flush_tlb_func_remote(void *info)
flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}
+static bool tlb_is_not_lazy(int cpu, void *data)
+{
+ return !per_cpu(cpu_tlbstate.is_lazy, cpu);
+}
+
void native_flush_tlb_others(const struct cpumask *cpumask,
const struct flush_tlb_info *info)
{
@@ -606,8 +635,23 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
(void *)info, 1);
return;
}
- smp_call_function_many(cpumask, flush_tlb_func_remote,
+
+ /*
+ * If no page tables were freed, we can skip sending IPIs to
+ * CPUs in lazy TLB mode. They will flush the TLB themselves
+ * at the next context switch.
+ *
+ * However, if page tables are getting freed, we need to send the
+ * IPI everywhere, to prevent CPUs in lazy TLB mode from tripping
+ * up on the new contents of what used to be page tables, while
+ * doing a speculative memory access.
+ */
+ if (info->freed_tables)
+ smp_call_function_many(cpumask, flush_tlb_func_remote,
(void *)info, 1);
+ else
+ on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
+ (void *)info, 1, GFP_ATOMIC, cpumask);
}
/*
@@ -623,12 +667,15 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
- unsigned long end, unsigned long vmflag)
+ unsigned long end, unsigned int stride_shift,
+ bool freed_tables)
{
int cpu;
struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
.mm = mm,
+ .stride_shift = stride_shift,
+ .freed_tables = freed_tables,
};
cpu = get_cpu();
@@ -638,8 +685,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
/* Should we flush just the requested range? */
if ((end != TLB_FLUSH_ALL) &&
- !(vmflag & VM_HUGETLB) &&
- ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
+ ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
info.start = start;
info.end = end;
} else {
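
With stride_shift the partial-flush arithmetic becomes unit-aware. A hypothetical call for a single 2M mapping (mm and addr are placeholders, not from the patch) now takes the partial-flush path with one targeted invalidation instead of falling back to a full TLB flush:

    /* Hypothetical usage: flush one 2M huge page mapping. */
    flush_tlb_mm_range(mm, addr, addr + PMD_SIZE, PMD_SHIFT, false);
    /* (end - start) >> stride_shift == 1 <= tlb_single_page_flush_ceiling,
     * so a single __flush_tlb_one_user(addr) covers the whole range. */
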
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 5559dcaddd5e..948656069cdd 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -356,7 +356,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
} else {
struct pci_root_info *info;
- info = kzalloc_node(sizeof(*info), GFP_KERNEL, node);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
dev_err(&root->device->dev,
"pci_bus %04x:%02x: ignored (out of memory)\n",
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 649bdde63e32..bfa50e65ef6c 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -93,7 +93,8 @@ static int __init early_root_info_init(void)
vendor = id & 0xffff;
device = (id>>16) & 0xffff;
- if (vendor != PCI_VENDOR_ID_AMD)
+ if (vendor != PCI_VENDOR_ID_AMD &&
+ vendor != PCI_VENDOR_ID_HYGON)
continue;
if (hb_probes[i].device == device) {
@@ -390,7 +391,8 @@ static int __init pci_io_ecs_init(void)
static int __init amd_postcore_init(void)
{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return 0;
early_root_info_init();
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 13f4485ca388..30a5111ae5fd 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -629,17 +629,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
static void quirk_no_aersid(struct pci_dev *pdev)
{
/* VMD Domain */
- if (is_vmd(pdev->bus))
+ if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus))
pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334a, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c
index 034813d4ab1e..6cb6076223ba 100644
--- a/arch/x86/platform/atom/punit_atom_debug.c
+++ b/arch/x86/platform/atom/punit_atom_debug.c
@@ -115,7 +115,7 @@ static struct dentry *punit_dbg_file;
static int punit_dbgfs_register(struct punit_device *punit_device)
{
- static struct dentry *dev_state;
+ struct dentry *dev_state;
punit_dbg_file = debugfs_create_dir("punit_atom", NULL);
if (!punit_dbg_file)
@@ -143,8 +143,8 @@ static void punit_dbgfs_unregister(void)
(kernel_ulong_t)&drv_data }
static const struct x86_cpu_id intel_punit_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt),
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, punit_device_tng),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT, punit_device_byt),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, punit_device_tng),
ICPU(INTEL_FAM6_ATOM_AIRMONT, punit_device_cht),
{}
};
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
index 5fdacb322ceb..7476b3b097e1 100644
--- a/arch/x86/platform/efi/early_printk.c
+++ b/arch/x86/platform/efi/early_printk.c
@@ -26,12 +26,14 @@ static bool early_efi_keep;
*/
static __init int early_efi_map_fb(void)
{
- unsigned long base, size;
+ u64 base, size;
if (!early_efi_keep)
return 0;
base = boot_params.screen_info.lfb_base;
+ if (boot_params.screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ base |= (u64)boot_params.screen_info.ext_lfb_base << 32;
size = boot_params.screen_info.lfb_size;
efi_fb = ioremap(base, size);
@@ -46,9 +48,11 @@ early_initcall(early_efi_map_fb);
*/
static __ref void *early_efi_map(unsigned long start, unsigned long len)
{
- unsigned long base;
+ u64 base;
base = boot_params.screen_info.lfb_base;
+ if (boot_params.screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ base |= (u64)boot_params.screen_info.ext_lfb_base << 32;
if (efi_fb)
return (efi_fb + start);
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ee5d08f25ce4..e8da7f492970 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -619,18 +619,16 @@ void __init efi_dump_pagetable(void)
/*
* Makes the calling thread switch to/from efi_mm context. Can be used
- * for SetVirtualAddressMap() i.e. current->active_mm == init_mm as well
- * as during efi runtime calls i.e current->active_mm == current_mm.
- * We are not mm_dropping()/mm_grabbing() any mm, because we are not
- * losing/creating any references.
+ * in a kernel thread and user context. Preemption needs to remain disabled
+ * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm
+ * cannot change under us.
+ * Callers must ensure that there are no concurrent calls to this function.
*/
void efi_switch_mm(struct mm_struct *mm)
{
- task_lock(current);
efi_scratch.prev_mm = current->active_mm;
current->active_mm = mm;
switch_mm(efi_scratch.prev_mm, mm, NULL);
- task_unlock(current);
}
#ifdef CONFIG_EFI_MIXED
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 844d31cb8a0c..669babcaf245 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -16,6 +16,7 @@
#include <asm/efi.h>
#include <asm/uv/uv.h>
#include <asm/cpu_device_id.h>
+#include <asm/reboot.h>
#define EFI_MIN_RESERVE 5120
@@ -654,3 +655,80 @@ int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
}
#endif
+
+/*
+ * If any access by any EFI runtime service causes a page fault, then:
+ * 1. If it's efi_reset_system(), reboot through BIOS.
+ * 2. If it's any other EFI runtime service, then
+ * a. Return error status to the efi caller process.
+ * b. Disable EFI Runtime Services forever and
+ * c. Freeze efi_rts_wq and schedule new process.
+ *
+ * @return: Returns only if the page fault is not handled. This function
+ * never returns if the page fault is handled successfully.
+ */
+void efi_recover_from_page_fault(unsigned long phys_addr)
+{
+ if (!IS_ENABLED(CONFIG_X86_64))
+ return;
+
+ /*
+ * Make sure that an efi runtime service caused the page fault.
+ * "efi_mm" cannot be used to check if the page fault had occurred
+ * in the firmware context because efi=old_map doesn't use efi_pgd.
+ */
+ if (efi_rts_work.efi_rts_id == NONE)
+ return;
+
+ /*
+ * Address range 0x0000 - 0x0fff is always mapped in the efi_pgd, so
+ * page faulting on these addresses isn't expected.
+ */
+ if (phys_addr >= 0x0000 && phys_addr <= 0x0fff)
+ return;
+
+ /*
+ * Print stack trace as it might be useful to know which EFI Runtime
+ * Service is buggy.
+ */
+ WARN(1, FW_BUG "Page fault caused by firmware at PA: 0x%lx\n",
+ phys_addr);
+
+ /*
+ * Buggy efi_reset_system() is handled differently from other EFI
+ * Runtime Services as it doesn't use efi_rts_wq. Although,
+ * native_machine_emergency_restart() says that machine_real_restart()
+ * could fail, it's better not to complicate this fault handler
+ * because this case occurs *very* rarely and hence could be improved
+ * on an as-needed basis.
+ */
+ if (efi_rts_work.efi_rts_id == RESET_SYSTEM) {
+ pr_info("efi_reset_system() buggy! Reboot through BIOS\n");
+ machine_real_restart(MRR_BIOS);
+ return;
+ }
+
+ /*
+ * Before calling EFI Runtime Service, the kernel has switched the
+ * calling process to efi_mm. Hence, switch back to task_mm.
+ */
+ arch_efi_call_virt_teardown();
+
+ /* Signal error status to the efi caller process */
+ efi_rts_work.status = EFI_ABORTED;
+ complete(&efi_rts_work.efi_rts_comp);
+
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ pr_info("Froze efi_rts_wq and disabled EFI Runtime Services\n");
+
+ /*
+ * Call schedule() in an infinite loop, so that any spurious wake ups
+ * will never run efi_rts_wq again.
+ */
+ for (;;) {
+ set_current_state(TASK_IDLE);
+ schedule();
+ }
+
+ return;
+}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
index 4392c15ed9e0..dbfc5cf2aa93 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
@@ -10,7 +10,7 @@
* of the License.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
@@ -43,7 +43,6 @@ static struct fixed_voltage_config bcm43xx_vmmc = {
* real voltage and signaling are still 1.8V.
*/
.microvolts = 2000000, /* 1.8V */
- .gpio = -EINVAL,
.startup_delay = 250 * 1000, /* 250ms */
.enable_high = 1, /* active high */
.enabled_at_boot = 0, /* disabled at boot */
@@ -58,11 +57,23 @@ static struct platform_device bcm43xx_vmmc_regulator = {
},
};
+static struct gpiod_lookup_table bcm43xx_vmmc_gpio_table = {
+ .dev_id = "reg-fixed-voltage.0",
+ .table = {
+ GPIO_LOOKUP("0000:00:0c.0", -1, NULL, GPIO_ACTIVE_LOW),
+ {}
+ },
+};
+
static int __init bcm43xx_regulator_register(void)
{
+ struct gpiod_lookup_table *table = &bcm43xx_vmmc_gpio_table;
+ struct gpiod_lookup *lookup = table->table;
int ret;
- bcm43xx_vmmc.gpio = get_gpio_by_name(WLAN_SFI_GPIO_ENABLE_NAME);
+ lookup[0].chip_hwnum = get_gpio_by_name(WLAN_SFI_GPIO_ENABLE_NAME);
+ gpiod_add_lookup_table(table);
+
ret = platform_device_register(&bcm43xx_vmmc_regulator);
if (ret) {
pr_err("%s: vmmc regulator register failed\n", __func__);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
index 5a0483e7bf66..31dce781364c 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
@@ -68,7 +68,7 @@ static struct bt_sfi_data tng_bt_sfi_data __initdata = {
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata }
static const struct x86_cpu_id bt_sfi_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, tng_bt_sfi_data),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, tng_bt_sfi_data),
{}
};
diff --git a/arch/x86/platform/olpc/olpc-xo1-rtc.c b/arch/x86/platform/olpc/olpc-xo1-rtc.c
index a2b4efddd61a..8e7ddd7e313a 100644
--- a/arch/x86/platform/olpc/olpc-xo1-rtc.c
+++ b/arch/x86/platform/olpc/olpc-xo1-rtc.c
@@ -16,6 +16,7 @@
#include <asm/msr.h>
#include <asm/olpc.h>
+#include <asm/x86_init.h>
static void rtc_wake_on(struct device *dev)
{
@@ -75,6 +76,8 @@ static int __init xo1_rtc_init(void)
if (r)
return r;
+ x86_platform.legacy.rtc = 0;
+
device_init_wakeup(&xo1_rtc_device.dev, 1);
return 0;
}
diff --git a/arch/x86/platform/ts5500/ts5500.c b/arch/x86/platform/ts5500/ts5500.c
index fd39301f25ac..7e56fc74093c 100644
--- a/arch/x86/platform/ts5500/ts5500.c
+++ b/arch/x86/platform/ts5500/ts5500.c
@@ -24,7 +24,6 @@
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/init.h>
-#include <linux/platform_data/gpio-ts5500.h>
#include <linux/platform_data/max197.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
index a4701389562c..37923d715741 100644
--- a/arch/x86/power/Makefile
+++ b/arch/x86/power/Makefile
@@ -7,4 +7,4 @@ nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_cpu.o := $(nostackp)
obj-$(CONFIG_PM_SLEEP) += cpu.o
-obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o
+obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o hibernate.o
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
new file mode 100644
index 000000000000..bcddf09b5aa3
--- /dev/null
+++ b/arch/x86/power/hibernate.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hibernation support for x86
+ *
+ * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
+ * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
+ * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
+ */
+#include <linux/gfp.h>
+#include <linux/smp.h>
+#include <linux/suspend.h>
+#include <linux/scatterlist.h>
+#include <linux/kdebug.h>
+
+#include <crypto/hash.h>
+
+#include <asm/e820/api.h>
+#include <asm/init.h>
+#include <asm/proto.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mtrr.h>
+#include <asm/sections.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Address to jump to in the last phase of restore in order to get to the image
+ * kernel's text (this value is passed in the image header).
+ */
+unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
+
+/*
+ * Value of the cr3 register from before the hibernation (this value is passed
+ * in the image header).
+ */
+unsigned long restore_cr3 __visible;
+unsigned long temp_pgt __visible;
+unsigned long relocated_restore_code __visible;
+
+/**
+ * pfn_is_nosave - check if given pfn is in the 'nosave' section
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn;
+ unsigned long nosave_end_pfn;
+
+ nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
+ nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
+
+ return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
+}
+
+
+#define MD5_DIGEST_SIZE 16
+
+struct restore_data_record {
+ unsigned long jump_address;
+ unsigned long jump_address_phys;
+ unsigned long cr3;
+ unsigned long magic;
+ u8 e820_digest[MD5_DIGEST_SIZE];
+};
+
+#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
+/**
+ * get_e820_md5 - calculate md5 according to given e820 table
+ *
+ * @table: the e820 table to be calculated
+ * @buf: the md5 result to be stored to
+ */
+static int get_e820_md5(struct e820_table *table, void *buf)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ int size;
+ int ret = 0;
+
+ tfm = crypto_alloc_shash("md5", 0, 0);
+ if (IS_ERR(tfm))
+ return -ENOMEM;
+
+ desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto free_tfm;
+ }
+
+ desc->tfm = tfm;
+ desc->flags = 0;
+
+ size = offsetof(struct e820_table, entries) +
+ sizeof(struct e820_entry) * table->nr_entries;
+
+ if (crypto_shash_digest(desc, (u8 *)table, size, buf))
+ ret = -EINVAL;
+
+ kzfree(desc);
+
+free_tfm:
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+static int hibernation_e820_save(void *buf)
+{
+ return get_e820_md5(e820_table_firmware, buf);
+}
+
+static bool hibernation_e820_mismatch(void *buf)
+{
+ int ret;
+ u8 result[MD5_DIGEST_SIZE];
+
+ memset(result, 0, MD5_DIGEST_SIZE);
+ /* If there is no digest in suspend kernel, let it go. */
+ if (!memcmp(result, buf, MD5_DIGEST_SIZE))
+ return false;
+
+ ret = get_e820_md5(e820_table_firmware, result);
+ if (ret)
+ return true;
+
+ return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
+}
+#else
+static int hibernation_e820_save(void *buf)
+{
+ return 0;
+}
+
+static bool hibernation_e820_mismatch(void *buf)
+{
+ /* If md5 is not builtin for restore kernel, let it go. */
+ return false;
+}
+#endif
+
+#ifdef CONFIG_X86_64
+#define RESTORE_MAGIC 0x23456789ABCDEF01UL
+#else
+#define RESTORE_MAGIC 0x12345678UL
+#endif
+
+/**
+ * arch_hibernation_header_save - populate the architecture specific part
+ * of a hibernation image header
+ * @addr: address to save the data at
+ */
+int arch_hibernation_header_save(void *addr, unsigned int max_size)
+{
+ struct restore_data_record *rdr = addr;
+
+ if (max_size < sizeof(struct restore_data_record))
+ return -EOVERFLOW;
+ rdr->magic = RESTORE_MAGIC;
+ rdr->jump_address = (unsigned long)restore_registers;
+ rdr->jump_address_phys = __pa_symbol(restore_registers);
+
+ /*
+ * The restore code fixes up CR3 and CR4 in the following sequence:
+ *
+ * [in hibernation asm]
+ * 1. CR3 <= temporary page tables
+ * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
+ * 3. CR3 <= rdr->cr3
+ * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
+ * [in restore_processor_state()]
+ * 5. CR4 <= saved CR4
+ * 6. CR3 <= saved CR3
+ *
+ * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
+ * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
+ * rdr->cr3 needs to point to valid page tables but must not
+ * have any of the PCID bits set.
+ */
+ rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
+
+ return hibernation_e820_save(rdr->e820_digest);
+}
+
+/**
+ * arch_hibernation_header_restore - read the architecture specific data
+ * from the hibernation image header
+ * @addr: address to read the data from
+ */
+int arch_hibernation_header_restore(void *addr)
+{
+ struct restore_data_record *rdr = addr;
+
+ if (rdr->magic != RESTORE_MAGIC) {
+ pr_crit("Unrecognized hibernate image header format!\n");
+ return -EINVAL;
+ }
+
+ restore_jump_address = rdr->jump_address;
+ jump_address_phys = rdr->jump_address_phys;
+ restore_cr3 = rdr->cr3;
+
+ if (hibernation_e820_mismatch(rdr->e820_digest)) {
+ pr_crit("Hibernate inconsistent memory map detected!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int relocate_restore_code(void)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ relocated_restore_code = get_safe_page(GFP_ATOMIC);
+ if (!relocated_restore_code)
+ return -ENOMEM;
+
+ memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);
+
+ /* Make the page containing the relocated code executable */
+ pgd = (pgd_t *)__va(read_cr3_pa()) +
+ pgd_index(relocated_restore_code);
+ p4d = p4d_offset(pgd, relocated_restore_code);
+ if (p4d_large(*p4d)) {
+ set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
+ goto out;
+ }
+ pud = pud_offset(p4d, relocated_restore_code);
+ if (pud_large(*pud)) {
+ set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+ goto out;
+ }
+ pmd = pmd_offset(pud, relocated_restore_code);
+ if (pmd_large(*pmd)) {
+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+ goto out;
+ }
+ pte = pte_offset_kernel(pmd, relocated_restore_code);
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+out:
+ __flush_tlb_all();
+ return 0;
+}
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index afc4ed7b1578..15695e30f982 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -14,9 +14,7 @@
#include <asm/pgtable.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
-
-/* Defined in hibernate_asm_32.S */
-extern int restore_image(void);
+#include <asm/suspend.h>
/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;
@@ -145,6 +143,32 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
#endif
}
+static int set_up_temporary_text_mapping(pgd_t *pgd_base)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgd_base + pgd_index(restore_jump_address);
+
+ pmd = resume_one_md_table_init(pgd);
+ if (!pmd)
+ return -ENOMEM;
+
+ if (boot_cpu_has(X86_FEATURE_PSE)) {
+ set_pmd(pmd + pmd_index(restore_jump_address),
+ __pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
+ } else {
+ pte = resume_one_page_table_init(pmd);
+ if (!pte)
+ return -ENOMEM;
+ set_pte(pte + pte_index(restore_jump_address),
+ __pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
+ }
+
+ return 0;
+}
+
asmlinkage int swsusp_arch_resume(void)
{
int error;
@@ -154,22 +178,22 @@ asmlinkage int swsusp_arch_resume(void)
return -ENOMEM;
resume_init_first_level_page_table(resume_pg_dir);
+
+ error = set_up_temporary_text_mapping(resume_pg_dir);
+ if (error)
+ return error;
+
error = resume_physical_mapping_init(resume_pg_dir);
if (error)
return error;
+ temp_pgt = __pa(resume_pg_dir);
+
+ error = relocate_restore_code();
+ if (error)
+ return error;
+
/* We have got enough memory and from now on we cannot recover */
restore_image();
return 0;
}
-
-/*
- * pfn_is_nosave - check if given pfn is in the 'nosave' section
- */
-
-int pfn_is_nosave(unsigned long pfn)
-{
- unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
- unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
- return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
-}
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index f8e3b668d20b..239f424ccb29 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -26,26 +26,6 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
-/* Defined in hibernate_asm_64.S */
-extern asmlinkage __visible int restore_image(void);
-
-/*
- * Address to jump to in the last phase of restore in order to get to the image
- * kernel's text (this value is passed in the image header).
- */
-unsigned long restore_jump_address __visible;
-unsigned long jump_address_phys;
-
-/*
- * Value of the cr3 register from before the hibernation (this value is passed
- * in the image header).
- */
-unsigned long restore_cr3 __visible;
-
-unsigned long temp_level4_pgt __visible;
-
-unsigned long relocated_restore_code __visible;
-
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
pmd_t *pmd;
@@ -141,46 +121,7 @@ static int set_up_temporary_mappings(void)
return result;
}
- temp_level4_pgt = __pa(pgd);
- return 0;
-}
-
-static int relocate_restore_code(void)
-{
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- relocated_restore_code = get_safe_page(GFP_ATOMIC);
- if (!relocated_restore_code)
- return -ENOMEM;
-
- memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);
-
- /* Make the page containing the relocated code executable */
- pgd = (pgd_t *)__va(read_cr3_pa()) +
- pgd_index(relocated_restore_code);
- p4d = p4d_offset(pgd, relocated_restore_code);
- if (p4d_large(*p4d)) {
- set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
- goto out;
- }
- pud = pud_offset(p4d, relocated_restore_code);
- if (pud_large(*pud)) {
- set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
- goto out;
- }
- pmd = pmd_offset(pud, relocated_restore_code);
- if (pmd_large(*pmd)) {
- set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
- goto out;
- }
- pte = pte_offset_kernel(pmd, relocated_restore_code);
- set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
-out:
- __flush_tlb_all();
+ temp_pgt = __pa(pgd);
return 0;
}
@@ -200,166 +141,3 @@ asmlinkage int swsusp_arch_resume(void)
restore_image();
return 0;
}
-
-/*
- * pfn_is_nosave - check if given pfn is in the 'nosave' section
- */
-
-int pfn_is_nosave(unsigned long pfn)
-{
- unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
- unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
- return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
-}
-
-#define MD5_DIGEST_SIZE 16
-
-struct restore_data_record {
- unsigned long jump_address;
- unsigned long jump_address_phys;
- unsigned long cr3;
- unsigned long magic;
- u8 e820_digest[MD5_DIGEST_SIZE];
-};
-
-#define RESTORE_MAGIC 0x23456789ABCDEF01UL
-
-#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
-/**
- * get_e820_md5 - calculate md5 according to given e820 table
- *
- * @table: the e820 table to be calculated
- * @buf: the md5 result to be stored to
- */
-static int get_e820_md5(struct e820_table *table, void *buf)
-{
- struct crypto_shash *tfm;
- struct shash_desc *desc;
- int size;
- int ret = 0;
-
- tfm = crypto_alloc_shash("md5", 0, 0);
- if (IS_ERR(tfm))
- return -ENOMEM;
-
- desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
- GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto free_tfm;
- }
-
- desc->tfm = tfm;
- desc->flags = 0;
-
- size = offsetof(struct e820_table, entries) +
- sizeof(struct e820_entry) * table->nr_entries;
-
- if (crypto_shash_digest(desc, (u8 *)table, size, buf))
- ret = -EINVAL;
-
- kzfree(desc);
-
-free_tfm:
- crypto_free_shash(tfm);
- return ret;
-}
-
-static void hibernation_e820_save(void *buf)
-{
- get_e820_md5(e820_table_firmware, buf);
-}
-
-static bool hibernation_e820_mismatch(void *buf)
-{
- int ret;
- u8 result[MD5_DIGEST_SIZE];
-
- memset(result, 0, MD5_DIGEST_SIZE);
- /* If there is no digest in suspend kernel, let it go. */
- if (!memcmp(result, buf, MD5_DIGEST_SIZE))
- return false;
-
- ret = get_e820_md5(e820_table_firmware, result);
- if (ret)
- return true;
-
- return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
-}
-#else
-static void hibernation_e820_save(void *buf)
-{
-}
-
-static bool hibernation_e820_mismatch(void *buf)
-{
- /* If md5 is not builtin for restore kernel, let it go. */
- return false;
-}
-#endif
-
-/**
- * arch_hibernation_header_save - populate the architecture specific part
- * of a hibernation image header
- * @addr: address to save the data at
- */
-int arch_hibernation_header_save(void *addr, unsigned int max_size)
-{
- struct restore_data_record *rdr = addr;
-
- if (max_size < sizeof(struct restore_data_record))
- return -EOVERFLOW;
- rdr->jump_address = (unsigned long)restore_registers;
- rdr->jump_address_phys = __pa_symbol(restore_registers);
-
- /*
- * The restore code fixes up CR3 and CR4 in the following sequence:
- *
- * [in hibernation asm]
- * 1. CR3 <= temporary page tables
- * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
- * 3. CR3 <= rdr->cr3
- * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
- * [in restore_processor_state()]
- * 5. CR4 <= saved CR4
- * 6. CR3 <= saved CR3
- *
- * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
- * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
- * rdr->cr3 needs to point to valid page tables but must not
- * have any of the PCID bits set.
- */
- rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
-
- rdr->magic = RESTORE_MAGIC;
-
- hibernation_e820_save(rdr->e820_digest);
-
- return 0;
-}
-
-/**
- * arch_hibernation_header_restore - read the architecture specific data
- * from the hibernation image header
- * @addr: address to read the data from
- */
-int arch_hibernation_header_restore(void *addr)
-{
- struct restore_data_record *rdr = addr;
-
- restore_jump_address = rdr->jump_address;
- jump_address_phys = rdr->jump_address_phys;
- restore_cr3 = rdr->cr3;
-
- if (rdr->magic != RESTORE_MAGIC) {
- pr_crit("Unrecognized hibernate image header format!\n");
- return -EINVAL;
- }
-
- if (hibernation_e820_mismatch(rdr->e820_digest)) {
- pr_crit("Hibernate inconsistent memory map detected!\n");
- return -ENODEV;
- }
-
- return 0;
-}
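
The digest logic shown above encodes a simple acceptance policy: an all-zero saved digest means the suspend kernel recorded none, so the image is accepted; otherwise the firmware e820 table is re-hashed and compared. A minimal userspace sketch of that policy — toy_digest, map_mismatch and DIGEST_SIZE are illustrative stand-ins (toy_digest replaces MD5), not the kernel code:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define DIGEST_SIZE 16

    static void toy_digest(const void *data, size_t len, uint8_t out[DIGEST_SIZE])
    {
        const uint8_t *p = data;

        memset(out, 0, DIGEST_SIZE);
        for (size_t i = 0; i < len; i++)
            out[i % DIGEST_SIZE] ^= (uint8_t)(p[i] + i);
    }

    static bool map_mismatch(const void *table, size_t len,
                             const uint8_t saved[DIGEST_SIZE])
    {
        uint8_t zero[DIGEST_SIZE] = { 0 };
        uint8_t now[DIGEST_SIZE];

        if (!memcmp(saved, zero, DIGEST_SIZE))
            return false;   /* no digest recorded in the image: accept */
        toy_digest(table, len, now);
        return memcmp(now, saved, DIGEST_SIZE) != 0;
    }
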
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 6e56815e13a0..6fe383002125 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -12,6 +12,7 @@
#include <asm/page_types.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
+#include <asm/frame.h>
.text
@@ -24,13 +25,30 @@ ENTRY(swsusp_arch_suspend)
pushfl
popl saved_context_eflags
+ /* save cr3 */
+ movl %cr3, %eax
+ movl %eax, restore_cr3
+
+ FRAME_BEGIN
call swsusp_save
+ FRAME_END
ret
+ENDPROC(swsusp_arch_suspend)
ENTRY(restore_image)
+ /* prepare to jump to the image kernel */
+ movl restore_jump_address, %ebx
+ movl restore_cr3, %ebp
+
movl mmu_cr4_features, %ecx
- movl resume_pg_dir, %eax
- subl $__PAGE_OFFSET, %eax
+
+ /* jump to relocated restore code */
+ movl relocated_restore_code, %eax
+ jmpl *%eax
+
+/* code below has been relocated to a safe page */
+ENTRY(core_restore_code)
+ movl temp_pgt, %eax
movl %eax, %cr3
jecxz 1f # cr4 Pentium and higher, skip if zero
@@ -49,7 +67,7 @@ copy_loop:
movl pbe_address(%edx), %esi
movl pbe_orig_address(%edx), %edi
- movl $1024, %ecx
+ movl $(PAGE_SIZE >> 2), %ecx
rep
movsl
@@ -58,10 +76,13 @@ copy_loop:
.p2align 4,,7
done:
+ jmpl *%ebx
+
+ /* code below belongs to the image kernel */
+ .align PAGE_SIZE
+ENTRY(restore_registers)
/* go back to the original page tables */
- movl $swapper_pg_dir, %eax
- subl $__PAGE_OFFSET, %eax
- movl %eax, %cr3
+ movl %ebp, %cr3
movl mmu_cr4_features, %ecx
jecxz 1f # cr4 Pentium and higher, skip if zero
movl %ecx, %cr4; # turn PGE back on
@@ -82,4 +103,8 @@ done:
xorl %eax, %eax
+ /* tell the hibernation core that we've just restored the memory */
+ movl %eax, in_suspend
+
ret
+ENDPROC(restore_registers)
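
For readers following the new copy_loop: `movl $(PAGE_SIZE >> 2), %ecx` with `rep movsl` copies one page as PAGE_SIZE/4 32-bit words, which is why the magic constant 1024 could be dropped. A rough C equivalent of walking the page-backup list (struct pbe fields abridged here; this is a sketch, not the kernel's restore path):

    #include <stdint.h>

    #define PAGE_SIZE 4096u

    struct pbe {                 /* abridged page-backup entry */
        void *address;           /* where the saved copy lives */
        void *orig_address;      /* where it must be restored to */
        struct pbe *next;
    };

    /* C equivalent of copy_loop: PAGE_SIZE >> 2 longs per page. */
    static void restore_pages(struct pbe *list)
    {
        for (struct pbe *p = list; p; p = p->next) {
            uint32_t *src = p->address;
            uint32_t *dst = p->orig_address;

            for (unsigned int i = 0; i < PAGE_SIZE >> 2; i++)
                dst[i] = src[i];
        }
    }
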
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index fd369a6e9ff8..3008baa2fa95 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -59,7 +59,7 @@ ENTRY(restore_image)
movq restore_cr3(%rip), %r9
/* prepare to switch to temporary page tables */
- movq temp_level4_pgt(%rip), %rax
+ movq temp_pgt(%rip), %rax
movq mmu_cr4_features(%rip), %rbx
/* prepare to copy image data to their original locations */
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 3a6c8ebc8032..0b08067c45f3 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -196,6 +196,7 @@ static const char *rel_type(unsigned type)
#if ELF_BITS == 64
REL_TYPE(R_X86_64_NONE),
REL_TYPE(R_X86_64_64),
+ REL_TYPE(R_X86_64_PC64),
REL_TYPE(R_X86_64_PC32),
REL_TYPE(R_X86_64_GOT32),
REL_TYPE(R_X86_64_PLT32),
@@ -782,6 +783,15 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
add_reloc(&relocs32neg, offset);
break;
+ case R_X86_64_PC64:
+ /*
+ * Only used by jump labels
+ */
+ if (is_percpu_sym(sym, symname))
+ die("Invalid R_X86_64_PC64 relocation against per-CPU symbol %s\n",
+ symname);
+ break;
+
case R_X86_64_32:
case R_X86_64_32S:
case R_X86_64_64:
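
Background for the new R_X86_64_PC64 case: a place-relative 64-bit relocation resolves as S + A - P, the symbol value plus addend minus the address of the patched location. A hedged sketch of that arithmetic (not the relocs tool's own code; apply_pc64 is illustrative):

    #include <stdint.h>

    /* Resolve a 64-bit PC-relative relocation: value = S + A - P. */
    static void apply_pc64(uint64_t sym_value, int64_t addend, uint64_t *place)
    {
        *place = sym_value + addend - (uint64_t)(uintptr_t)place;
    }

Because the result depends on the load address only through P, such relocations need no fixup at relocation time — hence the tool merely sanity-checks them against per-CPU symbols instead of emitting entries.
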
diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h
index 548197212a45..413f3519d9a1 100644
--- a/arch/x86/um/asm/elf.h
+++ b/arch/x86/um/asm/elf.h
@@ -116,8 +116,7 @@ do { \
#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
#define R_X86_64_8 14 /* Direct 8 bit sign extended */
#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
-
-#define R_X86_64_NUM 16
+#define R_X86_64_PC64 24 /* Place relative 64-bit signed */
/*
* This is used to ensure we don't load something for the wrong architecture.
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index c1f98f32c45f..1ef391aa184d 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -18,6 +18,7 @@ config XEN_PV
bool "Xen PV guest support"
default y
depends on XEN
+ select PARAVIRT_XXL
select XEN_HAVE_PVMMU
select XEN_HAVE_VPMU
help
@@ -68,7 +69,6 @@ config XEN_SAVE_RESTORE
config XEN_DEBUG_FS
bool "Enable Xen debug and tuning parameters in debugfs"
depends on XEN && DEBUG_FS
- default n
help
Enable statistics output and various tuning options in debugfs.
Enabling this option may incur a significant performance overhead.
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index d83cb5478f54..dd2550d33b38 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -12,25 +12,46 @@ endif
# Make sure early boot has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_enlighten_pv.o := $(nostackp)
-CFLAGS_mmu_pv.o := $(nostackp)
+CFLAGS_mmu_pv.o := $(nostackp)
-obj-y := enlighten.o multicalls.o mmu.o irq.o \
- time.o xen-asm.o xen-asm_$(BITS).o \
- grant-table.o suspend.o platform-pci-unplug.o
+obj-y += enlighten.o
+obj-y += mmu.o
+obj-y += time.o
+obj-y += grant-table.o
+obj-y += suspend.o
-obj-$(CONFIG_XEN_PVHVM) += enlighten_hvm.o mmu_hvm.o suspend_hvm.o
-obj-$(CONFIG_XEN_PV) += setup.o apic.o pmu.o suspend_pv.o \
- p2m.o enlighten_pv.o mmu_pv.o
-obj-$(CONFIG_XEN_PVH) += enlighten_pvh.o
+obj-$(CONFIG_XEN_PVHVM) += enlighten_hvm.o
+obj-$(CONFIG_XEN_PVHVM) += mmu_hvm.o
+obj-$(CONFIG_XEN_PVHVM) += suspend_hvm.o
+obj-$(CONFIG_XEN_PVHVM) += platform-pci-unplug.o
-obj-$(CONFIG_EVENT_TRACING) += trace.o
+obj-$(CONFIG_XEN_PV) += setup.o
+obj-$(CONFIG_XEN_PV) += apic.o
+obj-$(CONFIG_XEN_PV) += pmu.o
+obj-$(CONFIG_XEN_PV) += suspend_pv.o
+obj-$(CONFIG_XEN_PV) += p2m.o
+obj-$(CONFIG_XEN_PV) += enlighten_pv.o
+obj-$(CONFIG_XEN_PV) += mmu_pv.o
+obj-$(CONFIG_XEN_PV) += irq.o
+obj-$(CONFIG_XEN_PV) += multicalls.o
+obj-$(CONFIG_XEN_PV) += xen-asm.o
+obj-$(CONFIG_XEN_PV) += xen-asm_$(BITS).o
+
+obj-$(CONFIG_XEN_PVH) += enlighten_pvh.o
+obj-$(CONFIG_XEN_PVH) += xen-pvh.o
+
+obj-$(CONFIG_EVENT_TRACING) += trace.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_XEN_PV_SMP) += smp_pv.o
obj-$(CONFIG_XEN_PVHVM_SMP) += smp_hvm.o
+
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
+
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
+
obj-$(CONFIG_XEN_DOM0) += vga.o
+
obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
+
obj-$(CONFIG_XEN_EFI) += efi.o
-obj-$(CONFIG_XEN_PVH) += xen-pvh.o
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index 1804b27f9632..1fbb629a9d78 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014 Oracle Co., Daniel Kiper
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bitops.h>
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2eeddd814653..67b2f31a1265 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
#include <linux/bootmem.h>
#endif
@@ -5,6 +7,7 @@
#include <linux/kexec.h>
#include <linux/slab.h>
+#include <xen/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/interface/memory.h>
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index 19c1ff542387..0e75642d42a3 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 52a7c3faee0c..ec7a4209f310 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -995,11 +995,14 @@ void __init xen_setup_vcpu_info_placement(void)
* percpu area for all cpus, so make use of it.
*/
if (xen_have_vcpu_info_placement) {
- pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
- pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
- pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
- pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
- pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
+ pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
+ pv_ops.irq.restore_fl =
+ __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
+ pv_ops.irq.irq_disable =
+ __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
+ pv_ops.irq.irq_enable =
+ __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
+ pv_ops.mmu.read_cr2 = xen_read_cr2_direct;
}
}
@@ -1174,14 +1177,14 @@ static void __init xen_boot_params_init_edd(void)
*/
static void __init xen_setup_gdt(int cpu)
{
- pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
- pv_cpu_ops.load_gdt = xen_load_gdt_boot;
+ pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;
+ pv_ops.cpu.load_gdt = xen_load_gdt_boot;
setup_stack_canary_segment(cpu);
switch_to_new_gdt(cpu);
- pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
- pv_cpu_ops.load_gdt = xen_load_gdt;
+ pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
+ pv_ops.cpu.load_gdt = xen_load_gdt;
}
static void __init xen_dom0_set_legacy_features(void)
@@ -1206,8 +1209,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* Install Xen paravirt ops */
pv_info = xen_info;
- pv_init_ops.patch = paravirt_patch_default;
- pv_cpu_ops = xen_cpu_ops;
+ pv_ops.init.patch = paravirt_patch_default;
+ pv_ops.cpu = xen_cpu_ops;
xen_init_irq_ops();
/*
@@ -1276,8 +1279,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
#endif
if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
- pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
- pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
+ pv_ops.mmu.ptep_modify_prot_start =
+ xen_ptep_modify_prot_start;
+ pv_ops.mmu.ptep_modify_prot_commit =
+ xen_ptep_modify_prot_commit;
}
machine_ops = xen_machine_ops;
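
These enlighten_pv.c hunks assume the former pv_irq_ops/pv_cpu_ops/pv_mmu_ops globals have been folded into one pv_ops instance with per-area substructures. A much-simplified sketch of that layout (fields and type names abridged; not the real paravirt_types.h definition):

    /* Simplified sketch of the consolidated paravirt-ops layout. */
    struct pv_irq_sketch { void (*irq_disable)(void); void (*irq_enable)(void); };
    struct pv_cpu_sketch { void (*load_gdt)(const void *gdt); };
    struct pv_mmu_sketch { unsigned long (*read_cr2)(void); };

    struct pv_ops_sketch {
        struct pv_irq_sketch irq;
        struct pv_cpu_sketch cpu;
        struct pv_mmu_sketch mmu;
    };

    /* One instance; a guest patches individual fields in place,
     * e.g. ops.mmu.read_cr2 = xen_read_cr2_direct; */
    static struct pv_ops_sketch pv_ops;

The mechanical rewrite from pv_mmu_ops.x to pv_ops.mmu.x throughout the Xen code follows directly from that consolidation.
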
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index c85d1a88f476..02e3ab7ff242 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -11,6 +11,7 @@
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
+#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/start_info.h>
@@ -75,7 +76,7 @@ static void __init init_pvh_bootparams(void)
* Version 2.12 supports Xen entry point but we will use default x86/PC
* environment (i.e. hardware_subarch 0).
*/
- pvh_bootparams.hdr.version = 0x212;
+ pvh_bootparams.hdr.version = (2 << 8) | 12;
pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
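
The version fix above is worth spelling out: hdr.version packs the boot-protocol major number in the high byte and the minor in the low byte, so 2.12 is (2 << 8) | 12 = 0x20c, whereas the old literal 0x212 actually decoded as 2.18. A tiny self-check:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int v = (2 << 8) | 12;          /* boot protocol 2.12 */

        assert(v == 0x20c);
        /* the old literal 0x212 decodes as major 2, minor 0x12 = 18 */
        printf("%u.%u\n", v >> 8, v & 0xff);     /* prints 2.12 */
        return 0;
    }
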
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index 92ccc718152d..ecb0d5450334 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/******************************************************************************
* grant_table.c
* x86 specific part
@@ -8,30 +9,6 @@
* Copyright (c) 2004-2005, K A Fraser
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan. Split out x86 specific part.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
*/
#include <linux/sched.h>
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 7515a19fd324..850c93f346c7 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -128,6 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
void __init xen_init_irq_ops(void)
{
- pv_irq_ops = xen_irq_ops;
+ pv_ops.irq = xen_irq_ops;
x86_init.irqs.intr_init = xen_init_IRQ;
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 96fc2f0fdbfe..60e9c37fd79f 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
@@ -6,12 +8,6 @@
#include "multicalls.h"
#include "mmu.h"
-/*
- * Protects atomic reservation decrease/increase against concurrent increases.
- * Also protects non-atomic updates of current_pages and balloon lists.
- */
-DEFINE_SPINLOCK(xen_reservation_lock);
-
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
@@ -42,186 +38,6 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
-static noinline void xen_flush_tlb_all(void)
-{
- struct mmuext_op *op;
- struct multicall_space mcs;
-
- preempt_disable();
-
- mcs = xen_mc_entry(sizeof(*op));
-
- op = mcs.args;
- op->cmd = MMUEXT_TLB_FLUSH_ALL;
- MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-
- preempt_enable();
-}
-
-#define REMAP_BATCH_SIZE 16
-
-struct remap_data {
- xen_pfn_t *pfn;
- bool contiguous;
- bool no_translate;
- pgprot_t prot;
- struct mmu_update *mmu_update;
-};
-
-static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
- unsigned long addr, void *data)
-{
- struct remap_data *rmd = data;
- pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
-
- /*
- * If we have a contiguous range, just update the pfn itself,
- * else update pointer to be "next pfn".
- */
- if (rmd->contiguous)
- (*rmd->pfn)++;
- else
- rmd->pfn++;
-
- rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
- rmd->mmu_update->ptr |= rmd->no_translate ?
- MMU_PT_UPDATE_NO_TRANSLATE :
- MMU_NORMAL_PT_UPDATE;
- rmd->mmu_update->val = pte_val_ma(pte);
- rmd->mmu_update++;
-
- return 0;
-}
-
-static int do_remap_pfn(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *pfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned int domid,
- bool no_translate,
- struct page **pages)
-{
- int err = 0;
- struct remap_data rmd;
- struct mmu_update mmu_update[REMAP_BATCH_SIZE];
- unsigned long range;
- int mapped = 0;
-
- BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
-
- rmd.pfn = pfn;
- rmd.prot = prot;
- /*
- * We use the err_ptr to indicate if there we are doing a contiguous
- * mapping or a discontigious mapping.
- */
- rmd.contiguous = !err_ptr;
- rmd.no_translate = no_translate;
-
- while (nr) {
- int index = 0;
- int done = 0;
- int batch = min(REMAP_BATCH_SIZE, nr);
- int batch_left = batch;
- range = (unsigned long)batch << PAGE_SHIFT;
-
- rmd.mmu_update = mmu_update;
- err = apply_to_page_range(vma->vm_mm, addr, range,
- remap_area_pfn_pte_fn, &rmd);
- if (err)
- goto out;
-
- /* We record the error for each page that gives an error, but
- * continue mapping until the whole set is done */
- do {
- int i;
-
- err = HYPERVISOR_mmu_update(&mmu_update[index],
- batch_left, &done, domid);
-
- /*
- * @err_ptr may be the same buffer as @gfn, so
- * only clear it after each chunk of @gfn is
- * used.
- */
- if (err_ptr) {
- for (i = index; i < index + done; i++)
- err_ptr[i] = 0;
- }
- if (err < 0) {
- if (!err_ptr)
- goto out;
- err_ptr[i] = err;
- done++; /* Skip failed frame. */
- } else
- mapped += done;
- batch_left -= done;
- index += done;
- } while (batch_left);
-
- nr -= batch;
- addr += range;
- if (err_ptr)
- err_ptr += batch;
- cond_resched();
- }
-out:
-
- xen_flush_tlb_all();
-
- return err < 0 ? err : mapped;
-}
-
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t gfn, int nr,
- pgprot_t prot, unsigned domid,
- struct page **pages)
-{
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return -EOPNOTSUPP;
-
- return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
- pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
-
-int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *gfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned domid, struct page **pages)
-{
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
- prot, domid, pages);
-
- /* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
- * and the consequences later is quite hard to detect what the actual
- * cause of "wrong memory was mapped in".
- */
- BUG_ON(err_ptr == NULL);
- return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
- false, pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
-
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *mfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned int domid, struct page **pages)
-{
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return -EOPNOTSUPP;
-
- return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
- true, pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
-
/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int nr, struct page **pages)
diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c
index dd2ad82eee80..57409373750f 100644
--- a/arch/x86/xen/mmu_hvm.c
+++ b/arch/x86/xen/mmu_hvm.c
@@ -73,7 +73,7 @@ static int is_pagetable_dying_supported(void)
void __init xen_hvm_init_mmu_ops(void)
{
if (is_pagetable_dying_supported())
- pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
+ pv_ops.mmu.exit_mmap = xen_hvm_exit_mmap;
#ifdef CONFIG_PROC_VMCORE
WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram));
#endif
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2fe5c9b1816b..70ea598a37d2 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
/*
* Xen mmu operations
*
@@ -99,6 +101,12 @@ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */
/*
+ * Protects atomic reservation decrease/increase against concurrent increases.
+ * Also protects non-atomic updates of current_pages and balloon lists.
+ */
+static DEFINE_SPINLOCK(xen_reservation_lock);
+
+/*
* Note about cr3 (pagetable base) values:
*
* xen_cr3 contains the current logical cr3 value; it contains the
@@ -1907,7 +1915,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
- /* L3_k[511][506] -> level1_fixmap_pgt */
+ /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
convert_pfn_mfn(level2_fixmap_pgt);
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
@@ -1952,7 +1960,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+ for (i = 0; i < FIXMAP_PMD_NUM; i++) {
+ set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
+ PAGE_KERNEL_RO);
+ }
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
@@ -2205,7 +2217,7 @@ static void __init xen_write_cr3_init(unsigned long cr3)
set_page_prot(initial_page_table, PAGE_KERNEL);
set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
- pv_mmu_ops.write_cr3 = &xen_write_cr3;
+ pv_ops.mmu.write_cr3 = &xen_write_cr3;
}
/*
@@ -2354,27 +2366,27 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
static void __init xen_post_allocator_init(void)
{
- pv_mmu_ops.set_pte = xen_set_pte;
- pv_mmu_ops.set_pmd = xen_set_pmd;
- pv_mmu_ops.set_pud = xen_set_pud;
+ pv_ops.mmu.set_pte = xen_set_pte;
+ pv_ops.mmu.set_pmd = xen_set_pmd;
+ pv_ops.mmu.set_pud = xen_set_pud;
#ifdef CONFIG_X86_64
- pv_mmu_ops.set_p4d = xen_set_p4d;
+ pv_ops.mmu.set_p4d = xen_set_p4d;
#endif
/* This will work as long as patching hasn't happened yet
(which it hasn't) */
- pv_mmu_ops.alloc_pte = xen_alloc_pte;
- pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
- pv_mmu_ops.release_pte = xen_release_pte;
- pv_mmu_ops.release_pmd = xen_release_pmd;
+ pv_ops.mmu.alloc_pte = xen_alloc_pte;
+ pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
+ pv_ops.mmu.release_pte = xen_release_pte;
+ pv_ops.mmu.release_pmd = xen_release_pmd;
#ifdef CONFIG_X86_64
- pv_mmu_ops.alloc_pud = xen_alloc_pud;
- pv_mmu_ops.release_pud = xen_release_pud;
+ pv_ops.mmu.alloc_pud = xen_alloc_pud;
+ pv_ops.mmu.release_pud = xen_release_pud;
#endif
- pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
+ pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
#ifdef CONFIG_X86_64
- pv_mmu_ops.write_cr3 = &xen_write_cr3;
+ pv_ops.mmu.write_cr3 = &xen_write_cr3;
#endif
}
@@ -2462,7 +2474,7 @@ void __init xen_init_mmu_ops(void)
x86_init.paging.pagetable_init = xen_pagetable_init;
x86_init.hyper.init_after_bootmem = xen_after_bootmem;
- pv_mmu_ops = xen_mmu_ops;
+ pv_ops.mmu = xen_mmu_ops;
memset(dummy_mapping, 0xff, PAGE_SIZE);
}
@@ -2662,6 +2674,138 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+static noinline void xen_flush_tlb_all(void)
+{
+ struct mmuext_op *op;
+ struct multicall_space mcs;
+
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*op));
+
+ op = mcs.args;
+ op->cmd = MMUEXT_TLB_FLUSH_ALL;
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
+}
+
+#define REMAP_BATCH_SIZE 16
+
+struct remap_data {
+ xen_pfn_t *pfn;
+ bool contiguous;
+ bool no_translate;
+ pgprot_t prot;
+ struct mmu_update *mmu_update;
+};
+
+static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
+ unsigned long addr, void *data)
+{
+ struct remap_data *rmd = data;
+ pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
+
+ /*
+ * If we have a contiguous range, just update the pfn itself,
+ * else update pointer to be "next pfn".
+ */
+ if (rmd->contiguous)
+ (*rmd->pfn)++;
+ else
+ rmd->pfn++;
+
+ rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+ rmd->mmu_update->ptr |= rmd->no_translate ?
+ MMU_PT_UPDATE_NO_TRANSLATE :
+ MMU_NORMAL_PT_UPDATE;
+ rmd->mmu_update->val = pte_val_ma(pte);
+ rmd->mmu_update++;
+
+ return 0;
+}
+
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+ unsigned int domid, bool no_translate, struct page **pages)
+{
+ int err = 0;
+ struct remap_data rmd;
+ struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+ unsigned long range;
+ int mapped = 0;
+
+ BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
+
+ rmd.pfn = pfn;
+ rmd.prot = prot;
+ /*
+ * We use the err_ptr to indicate whether we are doing a contiguous
+ * mapping or a discontiguous mapping.

+ */
+ rmd.contiguous = !err_ptr;
+ rmd.no_translate = no_translate;
+
+ while (nr) {
+ int index = 0;
+ int done = 0;
+ int batch = min(REMAP_BATCH_SIZE, nr);
+ int batch_left = batch;
+
+ range = (unsigned long)batch << PAGE_SHIFT;
+
+ rmd.mmu_update = mmu_update;
+ err = apply_to_page_range(vma->vm_mm, addr, range,
+ remap_area_pfn_pte_fn, &rmd);
+ if (err)
+ goto out;
+
+ /*
+ * We record the error for each page that gives an error, but
+ * continue mapping until the whole set is done
+ */
+ do {
+ int i;
+
+ err = HYPERVISOR_mmu_update(&mmu_update[index],
+ batch_left, &done, domid);
+
+ /*
+ * @err_ptr may be the same buffer as @gfn, so
+ * only clear it after each chunk of @gfn is
+ * used.
+ */
+ if (err_ptr) {
+ for (i = index; i < index + done; i++)
+ err_ptr[i] = 0;
+ }
+ if (err < 0) {
+ if (!err_ptr)
+ goto out;
+ err_ptr[i] = err;
+ done++; /* Skip failed frame. */
+ } else
+ mapped += done;
+ batch_left -= done;
+ index += done;
+ } while (batch_left);
+
+ nr -= batch;
+ addr += range;
+ if (err_ptr)
+ err_ptr += batch;
+ cond_resched();
+ }
+out:
+
+ xen_flush_tlb_all();
+
+ return err < 0 ? err : mapped;
+}
+EXPORT_SYMBOL_GPL(xen_remap_pfn);
+
#ifdef CONFIG_KEXEC_CORE
phys_addr_t paddr_vmcoreinfo_note(void)
{
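
The xen_remap_pfn loop moved in above keeps a batching pattern worth noting: submit REMAP_BATCH_SIZE frames at a time, record a per-frame status when err_ptr is supplied, skip a failed frame, and keep going until the whole set is done. A hedged standalone sketch of the same pattern — backend_update is a hypothetical stand-in for HYPERVISOR_mmu_update, and the page-table walk and final TLB flush are omitted:

    #define BATCH 16

    /* Hypothetical backend: processes up to n items, reports how many
     * completed in *done, returns < 0 on the first failing item. */
    extern int backend_update(const int *batch, int n, int *done);

    static int process_all(const int *items, int *err_ptr, int nr)
    {
        int mapped = 0;

        while (nr) {
            int batch = nr < BATCH ? nr : BATCH;
            int index = 0, left = batch;

            do {
                int done = 0;
                int err = backend_update(items + index, left, &done);

                if (err_ptr)
                    for (int i = index; i < index + done; i++)
                        err_ptr[i] = 0;          /* these succeeded */
                if (err < 0) {
                    if (!err_ptr)
                        return err;              /* nowhere to record it */
                    err_ptr[index + done] = err; /* record the failure */
                    done++;                      /* skip failed frame */
                } else {
                    mapped += done;
                }
                left -= done;
                index += done;
            } while (left);

            items += batch;
            if (err_ptr)
                err_ptr += batch;
            nr -= batch;
        }
        return mapped;                           /* frames actually mapped */
    }
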
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 159a897151d6..d6d74efd8912 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
/*
* Xen leaves the responsibility for maintaining p2m mappings to the
* guests themselves, but it must also access and update the p2m array
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 37c6056a7bba..33293ce01d8d 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
/* Glue code to lib/swiotlb-xen.c */
#include <linux/dma-mapping.h>
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 33a783c77d96..66ab96a4e2b3 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -1,28 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+
/******************************************************************************
* platform-pci-unplug.c
*
* Xen platform PCI device driver
* Copyright (c) 2010, Citrix
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/export.h>
+#include <xen/xen.h>
#include <xen/platform_pci.h>
#include "xen-ops.h"
@@ -30,7 +19,6 @@
#define XEN_PLATFORM_ERR_PROTOCOL -2
#define XEN_PLATFORM_ERR_BLACKLIST -3
-#ifdef CONFIG_XEN_PVHVM
/* store the value of xen_emul_unplug after the unplug is done */
static int xen_platform_pci_unplug;
static int xen_emul_unplug;
@@ -214,4 +202,3 @@ static int __init parse_xen_emul_unplug(char *arg)
return 0;
}
early_param("xen_emul_unplug", parse_xen_emul_unplug);
-#endif
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 7d00d4ad44d4..e13b0b49fcdf 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -3,6 +3,7 @@
#include <linux/interrupt.h>
#include <asm/xen/hypercall.h>
+#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
@@ -90,6 +91,12 @@ static void xen_pmu_arch_init(void)
k7_counters_mirrored = 0;
break;
}
+ } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ amd_num_counters = F10H_NUM_COUNTERS;
+ amd_counters_base = MSR_K7_PERFCTR0;
+ amd_ctrls_base = MSR_K7_EVNTSEL0;
+ amd_msr_step = 1;
+ k7_counters_mirrored = 0;
} else {
uint32_t eax, ebx, ecx, edx;
@@ -285,7 +292,7 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
if (is_amd_pmu_msr(msr)) {
if (!xen_amd_pmu_emulate(msr, val, 1))
*val = native_read_msr_safe(msr, err);
@@ -308,7 +315,7 @@ bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
{
uint64_t val = ((uint64_t)high << 32) | low;
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
if (is_amd_pmu_msr(msr)) {
if (!xen_amd_pmu_emulate(msr, &val, 0))
*err = native_write_msr_safe(msr, low, high);
@@ -379,7 +386,7 @@ static unsigned long long xen_intel_read_pmc(int counter)
unsigned long long xen_read_pmc(int counter)
{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return xen_amd_read_pmc(counter);
else
return xen_intel_read_pmc(counter);
@@ -478,7 +485,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
int err, ret = IRQ_NONE;
- struct pt_regs regs;
+ struct pt_regs regs = {0};
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
uint8_t xenpmu_flags = get_xenpmu_flags();
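
The pmu.c changes above replace vendor == AMD tests with vendor != INTEL so that Hygon, an AMD-derived design sharing the K7-style PMU MSR layout, takes the AMD code paths without enumerating every vendor. A minimal sketch of the dispatch idea (enum values are illustrative, not the kernel's X86_VENDOR_* constants):

    enum vendor_sketch { VENDOR_INTEL, VENDOR_AMD, VENDOR_HYGON };

    static int uses_amd_pmu(enum vendor_sketch v)
    {
        /* AMD and Hygon share the K7 counter/event-select MSR layout */
        return v != VENDOR_INTEL;
    }
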
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index e3b18ad49889..145506f9fdbe 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -22,6 +22,7 @@
#include <linux/tick.h>
#include <linux/nmi.h>
#include <linux/cpuhotplug.h>
+#include <linux/stackprotector.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
@@ -88,6 +89,7 @@ static void cpu_bringup(void)
asmlinkage __visible void cpu_bringup_and_idle(void)
{
cpu_bringup();
+ boot_init_stack_canary();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 973f10e05211..23f6793af88a 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -141,11 +141,12 @@ void __init xen_init_spinlocks(void)
printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
__pv_init_lock_hash();
- pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
- pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
- pv_lock_ops.wait = xen_qlock_wait;
- pv_lock_ops.kick = xen_qlock_kick;
- pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
+ pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+ pv_ops.lock.queued_spin_unlock =
+ PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+ pv_ops.lock.wait = xen_qlock_wait;
+ pv_ops.lock.kick = xen_qlock_kick;
+ pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}
static __init int xen_parse_nopvspin(char *arg)
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index c84f1e039d84..72bf446c3fee 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -513,7 +513,7 @@ static void __init xen_time_init(void)
void __init xen_init_time_ops(void)
{
xen_sched_clock_offset = xen_clocksource_read();
- pv_time_ops = xen_time_ops;
+ pv_ops.time = xen_time_ops;
x86_init.timers.timer_init = xen_time_init;
x86_init.timers.setup_percpu_clockev = x86_init_noop;
@@ -555,7 +555,7 @@ void __init xen_hvm_init_time_ops(void)
}
xen_sched_clock_offset = xen_clocksource_read();
- pv_time_ops = xen_time_ops;
+ pv_ops.time = xen_time_ops;
x86_init.timers.setup_percpu_clockev = xen_time_init;
x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
diff --git a/arch/x86/xen/vdso.h b/arch/x86/xen/vdso.h
index 861fedfe5230..873c54c488fe 100644
--- a/arch/x86/xen/vdso.h
+++ b/arch/x86/xen/vdso.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
/* Bit used for the pseudo-hwcap for non-negative segments. We use
bit 1 to avoid bugs in some versions of glibc when bit 0 is
used; the choice is otherwise arbitrary. */
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 417b339e5c8e..bb1c2da0381d 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -91,13 +91,15 @@ ENTRY(xen_iret)
ENTRY(xen_sysret64)
/*
* We're already on the usermode stack at this point, but
- * still with the kernel gs, so we can easily switch back
+ * still with the kernel gs, so we can easily switch back.
+ *
+ * tss.sp2 is scratch space.
*/
- movq %rsp, PER_CPU_VAR(rsp_scratch)
+ movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
pushq $__USER_DS
- pushq PER_CPU_VAR(rsp_scratch)
+ pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
pushq %r11
pushq $__USER_CS
pushq %rcx
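
The xen_sysret64 change above parks the user RSP in tss.sp2 instead of the old rsp_scratch per-CPU variable; since the kernel never runs at ring 2, the sp2 slot of the hardware TSS is free to borrow as scratch space. A schematic sketch of the idea (layout abridged; not the real tss_struct):

    #include <stdint.h>

    /* Abridged: the x86-64 hardware TSS carries sp0..sp2 stack slots;
     * rings 1 and 2 are unused, so sp2 is available as scratch. */
    struct tss_sketch {
        uint64_t sp0, sp1, sp2;
    };

    static struct tss_sketch cpu_tss_rw;   /* per-CPU in the real kernel */

    static void stash_user_rsp(uint64_t user_rsp)
    {
        /* saved before switching stacks; later pushed into the
         * sysret frame in place of the old rsp_scratch variable */
        cpu_tss_rw.sp2 = user_rsp;
    }
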
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index ca2d3b2bf2af..b0e471506cd8 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
/*
* Copyright C 2016, Oracle and/or its affiliates. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
*/
.code32
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index b9ad83a0ee5d..ea5d8d03e53b 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -13,7 +13,7 @@ config XTENSA
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
- select DMA_NONCOHERENT_OPS
+ select DMA_DIRECT_OPS
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_SHOW
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index ed66db3bc9bb..574e5520968c 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -5,9 +5,9 @@
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_GETPGRP
/*
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index 91907590d183..8dff506caf07 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -35,8 +35,8 @@ sed-y = -e ':a; s/\*(\([^)]*\)\.text\.unlikely/*(\1.literal.unlikely .{text}.unl
-e 's/\.{text}/.text/g'
quiet_cmd__cpp_lds_S = LDS $@
-cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
- | sed $(sed-y) >$@
+cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ \
+ -DLINKER_SCRIPT $< | sed $(sed-y) >$@
$(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
$(call if_changed_dep,_cpp_lds_S)
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index a1c3edb8ad56..b727b18a68ac 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -197,7 +197,6 @@ SECTIONS
INIT_SETUP(XCHAL_ICACHE_LINESIZE)
INIT_CALLS
CON_INITCALL
- SECURITY_INITCALL
INIT_RAM_FS
}
diff --git a/block/Kconfig b/block/Kconfig
index 1f2469a0123c..f7045aa47edb 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -74,7 +74,6 @@ config BLK_DEV_BSG
config BLK_DEV_BSGLIB
bool "Block layer SG support v4 helper lib"
- default n
select BLK_DEV_BSG
select BLK_SCSI_REQUEST
help
@@ -107,7 +106,6 @@ config BLK_DEV_ZONED
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
depends on BLK_CGROUP=y
- default n
---help---
Block layer bio throttling support. It can be used to limit
the IO rate to a device. IO rate policies are per cgroup and
@@ -119,7 +117,6 @@ config BLK_DEV_THROTTLING
config BLK_DEV_THROTTLING_LOW
bool "Block throttling .low limit interface support (EXPERIMENTAL)"
depends on BLK_DEV_THROTTLING
- default n
---help---
Add .low limit interface for block throttling. The low limit is a best
effort limit to prioritize cgroups. Depending on the setting, the limit
@@ -130,7 +127,6 @@ config BLK_DEV_THROTTLING_LOW
config BLK_CMDLINE_PARSER
bool "Block device command line partition parser"
- default n
---help---
Enabling this option allows you to specify the partition layout from
the kernel boot args. This is typically of use for embedded devices
@@ -141,7 +137,6 @@ config BLK_CMDLINE_PARSER
config BLK_WBT
bool "Enable support for block device writeback throttling"
- default n
---help---
Enabling this option enables the block layer to throttle buffered
background writeback from the VM, making it more smooth and having
@@ -152,7 +147,6 @@ config BLK_WBT
config BLK_CGROUP_IOLATENCY
bool "Enable support for latency based cgroup IO protection"
depends on BLK_CGROUP=y
- default n
---help---
Enabling this option enables the .latency interface for IO throttling.
The IO controller will attempt to maintain average IO latencies below
@@ -163,7 +157,6 @@ config BLK_CGROUP_IOLATENCY
config BLK_WBT_SQ
bool "Single queue writeback throttling"
- default n
depends on BLK_WBT
---help---
Enable writeback throttling by default on legacy single queue devices
@@ -228,4 +221,7 @@ config BLK_MQ_RDMA
depends on BLOCK && INFINIBAND
default y
+config BLK_PM
+ def_bool BLOCK && PM
+
source block/Kconfig.iosched
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index a4a8914bf7a4..f95a48b0d7b2 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -36,7 +36,6 @@ config IOSCHED_CFQ
config CFQ_GROUP_IOSCHED
bool "CFQ Group Scheduling support"
depends on IOSCHED_CFQ && BLK_CGROUP
- default n
---help---
Enable group IO scheduling in CFQ.
@@ -82,7 +81,6 @@ config MQ_IOSCHED_KYBER
config IOSCHED_BFQ
tristate "BFQ I/O scheduler"
- default n
---help---
BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
the device among all processes according to their weights,
@@ -94,7 +92,6 @@ config IOSCHED_BFQ
config BFQ_GROUP_IOSCHED
bool "BFQ hierarchical scheduling support"
depends on IOSCHED_BFQ && BLK_CGROUP
- default n
---help---
Enable hierarchical scheduling in BFQ, using the blkio
diff --git a/block/Makefile b/block/Makefile
index 572b33f32c07..27eac600474f 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -37,3 +37,4 @@ obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
+obj-$(CONFIG_BLK_PM) += blk-pm.o
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 9fe5952d117d..d9a7916ff0ab 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -642,7 +642,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
uint64_t serial_nr;
rcu_read_lock();
- serial_nr = bio_blkcg(bio)->css.serial_nr;
+ serial_nr = __bio_blkcg(bio)->css.serial_nr;
/*
* Check whether blkcg has changed. The condition may trigger
@@ -651,7 +651,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
goto out;
- bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+ bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
/*
* Update blkg_path for bfq_log_* functions. We cache this
* path, and update it here, for the following
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 653100fb719e..6075100f03a5 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -624,12 +624,13 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
}
/*
- * Tell whether there are active queues or groups with differentiated weights.
+ * Tell whether there are active queues with different weights or
+ * active groups.
*/
-static bool bfq_differentiated_weights(struct bfq_data *bfqd)
+static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
{
/*
- * For weights to differ, at least one of the trees must contain
+ * For queue weights to differ, queue_weights_tree must contain
* at least two nodes.
*/
return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
@@ -637,9 +638,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
) ||
- (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
- (bfqd->group_weights_tree.rb_node->rb_left ||
- bfqd->group_weights_tree.rb_node->rb_right)
+ (bfqd->num_active_groups > 0
#endif
);
}
@@ -657,26 +656,25 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
* 3) all active groups at the same level in the groups tree have the same
* number of children.
*
- * Unfortunately, keeping the necessary state for evaluating exactly the
- * above symmetry conditions would be quite complex and time-consuming.
- * Therefore this function evaluates, instead, the following stronger
- * sub-conditions, for which it is much easier to maintain the needed
- * state:
+ * Unfortunately, keeping the necessary state for evaluating exactly
+ * the last two symmetry sub-conditions above would be quite complex
+ * and time-consuming. Therefore this function evaluates, instead,
+ * only the following stronger two sub-conditions, for which it is
+ * much easier to maintain the needed state:
* 1) all active queues have the same weight,
- * 2) all active groups have the same weight,
- * 3) all active groups have at most one active child each.
- * In particular, the last two conditions are always true if hierarchical
- * support and the cgroups interface are not enabled, thus no state needs
- * to be maintained in this case.
+ * 2) there are no active groups.
+ * In particular, the last condition is always true if hierarchical
+ * support or the cgroups interface are not enabled, thus no state
+ * needs to be maintained in this case.
*/
static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
{
- return !bfq_differentiated_weights(bfqd);
+ return !bfq_varied_queue_weights_or_active_groups(bfqd);
}
/*
* If the weight-counter tree passed as input contains no counter for
- * the weight of the input entity, then add that counter; otherwise just
+ * the weight of the input queue, then add that counter; otherwise just
* increment the existing counter.
*
* Note that weight-counter trees contain few nodes in mostly symmetric
@@ -687,25 +685,25 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
* In most scenarios, the rate at which nodes are created/destroyed
* should be low too.
*/
-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
+void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct rb_root *root)
{
+ struct bfq_entity *entity = &bfqq->entity;
struct rb_node **new = &(root->rb_node), *parent = NULL;
/*
- * Do not insert if the entity is already associated with a
+ * Do not insert if the queue is already associated with a
* counter, which happens if:
- * 1) the entity is associated with a queue,
- * 2) a request arrival has caused the queue to become both
+ * 1) a request arrival has caused the queue to become both
* non-weight-raised, and hence change its weight, and
* backlogged; in this respect, each of the two events
* causes an invocation of this function,
- * 3) this is the invocation of this function caused by the
+ * 2) this is the invocation of this function caused by the
* second event. This second invocation is actually useless,
* and we handle this fact by exiting immediately. More
* efficient or clearer solutions might possibly be adopted.
*/
- if (entity->weight_counter)
+ if (bfqq->weight_counter)
return;
while (*new) {
@@ -715,7 +713,7 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
parent = *new;
if (entity->weight == __counter->weight) {
- entity->weight_counter = __counter;
+ bfqq->weight_counter = __counter;
goto inc_counter;
}
if (entity->weight < __counter->weight)
@@ -724,66 +722,67 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
new = &((*new)->rb_right);
}
- entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
- GFP_ATOMIC);
+ bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
+ GFP_ATOMIC);
/*
* In the unlucky event of an allocation failure, we just
- * exit. This will cause the weight of entity to not be
- * considered in bfq_differentiated_weights, which, in its
- * turn, causes the scenario to be deemed wrongly symmetric in
- * case entity's weight would have been the only weight making
- * the scenario asymmetric. On the bright side, no unbalance
- * will however occur when entity becomes inactive again (the
- * invocation of this function is triggered by an activation
- * of entity). In fact, bfq_weights_tree_remove does nothing
- * if !entity->weight_counter.
+ * exit. This will cause the weight of the queue to not be
+ * considered in bfq_varied_queue_weights_or_active_groups,
+ * which, in its turn, causes the scenario to be deemed
+ * wrongly symmetric in case bfqq's weight would have been
+ * the only weight making the scenario asymmetric. On the
+ * bright side, no unbalance will however occur when bfqq
+ * becomes inactive again (the invocation of this function
+ * is triggered by an activation of queue). In fact,
+ * bfq_weights_tree_remove does nothing if
+ * !bfqq->weight_counter.
*/
- if (unlikely(!entity->weight_counter))
+ if (unlikely(!bfqq->weight_counter))
return;
- entity->weight_counter->weight = entity->weight;
- rb_link_node(&entity->weight_counter->weights_node, parent, new);
- rb_insert_color(&entity->weight_counter->weights_node, root);
+ bfqq->weight_counter->weight = entity->weight;
+ rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
+ rb_insert_color(&bfqq->weight_counter->weights_node, root);
inc_counter:
- entity->weight_counter->num_active++;
+ bfqq->weight_counter->num_active++;
}
/*
- * Decrement the weight counter associated with the entity, and, if the
+ * Decrement the weight counter associated with the queue, and, if the
* counter reaches 0, remove the counter from the tree.
* See the comments to the function bfq_weights_tree_add() for considerations
* about overhead.
*/
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
- struct bfq_entity *entity,
+ struct bfq_queue *bfqq,
struct rb_root *root)
{
- if (!entity->weight_counter)
+ if (!bfqq->weight_counter)
return;
- entity->weight_counter->num_active--;
- if (entity->weight_counter->num_active > 0)
+ bfqq->weight_counter->num_active--;
+ if (bfqq->weight_counter->num_active > 0)
goto reset_entity_pointer;
- rb_erase(&entity->weight_counter->weights_node, root);
- kfree(entity->weight_counter);
+ rb_erase(&bfqq->weight_counter->weights_node, root);
+ kfree(bfqq->weight_counter);
reset_entity_pointer:
- entity->weight_counter = NULL;
+ bfqq->weight_counter = NULL;
}
/*
- * Invoke __bfq_weights_tree_remove on bfqq and all its inactive
- * parent entities.
+ * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
+ * of active groups for each queue's inactive parent entity.
*/
void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
struct bfq_entity *entity = bfqq->entity.parent;
- __bfq_weights_tree_remove(bfqd, &bfqq->entity,
+ __bfq_weights_tree_remove(bfqd, bfqq,
&bfqd->queue_weights_tree);
for_each_entity(entity) {
@@ -797,17 +796,13 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
* next_in_service for details on why
* in_service_entity must be checked too).
*
- * As a consequence, the weight of entity is
- * not to be removed. In addition, if entity
- * is active, then its parent entities are
- * active as well, and thus their weights are
- * not to be removed either. In the end, this
- * loop must stop here.
+ * As a consequence, its parent entities are
+ * active as well, and thus this loop must
+ * stop here.
*/
break;
}
- __bfq_weights_tree_remove(bfqd, entity,
- &bfqd->group_weights_tree);
+ bfqd->num_active_groups--;
}
}
@@ -3182,6 +3177,13 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
}
+static bool bfq_bfqq_injectable(struct bfq_queue *bfqq)
+{
+ return BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
+ blk_queue_nonrot(bfqq->bfqd->queue) &&
+ bfqq->bfqd->hw_tag;
+}
+
/**
* bfq_bfqq_expire - expire a queue.
* @bfqd: device owning the queue.
@@ -3291,6 +3293,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
if (ref == 1) /* bfqq is gone, no more actions on it */
return;
+ bfqq->injected_service = 0;
+
/* mark bfqq as waiting a request only if a bic still points to it */
if (!bfq_bfqq_busy(bfqq) &&
reason != BFQQE_BUDGET_TIMEOUT &&
@@ -3497,9 +3501,11 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* symmetric scenario where:
* (i) each of these processes must get the same throughput as
* the others;
- * (ii) all these processes have the same I/O pattern
- (either sequential or random).
- * In fact, in such a scenario, the drive will tend to treat
+ * (ii) the I/O of each process has the same properties, in
+ * terms of locality (sequential or random), direction
+ * (reads or writes), request sizes, greediness
+ * (from I/O-bound to sporadic), and so on.
+ * In fact, in such a scenario, the drive tends to treat
* the requests of each of these processes in about the same
* way as the requests of the others, and thus to provide
* each of these processes with about the same throughput
@@ -3508,18 +3514,50 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* certainly needed to guarantee that bfqq receives its
* assigned fraction of the device throughput (see [1] for
* details).
+ * The problem is that idling may significantly reduce
+ * throughput with certain combinations of types of I/O and
+ * devices. An important example is sync random I/O, on flash
+ * storage with command queueing. So, unless bfqq falls in the
+ * above cases where idling also boosts throughput, it would
+ * be important to check conditions (i) and (ii) accurately,
+ * so as to avoid idling when not strictly needed for service
+ * guarantees.
+ *
+ * Unfortunately, it is extremely difficult to thoroughly
+ * check condition (ii). And, in case there are active groups,
+ * it becomes very difficult to check condition (i) too. In
+ * fact, if there are active groups, then, for condition (i)
+ * to become false, it is enough that an active group contains
+ * more active processes or sub-groups than some other active
+ * group. We address this issue with the following bi-modal
+ * behavior, implemented in the function
+ * bfq_symmetric_scenario().
*
- * We address this issue by controlling, actually, only the
- * symmetry sub-condition (i), i.e., provided that
- * sub-condition (i) holds, idling is not performed,
- * regardless of whether sub-condition (ii) holds. In other
- * words, only if sub-condition (i) holds, then idling is
+ * If there are active groups, then the scenario is tagged as
+ * asymmetric, conservatively, without checking any of the
+ * conditions (i) and (ii). So the device is idled for bfqq.
+ * This behavior matches also the fact that groups are created
+ * exactly if controlling I/O (to preserve bandwidth and
+ * latency guarantees) is a primary concern.
+ *
+ * On the opposite end, if there are no active groups, then
+ * only condition (i) is actually controlled, i.e., provided
+ * that condition (i) holds, idling is not performed,
+ * regardless of whether condition (ii) holds. In other words,
+ * only if condition (i) does not hold, then idling is
* allowed, and the device tends to be prevented from queueing
- * many requests, possibly of several processes. The reason
- * for not controlling also sub-condition (ii) is that we
- * exploit preemption to preserve guarantees in case of
- * symmetric scenarios, even if (ii) does not hold, as
- * explained in the next two paragraphs.
+ * many requests, possibly of several processes. Since there
+ * are no active groups, to control condition (i) it is
+ * enough to check whether all active queues have the same
+ * weight.
+ *
+ * Not checking condition (ii) evidently exposes bfqq to the
+ * risk of getting less throughput than its fair share.
+ * However, for queues with the same weight, a further
+ * mechanism, preemption, mitigates or even eliminates this
+ * problem. And it does so without consequences on overall
+ * throughput. This mechanism and its benefits are explained
+ * in the next three paragraphs.
*
* Even if a queue, say Q, is expired when it remains idle, Q
* can still preempt the new in-service queue if the next
@@ -3533,11 +3571,7 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* idling allows the internal queues of the device to contain
* many requests, and thus to reorder requests, we can rather
* safely assume that the internal scheduler still preserves a
- * minimum of mid-term fairness. The motivation for using
- * preemption instead of idling is that, by not idling,
- * service guarantees are preserved without minimally
- * sacrificing throughput. In other words, both a high
- * throughput and its desired distribution are obtained.
+ * minimum of mid-term fairness.
*
* More precisely, this preemption-based, idleless approach
* provides fairness in terms of IOPS, and not sectors per
@@ -3556,22 +3590,27 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* 1024/8 times as high as the service received by the other
* queue.
*
- * On the other hand, device idling is performed, and thus
- * pure sector-domain guarantees are provided, for the
- * following queues, which are likely to need stronger
- * throughput guarantees: weight-raised queues, and queues
- * with a higher weight than other queues. When such queues
- * are active, sub-condition (i) is false, which triggers
- * device idling.
+ * The motivation for using preemption instead of idling (for
+ * queues with the same weight) is that, by not idling,
+ * service guarantees are preserved (completely or at least in
+ * part) without minimally sacrificing throughput. And, if
+ * there is no active group, then the primary expectation for
+ * this device is probably a high throughput.
*
- * According to the above considerations, the next variable is
- * true (only) if sub-condition (i) holds. To compute the
- * value of this variable, we not only use the return value of
- * the function bfq_symmetric_scenario(), but also check
- * whether bfqq is being weight-raised, because
- * bfq_symmetric_scenario() does not take into account also
- * weight-raised queues (see comments on
- * bfq_weights_tree_add()).
+ * We are now left only with explaining the additional
+ * compound condition that is checked below for deciding
+ * whether the scenario is asymmetric. To explain this
+ * compound condition, we need to add that the function
+ * bfq_symmetric_scenario checks the weights of only
+ * non-weight-raised queues, for efficiency reasons (see
+ * comments on bfq_weights_tree_add()). Then the fact that
+ * bfqq is weight-raised is checked explicitly here. More
+ * precisely, the compound condition below takes into account
+ * also the fact that, even if bfqq is being weight-raised,
+ * the scenario is still symmetric if all active queues happen
+ * to be weight-raised. Actually, we should be even more
+ * precise here, and differentiate between interactive weight
+ * raising and soft real-time weight raising.
*
* As a side note, it is worth considering that the above
* device-idling countermeasures may however fail in the
@@ -3583,7 +3622,8 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* to let requests be served in the desired order until all
* the requests already queued in the device have been served.
*/
- asymmetric_scenario = bfqq->wr_coeff > 1 ||
+ asymmetric_scenario = (bfqq->wr_coeff > 1 &&
+ bfqd->wr_busy_queues < bfqd->busy_queues) ||
!bfq_symmetric_scenario(bfqd);
/*
@@ -3629,6 +3669,30 @@ static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
}
+static struct bfq_queue *bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
+{
+ struct bfq_queue *bfqq;
+
+ /*
+ * A linear search; but, with a high probability, very few
+ * steps are needed to find a candidate queue, i.e., a queue
+ * with enough budget left for its next request. In fact:
+ * - BFQ dynamically updates the budget of every queue so as
+ * to accommodate the expected backlog of the queue;
+ * - if a queue gets all its requests dispatched as injected
+ * service, then the queue is removed from the active list
+ * (and re-added only if it gets new requests, but with
+ * enough budget for its new backlog).
+ */
+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
+ if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
+ bfq_bfqq_budget_left(bfqq))
+ return bfqq;
+
+ return NULL;
+}
+
/*
* Select a queue for service. If we have a current queue in service,
* check whether to continue servicing it, or retrieve and set a new one.
@@ -3710,10 +3774,19 @@ check_queue:
* No requests pending. However, if the in-service queue is idling
* for a new request, or has requests waiting for a completion and
* may idle after their completion, then keep it anyway.
+ *
+ * Yet, to boost throughput, inject service from other queues if
+ * possible.
*/
if (bfq_bfqq_wait_request(bfqq) ||
(bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
- bfqq = NULL;
+ if (bfq_bfqq_injectable(bfqq) &&
+ bfqq->injected_service * bfqq->inject_coeff <
+ bfqq->entity.service * 10)
+ bfqq = bfq_choose_bfqq_for_injection(bfqd);
+ else
+ bfqq = NULL;
+
goto keep_queue;
}
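The inequality in the new branch above is the whole injection throttle:
injection is allowed only while the injected service stays below the
queue's own service scaled by 10/inject_coeff. A standalone sketch of
the same test (the helper name is hypothetical, not part of the patch):

/* Sketch only: mirrors the dispatch-time check above. */
static bool injection_within_budget(unsigned long long injected_service,
				    unsigned long long own_service,
				    unsigned int inject_coeff)
{
	/*
	 * inject_coeff is in tenths: with inject_coeff == 1, injection
	 * may continue until it reaches 10x the service received by
	 * the queue itself, i.e., ~90% of the total dispatched service.
	 */
	return injected_service * inject_coeff < own_service * 10;
}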
@@ -3803,6 +3876,14 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
bfq_dispatch_remove(bfqd->queue, rq);
+ if (bfqq != bfqd->in_service_queue) {
+ if (likely(bfqd->in_service_queue))
+ bfqd->in_service_queue->injected_service +=
+ bfq_serv_to_charge(rq, bfqq);
+
+ goto return_rq;
+ }
+
/*
* If weight raising has to terminate for bfqq, then next
* function causes an immediate update of bfqq's weight,
@@ -3821,13 +3902,12 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
* belongs to CLASS_IDLE and other queues are waiting for
* service.
*/
- if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
- goto expire;
-
- return rq;
+ if (!(bfqd->busy_queues > 1 && bfq_class_idle(bfqq)))
+ goto return_rq;
-expire:
bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
+
+return_rq:
return rq;
}
@@ -4232,6 +4312,13 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_mark_bfqq_has_short_ttime(bfqq);
bfq_mark_bfqq_sync(bfqq);
bfq_mark_bfqq_just_created(bfqq);
+ /*
+ * Aggressively inject a lot of service: up to 90% of the
+ * total service dispatched while bfqq is in service. The
+ * coefficient remains constant for the lifetime of bfqq,
+ * but this behavior may change after enough testing and
+ * tuning.
+ */
+ bfqq->inject_coeff = 1;
} else
bfq_clear_bfqq_sync(bfqq);
@@ -4297,7 +4384,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
rcu_read_lock();
- bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
+ bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
if (!bfqg) {
bfqq = &bfqd->oom_bfqq;
goto out;
@@ -5330,7 +5417,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
bfqd->queue_weights_tree = RB_ROOT;
- bfqd->group_weights_tree = RB_ROOT;
+ bfqd->num_active_groups = 0;
INIT_LIST_HEAD(&bfqd->active_list);
INIT_LIST_HEAD(&bfqd->idle_list);
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index a8a2e5aca4d4..77651d817ecd 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -108,15 +108,14 @@ struct bfq_sched_data {
};
/**
- * struct bfq_weight_counter - counter of the number of all active entities
+ * struct bfq_weight_counter - counter of the number of all active queues
* with a given weight.
*/
struct bfq_weight_counter {
- unsigned int weight; /* weight of the entities this counter refers to */
- unsigned int num_active; /* nr of active entities with this weight */
+ unsigned int weight; /* weight of the queues this counter refers to */
+ unsigned int num_active; /* nr of active queues with this weight */
/*
- * Weights tree member (see bfq_data's @queue_weights_tree and
- * @group_weights_tree)
+ * Weights tree member (see bfq_data's @queue_weights_tree)
*/
struct rb_node weights_node;
};
@@ -151,8 +150,6 @@ struct bfq_weight_counter {
struct bfq_entity {
/* service_tree member */
struct rb_node rb_node;
- /* pointer to the weight counter associated with this entity */
- struct bfq_weight_counter *weight_counter;
/*
* Flag, true if the entity is on a tree (either the active or
@@ -266,6 +263,9 @@ struct bfq_queue {
/* entity representing this queue in the scheduler */
struct bfq_entity entity;
+ /* pointer to the weight counter associated with this entity */
+ struct bfq_weight_counter *weight_counter;
+
/* maximum budget allowed from the feedback mechanism */
int max_budget;
/* budget expiration (in jiffies) */
@@ -351,6 +351,32 @@ struct bfq_queue {
unsigned long split_time; /* time of last split */
unsigned long first_IO_time; /* time of first I/O for this queue */
+
+ /* max service rate measured so far */
+ u32 max_service_rate;
+ /*
+ * Ratio between the service received by bfqq while it is in
+ * service, and the cumulative service (of requests of other
+ * queues) that may be injected while bfqq is empty but still
+ * in service. To increase precision, the coefficient is
+ * measured in tenths of a unit. Here are some examples of (1)
+ * ratios, (2) the resulting percentages of service injected
+ * w.r.t. the total service dispatched while bfqq is in
+ * service, and (3) the corresponding values of the coefficient:
+ * 10 (9%) -> 100
+ * 9.9 (9%) -> 99
+ * 2 (33%) -> 20
+ * 1.5 (40%) -> 15
+ * 1 (50%) -> 10
+ * 0.5 (66%) -> 5
+ * 0.1 (90%) -> 1
+ *
+ * So, if the coefficient is lower than 10, then
+ * injected service is more than bfqq service.
+ */
+ unsigned int inject_coeff;
+ /* amount of service injected in current service slot */
+ unsigned int injected_service;
};
/**
@@ -423,14 +449,9 @@ struct bfq_data {
*/
struct rb_root queue_weights_tree;
/*
- * rbtree of non-queue @bfq_entity weight counters, sorted by
- * weight. Used to keep track of whether all @bfq_groups have
- * the same weight. The tree contains one counter for each
- * distinct weight associated to some active @bfq_group (see
- * the comments to the functions bfq_weights_tree_[add|remove]
- * for further details).
+ * number of groups with requests still waiting for completion
*/
- struct rb_root group_weights_tree;
+ unsigned int num_active_groups;
/*
* Number of bfq_queues containing requests (including the
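With the group weights tree replaced by this counter, deciding whether
active groups break the symmetry of the scenario becomes an O(1) test.
A hedged sketch of how the full check can now look (the helper name is
hypothetical; the real logic lives in bfq_symmetric_scenario(), which
this excerpt does not show):

/*
 * Sketch, assuming queue_weights_tree still tracks the weights of
 * active non-weight-raised queues: the scenario is symmetric when at
 * most one distinct queue weight is in use and no group is active.
 */
static bool bfq_scenario_symmetric(struct bfq_data *bfqd)
{
	struct rb_node *node = bfqd->queue_weights_tree.rb_node;
	bool single_weight = !node ||
		(!node->rb_left && !node->rb_right);

	return single_weight && bfqd->num_active_groups == 0;
}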
@@ -825,10 +846,10 @@ struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync);
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
+void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct rb_root *root);
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
- struct bfq_entity *entity,
+ struct bfq_queue *bfqq,
struct rb_root *root);
void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq);
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index ae52bff43ce4..476b5a90a5a4 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -788,25 +788,29 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
new_weight = entity->orig_weight *
(bfqq ? bfqq->wr_coeff : 1);
/*
- * If the weight of the entity changes, remove the entity
- * from its old weight counter (if there is a counter
- * associated with the entity), and add it to the counter
- * associated with its new weight.
+ * If the weight of the entity changes, and the entity is a
+ * queue, remove the entity from its old weight counter (if
+ * there is a counter associated with the entity).
*/
if (prev_weight != new_weight) {
- root = bfqq ? &bfqd->queue_weights_tree :
- &bfqd->group_weights_tree;
- __bfq_weights_tree_remove(bfqd, entity, root);
+ if (bfqq) {
+ root = &bfqd->queue_weights_tree;
+ __bfq_weights_tree_remove(bfqd, bfqq, root);
+ } else
+ bfqd->num_active_groups--;
}
entity->weight = new_weight;
/*
- * Add the entity to its weights tree only if it is
- * not associated with a weight-raised queue.
+ * Add the entity, if it is not a weight-raised queue,
+ * to the counter associated with its new weight.
*/
- if (prev_weight != new_weight &&
- (bfqq ? bfqq->wr_coeff == 1 : 1))
- /* If we get here, root has been initialized. */
- bfq_weights_tree_add(bfqd, entity, root);
+ if (prev_weight != new_weight) {
+ if (bfqq && bfqq->wr_coeff == 1) {
+ /* If we get here, root has been initialized. */
+ bfq_weights_tree_add(bfqd, bfqq, root);
+ } else
+ bfqd->num_active_groups++;
+ }
new_st->wsum += entity->weight;
@@ -1012,9 +1016,9 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
struct bfq_group *bfqg =
container_of(entity, struct bfq_group, entity);
+ struct bfq_data *bfqd = bfqg->bfqd;
- bfq_weights_tree_add(bfqg->bfqd, entity,
- &bfqd->group_weights_tree);
+ bfqd->num_active_groups++;
}
#endif
@@ -1181,10 +1185,17 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
st = bfq_entity_service_tree(entity);
is_in_service = entity == sd->in_service_entity;
- if (is_in_service) {
- bfq_calc_finish(entity, entity->service);
+ bfq_calc_finish(entity, entity->service);
+
+ if (is_in_service)
sd->in_service_entity = NULL;
- }
+ else
+ /*
+ * Non in-service entity: nobody will take care of
+ * resetting its service counter on expiration. Do it
+ * now.
+ */
+ entity->service = 0;
if (entity->tree == &st->active)
bfq_active_extract(st, entity);
@@ -1685,7 +1696,7 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
if (!bfqq->dispatched)
if (bfqq->wr_coeff == 1)
- bfq_weights_tree_add(bfqd, &bfqq->entity,
+ bfq_weights_tree_add(bfqd, bfqq,
&bfqd->queue_weights_tree);
if (bfqq->wr_coeff > 1)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 67b5fb861a51..290af497997b 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -306,6 +306,8 @@ bool bio_integrity_prep(struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
bio_integrity_process(bio, &bio->bi_iter,
bi->profile->generate_fn);
+ } else {
+ bip->bio_iter = bio->bi_iter;
}
return true;
@@ -331,20 +333,14 @@ static void bio_integrity_verify_fn(struct work_struct *work)
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
- struct bvec_iter iter = bio->bi_iter;
/*
* At the moment verify is called, the bio's iterator has been
* advanced by splits and completion. Use the iterator saved at
* submission time, which still points at the data's original
* position.
*/
- if (bio_rewind_iter(bio, &iter, iter.bi_done)) {
- bio->bi_status = bio_integrity_process(bio, &iter,
- bi->profile->verify_fn);
- } else {
- bio->bi_status = BLK_STS_IOERR;
- }
-
+ bio->bi_status = bio_integrity_process(bio, &bip->bio_iter,
+ bi->profile->verify_fn);
bio_integrity_free(bio);
bio_endio(bio);
}
diff --git a/block/bio.c b/block/bio.c
index 8c680a776171..bbfeb4ee2892 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -609,7 +609,9 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
- bio_clone_blkcg_association(bio, bio_src);
+ bio_clone_blkg_association(bio, bio_src);
+
+ blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);
@@ -729,7 +731,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
}
/* If we may be able to merge these biovecs, force a recount */
- if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
+ if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
bio_clear_flag(bio, BIO_SEG_VALID);
done:
@@ -827,6 +829,8 @@ int bio_add_page(struct bio *bio, struct page *page,
}
EXPORT_SYMBOL(bio_add_page);
+#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
+
/**
* __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
* @bio: bio to add pages to
@@ -839,38 +843,35 @@ EXPORT_SYMBOL(bio_add_page);
*/
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
- unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
+ unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+ unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
+ ssize_t size, left;
+ unsigned len, i;
size_t offset;
- ssize_t size;
+
+ /*
+ * Move page array up in the allocated memory for the bio vecs as far as
+ * possible so that we can start filling biovecs from the beginning
+ * without overwriting the temporary page array.
+ */
+ BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
+ pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
- idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
- /*
- * Deep magic below: We need to walk the pinned pages backwards
- * because we are abusing the space allocated for the bio_vecs
- * for the page array. Because the bio_vecs are larger than the
- * page pointers by definition this will always work. But it also
- * means we can't use bio_add_page, so any changes to it's semantics
- * need to be reflected here as well.
- */
- bio->bi_iter.bi_size += size;
- bio->bi_vcnt += nr_pages;
+ for (left = size, i = 0; left > 0; left -= len, i++) {
+ struct page *page = pages[i];
- while (idx--) {
- bv[idx].bv_page = pages[idx];
- bv[idx].bv_len = PAGE_SIZE;
- bv[idx].bv_offset = 0;
+ len = min_t(size_t, PAGE_SIZE - offset, left);
+ if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset) != len))
+ return -EINVAL;
+ offset = 0;
}
- bv[0].bv_offset += offset;
- bv[0].bv_len -= offset;
- bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
-
iov_iter_advance(iter, size);
return 0;
}
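The rewrite above replaces the backwards-walking trick with a forward
fill: the temporary page array is parked at the tail of the free bvec
space, so bio_add_page() can fill bvecs from the front without
clobbering page pointers that have not been consumed yet. A sketch of
the placement arithmetic (the helper name is hypothetical):

/*
 * Each bio_vec can hold PAGE_PTRS_PER_BVEC page pointers, so consuming
 * one page pointer while filling one bvec frees (PAGE_PTRS_PER_BVEC - 1)
 * pointer slots of headroom; starting the page array this far in means
 * the writer never catches up with the reader. PAGE_PTRS_PER_BVEC >= 2
 * is required, hence the BUILD_BUG_ON above.
 */
static struct page **page_array_base(struct bio_vec *bv,
				     unsigned short entries_left)
{
	struct page **pages = (struct page **)bv;

	return pages + entries_left * (PAGE_PTRS_PER_BVEC - 1);
}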
@@ -1684,7 +1685,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op,
const int sgrp = op_stat_group(req_op);
int cpu = part_stat_lock();
- part_stat_add(cpu, part, ticks[sgrp], duration);
+ part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
part_round_stats(q, cpu, part);
part_dec_in_flight(q, part, op_is_write(req_op));
@@ -1807,7 +1808,6 @@ struct bio *bio_split(struct bio *bio, int sectors,
bio_integrity_trim(split);
bio_advance(bio, split->bi_iter.bi_size);
- bio->bi_iter.bi_done = 0;
if (bio_flagged(bio, BIO_TRACE_COMPLETION))
bio_set_flag(split, BIO_TRACE_COMPLETION);
@@ -1956,69 +1956,151 @@ EXPORT_SYMBOL(bioset_init_from_src);
#ifdef CONFIG_BLK_CGROUP
+/**
+ * bio_associate_blkg - associate a bio with a blkg
+ * @bio: target bio
+ * @blkg: the blkg to associate
+ *
+ * This tries to associate @bio with the specified blkg. Association failure
+ * is handled by walking up the blkg tree. Therefore, the blkg associated can
+ * be anything between @blkg and the root_blkg. This situation only happens
+ * when a cgroup is dying and then the remaining bios will spill to the closest
+ * alive blkg.
+ *
+ * A reference will be taken on the @blkg and will be released when @bio is
+ * freed.
+ */
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
+{
+ if (unlikely(bio->bi_blkg))
+ return -EBUSY;
+ bio->bi_blkg = blkg_tryget_closest(blkg);
+ return 0;
+}
+
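bio_associate_blkg() above relies on blkg_tryget_closest(), whose
definition is outside this excerpt; a plausible sketch, assuming the
percpu_ref conversion made in blk-cgroup.c later in this diff:

/* Sketch only: walk toward the root until a reference sticks, so bios
 * issued from a dying cgroup land on the nearest live ancestor.
 */
static struct blkcg_gq *tryget_closest_sketch(struct blkcg_gq *blkg)
{
	while (blkg && !percpu_ref_tryget(&blkg->refcnt))
		blkg = blkg->parent;
	return blkg;
}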
+/**
+ * __bio_associate_blkg_from_css - internal blkg association function
+ * @bio: target bio
+ * @css: css to associate with
+ *
+ * This is the core association function that all association paths rely on.
+ * A blkg reference is taken and is released when the bio is freed.
+ */
+static int __bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{
+ struct request_queue *q = bio->bi_disk->queue;
+ struct blkcg_gq *blkg;
+ int ret;
+
+ rcu_read_lock();
+
+ if (!css || !css->parent)
+ blkg = q->root_blkg;
+ else
+ blkg = blkg_lookup_create(css_to_blkcg(css), q);
+
+ ret = bio_associate_blkg(bio, blkg);
+
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * bio_associate_blkg_from_css - associate a bio with a specified css
+ * @bio: target bio
+ * @css: target css
+ *
+ * Associate @bio with the blkg found from @css and the request_queue of
+ * @bio. This falls back to the queue's root_blkg if
+ * the association fails with the css.
+ */
+int bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{
+ if (unlikely(bio->bi_blkg))
+ return -EBUSY;
+ return __bio_associate_blkg_from_css(bio, css);
+}
+EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
+
#ifdef CONFIG_MEMCG
/**
- * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
+ * bio_associate_blkg_from_page - associate a bio with the page's blkg
* @bio: target bio
* @page: the page to lookup the blkcg from
*
- * Associate @bio with the blkcg from @page's owning memcg. This works like
- * every other associate function wrt references.
+ * Associate @bio with the blkg from @page's owning memcg and the respective
+ * request_queue. If cgroup_e_css returns NULL, fall back to the queue's
+ * root_blkg.
+ *
+ * Note: this must be called after bio has an associated device.
*/
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
+int bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
- struct cgroup_subsys_state *blkcg_css;
+ struct cgroup_subsys_state *css;
+ int ret;
- if (unlikely(bio->bi_css))
+ if (unlikely(bio->bi_blkg))
return -EBUSY;
if (!page->mem_cgroup)
return 0;
- blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
- &io_cgrp_subsys);
- bio->bi_css = blkcg_css;
- return 0;
+
+ rcu_read_lock();
+
+ css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
+
+ ret = __bio_associate_blkg_from_css(bio, css);
+
+ rcu_read_unlock();
+ return ret;
}
#endif /* CONFIG_MEMCG */
/**
- * bio_associate_blkcg - associate a bio with the specified blkcg
+ * bio_associate_create_blkg - associate a bio with a blkg from q
+ * @q: request_queue where bio is going
* @bio: target bio
- * @blkcg_css: css of the blkcg to associate
- *
- * Associate @bio with the blkcg specified by @blkcg_css. Block layer will
- * treat @bio as if it were issued by a task which belongs to the blkcg.
*
- * This function takes an extra reference of @blkcg_css which will be put
- * when @bio is released. The caller must own @bio and is responsible for
- * synchronizing calls to this function.
+ * Associate @bio with the blkg found from the bio's css and the request_queue.
+ * If one is not found, blkg_lookup_create() creates the blkg. This falls back to
+ * the queue's root_blkg if association fails.
*/
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
+int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
{
- if (unlikely(bio->bi_css))
- return -EBUSY;
- css_get(blkcg_css);
- bio->bi_css = blkcg_css;
- return 0;
+ struct cgroup_subsys_state *css;
+ int ret = 0;
+
+ /* someone has already associated this bio with a blkg */
+ if (bio->bi_blkg)
+ return ret;
+
+ rcu_read_lock();
+
+ css = blkcg_css();
+
+ ret = __bio_associate_blkg_from_css(bio, css);
+
+ rcu_read_unlock();
+ return ret;
}
-EXPORT_SYMBOL_GPL(bio_associate_blkcg);
/**
- * bio_associate_blkg - associate a bio with the specified blkg
+ * bio_reassociate_blkg - reassociate a bio with a blkg from q
+ * @q: request_queue where bio is going
* @bio: target bio
- * @blkg: the blkg to associate
*
- * Associate @bio with the blkg specified by @blkg. This is the queue specific
- * blkcg information associated with the @bio, a reference will be taken on the
- * @blkg and will be freed when the bio is freed.
+ * When submitting a bio, multiple recursive calls to make_request() may occur.
+ * This can cause the initial association done in blkcg_bio_issue_check() to
+ * be stale, referencing the prior request_queue. Reassociate the bio here
+ * when that happens.
*/
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
+int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
{
- if (unlikely(bio->bi_blkg))
- return -EBUSY;
- if (!blkg_try_get(blkg))
- return -ENODEV;
- bio->bi_blkg = blkg;
- return 0;
+ if (bio->bi_blkg) {
+ blkg_put(bio->bi_blkg);
+ bio->bi_blkg = NULL;
+ }
+
+ return bio_associate_create_blkg(q, bio);
}
/**
@@ -2031,10 +2113,6 @@ void bio_disassociate_task(struct bio *bio)
put_io_context(bio->bi_ioc);
bio->bi_ioc = NULL;
}
- if (bio->bi_css) {
- css_put(bio->bi_css);
- bio->bi_css = NULL;
- }
if (bio->bi_blkg) {
blkg_put(bio->bi_blkg);
bio->bi_blkg = NULL;
@@ -2042,16 +2120,16 @@ void bio_disassociate_task(struct bio *bio)
}
/**
- * bio_clone_blkcg_association - clone blkcg association from src to dst bio
+ * bio_clone_blkg_association - clone blkg association from src to dst bio
* @dst: destination bio
* @src: source bio
*/
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
+void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
- if (src->bi_css)
- WARN_ON(bio_associate_blkcg(dst, src->bi_css));
+ if (src->bi_blkg)
+ bio_associate_blkg(dst, src->bi_blkg);
}
-EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
+EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c630e02836a8..992da5592c6e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -84,6 +84,37 @@ static void blkg_free(struct blkcg_gq *blkg)
kfree(blkg);
}
+static void __blkg_release(struct rcu_head *rcu)
+{
+ struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
+
+ percpu_ref_exit(&blkg->refcnt);
+
+ /* release the blkcg and parent blkg refs this blkg has been holding */
+ css_put(&blkg->blkcg->css);
+ if (blkg->parent)
+ blkg_put(blkg->parent);
+
+ wb_congested_put(blkg->wb_congested);
+
+ blkg_free(blkg);
+}
+
+/*
+ * A group is RCU protected, but holding the RCU read lock does not mean
+ * that one can access all the fields of blkg and assume these are valid.
+ * For example, don't try to follow throtl_data and request_queue links.
+ *
+ * Holding a reference to a blkg under RCU only allows access to values
+ * local to the group, such as group stats and group rate limits.
+ */
+static void blkg_release(struct percpu_ref *ref)
+{
+ struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
+
+ call_rcu(&blkg->rcu_head, __blkg_release);
+}
+
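The two functions above follow the standard percpu_ref lifecycle; for
reference, a minimal generic sketch of the same pattern, with
hypothetical object and helper names:

struct obj {
	struct percpu_ref refcnt;
	struct rcu_head rcu_head;
};

/* runs after a grace period, like __blkg_release() above */
static void obj_free_rcu(struct rcu_head *rcu)
{
	struct obj *o = container_of(rcu, struct obj, rcu_head);

	percpu_ref_exit(&o->refcnt);
	kfree(o);
}

/* runs once the last reference is dropped, like blkg_release() above */
static void obj_release(struct percpu_ref *ref)
{
	struct obj *o = container_of(ref, struct obj, refcnt);

	call_rcu(&o->rcu_head, obj_free_rcu);
}

/*
 * creation: percpu_ref_init(&o->refcnt, obj_release, 0, GFP_KERNEL);
 * teardown: percpu_ref_kill(&o->refcnt) drops the base reference;
 * readers take short-term refs with percpu_ref_tryget() under RCU.
 */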
/**
* blkg_alloc - allocate a blkg
* @blkcg: block cgroup the new blkg is associated with
@@ -110,7 +141,6 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
blkg->q = q;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
- atomic_set(&blkg->refcnt, 1);
/* root blkg uses @q->root_rl, init rl only for !root blkgs */
if (blkcg != &blkcg_root) {
@@ -217,6 +247,11 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
blkg_get(blkg->parent);
}
+ ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (ret)
+ goto err_cancel_ref;
+
/* invoke per-policy init */
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -249,6 +284,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
blkg_put(blkg);
return ERR_PTR(ret);
+err_cancel_ref:
+ percpu_ref_exit(&blkg->refcnt);
err_put_congested:
wb_congested_put(wb_congested);
err_put_css:
@@ -259,7 +296,7 @@ err_free_blkg:
}
/**
- * blkg_lookup_create - lookup blkg, try to create one if not there
+ * __blkg_lookup_create - lookup blkg, try to create one if not there
* @blkcg: blkcg of interest
* @q: request_queue of interest
*
@@ -268,12 +305,11 @@ err_free_blkg:
* that all non-root blkg's have access to the parent blkg. This function
* should be called under RCU read lock and @q->queue_lock.
*
- * Returns pointer to the looked up or created blkg on success, ERR_PTR()
- * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
- * dead and bypassing, returns ERR_PTR(-EBUSY).
+ * Returns the blkg. If blkg_create() fails while walking down from the
+ * root, the closest ancestor blkg already created is returned instead.
*/
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q)
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
{
struct blkcg_gq *blkg;
@@ -285,7 +321,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
* we shouldn't allow anything to go through for a bypassing queue.
*/
if (unlikely(blk_queue_bypass(q)))
- return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
+ return q->root_blkg;
blkg = __blkg_lookup(blkcg, q, true);
if (blkg)
@@ -293,23 +329,58 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
/*
* Create blkgs walking down from blkcg_root to @blkcg, so that all
- * non-root blkgs have access to their parents.
+ * non-root blkgs have access to their parents. Returns the closest
+ * blkg to the intended blkg should blkg_create() fail.
*/
while (true) {
struct blkcg *pos = blkcg;
struct blkcg *parent = blkcg_parent(blkcg);
-
- while (parent && !__blkg_lookup(parent, q, false)) {
+ struct blkcg_gq *ret_blkg = q->root_blkg;
+
+ while (parent) {
+ blkg = __blkg_lookup(parent, q, false);
+ if (blkg) {
+ /* remember closest blkg */
+ ret_blkg = blkg;
+ break;
+ }
pos = parent;
parent = blkcg_parent(parent);
}
blkg = blkg_create(pos, q, NULL);
- if (pos == blkcg || IS_ERR(blkg))
+ if (IS_ERR(blkg))
+ return ret_blkg;
+ if (pos == blkcg)
return blkg;
}
}
+/**
+ * blkg_lookup_create - find or create a blkg
+ * @blkcg: target block cgroup
+ * @q: target request_queue
+ *
+ * This looks up or creates the blkg representing the unique pair
+ * of the blkcg and the request_queue.
+ */
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
+{
+ struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
+ unsigned long flags;
+
+ if (unlikely(!blkg)) {
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ blkg = __blkg_lookup_create(blkcg, q);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+
+ return blkg;
+}
+
static void blkg_destroy(struct blkcg_gq *blkg)
{
struct blkcg *blkcg = blkg->blkcg;
@@ -353,7 +424,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
- blkg_put(blkg);
+ percpu_ref_kill(&blkg->refcnt);
}
/**
@@ -381,29 +452,6 @@ static void blkg_destroy_all(struct request_queue *q)
}
/*
- * A group is RCU protected, but having an rcu lock does not mean that one
- * can access all the fields of blkg and assume these are valid. For
- * example, don't try to follow throtl_data and request queue links.
- *
- * Having a reference to blkg under an rcu allows accesses to only values
- * local to groups like group stats and group rate limits.
- */
-void __blkg_release_rcu(struct rcu_head *rcu_head)
-{
- struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
-
- /* release the blkcg and parent blkg refs this blkg has been holding */
- css_put(&blkg->blkcg->css);
- if (blkg->parent)
- blkg_put(blkg->parent);
-
- wb_congested_put(blkg->wb_congested);
-
- blkg_free(blkg);
-}
-EXPORT_SYMBOL_GPL(__blkg_release_rcu);
-
-/*
* The next function used by blk_queue_for_each_rl(). It's a bit tricky
* because the root blkg uses @q->root_rl instead of its own rl.
*/
@@ -1748,8 +1796,7 @@ void blkcg_maybe_throttle_current(void)
blkg = blkg_lookup(blkcg, q);
if (!blkg)
goto out;
- blkg = blkg_try_get(blkg);
- if (!blkg)
+ if (!blkg_tryget(blkg))
goto out;
rcu_read_unlock();
diff --git a/block/blk-core.c b/block/blk-core.c
index 4dbc93f43b38..3ed60723e242 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -42,6 +42,7 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
+#include "blk-pm.h"
#include "blk-rq-qos.h"
#ifdef CONFIG_DEBUG_FS
@@ -421,24 +422,25 @@ void blk_sync_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_sync_queue);
/**
- * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
+ * blk_set_pm_only - increment pm_only counter
* @q: request queue pointer
- *
- * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
- * set and 1 if the flag was already set.
*/
-int blk_set_preempt_only(struct request_queue *q)
+void blk_set_pm_only(struct request_queue *q)
{
- return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
+ atomic_inc(&q->pm_only);
}
-EXPORT_SYMBOL_GPL(blk_set_preempt_only);
+EXPORT_SYMBOL_GPL(blk_set_pm_only);
-void blk_clear_preempt_only(struct request_queue *q)
+void blk_clear_pm_only(struct request_queue *q)
{
- blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
- wake_up_all(&q->mq_freeze_wq);
+ int pm_only;
+
+ pm_only = atomic_dec_return(&q->pm_only);
+ WARN_ON_ONCE(pm_only < 0);
+ if (pm_only == 0)
+ wake_up_all(&q->mq_freeze_wq);
}
-EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
+EXPORT_SYMBOL_GPL(blk_clear_pm_only);
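A hedged usage sketch for the new interface (the driver callbacks are
hypothetical): because pm_only is a counter rather than a flag, nested
set/clear pairs from independent callers now compose correctly.

static void my_suspend_prepare(struct request_queue *q)
{
	blk_set_pm_only(q);
	/* from here on, only BLK_MQ_REQ_PREEMPT requests may enter */
}

static void my_resume_complete(struct request_queue *q)
{
	/* wakes mq_freeze_wq waiters once the counter drops to zero */
	blk_clear_pm_only(q);
}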
/**
* __blk_run_queue_uncond - run a queue whether or not it has been stopped
@@ -917,7 +919,7 @@ EXPORT_SYMBOL(blk_alloc_queue);
*/
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
- const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
+ const bool pm = flags & BLK_MQ_REQ_PREEMPT;
while (true) {
bool success = false;
@@ -925,11 +927,11 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
rcu_read_lock();
if (percpu_ref_tryget_live(&q->q_usage_counter)) {
/*
- * The code that sets the PREEMPT_ONLY flag is
- * responsible for ensuring that that flag is globally
- * visible before the queue is unfrozen.
+ * The code that increments the pm_only counter is
+ * responsible for ensuring that the counter is
+ * globally visible before the queue is unfrozen.
*/
- if (preempt || !blk_queue_preempt_only(q)) {
+ if (pm || !blk_queue_pm_only(q)) {
success = true;
} else {
percpu_ref_put(&q->q_usage_counter);
@@ -954,7 +956,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
wait_event(q->mq_freeze_wq,
(atomic_read(&q->mq_freeze_depth) == 0 &&
- (preempt || !blk_queue_preempt_only(q))) ||
+ (pm || (blk_pm_request_resume(q),
+ !blk_queue_pm_only(q)))) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
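blk_pm_request_resume() comes from the new blk-pm.h, which this excerpt
does not show; a plausible sketch, on the assumption that it only has
to kick a suspended or suspending device before the submitter sleeps:

static inline void blk_pm_request_resume_sketch(struct request_queue *q)
{
	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
		       q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}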
@@ -1051,8 +1054,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
mutex_init(&q->sysfs_lock);
spin_lock_init(&q->__queue_lock);
- if (!q->mq_ops)
- q->queue_lock = lock ? : &q->__queue_lock;
+ q->queue_lock = lock ? : &q->__queue_lock;
/*
* A queue starts its life with bypass turned on to avoid
@@ -1160,7 +1162,7 @@ int blk_init_allocated_queue(struct request_queue *q)
{
WARN_ON_ONCE(q->mq_ops);
- q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
+ q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
if (!q->fq)
return -ENOMEM;
@@ -1726,16 +1728,6 @@ void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
}
EXPORT_SYMBOL_GPL(part_round_stats);
-#ifdef CONFIG_PM
-static void blk_pm_put_request(struct request *rq)
-{
- if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
- pm_runtime_mark_last_busy(rq->q->dev);
-}
-#else
-static inline void blk_pm_put_request(struct request *rq) {}
-#endif
-
void __blk_put_request(struct request_queue *q, struct request *req)
{
req_flags_t rq_flags = req->rq_flags;
@@ -1752,6 +1744,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
blk_req_zone_write_unlock(req);
blk_pm_put_request(req);
+ blk_pm_mark_last_busy(req);
elv_completed_request(q, req);
@@ -2440,6 +2433,7 @@ blk_qc_t generic_make_request(struct bio *bio)
if (q)
blk_queue_exit(q);
q = bio->bi_disk->queue;
+ bio_reassociate_blkg(q, bio);
flags = 0;
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
@@ -2733,17 +2727,15 @@ void blk_account_io_done(struct request *req, u64 now)
* containing request is enough.
*/
if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
- unsigned long duration;
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
int cpu;
- duration = nsecs_to_jiffies(now - req->start_time_ns);
cpu = part_stat_lock();
part = req->part;
part_stat_inc(cpu, part, ios[sgrp]);
- part_stat_add(cpu, part, ticks[sgrp], duration);
+ part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns);
part_round_stats(req->q, cpu, part);
part_dec_in_flight(req->q, part, rq_data_dir(req));
@@ -2752,30 +2744,6 @@ void blk_account_io_done(struct request *req, u64 now)
}
}
-#ifdef CONFIG_PM
-/*
- * Don't process normal requests when queue is suspended
- * or in the process of suspending/resuming
- */
-static bool blk_pm_allow_request(struct request *rq)
-{
- switch (rq->q->rpm_status) {
- case RPM_RESUMING:
- case RPM_SUSPENDING:
- return rq->rq_flags & RQF_PM;
- case RPM_SUSPENDED:
- return false;
- default:
- return true;
- }
-}
-#else
-static bool blk_pm_allow_request(struct request *rq)
-{
- return true;
-}
-#endif
-
void blk_account_io_start(struct request *rq, bool new_io)
{
struct hd_struct *part;
@@ -2821,11 +2789,14 @@ static struct request *elv_next_request(struct request_queue *q)
while (1) {
list_for_each_entry(rq, &q->queue_head, queuelist) {
- if (blk_pm_allow_request(rq))
- return rq;
-
- if (rq->rq_flags & RQF_SOFTBARRIER)
- break;
+#ifdef CONFIG_PM
+ /*
+ * If a request gets queued in state RPM_SUSPENDED
+ * then that's a kernel bug.
+ */
+ WARN_ON_ONCE(q->rpm_status == RPM_SUSPENDED);
+#endif
+ return rq;
}
/*
@@ -3757,191 +3728,6 @@ void blk_finish_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_finish_plug);
-#ifdef CONFIG_PM
-/**
- * blk_pm_runtime_init - Block layer runtime PM initialization routine
- * @q: the queue of the device
- * @dev: the device the queue belongs to
- *
- * Description:
- * Initialize runtime-PM-related fields for @q and start auto suspend for
- * @dev. Drivers that want to take advantage of request-based runtime PM
- * should call this function after @dev has been initialized, and its
- * request queue @q has been allocated, and runtime PM for it can not happen
- * yet(either due to disabled/forbidden or its usage_count > 0). In most
- * cases, driver should call this function before any I/O has taken place.
- *
- * This function takes care of setting up using auto suspend for the device,
- * the autosuspend delay is set to -1 to make runtime suspend impossible
- * until an updated value is either set by user or by driver. Drivers do
- * not need to touch other autosuspend settings.
- *
- * The block layer runtime PM is request based, so only works for drivers
- * that use request as their IO unit instead of those directly use bio's.
- */
-void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
-{
- /* Don't enable runtime PM for blk-mq until it is ready */
- if (q->mq_ops) {
- pm_runtime_disable(dev);
- return;
- }
-
- q->dev = dev;
- q->rpm_status = RPM_ACTIVE;
- pm_runtime_set_autosuspend_delay(q->dev, -1);
- pm_runtime_use_autosuspend(q->dev);
-}
-EXPORT_SYMBOL(blk_pm_runtime_init);
-
-/**
- * blk_pre_runtime_suspend - Pre runtime suspend check
- * @q: the queue of the device
- *
- * Description:
- * This function will check if runtime suspend is allowed for the device
- * by examining if there are any requests pending in the queue. If there
- * are requests pending, the device can not be runtime suspended; otherwise,
- * the queue's status will be updated to SUSPENDING and the driver can
- * proceed to suspend the device.
- *
- * For the not allowed case, we mark last busy for the device so that
- * runtime PM core will try to autosuspend it some time later.
- *
- * This function should be called near the start of the device's
- * runtime_suspend callback.
- *
- * Return:
- * 0 - OK to runtime suspend the device
- * -EBUSY - Device should not be runtime suspended
- */
-int blk_pre_runtime_suspend(struct request_queue *q)
-{
- int ret = 0;
-
- if (!q->dev)
- return ret;
-
- spin_lock_irq(q->queue_lock);
- if (q->nr_pending) {
- ret = -EBUSY;
- pm_runtime_mark_last_busy(q->dev);
- } else {
- q->rpm_status = RPM_SUSPENDING;
- }
- spin_unlock_irq(q->queue_lock);
- return ret;
-}
-EXPORT_SYMBOL(blk_pre_runtime_suspend);
-
-/**
- * blk_post_runtime_suspend - Post runtime suspend processing
- * @q: the queue of the device
- * @err: return value of the device's runtime_suspend function
- *
- * Description:
- * Update the queue's runtime status according to the return value of the
- * device's runtime suspend function and mark last busy for the device so
- * that PM core will try to auto suspend the device at a later time.
- *
- * This function should be called near the end of the device's
- * runtime_suspend callback.
- */
-void blk_post_runtime_suspend(struct request_queue *q, int err)
-{
- if (!q->dev)
- return;
-
- spin_lock_irq(q->queue_lock);
- if (!err) {
- q->rpm_status = RPM_SUSPENDED;
- } else {
- q->rpm_status = RPM_ACTIVE;
- pm_runtime_mark_last_busy(q->dev);
- }
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_post_runtime_suspend);
-
-/**
- * blk_pre_runtime_resume - Pre runtime resume processing
- * @q: the queue of the device
- *
- * Description:
- * Update the queue's runtime status to RESUMING in preparation for the
- * runtime resume of the device.
- *
- * This function should be called near the start of the device's
- * runtime_resume callback.
- */
-void blk_pre_runtime_resume(struct request_queue *q)
-{
- if (!q->dev)
- return;
-
- spin_lock_irq(q->queue_lock);
- q->rpm_status = RPM_RESUMING;
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_pre_runtime_resume);
-
-/**
- * blk_post_runtime_resume - Post runtime resume processing
- * @q: the queue of the device
- * @err: return value of the device's runtime_resume function
- *
- * Description:
- * Update the queue's runtime status according to the return value of the
- * device's runtime_resume function. If it is successfully resumed, process
- * the requests that are queued into the device's queue when it is resuming
- * and then mark last busy and initiate autosuspend for it.
- *
- * This function should be called near the end of the device's
- * runtime_resume callback.
- */
-void blk_post_runtime_resume(struct request_queue *q, int err)
-{
- if (!q->dev)
- return;
-
- spin_lock_irq(q->queue_lock);
- if (!err) {
- q->rpm_status = RPM_ACTIVE;
- __blk_run_queue(q);
- pm_runtime_mark_last_busy(q->dev);
- pm_request_autosuspend(q->dev);
- } else {
- q->rpm_status = RPM_SUSPENDED;
- }
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_post_runtime_resume);
-
-/**
- * blk_set_runtime_active - Force runtime status of the queue to be active
- * @q: the queue of the device
- *
- * If the device is left runtime suspended during system suspend the resume
- * hook typically resumes the device and corrects runtime status
- * accordingly. However, that does not affect the queue runtime PM status
- * which is still "suspended". This prevents processing requests from the
- * queue.
- *
- * This function can be used in driver's resume hook to correct queue
- * runtime PM status and re-enable peeking requests from the queue. It
- * should be called before first request is added to the queue.
- */
-void blk_set_runtime_active(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- q->rpm_status = RPM_ACTIVE;
- pm_runtime_mark_last_busy(q->dev);
- pm_request_autosuspend(q->dev);
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_set_runtime_active);
-#endif
-
int __init blk_dev_init(void)
{
BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ce41f666de3e..8b44b86779da 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -566,12 +566,12 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
EXPORT_SYMBOL(blkdev_issue_flush);
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
- int node, int cmd_size)
+ int node, int cmd_size, gfp_t flags)
{
struct blk_flush_queue *fq;
int rq_sz = sizeof(struct request);
- fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
+ fq = kzalloc_node(sizeof(*fq), flags, node);
if (!fq)
goto fail;
@@ -579,7 +579,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
spin_lock_init(&fq->mq_flush_lock);
rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
- fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
+ fq->flush_rq = kzalloc_node(rq_sz, flags, node);
if (!fq->flush_rq)
goto fail_rq;
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 6121611e1316..d1ab089e0919 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -49,12 +49,8 @@ int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
bio_for_each_integrity_vec(iv, bio, iter) {
if (prev) {
- if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
+ if (!biovec_phys_mergeable(q, &ivprv, &iv))
goto new_segment;
-
- if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
- goto new_segment;
-
if (seg_size + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
@@ -95,12 +91,8 @@ int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
bio_for_each_integrity_vec(iv, bio, iter) {
if (prev) {
- if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
+ if (!biovec_phys_mergeable(q, &ivprv, &iv))
goto new_segment;
-
- if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
- goto new_segment;
-
if (sg->length + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 19923f8a029d..35c48d7b8f78 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -115,9 +115,22 @@ struct child_latency_info {
atomic_t scale_cookie;
};
+struct percentile_stats {
+ u64 total;
+ u64 missed;
+};
+
+struct latency_stat {
+ union {
+ struct percentile_stats ps;
+ struct blk_rq_stat rqs;
+ };
+};
+
struct iolatency_grp {
struct blkg_policy_data pd;
- struct blk_rq_stat __percpu *stats;
+ struct latency_stat __percpu *stats;
+ struct latency_stat cur_stat;
struct blk_iolatency *blkiolat;
struct rq_depth rq_depth;
struct rq_wait rq_wait;
@@ -132,6 +145,7 @@ struct iolatency_grp {
/* Our current number of IO's for the last summation. */
u64 nr_samples;
+ bool ssd;
struct child_latency_info child_lat;
};
@@ -172,6 +186,80 @@ static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
return pd_to_blkg(&iolat->pd);
}
+static inline void latency_stat_init(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd) {
+ stat->ps.total = 0;
+ stat->ps.missed = 0;
+ } else {
+ blk_rq_stat_init(&stat->rqs);
+ }
+}
+
+static inline void latency_stat_sum(struct iolatency_grp *iolat,
+ struct latency_stat *sum,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd) {
+ sum->ps.total += stat->ps.total;
+ sum->ps.missed += stat->ps.missed;
+ } else {
+ blk_rq_stat_sum(&sum->rqs, &stat->rqs);
+ }
+}
+
+static inline void latency_stat_record_time(struct iolatency_grp *iolat,
+ u64 req_time)
+{
+ struct latency_stat *stat = get_cpu_ptr(iolat->stats);
+
+ if (iolat->ssd) {
+ if (req_time >= iolat->min_lat_nsec)
+ stat->ps.missed++;
+ stat->ps.total++;
+ } else {
+ blk_rq_stat_add(&stat->rqs, req_time);
+ }
+ put_cpu_ptr(stat);
+}
+
+static inline bool latency_sum_ok(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd) {
+ u64 thresh = div64_u64(stat->ps.total, 10);
+ thresh = max(thresh, 1ULL);
+ return stat->ps.missed < thresh;
+ }
+ return stat->rqs.mean <= iolat->min_lat_nsec;
+}
+
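A worked example for the SSD branch above: with total = 95 and
missed = 9 in a window, thresh = max(95 / 10, 1) = 9 by integer
division, and 9 < 9 is false, so the window misses its latency target;
with missed = 8 it passes. The max(..., 1ULL) clamp means that in
windows with fewer than 10 samples a single missed request fails the
window.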
+static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd)
+ return stat->ps.total;
+ return stat->rqs.nr_samples;
+}
+
+static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
+{
+ int exp_idx;
+
+ if (iolat->ssd)
+ return;
+
+ /*
+ * CALC_LOAD takes in a number stored in fixed point representation.
+ * Because we are using this for IO time in ns, the values stored
+ * are significantly larger than the FIXED_1 denominator (2048).
+ * Therefore, rounding errors in the calculation are negligible and
+ * can be ignored.
+ */
+ exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
+ div64_u64(iolat->cur_win_nsec,
+ BLKIOLATENCY_EXP_BUCKET_SIZE));
+ CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat->rqs.mean);
+}
+
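CALC_LOAD here is the classic loadavg fixed-point EWMA (FSHIFT = 11,
FIXED_1 = 2048); schematically it expands to roughly the following,
which is why ns-scale inputs dwarf the rounding error the comment
mentions:

/* Schematic expansion (sketch; see include/linux/sched/loadavg.h): */
#define CALC_LOAD_SKETCH(load, exp, n)			\
	do {						\
		(load) *= (exp);			\
		(load) += (n) * (FIXED_1 - (exp));	\
		(load) >>= FSHIFT;			\
	} while (0)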
static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
wait_queue_entry_t *wait,
bool first_block)
@@ -255,7 +343,7 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat,
struct child_latency_info *lat_info,
bool up)
{
- unsigned long qd = blk_queue_depth(blkiolat->rqos.q);
+ unsigned long qd = blkiolat->rqos.q->nr_requests;
unsigned long scale = scale_amount(qd, up);
unsigned long old = atomic_read(&lat_info->scale_cookie);
unsigned long max_scale = qd << 1;
@@ -295,10 +383,9 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat,
*/
static void scale_change(struct iolatency_grp *iolat, bool up)
{
- unsigned long qd = blk_queue_depth(iolat->blkiolat->rqos.q);
+ unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
unsigned long scale = scale_amount(qd, up);
unsigned long old = iolat->rq_depth.max_depth;
- bool changed = false;
if (old > qd)
old = qd;
@@ -308,15 +395,13 @@ static void scale_change(struct iolatency_grp *iolat, bool up)
return;
if (old < qd) {
- changed = true;
old += scale;
old = min(old, qd);
iolat->rq_depth.max_depth = old;
wake_up_all(&iolat->rq_wait.wait);
}
- } else if (old > 1) {
+ } else {
old >>= 1;
- changed = true;
iolat->rq_depth.max_depth = max(old, 1UL);
}
}
@@ -369,7 +454,7 @@ static void check_scale_change(struct iolatency_grp *iolat)
* scale down event.
*/
samples_thresh = lat_info->nr_samples * 5;
- samples_thresh = div64_u64(samples_thresh, 100);
+ samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
if (iolat->nr_samples <= samples_thresh)
return;
}
@@ -395,34 +480,12 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
spinlock_t *lock)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
- struct request_queue *q = rqos->q;
+ struct blkcg_gq *blkg = bio->bi_blkg;
bool issue_as_root = bio_issue_as_root_blkg(bio);
if (!blk_iolatency_enabled(blkiolat))
return;
- rcu_read_lock();
- blkcg = bio_blkcg(bio);
- bio_associate_blkcg(bio, &blkcg->css);
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg)) {
- if (!lock)
- spin_lock_irq(q->queue_lock);
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
- blkg = NULL;
- if (!lock)
- spin_unlock_irq(q->queue_lock);
- }
- if (!blkg)
- goto out;
-
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
- bio_associate_blkg(bio, blkg);
-out:
- rcu_read_unlock();
while (blkg && blkg->parent) {
struct iolatency_grp *iolat = blkg_to_lat(blkg);
if (!iolat) {
@@ -443,7 +506,6 @@ static void iolatency_record_time(struct iolatency_grp *iolat,
struct bio_issue *issue, u64 now,
bool issue_as_root)
{
- struct blk_rq_stat *rq_stat;
u64 start = bio_issue_time(issue);
u64 req_time;
@@ -469,9 +531,7 @@ static void iolatency_record_time(struct iolatency_grp *iolat,
return;
}
- rq_stat = get_cpu_ptr(iolat->stats);
- blk_rq_stat_add(rq_stat, req_time);
- put_cpu_ptr(rq_stat);
+ latency_stat_record_time(iolat, req_time);
}
#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
@@ -482,17 +542,17 @@ static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
struct blkcg_gq *blkg = lat_to_blkg(iolat);
struct iolatency_grp *parent;
struct child_latency_info *lat_info;
- struct blk_rq_stat stat;
+ struct latency_stat stat;
unsigned long flags;
- int cpu, exp_idx;
+ int cpu;
- blk_rq_stat_init(&stat);
+ latency_stat_init(iolat, &stat);
preempt_disable();
for_each_online_cpu(cpu) {
- struct blk_rq_stat *s;
+ struct latency_stat *s;
s = per_cpu_ptr(iolat->stats, cpu);
- blk_rq_stat_sum(&stat, s);
- blk_rq_stat_init(s);
+ latency_stat_sum(iolat, &stat, s);
+ latency_stat_init(iolat, s);
}
preempt_enable();
@@ -502,41 +562,36 @@ static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
lat_info = &parent->child_lat;
- /*
- * CALC_LOAD takes in a number stored in fixed point representation.
- * Because we are using this for IO time in ns, the values stored
- * are significantly larger than the FIXED_1 denominator (2048).
- * Therefore, rounding errors in the calculation are negligible and
- * can be ignored.
- */
- exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
- div64_u64(iolat->cur_win_nsec,
- BLKIOLATENCY_EXP_BUCKET_SIZE));
- CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat.mean);
+ iolat_update_total_lat_avg(iolat, &stat);
/* Everything is ok and we don't need to adjust the scale. */
- if (stat.mean <= iolat->min_lat_nsec &&
+ if (latency_sum_ok(iolat, &stat) &&
atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
return;
/* Somebody beat us to the punch, just bail. */
spin_lock_irqsave(&lat_info->lock, flags);
+
+ latency_stat_sum(iolat, &iolat->cur_stat, &stat);
lat_info->nr_samples -= iolat->nr_samples;
- lat_info->nr_samples += stat.nr_samples;
- iolat->nr_samples = stat.nr_samples;
+ lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
+ iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
if ((lat_info->last_scale_event >= now ||
- now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME) &&
- lat_info->scale_lat <= iolat->min_lat_nsec)
+ now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
goto out;
- if (stat.mean <= iolat->min_lat_nsec &&
- stat.nr_samples >= BLKIOLATENCY_MIN_GOOD_SAMPLES) {
+ if (latency_sum_ok(iolat, &iolat->cur_stat) &&
+ latency_sum_ok(iolat, &stat)) {
+ if (latency_stat_samples(iolat, &iolat->cur_stat) <
+ BLKIOLATENCY_MIN_GOOD_SAMPLES)
+ goto out;
if (lat_info->scale_grp == iolat) {
lat_info->last_scale_event = now;
scale_cookie_change(iolat->blkiolat, lat_info, true);
}
- } else if (stat.mean > iolat->min_lat_nsec) {
+ } else if (lat_info->scale_lat == 0 ||
+ lat_info->scale_lat >= iolat->min_lat_nsec) {
lat_info->last_scale_event = now;
if (!lat_info->scale_grp ||
lat_info->scale_lat > iolat->min_lat_nsec) {
@@ -545,6 +600,7 @@ static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
}
scale_cookie_change(iolat->blkiolat, lat_info, false);
}
+ latency_stat_init(iolat, &iolat->cur_stat);
out:
spin_unlock_irqrestore(&lat_info->lock, flags);
}
@@ -650,7 +706,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
* We could be exiting, don't access the pd unless we have a
* ref on the blkg.
*/
- if (!blkg_try_get(blkg))
+ if (!blkg_tryget(blkg))
continue;
iolat = blkg_to_lat(blkg);
@@ -761,7 +817,6 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
{
struct blkcg *blkcg = css_to_blkcg(of_css(of));
struct blkcg_gq *blkg;
- struct blk_iolatency *blkiolat;
struct blkg_conf_ctx ctx;
struct iolatency_grp *iolat;
char *p, *tok;
@@ -774,7 +829,6 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
return ret;
iolat = blkg_to_lat(ctx.blkg);
- blkiolat = iolat->blkiolat;
p = ctx.body;
ret = -EINVAL;
@@ -835,13 +889,43 @@ static int iolatency_print_limit(struct seq_file *sf, void *v)
return 0;
}
+static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
+ size_t size)
+{
+ struct latency_stat stat;
+ int cpu;
+
+ latency_stat_init(iolat, &stat);
+ preempt_disable();
+ for_each_online_cpu(cpu) {
+ struct latency_stat *s;
+ s = per_cpu_ptr(iolat->stats, cpu);
+ latency_stat_sum(iolat, &stat, s);
+ }
+ preempt_enable();
+
+ if (iolat->rq_depth.max_depth == UINT_MAX)
+ return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
+ (unsigned long long)stat.ps.missed,
+ (unsigned long long)stat.ps.total);
+ return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
+ (unsigned long long)stat.ps.missed,
+ (unsigned long long)stat.ps.total,
+ iolat->rq_depth.max_depth);
+}
+
static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
size_t size)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
- unsigned long long avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
- unsigned long long cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
+ unsigned long long avg_lat;
+ unsigned long long cur_win;
+
+ if (iolat->ssd)
+ return iolatency_ssd_stat(iolat, buf, size);
+ avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
+ cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
if (iolat->rq_depth.max_depth == UINT_MAX)
return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
avg_lat, cur_win);
@@ -858,8 +942,8 @@ static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
iolat = kzalloc_node(sizeof(*iolat), gfp, node);
if (!iolat)
return NULL;
- iolat->stats = __alloc_percpu_gfp(sizeof(struct blk_rq_stat),
- __alignof__(struct blk_rq_stat), gfp);
+ iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
+ __alignof__(struct latency_stat), gfp);
if (!iolat->stats) {
kfree(iolat);
return NULL;
@@ -876,15 +960,21 @@ static void iolatency_pd_init(struct blkg_policy_data *pd)
u64 now = ktime_to_ns(ktime_get());
int cpu;
+ iolat->ssd = blk_queue_nonrot(blkg->q);
+
for_each_possible_cpu(cpu) {
- struct blk_rq_stat *stat;
+ struct latency_stat *stat;
stat = per_cpu_ptr(iolat->stats, cpu);
- blk_rq_stat_init(stat);
+ latency_stat_init(iolat, stat);
}
+ latency_stat_init(iolat, &iolat->cur_stat);
rq_wait_init(&iolat->rq_wait);
spin_lock_init(&iolat->child_lat.lock);
- iolat->rq_depth.queue_depth = blk_queue_depth(blkg->q);
+ iolat->rq_depth.queue_depth = blkg->q->nr_requests;
iolat->rq_depth.max_depth = UINT_MAX;
iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
iolat->blkiolat = blkiolat;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index d1b9dd03da25..bbd44666f2b5 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -29,9 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
{
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = *biop;
- unsigned int granularity;
unsigned int op;
- int alignment;
sector_t bs_mask;
if (!q)
@@ -54,38 +52,16 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
- /* Zero-sector (unknown) and one-sector granularities are the same. */
- granularity = max(q->limits.discard_granularity >> 9, 1U);
- alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
while (nr_sects) {
- unsigned int req_sects;
- sector_t end_sect, tmp;
+ unsigned int req_sects = nr_sects;
+ sector_t end_sect;
- /*
- * Issue in chunks of the user defined max discard setting,
- * ensuring that bi_size doesn't overflow
- */
- req_sects = min_t(sector_t, nr_sects,
- q->limits.max_discard_sectors);
if (!req_sects)
goto fail;
if (req_sects > UINT_MAX >> 9)
req_sects = UINT_MAX >> 9;
- /*
- * If splitting a request, and the next starting sector would be
- * misaligned, stop the discard at the previous aligned sector.
- */
end_sect = sector + req_sects;
- tmp = end_sect;
- if (req_sects < nr_sects &&
- sector_div(tmp, granularity) != alignment) {
- end_sect = end_sect - alignment;
- sector_div(end_sect, granularity);
- end_sect = end_sect * granularity + alignment;
- req_sects = end_sect - sector;
- }
bio = next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index aaec38cc37b8..42a46744c11b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,6 +12,69 @@
#include "blk.h"
+/*
+ * Check if two bvecs from two bios can be merged into one segment. If so,
+ * there is no need to check for a gap between the two bios, since the 1st
+ * bio and the 1st bvec in the 2nd bio can be handled in one segment.
+ */
+static inline bool bios_segs_mergeable(struct request_queue *q,
+ struct bio *prev, struct bio_vec *prev_last_bv,
+ struct bio_vec *next_first_bv)
+{
+ if (!biovec_phys_mergeable(q, prev_last_bv, next_first_bv))
+ return false;
+ if (prev->bi_seg_back_size + next_first_bv->bv_len >
+ queue_max_segment_size(q))
+ return false;
+ return true;
+}
+
+static inline bool bio_will_gap(struct request_queue *q,
+ struct request *prev_rq, struct bio *prev, struct bio *next)
+{
+ struct bio_vec pb, nb;
+
+ if (!bio_has_data(prev) || !queue_virt_boundary(q))
+ return false;
+
+ /*
+ * Don't merge if the 1st bio starts with a non-zero offset; otherwise
+ * it is quite difficult to respect the sg gap limit. This matters
+ * because we work hard to merge a huge number of small single-page
+ * bios, as happens during mkfs for example.
+ */
+ if (prev_rq)
+ bio_get_first_bvec(prev_rq->bio, &pb);
+ else
+ bio_get_first_bvec(prev, &pb);
+ if (pb.bv_offset)
+ return true;
+
+ /*
+ * We don't need to worry about the merged segment ending on an
+ * unaligned virt boundary:
+ *
+ * - if 'pb' ends aligned, the merged segment ends aligned
+ * - if 'pb' ends unaligned, the next bio must include
+ * one single bvec of 'nb', otherwise the 'nb' can't
+ * merge with 'pb'
+ */
+ bio_get_last_bvec(prev, &pb);
+ bio_get_first_bvec(next, &nb);
+ if (bios_segs_mergeable(q, prev, &pb, &nb))
+ return false;
+ return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+}
+
+static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, req, req->biotail, bio);
+}
+
+static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, NULL, bio, req->bio);
+}
+
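
The gap rule that bio_will_gap() ultimately delegates to __bvec_gap_to_prev() can be stated numerically: a gap exists if the next vector starts at a nonzero offset, or if the previous vector does not end on the queue's virt boundary. A standalone illustration (a 4 KiB boundary is assumed; not code from this patch):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the __bvec_gap_to_prev() rule: gap iff the next vector has a
 * nonzero offset or the previous one ends mid-boundary. Illustration only. */
static bool gap_to_prev(unsigned long boundary_mask,
			unsigned int prev_offset, unsigned int prev_len,
			unsigned int next_offset)
{
	return next_offset || ((prev_offset + prev_len) & boundary_mask);
}

int main(void)
{
	unsigned long mask = 0xFFF;	/* 4 KiB virt boundary */

	printf("%d\n", gap_to_prev(mask, 0, 4096, 0));   /* 0: mergeable */
	printf("%d\n", gap_to_prev(mask, 0, 2048, 0));   /* 1: prev ends mid-page */
	printf("%d\n", gap_to_prev(mask, 0, 4096, 512)); /* 1: next starts at offset */
	return 0;
}
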
static struct bio *blk_bio_discard_split(struct request_queue *q,
struct bio *bio,
struct bio_set *bs,
@@ -134,9 +197,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
if (bvprvp && blk_queue_cluster(q)) {
if (seg_size + bv.bv_len > queue_max_segment_size(q))
goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
+ if (!biovec_phys_mergeable(q, bvprvp, &bv))
goto new_segment;
seg_size += bv.bv_len;
@@ -267,9 +328,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
if (seg_size + bv.bv_len
> queue_max_segment_size(q))
goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+ if (!biovec_phys_mergeable(q, &bvprv, &bv))
goto new_segment;
seg_size += bv.bv_len;
@@ -349,17 +408,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
bio_get_last_bvec(bio, &end_bv);
bio_get_first_bvec(nxt, &nxt_bv);
- if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
- return 0;
-
- /*
- * bio and nxt are contiguous in memory; check if the queue allows
- * these two to be merged into one
- */
- if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
- return 1;
-
- return 0;
+ return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
}
static inline void
@@ -373,10 +422,7 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
if (*sg && *cluster) {
if ((*sg)->length + nbytes > queue_max_segment_size(q))
goto new_segment;
-
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+ if (!biovec_phys_mergeable(q, bvprv, bvec))
goto new_segment;
(*sg)->length += nbytes;
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index cb1e6cf7ac48..41b86f50d126 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -102,6 +102,14 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags,
return 0;
}
+static int queue_pm_only_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+
+ seq_printf(m, "%d\n", atomic_read(&q->pm_only));
+ return 0;
+}
+
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(QUEUED),
@@ -132,7 +140,6 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
QUEUE_FLAG_NAME(QUIESCED),
- QUEUE_FLAG_NAME(PREEMPT_ONLY),
};
#undef QUEUE_FLAG_NAME
@@ -209,6 +216,7 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf,
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
{ "poll_stat", 0400, queue_poll_stat_show },
{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
+ { "pm_only", 0600, queue_pm_only_show, NULL },
{ "state", 0600, queue_state_show, queue_state_write },
{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
@@ -423,8 +431,7 @@ static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
const struct show_busy_params *params = data;
- if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
- blk_mq_rq_state(rq) != MQ_RQ_IDLE)
+ if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx)
__blk_mq_debugfs_rq_show(params->m,
list_entry_rq(&rq->queuelist));
}
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 4e028ee42430..8a9544203173 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -49,12 +49,12 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
return true;
}
-static inline void blk_mq_sched_completed_request(struct request *rq)
+static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
struct elevator_queue *e = rq->q->elevator;
if (e && e->type->ops.mq.completed_request)
- e->type->ops.mq.completed_request(rq);
+ e->type->ops.mq.completed_request(rq, now);
}
static inline void blk_mq_sched_started_request(struct request *rq)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 94e1ed667b6e..cfda95b85d34 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -232,13 +232,26 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
/*
* We can hit rq == NULL here, because the tagging functions
- * test and set the bit before assining ->rqs[].
+ * test and set the bit before assigning ->rqs[].
*/
if (rq && rq->q == hctx->queue)
iter_data->fn(hctx, rq, iter_data->data, reserved);
return true;
}
+/**
+ * bt_for_each - iterate over the requests associated with a hardware queue
+ * @hctx: Hardware queue to examine.
+ * @bt: sbitmap to examine. This is either the breserved_tags member
+ * or the bitmap_tags member of struct blk_mq_tags.
+ * @fn: Pointer to the function that will be called for each request
+ * associated with @hctx that has been assigned a driver tag.
+ * @fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
+ * where rq is a pointer to a request.
+ * @data: Will be passed as third argument to @fn.
+ * @reserved: Indicates whether @bt is the breserved_tags member or the
+ * bitmap_tags member of struct blk_mq_tags.
+ */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
busy_iter_fn *fn, void *data, bool reserved)
{
@@ -280,6 +293,18 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
return true;
}
+/**
+ * bt_tags_for_each - iterate over the requests in a tag map
+ * @tags: Tag map to iterate over.
+ * @bt: sbitmap to examine. This is either the breserved_tags member
+ * or the bitmap_tags member of struct blk_mq_tags.
+ * @fn: Pointer to the function that will be called for each started
+ * request. @fn will be called as follows: @fn(rq, @data,
+ * @reserved) where rq is a pointer to a request.
+ * @data: Will be passed as second argument to @fn.
+ * @reserved: Indicates whether @bt is the breserved_tags member or the
+ * bitmap_tags member of struct blk_mq_tags.
+ */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
busy_tag_iter_fn *fn, void *data, bool reserved)
{
@@ -294,6 +319,15 @@ static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}
+/**
+ * blk_mq_all_tag_busy_iter - iterate over all started requests in a tag map
+ * @tags: Tag map to iterate over.
+ * @fn: Pointer to the function that will be called for each started
+ * request. @fn will be called as follows: @fn(rq, @priv,
+ * reserved) where rq is a pointer to a request. 'reserved'
+ * indicates whether or not @rq is a reserved request.
+ * @priv: Will be passed as second argument to @fn.
+ */
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
busy_tag_iter_fn *fn, void *priv)
{
@@ -302,6 +336,15 @@ static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}
+/**
+ * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
+ * @tagset: Tag set to iterate over.
+ * @fn: Pointer to the function that will be called for each started
+ * request. @fn will be called as follows: @fn(rq, @priv,
+ * reserved) where rq is a pointer to a request. 'reserved'
+ * indicates whether or not @rq is a reserved request.
+ * @priv: Will be passed as second argument to @fn.
+ */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv)
{
@@ -314,6 +357,20 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
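
A typical user of blk_mq_tagset_busy_iter() is a driver that needs to walk its in-flight requests, e.g. for timeout handling or accounting. A hypothetical sketch (my_count_inflight and my_driver_inflight are illustrative names; the void-returning busy_tag_iter_fn signature matches this tree):

static void my_count_inflight(struct request *rq, void *priv, bool reserved)
{
	unsigned int *inflight = priv;

	(*inflight)++;
}

static unsigned int my_driver_inflight(struct blk_mq_tag_set *set)
{
	unsigned int inflight = 0;

	/* Calls my_count_inflight() once per started request in the set. */
	blk_mq_tagset_busy_iter(set, my_count_inflight, &inflight);
	return inflight;
}
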
+/**
+ * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
+ * @q: Request queue to examine.
+ * @fn: Pointer to the function that will be called for each request
+ * on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
+ * reserved) where rq is a pointer to a request and hctx points
+ * to the hardware queue associated with the request. 'reserved'
+ * indicates whether or not @rq is a reserved request.
+ * @priv: Will be passed as third argument to @fn.
+ *
+ * Note: if @q->tag_set is shared with other request queues then @fn will be
+ * called for all requests on all queues that share that tag set and not only
+ * for requests associated with @q.
+ */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv)
{
@@ -321,23 +378,20 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
int i;
/*
- * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
- * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
- * to avoid race with it. __blk_mq_update_nr_hw_queues will users
- * synchronize_rcu to ensure all of the users go out of the critical
- * section below and see zeroed q_usage_counter.
+ * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
+ * while the queue is frozen. So we can use q_usage_counter to avoid
+ * racing with it. __blk_mq_update_nr_hw_queues() uses
+ * synchronize_rcu() to ensure this function left the critical section
+ * below.
*/
- rcu_read_lock();
- if (percpu_ref_is_zero(&q->q_usage_counter)) {
- rcu_read_unlock();
+ if (!percpu_ref_tryget(&q->q_usage_counter))
return;
- }
queue_for_each_hw_ctx(q, hctx, i) {
struct blk_mq_tags *tags = hctx->tags;
/*
- * If not software queues are currently mapped to this
+ * If no software queues are currently mapped to this
* hardware queue, there's nothing to check
*/
if (!blk_mq_hw_queue_mapped(hctx))
@@ -347,7 +401,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
}
- rcu_read_unlock();
+ blk_queue_exit(q);
}
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 85a1c1a59c72..dcf10e39995a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,6 +33,7 @@
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
+#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
@@ -198,7 +199,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
WARN_ON_ONCE(freeze_depth < 0);
if (!freeze_depth) {
- percpu_ref_reinit(&q->q_usage_counter);
+ percpu_ref_resurrect(&q->q_usage_counter);
wake_up_all(&q->mq_freeze_wq);
}
}
@@ -475,6 +476,7 @@ static void __blk_mq_free_request(struct request *rq)
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
const int sched_tag = rq->internal_tag;
+ blk_pm_mark_last_busy(rq);
if (rq->tag != -1)
blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
@@ -526,6 +528,9 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
blk_stat_add(rq, now);
}
+ if (rq->internal_tag != -1)
+ blk_mq_sched_completed_request(rq, now);
+
blk_account_io_done(rq, now);
if (rq->end_io) {
@@ -562,8 +567,20 @@ static void __blk_mq_complete_request(struct request *rq)
if (!blk_mq_mark_complete(rq))
return;
- if (rq->internal_tag != -1)
- blk_mq_sched_completed_request(rq);
+
+ /*
+ * Most single-queue controllers have only one irq vector for
+ * handling I/O completion, and that vector's affinity is set to
+ * all possible CPUs. On most architectures, this affinity means
+ * the irq is handled on one specific CPU.
+ *
+ * So complete the I/O request in softirq context in the single-queue
+ * case, to avoid degrading I/O performance through irqs-off latency.
+ */
+ if (rq->q->nr_hw_queues == 1) {
+ __blk_complete_request(rq);
+ return;
+ }
if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
rq->q->softirq_done_fn(rq);
@@ -1628,7 +1645,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
BUG_ON(!rq->q);
if (rq->mq_ctx != this_ctx) {
if (this_ctx) {
- trace_block_unplug(this_q, depth, from_schedule);
+ trace_block_unplug(this_q, depth, !from_schedule);
blk_mq_sched_insert_requests(this_q, this_ctx,
&ctx_list,
from_schedule);
@@ -1648,7 +1665,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
* on 'ctx_list'. Do those.
*/
if (this_ctx) {
- trace_block_unplug(this_q, depth, from_schedule);
+ trace_block_unplug(this_q, depth, !from_schedule);
blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
from_schedule);
}
@@ -2137,8 +2154,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
- blk_mq_debugfs_unregister_hctx(hctx);
-
if (blk_mq_hw_queue_mapped(hctx))
blk_mq_tag_idle(hctx);
@@ -2165,6 +2180,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
queue_for_each_hw_ctx(q, hctx, i) {
if (i == nr_queue)
break;
+ blk_mq_debugfs_unregister_hctx(hctx);
blk_mq_exit_hctx(q, set, hctx, i);
}
}
@@ -2194,12 +2210,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
* runtime
*/
hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
- GFP_KERNEL, node);
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
if (!hctx->ctxs)
goto unregister_cpu_notifier;
- if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
- node))
+ if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
goto free_ctxs;
hctx->nr_ctx = 0;
@@ -2212,7 +2228,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;
- hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
+ hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
if (!hctx->fq)
goto exit_hctx;
@@ -2222,8 +2239,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
if (hctx->flags & BLK_MQ_F_BLOCKING)
init_srcu_struct(hctx->srcu);
- blk_mq_debugfs_register_hctx(q, hctx);
-
return 0;
free_fq:
@@ -2492,6 +2507,39 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
}
EXPORT_SYMBOL(blk_mq_init_queue);
+/*
+ * Helper for setting up a single-hardware-queue request queue from mq ops,
+ * a given queue depth, and the passed-in tag set flags.
+ */
+struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops,
+ unsigned int queue_depth,
+ unsigned int set_flags)
+{
+ struct request_queue *q;
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->nr_hw_queues = 1;
+ set->queue_depth = queue_depth;
+ set->numa_node = NUMA_NO_NODE;
+ set->flags = set_flags;
+
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ERR_PTR(ret);
+
+ q = blk_mq_init_queue(set);
+ if (IS_ERR(q)) {
+ blk_mq_free_tag_set(set);
+ return q;
+ }
+
+ return q;
+}
+EXPORT_SYMBOL(blk_mq_init_sq_queue);
+
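
A hypothetical single-queue driver would use the new helper like this (my_tag_set, my_mq_ops, and the depth of 64 are illustrative; on failure the helper frees the tag set itself, so the caller only checks the returned queue):

static struct blk_mq_tag_set my_tag_set;

static struct request_queue *my_driver_alloc_queue(void)
{
	struct request_queue *q;

	q = blk_mq_init_sq_queue(&my_tag_set, &my_mq_ops, 64,
				 BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(q))
		return NULL;	/* or propagate PTR_ERR(q) */
	return q;
}
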
static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
@@ -2506,48 +2554,90 @@ static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
return hw_ctx_size;
}
+static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
+ struct blk_mq_tag_set *set, struct request_queue *q,
+ int hctx_idx, int node)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ hctx = kzalloc_node(blk_mq_hw_ctx_size(set),
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+ node);
+ if (!hctx)
+ return NULL;
+
+ if (!zalloc_cpumask_var_node(&hctx->cpumask,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+ node)) {
+ kfree(hctx);
+ return NULL;
+ }
+
+ atomic_set(&hctx->nr_active, 0);
+ hctx->numa_node = node;
+ hctx->queue_num = hctx_idx;
+
+ if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
+ free_cpumask_var(hctx->cpumask);
+ kfree(hctx);
+ return NULL;
+ }
+ blk_mq_hctx_kobj_init(hctx);
+
+ return hctx;
+}
+
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
struct request_queue *q)
{
- int i, j;
+ int i, j, end;
struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
- blk_mq_sysfs_unregister(q);
-
/* protect against switching io scheduler */
mutex_lock(&q->sysfs_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
int node;
-
- if (hctxs[i])
- continue;
+ struct blk_mq_hw_ctx *hctx;
node = blk_mq_hw_queue_to_node(q->mq_map, i);
- hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
- GFP_KERNEL, node);
- if (!hctxs[i])
- break;
-
- if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
- node)) {
- kfree(hctxs[i]);
- hctxs[i] = NULL;
- break;
- }
-
- atomic_set(&hctxs[i]->nr_active, 0);
- hctxs[i]->numa_node = node;
- hctxs[i]->queue_num = i;
+ /*
+ * If the hw queue has been mapped to another numa node,
+ * we need to realloc the hctx. If allocation fails, fall back
+ * to the previous one.
+ */
+ if (hctxs[i] && (hctxs[i]->numa_node == node))
+ continue;
- if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
- free_cpumask_var(hctxs[i]->cpumask);
- kfree(hctxs[i]);
- hctxs[i] = NULL;
- break;
+ hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
+ if (hctx) {
+ if (hctxs[i]) {
+ blk_mq_exit_hctx(q, set, hctxs[i], i);
+ kobject_put(&hctxs[i]->kobj);
+ }
+ hctxs[i] = hctx;
+ } else {
+ if (hctxs[i])
+ pr_warn("Allocate new hctx on node %d fails,\
+ fallback to previous one on node %d\n",
+ node, hctxs[i]->numa_node);
+ else
+ break;
}
- blk_mq_hctx_kobj_init(hctxs[i]);
}
- for (j = i; j < q->nr_hw_queues; j++) {
+ /*
+ * If increasing nr_hw_queues failed, free the newly allocated
+ * hctxs and keep the previous q->nr_hw_queues.
+ */
+ if (i != set->nr_hw_queues) {
+ j = q->nr_hw_queues;
+ end = i;
+ } else {
+ j = i;
+ end = q->nr_hw_queues;
+ q->nr_hw_queues = set->nr_hw_queues;
+ }
+
+ for (; j < end; j++) {
struct blk_mq_hw_ctx *hctx = hctxs[j];
if (hctx) {
@@ -2559,9 +2649,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
}
}
- q->nr_hw_queues = i;
mutex_unlock(&q->sysfs_lock);
- blk_mq_sysfs_register(q);
}
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
@@ -2659,25 +2747,6 @@ void blk_mq_free_queue(struct request_queue *q)
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
}
-/* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
-{
- WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
-
- blk_mq_debugfs_unregister_hctxs(q);
- blk_mq_sysfs_unregister(q);
-
- /*
- * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
- * we should change hctx numa_node according to the new topology (this
- * involves freeing and re-allocating memory, worth doing?)
- */
- blk_mq_map_swqueue(q);
-
- blk_mq_sysfs_register(q);
- blk_mq_debugfs_register_hctxs(q);
-}
-
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
int i;
@@ -2964,6 +3033,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
{
struct request_queue *q;
LIST_HEAD(head);
+ int prev_nr_hw_queues;
lockdep_assert_held(&set->tag_list_lock);
@@ -2987,11 +3057,30 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
if (!blk_mq_elv_switch_none(&head, q))
goto switch_back;
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_debugfs_unregister_hctxs(q);
+ blk_mq_sysfs_unregister(q);
+ }
+
+ prev_nr_hw_queues = set->nr_hw_queues;
set->nr_hw_queues = nr_hw_queues;
blk_mq_update_queue_map(set);
+fallback:
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q);
- blk_mq_queue_reinit(q);
+ if (q->nr_hw_queues != set->nr_hw_queues) {
+ pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
+ nr_hw_queues, prev_nr_hw_queues);
+ set->nr_hw_queues = prev_nr_hw_queues;
+ blk_mq_map_queues(set);
+ goto fallback;
+ }
+ blk_mq_map_swqueue(q);
+ }
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_sysfs_register(q);
+ blk_mq_debugfs_register_hctxs(q);
}
switch_back:
diff --git a/block/blk-pm.c b/block/blk-pm.c
new file mode 100644
index 000000000000..f8fdae01bea2
--- /dev/null
+++ b/block/blk-pm.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/blk-mq.h>
+#include <linux/blk-pm.h>
+#include <linux/blkdev.h>
+#include <linux/pm_runtime.h>
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+/**
+ * blk_pm_runtime_init - Block layer runtime PM initialization routine
+ * @q: the queue of the device
+ * @dev: the device the queue belongs to
+ *
+ * Description:
+ * Initialize runtime-PM-related fields for @q and start auto suspend for
+ * @dev. Drivers that want to take advantage of request-based runtime PM
+ * should call this function after @dev has been initialized and its
+ * request queue @q has been allocated, but while runtime PM cannot happen
+ * yet (either because it is disabled/forbidden or because usage_count > 0).
+ * In most cases, drivers should call this function before any I/O has
+ * taken place.
+ *
+ * This function sets up autosuspend for the device; the autosuspend delay
+ * is set to -1 to make runtime suspend impossible until an updated value is
+ * set by either the user or the driver. Drivers do not need to touch other
+ * autosuspend settings.
+ *
+ * Block layer runtime PM is request based, so it only works for drivers
+ * that use requests as their I/O unit instead of operating on bios directly.
+ */
+void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+{
+ q->dev = dev;
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_set_autosuspend_delay(q->dev, -1);
+ pm_runtime_use_autosuspend(q->dev);
+}
+EXPORT_SYMBOL(blk_pm_runtime_init);
+
+/**
+ * blk_pre_runtime_suspend - Pre runtime suspend check
+ * @q: the queue of the device
+ *
+ * Description:
+ * This function will check if runtime suspend is allowed for the device
+ * by examining if there are any requests pending in the queue. If there
+ * are requests pending, the device cannot be runtime suspended; otherwise,
+ * the queue's status will be updated to SUSPENDING and the driver can
+ * proceed to suspend the device.
+ *
+ * If suspend is not allowed, we mark the device as last busy so that the
+ * runtime PM core will try to autosuspend it again later.
+ *
+ * This function should be called near the start of the device's
+ * runtime_suspend callback.
+ *
+ * Return:
+ * 0 - OK to runtime suspend the device
+ * -EBUSY - Device should not be runtime suspended
+ */
+int blk_pre_runtime_suspend(struct request_queue *q)
+{
+ int ret = 0;
+
+ if (!q->dev)
+ return ret;
+
+ WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
+
+ /*
+ * Increase the pm_only counter before checking whether any
+ * non-PM blk_queue_enter() calls are in progress, so that no new
+ * non-PM blk_queue_enter() calls can succeed before the pm_only
+ * counter is decreased again.
+ */
+ blk_set_pm_only(q);
+ ret = -EBUSY;
+ /* Switch q_usage_counter from per-cpu to atomic mode. */
+ blk_freeze_queue_start(q);
+ /*
+ * Wait until atomic mode has been reached. Since that
+ * involves calling call_rcu(), it is guaranteed that later
+ * blk_queue_enter() calls see the pm-only state. See also
+ * http://lwn.net/Articles/573497/.
+ */
+ percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
+ if (percpu_ref_is_zero(&q->q_usage_counter))
+ ret = 0;
+ /* Switch q_usage_counter back to per-cpu mode. */
+ blk_mq_unfreeze_queue(q);
+
+ spin_lock_irq(q->queue_lock);
+ if (ret < 0)
+ pm_runtime_mark_last_busy(q->dev);
+ else
+ q->rpm_status = RPM_SUSPENDING;
+ spin_unlock_irq(q->queue_lock);
+
+ if (ret)
+ blk_clear_pm_only(q);
+
+ return ret;
+}
+EXPORT_SYMBOL(blk_pre_runtime_suspend);
+
+/**
+ * blk_post_runtime_suspend - Post runtime suspend processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_suspend function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime suspend function and mark last busy for the device so
+ * that PM core will try to auto suspend the device at a later time.
+ *
+ * This function should be called near the end of the device's
+ * runtime_suspend callback.
+ */
+void blk_post_runtime_suspend(struct request_queue *q, int err)
+{
+ if (!q->dev)
+ return;
+
+ spin_lock_irq(q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_SUSPENDED;
+ } else {
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ }
+ spin_unlock_irq(q->queue_lock);
+
+ if (err)
+ blk_clear_pm_only(q);
+}
+EXPORT_SYMBOL(blk_post_runtime_suspend);
+
+/**
+ * blk_pre_runtime_resume - Pre runtime resume processing
+ * @q: the queue of the device
+ *
+ * Description:
+ * Update the queue's runtime status to RESUMING in preparation for the
+ * runtime resume of the device.
+ *
+ * This function should be called near the start of the device's
+ * runtime_resume callback.
+ */
+void blk_pre_runtime_resume(struct request_queue *q)
+{
+ if (!q->dev)
+ return;
+
+ spin_lock_irq(q->queue_lock);
+ q->rpm_status = RPM_RESUMING;
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_pre_runtime_resume);
+
+/**
+ * blk_post_runtime_resume - Post runtime resume processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_resume function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime_resume function. If the device resumed successfully,
+ * process the requests that were queued while it was resuming, then mark
+ * the device as last busy and initiate autosuspend for it.
+ *
+ * This function should be called near the end of the device's
+ * runtime_resume callback.
+ */
+void blk_post_runtime_resume(struct request_queue *q, int err)
+{
+ if (!q->dev)
+ return;
+
+ spin_lock_irq(q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ pm_request_autosuspend(q->dev);
+ } else {
+ q->rpm_status = RPM_SUSPENDED;
+ }
+ spin_unlock_irq(q->queue_lock);
+
+ if (!err)
+ blk_clear_pm_only(q);
+}
+EXPORT_SYMBOL(blk_post_runtime_resume);
+
+/**
+ * blk_set_runtime_active - Force runtime status of the queue to be active
+ * @q: the queue of the device
+ *
+ * If the device is left runtime suspended during system suspend the resume
+ * hook typically resumes the device and corrects runtime status
+ * accordingly. However, that does not affect the queue runtime PM status
+ * which is still "suspended". This prevents processing requests from the
+ * queue.
+ *
+ * This function can be used in a driver's resume hook to correct the
+ * queue's runtime PM status and re-enable taking requests from the queue.
+ * It should be called before the first request is added to the queue.
+ */
+void blk_set_runtime_active(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ pm_request_autosuspend(q->dev);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_set_runtime_active);
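
Taken together, a request-based driver wires these helpers into its runtime PM callbacks roughly as follows (a hedged sketch, not from this patch; struct my_driver_data, my_device_suspend() and my_device_resume() are hypothetical stand-ins for the driver's hardware handling):

static int my_runtime_suspend(struct device *dev)
{
	struct my_driver_data *d = dev_get_drvdata(dev);
	int err;

	err = blk_pre_runtime_suspend(d->queue);
	if (err)
		return err;	/* queue busy: -EBUSY */
	err = my_device_suspend(d);
	blk_post_runtime_suspend(d->queue, err);
	return err;
}

static int my_runtime_resume(struct device *dev)
{
	struct my_driver_data *d = dev_get_drvdata(dev);
	int err;

	blk_pre_runtime_resume(d->queue);
	err = my_device_resume(d);
	blk_post_runtime_resume(d->queue, err);
	return err;
}
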
diff --git a/block/blk-pm.h b/block/blk-pm.h
new file mode 100644
index 000000000000..a8564ea72a41
--- /dev/null
+++ b/block/blk-pm.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BLOCK_BLK_PM_H_
+#define _BLOCK_BLK_PM_H_
+
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_PM
+static inline void blk_pm_request_resume(struct request_queue *q)
+{
+ if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+ q->rpm_status == RPM_SUSPENDING))
+ pm_request_resume(q->dev);
+}
+
+static inline void blk_pm_mark_last_busy(struct request *rq)
+{
+ if (rq->q->dev && !(rq->rq_flags & RQF_PM))
+ pm_runtime_mark_last_busy(rq->q->dev);
+}
+
+static inline void blk_pm_requeue_request(struct request *rq)
+{
+ lockdep_assert_held(rq->q->queue_lock);
+
+ if (rq->q->dev && !(rq->rq_flags & RQF_PM))
+ rq->q->nr_pending--;
+}
+
+static inline void blk_pm_add_request(struct request_queue *q,
+ struct request *rq)
+{
+ lockdep_assert_held(q->queue_lock);
+
+ if (q->dev && !(rq->rq_flags & RQF_PM))
+ q->nr_pending++;
+}
+
+static inline void blk_pm_put_request(struct request *rq)
+{
+ lockdep_assert_held(rq->q->queue_lock);
+
+ if (rq->q->dev && !(rq->rq_flags & RQF_PM))
+ --rq->q->nr_pending;
+}
+#else
+static inline void blk_pm_request_resume(struct request_queue *q)
+{
+}
+
+static inline void blk_pm_mark_last_busy(struct request *rq)
+{
+}
+
+static inline void blk_pm_requeue_request(struct request *rq)
+{
+}
+
+static inline void blk_pm_add_request(struct request_queue *q,
+ struct request *rq)
+{
+}
+
+static inline void blk_pm_put_request(struct request *rq)
+{
+}
+#endif
+
+#endif /* _BLOCK_BLK_PM_H_ */
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 15c1f5e12eb8..e47a2f751884 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -97,8 +97,8 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
void __blk_complete_request(struct request *req)
{
- int ccpu, cpu;
struct request_queue *q = req->q;
+ int cpu, ccpu = q->mq_ops ? req->mq_ctx->cpu : req->cpu;
unsigned long flags;
bool shared = false;
@@ -110,8 +110,7 @@ void __blk_complete_request(struct request *req)
/*
* Select completion CPU
*/
- if (req->cpu != -1) {
- ccpu = req->cpu;
+ if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) {
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
shared = cpus_share_cache(cpu, ccpu);
} else
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 7587b1c3caaf..90561af85a62 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -190,6 +190,7 @@ void blk_stat_enable_accounting(struct request_queue *q)
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
spin_unlock(&q->stats->lock);
}
+EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 01d0620a4e4a..4bda70e8db48 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -84,8 +84,7 @@ struct throtl_service_queue {
* RB tree of active children throtl_grp's, which are sorted by
* their ->disptime.
*/
- struct rb_root pending_tree; /* RB tree of active tgs */
- struct rb_node *first_pending; /* first node in the tree */
+ struct rb_root_cached pending_tree; /* RB tree of active tgs */
unsigned int nr_pending; /* # queued in the tree */
unsigned long first_pending_disptime; /* disptime of the first tg */
struct timer_list pending_timer; /* fires on first_pending_disptime */
@@ -475,7 +474,7 @@ static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
INIT_LIST_HEAD(&sq->queued[0]);
INIT_LIST_HEAD(&sq->queued[1]);
- sq->pending_tree = RB_ROOT;
+ sq->pending_tree = RB_ROOT_CACHED;
timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}
@@ -616,31 +615,23 @@ static void throtl_pd_free(struct blkg_policy_data *pd)
static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
+ struct rb_node *n;
/* Service tree is empty */
if (!parent_sq->nr_pending)
return NULL;
- if (!parent_sq->first_pending)
- parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
-
- if (parent_sq->first_pending)
- return rb_entry_tg(parent_sq->first_pending);
-
- return NULL;
-}
-
-static void rb_erase_init(struct rb_node *n, struct rb_root *root)
-{
- rb_erase(n, root);
- RB_CLEAR_NODE(n);
+ n = rb_first_cached(&parent_sq->pending_tree);
+ WARN_ON_ONCE(!n);
+ if (!n)
+ return NULL;
+ return rb_entry_tg(n);
}
static void throtl_rb_erase(struct rb_node *n,
struct throtl_service_queue *parent_sq)
{
- if (parent_sq->first_pending == n)
- parent_sq->first_pending = NULL;
- rb_erase_init(n, &parent_sq->pending_tree);
+ rb_erase_cached(n, &parent_sq->pending_tree);
+ RB_CLEAR_NODE(n);
--parent_sq->nr_pending;
}
@@ -658,11 +649,11 @@ static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
static void tg_service_queue_add(struct throtl_grp *tg)
{
struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
- struct rb_node **node = &parent_sq->pending_tree.rb_node;
+ struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
struct rb_node *parent = NULL;
struct throtl_grp *__tg;
unsigned long key = tg->disptime;
- int left = 1;
+ bool leftmost = true;
while (*node != NULL) {
parent = *node;
@@ -672,15 +663,13 @@ static void tg_service_queue_add(struct throtl_grp *tg)
node = &parent->rb_left;
else {
node = &parent->rb_right;
- left = 0;
+ leftmost = false;
}
}
- if (left)
- parent_sq->first_pending = &tg->rb_node;
-
rb_link_node(&tg->rb_node, parent, node);
- rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
+ rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
+ leftmost);
}
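
The conversion above follows the standard rb_root_cached pattern: track whether the descent ever went right, pass that to rb_insert_color_cached(), and rb_first_cached() becomes O(1). A minimal sketch of the same pattern against the kernel rbtree API (struct item and its key are illustrative):

struct item {
	struct rb_node node;
	unsigned long key;
};

static void item_insert(struct rb_root_cached *root, struct item *new)
{
	struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct item *cur = rb_entry(*link, struct item, node);

		parent = *link;
		if (new->key < cur->key) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}
	rb_link_node(&new->node, parent, link);
	rb_insert_color_cached(&new->node, root, leftmost);
}

static void item_remove(struct rb_root_cached *root, struct item *it)
{
	rb_erase_cached(&it->node, root);	/* fixes up cached leftmost */
	RB_CLEAR_NODE(&it->node);
}
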
static void __throtl_enqueue_tg(struct throtl_grp *tg)
@@ -2126,21 +2115,11 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
}
#endif
-static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
-{
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- /* fallback to root_blkg if we fail to get a blkg ref */
- if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
- bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-#endif
-}
-
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
struct bio *bio)
{
struct throtl_qnode *qn = NULL;
- struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
+ struct throtl_grp *tg = blkg_to_tg(blkg);
struct throtl_service_queue *sq;
bool rw = bio_data_dir(bio);
bool throttled = false;
@@ -2159,7 +2138,6 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
if (unlikely(blk_queue_bypass(q)))
goto out_unlock;
- blk_throtl_assoc_bio(tg, bio);
blk_throtl_update_idletime(tg);
sq = &tg->service_queue;
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 8e20a0677dcf..8ac93fcbaa2e 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -310,6 +310,7 @@ static void scale_up(struct rq_wb *rwb)
rq_depth_scale_up(&rwb->rq_depth);
calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
+ rwb_wake_all(rwb);
rwb_trace_step(rwb, "scale up");
}
@@ -318,7 +319,6 @@ static void scale_down(struct rq_wb *rwb, bool hard_throttle)
rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
- rwb_wake_all(rwb);
rwb_trace_step(rwb, "scale down");
}
diff --git a/block/blk.h b/block/blk.h
index 9db4e389582c..3d2aecba96a4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -4,6 +4,7 @@
#include <linux/idr.h>
#include <linux/blk-mq.h>
+#include <xen/xen.h>
#include "blk-mq.h"
/* Amount of time in which a process may batch requests */
@@ -124,7 +125,7 @@ static inline void __blk_get_queue(struct request_queue *q)
}
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
- int node, int cmd_size);
+ int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
int blk_init_rl(struct request_list *rl, struct request_queue *q,
@@ -149,6 +150,41 @@ static inline void blk_queue_enter_live(struct request_queue *q)
percpu_ref_get(&q->q_usage_counter);
}
+static inline bool biovec_phys_mergeable(struct request_queue *q,
+ struct bio_vec *vec1, struct bio_vec *vec2)
+{
+ unsigned long mask = queue_segment_boundary(q);
+ phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
+ phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
+
+ if (addr1 + vec1->bv_len != addr2)
+ return false;
+ if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
+ return false;
+ if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
+ return false;
+ return true;
+}
+
+static inline bool __bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ return offset ||
+ ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ if (!queue_virt_boundary(q))
+ return false;
+ return __bvec_gap_to_prev(q, bprv, offset);
+}
+
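
The last check in biovec_phys_mergeable() encodes the segment-boundary rule: (addr1 | mask) != ((addr2 + len2 - 1) | mask) holds exactly when the merged segment would straddle a (mask + 1)-sized boundary. A standalone numeric illustration (a 64 KiB boundary is assumed; not code from this patch):

#include <stdbool.h>
#include <stdio.h>

/* True iff a segment starting at addr1 and ending at addr2 + len2 - 1
 * crosses a (mask + 1)-sized boundary. Illustration only. */
static bool crosses_boundary(unsigned long mask, unsigned long addr1,
			     unsigned long addr2, unsigned long len2)
{
	return (addr1 | mask) != ((addr2 + len2 - 1) | mask);
}

int main(void)
{
	unsigned long mask = 0xFFFF;	/* 64 KiB segment boundary */

	/* contiguous and inside one 64 KiB window: mergeable */
	printf("%d\n", crosses_boundary(mask, 0x10000, 0x11000, 0x1000));
	/* contiguous, but the end spills past 0x1FFFF: not mergeable */
	printf("%d\n", crosses_boundary(mask, 0x1F000, 0x20000, 0x1000));
	return 0;
}
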
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
@@ -158,7 +194,38 @@ static inline bool bio_integrity_endio(struct bio *bio)
return __bio_integrity_endio(bio);
return true;
}
-#else
+
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
+{
+ struct bio_integrity_payload *bip = bio_integrity(req->bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
+{
+ return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ return false;
+}
+
static inline void blk_flush_integrity(void)
{
}
@@ -166,7 +233,7 @@ static inline bool bio_integrity_endio(struct bio *bio)
{
return true;
}
-#endif
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
diff --git a/block/bounce.c b/block/bounce.c
index bc63b3a2d18c..ec0d99995f5f 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -31,6 +31,24 @@
static struct bio_set bounce_bio_set, bounce_bio_split;
static mempool_t page_pool, isa_page_pool;
+static void init_bounce_bioset(void)
+{
+ static bool bounce_bs_setup;
+ int ret;
+
+ if (bounce_bs_setup)
+ return;
+
+ ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ BUG_ON(ret);
+ if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
+ BUG_ON(1);
+
+ ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
+ BUG_ON(ret);
+ bounce_bs_setup = true;
+}
+
#if defined(CONFIG_HIGHMEM)
static __init int init_emergency_pool(void)
{
@@ -44,14 +62,7 @@ static __init int init_emergency_pool(void)
BUG_ON(ret);
pr_info("pool size: %d pages\n", POOL_SIZE);
- ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
- BUG_ON(ret);
- if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
- BUG_ON(1);
-
- ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
- BUG_ON(ret);
-
+ init_bounce_bioset();
return 0;
}
@@ -86,6 +97,8 @@ static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}
+static DEFINE_MUTEX(isa_mutex);
+
/*
 * gets called "every" time someone initializes a queue with BLK_BOUNCE_ISA
* as the max address, so check if the pool has already been created.
@@ -94,14 +107,20 @@ int init_emergency_isa_pool(void)
{
int ret;
- if (mempool_initialized(&isa_page_pool))
+ mutex_lock(&isa_mutex);
+
+ if (mempool_initialized(&isa_page_pool)) {
+ mutex_unlock(&isa_mutex);
return 0;
+ }
ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa,
mempool_free_pages, (void *) 0);
BUG_ON(ret);
pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
+ init_bounce_bioset();
+ mutex_unlock(&isa_mutex);
return 0;
}
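
The shape of this fix is the classic one-time initialization under a mutex: the "already initialized" check is only trustworthy once it is performed with the lock held. A generic sketch of the pattern (do_expensive_setup() is hypothetical):

static DEFINE_MUTEX(init_mutex);
static bool initialized;

static int init_once(void)
{
	mutex_lock(&init_mutex);
	if (!initialized) {
		do_expensive_setup();	/* hypothetical one-time work */
		initialized = true;
	}
	mutex_unlock(&init_mutex);
	return 0;
}
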
@@ -257,7 +276,9 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
}
}
- bio_clone_blkcg_association(bio, bio_src);
+ bio_clone_blkg_association(bio, bio_src);
+
+ blkcg_bio_issue_init(bio);
return bio;
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 2eb87444b157..6a3d87dd3c1a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1644,14 +1644,20 @@ static void cfq_pd_offline(struct blkg_policy_data *pd)
int i;
for (i = 0; i < IOPRIO_BE_NR; i++) {
- if (cfqg->async_cfqq[0][i])
+ if (cfqg->async_cfqq[0][i]) {
cfq_put_queue(cfqg->async_cfqq[0][i]);
- if (cfqg->async_cfqq[1][i])
+ cfqg->async_cfqq[0][i] = NULL;
+ }
+ if (cfqg->async_cfqq[1][i]) {
cfq_put_queue(cfqg->async_cfqq[1][i]);
+ cfqg->async_cfqq[1][i] = NULL;
+ }
}
- if (cfqg->async_idle_cfqq)
+ if (cfqg->async_idle_cfqq) {
cfq_put_queue(cfqg->async_idle_cfqq);
+ cfqg->async_idle_cfqq = NULL;
+ }
/*
* @blkg is going offline and will be ignored by
@@ -3753,7 +3759,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
uint64_t serial_nr;
rcu_read_lock();
- serial_nr = bio_blkcg(bio)->css.serial_nr;
+ serial_nr = __bio_blkcg(bio)->css.serial_nr;
rcu_read_unlock();
/*
@@ -3818,7 +3824,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
struct cfq_group *cfqg;
rcu_read_lock();
- cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
+ cfqg = cfq_lookup_cfqg(cfqd, __bio_blkcg(bio));
if (!cfqg) {
cfqq = &cfqd->oom_cfqq;
goto out;
diff --git a/block/elevator.c b/block/elevator.c
index 6a06b5d040e5..8fdcd64ae12e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -41,6 +41,7 @@
#include "blk.h"
#include "blk-mq-sched.h"
+#include "blk-pm.h"
#include "blk-wbt.h"
static DEFINE_SPINLOCK(elv_list_lock);
@@ -557,27 +558,6 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}
-#ifdef CONFIG_PM
-static void blk_pm_requeue_request(struct request *rq)
-{
- if (rq->q->dev && !(rq->rq_flags & RQF_PM))
- rq->q->nr_pending--;
-}
-
-static void blk_pm_add_request(struct request_queue *q, struct request *rq)
-{
- if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
- (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
- pm_request_resume(q->dev);
-}
-#else
-static inline void blk_pm_requeue_request(struct request *rq) {}
-static inline void blk_pm_add_request(struct request_queue *q,
- struct request *rq)
-{
-}
-#endif
-
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
@@ -609,7 +589,7 @@ void elv_drain_elevator(struct request_queue *q)
while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
;
- if (q->nr_sorted && printed++ < 10) {
+ if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10) {
printk(KERN_ERR "%s: forced dispatching is broken "
"(nr_sorted=%u), please report this\n",
q->elevator->type->elevator_name, q->nr_sorted);
diff --git a/block/genhd.c b/block/genhd.c
index 8cc719a37b32..cff6bdf27226 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -567,7 +567,8 @@ static int exact_lock(dev_t devt, void *data)
return 0;
}
-static void register_disk(struct device *parent, struct gendisk *disk)
+static void register_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups)
{
struct device *ddev = disk_to_dev(disk);
struct block_device *bdev;
@@ -582,6 +583,10 @@ static void register_disk(struct device *parent, struct gendisk *disk)
/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);
+ if (groups) {
+ WARN_ON(ddev->groups);
+ ddev->groups = groups;
+ }
if (device_add(ddev))
return;
if (!sysfs_deprecated) {
@@ -647,6 +652,7 @@ exit:
* __device_add_disk - add disk information to kernel list
* @parent: parent device for the disk
* @disk: per-device partitioning information
+ * @groups: Additional per-device sysfs groups
* @register_queue: register the queue if set to true
*
* This function registers the partitioning information in @disk
@@ -655,6 +661,7 @@ exit:
* FIXME: error handling
*/
static void __device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
bool register_queue)
{
dev_t devt;
@@ -698,7 +705,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
}
- register_disk(parent, disk);
+ register_disk(parent, disk, groups);
if (register_queue)
blk_register_queue(disk);
@@ -712,15 +719,17 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
blk_integrity_add(disk);
}
-void device_add_disk(struct device *parent, struct gendisk *disk)
+void device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups)
{
- __device_add_disk(parent, disk, true);
+ __device_add_disk(parent, disk, groups, true);
}
EXPORT_SYMBOL(device_add_disk);
void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
{
- __device_add_disk(parent, disk, false);
+ __device_add_disk(parent, disk, NULL, false);
}
EXPORT_SYMBOL(device_add_disk_no_queue_reg);
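
With the new groups argument, a driver can have its sysfs attributes created by device_add() itself, before the uevent is sent, instead of racing with udev after the disk appears. A hypothetical caller (the attribute, its value, and all my_* names are illustrative):

static ssize_t my_firmware_rev_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", "1.0");	/* illustrative value */
}
static DEVICE_ATTR_RO(my_firmware_rev);

static struct attribute *my_disk_attrs[] = {
	&dev_attr_my_firmware_rev.attr,
	NULL,
};

static const struct attribute_group my_disk_group = {
	.attrs = my_disk_attrs,
};

static const struct attribute_group *my_disk_groups[] = {
	&my_disk_group,
	NULL,
};

/* ... in the driver's probe path: */
device_add_disk(parent_dev, disk, my_disk_groups);
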
@@ -1343,18 +1352,18 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_stat_read(hd, ios[STAT_READ]),
part_stat_read(hd, merges[STAT_READ]),
part_stat_read(hd, sectors[STAT_READ]),
- jiffies_to_msecs(part_stat_read(hd, ticks[STAT_READ])),
+ (unsigned int)part_stat_read_msecs(hd, STAT_READ),
part_stat_read(hd, ios[STAT_WRITE]),
part_stat_read(hd, merges[STAT_WRITE]),
part_stat_read(hd, sectors[STAT_WRITE]),
- jiffies_to_msecs(part_stat_read(hd, ticks[STAT_WRITE])),
+ (unsigned int)part_stat_read_msecs(hd, STAT_WRITE),
inflight[0],
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
jiffies_to_msecs(part_stat_read(hd, time_in_queue)),
part_stat_read(hd, ios[STAT_DISCARD]),
part_stat_read(hd, merges[STAT_DISCARD]),
part_stat_read(hd, sectors[STAT_DISCARD]),
- jiffies_to_msecs(part_stat_read(hd, ticks[STAT_DISCARD]))
+ (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD)
);
}
disk_part_iter_exit(&piter);
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index a1660bafc912..eccac01a10b6 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -29,19 +29,30 @@
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
-#include "blk-stat.h"
-/* Scheduling domains. */
+#define CREATE_TRACE_POINTS
+#include <trace/events/kyber.h>
+
+/*
+ * Scheduling domains: the device is divided into multiple domains based on the
+ * request type.
+ */
enum {
KYBER_READ,
- KYBER_SYNC_WRITE,
- KYBER_OTHER, /* Async writes, discard, etc. */
+ KYBER_WRITE,
+ KYBER_DISCARD,
+ KYBER_OTHER,
KYBER_NUM_DOMAINS,
};
-enum {
- KYBER_MIN_DEPTH = 256,
+static const char *kyber_domain_names[] = {
+ [KYBER_READ] = "READ",
+ [KYBER_WRITE] = "WRITE",
+ [KYBER_DISCARD] = "DISCARD",
+ [KYBER_OTHER] = "OTHER",
+};
+enum {
/*
* In order to prevent starvation of synchronous requests by a flood of
* asynchronous requests, we reserve 25% of requests for synchronous
@@ -51,25 +62,87 @@ enum {
};
/*
- * Initial device-wide depths for each scheduling domain.
+ * Maximum device-wide depth for each scheduling domain.
*
- * Even for fast devices with lots of tags like NVMe, you can saturate
- * the device with only a fraction of the maximum possible queue depth.
- * So, we cap these to a reasonable value.
+ * Even for fast devices with lots of tags like NVMe, you can saturate the
+ * device with only a fraction of the maximum possible queue depth. So, we cap
+ * these to a reasonable value.
*/
static const unsigned int kyber_depth[] = {
[KYBER_READ] = 256,
- [KYBER_SYNC_WRITE] = 128,
- [KYBER_OTHER] = 64,
+ [KYBER_WRITE] = 128,
+ [KYBER_DISCARD] = 64,
+ [KYBER_OTHER] = 16,
};
/*
- * Scheduling domain batch sizes. We favor reads.
+ * Default latency targets for each scheduling domain.
+ */
+static const u64 kyber_latency_targets[] = {
+ [KYBER_READ] = 2ULL * NSEC_PER_MSEC,
+ [KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
+ [KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
+};
+
+/*
+ * Batch size (number of requests we'll dispatch in a row) for each scheduling
+ * domain.
*/
static const unsigned int kyber_batch_size[] = {
[KYBER_READ] = 16,
- [KYBER_SYNC_WRITE] = 8,
- [KYBER_OTHER] = 8,
+ [KYBER_WRITE] = 8,
+ [KYBER_DISCARD] = 1,
+ [KYBER_OTHER] = 1,
+};
+
+/*
+ * Request latencies are recorded in a histogram with buckets defined relative
+ * to the target latency:
+ *
+ * <= 1/4 * target latency
+ * <= 1/2 * target latency
+ * <= 3/4 * target latency
+ * <= target latency
+ * <= 1 1/4 * target latency
+ * <= 1 1/2 * target latency
+ * <= 1 3/4 * target latency
+ * > 1 3/4 * target latency
+ */
+enum {
+ /*
+ * The width of the latency histogram buckets is
+ * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
+ */
+ KYBER_LATENCY_SHIFT = 2,
+ /*
+ * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
+ * thus, "good".
+ */
+ KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
+ /* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
+ KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
+};
+
+/*
+ * We measure both the total latency and the I/O latency (i.e., latency after
+ * submitting to the device).
+ */
+enum {
+ KYBER_TOTAL_LATENCY,
+ KYBER_IO_LATENCY,
+};
+
+static const char *kyber_latency_type_names[] = {
+ [KYBER_TOTAL_LATENCY] = "total",
+ [KYBER_IO_LATENCY] = "I/O",
+};
+
+/*
+ * Per-cpu latency histograms: total latency and I/O latency for each scheduling
+ * domain except for KYBER_OTHER.
+ */
+struct kyber_cpu_latency {
+ atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};
/*
@@ -88,12 +161,9 @@ struct kyber_ctx_queue {
struct kyber_queue_data {
struct request_queue *q;
- struct blk_stat_callback *cb;
-
/*
- * The device is divided into multiple scheduling domains based on the
- * request type. Each domain has a fixed number of in-flight requests of
- * that type device-wide, limited by these tokens.
+ * Each scheduling domain has a limited number of in-flight requests
+ * device-wide, limited by these tokens.
*/
struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
@@ -103,8 +173,19 @@ struct kyber_queue_data {
*/
unsigned int async_depth;
+ struct kyber_cpu_latency __percpu *cpu_latency;
+
+ /* Timer for stats aggregation and adjusting domain tokens. */
+ struct timer_list timer;
+
+ unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
+
+ unsigned long latency_timeout[KYBER_OTHER];
+
+ int domain_p99[KYBER_OTHER];
+
/* Target latencies in nanoseconds. */
- u64 read_lat_nsec, write_lat_nsec;
+ u64 latency_targets[KYBER_OTHER];
};
struct kyber_hctx_data {
@@ -124,233 +205,219 @@ static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
static unsigned int kyber_sched_domain(unsigned int op)
{
- if ((op & REQ_OP_MASK) == REQ_OP_READ)
+ switch (op & REQ_OP_MASK) {
+ case REQ_OP_READ:
return KYBER_READ;
- else if ((op & REQ_OP_MASK) == REQ_OP_WRITE && op_is_sync(op))
- return KYBER_SYNC_WRITE;
- else
+ case REQ_OP_WRITE:
+ return KYBER_WRITE;
+ case REQ_OP_DISCARD:
+ return KYBER_DISCARD;
+ default:
return KYBER_OTHER;
+ }
}
-enum {
- NONE = 0,
- GOOD = 1,
- GREAT = 2,
- BAD = -1,
- AWFUL = -2,
-};
-
-#define IS_GOOD(status) ((status) > 0)
-#define IS_BAD(status) ((status) < 0)
-
-static int kyber_lat_status(struct blk_stat_callback *cb,
- unsigned int sched_domain, u64 target)
+static void flush_latency_buckets(struct kyber_queue_data *kqd,
+ struct kyber_cpu_latency *cpu_latency,
+ unsigned int sched_domain, unsigned int type)
{
- u64 latency;
-
- if (!cb->stat[sched_domain].nr_samples)
- return NONE;
+ unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
+ atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
+ unsigned int bucket;
- latency = cb->stat[sched_domain].mean;
- if (latency >= 2 * target)
- return AWFUL;
- else if (latency > target)
- return BAD;
- else if (latency <= target / 2)
- return GREAT;
- else /* (latency <= target) */
- return GOOD;
+ for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
+ buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}
/*
- * Adjust the read or synchronous write depth given the status of reads and
- * writes. The goal is that the latencies of the two domains are fair (i.e., if
- * one is good, then the other is good).
+ * Calculate the histogram bucket with the given percentile rank, or -1 if there
+ * aren't enough samples yet.
*/
-static void kyber_adjust_rw_depth(struct kyber_queue_data *kqd,
- unsigned int sched_domain, int this_status,
- int other_status)
+static int calculate_percentile(struct kyber_queue_data *kqd,
+ unsigned int sched_domain, unsigned int type,
+ unsigned int percentile)
{
- unsigned int orig_depth, depth;
+ unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
+ unsigned int bucket, samples = 0, percentile_samples;
+
+ for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
+ samples += buckets[bucket];
+
+ if (!samples)
+ return -1;
/*
- * If this domain had no samples, or reads and writes are both good or
- * both bad, don't adjust the depth.
+ * We do the calculation once we have 500 samples or once one second has
+ * passed since the first sample was recorded, whichever comes first.
*/
- if (this_status == NONE ||
- (IS_GOOD(this_status) && IS_GOOD(other_status)) ||
- (IS_BAD(this_status) && IS_BAD(other_status)))
- return;
-
- orig_depth = depth = kqd->domain_tokens[sched_domain].sb.depth;
+ if (!kqd->latency_timeout[sched_domain])
+ kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
+ if (samples < 500 &&
+ time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
+ return -1;
+ }
+ kqd->latency_timeout[sched_domain] = 0;
- if (other_status == NONE) {
- depth++;
- } else {
- switch (this_status) {
- case GOOD:
- if (other_status == AWFUL)
- depth -= max(depth / 4, 1U);
- else
- depth -= max(depth / 8, 1U);
- break;
- case GREAT:
- if (other_status == AWFUL)
- depth /= 2;
- else
- depth -= max(depth / 4, 1U);
+ percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
+ for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
+ if (buckets[bucket] >= percentile_samples)
break;
- case BAD:
- depth++;
- break;
- case AWFUL:
- if (other_status == GREAT)
- depth += 2;
- else
- depth++;
- break;
- }
+ percentile_samples -= buckets[bucket];
}
+ memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
- depth = clamp(depth, 1U, kyber_depth[sched_domain]);
- if (depth != orig_depth)
- sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
+ trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
+ kyber_latency_type_names[type], percentile,
+ bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
+
+ return bucket;
}
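
The bucket walk above subtracts each bucket's count from the number of samples still needed, stopping at the first bucket that can cover the remainder. A standalone rendering of the same walk (illustrative userspace C, with DIV_ROUND_UP expanded by hand):

#include <stdio.h>

#define NR_BUCKETS 8

/* Return the index of the bucket holding the given percentile, or -1 if
 * the histogram is empty. Illustration only. */
static int percentile_bucket(const unsigned int buckets[NR_BUCKETS],
			     unsigned int percentile)
{
	unsigned int samples = 0, want, i;

	for (i = 0; i < NR_BUCKETS; i++)
		samples += buckets[i];
	if (!samples)
		return -1;

	/* DIV_ROUND_UP(samples * percentile, 100) in the kernel */
	want = (samples * percentile + 99) / 100;
	for (i = 0; i < NR_BUCKETS - 1; i++) {
		if (buckets[i] >= want)
			break;
		want -= buckets[i];
	}
	return i;
}

int main(void)
{
	unsigned int hist[NR_BUCKETS] = { 10, 20, 40, 20, 5, 3, 1, 1 };

	printf("p90 bucket: %d\n", percentile_bucket(hist, 90)); /* 3 */
	return 0;
}
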
-/*
- * Adjust the depth of other requests given the status of reads and synchronous
- * writes. As long as either domain is doing fine, we don't throttle, but if
- * both domains are doing badly, we throttle heavily.
- */
-static void kyber_adjust_other_depth(struct kyber_queue_data *kqd,
- int read_status, int write_status,
- bool have_samples)
-{
- unsigned int orig_depth, depth;
- int status;
-
- orig_depth = depth = kqd->domain_tokens[KYBER_OTHER].sb.depth;
-
- if (read_status == NONE && write_status == NONE) {
- depth += 2;
- } else if (have_samples) {
- if (read_status == NONE)
- status = write_status;
- else if (write_status == NONE)
- status = read_status;
- else
- status = max(read_status, write_status);
- switch (status) {
- case GREAT:
- depth += 2;
- break;
- case GOOD:
- depth++;
- break;
- case BAD:
- depth -= max(depth / 4, 1U);
- break;
- case AWFUL:
- depth /= 2;
- break;
- }
+static void kyber_resize_domain(struct kyber_queue_data *kqd,
+ unsigned int sched_domain, unsigned int depth)
+{
+ depth = clamp(depth, 1U, kyber_depth[sched_domain]);
+ if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
+ sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
+ trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
+ depth);
}
-
- depth = clamp(depth, 1U, kyber_depth[KYBER_OTHER]);
- if (depth != orig_depth)
- sbitmap_queue_resize(&kqd->domain_tokens[KYBER_OTHER], depth);
}
-/*
- * Apply heuristics for limiting queue depths based on gathered latency
- * statistics.
- */
-static void kyber_stat_timer_fn(struct blk_stat_callback *cb)
+static void kyber_timer_fn(struct timer_list *t)
{
- struct kyber_queue_data *kqd = cb->data;
- int read_status, write_status;
+ struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
+ unsigned int sched_domain;
+ int cpu;
+ bool bad = false;
+
+ /* Sum all of the per-cpu latency histograms. */
+ for_each_online_cpu(cpu) {
+ struct kyber_cpu_latency *cpu_latency;
+
+ cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
+ for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
+ flush_latency_buckets(kqd, cpu_latency, sched_domain,
+ KYBER_TOTAL_LATENCY);
+ flush_latency_buckets(kqd, cpu_latency, sched_domain,
+ KYBER_IO_LATENCY);
+ }
+ }
- read_status = kyber_lat_status(cb, KYBER_READ, kqd->read_lat_nsec);
- write_status = kyber_lat_status(cb, KYBER_SYNC_WRITE, kqd->write_lat_nsec);
+ /*
+ * Check if any domains have a high I/O latency, which might indicate
+ * congestion in the device. Note that we use the p90; we don't want to
+ * be too sensitive to outliers here.
+ */
+ for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
+ int p90;
- kyber_adjust_rw_depth(kqd, KYBER_READ, read_status, write_status);
- kyber_adjust_rw_depth(kqd, KYBER_SYNC_WRITE, write_status, read_status);
- kyber_adjust_other_depth(kqd, read_status, write_status,
- cb->stat[KYBER_OTHER].nr_samples != 0);
+ p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
+ 90);
+ if (p90 >= KYBER_GOOD_BUCKETS)
+ bad = true;
+ }
/*
- * Continue monitoring latencies if we aren't hitting the targets or
- * we're still throttling other requests.
+ * Adjust the scheduling domain depths. If we determined that there was
+ * congestion, we throttle all domains with good latencies. Either way,
+ * we ease up on throttling domains with bad latencies.
*/
- if (!blk_stat_is_active(kqd->cb) &&
- ((IS_BAD(read_status) || IS_BAD(write_status) ||
- kqd->domain_tokens[KYBER_OTHER].sb.depth < kyber_depth[KYBER_OTHER])))
- blk_stat_activate_msecs(kqd->cb, 100);
+ for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
+ unsigned int orig_depth, depth;
+ int p99;
+
+ p99 = calculate_percentile(kqd, sched_domain,
+ KYBER_TOTAL_LATENCY, 99);
+ /*
+ * This is kind of subtle: different domains will not
+ * necessarily have enough samples to calculate the latency
+ * percentiles during the same window, so we have to remember
+ * the p99 for the next time we observe congestion; once we do,
+ * we don't want to throttle again until we get more data, so we
+ * reset it to -1.
+ */
+ if (bad) {
+ if (p99 < 0)
+ p99 = kqd->domain_p99[sched_domain];
+ kqd->domain_p99[sched_domain] = -1;
+ } else if (p99 >= 0) {
+ kqd->domain_p99[sched_domain] = p99;
+ }
+ if (p99 < 0)
+ continue;
+
+ /*
+ * If this domain has bad latency, throttle less. Otherwise,
+ * throttle more iff we determined that there is congestion.
+ *
+ * The new depth is scaled linearly with the p99 latency vs the
+ * latency target. E.g., if the p99 is 3/4 of the target, then
+ * we throttle down to 3/4 of the current depth, and if the p99
+ * is 2x the target, then we double the depth.
+ */
+ if (bad || p99 >= KYBER_GOOD_BUCKETS) {
+ orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
+ depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
+ kyber_resize_domain(kqd, sched_domain, depth);
+ }
+ }
}
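
The scaling comment above is easiest to see with numbers. Assuming KYBER_LATENCY_SHIFT is 2 (the constant's value is not visible in this hunk), each bucket spans a quarter of the target, so bucket index p99 stands for roughly (p99 + 1)/4 of the target latency:

#include <stdio.h>

#define KYBER_LATENCY_SHIFT 2 /* assumed value, see note above */

int main(void)
{
	unsigned int orig_depth = 64;

	/* depth = orig_depth * (p99 + 1) / 2^KYBER_LATENCY_SHIFT */
	for (int p99 = 0; p99 < (1 << (KYBER_LATENCY_SHIFT + 1)); p99++)
		printf("p99 bucket %d -> depth %u\n", p99,
		       (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT);
	/* bucket 2 (~3/4 of target) gives 48 = 3/4 * 64;
	 * bucket 7 (~2x target)     gives 128 = 2 * 64 */
	return 0;
}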
-static unsigned int kyber_sched_tags_shift(struct kyber_queue_data *kqd)
+static unsigned int kyber_sched_tags_shift(struct request_queue *q)
{
/*
* All of the hardware queues have the same depth, so we can just grab
* the shift of the first one.
*/
- return kqd->q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
-}
-
-static int kyber_bucket_fn(const struct request *rq)
-{
- return kyber_sched_domain(rq->cmd_flags);
+ return q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
}
static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
struct kyber_queue_data *kqd;
- unsigned int max_tokens;
unsigned int shift;
int ret = -ENOMEM;
int i;
- kqd = kmalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
+ kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
if (!kqd)
goto err;
+
kqd->q = q;
- kqd->cb = blk_stat_alloc_callback(kyber_stat_timer_fn, kyber_bucket_fn,
- KYBER_NUM_DOMAINS, kqd);
- if (!kqd->cb)
+ kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!kqd->cpu_latency)
goto err_kqd;
- /*
- * The maximum number of tokens for any scheduling domain is at least
- * the queue depth of a single hardware queue. If the hardware doesn't
- * have many tags, still provide a reasonable number.
- */
- max_tokens = max_t(unsigned int, q->tag_set->queue_depth,
- KYBER_MIN_DEPTH);
+ timer_setup(&kqd->timer, kyber_timer_fn, 0);
+
for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
WARN_ON(!kyber_depth[i]);
WARN_ON(!kyber_batch_size[i]);
ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
- max_tokens, -1, false, GFP_KERNEL,
- q->node);
+ kyber_depth[i], -1, false,
+ GFP_KERNEL, q->node);
if (ret) {
while (--i >= 0)
sbitmap_queue_free(&kqd->domain_tokens[i]);
- goto err_cb;
+ goto err_buckets;
}
- sbitmap_queue_resize(&kqd->domain_tokens[i], kyber_depth[i]);
}
- shift = kyber_sched_tags_shift(kqd);
- kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+ for (i = 0; i < KYBER_OTHER; i++) {
+ kqd->domain_p99[i] = -1;
+ kqd->latency_targets[i] = kyber_latency_targets[i];
+ }
- kqd->read_lat_nsec = 2000000ULL;
- kqd->write_lat_nsec = 10000000ULL;
+ shift = kyber_sched_tags_shift(q);
+ kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
return kqd;
-err_cb:
- blk_stat_free_callback(kqd->cb);
+err_buckets:
+ free_percpu(kqd->cpu_latency);
err_kqd:
kfree(kqd);
err:
@@ -372,25 +439,24 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
return PTR_ERR(kqd);
}
+ blk_stat_enable_accounting(q);
+
eq->elevator_data = kqd;
q->elevator = eq;
- blk_stat_add_callback(q, kqd->cb);
-
return 0;
}
static void kyber_exit_sched(struct elevator_queue *e)
{
struct kyber_queue_data *kqd = e->elevator_data;
- struct request_queue *q = kqd->q;
int i;
- blk_stat_remove_callback(q, kqd->cb);
+ del_timer_sync(&kqd->timer);
for (i = 0; i < KYBER_NUM_DOMAINS; i++)
sbitmap_queue_free(&kqd->domain_tokens[i]);
- blk_stat_free_callback(kqd->cb);
+ free_percpu(kqd->cpu_latency);
kfree(kqd);
}
@@ -558,41 +624,44 @@ static void kyber_finish_request(struct request *rq)
rq_clear_domain_token(kqd, rq);
}
-static void kyber_completed_request(struct request *rq)
+static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
+ unsigned int sched_domain, unsigned int type,
+ u64 target, u64 latency)
{
- struct request_queue *q = rq->q;
- struct kyber_queue_data *kqd = q->elevator->elevator_data;
- unsigned int sched_domain;
- u64 now, latency, target;
+ unsigned int bucket;
+ u64 divisor;
- /*
- * Check if this request met our latency goal. If not, quickly gather
- * some statistics and start throttling.
- */
- sched_domain = kyber_sched_domain(rq->cmd_flags);
- switch (sched_domain) {
- case KYBER_READ:
- target = kqd->read_lat_nsec;
- break;
- case KYBER_SYNC_WRITE:
- target = kqd->write_lat_nsec;
- break;
- default:
- return;
+ if (latency > 0) {
+ divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
+ bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
+ KYBER_LATENCY_BUCKETS - 1);
+ } else {
+ bucket = 0;
}
- /* If we are already monitoring latencies, don't check again. */
- if (blk_stat_is_active(kqd->cb))
- return;
+ atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
+}
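
Worked example of the bucketing above: the divisor is the target scaled down by KYBER_LATENCY_SHIFT, so buckets below 1 << KYBER_LATENCY_SHIFT lie within the target and every slower sample saturates in the top bucket. A sketch, again assuming a shift of 2 and a hypothetical 2 ms target:

#include <stdio.h>
#include <stdint.h>

#define SHIFT	2			/* assumed KYBER_LATENCY_SHIFT */
#define NBUCKETS (1 << (SHIFT + 1))	/* 8 */

static unsigned int latency_bucket(uint64_t target, uint64_t latency)
{
	uint64_t divisor = target >> SHIFT ? target >> SHIFT : 1;

	if (latency == 0)
		return 0;
	uint64_t b = (latency - 1) / divisor;
	return b < NBUCKETS - 1 ? (unsigned int)b : NBUCKETS - 1;
}

int main(void)
{
	uint64_t target = 2000000;	/* hypothetical 2 ms target, in ns */

	printf("%u\n", latency_bucket(target,  400000));	/* 0 */
	printf("%u\n", latency_bucket(target, 2000000));	/* 3, last bucket within target */
	printf("%u\n", latency_bucket(target, 9000000));	/* 7, saturated */
	return 0;
}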
- now = ktime_get_ns();
- if (now < rq->io_start_time_ns)
+static void kyber_completed_request(struct request *rq, u64 now)
+{
+ struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
+ struct kyber_cpu_latency *cpu_latency;
+ unsigned int sched_domain;
+ u64 target;
+
+ sched_domain = kyber_sched_domain(rq->cmd_flags);
+ if (sched_domain == KYBER_OTHER)
return;
- latency = now - rq->io_start_time_ns;
+ cpu_latency = get_cpu_ptr(kqd->cpu_latency);
+ target = kqd->latency_targets[sched_domain];
+ add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
+ target, now - rq->start_time_ns);
+ add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
+ now - rq->io_start_time_ns);
+ put_cpu_ptr(kqd->cpu_latency);
- if (latency > target)
- blk_stat_activate_msecs(kqd->cb, 10);
+ timer_reduce(&kqd->timer, jiffies + HZ / 10);
}
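
One property worth noting about this completion path: timer_reduce() only pulls an armed timer's expiry earlier (and arms it if idle), so a stream of completions across CPUs converges on a single firing at most HZ/10 after the first completion that requested it, rather than repeatedly rescheduling the timer.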
struct flush_kcq_data {
@@ -713,6 +782,9 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
rq_set_domain_token(rq, nr);
list_del_init(&rq->queuelist);
return rq;
+ } else {
+ trace_kyber_throttled(kqd->q,
+ kyber_domain_names[khd->cur_domain]);
}
} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
nr = kyber_get_domain_token(kqd, khd, hctx);
@@ -723,6 +795,9 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
rq_set_domain_token(rq, nr);
list_del_init(&rq->queuelist);
return rq;
+ } else {
+ trace_kyber_throttled(kqd->q,
+ kyber_domain_names[khd->cur_domain]);
}
}
@@ -790,17 +865,17 @@ static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
return false;
}
-#define KYBER_LAT_SHOW_STORE(op) \
-static ssize_t kyber_##op##_lat_show(struct elevator_queue *e, \
- char *page) \
+#define KYBER_LAT_SHOW_STORE(domain, name) \
+static ssize_t kyber_##name##_lat_show(struct elevator_queue *e, \
+ char *page) \
{ \
struct kyber_queue_data *kqd = e->elevator_data; \
\
- return sprintf(page, "%llu\n", kqd->op##_lat_nsec); \
+ return sprintf(page, "%llu\n", kqd->latency_targets[domain]); \
} \
\
-static ssize_t kyber_##op##_lat_store(struct elevator_queue *e, \
- const char *page, size_t count) \
+static ssize_t kyber_##name##_lat_store(struct elevator_queue *e, \
+ const char *page, size_t count) \
{ \
struct kyber_queue_data *kqd = e->elevator_data; \
unsigned long long nsec; \
@@ -810,12 +885,12 @@ static ssize_t kyber_##op##_lat_store(struct elevator_queue *e, \
if (ret) \
return ret; \
\
- kqd->op##_lat_nsec = nsec; \
+ kqd->latency_targets[domain] = nsec; \
\
return count; \
}
-KYBER_LAT_SHOW_STORE(read);
-KYBER_LAT_SHOW_STORE(write);
+KYBER_LAT_SHOW_STORE(KYBER_READ, read);
+KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE
#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
@@ -882,7 +957,8 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
return 0; \
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
-KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_SYNC_WRITE, sync_write)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS
@@ -900,20 +976,7 @@ static int kyber_cur_domain_show(void *data, struct seq_file *m)
struct blk_mq_hw_ctx *hctx = data;
struct kyber_hctx_data *khd = hctx->sched_data;
- switch (khd->cur_domain) {
- case KYBER_READ:
- seq_puts(m, "READ\n");
- break;
- case KYBER_SYNC_WRITE:
- seq_puts(m, "SYNC_WRITE\n");
- break;
- case KYBER_OTHER:
- seq_puts(m, "OTHER\n");
- break;
- default:
- seq_printf(m, "%u\n", khd->cur_domain);
- break;
- }
+ seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
return 0;
}
@@ -930,7 +993,8 @@ static int kyber_batching_show(void *data, struct seq_file *m)
{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
KYBER_QUEUE_DOMAIN_ATTRS(read),
- KYBER_QUEUE_DOMAIN_ATTRS(sync_write),
+ KYBER_QUEUE_DOMAIN_ATTRS(write),
+ KYBER_QUEUE_DOMAIN_ATTRS(discard),
KYBER_QUEUE_DOMAIN_ATTRS(other),
{"async_depth", 0400, kyber_async_depth_show},
{},
@@ -942,7 +1006,8 @@ static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
KYBER_HCTX_DOMAIN_ATTRS(read),
- KYBER_HCTX_DOMAIN_ATTRS(sync_write),
+ KYBER_HCTX_DOMAIN_ATTRS(write),
+ KYBER_HCTX_DOMAIN_ATTRS(discard),
KYBER_HCTX_DOMAIN_ATTRS(other),
{"cur_domain", 0400, kyber_cur_domain_show},
{"batching", 0400, kyber_batching_show},
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 5a8975a1201c..d3d14e81fb12 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -136,18 +136,18 @@ ssize_t part_stat_show(struct device *dev,
part_stat_read(p, ios[STAT_READ]),
part_stat_read(p, merges[STAT_READ]),
(unsigned long long)part_stat_read(p, sectors[STAT_READ]),
- jiffies_to_msecs(part_stat_read(p, ticks[STAT_READ])),
+ (unsigned int)part_stat_read_msecs(p, STAT_READ),
part_stat_read(p, ios[STAT_WRITE]),
part_stat_read(p, merges[STAT_WRITE]),
(unsigned long long)part_stat_read(p, sectors[STAT_WRITE]),
- jiffies_to_msecs(part_stat_read(p, ticks[STAT_WRITE])),
+ (unsigned int)part_stat_read_msecs(p, STAT_WRITE),
inflight[0],
jiffies_to_msecs(part_stat_read(p, io_ticks)),
jiffies_to_msecs(part_stat_read(p, time_in_queue)),
part_stat_read(p, ios[STAT_DISCARD]),
part_stat_read(p, merges[STAT_DISCARD]),
(unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]),
- jiffies_to_msecs(part_stat_read(p, ticks[STAT_DISCARD])));
+ (unsigned int)part_stat_read_msecs(p, STAT_DISCARD));
}
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f3e40ac56d93..f7a235db56aa 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -213,20 +213,6 @@ config CRYPTO_CRYPTD
converts an arbitrary synchronous software crypto algorithm
into an asynchronous algorithm that executes in a kernel thread.
-config CRYPTO_MCRYPTD
- tristate "Software async multi-buffer crypto daemon"
- select CRYPTO_BLKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MANAGER
- select CRYPTO_WORKQUEUE
- help
- This is a generic software asynchronous crypto daemon that
- provides the kernel thread to assist multi-buffer crypto
- algorithms for submitting jobs and flushing jobs in multi-buffer
- crypto algorithms. Multi-buffer crypto algorithms are executed
- in the context of this kernel thread and drivers can post
- their crypto request asynchronously to be processed by this daemon.
-
config CRYPTO_AUTHENC
tristate "Authenc support"
select CRYPTO_AEAD
@@ -470,6 +456,18 @@ config CRYPTO_LRW
The first 128, 192 or 256 bits in the key are used for AES and the
rest is used to tie each cipher block to its logical position.
+config CRYPTO_OFB
+ tristate "OFB support"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_MANAGER
+ help
+ OFB: the Output Feedback mode makes a block cipher into a synchronous
+ stream cipher. It generates keystream blocks, which are then XORed
+ with the plaintext blocks to get the ciphertext. Flipping a bit in the
+ ciphertext produces a flipped bit in the plaintext at the same
+ location. This property allows many error correcting codes to function
+ normally even when applied before encryption.
+
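
A toy illustration of the mode just described, with an 8-byte stand-in permutation instead of a real block cipher (kernel users would allocate "ofb(aes)" through the crypto API; everything below is invented for the demonstration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BS 8

/* Stand-in keyed permutation; any block cipher slots in here. */
static void toy_encrypt_block(const uint8_t key[BS], uint8_t blk[BS])
{
	for (int i = 0; i < BS; i++)
		blk[i] = (uint8_t)((blk[i] ^ key[i]) + 0x9e);
}

/* OFB: keystream block i is E(K, keystream block i-1), seeded by the
 * IV; encryption and decryption are the same XOR. */
static void ofb_crypt(const uint8_t key[BS], const uint8_t iv[BS],
		      uint8_t *buf, size_t len)
{
	uint8_t feedback[BS];

	memcpy(feedback, iv, BS);
	for (size_t i = 0; i < len; i++) {
		if (i % BS == 0)
			toy_encrypt_block(key, feedback);
		buf[i] ^= feedback[i % BS];
	}
}

int main(void)
{
	uint8_t key[BS] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t iv[BS] = { 9, 9, 9, 9, 9, 9, 9, 9 };
	uint8_t msg[] = "bit flips stay local in OFB";

	ofb_crypt(key, iv, msg, sizeof(msg) - 1);	/* encrypt */
	ofb_crypt(key, iv, msg, sizeof(msg) - 1);	/* decrypt */
	printf("%s\n", msg);				/* round-trips */
	return 0;
}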
config CRYPTO_PCBC
tristate "PCBC support"
select CRYPTO_BLKCIPHER
@@ -848,54 +846,6 @@ config CRYPTO_SHA1_PPC_SPE
SHA-1 secure hash standard (DFIPS 180-4) implemented
using powerpc SPE SIMD instruction set.
-config CRYPTO_SHA1_MB
- tristate "SHA1 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA1
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
- used when there is significant amount of work to keep the keep
- the data lanes filled to get performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
-config CRYPTO_SHA256_MB
- tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA256
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-256 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
- used when there is significant amount of work to keep the keep
- the data lanes filled to get performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
-config CRYPTO_SHA512_MB
- tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA512
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-512 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
- used when there is significant amount of work to keep the keep
- the data lanes filled to get performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
config CRYPTO_SHA256
tristate "SHA224 and SHA256 digest algorithm"
select CRYPTO_HASH
@@ -1133,7 +1083,7 @@ config CRYPTO_AES_NI_INTEL
In addition to AES cipher algorithm support, the acceleration
for some popular block cipher mode is supported too, including
- ECB, CBC, LRW, PCBC, XTS. The 64 bit version has additional
+ ECB, CBC, LRW, XTS. The 64 bit version has additional
acceleration for CTR.
config CRYPTO_AES_SPARC64
@@ -1590,20 +1540,6 @@ config CRYPTO_SM4
If unsure, say N.
-config CRYPTO_SPECK
- tristate "Speck cipher algorithm"
- select CRYPTO_ALGAPI
- help
- Speck is a lightweight block cipher that is tuned for optimal
- performance in software (rather than hardware).
-
- Speck may not be as secure as AES, and should only be used on systems
- where AES is not fast enough.
-
- See also: <https://eprint.iacr.org/2013/404.pdf>
-
- If unsure, say N.
-
config CRYPTO_TEA
tristate "TEA, XTEA and XETA cipher algorithms"
select CRYPTO_ALGAPI
@@ -1875,6 +1811,17 @@ config CRYPTO_USER_API_AEAD
This option enables the user-spaces interface for AEAD
cipher algorithms.
+config CRYPTO_STATS
+ bool "Crypto usage statistics for User-space"
+ help
+ This option enables the gathering of crypto stats.
+ This will collect:
+ - encrypt/decrypt size and numbers of symmetric operations
+ - compress/decompress size and numbers of compress operations
+ - size and numbers of hash operations
+ - encrypt/decrypt/sign/verify numbers for asymmetric operations
+ - generate/seed numbers for rng operations
+
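
A userspace model of the bookkeeping this option turns on: every operation bumps a per-algorithm op counter and a 64-bit byte total, or an error counter on failure. The field names follow the ones read out by crypto_user_stat.c later in this patch; the helper itself is illustrative:

#include <stdatomic.h>
#include <stdio.h>

struct alg_stats {
	atomic_uint encrypt_cnt;
	atomic_ullong encrypt_tlen;
	atomic_uint cipher_err_cnt;
};

static void record_encrypt(struct alg_stats *s, unsigned int len, int err)
{
	if (err) {
		atomic_fetch_add(&s->cipher_err_cnt, 1);
	} else {
		atomic_fetch_add(&s->encrypt_cnt, 1);
		atomic_fetch_add(&s->encrypt_tlen, len);
	}
}

int main(void)
{
	struct alg_stats s = {0};

	record_encrypt(&s, 4096, 0);
	record_encrypt(&s, 512, 0);
	record_encrypt(&s, 0, -22);	/* a failed op */

	printf("encrypt: %u ops, %llu bytes, %u errors\n",
	       atomic_load(&s.encrypt_cnt),
	       (unsigned long long)atomic_load(&s.encrypt_tlen),
	       atomic_load(&s.cipher_err_cnt));
	return 0;
}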
config CRYPTO_HASH_INFO
bool
diff --git a/crypto/Makefile b/crypto/Makefile
index 6d1d40eeb964..5c207c76abf7 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -54,6 +54,7 @@ cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
+crypto_user-y := crypto_user_base.o crypto_user_stat.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
@@ -93,7 +94,6 @@ obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o
obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o
obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
-obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
@@ -115,7 +115,6 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
-obj-$(CONFIG_CRYPTO_SPECK) += speck.o
obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
@@ -143,6 +142,7 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
+obj-$(CONFIG_CRYPTO_OFB) += ofb.o
ecdh_generic-y := ecc.o
ecdh_generic-y += ecdh.o
diff --git a/crypto/aegis.h b/crypto/aegis.h
index f1c6900ddb80..405e025fc906 100644
--- a/crypto/aegis.h
+++ b/crypto/aegis.h
@@ -21,7 +21,7 @@
union aegis_block {
__le64 words64[AEGIS_BLOCK_SIZE / sizeof(__le64)];
- u32 words32[AEGIS_BLOCK_SIZE / sizeof(u32)];
+ __le32 words32[AEGIS_BLOCK_SIZE / sizeof(__le32)];
u8 bytes[AEGIS_BLOCK_SIZE];
};
@@ -57,24 +57,22 @@ static void crypto_aegis_aesenc(union aegis_block *dst,
const union aegis_block *src,
const union aegis_block *key)
{
- u32 *d = dst->words32;
const u8 *s = src->bytes;
- const u32 *k = key->words32;
const u32 *t0 = crypto_ft_tab[0];
const u32 *t1 = crypto_ft_tab[1];
const u32 *t2 = crypto_ft_tab[2];
const u32 *t3 = crypto_ft_tab[3];
u32 d0, d1, d2, d3;
- d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]] ^ k[0];
- d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]] ^ k[1];
- d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]] ^ k[2];
- d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]] ^ k[3];
+ d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]];
+ d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]];
+ d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]];
+ d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]];
- d[0] = d0;
- d[1] = d1;
- d[2] = d2;
- d[3] = d3;
+ dst->words32[0] = cpu_to_le32(d0) ^ key->words32[0];
+ dst->words32[1] = cpu_to_le32(d1) ^ key->words32[1];
+ dst->words32[2] = cpu_to_le32(d2) ^ key->words32[2];
+ dst->words32[3] = cpu_to_le32(d3) ^ key->words32[3];
}
#endif /* _CRYPTO_AEGIS_H */
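
The rewritten tail is an endianness fix: the table lookups yield CPU-endian words, while words32 is now declared __le32, so each result must pass through cpu_to_le32() before the XOR with the stored key words. A small demo of what that conversion pins down (put_le32() here plays the role of cpu_to_le32()):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Store a 32-bit value least-significant byte first, independent of
 * the host's byte order -- the userspace analogue of cpu_to_le32(). */
static void put_le32(uint8_t out[4], uint32_t v)
{
	out[0] = (uint8_t)v;
	out[1] = (uint8_t)(v >> 8);
	out[2] = (uint8_t)(v >> 16);
	out[3] = (uint8_t)(v >> 24);
}

int main(void)
{
	uint32_t d0 = 0x11223344;	/* CPU-endian lookup result */
	uint8_t native[4], le[4];

	memcpy(native, &d0, 4);		/* byte order depends on the host */
	put_le32(le, d0);		/* always 44 33 22 11 */

	printf("le: %02x %02x %02x %02x\n", le[0], le[1], le[2], le[3]);
	/* Only after this normalization is an XOR against a stored
	 * little-endian key word correct on big-endian machines too. */
	return 0;
}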
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index b053179e0bc5..17eb09d222ff 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1071,7 +1071,7 @@ __poll_t af_alg_poll(struct file *file, struct socket *sock,
struct af_alg_ctx *ctx = ask->private;
__poll_t mask;
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
mask = 0;
if (!ctx->more || ctx->used)
diff --git a/crypto/ahash.c b/crypto/ahash.c
index a64c143165b1..e21667b4e10a 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -364,24 +364,35 @@ static int crypto_ahash_op(struct ahash_request *req,
int crypto_ahash_final(struct ahash_request *req)
{
- return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+ int ret;
+
+ ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+ crypto_stat_ahash_final(req, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
- return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+ int ret;
+
+ ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+ crypto_stat_ahash_final(req, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int ret;
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_ahash_op(req, tfm->digest);
+ ret = -ENOKEY;
+ else
+ ret = crypto_ahash_op(req, tfm->digest);
+ crypto_stat_ahash_final(req, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -550,8 +561,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
- if (alg->halg.digestsize > PAGE_SIZE / 8 ||
- alg->halg.statesize > PAGE_SIZE / 8 ||
+ if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
+ alg->halg.statesize > HASH_MAX_STATESIZE ||
alg->halg.statesize == 0)
return -EINVAL;
diff --git a/crypto/algapi.c b/crypto/algapi.c
index c0755cf4f53f..2545c5f89c4c 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -57,9 +57,14 @@ static int crypto_check_alg(struct crypto_alg *alg)
if (alg->cra_alignmask & (alg->cra_alignmask + 1))
return -EINVAL;
- if (alg->cra_blocksize > PAGE_SIZE / 8)
+ /* General maximums for all algs. */
+ if (alg->cra_alignmask > MAX_ALGAPI_ALIGNMASK)
return -EINVAL;
+ if (alg->cra_blocksize > MAX_ALGAPI_BLOCKSIZE)
+ return -EINVAL;
+
+ /* Lower maximums for specific alg types. */
if (!alg->cra_type && (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_CIPHER) {
if (alg->cra_alignmask > MAX_CIPHER_ALIGNMASK)
@@ -253,6 +258,14 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
list_add(&alg->cra_list, &crypto_alg_list);
list_add(&larval->alg.cra_list, &crypto_alg_list);
+ atomic_set(&alg->encrypt_cnt, 0);
+ atomic_set(&alg->decrypt_cnt, 0);
+ atomic64_set(&alg->encrypt_tlen, 0);
+ atomic64_set(&alg->decrypt_tlen, 0);
+ atomic_set(&alg->verify_cnt, 0);
+ atomic_set(&alg->cipher_err_cnt, 0);
+ atomic_set(&alg->sign_cnt, 0);
+
out:
return larval;
@@ -367,6 +380,8 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
err = wait_for_completion_killable(&larval->completion);
WARN_ON(err);
+ if (!err)
+ crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval);
out:
crypto_larval_kill(&larval->alg);
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 5e6df2a087fa..527b44d0af21 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -274,6 +274,8 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
return cryptomgr_schedule_probe(data);
case CRYPTO_MSG_ALG_REGISTER:
return cryptomgr_schedule_test(data);
+ case CRYPTO_MSG_ALG_LOADED:
+ break;
}
return NOTIFY_DONE;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index c40a8c7ee8ae..eb100a04ce9f 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -42,7 +42,7 @@
struct aead_tfm {
struct crypto_aead *aead;
- struct crypto_skcipher *null_tfm;
+ struct crypto_sync_skcipher *null_tfm;
};
static inline bool aead_sufficient_data(struct sock *sk)
@@ -75,13 +75,13 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
return af_alg_sendmsg(sock, msg, size, ivsize);
}
-static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
+static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
struct scatterlist *src,
struct scatterlist *dst, unsigned int len)
{
- SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
- skcipher_request_set_tfm(skreq, null_tfm);
+ skcipher_request_set_sync_tfm(skreq, null_tfm);
skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
NULL, NULL);
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
@@ -99,7 +99,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
struct af_alg_ctx *ctx = ask->private;
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
- struct crypto_skcipher *null_tfm = aeadc->null_tfm;
+ struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
unsigned int i, as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
struct af_alg_tsgl *tsgl, *tmp;
@@ -478,7 +478,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
{
struct aead_tfm *tfm;
struct crypto_aead *aead;
- struct crypto_skcipher *null_tfm;
+ struct crypto_sync_skcipher *null_tfm;
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index bfcf595fd8f9..d0cde541beb6 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -239,7 +239,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
- char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1];
+ char state[HASH_MAX_STATESIZE];
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 4fa8d40d947b..37f54d1b2f66 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -33,7 +33,7 @@ struct authenc_instance_ctx {
struct crypto_authenc_ctx {
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
};
struct authenc_request_ctx {
@@ -185,9 +185,9 @@ static int crypto_authenc_copy_assoc(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
- skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_sync_tfm(skreq, ctx->null);
skcipher_request_set_callback(skreq, aead_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
@@ -318,7 +318,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 50b804747e20..80a25cc04aec 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -36,7 +36,7 @@ struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
};
struct authenc_esn_request_ctx {
@@ -183,9 +183,9 @@ static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
- SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
- skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_sync_tfm(skreq, ctx->null);
skcipher_request_set_callback(skreq, aead_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
@@ -341,7 +341,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 0a083342ec8c..b242fd0d3262 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -50,7 +50,10 @@ struct crypto_ccm_req_priv_ctx {
u32 flags;
struct scatterlist src[3];
struct scatterlist dst[3];
- struct skcipher_request skreq;
+ union {
+ struct ahash_request ahreq;
+ struct skcipher_request skreq;
+ };
};
struct cbcmac_tfm_ctx {
@@ -181,7 +184,7 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
- AHASH_REQUEST_ON_STACK(ahreq, ctx->mac);
+ struct ahash_request *ahreq = &pctx->ahreq;
unsigned int assoclen = req->assoclen;
struct scatterlist sg[3];
u8 *odata = pctx->odata;
@@ -427,7 +430,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
crypto_aead_set_reqsize(
tfm,
align + sizeof(struct crypto_ccm_req_priv_ctx) +
- crypto_skcipher_reqsize(ctr));
+ max(crypto_ahash_reqsize(mac), crypto_skcipher_reqsize(ctr)));
return 0;
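
The union added above works because the CBC-MAC and CTR phases of CCM never run concurrently, so one per-request area sized by max() of the two replaces the on-stack ahash request. The idea in miniature:

#include <stdio.h>

struct hash_phase   { char state[192]; };
struct cipher_phase { char state[96];  };

struct req_ctx {
	int flags;
	union {			/* phases are strictly sequential */
		struct hash_phase   hash;
		struct cipher_phase cipher;
	};
};

int main(void)
{
	/* sizeof(union) == max of its members, so the phase data costs
	 * max(192, 96) bytes here, not 192 + 96. */
	printf("ctx size: %zu\n", sizeof(struct req_ctx));
	return 0;
}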
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index e451c3cb6a56..3ae96587caf9 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -18,20 +18,21 @@
static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes)
{
- u32 stream[CHACHA20_BLOCK_WORDS];
+ /* aligned to potentially speed up crypto_xor() */
+ u8 stream[CHACHA20_BLOCK_SIZE] __aligned(sizeof(long));
if (dst != src)
memcpy(dst, src, bytes);
while (bytes >= CHACHA20_BLOCK_SIZE) {
chacha20_block(state, stream);
- crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE);
+ crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE);
bytes -= CHACHA20_BLOCK_SIZE;
dst += CHACHA20_BLOCK_SIZE;
}
if (bytes) {
chacha20_block(state, stream);
- crypto_xor(dst, (const u8 *)stream, bytes);
+ crypto_xor(dst, stream, bytes);
}
}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index addca7bae33f..7118fb5efbaa 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -76,7 +76,7 @@ struct cryptd_blkcipher_request_ctx {
struct cryptd_skcipher_ctx {
atomic_t refcnt;
- struct crypto_skcipher *child;
+ struct crypto_sync_skcipher *child;
};
struct cryptd_skcipher_request_ctx {
@@ -449,14 +449,16 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
- struct crypto_skcipher *child = ctx->child;
+ struct crypto_sync_skcipher *child = ctx->child;
int err;
- crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(child,
+ crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ err = crypto_sync_skcipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent,
+ crypto_sync_skcipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
@@ -483,13 +485,13 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *child = ctx->child;
- SKCIPHER_REQUEST_ON_STACK(subreq, child);
+ struct crypto_sync_skcipher *child = ctx->child;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
if (unlikely(err == -EINPROGRESS))
goto out;
- skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_sync_tfm(subreq, child);
skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
@@ -511,13 +513,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *child = ctx->child;
- SKCIPHER_REQUEST_ON_STACK(subreq, child);
+ struct crypto_sync_skcipher *child = ctx->child;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
if (unlikely(err == -EINPROGRESS))
goto out;
- skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_sync_tfm(subreq, child);
skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
@@ -568,7 +570,7 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- ctx->child = cipher;
+ ctx->child = (struct crypto_sync_skcipher *)cipher;
crypto_skcipher_set_reqsize(
tfm, sizeof(struct cryptd_skcipher_request_ctx));
return 0;
@@ -578,7 +580,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_skcipher(ctx->child);
+ crypto_free_sync_skcipher(ctx->child);
}
static void cryptd_skcipher_free(struct skcipher_instance *inst)
@@ -1243,7 +1245,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
- return ctx->child;
+ return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 0959b268966c..0bae59922a80 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -26,7 +26,7 @@
#include <linux/string.h>
static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
-static struct crypto_skcipher *crypto_default_null_skcipher;
+static struct crypto_sync_skcipher *crypto_default_null_skcipher;
static int crypto_default_null_skcipher_refcnt;
static int null_compress(struct crypto_tfm *tfm, const u8 *src,
@@ -152,16 +152,15 @@ MODULE_ALIAS_CRYPTO("compress_null");
MODULE_ALIAS_CRYPTO("digest_null");
MODULE_ALIAS_CRYPTO("cipher_null");
-struct crypto_skcipher *crypto_get_default_null_skcipher(void)
+struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
{
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
mutex_lock(&crypto_default_null_skcipher_lock);
tfm = crypto_default_null_skcipher;
if (!tfm) {
- tfm = crypto_alloc_skcipher("ecb(cipher_null)",
- 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
if (IS_ERR(tfm))
goto unlock;
@@ -181,7 +180,7 @@ void crypto_put_default_null_skcipher(void)
{
mutex_lock(&crypto_default_null_skcipher_lock);
if (!--crypto_default_null_skcipher_refcnt) {
- crypto_free_skcipher(crypto_default_null_skcipher);
+ crypto_free_sync_skcipher(crypto_default_null_skcipher);
crypto_default_null_skcipher = NULL;
}
mutex_unlock(&crypto_default_null_skcipher_lock);
diff --git a/crypto/crypto_user.c b/crypto/crypto_user_base.c
index 0e89b5457cab..e41f6cc33fff 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user_base.c
@@ -29,6 +29,7 @@
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
+#include <crypto/internal/cryptouser.h>
#include "internal.h"
@@ -37,7 +38,7 @@
static DEFINE_MUTEX(crypto_cfg_mutex);
/* The crypto netlink socket */
-static struct sock *crypto_nlsk;
+struct sock *crypto_nlsk;
struct crypto_dump_info {
struct sk_buff *in_skb;
@@ -46,7 +47,7 @@ struct crypto_dump_info {
u16 nlmsg_flags;
};
-static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
struct crypto_alg *q, *alg = NULL;
@@ -461,6 +462,7 @@ static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
[CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0,
+ [CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
};
static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
@@ -481,6 +483,9 @@ static const struct crypto_link {
.dump = crypto_dump_report,
.done = crypto_dump_report_done},
[CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
+ [CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = { .doit = crypto_reportstat,
+ .dump = crypto_dump_reportstat,
+ .done = crypto_dump_reportstat_done},
};
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
new file mode 100644
index 000000000000..021ad06bbb62
--- /dev/null
+++ b/crypto/crypto_user_stat.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Crypto user configuration API.
+ *
+ * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com>
+ *
+ */
+
+#include <linux/crypto.h>
+#include <linux/cryptouser.h>
+#include <linux/sched.h>
+#include <net/netlink.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/rng.h>
+#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/cryptouser.h>
+
+#include "internal.h"
+
+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
+
+static DEFINE_MUTEX(crypto_cfg_mutex);
+
+extern struct sock *crypto_nlsk;
+
+struct crypto_dump_info {
+ struct sk_buff *in_skb;
+ struct sk_buff *out_skb;
+ u32 nlmsg_seq;
+ u16 nlmsg_flags;
+};
+
+static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat raead;
+ u64 v64;
+ u32 v32;
+
+ strncpy(raead.type, "aead", sizeof(raead.type));
+
+ v32 = atomic_read(&alg->encrypt_cnt);
+ raead.stat_encrypt_cnt = v32;
+ v64 = atomic64_read(&alg->encrypt_tlen);
+ raead.stat_encrypt_tlen = v64;
+ v32 = atomic_read(&alg->decrypt_cnt);
+ raead.stat_decrypt_cnt = v32;
+ v64 = atomic64_read(&alg->decrypt_tlen);
+ raead.stat_decrypt_tlen = v64;
+ v32 = atomic_read(&alg->aead_err_cnt);
+ raead.stat_aead_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_AEAD,
+ sizeof(struct crypto_stat), &raead))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rcipher;
+ u64 v64;
+ u32 v32;
+
+ strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+ v32 = atomic_read(&alg->encrypt_cnt);
+ rcipher.stat_encrypt_cnt = v32;
+ v64 = atomic64_read(&alg->encrypt_tlen);
+ rcipher.stat_encrypt_tlen = v64;
+ v32 = atomic_read(&alg->decrypt_cnt);
+ rcipher.stat_decrypt_cnt = v32;
+ v64 = atomic64_read(&alg->decrypt_tlen);
+ rcipher.stat_decrypt_tlen = v64;
+ v32 = atomic_read(&alg->cipher_err_cnt);
+ rcipher.stat_cipher_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_CIPHER,
+ sizeof(struct crypto_stat), &rcipher))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rcomp;
+ u64 v64;
+ u32 v32;
+
+ strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
+ v32 = atomic_read(&alg->compress_cnt);
+ rcomp.stat_compress_cnt = v32;
+ v64 = atomic64_read(&alg->compress_tlen);
+ rcomp.stat_compress_tlen = v64;
+ v32 = atomic_read(&alg->decompress_cnt);
+ rcomp.stat_decompress_cnt = v32;
+ v64 = atomic64_read(&alg->decompress_tlen);
+ rcomp.stat_decompress_tlen = v64;
+ v32 = atomic_read(&alg->cipher_err_cnt);
+ rcomp.stat_compress_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_COMPRESS,
+ sizeof(struct crypto_stat), &rcomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat racomp;
+ u64 v64;
+ u32 v32;
+
+ strlcpy(racomp.type, "acomp", sizeof(racomp.type));
+ v32 = atomic_read(&alg->compress_cnt);
+ racomp.stat_compress_cnt = v32;
+ v64 = atomic64_read(&alg->compress_tlen);
+ racomp.stat_compress_tlen = v64;
+ v32 = atomic_read(&alg->decompress_cnt);
+ racomp.stat_decompress_cnt = v32;
+ v64 = atomic64_read(&alg->decompress_tlen);
+ racomp.stat_decompress_tlen = v64;
+ v32 = atomic_read(&alg->cipher_err_cnt);
+ racomp.stat_compress_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_ACOMP,
+ sizeof(struct crypto_stat), &racomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rakcipher;
+ u64 v64;
+ u32 v32;
+
+ strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+ v32 = atomic_read(&alg->encrypt_cnt);
+ rakcipher.stat_encrypt_cnt = v32;
+ v64 = atomic64_read(&alg->encrypt_tlen);
+ rakcipher.stat_encrypt_tlen = v64;
+ v32 = atomic_read(&alg->decrypt_cnt);
+ rakcipher.stat_decrypt_cnt = v32;
+ v64 = atomic64_read(&alg->decrypt_tlen);
+ rakcipher.stat_decrypt_tlen = v64;
+ v32 = atomic_read(&alg->sign_cnt);
+ rakcipher.stat_sign_cnt = v32;
+ v32 = atomic_read(&alg->verify_cnt);
+ rakcipher.stat_verify_cnt = v32;
+ v32 = atomic_read(&alg->akcipher_err_cnt);
+ rakcipher.stat_akcipher_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
+ sizeof(struct crypto_stat), &rakcipher))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rkpp;
+ u32 v;
+
+ strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+ v = atomic_read(&alg->setsecret_cnt);
+ rkpp.stat_setsecret_cnt = v;
+ v = atomic_read(&alg->generate_public_key_cnt);
+ rkpp.stat_generate_public_key_cnt = v;
+ v = atomic_read(&alg->compute_shared_secret_cnt);
+ rkpp.stat_compute_shared_secret_cnt = v;
+ v = atomic_read(&alg->kpp_err_cnt);
+ rkpp.stat_kpp_err_cnt = v;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_KPP,
+ sizeof(struct crypto_stat), &rkpp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rhash;
+ u64 v64;
+ u32 v32;
+
+ strncpy(rhash.type, "ahash", sizeof(rhash.type));
+
+ v32 = atomic_read(&alg->hash_cnt);
+ rhash.stat_hash_cnt = v32;
+ v64 = atomic64_read(&alg->hash_tlen);
+ rhash.stat_hash_tlen = v64;
+ v32 = atomic_read(&alg->hash_err_cnt);
+ rhash.stat_hash_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_HASH,
+ sizeof(struct crypto_stat), &rhash))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rhash;
+ u64 v64;
+ u32 v32;
+
+ strncpy(rhash.type, "shash", sizeof(rhash.type));
+
+ v32 = atomic_read(&alg->hash_cnt);
+ rhash.stat_hash_cnt = v32;
+ v64 = atomic64_read(&alg->hash_tlen);
+ rhash.stat_hash_tlen = v64;
+ v32 = atomic_read(&alg->hash_err_cnt);
+ rhash.stat_hash_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_HASH,
+ sizeof(struct crypto_stat), &rhash))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rrng;
+ u64 v64;
+ u32 v32;
+
+ strncpy(rrng.type, "rng", sizeof(rrng.type));
+
+ v32 = atomic_read(&alg->generate_cnt);
+ rrng.stat_generate_cnt = v32;
+ v64 = atomic64_read(&alg->generate_tlen);
+ rrng.stat_generate_tlen = v64;
+ v32 = atomic_read(&alg->seed_cnt);
+ rrng.stat_seed_cnt = v32;
+ v32 = atomic_read(&alg->hash_err_cnt);
+ rrng.stat_rng_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_RNG,
+ sizeof(struct crypto_stat), &rrng))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_reportstat_one(struct crypto_alg *alg,
+ struct crypto_user_alg *ualg,
+ struct sk_buff *skb)
+{
+ strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+ strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
+ sizeof(ualg->cru_driver_name));
+ strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
+ sizeof(ualg->cru_module_name));
+
+ ualg->cru_type = 0;
+ ualg->cru_mask = 0;
+ ualg->cru_flags = alg->cra_flags;
+ ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
+
+ if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
+ goto nla_put_failure;
+ if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
+ struct crypto_stat rl;
+
+ strlcpy(rl.type, "larval", sizeof(rl.type));
+ if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL,
+ sizeof(struct crypto_stat), &rl))
+ goto nla_put_failure;
+ goto out;
+ }
+
+ switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ if (crypto_report_aead(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ if (crypto_report_cipher(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_BLKCIPHER:
+ if (crypto_report_cipher(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_CIPHER:
+ if (crypto_report_cipher(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_COMPRESS:
+ if (crypto_report_comp(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_ACOMPRESS:
+ if (crypto_report_acomp(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_SCOMPRESS:
+ if (crypto_report_acomp(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_AKCIPHER:
+ if (crypto_report_akcipher(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_KPP:
+ if (crypto_report_kpp(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ if (crypto_report_ahash(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_HASH:
+ if (crypto_report_shash(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_RNG:
+ if (crypto_report_rng(skb, alg))
+ goto nla_put_failure;
+ break;
+ default:
+ pr_err("ERROR: Unhandled alg %d in %s\n",
+ alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
+ __func__);
+ }
+
+out:
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_reportstat_alg(struct crypto_alg *alg,
+ struct crypto_dump_info *info)
+{
+ struct sk_buff *in_skb = info->in_skb;
+ struct sk_buff *skb = info->out_skb;
+ struct nlmsghdr *nlh;
+ struct crypto_user_alg *ualg;
+ int err = 0;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
+ CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
+ if (!nlh) {
+ err = -EMSGSIZE;
+ goto out;
+ }
+
+ ualg = nlmsg_data(nlh);
+
+ err = crypto_reportstat_one(alg, ualg, skb);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+
+ nlmsg_end(skb, nlh);
+
+out:
+ return err;
+}
+
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+ struct nlattr **attrs)
+{
+ struct crypto_user_alg *p = nlmsg_data(in_nlh);
+ struct crypto_alg *alg;
+ struct sk_buff *skb;
+ struct crypto_dump_info info;
+ int err;
+
+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+ return -EINVAL;
+
+ alg = crypto_alg_match(p, 0);
+ if (!alg)
+ return -ENOENT;
+
+ err = -ENOMEM;
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ goto drop_alg;
+
+ info.in_skb = in_skb;
+ info.out_skb = skb;
+ info.nlmsg_seq = in_nlh->nlmsg_seq;
+ info.nlmsg_flags = 0;
+
+ err = crypto_reportstat_alg(alg, &info);
+
+drop_alg:
+ crypto_mod_put(alg);
+
+ if (err)
+ return err;
+
+ return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
+}
+
+int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct crypto_alg *alg;
+ struct crypto_dump_info info;
+ int err;
+
+ if (cb->args[0])
+ goto out;
+
+ cb->args[0] = 1;
+
+ info.in_skb = cb->skb;
+ info.out_skb = skb;
+ info.nlmsg_seq = cb->nlh->nlmsg_seq;
+ info.nlmsg_flags = NLM_F_MULTI;
+
+ list_for_each_entry(alg, &crypto_alg_list, cra_list) {
+ err = crypto_reportstat_alg(alg, &info);
+ if (err)
+ goto out_err;
+ }
+
+out:
+ return skb->len;
+out_err:
+ return err;
+}
+
+int crypto_dump_reportstat_done(struct netlink_callback *cb)
+{
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 45819e6015bf..77e607fdbfb7 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -47,9 +47,9 @@ static int echainiv_encrypt(struct aead_request *req)
info = req->iv;
if (req->src != req->dst) {
- SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
- skcipher_request_set_tfm(nreq, ctx->sknull);
+ skcipher_request_set_sync_tfm(nreq, ctx->sknull);
skcipher_request_set_callback(nreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(nreq, req->src, req->dst,
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 0ad879e1f9b2..e438492db2ca 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -50,7 +50,7 @@ struct crypto_rfc4543_instance_ctx {
struct crypto_rfc4543_ctx {
struct crypto_aead *child;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
u8 nonce[4];
};
@@ -1067,9 +1067,9 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int nbytes = req->assoclen + req->cryptlen -
(enc ? 0 : authsize);
- SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
- skcipher_request_set_tfm(nreq, ctx->null);
+ skcipher_request_set_sync_tfm(nreq, ctx->null);
skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
@@ -1093,7 +1093,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
struct crypto_aead_spawn *spawn = &ictx->aead;
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
unsigned long align;
int err = 0;
diff --git a/crypto/internal.h b/crypto/internal.h
index 9a3f39939fba..ef769b5e8ad3 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -26,12 +26,6 @@
#include <linux/rwsem.h>
#include <linux/slab.h>
-/* Crypto notification events. */
-enum {
- CRYPTO_MSG_ALG_REQUEST,
- CRYPTO_MSG_ALG_REGISTER,
-};
-
struct crypto_instance;
struct crypto_template;
@@ -90,8 +84,6 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
void *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask);
-int crypto_register_notifier(struct notifier_block *nb);
-int crypto_unregister_notifier(struct notifier_block *nb);
int crypto_probing_notify(unsigned long val, void *v);
unsigned int crypto_alg_extsize(struct crypto_alg *alg);
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 393a782679c7..0430ccd08728 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -29,8 +29,6 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
-#define LRW_BUFFER_SIZE 128u
-
#define LRW_BLOCK_SIZE 16
struct priv {
@@ -56,19 +54,7 @@ struct priv {
};
struct rctx {
- be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];
-
be128 t;
-
- be128 *ext;
-
- struct scatterlist srcbuf[2];
- struct scatterlist dstbuf[2];
- struct scatterlist *src;
- struct scatterlist *dst;
-
- unsigned int left;
-
struct skcipher_request subreq;
};
@@ -120,112 +106,68 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
return 0;
}
-static inline void inc(be128 *iv)
-{
- be64_add_cpu(&iv->b, 1);
- if (!iv->b)
- be64_add_cpu(&iv->a, 1);
-}
-
-/* this returns the number of consequative 1 bits starting
- * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
-static inline int get_index128(be128 *block)
+/*
+ * Returns the number of trailing '1' bits in the words of the counter, which is
+ * represented by 4 32-bit words, arranged from least to most significant.
+ * At the same time, increments the counter by one.
+ *
+ * For example:
+ *
+ * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
+ * int i = next_index(&counter);
+ * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
+ */
+static int next_index(u32 *counter)
{
- int x;
- __be32 *p = (__be32 *) block;
+ int i, res = 0;
- for (p += 3, x = 0; x < 128; p--, x += 32) {
- u32 val = be32_to_cpup(p);
+ for (i = 0; i < 4; i++) {
+ if (counter[i] + 1 != 0)
+ return res + ffz(counter[i]++);
- if (!~val)
- continue;
-
- return x + ffz(val);
+ counter[i] = 0;
+ res += 32;
}
- return x;
+ /*
+ * If we get here, then res == 128 and we are incrementing the counter
+ * from all ones to all zeros. This means we must return index 127, i.e.
+ * the one corresponding to key2*{ 1,...,1 }.
+ */
+ return 127;
}
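
The comment's example is easy to check outside the kernel; this stand-alone version swaps ffz() for a portable loop but keeps the post-increment trick intact:

#include <stdio.h>
#include <stdint.h>

static int ffz32(uint32_t x)	/* index of the first zero bit */
{
	int i = 0;

	while (x & 1) {
		x >>= 1;
		i++;
	}
	return i;
}

static int next_index(uint32_t *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + ffz32(counter[i]++);
		counter[i] = 0;
		res += 32;
	}
	return 127;	/* counter wrapped from all ones to all zeros */
}

int main(void)
{
	uint32_t c[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };

	/* matches the example in the comment: prints
	 * "33  {0,2,0,0}" */
	printf("%d  {%x,%x,%x,%x}\n", next_index(c), c[0], c[1], c[2], c[3]);
	return 0;
}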
-static int post_crypt(struct skcipher_request *req)
+/*
+ * We compute the tweak masks twice (both before and after the ECB encryption or
+ * decryption) to avoid having to allocate a temporary buffer and/or make
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
+ * just doing the next_index() calls again.
+ */
+static int xor_tweak(struct skcipher_request *req, bool second_pass)
{
- struct rctx *rctx = skcipher_request_ctx(req);
- be128 *buf = rctx->ext ?: rctx->buf;
- struct skcipher_request *subreq;
const int bs = LRW_BLOCK_SIZE;
- struct skcipher_walk w;
- struct scatterlist *sg;
- unsigned offset;
- int err;
-
- subreq = &rctx->subreq;
- err = skcipher_walk_virt(&w, subreq, false);
-
- while (w.nbytes) {
- unsigned int avail = w.nbytes;
- be128 *wdst;
-
- wdst = w.dst.virt.addr;
-
- do {
- be128_xor(wdst, buf++, wdst);
- wdst++;
- } while ((avail -= bs) >= bs);
-
- err = skcipher_walk_done(&w, avail);
- }
-
- rctx->left -= subreq->cryptlen;
-
- if (err || !rctx->left)
- goto out;
-
- rctx->dst = rctx->dstbuf;
-
- scatterwalk_done(&w.out, 0, 1);
- sg = w.out.sg;
- offset = w.out.offset;
-
- if (rctx->dst != sg) {
- rctx->dst[0] = *sg;
- sg_unmark_end(rctx->dst);
- scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
- }
- rctx->dst[0].length -= offset - sg->offset;
- rctx->dst[0].offset = offset;
-
-out:
- return err;
-}
-
-static int pre_crypt(struct skcipher_request *req)
-{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rctx *rctx = skcipher_request_ctx(req);
struct priv *ctx = crypto_skcipher_ctx(tfm);
- be128 *buf = rctx->ext ?: rctx->buf;
- struct skcipher_request *subreq;
- const int bs = LRW_BLOCK_SIZE;
+ struct rctx *rctx = skcipher_request_ctx(req);
+ be128 t = rctx->t;
struct skcipher_walk w;
- struct scatterlist *sg;
- unsigned cryptlen;
- unsigned offset;
- be128 *iv;
- bool more;
+ __be32 *iv;
+ u32 counter[4];
int err;
- subreq = &rctx->subreq;
- skcipher_request_set_tfm(subreq, tfm);
-
- cryptlen = subreq->cryptlen;
- more = rctx->left > cryptlen;
- if (!more)
- cryptlen = rctx->left;
+ if (second_pass) {
+ req = &rctx->subreq;
+ /* set to our TFM to enforce correct alignment: */
+ skcipher_request_set_tfm(req, tfm);
+ }
- skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
- cryptlen, req->iv);
+ err = skcipher_walk_virt(&w, req, false);
+ iv = (__be32 *)w.iv;
- err = skcipher_walk_virt(&w, subreq, false);
- iv = w.iv;
+ counter[0] = be32_to_cpu(iv[3]);
+ counter[1] = be32_to_cpu(iv[2]);
+ counter[2] = be32_to_cpu(iv[1]);
+ counter[3] = be32_to_cpu(iv[0]);
while (w.nbytes) {
unsigned int avail = w.nbytes;
@@ -236,188 +178,85 @@ static int pre_crypt(struct skcipher_request *req)
wdst = w.dst.virt.addr;
do {
- *buf++ = rctx->t;
- be128_xor(wdst++, &rctx->t, wsrc++);
+ be128_xor(wdst++, &t, wsrc++);
/* T <- I*Key2, using the optimization
* discussed in the specification */
- be128_xor(&rctx->t, &rctx->t,
- &ctx->mulinc[get_index128(iv)]);
- inc(iv);
+ be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
} while ((avail -= bs) >= bs);
- err = skcipher_walk_done(&w, avail);
- }
-
- skcipher_request_set_tfm(subreq, ctx->child);
- skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
- cryptlen, NULL);
-
- if (err || !more)
- goto out;
-
- rctx->src = rctx->srcbuf;
-
- scatterwalk_done(&w.in, 0, 1);
- sg = w.in.sg;
- offset = w.in.offset;
+ if (second_pass && w.nbytes == w.total) {
+ iv[0] = cpu_to_be32(counter[3]);
+ iv[1] = cpu_to_be32(counter[2]);
+ iv[2] = cpu_to_be32(counter[1]);
+ iv[3] = cpu_to_be32(counter[0]);
+ }
- if (rctx->src != sg) {
- rctx->src[0] = *sg;
- sg_unmark_end(rctx->src);
- scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
+ err = skcipher_walk_done(&w, avail);
}
- rctx->src[0].length -= offset - sg->offset;
- rctx->src[0].offset = offset;
-out:
return err;
}
-static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+static int xor_tweak_pre(struct skcipher_request *req)
{
- struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
- gfp_t gfp;
-
- subreq = &rctx->subreq;
- skcipher_request_set_callback(subreq, req->base.flags, done, req);
-
- gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- rctx->ext = NULL;
-
- subreq->cryptlen = LRW_BUFFER_SIZE;
- if (req->cryptlen > LRW_BUFFER_SIZE) {
- unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
-
- rctx->ext = kmalloc(n, gfp);
- if (rctx->ext)
- subreq->cryptlen = n;
- }
-
- rctx->src = req->src;
- rctx->dst = req->dst;
- rctx->left = req->cryptlen;
-
- /* calculate first value of T */
- memcpy(&rctx->t, req->iv, sizeof(rctx->t));
-
- /* T <- I*Key2 */
- gf128mul_64k_bbe(&rctx->t, ctx->table);
-
- return 0;
+ return xor_tweak(req, false);
}
-static void exit_crypt(struct skcipher_request *req)
+static int xor_tweak_post(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
-
- rctx->left = 0;
-
- if (rctx->ext)
- kzfree(rctx->ext);
+ return xor_tweak(req, true);
}
-static int do_encrypt(struct skcipher_request *req, int err)
-{
- struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
-
- subreq = &rctx->subreq;
-
- while (!err && rctx->left) {
- err = pre_crypt(req) ?:
- crypto_skcipher_encrypt(subreq) ?:
- post_crypt(req);
-
- if (err == -EINPROGRESS || err == -EBUSY)
- return err;
- }
-
- exit_crypt(req);
- return err;
-}
-
-static void encrypt_done(struct crypto_async_request *areq, int err)
+static void crypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
- struct skcipher_request *subreq;
- struct rctx *rctx;
- rctx = skcipher_request_ctx(req);
+ if (!err)
+ err = xor_tweak_post(req);
- if (err == -EINPROGRESS) {
- if (rctx->left != req->cryptlen)
- return;
- goto out;
- }
-
- subreq = &rctx->subreq;
- subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
- err = do_encrypt(req, err ?: post_crypt(req));
- if (rctx->left)
- return;
-
-out:
skcipher_request_complete(req, err);
}
-static int encrypt(struct skcipher_request *req)
-{
- return do_encrypt(req, init_crypt(req, encrypt_done));
-}
-
-static int do_decrypt(struct skcipher_request *req, int err)
+static void init_crypt(struct skcipher_request *req)
{
+ struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
-
- subreq = &rctx->subreq;
+ struct skcipher_request *subreq = &rctx->subreq;
- while (!err && rctx->left) {
- err = pre_crypt(req) ?:
- crypto_skcipher_decrypt(subreq) ?:
- post_crypt(req);
+ skcipher_request_set_tfm(subreq, ctx->child);
+ skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+ /* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
+ skcipher_request_set_crypt(subreq, req->dst, req->dst,
+ req->cryptlen, req->iv);
- if (err == -EINPROGRESS || err == -EBUSY)
- return err;
- }
+ /* calculate first value of T */
+ memcpy(&rctx->t, req->iv, sizeof(rctx->t));
- exit_crypt(req);
- return err;
+ /* T <- I*Key2 */
+ gf128mul_64k_bbe(&rctx->t, ctx->table);
}
-static void decrypt_done(struct crypto_async_request *areq, int err)
+static int encrypt(struct skcipher_request *req)
{
- struct skcipher_request *req = areq->data;
- struct skcipher_request *subreq;
- struct rctx *rctx;
-
- rctx = skcipher_request_ctx(req);
-
- if (err == -EINPROGRESS) {
- if (rctx->left != req->cryptlen)
- return;
- goto out;
- }
-
- subreq = &rctx->subreq;
- subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
- err = do_decrypt(req, err ?: post_crypt(req));
- if (rctx->left)
- return;
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq = &rctx->subreq;
-out:
- skcipher_request_complete(req, err);
+ init_crypt(req);
+ return xor_tweak_pre(req) ?:
+ crypto_skcipher_encrypt(subreq) ?:
+ xor_tweak_post(req);
}
static int decrypt(struct skcipher_request *req)
{
- return do_decrypt(req, init_crypt(req, decrypt_done));
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq = &rctx->subreq;
+
+ init_crypt(req);
+ return xor_tweak_pre(req) ?:
+ crypto_skcipher_decrypt(subreq) ?:
+ xor_tweak_post(req);
}
static int init_tfm(struct crypto_skcipher *tfm)
@@ -543,7 +382,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
- (__alignof__(u64) - 1);
+ (__alignof__(__be32) - 1);
inst->alg.ivsize = LRW_BLOCK_SIZE;
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
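
The reworked lrw.c above steps the tweak T incrementally: instead of a full
GF(2^128) multiplication per block, it XORs in the precomputed ctx->mulinc[]
entry selected by next_index(). Below is a minimal standalone sketch of that
indexing logic, with __builtin_ctz standing in for the kernel's ffz() and all
other names illustrative:

#include <stdio.h>
#include <stdint.h>

/*
 * Return the index of the lowest tweak bit that flips when the 128-bit
 * block counter (four host-endian u32 limbs, least significant first, as
 * xor_tweak() loads it from the big-endian IV) is incremented.
 */
static int next_index(uint32_t *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		if (counter[i] + 1 != 0)
			return res + __builtin_ctz(++counter[i]);
		counter[i] = 0;
		res += 32;
	}
	/* Full 128-bit wrap: every bit flips, so use the last table entry. */
	return 127;
}

int main(void)
{
	uint32_t ctr[4] = { 0, 0, 0, 0 };
	int i;

	/* Prints the ruler sequence 0 1 0 2 0 1 0 3 ... */
	for (i = 0; i < 8; i++)
		printf("%d ", next_index(ctr));
	printf("\n");
	return 0;
}

Each per-block step then costs one table lookup plus one 128-bit XOR
(t ^= ctx->mulinc[next_index(counter)]) rather than a gf128mul call, which is
why recomputing the masks on the second pass is cheaper than buffering them.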
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
deleted file mode 100644
index f14152147ce8..000000000000
--- a/crypto/mcryptd.c
+++ /dev/null
@@ -1,675 +0,0 @@
-/*
- * Software multibuffer async crypto daemon.
- *
- * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
- *
- * Adapted from crypto daemon.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/aead.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <linux/sched/stat.h>
-#include <linux/slab.h>
-
-#define MCRYPTD_MAX_CPU_QLEN 100
-#define MCRYPTD_BATCH 9
-
-static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
- unsigned int tail);
-
-struct mcryptd_flush_list {
- struct list_head list;
- struct mutex lock;
-};
-
-static struct mcryptd_flush_list __percpu *mcryptd_flist;
-
-struct hashd_instance_ctx {
- struct crypto_ahash_spawn spawn;
- struct mcryptd_queue *queue;
-};
-
-static void mcryptd_queue_worker(struct work_struct *work);
-
-void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
-{
- struct mcryptd_flush_list *flist;
-
- if (!cstate->flusher_engaged) {
- /* put the flusher on the flush list */
- flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
- mutex_lock(&flist->lock);
- list_add_tail(&cstate->flush_list, &flist->list);
- cstate->flusher_engaged = true;
- cstate->next_flush = jiffies + delay;
- queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
- &cstate->flush, delay);
- mutex_unlock(&flist->lock);
- }
-}
-EXPORT_SYMBOL(mcryptd_arm_flusher);
-
-static int mcryptd_init_queue(struct mcryptd_queue *queue,
- unsigned int max_cpu_qlen)
-{
- int cpu;
- struct mcryptd_cpu_queue *cpu_queue;
-
- queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
- pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
- if (!queue->cpu_queue)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
- pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
- crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
- INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
- spin_lock_init(&cpu_queue->q_lock);
- }
- return 0;
-}
-
-static void mcryptd_fini_queue(struct mcryptd_queue *queue)
-{
- int cpu;
- struct mcryptd_cpu_queue *cpu_queue;
-
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
- BUG_ON(cpu_queue->queue.qlen);
- }
- free_percpu(queue->cpu_queue);
-}
-
-static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
- struct crypto_async_request *request,
- struct mcryptd_hash_request_ctx *rctx)
-{
- int cpu, err;
- struct mcryptd_cpu_queue *cpu_queue;
-
- cpu_queue = raw_cpu_ptr(queue->cpu_queue);
- spin_lock(&cpu_queue->q_lock);
- cpu = smp_processor_id();
- rctx->tag.cpu = smp_processor_id();
-
- err = crypto_enqueue_request(&cpu_queue->queue, request);
- pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
- cpu, cpu_queue, request);
- spin_unlock(&cpu_queue->q_lock);
- queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-
- return err;
-}
-
-/*
- * Try to opportunisticlly flush the partially completed jobs if
- * crypto daemon is the only task running.
- */
-static void mcryptd_opportunistic_flush(void)
-{
- struct mcryptd_flush_list *flist;
- struct mcryptd_alg_cstate *cstate;
-
- flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
- while (single_task_running()) {
- mutex_lock(&flist->lock);
- cstate = list_first_entry_or_null(&flist->list,
- struct mcryptd_alg_cstate, flush_list);
- if (!cstate || !cstate->flusher_engaged) {
- mutex_unlock(&flist->lock);
- return;
- }
- list_del(&cstate->flush_list);
- cstate->flusher_engaged = false;
- mutex_unlock(&flist->lock);
- cstate->alg_state->flusher(cstate);
- }
-}
-
-/*
- * Called in workqueue context, do one real cryption work (via
- * req->complete) and reschedule itself if there are more work to
- * do.
- */
-static void mcryptd_queue_worker(struct work_struct *work)
-{
- struct mcryptd_cpu_queue *cpu_queue;
- struct crypto_async_request *req, *backlog;
- int i;
-
- /*
- * Need to loop through more than once for multi-buffer to
- * be effective.
- */
-
- cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
- i = 0;
- while (i < MCRYPTD_BATCH || single_task_running()) {
-
- spin_lock_bh(&cpu_queue->q_lock);
- backlog = crypto_get_backlog(&cpu_queue->queue);
- req = crypto_dequeue_request(&cpu_queue->queue);
- spin_unlock_bh(&cpu_queue->q_lock);
-
- if (!req) {
- mcryptd_opportunistic_flush();
- return;
- }
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
- req->complete(req, 0);
- if (!cpu_queue->queue.qlen)
- return;
- ++i;
- }
- if (cpu_queue->queue.qlen)
- queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
-}
-
-void mcryptd_flusher(struct work_struct *__work)
-{
- struct mcryptd_alg_cstate *alg_cpu_state;
- struct mcryptd_alg_state *alg_state;
- struct mcryptd_flush_list *flist;
- int cpu;
-
- cpu = smp_processor_id();
- alg_cpu_state = container_of(to_delayed_work(__work),
- struct mcryptd_alg_cstate, flush);
- alg_state = alg_cpu_state->alg_state;
- if (alg_cpu_state->cpu != cpu)
- pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
- cpu, alg_cpu_state->cpu);
-
- if (alg_cpu_state->flusher_engaged) {
- flist = per_cpu_ptr(mcryptd_flist, cpu);
- mutex_lock(&flist->lock);
- list_del(&alg_cpu_state->flush_list);
- alg_cpu_state->flusher_engaged = false;
- mutex_unlock(&flist->lock);
- alg_state->flusher(alg_cpu_state);
- }
-}
-EXPORT_SYMBOL_GPL(mcryptd_flusher);
-
-static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-
- return ictx->queue;
-}
-
-static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
- unsigned int tail)
-{
- char *p;
- struct crypto_instance *inst;
- int err;
-
- p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- inst = (void *)(p + head);
-
- err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_inst;
-
- memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
-
- inst->alg.cra_priority = alg->cra_priority + 50;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
-
-out:
- return p;
-
-out_free_inst:
- kfree(p);
- p = ERR_PTR(err);
- goto out;
-}
-
-static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
- u32 *mask)
-{
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return false;
-
- *type |= algt->type & CRYPTO_ALG_INTERNAL;
- *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
-
- if (*type & *mask & CRYPTO_ALG_INTERNAL)
- return true;
- else
- return false;
-}
-
-static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_ahash_spawn *spawn = &ictx->spawn;
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_ahash *hash;
-
- hash = crypto_spawn_ahash(spawn);
- if (IS_ERR(hash))
- return PTR_ERR(hash);
-
- ctx->child = hash;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct mcryptd_hash_request_ctx) +
- crypto_ahash_reqsize(hash));
- return 0;
-}
-
-static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_ahash(ctx->child);
-}
-
-static int mcryptd_hash_setkey(struct crypto_ahash *parent,
- const u8 *key, unsigned int keylen)
-{
- struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
- struct crypto_ahash *child = ctx->child;
- int err;
-
- crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(child, key, keylen);
- crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
-static int mcryptd_hash_enqueue(struct ahash_request *req,
- crypto_completion_t complete)
-{
- int ret;
-
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct mcryptd_queue *queue =
- mcryptd_get_queue(crypto_ahash_tfm(tfm));
-
- rctx->complete = req->base.complete;
- req->base.complete = complete;
-
- ret = mcryptd_enqueue_request(queue, &req->base, rctx);
-
- return ret;
-}
-
-static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_ahash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct ahash_request *desc = &rctx->areq;
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- ahash_request_set_tfm(desc, child);
- ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req_async);
-
- rctx->out = req->result;
- err = crypto_ahash_init(desc);
-
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_init_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_init);
-}
-
-static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- rctx->out = req->result;
- err = crypto_ahash_update(&rctx->areq);
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_update_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_update);
-}
-
-static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- rctx->out = req->result;
- err = crypto_ahash_final(&rctx->areq);
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_final_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_final);
-}
-
-static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
- rctx->out = req->result;
- err = crypto_ahash_finup(&rctx->areq);
-
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
-}
-
-static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_ahash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct ahash_request *desc = &rctx->areq;
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- ahash_request_set_tfm(desc, child);
- ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req_async);
-
- rctx->out = req->result;
- err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc);
-
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
-}
-
-static int mcryptd_hash_export(struct ahash_request *req, void *out)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- return crypto_ahash_export(&rctx->areq, out);
-}
-
-static int mcryptd_hash_import(struct ahash_request *req, const void *in)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- return crypto_ahash_import(&rctx->areq, in);
-}
-
-static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
- struct mcryptd_queue *queue)
-{
- struct hashd_instance_ctx *ctx;
- struct ahash_instance *inst;
- struct hash_alg_common *halg;
- struct crypto_alg *alg;
- u32 type = 0;
- u32 mask = 0;
- int err;
-
- if (!mcryptd_check_internal(tb, &type, &mask))
- return -EINVAL;
-
- halg = ahash_attr_alg(tb[1], type, mask);
- if (IS_ERR(halg))
- return PTR_ERR(halg);
-
- alg = &halg->base;
- pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
- inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
- sizeof(*ctx));
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
-
- ctx = ahash_instance_ctx(inst);
- ctx->queue = queue;
-
- err = crypto_init_ahash_spawn(&ctx->spawn, halg,
- ahash_crypto_instance(inst));
- if (err)
- goto out_free_inst;
-
- inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_OPTIONAL_KEY));
-
- inst->alg.halg.digestsize = halg->digestsize;
- inst->alg.halg.statesize = halg->statesize;
- inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
-
- inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
- inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;
-
- inst->alg.init = mcryptd_hash_init_enqueue;
- inst->alg.update = mcryptd_hash_update_enqueue;
- inst->alg.final = mcryptd_hash_final_enqueue;
- inst->alg.finup = mcryptd_hash_finup_enqueue;
- inst->alg.export = mcryptd_hash_export;
- inst->alg.import = mcryptd_hash_import;
- if (crypto_hash_alg_has_setkey(halg))
- inst->alg.setkey = mcryptd_hash_setkey;
- inst->alg.digest = mcryptd_hash_digest_enqueue;
-
- err = ahash_register_instance(tmpl, inst);
- if (err) {
- crypto_drop_ahash(&ctx->spawn);
-out_free_inst:
- kfree(inst);
- }
-
-out_put_alg:
- crypto_mod_put(alg);
- return err;
-}
-
-static struct mcryptd_queue mqueue;
-
-static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_DIGEST:
- return mcryptd_create_hash(tmpl, tb, &mqueue);
- break;
- }
-
- return -EINVAL;
-}
-
-static void mcryptd_free(struct crypto_instance *inst)
-{
- struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
- struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
-
- switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_drop_ahash(&hctx->spawn);
- kfree(ahash_instance(inst));
- return;
- default:
- crypto_drop_spawn(&ctx->spawn);
- kfree(inst);
- }
-}
-
-static struct crypto_template mcryptd_tmpl = {
- .name = "mcryptd",
- .create = mcryptd_create,
- .free = mcryptd_free,
- .module = THIS_MODULE,
-};
-
-struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask)
-{
- char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
- struct crypto_ahash *tfm;
-
- if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
- "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-EINVAL);
- tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
- if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
- crypto_free_ahash(tfm);
- return ERR_PTR(-EINVAL);
- }
-
- return __mcryptd_ahash_cast(tfm);
-}
-EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
-
-struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
-{
- struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
-
- return ctx->child;
-}
-EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
-
-struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- return &rctx->areq;
-}
-EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);
-
-void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
-{
- crypto_free_ahash(&tfm->base);
-}
-EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
-
-static int __init mcryptd_init(void)
-{
- int err, cpu;
- struct mcryptd_flush_list *flist;
-
- mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
- for_each_possible_cpu(cpu) {
- flist = per_cpu_ptr(mcryptd_flist, cpu);
- INIT_LIST_HEAD(&flist->list);
- mutex_init(&flist->lock);
- }
-
- err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
- if (err) {
- free_percpu(mcryptd_flist);
- return err;
- }
-
- err = crypto_register_template(&mcryptd_tmpl);
- if (err) {
- mcryptd_fini_queue(&mqueue);
- free_percpu(mcryptd_flist);
- }
-
- return err;
-}
-
-static void __exit mcryptd_exit(void)
-{
- mcryptd_fini_queue(&mqueue);
- crypto_unregister_template(&mcryptd_tmpl);
- free_percpu(mcryptd_flist);
-}
-
-subsys_initcall(mcryptd_init);
-module_exit(mcryptd_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
-MODULE_ALIAS_CRYPTO("mcryptd");
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index d057cf5ac4a8..3889c188f266 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -385,14 +385,11 @@ static void crypto_morus1280_final(struct morus1280_state *state,
struct morus1280_block *tag_xor,
u64 assoclen, u64 cryptlen)
{
- u64 assocbits = assoclen * 8;
- u64 cryptbits = cryptlen * 8;
-
struct morus1280_block tmp;
unsigned int i;
- tmp.words[0] = cpu_to_le64(assocbits);
- tmp.words[1] = cpu_to_le64(cryptbits);
+ tmp.words[0] = assoclen * 8;
+ tmp.words[1] = cryptlen * 8;
tmp.words[2] = 0;
tmp.words[3] = 0;
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 1ca76e54281b..da06ec2f6a80 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -384,21 +384,13 @@ static void crypto_morus640_final(struct morus640_state *state,
struct morus640_block *tag_xor,
u64 assoclen, u64 cryptlen)
{
- u64 assocbits = assoclen * 8;
- u64 cryptbits = cryptlen * 8;
-
- u32 assocbits_lo = (u32)assocbits;
- u32 assocbits_hi = (u32)(assocbits >> 32);
- u32 cryptbits_lo = (u32)cryptbits;
- u32 cryptbits_hi = (u32)(cryptbits >> 32);
-
struct morus640_block tmp;
unsigned int i;
- tmp.words[0] = cpu_to_le32(assocbits_lo);
- tmp.words[1] = cpu_to_le32(assocbits_hi);
- tmp.words[2] = cpu_to_le32(cryptbits_lo);
- tmp.words[3] = cpu_to_le32(cryptbits_hi);
+ tmp.words[0] = lower_32_bits(assoclen * 8);
+ tmp.words[1] = upper_32_bits(assoclen * 8);
+ tmp.words[2] = lower_32_bits(cryptlen * 8);
+ tmp.words[3] = upper_32_bits(cryptlen * 8);
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
state->s[4].words[i] ^= state->s[0].words[i];
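
The two MORUS hunks above drop the explicit cpu_to_le32()/cpu_to_le64()
conversions and split the 64-bit bit counts with the kernel's
lower_32_bits()/upper_32_bits() helpers instead. A hedged userspace sketch of
what those helpers do (the macros below are local stand-ins for the real ones
in linux/kernel.h, and the length value is made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
	/* > 512 MiB of ciphertext, so the length in bits needs 33 bits. */
	uint64_t cryptbits = 0x30000000ULL * 8;

	printf("tmp.words[2] = %#010x\n", (unsigned)lower_32_bits(cryptbits));
	printf("tmp.words[3] = %#010x\n", (unsigned)upper_32_bits(cryptbits));
	return 0;
}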
diff --git a/crypto/ofb.c b/crypto/ofb.c
new file mode 100644
index 000000000000..886631708c5e
--- /dev/null
+++ b/crypto/ofb.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * OFB: Output FeedBack mode
+ *
+ * Copyright (C) 2018 ARM Limited or its affiliates.
+ * All rights reserved.
+ *
+ * Based loosely on public domain code gleaned from libtomcrypt
+ * (https://github.com/libtom/libtomcrypt).
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct crypto_ofb_ctx {
+ struct crypto_cipher *child;
+ int cnt;
+};
+
+
+static int crypto_ofb_setkey(struct crypto_skcipher *parent, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_cipher *child = ctx->child;
+ int err;
+
+ crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_cipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int crypto_ofb_encrypt_segment(struct crypto_ofb_ctx *ctx,
+ struct skcipher_walk *walk,
+ struct crypto_cipher *tfm)
+{
+ int bsize = crypto_cipher_blocksize(tfm);
+ int nbytes = walk->nbytes;
+
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+
+ do {
+ if (ctx->cnt == bsize) {
+ if (nbytes < bsize)
+ break;
+ crypto_cipher_encrypt_one(tfm, iv, iv);
+ ctx->cnt = 0;
+ }
+ *dst = *src ^ iv[ctx->cnt];
+ src++;
+ dst++;
+ ctx->cnt++;
+ } while (--nbytes);
+ return nbytes;
+}
+
+static int crypto_ofb_encrypt(struct skcipher_request *req)
+{
+ struct skcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int bsize;
+ struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_cipher *child = ctx->child;
+ int ret = 0;
+
+ bsize = crypto_cipher_blocksize(child);
+ ctx->cnt = bsize;
+
+ ret = skcipher_walk_virt(&walk, req, false);
+
+ while (walk.nbytes) {
+ ret = crypto_ofb_encrypt_segment(ctx, &walk, child);
+ ret = skcipher_walk_done(&walk, ret);
+ }
+
+ return ret;
+}
+
+/* OFB encrypt and decrypt are identical */
+static int crypto_ofb_decrypt(struct skcipher_request *req)
+{
+ return crypto_ofb_encrypt(req);
+}
+
+static int crypto_ofb_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+ struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_cipher *cipher;
+
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ return 0;
+}
+
+static void crypto_ofb_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_cipher(ctx->child);
+}
+
+static void crypto_ofb_free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct crypto_spawn *spawn;
+ struct crypto_alg *alg;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
+ if (err)
+ return err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ algt = crypto_get_attr_type(tb);
+ err = PTR_ERR(algt);
+ if (IS_ERR(algt))
+ goto err_free_inst;
+
+ mask = CRYPTO_ALG_TYPE_MASK |
+ crypto_requires_off(algt->type, algt->mask,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
+ err = PTR_ERR(alg);
+ if (IS_ERR(alg))
+ goto err_free_inst;
+
+ spawn = skcipher_instance_ctx(inst);
+ err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ crypto_mod_put(alg);
+ if (err)
+ goto err_free_inst;
+
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "ofb", alg);
+ if (err)
+ goto err_drop_spawn;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+ /* We access the data as u32s when xoring. */
+ inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
+
+ inst->alg.ivsize = alg->cra_blocksize;
+ inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+ inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_ofb_ctx);
+
+ inst->alg.init = crypto_ofb_init_tfm;
+ inst->alg.exit = crypto_ofb_exit_tfm;
+
+ inst->alg.setkey = crypto_ofb_setkey;
+ inst->alg.encrypt = crypto_ofb_encrypt;
+ inst->alg.decrypt = crypto_ofb_decrypt;
+
+ inst->free = crypto_ofb_free;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_spawn(spawn);
+err_free_inst:
+ kfree(inst);
+ goto out;
+}
+
+static struct crypto_template crypto_ofb_tmpl = {
+ .name = "ofb",
+ .create = crypto_ofb_create,
+ .module = THIS_MODULE,
+};
+
+static int __init crypto_ofb_module_init(void)
+{
+ return crypto_register_template(&crypto_ofb_tmpl);
+}
+
+static void __exit crypto_ofb_module_exit(void)
+{
+ crypto_unregister_template(&crypto_ofb_tmpl);
+}
+
+module_init(crypto_ofb_module_init);
+module_exit(crypto_ofb_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("OFB block cipher algorithm");
+MODULE_ALIAS_CRYPTO("ofb");
diff --git a/crypto/rng.c b/crypto/rng.c
index b4a618668161..547f16ecbfb0 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -50,6 +50,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
}
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
+ crypto_stat_rng_seed(tfm, err);
out:
kzfree(buf);
return err;
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 9893dbfc1af4..812476e46821 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -261,15 +261,6 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
- req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
- if (!req_ctx->out_buf) {
- kfree(req_ctx->in_buf);
- return -ENOMEM;
- }
-
- pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
- ctx->key_size, NULL);
-
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_encrypt_sign_complete_cb, req);
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 39dbf2f7e5f5..64a412be255e 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -73,9 +73,9 @@ static int seqiv_aead_encrypt(struct aead_request *req)
info = req->iv;
if (req->src != req->dst) {
- SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
- skcipher_request_set_tfm(nreq, ctx->sknull);
+ skcipher_request_set_sync_tfm(nreq, ctx->sknull);
skcipher_request_set_callback(nreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(nreq, req->src, req->dst,
diff --git a/crypto/shash.c b/crypto/shash.c
index 5d732c6bb4b2..d21f04d70dce 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -73,13 +73,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
-static inline unsigned int shash_align_buffer_size(unsigned len,
- unsigned long mask)
-{
- typedef u8 __aligned_largest u8_aligned;
- return len + (mask & ~(__alignof__(u8_aligned) - 1));
-}
-
static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
@@ -88,11 +81,17 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
unsigned long alignmask = crypto_shash_alignmask(tfm);
unsigned int unaligned_len = alignmask + 1 -
((unsigned long)data & alignmask);
- u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
- __aligned_largest;
+ /*
+ * We cannot count on __aligned() working for large values:
+ * https://patchwork.kernel.org/patch/9507697/
+ */
+ u8 ubuf[MAX_ALGAPI_ALIGNMASK * 2];
u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err;
+ if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf)))
+ return -EINVAL;
+
if (unaligned_len > len)
unaligned_len = len;
@@ -124,11 +123,17 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
unsigned long alignmask = crypto_shash_alignmask(tfm);
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned int ds = crypto_shash_digestsize(tfm);
- u8 ubuf[shash_align_buffer_size(ds, alignmask)]
- __aligned_largest;
+ /*
+ * We cannot count on __aligned() working for large values:
+ * https://patchwork.kernel.org/patch/9507697/
+ */
+ u8 ubuf[MAX_ALGAPI_ALIGNMASK + HASH_MAX_DIGESTSIZE];
u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err;
+ if (WARN_ON(buf + ds > ubuf + sizeof(ubuf)))
+ return -EINVAL;
+
err = shash->final(desc, buf);
if (err)
goto out;
@@ -458,9 +463,9 @@ static int shash_prepare_alg(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->base;
- if (alg->digestsize > PAGE_SIZE / 8 ||
- alg->descsize > PAGE_SIZE / 8 ||
- alg->statesize > PAGE_SIZE / 8)
+ if (alg->digestsize > HASH_MAX_DIGESTSIZE ||
+ alg->descsize > HASH_MAX_DESCSIZE ||
+ alg->statesize > HASH_MAX_STATESIZE)
return -EINVAL;
base->cra_type = &crypto_shash_type;
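
The shash hunks above replace variable-length stack arrays with worst-case
fixed buffers, then round the pointer up to the required alignment with
PTR_ALIGN(); the new WARN_ON()s check that the aligned window still fits. A
hedged userspace model of that over-allocate-then-align pattern (ALIGN_UP and
PTR_ALIGN below are local stand-ins for the kernel macros):

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~(uintptr_t)((a) - 1))
#define PTR_ALIGN(p, a)	((void *)ALIGN_UP((uintptr_t)(p), (a)))

int main(void)
{
	unsigned long alignmask = 63;	/* example worst-case mask */
	uint8_t ubuf[2 * 64];		/* 2x over-allocation, like the patch */
	uint8_t *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);

	printf("ubuf=%p buf=%p (aligned to %lu)\n",
	       (void *)ubuf, (void *)buf, alignmask + 1);
	/* the analogue of the patch's WARN_ON: the window must fit */
	printf("fits: %d\n", buf + 64 <= ubuf + sizeof(ubuf));
	return 0;
}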
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 0bd8c6caa498..4caab81d2d02 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -949,6 +949,30 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
+struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
+ const char *alg_name, u32 type, u32 mask)
+{
+ struct crypto_skcipher *tfm;
+
+ /* Only sync algorithms allowed. */
+ mask |= CRYPTO_ALG_ASYNC;
+
+ tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
+
+ /*
+ * Make sure we do not allocate something that might get used with
+ * an on-stack request: check the request size.
+ */
+ if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
+ MAX_SYNC_SKCIPHER_REQSIZE)) {
+ crypto_free_skcipher(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return (struct crypto_sync_skcipher *)tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
+
int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
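
A sketch of how a caller is expected to pair the new sync-only allocator with
an on-stack request, mirroring the seqiv hunk earlier. This is illustrative
kernel-side code, not taken from the patch: error paths are trimmed,
"cbc(aes)" is only an example, and it assumes the series' companion helpers
(crypto_sync_skcipher_setkey, crypto_free_sync_skcipher) alongside the
SYNC_SKCIPHER_REQUEST_ON_STACK macro:

#include <linux/err.h>
#include <crypto/skcipher.h>

static int example_sync_encrypt(const u8 *key, unsigned int keylen,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int len, u8 *iv)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	/* The allocator ORs CRYPTO_ALG_ASYNC into the mask, so only
	 * synchronous implementations with a bounded reqsize match. */
	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}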
diff --git a/crypto/speck.c b/crypto/speck.c
deleted file mode 100644
index 58aa9f7f91f7..000000000000
--- a/crypto/speck.c
+++ /dev/null
@@ -1,307 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Speck: a lightweight block cipher
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Speck has 10 variants, including 5 block sizes. For now we only implement
- * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
- * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
- * and a key size of K bits. The Speck128 variants are believed to be the most
- * secure variants, and they use the same block size and key sizes as AES. The
- * Speck64 variants are less secure, but on 32-bit processors are usually
- * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
- * secure and/or not as well suited for implementation on either 32-bit or
- * 64-bit processors, so are omitted.
- *
- * Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
- * https://eprint.iacr.org/2013/404.pdf
- *
- * In a correspondence, the Speck designers have also clarified that the words
- * should be interpreted in little-endian format, and the words should be
- * ordered such that the first word of each block is 'y' rather than 'x', and
- * the first key word (rather than the last) becomes the first round key.
- */
-
-#include <asm/unaligned.h>
-#include <crypto/speck.h>
-#include <linux/bitops.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-/* Speck128 */
-
-static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
-{
- *x = ror64(*x, 8);
- *x += *y;
- *x ^= k;
- *y = rol64(*y, 3);
- *y ^= *x;
-}
-
-static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
-{
- *y ^= *x;
- *y = ror64(*y, 3);
- *x ^= k;
- *x -= *y;
- *x = rol64(*x, 8);
-}
-
-void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u64 y = get_unaligned_le64(in);
- u64 x = get_unaligned_le64(in + 8);
- int i;
-
- for (i = 0; i < ctx->nrounds; i++)
- speck128_round(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le64(y, out);
- put_unaligned_le64(x, out + 8);
-}
-EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);
-
-static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u64 y = get_unaligned_le64(in);
- u64 x = get_unaligned_le64(in + 8);
- int i;
-
- for (i = ctx->nrounds - 1; i >= 0; i--)
- speck128_unround(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le64(y, out);
- put_unaligned_le64(x, out + 8);
-}
-EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);
-
-static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
- unsigned int keylen)
-{
- u64 l[3];
- u64 k;
- int i;
-
- switch (keylen) {
- case SPECK128_128_KEY_SIZE:
- k = get_unaligned_le64(key);
- l[0] = get_unaligned_le64(key + 8);
- ctx->nrounds = SPECK128_128_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck128_round(&l[0], &k, i);
- }
- break;
- case SPECK128_192_KEY_SIZE:
- k = get_unaligned_le64(key);
- l[0] = get_unaligned_le64(key + 8);
- l[1] = get_unaligned_le64(key + 16);
- ctx->nrounds = SPECK128_192_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck128_round(&l[i % 2], &k, i);
- }
- break;
- case SPECK128_256_KEY_SIZE:
- k = get_unaligned_le64(key);
- l[0] = get_unaligned_le64(key + 8);
- l[1] = get_unaligned_le64(key + 16);
- l[2] = get_unaligned_le64(key + 24);
- ctx->nrounds = SPECK128_256_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck128_round(&l[i % 3], &k, i);
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_speck128_setkey);
-
-static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
-}
-
-/* Speck64 */
-
-static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
-{
- *x = ror32(*x, 8);
- *x += *y;
- *x ^= k;
- *y = rol32(*y, 3);
- *y ^= *x;
-}
-
-static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
-{
- *y ^= *x;
- *y = ror32(*y, 3);
- *x ^= k;
- *x -= *y;
- *x = rol32(*x, 8);
-}
-
-void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u32 y = get_unaligned_le32(in);
- u32 x = get_unaligned_le32(in + 4);
- int i;
-
- for (i = 0; i < ctx->nrounds; i++)
- speck64_round(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le32(y, out);
- put_unaligned_le32(x, out + 4);
-}
-EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);
-
-static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u32 y = get_unaligned_le32(in);
- u32 x = get_unaligned_le32(in + 4);
- int i;
-
- for (i = ctx->nrounds - 1; i >= 0; i--)
- speck64_unround(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le32(y, out);
- put_unaligned_le32(x, out + 4);
-}
-EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);
-
-static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
- unsigned int keylen)
-{
- u32 l[3];
- u32 k;
- int i;
-
- switch (keylen) {
- case SPECK64_96_KEY_SIZE:
- k = get_unaligned_le32(key);
- l[0] = get_unaligned_le32(key + 4);
- l[1] = get_unaligned_le32(key + 8);
- ctx->nrounds = SPECK64_96_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck64_round(&l[i % 2], &k, i);
- }
- break;
- case SPECK64_128_KEY_SIZE:
- k = get_unaligned_le32(key);
- l[0] = get_unaligned_le32(key + 4);
- l[1] = get_unaligned_le32(key + 8);
- l[2] = get_unaligned_le32(key + 12);
- ctx->nrounds = SPECK64_128_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck64_round(&l[i % 3], &k, i);
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_speck64_setkey);
-
-static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
-}
-
-/* Algorithm definitions */
-
-static struct crypto_alg speck_algs[] = {
- {
- .cra_name = "speck128",
- .cra_driver_name = "speck128-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = SPECK128_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct speck128_tfm_ctx),
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = SPECK128_128_KEY_SIZE,
- .cia_max_keysize = SPECK128_256_KEY_SIZE,
- .cia_setkey = speck128_setkey,
- .cia_encrypt = speck128_encrypt,
- .cia_decrypt = speck128_decrypt
- }
- }
- }, {
- .cra_name = "speck64",
- .cra_driver_name = "speck64-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = SPECK64_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct speck64_tfm_ctx),
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = SPECK64_96_KEY_SIZE,
- .cia_max_keysize = SPECK64_128_KEY_SIZE,
- .cia_setkey = speck64_setkey,
- .cia_encrypt = speck64_encrypt,
- .cia_decrypt = speck64_decrypt
- }
- }
- }
-};
-
-static int __init speck_module_init(void)
-{
- return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-static void __exit speck_module_exit(void)
-{
- crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-module_init(speck_module_init);
-module_exit(speck_module_exit);
-
-MODULE_DESCRIPTION("Speck block cipher (generic)");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
-MODULE_ALIAS_CRYPTO("speck128");
-MODULE_ALIAS_CRYPTO("speck128-generic");
-MODULE_ALIAS_CRYPTO("speck64");
-MODULE_ALIAS_CRYPTO("speck64-generic");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index bdde95e8d369..c20c9f5c18f2 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -76,8 +76,7 @@ static char *check[] = {
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
- "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
- NULL
+ "lzo", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", NULL
};
static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
@@ -1103,6 +1102,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
break;
}
+ if (speed[i].klen)
+ crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
+
pr_info("test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -1733,6 +1735,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("xts(aes)");
ret += tcrypt_test("ctr(aes)");
ret += tcrypt_test("rfc3686(ctr(aes))");
+ ret += tcrypt_test("ofb(aes)");
break;
case 11:
@@ -1878,10 +1881,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("ecb(seed)");
break;
- case 44:
- ret += tcrypt_test("zlib");
- break;
-
case 45:
ret += tcrypt_test("rfc4309(ccm(aes))");
break;
@@ -2033,6 +2032,8 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
break;
case 191:
ret += tcrypt_test("ecb(sm4)");
+ ret += tcrypt_test("cbc(sm4)");
+ ret += tcrypt_test("ctr(sm4)");
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
@@ -2282,6 +2283,20 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
num_mb);
break;
+ case 218:
+ test_cipher_speed("ecb(sm4)", ENCRYPT, sec, NULL, 0,
+ speed_template_16);
+ test_cipher_speed("ecb(sm4)", DECRYPT, sec, NULL, 0,
+ speed_template_16);
+ test_cipher_speed("cbc(sm4)", ENCRYPT, sec, NULL, 0,
+ speed_template_16);
+ test_cipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
+ speed_template_16);
+ test_cipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
+ speed_template_16);
+ test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
+ speed_template_16);
+ break;
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index f0bfee1bb293..d09ea8b10b4f 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -51,6 +51,7 @@ static struct cipher_speed_template des3_speed_template[] = {
* Cipher speed tests
*/
static u8 speed_template_8[] = {8, 0};
+static u8 speed_template_16[] = {16, 0};
static u8 speed_template_24[] = {24, 0};
static u8 speed_template_8_16[] = {8, 16, 0};
static u8 speed_template_8_32[] = {8, 32, 0};
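
The speed templates are zero-terminated lists of key lengths in bytes; the
new speed_template_16 entry lets the SM4 benchmarks above run with the
cipher's only supported key size (128 bits). A minimal sketch of how such a
template is walked, modeled on tcrypt's do/while loop:

#include <stdio.h>

static unsigned char speed_template_16[] = { 16, 0 };

int main(void)
{
	const unsigned char *keysize = speed_template_16;

	do {
		printf("benchmark cipher with %u-byte key\n", *keysize);
		keysize++;
	} while (*keysize);
	return 0;
}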
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index a1d42245082a..b1f79c6bf409 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1400,8 +1400,8 @@ static int test_comp(struct crypto_comp *tfm,
int ilen;
unsigned int dlen = COMP_BUF_SIZE;
- memset(output, 0, sizeof(COMP_BUF_SIZE));
- memset(decomp_output, 0, sizeof(COMP_BUF_SIZE));
+ memset(output, 0, COMP_BUF_SIZE);
+ memset(decomp_output, 0, COMP_BUF_SIZE);
ilen = ctemplate[i].inlen;
ret = crypto_comp_compress(tfm, ctemplate[i].input,
@@ -1445,7 +1445,7 @@ static int test_comp(struct crypto_comp *tfm,
int ilen;
unsigned int dlen = COMP_BUF_SIZE;
- memset(decomp_output, 0, sizeof(COMP_BUF_SIZE));
+ memset(decomp_output, 0, COMP_BUF_SIZE);
ilen = dtemplate[i].inlen;
ret = crypto_comp_decompress(tfm, dtemplate[i].input,
@@ -2662,6 +2662,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(serpent_cbc_tv_template)
},
}, {
+ .alg = "cbc(sm4)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(sm4_cbc_tv_template)
+ }
+ }, {
.alg = "cbc(twofish)",
.test = alg_test_skcipher,
.suite = {
@@ -2785,6 +2791,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(serpent_ctr_tv_template)
}
}, {
+ .alg = "ctr(sm4)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(sm4_ctr_tv_template)
+ }
+ }, {
.alg = "ctr(twofish)",
.test = alg_test_skcipher,
.suite = {
@@ -3038,18 +3050,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(sm4_tv_template)
}
}, {
- .alg = "ecb(speck128)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(speck128_tv_template)
- }
- }, {
- .alg = "ecb(speck64)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(speck64_tv_template)
- }
- }, {
.alg = "ecb(tea)",
.test = alg_test_skcipher,
.suite = {
@@ -3577,18 +3577,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(serpent_xts_tv_template)
}
}, {
- .alg = "xts(speck128)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(speck128_xts_tv_template)
- }
- }, {
- .alg = "xts(speck64)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(speck64_xts_tv_template)
- }
- }, {
.alg = "xts(twofish)",
.test = alg_test_skcipher,
.suite = {
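
The memset hunks near the top of the testmgr.c diff fix a classic sizeof
mistake: COMP_BUF_SIZE is a plain integer constant, so sizeof(COMP_BUF_SIZE)
is sizeof(int) (4 on common ABIs), and the old code cleared only the first
few bytes of the buffer. A standalone demonstration (the 512 value is an
illustrative stand-in for the real constant):

#include <stdio.h>
#include <string.h>

#define COMP_BUF_SIZE 512

int main(void)
{
	char output[COMP_BUF_SIZE];

	memset(output, 1, sizeof(output));
	/* the old, buggy form: clears only sizeof(int) bytes */
	memset(output, 0, sizeof(COMP_BUF_SIZE));
	printf("sizeof(COMP_BUF_SIZE) = %zu\n", sizeof(COMP_BUF_SIZE));
	printf("output[4] is still %d\n", output[4]);
	return 0;
}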
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 173111c70746..1fe7b97ba03f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -24,8 +24,6 @@
#ifndef _CRYPTO_TESTMGR_H
#define _CRYPTO_TESTMGR_H
-#include <linux/netlink.h>
-
#define MAX_DIGEST_SIZE 64
#define MAX_TAP 8
@@ -10133,12 +10131,13 @@ static const struct cipher_testvec serpent_xts_tv_template[] = {
};
/*
- * SM4 test vector taken from the draft RFC
- * https://tools.ietf.org/html/draft-crypto-sm4-00#ref-GBT.32907-2016
+ * SM4 test vectors taken from the "The SM4 Blockcipher Algorithm And Its
+ * Modes Of Operations" draft RFC
+ * https://datatracker.ietf.org/doc/draft-ribose-cfrg-sm4
*/
static const struct cipher_testvec sm4_tv_template[] = {
- { /* SM4 Appendix A: Example Calculations. Example 1. */
+ { /* GB/T 32907-2016 Example 1. */
.key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
.klen = 16,
@@ -10147,10 +10146,7 @@ static const struct cipher_testvec sm4_tv_template[] = {
.ctext = "\x68\x1E\xDF\x34\xD2\x06\x96\x5E"
"\x86\xB3\xE9\x4F\x53\x6E\x42\x46",
.len = 16,
- }, { /*
- * SM4 Appendix A: Example Calculations.
- * Last 10 iterations of Example 2.
- */
+ }, { /* Last 10 iterations of GB/T 32907-2016 Example 2. */
.key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
.klen = 16,
@@ -10195,744 +10191,116 @@ static const struct cipher_testvec sm4_tv_template[] = {
"\x59\x52\x98\xc7\xc6\xfd\x27\x1f"
"\x4\x2\xf8\x4\xc3\x3d\x3f\x66",
.len = 160
+ }, { /* A.2.1.1 SM4-ECB Example 1 */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .ctext = "\x5e\xc8\x14\x3d\xe5\x09\xcf\xf7"
+ "\xb5\x17\x9f\x8f\x47\x4b\x86\x19"
+ "\x2f\x1d\x30\x5a\x7f\xb1\x7d\xf9"
+ "\x85\xf8\x1c\x84\x82\x19\x23\x04",
+ .len = 32,
+ }, { /* A.2.1.2 SM4-ECB Example 2 */
+ .key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
+ "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
+ .klen = 16,
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .ctext = "\xC5\x87\x68\x97\xE4\xA5\x9B\xBB"
+ "\xA7\x2A\x10\xC8\x38\x72\x24\x5B"
+ "\x12\xDD\x90\xBC\x2D\x20\x06\x92"
+ "\xB5\x29\xA4\x15\x5A\xC9\xE6\x00",
+ .len = 32,
}
};
-/*
- * Speck test vectors taken from the original paper:
- * "The Simon and Speck Families of Lightweight Block Ciphers"
- * https://eprint.iacr.org/2013/404.pdf
- *
- * Note that the paper does not make byte and word order clear. But it was
- * confirmed with the authors that the intended orders are little endian byte
- * order and (y, x) word order. Equivalently, the printed test vectors, when
- * looking at only the bytes (ignoring the whitespace that divides them into
- * words), are backwards: the left-most byte is actually the one with the
- * highest memory address, while the right-most byte is actually the one with
- * the lowest memory address.
- */
-
-static const struct cipher_testvec speck128_tv_template[] = {
- { /* Speck128/128 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+static const struct cipher_testvec sm4_cbc_tv_template[] = {
+ { /* A.2.2.1 SM4-CBC Example 1 */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
.klen = 16,
- .ptext = "\x20\x6d\x61\x64\x65\x20\x69\x74"
- "\x20\x65\x71\x75\x69\x76\x61\x6c",
- .ctext = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
- "\x65\x32\x78\x79\x51\x98\x5d\xa6",
- .len = 16,
- }, { /* Speck128/192 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17",
- .klen = 24,
- .ptext = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
- "\x68\x69\x65\x66\x20\x48\x61\x72",
- .ctext = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
- "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
- .len = 16,
- }, { /* Speck128/256 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
- .klen = 32,
- .ptext = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
- "\x49\x6e\x20\x74\x68\x6f\x73\x65",
- .ctext = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
- "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
- .len = 16,
- },
-};
-
-/*
- * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the
- * ciphertext recomputed with Speck128 as the cipher
- */
-static const struct cipher_testvec speck128_xts_tv_template[] = {
- {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 32,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ctext = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
- "\x3b\x99\x4a\x64\x74\x77\xac\xed"
- "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
- "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
- .len = 32,
- }, {
- .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x22\x22\x22\x22\x22\x22\x22\x22"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 32,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ctext = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
- "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
- "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
- "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .ctext = "\x78\xEB\xB1\x1C\xC4\x0B\x0A\x48"
+ "\x31\x2A\xAE\xB2\x04\x02\x44\xCB"
+ "\x4C\xB7\x01\x69\x51\x90\x92\x26"
+ "\x97\x9B\x0D\x15\xDC\x6A\x8F\x6D",
.len = 32,
- }, {
- .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
- "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
- "\x22\x22\x22\x22\x22\x22\x22\x22"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 32,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ctext = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
- "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
- "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
- "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
+ }, { /* A.2.2.2 SM4-CBC Example 2 */
+ .key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
+ "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
+ .klen = 16,
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .ctext = "\x0d\x3a\x6d\xdc\x2d\x21\xc6\x98"
+ "\x85\x72\x15\x58\x7b\x7b\xb5\x9a"
+ "\x91\xf2\xc1\x47\x91\x1a\x41\x44"
+ "\x66\x5e\x1f\xa1\xd4\x0b\xae\x38",
.len = 32,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x31\x41\x59\x26\x53\x58\x97\x93"
- "\x23\x84\x62\x64\x33\x83\x27\x95",
- .klen = 32,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ctext = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
- "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
- "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
- "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
- "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
- "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
- "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
- "\x19\xc5\x58\x84\x63\xb9\x12\x68"
- "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
- "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
- "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
- "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
- "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
- "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
- "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
- "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
- "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
- "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
- "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
- "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
- "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
- "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
- "\xa5\x20\xac\x24\x1c\x73\x59\x73"
- "\x58\x61\x3a\x87\x58\xb3\x20\x56"
- "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
- "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
- "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
- "\x09\x35\x71\x50\x65\xac\x92\xe3"
- "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
- "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
- "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
- "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
- "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
- "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
- "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
- "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
- "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
- "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
- "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
- "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
- "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
- "\x87\x30\xac\xd5\xea\x73\x49\x10"
- "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
- "\x66\x02\x35\x3d\x60\x06\x36\x4f"
- "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
- "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
- "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
- "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
- "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
- "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
- "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
- "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
- "\x51\x66\x02\x69\x04\x97\x36\xd4"
- "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
- "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
- "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
- "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
- "\x03\xe7\x05\x39\xf5\x05\x26\xee"
- "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
- "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
- "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
- "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
- "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
- "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
- .len = 512,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x62\x49\x77\x57\x24\x70\x93\x69"
- "\x99\x59\x57\x49\x66\x96\x76\x27"
- "\x31\x41\x59\x26\x53\x58\x97\x93"
- "\x23\x84\x62\x64\x33\x83\x27\x95"
- "\x02\x88\x41\x97\x16\x93\x99\x37"
- "\x51\x05\x82\x09\x74\x94\x45\x92",
- .klen = 64,
- .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ctext = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
- "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
- "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
- "\x92\x99\xde\xd3\x76\xed\xcd\x63"
- "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
- "\x79\x36\x31\x19\x62\xae\x10\x7e"
- "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
- "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
- "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
- "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
- "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
- "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
- "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
- "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
- "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
- "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
- "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
- "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
- "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
- "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
- "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
- "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
- "\xac\xa5\xd0\x38\xef\x19\x56\x53"
- "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
- "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
- "\xed\x22\x34\x1c\x5d\xed\x17\x06"
- "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
- "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
- "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
- "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
- "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
- "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
- "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
- "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
- "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
- "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
- "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
- "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
- "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
- "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
- "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
- "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
- "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
- "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
- "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
- "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
- "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
- "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
- "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
- "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
- "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
- "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
- "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
- "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
- "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
- "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
- "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
- "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
- "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
- "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
- "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
- "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
- "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
- "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
- .len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
}
};
-static const struct cipher_testvec speck64_tv_template[] = {
- { /* Speck64/96 */
- .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
- "\x10\x11\x12\x13",
- .klen = 12,
- .ptext = "\x65\x61\x6e\x73\x20\x46\x61\x74",
- .ctext = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
- .len = 8,
- }, { /* Speck64/128 */
- .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
- "\x10\x11\x12\x13\x18\x19\x1a\x1b",
+static const struct cipher_testvec sm4_ctr_tv_template[] = {
+ { /* A.2.5.1 SM4-CTR Example 1 */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
.klen = 16,
- .ptext = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
- .ctext = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
- .len = 8,
- },
-};
-
-/*
- * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the
- * ciphertext recomputed with Speck64 as the cipher, and key lengths adjusted
- */
-static const struct cipher_testvec speck64_xts_tv_template[] = {
- {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 24,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ctext = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
- "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
- "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
- "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
- .len = 32,
- }, {
- .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 24,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ctext = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
- "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
- "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
- "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
- .len = 32,
- }, {
- .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
- "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 24,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ctext = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
- "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
- "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
- "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
- .len = 32,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x31\x41\x59\x26\x53\x58\x97\x93",
- .klen = 24,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ctext = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
- "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
- "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
- "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
- "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
- "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
- "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
- "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
- "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
- "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
- "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
- "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
- "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
- "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
- "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
- "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
- "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
- "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
- "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
- "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
- "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
- "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
- "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
- "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
- "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
- "\x07\xff\xf3\x72\x74\x48\xb5\x40"
- "\x50\xb5\xdd\x90\x43\x31\x18\x15"
- "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
- "\x29\x93\x90\x8b\xda\x07\xf0\x35"
- "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
- "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
- "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
- "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
- "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
- "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
- "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
- "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
- "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
- "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
- "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
- "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
- "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
- "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
- "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
- "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
- "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
- "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
- "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
- "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
- "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
- "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
- "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
- "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
- "\x59\xda\xee\x1a\x22\x18\xda\x0d"
- "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
- "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
- "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
- "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
- "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
- "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
- "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
- "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
- "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
- "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
- .len = 512,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x62\x49\x77\x57\x24\x70\x93\x69"
- "\x99\x59\x57\x49\x66\x96\x76\x27",
- .klen = 32,
- .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ctext = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
- "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
- "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
- "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
- "\x78\x16\xea\x80\xdb\x33\x75\x94"
- "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
- "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
- "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
- "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
- "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
- "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
- "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
- "\x84\x5e\x46\xed\x20\x89\xb6\x44"
- "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
- "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
- "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
- "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
- "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
- "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
- "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
- "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
- "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
- "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
- "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
- "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
- "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
- "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
- "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
- "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
- "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
- "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
- "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
- "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
- "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
- "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
- "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
- "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
- "\x52\x7f\x29\x51\x94\x20\x67\x3c"
- "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
- "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
- "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
- "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
- "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
- "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
- "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
- "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
- "\x65\x53\x0f\x41\x11\xbd\x98\x99"
- "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
- "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
- "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
- "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
- "\x63\x51\x34\x9d\x96\x12\xae\xce"
- "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
- "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
- "\x54\x27\x74\xbb\x10\x86\x57\x46"
- "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
- "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
- "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
- "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
- "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
- "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
- "\x9b\x63\x76\x32\x2f\x19\x72\x10"
- "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
- "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
- .len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
+ .ptext = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
+ "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xee\xee\xee\xee"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb",
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .ctext = "\xac\x32\x36\xcb\x97\x0c\xc2\x07"
+ "\x91\x36\x4c\x39\x5a\x13\x42\xd1"
+ "\xa3\xcb\xc1\x87\x8c\x6f\x30\xcd"
+ "\x07\x4c\xce\x38\x5c\xdd\x70\xc7"
+ "\xf2\x34\xbc\x0e\x24\xc1\x19\x80"
+ "\xfd\x12\x86\x31\x0c\xe3\x7b\x92"
+ "\x6e\x02\xfc\xd0\xfa\xa0\xba\xf3"
+ "\x8b\x29\x33\x85\x1d\x82\x45\x14",
+ .len = 64,
+ }, { /* A.2.5.2 SM4-CTR Example 2 */
+ .key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
+ "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
+ .klen = 16,
+ .ptext = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
+ "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xee\xee\xee\xee"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb",
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .ctext = "\x5d\xcc\xcd\x25\xb9\x5a\xb0\x74"
+ "\x17\xa0\x85\x12\xee\x16\x0e\x2f"
+ "\x8f\x66\x15\x21\xcb\xba\xb4\x4c"
+ "\xc8\x71\x38\x44\x5b\xc2\x9e\x5c"
+ "\x0a\xe0\x29\x72\x05\xd6\x27\x04"
+ "\x17\x3b\x21\x23\x9b\x88\x7f\x6c"
+ "\x8c\xb5\xb8\x00\x91\x7a\x24\x88"
+ "\x28\x4b\xde\x9e\x16\xea\x29\x06",
+ .len = 64,
}
};
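
Editor's note: the A.2.5.x labels above refer to the appendix of the SM4 specification draft the vectors were taken from. For reference, a vector of this shape can be driven through the generic skcipher API; the sketch below is illustrative only (sm4_ctr_check_one() is a hypothetical helper, not part of the patch, and error handling is abbreviated):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical: run one SM4-CTR vector through "ctr(sm4)" and compare. */
static int sm4_ctr_check_one(const struct cipher_testvec *tv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 iv[16];
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("ctr(sm4)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	buf = kmemdup(tv->ptext, tv->len, GFP_KERNEL);	/* encrypt in place */
	if (!req || !buf) {
		err = -ENOMEM;
		goto out;
	}

	err = crypto_skcipher_setkey(tfm, tv->key, tv->klen);
	if (err)
		goto out;

	memcpy(iv, tv->iv, sizeof(iv));		/* copy: CTR clobbers the IV */
	sg_init_one(&sg, buf, tv->len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, tv->len, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (!err && memcmp(buf, tv->ctext, tv->len))
		err = -EINVAL;			/* ciphertext mismatch */
out:
	kfree(buf);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}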
@@ -13883,6 +13251,27 @@ static const struct cipher_testvec aes_lrw_tv_template[] = {
.ctext = "\x5b\x90\x8e\xc1\xab\xdd\x67\x5f"
"\x3d\x69\x8a\x95\x53\xc8\x9c\xe5",
.len = 16,
+ }, { /* Test counter wrap-around, modified from LRW-32-AES 1 */
+ .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
+ "\x4c\x26\x84\x14\xb5\x68\x01\x85"
+ "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
+ "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
+ .klen = 32,
+ .iv = "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .ptext = "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x41\x42\x43\x44\x45\x46"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x41\x42\x43\x44\x45\x46"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x41\x42\x43\x44\x45\x46",
+ .ctext = "\x47\x90\x50\xf6\xf4\x8d\x5c\x7f"
+ "\x84\xc7\x83\x95\x2d\xa2\x02\xc0"
+ "\xda\x7f\xa3\xc0\x88\x2a\x0a\x50"
+ "\xfb\xc1\x78\x03\x39\xfe\x1d\xe5"
+ "\xf1\xb2\x73\xcd\x65\xa3\xdf\x5f"
+ "\xe9\x5d\x48\x92\x54\x63\x4e\xb8",
+ .len = 48,
}, {
/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
.key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index 25c75af50d3f..c055f57fab11 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -57,15 +57,17 @@ struct xcbc_desc_ctx {
u8 ctx[];
};
+#define XCBC_BLOCKSIZE 16
+
static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
unsigned long alignmask = crypto_shash_alignmask(parent);
struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
- int bs = crypto_shash_blocksize(parent);
u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
int err = 0;
- u8 key1[bs];
+ u8 key1[XCBC_BLOCKSIZE];
+ int bs = sizeof(key1);
if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen)))
return err;
@@ -212,7 +214,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
return PTR_ERR(alg);
switch(alg->cra_blocksize) {
- case 16:
+ case XCBC_BLOCKSIZE:
break;
default:
goto out_put_alg;
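
Editor's note on the xcbc hunks above: dropping the variable-length array `u8 key1[bs]` in favor of a fixed-size buffer is safe precisely because of the switch that follows, since xcbc_create() rejects any cipher whose cra_blocksize differs from XCBC_BLOCKSIZE; sizeof(key1) therefore always equals the real block size. A hypothetical run-time cross-check of that invariant (not part of the patch):

/* Assumes the includes already present in crypto/xcbc.c. */
static inline void xcbc_check_blocksize(struct crypto_shash *parent)
{
	/* Holds by construction; see the cra_blocksize switch in xcbc_create(). */
	WARN_ON_ONCE(crypto_shash_blocksize(parent) != XCBC_BLOCKSIZE);
}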
diff --git a/crypto/xts.c b/crypto/xts.c
index ccf55fbb8bc2..847f54f76789 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -26,8 +26,6 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
-#define XTS_BUFFER_SIZE 128u
-
struct priv {
struct crypto_skcipher *child;
struct crypto_cipher *tweak;
@@ -39,19 +37,7 @@ struct xts_instance_ctx {
};
struct rctx {
- le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
-
le128 t;
-
- le128 *ext;
-
- struct scatterlist srcbuf[2];
- struct scatterlist dstbuf[2];
- struct scatterlist *src;
- struct scatterlist *dst;
-
- unsigned int left;
-
struct skcipher_request subreq;
};
@@ -96,81 +82,27 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
return err;
}
-static int post_crypt(struct skcipher_request *req)
+/*
+ * We compute the tweak masks twice (both before and after the ECB encryption or
+ * decryption) to avoid having to allocate a temporary buffer and/or make
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
+ * just doing the gf128mul_x_ble() calls again.
+ */
+static int xor_tweak(struct skcipher_request *req, bool second_pass)
{
struct rctx *rctx = skcipher_request_ctx(req);
- le128 *buf = rctx->ext ?: rctx->buf;
- struct skcipher_request *subreq;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const int bs = XTS_BLOCK_SIZE;
struct skcipher_walk w;
- struct scatterlist *sg;
- unsigned offset;
+ le128 t = rctx->t;
int err;
- subreq = &rctx->subreq;
- err = skcipher_walk_virt(&w, subreq, false);
-
- while (w.nbytes) {
- unsigned int avail = w.nbytes;
- le128 *wdst;
-
- wdst = w.dst.virt.addr;
-
- do {
- le128_xor(wdst, buf++, wdst);
- wdst++;
- } while ((avail -= bs) >= bs);
-
- err = skcipher_walk_done(&w, avail);
+ if (second_pass) {
+ req = &rctx->subreq;
+ /* set to our TFM to enforce correct alignment: */
+ skcipher_request_set_tfm(req, tfm);
}
-
- rctx->left -= subreq->cryptlen;
-
- if (err || !rctx->left)
- goto out;
-
- rctx->dst = rctx->dstbuf;
-
- scatterwalk_done(&w.out, 0, 1);
- sg = w.out.sg;
- offset = w.out.offset;
-
- if (rctx->dst != sg) {
- rctx->dst[0] = *sg;
- sg_unmark_end(rctx->dst);
- scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
- }
- rctx->dst[0].length -= offset - sg->offset;
- rctx->dst[0].offset = offset;
-
-out:
- return err;
-}
-
-static int pre_crypt(struct skcipher_request *req)
-{
- struct rctx *rctx = skcipher_request_ctx(req);
- le128 *buf = rctx->ext ?: rctx->buf;
- struct skcipher_request *subreq;
- const int bs = XTS_BLOCK_SIZE;
- struct skcipher_walk w;
- struct scatterlist *sg;
- unsigned cryptlen;
- unsigned offset;
- bool more;
- int err;
-
- subreq = &rctx->subreq;
- cryptlen = subreq->cryptlen;
-
- more = rctx->left > cryptlen;
- if (!more)
- cryptlen = rctx->left;
-
- skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
- cryptlen, NULL);
-
- err = skcipher_walk_virt(&w, subreq, false);
+ err = skcipher_walk_virt(&w, req, false);
while (w.nbytes) {
unsigned int avail = w.nbytes;
@@ -181,180 +113,71 @@ static int pre_crypt(struct skcipher_request *req)
wdst = w.dst.virt.addr;
do {
- *buf++ = rctx->t;
- le128_xor(wdst++, &rctx->t, wsrc++);
- gf128mul_x_ble(&rctx->t, &rctx->t);
+ le128_xor(wdst++, &t, wsrc++);
+ gf128mul_x_ble(&t, &t);
} while ((avail -= bs) >= bs);
err = skcipher_walk_done(&w, avail);
}
- skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
- cryptlen, NULL);
-
- if (err || !more)
- goto out;
-
- rctx->src = rctx->srcbuf;
-
- scatterwalk_done(&w.in, 0, 1);
- sg = w.in.sg;
- offset = w.in.offset;
-
- if (rctx->src != sg) {
- rctx->src[0] = *sg;
- sg_unmark_end(rctx->src);
- scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
- }
- rctx->src[0].length -= offset - sg->offset;
- rctx->src[0].offset = offset;
-
-out:
return err;
}
-static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+static int xor_tweak_pre(struct skcipher_request *req)
{
- struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
- gfp_t gfp;
-
- subreq = &rctx->subreq;
- skcipher_request_set_tfm(subreq, ctx->child);
- skcipher_request_set_callback(subreq, req->base.flags, done, req);
-
- gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- rctx->ext = NULL;
-
- subreq->cryptlen = XTS_BUFFER_SIZE;
- if (req->cryptlen > XTS_BUFFER_SIZE) {
- unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
-
- rctx->ext = kmalloc(n, gfp);
- if (rctx->ext)
- subreq->cryptlen = n;
- }
-
- rctx->src = req->src;
- rctx->dst = req->dst;
- rctx->left = req->cryptlen;
-
- /* calculate first value of T */
- crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
-
- return 0;
+ return xor_tweak(req, false);
}
-static void exit_crypt(struct skcipher_request *req)
+static int xor_tweak_post(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
-
- rctx->left = 0;
-
- if (rctx->ext)
- kzfree(rctx->ext);
+ return xor_tweak(req, true);
}
-static int do_encrypt(struct skcipher_request *req, int err)
-{
- struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
-
- subreq = &rctx->subreq;
-
- while (!err && rctx->left) {
- err = pre_crypt(req) ?:
- crypto_skcipher_encrypt(subreq) ?:
- post_crypt(req);
-
- if (err == -EINPROGRESS || err == -EBUSY)
- return err;
- }
-
- exit_crypt(req);
- return err;
-}
-
-static void encrypt_done(struct crypto_async_request *areq, int err)
+static void crypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
- struct skcipher_request *subreq;
- struct rctx *rctx;
-
- rctx = skcipher_request_ctx(req);
-
- if (err == -EINPROGRESS) {
- if (rctx->left != req->cryptlen)
- return;
- goto out;
- }
-
- subreq = &rctx->subreq;
- subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
- err = do_encrypt(req, err ?: post_crypt(req));
- if (rctx->left)
- return;
+ if (!err)
+ err = xor_tweak_post(req);
-out:
skcipher_request_complete(req, err);
}
-static int encrypt(struct skcipher_request *req)
-{
- return do_encrypt(req, init_crypt(req, encrypt_done));
-}
-
-static int do_decrypt(struct skcipher_request *req, int err)
+static void init_crypt(struct skcipher_request *req)
{
+ struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
-
- subreq = &rctx->subreq;
+ struct skcipher_request *subreq = &rctx->subreq;
- while (!err && rctx->left) {
- err = pre_crypt(req) ?:
- crypto_skcipher_decrypt(subreq) ?:
- post_crypt(req);
-
- if (err == -EINPROGRESS || err == -EBUSY)
- return err;
- }
+ skcipher_request_set_tfm(subreq, ctx->child);
+ skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+ skcipher_request_set_crypt(subreq, req->dst, req->dst,
+ req->cryptlen, NULL);
- exit_crypt(req);
- return err;
+ /* calculate first value of T */
+ crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
}
-static void decrypt_done(struct crypto_async_request *areq, int err)
+static int encrypt(struct skcipher_request *req)
{
- struct skcipher_request *req = areq->data;
- struct skcipher_request *subreq;
- struct rctx *rctx;
-
- rctx = skcipher_request_ctx(req);
-
- if (err == -EINPROGRESS) {
- if (rctx->left != req->cryptlen)
- return;
- goto out;
- }
-
- subreq = &rctx->subreq;
- subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
- err = do_decrypt(req, err ?: post_crypt(req));
- if (rctx->left)
- return;
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq = &rctx->subreq;
-out:
- skcipher_request_complete(req, err);
+ init_crypt(req);
+ return xor_tweak_pre(req) ?:
+ crypto_skcipher_encrypt(subreq) ?:
+ xor_tweak_post(req);
}
static int decrypt(struct skcipher_request *req)
{
- return do_decrypt(req, init_crypt(req, decrypt_done));
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq = &rctx->subreq;
+
+ init_crypt(req);
+ return xor_tweak_pre(req) ?:
+ crypto_skcipher_decrypt(subreq) ?:
+ xor_tweak_post(req);
}
static int init_tfm(struct crypto_skcipher *tfm)
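
Editor's note: the `a ?: b ?: c` chains in the rewritten encrypt() and decrypt() use the GNU ?: extension, so each step runs only if the previous one returned 0, and the first nonzero value (an error or -EINPROGRESS) is returned as-is. Expanded for clarity; encrypt_expanded() is illustrative and not part of the patch:

static int encrypt_expanded(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	int err;

	init_crypt(req);

	err = xor_tweak_pre(req);	/* first pass: dst = src ^ T */
	if (err)
		return err;

	err = crypto_skcipher_encrypt(&rctx->subreq);	/* ecb(child) over dst */
	if (err)
		return err;

	return xor_tweak_post(req);	/* second pass: dst ^= T again */
}

For an asynchronous child the middle step returns -EINPROGRESS, and crypt_done() performs the final xor_tweak_post() once the ECB request completes, which is why the callback only needs the parent request pointer.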
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index dd1eea90f67f..365e6c1a729e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -138,7 +138,6 @@ config ACPI_REV_OVERRIDE_POSSIBLE
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
- default n
help
Say N to disable Embedded Controller /sys/kernel/debug interface
@@ -283,7 +282,6 @@ config ACPI_PROCESSOR
config ACPI_IPMI
tristate "IPMI"
depends on IPMI_HANDLER
- default n
help
This driver enables the ACPI to access the BMC controller. And it
uses the IPMI request/response message to communicate with BMC
@@ -361,7 +359,6 @@ config ACPI_TABLE_UPGRADE
config ACPI_DEBUG
bool "Debug Statements"
- default n
help
The ACPI subsystem can produce debug output. Saying Y enables this
output and increases the kernel size by around 50K.
@@ -374,7 +371,6 @@ config ACPI_DEBUG
config ACPI_PCI_SLOT
bool "PCI slot detection driver"
depends on SYSFS
- default n
help
This driver creates entries in /sys/bus/pci/slots/ for all PCI
slots in the system. This can help correlate PCI bus addresses,
@@ -436,7 +432,6 @@ config ACPI_HED
config ACPI_CUSTOM_METHOD
tristate "Allow ACPI methods to be inserted/replaced at run time"
depends on DEBUG_FS
- default n
help
This debug facility allows ACPI AML methods to be inserted and/or
replaced without rebooting the system. For details refer to:
@@ -481,7 +476,6 @@ config ACPI_EXTLOG
tristate "Extended Error Log support"
depends on X86_MCE && X86_LOCAL_APIC && EDAC
select UEFI_CPER
- default n
help
Certain usages such as Predictive Failure Analysis (PFA) require
more information about the error than what can be described in
@@ -498,6 +492,9 @@ config ACPI_EXTLOG
driver adds support for that functionality with corresponding
tracepoint which carries that information to userspace.
+config ACPI_ADXL
+ bool
+
menuconfig PMIC_OPREGION
bool "PMIC (Power Management Integrated Circuit) operation region support"
help
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 6d59aa109a91..edc039313cd6 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -61,6 +61,9 @@ acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
+# Address translation
+acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o
+
# These are (potentially) separate modules
# IPMI may be used by other drivers, so it has to initialise before them
diff --git a/drivers/acpi/acpi_adxl.c b/drivers/acpi/acpi_adxl.c
new file mode 100644
index 000000000000..13c8f7b50c46
--- /dev/null
+++ b/drivers/acpi/acpi_adxl.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Address translation interface via ACPI DSM.
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Specification for this interface is available at:
+ *
+ * https://cdrdv2.intel.com/v1/dl/getContent/603354
+ */
+
+#include <linux/acpi.h>
+#include <linux/adxl.h>
+
+#define ADXL_REVISION 0x1
+#define ADXL_IDX_GET_ADDR_PARAMS 0x1
+#define ADXL_IDX_FORWARD_TRANSLATE 0x2
+#define ACPI_ADXL_PATH "\\_SB.ADXL"
+
+/*
+ * The specification doesn't provide a limit on how many
+ * components are in a memory address. But since we allocate
+ * memory based on the number the BIOS tells us, we should
+ * defend against insane values.
+ */
+#define ADXL_MAX_COMPONENTS 500
+
+#undef pr_fmt
+#define pr_fmt(fmt) "ADXL: " fmt
+
+static acpi_handle handle;
+static union acpi_object *params;
+static const guid_t adxl_guid =
+ GUID_INIT(0xAA3C050A, 0x7EA4, 0x4C1F,
+ 0xAF, 0xDA, 0x12, 0x67, 0xDF, 0xD3, 0xD4, 0x8D);
+
+static int adxl_count;
+static char **adxl_component_names;
+
+static union acpi_object *adxl_dsm(int cmd, union acpi_object argv[])
+{
+ union acpi_object *obj, *o;
+
+ obj = acpi_evaluate_dsm_typed(handle, &adxl_guid, ADXL_REVISION,
+ cmd, argv, ACPI_TYPE_PACKAGE);
+ if (!obj) {
+ pr_info("DSM call failed for cmd=%d\n", cmd);
+ return NULL;
+ }
+
+ if (obj->package.count != 2) {
+ pr_info("Bad pkg count %d\n", obj->package.count);
+ goto err;
+ }
+
+ o = obj->package.elements;
+ if (o->type != ACPI_TYPE_INTEGER) {
+ pr_info("Bad 1st element type %d\n", o->type);
+ goto err;
+ }
+ if (o->integer.value) {
+ pr_info("Bad ret val %llu\n", o->integer.value);
+ goto err;
+ }
+
+ o = obj->package.elements + 1;
+ if (o->type != ACPI_TYPE_PACKAGE) {
+ pr_info("Bad 2nd element type %d\n", o->type);
+ goto err;
+ }
+ return obj;
+
+err:
+ ACPI_FREE(obj);
+ return NULL;
+}
+
+/**
+ * adxl_get_component_names - get list of memory component names
+ * Returns a NULL-terminated list of string names
+ *
+ * Give the caller a pointer to the list of memory component names
+ * e.g. { "SystemAddress", "ProcessorSocketId", "ChannelId", ... NULL }
+ * Caller should count how many strings there are in order to allocate
+ * a buffer for the return from adxl_decode().
+ */
+const char * const *adxl_get_component_names(void)
+{
+ return (const char * const *)adxl_component_names;
+}
+EXPORT_SYMBOL_GPL(adxl_get_component_names);
+
+/**
+ * adxl_decode - ask BIOS to decode a system address to memory address
+ * @addr: the address to decode
+ * @component_values: pointer to array of values for each component
+ * Returns 0 on success, negative error code otherwise
+ *
+ * The index of each value returned in the array matches the index of
+ * each component name returned by adxl_get_component_names().
+ * Components that are not defined for this address translation (e.g.
+ * mirror channel number for a non-mirrored address) are set to ~0ull.
+ */
+int adxl_decode(u64 addr, u64 component_values[])
+{
+ union acpi_object argv4[2], *results, *r;
+ int i, cnt;
+
+ if (!adxl_component_names)
+ return -EOPNOTSUPP;
+
+ argv4[0].type = ACPI_TYPE_PACKAGE;
+ argv4[0].package.count = 1;
+ argv4[0].package.elements = &argv4[1];
+ argv4[1].integer.type = ACPI_TYPE_INTEGER;
+ argv4[1].integer.value = addr;
+
+ results = adxl_dsm(ADXL_IDX_FORWARD_TRANSLATE, argv4);
+ if (!results)
+ return -EINVAL;
+
+ r = results->package.elements + 1;
+ cnt = r->package.count;
+ if (cnt != adxl_count) {
+ ACPI_FREE(results);
+ return -EINVAL;
+ }
+ r = r->package.elements;
+
+ for (i = 0; i < cnt; i++)
+ component_values[i] = r[i].integer.value;
+
+ ACPI_FREE(results);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adxl_decode);
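
Editor's note: a hypothetical consumer of these two exports (an EDAC driver, say) would size its value buffer from the NULL-terminated name list and then decode; components undefined for a given address come back as ~0ull:

/* Illustrative only: translate one system address and print each component. */
static void adxl_dump_translation(u64 addr)
{
	const char * const *names = adxl_get_component_names();
	u64 *values;
	int i, count = 0;

	if (!names)			/* ADXL not supported on this platform */
		return;

	while (names[count])		/* list is NULL-terminated by adxl_init() */
		count++;

	values = kcalloc(count, sizeof(*values), GFP_KERNEL);
	if (!values)
		return;

	if (!adxl_decode(addr, values)) {
		for (i = 0; i < count; i++)
			pr_info("%s = 0x%llx\n", names[i], values[i]);
	}

	kfree(values);
}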
+
+static int __init adxl_init(void)
+{
+ char *path = ACPI_ADXL_PATH;
+ union acpi_object *p;
+ acpi_status status;
+ int i;
+
+ status = acpi_get_handle(NULL, path, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_debug("No ACPI handle for path %s\n", path);
+ return -ENODEV;
+ }
+
+ if (!acpi_has_method(handle, "_DSM")) {
+ pr_info("No DSM method\n");
+ return -ENODEV;
+ }
+
+ if (!acpi_check_dsm(handle, &adxl_guid, ADXL_REVISION,
+ ADXL_IDX_GET_ADDR_PARAMS |
+ ADXL_IDX_FORWARD_TRANSLATE)) {
+ pr_info("DSM method does not support forward translate\n");
+ return -ENODEV;
+ }
+
+ params = adxl_dsm(ADXL_IDX_GET_ADDR_PARAMS, NULL);
+ if (!params) {
+ pr_info("Failed to get component names\n");
+ return -ENODEV;
+ }
+
+ p = params->package.elements + 1;
+ adxl_count = p->package.count;
+ if (adxl_count > ADXL_MAX_COMPONENTS) {
+ pr_info("Insane number of address component names %d\n", adxl_count);
+ ACPI_FREE(params);
+ return -ENODEV;
+ }
+ p = p->package.elements;
+
+ /*
+ * Allocate one extra for NULL termination.
+ */
+ adxl_component_names = kcalloc(adxl_count + 1, sizeof(char *), GFP_KERNEL);
+ if (!adxl_component_names) {
+ ACPI_FREE(params);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adxl_count; i++)
+ adxl_component_names[i] = p[i].string.pointer;
+
+ return 0;
+}
+subsys_initcall(adxl_init);
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index 1b64419e2fec..712fd31674a6 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -46,7 +46,7 @@ struct acpi_ipmi_device {
spinlock_t tx_msg_lock;
acpi_handle handle;
struct device *dev;
- ipmi_user_t user_interface;
+ struct ipmi_user *user_interface;
int ipmi_ifnum; /* IPMI interface number */
long curr_msgid;
bool dead;
@@ -125,7 +125,7 @@ ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
{
struct acpi_ipmi_device *ipmi_device;
int err;
- ipmi_user_t user;
+ struct ipmi_user *user;
ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
if (!ipmi_device)
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index cf4fc0161164..e43cb71b6972 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -117,11 +117,17 @@ static void lpit_update_residency(struct lpit_residency_info *info,
if (!info->iomem_addr)
return;
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
+ return;
+
/* Silently fail, if cpuidle attribute group is not present */
sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
&dev_attr_low_power_idle_system_residency_us.attr,
"cpuidle");
} else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
+ return;
+
/* Silently fail, if cpuidle attribute group is not present */
sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
&dev_attr_low_power_idle_cpu_residency_us.attr,
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index bf64cfa30feb..b9bda06d344d 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
@@ -83,6 +84,7 @@ struct lpss_device_desc {
size_t prv_size_override;
struct property_entry *properties;
void (*setup)(struct lpss_private_data *pdata);
+ bool resume_from_noirq;
};
static const struct lpss_device_desc lpss_dma_desc = {
@@ -99,6 +101,9 @@ struct lpss_private_data {
u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};
+/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
+static u32 pmc_atom_d3_mask = 0xfe000ffe;
+
/* LPSS run time quirks */
static unsigned int lpss_quirks;
@@ -175,6 +180,21 @@ static void byt_pwm_setup(struct lpss_private_data *pdata)
static void byt_i2c_setup(struct lpss_private_data *pdata)
{
+ const char *uid_str = acpi_device_uid(pdata->adev);
+ acpi_handle handle = pdata->adev->handle;
+ unsigned long long shared_host = 0;
+ acpi_status status;
+ long uid = 0;
+
+ /* Expected to always be true, but better safe than sorry */
+ if (uid_str)
+ uid = simple_strtol(uid_str, NULL, 10);
+
+ /* Detect I2C bus shared with PUNIT and ignore its d3 status */
+ status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
+ if (ACPI_SUCCESS(status) && shared_host && uid)
+ pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));
+
lpss_deassert_reset(pdata);
if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
@@ -274,12 +294,14 @@ static const struct lpss_device_desc byt_i2c_dev_desc = {
.flags = LPSS_CLK | LPSS_SAVE_CTX,
.prv_offset = 0x800,
.setup = byt_i2c_setup,
+ .resume_from_noirq = true,
};
static const struct lpss_device_desc bsw_i2c_dev_desc = {
.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
.prv_offset = 0x800,
.setup = byt_i2c_setup,
+ .resume_from_noirq = true,
};
static const struct lpss_device_desc bsw_spi_dev_desc = {
@@ -292,7 +314,7 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
static const struct x86_cpu_id lpss_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1), /* Valleyview, Bay Trail */
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */
ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
{}
};
@@ -327,9 +349,11 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT33FC", },
/* Braswell LPSS devices */
+ { "80862286", LPSS_ADDR(lpss_dma_desc) },
{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
+ { "808622C0", LPSS_ADDR(lpss_dma_desc) },
{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
/* Broadwell LPSS devices */
@@ -451,26 +475,35 @@ struct lpss_device_links {
*/
static const struct lpss_device_links lpss_device_links[] = {
{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
+ {"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
+ {"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
};
-static bool hid_uid_match(const char *hid1, const char *uid1,
+static bool hid_uid_match(struct acpi_device *adev,
const char *hid2, const char *uid2)
{
- return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
+ const char *hid1 = acpi_device_hid(adev);
+ const char *uid1 = acpi_device_uid(adev);
+
+ if (strcmp(hid1, hid2))
+ return false;
+
+ if (!uid2)
+ return true;
+
+ return uid1 && !strcmp(uid1, uid2);
}
static bool acpi_lpss_is_supplier(struct acpi_device *adev,
const struct lpss_device_links *link)
{
- return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
- link->supplier_hid, link->supplier_uid);
+ return hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
}
static bool acpi_lpss_is_consumer(struct acpi_device *adev,
const struct lpss_device_links *link)
{
- return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
- link->consumer_hid, link->consumer_uid);
+ return hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
}
struct hid_uid {
@@ -486,18 +519,23 @@ static int match_hid_uid(struct device *dev, void *data)
if (!adev)
return 0;
- return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
- id->hid, id->uid);
+ return hid_uid_match(adev, id->hid, id->uid);
}
static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
{
+ struct device *dev;
+
struct hid_uid data = {
.hid = hid,
.uid = uid,
};
- return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
+ dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
+ if (dev)
+ return dev;
+
+ return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
}
static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
@@ -892,7 +930,7 @@ static void lpss_iosf_enter_d3_state(void)
* Here we read the values related to LPSS power island, i.e. LPSS
* devices, excluding both LPSS DMA controllers, along with SCC domain.
*/
- u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe;
+ u32 func_dis, d3_sts_0, pmc_status;
int ret;
ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
@@ -910,7 +948,7 @@ static void lpss_iosf_enter_d3_state(void)
* Shutdown both LPSS DMA controllers if and only if all other devices
* are already in D3hot.
*/
- pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask;
+ pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
if (pmc_status)
goto exit;
@@ -1004,7 +1042,7 @@ static int acpi_lpss_resume(struct device *dev)
}
#ifdef CONFIG_PM_SLEEP
-static int acpi_lpss_suspend_late(struct device *dev)
+static int acpi_lpss_do_suspend_late(struct device *dev)
{
int ret;
@@ -1015,12 +1053,62 @@ static int acpi_lpss_suspend_late(struct device *dev)
return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}
-static int acpi_lpss_resume_early(struct device *dev)
+static int acpi_lpss_suspend_late(struct device *dev)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+ if (pdata->dev_desc->resume_from_noirq)
+ return 0;
+
+ return acpi_lpss_do_suspend_late(dev);
+}
+
+static int acpi_lpss_suspend_noirq(struct device *dev)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+ int ret;
+
+ if (pdata->dev_desc->resume_from_noirq) {
+ ret = acpi_lpss_do_suspend_late(dev);
+ if (ret)
+ return ret;
+ }
+
+ return acpi_subsys_suspend_noirq(dev);
+}
+
+static int acpi_lpss_do_resume_early(struct device *dev)
{
int ret = acpi_lpss_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
+
+static int acpi_lpss_resume_early(struct device *dev)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+ if (pdata->dev_desc->resume_from_noirq)
+ return 0;
+
+ return acpi_lpss_do_resume_early(dev);
+}
+
+static int acpi_lpss_resume_noirq(struct device *dev)
+{
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+ int ret;
+
+ ret = acpi_subsys_resume_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (!dev_pm_may_skip_resume(dev) && pdata->dev_desc->resume_from_noirq)
+ ret = acpi_lpss_do_resume_early(dev);
+
+ return ret;
+}
+
#endif /* CONFIG_PM_SLEEP */
static int acpi_lpss_runtime_suspend(struct device *dev)
@@ -1050,8 +1138,8 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
.complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.suspend_late = acpi_lpss_suspend_late,
- .suspend_noirq = acpi_subsys_suspend_noirq,
- .resume_noirq = acpi_subsys_resume_noirq,
+ .suspend_noirq = acpi_lpss_suspend_noirq,
+ .resume_noirq = acpi_lpss_resume_noirq,
.resume_early = acpi_lpss_resume_early,
.freeze = acpi_subsys_freeze,
.freeze_late = acpi_subsys_freeze_late,
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 552c1f725b6c..a47676a55b84 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -70,6 +70,7 @@ static void power_saving_mwait_init(void)
#if defined(CONFIG_X86)
switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
/*
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 449d86d39965..fc447410ae4d 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -643,7 +643,7 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
status = acpi_get_type(handle, &acpi_type);
if (ACPI_FAILURE(status))
- return false;
+ return status;
switch (acpi_type) {
case ACPI_TYPE_PROCESSOR:
@@ -663,11 +663,12 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
}
processor_validated_ids_update(uid);
- return true;
+ return AE_OK;
err:
+ /* Exit on error, but don't abort the namespace walk */
acpi_handle_info(handle, "Invalid processor object\n");
- return false;
+ return AE_OK;
}
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index e99c4ed7e677..33a4bcdaa4d7 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -52,6 +52,201 @@ struct acpi_tad_driver_data {
u32 capabilities;
};
+struct acpi_tad_rt {
+ u16 year; /* 1900 - 9999 */
+ u8 month; /* 1 - 12 */
+ u8 day; /* 1 - 31 */
+ u8 hour; /* 0 - 23 */
+ u8 minute; /* 0 - 59 */
+ u8 second; /* 0 - 59 */
+ u8 valid; /* 0 (failed) or 1 (success) for reads, 0 for writes */
+ u16 msec; /* 1 - 1000 */
+ s16 tz; /* -1440 to 1440 or 2047 (unspecified) */
+ u8 daylight;
+ u8 padding[3]; /* must be 0 */
+} __packed;
+
+static int acpi_tad_set_real_time(struct device *dev, struct acpi_tad_rt *rt)
+{
+ acpi_handle handle = ACPI_HANDLE(dev);
+ union acpi_object args[] = {
+ { .type = ACPI_TYPE_BUFFER, },
+ };
+ struct acpi_object_list arg_list = {
+ .pointer = args,
+ .count = ARRAY_SIZE(args),
+ };
+ unsigned long long retval;
+ acpi_status status;
+
+ if (rt->year < 1900 || rt->year > 9999 ||
+ rt->month < 1 || rt->month > 12 ||
+ rt->hour > 23 || rt->minute > 59 || rt->second > 59 ||
+ rt->tz < -1440 || (rt->tz > 1440 && rt->tz != 2047) ||
+ rt->daylight > 3)
+ return -ERANGE;
+
+ args[0].buffer.pointer = (u8 *)rt;
+ args[0].buffer.length = sizeof(*rt);
+
+ pm_runtime_get_sync(dev);
+
+ status = acpi_evaluate_integer(handle, "_SRT", &arg_list, &retval);
+
+ pm_runtime_put_sync(dev);
+
+ if (ACPI_FAILURE(status) || retval)
+ return -EIO;
+
+ return 0;
+}
+
+static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
+{
+ acpi_handle handle = ACPI_HANDLE(dev);
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER };
+ union acpi_object *out_obj;
+ struct acpi_tad_rt *data;
+ acpi_status status;
+ int ret = -EIO;
+
+ pm_runtime_get_sync(dev);
+
+ status = acpi_evaluate_object(handle, "_GRT", NULL, &output);
+
+ pm_runtime_put_sync(dev);
+
+ if (ACPI_FAILURE(status))
+ goto out_free;
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_BUFFER)
+ goto out_free;
+
+ if (out_obj->buffer.length != sizeof(*rt))
+ goto out_free;
+
+ data = (struct acpi_tad_rt *)(out_obj->buffer.pointer);
+ if (!data->valid)
+ goto out_free;
+
+ memcpy(rt, data, sizeof(*rt));
+ ret = 0;
+
+out_free:
+ ACPI_FREE(output.pointer);
+ return ret;
+}
+
+static char *acpi_tad_rt_next_field(char *s, int *val)
+{
+ char *p;
+
+ p = strchr(s, ':');
+ if (!p)
+ return NULL;
+
+ *p = '\0';
+ if (kstrtoint(s, 10, val))
+ return NULL;
+
+ return p + 1;
+}
+
+static ssize_t time_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_tad_rt rt;
+ char *str, *s;
+ int val, ret = -ENODATA;
+
+ str = kmemdup_nul(buf, count, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ s = acpi_tad_rt_next_field(str, &val);
+ if (!s)
+ goto out_free;
+
+ rt.year = val;
+
+ s = acpi_tad_rt_next_field(s, &val);
+ if (!s)
+ goto out_free;
+
+ rt.month = val;
+
+ s = acpi_tad_rt_next_field(s, &val);
+ if (!s)
+ goto out_free;
+
+ rt.day = val;
+
+ s = acpi_tad_rt_next_field(s, &val);
+ if (!s)
+ goto out_free;
+
+ rt.hour = val;
+
+ s = acpi_tad_rt_next_field(s, &val);
+ if (!s)
+ goto out_free;
+
+ rt.minute = val;
+
+ s = acpi_tad_rt_next_field(s, &val);
+ if (!s)
+ goto out_free;
+
+ rt.second = val;
+
+ s = acpi_tad_rt_next_field(s, &val);
+ if (!s)
+ goto out_free;
+
+ rt.tz = val;
+
+ if (kstrtoint(s, 10, &val))
+ goto out_free;
+
+ rt.daylight = val;
+
+ rt.valid = 0;
+ rt.msec = 0;
+ memset(rt.padding, 0, 3);
+
+ ret = acpi_tad_set_real_time(dev, &rt);
+
+out_free:
+ kfree(str);
+ return ret ? ret : count;
+}
+
+static ssize_t time_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_tad_rt rt;
+ int ret;
+
+ ret = acpi_tad_get_real_time(dev, &rt);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n",
+ rt.year, rt.month, rt.day, rt.hour, rt.minute, rt.second,
+ rt.tz, rt.daylight);
+}
+
+static DEVICE_ATTR(time, S_IRUSR | S_IWUSR, time_show, time_store);
+
+static struct attribute *acpi_tad_time_attrs[] = {
+ &dev_attr_time.attr,
+ NULL,
+};
+static const struct attribute_group acpi_tad_time_attr_group = {
+ .attrs = acpi_tad_time_attrs,
+};
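
Editor's note: from userspace the new attribute reads and writes colon-separated fields in the order year:month:day:hour:minute:second:tz:daylight, matching time_show() and time_store() above. A minimal sketch; the ACPI000E:00 instance name is an assumption and varies by platform:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical sysfs path; the device suffix differs per system. */
	const char *path = "/sys/bus/platform/devices/ACPI000E:00/time";
	const char *stamp = "2018:10:21:14:30:0:0:1";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, stamp, strlen(stamp)) < 0)
		perror("write");
	close(fd);
	return 0;
}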
+
static int acpi_tad_wake_set(struct device *dev, char *method, u32 timer_id,
u32 value)
{
@@ -448,6 +643,12 @@ static int acpi_tad_probe(struct platform_device *pdev)
goto fail;
}
+ if (caps & ACPI_TAD_RT) {
+ ret = sysfs_create_group(&dev->kobj, &acpi_tad_time_attr_group);
+ if (ret)
+ goto fail;
+ }
+
return 0;
fail:
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 71f6f2624deb..b14621da5413 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -65,6 +65,7 @@ acpi-y += \
exresnte.o \
exresolv.o \
exresop.o \
+ exserial.o \
exstore.o \
exstoren.o \
exstorob.o \
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 704bebbd35b0..b412aa909907 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -229,6 +229,8 @@ acpi_ev_default_region_setup(acpi_handle handle,
acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj);
+u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
+
/*
* evsci - SCI (System Control Interrupt) handling/dispatch
*/
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 9613b0115dad..c5b2be0b6613 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -124,6 +124,9 @@ acpi_ex_trace_point(acpi_trace_event_type type,
* exfield - ACPI AML (p-code) execution - field manipulation
*/
acpi_status
+acpi_ex_get_protocol_buffer_length(u32 protocol_id, u32 *return_length);
+
+acpi_status
acpi_ex_common_buffer_setup(union acpi_operand_object *obj_desc,
u32 buffer_length, u32 * datum_count);
@@ -268,6 +271,26 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info);
/*
+ * exserial - field_unit support for serial address spaces
+ */
+acpi_status
+acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **return_buffer);
+
+acpi_status
+acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
+ union acpi_operand_object *obj_desc,
+ union acpi_operand_object **return_buffer);
+
+acpi_status
+acpi_ex_read_gpio(union acpi_operand_object *obj_desc, void *buffer);
+
+acpi_status
+acpi_ex_write_gpio(union acpi_operand_object *source_desc,
+ union acpi_operand_object *obj_desc,
+ union acpi_operand_object **return_buffer);
+
+/*
* exsystem - Interface to OS services
*/
acpi_status
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0f28a38a43ea..99b0da899109 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -395,9 +395,9 @@ struct acpi_simple_repair_info {
/* Info for running the _REG methods */
struct acpi_reg_walk_info {
- acpi_adr_space_type space_id;
u32 function;
u32 reg_run_count;
+ acpi_adr_space_type space_id;
};
/*****************************************************************************
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 250dba02bab6..6c05355447c1 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -432,15 +432,15 @@ typedef enum {
*/
typedef enum {
AML_FIELD_ATTRIB_QUICK = 0x02,
- AML_FIELD_ATTRIB_SEND_RCV = 0x04,
+ AML_FIELD_ATTRIB_SEND_RECEIVE = 0x04,
AML_FIELD_ATTRIB_BYTE = 0x06,
AML_FIELD_ATTRIB_WORD = 0x08,
AML_FIELD_ATTRIB_BLOCK = 0x0A,
- AML_FIELD_ATTRIB_MULTIBYTE = 0x0B,
- AML_FIELD_ATTRIB_WORD_CALL = 0x0C,
- AML_FIELD_ATTRIB_BLOCK_CALL = 0x0D,
+ AML_FIELD_ATTRIB_BYTES = 0x0B,
+ AML_FIELD_ATTRIB_PROCESS_CALL = 0x0C,
+ AML_FIELD_ATTRIB_BLOCK_PROCESS_CALL = 0x0D,
AML_FIELD_ATTRIB_RAW_BYTES = 0x0E,
- AML_FIELD_ATTRIB_RAW_PROCESS = 0x0F
+ AML_FIELD_ATTRIB_RAW_PROCESS_BYTES = 0x0F
} AML_ACCESS_ATTRIBUTE;
/* Bit fields in the AML method_flags byte */
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index e9fb0bf3c8d2..78f9de260d5f 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -417,6 +417,10 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
ACPI_FORMAT_UINT64(obj_desc->region.address),
obj_desc->region.length));
+ status = acpi_ut_add_address_range(obj_desc->region.space_id,
+ obj_desc->region.address,
+ obj_desc->region.length, node);
+
/* Now the address and length are valid for this opregion */
obj_desc->region.flags |= AOPOBJ_DATA_VALID;
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 70c2bd169f66..49decca4e08f 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -653,6 +653,19 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
+ /*
+	 * These address spaces do not need a call to _REG because the ACPI
+	 * specification defines them as "must always be accessible". Since
+	 * they never change state (never become unavailable), there is no
+	 * need to ever call _REG on them. Also, a data_table is not a "real"
+	 * address space, so do not call _REG. September 2018.
+ */
+ if ((space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) ||
+ (space_id == ACPI_ADR_SPACE_SYSTEM_IO) ||
+ (space_id == ACPI_ADR_SPACE_DATA_TABLE)) {
+ return_VOID;
+ }
+
info.space_id = space_id;
info.function = function;
info.reg_run_count = 0;
@@ -714,8 +727,8 @@ acpi_ev_reg_run(acpi_handle obj_handle,
}
/*
- * We only care about regions.and objects that are allowed to have address
- * space handlers
+ * We only care about regions and objects that are allowed to have
+ * address space handlers
*/
if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
return (AE_OK);
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 39284deedd88..17df5dacd43c 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -16,9 +16,6 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evrgnini")
-/* Local prototypes */
-static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
-
/*******************************************************************************
*
* FUNCTION: acpi_ev_system_memory_region_setup
@@ -33,7 +30,6 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
* DESCRIPTION: Setup a system_memory operation region
*
******************************************************************************/
-
acpi_status
acpi_ev_system_memory_region_setup(acpi_handle handle,
u32 function,
@@ -313,7 +309,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*
******************************************************************************/
-static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
+u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
{
acpi_status status;
struct acpi_pnp_device_id *hid;
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 091415b14fbf..3b3a25d9f0e6 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -193,7 +193,6 @@ acpi_remove_address_space_handler(acpi_handle device,
*/
region_obj =
handler_obj->address_space.region_list;
-
}
/* Remove this Handler object from the list */
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index b272c329d45d..e5798f15793a 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
- * Module Name: exfield - ACPI AML (p-code) execution - field manipulation
+ * Module Name: exfield - AML execution - field_unit read/write
*
* Copyright (C) 2000 - 2018, Intel Corp.
*
@@ -16,64 +16,62 @@
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exfield")
-/* Local prototypes */
-static u32
-acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length);
+/*
+ * This table maps the various Attrib protocols to the byte transfer
+ * length. Used for the generic serial bus.
+ */
+#define ACPI_INVALID_PROTOCOL_ID 0x80
+#define ACPI_MAX_PROTOCOL_ID 0x0F
+const u8 acpi_protocol_lengths[] = {
+ ACPI_INVALID_PROTOCOL_ID, /* 0 - reserved */
+ ACPI_INVALID_PROTOCOL_ID, /* 1 - reserved */
+ 0x00, /* 2 - ATTRIB_QUICK */
+ ACPI_INVALID_PROTOCOL_ID, /* 3 - reserved */
+ 0x01, /* 4 - ATTRIB_SEND_RECEIVE */
+ ACPI_INVALID_PROTOCOL_ID, /* 5 - reserved */
+ 0x01, /* 6 - ATTRIB_BYTE */
+ ACPI_INVALID_PROTOCOL_ID, /* 7 - reserved */
+ 0x02, /* 8 - ATTRIB_WORD */
+ ACPI_INVALID_PROTOCOL_ID, /* 9 - reserved */
+ 0xFF, /* A - ATTRIB_BLOCK */
+ 0xFF, /* B - ATTRIB_BYTES */
+ 0x02, /* C - ATTRIB_PROCESS_CALL */
+ 0xFF, /* D - ATTRIB_BLOCK_PROCESS_CALL */
+ 0xFF, /* E - ATTRIB_RAW_BYTES */
+ 0xFF /* F - ATTRIB_RAW_PROCESS_BYTES */
+};
/*******************************************************************************
*
- * FUNCTION: acpi_ex_get_serial_access_length
+ * FUNCTION: acpi_ex_get_protocol_buffer_length
*
- * PARAMETERS: accessor_type - The type of the protocol indicated by region
+ * PARAMETERS: protocol_id - The type of the protocol indicated by region
* field access attributes
- * access_length - The access length of the region field
+ * return_length - Where the protocol byte transfer length is
+ * returned
*
- * RETURN: Decoded access length
+ * RETURN: Status and decoded byte transfer length
*
* DESCRIPTION: This routine returns the length of the generic_serial_bus
* protocol bytes
*
******************************************************************************/
-static u32
-acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length)
+acpi_status
+acpi_ex_get_protocol_buffer_length(u32 protocol_id, u32 *return_length)
{
- u32 length;
-
- switch (accessor_type) {
- case AML_FIELD_ATTRIB_QUICK:
-
- length = 0;
- break;
-
- case AML_FIELD_ATTRIB_SEND_RCV:
- case AML_FIELD_ATTRIB_BYTE:
-
- length = 1;
- break;
-
- case AML_FIELD_ATTRIB_WORD:
- case AML_FIELD_ATTRIB_WORD_CALL:
-
- length = 2;
- break;
- case AML_FIELD_ATTRIB_MULTIBYTE:
- case AML_FIELD_ATTRIB_RAW_BYTES:
- case AML_FIELD_ATTRIB_RAW_PROCESS:
+ if ((protocol_id > ACPI_MAX_PROTOCOL_ID) ||
+ (acpi_protocol_lengths[protocol_id] == ACPI_INVALID_PROTOCOL_ID)) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid Field/AccessAs protocol ID: 0x%4.4X",
+ protocol_id));
- length = access_length;
- break;
-
- case AML_FIELD_ATTRIB_BLOCK:
- case AML_FIELD_ATTRIB_BLOCK_CALL:
- default:
-
- length = ACPI_GSBUS_BUFFER_SIZE - 2;
- break;
+ return (AE_AML_PROTOCOL);
}
- return (length);
+ *return_length = acpi_protocol_lengths[protocol_id];
+ return (AE_OK);
}
/*******************************************************************************
@@ -98,10 +96,8 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
{
acpi_status status;
union acpi_operand_object *buffer_desc;
- acpi_size length;
void *buffer;
- u32 function;
- u16 accessor_type;
+ u32 buffer_length;
ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
@@ -132,60 +128,11 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
ACPI_ADR_SPACE_GSBUS
|| obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_IPMI)) {
- /*
- * This is an SMBus, GSBus or IPMI read. We must create a buffer to
- * hold the data and then directly access the region handler.
- *
- * Note: SMBus and GSBus protocol value is passed in upper 16-bits
- * of Function
- */
- if (obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_SMBUS) {
- length = ACPI_SMBUS_BUFFER_SIZE;
- function =
- ACPI_READ | (obj_desc->field.attribute << 16);
- } else if (obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_GSBUS) {
- accessor_type = obj_desc->field.attribute;
- length =
- acpi_ex_get_serial_access_length(accessor_type,
- obj_desc->field.
- access_length);
-
- /*
- * Add additional 2 bytes for the generic_serial_bus data buffer:
- *
- * Status; (Byte 0 of the data buffer)
- * Length; (Byte 1 of the data buffer)
- * Data[x-1]: (Bytes 2-x of the arbitrary length data buffer)
- */
- length += 2;
- function = ACPI_READ | (accessor_type << 16);
- } else { /* IPMI */
-
- length = ACPI_IPMI_BUFFER_SIZE;
- function = ACPI_READ;
- }
-
- buffer_desc = acpi_ut_create_buffer_object(length);
- if (!buffer_desc) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- /* Lock entire transaction if requested */
-
- acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
-
- /* Call the region handler for the read */
- status = acpi_ex_access_region(obj_desc, 0,
- ACPI_CAST_PTR(u64,
- buffer_desc->
- buffer.pointer),
- function);
+ /* SMBus, GSBus, IPMI serial */
- acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
- goto exit;
+ status = acpi_ex_read_serial_bus(obj_desc, ret_buffer_desc);
+ return_ACPI_STATUS(status);
}
/*
@@ -198,14 +145,14 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
*
* Note: Field.length is in bits.
*/
- length =
+ buffer_length =
(acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
- if (length > acpi_gbl_integer_byte_width) {
+ if (buffer_length > acpi_gbl_integer_byte_width) {
/* Field is too large for an Integer, create a Buffer instead */
- buffer_desc = acpi_ut_create_buffer_object(length);
+ buffer_desc = acpi_ut_create_buffer_object(buffer_length);
if (!buffer_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -218,47 +165,24 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_NO_MEMORY);
}
- length = acpi_gbl_integer_byte_width;
+ buffer_length = acpi_gbl_integer_byte_width;
buffer = &buffer_desc->integer.value;
}
if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
(obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_GPIO)) {
- /*
- * For GPIO (general_purpose_io), the Address will be the bit offset
- * from the previous Connection() operator, making it effectively a
- * pin number index. The bit_length is the length of the field, which
- * is thus the number of pins.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "GPIO FieldRead [FROM]: Pin %u Bits %u\n",
- obj_desc->field.pin_number_index,
- obj_desc->field.bit_length));
-
- /* Lock entire transaction if requested */
- acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+ /* General Purpose I/O */
- /* Perform the write */
-
- status =
- acpi_ex_access_region(obj_desc, 0, (u64 *)buffer,
- ACPI_READ);
-
- acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
- if (ACPI_FAILURE(status)) {
- acpi_ut_remove_reference(buffer_desc);
- } else {
- *ret_buffer_desc = buffer_desc;
- }
- return_ACPI_STATUS(status);
+ status = acpi_ex_read_gpio(obj_desc, buffer);
+ goto exit;
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n",
obj_desc, obj_desc->common.type, buffer,
- (u32) length));
+ buffer_length));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"FieldRead [FROM]: BitLen %X, BitOff %X, ByteOff %X\n",
obj_desc->common_field.bit_length,
@@ -271,7 +195,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
/* Read from the field */
- status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
+ status = acpi_ex_extract_from_field(obj_desc, buffer, buffer_length);
acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
exit:
@@ -304,11 +228,8 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
union acpi_operand_object **result_desc)
{
acpi_status status;
- u32 length;
+ u32 buffer_length;
void *buffer;
- union acpi_operand_object *buffer_desc;
- u32 function;
- u16 accessor_type;
ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
@@ -331,130 +252,25 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
}
} else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
(obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_SMBUS
- || obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_GSBUS
- || obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_IPMI)) {
- /*
- * This is an SMBus, GSBus or IPMI write. We will bypass the entire
- * field mechanism and handoff the buffer directly to the handler.
- * For these address spaces, the buffer is bi-directional; on a
- * write, return data is returned in the same buffer.
- *
- * Source must be a buffer of sufficient size:
- * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or
- * ACPI_IPMI_BUFFER_SIZE.
- *
- * Note: SMBus and GSBus protocol type is passed in upper 16-bits
- * of Function
- */
- if (source_desc->common.type != ACPI_TYPE_BUFFER) {
- ACPI_ERROR((AE_INFO,
- "SMBus/IPMI/GenericSerialBus write requires "
- "Buffer, found type %s",
- acpi_ut_get_object_type_name(source_desc)));
-
- return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
- }
-
- if (obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_SMBUS) {
- length = ACPI_SMBUS_BUFFER_SIZE;
- function =
- ACPI_WRITE | (obj_desc->field.attribute << 16);
- } else if (obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_GSBUS) {
- accessor_type = obj_desc->field.attribute;
- length =
- acpi_ex_get_serial_access_length(accessor_type,
- obj_desc->field.
- access_length);
-
- /*
- * Add additional 2 bytes for the generic_serial_bus data buffer:
- *
- * Status; (Byte 0 of the data buffer)
- * Length; (Byte 1 of the data buffer)
- * Data[x-1]: (Bytes 2-x of the arbitrary length data buffer)
- */
- length += 2;
- function = ACPI_WRITE | (accessor_type << 16);
- } else { /* IPMI */
-
- length = ACPI_IPMI_BUFFER_SIZE;
- function = ACPI_WRITE;
- }
-
- if (source_desc->buffer.length < length) {
- ACPI_ERROR((AE_INFO,
- "SMBus/IPMI/GenericSerialBus write requires "
- "Buffer of length %u, found length %u",
- length, source_desc->buffer.length));
-
- return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
- }
-
- /* Create the bi-directional buffer */
-
- buffer_desc = acpi_ut_create_buffer_object(length);
- if (!buffer_desc) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- buffer = buffer_desc->buffer.pointer;
- memcpy(buffer, source_desc->buffer.pointer, length);
-
- /* Lock entire transaction if requested */
+ ACPI_ADR_SPACE_GPIO)) {
- acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+ /* General Purpose I/O */
- /*
- * Perform the write (returns status and perhaps data in the
- * same buffer)
- */
- status =
- acpi_ex_access_region(obj_desc, 0, (u64 *)buffer, function);
- acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
-
- *result_desc = buffer_desc;
+ status = acpi_ex_write_gpio(source_desc, obj_desc, result_desc);
return_ACPI_STATUS(status);
} else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
(obj_desc->field.region_obj->region.space_id ==
- ACPI_ADR_SPACE_GPIO)) {
- /*
- * For GPIO (general_purpose_io), we will bypass the entire field
- * mechanism and handoff the bit address and bit width directly to
- * the handler. The Address will be the bit offset
- * from the previous Connection() operator, making it effectively a
- * pin number index. The bit_length is the length of the field, which
- * is thus the number of pins.
- */
- if (source_desc->common.type != ACPI_TYPE_INTEGER) {
- return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X [TO]: Pin %u Bits %u\n",
- acpi_ut_get_type_name(source_desc->common.
- type),
- source_desc->common.type,
- (u32)source_desc->integer.value,
- obj_desc->field.pin_number_index,
- obj_desc->field.bit_length));
-
- buffer = &source_desc->integer.value;
-
- /* Lock entire transaction if requested */
-
- acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+ ACPI_ADR_SPACE_SMBUS
+ || obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS
+ || obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_IPMI)) {
- /* Perform the write */
+ /* SMBus, GSBus, IPMI serial */
status =
- acpi_ex_access_region(obj_desc, 0, (u64 *)buffer,
- ACPI_WRITE);
- acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+ acpi_ex_write_serial_bus(source_desc, obj_desc,
+ result_desc);
return_ACPI_STATUS(status);
}
@@ -464,23 +280,22 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
case ACPI_TYPE_INTEGER:
buffer = &source_desc->integer.value;
- length = sizeof(source_desc->integer.value);
+ buffer_length = sizeof(source_desc->integer.value);
break;
case ACPI_TYPE_BUFFER:
buffer = source_desc->buffer.pointer;
- length = source_desc->buffer.length;
+ buffer_length = source_desc->buffer.length;
break;
case ACPI_TYPE_STRING:
buffer = source_desc->string.pointer;
- length = source_desc->string.length;
+ buffer_length = source_desc->string.length;
break;
default:
-
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
@@ -488,7 +303,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
"FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n",
source_desc,
acpi_ut_get_type_name(source_desc->common.type),
- source_desc->common.type, buffer, length));
+ source_desc->common.type, buffer, buffer_length));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"FieldWrite [TO]: Obj %p (%s:%X), BitLen %X, BitOff %X, ByteOff %X\n",
@@ -505,8 +320,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
/* Write to the field */
- status = acpi_ex_insert_into_field(obj_desc, buffer, length);
+ status = acpi_ex_insert_into_field(obj_desc, buffer, buffer_length);
acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
-
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
new file mode 100644
index 000000000000..0d42f30e5b25
--- /dev/null
+++ b/drivers/acpi/acpica/exserial.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/******************************************************************************
+ *
+ * Module Name: exserial - field_unit support for serial address spaces
+ *
+ * Copyright (C) 2000 - 2018, Intel Corp.
+ *
+ *****************************************************************************/
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acinterp.h"
+#include "amlcode.h"
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exserial")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_read_gpio
+ *
+ * PARAMETERS: obj_desc - The named field to read
+ * buffer - Where the return data is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Read from a named field that references a General Purpose
+ * I/O (GPIO) field
+ *
+ ******************************************************************************/
+acpi_status acpi_ex_read_gpio(union acpi_operand_object *obj_desc, void *buffer)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_read_gpio, obj_desc);
+
+ /*
+ * For GPIO (general_purpose_io), the Address will be the bit offset
+ * from the previous Connection() operator, making it effectively a
+ * pin number index. The bit_length is the length of the field, which
+ * is thus the number of pins.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "GPIO FieldRead [FROM]: Pin %u Bits %u\n",
+ obj_desc->field.pin_number_index,
+ obj_desc->field.bit_length));
+
+ /* Lock entire transaction if requested */
+
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+ /* Perform the read */
+
+ status = acpi_ex_access_region(obj_desc, 0, (u64 *)buffer, ACPI_READ);
+
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_write_gpio
+ *
+ * PARAMETERS: source_desc - Contains data to write; expected to be
+ * an Integer object.
+ * obj_desc - The named field
+ * result_desc - Where the return value is returned, if any
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Write to a named field that references a General Purpose I/O
+ * field.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_write_gpio(union acpi_operand_object *source_desc,
+ union acpi_operand_object *obj_desc,
+ union acpi_operand_object **return_buffer)
+{
+ acpi_status status;
+ void *buffer;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_write_gpio, obj_desc);
+
+ /*
+ * For GPIO (general_purpose_io), we will bypass the entire field
+ * mechanism and handoff the bit address and bit width directly to
+ * the handler. The Address will be the bit offset
+ * from the previous Connection() operator, making it effectively a
+ * pin number index. The bit_length is the length of the field, which
+ * is thus the number of pins.
+ */
+ if (source_desc->common.type != ACPI_TYPE_INTEGER) {
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "GPIO FieldWrite [FROM]: (%s:%X), Value %.8X [TO]: Pin %u Bits %u\n",
+ acpi_ut_get_type_name(source_desc->common.type),
+ source_desc->common.type,
+ (u32)source_desc->integer.value,
+ obj_desc->field.pin_number_index,
+ obj_desc->field.bit_length));
+
+ buffer = &source_desc->integer.value;
+
+ /* Lock entire transaction if requested */
+
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+ /* Perform the write */
+
+ status = acpi_ex_access_region(obj_desc, 0, (u64 *)buffer, ACPI_WRITE);
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_read_serial_bus
+ *
+ * PARAMETERS: obj_desc - The named field to read
+ * return_buffer - Where the return value is returned, if any
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Read from a named field that references a serial bus
+ * (SMBus, IPMI, or GSBus).
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_read_serial_bus(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **return_buffer)
+{
+ acpi_status status;
+ u32 buffer_length;
+ union acpi_operand_object *buffer_desc;
+ u32 function;
+ u16 accessor_type;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_read_serial_bus, obj_desc);
+
+ /*
+ * This is an SMBus, GSBus or IPMI read. We must create a buffer to
+ * hold the data and then directly access the region handler.
+ *
+ * Note: SMBus and GSBus protocol value is passed in upper 16-bits
+ * of Function
+ *
+ * Common buffer format:
+ * Status; (Byte 0 of the data buffer)
+ * Length; (Byte 1 of the data buffer)
+ * Data[x-1]: (Bytes 2-x of the arbitrary length data buffer)
+ */
+ switch (obj_desc->field.region_obj->region.space_id) {
+ case ACPI_ADR_SPACE_SMBUS:
+
+ buffer_length = ACPI_SMBUS_BUFFER_SIZE;
+ function = ACPI_READ | (obj_desc->field.attribute << 16);
+ break;
+
+ case ACPI_ADR_SPACE_IPMI:
+
+ buffer_length = ACPI_IPMI_BUFFER_SIZE;
+ function = ACPI_READ;
+ break;
+
+ case ACPI_ADR_SPACE_GSBUS:
+
+ accessor_type = obj_desc->field.attribute;
+ if (accessor_type == AML_FIELD_ATTRIB_RAW_PROCESS_BYTES) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid direct read using bidirectional write-then-read protocol"));
+
+ return_ACPI_STATUS(AE_AML_PROTOCOL);
+ }
+
+ status =
+ acpi_ex_get_protocol_buffer_length(accessor_type,
+ &buffer_length);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid protocol ID for GSBus: 0x%4.4X",
+ accessor_type));
+
+ return_ACPI_STATUS(status);
+ }
+
+ /* Add header length to get the full size of the buffer */
+
+ buffer_length += ACPI_SERIAL_HEADER_SIZE;
+ function = ACPI_READ | (accessor_type << 16);
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+ }
+
+ /* Create the local transfer buffer that is returned to the caller */
+
+ buffer_desc = acpi_ut_create_buffer_object(buffer_length);
+ if (!buffer_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Lock entire transaction if requested */
+
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+ /* Call the region handler for the write-then-read */
+
+ status = acpi_ex_access_region(obj_desc, 0,
+ ACPI_CAST_PTR(u64,
+ buffer_desc->buffer.
+ pointer), function);
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+
+ *return_buffer = buffer_desc;
+ return_ACPI_STATUS(status);
+}
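
For reference, the common transfer buffer described in the comment above can be pictured as the following layout; this struct is illustrative only, since ACPICA hands the region handler a raw byte buffer rather than a typed object:

/* Illustrative layout of the bidirectional serial bus buffer */
struct serial_bus_buffer {
	unsigned char status;	/* byte 0: transaction status */
	unsigned char length;	/* byte 1: count of valid data bytes */
	unsigned char data[];	/* bytes 2..N: protocol-dependent payload */
};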
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_write_serial_bus
+ *
+ * PARAMETERS: source_desc - Contains data to write
+ * obj_desc - The named field
+ * return_buffer - Where the return value is returned, if any
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Write to a named field that references a serial bus
+ * (SMBus, IPMI, GSBus).
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
+ union acpi_operand_object *obj_desc,
+ union acpi_operand_object **return_buffer)
+{
+ acpi_status status;
+ u32 buffer_length;
+ u32 data_length;
+ void *buffer;
+ union acpi_operand_object *buffer_desc;
+ u32 function;
+ u16 accessor_type;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_write_serial_bus, obj_desc);
+
+ /*
+ * This is an SMBus, GSBus or IPMI write. We will bypass the entire
+ * field mechanism and handoff the buffer directly to the handler.
+ * For these address spaces, the buffer is bidirectional; on a
+ * write, return data is returned in the same buffer.
+ *
+	 * Source must be a buffer of sufficient size; for SMBus and IPMI
+	 * these are the fixed sizes ACPI_SMBUS_BUFFER_SIZE and
+	 * ACPI_IPMI_BUFFER_SIZE.
+ *
+ * Note: SMBus and GSBus protocol type is passed in upper 16-bits
+ * of Function
+ *
+ * Common buffer format:
+ * Status; (Byte 0 of the data buffer)
+ * Length; (Byte 1 of the data buffer)
+ * Data[x-1]: (Bytes 2-x of the arbitrary length data buffer)
+ */
+ if (source_desc->common.type != ACPI_TYPE_BUFFER) {
+ ACPI_ERROR((AE_INFO,
+ "SMBus/IPMI/GenericSerialBus write requires "
+ "Buffer, found type %s",
+ acpi_ut_get_object_type_name(source_desc)));
+
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ switch (obj_desc->field.region_obj->region.space_id) {
+ case ACPI_ADR_SPACE_SMBUS:
+
+ buffer_length = ACPI_SMBUS_BUFFER_SIZE;
+ data_length = ACPI_SMBUS_DATA_SIZE;
+ function = ACPI_WRITE | (obj_desc->field.attribute << 16);
+ break;
+
+ case ACPI_ADR_SPACE_IPMI:
+
+ buffer_length = ACPI_IPMI_BUFFER_SIZE;
+ data_length = ACPI_IPMI_DATA_SIZE;
+ function = ACPI_WRITE;
+ break;
+
+ case ACPI_ADR_SPACE_GSBUS:
+
+ accessor_type = obj_desc->field.attribute;
+ status =
+ acpi_ex_get_protocol_buffer_length(accessor_type,
+ &buffer_length);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid protocol ID for GSBus: 0x%4.4X",
+ accessor_type));
+
+ return_ACPI_STATUS(status);
+ }
+
+ /* Add header length to get the full size of the buffer */
+
+ buffer_length += ACPI_SERIAL_HEADER_SIZE;
+ data_length = source_desc->buffer.pointer[1];
+ function = ACPI_WRITE | (accessor_type << 16);
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+ }
+
+#if 0
+ OBSOLETE ?
+ /* Check for possible buffer overflow */
+ if (data_length > source_desc->buffer.length) {
+ ACPI_ERROR((AE_INFO,
+ "Length in buffer header (%u)(%u) is greater than "
+ "the physical buffer length (%u) and will overflow",
+ data_length, buffer_length,
+ source_desc->buffer.length));
+
+ return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
+ }
+#endif
+
+ /* Create the transfer/bidirectional/return buffer */
+
+ buffer_desc = acpi_ut_create_buffer_object(buffer_length);
+ if (!buffer_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Copy the input buffer data to the transfer buffer */
+
+ buffer = buffer_desc->buffer.pointer;
+ memcpy(buffer, source_desc->buffer.pointer, data_length);
+
+ /* Lock entire transaction if requested */
+
+ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+ /*
+ * Perform the write (returns status and perhaps data in the
+ * same buffer)
+ */
+ status = acpi_ex_access_region(obj_desc, 0, (u64 *)buffer, function);
+ acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+
+ *return_buffer = buffer_desc;
+ return_ACPI_STATUS(status);
+}
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 34fc2f7476ed..0fa01c9e353e 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -147,7 +147,7 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
* future. Use of this option can cause problems with AML code that
* depends upon in-order immediate execution of module-level code.
*/
- if (acpi_gbl_group_module_level_code &&
+ if (!acpi_gbl_execute_tables_as_methods &&
(walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2) &&
((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
/*
@@ -417,6 +417,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
union acpi_parse_object *op = NULL; /* current op */
struct acpi_parse_state *parser_state;
u8 *aml_op_start = NULL;
+ u8 opcode_length;
ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
@@ -540,8 +541,19 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
"Skip parsing opcode %s",
acpi_ps_get_opcode_name
(walk_state->opcode)));
+
+ /*
+ * Determine the opcode length before skipping the opcode.
+ * An opcode can be 1 byte or 2 bytes in length.
+ */
+ opcode_length = 1;
+ if ((walk_state->opcode & 0xFF00) ==
+ AML_EXTENDED_OPCODE) {
+ opcode_length = 2;
+ }
walk_state->parser_state.aml =
- walk_state->aml + 1;
+ walk_state->aml + opcode_length;
+
walk_state->parser_state.aml =
acpi_ps_get_next_package_end
(&walk_state->parser_state);
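
The length decision above hinges on the extended-opcode prefix: ACPICA stores two-byte opcodes as 0x5BXX in a 16-bit value, so a high byte of 0x5B (AML_EXTENDED_OPCODE is 0x5B00 in amlcode.h) marks a two-byte opcode. A minimal sketch of the same test:

/* Illustration only: deciding 1- vs 2-byte AML opcode length */
#define AML_EXTENDED_OPCODE	0x5b00	/* ExtOpPrefix in the high byte */

static unsigned int aml_opcode_length(unsigned short opcode)
{
	/* 0x5bXX means the AML stream held two bytes: 0x5B, then XX */
	return ((opcode & 0xFF00) == AML_EXTENDED_OPCODE) ? 2 : 1;
}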
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 2f40f71c06db..9011297552af 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -69,8 +69,7 @@ acpi_status ACPI_INIT_FUNCTION acpi_load_tables(void)
"While loading namespace from ACPI tables"));
}
- if (acpi_gbl_execute_tables_as_methods
- || !acpi_gbl_group_module_level_code) {
+ if (acpi_gbl_execute_tables_as_methods) {
/*
* If the module-level code support is enabled, initialize the objects
* in the namespace that remain uninitialized. This runs the executable
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 08f26db2da7e..2a361e22d38d 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1428,7 +1428,7 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
return 0;
dma_deconfigure:
- acpi_dma_deconfigure(&pdev->dev);
+ arch_teardown_dma_ops(&pdev->dev);
dev_put:
platform_device_put(pdev);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d2e29a19890d..bb3d96dea6db 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1054,15 +1054,17 @@ void __init acpi_early_init(void)
goto error0;
}
- if (!acpi_gbl_execute_tables_as_methods &&
- acpi_gbl_group_module_level_code) {
- status = acpi_load_tables();
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to load the System Description Tables\n");
- goto error0;
- }
- }
+ /*
+ * ACPI 2.0 requires the EC driver to be loaded and work before
+ * the EC device is found in the namespace (i.e. before
+ * acpi_load_tables() is called).
+ *
+ * This is accomplished by looking for the ECDT table, and getting
+ * the EC parameters out of that.
+ *
+ * Ignore the result. Not having an ECDT is not fatal.
+ */
+ status = acpi_ec_ecdt_probe();
#ifdef CONFIG_X86
if (!acpi_ioapic) {
@@ -1133,25 +1135,11 @@ static int __init acpi_bus_init(void)
acpi_os_initialize1();
- /*
- * ACPI 2.0 requires the EC driver to be loaded and work before
- * the EC device is found in the namespace (i.e. before
- * acpi_load_tables() is called).
- *
- * This is accomplished by looking for the ECDT table, and getting
- * the EC parameters out of that.
- */
- status = acpi_ec_ecdt_probe();
- /* Ignore result. Not having an ECDT is not fatal. */
-
- if (acpi_gbl_execute_tables_as_methods ||
- !acpi_gbl_group_module_level_code) {
- status = acpi_load_tables();
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to load the System Description Tables\n");
- goto error1;
- }
+ status = acpi_load_tables();
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to load the System Description Tables\n");
+ goto error1;
}
status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index d9ce4b162e2c..217a782c3e55 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1061,9 +1061,9 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *highest_reg, *lowest_reg,
- *lowest_non_linear_reg, *nominal_reg,
+ *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
*low_freq_reg = NULL, *nom_freq_reg = NULL;
- u64 high, low, nom, min_nonlinear, low_f = 0, nom_f = 0;
+ u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
struct cppc_pcc_data *pcc_ss_data = NULL;
int ret = 0, regs_in_pcc = 0;
@@ -1079,6 +1079,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
+ guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];
/* Are any of the regs PCC ?*/
if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
@@ -1107,6 +1108,9 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
cpc_read(cpunum, nominal_reg, &nom);
perf_caps->nominal_perf = nom;
+ cpc_read(cpunum, guaranteed_reg, &guaranteed);
+ perf_caps->guaranteed_perf = guaranteed;
+
cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
perf_caps->lowest_nonlinear_perf = min_nonlinear;
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index e967c1173ba3..4451877f83b6 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -92,8 +92,7 @@ static int __init acpi_custom_method_init(void)
static void __exit acpi_custom_method_exit(void)
{
- if (cm_dentry)
- debugfs_remove(cm_dentry);
+ debugfs_remove(cm_dentry);
}
module_init(acpi_custom_method_init);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 3be1433853bf..12ba2bee8789 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -320,7 +320,7 @@ static int acpi_platform_notify(struct device *dev)
if (!adev)
goto out;
- if (dev->bus == &platform_bus_type)
+ if (dev_is_platform(dev))
acpi_configure_pmsi_domain(dev);
if (type && type->setup)
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b072cfc5f20e..f8c638f3c946 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -25,6 +25,7 @@
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "nfit.h"
+#include "intel.h"
/*
* For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
@@ -191,18 +192,20 @@ static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd
* In the _LSI, _LSR, _LSW case the locked status is
* communicated via the read/write commands
*/
- if (nfit_mem->has_lsr)
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
break;
if (status >> 16 & ND_CONFIG_LOCKED)
return -EACCES;
break;
case ND_CMD_GET_CONFIG_DATA:
- if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
+ && status == ACPI_LABELS_LOCKED)
return -EACCES;
break;
case ND_CMD_SET_CONFIG_DATA:
- if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
+ if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
+ && status == ACPI_LABELS_LOCKED)
return -EACCES;
break;
default:
@@ -480,14 +483,16 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
min_t(u32, 256, in_buf.buffer.length), true);
/* call the BIOS, prefer the named methods over _DSM if available */
- if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr)
+ if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
+ && test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
out_obj = acpi_label_info(handle);
- else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
+ else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
+ && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
struct nd_cmd_get_config_data_hdr *p = buf;
out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
- && nfit_mem->has_lsw) {
+ && test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
struct nd_cmd_set_config_hdr *p = buf;
out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
@@ -1547,7 +1552,12 @@ static DEVICE_ATTR_RO(dsm_mask);
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u16 flags = to_nfit_memdev(dev)->flags;
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ u16 flags = __to_nfit_memdev(nfit_mem)->flags;
+
+ if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
+ flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
return sprintf(buf, "%s%s%s%s%s%s%s\n",
flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
@@ -1578,6 +1588,16 @@ static ssize_t id_show(struct device *dev,
}
static DEVICE_ATTR_RO(id);
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+ return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
+}
+static DEVICE_ATTR_RO(dirty_shutdown);
+
static struct attribute *acpi_nfit_dimm_attributes[] = {
&dev_attr_handle.attr,
&dev_attr_phys_id.attr,
@@ -1595,6 +1615,7 @@ static struct attribute *acpi_nfit_dimm_attributes[] = {
&dev_attr_id.attr,
&dev_attr_family.attr,
&dev_attr_dsm_mask.attr,
+ &dev_attr_dirty_shutdown.attr,
NULL,
};
@@ -1603,6 +1624,7 @@ static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
if (!to_nfit_dcr(dev)) {
/* Without a dcr only the memdev attributes can be surfaced */
@@ -1616,6 +1638,11 @@ static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
return 0;
+
+ if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
+ && a == &dev_attr_dirty_shutdown.attr)
+ return 0;
+
return a->mode;
}
@@ -1694,6 +1721,56 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
return false;
}
+__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
+{
+ struct nd_intel_smart smart = { 0 };
+ union acpi_object in_buf = {
+ .type = ACPI_TYPE_BUFFER,
+ .buffer.pointer = (char *) &smart,
+ .buffer.length = sizeof(smart),
+ };
+ union acpi_object in_obj = {
+ .type = ACPI_TYPE_PACKAGE,
+ .package.count = 1,
+ .package.elements = &in_buf,
+ };
+ const u8 func = ND_INTEL_SMART;
+ const guid_t *guid = to_nfit_uuid(nfit_mem->family);
+ u8 revid = nfit_dsm_revid(nfit_mem->family, func);
+ struct acpi_device *adev = nfit_mem->adev;
+ acpi_handle handle = adev->handle;
+ union acpi_object *out_obj;
+
+ if ((nfit_mem->dsm_mask & (1 << func)) == 0)
+ return;
+
+	out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
+	if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
+			|| out_obj->buffer.length < sizeof(smart)) {
+		ACPI_FREE(out_obj);
+		return;
+	}
+
+	/* Copy the DSM output into the local payload before checking flags */
+	memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
+
+ if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
+ if (smart.shutdown_state)
+ set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
+ }
+
+ if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
+ set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
+ nfit_mem->dirty_shutdown = smart.shutdown_count;
+ }
+ ACPI_FREE(out_obj);
+}
+
+static void populate_shutdown_status(struct nfit_mem *nfit_mem)
+{
+ /*
+ * For DIMMs that provide a dynamic facility to retrieve a
+ * dirty-shutdown status and/or a dirty-shutdown count, cache
+ * these values in nfit_mem.
+ */
+ if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+ nfit_intel_shutdown_status(nfit_mem);
+}
+
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, u32 device_handle)
{
@@ -1708,8 +1785,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
nfit_mem->family = NVDIMM_FAMILY_INTEL;
adev = to_acpi_dev(acpi_desc);
- if (!adev)
+ if (!adev) {
+ /* unit test case */
+ populate_shutdown_status(nfit_mem);
return 0;
+ }
adev_dimm = acpi_find_child_device(adev, device_handle, false);
nfit_mem->adev = adev_dimm;
@@ -1784,14 +1864,17 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
- nfit_mem->has_lsr = true;
+ set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
}
- if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
+ && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
- nfit_mem->has_lsw = true;
+ set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
}
+ populate_shutdown_status(nfit_mem);
+
return 0;
}
@@ -1878,11 +1961,11 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
}
- if (nfit_mem->has_lsr) {
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
}
- if (nfit_mem->has_lsw)
+ if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
@@ -2466,7 +2549,8 @@ static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
return cmd_rc;
}
-static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
+static int ars_start(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
{
int rc;
int cmd_rc;
@@ -2477,7 +2561,7 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa
memset(&ars_start, 0, sizeof(ars_start));
ars_start.address = spa->address;
ars_start.length = spa->length;
- if (test_bit(ARS_SHORT, &nfit_spa->ars_state))
+ if (req_type == ARS_REQ_SHORT)
ars_start.flags = ND_ARS_RETURN_PREV_DATA;
if (nfit_spa_type(spa) == NFIT_SPA_PM)
ars_start.type = ND_ARS_PERSISTENT;
@@ -2534,6 +2618,15 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
struct nd_region *nd_region = nfit_spa->nd_region;
struct device *dev;
+ lockdep_assert_held(&acpi_desc->init_mutex);
+ /*
+ * Only advance the ARS state for ARS runs initiated by the
+ * kernel, ignore ARS results from BIOS initiated runs for scrub
+ * completion tracking.
+ */
+ if (acpi_desc->scrub_spa != nfit_spa)
+ return;
+
if ((ars_status->address >= spa->address && ars_status->address
< spa->address + spa->length)
|| (ars_status->address < spa->address)) {
@@ -2553,28 +2646,13 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
} else
return;
- if (test_bit(ARS_DONE, &nfit_spa->ars_state))
- return;
-
- if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state))
- return;
-
+ acpi_desc->scrub_spa = NULL;
if (nd_region) {
dev = nd_region_dev(nd_region);
nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
} else
dev = acpi_desc->dev;
-
- dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index,
- test_bit(ARS_SHORT, &nfit_spa->ars_state)
- ? "short" : "long");
- clear_bit(ARS_SHORT, &nfit_spa->ars_state);
- if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) {
- set_bit(ARS_SHORT, &nfit_spa->ars_state);
- set_bit(ARS_REQ, &nfit_spa->ars_state);
- dev_dbg(dev, "ARS: processing scrub request received while in progress\n");
- } else
- set_bit(ARS_DONE, &nfit_spa->ars_state);
+ dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
}
static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
@@ -2855,46 +2933,55 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
return 0;
}
-static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa,
- int *query_rc)
+static int ars_register(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa)
{
- int rc = *query_rc;
+ int rc;
- if (no_init_ars)
+ if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
return acpi_nfit_register_region(acpi_desc, nfit_spa);
- set_bit(ARS_REQ, &nfit_spa->ars_state);
- set_bit(ARS_SHORT, &nfit_spa->ars_state);
+ set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
+ set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
- switch (rc) {
+ switch (acpi_nfit_query_poison(acpi_desc)) {
case 0:
case -EAGAIN:
- rc = ars_start(acpi_desc, nfit_spa);
- if (rc == -EBUSY) {
- *query_rc = rc;
+ rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
+ /* shouldn't happen, try again later */
+ if (rc == -EBUSY)
break;
- } else if (rc == 0) {
- rc = acpi_nfit_query_poison(acpi_desc);
- } else {
+ if (rc) {
set_bit(ARS_FAILED, &nfit_spa->ars_state);
break;
}
- if (rc == -EAGAIN)
- clear_bit(ARS_SHORT, &nfit_spa->ars_state);
- else if (rc == 0)
- ars_complete(acpi_desc, nfit_spa);
+ clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
+ rc = acpi_nfit_query_poison(acpi_desc);
+ if (rc)
+ break;
+ acpi_desc->scrub_spa = nfit_spa;
+ ars_complete(acpi_desc, nfit_spa);
+ /*
+ * If ars_complete() says we didn't complete the
+ * short scrub, we'll try again with a long
+ * request.
+ */
+ acpi_desc->scrub_spa = NULL;
break;
case -EBUSY:
+ case -ENOMEM:
case -ENOSPC:
+ /*
+ * BIOS was using ARS, wait for it to complete (or
+ * resources to become available) and then perform our
+ * own scrubs.
+ */
break;
default:
set_bit(ARS_FAILED, &nfit_spa->ars_state);
break;
}
- if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state))
- set_bit(ARS_REQ, &nfit_spa->ars_state);
-
return acpi_nfit_register_region(acpi_desc, nfit_spa);
}
@@ -2916,6 +3003,8 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
struct device *dev = acpi_desc->dev;
struct nfit_spa *nfit_spa;
+ lockdep_assert_held(&acpi_desc->init_mutex);
+
if (acpi_desc->cancel)
return 0;
@@ -2939,21 +3028,49 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
ars_complete_all(acpi_desc);
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ enum nfit_ars_state req_type;
+ int rc;
+
if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
continue;
- if (test_bit(ARS_REQ, &nfit_spa->ars_state)) {
- int rc = ars_start(acpi_desc, nfit_spa);
-
- clear_bit(ARS_DONE, &nfit_spa->ars_state);
- dev = nd_region_dev(nfit_spa->nd_region);
- dev_dbg(dev, "ARS: range %d ARS start (%d)\n",
- nfit_spa->spa->range_index, rc);
- if (rc == 0 || rc == -EBUSY)
- return 1;
- dev_err(dev, "ARS: range %d ARS failed (%d)\n",
- nfit_spa->spa->range_index, rc);
- set_bit(ARS_FAILED, &nfit_spa->ars_state);
+
+ /* prefer short ARS requests first */
+ if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
+ req_type = ARS_REQ_SHORT;
+ else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
+ req_type = ARS_REQ_LONG;
+ else
+ continue;
+ rc = ars_start(acpi_desc, nfit_spa, req_type);
+
+ dev = nd_region_dev(nfit_spa->nd_region);
+ dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
+ nfit_spa->spa->range_index,
+ req_type == ARS_REQ_SHORT ? "short" : "long",
+ rc);
+ /*
+		 * We may have raced someone else starting ARS; try
+		 * again shortly.
+ */
+ if (rc == -EBUSY)
+ return 1;
+ if (rc == 0) {
+ dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
+ "scrub start while range %d active\n",
+ acpi_desc->scrub_spa->spa->range_index);
+ clear_bit(req_type, &nfit_spa->ars_state);
+ acpi_desc->scrub_spa = nfit_spa;
+ /*
+ * Consider this spa last for future scrub
+ * requests
+ */
+ list_move_tail(&nfit_spa->list, &acpi_desc->spas);
+ return 1;
}
+
+ dev_err(dev, "ARS: range %d ARS failed (%d)\n",
+ nfit_spa->spa->range_index, rc);
+ set_bit(ARS_FAILED, &nfit_spa->ars_state);
}
return 0;
}
@@ -3009,6 +3126,7 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
struct nd_cmd_ars_cap ars_cap;
int rc;
+ set_bit(ARS_FAILED, &nfit_spa->ars_state);
memset(&ars_cap, 0, sizeof(ars_cap));
rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
if (rc < 0)
@@ -3025,16 +3143,14 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
clear_bit(ARS_FAILED, &nfit_spa->ars_state);
- set_bit(ARS_REQ, &nfit_spa->ars_state);
}
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_spa *nfit_spa;
- int rc, query_rc;
+ int rc;
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
- set_bit(ARS_FAILED, &nfit_spa->ars_state);
switch (nfit_spa_type(nfit_spa->spa)) {
case NFIT_SPA_VOLATILE:
case NFIT_SPA_PM:
@@ -3043,20 +3159,12 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
}
}
- /*
- * Reap any results that might be pending before starting new
- * short requests.
- */
- query_rc = acpi_nfit_query_poison(acpi_desc);
- if (query_rc == 0)
- ars_complete_all(acpi_desc);
-
list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
switch (nfit_spa_type(nfit_spa->spa)) {
case NFIT_SPA_VOLATILE:
case NFIT_SPA_PM:
/* register regions and kick off initial ARS run */
- rc = ars_register(acpi_desc, nfit_spa, &query_rc);
+ rc = ars_register(acpi_desc, nfit_spa);
if (rc)
return rc;
break;
@@ -3233,6 +3341,8 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct nfit_spa *nfit_spa;
+ int rc = 0;
if (nvdimm)
return 0;
@@ -3242,16 +3352,24 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
/*
* The kernel and userspace may race to initiate a scrub, but
* the scrub thread is prepared to lose that initial race. It
- * just needs guarantees that any ars it initiates are not
- * interrupted by any intervening start reqeusts from userspace.
+ * just needs guarantees that any ARS it initiates are not
+ * interrupted by any intervening start requests from userspace.
*/
- if (work_busy(&acpi_desc->dwork.work))
- return -EBUSY;
+ mutex_lock(&acpi_desc->init_mutex);
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ if (acpi_desc->scrub_spa
+ || test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)
+ || test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) {
+ rc = -EBUSY;
+ break;
+ }
+ mutex_unlock(&acpi_desc->init_mutex);
- return 0;
+ return rc;
}
-int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
+int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
+ enum nfit_ars_state req_type)
{
struct device *dev = acpi_desc->dev;
int scheduled = 0, busy = 0;
@@ -3271,14 +3389,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
continue;
- if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) {
+ if (test_and_set_bit(req_type, &nfit_spa->ars_state))
busy++;
- set_bit(ARS_REQ_REDO, &nfit_spa->ars_state);
- } else {
- if (test_bit(ARS_SHORT, &flags))
- set_bit(ARS_SHORT, &nfit_spa->ars_state);
+ else
scheduled++;
- }
}
if (scheduled) {
sched_ars(acpi_desc);
@@ -3464,10 +3578,11 @@ static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
- unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ?
- 0 : 1 << ARS_SHORT;
- acpi_nfit_ars_rescan(acpi_desc, flags);
+ if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
+ acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
+ else
+ acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
}
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
diff --git a/drivers/acpi/nfit/intel.h b/drivers/acpi/nfit/intel.h
new file mode 100644
index 000000000000..86746312381f
--- /dev/null
+++ b/drivers/acpi/nfit/intel.h
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ * Intel specific definitions for NVDIMM Firmware Interface Table - NFIT
+ */
+#ifndef _NFIT_INTEL_H_
+#define _NFIT_INTEL_H_
+
+#define ND_INTEL_SMART 1
+
+#define ND_INTEL_SMART_SHUTDOWN_COUNT_VALID (1 << 5)
+#define ND_INTEL_SMART_SHUTDOWN_VALID (1 << 10)
+
+struct nd_intel_smart {
+ u32 status;
+ union {
+ struct {
+ u32 flags;
+ u8 reserved0[4];
+ u8 health;
+ u8 spares;
+ u8 life_used;
+ u8 alarm_flags;
+ u16 media_temperature;
+ u16 ctrl_temperature;
+ u32 shutdown_count;
+ u8 ait_status;
+ u16 pmic_temperature;
+ u8 reserved1[8];
+ u8 shutdown_state;
+ u32 vendor_size;
+ u8 vendor_data[92];
+ } __packed;
+ u8 data[128];
+ };
+} __packed;
+
+#endif
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index d1274ea2d251..df0f6b8407e7 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -118,10 +118,8 @@ enum nfit_dimm_notifiers {
};
enum nfit_ars_state {
- ARS_REQ,
- ARS_REQ_REDO,
- ARS_DONE,
- ARS_SHORT,
+ ARS_REQ_SHORT,
+ ARS_REQ_LONG,
ARS_FAILED,
};
@@ -159,6 +157,13 @@ struct nfit_memdev {
struct acpi_nfit_memory_map memdev[0];
};
+enum nfit_mem_flags {
+ NFIT_MEM_LSR,
+ NFIT_MEM_LSW,
+ NFIT_MEM_DIRTY,
+ NFIT_MEM_DIRTY_COUNT,
+};
+
/* assembled tables for a given dimm/memory-device */
struct nfit_mem {
struct nvdimm *nvdimm;
@@ -178,9 +183,9 @@ struct nfit_mem {
struct acpi_nfit_desc *acpi_desc;
struct resource *flush_wpq;
unsigned long dsm_mask;
+ unsigned long flags;
+ u32 dirty_shutdown;
int family;
- bool has_lsr;
- bool has_lsw;
};
struct acpi_nfit_desc {
@@ -198,6 +203,7 @@ struct acpi_nfit_desc {
struct device *dev;
u8 ars_start_flags;
struct nd_cmd_ars_status *ars_status;
+ struct nfit_spa *scrub_spa;
struct delayed_work dwork;
struct list_head list;
struct kernfs_node *scrub_count_state;
@@ -252,7 +258,8 @@ struct nfit_blk {
extern struct list_head acpi_descs;
extern struct mutex acpi_desc_lock;
-int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags);
+int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
+ enum nfit_ars_state req_type);
#ifdef CONFIG_X86_MCE
void nfit_mce_register(void);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 8df9abfa947b..b48874b8e1ea 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -617,15 +617,18 @@ void acpi_os_stall(u32 us)
}
/*
- * Support ACPI 3.0 AML Timer operand
- * Returns 64-bit free-running, monotonically increasing timer
- * with 100ns granularity
+ * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
+ * monotonically increasing timer with 100ns granularity. Do not use
+ * ktime_get() to implement this function, because it may get called
+ * after timekeeping has been suspended. Note that the jiffies counter
+ * is not incremented while timekeeping is suspended either, so calling
+ * this function in that window may still yield unexpected results; see
+ * also timekeeping_suspend().
*/
u64 acpi_os_get_timer(void)
{
- u64 time_ns = ktime_to_ns(ktime_get());
- do_div(time_ns, 100);
- return time_ns;
+ return (get_jiffies_64() - INITIAL_JIFFIES) *
+ (ACPI_100NSEC_PER_SEC / HZ);
}
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
@@ -1129,6 +1132,7 @@ void acpi_os_wait_events_complete(void)
flush_workqueue(kacpid_wq);
flush_workqueue(kacpi_notify_wq);
}
+EXPORT_SYMBOL(acpi_os_wait_events_complete);
struct acpi_hp_work {
struct work_struct work;
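Illustrative aside, not part of the patch: the replacement arithmetic in
acpi_os_get_timer() scales elapsed jiffies into 100ns units — each tick is
1/HZ seconds, i.e. ACPI_100NSEC_PER_SEC / HZ units of 100ns. A standalone
model (HZ fixed at 250 purely for the example; the kernel uses its configured
value, and ACPI_100NSEC_PER_SEC is 10^7 by definition):

#include <stdint.h>
#include <stdio.h>

#define HZ 250
#define ACPI_100NSEC_PER_SEC 10000000ULL /* one second in 100ns units */

static uint64_t acpi_timer_from_jiffies(uint64_t jiffies_elapsed)
{
	/* each tick contributes ACPI_100NSEC_PER_SEC / HZ 100ns units */
	return jiffies_elapsed * (ACPI_100NSEC_PER_SEC / HZ);
}

int main(void)
{
	/* 500 ticks at HZ=250 is two seconds = 2 * 10^7 100ns units */
	printf("%llu\n", (unsigned long long)acpi_timer_from_jiffies(500));
	return 0;
}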
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 7433035ded95..707aafc7c2aa 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -421,7 +421,8 @@ out:
}
EXPORT_SYMBOL(acpi_pci_osc_control_set);
-static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
+static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
+ bool is_pcie)
{
u32 support, control, requested;
acpi_status status;
@@ -455,9 +456,15 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
decode_osc_support(root, "OS supports", support);
status = acpi_pci_osc_support(root, support);
if (ACPI_FAILURE(status)) {
- dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
- acpi_format_exception(status));
*no_aspm = 1;
+
+ /* _OSC is optional for PCI host bridges */
+ if ((status == AE_NOT_FOUND) && !is_pcie)
+ return;
+
+ dev_info(&device->dev, "_OSC failed (%s)%s\n",
+ acpi_format_exception(status),
+ pcie_aspm_support_enabled() ? "; disabling ASPM" : "");
return;
}
@@ -533,6 +540,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_handle handle = device->handle;
int no_aspm = 0;
bool hotadd = system_state == SYSTEM_RUNNING;
+ bool is_pcie;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -590,7 +598,8 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle);
- negotiate_os_control(root, &no_aspm);
+ is_pcie = strcmp(acpi_device_hid(device), "PNP0A08") == 0;
+ negotiate_os_control(root, &no_aspm, is_pcie);
/*
* TBD: Need PCI interface for enumeration/configuration of roots.
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
index 886ac8b93cd0..bd7621edd60b 100644
--- a/drivers/acpi/pmic/intel_pmic_bxtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_pmic_bxtwc.c - Intel BXT WhiskeyCove PMIC operation region driver
+ * Intel BXT WhiskeyCove PMIC operation region driver
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/init.h>
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index f6d73a243d80..7ccd7d9660bc 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Dollar Cove TI PMIC operation region driver
* Copyright (C) 2014 Intel Corporation. All rights reserved.
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
index 9912422c8185..078b0448f30a 100644
--- a/drivers/acpi/pmic/intel_pmic_chtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel CHT Whiskey Cove PMIC operation region driver
* Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
*
* Based on various non upstream patches to support the CHT Whiskey Cove PMIC:
* Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/acpi.h>
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index 22c9e374c923..a0f411a6e5ac 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -1,23 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_pmic_crc.c - Intel CrystalCove PMIC operation region driver
+ * Intel CrystalCove PMIC operation region driver
*
* Copyright (C) 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
-#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/init.h>
#include <linux/mfd/intel_soc_pmic.h>
-#include <linux/regmap.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include "intel_pmic.h"
#define PWR_SOURCE_SELECT BIT(1)
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 316e55174aa9..aadc86db804c 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -1,23 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_pmic_xpower.c - XPower AXP288 PMIC operation region driver
+ * XPower AXP288 PMIC operation region driver
*
* Copyright (C) 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
-#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/init.h>
#include <linux/mfd/axp20x.h>
-#include <linux/regmap.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include "intel_pmic.h"
#define XPOWER_GPADC_LOW 0x5b
diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c
index a083de507009..ebd03e472955 100644
--- a/drivers/acpi/pmic/tps68470_pmic.c
+++ b/drivers/acpi/pmic/tps68470_pmic.c
@@ -10,8 +10,8 @@
*/
#include <linux/acpi.h>
-#include <linux/mfd/tps68470.h>
#include <linux/init.h>
+#include <linux/mfd/tps68470.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index d1e26cb599bf..da031b1df6f5 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -338,9 +338,6 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
return found;
}
-/* total number of attributes checked by the properties code */
-#define PPTT_CHECKED_ATTRIBUTES 4
-
/**
* update_cache_properties() - Update cacheinfo for the given processor
* @this_leaf: Kernel cache info structure being updated
@@ -357,25 +354,15 @@ static void update_cache_properties(struct cacheinfo *this_leaf,
struct acpi_pptt_cache *found_cache,
struct acpi_pptt_processor *cpu_node)
{
- int valid_flags = 0;
-
this_leaf->fw_token = cpu_node;
- if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID) {
+ if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID)
this_leaf->size = found_cache->size;
- valid_flags++;
- }
- if (found_cache->flags & ACPI_PPTT_LINE_SIZE_VALID) {
+ if (found_cache->flags & ACPI_PPTT_LINE_SIZE_VALID)
this_leaf->coherency_line_size = found_cache->line_size;
- valid_flags++;
- }
- if (found_cache->flags & ACPI_PPTT_NUMBER_OF_SETS_VALID) {
+ if (found_cache->flags & ACPI_PPTT_NUMBER_OF_SETS_VALID)
this_leaf->number_of_sets = found_cache->number_of_sets;
- valid_flags++;
- }
- if (found_cache->flags & ACPI_PPTT_ASSOCIATIVITY_VALID) {
+ if (found_cache->flags & ACPI_PPTT_ASSOCIATIVITY_VALID)
this_leaf->ways_of_associativity = found_cache->associativity;
- valid_flags++;
- }
if (found_cache->flags & ACPI_PPTT_WRITE_POLICY_VALID) {
switch (found_cache->attributes & ACPI_PPTT_MASK_WRITE_POLICY) {
case ACPI_PPTT_CACHE_POLICY_WT:
@@ -402,11 +389,17 @@ static void update_cache_properties(struct cacheinfo *this_leaf,
}
}
/*
- * If the above flags are valid, and the cache type is NOCACHE
- * update the cache type as well.
+ * If the cache type is NOCACHE, the cache hasn't been specified via
+ * other mechanisms; update the type if one has been provided here.
+ *
+ * Note that we assume such caches are unified, based on conventional
+ * system design and known examples. Significant work is required
+ * elsewhere to fully support caches that the PPTT alone describes as
+ * data-only or instruction-only.
*/
if (this_leaf->type == CACHE_TYPE_NOCACHE &&
- valid_flags == PPTT_CHECKED_ATTRIBUTES)
+ found_cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)
this_leaf->type = CACHE_TYPE_UNIFIED;
}
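Illustrative aside, not part of the patch: the net effect is that a NOCACHE
entry is now promoted to unified whenever the PPTT marks its cache-type field
valid, instead of requiring all four size/geometry attributes. A compact
model of the old and new predicates (flag bits here are illustrative, not
taken from the ACPICA headers):

#include <stdio.h>

/* illustrative flag bits, one per PPTT attribute */
#define F_SIZE   (1 << 0)
#define F_LINE   (1 << 1)
#define F_SETS   (1 << 2)
#define F_ASSOC  (1 << 3)
#define F_TYPE   (1 << 4)

static int promote_old(unsigned int flags) /* all four attributes required */
{
	return (flags & (F_SIZE | F_LINE | F_SETS | F_ASSOC)) ==
	       (F_SIZE | F_LINE | F_SETS | F_ASSOC);
}

static int promote_new(unsigned int flags) /* only the type-valid bit */
{
	return !!(flags & F_TYPE);
}

int main(void)
{
	/* a node with a valid type but incomplete geometry was previously
	 * left as NOCACHE; now it becomes CACHE_TYPE_UNIFIED */
	unsigned int flags = F_TYPE | F_SIZE;

	printf("old: %d, new: %d\n", promote_old(flags), promote_new(flags));
	return 0;
}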
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index abb559cd28d7..b2131c4ea124 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -205,6 +205,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
static void tsc_check_state(int state)
{
switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 693cf05b0cc4..8c7c4583b52d 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -24,11 +24,15 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
acpi_object_type type,
const union acpi_object **obj);
-/* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
-static const guid_t prp_guid =
+static const guid_t prp_guids[] = {
+ /* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
- 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01);
-/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
+ 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01),
+ /* Hotplug in D3 GUID: 6211e2c0-58a3-4af3-90e1-927a4e0c55a4 */
+ GUID_INIT(0x6211e2c0, 0x58a3, 0x4af3,
+ 0x90, 0xe1, 0x92, 0x7a, 0x4e, 0x0c, 0x55, 0xa4),
+};
+
static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
@@ -56,6 +60,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
dn->name = link->package.elements[0].string.pointer;
dn->fwnode.ops = &acpi_data_fwnode_ops;
dn->parent = parent;
+ INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
result = acpi_extract_properties(desc, &dn->data);
@@ -288,6 +293,35 @@ static void acpi_init_of_compatible(struct acpi_device *adev)
adev->flags.of_compatible_ok = 1;
}
+static bool acpi_is_property_guid(const guid_t *guid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prp_guids); i++) {
+ if (guid_equal(guid, &prp_guids[i]))
+ return true;
+ }
+
+ return false;
+}
+
+struct acpi_device_properties *
+acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
+ const union acpi_object *properties)
+{
+ struct acpi_device_properties *props;
+
+ props = kzalloc(sizeof(*props), GFP_KERNEL);
+ if (props) {
+ INIT_LIST_HEAD(&props->list);
+ props->guid = guid;
+ props->properties = properties;
+ list_add_tail(&props->list, &data->properties);
+ }
+
+ return props;
+}
+
static bool acpi_extract_properties(const union acpi_object *desc,
struct acpi_device_data *data)
{
@@ -312,7 +346,7 @@ static bool acpi_extract_properties(const union acpi_object *desc,
properties->type != ACPI_TYPE_PACKAGE)
break;
- if (!guid_equal((guid_t *)guid->buffer.pointer, &prp_guid))
+ if (!acpi_is_property_guid((guid_t *)guid->buffer.pointer))
continue;
/*
@@ -320,13 +354,13 @@ static bool acpi_extract_properties(const union acpi_object *desc,
* package immediately following it.
*/
if (!acpi_properties_format_valid(properties))
- break;
+ continue;
- data->properties = properties;
- return true;
+ acpi_data_add_props(data, (const guid_t *)guid->buffer.pointer,
+ properties);
}
- return false;
+ return !list_empty(&data->properties);
}
void acpi_init_properties(struct acpi_device *adev)
@@ -336,6 +370,7 @@ void acpi_init_properties(struct acpi_device *adev)
acpi_status status;
bool acpi_of = false;
+ INIT_LIST_HEAD(&adev->data.properties);
INIT_LIST_HEAD(&adev->data.subnodes);
if (!adev->handle)
@@ -398,11 +433,16 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list)
void acpi_free_properties(struct acpi_device *adev)
{
+ struct acpi_device_properties *props, *tmp;
+
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
adev->data.pointer = NULL;
- adev->data.properties = NULL;
+ list_for_each_entry_safe(props, tmp, &adev->data.properties, list) {
+ list_del(&props->list);
+ kfree(props);
+ }
}
/**
@@ -427,32 +467,37 @@ static int acpi_data_get_property(const struct acpi_device_data *data,
const char *name, acpi_object_type type,
const union acpi_object **obj)
{
- const union acpi_object *properties;
- int i;
+ const struct acpi_device_properties *props;
if (!data || !name)
return -EINVAL;
- if (!data->pointer || !data->properties)
+ if (!data->pointer || list_empty(&data->properties))
return -EINVAL;
- properties = data->properties;
- for (i = 0; i < properties->package.count; i++) {
- const union acpi_object *propname, *propvalue;
- const union acpi_object *property;
+ list_for_each_entry(props, &data->properties, list) {
+ const union acpi_object *properties;
+ unsigned int i;
- property = &properties->package.elements[i];
+ properties = props->properties;
+ for (i = 0; i < properties->package.count; i++) {
+ const union acpi_object *propname, *propvalue;
+ const union acpi_object *property;
- propname = &property->package.elements[0];
- propvalue = &property->package.elements[1];
+ property = &properties->package.elements[i];
- if (!strcmp(name, propname->string.pointer)) {
- if (type != ACPI_TYPE_ANY && propvalue->type != type)
- return -EPROTO;
- if (obj)
- *obj = propvalue;
+ propname = &property->package.elements[0];
+ propvalue = &property->package.elements[1];
- return 0;
+ if (!strcmp(name, propname->string.pointer)) {
+ if (type != ACPI_TYPE_ANY &&
+ propvalue->type != type)
+ return -EPROTO;
+ if (obj)
+ *obj = propvalue;
+
+ return 0;
+ }
}
}
return -EINVAL;
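Illustrative aside, not part of the patch: the lookup now walks a list of
per-GUID property packages instead of dereferencing a single package pointer.
A userspace model of that two-level search, with flat arrays standing in for
the kernel's list_head and ACPI object machinery:

#include <stdio.h>
#include <string.h>

struct prop { const char *name; int value; };
struct prop_set { const struct prop *props; int count; };

static int get_property(const struct prop_set *sets, int nsets,
			const char *name, int *out)
{
	for (int s = 0; s < nsets; s++)
		for (int i = 0; i < sets[s].count; i++)
			if (!strcmp(sets[s].props[i].name, name)) {
				*out = sets[s].props[i].value;
				return 0;
			}
	return -1; /* -EINVAL in the kernel version */
}

int main(void)
{
	const struct prop dsd[] = { { "clock-frequency", 100 } };
	const struct prop hotplug[] = { { "HotPlugSupportInD3", 1 } };
	const struct prop_set sets[] = { { dsd, 1 }, { hotplug, 1 } };
	int v;

	/* found in the second GUID's set, which a single-pointer
	 * implementation would never have reached */
	if (!get_property(sets, 2, "HotPlugSupportInD3", &v))
		printf("HotPlugSupportInD3 = %d\n", v);
	return 0;
}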
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 295b59271189..96c5e27967f4 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -441,9 +441,13 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs)
/*
* The spec requires that bit 4 always be 1. If it's not set, assume
- * that the implementation doesn't support an SBS charger
+ * that the implementation doesn't support an SBS charger.
+ *
+ * Some MacBooks also always return a status of 0xffff, regardless of
+ * whether a charger is plugged in; that reading is equally invalid,
+ * so ignore the SBS charger on those machines too.
*/
- if (!((status >> 4) & 0x1))
+ if (!((status >> 4) & 0x1) || status == 0xffff)
return -ENODEV;
sbs->charger_present = (status >> 15) & 0x1;
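Illustrative aside, not part of the patch: the resulting predicate rejects
both failure modes — the spec-violating clear bit 4 and the all-ones MacBook
reading. A tiny standalone check (status values are made up):

#include <stdio.h>

static int sbs_charger_supported(unsigned int status)
{
	/* bit 4 must be set per spec, and 0xffff is a known-bogus reading */
	return ((status >> 4) & 0x1) && status != 0xffff;
}

int main(void)
{
	printf("%d %d %d\n",
	       sbs_charger_supported(0x8010),  /* sane: bits 4 and 15 set */
	       sbs_charger_supported(0x0000),  /* bit 4 clear: unsupported */
	       sbs_charger_supported(0xffff)); /* MacBook bogus reading */
	return 0;
}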
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 7a3431018e0a..5008ead4609a 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -196,6 +196,7 @@ int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc)
hc->callback = NULL;
hc->context = NULL;
mutex_unlock(&hc->lock);
+ acpi_os_wait_events_complete();
return 0;
}
@@ -292,6 +293,7 @@ static int acpi_smbus_hc_remove(struct acpi_device *device)
hc = acpi_driver_data(device);
acpi_ec_remove_query_handler(hc->ec, hc->query_bit);
+ acpi_os_wait_events_complete();
kfree(hc);
device->driver_data = NULL;
return 0;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e1b6231cfa1c..bd1c59fb0e17 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1469,16 +1469,6 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
}
EXPORT_SYMBOL_GPL(acpi_dma_configure);
-/**
- * acpi_dma_deconfigure - Tear-down DMA configuration for the device.
- * @dev: The pointer to the device
- */
-void acpi_dma_deconfigure(struct device *dev)
-{
- arch_teardown_dma_ops(dev);
-}
-EXPORT_SYMBOL_GPL(acpi_dma_deconfigure);
-
static void acpi_init_coherency(struct acpi_device *adev)
{
unsigned long long cca = 0;
@@ -1550,6 +1540,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
*/
static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
{"BSG1160", },
+ {"INT33FE", },
{}
};
diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c
index 51b4cf9f25da..b7c98ff82d78 100644
--- a/drivers/acpi/x86/apple.c
+++ b/drivers/acpi/x86/apple.c
@@ -62,7 +62,7 @@ void acpi_extract_apple_properties(struct acpi_device *adev)
if (!numprops)
goto out_free;
- valid = kcalloc(BITS_TO_LONGS(numprops), sizeof(long), GFP_KERNEL);
+ valid = bitmap_zalloc(numprops, GFP_KERNEL);
if (!valid)
goto out_free;
@@ -132,10 +132,10 @@ void acpi_extract_apple_properties(struct acpi_device *adev)
}
WARN_ON(free_space != (void *)newprops + newsize);
- adev->data.properties = newprops;
adev->data.pointer = newprops;
+ acpi_data_add_props(&adev->data, &apple_prp_guid, newprops);
out_free:
ACPI_FREE(props);
- kfree(valid);
+ bitmap_free(valid);
}
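Illustrative aside, not part of the patch: bitmap_zalloc()/bitmap_free() are
the idiomatic pairing for bit arrays, and the replaced kcalloc() computed the
same allocation size by hand. A userspace model of that size computation:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *bitmap_zalloc_model(unsigned int nbits)
{
	/* zeroed array of enough longs to hold nbits bits */
	return calloc(BITS_TO_LONGS(nbits), sizeof(long));
}

int main(void)
{
	/* 65 bits round up to two longs on an LP64 machine */
	printf("%zu longs\n", (size_t)BITS_TO_LONGS(65));
	free(bitmap_zalloc_model(65));
	return 0;
}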
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 06c31ec3cc70..9a8e286dd86f 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -54,7 +54,7 @@ static const struct always_present_id always_present_ids[] = {
* Bay / Cherry Trail PWM directly poked by GPU driver in win10,
* but Linux uses a separate PWM driver, harmless if not used.
*/
- ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1), {}),
+ ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}),
ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
/*
* The INT0002 device is necessary to clear wakeup interrupt sources
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 39b181d6bd0d..4ca7a6b4eaae 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -33,7 +33,6 @@ if ATA
config ATA_NONSTANDARD
bool
- default n
config ATA_VERBOSE_ERROR
bool "Verbose ATA error reporting"
@@ -62,7 +61,6 @@ config ATA_ACPI
config SATA_ZPODD
bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
depends on ATA_ACPI && PM
- default n
help
This option adds support for SATA Zero Power Optical Disc
Drive (ZPODD). It requires both the ODD and the platform
@@ -121,7 +119,8 @@ config SATA_AHCI_PLATFORM
config AHCI_BRCM
tristate "Broadcom AHCI SATA support"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
+ ARCH_BCM_63XX
help
This option enables support for the AHCI SATA3 controller found on
Broadcom SoCs.
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 6a1515f0da40..ef356e70e6de 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -352,6 +352,8 @@ struct ahci_host_priv {
struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
struct reset_control *rsts; /* Optional */
struct regulator **target_pwrs; /* Optional */
+ struct regulator *ahci_regulator;/* Optional */
+ struct regulator *phy_regulator;/* Optional */
/*
* If platform uses PHYs. There is a 1:1 relation between the port number and
* the PHY position in this array.
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index f3d557777d82..fba5a3044c8a 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/string.h>
#include "ahci.h"
@@ -94,6 +95,7 @@ struct brcm_ahci_priv {
u32 port_mask;
u32 quirks;
enum brcm_ahci_version version;
+ struct reset_control *rcdev;
};
static inline u32 brcm_sata_readreg(void __iomem *addr)
@@ -381,6 +383,7 @@ static struct scsi_host_template ahci_platform_sht = {
static const struct of_device_id ahci_of_match[] = {
{.compatible = "brcm,bcm7425-ahci", .data = (void *)BRCM_SATA_BCM7425},
{.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445},
+ {.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
{},
};
@@ -411,6 +414,11 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
+ /* Reset is optional depending on platform */
+ priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci");
+ if (!IS_ERR_OR_NULL(priv->rcdev))
+ reset_control_deassert(priv->rcdev);
+
if ((priv->version == BRCM_SATA_BCM7425) ||
(priv->version == BRCM_SATA_NSP)) {
priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 46f0bd75eff7..cf1e0e18a7a9 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -33,6 +33,13 @@ static const struct ata_port_info ahci_port_info = {
.port_ops = &ahci_platform_ops,
};
+static const struct ata_port_info ahci_port_info_nolpm = {
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_LPM,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
+};
+
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
@@ -41,6 +48,7 @@ static int ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
+ const struct ata_port_info *port;
int rc;
hpriv = ahci_platform_get_resources(pdev,
@@ -58,7 +66,11 @@ static int ahci_probe(struct platform_device *pdev)
if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
- rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
+ port = acpi_device_get_match_data(dev);
+ if (!port)
+ port = &ahci_port_info;
+
+ rc = ahci_platform_init_host(pdev, hpriv, port,
&ahci_platform_sht);
if (rc)
goto disable_resources;
@@ -85,6 +97,7 @@ static const struct of_device_id ahci_of_match[] = {
MODULE_DEVICE_TABLE(of, ahci_of_match);
static const struct acpi_device_id ahci_acpi_match[] = {
+ { "APMC0D33", (unsigned long)&ahci_port_info_nolpm },
{ ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
{},
};
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 631610b72aa5..911710643305 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -181,7 +181,7 @@ static int ahci_sunxi_probe(struct platform_device *pdev)
struct ahci_host_priv *hpriv;
int rc;
- hpriv = ahci_platform_get_resources(pdev, 0);
+ hpriv = ahci_platform_get_resources(pdev, AHCI_PLATFORM_GET_RESETS);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
@@ -250,6 +250,7 @@ static SIMPLE_DEV_PM_OPS(ahci_sunxi_pm_ops, ahci_platform_suspend,
static const struct of_device_id ahci_sunxi_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-ahci", },
+ { .compatible = "allwinner,sun8i-r40-ahci", },
{ },
};
MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index c92c10d55374..4b900fc659f7 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -139,7 +139,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
* ahci_platform_enable_regulators - Enable regulators
* @hpriv: host private area to store config values
*
- * This function enables all the regulators found in
+ * This function enables the controller and PHY regulators, and all those in
* hpriv->target_pwrs, if any. If a regulator fails to be enabled, it
* disables all the regulators already enabled in reverse order and
* returns an error.
@@ -151,6 +151,18 @@ int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv)
{
int rc, i;
+ if (hpriv->ahci_regulator) {
+ rc = regulator_enable(hpriv->ahci_regulator);
+ if (rc)
+ return rc;
+ }
+
+ if (hpriv->phy_regulator) {
+ rc = regulator_enable(hpriv->phy_regulator);
+ if (rc)
+ goto disable_ahci_pwrs;
+ }
+
for (i = 0; i < hpriv->nports; i++) {
if (!hpriv->target_pwrs[i])
continue;
@@ -167,6 +179,11 @@ disable_target_pwrs:
if (hpriv->target_pwrs[i])
regulator_disable(hpriv->target_pwrs[i]);
+ if (hpriv->phy_regulator)
+ regulator_disable(hpriv->phy_regulator);
+disable_ahci_pwrs:
+ if (hpriv->ahci_regulator)
+ regulator_disable(hpriv->ahci_regulator);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
@@ -175,7 +192,8 @@ EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
* ahci_platform_disable_regulators - Disable regulators
* @hpriv: host private area to store config values
*
- * This function disables all regulators found in hpriv->target_pwrs.
+ * This function disables all regulators found in hpriv->target_pwrs, as
+ * well as the controller and PHY regulators.
*/
void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
{
@@ -186,6 +204,11 @@ void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
continue;
regulator_disable(hpriv->target_pwrs[i]);
}
+
+ if (hpriv->ahci_regulator)
+ regulator_disable(hpriv->ahci_regulator);
+ if (hpriv->phy_regulator)
+ regulator_disable(hpriv->phy_regulator);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
/**
@@ -303,8 +326,8 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
/* No PHY support. Check if PHY is required. */
if (of_find_property(node, "phys", NULL)) {
dev_err(dev,
- "couldn't get PHY in node %s: ENOSYS\n",
- node->name);
+ "couldn't get PHY in node %pOFn: ENOSYS\n",
+ node);
break;
}
/* fall through */
@@ -316,8 +339,8 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
default:
dev_err(dev,
- "couldn't get PHY in node %s: %d\n",
- node->name, rc);
+ "couldn't get PHY in node %pOFn: %d\n",
+ node, rc);
break;
}
@@ -351,6 +374,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
*
* 1) mmio registers (IORESOURCE_MEM 0, mandatory)
* 2) regulator for controlling the targets power (optional)
+ * regulators for the AHCI controller and PHY (optional)
* 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
* or for non devicetree enabled platforms a single clock
* 4) resets, if flags has AHCI_PLATFORM_GET_RESETS (optional)
@@ -408,6 +432,24 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
hpriv->clks[i] = clk;
}
+ hpriv->ahci_regulator = devm_regulator_get_optional(dev, "ahci");
+ if (IS_ERR(hpriv->ahci_regulator)) {
+ rc = PTR_ERR(hpriv->ahci_regulator);
+ if (rc == -EPROBE_DEFER)
+ goto err_out;
+ rc = 0;
+ hpriv->ahci_regulator = NULL;
+ }
+
+ hpriv->phy_regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(hpriv->phy_regulator)) {
+ rc = PTR_ERR(hpriv->phy_regulator);
+ if (rc == -EPROBE_DEFER)
+ goto err_out;
+ rc = 0;
+ hpriv->phy_regulator = NULL;
+ }
+
if (flags & AHCI_PLATFORM_GET_RESETS) {
hpriv->rsts = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(hpriv->rsts)) {
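Illustrative aside, not part of the patch: both regulator lookups above
follow the standard optional-resource pattern — -EPROBE_DEFER aborts the
probe so it can be retried later, while any other error (typically -ENODEV
when the supply simply isn't described) demotes the regulator to absent. That
decision in isolation, as a userspace sketch (EPROBE_DEFER is kernel-internal;
the value mirrors the kernel's errno.h):

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517 /* kernel-internal errno, not in userspace errno.h */

/* returns 0 to continue probing (resource present or absent),
 * or -EPROBE_DEFER to abort and retry the probe later */
static int classify_optional(int lookup_err, int *present)
{
	if (!lookup_err) {
		*present = 1;
		return 0;
	}
	if (lookup_err == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	*present = 0; /* e.g. -ENODEV: supply not described, carry on */
	return 0;
}

int main(void)
{
	int present;

	printf("%d\n", classify_optional(-ENODEV, &present));       /* 0 */
	printf("%d\n", classify_optional(-EPROBE_DEFER, &present)); /* <0 */
	return 0;
}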
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 599e01bcdef2..a9dd4ea7467d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5359,10 +5359,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
*/
int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
{
+ u64 done_mask, ap_qc_active = ap->qc_active;
int nr_done = 0;
- u64 done_mask;
- done_mask = ap->qc_active ^ qc_active;
+ /*
+ * If the internal tag is set on ap->qc_active, then we care about
+ * bit0 on the passed in qc_active mask. Move that bit up to match
+ * the internal tag.
+ */
+ if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+ qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
+ qc_active ^= qc_active & 0x01;
+ }
+
+ done_mask = ap_qc_active ^ qc_active;
if (unlikely(done_mask & qc_active)) {
ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
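Illustrative aside, not part of the patch: the remap above reconciles the
kernel-internal command tag — hardware completion masks report the internal
command in bit 0, while ap->qc_active tracks it at bit ATA_TAG_INTERNAL (32
in the kernel). A standalone model of the bit move:

#include <stdint.h>
#include <stdio.h>

#define ATA_TAG_INTERNAL 32

static uint64_t remap_internal_tag(uint64_t ap_qc_active, uint64_t qc_active)
{
	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		/* mirror bit 0 up to the internal-tag position... */
		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
		/* ...and clear the original bit 0 */
		qc_active ^= qc_active & 0x01;
	}
	return qc_active;
}

int main(void)
{
	/* internal command outstanding, hardware reports it done in bit 0 */
	uint64_t done = remap_internal_tag(1ULL << ATA_TAG_INTERNAL, 0x1);

	printf("0x%llx\n", (unsigned long long)done); /* 0x100000000 */
	return 0;
}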
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1984fc78c750..3d4887d0e84a 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -639,8 +639,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */
scsi_cmd[6] = args[3];
scsi_cmd[8] = args[1];
- scsi_cmd[10] = 0x4f;
- scsi_cmd[12] = 0xc2;
+ scsi_cmd[10] = ATA_SMART_LBAM_PASS;
+ scsi_cmd[12] = ATA_SMART_LBAH_PASS;
} else {
scsi_cmd[6] = args[1];
}
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 4d49fd3c927b..843bb200a1ee 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -279,7 +279,7 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
const struct ata_port_info *ppi[] = { &info, &info };
/* SB600 doesn't have secondary port wired */
- if((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE))
+ if (pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE)
ppi[1] = &ata_dummy_port_info;
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 0a550190955a..cc6d06c1b2c7 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -659,7 +659,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
* start of new transfer.
*/
drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_rx_data.direction = DMA_FROM_DEVICE;
+ drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM;
drv_data->dma_rx_data.name = "ep93xx-pata-rx";
drv_data->dma_rx_channel = dma_request_channel(mask,
ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
@@ -667,7 +667,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
return;
drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_tx_data.direction = DMA_TO_DEVICE;
+ drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV;
drv_data->dma_tx_data.name = "ep93xx-pata-tx";
drv_data->dma_tx_channel = dma_request_channel(mask,
ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
@@ -678,7 +678,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
/* Configure receive channel direction and source address */
memset(&conf, 0, sizeof(conf));
- conf.direction = DMA_FROM_DEVICE;
+ conf.direction = DMA_DEV_TO_MEM;
conf.src_addr = drv_data->udma_in_phys;
conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
@@ -689,7 +689,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
/* Configure transmit channel direction and destination address */
memset(&conf, 0, sizeof(conf));
- conf.direction = DMA_TO_DEVICE;
+ conf.direction = DMA_MEM_TO_DEV;
conf.dst_addr = drv_data->udma_out_phys;
conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 9b6d7930d1c7..e0bcf9b2dab0 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -873,7 +873,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* like others but it will lock up the whole machine HARD if
* 65536 byte PRD entry is fed. Reduce maximum segment size.
*/
- rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
+ rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512);
if (rc) {
dev_err(&pdev->dev, "failed to set the maximum segment size\n");
return rc;
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 6470e3c4c990..f8c703426c90 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -241,7 +241,8 @@ static void __iomem *eni_alloc_mem(struct eni_dev *eni_dev, unsigned long *size)
len = eni_dev->free_len;
if (*size < MID_MIN_BUF_SIZE) *size = MID_MIN_BUF_SIZE;
if (*size > MID_MAX_BUF_SIZE) return NULL;
- for (order = 0; (1 << order) < *size; order++);
+ for (order = 0; (1 << order) < *size; order++)
+ ;
DPRINTK("trying: %ld->%d\n",*size,order);
best_order = 65; /* we don't have more than 2^64 of anything ... */
index = 0; /* silence GCC */
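Illustrative aside, not part of the patch: the reflowed loop computes the
smallest power-of-two order that covers *size. An equivalent standalone form,
written with an explicit body for readability:

#include <stdio.h>

/* smallest 'order' such that (1 << order) >= size */
static int buf_order(unsigned long size)
{
	int order = 0;

	while ((1UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	printf("%d %d %d\n", buf_order(1), buf_order(4096), buf_order(5000));
	/* prints: 0 12 13 */
	return 0;
}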
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 99a38115b0a8..f55ffde877b5 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -106,7 +106,6 @@
static const struct atmdev_ops fore200e_ops;
-static const struct fore200e_bus fore200e_bus[];
static LIST_HEAD(fore200e_boards);
@@ -183,10 +182,9 @@ fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, i
alignment = 0;
chunk->alloc_size = size + alignment;
- chunk->align_size = size;
chunk->direction = direction;
- chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
+ chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
if (chunk->alloc_addr == NULL)
return -ENOMEM;
@@ -195,8 +193,12 @@ fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, i
chunk->align_addr = chunk->alloc_addr + offset;
- chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
-
+ chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
+ size, direction);
+ if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) {
+ kfree(chunk->alloc_addr);
+ return -ENOMEM;
+ }
return 0;
}
@@ -206,11 +208,39 @@ fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, i
static void
fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
- fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
-
+ dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
+ chunk->direction);
kfree(chunk->alloc_addr);
}
+/*
+ * Allocate a DMA consistent chunk of memory intended to act as a communication
+ * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
+ * and the adapter.
+ */
+static int
+fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
+ int size, int nbr, int alignment)
+{
+ /* returned chunks are page-aligned */
+ chunk->alloc_size = size * nbr;
+ chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
+ &chunk->dma_addr, GFP_KERNEL);
+ if (!chunk->alloc_addr)
+ return -ENOMEM;
+ chunk->align_addr = chunk->alloc_addr;
+ return 0;
+}
+
+/*
+ * Free a DMA consistent chunk of memory.
+ */
+static void
+fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
+{
+ dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
+ chunk->dma_addr);
+}
static void
fore200e_spin(int msecs)
@@ -303,10 +333,10 @@ fore200e_uninit_bs_queue(struct fore200e* fore200e)
struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
if (status->alloc_addr)
- fore200e->bus->dma_chunk_free(fore200e, status);
+ fore200e_dma_chunk_free(fore200e, status);
if (rbd_block->alloc_addr)
- fore200e->bus->dma_chunk_free(fore200e, rbd_block);
+ fore200e_dma_chunk_free(fore200e, rbd_block);
}
}
}
@@ -372,17 +402,17 @@ fore200e_shutdown(struct fore200e* fore200e)
/* fall through */
case FORE200E_STATE_INIT_RXQ:
- fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
- fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
+ fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
+ fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
/* fall through */
case FORE200E_STATE_INIT_TXQ:
- fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
- fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
+ fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
+ fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
/* fall through */
case FORE200E_STATE_INIT_CMDQ:
- fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
+ fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
/* fall through */
case FORE200E_STATE_INITIALIZE:
@@ -429,81 +459,6 @@ static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
writel(cpu_to_le32(val), addr);
}
-
-static u32
-fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
-{
- u32 dma_addr = dma_map_single(&((struct pci_dev *) fore200e->bus_dev)->dev, virt_addr, size, direction);
-
- DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
- virt_addr, size, direction, dma_addr);
-
- return dma_addr;
-}
-
-
-static void
-fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
-{
- DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
- dma_addr, size, direction);
-
- dma_unmap_single(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
-}
-
-
-static void
-fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
-{
- DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
-
- dma_sync_single_for_cpu(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
-}
-
-static void
-fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
-{
- DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
-
- dma_sync_single_for_device(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
-}
-
-
-/* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
- (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
-
-static int
-fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
- int size, int nbr, int alignment)
-{
- /* returned chunks are page-aligned */
- chunk->alloc_size = size * nbr;
- chunk->alloc_addr = dma_alloc_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
- chunk->alloc_size,
- &chunk->dma_addr,
- GFP_KERNEL);
-
- if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
- return -ENOMEM;
-
- chunk->align_addr = chunk->alloc_addr;
-
- return 0;
-}
-
-
-/* free a DMA consistent chunk of memory */
-
-static void
-fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
-{
- dma_free_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
- chunk->alloc_size,
- chunk->alloc_addr,
- chunk->dma_addr);
-}
-
-
static int
fore200e_pca_irq_check(struct fore200e* fore200e)
{
@@ -571,7 +526,7 @@ fore200e_pca_unmap(struct fore200e* fore200e)
static int fore200e_pca_configure(struct fore200e *fore200e)
{
- struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
+ struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
u8 master_ctrl, latency;
DPRINTK(2, "device %s being configured\n", fore200e->name);
@@ -623,7 +578,10 @@ fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
opcode.opcode = OPCODE_GET_PROM;
opcode.pad = 0;
- prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
+ prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(fore200e->dev, prom_dma))
+ return -ENOMEM;
fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
@@ -635,7 +593,7 @@ fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
*entry->status = STATUS_FREE;
- fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
+ dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
if (ok == 0) {
printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
@@ -658,15 +616,31 @@ fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
static int
fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
{
- struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
+ struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
}
+static const struct fore200e_bus fore200e_pci_ops = {
+ .model_name = "PCA-200E",
+ .proc_name = "pca200e",
+ .descr_alignment = 32,
+ .buffer_alignment = 4,
+ .status_alignment = 32,
+ .read = fore200e_pca_read,
+ .write = fore200e_pca_write,
+ .configure = fore200e_pca_configure,
+ .map = fore200e_pca_map,
+ .reset = fore200e_pca_reset,
+ .prom_read = fore200e_pca_prom_read,
+ .unmap = fore200e_pca_unmap,
+ .irq_check = fore200e_pca_irq_check,
+ .irq_ack = fore200e_pca_irq_ack,
+ .proc_read = fore200e_pca_proc_read,
+};
#endif /* CONFIG_PCI */
-
#ifdef CONFIG_SBUS
static u32 fore200e_sba_read(volatile u32 __iomem *addr)
@@ -679,78 +653,6 @@ static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
sbus_writel(val, addr);
}
-static u32 fore200e_sba_dma_map(struct fore200e *fore200e, void* virt_addr, int size, int direction)
-{
- struct platform_device *op = fore200e->bus_dev;
- u32 dma_addr;
-
- dma_addr = dma_map_single(&op->dev, virt_addr, size, direction);
-
- DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
- virt_addr, size, direction, dma_addr);
-
- return dma_addr;
-}
-
-static void fore200e_sba_dma_unmap(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
-{
- struct platform_device *op = fore200e->bus_dev;
-
- DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
- dma_addr, size, direction);
-
- dma_unmap_single(&op->dev, dma_addr, size, direction);
-}
-
-static void fore200e_sba_dma_sync_for_cpu(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
-{
- struct platform_device *op = fore200e->bus_dev;
-
- DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
-
- dma_sync_single_for_cpu(&op->dev, dma_addr, size, direction);
-}
-
-static void fore200e_sba_dma_sync_for_device(struct fore200e *fore200e, u32 dma_addr, int size, int direction)
-{
- struct platform_device *op = fore200e->bus_dev;
-
- DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
-
- dma_sync_single_for_device(&op->dev, dma_addr, size, direction);
-}
-
-/* Allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
- * (to hold descriptors, status, queues, etc.) shared by the driver and the adapter.
- */
-static int fore200e_sba_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
- int size, int nbr, int alignment)
-{
- struct platform_device *op = fore200e->bus_dev;
-
- chunk->alloc_size = chunk->align_size = size * nbr;
-
- /* returned chunks are page-aligned */
- chunk->alloc_addr = dma_alloc_coherent(&op->dev, chunk->alloc_size,
- &chunk->dma_addr, GFP_ATOMIC);
-
- if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
- return -ENOMEM;
-
- chunk->align_addr = chunk->alloc_addr;
-
- return 0;
-}
-
-/* free a DVMA consistent chunk of memory */
-static void fore200e_sba_dma_chunk_free(struct fore200e *fore200e, struct chunk *chunk)
-{
- struct platform_device *op = fore200e->bus_dev;
-
- dma_free_coherent(&op->dev, chunk->alloc_size,
- chunk->alloc_addr, chunk->dma_addr);
-}
-
static void fore200e_sba_irq_enable(struct fore200e *fore200e)
{
u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
@@ -777,7 +679,7 @@ static void fore200e_sba_reset(struct fore200e *fore200e)
static int __init fore200e_sba_map(struct fore200e *fore200e)
{
- struct platform_device *op = fore200e->bus_dev;
+ struct platform_device *op = to_platform_device(fore200e->dev);
unsigned int bursts;
/* gain access to the SBA specific registers */
@@ -807,7 +709,7 @@ static int __init fore200e_sba_map(struct fore200e *fore200e)
static void fore200e_sba_unmap(struct fore200e *fore200e)
{
- struct platform_device *op = fore200e->bus_dev;
+ struct platform_device *op = to_platform_device(fore200e->dev);
of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
@@ -823,7 +725,7 @@ static int __init fore200e_sba_configure(struct fore200e *fore200e)
static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
{
- struct platform_device *op = fore200e->bus_dev;
+ struct platform_device *op = to_platform_device(fore200e->dev);
const u8 *prop;
int len;
@@ -847,7 +749,7 @@ static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_
static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
{
- struct platform_device *op = fore200e->bus_dev;
+ struct platform_device *op = to_platform_device(fore200e->dev);
const struct linux_prom_registers *regs;
regs = of_get_property(op->dev.of_node, "reg", NULL);
@@ -855,8 +757,26 @@ static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n",
(regs ? regs->which_io : 0), op->dev.of_node->name);
}
-#endif /* CONFIG_SBUS */
+static const struct fore200e_bus fore200e_sbus_ops = {
+ .model_name = "SBA-200E",
+ .proc_name = "sba200e",
+ .descr_alignment = 32,
+ .buffer_alignment = 64,
+ .status_alignment = 32,
+ .read = fore200e_sba_read,
+ .write = fore200e_sba_write,
+ .configure = fore200e_sba_configure,
+ .map = fore200e_sba_map,
+ .reset = fore200e_sba_reset,
+ .prom_read = fore200e_sba_prom_read,
+ .unmap = fore200e_sba_unmap,
+ .irq_enable = fore200e_sba_irq_enable,
+ .irq_check = fore200e_sba_irq_check,
+ .irq_ack = fore200e_sba_irq_ack,
+ .proc_read = fore200e_sba_proc_read,
+};
+#endif /* CONFIG_SBUS */
static void
fore200e_tx_irq(struct fore200e* fore200e)
@@ -884,7 +804,7 @@ fore200e_tx_irq(struct fore200e* fore200e)
kfree(entry->data);
/* remove DMA mapping */
- fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
+ dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
DMA_TO_DEVICE);
vc_map = entry->vc_map;
@@ -1105,12 +1025,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
/* Make device DMA transfer visible to CPU. */
- fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
+ rpd->rsd[i].length, DMA_FROM_DEVICE);
skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);
/* Now let the device get at it again. */
- fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
+ rpd->rsd[i].length, DMA_FROM_DEVICE);
}
DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
@@ -1611,7 +1533,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
}
if (tx_copy) {
- data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
+ data = kmalloc(tx_len, GFP_ATOMIC);
if (data == NULL) {
if (vcc->pop) {
vcc->pop(vcc, skb);
@@ -1679,7 +1601,14 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
entry->data = tx_copy ? data : NULL;
tpd = entry->tpd;
- tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
+ tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) {
+ if (tx_copy)
+ kfree(data);
+ spin_unlock_irqrestore(&fore200e->q_lock, flags);
+ return -ENOMEM;
+ }
tpd->tsd[ 0 ].length = tx_len;
FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
@@ -1747,13 +1676,15 @@ fore200e_getstats(struct fore200e* fore200e)
u32 stats_dma_addr;
if (fore200e->stats == NULL) {
- fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
+ fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL);
if (fore200e->stats == NULL)
return -ENOMEM;
}
- stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
- sizeof(struct stats), DMA_FROM_DEVICE);
+ stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats,
+ sizeof(struct stats), DMA_FROM_DEVICE);
+ if (dma_mapping_error(fore200e->dev, stats_dma_addr))
+ return -ENOMEM;
FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
@@ -1770,7 +1701,7 @@ fore200e_getstats(struct fore200e* fore200e)
*entry->status = STATUS_FREE;
- fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
+ dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
if (ok == 0) {
printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
@@ -2049,7 +1980,7 @@ static int fore200e_irq_request(struct fore200e *fore200e)
static int fore200e_get_esi(struct fore200e *fore200e)
{
- struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
+ struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL);
int ok, i;
if (!prom)
@@ -2156,7 +2087,7 @@ static int fore200e_init_bs_queue(struct fore200e *fore200e)
bsq = &fore200e->host_bsq[ scheme ][ magn ];
/* allocate and align the array of status words */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&bsq->status,
sizeof(enum status),
QUEUE_SIZE_BS,
@@ -2165,13 +2096,13 @@ static int fore200e_init_bs_queue(struct fore200e *fore200e)
}
/* allocate and align the array of receive buffer descriptors */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&bsq->rbd_block,
sizeof(struct rbd_block),
QUEUE_SIZE_BS,
fore200e->bus->descr_alignment) < 0) {
- fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
+ fore200e_dma_chunk_free(fore200e, &bsq->status);
return -ENOMEM;
}
@@ -2212,7 +2143,7 @@ static int fore200e_init_rx_queue(struct fore200e *fore200e)
DPRINTK(2, "receive queue is being initialized\n");
/* allocate and align the array of status words */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&rxq->status,
sizeof(enum status),
QUEUE_SIZE_RX,
@@ -2221,13 +2152,13 @@ static int fore200e_init_rx_queue(struct fore200e *fore200e)
}
/* allocate and align the array of receive PDU descriptors */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&rxq->rpd,
sizeof(struct rpd),
QUEUE_SIZE_RX,
fore200e->bus->descr_alignment) < 0) {
- fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
+ fore200e_dma_chunk_free(fore200e, &rxq->status);
return -ENOMEM;
}
@@ -2271,7 +2202,7 @@ static int fore200e_init_tx_queue(struct fore200e *fore200e)
DPRINTK(2, "transmit queue is being initialized\n");
/* allocate and align the array of status words */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&txq->status,
sizeof(enum status),
QUEUE_SIZE_TX,
@@ -2280,13 +2211,13 @@ static int fore200e_init_tx_queue(struct fore200e *fore200e)
}
/* allocate and align the array of transmit PDU descriptors */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&txq->tpd,
sizeof(struct tpd),
QUEUE_SIZE_TX,
fore200e->bus->descr_alignment) < 0) {
- fore200e->bus->dma_chunk_free(fore200e, &txq->status);
+ fore200e_dma_chunk_free(fore200e, &txq->status);
return -ENOMEM;
}
@@ -2333,7 +2264,7 @@ static int fore200e_init_cmd_queue(struct fore200e *fore200e)
DPRINTK(2, "command queue is being initialized\n");
/* allocate and align the array of status words */
- if (fore200e->bus->dma_chunk_alloc(fore200e,
+ if (fore200e_dma_chunk_alloc(fore200e,
&cmdq->status,
sizeof(enum status),
QUEUE_SIZE_CMD,
@@ -2487,25 +2418,15 @@ static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
static int fore200e_load_and_start_fw(struct fore200e *fore200e)
{
const struct firmware *firmware;
- struct device *device;
const struct fw_header *fw_header;
const __le32 *fw_data;
u32 fw_size;
u32 __iomem *load_addr;
char buf[48];
- int err = -ENODEV;
-
- if (strcmp(fore200e->bus->model_name, "PCA-200E") == 0)
- device = &((struct pci_dev *) fore200e->bus_dev)->dev;
-#ifdef CONFIG_SBUS
- else if (strcmp(fore200e->bus->model_name, "SBA-200E") == 0)
- device = &((struct platform_device *) fore200e->bus_dev)->dev;
-#endif
- else
- return err;
+ int err;
sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
- if ((err = request_firmware(&firmware, buf, device)) < 0) {
+ if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) {
printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
return err;
}
@@ -2631,7 +2552,6 @@ static const struct of_device_id fore200e_sba_match[];
static int fore200e_sba_probe(struct platform_device *op)
{
const struct of_device_id *match;
- const struct fore200e_bus *bus;
struct fore200e *fore200e;
static int index = 0;
int err;
@@ -2639,18 +2559,17 @@ static int fore200e_sba_probe(struct platform_device *op)
match = of_match_device(fore200e_sba_match, &op->dev);
if (!match)
return -EINVAL;
- bus = match->data;
fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
if (!fore200e)
return -ENOMEM;
- fore200e->bus = bus;
- fore200e->bus_dev = op;
+ fore200e->bus = &fore200e_sbus_ops;
+ fore200e->dev = &op->dev;
fore200e->irq = op->archdata.irqs[0];
fore200e->phys_base = op->resource[0].start;
- sprintf(fore200e->name, "%s-%d", bus->model_name, index);
+ sprintf(fore200e->name, "SBA-200E-%d", index);
err = fore200e_init(fore200e, &op->dev);
if (err < 0) {
@@ -2678,7 +2597,6 @@ static int fore200e_sba_remove(struct platform_device *op)
static const struct of_device_id fore200e_sba_match[] = {
{
.name = SBA200E_PROM_NAME,
- .data = (void *) &fore200e_bus[1],
},
{},
};
@@ -2698,7 +2616,6 @@ static struct platform_driver fore200e_sba_driver = {
static int fore200e_pca_detect(struct pci_dev *pci_dev,
const struct pci_device_id *pci_ent)
{
- const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
struct fore200e* fore200e;
int err = 0;
static int index = 0;
@@ -2719,20 +2636,19 @@ static int fore200e_pca_detect(struct pci_dev *pci_dev,
goto out_disable;
}
- fore200e->bus = bus;
- fore200e->bus_dev = pci_dev;
+ fore200e->bus = &fore200e_pci_ops;
+ fore200e->dev = &pci_dev->dev;
fore200e->irq = pci_dev->irq;
fore200e->phys_base = pci_resource_start(pci_dev, 0);
- sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
+ sprintf(fore200e->name, "PCA-200E-%d", index - 1);
pci_set_master(pci_dev);
- printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
- fore200e->bus->model_name,
+ printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n",
fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
- sprintf(fore200e->name, "%s-%d", bus->model_name, index);
+ sprintf(fore200e->name, "PCA-200E-%d", index);
err = fore200e_init(fore200e, &pci_dev->dev);
if (err < 0) {
@@ -2767,8 +2683,7 @@ static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
static const struct pci_device_id fore200e_pca_tbl[] = {
- { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, (unsigned long) &fore200e_bus[0] },
+ { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID },
{ 0, }
};
@@ -3108,8 +3023,7 @@ module_init(fore200e_module_init);
module_exit(fore200e_module_cleanup);
-static const struct atmdev_ops fore200e_ops =
-{
+static const struct atmdev_ops fore200e_ops = {
.open = fore200e_open,
.close = fore200e_close,
.ioctl = fore200e_ioctl,
@@ -3121,53 +3035,6 @@ static const struct atmdev_ops fore200e_ops =
.owner = THIS_MODULE
};
-
-static const struct fore200e_bus fore200e_bus[] = {
-#ifdef CONFIG_PCI
- { "PCA-200E", "pca200e", 32, 4, 32,
- fore200e_pca_read,
- fore200e_pca_write,
- fore200e_pca_dma_map,
- fore200e_pca_dma_unmap,
- fore200e_pca_dma_sync_for_cpu,
- fore200e_pca_dma_sync_for_device,
- fore200e_pca_dma_chunk_alloc,
- fore200e_pca_dma_chunk_free,
- fore200e_pca_configure,
- fore200e_pca_map,
- fore200e_pca_reset,
- fore200e_pca_prom_read,
- fore200e_pca_unmap,
- NULL,
- fore200e_pca_irq_check,
- fore200e_pca_irq_ack,
- fore200e_pca_proc_read,
- },
-#endif
-#ifdef CONFIG_SBUS
- { "SBA-200E", "sba200e", 32, 64, 32,
- fore200e_sba_read,
- fore200e_sba_write,
- fore200e_sba_dma_map,
- fore200e_sba_dma_unmap,
- fore200e_sba_dma_sync_for_cpu,
- fore200e_sba_dma_sync_for_device,
- fore200e_sba_dma_chunk_alloc,
- fore200e_sba_dma_chunk_free,
- fore200e_sba_configure,
- fore200e_sba_map,
- fore200e_sba_reset,
- fore200e_sba_prom_read,
- fore200e_sba_unmap,
- fore200e_sba_irq_enable,
- fore200e_sba_irq_check,
- fore200e_sba_irq_ack,
- fore200e_sba_proc_read,
- },
-#endif
- {}
-};
-
MODULE_LICENSE("GPL");
#ifdef CONFIG_PCI
#ifdef __LITTLE_ENDIAN__
diff --git a/drivers/atm/fore200e.h b/drivers/atm/fore200e.h
index c8a02c8fba15..caf0ea6a328a 100644
--- a/drivers/atm/fore200e.h
+++ b/drivers/atm/fore200e.h
@@ -805,12 +805,6 @@ typedef struct fore200e_bus {
int status_alignment; /* status words DMA alignment requirement */
u32 (*read)(volatile u32 __iomem *);
void (*write)(u32, volatile u32 __iomem *);
- u32 (*dma_map)(struct fore200e*, void*, int, int);
- void (*dma_unmap)(struct fore200e*, u32, int, int);
- void (*dma_sync_for_cpu)(struct fore200e*, u32, int, int);
- void (*dma_sync_for_device)(struct fore200e*, u32, int, int);
- int (*dma_chunk_alloc)(struct fore200e*, struct chunk*, int, int, int);
- void (*dma_chunk_free)(struct fore200e*, struct chunk*);
int (*configure)(struct fore200e*);
int (*map)(struct fore200e*);
void (*reset)(struct fore200e*);
@@ -844,7 +838,7 @@ typedef struct fore200e {
enum fore200e_state state; /* device state */
char name[16]; /* device name */
- void* bus_dev; /* bus-specific kernel data */
+ struct device *dev;
int irq; /* irq number */
unsigned long phys_base; /* physical base address */
void __iomem * virt_base; /* virtual base address */
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index cbec9adc01c7..ae4aa02e4dc6 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -2689,11 +2689,10 @@ static void ns_poll(struct timer_list *unused)
PRINTK("nicstar: Entering ns_poll().\n");
for (i = 0; i < num_cards; i++) {
card = cards[i];
- if (spin_is_locked(&card->int_lock)) {
+ if (!spin_trylock_irqsave(&card->int_lock, flags)) {
/* Probably it isn't worth spinning */
continue;
}
- spin_lock_irqsave(&card->int_lock, flags);
stat_w = 0;
stat_r = readl(card->membase + STAT);
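
Editor's note: the nicstar change above closes a race. Testing spin_is_locked() and then taking the lock are two separate steps, and the lock's state can change between them; spin_trylock_irqsave() folds the test and the acquisition into one atomic operation. A sketch of both patterns, assuming a generic lock:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    static void example_poll(void)
    {
            unsigned long flags;

            /* Racy: the owner may drop the lock right after the check,
             * or grab it right before spin_lock_irqsave() below.
             *
             *      if (spin_is_locked(&example_lock))
             *              return;
             *      spin_lock_irqsave(&example_lock, flags);
             */

            /* Atomic test-and-acquire: */
            if (!spin_trylock_irqsave(&example_lock, flags))
                    return;         /* contended: not worth spinning here */

            /* ... critical section ... */

            spin_unlock_irqrestore(&example_lock, flags);
    }
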
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index e89146ddede6..d5c76b50d357 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -126,7 +126,7 @@ static unsigned long dummy[2] = {0,0};
#define zin_n(r) inl(zatm_dev->base+r*4)
#define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
#define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
-#define zwait while (zin(CMR) & uPD98401_BUSY)
+#define zwait() do {} while (zin(CMR) & uPD98401_BUSY)
/* RX0, RX1, TX0, TX1 */
static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
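
Editor's note: the macro change above is the usual statement-like-macro fix. The old zwait expanded to a bare while whose loop body was whatever statement followed the macro, so "zwait;" only worked because the semicolon is an empty statement. Giving the busy-wait its own empty do { } while body makes every call site a self-contained statement. Schematically:

    /* Old: the next token becomes the loop body.
     *      #define zwait   while (zin(CMR) & uPD98401_BUSY)
     *      zwait;                  // OK: ';' is the (empty) body
     *      zwait                   // forgotten ';': the following
     *          zout(v, CER);       // statement becomes the loop body
     *
     * New: the body is built in, call sites read like function calls.
     */
    #define zwait() do { } while (zin(CMR) & uPD98401_BUSY)
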
@@ -140,7 +140,7 @@ static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */
static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
{
- zwait;
+ zwait();
zout(value,CER);
zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
(uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
@@ -149,10 +149,10 @@ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
{
- zwait;
+ zwait();
zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
(uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
- zwait;
+ zwait();
return zin(CER);
}
@@ -241,7 +241,7 @@ static void refill_pool(struct atm_dev *dev,int pool)
}
if (first) {
spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait;
+ zwait();
zout(virt_to_bus(first),CER);
zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
CMR);
@@ -508,9 +508,9 @@ static int open_rx_first(struct atm_vcc *vcc)
}
if (zatm_vcc->pool < 0) return -EMSGSIZE;
spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait;
+ zwait();
zout(uPD98401_OPEN_CHAN,CMR);
- zwait;
+ zwait();
DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
spin_unlock_irqrestore(&zatm_dev->lock, flags);
@@ -571,21 +571,21 @@ static void close_rx(struct atm_vcc *vcc)
pos = vcc->vci >> 1;
shift = (1-(vcc->vci & 1)) << 4;
zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
- zwait;
+ zwait();
zout(uPD98401_NOP,CMR);
- zwait;
+ zwait();
zout(uPD98401_NOP,CMR);
spin_unlock_irqrestore(&zatm_dev->lock, flags);
}
spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait;
+ zwait();
zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait;
+ zwait();
udelay(10); /* why oh why ... ? */
zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait;
+ zwait();
if (!(zin(CMR) & uPD98401_CHAN_ADDR))
printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
"%d\n",vcc->dev->number,zatm_vcc->rx_chan);
@@ -699,7 +699,7 @@ printk("NONONONOO!!!!\n");
skb_queue_tail(&zatm_vcc->tx_queue,skb);
DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
uPD98401_TXVC_QRP));
- zwait;
+ zwait();
zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
uPD98401_CHAN_ADDR_SHIFT),CMR);
spin_unlock_irqrestore(&zatm_dev->lock, flags);
@@ -891,12 +891,12 @@ static void close_tx(struct atm_vcc *vcc)
}
spin_lock_irqsave(&zatm_dev->lock, flags);
#if 0
- zwait;
+ zwait();
zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
#endif
- zwait;
+ zwait();
zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait;
+ zwait();
if (!(zin(CMR) & uPD98401_CHAN_ADDR))
printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
"%d\n",vcc->dev->number,chan);
@@ -926,9 +926,9 @@ static int open_tx_first(struct atm_vcc *vcc)
zatm_vcc->tx_chan = 0;
if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait;
+ zwait();
zout(uPD98401_OPEN_CHAN,CMR);
- zwait;
+ zwait();
DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
spin_unlock_irqrestore(&zatm_dev->lock, flags);
@@ -1557,7 +1557,7 @@ static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
struct zatm_dev *zatm_dev;
zatm_dev = ZATM_DEV(dev);
- zwait;
+ zwait();
zout(value,CER);
zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
(uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
@@ -1569,10 +1569,10 @@ static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
struct zatm_dev *zatm_dev;
zatm_dev = ZATM_DEV(dev);
- zwait;
+ zwait();
zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
(uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
- zwait;
+ zwait();
return zin(CER) & 0xff;
}
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index f1a42f0f1ded..9ad93ea42fdc 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -62,20 +62,15 @@ static void hd44780_strobe_gpio(struct hd44780 *hd)
/* write to an LCD panel register in 8 bit GPIO mode */
static void hd44780_write_gpio8(struct hd44780 *hd, u8 val, unsigned int rs)
{
- int values[10]; /* for DATA[0-7], RS, RW */
- unsigned int i, n;
-
- for (i = 0; i < 8; i++)
- values[PIN_DATA0 + i] = !!(val & BIT(i));
- values[PIN_CTRL_RS] = rs;
- n = 9;
- if (hd->pins[PIN_CTRL_RW]) {
- values[PIN_CTRL_RW] = 0;
- n++;
- }
+ DECLARE_BITMAP(values, 10); /* for DATA[0-7], RS, RW */
+ unsigned int n;
+
+ values[0] = val;
+ __assign_bit(8, values, rs);
+ n = hd->pins[PIN_CTRL_RW] ? 10 : 9;
/* Present the data to the port */
- gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA0], values);
+ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA0], NULL, values);
hd44780_strobe_gpio(hd);
}
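
Editor's note: the conversion above relies on the new gpiod_set_array_value_cansleep() taking its values as a bitmap in which bit i drives descriptor i of the GPIO array, so the data byte can be assigned into the first bitmap word directly instead of being unpacked into one int per pin. A sketch of the packing, under that layout assumption:

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    static void example_pack(unsigned long *values, u8 val, int rs)
    {
            /* bit i of values[] = level of GPIO descriptor i */
            bitmap_zero(values, 10);        /* DATA0..7, RS, RW */
            values[0] = val;                /* bits 0-7: the data byte */
            __assign_bit(8, values, rs);    /* bit 8: register select */
            /* bit 9 (RW) stays 0: this is always a write */
    }
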
@@ -83,32 +78,25 @@ static void hd44780_write_gpio8(struct hd44780 *hd, u8 val, unsigned int rs)
/* write to an LCD panel register in 4 bit GPIO mode */
static void hd44780_write_gpio4(struct hd44780 *hd, u8 val, unsigned int rs)
{
- int values[10]; /* for DATA[0-7], RS, RW, but DATA[0-3] is unused */
- unsigned int i, n;
+ DECLARE_BITMAP(values, 6); /* for DATA[4-7], RS, RW */
+ unsigned int n;
/* High nibble + RS, RW */
- for (i = 4; i < 8; i++)
- values[PIN_DATA0 + i] = !!(val & BIT(i));
- values[PIN_CTRL_RS] = rs;
- n = 5;
- if (hd->pins[PIN_CTRL_RW]) {
- values[PIN_CTRL_RW] = 0;
- n++;
- }
+ values[0] = val >> 4;
+ __assign_bit(4, values, rs);
+ n = hd->pins[PIN_CTRL_RW] ? 6 : 5;
/* Present the data to the port */
- gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4],
- &values[PIN_DATA4]);
+ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values);
hd44780_strobe_gpio(hd);
/* Low nibble */
- for (i = 0; i < 4; i++)
- values[PIN_DATA4 + i] = !!(val & BIT(i));
+ values[0] &= ~0x0fUL;
+ values[0] |= val & 0x0f;
/* Present the data to the port */
- gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4],
- &values[PIN_DATA4]);
+ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values);
hd44780_strobe_gpio(hd);
}
@@ -155,23 +143,16 @@ static void hd44780_write_cmd_gpio4(struct charlcd *lcd, int cmd)
/* Send 4-bits of a command to the LCD panel in raw 4 bit GPIO mode */
static void hd44780_write_cmd_raw_gpio4(struct charlcd *lcd, int cmd)
{
- int values[10]; /* for DATA[0-7], RS, RW, but DATA[0-3] is unused */
+ DECLARE_BITMAP(values, 6); /* for DATA[4-7], RS, RW */
struct hd44780 *hd = lcd->drvdata;
- unsigned int i, n;
+ unsigned int n;
/* Command nibble + RS, RW */
- for (i = 0; i < 4; i++)
- values[PIN_DATA4 + i] = !!(cmd & BIT(i));
- values[PIN_CTRL_RS] = 0;
- n = 5;
- if (hd->pins[PIN_CTRL_RW]) {
- values[PIN_CTRL_RW] = 0;
- n++;
- }
+ values[0] = cmd & 0x0f;
+ n = hd->pins[PIN_CTRL_RW] ? 6 : 5;
/* Present the data to the port */
- gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4],
- &values[PIN_DATA4]);
+ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values);
hd44780_strobe_gpio(hd);
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index e7cb0c6ade81..edfcf8d982e4 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
+#include <linux/cpuset.h>
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
@@ -47,6 +48,9 @@ static ssize_t cpu_capacity_show(struct device *dev,
return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
}
+static void update_topology_flags_workfn(struct work_struct *work);
+static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
+
static ssize_t cpu_capacity_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -72,6 +76,8 @@ static ssize_t cpu_capacity_store(struct device *dev,
topology_set_cpu_scale(i, new_capacity);
mutex_unlock(&cpu_scale_mutex);
+ schedule_work(&update_topology_flags_work);
+
return count;
}
@@ -96,6 +102,25 @@ static int register_cpu_capacity_sysctl(void)
}
subsys_initcall(register_cpu_capacity_sysctl);
+static int update_topology;
+
+int topology_update_cpu_topology(void)
+{
+ return update_topology;
+}
+
+/*
+ * Updating the sched_domains can't be done directly from cpufreq callbacks
+ * due to locking, so queue the work for later.
+ */
+static void update_topology_flags_workfn(struct work_struct *work)
+{
+ update_topology = 1;
+ rebuild_sched_domains();
+ pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
+ update_topology = 0;
+}
+
static u32 capacity_scale;
static u32 *raw_capacity;
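
Editor's note: update_topology_flags_work above is the standard deferral pattern. A path that cannot call rebuild_sched_domains() directly (because of the locks the rebuild needs) just schedules a work item, and the heavy work runs later in process context. In generic form:

    #include <linux/workqueue.h>

    static void example_rebuild_workfn(struct work_struct *work)
    {
            /* runs in process context; may sleep and take heavy locks */
    }
    static DECLARE_WORK(example_rebuild_work, example_rebuild_workfn);

    static void example_hot_path(void)
    {
            /* cheap and non-blocking; safe from restricted contexts */
            schedule_work(&example_rebuild_work);
    }
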
@@ -201,6 +226,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
if (cpumask_empty(cpus_to_visit)) {
topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
free_raw_capacity();
pr_debug("cpu_capacity: parsing done\n");
schedule_work(&parsing_done_work);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 5d5b5988e88b..cf78fa6d470d 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -615,6 +615,8 @@ static int cache_add_dev(unsigned int cpu)
this_leaf = this_cpu_ci->info_list + i;
if (this_leaf->disable_sysfs)
continue;
+ if (this_leaf->type == CACHE_TYPE_NOCACHE)
+ break;
cache_groups = cache_get_attribute_groups(this_leaf);
ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
"index%1u", i);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index edfc9f0b1180..169412ee4ae8 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -480,9 +480,11 @@ re_probe:
if (ret)
goto pinctrl_bind_failed;
- ret = dma_configure(dev);
- if (ret)
- goto dma_failed;
+ if (dev->bus->dma_configure) {
+ ret = dev->bus->dma_configure(dev);
+ if (ret)
+ goto dma_failed;
+ }
if (driver_sysfs_add(dev)) {
printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
@@ -537,7 +539,7 @@ re_probe:
goto done;
probe_failed:
- dma_deconfigure(dev);
+ arch_teardown_dma_ops(dev);
dma_failed:
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
@@ -966,7 +968,7 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv->remove(dev);
device_links_driver_cleanup(dev);
- dma_deconfigure(dev);
+ arch_teardown_dma_ops(dev);
devres_release_all(dev);
dev->driver = NULL;
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index b3c0498ee433..8e9213b36e31 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -226,8 +226,11 @@ static int alloc_lookup_fw_priv(const char *fw_name,
}
tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
- if (tmp && !(opt_flags & FW_OPT_NOCACHE))
- list_add(&tmp->list, &fwc->head);
+ if (tmp) {
+ INIT_LIST_HEAD(&tmp->list);
+ if (!(opt_flags & FW_OPT_NOCACHE))
+ list_add(&tmp->list, &fwc->head);
+ }
spin_unlock(&fwc->lock);
*fw_priv = tmp;
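
Editor's note: the fix above matters because FW_OPT_NOCACHE entries are never linked into fwc->head, so a later list_del() on a node whose list_head was only zeroed by the allocator would dereference NULL. Initializing the head unconditionally makes deletion safe whether or not the node was ever added. A reduced sketch with hypothetical names:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct example_item {
            struct list_head list;
    };

    static struct example_item *example_alloc(struct list_head *cache,
                                              bool nocache)
    {
            struct example_item *it = kzalloc(sizeof(*it), GFP_KERNEL);

            if (!it)
                    return NULL;
            /* Self-link first: list_del() on a zeroed head oopses,
             * but on an initialized head it is harmless. */
            INIT_LIST_HEAD(&it->list);
            if (!nocache)
                    list_add(&it->list, cache);
            return it;
    }
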
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 60d6cc618f1c..f39a920496fb 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -321,11 +321,12 @@ void *platform_msi_get_host_data(struct irq_domain *domain)
* Returns an irqdomain for @nvec interrupts
*/
struct irq_domain *
-platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data)
+__platform_msi_create_device_domain(struct device *dev,
+ unsigned int nvec,
+ bool is_tree,
+ irq_write_msi_msg_t write_msi_msg,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
struct platform_msi_priv_data *data;
struct irq_domain *domain;
@@ -336,7 +337,8 @@ platform_msi_create_device_domain(struct device *dev,
return NULL;
data->host_data = host_data;
- domain = irq_domain_create_hierarchy(dev->msi_domain, 0, nvec,
+ domain = irq_domain_create_hierarchy(dev->msi_domain, 0,
+ is_tree ? 0 : nvec,
dev->fwnode, ops, data);
if (!domain)
goto free_priv;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index dff82a3c2caa..23cf4427f425 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1180,7 +1180,7 @@ int __init platform_bus_init(void)
}
#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
-u64 dma_get_required_mask(struct device *dev)
+static u64 dma_default_get_required_mask(struct device *dev)
{
u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
@@ -1198,6 +1198,15 @@ u64 dma_get_required_mask(struct device *dev)
}
return mask;
}
+
+u64 dma_get_required_mask(struct device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->get_required_mask)
+ return ops->get_required_mask(dev);
+ return dma_default_get_required_mask(dev);
+}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif
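
Editor's note: the refactor above turns the old exported function into a default and dispatches through dma_map_ops first. The shape is the familiar optional-hook-with-fallback, sketched here with hypothetical names:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    struct example_ops {
            u64 (*get_required_mask)(struct device *dev);   /* optional */
    };

    static u64 example_get_required_mask(struct device *dev,
                                         const struct example_ops *ops)
    {
            if (ops && ops->get_required_mask)
                    return ops->get_required_mask(dev);
            return DMA_BIT_MASK(32);        /* conservative generic default */
    }
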
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 4b5714199490..7f38a92b444a 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -467,6 +467,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
return -EAGAIN;
}
+ /* Default to shallowest state. */
+ if (!genpd->gov)
+ genpd->state_idx = 0;
+
if (genpd->power_off) {
int ret;
@@ -1687,6 +1691,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
ret = genpd_set_default_power_state(genpd);
if (ret)
return ret;
+ } else if (!gov) {
+ pr_warn("%s : no governor for states\n", genpd->name);
}
device_initialize(&genpd->dev);
@@ -2478,8 +2484,8 @@ static int genpd_iterate_idle_states(struct device_node *dn,
*
* Returns the device states parsed from the OF node. The memory for the states
* is allocated by this function and is the responsibility of the caller to
- * free the memory after use. If no domain idle states is found it returns
- * -EINVAL and in case of errors, a negative error code.
+ * free the memory after use. Returns 0 whether compatible domain idle states
+ * are found or not; in case of errors, a negative error code is returned.
*/
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n)
@@ -2488,8 +2494,14 @@ int of_genpd_parse_idle_states(struct device_node *dn,
int ret;
ret = genpd_iterate_idle_states(dn, NULL);
- if (ret <= 0)
- return ret < 0 ? ret : -EINVAL;
+ if (ret < 0)
+ return ret;
+
+ if (!ret) {
+ *states = NULL;
+ *n = 0;
+ return 0;
+ }
st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
if (!st)
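
Editor's note: under the new contract documented above, a caller can treat "no compatible idle states" as a successful, empty result rather than a special -EINVAL case. A hypothetical caller:

    #include <linux/of.h>
    #include <linux/pm_domain.h>

    static int example_attach_states(struct device_node *np,
                                     struct generic_pm_domain *genpd)
    {
            struct genpd_power_state *states;
            int n, ret;

            ret = of_genpd_parse_idle_states(np, &states, &n);
            if (ret < 0)
                    return ret;     /* genuine parse/alloc failure */

            /* n == 0 (and states == NULL) is now a valid, empty result */
            genpd->states = states;
            genpd->state_count = n;
            return 0;
    }
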
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 3f68e2919dc5..a690fd400260 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1713,8 +1713,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (async_error) {
+ dev->power.direct_complete = false;
goto Complete;
+ }
/*
* If a device configured to wake up the system from sleep states
@@ -1726,6 +1728,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_wakeup_event(dev, 0);
if (pm_wakeup_pending()) {
+ dev->power.direct_complete = false;
async_error = -EBUSY;
goto Complete;
}
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index a6bf34d6394e..a98fced9bff8 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -94,11 +94,13 @@ struct regmap {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg);
bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *wr_noinc_table;
const struct regmap_access_table *rd_noinc_table;
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
@@ -149,7 +151,7 @@ struct regmap {
/* if set, converts bulk read to single read */
bool use_single_read;
- /* if set, converts bulk read to single read */
+ /* if set, converts bulk write to single write */
bool use_single_write;
/* if set, the device supports multi write mode */
bool can_multi_write;
@@ -183,6 +185,7 @@ bool regmap_writeable(struct regmap *map, unsigned int reg);
bool regmap_readable(struct regmap *map, unsigned int reg);
bool regmap_volatile(struct regmap *map, unsigned int reg);
bool regmap_precious(struct regmap *map, unsigned int reg);
+bool regmap_writeable_noinc(struct regmap *map, unsigned int reg);
bool regmap_readable_noinc(struct regmap *map, unsigned int reg);
int _regmap_write(struct regmap *map, unsigned int reg,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0360a90ad6b6..4f822e087def 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -35,6 +35,16 @@
*/
#undef LOG_DEVICE
+#ifdef LOG_DEVICE
+static inline bool regmap_should_log(struct regmap *map)
+{
+ return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
+}
+#else
+static inline bool regmap_should_log(struct regmap *map) { return false; }
+#endif
+
+
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool force_write);
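
Editor's note: the regmap_should_log() helper above replaces scattered #ifdef blocks with an always-defined predicate. Call sites compile in both configurations, so they cannot bit-rot, and when the feature is off the constant false lets the compiler eliminate the branch entirely. The generic shape:

    #include <linux/printk.h>

    #ifdef EXAMPLE_DEBUG
    static inline bool example_debug_enabled(void) { return true; }
    #else
    static inline bool example_debug_enabled(void) { return false; }
    #endif

    static void example(void)
    {
            /* always type-checked and compiled; folded away when disabled */
            if (example_debug_enabled())
                    pr_info("debug path taken\n");
    }
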
@@ -168,6 +178,17 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
return false;
}
+bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
+{
+ if (map->writeable_noinc_reg)
+ return map->writeable_noinc_reg(map->dev, reg);
+
+ if (map->wr_noinc_table)
+ return regmap_check_range_table(map, reg, map->wr_noinc_table);
+
+ return true;
+}
+
bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
if (map->readable_noinc_reg)
@@ -762,8 +783,8 @@ struct regmap *__regmap_init(struct device *dev,
map->reg_stride_order = ilog2(map->reg_stride);
else
map->reg_stride_order = -1;
- map->use_single_read = config->use_single_rw || !bus || !bus->read;
- map->use_single_write = config->use_single_rw || !bus || !bus->write;
+ map->use_single_read = config->use_single_read || !bus || !bus->read;
+ map->use_single_write = config->use_single_write || !bus || !bus->write;
map->can_multi_write = config->can_multi_write && bus && bus->write;
if (bus) {
map->max_raw_read = bus->max_raw_read;
@@ -777,11 +798,13 @@ struct regmap *__regmap_init(struct device *dev,
map->rd_table = config->rd_table;
map->volatile_table = config->volatile_table;
map->precious_table = config->precious_table;
+ map->wr_noinc_table = config->wr_noinc_table;
map->rd_noinc_table = config->rd_noinc_table;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
+ map->writeable_noinc_reg = config->writeable_noinc_reg;
map->readable_noinc_reg = config->readable_noinc_reg;
map->cache_type = config->cache_type;
@@ -1298,6 +1321,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
+ map->writeable_noinc_reg = config->writeable_noinc_reg;
map->readable_noinc_reg = config->readable_noinc_reg;
map->cache_type = config->cache_type;
@@ -1755,10 +1779,8 @@ int _regmap_write(struct regmap *map, unsigned int reg,
}
}
-#ifdef LOG_DEVICE
- if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ if (regmap_should_log(map))
dev_info(map->dev, "%x <= %x\n", reg, val);
-#endif
trace_regmap_reg_write(map, reg, val);
@@ -1898,6 +1920,69 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
EXPORT_SYMBOL_GPL(regmap_raw_write);
/**
+ * regmap_noinc_write(): Write data to a register without incrementing the
+ * register number
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Pointer to data buffer
+ * @val_len: Length of data buffer in bytes.
+ *
+ * The regmap API usually assumes that bulk bus write operations will write a
+ * range of registers. Some devices have certain registers for which a write
+ * operation can write to an internal FIFO.
+ *
+ * The target register must be volatile but registers after it can be
+ * completely unrelated cacheable registers.
+ *
+ * This will attempt multiple writes as required to write val_len bytes.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ size_t write_len;
+ int ret;
+
+ if (!map->bus)
+ return -EINVAL;
+ if (!map->bus->write)
+ return -ENOTSUPP;
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+ if (val_len == 0)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ while (val_len) {
+ if (map->max_raw_write && map->max_raw_write < val_len)
+ write_len = map->max_raw_write;
+ else
+ write_len = val_len;
+ ret = _regmap_raw_write(map, reg, val, write_len);
+ if (ret)
+ goto out_unlock;
+ val = ((u8 *)val) + write_len;
+ val_len -= write_len;
+ }
+
+out_unlock:
+ map->unlock(map->lock_arg);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_noinc_write);
+
+/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 * register field.
*
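
Editor's note: for illustration, a hypothetical driver fragment using the new API to stream a buffer into a device FIFO. The register number and names are made up; per the documentation above, the register must be volatile and pass the map's writeable_noinc checks:

    #include <linux/regmap.h>

    #define EXAMPLE_REG_FIFO        0x10    /* hypothetical FIFO register */

    static int example_push_fifo(struct regmap *map,
                                 const u8 *buf, size_t len)
    {
            /* every byte is written to the same register; the device
             * drains its internal FIFO as the data arrives */
            return regmap_noinc_write(map, EXAMPLE_REG_FIFO, buf, len);
    }
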
@@ -2450,10 +2535,8 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
ret = map->reg_read(context, reg, val);
if (ret == 0) {
-#ifdef LOG_DEVICE
- if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ if (regmap_should_log(map))
dev_info(map->dev, "%x => %x\n", reg, *val);
-#endif
trace_regmap_reg_read(map, reg, *val);
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
deleted file mode 100644
index 581312ac375f..000000000000
--- a/drivers/block/DAC960.c
+++ /dev/null
@@ -1,7229 +0,0 @@
-/*
-
- Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
-
- Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
- Portions Copyright 2002 by Mylex (An IBM Business Unit)
-
- This program is free software; you may redistribute and/or modify it under
- the terms of the GNU General Public License Version 2 as published by the
- Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- for complete details.
-
-*/
-
-
-#define DAC960_DriverVersion "2.5.49"
-#define DAC960_DriverDate "21 Aug 2007"
-
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-#include <linux/blkdev.h>
-#include <linux/bio.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/genhd.h>
-#include <linux/hdreg.h>
-#include <linux/blkpg.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/reboot.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include "DAC960.h"
-
-#define DAC960_GAM_MINOR 252
-
-
-static DEFINE_MUTEX(DAC960_mutex);
-static DAC960_Controller_T *DAC960_Controllers[DAC960_MaxControllers];
-static int DAC960_ControllerCount;
-static struct proc_dir_entry *DAC960_ProcDirectoryEntry;
-
-static long disk_size(DAC960_Controller_T *p, int drive_nr)
-{
- if (p->FirmwareType == DAC960_V1_Controller) {
- if (drive_nr >= p->LogicalDriveCount)
- return 0;
- return p->V1.LogicalDriveInformation[drive_nr].
- LogicalDriveSize;
- } else {
- DAC960_V2_LogicalDeviceInfo_T *i =
- p->V2.LogicalDeviceInformation[drive_nr];
- if (i == NULL)
- return 0;
- return i->ConfigurableDeviceSize;
- }
-}
-
-static int DAC960_open(struct block_device *bdev, fmode_t mode)
-{
- struct gendisk *disk = bdev->bd_disk;
- DAC960_Controller_T *p = disk->queue->queuedata;
- int drive_nr = (long)disk->private_data;
- int ret = -ENXIO;
-
- mutex_lock(&DAC960_mutex);
- if (p->FirmwareType == DAC960_V1_Controller) {
- if (p->V1.LogicalDriveInformation[drive_nr].
- LogicalDriveState == DAC960_V1_LogicalDrive_Offline)
- goto out;
- } else {
- DAC960_V2_LogicalDeviceInfo_T *i =
- p->V2.LogicalDeviceInformation[drive_nr];
- if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline)
- goto out;
- }
-
- check_disk_change(bdev);
-
- if (!get_capacity(p->disks[drive_nr]))
- goto out;
- ret = 0;
-out:
- mutex_unlock(&DAC960_mutex);
- return ret;
-}
-
-static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct gendisk *disk = bdev->bd_disk;
- DAC960_Controller_T *p = disk->queue->queuedata;
- int drive_nr = (long)disk->private_data;
-
- if (p->FirmwareType == DAC960_V1_Controller) {
- geo->heads = p->V1.GeometryTranslationHeads;
- geo->sectors = p->V1.GeometryTranslationSectors;
- geo->cylinders = p->V1.LogicalDriveInformation[drive_nr].
- LogicalDriveSize / (geo->heads * geo->sectors);
- } else {
- DAC960_V2_LogicalDeviceInfo_T *i =
- p->V2.LogicalDeviceInformation[drive_nr];
- switch (i->DriveGeometry) {
- case DAC960_V2_Geometry_128_32:
- geo->heads = 128;
- geo->sectors = 32;
- break;
- case DAC960_V2_Geometry_255_63:
- geo->heads = 255;
- geo->sectors = 63;
- break;
- default:
- DAC960_Error("Illegal Logical Device Geometry %d\n",
- p, i->DriveGeometry);
- return -EINVAL;
- }
-
- geo->cylinders = i->ConfigurableDeviceSize /
- (geo->heads * geo->sectors);
- }
-
- return 0;
-}
-
-static unsigned int DAC960_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- DAC960_Controller_T *p = disk->queue->queuedata;
- int drive_nr = (long)disk->private_data;
-
- if (!p->LogicalDriveInitiallyAccessible[drive_nr])
- return DISK_EVENT_MEDIA_CHANGE;
- return 0;
-}
-
-static int DAC960_revalidate_disk(struct gendisk *disk)
-{
- DAC960_Controller_T *p = disk->queue->queuedata;
- int unit = (long)disk->private_data;
-
- set_capacity(disk, disk_size(p, unit));
- return 0;
-}
-
-static const struct block_device_operations DAC960_BlockDeviceOperations = {
- .owner = THIS_MODULE,
- .open = DAC960_open,
- .getgeo = DAC960_getgeo,
- .check_events = DAC960_check_events,
- .revalidate_disk = DAC960_revalidate_disk,
-};
-
-
-/*
- DAC960_AnnounceDriver announces the Driver Version and Date, Author's Name,
- Copyright Notice, and Electronic Mail Address.
-*/
-
-static void DAC960_AnnounceDriver(DAC960_Controller_T *Controller)
-{
- DAC960_Announce("***** DAC960 RAID Driver Version "
- DAC960_DriverVersion " of "
- DAC960_DriverDate " *****\n", Controller);
- DAC960_Announce("Copyright 1998-2001 by Leonard N. Zubkoff "
- "<lnz@dandelion.com>\n", Controller);
-}
-
-
-/*
- DAC960_Failure prints a standardized error message, and then returns false.
-*/
-
-static bool DAC960_Failure(DAC960_Controller_T *Controller,
- unsigned char *ErrorMessage)
-{
- DAC960_Error("While configuring DAC960 PCI RAID Controller at\n",
- Controller);
- if (Controller->IO_Address == 0)
- DAC960_Error("PCI Bus %d Device %d Function %d I/O Address N/A "
- "PCI Address 0x%X\n", Controller,
- Controller->Bus, Controller->Device,
- Controller->Function, Controller->PCI_Address);
- else DAC960_Error("PCI Bus %d Device %d Function %d I/O Address "
- "0x%X PCI Address 0x%X\n", Controller,
- Controller->Bus, Controller->Device,
- Controller->Function, Controller->IO_Address,
- Controller->PCI_Address);
- DAC960_Error("%s FAILED - DETACHING\n", Controller, ErrorMessage);
- return false;
-}
-
-/*
- init_dma_loaf() and slice_dma_loaf() are helper functions for
- aggregating the dma-mapped memory for a well-known collection of
- data structures that are of different lengths.
-
- These routines don't guarantee any alignment. The caller must
- include any space needed for alignment in the sizes of the structures
- that are passed in.
- */
-
-static bool init_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf,
- size_t len)
-{
- void *cpu_addr;
- dma_addr_t dma_handle;
-
- cpu_addr = pci_alloc_consistent(dev, len, &dma_handle);
- if (cpu_addr == NULL)
- return false;
-
- loaf->cpu_free = loaf->cpu_base = cpu_addr;
- loaf->dma_free =loaf->dma_base = dma_handle;
- loaf->length = len;
- memset(cpu_addr, 0, len);
- return true;
-}
-
-static void *slice_dma_loaf(struct dma_loaf *loaf, size_t len,
- dma_addr_t *dma_handle)
-{
- void *cpu_end = loaf->cpu_free + len;
- void *cpu_addr = loaf->cpu_free;
-
- BUG_ON(cpu_end > loaf->cpu_base + loaf->length);
- *dma_handle = loaf->dma_free;
- loaf->cpu_free = cpu_end;
- loaf->dma_free += len;
- return cpu_addr;
-}
-
-static void free_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf_handle)
-{
- if (loaf_handle->cpu_base != NULL)
- pci_free_consistent(dev, loaf_handle->length,
- loaf_handle->cpu_base, loaf_handle->dma_base);
-}
-
-
-/*
- DAC960_CreateAuxiliaryStructures allocates and initializes the auxiliary
- data structures for Controller. It returns true on success and false on
- failure.
-*/
-
-static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
-{
- int CommandAllocationLength, CommandAllocationGroupSize;
- int CommandsRemaining = 0, CommandIdentifier, CommandGroupByteCount;
- void *AllocationPointer = NULL;
- void *ScatterGatherCPU = NULL;
- dma_addr_t ScatterGatherDMA;
- struct dma_pool *ScatterGatherPool;
- void *RequestSenseCPU = NULL;
- dma_addr_t RequestSenseDMA;
- struct dma_pool *RequestSensePool = NULL;
-
- if (Controller->FirmwareType == DAC960_V1_Controller)
- {
- CommandAllocationLength = offsetof(DAC960_Command_T, V1.EndMarker);
- CommandAllocationGroupSize = DAC960_V1_CommandAllocationGroupSize;
- ScatterGatherPool = dma_pool_create("DAC960_V1_ScatterGather",
- &Controller->PCIDevice->dev,
- DAC960_V1_ScatterGatherLimit * sizeof(DAC960_V1_ScatterGatherSegment_T),
- sizeof(DAC960_V1_ScatterGatherSegment_T), 0);
- if (ScatterGatherPool == NULL)
- return DAC960_Failure(Controller,
- "AUXILIARY STRUCTURE CREATION (SG)");
- Controller->ScatterGatherPool = ScatterGatherPool;
- }
- else
- {
- CommandAllocationLength = offsetof(DAC960_Command_T, V2.EndMarker);
- CommandAllocationGroupSize = DAC960_V2_CommandAllocationGroupSize;
- ScatterGatherPool = dma_pool_create("DAC960_V2_ScatterGather",
- &Controller->PCIDevice->dev,
- DAC960_V2_ScatterGatherLimit * sizeof(DAC960_V2_ScatterGatherSegment_T),
- sizeof(DAC960_V2_ScatterGatherSegment_T), 0);
- if (ScatterGatherPool == NULL)
- return DAC960_Failure(Controller,
- "AUXILIARY STRUCTURE CREATION (SG)");
- RequestSensePool = dma_pool_create("DAC960_V2_RequestSense",
- &Controller->PCIDevice->dev, sizeof(DAC960_SCSI_RequestSense_T),
- sizeof(int), 0);
- if (RequestSensePool == NULL) {
- dma_pool_destroy(ScatterGatherPool);
- return DAC960_Failure(Controller,
- "AUXILIARY STRUCTURE CREATION (SG)");
- }
- Controller->ScatterGatherPool = ScatterGatherPool;
- Controller->V2.RequestSensePool = RequestSensePool;
- }
- Controller->CommandAllocationGroupSize = CommandAllocationGroupSize;
- Controller->FreeCommands = NULL;
- for (CommandIdentifier = 1;
- CommandIdentifier <= Controller->DriverQueueDepth;
- CommandIdentifier++)
- {
- DAC960_Command_T *Command;
- if (--CommandsRemaining <= 0)
- {
- CommandsRemaining =
- Controller->DriverQueueDepth - CommandIdentifier + 1;
- if (CommandsRemaining > CommandAllocationGroupSize)
- CommandsRemaining = CommandAllocationGroupSize;
- CommandGroupByteCount =
- CommandsRemaining * CommandAllocationLength;
- AllocationPointer = kzalloc(CommandGroupByteCount, GFP_ATOMIC);
- if (AllocationPointer == NULL)
- return DAC960_Failure(Controller,
- "AUXILIARY STRUCTURE CREATION");
- }
- Command = (DAC960_Command_T *) AllocationPointer;
- AllocationPointer += CommandAllocationLength;
- Command->CommandIdentifier = CommandIdentifier;
- Command->Controller = Controller;
- Command->Next = Controller->FreeCommands;
- Controller->FreeCommands = Command;
- Controller->Commands[CommandIdentifier-1] = Command;
- ScatterGatherCPU = dma_pool_alloc(ScatterGatherPool, GFP_ATOMIC,
- &ScatterGatherDMA);
- if (ScatterGatherCPU == NULL)
- return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION");
-
- if (RequestSensePool != NULL) {
- RequestSenseCPU = dma_pool_alloc(RequestSensePool, GFP_ATOMIC,
- &RequestSenseDMA);
- if (RequestSenseCPU == NULL) {
- dma_pool_free(ScatterGatherPool, ScatterGatherCPU,
- ScatterGatherDMA);
- return DAC960_Failure(Controller,
- "AUXILIARY STRUCTURE CREATION");
- }
- }
- if (Controller->FirmwareType == DAC960_V1_Controller) {
- Command->cmd_sglist = Command->V1.ScatterList;
- Command->V1.ScatterGatherList =
- (DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU;
- Command->V1.ScatterGatherListDMA = ScatterGatherDMA;
- sg_init_table(Command->cmd_sglist, DAC960_V1_ScatterGatherLimit);
- } else {
- Command->cmd_sglist = Command->V2.ScatterList;
- Command->V2.ScatterGatherList =
- (DAC960_V2_ScatterGatherSegment_T *)ScatterGatherCPU;
- Command->V2.ScatterGatherListDMA = ScatterGatherDMA;
- Command->V2.RequestSense =
- (DAC960_SCSI_RequestSense_T *)RequestSenseCPU;
- Command->V2.RequestSenseDMA = RequestSenseDMA;
- sg_init_table(Command->cmd_sglist, DAC960_V2_ScatterGatherLimit);
- }
- }
- return true;
-}
-
-
-/*
- DAC960_DestroyAuxiliaryStructures deallocates the auxiliary data
- structures for Controller.
-*/
-
-static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller)
-{
- int i;
- struct dma_pool *ScatterGatherPool = Controller->ScatterGatherPool;
- struct dma_pool *RequestSensePool = NULL;
- void *ScatterGatherCPU;
- dma_addr_t ScatterGatherDMA;
- void *RequestSenseCPU;
- dma_addr_t RequestSenseDMA;
- DAC960_Command_T *CommandGroup = NULL;
-
-
- if (Controller->FirmwareType == DAC960_V2_Controller)
- RequestSensePool = Controller->V2.RequestSensePool;
-
- Controller->FreeCommands = NULL;
- for (i = 0; i < Controller->DriverQueueDepth; i++)
- {
- DAC960_Command_T *Command = Controller->Commands[i];
-
- if (Command == NULL)
- continue;
-
- if (Controller->FirmwareType == DAC960_V1_Controller) {
- ScatterGatherCPU = (void *)Command->V1.ScatterGatherList;
- ScatterGatherDMA = Command->V1.ScatterGatherListDMA;
- RequestSenseCPU = NULL;
- RequestSenseDMA = (dma_addr_t)0;
- } else {
- ScatterGatherCPU = (void *)Command->V2.ScatterGatherList;
- ScatterGatherDMA = Command->V2.ScatterGatherListDMA;
- RequestSenseCPU = (void *)Command->V2.RequestSense;
- RequestSenseDMA = Command->V2.RequestSenseDMA;
- }
- if (ScatterGatherCPU != NULL)
- dma_pool_free(ScatterGatherPool, ScatterGatherCPU, ScatterGatherDMA);
- if (RequestSenseCPU != NULL)
- dma_pool_free(RequestSensePool, RequestSenseCPU, RequestSenseDMA);
-
- if ((Command->CommandIdentifier
- % Controller->CommandAllocationGroupSize) == 1) {
- /*
- * We can't free the group of commands until all of the
- * request sense and scatter gather dma structures are free.
- * Remember the beginning of the group, but don't free it
- * until we've reached the beginning of the next group.
- */
- kfree(CommandGroup);
- CommandGroup = Command;
- }
- Controller->Commands[i] = NULL;
- }
- kfree(CommandGroup);
-
- if (Controller->CombinedStatusBuffer != NULL)
- {
- kfree(Controller->CombinedStatusBuffer);
- Controller->CombinedStatusBuffer = NULL;
- Controller->CurrentStatusBuffer = NULL;
- }
-
- dma_pool_destroy(ScatterGatherPool);
- if (Controller->FirmwareType == DAC960_V1_Controller)
- return;
-
- dma_pool_destroy(RequestSensePool);
-
- for (i = 0; i < DAC960_MaxLogicalDrives; i++) {
- kfree(Controller->V2.LogicalDeviceInformation[i]);
- Controller->V2.LogicalDeviceInformation[i] = NULL;
- }
-
- for (i = 0; i < DAC960_V2_MaxPhysicalDevices; i++)
- {
- kfree(Controller->V2.PhysicalDeviceInformation[i]);
- Controller->V2.PhysicalDeviceInformation[i] = NULL;
- kfree(Controller->V2.InquiryUnitSerialNumber[i]);
- Controller->V2.InquiryUnitSerialNumber[i] = NULL;
- }
-}
-
-
-/*
- DAC960_V1_ClearCommand clears critical fields of Command for DAC960 V1
- Firmware Controllers.
-*/
-
-static inline void DAC960_V1_ClearCommand(DAC960_Command_T *Command)
-{
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- memset(CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T));
- Command->V1.CommandStatus = 0;
-}
-
-
-/*
- DAC960_V2_ClearCommand clears critical fields of Command for DAC960 V2
- Firmware Controllers.
-*/
-
-static inline void DAC960_V2_ClearCommand(DAC960_Command_T *Command)
-{
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- memset(CommandMailbox, 0, sizeof(DAC960_V2_CommandMailbox_T));
- Command->V2.CommandStatus = 0;
-}
-
-
-/*
- DAC960_AllocateCommand allocates a Command structure from Controller's
- free list. During driver initialization, a special initialization command
- has been placed on the free list to guarantee that command allocation can
- never fail.
-*/
-
-static inline DAC960_Command_T *DAC960_AllocateCommand(DAC960_Controller_T
- *Controller)
-{
- DAC960_Command_T *Command = Controller->FreeCommands;
- if (Command == NULL) return NULL;
- Controller->FreeCommands = Command->Next;
- Command->Next = NULL;
- return Command;
-}
-
-
-/*
- DAC960_DeallocateCommand deallocates Command, returning it to Controller's
- free list.
-*/
-
-static inline void DAC960_DeallocateCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
-
- Command->Request = NULL;
- Command->Next = Controller->FreeCommands;
- Controller->FreeCommands = Command;
-}
-
-
-/*
- DAC960_WaitForCommand waits for a wake_up on Controller's Command Wait Queue.
-*/
-
-static void DAC960_WaitForCommand(DAC960_Controller_T *Controller)
-{
- spin_unlock_irq(&Controller->queue_lock);
- __wait_event(Controller->CommandWaitQueue, Controller->FreeCommands);
- spin_lock_irq(&Controller->queue_lock);
-}
-
-/*
- DAC960_GEM_QueueCommand queues Command for DAC960 GEM Series Controllers.
-*/
-
-static void DAC960_GEM_QueueCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandMailbox_T *NextCommandMailbox =
- Controller->V2.NextCommandMailbox;
-
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_GEM_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
-
- if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V2.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_GEM_MemoryMailboxNewCommand(ControllerBaseAddress);
-
- Controller->V2.PreviousCommandMailbox2 =
- Controller->V2.PreviousCommandMailbox1;
- Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox;
-
- if (++NextCommandMailbox > Controller->V2.LastCommandMailbox)
- NextCommandMailbox = Controller->V2.FirstCommandMailbox;
-
- Controller->V2.NextCommandMailbox = NextCommandMailbox;
-}
-
-/*
- DAC960_BA_QueueCommand queues Command for DAC960 BA Series Controllers.
-*/
-
-static void DAC960_BA_QueueCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandMailbox_T *NextCommandMailbox =
- Controller->V2.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_BA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V2.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_BA_MemoryMailboxNewCommand(ControllerBaseAddress);
- Controller->V2.PreviousCommandMailbox2 =
- Controller->V2.PreviousCommandMailbox1;
- Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V2.LastCommandMailbox)
- NextCommandMailbox = Controller->V2.FirstCommandMailbox;
- Controller->V2.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_LP_QueueCommand queues Command for DAC960 LP Series Controllers.
-*/
-
-static void DAC960_LP_QueueCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandMailbox_T *NextCommandMailbox =
- Controller->V2.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_LP_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V2.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_LP_MemoryMailboxNewCommand(ControllerBaseAddress);
- Controller->V2.PreviousCommandMailbox2 =
- Controller->V2.PreviousCommandMailbox1;
- Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V2.LastCommandMailbox)
- NextCommandMailbox = Controller->V2.FirstCommandMailbox;
- Controller->V2.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_LA_QueueCommandDualMode queues Command for DAC960 LA Series
- Controllers with Dual Mode Firmware.
-*/
-
-static void DAC960_LA_QueueCommandDualMode(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandMailbox_T *NextCommandMailbox =
- Controller->V1.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_LA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_LA_MemoryMailboxNewCommand(ControllerBaseAddress);
- Controller->V1.PreviousCommandMailbox2 =
- Controller->V1.PreviousCommandMailbox1;
- Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
- NextCommandMailbox = Controller->V1.FirstCommandMailbox;
- Controller->V1.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_LA_QueueCommandSingleMode queues Command for DAC960 LA Series
- Controllers with Single Mode Firmware.
-*/
-
-static void DAC960_LA_QueueCommandSingleMode(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandMailbox_T *NextCommandMailbox =
- Controller->V1.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_LA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_LA_HardwareMailboxNewCommand(ControllerBaseAddress);
- Controller->V1.PreviousCommandMailbox2 =
- Controller->V1.PreviousCommandMailbox1;
- Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
- NextCommandMailbox = Controller->V1.FirstCommandMailbox;
- Controller->V1.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_PG_QueueCommandDualMode queues Command for DAC960 PG Series
- Controllers with Dual Mode Firmware.
-*/
-
-static void DAC960_PG_QueueCommandDualMode(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandMailbox_T *NextCommandMailbox =
- Controller->V1.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_PG_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_PG_MemoryMailboxNewCommand(ControllerBaseAddress);
- Controller->V1.PreviousCommandMailbox2 =
- Controller->V1.PreviousCommandMailbox1;
- Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
- NextCommandMailbox = Controller->V1.FirstCommandMailbox;
- Controller->V1.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_PG_QueueCommandSingleMode queues Command for DAC960 PG Series
- Controllers with Single Mode Firmware.
-*/
-
-static void DAC960_PG_QueueCommandSingleMode(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandMailbox_T *NextCommandMailbox =
- Controller->V1.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_PG_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_PG_HardwareMailboxNewCommand(ControllerBaseAddress);
- Controller->V1.PreviousCommandMailbox2 =
- Controller->V1.PreviousCommandMailbox1;
- Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
- NextCommandMailbox = Controller->V1.FirstCommandMailbox;
- Controller->V1.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_PD_QueueCommand queues Command for DAC960 PD Series Controllers.
-*/
-
-static void DAC960_PD_QueueCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- while (DAC960_PD_MailboxFullP(ControllerBaseAddress))
- udelay(1);
- DAC960_PD_WriteCommandMailbox(ControllerBaseAddress, CommandMailbox);
- DAC960_PD_NewCommand(ControllerBaseAddress);
-}
-
-
-/*
- DAC960_P_QueueCommand queues Command for DAC960 P Series Controllers.
-*/
-
-static void DAC960_P_QueueCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- switch (CommandMailbox->Common.CommandOpcode)
- {
- case DAC960_V1_Enquiry:
- CommandMailbox->Common.CommandOpcode = DAC960_V1_Enquiry_Old;
- break;
- case DAC960_V1_GetDeviceState:
- CommandMailbox->Common.CommandOpcode = DAC960_V1_GetDeviceState_Old;
- break;
- case DAC960_V1_Read:
- CommandMailbox->Common.CommandOpcode = DAC960_V1_Read_Old;
- DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_Write:
- CommandMailbox->Common.CommandOpcode = DAC960_V1_Write_Old;
- DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_ReadWithScatterGather:
- CommandMailbox->Common.CommandOpcode =
- DAC960_V1_ReadWithScatterGather_Old;
- DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_WriteWithScatterGather:
- CommandMailbox->Common.CommandOpcode =
- DAC960_V1_WriteWithScatterGather_Old;
- DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
- break;
- default:
- break;
- }
- while (DAC960_PD_MailboxFullP(ControllerBaseAddress))
- udelay(1);
- DAC960_PD_WriteCommandMailbox(ControllerBaseAddress, CommandMailbox);
- DAC960_PD_NewCommand(ControllerBaseAddress);
-}
-
-
-/*
- DAC960_ExecuteCommand executes Command and waits for completion.
-*/
-
-static void DAC960_ExecuteCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DECLARE_COMPLETION_ONSTACK(Completion);
- unsigned long flags;
- Command->Completion = &Completion;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_QueueCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
-
- if (in_interrupt())
- return;
- wait_for_completion(&Completion);
-}
-
-
-/*
- DAC960_V1_ExecuteType3 executes a DAC960 V1 Firmware Controller Type 3
- Command and waits for completion. It returns true on success and false
- on failure.
-*/
-
-static bool DAC960_V1_ExecuteType3(DAC960_Controller_T *Controller,
- DAC960_V1_CommandOpcode_T CommandOpcode,
- dma_addr_t DataDMA)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandStatus_T CommandStatus;
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->Type3.CommandOpcode = CommandOpcode;
- CommandMailbox->Type3.BusAddress = DataDMA;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V1.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V1_NormalCompletion);
-}
-
-
-/*
- DAC960_V1_ExecuteType3B executes a DAC960 V1 Firmware Controller Type 3B
- Command and waits for completion. It returns true on success and false
- on failure.
-*/
-
-static bool DAC960_V1_ExecuteType3B(DAC960_Controller_T *Controller,
- DAC960_V1_CommandOpcode_T CommandOpcode,
- unsigned char CommandOpcode2,
- dma_addr_t DataDMA)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandStatus_T CommandStatus;
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->Type3B.CommandOpcode = CommandOpcode;
- CommandMailbox->Type3B.CommandOpcode2 = CommandOpcode2;
- CommandMailbox->Type3B.BusAddress = DataDMA;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V1.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V1_NormalCompletion);
-}
-
-
-/*
- DAC960_V1_ExecuteType3D executes a DAC960 V1 Firmware Controller Type 3D
- Command and waits for completion. It returns true on success and false
- on failure.
-*/
-
-static bool DAC960_V1_ExecuteType3D(DAC960_Controller_T *Controller,
- DAC960_V1_CommandOpcode_T CommandOpcode,
- unsigned char Channel,
- unsigned char TargetID,
- dma_addr_t DataDMA)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandStatus_T CommandStatus;
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->Type3D.CommandOpcode = CommandOpcode;
- CommandMailbox->Type3D.Channel = Channel;
- CommandMailbox->Type3D.TargetID = TargetID;
- CommandMailbox->Type3D.BusAddress = DataDMA;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V1.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V1_NormalCompletion);
-}
-
-
-/*
- DAC960_V2_GeneralInfo executes a DAC960 V2 Firmware General Information
- Reading IOCTL Command and waits for completion. It returns true on success
- and false on failure.
-
- Return data in the controller's HealthStatusBuffer, which is dma-able memory.
-*/
-
-static bool DAC960_V2_GeneralInfo(DAC960_Controller_T *Controller)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->Common.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->Common.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->Common.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->Common.DataTransferSize = sizeof(DAC960_V2_HealthStatusBuffer_T);
- CommandMailbox->Common.IOCTL_Opcode = DAC960_V2_GetHealthStatus;
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.HealthStatusBufferDMA;
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->Common.DataTransferSize;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-/*
- DAC960_V2_ControllerInfo executes a DAC960 V2 Firmware Controller
- Information Reading IOCTL Command and waits for completion. It returns
- true on success and false on failure.
-
- Data is returned in the controller's V2.NewControllerInformation dma-able
- memory buffer.
-*/
-
-static bool DAC960_V2_NewControllerInfo(DAC960_Controller_T *Controller)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->ControllerInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->ControllerInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->ControllerInfo.DataTransferSize = sizeof(DAC960_V2_ControllerInfo_T);
- CommandMailbox->ControllerInfo.ControllerNumber = 0;
- CommandMailbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo;
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewControllerInformationDMA;
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->ControllerInfo.DataTransferSize;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-/*
- DAC960_V2_NewLogicalDeviceInfo executes a DAC960 V2 Firmware Controller Logical
- Device Information Reading IOCTL Command and waits for completion. It
- returns true on success and false on failure.
-
- Data is returned in the controller's V2.NewLogicalDeviceInformation dma-able
- memory buffer.
-*/
-
-static bool DAC960_V2_NewLogicalDeviceInfo(DAC960_Controller_T *Controller,
- unsigned short LogicalDeviceNumber)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
-
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->LogicalDeviceInfo.CommandOpcode =
- DAC960_V2_IOCTL;
- CommandMailbox->LogicalDeviceInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->LogicalDeviceInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->LogicalDeviceInfo.DataTransferSize =
- sizeof(DAC960_V2_LogicalDeviceInfo_T);
- CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode = DAC960_V2_GetLogicalDeviceInfoValid;
- CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewLogicalDeviceInformationDMA;
- CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->LogicalDeviceInfo.DataTransferSize;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-/*
- DAC960_V2_NewPhysicalDeviceInfo executes a DAC960 V2 Firmware Controller "Read
- Physical Device Information" IOCTL Command and waits for completion. It
- returns true on success and false on failure.
-
- The Channel, TargetID, LogicalUnit arguments should be 0 the first time
- this function is called for a given controller. This will return data
- for the "first" device on that controller. The returned data includes a
- Channel, TargetID, LogicalUnit that can be passed in to this routine to
- get data for the NEXT device on that controller.
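- (DAC960_V2_ReadDeviceConfiguration below iterates the device list in
- exactly this way, bumping LogicalUnit after each successful call.)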
-
- Data is stored in the controller's V2.NewPhysicalDeviceInfo dma-able
- memory buffer.
-
-*/
-
-static bool DAC960_V2_NewPhysicalDeviceInfo(DAC960_Controller_T *Controller,
- unsigned char Channel,
- unsigned char TargetID,
- unsigned char LogicalUnit)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
-
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->PhysicalDeviceInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->PhysicalDeviceInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
- sizeof(DAC960_V2_PhysicalDeviceInfo_T);
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit = LogicalUnit;
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID;
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel;
- CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_GetPhysicalDeviceInfoValid;
- CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewPhysicalDeviceInformationDMA;
- CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->PhysicalDeviceInfo.DataTransferSize;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-static void DAC960_V2_ConstructNewUnitSerialNumber(
- DAC960_Controller_T *Controller,
- DAC960_V2_CommandMailbox_T *CommandMailbox, int Channel, int TargetID,
- int LogicalUnit)
-{
- CommandMailbox->SCSI_10.CommandOpcode = DAC960_V2_SCSI_10_Passthru;
- CommandMailbox->SCSI_10.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->SCSI_10.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->SCSI_10.DataTransferSize =
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- CommandMailbox->SCSI_10.PhysicalDevice.LogicalUnit = LogicalUnit;
- CommandMailbox->SCSI_10.PhysicalDevice.TargetID = TargetID;
- CommandMailbox->SCSI_10.PhysicalDevice.Channel = Channel;
- CommandMailbox->SCSI_10.CDBLength = 6;
- CommandMailbox->SCSI_10.SCSI_CDB[0] = 0x12; /* INQUIRY */
- CommandMailbox->SCSI_10.SCSI_CDB[1] = 1; /* EVPD = 1 */
- CommandMailbox->SCSI_10.SCSI_CDB[2] = 0x80; /* Page Code */
- CommandMailbox->SCSI_10.SCSI_CDB[3] = 0; /* Reserved */
- CommandMailbox->SCSI_10.SCSI_CDB[4] =
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- CommandMailbox->SCSI_10.SCSI_CDB[5] = 0; /* Control */
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewInquiryUnitSerialNumberDMA;
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->SCSI_10.DataTransferSize;
-}
-
-
-/*
- DAC960_V2_NewInquiryUnitSerialNumber executes a SCSI pass-through
- Inquiry command to a SCSI device identified by Channel number,
- Target ID, and Logical Unit Number, and waits for completion of
- the command.
-
- The return data includes Unit Serial Number information for the
- specified device.
-
- Data is stored in the controller's V2.NewInquiryUnitSerialNumber dma-able
- memory buffer.
-*/
-
-static bool DAC960_V2_NewInquiryUnitSerialNumber(DAC960_Controller_T *Controller,
- int Channel, int TargetID, int LogicalUnit)
-{
- DAC960_Command_T *Command;
- DAC960_V2_CommandMailbox_T *CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
-
- Command = DAC960_AllocateCommand(Controller);
- CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
-
- DAC960_V2_ConstructNewUnitSerialNumber(Controller, CommandMailbox,
- Channel, TargetID, LogicalUnit);
-
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-/*
- DAC960_V2_DeviceOperation executes a DAC960 V2 Firmware Controller Device
- Operation IOCTL Command and waits for completion. It returns true on
- success and false on failure.
-*/
-
-static bool DAC960_V2_DeviceOperation(DAC960_Controller_T *Controller,
- DAC960_V2_IOCTL_Opcode_T IOCTL_Opcode,
- DAC960_V2_OperationDevice_T
- OperationDevice)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->DeviceOperation.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->DeviceOperation.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->DeviceOperation.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->DeviceOperation.IOCTL_Opcode = IOCTL_Opcode;
- CommandMailbox->DeviceOperation.OperationDevice = OperationDevice;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-/*
- DAC960_V1_EnableMemoryMailboxInterface enables the Memory Mailbox Interface
- for DAC960 V1 Firmware Controllers.
-
- PD and P controller types have no memory mailbox, but still need the
- other dma mapped memory.
-*/
-
-static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T
- *Controller)
-{
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_HardwareType_T hw_type = Controller->HardwareType;
- struct pci_dev *PCI_Device = Controller->PCIDevice;
- struct dma_loaf *DmaPages = &Controller->DmaPages;
- size_t DmaPagesSize;
- size_t CommandMailboxesSize;
- size_t StatusMailboxesSize;
-
- DAC960_V1_CommandMailbox_T *CommandMailboxesMemory;
- dma_addr_t CommandMailboxesMemoryDMA;
-
- DAC960_V1_StatusMailbox_T *StatusMailboxesMemory;
- dma_addr_t StatusMailboxesMemoryDMA;
-
- DAC960_V1_CommandMailbox_T CommandMailbox;
- DAC960_V1_CommandStatus_T CommandStatus;
- int TimeoutCounter;
- int i;
-
- memset(&CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T));
-
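- /* Only a 32-bit DMA mask is attempted for V1 firmware controllers. */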
- if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32)))
- return DAC960_Failure(Controller, "DMA mask out of range");
-
- if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) {
- CommandMailboxesSize = 0;
- StatusMailboxesSize = 0;
- } else {
- CommandMailboxesSize = DAC960_V1_CommandMailboxCount * sizeof(DAC960_V1_CommandMailbox_T);
- StatusMailboxesSize = DAC960_V1_StatusMailboxCount * sizeof(DAC960_V1_StatusMailbox_T);
- }
- DmaPagesSize = CommandMailboxesSize + StatusMailboxesSize +
- sizeof(DAC960_V1_DCDB_T) + sizeof(DAC960_V1_Enquiry_T) +
- sizeof(DAC960_V1_ErrorTable_T) + sizeof(DAC960_V1_EventLogEntry_T) +
- sizeof(DAC960_V1_RebuildProgress_T) +
- sizeof(DAC960_V1_LogicalDriveInformationArray_T) +
- sizeof(DAC960_V1_BackgroundInitializationStatus_T) +
- sizeof(DAC960_V1_DeviceState_T) + sizeof(DAC960_SCSI_Inquiry_T) +
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
-
- if (!init_dma_loaf(PCI_Device, DmaPages, DmaPagesSize))
- return false;
-
-
- if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller))
- goto skip_mailboxes;
-
- CommandMailboxesMemory = slice_dma_loaf(DmaPages,
- CommandMailboxesSize, &CommandMailboxesMemoryDMA);
-
- /* These are the base addresses for the command memory mailbox array */
- Controller->V1.FirstCommandMailbox = CommandMailboxesMemory;
- Controller->V1.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA;
-
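- /*
- The mailboxes are used as a ring; remember the last two command
- mailboxes posted so the command-queueing path can detect when the
- controller has consumed earlier slots before reusing them.
- */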
- CommandMailboxesMemory += DAC960_V1_CommandMailboxCount - 1;
- Controller->V1.LastCommandMailbox = CommandMailboxesMemory;
- Controller->V1.NextCommandMailbox = Controller->V1.FirstCommandMailbox;
- Controller->V1.PreviousCommandMailbox1 = Controller->V1.LastCommandMailbox;
- Controller->V1.PreviousCommandMailbox2 =
- Controller->V1.LastCommandMailbox - 1;
-
- /* These are the base addresses for the status memory mailbox array */
- StatusMailboxesMemory = slice_dma_loaf(DmaPages,
- StatusMailboxesSize, &StatusMailboxesMemoryDMA);
-
- Controller->V1.FirstStatusMailbox = StatusMailboxesMemory;
- Controller->V1.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA;
- StatusMailboxesMemory += DAC960_V1_StatusMailboxCount - 1;
- Controller->V1.LastStatusMailbox = StatusMailboxesMemory;
- Controller->V1.NextStatusMailbox = Controller->V1.FirstStatusMailbox;
-
-skip_mailboxes:
- Controller->V1.MonitoringDCDB = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_DCDB_T),
- &Controller->V1.MonitoringDCDB_DMA);
-
- Controller->V1.NewEnquiry = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_Enquiry_T),
- &Controller->V1.NewEnquiryDMA);
-
- Controller->V1.NewErrorTable = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_ErrorTable_T),
- &Controller->V1.NewErrorTableDMA);
-
- Controller->V1.EventLogEntry = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_EventLogEntry_T),
- &Controller->V1.EventLogEntryDMA);
-
- Controller->V1.RebuildProgress = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_RebuildProgress_T),
- &Controller->V1.RebuildProgressDMA);
-
- Controller->V1.NewLogicalDriveInformation = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_LogicalDriveInformationArray_T),
- &Controller->V1.NewLogicalDriveInformationDMA);
-
- Controller->V1.BackgroundInitializationStatus = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_BackgroundInitializationStatus_T),
- &Controller->V1.BackgroundInitializationStatusDMA);
-
- Controller->V1.NewDeviceState = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_DeviceState_T),
- &Controller->V1.NewDeviceStateDMA);
-
- Controller->V1.NewInquiryStandardData = slice_dma_loaf(DmaPages,
- sizeof(DAC960_SCSI_Inquiry_T),
- &Controller->V1.NewInquiryStandardDataDMA);
-
- Controller->V1.NewInquiryUnitSerialNumber = slice_dma_loaf(DmaPages,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
- &Controller->V1.NewInquiryUnitSerialNumberDMA);
-
- if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller))
- return true;
-
- /* Enable the Memory Mailbox Interface. */
- Controller->V1.DualModeMemoryMailboxInterface = true;
- CommandMailbox.TypeX.CommandOpcode = 0x2B;
- CommandMailbox.TypeX.CommandIdentifier = 0;
- CommandMailbox.TypeX.CommandOpcode2 = 0x14;
- CommandMailbox.TypeX.CommandMailboxesBusAddress =
- Controller->V1.FirstCommandMailboxDMA;
- CommandMailbox.TypeX.StatusMailboxesBusAddress =
- Controller->V1.FirstStatusMailboxDMA;
-#define TIMEOUT_COUNT 1000000
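- /*
- CommandOpcode 0x2B with CommandOpcode2 0x14 requests the dual-mode
- memory mailbox interface; on failure the loop below retries once
- with CommandOpcode2 0x10 (single mode). Each polling loop allows
- up to TIMEOUT_COUNT iterations of udelay(10), roughly ten seconds.
- */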
-
- for (i = 0; i < 2; i++)
- switch (Controller->HardwareType)
- {
- case DAC960_LA_Controller:
- TimeoutCounter = TIMEOUT_COUNT;
- while (--TimeoutCounter >= 0)
- {
- if (!DAC960_LA_HardwareMailboxFullP(ControllerBaseAddress))
- break;
- udelay(10);
- }
- if (TimeoutCounter < 0) return false;
- DAC960_LA_WriteHardwareMailbox(ControllerBaseAddress, &CommandMailbox);
- DAC960_LA_HardwareMailboxNewCommand(ControllerBaseAddress);
- TimeoutCounter = TIMEOUT_COUNT;
- while (--TimeoutCounter >= 0)
- {
- if (DAC960_LA_HardwareMailboxStatusAvailableP(
- ControllerBaseAddress))
- break;
- udelay(10);
- }
- if (TimeoutCounter < 0) return false;
- CommandStatus = DAC960_LA_ReadStatusRegister(ControllerBaseAddress);
- DAC960_LA_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
- DAC960_LA_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
- if (CommandStatus == DAC960_V1_NormalCompletion) return true;
- Controller->V1.DualModeMemoryMailboxInterface = false;
- CommandMailbox.TypeX.CommandOpcode2 = 0x10;
- break;
- case DAC960_PG_Controller:
- TimeoutCounter = TIMEOUT_COUNT;
- while (--TimeoutCounter >= 0)
- {
- if (!DAC960_PG_HardwareMailboxFullP(ControllerBaseAddress))
- break;
- udelay(10);
- }
- if (TimeoutCounter < 0) return false;
- DAC960_PG_WriteHardwareMailbox(ControllerBaseAddress, &CommandMailbox);
- DAC960_PG_HardwareMailboxNewCommand(ControllerBaseAddress);
-
- TimeoutCounter = TIMEOUT_COUNT;
- while (--TimeoutCounter >= 0)
- {
- if (DAC960_PG_HardwareMailboxStatusAvailableP(
- ControllerBaseAddress))
- break;
- udelay(10);
- }
- if (TimeoutCounter < 0) return false;
- CommandStatus = DAC960_PG_ReadStatusRegister(ControllerBaseAddress);
- DAC960_PG_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
- DAC960_PG_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
- if (CommandStatus == DAC960_V1_NormalCompletion) return true;
- Controller->V1.DualModeMemoryMailboxInterface = false;
- CommandMailbox.TypeX.CommandOpcode2 = 0x10;
- break;
- default:
- DAC960_Failure(Controller, "Unknown Controller Type\n");
- break;
- }
- return false;
-}
-
-
-/*
- DAC960_V2_EnableMemoryMailboxInterface enables the Memory Mailbox Interface
- for DAC960 V2 Firmware Controllers.
-
- Aggregate the space needed for the controller's memory mailbox and
- the other data structures that will be targets of dma transfers with
- the controller. Allocate a dma-mapped region of memory to hold these
- structures. Then, save CPU pointers and dma_addr_t values to reference
- the structures that are contained in that region.
-*/
-
-static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T
- *Controller)
-{
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- struct pci_dev *PCI_Device = Controller->PCIDevice;
- struct dma_loaf *DmaPages = &Controller->DmaPages;
- size_t DmaPagesSize;
- size_t CommandMailboxesSize;
- size_t StatusMailboxesSize;
-
- DAC960_V2_CommandMailbox_T *CommandMailboxesMemory;
- dma_addr_t CommandMailboxesMemoryDMA;
-
- DAC960_V2_StatusMailbox_T *StatusMailboxesMemory;
- dma_addr_t StatusMailboxesMemoryDMA;
-
- DAC960_V2_CommandMailbox_T *CommandMailbox;
- dma_addr_t CommandMailboxDMA;
- DAC960_V2_CommandStatus_T CommandStatus;
-
- if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(64)) &&
- pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32)))
- return DAC960_Failure(Controller, "DMA mask out of range");
-
- /* This is a temporary dma mapping, used only in the scope of this function */
- CommandMailbox = pci_alloc_consistent(PCI_Device,
- sizeof(DAC960_V2_CommandMailbox_T), &CommandMailboxDMA);
- if (CommandMailbox == NULL)
- return false;
-
- CommandMailboxesSize = DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T);
- StatusMailboxesSize = DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T);
- DmaPagesSize =
- CommandMailboxesSize + StatusMailboxesSize +
- sizeof(DAC960_V2_HealthStatusBuffer_T) +
- sizeof(DAC960_V2_ControllerInfo_T) +
- sizeof(DAC960_V2_LogicalDeviceInfo_T) +
- sizeof(DAC960_V2_PhysicalDeviceInfo_T) +
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T) +
- sizeof(DAC960_V2_Event_T) +
- sizeof(DAC960_V2_PhysicalToLogicalDevice_T);
-
- if (!init_dma_loaf(PCI_Device, DmaPages, DmaPagesSize)) {
- pci_free_consistent(PCI_Device, sizeof(DAC960_V2_CommandMailbox_T),
- CommandMailbox, CommandMailboxDMA);
- return false;
- }
-
- CommandMailboxesMemory = slice_dma_loaf(DmaPages,
- CommandMailboxesSize, &CommandMailboxesMemoryDMA);
-
- /* These are the base addresses for the command memory mailbox array */
- Controller->V2.FirstCommandMailbox = CommandMailboxesMemory;
- Controller->V2.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA;
-
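- /* As in the V1 path, the last two posted command mailboxes are
- remembered for ring wrap-around handling. */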
- CommandMailboxesMemory += DAC960_V2_CommandMailboxCount - 1;
- Controller->V2.LastCommandMailbox = CommandMailboxesMemory;
- Controller->V2.NextCommandMailbox = Controller->V2.FirstCommandMailbox;
- Controller->V2.PreviousCommandMailbox1 = Controller->V2.LastCommandMailbox;
- Controller->V2.PreviousCommandMailbox2 =
- Controller->V2.LastCommandMailbox - 1;
-
- /* These are the base addresses for the status memory mailbox array */
- StatusMailboxesMemory = slice_dma_loaf(DmaPages,
- StatusMailboxesSize, &StatusMailboxesMemoryDMA);
-
- Controller->V2.FirstStatusMailbox = StatusMailboxesMemory;
- Controller->V2.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA;
- StatusMailboxesMemory += DAC960_V2_StatusMailboxCount - 1;
- Controller->V2.LastStatusMailbox = StatusMailboxesMemory;
- Controller->V2.NextStatusMailbox = Controller->V2.FirstStatusMailbox;
-
- Controller->V2.HealthStatusBuffer = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_HealthStatusBuffer_T),
- &Controller->V2.HealthStatusBufferDMA);
-
- Controller->V2.NewControllerInformation = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_ControllerInfo_T),
- &Controller->V2.NewControllerInformationDMA);
-
- Controller->V2.NewLogicalDeviceInformation = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_LogicalDeviceInfo_T),
- &Controller->V2.NewLogicalDeviceInformationDMA);
-
- Controller->V2.NewPhysicalDeviceInformation = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_PhysicalDeviceInfo_T),
- &Controller->V2.NewPhysicalDeviceInformationDMA);
-
- Controller->V2.NewInquiryUnitSerialNumber = slice_dma_loaf(DmaPages,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
- &Controller->V2.NewInquiryUnitSerialNumberDMA);
-
- Controller->V2.Event = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_Event_T),
- &Controller->V2.EventDMA);
-
- Controller->V2.PhysicalToLogicalDevice = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_PhysicalToLogicalDevice_T),
- &Controller->V2.PhysicalToLogicalDeviceDMA);
-
- /*
- Enable the Memory Mailbox Interface.
-
- I don't know why we can't just use one of the memory mailboxes
- we just allocated to do this, instead of using this temporary one.
- Try this change later.
- */
- memset(CommandMailbox, 0, sizeof(DAC960_V2_CommandMailbox_T));
- CommandMailbox->SetMemoryMailbox.CommandIdentifier = 1;
- CommandMailbox->SetMemoryMailbox.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->SetMemoryMailbox.CommandControlBits.NoAutoRequestSense = true;
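- /* The firmware expects the mailbox region sizes in kilobytes, hence ">> 10". */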
- CommandMailbox->SetMemoryMailbox.FirstCommandMailboxSizeKB =
- (DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T)) >> 10;
- CommandMailbox->SetMemoryMailbox.FirstStatusMailboxSizeKB =
- (DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T)) >> 10;
- CommandMailbox->SetMemoryMailbox.SecondCommandMailboxSizeKB = 0;
- CommandMailbox->SetMemoryMailbox.SecondStatusMailboxSizeKB = 0;
- CommandMailbox->SetMemoryMailbox.RequestSenseSize = 0;
- CommandMailbox->SetMemoryMailbox.IOCTL_Opcode = DAC960_V2_SetMemoryMailbox;
- CommandMailbox->SetMemoryMailbox.HealthStatusBufferSizeKB = 1;
- CommandMailbox->SetMemoryMailbox.HealthStatusBufferBusAddress =
- Controller->V2.HealthStatusBufferDMA;
- CommandMailbox->SetMemoryMailbox.FirstCommandMailboxBusAddress =
- Controller->V2.FirstCommandMailboxDMA;
- CommandMailbox->SetMemoryMailbox.FirstStatusMailboxBusAddress =
- Controller->V2.FirstStatusMailboxDMA;
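- /*
- Each hardware type below follows the same handshake: wait for the
- hardware mailbox to drain, write the command, signal a new command,
- poll until status is available, then read and acknowledge it.
- */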
- switch (Controller->HardwareType)
- {
- case DAC960_GEM_Controller:
- while (DAC960_GEM_HardwareMailboxFullP(ControllerBaseAddress))
- udelay(1);
- DAC960_GEM_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA);
- DAC960_GEM_HardwareMailboxNewCommand(ControllerBaseAddress);
- while (!DAC960_GEM_HardwareMailboxStatusAvailableP(ControllerBaseAddress))
- udelay(1);
- CommandStatus = DAC960_GEM_ReadCommandStatus(ControllerBaseAddress);
- DAC960_GEM_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
- DAC960_GEM_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
- break;
- case DAC960_BA_Controller:
- while (DAC960_BA_HardwareMailboxFullP(ControllerBaseAddress))
- udelay(1);
- DAC960_BA_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA);
- DAC960_BA_HardwareMailboxNewCommand(ControllerBaseAddress);
- while (!DAC960_BA_HardwareMailboxStatusAvailableP(ControllerBaseAddress))
- udelay(1);
- CommandStatus = DAC960_BA_ReadCommandStatus(ControllerBaseAddress);
- DAC960_BA_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
- DAC960_BA_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
- break;
- case DAC960_LP_Controller:
- while (DAC960_LP_HardwareMailboxFullP(ControllerBaseAddress))
- udelay(1);
- DAC960_LP_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA);
- DAC960_LP_HardwareMailboxNewCommand(ControllerBaseAddress);
- while (!DAC960_LP_HardwareMailboxStatusAvailableP(ControllerBaseAddress))
- udelay(1);
- CommandStatus = DAC960_LP_ReadCommandStatus(ControllerBaseAddress);
- DAC960_LP_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
- DAC960_LP_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
- break;
- default:
- DAC960_Failure(Controller, "Unknown Controller Type\n");
- CommandStatus = DAC960_V2_AbormalCompletion;
- break;
- }
- pci_free_consistent(PCI_Device, sizeof(DAC960_V2_CommandMailbox_T),
- CommandMailbox, CommandMailboxDMA);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-/*
- DAC960_V1_ReadControllerConfiguration reads the Configuration Information
- from DAC960 V1 Firmware Controllers and initializes the Controller structure.
-*/
-
-static bool DAC960_V1_ReadControllerConfiguration(DAC960_Controller_T
- *Controller)
-{
- DAC960_V1_Enquiry2_T *Enquiry2;
- dma_addr_t Enquiry2DMA;
- DAC960_V1_Config2_T *Config2;
- dma_addr_t Config2DMA;
- int LogicalDriveNumber, Channel, TargetID;
- struct dma_loaf local_dma;
-
- if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
- sizeof(DAC960_V1_Enquiry2_T) + sizeof(DAC960_V1_Config2_T)))
- return DAC960_Failure(Controller, "LOGICAL DEVICE ALLOCATION");
-
- Enquiry2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Enquiry2_T), &Enquiry2DMA);
- Config2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Config2_T), &Config2DMA);
-
- if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_Enquiry,
- Controller->V1.NewEnquiryDMA)) {
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "ENQUIRY");
- }
- memcpy(&Controller->V1.Enquiry, Controller->V1.NewEnquiry,
- sizeof(DAC960_V1_Enquiry_T));
-
- if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_Enquiry2, Enquiry2DMA)) {
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "ENQUIRY2");
- }
-
- if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_ReadConfig2, Config2DMA)) {
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "READ CONFIG2");
- }
-
- if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_GetLogicalDriveInformation,
- Controller->V1.NewLogicalDriveInformationDMA)) {
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "GET LOGICAL DRIVE INFORMATION");
- }
- memcpy(&Controller->V1.LogicalDriveInformation,
- Controller->V1.NewLogicalDriveInformation,
- sizeof(DAC960_V1_LogicalDriveInformationArray_T));
-
- for (Channel = 0; Channel < Enquiry2->ActualChannels; Channel++)
- for (TargetID = 0; TargetID < Enquiry2->MaxTargets; TargetID++) {
- if (!DAC960_V1_ExecuteType3D(Controller, DAC960_V1_GetDeviceState,
- Channel, TargetID,
- Controller->V1.NewDeviceStateDMA)) {
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "GET DEVICE STATE");
- }
- memcpy(&Controller->V1.DeviceState[Channel][TargetID],
- Controller->V1.NewDeviceState, sizeof(DAC960_V1_DeviceState_T));
- }
- /*
- Initialize the Controller Model Name and Full Model Name fields.
- */
- switch (Enquiry2->HardwareID.SubModel)
- {
- case DAC960_V1_P_PD_PU:
- if (Enquiry2->SCSICapability.BusSpeed == DAC960_V1_Ultra)
- strcpy(Controller->ModelName, "DAC960PU");
- else strcpy(Controller->ModelName, "DAC960PD");
- break;
- case DAC960_V1_PL:
- strcpy(Controller->ModelName, "DAC960PL");
- break;
- case DAC960_V1_PG:
- strcpy(Controller->ModelName, "DAC960PG");
- break;
- case DAC960_V1_PJ:
- strcpy(Controller->ModelName, "DAC960PJ");
- break;
- case DAC960_V1_PR:
- strcpy(Controller->ModelName, "DAC960PR");
- break;
- case DAC960_V1_PT:
- strcpy(Controller->ModelName, "DAC960PT");
- break;
- case DAC960_V1_PTL0:
- strcpy(Controller->ModelName, "DAC960PTL0");
- break;
- case DAC960_V1_PRL:
- strcpy(Controller->ModelName, "DAC960PRL");
- break;
- case DAC960_V1_PTL1:
- strcpy(Controller->ModelName, "DAC960PTL1");
- break;
- case DAC960_V1_1164P:
- strcpy(Controller->ModelName, "DAC1164P");
- break;
- default:
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "MODEL VERIFICATION");
- }
- strcpy(Controller->FullModelName, "Mylex ");
- strcat(Controller->FullModelName, Controller->ModelName);
- /*
- Initialize the Controller Firmware Version field and verify that it
- is a supported firmware version. The supported firmware versions are:
-
- DAC1164P 5.06 and above
- DAC960PTL/PRL/PJ/PG 4.06 and above
- DAC960PU/PD/PL 3.51 and above
- DAC960PU/PD/PL/P 2.73 and above
- */
-#if defined(CONFIG_ALPHA)
- /*
- DEC Alpha machines were often equipped with DAC960 cards that were
- OEMed from Mylex, and had their own custom firmware. Version 2.70,
- the last custom FW revision to be released by DEC for these older
- controllers, appears to work quite well with this driver.
-
- Cards tested successfully were several versions each of the PD and
- PU, called by DEC the KZPSC and KZPAC, respectively, and having
- the Manufacturer Numbers (from Mylex), usually on a sticker on the
- back of the board, of:
-
- KZPSC: D040347 (1-channel) or D040348 (2-channel) or D040349 (3-channel)
- KZPAC: D040395 (1-channel) or D040396 (2-channel) or D040397 (3-channel)
- */
-# define FIRMWARE_27X "2.70"
-#else
-# define FIRMWARE_27X "2.73"
-#endif
-
- if (Enquiry2->FirmwareID.MajorVersion == 0)
- {
- Enquiry2->FirmwareID.MajorVersion =
- Controller->V1.Enquiry.MajorFirmwareVersion;
- Enquiry2->FirmwareID.MinorVersion =
- Controller->V1.Enquiry.MinorFirmwareVersion;
- Enquiry2->FirmwareID.FirmwareType = '0';
- Enquiry2->FirmwareID.TurnID = 0;
- }
- snprintf(Controller->FirmwareVersion, sizeof(Controller->FirmwareVersion),
- "%d.%02d-%c-%02d",
- Enquiry2->FirmwareID.MajorVersion,
- Enquiry2->FirmwareID.MinorVersion,
- Enquiry2->FirmwareID.FirmwareType,
- Enquiry2->FirmwareID.TurnID);
- if (!((Controller->FirmwareVersion[0] == '5' &&
- strcmp(Controller->FirmwareVersion, "5.06") >= 0) ||
- (Controller->FirmwareVersion[0] == '4' &&
- strcmp(Controller->FirmwareVersion, "4.06") >= 0) ||
- (Controller->FirmwareVersion[0] == '3' &&
- strcmp(Controller->FirmwareVersion, "3.51") >= 0) ||
- (Controller->FirmwareVersion[0] == '2' &&
- strcmp(Controller->FirmwareVersion, FIRMWARE_27X) >= 0)))
- {
- DAC960_Failure(Controller, "FIRMWARE VERSION VERIFICATION");
- DAC960_Error("Firmware Version = '%s'\n", Controller,
- Controller->FirmwareVersion);
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return false;
- }
- /*
- Initialize the Controller Channels, Targets, Memory Size, and SAF-TE
- Enclosure Management Enabled fields.
- */
- Controller->Channels = Enquiry2->ActualChannels;
- Controller->Targets = Enquiry2->MaxTargets;
- Controller->MemorySize = Enquiry2->MemorySize >> 20;
- Controller->V1.SAFTE_EnclosureManagementEnabled =
- (Enquiry2->FaultManagementType == DAC960_V1_SAFTE);
- /*
- Initialize the Controller Queue Depth, Driver Queue Depth, Logical Drive
- Count, Maximum Blocks per Command, Controller Scatter/Gather Limit, and
- Driver Scatter/Gather Limit. The Driver Queue Depth must be at most one
- less than the Controller Queue Depth to allow for an automatic drive
- rebuild operation.
- */
- Controller->ControllerQueueDepth = Controller->V1.Enquiry.MaxCommands;
- Controller->DriverQueueDepth = Controller->ControllerQueueDepth - 1;
- if (Controller->DriverQueueDepth > DAC960_MaxDriverQueueDepth)
- Controller->DriverQueueDepth = DAC960_MaxDriverQueueDepth;
- Controller->LogicalDriveCount =
- Controller->V1.Enquiry.NumberOfLogicalDrives;
- Controller->MaxBlocksPerCommand = Enquiry2->MaxBlocksPerCommand;
- Controller->ControllerScatterGatherLimit = Enquiry2->MaxScatterGatherEntries;
- Controller->DriverScatterGatherLimit =
- Controller->ControllerScatterGatherLimit;
- if (Controller->DriverScatterGatherLimit > DAC960_V1_ScatterGatherLimit)
- Controller->DriverScatterGatherLimit = DAC960_V1_ScatterGatherLimit;
- /*
- Initialize the Stripe Size, Segment Size, and Geometry Translation.
- */
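- /*
- BlocksPerStripe and BlocksPerCacheLine are scaled by BlockFactor to
- get block counts; shifting right by (10 - DAC960_BlockSizeBits)
- then converts blocks to kilobytes (DAC960_BlockSizeBits is 9 for
- the driver's 512-byte blocks).
- */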
- Controller->V1.StripeSize = Config2->BlocksPerStripe * Config2->BlockFactor
- >> (10 - DAC960_BlockSizeBits);
- Controller->V1.SegmentSize = Config2->BlocksPerCacheLine * Config2->BlockFactor
- >> (10 - DAC960_BlockSizeBits);
- switch (Config2->DriveGeometry)
- {
- case DAC960_V1_Geometry_128_32:
- Controller->V1.GeometryTranslationHeads = 128;
- Controller->V1.GeometryTranslationSectors = 32;
- break;
- case DAC960_V1_Geometry_255_63:
- Controller->V1.GeometryTranslationHeads = 255;
- Controller->V1.GeometryTranslationSectors = 63;
- break;
- default:
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "CONFIG2 DRIVE GEOMETRY");
- }
- /*
- Initialize the Background Initialization Status.
- */
- if ((Controller->FirmwareVersion[0] == '4' &&
- strcmp(Controller->FirmwareVersion, "4.08") >= 0) ||
- (Controller->FirmwareVersion[0] == '5' &&
- strcmp(Controller->FirmwareVersion, "5.08") >= 0))
- {
- Controller->V1.BackgroundInitializationStatusSupported = true;
- DAC960_V1_ExecuteType3B(Controller,
- DAC960_V1_BackgroundInitializationControl, 0x20,
- Controller->
- V1.BackgroundInitializationStatusDMA);
- memcpy(&Controller->V1.LastBackgroundInitializationStatus,
- Controller->V1.BackgroundInitializationStatus,
- sizeof(DAC960_V1_BackgroundInitializationStatus_T));
- }
- /*
- Initialize the Logical Drive Initially Accessible flag.
- */
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < Controller->LogicalDriveCount;
- LogicalDriveNumber++)
- if (Controller->V1.LogicalDriveInformation
- [LogicalDriveNumber].LogicalDriveState !=
- DAC960_V1_LogicalDrive_Offline)
- Controller->LogicalDriveInitiallyAccessible[LogicalDriveNumber] = true;
- Controller->V1.LastRebuildStatus = DAC960_V1_NoRebuildOrCheckInProgress;
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return true;
-}
-
-
-/*
- DAC960_V2_ReadControllerConfiguration reads the Configuration Information
- from DAC960 V2 Firmware Controllers and initializes the Controller structure.
-*/
-
-static bool DAC960_V2_ReadControllerConfiguration(DAC960_Controller_T
- *Controller)
-{
- DAC960_V2_ControllerInfo_T *ControllerInfo =
- &Controller->V2.ControllerInformation;
- unsigned short LogicalDeviceNumber = 0;
- int ModelNameLength;
-
- /* Get data into dma-able area, then copy into permanent location */
- if (!DAC960_V2_NewControllerInfo(Controller))
- return DAC960_Failure(Controller, "GET CONTROLLER INFO");
- memcpy(ControllerInfo, Controller->V2.NewControllerInformation,
- sizeof(DAC960_V2_ControllerInfo_T));
-
-
- if (!DAC960_V2_GeneralInfo(Controller))
- return DAC960_Failure(Controller, "GET HEALTH STATUS");
-
- /*
- Initialize the Controller Model Name and Full Model Name fields.
- */
- ModelNameLength = sizeof(ControllerInfo->ControllerName);
- if (ModelNameLength > sizeof(Controller->ModelName)-1)
- ModelNameLength = sizeof(Controller->ModelName)-1;
- memcpy(Controller->ModelName, ControllerInfo->ControllerName,
- ModelNameLength);
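- /* Trim trailing spaces and NUL padding from the copied name. */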
- ModelNameLength--;
- while (Controller->ModelName[ModelNameLength] == ' ' ||
- Controller->ModelName[ModelNameLength] == '\0')
- ModelNameLength--;
- Controller->ModelName[++ModelNameLength] = '\0';
- strcpy(Controller->FullModelName, "Mylex ");
- strcat(Controller->FullModelName, Controller->ModelName);
- /*
- Initialize the Controller Firmware Version field.
- */
- sprintf(Controller->FirmwareVersion, "%d.%02d-%02d",
- ControllerInfo->FirmwareMajorVersion,
- ControllerInfo->FirmwareMinorVersion,
- ControllerInfo->FirmwareTurnNumber);
- if (ControllerInfo->FirmwareMajorVersion == 6 &&
- ControllerInfo->FirmwareMinorVersion == 0 &&
- ControllerInfo->FirmwareTurnNumber < 1)
- {
- DAC960_Info("FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n",
- Controller, Controller->FirmwareVersion);
- DAC960_Info("STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n",
- Controller);
- DAC960_Info("PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
- Controller);
- }
- /*
- Initialize the Controller Channels, Targets, and Memory Size.
- */
- Controller->Channels = ControllerInfo->NumberOfPhysicalChannelsPresent;
- Controller->Targets =
- ControllerInfo->MaximumTargetsPerChannel
- [ControllerInfo->NumberOfPhysicalChannelsPresent-1];
- Controller->MemorySize = ControllerInfo->MemorySizeMB;
- /*
- Initialize the Controller Queue Depth, Driver Queue Depth, Logical Drive
- Count, Maximum Blocks per Command, Controller Scatter/Gather Limit, and
- Driver Scatter/Gather Limit. The Driver Queue Depth must be at most one
- less than the Controller Queue Depth to allow for an automatic drive
- rebuild operation.
- */
- Controller->ControllerQueueDepth = ControllerInfo->MaximumParallelCommands;
- Controller->DriverQueueDepth = Controller->ControllerQueueDepth - 1;
- if (Controller->DriverQueueDepth > DAC960_MaxDriverQueueDepth)
- Controller->DriverQueueDepth = DAC960_MaxDriverQueueDepth;
- Controller->LogicalDriveCount = ControllerInfo->LogicalDevicesPresent;
- Controller->MaxBlocksPerCommand =
- ControllerInfo->MaximumDataTransferSizeInBlocks;
- Controller->ControllerScatterGatherLimit =
- ControllerInfo->MaximumScatterGatherEntries;
- Controller->DriverScatterGatherLimit =
- Controller->ControllerScatterGatherLimit;
- if (Controller->DriverScatterGatherLimit > DAC960_V2_ScatterGatherLimit)
- Controller->DriverScatterGatherLimit = DAC960_V2_ScatterGatherLimit;
- /*
- Initialize the Logical Device Information.
- */
- while (true)
- {
- DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo =
- Controller->V2.NewLogicalDeviceInformation;
- DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo;
- DAC960_V2_PhysicalDevice_T PhysicalDevice;
-
- if (!DAC960_V2_NewLogicalDeviceInfo(Controller, LogicalDeviceNumber))
- break;
- LogicalDeviceNumber = NewLogicalDeviceInfo->LogicalDeviceNumber;
- if (LogicalDeviceNumber >= DAC960_MaxLogicalDrives) {
- DAC960_Error("DAC960: Logical Drive Number %d not supported\n",
- Controller, LogicalDeviceNumber);
- break;
- }
- if (NewLogicalDeviceInfo->DeviceBlockSizeInBytes != DAC960_BlockSize) {
- DAC960_Error("DAC960: Logical Drive Block Size %d not supported\n",
- Controller, NewLogicalDeviceInfo->DeviceBlockSizeInBytes);
- LogicalDeviceNumber++;
- continue;
- }
- PhysicalDevice.Controller = 0;
- PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel;
- PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID;
- PhysicalDevice.LogicalUnit = NewLogicalDeviceInfo->LogicalUnit;
- Controller->V2.LogicalDriveToVirtualDevice[LogicalDeviceNumber] =
- PhysicalDevice;
- if (NewLogicalDeviceInfo->LogicalDeviceState !=
- DAC960_V2_LogicalDevice_Offline)
- Controller->LogicalDriveInitiallyAccessible[LogicalDeviceNumber] = true;
- LogicalDeviceInfo = kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T),
- GFP_ATOMIC);
- if (LogicalDeviceInfo == NULL)
- return DAC960_Failure(Controller, "LOGICAL DEVICE ALLOCATION");
- Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] =
- LogicalDeviceInfo;
- memcpy(LogicalDeviceInfo, NewLogicalDeviceInfo,
- sizeof(DAC960_V2_LogicalDeviceInfo_T));
- LogicalDeviceNumber++;
- }
- return true;
-}
-
-
-/*
- DAC960_ReportControllerConfiguration reports the Configuration Information
- for Controller.
-*/
-
-static bool DAC960_ReportControllerConfiguration(DAC960_Controller_T
- *Controller)
-{
- DAC960_Info("Configuring Mylex %s PCI RAID Controller\n",
- Controller, Controller->ModelName);
- DAC960_Info(" Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
- Controller, Controller->FirmwareVersion,
- Controller->Channels, Controller->MemorySize);
- DAC960_Info(" PCI Bus: %d, Device: %d, Function: %d, I/O Address: ",
- Controller, Controller->Bus,
- Controller->Device, Controller->Function);
- if (Controller->IO_Address == 0)
- DAC960_Info("Unassigned\n", Controller);
- else DAC960_Info("0x%X\n", Controller, Controller->IO_Address);
- DAC960_Info(" PCI Address: 0x%X mapped at 0x%lX, IRQ Channel: %d\n",
- Controller, Controller->PCI_Address,
- (unsigned long) Controller->BaseAddress,
- Controller->IRQ_Channel);
- DAC960_Info(" Controller Queue Depth: %d, "
- "Maximum Blocks per Command: %d\n",
- Controller, Controller->ControllerQueueDepth,
- Controller->MaxBlocksPerCommand);
- DAC960_Info(" Driver Queue Depth: %d, "
- "Scatter/Gather Limit: %d of %d Segments\n",
- Controller, Controller->DriverQueueDepth,
- Controller->DriverScatterGatherLimit,
- Controller->ControllerScatterGatherLimit);
- if (Controller->FirmwareType == DAC960_V1_Controller)
- {
- DAC960_Info(" Stripe Size: %dKB, Segment Size: %dKB, "
- "BIOS Geometry: %d/%d\n", Controller,
- Controller->V1.StripeSize,
- Controller->V1.SegmentSize,
- Controller->V1.GeometryTranslationHeads,
- Controller->V1.GeometryTranslationSectors);
- if (Controller->V1.SAFTE_EnclosureManagementEnabled)
- DAC960_Info(" SAF-TE Enclosure Management Enabled\n", Controller);
- }
- return true;
-}
-
-
-/*
- DAC960_V1_ReadDeviceConfiguration reads the Device Configuration Information
- for DAC960 V1 Firmware Controllers by requesting the SCSI Inquiry and SCSI
- Inquiry Unit Serial Number information for each device connected to
- Controller.
-*/
-
-static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
- *Controller)
-{
- struct dma_loaf local_dma;
-
- dma_addr_t DCDBs_dma[DAC960_V1_MaxChannels];
- DAC960_V1_DCDB_T *DCDBs_cpu[DAC960_V1_MaxChannels];
-
- dma_addr_t SCSI_Inquiry_dma[DAC960_V1_MaxChannels];
- DAC960_SCSI_Inquiry_T *SCSI_Inquiry_cpu[DAC960_V1_MaxChannels];
-
- dma_addr_t SCSI_NewInquiryUnitSerialNumberDMA[DAC960_V1_MaxChannels];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *SCSI_NewInquiryUnitSerialNumberCPU[DAC960_V1_MaxChannels];
-
- struct completion Completions[DAC960_V1_MaxChannels];
- unsigned long flags;
- int Channel, TargetID;
-
- if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
- DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
- sizeof(DAC960_SCSI_Inquiry_T) +
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T))))
- return DAC960_Failure(Controller,
- "DMA ALLOCATION FAILED IN ReadDeviceConfiguration");
-
- for (Channel = 0; Channel < Controller->Channels; Channel++) {
- DCDBs_cpu[Channel] = slice_dma_loaf(&local_dma,
- sizeof(DAC960_V1_DCDB_T), DCDBs_dma + Channel);
- SCSI_Inquiry_cpu[Channel] = slice_dma_loaf(&local_dma,
- sizeof(DAC960_SCSI_Inquiry_T),
- SCSI_Inquiry_dma + Channel);
- SCSI_NewInquiryUnitSerialNumberCPU[Channel] = slice_dma_loaf(&local_dma,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
- SCSI_NewInquiryUnitSerialNumberDMA + Channel);
- }
-
- for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
- {
- /*
- * For each channel, submit a probe for a device on that channel.
- * The timeout interval for a device that is present is 10 seconds.
- * With this approach, the timeout periods can elapse in parallel
- * on each channel.
- */
- for (Channel = 0; Channel < Controller->Channels; Channel++)
- {
- dma_addr_t NewInquiryStandardDataDMA = SCSI_Inquiry_dma[Channel];
- DAC960_V1_DCDB_T *DCDB = DCDBs_cpu[Channel];
- dma_addr_t DCDB_dma = DCDBs_dma[Channel];
- DAC960_Command_T *Command = Controller->Commands[Channel];
- struct completion *Completion = &Completions[Channel];
-
- init_completion(Completion);
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- Command->Completion = Completion;
- Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
- Command->V1.CommandMailbox.Type3.BusAddress = DCDB_dma;
- DCDB->Channel = Channel;
- DCDB->TargetID = TargetID;
- DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
- DCDB->EarlyStatus = false;
- DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
- DCDB->NoAutomaticRequestSense = false;
- DCDB->DisconnectPermitted = true;
- DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_T);
- DCDB->BusAddress = NewInquiryStandardDataDMA;
- DCDB->CDBLength = 6;
- DCDB->TransferLengthHigh4 = 0;
- DCDB->SenseLength = sizeof(DCDB->SenseData);
- DCDB->CDB[0] = 0x12; /* INQUIRY */
- DCDB->CDB[1] = 0; /* EVPD = 0 */
- DCDB->CDB[2] = 0; /* Page Code */
- DCDB->CDB[3] = 0; /* Reserved */
- DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_T);
- DCDB->CDB[5] = 0; /* Control */
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_QueueCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- }
- /*
- * Wait for the probes submitted in the previous loop
- * to complete. On the probes that are successful,
- * get the serial number of the device that was found.
- */
- for (Channel = 0; Channel < Controller->Channels; Channel++)
- {
- DAC960_SCSI_Inquiry_T *InquiryStandardData =
- &Controller->V1.InquiryStandardData[Channel][TargetID];
- DAC960_SCSI_Inquiry_T *NewInquiryStandardData = SCSI_Inquiry_cpu[Channel];
- dma_addr_t NewInquiryUnitSerialNumberDMA =
- SCSI_NewInquiryUnitSerialNumberDMA[Channel];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber =
- SCSI_NewInquiryUnitSerialNumberCPU[Channel];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- &Controller->V1.InquiryUnitSerialNumber[Channel][TargetID];
- DAC960_Command_T *Command = Controller->Commands[Channel];
- DAC960_V1_DCDB_T *DCDB = DCDBs_cpu[Channel];
- struct completion *Completion = &Completions[Channel];
-
- wait_for_completion(Completion);
-
- if (Command->V1.CommandStatus != DAC960_V1_NormalCompletion) {
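- /* SCSI peripheral device type 0x1F ("no device") marks the slot empty. */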
- memset(InquiryStandardData, 0, sizeof(DAC960_SCSI_Inquiry_T));
- InquiryStandardData->PeripheralDeviceType = 0x1F;
- continue;
- } else
- memcpy(InquiryStandardData, NewInquiryStandardData, sizeof(DAC960_SCSI_Inquiry_T));
-
- /* Preserve Channel and TargetID values from the previous loop */
- Command->Completion = Completion;
- DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- DCDB->BusAddress = NewInquiryUnitSerialNumberDMA;
- DCDB->SenseLength = sizeof(DCDB->SenseData);
- DCDB->CDB[0] = 0x12; /* INQUIRY */
- DCDB->CDB[1] = 1; /* EVPD = 1 */
- DCDB->CDB[2] = 0x80; /* Page Code */
- DCDB->CDB[3] = 0; /* Reserved */
- DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- DCDB->CDB[5] = 0; /* Control */
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_QueueCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- wait_for_completion(Completion);
-
- if (Command->V1.CommandStatus != DAC960_V1_NormalCompletion) {
- memset(InquiryUnitSerialNumber, 0,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
- } else
- memcpy(InquiryUnitSerialNumber, NewInquiryUnitSerialNumber,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- }
- }
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return true;
-}
-
-
-/*
- DAC960_V2_ReadDeviceConfiguration reads the Device Configuration Information
- for DAC960 V2 Firmware Controllers by requesting the Physical Device
- Information and SCSI Inquiry Unit Serial Number information for each
- device connected to Controller.
-*/
-
-static bool DAC960_V2_ReadDeviceConfiguration(DAC960_Controller_T
- *Controller)
-{
- unsigned char Channel = 0, TargetID = 0, LogicalUnit = 0;
- unsigned short PhysicalDeviceIndex = 0;
-
- while (true)
- {
- DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
- Controller->V2.NewPhysicalDeviceInformation;
- DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo;
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber =
- Controller->V2.NewInquiryUnitSerialNumber;
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber;
-
- if (!DAC960_V2_NewPhysicalDeviceInfo(Controller, Channel, TargetID, LogicalUnit))
- break;
-
- PhysicalDeviceInfo = kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T),
- GFP_ATOMIC);
- if (PhysicalDeviceInfo == NULL)
- return DAC960_Failure(Controller, "PHYSICAL DEVICE ALLOCATION");
- Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex] =
- PhysicalDeviceInfo;
- memcpy(PhysicalDeviceInfo, NewPhysicalDeviceInfo,
- sizeof(DAC960_V2_PhysicalDeviceInfo_T));
-
- InquiryUnitSerialNumber = kmalloc(
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), GFP_ATOMIC);
- if (InquiryUnitSerialNumber == NULL) {
- kfree(PhysicalDeviceInfo);
- return DAC960_Failure(Controller, "SERIAL NUMBER ALLOCATION");
- }
- Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex] =
- InquiryUnitSerialNumber;
-
- Channel = NewPhysicalDeviceInfo->Channel;
- TargetID = NewPhysicalDeviceInfo->TargetID;
- LogicalUnit = NewPhysicalDeviceInfo->LogicalUnit;
-
- /*
- Some devices do NOT have Unit Serial Numbers.
- This command fails for them. But, we still want to
- remember those devices are there. Construct a
- UnitSerialNumber structure for the failure case.
- */
- if (!DAC960_V2_NewInquiryUnitSerialNumber(Controller, Channel, TargetID, LogicalUnit)) {
- memset(InquiryUnitSerialNumber, 0,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
- } else
- memcpy(InquiryUnitSerialNumber, NewInquiryUnitSerialNumber,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
-
- PhysicalDeviceIndex++;
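- /* Bump LogicalUnit so the next query returns the device after this one. */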
- LogicalUnit++;
- }
- return true;
-}
-
-
-/*
- DAC960_SanitizeInquiryData sanitizes the Vendor, Model, Revision, and
- Product Serial Number fields of the Inquiry Standard Data and Inquiry
- Unit Serial Number structures.
-*/
-
-static void DAC960_SanitizeInquiryData(DAC960_SCSI_Inquiry_T
- *InquiryStandardData,
- DAC960_SCSI_Inquiry_UnitSerialNumber_T
- *InquiryUnitSerialNumber,
- unsigned char *Vendor,
- unsigned char *Model,
- unsigned char *Revision,
- unsigned char *SerialNumber)
-{
- int SerialNumberLength, i;
- if (InquiryStandardData->PeripheralDeviceType == 0x1F) return;
- for (i = 0; i < sizeof(InquiryStandardData->VendorIdentification); i++)
- {
- unsigned char VendorCharacter =
- InquiryStandardData->VendorIdentification[i];
- Vendor[i] = (VendorCharacter >= ' ' && VendorCharacter <= '~'
- ? VendorCharacter : ' ');
- }
- Vendor[sizeof(InquiryStandardData->VendorIdentification)] = '\0';
- for (i = 0; i < sizeof(InquiryStandardData->ProductIdentification); i++)
- {
- unsigned char ModelCharacter =
- InquiryStandardData->ProductIdentification[i];
- Model[i] = (ModelCharacter >= ' ' && ModelCharacter <= '~'
- ? ModelCharacter : ' ');
- }
- Model[sizeof(InquiryStandardData->ProductIdentification)] = '\0';
- for (i = 0; i < sizeof(InquiryStandardData->ProductRevisionLevel); i++)
- {
- unsigned char RevisionCharacter =
- InquiryStandardData->ProductRevisionLevel[i];
- Revision[i] = (RevisionCharacter >= ' ' && RevisionCharacter <= '~'
- ? RevisionCharacter : ' ');
- }
- Revision[sizeof(InquiryStandardData->ProductRevisionLevel)] = '\0';
- if (InquiryUnitSerialNumber->PeripheralDeviceType == 0x1F) return;
- SerialNumberLength = InquiryUnitSerialNumber->PageLength;
- if (SerialNumberLength >
- sizeof(InquiryUnitSerialNumber->ProductSerialNumber))
- SerialNumberLength = sizeof(InquiryUnitSerialNumber->ProductSerialNumber);
- for (i = 0; i < SerialNumberLength; i++)
- {
- unsigned char SerialNumberCharacter =
- InquiryUnitSerialNumber->ProductSerialNumber[i];
- SerialNumber[i] =
- (SerialNumberCharacter >= ' ' && SerialNumberCharacter <= '~'
- ? SerialNumberCharacter : ' ');
- }
- SerialNumber[SerialNumberLength] = '\0';
-}
-
-
-/*
- DAC960_V1_ReportDeviceConfiguration reports the Device Configuration
- Information for DAC960 V1 Firmware Controllers.
-*/
-
-static bool DAC960_V1_ReportDeviceConfiguration(DAC960_Controller_T
- *Controller)
-{
- int LogicalDriveNumber, Channel, TargetID;
- DAC960_Info(" Physical Devices:\n", Controller);
- for (Channel = 0; Channel < Controller->Channels; Channel++)
- for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
- {
- DAC960_SCSI_Inquiry_T *InquiryStandardData =
- &Controller->V1.InquiryStandardData[Channel][TargetID];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- &Controller->V1.InquiryUnitSerialNumber[Channel][TargetID];
- DAC960_V1_DeviceState_T *DeviceState =
- &Controller->V1.DeviceState[Channel][TargetID];
- DAC960_V1_ErrorTableEntry_T *ErrorEntry =
- &Controller->V1.ErrorTable.ErrorTableEntries[Channel][TargetID];
- char Vendor[1+sizeof(InquiryStandardData->VendorIdentification)];
- char Model[1+sizeof(InquiryStandardData->ProductIdentification)];
- char Revision[1+sizeof(InquiryStandardData->ProductRevisionLevel)];
- char SerialNumber[1+sizeof(InquiryUnitSerialNumber
- ->ProductSerialNumber)];
- if (InquiryStandardData->PeripheralDeviceType == 0x1F) continue;
- DAC960_SanitizeInquiryData(InquiryStandardData, InquiryUnitSerialNumber,
- Vendor, Model, Revision, SerialNumber);
- DAC960_Info(" %d:%d%s Vendor: %s Model: %s Revision: %s\n",
- Controller, Channel, TargetID, (TargetID < 10 ? " " : ""),
- Vendor, Model, Revision);
- if (InquiryUnitSerialNumber->PeripheralDeviceType != 0x1F)
- DAC960_Info(" Serial Number: %s\n", Controller, SerialNumber);
- if (DeviceState->Present &&
- DeviceState->DeviceType == DAC960_V1_DiskType)
- {
- if (Controller->V1.DeviceResetCount[Channel][TargetID] > 0)
- DAC960_Info(" Disk Status: %s, %u blocks, %d resets\n",
- Controller,
- (DeviceState->DeviceState == DAC960_V1_Device_Dead
- ? "Dead"
- : DeviceState->DeviceState
- == DAC960_V1_Device_WriteOnly
- ? "Write-Only"
- : DeviceState->DeviceState
- == DAC960_V1_Device_Online
- ? "Online" : "Standby"),
- DeviceState->DiskSize,
- Controller->V1.DeviceResetCount[Channel][TargetID]);
- else
- DAC960_Info(" Disk Status: %s, %u blocks\n", Controller,
- (DeviceState->DeviceState == DAC960_V1_Device_Dead
- ? "Dead"
- : DeviceState->DeviceState
- == DAC960_V1_Device_WriteOnly
- ? "Write-Only"
- : DeviceState->DeviceState
- == DAC960_V1_Device_Online
- ? "Online" : "Standby"),
- DeviceState->DiskSize);
- }
- if (ErrorEntry->ParityErrorCount > 0 ||
- ErrorEntry->SoftErrorCount > 0 ||
- ErrorEntry->HardErrorCount > 0 ||
- ErrorEntry->MiscErrorCount > 0)
- DAC960_Info(" Errors - Parity: %d, Soft: %d, "
- "Hard: %d, Misc: %d\n", Controller,
- ErrorEntry->ParityErrorCount,
- ErrorEntry->SoftErrorCount,
- ErrorEntry->HardErrorCount,
- ErrorEntry->MiscErrorCount);
- }
- DAC960_Info(" Logical Drives:\n", Controller);
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < Controller->LogicalDriveCount;
- LogicalDriveNumber++)
- {
- DAC960_V1_LogicalDriveInformation_T *LogicalDriveInformation =
- &Controller->V1.LogicalDriveInformation[LogicalDriveNumber];
- DAC960_Info(" /dev/rd/c%dd%d: RAID-%d, %s, %u blocks, %s\n",
- Controller, Controller->ControllerNumber, LogicalDriveNumber,
- LogicalDriveInformation->RAIDLevel,
- (LogicalDriveInformation->LogicalDriveState
- == DAC960_V1_LogicalDrive_Online
- ? "Online"
- : LogicalDriveInformation->LogicalDriveState
- == DAC960_V1_LogicalDrive_Critical
- ? "Critical" : "Offline"),
- LogicalDriveInformation->LogicalDriveSize,
- (LogicalDriveInformation->WriteBack
- ? "Write Back" : "Write Thru"));
- }
- return true;
-}
-
-
-/*
- DAC960_V2_ReportDeviceConfiguration reports the Device Configuration
- Information for DAC960 V2 Firmware Controllers.
-*/
-
-static bool DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T
- *Controller)
-{
- int PhysicalDeviceIndex, LogicalDriveNumber;
- DAC960_Info(" Physical Devices:\n", Controller);
- for (PhysicalDeviceIndex = 0;
- PhysicalDeviceIndex < DAC960_V2_MaxPhysicalDevices;
- PhysicalDeviceIndex++)
- {
- DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
- Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
- DAC960_SCSI_Inquiry_T *InquiryStandardData =
- (DAC960_SCSI_Inquiry_T *) &PhysicalDeviceInfo->SCSI_InquiryData;
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
- char Vendor[1+sizeof(InquiryStandardData->VendorIdentification)];
- char Model[1+sizeof(InquiryStandardData->ProductIdentification)];
- char Revision[1+sizeof(InquiryStandardData->ProductRevisionLevel)];
- char SerialNumber[1+sizeof(InquiryUnitSerialNumber->ProductSerialNumber)];
- if (PhysicalDeviceInfo == NULL) break;
- DAC960_SanitizeInquiryData(InquiryStandardData, InquiryUnitSerialNumber,
- Vendor, Model, Revision, SerialNumber);
- DAC960_Info(" %d:%d%s Vendor: %s Model: %s Revision: %s\n",
- Controller,
- PhysicalDeviceInfo->Channel,
- PhysicalDeviceInfo->TargetID,
- (PhysicalDeviceInfo->TargetID < 10 ? " " : ""),
- Vendor, Model, Revision);
- if (PhysicalDeviceInfo->NegotiatedSynchronousMegaTransfers == 0)
- DAC960_Info(" %sAsynchronous\n", Controller,
- (PhysicalDeviceInfo->NegotiatedDataWidthBits == 16
- ? "Wide " :""));
- else
- DAC960_Info(" %sSynchronous at %d MB/sec\n", Controller,
- (PhysicalDeviceInfo->NegotiatedDataWidthBits == 16
- ? "Wide " :""),
- (PhysicalDeviceInfo->NegotiatedSynchronousMegaTransfers
- * PhysicalDeviceInfo->NegotiatedDataWidthBits/8));
- if (InquiryUnitSerialNumber->PeripheralDeviceType != 0x1F)
- DAC960_Info(" Serial Number: %s\n", Controller, SerialNumber);
- if (PhysicalDeviceInfo->PhysicalDeviceState ==
- DAC960_V2_Device_Unconfigured)
- continue;
- DAC960_Info(" Disk Status: %s, %u blocks\n", Controller,
- (PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Online
- ? "Online"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Rebuild
- ? "Rebuild"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Missing
- ? "Missing"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Critical
- ? "Critical"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Dead
- ? "Dead"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_SuspectedDead
- ? "Suspected-Dead"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_CommandedOffline
- ? "Commanded-Offline"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Standby
- ? "Standby" : "Unknown"),
- PhysicalDeviceInfo->ConfigurableDeviceSize);
- if (PhysicalDeviceInfo->ParityErrors == 0 &&
- PhysicalDeviceInfo->SoftErrors == 0 &&
- PhysicalDeviceInfo->HardErrors == 0 &&
- PhysicalDeviceInfo->MiscellaneousErrors == 0 &&
- PhysicalDeviceInfo->CommandTimeouts == 0 &&
- PhysicalDeviceInfo->Retries == 0 &&
- PhysicalDeviceInfo->Aborts == 0 &&
- PhysicalDeviceInfo->PredictedFailuresDetected == 0)
- continue;
- DAC960_Info(" Errors - Parity: %d, Soft: %d, "
- "Hard: %d, Misc: %d\n", Controller,
- PhysicalDeviceInfo->ParityErrors,
- PhysicalDeviceInfo->SoftErrors,
- PhysicalDeviceInfo->HardErrors,
- PhysicalDeviceInfo->MiscellaneousErrors);
- DAC960_Info(" Timeouts: %d, Retries: %d, "
- "Aborts: %d, Predicted: %d\n", Controller,
- PhysicalDeviceInfo->CommandTimeouts,
- PhysicalDeviceInfo->Retries,
- PhysicalDeviceInfo->Aborts,
- PhysicalDeviceInfo->PredictedFailuresDetected);
- }
- DAC960_Info(" Logical Drives:\n", Controller);
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < DAC960_MaxLogicalDrives;
- LogicalDriveNumber++)
- {
- DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
- Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
- static const unsigned char *ReadCacheStatus[] = {
- "Read Cache Disabled",
- "Read Cache Enabled",
- "Read Ahead Enabled",
- "Intelligent Read Ahead Enabled",
- "-", "-", "-", "-"
- };
- static const unsigned char *WriteCacheStatus[] = {
- "Write Cache Disabled",
- "Logical Device Read Only",
- "Write Cache Enabled",
- "Intelligent Write Cache Enabled",
- "-", "-", "-", "-"
- };
- unsigned char *GeometryTranslation;
- if (LogicalDeviceInfo == NULL) continue;
- switch (LogicalDeviceInfo->DriveGeometry)
- {
- case DAC960_V2_Geometry_128_32:
- GeometryTranslation = "128/32";
- break;
- case DAC960_V2_Geometry_255_63:
- GeometryTranslation = "255/63";
- break;
- default:
- GeometryTranslation = "Invalid";
- DAC960_Error("Illegal Logical Device Geometry %d\n",
- Controller, LogicalDeviceInfo->DriveGeometry);
- break;
- }
- DAC960_Info(" /dev/rd/c%dd%d: RAID-%d, %s, %u blocks\n",
- Controller, Controller->ControllerNumber, LogicalDriveNumber,
- LogicalDeviceInfo->RAIDLevel,
- (LogicalDeviceInfo->LogicalDeviceState
- == DAC960_V2_LogicalDevice_Online
- ? "Online"
- : LogicalDeviceInfo->LogicalDeviceState
- == DAC960_V2_LogicalDevice_Critical
- ? "Critical" : "Offline"),
- LogicalDeviceInfo->ConfigurableDeviceSize);
- DAC960_Info(" Logical Device %s, BIOS Geometry: %s\n",
- Controller,
- (LogicalDeviceInfo->LogicalDeviceControl
- .LogicalDeviceInitialized
- ? "Initialized" : "Uninitialized"),
- GeometryTranslation);
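-      /* StripeSize and CacheLineSize appear to be log2-encoded: a value of
-         zero means not applicable, and a nonzero value n corresponds to
-         1 << (n - 2) KB, as used in the messages below. */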
- if (LogicalDeviceInfo->StripeSize == 0)
- {
- if (LogicalDeviceInfo->CacheLineSize == 0)
- DAC960_Info(" Stripe Size: N/A, "
- "Segment Size: N/A\n", Controller);
- else
- DAC960_Info(" Stripe Size: N/A, "
- "Segment Size: %dKB\n", Controller,
- 1 << (LogicalDeviceInfo->CacheLineSize - 2));
- }
- else
- {
- if (LogicalDeviceInfo->CacheLineSize == 0)
- DAC960_Info(" Stripe Size: %dKB, "
- "Segment Size: N/A\n", Controller,
- 1 << (LogicalDeviceInfo->StripeSize - 2));
- else
- DAC960_Info(" Stripe Size: %dKB, "
- "Segment Size: %dKB\n", Controller,
- 1 << (LogicalDeviceInfo->StripeSize - 2),
- 1 << (LogicalDeviceInfo->CacheLineSize - 2));
- }
- DAC960_Info(" %s, %s\n", Controller,
- ReadCacheStatus[
- LogicalDeviceInfo->LogicalDeviceControl.ReadCache],
- WriteCacheStatus[
- LogicalDeviceInfo->LogicalDeviceControl.WriteCache]);
- if (LogicalDeviceInfo->SoftErrors > 0 ||
- LogicalDeviceInfo->CommandsFailed > 0 ||
- LogicalDeviceInfo->DeferredWriteErrors)
- DAC960_Info(" Errors - Soft: %d, Failed: %d, "
- "Deferred Write: %d\n", Controller,
- LogicalDeviceInfo->SoftErrors,
- LogicalDeviceInfo->CommandsFailed,
- LogicalDeviceInfo->DeferredWriteErrors);
-
- }
- return true;
-}
-
-/*
- DAC960_RegisterBlockDevice registers the Block Device structures
- associated with Controller.
-*/
-
-static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
-{
- int MajorNumber = DAC960_MAJOR + Controller->ControllerNumber;
- int n;
-
- /*
- Register the Block Device Major Number for this DAC960 Controller.
- */
- if (register_blkdev(MajorNumber, "dac960") < 0)
- return false;
-
- for (n = 0; n < DAC960_MaxLogicalDrives; n++) {
- struct gendisk *disk = Controller->disks[n];
- struct request_queue *RequestQueue;
-
- /* for now, let all request queues share controller's lock */
- RequestQueue = blk_init_queue(DAC960_RequestFunction,&Controller->queue_lock);
- if (!RequestQueue) {
-			printk(KERN_ERR "DAC960: failure to allocate request queue\n");
- continue;
- }
- Controller->RequestQueue[n] = RequestQueue;
- RequestQueue->queuedata = Controller;
- blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
- blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
- disk->queue = RequestQueue;
- sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
- disk->major = MajorNumber;
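-		/* Each Logical Drive gets a block of 1 << DAC960_MaxPartitionsBits
-		   minor numbers so that its partitions fit beneath it. */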
- disk->first_minor = n << DAC960_MaxPartitionsBits;
- disk->fops = &DAC960_BlockDeviceOperations;
- }
- /*
-    Indicate that the Block Device Registration completed successfully.
- */
- return true;
-}
-
-
-/*
- DAC960_UnregisterBlockDevice unregisters the Block Device structures
- associated with Controller.
-*/
-
-static void DAC960_UnregisterBlockDevice(DAC960_Controller_T *Controller)
-{
- int MajorNumber = DAC960_MAJOR + Controller->ControllerNumber;
- int disk;
-
-	/* Does the order of del_gendisk() versus blk_cleanup_queue() matter here? */
- for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++) {
- del_gendisk(Controller->disks[disk]);
- blk_cleanup_queue(Controller->RequestQueue[disk]);
- Controller->RequestQueue[disk] = NULL;
- }
-
- /*
- Unregister the Block Device Major Number for this DAC960 Controller.
- */
- unregister_blkdev(MajorNumber, "dac960");
-}
-
-/*
-  DAC960_ComputeGenericDiskInfo computes the capacity of each Logical
-  Drive's Generic Disk from its current disk_size.
-*/
-
-static void DAC960_ComputeGenericDiskInfo(DAC960_Controller_T *Controller)
-{
- int disk;
- for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++)
- set_capacity(Controller->disks[disk], disk_size(Controller, disk));
-}
-
-/*
- DAC960_ReportErrorStatus reports Controller BIOS Messages passed through
- the Error Status Register when the driver performs the BIOS handshaking.
- It returns true for fatal errors and false otherwise.
-*/
-
-static bool DAC960_ReportErrorStatus(DAC960_Controller_T *Controller,
- unsigned char ErrorStatus,
- unsigned char Parameter0,
- unsigned char Parameter1)
-{
- switch (ErrorStatus)
- {
- case 0x00:
- DAC960_Notice("Physical Device %d:%d Not Responding\n",
- Controller, Parameter1, Parameter0);
- break;
- case 0x08:
- if (Controller->DriveSpinUpMessageDisplayed) break;
- DAC960_Notice("Spinning Up Drives\n", Controller);
- Controller->DriveSpinUpMessageDisplayed = true;
- break;
- case 0x30:
- DAC960_Notice("Configuration Checksum Error\n", Controller);
- break;
- case 0x60:
- DAC960_Notice("Mirror Race Recovery Failed\n", Controller);
- break;
- case 0x70:
- DAC960_Notice("Mirror Race Recovery In Progress\n", Controller);
- break;
- case 0x90:
- DAC960_Notice("Physical Device %d:%d COD Mismatch\n",
- Controller, Parameter1, Parameter0);
- break;
- case 0xA0:
- DAC960_Notice("Logical Drive Installation Aborted\n", Controller);
- break;
- case 0xB0:
- DAC960_Notice("Mirror Race On A Critical Logical Drive\n", Controller);
- break;
- case 0xD0:
- DAC960_Notice("New Controller Configuration Found\n", Controller);
- break;
- case 0xF0:
- DAC960_Error("Fatal Memory Parity Error for Controller at\n", Controller);
- return true;
- default:
- DAC960_Error("Unknown Initialization Error %02X for Controller at\n",
- Controller, ErrorStatus);
- return true;
- }
- return false;
-}
-
-
-/*
- * DAC960_DetectCleanup releases the resources that were allocated
- * during DAC960_DetectController(). DAC960_DetectController
- * has several internal failure points, so not ALL resources may
- * have been allocated. It's important to free only
- * resources that HAVE been allocated. The code below always
- * tests that the resource has been allocated before attempting to
- * free it.
- */
-static void DAC960_DetectCleanup(DAC960_Controller_T *Controller)
-{
- int i;
-
- /* Free the memory mailbox, status, and related structures */
- free_dma_loaf(Controller->PCIDevice, &Controller->DmaPages);
- if (Controller->MemoryMappedAddress) {
- switch(Controller->HardwareType)
- {
- case DAC960_GEM_Controller:
- DAC960_GEM_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_BA_Controller:
- DAC960_BA_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_LP_Controller:
- DAC960_LP_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_LA_Controller:
- DAC960_LA_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_PG_Controller:
- DAC960_PG_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_PD_Controller:
- DAC960_PD_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_P_Controller:
- DAC960_PD_DisableInterrupts(Controller->BaseAddress);
- break;
- }
- iounmap(Controller->MemoryMappedAddress);
- }
- if (Controller->IRQ_Channel)
- free_irq(Controller->IRQ_Channel, Controller);
- if (Controller->IO_Address)
- release_region(Controller->IO_Address, 0x80);
- pci_disable_device(Controller->PCIDevice);
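-  /* Disks are allocated in order, so the first NULL entry marks the end
-     of the successfully allocated gendisks; free only those. */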
- for (i = 0; (i < DAC960_MaxLogicalDrives) && Controller->disks[i]; i++)
- put_disk(Controller->disks[i]);
- DAC960_Controllers[Controller->ControllerNumber] = NULL;
- kfree(Controller);
-}
-
-
-/*
- DAC960_DetectController detects Mylex DAC960/AcceleRAID/eXtremeRAID
- PCI RAID Controllers by interrogating the PCI Configuration Space for
- Controller Type.
-*/
-
-static DAC960_Controller_T *
-DAC960_DetectController(struct pci_dev *PCI_Device,
- const struct pci_device_id *entry)
-{
- struct DAC960_privdata *privdata =
- (struct DAC960_privdata *)entry->driver_data;
- irq_handler_t InterruptHandler = privdata->InterruptHandler;
- unsigned int MemoryWindowSize = privdata->MemoryWindowSize;
- DAC960_Controller_T *Controller = NULL;
- unsigned char DeviceFunction = PCI_Device->devfn;
- unsigned char ErrorStatus, Parameter0, Parameter1;
- unsigned int IRQ_Channel;
- void __iomem *BaseAddress;
- int i;
-
- Controller = kzalloc(sizeof(DAC960_Controller_T), GFP_ATOMIC);
- if (Controller == NULL) {
- DAC960_Error("Unable to allocate Controller structure for "
- "Controller at\n", NULL);
- return NULL;
- }
- Controller->ControllerNumber = DAC960_ControllerCount;
- DAC960_Controllers[DAC960_ControllerCount++] = Controller;
- Controller->Bus = PCI_Device->bus->number;
- Controller->FirmwareType = privdata->FirmwareType;
- Controller->HardwareType = privdata->HardwareType;
- Controller->Device = DeviceFunction >> 3;
- Controller->Function = DeviceFunction & 0x7;
- Controller->PCIDevice = PCI_Device;
- strcpy(Controller->FullModelName, "DAC960");
-
- if (pci_enable_device(PCI_Device))
- goto Failure;
-
- switch (Controller->HardwareType)
- {
- case DAC960_GEM_Controller:
- Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
- break;
- case DAC960_BA_Controller:
- Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
- break;
- case DAC960_LP_Controller:
- Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
- break;
- case DAC960_LA_Controller:
- Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
- break;
- case DAC960_PG_Controller:
- Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
- break;
- case DAC960_PD_Controller:
- Controller->IO_Address = pci_resource_start(PCI_Device, 0);
- Controller->PCI_Address = pci_resource_start(PCI_Device, 1);
- break;
- case DAC960_P_Controller:
- Controller->IO_Address = pci_resource_start(PCI_Device, 0);
- Controller->PCI_Address = pci_resource_start(PCI_Device, 1);
- break;
- }
-
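-  /* Stash the controller number (not a pointer) in the PCI driver data;
-     DAC960_Remove() uses it to index the DAC960_Controllers[] table. */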
- pci_set_drvdata(PCI_Device, (void *)((long)Controller->ControllerNumber));
- for (i = 0; i < DAC960_MaxLogicalDrives; i++) {
- Controller->disks[i] = alloc_disk(1<<DAC960_MaxPartitionsBits);
- if (!Controller->disks[i])
- goto Failure;
- Controller->disks[i]->private_data = (void *)((long)i);
- }
- init_waitqueue_head(&Controller->CommandWaitQueue);
- init_waitqueue_head(&Controller->HealthStatusWaitQueue);
- spin_lock_init(&Controller->queue_lock);
- DAC960_AnnounceDriver(Controller);
- /*
- Map the Controller Register Window.
- */
- if (MemoryWindowSize < PAGE_SIZE)
- MemoryWindowSize = PAGE_SIZE;
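-  /* Map the register window page-aligned, then add the sub-page offset
-     back in to form BaseAddress. */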
- Controller->MemoryMappedAddress =
- ioremap_nocache(Controller->PCI_Address & PAGE_MASK, MemoryWindowSize);
- Controller->BaseAddress =
- Controller->MemoryMappedAddress + (Controller->PCI_Address & ~PAGE_MASK);
- if (Controller->MemoryMappedAddress == NULL)
- {
- DAC960_Error("Unable to map Controller Register Window for "
- "Controller at\n", Controller);
- goto Failure;
- }
- BaseAddress = Controller->BaseAddress;
- switch (Controller->HardwareType)
- {
- case DAC960_GEM_Controller:
- DAC960_GEM_DisableInterrupts(BaseAddress);
- DAC960_GEM_AcknowledgeHardwareMailboxStatus(BaseAddress);
- udelay(1000);
- while (DAC960_GEM_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_GEM_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V2_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to Enable Memory Mailbox Interface "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_GEM_EnableInterrupts(BaseAddress);
- Controller->QueueCommand = DAC960_GEM_QueueCommand;
- Controller->ReadControllerConfiguration =
- DAC960_V2_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V2_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V2_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V2_QueueReadWriteCommand;
- break;
- case DAC960_BA_Controller:
- DAC960_BA_DisableInterrupts(BaseAddress);
- DAC960_BA_AcknowledgeHardwareMailboxStatus(BaseAddress);
- udelay(1000);
- while (DAC960_BA_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_BA_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V2_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to Enable Memory Mailbox Interface "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_BA_EnableInterrupts(BaseAddress);
- Controller->QueueCommand = DAC960_BA_QueueCommand;
- Controller->ReadControllerConfiguration =
- DAC960_V2_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V2_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V2_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V2_QueueReadWriteCommand;
- break;
- case DAC960_LP_Controller:
- DAC960_LP_DisableInterrupts(BaseAddress);
- DAC960_LP_AcknowledgeHardwareMailboxStatus(BaseAddress);
- udelay(1000);
- while (DAC960_LP_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_LP_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V2_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to Enable Memory Mailbox Interface "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_LP_EnableInterrupts(BaseAddress);
- Controller->QueueCommand = DAC960_LP_QueueCommand;
- Controller->ReadControllerConfiguration =
- DAC960_V2_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V2_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V2_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V2_QueueReadWriteCommand;
- break;
- case DAC960_LA_Controller:
- DAC960_LA_DisableInterrupts(BaseAddress);
- DAC960_LA_AcknowledgeHardwareMailboxStatus(BaseAddress);
- udelay(1000);
- while (DAC960_LA_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_LA_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to Enable Memory Mailbox Interface "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_LA_EnableInterrupts(BaseAddress);
- if (Controller->V1.DualModeMemoryMailboxInterface)
- Controller->QueueCommand = DAC960_LA_QueueCommandDualMode;
- else Controller->QueueCommand = DAC960_LA_QueueCommandSingleMode;
- Controller->ReadControllerConfiguration =
- DAC960_V1_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V1_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V1_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V1_QueueReadWriteCommand;
- break;
- case DAC960_PG_Controller:
- DAC960_PG_DisableInterrupts(BaseAddress);
- DAC960_PG_AcknowledgeHardwareMailboxStatus(BaseAddress);
- udelay(1000);
- while (DAC960_PG_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_PG_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to Enable Memory Mailbox Interface "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_PG_EnableInterrupts(BaseAddress);
- if (Controller->V1.DualModeMemoryMailboxInterface)
- Controller->QueueCommand = DAC960_PG_QueueCommandDualMode;
- else Controller->QueueCommand = DAC960_PG_QueueCommandSingleMode;
- Controller->ReadControllerConfiguration =
- DAC960_V1_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V1_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V1_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V1_QueueReadWriteCommand;
- break;
- case DAC960_PD_Controller:
- if (!request_region(Controller->IO_Address, 0x80,
- Controller->FullModelName)) {
- DAC960_Error("IO port 0x%lx busy for Controller at\n",
- Controller, Controller->IO_Address);
- goto Failure;
- }
- DAC960_PD_DisableInterrupts(BaseAddress);
- DAC960_PD_AcknowledgeStatus(BaseAddress);
- udelay(1000);
- while (DAC960_PD_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_PD_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to allocate DMA mapped memory "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_PD_EnableInterrupts(BaseAddress);
- Controller->QueueCommand = DAC960_PD_QueueCommand;
- Controller->ReadControllerConfiguration =
- DAC960_V1_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V1_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V1_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V1_QueueReadWriteCommand;
- break;
- case DAC960_P_Controller:
- if (!request_region(Controller->IO_Address, 0x80,
- Controller->FullModelName)){
- DAC960_Error("IO port 0x%lx busy for Controller at\n",
- Controller, Controller->IO_Address);
- goto Failure;
- }
- DAC960_PD_DisableInterrupts(BaseAddress);
- DAC960_PD_AcknowledgeStatus(BaseAddress);
- udelay(1000);
- while (DAC960_PD_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_PD_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
- {
-	DAC960_Error("Unable to allocate DMA mapped memory "
-		     "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_PD_EnableInterrupts(BaseAddress);
- Controller->QueueCommand = DAC960_P_QueueCommand;
- Controller->ReadControllerConfiguration =
- DAC960_V1_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V1_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V1_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V1_QueueReadWriteCommand;
- break;
- }
- /*
- Acquire shared access to the IRQ Channel.
- */
- IRQ_Channel = PCI_Device->irq;
- if (request_irq(IRQ_Channel, InterruptHandler, IRQF_SHARED,
- Controller->FullModelName, Controller) < 0)
- {
- DAC960_Error("Unable to acquire IRQ Channel %d for Controller at\n",
-		   Controller, IRQ_Channel);
- goto Failure;
- }
- Controller->IRQ_Channel = IRQ_Channel;
- Controller->InitialCommand.CommandIdentifier = 1;
- Controller->InitialCommand.Controller = Controller;
- Controller->Commands[0] = &Controller->InitialCommand;
- Controller->FreeCommands = &Controller->InitialCommand;
- return Controller;
-
-Failure:
- if (Controller->IO_Address == 0)
- DAC960_Error("PCI Bus %d Device %d Function %d I/O Address N/A "
- "PCI Address 0x%X\n", Controller,
- Controller->Bus, Controller->Device,
- Controller->Function, Controller->PCI_Address);
- else
- DAC960_Error("PCI Bus %d Device %d Function %d I/O Address "
- "0x%X PCI Address 0x%X\n", Controller,
- Controller->Bus, Controller->Device,
- Controller->Function, Controller->IO_Address,
- Controller->PCI_Address);
- DAC960_DetectCleanup(Controller);
- DAC960_ControllerCount--;
- return NULL;
-}
-
-/*
- DAC960_InitializeController initializes Controller.
-*/
-
-static bool
-DAC960_InitializeController(DAC960_Controller_T *Controller)
-{
- if (DAC960_ReadControllerConfiguration(Controller) &&
- DAC960_ReportControllerConfiguration(Controller) &&
- DAC960_CreateAuxiliaryStructures(Controller) &&
- DAC960_ReadDeviceConfiguration(Controller) &&
- DAC960_ReportDeviceConfiguration(Controller) &&
- DAC960_RegisterBlockDevice(Controller))
- {
- /*
- Initialize the Monitoring Timer.
- */
- timer_setup(&Controller->MonitoringTimer,
- DAC960_MonitoringTimerFunction, 0);
- Controller->MonitoringTimer.expires =
- jiffies + DAC960_MonitoringTimerInterval;
- add_timer(&Controller->MonitoringTimer);
- Controller->ControllerInitialized = true;
- return true;
- }
- return false;
-}
-
-
-/*
- DAC960_FinalizeController finalizes Controller.
-*/
-
-static void DAC960_FinalizeController(DAC960_Controller_T *Controller)
-{
- if (Controller->ControllerInitialized)
- {
- unsigned long flags;
-
- /*
- * Acquiring and releasing lock here eliminates
- * a very low probability race.
- *
- * The code below allocates controller command structures
- * from the free list without holding the controller lock.
- * This is safe assuming there is no other activity on
- * the controller at the time.
- *
- * But, there might be a monitoring command still
- * in progress. Setting the Shutdown flag while holding
- * the lock ensures that there is no monitoring command
- * in the interrupt handler currently, and any monitoring
- * commands that complete from this time on will NOT return
- * their command structure to the free list.
- */
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- Controller->ShutdownMonitoringTimer = 1;
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
-
- del_timer_sync(&Controller->MonitoringTimer);
- if (Controller->FirmwareType == DAC960_V1_Controller)
- {
- DAC960_Notice("Flushing Cache...", Controller);
- DAC960_V1_ExecuteType3(Controller, DAC960_V1_Flush, 0);
- DAC960_Notice("done\n", Controller);
-
- if (Controller->HardwareType == DAC960_PD_Controller)
- release_region(Controller->IO_Address, 0x80);
- }
- else
- {
- DAC960_Notice("Flushing Cache...", Controller);
- DAC960_V2_DeviceOperation(Controller, DAC960_V2_PauseDevice,
- DAC960_V2_RAID_Controller);
- DAC960_Notice("done\n", Controller);
- }
- }
- DAC960_UnregisterBlockDevice(Controller);
- DAC960_DestroyAuxiliaryStructures(Controller);
- DAC960_DestroyProcEntries(Controller);
- DAC960_DetectCleanup(Controller);
-}
-
-
-/*
- DAC960_Probe verifies controller's existence and
- initializes the DAC960 Driver for that controller.
-*/
-
-static int
-DAC960_Probe(struct pci_dev *dev, const struct pci_device_id *entry)
-{
- int disk;
- DAC960_Controller_T *Controller;
-
- if (DAC960_ControllerCount == DAC960_MaxControllers)
- {
- DAC960_Error("More than %d DAC960 Controllers detected - "
- "ignoring from Controller at\n",
- NULL, DAC960_MaxControllers);
- return -ENODEV;
- }
-
- Controller = DAC960_DetectController(dev, entry);
- if (!Controller)
- return -ENODEV;
-
- if (!DAC960_InitializeController(Controller)) {
- DAC960_FinalizeController(Controller);
- return -ENODEV;
- }
-
- for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++) {
- set_capacity(Controller->disks[disk], disk_size(Controller, disk));
- add_disk(Controller->disks[disk]);
- }
- DAC960_CreateProcEntries(Controller);
- return 0;
-}
-
-
-/*
-  DAC960_Remove finalizes the DAC960 Driver for the Controller associated
-  with the PCI Device being removed.
-*/
-
-static void DAC960_Remove(struct pci_dev *PCI_Device)
-{
- int Controller_Number = (long)pci_get_drvdata(PCI_Device);
- DAC960_Controller_T *Controller = DAC960_Controllers[Controller_Number];
- if (Controller != NULL)
- DAC960_FinalizeController(Controller);
-}
-
-
-/*
- DAC960_V1_QueueReadWriteCommand prepares and queues a Read/Write Command for
- DAC960 V1 Firmware Controllers.
-*/
-
-static void DAC960_V1_QueueReadWriteCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_ScatterGatherSegment_T *ScatterGatherList =
- Command->V1.ScatterGatherList;
- struct scatterlist *ScatterList = Command->V1.ScatterList;
-
- DAC960_V1_ClearCommand(Command);
-
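-  /* Single-segment transfers use the plain Read/Write opcodes with the
-     segment's DMA address; multi-segment transfers use the scatter/gather
-     variants and pass the DMA address of the scatter/gather list instead. */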
- if (Command->SegmentCount == 1)
- {
- if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
- CommandMailbox->Type5.CommandOpcode = DAC960_V1_Read;
- else
- CommandMailbox->Type5.CommandOpcode = DAC960_V1_Write;
-
- CommandMailbox->Type5.LD.TransferLength = Command->BlockCount;
- CommandMailbox->Type5.LD.LogicalDriveNumber = Command->LogicalDriveNumber;
- CommandMailbox->Type5.LogicalBlockAddress = Command->BlockNumber;
- CommandMailbox->Type5.BusAddress =
- (DAC960_BusAddress32_T)sg_dma_address(ScatterList);
- }
- else
- {
- int i;
-
- if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
- CommandMailbox->Type5.CommandOpcode = DAC960_V1_ReadWithScatterGather;
- else
- CommandMailbox->Type5.CommandOpcode = DAC960_V1_WriteWithScatterGather;
-
- CommandMailbox->Type5.LD.TransferLength = Command->BlockCount;
- CommandMailbox->Type5.LD.LogicalDriveNumber = Command->LogicalDriveNumber;
- CommandMailbox->Type5.LogicalBlockAddress = Command->BlockNumber;
- CommandMailbox->Type5.BusAddress = Command->V1.ScatterGatherListDMA;
-
- CommandMailbox->Type5.ScatterGatherCount = Command->SegmentCount;
-
- for (i = 0; i < Command->SegmentCount; i++, ScatterList++, ScatterGatherList++) {
- ScatterGatherList->SegmentDataPointer =
- (DAC960_BusAddress32_T)sg_dma_address(ScatterList);
- ScatterGatherList->SegmentByteCount =
- (DAC960_ByteCount32_T)sg_dma_len(ScatterList);
- }
- }
- DAC960_QueueCommand(Command);
-}
-
-
-/*
- DAC960_V2_QueueReadWriteCommand prepares and queues a Read/Write Command for
- DAC960 V2 Firmware Controllers.
-*/
-
-static void DAC960_V2_QueueReadWriteCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- struct scatterlist *ScatterList = Command->V2.ScatterList;
-
- DAC960_V2_ClearCommand(Command);
-
- CommandMailbox->SCSI_10.CommandOpcode = DAC960_V2_SCSI_10;
- CommandMailbox->SCSI_10.CommandControlBits.DataTransferControllerToHost =
- (Command->DmaDirection == PCI_DMA_FROMDEVICE);
- CommandMailbox->SCSI_10.DataTransferSize =
- Command->BlockCount << DAC960_BlockSizeBits;
- CommandMailbox->SCSI_10.RequestSenseBusAddress = Command->V2.RequestSenseDMA;
- CommandMailbox->SCSI_10.PhysicalDevice =
- Controller->V2.LogicalDriveToVirtualDevice[Command->LogicalDriveNumber];
- CommandMailbox->SCSI_10.RequestSenseSize = sizeof(DAC960_SCSI_RequestSense_T);
- CommandMailbox->SCSI_10.CDBLength = 10;
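-  /* Build a SCSI READ(10)/WRITE(10) CDB: opcode 0x28 or 0x2A, big-endian
-     Logical Block Address in bytes 2-5, big-endian block count in
-     bytes 7-8. */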
- CommandMailbox->SCSI_10.SCSI_CDB[0] =
- (Command->DmaDirection == PCI_DMA_FROMDEVICE ? 0x28 : 0x2A);
- CommandMailbox->SCSI_10.SCSI_CDB[2] = Command->BlockNumber >> 24;
- CommandMailbox->SCSI_10.SCSI_CDB[3] = Command->BlockNumber >> 16;
- CommandMailbox->SCSI_10.SCSI_CDB[4] = Command->BlockNumber >> 8;
- CommandMailbox->SCSI_10.SCSI_CDB[5] = Command->BlockNumber;
- CommandMailbox->SCSI_10.SCSI_CDB[7] = Command->BlockCount >> 8;
- CommandMailbox->SCSI_10.SCSI_CDB[8] = Command->BlockCount;
-
- if (Command->SegmentCount == 1)
- {
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- (DAC960_BusAddress64_T)sg_dma_address(ScatterList);
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->SCSI_10.DataTransferSize;
- }
- else
- {
- DAC960_V2_ScatterGatherSegment_T *ScatterGatherList;
- int i;
-
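-      /* Up to two segments fit in the mailbox itself; larger transfers
-         switch to the external scatter/gather list and pass its DMA
-         address instead. */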
- if (Command->SegmentCount > 2)
- {
- ScatterGatherList = Command->V2.ScatterGatherList;
- CommandMailbox->SCSI_10.CommandControlBits
- .AdditionalScatterGatherListMemory = true;
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ExtendedScatterGather.ScatterGatherList0Length = Command->SegmentCount;
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ExtendedScatterGather.ScatterGatherList0Address =
- Command->V2.ScatterGatherListDMA;
- }
- else
- ScatterGatherList = CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ScatterGatherSegments;
-
- for (i = 0; i < Command->SegmentCount; i++, ScatterList++, ScatterGatherList++) {
- ScatterGatherList->SegmentDataPointer =
- (DAC960_BusAddress64_T)sg_dma_address(ScatterList);
- ScatterGatherList->SegmentByteCount =
- (DAC960_ByteCount64_T)sg_dma_len(ScatterList);
- }
- }
- DAC960_QueueCommand(Command);
-}
-
-
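-/*
-  DAC960_process_queue drains req_q into Controller.  It returns 1 when the
-  queue has been emptied and 0 when it runs out of free Commands first.
-*/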
-static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_queue *req_q)
-{
- struct request *Request;
- DAC960_Command_T *Command;
-
- while(1) {
- Request = blk_peek_request(req_q);
- if (!Request)
- return 1;
-
- Command = DAC960_AllocateCommand(Controller);
- if (Command == NULL)
- return 0;
-
- if (rq_data_dir(Request) == READ) {
- Command->DmaDirection = PCI_DMA_FROMDEVICE;
- Command->CommandType = DAC960_ReadCommand;
- } else {
- Command->DmaDirection = PCI_DMA_TODEVICE;
- Command->CommandType = DAC960_WriteCommand;
- }
- Command->Completion = Request->end_io_data;
- Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
- Command->BlockNumber = blk_rq_pos(Request);
- Command->BlockCount = blk_rq_sectors(Request);
- Command->Request = Request;
- blk_start_request(Request);
- Command->SegmentCount = blk_rq_map_sg(req_q,
- Command->Request, Command->cmd_sglist);
- /* pci_map_sg MAY change the value of SegCount */
- Command->SegmentCount = pci_map_sg(Controller->PCIDevice, Command->cmd_sglist,
- Command->SegmentCount, Command->DmaDirection);
-
- DAC960_QueueReadWriteCommand(Command);
- }
-}
-
-/*
-  DAC960_ProcessRequest dequeues I/O Requests from Controller's per-drive
-  Request Queues and queues them to the Controller.  It cycles round-robin
-  through the queues, resuming at req_q_index, until no free Commands
-  remain.
-*/
-static void DAC960_ProcessRequest(DAC960_Controller_T *controller)
-{
- int i;
-
- if (!controller->ControllerInitialized)
- return;
-
- /* Do this better later! */
- for (i = controller->req_q_index; i < DAC960_MaxLogicalDrives; i++) {
- struct request_queue *req_q = controller->RequestQueue[i];
-
- if (req_q == NULL)
- continue;
-
- if (!DAC960_process_queue(controller, req_q)) {
- controller->req_q_index = i;
- return;
- }
- }
-
- if (controller->req_q_index == 0)
- return;
-
- for (i = 0; i < controller->req_q_index; i++) {
- struct request_queue *req_q = controller->RequestQueue[i];
-
- if (req_q == NULL)
- continue;
-
- if (!DAC960_process_queue(controller, req_q)) {
- controller->req_q_index = i;
- return;
- }
- }
-}
-
-
-/*
-  DAC960_queue_partial_rw extracts one bio from the request already
-  associated with the argument Command, constructs a new command block to
-  retry I/O only on that bio, and queues that command to the Controller.
-
-  This function re-uses a previously-allocated Command, so there is no
-  failure mode from trying to allocate a command.
-*/
-
-static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- struct request *Request = Command->Request;
- struct request_queue *req_q = Controller->RequestQueue[Command->LogicalDriveNumber];
-
- if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
- Command->CommandType = DAC960_ReadRetryCommand;
- else
- Command->CommandType = DAC960_WriteRetryCommand;
-
- /*
- * We could be more efficient with these mapping requests
- * and map only the portions that we need. But since this
- * code should almost never be called, just go with a
- * simple coding.
- */
- (void)blk_rq_map_sg(req_q, Command->Request, Command->cmd_sglist);
-
- (void)pci_map_sg(Controller->PCIDevice, Command->cmd_sglist, 1, Command->DmaDirection);
- /*
- * Resubmitting the request sector at a time is really tedious.
- * But, this should almost never happen. So, we're willing to pay
- * this price so that in the end, as much of the transfer is completed
- * successfully as possible.
- */
- Command->SegmentCount = 1;
- Command->BlockNumber = blk_rq_pos(Request);
- Command->BlockCount = 1;
- DAC960_QueueReadWriteCommand(Command);
- return;
-}
-
-/*
- DAC960_RequestFunction is the I/O Request Function for DAC960 Controllers.
-*/
-
-static void DAC960_RequestFunction(struct request_queue *RequestQueue)
-{
- DAC960_ProcessRequest(RequestQueue->queuedata);
-}
-
-/*
-  DAC960_ProcessCompletedRequest performs completion processing for an
-  individual Request.
-*/
-
-static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
- bool SuccessfulIO)
-{
- struct request *Request = Command->Request;
- blk_status_t Error = SuccessfulIO ? BLK_STS_OK : BLK_STS_IOERR;
-
- pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
- Command->SegmentCount, Command->DmaDirection);
-
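-  /* BlockCount << 9 converts 512-byte blocks to the byte count that
-     __blk_end_request() expects; __blk_end_request() returns false once
-     the whole request has been completed. */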
- if (!__blk_end_request(Request, Error, Command->BlockCount << 9)) {
- if (Command->Completion) {
- complete(Command->Completion);
- Command->Completion = NULL;
- }
- return true;
- }
- return false;
-}
-
-/*
- DAC960_V1_ReadWriteError prints an appropriate error message for Command
- when an error occurs on a Read or Write operation.
-*/
-
-static void DAC960_V1_ReadWriteError(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- unsigned char *CommandName = "UNKNOWN";
- switch (Command->CommandType)
- {
- case DAC960_ReadCommand:
- case DAC960_ReadRetryCommand:
- CommandName = "READ";
- break;
- case DAC960_WriteCommand:
- case DAC960_WriteRetryCommand:
- CommandName = "WRITE";
- break;
- case DAC960_MonitoringCommand:
- case DAC960_ImmediateCommand:
- case DAC960_QueuedCommand:
- break;
- }
- switch (Command->V1.CommandStatus)
- {
- case DAC960_V1_IrrecoverableDataError:
- DAC960_Error("Irrecoverable Data Error on %s:\n",
- Controller, CommandName);
- break;
- case DAC960_V1_LogicalDriveNonexistentOrOffline:
- DAC960_Error("Logical Drive Nonexistent or Offline on %s:\n",
- Controller, CommandName);
- break;
- case DAC960_V1_AccessBeyondEndOfLogicalDrive:
- DAC960_Error("Attempt to Access Beyond End of Logical Drive "
- "on %s:\n", Controller, CommandName);
- break;
- case DAC960_V1_BadDataEncountered:
- DAC960_Error("Bad Data Encountered on %s:\n", Controller, CommandName);
- break;
- default:
- DAC960_Error("Unexpected Error Status %04X on %s:\n",
- Controller, Command->V1.CommandStatus, CommandName);
- break;
- }
- DAC960_Error(" /dev/rd/c%dd%d: absolute blocks %u..%u\n",
- Controller, Controller->ControllerNumber,
- Command->LogicalDriveNumber, Command->BlockNumber,
- Command->BlockNumber + Command->BlockCount - 1);
-}
-
-
-/*
- DAC960_V1_ProcessCompletedCommand performs completion processing for Command
- for DAC960 V1 Firmware Controllers.
-*/
-
-static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_CommandType_T CommandType = Command->CommandType;
- DAC960_V1_CommandOpcode_T CommandOpcode =
- Command->V1.CommandMailbox.Common.CommandOpcode;
- DAC960_V1_CommandStatus_T CommandStatus = Command->V1.CommandStatus;
-
- if (CommandType == DAC960_ReadCommand ||
- CommandType == DAC960_WriteCommand)
- {
-
-#ifdef FORCE_RETRY_DEBUG
- CommandStatus = DAC960_V1_IrrecoverableDataError;
-#endif
-
- if (CommandStatus == DAC960_V1_NormalCompletion) {
-
- if (!DAC960_ProcessCompletedRequest(Command, true))
- BUG();
-
- } else if (CommandStatus == DAC960_V1_IrrecoverableDataError ||
- CommandStatus == DAC960_V1_BadDataEncountered)
- {
- /*
- * break the command down into pieces and resubmit each
- * piece, hoping that some of them will succeed.
- */
- DAC960_queue_partial_rw(Command);
- return;
- }
- else
- {
- if (CommandStatus != DAC960_V1_LogicalDriveNonexistentOrOffline)
- DAC960_V1_ReadWriteError(Command);
-
- if (!DAC960_ProcessCompletedRequest(Command, false))
- BUG();
- }
- }
- else if (CommandType == DAC960_ReadRetryCommand ||
- CommandType == DAC960_WriteRetryCommand)
- {
- bool normal_completion;
-#ifdef FORCE_RETRY_FAILURE_DEBUG
- static int retry_count = 1;
-#endif
- /*
- Perform completion processing for the portion that was
- retried, and submit the next portion, if any.
- */
- normal_completion = true;
- if (CommandStatus != DAC960_V1_NormalCompletion) {
- normal_completion = false;
- if (CommandStatus != DAC960_V1_LogicalDriveNonexistentOrOffline)
- DAC960_V1_ReadWriteError(Command);
- }
-
-#ifdef FORCE_RETRY_FAILURE_DEBUG
- if (!(++retry_count % 10000)) {
- printk("V1 error retry failure test\n");
- normal_completion = false;
- DAC960_V1_ReadWriteError(Command);
- }
-#endif
-
- if (!DAC960_ProcessCompletedRequest(Command, normal_completion)) {
- DAC960_queue_partial_rw(Command);
- return;
- }
- }
-
- else if (CommandType == DAC960_MonitoringCommand)
- {
- if (Controller->ShutdownMonitoringTimer)
- return;
- if (CommandOpcode == DAC960_V1_Enquiry)
- {
- DAC960_V1_Enquiry_T *OldEnquiry = &Controller->V1.Enquiry;
- DAC960_V1_Enquiry_T *NewEnquiry = Controller->V1.NewEnquiry;
- unsigned int OldCriticalLogicalDriveCount =
- OldEnquiry->CriticalLogicalDriveCount;
- unsigned int NewCriticalLogicalDriveCount =
- NewEnquiry->CriticalLogicalDriveCount;
- if (NewEnquiry->NumberOfLogicalDrives > Controller->LogicalDriveCount)
- {
- int LogicalDriveNumber = Controller->LogicalDriveCount - 1;
- while (++LogicalDriveNumber < NewEnquiry->NumberOfLogicalDrives)
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "Now Exists\n", Controller,
- LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- Controller->LogicalDriveCount = NewEnquiry->NumberOfLogicalDrives;
- DAC960_ComputeGenericDiskInfo(Controller);
- }
- if (NewEnquiry->NumberOfLogicalDrives < Controller->LogicalDriveCount)
- {
- int LogicalDriveNumber = NewEnquiry->NumberOfLogicalDrives - 1;
- while (++LogicalDriveNumber < Controller->LogicalDriveCount)
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "No Longer Exists\n", Controller,
- LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- Controller->LogicalDriveCount = NewEnquiry->NumberOfLogicalDrives;
- DAC960_ComputeGenericDiskInfo(Controller);
- }
- if (NewEnquiry->StatusFlags.DeferredWriteError !=
- OldEnquiry->StatusFlags.DeferredWriteError)
- DAC960_Critical("Deferred Write Error Flag is now %s\n", Controller,
- (NewEnquiry->StatusFlags.DeferredWriteError
- ? "TRUE" : "FALSE"));
- if ((NewCriticalLogicalDriveCount > 0 ||
- NewCriticalLogicalDriveCount != OldCriticalLogicalDriveCount) ||
- (NewEnquiry->OfflineLogicalDriveCount > 0 ||
- NewEnquiry->OfflineLogicalDriveCount !=
- OldEnquiry->OfflineLogicalDriveCount) ||
- (NewEnquiry->DeadDriveCount > 0 ||
- NewEnquiry->DeadDriveCount !=
- OldEnquiry->DeadDriveCount) ||
- (NewEnquiry->EventLogSequenceNumber !=
- OldEnquiry->EventLogSequenceNumber) ||
- Controller->MonitoringTimerCount == 0 ||
- time_after_eq(jiffies, Controller->SecondaryMonitoringTime
- + DAC960_SecondaryMonitoringInterval))
- {
- Controller->V1.NeedLogicalDriveInformation = true;
- Controller->V1.NewEventLogSequenceNumber =
- NewEnquiry->EventLogSequenceNumber;
- Controller->V1.NeedErrorTableInformation = true;
- Controller->V1.NeedDeviceStateInformation = true;
- Controller->V1.StartDeviceStateScan = true;
- Controller->V1.NeedBackgroundInitializationStatus =
- Controller->V1.BackgroundInitializationStatusSupported;
- Controller->SecondaryMonitoringTime = jiffies;
- }
- if (NewEnquiry->RebuildFlag == DAC960_V1_StandbyRebuildInProgress ||
- NewEnquiry->RebuildFlag
- == DAC960_V1_BackgroundRebuildInProgress ||
- OldEnquiry->RebuildFlag == DAC960_V1_StandbyRebuildInProgress ||
- OldEnquiry->RebuildFlag == DAC960_V1_BackgroundRebuildInProgress)
- {
- Controller->V1.NeedRebuildProgress = true;
- Controller->V1.RebuildProgressFirst =
- (NewEnquiry->CriticalLogicalDriveCount <
- OldEnquiry->CriticalLogicalDriveCount);
- }
- if (OldEnquiry->RebuildFlag == DAC960_V1_BackgroundCheckInProgress)
- switch (NewEnquiry->RebuildFlag)
- {
- case DAC960_V1_NoStandbyRebuildOrCheckInProgress:
- DAC960_Progress("Consistency Check Completed Successfully\n",
- Controller);
- break;
- case DAC960_V1_StandbyRebuildInProgress:
- case DAC960_V1_BackgroundRebuildInProgress:
- break;
- case DAC960_V1_BackgroundCheckInProgress:
- Controller->V1.NeedConsistencyCheckProgress = true;
- break;
- case DAC960_V1_StandbyRebuildCompletedWithError:
- DAC960_Progress("Consistency Check Completed with Error\n",
- Controller);
- break;
- case DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed:
- DAC960_Progress("Consistency Check Failed - "
- "Physical Device Failed\n", Controller);
- break;
- case DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed:
- DAC960_Progress("Consistency Check Failed - "
- "Logical Drive Failed\n", Controller);
- break;
- case DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses:
- DAC960_Progress("Consistency Check Failed - Other Causes\n",
- Controller);
- break;
- case DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated:
- DAC960_Progress("Consistency Check Successfully Terminated\n",
- Controller);
- break;
- }
- else if (NewEnquiry->RebuildFlag
- == DAC960_V1_BackgroundCheckInProgress)
- Controller->V1.NeedConsistencyCheckProgress = true;
- Controller->MonitoringAlertMode =
- (NewEnquiry->CriticalLogicalDriveCount > 0 ||
- NewEnquiry->OfflineLogicalDriveCount > 0 ||
- NewEnquiry->DeadDriveCount > 0);
- if (NewEnquiry->RebuildFlag > DAC960_V1_BackgroundCheckInProgress)
- {
- Controller->V1.PendingRebuildFlag = NewEnquiry->RebuildFlag;
- Controller->V1.RebuildFlagPending = true;
- }
- memcpy(&Controller->V1.Enquiry, &Controller->V1.NewEnquiry,
- sizeof(DAC960_V1_Enquiry_T));
- }
- else if (CommandOpcode == DAC960_V1_PerformEventLogOperation)
- {
- static char
- *DAC960_EventMessages[] =
- { "killed because write recovery failed",
- "killed because of SCSI bus reset failure",
- "killed because of double check condition",
- "killed because it was removed",
- "killed because of gross error on SCSI chip",
- "killed because of bad tag returned from drive",
- "killed because of timeout on SCSI command",
- "killed because of reset SCSI command issued from system",
- "killed because busy or parity error count exceeded limit",
- "killed because of 'kill drive' command from system",
- "killed because of selection timeout",
- "killed due to SCSI phase sequence error",
- "killed due to unknown status" };
- DAC960_V1_EventLogEntry_T *EventLogEntry =
- Controller->V1.EventLogEntry;
- if (EventLogEntry->SequenceNumber ==
- Controller->V1.OldEventLogSequenceNumber)
- {
- unsigned char SenseKey = EventLogEntry->SenseKey;
- unsigned char AdditionalSenseCode =
- EventLogEntry->AdditionalSenseCode;
- unsigned char AdditionalSenseCodeQualifier =
- EventLogEntry->AdditionalSenseCodeQualifier;
- if (SenseKey == DAC960_SenseKey_VendorSpecific &&
- AdditionalSenseCode == 0x80 &&
- AdditionalSenseCodeQualifier <
- ARRAY_SIZE(DAC960_EventMessages))
- DAC960_Critical("Physical Device %d:%d %s\n", Controller,
- EventLogEntry->Channel,
- EventLogEntry->TargetID,
- DAC960_EventMessages[
- AdditionalSenseCodeQualifier]);
- else if (SenseKey == DAC960_SenseKey_UnitAttention &&
- AdditionalSenseCode == 0x29)
- {
- if (Controller->MonitoringTimerCount > 0)
- Controller->V1.DeviceResetCount[EventLogEntry->Channel]
- [EventLogEntry->TargetID]++;
- }
- else if (!(SenseKey == DAC960_SenseKey_NoSense ||
- (SenseKey == DAC960_SenseKey_NotReady &&
- AdditionalSenseCode == 0x04 &&
- (AdditionalSenseCodeQualifier == 0x01 ||
- AdditionalSenseCodeQualifier == 0x02))))
- {
- DAC960_Critical("Physical Device %d:%d Error Log: "
- "Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
- Controller,
- EventLogEntry->Channel,
- EventLogEntry->TargetID,
- SenseKey,
- AdditionalSenseCode,
- AdditionalSenseCodeQualifier);
- DAC960_Critical("Physical Device %d:%d Error Log: "
- "Information = %02X%02X%02X%02X "
- "%02X%02X%02X%02X\n",
- Controller,
- EventLogEntry->Channel,
- EventLogEntry->TargetID,
- EventLogEntry->Information[0],
- EventLogEntry->Information[1],
- EventLogEntry->Information[2],
- EventLogEntry->Information[3],
- EventLogEntry->CommandSpecificInformation[0],
- EventLogEntry->CommandSpecificInformation[1],
- EventLogEntry->CommandSpecificInformation[2],
- EventLogEntry->CommandSpecificInformation[3]);
- }
- }
- Controller->V1.OldEventLogSequenceNumber++;
- }
- else if (CommandOpcode == DAC960_V1_GetErrorTable)
- {
- DAC960_V1_ErrorTable_T *OldErrorTable = &Controller->V1.ErrorTable;
- DAC960_V1_ErrorTable_T *NewErrorTable = Controller->V1.NewErrorTable;
- int Channel, TargetID;
- for (Channel = 0; Channel < Controller->Channels; Channel++)
- for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
- {
- DAC960_V1_ErrorTableEntry_T *NewErrorEntry =
- &NewErrorTable->ErrorTableEntries[Channel][TargetID];
- DAC960_V1_ErrorTableEntry_T *OldErrorEntry =
- &OldErrorTable->ErrorTableEntries[Channel][TargetID];
- if ((NewErrorEntry->ParityErrorCount !=
- OldErrorEntry->ParityErrorCount) ||
- (NewErrorEntry->SoftErrorCount !=
- OldErrorEntry->SoftErrorCount) ||
- (NewErrorEntry->HardErrorCount !=
- OldErrorEntry->HardErrorCount) ||
- (NewErrorEntry->MiscErrorCount !=
- OldErrorEntry->MiscErrorCount))
- DAC960_Critical("Physical Device %d:%d Errors: "
- "Parity = %d, Soft = %d, "
- "Hard = %d, Misc = %d\n",
- Controller, Channel, TargetID,
- NewErrorEntry->ParityErrorCount,
- NewErrorEntry->SoftErrorCount,
- NewErrorEntry->HardErrorCount,
- NewErrorEntry->MiscErrorCount);
- }
- memcpy(&Controller->V1.ErrorTable, Controller->V1.NewErrorTable,
- sizeof(DAC960_V1_ErrorTable_T));
- }
- else if (CommandOpcode == DAC960_V1_GetDeviceState)
- {
- DAC960_V1_DeviceState_T *OldDeviceState =
- &Controller->V1.DeviceState[Controller->V1.DeviceStateChannel]
- [Controller->V1.DeviceStateTargetID];
- DAC960_V1_DeviceState_T *NewDeviceState =
- Controller->V1.NewDeviceState;
- if (NewDeviceState->DeviceState != OldDeviceState->DeviceState)
- DAC960_Critical("Physical Device %d:%d is now %s\n", Controller,
- Controller->V1.DeviceStateChannel,
- Controller->V1.DeviceStateTargetID,
- (NewDeviceState->DeviceState
- == DAC960_V1_Device_Dead
- ? "DEAD"
- : NewDeviceState->DeviceState
- == DAC960_V1_Device_WriteOnly
- ? "WRITE-ONLY"
- : NewDeviceState->DeviceState
- == DAC960_V1_Device_Online
- ? "ONLINE" : "STANDBY"));
- if (OldDeviceState->DeviceState == DAC960_V1_Device_Dead &&
- NewDeviceState->DeviceState != DAC960_V1_Device_Dead)
- {
- Controller->V1.NeedDeviceInquiryInformation = true;
- Controller->V1.NeedDeviceSerialNumberInformation = true;
- Controller->V1.DeviceResetCount
- [Controller->V1.DeviceStateChannel]
- [Controller->V1.DeviceStateTargetID] = 0;
- }
- memcpy(OldDeviceState, NewDeviceState,
- sizeof(DAC960_V1_DeviceState_T));
- }
- else if (CommandOpcode == DAC960_V1_GetLogicalDriveInformation)
- {
- int LogicalDriveNumber;
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < Controller->LogicalDriveCount;
- LogicalDriveNumber++)
- {
- DAC960_V1_LogicalDriveInformation_T *OldLogicalDriveInformation =
- &Controller->V1.LogicalDriveInformation[LogicalDriveNumber];
- DAC960_V1_LogicalDriveInformation_T *NewLogicalDriveInformation =
- &(*Controller->V1.NewLogicalDriveInformation)[LogicalDriveNumber];
- if (NewLogicalDriveInformation->LogicalDriveState !=
- OldLogicalDriveInformation->LogicalDriveState)
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "is now %s\n", Controller,
- LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (NewLogicalDriveInformation->LogicalDriveState
- == DAC960_V1_LogicalDrive_Online
- ? "ONLINE"
- : NewLogicalDriveInformation->LogicalDriveState
- == DAC960_V1_LogicalDrive_Critical
- ? "CRITICAL" : "OFFLINE"));
- if (NewLogicalDriveInformation->WriteBack !=
- OldLogicalDriveInformation->WriteBack)
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "is now %s\n", Controller,
- LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (NewLogicalDriveInformation->WriteBack
- ? "WRITE BACK" : "WRITE THRU"));
- }
- memcpy(&Controller->V1.LogicalDriveInformation,
- Controller->V1.NewLogicalDriveInformation,
- sizeof(DAC960_V1_LogicalDriveInformationArray_T));
- }
- else if (CommandOpcode == DAC960_V1_GetRebuildProgress)
- {
- unsigned int LogicalDriveNumber =
- Controller->V1.RebuildProgress->LogicalDriveNumber;
- unsigned int LogicalDriveSize =
- Controller->V1.RebuildProgress->LogicalDriveSize;
- unsigned int BlocksCompleted =
- LogicalDriveSize - Controller->V1.RebuildProgress->RemainingBlocks;
- if (CommandStatus == DAC960_V1_NoRebuildOrCheckInProgress &&
- Controller->V1.LastRebuildStatus == DAC960_V1_NormalCompletion)
- CommandStatus = DAC960_V1_RebuildSuccessful;
- switch (CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- Controller->EphemeralProgressMessage = true;
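-	  /* Both terms are shifted right by 7, presumably so that
-	     100 * BlocksCompleted cannot overflow 32 bits on large drives. */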
- DAC960_Progress("Rebuild in Progress: "
- "Logical Drive %d (/dev/rd/c%dd%d) "
- "%d%% completed\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (100 * (BlocksCompleted >> 7))
- / (LogicalDriveSize >> 7));
- Controller->EphemeralProgressMessage = false;
- break;
- case DAC960_V1_RebuildFailed_LogicalDriveFailure:
- DAC960_Progress("Rebuild Failed due to "
- "Logical Drive Failure\n", Controller);
- break;
- case DAC960_V1_RebuildFailed_BadBlocksOnOther:
- DAC960_Progress("Rebuild Failed due to "
- "Bad Blocks on Other Drives\n", Controller);
- break;
- case DAC960_V1_RebuildFailed_NewDriveFailed:
- DAC960_Progress("Rebuild Failed due to "
- "Failure of Drive Being Rebuilt\n", Controller);
- break;
- case DAC960_V1_NoRebuildOrCheckInProgress:
- break;
- case DAC960_V1_RebuildSuccessful:
- DAC960_Progress("Rebuild Completed Successfully\n", Controller);
- break;
- case DAC960_V1_RebuildSuccessfullyTerminated:
- DAC960_Progress("Rebuild Successfully Terminated\n", Controller);
- break;
- }
- Controller->V1.LastRebuildStatus = CommandStatus;
- if (CommandType != DAC960_MonitoringCommand &&
- Controller->V1.RebuildStatusPending)
- {
- Command->V1.CommandStatus = Controller->V1.PendingRebuildStatus;
- Controller->V1.RebuildStatusPending = false;
- }
- else if (CommandType == DAC960_MonitoringCommand &&
- CommandStatus != DAC960_V1_NormalCompletion &&
- CommandStatus != DAC960_V1_NoRebuildOrCheckInProgress)
- {
- Controller->V1.PendingRebuildStatus = CommandStatus;
- Controller->V1.RebuildStatusPending = true;
- }
- }
- else if (CommandOpcode == DAC960_V1_RebuildStat)
- {
- unsigned int LogicalDriveNumber =
- Controller->V1.RebuildProgress->LogicalDriveNumber;
- unsigned int LogicalDriveSize =
- Controller->V1.RebuildProgress->LogicalDriveSize;
- unsigned int BlocksCompleted =
- LogicalDriveSize - Controller->V1.RebuildProgress->RemainingBlocks;
- if (CommandStatus == DAC960_V1_NormalCompletion)
- {
- Controller->EphemeralProgressMessage = true;
- DAC960_Progress("Consistency Check in Progress: "
- "Logical Drive %d (/dev/rd/c%dd%d) "
- "%d%% completed\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (100 * (BlocksCompleted >> 7))
- / (LogicalDriveSize >> 7));
- Controller->EphemeralProgressMessage = false;
- }
- }
- else if (CommandOpcode == DAC960_V1_BackgroundInitializationControl)
- {
- unsigned int LogicalDriveNumber =
- Controller->V1.BackgroundInitializationStatus->LogicalDriveNumber;
- unsigned int LogicalDriveSize =
- Controller->V1.BackgroundInitializationStatus->LogicalDriveSize;
- unsigned int BlocksCompleted =
- Controller->V1.BackgroundInitializationStatus->BlocksCompleted;
- switch (CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- switch (Controller->V1.BackgroundInitializationStatus->Status)
- {
- case DAC960_V1_BackgroundInitializationInvalid:
- break;
- case DAC960_V1_BackgroundInitializationStarted:
- DAC960_Progress("Background Initialization Started\n",
- Controller);
- break;
- case DAC960_V1_BackgroundInitializationInProgress:
- if (BlocksCompleted ==
- Controller->V1.LastBackgroundInitializationStatus.
- BlocksCompleted &&
- LogicalDriveNumber ==
- Controller->V1.LastBackgroundInitializationStatus.
- LogicalDriveNumber)
- break;
- Controller->EphemeralProgressMessage = true;
- DAC960_Progress("Background Initialization in Progress: "
- "Logical Drive %d (/dev/rd/c%dd%d) "
- "%d%% completed\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (100 * (BlocksCompleted >> 7))
- / (LogicalDriveSize >> 7));
- Controller->EphemeralProgressMessage = false;
- break;
- case DAC960_V1_BackgroundInitializationSuspended:
- DAC960_Progress("Background Initialization Suspended\n",
- Controller);
- break;
- case DAC960_V1_BackgroundInitializationCancelled:
- DAC960_Progress("Background Initialization Cancelled\n",
- Controller);
- break;
- }
- memcpy(&Controller->V1.LastBackgroundInitializationStatus,
- Controller->V1.BackgroundInitializationStatus,
- sizeof(DAC960_V1_BackgroundInitializationStatus_T));
- break;
- case DAC960_V1_BackgroundInitSuccessful:
- if (Controller->V1.BackgroundInitializationStatus->Status ==
- DAC960_V1_BackgroundInitializationInProgress)
- DAC960_Progress("Background Initialization "
- "Completed Successfully\n", Controller);
- Controller->V1.BackgroundInitializationStatus->Status =
- DAC960_V1_BackgroundInitializationInvalid;
- break;
- case DAC960_V1_BackgroundInitAborted:
- if (Controller->V1.BackgroundInitializationStatus->Status ==
- DAC960_V1_BackgroundInitializationInProgress)
- DAC960_Progress("Background Initialization Aborted\n",
- Controller);
- Controller->V1.BackgroundInitializationStatus->Status =
- DAC960_V1_BackgroundInitializationInvalid;
- break;
- case DAC960_V1_NoBackgroundInitInProgress:
- break;
- }
- }
- else if (CommandOpcode == DAC960_V1_DCDB)
- {
- /*
- This is a bit ugly.
-
- The InquiryStandardData and
-	    the InquiryUnitSerialNumber information
-	    retrieval operations BOTH use DAC960_V1_DCDB
-	    commands.  The test above can't distinguish between
- these two cases.
-
- Instead, we rely on the order of code later in this
- function to ensure that DeviceInquiryInformation commands
- are submitted before DeviceSerialNumber commands.
- */
- if (Controller->V1.NeedDeviceInquiryInformation)
- {
- DAC960_SCSI_Inquiry_T *InquiryStandardData =
- &Controller->V1.InquiryStandardData
- [Controller->V1.DeviceStateChannel]
- [Controller->V1.DeviceStateTargetID];
- if (CommandStatus != DAC960_V1_NormalCompletion)
- {
- memset(InquiryStandardData, 0,
- sizeof(DAC960_SCSI_Inquiry_T));
- InquiryStandardData->PeripheralDeviceType = 0x1F;
- }
- else
- memcpy(InquiryStandardData,
- Controller->V1.NewInquiryStandardData,
- sizeof(DAC960_SCSI_Inquiry_T));
- Controller->V1.NeedDeviceInquiryInformation = false;
- }
- else if (Controller->V1.NeedDeviceSerialNumberInformation)
- {
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- &Controller->V1.InquiryUnitSerialNumber
- [Controller->V1.DeviceStateChannel]
- [Controller->V1.DeviceStateTargetID];
- if (CommandStatus != DAC960_V1_NormalCompletion)
- {
- memset(InquiryUnitSerialNumber, 0,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
- }
- else
- memcpy(InquiryUnitSerialNumber,
- Controller->V1.NewInquiryUnitSerialNumber,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- Controller->V1.NeedDeviceSerialNumberInformation = false;
- }
- }
- /*
- Begin submitting new monitoring commands.
- */
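- /*
- Each branch below queues at most one follow-up command and
- returns; the chain resumes from that command's completion until
- no state remains to refresh, at which point the Monitoring
- Timer is re-armed.
- */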
- if (Controller->V1.NewEventLogSequenceNumber
- - Controller->V1.OldEventLogSequenceNumber > 0)
- {
- Command->V1.CommandMailbox.Type3E.CommandOpcode =
- DAC960_V1_PerformEventLogOperation;
- Command->V1.CommandMailbox.Type3E.OperationType =
- DAC960_V1_GetEventLogEntry;
- Command->V1.CommandMailbox.Type3E.OperationQualifier = 1;
- Command->V1.CommandMailbox.Type3E.SequenceNumber =
- Controller->V1.OldEventLogSequenceNumber;
- Command->V1.CommandMailbox.Type3E.BusAddress =
- Controller->V1.EventLogEntryDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedErrorTableInformation)
- {
- Controller->V1.NeedErrorTableInformation = false;
- Command->V1.CommandMailbox.Type3.CommandOpcode =
- DAC960_V1_GetErrorTable;
- Command->V1.CommandMailbox.Type3.BusAddress =
- Controller->V1.NewErrorTableDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedRebuildProgress &&
- Controller->V1.RebuildProgressFirst)
- {
- Controller->V1.NeedRebuildProgress = false;
- Command->V1.CommandMailbox.Type3.CommandOpcode =
- DAC960_V1_GetRebuildProgress;
- Command->V1.CommandMailbox.Type3.BusAddress =
- Controller->V1.RebuildProgressDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedDeviceStateInformation)
- {
- if (Controller->V1.NeedDeviceInquiryInformation)
- {
- DAC960_V1_DCDB_T *DCDB = Controller->V1.MonitoringDCDB;
- dma_addr_t DCDB_DMA = Controller->V1.MonitoringDCDB_DMA;
-
- dma_addr_t NewInquiryStandardDataDMA =
- Controller->V1.NewInquiryStandardDataDMA;
-
- Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
- Command->V1.CommandMailbox.Type3.BusAddress = DCDB_DMA;
- DCDB->Channel = Controller->V1.DeviceStateChannel;
- DCDB->TargetID = Controller->V1.DeviceStateTargetID;
- DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
- DCDB->EarlyStatus = false;
- DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
- DCDB->NoAutomaticRequestSense = false;
- DCDB->DisconnectPermitted = true;
- DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_T);
- DCDB->BusAddress = NewInquiryStandardDataDMA;
- DCDB->CDBLength = 6;
- DCDB->TransferLengthHigh4 = 0;
- DCDB->SenseLength = sizeof(DCDB->SenseData);
- DCDB->CDB[0] = 0x12; /* INQUIRY */
- DCDB->CDB[1] = 0; /* EVPD = 0 */
- DCDB->CDB[2] = 0; /* Page Code */
- DCDB->CDB[3] = 0; /* Reserved */
- DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_T);
- DCDB->CDB[5] = 0; /* Control */
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedDeviceSerialNumberInformation)
- {
- DAC960_V1_DCDB_T *DCDB = Controller->V1.MonitoringDCDB;
- dma_addr_t DCDB_DMA = Controller->V1.MonitoringDCDB_DMA;
- dma_addr_t NewInquiryUnitSerialNumberDMA =
- Controller->V1.NewInquiryUnitSerialNumberDMA;
-
- Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
- Command->V1.CommandMailbox.Type3.BusAddress = DCDB_DMA;
- DCDB->Channel = Controller->V1.DeviceStateChannel;
- DCDB->TargetID = Controller->V1.DeviceStateTargetID;
- DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
- DCDB->EarlyStatus = false;
- DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
- DCDB->NoAutomaticRequestSense = false;
- DCDB->DisconnectPermitted = true;
- DCDB->TransferLength =
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- DCDB->BusAddress = NewInquiryUnitSerialNumberDMA;
- DCDB->CDBLength = 6;
- DCDB->TransferLengthHigh4 = 0;
- DCDB->SenseLength = sizeof(DCDB->SenseData);
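- /* 6-byte INQUIRY with EVPD = 1 requesting Vital Product Data
- page 0x80, the SCSI Unit Serial Number page. */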
- DCDB->CDB[0] = 0x12; /* INQUIRY */
- DCDB->CDB[1] = 1; /* EVPD = 1 */
- DCDB->CDB[2] = 0x80; /* Page Code */
- DCDB->CDB[3] = 0; /* Reserved */
- DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- DCDB->CDB[5] = 0; /* Control */
- DAC960_QueueCommand(Command);
- return;
- }
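- /* Advance the device state scan by one Channel:TargetID position
- per monitoring pass, wrapping the TargetID into the next
- channel. */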
- if (Controller->V1.StartDeviceStateScan)
- {
- Controller->V1.DeviceStateChannel = 0;
- Controller->V1.DeviceStateTargetID = 0;
- Controller->V1.StartDeviceStateScan = false;
- }
- else if (++Controller->V1.DeviceStateTargetID == Controller->Targets)
- {
- Controller->V1.DeviceStateChannel++;
- Controller->V1.DeviceStateTargetID = 0;
- }
- if (Controller->V1.DeviceStateChannel < Controller->Channels)
- {
- Controller->V1.NewDeviceState->DeviceState =
- DAC960_V1_Device_Dead;
- Command->V1.CommandMailbox.Type3D.CommandOpcode =
- DAC960_V1_GetDeviceState;
- Command->V1.CommandMailbox.Type3D.Channel =
- Controller->V1.DeviceStateChannel;
- Command->V1.CommandMailbox.Type3D.TargetID =
- Controller->V1.DeviceStateTargetID;
- Command->V1.CommandMailbox.Type3D.BusAddress =
- Controller->V1.NewDeviceStateDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- Controller->V1.NeedDeviceStateInformation = false;
- }
- if (Controller->V1.NeedLogicalDriveInformation)
- {
- Controller->V1.NeedLogicalDriveInformation = false;
- Command->V1.CommandMailbox.Type3.CommandOpcode =
- DAC960_V1_GetLogicalDriveInformation;
- Command->V1.CommandMailbox.Type3.BusAddress =
- Controller->V1.NewLogicalDriveInformationDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedRebuildProgress)
- {
- Controller->V1.NeedRebuildProgress = false;
- Command->V1.CommandMailbox.Type3.CommandOpcode =
- DAC960_V1_GetRebuildProgress;
- Command->V1.CommandMailbox.Type3.BusAddress =
- Controller->V1.RebuildProgressDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedConsistencyCheckProgress)
- {
- Controller->V1.NeedConsistencyCheckProgress = false;
- Command->V1.CommandMailbox.Type3.CommandOpcode =
- DAC960_V1_RebuildStat;
- Command->V1.CommandMailbox.Type3.BusAddress =
- Controller->V1.RebuildProgressDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedBackgroundInitializationStatus)
- {
- Controller->V1.NeedBackgroundInitializationStatus = false;
- Command->V1.CommandMailbox.Type3B.CommandOpcode =
- DAC960_V1_BackgroundInitializationControl;
- Command->V1.CommandMailbox.Type3B.CommandOpcode2 = 0x20;
- Command->V1.CommandMailbox.Type3B.BusAddress =
- Controller->V1.BackgroundInitializationStatusDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- Controller->MonitoringTimerCount++;
- Controller->MonitoringTimer.expires =
- jiffies + DAC960_MonitoringTimerInterval;
- add_timer(&Controller->MonitoringTimer);
- }
- if (CommandType == DAC960_ImmediateCommand)
- {
- complete(Command->Completion);
- Command->Completion = NULL;
- return;
- }
- if (CommandType == DAC960_QueuedCommand)
- {
- DAC960_V1_KernelCommand_T *KernelCommand = Command->V1.KernelCommand;
- KernelCommand->CommandStatus = Command->V1.CommandStatus;
- Command->V1.KernelCommand = NULL;
- if (CommandOpcode == DAC960_V1_DCDB)
- Controller->V1.DirectCommandActive[KernelCommand->DCDB->Channel]
- [KernelCommand->DCDB->TargetID] =
- false;
- DAC960_DeallocateCommand(Command);
- KernelCommand->CompletionFunction(KernelCommand);
- return;
- }
- /*
- Queue a Status Monitoring Command to the Controller using the just
- completed Command if one was deferred previously due to lack of a
- free Command when the Monitoring Timer Function was called.
- */
- if (Controller->MonitoringCommandDeferred)
- {
- Controller->MonitoringCommandDeferred = false;
- DAC960_V1_QueueMonitoringCommand(Command);
- return;
- }
- /*
- Deallocate the Command.
- */
- DAC960_DeallocateCommand(Command);
- /*
- Wake up any processes waiting on a free Command.
- */
- wake_up(&Controller->CommandWaitQueue);
-}
-
-
-/*
- DAC960_V2_ReadWriteError prints an appropriate error message for Command
- when an error occurs on a Read or Write operation.
-*/
-
-static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- static const unsigned char *SenseErrors[] = {
- "NO SENSE", "RECOVERED ERROR",
- "NOT READY", "MEDIUM ERROR",
- "HARDWARE ERROR", "ILLEGAL REQUEST",
- "UNIT ATTENTION", "DATA PROTECT",
- "BLANK CHECK", "VENDOR-SPECIFIC",
- "COPY ABORTED", "ABORTED COMMAND",
- "EQUAL", "VOLUME OVERFLOW",
- "MISCOMPARE", "RESERVED"
- };
- unsigned char *CommandName = "UNKNOWN";
- switch (Command->CommandType)
- {
- case DAC960_ReadCommand:
- case DAC960_ReadRetryCommand:
- CommandName = "READ";
- break;
- case DAC960_WriteCommand:
- case DAC960_WriteRetryCommand:
- CommandName = "WRITE";
- break;
- case DAC960_MonitoringCommand:
- case DAC960_ImmediateCommand:
- case DAC960_QueuedCommand:
- break;
- }
- DAC960_Error("Error Condition %s on %s:\n", Controller,
- SenseErrors[Command->V2.RequestSense->SenseKey], CommandName);
- DAC960_Error(" /dev/rd/c%dd%d: absolute blocks %u..%u\n",
- Controller, Controller->ControllerNumber,
- Command->LogicalDriveNumber, Command->BlockNumber,
- Command->BlockNumber + Command->BlockCount - 1);
-}
-
-
-/*
- DAC960_V2_ReportEvent prints an appropriate message when a Controller Event
- occurs.
-*/
-
-static void DAC960_V2_ReportEvent(DAC960_Controller_T *Controller,
- DAC960_V2_Event_T *Event)
-{
- DAC960_SCSI_RequestSense_T *RequestSense =
- (DAC960_SCSI_RequestSense_T *) &Event->RequestSenseData;
- unsigned char MessageBuffer[DAC960_LineBufferSize];
- static struct { int EventCode; unsigned char *EventMessage; } EventList[] =
- { /* Physical Device Events (0x0000 - 0x007F) */
- { 0x0001, "P Online" },
- { 0x0002, "P Standby" },
- { 0x0005, "P Automatic Rebuild Started" },
- { 0x0006, "P Manual Rebuild Started" },
- { 0x0007, "P Rebuild Completed" },
- { 0x0008, "P Rebuild Cancelled" },
- { 0x0009, "P Rebuild Failed for Unknown Reasons" },
- { 0x000A, "P Rebuild Failed due to New Physical Device" },
- { 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
- { 0x000C, "S Offline" },
- { 0x000D, "P Found" },
- { 0x000E, "P Removed" },
- { 0x000F, "P Unconfigured" },
- { 0x0010, "P Expand Capacity Started" },
- { 0x0011, "P Expand Capacity Completed" },
- { 0x0012, "P Expand Capacity Failed" },
- { 0x0013, "P Command Timed Out" },
- { 0x0014, "P Command Aborted" },
- { 0x0015, "P Command Retried" },
- { 0x0016, "P Parity Error" },
- { 0x0017, "P Soft Error" },
- { 0x0018, "P Miscellaneous Error" },
- { 0x0019, "P Reset" },
- { 0x001A, "P Active Spare Found" },
- { 0x001B, "P Warm Spare Found" },
- { 0x001C, "S Sense Data Received" },
- { 0x001D, "P Initialization Started" },
- { 0x001E, "P Initialization Completed" },
- { 0x001F, "P Initialization Failed" },
- { 0x0020, "P Initialization Cancelled" },
- { 0x0021, "P Failed because Write Recovery Failed" },
- { 0x0022, "P Failed because SCSI Bus Reset Failed" },
- { 0x0023, "P Failed because of Double Check Condition" },
- { 0x0024, "P Failed because Device Cannot Be Accessed" },
- { 0x0025, "P Failed because of Gross Error on SCSI Processor" },
- { 0x0026, "P Failed because of Bad Tag from Device" },
- { 0x0027, "P Failed because of Command Timeout" },
- { 0x0028, "P Failed because of System Reset" },
- { 0x0029, "P Failed because of Busy Status or Parity Error" },
- { 0x002A, "P Failed because Host Set Device to Failed State" },
- { 0x002B, "P Failed because of Selection Timeout" },
- { 0x002C, "P Failed because of SCSI Bus Phase Error" },
- { 0x002D, "P Failed because Device Returned Unknown Status" },
- { 0x002E, "P Failed because Device Not Ready" },
- { 0x002F, "P Failed because Device Not Found at Startup" },
- { 0x0030, "P Failed because COD Write Operation Failed" },
- { 0x0031, "P Failed because BDT Write Operation Failed" },
- { 0x0039, "P Missing at Startup" },
- { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
- { 0x003C, "P Temporarily Offline Device Automatically Made Online" },
- { 0x003D, "P Standby Rebuild Started" },
- /* Logical Device Events (0x0080 - 0x00FF) */
- { 0x0080, "M Consistency Check Started" },
- { 0x0081, "M Consistency Check Completed" },
- { 0x0082, "M Consistency Check Cancelled" },
- { 0x0083, "M Consistency Check Completed With Errors" },
- { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
- { 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
- { 0x0086, "L Offline" },
- { 0x0087, "L Critical" },
- { 0x0088, "L Online" },
- { 0x0089, "M Automatic Rebuild Started" },
- { 0x008A, "M Manual Rebuild Started" },
- { 0x008B, "M Rebuild Completed" },
- { 0x008C, "M Rebuild Cancelled" },
- { 0x008D, "M Rebuild Failed for Unknown Reasons" },
- { 0x008E, "M Rebuild Failed due to New Physical Device" },
- { 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
- { 0x0090, "M Initialization Started" },
- { 0x0091, "M Initialization Completed" },
- { 0x0092, "M Initialization Cancelled" },
- { 0x0093, "M Initialization Failed" },
- { 0x0094, "L Found" },
- { 0x0095, "L Deleted" },
- { 0x0096, "M Expand Capacity Started" },
- { 0x0097, "M Expand Capacity Completed" },
- { 0x0098, "M Expand Capacity Failed" },
- { 0x0099, "L Bad Block Found" },
- { 0x009A, "L Size Changed" },
- { 0x009B, "L Type Changed" },
- { 0x009C, "L Bad Data Block Found" },
- { 0x009E, "L Read of Data Block in BDT" },
- { 0x009F, "L Write Back Data for Disk Block Lost" },
- { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
- { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
- { 0x00A2, "L Standby Rebuild Started" },
- /* Fault Management Events (0x0100 - 0x017F) */
- { 0x0140, "E Fan %d Failed" },
- { 0x0141, "E Fan %d OK" },
- { 0x0142, "E Fan %d Not Present" },
- { 0x0143, "E Power Supply %d Failed" },
- { 0x0144, "E Power Supply %d OK" },
- { 0x0145, "E Power Supply %d Not Present" },
- { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
- { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
- { 0x0148, "E Temperature Sensor %d Temperature Normal" },
- { 0x0149, "E Temperature Sensor %d Not Present" },
- { 0x014A, "E Enclosure Management Unit %d Access Critical" },
- { 0x014B, "E Enclosure Management Unit %d Access OK" },
- { 0x014C, "E Enclosure Management Unit %d Access Offline" },
- /* Controller Events (0x0180 - 0x01FF) */
- { 0x0181, "C Cache Write Back Error" },
- { 0x0188, "C Battery Backup Unit Found" },
- { 0x0189, "C Battery Backup Unit Charge Level Low" },
- { 0x018A, "C Battery Backup Unit Charge Level OK" },
- { 0x0193, "C Installation Aborted" },
- { 0x0195, "C Battery Backup Unit Physically Removed" },
- { 0x0196, "C Memory Error During Warm Boot" },
- { 0x019E, "C Memory Soft ECC Error Corrected" },
- { 0x019F, "C Memory Hard ECC Error Corrected" },
- { 0x01A2, "C Battery Backup Unit Failed" },
- { 0x01AB, "C Mirror Race Recovery Failed" },
- { 0x01AC, "C Mirror Race on Critical Drive" },
- /* Controller Internal Processor Events */
- { 0x0380, "C Internal Controller Hung" },
- { 0x0381, "C Internal Controller Firmware Breakpoint" },
- { 0x0390, "C Internal Controller i960 Processor Specific Error" },
- { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
- { 0, "" } };
- int EventListIndex = 0, EventCode;
- unsigned char EventType, *EventMessage;
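- /* A Sense Data Received event (0x001C) carrying vendor-specific
- sense with ASC 0x80 or 0x81 encodes the real event code in the
- ASC/ASCQ bytes; decode it before the table lookup. */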
- if (Event->EventCode == 0x1C &&
- RequestSense->SenseKey == DAC960_SenseKey_VendorSpecific &&
- (RequestSense->AdditionalSenseCode == 0x80 ||
- RequestSense->AdditionalSenseCode == 0x81))
- Event->EventCode = ((RequestSense->AdditionalSenseCode - 0x80) << 8) |
- RequestSense->AdditionalSenseCodeQualifier;
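- /* Linear search of EventList; the { 0, "" } entry is the
- terminating sentinel. */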
- while (true)
- {
- EventCode = EventList[EventListIndex].EventCode;
- if (EventCode == Event->EventCode || EventCode == 0) break;
- EventListIndex++;
- }
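- /* The first character of each EventMessage encodes the event
- class (P/S = physical device, L/M = logical drive,
- E = enclosure, C = controller); the message text itself starts
- at offset 2. */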
- EventType = EventList[EventListIndex].EventMessage[0];
- EventMessage = &EventList[EventListIndex].EventMessage[2];
- if (EventCode == 0)
- {
- DAC960_Critical("Unknown Controller Event Code %04X\n",
- Controller, Event->EventCode);
- return;
- }
- switch (EventType)
- {
- case 'P':
- DAC960_Critical("Physical Device %d:%d %s\n", Controller,
- Event->Channel, Event->TargetID, EventMessage);
- break;
- case 'L':
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) %s\n", Controller,
- Event->LogicalUnit, Controller->ControllerNumber,
- Event->LogicalUnit, EventMessage);
- break;
- case 'M':
- DAC960_Progress("Logical Drive %d (/dev/rd/c%dd%d) %s\n", Controller,
- Event->LogicalUnit, Controller->ControllerNumber,
- Event->LogicalUnit, EventMessage);
- break;
- case 'S':
- if (RequestSense->SenseKey == DAC960_SenseKey_NoSense ||
- (RequestSense->SenseKey == DAC960_SenseKey_NotReady &&
- RequestSense->AdditionalSenseCode == 0x04 &&
- (RequestSense->AdditionalSenseCodeQualifier == 0x01 ||
- RequestSense->AdditionalSenseCodeQualifier == 0x02)))
- break;
- DAC960_Critical("Physical Device %d:%d %s\n", Controller,
- Event->Channel, Event->TargetID, EventMessage);
- DAC960_Critical("Physical Device %d:%d Request Sense: "
- "Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
- Controller,
- Event->Channel,
- Event->TargetID,
- RequestSense->SenseKey,
- RequestSense->AdditionalSenseCode,
- RequestSense->AdditionalSenseCodeQualifier);
- DAC960_Critical("Physical Device %d:%d Request Sense: "
- "Information = %02X%02X%02X%02X "
- "%02X%02X%02X%02X\n",
- Controller,
- Event->Channel,
- Event->TargetID,
- RequestSense->Information[0],
- RequestSense->Information[1],
- RequestSense->Information[2],
- RequestSense->Information[3],
- RequestSense->CommandSpecificInformation[0],
- RequestSense->CommandSpecificInformation[1],
- RequestSense->CommandSpecificInformation[2],
- RequestSense->CommandSpecificInformation[3]);
- break;
- case 'E':
- if (Controller->SuppressEnclosureMessages) break;
- sprintf(MessageBuffer, EventMessage, Event->LogicalUnit);
- DAC960_Critical("Enclosure %d %s\n", Controller,
- Event->TargetID, MessageBuffer);
- break;
- case 'C':
- DAC960_Critical("Controller %s\n", Controller, EventMessage);
- break;
- default:
- DAC960_Critical("Unknown Controller Event Code %04X\n",
- Controller, Event->EventCode);
- break;
- }
-}
-
-
-/*
- DAC960_V2_ReportProgress prints an appropriate progress message for
- Logical Device Long Operations.
-*/
-
-static void DAC960_V2_ReportProgress(DAC960_Controller_T *Controller,
- unsigned char *MessageString,
- unsigned int LogicalDeviceNumber,
- unsigned long BlocksCompleted,
- unsigned long LogicalDeviceSize)
-{
- Controller->EphemeralProgressMessage = true;
- DAC960_Progress("%s in Progress: Logical Drive %d (/dev/rd/c%dd%d) "
- "%d%% completed\n", Controller,
- MessageString,
- LogicalDeviceNumber,
- Controller->ControllerNumber,
- LogicalDeviceNumber,
- (100 * (BlocksCompleted >> 7)) / (LogicalDeviceSize >> 7));
- Controller->EphemeralProgressMessage = false;
-}
-
-
-/*
- DAC960_V2_ProcessCompletedCommand performs completion processing for Command
- for DAC960 V2 Firmware Controllers.
-*/
-
-static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_CommandType_T CommandType = Command->CommandType;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_IOCTL_Opcode_T IOCTLOpcode = CommandMailbox->Common.IOCTL_Opcode;
- DAC960_V2_CommandOpcode_T CommandOpcode = CommandMailbox->SCSI_10.CommandOpcode;
- DAC960_V2_CommandStatus_T CommandStatus = Command->V2.CommandStatus;
-
- if (CommandType == DAC960_ReadCommand ||
- CommandType == DAC960_WriteCommand)
- {
-
-#ifdef FORCE_RETRY_DEBUG
- CommandStatus = DAC960_V2_AbormalCompletion;
- Command->V2.RequestSense->SenseKey = DAC960_SenseKey_MediumError;
-#endif
-
- if (CommandStatus == DAC960_V2_NormalCompletion) {
-
- if (!DAC960_ProcessCompletedRequest(Command, true))
- BUG();
-
- } else if (Command->V2.RequestSense->SenseKey == DAC960_SenseKey_MediumError)
- {
- /*
- * break the command down into pieces and resubmit each
- * piece, hoping that some of them will succeed.
- */
- DAC960_queue_partial_rw(Command);
- return;
- }
- else
- {
- if (Command->V2.RequestSense->SenseKey != DAC960_SenseKey_NotReady)
- DAC960_V2_ReadWriteError(Command);
- /*
- Perform completion processing for all buffers in this I/O Request.
- */
- (void)DAC960_ProcessCompletedRequest(Command, false);
- }
- }
- else if (CommandType == DAC960_ReadRetryCommand ||
- CommandType == DAC960_WriteRetryCommand)
- {
- bool normal_completion;
-
-#ifdef FORCE_RETRY_FAILURE_DEBUG
- static int retry_count = 1;
-#endif
- /*
- Perform completion processing for the portion that was
- retried, and submit the next portion, if any.
- */
- normal_completion = true;
- if (CommandStatus != DAC960_V2_NormalCompletion) {
- normal_completion = false;
- if (Command->V2.RequestSense->SenseKey != DAC960_SenseKey_NotReady)
- DAC960_V2_ReadWriteError(Command);
- }
-
-#ifdef FORCE_RETRY_FAILURE_DEBUG
- if (!(++retry_count % 10000)) {
- printk("V2 error retry failure test\n");
- normal_completion = false;
- DAC960_V2_ReadWriteError(Command);
- }
-#endif
-
- if (!DAC960_ProcessCompletedRequest(Command, normal_completion)) {
- DAC960_queue_partial_rw(Command);
- return;
- }
- }
- else if (CommandType == DAC960_MonitoringCommand)
- {
- if (Controller->ShutdownMonitoringTimer)
- return;
- if (IOCTLOpcode == DAC960_V2_GetControllerInfo)
- {
- DAC960_V2_ControllerInfo_T *NewControllerInfo =
- Controller->V2.NewControllerInformation;
- DAC960_V2_ControllerInfo_T *ControllerInfo =
- &Controller->V2.ControllerInformation;
- Controller->LogicalDriveCount =
- NewControllerInfo->LogicalDevicesPresent;
- Controller->V2.NeedLogicalDeviceInformation = true;
- Controller->V2.NeedPhysicalDeviceInformation = true;
- Controller->V2.StartLogicalDeviceInformationScan = true;
- Controller->V2.StartPhysicalDeviceInformationScan = true;
- Controller->MonitoringAlertMode =
- (NewControllerInfo->LogicalDevicesCritical > 0 ||
- NewControllerInfo->LogicalDevicesOffline > 0 ||
- NewControllerInfo->PhysicalDisksCritical > 0 ||
- NewControllerInfo->PhysicalDisksOffline > 0);
- memcpy(ControllerInfo, NewControllerInfo,
- sizeof(DAC960_V2_ControllerInfo_T));
- }
- else if (IOCTLOpcode == DAC960_V2_GetEvent)
- {
- if (CommandStatus == DAC960_V2_NormalCompletion) {
- DAC960_V2_ReportEvent(Controller, Controller->V2.Event);
- }
- Controller->V2.NextEventSequenceNumber++;
- }
- else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid &&
- CommandStatus == DAC960_V2_NormalCompletion)
- {
- DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
- Controller->V2.NewPhysicalDeviceInformation;
- unsigned int PhysicalDeviceIndex = Controller->V2.PhysicalDeviceIndex;
- DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
- Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
- unsigned int DeviceIndex;
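- /* PhysicalDeviceInformation[] is kept sorted by
- Channel:TargetID:LogicalUnit; any cached entry that sorts
- before the device just reported can no longer exist, so report
- it and compact it out of both arrays. */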
- while (PhysicalDeviceInfo != NULL &&
- (NewPhysicalDeviceInfo->Channel >
- PhysicalDeviceInfo->Channel ||
- (NewPhysicalDeviceInfo->Channel ==
- PhysicalDeviceInfo->Channel &&
- (NewPhysicalDeviceInfo->TargetID >
- PhysicalDeviceInfo->TargetID ||
- (NewPhysicalDeviceInfo->TargetID ==
- PhysicalDeviceInfo->TargetID &&
- NewPhysicalDeviceInfo->LogicalUnit >
- PhysicalDeviceInfo->LogicalUnit)))))
- {
- DAC960_Critical("Physical Device %d:%d No Longer Exists\n",
- Controller,
- PhysicalDeviceInfo->Channel,
- PhysicalDeviceInfo->TargetID);
- Controller->V2.PhysicalDeviceInformation
- [PhysicalDeviceIndex] = NULL;
- Controller->V2.InquiryUnitSerialNumber
- [PhysicalDeviceIndex] = NULL;
- kfree(PhysicalDeviceInfo);
- kfree(InquiryUnitSerialNumber);
- for (DeviceIndex = PhysicalDeviceIndex;
- DeviceIndex < DAC960_V2_MaxPhysicalDevices - 1;
- DeviceIndex++)
- {
- Controller->V2.PhysicalDeviceInformation[DeviceIndex] =
- Controller->V2.PhysicalDeviceInformation[DeviceIndex+1];
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex] =
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex+1];
- }
- Controller->V2.PhysicalDeviceInformation
- [DAC960_V2_MaxPhysicalDevices-1] = NULL;
- Controller->V2.InquiryUnitSerialNumber
- [DAC960_V2_MaxPhysicalDevices-1] = NULL;
- PhysicalDeviceInfo =
- Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
- InquiryUnitSerialNumber =
- Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
- }
- if (PhysicalDeviceInfo == NULL ||
- (NewPhysicalDeviceInfo->Channel !=
- PhysicalDeviceInfo->Channel) ||
- (NewPhysicalDeviceInfo->TargetID !=
- PhysicalDeviceInfo->TargetID) ||
- (NewPhysicalDeviceInfo->LogicalUnit !=
- PhysicalDeviceInfo->LogicalUnit))
- {
- PhysicalDeviceInfo =
- kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T), GFP_ATOMIC);
- InquiryUnitSerialNumber =
- kmalloc(sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
- GFP_ATOMIC);
- if (InquiryUnitSerialNumber == NULL ||
- PhysicalDeviceInfo == NULL)
- {
- kfree(InquiryUnitSerialNumber);
- InquiryUnitSerialNumber = NULL;
- kfree(PhysicalDeviceInfo);
- PhysicalDeviceInfo = NULL;
- }
- DAC960_Critical("Physical Device %d:%d Now Exists%s\n",
- Controller,
- NewPhysicalDeviceInfo->Channel,
- NewPhysicalDeviceInfo->TargetID,
- (PhysicalDeviceInfo != NULL
- ? "" : " - Allocation Failed"));
- if (PhysicalDeviceInfo != NULL)
- {
- memset(PhysicalDeviceInfo, 0,
- sizeof(DAC960_V2_PhysicalDeviceInfo_T));
- PhysicalDeviceInfo->PhysicalDeviceState =
- DAC960_V2_Device_InvalidState;
- memset(InquiryUnitSerialNumber, 0,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
- for (DeviceIndex = DAC960_V2_MaxPhysicalDevices - 1;
- DeviceIndex > PhysicalDeviceIndex;
- DeviceIndex--)
- {
- Controller->V2.PhysicalDeviceInformation[DeviceIndex] =
- Controller->V2.PhysicalDeviceInformation[DeviceIndex-1];
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex] =
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex-1];
- }
- Controller->V2.PhysicalDeviceInformation
- [PhysicalDeviceIndex] =
- PhysicalDeviceInfo;
- Controller->V2.InquiryUnitSerialNumber
- [PhysicalDeviceIndex] =
- InquiryUnitSerialNumber;
- Controller->V2.NeedDeviceSerialNumberInformation = true;
- }
- }
- if (PhysicalDeviceInfo != NULL)
- {
- if (NewPhysicalDeviceInfo->PhysicalDeviceState !=
- PhysicalDeviceInfo->PhysicalDeviceState)
- DAC960_Critical(
- "Physical Device %d:%d is now %s\n", Controller,
- NewPhysicalDeviceInfo->Channel,
- NewPhysicalDeviceInfo->TargetID,
- (NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Online
- ? "ONLINE"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Rebuild
- ? "REBUILD"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Missing
- ? "MISSING"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Critical
- ? "CRITICAL"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Dead
- ? "DEAD"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_SuspectedDead
- ? "SUSPECTED-DEAD"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_CommandedOffline
- ? "COMMANDED-OFFLINE"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Standby
- ? "STANDBY" : "UNKNOWN"));
- if ((NewPhysicalDeviceInfo->ParityErrors !=
- PhysicalDeviceInfo->ParityErrors) ||
- (NewPhysicalDeviceInfo->SoftErrors !=
- PhysicalDeviceInfo->SoftErrors) ||
- (NewPhysicalDeviceInfo->HardErrors !=
- PhysicalDeviceInfo->HardErrors) ||
- (NewPhysicalDeviceInfo->MiscellaneousErrors !=
- PhysicalDeviceInfo->MiscellaneousErrors) ||
- (NewPhysicalDeviceInfo->CommandTimeouts !=
- PhysicalDeviceInfo->CommandTimeouts) ||
- (NewPhysicalDeviceInfo->Retries !=
- PhysicalDeviceInfo->Retries) ||
- (NewPhysicalDeviceInfo->Aborts !=
- PhysicalDeviceInfo->Aborts) ||
- (NewPhysicalDeviceInfo->PredictedFailuresDetected !=
- PhysicalDeviceInfo->PredictedFailuresDetected))
- {
- DAC960_Critical("Physical Device %d:%d Errors: "
- "Parity = %d, Soft = %d, "
- "Hard = %d, Misc = %d\n",
- Controller,
- NewPhysicalDeviceInfo->Channel,
- NewPhysicalDeviceInfo->TargetID,
- NewPhysicalDeviceInfo->ParityErrors,
- NewPhysicalDeviceInfo->SoftErrors,
- NewPhysicalDeviceInfo->HardErrors,
- NewPhysicalDeviceInfo->MiscellaneousErrors);
- DAC960_Critical("Physical Device %d:%d Errors: "
- "Timeouts = %d, Retries = %d, "
- "Aborts = %d, Predicted = %d\n",
- Controller,
- NewPhysicalDeviceInfo->Channel,
- NewPhysicalDeviceInfo->TargetID,
- NewPhysicalDeviceInfo->CommandTimeouts,
- NewPhysicalDeviceInfo->Retries,
- NewPhysicalDeviceInfo->Aborts,
- NewPhysicalDeviceInfo
- ->PredictedFailuresDetected);
- }
- if ((PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Dead ||
- PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_InvalidState) &&
- NewPhysicalDeviceInfo->PhysicalDeviceState
- != DAC960_V2_Device_Dead)
- Controller->V2.NeedDeviceSerialNumberInformation = true;
- memcpy(PhysicalDeviceInfo, NewPhysicalDeviceInfo,
- sizeof(DAC960_V2_PhysicalDeviceInfo_T));
- }
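- /* Step the rolling scan past the device just processed; the next
- GetPhysicalDeviceInfoValid query appears to pick up from this
- device address. */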
- NewPhysicalDeviceInfo->LogicalUnit++;
- Controller->V2.PhysicalDeviceIndex++;
- }
- else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid)
- {
- unsigned int DeviceIndex;
- for (DeviceIndex = Controller->V2.PhysicalDeviceIndex;
- DeviceIndex < DAC960_V2_MaxPhysicalDevices;
- DeviceIndex++)
- {
- DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
- Controller->V2.PhysicalDeviceInformation[DeviceIndex];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex];
- if (PhysicalDeviceInfo == NULL) break;
- DAC960_Critical("Physical Device %d:%d No Longer Exists\n",
- Controller,
- PhysicalDeviceInfo->Channel,
- PhysicalDeviceInfo->TargetID);
- Controller->V2.PhysicalDeviceInformation[DeviceIndex] = NULL;
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex] = NULL;
- kfree(PhysicalDeviceInfo);
- kfree(InquiryUnitSerialNumber);
- }
- Controller->V2.NeedPhysicalDeviceInformation = false;
- }
- else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid &&
- CommandStatus == DAC960_V2_NormalCompletion)
- {
- DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo =
- Controller->V2.NewLogicalDeviceInformation;
- unsigned short LogicalDeviceNumber =
- NewLogicalDeviceInfo->LogicalDeviceNumber;
- DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
- Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber];
- if (LogicalDeviceInfo == NULL)
- {
- DAC960_V2_PhysicalDevice_T PhysicalDevice;
- PhysicalDevice.Controller = 0;
- PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel;
- PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID;
- PhysicalDevice.LogicalUnit = NewLogicalDeviceInfo->LogicalUnit;
- Controller->V2.LogicalDriveToVirtualDevice[LogicalDeviceNumber] =
- PhysicalDevice;
- LogicalDeviceInfo = kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T),
- GFP_ATOMIC);
- Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] =
- LogicalDeviceInfo;
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "Now Exists%s\n", Controller,
- LogicalDeviceNumber,
- Controller->ControllerNumber,
- LogicalDeviceNumber,
- (LogicalDeviceInfo != NULL
- ? "" : " - Allocation Failed"));
- if (LogicalDeviceInfo != NULL)
- {
- memset(LogicalDeviceInfo, 0,
- sizeof(DAC960_V2_LogicalDeviceInfo_T));
- DAC960_ComputeGenericDiskInfo(Controller);
- }
- }
- if (LogicalDeviceInfo != NULL)
- {
- unsigned long LogicalDeviceSize =
- NewLogicalDeviceInfo->ConfigurableDeviceSize;
- if (NewLogicalDeviceInfo->LogicalDeviceState !=
- LogicalDeviceInfo->LogicalDeviceState)
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "is now %s\n", Controller,
- LogicalDeviceNumber,
- Controller->ControllerNumber,
- LogicalDeviceNumber,
- (NewLogicalDeviceInfo->LogicalDeviceState
- == DAC960_V2_LogicalDevice_Online
- ? "ONLINE"
- : NewLogicalDeviceInfo->LogicalDeviceState
- == DAC960_V2_LogicalDevice_Critical
- ? "CRITICAL" : "OFFLINE"));
- if ((NewLogicalDeviceInfo->SoftErrors !=
- LogicalDeviceInfo->SoftErrors) ||
- (NewLogicalDeviceInfo->CommandsFailed !=
- LogicalDeviceInfo->CommandsFailed) ||
- (NewLogicalDeviceInfo->DeferredWriteErrors !=
- LogicalDeviceInfo->DeferredWriteErrors))
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) Errors: "
- "Soft = %d, Failed = %d, Deferred Write = %d\n",
- Controller, LogicalDeviceNumber,
- Controller->ControllerNumber,
- LogicalDeviceNumber,
- NewLogicalDeviceInfo->SoftErrors,
- NewLogicalDeviceInfo->CommandsFailed,
- NewLogicalDeviceInfo->DeferredWriteErrors);
- if (NewLogicalDeviceInfo->ConsistencyCheckInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Consistency Check",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->ConsistencyCheckBlockNumber,
- LogicalDeviceSize);
- else if (NewLogicalDeviceInfo->RebuildInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Rebuild",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->RebuildBlockNumber,
- LogicalDeviceSize);
- else if (NewLogicalDeviceInfo->BackgroundInitializationInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Background Initialization",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->BackgroundInitializationBlockNumber,
- LogicalDeviceSize);
- else if (NewLogicalDeviceInfo->ForegroundInitializationInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Foreground Initialization",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->ForegroundInitializationBlockNumber,
- LogicalDeviceSize);
- else if (NewLogicalDeviceInfo->DataMigrationInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Data Migration",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->DataMigrationBlockNumber,
- LogicalDeviceSize);
- else if (NewLogicalDeviceInfo->PatrolOperationInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Patrol Operation",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->PatrolOperationBlockNumber,
- LogicalDeviceSize);
- if (LogicalDeviceInfo->BackgroundInitializationInProgress &&
- !NewLogicalDeviceInfo->BackgroundInitializationInProgress)
- DAC960_Progress("Logical Drive %d (/dev/rd/c%dd%d) "
- "Background Initialization %s\n",
- Controller,
- LogicalDeviceNumber,
- Controller->ControllerNumber,
- LogicalDeviceNumber,
- (NewLogicalDeviceInfo->LogicalDeviceControl
- .LogicalDeviceInitialized
- ? "Completed" : "Failed"));
- memcpy(LogicalDeviceInfo, NewLogicalDeviceInfo,
- sizeof(DAC960_V2_LogicalDeviceInfo_T));
- }
- Controller->V2.LogicalDriveFoundDuringScan
- [LogicalDeviceNumber] = true;
- NewLogicalDeviceInfo->LogicalDeviceNumber++;
- }
- else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid)
- {
- int LogicalDriveNumber;
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < DAC960_MaxLogicalDrives;
- LogicalDriveNumber++)
- {
- DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
- Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
- if (LogicalDeviceInfo == NULL ||
- Controller->V2.LogicalDriveFoundDuringScan
- [LogicalDriveNumber])
- continue;
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "No Longer Exists\n", Controller,
- LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- Controller->V2.LogicalDeviceInformation
- [LogicalDriveNumber] = NULL;
- kfree(LogicalDeviceInfo);
- Controller->LogicalDriveInitiallyAccessible
- [LogicalDriveNumber] = false;
- DAC960_ComputeGenericDiskInfo(Controller);
- }
- Controller->V2.NeedLogicalDeviceInformation = false;
- }
- else if (CommandOpcode == DAC960_V2_SCSI_10_Passthru)
- {
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- Controller->V2.InquiryUnitSerialNumber[Controller->V2.PhysicalDeviceIndex - 1];
-
- if (CommandStatus != DAC960_V2_NormalCompletion) {
- memset(InquiryUnitSerialNumber,
- 0, sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
- } else
- memcpy(InquiryUnitSerialNumber,
- Controller->V2.NewInquiryUnitSerialNumber,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
-
- Controller->V2.NeedDeviceSerialNumberInformation = false;
- }
-
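- /* The controller announces new events by advancing
- NextEventSequenceNumber in the Health Status Buffer; fetch one
- event per pass until we catch up. */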
- if (Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
- - Controller->V2.NextEventSequenceNumber > 0)
- {
- CommandMailbox->GetEvent.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->GetEvent.DataTransferSize = sizeof(DAC960_V2_Event_T);
- CommandMailbox->GetEvent.EventSequenceNumberHigh16 =
- Controller->V2.NextEventSequenceNumber >> 16;
- CommandMailbox->GetEvent.ControllerNumber = 0;
- CommandMailbox->GetEvent.IOCTL_Opcode =
- DAC960_V2_GetEvent;
- CommandMailbox->GetEvent.EventSequenceNumberLow16 =
- Controller->V2.NextEventSequenceNumber & 0xFFFF;
- CommandMailbox->GetEvent.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.EventDMA;
- CommandMailbox->GetEvent.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->GetEvent.DataTransferSize;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V2.NeedPhysicalDeviceInformation)
- {
- if (Controller->V2.NeedDeviceSerialNumberInformation)
- {
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- Controller->V2.NewInquiryUnitSerialNumber;
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
-
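- /* NewPhysicalDeviceInformation->LogicalUnit has already been
- advanced past the device just processed, so back up by one to
- request that device's serial number. */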
- DAC960_V2_ConstructNewUnitSerialNumber(Controller, CommandMailbox,
- Controller->V2.NewPhysicalDeviceInformation->Channel,
- Controller->V2.NewPhysicalDeviceInformation->TargetID,
- Controller->V2.NewPhysicalDeviceInformation->LogicalUnit - 1);
-
-
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V2.StartPhysicalDeviceInformationScan)
- {
- Controller->V2.PhysicalDeviceIndex = 0;
- Controller->V2.NewPhysicalDeviceInformation->Channel = 0;
- Controller->V2.NewPhysicalDeviceInformation->TargetID = 0;
- Controller->V2.NewPhysicalDeviceInformation->LogicalUnit = 0;
- Controller->V2.StartPhysicalDeviceInformationScan = false;
- }
- CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
- sizeof(DAC960_V2_PhysicalDeviceInfo_T);
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit =
- Controller->V2.NewPhysicalDeviceInformation->LogicalUnit;
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID =
- Controller->V2.NewPhysicalDeviceInformation->TargetID;
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel =
- Controller->V2.NewPhysicalDeviceInformation->Channel;
- CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_GetPhysicalDeviceInfoValid;
- CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewPhysicalDeviceInformationDMA;
- CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->PhysicalDeviceInfo.DataTransferSize;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V2.NeedLogicalDeviceInformation)
- {
- if (Controller->V2.StartLogicalDeviceInformationScan)
- {
- int LogicalDriveNumber;
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < DAC960_MaxLogicalDrives;
- LogicalDriveNumber++)
- Controller->V2.LogicalDriveFoundDuringScan
- [LogicalDriveNumber] = false;
- Controller->V2.NewLogicalDeviceInformation->LogicalDeviceNumber = 0;
- Controller->V2.StartLogicalDeviceInformationScan = false;
- }
- CommandMailbox->LogicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->LogicalDeviceInfo.DataTransferSize =
- sizeof(DAC960_V2_LogicalDeviceInfo_T);
- CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
- Controller->V2.NewLogicalDeviceInformation->LogicalDeviceNumber;
- CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_GetLogicalDeviceInfoValid;
- CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewLogicalDeviceInformationDMA;
- CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->LogicalDeviceInfo.DataTransferSize;
- DAC960_QueueCommand(Command);
- return;
- }
- Controller->MonitoringTimerCount++;
- Controller->MonitoringTimer.expires =
- jiffies + DAC960_HealthStatusMonitoringInterval;
- add_timer(&Controller->MonitoringTimer);
- }
- if (CommandType == DAC960_ImmediateCommand)
- {
- complete(Command->Completion);
- Command->Completion = NULL;
- return;
- }
- if (CommandType == DAC960_QueuedCommand)
- {
- DAC960_V2_KernelCommand_T *KernelCommand = Command->V2.KernelCommand;
- KernelCommand->CommandStatus = CommandStatus;
- KernelCommand->RequestSenseLength = Command->V2.RequestSenseLength;
- KernelCommand->DataTransferLength = Command->V2.DataTransferResidue;
- Command->V2.KernelCommand = NULL;
- DAC960_DeallocateCommand(Command);
- KernelCommand->CompletionFunction(KernelCommand);
- return;
- }
- /*
- Queue a Status Monitoring Command to the Controller using the just
- completed Command if one was deferred previously due to lack of a
- free Command when the Monitoring Timer Function was called.
- */
- if (Controller->MonitoringCommandDeferred)
- {
- Controller->MonitoringCommandDeferred = false;
- DAC960_V2_QueueMonitoringCommand(Command);
- return;
- }
- /*
- Deallocate the Command.
- */
- DAC960_DeallocateCommand(Command);
- /*
- Wake up any processes waiting on a free Command.
- */
- wake_up(&Controller->CommandWaitQueue);
-}
-
-/*
- DAC960_GEM_InterruptHandler handles hardware interrupts from DAC960 GEM Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_GEM_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_StatusMailbox_T *NextStatusMailbox;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_GEM_AcknowledgeInterrupt(ControllerBaseAddress);
- NextStatusMailbox = Controller->V2.NextStatusMailbox;
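- /* Drain the status mailbox ring: a zero CommandIdentifier marks
- an empty slot, and the scan wraps from the last mailbox back to
- the first. The BA and LP handlers below follow the same
- pattern. */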
- while (NextStatusMailbox->Fields.CommandIdentifier > 0)
- {
- DAC960_V2_CommandIdentifier_T CommandIdentifier =
- NextStatusMailbox->Fields.CommandIdentifier;
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
- Command->V2.RequestSenseLength =
- NextStatusMailbox->Fields.RequestSenseLength;
- Command->V2.DataTransferResidue =
- NextStatusMailbox->Fields.DataTransferResidue;
- NextStatusMailbox->Words[0] = 0;
- if (++NextStatusMailbox > Controller->V2.LastStatusMailbox)
- NextStatusMailbox = Controller->V2.FirstStatusMailbox;
- DAC960_V2_ProcessCompletedCommand(Command);
- }
- Controller->V2.NextStatusMailbox = NextStatusMailbox;
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-/*
- DAC960_BA_InterruptHandler handles hardware interrupts from DAC960 BA Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_BA_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_StatusMailbox_T *NextStatusMailbox;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_BA_AcknowledgeInterrupt(ControllerBaseAddress);
- NextStatusMailbox = Controller->V2.NextStatusMailbox;
- while (NextStatusMailbox->Fields.CommandIdentifier > 0)
- {
- DAC960_V2_CommandIdentifier_T CommandIdentifier =
- NextStatusMailbox->Fields.CommandIdentifier;
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
- Command->V2.RequestSenseLength =
- NextStatusMailbox->Fields.RequestSenseLength;
- Command->V2.DataTransferResidue =
- NextStatusMailbox->Fields.DataTransferResidue;
- NextStatusMailbox->Words[0] = 0;
- if (++NextStatusMailbox > Controller->V2.LastStatusMailbox)
- NextStatusMailbox = Controller->V2.FirstStatusMailbox;
- DAC960_V2_ProcessCompletedCommand(Command);
- }
- Controller->V2.NextStatusMailbox = NextStatusMailbox;
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_LP_InterruptHandler handles hardware interrupts from DAC960 LP Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_LP_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_StatusMailbox_T *NextStatusMailbox;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_LP_AcknowledgeInterrupt(ControllerBaseAddress);
- NextStatusMailbox = Controller->V2.NextStatusMailbox;
- while (NextStatusMailbox->Fields.CommandIdentifier > 0)
- {
- DAC960_V2_CommandIdentifier_T CommandIdentifier =
- NextStatusMailbox->Fields.CommandIdentifier;
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
- Command->V2.RequestSenseLength =
- NextStatusMailbox->Fields.RequestSenseLength;
- Command->V2.DataTransferResidue =
- NextStatusMailbox->Fields.DataTransferResidue;
- NextStatusMailbox->Words[0] = 0;
- if (++NextStatusMailbox > Controller->V2.LastStatusMailbox)
- NextStatusMailbox = Controller->V2.FirstStatusMailbox;
- DAC960_V2_ProcessCompletedCommand(Command);
- }
- Controller->V2.NextStatusMailbox = NextStatusMailbox;
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_LA_InterruptHandler handles hardware interrupts from DAC960 LA Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_LA_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_StatusMailbox_T *NextStatusMailbox;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_LA_AcknowledgeInterrupt(ControllerBaseAddress);
- NextStatusMailbox = Controller->V1.NextStatusMailbox;
- while (NextStatusMailbox->Fields.Valid)
- {
- DAC960_V1_CommandIdentifier_T CommandIdentifier =
- NextStatusMailbox->Fields.CommandIdentifier;
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V1.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
- NextStatusMailbox->Word = 0;
- if (++NextStatusMailbox > Controller->V1.LastStatusMailbox)
- NextStatusMailbox = Controller->V1.FirstStatusMailbox;
- DAC960_V1_ProcessCompletedCommand(Command);
- }
- Controller->V1.NextStatusMailbox = NextStatusMailbox;
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_PG_InterruptHandler handles hardware interrupts from DAC960 PG Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_PG_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_StatusMailbox_T *NextStatusMailbox;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_PG_AcknowledgeInterrupt(ControllerBaseAddress);
- NextStatusMailbox = Controller->V1.NextStatusMailbox;
- while (NextStatusMailbox->Fields.Valid)
- {
- DAC960_V1_CommandIdentifier_T CommandIdentifier =
- NextStatusMailbox->Fields.CommandIdentifier;
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V1.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
- NextStatusMailbox->Word = 0;
- if (++NextStatusMailbox > Controller->V1.LastStatusMailbox)
- NextStatusMailbox = Controller->V1.FirstStatusMailbox;
- DAC960_V1_ProcessCompletedCommand(Command);
- }
- Controller->V1.NextStatusMailbox = NextStatusMailbox;
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_PD_InterruptHandler handles hardware interrupts from DAC960 PD Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_PD_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while (DAC960_PD_StatusAvailableP(ControllerBaseAddress))
- {
- DAC960_V1_CommandIdentifier_T CommandIdentifier =
- DAC960_PD_ReadStatusCommandIdentifier(ControllerBaseAddress);
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V1.CommandStatus =
- DAC960_PD_ReadStatusRegister(ControllerBaseAddress);
- DAC960_PD_AcknowledgeInterrupt(ControllerBaseAddress);
- DAC960_PD_AcknowledgeStatus(ControllerBaseAddress);
- DAC960_V1_ProcessCompletedCommand(Command);
- }
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_P_InterruptHandler handles hardware interrupts from DAC960 P Series
- Controllers.
-
- Translations of DAC960_V1_Enquiry and DAC960_V1_GetDeviceState rely
- on the data having been placed into DAC960_Controller_T, rather than
- an arbitrary buffer.
-*/
-
-static irqreturn_t DAC960_P_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while (DAC960_PD_StatusAvailableP(ControllerBaseAddress))
- {
- DAC960_V1_CommandIdentifier_T CommandIdentifier =
- DAC960_PD_ReadStatusCommandIdentifier(ControllerBaseAddress);
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandOpcode_T CommandOpcode =
- CommandMailbox->Common.CommandOpcode;
- Command->V1.CommandStatus =
- DAC960_PD_ReadStatusRegister(ControllerBaseAddress);
- DAC960_PD_AcknowledgeInterrupt(ControllerBaseAddress);
- DAC960_PD_AcknowledgeStatus(ControllerBaseAddress);
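- /* The P Series firmware uses an older opcode set; rewrite each
- completed opcode to its current equivalent and translate the
- affected data or mailbox in place so the common V1 completion
- path sees current-format structures. */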
- switch (CommandOpcode)
- {
- case DAC960_V1_Enquiry_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Enquiry;
- DAC960_P_To_PD_TranslateEnquiry(Controller->V1.NewEnquiry);
- break;
- case DAC960_V1_GetDeviceState_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode =
- DAC960_V1_GetDeviceState;
- DAC960_P_To_PD_TranslateDeviceState(Controller->V1.NewDeviceState);
- break;
- case DAC960_V1_Read_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Read;
- DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_Write_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Write;
- DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_ReadWithScatterGather_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode =
- DAC960_V1_ReadWithScatterGather;
- DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_WriteWithScatterGather_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode =
- DAC960_V1_WriteWithScatterGather;
- DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
- break;
- default:
- break;
- }
- DAC960_V1_ProcessCompletedCommand(Command);
- }
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_V1_QueueMonitoringCommand queues a Monitoring Command to DAC960 V1
- Firmware Controllers.
-*/
-
-static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_MonitoringCommand;
- CommandMailbox->Type3.CommandOpcode = DAC960_V1_Enquiry;
- CommandMailbox->Type3.BusAddress = Controller->V1.NewEnquiryDMA;
- DAC960_QueueCommand(Command);
-}
-
-
-/*
- DAC960_V2_QueueMonitoringCommand queues a Monitoring Command to DAC960 V2
- Firmware Controllers.
-*/
-
-static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_MonitoringCommand;
- CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->ControllerInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->ControllerInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->ControllerInfo.DataTransferSize =
- sizeof(DAC960_V2_ControllerInfo_T);
- CommandMailbox->ControllerInfo.ControllerNumber = 0;
- CommandMailbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo;
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewControllerInformationDMA;
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->ControllerInfo.DataTransferSize;
- DAC960_QueueCommand(Command);
-}
-
-
-/*
- DAC960_MonitoringTimerFunction is the timer function for monitoring
- the status of DAC960 Controllers.
-*/
-
-static void DAC960_MonitoringTimerFunction(struct timer_list *t)
-{
- DAC960_Controller_T *Controller = from_timer(Controller, t, MonitoringTimer);
- DAC960_Command_T *Command;
- unsigned long flags;
-
- if (Controller->FirmwareType == DAC960_V1_Controller)
- {
- spin_lock_irqsave(&Controller->queue_lock, flags);
- /*
- Queue a Status Monitoring Command to Controller.
- */
- Command = DAC960_AllocateCommand(Controller);
- if (Command != NULL)
- DAC960_V1_QueueMonitoringCommand(Command);
- else Controller->MonitoringCommandDeferred = true;
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- }
- else
- {
- DAC960_V2_ControllerInfo_T *ControllerInfo =
- &Controller->V2.ControllerInformation;
- unsigned int StatusChangeCounter =
- Controller->V2.HealthStatusBuffer->StatusChangeCounter;
- bool ForceMonitoringCommand = false;
- if (time_after(jiffies, Controller->SecondaryMonitoringTime
- + DAC960_SecondaryMonitoringInterval))
- {
- int LogicalDriveNumber;
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < DAC960_MaxLogicalDrives;
- LogicalDriveNumber++)
- {
- DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
- Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
- if (LogicalDeviceInfo == NULL) continue;
- if (!LogicalDeviceInfo->LogicalDeviceControl
- .LogicalDeviceInitialized)
- {
- ForceMonitoringCommand = true;
- break;
- }
- }
- Controller->SecondaryMonitoringTime = jiffies;
- }
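- /* Re-arm the timer without queueing a full monitoring command
- when nothing has changed: same status change counter, no unseen
- events, and either no background operations active or the
- primary interval not yet elapsed; an uninitialized logical
- drive found above overrides this. */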
- if (StatusChangeCounter == Controller->V2.StatusChangeCounter &&
- Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
- == Controller->V2.NextEventSequenceNumber &&
- (ControllerInfo->BackgroundInitializationsActive +
- ControllerInfo->LogicalDeviceInitializationsActive +
- ControllerInfo->PhysicalDeviceInitializationsActive +
- ControllerInfo->ConsistencyChecksActive +
- ControllerInfo->RebuildsActive +
- ControllerInfo->OnlineExpansionsActive == 0 ||
- time_before(jiffies, Controller->PrimaryMonitoringTime
- + DAC960_MonitoringTimerInterval)) &&
- !ForceMonitoringCommand)
- {
- Controller->MonitoringTimer.expires =
- jiffies + DAC960_HealthStatusMonitoringInterval;
- add_timer(&Controller->MonitoringTimer);
- return;
- }
- Controller->V2.StatusChangeCounter = StatusChangeCounter;
- Controller->PrimaryMonitoringTime = jiffies;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- /*
- Queue a Status Monitoring Command to Controller.
- */
- Command = DAC960_AllocateCommand(Controller);
- if (Command != NULL)
- DAC960_V2_QueueMonitoringCommand(Command);
- else Controller->MonitoringCommandDeferred = true;
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- /*
- Wake up any processes waiting on a Health Status Buffer change.
- */
- wake_up(&Controller->HealthStatusWaitQueue);
- }
-}
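
DAC960_MonitoringTimerFunction is a self-rearming kernel timer: the timer_list is embedded in the controller structure, from_timer() recovers the enclosing structure, and the V2 path re-arms with add_timer() once the health status has been inspected. A minimal sketch of that pattern in isolation, assuming the modern timer API (timer_setup()/from_timer()/mod_timer()); all names here are illustrative:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct monitored_state {
        struct timer_list timer;        /* embedded in the watched object */
        /* ... controller state ... */
};

static void monitor_fn(struct timer_list *t)
{
        /* Recover the enclosing structure from the embedded timer_list. */
        struct monitored_state *s = from_timer(s, t, timer);

        /* ... inspect state, queue monitoring work ... */
        mod_timer(&s->timer, jiffies + 10 * HZ);        /* re-arm */
}

static void monitor_start(struct monitored_state *s)
{
        timer_setup(&s->timer, monitor_fn, 0);
        mod_timer(&s->timer, jiffies + 10 * HZ);
}
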
-
-/*
- DAC960_CheckStatusBuffer verifies that there is room to hold ByteCount
- additional bytes in the Combined Status Buffer and grows the buffer if
- necessary. It returns true if there is enough room and false otherwise.
-*/
-
-static bool DAC960_CheckStatusBuffer(DAC960_Controller_T *Controller,
- unsigned int ByteCount)
-{
- unsigned char *NewStatusBuffer;
- if (Controller->InitialStatusLength + 1 +
- Controller->CurrentStatusLength + ByteCount + 1 <=
- Controller->CombinedStatusBufferLength)
- return true;
- if (Controller->CombinedStatusBufferLength == 0)
- {
- unsigned int NewStatusBufferLength = DAC960_InitialStatusBufferSize;
- while (NewStatusBufferLength < ByteCount)
- NewStatusBufferLength *= 2;
- Controller->CombinedStatusBuffer = kmalloc(NewStatusBufferLength,
- GFP_ATOMIC);
- if (Controller->CombinedStatusBuffer == NULL) return false;
- Controller->CombinedStatusBufferLength = NewStatusBufferLength;
- return true;
- }
- NewStatusBuffer = kmalloc_array(2, Controller->CombinedStatusBufferLength,
- GFP_ATOMIC);
- if (NewStatusBuffer == NULL)
- {
- DAC960_Warning("Unable to expand Combined Status Buffer - Truncating\n",
- Controller);
- return false;
- }
- memcpy(NewStatusBuffer, Controller->CombinedStatusBuffer,
- Controller->CombinedStatusBufferLength);
- kfree(Controller->CombinedStatusBuffer);
- Controller->CombinedStatusBuffer = NewStatusBuffer;
- Controller->CombinedStatusBufferLength *= 2;
- Controller->CurrentStatusBuffer =
- &NewStatusBuffer[Controller->InitialStatusLength + 1];
- return true;
-}
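
The growth step above is the classic double-on-demand strategy, performed with GFP_ATOMIC because messages may be logged from interrupt context. On kernels providing krealloc(), the kmalloc_array()/memcpy()/kfree() sequence collapses into one call; a hypothetical equivalent (the helper name is ours):

#include <linux/slab.h>
#include <linux/types.h>

/* Double *Buffer in place; on failure the old buffer is left intact,
   mirroring the truncation behaviour of DAC960_CheckStatusBuffer. */
static bool DAC960_GrowStatusBuffer(unsigned char **Buffer,
                                    unsigned int *Length)
{
        unsigned char *NewBuffer = krealloc(*Buffer, *Length * 2, GFP_ATOMIC);

        if (NewBuffer == NULL)
                return false;
        *Buffer = NewBuffer;
        *Length *= 2;
        return true;
}
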
-
-
-/*
- DAC960_Message prints Driver Messages.
-*/
-
-static void DAC960_Message(DAC960_MessageLevel_T MessageLevel,
- unsigned char *Format,
- DAC960_Controller_T *Controller,
- ...)
-{
- static unsigned char Buffer[DAC960_LineBufferSize];
- static bool BeginningOfLine = true;
- va_list Arguments;
- int Length = 0;
- va_start(Arguments, Controller);
- Length = vscnprintf(Buffer, sizeof(Buffer), Format, Arguments);
- va_end(Arguments);
- if (Controller == NULL)
- printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
- DAC960_ControllerCount, Buffer);
- else if (MessageLevel == DAC960_AnnounceLevel ||
- MessageLevel == DAC960_InfoLevel)
- {
- if (!Controller->ControllerInitialized)
- {
- if (DAC960_CheckStatusBuffer(Controller, Length))
- {
- strcpy(&Controller->CombinedStatusBuffer
- [Controller->InitialStatusLength],
- Buffer);
- Controller->InitialStatusLength += Length;
- Controller->CurrentStatusBuffer =
- &Controller->CombinedStatusBuffer
- [Controller->InitialStatusLength + 1];
- }
- if (MessageLevel == DAC960_AnnounceLevel)
- {
- static int AnnouncementLines = 0;
- if (++AnnouncementLines <= 2)
- printk("%sDAC960: %s", DAC960_MessageLevelMap[MessageLevel],
- Buffer);
- }
- else
- {
- if (BeginningOfLine)
- {
- if (Buffer[0] != '\n' || Length > 1)
- printk("%sDAC960#%d: %s",
- DAC960_MessageLevelMap[MessageLevel],
- Controller->ControllerNumber, Buffer);
- }
- else printk("%s", Buffer);
- }
- }
- else if (DAC960_CheckStatusBuffer(Controller, Length))
- {
- strcpy(&Controller->CurrentStatusBuffer[
- Controller->CurrentStatusLength], Buffer);
- Controller->CurrentStatusLength += Length;
- }
- }
- else if (MessageLevel == DAC960_ProgressLevel)
- {
- strcpy(Controller->ProgressBuffer, Buffer);
- Controller->ProgressBufferLength = Length;
- if (Controller->EphemeralProgressMessage)
- {
- if (time_after_eq(jiffies, Controller->LastProgressReportTime
- + DAC960_ProgressReportingInterval))
- {
- printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
- Controller->ControllerNumber, Buffer);
- Controller->LastProgressReportTime = jiffies;
- }
- }
- else printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
- Controller->ControllerNumber, Buffer);
- }
- else if (MessageLevel == DAC960_UserCriticalLevel)
- {
- strcpy(&Controller->UserStatusBuffer[Controller->UserStatusLength],
- Buffer);
- Controller->UserStatusLength += Length;
- if (Buffer[0] != '\n' || Length > 1)
- printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
- Controller->ControllerNumber, Buffer);
- }
- else
- {
- if (BeginningOfLine)
- printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
- Controller->ControllerNumber, Buffer);
- else printk("%s", Buffer);
- }
- BeginningOfLine = (Length > 0 && Buffer[Length-1] == '\n');
-}
-
-
-/*
- DAC960_ParsePhysicalDevice parses spaces followed by a Physical Device
- Channel:TargetID specification from a User Command string. It updates
- Channel and TargetID and returns true on success and false on failure.
-*/
-
-static bool DAC960_ParsePhysicalDevice(DAC960_Controller_T *Controller,
- char *UserCommandString,
- unsigned char *Channel,
- unsigned char *TargetID)
-{
- char *NewUserCommandString = UserCommandString;
- unsigned long XChannel, XTargetID;
- while (*UserCommandString == ' ') UserCommandString++;
- if (UserCommandString == NewUserCommandString)
- return false;
- XChannel = simple_strtoul(UserCommandString, &NewUserCommandString, 10);
- if (NewUserCommandString == UserCommandString ||
- *NewUserCommandString != ':' ||
- XChannel >= Controller->Channels)
- return false;
- UserCommandString = ++NewUserCommandString;
- XTargetID = simple_strtoul(UserCommandString, &NewUserCommandString, 10);
- if (NewUserCommandString == UserCommandString ||
- *NewUserCommandString != '\0' ||
- XTargetID >= Controller->Targets)
- return false;
- *Channel = XChannel;
- *TargetID = XTargetID;
- return true;
-}
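
The accepted grammar is one or more spaces, a decimal Channel, a ':', and a decimal TargetID, each range-checked against the controller's limits; DAC960_ParseLogicalDrive below applies the identical pattern to a single drive number. A userspace sketch of the same parse, substituting strtoul() for the kernel's simple_strtoul(), so that " 1:3" yields Channel 1 and TargetID 3:

#include <stdbool.h>
#include <stdlib.h>

static bool parse_physical_device(const char *s, unsigned long *channel,
                                  unsigned long *target)
{
        char *end;

        if (*s != ' ')
                return false;           /* at least one leading space */
        while (*s == ' ')
                s++;
        *channel = strtoul(s, &end, 10);
        if (end == s || *end != ':')
                return false;
        s = end + 1;
        *target = strtoul(s, &end, 10);
        return end != s && *end == '\0';
}
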
-
-
-/*
- DAC960_ParseLogicalDrive parses spaces followed by a Logical Drive Number
- specification from a User Command string. It updates LogicalDriveNumber and
- returns true on success and false on failure.
-*/
-
-static bool DAC960_ParseLogicalDrive(DAC960_Controller_T *Controller,
- char *UserCommandString,
- unsigned char *LogicalDriveNumber)
-{
- char *NewUserCommandString = UserCommandString;
- unsigned long XLogicalDriveNumber;
- while (*UserCommandString == ' ') UserCommandString++;
- if (UserCommandString == NewUserCommandString)
- return false;
- XLogicalDriveNumber =
- simple_strtoul(UserCommandString, &NewUserCommandString, 10);
- if (NewUserCommandString == UserCommandString ||
- *NewUserCommandString != '\0' ||
- XLogicalDriveNumber > DAC960_MaxLogicalDrives - 1)
- return false;
- *LogicalDriveNumber = XLogicalDriveNumber;
- return true;
-}
-
-
-/*
- DAC960_V1_SetDeviceState sets the Device State for a Physical Device for
- DAC960 V1 Firmware Controllers.
-*/
-
-static void DAC960_V1_SetDeviceState(DAC960_Controller_T *Controller,
- DAC960_Command_T *Command,
- unsigned char Channel,
- unsigned char TargetID,
- DAC960_V1_PhysicalDeviceState_T
- DeviceState,
- const unsigned char *DeviceStateString)
-{
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- CommandMailbox->Type3D.CommandOpcode = DAC960_V1_StartDevice;
- CommandMailbox->Type3D.Channel = Channel;
- CommandMailbox->Type3D.TargetID = TargetID;
- CommandMailbox->Type3D.DeviceState = DeviceState;
- CommandMailbox->Type3D.Modifier = 0;
- DAC960_ExecuteCommand(Command);
- switch (Command->V1.CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- DAC960_UserCritical("%s of Physical Device %d:%d Succeeded\n", Controller,
- DeviceStateString, Channel, TargetID);
- break;
- case DAC960_V1_UnableToStartDevice:
- DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
- "Unable to Start Device\n", Controller,
- DeviceStateString, Channel, TargetID);
- break;
- case DAC960_V1_NoDeviceAtAddress:
- DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
- "No Device at Address\n", Controller,
- DeviceStateString, Channel, TargetID);
- break;
- case DAC960_V1_InvalidChannelOrTargetOrModifier:
- DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
- "Invalid Channel or Target or Modifier\n",
- Controller, DeviceStateString, Channel, TargetID);
- break;
- case DAC960_V1_ChannelBusy:
- DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
- "Channel Busy\n", Controller,
- DeviceStateString, Channel, TargetID);
- break;
- default:
- DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
- "Unexpected Status %04X\n", Controller,
- DeviceStateString, Channel, TargetID,
- Command->V1.CommandStatus);
- break;
- }
-}
-
-
-/*
- DAC960_V1_ExecuteUserCommand executes a User Command for DAC960 V1 Firmware
- Controllers.
-*/
-
-static bool DAC960_V1_ExecuteUserCommand(DAC960_Controller_T *Controller,
- unsigned char *UserCommand)
-{
- DAC960_Command_T *Command;
- DAC960_V1_CommandMailbox_T *CommandMailbox;
- unsigned long flags;
- unsigned char Channel, TargetID, LogicalDriveNumber;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
- DAC960_WaitForCommand(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- Controller->UserStatusLength = 0;
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox = &Command->V1.CommandMailbox;
- if (strcmp(UserCommand, "flush-cache") == 0)
- {
- CommandMailbox->Type3.CommandOpcode = DAC960_V1_Flush;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Cache Flush Completed\n", Controller);
- }
- else if (strncmp(UserCommand, "kill", 4) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[4],
- &Channel, &TargetID))
- {
- DAC960_V1_DeviceState_T *DeviceState =
- &Controller->V1.DeviceState[Channel][TargetID];
- if (DeviceState->Present &&
- DeviceState->DeviceType == DAC960_V1_DiskType &&
- DeviceState->DeviceState != DAC960_V1_Device_Dead)
- DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
- DAC960_V1_Device_Dead, "Kill");
- else DAC960_UserCritical("Kill of Physical Device %d:%d Illegal\n",
- Controller, Channel, TargetID);
- }
- else if (strncmp(UserCommand, "make-online", 11) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[11],
- &Channel, &TargetID))
- {
- DAC960_V1_DeviceState_T *DeviceState =
- &Controller->V1.DeviceState[Channel][TargetID];
- if (DeviceState->Present &&
- DeviceState->DeviceType == DAC960_V1_DiskType &&
- DeviceState->DeviceState == DAC960_V1_Device_Dead)
- DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
- DAC960_V1_Device_Online, "Make Online");
- else DAC960_UserCritical("Make Online of Physical Device %d:%d Illegal\n",
- Controller, Channel, TargetID);
-
- }
- else if (strncmp(UserCommand, "make-standby", 12) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[12],
- &Channel, &TargetID))
- {
- DAC960_V1_DeviceState_T *DeviceState =
- &Controller->V1.DeviceState[Channel][TargetID];
- if (DeviceState->Present &&
- DeviceState->DeviceType == DAC960_V1_DiskType &&
- DeviceState->DeviceState == DAC960_V1_Device_Dead)
- DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
- DAC960_V1_Device_Standby, "Make Standby");
- else DAC960_UserCritical("Make Standby of Physical "
- "Device %d:%d Illegal\n",
- Controller, Channel, TargetID);
- }
- else if (strncmp(UserCommand, "rebuild", 7) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[7],
- &Channel, &TargetID))
- {
- CommandMailbox->Type3D.CommandOpcode = DAC960_V1_RebuildAsync;
- CommandMailbox->Type3D.Channel = Channel;
- CommandMailbox->Type3D.TargetID = TargetID;
- DAC960_ExecuteCommand(Command);
- switch (Command->V1.CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Initiated\n",
- Controller, Channel, TargetID);
- break;
- case DAC960_V1_AttemptToRebuildOnlineDrive:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
- "Attempt to Rebuild Online or "
- "Unresponsive Drive\n",
- Controller, Channel, TargetID);
- break;
- case DAC960_V1_NewDiskFailedDuringRebuild:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
- "New Disk Failed During Rebuild\n",
- Controller, Channel, TargetID);
- break;
- case DAC960_V1_InvalidDeviceAddress:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
- "Invalid Device Address\n",
- Controller, Channel, TargetID);
- break;
- case DAC960_V1_RebuildOrCheckAlreadyInProgress:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
- "Rebuild or Consistency Check Already "
- "in Progress\n", Controller, Channel, TargetID);
- break;
- default:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
- "Unexpected Status %04X\n", Controller,
- Channel, TargetID, Command->V1.CommandStatus);
- break;
- }
- }
- else if (strncmp(UserCommand, "check-consistency", 17) == 0 &&
- DAC960_ParseLogicalDrive(Controller, &UserCommand[17],
- &LogicalDriveNumber))
- {
- CommandMailbox->Type3C.CommandOpcode = DAC960_V1_CheckConsistencyAsync;
- CommandMailbox->Type3C.LogicalDriveNumber = LogicalDriveNumber;
- CommandMailbox->Type3C.AutoRestore = true;
- DAC960_ExecuteCommand(Command);
- switch (Command->V1.CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) Initiated\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- break;
- case DAC960_V1_DependentDiskIsDead:
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) Failed - "
- "Dependent Physical Device is DEAD\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- break;
- case DAC960_V1_InvalidOrNonredundantLogicalDrive:
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) Failed - "
- "Invalid or Nonredundant Logical Drive\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- break;
- case DAC960_V1_RebuildOrCheckAlreadyInProgress:
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) Failed - Rebuild or "
- "Consistency Check Already in Progress\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- break;
- default:
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) Failed - "
- "Unexpected Status %04X\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber, Command->V1.CommandStatus);
- break;
- }
- }
- else if (strcmp(UserCommand, "cancel-rebuild") == 0 ||
- strcmp(UserCommand, "cancel-consistency-check") == 0)
- {
- /*
- the OldRebuildRateConstant is never actually used
- once its value is retrieved from the controller.
- */
- unsigned char *OldRebuildRateConstant;
- dma_addr_t OldRebuildRateConstantDMA;
-
- OldRebuildRateConstant = pci_alloc_consistent(Controller->PCIDevice,
- sizeof(char), &OldRebuildRateConstantDMA);
- if (OldRebuildRateConstant == NULL) {
- DAC960_UserCritical("Cancellation of Rebuild or "
- "Consistency Check Failed - "
- "Out of Memory",
- Controller);
- goto failure;
- }
- CommandMailbox->Type3R.CommandOpcode = DAC960_V1_RebuildControl;
- CommandMailbox->Type3R.RebuildRateConstant = 0xFF;
- CommandMailbox->Type3R.BusAddress = OldRebuildRateConstantDMA;
- DAC960_ExecuteCommand(Command);
- switch (Command->V1.CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- DAC960_UserCritical("Rebuild or Consistency Check Cancelled\n",
- Controller);
- break;
- default:
- DAC960_UserCritical("Cancellation of Rebuild or "
- "Consistency Check Failed - "
- "Unexpected Status %04X\n",
- Controller, Command->V1.CommandStatus);
- break;
- }
-failure:
- pci_free_consistent(Controller->PCIDevice, sizeof(char),
- OldRebuildRateConstant, OldRebuildRateConstantDMA);
- }
- else DAC960_UserCritical("Illegal User Command: '%s'\n",
- Controller, UserCommand);
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_DeallocateCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return true;
-}
-
-
-/*
- DAC960_V2_TranslatePhysicalDevice translates a Physical Device Channel and
- TargetID into a Logical Device. It returns true on success and false
- on failure.
-*/
-
-static bool DAC960_V2_TranslatePhysicalDevice(DAC960_Command_T *Command,
- unsigned char Channel,
- unsigned char TargetID,
- unsigned short
- *LogicalDeviceNumber)
-{
- DAC960_V2_CommandMailbox_T SavedCommandMailbox, *CommandMailbox;
- DAC960_Controller_T *Controller = Command->Controller;
-
- CommandMailbox = &Command->V2.CommandMailbox;
- memcpy(&SavedCommandMailbox, CommandMailbox,
- sizeof(DAC960_V2_CommandMailbox_T));
-
- CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->PhysicalDeviceInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->PhysicalDeviceInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
- sizeof(DAC960_V2_PhysicalToLogicalDevice_T);
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID;
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel;
- CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_TranslatePhysicalToLogicalDevice;
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.PhysicalToLogicalDeviceDMA;
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->Common.DataTransferSize;
-
- DAC960_ExecuteCommand(Command);
- *LogicalDeviceNumber = Controller->V2.PhysicalToLogicalDevice->LogicalDeviceNumber;
-
- memcpy(CommandMailbox, &SavedCommandMailbox,
- sizeof(DAC960_V2_CommandMailbox_T));
- return (Command->V2.CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-/*
- DAC960_V2_ExecuteUserCommand executes a User Command for DAC960 V2 Firmware
- Controllers.
-*/
-
-static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
- unsigned char *UserCommand)
-{
- DAC960_Command_T *Command;
- DAC960_V2_CommandMailbox_T *CommandMailbox;
- unsigned long flags;
- unsigned char Channel, TargetID, LogicalDriveNumber;
- unsigned short LogicalDeviceNumber;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
- DAC960_WaitForCommand(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- Controller->UserStatusLength = 0;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox = &Command->V2.CommandMailbox;
- CommandMailbox->Common.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->Common.CommandControlBits.DataTransferControllerToHost = true;
- CommandMailbox->Common.CommandControlBits.NoAutoRequestSense = true;
- if (strcmp(UserCommand, "flush-cache") == 0)
- {
- CommandMailbox->DeviceOperation.IOCTL_Opcode = DAC960_V2_PauseDevice;
- CommandMailbox->DeviceOperation.OperationDevice =
- DAC960_V2_RAID_Controller;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Cache Flush Completed\n", Controller);
- }
- else if (strncmp(UserCommand, "kill", 4) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[4],
- &Channel, &TargetID) &&
- DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
- &LogicalDeviceNumber))
- {
- CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->SetDeviceState.IOCTL_Opcode =
- DAC960_V2_SetDeviceState;
- CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
- DAC960_V2_Device_Dead;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Kill of Physical Device %d:%d %s\n",
- Controller, Channel, TargetID,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Succeeded" : "Failed"));
- }
- else if (strncmp(UserCommand, "make-online", 11) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[11],
- &Channel, &TargetID) &&
- DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
- &LogicalDeviceNumber))
- {
- CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->SetDeviceState.IOCTL_Opcode =
- DAC960_V2_SetDeviceState;
- CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
- DAC960_V2_Device_Online;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Make Online of Physical Device %d:%d %s\n",
- Controller, Channel, TargetID,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Succeeded" : "Failed"));
- }
- else if (strncmp(UserCommand, "make-standby", 12) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[12],
- &Channel, &TargetID) &&
- DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
- &LogicalDeviceNumber))
- {
- CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->SetDeviceState.IOCTL_Opcode =
- DAC960_V2_SetDeviceState;
- CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
- DAC960_V2_Device_Standby;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Make Standby of Physical Device %d:%d %s\n",
- Controller, Channel, TargetID,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Succeeded" : "Failed"));
- }
- else if (strncmp(UserCommand, "rebuild", 7) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[7],
- &Channel, &TargetID) &&
- DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
- &LogicalDeviceNumber))
- {
- CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_RebuildDeviceStart;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Rebuild of Physical Device %d:%d %s\n",
- Controller, Channel, TargetID,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Initiated" : "Not Initiated"));
- }
- else if (strncmp(UserCommand, "cancel-rebuild", 14) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[14],
- &Channel, &TargetID) &&
- DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
- &LogicalDeviceNumber))
- {
- CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_RebuildDeviceStop;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Rebuild of Physical Device %d:%d %s\n",
- Controller, Channel, TargetID,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Cancelled" : "Not Cancelled"));
- }
- else if (strncmp(UserCommand, "check-consistency", 17) == 0 &&
- DAC960_ParseLogicalDrive(Controller, &UserCommand[17],
- &LogicalDriveNumber))
- {
- CommandMailbox->ConsistencyCheck.LogicalDevice.LogicalDeviceNumber =
- LogicalDriveNumber;
- CommandMailbox->ConsistencyCheck.IOCTL_Opcode =
- DAC960_V2_ConsistencyCheckStart;
- CommandMailbox->ConsistencyCheck.RestoreConsistency = true;
- CommandMailbox->ConsistencyCheck.InitializedAreaOnly = false;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) %s\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Initiated" : "Not Initiated"));
- }
- else if (strncmp(UserCommand, "cancel-consistency-check", 24) == 0 &&
- DAC960_ParseLogicalDrive(Controller, &UserCommand[24],
- &LogicalDriveNumber))
- {
- CommandMailbox->ConsistencyCheck.LogicalDevice.LogicalDeviceNumber =
- LogicalDriveNumber;
- CommandMailbox->ConsistencyCheck.IOCTL_Opcode =
- DAC960_V2_ConsistencyCheckStop;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) %s\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Cancelled" : "Not Cancelled"));
- }
- else if (strcmp(UserCommand, "perform-discovery") == 0)
- {
- CommandMailbox->Common.IOCTL_Opcode = DAC960_V2_StartDiscovery;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Discovery %s\n", Controller,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Initiated" : "Not Initiated"));
- if (Command->V2.CommandStatus == DAC960_V2_NormalCompletion)
- {
- CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->ControllerInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->ControllerInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->ControllerInfo.DataTransferSize =
- sizeof(DAC960_V2_ControllerInfo_T);
- CommandMailbox->ControllerInfo.ControllerNumber = 0;
- CommandMailbox->ControllerInfo.IOCTL_Opcode =
- DAC960_V2_GetControllerInfo;
- /*
- * How does this NOT race with the queued Monitoring
- * usage of this structure?
- */
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewControllerInformationDMA;
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->ControllerInfo.DataTransferSize;
- while (1) {
- DAC960_ExecuteCommand(Command);
- if (!Controller->V2.NewControllerInformation->PhysicalScanActive)
- break;
- msleep(1000);
- }
- DAC960_UserCritical("Discovery Completed\n", Controller);
- }
- }
- else if (strcmp(UserCommand, "suppress-enclosure-messages") == 0)
- Controller->SuppressEnclosureMessages = true;
- else DAC960_UserCritical("Illegal User Command: '%s'\n",
- Controller, UserCommand);
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_DeallocateCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return true;
-}
-
-static int __maybe_unused dac960_proc_show(struct seq_file *m, void *v)
-{
- unsigned char *StatusMessage = "OK\n";
- int ControllerNumber;
- for (ControllerNumber = 0;
- ControllerNumber < DAC960_ControllerCount;
- ControllerNumber++)
- {
- DAC960_Controller_T *Controller = DAC960_Controllers[ControllerNumber];
- if (Controller == NULL) continue;
- if (Controller->MonitoringAlertMode)
- {
- StatusMessage = "ALERT\n";
- break;
- }
- }
- seq_puts(m, StatusMessage);
- return 0;
-}
-
-static int __maybe_unused dac960_initial_status_proc_show(struct seq_file *m,
- void *v)
-{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
- seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer);
- return 0;
-}
-
-static int __maybe_unused dac960_current_status_proc_show(struct seq_file *m,
- void *v)
-{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private;
- unsigned char *StatusMessage =
- "No Rebuild or Consistency Check in Progress\n";
- int ProgressMessageLength = strlen(StatusMessage);
- if (jiffies != Controller->LastCurrentStatusTime)
- {
- Controller->CurrentStatusLength = 0;
- DAC960_AnnounceDriver(Controller);
- DAC960_ReportControllerConfiguration(Controller);
- DAC960_ReportDeviceConfiguration(Controller);
- if (Controller->ProgressBufferLength > 0)
- ProgressMessageLength = Controller->ProgressBufferLength;
- if (DAC960_CheckStatusBuffer(Controller, 2 + ProgressMessageLength))
- {
- unsigned char *CurrentStatusBuffer = Controller->CurrentStatusBuffer;
- CurrentStatusBuffer[Controller->CurrentStatusLength++] = ' ';
- CurrentStatusBuffer[Controller->CurrentStatusLength++] = ' ';
- if (Controller->ProgressBufferLength > 0)
- strcpy(&CurrentStatusBuffer[Controller->CurrentStatusLength],
- Controller->ProgressBuffer);
- else
- strcpy(&CurrentStatusBuffer[Controller->CurrentStatusLength],
- StatusMessage);
- Controller->CurrentStatusLength += ProgressMessageLength;
- }
- Controller->LastCurrentStatusTime = jiffies;
- }
- seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer);
- return 0;
-}
-
-static int dac960_user_command_proc_show(struct seq_file *m, void *v)
-{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
-
- seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer);
- return 0;
-}
-
-static int dac960_user_command_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dac960_user_command_proc_show, PDE_DATA(inode));
-}
-
-static ssize_t dac960_user_command_proc_write(struct file *file,
- const char __user *Buffer,
- size_t Count, loff_t *pos)
-{
- DAC960_Controller_T *Controller = PDE_DATA(file_inode(file));
- unsigned char CommandBuffer[80];
- int Length;
- if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
- if (copy_from_user(CommandBuffer, Buffer, Count)) return -EFAULT;
- CommandBuffer[Count] = '\0';
- Length = strlen(CommandBuffer);
- if (Length > 0 && CommandBuffer[Length-1] == '\n')
- CommandBuffer[--Length] = '\0';
- if (Controller->FirmwareType == DAC960_V1_Controller)
- return (DAC960_V1_ExecuteUserCommand(Controller, CommandBuffer)
- ? Count : -EBUSY);
- else
- return (DAC960_V2_ExecuteUserCommand(Controller, CommandBuffer)
- ? Count : -EBUSY);
-}
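
Together with DAC960_CreateProcEntries below, this write handler is what makes commands such as "flush-cache" or "rebuild 1:3" available from a shell through /proc/rd/cN/user_command. A hypothetical userspace sketch, equivalent to echo "rebuild 1:3" > /proc/rd/c0/user_command:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *command = "rebuild 1:3\n";  /* Channel 1, TargetID 3 */
        int fd = open("/proc/rd/c0/user_command", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* The handler strips the trailing newline before dispatching. */
        if (write(fd, command, strlen(command)) < 0)
                perror("write");
        close(fd);
        return 0;
}
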
-
-static const struct file_operations dac960_user_command_proc_fops = {
- .owner = THIS_MODULE,
- .open = dac960_user_command_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = dac960_user_command_proc_write,
-};
-
-/*
- DAC960_CreateProcEntries creates the /proc/rd/... entries for the
- DAC960 Driver.
-*/
-
-static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
-{
- struct proc_dir_entry *ControllerProcEntry;
-
- if (DAC960_ProcDirectoryEntry == NULL) {
- DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
- proc_create_single("status", 0, DAC960_ProcDirectoryEntry,
- dac960_proc_show);
- }
-
- snprintf(Controller->ControllerName, sizeof(Controller->ControllerName),
- "c%d", Controller->ControllerNumber);
- ControllerProcEntry = proc_mkdir(Controller->ControllerName,
- DAC960_ProcDirectoryEntry);
- proc_create_single_data("initial_status", 0, ControllerProcEntry,
- dac960_initial_status_proc_show, Controller);
- proc_create_single_data("current_status", 0, ControllerProcEntry,
- dac960_current_status_proc_show, Controller);
- proc_create_data("user_command", 0600, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
- Controller->ControllerProcEntry = ControllerProcEntry;
-}
-
-
-/*
- DAC960_DestroyProcEntries destroys the /proc/rd/... entries for the
- DAC960 Driver.
-*/
-
-static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
-{
- if (Controller->ControllerProcEntry == NULL)
- return;
- remove_proc_entry("initial_status", Controller->ControllerProcEntry);
- remove_proc_entry("current_status", Controller->ControllerProcEntry);
- remove_proc_entry("user_command", Controller->ControllerProcEntry);
- remove_proc_entry(Controller->ControllerName, DAC960_ProcDirectoryEntry);
- Controller->ControllerProcEntry = NULL;
-}
-
-#ifdef DAC960_GAM_MINOR
-
-static long DAC960_gam_get_controller_info(DAC960_ControllerInfo_T __user *UserSpaceControllerInfo)
-{
- DAC960_ControllerInfo_T ControllerInfo;
- DAC960_Controller_T *Controller;
- int ControllerNumber;
- long ErrorCode;
-
- if (UserSpaceControllerInfo == NULL)
- ErrorCode = -EINVAL;
- else ErrorCode = get_user(ControllerNumber,
- &UserSpaceControllerInfo->ControllerNumber);
- if (ErrorCode != 0)
- goto out;
- ErrorCode = -ENXIO;
- if (ControllerNumber < 0 ||
- ControllerNumber > DAC960_ControllerCount - 1) {
- goto out;
- }
- Controller = DAC960_Controllers[ControllerNumber];
- if (Controller == NULL)
- goto out;
- memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
- ControllerInfo.ControllerNumber = ControllerNumber;
- ControllerInfo.FirmwareType = Controller->FirmwareType;
- ControllerInfo.Channels = Controller->Channels;
- ControllerInfo.Targets = Controller->Targets;
- ControllerInfo.PCI_Bus = Controller->Bus;
- ControllerInfo.PCI_Device = Controller->Device;
- ControllerInfo.PCI_Function = Controller->Function;
- ControllerInfo.IRQ_Channel = Controller->IRQ_Channel;
- ControllerInfo.PCI_Address = Controller->PCI_Address;
- strcpy(ControllerInfo.ModelName, Controller->ModelName);
- strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
- ErrorCode = (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
- sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0);
-out:
- return ErrorCode;
-}
-
-static long DAC960_gam_v1_execute_command(DAC960_V1_UserCommand_T __user *UserSpaceUserCommand)
-{
- DAC960_V1_UserCommand_T UserCommand;
- DAC960_Controller_T *Controller;
- DAC960_Command_T *Command = NULL;
- DAC960_V1_CommandOpcode_T CommandOpcode;
- DAC960_V1_CommandStatus_T CommandStatus;
- DAC960_V1_DCDB_T DCDB;
- DAC960_V1_DCDB_T *DCDB_IOBUF = NULL;
- dma_addr_t DCDB_IOBUFDMA;
- unsigned long flags;
- int ControllerNumber, DataTransferLength;
- unsigned char *DataTransferBuffer = NULL;
- dma_addr_t DataTransferBufferDMA;
- long ErrorCode;
-
- if (UserSpaceUserCommand == NULL) {
- ErrorCode = -EINVAL;
- goto out;
- }
- if (copy_from_user(&UserCommand, UserSpaceUserCommand,
- sizeof(DAC960_V1_UserCommand_T))) {
- ErrorCode = -EFAULT;
- goto out;
- }
- ControllerNumber = UserCommand.ControllerNumber;
- ErrorCode = -ENXIO;
- if (ControllerNumber < 0 ||
- ControllerNumber > DAC960_ControllerCount - 1)
- goto out;
- Controller = DAC960_Controllers[ControllerNumber];
- if (Controller == NULL)
- goto out;
- ErrorCode = -EINVAL;
- if (Controller->FirmwareType != DAC960_V1_Controller)
- goto out;
- CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
- DataTransferLength = UserCommand.DataTransferLength;
- if (CommandOpcode & 0x80)
- goto out;
- if (CommandOpcode == DAC960_V1_DCDB)
- {
- if (copy_from_user(&DCDB, UserCommand.DCDB,
- sizeof(DAC960_V1_DCDB_T))) {
- ErrorCode = -EFAULT;
- goto out;
- }
- if (DCDB.Channel >= DAC960_V1_MaxChannels)
- goto out;
- if (!((DataTransferLength == 0 &&
- DCDB.Direction
- == DAC960_V1_DCDB_NoDataTransfer) ||
- (DataTransferLength > 0 &&
- DCDB.Direction
- == DAC960_V1_DCDB_DataTransferDeviceToSystem) ||
- (DataTransferLength < 0 &&
- DCDB.Direction
- == DAC960_V1_DCDB_DataTransferSystemToDevice)))
- goto out;
- if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
- != abs(DataTransferLength))
- goto out;
- DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
- sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
- if (DCDB_IOBUF == NULL) {
- ErrorCode = -ENOMEM;
- goto out;
- }
- }
- ErrorCode = -ENOMEM;
- if (DataTransferLength > 0)
- {
- DataTransferBuffer = pci_zalloc_consistent(Controller->PCIDevice,
- DataTransferLength,
- &DataTransferBufferDMA);
- if (DataTransferBuffer == NULL)
- goto out;
- }
- else if (DataTransferLength < 0)
- {
- DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
- -DataTransferLength, &DataTransferBufferDMA);
- if (DataTransferBuffer == NULL)
- goto out;
- if (copy_from_user(DataTransferBuffer,
- UserCommand.DataTransferBuffer,
- -DataTransferLength)) {
- ErrorCode = -EFAULT;
- goto out;
- }
- }
- if (CommandOpcode == DAC960_V1_DCDB)
- {
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
- DAC960_WaitForCommand(Controller);
- while (Controller->V1.DirectCommandActive[DCDB.Channel]
- [DCDB.TargetID])
- {
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- __wait_event(Controller->CommandWaitQueue,
- !Controller->V1.DirectCommandActive
- [DCDB.Channel][DCDB.TargetID]);
- spin_lock_irqsave(&Controller->queue_lock, flags);
- }
- Controller->V1.DirectCommandActive[DCDB.Channel]
- [DCDB.TargetID] = true;
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- memcpy(&Command->V1.CommandMailbox, &UserCommand.CommandMailbox,
- sizeof(DAC960_V1_CommandMailbox_T));
- Command->V1.CommandMailbox.Type3.BusAddress = DCDB_IOBUFDMA;
- DCDB.BusAddress = DataTransferBufferDMA;
- memcpy(DCDB_IOBUF, &DCDB, sizeof(DAC960_V1_DCDB_T));
- }
- else
- {
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
- DAC960_WaitForCommand(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- memcpy(&Command->V1.CommandMailbox, &UserCommand.CommandMailbox,
- sizeof(DAC960_V1_CommandMailbox_T));
- if (DataTransferBuffer != NULL)
- Command->V1.CommandMailbox.Type3.BusAddress =
- DataTransferBufferDMA;
- }
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V1.CommandStatus;
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_DeallocateCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- if (DataTransferLength > 0)
- {
- if (copy_to_user(UserCommand.DataTransferBuffer,
- DataTransferBuffer, DataTransferLength)) {
- ErrorCode = -EFAULT;
- goto Failure1;
- }
- }
- if (CommandOpcode == DAC960_V1_DCDB)
- {
- /*
- I don't believe Target or Channel in the DCDB_IOBUF
- should be any different from the contents of DCDB.
- */
- Controller->V1.DirectCommandActive[DCDB.Channel]
- [DCDB.TargetID] = false;
- if (copy_to_user(UserCommand.DCDB, DCDB_IOBUF,
- sizeof(DAC960_V1_DCDB_T))) {
- ErrorCode = -EFAULT;
- goto Failure1;
- }
- }
- ErrorCode = CommandStatus;
- Failure1:
- if (DataTransferBuffer != NULL)
- pci_free_consistent(Controller->PCIDevice, abs(DataTransferLength),
- DataTransferBuffer, DataTransferBufferDMA);
- if (DCDB_IOBUF != NULL)
- pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
- DCDB_IOBUF, DCDB_IOBUFDMA);
- out:
- return ErrorCode;
-}
-
-static long DAC960_gam_v2_execute_command(DAC960_V2_UserCommand_T __user *UserSpaceUserCommand)
-{
- DAC960_V2_UserCommand_T UserCommand;
- DAC960_Controller_T *Controller;
- DAC960_Command_T *Command = NULL;
- DAC960_V2_CommandMailbox_T *CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
- unsigned long flags;
- int ControllerNumber, DataTransferLength;
- int DataTransferResidue, RequestSenseLength;
- unsigned char *DataTransferBuffer = NULL;
- dma_addr_t DataTransferBufferDMA;
- unsigned char *RequestSenseBuffer = NULL;
- dma_addr_t RequestSenseBufferDMA;
- long ErrorCode = -EINVAL;
-
- if (UserSpaceUserCommand == NULL)
- goto out;
- if (copy_from_user(&UserCommand, UserSpaceUserCommand,
- sizeof(DAC960_V2_UserCommand_T))) {
- ErrorCode = -EFAULT;
- goto out;
- }
- ErrorCode = -ENXIO;
- ControllerNumber = UserCommand.ControllerNumber;
- if (ControllerNumber < 0 ||
- ControllerNumber > DAC960_ControllerCount - 1)
- goto out;
- Controller = DAC960_Controllers[ControllerNumber];
- if (Controller == NULL)
- goto out;
- if (Controller->FirmwareType != DAC960_V2_Controller){
- ErrorCode = -EINVAL;
- goto out;
- }
- DataTransferLength = UserCommand.DataTransferLength;
- ErrorCode = -ENOMEM;
- if (DataTransferLength > 0)
- {
- DataTransferBuffer = pci_zalloc_consistent(Controller->PCIDevice,
- DataTransferLength,
- &DataTransferBufferDMA);
- if (DataTransferBuffer == NULL)
- goto out;
- }
- else if (DataTransferLength < 0)
- {
- DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
- -DataTransferLength, &DataTransferBufferDMA);
- if (DataTransferBuffer == NULL)
- goto out;
- if (copy_from_user(DataTransferBuffer,
- UserCommand.DataTransferBuffer,
- -DataTransferLength)) {
- ErrorCode = -EFAULT;
- goto Failure2;
- }
- }
- RequestSenseLength = UserCommand.RequestSenseLength;
- if (RequestSenseLength > 0)
- {
- RequestSenseBuffer = pci_zalloc_consistent(Controller->PCIDevice,
- RequestSenseLength,
- &RequestSenseBufferDMA);
- if (RequestSenseBuffer == NULL)
- {
- ErrorCode = -ENOMEM;
- goto Failure2;
- }
- }
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
- DAC960_WaitForCommand(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox = &Command->V2.CommandMailbox;
- memcpy(CommandMailbox, &UserCommand.CommandMailbox,
- sizeof(DAC960_V2_CommandMailbox_T));
- CommandMailbox->Common.CommandControlBits
- .AdditionalScatterGatherListMemory = false;
- CommandMailbox->Common.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->Common.DataTransferSize = 0;
- CommandMailbox->Common.DataTransferPageNumber = 0;
- memset(&CommandMailbox->Common.DataTransferMemoryAddress, 0,
- sizeof(DAC960_V2_DataTransferMemoryAddress_T));
- if (DataTransferLength != 0)
- {
- if (DataTransferLength > 0)
- {
- CommandMailbox->Common.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->Common.DataTransferSize = DataTransferLength;
- }
- else
- {
- CommandMailbox->Common.CommandControlBits
- .DataTransferControllerToHost = false;
- CommandMailbox->Common.DataTransferSize = -DataTransferLength;
- }
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer = DataTransferBufferDMA;
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->Common.DataTransferSize;
- }
- if (RequestSenseLength > 0)
- {
- CommandMailbox->Common.CommandControlBits
- .NoAutoRequestSense = false;
- CommandMailbox->Common.RequestSenseSize = RequestSenseLength;
- CommandMailbox->Common.RequestSenseBusAddress =
- RequestSenseBufferDMA;
- }
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- RequestSenseLength = Command->V2.RequestSenseLength;
- DataTransferResidue = Command->V2.DataTransferResidue;
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_DeallocateCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- if (RequestSenseLength > UserCommand.RequestSenseLength)
- RequestSenseLength = UserCommand.RequestSenseLength;
- if (copy_to_user(&UserSpaceUserCommand->DataTransferLength,
- &DataTransferResidue,
- sizeof(DataTransferResidue))) {
- ErrorCode = -EFAULT;
- goto Failure2;
- }
- if (copy_to_user(&UserSpaceUserCommand->RequestSenseLength,
- &RequestSenseLength, sizeof(RequestSenseLength))) {
- ErrorCode = -EFAULT;
- goto Failure2;
- }
- if (DataTransferLength > 0)
- {
- if (copy_to_user(UserCommand.DataTransferBuffer,
- DataTransferBuffer, DataTransferLength)) {
- ErrorCode = -EFAULT;
- goto Failure2;
- }
- }
- if (RequestSenseLength > 0)
- {
- if (copy_to_user(UserCommand.RequestSenseBuffer,
- RequestSenseBuffer, RequestSenseLength)) {
- ErrorCode = -EFAULT;
- goto Failure2;
- }
- }
- ErrorCode = CommandStatus;
- Failure2:
- if (DataTransferBuffer != NULL)
- pci_free_consistent(Controller->PCIDevice, abs(DataTransferLength),
- DataTransferBuffer, DataTransferBufferDMA);
- if (RequestSenseBuffer != NULL)
- pci_free_consistent(Controller->PCIDevice, RequestSenseLength,
- RequestSenseBuffer, RequestSenseBufferDMA);
-out:
- return ErrorCode;
-}
-
-static long DAC960_gam_v2_get_health_status(DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus)
-{
- DAC960_V2_GetHealthStatus_T GetHealthStatus;
- DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
- DAC960_Controller_T *Controller;
- int ControllerNumber;
- long ErrorCode;
-
- if (UserSpaceGetHealthStatus == NULL) {
- ErrorCode = -EINVAL;
- goto out;
- }
- if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
- sizeof(DAC960_V2_GetHealthStatus_T))) {
- ErrorCode = -EFAULT;
- goto out;
- }
- ErrorCode = -ENXIO;
- ControllerNumber = GetHealthStatus.ControllerNumber;
- if (ControllerNumber < 0 ||
- ControllerNumber > DAC960_ControllerCount - 1)
- goto out;
- Controller = DAC960_Controllers[ControllerNumber];
- if (Controller == NULL)
- goto out;
- if (Controller->FirmwareType != DAC960_V2_Controller) {
- ErrorCode = -EINVAL;
- goto out;
- }
- if (copy_from_user(&HealthStatusBuffer,
- GetHealthStatus.HealthStatusBuffer,
- sizeof(DAC960_V2_HealthStatusBuffer_T))) {
- ErrorCode = -EFAULT;
- goto out;
- }
- ErrorCode = wait_event_interruptible_timeout(Controller->HealthStatusWaitQueue,
- !(Controller->V2.HealthStatusBuffer->StatusChangeCounter
- == HealthStatusBuffer.StatusChangeCounter &&
- Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
- == HealthStatusBuffer.NextEventSequenceNumber),
- DAC960_MonitoringTimerInterval);
- if (ErrorCode == -ERESTARTSYS) {
- ErrorCode = -EINTR;
- goto out;
- }
- if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
- Controller->V2.HealthStatusBuffer,
- sizeof(DAC960_V2_HealthStatusBuffer_T)))
- ErrorCode = -EFAULT;
- else
- ErrorCode = 0;
-
-out:
- return ErrorCode;
-}
-
-/*
- DAC960_gam_ioctl is the ioctl function for performing RAID operations.
-*/
-
-static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
- unsigned long Argument)
-{
- long ErrorCode = 0;
- void __user *argp = (void __user *)Argument;
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-
- mutex_lock(&DAC960_mutex);
- switch (Request)
- {
- case DAC960_IOCTL_GET_CONTROLLER_COUNT:
- ErrorCode = DAC960_ControllerCount;
- break;
- case DAC960_IOCTL_GET_CONTROLLER_INFO:
- ErrorCode = DAC960_gam_get_controller_info(argp);
- break;
- case DAC960_IOCTL_V1_EXECUTE_COMMAND:
- ErrorCode = DAC960_gam_v1_execute_command(argp);
- break;
- case DAC960_IOCTL_V2_EXECUTE_COMMAND:
- ErrorCode = DAC960_gam_v2_execute_command(argp);
- break;
- case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
- ErrorCode = DAC960_gam_v2_get_health_status(argp);
- break;
- default:
- ErrorCode = -ENOTTY;
- }
- mutex_unlock(&DAC960_mutex);
- return ErrorCode;
-}
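
The GAM (Global Array Manager) ioctls are reached through the misc device registered below as "dac960_gam", so /dev/dac960_gam is the conventional node, and CAP_SYS_ADMIN is required. A hypothetical userspace sketch of the simplest request; the header name is ours, the ioctl numbers live in the driver's userspace header (not part of this hunk), and the controller count comes back as the ioctl() return value:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "dac960_gam.h" /* hypothetical header supplying the ioctl numbers */

int main(void)
{
        long count;
        int fd = open("/dev/dac960_gam", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        count = ioctl(fd, DAC960_IOCTL_GET_CONTROLLER_COUNT);
        if (count < 0)
                perror("ioctl");
        else
                printf("%ld DAC960 controller(s)\n", count);
        close(fd);
        return 0;
}
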
-
-static const struct file_operations DAC960_gam_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = DAC960_gam_ioctl,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice DAC960_gam_dev = {
- .minor = DAC960_GAM_MINOR,
- .name = "dac960_gam",
- .fops = &DAC960_gam_fops,
-};
-
-static int DAC960_gam_init(void)
-{
- int ret;
-
- ret = misc_register(&DAC960_gam_dev);
- if (ret)
- printk(KERN_ERR "DAC960_gam: can't misc_register on minor %d\n", DAC960_GAM_MINOR);
- return ret;
-}
-
-static void DAC960_gam_cleanup(void)
-{
- misc_deregister(&DAC960_gam_dev);
-}
-
-#endif /* DAC960_GAM_MINOR */
-
-static struct DAC960_privdata DAC960_GEM_privdata = {
- .HardwareType = DAC960_GEM_Controller,
- .FirmwareType = DAC960_V2_Controller,
- .InterruptHandler = DAC960_GEM_InterruptHandler,
- .MemoryWindowSize = DAC960_GEM_RegisterWindowSize,
-};
-
-
-static struct DAC960_privdata DAC960_BA_privdata = {
- .HardwareType = DAC960_BA_Controller,
- .FirmwareType = DAC960_V2_Controller,
- .InterruptHandler = DAC960_BA_InterruptHandler,
- .MemoryWindowSize = DAC960_BA_RegisterWindowSize,
-};
-
-static struct DAC960_privdata DAC960_LP_privdata = {
- .HardwareType = DAC960_LP_Controller,
- .FirmwareType = DAC960_V2_Controller,
- .InterruptHandler = DAC960_LP_InterruptHandler,
- .MemoryWindowSize = DAC960_LP_RegisterWindowSize,
-};
-
-static struct DAC960_privdata DAC960_LA_privdata = {
- .HardwareType = DAC960_LA_Controller,
- .FirmwareType = DAC960_V1_Controller,
- .InterruptHandler = DAC960_LA_InterruptHandler,
- .MemoryWindowSize = DAC960_LA_RegisterWindowSize,
-};
-
-static struct DAC960_privdata DAC960_PG_privdata = {
- .HardwareType = DAC960_PG_Controller,
- .FirmwareType = DAC960_V1_Controller,
- .InterruptHandler = DAC960_PG_InterruptHandler,
- .MemoryWindowSize = DAC960_PG_RegisterWindowSize,
-};
-
-static struct DAC960_privdata DAC960_PD_privdata = {
- .HardwareType = DAC960_PD_Controller,
- .FirmwareType = DAC960_V1_Controller,
- .InterruptHandler = DAC960_PD_InterruptHandler,
- .MemoryWindowSize = DAC960_PD_RegisterWindowSize,
-};
-
-static struct DAC960_privdata DAC960_P_privdata = {
- .HardwareType = DAC960_P_Controller,
- .FirmwareType = DAC960_V1_Controller,
- .InterruptHandler = DAC960_P_InterruptHandler,
- .MemoryWindowSize = DAC960_PD_RegisterWindowSize,
-};
-
-static const struct pci_device_id DAC960_id_table[] = {
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_GEM,
- .subvendor = PCI_VENDOR_ID_MYLEX,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_GEM_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_BA,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_BA_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_LP,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_LP_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_DEC,
- .device = PCI_DEVICE_ID_DEC_21285,
- .subvendor = PCI_VENDOR_ID_MYLEX,
- .subdevice = PCI_DEVICE_ID_MYLEX_DAC960_LA,
- .driver_data = (unsigned long) &DAC960_LA_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_PG,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_PG_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_PD,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_PD_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_P,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_P_privdata,
- },
- {0, },
-};
-
-MODULE_DEVICE_TABLE(pci, DAC960_id_table);
-
-static struct pci_driver DAC960_pci_driver = {
- .name = "DAC960",
- .id_table = DAC960_id_table,
- .probe = DAC960_Probe,
- .remove = DAC960_Remove,
-};
-
-static int __init DAC960_init_module(void)
-{
- int ret;
-
- ret = pci_register_driver(&DAC960_pci_driver);
-#ifdef DAC960_GAM_MINOR
- if (!ret)
- DAC960_gam_init();
-#endif
- return ret;
-}
-
-static void __exit DAC960_cleanup_module(void)
-{
- int i;
-
-#ifdef DAC960_GAM_MINOR
- DAC960_gam_cleanup();
-#endif
-
- for (i = 0; i < DAC960_ControllerCount; i++) {
- DAC960_Controller_T *Controller = DAC960_Controllers[i];
- if (Controller == NULL)
- continue;
- DAC960_FinalizeController(Controller);
- }
- if (DAC960_ProcDirectoryEntry != NULL) {
- remove_proc_entry("rd/status", NULL);
- remove_proc_entry("rd", NULL);
- }
- DAC960_ControllerCount = 0;
- pci_unregister_driver(&DAC960_pci_driver);
-}
-
-module_init(DAC960_init_module);
-module_exit(DAC960_cleanup_module);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h
deleted file mode 100644
index 1439e651928b..000000000000
--- a/drivers/block/DAC960.h
+++ /dev/null
@@ -1,4414 +0,0 @@
-/*
-
- Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
-
- Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
-
- This program is free software; you may redistribute and/or modify it under
- the terms of the GNU General Public License Version 2 as published by the
- Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- for complete details.
-
- The author respectfully requests that any modifications to this software be
- sent directly to him for evaluation and testing.
-
-*/
-
-
-/*
- Define the maximum number of DAC960 Controllers supported by this driver.
-*/
-
-#define DAC960_MaxControllers 8
-
-
-/*
- Define the maximum number of Controller Channels supported by DAC960
- V1 and V2 Firmware Controllers.
-*/
-
-#define DAC960_V1_MaxChannels 3
-#define DAC960_V2_MaxChannels 4
-
-
-/*
- Define the maximum number of Targets per Channel supported by DAC960
- V1 and V2 Firmware Controllers.
-*/
-
-#define DAC960_V1_MaxTargets 16
-#define DAC960_V2_MaxTargets 128
-
-
-/*
- Define the maximum number of Logical Drives supported by DAC960
- V1 and V2 Firmware Controllers.
-*/
-
-#define DAC960_MaxLogicalDrives 32
-
-
-/*
- Define the maximum number of Physical Devices supported by DAC960
- V1 and V2 Firmware Controllers.
-*/
-
-#define DAC960_V1_MaxPhysicalDevices 45
-#define DAC960_V2_MaxPhysicalDevices 272
-
-/*
- Define a 32/64 bit I/O Address data type.
-*/
-
-typedef unsigned long DAC960_IO_Address_T;
-
-
-/*
- Define a 32/64 bit PCI Bus Address data type.
-*/
-
-typedef unsigned long DAC960_PCI_Address_T;
-
-
-/*
- Define a 32 bit Bus Address data type.
-*/
-
-typedef unsigned int DAC960_BusAddress32_T;
-
-
-/*
- Define a 64 bit Bus Address data type.
-*/
-
-typedef unsigned long long DAC960_BusAddress64_T;
-
-
-/*
- Define a 32 bit Byte Count data type.
-*/
-
-typedef unsigned int DAC960_ByteCount32_T;
-
-
-/*
- Define a 64 bit Byte Count data type.
-*/
-
-typedef unsigned long long DAC960_ByteCount64_T;
-
-
-/*
- dma_loaf is used by helper routines to divide a region of
- dma mapped memory into smaller pieces, where those pieces
- are not of uniform size.
- */
-
-struct dma_loaf {
- void *cpu_base;
- dma_addr_t dma_base;
- size_t length;
- void *cpu_free;
- dma_addr_t dma_free;
-};
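
The loaf is carved sequentially: cpu_free and dma_free advance in lockstep so every slice gets matching CPU and bus addresses from one dma-coherent allocation. The driver's real slicing helper is not part of this hunk; a minimal sketch of the idea, omitting alignment and exhaustion checks:

static void *slice_loaf(struct dma_loaf *loaf, size_t len,
                        dma_addr_t *dma_handle)
{
        void *cpu_addr = loaf->cpu_free;        /* next unused CPU address */

        *dma_handle = loaf->dma_free;           /* matching bus address */
        loaf->cpu_free += len;
        loaf->dma_free += len;
        return cpu_addr;
}
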
-
-/*
- Define the SCSI INQUIRY Standard Data structure.
-*/
-
-typedef struct DAC960_SCSI_Inquiry
-{
- unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */
- unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */
- unsigned char DeviceTypeModifier:7; /* Byte 1 Bits 0-6 */
- bool RMB:1; /* Byte 1 Bit 7 */
- unsigned char ANSI_ApprovedVersion:3; /* Byte 2 Bits 0-2 */
- unsigned char ECMA_Version:3; /* Byte 2 Bits 3-5 */
- unsigned char ISO_Version:2; /* Byte 2 Bits 6-7 */
- unsigned char ResponseDataFormat:4; /* Byte 3 Bits 0-3 */
- unsigned char :2; /* Byte 3 Bits 4-5 */
- bool TrmIOP:1; /* Byte 3 Bit 6 */
- bool AENC:1; /* Byte 3 Bit 7 */
- unsigned char AdditionalLength; /* Byte 4 */
- unsigned char :8; /* Byte 5 */
- unsigned char :8; /* Byte 6 */
- bool SftRe:1; /* Byte 7 Bit 0 */
- bool CmdQue:1; /* Byte 7 Bit 1 */
- bool :1; /* Byte 7 Bit 2 */
- bool Linked:1; /* Byte 7 Bit 3 */
- bool Sync:1; /* Byte 7 Bit 4 */
- bool WBus16:1; /* Byte 7 Bit 5 */
- bool WBus32:1; /* Byte 7 Bit 6 */
- bool RelAdr:1; /* Byte 7 Bit 7 */
- unsigned char VendorIdentification[8]; /* Bytes 8-15 */
- unsigned char ProductIdentification[16]; /* Bytes 16-31 */
- unsigned char ProductRevisionLevel[4]; /* Bytes 32-35 */
-}
-DAC960_SCSI_Inquiry_T;
-
-
-/*
- Define the SCSI INQUIRY Unit Serial Number structure.
-*/
-
-typedef struct DAC960_SCSI_Inquiry_UnitSerialNumber
-{
- unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */
- unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */
- unsigned char PageCode; /* Byte 1 */
- unsigned char :8; /* Byte 2 */
- unsigned char PageLength; /* Byte 3 */
- unsigned char ProductSerialNumber[28]; /* Bytes 4-31 */
-}
-DAC960_SCSI_Inquiry_UnitSerialNumber_T;
-
-
-/*
- Define the SCSI REQUEST SENSE Sense Key type.
-*/
-
-typedef enum
-{
- DAC960_SenseKey_NoSense = 0x0,
- DAC960_SenseKey_RecoveredError = 0x1,
- DAC960_SenseKey_NotReady = 0x2,
- DAC960_SenseKey_MediumError = 0x3,
- DAC960_SenseKey_HardwareError = 0x4,
- DAC960_SenseKey_IllegalRequest = 0x5,
- DAC960_SenseKey_UnitAttention = 0x6,
- DAC960_SenseKey_DataProtect = 0x7,
- DAC960_SenseKey_BlankCheck = 0x8,
- DAC960_SenseKey_VendorSpecific = 0x9,
- DAC960_SenseKey_CopyAborted = 0xA,
- DAC960_SenseKey_AbortedCommand = 0xB,
- DAC960_SenseKey_Equal = 0xC,
- DAC960_SenseKey_VolumeOverflow = 0xD,
- DAC960_SenseKey_Miscompare = 0xE,
- DAC960_SenseKey_Reserved = 0xF
-}
-__attribute__ ((packed))
-DAC960_SCSI_RequestSenseKey_T;
-
-
-/*
- Define the SCSI REQUEST SENSE structure.
-*/
-
-typedef struct DAC960_SCSI_RequestSense
-{
- unsigned char ErrorCode:7; /* Byte 0 Bits 0-6 */
- bool Valid:1; /* Byte 0 Bit 7 */
- unsigned char SegmentNumber; /* Byte 1 */
- DAC960_SCSI_RequestSenseKey_T SenseKey:4; /* Byte 2 Bits 0-3 */
- unsigned char :1; /* Byte 2 Bit 4 */
- bool ILI:1; /* Byte 2 Bit 5 */
- bool EOM:1; /* Byte 2 Bit 6 */
- bool Filemark:1; /* Byte 2 Bit 7 */
- unsigned char Information[4]; /* Bytes 3-6 */
- unsigned char AdditionalSenseLength; /* Byte 7 */
- unsigned char CommandSpecificInformation[4]; /* Bytes 8-11 */
- unsigned char AdditionalSenseCode; /* Byte 12 */
- unsigned char AdditionalSenseCodeQualifier; /* Byte 13 */
-}
-DAC960_SCSI_RequestSense_T;
-
-
-/*
- Define the DAC960 V1 Firmware Command Opcodes.
-*/
-
-typedef enum
-{
- /* I/O Commands */
- DAC960_V1_ReadExtended = 0x33,
- DAC960_V1_WriteExtended = 0x34,
- DAC960_V1_ReadAheadExtended = 0x35,
- DAC960_V1_ReadExtendedWithScatterGather = 0xB3,
- DAC960_V1_WriteExtendedWithScatterGather = 0xB4,
- DAC960_V1_Read = 0x36,
- DAC960_V1_ReadWithScatterGather = 0xB6,
- DAC960_V1_Write = 0x37,
- DAC960_V1_WriteWithScatterGather = 0xB7,
- DAC960_V1_DCDB = 0x04,
- DAC960_V1_DCDBWithScatterGather = 0x84,
- DAC960_V1_Flush = 0x0A,
- /* Controller Status Related Commands */
- DAC960_V1_Enquiry = 0x53,
- DAC960_V1_Enquiry2 = 0x1C,
- DAC960_V1_GetLogicalDriveElement = 0x55,
- DAC960_V1_GetLogicalDriveInformation = 0x19,
- DAC960_V1_IOPortRead = 0x39,
- DAC960_V1_IOPortWrite = 0x3A,
- DAC960_V1_GetSDStats = 0x3E,
- DAC960_V1_GetPDStats = 0x3F,
- DAC960_V1_PerformEventLogOperation = 0x72,
- /* Device Related Commands */
- DAC960_V1_StartDevice = 0x10,
- DAC960_V1_GetDeviceState = 0x50,
- DAC960_V1_StopChannel = 0x13,
- DAC960_V1_StartChannel = 0x12,
- DAC960_V1_ResetChannel = 0x1A,
- /* Commands Associated with Data Consistency and Errors */
- DAC960_V1_Rebuild = 0x09,
- DAC960_V1_RebuildAsync = 0x16,
- DAC960_V1_CheckConsistency = 0x0F,
- DAC960_V1_CheckConsistencyAsync = 0x1E,
- DAC960_V1_RebuildStat = 0x0C,
- DAC960_V1_GetRebuildProgress = 0x27,
- DAC960_V1_RebuildControl = 0x1F,
- DAC960_V1_ReadBadBlockTable = 0x0B,
- DAC960_V1_ReadBadDataTable = 0x25,
- DAC960_V1_ClearBadDataTable = 0x26,
- DAC960_V1_GetErrorTable = 0x17,
- DAC960_V1_AddCapacityAsync = 0x2A,
- DAC960_V1_BackgroundInitializationControl = 0x2B,
- /* Configuration Related Commands */
- DAC960_V1_ReadConfig2 = 0x3D,
- DAC960_V1_WriteConfig2 = 0x3C,
- DAC960_V1_ReadConfigurationOnDisk = 0x4A,
- DAC960_V1_WriteConfigurationOnDisk = 0x4B,
- DAC960_V1_ReadConfiguration = 0x4E,
- DAC960_V1_ReadBackupConfiguration = 0x4D,
- DAC960_V1_WriteConfiguration = 0x4F,
- DAC960_V1_AddConfiguration = 0x4C,
- DAC960_V1_ReadConfigurationLabel = 0x48,
- DAC960_V1_WriteConfigurationLabel = 0x49,
- /* Firmware Upgrade Related Commands */
- DAC960_V1_LoadImage = 0x20,
- DAC960_V1_StoreImage = 0x21,
- DAC960_V1_ProgramImage = 0x22,
- /* Diagnostic Commands */
- DAC960_V1_SetDiagnosticMode = 0x31,
- DAC960_V1_RunDiagnostic = 0x32,
- /* Subsystem Service Commands */
- DAC960_V1_GetSubsystemData = 0x70,
- DAC960_V1_SetSubsystemParameters = 0x71,
- /* Version 2.xx Firmware Commands */
- DAC960_V1_Enquiry_Old = 0x05,
- DAC960_V1_GetDeviceState_Old = 0x14,
- DAC960_V1_Read_Old = 0x02,
- DAC960_V1_Write_Old = 0x03,
- DAC960_V1_ReadWithScatterGather_Old = 0x82,
- DAC960_V1_WriteWithScatterGather_Old = 0x83
-}
-__attribute__ ((packed))
-DAC960_V1_CommandOpcode_T;
-
-
-/*
- Define the DAC960 V1 Firmware Command Identifier type.
-*/
-
-typedef unsigned char DAC960_V1_CommandIdentifier_T;
-
-
-/*
- Define the DAC960 V1 Firmware Command Status Codes.
-*/
-
-#define DAC960_V1_NormalCompletion 0x0000 /* Common */
-#define DAC960_V1_CheckConditionReceived 0x0002 /* Common */
-#define DAC960_V1_NoDeviceAtAddress 0x0102 /* Common */
-#define DAC960_V1_InvalidDeviceAddress 0x0105 /* Common */
-#define DAC960_V1_InvalidParameter 0x0105 /* Common */
-#define DAC960_V1_IrrecoverableDataError 0x0001 /* I/O */
-#define DAC960_V1_LogicalDriveNonexistentOrOffline 0x0002 /* I/O */
-#define DAC960_V1_AccessBeyondEndOfLogicalDrive 0x0105 /* I/O */
-#define DAC960_V1_BadDataEncountered 0x010C /* I/O */
-#define DAC960_V1_DeviceBusy 0x0008 /* DCDB */
-#define DAC960_V1_DeviceNonresponsive 0x000E /* DCDB */
-#define DAC960_V1_CommandTerminatedAbnormally 0x000F /* DCDB */
-#define DAC960_V1_UnableToStartDevice 0x0002 /* Device */
-#define DAC960_V1_InvalidChannelOrTargetOrModifier 0x0105 /* Device */
-#define DAC960_V1_ChannelBusy 0x0106 /* Device */
-#define DAC960_V1_ChannelNotStopped 0x0002 /* Device */
-#define DAC960_V1_AttemptToRebuildOnlineDrive 0x0002 /* Consistency */
-#define DAC960_V1_RebuildBadBlocksEncountered 0x0003 /* Consistency */
-#define DAC960_V1_NewDiskFailedDuringRebuild 0x0004 /* Consistency */
-#define DAC960_V1_RebuildOrCheckAlreadyInProgress 0x0106 /* Consistency */
-#define DAC960_V1_DependentDiskIsDead 0x0002 /* Consistency */
-#define DAC960_V1_InconsistentBlocksFound 0x0003 /* Consistency */
-#define DAC960_V1_InvalidOrNonredundantLogicalDrive 0x0105 /* Consistency */
-#define DAC960_V1_NoRebuildOrCheckInProgress 0x0105 /* Consistency */
-#define DAC960_V1_RebuildInProgress_DataValid 0x0000 /* Consistency */
-#define DAC960_V1_RebuildFailed_LogicalDriveFailure 0x0002 /* Consistency */
-#define DAC960_V1_RebuildFailed_BadBlocksOnOther 0x0003 /* Consistency */
-#define DAC960_V1_RebuildFailed_NewDriveFailed 0x0004 /* Consistency */
-#define DAC960_V1_RebuildSuccessful 0x0100 /* Consistency */
-#define DAC960_V1_RebuildSuccessfullyTerminated 0x0107 /* Consistency */
-#define DAC960_V1_BackgroundInitSuccessful 0x0100 /* Consistency */
-#define DAC960_V1_BackgroundInitAborted 0x0005 /* Consistency */
-#define DAC960_V1_NoBackgroundInitInProgress 0x0105 /* Consistency */
-#define DAC960_V1_AddCapacityInProgress 0x0004 /* Consistency */
-#define DAC960_V1_AddCapacityFailedOrSuspended 0x00F4 /* Consistency */
-#define DAC960_V1_Config2ChecksumError 0x0002 /* Configuration */
-#define DAC960_V1_ConfigurationSuspended 0x0106 /* Configuration */
-#define DAC960_V1_FailedToConfigureNVRAM 0x0105 /* Configuration */
-#define DAC960_V1_ConfigurationNotSavedStateChange 0x0106 /* Configuration */
-#define DAC960_V1_SubsystemNotInstalled 0x0001 /* Subsystem */
-#define DAC960_V1_SubsystemFailed 0x0002 /* Subsystem */
-#define DAC960_V1_SubsystemBusy 0x0106 /* Subsystem */
-
-typedef unsigned short DAC960_V1_CommandStatus_T;
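
Note that these status values are context-dependent: 0x0002, for example,
means CheckConditionReceived for common commands but
LogicalDriveNonexistentOrOffline for I/O commands, so without knowing the
command group only equality with DAC960_V1_NormalCompletion is unambiguous.
A hypothetical helper reflecting that:

static inline bool DAC960_V1_CommandSucceeded(DAC960_V1_CommandStatus_T status)
{
	/* 0x0000 is the only value with one meaning across all groups. */
	return status == DAC960_V1_NormalCompletion;
}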
-
-
-/*
- Define the DAC960 V1 Firmware Enquiry Command reply structure.
-*/
-
-typedef struct DAC960_V1_Enquiry
-{
- unsigned char NumberOfLogicalDrives; /* Byte 0 */
- unsigned int :24; /* Bytes 1-3 */
- unsigned int LogicalDriveSizes[32]; /* Bytes 4-131 */
- unsigned short FlashAge; /* Bytes 132-133 */
- struct {
- bool DeferredWriteError:1; /* Byte 134 Bit 0 */
- bool BatteryLow:1; /* Byte 134 Bit 1 */
- unsigned char :6; /* Byte 134 Bits 2-7 */
- } StatusFlags;
- unsigned char :8; /* Byte 135 */
- unsigned char MinorFirmwareVersion; /* Byte 136 */
- unsigned char MajorFirmwareVersion; /* Byte 137 */
- enum {
- DAC960_V1_NoStandbyRebuildOrCheckInProgress = 0x00,
- DAC960_V1_StandbyRebuildInProgress = 0x01,
- DAC960_V1_BackgroundRebuildInProgress = 0x02,
- DAC960_V1_BackgroundCheckInProgress = 0x03,
- DAC960_V1_StandbyRebuildCompletedWithError = 0xFF,
- DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed = 0xF0,
- DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed = 0xF1,
- DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses = 0xF2,
- DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated = 0xF3
- } __attribute__ ((packed)) RebuildFlag; /* Byte 138 */
- unsigned char MaxCommands; /* Byte 139 */
- unsigned char OfflineLogicalDriveCount; /* Byte 140 */
- unsigned char :8; /* Byte 141 */
- unsigned short EventLogSequenceNumber; /* Bytes 142-143 */
- unsigned char CriticalLogicalDriveCount; /* Byte 144 */
- unsigned int :24; /* Bytes 145-147 */
- unsigned char DeadDriveCount; /* Byte 148 */
- unsigned char :8; /* Byte 149 */
- unsigned char RebuildCount; /* Byte 150 */
- struct {
- unsigned char :3; /* Byte 151 Bits 0-2 */
- bool BatteryBackupUnitPresent:1; /* Byte 151 Bit 3 */
- unsigned char :3; /* Byte 151 Bits 4-6 */
- unsigned char :1; /* Byte 151 Bit 7 */
- } MiscFlags;
- struct {
- unsigned char TargetID;
- unsigned char Channel;
- } DeadDrives[21]; /* Bytes 152-193 */
- unsigned char Reserved[62]; /* Bytes 194-255 */
-}
-__attribute__ ((packed))
-DAC960_V1_Enquiry_T;
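
Since optional controller features vary by firmware revision, a caller
might gate them on the version bytes of the Enquiry reply; a hypothetical
sketch, not part of the original header:

static inline bool DAC960_V1_FirmwareAtLeast(const DAC960_V1_Enquiry_T *enq,
					     unsigned char major,
					     unsigned char minor)
{
	return enq->MajorFirmwareVersion > major ||
	       (enq->MajorFirmwareVersion == major &&
		enq->MinorFirmwareVersion >= minor);
}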
-
-
-/*
- Define the DAC960 V1 Firmware Enquiry2 Command reply structure.
-*/
-
-typedef struct DAC960_V1_Enquiry2
-{
- struct {
- enum {
- DAC960_V1_P_PD_PU = 0x01,
- DAC960_V1_PL = 0x02,
- DAC960_V1_PG = 0x10,
- DAC960_V1_PJ = 0x11,
- DAC960_V1_PR = 0x12,
- DAC960_V1_PT = 0x13,
- DAC960_V1_PTL0 = 0x14,
- DAC960_V1_PRL = 0x15,
- DAC960_V1_PTL1 = 0x16,
- DAC960_V1_1164P = 0x20
- } __attribute__ ((packed)) SubModel; /* Byte 0 */
- unsigned char ActualChannels; /* Byte 1 */
- enum {
- DAC960_V1_FiveChannelBoard = 0x01,
- DAC960_V1_ThreeChannelBoard = 0x02,
- DAC960_V1_TwoChannelBoard = 0x03,
- DAC960_V1_ThreeChannelASIC_DAC = 0x04
- } __attribute__ ((packed)) Model; /* Byte 2 */
- enum {
- DAC960_V1_EISA_Controller = 0x01,
- DAC960_V1_MicroChannel_Controller = 0x02,
- DAC960_V1_PCI_Controller = 0x03,
- DAC960_V1_SCSItoSCSI_Controller = 0x08
- } __attribute__ ((packed)) ProductFamily; /* Byte 3 */
- } HardwareID; /* Bytes 0-3 */
- /* MajorVersion.MinorVersion-FirmwareType-TurnID */
- struct {
- unsigned char MajorVersion; /* Byte 4 */
- unsigned char MinorVersion; /* Byte 5 */
- unsigned char TurnID; /* Byte 6 */
- char FirmwareType; /* Byte 7 */
- } FirmwareID; /* Bytes 4-7 */
- unsigned char :8; /* Byte 8 */
- unsigned int :24; /* Bytes 9-11 */
- unsigned char ConfiguredChannels; /* Byte 12 */
- unsigned char ActualChannels; /* Byte 13 */
- unsigned char MaxTargets; /* Byte 14 */
- unsigned char MaxTags; /* Byte 15 */
- unsigned char MaxLogicalDrives; /* Byte 16 */
- unsigned char MaxArms; /* Byte 17 */
- unsigned char MaxSpans; /* Byte 18 */
- unsigned char :8; /* Byte 19 */
- unsigned int :32; /* Bytes 20-23 */
- unsigned int MemorySize; /* Bytes 24-27 */
- unsigned int CacheSize; /* Bytes 28-31 */
- unsigned int FlashMemorySize; /* Bytes 32-35 */
- unsigned int NonVolatileMemorySize; /* Bytes 36-39 */
- struct {
- enum {
- DAC960_V1_RamType_DRAM = 0x0,
- DAC960_V1_RamType_EDO = 0x1,
- DAC960_V1_RamType_SDRAM = 0x2,
- DAC960_V1_RamType_Last = 0x7
- } __attribute__ ((packed)) RamType:3; /* Byte 40 Bits 0-2 */
- enum {
- DAC960_V1_ErrorCorrection_None = 0x0,
- DAC960_V1_ErrorCorrection_Parity = 0x1,
- DAC960_V1_ErrorCorrection_ECC = 0x2,
- DAC960_V1_ErrorCorrection_Last = 0x7
- } __attribute__ ((packed)) ErrorCorrection:3; /* Byte 40 Bits 3-5 */
- bool FastPageMode:1; /* Byte 40 Bit 6 */
- bool LowPowerMemory:1; /* Byte 40 Bit 7 */
- unsigned char :8; /* Byte 41 */
- } MemoryType;
- unsigned short ClockSpeed; /* Bytes 42-43 */
- unsigned short MemorySpeed; /* Bytes 44-45 */
- unsigned short HardwareSpeed; /* Bytes 46-47 */
- unsigned int :32; /* Bytes 48-51 */
- unsigned int :32; /* Bytes 52-55 */
- unsigned char :8; /* Byte 56 */
- unsigned char :8; /* Byte 57 */
- unsigned short :16; /* Bytes 58-59 */
- unsigned short MaxCommands; /* Bytes 60-61 */
- unsigned short MaxScatterGatherEntries; /* Bytes 62-63 */
- unsigned short MaxDriveCommands; /* Bytes 64-65 */
- unsigned short MaxIODescriptors; /* Bytes 66-67 */
- unsigned short MaxCombinedSectors; /* Bytes 68-69 */
- unsigned char Latency; /* Byte 70 */
- unsigned char :8; /* Byte 71 */
- unsigned char SCSITimeout; /* Byte 72 */
- unsigned char :8; /* Byte 73 */
- unsigned short MinFreeLines; /* Bytes 74-75 */
- unsigned int :32; /* Bytes 76-79 */
- unsigned int :32; /* Bytes 80-83 */
- unsigned char RebuildRateConstant; /* Byte 84 */
- unsigned char :8; /* Byte 85 */
- unsigned char :8; /* Byte 86 */
- unsigned char :8; /* Byte 87 */
- unsigned int :32; /* Bytes 88-91 */
- unsigned int :32; /* Bytes 92-95 */
- unsigned short PhysicalDriveBlockSize; /* Bytes 96-97 */
- unsigned short LogicalDriveBlockSize; /* Bytes 98-99 */
- unsigned short MaxBlocksPerCommand; /* Bytes 100-101 */
- unsigned short BlockFactor; /* Bytes 102-103 */
- unsigned short CacheLineSize; /* Bytes 104-105 */
- struct {
- enum {
- DAC960_V1_Narrow_8bit = 0x0,
- DAC960_V1_Wide_16bit = 0x1,
- DAC960_V1_Wide_32bit = 0x2
- } __attribute__ ((packed)) BusWidth:2; /* Byte 106 Bits 0-1 */
- enum {
- DAC960_V1_Fast = 0x0,
- DAC960_V1_Ultra = 0x1,
- DAC960_V1_Ultra2 = 0x2
- } __attribute__ ((packed)) BusSpeed:2; /* Byte 106 Bits 2-3 */
- bool Differential:1; /* Byte 106 Bit 4 */
- unsigned char :3; /* Byte 106 Bits 5-7 */
- } SCSICapability;
- unsigned char :8; /* Byte 107 */
- unsigned int :32; /* Bytes 108-111 */
- unsigned short FirmwareBuildNumber; /* Bytes 112-113 */
- enum {
- DAC960_V1_AEMI = 0x01,
- DAC960_V1_OEM1 = 0x02,
- DAC960_V1_OEM2 = 0x04,
- DAC960_V1_OEM3 = 0x08,
- DAC960_V1_Conner = 0x10,
- DAC960_V1_SAFTE = 0x20
- } __attribute__ ((packed)) FaultManagementType; /* Byte 114 */
- unsigned char :8; /* Byte 115 */
- struct {
- bool Clustering:1; /* Byte 116 Bit 0 */
- bool MylexOnlineRAIDExpansion:1; /* Byte 116 Bit 1 */
- bool ReadAhead:1; /* Byte 116 Bit 2 */
- bool BackgroundInitialization:1; /* Byte 116 Bit 3 */
- unsigned int :28; /* Bytes 116-119 */
- } FirmwareFeatures;
- unsigned int :32; /* Bytes 120-123 */
- unsigned int :32; /* Bytes 124-127 */
-}
-DAC960_V1_Enquiry2_T;
-
-
-/*
- Define the DAC960 V1 Firmware Logical Drive State type.
-*/
-
-typedef enum
-{
- DAC960_V1_LogicalDrive_Online = 0x03,
- DAC960_V1_LogicalDrive_Critical = 0x04,
- DAC960_V1_LogicalDrive_Offline = 0xFF
-}
-__attribute__ ((packed))
-DAC960_V1_LogicalDriveState_T;
-
-
-/*
- Define the DAC960 V1 Firmware Logical Drive Information structure.
-*/
-
-typedef struct DAC960_V1_LogicalDriveInformation
-{
- unsigned int LogicalDriveSize; /* Bytes 0-3 */
- DAC960_V1_LogicalDriveState_T LogicalDriveState; /* Byte 4 */
- unsigned char RAIDLevel:7; /* Byte 5 Bits 0-6 */
- bool WriteBack:1; /* Byte 5 Bit 7 */
- unsigned short :16; /* Bytes 6-7 */
-}
-DAC960_V1_LogicalDriveInformation_T;
-
-
-/*
- Define the DAC960 V1 Firmware Get Logical Drive Information Command
- reply structure.
-*/
-
-typedef DAC960_V1_LogicalDriveInformation_T
- DAC960_V1_LogicalDriveInformationArray_T[DAC960_MaxLogicalDrives];
-
-
-/*
- Define the DAC960 V1 Firmware Perform Event Log Operation Types.
-*/
-
-typedef enum
-{
- DAC960_V1_GetEventLogEntry = 0x00
-}
-__attribute__ ((packed))
-DAC960_V1_PerformEventLogOpType_T;
-
-
-/*
- Define the DAC960 V1 Firmware Get Event Log Entry Command reply structure.
-*/
-
-typedef struct DAC960_V1_EventLogEntry
-{
- unsigned char MessageType; /* Byte 0 */
- unsigned char MessageLength; /* Byte 1 */
- unsigned char TargetID:5; /* Byte 2 Bits 0-4 */
- unsigned char Channel:3; /* Byte 2 Bits 5-7 */
- unsigned char LogicalUnit:6; /* Byte 3 Bits 0-5 */
- unsigned char :2; /* Byte 3 Bits 6-7 */
- unsigned short SequenceNumber; /* Bytes 4-5 */
- unsigned char ErrorCode:7; /* Byte 6 Bits 0-6 */
- bool Valid:1; /* Byte 6 Bit 7 */
- unsigned char SegmentNumber; /* Byte 7 */
- DAC960_SCSI_RequestSenseKey_T SenseKey:4; /* Byte 8 Bits 0-3 */
- unsigned char :1; /* Byte 8 Bit 4 */
- bool ILI:1; /* Byte 8 Bit 5 */
- bool EOM:1; /* Byte 8 Bit 6 */
- bool Filemark:1; /* Byte 8 Bit 7 */
- unsigned char Information[4]; /* Bytes 9-12 */
- unsigned char AdditionalSenseLength; /* Byte 13 */
- unsigned char CommandSpecificInformation[4]; /* Bytes 14-17 */
- unsigned char AdditionalSenseCode; /* Byte 18 */
- unsigned char AdditionalSenseCodeQualifier; /* Byte 19 */
- unsigned char Dummy[12]; /* Bytes 20-31 */
-}
-DAC960_V1_EventLogEntry_T;
-
-
-/*
- Define the DAC960 V1 Firmware Physical Device State type.
-*/
-
-typedef enum
-{
- DAC960_V1_Device_Dead = 0x00,
- DAC960_V1_Device_WriteOnly = 0x02,
- DAC960_V1_Device_Online = 0x03,
- DAC960_V1_Device_Standby = 0x10
-}
-__attribute__ ((packed))
-DAC960_V1_PhysicalDeviceState_T;
-
-
-/*
- Define the DAC960 V1 Firmware Get Device State Command reply structure.
- The structure is padded by 2 bytes for compatibility with Version 2.xx
- Firmware.
-*/
-
-typedef struct DAC960_V1_DeviceState
-{
- bool Present:1; /* Byte 0 Bit 0 */
- unsigned char :7; /* Byte 0 Bits 1-7 */
- enum {
- DAC960_V1_OtherType = 0x0,
- DAC960_V1_DiskType = 0x1,
- DAC960_V1_SequentialType = 0x2,
- DAC960_V1_CDROM_or_WORM_Type = 0x3
- } __attribute__ ((packed)) DeviceType:2; /* Byte 1 Bits 0-1 */
- bool :1; /* Byte 1 Bit 2 */
- bool Fast20:1; /* Byte 1 Bit 3 */
- bool Sync:1; /* Byte 1 Bit 4 */
- bool Fast:1; /* Byte 1 Bit 5 */
- bool Wide:1; /* Byte 1 Bit 6 */
- bool TaggedQueuingSupported:1; /* Byte 1 Bit 7 */
- DAC960_V1_PhysicalDeviceState_T DeviceState; /* Byte 2 */
- unsigned char :8; /* Byte 3 */
- unsigned char SynchronousMultiplier; /* Byte 4 */
- unsigned char SynchronousOffset:5; /* Byte 5 Bits 0-4 */
- unsigned char :3; /* Byte 5 Bits 5-7 */
- unsigned int DiskSize __attribute__ ((packed)); /* Bytes 6-9 */
- unsigned short :16; /* Bytes 10-11 */
-}
-DAC960_V1_DeviceState_T;
-
-
-/*
- Define the DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
-*/
-
-typedef struct DAC960_V1_RebuildProgress
-{
- unsigned int LogicalDriveNumber; /* Bytes 0-3 */
- unsigned int LogicalDriveSize; /* Bytes 4-7 */
- unsigned int RemainingBlocks; /* Bytes 8-11 */
-}
-DAC960_V1_RebuildProgress_T;
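
A sketch of turning this reply into a completion percentage; the helper is
hypothetical and guards against a zero-sized logical drive:

static inline unsigned int
DAC960_V1_RebuildPercentComplete(const DAC960_V1_RebuildProgress_T *p)
{
	unsigned long long done;

	if (p->LogicalDriveSize == 0)
		return 0;
	done = (unsigned long long)(p->LogicalDriveSize - p->RemainingBlocks);
	return (unsigned int)((done * 100) / p->LogicalDriveSize);
}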
-
-
-/*
- Define the DAC960 V1 Firmware Background Initialization Status Command
- reply structure.
-*/
-
-typedef struct DAC960_V1_BackgroundInitializationStatus
-{
- unsigned int LogicalDriveSize; /* Bytes 0-3 */
- unsigned int BlocksCompleted; /* Bytes 4-7 */
- unsigned char Reserved1[12]; /* Bytes 8-19 */
- unsigned int LogicalDriveNumber; /* Bytes 20-23 */
- unsigned char RAIDLevel; /* Byte 24 */
- enum {
- DAC960_V1_BackgroundInitializationInvalid = 0x00,
- DAC960_V1_BackgroundInitializationStarted = 0x02,
- DAC960_V1_BackgroundInitializationInProgress = 0x04,
- DAC960_V1_BackgroundInitializationSuspended = 0x05,
- DAC960_V1_BackgroundInitializationCancelled = 0x06
- } __attribute__ ((packed)) Status; /* Byte 25 */
- unsigned char Reserved2[6]; /* Bytes 26-31 */
-}
-DAC960_V1_BackgroundInitializationStatus_T;
-
-
-/*
- Define the DAC960 V1 Firmware Error Table Entry structure.
-*/
-
-typedef struct DAC960_V1_ErrorTableEntry
-{
- unsigned char ParityErrorCount; /* Byte 0 */
- unsigned char SoftErrorCount; /* Byte 1 */
- unsigned char HardErrorCount; /* Byte 2 */
- unsigned char MiscErrorCount; /* Byte 3 */
-}
-DAC960_V1_ErrorTableEntry_T;
-
-
-/*
- Define the DAC960 V1 Firmware Get Error Table Command reply structure.
-*/
-
-typedef struct DAC960_V1_ErrorTable
-{
- DAC960_V1_ErrorTableEntry_T
- ErrorTableEntries[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
-}
-DAC960_V1_ErrorTable_T;
-
-
-/*
- Define the DAC960 V1 Firmware Read Config2 Command reply structure.
-*/
-
-typedef struct DAC960_V1_Config2
-{
- unsigned char :1; /* Byte 0 Bit 0 */
- bool ActiveNegationEnabled:1; /* Byte 0 Bit 1 */
- unsigned char :5; /* Byte 0 Bits 2-6 */
- bool NoRescanIfResetReceivedDuringScan:1; /* Byte 0 Bit 7 */
- bool StorageWorksSupportEnabled:1; /* Byte 1 Bit 0 */
- bool HewlettPackardSupportEnabled:1; /* Byte 1 Bit 1 */
- bool NoDisconnectOnFirstCommand:1; /* Byte 1 Bit 2 */
- unsigned char :2; /* Byte 1 Bits 3-4 */
- bool AEMI_ARM:1; /* Byte 1 Bit 5 */
- bool AEMI_OFM:1; /* Byte 1 Bit 6 */
- unsigned char :1; /* Byte 1 Bit 7 */
- enum {
- DAC960_V1_OEMID_Mylex = 0x00,
- DAC960_V1_OEMID_IBM = 0x08,
- DAC960_V1_OEMID_HP = 0x0A,
- DAC960_V1_OEMID_DEC = 0x0C,
- DAC960_V1_OEMID_Siemens = 0x10,
- DAC960_V1_OEMID_Intel = 0x12
- } __attribute__ ((packed)) OEMID; /* Byte 2 */
- unsigned char OEMModelNumber; /* Byte 3 */
- unsigned char PhysicalSector; /* Byte 4 */
- unsigned char LogicalSector; /* Byte 5 */
- unsigned char BlockFactor; /* Byte 6 */
- bool ReadAheadEnabled:1; /* Byte 7 Bit 0 */
- bool LowBIOSDelay:1; /* Byte 7 Bit 1 */
- unsigned char :2; /* Byte 7 Bits 2-3 */
- bool ReassignRestrictedToOneSector:1; /* Byte 7 Bit 4 */
- unsigned char :1; /* Byte 7 Bit 5 */
- bool ForceUnitAccessDuringWriteRecovery:1; /* Byte 7 Bit 6 */
- bool EnableLeftSymmetricRAID5Algorithm:1; /* Byte 7 Bit 7 */
- unsigned char DefaultRebuildRate; /* Byte 8 */
- unsigned char :8; /* Byte 9 */
- unsigned char BlocksPerCacheLine; /* Byte 10 */
- unsigned char BlocksPerStripe; /* Byte 11 */
- struct {
- enum {
- DAC960_V1_Async = 0x0,
- DAC960_V1_Sync_8MHz = 0x1,
- DAC960_V1_Sync_5MHz = 0x2,
- DAC960_V1_Sync_10or20MHz = 0x3
- } __attribute__ ((packed)) Speed:2; /* Byte 11 Bits 0-1 */
- bool Force8Bit:1; /* Byte 11 Bit 2 */
- bool DisableFast20:1; /* Byte 11 Bit 3 */
- unsigned char :3; /* Byte 11 Bits 4-6 */
- bool EnableTaggedQueuing:1; /* Byte 11 Bit 7 */
- } __attribute__ ((packed)) ChannelParameters[6]; /* Bytes 12-17 */
- unsigned char SCSIInitiatorID; /* Byte 18 */
- unsigned char :8; /* Byte 19 */
- enum {
- DAC960_V1_StartupMode_ControllerSpinUp = 0x00,
- DAC960_V1_StartupMode_PowerOnSpinUp = 0x01
- } __attribute__ ((packed)) StartupMode; /* Byte 20 */
- unsigned char SimultaneousDeviceSpinUpCount; /* Byte 21 */
- unsigned char SecondsDelayBetweenSpinUps; /* Byte 22 */
- unsigned char Reserved1[29]; /* Bytes 23-51 */
- bool BIOSDisabled:1; /* Byte 52 Bit 0 */
- bool CDROMBootEnabled:1; /* Byte 52 Bit 1 */
- unsigned char :3; /* Byte 52 Bits 2-4 */
- enum {
- DAC960_V1_Geometry_128_32 = 0x0,
- DAC960_V1_Geometry_255_63 = 0x1,
- DAC960_V1_Geometry_Reserved1 = 0x2,
- DAC960_V1_Geometry_Reserved2 = 0x3
- } __attribute__ ((packed)) DriveGeometry:2; /* Byte 52 Bits 5-6 */
- unsigned char :1; /* Byte 52 Bit 7 */
- unsigned char Reserved2[9]; /* Bytes 53-61 */
- unsigned short Checksum; /* Bytes 62-63 */
-}
-DAC960_V1_Config2_T;
-
-
-/*
- Define the DAC960 V1 Firmware DCDB request structure.
-*/
-
-typedef struct DAC960_V1_DCDB
-{
- unsigned char TargetID:4; /* Byte 0 Bits 0-3 */
- unsigned char Channel:4; /* Byte 0 Bits 4-7 */
- enum {
- DAC960_V1_DCDB_NoDataTransfer = 0,
- DAC960_V1_DCDB_DataTransferDeviceToSystem = 1,
- DAC960_V1_DCDB_DataTransferSystemToDevice = 2,
- DAC960_V1_DCDB_IllegalDataTransfer = 3
- } __attribute__ ((packed)) Direction:2; /* Byte 1 Bits 0-1 */
- bool EarlyStatus:1; /* Byte 1 Bit 2 */
- unsigned char :1; /* Byte 1 Bit 3 */
- enum {
- DAC960_V1_DCDB_Timeout_24_hours = 0,
- DAC960_V1_DCDB_Timeout_10_seconds = 1,
- DAC960_V1_DCDB_Timeout_60_seconds = 2,
- DAC960_V1_DCDB_Timeout_10_minutes = 3
- } __attribute__ ((packed)) Timeout:2; /* Byte 1 Bits 4-5 */
- bool NoAutomaticRequestSense:1; /* Byte 1 Bit 6 */
- bool DisconnectPermitted:1; /* Byte 1 Bit 7 */
- unsigned short TransferLength; /* Bytes 2-3 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 4-7 */
- unsigned char CDBLength:4; /* Byte 8 Bits 0-3 */
- unsigned char TransferLengthHigh4:4; /* Byte 8 Bits 4-7 */
- unsigned char SenseLength; /* Byte 9 */
- unsigned char CDB[12]; /* Bytes 10-21 */
- unsigned char SenseData[64]; /* Bytes 22-85 */
- unsigned char Status; /* Byte 86 */
- unsigned char :8; /* Byte 87 */
-}
-DAC960_V1_DCDB_T;
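
The DCDB transfer length is split across the 16-bit TransferLength field
and the high nibble of byte 8, giving 20 bits in total; a hypothetical
setter illustrating the split:

static inline void DAC960_V1_DCDB_SetTransferLength(DAC960_V1_DCDB_T *dcdb,
						    unsigned int length)
{
	dcdb->TransferLength = length & 0xFFFF;           /* low 16 bits */
	dcdb->TransferLengthHigh4 = (length >> 16) & 0xF; /* high 4 bits */
}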
-
-
-/*
- Define the DAC960 V1 Firmware Scatter/Gather List Type 1 32 Bit Address
- 32 Bit Byte Count structure.
-*/
-
-typedef struct DAC960_V1_ScatterGatherSegment
-{
- DAC960_BusAddress32_T SegmentDataPointer; /* Bytes 0-3 */
- DAC960_ByteCount32_T SegmentByteCount; /* Bytes 4-7 */
-}
-DAC960_V1_ScatterGatherSegment_T;
-
-
-/*
- Define the 13 Byte DAC960 V1 Firmware Command Mailbox structure. Bytes 13-15
- are not used. The Command Mailbox structure is padded to 16 bytes for
- efficient access.
-*/
-
-typedef union DAC960_V1_CommandMailbox
-{
- unsigned int Words[4]; /* Words 0-3 */
- unsigned char Bytes[16]; /* Bytes 0-15 */
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char Dummy[14]; /* Bytes 2-15 */
- } __attribute__ ((packed)) Common;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char Dummy1[6]; /* Bytes 2-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char Dummy2[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) Type3;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char CommandOpcode2; /* Byte 2 */
- unsigned char Dummy1[5]; /* Bytes 3-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char Dummy2[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) Type3B;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char Dummy1[5]; /* Bytes 2-6 */
- unsigned char LogicalDriveNumber:6; /* Byte 7 Bits 0-6 */
- bool AutoRestore:1; /* Byte 7 Bit 7 */
- unsigned char Dummy2[8]; /* Bytes 8-15 */
- } __attribute__ ((packed)) Type3C;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char Channel; /* Byte 2 */
- unsigned char TargetID; /* Byte 3 */
- DAC960_V1_PhysicalDeviceState_T DeviceState:5; /* Byte 4 Bits 0-4 */
- unsigned char Modifier:3; /* Byte 4 Bits 5-7 */
- unsigned char Dummy1[3]; /* Bytes 5-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char Dummy2[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) Type3D;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- DAC960_V1_PerformEventLogOpType_T OperationType; /* Byte 2 */
- unsigned char OperationQualifier; /* Byte 3 */
- unsigned short SequenceNumber; /* Bytes 4-5 */
- unsigned char Dummy1[2]; /* Bytes 6-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char Dummy2[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) Type3E;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char Dummy1[2]; /* Bytes 2-3 */
- unsigned char RebuildRateConstant; /* Byte 4 */
- unsigned char Dummy2[3]; /* Bytes 5-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char Dummy3[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) Type3R;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned short TransferLength; /* Bytes 2-3 */
- unsigned int LogicalBlockAddress; /* Bytes 4-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char LogicalDriveNumber; /* Byte 12 */
- unsigned char Dummy[3]; /* Bytes 13-15 */
- } __attribute__ ((packed)) Type4;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- struct {
- unsigned short TransferLength:11; /* Bytes 2-3 */
- unsigned char LogicalDriveNumber:5; /* Byte 3 Bits 3-7 */
- } __attribute__ ((packed)) LD;
- unsigned int LogicalBlockAddress; /* Bytes 4-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char ScatterGatherCount:6; /* Byte 12 Bits 0-5 */
- enum {
- DAC960_V1_ScatterGather_32BitAddress_32BitByteCount = 0x0,
- DAC960_V1_ScatterGather_32BitAddress_16BitByteCount = 0x1,
- DAC960_V1_ScatterGather_32BitByteCount_32BitAddress = 0x2,
- DAC960_V1_ScatterGather_16BitByteCount_32BitAddress = 0x3
- } __attribute__ ((packed)) ScatterGatherType:2; /* Byte 12 Bits 6-7 */
- unsigned char Dummy[3]; /* Bytes 13-15 */
- } __attribute__ ((packed)) Type5;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char CommandOpcode2; /* Byte 2 */
- unsigned char :8; /* Byte 3 */
- DAC960_BusAddress32_T CommandMailboxesBusAddress; /* Bytes 4-7 */
- DAC960_BusAddress32_T StatusMailboxesBusAddress; /* Bytes 8-11 */
- unsigned char Dummy[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) TypeX;
-}
-DAC960_V1_CommandMailbox_T;
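
A sketch of preparing a Type3 mailbox for the Enquiry command defined
earlier; the helper and its zeroing style are illustrative, and reply_dma
is assumed to be the DMA address of a DAC960_V1_Enquiry_T reply buffer:

static inline void
DAC960_V1_PrepareEnquiry(DAC960_V1_CommandMailbox_T *mbox,
			 DAC960_V1_CommandIdentifier_T id,
			 DAC960_BusAddress32_T reply_dma)
{
	*mbox = (DAC960_V1_CommandMailbox_T){ .Words = { 0 } };
	mbox->Type3.CommandOpcode = DAC960_V1_Enquiry;
	mbox->Type3.CommandIdentifier = id;
	mbox->Type3.BusAddress = reply_dma;	/* reply buffer, bytes 8-11 */
}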
-
-
-/*
- Define the DAC960 V2 Firmware Command Opcodes.
-*/
-
-typedef enum
-{
- DAC960_V2_MemCopy = 0x01,
- DAC960_V2_SCSI_10_Passthru = 0x02,
- DAC960_V2_SCSI_255_Passthru = 0x03,
- DAC960_V2_SCSI_10 = 0x04,
- DAC960_V2_SCSI_256 = 0x05,
- DAC960_V2_IOCTL = 0x20
-}
-__attribute__ ((packed))
-DAC960_V2_CommandOpcode_T;
-
-
-/*
- Define the DAC960 V2 Firmware IOCTL Opcodes.
-*/
-
-typedef enum
-{
- DAC960_V2_GetControllerInfo = 0x01,
- DAC960_V2_GetLogicalDeviceInfoValid = 0x03,
- DAC960_V2_GetPhysicalDeviceInfoValid = 0x05,
- DAC960_V2_GetHealthStatus = 0x11,
- DAC960_V2_GetEvent = 0x15,
- DAC960_V2_StartDiscovery = 0x81,
- DAC960_V2_SetDeviceState = 0x82,
- DAC960_V2_RebuildDeviceStart = 0x88,
- DAC960_V2_RebuildDeviceStop = 0x89,
- DAC960_V2_ConsistencyCheckStart = 0x8C,
- DAC960_V2_ConsistencyCheckStop = 0x8D,
- DAC960_V2_SetMemoryMailbox = 0x8E,
- DAC960_V2_PauseDevice = 0x92,
- DAC960_V2_TranslatePhysicalToLogicalDevice = 0xC5
-}
-__attribute__ ((packed))
-DAC960_V2_IOCTL_Opcode_T;
-
-
-/*
- Define the DAC960 V2 Firmware Command Identifier type.
-*/
-
-typedef unsigned short DAC960_V2_CommandIdentifier_T;
-
-
-/*
- Define the DAC960 V2 Firmware Command Status Codes.
-*/
-
-#define DAC960_V2_NormalCompletion 0x00
-#define DAC960_V2_AbormalCompletion 0x02
-#define DAC960_V2_DeviceBusy 0x08
-#define DAC960_V2_DeviceNonresponsive 0x0E
-#define DAC960_V2_DeviceNonresponsive2 0x0F
-#define DAC960_V2_DeviceRevervationConflict 0x18
-
-typedef unsigned char DAC960_V2_CommandStatus_T;
-
-
-/*
- Define the DAC960 V2 Firmware Memory Type structure.
-*/
-
-typedef struct DAC960_V2_MemoryType
-{
- enum {
- DAC960_V2_MemoryType_Reserved = 0x00,
- DAC960_V2_MemoryType_DRAM = 0x01,
- DAC960_V2_MemoryType_EDRAM = 0x02,
- DAC960_V2_MemoryType_EDO = 0x03,
- DAC960_V2_MemoryType_SDRAM = 0x04,
- DAC960_V2_MemoryType_Last = 0x1F
- } __attribute__ ((packed)) MemoryType:5; /* Byte 0 Bits 0-4 */
- bool :1; /* Byte 0 Bit 5 */
- bool MemoryParity:1; /* Byte 0 Bit 6 */
- bool MemoryECC:1; /* Byte 0 Bit 7 */
-}
-DAC960_V2_MemoryType_T;
-
-
-/*
- Define the DAC960 V2 Firmware Processor Type structure.
-*/
-
-typedef enum
-{
- DAC960_V2_ProcessorType_i960CA = 0x01,
- DAC960_V2_ProcessorType_i960RD = 0x02,
- DAC960_V2_ProcessorType_i960RN = 0x03,
- DAC960_V2_ProcessorType_i960RP = 0x04,
- DAC960_V2_ProcessorType_NorthBay = 0x05,
- DAC960_V2_ProcessorType_StrongArm = 0x06,
- DAC960_V2_ProcessorType_i960RM = 0x07
-}
-__attribute__ ((packed))
-DAC960_V2_ProcessorType_T;
-
-
-/*
- Define the DAC960 V2 Firmware Get Controller Info reply structure.
-*/
-
-typedef struct DAC960_V2_ControllerInfo
-{
- unsigned char :8; /* Byte 0 */
- enum {
- DAC960_V2_SCSI_Bus = 0x00,
- DAC960_V2_Fibre_Bus = 0x01,
- DAC960_V2_PCI_Bus = 0x03
- } __attribute__ ((packed)) BusInterfaceType; /* Byte 1 */
- enum {
- DAC960_V2_DAC960E = 0x01,
- DAC960_V2_DAC960M = 0x08,
- DAC960_V2_DAC960PD = 0x10,
- DAC960_V2_DAC960PL = 0x11,
- DAC960_V2_DAC960PU = 0x12,
- DAC960_V2_DAC960PE = 0x13,
- DAC960_V2_DAC960PG = 0x14,
- DAC960_V2_DAC960PJ = 0x15,
- DAC960_V2_DAC960PTL0 = 0x16,
- DAC960_V2_DAC960PR = 0x17,
- DAC960_V2_DAC960PRL = 0x18,
- DAC960_V2_DAC960PT = 0x19,
- DAC960_V2_DAC1164P = 0x1A,
- DAC960_V2_DAC960PTL1 = 0x1B,
- DAC960_V2_EXR2000P = 0x1C,
- DAC960_V2_EXR3000P = 0x1D,
- DAC960_V2_AcceleRAID352 = 0x1E,
- DAC960_V2_AcceleRAID170 = 0x1F,
- DAC960_V2_AcceleRAID160 = 0x20,
- DAC960_V2_DAC960S = 0x60,
- DAC960_V2_DAC960SU = 0x61,
- DAC960_V2_DAC960SX = 0x62,
- DAC960_V2_DAC960SF = 0x63,
- DAC960_V2_DAC960SS = 0x64,
- DAC960_V2_DAC960FL = 0x65,
- DAC960_V2_DAC960LL = 0x66,
- DAC960_V2_DAC960FF = 0x67,
- DAC960_V2_DAC960HP = 0x68,
- DAC960_V2_RAIDBRICK = 0x69,
- DAC960_V2_METEOR_FL = 0x6A,
- DAC960_V2_METEOR_FF = 0x6B
- } __attribute__ ((packed)) ControllerType; /* Byte 2 */
- unsigned char :8; /* Byte 3 */
- unsigned short BusInterfaceSpeedMHz; /* Bytes 4-5 */
- unsigned char BusWidthBits; /* Byte 6 */
- unsigned char FlashCodeTypeOrProductID; /* Byte 7 */
- unsigned char NumberOfHostPortsPresent; /* Byte 8 */
- unsigned char Reserved1[7]; /* Bytes 9-15 */
- unsigned char BusInterfaceName[16]; /* Bytes 16-31 */
- unsigned char ControllerName[16]; /* Bytes 32-47 */
- unsigned char Reserved2[16]; /* Bytes 48-63 */
- /* Firmware Release Information */
- unsigned char FirmwareMajorVersion; /* Byte 64 */
- unsigned char FirmwareMinorVersion; /* Byte 65 */
- unsigned char FirmwareTurnNumber; /* Byte 66 */
- unsigned char FirmwareBuildNumber; /* Byte 67 */
- unsigned char FirmwareReleaseDay; /* Byte 68 */
- unsigned char FirmwareReleaseMonth; /* Byte 69 */
- unsigned char FirmwareReleaseYearHigh2Digits; /* Byte 70 */
- unsigned char FirmwareReleaseYearLow2Digits; /* Byte 71 */
- /* Hardware Release Information */
- unsigned char HardwareRevision; /* Byte 72 */
- unsigned int :24; /* Bytes 73-75 */
- unsigned char HardwareReleaseDay; /* Byte 76 */
- unsigned char HardwareReleaseMonth; /* Byte 77 */
- unsigned char HardwareReleaseYearHigh2Digits; /* Byte 78 */
- unsigned char HardwareReleaseYearLow2Digits; /* Byte 79 */
- /* Hardware Manufacturing Information */
- unsigned char ManufacturingBatchNumber; /* Byte 80 */
- unsigned char :8; /* Byte 81 */
- unsigned char ManufacturingPlantNumber; /* Byte 82 */
- unsigned char :8; /* Byte 83 */
- unsigned char HardwareManufacturingDay; /* Byte 84 */
- unsigned char HardwareManufacturingMonth; /* Byte 85 */
- unsigned char HardwareManufacturingYearHigh2Digits; /* Byte 86 */
- unsigned char HardwareManufacturingYearLow2Digits; /* Byte 87 */
- unsigned char MaximumNumberOfPDDperXLD; /* Byte 88 */
- unsigned char MaximumNumberOfILDperXLD; /* Byte 89 */
- unsigned short NonvolatileMemorySizeKB; /* Bytes 90-91 */
- unsigned char MaximumNumberOfXLD; /* Byte 92 */
- unsigned int :24; /* Bytes 93-95 */
- /* Unique Information per Controller */
- unsigned char ControllerSerialNumber[16]; /* Bytes 96-111 */
- unsigned char Reserved3[16]; /* Bytes 112-127 */
- /* Vendor Information */
- unsigned int :24; /* Bytes 128-130 */
- unsigned char OEM_Code; /* Byte 131 */
- unsigned char VendorName[16]; /* Bytes 132-147 */
- /* Other Physical/Controller/Operation Information */
- bool BBU_Present:1; /* Byte 148 Bit 0 */
- bool ActiveActiveClusteringMode:1; /* Byte 148 Bit 1 */
- unsigned char :6; /* Byte 148 Bits 2-7 */
- unsigned char :8; /* Byte 149 */
- unsigned short :16; /* Bytes 150-151 */
- /* Physical Device Scan Information */
- bool PhysicalScanActive:1; /* Byte 152 Bit 0 */
- unsigned char :7; /* Byte 152 Bits 1-7 */
- unsigned char PhysicalDeviceChannelNumber; /* Byte 153 */
- unsigned char PhysicalDeviceTargetID; /* Byte 154 */
- unsigned char PhysicalDeviceLogicalUnit; /* Byte 155 */
- /* Maximum Command Data Transfer Sizes */
- unsigned short MaximumDataTransferSizeInBlocks; /* Bytes 156-157 */
- unsigned short MaximumScatterGatherEntries; /* Bytes 158-159 */
- /* Logical/Physical Device Counts */
- unsigned short LogicalDevicesPresent; /* Bytes 160-161 */
- unsigned short LogicalDevicesCritical; /* Bytes 162-163 */
- unsigned short LogicalDevicesOffline; /* Bytes 164-165 */
- unsigned short PhysicalDevicesPresent; /* Bytes 166-167 */
- unsigned short PhysicalDisksPresent; /* Bytes 168-169 */
- unsigned short PhysicalDisksCritical; /* Bytes 170-171 */
- unsigned short PhysicalDisksOffline; /* Bytes 172-173 */
- unsigned short MaximumParallelCommands; /* Bytes 174-175 */
- /* Channel and Target ID Information */
- unsigned char NumberOfPhysicalChannelsPresent; /* Byte 176 */
- unsigned char NumberOfVirtualChannelsPresent; /* Byte 177 */
- unsigned char NumberOfPhysicalChannelsPossible; /* Byte 178 */
- unsigned char NumberOfVirtualChannelsPossible; /* Byte 179 */
- unsigned char MaximumTargetsPerChannel[16]; /* Bytes 180-195 */
- unsigned char Reserved4[12]; /* Bytes 196-207 */
- /* Memory/Cache Information */
- unsigned short MemorySizeMB; /* Bytes 208-209 */
- unsigned short CacheSizeMB; /* Bytes 210-211 */
- unsigned int ValidCacheSizeInBytes; /* Bytes 212-215 */
- unsigned int DirtyCacheSizeInBytes; /* Bytes 216-219 */
- unsigned short MemorySpeedMHz; /* Bytes 220-221 */
- unsigned char MemoryDataWidthBits; /* Byte 222 */
- DAC960_V2_MemoryType_T MemoryType; /* Byte 223 */
- unsigned char CacheMemoryTypeName[16]; /* Bytes 224-239 */
- /* Execution Memory Information */
- unsigned short ExecutionMemorySizeMB; /* Bytes 240-241 */
- unsigned short ExecutionL2CacheSizeMB; /* Bytes 242-243 */
- unsigned char Reserved5[8]; /* Bytes 244-251 */
- unsigned short ExecutionMemorySpeedMHz; /* Bytes 252-253 */
- unsigned char ExecutionMemoryDataWidthBits; /* Byte 254 */
- DAC960_V2_MemoryType_T ExecutionMemoryType; /* Byte 255 */
- unsigned char ExecutionMemoryTypeName[16]; /* Bytes 256-271 */
- /* First CPU Type Information */
- unsigned short FirstProcessorSpeedMHz; /* Bytes 272-273 */
- DAC960_V2_ProcessorType_T FirstProcessorType; /* Byte 274 */
- unsigned char FirstProcessorCount; /* Byte 275 */
- unsigned char Reserved6[12]; /* Bytes 276-287 */
- unsigned char FirstProcessorName[16]; /* Bytes 288-303 */
- /* Second CPU Type Information */
- unsigned short SecondProcessorSpeedMHz; /* Bytes 304-305 */
- DAC960_V2_ProcessorType_T SecondProcessorType; /* Byte 306 */
- unsigned char SecondProcessorCount; /* Byte 307 */
- unsigned char Reserved7[12]; /* Bytes 308-319 */
- unsigned char SecondProcessorName[16]; /* Bytes 320-335 */
- /* Debugging/Profiling/Command Time Tracing Information */
- unsigned short CurrentProfilingDataPageNumber; /* Bytes 336-337 */
- unsigned short ProgramsAwaitingProfilingData; /* Bytes 338-339 */
- unsigned short CurrentCommandTimeTraceDataPageNumber; /* Bytes 340-341 */
- unsigned short ProgramsAwaitingCommandTimeTraceData; /* Bytes 342-343 */
- unsigned char Reserved8[8]; /* Bytes 344-351 */
- /* Error Counters on Physical Devices */
- unsigned short PhysicalDeviceBusResets; /* Bytes 352-353 */
- unsigned short PhysicalDeviceParityErrors; /* Bytes 354-355 */
- unsigned short PhysicalDeviceSoftErrors; /* Bytes 356-357 */
- unsigned short PhysicalDeviceCommandsFailed; /* Bytes 358-359 */
- unsigned short PhysicalDeviceMiscellaneousErrors; /* Bytes 360-361 */
- unsigned short PhysicalDeviceCommandTimeouts; /* Bytes 362-363 */
- unsigned short PhysicalDeviceSelectionTimeouts; /* Bytes 364-365 */
- unsigned short PhysicalDeviceRetriesDone; /* Bytes 366-367 */
- unsigned short PhysicalDeviceAbortsDone; /* Bytes 368-369 */
- unsigned short PhysicalDeviceHostCommandAbortsDone; /* Bytes 370-371 */
- unsigned short PhysicalDevicePredictedFailuresDetected; /* Bytes 372-373 */
- unsigned short PhysicalDeviceHostCommandsFailed; /* Bytes 374-375 */
- unsigned short PhysicalDeviceHardErrors; /* Bytes 376-377 */
- unsigned char Reserved9[6]; /* Bytes 378-383 */
- /* Error Counters on Logical Devices */
- unsigned short LogicalDeviceSoftErrors; /* Bytes 384-385 */
- unsigned short LogicalDeviceCommandsFailed; /* Bytes 386-387 */
- unsigned short LogicalDeviceHostCommandAbortsDone; /* Bytes 388-389 */
- unsigned short :16; /* Bytes 390-391 */
- /* Error Counters on Controller */
- unsigned short ControllerMemoryErrors; /* Bytes 392-393 */
- unsigned short ControllerHostCommandAbortsDone; /* Bytes 394-395 */
- unsigned int :32; /* Bytes 396-399 */
- /* Long Duration Activity Information */
- unsigned short BackgroundInitializationsActive; /* Bytes 400-401 */
- unsigned short LogicalDeviceInitializationsActive; /* Bytes 402-403 */
- unsigned short PhysicalDeviceInitializationsActive; /* Bytes 404-405 */
- unsigned short ConsistencyChecksActive; /* Bytes 406-407 */
- unsigned short RebuildsActive; /* Bytes 408-409 */
- unsigned short OnlineExpansionsActive; /* Bytes 410-411 */
- unsigned short PatrolActivitiesActive; /* Bytes 412-413 */
- unsigned short :16; /* Bytes 414-415 */
- /* Flash ROM Information */
- unsigned char FlashType; /* Byte 416 */
- unsigned char :8; /* Byte 417 */
- unsigned short FlashSizeMB; /* Bytes 418-419 */
- unsigned int FlashLimit; /* Bytes 420-423 */
- unsigned int FlashCount; /* Bytes 424-427 */
- unsigned int :32; /* Bytes 428-431 */
- unsigned char FlashTypeName[16]; /* Bytes 432-447 */
- /* Firmware Run Time Information */
- unsigned char RebuildRate; /* Byte 448 */
- unsigned char BackgroundInitializationRate; /* Byte 449 */
- unsigned char ForegroundInitializationRate; /* Byte 450 */
- unsigned char ConsistencyCheckRate; /* Byte 451 */
- unsigned int :32; /* Bytes 452-455 */
- unsigned int MaximumDP; /* Bytes 456-459 */
- unsigned int FreeDP; /* Bytes 460-463 */
- unsigned int MaximumIOP; /* Bytes 464-467 */
- unsigned int FreeIOP; /* Bytes 468-471 */
- unsigned short MaximumCombLengthInBlocks; /* Bytes 472-473 */
- unsigned short NumberOfConfigurationGroups; /* Bytes 474-475 */
- bool InstallationAbortStatus:1; /* Byte 476 Bit 0 */
- bool MaintenanceModeStatus:1; /* Byte 476 Bit 1 */
- unsigned int :24; /* Bytes 476-479 */
- unsigned char Reserved10[32]; /* Bytes 480-511 */
- unsigned char Reserved11[512]; /* Bytes 512-1023 */
-}
-DAC960_V2_ControllerInfo_T;
-
-
-/*
- Define the DAC960 V2 Firmware Logical Device State type.
-*/
-
-typedef enum
-{
- DAC960_V2_LogicalDevice_Online = 0x01,
- DAC960_V2_LogicalDevice_Offline = 0x08,
- DAC960_V2_LogicalDevice_Critical = 0x09
-}
-__attribute__ ((packed))
-DAC960_V2_LogicalDeviceState_T;
-
-
-/*
- Define the DAC960 V2 Firmware Get Logical Device Info reply structure.
-*/
-
-typedef struct DAC960_V2_LogicalDeviceInfo
-{
- unsigned char :8; /* Byte 0 */
- unsigned char Channel; /* Byte 1 */
- unsigned char TargetID; /* Byte 2 */
- unsigned char LogicalUnit; /* Byte 3 */
- DAC960_V2_LogicalDeviceState_T LogicalDeviceState; /* Byte 4 */
- unsigned char RAIDLevel; /* Byte 5 */
- unsigned char StripeSize; /* Byte 6 */
- unsigned char CacheLineSize; /* Byte 7 */
- struct {
- enum {
- DAC960_V2_ReadCacheDisabled = 0x0,
- DAC960_V2_ReadCacheEnabled = 0x1,
- DAC960_V2_ReadAheadEnabled = 0x2,
- DAC960_V2_IntelligentReadAheadEnabled = 0x3,
- DAC960_V2_ReadCache_Last = 0x7
- } __attribute__ ((packed)) ReadCache:3; /* Byte 8 Bits 0-2 */
- enum {
- DAC960_V2_WriteCacheDisabled = 0x0,
- DAC960_V2_LogicalDeviceReadOnly = 0x1,
- DAC960_V2_WriteCacheEnabled = 0x2,
- DAC960_V2_IntelligentWriteCacheEnabled = 0x3,
- DAC960_V2_WriteCache_Last = 0x7
- } __attribute__ ((packed)) WriteCache:3; /* Byte 8 Bits 3-5 */
- bool :1; /* Byte 8 Bit 6 */
- bool LogicalDeviceInitialized:1; /* Byte 8 Bit 7 */
- } LogicalDeviceControl; /* Byte 8 */
- /* Logical Device Operations Status */
- bool ConsistencyCheckInProgress:1; /* Byte 9 Bit 0 */
- bool RebuildInProgress:1; /* Byte 9 Bit 1 */
- bool BackgroundInitializationInProgress:1; /* Byte 9 Bit 2 */
- bool ForegroundInitializationInProgress:1; /* Byte 9 Bit 3 */
- bool DataMigrationInProgress:1; /* Byte 9 Bit 4 */
- bool PatrolOperationInProgress:1; /* Byte 9 Bit 5 */
- unsigned char :2; /* Byte 9 Bits 6-7 */
- unsigned char RAID5WriteUpdate; /* Byte 10 */
- unsigned char RAID5Algorithm; /* Byte 11 */
- unsigned short LogicalDeviceNumber; /* Bytes 12-13 */
- /* BIOS Info */
- bool BIOSDisabled:1; /* Byte 14 Bit 0 */
- bool CDROMBootEnabled:1; /* Byte 14 Bit 1 */
- bool DriveCoercionEnabled:1; /* Byte 14 Bit 2 */
- bool WriteSameDisabled:1; /* Byte 14 Bit 3 */
- bool HBA_ModeEnabled:1; /* Byte 14 Bit 4 */
- enum {
- DAC960_V2_Geometry_128_32 = 0x0,
- DAC960_V2_Geometry_255_63 = 0x1,
- DAC960_V2_Geometry_Reserved1 = 0x2,
- DAC960_V2_Geometry_Reserved2 = 0x3
- } __attribute__ ((packed)) DriveGeometry:2; /* Byte 14 Bits 5-6 */
- bool SuperReadAheadEnabled:1; /* Byte 14 Bit 7 */
- unsigned char :8; /* Byte 15 */
- /* Error Counters */
- unsigned short SoftErrors; /* Bytes 16-17 */
- unsigned short CommandsFailed; /* Bytes 18-19 */
- unsigned short HostCommandAbortsDone; /* Bytes 20-21 */
- unsigned short DeferredWriteErrors; /* Bytes 22-23 */
- unsigned int :32; /* Bytes 24-27 */
- unsigned int :32; /* Bytes 28-31 */
- /* Device Size Information */
- unsigned short :16; /* Bytes 32-33 */
- unsigned short DeviceBlockSizeInBytes; /* Bytes 34-35 */
- unsigned int OriginalDeviceSize; /* Bytes 36-39 */
- unsigned int ConfigurableDeviceSize; /* Bytes 40-43 */
- unsigned int :32; /* Bytes 44-47 */
- unsigned char LogicalDeviceName[32]; /* Bytes 48-79 */
- unsigned char SCSI_InquiryData[36]; /* Bytes 80-115 */
- unsigned char Reserved1[12]; /* Bytes 116-127 */
- DAC960_ByteCount64_T LastReadBlockNumber; /* Bytes 128-135 */
- DAC960_ByteCount64_T LastWrittenBlockNumber; /* Bytes 136-143 */
- DAC960_ByteCount64_T ConsistencyCheckBlockNumber; /* Bytes 144-151 */
- DAC960_ByteCount64_T RebuildBlockNumber; /* Bytes 152-159 */
- DAC960_ByteCount64_T BackgroundInitializationBlockNumber; /* Bytes 160-167 */
- DAC960_ByteCount64_T ForegroundInitializationBlockNumber; /* Bytes 168-175 */
- DAC960_ByteCount64_T DataMigrationBlockNumber; /* Bytes 176-183 */
- DAC960_ByteCount64_T PatrolOperationBlockNumber; /* Bytes 184-191 */
- unsigned char Reserved2[64]; /* Bytes 192-255 */
-}
-DAC960_V2_LogicalDeviceInfo_T;
-
-
-/*
- Define the DAC960 V2 Firmware Physical Device State type.
-*/
-
-typedef enum
-{
- DAC960_V2_Device_Unconfigured = 0x00,
- DAC960_V2_Device_Online = 0x01,
- DAC960_V2_Device_Rebuild = 0x03,
- DAC960_V2_Device_Missing = 0x04,
- DAC960_V2_Device_Critical = 0x05,
- DAC960_V2_Device_Dead = 0x08,
- DAC960_V2_Device_SuspectedDead = 0x0C,
- DAC960_V2_Device_CommandedOffline = 0x10,
- DAC960_V2_Device_Standby = 0x21,
- DAC960_V2_Device_InvalidState = 0xFF
-}
-__attribute__ ((packed))
-DAC960_V2_PhysicalDeviceState_T;
-
-
-/*
- Define the DAC960 V2 Firmware Get Physical Device Info reply structure.
-*/
-
-typedef struct DAC960_V2_PhysicalDeviceInfo
-{
- unsigned char :8; /* Byte 0 */
- unsigned char Channel; /* Byte 1 */
- unsigned char TargetID; /* Byte 2 */
- unsigned char LogicalUnit; /* Byte 3 */
- /* Configuration Status Bits */
- bool PhysicalDeviceFaultTolerant:1; /* Byte 4 Bit 0 */
- bool PhysicalDeviceConnected:1; /* Byte 4 Bit 1 */
- bool PhysicalDeviceLocalToController:1; /* Byte 4 Bit 2 */
- unsigned char :5; /* Byte 4 Bits 3-7 */
- /* Multiple Host/Controller Status Bits */
- bool RemoteHostSystemDead:1; /* Byte 5 Bit 0 */
- bool RemoteControllerDead:1; /* Byte 5 Bit 1 */
- unsigned char :6; /* Byte 5 Bits 2-7 */
- DAC960_V2_PhysicalDeviceState_T PhysicalDeviceState; /* Byte 6 */
- unsigned char NegotiatedDataWidthBits; /* Byte 7 */
- unsigned short NegotiatedSynchronousMegaTransfers; /* Bytes 8-9 */
- /* Multiported Physical Device Information */
- unsigned char NumberOfPortConnections; /* Byte 10 */
- unsigned char DriveAccessibilityBitmap; /* Byte 11 */
- unsigned int :32; /* Bytes 12-15 */
- unsigned char NetworkAddress[16]; /* Bytes 16-31 */
- unsigned short MaximumTags; /* Bytes 32-33 */
- /* Physical Device Operations Status */
- bool ConsistencyCheckInProgress:1; /* Byte 34 Bit 0 */
- bool RebuildInProgress:1; /* Byte 34 Bit 1 */
- bool MakingDataConsistentInProgress:1; /* Byte 34 Bit 2 */
- bool PhysicalDeviceInitializationInProgress:1; /* Byte 34 Bit 3 */
- bool DataMigrationInProgress:1; /* Byte 34 Bit 4 */
- bool PatrolOperationInProgress:1; /* Byte 34 Bit 5 */
- unsigned char :2; /* Byte 34 Bits 6-7 */
- unsigned char LongOperationStatus; /* Byte 35 */
- unsigned char ParityErrors; /* Byte 36 */
- unsigned char SoftErrors; /* Byte 37 */
- unsigned char HardErrors; /* Byte 38 */
- unsigned char MiscellaneousErrors; /* Byte 39 */
- unsigned char CommandTimeouts; /* Byte 40 */
- unsigned char Retries; /* Byte 41 */
- unsigned char Aborts; /* Byte 42 */
- unsigned char PredictedFailuresDetected; /* Byte 43 */
- unsigned int :32; /* Bytes 44-47 */
- unsigned short :16; /* Bytes 48-49 */
- unsigned short DeviceBlockSizeInBytes; /* Bytes 50-51 */
- unsigned int OriginalDeviceSize; /* Bytes 52-55 */
- unsigned int ConfigurableDeviceSize; /* Bytes 56-59 */
- unsigned int :32; /* Bytes 60-63 */
- unsigned char PhysicalDeviceName[16]; /* Bytes 64-79 */
- unsigned char Reserved1[16]; /* Bytes 80-95 */
- unsigned char Reserved2[32]; /* Bytes 96-127 */
- unsigned char SCSI_InquiryData[36]; /* Bytes 128-163 */
- unsigned char Reserved3[20]; /* Bytes 164-183 */
- unsigned char Reserved4[8]; /* Bytes 184-191 */
- DAC960_ByteCount64_T LastReadBlockNumber; /* Bytes 192-199 */
- DAC960_ByteCount64_T LastWrittenBlockNumber; /* Bytes 200-207 */
- DAC960_ByteCount64_T ConsistencyCheckBlockNumber; /* Bytes 208-215 */
- DAC960_ByteCount64_T RebuildBlockNumber; /* Bytes 216-223 */
- DAC960_ByteCount64_T MakingDataConsistentBlockNumber; /* Bytes 224-231 */
- DAC960_ByteCount64_T DeviceInitializationBlockNumber; /* Bytes 232-239 */
- DAC960_ByteCount64_T DataMigrationBlockNumber; /* Bytes 240-247 */
- DAC960_ByteCount64_T PatrolOperationBlockNumber; /* Bytes 248-255 */
- unsigned char Reserved5[256]; /* Bytes 256-511 */
-}
-DAC960_V2_PhysicalDeviceInfo_T;
-
-
-/*
- Define the DAC960 V2 Firmware Health Status Buffer structure.
-*/
-
-typedef struct DAC960_V2_HealthStatusBuffer
-{
- unsigned int MicrosecondsFromControllerStartTime; /* Bytes 0-3 */
- unsigned int MillisecondsFromControllerStartTime; /* Bytes 4-7 */
- unsigned int SecondsFrom1January1970; /* Bytes 8-11 */
- unsigned int :32; /* Bytes 12-15 */
- unsigned int StatusChangeCounter; /* Bytes 16-19 */
- unsigned int :32; /* Bytes 20-23 */
- unsigned int DebugOutputMessageBufferIndex; /* Bytes 24-27 */
- unsigned int CodedMessageBufferIndex; /* Bytes 28-31 */
- unsigned int CurrentTimeTracePageNumber; /* Bytes 32-35 */
- unsigned int CurrentProfilerPageNumber; /* Bytes 36-39 */
- unsigned int NextEventSequenceNumber; /* Bytes 40-43 */
- unsigned int :32; /* Bytes 44-47 */
- unsigned char Reserved1[16]; /* Bytes 48-63 */
- unsigned char Reserved2[64]; /* Bytes 64-127 */
-}
-DAC960_V2_HealthStatusBuffer_T;
-
-
-/*
- Define the DAC960 V2 Firmware Get Event reply structure.
-*/
-
-typedef struct DAC960_V2_Event
-{
- unsigned int EventSequenceNumber; /* Bytes 0-3 */
- unsigned int EventTime; /* Bytes 4-7 */
- unsigned int EventCode; /* Bytes 8-11 */
- unsigned char :8; /* Byte 12 */
- unsigned char Channel; /* Byte 13 */
- unsigned char TargetID; /* Byte 14 */
- unsigned char LogicalUnit; /* Byte 15 */
- unsigned int :32; /* Bytes 16-19 */
- unsigned int EventSpecificParameter; /* Bytes 20-23 */
- unsigned char RequestSenseData[40]; /* Bytes 24-63 */
-}
-DAC960_V2_Event_T;
-
-
-/*
- Define the DAC960 V2 Firmware Command Control Bits structure.
-*/
-
-typedef struct DAC960_V2_CommandControlBits
-{
- bool ForceUnitAccess:1; /* Byte 0 Bit 0 */
- bool DisablePageOut:1; /* Byte 0 Bit 1 */
- bool :1; /* Byte 0 Bit 2 */
- bool AdditionalScatterGatherListMemory:1; /* Byte 0 Bit 3 */
- bool DataTransferControllerToHost:1; /* Byte 0 Bit 4 */
- bool :1; /* Byte 0 Bit 5 */
- bool NoAutoRequestSense:1; /* Byte 0 Bit 6 */
- bool DisconnectProhibited:1; /* Byte 0 Bit 7 */
-}
-DAC960_V2_CommandControlBits_T;
-
-
-/*
- Define the DAC960 V2 Firmware Command Timeout structure.
-*/
-
-typedef struct DAC960_V2_CommandTimeout
-{
- unsigned char TimeoutValue:6; /* Byte 0 Bits 0-5 */
- enum {
- DAC960_V2_TimeoutScale_Seconds = 0,
- DAC960_V2_TimeoutScale_Minutes = 1,
- DAC960_V2_TimeoutScale_Hours = 2,
- DAC960_V2_TimeoutScale_Reserved = 3
- } __attribute__ ((packed)) TimeoutScale:2; /* Byte 0 Bits 6-7 */
-}
-DAC960_V2_CommandTimeout_T;
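
The 6-bit value plus 2-bit scale encode timeouts of up to 63 hours; a
hypothetical encoder that rounds up to the smallest sufficient scale:

static inline DAC960_V2_CommandTimeout_T
DAC960_V2_EncodeTimeout(unsigned int seconds)
{
	DAC960_V2_CommandTimeout_T t;

	/* Caller must keep seconds <= 63 hours (the encodable maximum). */
	if (seconds <= 63) {
		t.TimeoutValue = seconds;
		t.TimeoutScale = DAC960_V2_TimeoutScale_Seconds;
	} else if (seconds <= 63 * 60) {
		t.TimeoutValue = (seconds + 59) / 60;
		t.TimeoutScale = DAC960_V2_TimeoutScale_Minutes;
	} else {
		t.TimeoutValue = (seconds + 3599) / 3600;
		t.TimeoutScale = DAC960_V2_TimeoutScale_Hours;
	}
	return t;
}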
-
-
-/*
- Define the DAC960 V2 Firmware Physical Device structure.
-*/
-
-typedef struct DAC960_V2_PhysicalDevice
-{
- unsigned char LogicalUnit; /* Byte 0 */
- unsigned char TargetID; /* Byte 1 */
- unsigned char Channel:3; /* Byte 2 Bits 0-2 */
- unsigned char Controller:5; /* Byte 2 Bits 3-7 */
-}
-__attribute__ ((packed))
-DAC960_V2_PhysicalDevice_T;
-
-
-/*
- Define the DAC960 V2 Firmware Logical Device structure.
-*/
-
-typedef struct DAC960_V2_LogicalDevice
-{
- unsigned short LogicalDeviceNumber; /* Bytes 0-1 */
- unsigned char :3; /* Byte 2 Bits 0-2 */
- unsigned char Controller:5; /* Byte 2 Bits 3-7 */
-}
-__attribute__ ((packed))
-DAC960_V2_LogicalDevice_T;
-
-
-/*
- Define the DAC960 V2 Firmware Operation Device type.
-*/
-
-typedef enum
-{
- DAC960_V2_Physical_Device = 0x00,
- DAC960_V2_RAID_Device = 0x01,
- DAC960_V2_Physical_Channel = 0x02,
- DAC960_V2_RAID_Channel = 0x03,
- DAC960_V2_Physical_Controller = 0x04,
- DAC960_V2_RAID_Controller = 0x05,
- DAC960_V2_Configuration_Group = 0x10,
- DAC960_V2_Enclosure = 0x11
-}
-__attribute__ ((packed))
-DAC960_V2_OperationDevice_T;
-
-
-/*
- Define the DAC960 V2 Firmware Translate Physical To Logical Device structure.
-*/
-
-typedef struct DAC960_V2_PhysicalToLogicalDevice
-{
- unsigned short LogicalDeviceNumber; /* Bytes 0-1 */
- unsigned short :16; /* Bytes 2-3 */
- unsigned char PreviousBootController; /* Byte 4 */
- unsigned char PreviousBootChannel; /* Byte 5 */
- unsigned char PreviousBootTargetID; /* Byte 6 */
- unsigned char PreviousBootLogicalUnit; /* Byte 7 */
-}
-DAC960_V2_PhysicalToLogicalDevice_T;
-
-
-
-/*
- Define the DAC960 V2 Firmware Scatter/Gather List Entry structure.
-*/
-
-typedef struct DAC960_V2_ScatterGatherSegment
-{
- DAC960_BusAddress64_T SegmentDataPointer; /* Bytes 0-7 */
- DAC960_ByteCount64_T SegmentByteCount; /* Bytes 8-15 */
-}
-DAC960_V2_ScatterGatherSegment_T;
-
-
-/*
- Define the DAC960 V2 Firmware Data Transfer Memory Address structure.
-*/
-
-typedef union DAC960_V2_DataTransferMemoryAddress
-{
- DAC960_V2_ScatterGatherSegment_T ScatterGatherSegments[2]; /* Bytes 0-31 */
- struct {
- unsigned short ScatterGatherList0Length; /* Bytes 0-1 */
- unsigned short ScatterGatherList1Length; /* Bytes 2-3 */
- unsigned short ScatterGatherList2Length; /* Bytes 4-5 */
- unsigned short :16; /* Bytes 6-7 */
- DAC960_BusAddress64_T ScatterGatherList0Address; /* Bytes 8-15 */
- DAC960_BusAddress64_T ScatterGatherList1Address; /* Bytes 16-23 */
- DAC960_BusAddress64_T ScatterGatherList2Address; /* Bytes 24-31 */
- } ExtendedScatterGather;
-}
-DAC960_V2_DataTransferMemoryAddress_T;
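
For a transfer that fits in one contiguous DMA region, only the first
inline scatter/gather segment is needed; a hypothetical helper:

static inline void
DAC960_V2_SetSingleSegment(DAC960_V2_DataTransferMemoryAddress_T *dma,
			   DAC960_BusAddress64_T address,
			   DAC960_ByteCount64_T byte_count)
{
	dma->ScatterGatherSegments[0].SegmentDataPointer = address;
	dma->ScatterGatherSegments[0].SegmentByteCount = byte_count;
}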
-
-
-/*
- Define the 64 Byte DAC960 V2 Firmware Command Mailbox structure.
-*/
-
-typedef union DAC960_V2_CommandMailbox
-{
- unsigned int Words[16]; /* Words 0-15 */
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- unsigned int :24; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned char Reserved[10]; /* Bytes 22-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } Common;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize; /* Bytes 4-7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char CDBLength; /* Byte 21 */
- unsigned char SCSI_CDB[10]; /* Bytes 22-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } SCSI_10;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize; /* Bytes 4-7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char CDBLength; /* Byte 21 */
- unsigned short :16; /* Bytes 22-23 */
- DAC960_BusAddress64_T SCSI_CDB_BusAddress; /* Bytes 24-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } SCSI_255;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- unsigned short :16; /* Bytes 16-17 */
- unsigned char ControllerNumber; /* Byte 18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned char Reserved[10]; /* Bytes 22-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } ControllerInfo;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned char Reserved[10]; /* Bytes 22-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } LogicalDeviceInfo;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned char Reserved[10]; /* Bytes 22-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } PhysicalDeviceInfo;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- unsigned short EventSequenceNumberHigh16; /* Bytes 16-17 */
- unsigned char ControllerNumber; /* Byte 18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned short EventSequenceNumberLow16; /* Bytes 22-23 */
- unsigned char Reserved[8]; /* Bytes 24-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } GetEvent;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- union {
- DAC960_V2_LogicalDeviceState_T LogicalDeviceState;
- DAC960_V2_PhysicalDeviceState_T PhysicalDeviceState;
- } DeviceState; /* Byte 22 */
- unsigned char Reserved[9]; /* Bytes 23-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } SetDeviceState;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- bool RestoreConsistency:1; /* Byte 22 Bit 0 */
- bool InitializedAreaOnly:1; /* Byte 22 Bit 1 */
- unsigned char :6; /* Byte 22 Bits 2-7 */
- unsigned char Reserved[9]; /* Bytes 23-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } ConsistencyCheck;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- unsigned char FirstCommandMailboxSizeKB; /* Byte 4 */
- unsigned char FirstStatusMailboxSizeKB; /* Byte 5 */
- unsigned char SecondCommandMailboxSizeKB; /* Byte 6 */
- unsigned char SecondStatusMailboxSizeKB; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- unsigned int :24; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned char HealthStatusBufferSizeKB; /* Byte 22 */
- unsigned char :8; /* Byte 23 */
- DAC960_BusAddress64_T HealthStatusBufferBusAddress; /* Bytes 24-31 */
- DAC960_BusAddress64_T FirstCommandMailboxBusAddress; /* Bytes 32-39 */
- DAC960_BusAddress64_T FirstStatusMailboxBusAddress; /* Bytes 40-47 */
- DAC960_BusAddress64_T SecondCommandMailboxBusAddress; /* Bytes 48-55 */
- DAC960_BusAddress64_T SecondStatusMailboxBusAddress; /* Bytes 56-63 */
- } SetMemoryMailbox;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- DAC960_V2_OperationDevice_T OperationDevice; /* Byte 22 */
- unsigned char Reserved[9]; /* Bytes 23-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } DeviceOperation;
-}
-DAC960_V2_CommandMailbox_T;
-
-
-/*
- Define the DAC960 Driver IOCTL requests.
-*/
-
-#define DAC960_IOCTL_GET_CONTROLLER_COUNT 0xDAC001
-#define DAC960_IOCTL_GET_CONTROLLER_INFO 0xDAC002
-#define DAC960_IOCTL_V1_EXECUTE_COMMAND 0xDAC003
-#define DAC960_IOCTL_V2_EXECUTE_COMMAND 0xDAC004
-#define DAC960_IOCTL_V2_GET_HEALTH_STATUS 0xDAC005
-
-
-/*
- Define the DAC960_IOCTL_GET_CONTROLLER_INFO reply structure.
-*/
-
-typedef struct DAC960_ControllerInfo
-{
- unsigned char ControllerNumber;
- unsigned char FirmwareType;
- unsigned char Channels;
- unsigned char Targets;
- unsigned char PCI_Bus;
- unsigned char PCI_Device;
- unsigned char PCI_Function;
- unsigned char IRQ_Channel;
- DAC960_PCI_Address_T PCI_Address;
- unsigned char ModelName[20];
- unsigned char FirmwareVersion[12];
-}
-DAC960_ControllerInfo_T;
-
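-/*
-  A minimal user-space sketch of this interface.  The device node name and
-  open flags below are assumptions for illustration, not defined by this
-  header; ControllerNumber is set on input and the driver fills in the rest:
-
-    DAC960_ControllerInfo_T Info = { .ControllerNumber = 0 };
-    int fd = open("/dev/dac960_gam", O_RDONLY);
-    if (fd >= 0 && ioctl(fd, DAC960_IOCTL_GET_CONTROLLER_INFO, &Info) == 0)
-      printf("Controller %d: %s Firmware %s\n",
-             Info.ControllerNumber, Info.ModelName, Info.FirmwareVersion);
-*/
-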
-
-/*
- Define the User Mode DAC960_IOCTL_V1_EXECUTE_COMMAND request structure.
-*/
-
-typedef struct DAC960_V1_UserCommand
-{
- unsigned char ControllerNumber;
- DAC960_V1_CommandMailbox_T CommandMailbox;
- int DataTransferLength;
- void __user *DataTransferBuffer;
- DAC960_V1_DCDB_T __user *DCDB;
-}
-DAC960_V1_UserCommand_T;
-
-
-/*
- Define the Kernel Mode DAC960_IOCTL_V1_EXECUTE_COMMAND request structure.
-*/
-
-typedef struct DAC960_V1_KernelCommand
-{
- unsigned char ControllerNumber;
- DAC960_V1_CommandMailbox_T CommandMailbox;
- int DataTransferLength;
- void *DataTransferBuffer;
- DAC960_V1_DCDB_T *DCDB;
- DAC960_V1_CommandStatus_T CommandStatus;
- void (*CompletionFunction)(struct DAC960_V1_KernelCommand *);
- void *CompletionData;
-}
-DAC960_V1_KernelCommand_T;
-
-
-/*
- Define the User Mode DAC960_IOCTL_V2_EXECUTE_COMMAND request structure.
-*/
-
-typedef struct DAC960_V2_UserCommand
-{
- unsigned char ControllerNumber;
- DAC960_V2_CommandMailbox_T CommandMailbox;
- int DataTransferLength;
- int RequestSenseLength;
- void __user *DataTransferBuffer;
- void __user *RequestSenseBuffer;
-}
-DAC960_V2_UserCommand_T;
-
-
-/*
- Define the Kernel Mode DAC960_IOCTL_V2_EXECUTE_COMMAND request structure.
-*/
-
-typedef struct DAC960_V2_KernelCommand
-{
- unsigned char ControllerNumber;
- DAC960_V2_CommandMailbox_T CommandMailbox;
- int DataTransferLength;
- int RequestSenseLength;
- void *DataTransferBuffer;
- void *RequestSenseBuffer;
- DAC960_V2_CommandStatus_T CommandStatus;
- void (*CompletionFunction)(struct DAC960_V2_KernelCommand *);
- void *CompletionData;
-}
-DAC960_V2_KernelCommand_T;
-
-
-/*
- Define the User Mode DAC960_IOCTL_V2_GET_HEALTH_STATUS request structure.
-*/
-
-typedef struct DAC960_V2_GetHealthStatus
-{
- unsigned char ControllerNumber;
- DAC960_V2_HealthStatusBuffer_T __user *HealthStatusBuffer;
-}
-DAC960_V2_GetHealthStatus_T;
-
-
-/*
- Import the Kernel Mode IOCTL interface.
-*/
-
-extern int DAC960_KernelIOCTL(unsigned int Request, void *Argument);
-
-
-/*
- DAC960_DriverVersion protects the private portion of this file.
-*/
-
-#ifdef DAC960_DriverVersion
-
-
-/*
- Define the maximum Driver Queue Depth and Controller Queue Depth supported
- by DAC960 V1 and V2 Firmware Controllers.
-*/
-
-#define DAC960_MaxDriverQueueDepth 511
-#define DAC960_MaxControllerQueueDepth 512
-
-
-/*
- Define the maximum number of Scatter/Gather Segments supported for any
- DAC960 V1 and V2 Firmware controller.
-*/
-
-#define DAC960_V1_ScatterGatherLimit 33
-#define DAC960_V2_ScatterGatherLimit 128
-
-
-/*
- Define the number of Command Mailboxes and Status Mailboxes used by the
- DAC960 V1 and V2 Firmware Memory Mailbox Interface.
-*/
-
-#define DAC960_V1_CommandMailboxCount 256
-#define DAC960_V1_StatusMailboxCount 1024
-#define DAC960_V2_CommandMailboxCount 512
-#define DAC960_V2_StatusMailboxCount 512
-
-
-/*
- Define the DAC960 Controller Monitoring Timer Interval.
-*/
-
-#define DAC960_MonitoringTimerInterval (10 * HZ)
-
-
-/*
- Define the DAC960 Controller Secondary Monitoring Interval.
-*/
-
-#define DAC960_SecondaryMonitoringInterval (60 * HZ)
-
-
-/*
- Define the DAC960 Controller Health Status Monitoring Interval.
-*/
-
-#define DAC960_HealthStatusMonitoringInterval (1 * HZ)
-
-
-/*
- Define the DAC960 Controller Progress Reporting Interval.
-*/
-
-#define DAC960_ProgressReportingInterval (60 * HZ)
-
-
-/*
- Define the maximum number of Partitions allowed for each Logical Drive.
-*/
-
-#define DAC960_MaxPartitions 8
-#define DAC960_MaxPartitionsBits 3
-
-/*
- Define the DAC960 Controller fixed Block Size and Block Size Bits.
-*/
-
-#define DAC960_BlockSize 512
-#define DAC960_BlockSizeBits 9
-
-
-/*
- Define the number of Command structures that should be allocated as a
- group to optimize kernel memory allocation.
-*/
-
-#define DAC960_V1_CommandAllocationGroupSize 11
-#define DAC960_V2_CommandAllocationGroupSize 29
-
-
-/*
- Define the Controller Line Buffer, Progress Buffer, User Message, and
- Initial Status Buffer sizes.
-*/
-
-#define DAC960_LineBufferSize 100
-#define DAC960_ProgressBufferSize 200
-#define DAC960_UserMessageSize 200
-#define DAC960_InitialStatusBufferSize (8192-32)
-
-
-/*
- Define the DAC960 Controller Firmware Types.
-*/
-
-typedef enum
-{
- DAC960_V1_Controller = 1,
- DAC960_V2_Controller = 2
-}
-DAC960_FirmwareType_T;
-
-
-/*
- Define the DAC960 Controller Hardware Types.
-*/
-
-typedef enum
-{
- DAC960_BA_Controller = 1, /* eXtremeRAID 2000 */
- DAC960_LP_Controller = 2, /* AcceleRAID 352 */
- DAC960_LA_Controller = 3, /* DAC1164P */
- DAC960_PG_Controller = 4, /* DAC960PTL/PJ/PG */
- DAC960_PD_Controller = 5, /* DAC960PU/PD/PL/P */
- DAC960_P_Controller = 6, /* DAC960PU/PD/PL/P */
- DAC960_GEM_Controller = 7, /* AcceleRAID 4/5/600 */
-}
-DAC960_HardwareType_T;
-
-
-/*
- Define the Driver Message Levels.
-*/
-
-typedef enum DAC960_MessageLevel
-{
- DAC960_AnnounceLevel = 0,
- DAC960_InfoLevel = 1,
- DAC960_NoticeLevel = 2,
- DAC960_WarningLevel = 3,
- DAC960_ErrorLevel = 4,
- DAC960_ProgressLevel = 5,
- DAC960_CriticalLevel = 6,
- DAC960_UserCriticalLevel = 7
-}
-DAC960_MessageLevel_T;
-
-static char
- *DAC960_MessageLevelMap[] =
- { KERN_NOTICE, KERN_NOTICE, KERN_NOTICE, KERN_WARNING,
- KERN_ERR, KERN_CRIT, KERN_CRIT, KERN_CRIT };
-
-
-/*
- Define Driver Message macros.
-*/
-
-#define DAC960_Announce(Format, Arguments...) \
- DAC960_Message(DAC960_AnnounceLevel, Format, ##Arguments)
-
-#define DAC960_Info(Format, Arguments...) \
- DAC960_Message(DAC960_InfoLevel, Format, ##Arguments)
-
-#define DAC960_Notice(Format, Arguments...) \
- DAC960_Message(DAC960_NoticeLevel, Format, ##Arguments)
-
-#define DAC960_Warning(Format, Arguments...) \
- DAC960_Message(DAC960_WarningLevel, Format, ##Arguments)
-
-#define DAC960_Error(Format, Arguments...) \
- DAC960_Message(DAC960_ErrorLevel, Format, ##Arguments)
-
-#define DAC960_Progress(Format, Arguments...) \
- DAC960_Message(DAC960_ProgressLevel, Format, ##Arguments)
-
-#define DAC960_Critical(Format, Arguments...) \
- DAC960_Message(DAC960_CriticalLevel, Format, ##Arguments)
-
-#define DAC960_UserCritical(Format, Arguments...) \
- DAC960_Message(DAC960_UserCriticalLevel, Format, ##Arguments)
-
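-/*
-  A typical call site (illustrative; DAC960_Message itself is defined in
-  DAC960.c, not in this header) passes the Controller as the final argument
-  so the message can be prefixed with the controller name:
-
-    DAC960_Error("Unable to allocate DMA mapped memory\n", Controller);
-*/
-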
-
-struct DAC960_privdata {
- DAC960_HardwareType_T HardwareType;
- DAC960_FirmwareType_T FirmwareType;
- irq_handler_t InterruptHandler;
- unsigned int MemoryWindowSize;
-};
-
-
-/*
- Define the DAC960 V1 Firmware Controller Status Mailbox structure.
-*/
-
-typedef union DAC960_V1_StatusMailbox
-{
- unsigned int Word; /* Word 0 */
- struct {
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 0 */
- unsigned char :7; /* Byte 1 Bits 0-6 */
- bool Valid:1; /* Byte 1 Bit 7 */
- DAC960_V1_CommandStatus_T CommandStatus; /* Bytes 2-3 */
- } Fields;
-}
-DAC960_V1_StatusMailbox_T;
-
-
-/*
- Define the DAC960 V2 Firmware Controller Status Mailbox structure.
-*/
-
-typedef union DAC960_V2_StatusMailbox
-{
- unsigned int Words[2]; /* Words 0-1 */
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandStatus_T CommandStatus; /* Byte 2 */
- unsigned char RequestSenseLength; /* Byte 3 */
- int DataTransferResidue; /* Bytes 4-7 */
- } Fields;
-}
-DAC960_V2_StatusMailbox_T;
-
-
-/*
- Define the DAC960 Driver Command Types.
-*/
-
-typedef enum
-{
- DAC960_ReadCommand = 1,
- DAC960_WriteCommand = 2,
- DAC960_ReadRetryCommand = 3,
- DAC960_WriteRetryCommand = 4,
- DAC960_MonitoringCommand = 5,
- DAC960_ImmediateCommand = 6,
- DAC960_QueuedCommand = 7
-}
-DAC960_CommandType_T;
-
-
-/*
- Define the DAC960 Driver Command structure.
-*/
-
-typedef struct DAC960_Command
-{
- int CommandIdentifier;
- DAC960_CommandType_T CommandType;
- struct DAC960_Controller *Controller;
- struct DAC960_Command *Next;
- struct completion *Completion;
- unsigned int LogicalDriveNumber;
- unsigned int BlockNumber;
- unsigned int BlockCount;
- unsigned int SegmentCount;
- int DmaDirection;
- struct scatterlist *cmd_sglist;
- struct request *Request;
- union {
- struct {
- DAC960_V1_CommandMailbox_T CommandMailbox;
- DAC960_V1_KernelCommand_T *KernelCommand;
- DAC960_V1_CommandStatus_T CommandStatus;
- DAC960_V1_ScatterGatherSegment_T *ScatterGatherList;
- dma_addr_t ScatterGatherListDMA;
- struct scatterlist ScatterList[DAC960_V1_ScatterGatherLimit];
- unsigned int EndMarker[0];
- } V1;
- struct {
- DAC960_V2_CommandMailbox_T CommandMailbox;
- DAC960_V2_KernelCommand_T *KernelCommand;
- DAC960_V2_CommandStatus_T CommandStatus;
- unsigned char RequestSenseLength;
- int DataTransferResidue;
- DAC960_V2_ScatterGatherSegment_T *ScatterGatherList;
- dma_addr_t ScatterGatherListDMA;
- DAC960_SCSI_RequestSense_T *RequestSense;
- dma_addr_t RequestSenseDMA;
- struct scatterlist ScatterList[DAC960_V2_ScatterGatherLimit];
- unsigned int EndMarker[0];
- } V2;
- } FW;
-}
-DAC960_Command_T;
-
-
-/*
- Define the DAC960 Driver Controller structure.
-*/
-
-typedef struct DAC960_Controller
-{
- void __iomem *BaseAddress;
- void __iomem *MemoryMappedAddress;
- DAC960_FirmwareType_T FirmwareType;
- DAC960_HardwareType_T HardwareType;
- DAC960_IO_Address_T IO_Address;
- DAC960_PCI_Address_T PCI_Address;
- struct pci_dev *PCIDevice;
- unsigned char ControllerNumber;
- unsigned char ControllerName[4];
- unsigned char ModelName[20];
- unsigned char FullModelName[28];
- unsigned char FirmwareVersion[12];
- unsigned char Bus;
- unsigned char Device;
- unsigned char Function;
- unsigned char IRQ_Channel;
- unsigned char Channels;
- unsigned char Targets;
- unsigned char MemorySize;
- unsigned char LogicalDriveCount;
- unsigned short CommandAllocationGroupSize;
- unsigned short ControllerQueueDepth;
- unsigned short DriverQueueDepth;
- unsigned short MaxBlocksPerCommand;
- unsigned short ControllerScatterGatherLimit;
- unsigned short DriverScatterGatherLimit;
- unsigned int CombinedStatusBufferLength;
- unsigned int InitialStatusLength;
- unsigned int CurrentStatusLength;
- unsigned int ProgressBufferLength;
- unsigned int UserStatusLength;
- struct dma_loaf DmaPages;
- unsigned long MonitoringTimerCount;
- unsigned long PrimaryMonitoringTime;
- unsigned long SecondaryMonitoringTime;
- unsigned long ShutdownMonitoringTimer;
- unsigned long LastProgressReportTime;
- unsigned long LastCurrentStatusTime;
- bool ControllerInitialized;
- bool MonitoringCommandDeferred;
- bool EphemeralProgressMessage;
- bool DriveSpinUpMessageDisplayed;
- bool MonitoringAlertMode;
- bool SuppressEnclosureMessages;
- struct timer_list MonitoringTimer;
- struct gendisk *disks[DAC960_MaxLogicalDrives];
- struct dma_pool *ScatterGatherPool;
- DAC960_Command_T *FreeCommands;
- unsigned char *CombinedStatusBuffer;
- unsigned char *CurrentStatusBuffer;
- struct request_queue *RequestQueue[DAC960_MaxLogicalDrives];
- int req_q_index;
- spinlock_t queue_lock;
- wait_queue_head_t CommandWaitQueue;
- wait_queue_head_t HealthStatusWaitQueue;
- DAC960_Command_T InitialCommand;
- DAC960_Command_T *Commands[DAC960_MaxDriverQueueDepth];
- struct proc_dir_entry *ControllerProcEntry;
- bool LogicalDriveInitiallyAccessible[DAC960_MaxLogicalDrives];
- void (*QueueCommand)(DAC960_Command_T *Command);
- bool (*ReadControllerConfiguration)(struct DAC960_Controller *);
- bool (*ReadDeviceConfiguration)(struct DAC960_Controller *);
- bool (*ReportDeviceConfiguration)(struct DAC960_Controller *);
- void (*QueueReadWriteCommand)(DAC960_Command_T *Command);
- union {
- struct {
- unsigned char GeometryTranslationHeads;
- unsigned char GeometryTranslationSectors;
- unsigned char PendingRebuildFlag;
- unsigned short StripeSize;
- unsigned short SegmentSize;
- unsigned short NewEventLogSequenceNumber;
- unsigned short OldEventLogSequenceNumber;
- unsigned short DeviceStateChannel;
- unsigned short DeviceStateTargetID;
- bool DualModeMemoryMailboxInterface;
- bool BackgroundInitializationStatusSupported;
- bool SAFTE_EnclosureManagementEnabled;
- bool NeedLogicalDriveInformation;
- bool NeedErrorTableInformation;
- bool NeedDeviceStateInformation;
- bool NeedDeviceInquiryInformation;
- bool NeedDeviceSerialNumberInformation;
- bool NeedRebuildProgress;
- bool NeedConsistencyCheckProgress;
- bool NeedBackgroundInitializationStatus;
- bool StartDeviceStateScan;
- bool RebuildProgressFirst;
- bool RebuildFlagPending;
- bool RebuildStatusPending;
-
- dma_addr_t FirstCommandMailboxDMA;
- DAC960_V1_CommandMailbox_T *FirstCommandMailbox;
- DAC960_V1_CommandMailbox_T *LastCommandMailbox;
- DAC960_V1_CommandMailbox_T *NextCommandMailbox;
- DAC960_V1_CommandMailbox_T *PreviousCommandMailbox1;
- DAC960_V1_CommandMailbox_T *PreviousCommandMailbox2;
-
- dma_addr_t FirstStatusMailboxDMA;
- DAC960_V1_StatusMailbox_T *FirstStatusMailbox;
- DAC960_V1_StatusMailbox_T *LastStatusMailbox;
- DAC960_V1_StatusMailbox_T *NextStatusMailbox;
-
- DAC960_V1_DCDB_T *MonitoringDCDB;
- dma_addr_t MonitoringDCDB_DMA;
-
- DAC960_V1_Enquiry_T Enquiry;
- DAC960_V1_Enquiry_T *NewEnquiry;
- dma_addr_t NewEnquiryDMA;
-
- DAC960_V1_ErrorTable_T ErrorTable;
- DAC960_V1_ErrorTable_T *NewErrorTable;
- dma_addr_t NewErrorTableDMA;
-
- DAC960_V1_EventLogEntry_T *EventLogEntry;
- dma_addr_t EventLogEntryDMA;
-
- DAC960_V1_RebuildProgress_T *RebuildProgress;
- dma_addr_t RebuildProgressDMA;
- DAC960_V1_CommandStatus_T LastRebuildStatus;
- DAC960_V1_CommandStatus_T PendingRebuildStatus;
-
- DAC960_V1_LogicalDriveInformationArray_T LogicalDriveInformation;
- DAC960_V1_LogicalDriveInformationArray_T *NewLogicalDriveInformation;
- dma_addr_t NewLogicalDriveInformationDMA;
-
- DAC960_V1_BackgroundInitializationStatus_T
- *BackgroundInitializationStatus;
- dma_addr_t BackgroundInitializationStatusDMA;
- DAC960_V1_BackgroundInitializationStatus_T
- LastBackgroundInitializationStatus;
-
- DAC960_V1_DeviceState_T
- DeviceState[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
- DAC960_V1_DeviceState_T *NewDeviceState;
- dma_addr_t NewDeviceStateDMA;
-
- DAC960_SCSI_Inquiry_T
- InquiryStandardData[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
- DAC960_SCSI_Inquiry_T *NewInquiryStandardData;
- dma_addr_t NewInquiryStandardDataDMA;
-
- DAC960_SCSI_Inquiry_UnitSerialNumber_T
- InquiryUnitSerialNumber[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber;
- dma_addr_t NewInquiryUnitSerialNumberDMA;
-
- int DeviceResetCount[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
- bool DirectCommandActive[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
- } V1;
- struct {
- unsigned int StatusChangeCounter;
- unsigned int NextEventSequenceNumber;
- unsigned int PhysicalDeviceIndex;
- bool NeedLogicalDeviceInformation;
- bool NeedPhysicalDeviceInformation;
- bool NeedDeviceSerialNumberInformation;
- bool StartLogicalDeviceInformationScan;
- bool StartPhysicalDeviceInformationScan;
- struct dma_pool *RequestSensePool;
-
- dma_addr_t FirstCommandMailboxDMA;
- DAC960_V2_CommandMailbox_T *FirstCommandMailbox;
- DAC960_V2_CommandMailbox_T *LastCommandMailbox;
- DAC960_V2_CommandMailbox_T *NextCommandMailbox;
- DAC960_V2_CommandMailbox_T *PreviousCommandMailbox1;
- DAC960_V2_CommandMailbox_T *PreviousCommandMailbox2;
-
- dma_addr_t FirstStatusMailboxDMA;
- DAC960_V2_StatusMailbox_T *FirstStatusMailbox;
- DAC960_V2_StatusMailbox_T *LastStatusMailbox;
- DAC960_V2_StatusMailbox_T *NextStatusMailbox;
-
- dma_addr_t HealthStatusBufferDMA;
- DAC960_V2_HealthStatusBuffer_T *HealthStatusBuffer;
-
- DAC960_V2_ControllerInfo_T ControllerInformation;
- DAC960_V2_ControllerInfo_T *NewControllerInformation;
- dma_addr_t NewControllerInformationDMA;
-
- DAC960_V2_LogicalDeviceInfo_T
- *LogicalDeviceInformation[DAC960_MaxLogicalDrives];
- DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInformation;
- dma_addr_t NewLogicalDeviceInformationDMA;
-
- DAC960_V2_PhysicalDeviceInfo_T
- *PhysicalDeviceInformation[DAC960_V2_MaxPhysicalDevices];
- DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInformation;
- dma_addr_t NewPhysicalDeviceInformationDMA;
-
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber;
- dma_addr_t NewInquiryUnitSerialNumberDMA;
- DAC960_SCSI_Inquiry_UnitSerialNumber_T
- *InquiryUnitSerialNumber[DAC960_V2_MaxPhysicalDevices];
-
- DAC960_V2_Event_T *Event;
- dma_addr_t EventDMA;
-
- DAC960_V2_PhysicalToLogicalDevice_T *PhysicalToLogicalDevice;
- dma_addr_t PhysicalToLogicalDeviceDMA;
-
- DAC960_V2_PhysicalDevice_T
- LogicalDriveToVirtualDevice[DAC960_MaxLogicalDrives];
- bool LogicalDriveFoundDuringScan[DAC960_MaxLogicalDrives];
- } V2;
- } FW;
- unsigned char ProgressBuffer[DAC960_ProgressBufferSize];
- unsigned char UserStatusBuffer[DAC960_UserMessageSize];
-}
-DAC960_Controller_T;
-
-
-/*
- Simplify access to Firmware Version Dependent Data Structure Components
- and Functions.
-*/
-
-#define V1 FW.V1
-#define V2 FW.V2
-#define DAC960_QueueCommand(Command) \
- (Controller->QueueCommand)(Command)
-#define DAC960_ReadControllerConfiguration(Controller) \
- (Controller->ReadControllerConfiguration)(Controller)
-#define DAC960_ReadDeviceConfiguration(Controller) \
- (Controller->ReadDeviceConfiguration)(Controller)
-#define DAC960_ReportDeviceConfiguration(Controller) \
- (Controller->ReportDeviceConfiguration)(Controller)
-#define DAC960_QueueReadWriteCommand(Command) \
- (Controller->QueueReadWriteCommand)(Command)
-
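-/*
-  With these shorthands, firmware-specific members and methods can be
-  referenced uniformly; for example (illustrative only):
-
-    Controller->V1.NewEnquiry      expands to   Controller->FW.V1.NewEnquiry
-    DAC960_QueueCommand(Command)   expands to   (Controller->QueueCommand)(Command)
-*/
-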
-/*
- * dma_addr_writeql is provided to write dma_addr_t types
- * to a 64-bit PCI address space register.  The controller
- * will accept having the register written as two 32-bit
- * values.
- *
- * In HIGHMEM kernels, dma_addr_t is a 64-bit value;
- * without HIGHMEM, dma_addr_t is a 32-bit value.
- *
- * The compiler should always fix up the assignment
- * to u.wq appropriately, depending upon the size of
- * dma_addr_t.
- */
-static inline
-void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address)
-{
- union {
- u64 wq;
- uint wl[2];
- } u;
-
- u.wq = addr;
-
- writel(u.wl[0], write_address);
- writel(u.wl[1], write_address + 4);
-}
-
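-/*
-  Illustrative equivalence on a little-endian CPU (lower_32_bits and
-  upper_32_bits are shown only for exposition):
-
-    writel(lower_32_bits(addr), write_address);
-    writel(upper_32_bits(addr), write_address + 4);
-*/
-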
-/*
- Define the DAC960 GEM Series Controller Interface Register Offsets.
- */
-
-#define DAC960_GEM_RegisterWindowSize 0x600
-
-typedef enum
-{
- DAC960_GEM_InboundDoorBellRegisterReadSetOffset = 0x214,
- DAC960_GEM_InboundDoorBellRegisterClearOffset = 0x218,
- DAC960_GEM_OutboundDoorBellRegisterReadSetOffset = 0x224,
- DAC960_GEM_OutboundDoorBellRegisterClearOffset = 0x228,
- DAC960_GEM_InterruptStatusRegisterOffset = 0x208,
- DAC960_GEM_InterruptMaskRegisterReadSetOffset = 0x22C,
- DAC960_GEM_InterruptMaskRegisterClearOffset = 0x230,
- DAC960_GEM_CommandMailboxBusAddressOffset = 0x510,
- DAC960_GEM_CommandStatusOffset = 0x518,
- DAC960_GEM_ErrorStatusRegisterReadSetOffset = 0x224,
- DAC960_GEM_ErrorStatusRegisterClearOffset = 0x228,
-}
-DAC960_GEM_RegisterOffsets_T;
-
-/*
-  Define the structure of the DAC960 GEM Series Inbound Door Bell Register.
- */
-
-typedef union DAC960_GEM_InboundDoorBellRegister
-{
- unsigned int All;
- struct {
- unsigned int :24;
- bool HardwareMailboxNewCommand:1;
- bool AcknowledgeHardwareMailboxStatus:1;
- bool GenerateInterrupt:1;
- bool ControllerReset:1;
- bool MemoryMailboxNewCommand:1;
- unsigned int :3;
- } Write;
- struct {
- unsigned int :24;
- bool HardwareMailboxFull:1;
- bool InitializationInProgress:1;
- unsigned int :6;
- } Read;
-}
-DAC960_GEM_InboundDoorBellRegister_T;
-
-/*
- Define the structure of the DAC960 GEM Series Outbound Door Bell Register.
- */
-typedef union DAC960_GEM_OutboundDoorBellRegister
-{
- unsigned int All;
- struct {
- unsigned int :24;
- bool AcknowledgeHardwareMailboxInterrupt:1;
- bool AcknowledgeMemoryMailboxInterrupt:1;
- unsigned int :6;
- } Write;
- struct {
- unsigned int :24;
- bool HardwareMailboxStatusAvailable:1;
- bool MemoryMailboxStatusAvailable:1;
- unsigned int :6;
- } Read;
-}
-DAC960_GEM_OutboundDoorBellRegister_T;
-
-/*
- Define the structure of the DAC960 GEM Series Interrupt Mask Register.
- */
-typedef union DAC960_GEM_InterruptMaskRegister
-{
- unsigned int All;
- struct {
- unsigned int :16;
- unsigned int :8;
- unsigned int HardwareMailboxInterrupt:1;
- unsigned int MemoryMailboxInterrupt:1;
- unsigned int :6;
- } Bits;
-}
-DAC960_GEM_InterruptMaskRegister_T;
-
-/*
- Define the structure of the DAC960 GEM Series Error Status Register.
- */
-
-typedef union DAC960_GEM_ErrorStatusRegister
-{
- unsigned int All;
- struct {
- unsigned int :24;
- unsigned int :5;
- bool ErrorStatusPending:1;
- unsigned int :2;
- } Bits;
-}
-DAC960_GEM_ErrorStatusRegister_T;
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 GEM Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_GEM_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
-}
-
-static inline
-void DAC960_GEM_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterClearOffset);
-}
-
-static inline
-void DAC960_GEM_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
-}
-
-static inline
-void DAC960_GEM_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
-}
-
-static inline
-void DAC960_GEM_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
-}
-
-static inline
-bool DAC960_GEM_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readl(ControllerBaseAddress +
- DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
- return InboundDoorBellRegister.Read.HardwareMailboxFull;
-}
-
-static inline
-bool DAC960_GEM_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readl(ControllerBaseAddress +
- DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
- return InboundDoorBellRegister.Read.InitializationInProgress;
-}
-
-static inline
-void DAC960_GEM_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_OutboundDoorBellRegisterClearOffset);
-}
-
-static inline
-void DAC960_GEM_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_OutboundDoorBellRegisterClearOffset);
-}
-
-static inline
-void DAC960_GEM_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_OutboundDoorBellRegisterClearOffset);
-}
-
-static inline
-bool DAC960_GEM_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readl(ControllerBaseAddress +
- DAC960_GEM_OutboundDoorBellRegisterReadSetOffset);
- return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
-}
-
-static inline
-bool DAC960_GEM_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readl(ControllerBaseAddress +
- DAC960_GEM_OutboundDoorBellRegisterReadSetOffset);
- return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
-}
-
-static inline
-void DAC960_GEM_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0;
- InterruptMaskRegister.Bits.HardwareMailboxInterrupt = true;
- InterruptMaskRegister.Bits.MemoryMailboxInterrupt = true;
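- /* Writing ones to the Clear offset clears those mask bits, thereby
-    unmasking (enabling) both mailbox interrupts. */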
- writel(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_GEM_InterruptMaskRegisterClearOffset);
-}
-
-static inline
-void DAC960_GEM_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0;
- InterruptMaskRegister.Bits.HardwareMailboxInterrupt = true;
- InterruptMaskRegister.Bits.MemoryMailboxInterrupt = true;
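- /* Writing ones to the Read/Set offset sets those mask bits, thereby
-    masking (disabling) both mailbox interrupts. */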
- writel(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_GEM_InterruptMaskRegisterReadSetOffset);
-}
-
-static inline
-bool DAC960_GEM_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All =
- readl(ControllerBaseAddress +
- DAC960_GEM_InterruptMaskRegisterReadSetOffset);
- return !(InterruptMaskRegister.Bits.HardwareMailboxInterrupt ||
- InterruptMaskRegister.Bits.MemoryMailboxInterrupt);
-}
-
-static inline
-void DAC960_GEM_WriteCommandMailbox(DAC960_V2_CommandMailbox_T
- *MemoryCommandMailbox,
- DAC960_V2_CommandMailbox_T
- *CommandMailbox)
-{
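- /* Copy Words[1..] of the mailbox first; Word 0 carries the Command
-    Identifier and Opcode, so it is made visible last.  wmb() orders the
-    mailbox body ahead of Word 0, and mb() orders the completed mailbox
-    ahead of any subsequent doorbell write. */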
- memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1],
- sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
- wmb();
- MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
- mb();
-}
-
-static inline
-void DAC960_GEM_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
- dma_addr_t CommandMailboxDMA)
-{
- dma_addr_writeql(CommandMailboxDMA,
- ControllerBaseAddress +
- DAC960_GEM_CommandMailboxBusAddressOffset);
-}
-
-static inline DAC960_V2_CommandIdentifier_T
-DAC960_GEM_ReadCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_GEM_CommandStatusOffset);
-}
-
-static inline DAC960_V2_CommandStatus_T
-DAC960_GEM_ReadCommandStatus(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_GEM_CommandStatusOffset + 2);
-}
-
-static inline bool
-DAC960_GEM_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_GEM_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readl(ControllerBaseAddress + DAC960_GEM_ErrorStatusRegisterReadSetOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_GEM_CommandMailboxBusAddressOffset + 0);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_GEM_CommandMailboxBusAddressOffset + 1);
- writel(0x03000000, ControllerBaseAddress +
- DAC960_GEM_ErrorStatusRegisterClearOffset);
- return true;
-}
-
-/*
- Define the DAC960 BA Series Controller Interface Register Offsets.
-*/
-
-#define DAC960_BA_RegisterWindowSize 0x80
-
-typedef enum
-{
- DAC960_BA_InboundDoorBellRegisterOffset = 0x60,
- DAC960_BA_OutboundDoorBellRegisterOffset = 0x61,
- DAC960_BA_InterruptStatusRegisterOffset = 0x30,
- DAC960_BA_InterruptMaskRegisterOffset = 0x34,
- DAC960_BA_CommandMailboxBusAddressOffset = 0x50,
- DAC960_BA_CommandStatusOffset = 0x58,
- DAC960_BA_ErrorStatusRegisterOffset = 0x63
-}
-DAC960_BA_RegisterOffsets_T;
-
-
-/*
- Define the structure of the DAC960 BA Series Inbound Door Bell Register.
-*/
-
-typedef union DAC960_BA_InboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool HardwareMailboxNewCommand:1; /* Bit 0 */
- bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
- bool GenerateInterrupt:1; /* Bit 2 */
- bool ControllerReset:1; /* Bit 3 */
- bool MemoryMailboxNewCommand:1; /* Bit 4 */
- unsigned char :3; /* Bits 5-7 */
- } Write;
- struct {
- bool HardwareMailboxEmpty:1; /* Bit 0 */
- bool InitializationNotInProgress:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_BA_InboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 BA Series Outbound Door Bell Register.
-*/
-
-typedef union DAC960_BA_OutboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
- bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Write;
- struct {
- bool HardwareMailboxStatusAvailable:1; /* Bit 0 */
- bool MemoryMailboxStatusAvailable:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_BA_OutboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 BA Series Interrupt Mask Register.
-*/
-
-typedef union DAC960_BA_InterruptMaskRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool DisableInterrupts:1; /* Bit 2 */
- bool DisableInterruptsI2O:1; /* Bit 3 */
- unsigned int :4; /* Bits 4-7 */
- } Bits;
-}
-DAC960_BA_InterruptMaskRegister_T;
-
-
-/*
- Define the structure of the DAC960 BA Series Error Status Register.
-*/
-
-typedef union DAC960_BA_ErrorStatusRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool ErrorStatusPending:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_BA_ErrorStatusRegister_T;
-
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 BA Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_BA_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_BA_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
- return !InboundDoorBellRegister.Read.HardwareMailboxEmpty;
-}
-
-static inline
-bool DAC960_BA_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
- return !InboundDoorBellRegister.Read.InitializationNotInProgress;
-}
-
-static inline
-void DAC960_BA_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_BA_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
-}
-
-static inline
-bool DAC960_BA_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
-}
-
-static inline
-void DAC960_BA_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = false;
- InterruptMaskRegister.Bits.DisableInterruptsI2O = true;
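- /* All mask bits remain set except DisableInterrupts, so conventional
-    interrupts are enabled while I2O interrupts stay disabled. */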
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
-}
-
-static inline
-void DAC960_BA_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = true;
- InterruptMaskRegister.Bits.DisableInterruptsI2O = true;
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
-}
-
-static inline
-bool DAC960_BA_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
- return !InterruptMaskRegister.Bits.DisableInterrupts;
-}
-
-static inline
-void DAC960_BA_WriteCommandMailbox(DAC960_V2_CommandMailbox_T
- *MemoryCommandMailbox,
- DAC960_V2_CommandMailbox_T
- *CommandMailbox)
-{
- memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1],
- sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
- wmb();
- MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
- mb();
-}
-
-
-static inline
-void DAC960_BA_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
- dma_addr_t CommandMailboxDMA)
-{
- dma_addr_writeql(CommandMailboxDMA,
- ControllerBaseAddress +
- DAC960_BA_CommandMailboxBusAddressOffset);
-}
-
-static inline DAC960_V2_CommandIdentifier_T
-DAC960_BA_ReadCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_BA_CommandStatusOffset);
-}
-
-static inline DAC960_V2_CommandStatus_T
-DAC960_BA_ReadCommandStatus(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_BA_CommandStatusOffset + 2);
-}
-
-static inline bool
-DAC960_BA_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_BA_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_ErrorStatusRegisterOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_BA_CommandMailboxBusAddressOffset + 0);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_BA_CommandMailboxBusAddressOffset + 1);
- writeb(0xFF, ControllerBaseAddress + DAC960_BA_ErrorStatusRegisterOffset);
- return true;
-}
-
-
-/*
- Define the DAC960 LP Series Controller Interface Register Offsets.
-*/
-
-#define DAC960_LP_RegisterWindowSize 0x80
-
-typedef enum
-{
- DAC960_LP_InboundDoorBellRegisterOffset = 0x20,
- DAC960_LP_OutboundDoorBellRegisterOffset = 0x2C,
- DAC960_LP_InterruptStatusRegisterOffset = 0x30,
- DAC960_LP_InterruptMaskRegisterOffset = 0x34,
- DAC960_LP_CommandMailboxBusAddressOffset = 0x10,
- DAC960_LP_CommandStatusOffset = 0x18,
- DAC960_LP_ErrorStatusRegisterOffset = 0x2E
-}
-DAC960_LP_RegisterOffsets_T;
-
-
-/*
- Define the structure of the DAC960 LP Series Inbound Door Bell Register.
-*/
-
-typedef union DAC960_LP_InboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool HardwareMailboxNewCommand:1; /* Bit 0 */
- bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
- bool GenerateInterrupt:1; /* Bit 2 */
- bool ControllerReset:1; /* Bit 3 */
- bool MemoryMailboxNewCommand:1; /* Bit 4 */
- unsigned char :3; /* Bits 5-7 */
- } Write;
- struct {
- bool HardwareMailboxFull:1; /* Bit 0 */
- bool InitializationInProgress:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_LP_InboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 LP Series Outbound Door Bell Register.
-*/
-
-typedef union DAC960_LP_OutboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
- bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Write;
- struct {
- bool HardwareMailboxStatusAvailable:1; /* Bit 0 */
- bool MemoryMailboxStatusAvailable:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_LP_OutboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 LP Series Interrupt Mask Register.
-*/
-
-typedef union DAC960_LP_InterruptMaskRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool DisableInterrupts:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_LP_InterruptMaskRegister_T;
-
-
-/*
- Define the structure of the DAC960 LP Series Error Status Register.
-*/
-
-typedef union DAC960_LP_ErrorStatusRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool ErrorStatusPending:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_LP_ErrorStatusRegister_T;
-
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 LP Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_LP_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_LP_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.HardwareMailboxFull;
-}
-
-static inline
-bool DAC960_LP_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.InitializationInProgress;
-}
-
-static inline
-void DAC960_LP_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_LP_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
-}
-
-static inline
-bool DAC960_LP_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
-}
-
-static inline
-void DAC960_LP_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = false;
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
-}
-
-static inline
-void DAC960_LP_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = true;
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
-}
-
-static inline
-bool DAC960_LP_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
- return !InterruptMaskRegister.Bits.DisableInterrupts;
-}
-
-static inline
-void DAC960_LP_WriteCommandMailbox(DAC960_V2_CommandMailbox_T
- *MemoryCommandMailbox,
- DAC960_V2_CommandMailbox_T
- *CommandMailbox)
-{
- memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1],
- sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
- wmb();
- MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
- mb();
-}
-
-static inline
-void DAC960_LP_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
- dma_addr_t CommandMailboxDMA)
-{
- dma_addr_writeql(CommandMailboxDMA,
- ControllerBaseAddress +
- DAC960_LP_CommandMailboxBusAddressOffset);
-}
-
-static inline DAC960_V2_CommandIdentifier_T
-DAC960_LP_ReadCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_LP_CommandStatusOffset);
-}
-
-static inline DAC960_V2_CommandStatus_T
-DAC960_LP_ReadCommandStatus(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_LP_CommandStatusOffset + 2);
-}
-
-static inline bool
-DAC960_LP_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_LP_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_ErrorStatusRegisterOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_LP_CommandMailboxBusAddressOffset + 0);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_LP_CommandMailboxBusAddressOffset + 1);
- writeb(0xFF, ControllerBaseAddress + DAC960_LP_ErrorStatusRegisterOffset);
- return true;
-}
-
-
-/*
- Define the DAC960 LA Series Controller Interface Register Offsets.
-*/
-
-#define DAC960_LA_RegisterWindowSize 0x80
-
-typedef enum
-{
- DAC960_LA_InboundDoorBellRegisterOffset = 0x60,
- DAC960_LA_OutboundDoorBellRegisterOffset = 0x61,
- DAC960_LA_InterruptMaskRegisterOffset = 0x34,
- DAC960_LA_CommandOpcodeRegisterOffset = 0x50,
- DAC960_LA_CommandIdentifierRegisterOffset = 0x51,
- DAC960_LA_MailboxRegister2Offset = 0x52,
- DAC960_LA_MailboxRegister3Offset = 0x53,
- DAC960_LA_MailboxRegister4Offset = 0x54,
- DAC960_LA_MailboxRegister5Offset = 0x55,
- DAC960_LA_MailboxRegister6Offset = 0x56,
- DAC960_LA_MailboxRegister7Offset = 0x57,
- DAC960_LA_MailboxRegister8Offset = 0x58,
- DAC960_LA_MailboxRegister9Offset = 0x59,
- DAC960_LA_MailboxRegister10Offset = 0x5A,
- DAC960_LA_MailboxRegister11Offset = 0x5B,
- DAC960_LA_MailboxRegister12Offset = 0x5C,
- DAC960_LA_StatusCommandIdentifierRegOffset = 0x5D,
- DAC960_LA_StatusRegisterOffset = 0x5E,
- DAC960_LA_ErrorStatusRegisterOffset = 0x63
-}
-DAC960_LA_RegisterOffsets_T;
-
-
-/*
- Define the structure of the DAC960 LA Series Inbound Door Bell Register.
-*/
-
-typedef union DAC960_LA_InboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool HardwareMailboxNewCommand:1; /* Bit 0 */
- bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
- bool GenerateInterrupt:1; /* Bit 2 */
- bool ControllerReset:1; /* Bit 3 */
- bool MemoryMailboxNewCommand:1; /* Bit 4 */
- unsigned char :3; /* Bits 5-7 */
- } Write;
- struct {
- bool HardwareMailboxEmpty:1; /* Bit 0 */
- bool InitializationNotInProgress:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_LA_InboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 LA Series Outbound Door Bell Register.
-*/
-
-typedef union DAC960_LA_OutboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
- bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Write;
- struct {
- bool HardwareMailboxStatusAvailable:1; /* Bit 0 */
- bool MemoryMailboxStatusAvailable:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_LA_OutboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 LA Series Interrupt Mask Register.
-*/
-
-typedef union DAC960_LA_InterruptMaskRegister
-{
- unsigned char All;
- struct {
- unsigned char :2; /* Bits 0-1 */
- bool DisableInterrupts:1; /* Bit 2 */
- unsigned char :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_LA_InterruptMaskRegister_T;
-
-
-/*
- Define the structure of the DAC960 LA Series Error Status Register.
-*/
-
-typedef union DAC960_LA_ErrorStatusRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool ErrorStatusPending:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_LA_ErrorStatusRegister_T;
-
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 LA Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_LA_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_LA_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
- return !InboundDoorBellRegister.Read.HardwareMailboxEmpty;
-}
-
-static inline
-bool DAC960_LA_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
- return !InboundDoorBellRegister.Read.InitializationNotInProgress;
-}
-
-static inline
-void DAC960_LA_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_LA_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
-}
-
-static inline
-bool DAC960_LA_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
-}
-
-static inline
-void DAC960_LA_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = false;
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
-}
-
-static inline
-void DAC960_LA_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = true;
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
-}
-
-static inline
-bool DAC960_LA_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
- return !InterruptMaskRegister.Bits.DisableInterrupts;
-}
-
-static inline
-void DAC960_LA_WriteCommandMailbox(DAC960_V1_CommandMailbox_T
- *MemoryCommandMailbox,
- DAC960_V1_CommandMailbox_T
- *CommandMailbox)
-{
- MemoryCommandMailbox->Words[1] = CommandMailbox->Words[1];
- MemoryCommandMailbox->Words[2] = CommandMailbox->Words[2];
- MemoryCommandMailbox->Words[3] = CommandMailbox->Words[3];
- wmb();
- MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
- mb();
-}
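
[Editor's note: the ordering discipline here matters: Words[1..3] carry the payload while Words[0] carries the opcode byte the controller polls, so Words[0] is stored last behind a wmb(). As a hedged illustration, the same publish pattern in portable C11 atomics; the mailbox layout below is hypothetical, not the driver's:

	#include <stdatomic.h>
	#include <stdint.h>

	struct mbox_sketch {
		_Atomic uint32_t word0;	/* consumer polls this word */
		uint32_t words[3];	/* payload */
	};

	static void publish(struct mbox_sketch *m, const uint32_t cmd[4])
	{
		m->words[0] = cmd[1];
		m->words[1] = cmd[2];
		m->words[2] = cmd[3];
		/* release store plays the role of wmb(): payload is
		 * visible before the polled word changes */
		atomic_store_explicit(&m->word0, cmd[0], memory_order_release);
	}
]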
-
-static inline
-void DAC960_LA_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
- DAC960_V1_CommandMailbox_T *CommandMailbox)
-{
- writel(CommandMailbox->Words[0],
- ControllerBaseAddress + DAC960_LA_CommandOpcodeRegisterOffset);
- writel(CommandMailbox->Words[1],
- ControllerBaseAddress + DAC960_LA_MailboxRegister4Offset);
- writel(CommandMailbox->Words[2],
- ControllerBaseAddress + DAC960_LA_MailboxRegister8Offset);
- writeb(CommandMailbox->Bytes[12],
- ControllerBaseAddress + DAC960_LA_MailboxRegister12Offset);
-}
-
-static inline DAC960_V1_CommandIdentifier_T
-DAC960_LA_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readb(ControllerBaseAddress
- + DAC960_LA_StatusCommandIdentifierRegOffset);
-}
-
-static inline DAC960_V1_CommandStatus_T
-DAC960_LA_ReadStatusRegister(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_LA_StatusRegisterOffset);
-}
-
-static inline bool
-DAC960_LA_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_LA_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_ErrorStatusRegisterOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_LA_CommandOpcodeRegisterOffset);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_LA_CommandIdentifierRegisterOffset);
- writeb(0xFF, ControllerBaseAddress + DAC960_LA_ErrorStatusRegisterOffset);
- return true;
-}
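
[Editor's note: the handshake reads the register, bails out if no error is pending, clears the pending bit in the local copy so the caller sees only the error code, fetches the two parameter bytes, and acknowledges by writing 0xFF back. A hedged usage sketch of how a caller might drive it while initialization is in progress; report_error() is a hypothetical helper, the real driver logs through its own message machinery:

	static bool la_wait_for_init(void __iomem *base)
	{
		unsigned char status, p0, p1;

		while (DAC960_LA_InitializationInProgressP(base)) {
			if (DAC960_LA_ReadErrorStatus(base, &status, &p0, &p1))
				return report_error(status, p0, p1); /* hypothetical */
			udelay(10);
		}
		return true;
	}
]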
-
-/*
- Define the DAC960 PG Series Controller Interface Register Offsets.
-*/
-
-#define DAC960_PG_RegisterWindowSize 0x2000
-
-typedef enum
-{
- DAC960_PG_InboundDoorBellRegisterOffset = 0x0020,
- DAC960_PG_OutboundDoorBellRegisterOffset = 0x002C,
- DAC960_PG_InterruptMaskRegisterOffset = 0x0034,
- DAC960_PG_CommandOpcodeRegisterOffset = 0x1000,
- DAC960_PG_CommandIdentifierRegisterOffset = 0x1001,
- DAC960_PG_MailboxRegister2Offset = 0x1002,
- DAC960_PG_MailboxRegister3Offset = 0x1003,
- DAC960_PG_MailboxRegister4Offset = 0x1004,
- DAC960_PG_MailboxRegister5Offset = 0x1005,
- DAC960_PG_MailboxRegister6Offset = 0x1006,
- DAC960_PG_MailboxRegister7Offset = 0x1007,
- DAC960_PG_MailboxRegister8Offset = 0x1008,
- DAC960_PG_MailboxRegister9Offset = 0x1009,
- DAC960_PG_MailboxRegister10Offset = 0x100A,
- DAC960_PG_MailboxRegister11Offset = 0x100B,
- DAC960_PG_MailboxRegister12Offset = 0x100C,
- DAC960_PG_StatusCommandIdentifierRegOffset = 0x1018,
- DAC960_PG_StatusRegisterOffset = 0x101A,
- DAC960_PG_ErrorStatusRegisterOffset = 0x103F
-}
-DAC960_PG_RegisterOffsets_T;
-
-
-/*
- Define the structure of the DAC960 PG Series Inbound Door Bell Register.
-*/
-
-typedef union DAC960_PG_InboundDoorBellRegister
-{
- unsigned int All;
- struct {
- bool HardwareMailboxNewCommand:1; /* Bit 0 */
- bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
- bool GenerateInterrupt:1; /* Bit 2 */
- bool ControllerReset:1; /* Bit 3 */
- bool MemoryMailboxNewCommand:1; /* Bit 4 */
- unsigned int :27; /* Bits 5-31 */
- } Write;
- struct {
- bool HardwareMailboxFull:1; /* Bit 0 */
- bool InitializationInProgress:1; /* Bit 1 */
- unsigned int :30; /* Bits 2-31 */
- } Read;
-}
-DAC960_PG_InboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 PG Series Outbound Door Bell Register.
-*/
-
-typedef union DAC960_PG_OutboundDoorBellRegister
-{
- unsigned int All;
- struct {
- bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
- bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
- unsigned int :30; /* Bits 2-31 */
- } Write;
- struct {
- bool HardwareMailboxStatusAvailable:1; /* Bit 0 */
- bool MemoryMailboxStatusAvailable:1; /* Bit 1 */
- unsigned int :30; /* Bits 2-31 */
- } Read;
-}
-DAC960_PG_OutboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 PG Series Interrupt Mask Register.
-*/
-
-typedef union DAC960_PG_InterruptMaskRegister
-{
- unsigned int All;
- struct {
- unsigned int MessageUnitInterruptMask1:2; /* Bits 0-1 */
- bool DisableInterrupts:1; /* Bit 2 */
- unsigned int MessageUnitInterruptMask2:5; /* Bits 3-7 */
- unsigned int Reserved0:24; /* Bits 8-31 */
- } Bits;
-}
-DAC960_PG_InterruptMaskRegister_T;
-
-
-/*
- Define the structure of the DAC960 PG Series Error Status Register.
-*/
-
-typedef union DAC960_PG_ErrorStatusRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool ErrorStatusPending:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_PG_ErrorStatusRegister_T;
-
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 PG Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_PG_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_PG_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readl(ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.HardwareMailboxFull;
-}
-
-static inline
-bool DAC960_PG_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readl(ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.InitializationInProgress;
-}
-
-static inline
-void DAC960_PG_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_PG_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readl(ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
-}
-
-static inline
-bool DAC960_PG_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readl(ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
-}
-
-static inline
-void DAC960_PG_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0;
- InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3;
- InterruptMaskRegister.Bits.DisableInterrupts = false;
- InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F;
- writel(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
-}
-
-static inline
-void DAC960_PG_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0;
- InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3;
- InterruptMaskRegister.Bits.DisableInterrupts = true;
- InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F;
- writel(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
-}
-
-static inline
-bool DAC960_PG_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All =
- readl(ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
- return !InterruptMaskRegister.Bits.DisableInterrupts;
-}
-
-static inline
-void DAC960_PG_WriteCommandMailbox(DAC960_V1_CommandMailbox_T
- *MemoryCommandMailbox,
- DAC960_V1_CommandMailbox_T
- *CommandMailbox)
-{
- MemoryCommandMailbox->Words[1] = CommandMailbox->Words[1];
- MemoryCommandMailbox->Words[2] = CommandMailbox->Words[2];
- MemoryCommandMailbox->Words[3] = CommandMailbox->Words[3];
- wmb();
- MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
- mb();
-}
-
-static inline
-void DAC960_PG_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
- DAC960_V1_CommandMailbox_T *CommandMailbox)
-{
- writel(CommandMailbox->Words[0],
- ControllerBaseAddress + DAC960_PG_CommandOpcodeRegisterOffset);
- writel(CommandMailbox->Words[1],
- ControllerBaseAddress + DAC960_PG_MailboxRegister4Offset);
- writel(CommandMailbox->Words[2],
- ControllerBaseAddress + DAC960_PG_MailboxRegister8Offset);
- writeb(CommandMailbox->Bytes[12],
- ControllerBaseAddress + DAC960_PG_MailboxRegister12Offset);
-}
-
-static inline DAC960_V1_CommandIdentifier_T
-DAC960_PG_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readb(ControllerBaseAddress
- + DAC960_PG_StatusCommandIdentifierRegOffset);
-}
-
-static inline DAC960_V1_CommandStatus_T
-DAC960_PG_ReadStatusRegister(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_PG_StatusRegisterOffset);
-}
-
-static inline bool
-DAC960_PG_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_PG_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readb(ControllerBaseAddress + DAC960_PG_ErrorStatusRegisterOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_PG_CommandOpcodeRegisterOffset);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_PG_CommandIdentifierRegisterOffset);
- writeb(0, ControllerBaseAddress + DAC960_PG_ErrorStatusRegisterOffset);
- return true;
-}
-
-/*
- Define the DAC960 PD Series Controller Interface Register Offsets.
-*/
-
-#define DAC960_PD_RegisterWindowSize 0x80
-
-typedef enum
-{
- DAC960_PD_CommandOpcodeRegisterOffset = 0x00,
- DAC960_PD_CommandIdentifierRegisterOffset = 0x01,
- DAC960_PD_MailboxRegister2Offset = 0x02,
- DAC960_PD_MailboxRegister3Offset = 0x03,
- DAC960_PD_MailboxRegister4Offset = 0x04,
- DAC960_PD_MailboxRegister5Offset = 0x05,
- DAC960_PD_MailboxRegister6Offset = 0x06,
- DAC960_PD_MailboxRegister7Offset = 0x07,
- DAC960_PD_MailboxRegister8Offset = 0x08,
- DAC960_PD_MailboxRegister9Offset = 0x09,
- DAC960_PD_MailboxRegister10Offset = 0x0A,
- DAC960_PD_MailboxRegister11Offset = 0x0B,
- DAC960_PD_MailboxRegister12Offset = 0x0C,
- DAC960_PD_StatusCommandIdentifierRegOffset = 0x0D,
- DAC960_PD_StatusRegisterOffset = 0x0E,
- DAC960_PD_ErrorStatusRegisterOffset = 0x3F,
- DAC960_PD_InboundDoorBellRegisterOffset = 0x40,
- DAC960_PD_OutboundDoorBellRegisterOffset = 0x41,
- DAC960_PD_InterruptEnableRegisterOffset = 0x43
-}
-DAC960_PD_RegisterOffsets_T;
-
-
-/*
- Define the structure of the DAC960 PD Series Inbound Door Bell Register.
-*/
-
-typedef union DAC960_PD_InboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool NewCommand:1; /* Bit 0 */
- bool AcknowledgeStatus:1; /* Bit 1 */
- bool GenerateInterrupt:1; /* Bit 2 */
- bool ControllerReset:1; /* Bit 3 */
- unsigned char :4; /* Bits 4-7 */
- } Write;
- struct {
- bool MailboxFull:1; /* Bit 0 */
- bool InitializationInProgress:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_PD_InboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 PD Series Outbound Door Bell Register.
-*/
-
-typedef union DAC960_PD_OutboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool AcknowledgeInterrupt:1; /* Bit 0 */
- unsigned char :7; /* Bits 1-7 */
- } Write;
- struct {
- bool StatusAvailable:1; /* Bit 0 */
- unsigned char :7; /* Bits 1-7 */
- } Read;
-}
-DAC960_PD_OutboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 PD Series Interrupt Enable Register.
-*/
-
-typedef union DAC960_PD_InterruptEnableRegister
-{
- unsigned char All;
- struct {
- bool EnableInterrupts:1; /* Bit 0 */
- unsigned char :7; /* Bits 1-7 */
- } Bits;
-}
-DAC960_PD_InterruptEnableRegister_T;
-
-
-/*
- Define the structure of the DAC960 PD Series Error Status Register.
-*/
-
-typedef union DAC960_PD_ErrorStatusRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool ErrorStatusPending:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_PD_ErrorStatusRegister_T;
-
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 PD Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_PD_NewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.NewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PD_AcknowledgeStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeStatus = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PD_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PD_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_PD_MailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.MailboxFull;
-}
-
-static inline
-bool DAC960_PD_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.InitializationInProgress;
-}
-
-static inline
-void DAC960_PD_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PD_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_PD_StatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_PD_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.StatusAvailable;
-}
-
-static inline
-void DAC960_PD_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
- InterruptEnableRegister.All = 0;
- InterruptEnableRegister.Bits.EnableInterrupts = true;
- writeb(InterruptEnableRegister.All,
- ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
-}
-
-static inline
-void DAC960_PD_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
- InterruptEnableRegister.All = 0;
- InterruptEnableRegister.Bits.EnableInterrupts = false;
- writeb(InterruptEnableRegister.All,
- ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
-}
-
-static inline
-bool DAC960_PD_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
- InterruptEnableRegister.All =
- readb(ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
- return InterruptEnableRegister.Bits.EnableInterrupts;
-}
-
-static inline
-void DAC960_PD_WriteCommandMailbox(void __iomem *ControllerBaseAddress,
- DAC960_V1_CommandMailbox_T *CommandMailbox)
-{
- writel(CommandMailbox->Words[0],
- ControllerBaseAddress + DAC960_PD_CommandOpcodeRegisterOffset);
- writel(CommandMailbox->Words[1],
- ControllerBaseAddress + DAC960_PD_MailboxRegister4Offset);
- writel(CommandMailbox->Words[2],
- ControllerBaseAddress + DAC960_PD_MailboxRegister8Offset);
- writeb(CommandMailbox->Bytes[12],
- ControllerBaseAddress + DAC960_PD_MailboxRegister12Offset);
-}
-
-static inline DAC960_V1_CommandIdentifier_T
-DAC960_PD_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readb(ControllerBaseAddress
- + DAC960_PD_StatusCommandIdentifierRegOffset);
-}
-
-static inline DAC960_V1_CommandStatus_T
-DAC960_PD_ReadStatusRegister(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_PD_StatusRegisterOffset);
-}
-
-static inline bool
-DAC960_PD_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_PD_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readb(ControllerBaseAddress + DAC960_PD_ErrorStatusRegisterOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_PD_CommandOpcodeRegisterOffset);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_PD_CommandIdentifierRegisterOffset);
- writeb(0, ControllerBaseAddress + DAC960_PD_ErrorStatusRegisterOffset);
- return true;
-}
-
-static inline void DAC960_P_To_PD_TranslateEnquiry(void *Enquiry)
-{
- memcpy(Enquiry + 132, Enquiry + 36, 64);
- memset(Enquiry + 36, 0, 96);
-}
-
-static inline void DAC960_P_To_PD_TranslateDeviceState(void *DeviceState)
-{
- memcpy(DeviceState + 2, DeviceState + 3, 1);
- memmove(DeviceState + 4, DeviceState + 5, 2);
- memmove(DeviceState + 6, DeviceState + 8, 4);
-}
-
-static inline
-void DAC960_PD_To_P_TranslateReadWriteCommand(DAC960_V1_CommandMailbox_T
- *CommandMailbox)
-{
- int LogicalDriveNumber = CommandMailbox->Type5.LD.LogicalDriveNumber;
- CommandMailbox->Bytes[3] &= 0x7;
- CommandMailbox->Bytes[3] |= CommandMailbox->Bytes[7] << 6;
- CommandMailbox->Bytes[7] = LogicalDriveNumber;
-}
-
-static inline
-void DAC960_P_To_PD_TranslateReadWriteCommand(DAC960_V1_CommandMailbox_T
- *CommandMailbox)
-{
- int LogicalDriveNumber = CommandMailbox->Bytes[7];
- CommandMailbox->Bytes[7] = CommandMailbox->Bytes[3] >> 6;
- CommandMailbox->Bytes[3] &= 0x7;
- CommandMailbox->Bytes[3] |= LogicalDriveNumber << 3;
-}
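
[Editor's note: these two helpers repack the read/write mailbox between the P-series layout (logical drive number in byte 7, two high transfer bits in bits 6-7 of byte 3) and the PD layout (drive number starting at bit 3 of byte 3, high bits in byte 7). A standalone round-trip check of the byte moves, illustrative only; plain bytes stand in for DAC960_V1_CommandMailbox_T:

	#include <assert.h>

	static void pd_to_p(unsigned char b[13], int ld)
	{
		b[3] = (b[3] & 0x7) | (b[7] << 6);
		b[7] = ld;
	}

	static void p_to_pd(unsigned char b[13])
	{
		int ld = b[7];

		b[7] = b[3] >> 6;
		b[3] = (b[3] & 0x7) | (ld << 3);
	}

	int main(void)
	{
		unsigned char b[13] = { 0 };

		b[3] = 0x5 | (3 << 3);	/* low bits 0x5, drive 3, PD form */
		b[7] = 0x2;		/* two high transfer bits */
		pd_to_p(b, 3);
		p_to_pd(b);
		/* round trip restores the PD layout */
		assert(b[3] == (0x5 | (3 << 3)) && b[7] == 0x2);
		return 0;
	}
]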
-
-
-/*
- Define prototypes for the forward referenced DAC960 Driver Internal Functions.
-*/
-
-static void DAC960_FinalizeController(DAC960_Controller_T *);
-static void DAC960_V1_QueueReadWriteCommand(DAC960_Command_T *);
-static void DAC960_V2_QueueReadWriteCommand(DAC960_Command_T *);
-static void DAC960_RequestFunction(struct request_queue *);
-static irqreturn_t DAC960_BA_InterruptHandler(int, void *);
-static irqreturn_t DAC960_LP_InterruptHandler(int, void *);
-static irqreturn_t DAC960_LA_InterruptHandler(int, void *);
-static irqreturn_t DAC960_PG_InterruptHandler(int, void *);
-static irqreturn_t DAC960_PD_InterruptHandler(int, void *);
-static irqreturn_t DAC960_P_InterruptHandler(int, void *);
-static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *);
-static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *);
-static void DAC960_MonitoringTimerFunction(struct timer_list *);
-static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *,
- DAC960_Controller_T *, ...);
-static void DAC960_CreateProcEntries(DAC960_Controller_T *);
-static void DAC960_DestroyProcEntries(DAC960_Controller_T *);
-
-#endif /* DAC960_DriverVersion */
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index d4913516823f..20bb4bfa4be6 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -121,18 +121,6 @@ source "drivers/block/mtip32xx/Kconfig"
source "drivers/block/zram/Kconfig"
-config BLK_DEV_DAC960
- tristate "Mylex DAC960/DAC1100 PCI RAID Controller support"
- depends on PCI
- help
- This driver adds support for the Mylex DAC960, AcceleRAID, and
- eXtremeRAID PCI RAID controllers. See the file
- <file:Documentation/blockdev/README.DAC960> for further information
- about this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called DAC960.
-
config BLK_DEV_UMEM
tristate "Micro Memory MM5415 Battery Backed RAM support"
depends on PCI
@@ -461,7 +449,6 @@ config BLK_DEV_RBD
select LIBCRC32C
select CRYPTO_AES
select CRYPTO
- default n
help
Say Y here if you want include the Rados block device, which stripes
a block device over objects stored in the Ceph distributed object
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 8566b188368b..a53cc1e3a2d3 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 3aaf6af3ec23..bf996bd44cfc 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -61,10 +61,8 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mutex.h>
-#include <linux/amifdreg.h>
-#include <linux/amifd.h>
#include <linux/fs.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -87,6 +85,126 @@
*/
/*
+ * CIAAPRA bits (read only)
+ */
+
+#define DSKRDY (0x1<<5) /* disk ready when low */
+#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
+#define DSKPROT (0x1<<3) /* disk protected when low */
+#define DSKCHANGE (0x1<<2) /* low when disk removed */
+
+/*
+ * CIAAPRB bits (read/write)
+ */
+
+#define DSKMOTOR (0x1<<7) /* motor on when low */
+#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
+#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
+#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
+#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
+#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
+#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
+#define DSKSTEP (0x1) /* pulse low to step head 1 track */
+
+/*
+ * DSKBYTR bits (read only)
+ */
+
+#define DSKBYT (1<<15) /* register contains valid byte when set */
+#define DMAON (1<<14) /* disk DMA enabled */
+#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
+#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
+/* bits 7-0 are data */
+
+/*
+ * ADKCON/ADKCONR bits
+ */
+
+#ifndef SETCLR
+#define ADK_SETCLR (1<<15) /* control bit */
+#endif
+#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
+#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
+#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
+#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
+#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
+#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
+
+/*
+ * DSKLEN bits
+ */
+
+#define DSKLEN_DMAEN (1<<15)
+#define DSKLEN_WRITE (1<<14)
+
+/*
+ * INTENA/INTREQ bits
+ */
+
+#define DSKINDEX (0x1<<4) /* DSKINDEX bit */
+
+/*
+ * Misc
+ */
+
+#define MFM_SYNC 0x4489 /* standard MFM sync value */
+
+/* Values for FD_COMMAND */
+#define FD_RECALIBRATE 0x07 /* move to track 0 */
+#define FD_SEEK 0x0F /* seek track */
+#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
+#define FD_WRITE 0xC5 /* write with MT, MFM */
+#define FD_SENSEI 0x08 /* Sense Interrupt Status */
+#define FD_SPECIFY 0x03 /* specify HUT etc */
+#define FD_FORMAT 0x4D /* format one track */
+#define FD_VERSION 0x10 /* get version code */
+#define FD_CONFIGURE 0x13 /* configure FIFO operation */
+#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
+
+#define FD_MAX_UNITS 4 /* Max. Number of drives */
+#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
+
+struct fd_data_type {
+ char *name; /* description of data type */
+ int sects; /* sectors per track */
+ int (*read_fkt)(int); /* read whole track */
+ void (*write_fkt)(int); /* write whole track */
+};
+
+struct fd_drive_type {
+ unsigned long code; /* code returned from drive */
+ char *name; /* description of drive */
+ unsigned int tracks; /* number of tracks */
+ unsigned int heads; /* number of heads */
+ unsigned int read_size; /* raw read size for one track */
+ unsigned int write_size; /* raw write size for one track */
+ unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
+ unsigned int precomp1; /* start track for precomp 1 */
+ unsigned int precomp2; /* start track for precomp 2 */
+ unsigned int step_delay; /* time (in ms) for delay after step */
+ unsigned int settle_time; /* time to settle after dir change */
+ unsigned int side_time; /* time needed to change sides */
+};
+
+struct amiga_floppy_struct {
+ struct fd_drive_type *type; /* type of floppy for this unit */
+	struct fd_data_type *dtype;	/* sector layout of floppy for this unit */
+ int track; /* current track (-1 == unknown) */
+	unsigned char *trackbuf;	/* current track (kmalloc()'d) */
+
+ int blocks; /* total # blocks on disk */
+
+	int changed;			/* true when disk change state is unknown */
+ int disk; /* disk in drive (-1 == unknown) */
+ int motor; /* true when motor is at speed */
+ int busy; /* true when drive is active */
+ int dirty; /* true when trackbuf is not on disk */
+ int status; /* current error code for unit */
+ struct gendisk *gendisk;
+ struct blk_mq_tag_set tag_set;
+};
+
+/*
* Error codes
*/
#define FD_OK 0 /* operation succeeded */
@@ -164,7 +282,6 @@ static volatile int selected = -1; /* currently selected drive */
static int writepending;
static int writefromint;
static char *raw_buf;
-static int fdc_queue;
static DEFINE_SPINLOCK(amiflop_lock);
@@ -1337,76 +1454,20 @@ static int get_track(int drive, int track)
return -1;
}
-/*
- * Round-robin between our available drives, doing one request from each
- */
-static struct request *set_next_request(void)
-{
- struct request_queue *q;
- int cnt = FD_MAX_UNITS;
- struct request *rq = NULL;
-
- /* Find next queue we can dispatch from */
- fdc_queue = fdc_queue + 1;
- if (fdc_queue == FD_MAX_UNITS)
- fdc_queue = 0;
-
- for(cnt = FD_MAX_UNITS; cnt > 0; cnt--) {
-
- if (unit[fdc_queue].type->code == FD_NODRIVE) {
- if (++fdc_queue == FD_MAX_UNITS)
- fdc_queue = 0;
- continue;
- }
-
- q = unit[fdc_queue].gendisk->queue;
- if (q) {
- rq = blk_fetch_request(q);
- if (rq)
- break;
- }
-
- if (++fdc_queue == FD_MAX_UNITS)
- fdc_queue = 0;
- }
-
- return rq;
-}
-
-static void redo_fd_request(void)
+static blk_status_t amiflop_rw_cur_segment(struct amiga_floppy_struct *floppy,
+ struct request *rq)
{
- struct request *rq;
+ int drive = floppy - unit;
unsigned int cnt, block, track, sector;
- int drive;
- struct amiga_floppy_struct *floppy;
char *data;
- unsigned long flags;
- blk_status_t err;
-
-next_req:
- rq = set_next_request();
- if (!rq) {
- /* Nothing left to do */
- return;
- }
-
- floppy = rq->rq_disk->private_data;
- drive = floppy - unit;
-next_segment:
- /* Here someone could investigate to be more efficient */
- for (cnt = 0, err = BLK_STS_OK; cnt < blk_rq_cur_sectors(rq); cnt++) {
+ for (cnt = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
#ifdef DEBUG
printk("fd: sector %ld + %d requested for %s\n",
blk_rq_pos(rq), cnt,
(rq_data_dir(rq) == READ) ? "read" : "write");
#endif
block = blk_rq_pos(rq) + cnt;
- if ((int)block > floppy->blocks) {
- err = BLK_STS_IOERR;
- break;
- }
-
track = block / (floppy->dtype->sects * floppy->type->sect_mult);
sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
data = bio_data(rq->bio) + 512 * cnt;
@@ -1415,10 +1476,8 @@ next_segment:
"0x%08lx\n", track, sector, data);
#endif
- if (get_track(drive, track) == -1) {
- err = BLK_STS_IOERR;
- break;
- }
+ if (get_track(drive, track) == -1)
+ return BLK_STS_IOERR;
if (rq_data_dir(rq) == READ) {
memcpy(data, floppy->trackbuf + sector * 512, 512);
@@ -1426,31 +1485,40 @@ next_segment:
memcpy(floppy->trackbuf + sector * 512, data, 512);
/* keep the drive spinning while writes are scheduled */
- if (!fd_motor_on(drive)) {
- err = BLK_STS_IOERR;
- break;
- }
+ if (!fd_motor_on(drive))
+ return BLK_STS_IOERR;
/*
* setup a callback to write the track buffer
* after a short (1 tick) delay.
*/
- local_irq_save(flags);
-
floppy->dirty = 1;
/* reset the timer */
mod_timer (flush_track_timer + drive, jiffies + 1);
- local_irq_restore(flags);
}
}
- if (__blk_end_request_cur(rq, err))
- goto next_segment;
- goto next_req;
+ return BLK_STS_OK;
}
-static void do_fd_request(struct request_queue * q)
+static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- redo_fd_request();
+ struct request *rq = bd->rq;
+ struct amiga_floppy_struct *floppy = rq->rq_disk->private_data;
+ blk_status_t err;
+
+ if (!spin_trylock_irq(&amiflop_lock))
+ return BLK_STS_DEV_RESOURCE;
+
+ blk_mq_start_request(rq);
+
+ do {
+ err = amiflop_rw_cur_segment(floppy, rq);
+ } while (blk_update_request(rq, err, blk_rq_cur_bytes(rq)));
+ blk_mq_end_request(rq, err);
+
+ spin_unlock_irq(&amiflop_lock);
+ return BLK_STS_OK;
}
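
[Editor's note: this is the core of the blk-mq conversion: ->queue_rq owns one request at a time, and spin_trylock_irq() with BLK_STS_DEV_RESOURCE tells blk-mq the request could not be dispatched right now and should be retried. The completion shape, reduced to a sketch; process_cur_segment() is a hypothetical stand-in for the per-segment work:

	static blk_status_t queue_rq_sketch(struct blk_mq_hw_ctx *hctx,
					    const struct blk_mq_queue_data *bd)
	{
		struct request *rq = bd->rq;
		blk_status_t err;

		blk_mq_start_request(rq);
		do {
			err = process_cur_segment(rq);	/* hypothetical */
			/* blk_update_request() returns true while bytes remain */
		} while (blk_update_request(rq, err, blk_rq_cur_bytes(rq)));
		blk_mq_end_request(rq, err);
		return BLK_STS_OK;
	}
]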
static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1701,11 +1769,47 @@ static const struct block_device_operations floppy_fops = {
.check_events = amiga_check_events,
};
+static const struct blk_mq_ops amiflop_mq_ops = {
+ .queue_rq = amiflop_queue_rq,
+};
+
+static struct gendisk *fd_alloc_disk(int drive)
+{
+ struct gendisk *disk;
+
+ disk = alloc_disk(1);
+ if (!disk)
+ goto out;
+
+ disk->queue = blk_mq_init_sq_queue(&unit[drive].tag_set, &amiflop_mq_ops,
+ 2, BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(disk->queue)) {
+ disk->queue = NULL;
+ goto out_put_disk;
+ }
+
+ unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
+ if (!unit[drive].trackbuf)
+ goto out_cleanup_queue;
+
+ return disk;
+
+out_cleanup_queue:
+ blk_cleanup_queue(disk->queue);
+ disk->queue = NULL;
+ blk_mq_free_tag_set(&unit[drive].tag_set);
+out_put_disk:
+ put_disk(disk);
+out:
+ unit[drive].type->code = FD_NODRIVE;
+ return NULL;
+}
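
[Editor's note: fd_alloc_disk() pairs blk_mq_init_sq_queue() with the trackbuf allocation and unwinds both on failure. For symmetry, a hedged sketch of what the matching teardown would look like; the driver never unloads, so it carries no such path:

	static void fd_free_disk(int drive)
	{
		struct gendisk *disk = unit[drive].gendisk;

		if (!disk)
			return;
		kfree(unit[drive].trackbuf);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&unit[drive].tag_set);
		put_disk(disk);
	}
]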
+
static int __init fd_probe_drives(void)
{
int drive,drives,nomem;
- printk(KERN_INFO "FD: probing units\nfound ");
+ pr_info("FD: probing units\nfound");
drives=0;
nomem=0;
for(drive=0;drive<FD_MAX_UNITS;drive++) {
@@ -1713,27 +1817,17 @@ static int __init fd_probe_drives(void)
fd_probe(drive);
if (unit[drive].type->code == FD_NODRIVE)
continue;
- disk = alloc_disk(1);
+
+ disk = fd_alloc_disk(drive);
if (!disk) {
- unit[drive].type->code = FD_NODRIVE;
+ pr_cont(" no mem for fd%d", drive);
+ nomem = 1;
continue;
}
unit[drive].gendisk = disk;
-
- disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
- if (!disk->queue) {
- unit[drive].type->code = FD_NODRIVE;
- continue;
- }
-
drives++;
- if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) {
- printk("no mem for ");
- unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */
- drives--;
- nomem = 1;
- }
- printk("fd%d ",drive);
+
+ pr_cont(" fd%d",drive);
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive;
disk->fops = &floppy_fops;
@@ -1744,11 +1838,11 @@ static int __init fd_probe_drives(void)
}
if ((drives > 0) || (nomem == 0)) {
if (drives == 0)
- printk("no drives");
- printk("\n");
+ pr_cont(" no drives");
+ pr_cont("\n");
return drives;
}
- printk("\n");
+ pr_cont("\n");
return -ENOMEM;
}
@@ -1831,30 +1925,6 @@ out_blkdev:
return ret;
}
-#if 0 /* not safe to unload */
-static int __exit amiga_floppy_remove(struct platform_device *pdev)
-{
- int i;
-
- for( i = 0; i < FD_MAX_UNITS; i++) {
- if (unit[i].type->code != FD_NODRIVE) {
- struct request_queue *q = unit[i].gendisk->queue;
- del_gendisk(unit[i].gendisk);
- put_disk(unit[i].gendisk);
- kfree(unit[i].trackbuf);
- if (q)
- blk_cleanup_queue(q);
- }
- }
- blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
- free_irq(IRQ_AMIGA_CIAA_TB, NULL);
- free_irq(IRQ_AMIGA_DSKBLK, NULL);
- custom.dmacon = DMAF_DISK; /* disable DMA */
- amiga_chip_free(raw_buf);
- unregister_blkdev(FLOPPY_MAJOR, "fd");
-}
-#endif
-
static struct platform_driver amiga_floppy_driver = {
.driver = {
.name = "amiga-floppy",
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index c0ebda1283cc..7ca76ed2e71a 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,4 +1,6 @@
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
+#include <linux/blk-mq.h>
+
#define VERSION "85"
#define AOE_MAJOR 152
#define DEVICE_NAME "aoe"
@@ -164,6 +166,8 @@ struct aoedev {
struct gendisk *gd;
struct dentry *debugfs;
struct request_queue *blkq;
+ struct list_head rq_list;
+ struct blk_mq_tag_set tag_set;
struct hd_geometry geo;
sector_t ssize;
struct timer_list timer;
@@ -201,7 +205,6 @@ int aoeblk_init(void);
void aoeblk_exit(void);
void aoeblk_gdalloc(void *);
void aoedisk_rm_debugfs(struct aoedev *d);
-void aoedisk_rm_sysfs(struct aoedev *d);
int aoechr_init(void);
void aoechr_exit(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 429ebb84b592..ed26b7287256 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include <linux/hdreg.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
@@ -177,10 +177,15 @@ static struct attribute *aoe_attrs[] = {
NULL,
};
-static const struct attribute_group attr_group = {
+static const struct attribute_group aoe_attr_group = {
.attrs = aoe_attrs,
};
+static const struct attribute_group *aoe_attr_groups[] = {
+ &aoe_attr_group,
+ NULL,
+};
+
static const struct file_operations aoe_debugfs_fops = {
.open = aoe_debugfs_open,
.read = seq_read,
@@ -220,17 +225,6 @@ aoedisk_rm_debugfs(struct aoedev *d)
}
static int
-aoedisk_add_sysfs(struct aoedev *d)
-{
- return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
-}
-void
-aoedisk_rm_sysfs(struct aoedev *d)
-{
- sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
-}
-
-static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
struct aoedev *d = bdev->bd_disk->private_data;
@@ -274,23 +268,25 @@ aoeblk_release(struct gendisk *disk, fmode_t mode)
spin_unlock_irqrestore(&d->lock, flags);
}
-static void
-aoeblk_request(struct request_queue *q)
+static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct aoedev *d;
- struct request *rq;
+ struct aoedev *d = hctx->queue->queuedata;
+
+ spin_lock_irq(&d->lock);
- d = q->queuedata;
if ((d->flags & DEVFL_UP) == 0) {
pr_info_ratelimited("aoe: device %ld.%d is not up\n",
d->aoemajor, d->aoeminor);
- while ((rq = blk_peek_request(q))) {
- blk_start_request(rq);
- aoe_end_request(d, rq, 1);
- }
- return;
+ spin_unlock_irq(&d->lock);
+ blk_mq_start_request(bd->rq);
+ return BLK_STS_IOERR;
}
+
+ list_add_tail(&bd->rq->queuelist, &d->rq_list);
aoecmd_work(d);
+ spin_unlock_irq(&d->lock);
+ return BLK_STS_OK;
}
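
[Editor's note: unlike the floppy conversions, aoe cannot finish a request inside ->queue_rq: frames go out on the network and complete asynchronously. So the hook only parks the request on d->rq_list and kicks the command engine; nextbuf(), further down in this diff, pops the list and calls blk_mq_start_request(). The shape in isolation, with a hypothetical wrapper name:

	static blk_status_t park_and_kick(struct aoedev *d, struct request *rq)
	{
		unsigned long flags;

		spin_lock_irqsave(&d->lock, flags);
		list_add_tail(&rq->queuelist, &d->rq_list);
		aoecmd_work(d);		/* builds and transmits AoE frames */
		spin_unlock_irqrestore(&d->lock, flags);
		return BLK_STS_OK;
	}
]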
static int
@@ -345,6 +341,10 @@ static const struct block_device_operations aoe_bdops = {
.owner = THIS_MODULE,
};
+static const struct blk_mq_ops aoeblk_mq_ops = {
+ .queue_rq = aoeblk_queue_rq,
+};
+
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
@@ -353,9 +353,11 @@ aoeblk_gdalloc(void *vp)
struct gendisk *gd;
mempool_t *mp;
struct request_queue *q;
+ struct blk_mq_tag_set *set;
enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
ulong flags;
int late = 0;
+ int err;
spin_lock_irqsave(&d->lock, flags);
if (d->flags & DEVFL_GDALLOC
@@ -382,10 +384,25 @@ aoeblk_gdalloc(void *vp)
d->aoemajor, d->aoeminor);
goto err_disk;
}
- q = blk_init_queue(aoeblk_request, &d->lock);
- if (q == NULL) {
+
+ set = &d->tag_set;
+ set->ops = &aoeblk_mq_ops;
+ set->nr_hw_queues = 1;
+ set->queue_depth = 128;
+ set->numa_node = NUMA_NO_NODE;
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ err = blk_mq_alloc_tag_set(set);
+ if (err) {
+ pr_err("aoe: cannot allocate tag set for %ld.%d\n",
+ d->aoemajor, d->aoeminor);
+ goto err_mempool;
+ }
+
+ q = blk_mq_init_queue(set);
+ if (IS_ERR(q)) {
pr_err("aoe: cannot allocate block queue for %ld.%d\n",
d->aoemajor, d->aoeminor);
+ blk_mq_free_tag_set(set);
goto err_mempool;
}
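
[Editor's note: aoe sizes and flags its tag set by hand because it wants a queue depth of 128; the single-queue drivers in this series use the blk_mq_init_sq_queue() shorthand instead, which wraps roughly the following (sketch based on my reading of the helper; the real one also frees the tag set if queue creation fails):

	static struct request_queue *sq_queue_sketch(struct blk_mq_tag_set *set,
						     const struct blk_mq_ops *ops,
						     unsigned int depth)
	{
		int err;

		memset(set, 0, sizeof(*set));
		set->ops = ops;
		set->nr_hw_queues = 1;
		set->queue_depth = depth;
		set->numa_node = NUMA_NO_NODE;
		set->flags = BLK_MQ_F_SHOULD_MERGE;

		err = blk_mq_alloc_tag_set(set);
		if (err)
			return ERR_PTR(err);
		return blk_mq_init_queue(set);
	}
]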
@@ -417,8 +434,7 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
- add_disk(gd);
- aoedisk_add_sysfs(d);
+ device_add_disk(NULL, gd, aoe_attr_groups);
aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 136dc507d020..bb2fba651bd2 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -7,7 +7,7 @@
#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
@@ -813,7 +813,7 @@ rexmit_timer(struct timer_list *timer)
out:
if ((d->flags & DEVFL_KICKME) && d->blkq) {
d->flags &= ~DEVFL_KICKME;
- d->blkq->request_fn(d->blkq);
+ blk_mq_run_hw_queues(d->blkq, true);
}
d->timer.expires = jiffies + TIMERTICK;
@@ -857,10 +857,12 @@ nextbuf(struct aoedev *d)
return d->ip.buf;
rq = d->ip.rq;
if (rq == NULL) {
- rq = blk_peek_request(q);
+ rq = list_first_entry_or_null(&d->rq_list, struct request,
+ queuelist);
if (rq == NULL)
return NULL;
- blk_start_request(rq);
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
d->ip.rq = rq;
d->ip.nxbio = rq->bio;
rq->special = (void *) rqbiocnt(rq);
@@ -1045,6 +1047,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
struct bio *bio;
int bok;
struct request_queue *q;
+ blk_status_t err = BLK_STS_OK;
q = d->blkq;
if (rq == d->ip.rq)
@@ -1052,11 +1055,15 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
do {
bio = rq->bio;
bok = !fastfail && !bio->bi_status;
- } while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
+ if (!bok)
+ err = BLK_STS_IOERR;
+ } while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
+
+ __blk_mq_end_request(rq, err);
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
- __blk_run_queue(q);
+ blk_mq_run_hw_queues(q, true);
}
static void
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 41060e9cedf2..9063f8efbd3b 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -5,7 +5,7 @@
*/
#include <linux/hdreg.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -197,7 +197,6 @@ aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
struct list_head *head, *pos, *nx;
- struct request *rq;
int i;
d->flags &= ~DEVFL_UP;
@@ -225,10 +224,11 @@ aoedev_downdev(struct aoedev *d)
/* fast fail all pending I/O */
if (d->blkq) {
- while ((rq = blk_peek_request(d->blkq))) {
- blk_start_request(rq);
- aoe_end_request(d, rq, 1);
- }
+		/* UP is cleared, freeze+quiesce to ensure all are errored */
+ blk_mq_freeze_queue(d->blkq);
+ blk_mq_quiesce_queue(d->blkq);
+ blk_mq_unquiesce_queue(d->blkq);
+ blk_mq_unfreeze_queue(d->blkq);
}
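
[Editor's note: the freeze+quiesce pair replaces the old peek-and-fail loop: freezing waits for requests already in flight, quiescing synchronizes against any ->queue_rq still running, so by the time both return every pending request has been pushed through aoeblk_queue_rq() and, with DEVFL_UP clear, failed with BLK_STS_IOERR. As a reusable idiom, hedged to my reading of the blk-mq API:

	static void flush_pending_io(struct request_queue *q)
	{
		blk_mq_freeze_queue(q);		/* drain in-flight requests */
		blk_mq_quiesce_queue(q);	/* wait out concurrent ->queue_rq */
		blk_mq_unquiesce_queue(q);
		blk_mq_unfreeze_queue(q);
	}
]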
if (d->gd)
@@ -275,9 +275,9 @@ freedev(struct aoedev *d)
del_timer_sync(&d->timer);
if (d->gd) {
aoedisk_rm_debugfs(d);
- aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
put_disk(d->gd);
+ blk_mq_free_tag_set(&d->tag_set);
blk_cleanup_queue(d->blkq);
}
t = d->targets;
@@ -464,6 +464,7 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
d->ntargets = NTARGETS;
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
+ INIT_LIST_HEAD(&d->rq_list);
skb_queue_head_init(&d->skbpool);
timer_setup(&d->timer, dummy_timer, 0);
d->timer.expires = jiffies + HZ;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index dfb2c2622e5a..f88b4c26d422 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -66,13 +66,11 @@
#include <linux/fd.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/wait.h>
-#include <asm/atafd.h>
-#include <asm/atafdreg.h>
#include <asm/atariints.h>
#include <asm/atari_stdma.h>
#include <asm/atari_stram.h>
@@ -83,7 +81,87 @@
static DEFINE_MUTEX(ataflop_mutex);
static struct request *fd_request;
-static int fdc_queue;
+
+/*
+ * WD1772 stuff
+ */
+
+/* register codes */
+
+#define FDCSELREG_STP (0x80) /* command/status register */
+#define FDCSELREG_TRA (0x82) /* track register */
+#define FDCSELREG_SEC (0x84) /* sector register */
+#define FDCSELREG_DTA (0x86) /* data register */
+
+/* register names for FDC_READ/WRITE macros */
+
+#define FDCREG_CMD 0
+#define FDCREG_STATUS 0
+#define FDCREG_TRACK 2
+#define FDCREG_SECTOR 4
+#define FDCREG_DATA 6
+
+/* command opcodes */
+
+#define FDCCMD_RESTORE (0x00) /* - */
+#define FDCCMD_SEEK (0x10) /* | */
+#define FDCCMD_STEP (0x20) /* | TYP 1 Commands */
+#define FDCCMD_STIN (0x40) /* | */
+#define FDCCMD_STOT (0x60) /* - */
+#define FDCCMD_RDSEC (0x80) /* - TYP 2 Commands */
+#define FDCCMD_WRSEC (0xa0) /* - " */
+#define FDCCMD_RDADR (0xc0) /* - */
+#define FDCCMD_RDTRA (0xe0) /* | TYP 3 Commands */
+#define FDCCMD_WRTRA (0xf0) /* - */
+#define FDCCMD_FORCI (0xd0) /* - TYP 4 Command */
+
+/* command modifier bits */
+
+#define FDCCMDADD_SR6 (0x00) /* step rate settings */
+#define FDCCMDADD_SR12 (0x01)
+#define FDCCMDADD_SR2 (0x02)
+#define FDCCMDADD_SR3 (0x03)
+#define FDCCMDADD_V (0x04) /* verify */
+#define FDCCMDADD_H (0x08) /* wait for spin-up */
+#define FDCCMDADD_U (0x10) /* update track register */
+#define FDCCMDADD_M (0x10) /* multiple sector access */
+#define FDCCMDADD_E (0x04) /* head settling flag */
+#define FDCCMDADD_P (0x02) /* precompensation off */
+#define FDCCMDADD_A0 (0x01) /* DAM flag */
+
+/* status register bits */
+
+#define FDCSTAT_MOTORON (0x80) /* motor on */
+#define FDCSTAT_WPROT (0x40) /* write protected (FDCCMD_WR*) */
+#define FDCSTAT_SPINUP (0x20) /* motor speed stable (Type I) */
+#define FDCSTAT_DELDAM (0x20) /* sector has deleted DAM (Type II+III) */
+#define FDCSTAT_RECNF (0x10) /* record not found */
+#define FDCSTAT_CRC (0x08) /* CRC error */
+#define FDCSTAT_TR00 (0x04) /* Track 00 flag (Type I) */
+#define FDCSTAT_LOST (0x04) /* Lost Data (Type II+III) */
+#define FDCSTAT_IDX (0x02) /* Index status (Type I) */
+#define FDCSTAT_DRQ (0x02) /* DRQ status (Type II+III) */
+#define FDCSTAT_BUSY (0x01) /* FDC is busy */
+
+
+/* PSG Port A bit 0: side select (0 -> side 1, 1 -> side 2) */
+#define DSKSIDE (0x01)
+
+#define DSKDRVNONE (0x06)
+#define DSKDRV0 (0x02)
+#define DSKDRV1 (0x04)
+
+/* step rates */
+#define FDCSTEP_6 0x00
+#define FDCSTEP_12 0x01
+#define FDCSTEP_2 0x02
+#define FDCSTEP_3 0x03
+
+struct atari_format_descr {
+ int track; /* to be formatted */
+ int head; /* "" "" */
+ int sect_offset; /* offset of first sector */
+};
/* Disk types: DD, HD, ED */
static struct atari_disk_type {
@@ -221,6 +299,7 @@ static struct atari_floppy_struct {
struct gendisk *disk;
int ref;
int type;
+ struct blk_mq_tag_set tag_set;
} unit[FD_MAX_UNITS];
#define UD unit[drive]
@@ -300,9 +379,6 @@ static int IsFormatting = 0, FormatError;
static int UserSteprate[FD_MAX_UNITS] = { -1, -1 };
module_param_array(UserSteprate, int, NULL, 0);
-/* Synchronization of FDC access. */
-static volatile int fdc_busy = 0;
-static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_COMPLETION(format_wait);
static unsigned long changed_floppies = 0xff, fake_change = 0;
@@ -362,7 +438,6 @@ static void fd_times_out(struct timer_list *unused);
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
-static void redo_fd_request( void);
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
cmd, unsigned long param);
static void fd_probe( int drive );
@@ -380,8 +455,11 @@ static DEFINE_TIMER(fd_timer, check_change);
static void fd_end_request_cur(blk_status_t err)
{
- if (!__blk_end_request_cur(fd_request, err))
+ if (!blk_update_request(fd_request, err,
+ blk_rq_cur_bytes(fd_request))) {
+ __blk_mq_end_request(fd_request, err);
fd_request = NULL;
+ }
}
static inline void start_motor_off_timer(void)
@@ -627,7 +705,6 @@ static void fd_error( void )
if (SelectedDrive != -1)
SUD.track = -1;
}
- redo_fd_request();
}
@@ -645,14 +722,15 @@ static void fd_error( void )
static int do_format(int drive, int type, struct atari_format_descr *desc)
{
+ struct request_queue *q = unit[drive].disk->queue;
unsigned char *p;
int sect, nsect;
unsigned long flags;
+ int ret;
- DPRINT(("do_format( dr=%d tr=%d he=%d offs=%d )\n",
- drive, desc->track, desc->head, desc->sect_offset ));
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
- wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
local_irq_save(flags);
stdma_lock(floppy_irq, NULL);
atari_turnon_irq( IRQ_MFP_FDC ); /* should be already, just to be sure */
@@ -661,16 +739,16 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
if (type) {
if (--type >= NUM_DISK_MINORS ||
minor2disktype[type].drive_types > DriveType) {
- redo_fd_request();
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
type = minor2disktype[type].index;
UDT = &atari_disk_type[type];
}
if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
- redo_fd_request();
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
nsect = UDT->spt;
@@ -709,8 +787,11 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
wait_for_completion(&format_wait);
- redo_fd_request();
- return( FormatError ? -EIO : 0 );
+ ret = FormatError ? -EIO : 0;
+out:
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
+ return ret;
}
@@ -740,7 +821,6 @@ static void do_fd_action( int drive )
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
- redo_fd_request();
return;
}
}
@@ -1145,7 +1225,6 @@ static void fd_rwsec_done1(int status)
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
- redo_fd_request();
}
return;
@@ -1303,8 +1382,6 @@ static void finish_fdc_done( int dummy )
local_irq_save(flags);
stdma_release();
- fdc_busy = 0;
- wake_up( &fdc_wait );
local_irq_restore(flags);
DPRINT(("finish_fdc() finished\n"));
@@ -1394,59 +1471,34 @@ static void setup_req_params( int drive )
ReqTrack, ReqSector, (unsigned long)ReqData ));
}
-/*
- * Round-robin between our available drives, doing one request from each
- */
-static struct request *set_next_request(void)
+static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request_queue *q;
- int old_pos = fdc_queue;
- struct request *rq = NULL;
-
- do {
- q = unit[fdc_queue].disk->queue;
- if (++fdc_queue == FD_MAX_UNITS)
- fdc_queue = 0;
- if (q) {
- rq = blk_fetch_request(q);
- if (rq) {
- rq->error_count = 0;
- break;
- }
- }
- } while (fdc_queue != old_pos);
-
- return rq;
-}
-
+ struct atari_floppy_struct *floppy = bd->rq->rq_disk->private_data;
+ int drive = floppy - unit;
+ int type = floppy->type;
-static void redo_fd_request(void)
-{
- int drive, type;
- struct atari_floppy_struct *floppy;
+ spin_lock_irq(&ataflop_lock);
+ if (fd_request) {
+ spin_unlock_irq(&ataflop_lock);
+ return BLK_STS_DEV_RESOURCE;
+ }
+ if (!stdma_try_lock(floppy_irq, NULL)) {
+ spin_unlock_irq(&ataflop_lock);
+ return BLK_STS_RESOURCE;
+ }
+ fd_request = bd->rq;
+ blk_mq_start_request(fd_request);
- DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
- fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
- fd_request ? blk_rq_pos(fd_request) : 0 ));
+ atari_disable_irq( IRQ_MFP_FDC );
IsFormatting = 0;
-repeat:
- if (!fd_request) {
- fd_request = set_next_request();
- if (!fd_request)
- goto the_end;
- }
-
- floppy = fd_request->rq_disk->private_data;
- drive = floppy - unit;
- type = floppy->type;
-
if (!UD.connected) {
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
fd_end_request_cur(BLK_STS_IOERR);
- goto repeat;
+ goto out;
}
if (type == 0) {
@@ -1462,23 +1514,18 @@ repeat:
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
- goto repeat;
+ goto out;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
- goto repeat;
+ goto out;
}
type = minor2disktype[type].index;
UDT = &atari_disk_type[type];
set_capacity(floppy->disk, UDT->blocks);
UD.autoprobe = 0;
}
-
- if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
- fd_end_request_cur(BLK_STS_IOERR);
- goto repeat;
- }
/* stop deselect timer */
del_timer( &motor_off_timer );
@@ -1490,22 +1537,13 @@ repeat:
setup_req_params( drive );
do_fd_action( drive );
- return;
-
- the_end:
- finish_fdc();
-}
-
-
-void do_fd_request(struct request_queue * q)
-{
- DPRINT(("do_fd_request for pid %d\n",current->pid));
- wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
- stdma_lock(floppy_irq, NULL);
-
- atari_disable_irq( IRQ_MFP_FDC );
- redo_fd_request();
+ if (bd->last)
+ finish_fdc();
atari_enable_irq( IRQ_MFP_FDC );
+
+out:
+ spin_unlock_irq(&ataflop_lock);
+ return BLK_STS_OK;
}
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
@@ -1583,7 +1621,6 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* what if type > 0 here? Overwrite specified entry ? */
if (type) {
/* refuse to re-set a predefined type for now */
- redo_fd_request();
return -EINVAL;
}
@@ -1651,10 +1688,8 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* sanity check */
if (setprm.track != dtp->blocks/dtp->spt/2 ||
- setprm.head != 2) {
- redo_fd_request();
+ setprm.head != 2)
return -EINVAL;
- }
UDT = dtp;
set_capacity(floppy->disk, UDT->blocks);
@@ -1910,6 +1945,10 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
+static const struct blk_mq_ops ataflop_mq_ops = {
+ .queue_rq = ataflop_queue_rq,
+};
+
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
{
int drive = *part & 3;
@@ -1923,6 +1962,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
static int __init atari_floppy_init (void)
{
int i;
+ int ret;
if (!MACH_IS_ATARI)
/* Amiga, Mac, ... don't have Atari-compatible floppy :-) */
@@ -1933,8 +1973,19 @@ static int __init atari_floppy_init (void)
for (i = 0; i < FD_MAX_UNITS; i++) {
unit[i].disk = alloc_disk(1);
- if (!unit[i].disk)
- goto Enomem;
+ if (!unit[i].disk) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ unit[i].disk->queue = blk_mq_init_sq_queue(&unit[i].tag_set,
+ &ataflop_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(unit[i].disk->queue)) {
+ ret = PTR_ERR(unit[i].disk->queue);
+ unit[i].disk->queue = NULL;
+ goto err;
+ }
}
if (UseTrackbuffer < 0)
@@ -1951,7 +2002,8 @@ static int __init atari_floppy_init (void)
DMABuffer = atari_stram_alloc(BUFFER_SIZE+512, "ataflop");
if (!DMABuffer) {
printk(KERN_ERR "atari_floppy_init: cannot get dma buffer\n");
- goto Enomem;
+ ret = -ENOMEM;
+ goto err;
}
TrackBuffer = DMABuffer + 512;
PhysDMABuffer = atari_stram_to_phys(DMABuffer);
@@ -1966,10 +2018,6 @@ static int __init atari_floppy_init (void)
sprintf(unit[i].disk->disk_name, "fd%d", i);
unit[i].disk->fops = &floppy_fops;
unit[i].disk->private_data = &unit[i];
- unit[i].disk->queue = blk_init_queue(do_fd_request,
- &ataflop_lock);
- if (!unit[i].disk->queue)
- goto Enomem;
set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
add_disk(unit[i].disk);
}
@@ -1983,17 +2031,23 @@ static int __init atari_floppy_init (void)
config_types();
return 0;
-Enomem:
- while (i--) {
- struct request_queue *q = unit[i].disk->queue;
- put_disk(unit[i].disk);
- if (q)
- blk_cleanup_queue(q);
- }
+err:
+ do {
+ struct gendisk *disk = unit[i].disk;
+
+ if (disk) {
+ if (disk->queue) {
+ blk_cleanup_queue(disk->queue);
+ disk->queue = NULL;
+ }
+ blk_mq_free_tag_set(&unit[i].tag_set);
+ put_disk(unit[i].disk);
+ }
+ } while (i--);
unregister_blkdev(FLOPPY_MAJOR, "fd");
- return -ENOMEM;
+ return ret;
}
#ifndef MODULE
@@ -2040,11 +2094,10 @@ static void __exit atari_floppy_exit(void)
int i;
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
for (i = 0; i < FD_MAX_UNITS; i++) {
- struct request_queue *q = unit[i].disk->queue;
-
del_gendisk(unit[i].disk);
+ blk_cleanup_queue(unit[i].disk->queue);
+ blk_mq_free_tag_set(&unit[i].tag_set);
put_disk(unit[i].disk);
- blk_cleanup_queue(q);
}
unregister_blkdev(FLOPPY_MAJOR, "fd");
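
[Annotation] Note the two distinct busy returns in ataflop_queue_rq() above. BLK_STS_DEV_RESOURCE is returned when the driver already has a request in flight, since completing that request is guaranteed to re-run the queue; BLK_STS_RESOURCE is returned when the shared ST-DMA lock is unavailable, because its release is outside the driver's control and blk-mq must re-run the queue itself after a delay. The pattern, schematically (all my_* names and helpers are illustrative):

	static DEFINE_SPINLOCK(my_lock);
	static struct request *my_inflight_rq;

	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
	{
		spin_lock_irq(&my_lock);
		if (my_inflight_rq) {
			spin_unlock_irq(&my_lock);
			return BLK_STS_DEV_RESOURCE;	/* our completion re-runs us */
		}
		if (!try_grab_shared_resource()) {	/* hypothetical */
			spin_unlock_irq(&my_lock);
			return BLK_STS_RESOURCE;	/* core retries after a delay */
		}
		my_inflight_rq = bd->rq;
		blk_mq_start_request(bd->rq);
		spin_unlock_irq(&my_lock);

		issue_to_hardware(bd->rq);		/* hypothetical */
		return BLK_STS_OK;
	}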
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 7033a4beda66..254ee7d54e91 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -45,7 +45,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
char cms[LO_NAME_SIZE]; /* cipher-mode string */
char *mode;
char *cmsp = cms; /* c-m string pointer */
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
/* encryption breaks for non sector aligned offsets */
@@ -80,13 +80,13 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
*cmsp++ = ')';
*cmsp = 0;
- tfm = crypto_alloc_skcipher(cms, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_sync_skcipher(cms, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
- err = crypto_skcipher_setkey(tfm, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
-
+ err = crypto_sync_skcipher_setkey(tfm, info->lo_encrypt_key,
+ info->lo_encrypt_key_size);
+
if (err != 0)
goto out_free_tfm;
@@ -94,7 +94,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
return 0;
out_free_tfm:
- crypto_free_skcipher(tfm);
+ crypto_free_sync_skcipher(tfm);
out:
return err;
@@ -109,8 +109,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
struct page *loop_page, unsigned loop_off,
int size, sector_t IV)
{
- struct crypto_skcipher *tfm = lo->key_data;
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ struct crypto_sync_skcipher *tfm = lo->key_data;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg_out;
struct scatterlist sg_in;
@@ -119,7 +119,7 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
unsigned in_offs, out_offs;
int err;
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
@@ -175,9 +175,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
static int
cryptoloop_release(struct loop_device *lo)
{
- struct crypto_skcipher *tfm = lo->key_data;
+ struct crypto_sync_skcipher *tfm = lo->key_data;
if (tfm != NULL) {
- crypto_free_skcipher(tfm);
+ crypto_free_sync_skcipher(tfm);
lo->key_data = NULL;
return 0;
}
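
[Annotation] The cryptoloop change is a mechanical move from the async skcipher interface (forced synchronous via CRYPTO_ALG_ASYNC) to the dedicated sync_skcipher API, whose on-stack requests have a fixed, known size. A condensed sketch of the allocate/use/free lifecycle, assuming a cbc(aes) transform purely for illustration:

	#include <crypto/skcipher.h>

	static int my_encrypt(struct scatterlist *src, struct scatterlist *dst,
			      unsigned int len, u8 *iv, const u8 *key, int keylen)
	{
		struct crypto_sync_skcipher *tfm;
		int err;

		tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_sync_skcipher_setkey(tfm, key, keylen);
		if (!err) {
			SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

			skcipher_request_set_sync_tfm(req, tfm);
			skcipher_request_set_callback(req, 0, NULL, NULL);
			skcipher_request_set_crypt(req, src, dst, len, iv);
			err = crypto_skcipher_encrypt(req);
			skcipher_request_zero(req);
		}

		crypto_free_sync_skcipher(tfm);
		return err;
	}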
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index 87aab6910d2d..52d885cdccb5 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -11,7 +11,6 @@ config BLK_DEV_DRBD
depends on PROC_FS && INET
select LRU_CACHE
select LIBCRC32C
- default n
help
NOTE: In order to authenticate connections you have to select
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e35a234b0a8f..1e47db57b9d2 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -429,7 +429,7 @@ enum {
__EE_CALL_AL_COMPLETE_IO,
__EE_MAY_SET_IN_SYNC,
- /* is this a TRIM aka REQ_DISCARD? */
+ /* is this a TRIM aka REQ_OP_DISCARD? */
__EE_IS_TRIM,
/* In case a barrier failed,
@@ -724,10 +724,10 @@ struct drbd_connection {
struct list_head transfer_log; /* all requests not yet fully processed */
struct crypto_shash *cram_hmac_tfm;
- struct crypto_ahash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
- struct crypto_ahash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
- struct crypto_ahash *csums_tfm;
- struct crypto_ahash *verify_tfm;
+ struct crypto_shash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
+ struct crypto_shash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
+ struct crypto_shash *csums_tfm;
+ struct crypto_shash *verify_tfm;
void *int_dig_in;
void *int_dig_vv;
@@ -1531,8 +1531,9 @@ static inline void ov_out_of_sync_print(struct drbd_device *device)
}
-extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
-extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
+extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
+extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
+ void *);
/* worker callbacks */
extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index ef8212a4b73e..55fd104f1ed4 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1377,7 +1377,7 @@ void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd
struct p_data *dp, int data_size)
{
if (peer_device->connection->peer_integrity_tfm)
- data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
+ data_size -= crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id);
}
@@ -1673,7 +1673,7 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
-/* Used to send write or TRIM aka REQ_DISCARD requests
+/* Used to send write or TRIM aka REQ_OP_DISCARD requests
* R_PRIMARY -> Peer (P_DATA, P_TRIM)
*/
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
@@ -1690,7 +1690,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
- crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
+ crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -1796,7 +1796,7 @@ int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
- crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
+ crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -2557,11 +2557,11 @@ void conn_free_crypto(struct drbd_connection *connection)
{
drbd_free_sock(connection);
- crypto_free_ahash(connection->csums_tfm);
- crypto_free_ahash(connection->verify_tfm);
+ crypto_free_shash(connection->csums_tfm);
+ crypto_free_shash(connection->verify_tfm);
crypto_free_shash(connection->cram_hmac_tfm);
- crypto_free_ahash(connection->integrity_tfm);
- crypto_free_ahash(connection->peer_integrity_tfm);
+ crypto_free_shash(connection->integrity_tfm);
+ crypto_free_shash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index b4f02768ba47..d15703b1ffe8 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2303,10 +2303,10 @@ check_net_options(struct drbd_connection *connection, struct net_conf *new_net_c
}
struct crypto {
- struct crypto_ahash *verify_tfm;
- struct crypto_ahash *csums_tfm;
+ struct crypto_shash *verify_tfm;
+ struct crypto_shash *csums_tfm;
struct crypto_shash *cram_hmac_tfm;
- struct crypto_ahash *integrity_tfm;
+ struct crypto_shash *integrity_tfm;
};
static int
@@ -2324,36 +2324,21 @@ alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
return NO_ERROR;
}
-static int
-alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg)
-{
- if (!tfm_name[0])
- return NO_ERROR;
-
- *tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(*tfm)) {
- *tfm = NULL;
- return err_alg;
- }
-
- return NO_ERROR;
-}
-
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
{
char hmac_name[CRYPTO_MAX_ALG_NAME];
enum drbd_ret_code rv;
- rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg,
+ rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
ERR_CSUMS_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg,
+ rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
ERR_VERIFY_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
+ rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
ERR_INTEGRITY_ALG);
if (rv != NO_ERROR)
return rv;
@@ -2371,9 +2356,9 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
static void free_crypto(struct crypto *crypto)
{
crypto_free_shash(crypto->cram_hmac_tfm);
- crypto_free_ahash(crypto->integrity_tfm);
- crypto_free_ahash(crypto->csums_tfm);
- crypto_free_ahash(crypto->verify_tfm);
+ crypto_free_shash(crypto->integrity_tfm);
+ crypto_free_shash(crypto->csums_tfm);
+ crypto_free_shash(crypto->verify_tfm);
}
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
@@ -2450,17 +2435,17 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
rcu_assign_pointer(connection->net_conf, new_net_conf);
if (!rsr) {
- crypto_free_ahash(connection->csums_tfm);
+ crypto_free_shash(connection->csums_tfm);
connection->csums_tfm = crypto.csums_tfm;
crypto.csums_tfm = NULL;
}
if (!ovr) {
- crypto_free_ahash(connection->verify_tfm);
+ crypto_free_shash(connection->verify_tfm);
connection->verify_tfm = crypto.verify_tfm;
crypto.verify_tfm = NULL;
}
- crypto_free_ahash(connection->integrity_tfm);
+ crypto_free_shash(connection->integrity_tfm);
connection->integrity_tfm = crypto.integrity_tfm;
if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
/* Do this without trying to take connection->data.mutex again. */
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h
index c3081f93051c..48dabbb21e11 100644
--- a/drivers/block/drbd/drbd_protocol.h
+++ b/drivers/block/drbd/drbd_protocol.h
@@ -57,7 +57,7 @@ enum drbd_packet {
P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
/* 0x2e to 0x30 reserved, used in drbd 9 */
- /* REQ_DISCARD. We used "discard" in different contexts before,
+ /* REQ_OP_DISCARD. We used "discard" in different contexts before,
* which is why I chose TRIM here, to disambiguate. */
P_TRIM = 0x31,
@@ -126,7 +126,7 @@ struct p_header100 {
#define DP_UNPLUG 8 /* not used anymore */
#define DP_FUA 16 /* equals REQ_FUA */
#define DP_FLUSH 32 /* equals REQ_PREFLUSH */
-#define DP_DISCARD 64 /* equals REQ_DISCARD */
+#define DP_DISCARD 64 /* equals REQ_OP_DISCARD */
#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
#define DP_WSAME 512 /* equiv. REQ_WRITE_SAME */
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 75f6b47169e6..fc67fd853375 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1732,7 +1732,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
}
/* quick wrapper in case payload size != request_size (write same) */
-static void drbd_csum_ee_size(struct crypto_ahash *h,
+static void drbd_csum_ee_size(struct crypto_shash *h,
struct drbd_peer_request *r, void *d,
unsigned int payload_size)
{
@@ -1769,7 +1769,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
digest_size = 0;
if (!trim && peer_device->connection->peer_integrity_tfm) {
- digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
/*
* FIXME: Receive the incoming digest into the receive buffer
* here, together with its struct p_data?
@@ -1905,7 +1905,7 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
digest_size = 0;
if (peer_device->connection->peer_integrity_tfm) {
- digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
if (err)
return err;
@@ -3542,7 +3542,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
int p_proto, p_discard_my_data, p_two_primaries, cf;
struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
char integrity_alg[SHARED_SECRET_MAX] = "";
- struct crypto_ahash *peer_integrity_tfm = NULL;
+ struct crypto_shash *peer_integrity_tfm = NULL;
void *int_dig_in = NULL, *int_dig_vv = NULL;
p_proto = be32_to_cpu(p->protocol);
@@ -3623,7 +3623,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
* change.
*/
- peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+ peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(peer_integrity_tfm)) {
peer_integrity_tfm = NULL;
drbd_err(connection, "peer data-integrity-alg %s not supported\n",
@@ -3631,7 +3631,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
goto disconnect;
}
- hash_size = crypto_ahash_digestsize(peer_integrity_tfm);
+ hash_size = crypto_shash_digestsize(peer_integrity_tfm);
int_dig_in = kmalloc(hash_size, GFP_KERNEL);
int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
if (!(int_dig_in && int_dig_vv)) {
@@ -3661,7 +3661,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
mutex_unlock(&connection->resource->conf_update);
mutex_unlock(&connection->data.mutex);
- crypto_free_ahash(connection->peer_integrity_tfm);
+ crypto_free_shash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
connection->peer_integrity_tfm = peer_integrity_tfm;
@@ -3679,7 +3679,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
disconnect_rcu_unlock:
rcu_read_unlock();
disconnect:
- crypto_free_ahash(peer_integrity_tfm);
+ crypto_free_shash(peer_integrity_tfm);
kfree(int_dig_in);
kfree(int_dig_vv);
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
@@ -3691,15 +3691,16 @@ disconnect:
* return: NULL (alg name was "")
* ERR_PTR(error) if something goes wrong
* or the crypto hash ptr, if it worked out ok. */
-static struct crypto_ahash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
+static struct crypto_shash *drbd_crypto_alloc_digest_safe(
+ const struct drbd_device *device,
const char *alg, const char *name)
{
- struct crypto_ahash *tfm;
+ struct crypto_shash *tfm;
if (!alg[0])
return NULL;
- tfm = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_shash(alg, 0, 0);
if (IS_ERR(tfm)) {
drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
alg, name, PTR_ERR(tfm));
@@ -3752,8 +3753,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
struct drbd_device *device;
struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
- struct crypto_ahash *verify_tfm = NULL;
- struct crypto_ahash *csums_tfm = NULL;
+ struct crypto_shash *verify_tfm = NULL;
+ struct crypto_shash *csums_tfm = NULL;
struct net_conf *old_net_conf, *new_net_conf = NULL;
struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
const int apv = connection->agreed_pro_version;
@@ -3900,14 +3901,14 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
if (verify_tfm) {
strcpy(new_net_conf->verify_alg, p->verify_alg);
new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
- crypto_free_ahash(peer_device->connection->verify_tfm);
+ crypto_free_shash(peer_device->connection->verify_tfm);
peer_device->connection->verify_tfm = verify_tfm;
drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
}
if (csums_tfm) {
strcpy(new_net_conf->csums_alg, p->csums_alg);
new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
- crypto_free_ahash(peer_device->connection->csums_tfm);
+ crypto_free_shash(peer_device->connection->csums_tfm);
peer_device->connection->csums_tfm = csums_tfm;
drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
}
@@ -3951,9 +3952,9 @@ disconnect:
mutex_unlock(&connection->resource->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
- crypto_free_ahash(csums_tfm);
+ crypto_free_shash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
- crypto_free_ahash(verify_tfm);
+ crypto_free_shash(verify_tfm);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 19cac36e9737..1c4da17e902e 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -650,7 +650,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
case DISCARD_COMPLETED_NOTSUPP:
case DISCARD_COMPLETED_WITH_ERROR:
/* I'd rather not detach from local disk just because it
- * failed a REQ_DISCARD. */
+ * failed a REQ_OP_DISCARD. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index b8f77e83d456..99255d0c9e2f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -152,7 +152,7 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l
do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);
- /* FIXME do we want to detach for failed REQ_DISCARD?
+ /* FIXME do we want to detach for failed REQ_OP_DISCARD?
* ((peer_req->flags & (EE_WAS_ERROR|EE_IS_TRIM)) == EE_WAS_ERROR) */
if (peer_req->flags & EE_WAS_ERROR)
__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
@@ -295,60 +295,61 @@ void drbd_request_endio(struct bio *bio)
complete_master_bio(device, &m);
}
-void drbd_csum_ee(struct crypto_ahash *tfm, struct drbd_peer_request *peer_req, void *digest)
+void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest)
{
- AHASH_REQUEST_ON_STACK(req, tfm);
- struct scatterlist sg;
+ SHASH_DESC_ON_STACK(desc, tfm);
struct page *page = peer_req->pages;
struct page *tmp;
unsigned len;
+ void *src;
- ahash_request_set_tfm(req, tfm);
- ahash_request_set_callback(req, 0, NULL, NULL);
+ desc->tfm = tfm;
+ desc->flags = 0;
- sg_init_table(&sg, 1);
- crypto_ahash_init(req);
+ crypto_shash_init(desc);
+ src = kmap_atomic(page);
while ((tmp = page_chain_next(page))) {
/* all but the last page will be fully used */
- sg_set_page(&sg, page, PAGE_SIZE, 0);
- ahash_request_set_crypt(req, &sg, NULL, sg.length);
- crypto_ahash_update(req);
+ crypto_shash_update(desc, src, PAGE_SIZE);
+ kunmap_atomic(src);
page = tmp;
+ src = kmap_atomic(page);
}
/* and now the last, possibly only partially used page */
len = peer_req->i.size & (PAGE_SIZE - 1);
- sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
- ahash_request_set_crypt(req, &sg, digest, sg.length);
- crypto_ahash_finup(req);
- ahash_request_zero(req);
+ crypto_shash_update(desc, src, len ?: PAGE_SIZE);
+ kunmap_atomic(src);
+
+ crypto_shash_final(desc, digest);
+ shash_desc_zero(desc);
}
-void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
+void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
{
- AHASH_REQUEST_ON_STACK(req, tfm);
- struct scatterlist sg;
+ SHASH_DESC_ON_STACK(desc, tfm);
struct bio_vec bvec;
struct bvec_iter iter;
- ahash_request_set_tfm(req, tfm);
- ahash_request_set_callback(req, 0, NULL, NULL);
+ desc->tfm = tfm;
+ desc->flags = 0;
- sg_init_table(&sg, 1);
- crypto_ahash_init(req);
+ crypto_shash_init(desc);
bio_for_each_segment(bvec, bio, iter) {
- sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
- ahash_request_set_crypt(req, &sg, NULL, sg.length);
- crypto_ahash_update(req);
+ u8 *src;
+
+ src = kmap_atomic(bvec.bv_page);
+ crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len);
+ kunmap_atomic(src);
+
/* REQ_OP_WRITE_SAME has only one segment,
* checksum the payload only once. */
if (bio_op(bio) == REQ_OP_WRITE_SAME)
break;
}
- ahash_request_set_crypt(req, NULL, digest, 0);
- crypto_ahash_final(req);
- ahash_request_zero(req);
+ crypto_shash_final(desc, digest);
+ shash_desc_zero(desc);
}
/* MAYBE merge common code with w_e_end_ov_req */
@@ -367,7 +368,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
- digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
sector_t sector = peer_req->i.sector;
@@ -1205,7 +1206,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
* a real fix would be much more involved,
* introducing more locking mechanisms */
if (peer_device->connection->csums_tfm) {
- digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
D_ASSERT(device, digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
@@ -1255,7 +1256,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
- digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
err = 1; /* terminate the connection in case the allocation failed */
@@ -1327,7 +1328,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
di = peer_req->digest;
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
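
[Annotation] The drbd_csum_* rewrites above illustrate the general ahash-to-shash conversion: shash has no scatterlist interface, so each page is mapped with kmap_atomic() and fed to crypto_shash_update() directly. A minimal sketch of hashing one page, assuming the tfm was allocated with crypto_alloc_shash(); return values of the crypto_shash_* calls are ignored here, as the drbd code above also does:

	#include <crypto/hash.h>
	#include <linux/highmem.h>

	static void my_csum_page(struct crypto_shash *tfm, struct page *page,
				 unsigned int len, void *digest)
	{
		SHASH_DESC_ON_STACK(desc, tfm);
		void *src;

		desc->tfm = tfm;
		desc->flags = 0;	/* still required in this kernel era */

		crypto_shash_init(desc);
		src = kmap_atomic(page);	/* shash needs a kernel virtual address */
		crypto_shash_update(desc, src, len);
		kunmap_atomic(src);
		crypto_shash_final(desc, digest);
		shash_desc_zero(desc);
	}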
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 48f622728ce6..a8cfa011c284 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -252,13 +252,13 @@ static int allowed_drive_mask = 0x33;
static int irqdma_allocated;
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/cdrom.h> /* for the compatibility eject ioctl */
#include <linux/completion.h>
+static LIST_HEAD(floppy_reqs);
static struct request *current_req;
-static void do_fd_request(struct request_queue *q);
static int set_next_request(void);
#ifndef fd_get_dma_residue
@@ -414,10 +414,10 @@ static struct floppy_drive_struct drive_state[N_DRIVE];
static struct floppy_write_errors write_errors[N_DRIVE];
static struct timer_list motor_off_timer[N_DRIVE];
static struct gendisk *disks[N_DRIVE];
+static struct blk_mq_tag_set tag_sets[N_DRIVE];
static struct block_device *opened_bdev[N_DRIVE];
static DEFINE_MUTEX(open_lock);
static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
-static int fdc_queue;
/*
* This struct defines the different floppy types.
@@ -2216,8 +2216,9 @@ static void floppy_end_request(struct request *req, blk_status_t error)
/* current_count_sectors can be zero if transfer failed */
if (error)
nr_sectors = blk_rq_cur_sectors(req);
- if (__blk_end_request(req, error, nr_sectors << 9))
+ if (blk_update_request(req, error, nr_sectors << 9))
return;
+ __blk_mq_end_request(req, error);
/* We're done with the request */
floppy_off(drive);
@@ -2797,27 +2798,14 @@ static int make_raw_rw_request(void)
return 2;
}
-/*
- * Round-robin between our available drives, doing one request from each
- */
static int set_next_request(void)
{
- struct request_queue *q;
- int old_pos = fdc_queue;
-
- do {
- q = disks[fdc_queue]->queue;
- if (++fdc_queue == N_DRIVE)
- fdc_queue = 0;
- if (q) {
- current_req = blk_fetch_request(q);
- if (current_req) {
- current_req->error_count = 0;
- break;
- }
- }
- } while (fdc_queue != old_pos);
-
+ current_req = list_first_entry_or_null(&floppy_reqs, struct request,
+ queuelist);
+ if (current_req) {
+ current_req->error_count = 0;
+ list_del_init(&current_req->queuelist);
+ }
return current_req != NULL;
}
@@ -2901,29 +2889,38 @@ static void process_fd_request(void)
schedule_bh(redo_fd_request);
}
-static void do_fd_request(struct request_queue *q)
+static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ blk_mq_start_request(bd->rq);
+
if (WARN(max_buffer_sectors == 0,
"VFS: %s called on non-open device\n", __func__))
- return;
+ return BLK_STS_IOERR;
if (WARN(atomic_read(&usage_count) == 0,
"warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
current_req, (long)blk_rq_pos(current_req),
(unsigned long long) current_req->cmd_flags))
- return;
+ return BLK_STS_IOERR;
+
+ spin_lock_irq(&floppy_lock);
+ list_add_tail(&bd->rq->queuelist, &floppy_reqs);
+ spin_unlock_irq(&floppy_lock);
if (test_and_set_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
is_alive(__func__, "old request running");
- return;
+ return BLK_STS_OK;
}
+
command_status = FD_COMMAND_NONE;
__reschedule_timeout(MAXTIMEOUT, "fd_request");
set_fdc(0);
process_fd_request();
is_alive(__func__, "");
+ return BLK_STS_OK;
}
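
[Annotation] floppy_queue_rq() keeps the old driver structure intact: requests are parked on a driver-private list and the existing state machine drains them, so ->queue_rq() itself never blocks. Schematically (my_* names and kick_state_machine() are placeholders, not the patch's):

	static LIST_HEAD(my_reqs);
	static DEFINE_SPINLOCK(my_lock);

	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
	{
		blk_mq_start_request(bd->rq);

		spin_lock_irq(&my_lock);
		list_add_tail(&bd->rq->queuelist, &my_reqs);
		spin_unlock_irq(&my_lock);

		kick_state_machine();	/* hypothetical: consumes my_reqs asynchronously */
		return BLK_STS_OK;
	}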
static const struct cont_t poll_cont = {
@@ -3467,6 +3464,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
(struct floppy_struct **)&outparam);
if (ret)
return ret;
+ memcpy(&inparam.g, outparam,
+ offsetof(struct floppy_struct, name));
+ outparam = &inparam.g;
break;
case FDMSGON:
UDP->flags |= FTD_MSG;
@@ -4483,6 +4483,10 @@ static struct platform_driver floppy_driver = {
},
};
+static const struct blk_mq_ops floppy_mq_ops = {
+ .queue_rq = floppy_queue_rq,
+};
+
static struct platform_device floppy_device[N_DRIVE];
static bool floppy_available(int drive)
@@ -4530,9 +4534,12 @@ static int __init do_floppy_init(void)
goto out_put_disk;
}
- disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock);
- if (!disks[drive]->queue) {
- err = -ENOMEM;
+ disks[drive]->queue = blk_mq_init_sq_queue(&tag_sets[drive],
+ &floppy_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(disks[drive]->queue)) {
+ err = PTR_ERR(disks[drive]->queue);
+ disks[drive]->queue = NULL;
goto out_put_disk;
}
@@ -4676,7 +4683,7 @@ static int __init do_floppy_init(void)
/* to be cleaned up... */
disks[drive]->private_data = (void *)(long)drive;
disks[drive]->flags |= GENHD_FL_REMOVABLE;
- device_add_disk(&floppy_device[drive].dev, disks[drive]);
+ device_add_disk(&floppy_device[drive].dev, disks[drive], NULL);
}
return 0;
@@ -4705,6 +4712,7 @@ out_put_disk:
del_timer_sync(&motor_off_timer[drive]);
blk_cleanup_queue(disks[drive]->queue);
disks[drive]->queue = NULL;
+ blk_mq_free_tag_set(&tag_sets[drive]);
}
put_disk(disks[drive]);
}
@@ -4932,6 +4940,7 @@ static void __exit floppy_module_exit(void)
platform_device_unregister(&floppy_device[drive]);
}
blk_cleanup_queue(disks[drive]->queue);
+ blk_mq_free_tag_set(&tag_sets[drive]);
/*
* These disks have not called add_disk(). Don't put down
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ea9debf59b22..abad6d15f956 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -77,6 +77,7 @@
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
+#include <linux/blk-cgroup.h>
#include "loop.h"
@@ -1760,8 +1761,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* always use the first bio's css */
#ifdef CONFIG_BLK_CGROUP
- if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
- cmd->css = rq->bio->bi_css;
+ if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
+ cmd->css = &bio_blkcg(rq->bio)->css;
css_get(cmd->css);
} else
#endif
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index d0666f5ce003..dfc8de6ce525 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1862,11 +1862,9 @@ static int exec_drive_taskfile(struct driver_data *dd,
if (IS_ERR(outbuf))
return PTR_ERR(outbuf);
- outbuf_dma = pci_map_single(dd->pdev,
- outbuf,
- taskout,
- DMA_TO_DEVICE);
- if (pci_dma_mapping_error(dd->pdev, outbuf_dma)) {
+ outbuf_dma = dma_map_single(&dd->pdev->dev, outbuf,
+ taskout, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pdev->dev, outbuf_dma)) {
err = -ENOMEM;
goto abort;
}
@@ -1880,10 +1878,9 @@ static int exec_drive_taskfile(struct driver_data *dd,
inbuf = NULL;
goto abort;
}
- inbuf_dma = pci_map_single(dd->pdev,
- inbuf,
- taskin, DMA_FROM_DEVICE);
- if (pci_dma_mapping_error(dd->pdev, inbuf_dma)) {
+ inbuf_dma = dma_map_single(&dd->pdev->dev, inbuf,
+ taskin, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&dd->pdev->dev, inbuf_dma)) {
err = -ENOMEM;
goto abort;
}
@@ -2002,11 +1999,11 @@ static int exec_drive_taskfile(struct driver_data *dd,
/* reclaim the DMA buffers.*/
if (inbuf_dma)
- pci_unmap_single(dd->pdev, inbuf_dma,
- taskin, DMA_FROM_DEVICE);
+ dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
+ DMA_FROM_DEVICE);
if (outbuf_dma)
- pci_unmap_single(dd->pdev, outbuf_dma,
- taskout, DMA_TO_DEVICE);
+ dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
+ DMA_TO_DEVICE);
inbuf_dma = 0;
outbuf_dma = 0;
@@ -2053,11 +2050,11 @@ static int exec_drive_taskfile(struct driver_data *dd,
}
abort:
if (inbuf_dma)
- pci_unmap_single(dd->pdev, inbuf_dma,
- taskin, DMA_FROM_DEVICE);
+ dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
+ DMA_FROM_DEVICE);
if (outbuf_dma)
- pci_unmap_single(dd->pdev, outbuf_dma,
- taskout, DMA_TO_DEVICE);
+ dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
+ DMA_TO_DEVICE);
kfree(outbuf);
kfree(inbuf);
@@ -3861,7 +3858,7 @@ skip_create_disk:
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
- device_add_disk(&dd->pdev->dev, dd->disk);
+ device_add_disk(&dd->pdev->dev, dd->disk, NULL);
dd->bdev = bdget_disk(dd->disk, 0);
/*
@@ -4216,18 +4213,10 @@ static int mtip_pci_probe(struct pci_dev *pdev,
goto iomap_err;
}
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-
- if (rv) {
- rv = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
- if (rv) {
- dev_warn(&pdev->dev,
- "64-bit DMA enable failed\n");
- goto setmask_err;
- }
- }
+ rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rv) {
+ dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
+ goto setmask_err;
}
/* Copy the info we may need later into the private data structure. */
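
[Annotation] The mtip32xx hunks are a straight pci_* to dma_* DMA API conversion; dma_set_mask_and_coherent() also collapses the old two-step mask/coherent-mask negotiation into one call. The mapping pattern, sketched with illustrative names:

	#include <linux/dma-mapping.h>

	static int my_setup_dma(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t dma;

		/* one call now sets both the streaming and coherent masks */
		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
			return -EIO;

		dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		/* ... hand dma to the hardware and wait for completion ... */

		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
		return 0;
	}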
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 093b614d6524..e94591021682 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -606,20 +606,12 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
- struct request_queue *q = NULL;
int queue_mode = cmd->nq->dev->queue_mode;
- if (cmd->rq)
- q = cmd->rq->q;
-
switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, cmd->error);
return;
- case NULL_Q_RQ:
- INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, cmd->error);
- break;
case NULL_Q_BIO:
cmd->bio->bi_status = cmd->error;
bio_endio(cmd->bio);
@@ -627,15 +619,6 @@ static void end_cmd(struct nullb_cmd *cmd)
}
free_cmd(cmd);
-
- /* Restart queue if needed, as we are freeing a tag */
- if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue_async(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -1136,25 +1119,14 @@ static void null_stop_queue(struct nullb *nullb)
if (nullb->dev->queue_mode == NULL_Q_MQ)
blk_mq_stop_hw_queues(q);
- else {
- spin_lock_irq(q->queue_lock);
- blk_stop_queue(q);
- spin_unlock_irq(q->queue_lock);
- }
}
static void null_restart_queue_async(struct nullb *nullb)
{
struct request_queue *q = nullb->q;
- unsigned long flags;
if (nullb->dev->queue_mode == NULL_Q_MQ)
blk_mq_start_stopped_hw_queues(q, true);
- else {
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue_async(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
}
static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
@@ -1197,17 +1169,8 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
/* race with timer */
if (atomic_long_read(&nullb->cur_bytes) > 0)
null_restart_queue_async(nullb);
- if (dev->queue_mode == NULL_Q_RQ) {
- struct request_queue *q = nullb->q;
-
- spin_lock_irq(q->queue_lock);
- rq->rq_flags |= RQF_DONTPREP;
- blk_requeue_request(q, rq);
- spin_unlock_irq(q->queue_lock);
- return BLK_STS_OK;
- } else
- /* requeue request */
- return BLK_STS_DEV_RESOURCE;
+ /* requeue request */
+ return BLK_STS_DEV_RESOURCE;
}
}
@@ -1278,9 +1241,6 @@ out:
case NULL_Q_MQ:
blk_mq_complete_request(cmd->rq);
break;
- case NULL_Q_RQ:
- blk_complete_request(cmd->rq);
- break;
case NULL_Q_BIO:
/*
* XXX: no proper submitting cpu information available.
@@ -1349,30 +1309,6 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
-static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
-{
- pr_info("null: rq %p timed out\n", rq);
- __blk_complete_request(rq);
- return BLK_EH_DONE;
-}
-
-static int null_rq_prep_fn(struct request_queue *q, struct request *req)
-{
- struct nullb *nullb = q->queuedata;
- struct nullb_queue *nq = nullb_to_queue(nullb);
- struct nullb_cmd *cmd;
-
- cmd = alloc_cmd(nq, 0);
- if (cmd) {
- cmd->rq = req;
- req->special = cmd;
- return BLKPREP_OK;
- }
- blk_stop_queue(q);
-
- return BLKPREP_DEFER;
-}
-
static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
@@ -1391,27 +1327,6 @@ static bool should_requeue_request(struct request *rq)
return false;
}
-static void null_request_fn(struct request_queue *q)
-{
- struct request *rq;
-
- while ((rq = blk_fetch_request(q)) != NULL) {
- struct nullb_cmd *cmd = rq->special;
-
- /* just ignore the request */
- if (should_timeout_request(rq))
- continue;
- if (should_requeue_request(rq)) {
- blk_requeue_request(q, rq);
- continue;
- }
-
- spin_unlock_irq(q->queue_lock);
- null_handle_cmd(cmd);
- spin_lock_irq(q->queue_lock);
- }
-}
-
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
pr_info("null: rq %p timed out\n", rq);
@@ -1766,24 +1681,6 @@ static int null_add_dev(struct nullb_device *dev)
rv = init_driver_queues(nullb);
if (rv)
goto out_cleanup_blk_queue;
- } else {
- nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
- dev->home_node);
- if (!nullb->q) {
- rv = -ENOMEM;
- goto out_cleanup_queues;
- }
-
- if (!null_setup_fault())
- goto out_cleanup_blk_queue;
-
- blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
- blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
- blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
- nullb->q->rq_timeout = 5 * HZ;
- rv = init_driver_queues(nullb);
- if (rv)
- goto out_cleanup_blk_queue;
}
if (dev->mbps) {
@@ -1865,6 +1762,10 @@ static int __init null_init(void)
return -EINVAL;
}
+ if (g_queue_mode == NULL_Q_RQ) {
+ pr_err("null_blk: legacy IO path no longer available\n");
+ return -EINVAL;
+ }
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.\n",
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index a026211afb51..96670eefaeb2 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -137,7 +137,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
#include <linux/delay.h>
#include <linux/cdrom.h>
#include <linux/spinlock.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
@@ -186,7 +186,8 @@ static int pcd_packet(struct cdrom_device_info *cdi,
static int pcd_detect(void);
static void pcd_probe_capabilities(void);
static void do_pcd_read_drq(void);
-static void do_pcd_request(struct request_queue * q);
+static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd);
static void do_pcd_read(void);
struct pcd_unit {
@@ -199,6 +200,8 @@ struct pcd_unit {
char *name; /* pcd0, pcd1, etc */
struct cdrom_device_info info; /* uniform cdrom interface */
struct gendisk *disk;
+ struct blk_mq_tag_set tag_set;
+ struct list_head rq_list;
};
static struct pcd_unit pcd[PCD_UNITS];
@@ -292,6 +295,10 @@ static const struct cdrom_device_ops pcd_dops = {
CDC_CD_RW,
};
+static const struct blk_mq_ops pcd_mq_ops = {
+ .queue_rq = pcd_queue_rq,
+};
+
static void pcd_init_units(void)
{
struct pcd_unit *cd;
@@ -300,13 +307,19 @@ static void pcd_init_units(void)
pcd_drive_count = 0;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
struct gendisk *disk = alloc_disk(1);
+
if (!disk)
continue;
- disk->queue = blk_init_queue(do_pcd_request, &pcd_lock);
- if (!disk->queue) {
- put_disk(disk);
+
+ disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
+ 1, BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(disk->queue)) {
+ disk->queue = NULL;
continue;
}
+
+ INIT_LIST_HEAD(&cd->rq_list);
+ disk->queue->queuedata = cd;
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
cd->disk = disk;
cd->pi = &cd->pia;
@@ -748,18 +761,18 @@ static int pcd_queue;
static int set_next_request(void)
{
struct pcd_unit *cd;
- struct request_queue *q;
int old_pos = pcd_queue;
do {
cd = &pcd[pcd_queue];
- q = cd->present ? cd->disk->queue : NULL;
if (++pcd_queue == PCD_UNITS)
pcd_queue = 0;
- if (q) {
- pcd_req = blk_fetch_request(q);
- if (pcd_req)
- break;
+ if (cd->present && !list_empty(&cd->rq_list)) {
+ pcd_req = list_first_entry(&cd->rq_list, struct request,
+ queuelist);
+ list_del_init(&pcd_req->queuelist);
+ blk_mq_start_request(pcd_req);
+ break;
}
} while (pcd_queue != old_pos);
@@ -768,33 +781,41 @@ static int set_next_request(void)
static void pcd_request(void)
{
+ struct pcd_unit *cd;
+
if (pcd_busy)
return;
- while (1) {
- if (!pcd_req && !set_next_request())
- return;
- if (rq_data_dir(pcd_req) == READ) {
- struct pcd_unit *cd = pcd_req->rq_disk->private_data;
- if (cd != pcd_current)
- pcd_bufblk = -1;
- pcd_current = cd;
- pcd_sector = blk_rq_pos(pcd_req);
- pcd_count = blk_rq_cur_sectors(pcd_req);
- pcd_buf = bio_data(pcd_req->bio);
- pcd_busy = 1;
- ps_set_intr(do_pcd_read, NULL, 0, nice);
- return;
- } else {
- __blk_end_request_all(pcd_req, BLK_STS_IOERR);
- pcd_req = NULL;
- }
- }
+ if (!pcd_req && !set_next_request())
+ return;
+
+ cd = pcd_req->rq_disk->private_data;
+ if (cd != pcd_current)
+ pcd_bufblk = -1;
+ pcd_current = cd;
+ pcd_sector = blk_rq_pos(pcd_req);
+ pcd_count = blk_rq_cur_sectors(pcd_req);
+ pcd_buf = bio_data(pcd_req->bio);
+ pcd_busy = 1;
+ ps_set_intr(do_pcd_read, NULL, 0, nice);
}
-static void do_pcd_request(struct request_queue *q)
+static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct pcd_unit *cd = hctx->queue->queuedata;
+
+ if (rq_data_dir(bd->rq) != READ) {
+ blk_mq_start_request(bd->rq);
+ return BLK_STS_IOERR;
+ }
+
+ spin_lock_irq(&pcd_lock);
+ list_add_tail(&bd->rq->queuelist, &cd->rq_list);
pcd_request();
+ spin_unlock_irq(&pcd_lock);
+
+ return BLK_STS_OK;
}
static inline void next_request(blk_status_t err)
@@ -802,8 +823,10 @@ static inline void next_request(blk_status_t err)
unsigned long saved_flags;
spin_lock_irqsave(&pcd_lock, saved_flags);
- if (!__blk_end_request_cur(pcd_req, err))
+ if (!blk_update_request(pcd_req, err, blk_rq_cur_bytes(pcd_req))) {
+ __blk_mq_end_request(pcd_req, err);
pcd_req = NULL;
+ }
pcd_busy = 0;
pcd_request();
spin_unlock_irqrestore(&pcd_lock, saved_flags);
@@ -1011,6 +1034,7 @@ static void __exit pcd_exit(void)
unregister_cdrom(&cd->info);
}
blk_cleanup_queue(cd->disk->queue);
+ blk_mq_free_tag_set(&cd->tag_set);
put_disk(cd->disk);
}
unregister_blkdev(major, name);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 7cf947586fe4..ae4971e5d9a8 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -151,7 +151,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h> /* for the eject ioctl */
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
@@ -236,6 +236,8 @@ struct pd_unit {
int alt_geom;
char name[PD_NAMELEN]; /* pda, pdb, etc ... */
struct gendisk *gd;
+ struct blk_mq_tag_set tag_set;
+ struct list_head rq_list;
};
static struct pd_unit pd[PD_UNITS];
@@ -399,9 +401,17 @@ static int set_next_request(void)
if (++pd_queue == PD_UNITS)
pd_queue = 0;
if (q) {
- pd_req = blk_fetch_request(q);
- if (pd_req)
- break;
+ struct pd_unit *disk = q->queuedata;
+
+ if (list_empty(&disk->rq_list))
+ continue;
+
+ pd_req = list_first_entry(&disk->rq_list,
+ struct request,
+ queuelist);
+ list_del_init(&pd_req->queuelist);
+ blk_mq_start_request(pd_req);
+ break;
}
} while (pd_queue != old_pos);
@@ -412,7 +422,6 @@ static void run_fsm(void)
{
while (1) {
enum action res;
- unsigned long saved_flags;
int stop = 0;
if (!phase) {
@@ -433,19 +442,24 @@ static void run_fsm(void)
}
switch(res = phase()) {
- case Ok: case Fail:
+ case Ok: case Fail: {
+ blk_status_t err;
+
+ err = res == Ok ? 0 : BLK_STS_IOERR;
pi_disconnect(pi_current);
pd_claimed = 0;
phase = NULL;
- spin_lock_irqsave(&pd_lock, saved_flags);
- if (!__blk_end_request_cur(pd_req,
- res == Ok ? 0 : BLK_STS_IOERR)) {
- if (!set_next_request())
- stop = 1;
+ spin_lock_irq(&pd_lock);
+ if (!blk_update_request(pd_req, err,
+ blk_rq_cur_bytes(pd_req))) {
+ __blk_mq_end_request(pd_req, err);
+ pd_req = NULL;
+ stop = !set_next_request();
}
- spin_unlock_irqrestore(&pd_lock, saved_flags);
+ spin_unlock_irq(&pd_lock);
if (stop)
return;
+ }
/* fall through */
case Hold:
schedule_fsm();
@@ -505,11 +519,17 @@ static int pd_next_buf(void)
if (pd_count)
return 0;
spin_lock_irqsave(&pd_lock, saved_flags);
- __blk_end_request_cur(pd_req, 0);
- pd_count = blk_rq_cur_sectors(pd_req);
- pd_buf = bio_data(pd_req->bio);
+ if (!blk_update_request(pd_req, 0, blk_rq_cur_bytes(pd_req))) {
+ __blk_mq_end_request(pd_req, 0);
+ pd_req = NULL;
+ pd_count = 0;
+ pd_buf = NULL;
+ } else {
+ pd_count = blk_rq_cur_sectors(pd_req);
+ pd_buf = bio_data(pd_req->bio);
+ }
spin_unlock_irqrestore(&pd_lock, saved_flags);
- return 0;
+ return !pd_count;
}
static unsigned long pd_timeout;
@@ -726,15 +746,21 @@ static enum action pd_identify(struct pd_unit *disk)
/* end of io request engine */
-static void do_pd_request(struct request_queue * q)
+static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- if (pd_req)
- return;
- pd_req = blk_fetch_request(q);
- if (!pd_req)
- return;
+ struct pd_unit *disk = hctx->queue->queuedata;
+
+ spin_lock_irq(&pd_lock);
+ if (!pd_req) {
+ pd_req = bd->rq;
+ blk_mq_start_request(pd_req);
+ } else
+ list_add_tail(&bd->rq->queuelist, &disk->rq_list);
+ spin_unlock_irq(&pd_lock);
- schedule_fsm();
+ run_fsm();
+ return BLK_STS_OK;
}
static int pd_special_command(struct pd_unit *disk,
@@ -847,23 +873,33 @@ static const struct block_device_operations pd_fops = {
/* probing */
+static const struct blk_mq_ops pd_mq_ops = {
+ .queue_rq = pd_queue_rq,
+};
+
static void pd_probe_drive(struct pd_unit *disk)
{
- struct gendisk *p = alloc_disk(1 << PD_BITS);
+ struct gendisk *p;
+
+ p = alloc_disk(1 << PD_BITS);
if (!p)
return;
+
strcpy(p->disk_name, disk->name);
p->fops = &pd_fops;
p->major = major;
p->first_minor = (disk - pd) << PD_BITS;
disk->gd = p;
p->private_data = disk;
- p->queue = blk_init_queue(do_pd_request, &pd_lock);
- if (!p->queue) {
- disk->gd = NULL;
- put_disk(p);
+
+ p->queue = blk_mq_init_sq_queue(&disk->tag_set, &pd_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ if (IS_ERR(p->queue)) {
+ p->queue = NULL;
return;
}
+
+ p->queue->queuedata = disk;
blk_queue_max_hw_sectors(p->queue, cluster);
blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
@@ -895,6 +931,7 @@ static int pd_detect(void)
disk->standby = parm[D_SBY];
if (parm[D_PRT])
pd_drive_count++;
+ INIT_LIST_HEAD(&disk->rq_list);
}
par_drv = pi_register_driver(name);
@@ -972,6 +1009,7 @@ static void __exit pd_exit(void)
disk->gd = NULL;
del_gendisk(p);
blk_cleanup_queue(p->queue);
+ blk_mq_free_tag_set(&disk->tag_set);
put_disk(p);
pi_release(disk->pi);
}
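
[Annotation] One pd-specific detail above: the tag set is created with BLK_MQ_F_BLOCKING, presumably because pd_queue_rq() runs the paride state machine inline via run_fsm() and the parport layer may sleep; with that flag set, blk-mq invokes ->queue_rq() from a context where blocking is permitted. A sketch of such an init (struct my_unit and my_mq_ops are placeholders):

	struct my_unit {
		struct request_queue *queue;
		struct blk_mq_tag_set tag_set;
	};

	static int my_init_queue(struct my_unit *unit)
	{
		struct request_queue *q;

		q = blk_mq_init_sq_queue(&unit->tag_set, &my_mq_ops, 2,
					 BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
		if (IS_ERR(q))
			return PTR_ERR(q);

		q->queuedata = unit;	/* recovered later via hctx->queue->queuedata */
		unit->queue = q;
		return 0;
	}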
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index eef7a91f667d..e92e7a8eeeb2 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -152,7 +152,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/spinlock.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
@@ -206,7 +206,8 @@ module_param_array(drive3, int, NULL, 0);
#define ATAPI_WRITE_10 0x2a
static int pf_open(struct block_device *bdev, fmode_t mode);
-static void do_pf_request(struct request_queue * q);
+static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd);
static int pf_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -238,6 +239,8 @@ struct pf_unit {
int present; /* device present ? */
char name[PF_NAMELEN]; /* pf0, pf1, ... */
struct gendisk *disk;
+ struct blk_mq_tag_set tag_set;
+ struct list_head rq_list;
};
static struct pf_unit units[PF_UNITS];
@@ -277,6 +280,10 @@ static const struct block_device_operations pf_fops = {
.check_events = pf_check_events,
};
+static const struct blk_mq_ops pf_mq_ops = {
+ .queue_rq = pf_queue_rq,
+};
+
static void __init pf_init_units(void)
{
struct pf_unit *pf;
@@ -284,14 +291,22 @@ static void __init pf_init_units(void)
pf_drive_count = 0;
for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
- struct gendisk *disk = alloc_disk(1);
+ struct gendisk *disk;
+
+ disk = alloc_disk(1);
if (!disk)
continue;
- disk->queue = blk_init_queue(do_pf_request, &pf_spin_lock);
- if (!disk->queue) {
+
+ disk->queue = blk_mq_init_sq_queue(&pf->tag_set, &pf_mq_ops,
+ 1, BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(disk->queue)) {
put_disk(disk);
- return;
+ disk->queue = NULL;
+ continue;
}
+
+ INIT_LIST_HEAD(&pf->rq_list);
+ disk->queue->queuedata = pf;
blk_queue_max_segments(disk->queue, cluster);
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
pf->disk = disk;
@@ -784,18 +799,18 @@ static int pf_queue;
static int set_next_request(void)
{
struct pf_unit *pf;
- struct request_queue *q;
int old_pos = pf_queue;
do {
pf = &units[pf_queue];
- q = pf->present ? pf->disk->queue : NULL;
if (++pf_queue == PF_UNITS)
pf_queue = 0;
- if (q) {
- pf_req = blk_fetch_request(q);
- if (pf_req)
- break;
+ if (pf->present && !list_empty(&pf->rq_list)) {
+ pf_req = list_first_entry(&pf->rq_list, struct request,
+ queuelist);
+ list_del_init(&pf_req->queuelist);
+ blk_mq_start_request(pf_req);
+ break;
}
} while (pf_queue != old_pos);
@@ -804,8 +819,12 @@ static int set_next_request(void)
static void pf_end_request(blk_status_t err)
{
- if (pf_req && !__blk_end_request_cur(pf_req, err))
+ if (!pf_req)
+ return;
+ if (!blk_update_request(pf_req, err, blk_rq_cur_bytes(pf_req))) {
+ __blk_mq_end_request(pf_req, err);
pf_req = NULL;
+ }
}
static void pf_request(void)
@@ -842,9 +861,17 @@ repeat:
}
}
-static void do_pf_request(struct request_queue *q)
+static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct pf_unit *pf = hctx->queue->queuedata;
+
+ spin_lock_irq(&pf_spin_lock);
+ list_add_tail(&bd->rq->queuelist, &pf->rq_list);
pf_request();
+ spin_unlock_irq(&pf_spin_lock);
+
+ return BLK_STS_OK;
}
static int pf_next_buf(void)
@@ -1024,6 +1051,7 @@ static void __exit pf_exit(void)
continue;
del_gendisk(pf->disk);
blk_cleanup_queue(pf->disk->queue);
+ blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
pi_release(pf->pi);
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 6f1d25c1eb64..9381f4e3b221 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2645,7 +2645,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
*/
if (pd->refcnt == 1)
pkt_lock_door(pd, 0);
- /* fallthru */
+ /* fall through */
/*
* forward selected CDROM ioctls to CD-ROM, for UDF
*/
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index afe1508d82c6..4e1d9b31f60c 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -19,7 +19,7 @@
*/
#include <linux/ata.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -42,6 +42,7 @@
struct ps3disk_private {
spinlock_t lock; /* Request queue spinlock */
struct request_queue *queue;
+ struct blk_mq_tag_set tag_set;
struct gendisk *gendisk;
unsigned int blocking_factor;
struct request *req;
@@ -118,8 +119,8 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
}
}
-static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
- struct request *req)
+static blk_status_t ps3disk_submit_request_sg(struct ps3_storage_device *dev,
+ struct request *req)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
int write = rq_data_dir(req), res;
@@ -158,16 +159,15 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
- __blk_end_request_all(req, BLK_STS_IOERR);
- return 0;
+ return BLK_STS_IOERR;
}
priv->req = req;
- return 1;
+ return BLK_STS_OK;
}
-static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
- struct request *req)
+static blk_status_t ps3disk_submit_flush_request(struct ps3_storage_device *dev,
+ struct request *req)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
u64 res;
@@ -180,50 +180,45 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
- __blk_end_request_all(req, BLK_STS_IOERR);
- return 0;
+ return BLK_STS_IOERR;
}
priv->req = req;
- return 1;
+ return BLK_STS_OK;
}
-static void ps3disk_do_request(struct ps3_storage_device *dev,
- struct request_queue *q)
+static blk_status_t ps3disk_do_request(struct ps3_storage_device *dev,
+ struct request *req)
{
- struct request *req;
-
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
- while ((req = blk_fetch_request(q))) {
- switch (req_op(req)) {
- case REQ_OP_FLUSH:
- if (ps3disk_submit_flush_request(dev, req))
- return;
- break;
- case REQ_OP_READ:
- case REQ_OP_WRITE:
- if (ps3disk_submit_request_sg(dev, req))
- return;
- break;
- default:
- blk_dump_rq_flags(req, DEVICE_NAME " bad request");
- __blk_end_request_all(req, BLK_STS_IOERR);
- }
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
+ return ps3disk_submit_flush_request(dev, req);
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ return ps3disk_submit_request_sg(dev, req);
+ default:
+ blk_dump_rq_flags(req, DEVICE_NAME " bad request");
+ return BLK_STS_IOERR;
}
}
-static void ps3disk_request(struct request_queue *q)
+static blk_status_t ps3disk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct request_queue *q = hctx->queue;
struct ps3_storage_device *dev = q->queuedata;
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ blk_status_t ret;
- if (priv->req) {
- dev_dbg(&dev->sbd.core, "%s:%u busy\n", __func__, __LINE__);
- return;
- }
+ blk_mq_start_request(bd->rq);
+
+ spin_lock_irq(&priv->lock);
+ ret = ps3disk_do_request(dev, bd->rq);
+ spin_unlock_irq(&priv->lock);
- ps3disk_do_request(dev, q);
+ return ret;
}
static irqreturn_t ps3disk_interrupt(int irq, void *data)
@@ -280,11 +275,11 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
}
spin_lock(&priv->lock);
- __blk_end_request_all(req, error);
priv->req = NULL;
- ps3disk_do_request(dev, priv->queue);
+ blk_mq_end_request(req, error);
spin_unlock(&priv->lock);
+ blk_mq_run_hw_queues(priv->queue, true);
return IRQ_HANDLED;
}
@@ -404,6 +399,10 @@ static unsigned long ps3disk_mask;
static DEFINE_MUTEX(ps3disk_mask_mutex);
+static const struct blk_mq_ops ps3disk_mq_ops = {
+ .queue_rq = ps3disk_queue_rq,
+};
+
static int ps3disk_probe(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
@@ -454,11 +453,12 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
ps3disk_identify(dev);
- queue = blk_init_queue(ps3disk_request, &priv->lock);
- if (!queue) {
- dev_err(&dev->sbd.core, "%s:%u: blk_init_queue failed\n",
+ queue = blk_mq_init_sq_queue(&priv->tag_set, &ps3disk_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(queue)) {
+ dev_err(&dev->sbd.core, "%s:%u: blk_mq_init_queue failed\n",
__func__, __LINE__);
- error = -ENOMEM;
+ error = PTR_ERR(queue);
goto fail_teardown;
}
@@ -500,11 +500,12 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
- device_add_disk(&dev->sbd.core, gendisk);
+ device_add_disk(&dev->sbd.core, gendisk, NULL);
return 0;
fail_cleanup_queue:
blk_cleanup_queue(queue);
+ blk_mq_free_tag_set(&priv->tag_set);
fail_teardown:
ps3stor_teardown(dev);
fail_free_bounce:
@@ -530,6 +531,7 @@ static int ps3disk_remove(struct ps3_system_bus_device *_dev)
mutex_unlock(&ps3disk_mask_mutex);
del_gendisk(priv->gendisk);
blk_cleanup_queue(priv->queue);
+ blk_mq_free_tag_set(&priv->tag_set);
put_disk(priv->gendisk);
dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
ps3disk_sync_cache(dev);
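The ps3disk hunks above show the standard shape for converting a legacy request_fn driver to blk-mq: embed a blk_mq_tag_set in the driver state, allocate the queue with blk_mq_init_sq_queue(), do the work from a queue_rq handler, and complete from the interrupt path with blk_mq_end_request() plus blk_mq_run_hw_queues() to restart dispatch. A condensed sketch of that shape, with hypothetical mydev_* names (a sketch of the pattern, not the driver itself):

#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/spinlock.h>

struct mydev {
	spinlock_t lock;
	struct request_queue *queue;
	struct blk_mq_tag_set tag_set;
	struct request *req;	/* the single in-flight request */
};

/* hypothetical hardware submission; returns BLK_STS_OK or an error */
static blk_status_t mydev_submit(struct mydev *md, struct request *rq);

static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct mydev *md = hctx->queue->queuedata;
	blk_status_t ret;

	blk_mq_start_request(bd->rq);

	spin_lock_irq(&md->lock);
	ret = mydev_submit(md, bd->rq);
	spin_unlock_irq(&md->lock);

	return ret;
}

static const struct blk_mq_ops mydev_mq_ops = {
	.queue_rq	= mydev_queue_rq,
};

static int mydev_init_queue(struct mydev *md)
{
	struct request_queue *q;

	/* depth 1: this hardware services one request at a time */
	q = blk_mq_init_sq_queue(&md->tag_set, &mydev_mq_ops, 1,
				 BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(q))
		return PTR_ERR(q);

	md->queue = q;
	q->queuedata = md;
	return 0;
}

/* completion path, typically run from the device interrupt under md->lock */
static void mydev_complete(struct mydev *md, blk_status_t error)
{
	struct request *req = md->req;

	md->req = NULL;
	blk_mq_end_request(req, error);
	blk_mq_run_hw_queues(md->queue, true);	/* kick the next request */
}

Teardown mirrors the failure path in the hunks above: blk_cleanup_queue() on the queue, then blk_mq_free_tag_set() on the set.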
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 1e3d5de9d838..c0c50816a10b 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -769,7 +769,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
- device_add_disk(&dev->core, gendisk);
+ device_add_disk(&dev->core, gendisk, NULL);
return 0;
fail_cleanup_queue:
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index f2c631ce793c..0cf4509d575c 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -780,9 +780,9 @@ static int rsxx_pci_probe(struct pci_dev *dev,
goto failed_enable;
pci_set_master(dev);
- pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
+ dma_set_max_seg_size(&dev->dev, RSXX_HW_BLK_SIZE);
- st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
+ st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
if (st) {
dev_err(CARD_TO_DEV(card),
"No usable DMA configuration,aborting\n");
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index c148e83e4ed7..d9a8758682c9 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -276,7 +276,7 @@ static void creg_cmd_done(struct work_struct *work)
st = -EIO;
}
- if ((cmd->op == CREG_OP_READ)) {
+ if (cmd->op == CREG_OP_READ) {
unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
/* Paranoid Sanity Checks */
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 1a92f9e65937..3894aa0f350b 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -226,7 +226,7 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card)
set_capacity(card->gendisk, card->size8 >> 9);
else
set_capacity(card->gendisk, 0);
- device_add_disk(CARD_TO_DEV(card), card->gendisk);
+ device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
card->bdev_attached = 1;
}
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 8fbc1bf6db3d..af9cf0215164 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -224,12 +224,12 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
{
if (dma->cmd != HW_CMD_BLK_DISCARD) {
- if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
- pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+ if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
+ dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
+ DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
}
}
@@ -438,23 +438,23 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
if (dma->cmd != HW_CMD_BLK_DISCARD) {
if (dma->cmd == HW_CMD_BLK_WRITE)
- dir = PCI_DMA_TODEVICE;
+ dir = DMA_TO_DEVICE;
else
- dir = PCI_DMA_FROMDEVICE;
+ dir = DMA_FROM_DEVICE;
/*
- * The function pci_map_page is placed here because we
+ * The function dma_map_page is placed here because we
* can only, by design, issue up to 255 commands to the
* hardware at one time per DMA channel. So the maximum
* amount of mapped memory would be 255 * 4 channels *
* 4096 Bytes which is less than 2GB, the limit of a x8
- * Non-HWWD PCIe slot. This way the pci_map_page
+ * Non-HWWD PCIe slot. This way the dma_map_page
* function should never fail because of a lack of
* mappable memory.
*/
- dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+ dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
dma->pg_off, dma->sub_page.cnt << 9, dir);
- if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+ if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
push_tracker(ctrl->trackers, tag);
rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
continue;
@@ -776,10 +776,10 @@ bvec_err:
/*----------------- DMA Engine Initialization & Setup -------------------*/
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
- ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
- &ctrl->status.dma_addr);
- ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
- &ctrl->cmd.dma_addr);
+ ctrl->status.buf = dma_alloc_coherent(&dev->dev, STATUS_BUFFER_SIZE8,
+ &ctrl->status.dma_addr, GFP_KERNEL);
+ ctrl->cmd.buf = dma_alloc_coherent(&dev->dev, COMMAND_BUFFER_SIZE8,
+ &ctrl->cmd.dma_addr, GFP_KERNEL);
if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
return -ENOMEM;
@@ -962,12 +962,12 @@ failed_dma_setup:
vfree(ctrl->trackers);
if (ctrl->status.buf)
- pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
- ctrl->status.buf,
- ctrl->status.dma_addr);
+ dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
+ ctrl->status.buf,
+ ctrl->status.dma_addr);
if (ctrl->cmd.buf)
- pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
- ctrl->cmd.buf, ctrl->cmd.dma_addr);
+ dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
+ ctrl->cmd.buf, ctrl->cmd.dma_addr);
}
return st;
@@ -1023,10 +1023,10 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
vfree(ctrl->trackers);
- pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
- ctrl->status.buf, ctrl->status.dma_addr);
- pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
- ctrl->cmd.buf, ctrl->cmd.dma_addr);
+ dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
+ ctrl->status.buf, ctrl->status.dma_addr);
+ dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
+ ctrl->cmd.buf, ctrl->cmd.dma_addr);
}
}
@@ -1059,11 +1059,11 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
card->ctrl[i].stats.reads_issued--;
if (dma->cmd != HW_CMD_BLK_DISCARD) {
- pci_unmap_page(card->dev, dma->dma_addr,
+ dma_unmap_page(&card->dev->dev, dma->dma_addr,
get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
+ DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
}
list_add_tail(&dma->list, &issued_dmas[i]);
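The rsxx hunks are a mechanical translation from the legacy pci_* DMA wrappers to the generic DMA API: pci_map_page(pdev, ...) becomes dma_map_page(&pdev->dev, ...), PCI_DMA_{TO,FROM}DEVICE become DMA_{TO,FROM}_DEVICE, and pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent() with an explicit GFP_KERNEL. A minimal sketch of the streaming-mapping half of that pattern on a PCI device (hypothetical function name):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_map_one_page(struct pci_dev *pdev, struct page *page,
			   unsigned int offset, unsigned int len, bool write)
{
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	dma_addr_t addr;

	addr = dma_map_page(&pdev->dev, page, offset, len, dir);
	if (dma_mapping_error(&pdev->dev, addr))
		return -ENOMEM;	/* never hand a failed mapping to hardware */

	/* ... program addr into the device, wait for completion, then: */
	dma_unmap_page(&pdev->dev, addr, len, dir);
	return 0;
}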
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 87b9e7fbf062..7c5fc6942f32 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -632,7 +632,7 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
* Map scatterlist to PCI bus addresses.
* Note PCI might change the number of entries.
*/
- n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
+ n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
if (n_sg <= 0)
return false;
@@ -682,7 +682,8 @@ static void skd_postop_sg_list(struct skd_device *skdev,
skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
skreq->sksg_dma_address +
((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
- pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
+ dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
+ skreq->data_dir);
}
/*
@@ -1416,7 +1417,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_BUSY_IMMINENT:
skd_log_skreq(skdev, skreq, "retry(busy)");
- blk_requeue_request(skdev->queue, req);
+ blk_mq_requeue_request(req, true);
dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
skdev->timer_countdown = SKD_TIMER_MINUTES(20);
@@ -1426,7 +1427,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_REQUEUE_REQUEST:
if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
skd_log_skreq(skdev, skreq, "retry");
- blk_requeue_request(skdev->queue, req);
+ blk_mq_requeue_request(req, true);
break;
}
/* fall through */
@@ -2632,8 +2633,8 @@ static int skd_cons_skcomp(struct skd_device *skdev)
"comp pci_alloc, total bytes %zd entries %d\n",
SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
- skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
- &skdev->cq_dma_address);
+ skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+ &skdev->cq_dma_address, GFP_KERNEL);
if (skcomp == NULL) {
rc = -ENOMEM;
@@ -2674,10 +2675,10 @@ static int skd_cons_skmsg(struct skd_device *skdev)
skmsg->id = i + SKD_ID_FIT_MSG;
- skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
- SKD_N_FITMSG_BYTES,
- &skmsg->mb_dma_address);
-
+ skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
+ SKD_N_FITMSG_BYTES,
+ &skmsg->mb_dma_address,
+ GFP_KERNEL);
if (skmsg->msg_buf == NULL) {
rc = -ENOMEM;
goto err_out;
@@ -2971,8 +2972,8 @@ err_out:
static void skd_free_skcomp(struct skd_device *skdev)
{
if (skdev->skcomp_table)
- pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
- skdev->skcomp_table, skdev->cq_dma_address);
+ dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+ skdev->skcomp_table, skdev->cq_dma_address);
skdev->skcomp_table = NULL;
skdev->cq_dma_address = 0;
@@ -2991,8 +2992,8 @@ static void skd_free_skmsg(struct skd_device *skdev)
skmsg = &skdev->skmsg_table[i];
if (skmsg->msg_buf != NULL) {
- pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
- skmsg->msg_buf,
+ dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
+ skmsg->msg_buf,
skmsg->mb_dma_address);
}
skmsg->msg_buf = NULL;
@@ -3104,7 +3105,7 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
dev_dbg(&skdev->pdev->dev, "add_disk\n");
- device_add_disk(parent, skdev->disk);
+ device_add_disk(parent, skdev->disk, NULL);
return 0;
}
@@ -3172,18 +3173,12 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out;
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!rc) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- dev_err(&pdev->dev, "consistent DMA mask error %d\n",
- rc);
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "DMA mask error %d\n", rc);
- goto err_out_regions;
- }
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(&pdev->dev, "DMA mask error %d\n", rc);
+ goto err_out_regions;
}
if (!skd_major) {
@@ -3367,20 +3362,12 @@ static int skd_pci_resume(struct pci_dev *pdev)
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out;
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!rc) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
-
- dev_err(&pdev->dev, "consistent DMA mask error %d\n",
- rc);
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
-
- dev_err(&pdev->dev, "DMA mask error %d\n", rc);
- goto err_out_regions;
- }
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(&pdev->dev, "DMA mask error %d\n", rc);
+ goto err_out_regions;
}
pci_set_master(pdev);
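The skd probe and resume paths above collapse the old two-step pci_set_dma_mask()/pci_set_consistent_dma_mask() dance into dma_set_mask_and_coherent(). Note that the 32-bit fallback's return value must be captured in rc; otherwise a successful fallback would still leave the stale 64-bit error in rc and take the error path. The canonical negotiation in isolation (hypothetical helper):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_set_dma_masks(struct pci_dev *pdev)
{
	int rc;

	/* prefer 64-bit addressing, fall back to 32-bit */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_err(&pdev->dev, "no usable DMA configuration\n");
	return rc;
}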
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5ca56bfae63c..b54fa6726303 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -36,6 +36,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define VDC_TX_RING_SIZE 512
#define VDC_DEFAULT_BLK_SIZE 512
+#define MAX_XFER_BLKS (128 * 1024)
+#define MAX_XFER_SIZE (MAX_XFER_BLKS / VDC_DEFAULT_BLK_SIZE)
+#define MAX_RING_COOKIES ((MAX_XFER_BLKS / PAGE_SIZE) + 2)
+
#define WAITING_FOR_LINK_UP 0x01
#define WAITING_FOR_TX_SPACE 0x02
#define WAITING_FOR_GEN_CMD 0x04
@@ -450,7 +454,7 @@ static int __send_request(struct request *req)
{
struct vdc_port *port = req->rq_disk->private_data;
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- struct scatterlist sg[port->ring_cookies];
+ struct scatterlist sg[MAX_RING_COOKIES];
struct vdc_req_entry *rqe;
struct vio_disk_desc *desc;
unsigned int map_perm;
@@ -458,6 +462,9 @@ static int __send_request(struct request *req)
u64 len;
u8 op;
+ if (WARN_ON(port->ring_cookies > MAX_RING_COOKIES))
+ return -EINVAL;
+
map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
if (rq_data_dir(req) == READ) {
@@ -850,7 +857,7 @@ static int probe_disk(struct vdc_port *port)
port->vdisk_size, (port->vdisk_size >> (20 - 9)),
port->vio.ver.major, port->vio.ver.minor);
- device_add_disk(&port->vio.vdev->dev, g);
+ device_add_disk(&port->vio.vdev->dev, g, NULL);
return 0;
}
@@ -984,9 +991,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto err_out_free_port;
port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
- port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
- port->ring_cookies = ((port->max_xfer_size *
- port->vdisk_block_size) / PAGE_SIZE) + 2;
+ port->max_xfer_size = MAX_XFER_SIZE;
+ port->ring_cookies = MAX_RING_COOKIES;
err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
if (err)
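The sunvdc change removes a variable-length array (sg[port->ring_cookies]) from the stack, part of the tree-wide -Wvla cleanup: derive a compile-time upper bound, size the array with it, and guard the runtime value with WARN_ON so impossible configurations fail loudly instead of corrupting the stack. A generic sketch with a hypothetical bound:

#include <linux/bug.h>
#include <linux/scatterlist.h>

#define MY_MAX_COOKIES	34	/* hypothetical worst case, fixed at build time */

static int my_send(unsigned int ring_cookies)
{
	struct scatterlist sg[MY_MAX_COOKIES];	/* no VLA: bound is constant */

	/* refuse configurations the static bound cannot cover */
	if (WARN_ON(ring_cookies > MY_MAX_COOKIES))
		return -EINVAL;

	sg_init_table(sg, ring_cookies);
	/* ... populate and submit sg ... */
	return 0;
}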
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 0e31884a9519..3fa6fcc34790 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/fd.h>
#include <linux/slab.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
@@ -190,6 +190,7 @@ struct floppy_state {
int ref_count;
struct gendisk *disk;
+ struct blk_mq_tag_set tag_set;
/* parent controller */
@@ -211,7 +212,6 @@ enum head {
struct swim_priv {
struct swim __iomem *base;
spinlock_t lock;
- int fdc_queue;
int floppy_count;
struct floppy_state unit[FD_MAX_UNIT];
};
@@ -525,58 +525,36 @@ static blk_status_t floppy_read_sectors(struct floppy_state *fs,
return 0;
}
-static struct request *swim_next_request(struct swim_priv *swd)
+static blk_status_t swim_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request_queue *q;
- struct request *rq;
- int old_pos = swd->fdc_queue;
+ struct floppy_state *fs = hctx->queue->queuedata;
+ struct swim_priv *swd = fs->swd;
+ struct request *req = bd->rq;
+ blk_status_t err;
- do {
- q = swd->unit[swd->fdc_queue].disk->queue;
- if (++swd->fdc_queue == swd->floppy_count)
- swd->fdc_queue = 0;
- if (q) {
- rq = blk_fetch_request(q);
- if (rq)
- return rq;
- }
- } while (swd->fdc_queue != old_pos);
+ if (!spin_trylock_irq(&swd->lock))
+ return BLK_STS_DEV_RESOURCE;
- return NULL;
-}
+ blk_mq_start_request(req);
-static void do_fd_request(struct request_queue *q)
-{
- struct swim_priv *swd = q->queuedata;
- struct request *req;
- struct floppy_state *fs;
+ if (!fs->disk_in || rq_data_dir(req) == WRITE) {
+ err = BLK_STS_IOERR;
+ goto out;
+ }
- req = swim_next_request(swd);
- while (req) {
- blk_status_t err = BLK_STS_IOERR;
+ do {
+ err = floppy_read_sectors(fs, blk_rq_pos(req),
+ blk_rq_cur_sectors(req),
+ bio_data(req->bio));
+ } while (blk_update_request(req, err, blk_rq_cur_bytes(req)));
+ __blk_mq_end_request(req, err);
- fs = req->rq_disk->private_data;
- if (blk_rq_pos(req) >= fs->total_secs)
- goto done;
- if (!fs->disk_in)
- goto done;
- if (rq_data_dir(req) == WRITE && fs->write_protected)
- goto done;
+ err = BLK_STS_OK;
+out:
+ spin_unlock_irq(&swd->lock);
+ return err;
- switch (rq_data_dir(req)) {
- case WRITE:
- /* NOT IMPLEMENTED */
- break;
- case READ:
- err = floppy_read_sectors(fs, blk_rq_pos(req),
- blk_rq_cur_sectors(req),
- bio_data(req->bio));
- break;
- }
- done:
- if (!__blk_end_request_cur(req, err))
- req = swim_next_request(swd);
- }
}
static struct floppy_struct floppy_type[4] = {
@@ -823,6 +801,10 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
return 0;
}
+static const struct blk_mq_ops swim_mq_ops = {
+ .queue_rq = swim_queue_rq,
+};
+
static int swim_floppy_init(struct swim_priv *swd)
{
int err;
@@ -852,20 +834,25 @@ static int swim_floppy_init(struct swim_priv *swd)
spin_lock_init(&swd->lock);
for (drive = 0; drive < swd->floppy_count; drive++) {
+ struct request_queue *q;
+
swd->unit[drive].disk = alloc_disk(1);
if (swd->unit[drive].disk == NULL) {
err = -ENOMEM;
goto exit_put_disks;
}
- swd->unit[drive].disk->queue = blk_init_queue(do_fd_request,
- &swd->lock);
- if (!swd->unit[drive].disk->queue) {
- err = -ENOMEM;
+
+ q = blk_mq_init_sq_queue(&swd->unit[drive].tag_set, &swim_mq_ops,
+ 2, BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
goto exit_put_disks;
}
+
+ swd->unit[drive].disk->queue = q;
blk_queue_bounce_limit(swd->unit[drive].disk->queue,
BLK_BOUNCE_HIGH);
- swd->unit[drive].disk->queue->queuedata = swd;
+ swd->unit[drive].disk->queue->queuedata = &swd->unit[drive];
swd->unit[drive].swd = swd;
}
@@ -887,8 +874,18 @@ static int swim_floppy_init(struct swim_priv *swd)
exit_put_disks:
unregister_blkdev(FLOPPY_MAJOR, "fd");
- while (drive--)
- put_disk(swd->unit[drive].disk);
+ do {
+ struct gendisk *disk = swd->unit[drive].disk;
+
+ if (disk) {
+ if (disk->queue) {
+ blk_cleanup_queue(disk->queue);
+ disk->queue = NULL;
+ }
+ blk_mq_free_tag_set(&swd->unit[drive].tag_set);
+ put_disk(disk);
+ }
+ } while (drive--);
return err;
}
@@ -961,6 +958,7 @@ static int swim_remove(struct platform_device *dev)
for (drive = 0; drive < swd->floppy_count; drive++) {
del_gendisk(swd->unit[drive].disk);
blk_cleanup_queue(swd->unit[drive].disk->queue);
+ blk_mq_free_tag_set(&swd->unit[drive].tag_set);
put_disk(swd->unit[drive].disk);
}
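Worth noting in swim_queue_rq() above: a driver whose controller is shared between drives can refuse work without sleeping by returning BLK_STS_DEV_RESOURCE, which tells blk-mq to requeue the request and retry later. The busy-refusal pattern in isolation, with hypothetical names:

#include <linux/blk-mq.h>
#include <linux/spinlock.h>

struct my_ctrl {
	spinlock_t lock;	/* serializes access to the shared controller */
};

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_ctrl *ctrl = hctx->queue->queuedata;

	/* controller busy servicing another drive: ask blk-mq to retry */
	if (!spin_trylock_irq(&ctrl->lock))
		return BLK_STS_DEV_RESOURCE;

	blk_mq_start_request(bd->rq);
	/* ... drive the hardware synchronously ... */
	blk_mq_end_request(bd->rq, BLK_STS_OK);

	spin_unlock_irq(&ctrl->lock);
	return BLK_STS_OK;
}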
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 469541c1e51e..c1c676a33e4a 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -25,7 +25,7 @@
#include <linux/delay.h>
#include <linux/fd.h>
#include <linux/ioctl.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -206,6 +206,7 @@ struct floppy_state {
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
int index;
struct request *cur_req;
+ struct blk_mq_tag_set tag_set;
};
#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
@@ -260,16 +261,15 @@ static int floppy_revalidate(struct gendisk *disk);
static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
{
struct request *req = fs->cur_req;
- int rc;
swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
err, nr_bytes, req);
if (err)
nr_bytes = blk_rq_cur_bytes(req);
- rc = __blk_end_request(req, err, nr_bytes);
- if (rc)
+ if (blk_update_request(req, err, nr_bytes))
return true;
+ __blk_mq_end_request(req, err);
fs->cur_req = NULL;
return false;
}
@@ -309,86 +309,58 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
return (stat & DATA) == 0;
}
-static void start_request(struct floppy_state *fs)
+static blk_status_t swim3_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request *req;
+ struct floppy_state *fs = hctx->queue->queuedata;
+ struct request *req = bd->rq;
unsigned long x;
- swim3_dbg("start request, initial state=%d\n", fs->state);
-
- if (fs->state == idle && fs->wanted) {
- fs->state = available;
- wake_up(&fs->wait);
- return;
+ spin_lock_irq(&swim3_lock);
+ if (fs->cur_req || fs->state != idle) {
+ spin_unlock_irq(&swim3_lock);
+ return BLK_STS_DEV_RESOURCE;
}
- while (fs->state == idle) {
- swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
- if (!fs->cur_req) {
- fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
- swim3_dbg(" fetched request %p\n", fs->cur_req);
- if (!fs->cur_req)
- break;
- }
- req = fs->cur_req;
-
- if (fs->mdev->media_bay &&
- check_media_bay(fs->mdev->media_bay) != MB_FD) {
- swim3_dbg("%s", " media bay absent, dropping req\n");
- swim3_end_request(fs, BLK_STS_IOERR, 0);
- continue;
- }
-
-#if 0 /* This is really too verbose */
- swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
- req->rq_disk->disk_name, req->cmd,
- (long)blk_rq_pos(req), blk_rq_sectors(req),
- bio_data(req->bio));
- swim3_dbg(" current_nr_sectors=%u\n",
- blk_rq_cur_sectors(req));
-#endif
-
- if (blk_rq_pos(req) >= fs->total_secs) {
- swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
- (long)blk_rq_pos(req), (long)fs->total_secs);
- swim3_end_request(fs, BLK_STS_IOERR, 0);
- continue;
- }
- if (fs->ejected) {
- swim3_dbg("%s", " disk ejected\n");
+ blk_mq_start_request(req);
+ fs->cur_req = req;
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD) {
+ swim3_dbg("%s", " media bay absent, dropping req\n");
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
+ goto out;
+ }
+ if (fs->ejected) {
+ swim3_dbg("%s", " disk ejected\n");
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
+ goto out;
+ }
+ if (rq_data_dir(req) == WRITE) {
+ if (fs->write_prot < 0)
+ fs->write_prot = swim3_readbit(fs, WRITE_PROT);
+ if (fs->write_prot) {
+ swim3_dbg("%s", " try to write, disk write protected\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
- continue;
+ goto out;
}
-
- if (rq_data_dir(req) == WRITE) {
- if (fs->write_prot < 0)
- fs->write_prot = swim3_readbit(fs, WRITE_PROT);
- if (fs->write_prot) {
- swim3_dbg("%s", " try to write, disk write protected\n");
- swim3_end_request(fs, BLK_STS_IOERR, 0);
- continue;
- }
- }
-
- /* Do not remove the cast. blk_rq_pos(req) is now a
- * sector_t and can be 64 bits, but it will never go
- * past 32 bits for this driver anyway, so we can
- * safely cast it down and not have to do a 64/32
- * division
- */
- fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
- x = ((long)blk_rq_pos(req)) % fs->secpercyl;
- fs->head = x / fs->secpertrack;
- fs->req_sector = x % fs->secpertrack + 1;
- fs->state = do_transfer;
- fs->retries = 0;
-
- act(fs);
}
-}
-static void do_fd_request(struct request_queue * q)
-{
- start_request(q->queuedata);
+ /*
+ * Do not remove the cast. blk_rq_pos(req) is now a sector_t and can be
+ * 64 bits, but it will never go past 32 bits for this driver anyway, so
+ * we can safely cast it down and not have to do a 64/32 division
+ */
+ fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
+ x = ((long)blk_rq_pos(req)) % fs->secpercyl;
+ fs->head = x / fs->secpertrack;
+ fs->req_sector = x % fs->secpertrack + 1;
+ fs->state = do_transfer;
+ fs->retries = 0;
+
+ act(fs);
+
+out:
+ spin_unlock_irq(&swim3_lock);
+ return BLK_STS_OK;
}
static void set_timeout(struct floppy_state *fs, int nticks,
@@ -585,7 +557,6 @@ static void scan_timeout(struct timer_list *t)
if (fs->retries > 5) {
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
} else {
fs->state = jogging;
act(fs);
@@ -609,7 +580,6 @@ static void seek_timeout(struct timer_list *t)
swim3_err("%s", "Seek timeout\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
}
@@ -638,7 +608,6 @@ static void settle_timeout(struct timer_list *t)
swim3_err("%s", "Seek settle timeout\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
unlock:
spin_unlock_irqrestore(&swim3_lock, flags);
}
@@ -667,7 +636,6 @@ static void xfer_timeout(struct timer_list *t)
(long)blk_rq_pos(fs->cur_req));
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
}
@@ -704,7 +672,6 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (fs->retries > 5) {
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
} else {
fs->state = jogging;
act(fs);
@@ -796,7 +763,6 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->state, rq_data_dir(req), intr, err);
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
break;
}
fs->retries = 0;
@@ -813,8 +779,6 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
} else
fs->state = idle;
}
- if (fs->state == idle)
- start_request(fs);
break;
default:
swim3_err("Don't know what to do in state %d\n", fs->state);
@@ -862,14 +826,19 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
static void release_drive(struct floppy_state *fs)
{
+ struct request_queue *q = disks[fs->index]->queue;
unsigned long flags;
swim3_dbg("%s", "-> release drive\n");
spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
- start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
+
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
}
static int fd_eject(struct floppy_state *fs)
@@ -1089,6 +1058,10 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
+static const struct blk_mq_ops swim3_mq_ops = {
+ .queue_rq = swim3_queue_rq,
+};
+
static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
{
struct floppy_state *fs = macio_get_drvdata(mdev);
@@ -1202,47 +1175,63 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int swim3_attach(struct macio_dev *mdev,
const struct of_device_id *match)
{
+ struct floppy_state *fs;
struct gendisk *disk;
- int index, rc;
+ int rc;
- index = floppy_count++;
- if (index >= MAX_FLOPPIES)
+ if (floppy_count >= MAX_FLOPPIES)
return -ENXIO;
- /* Add the drive */
- rc = swim3_add_device(mdev, index);
- if (rc)
- return rc;
- /* Now register that disk. Same comment about failure handling */
- disk = disks[index] = alloc_disk(1);
- if (disk == NULL)
- return -ENOMEM;
- disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
- if (disk->queue == NULL) {
- put_disk(disk);
- return -ENOMEM;
+ if (floppy_count == 0) {
+ rc = register_blkdev(FLOPPY_MAJOR, "fd");
+ if (rc)
+ return rc;
}
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
- disk->queue->queuedata = &floppy_states[index];
- if (index == 0) {
- /* If we failed, there isn't much we can do as the driver is still
- * too dumb to remove the device, just bail out
- */
- if (register_blkdev(FLOPPY_MAJOR, "fd"))
- return 0;
+ fs = &floppy_states[floppy_count];
+
+ disk = alloc_disk(1);
+ if (disk == NULL) {
+ rc = -ENOMEM;
+ goto out_unregister;
+ }
+
+ disk->queue = blk_mq_init_sq_queue(&fs->tag_set, &swim3_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(disk->queue)) {
+ rc = PTR_ERR(disk->queue);
+ disk->queue = NULL;
+ goto out_put_disk;
}
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
+ disk->queue->queuedata = fs;
+
+ rc = swim3_add_device(mdev, floppy_count);
+ if (rc)
+ goto out_cleanup_queue;
disk->major = FLOPPY_MAJOR;
- disk->first_minor = index;
+ disk->first_minor = floppy_count;
disk->fops = &floppy_fops;
- disk->private_data = &floppy_states[index];
+ disk->private_data = fs;
disk->flags |= GENHD_FL_REMOVABLE;
- sprintf(disk->disk_name, "fd%d", index);
+ sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
add_disk(disk);
+ disks[floppy_count++] = disk;
return 0;
+
+out_cleanup_queue:
+ blk_cleanup_queue(disk->queue);
+ disk->queue = NULL;
+ blk_mq_free_tag_set(&fs->tag_set);
+out_put_disk:
+ put_disk(disk);
+out_unregister:
+ if (floppy_count == 0)
+ unregister_blkdev(FLOPPY_MAJOR, "fd");
+ return rc;
}
static const struct of_device_id swim3_match[] =
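swim3_end_request() above shows the blk-mq replacement for __blk_end_request(): blk_update_request() retires nr_bytes of the request and returns true while sectors remain outstanding, and only when it returns false does __blk_mq_end_request() finish the request. The partial-completion idiom in isolation (hypothetical helper name):

#include <linux/blk-mq.h>

/*
 * Complete nr_bytes of req; returns true if the request still has more
 * work outstanding, so the caller should keep transferring.
 */
static bool my_end_chunk(struct request *req, blk_status_t err,
			 unsigned int nr_bytes)
{
	if (blk_update_request(req, err, nr_bytes))
		return true;		/* more segments to go */

	__blk_mq_end_request(req, err);	/* all done */
	return false;
}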
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 4d90e5eba2f5..064b8c5c7a32 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -16,7 +16,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
@@ -197,7 +197,6 @@ enum {
FL_NON_RAID = FW_VER_NON_RAID,
FL_4PORT = FW_VER_4PORT,
FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT),
- FL_DAC = (1 << 16),
FL_DYN_MAJOR = (1 << 17),
};
@@ -244,6 +243,7 @@ struct carm_port {
unsigned int port_no;
struct gendisk *disk;
struct carm_host *host;
+ struct blk_mq_tag_set tag_set;
/* attached device characteristics */
u64 capacity;
@@ -279,6 +279,7 @@ struct carm_host {
unsigned int state;
u32 fw_ver;
+ struct blk_mq_tag_set tag_set;
struct request_queue *oob_q;
unsigned int n_oob;
@@ -750,7 +751,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
struct request *req = crq->rq;
int rc;
- __blk_end_request_all(req, error);
+ blk_mq_end_request(req, error);
rc = carm_put_request(host, crq);
assert(rc == 0);
@@ -760,7 +761,7 @@ static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
{
unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
- blk_stop_queue(q);
+ blk_mq_stop_hw_queues(q);
VPRINTK("STOPPED QUEUE %p\n", q);
host->wait_q[idx] = q;
@@ -785,7 +786,7 @@ static inline void carm_round_robin(struct carm_host *host)
{
struct request_queue *q = carm_pop_q(host);
if (q) {
- blk_start_queue(q);
+ blk_mq_start_hw_queues(q);
VPRINTK("STARTED QUEUE %p\n", q);
}
}
@@ -802,82 +803,86 @@ static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
}
}
-static void carm_oob_rq_fn(struct request_queue *q)
+static blk_status_t carm_oob_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct request_queue *q = hctx->queue;
struct carm_host *host = q->queuedata;
struct carm_request *crq;
- struct request *rq;
int rc;
- while (1) {
- DPRINTK("get req\n");
- rq = blk_fetch_request(q);
- if (!rq)
- break;
+ blk_mq_start_request(bd->rq);
- crq = rq->special;
- assert(crq != NULL);
- assert(crq->rq == rq);
+ spin_lock_irq(&host->lock);
- crq->n_elem = 0;
+ crq = bd->rq->special;
+ assert(crq != NULL);
+ assert(crq->rq == bd->rq);
- DPRINTK("send req\n");
- rc = carm_send_msg(host, crq);
- if (rc) {
- blk_requeue_request(q, rq);
- carm_push_q(host, q);
- return; /* call us again later, eventually */
- }
+ crq->n_elem = 0;
+
+ DPRINTK("send req\n");
+ rc = carm_send_msg(host, crq);
+ if (rc) {
+ carm_push_q(host, q);
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_DEV_RESOURCE;
}
+
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_OK;
}
-static void carm_rq_fn(struct request_queue *q)
+static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct request_queue *q = hctx->queue;
struct carm_port *port = q->queuedata;
struct carm_host *host = port->host;
struct carm_msg_rw *msg;
struct carm_request *crq;
- struct request *rq;
+ struct request *rq = bd->rq;
struct scatterlist *sg;
int writing = 0, pci_dir, i, n_elem, rc;
u32 tmp;
unsigned int msg_size;
-queue_one_request:
- VPRINTK("get req\n");
- rq = blk_peek_request(q);
- if (!rq)
- return;
+ blk_mq_start_request(rq);
+
+ spin_lock_irq(&host->lock);
crq = carm_get_request(host);
if (!crq) {
carm_push_q(host, q);
- return; /* call us again later, eventually */
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_DEV_RESOURCE;
}
crq->rq = rq;
- blk_start_request(rq);
-
if (rq_data_dir(rq) == WRITE) {
writing = 1;
- pci_dir = PCI_DMA_TODEVICE;
+ pci_dir = DMA_TO_DEVICE;
} else {
- pci_dir = PCI_DMA_FROMDEVICE;
+ pci_dir = DMA_FROM_DEVICE;
}
/* get scatterlist from block layer */
sg = &crq->sg[0];
n_elem = blk_rq_map_sg(q, rq, sg);
if (n_elem <= 0) {
+ /* request with no s/g entries? */
carm_end_rq(host, crq, BLK_STS_IOERR);
- return; /* request with no s/g entries? */
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_IOERR;
}
/* map scatterlist to PCI bus addresses */
- n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
+ n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, pci_dir);
if (n_elem <= 0) {
+ /* request with no s/g entries? */
carm_end_rq(host, crq, BLK_STS_IOERR);
- return; /* request with no s/g entries? */
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_IOERR;
}
crq->n_elem = n_elem;
crq->port = port;
@@ -927,12 +932,13 @@ queue_one_request:
rc = carm_send_msg(host, crq);
if (rc) {
carm_put_request(host, crq);
- blk_requeue_request(q, rq);
carm_push_q(host, q);
- return; /* call us again later, eventually */
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_DEV_RESOURCE;
}
- goto queue_one_request;
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_OK;
}
static void carm_handle_array_info(struct carm_host *host,
@@ -1052,11 +1058,11 @@ static inline void carm_handle_rw(struct carm_host *host,
VPRINTK("ENTER\n");
if (rq_data_dir(crq->rq) == WRITE)
- pci_dir = PCI_DMA_TODEVICE;
+ pci_dir = DMA_TO_DEVICE;
else
- pci_dir = PCI_DMA_FROMDEVICE;
+ pci_dir = DMA_FROM_DEVICE;
- pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir);
+ dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem, pci_dir);
carm_end_rq(host, crq, error);
}
@@ -1485,6 +1491,14 @@ static int carm_init_host(struct carm_host *host)
return 0;
}
+static const struct blk_mq_ops carm_oob_mq_ops = {
+ .queue_rq = carm_oob_queue_rq,
+};
+
+static const struct blk_mq_ops carm_mq_ops = {
+ .queue_rq = carm_queue_rq,
+};
+
static int carm_init_disks(struct carm_host *host)
{
unsigned int i;
@@ -1513,9 +1527,10 @@ static int carm_init_disks(struct carm_host *host)
disk->fops = &carm_bd_ops;
disk->private_data = port;
- q = blk_init_queue(carm_rq_fn, &host->lock);
- if (!q) {
- rc = -ENOMEM;
+ q = blk_mq_init_sq_queue(&port->tag_set, &carm_mq_ops,
+ max_queue, BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(q)) {
+ rc = PTR_ERR(q);
break;
}
disk->queue = q;
@@ -1533,14 +1548,18 @@ static void carm_free_disks(struct carm_host *host)
unsigned int i;
for (i = 0; i < CARM_MAX_PORTS; i++) {
- struct gendisk *disk = host->port[i].disk;
+ struct carm_port *port = &host->port[i];
+ struct gendisk *disk = port->disk;
+
if (disk) {
struct request_queue *q = disk->queue;
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
- if (q)
+ if (q) {
+ blk_mq_free_tag_set(&port->tag_set);
blk_cleanup_queue(q);
+ }
put_disk(disk);
}
}
@@ -1548,8 +1567,8 @@ static void carm_free_disks(struct carm_host *host)
static int carm_init_shm(struct carm_host *host)
{
- host->shm = pci_alloc_consistent(host->pdev, CARM_SHM_SIZE,
- &host->shm_dma);
+ host->shm = dma_alloc_coherent(&host->pdev->dev, CARM_SHM_SIZE,
+ &host->shm_dma, GFP_KERNEL);
if (!host->shm)
return -ENOMEM;
@@ -1565,7 +1584,6 @@ static int carm_init_shm(struct carm_host *host)
static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct carm_host *host;
- unsigned int pci_dac;
int rc;
struct request_queue *q;
unsigned int i;
@@ -1580,28 +1598,12 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_out;
-#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!rc) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
- pci_name(pdev));
- goto err_out_regions;
- }
- pci_dac = 1;
- } else {
-#endif
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
- pci_name(pdev));
- goto err_out_regions;
- }
- pci_dac = 0;
-#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc) {
+ printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
+ pci_name(pdev));
+ goto err_out_regions;
}
-#endif
host = kzalloc(sizeof(*host), GFP_KERNEL);
if (!host) {
@@ -1612,7 +1614,6 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
}
host->pdev = pdev;
- host->flags = pci_dac ? FL_DAC : 0;
spin_lock_init(&host->lock);
INIT_WORK(&host->fsm_task, carm_fsm_task);
init_completion(&host->probe_comp);
@@ -1636,12 +1637,13 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
- q = blk_init_queue(carm_oob_rq_fn, &host->lock);
- if (!q) {
+ q = blk_mq_init_sq_queue(&host->tag_set, &carm_oob_mq_ops, 1,
+ BLK_MQ_F_NO_SCHED);
+ if (IS_ERR(q)) {
printk(KERN_ERR DRV_NAME "(%s): OOB queue alloc failure\n",
pci_name(pdev));
- rc = -ENOMEM;
- goto err_out_pci_free;
+ rc = PTR_ERR(q);
+ goto err_out_dma_free;
}
host->oob_q = q;
q->queuedata = host;
@@ -1705,8 +1707,9 @@ err_out_free_majors:
else if (host->major == 161)
clear_bit(1, &carm_major_alloc);
blk_cleanup_queue(host->oob_q);
-err_out_pci_free:
- pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
+ blk_mq_free_tag_set(&host->tag_set);
+err_out_dma_free:
+ dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
err_out_iounmap:
iounmap(host->mmio);
err_out_kfree:
@@ -1736,7 +1739,8 @@ static void carm_remove_one (struct pci_dev *pdev)
else if (host->major == 161)
clear_bit(1, &carm_major_alloc);
blk_cleanup_queue(host->oob_q);
- pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
+ blk_mq_free_tag_set(&host->tag_set);
+ dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
iounmap(host->mmio);
kfree(host);
pci_release_regions(pdev);
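The sx8 conversion keeps its old flow-control scheme: when the hardware message ring is full, the queue is parked and BLK_STS_DEV_RESOURCE returned; as completions free slots, parked queues are restarted. The legacy blk_stop_queue()/blk_start_queue() pair maps directly onto the blk-mq hardware-queue equivalents. A sketch of the two halves, with hypothetical names:

#include <linux/blk-mq.h>

/* submission side: no hardware slot available */
static blk_status_t my_queue_full(struct request_queue *q)
{
	blk_mq_stop_hw_queues(q);	/* park until a slot frees up */
	return BLK_STS_DEV_RESOURCE;	/* blk-mq will requeue the request */
}

/* completion side: a slot was just freed */
static void my_slot_freed(struct request_queue *q)
{
	blk_mq_start_hw_queues(q);	/* unpark and redispatch */
}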
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 5c7fb8cc4149..be3e3ab79950 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -363,12 +363,12 @@ static int add_bio(struct cardinfo *card)
vec = bio_iter_iovec(bio, card->current_iter);
- dma_handle = pci_map_page(card->dev,
+ dma_handle = dma_map_page(&card->dev->dev,
vec.bv_page,
vec.bv_offset,
vec.bv_len,
bio_op(bio) == REQ_OP_READ ?
- PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
p = &card->mm_pages[card->Ready];
desc = &p->desc[p->cnt];
@@ -421,7 +421,7 @@ static void process_page(unsigned long data)
struct cardinfo *card = (struct cardinfo *)data;
unsigned int dma_status = card->dma_status;
- spin_lock_bh(&card->lock);
+ spin_lock(&card->lock);
if (card->Active < 0)
goto out_unlock;
page = &card->mm_pages[card->Active];
@@ -448,10 +448,10 @@ static void process_page(unsigned long data)
page->iter = page->bio->bi_iter;
}
- pci_unmap_page(card->dev, desc->data_dma_handle,
+ dma_unmap_page(&card->dev->dev, desc->data_dma_handle,
vec.bv_len,
(control & DMASCR_TRANSFER_READ) ?
- PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (control & DMASCR_HARD_ERROR) {
/* error */
bio->bi_status = BLK_STS_IOERR;
@@ -496,7 +496,7 @@ static void process_page(unsigned long data)
mm_start_io(card);
}
out_unlock:
- spin_unlock_bh(&card->lock);
+ spin_unlock(&card->lock);
while (return_bio) {
struct bio *bio = return_bio;
@@ -817,8 +817,8 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_printk(KERN_INFO, &dev->dev,
"Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");
- if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) &&
- pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&dev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
return -ENOMEM;
}
@@ -871,12 +871,10 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto failed_magic;
}
- card->mm_pages[0].desc = pci_alloc_consistent(card->dev,
- PAGE_SIZE * 2,
- &card->mm_pages[0].page_dma);
- card->mm_pages[1].desc = pci_alloc_consistent(card->dev,
- PAGE_SIZE * 2,
- &card->mm_pages[1].page_dma);
+ card->mm_pages[0].desc = dma_alloc_coherent(&card->dev->dev,
+ PAGE_SIZE * 2, &card->mm_pages[0].page_dma, GFP_KERNEL);
+ card->mm_pages[1].desc = dma_alloc_coherent(&card->dev->dev,
+ PAGE_SIZE * 2, &card->mm_pages[1].page_dma, GFP_KERNEL);
if (card->mm_pages[0].desc == NULL ||
card->mm_pages[1].desc == NULL) {
dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
@@ -1002,13 +1000,13 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
failed_req_irq:
failed_alloc:
if (card->mm_pages[0].desc)
- pci_free_consistent(card->dev, PAGE_SIZE*2,
- card->mm_pages[0].desc,
- card->mm_pages[0].page_dma);
+ dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
+ card->mm_pages[0].desc,
+ card->mm_pages[0].page_dma);
if (card->mm_pages[1].desc)
- pci_free_consistent(card->dev, PAGE_SIZE*2,
- card->mm_pages[1].desc,
- card->mm_pages[1].page_dma);
+ dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
+ card->mm_pages[1].desc,
+ card->mm_pages[1].page_dma);
failed_magic:
iounmap(card->csr_remap);
failed_remap_csr:
@@ -1027,11 +1025,11 @@ static void mm_pci_remove(struct pci_dev *dev)
iounmap(card->csr_remap);
if (card->mm_pages[0].desc)
- pci_free_consistent(card->dev, PAGE_SIZE*2,
+ dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
card->mm_pages[0].desc,
card->mm_pages[0].page_dma);
if (card->mm_pages[1].desc)
- pci_free_consistent(card->dev, PAGE_SIZE*2,
+ dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
card->mm_pages[1].desc,
card->mm_pages[1].page_dma);
blk_cleanup_queue(card->queue);
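The umem switch from spin_lock_bh() to spin_lock() in process_page() is correct because that function runs as a tasklet, i.e. already in bottom-half context, so disabling BH again is redundant; paths that run in process context and race with the tasklet still need the _bh variant. A sketch of the split, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

static void my_tasklet_fn(unsigned long data)
{
	spin_lock(&my_lock);	/* already in BH context: plain lock suffices */
	/* ... touch state shared with process context ... */
	spin_unlock(&my_lock);
}

static void my_process_ctx_path(void)
{
	spin_lock_bh(&my_lock);	/* must keep the tasklet out while held */
	/* ... */
	spin_unlock_bh(&my_lock);
}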
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 23752dc99b00..086c6bb12baa 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -351,8 +351,8 @@ static int minor_to_index(int minor)
return minor >> PART_BITS;
}
-static ssize_t virtblk_serial_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t serial_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
int err;
@@ -371,7 +371,7 @@ static ssize_t virtblk_serial_show(struct device *dev,
return err;
}
-static DEVICE_ATTR(serial, 0444, virtblk_serial_show, NULL);
+static DEVICE_ATTR_RO(serial);
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
@@ -545,8 +545,8 @@ static const char *const virtblk_cache_types[] = {
};
static ssize_t
-virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+cache_type_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
@@ -564,8 +564,7 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
@@ -575,12 +574,38 @@ virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}
-static const struct device_attribute dev_attr_cache_type_ro =
- __ATTR(cache_type, 0444,
- virtblk_cache_type_show, NULL);
-static const struct device_attribute dev_attr_cache_type_rw =
- __ATTR(cache_type, 0644,
- virtblk_cache_type_show, virtblk_cache_type_store);
+static DEVICE_ATTR_RW(cache_type);
+
+static struct attribute *virtblk_attrs[] = {
+ &dev_attr_serial.attr,
+ &dev_attr_cache_type.attr,
+ NULL,
+};
+
+static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gendisk *disk = dev_to_disk(dev);
+ struct virtio_blk *vblk = disk->private_data;
+ struct virtio_device *vdev = vblk->vdev;
+
+ if (a == &dev_attr_cache_type.attr &&
+ !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
+ return S_IRUGO;
+
+ return a->mode;
+}
+
+static const struct attribute_group virtblk_attr_group = {
+ .attrs = virtblk_attrs,
+ .is_visible = virtblk_attrs_are_visible,
+};
+
+static const struct attribute_group *virtblk_attr_groups[] = {
+ &virtblk_attr_group,
+ NULL,
+};
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
@@ -780,24 +805,9 @@ static int virtblk_probe(struct virtio_device *vdev)
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
- device_add_disk(&vdev->dev, vblk->disk);
- err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
- if (err)
- goto out_del_disk;
-
- if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
- err = device_create_file(disk_to_dev(vblk->disk),
- &dev_attr_cache_type_rw);
- else
- err = device_create_file(disk_to_dev(vblk->disk),
- &dev_attr_cache_type_ro);
- if (err)
- goto out_del_disk;
+ device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
return 0;
-out_del_disk:
- del_gendisk(vblk->disk);
- blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
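The virtio_blk rework replaces open-coded device_create_file() calls with an attribute group handed to the new three-argument device_add_disk(), so the attributes exist before the disk is announced to userspace and no KOBJ_ADD uevent fires without them. Conditional attributes are handled by the is_visible hook, which picks the mode per attribute at registration time. The minimal shape, with a hypothetical attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "%s\n", "hypothetical-serial");
}
static DEVICE_ATTR_RO(serial);	/* binds serial_show by naming convention */

static umode_t my_attrs_are_visible(struct kobject *kobj,
				    struct attribute *a, int n)
{
	return a->mode;		/* or return 0 to hide, S_IRUGO to force RO */
}

static struct attribute *my_attrs[] = {
	&dev_attr_serial.attr,
	NULL,
};

static const struct attribute_group my_attr_group = {
	.attrs		= my_attrs,
	.is_visible	= my_attrs_are_visible,
};

static const struct attribute_group *my_attr_groups[] = {
	&my_attr_group,
	NULL,
};

/* later: device_add_disk(parent, disk, my_attr_groups); */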
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a71d817e900d..9eea83ae01c6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2420,7 +2420,7 @@ static void blkfront_connect(struct blkfront_info *info)
for (i = 0; i < info->nr_rings; i++)
kick_pending_request_queues(&info->rinfo[i]);
- device_add_disk(&info->xbdev->dev, info->gd);
+ device_add_disk(&info->xbdev->dev, info->gd, NULL);
info->is_ready = 1;
return;
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
list_del(&gnt_list_entry->node);
gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
rinfo->persistent_gnts_c--;
- __free_page(gnt_list_entry->page);
- kfree(gnt_list_entry);
+ gnt_list_entry->gref = GRANT_INVALID_REF;
+ list_add_tail(&gnt_list_entry->node, &rinfo->grants);
}
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index c24589414c75..87ccef4bd69e 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -88,7 +88,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
@@ -209,6 +209,8 @@ struct ace_device {
struct device *dev;
struct request_queue *queue;
struct gendisk *gd;
+ struct blk_mq_tag_set tag_set;
+ struct list_head rq_list;
/* Inserted CF card parameters */
u16 cf_id[ATA_ID_WORDS];
@@ -462,18 +464,26 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
ace->fsm_continue_flag = 0;
}
+static bool ace_has_next_request(struct request_queue *q)
+{
+ struct ace_device *ace = q->queuedata;
+
+ return !list_empty(&ace->rq_list);
+}
+
/* Get the next read/write request; ending requests that we don't handle */
static struct request *ace_get_next_request(struct request_queue *q)
{
- struct request *req;
+ struct ace_device *ace = q->queuedata;
+ struct request *rq;
- while ((req = blk_peek_request(q)) != NULL) {
- if (!blk_rq_is_passthrough(req))
- break;
- blk_start_request(req);
- __blk_end_request_all(req, BLK_STS_IOERR);
+ rq = list_first_entry_or_null(&ace->rq_list, struct request, queuelist);
+ if (rq) {
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
}
- return req;
+
+ return rq;
}
static void ace_fsm_dostate(struct ace_device *ace)
@@ -499,11 +509,11 @@ static void ace_fsm_dostate(struct ace_device *ace)
/* Drop all in-flight and pending requests */
if (ace->req) {
- __blk_end_request_all(ace->req, BLK_STS_IOERR);
+ blk_mq_end_request(ace->req, BLK_STS_IOERR);
ace->req = NULL;
}
- while ((req = blk_fetch_request(ace->queue)) != NULL)
- __blk_end_request_all(req, BLK_STS_IOERR);
+ while ((req = ace_get_next_request(ace->queue)) != NULL)
+ blk_mq_end_request(req, BLK_STS_IOERR);
/* Drop back to IDLE state and notify waiters */
ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -517,7 +527,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
switch (ace->fsm_state) {
case ACE_FSM_STATE_IDLE:
/* See if there is anything to do */
- if (ace->id_req_count || ace_get_next_request(ace->queue)) {
+ if (ace->id_req_count || ace_has_next_request(ace->queue)) {
ace->fsm_iter_num++;
ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
mod_timer(&ace->stall_timer, jiffies + HZ);
@@ -651,7 +661,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
ace->fsm_state = ACE_FSM_STATE_IDLE;
break;
}
- blk_start_request(req);
/* Okay, it's a data request, set it up for transfer */
dev_dbg(ace->dev,
@@ -728,7 +737,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
}
/* bio finished; is there another one? */
- if (__blk_end_request_cur(ace->req, BLK_STS_OK)) {
+ if (blk_update_request(ace->req, BLK_STS_OK,
+ blk_rq_cur_bytes(ace->req))) {
/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
* blk_rq_sectors(ace->req),
* blk_rq_cur_sectors(ace->req));
@@ -854,17 +864,23 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
/* ---------------------------------------------------------------------
* Block ops
*/
-static void ace_request(struct request_queue * q)
+static blk_status_t ace_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request *req;
- struct ace_device *ace;
-
- req = ace_get_next_request(q);
+ struct ace_device *ace = hctx->queue->queuedata;
+ struct request *req = bd->rq;
- if (req) {
- ace = req->rq_disk->private_data;
- tasklet_schedule(&ace->fsm_tasklet);
+ if (blk_rq_is_passthrough(req)) {
+ blk_mq_start_request(req);
+ return BLK_STS_IOERR;
}
+
+ spin_lock_irq(&ace->lock);
+ list_add_tail(&req->queuelist, &ace->rq_list);
+ spin_unlock_irq(&ace->lock);
+
+ tasklet_schedule(&ace->fsm_tasklet);
+ return BLK_STS_OK;
}
static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
@@ -957,6 +973,10 @@ static const struct block_device_operations ace_fops = {
.getgeo = ace_getgeo,
};
+static const struct blk_mq_ops ace_mq_ops = {
+ .queue_rq = ace_queue_rq,
+};
+
/* --------------------------------------------------------------------
* SystemACE device setup/teardown code
*/
@@ -972,6 +992,7 @@ static int ace_setup(struct ace_device *ace)
spin_lock_init(&ace->lock);
init_completion(&ace->id_completion);
+ INIT_LIST_HEAD(&ace->rq_list);
/*
* Map the device
@@ -989,9 +1010,15 @@ static int ace_setup(struct ace_device *ace)
/*
* Initialize the request queue
*/
- ace->queue = blk_init_queue(ace_request, &ace->lock);
- if (ace->queue == NULL)
+ ace->queue = blk_mq_init_sq_queue(&ace->tag_set, &ace_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(ace->queue)) {
+ rc = PTR_ERR(ace->queue);
+ ace->queue = NULL;
goto err_blk_initq;
+ }
+ ace->queue->queuedata = ace;
+
blk_queue_logical_block_size(ace->queue, 512);
blk_queue_bounce_limit(ace->queue, BLK_BOUNCE_HIGH);
@@ -1066,6 +1093,7 @@ err_read:
put_disk(ace->gd);
err_alloc_disk:
blk_cleanup_queue(ace->queue);
+ blk_mq_free_tag_set(&ace->tag_set);
err_blk_initq:
iounmap(ace->baseaddr);
err_ioremap:
@@ -1081,8 +1109,10 @@ static void ace_teardown(struct ace_device *ace)
put_disk(ace->gd);
}
- if (ace->queue)
+ if (ace->queue) {
blk_cleanup_queue(ace->queue);
+ blk_mq_free_tag_set(&ace->tag_set);
+ }
tasklet_kill(&ace->fsm_tasklet);
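xsysace cannot service requests from queue_rq directly because its work is driven by an FSM tasklet, so the conversion queues each request on a driver-private list and lets the tasklet pop it via ace_get_next_request(), which must return the dequeued request. That deferral pattern in isolation, with hypothetical names:

#include <linux/blk-mq.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;
	struct list_head rq_list;	/* requests awaiting the tasklet */
	struct tasklet_struct tasklet;
};

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_dev *md = hctx->queue->queuedata;

	spin_lock_irq(&md->lock);
	list_add_tail(&bd->rq->queuelist, &md->rq_list);
	spin_unlock_irq(&md->lock);

	tasklet_schedule(&md->tasklet);	/* the real work happens there */
	return BLK_STS_OK;
}

/* called from the tasklet with md->lock held */
static struct request *my_next_request(struct my_dev *md)
{
	struct request *rq;

	rq = list_first_entry_or_null(&md->rq_list, struct request, queuelist);
	if (rq) {
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
	}
	return rq;
}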
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index d0c5bc4e0703..1106c076fa4b 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -31,7 +31,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -66,43 +66,44 @@ static DEFINE_SPINLOCK(z2ram_lock);
static struct gendisk *z2ram_gendisk;
-static void do_z2_request(struct request_queue *q)
+static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request *req;
-
- req = blk_fetch_request(q);
- while (req) {
- unsigned long start = blk_rq_pos(req) << 9;
- unsigned long len = blk_rq_cur_bytes(req);
- blk_status_t err = BLK_STS_OK;
-
- if (start + len > z2ram_size) {
- pr_err(DEVICE_NAME ": bad access: block=%llu, "
- "count=%u\n",
- (unsigned long long)blk_rq_pos(req),
- blk_rq_cur_sectors(req));
- err = BLK_STS_IOERR;
- goto done;
- }
- while (len) {
- unsigned long addr = start & Z2RAM_CHUNKMASK;
- unsigned long size = Z2RAM_CHUNKSIZE - addr;
- void *buffer = bio_data(req->bio);
-
- if (len < size)
- size = len;
- addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
- if (rq_data_dir(req) == READ)
- memcpy(buffer, (char *)addr, size);
- else
- memcpy((char *)addr, buffer, size);
- start += size;
- len -= size;
- }
- done:
- if (!__blk_end_request_cur(req, err))
- req = blk_fetch_request(q);
+ struct request *req = bd->rq;
+ unsigned long start = blk_rq_pos(req) << 9;
+ unsigned long len = blk_rq_cur_bytes(req);
+
+ blk_mq_start_request(req);
+
+ if (start + len > z2ram_size) {
+ pr_err(DEVICE_NAME ": bad access: block=%llu, "
+ "count=%u\n",
+ (unsigned long long)blk_rq_pos(req),
+ blk_rq_cur_sectors(req));
+ return BLK_STS_IOERR;
+ }
+
+ spin_lock_irq(&z2ram_lock);
+
+ while (len) {
+ unsigned long addr = start & Z2RAM_CHUNKMASK;
+ unsigned long size = Z2RAM_CHUNKSIZE - addr;
+ void *buffer = bio_data(req->bio);
+
+ if (len < size)
+ size = len;
+ addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
+ if (rq_data_dir(req) == READ)
+ memcpy(buffer, (char *)addr, size);
+ else
+ memcpy((char *)addr, buffer, size);
+ start += size;
+ len -= size;
}
+
+ spin_unlock_irq(&z2ram_lock);
+ blk_mq_end_request(req, BLK_STS_OK);
+ return BLK_STS_OK;
}
static void
@@ -337,6 +338,11 @@ static struct kobject *z2_find(dev_t dev, int *part, void *data)
}
static struct request_queue *z2_queue;
+static struct blk_mq_tag_set tag_set;
+
+static const struct blk_mq_ops z2_mq_ops = {
+ .queue_rq = z2_queue_rq,
+};
static int __init
z2_init(void)
@@ -355,9 +361,13 @@ z2_init(void)
if (!z2ram_gendisk)
goto out_disk;
- z2_queue = blk_init_queue(do_z2_request, &z2ram_lock);
- if (!z2_queue)
+ z2_queue = blk_mq_init_sq_queue(&tag_set, &z2_mq_ops, 16,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(z2_queue)) {
+ ret = PTR_ERR(z2_queue);
+ z2_queue = NULL;
goto out_queue;
+ }
z2ram_gendisk->major = Z2RAM_MAJOR;
z2ram_gendisk->first_minor = 0;
@@ -387,6 +397,7 @@ static void __exit z2_exit(void)
del_gendisk(z2ram_gendisk);
put_disk(z2ram_gendisk);
blk_cleanup_queue(z2_queue);
+ blk_mq_free_tag_set(&tag_set);
if ( current_device != -1 )
{
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 635235759a0a..fcd055457364 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -3,7 +3,6 @@ config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO
select CRYPTO_LZO
- default n
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
Pages written to these disks are compressed and stored in memory
@@ -18,7 +17,6 @@ config ZRAM
config ZRAM_WRITEBACK
bool "Write back incompressible page to backing device"
depends on ZRAM
- default n
help
With incompressible page, there is no memory saving to keep it
in memory. Instead, write it out to backing device.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index a1d6b5597c17..4879595200e1 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1636,6 +1636,11 @@ static const struct attribute_group zram_disk_attr_group = {
.attrs = zram_disk_attrs,
};
+static const struct attribute_group *zram_disk_attr_groups[] = {
+ &zram_disk_attr_group,
+ NULL,
+};
+
/*
* Allocate and initialize new zram device. the function returns
* '>= 0' device_id upon success, and negative value otherwise.
@@ -1716,24 +1721,14 @@ static int zram_add(void)
zram->disk->queue->backing_dev_info->capabilities |=
(BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
- add_disk(zram->disk);
-
- ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
- &zram_disk_attr_group);
- if (ret < 0) {
- pr_err("Error creating sysfs group for device %d\n",
- device_id);
- goto out_free_disk;
- }
+ device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
+
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
-out_free_disk:
- del_gendisk(zram->disk);
- put_disk(zram->disk);
out_free_queue:
blk_cleanup_queue(queue);
out_free_idr:
@@ -1762,15 +1757,6 @@ static int zram_remove(struct zram *zram)
mutex_unlock(&bdev->bd_mutex);
zram_debugfs_unregister(zram);
- /*
- * Remove sysfs first, so no one will perform a disksize
- * store while we destroy the devices. This also helps during
- * hot_remove -- zram_reset_device() is the last holder of
- * ->init_lock, no later/concurrent disksize_store() or any
- * other sysfs handlers are possible.
- */
- sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
- &zram_disk_attr_group);
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
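
The zram change above relies on the groups argument that device_add_disk() gained in this cycle: attribute groups are registered before the disk's add uevent is sent, so userspace never sees the disk without its sysfs files, and the manual sysfs_create_group()/sysfs_remove_group() pair and its error path disappear. A hedged sketch of the pattern (illustrative names):

	#include <linux/genhd.h>

	static struct attribute *example_disk_attrs[] = {
		/* &dev_attr_foo.attr, ... */
		NULL,
	};

	static const struct attribute_group example_disk_attr_group = {
		.attrs = example_disk_attrs,
	};

	/* NULL-terminated array, as device_add_disk() expects. */
	static const struct attribute_group *example_disk_attr_groups[] = {
		&example_disk_attr_group,
		NULL,
	};

	static void example_register(struct device *parent, struct gendisk *disk)
	{
		/* Groups are created before the disk is announced. */
		device_add_disk(parent, disk, example_disk_attr_groups);
	}
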
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 3d7a5c149af3..1ad4991753bb 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -203,10 +203,11 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ } /* Terminating entry */
};
-static inline void ath3k_log_failed_loading(int err, int len, int size)
+static inline void ath3k_log_failed_loading(int err, int len, int size,
+ int count)
{
- BT_ERR("Error in firmware loading err = %d, len = %d, size = %d",
- err, len, size);
+ BT_ERR("Firmware loading err = %d, len = %d, size = %d, count = %d",
+ err, len, size, count);
}
#define USB_REQ_DFU_DNLOAD 1
@@ -257,7 +258,7 @@ static int ath3k_load_firmware(struct usb_device *udev,
&len, 3000);
if (err || (len != size)) {
- ath3k_log_failed_loading(err, len, size);
+ ath3k_log_failed_loading(err, len, size, count);
goto error;
}
@@ -356,7 +357,7 @@ static int ath3k_load_fwfile(struct usb_device *udev,
err = usb_bulk_msg(udev, pipe, send_buf, size,
&len, 3000);
if (err || (len != size)) {
- ath3k_log_failed_loading(err, len, size);
+ ath3k_log_failed_loading(err, len, size, count);
kfree(send_buf);
return err;
}
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 25b0cf952b91..54713833951a 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -448,7 +448,7 @@ static int bt3c_load_firmware(struct bt3c_info *info,
{
char *ptr = (char *) firmware;
char b[9];
- unsigned int iobase, tmp;
+ unsigned int iobase, tmp, tn;
unsigned long size, addr, fcs;
int i, err = 0;
@@ -490,7 +490,9 @@ static int bt3c_load_firmware(struct bt3c_info *info,
memset(b, 0, sizeof(b));
for (tmp = 0, i = 0; i < size; i++) {
memcpy(b, ptr + (i * 2) + 2, 2);
- tmp += simple_strtol(b, NULL, 16);
+ if (kstrtouint(b, 16, &tn))
+ return -EINVAL;
+ tmp += tn;
}
if (((tmp + fcs) & 0xff) != 0xff) {
@@ -505,7 +507,8 @@ static int bt3c_load_firmware(struct bt3c_info *info,
memset(b, 0, sizeof(b));
for (i = 0; i < (size - 4) / 2; i++) {
memcpy(b, ptr + (i * 4) + 12, 4);
- tmp = simple_strtoul(b, NULL, 16);
+ if (kstrtouint(b, 16, &tmp))
+ return -EINVAL;
bt3c_put(iobase, tmp);
}
}
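
The switch from simple_strtol()/simple_strtoul() to kstrtouint() above is not just cosmetic: the kstrto*() helpers reject trailing garbage and report overflow, so a corrupt firmware image now fails the load with -EINVAL instead of silently contributing a bogus value to the checksum. A tiny sketch of the difference (hypothetical helper, not from the driver):

	#include <linux/kernel.h>

	static int parse_hex_field(const char *s, unsigned int *out)
	{
		/* Returns -EINVAL for non-numeric input and -ERANGE on
		 * overflow; simple_strtoul() would have returned 0 with
		 * no way to tell a parse failure from a real zero. */
		return kstrtouint(s, 16, out);
	}
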
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 99cde1f9467d..e3e4d929e74f 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -324,6 +324,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x4103, "BCM4330B1" }, /* 002.001.003 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
+ { 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
{ 0x2122, "BCM4343A0" }, /* 001.001.034 */
{ 0x2209, "BCM43430A1" }, /* 001.002.009 */
diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c
index 60d1419590ba..3951f7b23840 100644
--- a/drivers/bluetooth/btrsi.c
+++ b/drivers/bluetooth/btrsi.c
@@ -21,8 +21,9 @@
#include <net/rsi_91x.h>
#include <net/genetlink.h>
-#define RSI_HEADROOM_FOR_BT_HAL 16
+#define RSI_DMA_ALIGN 8
#define RSI_FRAME_DESC_SIZE 16
+#define RSI_HEADROOM_FOR_BT_HAL (RSI_FRAME_DESC_SIZE + RSI_DMA_ALIGN)
struct rsi_hci_adapter {
void *priv;
@@ -70,6 +71,16 @@ static int rsi_hci_send_pkt(struct hci_dev *hdev, struct sk_buff *skb)
bt_cb(new_skb)->pkt_type = hci_skb_pkt_type(skb);
kfree_skb(skb);
skb = new_skb;
+ if (!IS_ALIGNED((unsigned long)skb->data, RSI_DMA_ALIGN)) {
+ u8 *skb_data = skb->data;
+ int skb_len = skb->len;
+
+ skb_push(skb, RSI_DMA_ALIGN);
+ skb_pull(skb, PTR_ALIGN(skb->data,
+ RSI_DMA_ALIGN) - skb->data);
+ memmove(skb->data, skb_data, skb_len);
+ skb_trim(skb, skb_len);
+ }
}
return h_adapter->proto_ops->coex_send_pkt(h_adapter->priv, skb,
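
The btrsi hunk grows the reserved headroom by RSI_DMA_ALIGN and then straightens out any misaligned skb->data in place: push into the spare headroom, pull forward to the next aligned address, and memmove the payload up behind the new pointer. A minimal sketch of that realignment, assuming the skb was allocated with at least ALIGN-1 bytes of spare headroom (illustrative names):

	#include <linux/skbuff.h>

	#define EXAMPLE_DMA_ALIGN 8

	static void example_align_skb_data(struct sk_buff *skb)
	{
		u8 *old_data = skb->data;
		int old_len = skb->len;

		if (IS_ALIGNED((unsigned long)skb->data, EXAMPLE_DMA_ALIGN))
			return;

		/* Expose the reserved headroom, then trim the front so
		 * skb->data lands on the next aligned address. */
		skb_push(skb, EXAMPLE_DMA_ALIGN);
		skb_pull(skb, PTR_ALIGN(skb->data, EXAMPLE_DMA_ALIGN) - skb->data);

		/* Slide the payload up to the aligned start. */
		memmove(skb->data, old_data, old_len);
		skb_trim(skb, old_len);
	}
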
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 7f9ea8e4c1b2..41405de27d66 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -138,6 +138,13 @@ static const struct id_table ic_id_table[] = {
.fw_name = "rtl_bt/rtl8761a_fw.bin",
.cfg_name = "rtl_bt/rtl8761a_config" },
+ /* 8822C with USB interface */
+ { IC_INFO(RTL_ROM_LMP_8822B, 0xc),
+ .config_needed = false,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8822cu_fw.bin",
+ .cfg_name = "rtl_bt/rtl8822cu_config" },
+
/* 8822B */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xb),
.config_needed = true,
@@ -206,7 +213,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
struct btrtl_device_info *btrtl_dev,
unsigned char **_buf)
{
- const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
+ static const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
struct rtl_epatch_header *epatch_info;
unsigned char *buf;
int i, len;
@@ -228,6 +235,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8822B, 8 },
{ RTL_ROM_LMP_8723B, 9 }, /* 8723D */
{ RTL_ROM_LMP_8821A, 10 }, /* 8821C */
+ { RTL_ROM_LMP_8822B, 13 }, /* 8822C */
};
min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 20142bc77554..282d1af1d3ba 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -293,13 +293,17 @@ static int btsdio_probe(struct sdio_func *func,
tuple = tuple->next;
}
- /* BCM43341 devices soldered onto the PCB (non-removable) use an
- * uart connection for bluetooth, ignore the BT SDIO interface.
+	/* Broadcom devices soldered onto the PCB (non-removable) use a
+ * UART connection for Bluetooth, ignore the BT SDIO interface.
*/
if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
- func->device == SDIO_DEVICE_ID_BROADCOM_43341 &&
- !mmc_card_is_removable(func->card->host))
- return -ENODEV;
+ !mmc_card_is_removable(func->card->host)) {
+ switch (func->device) {
+ case SDIO_DEVICE_ID_BROADCOM_43341:
+ case SDIO_DEVICE_ID_BROADCOM_43430:
+ return -ENODEV;
+ }
+ }
data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
if (!data)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index cd2e5cf14ea5..7439a7eb50ac 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -264,6 +264,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* QCA ROME chipset */
+ { USB_DEVICE(0x0cf3, 0x535b), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
@@ -3096,6 +3097,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->set_diag = btintel_set_diag;
hdev->set_bdaddr = btintel_set_bdaddr;
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 963bb0309e25..fbf7b4df23ab 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -543,6 +543,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
}
clear_bit(HCI_UART_PROTO_SET, &hu->flags);
+ percpu_free_rwsem(&hu->proto_lock);
+
kfree(hu);
}
@@ -821,6 +823,7 @@ static int __init hci_uart_init(void)
hci_uart_ldisc.read = hci_uart_tty_read;
hci_uart_ldisc.write = hci_uart_tty_write;
hci_uart_ldisc.ioctl = hci_uart_tty_ioctl;
+ hci_uart_ldisc.compat_ioctl = hci_uart_tty_ioctl;
hci_uart_ldisc.poll = hci_uart_tty_poll;
hci_uart_ldisc.receive_buf = hci_uart_tty_receive;
hci_uart_ldisc.write_wakeup = hci_uart_tty_wakeup;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index e182f6019f68..f036c8f98ea3 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -40,6 +40,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
+#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -63,6 +64,9 @@
/* susclk rate */
#define SUSCLK_RATE_32KHZ 32768
+/* Controller debug log header */
+#define QCA_DEBUG_HANDLE 0x2EDC
+
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
HCI_IBS_TX_ASLEEP,
@@ -167,7 +171,8 @@ struct qca_serdev {
};
static int qca_power_setup(struct hci_uart *hu, bool on);
-static void qca_power_shutdown(struct hci_dev *hdev);
+static void qca_power_shutdown(struct hci_uart *hu);
+static int qca_power_off(struct hci_dev *hdev);
static void __serial_clock_on(struct tty_struct *tty)
{
@@ -499,7 +504,6 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca;
if (hu->serdev) {
- serdev_device_open(hu->serdev);
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->btsoc_type != QCA_WCN3990) {
@@ -609,11 +613,10 @@ static int qca_close(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->btsoc_type == QCA_WCN3990)
- qca_power_shutdown(hu->hdev);
+ qca_power_shutdown(hu);
else
gpiod_set_value_cansleep(qcadev->bt_en, 0);
- serdev_device_close(hu->serdev);
}
kfree_skb(qca->rx_skb);
@@ -850,6 +853,19 @@ static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	/* The controller sends its debug logs to us as ACL packets.
+	 * Instead of feeding them into the normal ACL decode path,
+	 * push them up to the higher layers as diagnostic packets.
+ */
+ if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE)
+ return hci_recv_diag(hdev, skb);
+
+ return hci_recv_frame(hdev, skb);
+}
+
#define QCA_IBS_SLEEP_IND_EVENT \
.type = HCI_IBS_SLEEP_IND, \
.hlen = 0, \
@@ -872,7 +888,7 @@ static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
.maxlen = HCI_MAX_IBS_SIZE
static const struct h4_recv_pkt qca_recv_pkts[] = {
- { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_ACL, .recv = qca_recv_acl_data },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
{ QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
@@ -1101,8 +1117,26 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
static int qca_wcn3990_init(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
+ struct qca_serdev *qcadev;
int ret;
+	/* Check the vregs status; a previous hci down may have turned
+	 * off the voltage regulators.
+ */
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (!qcadev->bt_power->vregs_on) {
+ serdev_device_close(hu->serdev);
+ ret = qca_power_setup(hu, true);
+ if (ret)
+ return ret;
+
+ ret = serdev_device_open(hu->serdev);
+ if (ret) {
+ bt_dev_err(hu->hdev, "failed to open port");
+ return ret;
+ }
+ }
+
/* Forcefully enable wcn3990 to enter in to boot mode. */
host_set_baudrate(hu, 2400);
ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
@@ -1154,6 +1188,12 @@ static int qca_setup(struct hci_uart *hu)
if (qcadev->btsoc_type == QCA_WCN3990) {
bt_dev_info(hdev, "setting up wcn3990");
+
+	/* Enable the NON_PERSISTENT_SETUP quirk so that setup is
+	 * executed on every hci up.
+ */
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hu->hdev->shutdown = qca_power_off;
ret = qca_wcn3990_init(hu);
if (ret)
return ret;
@@ -1232,15 +1272,26 @@ static const struct qca_vreg_data qca_soc_data = {
.num_vregs = 4,
};
-static void qca_power_shutdown(struct hci_dev *hdev)
+static void qca_power_shutdown(struct hci_uart *hu)
{
- struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = hu->serdev;
+ unsigned char cmd = QCA_WCN3990_POWEROFF_PULSE;
host_set_baudrate(hu, 2400);
- qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
+ hci_uart_set_flow_control(hu, true);
+ serdev_device_write_buf(serdev, &cmd, sizeof(cmd));
+ hci_uart_set_flow_control(hu, false);
qca_power_setup(hu, false);
}
+static int qca_power_off(struct hci_dev *hdev)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
+ qca_power_shutdown(hu);
+ return 0;
+}
+
static int qca_enable_regulator(struct qca_vreg vregs,
struct regulator *regulator)
{
@@ -1322,7 +1373,7 @@ static int qca_init_regulators(struct qca_power *qca,
{
int i;
- qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs *
+ qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
sizeof(struct regulator_bulk_data),
GFP_KERNEL);
if (!qca->vreg_bulk)
@@ -1413,7 +1464,7 @@ static void qca_serdev_remove(struct serdev_device *serdev)
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
if (qcadev->btsoc_type == QCA_WCN3990)
- qca_power_shutdown(qcadev->serdev_hu.hdev);
+ qca_power_shutdown(&qcadev->serdev_hu);
else
clk_disable_unprepare(qcadev->susclk);
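
The qca_recv_acl_data() hook added above is a small dispatch-on-header pattern: the WCN controller emits its debug logs as ACL packets on a reserved connection handle, and the driver peels those off to the HCI diagnostic channel before they reach normal ACL processing. The handler is then slotted into the driver's h4_recv_pkt table in place of hci_recv_frame for H4_RECV_ACL. A sketch of the dispatch, using the same QCA_DEBUG_HANDLE value as the patch:

	#include <asm/unaligned.h>
	#include <net/bluetooth/bluetooth.h>
	#include <net/bluetooth/hci_core.h>

	#define EXAMPLE_DEBUG_HANDLE 0x2EDC	/* reserved handle for logs */

	static int example_recv_acl(struct hci_dev *hdev, struct sk_buff *skb)
	{
		/* The ACL header starts with a little-endian 16-bit
		 * connection handle; the magic value marks a log packet. */
		if (get_unaligned_le16(skb->data) == EXAMPLE_DEBUG_HANDLE)
			return hci_recv_diag(hdev, skb);

		return hci_recv_frame(hdev, skb);
	}
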
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index aa2543b3c286..c445aa9ac511 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -57,9 +57,10 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
{
struct sk_buff *skb = hu->tx_skb;
- if (!skb)
- skb = hu->proto->dequeue(hu);
- else
+ if (!skb) {
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ skb = hu->proto->dequeue(hu);
+ } else
hu->tx_skb = NULL;
return skb;
@@ -94,7 +95,7 @@ static void hci_uart_write_work(struct work_struct *work)
hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
kfree_skb(skb);
}
- } while(test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
+ } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
clear_bit(HCI_UART_SENDING, &hu->tx_state);
}
@@ -368,6 +369,7 @@ void hci_uart_unregister_device(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 5d8266c6571f..4552b06fe601 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -188,6 +188,10 @@ struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
+struct device_type fsl_mc_bus_dpseci_type = {
+ .name = "fsl_mc_bus_dpseci"
+};
+
static struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
@@ -203,6 +207,7 @@ static struct device_type *fsl_mc_get_device_type(const char *type)
{ &fsl_mc_bus_dpmcp_type, "dpmcp" },
{ &fsl_mc_bus_dpmac_type, "dpmac" },
{ &fsl_mc_bus_dprtc_type, "dprtc" },
+ { &fsl_mc_bus_dpseci_type, "dpseci" },
{ NULL, NULL }
};
int i;
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
index 073fd9011154..9989ce904a37 100644
--- a/drivers/bus/ts-nbus.c
+++ b/drivers/bus/ts-nbus.c
@@ -110,13 +110,12 @@ static void ts_nbus_set_direction(struct ts_nbus *ts_nbus, int direction)
*/
static void ts_nbus_reset_bus(struct ts_nbus *ts_nbus)
{
- int i;
- int values[8];
+ DECLARE_BITMAP(values, 8);
- for (i = 0; i < 8; i++)
- values[i] = 0;
+ values[0] = 0;
- gpiod_set_array_value_cansleep(8, ts_nbus->data->desc, values);
+ gpiod_set_array_value_cansleep(8, ts_nbus->data->desc,
+ ts_nbus->data->info, values);
gpiod_set_value_cansleep(ts_nbus->csn, 0);
gpiod_set_value_cansleep(ts_nbus->strobe, 0);
gpiod_set_value_cansleep(ts_nbus->ale, 0);
@@ -157,16 +156,11 @@ static int ts_nbus_read_byte(struct ts_nbus *ts_nbus, u8 *val)
static void ts_nbus_write_byte(struct ts_nbus *ts_nbus, u8 byte)
{
struct gpio_descs *gpios = ts_nbus->data;
- int i;
- int values[8];
+ DECLARE_BITMAP(values, 8);
- for (i = 0; i < 8; i++)
- if (byte & BIT(i))
- values[i] = 1;
- else
- values[i] = 0;
+ values[0] = byte;
- gpiod_set_array_value_cansleep(8, gpios->desc, values);
+ gpiod_set_array_value_cansleep(8, gpios->desc, gpios->info, values);
}
/*
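
The ts-nbus hunks use the reworked GPIO array API from this cycle: gpiod_set_array_value_cansleep() now takes the values as a bitmap plus the array's gpio_array info, so writing an 8-bit bus collapses to a single assignment where bit i of the bitmap drives line i (and the info pointer lets gpiolib set all lines of one chip in a single register write where the hardware allows it). A minimal sketch, assuming descs came from gpiod_get_array():

	#include <linux/gpio/consumer.h>

	static void example_bus_write(struct gpio_descs *descs, u8 byte)
	{
		DECLARE_BITMAP(values, 8);

		/* The low 8 bits of the first bitmap word are the 8
		 * line values; no per-line loop needed. */
		values[0] = byte;

		gpiod_set_array_value_cansleep(8, descs->desc, descs->info,
					       values);
	}
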
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a5d5a96479bf..614ecdbb4ab7 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -410,10 +410,10 @@ static int cdrom_get_disc_info(struct cdrom_device_info *cdi,
* hack to have the capability flags defined const, while we can still
* change it here without gcc complaining at every line.
*/
-#define ENSURE(call, bits) \
-do { \
- if (cdo->call == NULL) \
- *change_capability &= ~(bits); \
+#define ENSURE(cdo, call, bits) \
+do { \
+ if (cdo->call == NULL) \
+ WARN_ON_ONCE((cdo)->capability & (bits)); \
} while (0)
/*
@@ -589,7 +589,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
{
static char banner_printed;
const struct cdrom_device_ops *cdo = cdi->ops;
- int *change_capability = (int *)&cdo->capability; /* hack */
cd_dbg(CD_OPEN, "entering register_cdrom\n");
@@ -601,16 +600,16 @@ int register_cdrom(struct cdrom_device_info *cdi)
cdrom_sysctl_register();
}
- ENSURE(drive_status, CDC_DRIVE_STATUS);
+ ENSURE(cdo, drive_status, CDC_DRIVE_STATUS);
if (cdo->check_events == NULL && cdo->media_changed == NULL)
- *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
- ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
- ENSURE(lock_door, CDC_LOCK);
- ENSURE(select_speed, CDC_SELECT_SPEED);
- ENSURE(get_last_session, CDC_MULTI_SESSION);
- ENSURE(get_mcn, CDC_MCN);
- ENSURE(reset, CDC_RESET);
- ENSURE(generic_packet, CDC_GENERIC_PACKET);
+ WARN_ON_ONCE(cdo->capability & (CDC_MEDIA_CHANGED | CDC_SELECT_DISC));
+ ENSURE(cdo, tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
+ ENSURE(cdo, lock_door, CDC_LOCK);
+ ENSURE(cdo, select_speed, CDC_SELECT_SPEED);
+ ENSURE(cdo, get_last_session, CDC_MULTI_SESSION);
+ ENSURE(cdo, get_mcn, CDC_MCN);
+ ENSURE(cdo, reset, CDC_RESET);
+ ENSURE(cdo, generic_packet, CDC_GENERIC_PACKET);
cdi->mc_flags = 0;
cdi->options = CDO_USE_FFLAGS;
@@ -2445,7 +2444,7 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
return -ENOSYS;
if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
- if ((int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;
}
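
The rewritten ENSURE() stops casting away const to patch a driver's capability mask and instead asserts the invariant the old hack enforced: a driver must not advertise a capability whose callback is missing. In sketch form (macro body as in the patch; the usage comment is ours):

	#define ENSURE(cdo, call, bits)					\
	do {								\
		if ((cdo)->call == NULL)				\
			WARN_ON_ONCE((cdo)->capability & (bits));	\
	} while (0)

	/* e.g. ENSURE(cdo, lock_door, CDC_LOCK) now warns once if a
	 * driver claims CDC_LOCK without a lock_door() implementation,
	 * rather than silently clearing the bit in a const struct. */
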
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index ae3a7537cf0f..757e85b81879 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -31,12 +31,11 @@
#include <linux/cdrom.h>
#include <linux/genhd.h>
#include <linux/bio.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/wait.h>
-#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <scsi/scsi.h>
#include <asm/io.h>
@@ -102,11 +101,6 @@ static int gdrom_major;
static DECLARE_WAIT_QUEUE_HEAD(command_queue);
static DECLARE_WAIT_QUEUE_HEAD(request_queue);
-static DEFINE_SPINLOCK(gdrom_lock);
-static void gdrom_readdisk_dma(struct work_struct *work);
-static DECLARE_WORK(work, gdrom_readdisk_dma);
-static LIST_HEAD(gdrom_deferred);
-
struct gdromtoc {
unsigned int entry[99];
unsigned int first, last;
@@ -122,6 +116,7 @@ static struct gdrom_unit {
char disk_type;
struct gdromtoc *toc;
struct request_queue *gdrom_rq;
+ struct blk_mq_tag_set tag_set;
} gd;
struct gdrom_id {
@@ -584,103 +579,83 @@ static int gdrom_set_interrupt_handlers(void)
* 9 -> sectors >> 8
* 10 -> sectors
*/
-static void gdrom_readdisk_dma(struct work_struct *work)
+static blk_status_t gdrom_readdisk_dma(struct request *req)
{
int block, block_cnt;
blk_status_t err;
struct packet_command *read_command;
- struct list_head *elem, *next;
- struct request *req;
unsigned long timeout;
- if (list_empty(&gdrom_deferred))
- return;
read_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
if (!read_command)
- return; /* get more memory later? */
+ return BLK_STS_RESOURCE;
+
read_command->cmd[0] = 0x30;
read_command->cmd[1] = 0x20;
- spin_lock(&gdrom_lock);
- list_for_each_safe(elem, next, &gdrom_deferred) {
- req = list_entry(elem, struct request, queuelist);
- spin_unlock(&gdrom_lock);
- block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
- block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
- __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
- __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
- __raw_writel(1, GDROM_DMA_DIRECTION_REG);
- __raw_writel(1, GDROM_DMA_ENABLE_REG);
- read_command->cmd[2] = (block >> 16) & 0xFF;
- read_command->cmd[3] = (block >> 8) & 0xFF;
- read_command->cmd[4] = block & 0xFF;
- read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
- read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
- read_command->cmd[10] = block_cnt & 0xFF;
- /* set for DMA */
- __raw_writeb(1, GDROM_ERROR_REG);
- /* other registers */
- __raw_writeb(0, GDROM_SECNUM_REG);
- __raw_writeb(0, GDROM_BCL_REG);
- __raw_writeb(0, GDROM_BCH_REG);
- __raw_writeb(0, GDROM_DSEL_REG);
- __raw_writeb(0, GDROM_INTSEC_REG);
- /* Wait for registers to reset after any previous activity */
- timeout = jiffies + HZ / 2;
- while (gdrom_is_busy() && time_before(jiffies, timeout))
- cpu_relax();
- __raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
- timeout = jiffies + HZ / 2;
- /* Wait for packet command to finish */
- while (gdrom_is_busy() && time_before(jiffies, timeout))
- cpu_relax();
- gd.pending = 1;
- gd.transfer = 1;
- outsw(GDROM_DATA_REG, &read_command->cmd, 6);
- timeout = jiffies + HZ / 2;
- /* Wait for any pending DMA to finish */
- while (__raw_readb(GDROM_DMA_STATUS_REG) &&
- time_before(jiffies, timeout))
- cpu_relax();
- /* start transfer */
- __raw_writeb(1, GDROM_DMA_STATUS_REG);
- wait_event_interruptible_timeout(request_queue,
- gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
- err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
- gd.transfer = 0;
- gd.pending = 0;
- /* now seek to take the request spinlock
- * before handling ending the request */
- spin_lock(&gdrom_lock);
- list_del_init(&req->queuelist);
- __blk_end_request_all(req, err);
- }
- spin_unlock(&gdrom_lock);
+ block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
+ block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
+ __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
+ __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
+ __raw_writel(1, GDROM_DMA_DIRECTION_REG);
+ __raw_writel(1, GDROM_DMA_ENABLE_REG);
+ read_command->cmd[2] = (block >> 16) & 0xFF;
+ read_command->cmd[3] = (block >> 8) & 0xFF;
+ read_command->cmd[4] = block & 0xFF;
+ read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
+ read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
+ read_command->cmd[10] = block_cnt & 0xFF;
+ /* set for DMA */
+ __raw_writeb(1, GDROM_ERROR_REG);
+ /* other registers */
+ __raw_writeb(0, GDROM_SECNUM_REG);
+ __raw_writeb(0, GDROM_BCL_REG);
+ __raw_writeb(0, GDROM_BCH_REG);
+ __raw_writeb(0, GDROM_DSEL_REG);
+ __raw_writeb(0, GDROM_INTSEC_REG);
+ /* Wait for registers to reset after any previous activity */
+ timeout = jiffies + HZ / 2;
+ while (gdrom_is_busy() && time_before(jiffies, timeout))
+ cpu_relax();
+ __raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
+ timeout = jiffies + HZ / 2;
+ /* Wait for packet command to finish */
+ while (gdrom_is_busy() && time_before(jiffies, timeout))
+ cpu_relax();
+ gd.pending = 1;
+ gd.transfer = 1;
+ outsw(GDROM_DATA_REG, &read_command->cmd, 6);
+ timeout = jiffies + HZ / 2;
+ /* Wait for any pending DMA to finish */
+ while (__raw_readb(GDROM_DMA_STATUS_REG) &&
+ time_before(jiffies, timeout))
+ cpu_relax();
+ /* start transfer */
+ __raw_writeb(1, GDROM_DMA_STATUS_REG);
+ wait_event_interruptible_timeout(request_queue,
+ gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
+ err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
+ gd.transfer = 0;
+ gd.pending = 0;
+
+ blk_mq_end_request(req, err);
kfree(read_command);
+ return BLK_STS_OK;
}
-static void gdrom_request(struct request_queue *rq)
-{
- struct request *req;
-
- while ((req = blk_fetch_request(rq)) != NULL) {
- switch (req_op(req)) {
- case REQ_OP_READ:
- /*
- * Add to list of deferred work and then schedule
- * workqueue.
- */
- list_add_tail(&req->queuelist, &gdrom_deferred);
- schedule_work(&work);
- break;
- case REQ_OP_WRITE:
- pr_notice("Read only device - write request ignored\n");
- __blk_end_request_all(req, BLK_STS_IOERR);
- break;
- default:
- printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
- __blk_end_request_all(req, BLK_STS_IOERR);
- break;
- }
+static blk_status_t gdrom_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ blk_mq_start_request(bd->rq);
+
+ switch (req_op(bd->rq)) {
+ case REQ_OP_READ:
+ return gdrom_readdisk_dma(bd->rq);
+ case REQ_OP_WRITE:
+ pr_notice("Read only device - write request ignored\n");
+ return BLK_STS_IOERR;
+ default:
+ printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
+ return BLK_STS_IOERR;
}
}
@@ -768,6 +743,10 @@ static int probe_gdrom_setupqueue(void)
return gdrom_init_dma_mode();
}
+static const struct blk_mq_ops gdrom_mq_ops = {
+ .queue_rq = gdrom_queue_rq,
+};
+
/*
* register this as a block device and as compliant with the
* universal CD Rom driver interface
@@ -811,11 +790,15 @@ static int probe_gdrom(struct platform_device *devptr)
err = gdrom_set_interrupt_handlers();
if (err)
goto probe_fail_cmdirq_register;
- gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock);
- if (!gd.gdrom_rq) {
- err = -ENOMEM;
+
+ gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ if (IS_ERR(gd.gdrom_rq)) {
+		err = PTR_ERR(gd.gdrom_rq);
+ gd.gdrom_rq = NULL;
goto probe_fail_requestq;
}
+
blk_queue_bounce_limit(gd.gdrom_rq, BLK_BOUNCE_HIGH);
err = probe_gdrom_setupqueue();
@@ -832,6 +815,7 @@ static int probe_gdrom(struct platform_device *devptr)
probe_fail_toc:
blk_cleanup_queue(gd.gdrom_rq);
+ blk_mq_free_tag_set(&gd.tag_set);
probe_fail_requestq:
free_irq(HW_EVENT_GDROM_DMA, &gd);
free_irq(HW_EVENT_GDROM_CMD, &gd);
@@ -849,8 +833,8 @@ probe_fail_no_mem:
static int remove_gdrom(struct platform_device *devptr)
{
- flush_work(&work);
blk_cleanup_queue(gd.gdrom_rq);
+ blk_mq_free_tag_set(&gd.tag_set);
free_irq(HW_EVENT_GDROM_CMD, &gd);
free_irq(HW_EVENT_GDROM_DMA, &gd);
del_gendisk(gd.disk);
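
One detail worth noting in the gdrom conversion: gdrom_readdisk_dma() sleeps in wait_event_interruptible_timeout() while the DMA drains, which is only legal because the queue is created with BLK_MQ_F_BLOCKING; without that flag, ->queue_rq() may be invoked from a context where sleeping is forbidden. The setup line above, annotated:

	/* Depth 1: the GD-ROM services one request at a time, and the
	 * handler blocks until the hardware finishes, so the queue is
	 * flagged BLK_MQ_F_BLOCKING. */
	gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,
				BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
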
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index aaf9e5afaad4..95be7228f327 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -44,10 +44,10 @@ static unsigned short default_quality; /* = 0; default to "off" */
module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
- "current hwrng entropy estimation per mill");
+ "current hwrng entropy estimation per 1024 bits of input");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
- "default entropy content of hwrng per mill");
+ "default entropy content of hwrng per 1024 bits of input");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 97d6856c9c0f..f3f216cdf686 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -8,6 +8,8 @@
* Author: Rocky Craig <first.last@hp.com>
*/
+#define DEBUG /* So dev_dbg() is always available. */
+
#include <linux/kernel.h> /* For printk. */
#include <linux/string.h>
#include <linux/module.h>
@@ -215,11 +217,11 @@ static int bt_start_transaction(struct si_sm_data *bt,
return IPMI_NOT_IN_MY_STATE_ERR;
if (bt_debug & BT_DEBUG_MSG) {
- printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
- printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
+ dev_dbg(bt->io->dev, "+++++++++++++++++ New command\n");
+ dev_dbg(bt->io->dev, "NetFn/LUN CMD [%d data]:", size - 2);
for (i = 0; i < size; i ++)
- printk(" %02x", data[i]);
- printk("\n");
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
}
bt->write_data[0] = size + 1; /* all data plus seq byte */
bt->write_data[1] = *data; /* NetFn/LUN */
@@ -260,10 +262,10 @@ static int bt_get_result(struct si_sm_data *bt,
memcpy(data + 2, bt->read_data + 4, msg_len - 2);
if (bt_debug & BT_DEBUG_MSG) {
- printk(KERN_WARNING "BT: result %d bytes:", msg_len);
+ dev_dbg(bt->io->dev, "result %d bytes:", msg_len);
for (i = 0; i < msg_len; i++)
- printk(" %02x", data[i]);
- printk("\n");
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
}
return msg_len;
}
@@ -274,8 +276,7 @@ static int bt_get_result(struct si_sm_data *bt,
static void reset_flags(struct si_sm_data *bt)
{
if (bt_debug)
- printk(KERN_WARNING "IPMI BT: flag reset %s\n",
- status2txt(BT_STATUS));
+ dev_dbg(bt->io->dev, "flag reset %s\n", status2txt(BT_STATUS));
if (BT_STATUS & BT_H_BUSY)
BT_CONTROL(BT_H_BUSY); /* force clear */
BT_CONTROL(BT_CLR_WR_PTR); /* always reset */
@@ -301,14 +302,14 @@ static void drain_BMC2HOST(struct si_sm_data *bt)
BT_CONTROL(BT_B2H_ATN); /* some BMCs are stubborn */
BT_CONTROL(BT_CLR_RD_PTR); /* always reset */
if (bt_debug)
- printk(KERN_WARNING "IPMI BT: stale response %s; ",
+ dev_dbg(bt->io->dev, "stale response %s; ",
status2txt(BT_STATUS));
size = BMC2HOST;
for (i = 0; i < size ; i++)
BMC2HOST;
BT_CONTROL(BT_H_BUSY); /* now clear */
if (bt_debug)
- printk("drained %d bytes\n", size + 1);
+ pr_cont("drained %d bytes\n", size + 1);
}
static inline void write_all_bytes(struct si_sm_data *bt)
@@ -316,11 +317,11 @@ static inline void write_all_bytes(struct si_sm_data *bt)
int i;
if (bt_debug & BT_DEBUG_MSG) {
- printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
+ dev_dbg(bt->io->dev, "write %d bytes seq=0x%02X",
bt->write_count, bt->seq);
for (i = 0; i < bt->write_count; i++)
- printk(" %02x", bt->write_data[i]);
- printk("\n");
+ pr_cont(" %02x", bt->write_data[i]);
+ pr_cont("\n");
}
for (i = 0; i < bt->write_count; i++)
HOST2BMC(bt->write_data[i]);
@@ -340,8 +341,8 @@ static inline int read_all_bytes(struct si_sm_data *bt)
if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
if (bt_debug & BT_DEBUG_MSG)
- printk(KERN_WARNING "BT: bad raw rsp len=%d\n",
- bt->read_count);
+ dev_dbg(bt->io->dev,
+ "bad raw rsp len=%d\n", bt->read_count);
bt->truncated = 1;
return 1; /* let next XACTION START clean it up */
}
@@ -352,13 +353,13 @@ static inline int read_all_bytes(struct si_sm_data *bt)
if (bt_debug & BT_DEBUG_MSG) {
int max = bt->read_count;
- printk(KERN_WARNING "BT: got %d bytes seq=0x%02X",
- max, bt->read_data[2]);
+ dev_dbg(bt->io->dev,
+ "got %d bytes seq=0x%02X", max, bt->read_data[2]);
if (max > 16)
max = 16;
for (i = 0; i < max; i++)
- printk(KERN_CONT " %02x", bt->read_data[i]);
- printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ...");
+ pr_cont(" %02x", bt->read_data[i]);
+ pr_cont("%s\n", bt->read_count == max ? "" : " ...");
}
/* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
@@ -368,10 +369,11 @@ static inline int read_all_bytes(struct si_sm_data *bt)
return 1;
if (bt_debug & BT_DEBUG_MSG)
- printk(KERN_WARNING "IPMI BT: bad packet: "
- "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
- bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3],
- bt->read_data[1], bt->read_data[2], bt->read_data[3]);
+ dev_dbg(bt->io->dev,
+ "IPMI BT: bad packet: want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
+ bt->write_data[1] | 0x04, bt->write_data[2],
+ bt->write_data[3],
+ bt->read_data[1], bt->read_data[2], bt->read_data[3]);
return 0;
}
@@ -394,8 +396,8 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
break;
}
- printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */
- reason, STATE2TXT, STATUS2TXT);
+ dev_warn(bt->io->dev, "IPMI BT: %s in %s %s ", /* open-ended line */
+ reason, STATE2TXT, STATUS2TXT);
/*
* Per the IPMI spec, retries are based on the sequence number
@@ -403,20 +405,20 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
*/
(bt->error_retries)++;
if (bt->error_retries < bt->BT_CAP_retries) {
- printk("%d retries left\n",
+ pr_cont("%d retries left\n",
bt->BT_CAP_retries - bt->error_retries);
bt->state = BT_STATE_RESTART;
return SI_SM_CALL_WITHOUT_DELAY;
}
- printk(KERN_WARNING "failed %d retries, sending error response\n",
- bt->BT_CAP_retries);
+ dev_warn(bt->io->dev, "failed %d retries, sending error response\n",
+ bt->BT_CAP_retries);
if (!bt->nonzero_status)
- printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
+ dev_err(bt->io->dev, "stuck, try power cycle\n");
/* this is most likely during insmod */
else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) {
- printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n");
+ dev_warn(bt->io->dev, "BT reset (takes 5 secs)\n");
bt->state = BT_STATE_RESET1;
return SI_SM_CALL_WITHOUT_DELAY;
}
@@ -452,7 +454,7 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
status = BT_STATUS;
bt->nonzero_status |= status;
if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
- printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
+ dev_dbg(bt->io->dev, "BT: %s %s TO=%ld - %ld\n",
STATE2TXT,
STATUS2TXT,
bt->timeout,
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 1a486aec99b6..effab11887ca 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -818,8 +818,7 @@ static void ipmi_new_smi(int if_num, struct device *device)
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
- printk(KERN_ERR "ipmi_devintf: Unable to create the"
- " ipmi class device link\n");
+ pr_err("ipmi_devintf: Unable to create the ipmi class device link\n");
return;
}
entry->dev = dev;
@@ -861,18 +860,18 @@ static int __init init_ipmi_devintf(void)
if (ipmi_major < 0)
return -EINVAL;
- printk(KERN_INFO "ipmi device interface\n");
+ pr_info("ipmi device interface\n");
ipmi_class = class_create(THIS_MODULE, "ipmi");
if (IS_ERR(ipmi_class)) {
- printk(KERN_ERR "ipmi: can't register device class\n");
+ pr_err("ipmi: can't register device class\n");
return PTR_ERR(ipmi_class);
}
rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
if (rv < 0) {
class_destroy(ipmi_class);
- printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
+ pr_err("ipmi: can't get major %d\n", ipmi_major);
return rv;
}
@@ -884,7 +883,7 @@ static int __init init_ipmi_devintf(void)
if (rv) {
unregister_chrdev(ipmi_major, DEVICE_NAME);
class_destroy(ipmi_class);
- printk(KERN_WARNING "ipmi: can't register smi watcher\n");
+ pr_warn("ipmi: can't register smi watcher\n");
return rv;
}
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
index e2c143861b1e..249880457b17 100644
--- a/drivers/char/ipmi/ipmi_dmi.c
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -4,6 +4,9 @@
* allow autoloading of the IPMI drive based on SMBIOS entries.
*/
+#define pr_fmt(fmt) "%s" fmt, "ipmi:dmi: "
+#define dev_fmt pr_fmt
+
#include <linux/ipmi.h>
#include <linux/init.h>
#include <linux/dmi.h>
@@ -41,7 +44,7 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
unsigned int num_r = 1, size;
struct property_entry p[5];
unsigned int pidx = 0;
- char *name, *override;
+ char *name;
int rv;
enum si_type si_type;
struct ipmi_dmi_info *info;
@@ -49,11 +52,9 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
memset(p, 0, sizeof(p));
name = "dmi-ipmi-si";
- override = "ipmi_si";
switch (type) {
case IPMI_DMI_TYPE_SSIF:
name = "dmi-ipmi-ssif";
- override = "ipmi_ssif";
offset = 1;
size = 1;
si_type = SI_TYPE_INVALID;
@@ -71,7 +72,7 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
si_type = SI_SMIC;
break;
default:
- pr_err("ipmi:dmi: Invalid IPMI type: %d\n", type);
+ pr_err("Invalid IPMI type: %d\n", type);
return;
}
@@ -83,7 +84,7 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
- pr_warn("ipmi:dmi: Could not allocate dmi info\n");
+ pr_warn("Could not allocate dmi info\n");
} else {
info->si_type = si_type;
info->flags = flags;
@@ -95,13 +96,9 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
pdev = platform_device_alloc(name, ipmi_dmi_nr);
if (!pdev) {
- pr_err("ipmi:dmi: Error allocation IPMI platform device\n");
+		pr_err("Error allocating IPMI platform device\n");
return;
}
- pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
- override);
- if (!pdev->driver_override)
- goto err;
if (type == IPMI_DMI_TYPE_SSIF) {
p[pidx++] = PROPERTY_ENTRY_U16("i2c-addr", base_addr);
@@ -141,22 +138,20 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
rv = platform_device_add_resources(pdev, r, num_r);
if (rv) {
- dev_err(&pdev->dev,
- "ipmi:dmi: Unable to add resources: %d\n", rv);
+ dev_err(&pdev->dev, "Unable to add resources: %d\n", rv);
goto err;
}
add_properties:
rv = platform_device_add_properties(pdev, p);
if (rv) {
- dev_err(&pdev->dev,
- "ipmi:dmi: Unable to add properties: %d\n", rv);
+ dev_err(&pdev->dev, "Unable to add properties: %d\n", rv);
goto err;
}
rv = platform_device_add(pdev);
if (rv) {
- dev_err(&pdev->dev, "ipmi:dmi: Unable to add device: %d\n", rv);
+ dev_err(&pdev->dev, "Unable to add device: %d\n", rv);
goto err;
}
@@ -217,6 +212,10 @@ static void __init dmi_decode_ipmi(const struct dmi_header *dm)
slave_addr = data[DMI_IPMI_SLAVEADDR];
memcpy(&base_addr, data + DMI_IPMI_ADDR, sizeof(unsigned long));
+ if (!base_addr) {
+ pr_err("Base address is zero, assuming no IPMI interface\n");
+ return;
+ }
if (len >= DMI_IPMI_VER2_LENGTH) {
if (type == IPMI_DMI_TYPE_SSIF) {
offset = 0;
@@ -263,7 +262,7 @@ static void __init dmi_decode_ipmi(const struct dmi_header *dm)
offset = 16;
break;
default:
- pr_err("ipmi:dmi: Invalid offset: 0\n");
+ pr_err("Invalid offset: 0\n");
return;
}
}
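
The pr_fmt definitions introduced across the IPMI files use an uncommon but valid form: "%s" fmt plus a trailing string argument. Because the pr_*() macros expand to printk(KERN_LEVEL pr_fmt(fmt), ##args), the extra argument rides along and every message gets the module tag without per-call-site string concatenation; defining dev_fmt the same way extends the prefix to dev_*() calls. A minimal sketch of the mechanism (illustrative tag):

	/* Must be defined before the printk machinery is pulled in. */
	#define pr_fmt(fmt) "%s" fmt, "example: "
	#define dev_fmt pr_fmt

	#include <linux/printk.h>

	static void example_log(void)
	{
		/* Expands to:
		 *   printk(KERN_INFO "%s" "it works\n", "example: ")
		 * and prints "example: it works". */
		pr_info("it works\n");
	}
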
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index f4ea9f47230a..2e7cda08b079 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -274,8 +274,8 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
if (kcs_debug & KCS_DEBUG_MSG) {
printk(KERN_DEBUG "start_kcs_transaction -");
for (i = 0; i < size; i++)
- printk(" %02x", (unsigned char) (data [i]));
- printk("\n");
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
}
kcs->error_retries = 0;
memcpy(kcs->write_data, data, size);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 7fc9612070a1..a74ce885b541 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -11,6 +11,9 @@
* Copyright 2002 MontaVista Software Inc.
*/
+#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
+#define dev_fmt pr_fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
@@ -30,8 +33,6 @@
#include <linux/workqueue.h>
#include <linux/uuid.h>
-#define PFX "IPMI message handler: "
-
#define IPMI_DRIVER_VERSION "39.2"
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
@@ -1343,7 +1344,7 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
user->intf->addrinfo[channel].lun = LUN & 0x3;
release_ipmi_user(user, index);
- return 0;
+ return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);
@@ -1474,8 +1475,7 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
list_move_tail(&msg->link, &msgs);
intf->waiting_events_count = 0;
if (intf->event_msg_printed) {
- dev_warn(intf->si_dev,
- PFX "Event queue no longer full\n");
+ dev_warn(intf->si_dev, "Event queue no longer full\n");
intf->event_msg_printed = 0;
}
@@ -2276,16 +2276,15 @@ static void bmc_device_id_handler(struct ipmi_smi *intf,
|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
|| (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
dev_warn(intf->si_dev,
- PFX "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
- msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
+ "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
+ msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
return;
}
rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
if (rv) {
- dev_warn(intf->si_dev,
- PFX "device id demangle failed: %d\n", rv);
+ dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
intf->bmc->dyn_id_set = 0;
} else {
/*
@@ -2908,8 +2907,7 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
mutex_unlock(&bmc->dyn_mutex);
dev_info(intf->si_dev,
- "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
- " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
bmc->id.manufacturer_id,
bmc->id.product_id,
bmc->id.device_id);
@@ -2948,7 +2946,7 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
rv = platform_device_register(&bmc->pdev);
if (rv) {
dev_err(intf->si_dev,
- PFX " Unable to register bmc device: %d\n",
+ "Unable to register bmc device: %d\n",
rv);
goto out_list_del;
}
@@ -2966,8 +2964,7 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
*/
rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
if (rv) {
- dev_err(intf->si_dev,
- PFX "Unable to create bmc symlink: %d\n", rv);
+ dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
goto out_put_bmc;
}
@@ -2976,8 +2973,8 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
if (!intf->my_dev_name) {
rv = -ENOMEM;
- dev_err(intf->si_dev,
- PFX "Unable to allocate link from BMC: %d\n", rv);
+ dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
+ rv);
goto out_unlink1;
}
@@ -2986,8 +2983,8 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
if (rv) {
kfree(intf->my_dev_name);
intf->my_dev_name = NULL;
- dev_err(intf->si_dev,
- PFX "Unable to create symlink to bmc: %d\n", rv);
+ dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
+ rv);
goto out_free_my_dev_name;
}
@@ -3071,7 +3068,7 @@ static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
if (msg->msg.data_len < 17) {
bmc->dyn_guid_set = 0;
dev_warn(intf->si_dev,
- PFX "The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n",
+ "The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n",
msg->msg.data_len);
goto out;
}
@@ -3195,7 +3192,7 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
if (rv) {
/* Got an error somehow, just give up. */
dev_warn(intf->si_dev,
- PFX "Error sending channel information for channel %d: %d\n",
+ "Error sending channel information for channel %d: %d\n",
intf->curr_channel, rv);
intf->channel_list = intf->wchannels + set;
@@ -4075,7 +4072,7 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* message.
*/
dev_warn(intf->si_dev,
- PFX "Event queue full, discarding incoming events\n");
+ "Event queue full, discarding incoming events\n");
intf->event_msg_printed = 1;
}
@@ -4094,7 +4091,7 @@ static int handle_bmc_rsp(struct ipmi_smi *intf,
recv_msg = (struct ipmi_recv_msg *) msg->user_data;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
- "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vender for assistance\n");
+ "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
return 0;
}
@@ -4130,7 +4127,7 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
if (msg->rsp_size < 2) {
/* Message is too small to be correct. */
dev_warn(intf->si_dev,
- PFX "BMC returned to small a message for netfn %x cmd %x, got %d bytes\n",
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
(msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
/* Generate an error response for the message. */
@@ -4145,7 +4142,7 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
* marginally correct.
*/
dev_warn(intf->si_dev,
- PFX "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
(msg->data[0] >> 2) | 1, msg->data[1],
msg->rsp[0] >> 2, msg->rsp[1]);
@@ -5035,11 +5032,11 @@ static int ipmi_init_msghandler(void)
rv = driver_register(&ipmidriver.driver);
if (rv) {
- pr_err(PFX "Could not register IPMI driver\n");
+ pr_err("Could not register IPMI driver\n");
return rv;
}
- pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n");
+ pr_info("version " IPMI_DRIVER_VERSION "\n");
timer_setup(&ipmi_timer, ipmi_timeout, 0);
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
@@ -5086,10 +5083,10 @@ static void __exit cleanup_ipmi(void)
/* Check for buffer leaks. */
count = atomic_read(&smi_msg_inuse_count);
if (count != 0)
- pr_warn(PFX "SMI message count %d at exit\n", count);
+ pr_warn("SMI message count %d at exit\n", count);
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
- pr_warn(PFX "recv message count %d at exit\n", count);
+ pr_warn("recv message count %d at exit\n", count);
}
module_exit(cleanup_ipmi);
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index e96500372ce2..da22a8cbe68e 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -19,7 +19,7 @@
struct ipmi_smi_powernv {
u64 interface_id;
- ipmi_smi_t intf;
+ struct ipmi_smi *intf;
unsigned int irq;
/**
@@ -33,7 +33,7 @@ struct ipmi_smi_powernv {
struct opal_ipmi_msg *opal_msg;
};
-static int ipmi_powernv_start_processing(void *send_info, ipmi_smi_t intf)
+static int ipmi_powernv_start_processing(void *send_info, struct ipmi_smi *intf)
{
struct ipmi_smi_powernv *smi = send_info;
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index f6e19410dc57..bc3a18daf97a 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -11,6 +11,9 @@
*
* Copyright 2002,2004 MontaVista Software Inc.
*/
+
+#define pr_fmt(fmt) "IPMI poweroff: " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/proc_fs.h>
@@ -21,8 +24,6 @@
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
-#define PFX "IPMI poweroff: "
-
static void ipmi_po_smi_gone(int if_num);
static void ipmi_po_new_smi(int if_num, struct device *device);
@@ -192,7 +193,7 @@ static void pps_poweroff_atca(struct ipmi_user *user)
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
- printk(KERN_INFO PFX "PPS powerdown hook used");
+ pr_info("PPS powerdown hook used\n");
send_msg.netfn = IPMI_NETFN_OEM;
send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
@@ -201,10 +202,9 @@ static void pps_poweroff_atca(struct ipmi_user *user)
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
- if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
- printk(KERN_ERR PFX "Unable to send ATCA ,"
- " IPMI error 0x%x\n", rv);
- }
+ if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE)
+ pr_err("Unable to send ATCA, IPMI error 0x%x\n", rv);
+
return;
}
@@ -234,12 +234,10 @@ static int ipmi_atca_detect(struct ipmi_user *user)
(struct ipmi_addr *) &smi_addr,
&send_msg);
- printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n",
- mfg_id, prod_id);
+ pr_info("ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id);
if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
&& (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
- printk(KERN_INFO PFX
- "Installing Pigeon Point Systems Poweroff Hook\n");
+ pr_info("Installing Pigeon Point Systems Poweroff Hook\n");
atca_oem_poweroff_hook = pps_poweroff_atca;
}
return !rv;
@@ -259,7 +257,7 @@ static void ipmi_poweroff_atca(struct ipmi_user *user)
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
- printk(KERN_INFO PFX "Powering down via ATCA power command\n");
+ pr_info("Powering down via ATCA power command\n");
/*
* Power down
@@ -282,8 +280,8 @@ static void ipmi_poweroff_atca(struct ipmi_user *user)
* return code
*/
if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
- printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
- " IPMI error 0x%x\n", rv);
+ pr_err("Unable to send ATCA powerdown message, IPMI error 0x%x\n",
+ rv);
goto out;
}
@@ -334,7 +332,7 @@ static void ipmi_poweroff_cpi1(struct ipmi_user *user)
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
- printk(KERN_INFO PFX "Powering down via CPI1 power command\n");
+ pr_info("Powering down via CPI1 power command\n");
/*
* Get IPMI ipmb address
@@ -482,7 +480,7 @@ static void ipmi_poweroff_chassis(struct ipmi_user *user)
smi_addr.lun = 0;
powercyclefailed:
- printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
+ pr_info("Powering %s via IPMI chassis control command\n",
(poweroff_powercycle ? "cycle" : "down"));
/*
@@ -502,14 +500,14 @@ static void ipmi_poweroff_chassis(struct ipmi_user *user)
if (rv) {
if (poweroff_powercycle) {
/* power cycle failed, default to power down */
- printk(KERN_ERR PFX "Unable to send chassis power " \
- "cycle message, IPMI error 0x%x\n", rv);
+ pr_err("Unable to send chassis power cycle message, IPMI error 0x%x\n",
+ rv);
poweroff_powercycle = 0;
goto powercyclefailed;
}
- printk(KERN_ERR PFX "Unable to send chassis power " \
- "down message, IPMI error 0x%x\n", rv);
+ pr_err("Unable to send chassis power down message, IPMI error 0x%x\n",
+ rv);
}
}
@@ -571,8 +569,7 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
&ipmi_user);
if (rv) {
- printk(KERN_ERR PFX "could not create IPMI user, error %d\n",
- rv);
+ pr_err("could not create IPMI user, error %d\n", rv);
return;
}
@@ -594,14 +591,13 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv) {
- printk(KERN_ERR PFX "Unable to send IPMI get device id info,"
- " IPMI error 0x%x\n", rv);
+ pr_err("Unable to send IPMI get device id info, IPMI error 0x%x\n",
+ rv);
goto out_err;
}
if (halt_recv_msg.msg.data_len < 12) {
- printk(KERN_ERR PFX "(chassis) IPMI get device id info too,"
- " short, was %d bytes, needed %d bytes\n",
+ pr_err("(chassis) IPMI get device id info too short, was %d bytes, needed %d bytes\n",
halt_recv_msg.msg.data_len, 12);
goto out_err;
}
@@ -622,14 +618,13 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
}
out_err:
- printk(KERN_ERR PFX "Unable to find a poweroff function that"
- " will work, giving up\n");
+ pr_err("Unable to find a poweroff function that will work, giving up\n");
ipmi_destroy_user(ipmi_user);
return;
found:
- printk(KERN_INFO PFX "Found a %s style poweroff function\n",
- poweroff_functions[i].platform_type);
+ pr_info("Found a %s style poweroff function\n",
+ poweroff_functions[i].platform_type);
specific_poweroff_func = poweroff_functions[i].poweroff_func;
old_poweroff_func = pm_power_off;
pm_power_off = ipmi_poweroff_function;
@@ -692,16 +687,15 @@ static int __init ipmi_poweroff_init(void)
{
int rv;
- printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -"
- " IPMI Powerdown via sys_reboot.\n");
+ pr_info("Copyright (C) 2004 MontaVista Software - IPMI Powerdown via sys_reboot\n");
if (poweroff_powercycle)
- printk(KERN_INFO PFX "Power cycle is enabled.\n");
+ pr_info("Power cycle is enabled\n");
#ifdef CONFIG_PROC_FS
ipmi_table_header = register_sysctl_table(ipmi_root_table);
if (!ipmi_table_header) {
- printk(KERN_ERR PFX "Unable to register powercycle sysctl\n");
+ pr_err("Unable to register powercycle sysctl\n");
rv = -ENOMEM;
goto out_err;
}
@@ -712,7 +706,7 @@ static int __init ipmi_poweroff_init(void)
#ifdef CONFIG_PROC_FS
if (rv) {
unregister_sysctl_table(ipmi_table_header);
- printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
+ pr_err("Unable to register SMI watcher: %d\n", rv);
goto out_err;
}
@@ -735,8 +729,7 @@ static void __exit ipmi_poweroff_cleanup(void)
if (ready) {
rv = ipmi_destroy_user(ipmi_user);
if (rv)
- printk(KERN_ERR PFX "could not cleanup the IPMI"
- " user: 0x%x\n", rv);
+ pr_err("could not cleanup the IPMI user: 0x%x\n", rv);
pm_power_off = old_poweroff_func;
}
}
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
index 10219f24546b..487642809c58 100644
--- a/drivers/char/ipmi/ipmi_si_hardcode.c
+++ b/drivers/char/ipmi/ipmi_si_hardcode.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
+#define pr_fmt(fmt) "ipmi_hardcode: " fmt
+
#include <linux/moduleparam.h>
#include "ipmi_si.h"
-#define PFX "ipmi_hardcode: "
/*
* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
* a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS.
@@ -100,7 +101,7 @@ int ipmi_si_hardcode_find_bmc(void)
continue;
io.addr_source = SI_HARDCODED;
- pr_info(PFX "probing via hardcoded address\n");
+ pr_info("probing via hardcoded address\n");
if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
io.si_type = SI_KCS;
@@ -109,7 +110,7 @@ int ipmi_si_hardcode_find_bmc(void)
} else if (strcmp(si_type[i], "bt") == 0) {
io.si_type = SI_BT;
} else {
- pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
+ pr_warn("Interface type specified for interface %d, was invalid: %s\n",
i, si_type[i]);
continue;
}
@@ -123,7 +124,7 @@ int ipmi_si_hardcode_find_bmc(void)
io.addr_data = addrs[i];
io.addr_type = IPMI_MEM_ADDR_SPACE;
} else {
- pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
+ pr_warn("Interface type specified for interface %d, but port and address were not set or set to zero\n",
i);
continue;
}
diff --git a/drivers/char/ipmi/ipmi_si_hotmod.c b/drivers/char/ipmi/ipmi_si_hotmod.c
index a98ca42a50b1..c0067fd0480d 100644
--- a/drivers/char/ipmi/ipmi_si_hotmod.c
+++ b/drivers/char/ipmi/ipmi_si_hotmod.c
@@ -5,12 +5,13 @@
* Handling for dynamically adding/removing IPMI devices through
* a module parameter (and thus sysfs).
*/
+
+#define pr_fmt(fmt) "ipmi_hotmod: " fmt
+
#include <linux/moduleparam.h>
#include <linux/ipmi.h>
#include "ipmi_si.h"
-#define PFX "ipmi_hotmod: "
-
static int hotmod_handler(const char *val, const struct kernel_param *kp);
module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
@@ -61,7 +62,7 @@ static int parse_str(const struct hotmod_vals *v, int *val, char *name,
s = strchr(*curr, ',');
if (!s) {
- pr_warn(PFX "No hotmod %s given.\n", name);
+ pr_warn("No hotmod %s given\n", name);
return -EINVAL;
}
*s = '\0';
@@ -74,7 +75,7 @@ static int parse_str(const struct hotmod_vals *v, int *val, char *name,
}
}
- pr_warn(PFX "Invalid hotmod %s '%s'\n", name, *curr);
+ pr_warn("Invalid hotmod %s '%s'\n", name, *curr);
return -EINVAL;
}
@@ -85,12 +86,12 @@ static int check_hotmod_int_op(const char *curr, const char *option,
if (strcmp(curr, name) == 0) {
if (!option) {
- pr_warn(PFX "No option given for '%s'\n", curr);
+ pr_warn("No option given for '%s'\n", curr);
return -EINVAL;
}
*val = simple_strtoul(option, &n, 0);
if ((*n != '\0') || (*option == '\0')) {
- pr_warn(PFX "Bad option given for '%s'\n", curr);
+ pr_warn("Bad option given for '%s'\n", curr);
return -EINVAL;
}
return 1;
@@ -160,7 +161,7 @@ static int hotmod_handler(const char *val, const struct kernel_param *kp)
}
addr = simple_strtoul(curr, &n, 0);
if ((*n != '\0') || (*curr == '\0')) {
- pr_warn(PFX "Invalid hotmod address '%s'\n", curr);
+ pr_warn("Invalid hotmod address '%s'\n", curr);
break;
}
@@ -203,7 +204,7 @@ static int hotmod_handler(const char *val, const struct kernel_param *kp)
continue;
rv = -EINVAL;
- pr_warn(PFX "Invalid hotmod option '%s'\n", curr);
+ pr_warn("Invalid hotmod option '%s'\n", curr);
goto out;
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 5faa917df1b6..677618e6f1f7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -19,6 +19,8 @@
* and drives the real SMI state machine.
*/
+#define pr_fmt(fmt) "ipmi_si: " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
@@ -41,8 +43,6 @@
#include <linux/string.h>
#include <linux/ctype.h>
-#define PFX "ipmi_si: "
-
/* Measure times between events in the driver. */
#undef DEBUG_TIMING
@@ -269,7 +269,7 @@ void debug_timestamp(char *msg)
{
struct timespec64 t;
- getnstimeofday64(&t);
+ ktime_get_ts64(&t);
pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
}
#else
@@ -961,12 +961,12 @@ static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
ipmi_si_set_not_busy(busy_until);
else if (!ipmi_si_is_busy(busy_until)) {
- getnstimeofday64(busy_until);
+ ktime_get_ts64(busy_until);
timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
} else {
struct timespec64 now;
- getnstimeofday64(&now);
+ ktime_get_ts64(&now);
if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
ipmi_si_set_not_busy(busy_until);
return 0;
@@ -1530,7 +1530,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");
+ pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
goto out;
}
@@ -1541,7 +1541,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
- pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
+ pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
rv = -EINVAL;
goto out;
}
@@ -1559,7 +1559,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- pr_warn(PFX "Error getting response from set global, enables command, the event buffer is not enabled.\n");
+ pr_warn("Error getting response from set global, enables command, the event buffer is not enabled\n");
goto out;
}
@@ -1569,7 +1569,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
- pr_warn(PFX "Invalid return from get global, enables command, not enable the event buffer.\n");
+ pr_warn("Invalid return from get global, enables command, not enable the event buffer\n");
rv = -EINVAL;
goto out;
}
@@ -1900,7 +1900,7 @@ int ipmi_si_add_smi(struct si_sm_io *io)
}
}
- pr_info(PFX "Adding %s-specified %s state machine\n",
+ pr_info("Adding %s-specified %s state machine\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
si_to_str[new_smi->io.si_type]);
@@ -1924,7 +1924,7 @@ static int try_smi_init(struct smi_info *new_smi)
int i;
char *init_name = NULL;
- pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
+ pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
si_to_str[new_smi->io.si_type],
addr_space_to_str[new_smi->io.addr_type],
@@ -1964,7 +1964,7 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->si_num);
if (!new_smi->pdev) {
- pr_err(PFX "Unable to allocate platform device\n");
+ pr_err("Unable to allocate platform device\n");
rv = -ENOMEM;
goto out_err;
}
@@ -2097,7 +2097,7 @@ static int init_ipmi_si(void)
if (initialized)
return 0;
- pr_info("IPMI System Interface driver.\n");
+ pr_info("IPMI System Interface driver\n");
/* If the user gave us a device, they presumably want us to use it */
if (!ipmi_si_hardcode_find_bmc())
@@ -2151,7 +2151,7 @@ skip_fallback_noirq:
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
cleanup_ipmi_si();
- pr_warn(PFX "Unable to find any System Interface(s)\n");
+ pr_warn("Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
index 1b869d530884..fd0ec8d6bf0e 100644
--- a/drivers/char/ipmi/ipmi_si_mem_io.c
+++ b/drivers/char/ipmi/ipmi_si_mem_io.c
@@ -51,7 +51,7 @@ static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
static void mem_outq(const struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
- writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
+ writeq((u64)b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#endif
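
The (u64) cast in the mem_outq() fix above addresses an integer-promotion hazard: b is an unsigned char, so without the cast the shift is performed in 32-bit int arithmetic, losing any bits shifted past bit 31 (and a shift count of 32 or more is undefined behaviour outright). A small user-space sketch of the effect; the shift count is an illustrative value, not one taken from the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned char b = 0x80;
	unsigned int shift = 36;	/* e.g. a large regshift on a 64-bit register */

	/* Correct: widen to 64 bits before shifting. */
	uint64_t good = (uint64_t)b << shift;

	/* Without the cast, b is promoted only to (32-bit) int, so the
	 * shifted-out bits are lost. Simulated here via truncation so
	 * that this demo itself stays well defined: */
	uint64_t bad = (uint32_t)good;

	printf("good = 0x%016llx\nbad  = 0x%016llx\n",
	       (unsigned long long)good, (unsigned long long)bad);
	return 0;
}
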
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index f54ca6869ed2..ce00c0da5866 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -4,12 +4,13 @@
*
* Handling for IPMI devices on the PCI bus.
*/
+
+#define pr_fmt(fmt) "ipmi_pci: " fmt
+
#include <linux/module.h>
#include <linux/pci.h>
#include "ipmi_si.h"
-#define PFX "ipmi_pci: "
-
static bool pci_registered;
static bool si_trypci = true;
@@ -18,11 +19,6 @@ module_param_named(trypci, si_trypci, bool, 0);
MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
" default scan of the interfaces identified via pci");
-#define PCI_CLASS_SERIAL_IPMI 0x0c07
-#define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700
-#define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701
-#define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702
-
#define PCI_DEVICE_ID_HP_MMC 0x121A
static void ipmi_pci_cleanup(struct si_sm_io *io)
@@ -45,8 +41,7 @@ static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
io->regspacing = regspacing;
if (io->io_setup(io)) {
- dev_err(io->dev,
- "Could not setup I/O space\n");
+ dev_err(io->dev, "Could not setup I/O space\n");
return DEFAULT_REGSPACING;
}
/* write invalid cmd */
@@ -120,6 +115,8 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
}
io.addr_data = pci_resource_start(pdev, 0);
+ io.dev = &pdev->dev;
+
io.regspacing = ipmi_pci_probe_regspacing(&io);
io.regsize = DEFAULT_REGSIZE;
io.regshift = 0;
@@ -128,10 +125,8 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
if (io.irq)
io.irq_setup = ipmi_std_irq_setup;
- io.dev = &pdev->dev;
-
dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
- &pdev->resource[0], io.regsize, io.regspacing, io.irq);
+ &pdev->resource[0], io.regsize, io.regspacing, io.irq);
rv = ipmi_si_add_smi(&io);
if (rv)
@@ -166,7 +161,7 @@ void ipmi_si_pci_init(void)
if (si_trypci) {
int rv = pci_register_driver(&ipmi_pci_driver);
if (rv)
- pr_err(PFX "Unable to register PCI driver: %d\n", rv);
+ pr_err("Unable to register PCI driver: %d\n", rv);
else
pci_registered = true;
}
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index bf69927502bd..15cf819f884f 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -5,6 +5,10 @@
* Handling for platform devices in IPMI (ACPI, OF, and things
* coming from the platform).
*/
+
+#define pr_fmt(fmt) "ipmi_platform: " fmt
+#define dev_fmt pr_fmt
+
#include <linux/types.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -15,8 +19,6 @@
#include "ipmi_si.h"
#include "ipmi_dmi.h"
-#define PFX "ipmi_platform: "
-
static bool si_tryplatform = true;
#ifdef CONFIG_ACPI
static bool si_tryacpi = true;
@@ -158,7 +160,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
memset(&io, 0, sizeof(io));
io.addr_source = addr_source;
- dev_info(&pdev->dev, PFX "probing via %s\n",
+ dev_info(&pdev->dev, "probing via %s\n",
ipmi_addr_src_to_str(addr_source));
switch (type) {
@@ -236,25 +238,25 @@ static int of_ipmi_probe(struct platform_device *pdev)
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
- dev_warn(&pdev->dev, PFX "invalid address from OF\n");
+ dev_warn(&pdev->dev, "invalid address from OF\n");
return ret;
}
regsize = of_get_property(np, "reg-size", &proplen);
if (regsize && proplen != 4) {
- dev_warn(&pdev->dev, PFX "invalid regsize from OF\n");
+ dev_warn(&pdev->dev, "invalid regsize from OF\n");
return -EINVAL;
}
regspacing = of_get_property(np, "reg-spacing", &proplen);
if (regspacing && proplen != 4) {
- dev_warn(&pdev->dev, PFX "invalid regspacing from OF\n");
+ dev_warn(&pdev->dev, "invalid regspacing from OF\n");
return -EINVAL;
}
regshift = of_get_property(np, "reg-shift", &proplen);
if (regshift && proplen != 4) {
- dev_warn(&pdev->dev, PFX "invalid regshift from OF\n");
+ dev_warn(&pdev->dev, "invalid regshift from OF\n");
return -EINVAL;
}
@@ -326,7 +328,7 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
memset(&io, 0, sizeof(io));
io.addr_source = SI_ACPI;
- dev_info(&pdev->dev, PFX "probing via ACPI\n");
+ dev_info(&pdev->dev, "probing via ACPI\n");
io.addr_info.acpi_info.acpi_handle = handle;
@@ -417,6 +419,11 @@ static int ipmi_remove(struct platform_device *pdev)
return ipmi_si_remove_by_dev(&pdev->dev);
}
+static const struct platform_device_id si_plat_ids[] = {
+ { "dmi-ipmi-si", 0 },
+ { }
+};
+
struct platform_driver ipmi_platform_driver = {
.driver = {
.name = DEVICE_NAME,
@@ -425,13 +432,14 @@ struct platform_driver ipmi_platform_driver = {
},
.probe = ipmi_probe,
.remove = ipmi_remove,
+ .id_table = si_plat_ids
};
void ipmi_si_platform_init(void)
{
int rv = platform_driver_register(&ipmi_platform_driver);
if (rv)
- pr_err(PFX "Unable to register driver: %d\n", rv);
+ pr_err("Unable to register driver: %d\n", rv);
}
void ipmi_si_platform_shutdown(void)
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index 466a5aac5298..b6225bba2532 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -132,8 +132,8 @@ static int start_smic_transaction(struct si_sm_data *smic,
if (smic_debug & SMIC_DEBUG_MSG) {
printk(KERN_DEBUG "start_smic_transaction -");
for (i = 0; i < size; i++)
- printk(" %02x", (unsigned char) data[i]);
- printk("\n");
+ pr_cont(" %02x", data[i]);
+ pr_cont("\n");
}
smic->error_retries = 0;
memcpy(smic->write_data, data, size);
@@ -154,8 +154,8 @@ static int smic_get_result(struct si_sm_data *smic,
if (smic_debug & SMIC_DEBUG_MSG) {
printk(KERN_DEBUG "smic_get result -");
for (i = 0; i < smic->read_pos; i++)
- printk(" %02x", smic->read_data[i]);
- printk("\n");
+ pr_cont(" %02x", smic->read_data[i]);
+ pr_cont("\n");
}
if (length < smic->read_pos) {
smic->read_pos = length;
@@ -212,8 +212,7 @@ static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
(smic->error_retries)++;
if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
if (smic_debug & SMIC_DEBUG_ENABLE)
- printk(KERN_WARNING
- "ipmi_smic_drv: smic hosed: %s\n", reason);
+ pr_warn("ipmi_smic_drv: smic hosed: %s\n", reason);
smic->state = SMIC_HOSED;
} else {
smic->write_count = smic->orig_write_count;
@@ -326,8 +325,7 @@ static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
if (smic->state != SMIC_IDLE) {
if (smic_debug & SMIC_DEBUG_STATES)
printk(KERN_DEBUG
- "smic_event - smic->smic_timeout = %ld,"
- " time = %ld\n",
+ "smic_event - smic->smic_timeout = %ld, time = %ld\n",
smic->smic_timeout, time);
/*
* FIXME: smic_event is sometimes called with time >
@@ -347,9 +345,7 @@ static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
status = read_smic_status(smic);
if (smic_debug & SMIC_DEBUG_STATES)
- printk(KERN_DEBUG
- "smic_event - state = %d, flags = 0x%02x,"
- " status = 0x%02x\n",
+ printk(KERN_DEBUG "smic_event - state = %d, flags = 0x%02x, status = 0x%02x\n",
smic->state, flags, status);
switch (smic->state) {
@@ -440,8 +436,8 @@ static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
data = read_smic_data(smic);
if (data != 0) {
if (smic_debug & SMIC_DEBUG_ENABLE)
- printk(KERN_DEBUG
- "SMIC_WRITE_END: data = %02x\n", data);
+ printk(KERN_DEBUG "SMIC_WRITE_END: data = %02x\n",
+ data);
start_error_recovery(smic,
"state = SMIC_WRITE_END, "
"data != SUCCESS");
@@ -520,8 +516,8 @@ static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
/* data register holds an error code */
if (data != 0) {
if (smic_debug & SMIC_DEBUG_ENABLE)
- printk(KERN_DEBUG
- "SMIC_READ_END: data = %02x\n", data);
+ printk(KERN_DEBUG "SMIC_READ_END: data = %02x\n",
+ data);
start_error_recovery(smic,
"state = SMIC_READ_END, "
"data != SUCCESS");
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 29e67a80fb20..ca9528c4f183 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -27,6 +27,8 @@
* interface into the I2C driver, I believe.
*/
+#define pr_fmt(fmt) "ipmi_ssif: " fmt
+
#if defined(MODVERSIONS)
#include <linux/modversions.h>
#endif
@@ -52,7 +54,6 @@
#include "ipmi_si_sm.h"
#include "ipmi_dmi.h"
-#define PFX "ipmi_ssif: "
#define DEVICE_NAME "ipmi_ssif"
#define IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD 0x57
@@ -60,6 +61,7 @@
#define SSIF_IPMI_REQUEST 2
#define SSIF_IPMI_MULTI_PART_REQUEST_START 6
#define SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE 7
+#define SSIF_IPMI_MULTI_PART_REQUEST_END 8
#define SSIF_IPMI_RESPONSE 3
#define SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE 9
@@ -271,6 +273,7 @@ struct ssif_info {
/* Info from SSIF cmd */
unsigned char max_xmit_msg_size;
unsigned char max_recv_msg_size;
+ bool cmd8_works; /* See test_multipart_messages() for details. */
unsigned int multi_support;
int supports_pec;
@@ -316,9 +319,8 @@ static void deliver_recv_msg(struct ssif_info *ssif_info,
{
if (msg->rsp_size < 0) {
return_hosed_msg(ssif_info, msg);
- pr_err(PFX
- "Malformed message in deliver_recv_msg: rsp_size = %d\n",
- msg->rsp_size);
+ pr_err("%s: Malformed message: rsp_size = %d\n",
+ __func__, msg->rsp_size);
} else {
ipmi_smi_msg_received(ssif_info->intf, msg);
}
@@ -606,8 +608,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
ssif_info->waiting_alert = true;
ssif_info->rtc_us_timer = SSIF_MSG_USEC;
- mod_timer(&ssif_info->retry_timer,
- jiffies + SSIF_MSG_JIFFIES);
+ if (!ssif_info->stopping)
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_JIFFIES);
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -652,7 +655,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
if (len == 0) {
result = -EIO;
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
- pr_info(PFX "Middle message with no data\n");
+ pr_info("Middle message with no data\n");
goto continue_op;
}
@@ -696,8 +699,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
I2C_SMBUS_BLOCK_DATA);
if (rv < 0) {
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
- pr_info(PFX
- "Error from ssif_i2c_send\n");
+ pr_info("Error from ssif_i2c_send\n");
result = -EIO;
} else
@@ -715,7 +717,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
continue_op:
if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
- pr_info(PFX "DONE 1: state = %d, result=%d.\n",
+ pr_info("DONE 1: state = %d, result=%d\n",
ssif_info->ssif_state, result);
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
@@ -749,8 +751,8 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
*/
ssif_info->ssif_state = SSIF_NORMAL;
ipmi_ssif_unlock_cond(ssif_info, flags);
- pr_warn(PFX "Error getting flags: %d %d, %x\n",
- result, len, (len >= 3) ? data[2] : 0);
+ pr_warn("Error getting flags: %d %d, %x\n",
+ result, len, (len >= 3) ? data[2] : 0);
} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|| data[1] != IPMI_GET_MSG_FLAGS_CMD) {
/*
@@ -758,7 +760,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
* response to a previous command.
*/
ipmi_ssif_unlock_cond(ssif_info, flags);
- pr_warn(PFX "Invalid response getting flags: %x %x\n",
+ pr_warn("Invalid response getting flags: %x %x\n",
data[0], data[1]);
} else {
ssif_inc_stat(ssif_info, flag_fetches);
@@ -771,11 +773,11 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
/* We cleared the flags. */
if ((result < 0) || (len < 3) || (data[2] != 0)) {
/* Error clearing flags */
- pr_warn(PFX "Error clearing flags: %d %d, %x\n",
- result, len, (len >= 3) ? data[2] : 0);
+ pr_warn("Error clearing flags: %d %d, %x\n",
+ result, len, (len >= 3) ? data[2] : 0);
} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|| data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
- pr_warn(PFX "Invalid response clearing flags: %x %x\n",
+ pr_warn("Invalid response clearing flags: %x %x\n",
data[0], data[1]);
}
ssif_info->ssif_state = SSIF_NORMAL;
@@ -792,7 +794,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
handle_flags(ssif_info, flags);
} else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|| msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) {
- pr_warn(PFX "Invalid response getting events: %x %x\n",
+ pr_warn("Invalid response getting events: %x %x\n",
msg->rsp[0], msg->rsp[1]);
msg->done(msg);
/* Take off the event flag. */
@@ -815,7 +817,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
handle_flags(ssif_info, flags);
} else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|| msg->rsp[1] != IPMI_GET_MSG_CMD) {
- pr_warn(PFX "Invalid response clearing flags: %x %x\n",
+ pr_warn("Invalid response clearing flags: %x %x\n",
msg->rsp[0], msg->rsp[1]);
msg->done(msg);
@@ -842,7 +844,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
ipmi_ssif_unlock_cond(ssif_info, flags);
if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
- pr_info(PFX "DONE 2: state = %d.\n", ssif_info->ssif_state);
+ pr_info("DONE 2: state = %d.\n", ssif_info->ssif_state);
}
static void msg_written_handler(struct ssif_info *ssif_info, int result,
@@ -862,8 +864,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
ssif_inc_stat(ssif_info, send_errors);
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
- pr_info(PFX
- "Out of retries in msg_written_handler\n");
+ pr_info("%s: Out of retries\n", __func__);
msg_done_handler(ssif_info, -EIO, NULL, 0);
return;
}
@@ -887,32 +888,33 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
* in the SSIF_MULTI_n_PART case in the probe function
* for details on the intricacies of this.
*/
- int left;
+ int left, to_write;
unsigned char *data_to_send;
+ unsigned char cmd;
ssif_inc_stat(ssif_info, sent_messages_parts);
left = ssif_info->multi_len - ssif_info->multi_pos;
- if (left > 32)
- left = 32;
+ to_write = left;
+ if (to_write > 32)
+ to_write = 32;
/* Length byte. */
- ssif_info->multi_data[ssif_info->multi_pos] = left;
+ ssif_info->multi_data[ssif_info->multi_pos] = to_write;
data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
- ssif_info->multi_pos += left;
- if (left < 32)
- /*
- * Write is finished. Note that we must end
- * with a write of less than 32 bytes to
- * complete the transaction, even if it is
- * zero bytes.
- */
+ ssif_info->multi_pos += to_write;
+ cmd = SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE;
+ if (ssif_info->cmd8_works) {
+ if (left == to_write) {
+ cmd = SSIF_IPMI_MULTI_PART_REQUEST_END;
+ ssif_info->multi_data = NULL;
+ }
+ } else if (to_write < 32) {
ssif_info->multi_data = NULL;
+ }
rv = ssif_i2c_send(ssif_info, msg_written_handler,
- I2C_SMBUS_WRITE,
- SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
- data_to_send,
- I2C_SMBUS_BLOCK_DATA);
+ I2C_SMBUS_WRITE, cmd,
+ data_to_send, I2C_SMBUS_BLOCK_DATA);
if (rv < 0) {
/* request failed, just return the error. */
ssif_inc_stat(ssif_info, send_errors);
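
The effect of the rework above is that the final chunk no longer has to be short when the BMC understands cmd=8: a full 32-byte last part can be sent as an explicit END. A condensed, self-contained sketch of the command choice, with the constants copied from this file and the helper name invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Values from ipmi_ssif.c, reproduced for the sketch. */
#define SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE	7
#define SSIF_IPMI_MULTI_PART_REQUEST_END	8

/* Condensed form of the per-part choice in msg_written_handler():
 * with cmd8_works, the final chunk (even a full 32-byte one) goes
 * out as an explicit END; otherwise only a short (<32 byte) chunk
 * can terminate the message. */
static unsigned char pick_cmd(int left, int to_write, bool cmd8_works,
			      bool *last_part)
{
	*last_part = false;
	if (cmd8_works) {
		if (left == to_write) {
			*last_part = true;
			return SSIF_IPMI_MULTI_PART_REQUEST_END;
		}
	} else if (to_write < 32) {
		*last_part = true;	/* short MIDDLE marks the end */
	}
	return SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE;
}

int main(void)
{
	bool last;
	/* Final full-size chunk of a 64-byte body: cmd=8 when supported. */
	unsigned char cmd = pick_cmd(32, 32, true, &last);

	printf("cmd=%u last=%d\n", cmd, last);
	return 0;
}
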
@@ -939,8 +941,9 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
ssif_info->waiting_alert = true;
ssif_info->retries_left = SSIF_RECV_RETRIES;
ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
- mod_timer(&ssif_info->retry_timer,
- jiffies + SSIF_MSG_PART_JIFFIES);
+ if (!ssif_info->stopping)
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_PART_JIFFIES);
ipmi_ssif_unlock_cond(ssif_info, flags);
}
}
@@ -1043,8 +1046,8 @@ static void sender(void *send_info,
ktime_get_real_ts64(&t);
pr_info("**Enqueue %02x %02x: %lld.%6.6ld\n",
- msg->data[0], msg->data[1],
- (long long) t.tv_sec, (long) t.tv_nsec / NSEC_PER_USEC);
+ msg->data[0], msg->data[1],
+ (long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
}
}
@@ -1244,6 +1247,24 @@ static int ssif_remove(struct i2c_client *client)
return 0;
}
+static int read_response(struct i2c_client *client, unsigned char *resp)
+{
+ int ret = -ENODEV, retry_cnt = SSIF_RECV_RETRIES;
+
+ while (retry_cnt > 0) {
+ ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE,
+ resp);
+ if (ret > 0)
+ break;
+ msleep(SSIF_MSG_MSEC);
+ retry_cnt--;
+ if (retry_cnt <= 0)
+ break;
+ }
+
+ return ret;
+}
+
static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
int *resp_len, unsigned char *resp)
{
@@ -1260,26 +1281,16 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
return -ENODEV;
}
- ret = -ENODEV;
- retry_cnt = SSIF_RECV_RETRIES;
- while (retry_cnt > 0) {
- ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE,
- resp);
- if (ret > 0)
- break;
- msleep(SSIF_MSG_MSEC);
- retry_cnt--;
- if (retry_cnt <= 0)
- break;
- }
-
+ ret = read_response(client, resp);
if (ret > 0) {
/* Validate that the response is correct. */
if (ret < 3 ||
(resp[0] != (msg[0] | (1 << 2))) ||
(resp[1] != msg[1]))
ret = -EINVAL;
- else {
+ else if (ret > IPMI_MAX_MSG_LENGTH) {
+ ret = -E2BIG;
+ } else {
*resp_len = ret;
ret = 0;
}
@@ -1391,6 +1402,121 @@ static int find_slave_address(struct i2c_client *client, int slave_addr)
return slave_addr;
}
+static int start_multipart_test(struct i2c_client *client,
+ unsigned char *msg, bool do_middle)
+{
+ int retry_cnt = SSIF_SEND_RETRIES, ret;
+
+retry_write:
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_START,
+ 32, msg);
+ if (ret) {
+ retry_cnt--;
+ if (retry_cnt > 0)
+ goto retry_write;
+ dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. Just limit sends to one part.\n");
+ return ret;
+ }
+
+ if (!do_middle)
+ return 0;
+
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
+ 32, msg + 32);
+ if (ret) {
+ dev_err(&client->dev, "Could not write multi-part middle, though the BMC said it could handle it. Just limit sends to one part.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void test_multipart_messages(struct i2c_client *client,
+ struct ssif_info *ssif_info,
+ unsigned char *resp)
+{
+ unsigned char msg[65];
+ int ret;
+ bool do_middle;
+
+ if (ssif_info->max_xmit_msg_size <= 32)
+ return;
+
+ do_middle = ssif_info->max_xmit_msg_size > 63;
+
+ memset(msg, 0, sizeof(msg));
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+
+ /*
+ * The specification is all messed up dealing with sending
+ * multi-part messages. Per what the specification says, it
+ * is impossible to send a message that is a multiple of 32
+ * bytes, except for 32 itself. It talks about a "start"
+ * transaction (cmd=6) that must be 32 bytes, "middle"
+ * transaction (cmd=7) that must be 32 bytes, and an "end"
+ * transaction. The "end" transaction is shown as cmd=7 in
+ * the text, but if that's the case there is no way to
+ * differentiate between a middle and end part except the
+ * length being less than 32. But there is a table at the far
+ * end of the section (that I had never noticed until someone
+ * pointed it out to me) that mentions it as cmd=8.
+ *
+ * After some thought, I think the example is wrong and the
+ * end transaction should be cmd=8. But some systems don't
+ * implement cmd=8, they use a zero-length end transaction,
+ * even though that violates the SMBus specification.
+ *
+ * So, to work around this, this code tests whether cmd=8 works.
+ * If it does, we use that. If not, it tests zero-byte end
+ * transactions. If those work, good. If not, transfers are
+ * capped at 63 bytes.
+ */
+
+ ret = start_multipart_test(client, msg, do_middle);
+ if (ret)
+ goto out_no_multi_part;
+
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_END,
+ 1, msg + 64);
+
+ if (!ret)
+ ret = read_response(client, resp);
+
+ if (ret > 0) {
+ /* End transactions work, we are good. */
+ ssif_info->cmd8_works = true;
+ return;
+ }
+
+ ret = start_multipart_test(client, msg, do_middle);
+ if (ret) {
+ dev_err(&client->dev, "Second multipart test failed.\n");
+ goto out_no_multi_part;
+ }
+
+ ret = i2c_smbus_write_block_data(client,
+ SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
+ 0, msg + 64);
+ if (!ret)
+ ret = read_response(client, resp);
+ if (ret > 0)
+ /* Zero-size end parts work, use those. */
+ return;
+
+ /* Limit to 63 bytes and use a short middle command to mark the end. */
+ if (ssif_info->max_xmit_msg_size > 63)
+ ssif_info->max_xmit_msg_size = 63;
+ return;
+
+out_no_multi_part:
+ ssif_info->max_xmit_msg_size = 32;
+ return;
+}
+
/*
* Global enables we care about.
*/
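
Concretely, test_multipart_messages() sends a throwaway Get Device ID as a multi-part write and watches which termination the BMC accepts. The three possible outcomes change how a message whose length is an exact multiple of 32 can be framed. The sketch below is a hypothetical illustration, not driver code; it prints the resulting part sequences:

#include <stdbool.h>
#include <stdio.h>

static void show_parts(int len, bool cmd8, bool zero_end_works)
{
	int pos = 0;

	printf("%3d bytes:", len);
	if (len <= 32) {
		printf(" SINGLE(%d)\n", len);
		return;
	}
	while (pos < len) {
		int n = (len - pos > 32) ? 32 : len - pos;

		if (pos == 0)
			printf(" START(32)");
		else if (cmd8 && pos + n == len)
			printf(" END8(%d)", n);	/* explicit cmd=8 end */
		else
			printf(" MIDDLE(%d)", n);
		pos += n;
	}
	/* Without cmd=8, a short MIDDLE marks the end, so an exact
	 * multiple of 32 needs a zero-length MIDDLE to terminate. */
	if (!cmd8 && len % 32 == 0)
		printf(zero_end_works ? " MIDDLE(0)" : "  <- needs the 63-byte cap");
	printf("\n");
}

int main(void)
{
	show_parts(64, true, false);	/* START(32) END8(32)             */
	show_parts(64, false, true);	/* START(32) MIDDLE(32) MIDDLE(0) */
	show_parts(64, false, false);	/* cannot be framed, cap at 63    */
	return 0;
}
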
@@ -1435,9 +1561,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
slave_addr = find_slave_address(client, slave_addr);
- pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
- ipmi_addr_src_to_str(ssif_info->addr_source),
- client->addr, client->adapter->name, slave_addr);
+ pr_info("Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
+ ipmi_addr_src_to_str(ssif_info->addr_source),
+ client->addr, client->adapter->name, slave_addr);
ssif_info->client = client;
i2c_set_clientdata(client, ssif_info);
@@ -1450,7 +1576,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (!rv && (len >= 3) && (resp[2] == 0)) {
if (len < 7) {
if (ssif_dbg_probe)
- pr_info(PFX "SSIF info too short: %d\n", len);
+ pr_info("SSIF info too short: %d\n", len);
goto no_support;
}
@@ -1477,26 +1603,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
break;
case SSIF_MULTI_n_PART:
- /*
- * The specification is rather confusing at
- * this point, but I think I understand what
- * is meant. At least I have a workable
- * solution. With multi-part messages, you
- * cannot send a message that is a multiple of
- * 32-bytes in length, because the start and
- * middle messages are 32-bytes and the end
- * message must be at least one byte. You
- * can't fudge on an extra byte, that would
- * screw up things like fru data writes. So
- * we limit the length to 63 bytes. That way
- * a 32-byte message gets sent as a single
- * part. A larger message will be a 32-byte
- * start and the next message is always going
- * to be 1-31 bytes in length. Not ideal, but
- * it should work.
- */
- if (ssif_info->max_xmit_msg_size > 63)
- ssif_info->max_xmit_msg_size = 63;
+ /* We take whatever size given, but do some testing. */
break;
default:
@@ -1506,8 +1613,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
} else {
no_support:
/* Assume no multi-part or PEC support */
- pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
- rv, len, resp[2]);
+ pr_info("Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
+ rv, len, resp[2]);
ssif_info->max_xmit_msg_size = 32;
ssif_info->max_recv_msg_size = 32;
@@ -1515,13 +1622,15 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
ssif_info->supports_pec = 0;
}
+ test_multipart_messages(client, ssif_info, resp);
+
/* Make sure the NMI timeout is cleared. */
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
msg[2] = WDT_PRE_TIMEOUT_INT;
rv = do_cmd(client, 3, msg, &len, resp);
if (rv || (len < 3) || (resp[2] != 0))
- pr_warn(PFX "Unable to clear message flags: %d %d %2.2x\n",
+ pr_warn("Unable to clear message flags: %d %d %2.2x\n",
rv, len, resp[2]);
/* Attempt to enable the event buffer. */
@@ -1529,7 +1638,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
rv = do_cmd(client, 2, msg, &len, resp);
if (rv || (len < 4) || (resp[2] != 0)) {
- pr_warn(PFX "Error getting global enables: %d %d %2.2x\n",
+ pr_warn("Error getting global enables: %d %d %2.2x\n",
rv, len, resp[2]);
rv = 0; /* Not fatal */
goto found;
@@ -1548,7 +1657,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF;
rv = do_cmd(client, 3, msg, &len, resp);
if (rv || (len < 2)) {
- pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
+ pr_warn("Error setting global enables: %d %d %2.2x\n",
rv, len, resp[2]);
rv = 0; /* Not fatal */
goto found;
@@ -1569,7 +1678,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR;
rv = do_cmd(client, 3, msg, &len, resp);
if (rv || (len < 2)) {
- pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
+ pr_warn("Error setting global enables: %d %d %2.2x\n",
rv, len, resp[2]);
rv = 0; /* Not fatal */
goto found;
@@ -1637,7 +1746,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
&ssif_info->client->dev,
slave_addr);
if (rv) {
- pr_err(PFX "Unable to register device: error %d\n", rv);
+ pr_err("Unable to register device: error %d\n", rv);
goto out_remove_attr;
}
@@ -1741,7 +1850,7 @@ static void free_ssif_clients(void)
static unsigned short *ssif_address_list(void)
{
struct ssif_addr_info *info;
- unsigned int count = 0, i;
+ unsigned int count = 0, i = 0;
unsigned short *address_list;
list_for_each_entry(info, &ssif_infos, link)
@@ -1752,18 +1861,17 @@ static unsigned short *ssif_address_list(void)
if (!address_list)
return NULL;
- i = 0;
list_for_each_entry(info, &ssif_infos, link) {
unsigned short addr = info->binfo.addr;
int j;
for (j = 0; j < i; j++) {
if (address_list[j] == addr)
- goto skip_addr;
+ /* Found a dup. */
+ break;
}
- address_list[i] = addr;
-skip_addr:
- i++;
+ if (j == i) /* Didn't find it in the list. */
+ address_list[i++] = addr;
}
address_list[i] = I2C_CLIENT_END;
@@ -1790,7 +1898,7 @@ static int dmi_ipmi_probe(struct platform_device *pdev)
rv = device_property_read_u16(&pdev->dev, "i2c-addr", &i2c_addr);
if (rv) {
- dev_warn(&pdev->dev, PFX "No i2c-addr property\n");
+ dev_warn(&pdev->dev, "No i2c-addr property\n");
return -ENODEV;
}
@@ -1847,12 +1955,18 @@ static int ssif_platform_remove(struct platform_device *dev)
return 0;
}
+static const struct platform_device_id ssif_plat_ids[] = {
+ { "dmi-ipmi-ssif", 0 },
+ { }
+};
+
static struct platform_driver ipmi_driver = {
.driver = {
.name = DEVICE_NAME,
},
.probe = ssif_platform_probe,
.remove = ssif_platform_remove,
+ .id_table = ssif_plat_ids
};
static int init_ipmi_ssif(void)
@@ -1871,8 +1985,7 @@ static int init_ipmi_ssif(void)
dbg[i], slave_addrs[i],
SI_HARDCODED, NULL);
if (rv)
- pr_err(PFX
- "Couldn't add hardcoded device at addr 0x%x\n",
+ pr_err("Couldn't add hardcoded device at addr 0x%x\n",
addr[i]);
}
@@ -1883,7 +1996,7 @@ static int init_ipmi_ssif(void)
if (ssif_trydmi) {
rv = platform_driver_register(&ipmi_driver);
if (rv)
- pr_err(PFX "Unable to register driver: %d\n", rv);
+ pr_err("Unable to register driver: %d\n", rv);
}
ssif_i2c_driver.address_list = ssif_address_list();
@@ -1905,6 +2018,8 @@ static void cleanup_ipmi_ssif(void)
i2c_del_driver(&ssif_i2c_driver);
+ kfree(ssif_i2c_driver.address_list);
+
platform_driver_unregister(&ipmi_driver);
free_ssif_clients();
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index ca1c5c5109f0..2924a4bc4a32 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -11,6 +11,8 @@
* Copyright 2002 MontaVista Software Inc.
*/
+#define pr_fmt(fmt) "IPMI Watchdog: " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ipmi.h>
@@ -50,8 +52,6 @@
#define HAVE_DIE_NMI
#endif
-#define PFX "IPMI Watchdog: "
-
/*
* The IPMI command/response information for the watchdog timer.
*/
@@ -407,7 +407,7 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
recv_msg,
1);
if (rv)
- pr_warn(PFX "set timeout error: %d\n", rv);
+ pr_warn("set timeout error: %d\n", rv);
else if (send_heartbeat_now)
*send_heartbeat_now = hbnow;
@@ -530,7 +530,7 @@ static void panic_halt_ipmi_set_timeout(void)
&send_heartbeat_now);
if (rv) {
atomic_sub(1, &panic_done_count);
- pr_warn(PFX "Unable to extend the watchdog timeout.");
+ pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
panic_halt_ipmi_heartbeat();
@@ -573,7 +573,7 @@ restart:
&recv_msg,
1);
if (rv) {
- pr_warn(PFX "heartbeat send failure: %d\n", rv);
+ pr_warn("heartbeat send failure: %d\n", rv);
return rv;
}
@@ -583,7 +583,7 @@ restart:
if (recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
timeout_retries++;
if (timeout_retries > 3) {
- pr_err(PFX ": Unable to restore the IPMI watchdog's settings, giving up.\n");
+ pr_err("Unable to restore the IPMI watchdog's settings, giving up\n");
rv = -EIO;
goto out;
}
@@ -598,7 +598,7 @@ restart:
*/
rv = _ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
if (rv) {
- pr_err(PFX ": Unable to send the command to set the watchdog's settings, giving up.\n");
+ pr_err("Unable to send the command to set the watchdog's settings, giving up\n");
goto out;
}
@@ -876,8 +876,7 @@ static int ipmi_close(struct inode *ino, struct file *filep)
_ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
mutex_unlock(&ipmi_watchdog_mutex);
} else {
- pr_crit(PFX
- "Unexpected close, not stopping watchdog!\n");
+ pr_crit("Unexpected close, not stopping watchdog!\n");
ipmi_heartbeat();
}
clear_bit(0, &ipmi_wdog_open);
@@ -911,9 +910,9 @@ static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
{
if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
- pr_info(PFX "response: The IPMI controller appears to have been reset, will attempt to reinitialize the watchdog timer\n");
+ pr_info("response: The IPMI controller appears to have been reset, will attempt to reinitialize the watchdog timer\n");
else if (msg->msg.data[0] != 0)
- pr_err(PFX "response: Error %x on cmd %x\n",
+ pr_err("response: Error %x on cmd %x\n",
msg->msg.data[0],
msg->msg.cmd);
@@ -985,7 +984,7 @@ static void ipmi_register_watchdog(int ipmi_intf)
rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
if (rv < 0) {
- pr_crit(PFX "Unable to register with ipmi\n");
+ pr_crit("Unable to register with ipmi\n");
goto out;
}
@@ -993,7 +992,7 @@ static void ipmi_register_watchdog(int ipmi_intf)
&ipmi_version_major,
&ipmi_version_minor);
if (rv) {
- pr_warn(PFX "Unable to get IPMI version, assuming 1.0\n");
+ pr_warn("Unable to get IPMI version, assuming 1.0\n");
ipmi_version_major = 1;
ipmi_version_minor = 0;
}
@@ -1002,7 +1001,7 @@ static void ipmi_register_watchdog(int ipmi_intf)
if (rv < 0) {
ipmi_destroy_user(watchdog_user);
watchdog_user = NULL;
- pr_crit(PFX "Unable to register misc device\n");
+ pr_crit("Unable to register misc device\n");
}
#ifdef HAVE_DIE_NMI
@@ -1024,7 +1023,7 @@ static void ipmi_register_watchdog(int ipmi_intf)
rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
if (rv) {
- pr_warn(PFX "Error starting timer to test NMI: 0x%x. The NMI pretimeout will likely not work\n",
+ pr_warn("Error starting timer to test NMI: 0x%x. The NMI pretimeout will likely not work\n",
rv);
rv = 0;
goto out_restore;
@@ -1033,7 +1032,7 @@ static void ipmi_register_watchdog(int ipmi_intf)
msleep(1500);
if (testing_nmi != 2) {
- pr_warn(PFX "IPMI NMI didn't seem to occur. The NMI pretimeout will likely not work\n");
+ pr_warn("IPMI NMI didn't seem to occur. The NMI pretimeout will likely not work\n");
}
out_restore:
testing_nmi = 0;
@@ -1049,7 +1048,7 @@ static void ipmi_register_watchdog(int ipmi_intf)
start_now = 0; /* Disable this function after first startup. */
ipmi_watchdog_state = action_val;
ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
- pr_info(PFX "Starting now!\n");
+ pr_info("Starting now!\n");
} else {
/* Stop the timer now. */
ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
@@ -1086,7 +1085,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
/* Disconnect from IPMI. */
rv = ipmi_destroy_user(loc_user);
if (rv)
- pr_warn(PFX "error unlinking from IPMI: %d\n", rv);
+ pr_warn("error unlinking from IPMI: %d\n", rv);
/* If it comes back, restart it properly. */
ipmi_start_timer_on_heartbeat = 1;
@@ -1127,7 +1126,7 @@ ipmi_nmi(unsigned int val, struct pt_regs *regs)
the timer. So do so. */
atomic_set(&pretimeout_since_last_heartbeat, 1);
if (atomic_inc_and_test(&preop_panic_excl))
- nmi_panic(regs, PFX "pre-timeout");
+ nmi_panic(regs, "pre-timeout");
}
return NMI_HANDLED;
@@ -1259,7 +1258,7 @@ static void check_parms(void)
if (preaction_val == WDOG_PRETIMEOUT_NMI) {
do_nmi = 1;
if (preop_val == WDOG_PREOP_GIVE_DATA) {
- pr_warn(PFX "Pretimeout op is to give data but NMI pretimeout is enabled, setting pretimeout op to none\n");
+ pr_warn("Pretimeout op is to give data but NMI pretimeout is enabled, setting pretimeout op to none\n");
preop_op("preop_none", NULL);
do_nmi = 0;
}
@@ -1268,7 +1267,7 @@ static void check_parms(void)
rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0,
"ipmi");
if (rv) {
- pr_warn(PFX "Can't register nmi handler\n");
+ pr_warn("Can't register nmi handler\n");
return;
} else
nmi_handler_registered = 1;
@@ -1285,19 +1284,18 @@ static int __init ipmi_wdog_init(void)
if (action_op(action, NULL)) {
action_op("reset", NULL);
- pr_info(PFX "Unknown action '%s', defaulting to reset\n",
- action);
+ pr_info("Unknown action '%s', defaulting to reset\n", action);
}
if (preaction_op(preaction, NULL)) {
preaction_op("pre_none", NULL);
- pr_info(PFX "Unknown preaction '%s', defaulting to none\n",
+ pr_info("Unknown preaction '%s', defaulting to none\n",
preaction);
}
if (preop_op(preop, NULL)) {
preop_op("preop_none", NULL);
- pr_info(PFX "Unknown preop '%s', defaulting to none\n", preop);
+ pr_info("Unknown preop '%s', defaulting to none\n", preop);
}
check_parms();
@@ -1311,11 +1309,11 @@ static int __init ipmi_wdog_init(void)
unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
#endif
unregister_reboot_notifier(&wdog_reboot_notifier);
- pr_warn(PFX "can't register smi watcher\n");
+ pr_warn("can't register smi watcher\n");
return rv;
}
- pr_info(PFX "driver initialized\n");
+ pr_info("driver initialized\n");
return 0;
}
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index a219964cb770..809507bf8f1c 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -530,7 +530,7 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
DEBUGP(5, dev, "NumRecBytes is valid\n");
break;
}
- mdelay(10);
+ usleep_range(10000, 11000);
}
if (i == 100) {
DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting "
@@ -546,7 +546,7 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read);
break;
}
- mdelay(10);
+ usleep_range(10000, 11000);
}
/* check whether it is a short PTS reply? */
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index f80965407d3c..d5e43606339c 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -505,7 +505,7 @@ static void cm4040_reader_release(struct pcmcia_device *link)
DEBUGP(3, dev, "-> cm4040_reader_release\n");
while (link->open) {
- DEBUGP(3, dev, KERN_INFO MODULE_NAME ": delaying release "
+ DEBUGP(3, dev, MODULE_NAME ": delaying release "
"until process has terminated\n");
wait_event(dev->devq, (link->open == 0));
}
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 66b04194aa9f..82f9a6a814ae 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2237,8 +2237,7 @@ static int mgslpc_ioctl(struct tty_struct *tty,
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl"))
return -ENODEV;
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
- (cmd != TIOCMIWAIT)) {
+ if (cmd != TIOCMIWAIT) {
if (tty_io_error(tty))
return -EIO;
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c75b6cdf0053..2eb70e76ed35 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -433,9 +433,9 @@ static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
- __u32 out[CHACHA20_BLOCK_WORDS]);
+ __u8 out[CHACHA20_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u32 tmp[CHACHA20_BLOCK_WORDS], int used);
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -926,7 +926,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
- __u32 block[CHACHA20_BLOCK_WORDS];
+ __u8 block[CHACHA20_BLOCK_SIZE];
__u32 key[8];
} buf;
@@ -973,7 +973,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
static void _extract_crng(struct crng_state *crng,
- __u32 out[CHACHA20_BLOCK_WORDS])
+ __u8 out[CHACHA20_BLOCK_SIZE])
{
unsigned long v, flags;
@@ -990,7 +990,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
+static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
{
struct crng_state *crng = NULL;
@@ -1008,7 +1008,7 @@ static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
unsigned long flags;
__u32 *s, *d;
@@ -1020,14 +1020,14 @@ static void _crng_backtrack_protect(struct crng_state *crng,
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = &tmp[used / sizeof(__u32)];
+ s = (__u32 *) &tmp[used];
d = &crng->state[4];
for (i=0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
struct crng_state *crng = NULL;
@@ -1043,7 +1043,7 @@ static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
- __u32 tmp[CHACHA20_BLOCK_WORDS];
+ __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1622,7 +1622,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u32 tmp[CHACHA20_BLOCK_WORDS];
+ __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -2248,7 +2248,7 @@ u64 get_random_u64(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((__u32 *)batch->entropy_u64);
+ extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
@@ -2278,7 +2278,7 @@ u32 get_random_u32(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng(batch->entropy_u32);
+ extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
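
The buffer type change from __u32[CHACHA20_BLOCK_WORDS] to __u8[CHACHA20_BLOCK_SIZE] is paired with __aligned(4) on the stack temporaries because _crng_backtrack_protect() casts &tmp[used] back to a __u32 pointer and dereferences it. A user-space sketch of why the attribute matters; the offset and sizes here are illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A byte array only reaches 4-byte alignment if we ask for it,
	 * which is what __aligned(4) does for the kernel's tmp arrays. */
	uint8_t tmp[64] __attribute__((aligned(4)));
	uint32_t key_words[8];
	int used = 16;		/* offset must stay a multiple of 4 here */

	memset(tmp, 0xab, sizeof(tmp));

	/* The same cast the kernel performs; it is only safe when both
	 * the base alignment and the offset keep the pointer 32-bit
	 * aligned. */
	uint32_t *s = (uint32_t *)&tmp[used];
	for (int i = 0; i < 8; i++)
		key_words[i] = s[i];

	printf("first word: %08x\n", key_words[0]);
	return 0;
}
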
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 18c81cbe4704..536e55d3919f 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -5,7 +5,7 @@
menuconfig TCG_TPM
tristate "TPM Hardware Support"
depends on HAS_IOMEM
- select SECURITYFS
+ imply SECURITYFS
select CRYPTO
select CRYPTO_HASH_INFO
---help---
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index e4a04b2d3c32..99b5133a9d05 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -17,11 +17,36 @@
* License.
*
*/
+#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/workqueue.h>
#include "tpm.h"
#include "tpm-dev.h"
+static struct workqueue_struct *tpm_dev_wq;
+static DEFINE_MUTEX(tpm_dev_wq_lock);
+
+static void tpm_async_work(struct work_struct *work)
+{
+ struct file_priv *priv =
+ container_of(work, struct file_priv, async_work);
+ ssize_t ret;
+
+ mutex_lock(&priv->buffer_mutex);
+ priv->command_enqueued = false;
+ ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer), 0);
+
+ tpm_put_ops(priv->chip);
+ if (ret > 0) {
+ priv->data_pending = ret;
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ }
+ mutex_unlock(&priv->buffer_mutex);
+ wake_up_interruptible(&priv->async_wait);
+}
+
static void user_reader_timeout(struct timer_list *t)
{
struct file_priv *priv = from_timer(priv, t, user_read_timer);
@@ -29,27 +54,32 @@ static void user_reader_timeout(struct timer_list *t)
pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
task_tgid_nr(current));
- schedule_work(&priv->work);
+ schedule_work(&priv->timeout_work);
}
-static void timeout_work(struct work_struct *work)
+static void tpm_timeout_work(struct work_struct *work)
{
- struct file_priv *priv = container_of(work, struct file_priv, work);
+ struct file_priv *priv = container_of(work, struct file_priv,
+ timeout_work);
mutex_lock(&priv->buffer_mutex);
priv->data_pending = 0;
memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
mutex_unlock(&priv->buffer_mutex);
+ wake_up_interruptible(&priv->async_wait);
}
void tpm_common_open(struct file *file, struct tpm_chip *chip,
- struct file_priv *priv)
+ struct file_priv *priv, struct tpm_space *space)
{
priv->chip = chip;
+ priv->space = space;
+
mutex_init(&priv->buffer_mutex);
timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
- INIT_WORK(&priv->work, timeout_work);
-
+ INIT_WORK(&priv->timeout_work, tpm_timeout_work);
+ INIT_WORK(&priv->async_work, tpm_async_work);
+ init_waitqueue_head(&priv->async_wait);
file->private_data = priv;
}
@@ -61,15 +91,17 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
int rc;
del_singleshot_timer_sync(&priv->user_read_timer);
- flush_work(&priv->work);
+ flush_work(&priv->timeout_work);
mutex_lock(&priv->buffer_mutex);
if (priv->data_pending) {
ret_size = min_t(ssize_t, size, priv->data_pending);
- rc = copy_to_user(buf, priv->data_buffer, ret_size);
- memset(priv->data_buffer, 0, priv->data_pending);
- if (rc)
- ret_size = -EFAULT;
+ if (ret_size > 0) {
+ rc = copy_to_user(buf, priv->data_buffer, ret_size);
+ memset(priv->data_buffer, 0, priv->data_pending);
+ if (rc)
+ ret_size = -EFAULT;
+ }
priv->data_pending = 0;
}
@@ -79,13 +111,12 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
}
ssize_t tpm_common_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off, struct tpm_space *space)
+ size_t size, loff_t *off)
{
struct file_priv *priv = file->private_data;
- size_t in_size = size;
- ssize_t out_size;
+ int ret = 0;
- if (in_size > TPM_BUFSIZE)
+ if (size > TPM_BUFSIZE)
return -E2BIG;
mutex_lock(&priv->buffer_mutex);
@@ -94,21 +125,20 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
* tpm_read or a user_read_timer timeout. This also prevents split
* buffered writes from blocking here.
*/
- if (priv->data_pending != 0) {
- mutex_unlock(&priv->buffer_mutex);
- return -EBUSY;
+ if (priv->data_pending != 0 || priv->command_enqueued) {
+ ret = -EBUSY;
+ goto out;
}
- if (copy_from_user
- (priv->data_buffer, (void __user *) buf, in_size)) {
- mutex_unlock(&priv->buffer_mutex);
- return -EFAULT;
+ if (copy_from_user(priv->data_buffer, buf, size)) {
+ ret = -EFAULT;
+ goto out;
}
- if (in_size < 6 ||
- in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
- mutex_unlock(&priv->buffer_mutex);
- return -EINVAL;
+ if (size < 6 ||
+ size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
+ ret = -EINVAL;
+ goto out;
}
/* atomic tpm command send and result receive. We only hold the ops
@@ -116,25 +146,50 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
* the char dev is held open.
*/
if (tpm_try_get_ops(priv->chip)) {
- mutex_unlock(&priv->buffer_mutex);
- return -EPIPE;
+ ret = -EPIPE;
+ goto out;
}
- out_size = tpm_transmit(priv->chip, space, priv->data_buffer,
- sizeof(priv->data_buffer), 0);
- tpm_put_ops(priv->chip);
- if (out_size < 0) {
+ /*
+ * If in nonblocking mode, schedule an async job to send
+ * the command and return the size.
+ * In case of error, the error code will be returned in
+ * the subsequent read call.
+ */
+ if (file->f_flags & O_NONBLOCK) {
+ priv->command_enqueued = true;
+ queue_work(tpm_dev_wq, &priv->async_work);
mutex_unlock(&priv->buffer_mutex);
- return out_size;
+ return size;
}
- priv->data_pending = out_size;
+ ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer), 0);
+ tpm_put_ops(priv->chip);
+
+ if (ret > 0) {
+ priv->data_pending = ret;
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ ret = size;
+ }
+out:
mutex_unlock(&priv->buffer_mutex);
+ return ret;
+}
+
+__poll_t tpm_common_poll(struct file *file, poll_table *wait)
+{
+ struct file_priv *priv = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &priv->async_wait, wait);
- /* Set a timeout by which the reader must come claim the result */
- mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ if (priv->data_pending)
+ mask = EPOLLIN | EPOLLRDNORM;
+ else
+ mask = EPOLLOUT | EPOLLWRNORM;
- return in_size;
+ return mask;
}
/*
@@ -142,8 +197,24 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
*/
void tpm_common_release(struct file *file, struct file_priv *priv)
{
+ flush_work(&priv->async_work);
del_singleshot_timer_sync(&priv->user_read_timer);
- flush_work(&priv->work);
+ flush_work(&priv->timeout_work);
file->private_data = NULL;
priv->data_pending = 0;
}
+
+int __init tpm_dev_common_init(void)
+{
+ tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+
+ return !tpm_dev_wq ? -ENOMEM : 0;
+}
+
+void __exit tpm_dev_common_exit(void)
+{
+ if (tpm_dev_wq) {
+ destroy_workqueue(tpm_dev_wq);
+ tpm_dev_wq = NULL;
+ }
+}
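
Taken together, the workqueue, the command_enqueued flag, and tpm_common_poll() give the TPM character devices a conventional nonblocking mode: write() queues the command and returns at once, poll() signals EPOLLIN once data_pending is set, and read() then collects the response (or the error code from the async transmit). A hedged user-space sketch, assuming a TPM 2.0 device at /dev/tpm0 and skipping error handling; the command bytes are the standard TPM2_GetRandom(8):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* TPM2_GetRandom(8): tag=TPM_ST_NO_SESSIONS, size=12, cc=0x17B. */
	unsigned char cmd[] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0c,
				0x00, 0x00, 0x01, 0x7b, 0x00, 0x08 };
	unsigned char rsp[4096];
	struct pollfd pfd;
	int fd = open("/dev/tpm0", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return 1;

	/* Queues the command on tpm_dev_wq and returns immediately. */
	write(fd, cmd, sizeof(cmd));

	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, 5000);	/* readable once data_pending is set */

	/* Returns the staged response, or the async error code. */
	ssize_t n = read(fd, rsp, sizeof(rsp));
	printf("response: %zd bytes\n", n);
	close(fd);
	return 0;
}
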
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index ebd74ab5abef..32f9738f1cb2 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -39,7 +39,7 @@ static int tpm_open(struct inode *inode, struct file *file)
if (priv == NULL)
goto out;
- tpm_common_open(file, chip, priv);
+ tpm_common_open(file, chip, priv, NULL);
return 0;
@@ -48,12 +48,6 @@ static int tpm_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
-static ssize_t tpm_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off)
-{
- return tpm_common_write(file, buf, size, off, NULL);
-}
-
/*
* Called on file close
*/
@@ -73,6 +67,7 @@ const struct file_operations tpm_fops = {
.llseek = no_llseek,
.open = tpm_open,
.read = tpm_common_read,
- .write = tpm_write,
+ .write = tpm_common_write,
+ .poll = tpm_common_poll,
.release = tpm_release,
};
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
index b24cfb4d3ee1..a126b575cb8c 100644
--- a/drivers/char/tpm/tpm-dev.h
+++ b/drivers/char/tpm/tpm-dev.h
@@ -2,27 +2,33 @@
#ifndef _TPM_DEV_H
#define _TPM_DEV_H
+#include <linux/poll.h>
#include "tpm.h"
struct file_priv {
struct tpm_chip *chip;
+ struct tpm_space *space;
- /* Data passed to and from the tpm via the read/write calls */
- size_t data_pending;
+ /* Holds the amount of data passed or an error code from async op */
+ ssize_t data_pending;
struct mutex buffer_mutex;
struct timer_list user_read_timer; /* user needs to claim result */
- struct work_struct work;
+ struct work_struct timeout_work;
+ struct work_struct async_work;
+ wait_queue_head_t async_wait;
+ bool command_enqueued;
u8 data_buffer[TPM_BUFSIZE];
};
void tpm_common_open(struct file *file, struct tpm_chip *chip,
- struct file_priv *priv);
+ struct file_priv *priv, struct tpm_space *space);
ssize_t tpm_common_read(struct file *file, char __user *buf,
size_t size, loff_t *off);
ssize_t tpm_common_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off, struct tpm_space *space);
-void tpm_common_release(struct file *file, struct file_priv *priv);
+ size_t size, loff_t *off);
+__poll_t tpm_common_poll(struct file *file, poll_table *wait);
+void tpm_common_release(struct file *file, struct file_priv *priv);
#endif
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1a803b0cf980..129f640424b7 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -663,7 +663,8 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
return len;
err = be32_to_cpu(header->return_code);
- if (err != 0 && desc)
+ if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED
+ && desc)
dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
desc);
if (err)
@@ -1321,7 +1322,8 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
}
rlength = be32_to_cpu(tpm_cmd.header.out.length);
- if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
+ if (rlength < TPM_HEADER_SIZE +
+ offsetof(struct tpm_getrandom_out, rng_data) +
recd) {
total = -EFAULT;
break;
@@ -1407,19 +1409,32 @@ static int __init tpm_init(void)
tpmrm_class = class_create(THIS_MODULE, "tpmrm");
if (IS_ERR(tpmrm_class)) {
pr_err("couldn't create tpmrm class\n");
- class_destroy(tpm_class);
- return PTR_ERR(tpmrm_class);
+ rc = PTR_ERR(tpmrm_class);
+ goto out_destroy_tpm_class;
}
rc = alloc_chrdev_region(&tpm_devt, 0, 2*TPM_NUM_DEVICES, "tpm");
if (rc < 0) {
pr_err("tpm: failed to allocate char dev region\n");
- class_destroy(tpmrm_class);
- class_destroy(tpm_class);
- return rc;
+ goto out_destroy_tpmrm_class;
+ }
+
+ rc = tpm_dev_common_init();
+ if (rc) {
+ pr_err("tpm: failed to allocate char dev region\n");
+ goto out_unreg_chrdev;
}
return 0;
+
+out_unreg_chrdev:
+ unregister_chrdev_region(tpm_devt, 2 * TPM_NUM_DEVICES);
+out_destroy_tpmrm_class:
+ class_destroy(tpmrm_class);
+out_destroy_tpm_class:
+ class_destroy(tpm_class);
+
+ return rc;
}
static void __exit tpm_exit(void)
@@ -1428,6 +1443,7 @@ static void __exit tpm_exit(void)
class_destroy(tpm_class);
class_destroy(tpmrm_class);
unregister_chrdev_region(tpm_devt, 2*TPM_NUM_DEVICES);
+ tpm_dev_common_exit();
}
subsys_initcall(tpm_init);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index f3501d05264f..f20dc8ece348 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -604,4 +604,6 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
int tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
+int tpm_dev_common_init(void);
+void tpm_dev_common_exit(void);
#endif
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c31b490bd41d..3acf4fd4e5a5 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -329,7 +329,9 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
&buf.data[TPM_HEADER_SIZE];
recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
if (tpm_buf_length(&buf) <
- offsetof(struct tpm2_get_random_out, buffer) + recd) {
+ TPM_HEADER_SIZE +
+ offsetof(struct tpm2_get_random_out, buffer) +
+ recd) {
err = -EFAULT;
goto out;
}
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
index 1a0e97a5da5a..0c751a79bbed 100644
--- a/drivers/char/tpm/tpmrm-dev.c
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -28,7 +28,7 @@ static int tpmrm_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- tpm_common_open(file, chip, &priv->priv);
+ tpm_common_open(file, chip, &priv->priv, &priv->space);
return 0;
}
@@ -45,21 +45,12 @@ static int tpmrm_release(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t tpmrm_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off)
-{
- struct file_priv *fpriv = file->private_data;
- struct tpmrm_priv *priv = container_of(fpriv, struct tpmrm_priv, priv);
-
- return tpm_common_write(file, buf, size, off, &priv->space);
-}
-
const struct file_operations tpmrm_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.open = tpmrm_open,
.read = tpm_common_read,
- .write = tpmrm_write,
+ .write = tpm_common_write,
+ .poll = tpm_common_poll,
.release = tpmrm_release,
};
-
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 911475d36800..b150f87f38f5 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -264,7 +264,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
return -ENOMEM;
}
- rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref);
+ rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
if (rv < 0)
return rv;
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index ffa5dac221e4..129ebd2588fd 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -1434,8 +1434,16 @@ static void __init sun4i_ccu_init(struct device_node *node,
return;
}
- /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN4I_PLL_AUDIO_REG);
+
+ /*
+ * Force VCO and PLL bias current to lowest setting. Higher
+ * settings interfere with sigma-delta modulation and result
+ * in audible noise and distortions when using SPDIF or I2S.
+ */
+ val &= ~GENMASK(25, 16);
+
+ /* Force the PLL-Audio-1x divider to 1 */
val &= ~GENMASK(29, 26);
writel(val | (1 << 26), reg + SUN4I_PLL_AUDIO_REG);
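
Both register updates are a single read-modify-write: clear a bit field with
GENMASK(), then OR in the new value before writing back. A standalone sketch
of the same bit manipulation, with GENMASK() redefined locally and a plain
variable standing in for the readl()/writel() register access:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	uint32_t val = 0xffffffff;	/* pretend PLL_AUDIO register */

	val &= ~GENMASK(25, 16);	/* VCO/PLL bias current -> lowest */
	val &= ~GENMASK(29, 26);	/* clear the PLL-Audio-1x divider */
	val |= 1u << 26;		/* force the divider to 1 */

	printf("PLL_AUDIO = 0x%08x\n", val);
	return 0;
}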
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 08ef69945ffb..d977193842df 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -55,6 +55,7 @@ struct clk_plt_data {
u8 nparents;
struct clk_plt *clks[PMC_CLK_NUM];
struct clk_lookup *mclk_lookup;
+ struct clk_lookup *ether_clk_lookup;
};
/* Return an index in parent table */
@@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
spin_lock_init(&pclk->lock);
- /*
- * If the clock was already enabled by the firmware mark it as critical
- * to avoid it being gated by the clock framework if no driver owns it.
- */
- if (plt_clk_is_enabled(&pclk->hw))
- init.flags |= CLK_IS_CRITICAL;
-
ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
if (ret) {
pclk = ERR_PTR(ret);
@@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev)
goto err_unreg_clk_plt;
}
+ data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
+ "ether_clk", NULL);
+ if (!data->ether_clk_lookup) {
+ err = -ENOMEM;
+ goto err_drop_mclk;
+ }
+
plt_clk_free_parent_names_loop(parent_names, data->nparents);
platform_set_drvdata(pdev, data);
return 0;
+err_drop_mclk:
+ clkdev_drop(data->mclk_lookup);
err_unreg_clk_plt:
plt_clk_unregister_loop(data, i);
plt_clk_unregister_parents(data);
@@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev)
data = platform_get_drvdata(pdev);
+ clkdev_drop(data->ether_clk_lookup);
clkdev_drop(data->mclk_lookup);
plt_clk_unregister_loop(data, PMC_CLK_NUM);
plt_clk_unregister_parents(data);
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index db51b2427e8a..e33b21d3f9d8 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -23,8 +23,8 @@ obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
obj-$(CONFIG_ROCKCHIP_TIMER) += rockchip_timer.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
-obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
-obj-$(CONFIG_ORION_TIMER) += time-orion.o
+obj-$(CONFIG_ARMADA_370_XP_TIMER) += timer-armada-370-xp.o
+obj-$(CONFIG_ORION_TIMER) += timer-orion.o
obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o
obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o
obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o
@@ -36,25 +36,25 @@ obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += meson6_timer.o
obj-$(CONFIG_TEGRA_TIMER) += tegra20_timer.o
-obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
-obj-$(CONFIG_NSPIRE_TIMER) += zevio-timer.o
+obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o
+obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o
obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
-obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
-obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
+obj-$(CONFIG_CADENCE_TTC_TIMER) += timer-cadence-ttc.o
+obj-$(CONFIG_CLKSRC_EFM32) += timer-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
-obj-$(CONFIG_CLKSRC_LPC32XX) += time-lpc32xx.o
+obj-$(CONFIG_CLKSRC_LPC32XX) += timer-lpc32xx.o
obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
-obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
-obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
-obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
+obj-$(CONFIG_FSL_FTM_TIMER) += timer-fsl-ftm.o
+obj-$(CONFIG_VF_PIT_TIMER) += timer-vf-pit.o
+obj-$(CONFIG_CLKSRC_QCOM) += timer-qcom.o
obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
-obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
+obj-$(CONFIG_CLKSRC_PISTACHIO) += timer-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
-obj-$(CONFIG_OWL_TIMER) += owl-timer.o
+obj-$(CONFIG_OWL_TIMER) += timer-owl.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
@@ -66,7 +66,7 @@ obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
-obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
+obj-$(CONFIG_CLKSRC_VERSATILE) += timer-versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index d8c7f5750cdb..9a7d4dc00b6e 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -319,6 +319,13 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
}
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+static u64 notrace arm64_1188873_read_cntvct_el0(void)
+{
+ return read_sysreg(cntvct_el0);
+}
+#endif
+
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
@@ -408,6 +415,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+ {
+ .match_type = ate_match_local_cap_id,
+ .id = (void *)ARM64_WORKAROUND_1188873,
+ .desc = "ARM erratum 1188873",
+ .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
+ },
+#endif
};
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 38cd2feb87c4..fbaee04fd1d9 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -193,7 +193,7 @@ static int __init asm9260_timer_init(struct device_node *np)
priv.base = of_io_request_and_map(np, 0, np->name);
if (IS_ERR(priv.base)) {
- pr_err("%s: unable to map resource\n", np->name);
+ pr_err("%pOFn: unable to map resource\n", np);
return PTR_ERR(priv.base);
}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 69866cd8f4bb..db410acd8964 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -22,6 +22,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/sched_clock.h>
static void __init timer_get_base_and_rate(struct device_node *np,
@@ -29,11 +30,22 @@ static void __init timer_get_base_and_rate(struct device_node *np,
{
struct clk *timer_clk;
struct clk *pclk;
+ struct reset_control *rstc;
*base = of_iomap(np, 0);
if (!*base)
- panic("Unable to map regs for %s", np->name);
+ panic("Unable to map regs for %pOFn", np);
+
+ /*
+	 * Reset the timer if a reset control is available, wiping
+	 * out any state the firmware may have left it in.
+ */
+ rstc = of_reset_control_get(np, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
+ }
/*
	 * Not all implementations use a peripheral clock, so don't panic
@@ -42,8 +54,8 @@ static void __init timer_get_base_and_rate(struct device_node *np,
pclk = of_clk_get_by_name(np, "pclk");
if (!IS_ERR(pclk))
if (clk_prepare_enable(pclk))
- pr_warn("pclk for %s is present, but could not be activated\n",
- np->name);
+ pr_warn("pclk for %pOFn is present, but could not be activated\n",
+ np);
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk))
@@ -57,7 +69,7 @@ static void __init timer_get_base_and_rate(struct device_node *np,
try_clock_freq:
if (of_property_read_u32(np, "clock-freq", rate) &&
of_property_read_u32(np, "clock-frequency", rate))
- panic("No clock nor clock-frequency property for %s", np->name);
+ panic("No clock nor clock-frequency property for %pOFn", np);
}
static void __init add_clockevent(struct device_node *event_timer)
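
The reset control is treated as optional: if the device tree provides one,
the timer gets an assert/deassert pulse to wipe firmware state; if not,
initialization simply continues. A standalone sketch of that
optional-resource pattern, with invented helpers in place of the
reset_control_*() API:

#include <stdio.h>

struct reset_ctl { const char *name; };

static struct reset_ctl *get_reset(int available) /* ~ of_reset_control_get() */
{
	static struct reset_ctl r = { "timer-rst" };
	return available ? &r : NULL;	/* NULL plays the role of IS_ERR() */
}

static void pulse_reset(struct reset_ctl *r)
{
	if (!r)
		return;				/* no reset line: nothing to do */
	printf("assert %s\n", r->name);		/* ~ reset_control_assert() */
	printf("deassert %s\n", r->name);	/* ~ reset_control_deassert() */
}

int main(void)
{
	pulse_reset(get_reset(1));	/* firmware state gets wiped */
	pulse_reset(get_reset(0));	/* absent: init proceeds anyway */
	return 0;
}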
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 08cd6eaf3795..395837938301 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -191,13 +191,13 @@ static int __init pxa_timer_dt_init(struct device_node *np)
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
if (!timer_base) {
- pr_err("%s: unable to map resource\n", np->name);
+ pr_err("%pOFn: unable to map resource\n", np);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_crit("%s: unable to get clk\n", np->name);
+ pr_crit("%pOFn: unable to get clk\n", np);
return PTR_ERR(clk);
}
@@ -210,7 +210,7 @@ static int __init pxa_timer_dt_init(struct device_node *np)
/* we are only interested in OS-timer0 irq */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
- pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
+ pr_crit("%pOFn: unable to parse OS-timer0 irq\n", np);
return -EINVAL;
}
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 6cffd7c6001a..61d5f3b539ce 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Timer Support - OSTM
*
* Copyright (C) 2017 Renesas Electronics America, Inc.
* Copyright (C) 2017 Chris Brandt
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/of_address.h>
diff --git a/drivers/clocksource/riscv_timer.c b/drivers/clocksource/riscv_timer.c
index 4e8b347e43e2..084e97dc10ed 100644
--- a/drivers/clocksource/riscv_timer.c
+++ b/drivers/clocksource/riscv_timer.c
@@ -8,6 +8,7 @@
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
+#include <asm/smp.h>
#include <asm/sbi.h>
/*
@@ -84,13 +85,16 @@ void riscv_timer_interrupt(void)
static int __init riscv_timer_init_dt(struct device_node *n)
{
- int cpu_id = riscv_of_processor_hart(n), error;
+ int cpuid, hartid, error;
struct clocksource *cs;
- if (cpu_id != smp_processor_id())
+ hartid = riscv_of_processor_hartid(n);
+ cpuid = riscv_hartid_to_cpuid(hartid);
+
+ if (cpuid != smp_processor_id())
return 0;
- cs = per_cpu_ptr(&riscv_clocksource, cpu_id);
+ cs = per_cpu_ptr(&riscv_clocksource, cpuid);
clocksource_register_hz(cs, riscv_timebase);
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
@@ -98,7 +102,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
if (error)
pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
- error, cpu_id);
+ error, cpuid);
return error;
}
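
The fix distinguishes the hardware hart ID found in the device tree from the
kernel's logical CPU id: hart IDs need not be dense or zero-based, so they
must be mapped before comparing against smp_processor_id(). A standalone
sketch with an invented mapping table:

#include <stdio.h>

static const int hartid_of_cpu[] = { 4, 1, 7, 2 };	/* cpuid -> hartid */

static int hartid_to_cpuid(int hartid)	/* ~ riscv_hartid_to_cpuid() */
{
	for (unsigned int i = 0;
	     i < sizeof(hartid_of_cpu) / sizeof(*hartid_of_cpu); i++)
		if (hartid_of_cpu[i] == hartid)
			return (int)i;
	return -1;
}

int main(void)
{
	/* A DT timer node for hart 7 belongs to logical CPU 2 here. */
	printf("hart 7 -> cpu %d\n", hartid_to_cpuid(7));
	return 0;
}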
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index bbbf37c471a3..55d3e03f2cd4 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - CMT
*
* Copyright (C) 2008 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -78,18 +70,17 @@ struct sh_cmt_info {
unsigned int channels_mask;
unsigned long width; /* 16 or 32 bit version of hardware block */
- unsigned long overflow_bit;
- unsigned long clear_bits;
+ u32 overflow_bit;
+ u32 clear_bits;
/* callbacks for CMSTR and CMCSR access */
- unsigned long (*read_control)(void __iomem *base, unsigned long offs);
+ u32 (*read_control)(void __iomem *base, unsigned long offs);
void (*write_control)(void __iomem *base, unsigned long offs,
- unsigned long value);
+ u32 value);
/* callbacks for CMCNT and CMCOR access */
- unsigned long (*read_count)(void __iomem *base, unsigned long offs);
- void (*write_count)(void __iomem *base, unsigned long offs,
- unsigned long value);
+ u32 (*read_count)(void __iomem *base, unsigned long offs);
+ void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};
struct sh_cmt_channel {
@@ -103,13 +94,13 @@ struct sh_cmt_channel {
unsigned int timer_bit;
unsigned long flags;
- unsigned long match_value;
- unsigned long next_match_value;
- unsigned long max_match_value;
+ u32 match_value;
+ u32 next_match_value;
+ u32 max_match_value;
raw_spinlock_t lock;
struct clock_event_device ced;
struct clocksource cs;
- unsigned long total_cycles;
+ u64 total_cycles;
bool cs_enabled;
};
@@ -160,24 +151,22 @@ struct sh_cmt_device {
#define SH_CMT32_CMCSR_CKS_RCLK1 (7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK (7 << 0)
-static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
+static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
{
return ioread16(base + (offs << 1));
}
-static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
+static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
{
return ioread32(base + (offs << 2));
}
-static void sh_cmt_write16(void __iomem *base, unsigned long offs,
- unsigned long value)
+static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
{
iowrite16(value, base + (offs << 1));
}
-static void sh_cmt_write32(void __iomem *base, unsigned long offs,
- unsigned long value)
+static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
{
iowrite32(value, base + (offs << 2));
}
@@ -242,7 +231,7 @@ static const struct sh_cmt_info sh_cmt_info[] = {
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
-static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
+static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
if (ch->iostart)
return ch->cmt->info->read_control(ch->iostart, 0);
@@ -250,8 +239,7 @@ static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}
-static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
{
if (ch->iostart)
ch->cmt->info->write_control(ch->iostart, 0, value);
@@ -259,39 +247,35 @@ static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
}
-static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
+static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}
-static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
}
-static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
+static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}
-static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
}
-static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
}
-static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
- int *has_wrapped)
+static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
- unsigned long v1, v2, v3;
- int o1, o2;
+ u32 v1, v2, v3;
+ u32 o1, o2;
o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
@@ -311,7 +295,8 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
- unsigned long flags, value;
+ unsigned long flags;
+ u32 value;
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&ch->cmt->lock, flags);
@@ -418,11 +403,11 @@ static void sh_cmt_disable(struct sh_cmt_channel *ch)
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
int absolute)
{
- unsigned long new_match;
- unsigned long value = ch->next_match_value;
- unsigned long delay = 0;
- unsigned long now = 0;
- int has_wrapped;
+ u32 value = ch->next_match_value;
+ u32 new_match;
+ u32 delay = 0;
+ u32 now = 0;
+ u32 has_wrapped;
now = sh_cmt_get_counter(ch, &has_wrapped);
ch->flags |= FLAG_REPROGRAM; /* force reprogram */
@@ -619,9 +604,10 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
- unsigned long flags, raw;
- unsigned long value;
- int has_wrapped;
+ unsigned long flags;
+ u32 has_wrapped;
+ u64 value;
+ u32 raw;
raw_spin_lock_irqsave(&ch->lock, flags);
value = ch->total_cycles;
@@ -694,7 +680,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
cs->disable = sh_cmt_clocksource_disable;
cs->suspend = sh_cmt_clocksource_suspend;
cs->resume = sh_cmt_clocksource_resume;
- cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
+ cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
@@ -941,8 +927,22 @@ static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
.compatible = "renesas,cmt-48-gen2",
.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
},
- { .compatible = "renesas,rcar-gen2-cmt0", .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2] },
- { .compatible = "renesas,rcar-gen2-cmt1", .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2] },
+ {
+ .compatible = "renesas,rcar-gen2-cmt0",
+ .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
+ },
+ {
+ .compatible = "renesas,rcar-gen2-cmt1",
+ .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
+ },
+ {
+ .compatible = "renesas,rcar-gen3-cmt0",
+ .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
+ },
+ {
+ .compatible = "renesas,rcar-gen3-cmt1",
+ .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
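
The type changes above pin register values to the hardware width: the CMT
counters are 16 or 32 bits, so u32 is correct on both 32- and 64-bit builds,
while the accumulated total_cycles becomes u64 and the clocksource mask
widens to match. A standalone sketch of why unsigned long is the wrong type
for a 32-bit counter on LP64 systems:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cnt32 = 0xffffffffu;
	unsigned long cntul = 0xffffffffUL;

	cnt32 += 1;	/* wraps to 0, matching the 32-bit hardware counter */
	cntul += 1;	/* on LP64 this is 0x100000000: no wrap */

	printf("u32: 0x%x  unsigned long: 0x%lx (size %zu)\n",
	       cnt32, cntul, sizeof(cntul));
	return 0;
}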
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 6812e099b6a3..354b27d14a19 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - MTU2
*
* Copyright (C) 2009 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index c74a6c543ca2..49f1c805fc95 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - TMU
*
* Copyright (C) 2009 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index edf1a46269f1..edf1a46269f1 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index ec8a4376f74f..2fab18fae4fc 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
data->base = of_iomap(node, 0);
if (!data->base) {
pr_err("Could not map PIT address\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto exit;
}
data->mck = of_clk_get(node, 0);
if (IS_ERR(data->mck)) {
pr_err("Unable to get mck clk\n");
- return PTR_ERR(data->mck);
+ ret = PTR_ERR(data->mck);
+ goto exit;
}
ret = clk_prepare_enable(data->mck);
if (ret) {
pr_err("Unable to enable mck\n");
- return ret;
+ goto exit;
}
/* Get the interrupts property */
data->irq = irq_of_parse_and_map(node, 0);
if (!data->irq) {
pr_err("Unable to get IRQ from DT\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit;
}
/*
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
ret = clocksource_register_hz(&data->clksrc, pit_rate);
if (ret) {
pr_err("Failed to register clocksource\n");
- return ret;
+ goto exit;
}
/* Set up irq handler */
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
"at91_tick", data);
if (ret) {
pr_err("Unable to setup IRQ\n");
- return ret;
+ clocksource_unregister(&data->clksrc);
+ goto exit;
}
/* Set up and register clockevents */
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
clockevents_register_device(&data->clkevt);
return 0;
+
+exit:
+ kfree(data);
+ return ret;
}
TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
at91sam926x_pit_dt_init);
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/timer-cadence-ttc.c
index 29d51755e18b..b33402980b6f 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -535,7 +535,7 @@ static int __init ttc_timer_init(struct device_node *timer)
if (ret)
return ret;
- pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
+ pr_info("%pOFn #0 at %p, irq=%d\n", timer, timer_baseaddr, irq);
return 0;
}
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/timer-efm32.c
index 257e810ec1ad..257e810ec1ad 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/timer-efm32.c
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/timer-fsl-ftm.c
index 846d18daf893..846d18daf893 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/timer-fsl-ftm.c
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
index c020038ebfab..cf93f6419b51 100644
--- a/drivers/clocksource/timer-fttmr010.c
+++ b/drivers/clocksource/timer-fttmr010.c
@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
cr &= ~fttmr010->t1_enable_val;
writel(cr, fttmr010->base + TIMER_CR);
- /* Setup the match register forward/backward in time */
- cr = readl(fttmr010->base + TIMER1_COUNT);
- if (fttmr010->count_down)
- cr -= cycles;
- else
- cr += cycles;
- writel(cr, fttmr010->base + TIMER1_MATCH1);
+ if (fttmr010->count_down) {
+ /*
+		 * The ASPEED timer controller reloads TIMER1_COUNT from the
+		 * TIMER1_LOAD register when the timer is re-enabled.
+ */
+ writel(cycles, fttmr010->base + TIMER1_LOAD);
+ } else {
+		/* Set up the match register forward in time */
+ cr = readl(fttmr010->base + TIMER1_COUNT);
+ writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
+ }
/* Start */
cr = readl(fttmr010->base + TIMER_CR);
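
On a count-up timer the next event is programmed as a match value relative to
the current count, while the ASPEED count-down timer is simply loaded with
the number of cycles and fires at zero. A standalone sketch of the two
calculations, with plain variables in place of the
TIMER1_COUNT/TIMER1_MATCH1/TIMER1_LOAD registers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cycles = 500;

	/* Count-up: fire when COUNT reaches MATCH = COUNT + cycles. */
	uint32_t count = 12345;
	uint32_t match = count + cycles;

	/* Count-down (ASPEED): COUNT is reloaded from LOAD on re-enable
	 * and fires at zero, so just program LOAD = cycles. */
	uint32_t load = cycles;

	printf("count-up: match=%u  count-down: load=%u\n", match, load);
	return 0;
}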
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 62d24690ba02..76e526f58620 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -190,7 +190,7 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
- pr_err("No clock for %s\n", node->name);
+ pr_err("No clock for %pOFn\n", node);
return PTR_ERR(clk);
}
clk_prepare_enable(clk);
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/timer-lpc32xx.c
index d51a62a79ef7..d51a62a79ef7 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/timer-lpc32xx.c
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/timer-orion.c
index 12202067fe4b..7d487107e3cd 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/timer-orion.c
@@ -129,13 +129,13 @@ static int __init orion_timer_init(struct device_node *np)
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
if (!timer_base) {
- pr_err("%s: unable to map resource\n", np->name);
+ pr_err("%pOFn: unable to map resource\n", np);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_err("%s: unable to get clk\n", np->name);
+ pr_err("%pOFn: unable to get clk\n", np);
return PTR_ERR(clk);
}
@@ -148,7 +148,7 @@ static int __init orion_timer_init(struct device_node *np)
/* we are only interested in timer1 irq */
irq = irq_of_parse_and_map(np, 1);
if (irq <= 0) {
- pr_err("%s: unable to parse timer1 irq\n", np->name);
+ pr_err("%pOFn: unable to parse timer1 irq\n", np);
return -EINVAL;
}
@@ -174,7 +174,7 @@ static int __init orion_timer_init(struct device_node *np)
/* setup timer1 as clockevent timer */
ret = setup_irq(irq, &orion_clkevt_irq);
if (ret) {
- pr_err("%s: unable to setup irq\n", np->name);
+ pr_err("%pOFn: unable to setup irq\n", np);
return ret;
}
diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/timer-owl.c
index ea00a5e8f95d..ea00a5e8f95d 100644
--- a/drivers/clocksource/owl-timer.c
+++ b/drivers/clocksource/timer-owl.c
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/timer-pistachio.c
index a2dd85d0c1d7..a2dd85d0c1d7 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/timer-pistachio.c
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/timer-qcom.c
index 89816f89ff3f..89816f89ff3f 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/timer-qcom.c
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index e01222ea888f..052b230ca312 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -249,7 +249,7 @@ static int __init sp804_of_init(struct device_node *np)
if (of_clk_get_parent_count(np) == 3) {
clk2 = of_clk_get(np, 1);
if (IS_ERR(clk2)) {
- pr_err("sp804: %s clock not found: %d\n", np->name,
+ pr_err("sp804: %pOFn clock not found: %d\n", np,
(int)PTR_ERR(clk2));
clk2 = NULL;
}
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 29e2e1a78a43..6949a9113dbb 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
return -ENXIO;
}
+ if (!of_machine_is_compatible("ti,am43"))
+ ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
ti_32k_timer.counter = ti_32k_timer.base;
/*
diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/timer-versatile.c
index 39725d38aede..39725d38aede 100644
--- a/drivers/clocksource/versatile.c
+++ b/drivers/clocksource/timer-versatile.c
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/timer-vf-pit.c
index 0f92089ec08c..0f92089ec08c 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/timer-vf-pit.c
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/timer-vt8500.c
index e0f7489cfc8e..e0f7489cfc8e 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/timer-vt8500.c
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/timer-zevio.c
index f74689334f7c..6127e8062a71 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/timer-zevio.c
@@ -148,12 +148,12 @@ static int __init zevio_timer_add(struct device_node *node)
of_address_to_resource(node, 0, &res);
scnprintf(timer->clocksource_name, sizeof(timer->clocksource_name),
- "%llx.%s_clocksource",
- (unsigned long long)res.start, node->name);
+ "%llx.%pOFn_clocksource",
+ (unsigned long long)res.start, node);
scnprintf(timer->clockevent_name, sizeof(timer->clockevent_name),
- "%llx.%s_clockevent",
- (unsigned long long)res.start, node->name);
+ "%llx.%pOFn_clockevent",
+ (unsigned long long)res.start, node);
if (timer->interrupt_regs && irqnr) {
timer->clkevt.name = timer->clockevent_name;
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index b61f4ec43e06..d62fd374d5c7 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -61,6 +61,7 @@ enum {
#define INTEL_MSR_RANGE (0xffff)
#define AMD_MSR_RANGE (0x7)
+#define HYGON_MSR_RANGE (0x7)
#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
@@ -95,6 +96,7 @@ static bool boost_state(unsigned int cpu)
rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
msr = lo | ((u64)hi << 32);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+ case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
msr = lo | ((u64)hi << 32);
@@ -113,6 +115,7 @@ static int boost_set_msr(bool enable)
msr_addr = MSR_IA32_MISC_ENABLE;
msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
break;
+ case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
msr_addr = MSR_K7_HWCR;
msr_mask = MSR_K7_HWCR_CPB_DIS;
@@ -225,6 +228,8 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
msr &= AMD_MSR_RANGE;
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ msr &= HYGON_MSR_RANGE;
else
msr &= INTEL_MSR_RANGE;
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index be926d9a66e5..4ac7c3cf34be 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -111,11 +111,16 @@ static int __init amd_freq_sensitivity_init(void)
{
u64 val;
struct pci_dev *pcidev;
+ unsigned int pci_vendor;
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ pci_vendor = PCI_VENDOR_ID_AMD;
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ pci_vendor = PCI_VENDOR_ID_HYGON;
+ else
return -ENODEV;
- pcidev = pci_get_device(PCI_VENDOR_ID_AMD,
+ pcidev = pci_get_device(pci_vendor,
PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
if (!pcidev) {
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 30f302149730..fd25c21cee72 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -428,7 +428,7 @@ MODULE_LICENSE("GPL");
late_initcall(cppc_cpufreq_init);
-static const struct acpi_device_id cppc_acpi_ids[] = {
+static const struct acpi_device_id cppc_acpi_ids[] __used = {
{ACPI_PROCESSOR_DEVICE_HID, },
{}
};
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index fe14c57de6ca..b1c5468dca16 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -58,6 +58,7 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "renesas,r8a73a4", },
{ .compatible = "renesas,r8a7740", },
{ .compatible = "renesas,r8a7743", },
+ { .compatible = "renesas,r8a7744", },
{ .compatible = "renesas,r8a7745", },
{ .compatible = "renesas,r8a7778", },
{ .compatible = "renesas,r8a7779", },
@@ -78,7 +79,10 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "rockchip,rk3328", },
{ .compatible = "rockchip,rk3366", },
{ .compatible = "rockchip,rk3368", },
- { .compatible = "rockchip,rk3399", },
+ { .compatible = "rockchip,rk3399",
+ .data = &(struct cpufreq_dt_platform_data)
+ { .have_governor_per_policy = true, },
+ },
{ .compatible = "st-ericsson,u8500", },
{ .compatible = "st-ericsson,u8540", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 0a9ebf00be46..e58bfcb1169e 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -32,6 +32,7 @@ struct private_data {
struct device *cpu_dev;
struct thermal_cooling_device *cdev;
const char *reg_name;
+ bool have_static_opps;
};
static struct freq_attr *cpufreq_dt_attr[] = {
@@ -204,6 +205,15 @@ static int cpufreq_init(struct cpufreq_policy *policy)
}
}
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_put_regulator;
+ }
+
+ priv->reg_name = name;
+ priv->opp_table = opp_table;
+
/*
* Initialize OPP tables for all policy->cpus. They will be shared by
* all CPUs which have marked their CPUs shared with OPP bindings.
@@ -214,7 +224,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
*
* OPPs might be populated at runtime, don't check for error here
*/
- dev_pm_opp_of_cpumask_add_table(policy->cpus);
+ if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))
+ priv->have_static_opps = true;
/*
* But we need OPP table to function so if it is not there let's
@@ -240,19 +251,10 @@ static int cpufreq_init(struct cpufreq_policy *policy)
__func__, ret);
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto out_free_opp;
- }
-
- priv->reg_name = name;
- priv->opp_table = opp_table;
-
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
- goto out_free_priv;
+ goto out_free_opp;
}
priv->cpu_dev = cpu_dev;
@@ -282,10 +284,11 @@ static int cpufreq_init(struct cpufreq_policy *policy)
out_free_cpufreq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
-out_free_priv:
- kfree(priv);
out_free_opp:
- dev_pm_opp_of_cpumask_remove_table(policy->cpus);
+ if (priv->have_static_opps)
+ dev_pm_opp_of_cpumask_remove_table(policy->cpus);
+ kfree(priv);
+out_put_regulator:
if (name)
dev_pm_opp_put_regulators(opp_table);
out_put_clk:
@@ -300,7 +303,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
- dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ if (priv->have_static_opps)
+ dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
if (priv->reg_name)
dev_pm_opp_put_regulators(priv->opp_table);
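
Moving the priv allocation earlier lets the driver record, in
have_static_opps, whether it actually added a static OPP table, so the error
and exit paths only remove a table this driver created. A standalone sketch
of the flag pattern, with invented helpers standing in for the
dev_pm_opp_of_cpumask_*() calls:

#include <stdbool.h>
#include <stdio.h>

struct priv { bool have_static_opps; };

static int add_table(bool dt_has_opps)	/* ~ ..._add_table() */
{
	return dt_has_opps ? 0 : -2;	/* OPPs may still come at runtime */
}

static void remove_table(void)		/* ~ ..._remove_table() */
{
	printf("static OPP table removed\n");
}

static void driver_exit(struct priv *p)
{
	if (p->have_static_opps)	/* never remove a table we didn't add */
		remove_table();
}

int main(void)
{
	struct priv p = { .have_static_opps = false };

	if (!add_table(true))
		p.have_static_opps = true;
	driver_exit(&p);
	return 0;
}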
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f53fb41efb7b..7aa3dcad2175 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -403,7 +403,7 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int transition_failed)
{
- if (unlikely(WARN_ON(!policy->transition_ongoing)))
+ if (WARN_ON(!policy->transition_ongoing))
return;
cpufreq_notify_post_transition(policy, freqs, transition_failed);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index f20f20a77d4d..4268f87e99fc 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -80,8 +80,10 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
* changed in the meantime, so fall back to current frequency in that
* case.
*/
- if (requested_freq > policy->max || requested_freq < policy->min)
+ if (requested_freq > policy->max || requested_freq < policy->min) {
requested_freq = policy->cur;
+ dbs_info->requested_freq = requested_freq;
+ }
freq_step = get_freq_step(cs_tuners, policy);
@@ -92,7 +94,7 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
if (policy_dbs->idle_periods < UINT_MAX) {
unsigned int freq_steps = policy_dbs->idle_periods * freq_step;
- if (requested_freq > freq_steps)
+ if (requested_freq > policy->min + freq_steps)
requested_freq -= freq_steps;
else
requested_freq = policy->min;
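
Both hunks keep requested_freq inside [policy->min, policy->max]: the
idle-periods decrement now compares against policy->min + freq_steps, so the
subtraction can never land below the minimum. A standalone sketch of the
clamp with made-up frequencies:

#include <stdio.h>

int main(void)
{
	unsigned int min = 800000, requested = 900000, steps = 400000;

	/* The old check (requested > steps) would yield 500000, below min. */
	if (requested > min + steps)
		requested -= steps;
	else
		requested = min;

	printf("requested_freq = %u\n", requested);	/* 800000 */
	return 0;
}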
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index b2ff423ad7f8..8cfee0ab804b 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -12,6 +12,7 @@
#include <linux/cpu_cooling.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_opp.h>
@@ -290,20 +291,32 @@ put_node:
#define OCOTP_CFG3_6ULL_SPEED_792MHZ 0x2
#define OCOTP_CFG3_6ULL_SPEED_900MHZ 0x3
-static void imx6ul_opp_check_speed_grading(struct device *dev)
+static int imx6ul_opp_check_speed_grading(struct device *dev)
{
- struct device_node *np;
- void __iomem *base;
u32 val;
+ int ret = 0;
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
- if (!np)
- return;
+ if (of_find_property(dev->of_node, "nvmem-cells", NULL)) {
+ ret = nvmem_cell_read_u32(dev, "speed_grade", &val);
+ if (ret)
+ return ret;
+ } else {
+ struct device_node *np;
+ void __iomem *base;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
+ if (!np)
+ return -ENOENT;
+
+ base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ return -EFAULT;
+ }
- base = of_iomap(np, 0);
- if (!base) {
- dev_err(dev, "failed to map ocotp\n");
- goto put_node;
+ val = readl_relaxed(base + OCOTP_CFG3);
+ iounmap(base);
}
/*
@@ -314,7 +327,6 @@ static void imx6ul_opp_check_speed_grading(struct device *dev)
* 2b'11: 900000000Hz on i.MX6ULL only;
* We need to set the max speed of ARM according to fuse map.
*/
- val = readl_relaxed(base + OCOTP_CFG3);
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
@@ -334,9 +346,7 @@ static void imx6ul_opp_check_speed_grading(struct device *dev)
dev_warn(dev, "failed to disable 900MHz OPP\n");
}
- iounmap(base);
-put_node:
- of_node_put(np);
+ return ret;
}
static int imx6q_cpufreq_probe(struct platform_device *pdev)
@@ -394,10 +404,18 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
}
if (of_machine_is_compatible("fsl,imx6ul") ||
- of_machine_is_compatible("fsl,imx6ull"))
- imx6ul_opp_check_speed_grading(cpu_dev);
- else
+ of_machine_is_compatible("fsl,imx6ull")) {
+ ret = imx6ul_opp_check_speed_grading(cpu_dev);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ if (ret) {
+ dev_err(cpu_dev, "failed to read ocotp: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
imx6q_opp_check_speed_grading(cpu_dev);
+ }
/* Because we have added the OPPs here, we must free them */
free_opp = true;
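
Speed grading now prefers the nvmem cell named in the device tree and falls
back to mapping the OCOTP block directly, and a probe-defer from the nvmem
path is propagated instead of being swallowed. A standalone sketch of the
two-step lookup, with invented helpers in place of nvmem_cell_read_u32() and
the of_iomap() path:

#include <stdio.h>
#include <stdint.h>

static int read_nvmem(uint32_t *val)	/* ~ nvmem_cell_read_u32() */
{
	*val = 0x2;			/* fuse value via the nvmem framework */
	return 0;
}

static int read_ocotp(uint32_t *val)	/* ~ of_iomap() + readl_relaxed() */
{
	*val = 0x2;			/* raw OCOTP_CFG3 read, legacy path */
	return 0;
}

static int get_speed_grade(int has_nvmem_cells, uint32_t *val)
{
	if (has_nvmem_cells)		/* DT provides "nvmem-cells" */
		return read_nvmem(val);
	return read_ocotp(val);		/* fall back to direct mapping */
}

int main(void)
{
	uint32_t grade;

	if (!get_speed_grade(0, &grade))
		printf("speed grade fuses: 0x%x\n", grade);
	return 0;
}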
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b6a1aadaff9f..49c0abf2d48f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -373,10 +373,28 @@ static void intel_pstate_set_itmt_prio(int cpu)
}
}
}
+
+static int intel_pstate_get_cppc_guranteed(int cpu)
+{
+ struct cppc_perf_caps cppc_perf;
+ int ret;
+
+ ret = cppc_get_perf_caps(cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ return cppc_perf.guaranteed_perf;
+}
+
#else
static void intel_pstate_set_itmt_prio(int cpu)
{
}
+
+static int intel_pstate_get_cppc_guranteed(int cpu)
+{
+ return -ENOTSUPP;
+}
#endif
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
@@ -699,9 +717,29 @@ static ssize_t show_energy_performance_preference(
cpufreq_freq_attr_rw(energy_performance_preference);
+static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
+{
+ struct cpudata *cpu;
+ u64 cap;
+ int ratio;
+
+ ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
+ if (ratio <= 0) {
+ rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
+ ratio = HWP_GUARANTEED_PERF(cap);
+ }
+
+ cpu = all_cpu_data[policy->cpu];
+
+ return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling);
+}
+
+cpufreq_freq_attr_ro(base_frequency);
+
static struct freq_attr *hwp_cpufreq_attrs[] = {
&energy_performance_preference,
&energy_performance_available_preferences,
+ &base_frequency,
NULL,
};
@@ -1778,7 +1816,7 @@ static const struct pstate_funcs knl_funcs = {
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs),
ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_funcs),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs),
ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs),
ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs),
ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs),
@@ -1795,7 +1833,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs),
- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, core_funcs),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, core_funcs),
ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
{}
};
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index 31513bd42705..6d33a639f902 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -84,9 +84,10 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
if (ret) {
+ dev_pm_opp_remove(cpu_dev, clk_get_rate(clk));
clk_put(clk);
dev_err(cpu_dev, "Failed to register OPPs\n");
- goto opp_register_failed;
+ return ret;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
@@ -99,11 +100,5 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
-
-opp_register_failed:
- /* As registering has failed remove all the opp for all cpus */
- dev_pm_opp_cpumask_remove_table(cpu_possible_mask);
-
- return ret;
}
device_initcall(armada_xp_pmsu_cpufreq_init);
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index a1830fa25fc5..2a3675c24032 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -44,7 +44,7 @@ enum _msm8996_version {
struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
-static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
+static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
{
size_t len;
u32 *msm_id;
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
}
module_init(qcom_cpufreq_kryo_init);
-static void __init qcom_cpufreq_kryo_exit(void)
+static void __exit qcom_cpufreq_kryo_exit(void)
{
platform_device_unregister(kryo_cpufreq_pdev);
platform_driver_unregister(&qcom_cpufreq_kryo_driver);
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 5d31c2db12a3..dbecd7667db2 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -611,8 +611,8 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
id = of_alias_get_id(np, "dmc");
if (id < 0 || id >= ARRAY_SIZE(dmc_base)) {
- pr_err("%s: failed to get alias of dmc node '%s'\n",
- __func__, np->name);
+ pr_err("%s: failed to get alias of dmc node '%pOFn'\n",
+ __func__, np);
of_node_put(np);
return id;
}
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 1f59966573aa..f1e09022b819 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -121,7 +121,7 @@ static struct cpufreq_frequency_table *init_vhint_table(
void *virt;
virt = dma_alloc_coherent(bpmp->dev, sizeof(*data), &phys,
- GFP_KERNEL | GFP_DMA32);
+ GFP_KERNEL);
if (!virt)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6df894d65d9e..4a97446f66d8 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -247,17 +247,17 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
- diff = ktime_us_delta(time_end, time_start);
- if (diff > INT_MAX)
- diff = INT_MAX;
-
- dev->last_residency = (int) diff;
-
if (entered_state >= 0) {
- /* Update cpuidle counters */
- /* This can be moved to within driver enter routine
+ /*
+		 * Update cpuidle counters.
+		 * This could be moved into the driver's enter routine,
+		 * but that would result in multiple copies of the same code.
*/
+ diff = ktime_us_delta(time_end, time_start);
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ dev->last_residency = (int)diff;
dev->states_usage[entered_state].time += dev->last_residency;
dev->states_usage[entered_state].usage++;
} else {
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 704880a6612a..f0dddc66af26 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -80,7 +80,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state = &ldev->states[last_idx];
- last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
+ last_residency = dev->last_residency - drv->states[last_idx].exit_latency;
/* consider promotion */
if (last_idx < drv->state_count - 1 &&
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index e26a40971b26..575a68f31761 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -124,7 +124,6 @@ struct menu_device {
int tick_wakeup;
unsigned int next_timer_us;
- unsigned int predicted_us;
unsigned int bucket;
unsigned int correction_factor[BUCKETS];
unsigned int intervals[INTERVALS];
@@ -197,10 +196,11 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
* of points is below a threshold. If it is... then use the
* average of these 8 points as the estimated value.
*/
-static unsigned int get_typical_interval(struct menu_device *data)
+static unsigned int get_typical_interval(struct menu_device *data,
+ unsigned int predicted_us)
{
int i, divisor;
- unsigned int max, thresh, avg;
+ unsigned int min, max, thresh, avg;
uint64_t sum, variance;
thresh = UINT_MAX; /* Discard outliers above this value */
@@ -208,6 +208,7 @@ static unsigned int get_typical_interval(struct menu_device *data)
again:
/* First calculate the average of past intervals */
+ min = UINT_MAX;
max = 0;
sum = 0;
divisor = 0;
@@ -218,8 +219,19 @@ again:
divisor++;
if (value > max)
max = value;
+
+ if (value < min)
+ min = value;
}
}
+
+ /*
+ * If the result of the computation is going to be discarded anyway,
+ * avoid the computation altogether.
+ */
+ if (min >= predicted_us)
+ return UINT_MAX;
+
if (divisor == INTERVALS)
avg = sum >> INTERVAL_SHIFT;
else
@@ -286,10 +298,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct menu_device *data = this_cpu_ptr(&menu_devices);
int latency_req = cpuidle_governor_latency_req(dev->cpu);
int i;
- int first_idx;
int idx;
unsigned int interactivity_req;
- unsigned int expected_interval;
+ unsigned int predicted_us;
unsigned long nr_iowaiters, cpu_load;
ktime_t delta_next;
@@ -298,50 +309,36 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
data->needs_update = 0;
}
- /* Special case when user has set very strict latency requirement */
- if (unlikely(latency_req == 0)) {
- *stop_tick = false;
- return 0;
- }
-
/* determine the expected residency time, round up */
data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
get_iowait_load(&nr_iowaiters, &cpu_load);
data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
+ if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
+ ((data->next_timer_us < drv->states[1].target_residency ||
+ latency_req < drv->states[1].exit_latency) &&
+ !drv->states[0].disabled && !dev->states_usage[0].disable)) {
+ /*
+ * In this case state[0] will be used no matter what, so return
+ * it right away and keep the tick running.
+ */
+ *stop_tick = false;
+ return 0;
+ }
+
/*
* Force the result of multiplication to be 64 bits even if both
* operands are 32 bits.
* Make sure to round up for half microseconds.
*/
- data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
+ predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
data->correction_factor[data->bucket],
RESOLUTION * DECAY);
-
- expected_interval = get_typical_interval(data);
- expected_interval = min(expected_interval, data->next_timer_us);
-
- first_idx = 0;
- if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
- struct cpuidle_state *s = &drv->states[1];
- unsigned int polling_threshold;
-
- /*
- * Default to a physical idle state, not to busy polling, unless
- * a timer is going to trigger really really soon.
- */
- polling_threshold = max_t(unsigned int, 20, s->target_residency);
- if (data->next_timer_us > polling_threshold &&
- latency_req > s->exit_latency && !s->disabled &&
- !dev->states_usage[1].disable)
- first_idx = 1;
- }
-
/*
* Use the lowest expected idle interval to pick the idle state.
*/
- data->predicted_us = min(data->predicted_us, expected_interval);
+ predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));
if (tick_nohz_tick_stopped()) {
/*
@@ -352,34 +349,46 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* the known time till the closest timer event for the idle
* state selection.
*/
- if (data->predicted_us < TICK_USEC)
- data->predicted_us = ktime_to_us(delta_next);
+ if (predicted_us < TICK_USEC)
+ predicted_us = ktime_to_us(delta_next);
} else {
/*
* Use the performance multiplier and the user-configurable
* latency_req to determine the maximum exit latency.
*/
- interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
+ interactivity_req = predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
if (latency_req > interactivity_req)
latency_req = interactivity_req;
}
- expected_interval = data->predicted_us;
/*
* Find the idle state with the lowest power while satisfying
* our constraints.
*/
idx = -1;
- for (i = first_idx; i < drv->state_count; i++) {
+ for (i = 0; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
struct cpuidle_state_usage *su = &dev->states_usage[i];
if (s->disabled || su->disable)
continue;
+
if (idx == -1)
idx = i; /* first enabled state */
- if (s->target_residency > data->predicted_us) {
- if (data->predicted_us < TICK_USEC)
+
+ if (s->target_residency > predicted_us) {
+ /*
+ * Use a physical idle state, not busy polling, unless
+ * a timer is going to trigger soon enough.
+ */
+ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+ s->exit_latency <= latency_req &&
+ s->target_residency <= data->next_timer_us) {
+ predicted_us = s->target_residency;
+ idx = i;
+ break;
+ }
+ if (predicted_us < TICK_USEC)
break;
if (!tick_nohz_tick_stopped()) {
@@ -389,7 +398,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* tick in that case and let the governor run
* again in the next iteration of the loop.
*/
- expected_interval = drv->states[idx].target_residency;
+ predicted_us = drv->states[idx].target_residency;
break;
}
@@ -403,7 +412,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
s->target_residency <= ktime_to_us(delta_next))
idx = i;
- goto out;
+ return idx;
}
if (s->exit_latency > latency_req) {
/*
@@ -412,7 +421,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* expected idle duration so that the tick is retained
* as long as that target residency is low enough.
*/
- expected_interval = drv->states[idx].target_residency;
+ predicted_us = drv->states[idx].target_residency;
break;
}
idx = i;
@@ -426,7 +435,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* expected idle duration is shorter than the tick period length.
*/
if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
- expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) {
+ predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
unsigned int delta_next_us = ktime_to_us(delta_next);
*stop_tick = false;
@@ -450,10 +459,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
}
}
-out:
- data->last_state_idx = idx;
-
- return data->last_state_idx;
+ return idx;
}
/**
@@ -512,9 +518,19 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* duration predictor do a better job next time.
*/
measured_us = 9 * MAX_INTERESTING / 10;
+ } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
+ dev->poll_time_limit) {
+ /*
+ * The CPU exited the "polling" state due to a time limit, so
+ * the idle duration prediction leading to the selection of that
+ * state was inaccurate. If a better prediction had been made,
+ * the CPU might have been woken up from idle by the next timer.
+ * Assume that to be the case.
+ */
+ measured_us = data->next_timer_us;
} else {
/* measured value */
- measured_us = cpuidle_get_last_residency(dev);
+ measured_us = dev->last_residency;
/* Deduct exit latency */
if (measured_us > 2 * target->exit_latency)
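
The new min tracking in get_typical_interval() is a pure short-cut: the
caller takes min(predicted_us, typical), so when even the smallest recent
interval is at least predicted_us the averaging result would be discarded
anyway and UINT_MAX can be returned immediately. A standalone sketch with
invented interval data:

#include <stdio.h>
#include <limits.h>

static unsigned int typical_interval(const unsigned int *iv, int n,
				     unsigned int predicted_us)
{
	unsigned int min = UINT_MAX, sum = 0;

	for (int i = 0; i < n; i++) {
		if (iv[i] < min)
			min = iv[i];
		sum += iv[i];
	}
	if (min >= predicted_us)
		return UINT_MAX;	/* result would be discarded anyway */
	return sum / n;			/* stand-in for the real averaging */
}

int main(void)
{
	unsigned int iv[] = { 180, 220, 200, 190 };
	unsigned int predicted = 150;
	unsigned int t = typical_interval(iv, 4, predicted);

	if (t < predicted)
		predicted = t;
	printf("predicted_us = %u\n", predicted);	/* stays 150 */
	return 0;
}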
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index 3f86d23c592e..85792d371add 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -9,7 +9,6 @@
#include <linux/sched/clock.h>
#include <linux/sched/idle.h>
-#define POLL_IDLE_TIME_LIMIT (TICK_NSEC / 16)
#define POLL_IDLE_RELAX_COUNT 200
static int __cpuidle poll_idle(struct cpuidle_device *dev,
@@ -17,8 +16,11 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
{
u64 time_start = local_clock();
+ dev->poll_time_limit = false;
+
local_irq_enable();
if (!current_set_polling_and_test()) {
+ u64 limit = (u64)drv->states[1].target_residency * NSEC_PER_USEC;
unsigned int loop_count = 0;
while (!need_resched()) {
@@ -27,8 +29,10 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
continue;
loop_count = 0;
- if (local_clock() - time_start > POLL_IDLE_TIME_LIMIT)
+ if (local_clock() - time_start > limit) {
+ dev->poll_time_limit = true;
break;
+ }
}
}
current_clr_polling();
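
The polling budget is no longer a fixed TICK_NSEC/16 but the target residency
of the next idle state, converted to nanoseconds; hitting it sets
dev->poll_time_limit so menu_update() can correct its prediction. A
standalone sketch of the limit arithmetic with invented numbers:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
	uint64_t target_residency_us = 20;	/* ~ drv->states[1] */
	uint64_t limit = target_residency_us * NSEC_PER_USEC;
	uint64_t start = 0, now = 25000;	/* pretend local_clock() values */
	bool poll_time_limit = false;

	if (now - start > limit)
		poll_time_limit = true;	/* lets menu_update() correct itself */

	printf("limit=%llu ns, hit=%d\n",
	       (unsigned long long)limit, poll_time_limit);
	return 0;
}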
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index a8c4ce07fc9d..caa98a7fe392 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -73,6 +73,17 @@ config ZCRYPT
+ Crypto Express 2,3,4 or 5 Accelerator (CEXxA)
+ Crypto Express 4 or 5 EP11 Coprocessor (CEXxP)
+config ZCRYPT_MULTIDEVNODES
+ bool "Support for multiple zcrypt device nodes"
+ default y
+ depends on S390
+ depends on ZCRYPT
+ help
+ With this option enabled the zcrypt device driver can
+	  provide multiple device nodes in /dev. Each device
+	  node can be customized to limit access and narrow
+ down the use of the available crypto hardware.
+
config PKEY
tristate "Kernel API for protected key handling"
depends on S390
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c23396f32c8a..8e7e225d2446 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 801aeab5ab1e..2b7af44c7b85 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-aes.c driver.
*/
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index 2a60d1224143..cbd37a2edada 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* API for Atmel Secure Protocol Layers Improved Performances (SPLIP)
*
@@ -5,18 +6,6 @@
*
* Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
* This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
*/
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 74f083f45e97..ba00e4563ca0 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Microchip / Atmel ECC (I2C) driver.
*
* Copyright (c) 2017, Microchip Technology Inc.
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/bitrev.h>
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
index 25232c8abcc2..643a3b947338 100644
--- a/drivers/crypto/atmel-ecc.h
+++ b/drivers/crypto/atmel-ecc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, Microchip Technology Inc.
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
*/
#ifndef __ATMEL_ECC_H__
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8a19df2fba6a..ab0cfe748931 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-sham.c drivers.
*/
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 97b0423efa7f..438e1ffb2ec0 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-aes.c drivers.
*/
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 7f07a5085e9b..f3442c2bdbdc 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -330,7 +330,7 @@ struct artpec6_cryptotfm_context {
size_t key_length;
u32 key_md;
int crypto_type;
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
struct artpec6_crypto_aead_hw_ctx {
@@ -1199,15 +1199,15 @@ artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
pr_debug("counter %x will overflow (nblks %u), falling back\n",
counter, counter + nblks);
- ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
- ctx->key_length);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
+ ctx->key_length);
if (ret)
return ret;
{
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -1561,10 +1561,9 @@ static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
- 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback =
+ crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback))
return PTR_ERR(ctx->fallback);
@@ -1605,7 +1604,7 @@ static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
artpec6_crypto_aes_exit(tfm);
}
@@ -3174,7 +3173,6 @@ static struct platform_driver artpec6_crypto_driver = {
.remove = artpec6_crypto_remove,
.driver = {
.name = "artpec6-crypto",
- .owner = THIS_MODULE,
.of_match_table = artpec6_crypto_of_match,
},
};
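
The artpec6 hunks above convert the CTR fallback from a generic skcipher to the crypto_sync_skcipher API, which is what makes the on-stack request legal. A minimal kernel-side sketch of that fallback pattern, assuming module context; the function name is hypothetical. The tfm would be obtained once at init with crypto_alloc_sync_skcipher(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK) and released with crypto_free_sync_skcipher(), as the patch does:

#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int fallback_encrypt(struct crypto_sync_skcipher *fallback,
			    const u8 *key, unsigned int keylen,
			    struct scatterlist *src, struct scatterlist *dst,
			    unsigned int len, u8 *iv)
{
	int ret;

	ret = crypto_sync_skcipher_setkey(fallback, key, keylen);
	if (ret)
		return ret;

	{
		/* On-stack request is safe: a sync tfm never runs async. */
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);

		skcipher_request_set_sync_tfm(subreq, fallback);
		skcipher_request_set_callback(subreq, 0, NULL, NULL);
		skcipher_request_set_crypt(subreq, src, dst, len, iv);
		ret = crypto_skcipher_encrypt(subreq);
		/* Wipe key material that the request may have cached. */
		skcipher_request_zero(subreq);
	}

	return ret;
}
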
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 1eb852765469..c4b1cade55c1 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,7 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+config CRYPTO_DEV_FSL_CAAM_COMMON
+ tristate
+
config CRYPTO_DEV_FSL_CAAM
- tristate "Freescale CAAM-Multicore driver backend"
+ tristate "Freescale CAAM-Multicore platform driver backend"
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
select SOC_BUS
+ select CRYPTO_DEV_FSL_CAAM_COMMON
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -12,9 +17,16 @@ config CRYPTO_DEV_FSL_CAAM
To compile this driver as a module, choose M here: the module
will be called caam.
+if CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+ bool "Enable debug output in CAAM driver"
+ help
+ Selecting this will enable printing of various debug
+ information in the CAAM driver.
+
config CRYPTO_DEV_FSL_CAAM_JR
tristate "Freescale CAAM Job Ring driver backend"
- depends on CRYPTO_DEV_FSL_CAAM
default y
help
Enables the driver module for Job Rings which are part of
@@ -25,9 +37,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
To compile this driver as a module, choose M here: the module
will be called caam_jr.
+if CRYPTO_DEV_FSL_CAAM_JR
+
config CRYPTO_DEV_FSL_CAAM_RINGSIZE
int "Job Ring size"
- depends on CRYPTO_DEV_FSL_CAAM_JR
range 2 9
default "9"
help
@@ -45,7 +58,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
- depends on CRYPTO_DEV_FSL_CAAM_JR
help
Enable the Job Ring's interrupt coalescing feature.
@@ -75,7 +87,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -90,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
tristate "Queue Interface as Crypto API backend"
- depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
+ depends on FSL_DPAA && NET
default y
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
@@ -107,7 +118,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -119,7 +129,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_PKC_API
tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RSA
help
@@ -131,7 +140,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -142,13 +150,32 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
-config CRYPTO_DEV_FSL_CAAM_DEBUG
- bool "Enable debug output in CAAM driver"
- depends on CRYPTO_DEV_FSL_CAAM
+endif # CRYPTO_DEV_FSL_CAAM_JR
+
+endif # CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_DPAA2_CAAM
+ tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
+ depends on FSL_MC_DPIO
+ depends on NETDEVICES
+ select CRYPTO_DEV_FSL_CAAM_COMMON
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ select CRYPTO_AEAD
+ select CRYPTO_HASH
help
- Selecting this will enable printing of various debug
- information in the CAAM driver.
+ CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+ It handles DPSECI DPAA2 objects that sit on the Management Complex
+ (MC) fsl-mc bus.
+
+ To compile this as a module, choose M here: the module
+ will be called dpaa2_caam.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
+ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
+ CRYPTO_DEV_FSL_DPAA2_CAAM)
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+ def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
+ CRYPTO_DEV_FSL_DPAA2_CAAM)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index cb652ee7dfc8..7bbfd06a11ff 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
ccflags-y := -DDEBUG
endif
+ccflags-y += -DVERSION=\"\"
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o error.o
+caam_jr-objs := jr.o key_gen.o
caam_pkc-y := caampkc.o pkc_desc.o
ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
caam-objs += qi.o
endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
+
+dpaa2_caam-y := caamalg_qi2.o dpseci.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d67667970f7e..869f092432de 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1,8 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
* Based on talitos crypto API driver.
*
@@ -81,8 +82,6 @@
#define debug(format, arg...)
#endif
-static struct list_head alg_list;
-
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
@@ -96,17 +95,21 @@ struct caam_aead_alg {
bool registered;
};
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/*
* per-session context
*/
struct caam_ctx {
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
- dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
enum dma_data_direction dir;
struct device *jrdev;
@@ -648,20 +651,20 @@ static int rfc4543_setkey(struct crypto_aead *aead,
return rfc4543_set_sh_desc(aead);
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+ skcipher);
struct device *jrdev = ctx->jrdev;
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode &&
- (strstr(alg_name, "rfc3686") != NULL));
+ const bool is_rfc3686 = alg->caam.rfc3686;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
@@ -689,40 +692,32 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* ablkcipher_encrypt shared descriptor */
+ /* skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
+ cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
- /* ablkcipher_decrypt shared descriptor */
+ /* skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
+ cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
- /* ablkcipher_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
- cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(desc), ctx->dir);
-
return 0;
}
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(ablkcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
}
@@ -731,15 +726,15 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* xts_ablkcipher_encrypt shared descriptor */
+ /* xts_skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
- /* xts_ablkcipher_decrypt shared descriptor */
+ /* xts_skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
@@ -765,22 +760,20 @@ struct aead_edesc {
};
/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
- * @iv_dir: DMA mapping direction for IV
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
* and IV
*/
-struct ablkcipher_edesc {
+struct skcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
- enum dma_data_direction iv_dir;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -790,8 +783,7 @@ struct ablkcipher_edesc {
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents,
- dma_addr_t iv_dma, int ivsize,
- enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+ dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
int sec4_sg_bytes)
{
if (dst != src) {
@@ -803,7 +795,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
@@ -814,20 +806,19 @@ static void aead_unmap(struct device *dev,
struct aead_request *req)
{
caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+ edesc->src_nents, edesc->dst_nents, 0, 0,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
-static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst,
edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->iv_dir,
+ edesc->iv_dma, ivsize,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
@@ -881,87 +872,74 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
aead_request_complete(req, err);
}
-static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ablkcipher_request *req = context;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+ edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
#endif
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
ivsize, 0);
- /* In case initial IV was generated, copy it in GIVCIPHER request */
- if (edesc->iv_dir == DMA_FROM_DEVICE) {
- u8 *iv;
- struct skcipher_givcrypt_request *greq;
-
- greq = container_of(req, struct skcipher_givcrypt_request,
- creq);
- iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
- edesc->sec4_sg_bytes;
- memcpy(greq->giv, iv, ivsize);
- }
-
kfree(edesc);
- ablkcipher_request_complete(req, err);
+ skcipher_request_complete(req, err);
}
-static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ablkcipher_request *req = context;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
#ifdef DEBUG
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+ edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
#endif
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
- ablkcipher_request_complete(req, err);
+ skcipher_request_complete(req, err);
}
/*
@@ -1103,34 +1081,38 @@ static void init_authenc_job(struct aead_request *req,
}
/*
- * Fill in ablkcipher job descriptor
+ * Fill in skcipher job descriptor
*/
-static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void init_skcipher_job(struct skcipher_request *req,
+ struct skcipher_edesc *edesc,
+ const bool encrypt)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
+ u32 *sh_desc;
u32 out_options = 0;
- dma_addr_t dst_dma;
+ dma_addr_t dst_dma, ptr;
int len;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
- pr_err("asked=%d, nbytes%d\n",
- (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+ pr_err("asked=%d, cryptlen%d\n",
+ (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
#endif
caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+
+ sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+ ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
LDST_SGF);
if (likely(req->src == req->dst)) {
@@ -1145,48 +1127,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
out_options = LDST_SGF;
}
}
- append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
-}
-
-/*
- * Fill in ablkcipher givencrypt job descriptor
- */
-static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- u32 *desc = edesc->hw_desc;
- u32 in_options;
- dma_addr_t dst_dma, src_dma;
- int len, sec4_sg_index = 0;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
-#endif
- caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-
- if (edesc->src_nents == 1) {
- src_dma = sg_dma_address(req->src);
- in_options = 0;
- } else {
- src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
- in_options = LDST_SGF;
- }
- append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
-
- dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
- sizeof(struct sec4_sg_entry);
- append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
}
/*
@@ -1275,7 +1216,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
GFP_DMA | flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1476,35 +1417,35 @@ static int aead_decrypt(struct aead_request *req)
}
/*
- * allocate and map the ablkcipher extended descriptor for ablkcipher
+ * allocate and map the skcipher extended descriptor for skcipher
*/
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, int desc_bytes)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ int desc_bytes)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_edesc *edesc;
dma_addr_t iv_dma;
u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(src_nents);
}
if (req->dst != req->src) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (unlikely(dst_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(dst_nents);
}
}
@@ -1546,26 +1487,25 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
- desc_bytes;
- edesc->iv_dir = DMA_TO_DEVICE;
+ edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+ desc_bytes);
/* Make sure IV is located in a DMAable area */
iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1583,7 +1523,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+ iv_dma, ivsize, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1591,7 +1531,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->iv_dma = iv_dma;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
sec4_sg_bytes, 1);
#endif
@@ -1599,362 +1539,187 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
return edesc;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
+ init_skcipher_job(req, edesc, true);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block.
*/
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
ivsize, 0);
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+ init_skcipher_job(req, edesc, false);
desc = edesc->hw_desc;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
-/*
- * allocate and map the ablkcipher extended descriptor
- * for ablkcipher givencrypt
- */
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
- struct skcipher_givcrypt_request *greq,
- int desc_bytes)
-{
- struct ablkcipher_request *req = &greq->creq;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
- struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma;
- u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
-
- src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (unlikely(src_nents < 0)) {
- dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
- return ERR_PTR(src_nents);
- }
-
- if (likely(req->src == req->dst)) {
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(!mapped_src_nents)) {
- dev_err(jrdev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = src_nents;
- mapped_dst_nents = src_nents;
- } else {
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
- DMA_TO_DEVICE);
- if (unlikely(!mapped_src_nents)) {
- dev_err(jrdev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- if (unlikely(dst_nents < 0)) {
- dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
- return ERR_PTR(dst_nents);
- }
-
- mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(jrdev, "unable to map destination\n");
- dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
- }
- }
-
- sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
- dst_sg_idx = sec4_sg_ents;
- sec4_sg_ents += 1 + mapped_dst_nents;
-
- /*
- * allocate space for base edesc and hw desc commands, link tables, IV
- */
- sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
- GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
- desc_bytes;
- edesc->iv_dir = DMA_FROM_DEVICE;
-
- /* Make sure IV is located in a DMAable area */
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
- if (mapped_src_nents > 1)
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
- 0);
-
- dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
- dst_sg_idx + 1, 0);
-
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
- }
- edesc->iv_dma = iv_dma;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
- sec4_sg_bytes, 1);
-#endif
-
- return edesc;
-}
-
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *jrdev = ctx->jrdev;
- u32 *desc;
- int ret = 0;
-
- /* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /* Create and submit job descriptor*/
- init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
- edesc, req);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
-
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ablkcipher_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
-
- return ret;
-}
-
-#define template_aead template_u.aead
-#define template_ablkcipher template_u.ablkcipher
-struct caam_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- } template_u;
- u32 class1_alg_type;
- u32 class2_alg_type;
-};
-
-static struct caam_alg_template driver_algs[] = {
- /* ablkcipher descriptor */
+static struct caam_skcipher_alg driver_algs[] = {
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
+ },
},
{
- .name = "xts(aes)",
- .driver_name = "xts-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = xts_ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
};
@@ -3239,12 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = {
},
};
-struct caam_crypto_alg {
- struct crypto_alg crypto_alg;
- struct list_head entry;
- struct caam_alg_entry caam;
-};
-
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{
@@ -3276,8 +3035,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
ctx->sh_desc_enc_dma = dma_addr;
ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
sh_desc_dec);
- ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
- sh_desc_givenc);
ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
/* copy descriptor header template value */
@@ -3287,14 +3044,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
return 0;
}
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct caam_crypto_alg *caam_alg =
- container_of(alg, struct caam_crypto_alg, crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
- return caam_init_common(ctx, &caam_alg->caam, false);
+ return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+ false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3316,9 +3073,9 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_jr_free(ctx->jrdev);
}
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_tfm_ctx(tfm));
+ caam_exit_common(crypto_skcipher_ctx(tfm));
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -3328,8 +3085,6 @@ static void caam_aead_exit(struct crypto_aead *tfm)
static void __exit caam_algapi_exit(void)
{
-
- struct caam_crypto_alg *t_alg, *n;
int i;
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -3339,57 +3094,25 @@ static void __exit caam_algapi_exit(void)
crypto_unregister_aead(&t_alg->aead);
}
- if (!alg_list.next)
- return;
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
- *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct caam_crypto_alg *t_alg;
- struct crypto_alg *alg;
+ struct skcipher_alg *alg = &t_alg->skcipher;
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- pr_err("failed to allocate t_alg\n");
- return ERR_PTR(-ENOMEM);
- }
-
- alg = &t_alg->crypto_alg;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
- alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_cra_init;
- alg->cra_exit = caam_cra_exit;
- alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_blocksize = template->blocksize;
- alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- switch (template->type) {
- case CRYPTO_ALG_TYPE_GIVCIPHER:
- alg->cra_type = &crypto_givcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- }
-
- t_alg->caam.class1_alg_type = template->class1_alg_type;
- t_alg->caam.class2_alg_type = template->class2_alg_type;
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- return t_alg;
+ alg->init = caam_cra_init;
+ alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -3441,8 +3164,6 @@ static int __init caam_algapi_init(void)
return -ENODEV;
- INIT_LIST_HEAD(&alg_list);
-
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
@@ -3458,9 +3179,8 @@ static int __init caam_algapi_init(void)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- struct caam_crypto_alg *t_alg;
- struct caam_alg_template *alg = driver_algs + i;
- u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
/* Skip DES algorithms if not supported by device */
if (!des_inst &&
@@ -3477,26 +3197,20 @@ static int __init caam_algapi_init(void)
* on LP devices.
*/
if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
- if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+ if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_XTS)
continue;
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n", alg->driver_name);
- continue;
- }
+ caam_skcipher_alg_init(t_alg);
- err = crypto_register_alg(&t_alg->crypto_alg);
+ err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
pr_warn("%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
+ t_alg->skcipher.base.cra_driver_name);
continue;
}
- list_add_tail(&t_alg->entry, &alg_list);
+ t_alg->registered = true;
registered = true;
}
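
With caamalg.c now exposing static skcipher_alg templates instead of kmalloc'ing crypto_alg instances onto a global list, registration reduces to filling in the common base fields and calling crypto_register_skcipher(). A self-contained sketch of that model, with all names hypothetical (example_*); the stub encrypt/decrypt only mark where a real driver would enqueue hardware work:

#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/skcipher.h>

/* Hypothetical per-session context, standing in for struct caam_ctx. */
struct example_ctx {
	u8 key[32];
	unsigned int keylen;
};

static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct example_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen > sizeof(ctx->key))
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

static int example_crypt(struct skcipher_request *req)
{
	return -EOPNOTSUPP;	/* a real driver submits a job here */
}

static struct skcipher_alg example_alg = {
	.base = {
		.cra_name	 = "cbc(aes)",
		.cra_driver_name = "cbc-aes-example",
		.cra_blocksize	 = 16,
	},
	.setkey		= example_setkey,
	.encrypt	= example_crypt,
	.decrypt	= example_crypt,
	.min_keysize	= 16,
	.max_keysize	= 32,
	.ivsize		= 16,
};

static int __init example_init(void)
{
	/* Finish the template the way caam_skcipher_alg_init() now does. */
	example_alg.base.cra_module   = THIS_MODULE;
	example_alg.base.cra_priority = 3000;
	example_alg.base.cra_ctxsize  = sizeof(struct example_ctx);
	example_alg.base.cra_flags    = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY;
	return crypto_register_skcipher(&example_alg);
}

static void __exit example_exit(void)
{
	crypto_unregister_skcipher(&example_alg);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
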
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index a408edd84f34..1a6f0da14106 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*/
#include "compat.h"
@@ -1212,11 +1213,8 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
+/* For skcipher encrypt and decrypt, read from req->src and write to req->dst */
+static inline void skcipher_append_src_dst(u32 *desc)
{
append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1226,7 +1224,7 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
}
/**
- * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1235,9 +1233,9 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
*/
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
@@ -1280,18 +1278,18 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
OP_ALG_ENCRYPT);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+ "skcipher enc shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
/**
- * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * cnstr_shdsc_skcipher_decap - skcipher decapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1300,9 +1298,9 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
*/
-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
@@ -1348,105 +1346,23 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
-}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
-
-/**
- * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
- * with HW-generated initialization vector.
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC.
- * @ivsize: initialization vector size
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- */
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
-{
- u32 *key_jump_cmd, geniv;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
- (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to memory */
- append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- if (ctx1_iv_off)
- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ "skcipher dec shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
/**
- * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
- * descriptor
+ * cnstr_shdsc_xts_skcipher_encap - xts skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
*/
-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
u32 *key_jump_cmd;
@@ -1481,24 +1397,23 @@ void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
OP_ALG_ENCRYPT);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+ "xts skcipher enc shdesc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
/**
- * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
- * descriptor
+ * cnstr_shdsc_xts_skcipher_decap - xts skcipher decapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
*/
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
u32 *key_jump_cmd;
@@ -1532,15 +1447,15 @@ void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+ "xts skcipher dec shdesc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM descriptor support");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index a917af5776ce..1315c8f6f951 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
*
* Copyright 2016 NXP
*/
@@ -42,10 +42,10 @@
#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+#define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \
20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
15 * CAAM_CMD_SZ)
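These length macros budget shared-descriptor sizes in CAAM command words; whether a key can be inlined in the descriptor is decided against the 64-word descriptor buffer. A minimal sketch of that check, assuming the CAAM_DESC_BYTES_MAX / DESC_JOB_IO_LEN conventions used elsewhere in this patch:

	/* Sketch only, not part of the patch. */
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (rem_bytes >= DESC_SKCIPHER_ENC_LEN) {
		ctx->cdata.key_inline = true;	/* key fits inside the descriptor */
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;	/* reference the key by DMA address */
		ctx->cdata.key_dma = ctx->key_dma;
	}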
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
@@ -96,20 +96,16 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);
-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
-
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);
#endif /* _CAAMALG_DESC_H_ */
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index d7aa7d7ff102..23c9fc4975f8 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -1,9 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale FSL CAAM support for crypto API over QI backend.
* Based on caamalg.c
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2018 NXP
*/
#include "compat.h"
@@ -43,6 +44,12 @@ struct caam_aead_alg {
bool registered;
};
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/*
* per-session context
*/
@@ -50,7 +57,6 @@ struct caam_ctx {
struct device *jrdev;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
enum dma_data_direction dir;
@@ -589,18 +595,19 @@ static int rfc4543_setkey(struct crypto_aead *aead,
return 0;
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+ skcipher);
struct device *jrdev = ctx->jrdev;
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+ const bool is_rfc3686 = alg->caam.rfc3686;
int ret = 0;
#ifdef DEBUG
@@ -629,13 +636,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
- cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
- is_rfc3686, ctx1_iv_off);
- cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
- is_rfc3686, ctx1_iv_off);
- cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
- ivsize, is_rfc3686, ctx1_iv_off);
+ /* skcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
+ cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
/* Now update the driver contexts with the new shared descriptor */
if (ctx->drv_ctx[ENCRYPT]) {
@@ -656,25 +661,16 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
}
}
- if (ctx->drv_ctx[GIVENCRYPT]) {
- ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
- ctx->sh_desc_givenc);
- if (ret) {
- dev_err(jrdev, "driver givenc context update failed\n");
- goto badkey;
- }
- }
-
return ret;
badkey:
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
int ret = 0;
@@ -687,9 +683,9 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* xts ablkcipher encrypt, decrypt shared descriptors */
- cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
- cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
+ /* xts skcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
/* Now update the driver contexts with the new shared descriptor */
if (ctx->drv_ctx[ENCRYPT]) {
@@ -712,7 +708,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return ret;
badkey:
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -741,7 +737,7 @@ struct aead_edesc {
};
/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
@@ -750,7 +746,7 @@ struct aead_edesc {
* @drv_req: driver-specific request structure
* @sgt: the h/w link table, followed by IV
*/
-struct ablkcipher_edesc {
+struct skcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
@@ -781,10 +777,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
if (type == ENCRYPT)
desc = ctx->sh_desc_enc;
- else if (type == DECRYPT)
+ else /* (type == DECRYPT) */
desc = ctx->sh_desc_dec;
- else /* (type == GIVENCRYPT) */
- desc = ctx->sh_desc_givenc;
cpu = smp_processor_id();
drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
@@ -803,8 +797,7 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents, dma_addr_t iv_dma, int ivsize,
- enum optype op_type, dma_addr_t qm_sg_dma,
- int qm_sg_bytes)
+ dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
if (dst != src) {
if (src_nents)
@@ -815,9 +808,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize,
- op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
- DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
if (qm_sg_bytes)
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
@@ -830,21 +821,18 @@ static void aead_unmap(struct device *dev,
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
-static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}
static void aead_done(struct caam_drv_req *drv_req, u32 status)
@@ -902,9 +890,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int in_len, out_len;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
- drv_ctx = get_drv_ctx(ctx, op_type);
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
return (struct aead_edesc *)drv_ctx;
@@ -994,7 +981,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1009,7 +996,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, 0, 0, 0);
+ dst_nents, 0, 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1028,7 +1015,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
dev_err(qidev, "unable to map assoclen\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1051,7 +1038,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(qidev, "unable to map S/G table\n");
dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1138,14 +1125,14 @@ static int ipsec_gcm_decrypt(struct aead_request *req)
return aead_crypt(req, false);
}
-static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
- struct ablkcipher_edesc *edesc;
- struct ablkcipher_request *req = drv_req->app_ctx;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct skcipher_request *req = drv_req->app_ctx;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
struct device *qidev = caam_ctx->qidev;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
#ifdef DEBUG
dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
@@ -1158,72 +1145,60 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
#endif
- ablkcipher_unmap(qidev, edesc, req);
-
- /* In case initial IV was generated, copy it in GIVCIPHER request */
- if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
- u8 *iv;
- struct skcipher_givcrypt_request *greq;
-
- greq = container_of(req, struct skcipher_givcrypt_request,
- creq);
- iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
- memcpy(greq->giv, iv, ivsize);
- }
+ skcipher_unmap(qidev, edesc, req);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+ if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
ivsize, ivsize, 0);
qi_cache_free(edesc);
- ablkcipher_request_complete(req, status);
+ skcipher_request_complete(req, status);
}
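The copy-back of the last ciphertext block into req->iv is what lets users split one long CBC stream across several requests. A hedged caller-side sketch of that convention (synchronous completion assumed, error handling elided):

	/* Hypothetical caller sketch: two chained requests encrypt exactly
	 * like one request covering both halves, because the driver leaves
	 * the last ciphertext block in req->iv.
	 */
	skcipher_request_set_crypt(req, src1, dst1, len1, iv);
	crypto_skcipher_encrypt(req);	/* iv now holds the last C block */
	skcipher_request_set_crypt(req, src2, dst2, len2, iv);
	crypto_skcipher_encrypt(req);	/* continues the same CBC chain */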
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, bool encrypt)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ bool encrypt)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *qidev = ctx->qidev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_edesc *edesc;
dma_addr_t iv_dma;
u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
- drv_ctx = get_drv_ctx(ctx, op_type);
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
- return (struct ablkcipher_edesc *)drv_ctx;
+ return (struct skcipher_edesc *)drv_ctx;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(src_nents);
}
if (unlikely(req->src != req->dst)) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (unlikely(dst_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(dst_nents);
}
@@ -1255,12 +1230,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1269,20 +1244,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
/* Make sure IV is located in a DMAable area */
sg_table = &edesc->sgt[0];
iv = (u8 *)(sg_table + qm_sg_ents);
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1292,7 +1267,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->iv_dma = iv_dma;
edesc->qm_sg_bytes = qm_sg_bytes;
edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = ablkcipher_done;
+ edesc->drv_req.cbk = skcipher_done;
edesc->drv_req.drv_ctx = drv_ctx;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
@@ -1307,7 +1282,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
dev_err(qidev, "unable to map S/G table\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1315,348 +1290,172 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
fd_sgt = &edesc->drv_req.fd_sgt[0];
dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
- ivsize + req->nbytes, 0);
+ ivsize + req->cryptlen, 0);
if (req->src == req->dst) {
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
- sizeof(*sg_table), req->nbytes, 0);
+ sizeof(*sg_table), req->cryptlen, 0);
} else if (mapped_dst_nents > 1) {
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), req->nbytes, 0);
+ sizeof(*sg_table), req->cryptlen, 0);
} else {
dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
- req->nbytes, 0);
- }
-
- return edesc;
-}
-
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
- struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *qidev = ctx->qidev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
- struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma;
- u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- struct qm_sg_entry *sg_table, *fd_sgt;
- int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
- struct caam_drv_ctx *drv_ctx;
-
- drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
- return (struct ablkcipher_edesc *)drv_ctx;
-
- src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (unlikely(src_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
- return ERR_PTR(src_nents);
- }
-
- if (unlikely(req->src != req->dst)) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- if (unlikely(dst_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
- return ERR_PTR(dst_nents);
- }
-
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
- DMA_TO_DEVICE);
- if (unlikely(!mapped_src_nents)) {
- dev_err(qidev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(qidev, "unable to map destination\n");
- dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
- }
- } else {
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(!mapped_src_nents)) {
- dev_err(qidev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = src_nents;
- mapped_dst_nents = src_nents;
- }
-
- qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
- dst_sg_idx = qm_sg_ents;
-
- qm_sg_ents += 1 + mapped_dst_nents;
- qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
- ivsize > CAAM_QI_MEMCACHE_SIZE)) {
- dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
- qm_sg_ents, ivsize);
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- /* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_alloc(GFP_DMA | flags);
- if (!edesc) {
- dev_err(qidev, "could not allocate extended descriptor\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- /* Make sure IV is located in a DMAable area */
- sg_table = &edesc->sgt[0];
- iv = (u8 *)(sg_table + qm_sg_ents);
- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(qidev, iv_dma)) {
- dev_err(qidev, "unable to map IV\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
- edesc->iv_dma = iv_dma;
- edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = ablkcipher_done;
- edesc->drv_req.drv_ctx = drv_ctx;
-
- if (mapped_src_nents > 1)
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
-
- dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
- 0);
-
- edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
- DMA_TO_DEVICE);
- if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
- dev_err(qidev, "unable to map S/G table\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, GIVENCRYPT, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
+ req->cryptlen, 0);
}
- fd_sgt = &edesc->drv_req.fd_sgt[0];
-
- if (mapped_src_nents > 1)
- dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
- 0);
- else
- dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
- req->nbytes, 0);
-
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), ivsize + req->nbytes, 0);
-
return edesc;
}
-static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int ret;
if (unlikely(caam_congested))
return -EAGAIN;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, encrypt);
+ edesc = skcipher_edesc_alloc(req, encrypt);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block.
*/
if (!encrypt)
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
ivsize, ivsize, 0);
ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(ctx->qidev, edesc, req);
+ skcipher_unmap(ctx->qidev, edesc, req);
qi_cache_free(edesc);
}
return ret;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- return ablkcipher_crypt(req, true);
+ return skcipher_crypt(req, true);
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- return ablkcipher_crypt(req, false);
-}
-
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ret;
-
- if (unlikely(caam_congested))
- return -EAGAIN;
-
- /* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ablkcipher_unmap(ctx->qidev, edesc, req);
- qi_cache_free(edesc);
- }
-
- return ret;
+ return skcipher_crypt(req, false);
}
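skcipher_crypt() returns -EAGAIN when the QI backend is congested, so callers are expected to resubmit. A minimal, hypothetical retry sketch using the generic crypto wait helpers:

	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	do {
		/* crypto_wait_req() turns -EINPROGRESS into the final status */
		ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	} while (ret == -EAGAIN);	/* QI backend congested, try again */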
-#define template_ablkcipher template_u.ablkcipher
-struct caam_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- } template_u;
- u32 class1_alg_type;
- u32 class2_alg_type;
-};
-
-static struct caam_alg_template driver_algs[] = {
- /* ablkcipher descriptor */
+static struct caam_skcipher_alg driver_algs[] = {
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam-qi",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam-qi",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam-qi",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam-qi",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam-qi",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "xts(aes)",
- .driver_name = "xts-aes-caam-qi",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = xts_ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
};
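Once registered, these transforms are reachable through the generic skcipher API; a hedged usage sketch that selects the QI implementation by its driver name:

	/* Illustrative only: pick the CAAM-QI AES-CBC implementation. */
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("cbc-aes-caam-qi", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	/* build a request, then crypto_skcipher_encrypt()/decrypt() */
	crypto_free_skcipher(tfm);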
@@ -2528,12 +2327,6 @@ static struct caam_aead_alg driver_aeads[] = {
},
};
-struct caam_crypto_alg {
- struct list_head entry;
- struct crypto_alg crypto_alg;
- struct caam_alg_entry caam;
-};
-
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{
@@ -2572,19 +2365,18 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
spin_lock_init(&ctx->lock);
ctx->drv_ctx[ENCRYPT] = NULL;
ctx->drv_ctx[DECRYPT] = NULL;
- ctx->drv_ctx[GIVENCRYPT] = NULL;
return 0;
}
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
- crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
- return caam_init_common(ctx, &caam_alg->caam, false);
+ return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+ false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2602,16 +2394,15 @@ static void caam_exit_common(struct caam_ctx *ctx)
{
caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
- caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_tfm_ctx(tfm));
+ caam_exit_common(crypto_skcipher_ctx(tfm));
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2619,10 +2410,8 @@ static void caam_aead_exit(struct crypto_aead *tfm)
caam_exit_common(crypto_aead_ctx(tfm));
}
-static struct list_head alg_list;
static void __exit caam_qi_algapi_exit(void)
{
- struct caam_crypto_alg *t_alg, *n;
int i;
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -2632,55 +2421,25 @@ static void __exit caam_qi_algapi_exit(void)
crypto_unregister_aead(&t_alg->aead);
}
- if (!alg_list.next)
- return;
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
- *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct caam_crypto_alg *t_alg;
- struct crypto_alg *alg;
-
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg)
- return ERR_PTR(-ENOMEM);
+ struct skcipher_alg *alg = &t_alg->skcipher;
- alg = &t_alg->crypto_alg;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
- alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_cra_init;
- alg->cra_exit = caam_cra_exit;
- alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_blocksize = template->blocksize;
- alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- switch (template->type) {
- case CRYPTO_ALG_TYPE_GIVCIPHER:
- alg->cra_type = &crypto_givcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- }
-
- t_alg->caam.class1_alg_type = template->class1_alg_type;
- t_alg->caam.class2_alg_type = template->class2_alg_type;
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- return t_alg;
+ alg->init = caam_cra_init;
+ alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -2734,8 +2493,6 @@ static int __init caam_qi_algapi_init(void)
return -ENODEV;
}
- INIT_LIST_HEAD(&alg_list);
-
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
@@ -2751,9 +2508,8 @@ static int __init caam_qi_algapi_init(void)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- struct caam_crypto_alg *t_alg;
- struct caam_alg_template *alg = driver_algs + i;
- u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
/* Skip DES algorithms if not supported by device */
if (!des_inst &&
@@ -2765,23 +2521,16 @@ static int __init caam_qi_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
- dev_warn(priv->qidev, "%s alg allocation failed\n",
- alg->driver_name);
- continue;
- }
+ caam_skcipher_alg_init(t_alg);
- err = crypto_register_alg(&t_alg->crypto_alg);
+ err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
dev_warn(priv->qidev, "%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
+ t_alg->skcipher.base.cra_driver_name);
continue;
}
- list_add_tail(&t_alg->entry, &alg_list);
+ t_alg->registered = true;
registered = true;
}
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
new file mode 100644
index 000000000000..7d8ac0222fa3
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -0,0 +1,5165 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "caamalg_qi2.h"
+#include "dpseci_cmd.h"
+#include "desc_constr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "sg_sw_qm2.h"
+#include "key_gen.h"
+#include "caamalg_desc.h"
+#include "caamhash_desc.h"
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+
+#define CAAM_CRA_PRIORITY 2000
+
+/* max key is the sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */
+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+
+#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+#endif
+
+/*
+ * This is a cache of buffers, from which the users of CAAM QI driver
+ * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
+ * NOTE: A more elegant solution would be to have some headroom in the frames
+ * being processed. The dpaa2-eth driver could add such headroom, but that
+ * would pose a problem for userspace applications processing the frames,
+ * which cannot know of this limitation. So for now, this will work.
+ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
+ */
+static struct kmem_cache *qi_cache;
+
+struct caam_alg_entry {
+ struct device *dev;
+ int class1_alg_type;
+ int class2_alg_type;
+ bool rfc3686;
+ bool geniv;
+};
+
+struct caam_aead_alg {
+ struct aead_alg aead;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+/**
+ * caam_ctx - per-session context
+ * @flc: Flow Contexts array
+ * @key: [authentication key], encryption key
+ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @key_dma: I/O virtual address of the key
+ * @dir: DMA direction for mapping key and Flow Contexts
+ * @dev: dpseci device
+ * @adata: authentication algorithm details
+ * @cdata: encryption algorithm details
+ * @authsize: authentication tag (a.k.a. ICV / MAC) size
+ */
+struct caam_ctx {
+ struct caam_flc flc[NUM_OP];
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t flc_dma[NUM_OP];
+ dma_addr_t key_dma;
+ enum dma_data_direction dir;
+ struct device *dev;
+ struct alginfo adata;
+ struct alginfo cdata;
+ unsigned int authsize;
+};
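The @flc_dma and @dir fields suggest the flow contexts and key are mapped once at init time; a hedged sketch of how such a single mapping could be laid out (the actual init code is not in this hunk):

	/* Hypothetical init-time sketch: one mapping covers flc[] and key[]. */
	dma_addr_t dma_addr;
	int i;

	dma_addr = dma_map_single(ctx->dev, ctx->flc,
				  offsetof(struct caam_ctx, flc_dma), ctx->dir);
	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);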
+
+static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
+ iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
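Frame descriptors carry I/O virtual addresses, which differ from physical addresses behind an IOMMU; hence the translation above. A hedged dequeue-path sketch (fd is a hypothetical dequeued frame descriptor):

	/* Hypothetical sketch: recover the edesc built at enqueue time. */
	struct aead_edesc *edesc;

	edesc = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));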
+
+/*
+ * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
+ *
+ * Allocate data on the hotpath. Instead of using kzalloc, one can use the
+ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
+ * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
+ * hosting 16 SG entries.
+ *
+ * @flags - flags that would be used for the equivalent kmalloc(..) call
+ *
+ * Returns a pointer to a retrieved buffer on success or NULL on failure.
+ */
+static inline void *qi_cache_zalloc(gfp_t flags)
+{
+ return kmem_cache_zalloc(qi_cache, flags);
+}
+
+/*
+ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
+ *
+ * @obj - buffer previously allocated by qi_cache_zalloc
+ *
+ * No checking is done; the call is a passthrough to
+ * kmem_cache_free(...)
+ */
+static inline void qi_cache_free(void *obj)
+{
+ kmem_cache_free(qi_cache, obj);
+}
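A minimal sketch of the cache lifecycle these helpers assume (cache name hypothetical); creation would happen once at probe/init time, before any hotpath allocation:

	/* Hypothetical init-time sketch. */
	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, SLAB_CACHE_DMA, NULL);
	if (!qi_cache)
		return -ENOMEM;

	/* hotpath pairing */
	edesc = qi_cache_zalloc(GFP_DMA | GFP_ATOMIC);
	/* build and enqueue the request, free in the completion callback */
	qi_cache_free(edesc);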
+
+static struct caam_request *to_caam_req(struct crypto_async_request *areq)
+{
+ switch (crypto_tfm_alg_type(areq->tfm)) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return skcipher_request_ctx(skcipher_request_cast(areq));
+ case CRYPTO_ALG_TYPE_AEAD:
+ return aead_request_ctx(container_of(areq, struct aead_request,
+ base));
+ case CRYPTO_ALG_TYPE_AHASH:
+ return ahash_request_ctx(ahash_request_cast(areq));
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents,
+ int dst_nents, dma_addr_t iv_dma, int ivsize,
+ dma_addr_t qm_sg_dma, int qm_sg_bytes)
+{
+ if (dst != src) {
+ if (src_nents)
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
+
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
+ if (qm_sg_bytes)
+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct device *dev = ctx->dev;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ struct caam_flc *flc;
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ u32 *nonce = NULL;
+ unsigned int data_len[2];
+ u32 inl_mask;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+	 * AES-CTR needs to load the IV into the CONTEXT1 reg
+	 * at an offset of 128 bits (16 bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
+
+ /* aead_encrypt shared descriptor */
+ if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
+ DESC_QI_AEAD_ENC_LEN) +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+
+ if (alg->caam.geniv)
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686,
+ nonce, ctx1_iv_off, true,
+ priv->sec_attr.era);
+ else
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, true, priv->sec_attr.era);
+
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* aead_decrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
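For reference, the ctx1_iv_off arithmetic above maps onto the upper half of the 32-byte CONTEXT1 register roughly as follows (a sketch consistent with the comments above; byte offsets assume CTR_RFC3686_NONCE_SIZE = 4 and an 8-byte RFC3686 IV):

	/*
	 * CONTEXT1 byte layout assumed by ctx1_iv_off (sketch):
	 *
	 *   plain AES-CTR:  [16..31] = IV              -> ctx1_iv_off = 16
	 *   RFC3686 CTR:    [16..19] = NONCE
	 *                   [20..27] = IV              -> ctx1_iv_off = 20
	 *                   [28..31] = COUNTER (be32 1)
	 */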
+
+static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+ dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+
+ ctx->cdata.keylen = keys.enckeylen;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return aead_set_sh_desc(aead);
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
+ return -EINVAL;
+}
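After a successful aead_setkey(), ctx->key holds both keys back to back, with the authentication key padded out to the split-key slot; a layout sketch:

	/*
	 * ctx->key layout (sketch):
	 *
	 *   [0 .. keylen_pad-1]                      auth key (padded slot)
	 *   [keylen_pad .. keylen_pad+enckeylen-1]   encryption key
	 */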
+
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_request *req_ctx = aead_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+ unsigned int authsize = ctx->authsize;
+ int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct dpaa2_sg_entry *sg_table;
+
+ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize :
+ (-authsize)));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : (-authsize)));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+
+ if (src_nents) {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = 0;
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+ ivsize = crypto_aead_ivsize(aead);
+
+ /*
+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
+ */
+ qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+ CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_nents, ivsize);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (ivsize) {
+ u8 *iv = (u8 *)(sg_table + qm_sg_nents);
+
+ /* Make sure IV is located in a DMAable area */
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+
+ edesc->assoclen = cpu_to_caam32(req->assoclen);
+ edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
+ dev_err(dev, "unable to map assoclen\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+ qm_sg_index++;
+ if (ivsize) {
+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+ qm_sg_index++;
+ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ out_len = req->assoclen + req->cryptlen +
+ (encrypt ? ctx->authsize : (-ctx->authsize));
+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, in_len);
+
+ if (req->dst == req->src) {
+ if (mapped_src_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
+ (1 + !!ivsize) * sizeof(*sg_table));
+ }
+ } else if (mapped_dst_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
+ sizeof(*sg_table));
+ }
+
+ dpaa2_fl_set_len(out_fle, out_len);
+
+ return edesc;
+}
+
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES GCM encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
+ ctx->cdata.keylen = keylen;
+
+ return gcm_set_sh_desc(aead);
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * RFC4106 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4106_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ return rfc4106_set_sh_desc(aead);
+}
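Per RFC 4106, the setkey input is the AES key with a 4-byte salt appended; a hedged caller sketch (key material hypothetical):

	/* Hypothetical: 16-byte AES key followed by a 4-byte salt. */
	u8 key_and_salt[AES_KEYSIZE_128 + 4];

	get_random_bytes(key_and_salt, sizeof(key_and_salt));
	err = crypto_aead_setkey(tfm, key_and_salt, sizeof(key_and_salt));
	/* the driver keeps keylen = 16; the trailing 4 bytes become the salt */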
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * RFC4543 encrypt shared descriptor.
+ * The Job Descriptor and the Shared Descriptor must together fit into
+ * the 64-word descriptor h/w buffer; otherwise the key is referenced
+ * by DMA address instead of being inlined.
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * RFC4543 decrypt shared descriptor.
+ * The same 64-word descriptor h/w buffer limit applies.
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4543_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ return rfc4543_set_sh_desc(aead);
+}
+
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher),
+ struct caam_skcipher_alg, skcipher);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ /*
+ * AES-CTR needs to load the IV into the CONTEXT1 register at an
+ * offset of 128 bits (16 bytes):
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* skcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* skcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ u32 *desc;
+
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ dev_err(dev, "key size mismatch\n");
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* xts_skcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* xts_skcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
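+/*
+ * skcipher_edesc_alloc() - allocate the extended descriptor for an skcipher
+ * request: map the src/dst scatterlists and the IV, then build the QM S/G
+ * table and the frame list entries describing the job to the hardware.
+ */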
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_request *req_ctx = skcipher_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct skcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ u8 *iv;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->cryptlen);
+ return ERR_PTR(src_nents);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->cryptlen);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
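+ /*
+ * QM S/G table layout: one entry for the IV, followed by the source
+ * S/G entries; when a distinct destination is scattered, its entries
+ * are appended starting at dst_sg_idx.
+ */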
+ qm_sg_ents = 1 + mapped_src_nents;
+ dst_sg_idx = qm_sg_ents;
+
+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_ents, ivsize);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Make sure IV is located in a DMAable area */
+ sg_table = &edesc->sgt[0];
+ iv = (u8 *)(sg_table + qm_sg_ents);
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ dst_sg_idx, 0);
+
+ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
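+ /*
+ * Build the frame list: the input frame covers IV + data through the
+ * S/G table; the output frame reuses the same table past the IV entry
+ * for in-place operation, or points at the destination otherwise.
+ */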
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
+ dpaa2_fl_set_len(out_fle, req->cryptlen);
+
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+
+ if (req->src == req->dst) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
+ sizeof(*sg_table));
+ } else if (mapped_dst_nents > 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ }
+
+ return edesc;
+}
+
+static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+}
+
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
+static void aead_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static void aead_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ /*
+ * If the hardware ICV check failed, report -EBADMSG so callers
+ * see an authentication failure rather than a generic I/O error.
+ */
+ if ((status & JRSTA_CCBERR_ERRID_MASK) ==
+ JRSTA_CCBERR_ERRID_ICVCHK)
+ ecode = -EBADMSG;
+ else
+ ecode = -EIO;
+ }
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->cbk = aead_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->cbk = aead_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
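+/*
+ * For the rfc4106/rfc4543 IPsec modes the 8-byte IV travels as part of the
+ * associated data as seen by the driver, so reject requests whose assoclen
+ * cannot contain it.
+ */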
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_encrypt(req);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_decrypt(req);
+}
+
+static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct skcipher_edesc *edesc = req_ctx->edesc;
+ int ecode = 0;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+ skcipher_unmap(ctx->dev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
+ ivsize, 0);
+
+ qi_cache_free(edesc);
+ skcipher_request_complete(req, ecode);
+}
+
+static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct skcipher_edesc *edesc = req_ctx->edesc;
+ int ecode = 0;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ skcipher_request_complete(req, ecode);
+}
+
+static int skcipher_encrypt(struct skcipher_request *req)
+{
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = skcipher_edesc_alloc(req);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->cbk = skcipher_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int skcipher_decrypt(struct skcipher_request *req)
+{
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = skcipher_edesc_alloc(req);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block.
+ */
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
+ ivsize, 0);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->cbk = skcipher_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
+{
+ dma_addr_t dma_addr;
+ int i;
+
+ /* copy descriptor header template value */
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
+ ctx->dev = caam->dev;
+ ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
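+ /*
+ * The flow contexts and the key are mapped with a single DMA mapping;
+ * the per-operation flc_dma[] entries and key_dma below are offsets
+ * into it.
+ */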
+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
+ offsetof(struct caam_ctx, flc_dma),
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, dma_addr)) {
+ dev_err(ctx->dev, "unable to map key, shared descriptors\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < NUM_OP; i++)
+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
+ ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
+
+ return 0;
+}
+
+static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
+{
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
+}
+
+static int caam_cra_init_aead(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ aead);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
+ alg->setkey == aead_setkey);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
+ offsetof(struct caam_ctx, flc_dma), ctx->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void caam_cra_exit(struct crypto_skcipher *tfm)
+{
+ caam_exit_common(crypto_skcipher_ctx(tfm));
+}
+
+static void caam_cra_exit_aead(struct crypto_aead *tfm)
+{
+ caam_exit_common(crypto_aead_ctx(tfm));
+}
+
+static struct caam_skcipher_alg driver_algs[] = {
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ }
+};
+
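+/*
+ * AEAD algorithms. Entries with .geniv = true are the echainiv/seqiv
+ * variants, whose shared descriptors generate the IV in hardware.
+ */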
+static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ /* Galois Counter Mode */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ }
+ },
+ /* single-pass ipsec_esp descriptor */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-desi-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(md5),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha1),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha224),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha256),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha384),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha512),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+};
+
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
+{
+ struct skcipher_alg *alg = &t_alg->skcipher;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init_skcipher;
+ alg->exit = caam_cra_exit;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+ struct aead_alg *alg = &t_alg->aead;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init_aead;
+ alg->exit = caam_cra_exit_aead;
+}
+
+/* max hash key is max split key size */
+#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
+
+#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+
+/* caam context size for hashes: running digest + 8-byte message length */
+#define HASH_MSG_LEN 8
+#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
+
+enum hash_optype {
+ UPDATE = 0,
+ UPDATE_FIRST,
+ FINALIZE,
+ DIGEST,
+ HASH_NUM_OP
+};
+
+/**
+ * caam_hash_ctx - ahash per-session context
+ * @flc: Flow Contexts array
+ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @dev: dpseci device
+ * @ctx_len: size of Context Register
+ * @adata: hashing algorithm details
+ */
+struct caam_hash_ctx {
+ struct caam_flc flc[HASH_NUM_OP];
+ dma_addr_t flc_dma[HASH_NUM_OP];
+ struct device *dev;
+ int ctx_len;
+ struct alginfo adata;
+};
+
+/* ahash state */
+struct caam_hash_state {
+ struct caam_request caam_req;
+ dma_addr_t buf_dma;
+ dma_addr_t ctx_dma;
+ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_0;
+ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_1;
+ u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int current_buf;
+};
+
+struct caam_export_state {
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
+ u8 caam_ctx[MAX_CTX_LEN];
+ int buflen;
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+};
+
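+/*
+ * The hash state holds two bounce buffers and ping-pongs between them, so
+ * new partial data can accumulate in one while the other may still be
+ * DMA-mapped for a pending job.
+ */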
+static inline void switch_buf(struct caam_hash_state *state)
+{
+ state->current_buf ^= 1;
+}
+
+static inline u8 *current_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_1 : state->buf_0;
+}
+
+static inline u8 *alt_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_0 : state->buf_1;
+}
+
+static inline int *current_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_1 : &state->buflen_0;
+}
+
+static inline int *alt_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_0 : &state->buflen_1;
+}
+
+/* Map current buffer in state (if length > 0) and put it in link table */
+static inline int buf_map_to_qm_sg(struct device *dev,
+ struct dpaa2_sg_entry *qm_sg,
+ struct caam_hash_state *state)
+{
+ int buflen = *current_buflen(state);
+
+ if (!buflen)
+ return 0;
+
+ state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, state->buf_dma)) {
+ dev_err(dev, "unable to map buf\n");
+ state->buf_dma = 0;
+ return -ENOMEM;
+ }
+
+ dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
+
+ return 0;
+}
+
+/* Map state->caam_ctx and add it to the link table */
+static inline int ctx_map_to_qm_sg(struct device *dev,
+ struct caam_hash_state *state, int ctx_len,
+ struct dpaa2_sg_entry *qm_sg, u32 flag)
+{
+ state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
+ if (dma_mapping_error(dev, state->ctx_dma)) {
+ dev_err(dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ return -ENOMEM;
+ }
+
+ dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
+
+ return 0;
+}
+
+static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
+ struct caam_flc *flc;
+ u32 *desc;
+
+ /* ahash_update shared descriptor */
+ flc = &ctx->flc[UPDATE];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_update_first shared descriptor */
+ flc = &ctx->flc[UPDATE_FIRST];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_final shared descriptor */
+ flc = &ctx->flc[FINALIZE];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_digest shared descriptor */
+ flc = &ctx->flc[DIGEST];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ return 0;
+}
+
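+/*
+ * Key digestion runs synchronously: hash_digest_key() enqueues a one-shot
+ * job and sleeps on this completion until split_key_sh_done() fires.
+ */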
+struct split_key_sh_result {
+ struct completion completion;
+ int err;
+ struct device *dev;
+};
+
+static void split_key_sh_done(void *cbk_ctx, u32 err)
+{
+ struct split_key_sh_result *res = cbk_ctx;
+
+ dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+
+ if (err)
+ caam_qi2_strstatus(res->dev, err);
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+/* Digest the key if it is longer than the hash block size */
+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 *keylen, u8 *key_out, u32 digestsize)
+{
+ struct caam_request *req_ctx;
+ u32 *desc;
+ struct split_key_sh_result result;
+ dma_addr_t src_dma, dst_dma;
+ struct caam_flc *flc;
+ dma_addr_t flc_dma;
+ int ret = -ENOMEM;
+ struct dpaa2_fl_entry *in_fle, *out_fle;
+
+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
+ if (!req_ctx)
+ return -ENOMEM;
+
+ in_fle = &req_ctx->fd_flt[1];
+ out_fle = &req_ctx->fd_flt[0];
+
+ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
+ if (!flc)
+ goto err_flc;
+
+ src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, src_dma)) {
+ dev_err(ctx->dev, "unable to map key input memory\n");
+ goto err_src_dma;
+ }
+ dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, dst_dma)) {
+ dev_err(ctx->dev, "unable to map key output memory\n");
+ goto err_dst_dma;
+ }
+
+ desc = flc->sh_desc;
+
+ init_sh_desc(desc, 0);
+
+ /* descriptor to perform unkeyed hash on key_in */
+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
+ OP_ALG_AS_INITFINAL);
+ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, flc_dma)) {
+ dev_err(ctx->dev, "unable to map shared descriptor\n");
+ goto err_flc_dma;
+ }
+
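+	/*
+	 * Frame list: in_fle points at the key to be hashed, out_fle at
+	 * the buffer that receives the digest.
+	 */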
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, src_dma);
+ dpaa2_fl_set_len(in_fle, *keylen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ result.err = 0;
+ init_completion(&result.completion);
+ result.dev = ctx->dev;
+
+ req_ctx->flc = flc;
+ req_ctx->flc_dma = flc_dma;
+ req_ctx->cbk = split_key_sh_done;
+ req_ctx->ctx = &result;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS) {
+ /* in progress */
+ wait_for_completion(&result.completion);
+ ret = result.err;
+ print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
+				     DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+ digestsize, 1);
+ }
+
+ dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
+ DMA_TO_DEVICE);
+err_flc_dma:
+ dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
+err_dst_dma:
+ dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
+err_src_dma:
+ kfree(flc);
+err_flc:
+ kfree(req_ctx);
+
+ *keylen = digestsize;
+
+ return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ int ret;
+ u8 *hashed_key = NULL;
+
+ dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
+
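+	/*
+	 * As in HMAC (RFC 2104), keys longer than the block size are
+	 * first hashed down to digestsize bytes.
+	 */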
+ if (keylen > blocksize) {
+ hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
+ GFP_KERNEL | GFP_DMA);
+ if (!hashed_key)
+ return -ENOMEM;
+ ret = hash_digest_key(ctx, key, &keylen, hashed_key,
+ digestsize);
+ if (ret)
+ goto bad_free_key;
+ key = hashed_key;
+ }
+
+ ctx->adata.keylen = keylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+ goto bad_free_key;
+
+ ctx->adata.key_virt = key;
+ ctx->adata.key_inline = true;
+
+ ret = ahash_set_sh_desc(ahash);
+ kfree(hashed_key);
+ return ret;
+bad_free_key:
+ kfree(hashed_key);
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (edesc->src_nents)
+ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+ if (edesc->dst_dma)
+ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+
+ if (edesc->qm_sg_bytes)
+ dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+
+ if (state->buf_dma) {
+ dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ DMA_TO_DEVICE);
+ state->buf_dma = 0;
+ }
+}
+
+static inline void ahash_unmap_ctx(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len,
+ u32 flag)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (state->ctx_dma) {
+ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ state->ctx_dma = 0;
+ }
+ ahash_unmap(dev, edesc, req, dst_len);
+}
+
+static void ahash_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_bi(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ switch_buf(state);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ crypto_ahash_digestsize(ahash), 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ switch_buf(state);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ crypto_ahash_digestsize(ahash), 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static int ahash_update_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state), last_buflen;
+ int in_len = *buflen + req->nbytes, to_hash;
+ int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ last_buflen = *next_buflen;
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
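+	/*
+	 * Only whole blocks are sent to the engine; the remainder is kept
+	 * in the state buffer and prepended to the next request.
+	 */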
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - (*next_buflen));
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_src_index = 1 + (*buflen ? 1 : 0);
+ qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
+ sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
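+		/*
+		 * S/G table layout: [0] running context, [1] buffered data
+		 * (if any), then the mapped source entries.
+		 */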
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ if (mapped_nents) {
+ sg_to_qm_sg_last(req->src, mapped_nents,
+ sg_table + qm_sg_src_index, 0);
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+ } else {
+ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
+ true);
+ }
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE];
+ req_ctx->cbk = ahash_done_bi;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = last_buflen;
+ }
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_final_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, qm_sg_src_index;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc)
+ return -ENOMEM;
+
+ qm_sg_src_index = 1 + (buflen ? 1 : 0);
+ qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[FINALIZE];
+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
+ req_ctx->cbk = ahash_done_ctx_src;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap_ctx:
+	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, qm_sg_src_index;
+ int src_nents, mapped_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_src_index = 1 + (buflen ? 1 : 0);
+ qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[FINALIZE];
+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
+ req_ctx->cbk = ahash_done_ctx_src;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap_ctx:
+	ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = -ENOMEM;
+
+ state->buf_dma = 0;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+			dev_err(ctx->dev, "unable to DMA map source\n");
+ return ret;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ edesc->src_nents = src_nents;
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+
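+	/*
+	 * A single mapped segment is passed directly in the frame list
+	 * entry; multiple segments go through a QMan S/G table.
+	 */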
+ if (mapped_nents > 1) {
+ int qm_sg_bytes;
+ struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
+
+ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ goto unmap;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+ }
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ goto unmap;
+ }
+
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_final_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int buflen = *current_buflen(state);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ int ret = -ENOMEM;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc)
+ return ret;
+
+ state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+ dev_err(ctx->dev, "unable to map src\n");
+ goto unmap;
+ }
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ goto unmap;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, state->buf_dma);
+ dpaa2_fl_set_len(in_fle, buflen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_update_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
+ int in_len = *buflen + req->nbytes, to_hash;
+ int qm_sg_bytes, src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - *next_buflen);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+ if (ret)
+ goto unmap_ctx;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+ ctx->ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, to_hash);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+ req_ctx->cbk = ahash_done_ctx_dst;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = 0;
+ }
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, src_nents, mapped_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+ if (ret)
+ goto unmap;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap;
+
+ return ret;
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+	return ret;
+}
+
+static int ahash_update_first(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
+ int to_hash;
+ int src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
+ 1);
+ to_hash = req->nbytes - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - (*next_buflen));
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+				dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ sg_table = &edesc->sgt[0];
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, to_hash);
+
+ if (mapped_nents > 1) {
+ int qm_sg_bytes;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+ }
+
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+ *next_buflen, 0);
+
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+ ctx->ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+ req_ctx->cbk = ahash_done_ctx_dst;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags &
+ CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else if (*next_buflen) {
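+		/*
+		 * Everything fits in the buffer: stay on the no-ctx code
+		 * paths until a full block's worth of data accumulates.
+		 */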
+ state->update = ahash_update_no_ctx;
+ state->finup = ahash_finup_no_ctx;
+ state->final = ahash_final_no_ctx;
+ scatterwalk_map_and_copy(next_buf, req->src, 0,
+ req->nbytes, 0);
+ switch_buf(state);
+ }
+
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+	ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_first(struct ahash_request *req)
+{
+ return ahash_digest(req);
+}
+
+static int ahash_init(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
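+	/*
+	 * Requests start on the "first" code paths; the function pointers
+	 * are switched once a running context has been established.
+	 */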
+ state->update = ahash_update_first;
+ state->finup = ahash_finup_first;
+ state->final = ahash_final_no_ctx;
+
+ state->ctx_dma = 0;
+ state->current_buf = 0;
+ state->buf_dma = 0;
+ state->buflen_0 = 0;
+ state->buflen_1 = 0;
+
+ return 0;
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->update(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->finup(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->final(req);
+}
+
+static int ahash_export(struct ahash_request *req, void *out)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_export_state *export = out;
+ int len;
+ u8 *buf;
+
+ if (state->current_buf) {
+ buf = state->buf_1;
+ len = state->buflen_1;
+ } else {
+ buf = state->buf_0;
+ len = state->buflen_0;
+ }
+
+ memcpy(export->buf, buf, len);
+ memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
+ export->buflen = len;
+ export->update = state->update;
+ export->final = state->final;
+ export->finup = state->finup;
+
+ return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ const struct caam_export_state *export = in;
+
+ memset(state, 0, sizeof(*state));
+ memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
+ state->buflen_0 = export->buflen;
+ state->update = export->update;
+ state->final = export->final;
+ state->finup = export->finup;
+
+ return 0;
+}
+
+struct caam_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ struct ahash_alg template_ahash;
+ u32 alg_type;
+};
+
+/* ahash descriptors */
+static struct caam_hash_template driver_hash[] = {
+ {
+ .name = "sha1",
+ .driver_name = "sha1-caam-qi2",
+ .hmac_name = "hmac(sha1)",
+ .hmac_driver_name = "hmac-sha1-caam-qi2",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA1,
+ }, {
+ .name = "sha224",
+ .driver_name = "sha224-caam-qi2",
+ .hmac_name = "hmac(sha224)",
+ .hmac_driver_name = "hmac-sha224-caam-qi2",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA224,
+ }, {
+ .name = "sha256",
+ .driver_name = "sha256-caam-qi2",
+ .hmac_name = "hmac(sha256)",
+ .hmac_driver_name = "hmac-sha256-caam-qi2",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA256,
+ }, {
+ .name = "sha384",
+ .driver_name = "sha384-caam-qi2",
+ .hmac_name = "hmac(sha384)",
+ .hmac_driver_name = "hmac-sha384-caam-qi2",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA384,
+ }, {
+ .name = "sha512",
+ .driver_name = "sha512-caam-qi2",
+ .hmac_name = "hmac(sha512)",
+ .hmac_driver_name = "hmac-sha512-caam-qi2",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA512,
+ }, {
+ .name = "md5",
+ .driver_name = "md5-caam-qi2",
+ .hmac_name = "hmac(md5)",
+ .hmac_driver_name = "hmac-md5-caam-qi2",
+ .blocksize = MD5_BLOCK_WORDS * 4,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_MD5,
+ }
+};
+
+struct caam_hash_alg {
+ struct list_head entry;
+ struct device *dev;
+ int alg_type;
+ struct ahash_alg ahash_alg;
+};
+
+static int caam_hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_alg *base = tfm->__crt_alg;
+ struct hash_alg_common *halg =
+ container_of(base, struct hash_alg_common, base);
+ struct ahash_alg *alg =
+ container_of(halg, struct ahash_alg, halg);
+ struct caam_hash_alg *caam_hash =
+ container_of(alg, struct caam_hash_alg, ahash_alg);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+ HASH_MSG_LEN + 32,
+ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+ HASH_MSG_LEN + 64,
+ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
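+	/*
+	 * Note: SHA-224 and SHA-384 use the running state size of their
+	 * parent algorithms (32 and 64 bytes), not their truncated
+	 * digest sizes.
+	 */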
+ dma_addr_t dma_addr;
+ int i;
+
+ ctx->dev = caam_hash->dev;
+
+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, dma_addr)) {
+ dev_err(ctx->dev, "unable to map shared descriptors\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < HASH_NUM_OP; i++)
+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
+
+ /* copy descriptor header template value */
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct caam_hash_state));
+
+ return ahash_set_sh_desc(ahash);
+}
+
+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
+ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
+ struct caam_hash_template *template, bool keyed)
+{
+ struct caam_hash_alg *t_alg;
+ struct ahash_alg *halg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ t_alg->ahash_alg = template->template_ahash;
+ halg = &t_alg->ahash_alg;
+ alg = &halg->halg.base;
+
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_driver_name);
+ } else {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ t_alg->ahash_alg.setkey = NULL;
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_hash_cra_init;
+ alg->cra_exit = caam_hash_cra_exit;
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
+
+ t_alg->alg_type = template->alg_type;
+ t_alg->dev = dev;
+
+ return t_alg;
+}
+
+static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+
+ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
+ napi_schedule_irqoff(&ppriv->napi);
+}
+
+static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_io_notification_ctx *nctx;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->priv = priv;
+ nctx = &ppriv->nctx;
+ nctx->is_cdan = 0;
+ nctx->id = ppriv->rsp_fqid;
+ nctx->desired_cpu = cpu;
+ nctx->cb = dpaa2_caam_fqdan_cb;
+
+ /* Register notification callbacks */
+ err = dpaa2_io_service_register(NULL, nctx);
+ if (unlikely(err)) {
+ dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
+ nctx->cb = NULL;
+ /*
+ * If no affine DPIO for this core, there's probably
+ * none available for next cores either. Signal we want
+ * to retry later, in case the DPIO devices weren't
+ * probed yet.
+ */
+ err = -EPROBE_DEFER;
+ goto err;
+ }
+
+ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
+ dev);
+ if (unlikely(!ppriv->store)) {
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
+ err = -ENOMEM;
+ goto err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return 0;
+
+err:
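+	/*
+	 * Unwind only what was actually set up: each loop stops at the
+	 * first CPU whose registration or store creation did not happen.
+	 */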
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->nctx.cb)
+ break;
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ }
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->store)
+ break;
+ dpaa2_io_store_destroy(ppriv->store);
+ }
+
+ return err;
+}
+
+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ dpaa2_io_store_destroy(ppriv->store);
+
+ if (++i == priv->num_pairs)
+ return;
+ }
+}
+
+static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
+{
+ struct dpseci_rx_queue_cfg rx_queue_cfg;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err = 0, i = 0, cpu;
+
+ /* Configure Rx queues */
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+
+ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
+ DPSECI_QUEUE_OPT_USER_CTX;
+ rx_queue_cfg.order_preservation_en = 0;
+ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+ /*
+ * Rx priority (WQ) doesn't really matter, since we use
+ * pull mode, i.e. volatile dequeues from specific FQs
+ */
+ rx_queue_cfg.dest_cfg.priority = 0;
+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+
+ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &rx_queue_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
+ err);
+ return err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return err;
+}
+
+static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+
+ if (!priv->cscn_mem)
+ return;
+
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ kfree(priv->cscn_mem);
+}
+
+static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+
+ dpaa2_dpseci_congestion_free(priv);
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
+ const struct dpaa2_fd *fd)
+{
+ struct caam_request *req;
+ u32 fd_err;
+
+ if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
+ dev_err(priv->dev, "Only Frame List FD format is supported!\n");
+ return;
+ }
+
+ fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
+ if (unlikely(fd_err))
+ dev_err(priv->dev, "FD error: %08x\n", fd_err);
+
+ /*
+ * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
+ * in FD[ERR] or FD[FRC].
+ */
+ req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
+ dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
+}
+
+static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ int err;
+
+ /* Retry while portal is busy */
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err == -EBUSY);
+
+ if (unlikely(err))
+		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
+
+ return err;
+}
+
+static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ struct dpaa2_dq *dq;
+ int cleaned = 0, is_last;
+
+ do {
+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
+ if (unlikely(!dq)) {
+ if (unlikely(!is_last)) {
+ dev_dbg(ppriv->priv->dev,
+ "FQ %d returned no valid frames\n",
+ ppriv->rsp_fqid);
+ /*
+ * MUST retry until we get some sort of
+ * valid response token (be it "empty dequeue"
+ * or a valid frame).
+ */
+ continue;
+ }
+ break;
+ }
+
+ /* Process FD */
+ dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
+ cleaned++;
+ } while (!is_last);
+
+ return cleaned;
+}
+
+static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct dpaa2_caam_priv *priv;
+ int err, cleaned = 0, store_cleaned;
+
+ ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
+ priv = ppriv->priv;
+
+ if (unlikely(dpaa2_caam_pull_fq(ppriv)))
+ return 0;
+
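+	/*
+	 * Consume stores while staying within the NAPI budget: stop once
+	 * a pull comes back empty or a full store would exceed the budget.
+	 */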
+ do {
+ store_cleaned = dpaa2_caam_store_consume(ppriv);
+ cleaned += store_cleaned;
+
+ if (store_cleaned == 0 ||
+ cleaned > budget - DPAA2_CAAM_STORE_SIZE)
+ break;
+
+ /* Try to dequeue some more */
+ err = dpaa2_caam_pull_fq(ppriv);
+ if (unlikely(err))
+ break;
+ } while (1);
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
+ err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
+ if (unlikely(err))
+ dev_err(priv->dev, "Notification rearm failed: %d\n",
+ err);
+ }
+
+ return cleaned;
+}
+
+static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
+ u16 token)
+{
+ struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
+ struct device *dev = priv->dev;
+ int err;
+
+ /*
+ * Congestion group feature supported starting with DPSECI API v5.1
+ * and only when object has been created with this capability.
+ */
+ if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
+ !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
+ return 0;
+
+ priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
+ GFP_KERNEL | GFP_DMA);
+ if (!priv->cscn_mem)
+ return -ENOMEM;
+
+ priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, priv->cscn_dma)) {
+ dev_err(dev, "Error mapping CSCN memory area\n");
+ err = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
+ cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
+ cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
+ cong_notif_cfg.message_ctx = (uintptr_t)priv;
+ cong_notif_cfg.message_iova = priv->cscn_dma;
+ cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
+ DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
+ DPSECI_CGN_MODE_COHERENT_WRITE;
+
+ err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
+ &cong_notif_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_congestion_notification failed\n");
+ goto err_set_cong;
+ }
+
+ return 0;
+
+err_set_cong:
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+err_dma_map:
+ kfree(priv->cscn_mem);
+
+ return err;
+}
+
+static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_caam_priv *priv;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, cpu;
+ u8 i;
+
+ priv = dev_get_drvdata(dev);
+
+ priv->dev = dev;
+ priv->dpsec_id = ls_dev->obj_desc.id;
+
+	/* Get a handle for the DPSECI this interface is associated with */
+ err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_open() failed: %d\n", err);
+ goto err_open;
+ }
+
+ err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
+ &priv->minor_ver);
+ if (err) {
+ dev_err(dev, "dpseci_get_api_version() failed\n");
+ goto err_get_vers;
+ }
+
+ dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
+
+ err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->dpseci_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_attributes() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->sec_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_sec_attr() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "setup_congestion() failed\n");
+ goto err_get_vers;
+ }
+
+ priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
+ priv->dpseci_attr.num_tx_queues);
+ if (priv->num_pairs > num_online_cpus()) {
+ dev_warn(dev, "%d queues won't be used\n",
+ priv->num_pairs - num_online_cpus());
+ priv->num_pairs = num_online_cpus();
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
+ err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->rx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_rx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
+ err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->tx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_tx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i,
+ priv->rx_queue_attr[i].fqid,
+ priv->tx_queue_attr[i].fqid);
+
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+ ppriv->prio = i;
+
+ ppriv->net_dev.dev = *dev;
+ INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
+ netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
+ DPAA2_CAAM_NAPI_WEIGHT);
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return 0;
+
+err_get_rx_queue:
+ dpaa2_dpseci_congestion_free(priv);
+err_get_vers:
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+err_open:
+ return err;
+}
+
+static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_enable(&ppriv->napi);
+ }
+
+ return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ int i, err = 0, enabled;
+
+ err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_disable() failed\n");
+ return err;
+ }
+
+ err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
+ if (err) {
+ dev_err(dev, "dpseci_is_enabled() failed\n");
+ return err;
+ }
+
+ dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_disable(&ppriv->napi);
+ netif_napi_del(&ppriv->napi);
+ }
+
+ return 0;
+}
+
+static struct list_head hash_list;
+
+static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i, err = 0;
+ bool registered = false;
+
+ /*
+ * There is no way to get CAAM endianness - there is no direct register
+ * space access and MC f/w does not provide this attribute.
+ * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
+ * property.
+ */
+ caam_little_end = true;
+
+ caam_imx = false;
+
+ dev = &dpseci_dev->dev;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ priv->domain = iommu_get_domain_for_dev(dev);
+
+ qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
+ 0, SLAB_CACHE_DMA, NULL);
+ if (!qi_cache) {
+ dev_err(dev, "Can't allocate SEC cache\n");
+ return -ENOMEM;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+ if (err) {
+ dev_err(dev, "dma_set_mask_and_coherent() failed\n");
+ goto err_dma_mask;
+ }
+
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "MC portal allocation failed\n");
+
+ goto err_dma_mask;
+ }
+
+ priv->ppriv = alloc_percpu(*priv->ppriv);
+ if (!priv->ppriv) {
+ dev_err(dev, "alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto err_alloc_ppriv;
+ }
+
+ /* DPSECI initialization */
+ err = dpaa2_dpseci_setup(dpseci_dev);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_setup() failed\n");
+ goto err_dpseci_setup;
+ }
+
+ /* DPIO */
+ err = dpaa2_dpseci_dpio_setup(priv);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
+ /* DPSECI binding to DPIO */
+ err = dpaa2_dpseci_bind(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_bind() failed\n");
+ goto err_bind;
+ }
+
+ /* DPSECI enable */
+ err = dpaa2_dpseci_enable(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_enable() failed\n");
+ goto err_bind;
+ }
+
+ /* register crypto algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ (alg_sel == OP_ALG_ALGSEL_3DES ||
+ alg_sel == OP_ALG_ALGSEL_DES))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ alg_sel == OP_ALG_ALGSEL_AES)
+ continue;
+
+ t_alg->caam.dev = dev;
+ caam_skcipher_alg_init(t_alg);
+
+ err = crypto_register_skcipher(&t_alg->skcipher);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->skcipher.base.cra_driver_name, err);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+ OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
+ c1_alg_sel == OP_ALG_ALGSEL_DES))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ c1_alg_sel == OP_ALG_ALGSEL_AES)
+ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD not supported by device.
+ */
+ if (!priv->sec_attr.md_acc_num && c2_alg_sel)
+ continue;
+
+ t_alg->caam.dev = dev;
+ caam_aead_alg_init(t_alg);
+
+ err = crypto_register_aead(&t_alg->aead);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->aead.base.cra_driver_name, err);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+ if (registered)
+ dev_info(dev, "algorithms registered in /proc/crypto\n");
+
+ /* register hash algorithms the device supports */
+ INIT_LIST_HEAD(&hash_list);
+
+ /*
+ * Skip registration of any hashing algorithms if MD block
+ * is not present.
+ */
+ if (!priv->sec_attr.md_acc_num)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+ struct caam_hash_alg *t_alg;
+ struct caam_hash_template *alg = driver_hash + i;
+
+ /* register hmac version */
+ t_alg = caam_hash_alloc(dev, alg, true);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(dev, "%s hash alg allocation failed: %d\n",
+ alg->driver_name, err);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name,
+ err);
+ kfree(t_alg);
+ } else {
+ list_add_tail(&t_alg->entry, &hash_list);
+ }
+
+ /* register unkeyed version */
+ t_alg = caam_hash_alloc(dev, alg, false);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(dev, "%s alg allocation failed: %d\n",
+ alg->driver_name, err);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name,
+ err);
+ kfree(t_alg);
+ } else {
+ list_add_tail(&t_alg->entry, &hash_list);
+ }
+ }
+ if (!list_empty(&hash_list))
+ dev_info(dev, "hash algorithms registered in /proc/crypto\n");
+
+ return err;
+
+err_bind:
+ dpaa2_dpseci_dpio_free(priv);
+err_dpio_setup:
+ dpaa2_dpseci_free(priv);
+err_dpseci_setup:
+ free_percpu(priv->ppriv);
+err_alloc_ppriv:
+ fsl_mc_portal_free(priv->mc_io);
+err_dma_mask:
+ kmem_cache_destroy(qi_cache);
+
+ return err;
+}
+
+static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+
+ if (t_alg->registered)
+ crypto_unregister_aead(&t_alg->aead);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
+ }
+
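+	/*
+	 * hash_list.next is NULL if INIT_LIST_HEAD() was never reached in
+	 * probe, so guard the traversal before unregistering hash algorithms.
+	 */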
+ if (hash_list.next) {
+ struct caam_hash_alg *t_hash_alg, *p;
+
+ list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ list_del(&t_hash_alg->entry);
+ kfree(t_hash_alg);
+ }
+ }
+
+ dpaa2_dpseci_disable(priv);
+ dpaa2_dpseci_dpio_free(priv);
+ dpaa2_dpseci_free(priv);
+ free_percpu(priv->ppriv);
+ fsl_mc_portal_free(priv->mc_io);
+ kmem_cache_destroy(qi_cache);
+
+ return 0;
+}
+
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
+{
+ struct dpaa2_fd fd;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ int err = 0, i, id;
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (priv->cscn_mem) {
+ dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
+ DPAA2_CSCN_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
+ dev_dbg_ratelimited(dev, "Dropping request\n");
+ return -EBUSY;
+ }
+ }
+
+ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
+
+ req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, req->fd_flt_dma)) {
+		dev_err(dev, "DMA mapping error for QI enqueue request\n");
+		/* nothing was mapped; don't fall through to the unmap in err_out */
+		return -EIO;
+	}
+
+ memset(&fd, 0, sizeof(fd));
+ dpaa2_fd_set_format(&fd, dpaa2_fd_list);
+ dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
+ dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
+ dpaa2_fd_set_flc(&fd, req->flc_dma);
+
+ /*
+ * There is no guarantee that preemption is disabled here,
+ * thus take action.
+ */
+ preempt_disable();
+ id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
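+	/*
+	 * Pick a Tx queue based on the current CPU and retry the enqueue a
+	 * bounded number of times (twice the Tx queue count) while the QMan
+	 * portal reports -EBUSY.
+	 */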
+ for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
+ err = dpaa2_io_service_enqueue_fq(NULL,
+ priv->tx_queue_attr[id].fqid,
+ &fd);
+ if (err != -EBUSY)
+ break;
+ }
+ preempt_enable();
+
+ if (unlikely(err)) {
+ dev_err(dev, "Error enqueuing frame: %d\n", err);
+ goto err_out;
+ }
+
+ return -EINPROGRESS;
+
+err_out:
+ dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ return -EIO;
+}
+EXPORT_SYMBOL(dpaa2_caam_enqueue);
+
+static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpseci",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_caam_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_caam_probe,
+ .remove = dpaa2_caam_remove,
+ .match_id_table = dpaa2_caam_match_id_table
+};
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
+
+module_fsl_mc_driver(dpaa2_caam_driver);
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
new file mode 100644
index 000000000000..9823bdefd029
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _CAAMALG_QI2_H_
+#define _CAAMALG_QI2_H_
+
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+#include <linux/threads.h>
+#include "dpseci.h"
+#include "desc_constr.h"
+
+#define DPAA2_CAAM_STORE_SIZE 16
+/* NAPI weight *must* be a multiple of the store size. */
+#define DPAA2_CAAM_NAPI_WEIGHT 64
+
+/* The congestion entrance threshold was chosen so that on LS2088
+ * we support the maximum throughput for the available memory
+ */
+#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
+#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
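+/*
+ * With the values above, congestion is entered at 128 MiB of in-flight
+ * data and exited at 9/10 of that (~115 MiB), leaving a ~10% hysteresis
+ * window so the congestion state does not flap around the threshold.
+ */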
+
+/**
+ * dpaa2_caam_priv - driver private data
+ * @dpsec_id: DPSECI object unique ID
+ * @major_ver: DPSECI major version
+ * @minor_ver: DPSECI minor version
+ * @dpseci_attr: DPSECI attributes
+ * @sec_attr: SEC engine attributes
+ * @rx_queue_attr: array of Rx queue attributes
+ * @tx_queue_attr: array of Tx queue attributes
+ * @num_pairs: number of (Rx, Tx) queue pairs used by the driver
+ * @cscn_mem: pointer to memory region containing the congestion SCN;
+ * its size is larger than strictly needed, to accommodate alignment
+ * @cscn_mem_aligned: pointer to congestion SCN; it is computed as
+ * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
+ * @cscn_dma: dma address used by the QMAN to write CSCN messages
+ * @dev: device associated with the DPSECI object
+ * @mc_io: pointer to MC portal's I/O object
+ * @domain: IOMMU domain
+ * @ppriv: per CPU pointers to private data
+ */
+struct dpaa2_caam_priv {
+ int dpsec_id;
+
+ u16 major_ver;
+ u16 minor_ver;
+
+ struct dpseci_attr dpseci_attr;
+ struct dpseci_sec_attr sec_attr;
+ struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
+ struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
+ int num_pairs;
+
+ /* congestion */
+ void *cscn_mem;
+ void *cscn_mem_aligned;
+ dma_addr_t cscn_dma;
+
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ struct iommu_domain *domain;
+
+ struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
+};
+
+/**
+ * dpaa2_caam_priv_per_cpu - per CPU private data
+ * @napi: napi structure
+ * @net_dev: dummy netdev, needed only to back the NAPI instance
+ * @req_fqid: (virtual) request (Tx / enqueue) FQID
+ * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
+ * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
+ * @nctx: notification context of response FQ
+ * @store: where dequeued frames are stored
+ * @priv: backpointer to dpaa2_caam_priv
+ */
+struct dpaa2_caam_priv_per_cpu {
+ struct napi_struct napi;
+ struct net_device net_dev;
+ int req_fqid;
+ int rsp_fqid;
+ int prio;
+ struct dpaa2_io_notification_ctx nctx;
+ struct dpaa2_io_store *store;
+ struct dpaa2_caam_priv *priv;
+};
+
+/*
+ * The CAAM QI hardware constructs a job descriptor which points to
+ * the shared descriptor (as pointed to by context_a of the FQ towards
+ * CAAM). When the job descriptor is executed by the DECO, the whole
+ * job descriptor together with the shared descriptor gets loaded into
+ * the DECO buffer, which is 64 words long (each 32-bit).
+ *
+ * The job descriptor constructed by QI hardware has layout:
+ *
+ * HEADER (1 word)
+ * Shdesc ptr (1 or 2 words)
+ * SEQ_OUT_PTR (1 word)
+ * Out ptr (1 or 2 words)
+ * Out length (1 word)
+ * SEQ_IN_PTR (1 word)
+ * In ptr (1 or 2 words)
+ * In length (1 word)
+ *
+ * The shdesc ptr is used to fetch the shared descriptor contents
+ * into the DECO buffer.
+ *
+ * Apart from the shdesc contents, the total number of words that
+ * get loaded into the DECO buffer is 8 or 11. The remaining words
+ * in the DECO buffer can be used for storing the shared descriptor.
+ */
+#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
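+/*
+ * A sketch of the arithmetic, assuming the definitions from desc_constr.h
+ * (CAAM_CMD_SZ = 4, CAAM_DESC_BYTES_MAX = 64 * CAAM_CMD_SZ and
+ * DESC_JOB_IO_LEN = 5 * CAAM_CMD_SZ + 3 * CAAM_PTR_SZ): with 64-bit
+ * pointers the job descriptor part takes 11 words (44 bytes), leaving
+ * (256 - 44) / 4 = 53 words of DECO buffer for the shared descriptor.
+ */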
+
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE 512
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen: associated data length, in CAAM endianness
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @sgt: the h/w link table, followed by IV
+ */
+struct aead_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ unsigned int assoclen;
+ dma_addr_t assoclen_dma;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * skcipher_edesc - s/w-extended skcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @sgt: the h/w link table, followed by IV
+ */
+struct skcipher_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * ahash_edesc - s/w-extended ahash descriptor
+ * @dst_dma: I/O virtual address of req->result
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @src_nents: number of segments in input scatterlist
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @sgt: pointer to h/w link table
+ */
+struct ahash_edesc {
+ dma_addr_t dst_dma;
+ dma_addr_t qm_sg_dma;
+ int src_nents;
+ int qm_sg_bytes;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/**
+ * caam_flc - Flow Context (FLC)
+ * @flc: Flow Context options
+ * @sh_desc: Shared Descriptor
+ */
+struct caam_flc {
+ u32 flc[16];
+ u32 sh_desc[MAX_SDLEN];
+} ____cacheline_aligned;
+
+enum optype {
+ ENCRYPT = 0,
+ DECRYPT,
+ NUM_OP
+};
+
+/**
+ * caam_request - the request structure an application must fill in when
+ * submitting a job to the driver
+ * @fd_flt: Frame list table defining input and output
+ * fd_flt[0] - FLE pointing to output buffer
+ * fd_flt[1] - FLE pointing to input buffer
+ * @fd_flt_dma: DMA address for the frame list table
+ * @flc: Flow Context
+ * @flc_dma: I/O virtual address of Flow Context
+ * @cbk: Callback function to invoke when job is completed
+ * @ctx: arbitrary context attached to the request by the application
+ * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
+ */
+struct caam_request {
+ struct dpaa2_fl_entry fd_flt[2];
+ dma_addr_t fd_flt_dma;
+ struct caam_flc *flc;
+ dma_addr_t flc_dma;
+ void (*cbk)(void *ctx, u32 err);
+ void *ctx;
+ void *edesc;
+};
+
+/**
+ * dpaa2_caam_enqueue() - enqueue a crypto request
+ * @dev: device associated with the DPSECI object
+ * @req: pointer to caam_request
+ */
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
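+/*
+ * A minimal usage sketch (hypothetical caller, error handling elided):
+ *
+ *	struct caam_request *req;	// typically embedded in the caller's edesc
+ *
+ *	req->flc = flc;			// pre-built flow context
+ *	req->flc_dma = flc_dma;		// DMA-mapped by the caller
+ *	req->cbk = my_done_cbk;		// invoked on the Rx/NAPI path
+ *	req->ctx = my_ctx;
+ *	// fd_flt[0] / fd_flt[1] must describe the output / input buffers
+ *	ret = dpaa2_caam_enqueue(dev, req);	// -EINPROGRESS on success
+ */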
+
+#endif /* _CAAMALG_QI2_H_ */
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 43975ab5f09c..46924affa0bd 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
@@ -62,6 +63,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include "caamhash_desc.h"
#define CAAM_CRA_PRIORITY 3000
@@ -71,14 +73,6 @@
#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
-/* length of descriptors text */
-#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-
#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -235,60 +229,6 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
return 0;
}
-/*
- * For ahash update, final and finup (import_ctx = true)
- * import context, read and write to seqout
- * For ahash firsts and digest (import_ctx = false)
- * read and write to seqout
- */
-static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- struct caam_hash_ctx *ctx, bool import_ctx,
- int era)
-{
- u32 op = ctx->adata.algtype;
- u32 *skip_key_load;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Append key if it has been set; ahash update excluded */
- if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
- /* Skip key loading if already shared */
- skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- if (era < 6)
- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- ctx->adata.keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_proto_dkp(desc, &ctx->adata);
-
- set_jump_tgt_here(desc, skip_key_load);
-
- op |= OP_ALG_AAI_HMAC_PRECOMP;
- }
-
- /* If needed, import context from software */
- if (import_ctx)
- append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
- /*
- * Load from buf and/or src and write to req->result or state->context
- * Calculate remaining bytes to read
- */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
-
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -301,8 +241,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -313,8 +253,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -325,8 +265,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -337,8 +277,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
new file mode 100644
index 000000000000..a12f7959a2c3
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Shared descriptors for ahash algorithms
+ *
+ * Copyright 2017 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamhash_desc.h"
+
+/**
+ * cnstr_shdsc_ahash - ahash shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ * SHA256, SHA384, SHA512}.
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINAL, UPDATE}
+ * @digestsize: algorithm's digest size
+ * @ctx_len: size of Context Register
+ * @import_ctx: true if previous Context Register needs to be restored
+ * must be true for ahash update and final
+ * must be false for ahash first and digest
+ * @era: SEC Era
+ */
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, bool import_ctx, int era)
+{
+ u32 op = adata->algtype;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Append key if it has been set; ahash update excluded */
+ if (state != OP_ALG_AS_UPDATE && adata->keylen) {
+ u32 *skip_key_load;
+
+ /* Skip key loading if already shared */
+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ if (era < 6)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_proto_dkp(desc, adata);
+
+ set_jump_tgt_here(desc, skip_key_load);
+
+ op |= OP_ALG_AAI_HMAC_PRECOMP;
+ }
+
+ /* If needed, import context from software */
+ if (import_ctx)
+ append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
+ */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+}
+EXPORT_SYMBOL(cnstr_shdsc_ahash);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
+MODULE_AUTHOR("NXP Semiconductors");
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h
new file mode 100644
index 000000000000..631fc1ac312c
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Shared descriptors for ahash algorithms
+ *
+ * Copyright 2017 NXP
+ */
+
+#ifndef _CAAMHASH_DESC_H_
+#define _CAAMHASH_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, bool import_ctx, int era);
+
+#endif /* _CAAMHASH_DESC_H_ */
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index f26d62e5533a..4fc209cbbeab 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index fde07d4ff019..4318b0aa6fb9 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for hw_random
*
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 1c71e0cd5098..9604ff7a335e 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
@@ -39,6 +40,7 @@
#include <crypto/authenc.h>
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rsa.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 538c01f428c1..3fc793193821 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
 * CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
new file mode 100644
index 000000000000..8a68531ded0b
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+#include "dpseci.h"
+#include "dpseci_cmd.h"
+
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpseci_id: DPSECI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an already created
+ * object; an object may have been declared statically in the DPL
+ * or created dynamically.
+ * This function returns a unique authentication token, associated with the
+ * specific object ID and the specific MC portal; this token must be used in all
+ * subsequent commands for this specific object.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_open *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpseci_cmd_open *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
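+/*
+ * A rough control-session sketch (hypothetical caller, error checks elided):
+ *
+ *	u16 token;
+ *	struct dpseci_attr attr;
+ *
+ *	dpseci_open(mc_io, 0, dpseci_id, &token);
+ *	dpseci_get_attributes(mc_io, 0, token, &attr);
+ *	...
+ *	dpseci_close(mc_io, 0, token);
+ */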
+
+/**
+ * dpseci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * After this function is called, no further operations are allowed on the
+ * object without opening a new control session.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_is_enabled *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
+ *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_attributes *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_tx_queues = rsp_params->num_tx_queues;
+ attr->num_rx_queues = rsp_params->num_rx_queues;
+ attr->options = le32_to_cpu(rsp_params->options);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
+ * Rx queues identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ cmd_params->queue = queue;
+ dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
+ cfg->order_preservation_en);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
+ attr->dest_cfg.priority = cmd_params->priority;
+ attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
+ DEST_TYPE);
+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
+ attr->fqid = le32_to_cpu(cmd_params->fqid);
+ attr->order_preservation_en =
+ dpseci_get_field(cmd_params->order_preservation_en,
+ ORDER_PRESERVATION);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->priority = rsp_params->priority;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned SEC attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_sec_attr *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
+ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
+ attr->major_rev = rsp_params->major_rev;
+ attr->minor_rev = rsp_params->minor_rev;
+ attr->era = rsp_params->era;
+ attr->deco_num = rsp_params->deco_num;
+ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
+ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
+ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
+ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
+ attr->crc_acc_num = rsp_params->crc_acc_num;
+ attr->pk_acc_num = rsp_params->pk_acc_num;
+ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
+ attr->rng_acc_num = rsp_params->rng_acc_num;
+ attr->md_acc_num = rsp_params->md_acc_num;
+ attr->arc4_acc_num = rsp_params->arc4_acc_num;
+ attr->des_acc_num = rsp_params->des_acc_num;
+ attr->aes_acc_num = rsp_params->aes_acc_num;
+ attr->ccha_acc_num = rsp_params->ccha_acc_num;
+ attr->ptha_acc_num = rsp_params->ptha_acc_num;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path sec API
+ * @minor_ver: Minor version of data path sec API
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_congestion_notification() - Set congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_congestion_notification() - Get congestion group notification
+ * configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->dest_cfg.priority = rsp_params->priority;
+ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
+ CGN_DEST_TYPE);
+ cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+
+ return 0;
+}
diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
new file mode 100644
index 000000000000..4550e134d166
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+#ifndef _DPSECI_H_
+#define _DPSECI_H_
+
+/*
+ * Data Path SEC Interface API
+ * Contains initialization APIs and runtime control APIs for DPSECI
+ */
+
+struct fsl_mc_io;
+
+/**
+ * General DPSECI macros
+ */
+
+/**
+ * Maximum number of Tx/Rx queues per DPSECI object
+ */
+#define DPSECI_MAX_QUEUE_NUM 16
+
+/**
+ * All queues considered; see dpseci_set_rx_queue()
+ */
+#define DPSECI_ALL_QUEUES (u8)(-1)
+
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token);
+
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/**
+ * Enable the Congestion Group support
+ */
+#define DPSECI_OPT_HAS_CG 0x000020
+
+/**
+ * struct dpseci_cfg - Structure representing DPSECI configuration
+ * @options: Any combination of the following flags:
+ * DPSECI_OPT_HAS_CG
+ * @num_tx_queues: num of queues towards the SEC
+ * @num_rx_queues: num of queues back from the SEC
+ * @priorities: Priorities for the SEC hardware processing;
+ * each place in the array is the priority of the tx queue
+ * towards the SEC;
+ * valid priorities are configured with values 1-8;
+ */
+struct dpseci_cfg {
+ u32 options;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 priorities[DPSECI_MAX_QUEUE_NUM];
+};
+
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en);
+
+/**
+ * struct dpseci_attr - Structure representing DPSECI attributes
+ * @id: DPSECI object ID
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @options: any combination of the following flags:
+ * DPSECI_OPT_HAS_CG
+ */
+struct dpseci_attr {
+ int id;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u32 options;
+};
+
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr);
+
+/**
+ * enum dpseci_dest - DPSECI destination types
+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue from
+ * the queue only after notification is received
+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpseci_dest {
+ DPSECI_DEST_NONE = 0,
+ DPSECI_DEST_DPIO,
+ DPSECI_DEST_DPCON
+};
+
+/**
+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that channel;
+ * not relevant for 'DPSECI_DEST_NONE' option
+ */
+struct dpseci_dest_cfg {
+ enum dpseci_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/**
+ * DPSECI queue modification options
+ */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPSECI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Select to modify the queue's order preservation
+ */
+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
+
+/**
+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ * @order_preservation_en: order preservation configuration for the rx queue;
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
+ * in 'options'
+ * @dest_cfg: Queue destination parameters; valid only if
+ * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpseci_rx_queue_cfg {
+ u32 options;
+ int order_preservation_en;
+ u64 user_ctx;
+ struct dpseci_dest_cfg dest_cfg;
+};
+
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg);
+
+/**
+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @order_preservation_en: Status of the order preservation configuration on the
+ * queue
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpseci_rx_queue_attr {
+ u64 user_ctx;
+ int order_preservation_en;
+ struct dpseci_dest_cfg dest_cfg;
+ u32 fqid;
+};
+
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr);
+
+/**
+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
+ * @priority: SEC hardware processing priority for the queue
+ */
+struct dpseci_tx_queue_attr {
+ u32 fqid;
+ u8 priority;
+};
+
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr);
+
+/**
+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
+ * hardware accelerator
+ * @ip_id: ID for SEC
+ * @major_rev: Major revision number for SEC
+ * @minor_rev: Minor revision number for SEC
+ * @era: SEC Era
+ * @deco_num: The number of copies of the DECO that are implemented in this
+ * version of SEC
+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
+ * version of SEC
+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
+ * version of SEC
+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC
+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC
+ * @crc_acc_num: The number of copies of the CRC module that are implemented in
+ * this version of SEC
+ * @pk_acc_num: The number of copies of the Public Key module that are
+ * implemented in this version of SEC
+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
+ * implemented in this version of SEC
+ * @rng_acc_num: The number of copies of the Random Number Generator that are
+ * implemented in this version of SEC
+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
+ * implemented in this version of SEC
+ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
+ * in this version of SEC
+ * @des_acc_num: The number of copies of the DES module that are implemented in
+ * this version of SEC
+ * @aes_acc_num: The number of copies of the AES module that are implemented in
+ * this version of SEC
+ * @ccha_acc_num: The number of copies of the ChaCha20 module that are
+ * implemented in this version of SEC.
+ * @ptha_acc_num: The number of copies of the Poly1305 module that are
+ * implemented in this version of SEC.
+ **/
+struct dpseci_sec_attr {
+ u16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+ u8 ccha_acc_num;
+ u8 ptha_acc_num;
+};
+
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr);
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+/**
+ * enum dpseci_congestion_unit - DPSECI congestion units
+ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpseci_congestion_unit {
+ DPSECI_CONGESTION_UNIT_BYTES = 0,
+ DPSECI_CONGESTION_UNIT_FRAMES
+};
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
+
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
+
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
+ */
+#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
+ * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
+ * (if enabled)
+ */
+#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpseci_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state;
+ * set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned;
+ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
+ * values
+ */
+struct dpseci_congestion_notification_cfg {
+ enum dpseci_congestion_unit units;
+ u32 threshold_entry;
+ u32 threshold_exit;
+ u64 message_ctx;
+ u64 message_iova;
+ struct dpseci_dest_cfg dest_cfg;
+ u16 notification_mode;
+};
+
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg);
+
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg);
+
+#endif /* _DPSECI_H_ */
diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
new file mode 100644
index 000000000000..6ab77ead6e3d
--- /dev/null
+++ b/drivers/crypto/caam/dpseci_cmd.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _DPSECI_CMD_H_
+#define _DPSECI_CMD_H_
+
+/* DPSECI Version */
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 3
+
+#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
+#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
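+/* e.g. DPSECI_VER(5, 3) encodes as 0x00050003 */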
+
+/* Command versioning */
+#define DPSECI_CMD_BASE_VERSION 1
+#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_ID_OFFSET 4
+
+#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
+ DPSECI_CMD_BASE_VERSION)
+
+#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
+ DPSECI_CMD_BASE_VERSION_V2)
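+/*
+ * The command ID carries its version in the low nibble, e.g.
+ * DPSECI_CMD_V1(0x800) = 0x8001 and DPSECI_CMD_V2(0x198) = 0x1982.
+ */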
+
+/* Command IDs */
+#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
+#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
+#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
+
+#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
+#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
+#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
+#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
+
+#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
+#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
+#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
+#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
+#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSECI_MASK(field) \
+ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
+ DPSECI_##field##_SHIFT)
+
+#define dpseci_set_field(var, field, val) \
+ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
+
+#define dpseci_get_field(var, field) \
+ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
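+/*
+ * For instance, dpseci_get_field(rsp->is_enabled, ENABLE) expands to
+ * (rsp->is_enabled & GENMASK(0, 0)) >> 0, i.e. bit 0 of the response byte.
+ */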
+
+struct dpseci_cmd_open {
+ __le32 dpseci_id;
+};
+
+#define DPSECI_ENABLE_SHIFT 0
+#define DPSECI_ENABLE_SIZE 1
+
+struct dpseci_rsp_is_enabled {
+ u8 is_enabled;
+};
+
+struct dpseci_rsp_get_attributes {
+ __le32 id;
+ __le32 pad0;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 pad1[6];
+ __le32 options;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+
+#define DPSECI_ORDER_PRESERVATION_SHIFT 0
+#define DPSECI_ORDER_PRESERVATION_SIZE 1
+
+struct dpseci_cmd_queue {
+ __le32 dest_id;
+ u8 priority;
+ u8 queue;
+ u8 dest_type;
+ u8 pad;
+ __le64 user_ctx;
+ union {
+ __le32 options;
+ __le32 fqid;
+ };
+ u8 order_preservation_en;
+};
+
+struct dpseci_rsp_get_tx_queue {
+ __le32 pad;
+ __le32 fqid;
+ u8 priority;
+};
+
+struct dpseci_rsp_get_sec_attr {
+ __le16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 pad0[3];
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 pad1;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pad2;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 pad3;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+ u8 ccha_acc_num;
+ u8 ptha_acc_num;
+};
+
+struct dpseci_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+#define DPSECI_CGN_DEST_TYPE_SHIFT 0
+#define DPSECI_CGN_DEST_TYPE_SIZE 4
+#define DPSECI_CGN_UNITS_SHIFT 4
+#define DPSECI_CGN_UNITS_SIZE 2
+
+struct dpseci_cmd_congestion_notification {
+ __le32 dest_id;
+ __le16 notification_mode;
+ u8 priority;
+ u8 options;
+ __le64 message_iova;
+ __le64 message_ctx;
+ __le32 threshold_entry;
+ __le32 threshold_exit;
+};
+
+#endif /* _DPSECI_CMD_H_ */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 8da88beb1abb..7e8d690f2827 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -108,6 +108,54 @@ static const struct {
{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};
+static const struct {
+ u8 value;
+ const char *error_text;
+} qi_error_list[] = {
+ { 0x1F, "Job terminated by FQ or ICID flush" },
+ { 0x20, "FD format error"},
+ { 0x21, "FD command format error"},
+ { 0x23, "FL format error"},
+ { 0x25, "CRJD specified in FD, but not enabled in FLC"},
+ { 0x30, "Max. buffer size too small"},
+ { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
+	{ 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
+ { 0x33, "Size over/underflow (allocate mode)"},
+ { 0x34, "Size over/underflow (reuse mode)"},
+	{ 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
+	{ 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
+ { 0x41, "SBC frame format not supported (allocate mode)"},
+ { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
+ { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
+ { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
+ { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
+ { 0x46, "Annotation length exceeds offset (reuse mode)"},
+ { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
+ { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
+	{ 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
+ { 0x51, "Unsupported IF reuse mode"},
+ { 0x52, "Unsupported FL use mode"},
+ { 0x53, "Unsupported RJD use mode"},
+ { 0x54, "Unsupported inline descriptor use mode"},
+ { 0xC0, "Table buffer pool 0 depletion"},
+ { 0xC1, "Table buffer pool 1 depletion"},
+ { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
+ { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
+ { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
+ { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
+ { 0xD0, "FLC read error"},
+ { 0xD1, "FL read error"},
+ { 0xD2, "FL write error"},
+ { 0xD3, "OF SGT write error"},
+ { 0xD4, "PTA read error"},
+ { 0xD5, "PTA write error"},
+ { 0xD6, "OF SGT F-bit write error"},
+ { 0xD7, "ASA write error"},
+ { 0xE1, "FLC[ICR]=0 ICID error"},
+ { 0xE2, "FLC[ICR]=1 ICID error"},
+ { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
+};
+
static const char * const cha_id_list[] = {
"",
"AES",
@@ -236,6 +284,27 @@ static void report_deco_status(struct device *jrdev, const u32 status,
status, error, idx_str, idx, err_str, err_err_code);
}
+static void report_qi_status(struct device *qidev, const u32 status,
+ const char *error)
+{
+ u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
+ const char *err_str = "unidentified error value 0x";
+ char err_err_code[3] = { 0 };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
+ if (qi_error_list[i].value == err_id)
+ break;
+
+ if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
+ err_str = qi_error_list[i].error_text;
+ else
+ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+ dev_err(qidev, "%08x: %s: %s%s\n",
+ status, error, err_str, err_err_code);
+}
+
static void report_jr_status(struct device *jrdev, const u32 status,
const char *error)
{
@@ -250,7 +319,7 @@ static void report_cond_code_status(struct device *jrdev, const u32 status,
status, error, __func__);
}
-void caam_jr_strstatus(struct device *jrdev, u32 status)
+void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
{
static const struct stat_src {
void (*report_ssed)(struct device *jrdev, const u32 status,
@@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
- { NULL, "Queue Manager Interface" },
+ { report_qi_status, "Queue Manager Interface" },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
{ NULL, NULL },
@@ -288,4 +357,8 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
else
dev_err(jrdev, "%d: unknown error source\n", ssrc);
}
-EXPORT_SYMBOL(caam_jr_strstatus);
+EXPORT_SYMBOL(caam_strstatus);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM error reporting");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 5aa332bac4b0..67ea94079837 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -8,7 +8,11 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
-void caam_jr_strstatus(struct device *jrdev, u32 status);
+
+void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
+
+#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
+#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index acdd72016ffe..d50085a03597 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* CAAM/SEC 4.x transport/backend driver
* JobR backend functionality
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 67f7f8c42c93..b84e6c8b1e13 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -84,13 +84,6 @@ static u64 times_congested;
#endif
/*
- * CPU from where the module initialised. This is required because QMan driver
- * requires CGRs to be removed from same CPU from where they were originally
- * allocated.
- */
-static int mod_init_cpu;
-
-/*
 * This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
* doing malloc on the hotpath.
@@ -492,12 +485,11 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
-int caam_qi_shutdown(struct device *qidev)
+void caam_qi_shutdown(struct device *qidev)
{
- int i, ret;
+ int i;
struct caam_qi_priv *priv = dev_get_drvdata(qidev);
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
for_each_cpu(i, cpus) {
struct napi_struct *irqtask;
@@ -510,26 +502,12 @@ int caam_qi_shutdown(struct device *qidev)
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
}
- /*
- * QMan driver requires CGRs to be deleted from same CPU from where they
- * were instantiated. Hence we get the module removal execute from the
- * same CPU from where it was originally inserted.
- */
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
- ret = qman_delete_cgr(&priv->cgr);
- if (ret)
- dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
- else
- qman_release_cgrid(priv->cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr);
+ qman_release_cgrid(priv->cgr.cgrid);
kmem_cache_destroy(qi_cache);
- /* Now that we're done with the CGRs, restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
-
platform_device_unregister(priv->qi_pdev);
- return ret;
}
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -718,22 +696,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
static struct platform_device_info qi_pdev_info = {
.name = "caam_qi",
.id = PLATFORM_DEVID_NONE
};
- /*
- * QMAN requires CGRs to be removed from same CPU+portal from where it
- * was originally allocated. Hence we need to note down the
- * initialisation CPU and use the same CPU for module exit.
- * We select the first CPU to from the list of portal owning CPUs.
- * Then we pin module init to this CPU.
- */
- mod_init_cpu = cpumask_first(cpus);
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
qi_pdev_info.parent = ctrldev;
qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
qi_pdev = platform_device_register_full(&qi_pdev_info);
@@ -795,8 +762,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
return -ENOMEM;
}
- /* Done with the CGRs; restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
&times_congested, &caam_fops_u64_ro);
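/*
 * A sketch (not part of the patch) of why the shutdown path above gets
 * simpler: qman_delete_cgr_safe() runs the deletion on the CPU that owns
 * the CGR (the QMan driver handles the cross-CPU call internally), so the
 * caller no longer pins itself with set_cpus_allowed_ptr(). Teardown
 * collapses to two unconditional calls:
 */
static void example_cgr_teardown(struct qman_cgr *cgr)
{
	qman_delete_cgr_safe(cgr);	/* executes on the owning CPU */
	qman_release_cgrid(cgr->cgrid);
}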
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 357b69f57072..f93c9c7ed430 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -62,7 +62,6 @@ typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
enum optype {
ENCRYPT,
DECRYPT,
- GIVENCRYPT,
NUM_OP
};
@@ -174,7 +173,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
int caam_qi_init(struct platform_device *pdev);
-int caam_qi_shutdown(struct device *dev);
+void caam_qi_shutdown(struct device *dev);
/**
* qi_cache_alloc - Allocate buffers from CAAM-QI cache
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 4fb91ba39c36..457815f965c0 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -70,22 +70,22 @@
extern bool caam_little_end;
extern bool caam_imx;
-#define caam_to_cpu(len) \
-static inline u##len caam##len ## _to_cpu(u##len val) \
-{ \
- if (caam_little_end) \
- return le##len ## _to_cpu(val); \
- else \
- return be##len ## _to_cpu(val); \
+#define caam_to_cpu(len) \
+static inline u##len caam##len ## _to_cpu(u##len val) \
+{ \
+ if (caam_little_end) \
+ return le##len ## _to_cpu((__force __le##len)val); \
+ else \
+ return be##len ## _to_cpu((__force __be##len)val); \
}
-#define cpu_to_caam(len) \
-static inline u##len cpu_to_caam##len(u##len val) \
-{ \
- if (caam_little_end) \
- return cpu_to_le##len(val); \
- else \
- return cpu_to_be##len(val); \
+#define cpu_to_caam(len) \
+static inline u##len cpu_to_caam##len(u##len val) \
+{ \
+ if (caam_little_end) \
+ return (__force u##len)cpu_to_le##len(val); \
+ else \
+ return (__force u##len)cpu_to_be##len(val); \
}
caam_to_cpu(16)
@@ -633,6 +633,8 @@ struct caam_job_ring {
#define JRSTA_DECOERR_INVSIGN 0x86
#define JRSTA_DECOERR_DSASIGN 0x87
+#define JRSTA_QIERR_ERROR_MASK 0x00ff
+
#define JRSTA_CCBERR_JUMP 0x08000000
#define JRSTA_CCBERR_INDEX_MASK 0xff00
#define JRSTA_CCBERR_INDEX_SHIFT 8
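/*
 * For reference, what caam_to_cpu(32) expands to after the change above.
 * The __force casts only silence sparse's endianness checking; the
 * generated machine code is unchanged:
 */
static inline u32 caam32_to_cpu(u32 val)
{
	if (caam_little_end)
		return le32_to_cpu((__force __le32)val);
	else
		return be32_to_cpu((__force __be32)val);
}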
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
index d000b4df745f..b3e1aaaeffea 100644
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
* Copyright 2016-2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SG_SW_QM_H
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
index b5b4c12179df..c9378402a5f8 100644
--- a/drivers/crypto/caam/sg_sw_qm2.h
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -1,35 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2015-2016 Freescale Semiconductor, Inc.
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the names of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SG_SW_QM2_H_
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index b0ba4331944b..ca549c5dc08e 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -308,21 +308,11 @@ void do_request_cleanup(struct cpt_vf *cptvf,
}
}
- if (info->scatter_components)
- kzfree(info->scatter_components);
-
- if (info->gather_components)
- kzfree(info->gather_components);
-
- if (info->out_buffer)
- kzfree(info->out_buffer);
-
- if (info->in_buffer)
- kzfree(info->in_buffer);
-
- if (info->completion_addr)
- kzfree((void *)info->completion_addr);
-
+ kzfree(info->scatter_components);
+ kzfree(info->gather_components);
+ kzfree(info->out_buffer);
+ kzfree(info->in_buffer);
+ kzfree((void *)info->completion_addr);
kzfree(info);
}
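/*
 * The simplification above works because kzfree(), like kfree(), is a
 * no-op for a NULL pointer, so each "if (ptr) kzfree(ptr)" guard was
 * redundant. A minimal equivalent of the pattern:
 */
static void example_cleanup(void *a, void *b)
{
	kzfree(a);	/* safe even when a == NULL */
	kzfree(b);
}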
diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile
index 45b7379e8e30..e12954791673 100644
--- a/drivers/crypto/cavium/nitrox/Makefile
+++ b/drivers/crypto/cavium/nitrox/Makefile
@@ -7,3 +7,6 @@ n5pf-objs := nitrox_main.o \
nitrox_hal.o \
nitrox_reqmgr.o \
nitrox_algs.o
+
+n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o
+n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o
diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h
index 312f72801af6..863143a8336b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_common.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_common.h
@@ -12,32 +12,15 @@ void crypto_free_context(void *ctx);
struct nitrox_device *nitrox_get_first_device(void);
void nitrox_put_device(struct nitrox_device *ndev);
-void nitrox_pf_cleanup_isr(struct nitrox_device *ndev);
-int nitrox_pf_init_isr(struct nitrox_device *ndev);
-
int nitrox_common_sw_init(struct nitrox_device *ndev);
void nitrox_common_sw_cleanup(struct nitrox_device *ndev);
-void pkt_slc_resp_handler(unsigned long data);
+void pkt_slc_resp_tasklet(unsigned long data);
int nitrox_process_se_request(struct nitrox_device *ndev,
struct se_crypto_request *req,
completion_t cb,
struct skcipher_request *skreq);
void backlog_qflush_work(struct work_struct *work);
-void nitrox_config_emu_unit(struct nitrox_device *ndev);
-void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
-void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
-void nitrox_config_vfmode(struct nitrox_device *ndev, int mode);
-void nitrox_config_nps_unit(struct nitrox_device *ndev);
-void nitrox_config_pom_unit(struct nitrox_device *ndev);
-void nitrox_config_rand_unit(struct nitrox_device *ndev);
-void nitrox_config_efl_unit(struct nitrox_device *ndev);
-void nitrox_config_bmi_unit(struct nitrox_device *ndev);
-void nitrox_config_bmo_unit(struct nitrox_device *ndev);
-void nitrox_config_lbc_unit(struct nitrox_device *ndev);
-void invalidate_lbc(struct nitrox_device *ndev);
-void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
-void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
#endif /* __NITROX_COMMON_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h
index 9dcb7fdbe0a7..1ad27b1a87c5 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_csr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h
@@ -7,9 +7,16 @@
/* EMU clusters */
#define NR_CLUSTERS 4
+/* Maximum cores per cluster,
+ * varies based on partname
+ */
#define AE_CORES_PER_CLUSTER 20
#define SE_CORES_PER_CLUSTER 16
+#define AE_MAX_CORES (AE_CORES_PER_CLUSTER * NR_CLUSTERS)
+#define SE_MAX_CORES (SE_CORES_PER_CLUSTER * NR_CLUSTERS)
+#define ZIP_MAX_CORES 5
+
/* BIST registers */
#define EMU_BIST_STATUSX(_i) (0x1402700 + ((_i) * 0x40000))
#define UCD_BIST_STATUS 0x12C0070
@@ -111,6 +118,9 @@
#define LBC_ELM_VF65_128_INT 0x120C000
#define LBC_ELM_VF65_128_INT_ENA_W1S 0x120F000
+#define RST_BOOT 0x10C1600
+#define FUS_DAT1 0x10C1408
+
/* PEM registers */
#define PEM0_INT 0x1080428
@@ -1082,4 +1092,105 @@ union lbc_inval_status {
} s;
};
+/**
+ * union rst_boot: RST Boot Register
+ * @jtcsrdis: when set, internal CSR access via JTAG TAP controller
+ * is disabled
+ * @jt_tst_mode: JTAG test mode
+ * @io_supply: I/O power supply setting based on IO_VDD_SELECT pin:
+ * 0x1 = 1.8V
+ * 0x2 = 2.5V
+ * 0x4 = 3.3V
+ * All other values are reserved
+ * @pnr_mul: clock multiplier
+ * @lboot: last boot cause mask, resets only with PLL_DC_OK
+ * @rboot: determines whether core 0 remains in reset after
+ * a chip cold, warm or soft reset
+ * @rboot_pin: read only access to REMOTE_BOOT pin
+ */
+union rst_boot {
+ u64 value;
+ struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+ u64 raz_63 : 1;
+ u64 jtcsrdis : 1;
+ u64 raz_59_61 : 3;
+ u64 jt_tst_mode : 1;
+ u64 raz_40_57 : 18;
+ u64 io_supply : 3;
+ u64 raz_30_36 : 7;
+ u64 pnr_mul : 6;
+ u64 raz_12_23 : 12;
+ u64 lboot : 10;
+ u64 rboot : 1;
+ u64 rboot_pin : 1;
+#else
+ u64 rboot_pin : 1;
+ u64 rboot : 1;
+ u64 lboot : 10;
+ u64 raz_12_23 : 12;
+ u64 pnr_mul : 6;
+ u64 raz_30_36 : 7;
+ u64 io_supply : 3;
+ u64 raz_40_57 : 18;
+ u64 jt_tst_mode : 1;
+ u64 raz_59_61 : 3;
+ u64 jtcsrdis : 1;
+ u64 raz_63 : 1;
+#endif
+ };
+};
+
+/**
+ * union fus_dat1: Fuse Data 1 Register
+ * @pll_mul: main clock PLL multiplier hardware limit
+ * @pll_half_dis: main clock PLL control
+ * @efus_lck: efuse lockdown
+ * @zip_info: ZIP information
+ * @bar2_sz_conf: when zero, BAR2 size conforms to
+ * PCIe specification
+ * @efus_ign: efuse ignore
+ * @nozip: ZIP disable
+ * @pll_alt_matrix: select alternate PLL matrix
+ * @pll_bwadj_denom: select CLKF denominator for
+ * BWADJ value
+ * @chip_id: chip ID
+ */
+union fus_dat1 {
+ u64 value;
+ struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+ u64 raz_57_63 : 7;
+ u64 pll_mul : 3;
+ u64 pll_half_dis : 1;
+ u64 raz_43_52 : 10;
+ u64 efus_lck : 3;
+ u64 raz_26_39 : 14;
+ u64 zip_info : 5;
+ u64 bar2_sz_conf : 1;
+ u64 efus_ign : 1;
+ u64 nozip : 1;
+ u64 raz_11_17 : 7;
+ u64 pll_alt_matrix : 1;
+ u64 pll_bwadj_denom : 2;
+ u64 chip_id : 8;
+#else
+ u64 chip_id : 8;
+ u64 pll_bwadj_denom : 2;
+ u64 pll_alt_matrix : 1;
+ u64 raz_11_17 : 7;
+ u64 nozip : 1;
+ u64 efus_ign : 1;
+ u64 bar2_sz_conf : 1;
+ u64 zip_info : 5;
+ u64 raz_26_39 : 14;
+ u64 efus_lck : 3;
+ u64 raz_43_52 : 10;
+ u64 pll_half_dis : 1;
+ u64 pll_mul : 3;
+ u64 raz_57_63 : 7;
+#endif
+ };
+};
+
#endif /* __NITROX_CSR_H */
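/*
 * A sketch (not part of the patch) of how these register unions are
 * consumed: read the 64-bit CSR into .value, then pick fields out of the
 * endianness-correct bitfield view. nitrox_read_csr() and ZIP_MAX_CORES
 * come from the driver headers; the function name is illustrative.
 */
static int example_zip_cores(struct nitrox_device *ndev)
{
	union fus_dat1 fus;

	fus.value = nitrox_read_csr(ndev, FUS_DAT1);
	if (fus.nozip)
		return 0;			/* ZIP fused off entirely */
	/* each set zip_info bit marks one dead core */
	return ZIP_MAX_CORES - hweight8(fus.zip_info);
}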
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
new file mode 100644
index 000000000000..5f3cd5fafe04
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include "nitrox_csr.h"
+#include "nitrox_dev.h"
+
+static int firmware_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
+ return 0;
+}
+
+static int firmware_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, firmware_show, inode->i_private);
+}
+
+static const struct file_operations firmware_fops = {
+ .owner = THIS_MODULE,
+ .open = firmware_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int device_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "NITROX [%d]\n", ndev->idx);
+ seq_printf(s, " Part Name: %s\n", ndev->hw.partname);
+ seq_printf(s, " Frequency: %d MHz\n", ndev->hw.freq);
+ seq_printf(s, " Device ID: 0x%0x\n", ndev->hw.device_id);
+ seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id);
+ seq_printf(s, " Cores: [AE=%u SE=%u ZIP=%u]\n",
+ ndev->hw.ae_cores, ndev->hw.se_cores, ndev->hw.zip_cores);
+
+ return 0;
+}
+
+static int nitrox_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, device_show, inode->i_private);
+}
+
+static const struct file_operations nitrox_fops = {
+ .owner = THIS_MODULE,
+ .open = nitrox_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int stats_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "NITROX [%d] Request Statistics\n", ndev->idx);
+ seq_printf(s, " Posted: %llu\n",
+ (u64)atomic64_read(&ndev->stats.posted));
+ seq_printf(s, " Completed: %llu\n",
+ (u64)atomic64_read(&ndev->stats.completed));
+ seq_printf(s, " Dropped: %llu\n",
+ (u64)atomic64_read(&ndev->stats.dropped));
+
+ return 0;
+}
+
+static int nitrox_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, stats_show, inode->i_private);
+}
+
+static const struct file_operations nitrox_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = nitrox_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{
+ debugfs_remove_recursive(ndev->debugfs_dir);
+ ndev->debugfs_dir = NULL;
+}
+
+int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+ struct dentry *dir, *f;
+
+ dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ ndev->debugfs_dir = dir;
+ f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
+ if (!f)
+ goto err;
+ f = debugfs_create_file("device", 0400, dir, ndev, &nitrox_fops);
+ if (!f)
+ goto err;
+ f = debugfs_create_file("stats", 0400, dir, ndev, &nitrox_stats_fops);
+ if (!f)
+ goto err;
+
+ return 0;
+
+err:
+ nitrox_debugfs_exit(ndev);
+ return -ENODEV;
+}
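/*
 * Aside, not part of the patch: kernels that already carry
 * DEFINE_SHOW_ATTRIBUTE() (linux/seq_file.h, since v4.16) can generate
 * each open()/file_operations pair above from the _show() function alone;
 * the generated names are <name>_open and <name>_fops, so the fops
 * variables would be renamed accordingly:
 */
DEFINE_SHOW_ATTRIBUTE(firmware);	/* firmware_show() -> firmware_fops */
DEFINE_SHOW_ATTRIBUTE(device);		/* device_show()   -> device_fops */
DEFINE_SHOW_ATTRIBUTE(stats);		/* stats_show()    -> stats_fops */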
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index af596455b420..283e252385fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -5,92 +5,123 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/if.h>
#define VERSION_LEN 32
+/**
+ * struct nitrox_cmdq - NITROX command queue
+ * @cmd_qlock: command queue lock
+ * @resp_qlock: response queue lock
+ * @backlog_qlock: backlog queue lock
+ * @ndev: NITROX device
+ * @response_head: submitted request list
+ * @backlog_head: backlog queue
+ * @dbell_csr_addr: doorbell register address for this queue
+ * @compl_cnt_csr_addr: completion count register address of the slc port
+ * @base: command queue base address
+ * @dma: dma address of the base
+ * @pending_count: requests pending at the device
+ * @backlog_count: backlog request count
+ * @write_idx: next write index for the command
+ * @instr_size: command size
+ * @qno: command queue number
+ * @qsize: command queue size
+ * @unalign_base: unaligned base address
+ * @unalign_dma: unaligned dma address
+ */
struct nitrox_cmdq {
- /* command queue lock */
- spinlock_t cmdq_lock;
- /* response list lock */
- spinlock_t response_lock;
- /* backlog list lock */
- spinlock_t backlog_lock;
-
- /* request submitted to chip, in progress */
+ spinlock_t cmd_qlock;
+ spinlock_t resp_qlock;
+ spinlock_t backlog_qlock;
+
+ struct nitrox_device *ndev;
struct list_head response_head;
- /* hw queue full, hold in backlog list */
struct list_head backlog_head;
- /* doorbell address */
u8 __iomem *dbell_csr_addr;
- /* base address of the queue */
- u8 *head;
+ u8 __iomem *compl_cnt_csr_addr;
+ u8 *base;
+ dma_addr_t dma;
- struct nitrox_device *ndev;
- /* flush pending backlog commands */
struct work_struct backlog_qflush;
- /* requests posted waiting for completion */
atomic_t pending_count;
- /* requests in backlog queues */
atomic_t backlog_count;
int write_idx;
- /* command size 32B/64B */
u8 instr_size;
u8 qno;
u32 qsize;
- /* unaligned addresses */
- u8 *head_unaligned;
- dma_addr_t dma_unaligned;
- /* dma address of the base */
- dma_addr_t dma;
+ u8 *unalign_base;
+ dma_addr_t unalign_dma;
};
+/**
+ * struct nitrox_hw - NITROX hardware information
+ * @partname: part name, e.g. CNN55xxx-xxx
+ * @fw_name: firmware version
+ * @freq: NITROX frequency
+ * @vendor_id: vendor ID
+ * @device_id: device ID
+ * @revision_id: revision ID
+ * @se_cores: number of symmetric cores
+ * @ae_cores: number of asymmetric cores
+ * @zip_cores: number of zip cores
+ */
struct nitrox_hw {
- /* firmware version */
+ char partname[IFNAMSIZ * 2];
char fw_name[VERSION_LEN];
+ int freq;
u16 vendor_id;
u16 device_id;
u8 revision_id;
- /* CNN55XX cores */
u8 se_cores;
u8 ae_cores;
u8 zip_cores;
};
-#define MAX_MSIX_VECTOR_NAME 20
-/**
- * vectors for queues (64 AE, 64 SE and 64 ZIP) and
- * error condition/mailbox.
- */
-#define MAX_MSIX_VECTORS 192
-
-struct nitrox_msix {
- struct msix_entry *entries;
- char **names;
- DECLARE_BITMAP(irqs, MAX_MSIX_VECTORS);
- u32 nr_entries;
+struct nitrox_stats {
+ atomic64_t posted;
+ atomic64_t completed;
+ atomic64_t dropped;
};
-struct bh_data {
- /* slc port completion count address */
- u8 __iomem *completion_cnt_csr_addr;
+#define IRQ_NAMESZ 32
+
+struct nitrox_q_vector {
+ char name[IRQ_NAMESZ];
+ bool valid;
+ int ring;
+ struct tasklet_struct resp_tasklet;
+ union {
+ struct nitrox_cmdq *cmdq;
+ struct nitrox_device *ndev;
+ };
+};
- struct nitrox_cmdq *cmdq;
- struct tasklet_struct resp_handler;
+/*
+ * NITROX Device states
+ */
+enum ndev_state {
+ __NDEV_NOT_READY,
+ __NDEV_READY,
+ __NDEV_IN_RESET,
};
-struct nitrox_bh {
- struct bh_data *slc;
+/* NITROX support modes for VF(s) */
+enum vf_mode {
+ __NDEV_MODE_PF,
+ __NDEV_MODE_VF16,
+ __NDEV_MODE_VF32,
+ __NDEV_MODE_VF64,
+ __NDEV_MODE_VF128,
};
-/* NITROX-V driver state */
-#define NITROX_UCODE_LOADED 0
-#define NITROX_READY 1
+#define __NDEV_SRIOV_BIT 0
/* command queue size */
#define DEFAULT_CMD_QLEN 2048
@@ -98,7 +129,6 @@ struct nitrox_bh {
#define CMD_TIMEOUT 2000
#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))
-#define PF_MODE 0
#define NITROX_CSR_ADDR(ndev, offset) \
((ndev)->bar_addr + (offset))
@@ -108,17 +138,18 @@ struct nitrox_bh {
* @list: pointer to linked list of devices
* @bar_addr: iomap address
* @pdev: PCI device information
- * @status: NITROX status
+ * @state: NITROX device state
+ * @flags: flags to indicate device features
* @timeout: Request timeout in jiffies
* @refcnt: Device usage count
* @idx: device index (0..N)
* @node: NUMA node id attached
* @qlen: Command queue length
* @nr_queues: Number of command queues
+ * @mode: Device mode PF/VF
* @ctx_pool: DMA pool for crypto context
- * @pkt_cmdqs: SE Command queues
- * @msix: MSI-X information
- * @bh: post processing work
+ * @pkt_inq: Packet input rings
+ * @qvec: MSI-X queue vectors information
* @hw: hardware information
* @debugfs_dir: debugfs directory
*/
@@ -128,7 +159,8 @@ struct nitrox_device {
u8 __iomem *bar_addr;
struct pci_dev *pdev;
- unsigned long status;
+ atomic_t state;
+ unsigned long flags;
unsigned long timeout;
refcount_t refcnt;
@@ -136,13 +168,16 @@ struct nitrox_device {
int node;
u16 qlen;
u16 nr_queues;
+ int num_vfs;
+ enum vf_mode mode;
struct dma_pool *ctx_pool;
- struct nitrox_cmdq *pkt_cmdqs;
+ struct nitrox_cmdq *pkt_inq;
- struct nitrox_msix msix;
- struct nitrox_bh bh;
+ struct nitrox_q_vector *qvec;
+ int num_vecs;
+ struct nitrox_stats stats;
struct nitrox_hw hw;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *debugfs_dir;
@@ -173,9 +208,22 @@ static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
writeq(value, (ndev->bar_addr + offset));
}
-static inline int nitrox_ready(struct nitrox_device *ndev)
+static inline bool nitrox_ready(struct nitrox_device *ndev)
{
- return test_bit(NITROX_READY, &ndev->status);
+ return atomic_read(&ndev->state) == __NDEV_READY;
}
+#ifdef CONFIG_DEBUG_FS
+int nitrox_debugfs_init(struct nitrox_device *ndev);
+void nitrox_debugfs_exit(struct nitrox_device *ndev);
+#else
+static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+ return 0;
+}
+
+static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{ }
+#endif
+
#endif /* __NITROX_DEV_H */
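/*
 * The atomic_t state above replaces the old bitops status word; the driver
 * pairs atomic_read()/atomic_set() with smp_mb__after_atomic(). A
 * hypothetical guarded transition (not used by the patch) showing why an
 * atomic, rather than a plain int, is the right type for the field:
 */
static bool example_mark_ready(struct nitrox_device *ndev)
{
	/* succeeds only if no concurrent reset/remove moved the state */
	return atomic_cmpxchg(&ndev->state, __NDEV_NOT_READY,
			      __NDEV_READY) == __NDEV_NOT_READY;
}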
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index ab4ccf2f9e77..a9b82387cf53 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -4,6 +4,8 @@
#include "nitrox_dev.h"
#include "nitrox_csr.h"
+#define PLL_REF_CLK 50
+
/**
* emu_enable_cores - Enable EMU cluster cores.
* @ndev: N5 device
@@ -117,7 +119,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
int i;
for (i = 0; i < ndev->nr_queues; i++) {
- struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
+ struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
union nps_pkt_in_instr_rsize pkt_in_rsize;
u64 offset;
@@ -256,7 +258,7 @@ void nitrox_config_nps_unit(struct nitrox_device *ndev)
/* disable ILK interface */
core_gbl_vfcfg.value = 0;
core_gbl_vfcfg.s.ilk_disable = 1;
- core_gbl_vfcfg.s.cfg = PF_MODE;
+ core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
/* config input and solicit ports */
nitrox_config_pkt_input_rings(ndev);
@@ -400,3 +402,68 @@ void nitrox_config_lbc_unit(struct nitrox_device *ndev)
offset = LBC_ELM_VF65_128_INT_ENA_W1S;
nitrox_write_csr(ndev, offset, (~0ULL));
}
+
+void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
+{
+ union nps_core_gbl_vfcfg vfcfg;
+
+ vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
+ vfcfg.s.cfg = mode & 0x7;
+
+ nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
+}
+
+void nitrox_get_hwinfo(struct nitrox_device *ndev)
+{
+ union emu_fuse_map emu_fuse;
+ union rst_boot rst_boot;
+ union fus_dat1 fus_dat1;
+ unsigned char name[IFNAMSIZ * 2] = {};
+ int i, dead_cores;
+ u64 offset;
+
+ /* get core frequency */
+ offset = RST_BOOT;
+ rst_boot.value = nitrox_read_csr(ndev, offset);
+ ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;
+
+ for (i = 0; i < NR_CLUSTERS; i++) {
+ offset = EMU_FUSE_MAPX(i);
+ emu_fuse.value = nitrox_read_csr(ndev, offset);
+ if (emu_fuse.s.valid) {
+ dead_cores = hweight32(emu_fuse.s.ae_fuse);
+ ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
+ dead_cores = hweight16(emu_fuse.s.se_fuse);
+ ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
+ }
+ }
+ /* find zip hardware availability */
+ offset = FUS_DAT1;
+ fus_dat1.value = nitrox_read_csr(ndev, offset);
+ if (!fus_dat1.nozip) {
+ dead_cores = hweight8(fus_dat1.zip_info);
+ ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
+ }
+
+ /* determine the partname CNN55<cores>-<freq><pincount>-<rev> */
+ if (ndev->hw.ae_cores == AE_MAX_CORES) {
+ switch (ndev->hw.se_cores) {
+ case SE_MAX_CORES:
+ i = snprintf(name, sizeof(name), "CNN5560");
+ break;
+ case 40:
+ i = snprintf(name, sizeof(name), "CNN5560s");
+ break;
+ }
+ } else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
+ i = snprintf(name, sizeof(name), "CNN5530");
+ } else {
+ i = snprintf(name, sizeof(name), "CNN5560i");
+ }
+
+ snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
+ ndev->hw.freq, ndev->hw.revision_id);
+
+ /* copy partname */
+ strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
+}
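/*
 * Worked example for the frequency derivation in nitrox_get_hwinfo()
 * above, assuming the 50 MHz PLL reference clock defined by PLL_REF_CLK:
 * with RST_BOOT.pnr_mul == 12, freq = (12 + 3) * 50 = 750 MHz, which a
 * CNN5560 at revision 1.0 would render as partname "CNN5560-750BG676-1.0".
 */
static int example_core_freq(unsigned int pnr_mul)
{
	return (pnr_mul + 3) * PLL_REF_CLK;	/* pnr_mul 12 -> 750 MHz */
}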
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h
new file mode 100644
index 000000000000..489ee64c119e
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_HAL_H
+#define __NITROX_HAL_H
+
+#include "nitrox_dev.h"
+
+void nitrox_config_emu_unit(struct nitrox_device *ndev);
+void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
+void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
+void nitrox_config_nps_unit(struct nitrox_device *ndev);
+void nitrox_config_pom_unit(struct nitrox_device *ndev);
+void nitrox_config_rand_unit(struct nitrox_device *ndev);
+void nitrox_config_efl_unit(struct nitrox_device *ndev);
+void nitrox_config_bmi_unit(struct nitrox_device *ndev);
+void nitrox_config_bmo_unit(struct nitrox_device *ndev);
+void nitrox_config_lbc_unit(struct nitrox_device *ndev);
+void invalidate_lbc(struct nitrox_device *ndev);
+void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
+void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
+void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode);
+void nitrox_get_hwinfo(struct nitrox_device *ndev);
+
+#endif /* __NITROX_HAL_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index ee0d70ba25d5..88a77b8fb3fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -6,9 +6,16 @@
#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
+#include "nitrox_hal.h"
+/*
+ * One vector for each type of ring
+ * - NPS packet ring, AQMQ ring and ZQMQ ring
+ */
#define NR_RING_VECTORS 3
-#define NPS_CORE_INT_ACTIVE_ENTRY 192
+/* base entry for packet ring/port */
+#define PKT_RING_MSIX_BASE 0
+#define NON_RING_MSIX_BASE 192
/**
* nps_pkt_slc_isr - IRQ handler for NPS solicit port
@@ -17,13 +24,14 @@
*/
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
- struct bh_data *slc = data;
- union nps_pkt_slc_cnts pkt_slc_cnts;
+ struct nitrox_q_vector *qvec = data;
+ union nps_pkt_slc_cnts slc_cnts;
+ struct nitrox_cmdq *cmdq = qvec->cmdq;
- pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
+ slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
/* New packet on SLC output port */
- if (pkt_slc_cnts.s.slc_int)
- tasklet_hi_schedule(&slc->resp_handler);
+ if (slc_cnts.s.slc_int)
+ tasklet_hi_schedule(&qvec->resp_tasklet);
return IRQ_HANDLED;
}
@@ -190,165 +198,92 @@ static void clear_bmi_err_intr(struct nitrox_device *ndev)
dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
}
+static void nps_core_int_tasklet(unsigned long data)
+{
+ struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
+ struct nitrox_device *ndev = qvec->ndev;
+
+ /* if PF mode, do queue recovery */
+ if (ndev->mode == __NDEV_MODE_PF) {
+ } else {
+ /*
+ * if VF(s) are enabled, communicate the error information
+ * to the VF(s)
+ */
+ }
+}
+
/**
- * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
- * @ndev: NITROX device
+ * nps_core_int_isr - interrupt handler for NITROX errors and
+ * mailbox communication
*/
-static void clear_nps_core_int_active(struct nitrox_device *ndev)
+static irqreturn_t nps_core_int_isr(int irq, void *data)
{
- union nps_core_int_active core_int_active;
+ struct nitrox_device *ndev = data;
+ union nps_core_int_active core_int;
- core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
+ core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
- if (core_int_active.s.nps_core)
+ if (core_int.s.nps_core)
clear_nps_core_err_intr(ndev);
- if (core_int_active.s.nps_pkt)
+ if (core_int.s.nps_pkt)
clear_nps_pkt_err_intr(ndev);
- if (core_int_active.s.pom)
+ if (core_int.s.pom)
clear_pom_err_intr(ndev);
- if (core_int_active.s.pem)
+ if (core_int.s.pem)
clear_pem_err_intr(ndev);
- if (core_int_active.s.lbc)
+ if (core_int.s.lbc)
clear_lbc_err_intr(ndev);
- if (core_int_active.s.efl)
+ if (core_int.s.efl)
clear_efl_err_intr(ndev);
- if (core_int_active.s.bmi)
+ if (core_int.s.bmi)
clear_bmi_err_intr(ndev);
/* If more work callback the ISR, set resend */
- core_int_active.s.resend = 1;
- nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
-}
-
-static irqreturn_t nps_core_int_isr(int irq, void *data)
-{
- struct nitrox_device *ndev = data;
-
- clear_nps_core_int_active(ndev);
+ core_int.s.resend = 1;
+ nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);
return IRQ_HANDLED;
}
-static int nitrox_enable_msix(struct nitrox_device *ndev)
+void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
- struct msix_entry *entries;
- char **names;
- int i, nr_entries, ret;
-
- /*
- * PF MSI-X vectors
- *
- * Entry 0: NPS PKT ring 0
- * Entry 1: AQMQ ring 0
- * Entry 2: ZQM ring 0
- * Entry 3: NPS PKT ring 1
- * Entry 4: AQMQ ring 1
- * Entry 5: ZQM ring 1
- * ....
- * Entry 192: NPS_CORE_INT_ACTIVE
- */
- nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
- entries = kcalloc_node(nr_entries, sizeof(struct msix_entry),
- GFP_KERNEL, ndev->node);
- if (!entries)
- return -ENOMEM;
-
- names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
- if (!names) {
- kfree(entries);
- return -ENOMEM;
- }
-
- /* fill entires */
- for (i = 0; i < (nr_entries - 1); i++)
- entries[i].entry = i;
-
- entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;
-
- for (i = 0; i < nr_entries; i++) {
- *(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
- if (!(*(names + i))) {
- ret = -ENOMEM;
- goto msix_fail;
- }
- }
- ndev->msix.entries = entries;
- ndev->msix.names = names;
- ndev->msix.nr_entries = nr_entries;
-
- ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
- ndev->msix.nr_entries);
- if (ret) {
- dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
- ret);
- goto msix_fail;
- }
- return 0;
-
-msix_fail:
- for (i = 0; i < nr_entries; i++)
- kfree(*(names + i));
-
- kfree(entries);
- kfree(names);
- return ret;
-}
-
-static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
-{
- int i;
-
- if (!ndev->bh.slc)
- return;
-
- for (i = 0; i < ndev->nr_queues; i++) {
- struct bh_data *bh = &ndev->bh.slc[i];
-
- tasklet_disable(&bh->resp_handler);
- tasklet_kill(&bh->resp_handler);
- }
- kfree(ndev->bh.slc);
- ndev->bh.slc = NULL;
-}
-
-static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
-{
- u32 size;
+ struct pci_dev *pdev = ndev->pdev;
int i;
- size = ndev->nr_queues * sizeof(struct bh_data);
- ndev->bh.slc = kzalloc(size, GFP_KERNEL);
- if (!ndev->bh.slc)
- return -ENOMEM;
+ for (i = 0; i < ndev->num_vecs; i++) {
+ struct nitrox_q_vector *qvec;
+ int vec;
- for (i = 0; i < ndev->nr_queues; i++) {
- struct bh_data *bh = &ndev->bh.slc[i];
- u64 offset;
+ qvec = ndev->qvec + i;
+ if (!qvec->valid)
+ continue;
- offset = NPS_PKT_SLC_CNTSX(i);
- /* pre calculate completion count address */
- bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
- bh->cmdq = &ndev->pkt_cmdqs[i];
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ irq_set_affinity_hint(vec, NULL);
+ free_irq(vec, qvec);
- tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
- (unsigned long)bh);
+ tasklet_disable(&qvec->resp_tasklet);
+ tasklet_kill(&qvec->resp_tasklet);
+ qvec->valid = false;
}
-
- return 0;
+ kfree(ndev->qvec);
+ pci_free_irq_vectors(pdev);
}
-static int nitrox_request_irqs(struct nitrox_device *ndev)
+int nitrox_register_interrupts(struct nitrox_device *ndev)
{
struct pci_dev *pdev = ndev->pdev;
- struct msix_entry *msix_ent = ndev->msix.entries;
- int nr_ring_vectors, i = 0, ring, cpu, ret;
- char *name;
+ struct nitrox_q_vector *qvec;
+ int nr_vecs, vec, cpu;
+ int ret, i;
/*
* PF MSI-X vectors
@@ -357,112 +292,76 @@ static int nitrox_request_irqs(struct nitrox_device *ndev)
* Entry 1: AQMQ ring 0
* Entry 2: ZQM ring 0
* Entry 3: NPS PKT ring 1
+ * Entry 4: AQMQ ring 1
+ * Entry 5: ZQM ring 1
* ....
* Entry 192: NPS_CORE_INT_ACTIVE
*/
- nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;
-
- /* request irq for pkt ring/ports only */
- while (i < nr_ring_vectors) {
- name = *(ndev->msix.names + i);
- ring = (i / NR_RING_VECTORS);
- snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
- ndev->idx, ring);
+ nr_vecs = pci_msix_vec_count(pdev);
- ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
- name, &ndev->bh.slc[ring]);
- if (ret) {
- dev_err(&pdev->dev, "failed to get irq %d for %s\n",
- msix_ent[i].vector, name);
- return ret;
- }
- cpu = ring % num_online_cpus();
- irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));
-
- set_bit(i, ndev->msix.irqs);
- i += NR_RING_VECTORS;
- }
-
- /* Request IRQ for NPS_CORE_INT_ACTIVE */
- name = *(ndev->msix.names + i);
- snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
- ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
- if (ret) {
- dev_err(&pdev->dev, "failed to get irq %d for %s\n",
- msix_ent[i].vector, name);
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
return ret;
}
- set_bit(i, ndev->msix.irqs);
+ ndev->num_vecs = nr_vecs;
- return 0;
-}
-
-static void nitrox_disable_msix(struct nitrox_device *ndev)
-{
- struct msix_entry *msix_ent = ndev->msix.entries;
- char **names = ndev->msix.names;
- int i = 0, ring, nr_ring_vectors;
-
- nr_ring_vectors = ndev->msix.nr_entries - 1;
-
- /* clear pkt ring irqs */
- while (i < nr_ring_vectors) {
- if (test_and_clear_bit(i, ndev->msix.irqs)) {
- ring = (i / NR_RING_VECTORS);
- irq_set_affinity_hint(msix_ent[i].vector, NULL);
- free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
- }
- i += NR_RING_VECTORS;
+ ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
+ if (!ndev->qvec) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
}
- irq_set_affinity_hint(msix_ent[i].vector, NULL);
- free_irq(msix_ent[i].vector, ndev);
- clear_bit(i, ndev->msix.irqs);
- kfree(ndev->msix.entries);
- for (i = 0; i < ndev->msix.nr_entries; i++)
- kfree(*(names + i));
+ /* request irqs for packet rings/ports */
+ for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
+ qvec = &ndev->qvec[i];
- kfree(names);
- pci_disable_msix(ndev->pdev);
-}
-
-/**
- * nitrox_pf_cleanup_isr: Cleanup PF MSI-X and IRQ
- * @ndev: NITROX device
- */
-void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
-{
- nitrox_disable_msix(ndev);
- nitrox_cleanup_pkt_slc_bh(ndev);
-}
+ qvec->ring = i / NR_RING_VECTORS;
+ if (qvec->ring >= ndev->nr_queues)
+ break;
-/**
- * nitrox_init_isr - Initialize PF MSI-X vectors and IRQ
- * @ndev: NITROX device
- *
- * Return: 0 on success, a negative value on failure.
- */
-int nitrox_pf_init_isr(struct nitrox_device *ndev)
-{
- int err;
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
+ if (ret) {
+ dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
+ qvec->ring);
+ goto irq_fail;
+ }
+ cpu = qvec->ring % num_online_cpus();
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
- err = nitrox_setup_pkt_slc_bh(ndev);
- if (err)
- return err;
+ tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
+ (unsigned long)qvec);
+ qvec->cmdq = &ndev->pkt_inq[qvec->ring];
+ qvec->valid = true;
+ }
- err = nitrox_enable_msix(ndev);
- if (err)
- goto msix_fail;
+ /* request irqs for non ring vectors */
+ i = NON_RING_MSIX_BASE;
+ qvec = &ndev->qvec[i];
- err = nitrox_request_irqs(ndev);
- if (err)
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
+ if (ret) {
+ dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
goto irq_fail;
+ }
+ cpu = num_online_cpus();
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
+
+ tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
+ (unsigned long)qvec);
+ qvec->ndev = ndev;
+ qvec->valid = true;
return 0;
irq_fail:
- nitrox_disable_msix(ndev);
-msix_fail:
- nitrox_cleanup_pkt_slc_bh(ndev);
- return err;
+ nitrox_unregister_interrupts(ndev);
+ return ret;
}
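/*
 * The conversion above follows the generic managed MSI-X idiom that
 * replaced open-coded struct msix_entry arrays; a stripped-down sketch of
 * the same pattern with illustrative names:
 */
static int example_setup_irqs(struct pci_dev *pdev, irq_handler_t handler,
			      void *data)
{
	int nvec, vec, ret;

	nvec = pci_msix_vec_count(pdev);
	if (nvec < 0)
		return nvec;

	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;

	vec = pci_irq_vector(pdev, 0);	/* Linux IRQ number for entry 0 */
	ret = request_irq(vec, handler, 0, "example", data);
	if (ret)
		pci_free_irq_vectors(pdev);
	return ret;
}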
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.h b/drivers/crypto/cavium/nitrox/nitrox_isr.h
new file mode 100644
index 000000000000..63418a6cc52c
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_ISR_H
+#define __NITROX_ISR_H
+
+#include "nitrox_dev.h"
+
+int nitrox_register_interrupts(struct nitrox_device *ndev);
+void nitrox_unregister_interrupts(struct nitrox_device *ndev);
+
+#endif /* __NITROX_ISR_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4d31df07777f..2260efa42308 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -17,30 +17,27 @@
#define CRYPTO_CTX_SIZE 256
-/* command queue alignments */
-#define PKT_IN_ALIGN 16
+/* packet input ring alignments */
+#define PKTIN_Q_ALIGN_BYTES 16
-static int cmdq_common_init(struct nitrox_cmdq *cmdq)
+static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
struct nitrox_device *ndev = cmdq->ndev;
- u32 qsize;
-
- qsize = (ndev->qlen) * cmdq->instr_size;
- cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
- (qsize + PKT_IN_ALIGN),
- &cmdq->dma_unaligned,
- GFP_KERNEL);
- if (!cmdq->head_unaligned)
+
+ cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
+ cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
+ &cmdq->unalign_dma,
+ GFP_KERNEL);
+ if (!cmdq->unalign_base)
return -ENOMEM;
- cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
- cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
- cmdq->qsize = (qsize + PKT_IN_ALIGN);
+ cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
+ cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
cmdq->write_idx = 0;
- spin_lock_init(&cmdq->response_lock);
- spin_lock_init(&cmdq->cmdq_lock);
- spin_lock_init(&cmdq->backlog_lock);
+ spin_lock_init(&cmdq->cmd_qlock);
+ spin_lock_init(&cmdq->resp_qlock);
+ spin_lock_init(&cmdq->backlog_qlock);
INIT_LIST_HEAD(&cmdq->response_head);
INIT_LIST_HEAD(&cmdq->backlog_head);
@@ -51,68 +48,83 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
return 0;
}
-static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
+static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
+{
+ cmdq->write_idx = 0;
+ atomic_set(&cmdq->pending_count, 0);
+ atomic_set(&cmdq->backlog_count, 0);
+}
+
+static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = cmdq->ndev;
+ if (!cmdq->unalign_base)
+ return;
+
cancel_work_sync(&cmdq->backlog_qflush);
dma_free_coherent(DEV(ndev), cmdq->qsize,
- cmdq->head_unaligned, cmdq->dma_unaligned);
-
- atomic_set(&cmdq->pending_count, 0);
- atomic_set(&cmdq->backlog_count, 0);
+ cmdq->unalign_base, cmdq->unalign_dma);
+ nitrox_cmdq_reset(cmdq);
cmdq->dbell_csr_addr = NULL;
- cmdq->head = NULL;
+ cmdq->compl_cnt_csr_addr = NULL;
+ cmdq->unalign_base = NULL;
+ cmdq->base = NULL;
+ cmdq->unalign_dma = 0;
cmdq->dma = 0;
cmdq->qsize = 0;
cmdq->instr_size = 0;
}
-static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
+static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
{
int i;
for (i = 0; i < ndev->nr_queues; i++) {
- struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
+ struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
- cmdq_common_cleanup(cmdq);
+ nitrox_cmdq_cleanup(cmdq);
}
- kfree(ndev->pkt_cmdqs);
- ndev->pkt_cmdqs = NULL;
+ kfree(ndev->pkt_inq);
+ ndev->pkt_inq = NULL;
}
-static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
+static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
{
- int i, err, size;
+ int i, err;
- size = ndev->nr_queues * sizeof(struct nitrox_cmdq);
- ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL);
- if (!ndev->pkt_cmdqs)
+ ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
+ sizeof(struct nitrox_cmdq),
+ GFP_KERNEL, ndev->node);
+ if (!ndev->pkt_inq)
return -ENOMEM;
for (i = 0; i < ndev->nr_queues; i++) {
struct nitrox_cmdq *cmdq;
u64 offset;
- cmdq = &ndev->pkt_cmdqs[i];
+ cmdq = &ndev->pkt_inq[i];
cmdq->ndev = ndev;
cmdq->qno = i;
cmdq->instr_size = sizeof(struct nps_pkt_instr);
+ /* packet input ring doorbell address */
offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
- /* SE ring doorbell address for this queue */
cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+ /* packet solicit port completion count address */
+ offset = NPS_PKT_SLC_CNTSX(i);
+ cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
- err = cmdq_common_init(cmdq);
+ err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
if (err)
- goto pkt_cmdq_fail;
+ goto pktq_fail;
}
return 0;
-pkt_cmdq_fail:
- nitrox_cleanup_pkt_cmdqs(ndev);
+pktq_fail:
+ nitrox_free_pktin_queues(ndev);
return err;
}
@@ -122,7 +134,7 @@ static int create_crypto_dma_pool(struct nitrox_device *ndev)
/* Crypto context pool, 16 byte aligned */
size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
- ndev->ctx_pool = dma_pool_create("crypto-context",
+ ndev->ctx_pool = dma_pool_create("nitrox-context",
DEV(ndev), size, 16, 0);
if (!ndev->ctx_pool)
return -ENOMEM;
@@ -149,7 +161,7 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
void *vaddr;
dma_addr_t dma;
- vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
+ vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
if (!vaddr)
return NULL;
@@ -194,7 +206,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
if (err)
return err;
- err = nitrox_init_pkt_cmdqs(ndev);
+ err = nitrox_alloc_pktin_queues(ndev);
if (err)
destroy_crypto_dma_pool(ndev);
@@ -207,6 +219,6 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
*/
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
- nitrox_cleanup_pkt_cmdqs(ndev);
+ nitrox_free_pktin_queues(ndev);
destroy_crypto_dma_pool(ndev);
}
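/*
 * The alignment math in nitrox_cmdq_init() above, spelled out (a sketch,
 * not part of the patch): the DMA handle is rounded up to the alignment,
 * and the CPU pointer is advanced by the same delta so both views stay in
 * sync. E.g. with align == 16 and udma ending in ...08, *dma ends in ...10
 * and *base is ubase + 8.
 */
static void example_align(u8 *ubase, dma_addr_t udma, int align,
			  u8 **base, dma_addr_t *dma)
{
	*dma = PTR_ALIGN(udma, align);	/* round the DMA address up */
	*base = ubase + (*dma - udma);	/* keep the CPU view in sync */
}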
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index fee7cb2ce747..6595c95af9f1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -11,13 +11,15 @@
#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_csr.h"
+#include "nitrox_hal.h"
+#include "nitrox_isr.h"
#define CNN55XX_DEV_ID 0x12
#define MAX_PF_QUEUES 64
#define UCODE_HLEN 48
#define SE_GROUP 0
-#define DRIVER_VERSION "1.0"
+#define DRIVER_VERSION "1.1"
#define FW_DIR "cavium/"
/* SE microcode */
#define SE_FW FW_DIR "cnn55xx_se.fw"
@@ -42,6 +44,15 @@ static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
+#ifdef CONFIG_PCI_IOV
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
+#else
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ return 0;
+}
+#endif
+
/**
* struct ucode - Firmware Header
* @id: microcode ID
@@ -136,9 +147,6 @@ static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name)
write_to_ucd_unit(ndev, ucode);
release_firmware(fw);
- set_bit(NITROX_UCODE_LOADED, &ndev->status);
- /* barrier to sync with other cpus */
- smp_mb__after_atomic();
return 0;
}
@@ -210,7 +218,7 @@ void nitrox_put_device(struct nitrox_device *ndev)
smp_mb__after_atomic();
}
-static int nitrox_reset_device(struct pci_dev *pdev)
+static int nitrox_device_flr(struct pci_dev *pdev)
{
int pos = 0;
@@ -220,15 +228,10 @@ static int nitrox_reset_device(struct pci_dev *pdev)
return -ENOMEM;
}
- pos = pci_pcie_cap(pdev);
- if (!pos)
- return -ENOTTY;
+ /* check FLR support */
+ if (pcie_has_flr(pdev))
+ pcie_flr(pdev);
- if (!pci_wait_for_pending_transaction(pdev))
- dev_err(&pdev->dev, "waiting for pending transaction\n");
-
- pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
- msleep(100);
pci_restore_state(pdev);
return 0;
@@ -242,7 +245,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
if (err)
return err;
- err = nitrox_pf_init_isr(ndev);
+ err = nitrox_register_interrupts(ndev);
if (err)
nitrox_common_sw_cleanup(ndev);
@@ -251,7 +254,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
- nitrox_pf_cleanup_isr(ndev);
+ nitrox_unregister_interrupts(ndev);
nitrox_common_sw_cleanup(ndev);
}
@@ -284,26 +287,6 @@ static int nitrox_bist_check(struct nitrox_device *ndev)
return 0;
}
-static void nitrox_get_hwinfo(struct nitrox_device *ndev)
-{
- union emu_fuse_map emu_fuse;
- u64 offset;
- int i;
-
- for (i = 0; i < NR_CLUSTERS; i++) {
- u8 dead_cores;
-
- offset = EMU_FUSE_MAPX(i);
- emu_fuse.value = nitrox_read_csr(ndev, offset);
- if (emu_fuse.s.valid) {
- dead_cores = hweight32(emu_fuse.s.ae_fuse);
- ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
- dead_cores = hweight16(emu_fuse.s.se_fuse);
- ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
- }
- }
-}
-
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
int err;
@@ -336,135 +319,6 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev)
return 0;
}
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static int registers_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
- u64 offset;
-
- /* NPS DMA stats */
- offset = NPS_STATS_PKT_DMA_RD_CNT;
- seq_printf(s, "NPS_STATS_PKT_DMA_RD_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
- offset = NPS_STATS_PKT_DMA_WR_CNT;
- seq_printf(s, "NPS_STATS_PKT_DMA_WR_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
-
- /* BMI/BMO stats */
- offset = BMI_NPS_PKT_CNT;
- seq_printf(s, "BMI_NPS_PKT_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
- offset = BMO_NPS_SLC_PKT_CNT;
- seq_printf(s, "BMO_NPS_PKT_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
-
- return 0;
-}
-
-static int registers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, registers_show, inode->i_private);
-}
-
-static const struct file_operations register_fops = {
- .owner = THIS_MODULE,
- .open = registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int firmware_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
-
- seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
- return 0;
-}
-
-static int firmware_open(struct inode *inode, struct file *file)
-{
- return single_open(file, firmware_show, inode->i_private);
-}
-
-static const struct file_operations firmware_fops = {
- .owner = THIS_MODULE,
- .open = firmware_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int nitrox_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
-
- seq_printf(s, "NITROX-5 [idx: %d]\n", ndev->idx);
- seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id);
- seq_printf(s, " Cores [AE: %u SE: %u]\n",
- ndev->hw.ae_cores, ndev->hw.se_cores);
- seq_printf(s, " Number of Queues: %u\n", ndev->nr_queues);
- seq_printf(s, " Queue length: %u\n", ndev->qlen);
- seq_printf(s, " Node: %u\n", ndev->node);
-
- return 0;
-}
-
-static int nitrox_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nitrox_show, inode->i_private);
-}
-
-static const struct file_operations nitrox_fops = {
- .owner = THIS_MODULE,
- .open = nitrox_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void nitrox_debugfs_exit(struct nitrox_device *ndev)
-{
- debugfs_remove_recursive(ndev->debugfs_dir);
- ndev->debugfs_dir = NULL;
-}
-
-static int nitrox_debugfs_init(struct nitrox_device *ndev)
-{
- struct dentry *dir, *f;
-
- dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!dir)
- return -ENOMEM;
-
- ndev->debugfs_dir = dir;
- f = debugfs_create_file("counters", 0400, dir, ndev, &register_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("nitrox", 0400, dir, ndev, &nitrox_fops);
- if (!f)
- goto err;
-
- return 0;
-
-err:
- nitrox_debugfs_exit(ndev);
- return -ENODEV;
-}
-#else
-static int nitrox_debugfs_init(struct nitrox_device *ndev)
-{
- return 0;
-}
-
-static void nitrox_debugfs_exit(struct nitrox_device *ndev)
-{
-}
-#endif
-
/**
* nitrox_probe - NITROX Initialization function.
* @pdev: PCI device information struct
@@ -487,7 +341,7 @@ static int nitrox_probe(struct pci_dev *pdev,
return err;
/* do FLR */
- err = nitrox_reset_device(pdev);
+ err = nitrox_device_flr(pdev);
if (err) {
dev_err(&pdev->dev, "FLR failed\n");
pci_disable_device(pdev);
@@ -555,7 +409,12 @@ static int nitrox_probe(struct pci_dev *pdev,
if (err)
goto pf_hw_fail;
- set_bit(NITROX_READY, &ndev->status);
+ /* clear the statistics */
+ atomic64_set(&ndev->stats.posted, 0);
+ atomic64_set(&ndev->stats.completed, 0);
+ atomic64_set(&ndev->stats.dropped, 0);
+
+ atomic_set(&ndev->state, __NDEV_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
@@ -567,7 +426,7 @@ static int nitrox_probe(struct pci_dev *pdev,
crypto_fail:
nitrox_debugfs_exit(ndev);
- clear_bit(NITROX_READY, &ndev->status);
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
pf_hw_fail:
@@ -602,11 +461,16 @@ static void nitrox_remove(struct pci_dev *pdev)
dev_info(DEV(ndev), "Removing Device %x:%x\n",
ndev->hw.vendor_id, ndev->hw.device_id);
- clear_bit(NITROX_READY, &ndev->status);
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
nitrox_remove_from_devlist(ndev);
+
+#ifdef CONFIG_PCI_IOV
+ /* disable SR-IOV */
+ nitrox_sriov_configure(pdev, 0);
+#endif
nitrox_crypto_unregister();
nitrox_debugfs_exit(ndev);
nitrox_pf_sw_cleanup(ndev);
@@ -632,6 +496,9 @@ static struct pci_driver nitrox_driver = {
.probe = nitrox_probe,
.remove = nitrox_remove,
.shutdown = nitrox_shutdown,
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = nitrox_sriov_configure,
+#endif
};
module_pci_driver(nitrox_driver);
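/*
 * The FLR rework above delegates the reset mechanics to the PCI core; a
 * sketch of the resulting idiom (save state, issue an FLR if the device
 * advertises one, restore state). pcie_flr() itself waits for pending
 * transactions and observes the mandatory post-reset delay:
 */
static int example_flr(struct pci_dev *pdev)
{
	int ret = pci_save_state(pdev);

	if (ret)
		return ret;
	if (pcie_has_flr(pdev))
		pcie_flr(pdev);
	pci_restore_state(pdev);
	return 0;
}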
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4a362fc22f62..3987cd84c033 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -382,11 +382,11 @@ static inline void backlog_list_add(struct nitrox_softreq *sr,
{
INIT_LIST_HEAD(&sr->backlog);
- spin_lock_bh(&cmdq->backlog_lock);
+ spin_lock_bh(&cmdq->backlog_qlock);
list_add_tail(&sr->backlog, &cmdq->backlog_head);
atomic_inc(&cmdq->backlog_count);
atomic_set(&sr->status, REQ_BACKLOG);
- spin_unlock_bh(&cmdq->backlog_lock);
+ spin_unlock_bh(&cmdq->backlog_qlock);
}
static inline void response_list_add(struct nitrox_softreq *sr,
@@ -394,17 +394,17 @@ static inline void response_list_add(struct nitrox_softreq *sr,
{
INIT_LIST_HEAD(&sr->response);
- spin_lock_bh(&cmdq->response_lock);
+ spin_lock_bh(&cmdq->resp_qlock);
list_add_tail(&sr->response, &cmdq->response_head);
- spin_unlock_bh(&cmdq->response_lock);
+ spin_unlock_bh(&cmdq->resp_qlock);
}
static inline void response_list_del(struct nitrox_softreq *sr,
struct nitrox_cmdq *cmdq)
{
- spin_lock_bh(&cmdq->response_lock);
+ spin_lock_bh(&cmdq->resp_qlock);
list_del(&sr->response);
- spin_unlock_bh(&cmdq->response_lock);
+ spin_unlock_bh(&cmdq->resp_qlock);
}
static struct nitrox_softreq *
@@ -439,11 +439,11 @@ static void post_se_instr(struct nitrox_softreq *sr,
int idx;
u8 *ent;
- spin_lock_bh(&cmdq->cmdq_lock);
+ spin_lock_bh(&cmdq->cmd_qlock);
idx = cmdq->write_idx;
/* copy the instruction */
- ent = cmdq->head + (idx * cmdq->instr_size);
+ ent = cmdq->base + (idx * cmdq->instr_size);
memcpy(ent, &sr->instr, cmdq->instr_size);
atomic_set(&sr->status, REQ_POSTED);
@@ -459,7 +459,10 @@ static void post_se_instr(struct nitrox_softreq *sr,
cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
- spin_unlock_bh(&cmdq->cmdq_lock);
+ spin_unlock_bh(&cmdq->cmd_qlock);
+
+ /* increment the posted command count */
+ atomic64_inc(&ndev->stats.posted);
}
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
@@ -471,7 +474,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
if (!atomic_read(&cmdq->backlog_count))
return 0;
- spin_lock_bh(&cmdq->backlog_lock);
+ spin_lock_bh(&cmdq->backlog_qlock);
list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
struct skcipher_request *skreq;
@@ -494,7 +497,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
/* backlog requests are posted, wakeup with -EINPROGRESS */
skcipher_request_complete(skreq, -EINPROGRESS);
}
- spin_unlock_bh(&cmdq->backlog_lock);
+ spin_unlock_bh(&cmdq->backlog_qlock);
return ret;
}
@@ -508,8 +511,11 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
post_backlog_cmds(cmdq);
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
- if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ /* increment drop count */
+ atomic64_inc(&ndev->stats.dropped);
return -ENOSPC;
+ }
/* add to backlog list */
backlog_list_add(sr, cmdq);
return -EBUSY;
@@ -572,7 +578,7 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
/* select the queue */
qno = smp_processor_id() % ndev->nr_queues;
- sr->cmdq = &ndev->pkt_cmdqs[qno];
+ sr->cmdq = &ndev->pkt_inq[qno];
/*
* 64-Byte Instruction Format
@@ -694,6 +700,7 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
READ_ONCE(sr->resp.orh));
}
atomic_dec(&cmdq->pending_count);
+ atomic64_inc(&ndev->stats.completed);
/* sync with other cpus */
smp_mb__after_atomic();
/* remove from response list */
@@ -714,18 +721,18 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
}
/**
- * pkt_slc_resp_handler - post processing of SE responses
+ * pkt_slc_resp_tasklet - post processing of SE responses
*/
-void pkt_slc_resp_handler(unsigned long data)
+void pkt_slc_resp_tasklet(unsigned long data)
{
- struct bh_data *bh = (void *)(uintptr_t)(data);
- struct nitrox_cmdq *cmdq = bh->cmdq;
- union nps_pkt_slc_cnts pkt_slc_cnts;
+ struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
+ struct nitrox_cmdq *cmdq = qvec->cmdq;
+ union nps_pkt_slc_cnts slc_cnts;
/* read completion count */
- pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
+ slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
/* resend the interrupt if more work to do */
- pkt_slc_cnts.s.resend = 1;
+ slc_cnts.s.resend = 1;
process_response_list(cmdq);
@@ -733,7 +740,7 @@ void pkt_slc_resp_handler(unsigned long data)
* clear the interrupt with resend bit enabled,
* MSI-X interrupt generates if Completion count > Threshold
*/
- writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
+ writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
/* order the writes */
mmiowb();
diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
new file mode 100644
index 000000000000..30c0aa874583
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_hal.h"
+#include "nitrox_common.h"
+#include "nitrox_isr.h"
+
+static inline bool num_vfs_valid(int num_vfs)
+{
+ bool valid = false;
+
+ switch (num_vfs) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ valid = true;
+ break;
+ }
+
+ return valid;
+}
+
+static inline enum vf_mode num_vfs_to_mode(int num_vfs)
+{
+ enum vf_mode mode = 0;
+
+ switch (num_vfs) {
+ case 0:
+ mode = __NDEV_MODE_PF;
+ break;
+ case 16:
+ mode = __NDEV_MODE_VF16;
+ break;
+ case 32:
+ mode = __NDEV_MODE_VF32;
+ break;
+ case 64:
+ mode = __NDEV_MODE_VF64;
+ break;
+ case 128:
+ mode = __NDEV_MODE_VF128;
+ break;
+ }
+
+ return mode;
+}
+
+static void pf_sriov_cleanup(struct nitrox_device *ndev)
+{
+ /* PF has no queues in SR-IOV mode */
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
+ /* unregister crypto algorithms */
+ nitrox_crypto_unregister();
+
+ /* cleanup PF resources */
+ nitrox_unregister_interrupts(ndev);
+ nitrox_common_sw_cleanup(ndev);
+}
+
+static int pf_sriov_init(struct nitrox_device *ndev)
+{
+ int err;
+
+ /* allocate resources for PF */
+ err = nitrox_common_sw_init(ndev);
+ if (err)
+ return err;
+
+ err = nitrox_register_interrupts(ndev);
+ if (err) {
+ nitrox_common_sw_cleanup(ndev);
+ return err;
+ }
+
+ /* configure the packet queues */
+ nitrox_config_pkt_input_rings(ndev);
+ nitrox_config_pkt_solicit_ports(ndev);
+
+ /* set device to ready state */
+ atomic_set(&ndev->state, __NDEV_READY);
+
+ /* register crypto algorithms */
+ return nitrox_crypto_register();
+}
+
+static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct nitrox_device *ndev = pci_get_drvdata(pdev);
+ int err;
+
+ if (!num_vfs_valid(num_vfs)) {
+ dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs);
+ return -EINVAL;
+ }
+
+ if (pci_num_vf(pdev) == num_vfs)
+ return num_vfs;
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_err(DEV(ndev), "failed to enable PCI sriov %d\n", err);
+ return err;
+ }
+ dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);
+
+ ndev->num_vfs = num_vfs;
+ ndev->mode = num_vfs_to_mode(num_vfs);
+ /* set bit in flags */
+ set_bit(__NDEV_SRIOV_BIT, &ndev->flags);
+
+ /* cleanup PF resources */
+ pf_sriov_cleanup(ndev);
+
+ config_nps_core_vfcfg_mode(ndev, ndev->mode);
+
+ return num_vfs;
+}
+
+static int nitrox_sriov_disable(struct pci_dev *pdev)
+{
+ struct nitrox_device *ndev = pci_get_drvdata(pdev);
+
+ if (!test_bit(__NDEV_SRIOV_BIT, &ndev->flags))
+ return 0;
+
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(DEV(ndev), "VFs are attached to VM. Can't disable SR-IOV\n");
+ return -EPERM;
+ }
+ pci_disable_sriov(pdev);
+ /* clear bit in flags */
+ clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
+
+ ndev->num_vfs = 0;
+ ndev->mode = __NDEV_MODE_PF;
+
+ config_nps_core_vfcfg_mode(ndev, ndev->mode);
+
+ return pf_sriov_init(ndev);
+}
+
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (!num_vfs)
+ return nitrox_sriov_disable(pdev);
+
+ return nitrox_sriov_enable(pdev, num_vfs);
+}
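
The new nitrox_sriov.c plugs into the PCI core's SR-IOV plumbing: writing N to the device's sriov_numvfs sysfs attribute lands in the driver's .sriov_configure callback with num_vfs == N, and 0 means disable. A minimal sketch of that contract, assuming a hypothetical driver "foo" rather than the nitrox code:

    #include <linux/pci.h>

    /* Sketch only: the generic .sriov_configure contract, not nitrox. */
    static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
    {
            if (num_vfs == 0) {
                    if (pci_vfs_assigned(pdev))     /* VFs handed to guests? */
                            return -EPERM;
                    pci_disable_sriov(pdev);
                    return 0;
            }

            /* On success the callback must return the count it enabled. */
            return pci_enable_sriov(pdev, num_vfs) ? : num_vfs;
    }
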
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 94b5bcf5b628..ca4630b8395f 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -102,7 +102,7 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
- return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
+ return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
@@ -151,12 +151,13 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
(ctx->u.aes.key_len != AES_KEYSIZE_256))
fallback = 1;
if (fallback) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq,
+ ctx->u.aes.tfm_skcipher);
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
*/
- skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
+ skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -203,12 +204,12 @@ static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *fallback_tfm;
+ struct crypto_sync_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
ctx->u.aes.key_len = 0;
- fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
+ fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
@@ -226,7 +227,7 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
+ crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
}
static int ccp_register_aes_xts_alg(struct list_head *head,
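
The ccp conversion above is one instance of a tree-wide pattern: a fallback tfm that is only ever used synchronously becomes a crypto_sync_skcipher, which is what makes SYNC_SKCIPHER_REQUEST_ON_STACK safe, since an async tfm's request may be too large for the stack. A minimal sketch of the pattern as a standalone helper, not the ccp code itself:

    #include <crypto/skcipher.h>

    static int fallback_encrypt(struct crypto_sync_skcipher *fb,
                                struct scatterlist *src,
                                struct scatterlist *dst,
                                unsigned int nbytes, u8 *iv)
    {
            int err;

            SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fb); /* legal: fb is sync */

            skcipher_request_set_sync_tfm(subreq, fb);
            skcipher_request_set_callback(subreq, 0, NULL, NULL);
            skcipher_request_set_crypt(subreq, src, dst, nbytes, iv);
            err = crypto_skcipher_encrypt(subreq);
            skcipher_request_zero(subreq);

            return err;
    }
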
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index b9fd090c46c2..28819e11db96 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -88,7 +88,7 @@ static inline struct ccp_crypto_ahash_alg *
/***** AES related defines *****/
struct ccp_aes_ctx {
/* Fallback cipher for XTS with unsupported unit sizes */
- struct crypto_skcipher *tfm_skcipher;
+ struct crypto_sync_skcipher *tfm_skcipher;
/* Cipher used to generate CMAC K1/K2 keys */
struct crypto_cipher *tfm_cipher;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 218739b961fe..d64a78ccc03e 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -31,13 +31,25 @@
((psp_master->api_major) >= _maj && \
(psp_master->api_minor) >= _min)
-#define DEVICE_NAME "sev"
-#define SEV_FW_FILE "amd/sev.fw"
+#define DEVICE_NAME "sev"
+#define SEV_FW_FILE "amd/sev.fw"
+#define SEV_FW_NAME_SIZE 64
static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;
static struct psp_device *psp_master;
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
+static int psp_timeout;
+
static struct psp_device *psp_alloc_struct(struct sp_device *sp)
{
struct device *dev = sp->dev;
@@ -82,10 +94,19 @@ done:
return IRQ_HANDLED;
}
-static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
+static int sev_wait_cmd_ioc(struct psp_device *psp,
+ unsigned int *reg, unsigned int timeout)
{
- wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
+ int ret;
+
+ ret = wait_event_timeout(psp->sev_int_queue,
+ psp->sev_int_rcvd, timeout * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
*reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
+
+ return 0;
}
static int sev_cmd_buffer_len(int cmd)
@@ -133,12 +154,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
if (!psp)
return -ENODEV;
+ if (psp_dead)
+ return -EBUSY;
+
/* Get the physical address of the command buffer */
phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
- dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n",
- cmd, phys_msb, phys_lsb);
+ dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+ cmd, phys_msb, phys_lsb, psp_timeout);
print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
sev_cmd_buffer_len(cmd), false);
@@ -154,7 +178,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
/* wait for command completion */
- sev_wait_cmd_ioc(psp, &reg);
+ ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
+ if (ret) {
+ if (psp_ret)
+ *psp_ret = 0;
+
+ dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd);
+ psp_dead = true;
+
+ return ret;
+ }
+
+ psp_timeout = psp_cmd_timeout;
if (psp_ret)
*psp_ret = reg & PSP_CMDRESP_ERR_MASK;
@@ -389,7 +424,7 @@ EXPORT_SYMBOL_GPL(psp_copy_user_blob);
static int sev_get_api_version(void)
{
struct sev_user_data_status *status;
- int error, ret;
+ int error = 0, ret;
status = &psp_master->status_cmd_buf;
ret = sev_platform_status(status, &error);
@@ -406,6 +441,41 @@ static int sev_get_api_version(void)
return 0;
}
+static int sev_get_firmware(struct device *dev,
+ const struct firmware **firmware)
+{
+ char fw_name_specific[SEV_FW_NAME_SIZE];
+ char fw_name_subset[SEV_FW_NAME_SIZE];
+
+ snprintf(fw_name_specific, sizeof(fw_name_specific),
+ "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ snprintf(fw_name_subset, sizeof(fw_name_subset),
+ "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
+ boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
+
+ /* Check for SEV FW for a particular model.
+ * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
+ *
+ * or
+ *
+ * Check for SEV FW common to a subset of models.
+ * Ex. amd_sev_fam17h_model0xh.sbin for
+ * Family 17h Model 00h -- Family 17h Model 0Fh
+ *
+ * or
+ *
+ * Fall back to using the generic name: sev.fw
+ */
+ if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ return 0;
+
+ return -ENOENT;
+}
+
/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
@@ -415,9 +485,10 @@ static int sev_update_firmware(struct device *dev)
struct page *p;
u64 data_size;
- ret = request_firmware(&firmware, SEV_FW_FILE, dev);
- if (ret < 0)
+ if (sev_get_firmware(dev, &firmware) == -ENOENT) {
+ dev_dbg(dev, "No SEV firmware file present\n");
return -1;
+ }
/*
* SEV FW expects the physical address given to it to be 32
@@ -888,6 +959,8 @@ void psp_pci_init(void)
psp_master = sp->psp_data;
+ psp_timeout = psp_probe_timeout;
+
if (sev_get_api_version())
goto err;
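
The timeout plumbing above leans on wait_event_timeout() semantics: it returns 0 if the condition is still false when the timeout expires, otherwise the number of jiffies remaining (at least 1). A minimal sketch with a placeholder device structure, not the psp types:

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct my_dev {                         /* placeholder, not psp_device */
            wait_queue_head_t wq;
            bool cmd_done;
    };

    static int wait_cmd(struct my_dev *dev, unsigned int timeout_secs)
    {
            long left;

            left = wait_event_timeout(dev->wq, dev->cmd_done,
                                      timeout_secs * HZ);
            if (!left)
                    return -ETIMEDOUT;      /* condition never became true */

            return 0;                       /* completed within the window */
    }
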
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 71734f254fd1..b75dc7db2d4a 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -33,8 +33,31 @@ struct sp_platform {
unsigned int irq_count;
};
-static const struct acpi_device_id sp_acpi_match[];
-static const struct of_device_id sp_of_match[];
+static const struct sp_dev_vdata dev_vdata[] = {
+ {
+ .bar = 0,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv3_platform,
+#endif
+ },
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id sp_acpi_match[] = {
+ { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id sp_of_match[] = {
+ { .compatible = "amd,ccp-seattle-v1a",
+ .data = (const void *)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sp_of_match);
+#endif
static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
{
@@ -201,32 +224,6 @@ static int sp_platform_resume(struct platform_device *pdev)
}
#endif
-static const struct sp_dev_vdata dev_vdata[] = {
- {
- .bar = 0,
-#ifdef CONFIG_CRYPTO_DEV_SP_CCP
- .ccp_vdata = &ccpv3_platform,
-#endif
- },
-};
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id sp_acpi_match[] = {
- { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id sp_of_match[] = {
- { .compatible = "amd,ccp-seattle-v1a",
- .data = (const void *)&dev_vdata[0] },
- { },
-};
-MODULE_DEVICE_TABLE(of, sp_of_match);
-#endif
-
static struct platform_driver sp_platform_driver = {
.driver = {
.name = "ccp",
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index a091ae57f902..45985b955d2c 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -449,8 +449,7 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc,
* @pdesc: pointer HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
-static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
- enum drv_cipher_mode mode)
+static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
}
@@ -461,8 +460,7 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
* @pdesc: pointer HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
-static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
- enum drv_crypto_direction mode)
+static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
}
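
Widening set_cipher_mode() and set_cipher_config0() to take a plain int sidesteps clang's -Wenum-conversion: callers feed values from more than one enum type into the same descriptor field. A contrived illustration, not the ccree enums:

    enum mode_a { MODE_A_CBC = 1 };
    enum mode_b { MODE_B_SHA1 = 7 };

    static void set_mode_enum(enum mode_a m) { (void)m; }
    static void set_mode_int(int m)          { (void)m; }

    static void demo(void)
    {
            set_mode_enum(MODE_B_SHA1);     /* -Wenum-conversion fires */
            set_mode_int(MODE_B_SHA1);      /* fine: plain int parameter */
    }
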
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 5c539af8ed60..db203f8be429 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+ int pci_chan_id)
{
struct cpl_rx_phys_dsgl *phys_cpl;
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
phys_cpl->rss_hdr_int.qid = htons(qid);
phys_cpl->rss_hdr_int.hash_val = 0;
+ phys_cpl->rss_hdr_int.channel = pci_chan_id;
}
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -671,7 +673,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
return min(srclen, dstlen);
}
-static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
u32 flags,
struct scatterlist *src,
struct scatterlist *dst,
@@ -681,9 +683,9 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
{
int err;
- SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
- skcipher_request_set_tfm(subreq, cipher);
+ skcipher_request_set_sync_tfm(subreq, cipher);
skcipher_request_set_callback(subreq, flags, NULL, NULL);
skcipher_request_set_crypt(subreq, src, dst,
nbytes, iv);
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
!!lcb, ctx->tx_qidx);
- chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
qid);
chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
((sizeof(chcr_req->wreq)) >> 4)));
@@ -854,13 +856,14 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
- crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+ crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
+ cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |=
- crypto_skcipher_get_flags(ablkctx->sw_cipher) &
+ crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
CRYPTO_TFM_RES_MASK;
return err;
}
@@ -1335,20 +1338,26 @@ static int chcr_device_init(struct chcr_context *ctx)
}
ctx->dev = u_ctx->dev;
adap = padap(ctx->dev);
- ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
- adap->vres.ncrypto_fc);
+ ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
- rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
- rxq_idx += id % rxq_perchan;
- txq_idx = ctx->dev->tx_channel_id * txq_perchan;
- txq_idx += id % txq_perchan;
spin_lock(&ctx->dev->lock_chcr_dev);
- ctx->rx_qidx = rxq_idx;
- ctx->tx_qidx = txq_idx;
+ ctx->tx_chan_id = ctx->dev->tx_channel_id;
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
ctx->dev->rx_channel_id = 0;
spin_unlock(&ctx->dev->lock_chcr_dev);
+ rxq_idx = ctx->tx_chan_id * rxq_perchan;
+ rxq_idx += id % rxq_perchan;
+ txq_idx = ctx->tx_chan_id * txq_perchan;
+ txq_idx += id % txq_perchan;
+ ctx->rx_qidx = rxq_idx;
+ ctx->tx_qidx = txq_idx;
+ /* Channel ID used by SGE to forward the packet to the host.
+ * The same value should be used in the cpl_fw6_pld RSS_CH field
+ * by FW. The driver programs the PCI channel ID to be used in FW
+ * at the time of queue allocation with the value "pi->tx_chan".
+ */
+ ctx->pci_chan_id = txq_idx / txq_perchan;
}
out:
return err;
@@ -1360,8 +1369,8 @@ static int chcr_cra_init(struct crypto_tfm *tfm)
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->cra_name);
return PTR_ERR(ablkctx->sw_cipher);
@@ -1390,8 +1399,8 @@ static int chcr_rfc3686_init(struct crypto_tfm *tfm)
/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
* cannot be used as fallback in chcr_handle_cipher_response
*/
- ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->cra_name);
return PTR_ERR(ablkctx->sw_cipher);
@@ -1406,7 +1415,7 @@ static void chcr_cra_exit(struct crypto_tfm *tfm)
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- crypto_free_skcipher(ablkctx->sw_cipher);
+ crypto_free_sync_skcipher(ablkctx->sw_cipher);
if (ablkctx->aes_generic)
crypto_free_cipher(ablkctx->aes_generic);
}
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct dsgl_walk dsgl_walk;
unsigned int authsize = crypto_aead_authsize(tfm);
+ struct chcr_context *ctx = a_ctx(tfm);
u32 temp;
dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
- dsgl_walk_end(&dsgl_walk, qid);
+ dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
unsigned short qid)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+ struct chcr_context *ctx = c_ctx(tfm);
struct dsgl_walk dsgl_walk;
dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
reqctx->dstsg = dsgl_walk.last_sg;
reqctx->dst_ofst = dsgl_walk.last_sg_len;
- dsgl_walk_end(&dsgl_walk, qid);
+ dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
void chcr_add_hash_src_ent(struct ahash_request *req,
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 04f277cade7c..2c472e3c6aeb 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -43,7 +43,7 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
static struct cxgb4_uld_info chcr_uld_info = {
.name = DRV_MODULE_NAME,
.nrxq = MAX_ULD_QSETS,
- .ntxq = MAX_ULD_QSETS,
+ /* Max ntxq will be derived from the fw config file */
.rxq_size = 1024,
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
@@ -237,9 +237,7 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
static int __init chcr_crypto_init(void)
{
- if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
- pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
-
+ cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
return 0;
}
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 54835cb109e5..d37ef41f9ebe 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -170,7 +170,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
}
struct ablk_ctx {
- struct crypto_skcipher *sw_cipher;
+ struct crypto_sync_skcipher *sw_cipher;
struct crypto_cipher *aes_generic;
__be32 key_ctx_hdr;
unsigned int enckey_len;
@@ -255,6 +255,8 @@ struct chcr_context {
struct chcr_dev *dev;
unsigned char tx_qidx;
unsigned char rx_qidx;
+ unsigned char tx_chan_id;
+ unsigned char pci_chan_id;
struct __crypto_ctx crypto_ctx[0];
};
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 0997e166ea57..20209e29f814 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -234,8 +234,7 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
return;
out:
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
}
static void release_tcp_port(struct sock *sk)
@@ -406,12 +405,10 @@ static int wait_for_states(struct sock *sk, unsigned int states)
int chtls_disconnect(struct sock *sk, int flags)
{
- struct chtls_sock *csk;
struct tcp_sock *tp;
int err;
tp = tcp_sk(sk);
- csk = rcu_dereference_sk_user_data(sk);
chtls_purge_recv_queue(sk);
chtls_purge_receive_queue(sk);
chtls_purge_write_queue(sk);
@@ -1014,7 +1011,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
- const struct tcphdr *tcph;
struct inet_sock *newinet;
const struct iphdr *iph;
struct net_device *ndev;
@@ -1036,7 +1032,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (!dst)
goto free_sk;
- tcph = (struct tcphdr *)(iph + 1);
n = dst_neigh_lookup(dst, &iph->saddr);
if (!n)
goto free_sk;
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index f59b044ebd25..f472c51abe56 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -272,8 +272,7 @@ static void chtls_free_uld(struct chtls_dev *cdev)
for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
kfree_skb(cdev->rspq_skb_cache[i]);
kfree(cdev->lldi);
- if (cdev->askb)
- kfree_skb(cdev->askb);
+ kfree_skb(cdev->askb);
kfree(cdev);
}
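
Both chtls hunks drop the same redundant guard: like kfree(), kfree_skb() returns immediately when passed NULL, so the surrounding if adds nothing. Illustrative:

    #include <linux/skbuff.h>

    static void drop(struct sk_buff *skb)   /* skb may be NULL */
    {
            kfree_skb(skb);                 /* no-op when skb == NULL */
    }
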
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 7e71043457a6..86c699c14f84 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1044,7 +1044,8 @@ static int safexcel_probe(struct platform_device *pdev)
safexcel_configure(priv);
- priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
+ priv->ring = devm_kcalloc(dev, priv->config.rings,
+ sizeof(*priv->ring),
GFP_KERNEL);
if (!priv->ring) {
ret = -ENOMEM;
@@ -1063,8 +1064,9 @@ static int safexcel_probe(struct platform_device *pdev)
if (ret)
goto err_reg_clk;
- priv->ring[i].rdr_req = devm_kzalloc(dev,
- sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+ priv->ring[i].rdr_req = devm_kcalloc(dev,
+ EIP197_DEFAULT_RING_SIZE,
+ sizeof(priv->ring[i].rdr_req),
GFP_KERNEL);
if (!priv->ring[i].rdr_req) {
ret = -ENOMEM;
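
The safexcel changes swap open-coded n * size allocations for the two-factor devm_kcalloc(), which checks the multiplication for overflow and returns NULL instead of silently wrapping. A minimal sketch with placeholder names:

    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct ring { u32 id; };                /* placeholder element type */

    static struct ring *alloc_rings(struct device *dev, unsigned int n)
    {
            /* n * sizeof(struct ring) is overflow-checked internally */
            return devm_kcalloc(dev, n, sizeof(struct ring), GFP_KERNEL);
    }
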
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index a10c418d4e5c..4e6ff32f8a7e 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -28,9 +28,24 @@
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
+#define DCP_SHA_PAY_SZ 64
#define DCP_ALIGNMENT 64
+/*
+ * Null hashes to align with hw behavior on imx6sl and imx6ull;
+ * these are flipped for consistency with hw output.
+ */
+static const uint8_t sha1_null_hash[] =
+ "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
+ "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
+
+static const uint8_t sha256_null_hash[] =
+ "\x55\xb8\x52\x78\x1b\x99\x95\xa4"
+ "\x4c\x93\x9b\x64\xe4\x41\xae\x27"
+ "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
+ "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
+
/* DCP DMA descriptor. */
struct dcp_dma_desc {
uint32_t next_cmd_addr;
@@ -48,6 +63,7 @@ struct dcp_coherent_block {
uint8_t aes_in_buf[DCP_BUF_SZ];
uint8_t aes_out_buf[DCP_BUF_SZ];
uint8_t sha_in_buf[DCP_BUF_SZ];
+ uint8_t sha_out_buf[DCP_SHA_PAY_SZ];
uint8_t aes_key[2 * AES_KEYSIZE_128];
@@ -63,7 +79,7 @@ struct dcp {
struct dcp_coherent_block *coh;
struct completion completion[DCP_MAX_CHANS];
- struct mutex mutex[DCP_MAX_CHANS];
+ spinlock_t lock[DCP_MAX_CHANS];
struct task_struct *thread[DCP_MAX_CHANS];
struct crypto_queue queue[DCP_MAX_CHANS];
};
@@ -84,7 +100,7 @@ struct dcp_async_ctx {
unsigned int hot:1;
/* Crypto-specific context */
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
};
@@ -99,6 +115,11 @@ struct dcp_sha_req_ctx {
unsigned int fini:1;
};
+struct dcp_export_state {
+ struct dcp_sha_req_ctx req_ctx;
+ struct dcp_async_ctx async_ctx;
+};
+
/*
* There can even be only one instance of the MXS DCP due to the
* design of Linux Crypto API.
@@ -209,6 +230,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
DCP_BUF_SZ, DMA_FROM_DEVICE);
+ if (actx->fill % AES_BLOCK_SIZE) {
+ dev_err(sdcp->dev, "Invalid block size!\n");
+ ret = -EINVAL;
+ goto aes_done_run;
+ }
+
/* Fill in the DMA descriptor. */
desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
MXS_DCP_CONTROL0_INTERRUPT |
@@ -238,6 +265,7 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
ret = mxs_dcp_start_dma(actx);
+aes_done_run:
dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -264,13 +292,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
uint32_t dst_off = 0;
+ uint32_t last_out_len = 0;
uint8_t *key = sdcp->coh->aes_key;
int ret = 0;
int split = 0;
- unsigned int i, len, clen, rem = 0;
+ unsigned int i, len, clen, rem = 0, tlen = 0;
int init = 0;
+ bool limit_hit = false;
actx->fill = 0;
@@ -289,6 +319,11 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
for_each_sg(req->src, src, nents, i) {
src_buf = sg_virt(src);
len = sg_dma_len(src);
+ tlen += len;
+ limit_hit = tlen > req->nbytes;
+
+ if (limit_hit)
+ len = req->nbytes - (tlen - len);
do {
if (actx->fill + len > out_off)
@@ -305,13 +340,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
* If we filled the buffer or this is the last SG,
* submit the buffer.
*/
- if (actx->fill == out_off || sg_is_last(src)) {
+ if (actx->fill == out_off || sg_is_last(src) ||
+ limit_hit) {
ret = mxs_dcp_run_aes(actx, req, init);
if (ret)
return ret;
init = 0;
out_tmp = out_buf;
+ last_out_len = actx->fill;
while (dst && actx->fill) {
if (!split) {
dst_buf = sg_virt(dst);
@@ -334,6 +371,19 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
}
}
} while (len);
+
+ if (limit_hit)
+ break;
+ }
+
+ /* Copy the IV for CBC for chaining */
+ if (!rctx->ecb) {
+ if (rctx->enc)
+ memcpy(req->info, out_buf + (last_out_len - AES_BLOCK_SIZE),
+ AES_BLOCK_SIZE);
+ else
+ memcpy(req->info, in_buf + (last_out_len - AES_BLOCK_SIZE),
+ AES_BLOCK_SIZE);
}
return ret;
@@ -349,13 +399,20 @@ static int dcp_chan_thread_aes(void *data)
int ret;
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&sdcp->mutex[chan]);
+ spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
- mutex_unlock(&sdcp->mutex[chan]);
+ spin_unlock(&sdcp->lock[chan]);
+
+ if (!backlog && !arq) {
+ schedule();
+ continue;
+ }
+
+ set_current_state(TASK_RUNNING);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +420,8 @@ static int dcp_chan_thread_aes(void *data)
if (arq) {
ret = mxs_dcp_aes_block_crypt(arq);
arq->complete(arq, ret);
- continue;
}
-
- schedule();
- } while (!kthread_should_stop());
+ }
return 0;
}
@@ -376,10 +430,10 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
int ret;
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->nbytes, req->info);
@@ -409,9 +463,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
rctx->ecb = ecb;
actx->chan = DCP_CHAN_CRYPTO;
- mutex_lock(&sdcp->mutex[actx->chan]);
+ spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
- mutex_unlock(&sdcp->mutex[actx->chan]);
+ spin_unlock(&sdcp->lock[actx->chan]);
wake_up_process(sdcp->thread[actx->chan]);
@@ -460,16 +514,16 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
* but is supported by in-kernel software implementation, we use
* software fallback.
*/
- crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(actx->fallback,
+ crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(actx->fallback,
tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(actx->fallback, key, len);
+ ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
if (!ret)
return 0;
tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
+ tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
CRYPTO_TFM_RES_MASK;
return ret;
@@ -478,11 +532,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
- const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *blk;
+ struct crypto_sync_skcipher *blk;
- blk = crypto_alloc_skcipher(name, 0, flags);
+ blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
@@ -495,7 +548,7 @@ static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(actx->fallback);
+ crypto_free_sync_skcipher(actx->fallback);
}
/*
@@ -509,8 +562,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
-
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
dma_addr_t digest_phys = 0;
@@ -532,10 +583,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
desc->payload = 0;
desc->status = 0;
+ /*
+ * Align driver with hw behavior when generating null hashes
+ */
+ if (rctx->init && rctx->fini && desc->size == 0) {
+ struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+ const uint8_t *sha_buf =
+ (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
+ sha1_null_hash : sha256_null_hash;
+ memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
+ ret = 0;
+ goto done_run;
+ }
+
/* Set HASH_TERM bit for last transfer block. */
if (rctx->fini) {
- digest_phys = dma_map_single(sdcp->dev, req->result,
- halg->digestsize, DMA_FROM_DEVICE);
+ digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
+ DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
desc->payload = digest_phys;
}
@@ -543,9 +607,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
ret = mxs_dcp_start_dma(actx);
if (rctx->fini)
- dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
+ dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
DMA_FROM_DEVICE);
+done_run:
dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
return ret;
@@ -563,6 +628,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
const int nents = sg_nents(req->src);
uint8_t *in_buf = sdcp->coh->sha_in_buf;
+ uint8_t *out_buf = sdcp->coh->sha_out_buf;
uint8_t *src_buf;
@@ -617,11 +683,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
actx->fill = 0;
- /* For some reason, the result is flipped. */
- for (i = 0; i < halg->digestsize / 2; i++) {
- swap(req->result[i],
- req->result[halg->digestsize - i - 1]);
- }
+ /* For some reason the result is flipped */
+ for (i = 0; i < halg->digestsize; i++)
+ req->result[i] = out_buf[halg->digestsize - i - 1];
}
return 0;
@@ -640,13 +704,20 @@ static int dcp_chan_thread_sha(void *data)
struct ahash_request *req;
int ret, fini;
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&sdcp->mutex[chan]);
+ spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
- mutex_unlock(&sdcp->mutex[chan]);
+ spin_unlock(&sdcp->lock[chan]);
+
+ if (!backlog && !arq) {
+ schedule();
+ continue;
+ }
+
+ set_current_state(TASK_RUNNING);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +729,8 @@ static int dcp_chan_thread_sha(void *data)
ret = dcp_sha_req_to_buf(arq);
fini = rctx->fini;
arq->complete(arq, ret);
- if (!fini)
- continue;
}
-
- schedule();
- } while (!kthread_should_stop());
+ }
return 0;
}
@@ -721,9 +788,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
rctx->init = 1;
}
- mutex_lock(&sdcp->mutex[actx->chan]);
+ spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
- mutex_unlock(&sdcp->mutex[actx->chan]);
+ spin_unlock(&sdcp->lock[actx->chan]);
wake_up_process(sdcp->thread[actx->chan]);
mutex_unlock(&actx->mutex);
@@ -759,14 +826,32 @@ static int dcp_sha_digest(struct ahash_request *req)
return dcp_sha_finup(req);
}
-static int dcp_sha_noimport(struct ahash_request *req, const void *in)
+static int dcp_sha_import(struct ahash_request *req, const void *in)
{
- return -ENOSYS;
+ struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+ const struct dcp_export_state *export = in;
+
+ memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
+ memset(actx, 0, sizeof(struct dcp_async_ctx));
+ memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
+ memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
+
+ return 0;
}
-static int dcp_sha_noexport(struct ahash_request *req, void *out)
+static int dcp_sha_export(struct ahash_request *req, void *out)
{
- return -ENOSYS;
+ struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
+ struct dcp_export_state *export = out;
+
+ memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
+ memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
+
+ return 0;
}
static int dcp_sha_cra_init(struct crypto_tfm *tfm)
@@ -839,10 +924,11 @@ static struct ahash_alg dcp_sha1_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
- .import = dcp_sha_noimport,
- .export = dcp_sha_noexport,
+ .import = dcp_sha_import,
+ .export = dcp_sha_export,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct dcp_export_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-dcp",
@@ -865,10 +951,11 @@ static struct ahash_alg dcp_sha256_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
- .import = dcp_sha_noimport,
- .export = dcp_sha_noexport,
+ .import = dcp_sha_import,
+ .export = dcp_sha_export,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct dcp_export_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-dcp",
@@ -997,7 +1084,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sdcp);
for (i = 0; i < DCP_MAX_CHANS; i++) {
- mutex_init(&sdcp->mutex[i]);
+ spin_lock_init(&sdcp->lock[i]);
init_completion(&sdcp->completion[i]);
crypto_init_queue(&sdcp->queue[i], 50);
}
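
The reworked mxs-dcp worker loops follow the canonical lost-wakeup-safe kthread pattern: set TASK_INTERRUPTIBLE before checking for work, so a wake_up_process() racing with the check leaves the task runnable and schedule() returns immediately instead of sleeping forever; the per-channel mutexes become spinlocks, keeping the queue critical sections short and sleep-free. A sketch with placeholder types, not the dcp structures:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <crypto/algapi.h>

    struct chan {                           /* placeholder channel state */
            spinlock_t lock;
            struct crypto_queue queue;
    };

    static void handle(struct crypto_async_request *arq)
    {
            arq->complete(arq, 0);          /* placeholder processing */
    }

    static int worker(void *data)
    {
            struct chan *ch = data;
            struct crypto_async_request *arq;

            while (!kthread_should_stop()) {
                    /* set state BEFORE checking for work */
                    set_current_state(TASK_INTERRUPTIBLE);

                    spin_lock(&ch->lock);
                    arq = crypto_dequeue_request(&ch->queue);
                    spin_unlock(&ch->lock);

                    if (!arq) {
                            schedule();     /* a racing wakeup is not lost */
                            continue;
                    }

                    set_current_state(TASK_RUNNING);
                    handle(arq);
            }

            return 0;
    }
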
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9019f6b67986..a553ffddb11b 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -522,9 +522,9 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
!!(mode & FLAGS_CBC));
if (req->nbytes < aes_fallback_sz) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL,
NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -564,11 +564,11 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
return 0;
@@ -613,11 +613,10 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
- const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *blk;
+ struct crypto_sync_skcipher *blk;
- blk = crypto_alloc_skcipher(name, 0, flags);
+ blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
@@ -667,7 +666,7 @@ static void omap_aes_cra_exit(struct crypto_tfm *tfm)
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback)
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index fc3b46a85809..7e02920ef6f8 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -101,7 +101,7 @@ struct omap_aes_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct crypto_skcipher *ctr;
};
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 321d5e2ac833..a28f1d18fe01 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -171,7 +171,7 @@ struct spacc_ablk_ctx {
* The fallback cipher. If the operation can't be done in hardware,
* fallback to a software version.
*/
- struct crypto_skcipher *sw_cipher;
+ struct crypto_sync_skcipher *sw_cipher;
};
/* AEAD cipher context. */
@@ -799,17 +799,17 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* Set the fallback transform to use the same request flags as
* the hardware transform.
*/
- crypto_skcipher_clear_flags(ctx->sw_cipher,
+ crypto_sync_skcipher_clear_flags(ctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->sw_cipher,
+ crypto_sync_skcipher_set_flags(ctx->sw_cipher,
cipher->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
+ err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |=
- crypto_skcipher_get_flags(ctx->sw_cipher) &
+ crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
CRYPTO_TFM_RES_MASK;
if (err)
@@ -914,7 +914,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
struct crypto_tfm *old_tfm =
crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
int err;
/*
@@ -922,7 +922,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
* the ciphering has completed, put the old transform back into the
* request.
*/
- skcipher_request_set_tfm(subreq, ctx->sw_cipher);
+ skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->nbytes, req->info);
@@ -1020,9 +1020,8 @@ static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->sw_cipher = crypto_alloc_skcipher(
- alg->cra_name, 0, CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->sw_cipher = crypto_alloc_sync_skcipher(
+ alg->cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher)) {
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
alg->cra_name);
@@ -1041,7 +1040,7 @@ static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->sw_cipher);
+ crypto_free_sync_skcipher(ctx->sw_cipher);
}
static int spacc_ablk_encrypt(struct ablkcipher_request *req)
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index ba197f34c252..763c2166ee0e 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
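
All six QAT probe routines get the same fix: for_each_set_bit() walks an unsigned long bitmap, and casting the address of an int to unsigned long * over-reads on 64-bit and misbehaves on big-endian. Declaring the mask as unsigned long makes the walk well-defined. Sketch:

    #include <linux/bitops.h>
    #include <linux/pci.h>

    static void list_mem_bars(struct pci_dev *pdev)
    {
            unsigned long bar_mask;         /* not int: for_each_set_bit()
                                             * operates on unsigned longs */
            unsigned int bar_nr;

            bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
            for_each_set_bit(bar_nr, &bar_mask, BITS_PER_LONG)
                    pci_info(pdev, "BAR %u is a memory BAR\n", bar_nr);
    }
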
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 24ec908eb26c..613c7d5644ce 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 59a5a0df50b6..9cb832963357 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index b9f3e0e4fde9..278452b8ef81 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 9225d060e18f..f5e960d23a7a 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -198,7 +198,6 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
pr_err("QAT: Can't find acceleration device\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1138e41d6805..d2698299896f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -113,6 +113,13 @@ struct qat_alg_aead_ctx {
struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
struct qat_crypto_instance *inst;
+ union {
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ struct sha512_state sha512;
+ };
+ char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
+ char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_ablkcipher_ctx {
@@ -148,37 +155,32 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
unsigned int auth_keylen)
{
SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
- struct sha1_state sha1;
- struct sha256_state sha256;
- struct sha512_state sha512;
int block_size = crypto_shash_blocksize(ctx->hash_tfm);
int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- char ipad[block_size];
- char opad[block_size];
__be32 *hash_state_out;
__be64 *hash512_state_out;
int i, offset;
- memset(ipad, 0, block_size);
- memset(opad, 0, block_size);
+ memset(ctx->ipad, 0, block_size);
+ memset(ctx->opad, 0, block_size);
shash->tfm = ctx->hash_tfm;
shash->flags = 0x0;
if (auth_keylen > block_size) {
int ret = crypto_shash_digest(shash, auth_key,
- auth_keylen, ipad);
+ auth_keylen, ctx->ipad);
if (ret)
return ret;
- memcpy(opad, ipad, digest_size);
+ memcpy(ctx->opad, ctx->ipad, digest_size);
} else {
- memcpy(ipad, auth_key, auth_keylen);
- memcpy(opad, auth_key, auth_keylen);
+ memcpy(ctx->ipad, auth_key, auth_keylen);
+ memcpy(ctx->opad, auth_key, auth_keylen);
}
for (i = 0; i < block_size; i++) {
- char *ipad_ptr = ipad + i;
- char *opad_ptr = opad + i;
+ char *ipad_ptr = ctx->ipad + i;
+ char *opad_ptr = ctx->opad + i;
*ipad_ptr ^= HMAC_IPAD_VALUE;
*opad_ptr ^= HMAC_OPAD_VALUE;
}
@@ -186,7 +188,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
if (crypto_shash_init(shash))
return -EFAULT;
- if (crypto_shash_update(shash, ipad, block_size))
+ if (crypto_shash_update(shash, ctx->ipad, block_size))
return -EFAULT;
hash_state_out = (__be32 *)hash->sha.state1;
@@ -194,22 +196,22 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &sha1))
+ if (crypto_shash_export(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha1.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &sha256))
+ if (crypto_shash_export(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha256.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &sha512))
+ if (crypto_shash_export(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
break;
default:
return -EFAULT;
@@ -218,7 +220,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
if (crypto_shash_init(shash))
return -EFAULT;
- if (crypto_shash_update(shash, opad, block_size))
+ if (crypto_shash_update(shash, ctx->opad, block_size))
return -EFAULT;
offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
@@ -227,28 +229,28 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &sha1))
+ if (crypto_shash_export(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha1.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &sha256))
+ if (crypto_shash_export(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha256.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &sha512))
+ if (crypto_shash_export(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
break;
default:
return -EFAULT;
}
- memzero_explicit(ipad, block_size);
- memzero_explicit(opad, block_size);
+ memzero_explicit(ctx->ipad, block_size);
+ memzero_explicit(ctx->opad, block_size);
return 0;
}
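
The qat_algs change is part of the kernel-wide VLA removal: char ipad[block_size] was a variable-length stack array, replaced by fixed buffers in the tfm context sized for the largest supported block (SHA-512), which also covers SHA-1 and SHA-256. An illustrative sketch of the pattern, not the qat context itself:

    #include <crypto/sha.h>
    #include <linux/string.h>

    struct hmac_pads {                      /* placeholder context */
            char ipad[SHA512_BLOCK_SIZE];   /* worst case covers all */
            char opad[SHA512_BLOCK_SIZE];
    };

    static void init_pads(struct hmac_pads *p, unsigned int block_size)
    {
            /* was: char ipad[block_size]; -- a VLA on the stack */
            memset(p->ipad, 0, block_size);
            memset(p->opad, 0, block_size);
    }
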
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index be5c5a988ca5..3a9708ef4ce2 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 26ab17bfc6da..3da0f951cb59 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index ea4d96bf47e8..585e1cab9ae3 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -189,7 +189,7 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
fallback:
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
ctx->enc_keylen = keylen;
return ret;
@@ -212,9 +212,9 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
ctx->enc_keylen != AES_KEYSIZE_256) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -245,9 +245,8 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
memset(ctx, 0, sizeof(*ctx));
tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
- ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
}
@@ -255,7 +254,7 @@ static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
}
struct qce_ablkcipher_def {
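The qce conversion (and the matching sahara and vmx hunks below) switches the fallback to the crypto_sync_skcipher API, which guarantees a synchronous transform with a bounded request size, making SYNC_SKCIPHER_REQUEST_ON_STACK() safe. A minimal sketch of the whole pattern; fb_encrypt is an illustrative name:

#include <crypto/skcipher.h>

static int fb_encrypt(struct crypto_sync_skcipher *fb,
		      struct scatterlist *src, struct scatterlist *dst,
		      unsigned int len, u8 *iv)
{
	int ret;

	SYNC_SKCIPHER_REQUEST_ON_STACK(req, fb);

	skcipher_request_set_sync_tfm(req, fb);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	return ret;
}

The transform itself would come from crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK), as in the init hunks.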
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 2b0278bb6e92..ee055bfe98a0 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -22,7 +22,7 @@
struct qce_cipher_ctx {
u8 enc_key[QCE_MAX_KEY_SIZE];
unsigned int enc_keylen;
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
/**
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index faa282074e5a..0064be0e3941 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -249,8 +249,8 @@ struct s5p_aes_reqctx {
struct s5p_aes_ctx {
struct s5p_aes_dev *dev;
- uint8_t aes_key[AES_MAX_KEY_SIZE];
- uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
+ u8 aes_key[AES_MAX_KEY_SIZE];
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
int keylen;
};
@@ -475,9 +475,9 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
}
/* Calls the completion. Cannot be called with dev->lock held. */
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_aes_complete(struct ablkcipher_request *req, int err)
{
- dev->req->base.complete(&dev->req->base, err);
+ req->base.complete(&req->base, err);
}
static void s5p_unset_outdata(struct s5p_aes_dev *dev)
@@ -491,7 +491,7 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev)
}
static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
- struct scatterlist **dst)
+ struct scatterlist **dst)
{
void *pages;
int len;
@@ -518,46 +518,28 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
- int err;
-
- if (!sg->length) {
- err = -EINVAL;
- goto exit;
- }
+ if (!sg->length)
+ return -EINVAL;
- err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
- if (!err) {
- err = -ENOMEM;
- goto exit;
- }
+ if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
+ return -ENOMEM;
dev->sg_dst = sg;
- err = 0;
-exit:
- return err;
+ return 0;
}
static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
- int err;
-
- if (!sg->length) {
- err = -EINVAL;
- goto exit;
- }
+ if (!sg->length)
+ return -EINVAL;
- err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
- if (!err) {
- err = -ENOMEM;
- goto exit;
- }
+ if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
+ return -ENOMEM;
dev->sg_src = sg;
- err = 0;
-exit:
- return err;
+ return 0;
}
/*
@@ -655,14 +637,14 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
+ struct ablkcipher_request *req;
int err_dma_tx = 0;
int err_dma_rx = 0;
int err_dma_hx = 0;
bool tx_end = false;
bool hx_end = false;
unsigned long flags;
- uint32_t status;
- u32 st_bits;
+ u32 status, st_bits;
int err;
spin_lock_irqsave(&dev->lock, flags);
@@ -727,7 +709,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, 0);
+ s5p_aes_complete(dev->req, 0);
/* Device is still busy */
tasklet_schedule(&dev->tasklet);
} else {
@@ -752,11 +734,12 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
error:
s5p_sg_done(dev);
dev->busy = false;
+ req = dev->req;
if (err_dma_hx == 1)
s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, err);
+ s5p_aes_complete(req, err);
hash_irq_end:
/*
@@ -1830,7 +1813,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
};
static void s5p_set_aes(struct s5p_aes_dev *dev,
- const uint8_t *key, const uint8_t *iv,
+ const u8 *key, const u8 *iv, const u8 *ctr,
unsigned int keylen)
{
void __iomem *keystart;
@@ -1838,6 +1821,9 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
if (iv)
memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+ if (ctr)
+ memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, 0x10);
+
if (keylen == AES_KEYSIZE_256)
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
else if (keylen == AES_KEYSIZE_192)
@@ -1887,7 +1873,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev,
}
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct ablkcipher_request *req)
{
struct scatterlist *sg;
int err;
@@ -1916,11 +1902,12 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
struct ablkcipher_request *req = dev->req;
- uint32_t aes_control;
+ u32 aes_control;
unsigned long flags;
int err;
- u8 *iv;
+ u8 *iv, *ctr;
+ /* This sets bits [13:12] to 00, which selects the 128-bit counter */
aes_control = SSS_AES_KEY_CHANGE_MODE;
if (mode & FLAGS_AES_DECRYPT)
aes_control |= SSS_AES_MODE_DECRYPT;
@@ -1928,11 +1915,14 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
aes_control |= SSS_AES_CHAIN_MODE_CBC;
iv = req->info;
+ ctr = NULL;
} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
aes_control |= SSS_AES_CHAIN_MODE_CTR;
- iv = req->info;
+ iv = NULL;
+ ctr = req->info;
} else {
iv = NULL; /* AES_ECB */
+ ctr = NULL;
}
if (dev->ctx->keylen == AES_KEYSIZE_192)
@@ -1964,7 +1954,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
goto outdata_error;
SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
- s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
+ s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
s5p_set_dma_indata(dev, dev->sg_src);
s5p_set_dma_outdata(dev, dev->sg_dst);
@@ -1983,7 +1973,7 @@ indata_error:
s5p_sg_done(dev);
dev->busy = false;
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, err);
+ s5p_aes_complete(req, err);
}
static void s5p_tasklet_cb(unsigned long data)
@@ -2024,7 +2014,7 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
err = ablkcipher_enqueue_request(&dev->queue, req);
if (dev->busy) {
spin_unlock_irqrestore(&dev->lock, flags);
- goto exit;
+ return err;
}
dev->busy = true;
@@ -2032,7 +2022,6 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
tasklet_schedule(&dev->tasklet);
-exit:
return err;
}
@@ -2043,7 +2032,8 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct s5p_aes_dev *dev = ctx->dev;
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
+ ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
return -EINVAL;
}
@@ -2054,7 +2044,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
}
static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
- const uint8_t *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2090,6 +2080,11 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}
+static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
+{
+ return s5p_aes_crypt(req, FLAGS_AES_CTR);
+}
+
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2144,6 +2139,28 @@ static struct crypto_alg algs[] = {
.decrypt = s5p_aes_cbc_decrypt,
}
},
+ {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-s5p",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_ctr_crypt,
+ .decrypt = s5p_aes_ctr_crypt,
+ }
+ },
};
static int s5p_aes_probe(struct platform_device *pdev)
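s5p-sss gains AES-CTR by routing req->info into the counter registers (SSS_REG_AES_CNT_DATA) rather than the IV registers, and by relaxing the length check: CTR is a stream mode, so only the block modes must process a whole number of AES blocks. A hedged sketch of that check, reusing the driver's flag names:

#include <crypto/aes.h>
#include <linux/kernel.h>

static bool s5p_len_ok(unsigned int nbytes, unsigned long mode)
{
	/* CTR accepts any length; ECB/CBC need full blocks. */
	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
		return true;
	return IS_ALIGNED(nbytes, AES_BLOCK_SIZE);
}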
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index e7540a5b8197..bbf166a97ad3 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -149,7 +149,7 @@ struct sahara_ctx {
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
struct sahara_aes_reqctx {
@@ -621,14 +621,14 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
/*
* The requested key size is not supported by HW, do a fallback.
*/
- crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
+ tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
CRYPTO_TFM_RES_MASK;
return ret;
}
@@ -666,9 +666,9 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -688,9 +688,9 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -710,9 +710,9 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -732,9 +732,9 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -752,8 +752,7 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
const char *name = crypto_tfm_alg_name(tfm);
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->fallback = crypto_alloc_skcipher(name, 0,
- CRYPTO_ALG_ASYNC |
+ ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("Error allocating fallback algo %s\n", name);
@@ -769,7 +768,7 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
}
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index b71895871be3..c5c5ff82b52e 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -32,7 +32,7 @@
#include "aesp8-ppc.h"
struct p8_aes_cbc_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
};
@@ -40,11 +40,11 @@ struct p8_aes_cbc_ctx {
static int p8_aes_cbc_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
@@ -53,7 +53,7 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -66,7 +66,7 @@ static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -86,7 +86,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -100,8 +100,8 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_encrypt(req);
@@ -139,8 +139,8 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_decrypt(req);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index cd777c75291d..8a2fe092cb8e 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -32,18 +32,18 @@
#include "aesp8-ppc.h"
struct p8_aes_ctr_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
};
static int p8_aes_ctr_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
@@ -51,7 +51,7 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -64,7 +64,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -83,7 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -119,8 +119,8 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_encrypt(req);
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index e9954a7d4694..ecd64e5cc5bb 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -33,7 +33,7 @@
#include "aesp8-ppc.h"
struct p8_aes_xts_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
struct aes_key tweak_key;
@@ -42,11 +42,11 @@ struct p8_aes_xts_ctx {
static int p8_aes_xts_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
@@ -54,7 +54,7 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -67,7 +67,7 @@ static void p8_aes_xts_exit(struct crypto_tfm *tfm)
struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -92,7 +92,7 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -109,8 +109,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index bbe4d72ca105..948806e57cee 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -535,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
+static const struct address_space_operations dev_dax_aops = {
+ .set_page_dirty = noop_set_page_dirty,
+ .invalidatepage = noop_invalidatepage,
+};
+
static int dax_open(struct inode *inode, struct file *filp)
{
struct dax_device *dax_dev = inode_dax(inode);
@@ -544,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp)
dev_dbg(&dev_dax->dev, "trace\n");
inode->i_mapping = __dax_inode->i_mapping;
inode->i_mapping->host = __dax_inode;
+ inode->i_mapping->a_ops = &dev_dax_aops;
filp->f_mapping = inode->i_mapping;
filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
filp->private_data = dev_dax;
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 4c49bb1330b5..141413067b5c 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -11,6 +11,7 @@
*/
#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
@@ -28,9 +29,6 @@
#include <linux/of.h>
#include "governor.h"
-#define MAX(a,b) ((a > b) ? a : b)
-#define MIN(a,b) ((a < b) ? a : b)
-
static struct class *devfreq_class;
/*
@@ -221,6 +219,49 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
return ERR_PTR(-ENODEV);
}
+/**
+ * try_then_request_governor() - Try to find the governor, requesting its
+ * module if it is not found.
+ * @name: name of the governor
+ *
+ * Search the list of devfreq governors; if the governor is not found,
+ * request its module and try again. This can happen when both drivers (the
+ * governor driver and the driver that calls devfreq_add_device) are modules.
+ * devfreq_list_lock should be held by the caller. Returns the matched
+ * governor's pointer.
+ */
+static struct devfreq_governor *try_then_request_governor(const char *name)
+{
+ struct devfreq_governor *governor;
+ int err = 0;
+
+ if (IS_ERR_OR_NULL(name)) {
+ pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ WARN(!mutex_is_locked(&devfreq_list_lock),
+ "devfreq_list_lock must be locked.");
+
+ governor = find_devfreq_governor(name);
+ if (IS_ERR(governor)) {
+ mutex_unlock(&devfreq_list_lock);
+
+ if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ DEVFREQ_NAME_LEN))
+ err = request_module("governor_%s", "simpleondemand");
+ else
+ err = request_module("governor_%s", name);
+ /* Restore previous state before return */
+ mutex_lock(&devfreq_list_lock);
+ if (err)
+ return NULL;
+
+ governor = find_devfreq_governor(name);
+ }
+
+ return governor;
+}
+
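A hedged usage sketch: callers enter with devfreq_list_lock held, and the helper drops it only around request_module() (modprobe may sleep and re-enter devfreq), as the governor_store() hunk further down does:

	struct devfreq_governor *governor;

	mutex_lock(&devfreq_list_lock);
	governor = try_then_request_governor("userspace");
	if (IS_ERR(governor))
		ret = PTR_ERR(governor);
	mutex_unlock(&devfreq_list_lock);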
static int devfreq_notify_transition(struct devfreq *devfreq,
struct devfreq_freqs *freqs, unsigned int state)
{
@@ -280,14 +321,14 @@ int update_devfreq(struct devfreq *devfreq)
* max_freq
* min_freq
*/
- max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq);
- min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq);
+ max_freq = min(devfreq->scaling_max_freq, devfreq->max_freq);
+ min_freq = max(devfreq->scaling_min_freq, devfreq->min_freq);
- if (min_freq && freq < min_freq) {
+ if (freq < min_freq) {
freq = min_freq;
flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
}
- if (max_freq && freq > max_freq) {
+ if (freq > max_freq) {
freq = max_freq;
flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
}
@@ -534,10 +575,6 @@ static void devfreq_dev_release(struct device *dev)
list_del(&devfreq->node);
mutex_unlock(&devfreq_list_lock);
- if (devfreq->governor)
- devfreq->governor->event_handler(devfreq,
- DEVFREQ_GOV_STOP, NULL);
-
if (devfreq->profile->exit)
devfreq->profile->exit(devfreq->dev.parent);
@@ -646,9 +683,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_unlock(&devfreq->lock);
mutex_lock(&devfreq_list_lock);
- list_add(&devfreq->node, &devfreq_list);
- governor = find_devfreq_governor(devfreq->governor_name);
+ governor = try_then_request_governor(devfreq->governor_name);
if (IS_ERR(governor)) {
dev_err(dev, "%s: Unable to find governor for the device\n",
__func__);
@@ -664,19 +700,20 @@ struct devfreq *devfreq_add_device(struct device *dev,
__func__);
goto err_init;
}
+
+ list_add(&devfreq->node, &devfreq_list);
+
mutex_unlock(&devfreq_list_lock);
return devfreq;
err_init:
- list_del(&devfreq->node);
mutex_unlock(&devfreq_list_lock);
- device_unregister(&devfreq->dev);
+ devfreq_remove_device(devfreq);
devfreq = NULL;
err_dev:
- if (devfreq)
- kfree(devfreq);
+ kfree(devfreq);
err_out:
return ERR_PTR(err);
}
@@ -693,6 +730,9 @@ int devfreq_remove_device(struct devfreq *devfreq)
if (!devfreq)
return -EINVAL;
+ if (devfreq->governor)
+ devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_STOP, NULL);
device_unregister(&devfreq->dev);
return 0;
@@ -991,7 +1031,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&devfreq_list_lock);
- governor = find_devfreq_governor(str_governor);
+ governor = try_then_request_governor(str_governor);
if (IS_ERR(governor)) {
ret = PTR_ERR(governor);
goto out;
@@ -1126,17 +1166,26 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
struct devfreq *df = to_devfreq(dev);
unsigned long value;
int ret;
- unsigned long max;
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
return -EINVAL;
mutex_lock(&df->lock);
- max = df->max_freq;
- if (value && max && value > max) {
- ret = -EINVAL;
- goto unlock;
+
+ if (value) {
+ if (value > df->max_freq) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ } else {
+ unsigned long *freq_table = df->profile->freq_table;
+
+ /* Get minimum frequency according to sorting order */
+ if (freq_table[0] < freq_table[df->profile->max_state - 1])
+ value = freq_table[0];
+ else
+ value = freq_table[df->profile->max_state - 1];
}
df->min_freq = value;
@@ -1152,7 +1201,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
{
struct devfreq *df = to_devfreq(dev);
- return sprintf(buf, "%lu\n", MAX(df->scaling_min_freq, df->min_freq));
+ return sprintf(buf, "%lu\n", max(df->scaling_min_freq, df->min_freq));
}
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
@@ -1161,17 +1210,26 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
struct devfreq *df = to_devfreq(dev);
unsigned long value;
int ret;
- unsigned long min;
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
return -EINVAL;
mutex_lock(&df->lock);
- min = df->min_freq;
- if (value && min && value < min) {
- ret = -EINVAL;
- goto unlock;
+
+ if (value) {
+ if (value < df->min_freq) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ } else {
+ unsigned long *freq_table = df->profile->freq_table;
+
+ /* Get maximum frequency according to sorting order */
+ if (freq_table[0] < freq_table[df->profile->max_state - 1])
+ value = freq_table[df->profile->max_state - 1];
+ else
+ value = freq_table[0];
}
df->max_freq = value;
@@ -1188,7 +1246,7 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
{
struct devfreq *df = to_devfreq(dev);
- return sprintf(buf, "%lu\n", MIN(df->scaling_max_freq, df->max_freq));
+ return sprintf(buf, "%lu\n", min(df->scaling_max_freq, df->max_freq));
}
static DEVICE_ATTR_RW(max_freq);
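The removed MIN()/MAX() macros evaluated their arguments twice and did no type checking; min()/max() from <linux/kernel.h> do both. A sketch of the clamp the show/update paths now perform (clamp_freq is an illustrative helper, not in the driver):

#include <linux/kernel.h>

static unsigned long clamp_freq(struct devfreq *df, unsigned long freq)
{
	unsigned long lo = max(df->scaling_min_freq, df->min_freq);
	unsigned long hi = min(df->scaling_max_freq, df->max_freq);

	return clamp(freq, lo, hi);
}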
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index a9c64f0d3284..c61de0bdf053 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -535,8 +535,8 @@ static int of_get_devfreq_events(struct device_node *np,
if (i == ARRAY_SIZE(ppmu_events)) {
dev_warn(dev,
- "don't know how to configure events : %s\n",
- node->name);
+ "don't know how to configure events : %pOFn\n",
+ node);
continue;
}
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index cfc50a61a90d..f53339ca610f 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -25,6 +25,9 @@
#define DEVFREQ_GOV_SUSPEND 0x4
#define DEVFREQ_GOV_RESUME 0x5
+#define DEVFREQ_MIN_FREQ 0
+#define DEVFREQ_MAX_FREQ ULONG_MAX
+
/**
* struct devfreq_governor - Devfreq policy governor
* @node: list node - contains registered devfreq governors
@@ -54,9 +57,6 @@ struct devfreq_governor {
unsigned int event, void *data);
};
-/* Caution: devfreq->lock must be locked before calling update_devfreq */
-extern int update_devfreq(struct devfreq *devfreq);
-
extern void devfreq_monitor_start(struct devfreq *devfreq);
extern void devfreq_monitor_stop(struct devfreq *devfreq);
extern void devfreq_monitor_suspend(struct devfreq *devfreq);
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index 4d23ecfbd948..ded429fd51be 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -20,10 +20,7 @@ static int devfreq_performance_func(struct devfreq *df,
* target callback should be able to get floor value as
* said in devfreq.h
*/
- if (!df->max_freq)
- *freq = UINT_MAX;
- else
- *freq = df->max_freq;
+ *freq = DEVFREQ_MAX_FREQ;
return 0;
}
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index 0c42f23249ef..9e8897f5ac42 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -20,7 +20,7 @@ static int devfreq_powersave_func(struct devfreq *df,
* target callback should be able to get ceiling value as
* said in devfreq.h
*/
- *freq = df->min_freq;
+ *freq = DEVFREQ_MIN_FREQ;
return 0;
}
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index 28e0f2de7100..c0417f0e081e 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -27,7 +27,6 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
struct devfreq_simple_ondemand_data *data = df->data;
- unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
err = devfreq_update_stats(df);
if (err)
@@ -47,7 +46,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
/* Assume MAX if we would otherwise divide by zero */
if (stat->total_time == 0) {
- *freq = max;
+ *freq = DEVFREQ_MAX_FREQ;
return 0;
}
@@ -60,13 +59,13 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
/* Set MAX if it's busy enough */
if (stat->busy_time * 100 >
stat->total_time * dfso_upthreshold) {
- *freq = max;
+ *freq = DEVFREQ_MAX_FREQ;
return 0;
}
/* Set MAX if we do not know the initial frequency */
if (stat->current_frequency == 0) {
- *freq = max;
+ *freq = DEVFREQ_MAX_FREQ;
return 0;
}
@@ -85,11 +84,6 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
*freq = (unsigned long) b;
- if (df->min_freq && *freq < df->min_freq)
- *freq = df->min_freq;
- if (df->max_freq && *freq > df->max_freq)
- *freq = df->max_freq;
-
return 0;
}
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 080607c3f34d..378d84c011df 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -26,19 +26,11 @@ static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
{
struct userspace_data *data = df->data;
- if (data->valid) {
- unsigned long adjusted_freq = data->user_frequency;
-
- if (df->max_freq && adjusted_freq > df->max_freq)
- adjusted_freq = df->max_freq;
-
- if (df->min_freq && adjusted_freq < df->min_freq)
- adjusted_freq = df->min_freq;
-
- *freq = adjusted_freq;
- } else {
+ if (data->valid)
+ *freq = data->user_frequency;
+ else
*freq = df->previous_freq; /* No user freq specified yet */
- }
+
return 0;
}
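Across the governor hunks the per-governor clamping disappears: a target callback now returns the DEVFREQ_MIN_FREQ/DEVFREQ_MAX_FREQ sentinels from governor.h, and update_devfreq() applies the user and scaling limits centrally. A minimal sketch of a governor under the new contract (example_func is illustrative):

static int example_func(struct devfreq *df, unsigned long *freq)
{
	*freq = DEVFREQ_MAX_FREQ;	/* clamped later by update_devfreq() */
	return 0;
}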
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index dacf3f42426d..de511db021cc 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -143,7 +143,7 @@ config DMA_JZ4740
config DMA_JZ4780
tristate "JZ4780 DMA support"
- depends on MACH_JZ4780 || COMPILE_TEST
+ depends on MIPS || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -321,6 +321,17 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines.
+config MCF_EDMA
+ tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
+ depends on M5441x || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the Freescale ColdFire eDMA engine, a 64-channel
+ implementation that performs complex data transfers with
+ minimal intervention from a host processor.
+ This module can be found on Freescale ColdFire mcf5441x SoCs.
+
config MMP_PDMA
bool "MMP PDMA support"
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c91702d88b95..7fcc4d8e336d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -31,7 +31,8 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
-obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
+obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 75f38d19fcbe..7cbac6e8c113 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1320,7 +1320,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
if (unlikely(!is_slave_direction(direction)))
goto err_out;
- if (sconfig->direction == DMA_MEM_TO_DEV)
+ if (direction == DMA_MEM_TO_DEV)
reg_width = convert_buswidth(sconfig->dst_addr_width);
else
reg_width = convert_buswidth(sconfig->src_addr_width);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4bf72561667c..4e557684f792 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1600,7 +1600,7 @@ static void at_xdmac_tasklet(unsigned long data)
if (atchan->status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
- spin_lock_bh(&atchan->lock);
+ spin_lock(&atchan->lock);
desc = list_first_entry(&atchan->xfers_list,
struct at_xdmac_desc,
xfer_node);
@@ -1610,7 +1610,7 @@ static void at_xdmac_tasklet(unsigned long data)
txd = &desc->tx_dma_desc;
at_xdmac_remove_xfer(atchan, desc);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock(&atchan->lock);
if (!at_xdmac_chan_is_cyclic(atchan)) {
dma_cookie_complete(txd);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 847f84a41a69..cad55ab80d41 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -778,14 +778,6 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan,
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
- if ((cfg->direction == DMA_DEV_TO_MEM &&
- cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- (cfg->direction == DMA_MEM_TO_DEV &&
- cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- !is_slave_direction(cfg->direction)) {
- return -EINVAL;
- }
-
c->cfg = *cfg;
return 0;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index da74fd74636b..eebaba3d9e78 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1306,6 +1306,7 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
+ struct dma_slave_config config;
u32 addr;
u32 ctrl;
@@ -1402,6 +1403,10 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
return container_of(chan, struct coh901318_chan, chan);
}
+static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
+ struct dma_slave_config *config,
+ enum dma_transfer_direction direction);
+
static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
@@ -2360,6 +2365,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (lli == NULL)
goto err_dma_alloc;
+ coh901318_dma_set_runtimeconfig(chan, &cohc->config, direction);
+
/* initiate allocated lli list */
ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
cohc->addr,
@@ -2499,7 +2506,8 @@ static const struct burst_table burst_sizes[] = {
};
static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
- struct dma_slave_config *config)
+ struct dma_slave_config *config,
+ enum dma_transfer_direction direction)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
dma_addr_t addr;
@@ -2509,11 +2517,11 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
int i = 0;
/* We only support mem to per or per to mem transfers */
- if (config->direction == DMA_DEV_TO_MEM) {
+ if (direction == DMA_DEV_TO_MEM) {
addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
- } else if (config->direction == DMA_MEM_TO_DEV) {
+ } else if (direction == DMA_MEM_TO_DEV) {
addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
@@ -2579,6 +2587,16 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
return 0;
}
+static int coh901318_dma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+
+ memcpy(&cohc->config, config, sizeof(*config));
+
+ return 0;
+}
+
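coh901318 (like dma-jz4740 below) now treats dma_slave_config.direction as deprecated: .device_config only caches the struct, and the prep callback applies it together with its own direction argument. A hedged sketch of the split (example_* names are illustrative):

#include <linux/dmaengine.h>

struct example_chan {
	struct dma_chan chan;
	struct dma_slave_config config;	/* cached here ... */
};

static int example_slave_config(struct dma_chan *chan,
				struct dma_slave_config *cfg)
{
	struct example_chan *ec =
		container_of(chan, struct example_chan, chan);

	memcpy(&ec->config, cfg, sizeof(*cfg));
	return 0;			/* ... applied at prep time */
}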
static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base)
{
@@ -2684,7 +2702,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
base->dma_slave.device_tx_status = coh901318_tx_status;
base->dma_slave.device_issue_pending = coh901318_issue_pending;
- base->dma_slave.device_config = coh901318_dma_set_runtimeconfig;
+ base->dma_slave.device_config = coh901318_dma_slave_config;
base->dma_slave.device_pause = coh901318_pause;
base->dma_slave.device_resume = coh901318_resume;
base->dma_slave.device_terminate_all = coh901318_terminate_all;
@@ -2707,7 +2725,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
base->dma_memcpy.device_tx_status = coh901318_tx_status;
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
- base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig;
+ base->dma_memcpy.device_config = coh901318_dma_slave_config;
base->dma_memcpy.device_pause = coh901318_pause;
base->dma_memcpy.device_resume = coh901318_resume;
base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index afd5e10f8927..5253e3c0dc04 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -113,6 +113,7 @@ struct jz4740_dma_desc {
struct jz4740_dmaengine_chan {
struct virt_dma_chan vchan;
unsigned int id;
+ struct dma_slave_config config;
dma_addr_t fifo_addr;
unsigned int transfer_shift;
@@ -203,8 +204,9 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
}
-static int jz4740_dma_slave_config(struct dma_chan *c,
- struct dma_slave_config *config)
+static int jz4740_dma_slave_config_write(struct dma_chan *c,
+ struct dma_slave_config *config,
+ enum dma_transfer_direction direction)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -214,7 +216,7 @@ static int jz4740_dma_slave_config(struct dma_chan *c,
enum jz4740_dma_flags flags;
uint32_t cmd;
- switch (config->direction) {
+ switch (direction) {
case DMA_MEM_TO_DEV:
flags = JZ4740_DMA_SRC_AUTOINC;
transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
@@ -265,6 +267,15 @@ static int jz4740_dma_slave_config(struct dma_chan *c,
return 0;
}
+static int jz4740_dma_slave_config(struct dma_chan *c,
+ struct dma_slave_config *config)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+
+ memcpy(&chan->config, config, sizeof(*config));
+ return 0;
+}
+
static int jz4740_dma_terminate_all(struct dma_chan *c)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
@@ -407,6 +418,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
desc->direction = direction;
desc->cyclic = false;
+ jz4740_dma_slave_config_write(c, &chan->config, direction);
+
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
@@ -438,6 +451,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
desc->direction = direction;
desc->cyclic = true;
+ jz4740_dma_slave_config_write(c, &chan->config, direction);
+
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 85820a2d69d4..a8b6225faa12 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -23,33 +24,35 @@
#include "dmaengine.h"
#include "virt-dma.h"
-#define JZ_DMA_NR_CHANNELS 32
-
/* Global registers. */
-#define JZ_DMA_REG_DMAC 0x1000
-#define JZ_DMA_REG_DIRQP 0x1004
-#define JZ_DMA_REG_DDR 0x1008
-#define JZ_DMA_REG_DDRS 0x100c
-#define JZ_DMA_REG_DMACP 0x101c
-#define JZ_DMA_REG_DSIRQP 0x1020
-#define JZ_DMA_REG_DSIRQM 0x1024
-#define JZ_DMA_REG_DCIRQP 0x1028
-#define JZ_DMA_REG_DCIRQM 0x102c
+#define JZ_DMA_REG_DMAC 0x00
+#define JZ_DMA_REG_DIRQP 0x04
+#define JZ_DMA_REG_DDR 0x08
+#define JZ_DMA_REG_DDRS 0x0c
+#define JZ_DMA_REG_DCKE 0x10
+#define JZ_DMA_REG_DCKES 0x14
+#define JZ_DMA_REG_DCKEC 0x18
+#define JZ_DMA_REG_DMACP 0x1c
+#define JZ_DMA_REG_DSIRQP 0x20
+#define JZ_DMA_REG_DSIRQM 0x24
+#define JZ_DMA_REG_DCIRQP 0x28
+#define JZ_DMA_REG_DCIRQM 0x2c
/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n) (n * 0x20)
-#define JZ_DMA_REG_DSA(n) (0x00 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DTA(n) (0x04 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DTC(n) (0x08 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DRT(n) (0x0c + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DCS(n) (0x10 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DCM(n) (0x14 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DDA(n) (0x18 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DSD(n) (0x1c + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DSA 0x00
+#define JZ_DMA_REG_DTA 0x04
+#define JZ_DMA_REG_DTC 0x08
+#define JZ_DMA_REG_DRT 0x0c
+#define JZ_DMA_REG_DCS 0x10
+#define JZ_DMA_REG_DCM 0x14
+#define JZ_DMA_REG_DDA 0x18
+#define JZ_DMA_REG_DSD 0x1c
#define JZ_DMA_DMAC_DMAE BIT(0)
#define JZ_DMA_DMAC_AR BIT(2)
#define JZ_DMA_DMAC_HLT BIT(3)
+#define JZ_DMA_DMAC_FAIC BIT(27)
#define JZ_DMA_DMAC_FMSC BIT(31)
#define JZ_DMA_DRT_AUTO 0x8
@@ -86,6 +89,14 @@
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+#define JZ4780_DMA_CTRL_OFFSET 0x1000
+
+/* macros for use with jz4780_dma_soc_data.flags */
+#define JZ_SOC_DATA_ALLOW_LEGACY_DT BIT(0)
+#define JZ_SOC_DATA_PROGRAMMABLE_DMA BIT(1)
+#define JZ_SOC_DATA_PER_CHAN_PM BIT(2)
+#define JZ_SOC_DATA_NO_DCKES_DCKEC BIT(3)
+
/**
* struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
* @dcm: value for the DCM (channel command) register
@@ -94,17 +105,12 @@
* @dtc: transfer count (number of blocks of the transfer size specified in DCM
* to transfer) in the low 24 bits, offset of the next descriptor from the
* descriptor base address in the upper 8 bits.
- * @sd: target/source stride difference (in stride transfer mode).
- * @drt: request type
*/
struct jz4780_dma_hwdesc {
uint32_t dcm;
uint32_t dsa;
uint32_t dta;
uint32_t dtc;
- uint32_t sd;
- uint32_t drt;
- uint32_t reserved[2];
};
/* Size of allocations for hardware descriptor blocks. */
@@ -135,14 +141,22 @@ struct jz4780_dma_chan {
unsigned int curr_hwdesc;
};
+struct jz4780_dma_soc_data {
+ unsigned int nb_channels;
+ unsigned int transfer_ord_max;
+ unsigned long flags;
+};
+
struct jz4780_dma_dev {
struct dma_device dma_device;
- void __iomem *base;
+ void __iomem *chn_base;
+ void __iomem *ctrl_base;
struct clk *clk;
unsigned int irq;
+ const struct jz4780_dma_soc_data *soc_data;
uint32_t chan_reserved;
- struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
+ struct jz4780_dma_chan chan[];
};
struct jz4780_dma_filter_data {
@@ -169,16 +183,51 @@ static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
dma_device);
}
-static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma,
+static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
+ unsigned int chn, unsigned int reg)
+{
+ return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
+}
+
+static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
+ unsigned int chn, unsigned int reg, uint32_t val)
+{
+ writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
+}
+
+static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
unsigned int reg)
{
- return readl(jzdma->base + reg);
+ return readl(jzdma->ctrl_base + reg);
}
-static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma,
+static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
unsigned int reg, uint32_t val)
{
- writel(val, jzdma->base + reg);
+ writel(val, jzdma->ctrl_base + reg);
+}
+
+static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
+ unsigned int chn)
+{
+ if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
+ unsigned int reg;
+
+ if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
+ reg = JZ_DMA_REG_DCKE;
+ else
+ reg = JZ_DMA_REG_DCKES;
+
+ jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
+ }
+}
+
+static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
+ unsigned int chn)
+{
+ if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
+ !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}
static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
@@ -215,8 +264,10 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(desc);
}
-static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
+static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
+ unsigned long val, uint32_t *shift)
{
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
int ord = ffs(val) - 1;
/*
@@ -228,8 +279,8 @@ static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
*/
if (ord == 3)
ord = 2;
- else if (ord > 7)
- ord = 7;
+ else if (ord > jzdma->soc_data->transfer_ord_max)
+ ord = jzdma->soc_data->transfer_ord_max;
*shift = ord;
@@ -262,7 +313,6 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
desc->dcm = JZ_DMA_DCM_SAI;
desc->dsa = addr;
desc->dta = config->dst_addr;
- desc->drt = jzchan->transfer_type;
width = config->dst_addr_width;
maxburst = config->dst_maxburst;
@@ -270,7 +320,6 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
desc->dcm = JZ_DMA_DCM_DAI;
desc->dsa = config->src_addr;
desc->dta = addr;
- desc->drt = jzchan->transfer_type;
width = config->src_addr_width;
maxburst = config->src_maxburst;
@@ -283,7 +332,7 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
* divisible by the transfer size, and we must not use more than the
* maximum burst specified by the user.
*/
- tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
+ tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
&jzchan->transfer_shift);
switch (width) {
@@ -412,12 +461,13 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
if (!desc)
return NULL;
- tsz = jz4780_dma_transfer_size(dest | src | len,
+ tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
&jzchan->transfer_shift);
+ jzchan->transfer_type = JZ_DMA_DRT_AUTO;
+
desc->desc[0].dsa = src;
desc->desc[0].dta = dest;
- desc->desc[0].drt = JZ_DMA_DRT_AUTO;
desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
tsz << JZ_DMA_DCM_TSZ_SHIFT |
JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
@@ -472,18 +522,34 @@ static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
}
- /* Use 8-word descriptors. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8);
+ /* Enable the channel's clock. */
+ jz4780_dma_chan_enable(jzdma, jzchan->id);
+
+ /* Use 4-word descriptors. */
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
+
+ /* Set transfer type. */
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
+ jzchan->transfer_type);
+
+ /*
+ * Set the transfer count. This is redundant for a descriptor-driven
+ * transfer. However, there can be a delay between the transfer start
+ * time and when DTCn reg contains the new transfer count. Setting
+ * it explicitly ensures residue is computed correctly at all times.
+ */
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
+ jzchan->desc->desc[jzchan->curr_hwdesc].dtc);
/* Write descriptor address and initiate descriptor fetch. */
desc_phys = jzchan->desc->desc_phys +
(jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys);
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
/* Enable the channel. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id),
- JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
+ JZ_DMA_DCS_CTE);
}
static void jz4780_dma_issue_pending(struct dma_chan *chan)
@@ -509,12 +575,14 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&jzchan->vchan.lock, flags);
/* Clear the DMA status and stop the transfer. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
if (jzchan->desc) {
vchan_terminate_vdesc(&jzchan->desc->vdesc);
jzchan->desc = NULL;
}
+ jz4780_dma_chan_disable(jzdma, jzchan->id);
+
vchan_get_all_descriptors(&jzchan->vchan, &head);
spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
@@ -526,8 +594,10 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
static void jz4780_dma_synchronize(struct dma_chan *chan)
{
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
vchan_synchronize(&jzchan->vchan);
+ jz4780_dma_chan_disable(jzdma, jzchan->id);
}
static int jz4780_dma_config(struct dma_chan *chan,
@@ -549,21 +619,17 @@ static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
struct jz4780_dma_desc *desc, unsigned int next_sg)
{
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
- unsigned int residue, count;
+ unsigned int count = 0;
unsigned int i;
- residue = 0;
-
for (i = next_sg; i < desc->count; i++)
- residue += desc->desc[i].dtc << jzchan->transfer_shift;
+ count += desc->desc[i].dtc & GENMASK(23, 0);
- if (next_sg != 0) {
- count = jz4780_dma_readl(jzdma,
- JZ_DMA_REG_DTC(jzchan->id));
- residue += count << jzchan->transfer_shift;
- }
+ if (next_sg != 0)
+ count += jz4780_dma_chn_readl(jzdma, jzchan->id,
+ JZ_DMA_REG_DTC);
- return residue;
+ return count << jzchan->transfer_shift;
}
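The rewritten residue path masks each descriptor's dtc with GENMASK(23, 0): the low 24 bits hold the block count, while bits 31:24 hold the next-descriptor offset, so summing unmasked values overstated the residue. A sketch of the per-descriptor contribution (hwdesc_bytes is an illustrative helper):

#include <linux/bitops.h>

static size_t hwdesc_bytes(const struct jz4780_dma_hwdesc *d,
			   unsigned int transfer_shift)
{
	/* Count field only; the descriptor-offset bits are not residue. */
	return (size_t)(d->dtc & GENMASK(23, 0)) << transfer_shift;
}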
static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
@@ -573,6 +639,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
struct virt_dma_desc *vdesc;
enum dma_status status;
unsigned long flags;
+ unsigned long residue = 0;
status = dma_cookie_status(chan, cookie, txstate);
if ((status == DMA_COMPLETE) || (txstate == NULL))
@@ -583,13 +650,13 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
vdesc = vchan_find_desc(&jzchan->vchan, cookie);
if (vdesc) {
/* On the issued list, so hasn't been processed yet */
- txstate->residue = jz4780_dma_desc_residue(jzchan,
+ residue = jz4780_dma_desc_residue(jzchan,
to_jz4780_dma_desc(vdesc), 0);
} else if (cookie == jzchan->desc->vdesc.tx.cookie) {
- txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
- (jzchan->curr_hwdesc + 1) % jzchan->desc->count);
- } else
- txstate->residue = 0;
+ residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
+ jzchan->curr_hwdesc + 1);
+ }
+ dma_set_residue(txstate, residue);
if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
&& jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
@@ -606,8 +673,8 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
spin_lock(&jzchan->vchan.lock);
- dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id));
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+ dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
if (dcs & JZ_DMA_DCS_AR) {
dev_warn(&jzchan->vchan.chan.dev->device,
@@ -646,9 +713,9 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
uint32_t pending, dmac;
int i;
- pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP);
+ pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
- for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+ for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
if (!(pending & (1<<i)))
continue;
@@ -656,12 +723,12 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
}
/* Clear halt and address error status of all channels. */
- dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC);
+ dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
/* Clear interrupt pending status. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
return IRQ_HANDLED;
}
@@ -728,7 +795,7 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
data.channel = dma_spec->args[1];
if (data.channel > -1) {
- if (data.channel >= JZ_DMA_NR_CHANNELS) {
+ if (data.channel >= jzdma->soc_data->nb_channels) {
dev_err(jzdma->dma_device.dev,
"device requested non-existent channel %u\n",
data.channel);
@@ -755,16 +822,29 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
static int jz4780_dma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct jz4780_dma_soc_data *soc_data;
struct jz4780_dma_dev *jzdma;
struct jz4780_dma_chan *jzchan;
struct dma_device *dd;
struct resource *res;
int i, ret;
- jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
+ if (!dev->of_node) {
+ dev_err(dev, "This driver must be probed from devicetree\n");
+ return -EINVAL;
+ }
+
+ soc_data = device_get_match_data(dev);
+ if (!soc_data)
+ return -EINVAL;
+
+ jzdma = devm_kzalloc(dev, sizeof(*jzdma)
+ + sizeof(*jzdma->chan) * soc_data->nb_channels,
+ GFP_KERNEL);
if (!jzdma)
return -ENOMEM;
+ jzdma->soc_data = soc_data;
platform_set_drvdata(pdev, jzdma);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -773,9 +853,26 @@ static int jz4780_dma_probe(struct platform_device *pdev)
return -EINVAL;
}
- jzdma->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(jzdma->base))
- return PTR_ERR(jzdma->base);
+ jzdma->chn_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(jzdma->chn_base))
+ return PTR_ERR(jzdma->chn_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ jzdma->ctrl_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(jzdma->ctrl_base))
+ return PTR_ERR(jzdma->ctrl_base);
+ } else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
+ /*
+ * On JZ4780, if the second memory resource was not supplied,
+ * assume we're using an old devicetree, and calculate the
+ * offset to the control registers.
+ */
+ jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
+ } else {
+ dev_err(dev, "failed to get I/O memory\n");
+ return -EINVAL;
+ }
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
@@ -833,13 +930,15 @@ static int jz4780_dma_probe(struct platform_device *pdev)
* Also set the FMSC bit - it increases MSC performance, so it makes
* little sense not to enable it.
*/
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC,
- JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
+ JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);
+
+ if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);
INIT_LIST_HEAD(&dd->channels);
- for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+ for (i = 0; i < soc_data->nb_channels; i++) {
jzchan = &jzdma->chan[i];
jzchan->id = i;
@@ -847,7 +946,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzchan->vchan.desc_free = jz4780_dma_desc_free;
}
- ret = dma_async_device_register(dd);
+ ret = dmaenginem_async_device_register(dd);
if (ret) {
dev_err(dev, "failed to register device\n");
goto err_disable_clk;
@@ -858,15 +957,12 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzdma);
if (ret) {
dev_err(dev, "failed to register OF DMA controller\n");
- goto err_unregister_dev;
+ goto err_disable_clk;
}
dev_info(dev, "JZ4780 DMA controller initialised\n");
return 0;
-err_unregister_dev:
- dma_async_device_unregister(dd);
-
err_disable_clk:
clk_disable_unprepare(jzdma->clk);
@@ -884,15 +980,40 @@ static int jz4780_dma_remove(struct platform_device *pdev)
free_irq(jzdma->irq, jzdma);
- for (i = 0; i < JZ_DMA_NR_CHANNELS; i++)
+ for (i = 0; i < jzdma->soc_data->nb_channels; i++)
tasklet_kill(&jzdma->chan[i].vchan.task);
- dma_async_device_unregister(&jzdma->dma_device);
return 0;
}
+static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
+ .nb_channels = 6,
+ .transfer_ord_max = 5,
+};
+
+static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
+ .nb_channels = 6,
+ .transfer_ord_max = 5,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
+ .nb_channels = 6,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
+static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
+ .nb_channels = 32,
+ .transfer_ord_max = 7,
+ .flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
static const struct of_device_id jz4780_dma_dt_match[] = {
- { .compatible = "ingenic,jz4780-dma", .data = NULL },
+ { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
+ { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
+ { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
+ { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
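/*
 * Sketch of the lookup path, assuming the driver core matched one of
 * the compatible strings above: device_get_match_data() (from
 * <linux/property.h>) returns the .data pointer of the matching entry,
 * so probe() gets the per-SoC parameters without comparing compatible
 * strings by hand. example_probe() is illustrative only.
 */
static int example_probe(struct platform_device *pdev)
{
	const struct jz4780_dma_soc_data *soc_data;

	soc_data = device_get_match_data(&pdev->dev);
	if (!soc_data)
		return -EINVAL;

	dev_info(&pdev->dev, "%u channels, max transfer order %u\n",
		 soc_data->nb_channels, soc_data->transfer_ord_max);
	return 0;
}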
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index c4eb55e3011c..b2ac1d2c5b86 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -934,7 +934,7 @@ static int dw_probe(struct platform_device *pdev)
pm_runtime_put(chip->dev);
- ret = dma_async_device_register(&dw->dma);
+ ret = dmaenginem_async_device_register(&dw->dma);
if (ret)
goto err_pm_disable;
@@ -977,8 +977,6 @@ static int dw_remove(struct platform_device *pdev)
tasklet_kill(&chan->vc.task);
}
- dma_async_device_unregister(&dw->dma);
-
return 0;
}
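/*
 * A minimal sketch of the managed registration used in both hunks
 * above: the devm-style helper ties unregistration to the device's
 * lifetime, which is why the error path and .remove() drop their
 * explicit dma_async_device_unregister() calls.
 */
static int register_dma_managed(struct dma_device *dd)
{
	/* automatically unregistered when the device is detached */
	return dmaenginem_async_device_register(dd);
}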
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index f43e6dafe446..d0c3e50b39fb 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -886,12 +886,7 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
*/
u32 s = dw->pdata->is_idma32 ? 1 : 2;
- /* Check if chan will be configured for slave transfers */
- if (!is_slave_direction(sconfig->direction))
- return -EINVAL;
-
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
- dwc->direction = sconfig->direction;
sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index f62dd0944908..f01b2c173fa6 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -284,6 +284,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "INTL9C60", 0 },
+ { "80862286", 0 },
+ { "808622C0", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index a15592383d4e..f674eb5fbbef 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -109,6 +109,9 @@
#define DMA_MAX_CHAN_DESCRIPTORS 32
struct ep93xx_dma_engine;
+static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *config);
/**
* struct ep93xx_dma_desc - EP93xx specific transaction descriptor
@@ -180,6 +183,7 @@ struct ep93xx_dma_chan {
struct list_head free_list;
u32 runtime_addr;
u32 runtime_ctrl;
+ struct dma_slave_config slave_config;
};
/**
@@ -1051,6 +1055,8 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
+ ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
+
first = NULL;
for_each_sg(sgl, sg, sg_len, i) {
size_t len = sg_dma_len(sg);
@@ -1136,6 +1142,8 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
return NULL;
}
+ ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
+
/* Split the buffer into period size chunks */
first = NULL;
for (offset = 0; offset < buf_len; offset += period_len) {
@@ -1227,6 +1235,17 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+ memcpy(&edmac->slave_config, config, sizeof(*config));
+
+ return 0;
+}
+
+static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *config)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
enum dma_slave_buswidth width;
unsigned long flags;
u32 addr, ctrl;
@@ -1234,7 +1253,7 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan,
if (!edmac->edma->m2m)
return -EINVAL;
- switch (config->direction) {
+ switch (dir) {
case DMA_DEV_TO_MEM:
width = config->src_addr_width;
addr = config->src_addr;
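/*
 * A minimal sketch of the deferred-configuration pattern introduced
 * above: .device_config only caches the dma_slave_config, and the
 * cached copy is applied when a transfer is prepared, using the
 * direction argument of the prep callback instead of the deprecated
 * config->direction field. struct example_chan and to_example_chan()
 * are hypothetical.
 */
static int example_slave_config(struct dma_chan *chan,
				struct dma_slave_config *cfg)
{
	struct example_chan *ec = to_example_chan(chan);	/* hypothetical */

	memcpy(&ec->slave_config, cfg, sizeof(*cfg));	/* cache only */
	return 0;
}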
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
new file mode 100644
index 000000000000..8876c4c1bb2c
--- /dev/null
+++ b/drivers/dma/fsl-edma-common.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
+// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
+
+#include <linux/dmapool.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "fsl-edma-common.h"
+
+#define EDMA_CR 0x00
+#define EDMA_ES 0x04
+#define EDMA_ERQ 0x0C
+#define EDMA_EEI 0x14
+#define EDMA_SERQ 0x1B
+#define EDMA_CERQ 0x1A
+#define EDMA_SEEI 0x19
+#define EDMA_CEEI 0x18
+#define EDMA_CINT 0x1F
+#define EDMA_CERR 0x1E
+#define EDMA_SSRT 0x1D
+#define EDMA_CDNE 0x1C
+#define EDMA_INTR 0x24
+#define EDMA_ERR 0x2C
+
+#define EDMA64_ERQH 0x08
+#define EDMA64_EEIH 0x10
+#define EDMA64_SERQ 0x18
+#define EDMA64_CERQ 0x19
+#define EDMA64_SEEI 0x1a
+#define EDMA64_CEEI 0x1b
+#define EDMA64_CINT 0x1c
+#define EDMA64_CERR 0x1d
+#define EDMA64_SSRT 0x1e
+#define EDMA64_CDNE 0x1f
+#define EDMA64_INTH 0x20
+#define EDMA64_INTL 0x24
+#define EDMA64_ERRH 0x28
+#define EDMA64_ERRL 0x2c
+
+#define EDMA_TCD 0x1000
+
+static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ if (fsl_chan->edma->version == v1) {
+ edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
+ edma_writeb(fsl_chan->edma, ch, regs->serq);
+ } else {
+ /*
+ * ColdFire is big endian and natively accesses
+ * big-endian I/O peripherals.
+ */
+ iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
+ iowrite8(ch, regs->serq);
+ }
+}
+
+void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ if (fsl_chan->edma->version == v1) {
+ edma_writeb(fsl_chan->edma, ch, regs->cerq);
+ edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
+ } else {
+ /*
+ * ColdFire is big endian and natively accesses
+ * big-endian I/O peripherals.
+ */
+ iowrite8(ch, regs->cerq);
+ iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
+ }
+}
+EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
+
+void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
+ unsigned int slot, bool enable)
+{
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+ void __iomem *muxaddr;
+ unsigned int chans_per_mux, ch_off;
+
+ chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
+ ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+ muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
+ slot = EDMAMUX_CHCFG_SOURCE(slot);
+
+ if (enable)
+ iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
+ else
+ iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
+
+static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+{
+ switch (addr_width) {
+ case 1:
+ return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
+ case 2:
+ return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
+ case 4:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ case 8:
+ return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
+ default:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ }
+}
+
+void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct fsl_edma_desc *fsl_desc;
+ int i;
+
+ fsl_desc = to_fsl_edma_desc(vdesc);
+ for (i = 0; i < fsl_desc->n_tcds; i++)
+ dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
+
+int fsl_edma_terminate_all(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma_disable_request(fsl_chan);
+ fsl_chan->edesc = NULL;
+ fsl_chan->idle = true;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
+
+int fsl_edma_pause(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma_disable_request(fsl_chan);
+ fsl_chan->status = DMA_PAUSED;
+ fsl_chan->idle = true;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_pause);
+
+int fsl_edma_resume(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_resume);
+
+int fsl_edma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+ memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
+
+static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
+ struct virt_dma_desc *vdesc, bool in_progress)
+{
+ struct fsl_edma_desc *edesc = fsl_chan->edesc;
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+ enum dma_transfer_direction dir = edesc->dirn;
+ dma_addr_t cur_addr, dma_addr;
+ size_t len, size;
+ int i;
+
+ /* calculate the total size in this desc */
+ for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
+ len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+
+ if (!in_progress)
+ return len;
+
+ if (dir == DMA_MEM_TO_DEV)
+ cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
+ else
+ cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
+
+ /* figure out which TCDs have finished and compute the residue */
+ for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+ size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ if (dir == DMA_MEM_TO_DEV)
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
+ else
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
+
+ len -= size;
+ if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
+ len += dma_addr + size - cur_addr;
+ break;
+ }
+ }
+
+ return len;
+}
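/*
 * Worked example with illustrative numbers: two TCDs, each with
 * nbytes = 16 and biter = 4, cover 2 * 16 * 4 = 128 bytes in total.
 * If the engine's current address lies 24 bytes into the second TCD's
 * buffer, the loop first subtracts the finished first TCD
 * (len = 128 - 64 = 64), then subtracts the second and adds back its
 * unfinished tail (0 + 64 - 24 = 40), reporting a residue of 40 bytes.
 */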
+
+enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ if (!txstate)
+ return fsl_chan->status;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
+ if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
+ txstate->residue =
+ fsl_edma_desc_residue(fsl_chan, vdesc, true);
+ else if (vdesc)
+ txstate->residue =
+ fsl_edma_desc_residue(fsl_chan, vdesc, false);
+ else
+ txstate->residue = 0;
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ return fsl_chan->status;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
+
+static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
+ struct fsl_edma_hw_tcd *tcd)
+{
+ struct fsl_edma_engine *edma = fsl_chan->edma;
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ /*
+ * TCD parameters are stored in struct fsl_edma_hw_tcd in little
+ * endian format. However, we need to load the TCD registers in
+ * big- or little-endian format, as dictated by the eDMA engine's
+ * endianness model.
+ */
+ edma_writew(edma, 0, &regs->tcd[ch].csr);
+ edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
+ edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
+
+ edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
+ edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
+
+ edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
+ edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
+
+ edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
+ edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
+ edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
+
+ edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
+ &regs->tcd[ch].dlast_sga);
+
+ edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
+}
+
+static inline
+void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+ u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
+ u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+ bool disable_req, bool enable_sg)
+{
+ u16 csr = 0;
+
+ /*
+ * eDMA hardware SGs require the TCDs to be stored in little
+ * endian format irrespective of the register endian model.
+ * So we store the values in memory in little-endian format and let
+ * fsl_edma_set_tcd_regs() do any required swap.
+ */
+ tcd->saddr = cpu_to_le32(src);
+ tcd->daddr = cpu_to_le32(dst);
+
+ tcd->attr = cpu_to_le16(attr);
+
+ tcd->soff = cpu_to_le16(soff);
+
+ tcd->nbytes = cpu_to_le32(nbytes);
+ tcd->slast = cpu_to_le32(slast);
+
+ tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
+ tcd->doff = cpu_to_le16(doff);
+
+ tcd->dlast_sga = cpu_to_le32(dlast_sga);
+
+ tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
+ if (major_int)
+ csr |= EDMA_TCD_CSR_INT_MAJOR;
+
+ if (disable_req)
+ csr |= EDMA_TCD_CSR_D_REQ;
+
+ if (enable_sg)
+ csr |= EDMA_TCD_CSR_E_SG;
+
+ tcd->csr = cpu_to_le16(csr);
+}
+
+static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
+ int sg_len)
+{
+ struct fsl_edma_desc *fsl_desc;
+ int i;
+
+ fsl_desc = kzalloc(sizeof(*fsl_desc) +
+ sizeof(struct fsl_edma_sw_tcd) *
+ sg_len, GFP_NOWAIT);
+ if (!fsl_desc)
+ return NULL;
+
+ fsl_desc->echan = fsl_chan;
+ fsl_desc->n_tcds = sg_len;
+ for (i = 0; i < sg_len; i++) {
+ fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
+ GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
+ if (!fsl_desc->tcd[i].vtcd)
+ goto err;
+ }
+ return fsl_desc;
+
+err:
+ while (--i >= 0)
+ dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+ return NULL;
+}
+
+struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct fsl_edma_desc *fsl_desc;
+ dma_addr_t dma_buf_next;
+ int sg_len, i;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ sg_len = buf_len / period_len;
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = true;
+ fsl_desc->dirn = direction;
+
+ dma_buf_next = dma_addr;
+ if (direction == DMA_MEM_TO_DEV) {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+ nbytes = fsl_chan->cfg.dst_addr_width *
+ fsl_chan->cfg.dst_maxburst;
+ } else {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+ nbytes = fsl_chan->cfg.src_addr_width *
+ fsl_chan->cfg.src_maxburst;
+ }
+
+ iter = period_len / nbytes;
+
+ for (i = 0; i < sg_len; i++) {
+ if (dma_buf_next >= dma_addr + buf_len)
+ dma_buf_next = dma_addr;
+
+ /* get next sg's physical address */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = dma_buf_next;
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = fsl_chan->cfg.dst_addr_width;
+ doff = 0;
+ } else {
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = dma_buf_next;
+ soff = 0;
+ doff = fsl_chan->cfg.src_addr_width;
+ }
+
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
+ fsl_chan->attr, soff, nbytes, 0, iter,
+ iter, doff, last_sg, true, false, true);
+ dma_buf_next += period_len;
+ }
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
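/*
 * A minimal client-side sketch using only generic dmaengine calls
 * (<linux/dmaengine.h>); rx_period_done is a hypothetical per-period
 * callback and the buffer parameters are illustrative:
 */
static void rx_period_done(void *param);	/* hypothetical callback */

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = rx_period_done;	/* fires once per period */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}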
+
+struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct fsl_edma_desc *fsl_desc;
+ struct scatterlist *sg;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+ int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = false;
+ fsl_desc->dirn = direction;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+ nbytes = fsl_chan->cfg.dst_addr_width *
+ fsl_chan->cfg.dst_maxburst;
+ } else {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+ nbytes = fsl_chan->cfg.src_addr_width *
+ fsl_chan->cfg.src_maxburst;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ /* get next sg's physical address */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = sg_dma_address(sg);
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = fsl_chan->cfg.dst_addr_width;
+ doff = 0;
+ } else {
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = sg_dma_address(sg);
+ soff = 0;
+ doff = fsl_chan->cfg.src_addr_width;
+ }
+
+ iter = sg_dma_len(sg) / nbytes;
+ if (i < sg_len - 1) {
+ last_sg = fsl_desc->tcd[(i + 1)].ptcd;
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->attr, soff,
+ nbytes, 0, iter, iter, doff, last_sg,
+ false, false, true);
+ } else {
+ last_sg = 0;
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->attr, soff,
+ nbytes, 0, iter, iter, doff, last_sg,
+ true, true, false);
+ }
+ }
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
+
+void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
+{
+ struct virt_dma_desc *vdesc;
+
+ vdesc = vchan_next_desc(&fsl_chan->vchan);
+ if (!vdesc)
+ return;
+ fsl_chan->edesc = to_fsl_edma_desc(vdesc);
+ fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
+ fsl_edma_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
+
+void fsl_edma_issue_pending(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+
+ if (unlikely(fsl_chan->pm_state != RUNNING)) {
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ /* cannot submit due to suspend */
+ return;
+ }
+
+ if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
+ fsl_edma_xfer_desc(fsl_chan);
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
+
+int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+ fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
+ sizeof(struct fsl_edma_hw_tcd),
+ 32, 0);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
+
+void fsl_edma_free_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma_disable_request(fsl_chan);
+ fsl_edma_chan_mux(fsl_chan, 0, false);
+ fsl_chan->edesc = NULL;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ dma_pool_destroy(fsl_chan->tcd_pool);
+ fsl_chan->tcd_pool = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
+
+void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
+{
+ struct fsl_edma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan,
+ &dmadev->channels, vchan.chan.device_node) {
+ list_del(&chan->vchan.chan.device_node);
+ tasklet_kill(&chan->vchan.task);
+ }
+}
+EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
+
+/*
+ * On the 32-channel Vybrid/mpc577x eDMA version (called "v1" here),
+ * the register offsets differ from those of the 64-channel ColdFire
+ * mcf5441x eDMA (called "v2" here).
+ *
+ * This function sets up the register offsets for the declared version,
+ * so it must be called from xxx_edma_probe() just after the eDMA
+ * "version" and "membase" fields have been set.
+ */
+void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
+{
+ edma->regs.cr = edma->membase + EDMA_CR;
+ edma->regs.es = edma->membase + EDMA_ES;
+ edma->regs.erql = edma->membase + EDMA_ERQ;
+ edma->regs.eeil = edma->membase + EDMA_EEI;
+
+ edma->regs.serq = edma->membase + ((edma->version == v1) ?
+ EDMA_SERQ : EDMA64_SERQ);
+ edma->regs.cerq = edma->membase + ((edma->version == v1) ?
+ EDMA_CERQ : EDMA64_CERQ);
+ edma->regs.seei = edma->membase + ((edma->version == v1) ?
+ EDMA_SEEI : EDMA64_SEEI);
+ edma->regs.ceei = edma->membase + ((edma->version == v1) ?
+ EDMA_CEEI : EDMA64_CEEI);
+ edma->regs.cint = edma->membase + ((edma->version == v1) ?
+ EDMA_CINT : EDMA64_CINT);
+ edma->regs.cerr = edma->membase + ((edma->version == v1) ?
+ EDMA_CERR : EDMA64_CERR);
+ edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
+ EDMA_SSRT : EDMA64_SSRT);
+ edma->regs.cdne = edma->membase + ((edma->version == v1) ?
+ EDMA_CDNE : EDMA64_CDNE);
+ edma->regs.intl = edma->membase + ((edma->version == v1) ?
+ EDMA_INTR : EDMA64_INTL);
+ edma->regs.errl = edma->membase + ((edma->version == v1) ?
+ EDMA_ERR : EDMA64_ERRL);
+
+ if (edma->version == v2) {
+ edma->regs.erqh = edma->membase + EDMA64_ERQH;
+ edma->regs.eeih = edma->membase + EDMA64_EEIH;
+ edma->regs.errh = edma->membase + EDMA64_ERRH;
+ edma->regs.inth = edma->membase + EDMA64_INTH;
+ }
+
+ edma->regs.tcd = edma->membase + EDMA_TCD;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
new file mode 100644
index 000000000000..8917e8865959
--- /dev/null
+++ b/drivers/dma/fsl-edma-common.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ * Copyright 2018 Angelo Dureghello <angelo@sysam.it>
+ */
+#ifndef _FSL_EDMA_COMMON_H_
+#define _FSL_EDMA_COMMON_H_
+
+#include "virt-dma.h"
+
+#define EDMA_CR_EDBG BIT(1)
+#define EDMA_CR_ERCA BIT(2)
+#define EDMA_CR_ERGA BIT(3)
+#define EDMA_CR_HOE BIT(4)
+#define EDMA_CR_HALT BIT(5)
+#define EDMA_CR_CLM BIT(6)
+#define EDMA_CR_EMLM BIT(7)
+#define EDMA_CR_ECX BIT(16)
+#define EDMA_CR_CX BIT(17)
+
+#define EDMA_SEEI_SEEI(x) ((x) & GENMASK(4, 0))
+#define EDMA_CEEI_CEEI(x) ((x) & GENMASK(4, 0))
+#define EDMA_CINT_CINT(x) ((x) & GENMASK(4, 0))
+#define EDMA_CERR_CERR(x) ((x) & GENMASK(4, 0))
+
+#define EDMA_TCD_ATTR_DSIZE(x) (((x) & GENMASK(2, 0)))
+#define EDMA_TCD_ATTR_DMOD(x) (((x) & GENMASK(4, 0)) << 3)
+#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
+#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
+#define EDMA_TCD_ATTR_DSIZE_8BIT 0
+#define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0)
+#define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1)
+#define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1))
+#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0))
+#define EDMA_TCD_ATTR_SSIZE_8BIT 0
+#define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
+#define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
+#define EDMA_TCD_ATTR_SSIZE_64BIT (EDMA_TCD_ATTR_DSIZE_64BIT << 8)
+#define EDMA_TCD_ATTR_SSIZE_32BYTE (EDMA_TCD_ATTR_DSIZE_32BYTE << 8)
+
+#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
+#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
+
+#define EDMA_TCD_CSR_START BIT(0)
+#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
+#define EDMA_TCD_CSR_INT_HALF BIT(2)
+#define EDMA_TCD_CSR_D_REQ BIT(3)
+#define EDMA_TCD_CSR_E_SG BIT(4)
+#define EDMA_TCD_CSR_E_LINK BIT(5)
+#define EDMA_TCD_CSR_ACTIVE BIT(6)
+#define EDMA_TCD_CSR_DONE BIT(7)
+
+#define EDMAMUX_CHCFG_DIS 0x0
+#define EDMAMUX_CHCFG_ENBL 0x80
+#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
+
+#define DMAMUX_NR 2
+
+#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+enum fsl_edma_pm_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
+
+struct fsl_edma_hw_tcd {
+ __le32 saddr;
+ __le16 soff;
+ __le16 attr;
+ __le32 nbytes;
+ __le32 slast;
+ __le32 daddr;
+ __le16 doff;
+ __le16 citer;
+ __le32 dlast_sga;
+ __le16 csr;
+ __le16 biter;
+};
+
+/*
+ * These are iomem pointers, for both the 32-channel (v1) and
+ * 64-channel (v2) variants.
+ */
+struct edma_regs {
+ void __iomem *cr;
+ void __iomem *es;
+ void __iomem *erqh;
+ void __iomem *erql; /* aka erq on v1 */
+ void __iomem *eeih;
+ void __iomem *eeil; /* aka eei on v1 */
+ void __iomem *seei;
+ void __iomem *ceei;
+ void __iomem *serq;
+ void __iomem *cerq;
+ void __iomem *cint;
+ void __iomem *cerr;
+ void __iomem *ssrt;
+ void __iomem *cdne;
+ void __iomem *inth;
+ void __iomem *intl;
+ void __iomem *errh;
+ void __iomem *errl;
+ struct fsl_edma_hw_tcd __iomem *tcd;
+};
+
+struct fsl_edma_sw_tcd {
+ dma_addr_t ptcd;
+ struct fsl_edma_hw_tcd *vtcd;
+};
+
+struct fsl_edma_chan {
+ struct virt_dma_chan vchan;
+ enum dma_status status;
+ enum fsl_edma_pm_state pm_state;
+ bool idle;
+ u32 slave_id;
+ struct fsl_edma_engine *edma;
+ struct fsl_edma_desc *edesc;
+ struct dma_slave_config cfg;
+ u32 attr;
+ struct dma_pool *tcd_pool;
+};
+
+struct fsl_edma_desc {
+ struct virt_dma_desc vdesc;
+ struct fsl_edma_chan *echan;
+ bool iscyclic;
+ enum dma_transfer_direction dirn;
+ unsigned int n_tcds;
+ struct fsl_edma_sw_tcd tcd[];
+};
+
+enum edma_version {
+ v1, /* 32ch, Vybrid, mpc57x, etc */
+ v2, /* 64ch Coldfire */
+};
+
+struct fsl_edma_engine {
+ struct dma_device dma_dev;
+ void __iomem *membase;
+ void __iomem *muxbase[DMAMUX_NR];
+ struct clk *muxclk[DMAMUX_NR];
+ struct mutex fsl_edma_mutex;
+ u32 n_chans;
+ int txirq;
+ int errirq;
+ bool big_endian;
+ enum edma_version version;
+ struct edma_regs regs;
+ struct fsl_edma_chan chans[];
+};
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The eDMA controller's endianness is independent of the CPU core's.
+ * For the big-endian IP module, the offsets of 8-bit and 16-bit
+ * registers must also be swapped relative to their little-endian
+ * counterparts.
+ */
+static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ if (edma->big_endian)
+ return ioread32be(addr);
+ else
+ return ioread32(addr);
+}
+
+static inline void edma_writeb(struct fsl_edma_engine *edma,
+ u8 val, void __iomem *addr)
+{
+ /* swap the reg offset for these in big-endian mode */
+ if (edma->big_endian)
+ iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
+ else
+ iowrite8(val, addr);
+}
+
+static inline void edma_writew(struct fsl_edma_engine *edma,
+ u16 val, void __iomem *addr)
+{
+ /* swap the reg offset for these in big-endian mode */
+ if (edma->big_endian)
+ iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
+ else
+ iowrite16(val, addr);
+}
+
+static inline void edma_writel(struct fsl_edma_engine *edma,
+ u32 val, void __iomem *addr)
+{
+ if (edma->big_endian)
+ iowrite32be(val, addr);
+ else
+ iowrite32(val, addr);
+}
+
+static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct fsl_edma_chan, vchan.chan);
+}
+
+static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct fsl_edma_desc, vdesc);
+}
+
+void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
+void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
+ unsigned int slot, bool enable);
+void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
+int fsl_edma_terminate_all(struct dma_chan *chan);
+int fsl_edma_pause(struct dma_chan *chan);
+int fsl_edma_resume(struct dma_chan *chan);
+int fsl_edma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg);
+enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate);
+struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags);
+struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
+void fsl_edma_issue_pending(struct dma_chan *chan);
+int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
+void fsl_edma_free_chan_resources(struct dma_chan *chan);
+void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
+void fsl_edma_setup_regs(struct fsl_edma_engine *edma);
+
+#endif /* _FSL_EDMA_COMMON_H_ */
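/*
 * Worked example for the address swizzling in edma_writeb() above,
 * assuming a 32-bit big-endian register bus: the little-endian offset
 * of EDMA_SERQ is 0x1B, and 0x1B ^ 0x3 = 0x18, i.e. the byte lane sits
 * at the opposite end of its 32-bit word on a big-endian IP. For the
 * 16-bit case, an offset of 0x1A maps to 0x1A ^ 0x2 = 0x18.
 */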
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index c7568869284e..34d70112fcc9 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -13,671 +13,31 @@
* option) any later version.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
-#include "virt-dma.h"
-
-#define EDMA_CR 0x00
-#define EDMA_ES 0x04
-#define EDMA_ERQ 0x0C
-#define EDMA_EEI 0x14
-#define EDMA_SERQ 0x1B
-#define EDMA_CERQ 0x1A
-#define EDMA_SEEI 0x19
-#define EDMA_CEEI 0x18
-#define EDMA_CINT 0x1F
-#define EDMA_CERR 0x1E
-#define EDMA_SSRT 0x1D
-#define EDMA_CDNE 0x1C
-#define EDMA_INTR 0x24
-#define EDMA_ERR 0x2C
-
-#define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x))
-#define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x))
-#define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x))
-#define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x))
-#define EDMA_TCD_SLAST(x) (0x100C + 32 * (x))
-#define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x))
-#define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x))
-#define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x))
-#define EDMA_TCD_CITER(x) (0x1016 + 32 * (x))
-#define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x))
-#define EDMA_TCD_CSR(x) (0x101C + 32 * (x))
-#define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x))
-#define EDMA_TCD_BITER(x) (0x101E + 32 * (x))
-
-#define EDMA_CR_EDBG BIT(1)
-#define EDMA_CR_ERCA BIT(2)
-#define EDMA_CR_ERGA BIT(3)
-#define EDMA_CR_HOE BIT(4)
-#define EDMA_CR_HALT BIT(5)
-#define EDMA_CR_CLM BIT(6)
-#define EDMA_CR_EMLM BIT(7)
-#define EDMA_CR_ECX BIT(16)
-#define EDMA_CR_CX BIT(17)
-
-#define EDMA_SEEI_SEEI(x) ((x) & 0x1F)
-#define EDMA_CEEI_CEEI(x) ((x) & 0x1F)
-#define EDMA_CINT_CINT(x) ((x) & 0x1F)
-#define EDMA_CERR_CERR(x) ((x) & 0x1F)
-
-#define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007))
-#define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3)
-#define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8)
-#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11)
-#define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000)
-#define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100)
-#define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200)
-#define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300)
-#define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500)
-#define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000)
-#define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001)
-#define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002)
-#define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003)
-#define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005)
-
-#define EDMA_TCD_SOFF_SOFF(x) (x)
-#define EDMA_TCD_NBYTES_NBYTES(x) (x)
-#define EDMA_TCD_SLAST_SLAST(x) (x)
-#define EDMA_TCD_DADDR_DADDR(x) (x)
-#define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF)
-#define EDMA_TCD_DOFF_DOFF(x) (x)
-#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
-#define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF)
-
-#define EDMA_TCD_CSR_START BIT(0)
-#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
-#define EDMA_TCD_CSR_INT_HALF BIT(2)
-#define EDMA_TCD_CSR_D_REQ BIT(3)
-#define EDMA_TCD_CSR_E_SG BIT(4)
-#define EDMA_TCD_CSR_E_LINK BIT(5)
-#define EDMA_TCD_CSR_ACTIVE BIT(6)
-#define EDMA_TCD_CSR_DONE BIT(7)
-
-#define EDMAMUX_CHCFG_DIS 0x0
-#define EDMAMUX_CHCFG_ENBL 0x80
-#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
-
-#define DMAMUX_NR 2
-
-#define FSL_EDMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
-enum fsl_edma_pm_state {
- RUNNING = 0,
- SUSPENDED,
-};
-
-struct fsl_edma_hw_tcd {
- __le32 saddr;
- __le16 soff;
- __le16 attr;
- __le32 nbytes;
- __le32 slast;
- __le32 daddr;
- __le16 doff;
- __le16 citer;
- __le32 dlast_sga;
- __le16 csr;
- __le16 biter;
-};
-
-struct fsl_edma_sw_tcd {
- dma_addr_t ptcd;
- struct fsl_edma_hw_tcd *vtcd;
-};
-
-struct fsl_edma_slave_config {
- enum dma_transfer_direction dir;
- enum dma_slave_buswidth addr_width;
- u32 dev_addr;
- u32 burst;
- u32 attr;
-};
-
-struct fsl_edma_chan {
- struct virt_dma_chan vchan;
- enum dma_status status;
- enum fsl_edma_pm_state pm_state;
- bool idle;
- u32 slave_id;
- struct fsl_edma_engine *edma;
- struct fsl_edma_desc *edesc;
- struct fsl_edma_slave_config fsc;
- struct dma_pool *tcd_pool;
-};
-
-struct fsl_edma_desc {
- struct virt_dma_desc vdesc;
- struct fsl_edma_chan *echan;
- bool iscyclic;
- unsigned int n_tcds;
- struct fsl_edma_sw_tcd tcd[];
-};
-
-struct fsl_edma_engine {
- struct dma_device dma_dev;
- void __iomem *membase;
- void __iomem *muxbase[DMAMUX_NR];
- struct clk *muxclk[DMAMUX_NR];
- struct mutex fsl_edma_mutex;
- u32 n_chans;
- int txirq;
- int errirq;
- bool big_endian;
- struct fsl_edma_chan chans[];
-};
-
-/*
- * R/W functions for big- or little-endian registers:
- * The eDMA controller's endian is independent of the CPU core's endian.
- * For the big-endian IP module, the offset for 8-bit or 16-bit registers
- * should also be swapped opposite to that in little-endian IP.
- */
-
-static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
-{
- if (edma->big_endian)
- return ioread32be(addr);
- else
- return ioread32(addr);
-}
-
-static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
-{
- /* swap the reg offset for these in big-endian mode */
- if (edma->big_endian)
- iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
- else
- iowrite8(val, addr);
-}
-
-static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
-{
- /* swap the reg offset for these in big-endian mode */
- if (edma->big_endian)
- iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
- else
- iowrite16(val, addr);
-}
-
-static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
-{
- if (edma->big_endian)
- iowrite32be(val, addr);
- else
- iowrite32(val, addr);
-}
-
-static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct fsl_edma_chan, vchan.chan);
-}
-
-static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
-{
- return container_of(vd, struct fsl_edma_desc, vdesc);
-}
-
-static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
-{
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
-
- edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
- edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
-}
-
-static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
-{
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
-
- edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
- edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
-}
-
-static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
- unsigned int slot, bool enable)
-{
- u32 ch = fsl_chan->vchan.chan.chan_id;
- void __iomem *muxaddr;
- unsigned chans_per_mux, ch_off;
-
- chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
- ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
- muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
- slot = EDMAMUX_CHCFG_SOURCE(slot);
-
- if (enable)
- iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
- else
- iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
-}
-
-static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
-{
- switch (addr_width) {
- case 1:
- return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
- case 2:
- return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
- case 4:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- case 8:
- return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
- default:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- }
-}
-
-static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
-{
- struct fsl_edma_desc *fsl_desc;
- int i;
-
- fsl_desc = to_fsl_edma_desc(vdesc);
- for (i = 0; i < fsl_desc->n_tcds; i++)
- dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
- fsl_desc->tcd[i].ptcd);
- kfree(fsl_desc);
-}
-
-static int fsl_edma_terminate_all(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
- LIST_HEAD(head);
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- fsl_edma_disable_request(fsl_chan);
- fsl_chan->edesc = NULL;
- fsl_chan->idle = true;
- vchan_get_all_descriptors(&fsl_chan->vchan, &head);
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
- return 0;
-}
-
-static int fsl_edma_pause(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- if (fsl_chan->edesc) {
- fsl_edma_disable_request(fsl_chan);
- fsl_chan->status = DMA_PAUSED;
- fsl_chan->idle = true;
- }
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- return 0;
-}
-
-static int fsl_edma_resume(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- if (fsl_chan->edesc) {
- fsl_edma_enable_request(fsl_chan);
- fsl_chan->status = DMA_IN_PROGRESS;
- fsl_chan->idle = false;
- }
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- return 0;
-}
-
-static int fsl_edma_slave_config(struct dma_chan *chan,
- struct dma_slave_config *cfg)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-
- fsl_chan->fsc.dir = cfg->direction;
- if (cfg->direction == DMA_DEV_TO_MEM) {
- fsl_chan->fsc.dev_addr = cfg->src_addr;
- fsl_chan->fsc.addr_width = cfg->src_addr_width;
- fsl_chan->fsc.burst = cfg->src_maxburst;
- fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
- } else if (cfg->direction == DMA_MEM_TO_DEV) {
- fsl_chan->fsc.dev_addr = cfg->dst_addr;
- fsl_chan->fsc.addr_width = cfg->dst_addr_width;
- fsl_chan->fsc.burst = cfg->dst_maxburst;
- fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
- } else {
- return -EINVAL;
- }
- return 0;
-}
-
-static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
- struct virt_dma_desc *vdesc, bool in_progress)
-{
- struct fsl_edma_desc *edesc = fsl_chan->edesc;
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
- enum dma_transfer_direction dir = fsl_chan->fsc.dir;
- dma_addr_t cur_addr, dma_addr;
- size_t len, size;
- int i;
-
- /* calculate the total size in this desc */
- for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
- len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
-
- if (!in_progress)
- return len;
-
- if (dir == DMA_MEM_TO_DEV)
- cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
- else
- cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
-
- /* figure out the finished and calculate the residue */
- for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
- if (dir == DMA_MEM_TO_DEV)
- dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
- else
- dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
-
- len -= size;
- if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
- len += dma_addr + size - cur_addr;
- break;
- }
- }
-
- return len;
-}
-
-static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- struct virt_dma_desc *vdesc;
- enum dma_status status;
- unsigned long flags;
-
- status = dma_cookie_status(chan, cookie, txstate);
- if (status == DMA_COMPLETE)
- return status;
-
- if (!txstate)
- return fsl_chan->status;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
- if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
- txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
- else if (vdesc)
- txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
- else
- txstate->residue = 0;
-
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-
- return fsl_chan->status;
-}
-
-static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
- struct fsl_edma_hw_tcd *tcd)
-{
- struct fsl_edma_engine *edma = fsl_chan->edma;
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
-
- /*
- * TCD parameters are stored in struct fsl_edma_hw_tcd in little
- * endian format. However, we need to load the TCD registers in
- * big- or little-endian obeying the eDMA engine model endian.
- */
- edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch));
- edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch));
- edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch));
-
- edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch));
- edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch));
-
- edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch));
- edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch));
-
- edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch));
- edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch));
- edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch));
-
- edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch));
-
- edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch));
-}
-
-static inline
-void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
- u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
- u16 biter, u16 doff, u32 dlast_sga, bool major_int,
- bool disable_req, bool enable_sg)
-{
- u16 csr = 0;
-
- /*
- * eDMA hardware SGs require the TCDs to be stored in little
- * endian format irrespective of the register endian model.
- * So we put the value in little endian in memory, waiting
- * for fsl_edma_set_tcd_regs doing the swap.
- */
- tcd->saddr = cpu_to_le32(src);
- tcd->daddr = cpu_to_le32(dst);
-
- tcd->attr = cpu_to_le16(attr);
-
- tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
-
- tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
- tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
-
- tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
- tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
-
- tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
-
- tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
- if (major_int)
- csr |= EDMA_TCD_CSR_INT_MAJOR;
-
- if (disable_req)
- csr |= EDMA_TCD_CSR_D_REQ;
-
- if (enable_sg)
- csr |= EDMA_TCD_CSR_E_SG;
-
- tcd->csr = cpu_to_le16(csr);
-}
-
-static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
- int sg_len)
-{
- struct fsl_edma_desc *fsl_desc;
- int i;
-
- fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
- GFP_NOWAIT);
- if (!fsl_desc)
- return NULL;
-
- fsl_desc->echan = fsl_chan;
- fsl_desc->n_tcds = sg_len;
- for (i = 0; i < sg_len; i++) {
- fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
- GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
- if (!fsl_desc->tcd[i].vtcd)
- goto err;
- }
- return fsl_desc;
-
-err:
- while (--i >= 0)
- dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
- fsl_desc->tcd[i].ptcd);
- kfree(fsl_desc);
- return NULL;
-}
-
-static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
- struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- struct fsl_edma_desc *fsl_desc;
- dma_addr_t dma_buf_next;
- int sg_len, i;
- u32 src_addr, dst_addr, last_sg, nbytes;
- u16 soff, doff, iter;
-
- if (!is_slave_direction(fsl_chan->fsc.dir))
- return NULL;
-
- sg_len = buf_len / period_len;
- fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
- if (!fsl_desc)
- return NULL;
- fsl_desc->iscyclic = true;
-
- dma_buf_next = dma_addr;
- nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
- iter = period_len / nbytes;
-
- for (i = 0; i < sg_len; i++) {
- if (dma_buf_next >= dma_addr + buf_len)
- dma_buf_next = dma_addr;
-
- /* get next sg's physical address */
- last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
- if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
- src_addr = dma_buf_next;
- dst_addr = fsl_chan->fsc.dev_addr;
- soff = fsl_chan->fsc.addr_width;
- doff = 0;
- } else {
- src_addr = fsl_chan->fsc.dev_addr;
- dst_addr = dma_buf_next;
- soff = 0;
- doff = fsl_chan->fsc.addr_width;
- }
-
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
- fsl_chan->fsc.attr, soff, nbytes, 0, iter,
- iter, doff, last_sg, true, false, true);
- dma_buf_next += period_len;
- }
-
- return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
-}
-
-static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- struct fsl_edma_desc *fsl_desc;
- struct scatterlist *sg;
- u32 src_addr, dst_addr, last_sg, nbytes;
- u16 soff, doff, iter;
- int i;
-
- if (!is_slave_direction(fsl_chan->fsc.dir))
- return NULL;
-
- fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
- if (!fsl_desc)
- return NULL;
- fsl_desc->iscyclic = false;
-
- nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
- for_each_sg(sgl, sg, sg_len, i) {
- /* get next sg's physical address */
- last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
- if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
- src_addr = sg_dma_address(sg);
- dst_addr = fsl_chan->fsc.dev_addr;
- soff = fsl_chan->fsc.addr_width;
- doff = 0;
- } else {
- src_addr = fsl_chan->fsc.dev_addr;
- dst_addr = sg_dma_address(sg);
- soff = 0;
- doff = fsl_chan->fsc.addr_width;
- }
-
- iter = sg_dma_len(sg) / nbytes;
- if (i < sg_len - 1) {
- last_sg = fsl_desc->tcd[(i + 1)].ptcd;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
- dst_addr, fsl_chan->fsc.attr, soff,
- nbytes, 0, iter, iter, doff, last_sg,
- false, false, true);
- } else {
- last_sg = 0;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
- dst_addr, fsl_chan->fsc.attr, soff,
- nbytes, 0, iter, iter, doff, last_sg,
- true, true, false);
- }
- }
-
- return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
-}
-
-static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
-{
- struct virt_dma_desc *vdesc;
-
- vdesc = vchan_next_desc(&fsl_chan->vchan);
- if (!vdesc)
- return;
- fsl_chan->edesc = to_fsl_edma_desc(vdesc);
- fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
- fsl_edma_enable_request(fsl_chan);
- fsl_chan->status = DMA_IN_PROGRESS;
- fsl_chan->idle = false;
-}
+#include "fsl-edma-common.h"
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int intr, ch;
- void __iomem *base_addr;
+ struct edma_regs *regs = &fsl_edma->regs;
struct fsl_edma_chan *fsl_chan;
- base_addr = fsl_edma->membase;
-
- intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
+ intr = edma_readl(fsl_edma, regs->intl);
if (!intr)
return IRQ_NONE;
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (intr & (0x1 << ch)) {
- edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
- base_addr + EDMA_CINT);
+ edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
fsl_chan = &fsl_edma->chans[ch];
@@ -705,16 +65,16 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int err, ch;
+ struct edma_regs *regs = &fsl_edma->regs;
- err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
+ err = edma_readl(fsl_edma, regs->errl);
if (!err)
return IRQ_NONE;
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (err & (0x1 << ch)) {
fsl_edma_disable_request(&fsl_edma->chans[ch]);
- edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
- fsl_edma->membase + EDMA_CERR);
+ edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
fsl_edma->chans[ch].status = DMA_ERROR;
fsl_edma->chans[ch].idle = true;
}
@@ -730,25 +90,6 @@ static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
return fsl_edma_err_handler(irq, dev_id);
}
-static void fsl_edma_issue_pending(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-
- if (unlikely(fsl_chan->pm_state != RUNNING)) {
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- /* cannot submit due to suspend */
- return;
- }
-
- if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
- fsl_edma_xfer_desc(fsl_chan);
-
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-}
-
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
@@ -781,34 +122,6 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
-static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-
- fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
- sizeof(struct fsl_edma_hw_tcd),
- 32, 0);
- return 0;
-}
-
-static void fsl_edma_free_chan_resources(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
- LIST_HEAD(head);
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- fsl_edma_disable_request(fsl_chan);
- fsl_edma_chan_mux(fsl_chan, 0, false);
- fsl_chan->edesc = NULL;
- vchan_get_all_descriptors(&fsl_chan->vchan, &head);
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-
- vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
- dma_pool_destroy(fsl_chan->tcd_pool);
- fsl_chan->tcd_pool = NULL;
-}
-
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
@@ -876,6 +189,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
struct fsl_edma_chan *fsl_chan;
+ struct edma_regs *regs;
struct resource *res;
int len, chans;
int ret, i;
@@ -891,6 +205,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (!fsl_edma)
return -ENOMEM;
+ fsl_edma->version = v1;
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
@@ -899,6 +214,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
+ fsl_edma_setup_regs(fsl_edma);
+ regs = &fsl_edma->regs;
+
for (i = 0; i < DMAMUX_NR; i++) {
char clkname[32];
@@ -939,11 +257,11 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
- edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
- edma_writel(fsl_edma, ~0, fsl_edma->membase + EDMA_INTR);
+ edma_writel(fsl_edma, ~0, regs->intl);
ret = fsl_edma_irq_init(pdev, fsl_edma);
if (ret)
return ret;
@@ -990,22 +308,11 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
/* enable round robin arbitration */
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
-static void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
-{
- struct fsl_edma_chan *chan, *_chan;
-
- list_for_each_entry_safe(chan, _chan,
- &dmadev->channels, vchan.chan.device_node) {
- list_del(&chan->vchan.chan.device_node);
- tasklet_kill(&chan->vchan.task);
- }
-}
-
static int fsl_edma_remove(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1048,18 +355,18 @@ static int fsl_edma_resume_early(struct device *dev)
{
struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
struct fsl_edma_chan *fsl_chan;
+ struct edma_regs *regs = &fsl_edma->regs;
int i;
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
fsl_chan->pm_state = RUNNING;
- edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA,
- fsl_edma->membase + EDMA_CR);
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 1117b5123a6f..9d360a3fbae3 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -987,7 +987,7 @@ static void dma_do_tasklet(unsigned long data)
chan_dbg(chan, "tasklet entry\n");
- spin_lock_bh(&chan->desc_lock);
+ spin_lock(&chan->desc_lock);
/* the hardware is now idle and ready for more */
chan->idle = true;
@@ -995,7 +995,7 @@ static void dma_do_tasklet(unsigned long data)
/* Run all cleanup for descriptors which have been completed */
fsldma_cleanup_descriptors(chan);
- spin_unlock_bh(&chan->desc_lock);
+ spin_unlock(&chan->desc_lock);
chan_dbg(chan, "tasklet exit\n");
}
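/*
 * Locking context for the hunk above: tasklets run in softirq context,
 * where bottom halves are already disabled, so the _bh lock variants
 * in dma_do_tasklet() were redundant; plain spin_lock()/spin_unlock()
 * gives the same protection here.
 */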
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 202ffa9f7611..e06f20272fd7 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -348,10 +348,6 @@ static int hsu_dma_slave_config(struct dma_chan *chan,
{
struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
- /* Check if chan will be configured for slave transfers */
- if (!is_slave_direction(config->direction))
- return -EINVAL;
-
memcpy(&hsuc->config, config, sizeof(hsuc->config));
return 0;
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 1fbf9cb9b742..0baf9797cc09 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -142,9 +142,8 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
{
struct idma64_chan *idma64c = &idma64->chan[c];
struct idma64_desc *desc;
- unsigned long flags;
- spin_lock_irqsave(&idma64c->vchan.lock, flags);
+ spin_lock(&idma64c->vchan.lock);
desc = idma64c->desc;
if (desc) {
if (status_err & (1 << c)) {
@@ -161,7 +160,7 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
if (idma64c->desc == NULL || desc->status == DMA_ERROR)
idma64_stop_transfer(idma64c);
}
- spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+ spin_unlock(&idma64c->vchan.lock);
}
static irqreturn_t idma64_irq(int irq, void *dev)
@@ -408,10 +407,6 @@ static int idma64_slave_config(struct dma_chan *chan,
{
struct idma64_chan *idma64c = to_idma64_chan(chan);
- /* Check if chan will be configured for slave transfers */
- if (!is_slave_direction(config->direction))
- return -EINVAL;
-
memcpy(&idma64c->config, config, sizeof(idma64c->config));
convert_burst(&idma64c->config.src_maxburst);
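
hsu and idma64 both drop the direction check from their device_config callbacks: the direction member of struct dma_slave_config is deprecated, and the authoritative direction arrives with each prepare call, where it can still be validated with is_slave_direction(). A sketch of the check at prep time; the foo_ name is a hypothetical stand-in:

#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction dir,
		  unsigned long flags, void *context)
{
	/* Validate here, using the per-transfer direction,
	 * rather than in device_config(). */
	if (!is_slave_direction(dir))
		return NULL;

	/* ...build and return the descriptor using dir... */
	return NULL;	/* sketch only */
}
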
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 75b6ff0415ee..c2fff3f6c9ca 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -162,6 +162,7 @@ struct imxdma_channel {
bool enabled_2d;
int slot_2d;
unsigned int irq;
+ struct dma_slave_config config;
};
enum imx_dma_type {
@@ -675,14 +676,15 @@ static int imxdma_terminate_all(struct dma_chan *chan)
return 0;
}
-static int imxdma_config(struct dma_chan *chan,
- struct dma_slave_config *dmaengine_cfg)
+static int imxdma_config_write(struct dma_chan *chan,
+ struct dma_slave_config *dmaengine_cfg,
+ enum dma_transfer_direction direction)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
unsigned int mode = 0;
- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ if (direction == DMA_DEV_TO_MEM) {
imxdmac->per_address = dmaengine_cfg->src_addr;
imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
imxdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -723,6 +725,16 @@ static int imxdma_config(struct dma_chan *chan,
return 0;
}
+static int imxdma_config(struct dma_chan *chan,
+ struct dma_slave_config *dmaengine_cfg)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+
+ memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));
+
+ return 0;
+}
+
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
@@ -905,6 +917,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
desc->desc.callback = NULL;
desc->desc.callback_param = NULL;
+ imxdma_config_write(chan, &imxdmac->config, direction);
+
return &desc->desc;
}
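
The imx-dma change (and the analogous k3dma and mmp_tdma changes further down) splits slave configuration in two: device_config() merely caches the dma_slave_config, and the driver-internal *_config_write() applies it at prepare time together with the direction of the transfer actually being built. Condensed, using the names from the hunk above, the split looks like this:

static int imxdma_config_sketch(struct dma_chan *chan,
				struct dma_slave_config *cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	/* Cache only; no hardware state is programmed yet. */
	memcpy(&imxdmac->config, cfg, sizeof(*cfg));
	return 0;
}

/* Later, inside a prep_* callback, once the direction is known: */
/*	imxdma_config_write(chan, &imxdmac->config, direction);   */
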
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 4fa4c06c9edb..2d810dfcdc48 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -129,7 +129,7 @@ static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
-static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
+static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1;
@@ -575,7 +575,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
* ioat_enumerate_channels - find and initialize the device's channels
* @ioat_dma: the ioat dma device to be enumerated
*/
-static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
struct ioatdma_chan *ioat_chan;
struct device *dev = &ioat_dma->pdev->dev;
@@ -594,7 +594,7 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */
if (xfercap_log == 0)
- return 0;
+ return;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) {
@@ -611,7 +611,6 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
}
}
dma->chancnt = i;
- return i;
}
/**
@@ -1205,8 +1204,15 @@ static void ioat_shutdown(struct pci_dev *pdev)
spin_lock_bh(&ioat_chan->prep_lock);
set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- del_timer_sync(&ioat_chan->timer);
spin_unlock_bh(&ioat_chan->prep_lock);
+ /*
+ * Synchronization rule for del_timer_sync():
+ * - The caller must not hold locks which would prevent
+ * completion of the timer's handler.
+ * So prep_lock cannot be held before calling it.
+ */
+ del_timer_sync(&ioat_chan->timer);
+
/* this should quiesce then reset */
ioat_reset_hw(ioat_chan);
}
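
The reordering in ioat_shutdown() exists because del_timer_sync() must never be called while holding a lock the timer handler itself may take; otherwise the synchronous wait for the handler can never complete. Sketched, under the assumption that the handler takes prep_lock:

static void ioat_shutdown_chan_sketch(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);

	/* Holding prep_lock here could deadlock: del_timer_sync()
	 * waits for the handler, and the handler takes prep_lock. */
	del_timer_sync(&ioat_chan->timer);
}
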
@@ -1252,7 +1258,6 @@ static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
- int err;
dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);
@@ -1267,12 +1272,6 @@ static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "AER uncorrect error status clear failed: %#x\n", err);
- }
-
return result;
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 6bfa217ed6d0..fdec2b6cfbb0 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -87,10 +87,10 @@ struct k3_dma_chan {
struct virt_dma_chan vc;
struct k3_dma_phy *phy;
struct list_head node;
- enum dma_transfer_direction dir;
dma_addr_t dev_addr;
enum dma_status status;
bool cyclic;
+ struct dma_slave_config slave_config;
};
struct k3_dma_phy {
@@ -118,6 +118,10 @@ struct k3_dma_dev {
#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
+static int k3_dma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *cfg);
+
static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
return container_of(chan, struct k3_dma_chan, vc.chan);
@@ -501,14 +505,8 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
copy = min_t(size_t, len, DMA_MAX_SIZE);
k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
- if (c->dir == DMA_MEM_TO_DEV) {
- src += copy;
- } else if (c->dir == DMA_DEV_TO_MEM) {
- dst += copy;
- } else {
- src += copy;
- dst += copy;
- }
+ src += copy;
+ dst += copy;
len -= copy;
} while (len);
@@ -542,6 +540,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
if (!ds)
return NULL;
num = 0;
+ k3_dma_config_write(chan, dir, &c->slave_config);
for_each_sg(sgl, sg, sglen, i) {
addr = sg_dma_address(sg);
@@ -602,6 +601,7 @@ k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
avail = buf_len;
total = avail;
num = 0;
+ k3_dma_config_write(chan, dir, &c->slave_config);
if (period_len < modulo)
modulo = period_len;
@@ -642,18 +642,26 @@ static int k3_dma_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
struct k3_dma_chan *c = to_k3_chan(chan);
+
+ memcpy(&c->slave_config, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+static int k3_dma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *cfg)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
u32 maxburst = 0, val = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
- if (cfg == NULL)
- return -EINVAL;
- c->dir = cfg->direction;
- if (c->dir == DMA_DEV_TO_MEM) {
+ if (dir == DMA_DEV_TO_MEM) {
c->ccfg = CX_CFG_DSTINCR;
c->dev_addr = cfg->src_addr;
maxburst = cfg->src_maxburst;
width = cfg->src_addr_width;
- } else if (c->dir == DMA_MEM_TO_DEV) {
+ } else if (dir == DMA_MEM_TO_DEV) {
c->ccfg = CX_CFG_SRCINCR;
c->dev_addr = cfg->dst_addr;
maxburst = cfg->dst_maxburst;
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
new file mode 100644
index 000000000000..5de1b07eddff
--- /dev/null
+++ b/drivers/dma/mcf-edma.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
+// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/dma-mcf-edma.h>
+
+#include "fsl-edma-common.h"
+
+#define EDMA_CHANNELS 64
+#define EDMA_MASK_CH(x) ((x) & GENMASK(5, 0))
+
+static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *mcf_edma = dev_id;
+ struct edma_regs *regs = &mcf_edma->regs;
+ unsigned int ch;
+ struct fsl_edma_chan *mcf_chan;
+ u64 intmap;
+
+ intmap = ioread32(regs->inth);
+ intmap <<= 32;
+ intmap |= ioread32(regs->intl);
+ if (!intmap)
+ return IRQ_NONE;
+
+ for (ch = 0; ch < mcf_edma->n_chans; ch++) {
+ if (intmap & BIT(ch)) {
+ iowrite8(EDMA_MASK_CH(ch), regs->cint);
+
+ mcf_chan = &mcf_edma->chans[ch];
+
+ spin_lock(&mcf_chan->vchan.lock);
+ if (!mcf_chan->edesc->iscyclic) {
+ list_del(&mcf_chan->edesc->vdesc.node);
+ vchan_cookie_complete(&mcf_chan->edesc->vdesc);
+ mcf_chan->edesc = NULL;
+ mcf_chan->status = DMA_COMPLETE;
+ mcf_chan->idle = true;
+ } else {
+ vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
+ }
+
+ if (!mcf_chan->edesc)
+ fsl_edma_xfer_desc(mcf_chan);
+
+ spin_unlock(&mcf_chan->vchan.lock);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *mcf_edma = dev_id;
+ struct edma_regs *regs = &mcf_edma->regs;
+ unsigned int err, ch;
+
+ err = ioread32(regs->errl);
+ if (!err)
+ return IRQ_NONE;
+
+ for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
+ if (err & BIT(ch)) {
+ fsl_edma_disable_request(&mcf_edma->chans[ch]);
+ iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
+ mcf_edma->chans[ch].status = DMA_ERROR;
+ mcf_edma->chans[ch].idle = true;
+ }
+ }
+
+ err = ioread32(regs->errh);
+ if (!err)
+ return IRQ_HANDLED;
+
+ for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
+ if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
+ fsl_edma_disable_request(&mcf_edma->chans[ch]);
+ iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
+ mcf_edma->chans[ch].status = DMA_ERROR;
+ mcf_edma->chans[ch].idle = true;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mcf_edma_irq_init(struct platform_device *pdev,
+ struct fsl_edma_engine *mcf_edma)
+{
+ int ret = 0, i;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-00-15");
+ if (!res)
+ return -EINVAL;
+
+ for (ret = 0, i = res->start; i <= res->end; ++i)
+ ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-16-55");
+ if (!res)
+ return -EINVAL;
+
+ for (ret = 0, i = res->start; i <= res->end; ++i)
+ ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+
+ ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
+ if (ret != -ENXIO) {
+ ret = request_irq(ret, mcf_edma_tx_handler,
+ 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+ }
+
+ ret = platform_get_irq_byname(pdev, "edma-err");
+ if (ret != -ENXIO) {
+ ret = request_irq(ret, mcf_edma_err_handler,
+ 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mcf_edma_irq_free(struct platform_device *pdev,
+ struct fsl_edma_engine *mcf_edma)
+{
+ int irq;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-00-15");
+ if (res) {
+ for (irq = res->start; irq <= res->end; irq++)
+ free_irq(irq, mcf_edma);
+ }
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-16-55");
+ if (res) {
+ for (irq = res->start; irq <= res->end; irq++)
+ free_irq(irq, mcf_edma);
+ }
+
+ irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
+ if (irq != -ENXIO)
+ free_irq(irq, mcf_edma);
+
+ irq = platform_get_irq_byname(pdev, "edma-err");
+ if (irq != -ENXIO)
+ free_irq(irq, mcf_edma);
+}
+
+static int mcf_edma_probe(struct platform_device *pdev)
+{
+ struct mcf_edma_platform_data *pdata;
+ struct fsl_edma_engine *mcf_edma;
+ struct fsl_edma_chan *mcf_chan;
+ struct edma_regs *regs;
+ struct resource *res;
+ int ret, i, len, chans;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -EINVAL;
+ }
+
+ chans = pdata->dma_channels;
+ len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
+ mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!mcf_edma)
+ return -ENOMEM;
+
+ mcf_edma->n_chans = chans;
+
+ /* Set up version for ColdFire edma */
+ mcf_edma->version = v2;
+ mcf_edma->big_endian = 1;
+
+ if (!mcf_edma->n_chans) {
+ dev_info(&pdev->dev, "setting default channel number to 64\n");
+ mcf_edma->n_chans = 64;
+ }
+
+ mutex_init(&mcf_edma->fsl_edma_mutex);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcf_edma->membase))
+ return PTR_ERR(mcf_edma->membase);
+
+ fsl_edma_setup_regs(mcf_edma);
+ regs = &mcf_edma->regs;
+
+ INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
+ for (i = 0; i < mcf_edma->n_chans; i++) {
+ struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];
+
+ mcf_chan->edma = mcf_edma;
+ mcf_chan->slave_id = i;
+ mcf_chan->idle = true;
+ mcf_chan->vchan.desc_free = fsl_edma_free_desc;
+ vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
+ iowrite32(0x0, &regs->tcd[i].csr);
+ }
+
+ iowrite32(~0, regs->inth);
+ iowrite32(~0, regs->intl);
+
+ ret = mcf_edma_irq_init(pdev, mcf_edma);
+ if (ret)
+ return ret;
+
+ dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);
+
+ mcf_edma->dma_dev.dev = &pdev->dev;
+ mcf_edma->dma_dev.device_alloc_chan_resources =
+ fsl_edma_alloc_chan_resources;
+ mcf_edma->dma_dev.device_free_chan_resources =
+ fsl_edma_free_chan_resources;
+ mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
+ mcf_edma->dma_dev.device_prep_dma_cyclic =
+ fsl_edma_prep_dma_cyclic;
+ mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
+ mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
+ mcf_edma->dma_dev.device_pause = fsl_edma_pause;
+ mcf_edma->dma_dev.device_resume = fsl_edma_resume;
+ mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
+ mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
+
+ mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
+ mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+ mcf_edma->dma_dev.directions =
+ BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+ mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
+ mcf_edma->dma_dev.filter.map = pdata->slave_map;
+ mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;
+
+ platform_set_drvdata(pdev, mcf_edma);
+
+ ret = dma_async_device_register(&mcf_edma->dma_dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't register Freescale eDMA engine. (%d)\n", ret);
+ return ret;
+ }
+
+ /* Enable round robin arbitration */
+ iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+
+ return 0;
+}
+
+static int mcf_edma_remove(struct platform_device *pdev)
+{
+ struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);
+
+ mcf_edma_irq_free(pdev, mcf_edma);
+ fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
+ dma_async_device_unregister(&mcf_edma->dma_dev);
+
+ return 0;
+}
+
+static struct platform_driver mcf_edma_driver = {
+ .driver = {
+ .name = "mcf-edma",
+ },
+ .probe = mcf_edma_probe,
+ .remove = mcf_edma_remove,
+};
+
+bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
+{
+ if (chan->device->dev->driver == &mcf_edma_driver.driver) {
+ struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);
+
+ return (mcf_chan->slave_id == (uintptr_t)param);
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(mcf_edma_filter_fn);
+
+static int __init mcf_edma_init(void)
+{
+ return platform_driver_register(&mcf_edma_driver);
+}
+subsys_initcall(mcf_edma_init);
+
+static void __exit mcf_edma_exit(void)
+{
+ platform_driver_unregister(&mcf_edma_driver);
+}
+module_exit(mcf_edma_exit);
+
+MODULE_ALIAS("platform:mcf-edma");
+MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 13c68b6434ce..0c56faa03e9a 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -116,6 +116,7 @@ struct mmp_tdma_chan {
u32 burst_sz;
enum dma_slave_buswidth buswidth;
enum dma_status status;
+ struct dma_slave_config slave_config;
int idx;
enum mmp_tdma_type type;
@@ -139,6 +140,10 @@ struct mmp_tdma_device {
#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
+static int mmp_tdma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *dmaengine_cfg);
+
static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
{
writel(phys, tdmac->reg_base + TDNDPR);
@@ -442,6 +447,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
if (!desc)
goto err_out;
+ mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
+
while (buf < buf_len) {
desc = &tdmac->desc_arr[i];
@@ -495,7 +502,18 @@ static int mmp_tdma_config(struct dma_chan *chan,
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ memcpy(&tdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
+
+ return 0;
+}
+
+static int mmp_tdma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *dmaengine_cfg)
+{
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+ if (dir == DMA_DEV_TO_MEM) {
tdmac->dev_addr = dmaengine_cfg->src_addr;
tdmac->burst_sz = dmaengine_cfg->src_maxburst;
tdmac->buswidth = dmaengine_cfg->src_addr_width;
@@ -504,7 +522,7 @@ static int mmp_tdma_config(struct dma_chan *chan,
tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
tdmac->buswidth = dmaengine_cfg->dst_addr_width;
}
- tdmac->dir = dmaengine_cfg->direction;
+ tdmac->dir = dir;
return mmp_tdma_config_chan(chan);
}
@@ -530,9 +548,6 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
static int mmp_tdma_remove(struct platform_device *pdev)
{
- struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
-
- dma_async_device_unregister(&tdev->device);
return 0;
}
@@ -696,7 +711,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
platform_set_drvdata(pdev, tdev);
- ret = dma_async_device_register(&tdev->device);
+ ret = dmaenginem_async_device_register(&tdev->device);
if (ret) {
dev_err(tdev->device.dev, "unable to register\n");
return ret;
@@ -708,7 +723,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (ret) {
dev_err(tdev->device.dev,
"failed to register controller\n");
- dma_async_device_unregister(&tdev->device);
+ return ret;
}
}
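
mmp_tdma switches to dmaenginem_async_device_register(), the device-managed variant, which is why both the unregister call in remove() and the one in the probe error path disappear: devres unwinds the registration when the device is unbound. A minimal sketch; foo_register() and dd are hypothetical stand-ins for driver code and a fully initialized struct dma_device:

static int foo_register(struct device *dev, struct dma_device *dd)
{
	/* Managed: the devres core calls dma_async_device_unregister()
	 * for us when dev unbinds, so neither remove() nor the probe
	 * error path needs to do it by hand. */
	return dmaenginem_async_device_register(dd);
}

The mxs-dma, pxa_dma, st_fdma, ste_dma40 and stm32-mdma hunks below make the same conversion.
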
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 969534c1a6c6..7f595355fb79 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -348,9 +348,9 @@ static void mv_xor_tasklet(unsigned long data)
{
struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
- spin_lock_bh(&chan->lock);
+ spin_lock(&chan->lock);
mv_chan_slot_cleanup(chan);
- spin_unlock_bh(&chan->lock);
+ spin_unlock(&chan->lock);
}
static struct mv_xor_desc_slot *
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index ae5182ff0128..35193b31a9e0 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -847,7 +847,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
- ret = dma_async_device_register(&mxs_dma->dma_device);
+ ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
if (ret) {
dev_err(mxs_dma->dma_device.dev, "unable to register\n");
return ret;
@@ -857,7 +857,6 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
if (ret) {
dev_err(mxs_dma->dma_device.dev,
"failed to register controller\n");
- dma_async_device_unregister(&mxs_dma->dma_device);
}
dev_info(mxs_dma->dma_device.dev, "initialized\n");
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 8c7b2e8703da..a67b292190f4 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/bitmap.h>
@@ -1095,8 +1092,8 @@ static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
if (!dchan)
return NULL;
- dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__,
- dma_spec->np->name);
+ dev_dbg(dchan->device->dev, "Entry %s(%pOFn)\n", __func__,
+ dma_spec->np);
chan = nbpf_to_chan(dchan);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 7812a6338acd..90bbcef99ef8 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -21,6 +21,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/slab.h>
#include "virt-dma.h"
@@ -161,10 +162,12 @@ struct owl_dma_lli {
* struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
* @vd: virtual DMA descriptor
* @lli_list: link list of lli nodes
+ * @cyclic: flag to indicate cyclic transfers
*/
struct owl_dma_txd {
struct virt_dma_desc vd;
struct list_head lli_list;
+ bool cyclic;
};
/**
@@ -186,11 +189,15 @@ struct owl_dma_pchan {
* @vc: wrapped virtual channel
* @pchan: the physical channel utilized by this channel
* @txd: active transaction on this channel
+ * @cfg: slave configuration for this channel
+ * @drq: physical DMA request ID for this channel
*/
struct owl_dma_vchan {
struct virt_dma_chan vc;
struct owl_dma_pchan *pchan;
struct owl_dma_txd *txd;
+ struct dma_slave_config cfg;
+ u8 drq;
};
/**
@@ -200,6 +207,7 @@ struct owl_dma_vchan {
* @clk: clock for the DMA controller
* @lock: a lock to use when change DMA controller global register
* @lli_pool: a pool for the LLI descriptors
+ * @irq: interrupt ID for the DMA controller
* @nr_pchans: the number of physical channels
* @pchans: array of data for the physical channels
* @nr_vchans: the number of virtual channels
@@ -336,9 +344,11 @@ static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
struct owl_dma_lli *prev,
- struct owl_dma_lli *next)
+ struct owl_dma_lli *next,
+ bool is_cyclic)
{
- list_add_tail(&next->node, &txd->lli_list);
+ if (!is_cyclic)
+ list_add_tail(&next->node, &txd->lli_list);
if (prev) {
prev->hw.next_lli = next->phys;
@@ -351,7 +361,9 @@ static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
struct owl_dma_lli *lli,
dma_addr_t src, dma_addr_t dst,
- u32 len, enum dma_transfer_direction dir)
+ u32 len, enum dma_transfer_direction dir,
+ struct dma_slave_config *sconfig,
+ bool is_cyclic)
{
struct owl_dma_lli_hw *hw = &lli->hw;
u32 mode;
@@ -365,6 +377,32 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
OWL_DMA_MODE_DAM_INC;
break;
+ case DMA_MEM_TO_DEV:
+ mode |= OWL_DMA_MODE_TS(vchan->drq)
+ | OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
+ | OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;
+
+ /*
+ * Hardware only supports 32bit and 8bit buswidth. Since the
+ * default is 32bit, select 8bit only when requested.
+ */
+ if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+ mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+ break;
+ case DMA_DEV_TO_MEM:
+ mode |= OWL_DMA_MODE_TS(vchan->drq)
+ | OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
+ | OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;
+
+ /*
+ * Hardware only supports 32bit and 8bit buswidth. Since the
+ * default is 32bit, select 8bit only when requested.
+ */
+ if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+ mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+ break;
default:
return -EINVAL;
}
@@ -381,7 +419,10 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
OWL_DMA_LLC_SAV_LOAD_NEXT |
OWL_DMA_LLC_DAV_LOAD_NEXT);
- hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
+ if (is_cyclic)
+ hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
+ else
+ hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
return 0;
}
@@ -443,6 +484,16 @@ static void owl_dma_terminate_pchan(struct owl_dma *od,
spin_unlock_irqrestore(&od->lock, flags);
}
+static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
+{
+ pchan_writel(pchan, 1, OWL_DMAX_PAUSE);
+}
+
+static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
+{
+ pchan_writel(pchan, 0, OWL_DMAX_PAUSE);
+}
+
static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
@@ -464,7 +515,10 @@ static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
lli = list_first_entry(&txd->lli_list,
struct owl_dma_lli, node);
- int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
+ if (txd->cyclic)
+ int_ctl = OWL_DMA_INTCTL_BLOCK;
+ else
+ int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
@@ -627,6 +681,54 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static int owl_dma_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+
+ /* Reject definitely invalid configurations */
+ if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return -EINVAL;
+
+ memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));
+
+ return 0;
+}
+
+static int owl_dma_pause(struct dma_chan *chan)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ owl_dma_pause_pchan(vchan->pchan);
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ return 0;
+}
+
+static int owl_dma_resume(struct dma_chan *chan)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ unsigned long flags;
+
+ if (!vchan->pchan && !vchan->txd)
+ return 0;
+
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ owl_dma_resume_pchan(vchan->pchan);
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ return 0;
+}
+
static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
struct owl_dma_pchan *pchan;
@@ -754,13 +856,14 @@ static struct dma_async_tx_descriptor
bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
- bytes, DMA_MEM_TO_MEM);
+ bytes, DMA_MEM_TO_MEM,
+ &vchan->cfg, txd->cyclic);
if (ret) {
dev_warn(chan2dev(chan), "failed to config lli\n");
goto err_txd_free;
}
- prev = owl_dma_add_lli(txd, prev, lli);
+ prev = owl_dma_add_lli(txd, prev, lli, false);
}
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
@@ -770,6 +873,133 @@ err_txd_free:
return NULL;
}
+static struct dma_async_tx_descriptor
+ *owl_dma_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct owl_dma *od = to_owl_dma(chan->device);
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct owl_dma_txd *txd;
+ struct owl_dma_lli *lli, *prev = NULL;
+ struct scatterlist *sg;
+ dma_addr_t addr, src = 0, dst = 0;
+ size_t len;
+ int ret, i;
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ INIT_LIST_HEAD(&txd->lli_list);
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if (len > OWL_DMA_FRAME_MAX_LENGTH) {
+ dev_err(od->dma.dev,
+ "frame length exceeds max supported length");
+ goto err_txd_free;
+ }
+
+ lli = owl_dma_alloc_lli(od);
+ if (!lli) {
+ dev_err(chan2dev(chan), "failed to allocate lli\n");
+ goto err_txd_free;
+ }
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = sconfig->dst_addr;
+ } else {
+ src = sconfig->src_addr;
+ dst = addr;
+ }
+
+ ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
+ txd->cyclic);
+ if (ret) {
+ dev_warn(chan2dev(chan), "failed to config lli\n");
+ goto err_txd_free;
+ }
+
+ prev = owl_dma_add_lli(txd, prev, lli, false);
+ }
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+ owl_dma_free_txd(od, txd);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor
+ *owl_prep_dma_cyclic(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct owl_dma *od = to_owl_dma(chan->device);
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct owl_dma_txd *txd;
+ struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
+ dma_addr_t src = 0, dst = 0;
+ unsigned int periods = buf_len / period_len;
+ int ret, i;
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ INIT_LIST_HEAD(&txd->lli_list);
+ txd->cyclic = true;
+
+ for (i = 0; i < periods; i++) {
+ lli = owl_dma_alloc_lli(od);
+ if (!lli) {
+ dev_warn(chan2dev(chan), "failed to allocate lli\n");
+ goto err_txd_free;
+ }
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = buf_addr + (period_len * i);
+ dst = sconfig->dst_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = sconfig->src_addr;
+ dst = buf_addr + (period_len * i);
+ }
+
+ ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
+ dir, sconfig, txd->cyclic);
+ if (ret) {
+ dev_warn(chan2dev(chan), "failed to config lli\n");
+ goto err_txd_free;
+ }
+
+ if (!first)
+ first = lli;
+
+ prev = owl_dma_add_lli(txd, prev, lli, false);
+ }
+
+ /* close the cyclic list */
+ owl_dma_add_lli(txd, prev, first, true);
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+ owl_dma_free_txd(od, txd);
+
+ return NULL;
+}
+
static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
struct owl_dma_vchan *vchan = to_owl_vchan(chan);
@@ -790,6 +1020,27 @@ static inline void owl_dma_free(struct owl_dma *od)
}
}
+static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct owl_dma *od = ofdma->of_dma_data;
+ struct owl_dma_vchan *vchan;
+ struct dma_chan *chan;
+ u8 drq = dma_spec->args[0];
+
+ if (drq > od->nr_vchans)
+ return NULL;
+
+ chan = dma_get_any_slave_channel(&od->dma);
+ if (!chan)
+ return NULL;
+
+ vchan = to_owl_vchan(chan);
+ vchan->drq = drq;
+
+ return chan;
+}
+
static int owl_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -833,12 +1084,19 @@ static int owl_dma_probe(struct platform_device *pdev)
spin_lock_init(&od->lock);
dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
+ dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
od->dma.dev = &pdev->dev;
od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
od->dma.device_tx_status = owl_dma_tx_status;
od->dma.device_issue_pending = owl_dma_issue_pending;
od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
+ od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
+ od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
+ od->dma.device_config = owl_dma_config;
+ od->dma.device_pause = owl_dma_pause;
+ od->dma.device_resume = owl_dma_resume;
od->dma.device_terminate_all = owl_dma_terminate_all;
od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -910,8 +1168,18 @@ static int owl_dma_probe(struct platform_device *pdev)
goto err_pool_free;
}
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ owl_dma_of_xlate, od);
+ if (ret) {
+ dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+ goto err_dma_unregister;
+ }
+
return 0;
+err_dma_unregister:
+ dma_async_device_unregister(&od->dma);
err_pool_free:
clk_disable_unprepare(od->clk);
dma_pool_destroy(od->lli_pool);
@@ -923,6 +1191,7 @@ static int owl_dma_remove(struct platform_device *pdev)
{
struct owl_dma *od = platform_get_drvdata(pdev);
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&od->dma);
/* Mask all interrupts for this execution environment */
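
owl_prep_dma_cyclic() above builds one LLI per period, chains them normally, and then calls owl_dma_add_lli() one last time with is_cyclic set, so the final hardware descriptor points back at the first without being inserted into the software lli_list a second time. A standalone sketch of that ring closure; field names are illustrative:

struct lli_sketch {
	unsigned int phys;	/* this descriptor's bus address */
	unsigned int next;	/* hw pointer to the next descriptor */
};

/* Chain the periods as usual, then point the last descriptor back
 * at the first. The software list is left untouched so list walks
 * still terminate. */
static void close_cyclic_ring(struct lli_sketch *prev,
			      struct lli_sketch *first)
{
	prev->next = first->phys;
}

The matching interrupt change is OWL_DMA_INTCTL_BLOCK instead of SUPER_BLOCK, so each period (each block) raises a completion rather than only the whole list.
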
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 4cf0d4d0cecf..25610286979f 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4360,7 +4360,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
}
static DRIVER_ATTR_RW(enable);
-static ssize_t poly_store(struct device_driver *dev, char *buf)
+static ssize_t poly_show(struct device_driver *dev, char *buf)
{
ssize_t size = 0;
u32 reg;
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index b31c28b67ad3..825725057e00 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1285,7 +1285,6 @@ static int pxad_remove(struct platform_device *op)
pxad_cleanup_debugfs(pdev);
pxad_free_channels(&pdev->slave);
- dma_async_device_unregister(&pdev->slave);
return 0;
}
@@ -1396,7 +1395,7 @@ static int pxad_init_dmadev(struct platform_device *op,
init_waitqueue_head(&c->wq_state);
}
- return dma_async_device_register(&pdev->slave);
+ return dmaenginem_async_device_register(&pdev->slave);
}
static int pxad_probe(struct platform_device *op)
@@ -1433,7 +1432,7 @@ static int pxad_probe(struct platform_device *op)
"#dma-requests set to default 32 as missing in OF: %d",
ret);
nb_requestors = 32;
- };
+ }
} else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
nb_requestors = pdata->nb_requestors;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 48ee35e2bce6..74fa2b1a6a86 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -198,6 +198,7 @@ struct rcar_dmac {
struct dma_device engine;
struct device *dev;
void __iomem *iomem;
+ struct device_dma_parameters parms;
unsigned int n_channels;
struct rcar_dmac_chan *channels;
@@ -1792,6 +1793,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
+ dmac->dev->dma_parms = &dmac->parms;
+ dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
ret = rcar_dmac_parse_of(&pdev->dev, dmac);
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h
index a1b0ef45d6a2..7459f9a13b5b 100644
--- a/drivers/dma/sh/shdma-arm.h
+++ b/drivers/dma/sh/shdma-arm.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Renesas SuperH DMA Engine support
*
* Copyright (C) 2013 Renesas Electronics, Inc.
- *
- * This is free software; you can redistribute it and/or modify it under the
- * terms of version 2 the GNU General Public License as published by the Free
- * Software Foundation.
*/
#ifndef SHDMA_ARM_H
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 6b5626e299b2..c51de498b5b4 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Dmaengine driver base library for DMA controllers, found on SH-based SoCs
*
@@ -7,10 +8,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index f999f9b0d314..be89dd894328 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SHDMA Device Tree glue
*
* Copyright (C) 2013 Renesas Electronics Inc.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/dmaengine.h>
diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c
index 96ea3828c3eb..ddc9a3578353 100644
--- a/drivers/dma/sh/shdma-r8a73a4.c
+++ b/drivers/dma/sh/shdma-r8a73a4.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs
*
* Copyright (C) 2013 Renesas Electronics, Inc.
- *
- * This is free software; you can redistribute it and/or modify it under the
- * terms of version 2 the GNU General Public License as published by the Free
- * Software Foundation.
*/
#include <linux/sh_dma.h>
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 2c0a969adc9f..bfb69909bd19 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Renesas SuperH DMA Engine support
*
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
*
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#ifndef __DMA_SHDMA_H
#define __DMA_SHDMA_H
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 04a74e0a95b7..7971ea275387 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas SuperH DMA Engine support
*
@@ -8,11 +9,6 @@
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* - DMA of SuperH does not have Hardware DMA chain mode.
* - MAX DMA size is 16MB.
*
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index 69b9564dc9d9..30cc3553cb8b 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas SUDMAC support
*
@@ -8,10 +9,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/dmaengine.h>
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 1bb1a8e09025..7f7184c3cf95 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB DMA Controller Driver
*
@@ -6,10 +7,6 @@
* based on rcar-dmac.c
* Copyright (C) 2014 Renesas Electronics Inc.
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 55df0d41355b..38d4e4f07c66 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -68,6 +68,7 @@
/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN BIT(0)
+#define SPRD_DMA_LINKLIST_EN BIT(4)
#define SPRD_DMA_WAIT_BDONE_OFFSET 24
#define SPRD_DMA_DONOT_WAIT_BDONE 1
@@ -103,7 +104,7 @@
#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET 21
#define SPRD_DMA_FIX_EN_OFFSET 20
-#define SPRD_DMA_LLIST_END_OFFSET 19
+#define SPRD_DMA_LLIST_END BIT(19)
#define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0)
/* SPRD_DMA_CHN_BLK_LEN register definition */
@@ -164,6 +165,7 @@ struct sprd_dma_desc {
struct sprd_dma_chn {
struct virt_dma_chan vc;
void __iomem *chn_base;
+ struct sprd_dma_linklist linklist;
struct dma_slave_config slave_cfg;
u32 chn_num;
u32 dev_id;
@@ -582,7 +584,8 @@ static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
}
static int sprd_dma_fill_desc(struct dma_chan *chan,
- struct sprd_dma_desc *sdesc,
+ struct sprd_dma_chn_hw *hw,
+ unsigned int sglen, int sg_index,
dma_addr_t src, dma_addr_t dst, u32 len,
enum dma_transfer_direction dir,
unsigned long flags,
@@ -590,7 +593,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
{
struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
- struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
u32 int_mode = flags & SPRD_DMA_INT_MASK;
int src_datawidth, dst_datawidth, src_step, dst_step;
@@ -670,12 +672,52 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
hw->trsf_step = temp;
+ /* link-list configuration */
+ if (schan->linklist.phy_addr) {
+ if (sg_index == sglen - 1)
+ hw->frg_len |= SPRD_DMA_LLIST_END;
+
+ hw->cfg |= SPRD_DMA_LINKLIST_EN;
+
+ /* link-list index */
+ temp = (sg_index + 1) % sglen;
+ /* Next link-list configuration's physical address offset */
+ temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;
+ /*
+ * Set the link-list pointer to point to the next link-list
+ * configuration's physical address.
+ */
+ hw->llist_ptr = schan->linklist.phy_addr + temp;
+ } else {
+ hw->llist_ptr = 0;
+ }
+
hw->frg_step = 0;
hw->src_blk_step = 0;
hw->des_blk_step = 0;
return 0;
}
+static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
+ unsigned int sglen, int sg_index,
+ dma_addr_t src, dma_addr_t dst, u32 len,
+ enum dma_transfer_direction dir,
+ unsigned long flags,
+ struct dma_slave_config *slave_cfg)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct sprd_dma_chn_hw *hw;
+
+ if (!schan->linklist.virt_addr)
+ return -EINVAL;
+
+ hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
+ sg_index * sizeof(*hw));
+
+ return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
+ dir, flags, slave_cfg);
+}
+
static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
@@ -744,10 +786,20 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 len = 0;
int ret, i;
- /* TODO: now we only support one sg for each DMA configuration. */
- if (!is_slave_direction(dir) || sglen > 1)
+ if (!is_slave_direction(dir))
return NULL;
+ if (context) {
+ struct sprd_dma_linklist *ll_cfg =
+ (struct sprd_dma_linklist *)context;
+
+ schan->linklist.phy_addr = ll_cfg->phy_addr;
+ schan->linklist.virt_addr = ll_cfg->virt_addr;
+ } else {
+ schan->linklist.phy_addr = 0;
+ schan->linklist.virt_addr = 0;
+ }
+
sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
if (!sdesc)
return NULL;
@@ -762,10 +814,25 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
src = slave_cfg->src_addr;
dst = sg_dma_address(sg);
}
+
+ /*
+ * The link-list mode needs at least 2 link-list
+ * configurations. If there is only one sg, it doesn't
+ * need to fill the link-list configuration.
+ */
+ if (sglen < 2)
+ break;
+
+ ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
+ dir, flags, slave_cfg);
+ if (ret) {
+ kfree(sdesc);
+ return NULL;
+ }
}
- ret = sprd_dma_fill_desc(chan, sdesc, src, dst, len, dir, flags,
- slave_cfg);
+ ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
+ dir, flags, slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;
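
The llist_ptr computed in sprd_dma_fill_desc() above is the bus address of the next element's hardware configuration, wrapped modulo sglen so the last entry points back at the first, plus the offset of the first register the controller reloads (SPRD_DMA_CHN_SRC_ADDR). A self-contained sketch of the arithmetic, with hypothetical sizes and addresses:

#include <stdio.h>

#define CHN_SRC_ADDR_OFST 0x40u	/* hypothetical register offset */
#define HW_CFG_SIZE 0x30u	/* hypothetical sizeof(struct sprd_dma_chn_hw) */

int main(void)
{
	unsigned int phy_addr = 0x80000000u;	/* hypothetical list base */
	unsigned int sglen = 4, i;

	for (i = 0; i < sglen; i++) {
		unsigned int next = (i + 1) % sglen;	/* last wraps to 0 */
		unsigned int ptr = phy_addr + next * HW_CFG_SIZE
				   + CHN_SRC_ADDR_OFST;
		printf("cfg %u -> llist_ptr 0x%x\n", i, ptr);
	}
	return 0;
}
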
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index bfb79bd0c6de..07c20aa2e955 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -833,7 +833,7 @@ static int st_fdma_probe(struct platform_device *pdev)
fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- ret = dma_async_device_register(&fdev->dma_device);
+ ret = dmaenginem_async_device_register(&fdev->dma_device);
if (ret) {
dev_err(&pdev->dev,
"Failed to register DMA device (%d)\n", ret);
@@ -844,15 +844,13 @@ static int st_fdma_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to register controller (%d)\n", ret);
- goto err_dma_dev;
+ goto err_rproc;
}
dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);
return 0;
-err_dma_dev:
- dma_async_device_unregister(&fdev->dma_device);
err_rproc:
st_fdma_free(fdev);
st_slim_rproc_put(fdev->slim_rproc);
@@ -867,7 +865,6 @@ static int st_fdma_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, fdev->irq, fdev);
st_slim_rproc_put(fdev->slim_rproc);
of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&fdev->dma_device);
return 0;
}
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index f4edfc56f34e..5e328bd10c27 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2839,7 +2839,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
d40_ops_init(base, &base->dma_slave);
- err = dma_async_device_register(&base->dma_slave);
+ err = dmaenginem_async_device_register(&base->dma_slave);
if (err) {
d40_err(base->dev, "Failed to register slave channels\n");
@@ -2854,12 +2854,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
d40_ops_init(base, &base->dma_memcpy);
- err = dma_async_device_register(&base->dma_memcpy);
+ err = dmaenginem_async_device_register(&base->dma_memcpy);
if (err) {
d40_err(base->dev,
"Failed to register memcpy only channels\n");
- goto unregister_slave;
+ goto exit;
}
d40_chan_init(base, &base->dma_both, base->phy_chans,
@@ -2871,18 +2871,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
d40_ops_init(base, &base->dma_both);
- err = dma_async_device_register(&base->dma_both);
+ err = dmaenginem_async_device_register(&base->dma_both);
if (err) {
d40_err(base->dev,
"Failed to register logical and physical capable channels\n");
- goto unregister_memcpy;
+ goto exit;
}
return 0;
- unregister_memcpy:
- dma_async_device_unregister(&base->dma_memcpy);
- unregister_slave:
- dma_async_device_unregister(&base->dma_slave);
exit:
return err;
}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 379e8d534e61..4903a408fc14 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -308,20 +308,12 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
- switch (threshold) {
- case STM32_DMA_FIFO_THRESHOLD_FULL:
- if (buf_len >= STM32_DMA_MAX_BURST)
- return true;
- else
- return false;
- case STM32_DMA_FIFO_THRESHOLD_HALFFULL:
- if (buf_len >= STM32_DMA_MAX_BURST / 2)
- return true;
- else
- return false;
- default:
- return false;
- }
+ /*
+ * Buffer or period length has to be aligned on FIFO depth.
+ * Otherwise bytes may be stuck within FIFO at buffer or period
+ * length.
+ */
+ return ((buf_len % ((threshold + 1) * 4)) == 0);
}
static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
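
The rewritten stm32_dma_is_burst_possible() replaces the per-threshold size comparison with an alignment test: the selected FIFO level is (threshold + 1) * 4 bytes, and unless that divides the buffer or period length, a partial FIFO's worth of data can be left stranded. The check in isolation:

#include <stdbool.h>
#include <stdio.h>

/* threshold: 0 = 1/4 full ... 3 = full; each step is 4 bytes deep */
static bool burst_possible(unsigned int buf_len, unsigned int threshold)
{
	return (buf_len % ((threshold + 1) * 4)) == 0;
}

int main(void)
{
	printf("%d\n", burst_possible(64, 3));	/* 64 % 16 == 0 -> 1 */
	printf("%d\n", burst_possible(60, 3));	/* 60 % 16 != 0 -> 0 */
	return 0;
}
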
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 06dd1725375e..390e4cae0e1a 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1656,7 +1656,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return ret;
}
- ret = dma_async_device_register(dd);
+ ret = dmaenginem_async_device_register(dd);
if (ret)
return ret;
@@ -1674,8 +1674,6 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return 0;
err_unregister:
- dma_async_device_unregister(dd);
-
return ret;
}
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 395c698edb4d..fc0f9c8766a8 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
- td_desc->desc_list_len, DMA_MEM_TO_DEV);
+ td_desc->desc_list_len, DMA_TO_DEVICE);
return &td_desc->txd;
}
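
The timb_dma change is a type fix: dma_sync_single_for_device() takes an enum dma_data_direction, not the dmaengine enum dma_transfer_direction that was being passed, and the old code only worked because DMA_MEM_TO_DEV and DMA_TO_DEVICE happen to share the value 1. The two enums, mirrored here for comparison (values as of this kernel):

/* Streaming-DMA API direction (linux/dma-direction.h): */
enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};

/* dmaengine transfer direction (linux/dmaengine.h): */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,		/* == 1, same as DMA_TO_DEVICE by accident */
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};
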
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 5762c3c383f2..c89d82aa2776 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -69,25 +69,6 @@ static const struct altr_sdram_prv_data a10_data = {
.ue_set_mask = A10_DIAGINT_TDERRA_MASK,
};
-static const struct altr_sdram_prv_data s10_data = {
- .ecc_ctrl_offset = S10_ECCCTRL1_OFST,
- .ecc_ctl_en_mask = A10_ECCCTRL1_ECC_EN,
- .ecc_stat_offset = S10_INTSTAT_OFST,
- .ecc_stat_ce_mask = A10_INTSTAT_SBEERR,
- .ecc_stat_ue_mask = A10_INTSTAT_DBEERR,
- .ecc_saddr_offset = S10_SERRADDR_OFST,
- .ecc_daddr_offset = S10_DERRADDR_OFST,
- .ecc_irq_en_offset = S10_ERRINTEN_OFST,
- .ecc_irq_en_mask = A10_ECC_IRQ_EN_MASK,
- .ecc_irq_clr_offset = S10_INTSTAT_OFST,
- .ecc_irq_clr_mask = (A10_INTSTAT_SBEERR | A10_INTSTAT_DBEERR),
- .ecc_cnt_rst_offset = S10_ECCCTRL1_OFST,
- .ecc_cnt_rst_mask = A10_ECC_CNT_RESET_MASK,
- .ce_ue_trgr_offset = S10_DIAGINTTEST_OFST,
- .ce_set_mask = A10_DIAGINT_TSERRA_MASK,
- .ue_set_mask = A10_DIAGINT_TDERRA_MASK,
-};
-
/*********************** EDAC Memory Controller Functions ****************/
/* The SDRAM controller uses the EDAC Memory Controller framework. */
@@ -239,7 +220,7 @@ static unsigned long get_total_mem(void)
static const struct of_device_id altr_sdram_ctrl_of_match[] = {
{ .compatible = "altr,sdram-edac", .data = &c5_data},
{ .compatible = "altr,sdram-edac-a10", .data = &a10_data},
- { .compatible = "altr,sdram-edac-s10", .data = &s10_data},
+ { .compatible = "altr,sdram-edac-s10", .data = &a10_data},
{},
};
MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
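
With the dedicated s10_data table removed above (and the Stratix10 probe removed in the hunks below), the "altr,sdram-edac-s10" compatible resolves to the same a10_data offset table; the remaining S10 differences are handled at run time through socfpga_is_a10()/socfpga_is_s10() and the SMC-backed regmap. How the per-compatible data is picked up in probe, sketched; the _sketch name is hypothetical:

static int altr_sdram_probe_sketch(struct platform_device *pdev)
{
	const struct of_device_id *id;
	const struct altr_sdram_prv_data *priv;

	id = of_match_device(altr_sdram_ctrl_of_match, &pdev->dev);
	if (!id)
		return -ENODEV;

	/* Same offset table for both A10 and S10 after this change. */
	priv = id->data;
	(void)priv;
	return 0;
}
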
@@ -293,6 +274,7 @@ release:
return ret;
}
+static int socfpga_is_a10(void);
static int altr_sdram_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
@@ -416,7 +398,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
goto err;
/* Only the Arria10 has separate IRQs */
- if (irq2 > 0) {
+ if (socfpga_is_a10()) {
/* Arria10 specific initialization */
res = a10_init(mc_vbase);
if (res < 0)
@@ -502,8 +484,9 @@ static int s10_protected_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct arm_smccc_res result;
+ unsigned long offset = (unsigned long)context;
- arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, reg, val, 0, 0,
+ arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, offset + reg, val, 0, 0,
0, 0, 0, &result);
return (int)result.a0;
@@ -523,8 +506,9 @@ static int s10_protected_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
struct arm_smccc_res result;
+ unsigned long offset = (unsigned long)context;
- arm_smccc_smc(INTEL_SIP_SMC_REG_READ, reg, 0, 0, 0,
+ arm_smccc_smc(INTEL_SIP_SMC_REG_READ, offset + reg, 0, 0, 0,
0, 0, 0, &result);
*val = (unsigned int)result.a1;
@@ -532,245 +516,18 @@ static int s10_protected_reg_read(void *context, unsigned int reg,
return (int)result.a0;
}
-static bool s10_sdram_writeable_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S10_ECCCTRL1_OFST:
- case S10_ERRINTEN_OFST:
- case S10_INTMODE_OFST:
- case S10_INTSTAT_OFST:
- case S10_DIAGINTTEST_OFST:
- case S10_SYSMGR_ECC_INTMASK_VAL_OFST:
- case S10_SYSMGR_ECC_INTMASK_SET_OFST:
- case S10_SYSMGR_ECC_INTMASK_CLR_OFST:
- return true;
- }
- return false;
-}
-
-static bool s10_sdram_readable_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S10_ECCCTRL1_OFST:
- case S10_ERRINTEN_OFST:
- case S10_INTMODE_OFST:
- case S10_INTSTAT_OFST:
- case S10_DERRADDR_OFST:
- case S10_SERRADDR_OFST:
- case S10_DIAGINTTEST_OFST:
- case S10_SYSMGR_ECC_INTMASK_VAL_OFST:
- case S10_SYSMGR_ECC_INTMASK_SET_OFST:
- case S10_SYSMGR_ECC_INTMASK_CLR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_SERR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_DERR_OFST:
- return true;
- }
- return false;
-}
-
-static bool s10_sdram_volatile_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S10_ECCCTRL1_OFST:
- case S10_ERRINTEN_OFST:
- case S10_INTMODE_OFST:
- case S10_INTSTAT_OFST:
- case S10_DERRADDR_OFST:
- case S10_SERRADDR_OFST:
- case S10_DIAGINTTEST_OFST:
- case S10_SYSMGR_ECC_INTMASK_VAL_OFST:
- case S10_SYSMGR_ECC_INTMASK_SET_OFST:
- case S10_SYSMGR_ECC_INTMASK_CLR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_SERR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_DERR_OFST:
- return true;
- }
- return false;
-}
-
static const struct regmap_config s10_sdram_regmap_cfg = {
.name = "s10_ddr",
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0xffffffff,
- .writeable_reg = s10_sdram_writeable_reg,
- .readable_reg = s10_sdram_readable_reg,
- .volatile_reg = s10_sdram_volatile_reg,
+ .max_register = 0xffd12228,
.reg_read = s10_protected_reg_read,
.reg_write = s10_protected_reg_write,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
-static int altr_s10_sdram_probe(struct platform_device *pdev)
-{
- const struct of_device_id *id;
- struct edac_mc_layer layers[2];
- struct mem_ctl_info *mci;
- struct altr_sdram_mc_data *drvdata;
- const struct altr_sdram_prv_data *priv;
- struct regmap *regmap;
- struct dimm_info *dimm;
- u32 read_reg;
- int irq, ret = 0;
- unsigned long mem_size;
-
- id = of_match_device(altr_sdram_ctrl_of_match, &pdev->dev);
- if (!id)
- return -ENODEV;
-
- /* Grab specific offsets and masks for Stratix10 */
- priv = of_match_node(altr_sdram_ctrl_of_match,
- pdev->dev.of_node)->data;
-
- regmap = devm_regmap_init(&pdev->dev, NULL, (void *)priv,
- &s10_sdram_regmap_cfg);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- /* Validate the SDRAM controller has ECC enabled */
- if (regmap_read(regmap, priv->ecc_ctrl_offset, &read_reg) ||
- ((read_reg & priv->ecc_ctl_en_mask) != priv->ecc_ctl_en_mask)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "No ECC/ECC disabled [0x%08X]\n", read_reg);
- return -ENODEV;
- }
-
- /* Grab memory size from device tree. */
- mem_size = get_total_mem();
- if (!mem_size) {
- edac_printk(KERN_ERR, EDAC_MC, "Unable to calculate memory size\n");
- return -ENODEV;
- }
-
- /* Ensure the SDRAM Interrupt is disabled */
- if (regmap_update_bits(regmap, priv->ecc_irq_en_offset,
- priv->ecc_irq_en_mask, 0)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error disabling SDRAM ECC IRQ\n");
- return -ENODEV;
- }
-
- /* Toggle to clear the SDRAM Error count */
- if (regmap_update_bits(regmap, priv->ecc_cnt_rst_offset,
- priv->ecc_cnt_rst_mask,
- priv->ecc_cnt_rst_mask)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error clearing SDRAM ECC count\n");
- return -ENODEV;
- }
-
- if (regmap_update_bits(regmap, priv->ecc_cnt_rst_offset,
- priv->ecc_cnt_rst_mask, 0)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error clearing SDRAM ECC count\n");
- return -ENODEV;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- edac_printk(KERN_ERR, EDAC_MC,
- "No irq %d in DT\n", irq);
- return -ENODEV;
- }
-
- layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = 1;
- layers[0].is_virt_csrow = true;
- layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = 1;
- layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
- sizeof(struct altr_sdram_mc_data));
- if (!mci)
- return -ENOMEM;
-
- mci->pdev = &pdev->dev;
- drvdata = mci->pvt_info;
- drvdata->mc_vbase = regmap;
- drvdata->data = priv;
- platform_set_drvdata(pdev, mci);
-
- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Unable to get managed device resource\n");
- ret = -ENOMEM;
- goto free;
- }
-
- mci->mtype_cap = MEM_FLAG_DDR3;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_SECDED;
- mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = dev_name(&pdev->dev);
- mci->scrub_mode = SCRUB_SW_SRC;
- mci->dev_name = dev_name(&pdev->dev);
-
- dimm = *mci->dimms;
- dimm->nr_pages = ((mem_size - 1) >> PAGE_SHIFT) + 1;
- dimm->grain = 8;
- dimm->dtype = DEV_X8;
- dimm->mtype = MEM_DDR3;
- dimm->edac_mode = EDAC_SECDED;
-
- ret = edac_mc_add_mc(mci);
- if (ret < 0)
- goto err;
-
- ret = devm_request_irq(&pdev->dev, irq, altr_sdram_mc_err_handler,
- IRQF_SHARED, dev_name(&pdev->dev), mci);
- if (ret < 0) {
- edac_mc_printk(mci, KERN_ERR,
- "Unable to request irq %d\n", irq);
- ret = -ENODEV;
- goto err2;
- }
-
- if (regmap_write(regmap, S10_SYSMGR_ECC_INTMASK_CLR_OFST,
- S10_DDR0_IRQ_MASK)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error clearing SDRAM ECC count\n");
- ret = -ENODEV;
- goto err2;
- }
-
- if (regmap_update_bits(drvdata->mc_vbase, priv->ecc_irq_en_offset,
- priv->ecc_irq_en_mask, priv->ecc_irq_en_mask)) {
- edac_mc_printk(mci, KERN_ERR,
- "Error enabling SDRAM ECC IRQ\n");
- ret = -ENODEV;
- goto err2;
- }
-
- altr_sdr_mc_create_debugfs_nodes(mci);
-
- devres_close_group(&pdev->dev, NULL);
-
- return 0;
-
-err2:
- edac_mc_del_mc(&pdev->dev);
-err:
- devres_release_group(&pdev->dev, NULL);
-free:
- edac_mc_free(mci);
- edac_printk(KERN_ERR, EDAC_MC,
- "EDAC Probe Failed; Error %d\n", ret);
-
- return ret;
-}
-
-static int altr_s10_sdram_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = platform_get_drvdata(pdev);
-
- edac_mc_del_mc(&pdev->dev);
- edac_mc_free(mci);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
/************** </Stratix10 EDAC Memory Controller Functions> ***********/
/*
@@ -804,20 +561,6 @@ static struct platform_driver altr_sdram_edac_driver = {
module_platform_driver(altr_sdram_edac_driver);
-static struct platform_driver altr_s10_sdram_edac_driver = {
- .probe = altr_s10_sdram_probe,
- .remove = altr_s10_sdram_remove,
- .driver = {
- .name = "altr_s10_sdram_edac",
-#ifdef CONFIG_PM
- .pm = &altr_sdram_pm_ops,
-#endif
- .of_match_table = altr_sdram_ctrl_of_match,
- },
-};
-
-module_platform_driver(altr_s10_sdram_edac_driver);
-
/************************* EDAC Parent Probe *************************/
static const struct of_device_id altr_edac_device_of_match[];
@@ -971,6 +714,16 @@ static const struct file_operations altr_edac_a10_device_inject_fops = {
.llseek = generic_file_llseek,
};
+static ssize_t altr_edac_a10_device_trig2(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos);
+
+static const struct file_operations altr_edac_a10_device_inject2_fops = {
+ .open = simple_open,
+ .write = altr_edac_a10_device_trig2,
+ .llseek = generic_file_llseek,
+};
+
static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
const struct edac_device_prv_data *priv)
{
@@ -1252,6 +1005,16 @@ static int __maybe_unused altr_init_memory_port(void __iomem *ioaddr, int port)
return ret;
}
+static int socfpga_is_a10(void)
+{
+ return of_machine_is_compatible("altr,socfpga-arria10");
+}
+
+static int socfpga_is_s10(void)
+{
+ return of_machine_is_compatible("altr,socfpga-stratix10");
+}
+
static __init int __maybe_unused
altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
u32 ecc_ctrl_en_mask, bool dual_port)
@@ -1266,8 +1029,32 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
/* Get the ECC Manager - parent of the device EDACs */
np_eccmgr = of_get_parent(np);
- ecc_mgr_map = syscon_regmap_lookup_by_phandle(np_eccmgr,
- "altr,sysmgr-syscon");
+
+ if (socfpga_is_a10()) {
+ ecc_mgr_map = syscon_regmap_lookup_by_phandle(np_eccmgr,
+ "altr,sysmgr-syscon");
+ } else {
+ struct device_node *sysmgr_np;
+ struct resource res;
+ uintptr_t base;
+
+ sysmgr_np = of_parse_phandle(np_eccmgr,
+ "altr,sysmgr-syscon", 0);
+ if (!sysmgr_np) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to find altr,sysmgr-syscon\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(sysmgr_np, 0, &res))
+ return -ENOMEM;
+
+ /* Need the physical address for the SMC call */
+ base = res.start;
+
+ ecc_mgr_map = regmap_init(NULL, NULL, (void *)base,
+ &s10_sdram_regmap_cfg);
+ }
of_node_put(np_eccmgr);
if (IS_ERR(ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -1325,11 +1112,6 @@ out:
return ret;
}
-static int socfpga_is_a10(void)
-{
- return of_machine_is_compatible("altr,socfpga-arria10");
-}
-
static int validate_parent_available(struct device_node *np);
static const struct of_device_id altr_edac_a10_device_of_match[];
static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
@@ -1337,7 +1119,7 @@ static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
int irq;
struct device_node *child, *np;
- if (!socfpga_is_a10())
+ if (!socfpga_is_a10() && !socfpga_is_s10())
return -ENODEV;
np = of_find_compatible_node(NULL, NULL,
@@ -1583,7 +1365,7 @@ static const struct edac_device_prv_data a10_enetecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject_fops,
+ .inject_fops = &altr_edac_a10_device_inject2_fops,
};
static int __init socfpga_init_ethernet_ecc(void)
@@ -1661,7 +1443,7 @@ static const struct edac_device_prv_data a10_usbecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject_fops,
+ .inject_fops = &altr_edac_a10_device_inject2_fops,
};
static int __init socfpga_init_usb_ecc(void)
@@ -1859,7 +1641,7 @@ static int __init socfpga_init_sdmmc_ecc(void)
int rc = -ENODEV;
struct device_node *child;
- if (!socfpga_is_a10())
+ if (!socfpga_is_a10() && !socfpga_is_s10())
return -ENODEV;
child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
@@ -1943,6 +1725,74 @@ static ssize_t altr_edac_a10_device_trig(struct file *file,
writel(priv->ue_set_mask, set_addr);
else
writel(priv->ce_set_mask, set_addr);
+
+ /* Ensure the interrupt test bits are set */
+ wmb();
+ local_irq_restore(flags);
+
+ return count;
+}
+
+/*
+ * The Stratix10 EDAC error injection functions differ slightly from the
+ * Arria10 versions; a few Arria10 peripherals can use this injection
+ * function as well. Inject the error into memory, then read it back to
+ * trigger the IRQ.
+ */
+static ssize_t altr_edac_a10_device_trig2(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dci = file->private_data;
+ struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+ const struct edac_device_prv_data *priv = drvdata->data;
+ void __iomem *set_addr = (drvdata->base + priv->set_err_ofst);
+ unsigned long flags;
+ u8 trig_type;
+
+ if (!user_buf || get_user(trig_type, user_buf))
+ return -EFAULT;
+
+ local_irq_save(flags);
+ if (trig_type == ALTR_UE_TRIGGER_CHAR) {
+ writel(priv->ue_set_mask, set_addr);
+ } else {
+ /* Setup write of 0 to first 4 bytes */
+ writel(0x0, drvdata->base + ECC_BLK_WDATA0_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
+ /* Setup write of 4 bytes */
+ writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
+ /* Setup Address to 0 */
+ writel(0x0, drvdata->base + ECC_BLK_ADDRESS_OFST);
+ /* Setup accctrl to write & data override */
+ writel(ECC_WRITE_DOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ /* Setup accctrl to read & ecc override */
+ writel(ECC_READ_EOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ /* Setup write for single bit change */
+ writel(0x1, drvdata->base + ECC_BLK_WDATA0_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
+ /* Copy Read ECC to Write ECC */
+ writel(readl(drvdata->base + ECC_BLK_RECC0_OFST),
+ drvdata->base + ECC_BLK_WECC0_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RECC1_OFST),
+ drvdata->base + ECC_BLK_WECC1_OFST);
+ /* Setup accctrl to write & ecc override & data override */
+ writel(ECC_WRITE_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ /* Setup accctrl to read & ecc overwrite & data overwrite */
+ writel(ECC_READ_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ }
+
/* Ensure the interrupt test bits are set */
wmb();
local_irq_restore(flags);
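As a usage illustration, either injector is exercised by writing one byte to the device's debugfs trigger node. A hypothetical userspace sketch; the node name ("altr_trigger") and the 'U' uncorrectable-error trigger character are assumptions carried over from the Arria10 conventions, not part of this patch:

#include <fcntl.h>
#include <unistd.h>

/* Hypothetical: any byte other than the UE trigger character requests
 * a correctable-error injection. */
static int inject_error(const char *node, int uncorrectable)
{
	char c = uncorrectable ? 'U' : 'C';
	int fd = open(node, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, &c, 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}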
@@ -2146,6 +1996,35 @@ static const struct irq_domain_ops a10_eccmgr_ic_ops = {
.xlate = irq_domain_xlate_twocell,
};
+/************** Stratix 10 EDAC Double Bit Error Handler ************/
+#define to_a10edac(p, m) container_of(p, struct altr_arria10_edac, m)
+
+/*
+ * The double bit error is handled through SError which is fatal. This is
+ * called as a panic notifier to print out the ECC error info as part of the panic.
+ */
+static int s10_edac_dberr_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct altr_arria10_edac *edac = to_a10edac(this, panic_notifier);
+ int err_addr, dberror;
+
+ regmap_read(edac->ecc_mgr_map, S10_SYSMGR_ECC_INTSTAT_DERR_OFST,
+ &dberror);
+ regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST, dberror);
+ if (dberror & S10_DDR0_IRQ_MASK) {
+ regmap_read(edac->ecc_mgr_map, A10_DERRADDR_OFST, &err_addr);
+ regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST,
+ err_addr);
+ edac_printk(KERN_ERR, EDAC_MC,
+ "EDAC: [Uncorrectable errors @ 0x%08X]\n\n",
+ err_addr);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/****************** Arria 10 EDAC Probe Function *********************/
static int altr_edac_a10_probe(struct platform_device *pdev)
{
struct altr_arria10_edac *edac;
@@ -2159,8 +2038,34 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, edac);
INIT_LIST_HEAD(&edac->a10_ecc_devices);
- edac->ecc_mgr_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ if (socfpga_is_a10()) {
+ edac->ecc_mgr_map =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"altr,sysmgr-syscon");
+ } else {
+ struct device_node *sysmgr_np;
+ struct resource res;
+ uintptr_t base;
+
+ sysmgr_np = of_parse_phandle(pdev->dev.of_node,
+ "altr,sysmgr-syscon", 0);
+ if (!sysmgr_np) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to find altr,sysmgr-syscon\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(sysmgr_np, 0, &res))
+ return -ENOMEM;
+
+ /* Need the physical address for the SMC call */
+ base = res.start;
+
+ edac->ecc_mgr_map = devm_regmap_init(&pdev->dev, NULL,
+ (void *)base,
+ &s10_sdram_regmap_cfg);
+ }
+
if (IS_ERR(edac->ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"Unable to get syscon altr,sysmgr-syscon\n");
@@ -2187,14 +2092,38 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
altr_edac_a10_irq_handler,
edac);
- edac->db_irq = platform_get_irq(pdev, 1);
- if (edac->db_irq < 0) {
- dev_err(&pdev->dev, "No DBERR IRQ resource\n");
- return edac->db_irq;
+ if (socfpga_is_a10()) {
+ edac->db_irq = platform_get_irq(pdev, 1);
+ if (edac->db_irq < 0) {
+ dev_err(&pdev->dev, "No DBERR IRQ resource\n");
+ return edac->db_irq;
+ }
+ irq_set_chained_handler_and_data(edac->db_irq,
+ altr_edac_a10_irq_handler,
+ edac);
+ } else {
+ int dberror, err_addr;
+
+ edac->panic_notifier.notifier_call = s10_edac_dberr_handler;
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &edac->panic_notifier);
+
+ /* Print out a message if an uncorrectable error was detected previously. */
+ regmap_read(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST,
+ &dberror);
+ if (dberror) {
+ regmap_read(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST,
+ &err_addr);
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Previous Boot UE detected[0x%X] @ 0x%X\n",
+ dberror, err_addr);
+ /* Reset the sticky registers */
+ regmap_write(edac->ecc_mgr_map,
+ S10_SYSMGR_UE_VAL_OFST, 0);
+ regmap_write(edac->ecc_mgr_map,
+ S10_SYSMGR_UE_ADDR_OFST, 0);
+ }
}
- irq_set_chained_handler_and_data(edac->db_irq,
- altr_edac_a10_irq_handler,
- edac);
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
@@ -2211,7 +2140,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
altr_edac_a10_device_add(edac, child);
- else if (of_device_is_compatible(child, "altr,sdram-edac-a10"))
+ else if ((of_device_is_compatible(child, "altr,sdram-edac-a10")) ||
+ (of_device_is_compatible(child, "altr,sdram-edac-s10")))
of_platform_populate(pdev->dev.of_node,
altr_sdram_ctrl_of_match,
NULL, &pdev->dev);
@@ -2222,6 +2152,7 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
static const struct of_device_id altr_edac_a10_of_match[] = {
{ .compatible = "altr,socfpga-a10-ecc-manager" },
+ { .compatible = "altr,socfpga-s10-ecc-manager" },
{},
};
MODULE_DEVICE_TABLE(of, altr_edac_a10_of_match);
@@ -2235,171 +2166,6 @@ static struct platform_driver altr_edac_a10_driver = {
};
module_platform_driver(altr_edac_a10_driver);
-/************** Stratix 10 EDAC Device Controller Functions> ************/
-
-#define to_s10edac(p, m) container_of(p, struct altr_stratix10_edac, m)
-
-/*
- * The double bit error is handled through SError which is fatal. This is
- * called as a panic notifier to printout ECC error info as part of the panic.
- */
-static int s10_edac_dberr_handler(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct altr_stratix10_edac *edac = to_s10edac(this, panic_notifier);
- int err_addr, dberror;
-
- s10_protected_reg_read(edac, S10_SYSMGR_ECC_INTSTAT_DERR_OFST,
- &dberror);
- /* Remember the UE Errors for a reboot */
- s10_protected_reg_write(edac, S10_SYSMGR_UE_VAL_OFST, dberror);
- if (dberror & S10_DDR0_IRQ_MASK) {
- s10_protected_reg_read(edac, S10_DERRADDR_OFST, &err_addr);
- /* Remember the UE Error address */
- s10_protected_reg_write(edac, S10_SYSMGR_UE_ADDR_OFST,
- err_addr);
- edac_printk(KERN_ERR, EDAC_MC,
- "EDAC: [Uncorrectable errors @ 0x%08X]\n\n",
- err_addr);
- }
-
- return NOTIFY_DONE;
-}
-
-static void altr_edac_s10_irq_handler(struct irq_desc *desc)
-{
- struct altr_stratix10_edac *edac = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int irq = irq_desc_get_irq(desc);
- int bit, sm_offset, irq_status;
-
- sm_offset = S10_SYSMGR_ECC_INTSTAT_SERR_OFST;
-
- chained_irq_enter(chip, desc);
-
- s10_protected_reg_read(NULL, sm_offset, &irq_status);
-
- for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
- irq = irq_linear_revmap(edac->domain, bit);
- if (irq)
- generic_handle_irq(irq);
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void s10_eccmgr_irq_mask(struct irq_data *d)
-{
- struct altr_stratix10_edac *edac = irq_data_get_irq_chip_data(d);
-
- s10_protected_reg_write(edac, S10_SYSMGR_ECC_INTMASK_SET_OFST,
- BIT(d->hwirq));
-}
-
-static void s10_eccmgr_irq_unmask(struct irq_data *d)
-{
- struct altr_stratix10_edac *edac = irq_data_get_irq_chip_data(d);
-
- s10_protected_reg_write(edac, S10_SYSMGR_ECC_INTMASK_CLR_OFST,
- BIT(d->hwirq));
-}
-
-static int s10_eccmgr_irqdomain_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct altr_stratix10_edac *edac = d->host_data;
-
- irq_set_chip_and_handler(irq, &edac->irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, edac);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops s10_eccmgr_ic_ops = {
- .map = s10_eccmgr_irqdomain_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static int altr_edac_s10_probe(struct platform_device *pdev)
-{
- struct altr_stratix10_edac *edac;
- struct device_node *child;
- int dberror, err_addr;
-
- edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL);
- if (!edac)
- return -ENOMEM;
-
- edac->dev = &pdev->dev;
- platform_set_drvdata(pdev, edac);
- INIT_LIST_HEAD(&edac->s10_ecc_devices);
-
- edac->irq_chip.name = pdev->dev.of_node->name;
- edac->irq_chip.irq_mask = s10_eccmgr_irq_mask;
- edac->irq_chip.irq_unmask = s10_eccmgr_irq_unmask;
- edac->domain = irq_domain_add_linear(pdev->dev.of_node, 64,
- &s10_eccmgr_ic_ops, edac);
- if (!edac->domain) {
- dev_err(&pdev->dev, "Error adding IRQ domain\n");
- return -ENOMEM;
- }
-
- edac->sb_irq = platform_get_irq(pdev, 0);
- if (edac->sb_irq < 0) {
- dev_err(&pdev->dev, "No SBERR IRQ resource\n");
- return edac->sb_irq;
- }
-
- irq_set_chained_handler_and_data(edac->sb_irq,
- altr_edac_s10_irq_handler,
- edac);
-
- edac->panic_notifier.notifier_call = s10_edac_dberr_handler;
- atomic_notifier_chain_register(&panic_notifier_list,
- &edac->panic_notifier);
-
- /* Printout a message if uncorrectable error previously. */
- s10_protected_reg_read(edac, S10_SYSMGR_UE_VAL_OFST, &dberror);
- if (dberror) {
- s10_protected_reg_read(edac, S10_SYSMGR_UE_ADDR_OFST,
- &err_addr);
- edac_printk(KERN_ERR, EDAC_DEVICE,
- "Previous Boot UE detected[0x%X] @ 0x%X\n",
- dberror, err_addr);
- /* Reset the sticky registers */
- s10_protected_reg_write(edac, S10_SYSMGR_UE_VAL_OFST, 0);
- s10_protected_reg_write(edac, S10_SYSMGR_UE_ADDR_OFST, 0);
- }
-
- for_each_child_of_node(pdev->dev.of_node, child) {
- if (!of_device_is_available(child))
- continue;
-
- if (of_device_is_compatible(child, "altr,sdram-edac-s10"))
- of_platform_populate(pdev->dev.of_node,
- altr_sdram_ctrl_of_match,
- NULL, &pdev->dev);
- }
-
- return 0;
-}
-
-static const struct of_device_id altr_edac_s10_of_match[] = {
- { .compatible = "altr,socfpga-s10-ecc-manager" },
- {},
-};
-MODULE_DEVICE_TABLE(of, altr_edac_s10_of_match);
-
-static struct platform_driver altr_edac_s10_driver = {
- .probe = altr_edac_s10_probe,
- .driver = {
- .name = "socfpga_s10_ecc_manager",
- .of_match_table = altr_edac_s10_of_match,
- },
-};
-module_platform_driver(altr_edac_s10_driver);
-
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Thor Thayer");
MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 81f0554e09de..4213cb0bb2a7 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -156,34 +156,6 @@
#define A10_INTMASK_CLR_OFST 0x10
#define A10_DDR0_IRQ_MASK BIT(17)
-/************* Stratix10 Defines **************/
-
-/* SDRAM Controller EccCtrl Register */
-#define S10_ECCCTRL1_OFST 0xF8011100
-
-/* SDRAM Controller DRAM IRQ Register */
-#define S10_ERRINTEN_OFST 0xF8011110
-
-/* SDRAM Interrupt Mode Register */
-#define S10_INTMODE_OFST 0xF801111C
-
-/* SDRAM Controller Error Status Register */
-#define S10_INTSTAT_OFST 0xF8011120
-
-/* SDRAM Controller ECC Error Address Register */
-#define S10_DERRADDR_OFST 0xF801112C
-#define S10_SERRADDR_OFST 0xF8011130
-
-/* SDRAM Controller ECC Diagnostic Register */
-#define S10_DIAGINTTEST_OFST 0xF8011124
-
-/* SDRAM Single Bit Error Count Compare Set Register */
-#define S10_SERRCNTREG_OFST 0xF801113C
-
-/* Sticky registers for Uncorrected Errors */
-#define S10_SYSMGR_UE_VAL_OFST 0xFFD12220
-#define S10_SYSMGR_UE_ADDR_OFST 0xFFD12224
-
struct altr_sdram_prv_data {
int ecc_ctrl_offset;
int ecc_ctl_en_mask;
@@ -319,15 +291,40 @@ struct altr_sdram_mc_data {
/************* Stratix10 Defines **************/
/* Stratix10 ECC Manager Defines */
-#define S10_SYSMGR_ECC_INTMASK_VAL_OFST 0xFFD12090
-#define S10_SYSMGR_ECC_INTMASK_SET_OFST 0xFFD12094
-#define S10_SYSMGR_ECC_INTMASK_CLR_OFST 0xFFD12098
+#define S10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
+#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
-#define S10_SYSMGR_ECC_INTSTAT_SERR_OFST 0xFFD1209C
-#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xFFD120A0
+/* Sticky registers for Uncorrected Errors */
+#define S10_SYSMGR_UE_VAL_OFST 0x120
+#define S10_SYSMGR_UE_ADDR_OFST 0x124
#define S10_DDR0_IRQ_MASK BIT(16)
+/* Define ECC Block Offsets for peripherals */
+#define ECC_BLK_ADDRESS_OFST 0x40
+#define ECC_BLK_RDATA0_OFST 0x44
+#define ECC_BLK_RDATA1_OFST 0x48
+#define ECC_BLK_RDATA2_OFST 0x4C
+#define ECC_BLK_RDATA3_OFST 0x50
+#define ECC_BLK_WDATA0_OFST 0x54
+#define ECC_BLK_WDATA1_OFST 0x58
+#define ECC_BLK_WDATA2_OFST 0x5C
+#define ECC_BLK_WDATA3_OFST 0x60
+#define ECC_BLK_RECC0_OFST 0x64
+#define ECC_BLK_RECC1_OFST 0x68
+#define ECC_BLK_WECC0_OFST 0x6C
+#define ECC_BLK_WECC1_OFST 0x70
+#define ECC_BLK_DBYTECTRL_OFST 0x74
+#define ECC_BLK_ACCCTRL_OFST 0x78
+#define ECC_BLK_STARTACC_OFST 0x7C
+
+#define ECC_XACT_KICK 0x10000
+#define ECC_WORD_WRITE 0xF
+#define ECC_WRITE_DOVR 0x101
+#define ECC_WRITE_EDOVR 0x103
+#define ECC_READ_EOVR 0x2
+#define ECC_READ_EDOVR 0x3
+
struct altr_edac_device_dev;
struct edac_device_prv_data {
@@ -370,6 +367,7 @@ struct altr_arria10_edac {
struct irq_domain *domain;
struct irq_chip irq_chip;
struct list_head a10_ecc_devices;
+ struct notifier_block panic_notifier;
};
/*
@@ -437,13 +435,4 @@ struct altr_arria10_edac {
#define INTEL_SIP_SMC_REG_WRITE \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
-struct altr_stratix10_edac {
- struct device *dev;
- int sb_irq;
- struct irq_domain *domain;
- struct irq_chip irq_chip;
- struct list_head s10_ecc_devices;
- struct notifier_block panic_notifier;
-};
-
#endif /* #ifndef _ALTERA_EDAC_H */
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 18aeabb1d5ee..6ea98575a402 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -211,7 +211,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- if (pvt->fam == 0x17) {
+ if (pvt->fam == 0x17 || pvt->fam == 0x18) {
__f17h_set_scrubval(pvt, scrubval);
} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
@@ -264,6 +264,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
break;
case 0x17:
+ case 0x18:
amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
if (scrubval & BIT(0)) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
@@ -1044,6 +1045,7 @@ static void determine_memory_type(struct amd64_pvt *pvt)
goto ddr3;
case 0x17:
+ case 0x18:
if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
pvt->dram_type = MEM_LRDDR4;
else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
@@ -2200,6 +2202,15 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_base_addr_to_cs_size,
}
},
+ [F17_M10H_CPUS] = {
+ .ctl_name = "F17h_M10h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
};
/*
@@ -3188,8 +3199,18 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
break;
case 0x17:
+ if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
+ fam_type = &family_types[F17_M10H_CPUS];
+ pvt->ops = &family_types[F17_M10H_CPUS].ops;
+ break;
+ }
+ /* fall through */
+ case 0x18:
fam_type = &family_types[F17_CPUS];
pvt->ops = &family_types[F17_CPUS].ops;
+
+ if (pvt->fam == 0x18)
+ family_types[F17_CPUS].ctl_name = "F18h";
break;
default:
@@ -3428,6 +3449,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 1d4b74e9a037..4242f8e39c18 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -115,6 +115,8 @@
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
#define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460
#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
/*
* Function 1 - Address Map
@@ -281,6 +283,7 @@ enum amd_families {
F16_CPUS,
F16_M30H_CPUS,
F17_CPUS,
+ F17_M10H_CPUS,
NUM_FAMILIES,
};
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 473aeec4b1da..49396bf6ad88 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -81,6 +81,18 @@ static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
(*num_dimm)++;
}
+static int get_dimm_smbios_index(u16 handle)
+{
+ struct mem_ctl_info *mci = ghes_pvt->mci;
+ int i;
+
+ for (i = 0; i < mci->tot_dimms; i++) {
+ if (mci->dimms[i]->smbios_handle == handle)
+ return i;
+ }
+ return -1;
+}
+
static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
{
struct ghes_edac_dimm_fill *dimm_fill = arg;
@@ -177,6 +189,8 @@ static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
entry->total_width, entry->data_width);
}
+ dimm->smbios_handle = entry->handle;
+
dimm_fill->count++;
}
}
@@ -327,12 +341,21 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
const char *bank = NULL, *device = NULL;
+ int index = -1;
+
dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
if (bank != NULL && device != NULL)
p += sprintf(p, "DIMM location:%s %s ", bank, device);
else
p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
mem_err->mem_dev_handle);
+
+ index = get_dimm_smbios_index(mem_err->mem_dev_handle);
+ if (index >= 0) {
+ e->top_layer = index;
+ e->enable_per_layer_report = true;
+ }
+
}
if (p > e->location)
*(p - 1) = '\0';
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index d92d56cee101..299b441647cd 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -399,7 +399,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
if (nr_pages == 0)
continue;
- edac_dbg(0, "csrow %d, channel %d%s, size = %ld Mb\n", i, j,
+ edac_dbg(0, "csrow %d, channel %d%s, size = %ld MiB\n", i, j,
stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages));
dimm->nr_pages = nr_pages;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8e120bf60624..9ef448fef12f 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -597,7 +597,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
/* DDR3 has 8 I/O banks */
size = (rows * cols * banks * ranks) >> (20 - 3);
- edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
+ edac_dbg(0, "\tdimm %d %d MiB offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
j, size,
RANKOFFSET(dimm_dod[j]),
banks, ranks, rows, cols);
@@ -1711,6 +1711,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
u32 errnum = find_first_bit(&error, 32);
if (uncorrected_error) {
+ core_err_cnt = 1;
if (ripv)
tp_event = HW_EVENT_ERR_FATAL;
else
@@ -1815,14 +1816,12 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
struct mce *mce = (struct mce *)data;
struct i7core_dev *i7_dev;
struct mem_ctl_info *mci;
- struct i7core_pvt *pvt;
i7_dev = get_i7core_dev(mce->socketid);
if (!i7_dev)
return NOTIFY_DONE;
mci = i7_dev->mci;
- pvt = mci->pvt_info;
/*
* Just let mcelog handle it if the error is
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 2ab4d61ee47e..c605089d899f 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1059,7 +1059,8 @@ static int __init mce_amd_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
- if (c->x86_vendor != X86_VENDOR_AMD)
+ if (c->x86_vendor != X86_VENDOR_AMD &&
+ c->x86_vendor != X86_VENDOR_HYGON)
return -ENODEV;
fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
@@ -1113,6 +1114,7 @@ static int __init mce_amd_init(void)
break;
case 0x17:
+ case 0x18:
xec_mask = 0x3f;
if (!boot_cpu_has(X86_FEATURE_SMCA)) {
printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index df28b65358d2..903a4f1fadcc 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -1541,7 +1541,7 @@ static struct dunit_ops dnv_ops = {
static const struct x86_cpu_id pnd2_cpuids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 07726fb00321..9353c3fc7c05 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -326,6 +326,7 @@ struct sbridge_info {
const struct interleave_pkg *interleave_pkg;
u8 max_sad;
u8 (*get_node_id)(struct sbridge_pvt *pvt);
+ u8 (*get_ha)(u8 bank);
enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
enum dev_type (*get_width)(struct sbridge_pvt *pvt, u32 mtr);
struct pci_dev *pci_vtd;
@@ -1002,6 +1003,39 @@ static u8 knl_get_node_id(struct sbridge_pvt *pvt)
return GET_BITFIELD(reg, 0, 2);
}
+/*
+ * Use the reporting bank number to determine which memory
+ * controller (also known as "ha" for "home agent"). Sandy
+ * Bridge only has one memory controller per socket, so the
+ * answer is always zero.
+ */
+static u8 sbridge_get_ha(u8 bank)
+{
+ return 0;
+}
+
+/*
+ * On Ivy Bridge, Haswell and Broadwell the error may be in a
+ * home agent bank (7, 8), or one of the per-channel memory
+ * controller banks (9 .. 16).
+ */
+static u8 ibridge_get_ha(u8 bank)
+{
+ switch (bank) {
+ case 7 ... 8:
+ return bank - 7;
+ case 9 ... 16:
+ return (bank - 9) / 4;
+ default:
+ return 0xff;
+ }
+}
+
+/* Not used, but included for safety/symmetry */
+static u8 knl_get_ha(u8 bank)
+{
+ return 0xff;
+}
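A quick sanity check of the bank-to-home-agent mapping above; a hypothetical standalone test, not part of the patch:

/* Banks 7/8 are the two home agent banks, 9..12 belong to HA0's
 * channels, 13..16 to HA1's; anything else is invalid (0xff). */
static void check_ibridge_get_ha(void)
{
	BUG_ON(ibridge_get_ha(7) != 0 || ibridge_get_ha(8) != 1);
	BUG_ON(ibridge_get_ha(9) != 0 || ibridge_get_ha(12) != 0);
	BUG_ON(ibridge_get_ha(13) != 1 || ibridge_get_ha(16) != 1);
	BUG_ON(ibridge_get_ha(6) != 0xff || ibridge_get_ha(17) != 0xff);
}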
static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{
@@ -1622,7 +1656,7 @@ static int __populate_dimms(struct mem_ctl_info *mci,
size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
- edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
size, npages,
banks, ranks, rows, cols);
@@ -2207,6 +2241,60 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
return 0;
}
+static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
+ const struct mce *m, u8 *socket,
+ u8 *ha, long *channel_mask,
+ char *msg)
+{
+ u32 reg, channel = GET_BITFIELD(m->status, 0, 3);
+ struct mem_ctl_info *new_mci;
+ struct sbridge_pvt *pvt;
+ struct pci_dev *pci_ha;
+ bool tad0;
+
+ if (channel >= NUM_CHANNELS) {
+ sprintf(msg, "Invalid channel 0x%x", channel);
+ return -EINVAL;
+ }
+
+ pvt = mci->pvt_info;
+ if (!pvt->info.get_ha) {
+ sprintf(msg, "No get_ha()");
+ return -EINVAL;
+ }
+ *ha = pvt->info.get_ha(m->bank);
+ if (*ha != 0 && *ha != 1) {
+ sprintf(msg, "Impossible bank %d", m->bank);
+ return -EINVAL;
+ }
+
+ *socket = m->socketid;
+ new_mci = get_mci_for_node_id(*socket, *ha);
+ if (!new_mci) {
+ strcpy(msg, "mci socket got corrupted!");
+ return -EINVAL;
+ }
+
+ pvt = new_mci->pvt_info;
+ pci_ha = pvt->pci_ha;
+ pci_read_config_dword(pci_ha, tad_dram_rule[0], &reg);
+ tad0 = m->addr <= TAD_LIMIT(reg);
+
+ *channel_mask = 1 << channel;
+ if (pvt->mirror_mode == FULL_MIRRORING ||
+ (pvt->mirror_mode == ADDR_RANGE_MIRRORING && tad0)) {
+ *channel_mask |= 1 << ((channel + 2) % 4);
+ pvt->is_cur_addr_mirrored = true;
+ } else {
+ pvt->is_cur_addr_mirrored = false;
+ }
+
+ if (pvt->is_lockstep)
+ *channel_mask |= 1 << ((channel + 1) % 4);
+
+ return 0;
+}
+
/****************************************************************************
Device initialization routines: put/get, init/exit
****************************************************************************/
@@ -2877,10 +2965,16 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
u32 errcode = GET_BITFIELD(m->status, 0, 15);
u32 channel = GET_BITFIELD(m->status, 0, 3);
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+ /*
+ * Bits 5-0 of MCi_MISC give the least significant bit that is valid.
+ * A value of 6 means a cache-line-aligned address; a value of 12 means
+ * a page-aligned address, as reported by the patrol scrubber.
+ */
+ u32 lsb = GET_BITFIELD(m->misc, 0, 5);
long channel_mask, first_channel;
- u8 rank, socket, ha;
+ u8 rank = 0xff, socket, ha;
int rc, dimm;
- char *area_type = NULL;
+ char *area_type = "DRAM";
if (pvt->info.type != SANDY_BRIDGE)
recoverable = true;
@@ -2888,6 +2982,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
recoverable = GET_BITFIELD(m->status, 56, 56);
if (uncorrected_error) {
+ core_err_cnt = 1;
if (ripv) {
type = "FATAL";
tp_event = HW_EVENT_ERR_FATAL;
@@ -2911,35 +3006,27 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
* cccc = channel
* If the mask doesn't match, report an error to the parsing logic
*/
- if (! ((errcode & 0xef80) == 0x80)) {
- optype = "Can't parse: it is not a mem";
- } else {
- switch (optypenum) {
- case 0:
- optype = "generic undef request error";
- break;
- case 1:
- optype = "memory read error";
- break;
- case 2:
- optype = "memory write error";
- break;
- case 3:
- optype = "addr/cmd error";
- break;
- case 4:
- optype = "memory scrubbing error";
- break;
- default:
- optype = "reserved";
- break;
- }
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request error";
+ break;
+ case 1:
+ optype = "memory read error";
+ break;
+ case 2:
+ optype = "memory write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "memory scrubbing error";
+ break;
+ default:
+ optype = "reserved";
+ break;
}
- /* Only decode errors with an valid address (ADDRV) */
- if (!GET_BITFIELD(m->status, 58, 58))
- return;
-
if (pvt->info.type == KNIGHTS_LANDING) {
if (channel == 14) {
edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
@@ -2972,9 +3059,13 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
optype, msg);
}
return;
- } else {
+ } else if (lsb < 12) {
rc = get_memory_error_data(mci, m->addr, &socket, &ha,
- &channel_mask, &rank, &area_type, msg);
+ &channel_mask, &rank,
+ &area_type, msg);
+ } else {
+ rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
+ &channel_mask, msg);
}
if (rc < 0)
@@ -2989,14 +3080,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
- if (rank < 4)
+ if (rank == 0xff)
+ dimm = -1;
+ else if (rank < 4)
dimm = 0;
else if (rank < 8)
dimm = 1;
else
dimm = 2;
-
/*
* FIXME: On some memory configurations (mirror, lockstep), the
* Memory Controller can't point the error to a single DIMM. The
@@ -3045,17 +3137,11 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
{
struct mce *mce = (struct mce *)data;
struct mem_ctl_info *mci;
- struct sbridge_pvt *pvt;
char *type;
if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
return NOTIFY_DONE;
- mci = get_mci_for_node_id(mce->socketid, IMC0);
- if (!mci)
- return NOTIFY_DONE;
- pvt = mci->pvt_info;
-
/*
* Just let mcelog handle it if the error is
* outside the memory controller. A memory error
@@ -3065,6 +3151,22 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
if ((mce->status & 0xefff) >> 7 != 1)
return NOTIFY_DONE;
+ /* Check ADDRV bit in STATUS */
+ if (!GET_BITFIELD(mce->status, 58, 58))
+ return NOTIFY_DONE;
+
+ /* Check MISCV bit in STATUS */
+ if (!GET_BITFIELD(mce->status, 59, 59))
+ return NOTIFY_DONE;
+
+ /* Check address type in MISC (physical address only) */
+ if (GET_BITFIELD(mce->misc, 6, 8) != 2)
+ return NOTIFY_DONE;
+
+ mci = get_mci_for_node_id(mce->socketid, IMC0);
+ if (!mci)
+ return NOTIFY_DONE;
+
if (mce->mcgstatus & MCG_STATUS_MCIP)
type = "Exception";
else
@@ -3173,6 +3275,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = get_memory_type;
pvt->info.get_node_id = get_node_id;
+ pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3197,6 +3300,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = sbridge_dram_rule;
pvt->info.get_memory_type = get_memory_type;
pvt->info.get_node_id = get_node_id;
+ pvt->info.get_ha = sbridge_get_ha;
pvt->info.rir_limit = rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3221,6 +3325,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = haswell_get_memory_type;
pvt->info.get_node_id = haswell_get_node_id;
+ pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = haswell_rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3245,6 +3350,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = haswell_get_memory_type;
pvt->info.get_node_id = haswell_get_node_id;
+ pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = haswell_rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3269,6 +3375,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = knl_dram_rule;
pvt->info.get_memory_type = knl_get_memory_type;
pvt->info.get_node_id = knl_get_node_id;
+ pvt->info.get_ha = knl_get_ha;
pvt->info.rir_limit = NULL;
pvt->info.sad_limit = knl_sad_limit;
pvt->info.interleave_mode = knl_interleave_mode;
@@ -3320,17 +3427,14 @@ fail0:
return rc;
}
-#define ICPU(model, table) \
- { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
-
static const struct x86_cpu_id sbridge_cpuids[] = {
- ICPU(INTEL_FAM6_SANDYBRIDGE_X, pci_dev_descr_sbridge_table),
- ICPU(INTEL_FAM6_IVYBRIDGE_X, pci_dev_descr_ibridge_table),
- ICPU(INTEL_FAM6_HASWELL_X, pci_dev_descr_haswell_table),
- ICPU(INTEL_FAM6_BROADWELL_X, pci_dev_descr_broadwell_table),
- ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
- ICPU(INTEL_FAM6_XEON_PHI_KNL, pci_dev_descr_knl_table),
- ICPU(INTEL_FAM6_XEON_PHI_KNM, pci_dev_descr_knl_table),
+ INTEL_CPU_FAM6(SANDYBRIDGE_X, pci_dev_descr_sbridge_table),
+ INTEL_CPU_FAM6(IVYBRIDGE_X, pci_dev_descr_ibridge_table),
+ INTEL_CPU_FAM6(HASWELL_X, pci_dev_descr_haswell_table),
+ INTEL_CPU_FAM6(BROADWELL_X, pci_dev_descr_broadwell_table),
+ INTEL_CPU_FAM6(BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
+ INTEL_CPU_FAM6(XEON_PHI_KNL, pci_dev_descr_knl_table),
+ INTEL_CPU_FAM6(XEON_PHI_KNM, pci_dev_descr_knl_table),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index fae095162c01..dd209e0dd9ab 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -364,7 +364,7 @@ static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
- edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
imc->mc, chan, dimmno, size, npages,
banks, 1 << ranks, rows, cols);
@@ -424,7 +424,7 @@ unknown_size:
dimm->mtype = MEM_NVDIMM;
dimm->edac_mode = EDAC_SECDED; /* likely better than this */
- edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu Mb (%u pages)\n",
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu MiB (%u pages)\n",
imc->mc, chan, dimmno, size >> 20, dimm->nr_pages);
snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
@@ -668,7 +668,7 @@ sad_found:
break;
case 2:
lchan = (addr >> shift) % 2;
- lchan = (lchan << 1) | ~lchan;
+ lchan = (lchan << 1) | !lchan;
break;
case 3:
lchan = ((addr >> shift) % 2) << 1;
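The one-character change above swaps a bitwise NOT for a logical NOT: with lchan = 0 the old expression evaluated to (0 << 1) | 0xffffffff instead of the intended 1. A small standalone demonstration of the corrected swizzle:

#include <assert.h>

/* The interleave swizzle should map 0 -> 1 and 1 -> 2. */
static unsigned int swizzle(unsigned int lchan)
{
	return (lchan << 1) | !lchan;	/* logical NOT, not bitwise ~ */
}

int main(void)
{
	assert(swizzle(0) == 1);	/* the old code yielded 0xffffffff */
	assert(swizzle(1) == 2);
	return 0;
}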
@@ -959,6 +959,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
recoverable = GET_BITFIELD(m->status, 56, 56);
if (uncorrected_error) {
+ core_err_cnt = 1;
if (ripv) {
type = "FATAL";
tp_event = HW_EVENT_ERR_FATAL;
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index c009d94f40c5..34be60fe6892 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -1884,7 +1884,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
default:
dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n",
l2c->pdev->device);
- return IRQ_NONE;
+ goto err_free;
}
while (CIRC_CNT(l2c->ring_head, l2c->ring_tail,
@@ -1906,7 +1906,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
l2c->ring_tail++;
}
- return IRQ_HANDLED;
+ ret = IRQ_HANDLED;
err_free:
kfree(other);
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 051327a951b1..35e784cffc23 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -337,9 +337,16 @@ static void deallocate_channel(struct fw_card *card, int irm_id,
/**
* fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth
+ * @card: card interface for this action
+ * @generation: bus generation
+ * @channels_mask: bitmask for channel allocation
+ * @channel: pointer for returning channel allocation result
+ * @bandwidth: pointer for returning bandwidth allocation result
+ * @allocate: whether to allocate (true) or deallocate (false)
*
* In parameters: card, generation, channels_mask, bandwidth, allocate
* Out parameters: channel, bandwidth
+ *
* This function blocks (sleeps) during communication with the IRM.
*
* Allocates or deallocates at most one channel out of channels_mask.
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 4372f9e4b0da..50bf1fe1775f 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -410,6 +410,14 @@ static void transaction_callback(struct fw_card *card, int rcode,
/**
* fw_run_transaction() - send request and sleep until transaction is completed
+ * @card: card interface for this request
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48-bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
*
* Returns the RCODE. See fw_send_request() for parameter documentation.
* Unlike fw_send_request(), @data points to the payload of the request or/and
@@ -604,6 +612,7 @@ EXPORT_SYMBOL(fw_core_add_address_handler);
/**
* fw_core_remove_address_handler() - unregister an address handler
+ * @handler: callback
*
* To be called in process context.
*
@@ -828,6 +837,7 @@ EXPORT_SYMBOL(fw_send_response);
/**
* fw_get_request_speed() - returns speed at which the @request was received
+ * @request: firewire request data
*/
int fw_get_request_speed(struct fw_request *request)
{
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index d8e159feb573..89110dfc7127 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -90,14 +90,17 @@ config EFI_ARMSTUB
config EFI_ARMSTUB_DTB_LOADER
bool "Enable the DTB loader"
depends on EFI_ARMSTUB
+ default y
help
Select this config option to add support for the dtb= command
line parameter, allowing a device tree blob to be loaded into
memory from the EFI System Partition by the stub.
- The device tree is typically provided by the platform or by
- the bootloader, so this option is mostly for development
- purposes only.
+ If the device tree is provided by the platform or by
+ the bootloader, this option may not be needed.
+ However, for development purposes, and to maintain existing
+ functionality for bootloaders that lack such support,
+ this option is necessary.
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2a29dd9c986d..249eb70691b0 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -52,7 +52,8 @@ struct efi __read_mostly efi = {
.properties_table = EFI_INVALID_TABLE_ADDR,
.mem_attr_table = EFI_INVALID_TABLE_ADDR,
.rng_seed = EFI_INVALID_TABLE_ADDR,
- .tpm_log = EFI_INVALID_TABLE_ADDR
+ .tpm_log = EFI_INVALID_TABLE_ADDR,
+ .mem_reserve = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
@@ -484,6 +485,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
{LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
+ {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &efi.mem_reserve},
{NULL_GUID, NULL, NULL},
};
@@ -591,6 +593,29 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
early_memunmap(tbl, sizeof(*tbl));
}
+ if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
+ unsigned long prsv = efi.mem_reserve;
+
+ while (prsv) {
+ struct linux_efi_memreserve *rsv;
+
+ /* reserve the entry itself */
+ memblock_reserve(prsv, sizeof(*rsv));
+
+ rsv = early_memremap(prsv, sizeof(*rsv));
+ if (rsv == NULL) {
+ pr_err("Could not map UEFI memreserve entry!\n");
+ return -ENOMEM;
+ }
+
+ if (rsv->size)
+ memblock_reserve(rsv->base, rsv->size);
+
+ prsv = rsv->next;
+ early_memunmap(rsv, sizeof(*rsv));
+ }
+ }
+
return 0;
}
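The loop above walks a singly linked list threaded through physical addresses. The entry layout, as assumed from the fields used here; the actual definition lands in include/linux/efi.h in a companion change not shown in this diff:

/* Assumed layout: 'next' holds the physical address of the next entry,
 * and a value of 0 terminates the list. */
struct linux_efi_memreserve {
	phys_addr_t	base;
	phys_addr_t	size;
	phys_addr_t	next;
};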
@@ -937,6 +962,38 @@ bool efi_is_table_address(unsigned long phys_addr)
return false;
}
+static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+
+int efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+{
+ struct linux_efi_memreserve *rsv, *parent;
+
+ if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ rsv = kmalloc(sizeof(*rsv), GFP_KERNEL);
+ if (!rsv)
+ return -ENOMEM;
+
+ parent = memremap(efi.mem_reserve, sizeof(*rsv), MEMREMAP_WB);
+ if (!parent) {
+ kfree(rsv);
+ return -ENOMEM;
+ }
+
+ rsv->base = addr;
+ rsv->size = size;
+
+ spin_lock(&efi_mem_reserve_persistent_lock);
+ rsv->next = parent->next;
+ parent->next = __pa(rsv);
+ spin_unlock(&efi_mem_reserve_persistent_lock);
+
+ memunmap(parent);
+
+ return 0;
+}
+
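A hypothetical caller, sketching how a driver might persist a firmware-consumed region across kexec; both the function name and the region here are illustrative, not from the patch:

static int reserve_hw_table(phys_addr_t table_base, u64 table_size)
{
	int ret = efi_mem_reserve_persistent(table_base, table_size);

	if (ret)
		pr_warn("failed to persist memory reservation: %d\n", ret);
	return ret;
}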
#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
unsigned long code, void *unused)
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 14c40a7750d1..c51627660dbb 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -16,7 +16,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
$(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
- -fno-builtin -fpic -mno-single-pic-base
+ -fno-builtin -fpic \
+ $(call cc-option,-mno-single-pic-base)
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 6920033de6d4..30ac0c975f8a 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -69,6 +69,31 @@ static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
return si;
}
+void install_memreserve_table(efi_system_table_t *sys_table_arg)
+{
+ struct linux_efi_memreserve *rsv;
+ efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
+ efi_status_t status;
+
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
+ (void **)&rsv);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n");
+ return;
+ }
+
+ rsv->next = 0;
+ rsv->base = 0;
+ rsv->size = 0;
+
+ status = efi_call_early(install_configuration_table,
+ &memreserve_table_guid,
+ rsv);
+ if (status != EFI_SUCCESS)
+ pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n");
+}
+
+
/*
* This function handles the architecture-specific differences between arm and
* arm64 regarding where the kernel image must be loaded and any memory that
@@ -235,6 +260,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
}
}
+ install_memreserve_table(sys_table);
+
new_fdt_addr = fdt_addr;
status = allocate_new_fdt_and_exit_boot(sys_table, handle,
&new_fdt_addr, efi_get_max_fdt_addr(dram_base),
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index aa66cbf23512..a19d845bdb06 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -45,39 +45,7 @@
#define __efi_call_virt(f, args...) \
__efi_call_virt_pointer(efi.systab->runtime, f, args)
-/* efi_runtime_service() function identifiers */
-enum efi_rts_ids {
- GET_TIME,
- SET_TIME,
- GET_WAKEUP_TIME,
- SET_WAKEUP_TIME,
- GET_VARIABLE,
- GET_NEXT_VARIABLE,
- SET_VARIABLE,
- QUERY_VARIABLE_INFO,
- GET_NEXT_HIGH_MONO_COUNT,
- UPDATE_CAPSULE,
- QUERY_CAPSULE_CAPS,
-};
-
-/*
- * efi_runtime_work: Details of EFI Runtime Service work
- * @arg<1-5>: EFI Runtime Service function arguments
- * @status: Status of executing EFI Runtime Service
- * @efi_rts_id: EFI Runtime Service function identifier
- * @efi_rts_comp: Struct used for handling completions
- */
-struct efi_runtime_work {
- void *arg1;
- void *arg2;
- void *arg3;
- void *arg4;
- void *arg5;
- efi_status_t status;
- struct work_struct work;
- enum efi_rts_ids efi_rts_id;
- struct completion efi_rts_comp;
-};
+struct efi_runtime_work efi_rts_work;
/*
* efi_queue_work: Queue efi_runtime_service() and wait until it's done
@@ -91,9 +59,13 @@ struct efi_runtime_work {
*/
#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5) \
({ \
- struct efi_runtime_work efi_rts_work; \
efi_rts_work.status = EFI_ABORTED; \
\
+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) { \
+ pr_warn_once("EFI Runtime Services are disabled!\n"); \
+ goto exit; \
+ } \
+ \
init_completion(&efi_rts_work.efi_rts_comp); \
INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \
efi_rts_work.arg1 = _arg1; \
@@ -112,6 +84,8 @@ struct efi_runtime_work {
else \
pr_err("Failed to queue work to efi_rts_wq.\n"); \
\
+exit: \
+ efi_rts_work.efi_rts_id = NONE; \
efi_rts_work.status; \
})
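The NONE and RESET_SYSTEM identifiers assigned to efi_rts_work.efi_rts_id in this patch come from the efi_rts_ids enum, which a companion include/linux/efi.h change (not shown in this diff) moves out of this file. Its assumed shape after that change:

/* Assumed: the relocated enum, extended with the two identifiers this
 * patch relies on (NONE as the idle marker, RESET_SYSTEM for the
 * non-queued reset path). */
enum efi_rts_ids {
	NONE,
	GET_TIME,
	SET_TIME,
	GET_WAKEUP_TIME,
	SET_WAKEUP_TIME,
	GET_VARIABLE,
	GET_NEXT_VARIABLE,
	SET_VARIABLE,
	QUERY_VARIABLE_INFO,
	GET_NEXT_HIGH_MONO_COUNT,
	UPDATE_CAPSULE,
	QUERY_CAPSULE_CAPS,
	RESET_SYSTEM,
};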
@@ -184,18 +158,16 @@ static DEFINE_SEMAPHORE(efi_runtime_lock);
*/
static void efi_call_rts(struct work_struct *work)
{
- struct efi_runtime_work *efi_rts_work;
void *arg1, *arg2, *arg3, *arg4, *arg5;
efi_status_t status = EFI_NOT_FOUND;
- efi_rts_work = container_of(work, struct efi_runtime_work, work);
- arg1 = efi_rts_work->arg1;
- arg2 = efi_rts_work->arg2;
- arg3 = efi_rts_work->arg3;
- arg4 = efi_rts_work->arg4;
- arg5 = efi_rts_work->arg5;
+ arg1 = efi_rts_work.arg1;
+ arg2 = efi_rts_work.arg2;
+ arg3 = efi_rts_work.arg3;
+ arg4 = efi_rts_work.arg4;
+ arg5 = efi_rts_work.arg5;
- switch (efi_rts_work->efi_rts_id) {
+ switch (efi_rts_work.efi_rts_id) {
case GET_TIME:
status = efi_call_virt(get_time, (efi_time_t *)arg1,
(efi_time_cap_t *)arg2);
@@ -253,8 +225,8 @@ static void efi_call_rts(struct work_struct *work)
*/
pr_err("Requested executing invalid EFI Runtime Service.\n");
}
- efi_rts_work->status = status;
- complete(&efi_rts_work->efi_rts_comp);
+ efi_rts_work.status = status;
+ complete(&efi_rts_work.efi_rts_comp);
}
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
@@ -428,6 +400,7 @@ static void virt_efi_reset_system(int reset_type,
"could not get exclusive access to the firmware\n");
return;
}
+ efi_rts_work.efi_rts_id = RESET_SYSTEM;
__efi_call_virt(reset_system, reset_type, status, data_size, data);
up(&efi_runtime_lock);
}
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 41c48a1e8baa..769640940c9f 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -542,6 +542,30 @@ static long efi_runtime_get_nexthighmonocount(unsigned long arg)
return 0;
}
+static long efi_runtime_reset_system(unsigned long arg)
+{
+ struct efi_resetsystem __user *resetsystem_user;
+ struct efi_resetsystem resetsystem;
+ void *data = NULL;
+
+ resetsystem_user = (struct efi_resetsystem __user *)arg;
+ if (copy_from_user(&resetsystem, resetsystem_user,
+ sizeof(resetsystem)))
+ return -EFAULT;
+ if (resetsystem.data_size != 0) {
+ data = memdup_user((void *)resetsystem.data,
+ resetsystem.data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+
+ efi.reset_system(resetsystem.reset_type, resetsystem.status,
+ resetsystem.data_size, (efi_char16_t *)data);
+
+ kfree(data);
+ return 0;
+}
+
static long efi_runtime_query_variableinfo(unsigned long arg)
{
struct efi_queryvariableinfo __user *queryvariableinfo_user;
@@ -682,6 +706,9 @@ static long efi_test_ioctl(struct file *file, unsigned int cmd,
case EFI_RUNTIME_QUERY_CAPSULECAPABILITIES:
return efi_runtime_query_capsulecaps(arg);
+
+ case EFI_RUNTIME_RESET_SYSTEM:
+ return efi_runtime_reset_system(arg);
}
return -ENOTTY;
diff --git a/drivers/firmware/efi/test/efi_test.h b/drivers/firmware/efi/test/efi_test.h
index 9812c6a02b40..5f4818bf112f 100644
--- a/drivers/firmware/efi/test/efi_test.h
+++ b/drivers/firmware/efi/test/efi_test.h
@@ -81,6 +81,13 @@ struct efi_querycapsulecapabilities {
efi_status_t *status;
} __packed;
+struct efi_resetsystem {
+ int reset_type;
+ efi_status_t status;
+ unsigned long data_size;
+ efi_char16_t *data;
+} __packed;
+
#define EFI_RUNTIME_GET_VARIABLE \
_IOWR('p', 0x01, struct efi_getvariable)
#define EFI_RUNTIME_SET_VARIABLE \
@@ -108,4 +115,7 @@ struct efi_querycapsulecapabilities {
#define EFI_RUNTIME_QUERY_CAPSULECAPABILITIES \
_IOR('p', 0x0A, struct efi_querycapsulecapabilities)
+#define EFI_RUNTIME_RESET_SYSTEM \
+ _IOW('p', 0x0B, struct efi_resetsystem)
+
#endif /* _DRIVERS_FIRMWARE_EFI_TEST_H_ */
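To show the new ioctl from the other side, a hypothetical userspace invocation; it assumes the /dev/efi_test node registered by the efi_test misc device, the UAPI definitions above, and the UEFI EfiResetCold numbering (0) for the reset type:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Hypothetical: request a cold reset through the test driver. */
static int request_cold_reset(void)
{
	struct efi_resetsystem rs;
	int fd, ret;

	memset(&rs, 0, sizeof(rs));
	rs.reset_type = 0;		/* EfiResetCold per the UEFI spec */

	fd = open("/dev/efi_test", O_RDWR);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, EFI_RUNTIME_RESET_SYSTEM, &rs);
	close(fd);
	return ret;
}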
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 0b7e19c27c6d..51a5ac2293a7 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -14,6 +14,7 @@
*/
#include <linux/module.h>
+#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-region.h>
#include "dfl-fme-pr.h"
@@ -66,9 +67,10 @@ eprobe_mgr_put:
static int fme_region_remove(struct platform_device *pdev)
{
struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+ struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region);
- fpga_mgr_put(region->mgr);
+ fpga_mgr_put(mgr);
return 0;
}
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 24b8f98b73ec..c983dac97501 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -125,7 +125,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
*
* Given a device, get an exclusive reference to a fpga bridge.
*
- * Return: fpga manager struct or IS_ERR() condition containing error code.
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
*/
struct fpga_bridge *fpga_bridge_get(struct device *dev,
struct fpga_image_info *info)
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 35fabb8083fb..052a1342ab7e 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -437,9 +437,10 @@ eprobe_mgr_put:
static int of_fpga_region_remove(struct platform_device *pdev)
{
struct fpga_region *region = platform_get_drvdata(pdev);
+ struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region);
- fpga_mgr_put(region->mgr);
+ fpga_mgr_put(mgr);
return 0;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 4f52c3a8ec99..833a1b51c948 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -200,6 +200,7 @@ config GPIO_EP93XX
def_bool y
depends on ARCH_EP93XX
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
config GPIO_EXAR
tristate "Support for GPIO pins on XR17V352/354/358"
@@ -267,17 +268,6 @@ config GPIO_ICH
If unsure, say N.
-config GPIO_INGENIC
- tristate "Ingenic JZ47xx SoCs GPIO support"
- depends on OF
- depends on MACH_INGENIC || COMPILE_TEST
- select GPIOLIB_IRQCHIP
- help
- Say yes here to support the GPIO functionality present on the
- JZ4740 and JZ4780 SoCs from Ingenic.
-
- If unsure, say N.
-
config GPIO_IOP
tristate "Intel IOP GPIO"
depends on ARCH_IOP32X || ARCH_IOP33X || COMPILE_TEST
@@ -439,6 +429,24 @@ config GPIO_REG
A 32-bit single register GPIO fixed in/out implementation. This
can be used to represent any register as a set of GPIO signals.
+config GPIO_SIOX
+ tristate "SIOX GPIO support"
+ depends on SIOX
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support SIOX I/O devices. These are units connected
+ via a SIOX bus and have a number of fixed-direction I/O lines.
+
+config GPIO_SNPS_CREG
+ bool "Synopsys GPIO via CREG (Control REGisters) driver"
+ depends on ARC || COMPILE_TEST
+ depends on OF_GPIO
+ help
+ This driver supports GPIOs via CREG on various Synopsys SoCs.
+ This is a single-register MMIO GPIO driver for complex cases
+ where only a few fields in a register belong to GPIO lines and
+ each GPIO line owns a field of different length and on/off values.
+
config GPIO_SPEAR_SPICS
bool "ST SPEAr13xx SPI Chip Select as GPIO support"
depends on PLAT_SPEAR
@@ -480,6 +488,7 @@ config GPIO_SYSCON
config GPIO_TB10X
bool
+ select GPIO_GENERIC
select GENERIC_IRQ_CHIP
select OF_GPIO
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index c256aff66a65..671c4477c951 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -3,8 +3,8 @@
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
-obj-$(CONFIG_GPIOLIB) += devres.o
obj-$(CONFIG_GPIOLIB) += gpiolib.o
+obj-$(CONFIG_GPIOLIB) += gpiolib-devres.o
obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
obj-$(CONFIG_GPIOLIB) += gpiolib-devprop.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
@@ -57,7 +57,6 @@ obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
-obj-$(CONFIG_GPIO_INGENIC) += gpio-ingenic.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
@@ -111,6 +110,7 @@ obj-$(CONFIG_GPIO_REG) += gpio-reg.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
+obj-$(CONFIG_GPIO_SNPS_CREG) += gpio-creg-snps.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o
@@ -125,6 +125,7 @@ obj-$(CONFIG_GPIO_TEGRA186) += gpio-tegra186.o
obj-$(CONFIG_GPIO_THUNDERX) += gpio-thunderx.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
+obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
obj-$(CONFIG_GPIO_TPIC2810) += gpio-tpic2810.o
obj-$(CONFIG_GPIO_TPS65086) += gpio-tps65086.o
obj-$(CONFIG_GPIO_TPS65218) += gpio-tps65218.o
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 21452622d954..e321955782a1 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -172,7 +172,7 @@ static struct platform_driver adp5520_gpio_driver = {
module_platform_driver(adp5520_gpio_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("GPIO ADP5520 Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:adp5520-gpio");
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index da9781a2ef4a..cc33d8986ad3 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -494,6 +494,6 @@ static struct i2c_driver adp5588_gpio_driver = {
module_i2c_driver(adp5588_gpio_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("GPIO ADP5588 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index d0707fc23afd..c5536a509b59 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -373,6 +373,7 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
val = readl(reg_base + GPIO_INT_MASK(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MASK(bank_id));
+ gpiochip_disable_irq(&kona_gpio->gpio_chip, gpio);
raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
@@ -394,6 +395,7 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MSKCLR(bank_id));
+ gpiochip_enable_irq(&kona_gpio->gpio_chip, gpio);
raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
@@ -485,23 +487,15 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
- int ret;
- ret = gpiochip_lock_as_irq(&kona_gpio->gpio_chip, d->hwirq);
- if (ret) {
- dev_err(kona_gpio->gpio_chip.parent,
- "unable to lock HW IRQ %lu for IRQ\n",
- d->hwirq);
- return ret;
- }
- return 0;
+ return gpiochip_reqres_irq(&kona_gpio->gpio_chip, d->hwirq);
}
static void bcm_kona_gpio_irq_relres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
- gpiochip_unlock_as_irq(&kona_gpio->gpio_chip, d->hwirq);
+ gpiochip_relres_irq(&kona_gpio->gpio_chip, d->hwirq);
}
static struct irq_chip bcm_gpio_irq_chip = {
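The reqres/relres conversion above drops the driver's open-coded lock-and-log sequence in favour of common gpiolib helpers. As a rough sketch of what gpiochip_reqres_irq() does in gpiolib of this era (paraphrased, not verbatim; the real implementation lives in drivers/gpio/gpiolib.c):

int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset)
{
	int ret;

	/* Pin the providing module while the line is used as an IRQ */
	if (!try_module_get(chip->gpiodev->owner))
		return -ENODEV;

	ret = gpiochip_lock_as_irq(chip, offset);
	if (ret) {
		module_put(chip->gpiodev->owner);
		return ret;
	}
	return 0;
}

Converted drivers therefore gain module refcounting in addition to the lock, which the hand-rolled version never took.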
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 16c7f9f49416..af936dcca659 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -664,6 +664,18 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
struct brcmstb_gpio_bank *bank;
struct gpio_chip *gc;
+ /*
+ * If bank_width is 0, there is an empty bank in the
+ * register block; skip it, but still advance the bank
+ * counter and GPIO base past its register space.
+ */
+ if (bank_width == 0) {
+ dev_dbg(dev, "Width 0 found: Empty bank @ %d\n",
+ num_banks);
+ num_banks++;
+ gpio_base += MAX_GPIO_PER_BANK;
+ continue;
+ }
+
bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
if (!bank) {
err = -ENOMEM;
@@ -740,9 +752,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
goto fail;
}
- dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
- num_banks, priv->gpio_base, gpio_base - 1);
-
if (priv->parent_wake_irq && need_wakeup_event)
pm_wakeup_event(dev, 0);
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
new file mode 100644
index 000000000000..8cbc94d0d424
--- /dev/null
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Synopsys CREG (Control REGisters) GPIO driver
+//
+// Copyright (C) 2018 Synopsys
+// Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#define MAX_GPIO 32
+
+struct creg_layout {
+ u8 ngpio;
+ u8 shift[MAX_GPIO];
+ u8 on[MAX_GPIO];
+ u8 off[MAX_GPIO];
+ u8 bit_per_gpio[MAX_GPIO];
+};
+
+struct creg_gpio {
+ struct gpio_chip gc;
+ void __iomem *regs;
+ spinlock_t lock;
+ const struct creg_layout *layout;
+};
+
+static void creg_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+{
+ struct creg_gpio *hcg = gpiochip_get_data(gc);
+ const struct creg_layout *layout = hcg->layout;
+ u32 reg, reg_shift, value;
+ unsigned long flags;
+ int i;
+
+ value = val ? hcg->layout->on[offset] : hcg->layout->off[offset];
+
+ reg_shift = layout->shift[offset];
+ for (i = 0; i < offset; i++)
+ reg_shift += layout->bit_per_gpio[i] + layout->shift[i];
+
+ spin_lock_irqsave(&hcg->lock, flags);
+ reg = readl(hcg->regs);
+ reg &= ~(GENMASK(layout->bit_per_gpio[i] - 1, 0) << reg_shift);
+ reg |= (value << reg_shift);
+ writel(reg, hcg->regs);
+ spin_unlock_irqrestore(&hcg->lock, flags);
+}
+
+static int creg_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
+{
+ creg_gpio_set(gc, offset, val);
+
+ return 0;
+}
+
+static int creg_gpio_validate_pg(struct device *dev, struct creg_gpio *hcg,
+ int i)
+{
+ const struct creg_layout *layout = hcg->layout;
+
+ if (layout->bit_per_gpio[i] < 1 || layout->bit_per_gpio[i] > 8)
+ return -EINVAL;
+
+ /* Check that the on value fits its placeholder */
+ if (GENMASK(31, layout->bit_per_gpio[i]) & layout->on[i])
+ return -EINVAL;
+
+ /* Check that the off value fits its placeholder */
+ if (GENMASK(31, layout->bit_per_gpio[i]) & layout->off[i])
+ return -EINVAL;
+
+ if (layout->on[i] == layout->off[i])
+ return -EINVAL;
+
+ return 0;
+}
+
+static int creg_gpio_validate(struct device *dev, struct creg_gpio *hcg,
+ u32 ngpios)
+{
+ u32 reg_len = 0;
+ int i;
+
+ if (hcg->layout->ngpio < 1 || hcg->layout->ngpio > MAX_GPIO)
+ return -EINVAL;
+
+ if (ngpios < 1 || ngpios > hcg->layout->ngpio) {
+ dev_err(dev, "ngpios must be in [1:%u]\n", hcg->layout->ngpio);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hcg->layout->ngpio; i++) {
+ if (creg_gpio_validate_pg(dev, hcg, i))
+ return -EINVAL;
+
+ reg_len += hcg->layout->shift[i] + hcg->layout->bit_per_gpio[i];
+ }
+
+ /* Check that the whole layout fits in a 32-bit register */
+ if (reg_len > 32)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct creg_layout hsdk_cs_ctl = {
+ .ngpio = 10,
+ .shift = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ .off = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 },
+ .on = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+ .bit_per_gpio = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }
+};
+
+static const struct creg_layout axs10x_flsh_cs_ctl = {
+ .ngpio = 1,
+ .shift = { 0 },
+ .off = { 1 },
+ .on = { 3 },
+ .bit_per_gpio = { 2 }
+};
+
+static const struct of_device_id creg_gpio_ids[] = {
+ {
+ .compatible = "snps,creg-gpio-axs10x",
+ .data = &axs10x_flsh_cs_ctl
+ }, {
+ .compatible = "snps,creg-gpio-hsdk",
+ .data = &hsdk_cs_ctl
+ }, { /* sentinel */ }
+};
+
+static int creg_gpio_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct creg_gpio *hcg;
+ struct resource *mem;
+ u32 ngpios;
+ int ret;
+
+ hcg = devm_kzalloc(dev, sizeof(struct creg_gpio), GFP_KERNEL);
+ if (!hcg)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hcg->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(hcg->regs))
+ return PTR_ERR(hcg->regs);
+
+ match = of_match_node(creg_gpio_ids, pdev->dev.of_node);
+ hcg->layout = match->data;
+ if (!hcg->layout)
+ return -EINVAL;
+
+ ret = of_property_read_u32(dev->of_node, "ngpios", &ngpios);
+ if (ret)
+ return ret;
+
+ ret = creg_gpio_validate(dev, hcg, ngpios);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&hcg->lock);
+
+ hcg->gc.label = dev_name(dev);
+ hcg->gc.base = -1;
+ hcg->gc.ngpio = ngpios;
+ hcg->gc.set = creg_gpio_set;
+ hcg->gc.direction_output = creg_gpio_dir_out;
+ hcg->gc.of_node = dev->of_node;
+
+ ret = devm_gpiochip_add_data(dev, &hcg->gc, hcg);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "GPIO controller with %d gpios probed\n", ngpios);
+
+ return 0;
+}
+
+static struct platform_driver creg_gpio_snps_driver = {
+ .driver = {
+ .name = "snps-creg-gpio",
+ .of_match_table = creg_gpio_ids,
+ },
+ .probe = creg_gpio_probe,
+};
+builtin_platform_driver(creg_gpio_snps_driver);
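A worked example of the field packing that creg_gpio_set() implements, using the hsdk_cs_ctl layout above (all shifts zero, two bits per line):

/* Line n's field starts at bit 2 * n, so driving line 4 "on" gives:
 *   reg_shift = shift[4] + sum(bit_per_gpio[0..3] + shift[0..3]) = 8
 *   reg &= ~(GENMASK(1, 0) << 8);   // clear bits 9:8
 *   reg |=  (3 << 8);               // write the "on" value
 * and creg_gpio_validate() checks the whole layout up front:
 *   10 lines * (2 + 0) bits = 20 <= 32, so everything fits. */

Note that when creg_gpio_set() reaches the GENMASK() line the loop index i equals offset, so the mask width is taken from the field actually being written.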
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index a5ece8ea79bc..5c1564fcc24e 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -9,6 +9,7 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
+
#include <linux/gpio/driver.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -24,6 +25,12 @@
#include <linux/platform_device.h>
#include <linux/platform_data/gpio-davinci.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/spinlock.h>
+
+#include <asm-generic/gpio.h>
+
+#define MAX_REGS_BANKS 5
+#define MAX_INT_PER_BANK 32
struct davinci_gpio_regs {
u32 dir;
@@ -41,11 +48,31 @@ struct davinci_gpio_regs {
typedef struct irq_chip *(*gpio_get_irq_chip_cb_t)(unsigned int irq);
#define BINTEN 0x8 /* GPIO Interrupt Per-Bank Enable Register */
-#define MAX_LABEL_SIZE 20
static void __iomem *gpio_base;
static unsigned int offset_array[5] = {0x10, 0x38, 0x60, 0x88, 0xb0};
+struct davinci_gpio_irq_data {
+ void __iomem *regs;
+ struct davinci_gpio_controller *chip;
+ int bank_num;
+};
+
+struct davinci_gpio_controller {
+ struct gpio_chip chip;
+ struct irq_domain *irq_domain;
+ /* Serialize access to GPIO registers */
+ spinlock_t lock;
+ void __iomem *regs[MAX_REGS_BANKS];
+ int gpio_unbanked;
+ int irqs[MAX_INT_PER_BANK];
+};
+
+static inline u32 __gpio_mask(unsigned gpio)
+{
+ return 1 << (gpio % 32);
+}
+
static inline struct davinci_gpio_regs __iomem *irq2regs(struct irq_data *d)
{
struct davinci_gpio_regs __iomem *g;
@@ -166,14 +193,12 @@ of_err:
static int davinci_gpio_probe(struct platform_device *pdev)
{
- static int ctrl_num, bank_base;
- int gpio, bank, i, ret = 0;
+ int bank, i, ret = 0;
unsigned int ngpio, nbank, nirq;
struct davinci_gpio_controller *chips;
struct davinci_gpio_platform_data *pdata;
struct device *dev = &pdev->dev;
struct resource *res;
- char label[MAX_LABEL_SIZE];
pdata = davinci_gpio_get_pdata(pdev);
if (!pdata) {
@@ -207,10 +232,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
else
nirq = DIV_ROUND_UP(ngpio, 16);
- nbank = DIV_ROUND_UP(ngpio, 32);
- chips = devm_kcalloc(dev,
- nbank, sizeof(struct davinci_gpio_controller),
- GFP_KERNEL);
+ chips = devm_kzalloc(dev, sizeof(*chips), GFP_KERNEL);
if (!chips)
return -ENOMEM;
@@ -228,10 +250,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
}
}
- snprintf(label, MAX_LABEL_SIZE, "davinci_gpio.%d", ctrl_num++);
- chips->chip.label = devm_kstrdup(dev, label, GFP_KERNEL);
- if (!chips->chip.label)
- return -ENOMEM;
+ chips->chip.label = dev_name(dev);
chips->chip.direction_input = davinci_direction_in;
chips->chip.get = davinci_gpio_get;
@@ -239,7 +258,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.set = davinci_gpio_set;
chips->chip.ngpio = ngpio;
- chips->chip.base = bank_base;
+ chips->chip.base = -1;
#ifdef CONFIG_OF_GPIO
chips->chip.of_gpio_n_cells = 2;
@@ -252,28 +271,21 @@ static int davinci_gpio_probe(struct platform_device *pdev)
}
#endif
spin_lock_init(&chips->lock);
- bank_base += ngpio;
- for (gpio = 0, bank = 0; gpio < ngpio; gpio += 32, bank++)
+ nbank = DIV_ROUND_UP(ngpio, 32);
+ for (bank = 0; bank < nbank; bank++)
chips->regs[bank] = gpio_base + offset_array[bank];
ret = devm_gpiochip_add_data(dev, &chips->chip, chips);
if (ret)
- goto err;
+ return ret;
platform_set_drvdata(pdev, chips);
ret = davinci_gpio_irq_setup(pdev);
if (ret)
- goto err;
+ return ret;
return 0;
-
-err:
- /* Revert the static variable increments */
- ctrl_num--;
- bank_base -= ngpio;
-
- return ret;
}
/*--------------------------------------------------------------------------*/
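The davinci rework above also removes static state (ctrl_num, bank_base) that leaked across probe() calls and had to be manually unwound on failure. A minimal sketch of the convention it moves to, assuming a hypothetical struct example_gpio wrapping a struct gpio_chip:

static int example_gpio_probe(struct platform_device *pdev)
{
	struct example_gpio *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->chip.label = dev_name(&pdev->dev); /* unique, no counter */
	priv->chip.base = -1;                    /* gpiolib picks the range */

	return devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
}

With a dynamic base and devm-managed registration there is nothing left to unwind on error, which is why the err: label disappears from the patch.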
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 45d384039e9b..71728d6e0bca 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic EP93xx GPIO handling
*
@@ -6,10 +7,6 @@
*
* Based on code originally from:
* linux/arch/arm/mach-ep93xx/core.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/init.h>
@@ -19,16 +16,26 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/gpio/driver.h>
-/* FIXME: this is here for gpio_to_irq() - get rid of this! */
-#include <linux/gpio.h>
+#include <linux/bitops.h>
+
+#define EP93XX_GPIO_F_INT_STATUS 0x5c
+#define EP93XX_GPIO_A_INT_STATUS 0xa0
+#define EP93XX_GPIO_B_INT_STATUS 0xbc
+
+/* Maximum value for gpio line identifiers */
+#define EP93XX_GPIO_LINE_MAX 63
-#include <mach/hardware.h>
-#include <mach/gpio-ep93xx.h>
+/* Maximum value for irq capable line identifiers */
+#define EP93XX_GPIO_LINE_MAX_IRQ 23
-#define irq_to_gpio(irq) ((irq) - gpio_to_irq(0))
+/*
+ * Static mapping of GPIO bank F IRQs:
+ * lines F0..F7 (16..23) map to IRQs 80..87.
+ */
+#define EP93XX_GPIO_F_IRQ_BASE 80
struct ep93xx_gpio {
- void __iomem *mmio_base;
+ void __iomem *base;
struct gpio_chip gc[8];
};
@@ -48,27 +55,45 @@ static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 };
static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 };
static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 };
-static void ep93xx_gpio_update_int_params(unsigned port)
+static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, unsigned port)
{
BUG_ON(port > 2);
- writeb_relaxed(0, EP93XX_GPIO_REG(int_en_register_offset[port]));
+ writeb_relaxed(0, epg->base + int_en_register_offset[port]);
writeb_relaxed(gpio_int_type2[port],
- EP93XX_GPIO_REG(int_type2_register_offset[port]));
+ epg->base + int_type2_register_offset[port]);
writeb_relaxed(gpio_int_type1[port],
- EP93XX_GPIO_REG(int_type1_register_offset[port]));
+ epg->base + int_type1_register_offset[port]);
writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
- EP93XX_GPIO_REG(int_en_register_offset[port]));
+ epg->base + int_en_register_offset[port]);
+}
+
+static int ep93xx_gpio_port(struct gpio_chip *gc)
+{
+ struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ int port = 0;
+
+ while (port < ARRAY_SIZE(epg->gc) && gc != &epg->gc[port])
+ port++;
+
+ /* This should not happen but is there as a last safeguard */
+ if (port == ARRAY_SIZE(epg->gc)) {
+ pr_crit("can't find the GPIO port\n");
+ return 0;
+ }
+
+ return port;
}
-static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
+static void ep93xx_gpio_int_debounce(struct gpio_chip *gc,
+ unsigned int offset, bool enable)
{
- int line = irq_to_gpio(irq);
- int port = line >> 3;
- int port_mask = 1 << (line & 7);
+ struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ int port = ep93xx_gpio_port(gc);
+ int port_mask = BIT(offset);
if (enable)
gpio_int_debounce[port] |= port_mask;
@@ -76,29 +101,36 @@ static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
gpio_int_debounce[port] &= ~port_mask;
writeb(gpio_int_debounce[port],
- EP93XX_GPIO_REG(int_debounce_register_offset[port]));
+ epg->base + int_debounce_register_offset[port]);
}
static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
{
- unsigned char status;
- int i;
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long stat;
+ int offset;
- status = readb(EP93XX_GPIO_A_INT_STATUS);
- for (i = 0; i < 8; i++) {
- if (status & (1 << i)) {
- int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_A(0)) + i;
- generic_handle_irq(gpio_irq);
- }
- }
+ chained_irq_enter(irqchip, desc);
- status = readb(EP93XX_GPIO_B_INT_STATUS);
- for (i = 0; i < 8; i++) {
- if (status & (1 << i)) {
- int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_B(0)) + i;
- generic_handle_irq(gpio_irq);
- }
- }
+ /*
+ * Dispatch the IRQs to the irqdomain of each A and B
+ * gpiochip irqdomains depending on what has fired.
+ * The tricky part is that the IRQ line is shared
+ * between bank A and B and each has their own gpiochip.
+ */
+ stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS);
+ for_each_set_bit(offset, &stat, 8)
+ generic_handle_irq(irq_find_mapping(epg->gc[0].irq.domain,
+ offset));
+
+ stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS);
+ for_each_set_bit(offset, &stat, 8)
+ generic_handle_irq(irq_find_mapping(epg->gc[1].irq.domain,
+ offset));
+
+ chained_irq_exit(irqchip, desc);
}
static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
@@ -106,60 +138,67 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
/*
* map discontiguous hw irq range to continuous sw irq range:
*
- * IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7})
+ * IRQ_EP93XX_GPIO{0..7}MUX -> EP93XX_GPIO_LINE_F{0..7}
*/
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned int irq = irq_desc_get_irq(desc);
int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
- int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx;
+ int gpio_irq = EP93XX_GPIO_F_IRQ_BASE + port_f_idx;
+ chained_irq_enter(irqchip, desc);
generic_handle_irq(gpio_irq);
+ chained_irq_exit(irqchip, desc);
}
static void ep93xx_gpio_irq_ack(struct irq_data *d)
{
- int line = irq_to_gpio(d->irq);
- int port = line >> 3;
- int port_mask = 1 << (line & 7);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ int port = ep93xx_gpio_port(gc);
+ int port_mask = BIT(d->irq & 7);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
gpio_int_type2[port] ^= port_mask; /* switch edge direction */
- ep93xx_gpio_update_int_params(port);
+ ep93xx_gpio_update_int_params(epg, port);
}
- writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
+ writeb(port_mask, epg->base + eoi_register_offset[port]);
}
static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
{
- int line = irq_to_gpio(d->irq);
- int port = line >> 3;
- int port_mask = 1 << (line & 7);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ int port = ep93xx_gpio_port(gc);
+ int port_mask = BIT(d->irq & 7);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
gpio_int_type2[port] ^= port_mask; /* switch edge direction */
gpio_int_unmasked[port] &= ~port_mask;
- ep93xx_gpio_update_int_params(port);
+ ep93xx_gpio_update_int_params(epg, port);
- writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
+ writeb(port_mask, epg->base + eoi_register_offset[port]);
}
static void ep93xx_gpio_irq_mask(struct irq_data *d)
{
- int line = irq_to_gpio(d->irq);
- int port = line >> 3;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ int port = ep93xx_gpio_port(gc);
- gpio_int_unmasked[port] &= ~(1 << (line & 7));
- ep93xx_gpio_update_int_params(port);
+ gpio_int_unmasked[port] &= ~BIT(d->irq & 7);
+ ep93xx_gpio_update_int_params(epg, port);
}
static void ep93xx_gpio_irq_unmask(struct irq_data *d)
{
- int line = irq_to_gpio(d->irq);
- int port = line >> 3;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ int port = ep93xx_gpio_port(gc);
- gpio_int_unmasked[port] |= 1 << (line & 7);
- ep93xx_gpio_update_int_params(port);
+ gpio_int_unmasked[port] |= BIT(d->irq & 7);
+ ep93xx_gpio_update_int_params(epg, port);
}
/*
@@ -169,12 +208,14 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d)
*/
static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
{
- const int gpio = irq_to_gpio(d->irq);
- const int port = gpio >> 3;
- const int port_mask = 1 << (gpio & 7);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio *epg = gpiochip_get_data(gc);
+ int port = ep93xx_gpio_port(gc);
+ int offset = d->irq & 7;
+ int port_mask = BIT(offset);
irq_flow_handler_t handler;
- gpio_direction_input(gpio);
+ gc->direction_input(gc, offset);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
@@ -200,7 +241,7 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_BOTH:
gpio_int_type1[port] |= port_mask;
/* set initial polarity based on current input level */
- if (gpio_get_value(gpio))
+ if (gc->get(gc, offset))
gpio_int_type2[port] &= ~port_mask; /* falling */
else
gpio_int_type2[port] |= port_mask; /* rising */
@@ -214,7 +255,7 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
gpio_int_enabled[port] |= port_mask;
- ep93xx_gpio_update_int_params(port);
+ ep93xx_gpio_update_int_params(epg, port);
return 0;
}
@@ -228,35 +269,53 @@ static struct irq_chip ep93xx_gpio_irq_chip = {
.irq_set_type = ep93xx_gpio_irq_type,
};
-static void ep93xx_gpio_init_irq(void)
+static int ep93xx_gpio_init_irq(struct platform_device *pdev,
+ struct ep93xx_gpio *epg)
{
+ int ab_parent_irq = platform_get_irq(pdev, 0);
+ struct device *dev = &pdev->dev;
int gpio_irq;
+ int ret;
+ int i;
- for (gpio_irq = gpio_to_irq(0);
- gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) {
+ /* The A bank */
+ ret = gpiochip_irqchip_add(&epg->gc[0], &ep93xx_gpio_irq_chip,
+ 64, handle_level_irq,
+ IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "Could not add irqchip 0\n");
+ return ret;
+ }
+ gpiochip_set_chained_irqchip(&epg->gc[0], &ep93xx_gpio_irq_chip,
+ ab_parent_irq,
+ ep93xx_gpio_ab_irq_handler);
+
+ /* The B bank */
+ ret = gpiochip_irqchip_add(&epg->gc[1], &ep93xx_gpio_irq_chip,
+ 72, handle_level_irq,
+ IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "Could not add irqchip 1\n");
+ return ret;
+ }
+ gpiochip_set_chained_irqchip(&epg->gc[1], &ep93xx_gpio_irq_chip,
+ ab_parent_irq,
+ ep93xx_gpio_ab_irq_handler);
+
+ /* The F bank */
+ for (i = 0; i < 8; i++) {
+ gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i;
+ irq_set_chip_data(gpio_irq, &epg->gc[5]);
irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
handle_level_irq);
irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
}
- irq_set_chained_handler(IRQ_EP93XX_GPIO_AB,
- ep93xx_gpio_ab_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO0MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO1MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO2MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO3MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO4MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO5MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO6MUX,
- ep93xx_gpio_f_irq_handler);
- irq_set_chained_handler(IRQ_EP93XX_GPIO7MUX,
- ep93xx_gpio_f_irq_handler);
+ for (i = 1; i <= 8; i++)
+ irq_set_chained_handler_and_data(platform_get_irq(pdev, i),
+ ep93xx_gpio_f_irq_handler,
+ &epg->gc[i]);
+ return 0;
}
@@ -268,68 +327,54 @@ struct ep93xx_gpio_bank {
int data;
int dir;
int base;
- bool has_debounce;
+ bool has_irq;
};
-#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _debounce) \
+#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq) \
{ \
.label = _label, \
.data = _data, \
.dir = _dir, \
.base = _base, \
- .has_debounce = _debounce, \
+ .has_irq = _has_irq, \
}
static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
- EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true),
- EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true),
+ EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true), /* Bank A has 8 IRQs */
+ EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true), /* Bank B has 8 IRQs */
EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false),
EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false),
EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false),
- EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, true),
+ EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, true), /* Bank F has 8 IRQs */
EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false),
EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false),
};
-static int ep93xx_gpio_set_config(struct gpio_chip *chip, unsigned offset,
+static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
unsigned long config)
{
- int gpio = chip->base + offset;
- int irq = gpio_to_irq(gpio);
u32 debounce;
if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
return -ENOTSUPP;
- if (irq < 0)
- return -EINVAL;
-
debounce = pinconf_to_config_argument(config);
- ep93xx_gpio_int_debounce(irq, debounce ? true : false);
+ ep93xx_gpio_int_debounce(gc, offset, debounce ? true : false);
return 0;
}
-/*
- * Map GPIO A0..A7 (0..7) to irq 64..71,
- * B0..B7 (7..15) to irq 72..79, and
- * F0..F7 (16..24) to irq 80..87.
- */
-static int ep93xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+static int ep93xx_gpio_f_to_irq(struct gpio_chip *gc, unsigned offset)
{
- int gpio = chip->base + offset;
-
- if (gpio > EP93XX_GPIO_LINE_MAX_IRQ)
- return -EINVAL;
-
- return 64 + gpio;
+ return EP93XX_GPIO_F_IRQ_BASE + offset;
}
static int ep93xx_gpio_add_bank(struct gpio_chip *gc, struct device *dev,
- void __iomem *mmio_base, struct ep93xx_gpio_bank *bank)
+ struct ep93xx_gpio *epg,
+ struct ep93xx_gpio_bank *bank)
{
- void __iomem *data = mmio_base + bank->data;
- void __iomem *dir = mmio_base + bank->dir;
+ void __iomem *data = epg->base + bank->data;
+ void __iomem *dir = epg->base + bank->dir;
int err;
err = bgpio_init(gc, dev, 1, data, NULL, NULL, dir, NULL, 0);
@@ -339,41 +384,41 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, struct device *dev,
gc->label = bank->label;
gc->base = bank->base;
- if (bank->has_debounce) {
+ if (bank->has_irq)
gc->set_config = ep93xx_gpio_set_config;
- gc->to_irq = ep93xx_gpio_to_irq;
- }
- return devm_gpiochip_add_data(dev, gc, NULL);
+ return devm_gpiochip_add_data(dev, gc, epg);
}
static int ep93xx_gpio_probe(struct platform_device *pdev)
{
- struct ep93xx_gpio *ep93xx_gpio;
+ struct ep93xx_gpio *epg;
struct resource *res;
int i;
struct device *dev = &pdev->dev;
- ep93xx_gpio = devm_kzalloc(dev, sizeof(struct ep93xx_gpio), GFP_KERNEL);
- if (!ep93xx_gpio)
+ epg = devm_kzalloc(dev, sizeof(*epg), GFP_KERNEL);
+ if (!epg)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ep93xx_gpio->mmio_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ep93xx_gpio->mmio_base))
- return PTR_ERR(ep93xx_gpio->mmio_base);
+ epg->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(epg->base))
+ return PTR_ERR(epg->base);
for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
- struct gpio_chip *gc = &ep93xx_gpio->gc[i];
+ struct gpio_chip *gc = &epg->gc[i];
struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
- if (ep93xx_gpio_add_bank(gc, &pdev->dev,
- ep93xx_gpio->mmio_base, bank))
+ if (ep93xx_gpio_add_bank(gc, &pdev->dev, epg, bank))
dev_warn(&pdev->dev, "Unable to add gpio bank %s\n",
- bank->label);
+ bank->label);
+ /* Only bank F has especially funky IRQ handling */
+ if (i == 5)
+ gc->to_irq = ep93xx_gpio_f_to_irq;
}
- ep93xx_gpio_init_irq();
+ ep93xx_gpio_init_irq(pdev, epg);
return 0;
}
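The bank F demux in ep93xx_gpio_f_irq_handler() folds two discontiguous parent IRQ ranges onto offsets 0..7. Checking the bit trick by hand:

/* port_f_idx = ((irq + 1) & 7) ^ 4:
 *   irq 19: (20 & 7) ^ 4 = 4 ^ 4 = 0   -> F0
 *   irq 22: (23 & 7) ^ 4 = 7 ^ 4 = 3   -> F3
 *   irq 47: (48 & 7) ^ 4 = 0 ^ 4 = 4   -> F4
 *   irq 50: (51 & 7) ^ 4 = 3 ^ 4 = 7   -> F7
 * so gpio_irq = EP93XX_GPIO_F_IRQ_BASE + port_f_idx lands on 80..87. */

Banks A and B, by contrast, share one parent IRQ; the new chained handler reads both status registers and dispatches each set bit through the owning gpiochip's irqdomain.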
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 868bf8501560..95f578804b0e 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
+#include <linux/clk.h>
/* GPIO registers definition */
#define GPIO_DATA_OUT 0x00
@@ -40,11 +41,14 @@
* struct ftgpio_gpio - Gemini GPIO state container
* @dev: containing device for this instance
* @gc: gpiochip for this instance
+ * @base: remapped I/O-memory base
+ * @clk: silicon clock
*/
struct ftgpio_gpio {
struct device *dev;
struct gpio_chip gc;
void __iomem *base;
+ struct clk *clk;
};
static void ftgpio_gpio_ack_irq(struct irq_data *d)
@@ -157,6 +161,73 @@ static void ftgpio_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
+static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ enum pin_config_param param = pinconf_to_config_param(config);
+ u32 arg = pinconf_to_config_argument(config);
+ struct ftgpio_gpio *g = gpiochip_get_data(gc);
+ unsigned long pclk_freq;
+ u32 deb_div;
+ u32 val;
+
+ if (param != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ /*
+ * Debounce only works if interrupts are enabled. The manual
+ * states that if PCLK is 66 MHz, and this is set to 0x7D0, then
+ * PCLK is divided down to 33 kHz for the debounce timer. 0x7D0 is
+ * 2000 decimal, so what they mean is simply that the PCLK is
+ * divided by this value.
+ *
+ * As we get a debounce setting in microseconds, we calculate the
+ * desired period time and see if we can get a suitable debounce
+ * time.
+ */
+ pclk_freq = clk_get_rate(g->clk);
+ deb_div = DIV_ROUND_CLOSEST(pclk_freq, arg);
+
+ /* This register is only 24 bits wide */
+ if (deb_div > (1 << 24))
+ return -ENOTSUPP;
+
+ dev_dbg(g->dev, "prescale divisor: %08x, resulting frequency %lu Hz\n",
+ deb_div, (pclk_freq/deb_div));
+
+ val = readl(g->base + GPIO_DEBOUNCE_PRESCALE);
+ if (val == deb_div) {
+ /*
+ * The debounce timer happens to already be set to the
+ * desirable value, what a coincidence! We can just enable
+ * debounce on this GPIO line and return. This happens more
+ * often than you think, for example when all GPIO keys
+ * on a system are requesting the same debounce interval.
+ */
+ val = readl(g->base + GPIO_DEBOUNCE_EN);
+ val |= BIT(offset);
+ writel(val, g->base + GPIO_DEBOUNCE_EN);
+ return 0;
+ }
+
+ val = readl(g->base + GPIO_DEBOUNCE_EN);
+ if (val) {
+ /*
+ * Oh no! Someone is already using the debounce with
+ * another setting than what we need. Bummer.
+ */
+ return -ENOTSUPP;
+ }
+
+ /* First come, first serve */
+ writel(deb_div, g->base + GPIO_DEBOUNCE_PRESCALE);
+ /* Enable debounce */
+ val |= BIT(offset);
+ writel(val, g->base + GPIO_DEBOUNCE_EN);
+
+ return 0;
+}
+
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -180,6 +251,19 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
if (irq <= 0)
return irq ? irq : -EINVAL;
+ g->clk = devm_clk_get(dev, NULL);
+ if (!IS_ERR(g->clk)) {
+ ret = clk_prepare_enable(g->clk);
+ if (ret)
+ return ret;
+ } else if (PTR_ERR(g->clk) == -EPROBE_DEFER) {
+ /*
+ * Percolate probe deferrals; on any other error,
+ * just live without the clock.
+ */
+ return PTR_ERR(g->clk);
+ }
+
ret = bgpio_init(&g->gc, dev, 4,
g->base + GPIO_DATA_IN,
g->base + GPIO_DATA_SET,
@@ -189,7 +273,7 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
0);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
- return ret;
+ goto dis_clk;
}
g->gc.label = "FTGPIO010";
g->gc.base = -1;
@@ -197,28 +281,50 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
g->gc.owner = THIS_MODULE;
/* ngpio is set by bgpio_init() */
+ /* We need a silicon clock to do debounce */
+ if (!IS_ERR(g->clk))
+ g->gc.set_config = ftgpio_gpio_set_config;
+
ret = devm_gpiochip_add_data(dev, &g->gc, g);
if (ret)
- return ret;
+ goto dis_clk;
/* Disable, unmask and clear all interrupts */
writel(0x0, g->base + GPIO_INT_EN);
writel(0x0, g->base + GPIO_INT_MASK);
writel(~0x0, g->base + GPIO_INT_CLR);
+ /* Clear any use of debounce */
+ writel(0x0, g->base + GPIO_DEBOUNCE_EN);
+
ret = gpiochip_irqchip_add(&g->gc, &ftgpio_gpio_irqchip,
0, handle_bad_irq,
IRQ_TYPE_NONE);
if (ret) {
dev_info(dev, "could not add irqchip\n");
- return ret;
+ goto dis_clk;
}
gpiochip_set_chained_irqchip(&g->gc, &ftgpio_gpio_irqchip,
irq, ftgpio_gpio_irq_handler);
+ platform_set_drvdata(pdev, g);
dev_info(dev, "FTGPIO010 @%p registered\n", g->base);
return 0;
+
+dis_clk:
+ if (!IS_ERR(g->clk))
+ clk_disable_unprepare(g->clk);
+ return ret;
+}
+
+static int ftgpio_gpio_remove(struct platform_device *pdev)
+{
+ struct ftgpio_gpio *g = platform_get_drvdata(pdev);
+
+ if (!IS_ERR(g->clk))
+ clk_disable_unprepare(g->clk);
+ return 0;
}
static const struct of_device_id ftgpio_gpio_of_match[] = {
@@ -239,6 +345,7 @@ static struct platform_driver ftgpio_gpio_driver = {
.name = "ftgpio010-gpio",
.of_match_table = of_match_ptr(ftgpio_gpio_of_match),
},
- .probe = ftgpio_gpio_probe,
+ .probe = ftgpio_gpio_probe,
+ .remove = ftgpio_gpio_remove,
};
builtin_platform_driver(ftgpio_gpio_driver);
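To make the debounce comment above concrete, here is the divisor calculation from ftgpio_gpio_set_config() with the manual's figures and a hypothetical requested value:

/* pclk_freq = 66 MHz, arg = 33000:
 *   deb_div = DIV_ROUND_CLOSEST(66000000, 33000) = 2000 = 0x7D0
 * 2000 < (1 << 24), so it fits the 24-bit prescale register, and the
 * debounce timer then ticks at 66000000 / 2000 = 33 kHz. */

Because the prescaler is shared by the whole block, the function only programs it while no line has debounce enabled; a later request that happens to match the programmed divisor piggybacks on it, and anything else gets -ENOTSUPP.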
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index ad6e5b518669..9d3ac51a765c 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -189,7 +189,6 @@ static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
unsigned long flag;
struct egpio_chip *egpio;
struct egpio_info *ei;
- unsigned bit;
int pos;
int reg;
int shift;
@@ -199,7 +198,6 @@ static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
egpio = gpiochip_get_data(chip);
ei = dev_get_drvdata(egpio->dev);
- bit = egpio_bit(ei, offset);
pos = egpio_pos(ei, offset);
reg = egpio->reg_start + pos;
shift = pos << ei->reg_shift;
@@ -334,7 +332,13 @@ static int __init egpio_probe(struct platform_device *pdev)
ei->chip[i].is_out = pdata->chip[i].direction;
ei->chip[i].dev = &(pdev->dev);
chip = &(ei->chip[i].chip);
- chip->label = "htc-egpio";
+ chip->label = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "htc-egpio-%d",
+ i);
+ if (!chip->label) {
+ ret = -ENOMEM;
+ goto fail;
+ }
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
chip->get = egpio_get;
diff --git a/drivers/gpio/gpio-ingenic.c b/drivers/gpio/gpio-ingenic.c
deleted file mode 100644
index e738e384a5ca..000000000000
--- a/drivers/gpio/gpio-ingenic.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Ingenic JZ47xx GPIO driver
- *
- * Copyright (c) 2017 Paul Cercueil <paul@crapouillou.net>
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/gpio/driver.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/regmap.h>
-
-#define GPIO_PIN 0x00
-#define GPIO_MSK 0x20
-
-#define JZ4740_GPIO_DATA 0x10
-#define JZ4740_GPIO_SELECT 0x50
-#define JZ4740_GPIO_DIR 0x60
-#define JZ4740_GPIO_TRIG 0x70
-#define JZ4740_GPIO_FLAG 0x80
-
-#define JZ4770_GPIO_INT 0x10
-#define JZ4770_GPIO_PAT1 0x30
-#define JZ4770_GPIO_PAT0 0x40
-#define JZ4770_GPIO_FLAG 0x50
-
-#define REG_SET(x) ((x) + 0x4)
-#define REG_CLEAR(x) ((x) + 0x8)
-
-enum jz_version {
- ID_JZ4740,
- ID_JZ4770,
- ID_JZ4780,
-};
-
-struct ingenic_gpio_chip {
- struct regmap *map;
- struct gpio_chip gc;
- struct irq_chip irq_chip;
- unsigned int irq, reg_base;
- enum jz_version version;
-};
-
-static u32 gpio_ingenic_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
-{
- unsigned int val;
-
- regmap_read(jzgc->map, jzgc->reg_base + reg, &val);
-
- return (u32) val;
-}
-
-static void gpio_ingenic_set_bit(struct ingenic_gpio_chip *jzgc,
- u8 reg, u8 offset, bool set)
-{
- if (set)
- reg = REG_SET(reg);
- else
- reg = REG_CLEAR(reg);
-
- regmap_write(jzgc->map, jzgc->reg_base + reg, BIT(offset));
-}
-
-static inline bool gpio_get_value(struct ingenic_gpio_chip *jzgc, u8 offset)
-{
- unsigned int val = gpio_ingenic_read_reg(jzgc, GPIO_PIN);
-
- return !!(val & BIT(offset));
-}
-
-static void gpio_set_value(struct ingenic_gpio_chip *jzgc, u8 offset, int value)
-{
- if (jzgc->version >= ID_JZ4770)
- gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
- else
- gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
-}
-
-static void irq_set_type(struct ingenic_gpio_chip *jzgc,
- u8 offset, unsigned int type)
-{
- u8 reg1, reg2;
-
- if (jzgc->version >= ID_JZ4770) {
- reg1 = JZ4770_GPIO_PAT1;
- reg2 = JZ4770_GPIO_PAT0;
- } else {
- reg1 = JZ4740_GPIO_TRIG;
- reg2 = JZ4740_GPIO_DIR;
- }
-
- switch (type) {
- case IRQ_TYPE_EDGE_RISING:
- gpio_ingenic_set_bit(jzgc, reg2, offset, true);
- gpio_ingenic_set_bit(jzgc, reg1, offset, true);
- break;
- case IRQ_TYPE_EDGE_FALLING:
- gpio_ingenic_set_bit(jzgc, reg2, offset, false);
- gpio_ingenic_set_bit(jzgc, reg1, offset, true);
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- gpio_ingenic_set_bit(jzgc, reg2, offset, true);
- gpio_ingenic_set_bit(jzgc, reg1, offset, false);
- break;
- case IRQ_TYPE_LEVEL_LOW:
- default:
- gpio_ingenic_set_bit(jzgc, reg2, offset, false);
- gpio_ingenic_set_bit(jzgc, reg1, offset, false);
- break;
- }
-}
-
-static void ingenic_gpio_irq_mask(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
-
- gpio_ingenic_set_bit(jzgc, GPIO_MSK, irqd->hwirq, true);
-}
-
-static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
-
- gpio_ingenic_set_bit(jzgc, GPIO_MSK, irqd->hwirq, false);
-}
-
-static void ingenic_gpio_irq_enable(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
-
- if (jzgc->version >= ID_JZ4770)
- gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
- else
- gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
-
- ingenic_gpio_irq_unmask(irqd);
-}
-
-static void ingenic_gpio_irq_disable(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
-
- ingenic_gpio_irq_mask(irqd);
-
- if (jzgc->version >= ID_JZ4770)
- gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
- else
- gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
-}
-
-static void ingenic_gpio_irq_ack(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- int irq = irqd->hwirq;
- bool high;
-
- if (irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) {
- /*
- * Switch to an interrupt for the opposite edge to the one that
- * triggered the interrupt being ACKed.
- */
- high = gpio_get_value(jzgc, irq);
- if (high)
- irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_FALLING);
- else
- irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
- }
-
- if (jzgc->version >= ID_JZ4770)
- gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
- else
- gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
-}
-
-static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
-
- switch (type) {
- case IRQ_TYPE_EDGE_BOTH:
- case IRQ_TYPE_EDGE_RISING:
- case IRQ_TYPE_EDGE_FALLING:
- irq_set_handler_locked(irqd, handle_edge_irq);
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- case IRQ_TYPE_LEVEL_LOW:
- irq_set_handler_locked(irqd, handle_level_irq);
- break;
- default:
- irq_set_handler_locked(irqd, handle_bad_irq);
- }
-
- if (type == IRQ_TYPE_EDGE_BOTH) {
- /*
- * The hardware does not support interrupts on both edges. The
- * best we can do is to set up a single-edge interrupt and then
- * switch to the opposing edge when ACKing the interrupt.
- */
- bool high = gpio_get_value(jzgc, irqd->hwirq);
-
- type = high ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
- }
-
- irq_set_type(jzgc, irqd->hwirq, type);
- return 0;
-}
-
-static int ingenic_gpio_irq_set_wake(struct irq_data *irqd, unsigned int on)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
-
- return irq_set_irq_wake(jzgc->irq, on);
-}
-
-static void ingenic_gpio_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
- struct irq_chip *irq_chip = irq_data_get_irq_chip(&desc->irq_data);
- unsigned long flag, i;
-
- chained_irq_enter(irq_chip, desc);
-
- if (jzgc->version >= ID_JZ4770)
- flag = gpio_ingenic_read_reg(jzgc, JZ4770_GPIO_FLAG);
- else
- flag = gpio_ingenic_read_reg(jzgc, JZ4740_GPIO_FLAG);
-
- for_each_set_bit(i, &flag, 32)
- generic_handle_irq(irq_linear_revmap(gc->irq.domain, i));
- chained_irq_exit(irq_chip, desc);
-}
-
-static void ingenic_gpio_set(struct gpio_chip *gc,
- unsigned int offset, int value)
-{
- struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
-
- gpio_set_value(jzgc, offset, value);
-}
-
-static int ingenic_gpio_get(struct gpio_chip *gc, unsigned int offset)
-{
- struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
-
- return (int) gpio_get_value(jzgc, offset);
-}
-
-static int ingenic_gpio_direction_input(struct gpio_chip *gc,
- unsigned int offset)
-{
- return pinctrl_gpio_direction_input(gc->base + offset);
-}
-
-static int ingenic_gpio_direction_output(struct gpio_chip *gc,
- unsigned int offset, int value)
-{
- ingenic_gpio_set(gc, offset, value);
- return pinctrl_gpio_direction_output(gc->base + offset);
-}
-
-static const struct of_device_id ingenic_gpio_of_match[] = {
- { .compatible = "ingenic,jz4740-gpio", .data = (void *)ID_JZ4740 },
- { .compatible = "ingenic,jz4770-gpio", .data = (void *)ID_JZ4770 },
- { .compatible = "ingenic,jz4780-gpio", .data = (void *)ID_JZ4780 },
- {},
-};
-MODULE_DEVICE_TABLE(of, ingenic_gpio_of_match);
-
-static int ingenic_gpio_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ingenic_gpio_chip *jzgc;
- u32 bank;
- int err;
-
- jzgc = devm_kzalloc(dev, sizeof(*jzgc), GFP_KERNEL);
- if (!jzgc)
- return -ENOMEM;
-
- jzgc->map = dev_get_drvdata(dev->parent);
- if (!jzgc->map) {
- dev_err(dev, "Cannot get parent regmap\n");
- return -ENXIO;
- }
-
- err = of_property_read_u32(dev->of_node, "reg", &bank);
- if (err) {
- dev_err(dev, "Cannot read \"reg\" property: %i\n", err);
- return err;
- }
-
- jzgc->reg_base = bank * 0x100;
-
- jzgc->gc.label = devm_kasprintf(dev, GFP_KERNEL, "GPIO%c", 'A' + bank);
- if (!jzgc->gc.label)
- return -ENOMEM;
-
- /* DO NOT EXPAND THIS: FOR BACKWARD GPIO NUMBERSPACE COMPATIBIBILITY
- * ONLY: WORK TO TRANSITION CONSUMERS TO USE THE GPIO DESCRIPTOR API IN
- * <linux/gpio/consumer.h> INSTEAD.
- */
- jzgc->gc.base = bank * 32;
-
- jzgc->gc.ngpio = 32;
- jzgc->gc.parent = dev;
- jzgc->gc.of_node = dev->of_node;
- jzgc->gc.owner = THIS_MODULE;
- jzgc->version = (enum jz_version)of_device_get_match_data(dev);
-
- jzgc->gc.set = ingenic_gpio_set;
- jzgc->gc.get = ingenic_gpio_get;
- jzgc->gc.direction_input = ingenic_gpio_direction_input;
- jzgc->gc.direction_output = ingenic_gpio_direction_output;
-
- if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
- jzgc->gc.request = gpiochip_generic_request;
- jzgc->gc.free = gpiochip_generic_free;
- }
-
- err = devm_gpiochip_add_data(dev, &jzgc->gc, jzgc);
- if (err)
- return err;
-
- jzgc->irq = irq_of_parse_and_map(dev->of_node, 0);
- if (!jzgc->irq)
- return -EINVAL;
-
- jzgc->irq_chip.name = jzgc->gc.label;
- jzgc->irq_chip.irq_enable = ingenic_gpio_irq_enable;
- jzgc->irq_chip.irq_disable = ingenic_gpio_irq_disable;
- jzgc->irq_chip.irq_unmask = ingenic_gpio_irq_unmask;
- jzgc->irq_chip.irq_mask = ingenic_gpio_irq_mask;
- jzgc->irq_chip.irq_ack = ingenic_gpio_irq_ack;
- jzgc->irq_chip.irq_set_type = ingenic_gpio_irq_set_type;
- jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
- jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
-
- err = gpiochip_irqchip_add(&jzgc->gc, &jzgc->irq_chip, 0,
- handle_level_irq, IRQ_TYPE_NONE);
- if (err)
- return err;
-
- gpiochip_set_chained_irqchip(&jzgc->gc, &jzgc->irq_chip,
- jzgc->irq, ingenic_gpio_irq_handler);
- return 0;
-}
-
-static int ingenic_gpio_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static struct platform_driver ingenic_gpio_driver = {
- .driver = {
- .name = "gpio-ingenic",
- .of_match_table = of_match_ptr(ingenic_gpio_of_match),
- },
- .probe = ingenic_gpio_probe,
- .remove = ingenic_gpio_remove,
-};
-
-static int __init ingenic_gpio_drv_register(void)
-{
- return platform_driver_register(&ingenic_gpio_driver);
-}
-subsys_initcall(ingenic_gpio_drv_register);
-
-static void __exit ingenic_gpio_drv_unregister(void)
-{
- platform_driver_unregister(&ingenic_gpio_driver);
-}
-module_exit(ingenic_gpio_drv_unregister);
-
-MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
-MODULE_DESCRIPTION("Ingenic JZ47xx GPIO driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index b5b9cb1fda50..9a8876abeb57 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -313,18 +313,21 @@ static int max3191x_set_config(struct gpio_chip *gpio, unsigned int offset,
static void gpiod_set_array_single_value_cansleep(unsigned int ndescs,
struct gpio_desc **desc,
+ struct gpio_array *info,
int value)
{
- int i, *values;
+ unsigned long *values;
- values = kmalloc_array(ndescs, sizeof(*values), GFP_KERNEL);
+ values = bitmap_alloc(ndescs, GFP_KERNEL);
if (!values)
return;
- for (i = 0; i < ndescs; i++)
- values[i] = value;
+ if (value)
+ bitmap_fill(values, ndescs);
+ else
+ bitmap_zero(values, ndescs);
- gpiod_set_array_value_cansleep(ndescs, desc, values);
+ gpiod_set_array_value_cansleep(ndescs, desc, info, values);
kfree(values);
}
@@ -397,7 +400,8 @@ static int max3191x_probe(struct spi_device *spi)
if (max3191x->modesel_pins)
gpiod_set_array_single_value_cansleep(
max3191x->modesel_pins->ndescs,
- max3191x->modesel_pins->desc, max3191x->mode);
+ max3191x->modesel_pins->desc,
+ max3191x->modesel_pins->info, max3191x->mode);
max3191x->ignore_uv = device_property_read_bool(dev,
"maxim,ignore-undervoltage");
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 935292a30c99..50bdc29591c0 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Generic driver for memory-mapped GPIO controllers.
*
* Copyright 2008 MontaVista Software, Inc.
* Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* ....``.```~~~~````.`.`.`.`.```````'',,,.........`````......`.......
* ...`` ```````..
* ..The simplest form of a GPIO controller that the driver supports is``
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index d66b7a768ecd..8269cffc2967 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -18,6 +18,7 @@
#include <linux/irq_sim.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
+#include <linux/property.h>
#include "gpiolib.h"
@@ -28,6 +29,8 @@
* of GPIO lines.
*/
#define GPIO_MOCKUP_MAX_RANGES (GPIO_MOCKUP_MAX_GC * 2)
+/* Maximum of three properties + the sentinel. */
+#define GPIO_MOCKUP_MAX_PROP 4
#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
@@ -59,13 +62,6 @@ struct gpio_mockup_dbgfs_private {
int offset;
};
-struct gpio_mockup_platform_data {
- int base;
- int ngpio;
- int index;
- bool named_lines;
-};
-
static int gpio_mockup_ranges[GPIO_MOCKUP_MAX_RANGES];
static int gpio_mockup_num_ranges;
module_param_array(gpio_mockup_ranges, int, &gpio_mockup_num_ranges, 0400);
@@ -255,26 +251,37 @@ static int gpio_mockup_name_lines(struct device *dev,
static int gpio_mockup_probe(struct platform_device *pdev)
{
- struct gpio_mockup_platform_data *pdata;
struct gpio_mockup_chip *chip;
struct gpio_chip *gc;
- int rv, base, ngpio;
struct device *dev;
- char *name;
+ const char *name;
+ int rv, base;
+ u16 ngpio;
dev = &pdev->dev;
- pdata = dev_get_platdata(dev);
- base = pdata->base;
- ngpio = pdata->ngpio;
+
+ rv = device_property_read_u32(dev, "gpio-base", &base);
+ if (rv)
+ base = -1;
+
+ rv = device_property_read_u16(dev, "nr-gpios", &ngpio);
+ if (rv)
+ return rv;
+
+ rv = device_property_read_string(dev, "chip-name", &name);
+ if (rv)
+ name = NULL;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- name = devm_kasprintf(dev, GFP_KERNEL, "%s-%c",
- pdev->name, pdata->index);
- if (!name)
- return -ENOMEM;
+ if (!name) {
+ name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%c", pdev->name, pdev->id + 'A');
+ if (!name)
+ return -ENOMEM;
+ }
gc = &chip->gc;
gc->base = base;
@@ -295,7 +302,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
if (!chip->lines)
return -ENOMEM;
- if (pdata->named_lines) {
+ if (device_property_read_bool(dev, "named-gpio-lines")) {
rv = gpio_mockup_name_lines(dev, chip);
if (rv)
return rv;
@@ -339,9 +346,11 @@ static void gpio_mockup_unregister_pdevs(void)
static int __init gpio_mockup_init(void)
{
- int i, num_chips, err = 0, index = 'A';
- struct gpio_mockup_platform_data pdata;
+ struct property_entry properties[GPIO_MOCKUP_MAX_PROP];
+ int i, prop, num_chips, err = 0, base;
+ struct platform_device_info pdevinfo;
struct platform_device *pdev;
+ u16 ngpio;
if ((gpio_mockup_num_ranges < 2) ||
(gpio_mockup_num_ranges % 2) ||
@@ -371,17 +380,28 @@ static int __init gpio_mockup_init(void)
}
for (i = 0; i < num_chips; i++) {
- pdata.index = index++;
- pdata.base = gpio_mockup_range_base(i);
- pdata.ngpio = pdata.base < 0
- ? gpio_mockup_range_ngpio(i)
- : gpio_mockup_range_ngpio(i) - pdata.base;
- pdata.named_lines = gpio_mockup_named_lines;
-
- pdev = platform_device_register_resndata(NULL,
- GPIO_MOCKUP_NAME,
- i, NULL, 0, &pdata,
- sizeof(pdata));
+ memset(properties, 0, sizeof(properties));
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ prop = 0;
+
+ base = gpio_mockup_range_base(i);
+ if (base >= 0)
+ properties[prop++] = PROPERTY_ENTRY_U32("gpio-base",
+ base);
+
+ ngpio = base < 0 ? gpio_mockup_range_ngpio(i)
+ : gpio_mockup_range_ngpio(i) - base;
+ properties[prop++] = PROPERTY_ENTRY_U16("nr-gpios", ngpio);
+
+ if (gpio_mockup_named_lines)
+ properties[prop++] = PROPERTY_ENTRY_BOOL(
+ "named-gpio-lines");
+
+ pdevinfo.name = GPIO_MOCKUP_NAME;
+ pdevinfo.id = i;
+ pdevinfo.properties = properties;
+
+ pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
gpio_mockup_err("error registering device");
platform_driver_unregister(&gpio_mockup_driver);
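The mockup rework swaps hand-rolled platform data for generic device properties, so probe() reads back exactly what init() attached. A minimal sketch of registering one such device (property names as the driver reads them; everything else hypothetical):

static struct platform_device *register_mock_chip(void)
{
	struct property_entry props[] = {
		PROPERTY_ENTRY_U32("gpio-base", 0),
		PROPERTY_ENTRY_U16("nr-gpios", 16),
		PROPERTY_ENTRY_BOOL("named-gpio-lines"),
		{ }	/* sentinel */
	};
	struct platform_device_info pdevinfo = {
		.name = "gpio-mockup",
		.id = 0,
		.properties = props,
	};

	return platform_device_register_full(&pdevinfo);
}

On the probe side the same keys come back through device_property_read_u32()/_u16()/_string() and device_property_read_bool(), which works identically whether the backing store is a board file, ACPI, or DT.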
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index df30490da820..ea874fd033a5 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -18,8 +18,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio/driver.h>
-/* FIXME: for gpio_get_value(), replace this by direct register read */
-#include <linux/gpio.h>
#include <linux/module.h>
#define MXS_SET 0x4
@@ -86,7 +84,7 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
port->both_edges &= ~pin_mask;
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
- val = gpio_get_value(port->gc.base + d->hwirq);
+ val = port->gc.get(&port->gc, d->hwirq);
if (val)
edge = GPIO_INT_FALL_EDGE;
else
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index e81008678a38..9887c3db6e16 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -19,6 +19,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/cpu_pm.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
@@ -28,10 +29,10 @@
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>
-#define OFF_MODE 1
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
-static LIST_HEAD(omap_gpio_list);
+#define OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER BIT(2)
+#define OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN BIT(1)
struct gpio_regs {
u32 irqenable1;
@@ -48,6 +49,13 @@ struct gpio_regs {
u32 debounce_en;
};
+struct gpio_bank;
+
+struct gpio_omap_funcs {
+ void (*idle_enable_level_quirk)(struct gpio_bank *bank);
+ void (*idle_disable_level_quirk)(struct gpio_bank *bank);
+};
+
struct gpio_bank {
struct list_head node;
void __iomem *base;
@@ -55,6 +63,7 @@ struct gpio_bank {
u32 non_wakeup_gpios;
u32 enabled_non_wakeup_gpios;
struct gpio_regs context;
+ struct gpio_omap_funcs funcs;
u32 saved_datain;
u32 level_mask;
u32 toggle_mask;
@@ -62,6 +71,8 @@ struct gpio_bank {
raw_spinlock_t wa_lock;
struct gpio_chip chip;
struct clk *dbck;
+ struct notifier_block nb;
+ unsigned int is_suspended:1;
u32 mod_usage;
u32 irq_usage;
u32 dbck_enable_mask;
@@ -73,8 +84,8 @@ struct gpio_bank {
int stride;
u32 width;
int context_loss_count;
- int power_mode;
bool workaround_enabled;
+ u32 quirks;
void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
void (*set_dataout_multiple)(struct gpio_bank *bank,
@@ -368,9 +379,18 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
readl_relaxed(bank->base + bank->regs->fallingdetect);
if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
- omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
- bank->context.wake_en =
- readl_relaxed(bank->base + bank->regs->wkup_en);
+ /* Defer wkup_en register update until we idle? */
+ if (bank->quirks & OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN) {
+ if (trigger)
+ bank->context.wake_en |= gpio_bit;
+ else
+ bank->context.wake_en &= ~gpio_bit;
+ } else {
+ omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit,
+ trigger != 0);
+ bank->context.wake_en =
+ readl_relaxed(bank->base + bank->regs->wkup_en);
+ }
}
/* This part needs to be executed always for OMAP{34xx, 44xx} */
@@ -682,12 +702,7 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
struct gpio_bank *bank = gpiochip_get_data(chip);
unsigned long flags;
- /*
- * If this is the first gpio_request for the bank,
- * enable the bank module.
- */
- if (!BANK_USED(bank))
- pm_runtime_get_sync(chip->parent);
+ pm_runtime_get_sync(chip->parent);
raw_spin_lock_irqsave(&bank->lock, flags);
omap_enable_gpio_module(bank, offset);
@@ -711,12 +726,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
omap_disable_gpio_module(bank, offset);
raw_spin_unlock_irqrestore(&bank->lock, flags);
- /*
- * If this is the last gpio to be freed in the bank,
- * disable the bank module.
- */
- if (!BANK_USED(bank))
- pm_runtime_put(chip->parent);
+ pm_runtime_put(chip->parent);
}
/*
@@ -741,7 +751,9 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
if (WARN_ON(!isr_reg))
goto exit;
- pm_runtime_get_sync(bank->chip.parent);
+ if (WARN_ONCE(!pm_runtime_active(bank->chip.parent),
+ "gpio irq%i while runtime suspended?\n", irq))
+ return IRQ_NONE;
while (1) {
raw_spin_lock_irqsave(&bank->lock, lock_flags);
@@ -792,7 +804,6 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
}
}
exit:
- pm_runtime_put(bank->chip.parent);
return IRQ_HANDLED;
}
@@ -841,20 +852,14 @@ static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
struct gpio_bank *bank = omap_irq_data_get_bank(data);
- if (!BANK_USED(bank))
- pm_runtime_get_sync(bank->chip.parent);
+ pm_runtime_get_sync(bank->chip.parent);
}
static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
struct gpio_bank *bank = omap_irq_data_get_bank(data);
- /*
- * If this is the last IRQ to be freed in the bank,
- * disable the bank module.
- */
- if (!BANK_USED(bank))
- pm_runtime_put(bank->chip.parent);
+ pm_runtime_put(bank->chip.parent);
}
static void omap_gpio_ack_irq(struct irq_data *d)
@@ -899,6 +904,82 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
+/*
+ * Only edges can generate a wakeup event to the PRCM.
+ *
+ * Therefore, ensure any wake-up capable GPIOs have
+ * edge-detection enabled before going idle, so that a GPIO
+ * transition still generates a wakeup to the PRCM.
+ * (cf. 34xx NDA TRM 25.5.3.1)
+ *
+ * The normal values will be restored upon ->runtime_resume()
+ * by writing back the values saved in bank->context.
+ */
+static void __maybe_unused
+omap2_gpio_enable_level_quirk(struct gpio_bank *bank)
+{
+ u32 wake_low, wake_hi;
+
+ /* Enable additional edge detection for level gpios for idle */
+ wake_low = bank->context.leveldetect0 & bank->context.wake_en;
+ if (wake_low)
+ writel_relaxed(wake_low | bank->context.fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+
+ wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
+ if (wake_hi)
+ writel_relaxed(wake_hi | bank->context.risingdetect,
+ bank->base + bank->regs->risingdetect);
+}
+
+static void __maybe_unused
+omap2_gpio_disable_level_quirk(struct gpio_bank *bank)
+{
+ /* Disable edge detection for level gpios after idle */
+ writel_relaxed(bank->context.fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+ writel_relaxed(bank->context.risingdetect,
+ bank->base + bank->regs->risingdetect);
+}
+
+/*
+ * On omap4 and later SoC variants a level interrupt with wkup_en
+ * enabled blocks the GPIO functional clock from idling until the GPIO
+ * instance has been reset. To avoid that, we must set wkup_en for
+ * level interrupts only while idle, and clear the level registers for
+ * the duration of idle. The level interrupts will still be there on
+ * wakeup by their nature.
+ */
+static void __maybe_unused
+omap4_gpio_enable_level_quirk(struct gpio_bank *bank)
+{
+ /* Update wake register for idle; edge bits might already be set */
+ writel_relaxed(bank->context.wake_en,
+ bank->base + bank->regs->wkup_en);
+
+ /* Clear level registers for idle */
+ writel_relaxed(0, bank->base + bank->regs->leveldetect0);
+ writel_relaxed(0, bank->base + bank->regs->leveldetect1);
+}
+
+static void __maybe_unused
+omap4_gpio_disable_level_quirk(struct gpio_bank *bank)
+{
+ /* Restore level registers after idle */
+ writel_relaxed(bank->context.leveldetect0,
+ bank->base + bank->regs->leveldetect0);
+ writel_relaxed(bank->context.leveldetect1,
+ bank->base + bank->regs->leveldetect1);
+
+ /* Clear saved wkup_en for level, it will be set for next idle again */
+ bank->context.wake_en &= ~(bank->context.leveldetect0 |
+ bank->context.leveldetect1);
+
+ /* Update wake with only edge configuration */
+ writel_relaxed(bank->context.wake_en,
+ bank->base + bank->regs->wkup_en);
+}
+
/*---------------------------------------------------------------------*/
static int omap_mpuio_suspend_noirq(struct device *dev)
@@ -1218,6 +1299,36 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
return ret;
}
+static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context);
+static void omap_gpio_unidle(struct gpio_bank *bank);
+
+static int gpio_omap_cpu_notifier(struct notifier_block *nb,
+ unsigned long cmd, void *v)
+{
+ struct gpio_bank *bank;
+ unsigned long flags;
+
+ bank = container_of(nb, struct gpio_bank, nb);
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ if (bank->is_suspended)
+ break;
+ omap_gpio_idle(bank, true);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ if (bank->is_suspended)
+ break;
+ omap_gpio_unidle(bank);
+ break;
+ }
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return NOTIFY_OK;
+}
+
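For orientation, a minimal sketch of the idle round trip this notifier drives, assuming an omap4-class bank with both quirk handlers installed (the handlers are selected in probe below):

/*
 * CPU_CLUSTER_PM_ENTER
 *   -> omap_gpio_idle(bank, may_lose_context = true)
 *        -> bank->funcs.idle_enable_level_quirk(bank)
 *           (omap4: wake_en written to wkup_en, leveldetect0/1 cleared)
 * ... cluster in low-power state ...
 * CPU_CLUSTER_PM_EXIT or CPU_CLUSTER_PM_ENTER_FAILED
 *   -> omap_gpio_unidle(bank)
 *        -> bank->funcs.idle_disable_level_quirk(bank)
 *           (omap4: leveldetect0/1 restored, wkup_en trimmed to edges)
 */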
static const struct of_device_id omap_gpio_match[];
static int omap_gpio_probe(struct platform_device *pdev)
@@ -1256,6 +1367,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
irqc->name = dev_name(&pdev->dev);
irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
+ irqc->parent_device = dev;
bank->irq = platform_get_irq(pdev, 0);
if (bank->irq <= 0) {
@@ -1270,6 +1382,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
bank->chip.parent = dev;
bank->chip.owner = THIS_MODULE;
bank->dbck_flag = pdata->dbck_flag;
+ bank->quirks = pdata->quirks;
bank->stride = pdata->bank_stride;
bank->width = pdata->bank_width;
bank->is_mpuio = pdata->is_mpuio;
@@ -1278,6 +1391,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
#ifdef CONFIG_OF_GPIO
bank->chip.of_node = of_node_get(node);
#endif
+
if (node) {
if (!of_property_read_bool(node, "ti,gpio-always-on"))
bank->loses_context = true;
@@ -1298,6 +1412,18 @@ static int omap_gpio_probe(struct platform_device *pdev)
omap_set_gpio_dataout_mask_multiple;
}
+ if (bank->quirks & OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN) {
+ bank->funcs.idle_enable_level_quirk =
+ omap4_gpio_enable_level_quirk;
+ bank->funcs.idle_disable_level_quirk =
+ omap4_gpio_disable_level_quirk;
+ } else if (bank->quirks & OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER) {
+ bank->funcs.idle_enable_level_quirk =
+ omap2_gpio_enable_level_quirk;
+ bank->funcs.idle_disable_level_quirk =
+ omap2_gpio_disable_level_quirk;
+ }
+
raw_spin_lock_init(&bank->lock);
raw_spin_lock_init(&bank->wa_lock);
@@ -1322,7 +1448,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bank);
pm_runtime_enable(dev);
- pm_runtime_irq_safe(dev);
pm_runtime_get_sync(dev);
if (bank->is_mpuio)
@@ -1341,9 +1466,13 @@ static int omap_gpio_probe(struct platform_device *pdev)
omap_gpio_show_rev(bank);
- pm_runtime_put(dev);
+ if (bank->funcs.idle_enable_level_quirk &&
+ bank->funcs.idle_disable_level_quirk) {
+ bank->nb.notifier_call = gpio_omap_cpu_notifier;
+ cpu_pm_register_notifier(&bank->nb);
+ }
- list_add_tail(&bank->node, &omap_gpio_list);
+ pm_runtime_put(dev);
return 0;
}
@@ -1352,6 +1481,8 @@ static int omap_gpio_remove(struct platform_device *pdev)
{
struct gpio_bank *bank = platform_get_drvdata(pdev);
+ if (bank->nb.notifier_call)
+ cpu_pm_unregister_notifier(&bank->nb);
list_del(&bank->node);
gpiochip_remove(&bank->chip);
pm_runtime_disable(&pdev->dev);
@@ -1361,48 +1492,22 @@ static int omap_gpio_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_ARCH_OMAP2PLUS
-
-#if defined(CONFIG_PM)
static void omap_gpio_restore_context(struct gpio_bank *bank);
-static int omap_gpio_runtime_suspend(struct device *dev)
+static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_bank *bank = platform_get_drvdata(pdev);
+ struct device *dev = bank->chip.parent;
u32 l1 = 0, l2 = 0;
- unsigned long flags;
- u32 wake_low, wake_hi;
- raw_spin_lock_irqsave(&bank->lock, flags);
-
- /*
- * Only edges can generate a wakeup event to the PRCM.
- *
- * Therefore, ensure any wake-up capable GPIOs have
- * edge-detection enabled before going idle to ensure a wakeup
- * to the PRCM is generated on a GPIO transition. (c.f. 34xx
- * NDA TRM 25.5.3.1)
- *
- * The normal values will be restored upon ->runtime_resume()
- * by writing back the values saved in bank->context.
- */
- wake_low = bank->context.leveldetect0 & bank->context.wake_en;
- if (wake_low)
- writel_relaxed(wake_low | bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
- wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
- if (wake_hi)
- writel_relaxed(wake_hi | bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
+ if (bank->funcs.idle_enable_level_quirk)
+ bank->funcs.idle_enable_level_quirk(bank);
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
- if (bank->power_mode != OFF_MODE) {
- bank->power_mode = 0;
+ if (!may_lose_context)
goto update_gpio_context_count;
- }
+
/*
* If going to OFF, remove triggering for all
* non-wakeup GPIOs. Otherwise spurious IRQs will be
@@ -1427,23 +1532,16 @@ update_gpio_context_count:
bank->get_context_loss_count(dev);
omap_gpio_dbck_disable(bank);
- raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
}
static void omap_gpio_init_context(struct gpio_bank *p);
-static int omap_gpio_runtime_resume(struct device *dev)
+static void omap_gpio_unidle(struct gpio_bank *bank)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_bank *bank = platform_get_drvdata(pdev);
+ struct device *dev = bank->chip.parent;
u32 l = 0, gen, gen0, gen1;
- unsigned long flags;
int c;
- raw_spin_lock_irqsave(&bank->lock, flags);
-
/*
* On the first resume during the probe, the context has not
* been initialised and so initialise it now. Also initialise
@@ -1459,16 +1557,8 @@ static int omap_gpio_runtime_resume(struct device *dev)
omap_gpio_dbck_enable(bank);
- /*
- * In ->runtime_suspend(), level-triggered, wakeup-enabled
- * GPIOs were set to edge trigger also in order to be able to
- * generate a PRCM wakeup. Here we restore the
- * pre-runtime_suspend() values for edge triggering.
- */
- writel_relaxed(bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
- writel_relaxed(bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
+ if (bank->funcs.idle_disable_level_quirk)
+ bank->funcs.idle_disable_level_quirk(bank);
if (bank->loses_context) {
if (!bank->get_context_loss_count) {
@@ -1478,16 +1568,13 @@ static int omap_gpio_runtime_resume(struct device *dev)
if (c != bank->context_loss_count) {
omap_gpio_restore_context(bank);
} else {
- raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
+ return;
}
}
}
- if (!bank->workaround_enabled) {
- raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
+ if (!bank->workaround_enabled)
+ return;
l = readl_relaxed(bank->base + bank->regs->datain);
@@ -1540,41 +1627,8 @@ static int omap_gpio_runtime_resume(struct device *dev)
}
bank->workaround_enabled = false;
- raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
-}
-#endif /* CONFIG_PM */
-
-#if IS_BUILTIN(CONFIG_GPIO_OMAP)
-void omap2_gpio_prepare_for_idle(int pwr_mode)
-{
- struct gpio_bank *bank;
-
- list_for_each_entry(bank, &omap_gpio_list, node) {
- if (!BANK_USED(bank) || !bank->loses_context)
- continue;
-
- bank->power_mode = pwr_mode;
-
- pm_runtime_put_sync_suspend(bank->chip.parent);
- }
-}
-
-void omap2_gpio_resume_after_idle(void)
-{
- struct gpio_bank *bank;
-
- list_for_each_entry(bank, &omap_gpio_list, node) {
- if (!BANK_USED(bank) || !bank->loses_context)
- continue;
-
- pm_runtime_get_sync(bank->chip.parent);
- }
}
-#endif
-#if defined(CONFIG_PM)
static void omap_gpio_init_context(struct gpio_bank *p)
{
struct omap_gpio_reg_offs *regs = p->regs;
@@ -1631,17 +1685,57 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
writel_relaxed(bank->context.irqenable2,
bank->base + bank->regs->irqenable2);
}
-#endif /* CONFIG_PM */
-#else
-#define omap_gpio_runtime_suspend NULL
-#define omap_gpio_runtime_resume NULL
-static inline void omap_gpio_init_context(struct gpio_bank *p) {}
-#endif
+static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ unsigned long flags;
+ int error = 0;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ /* With IRQs in use, idling must happen via CPU_CLUSTER_PM_ENTER */
+ if (bank->irq_usage) {
+ error = -EBUSY;
+ goto unlock;
+ }
+ omap_gpio_idle(bank, true);
+ bank->is_suspended = true;
+unlock:
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return error;
+}
+
+static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ unsigned long flags;
+ int error = 0;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ /* With IRQs in use, unidling must happen via CPU_CLUSTER_PM_EXIT */
+ if (bank->irq_usage) {
+ error = -EBUSY;
+ goto unlock;
+ }
+ omap_gpio_unidle(bank);
+ bank->is_suspended = false;
+unlock:
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return error;
+}
+
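A note on how the two idle paths coordinate, inferred from the is_suspended checks here and in the CPU PM notifier above:

/*
 * bank->is_suspended arbitrates between the two idle paths: the
 * runtime PM callbacks set and clear it under bank->lock and refuse
 * with -EBUSY while irq_usage is nonzero, while the CPU_CLUSTER_PM
 * notifier skips banks that are already runtime suspended, so each
 * bank is idled and unidled exactly once per transition.
 */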
+#ifdef CONFIG_ARCH_OMAP2PLUS
static const struct dev_pm_ops gpio_pm_ops = {
SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
NULL)
};
+#else
+static const struct dev_pm_ops gpio_pm_ops;
+#endif /* CONFIG_ARCH_OMAP2PLUS */
#if defined(CONFIG_OF)
static struct omap_gpio_reg_offs omap2_gpio_regs = {
@@ -1690,6 +1784,11 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};
+/*
+ * Note that omap2 does not currently support idle modes with context loss,
+ * so there is no need to add the OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER quirk
+ * flag to save and restore context.
+ */
static const struct omap_gpio_platform_data omap2_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
@@ -1700,12 +1799,15 @@ static const struct omap_gpio_platform_data omap3_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
+ .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER,
};
static const struct omap_gpio_platform_data omap4_pdata = {
.regs = &omap4_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
+ .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER |
+ OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN,
};
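Summarizing the per-SoC quirk wiring above:

/*
 * omap2:  no quirks (no idle modes with context loss)
 * omap3:  OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER
 * omap4+: OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER |
 *         OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN
 */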
static const struct of_device_id omap_gpio_match[] = {
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index c18712dabf93..bfe4c5c9f41c 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -776,6 +776,9 @@ static int pxa_gpio_suspend(void)
struct pxa_gpio_bank *c;
int gpio;
+ if (!pchip)
+ return 0;
+
for_each_gpio_bank(gpio, c, pchip) {
c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
@@ -794,6 +797,9 @@ static void pxa_gpio_resume(void)
struct pxa_gpio_bank *c;
int gpio;
+ if (!pchip)
+ return;
+
for_each_gpio_bank(gpio, c, pchip) {
/* restore level with set/clear */
writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 55cc61086d99..3c82bb3c2030 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -321,6 +321,9 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
u32 val, bankmask;
bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
+ if (chip->valid_mask)
+ bankmask &= chip->valid_mask[0];
+
if (!bankmask)
return;
@@ -558,6 +561,9 @@ static int gpio_rcar_resume(struct device *dev)
u32 mask;
for (offset = 0; offset < p->gpio_chip.ngpio; offset++) {
+ if (!gpiochip_line_is_valid(&p->gpio_chip, offset))
+ continue;
+
mask = BIT(offset);
/* I/O pin */
if (!(p->bank_info.iointsel & mask)) {
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
new file mode 100644
index 000000000000..571b2a81c6de
--- /dev/null
+++ b/drivers/gpio/gpio-siox.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2018 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de>
+ */
+
+#include <linux/module.h>
+#include <linux/siox.h>
+#include <linux/gpio/driver.h>
+#include <linux/of.h>
+
+struct gpio_siox_ddata {
+ struct gpio_chip gchip;
+ struct irq_chip ichip;
+ struct mutex lock;
+ u8 setdata[1];
+ u8 getdata[3];
+
+ spinlock_t irqlock;
+ u32 irq_enable;
+ u32 irq_status;
+ u32 irq_type[20];
+};
+
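The 20-line layout is implicit in the accessors further down; summarizing the bit arithmetic as a reading aid:

/*
 * Line layout, as implied by the get/set accessors below:
 *   offsets 0..11  - inputs, read from getdata[], at bit (11 - offset)
 *   offsets 12..19 - outputs, held in setdata[0], at bit (19 - offset)
 */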
+/*
+ * Note that this callback only sets the value that is clocked out in the next
+ * cycle.
+ */
+static int gpio_siox_set_data(struct siox_device *sdevice, u8 status, u8 buf[])
+{
+ struct gpio_siox_ddata *ddata = dev_get_drvdata(&sdevice->dev);
+
+ mutex_lock(&ddata->lock);
+ buf[0] = ddata->setdata[0];
+ mutex_unlock(&ddata->lock);
+
+ return 0;
+}
+
+static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[])
+{
+ struct gpio_siox_ddata *ddata = dev_get_drvdata(&sdevice->dev);
+ size_t offset;
+ u32 trigger;
+
+ mutex_lock(&ddata->lock);
+
+ spin_lock_irq(&ddata->irqlock);
+
+ for (offset = 0; offset < 12; ++offset) {
+ unsigned int bitpos = 11 - offset;
+ unsigned int gpiolevel = buf[bitpos / 8] & (1 << bitpos % 8);
+ unsigned int prev_level =
+ ddata->getdata[bitpos / 8] & (1 << (bitpos % 8));
+ u32 irq_type = ddata->irq_type[offset];
+
+ if (gpiolevel) {
+ if ((irq_type & IRQ_TYPE_LEVEL_HIGH) ||
+ ((irq_type & IRQ_TYPE_EDGE_RISING) && !prev_level))
+ ddata->irq_status |= 1 << offset;
+ } else {
+ if ((irq_type & IRQ_TYPE_LEVEL_LOW) ||
+ ((irq_type & IRQ_TYPE_EDGE_FALLING) && prev_level))
+ ddata->irq_status |= 1 << offset;
+ }
+ }
+
+ trigger = ddata->irq_status & ddata->irq_enable;
+
+ spin_unlock_irq(&ddata->irqlock);
+
+ ddata->getdata[0] = buf[0];
+ ddata->getdata[1] = buf[1];
+ ddata->getdata[2] = buf[2];
+
+ mutex_unlock(&ddata->lock);
+
+ for (offset = 0; offset < 12; ++offset) {
+ if (trigger & (1 << offset)) {
+ struct irq_domain *irqdomain = ddata->gchip.irq.domain;
+ unsigned int irq = irq_find_mapping(irqdomain, offset);
+
+ /*
+ * Conceptually handle_nested_irq should call the flow
+ * handler of the irq chip. But it doesn't, so we have
+ * to clear irq_status here.
+ */
+ spin_lock_irq(&ddata->irqlock);
+ ddata->irq_status &= ~(1 << offset);
+ spin_unlock_irq(&ddata->irqlock);
+
+ handle_nested_irq(irq);
+ }
+ }
+
+ return 0;
+}
+
+static void gpio_siox_irq_ack(struct irq_data *d)
+{
+ struct irq_chip *ic = irq_data_get_irq_chip(d);
+ struct gpio_siox_ddata *ddata =
+ container_of(ic, struct gpio_siox_ddata, ichip);
+
+ spin_lock_irq(&ddata->irqlock);
+ ddata->irq_status &= ~(1 << d->hwirq);
+ spin_unlock_irq(&ddata->irqlock);
+}
+
+static void gpio_siox_irq_mask(struct irq_data *d)
+{
+ struct irq_chip *ic = irq_data_get_irq_chip(d);
+ struct gpio_siox_ddata *ddata =
+ container_of(ic, struct gpio_siox_ddata, ichip);
+
+ spin_lock_irq(&ddata->irqlock);
+ ddata->irq_enable &= ~(1 << d->hwirq);
+ spin_unlock_irq(&ddata->irqlock);
+}
+
+static void gpio_siox_irq_unmask(struct irq_data *d)
+{
+ struct irq_chip *ic = irq_data_get_irq_chip(d);
+ struct gpio_siox_ddata *ddata =
+ container_of(ic, struct gpio_siox_ddata, ichip);
+
+ spin_lock_irq(&ddata->irqlock);
+ ddata->irq_enable |= 1 << d->hwirq;
+ spin_unlock_irq(&ddata->irqlock);
+}
+
+static int gpio_siox_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct irq_chip *ic = irq_data_get_irq_chip(d);
+ struct gpio_siox_ddata *ddata =
+ container_of(ic, struct gpio_siox_ddata, ichip);
+
+ spin_lock_irq(&ddata->irqlock);
+ ddata->irq_type[d->hwirq] = type;
+ spin_unlock_irq(&ddata->irqlock);
+
+ return 0;
+}
+
+static int gpio_siox_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpio_siox_ddata *ddata =
+ container_of(chip, struct gpio_siox_ddata, gchip);
+ int ret;
+
+ mutex_lock(&ddata->lock);
+
+ if (offset >= 12) {
+ unsigned int bitpos = 19 - offset;
+
+ ret = ddata->setdata[0] & (1 << bitpos);
+ } else {
+ unsigned int bitpos = 11 - offset;
+
+ ret = ddata->getdata[bitpos / 8] & (1 << (bitpos % 8));
+ }
+
+ mutex_unlock(&ddata->lock);
+
+ return ret;
+}
+
+static void gpio_siox_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct gpio_siox_ddata *ddata =
+ container_of(chip, struct gpio_siox_ddata, gchip);
+ u8 mask = 1 << (19 - offset);
+
+ mutex_lock(&ddata->lock);
+
+ if (value)
+ ddata->setdata[0] |= mask;
+ else
+ ddata->setdata[0] &= ~mask;
+
+ mutex_unlock(&ddata->lock);
+}
+
+static int gpio_siox_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ if (offset >= 12)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int gpio_siox_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ if (offset < 12)
+ return -EINVAL;
+
+ gpio_siox_set(chip, offset, value);
+ return 0;
+}
+
+static int gpio_siox_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ if (offset < 12)
+ return 1; /* input */
+ else
+ return 0; /* output */
+}
+
+static int gpio_siox_probe(struct siox_device *sdevice)
+{
+ struct gpio_siox_ddata *ddata;
+ int ret;
+
+ ddata = devm_kzalloc(&sdevice->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ dev_set_drvdata(&sdevice->dev, ddata);
+
+ mutex_init(&ddata->lock);
+ spin_lock_init(&ddata->irqlock);
+
+ ddata->gchip.base = -1;
+ ddata->gchip.can_sleep = 1;
+ ddata->gchip.parent = &sdevice->dev;
+ ddata->gchip.owner = THIS_MODULE;
+ ddata->gchip.get = gpio_siox_get;
+ ddata->gchip.set = gpio_siox_set;
+ ddata->gchip.direction_input = gpio_siox_direction_input;
+ ddata->gchip.direction_output = gpio_siox_direction_output;
+ ddata->gchip.get_direction = gpio_siox_get_direction;
+ ddata->gchip.ngpio = 20;
+
+ ddata->ichip.name = "siox-gpio";
+ ddata->ichip.irq_ack = gpio_siox_irq_ack;
+ ddata->ichip.irq_mask = gpio_siox_irq_mask;
+ ddata->ichip.irq_unmask = gpio_siox_irq_unmask;
+ ddata->ichip.irq_set_type = gpio_siox_irq_set_type;
+
+ ret = gpiochip_add(&ddata->gchip);
+ if (ret) {
+ dev_err(&sdevice->dev,
+ "Failed to register gpio chip (%d)\n", ret);
+ /* nothing to unwind: the chip was never added */
+ return ret;
+ }
+
+ ret = gpiochip_irqchip_add(&ddata->gchip, &ddata->ichip,
+ 0, handle_level_irq, IRQ_TYPE_EDGE_RISING);
+ if (ret) {
+ dev_err(&sdevice->dev,
+ "Failed to register irq chip (%d)\n", ret);
+ gpiochip_remove(&ddata->gchip);
+ }
+
+ return ret;
+}
+
+static int gpio_siox_remove(struct siox_device *sdevice)
+{
+ struct gpio_siox_ddata *ddata = dev_get_drvdata(&sdevice->dev);
+
+ gpiochip_remove(&ddata->gchip);
+ return 0;
+}
+
+static struct siox_driver gpio_siox_driver = {
+ .probe = gpio_siox_probe,
+ .remove = gpio_siox_remove,
+ .set_data = gpio_siox_set_data,
+ .get_data = gpio_siox_get_data,
+ .driver = {
+ .name = "gpio-siox",
+ },
+};
+
+static int __init gpio_siox_init(void)
+{
+ return siox_driver_register(&gpio_siox_driver);
+}
+module_init(gpio_siox_init);
+
+static void __exit gpio_siox_exit(void)
+{
+ siox_driver_unregister(&gpio_siox_driver);
+}
+module_exit(gpio_siox_exit);
+
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_DESCRIPTION("SIOX gpio driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 87c18a544513..7f3da34c7874 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -122,7 +122,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS));
}
- priv->data->set(chip, offset, val);
+ chip->set(chip, offset, val);
return 0;
}
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index a12cd0b5c972..d5e5d19f4c0a 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -45,14 +45,12 @@
/**
- * @spinlock: used for atomic read/modify/write of registers
* @base: register base address
* @domain: IRQ domain of GPIO generated interrupts managed by this controller
* @irq: Interrupt line of parent interrupt controller
* @gc: gpio_chip structure associated to this GPIO controller
*/
struct tb10x_gpio {
- spinlock_t spinlock;
void __iomem *base;
struct irq_domain *domain;
int irq;
@@ -76,60 +74,14 @@ static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs,
u32 r;
unsigned long flags;
- spin_lock_irqsave(&gpio->spinlock, flags);
+ spin_lock_irqsave(&gpio->gc.bgpio_lock, flags);
r = tb10x_reg_read(gpio, offs);
r = (r & ~mask) | (val & mask);
tb10x_reg_write(gpio, offs, r);
- spin_unlock_irqrestore(&gpio->spinlock, flags);
-}
-
-static int tb10x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
-{
- struct tb10x_gpio *tb10x_gpio = gpiochip_get_data(chip);
- int mask = BIT(offset);
- int val = TB10X_GPIO_DIR_IN << offset;
-
- tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DDR, mask, val);
-
- return 0;
-}
-
-static int tb10x_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct tb10x_gpio *tb10x_gpio = gpiochip_get_data(chip);
- int val;
-
- val = tb10x_reg_read(tb10x_gpio, OFFSET_TO_REG_DATA);
-
- if (val & BIT(offset))
- return 1;
- else
- return 0;
-}
-
-static void tb10x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct tb10x_gpio *tb10x_gpio = gpiochip_get_data(chip);
- int mask = BIT(offset);
- int val = value << offset;
-
- tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DATA, mask, val);
-}
-
-static int tb10x_gpio_direction_out(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct tb10x_gpio *tb10x_gpio = gpiochip_get_data(chip);
- int mask = BIT(offset);
- int val = TB10X_GPIO_DIR_OUT << offset;
-
- tb10x_gpio_set(chip, offset, value);
- tb10x_set_bits(tb10x_gpio, OFFSET_TO_REG_DDR, mask, val);
-
- return 0;
+ spin_unlock_irqrestore(&gpio->gc.bgpio_lock, flags);
}
static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -169,72 +121,85 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
{
struct tb10x_gpio *tb10x_gpio;
struct resource *mem;
- struct device_node *dn = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
int ret = -EBUSY;
u32 ngpio;
- if (!dn)
+ if (!np)
return -EINVAL;
- if (of_property_read_u32(dn, "abilis,ngpio", &ngpio))
+ if (of_property_read_u32(np, "abilis,ngpio", &ngpio))
return -EINVAL;
- tb10x_gpio = devm_kzalloc(&pdev->dev, sizeof(*tb10x_gpio), GFP_KERNEL);
+ tb10x_gpio = devm_kzalloc(dev, sizeof(*tb10x_gpio), GFP_KERNEL);
if (tb10x_gpio == NULL)
return -ENOMEM;
- spin_lock_init(&tb10x_gpio->spinlock);
-
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tb10x_gpio->base = devm_ioremap_resource(&pdev->dev, mem);
+ tb10x_gpio->base = devm_ioremap_resource(dev, mem);
if (IS_ERR(tb10x_gpio->base))
return PTR_ERR(tb10x_gpio->base);
- tb10x_gpio->gc.label =
- devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
+ tb10x_gpio->gc.label =
+ devm_kasprintf(dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
if (!tb10x_gpio->gc.label)
return -ENOMEM;
- tb10x_gpio->gc.parent = &pdev->dev;
- tb10x_gpio->gc.owner = THIS_MODULE;
- tb10x_gpio->gc.direction_input = tb10x_gpio_direction_in;
- tb10x_gpio->gc.get = tb10x_gpio_get;
- tb10x_gpio->gc.direction_output = tb10x_gpio_direction_out;
- tb10x_gpio->gc.set = tb10x_gpio_set;
- tb10x_gpio->gc.request = gpiochip_generic_request;
- tb10x_gpio->gc.free = gpiochip_generic_free;
- tb10x_gpio->gc.base = -1;
- tb10x_gpio->gc.ngpio = ngpio;
- tb10x_gpio->gc.can_sleep = false;
-
-
- ret = devm_gpiochip_add_data(&pdev->dev, &tb10x_gpio->gc, tb10x_gpio);
+ /*
+ * Initialize generic GPIO with a single register for reading and
+ * setting the lines, no separate set or clear registers, and a data
+ * direction register where 1 means "output".
+ */
+ ret = bgpio_init(&tb10x_gpio->gc, dev, 4,
+ tb10x_gpio->base + OFFSET_TO_REG_DATA,
+ NULL,
+ NULL,
+ tb10x_gpio->base + OFFSET_TO_REG_DDR,
+ NULL,
+ 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+ tb10x_gpio->gc.base = -1;
+ tb10x_gpio->gc.parent = dev;
+ tb10x_gpio->gc.owner = THIS_MODULE;
+ /*
+ * ngpio is set by bgpio_init(), but we override it; likewise, the
+ * .request() callback below overrides the one set up by generic GPIO.
+ */
+ tb10x_gpio->gc.ngpio = ngpio;
+ tb10x_gpio->gc.request = gpiochip_generic_request;
+ tb10x_gpio->gc.free = gpiochip_generic_free;
+
+ ret = devm_gpiochip_add_data(dev, &tb10x_gpio->gc, tb10x_gpio);
if (ret < 0) {
- dev_err(&pdev->dev, "Could not add gpiochip.\n");
+ dev_err(dev, "Could not add gpiochip.\n");
return ret;
}
platform_set_drvdata(pdev, tb10x_gpio);
- if (of_find_property(dn, "interrupt-controller", NULL)) {
+ if (of_find_property(np, "interrupt-controller", NULL)) {
struct irq_chip_generic *gc;
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
- dev_err(&pdev->dev, "No interrupt specified.\n");
+ dev_err(dev, "No interrupt specified.\n");
return ret;
}
tb10x_gpio->gc.to_irq = tb10x_gpio_to_irq;
tb10x_gpio->irq = ret;
- ret = devm_request_irq(&pdev->dev, ret, tb10x_gpio_irq_cascade,
+ ret = devm_request_irq(dev, ret, tb10x_gpio_irq_cascade,
IRQF_TRIGGER_NONE | IRQF_SHARED,
- dev_name(&pdev->dev), tb10x_gpio);
+ dev_name(dev), tb10x_gpio);
if (ret != 0)
return ret;
- tb10x_gpio->domain = irq_domain_add_linear(dn,
+ tb10x_gpio->domain = irq_domain_add_linear(np,
tb10x_gpio->gc.ngpio,
&irq_generic_chip_ops, NULL);
if (!tb10x_gpio->domain) {
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
index b23c4d2429be..2eea98ff4ea3 100644
--- a/drivers/gpio/gpio-tps65086.c
+++ b/drivers/gpio/gpio-tps65086.c
@@ -1,20 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65912 driver
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index 042b9a20781a..9b6cc74f47c8 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TI TPS6586x GPIO driver
*
@@ -7,22 +8,10 @@
* Based on tps6586x.c
* Copyright (c) 2010 CompuLab Ltd.
* Mike Rapoport <mike@compulab.co.il>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mfd/tps6586x.h>
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index e63d7dabf78b..0c785b0fd161 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* TI TPS6591x GPIO driver
*
@@ -5,18 +6,12 @@
*
* Author: Graeme Gregory <gg@slimlogic.co.uk>
* Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/mfd/tps65910.h>
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index abc0798ef843..3ad68bd78282 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -1,23 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPIO driver for TI TPS65912x PMICs
*
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the Arizona GPIO driver and the previous TPS65912 driver by
* Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -40,9 +32,9 @@ static int tps65912_gpio_get_direction(struct gpio_chip *gc,
return ret;
if (val & GPIO_CFG_MASK)
- return GPIOF_DIR_OUT;
+ return 0;
else
- return GPIOF_DIR_IN;
+ return 1;
}
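Presumably the literals stand in for GPIOF_DIR_OUT/GPIOF_DIR_IN, which live in the legacy <linux/gpio.h> this patch stops including; the return convention itself is gpiolib's:

/*
 * get_direction return convention (see gpiod_get_direction()):
 *   0 = output, 1 = input
 */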
static int tps65912_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index 6cfeba07f882..c91890488402 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Digital I/O driver for Technologic Systems TS-5500
*
@@ -16,17 +17,12 @@
* TS-5600:
* Documentation: http://wiki.embeddedarm.com/wiki/TS-5600
* Blocks: LCD port (identical to TS-5500 LCD).
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/platform_data/gpio-ts5500.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -318,7 +314,6 @@ static void ts5500_disable_irq(struct ts5500_priv *priv)
static int ts5500_dio_probe(struct platform_device *pdev)
{
enum ts5500_blocks block = platform_get_device_id(pdev)->driver_data;
- struct ts5500_dio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
const char *name = dev_name(dev);
struct ts5500_priv *priv;
@@ -349,10 +344,6 @@ static int ts5500_dio_probe(struct platform_device *pdev)
priv->gpio_chip.set = ts5500_gpio_set;
priv->gpio_chip.to_irq = ts5500_gpio_to_irq;
priv->gpio_chip.base = -1;
- if (pdata) {
- priv->gpio_chip.base = pdata->base;
- priv->strap = pdata->strap;
- }
switch (block) {
case TS5500_DIO1:
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 9b511df5450e..fbfb648d3502 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Access to GPIOs on TWL4030/TPS659x0 chips
*
@@ -9,20 +10,6 @@
*
* Initial Code:
* Andy Lowe / Nishanth Menon
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
@@ -30,7 +17,7 @@
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
@@ -167,6 +154,23 @@ static int twl4030_set_gpio_direction(int gpio, int is_input)
return ret;
}
+static int twl4030_get_gpio_direction(int gpio)
+{
+ u8 d_bnk = gpio >> 3;
+ u8 d_msk = BIT(gpio & 0x7);
+ u8 base = REG_GPIODATADIR1 + d_bnk;
+ int ret = 0;
+
+ ret = gpio_twl4030_read(base);
+ if (ret < 0)
+ return ret;
+
+ /* In the register 1 = output; gpiolib returns 1 for input, so invert */
+ ret = !(ret & d_msk);
+
+ return ret;
+}
+
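A worked example of the bank arithmetic above (the "+ 1" register is described positionally; only REG_GPIODATADIR1 appears in the code):

/*
 * Example: gpio = 10
 *   d_bnk = 10 >> 3       = 1
 *   d_msk = BIT(10 & 0x7) = BIT(2)
 *   base  = REG_GPIODATADIR1 + 1   (the second direction bank)
 * The register bit reads 1 for output; gpiolib returns 1 for input,
 * hence the final inversion.
 */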
static int twl4030_set_gpio_dataout(int gpio, int enable)
{
u8 d_bnk = gpio >> 3;
@@ -372,6 +376,28 @@ static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
return ret;
}
+static int twl_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_twl4030_priv *priv = gpiochip_get_data(chip);
+ /*
+ * Default to 0 (output); LED GPIOs at offsets >=
+ * TWL4030_GPIO_MAX are always outputs.
+ */
+ int ret = 0;
+
+ mutex_lock(&priv->mutex);
+ if (offset < TWL4030_GPIO_MAX) {
+ ret = twl4030_get_gpio_direction(offset);
+ if (ret) {
+ mutex_unlock(&priv->mutex);
+ return ret;
+ }
+ }
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct gpio_twl4030_priv *priv = gpiochip_get_data(chip);
@@ -387,8 +413,9 @@ static const struct gpio_chip template_chip = {
.request = twl_request,
.free = twl_free,
.direction_input = twl_direction_in,
- .get = twl_get,
.direction_output = twl_direction_out,
+ .get_direction = twl_get_direction,
+ .get = twl_get,
.set = twl_set,
.to_irq = twl_to_irq,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index dadeacf43e0c..c845b2ff1f43 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Access to GPOs on TWL6040 chip
*
@@ -6,28 +7,15 @@
* Authors:
* Sergio Aguirre <saaguirre@ti.com>
* Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
+#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/mfd/twl6040.h>
@@ -41,7 +29,13 @@ static int twl6040gpo_get(struct gpio_chip *chip, unsigned offset)
if (ret < 0)
return ret;
- return (ret >> offset) & 1;
+ return !!(ret & BIT(offset));
+}
+
+static int twl6040gpo_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ /* This means "out" */
+ return 0;
}
static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned offset,
@@ -62,9 +56,9 @@ static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value)
return;
if (value)
- gpoctl = ret | (1 << offset);
+ gpoctl = ret | BIT(offset);
else
- gpoctl = ret & ~(1 << offset);
+ gpoctl = ret & ~BIT(offset);
twl6040_reg_write(twl6040, TWL6040_REG_GPOCTL, gpoctl);
}
@@ -74,6 +68,7 @@ static struct gpio_chip twl6040gpo_chip = {
.owner = THIS_MODULE,
.get = twl6040gpo_get,
.direction_output = twl6040gpo_direction_out,
+ .get_direction = twl6040gpo_get_direction,
.set = twl6040gpo_set,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 7fdac9060979..74551cbdb2e8 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -12,7 +12,7 @@
* GNU General Public License for more details.
*/
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index d4ad6d0e02a2..5960396c8d9a 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -1,23 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale vf610 GPIO support through PORT and GPIO
*
* Copyright (c) 2014 Toradex AG.
*
* Author: Stefan Agner <stefan@agner.ch>.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
-
#include <linux/bitops.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index e6d1328dddfa..9b604f13e302 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Nano River Technologies viperboard GPIO lib driver
*
* (C) 2012 by Lemonage GmbH
* Author: Lars Poeschel <poeschel@lemonage.de>
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
@@ -19,9 +14,8 @@
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-
#include <linux/usb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/viperboard.h>
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index 027699cec911..b13a49c89cc1 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -1,27 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for NEC VR4100 series General-purpose I/O Unit.
*
* Copyright (C) 2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <source@mvista.com>
* Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/errno.h>
#include <linux/fs.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -384,44 +371,6 @@ static int giu_set_direction(struct gpio_chip *chip, unsigned pin, int dir)
return 0;
}
-int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
-{
- u16 reg, mask;
- unsigned long flags;
-
- if ((giu_flags & GPIO_HAS_PULLUPDOWN_IO) != GPIO_HAS_PULLUPDOWN_IO)
- return -EPERM;
-
- if (pin >= 15)
- return -EINVAL;
-
- mask = 1 << pin;
-
- spin_lock_irqsave(&giu_lock, flags);
-
- if (pull == GPIO_PULL_UP || pull == GPIO_PULL_DOWN) {
- reg = giu_read(GIUTERMUPDN);
- if (pull == GPIO_PULL_UP)
- reg |= mask;
- else
- reg &= ~mask;
- giu_write(GIUTERMUPDN, reg);
-
- reg = giu_read(GIUUSEUPDN);
- reg |= mask;
- giu_write(GIUUSEUPDN, reg);
- } else {
- reg = giu_read(GIUUSEUPDN);
- reg &= ~mask;
- giu_write(GIUUSEUPDN, reg);
- }
-
- spin_unlock_irqrestore(&giu_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vr41xx_gpio_pullupdown);
-
static int vr41xx_gpio_get(struct gpio_chip *chip, unsigned pin)
{
u16 reg, mask;
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 98a6f1fcc561..4ff146ca32fe 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Linux GPIOlib driver for the VIA VX855 integrated southbridge GPIO
*
@@ -5,27 +6,10 @@
* Copyright (C) 2010 One Laptop per Child
* Author: Harald Welte <HaraldWelte@viatech.com>
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
- *
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 324813e8304e..a3a32a77041f 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* gpiolib support for Wolfson WM831x PMICs
*
@@ -5,17 +6,12 @@
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index e46752e73dd9..460f0a4b04bd 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* gpiolib support for Wolfson WM835x PMICs
*
@@ -5,17 +6,12 @@
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 1e35756ac55b..9af89cf7f6bc 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* gpiolib support for Wolfson WM8994
*
@@ -5,17 +6,12 @@
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 8e4275eaa7d7..0a3607fd21af 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2003-2015 Broadcom Corporation
* All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/module.h>
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index f16c0427952e..43d3fa5f511a 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 TangoTec Ltd.
* Author: Baruch Siach <baruch@tkos.co.il>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Driver for the Xtensa LX4 GPIO32 Option
*
* Documentation: Xtensa LX4 Microprocessor Data Book, Section 2.22
@@ -30,7 +27,7 @@
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index 3926ce9c2840..57432397e5e5 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -16,7 +16,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
/*
* Memory layout:
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 8b9d7e42c600..55b72fbe1631 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1,17 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ACPI helpers for GPIO API
*
* Copyright (C) 2012, Intel Corporation
* Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/errno.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
@@ -1198,7 +1194,7 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
{
/* Never allow fallback if the device has properties */
- if (adev->data.properties || adev->driver_gpios)
+ if (acpi_dev_has_props(adev) || adev->driver_gpios)
return false;
return con_id == NULL;
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
index f748aa3e77f7..dd517098ab95 100644
--- a/drivers/gpio/gpiolib-devprop.c
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Device property helpers for GPIO chips.
*
* Copyright (C) 2016, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/property.h>
@@ -32,32 +29,29 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip,
struct gpio_device *gdev = chip->gpiodev;
const char **names;
int ret, i;
+ int count;
- ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
- NULL, 0);
- if (ret < 0)
+ count = fwnode_property_read_string_array(fwnode, "gpio-line-names",
+ NULL, 0);
+ if (count < 0)
return;
- if (ret != gdev->ngpio) {
- dev_warn(&gdev->dev,
- "names %d do not match number of GPIOs %d\n", ret,
- gdev->ngpio);
- return;
- }
+ if (count > gdev->ngpio)
+ count = gdev->ngpio;
- names = kcalloc(gdev->ngpio, sizeof(*names), GFP_KERNEL);
+ names = kcalloc(count, sizeof(*names), GFP_KERNEL);
if (!names)
return;
ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
- names, gdev->ngpio);
+ names, count);
if (ret < 0) {
dev_warn(&gdev->dev, "failed to read GPIO line names\n");
kfree(names);
return;
}
- for (i = 0; i < gdev->ngpio; i++)
+ for (i = 0; i < count; i++)
gdev->descs[i].name = names[i];
kfree(names);
diff --git a/drivers/gpio/devres.c b/drivers/gpio/gpiolib-devres.c
index e82cc763633c..01959369360b 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * drivers/gpio/devres.c - managed gpio resources
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
+ * devres.c - managed gpio resources
* This file is based on kernel/irq/devres.c
*
* Copyright (c) 2011 John Crispin <john@phrozen.org>
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index 8b830996fe02..30e2476a6dc4 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index d4e7a09598fa..7f1260c78270 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OF helpers for the GPIO API
*
* Copyright (c) 2007-2008 MontaVista Software, Inc.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/device.h>
@@ -58,7 +54,8 @@ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
}
static void of_gpio_flags_quirks(struct device_node *np,
- enum of_gpio_flags *flags)
+ enum of_gpio_flags *flags,
+ int index)
{
/*
* Some GPIO fixed regulator quirks.
@@ -92,6 +89,51 @@ static void of_gpio_flags_quirks(struct device_node *np,
pr_info("%s uses legacy open drain flag - update the DTS if you can\n",
of_node_full_name(np));
}
+
+ /*
+ * Legacy handling of SPI active high chip select. If we have a
+ * property named "cs-gpios" we need to inspect the child node
+ * to determine if the flags should have inverted semantics.
+ */
+ if (IS_ENABLED(CONFIG_SPI_MASTER) &&
+ of_property_read_bool(np, "cs-gpios")) {
+ struct device_node *child;
+ u32 cs;
+ int ret;
+
+ for_each_child_of_node(np, child) {
+ ret = of_property_read_u32(child, "reg", &cs);
+ if (ret)
+ continue;
+ if (cs == index) {
+ /*
+ * SPI children have active low chip selects
+ * by default. This can be specified implicitly
+ * by omitting "spi-cs-high" from the child
+ * node, or explicitly by tagging the GPIO
+ * handle with GPIO_ACTIVE_LOW. If the line is
+ * tagged as active low in the device tree and
+ * the child also has "spi-cs-high" set, we
+ * have a conflict, and the "spi-cs-high" flag
+ * takes precedence.
+ */
+ if (of_property_read_bool(child, "spi-cs-high")) {
+ if (*flags & OF_GPIO_ACTIVE_LOW) {
+ pr_warn("%s GPIO handle specifies active low - ignored\n",
+ of_node_full_name(np));
+ *flags &= ~OF_GPIO_ACTIVE_LOW;
+ }
+ } else {
+ if (!(*flags & OF_GPIO_ACTIVE_LOW))
+ pr_info("%s enforce active low on chipselect handle\n",
+ of_node_full_name(np));
+ *flags |= OF_GPIO_ACTIVE_LOW;
+ }
+ break;
+ }
+ }
+ }
}
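A hypothetical device tree fragment of the legacy layout this quirk walks (node names and addresses are illustrative):

/*
 * spi@48098000 {
 *         cs-gpios = <&gpio1 10 GPIO_ACTIVE_LOW>, <&gpio1 11 0>;
 *         eeprom@0 {
 *                 reg = <0>;     <- matched against 'index'
 *                 spi-cs-high;   <- wins over GPIO_ACTIVE_LOW on the handle
 *         };
 * };
 */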
/**
@@ -132,7 +174,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
goto out;
if (flags)
- of_gpio_flags_quirks(np, flags);
+ of_gpio_flags_quirks(np, flags, index);
pr_debug("%s: parsed '%s' property of node '%pOF[%d]' - status (%d)\n",
__func__, propname, np, index,
@@ -349,8 +391,8 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
else if (of_property_read_bool(np, "output-high"))
*dflags |= GPIOD_OUT_HIGH;
else {
- pr_warn("GPIO line %d (%s): no hogging state specified, bailing out\n",
- desc_to_gpio(desc), np->name);
+ pr_warn("GPIO line %d (%pOFn): no hogging state specified, bailing out\n",
+ desc_to_gpio(desc), np);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 3dbaf489a8a5..fbf6b1a0a4fa 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -1,8 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/sysfs.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
@@ -444,11 +444,6 @@ static struct attribute *gpiochip_attrs[] = {
};
ATTRIBUTE_GROUPS(gpiochip);
-static struct gpio_desc *gpio_to_valid_desc(int gpio)
-{
- return gpio_is_valid(gpio) ? gpio_to_desc(gpio) : NULL;
-}
-
/*
* /sys/class/gpio/export ... write-only
* integer N ... number of GPIO to export (full access)
@@ -467,7 +462,7 @@ static ssize_t export_store(struct class *class,
if (status < 0)
goto done;
- desc = gpio_to_valid_desc(gpio);
+ desc = gpio_to_desc(gpio);
/* reject invalid GPIOs */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
@@ -514,7 +509,7 @@ static ssize_t unexport_store(struct class *class,
if (status < 0)
goto done;
- desc = gpio_to_valid_desc(gpio);
+ desc = gpio_to_desc(gpio);
/* reject bogus commands (gpio_unexport ignores them) */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e8f8a1999393..230e41562462 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -210,15 +211,15 @@ static int gpiochip_find_base(int ngpio)
*/
int gpiod_get_direction(struct gpio_desc *desc)
{
- struct gpio_chip *chip;
- unsigned offset;
- int status = -EINVAL;
+ struct gpio_chip *chip;
+ unsigned offset;
+ int status;
chip = gpiod_to_chip(desc);
offset = gpio_chip_hwgpio(desc);
if (!chip->get_direction)
- return status;
+ return -ENOTSUPP;
status = chip->get_direction(chip, offset);
if (status > 0) {
@@ -359,7 +360,7 @@ static unsigned long *gpiochip_allocate_mask(struct gpio_chip *chip)
return p;
}
-static int gpiochip_init_valid_mask(struct gpio_chip *gpiochip)
+static int gpiochip_alloc_valid_mask(struct gpio_chip *gpiochip)
{
#ifdef CONFIG_OF_GPIO
int size;
@@ -380,6 +381,14 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gpiochip)
return 0;
}
+static int gpiochip_init_valid_mask(struct gpio_chip *gpiochip)
+{
+ if (gpiochip->init_valid_mask)
+ return gpiochip->init_valid_mask(gpiochip);
+
+ return 0;
+}
+
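
The split above separates allocating the valid mask from initializing it, and gives drivers a dedicated ->init_valid_mask() hook that runs once the mask exists. A minimal, hypothetical driver-side sketch (assuming the hook keeps the int (*)(struct gpio_chip *) shape used above, and that gpiolib has allocated chip->valid_mask for this chip):

	/* Hypothetical controller: lines 4..7 are not bonded out. */
	static int foo_gpio_init_valid_mask(struct gpio_chip *chip)
	{
		bitmap_clear(chip->valid_mask, 4, 4);
		return 0;
	}

The driver would set chip->init_valid_mask = foo_gpio_init_valid_mask before registering the chip with gpiochip_add_data().
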
static void gpiochip_free_valid_mask(struct gpio_chip *gpiochip)
{
kfree(gpiochip->valid_mask);
@@ -427,7 +436,7 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
struct linehandle_state *lh = filep->private_data;
void __user *ip = (void __user *)arg;
struct gpiohandle_data ghd;
- int vals[GPIOHANDLES_MAX];
+ DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
int i;
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
@@ -436,13 +445,14 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
true,
lh->numdescs,
lh->descs,
+ NULL,
vals);
if (ret)
return ret;
memset(&ghd, 0, sizeof(ghd));
for (i = 0; i < lh->numdescs; i++)
- ghd.values[i] = vals[i];
+ ghd.values[i] = test_bit(i, vals);
if (copy_to_user(ip, &ghd, sizeof(ghd)))
return -EFAULT;
@@ -461,13 +471,14 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
/* Clamp all values to [0,1] */
for (i = 0; i < lh->numdescs; i++)
- vals[i] = !!ghd.values[i];
+ __assign_bit(i, vals, ghd.values[i]);
/* Reuse the array setting function */
return gpiod_set_array_value_complex(false,
true,
lh->numdescs,
lh->descs,
+ NULL,
vals);
}
return -EINVAL;
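
For orientation, the bitmap conversion above leaves the chardev ABI untouched: userspace still exchanges plain u8 values in struct gpiohandle_data. A hedged userspace sketch (lh_fd is assumed to be a line-handle file descriptor obtained earlier with GPIO_GET_LINEHANDLE_IOCTL):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/gpio.h>

	/* Read back the current values of an already-requested line handle. */
	static int read_lines(int lh_fd)
	{
		struct gpiohandle_data ghd;

		if (ioctl(lh_fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &ghd) < 0)
			return -1;
		printf("line 0 reads %u\n", ghd.values[0]);
		return 0;
	}
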
@@ -571,7 +582,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_descs;
lh->descs[i] = desc;
- count = i;
+ count = i + 1;
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -812,26 +823,26 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
struct lineevent_state *le = p;
struct gpioevent_data ge;
- int ret, level;
+ int ret;
/* Do not leak kernel stack to userspace */
memset(&ge, 0, sizeof(ge));
ge.timestamp = le->timestamp;
- level = gpiod_get_value_cansleep(le->desc);
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
&& le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+ int level = gpiod_get_value_cansleep(le->desc);
if (level)
/* Emit low-to-high event */
ge.id = GPIOEVENT_EVENT_RISING_EDGE;
else
/* Emit high-to-low event */
ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
- } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
+ } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
/* Emit low-to-high event */
ge.id = GPIOEVENT_EVENT_RISING_EDGE;
- } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
+ } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
/* Emit high-to-low event */
ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
} else {
@@ -942,7 +953,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
irqflags |= IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
- irqflags |= IRQF_SHARED;
INIT_KFIFO(le->events);
init_waitqueue_head(&le->wait);
@@ -1341,19 +1351,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
spin_unlock_irqrestore(&gpio_lock, flags);
- for (i = 0; i < chip->ngpio; i++) {
- struct gpio_desc *desc = &gdev->descs[i];
-
- desc->gdev = gdev;
-
- /* REVISIT: most hardware initializes GPIOs as inputs (often
- * with pullups enabled) so power usage is minimized. Linux
- * code should set the gpio direction first thing; but until
- * it does, and in case chip->get_direction is not set, we may
- * expose the wrong direction in sysfs.
- */
- desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
- }
+ for (i = 0; i < chip->ngpio; i++)
+ gdev->descs[i].gdev = gdev;
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
@@ -1367,7 +1366,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (status)
goto err_remove_from_list;
- status = gpiochip_init_valid_mask(chip);
+ status = gpiochip_alloc_valid_mask(chip);
if (status)
goto err_remove_irqchip_mask;
@@ -1379,6 +1378,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (status)
goto err_remove_chip;
+ status = gpiochip_init_valid_mask(chip);
+ if (status)
+ goto err_remove_chip;
+
+ for (i = 0; i < chip->ngpio; i++) {
+ struct gpio_desc *desc = &gdev->descs[i];
+
+ if (chip->get_direction && gpiochip_line_is_valid(chip, i))
+ desc->flags = !chip->get_direction(chip, i) ?
+ (1 << FLAG_IS_OUT) : 0;
+ else
+ desc->flags = !chip->direction_input ?
+ (1 << FLAG_IS_OUT) : 0;
+ }
+
acpi_gpiochip_add(chip);
machine_gpiochip_add(chip);
@@ -1512,7 +1526,7 @@ static int devm_gpio_chip_match(struct device *dev, void *res, void *data)
/**
* devm_gpiochip_add_data() - Resource manager gpiochip_add_data()
- * @dev: the device pointer on which irq_chip belongs to.
+ * @dev: pointer to the device that gpio_chip belongs to.
* @chip: the chip to register, with chip->base initialized
* @data: driver-private data associated with this chip
*
@@ -1649,7 +1663,6 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
/**
* gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
* @gpiochip: the gpiochip to set the irqchip chain to
- * @irqchip: the irqchip to chain to the gpiochip
* @parent_irq: the irq number corresponding to the parent IRQ for this
* chained irqchip
* @parent_handler: the parent interrupt handler for the accumulated IRQ
@@ -1657,12 +1670,9 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
* cascaded, pass NULL in this handler argument
*/
static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
unsigned int parent_irq,
irq_flow_handler_t parent_handler)
{
- unsigned int offset;
-
if (!gpiochip->irq.domain) {
chip_err(gpiochip, "called %s before setting up irqchip\n",
__func__);
@@ -1682,17 +1692,10 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
irq_set_chained_handler_and_data(parent_irq, parent_handler,
gpiochip);
- gpiochip->irq.parents = &parent_irq;
+ gpiochip->irq.parent_irq = parent_irq;
+ gpiochip->irq.parents = &gpiochip->irq.parent_irq;
gpiochip->irq.num_parents = 1;
}
-
- /* Set the parent IRQ for all affected IRQs */
- for (offset = 0; offset < gpiochip->ngpio; offset++) {
- if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
- continue;
- irq_set_parent(irq_find_mapping(gpiochip->irq.domain, offset),
- parent_irq);
- }
}
/**
@@ -1702,8 +1705,7 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
* @parent_irq: the irq number corresponding to the parent IRQ for this
* chained irqchip
* @parent_handler: the parent interrupt handler for the accumulated IRQ
- * coming out of the gpiochip. If the interrupt is nested rather than
- * cascaded, pass NULL in this handler argument
+ * coming out of the gpiochip.
*/
void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
@@ -1715,8 +1717,7 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
return;
}
- gpiochip_set_cascaded_irqchip(gpiochip, irqchip, parent_irq,
- parent_handler);
+ gpiochip_set_cascaded_irqchip(gpiochip, parent_irq, parent_handler);
}
EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
@@ -1731,8 +1732,7 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int parent_irq)
{
- gpiochip_set_cascaded_irqchip(gpiochip, irqchip, parent_irq,
- NULL);
+ gpiochip_set_cascaded_irqchip(gpiochip, parent_irq, NULL);
}
EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip);
@@ -1804,39 +1804,75 @@ static const struct irq_domain_ops gpiochip_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
+static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ if (!gpiochip_irqchip_irq_valid(chip, offset))
+ return -ENXIO;
+
+ return irq_create_mapping(chip->irq.domain, offset);
+}
+
static int gpiochip_irq_reqres(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- int ret;
-
- if (!try_module_get(chip->gpiodev->owner))
- return -ENODEV;
- ret = gpiochip_lock_as_irq(chip, d->hwirq);
- if (ret) {
- chip_err(chip,
- "unable to lock HW IRQ %lu for IRQ\n",
- d->hwirq);
- module_put(chip->gpiodev->owner);
- return ret;
- }
- return 0;
+ return gpiochip_reqres_irq(chip, d->hwirq);
}
static void gpiochip_irq_relres(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- gpiochip_unlock_as_irq(chip, d->hwirq);
- module_put(chip->gpiodev->owner);
+ gpiochip_relres_irq(chip, d->hwirq);
}
-static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
+static void gpiochip_irq_enable(struct irq_data *d)
{
- if (!gpiochip_irqchip_irq_valid(chip, offset))
- return -ENXIO;
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- return irq_create_mapping(chip->irq.domain, offset);
+ gpiochip_enable_irq(chip, d->hwirq);
+ if (chip->irq.irq_enable)
+ chip->irq.irq_enable(d);
+ else
+ chip->irq.chip->irq_unmask(d);
+}
+
+static void gpiochip_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ if (chip->irq.irq_disable)
+ chip->irq.irq_disable(d);
+ else
+ chip->irq.chip->irq_mask(d);
+ gpiochip_disable_irq(chip, d->hwirq);
+}
+
+static void gpiochip_set_irq_hooks(struct gpio_chip *gpiochip)
+{
+ struct irq_chip *irqchip = gpiochip->irq.chip;
+
+ if (!irqchip->irq_request_resources &&
+ !irqchip->irq_release_resources) {
+ irqchip->irq_request_resources = gpiochip_irq_reqres;
+ irqchip->irq_release_resources = gpiochip_irq_relres;
+ }
+ if (WARN_ON(gpiochip->irq.irq_enable))
+ return;
+ /* Check if the irqchip already has this hook... */
+ if (irqchip->irq_enable == gpiochip_irq_enable) {
+ /*
+ * ...and if so, give a gentle warning that this is bad
+ * practice.
+ */
+ chip_info(gpiochip,
+ "detected irqchip that is shared with multiple gpiochips: please fix the driver.\n");
+ return;
+ }
+ gpiochip->irq.irq_enable = irqchip->irq_enable;
+ gpiochip->irq.irq_disable = irqchip->irq_disable;
+ irqchip->irq_enable = gpiochip_irq_enable;
+ irqchip->irq_disable = gpiochip_irq_disable;
}
/**
@@ -1897,16 +1933,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
if (!gpiochip->irq.domain)
return -EINVAL;
- /*
- * It is possible for a driver to override this, but only if the
- * alternative functions are both implemented.
- */
- if (!irqchip->irq_request_resources &&
- !irqchip->irq_release_resources) {
- irqchip->irq_request_resources = gpiochip_irq_reqres;
- irqchip->irq_release_resources = gpiochip_irq_relres;
- }
-
if (gpiochip->irq.parent_handler) {
void *data = gpiochip->irq.parent_handler_data ?: gpiochip;
@@ -1922,6 +1948,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
}
}
+ gpiochip_set_irq_hooks(gpiochip);
+
acpi_gpiochip_request_interrupts(gpiochip);
return 0;
@@ -1935,11 +1963,12 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
*/
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
{
+ struct irq_chip *irqchip = gpiochip->irq.chip;
unsigned int offset;
acpi_gpiochip_free_interrupts(gpiochip);
- if (gpiochip->irq.chip && gpiochip->irq.parent_handler) {
+ if (irqchip && gpiochip->irq.parent_handler) {
struct gpio_irq_chip *irq = &gpiochip->irq;
unsigned int i;
@@ -1963,11 +1992,19 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
irq_domain_remove(gpiochip->irq.domain);
}
- if (gpiochip->irq.chip) {
- gpiochip->irq.chip->irq_request_resources = NULL;
- gpiochip->irq.chip->irq_release_resources = NULL;
- gpiochip->irq.chip = NULL;
+ if (irqchip) {
+ if (irqchip->irq_request_resources == gpiochip_irq_reqres) {
+ irqchip->irq_request_resources = NULL;
+ irqchip->irq_release_resources = NULL;
+ }
+ if (irqchip->irq_enable == gpiochip_irq_enable) {
+ irqchip->irq_enable = gpiochip->irq.irq_enable;
+ irqchip->irq_disable = gpiochip->irq.irq_disable;
+ }
}
+ gpiochip->irq.irq_enable = NULL;
+ gpiochip->irq.irq_disable = NULL;
+ gpiochip->irq.chip = NULL;
gpiochip_irqchip_free_valid_mask(gpiochip);
}
@@ -2056,15 +2093,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
return -EINVAL;
}
- /*
- * It is possible for a driver to override this, but only if the
- * alternative functions are both implemented.
- */
- if (!irqchip->irq_request_resources &&
- !irqchip->irq_release_resources) {
- irqchip->irq_request_resources = gpiochip_irq_reqres;
- irqchip->irq_release_resources = gpiochip_irq_relres;
- }
+ gpiochip_set_irq_hooks(gpiochip);
acpi_gpiochip_request_interrupts(gpiochip);
@@ -2512,19 +2541,38 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
int gpiod_direction_input(struct gpio_desc *desc)
{
struct gpio_chip *chip;
- int status = -EINVAL;
+ int status = 0;
VALIDATE_DESC(desc);
chip = desc->gdev->chip;
- if (!chip->get || !chip->direction_input) {
+ /*
+ * It is legal to have no .get() and .direction_input() specified if
+ * the chip is output-only, but you can't specify .direction_input()
+	 * and not support the .get() operation; that doesn't make sense.
+ */
+ if (!chip->get && chip->direction_input) {
gpiod_warn(desc,
- "%s: missing get() or direction_input() operations\n",
- __func__);
+ "%s: missing get() but have direction_input()\n",
+ __func__);
return -EIO;
}
- status = chip->direction_input(chip, gpio_chip_hwgpio(desc));
+ /*
+ * If we have a .direction_input() callback, things are simple,
+	 * just call it. Otherwise we are an input-only chip, so try to check
+	 * the direction (if .get_direction() is supported); else we silently
+ * assume we are in input mode after this.
+ */
+ if (chip->direction_input) {
+ status = chip->direction_input(chip, gpio_chip_hwgpio(desc));
+ } else if (chip->get_direction &&
+ (chip->get_direction(chip, gpio_chip_hwgpio(desc)) != 1)) {
+ gpiod_warn(desc,
+ "%s: missing direction_input() operation and line is output\n",
+ __func__);
+ return -EIO;
+ }
if (status == 0)
clear_bit(FLAG_IS_OUT, &desc->flags);
@@ -2546,16 +2594,38 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
struct gpio_chip *gc = desc->gdev->chip;
int val = !!value;
- int ret;
+ int ret = 0;
- if (!gc->set || !gc->direction_output) {
+ /*
+ * It's OK not to specify .direction_output() if the gpiochip is
+	 * output-only, but if there is not even a .set() operation either,
+	 * it is pretty tricky to drive the output line.
+ */
+ if (!gc->set && !gc->direction_output) {
gpiod_warn(desc,
- "%s: missing set() or direction_output() operations\n",
- __func__);
+ "%s: missing set() and direction_output() operations\n",
+ __func__);
return -EIO;
}
- ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), val);
+ if (gc->direction_output) {
+ ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), val);
+ } else {
+ /* Check that we are in output mode if we can */
+ if (gc->get_direction &&
+ gc->get_direction(gc, gpio_chip_hwgpio(desc))) {
+ gpiod_warn(desc,
+ "%s: missing direction_output() operation\n",
+ __func__);
+ return -EIO;
+ }
+ /*
+ * If we can't actively set the direction, we are some
+ * output-only chip, so just drive the output as desired.
+ */
+ gc->set(gc, gpio_chip_hwgpio(desc), val);
+ }
+
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
trace_gpio_value(desc_to_gpio(desc), 0, val);
@@ -2604,8 +2674,9 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
else
value = !!value;
- /* GPIOs used for IRQs shall not be set as output */
- if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) {
+ /* GPIOs used for enabled IRQs shall not be set as output */
+ if (test_bit(FLAG_USED_AS_IRQ, &desc->flags) &&
+ test_bit(FLAG_IRQ_IS_ENABLED, &desc->flags)) {
gpiod_err(desc,
"%s: tried to set a GPIO tied to an IRQ as output\n",
__func__);
@@ -2784,9 +2855,39 @@ static int gpio_chip_get_multiple(struct gpio_chip *chip,
int gpiod_get_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
- int i = 0;
+ int err, i = 0;
+
+ /*
+ * Validate array_info against desc_array and its size.
+ * It should immediately follow desc_array if both
+ * have been obtained from the same gpiod_get_array() call.
+ */
+ if (array_info && array_info->desc == desc_array &&
+ array_size <= array_info->size &&
+ (void *)array_info == desc_array + array_info->size) {
+ if (!can_sleep)
+ WARN_ON(array_info->chip->can_sleep);
+
+ err = gpio_chip_get_multiple(array_info->chip,
+ array_info->get_mask,
+ value_bitmap);
+ if (err)
+ return err;
+
+ if (!raw && !bitmap_empty(array_info->invert_mask, array_size))
+ bitmap_xor(value_bitmap, value_bitmap,
+ array_info->invert_mask, array_size);
+
+ if (bitmap_full(array_info->get_mask, array_size))
+ return 0;
+
+ i = find_first_zero_bit(array_info->get_mask, array_size);
+ } else {
+ array_info = NULL;
+ }
while (i < array_size) {
struct gpio_chip *chip = desc_array[i]->gdev->chip;
@@ -2818,6 +2919,10 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
__set_bit(hwgpio, mask);
i++;
+
+ if (array_info)
+ i = find_next_zero_bit(array_info->get_mask,
+ array_size, i);
} while ((i < array_size) &&
(desc_array[i]->gdev->chip == chip));
@@ -2828,15 +2933,20 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
return ret;
}
- for (j = first; j < i; j++) {
+ for (j = first; j < i; ) {
const struct gpio_desc *desc = desc_array[j];
int hwgpio = gpio_chip_hwgpio(desc);
int value = test_bit(hwgpio, bits);
if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
- value_array[j] = value;
+ __assign_bit(j, value_bitmap, value);
trace_gpio_value(desc_to_gpio(desc), 1, value);
+ j++;
+
+ if (array_info)
+ j = find_next_zero_bit(array_info->get_mask, i,
+ j);
}
if (mask != fastpath)
@@ -2895,9 +3005,10 @@ EXPORT_SYMBOL_GPL(gpiod_get_value);
/**
* gpiod_get_raw_array_value() - read raw values from an array of GPIOs
- * @array_size: number of elements in the descriptor / value arrays
+ * @array_size: number of elements in the descriptor array / value bitmap
* @desc_array: array of GPIO descriptors whose values will be read
- * @value_array: array to store the read values
+ * @array_info: information on applicability of fast bitmap processing path
+ * @value_bitmap: bitmap to store the read values
*
* Read the raw values of the GPIOs, i.e. the values of the physical lines
* without regard for their ACTIVE_LOW status. Return 0 in case of success,
@@ -2907,20 +3018,24 @@ EXPORT_SYMBOL_GPL(gpiod_get_value);
* and it will complain if the GPIO chip functions potentially sleep.
*/
int gpiod_get_raw_array_value(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array)
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
if (!desc_array)
return -EINVAL;
return gpiod_get_array_value_complex(true, false, array_size,
- desc_array, value_array);
+ desc_array, array_info,
+ value_bitmap);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value);
/**
* gpiod_get_array_value() - read values from an array of GPIOs
- * @array_size: number of elements in the descriptor / value arrays
+ * @array_size: number of elements in the descriptor array / value bitmap
* @desc_array: array of GPIO descriptors whose values will be read
- * @value_array: array to store the read values
+ * @array_info: information on applicability of fast bitmap processing path
+ * @value_bitmap: bitmap to store the read values
*
* Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
* into account. Return 0 in case of success, else an error code.
@@ -2929,12 +3044,15 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value);
* and it will complain if the GPIO chip functions potentially sleep.
*/
int gpiod_get_array_value(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array)
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
if (!desc_array)
return -EINVAL;
return gpiod_get_array_value_complex(false, false, array_size,
- desc_array, value_array);
+ desc_array, array_info,
+ value_bitmap);
}
EXPORT_SYMBOL_GPL(gpiod_get_array_value);
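
A consumer-side sketch of the reworked API (hedged; the device and "buttons" con_id are illustrative). gpiod_get_array() returns a struct gpio_descs whose ->info member carries the fast-path descriptor built later in this patch, and values now travel in an unsigned long bitmap rather than an int array:

	DECLARE_BITMAP(values, 8);	/* room for up to 8 lines */
	struct gpio_descs *buttons;
	int err;

	buttons = gpiod_get_array(dev, "buttons", GPIOD_IN);
	if (IS_ERR(buttons))
		return PTR_ERR(buttons);

	err = gpiod_get_array_value(buttons->ndescs, buttons->desc,
				    buttons->info, values);
	if (!err && test_bit(0, values))
		dev_info(dev, "button 0 is pressed\n");
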
@@ -3025,12 +3143,39 @@ static void gpio_chip_set_multiple(struct gpio_chip *chip,
}
int gpiod_set_array_value_complex(bool raw, bool can_sleep,
- unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+ unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
int i = 0;
+ /*
+ * Validate array_info against desc_array and its size.
+ * It should immediately follow desc_array if both
+ * have been obtained from the same gpiod_get_array() call.
+ */
+ if (array_info && array_info->desc == desc_array &&
+ array_size <= array_info->size &&
+ (void *)array_info == desc_array + array_info->size) {
+ if (!can_sleep)
+ WARN_ON(array_info->chip->can_sleep);
+
+ if (!raw && !bitmap_empty(array_info->invert_mask, array_size))
+ bitmap_xor(value_bitmap, value_bitmap,
+ array_info->invert_mask, array_size);
+
+ gpio_chip_set_multiple(array_info->chip, array_info->set_mask,
+ value_bitmap);
+
+ if (bitmap_full(array_info->set_mask, array_size))
+ return 0;
+
+ i = find_first_zero_bit(array_info->set_mask, array_size);
+ } else {
+ array_info = NULL;
+ }
+
while (i < array_size) {
struct gpio_chip *chip = desc_array[i]->gdev->chip;
unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
@@ -3056,9 +3201,16 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
do {
struct gpio_desc *desc = desc_array[i];
int hwgpio = gpio_chip_hwgpio(desc);
- int value = value_array[i];
+ int value = test_bit(i, value_bitmap);
- if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ /*
+ * Pins applicable for fast input but not for
+ * fast output processing may have been already
+			 * inverted inside the fast path; skip them.
+ */
+ if (!raw && !(array_info &&
+ test_bit(i, array_info->invert_mask)) &&
+ test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
trace_gpio_value(desc_to_gpio(desc), 0, value);
/*
@@ -3078,6 +3230,10 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
count++;
}
i++;
+
+ if (array_info)
+ i = find_next_zero_bit(array_info->set_mask,
+ array_size, i);
} while ((i < array_size) &&
(desc_array[i]->gdev->chip == chip));
/* push collected bits to outputs */
@@ -3152,9 +3308,10 @@ EXPORT_SYMBOL_GPL(gpiod_set_value);
/**
* gpiod_set_raw_array_value() - assign values to an array of GPIOs
- * @array_size: number of elements in the descriptor / value arrays
+ * @array_size: number of elements in the descriptor array / value bitmap
* @desc_array: array of GPIO descriptors whose values will be assigned
- * @value_array: array of values to assign
+ * @array_info: information on applicability of fast bitmap processing path
+ * @value_bitmap: bitmap of values to assign
*
* Set the raw values of the GPIOs, i.e. the values of the physical lines
* without regard for their ACTIVE_LOW status.
@@ -3163,20 +3320,23 @@ EXPORT_SYMBOL_GPL(gpiod_set_value);
* complain if the GPIO chip functions potentially sleep.
*/
int gpiod_set_raw_array_value(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array)
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
if (!desc_array)
return -EINVAL;
return gpiod_set_array_value_complex(true, false, array_size,
- desc_array, value_array);
+ desc_array, array_info, value_bitmap);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value);
/**
* gpiod_set_array_value() - assign values to an array of GPIOs
- * @array_size: number of elements in the descriptor / value arrays
+ * @array_size: number of elements in the descriptor array / value bitmap
* @desc_array: array of GPIO descriptors whose values will be assigned
- * @value_array: array of values to assign
+ * @array_info: information on applicability of fast bitmap processing path
+ * @value_bitmap: bitmap of values to assign
*
* Set the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
* into account.
@@ -3184,13 +3344,16 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value);
* This function should be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
*/
-void gpiod_set_array_value(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array)
+int gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
if (!desc_array)
- return;
- gpiod_set_array_value_complex(false, false, array_size, desc_array,
- value_array);
+ return -EINVAL;
+ return gpiod_set_array_value_complex(false, false, array_size,
+ desc_array, array_info,
+ value_bitmap);
}
EXPORT_SYMBOL_GPL(gpiod_set_array_value);
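
The setter mirrors the getter and, unlike the old void variant, now reports failures. A hedged sketch, reusing a hypothetical "leds" array requested with gpiod_get_array(dev, "led", GPIOD_OUT_LOW):

	DECLARE_BITMAP(values, 8);
	int err;

	bitmap_zero(values, 8);
	__set_bit(2, values);	/* drive only line 2 high, the rest low */
	err = gpiod_set_array_value(leds->ndescs, leds->desc,
				    leds->info, values);
	if (err)
		dev_warn(dev, "failed to set LED lines: %d\n", err);
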
@@ -3292,6 +3455,7 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
}
set_bit(FLAG_USED_AS_IRQ, &desc->flags);
+ set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
/*
* If the consumer has not set up a label (such as when the
@@ -3322,6 +3486,7 @@ void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
return;
clear_bit(FLAG_USED_AS_IRQ, &desc->flags);
+ clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
/* If we only had this marking, erase it */
if (desc->label && !strcmp(desc->label, "interrupt"))
@@ -3329,6 +3494,28 @@ void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
+void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpio_desc *desc = gpiochip_get_desc(chip, offset);
+
+ if (!IS_ERR(desc) &&
+ !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags)))
+ clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+}
+EXPORT_SYMBOL_GPL(gpiochip_disable_irq);
+
+void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpio_desc *desc = gpiochip_get_desc(chip, offset);
+
+ if (!IS_ERR(desc) &&
+ !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
+ WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags));
+ set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ }
+}
+EXPORT_SYMBOL_GPL(gpiochip_enable_irq);
+
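
Together with the FLAG_IRQ_IS_ENABLED bookkeeping, these helpers relax the old rule that an IRQ line may never be switched to output: only lines whose interrupt is currently enabled are refused. A hedged consumer sketch (assumes desc and irq were requested earlier and that the irqchip routes irq_enable/irq_disable through the gpiolib hooks installed above):

	/* Temporarily drive a line that normally feeds an IRQ. */
	disable_irq(irq);			/* clears FLAG_IRQ_IS_ENABLED */
	gpiod_direction_output(desc, 1);	/* no longer rejected */
	usleep_range(1000, 2000);
	gpiod_direction_input(desc);
	enable_irq(irq);			/* output is refused again */
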
bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset)
{
if (offset >= chip->ngpio)
@@ -3338,6 +3525,30 @@ bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset)
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_irq);
+int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+
+ if (!try_module_get(chip->gpiodev->owner))
+ return -ENODEV;
+
+ ret = gpiochip_lock_as_irq(chip, offset);
+ if (ret) {
+ chip_err(chip, "unable to lock HW IRQ %u for IRQ\n", offset);
+ module_put(chip->gpiodev->owner);
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gpiochip_reqres_irq);
+
+void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ gpiochip_unlock_as_irq(chip, offset);
+ module_put(chip->gpiodev->owner);
+}
+EXPORT_SYMBOL_GPL(gpiochip_relres_irq);
+
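
These exported wrappers let a driver that must supply its own resource callbacks (for example to take a runtime PM reference) still reuse gpiolib's IRQ locking and module refcounting. A hedged driver-side sketch:

	static int foo_irq_request_resources(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		/* hypothetical driver-specific step would go here */
		return gpiochip_reqres_irq(gc, d->hwirq);
	}

	static void foo_irq_release_resources(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		gpiochip_relres_irq(gc, d->hwirq);
		/* ...and the matching teardown here */
	}
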
bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset)
{
if (offset >= chip->ngpio)
@@ -3410,9 +3621,10 @@ EXPORT_SYMBOL_GPL(gpiod_get_value_cansleep);
/**
* gpiod_get_raw_array_value_cansleep() - read raw values from an array of GPIOs
- * @array_size: number of elements in the descriptor / value arrays
+ * @array_size: number of elements in the descriptor array / value bitmap
* @desc_array: array of GPIO descriptors whose values will be read
- * @value_array: array to store the read values
+ * @array_info: information on applicability of fast bitmap processing path
+ * @value_bitmap: bitmap to store the read values
*
* Read the raw values of the GPIOs, i.e. the values of the physical lines
* without regard for their ACTIVE_LOW status. Return 0 in case of success,
@@ -3422,21 +3634,24 @@ EXPORT_SYMBOL_GPL(gpiod_get_value_cansleep);
*/
int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
might_sleep_if(extra_checks);
if (!desc_array)
return -EINVAL;
return gpiod_get_array_value_complex(true, true, array_size,
- desc_array, value_array);
+ desc_array, array_info,
+ value_bitmap);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value_cansleep);
/**
* gpiod_get_array_value_cansleep() - read values from an array of GPIOs
- * @array_size: number of elements in the descriptor / value arrays
+ * @array_size: number of elements in the descriptor array / value bitmap
* @desc_array: array of GPIO descriptors whose values will be read
- * @value_array: array to store the read values
+ * @array_info: information on applicability of fast bitmap processing path
+ * @value_bitmap: bitmap to store the read values
*
* Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
* into account. Return 0 in case of success, else an error code.
@@ -3445,13 +3660,15 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value_cansleep);
*/
int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
might_sleep_if(extra_checks);
if (!desc_array)
return -EINVAL;
return gpiod_get_array_value_complex(false, true, array_size,
- desc_array, value_array);
+ desc_array, array_info,
+ value_bitmap);
}
EXPORT_SYMBOL_GPL(gpiod_get_array_value_cansleep);
@@ -3493,9 +3710,10 @@ EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
/**
* gpiod_set_raw_array_value_cansleep() - assign values to an array of GPIOs
- * @array_size: number of elements in the descriptor / value arrays
+ * @array_size: number of elements in the descriptor array / value bitmap
* @desc_array: array of GPIO descriptors whose values will be assigned
- * @value_array: array of values to assign
+ * @array_info: information on applicability of fast bitmap processing path
+ * @value_bitmap: bitmap of values to assign
*
* Set the raw values of the GPIOs, i.e. the values of the physical lines
* without regard for their ACTIVE_LOW status.
@@ -3503,14 +3721,15 @@ EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
* This function is to be called from contexts that can sleep.
*/
int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
might_sleep_if(extra_checks);
if (!desc_array)
return -EINVAL;
return gpiod_set_array_value_complex(true, true, array_size, desc_array,
- value_array);
+ array_info, value_bitmap);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value_cansleep);
@@ -3533,24 +3752,27 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n)
/**
* gpiod_set_array_value_cansleep() - assign values to an array of GPIOs
- * @array_size: number of elements in the descriptor / value arrays
+ * @array_size: number of elements in the descriptor array / value bitmap
* @desc_array: array of GPIO descriptors whose values will be assigned
- * @value_array: array of values to assign
+ * @array_info: information on applicability of fast bitmap processing path
+ * @value_bitmap: bitmap of values to assign
*
* Set the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
* into account.
*
* This function is to be called from contexts that can sleep.
*/
-void gpiod_set_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+int gpiod_set_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
might_sleep_if(extra_checks);
if (!desc_array)
- return;
- gpiod_set_array_value_complex(false, true, array_size, desc_array,
- value_array);
+ return -EINVAL;
+ return gpiod_set_array_value_complex(false, true, array_size,
+ desc_array, array_info,
+ value_bitmap);
}
EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep);
@@ -3908,8 +4130,23 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
* the device name as label
*/
status = gpiod_request(desc, con_id ? con_id : devname);
- if (status < 0)
- return ERR_PTR(status);
+ if (status < 0) {
+ if (status == -EBUSY && flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
+ /*
+ * This happens when there are several consumers for
+ * the same GPIO line: we just return here without
+			 * further initialization. It is a bit of a hack.
+ * This is necessary to support fixed regulators.
+ *
+ * FIXME: Make this more sane and safe.
+ */
+ dev_info(dev, "nonexclusive access to GPIO for %s\n",
+ con_id ? con_id : devname);
+ return desc;
+ } else {
+ return ERR_PTR(status);
+ }
+ }
status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (status < 0) {
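
The GPIOD_FLAGS_BIT_NONEXCLUSIVE escape hatch above exists so that several consumers, such as fixed regulators sharing one enable line, can request the same descriptor. A hedged consumer sketch (the "enable" con_id is illustrative; this assumes the bit can be OR'ed into the gpiod_flags argument the way the regulator core does):

	struct gpio_desc *en;

	en = gpiod_get(dev, "enable",
		       GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
	if (IS_ERR(en))
		return PTR_ERR(en);
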
@@ -4170,7 +4407,9 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
{
struct gpio_desc *desc;
struct gpio_descs *descs;
- int count;
+ struct gpio_array *array_info = NULL;
+ struct gpio_chip *chip;
+ int count, bitmap_size;
count = gpiod_count(dev, con_id);
if (count < 0)
@@ -4186,9 +4425,92 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
gpiod_put_array(descs);
return ERR_CAST(desc);
}
+
descs->desc[descs->ndescs] = desc;
+
+ chip = gpiod_to_chip(desc);
+ /*
+	 * If the pin hardware number of array member 0 is also 0, select
+	 * its chip as a candidate for the fast bitmap processing path.
+ */
+ if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) {
+ struct gpio_descs *array;
+
+ bitmap_size = BITS_TO_LONGS(chip->ngpio > count ?
+ chip->ngpio : count);
+
+ array = kzalloc(struct_size(descs, desc, count) +
+ struct_size(array_info, invert_mask,
+ 3 * bitmap_size), GFP_KERNEL);
+ if (!array) {
+ gpiod_put_array(descs);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memcpy(array, descs,
+ struct_size(descs, desc, descs->ndescs + 1));
+ kfree(descs);
+
+ descs = array;
+ array_info = (void *)(descs->desc + count);
+ array_info->get_mask = array_info->invert_mask +
+ bitmap_size;
+ array_info->set_mask = array_info->get_mask +
+ bitmap_size;
+
+ array_info->desc = descs->desc;
+ array_info->size = count;
+ array_info->chip = chip;
+ bitmap_set(array_info->get_mask, descs->ndescs,
+ count - descs->ndescs);
+ bitmap_set(array_info->set_mask, descs->ndescs,
+ count - descs->ndescs);
+ descs->info = array_info;
+ }
+ /* Unmark array members which don't belong to the 'fast' chip */
+ if (array_info && array_info->chip != chip) {
+ __clear_bit(descs->ndescs, array_info->get_mask);
+ __clear_bit(descs->ndescs, array_info->set_mask);
+ }
+ /*
+ * Detect array members which belong to the 'fast' chip
+ * but their pins are not in hardware order.
+ */
+ else if (array_info &&
+ gpio_chip_hwgpio(desc) != descs->ndescs) {
+ /*
+ * Don't use fast path if all array members processed so
+ * far belong to the same chip as this one but its pin
+ * hardware number is different from its array index.
+ */
+ if (bitmap_full(array_info->get_mask, descs->ndescs)) {
+ array_info = NULL;
+ } else {
+ __clear_bit(descs->ndescs,
+ array_info->get_mask);
+ __clear_bit(descs->ndescs,
+ array_info->set_mask);
+ }
+ } else if (array_info) {
+ /* Exclude open drain or open source from fast output */
+ if (gpiochip_line_is_open_drain(chip, descs->ndescs) ||
+ gpiochip_line_is_open_source(chip, descs->ndescs))
+ __clear_bit(descs->ndescs,
+ array_info->set_mask);
+			/* Identify 'fast' pins which require inversion */
+ if (gpiod_is_active_low(desc))
+ __set_bit(descs->ndescs,
+ array_info->invert_mask);
+ }
+
descs->ndescs++;
}
+ if (array_info)
+ dev_dbg(dev,
+ "GPIO array info: chip=%s, size=%d, get_mask=%lx, set_mask=%lx, invert_mask=%lx\n",
+ array_info->chip->label, array_info->size,
+ *array_info->get_mask, *array_info->set_mask,
+ *array_info->invert_mask);
return descs;
}
EXPORT_SYMBOL_GPL(gpiod_get_array);
@@ -4275,8 +4597,9 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
struct gpio_chip *chip = gdev->chip;
unsigned gpio = gdev->base;
struct gpio_desc *gdesc = &gdev->descs[0];
- int is_out;
- int is_irq;
+ bool is_out;
+ bool is_irq;
+ bool active_low;
for (i = 0; i < gdev->ngpio; i++, gpio++, gdesc++) {
if (!test_bit(FLAG_REQUESTED, &gdesc->flags)) {
@@ -4290,11 +4613,13 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
gpiod_get_direction(gdesc);
is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
is_irq = test_bit(FLAG_USED_AS_IRQ, &gdesc->flags);
- seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s",
+ active_low = test_bit(FLAG_ACTIVE_LOW, &gdesc->flags);
+ seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s%s",
gpio, gdesc->name ? gdesc->name : "", gdesc->label,
is_out ? "out" : "in ",
chip->get ? (chip->get(chip, i) ? "hi" : "lo") : "? ",
- is_irq ? "IRQ" : " ");
+ is_irq ? "IRQ " : "",
+ active_low ? "ACTIVE LOW" : "");
seq_printf(s, "\n");
}
}
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index a7e49fef73d4..087d865286a0 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Internal GPIO functions.
*
* Copyright (C) 2013, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef GPIOLIB_H
@@ -183,15 +180,26 @@ static inline bool acpi_can_fallback_to_crs(struct acpi_device *adev,
}
#endif
+struct gpio_array {
+ struct gpio_desc **desc;
+ unsigned int size;
+ struct gpio_chip *chip;
+ unsigned long *get_mask;
+ unsigned long *set_mask;
+ unsigned long invert_mask[];
+};
+
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
int gpiod_get_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_set_array_value_complex(bool raw, bool can_sleep,
- unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+ unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
/* This is just passed between gpiolib and devres */
struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
@@ -214,6 +222,7 @@ struct gpio_desc {
#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+#define FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index f8bbbb3a9504..0c791e35acf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr)
+ void **cpu_ptr, bool mqd_gfx9)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
@@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+
+ if (mqd_gfx9)
+ bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
+
r = amdgpu_bo_create(adev, &bp, &bo);
if (r) {
dev_err(adev->dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 2f379c183ed2..cc9aeab5468c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
/* Shared API */
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr);
+ void **cpu_ptr, bool mqd_gfx9);
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
void get_local_mem_info(struct kgd_dev *kgd,
struct kfd_local_mem_info *mem_info);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ea3f698aef5e..9803b91f3e77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
while (true) {
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (time_after(jiffies, end_jiffies))
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 693ec5ea4950..8816c697b205 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU) {
- if ((adev->pdev->device == 0x67df) &&
- ((adev->pdev->revision == 0xe0) ||
- (adev->pdev->revision == 0xe3) ||
- (adev->pdev->revision == 0xe4) ||
- (adev->pdev->revision == 0xe5) ||
- (adev->pdev->revision == 0xe7) ||
+ if (((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe3) ||
+ (adev->pdev->revision == 0xe4) ||
+ (adev->pdev->revision == 0xe5) ||
+ (adev->pdev->revision == 0xe7) ||
+ (adev->pdev->revision == 0xef))) ||
+ ((adev->pdev->device == 0x6fdf) &&
(adev->pdev->revision == 0xef))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8843a06360fa..0f41d8647376 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
/* Polaris12 */
{0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0cc5190f4f36..5f3f54073818 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
int i;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
if (adev->vce.vcpu_bo == NULL)
return 0;
@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_VCE_HANDLES)
return 0;
- cancel_delayed_work_sync(&adev->vce.idle_work);
/* TODO: suspending running encoding sessions isn't supported */
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index fd654a4406db..400fc74bbae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
unsigned size;
void *ptr;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->vcn.vcpu_bo == NULL)
return 0;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
size = amdgpu_bo_size(adev->vcn.vcpu_bo);
ptr = adev->vcn.cpu_addr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 1b048715ab8a..29ac74f40dce 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd->kfd2kgd->init_gtt_mem_allocation(
kfd->kgd, size, &kfd->gtt_mem,
- &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
+ &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
+ false)) {
dev_err(kfd_device, "Could not allocate %d bytes\n", size);
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index ec0d62a16e53..4f22e745df51 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
{
- int retval;
struct mqd_manager *mqd_mgr;
+ int retval;
mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
if (!q->properties.is_active)
return 0;
- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
- &q->properties, q->process->mm);
+ if (WARN(q->process->mm != current->mm,
+ "should only run in user thread"))
+ retval = -EFAULT;
+ else
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+ &q->properties, current->mm);
if (retval)
goto out_uninit_mqd;
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
- q->properties.type == KFD_QUEUE_TYPE_SDMA))
- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
- &q->properties, q->process->mm);
+ q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+ if (WARN(q->process->mm != current->mm,
+ "should only run in user thread"))
+ retval = -EFAULT;
+ else
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+ q->pipe, q->queue,
+ &q->properties, current->mm);
+ }
out_unlock:
dqm_unlock(dqm);
@@ -653,6 +663,7 @@ out:
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
+ struct mm_struct *mm = NULL;
struct queue *q;
struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
kfd_flush_tlb(pdd);
}
+ /* Take a safe reference to the mm_struct, which may otherwise
+ * disappear even while the kfd_process is still referenced.
+ */
+ mm = get_task_mm(pdd->process->lead_thread);
+ if (!mm) {
+ retval = -EFAULT;
+ goto out;
+ }
+
/* activate all active queues on the qpd */
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
q->properties.is_evicted = false;
q->properties.is_active = true;
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
- q->queue, &q->properties,
- q->process->mm);
+ q->queue, &q->properties, mm);
if (retval)
goto out;
dqm->queue_count++;
}
qpd->evicted = 0;
out:
+ if (mm)
+ mmput(mm);
dqm_unlock(dqm);
return retval;
}
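
Condensed, the pattern this hunk applies is the standard one for borrowing another task's address space: take a counted reference with get_task_mm() and release it with mmput() once the MQD load is done. A hedged restatement using the hunk's own identifiers:

	struct mm_struct *mm;

	mm = get_task_mm(pdd->process->lead_thread);	/* counted reference */
	if (!mm)
		return -EFAULT;
	/* mm cannot go away while the reference is held */
	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
				   &q->properties, mm);
	mmput(mm);
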
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 7a61f38c09e6..01494752c36a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
struct amd_iommu_device_info iommu_info;
unsigned int pasid_limit;
int err;
+ struct kfd_topology_device *top_dev;
- if (!kfd->device_info->needs_iommu_device)
+ top_dev = kfd_topology_device_by_id(kfd->id);
+
+ /*
+	 * Overwrite the ATS capability according to needs_iommu_device, to
+	 * fix a potentially missing ATS bit in the CRAT provided by the BIOS.
+ */
+ if (!kfd->device_info->needs_iommu_device) {
+ top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
return 0;
+ }
+
+ top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
iommu_info.flags = 0;
err = amd_iommu_device_info(kfd->pdev, &iommu_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index f5fc3675f21e..0cedb37cf513 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
&((*mqd_mem_obj)->gtt_mem),
&((*mqd_mem_obj)->gpu_addr),
- (void *)&((*mqd_mem_obj)->cpu_ptr));
+ (void *)&((*mqd_mem_obj)->cpu_ptr), true);
} else
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
mqd_mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f971710f1c91..92b285ca73aa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
uint32_t proximity_domain);
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index bc95d4dfee2e..80f5db4ef75f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
return device;
}
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
{
- struct kfd_topology_device *top_dev;
- struct kfd_dev *device = NULL;
+ struct kfd_topology_device *top_dev = NULL;
+ struct kfd_topology_device *ret = NULL;
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
if (top_dev->gpu_id == gpu_id) {
- device = top_dev->gpu;
+ ret = top_dev;
break;
}
up_read(&topology_lock);
- return device;
+ return ret;
+}
+
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+{
+ struct kfd_topology_device *top_dev;
+
+ top_dev = kfd_topology_device_by_id(gpu_id);
+ if (!top_dev)
+ return NULL;
+
+ return top_dev->gpu;
}
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 800f481a6995..6903fe6c894b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
return NULL;
}
+static void emulated_link_detect(struct dc_link *link)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct dc_sink *sink = NULL;
+ struct dc_sink *prev_sink = NULL;
+
+ link->type = dc_connection_none;
+ prev_sink = link->local_sink;
+
+ if (prev_sink != NULL)
+ dc_sink_retain(prev_sink);
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_LVDS: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_LVDS;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+ break;
+ }
+
+ default:
+ DC_ERROR("Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ return;
+ }
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DC_ERROR("Failed to create sink!\n");
+ return;
+ }
+
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(
+ link->ctx,
+ link,
+ sink);
+
+ if (edid_status != EDID_OK)
+ DC_ERROR("Failed to read EDID");
+
+}
+
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
@@ -654,6 +735,7 @@ static int dm_resume(void *handle)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
+ enum dc_connection_type new_connection_type = dc_connection_none;
int ret;
int i;
@@ -684,7 +766,13 @@ static int dm_resume(void *handle)
continue;
mutex_lock(&aconnector->hpd_lock);
- dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none)
+ emulated_link_detect(aconnector->dc_link);
+ else
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
aconnector->fake_enable = false;
@@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param)
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
/* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in it's own context.
@@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param)
if (aconnector->fake_enable)
aconnector->fake_enable = false;
- if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+ if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(aconnector->dc_link);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+
+ } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
amdgpu_dm_update_connector_after_detect(aconnector);
@@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param)
struct drm_device *dev = connector->dev;
struct dc_link *dc_link = aconnector->dc_link;
bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+ enum dc_connection_type new_connection_type = dc_connection_none;
/* TODO: Temporarily add a mutex to keep the hpd interrupt from hitting a gpio
* conflict; once the i2c helper is implemented, this mutex should be
@@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
!is_mst_root_connector) {
/* Downstream Port status changed. */
- if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+ if (!dc_link_detect_sink(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(dc_link);
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_hotplug_event(dev);
+ } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
if (aconnector->fake_enable)
aconnector->fake_enable = false;
@@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_mode_info *mode_info = &adev->mode_info;
uint32_t link_cnt;
int32_t total_overlay_planes, total_primary_planes;
+ enum dc_connection_type new_connection_type = dc_connection_none;
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link = dc_get_link_at_index(dm->dc, i);
- if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+ if (!dc_link_detect_sink(link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(link);
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
}
@@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (dm_state && dm_state->freesync_capable)
stream->ignore_msa_timing_param = true;
finish:
- if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+ if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
dc_sink_release(sink);
return stream;
@@ -4504,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- /* Signal HW programming completion */
- drm_atomic_helper_commit_hw_done(state);
if (wait_for_vblank)
drm_atomic_helper_wait_for_flip_done(dev, state);
+ /*
+ * FIXME:
+ * Delay hw_done() until flip_done() is signaled. This is to block
+ * another commit from freeing the CRTC state while we're still
+ * waiting on flip_done.
+ */
+ drm_atomic_helper_commit_hw_done(state);
+
drm_atomic_helper_cleanup_planes(dev, state);
/* Finally, drop a runtime PM reference for each newly disabled CRTC,
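
The reordering above is the whole point of the hunk: drm_atomic_helper_commit_hw_done() is the signal that lets the next commit free this commit's state, so waiting for flip_done after it could dereference freed CRTC state. A minimal sketch of the resulting commit-tail ordering (hardware programming elided):

        static void commit_tail_sketch(struct drm_atomic_state *state)
        {
                struct drm_device *dev = state->dev;

                /* ... plane/CRTC programming, page-flip submission ... */

                drm_atomic_helper_wait_for_flip_done(dev, state);

                /*
                 * Only now signal hw_done: once signalled, a later commit
                 * may release this CRTC state at any time.
                 */
                drm_atomic_helper_commit_hw_done(state);

                drm_atomic_helper_cleanup_planes(dev, state);
        }
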
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 37eaf72ace54..fced3c1c2ef5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -195,7 +195,7 @@ static bool program_hpd_filter(
return result;
}
-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
{
uint32_t is_hpd_high = 0;
struct gpio *hpd_pin;
@@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
return false;
- if (false == detect_sink(link, &new_connection_type)) {
+ if (false == dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index d43cefbc43d3..1b48ab9aea89 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
bool dc_link_is_dp_sink_present(struct dc_link *link);
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
/*
* DPCD access interfaces
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 14384d9675a8..b2f308766a9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements(
dc->prev_display_config = *pp_display_cfg;
}
-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index e4c5db75c4c6..d6db3dbd9015 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
-void dce110_set_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed);
-
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 5853522a6182..eb0f5f9a973b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
dh_data->dchub_info_valid = false;
}
-static void dce120_set_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed)
-{
- if (context->stream_count <= 0)
- return;
-
- dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
void dce120_hw_sequencer_construct(struct dc *dc)
{
/* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
- dc->hwss.set_bandwidth = dce120_set_bandwidth;
}
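
With the wrapper gone, dce12 simply inherits the dce110 set_bandwidth hook through the usual construct-then-override pattern; whether the dropped stream_count guard is still needed elsewhere is not visible in these hunks. The constructor that results:

        void dce120_hw_sequencer_construct(struct dc *dc)
        {
                /* inherit the dce11.x sequencer wholesale... */
                dce110_hw_sequencer_construct(dc);

                /* ...then override only the hooks that differ on dce12 */
                dc->hwss.enable_display_power_gating =
                                dce120_enable_display_power_gating;
                dc->hwss.update_dchub = dce120_update_dchub;
                /* set_bandwidth now stays at the dce110 default */
        }
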
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 14391b06080c..43b82e14007e 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -292,7 +292,7 @@ struct tile_config {
struct kfd2kgd_calls {
int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr);
+ void **cpu_ptr, bool mqd_gfx9);
void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 08b5bb219816..94d6dabec2dc 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev)
drm->irq_enabled = true;
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ drm_crtc_vblank_reset(&malidp->crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
goto vblank_fail;
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index c94a4422e0e9..2781e462c1ed 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
dma_addr_t *addrs, s32 *pitches,
- int num_planes, u16 w, u16 h, u32 fmt_id)
+ int num_planes, u16 w, u16 h, u32 fmt_id,
+ const s16 *rgb2yuv_coeffs)
{
u32 base = MALIDP500_SE_MEMWRITE_BASE;
u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
MALIDP500_SE_MEMWRITE_OUT_SIZE);
+
+ if (rgb2yuv_coeffs) {
+ int i;
+
+ for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+ malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+ MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
+ }
+ }
+
malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
return 0;
@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
dma_addr_t *addrs, s32 *pitches,
- int num_planes, u16 w, u16 h, u32 fmt_id)
+ int num_planes, u16 w, u16 h, u32 fmt_id,
+ const s16 *rgb2yuv_coeffs)
{
u32 base = MALIDP550_SE_MEMWRITE_BASE;
u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
MALIDP550_SE_CONTROL);
+ if (rgb2yuv_coeffs) {
+ int i;
+
+ for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+ malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+ MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
+ }
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index ad2e96915d44..9fc94c08190f 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -191,7 +191,8 @@ struct malidp_hw {
* @param fmt_id - internal format ID of output buffer
*/
int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
- s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+ s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id,
+ const s16 *rgb2yuv_coeffs);
/*
* Disable the writing to memory of the next frame's content.
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index ba6ae66387c9..91472e5e0c8b 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -26,6 +26,8 @@ struct malidp_mw_connector_state {
s32 pitches[2];
u8 format;
u8 n_planes;
+ bool rgb2yuv_initialized;
+ const s16 *rgb2yuv_coeffs;
};
static int malidp_mw_connector_get_modes(struct drm_connector *connector)
@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector)
static struct drm_connector_state *
malidp_mw_connector_duplicate_state(struct drm_connector *connector)
{
- struct malidp_mw_connector_state *mw_state;
+ struct malidp_mw_connector_state *mw_state, *mw_current_state;
if (WARN_ON(!connector->state))
return NULL;
@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector)
if (!mw_state)
return NULL;
- /* No need to preserve any of our driver-local data */
+ mw_current_state = to_mw_state(connector->state);
+ mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
+ mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
+
__drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
return &mw_state->base;
@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
+static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
+ 47, 157, 16,
+ -26, -87, 112,
+ 112, -102, -10,
+ 16, 128, 128
+};
+
static int
malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
}
mw_state->n_planes = n_planes;
+ if (fb->format->is_yuv)
+ mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
+
return 0;
}
@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
conn_state->writeback_job = NULL;
-
hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
mw_state->pitches, mw_state->n_planes,
- fb->width, fb->height, mw_state->format);
+ fb->width, fb->height, mw_state->format,
+ !mw_state->rgb2yuv_initialized ?
+ mw_state->rgb2yuv_coeffs : NULL);
+ mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
} else {
DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
hwdev->hw->disable_memwrite(hwdev);
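
The coefficient table added above matches the BT.709 limited-range RGB-to-YCbCr constants scaled by 256 (Q8), laid out as a 3x3 matrix followed by three offsets; that layout is inferred from the constants themselves, not documented in the patch. A plain-C model of what the writeback hardware presumably computes per pixel (clamping to the 16-235/16-240 ranges omitted; arithmetic right shift of negative values assumed, as gcc guarantees):

        static void rgb2yuv_q8(const s16 c[12], u8 r, u8 g, u8 b,
                               u8 *y, u8 *cb, u8 *cr)
        {
                *y  = ((c[0] * r + c[1] * g + c[2] * b) >> 8) + c[9];
                *cb = ((c[3] * r + c[4] * g + c[5] * b) >> 8) + c[10];
                *cr = ((c[6] * r + c[7] * g + c[8] * b) >> 8) + c[11];
        }
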
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 3579d36b2a71..6ffe849774f2 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -205,6 +205,7 @@
#define MALIDP500_SE_BASE 0x00c00
#define MALIDP500_SE_CONTROL 0x00c0c
#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
+#define MALIDP500_SE_RGB_YUV_COEFFS 0x00C74
#define MALIDP500_SE_MEMWRITE_BASE 0x00e00
#define MALIDP500_DC_IRQ_BASE 0x00f00
#define MALIDP500_CONFIG_VALID 0x00f00
@@ -238,6 +239,7 @@
#define MALIDP550_SE_CONTROL 0x08010
#define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7)
#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
+#define MALIDP550_SE_RGB_YUV_COEFFS 0x08078
#define MALIDP550_SE_MEMWRITE_BASE 0x08100
#define MALIDP550_DC_BASE 0x0c000
#define MALIDP550_DC_CONTROL 0x0c010
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3eb061e11e2e..281cf9cbb44c 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -174,6 +174,11 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
state->crtcs[i].state = NULL;
state->crtcs[i].old_state = NULL;
state->crtcs[i].new_state = NULL;
+
+ if (state->crtcs[i].commit) {
+ drm_crtc_commit_put(state->crtcs[i].commit);
+ state->crtcs[i].commit = NULL;
+ }
}
for (i = 0; i < config->num_total_plane; i++) {
@@ -2067,7 +2072,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ if (!drm_drv_uses_atomic_modeset(dev))
return;
list_for_each_entry(plane, &config->plane_list, head) {
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 80be74df7ba6..1bb4c318bdd4 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1408,15 +1408,16 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
- struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
- struct drm_crtc_commit *commit = new_crtc_state->commit;
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
int ret;
- if (!commit)
+ crtc = old_state->crtcs[i].ptr;
+
+ if (!crtc || !commit)
continue;
ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
@@ -1934,6 +1935,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
drm_crtc_commit_get(commit);
commit->abort_completion = true;
+
+ state->crtcs[i].commit = commit;
+ drm_crtc_commit_get(commit);
}
for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
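
Together with the drm_atomic.c hunk above, this pins each CRTC commit into the atomic state itself. Previously wait_for_flip_done chased new_crtc_state->commit, which a racing follow-up commit can free as soon as hw_done is signalled; now the commit is reached through a reference the state owns. The lifetime in short:

        /* in setup_commit: pin the commit into the state */
        state->crtcs[i].commit = commit;
        drm_crtc_commit_get(commit);

        /* in default_clear: drop the pin when the state is reused or freed */
        if (state->crtcs[i].commit) {
                drm_crtc_commit_put(state->crtcs[i].commit);
                state->crtcs[i].commit = NULL;
        }
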
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index baff50a4c234..df31c3815092 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -63,20 +63,21 @@ static void drm_client_close(struct drm_client_dev *client)
EXPORT_SYMBOL(drm_client_close);
/**
- * drm_client_new - Create a DRM client
+ * drm_client_init - Initialise a DRM client
* @dev: DRM device
* @client: DRM client
* @name: Client name
* @funcs: DRM client functions (optional)
*
+ * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
* The caller needs to hold a reference on @dev before calling this function.
* The client is freed when the &drm_device is unregistered. See drm_client_release().
*
* Returns:
* Zero on success or negative error code on failure.
*/
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
- const char *name, const struct drm_client_funcs *funcs)
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+ const char *name, const struct drm_client_funcs *funcs)
{
int ret;
@@ -95,10 +96,6 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
if (ret)
goto err_put_module;
- mutex_lock(&dev->clientlist_mutex);
- list_add(&client->list, &dev->clientlist);
- mutex_unlock(&dev->clientlist_mutex);
-
drm_dev_get(dev);
return 0;
@@ -109,13 +106,33 @@ err_put_module:
return ret;
}
-EXPORT_SYMBOL(drm_client_new);
+EXPORT_SYMBOL(drm_client_init);
+
+/**
+ * drm_client_add - Add client to the device list
+ * @client: DRM client
+ *
+ * Add the client to the &drm_device client list to activate its callbacks.
+ * @client must be initialized by a call to drm_client_init(). After
+ * drm_client_add() it is no longer permissible to call drm_client_release()
+ * directly (outside the unregister callback); instead, cleanup will happen
+ * automatically on driver unload.
+ */
+void drm_client_add(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_add(&client->list, &dev->clientlist);
+ mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_add);
/**
* drm_client_release - Release DRM client resources
* @client: DRM client
*
- * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * Releases resources by closing the &drm_file that was opened by drm_client_init().
* It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
*
* This function should only be called from the unregister callback. An exception
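
The init/add split exists so a client can finish its own setup between opening the &drm_file and going live; with the old one-shot drm_client_new(), callbacks could fire before the client was ready. A sketch of the intended call order (myclient_setup is a hypothetical driver step, not a DRM API):

        struct drm_client_dev client = {};
        int ret;

        ret = drm_client_init(dev, &client, "myclient", &funcs);
        if (ret)
                return ret;

        ret = myclient_setup(&client);
        if (ret) {
                drm_client_release(&client);    /* still allowed before _add() */
                return ret;
        }

        drm_client_add(&client);        /* callbacks may fire from here on */

The drm_fb_cma_helper.c and drm_fb_helper.c hunks further down follow exactly this order.
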
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bae43938c8f6..9cbe8f5c9aca 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -567,9 +567,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_crtc *crtc_req = data;
struct drm_crtc *crtc;
struct drm_plane *plane;
- struct drm_connector **connector_set = NULL, *connector;
- struct drm_framebuffer *fb = NULL;
- struct drm_display_mode *mode = NULL;
+ struct drm_connector **connector_set, *connector;
+ struct drm_framebuffer *fb;
+ struct drm_display_mode *mode;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx;
@@ -598,6 +598,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
mutex_lock(&crtc->dev->mode_config.mutex);
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
+ connector_set = NULL;
+ fb = NULL;
+ mode = NULL;
+
ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
if (ret)
goto out;
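
Re-initialising the locals under the retry: label matters because the function jumps back there after a modeset-lock deadlock, and the out: path has already dropped the fb/mode references on the way; without the reset they would be dropped again on the next pass. The surrounding pattern, condensed (the -EDEADLK backoff is the standard drm_modeset_lock idiom, assumed rather than shown in this hunk):

        retry:
                connector_set = NULL;
                fb = NULL;
                mode = NULL;            /* fresh state for every attempt */

                ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
                if (ret)
                        goto out;
                /* ... */
        out:
                /* out: releases fb/mode/connector_set on every pass */
                if (ret == -EDEADLK) {
                        ret = drm_modeset_backoff(&ctx);
                        if (!ret)
                                goto retry;
                }
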
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6f28fe58f169..373bd4c2b698 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
return ret;
}
- if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ if (drm_drv_uses_atomic_modeset(dev)) {
ret = drm_atomic_debugfs_init(minor);
if (ret) {
DRM_ERROR("Failed to create atomic debugfs files\n");
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3c9fc99648b7..ff0bfc65a8c1 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -113,6 +113,9 @@ static const struct edid_quirk {
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+ /* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
+ { "BOE", 0x78b, EDID_QUIRK_FORCE_6BPC },
+
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
{ "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
@@ -4279,7 +4282,7 @@ static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK;
- hdmi->y420_dc_modes |= dc_mask;
+ hdmi->y420_dc_modes = dc_mask;
}
static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
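
Switching from |= to = looks cosmetic but is not: this parser runs again for every new EDID, and OR-ing would let YCbCr 4:2:0 deep-color bits from a previously connected sink leak into the state of the current one, presumably because the field is not zeroed between parses. Plain assignment makes each parse reflect only the EDID actually present:

        /* '=' not '|=': do not inherit modes from the previous sink's EDID */
        hdmi->y420_dc_modes = dc_mask;
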
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 9da36a6271d3..9ac1f2e0f064 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -160,7 +160,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
fb_helper = &fbdev_cma->fb_helper;
- ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
if (ret)
goto err_free;
@@ -169,6 +169,8 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
if (ret)
goto err_client_put;
+ drm_client_add(&fb_helper->client);
+
return fbdev_cma;
err_client_put:
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4b0dd20bccb8..9628dd617826 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1580,6 +1580,25 @@ unlock:
}
EXPORT_SYMBOL(drm_fb_helper_ioctl);
+static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
+ const struct fb_var_screeninfo *var_2)
+{
+ return var_1->bits_per_pixel == var_2->bits_per_pixel &&
+ var_1->grayscale == var_2->grayscale &&
+ var_1->red.offset == var_2->red.offset &&
+ var_1->red.length == var_2->red.length &&
+ var_1->red.msb_right == var_2->red.msb_right &&
+ var_1->green.offset == var_2->green.offset &&
+ var_1->green.length == var_2->green.length &&
+ var_1->green.msb_right == var_2->green.msb_right &&
+ var_1->blue.offset == var_2->blue.offset &&
+ var_1->blue.length == var_2->blue.length &&
+ var_1->blue.msb_right == var_2->blue.msb_right &&
+ var_1->transp.offset == var_2->transp.offset &&
+ var_1->transp.length == var_2->transp.length &&
+ var_1->transp.msb_right == var_2->transp.msb_right;
+}
+
/**
* drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
* @var: screeninfo to check
@@ -1590,7 +1609,6 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
- int depth;
if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
@@ -1610,72 +1628,15 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
- switch (var->bits_per_pixel) {
- case 16:
- depth = (var->green.length == 6) ? 16 : 15;
- break;
- case 32:
- depth = (var->transp.length > 0) ? 32 : 24;
- break;
- default:
- depth = var->bits_per_pixel;
- break;
- }
-
- switch (depth) {
- case 8:
- var->red.offset = 0;
- var->green.offset = 0;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 15:
- var->red.offset = 10;
- var->green.offset = 5;
- var->blue.offset = 0;
- var->red.length = 5;
- var->green.length = 5;
- var->blue.length = 5;
- var->transp.length = 1;
- var->transp.offset = 15;
- break;
- case 16:
- var->red.offset = 11;
- var->green.offset = 5;
- var->blue.offset = 0;
- var->red.length = 5;
- var->green.length = 6;
- var->blue.length = 5;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 24:
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 32:
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 8;
- var->transp.offset = 24;
- break;
- default:
+ /*
+ * drm fbdev emulation doesn't support changing the pixel format at all,
+ * so reject all pixel format changing requests.
+ */
+ if (!drm_fb_pixel_format_equal(var, &info->var)) {
+ DRM_DEBUG("fbdev emulation doesn't support changing the pixel format\n");
return -EINVAL;
}
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_check_var);
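
From userspace this means any FBIOPUT_VSCREENINFO that changes depth or channel layout now fails with EINVAL instead of being silently "fixed up" by the old switch statement. A minimal probe of the new behaviour (device path and the bpp toggle are illustrative):

        #include <err.h>
        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <linux/fb.h>

        int main(void)
        {
                struct fb_var_screeninfo var;
                int fd = open("/dev/fb0", O_RDWR);

                if (fd < 0)
                        err(1, "/dev/fb0");
                if (ioctl(fd, FBIOGET_VSCREENINFO, &var))
                        err(1, "FBIOGET_VSCREENINFO");

                var.bits_per_pixel = var.bits_per_pixel == 32 ? 16 : 32;
                if (ioctl(fd, FBIOPUT_VSCREENINFO, &var))
                        warn("format change rejected, as expected");
                return 0;
        }
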
@@ -2370,7 +2331,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
{
int c, o;
struct drm_connector *connector;
- const struct drm_connector_helper_funcs *connector_funcs;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
struct drm_fb_helper_connector *fb_helper_conn;
@@ -2399,8 +2359,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (drm_has_preferred_mode(fb_helper_conn, width, height))
my_score++;
- connector_funcs = connector->helper_private;
-
/*
* select a crtc for this connector and then attempt to configure
* remaining connectors
@@ -3221,12 +3179,14 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
if (!fb_helper)
return -ENOMEM;
- ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
if (ret) {
kfree(fb_helper);
return ret;
}
+ drm_client_add(&fb_helper->client);
+
fb_helper->preferred_bpp = preferred_bpp;
drm_fbdev_client_hotplug(&fb_helper->client);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index b54fb78a283c..b82da96ded5c 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -566,14 +566,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
lessee_priv->is_master = 1;
lessee_priv->authenticated = 1;
- /* Hook up the fd */
- fd_install(fd, lessee_file);
-
/* Pass fd back to userspace */
DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
cl->fd = fd;
cl->lessee_id = lessee->lessee_id;
+ /* Hook up the fd */
+ fd_install(fd, lessee_file);
+
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
return 0;
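
fd_install() publishes the struct file: from that instant another thread of the calling process can use, dup or close the fd. It therefore has to be the very last step, after every write to the ioctl reply; the hunk is a pure reordering:

        /* before (racy): the fd is live while the reply is still being built */
        fd_install(fd, lessee_file);
        cl->fd = fd;
        cl->lessee_id = lessee->lessee_id;

        /* after: finish the reply, then publish the file */
        cl->fd = fd;
        cl->lessee_id = lessee->lessee_id;
        fd_install(fd, lessee_file);
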
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index b902361dee6e..1d9a9d2fe0e0 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/module.h>
-#include <drm/drm_device.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
if (panel->connector)
return -EBUSY;
- panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
- if (!panel->link) {
- dev_err(panel->dev, "failed to link panel to %s\n",
- dev_name(connector->dev->dev));
- return -EINVAL;
- }
-
panel->connector = connector;
panel->drm = connector->dev;
@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
*/
int drm_panel_detach(struct drm_panel *panel)
{
- device_link_del(panel->link);
-
panel->connector = NULL;
panel->drm = NULL;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index adb3cb27d31e..759278fef35a 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
{
int ret;
+ WARN_ON(*fence);
+
*fence = drm_syncobj_fence_get(syncobj);
if (*fence)
return 1;
@@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
for (i = 0; i < count; ++i) {
+ if (entries[i].fence)
+ continue;
+
drm_syncobj_fence_get_or_add_callback(syncobjs[i],
&entries[i].fence,
&entries[i].syncobj_cb,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 9b2720b41571..83c1f46670bf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct component_match *match = NULL;
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
if (!dev->platform_data) {
struct device_node *core_node;
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
for_each_compatible_node(np, NULL, "vivante,gc") {
if (!of_device_is_available(np))
continue;
- pdev = platform_device_register_simple("etnaviv", -1,
- NULL, 0);
- if (IS_ERR(pdev)) {
- ret = PTR_ERR(pdev);
+
+ pdev = platform_device_alloc("etnaviv", -1);
+ if (!pdev) {
+ ret = -ENOMEM;
+ of_node_put(np);
+ goto unregister_platform_driver;
+ }
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ /*
+ * Apply the same DMA configuration to the virtual etnaviv
+ * device as the GPU we found. This assumes that all Vivante
+ * GPUs in the system share the same DMA constraints.
+ */
+ of_dma_configure(&pdev->dev, np, true);
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ platform_device_put(pdev);
of_node_put(np);
goto unregister_platform_driver;
}
+
etnaviv_drm = pdev;
of_node_put(np);
break;
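
platform_device_register_simple() allocates and adds the device in one shot, leaving no window to set DMA properties before the device becomes visible; splitting into alloc/configure/add opens that window. The three-step shape, reduced to its skeleton (device name is illustrative):

        pdev = platform_device_alloc("mydev", -1);
        if (!pdev)
                return -ENOMEM;

        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
        pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
        of_dma_configure(&pdev->dev, np, true); /* must precede device_add */

        ret = platform_device_add(pdev);
        if (ret)
                platform_device_put(pdev);      /* undoes the alloc on failure */
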
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 87f6b5672e11..797d9ee5f15a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
unsigned long start, unsigned long size)
{
- struct iommu_domain *domain;
- int ret;
-
- domain = iommu_domain_alloc(priv->dma_dev->bus);
- if (!domain)
- return -ENOMEM;
-
- ret = iommu_get_dma_cookie(domain);
- if (ret)
- goto free_domain;
-
- ret = iommu_dma_init_domain(domain, start, size, NULL);
- if (ret)
- goto put_cookie;
-
- priv->mapping = domain;
+ priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
return 0;
-
-put_cookie:
- iommu_put_dma_cookie(domain);
-free_domain:
- iommu_domain_free(domain);
- return ret;
}
static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
{
- struct iommu_domain *domain = priv->mapping;
-
- iommu_put_dma_cookie(domain);
- iommu_domain_free(domain);
priv->mapping = NULL;
}
@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
{
struct iommu_domain *domain = priv->mapping;
- return iommu_attach_device(domain, dev);
+ if (dev != priv->dma_dev)
+ return iommu_attach_device(domain, dev);
+ return 0;
}
static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
{
struct iommu_domain *domain = priv->mapping;
- iommu_detach_device(domain, dev);
+ if (dev != priv->dma_dev)
+ iommu_detach_device(domain, dev);
}
#else
#error Unsupported architecture and IOMMU/DMA-mapping glue code
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 5d2f0d548469..250b5e02a314 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
break;
}
/* TDA9950 executes all retries for us */
- tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+ if (tx_status != CEC_TX_STATUS_OK)
+ tx_status |= CEC_TX_STATUS_MAX_RETRIES;
cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
nack_cnt, 0, err_cnt);
break;
@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
/* Wait up to .5s for it to signal non-busy */
do {
csr = tda9950_read(client, REG_CSR);
- if (!(csr & CSR_BUSY) || --timeout)
+ if (!(csr & CSR_BUSY) || !--timeout)
break;
msleep(10);
} while (1);
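
The one-character fix deserves a second look: with the old "|| --timeout" the condition was true on the very first pass (the decremented budget is still nonzero), so the loop broke immediately whether or not the chip was busy. "!--timeout" breaks only once the budget is exhausted. The corrected loop shape, with an assumed initial budget:

        int timeout = 50;       /* ~500 ms at 10 ms per poll; value assumed */

        do {
                csr = tda9950_read(client, REG_CSR);
                if (!(csr & CSR_BUSY) || !--timeout)
                        break;          /* idle, or out of retries */
                msleep(10);
        } while (1);
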
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 72afa518edd9..94c1089ecf59 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -3210,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+ MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c7afee37b2b8..9ad89e38f6c0 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1833,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
+ int idx;
+ bool ret;
if (!handle_valid(handle))
return false;
@@ -1840,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
info = (struct kvmgt_guest_info *)handle;
kvm = info->kvm;
- return kvm_is_visible_gfn(kvm, gfn);
+ idx = srcu_read_lock(&kvm->srcu);
+ ret = kvm_is_visible_gfn(kvm, gfn);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return ret;
}
struct intel_gvt_mpt kvmgt_mpt = {
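
kvm_is_visible_gfn() walks the memslot array, which KVM protects with SRCU; callers outside KVM's own ioctl paths must open the read-side critical section themselves, which is exactly what the hunk adds. The general shape:

        int idx;
        bool visible;

        idx = srcu_read_lock(&kvm->srcu);       /* pins the memslot array */
        visible = kvm_is_visible_gfn(kvm, gfn);
        srcu_read_unlock(&kvm->srcu, idx);      /* must pair, same idx */
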
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 994366035364..9bb9a85c992c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
+
+ if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
+ ~(BIT(0) | BIT(1));
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+ ~PHY_POWER_GOOD;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+ ~PHY_POWER_GOOD;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
+ ~BIT(30);
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
+ ~BIT(30);
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
+ ~BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
+ BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
+ ~BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
+ BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
+ ~BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
+ BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK;
+ }
} else {
#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
/* only reset the engine related, so starting with 0x44200
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index a4e8e3cf74fd..c628be05fbfe 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
intel_vgpu_clean_opregion(vgpu);
+ intel_vgpu_reset_ggtt(vgpu, true);
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f7f2aa71d8d9..a262a64f5625 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
return true;
}
+static void *compress_next_page(struct drm_i915_error_object *dst)
+{
+ unsigned long page;
+
+ if (dst->page_count >= dst->num_pages)
+ return ERR_PTR(-ENOSPC);
+
+ page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ return dst->pages[dst->page_count++] = (void *)page;
+}
+
static int compress_page(struct compress *c,
void *src,
struct drm_i915_error_object *dst)
@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
do {
if (zstream->avail_out == 0) {
- unsigned long page;
-
- page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
- if (!page)
- return -ENOMEM;
+ zstream->next_out = compress_next_page(dst);
+ if (IS_ERR(zstream->next_out))
+ return PTR_ERR(zstream->next_out);
- dst->pages[dst->page_count++] = (void *)page;
-
- zstream->next_out = (void *)page;
zstream->avail_out = PAGE_SIZE;
}
- if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+ if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
} while (zstream->avail_in);
@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
return 0;
}
-static void compress_fini(struct compress *c,
+static int compress_flush(struct compress *c,
struct drm_i915_error_object *dst)
{
struct z_stream_s *zstream = &c->zstream;
- if (dst) {
- zlib_deflate(zstream, Z_FINISH);
- dst->unused = zstream->avail_out;
- }
+ do {
+ switch (zlib_deflate(zstream, Z_FINISH)) {
+ case Z_OK: /* more space requested */
+ zstream->next_out = compress_next_page(dst);
+ if (IS_ERR(zstream->next_out))
+ return PTR_ERR(zstream->next_out);
+
+ zstream->avail_out = PAGE_SIZE;
+ break;
+
+ case Z_STREAM_END:
+ goto end;
+
+ default: /* any error */
+ return -EIO;
+ }
+ } while (1);
+
+end:
+ memset(zstream->next_out, 0, zstream->avail_out);
+ dst->unused = zstream->avail_out;
+ return 0;
+}
+
+static void compress_fini(struct compress *c,
+ struct drm_i915_error_object *dst)
+{
+ struct z_stream_s *zstream = &c->zstream;
zlib_deflateEnd(zstream);
kfree(zstream->workspace);
-
if (c->tmp)
free_page((unsigned long)c->tmp);
}
@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
return 0;
}
+static int compress_flush(struct compress *c,
+ struct drm_i915_error_object *dst)
+{
+ return 0;
+}
+
static void compress_fini(struct compress *c,
struct drm_i915_error_object *dst)
{
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
unsigned long num_pages;
struct sgt_iter iter;
dma_addr_t dma;
+ int ret;
if (!vma)
return NULL;
@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
+ dst->num_pages = num_pages;
dst->page_count = 0;
dst->unused = 0;
@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
return NULL;
}
+ ret = -EINVAL;
for_each_sgt_dma(dma, iter, vma->pages) {
void __iomem *s;
- int ret;
ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
io_mapping_unmap_atomic(s);
-
if (ret)
- goto unwind;
+ break;
}
- goto out;
-unwind:
- while (dst->page_count--)
- free_page((unsigned long)dst->pages[dst->page_count]);
- kfree(dst);
- dst = NULL;
+ if (ret || compress_flush(&compress, dst)) {
+ while (dst->page_count--)
+ free_page((unsigned long)dst->pages[dst->page_count]);
+ kfree(dst);
+ dst = NULL;
+ }
-out:
compress_fini(&compress, dst);
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
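
Two changes work together here: per-page Z_SYNC_FLUSH (which byte-aligns the stream and emits a marker on every page, hurting the ratio) becomes Z_NO_FLUSH, and because deflate may then still be buffering input when the pages run out, compress_flush() drains the stream at the end. The drain follows the canonical zlib shape:

        /* keep calling Z_FINISH until the stream is fully emitted */
        do {
                int err = zlib_deflate(zstream, Z_FINISH);

                if (err == Z_STREAM_END)
                        break;                  /* everything flushed */
                if (err != Z_OK)
                        return -EIO;            /* hard error */

                /* Z_OK here means "more output space, please" */
                zstream->next_out = compress_next_page(dst);
                if (IS_ERR(zstream->next_out))
                        return PTR_ERR(zstream->next_out);
                zstream->avail_out = PAGE_SIZE;
        } while (1);
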
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index f893a4e8b783..8710fb18ed74 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -135,6 +135,7 @@ struct i915_gpu_state {
struct drm_i915_error_object {
u64 gtt_offset;
u64 gtt_size;
+ int num_pages;
int page_count;
int unused;
u32 *pages[0];
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 90628a47ae17..29877969310d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
spin_unlock(&i915->irq_lock);
}
-static void
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
- u32 *iir)
+static u32
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
{
void __iomem * const regs = dev_priv->regs;
+ u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
- return;
+ return 0;
+
+ iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+ if (likely(iir))
+ raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
- *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
- if (likely(*iir))
- raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+ return iir;
}
static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
- const u32 master_ctl, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
{
- if (!(master_ctl & GEN11_GU_MISC_IRQ))
- return;
-
- if (unlikely(!iir)) {
- DRM_ERROR("GU_MISC iir blank!\n");
- return;
- }
-
if (iir & GEN11_GU_MISC_GSE)
intel_opregion_asle_intr(dev_priv);
- else
- DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
}
static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(i915);
}
- gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+ gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
/* Acknowledge and enable interrupts. */
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
- gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+ gen11_gu_misc_irq_handler(i915, gu_misc_iir);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6a4d1388ad2d..1df3ce134cd0 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
GEN10_FEATURES, \
GEN(11), \
.ddb_size = 2048, \
- .has_csr = 0, \
.has_logical_ring_elsq = 1
static const struct intel_device_info intel_icelake_11_info = {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 0b976dfd04df..92ecb9bf982c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -600,7 +600,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
- mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
+ mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
sizeof(struct drm_plane),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index 790d39f816dc..b557687b1964 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -153,8 +153,8 @@ int msm_dss_parse_clock(struct platform_device *pdev,
return 0;
}
- mp->clk_config = devm_kzalloc(&pdev->dev,
- sizeof(struct dss_clk) * num_clk,
+ mp->clk_config = devm_kcalloc(&pdev->dev,
+ num_clk, sizeof(struct dss_clk),
GFP_KERNEL);
if (!mp->clk_config)
return -ENOMEM;
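
This conversion (and the mediatek one above) trades an open-coded n * size for devm_kcalloc(), whose point is the overflow check: a large count can wrap the multiplication, especially on 32-bit, and yield an undersized buffer. The shape of the change:

        /* before: silent wraparound if num_clk * sizeof(...) overflows */
        buf = devm_kzalloc(dev, sizeof(struct dss_clk) * num_clk, GFP_KERNEL);

        /* after: devm_kcalloc() returns NULL on multiplication overflow */
        buf = devm_kcalloc(dev, num_clk, sizeof(struct dss_clk), GFP_KERNEL);
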
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 5691dfa1db6f..041e7daf8a33 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -900,9 +900,22 @@ static enum drm_connector_status
nv50_mstc_detect(struct drm_connector *connector, bool force)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
+ enum drm_connector_status conn_status;
+ int ret;
+
if (!mstc->port)
return connector_status_disconnected;
- return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
+
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return connector_status_disconnected;
+
+ conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
+ mstc->port);
+
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ return conn_status;
}
static void
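
drm_dp_mst_detect_port() ends up doing AUX traffic to the MST topology, so the GPU must be awake, and .detect can run while the device is runtime-suspended. One quirk worth knowing: pm_runtime_get_sync() raises the usage count even when it fails, so a fully balanced version also drops the reference on the error path (the hunk above returns early without doing so):

        ret = pm_runtime_get_sync(dev);
        if (ret < 0 && ret != -EACCES) {
                pm_runtime_put_autosuspend(dev);        /* balance the failed get */
                return connector_status_disconnected;
        }

        /* ... AUX / topology access ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
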
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index a534b225e31b..5fa0441bb6df 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev)
}
static const struct of_device_id vexpress_muxfpga_match[] = {
- { .compatible = "arm,vexpress-muxfpga", }
+ { .compatible = "arm,vexpress-muxfpga", },
+ {}
};
static struct platform_driver vexpress_muxfpga_driver = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index e36004fbe453..2a15f2f9271e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -81,9 +81,19 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
int i;
for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) {
- unsigned long ideal = rate * i;
+ u64 ideal = (u64)rate * i;
unsigned long rounded;
+ /*
+ * ideal has overflowed the max value that can be stored in an
+ * unsigned long, and every clk operation we might do on a
+ * truncated u64 value will give us incorrect results.
+ * Let's just stop there since bigger dividers will result in
+ * the same overflow issue.
+ */
+ if (ideal > ULONG_MAX)
+ goto out;
+
rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
ideal);
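
Concretely: on a 32-bit unsigned long the product rate * i wraps once it passes 2^32 - 1, and clk_hw_round_rate() would then be asked about a bogus small rate. A worked instance of the failure the cast avoids, inside the divider loop:

        unsigned long rate = 148500000;         /* 148.5 MHz pixel clock */
        unsigned long bad  = rate * 30;         /* wraps to 160032704 on 32-bit */
        u64 good = (u64)rate * 30;              /* 4455000000, kept intact */

        if (good > ULONG_MAX)
                break;  /* clk API works in unsigned long; larger dividers only overflow more */
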
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index dd19d674055c..8b0cd08034e0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ .compatible = "allwinner,sun8i-a83t-display-engine" },
{ .compatible = "allwinner,sun8i-h3-display-engine" },
- { .compatible = "allwinner,sun8i-r40-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
{ }
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 82502b351aec..a564b5dfe082 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
.has_phy_clk = true,
- .has_second_pll = true,
.phy_init = &sun8i_hdmi_phy_init_h3,
.phy_disable = &sun8i_hdmi_phy_disable_h3,
.phy_config = &sun8i_hdmi_phy_config_h3,
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index fc3713608f78..cb65b0ed53fd 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.vi_num = 1,
};
-static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
- .ccsc = 0,
- .mod_rate = 297000000,
- .scaler_mask = 0xf,
- .ui_num = 3,
- .vi_num = 1,
-};
-
-static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
- .ccsc = 1,
- .mod_rate = 297000000,
- .scaler_mask = 0x3,
- .ui_num = 1,
- .vi_num = 1,
-};
-
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
@@ -583,14 +567,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.data = &sun8i_h3_mixer0_cfg,
},
{
- .compatible = "allwinner,sun8i-r40-de2-mixer-0",
- .data = &sun8i_r40_mixer0_cfg,
- },
- {
- .compatible = "allwinner,sun8i-r40-de2-mixer-1",
- .data = &sun8i_r40_mixer1_cfg,
- },
- {
.compatible = "allwinner,sun8i-v3s-de2-mixer",
.data = &sun8i_v3s_mixer_cfg,
},
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 55fe398d8290..d5240b777a8f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
/* sun4i_drv uses this list to check if a device node is a TCON TOP */
const struct of_device_id sun8i_tcon_top_of_table[] = {
- { .compatible = "allwinner,sun8i-r40-tcon-top" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dbb62f6eb48a..dd9ffded223b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev,
{
drm_fb_helper_unregister_fbi(&ufbdev->helper);
drm_fb_helper_fini(&ufbdev->helper);
- drm_framebuffer_unregister_private(&ufbdev->ufb.base);
- drm_framebuffer_cleanup(&ufbdev->ufb.base);
- drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+ if (ufbdev->ufb.obj) {
+ drm_framebuffer_unregister_private(&ufbdev->ufb.base);
+ drm_framebuffer_cleanup(&ufbdev->ufb.base);
+ drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+ }
}
int udl_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cfb50fedfa2b..a3275fa66b7b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
vc4_state->crtc_h);
+ vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[0] == VC4_SCALING_NONE);
+
if (num_planes > 1) {
vc4_state->is_yuv = true;
@@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_get_scaling_mode(vc4_state->src_h[1],
vc4_state->crtc_h);
- /* YUV conversion requires that scaling be enabled,
- * even on a plane that's otherwise 1:1. Choose TPZ
- * for simplicity.
+ /* YUV conversion requires that horizontal scaling be enabled,
+ * even on a plane that's otherwise 1:1. Looks like only PPF
+ * works in that case, so let's pick that one.
*/
- if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
- vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
- if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
- vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+ if (vc4_state->is_unity)
+ vc4_state->x_scaling[0] = VC4_SCALING_PPF;
} else {
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
vc4_state->y_scaling[1] = VC4_SCALING_NONE;
}
- vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
- vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
- vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
- vc4_state->y_scaling[1] == VC4_SCALING_NONE);
-
/* No configuring scaling on the cursor plane, since it gets
non-vblank-synced updates, and scaling requires
LBM changes which have to be vblank-synced.
@@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
}
- if (!vc4_state->is_unity) {
+ if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
/* LBM Base Address. */
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
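
These two hunks split what used to be a single flag. is_unity now records what userspace asked for and is computed before the YUV path force-enables PPF, while the LBM decision checks the scaling channels directly, because LBM is needed whenever anything actually scales, including that forced PPF pass. In effect:

        /* userspace intent, captured before any forced scaling */
        vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
                               vc4_state->y_scaling[0] == VC4_SCALING_NONE);

        /* hardware reality: LBM whenever any channel scales */
        bool needs_lbm = vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
                         vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
                         vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
                         vc4_state->y_scaling[1] != VC4_SCALING_NONE;
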
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 1f134570b759..f0ab6b2313bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
{
struct vmw_buffer_object *vbo =
container_of(bo, struct vmw_buffer_object, base);
- struct ttm_operation_ctx ctx = { interruptible, true };
+ struct ttm_operation_ctx ctx = { interruptible, false };
int ret;
if (vbo->pin_count > 0)
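
The fix is easy to misread because ttm_operation_ctx is initialised positionally: the second member is no_wait_gpu, so { interruptible, true } meant "fail rather than wait for the GPU", the wrong policy for a validation path. Designated initialisers would have made both the intent and the bug obvious:

        struct ttm_operation_ctx ctx = {
                .interruptible = interruptible,
                .no_wait_gpu = false,   /* validation may wait for the GPU */
        };
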
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 23beff5d8e3c..6a712a8d59e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
struct drm_rect *rects)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_rect bounding_box = {0};
u64 total_pixels = 0, pixel_mem, bb_mem;
int i;
for (i = 0; i < num_rects; i++) {
/*
- * Currently this check is limiting the topology within max
- * texture/screentarget size. This should change in future when
- * user-space support multiple fb with topology.
+ * For STDU, only the individual screen (screen target) is limited by
+ * the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
*/
- if (rects[i].x1 < 0 || rects[i].y1 < 0 ||
- rects[i].x2 > mode_config->max_width ||
- rects[i].y2 > mode_config->max_height) {
- DRM_ERROR("Invalid GUI layout.\n");
+ if (dev_priv->active_display_unit == vmw_du_screen_target &&
+ (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
+ drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
+ DRM_ERROR("Screen size not supported.\n");
return -EINVAL;
}
@@ -1615,7 +1613,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
- if (!new_crtc_state->enable && old_crtc_state->enable) {
+ if (!new_crtc_state->enable) {
rects[i].x1 = 0;
rects[i].y1 = 0;
rects[i].x2 = 0;
@@ -2216,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
if (dev_priv->assume_16bpp)
assumed_bpp = 2;
+ max_width = min(max_width, dev_priv->texture_max_width);
+ max_height = min(max_height, dev_priv->texture_max_height);
+
+ /*
+ * For STDU, a mode is additionally limited by the
+ * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
+ */
if (dev_priv->active_display_unit == vmw_du_screen_target) {
max_width = min(max_width, dev_priv->stdu_max_width);
- max_width = min(max_width, dev_priv->texture_max_width);
-
max_height = min(max_height, dev_priv->stdu_max_height);
- max_height = min(max_height, dev_priv->texture_max_height);
}
/* Add preferred mode */
@@ -2376,6 +2378,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
void __user *user_rects;
@@ -2421,6 +2424,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
drm_rects[i].y1 = curr_rect.y;
drm_rects[i].x2 = curr_rect.x + curr_rect.w;
drm_rects[i].y2 = curr_rect.y + curr_rect.h;
+
+ /*
+ * Currently this check limits the topology to within
+ * mode_config->max (which is actually the max texture size
+ * supported by the virtual device). The limit is here to
+ * accommodate window managers that create a big framebuffer
+ * for the whole topology.
+ */
+ if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
+ drm_rects[i].x2 > mode_config->max_width ||
+ drm_rects[i].y2 > mode_config->max_height) {
+ DRM_ERROR("Invalid GUI layout.\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
}
ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 93f6b96ca7bb..f30e839f7bfd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
dev_priv->active_display_unit = vmw_du_screen_target;
- if (dev_priv->capabilities & SVGA_CAP_3D) {
- /*
- * For 3D VMs, display (scanout) buffer size is the smaller of
- * max texture and max STDU
- */
- uint32_t max_width, max_height;
-
- max_width = min(dev_priv->texture_max_width,
- dev_priv->stdu_max_width);
- max_height = min(dev_priv->texture_max_height,
- dev_priv->stdu_max_height);
-
- dev->mode_config.max_width = max_width;
- dev->mode_config.max_height = max_height;
- } else {
- /*
- * Given various display aspect ratios, there's no way to
- * estimate these using prim_bb_mem. So just set these to
- * something arbitrarily large and we will reject any layout
- * that doesn't fit prim_bb_mem later
- */
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- }
-
vmw_kms_create_implicit_placement_property(dev_priv, false);
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index e125233e074b..80a01cd4c051 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
*srf_out = NULL;
if (for_scanout) {
- uint32_t max_width, max_height;
-
if (!svga3dsurface_is_screen_target_format(format)) {
DRM_ERROR("Invalid Screen Target surface format.");
return -EINVAL;
}
- max_width = min(dev_priv->texture_max_width,
- dev_priv->stdu_max_width);
- max_height = min(dev_priv->texture_max_height,
- dev_priv->stdu_max_height);
-
- if (size.width > max_width || size.height > max_height) {
+ if (size.width > dev_priv->texture_max_width ||
+ size.height > dev_priv->texture_max_height) {
DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
size.width, size.height,
- max_width, max_height);
+ dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
return -EINVAL;
}
} else {
@@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
srf->res.backup_size += sizeof(SVGA3dDXSOState);
+ /*
+ * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface
+ * whose size exceeds the STDU max width/height. This is really a
+ * workaround to support creation of the big framebuffer some user-space
+ * requests for the whole topology. That big framebuffer is never bound
+ * to a screen target, since prepare_fb creates a separate surface, so
+ * it's safe to drop the SVGA3D_SURFACE_SCREENTARGET flag here.
+ */
if (dev_priv->active_display_unit == vmw_du_screen_target &&
- for_scanout)
+ for_scanout && size.width <= dev_priv->stdu_max_width &&
+ size.height <= dev_priv->stdu_max_height)
srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
/*
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index a96bf46bc483..cf2a18571d48 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -215,6 +215,8 @@ static void vga_switcheroo_enable(void)
return;
client->id = ret | ID_BIT_AUDIO;
+ if (client->ops->gpu_bound)
+ client->ops->gpu_bound(client->pdev, ret);
}
vga_switcheroo_debugfs_init(&vgasr_priv);
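
For context, a minimal sketch of a client supplying the new gpu_bound hook
(the example_* names and body are assumptions; only the callback signature
follows from the call site above):

/* Hypothetical audio client: gpu_bound fires from vga_switcheroo_enable()
 * once the client id is known, e.g. to set up runtime PM on the GPU. */
static void example_audio_gpu_bound(struct pci_dev *pdev,
				    enum vga_switcheroo_client_id id)
{
	dev_info(&pdev->dev, "bound as vga_switcheroo client %d\n", id);
}

static const struct vga_switcheroo_client_ops example_audio_ops = {
	.set_gpu_state	= example_set_gpu_state, /* assumed defined elsewhere */
	.gpu_bound	= example_audio_gpu_bound,
};
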
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 61e1953ff921..18c846477ba2 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -182,6 +182,19 @@ config HID_BETOP_FF
Currently the following devices are known to be supported:
- BETOP 2185 PC & BFM MODE
+config HID_BIGBEN_FF
+ tristate "BigBen Interactive Kids' gamepad support"
+ depends on USB_HID
+ depends on NEW_LEDS
+ depends on LEDS_CLASS
+ select INPUT_FF_MEMLESS
+ default !EXPERT
+ help
+ Support for the "Kid-friendly Wired Controller" PS3OFMINIPAD
+ gamepad made by BigBen Interactive, originally sold as a PS3
+ accessory. This driver fixes input mapping and adds support for
+ force feedback effects and LEDs on the device.
+
config HID_CHERRY
tristate "Cherry Cymotion keyboard"
depends on HID
@@ -351,7 +364,7 @@ config HOLTEK_FF
config HID_GOOGLE_HAMMER
tristate "Google Hammer Keyboard"
- depends on USB_HID && LEDS_CLASS
+ depends on USB_HID && LEDS_CLASS && MFD_CROS_EC
---help---
Say Y here if you have a Google Hammer device.
@@ -596,6 +609,7 @@ config HID_MICROSOFT
tristate "Microsoft non-fully HID-compliant devices"
depends on HID
default !EXPERT
+ select INPUT_FF_MEMLESS
---help---
Support for Microsoft devices that are not fully compliant with HID standard.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index bd7ac53b75c5..896a51ce7ce0 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_HID_ASUS) += hid-asus.o
obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
obj-$(CONFIG_HID_BETOP_FF) += hid-betopff.o
+obj-$(CONFIG_HID_BIGBEN_FF) += hid-bigbenff.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CMEDIA) += hid-cmedia.o
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
new file mode 100644
index 000000000000..3f6abd190df4
--- /dev/null
+++ b/drivers/hid/hid-bigbenff.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * LED & force feedback support for BigBen Interactive
+ *
+ * 0x146b:0x0902 "Bigben Interactive Bigben Game Pad"
+ * "Kid-friendly Wired Controller" PS3OFMINIPAD SONY
+ * sold for use with the PS3
+ *
+ * Copyright (c) 2018 Hanno Zulla <kontakt@hanno.de>
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/leds.h>
+#include <linux/hid.h>
+
+#include "hid-ids.h"
+
+
+/*
+ * The original descriptor for 0x146b:0x0902
+ *
+ * 0x05, 0x01, // Usage Page (Generic Desktop Ctrls)
+ * 0x09, 0x05, // Usage (Game Pad)
+ * 0xA1, 0x01, // Collection (Application)
+ * 0x15, 0x00, // Logical Minimum (0)
+ * 0x25, 0x01, // Logical Maximum (1)
+ * 0x35, 0x00, // Physical Minimum (0)
+ * 0x45, 0x01, // Physical Maximum (1)
+ * 0x75, 0x01, // Report Size (1)
+ * 0x95, 0x0D, // Report Count (13)
+ * 0x05, 0x09, // Usage Page (Button)
+ * 0x19, 0x01, // Usage Minimum (0x01)
+ * 0x29, 0x0D, // Usage Maximum (0x0D)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x95, 0x03, // Report Count (3)
+ * 0x81, 0x01, // Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x05, 0x01, // Usage Page (Generic Desktop Ctrls)
+ * 0x25, 0x07, // Logical Maximum (7)
+ * 0x46, 0x3B, 0x01, // Physical Maximum (315)
+ * 0x75, 0x04, // Report Size (4)
+ * 0x95, 0x01, // Report Count (1)
+ * 0x65, 0x14, // Unit (System: English Rotation, Length: Centimeter)
+ * 0x09, 0x39, // Usage (Hat switch)
+ * 0x81, 0x42, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,Null State)
+ * 0x65, 0x00, // Unit (None)
+ * 0x95, 0x01, // Report Count (1)
+ * 0x81, 0x01, // Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x26, 0xFF, 0x00, // Logical Maximum (255)
+ * 0x46, 0xFF, 0x00, // Physical Maximum (255)
+ * 0x09, 0x30, // Usage (X)
+ * 0x09, 0x31, // Usage (Y)
+ * 0x09, 0x32, // Usage (Z)
+ * 0x09, 0x35, // Usage (Rz)
+ * 0x75, 0x08, // Report Size (8)
+ * 0x95, 0x04, // Report Count (4)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x06, 0x00, 0xFF, // Usage Page (Vendor Defined 0xFF00)
+ * 0x09, 0x20, // Usage (0x20)
+ * 0x09, 0x21, // Usage (0x21)
+ * 0x09, 0x22, // Usage (0x22)
+ * 0x09, 0x23, // Usage (0x23)
+ * 0x09, 0x24, // Usage (0x24)
+ * 0x09, 0x25, // Usage (0x25)
+ * 0x09, 0x26, // Usage (0x26)
+ * 0x09, 0x27, // Usage (0x27)
+ * 0x09, 0x28, // Usage (0x28)
+ * 0x09, 0x29, // Usage (0x29)
+ * 0x09, 0x2A, // Usage (0x2A)
+ * 0x09, 0x2B, // Usage (0x2B)
+ * 0x95, 0x0C, // Report Count (12)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x0A, 0x21, 0x26, // Usage (0x2621)
+ * 0x95, 0x08, // Report Count (8)
+ * 0xB1, 0x02, // Feature (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile)
+ * 0x0A, 0x21, 0x26, // Usage (0x2621)
+ * 0x91, 0x02, // Output (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile)
+ * 0x26, 0xFF, 0x03, // Logical Maximum (1023)
+ * 0x46, 0xFF, 0x03, // Physical Maximum (1023)
+ * 0x09, 0x2C, // Usage (0x2C)
+ * 0x09, 0x2D, // Usage (0x2D)
+ * 0x09, 0x2E, // Usage (0x2E)
+ * 0x09, 0x2F, // Usage (0x2F)
+ * 0x75, 0x10, // Report Size (16)
+ * 0x95, 0x04, // Report Count (4)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0xC0, // End Collection
+ */
+
+#define PID0902_RDESC_ORIG_SIZE 137
+
+/*
+ * The fixed descriptor for 0x146b:0x0902
+ *
+ * - map buttons according to gamepad.rst
+ * - assign right stick from Z/Rz to Rx/Ry
+ * - map previously unused analog trigger data to Z/RZ
+ * - simplify feature and output descriptor
+ */
+static __u8 pid0902_rdesc_fixed[] = {
+ 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
+ 0x09, 0x05, /* Usage (Game Pad) */
+ 0xA1, 0x01, /* Collection (Application) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x25, 0x01, /* Logical Maximum (1) */
+ 0x35, 0x00, /* Physical Minimum (0) */
+ 0x45, 0x01, /* Physical Maximum (1) */
+ 0x75, 0x01, /* Report Size (1) */
+ 0x95, 0x0D, /* Report Count (13) */
+ 0x05, 0x09, /* Usage Page (Button) */
+ 0x09, 0x05, /* Usage (BTN_WEST) */
+ 0x09, 0x01, /* Usage (BTN_SOUTH) */
+ 0x09, 0x02, /* Usage (BTN_EAST) */
+ 0x09, 0x04, /* Usage (BTN_NORTH) */
+ 0x09, 0x07, /* Usage (BTN_TL) */
+ 0x09, 0x08, /* Usage (BTN_TR) */
+ 0x09, 0x09, /* Usage (BTN_TL2) */
+ 0x09, 0x0A, /* Usage (BTN_TR2) */
+ 0x09, 0x0B, /* Usage (BTN_SELECT) */
+ 0x09, 0x0C, /* Usage (BTN_START) */
+ 0x09, 0x0E, /* Usage (BTN_THUMBL) */
+ 0x09, 0x0F, /* Usage (BTN_THUMBR) */
+ 0x09, 0x0D, /* Usage (BTN_MODE) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x75, 0x01, /* Report Size (1) */
+ 0x95, 0x03, /* Report Count (3) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
+ 0x25, 0x07, /* Logical Maximum (7) */
+ 0x46, 0x3B, 0x01, /* Physical Maximum (315) */
+ 0x75, 0x04, /* Report Size (4) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x65, 0x14, /* Unit (System: English Rotation, Length: Centimeter) */
+ 0x09, 0x39, /* Usage (Hat switch) */
+ 0x81, 0x42, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,Null State) */
+ 0x65, 0x00, /* Unit (None) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
+ 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
+ 0x09, 0x30, /* Usage (X) */
+ 0x09, 0x31, /* Usage (Y) */
+ 0x09, 0x33, /* Usage (Rx) */
+ 0x09, 0x34, /* Usage (Ry) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x04, /* Report Count (4) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x95, 0x0A, /* Report Count (10) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
+ 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
+ 0x09, 0x32, /* Usage (Z) */
+ 0x09, 0x35, /* Usage (Rz) */
+ 0x95, 0x02, /* Report Count (2) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined 0xFF00) */
+ 0xB1, 0x02, /* Feature (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) */
+ 0x0A, 0x21, 0x26, /* Usage (0x2621) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x91, 0x02, /* Output (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) */
+ 0x0A, 0x21, 0x26, /* Usage (0x2621) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0xC0, /* End Collection */
+};
+
+#define NUM_LEDS 4
+
+struct bigben_device {
+ struct hid_device *hid;
+ struct hid_report *report;
+ u8 led_state; /* LED1 = 1 .. LED4 = 8 */
+ u8 right_motor_on; /* right motor off/on 0/1 */
+ u8 left_motor_force; /* left motor force 0-255 */
+ struct led_classdev *leds[NUM_LEDS];
+ bool work_led;
+ bool work_ff;
+ struct work_struct worker;
+};
+
+
+static void bigben_worker(struct work_struct *work)
+{
+ struct bigben_device *bigben = container_of(work,
+ struct bigben_device, worker);
+ struct hid_field *report_field = bigben->report->field[0];
+
+ if (bigben->work_led) {
+ bigben->work_led = false;
+ report_field->value[0] = 0x01; /* 1 = led message */
+ report_field->value[1] = 0x08; /* reserved value, always 8 */
+ report_field->value[2] = bigben->led_state;
+ report_field->value[3] = 0x00; /* padding */
+ report_field->value[4] = 0x00; /* padding */
+ report_field->value[5] = 0x00; /* padding */
+ report_field->value[6] = 0x00; /* padding */
+ report_field->value[7] = 0x00; /* padding */
+ hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
+ }
+
+ if (bigben->work_ff) {
+ bigben->work_ff = false;
+ report_field->value[0] = 0x02; /* 2 = rumble effect message */
+ report_field->value[1] = 0x08; /* reserved value, always 8 */
+ report_field->value[2] = bigben->right_motor_on;
+ report_field->value[3] = bigben->left_motor_force;
+ report_field->value[4] = 0xff; /* duration 0-254 (255 = nonstop) */
+ report_field->value[5] = 0x00; /* padding */
+ report_field->value[6] = 0x00; /* padding */
+ report_field->value[7] = 0x00; /* padding */
+ hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
+ }
+}
+
+static int hid_bigben_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct bigben_device *bigben = data;
+ u8 right_motor_on;
+ u8 left_motor_force;
+
+ if (effect->type != FF_RUMBLE)
+ return 0;
+
+ right_motor_on = effect->u.rumble.weak_magnitude ? 1 : 0;
+ left_motor_force = effect->u.rumble.strong_magnitude / 256;
+
+ if (right_motor_on != bigben->right_motor_on ||
+ left_motor_force != bigben->left_motor_force) {
+ bigben->right_motor_on = right_motor_on;
+ bigben->left_motor_force = left_motor_force;
+ bigben->work_ff = true;
+ schedule_work(&bigben->worker);
+ }
+
+ return 0;
+}
+
+static void bigben_set_led(struct led_classdev *led,
+ enum led_brightness value)
+{
+ struct device *dev = led->dev->parent;
+ struct hid_device *hid = to_hid_device(dev);
+ struct bigben_device *bigben = hid_get_drvdata(hid);
+ int n;
+ bool work;
+
+ if (!bigben) {
+ hid_err(hid, "no device data\n");
+ return;
+ }
+
+ for (n = 0; n < NUM_LEDS; n++) {
+ if (led == bigben->leds[n]) {
+ if (value == LED_OFF) {
+ work = (bigben->led_state & BIT(n));
+ bigben->led_state &= ~BIT(n);
+ } else {
+ work = !(bigben->led_state & BIT(n));
+ bigben->led_state |= BIT(n);
+ }
+
+ if (work) {
+ bigben->work_led = true;
+ schedule_work(&bigben->worker);
+ }
+ return;
+ }
+ }
+}
+
+static enum led_brightness bigben_get_led(struct led_classdev *led)
+{
+ struct device *dev = led->dev->parent;
+ struct hid_device *hid = to_hid_device(dev);
+ struct bigben_device *bigben = hid_get_drvdata(hid);
+ int n;
+
+ if (!bigben) {
+ hid_err(hid, "no device data\n");
+ return LED_OFF;
+ }
+
+ for (n = 0; n < NUM_LEDS; n++) {
+ if (led == bigben->leds[n])
+ return (bigben->led_state & BIT(n)) ? LED_ON : LED_OFF;
+ }
+
+ return LED_OFF;
+}
+
+static void bigben_remove(struct hid_device *hid)
+{
+ struct bigben_device *bigben = hid_get_drvdata(hid);
+
+ cancel_work_sync(&bigben->worker);
+ hid_hw_close(hid);
+ hid_hw_stop(hid);
+}
+
+static int bigben_probe(struct hid_device *hid,
+ const struct hid_device_id *id)
+{
+ struct bigben_device *bigben;
+ struct hid_input *hidinput;
+ struct list_head *report_list;
+ struct led_classdev *led;
+ char *name;
+ size_t name_sz;
+ int n, error;
+
+ bigben = devm_kzalloc(&hid->dev, sizeof(*bigben), GFP_KERNEL);
+ if (!bigben)
+ return -ENOMEM;
+ hid_set_drvdata(hid, bigben);
+ bigben->hid = hid;
+
+ error = hid_parse(hid);
+ if (error) {
+ hid_err(hid, "parse failed\n");
+ return error;
+ }
+
+ error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (error) {
+ hid_err(hid, "hw start failed\n");
+ return error;
+ }
+
+ report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ bigben->report = list_entry(report_list->next,
+ struct hid_report, list);
+
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ set_bit(FF_RUMBLE, hidinput->input->ffbit);
+
+ INIT_WORK(&bigben->worker, bigben_worker);
+
+ error = input_ff_create_memless(hidinput->input, bigben,
+ hid_bigben_play_effect);
+ if (error)
+ return error;
+
+ name_sz = strlen(dev_name(&hid->dev)) + strlen(":red:bigben#") + 1;
+
+ for (n = 0; n < NUM_LEDS; n++) {
+ led = devm_kzalloc(
+ &hid->dev,
+ sizeof(struct led_classdev) + name_sz,
+ GFP_KERNEL
+ );
+ if (!led)
+ return -ENOMEM;
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz,
+ "%s:red:bigben%d",
+ dev_name(&hid->dev), n + 1
+ );
+ led->name = name;
+ led->brightness = (n == 0) ? LED_ON : LED_OFF;
+ led->max_brightness = 1;
+ led->brightness_get = bigben_get_led;
+ led->brightness_set = bigben_set_led;
+ bigben->leds[n] = led;
+ error = devm_led_classdev_register(&hid->dev, led);
+ if (error)
+ return error;
+ }
+
+ /* initial state: LED1 is on, no rumble effect */
+ bigben->led_state = BIT(0);
+ bigben->right_motor_on = 0;
+ bigben->left_motor_force = 0;
+ bigben->work_led = true;
+ bigben->work_ff = true;
+ schedule_work(&bigben->worker);
+
+ hid_info(hid, "LED and force feedback support for BigBen gamepad\n");
+
+ return 0;
+}
+
+static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize == PID0902_RDESC_ORIG_SIZE) {
+ rdesc = pid0902_rdesc_fixed;
+ *rsize = sizeof(pid0902_rdesc_fixed);
+ } else
+ hid_warn(hid, "unexpected rdesc, please submit for review\n");
+ return rdesc;
+}
+
+static const struct hid_device_id bigben_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_BIGBEN, USB_DEVICE_ID_BIGBEN_PS3OFMINIPAD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, bigben_devices);
+
+static struct hid_driver bigben_driver = {
+ .name = "bigben",
+ .id_table = bigben_devices,
+ .probe = bigben_probe,
+ .report_fixup = bigben_report_fixup,
+ .remove = bigben_remove,
+};
+module_hid_driver(bigben_driver);
+
+MODULE_LICENSE("GPL");
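
To exercise the rumble path above from userspace, a hedged sketch (device
node path is an assumption) that uploads and plays one FF_RUMBLE effect via
evdev; per hid_bigben_play_effect(), the strong magnitude is scaled down to
left_motor_force (/256) and any non-zero weak magnitude switches the right
motor on:

/* Hypothetical test program for the evdev force-feedback interface. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct ff_effect effect;
	struct input_event play;
	int fd = open("/dev/input/event0", O_RDWR); /* path assumed */

	if (fd < 0)
		return 1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_RUMBLE;
	effect.id = -1;				/* kernel assigns an id */
	effect.u.rumble.strong_magnitude = 0xc000; /* left motor force */
	effect.u.rumble.weak_magnitude = 0x4000;   /* right motor: on */
	if (ioctl(fd, EVIOCSFF, &effect) < 0)
		return 1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;				/* start playback */
	write(fd, &play, sizeof(play));

	sleep(2);
	close(fd);
	return 0;
}
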
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 44564f61e9cc..5bec9244c45b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -406,7 +406,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
parser->global.report_size = item_udata(item);
- if (parser->global.report_size > 128) {
+ if (parser->global.report_size > 256) {
hid_err(parser->device, "invalid report_size %d\n",
parser->global.report_size);
return -1;
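
For illustration, the limit applies to the Report Size global item of a
report descriptor; a hypothetical fragment that the old 128-bit cap
rejected and the new 256-bit cap accepts:

/* Descriptor fragment (hypothetical), in the same notation used above. */
static const __u8 example_items[] = {
	0x75, 0xC0,	/* Report Size (192) */
	0x95, 0x01,	/* Report Count (1) */
};
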
diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
index ad2e87de7dc5..3f0916b64c60 100644
--- a/drivers/hid/hid-cougar.c
+++ b/drivers/hid/hid-cougar.c
@@ -7,6 +7,7 @@
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/printk.h>
#include "hid-ids.h"
@@ -15,11 +16,9 @@ MODULE_DESCRIPTION("Cougar 500k Gaming Keyboard");
MODULE_LICENSE("GPL");
MODULE_INFO(key_mappings, "G1-G6 are mapped to F13-F18");
-static int cougar_g6_is_space = 1;
-module_param_named(g6_is_space, cougar_g6_is_space, int, 0600);
+static bool g6_is_space = true;
MODULE_PARM_DESC(g6_is_space,
- "If set, G6 programmable key sends SPACE instead of F18 (0=off, 1=on) (default=1)");
-
+ "If true, G6 programmable key sends SPACE instead of F18 (default=true)");
#define COUGAR_VENDOR_USAGE 0xff00ff00
@@ -82,20 +81,23 @@ struct cougar {
static LIST_HEAD(cougar_udev_list);
static DEFINE_MUTEX(cougar_udev_list_lock);
-static void cougar_fix_g6_mapping(struct hid_device *hdev)
+/**
+ * cougar_fix_g6_mapping - configure the mapping for key G6/Spacebar
+ */
+static void cougar_fix_g6_mapping(void)
{
int i;
for (i = 0; cougar_mapping[i][0]; i++) {
if (cougar_mapping[i][0] == COUGAR_KEY_G6) {
cougar_mapping[i][1] =
- cougar_g6_is_space ? KEY_SPACE : KEY_F18;
- hid_info(hdev, "G6 mapped to %s\n",
- cougar_g6_is_space ? "space" : "F18");
+ g6_is_space ? KEY_SPACE : KEY_F18;
+ pr_info("cougar: G6 mapped to %s\n",
+ g6_is_space ? "space" : "F18");
return;
}
}
- hid_warn(hdev, "no mapping defined for G6/spacebar");
+ pr_warn("cougar: no mapping defined for G6/spacebar\n");
}
/*
@@ -154,7 +156,8 @@ static void cougar_remove_shared_data(void *resource)
* Bind the device group's shared data to this cougar struct.
* If no shared data exists for this group, create and initialize it.
*/
-static int cougar_bind_shared_data(struct hid_device *hdev, struct cougar *cougar)
+static int cougar_bind_shared_data(struct hid_device *hdev,
+ struct cougar *cougar)
{
struct cougar_shared *shared;
int error = 0;
@@ -228,7 +231,6 @@ static int cougar_probe(struct hid_device *hdev,
* to it.
*/
if (hdev->collection->usage == HID_GD_KEYBOARD) {
- cougar_fix_g6_mapping(hdev);
list_for_each_entry_safe(hidinput, next, &hdev->inputs, list) {
if (hidinput->registered && hidinput->input != NULL) {
cougar->shared->input = hidinput->input;
@@ -237,6 +239,8 @@ static int cougar_probe(struct hid_device *hdev,
}
}
} else if (hdev->collection->usage == COUGAR_VENDOR_USAGE) {
+ /* Preinit the mapping table */
+ cougar_fix_g6_mapping();
error = hid_hw_open(hdev);
if (error)
goto fail_stop_and_cleanup;
@@ -257,26 +261,32 @@ static int cougar_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *data, int size)
{
struct cougar *cougar;
+ struct cougar_shared *shared;
unsigned char code, action;
int i;
cougar = hid_get_drvdata(hdev);
- if (!cougar->special_intf || !cougar->shared ||
- !cougar->shared->input || !cougar->shared->enabled)
+ shared = cougar->shared;
+ if (!cougar->special_intf || !shared)
return 0;
+ if (!shared->enabled || !shared->input)
+ return -EPERM;
+
code = data[COUGAR_FIELD_CODE];
action = data[COUGAR_FIELD_ACTION];
for (i = 0; cougar_mapping[i][0]; i++) {
if (code == cougar_mapping[i][0]) {
- input_event(cougar->shared->input, EV_KEY,
+ input_event(shared->input, EV_KEY,
cougar_mapping[i][1], action);
- input_sync(cougar->shared->input);
- return 0;
+ input_sync(shared->input);
+ return -EPERM;
}
}
- hid_warn(hdev, "unmapped special key code %x: ignoring\n", code);
- return 0;
+ /* Avoid warnings on the same unmapped key twice */
+ if (action != 0)
+ hid_warn(hdev, "unmapped special key code %0x: ignoring\n", code);
+ return -EPERM;
}
static void cougar_remove(struct hid_device *hdev)
@@ -293,6 +303,26 @@ static void cougar_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+static int cougar_param_set_g6_is_space(const char *val,
+ const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool(val, kp);
+ if (ret)
+ return ret;
+
+ cougar_fix_g6_mapping();
+
+ return 0;
+}
+
+static const struct kernel_param_ops cougar_g6_is_space_ops = {
+ .set = cougar_param_set_g6_is_space,
+ .get = param_get_bool,
+};
+module_param_cb(g6_is_space, &cougar_g6_is_space_ops, &g6_is_space, 0644);
+
static struct hid_device_id cougar_id_table[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD) },
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index 07e26c3567eb..0bfd6d1b44c1 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -497,7 +497,7 @@ static int elan_probe(struct hid_device *hdev, const struct hid_device_id *id)
return 0;
if (!drvdata->input) {
- hid_err(hdev, "Input device is not registred\n");
+ hid_err(hdev, "Input device is not registered\n");
ret = -ENAVAIL;
goto err;
}
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 6bf4da7ad63a..ee5e0bdcf078 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -13,16 +13,268 @@
* any later version.
*/
+#include <linux/acpi.h>
#include <linux/hid.h>
#include <linux/leds.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <asm/unaligned.h>
#include "hid-ids.h"
-#define MAX_BRIGHTNESS 100
+/*
+ * C(hrome)B(ase)A(ttached)S(witch) - a switch exported by the Chrome EC
+ * that reports the state of the "Whiskers" base - attached or detached.
+ * The Whiskers USB device also reports the position of the keyboard -
+ * folded or not. Combining base state and position allows us to generate
+ * proper "Tablet mode" events.
+ */
+struct cbas_ec {
+ struct device *dev; /* The platform device (EC) */
+ struct input_dev *input;
+ bool base_present;
+ struct notifier_block notifier;
+};
-/* HID usage for keyboard backlight (Alphanumeric display brightness) */
-#define HID_AD_BRIGHTNESS 0x00140046
+static struct cbas_ec cbas_ec;
+static DEFINE_SPINLOCK(cbas_ec_lock);
+static DEFINE_MUTEX(cbas_ec_reglock);
+
+static bool cbas_parse_base_state(const void *data)
+{
+ u32 switches = get_unaligned_le32(data);
+
+ return !!(switches & BIT(EC_MKBP_BASE_ATTACHED));
+}
+
+static int cbas_ec_query_base(struct cros_ec_device *ec_dev, bool get_state,
+ bool *state)
+{
+ struct ec_params_mkbp_info *params;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(sizeof(u32), sizeof(*params)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_MKBP_INFO;
+ msg->version = 1;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(u32);
+ params = (struct ec_params_mkbp_info *)msg->data;
+ params->info_type = get_state ?
+ EC_MKBP_INFO_CURRENT : EC_MKBP_INFO_SUPPORTED;
+ params->event_type = EC_MKBP_EVENT_SWITCH;
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret >= 0) {
+ if (ret != sizeof(u32)) {
+ dev_warn(ec_dev->dev, "wrong result size: %d != %zu\n",
+ ret, sizeof(u32));
+ ret = -EPROTO;
+ } else {
+ *state = cbas_parse_base_state(msg->data);
+ ret = 0;
+ }
+ }
+
+ kfree(msg);
+
+ return ret;
+}
+
+static int cbas_ec_notify(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_device *ec = _notify;
+ unsigned long flags;
+ bool base_present;
+
+ if (ec->event_data.event_type == EC_MKBP_EVENT_SWITCH) {
+ base_present = cbas_parse_base_state(
+ &ec->event_data.data.switches);
+ dev_dbg(cbas_ec.dev,
+ "%s: base: %d\n", __func__, base_present);
+
+ if (device_may_wakeup(cbas_ec.dev) ||
+ !queued_during_suspend) {
+
+ pm_wakeup_event(cbas_ec.dev, 0);
+
+ spin_lock_irqsave(&cbas_ec_lock, flags);
+
+ /*
+ * While the input layer dedupes the events, we do not want
+ * to disrupt the state reported by the base by
+ * overriding it with state reported by the LID. Only
+ * report changes, as we assume that on attach the base
+ * is not folded.
+ */
+ if (base_present != cbas_ec.base_present) {
+ input_report_switch(cbas_ec.input,
+ SW_TABLET_MODE,
+ !base_present);
+ input_sync(cbas_ec.input);
+ cbas_ec.base_present = base_present;
+ }
+
+ spin_unlock_irqrestore(&cbas_ec_lock, flags);
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static __maybe_unused int cbas_ec_resume(struct device *dev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(dev->parent);
+ bool base_present;
+ int error;
+
+ error = cbas_ec_query_base(ec, true, &base_present);
+ if (error) {
+ dev_warn(dev, "failed to fetch base state on resume: %d\n",
+ error);
+ } else {
+ spin_lock_irq(&cbas_ec_lock);
+
+ cbas_ec.base_present = base_present;
+
+ /*
+ * Only report if base is disconnected. If base is connected,
+ * it will resend its state on resume, and we'll update it
+ * in hammer_event().
+ */
+ if (!cbas_ec.base_present) {
+ input_report_switch(cbas_ec.input, SW_TABLET_MODE, 1);
+ input_sync(cbas_ec.input);
+ }
+
+ spin_unlock_irq(&cbas_ec_lock);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cbas_ec_pm_ops, NULL, cbas_ec_resume);
+
+static void cbas_ec_set_input(struct input_dev *input)
+{
+ /* Take the lock so hammer_event() does not race with us here */
+ spin_lock_irq(&cbas_ec_lock);
+ cbas_ec.input = input;
+ spin_unlock_irq(&cbas_ec_lock);
+}
+
+static int __cbas_ec_probe(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct input_dev *input;
+ bool base_supported;
+ int error;
+
+ error = cbas_ec_query_base(ec, false, &base_supported);
+ if (error)
+ return error;
+
+ if (!base_supported)
+ return -ENXIO;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Whiskers Tablet Mode Switch";
+ input->id.bustype = BUS_HOST;
+
+ input_set_capability(input, EV_SW, SW_TABLET_MODE);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev, "cannot register input device: %d\n",
+ error);
+ return error;
+ }
+
+ /* Seed the state */
+ error = cbas_ec_query_base(ec, true, &cbas_ec.base_present);
+ if (error) {
+ dev_err(&pdev->dev, "cannot query base state: %d\n", error);
+ return error;
+ }
+
+ input_report_switch(input, SW_TABLET_MODE, !cbas_ec.base_present);
+
+ cbas_ec_set_input(input);
+
+ cbas_ec.dev = &pdev->dev;
+ cbas_ec.notifier.notifier_call = cbas_ec_notify;
+ error = blocking_notifier_chain_register(&ec->event_notifier,
+ &cbas_ec.notifier);
+ if (error) {
+ dev_err(&pdev->dev, "cannot register notifier: %d\n", error);
+ cbas_ec_set_input(NULL);
+ return error;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+ return 0;
+}
+
+static int cbas_ec_probe(struct platform_device *pdev)
+{
+ int retval;
+
+ mutex_lock(&cbas_ec_reglock);
+
+ if (cbas_ec.input) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ retval = __cbas_ec_probe(pdev);
+
+out:
+ mutex_unlock(&cbas_ec_reglock);
+ return retval;
+}
+
+static int cbas_ec_remove(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+
+ mutex_lock(&cbas_ec_reglock);
+
+ blocking_notifier_chain_unregister(&ec->event_notifier,
+ &cbas_ec.notifier);
+ cbas_ec_set_input(NULL);
+
+ mutex_unlock(&cbas_ec_reglock);
+ return 0;
+}
+
+static const struct acpi_device_id cbas_ec_acpi_ids[] = {
+ { "GOOG000B", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids);
+
+static struct platform_driver cbas_ec_driver = {
+ .probe = cbas_ec_probe,
+ .remove = cbas_ec_remove,
+ .driver = {
+ .name = "cbas_ec",
+ .acpi_match_table = ACPI_PTR(cbas_ec_acpi_ids),
+ .pm = &cbas_ec_pm_ops,
+ },
+};
+
+#define MAX_BRIGHTNESS 100
struct hammer_kbd_leds {
struct led_classdev cdev;
@@ -90,33 +342,130 @@ static int hammer_register_leds(struct hid_device *hdev)
return devm_led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
}
-static int hammer_input_configured(struct hid_device *hdev,
- struct hid_input *hi)
+#define HID_UP_GOOGLEVENDOR 0xffd10000
+#define HID_VD_KBD_FOLDED 0x00000019
+#define WHISKERS_KBD_FOLDED (HID_UP_GOOGLEVENDOR | HID_VD_KBD_FOLDED)
+
+/* HID usage for keyboard backlight (Alphanumeric display brightness) */
+#define HID_AD_BRIGHTNESS 0x00140046
+
+static int hammer_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ unsigned long **bit, int *max)
{
- struct list_head *report_list =
- &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
+ if (hdev->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
+ usage->hid == WHISKERS_KBD_FOLDED) {
+ /*
+ * We do not want this usage mapped here: it gets mixed in
+ * with the "base attached" signal and is delivered through a
+ * separate input device for tablet switch mode.
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int hammer_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ unsigned long flags;
+
+ if (hid->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
+ usage->hid == WHISKERS_KBD_FOLDED) {
+ spin_lock_irqsave(&cbas_ec_lock, flags);
+
+ hid_dbg(hid, "%s: base: %d, folded: %d\n", __func__,
+ cbas_ec.base_present, value);
+
+ /*
+ * We should not get event if base is detached, but in case
+ * we happen to service HID and EC notifications out of order
+ * let's still check the "base present" flag.
+ */
+ if (cbas_ec.input && cbas_ec.base_present) {
+ input_report_switch(cbas_ec.input,
+ SW_TABLET_MODE, value);
+ input_sync(cbas_ec.input);
+ }
+
+ spin_unlock_irqrestore(&cbas_ec_lock, flags);
+ return 1; /* We handled this event */
+ }
+
+ return 0;
+}
+
+static bool hammer_is_keyboard_interface(struct hid_device *hdev)
+{
+ struct hid_report_enum *re = &hdev->report_enum[HID_INPUT_REPORT];
struct hid_report *report;
- if (list_empty(report_list))
- return 0;
+ list_for_each_entry(report, &re->report_list, list)
+ if (report->application == HID_GD_KEYBOARD)
+ return true;
- report = list_first_entry(report_list, struct hid_report, list);
+ return false;
+}
+
+static bool hammer_has_backlight_control(struct hid_device *hdev)
+{
+ struct hid_report_enum *re = &hdev->report_enum[HID_OUTPUT_REPORT];
+ struct hid_report *report;
+ int i, j;
- if (report->maxfield == 1 &&
- report->field[0]->application == HID_GD_KEYBOARD &&
- report->field[0]->maxusage == 1 &&
- report->field[0]->usage[0].hid == HID_AD_BRIGHTNESS) {
- int err = hammer_register_leds(hdev);
+ list_for_each_entry(report, &re->report_list, list) {
+ if (report->application != HID_GD_KEYBOARD)
+ continue;
- if (err)
+ for (i = 0; i < report->maxfield; i++) {
+ struct hid_field *field = report->field[i];
+
+ for (j = 0; j < field->maxusage; j++)
+ if (field->usage[j].hid == HID_AD_BRIGHTNESS)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int hammer_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int error;
+
+ /*
+ * We always want to poll for, and handle tablet mode events from
+ * Whiskers, even when nobody has opened the input device. This also
+ * prevents the hid core from dropping early tablet mode events from
+ * the device.
+ */
+ if (hdev->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
+ hammer_is_keyboard_interface(hdev))
+ hdev->quirks |= HID_QUIRK_ALWAYS_POLL;
+
+ error = hid_parse(hdev);
+ if (error)
+ return error;
+
+ error = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (error)
+ return error;
+
+ if (hammer_has_backlight_control(hdev)) {
+ error = hammer_register_leds(hdev);
+ if (error)
hid_warn(hdev,
"Failed to register keyboard backlight: %d\n",
- err);
+ error);
}
return 0;
}
+
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
@@ -133,8 +482,34 @@ MODULE_DEVICE_TABLE(hid, hammer_devices);
static struct hid_driver hammer_driver = {
.name = "hammer",
.id_table = hammer_devices,
- .input_configured = hammer_input_configured,
+ .probe = hammer_probe,
+ .input_mapping = hammer_input_mapping,
+ .event = hammer_event,
};
-module_hid_driver(hammer_driver);
+
+static int __init hammer_init(void)
+{
+ int error;
+
+ error = platform_driver_register(&cbas_ec_driver);
+ if (error)
+ return error;
+
+ error = hid_register_driver(&hammer_driver);
+ if (error) {
+ platform_driver_unregister(&cbas_ec_driver);
+ return error;
+ }
+
+ return 0;
+}
+module_init(hammer_init);
+
+static void __exit hammer_exit(void)
+{
+ hid_unregister_driver(&hammer_driver);
+ platform_driver_unregister(&cbas_ec_driver);
+}
+module_exit(hammer_exit);
MODULE_LICENSE("GPL");
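
What userspace sees from all of this is an ordinary EV_SW switch; a
minimal evdev reader (device path assumed) watching the tablet-mode
transitions generated above could look like:

/* Hypothetical sketch: print SW_TABLET_MODE transitions. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event5", O_RDONLY); /* path assumed */

	if (fd < 0)
		return 1;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_SW && ev.code == SW_TABLET_MODE)
			printf("tablet mode: %s\n", ev.value ? "on" : "off");
	}

	close(fd);
	return 0;
}
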
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5146ee029db4..f63489c882bb 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -92,6 +92,7 @@
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
+#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f
#define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214
@@ -229,6 +230,9 @@
#define USB_VENDOR_ID_BETOP_2185V2PC 0x8380
#define USB_VENDOR_ID_BETOP_2185V2BFM 0x20bc
+#define USB_VENDOR_ID_BIGBEN 0x146b
+#define USB_DEVICE_ID_BIGBEN_PS3OFMINIPAD 0x0902
+
#define USB_VENDOR_ID_BTC 0x046e
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
@@ -342,6 +346,7 @@
#define USB_DEVICE_ID_DMI_ENC 0x5fab
#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_DEVICE_ID_REDRAGON_SEYMUR2 0x0006
#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
@@ -799,6 +804,7 @@
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
+#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -976,7 +982,6 @@
#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
#define USB_DEVICE_ID_SIS_TS 0x1013
#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
-#define USB_DEVICE_ID_SIS10FB_TOUCH 0x10fb
#define USB_VENDOR_ID_SKYCABLE 0x1223
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a481eaf39e88..567c3bf64515 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -758,6 +758,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
case HID_UP_DIGITIZER:
+ if ((field->application & 0xff) == 0x01) /* Digitizer */
+ __set_bit(INPUT_PROP_POINTER, input->propbit);
+ else if ((field->application & 0xff) == 0x02) /* Pen */
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
+
switch (usage->hid & 0xff) {
case 0x00: /* Undefined */
goto ignore;
@@ -1516,6 +1521,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
struct hid_input *hidinput = kzalloc(sizeof(*hidinput), GFP_KERNEL);
struct input_dev *input_dev = input_allocate_device();
const char *suffix = NULL;
+ size_t suffix_len, name_len;
if (!hidinput || !input_dev)
goto fail;
@@ -1559,10 +1565,15 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
}
if (suffix) {
- hidinput->name = kasprintf(GFP_KERNEL, "%s %s",
- hid->name, suffix);
- if (!hidinput->name)
- goto fail;
+ name_len = strlen(hid->name);
+ suffix_len = strlen(suffix);
+ if ((name_len < suffix_len) ||
+ strcmp(hid->name + name_len - suffix_len, suffix)) {
+ hidinput->name = kasprintf(GFP_KERNEL, "%s %s",
+ hid->name, suffix);
+ if (!hidinput->name)
+ goto fail;
+ }
}
input_set_drvdata(input_dev, hid);
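
The guard above boils down to a suffix test on the device name; a
standalone sketch (helper name hypothetical):

/* Append the suffix only when the name does not already end with it,
 * e.g. "Magic Keyboard" + "Keyboard" stays "Magic Keyboard". */
static int name_needs_suffix(const char *name, const char *suffix)
{
	size_t nl = strlen(name), sl = strlen(suffix);

	return nl < sl || strcmp(name + nl - sl, suffix) != 0;
}
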
@@ -1827,3 +1838,48 @@ void hidinput_disconnect(struct hid_device *hid)
}
EXPORT_SYMBOL_GPL(hidinput_disconnect);
+/**
+ * hid_scroll_counter_handle_scroll() - Send high- and low-resolution scroll
+ * events given a high-resolution wheel
+ * movement.
+ * @counter: a hid_scroll_counter struct describing the wheel.
+ * @hi_res_value: the movement of the wheel, in the mouse's high-resolution
+ * units.
+ *
+ * Given a high-resolution movement, this function converts the movement into
+ * microns and emits high-resolution scroll events for the input device. It also
+ * uses the multiplier from &struct hid_scroll_counter to emit low-resolution
+ * scroll events when appropriate for backwards-compatibility with userspace
+ * input libraries.
+ */
+void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
+ int hi_res_value)
+{
+ int low_res_scroll_amount;
+ /* Some wheels will rest 7/8ths of a notch from the previous notch
+ * after slow movement, so we want the threshold for low-res events to
+ * be in the middle of the notches (e.g. after 4/8ths) as opposed to on
+ * the notches themselves (8/8ths).
+ */
+ int threshold = counter->resolution_multiplier / 2;
+
+ input_report_rel(counter->dev, REL_WHEEL_HI_RES,
+ hi_res_value * counter->microns_per_hi_res_unit);
+
+ counter->remainder += hi_res_value;
+ if (abs(counter->remainder) >= threshold) {
+ /* Add (or subtract) 1 because we want to trigger when the wheel
+ * is half-way to the next notch (i.e. scroll 1 notch after a
+ * 1/2 notch movement, 2 notches after a 1 1/2 notch movement,
+ * etc.).
+ */
+ low_res_scroll_amount =
+ counter->remainder / counter->resolution_multiplier
+ + (hi_res_value > 0 ? 1 : -1);
+ input_report_rel(counter->dev, REL_WHEEL,
+ low_res_scroll_amount);
+ counter->remainder -=
+ low_res_scroll_amount * counter->resolution_multiplier;
+ }
+}
+EXPORT_SYMBOL_GPL(hid_scroll_counter_handle_scroll);
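
A worked run of the remainder arithmetic: with a resolution multiplier of
8 the threshold is 4, so the standalone model below (values hypothetical)
emits a REL_WHEEL notch only once the wheel passes the half-notch point:

#include <stdio.h>
#include <stdlib.h>

static int remainder_acc, multiplier = 8;

static void scroll(int hi_res_value)
{
	int threshold = multiplier / 2, notches;

	remainder_acc += hi_res_value;
	if (abs(remainder_acc) >= threshold) {
		notches = remainder_acc / multiplier +
			  (hi_res_value > 0 ? 1 : -1);
		remainder_acc -= notches * multiplier;
		printf("REL_WHEEL %+d (remainder %d)\n",
		       notches, remainder_acc);
	}
}

int main(void)
{
	scroll(3);	/* remainder 3, below threshold: no notch */
	scroll(2);	/* remainder 5: one notch, remainder becomes -3 */
	scroll(6);	/* remainder 3, below threshold again */
	return 0;
}
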
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 19cc980eebce..f01280898b24 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -64,6 +64,14 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24)
#define HIDPP_QUIRK_UNIFYING BIT(25)
+#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
+#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
+#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
+
+/* Convenience constant to check for any high-res support. */
+#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
+ HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \
+ HIDPP_QUIRK_HI_RES_SCROLL_X2121)
#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
@@ -149,6 +157,7 @@ struct hidpp_device {
unsigned long capabilities;
struct hidpp_battery battery;
+ struct hid_scroll_counter vertical_wheel_counter;
};
/* HID++ 1.0 error codes */
@@ -400,32 +409,53 @@ static void hidpp_prefix_name(char **name, int name_length)
#define HIDPP_SET_LONG_REGISTER 0x82
#define HIDPP_GET_LONG_REGISTER 0x83
-#define HIDPP_REG_GENERAL 0x00
-
-static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
+/**
+ * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register.
+ * @hidpp_dev: the device to set the register on.
+ * @register_address: the address of the register to modify.
+ * @byte: the byte of the register to modify. Should be less than 3.
+ * @bit: the bit within the byte to set. Should be less than 8.
+ * Return: 0 if successful, otherwise a negative error code.
+ */
+static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
+ u8 register_address, u8 byte, u8 bit)
{
struct hidpp_report response;
int ret;
u8 params[3] = { 0 };
ret = hidpp_send_rap_command_sync(hidpp_dev,
- REPORT_ID_HIDPP_SHORT,
- HIDPP_GET_REGISTER,
- HIDPP_REG_GENERAL,
- NULL, 0, &response);
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_GET_REGISTER,
+ register_address,
+ NULL, 0, &response);
if (ret)
return ret;
memcpy(params, response.rap.params, 3);
- /* Set the battery bit */
- params[0] |= BIT(4);
+ params[byte] |= BIT(bit);
return hidpp_send_rap_command_sync(hidpp_dev,
- REPORT_ID_HIDPP_SHORT,
- HIDPP_SET_REGISTER,
- HIDPP_REG_GENERAL,
- params, 3, &response);
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_SET_REGISTER,
+ register_address,
+ params, 3, &response);
+}
+
+
+#define HIDPP_REG_GENERAL 0x00
+
+static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
+{
+ return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_GENERAL, 0, 4);
+}
+
+#define HIDPP_REG_FEATURES 0x01
+
+/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */
+static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev)
+{
+ return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6);
}
#define HIDPP_REG_BATTERY_STATUS 0x07
@@ -1137,6 +1167,100 @@ static int hidpp_battery_get_property(struct power_supply *psy,
}
/* -------------------------------------------------------------------------- */
+/* 0x2120: Hi-resolution scrolling */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_HI_RESOLUTION_SCROLLING 0x2120
+
+#define CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE 0x10
+
+static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp,
+ bool enabled, u8 *multiplier)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ u8 params[1];
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
+ &feature_index,
+ &feature_type);
+ if (ret)
+ return ret;
+
+ params[0] = enabled ? BIT(0) : 0;
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE,
+ params, sizeof(params), &response);
+ if (ret)
+ return ret;
+ *multiplier = response.fap.params[1];
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* 0x2121: HiRes Wheel */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_HIRES_WHEEL 0x2121
+
+#define CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY 0x00
+#define CMD_HIRES_WHEEL_SET_WHEEL_MODE 0x20
+
+static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp,
+ u8 *multiplier)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+ &feature_index, &feature_type);
+ if (ret)
+ goto return_default;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY,
+ NULL, 0, &response);
+ if (ret)
+ goto return_default;
+
+ *multiplier = response.fap.params[0];
+ return 0;
+return_default:
+ hid_warn(hidpp->hid_dev,
+ "Couldn't get wheel multiplier (error %d), assuming %d.\n",
+ ret, *multiplier);
+ return ret;
+}
+
+static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert,
+ bool high_resolution, bool use_hidpp)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ u8 params[1];
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+ &feature_index, &feature_type);
+ if (ret)
+ return ret;
+
+ params[0] = (invert ? BIT(2) : 0) |
+ (high_resolution ? BIT(1) : 0) |
+ (use_hidpp ? BIT(0) : 0);
+
+ return hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HIRES_WHEEL_SET_WHEEL_MODE,
+ params, sizeof(params), &response);
+}
+
+/* -------------------------------------------------------------------------- */
/* 0x4301: Solar Keyboard */
/* -------------------------------------------------------------------------- */
@@ -2399,7 +2523,8 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
input_report_rel(mydata->input, REL_Y, v);
v = hid_snto32(data[6], 8);
- input_report_rel(mydata->input, REL_WHEEL, v);
+ hid_scroll_counter_handle_scroll(
+ &hidpp->vertical_wheel_counter, v);
input_sync(mydata->input);
}
@@ -2528,6 +2653,72 @@ static int g920_get_config(struct hidpp_device *hidpp)
}
/* -------------------------------------------------------------------------- */
+/* High-resolution scroll wheels */
+/* -------------------------------------------------------------------------- */
+
+/**
+ * struct hi_res_scroll_info - Stores info on a device's high-res scroll wheel.
+ * @product_id: the HID product ID of the device being described.
+ * @microns_per_hi_res_unit: the distance moved by the user's finger for each
+ * high-resolution unit reported by the device, in
+ * 256ths of a millimetre.
+ */
+struct hi_res_scroll_info {
+ __u32 product_id;
+ int microns_per_hi_res_unit;
+};
+
+static struct hi_res_scroll_info hi_res_scroll_devices[] = {
+ { /* Anywhere MX */
+ .product_id = 0x1017, .microns_per_hi_res_unit = 445 },
+ { /* Performance MX */
+ .product_id = 0x101a, .microns_per_hi_res_unit = 406 },
+ { /* M560 */
+ .product_id = 0x402d, .microns_per_hi_res_unit = 435 },
+ { /* MX Master 2S */
+ .product_id = 0x4069, .microns_per_hi_res_unit = 406 },
+};
+
+static int hi_res_scroll_look_up_microns(__u32 product_id)
+{
+ int i;
+ int num_devices = ARRAY_SIZE(hi_res_scroll_devices);
+ for (i = 0; i < num_devices; i++) {
+ if (hi_res_scroll_devices[i].product_id == product_id)
+ return hi_res_scroll_devices[i].microns_per_hi_res_unit;
+ }
+ /* We don't have a value for this device, so use a sensible default. */
+ return 406;
+}
+
+static int hi_res_scroll_enable(struct hidpp_device *hidpp)
+{
+ int ret;
+ u8 multiplier = 8;
+
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) {
+ ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false);
+ hidpp_hrw_get_wheel_capability(hidpp, &multiplier);
+ } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) {
+ ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true,
+ &multiplier);
+ } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */
+ ret = hidpp10_enable_scrolling_acceleration(hidpp);
+
+ if (ret)
+ return ret;
+
+ hidpp->vertical_wheel_counter.resolution_multiplier = multiplier;
+ hidpp->vertical_wheel_counter.microns_per_hi_res_unit =
+ hi_res_scroll_look_up_microns(hidpp->hid_dev->product);
+ hid_info(hidpp->hid_dev, "multiplier = %d, microns = %d\n",
+ multiplier,
+ hidpp->vertical_wheel_counter.microns_per_hi_res_unit);
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
@@ -2572,6 +2763,11 @@ static void hidpp_populate_input(struct hidpp_device *hidpp,
wtp_populate_input(hidpp, input, origin_is_hid_core);
else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
m560_populate_input(hidpp, input, origin_is_hid_core);
+
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) {
+ input_set_capability(input, EV_REL, REL_WHEEL_HI_RES);
+ hidpp->vertical_wheel_counter.dev = input;
+ }
}
static int hidpp_input_configured(struct hid_device *hdev,
@@ -2690,6 +2886,27 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
return 0;
}
+static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ /* This function will only be called for scroll events, due to the
+ * restriction imposed in hidpp_usages.
+ */
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ struct hid_scroll_counter *counter = &hidpp->vertical_wheel_counter;
+ /* A scroll event may occur before the multiplier has been retrieved or
+ * the input device set, or high-res scroll enabling may fail. In such
+ * cases we must return early (falling back to default behaviour) to
+ * avoid a crash in hid_scroll_counter_handle_scroll.
+ */
+ if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
+ || counter->dev == NULL || counter->resolution_multiplier == 0)
+ return 0;
+
+ hid_scroll_counter_handle_scroll(counter, value);
+ return 1;
+}
+
static int hidpp_initialize_battery(struct hidpp_device *hidpp)
{
static atomic_t battery_no = ATOMIC_INIT(0);
@@ -2901,6 +3118,9 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
+ hi_res_scroll_enable(hidpp);
+
if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
/* if the input nodes are already created, we can stop now */
return;
@@ -3086,35 +3306,63 @@ static void hidpp_remove(struct hid_device *hdev)
mutex_destroy(&hidpp->send_mutex);
}
+#define LDJ_DEVICE(product) \
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
+ USB_VENDOR_ID_LOGITECH, (product))
+
static const struct hid_device_id hidpp_devices[] = {
{ /* wireless touchpad */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4011),
+ LDJ_DEVICE(0x4011),
.driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT |
HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS },
{ /* wireless touchpad T650 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4101),
+ LDJ_DEVICE(0x4101),
.driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
{ /* wireless touchpad T651 */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_T651),
.driver_data = HIDPP_QUIRK_CLASS_WTP },
+ { /* Mouse Logitech Anywhere MX */
+ LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ { /* Mouse Logitech Cube */
+ LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
+ { /* Mouse Logitech M335 */
+ LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech M515 */
+ LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
{ /* Mouse logitech M560 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x402d),
- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
+ LDJ_DEVICE(0x402d),
+ .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560
+ | HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
+ { /* Mouse Logitech M705 (firmware RQM17) */
+ LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ { /* Mouse Logitech M705 (firmware RQM67) */
+ LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech M720 */
+ LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Anywhere 2 */
+ LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Anywhere 2S */
+ LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master */
+ LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master 2S */
+ LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech Performance MX */
+ LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
{ /* Keyboard logitech K400 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4024),
+ LDJ_DEVICE(0x4024),
.driver_data = HIDPP_QUIRK_CLASS_K400 },
{ /* Solar Keyboard Logitech K750 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4002),
+ LDJ_DEVICE(0x4002),
.driver_data = HIDPP_QUIRK_CLASS_K750 },
- { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
+ { LDJ_DEVICE(HID_ANY_ID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
@@ -3123,12 +3371,19 @@ static const struct hid_device_id hidpp_devices[] = {
MODULE_DEVICE_TABLE(hid, hidpp_devices);
+static const struct hid_usage_id hidpp_usages[] = {
+ { HID_GD_WHEEL, EV_REL, REL_WHEEL },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
static struct hid_driver hidpp_driver = {
.name = "logitech-hidpp-device",
.id_table = hidpp_devices,
.probe = hidpp_probe,
.remove = hidpp_remove,
.raw_event = hidpp_raw_event,
+ .usage_table = hidpp_usages,
+ .event = hidpp_event,
.input_configured = hidpp_input_configured,
.input_mapping = hidpp_input_mapping,
.input_mapped = hidpp_input_mapped,
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index b454c4386157..1d5ea678d268 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -54,6 +54,8 @@ module_param(report_undeciphered, bool, 0644);
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
#define TRACKPAD_REPORT_ID 0x28
+#define TRACKPAD2_USB_REPORT_ID 0x02
+#define TRACKPAD2_BT_REPORT_ID 0x31
#define MOUSE_REPORT_ID 0x29
#define DOUBLE_REPORT_ID 0xf7
/* These definitions are not precise, but they're close enough. (Bits
@@ -91,6 +93,17 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define TRACKPAD_RES_Y \
((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
+#define TRACKPAD2_DIMENSION_X (float)16000
+#define TRACKPAD2_MIN_X -3678
+#define TRACKPAD2_MAX_X 3934
+#define TRACKPAD2_RES_X \
+ ((TRACKPAD2_MAX_X - TRACKPAD2_MIN_X) / (TRACKPAD2_DIMENSION_X / 100))
+#define TRACKPAD2_DIMENSION_Y (float)11490
+#define TRACKPAD2_MIN_Y -2478
+#define TRACKPAD2_MAX_Y 2587
+#define TRACKPAD2_RES_Y \
+ ((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100))
+
/**
* struct magicmouse_sc - Tracks Magic Mouse-specific data.
* @input: Input device through which we report events.
@@ -183,6 +196,7 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
{
struct input_dev *input = msc->input;
int id, x, y, size, orientation, touch_major, touch_minor, state, down;
+ int pressure = 0;
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf;
@@ -194,6 +208,17 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
touch_minor = tdata[4];
state = tdata[7] & TOUCH_STATE_MASK;
down = state != TOUCH_STATE_NONE;
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ id = tdata[8] & 0xf;
+ x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
+ y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19);
+ size = tdata[6];
+ orientation = (tdata[8] >> 5) - 4;
+ touch_major = tdata[4];
+ touch_minor = tdata[5];
+ pressure = tdata[7];
+ state = tdata[3] & 0xC0;
+ down = state == 0x80;
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
id = (tdata[7] << 2 | tdata[6] >> 6) & 0xf;
x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
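
Both trackpad branches decode signed 13-bit coordinates with the same
shift trick: place the field at the top of a 32-bit word, then
arithmetic-shift back down to sign-extend. A standalone illustration
(helper name hypothetical; arithmetic right shift assumed, as in the
driver):

#include <stdint.h>
#include <stdio.h>

/* Sign-extend a 13-bit x coordinate packed across two raw bytes. */
static int32_t unpack_x(uint8_t b0, uint8_t b1)
{
	return (int32_t)((uint32_t)b1 << 27 | (uint32_t)b0 << 19) >> 19;
}

int main(void)
{
	/* 0x1FFF is -1 in 13-bit two's complement */
	printf("%d\n", unpack_x(0xFF, 0x1F));	/* prints -1 */
	return 0;
}
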
@@ -215,7 +240,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
/* If requested, emulate a scroll wheel by detecting small
* vertical touch motions.
*/
- if (emulate_scroll_wheel) {
+ if (emulate_scroll_wheel && (input->id.product !=
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)) {
unsigned long now = jiffies;
int step_x = msc->touches[id].scroll_x - x;
int step_y = msc->touches[id].scroll_y - y;
@@ -269,10 +295,14 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+
if (report_undeciphered) {
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
- else /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ else if (input->id.product !=
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
input_event(input, EV_MSC, MSC_RAW, tdata[8]);
}
}
@@ -287,6 +317,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
switch (data[0]) {
case TRACKPAD_REPORT_ID:
+ case TRACKPAD2_BT_REPORT_ID:
/* Expect four bytes of prefix, and N*9 bytes of touch data. */
if (size < 4 || ((size - 4) % 9) != 0)
return 0;
@@ -308,6 +339,22 @@ static int magicmouse_raw_event(struct hid_device *hdev,
* ts = data[1] >> 6 | data[2] << 2 | data[3] << 10;
*/
break;
+ case TRACKPAD2_USB_REPORT_ID:
+ /* Expect twelve bytes of prefix and N*9 bytes of touch data. */
+ if (size < 12 || ((size - 12) % 9) != 0)
+ return 0;
+ npoints = (size - 12) / 9;
+ if (npoints > 15) {
+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD2_USB_REPORT_ID\n",
+ size);
+ return 0;
+ }
+ msc->ntouches = 0;
+ for (ii = 0; ii < npoints; ii++)
+ magicmouse_emit_touch(msc, ii, data + ii * 9 + 12);
+
+ clicks = data[1];
+ break;
case MOUSE_REPORT_ID:
/* Expect six bytes of prefix, and N*8 bytes of touch data. */
if (size < 6 || ((size - 6) % 8) != 0)
@@ -352,6 +399,9 @@ static int magicmouse_raw_event(struct hid_device *hdev,
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ input_mt_sync_frame(input);
+ input_report_key(input, BTN_MOUSE, clicks & 1);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_report_key(input, BTN_MOUSE, clicks & 1);
input_mt_report_pointer_emulation(input, true);
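
input_mt_sync_frame() is what makes the INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK flags (set up later in magicmouse_setup_input()) take effect: untouched slots are released and pointer emulation is computed once per frame, instead of the legacy input_mt_report_pointer_emulation() path kept for the original trackpad. A sketch of a typical per-contact emission under those flags; the names are illustrative, not the driver's exact flow:

#include <linux/input/mt.h>

/* Illustrative one-contact frame; assumes input_mt_init_slots() was
 * called with INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK.
 */
static void emit_contact(struct input_dev *input, int slot, int x, int y)
{
	input_mt_slot(input, slot);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
	input_report_abs(input, ABS_MT_POSITION_X, x);
	input_report_abs(input, ABS_MT_POSITION_Y, y);
	input_mt_sync_frame(input); /* drop unused slots, emulate pointer */
	input_sync(input);
}
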
@@ -364,6 +414,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
{
int error;
+ int mt_flags = 0;
__set_bit(EV_KEY, input->evbit);
@@ -380,6 +431,22 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(REL_WHEEL, input->relbit);
__set_bit(REL_HWHEEL, input->relbit);
}
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ /* Set the device name so the same driver settings get loaded,
+ * whether the trackpad is connected over Bluetooth or USB.
+ */
+ input->name = "Apple Inc. Magic Trackpad 2";
+
+ __clear_bit(EV_MSC, input->evbit);
+ __clear_bit(BTN_0, input->keybit);
+ __clear_bit(BTN_RIGHT, input->keybit);
+ __clear_bit(BTN_MIDDLE, input->keybit);
+ __set_bit(BTN_MOUSE, input->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
+
+ mt_flags = INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK;
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
/* input->keybit is initialized with incorrect button info
* for Magic Trackpad. There really is only one physical
@@ -402,14 +469,13 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(EV_ABS, input->evbit);
- error = input_mt_init_slots(input, 16, 0);
+ error = input_mt_init_slots(input, 16, mt_flags);
if (error)
return error;
input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
4, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
4, 0);
- input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
/* Note: Touch Y position from the device is inverted relative
* to how pointer motion is reported (and relative to how USB
@@ -418,6 +484,7 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
* inverse of the reported Y.
*/
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_MT_POSITION_X,
MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y,
@@ -427,7 +494,25 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
MOUSE_RES_X);
input_abs_set_res(input, ABS_MT_POSITION_Y,
MOUSE_RES_Y);
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 253, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 253, 0, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -3, 4, 0, 0);
+ input_set_abs_params(input, ABS_X, TRACKPAD2_MIN_X,
+ TRACKPAD2_MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_Y, TRACKPAD2_MIN_Y,
+ TRACKPAD2_MAX_Y, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ TRACKPAD2_MIN_X, TRACKPAD2_MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ TRACKPAD2_MIN_Y, TRACKPAD2_MAX_Y, 0, 0);
+
+ input_abs_set_res(input, ABS_X, TRACKPAD2_RES_X);
+ input_abs_set_res(input, ABS_Y, TRACKPAD2_RES_Y);
+ input_abs_set_res(input, ABS_MT_POSITION_X, TRACKPAD2_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, TRACKPAD2_RES_Y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
TRACKPAD_MAX_X, 4, 0);
input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
@@ -447,7 +532,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
input_set_events_per_packet(input, 60);
- if (report_undeciphered) {
+ if (report_undeciphered &&
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
__set_bit(EV_MSC, input->evbit);
__set_bit(MSC_RAW, input->mscbit);
}
@@ -465,7 +551,8 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
msc->input = hi->input;
/* Magic Trackpad does not give relative data after switching to MT */
- if (hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD &&
+ if ((hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD ||
+ hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) &&
field->flags & HID_MAIN_ITEM_RELATIVE)
return -1;
@@ -494,11 +581,20 @@ static int magicmouse_input_configured(struct hid_device *hdev,
static int magicmouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- const u8 feature[] = { 0xd7, 0x01 };
+ const u8 *feature;
+ const u8 feature_mt[] = { 0xD7, 0x01 };
+ const u8 feature_mt_trackpad2_usb[] = { 0x02, 0x01 };
+ const u8 feature_mt_trackpad2_bt[] = { 0xF1, 0x02, 0x01 };
u8 *buf;
struct magicmouse_sc *msc;
struct hid_report *report;
int ret;
+ int feature_size;
+
+ if (id->vendor == USB_VENDOR_ID_APPLE &&
+ id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ hdev->type != HID_TYPE_USBMOUSE)
+ return 0;
msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
if (msc == NULL) {
@@ -532,7 +628,14 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
MOUSE_REPORT_ID, 0);
- else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (id->vendor == BT_VENDOR_ID_APPLE)
+ report = hid_register_report(hdev, HID_INPUT_REPORT,
+ TRACKPAD2_BT_REPORT_ID, 0);
+ else /* USB_VENDOR_ID_APPLE */
+ report = hid_register_report(hdev, HID_INPUT_REPORT,
+ TRACKPAD2_USB_REPORT_ID, 0);
+ } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD_REPORT_ID, 0);
report = hid_register_report(hdev, HID_INPUT_REPORT,
@@ -546,7 +649,20 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
- buf = kmemdup(feature, sizeof(feature), GFP_KERNEL);
+ if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (id->vendor == BT_VENDOR_ID_APPLE) {
+ feature_size = sizeof(feature_mt_trackpad2_bt);
+ feature = feature_mt_trackpad2_bt;
+ } else { /* USB_VENDOR_ID_APPLE */
+ feature_size = sizeof(feature_mt_trackpad2_usb);
+ feature = feature_mt_trackpad2_usb;
+ }
+ } else {
+ feature_size = sizeof(feature_mt);
+ feature = feature_mt;
+ }
+
+ buf = kmemdup(feature, feature_size, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_stop_hw;
@@ -560,10 +676,10 @@ static int magicmouse_probe(struct hid_device *hdev,
* but there seems to be no other way of switching the mode.
* Thus the super-ugly hacky success check below.
*/
- ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
kfree(buf);
- if (ret != -EIO && ret != sizeof(feature)) {
+ if (ret != -EIO && ret != feature_size) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
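
The mode-switch pattern above generalizes to: copy the feature payload into a kmalloc'd (DMA-safe) buffer whose first byte doubles as the report ID, issue a SET_REPORT, and let the caller apply the device's quirky success criteria. A hedged sketch of that shape (helper name is hypothetical):

#include <linux/hid.h>
#include <linux/slab.h>

static int send_feature(struct hid_device *hdev, const u8 *feature, size_t len)
{
	u8 *buf = kmemdup(feature, len, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;
	/* buf[0] doubles as the report ID; the buffer must be kmalloc'd */
	ret = hid_hw_raw_request(hdev, buf[0], buf, len,
				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
	kfree(buf);
	return ret; /* caller applies the tolerant -EIO/len check, as above */
}
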
@@ -579,6 +695,10 @@ static const struct hid_device_id magic_mice[] = {
USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
{ }
};
MODULE_DEVICE_TABLE(hid, magic_mice);
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 72d983626afd..330cb073cb66 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -29,11 +29,41 @@
#define MS_NOGET BIT(4)
#define MS_DUPLICATE_USAGES BIT(5)
#define MS_SURFACE_DIAL BIT(6)
+#define MS_QUIRK_FF BIT(7)
+
+struct ms_data {
+ unsigned long quirks;
+ struct hid_device *hdev;
+ struct work_struct ff_worker;
+ __u8 strong;
+ __u8 weak;
+ void *output_report_dmabuf;
+};
+
+#define XB1S_FF_REPORT 3
+#define ENABLE_WEAK BIT(0)
+#define ENABLE_STRONG BIT(1)
+
+enum {
+ MAGNITUDE_STRONG = 2,
+ MAGNITUDE_WEAK,
+ MAGNITUDE_NUM
+};
+
+struct xb1s_ff_report {
+ __u8 report_id;
+ __u8 enable;
+ __u8 magnitude[MAGNITUDE_NUM];
+ __u8 duration_10ms;
+ __u8 start_delay_10ms;
+ __u8 loop_count;
+} __packed;
static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
/*
* Microsoft Wireless Desktop Receiver (Model 1028) has
@@ -159,7 +189,8 @@ static int ms_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
if (quirks & MS_ERGONOMY) {
int ret = ms_ergonomy_kb_quirk(hi, usage, bit, max);
@@ -185,7 +216,8 @@ static int ms_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
if (quirks & MS_DUPLICATE_USAGES)
clear_bit(usage->code, *bit);
@@ -196,7 +228,8 @@ static int ms_input_mapped(struct hid_device *hdev, struct hid_input *hi,
static int ms_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
struct input_dev *input;
if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
@@ -251,12 +284,97 @@ static int ms_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
+static void ms_ff_worker(struct work_struct *work)
+{
+ struct ms_data *ms = container_of(work, struct ms_data, ff_worker);
+ struct hid_device *hdev = ms->hdev;
+ struct xb1s_ff_report *r = ms->output_report_dmabuf;
+ int ret;
+
+ memset(r, 0, sizeof(*r));
+
+ r->report_id = XB1S_FF_REPORT;
+ r->enable = ENABLE_WEAK | ENABLE_STRONG;
+ /*
+ * Specifying the maximum duration and maximum loop count should
+ * cover the maximum duration of a single effect, which is
+ * 65536 ms.
+ */
+ r->duration_10ms = U8_MAX;
+ r->loop_count = U8_MAX;
+ r->magnitude[MAGNITUDE_STRONG] = ms->strong; /* left actuator */
+ r->magnitude[MAGNITUDE_WEAK] = ms->weak; /* right actuator */
+
+ ret = hid_hw_output_report(hdev, (__u8 *)r, sizeof(*r));
+ if (ret)
+ hid_warn(hdev, "failed to send FF report\n");
+}
+
+static int ms_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct ms_data *ms = hid_get_drvdata(hid);
+
+ if (effect->type != FF_RUMBLE)
+ return 0;
+
+ /*
+ * Magnitude is 0..100 so scale the 16-bit input here
+ */
+ ms->strong = ((u32) effect->u.rumble.strong_magnitude * 100) / U16_MAX;
+ ms->weak = ((u32) effect->u.rumble.weak_magnitude * 100) / U16_MAX;
+
+ schedule_work(&ms->ff_worker);
+ return 0;
+}
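
The scaling above maps the 16-bit ff_effect magnitudes onto the 0..100 range the Xbox One S report expects; for example, strong_magnitude = 0x8000 gives (0x8000 * 100) / 0xFFFF = 50. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t strong = 0x8000;                             /* half scale */
	uint8_t pct = ((uint32_t)strong * 100) / UINT16_MAX;  /* -> 50 */

	printf("%u%%\n", pct);
	return 0;
}
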
+
+static int ms_init_ff(struct hid_device *hdev)
+{
+ struct hid_input *hidinput = list_entry(hdev->inputs.next,
+ struct hid_input, list);
+ struct input_dev *input_dev = hidinput->input;
+ struct ms_data *ms = hid_get_drvdata(hdev);
+
+ if (!(ms->quirks & MS_QUIRK_FF))
+ return 0;
+
+ ms->hdev = hdev;
+ INIT_WORK(&ms->ff_worker, ms_ff_worker);
+
+ ms->output_report_dmabuf = devm_kzalloc(&hdev->dev,
+ sizeof(struct xb1s_ff_report),
+ GFP_KERNEL);
+ if (ms->output_report_dmabuf == NULL)
+ return -ENOMEM;
+
+ input_set_capability(input_dev, EV_FF, FF_RUMBLE);
+ return input_ff_create_memless(input_dev, NULL, ms_play_effect);
+}
+
+static void ms_remove_ff(struct hid_device *hdev)
+{
+ struct ms_data *ms = hid_get_drvdata(hdev);
+
+ if (!(ms->quirks & MS_QUIRK_FF))
+ return;
+
+ cancel_work_sync(&ms->ff_worker);
+}
+
static int ms_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
unsigned long quirks = id->driver_data;
+ struct ms_data *ms;
int ret;
- hid_set_drvdata(hdev, (void *)quirks);
+ ms = devm_kzalloc(&hdev->dev, sizeof(*ms), GFP_KERNEL);
+ if (ms == NULL)
+ return -ENOMEM;
+
+ ms->quirks = quirks;
+
+ hid_set_drvdata(hdev, ms);
if (quirks & MS_NOGET)
hdev->quirks |= HID_QUIRK_NOGET;
@@ -277,11 +395,21 @@ static int ms_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
+ ret = ms_init_ff(hdev);
+ if (ret)
+ hid_err(hdev, "could not initialize ff, continuing anyway\n");
+
return 0;
err_free:
return ret;
}
+static void ms_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ ms_remove_ff(hdev);
+}
+
static const struct hid_device_id ms_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV),
.driver_data = MS_HIDINPUT },
@@ -318,6 +446,8 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_PRESENTER },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, 0x091B),
.driver_data = MS_SURFACE_DIAL },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER),
+ .driver_data = MS_QUIRK_FF },
{ }
};
MODULE_DEVICE_TABLE(hid, ms_devices);
@@ -330,6 +460,7 @@ static struct hid_driver ms_driver = {
.input_mapped = ms_input_mapped,
.event = ms_event,
.probe = ms_probe,
+ .remove = ms_remove,
};
module_hid_driver(ms_driver);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index da954f3f4da7..f7c6de2b6730 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1319,6 +1319,13 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return mt_touch_input_mapping(hdev, hi, field, usage, bit, max,
application);
+ /*
+ * Some eGalax touchscreens have "application == DG_TOUCHSCREEN"
+ * for the stylus. Overwrite the hid_input application in that case.
+ */
+ if (field->physical == HID_DG_STYLUS)
+ hi->application = HID_DG_STYLUS;
+
/* let hid-core decide for the others */
return 0;
}
@@ -1507,14 +1514,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
struct mt_device *td = hid_get_drvdata(hdev);
char *name;
const char *suffix = NULL;
- unsigned int application = 0;
struct mt_report_data *rdata;
struct mt_application *mt_application = NULL;
struct hid_report *report;
int ret;
list_for_each_entry(report, &hi->reports, hidinput_list) {
- application = report->application;
rdata = mt_find_report_data(td, report);
if (!rdata) {
hid_err(hdev, "failed to allocate data for report\n");
@@ -1529,46 +1534,33 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (ret)
return ret;
}
-
- /*
- * some egalax touchscreens have "application == DG_TOUCHSCREEN"
- * for the stylus. Check this first, and then rely on
- * the application field.
- */
- if (report->field[0]->physical == HID_DG_STYLUS) {
- suffix = "Pen";
- /* force BTN_STYLUS to allow tablet matching in udev */
- __set_bit(BTN_STYLUS, hi->input->keybit);
- }
}
- if (!suffix) {
- switch (application) {
- case HID_GD_KEYBOARD:
- case HID_GD_KEYPAD:
- case HID_GD_MOUSE:
- case HID_DG_TOUCHPAD:
- case HID_GD_SYSTEM_CONTROL:
- case HID_CP_CONSUMER_CONTROL:
- case HID_GD_WIRELESS_RADIO_CTLS:
- case HID_GD_SYSTEM_MULTIAXIS:
- /* already handled by hid core */
- break;
- case HID_DG_TOUCHSCREEN:
- /* we do not set suffix = "Touchscreen" */
- hi->input->name = hdev->name;
- break;
- case HID_DG_STYLUS:
- /* force BTN_STYLUS to allow tablet matching in udev */
- __set_bit(BTN_STYLUS, hi->input->keybit);
- break;
- case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
- suffix = "Custom Media Keys";
- break;
- default:
- suffix = "UNKNOWN";
- break;
- }
+ switch (hi->application) {
+ case HID_GD_KEYBOARD:
+ case HID_GD_KEYPAD:
+ case HID_GD_MOUSE:
+ case HID_DG_TOUCHPAD:
+ case HID_GD_SYSTEM_CONTROL:
+ case HID_CP_CONSUMER_CONTROL:
+ case HID_GD_WIRELESS_RADIO_CTLS:
+ case HID_GD_SYSTEM_MULTIAXIS:
+ /* already handled by hid core */
+ break;
+ case HID_DG_TOUCHSCREEN:
+ /* we do not set suffix = "Touchscreen" */
+ hi->input->name = hdev->name;
+ break;
+ case HID_DG_STYLUS:
+ /* force BTN_STYLUS to allow tablet matching in udev */
+ __set_bit(BTN_STYLUS, hi->input->keybit);
+ break;
+ case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
+ suffix = "Custom Media Keys";
+ break;
+ default:
+ suffix = "UNKNOWN";
+ break;
}
if (suffix) {
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 249d49b6b16c..52c3b01917e7 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -70,6 +70,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
index 832d8f9aaba2..099e1ce2f234 100644
--- a/drivers/hid/i2c-hid/Makefile
+++ b/drivers/hid/i2c-hid/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_I2C_HID) += i2c-hid.o
+
+i2c-hid-objs = i2c-hid-core.o
+i2c-hid-$(CONFIG_DMI) += i2c-hid-dmi-quirks.o
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index f3076659361a..4aab96cf0818 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -43,11 +43,12 @@
#include <linux/platform_data/i2c-hid.h>
#include "../hid-ids.h"
+#include "i2c-hid.h"
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
-#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
+#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
/* flags */
#define I2C_HID_STARTED 0
@@ -169,9 +170,8 @@ static const struct i2c_hid_quirks {
{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
- I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
- I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
+ I2C_HID_QUIRK_NO_RUNTIME_PM },
{ 0, 0 }
};
@@ -669,6 +669,7 @@ static int i2c_hid_parse(struct hid_device *hid)
char *rdesc;
int ret;
int tries = 3;
+ char *use_override;
i2c_hid_dbg(ihid, "entering %s\n", __func__);
@@ -687,26 +688,37 @@ static int i2c_hid_parse(struct hid_device *hid)
if (ret)
return ret;
- rdesc = kzalloc(rsize, GFP_KERNEL);
+ use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
+ &rsize);
- if (!rdesc) {
- dbg_hid("couldn't allocate rdesc memory\n");
- return -ENOMEM;
- }
-
- i2c_hid_dbg(ihid, "asking HID report descriptor\n");
-
- ret = i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize);
- if (ret) {
- hid_err(hid, "reading report descriptor failed\n");
- kfree(rdesc);
- return -EIO;
+ if (use_override) {
+ rdesc = use_override;
+ i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
+ } else {
+ rdesc = kzalloc(rsize, GFP_KERNEL);
+
+ if (!rdesc) {
+ dbg_hid("couldn't allocate rdesc memory\n");
+ return -ENOMEM;
+ }
+
+ i2c_hid_dbg(ihid, "asking HID report descriptor\n");
+
+ ret = i2c_hid_command(client, &hid_report_descr_cmd,
+ rdesc, rsize);
+ if (ret) {
+ hid_err(hid, "reading report descriptor failed\n");
+ kfree(rdesc);
+ return -EIO;
+ }
}
i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
ret = hid_parse_report(hid, rdesc, rsize);
- kfree(rdesc);
+ if (!use_override)
+ kfree(rdesc);
+
if (ret) {
dbg_hid("parsing report descriptor failed\n");
return ret;
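
With the DMI override in place, i2c_hid_parse() has two possible sources for the report descriptor, and their ownership differs: the override points at static data and must never be kfree()d, hence the !use_override guard above. A simplified sketch of the resulting control flow (error handling elided, names as in the hunk):

char *override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
							  &rsize);
char *rdesc = override ? override : kzalloc(rsize, GFP_KERNEL);

if (!override)                  /* no override: ask the device itself */
	i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize);

ret = hid_parse_report(hid, rdesc, rsize);
if (!override)                  /* override data is static, keep it */
	kfree(rdesc);
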
@@ -833,12 +845,19 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
int ret;
/* i2c hid fetch using a fixed descriptor size (30 bytes) */
- i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
- ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer,
- sizeof(struct i2c_hid_desc));
- if (ret) {
- dev_err(&client->dev, "hid_descr_cmd failed\n");
- return -ENODEV;
+ if (i2c_hid_get_dmi_i2c_hid_desc_override(client->name)) {
+ i2c_hid_dbg(ihid, "Using a HID descriptor override\n");
+ ihid->hdesc =
+ *i2c_hid_get_dmi_i2c_hid_desc_override(client->name);
+ } else {
+ i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
+ ret = i2c_hid_command(client, &hid_descr_cmd,
+ ihid->hdesc_buffer,
+ sizeof(struct i2c_hid_desc));
+ if (ret) {
+ dev_err(&client->dev, "hid_descr_cmd failed\n");
+ return -ENODEV;
+ }
}
/* Validate the length of HID descriptor, the 4 first bytes:
@@ -1105,7 +1124,9 @@ static int i2c_hid_probe(struct i2c_client *client,
goto err_mem_free;
}
- pm_runtime_put(&client->dev);
+ if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+ pm_runtime_put(&client->dev);
+
return 0;
err_mem_free:
@@ -1130,7 +1151,8 @@ static int i2c_hid_remove(struct i2c_client *client)
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid;
- pm_runtime_get_sync(&client->dev);
+ if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+ pm_runtime_get_sync(&client->dev);
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
pm_runtime_put_noidle(&client->dev);
@@ -1236,22 +1258,13 @@ static int i2c_hid_resume(struct device *dev)
/* Instead of resetting device, simply powers the device on. This
* solves "incomplete reports" on Raydium devices 2386:3118 and
- * 2386:4B33
+ * 2386:4B33 and fixes various SIS touchscreens no longer sending
+ * data after a suspend/resume.
*/
ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
if (ret)
return ret;
- /* Some devices need to re-send report descr cmd
- * after resume, after this it will be back normal.
- * otherwise it issues too many incomplete reports.
- */
- if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
- ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
- if (ret)
- return ret;
- }
-
if (hid->driver && hid->driver->reset_resume) {
ret = hid->driver->reset_resume(hid);
return ret;
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
new file mode 100644
index 000000000000..1d645c9ab417
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Quirks for I2C-HID devices that do not supply proper descriptors
+ *
+ * Copyright (c) 2018 Julian Sax <jsbc@gmx.de>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+
+#include "i2c-hid.h"
+
+
+struct i2c_hid_desc_override {
+ union {
+ struct i2c_hid_desc *i2c_hid_desc;
+ uint8_t *i2c_hid_desc_buffer;
+ };
+ uint8_t *hid_report_desc;
+ unsigned int hid_report_desc_size;
+ uint8_t *i2c_name;
+};
+
+
+/*
+ * descriptors for the SIPODEV SP1064 touchpad
+ *
+ * This device does not supply any descriptors; on Windows, a filter
+ * driver sits between the i2c-hid layer and the device and injects
+ * these descriptors when the device is queried. The descriptors were
+ * extracted by listening to the i2c-hid traffic between the Windows
+ * filter driver and the Windows i2c-hid driver.
+ */
+
+static const struct i2c_hid_desc_override sipodev_desc = {
+ .i2c_hid_desc_buffer = (uint8_t [])
+ {0x1e, 0x00, /* Length of descriptor */
+ 0x00, 0x01, /* Version of descriptor */
+ 0xdb, 0x01, /* Length of report descriptor */
+ 0x21, 0x00, /* Location of report descriptor */
+ 0x24, 0x00, /* Location of input report */
+ 0x1b, 0x00, /* Max input report length */
+ 0x25, 0x00, /* Location of output report */
+ 0x11, 0x00, /* Max output report length */
+ 0x22, 0x00, /* Location of command register */
+ 0x23, 0x00, /* Location of data register */
+ 0x11, 0x09, /* Vendor ID */
+ 0x88, 0x52, /* Product ID */
+ 0x06, 0x00, /* Version ID */
+ 0x00, 0x00, 0x00, 0x00 /* Reserved */
+ },
+
+ .hid_report_desc = (uint8_t [])
+ {0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x02, /* Usage (Mouse), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0xA1, 0x00, /* Collection (Physical), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x02, /* Usage Maximum (02h), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x15, 0x81, /* Logical Minimum (-127), */
+ 0x25, 0x7F, /* Logical Maximum (127), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0xC0, /* End Collection, */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x05, /* Usage (Touchpad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x04, /* Report ID (4), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x55, 0x0E, /* Unit Exponent (14), */
+ 0x65, 0x11, /* Unit (Centimeter), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x55, 0x0C, /* Unit Exponent (12), */
+ 0x66, 0x01, 0x10, /* Unit (Seconds), */
+ 0x47, 0xFF, 0xFF, 0x00, 0x00,/* Physical Maximum (65535), */
+ 0x27, 0xFF, 0xFF, 0x00, 0x00,/* Logical Maximum (65535), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x09, 0x56, /* Usage (Scan Time), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x54, /* Usage (Contact Count), */
+ 0x25, 0x7F, /* Logical Maximum (127), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x09, 0x01, /* Usage (01h), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x07, /* Report Count (7), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x09, 0x55, /* Usage (Contact Count Maximum), */
+ 0x09, 0x59, /* Usage (59h), */
+ 0x75, 0x04, /* Report Size (4), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x25, 0x0F, /* Logical Maximum (15), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x85, 0x07, /* Report ID (7), */
+ 0x09, 0x60, /* Usage (60h), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x95, 0x07, /* Report Count (7), */
+ 0xB1, 0x03, /* Feature (Constant, Variable), */
+ 0x85, 0x06, /* Report ID (6), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0xC5, /* Usage (C5h), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x96, 0x00, 0x01, /* Report Count (256), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0x01, /* Usage (01h), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x0D, /* Report ID (13), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x02, /* Usage Maximum (02h), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x0E, /* Usage (Configuration), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x03, /* Report ID (3), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x09, 0x52, /* Usage (Device Mode), */
+ 0x25, 0x0A, /* Logical Maximum (10), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x00, /* Collection (Physical), */
+ 0x85, 0x05, /* Report ID (5), */
+ 0x09, 0x57, /* Usage (57h), */
+ 0x09, 0x58, /* Usage (58h), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0xB1, 0x03, /* Feature (Constant, Variable),*/
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+ },
+ .hid_report_desc_size = 475,
+ .i2c_name = "SYNA3602:00"
+};
+
+
+static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+ {
+ .ident = "Teclast F6 Pro",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F6 Pro"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Teclast F7",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F7"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Trekstor Primebook C13",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Trekstor Primebook C11",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Direkt-Tek DTLAPY116-2",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY116-2"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Mediacom Flexbook Edge 11",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ }
+ { } /* Terminate list */
+};
+
+
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+{
+ struct i2c_hid_desc_override *override;
+ const struct dmi_system_id *system_id;
+
+ system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
+ if (!system_id)
+ return NULL;
+
+ override = system_id->driver_data;
+ if (strcmp(override->i2c_name, i2c_name))
+ return NULL;
+
+ return override->i2c_hid_desc;
+}
+
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size)
+{
+ struct i2c_hid_desc_override *override;
+ const struct dmi_system_id *system_id;
+
+ system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
+ if (!system_id)
+ return NULL;
+
+ override = system_id->driver_data;
+ if (strcmp(override->i2c_name, i2c_name))
+ return NULL;
+
+ *size = override->hid_report_desc_size;
+ return override->hid_report_desc;
+}
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
new file mode 100644
index 000000000000..a8c19aef5824
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef I2C_HID_H
+#define I2C_HID_H
+
+
+#ifdef CONFIG_DMI
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size);
+#else
+static inline struct i2c_hid_desc
+ *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+{ return NULL; }
+static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size)
+{ return NULL; }
+#endif
+
+#endif
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index da133716bed0..08a8327dfd22 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -29,6 +29,7 @@
#define CNL_Ax_DEVICE_ID 0x9DFC
#define GLK_Ax_DEVICE_ID 0x31A2
#define CNL_H_DEVICE_ID 0xA37C
+#define ICL_MOBILE_DEVICE_ID 0x34FC
#define SPT_H_DEVICE_ID 0xA135
#define REVISION_ID_CHT_A0 0x6
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index bfbca7ec54ce..742191bb24c6 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -280,14 +280,14 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
* if tx send list is empty - return 0;
* may happen, as RX_COMPLETE handler doesn't check list emptiness.
*/
- if (list_empty(&dev->wr_processing_list_head.link)) {
+ if (list_empty(&dev->wr_processing_list)) {
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
out_ipc_locked = 0;
return 0;
}
- ipc_link = list_entry(dev->wr_processing_list_head.link.next,
- struct wr_msg_ctl_info, link);
+ ipc_link = list_first_entry(&dev->wr_processing_list,
+ struct wr_msg_ctl_info, link);
/* first 4 bytes of the data is the doorbell value (IPC header) */
length = ipc_link->length - sizeof(uint32_t);
doorbell_val = *(uint32_t *)ipc_link->inline_data;
@@ -338,7 +338,7 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
ipc_send_compl = ipc_link->ipc_send_compl;
ipc_send_compl_prm = ipc_link->ipc_send_compl_prm;
list_del_init(&ipc_link->link);
- list_add_tail(&ipc_link->link, &dev->wr_free_list_head.link);
+ list_add(&ipc_link->link, &dev->wr_free_list);
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
/*
@@ -372,18 +372,18 @@ static int write_ipc_to_queue(struct ishtp_device *dev,
unsigned char *msg, int length)
{
struct wr_msg_ctl_info *ipc_link;
- unsigned long flags;
+ unsigned long flags;
if (length > IPC_FULL_MSG_SIZE)
return -EMSGSIZE;
spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
- if (list_empty(&dev->wr_free_list_head.link)) {
+ if (list_empty(&dev->wr_free_list)) {
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
return -ENOMEM;
}
- ipc_link = list_entry(dev->wr_free_list_head.link.next,
- struct wr_msg_ctl_info, link);
+ ipc_link = list_first_entry(&dev->wr_free_list,
+ struct wr_msg_ctl_info, link);
list_del_init(&ipc_link->link);
ipc_link->ipc_send_compl = ipc_send_compl;
@@ -391,7 +391,7 @@ static int write_ipc_to_queue(struct ishtp_device *dev,
ipc_link->length = length;
memcpy(ipc_link->inline_data, msg, length);
- list_add_tail(&ipc_link->link, &dev->wr_processing_list_head.link);
+ list_add_tail(&ipc_link->link, &dev->wr_processing_list);
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
write_ipc_from_queue(dev);
@@ -487,17 +487,13 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
{
uint32_t reset_id;
unsigned long flags;
- struct wr_msg_ctl_info *processing, *next;
/* Read reset ID */
reset_id = ish_reg_read(dev, IPC_REG_ISH2HOST_MSG) & 0xFFFF;
/* Clear IPC output queue */
spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
- list_for_each_entry_safe(processing, next,
- &dev->wr_processing_list_head.link, link) {
- list_move_tail(&processing->link, &dev->wr_free_list_head.link);
- }
+ list_splice_init(&dev->wr_processing_list, &dev->wr_free_list);
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
/* ISHTP notification in IPC_RESET */
@@ -921,9 +917,9 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
spin_lock_init(&dev->out_ipc_spinlock);
/* Init IPC processing and free lists */
- INIT_LIST_HEAD(&dev->wr_processing_list_head.link);
- INIT_LIST_HEAD(&dev->wr_free_list_head.link);
- for (i = 0; i < IPC_TX_FIFO_SIZE; ++i) {
+ INIT_LIST_HEAD(&dev->wr_processing_list);
+ INIT_LIST_HEAD(&dev->wr_free_list);
+ for (i = 0; i < IPC_TX_FIFO_SIZE; i++) {
struct wr_msg_ctl_info *tx_buf;
tx_buf = devm_kzalloc(&pdev->dev,
@@ -939,7 +935,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
i);
break;
}
- list_add_tail(&tx_buf->link, &dev->wr_free_list_head.link);
+ list_add_tail(&tx_buf->link, &dev->wr_free_list);
}
dev->ops = &ish_hw_ops;
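
Turning the dummy wr_msg_ctl_info heads into plain list_heads lets the code use the stock helpers: list_first_entry() instead of the open-coded list_entry(head.link.next, ...), and a single list_splice_init() to move every in-flight element back to the free list on reset in O(1). A sketch of that drain pattern (the "item" type and names are illustrative):

#include <linux/list.h>
#include <linux/spinlock.h>

struct item { struct list_head link; };

/* Recycle everything queued on @busy onto @free_list in one splice. */
static void recycle_all(struct list_head *busy, struct list_head *free_list,
			spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_splice_init(busy, free_list);  /* @busy is left empty */
	spin_unlock_irqrestore(lock, flags);
}
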
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index a1125a5c7965..8793cc49f855 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
{0, }
};
@@ -114,18 +115,19 @@ static const struct pci_device_id ish_invalid_pci_ids[] = {
*/
static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ishtp_device *dev;
+ int ret;
struct ish_hw *hw;
- int ret;
+ struct ishtp_device *ishtp;
+ struct device *dev = &pdev->dev;
/* Check for invalid platforms for ISH support */
if (pci_dev_present(ish_invalid_pci_ids))
return -ENODEV;
/* enable pci dev */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
- dev_err(&pdev->dev, "ISH: Failed to enable PCI device\n");
+ dev_err(dev, "ISH: Failed to enable PCI device\n");
return ret;
}
@@ -133,65 +135,44 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* pci request regions for ISH driver */
- ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ ret = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
if (ret) {
- dev_err(&pdev->dev, "ISH: Failed to get PCI regions\n");
- goto disable_device;
+ dev_err(dev, "ISH: Failed to get PCI regions\n");
+ return ret;
}
/* allocates and initializes the ISH dev structure */
- dev = ish_dev_init(pdev);
- if (!dev) {
+ ishtp = ish_dev_init(pdev);
+ if (!ishtp) {
ret = -ENOMEM;
- goto release_regions;
+ return ret;
}
- hw = to_ish_hw(dev);
- dev->print_log = ish_event_tracer;
+ hw = to_ish_hw(ishtp);
+ ishtp->print_log = ish_event_tracer;
/* mapping IO device memory */
- hw->mem_addr = pci_iomap(pdev, 0, 0);
- if (!hw->mem_addr) {
- dev_err(&pdev->dev, "ISH: mapping I/O range failure\n");
- ret = -ENOMEM;
- goto free_device;
- }
-
- dev->pdev = pdev;
-
+ hw->mem_addr = pcim_iomap_table(pdev)[0];
+ ishtp->pdev = pdev;
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
/* request and enable interrupt */
- ret = request_irq(pdev->irq, ish_irq_handler, IRQF_SHARED,
- KBUILD_MODNAME, dev);
+ ret = devm_request_irq(dev, pdev->irq, ish_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, ishtp);
if (ret) {
- dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n",
- pdev->irq);
- goto free_device;
+ dev_err(dev, "ISH: request IRQ %d failed\n", pdev->irq);
+ return ret;
}
- dev_set_drvdata(dev->devc, dev);
+ dev_set_drvdata(ishtp->devc, ishtp);
- init_waitqueue_head(&dev->suspend_wait);
- init_waitqueue_head(&dev->resume_wait);
+ init_waitqueue_head(&ishtp->suspend_wait);
+ init_waitqueue_head(&ishtp->resume_wait);
- ret = ish_init(dev);
+ ret = ish_init(ishtp);
if (ret)
- goto free_irq;
+ return ret;
return 0;
-
-free_irq:
- free_irq(pdev->irq, dev);
-free_device:
- pci_iounmap(pdev, hw->mem_addr);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_clear_master(pdev);
- pci_disable_device(pdev);
- dev_err(&pdev->dev, "ISH: PCI driver initialization failed.\n");
-
- return ret;
}
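
The rewrite above moves ish_probe() to managed resources (pcim_enable_device(), pcim_iomap_regions(), devm_request_irq()), so every early return unwinds automatically and the goto ladder disappears; ish_remove() shrinks for the same reason. A minimal managed-PCI probe skeleton, for illustration only (BAR 0, shared IRQ assumed):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *base;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;            /* nothing to unwind */

	ret = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
	if (ret)
		return ret;            /* device auto-disabled on failure */

	base = pcim_iomap_table(pdev)[0];
	/* ... devm_request_irq(&pdev->dev, pdev->irq, handler, IRQF_SHARED,
	 *     KBUILD_MODNAME, priv) and the rest of init ... */
	return 0;
}
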
/**
@@ -203,16 +184,9 @@ disable_device:
static void ish_remove(struct pci_dev *pdev)
{
struct ishtp_device *ishtp_dev = pci_get_drvdata(pdev);
- struct ish_hw *hw = to_ish_hw(ishtp_dev);
ishtp_bus_remove_all_clients(ishtp_dev, false);
ish_device_disable(ishtp_dev);
-
- free_irq(pdev->irq, ishtp_dev);
- pci_iounmap(pdev, hw->mem_addr);
- pci_release_regions(pdev);
- pci_clear_master(pdev);
- pci_disable_device(pdev);
}
static struct device __maybe_unused *ish_resume_device;
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 2d28cffc1404..e64243bc9c96 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -320,23 +320,14 @@ do_get_report:
*/
static void ish_cl_event_cb(struct ishtp_cl_device *device)
{
- struct ishtp_cl *hid_ishtp_cl = device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(device);
struct ishtp_cl_rb *rb_in_proc;
size_t r_length;
- unsigned long flags;
if (!hid_ishtp_cl)
return;
- spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags);
- while (!list_empty(&hid_ishtp_cl->in_process_list.list)) {
- rb_in_proc = list_entry(
- hid_ishtp_cl->in_process_list.list.next,
- struct ishtp_cl_rb, list);
- list_del_init(&rb_in_proc->list);
- spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock,
- flags);
-
+ while ((rb_in_proc = ishtp_cl_rx_get_rb(hid_ishtp_cl)) != NULL) {
if (!rb_in_proc->buffer.data)
return;
@@ -346,9 +337,7 @@ static void ish_cl_event_cb(struct ishtp_cl_device *device)
process_recv(hid_ishtp_cl, rb_in_proc->buffer.data, r_length);
ishtp_cl_io_rb_recycle(rb_in_proc);
- spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags);
}
- spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock, flags);
}
/**
@@ -637,8 +626,8 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
{
struct ishtp_device *dev;
- unsigned long flags;
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_fw_client *fw_client;
int i;
int rv;
@@ -660,16 +649,14 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
hid_ishtp_cl->rx_ring_size = HID_CL_RX_RING_SIZE;
hid_ishtp_cl->tx_ring_size = HID_CL_TX_RING_SIZE;
- spin_lock_irqsave(&dev->fw_clients_lock, flags);
- i = ishtp_fw_cl_by_uuid(dev, &hid_ishtp_guid);
- if (i < 0) {
- spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid);
+ if (!fw_client) {
dev_err(&client_data->cl_device->dev,
"ish client uuid not found\n");
- return i;
+ return -ENOENT;
}
- hid_ishtp_cl->fw_client_id = dev->fw_clients[i].client_id;
- spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+
+ hid_ishtp_cl->fw_client_id = fw_client->client_id;
hid_ishtp_cl->state = ISHTP_CL_CONNECTING;
rv = ishtp_cl_connect(hid_ishtp_cl);
@@ -765,7 +752,7 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
if (!hid_ishtp_cl)
return;
- cl_device->driver_data = hid_ishtp_cl;
+ ishtp_set_drvdata(cl_device, hid_ishtp_cl);
hid_ishtp_cl->client_data = client_data;
client_data->hid_ishtp_cl = hid_ishtp_cl;
@@ -814,7 +801,7 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
if (!hid_ishtp_cl)
return -ENOMEM;
- cl_device->driver_data = hid_ishtp_cl;
+ ishtp_set_drvdata(cl_device, hid_ishtp_cl);
hid_ishtp_cl->client_data = client_data;
client_data->hid_ishtp_cl = hid_ishtp_cl;
client_data->cl_device = cl_device;
@@ -844,7 +831,7 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
*/
static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
{
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
@@ -874,7 +861,7 @@ static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
*/
static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
{
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
@@ -898,7 +885,7 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
static int hid_ishtp_cl_suspend(struct device *device)
{
struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
@@ -919,7 +906,7 @@ static int hid_ishtp_cl_suspend(struct device *device)
static int hid_ishtp_cl_resume(struct device *device)
{
struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 2623a567ffba..728dc6d4561a 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -149,6 +149,31 @@ int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *uuid)
EXPORT_SYMBOL(ishtp_fw_cl_by_uuid);
/**
+ * ishtp_fw_cl_get_client() - return fw client information for a UUID
+ * @dev: the ishtp device structure
+ * @uuid: uuid of the client to search
+ *
+ * Search the firmware clients by UUID and return the matching client
+ * information.
+ *
+ * Return: pointer to the client information on success, NULL on failure.
+ */
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const uuid_le *uuid)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->fw_clients_lock, flags);
+ i = ishtp_fw_cl_by_uuid(dev, uuid);
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ if (i < 0 || dev->fw_clients[i].props.fixed_address)
+ return NULL;
+
+ return &dev->fw_clients[i];
+}
+EXPORT_SYMBOL(ishtp_fw_cl_get_client);
+
+/**
* ishtp_fw_cl_by_id() - return index to fw_clients for client_id
* @dev: the ishtp device structure
* @client_id: fw client id to search
@@ -564,6 +589,33 @@ void ishtp_put_device(struct ishtp_cl_device *cl_device)
EXPORT_SYMBOL(ishtp_put_device);
/**
+ * ishtp_set_drvdata() - set client driver data
+ * @cl_device: client device instance
+ * @data: driver data to be set
+ *
+ * Set client driver data to cl_device->driver_data.
+ */
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data)
+{
+ cl_device->driver_data = data;
+}
+EXPORT_SYMBOL(ishtp_set_drvdata);
+
+/**
+ * ishtp_get_drvdata() - get client driver data
+ * @cl_device: client device instance
+ *
+ * Get client driver data from cl_device->driver_data.
+ *
+ * Return: pointer to the driver data
+ */
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device)
+{
+ return cl_device->driver_data;
+}
+EXPORT_SYMBOL(ishtp_get_drvdata);
+
+/**
* ishtp_bus_new_client() - Create a new client
* @dev: ISHTP device instance
*
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
index a1ffae7f26ad..b8a5bcc82536 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.h
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -101,6 +101,9 @@ void ishtp_reset_compl_handler(struct ishtp_device *dev);
void ishtp_put_device(struct ishtp_cl_device *);
void ishtp_get_device(struct ishtp_cl_device *);
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
+
int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
struct module *owner);
#define ishtp_cl_driver_register(driver) \
@@ -110,5 +113,7 @@ void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
int ishtp_register_event_cb(struct ishtp_cl_device *device,
void (*read_cb)(struct ishtp_cl_device *));
int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *cuuid);
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const uuid_le *uuid);
#endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
index b9b917d2d50d..248651c35497 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -69,6 +69,8 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
int j;
unsigned long flags;
+ cl->tx_ring_free_size = 0;
+
/* Allocate pool to free Tx bufs */
for (j = 0; j < cl->tx_ring_size; ++j) {
struct ishtp_cl_tx_ring *tx_buf;
@@ -85,6 +87,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
}
return 0;
@@ -144,6 +147,7 @@ void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
tx_buf = list_entry(cl->tx_free_list.list.next,
struct ishtp_cl_tx_ring, list);
list_del(&tx_buf->list);
+ --cl->tx_ring_free_size;
kfree(tx_buf->send_buf.data);
kfree(tx_buf);
}
@@ -255,3 +259,48 @@ int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
return rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
+
+/**
+ * ishtp_cl_tx_empty() - test whether the client's tx buffer list is empty
+ * @cl: Pointer to client device instance
+ *
+ * Check whether the client device tx buffer list is empty.
+ *
+ * Return: true if the client tx buffer list is empty, false otherwise
+ */
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
+{
+ int tx_list_empty;
+ unsigned long tx_flags;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ tx_list_empty = list_empty(&cl->tx_list.list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ return !!tx_list_empty;
+}
+EXPORT_SYMBOL(ishtp_cl_tx_empty);
+
+/**
+ * ishtp_cl_rx_get_rb() - get an rb from the client's in-process list
+ * @cl: Pointer to client device instance
+ *
+ * Remove and return the first rb from the client device in-process list.
+ *
+ * Return: rb pointer if the list isn't empty, NULL otherwise
+ */
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
+{
+ unsigned long rx_flags;
+ struct ishtp_cl_rb *rb;
+
+ spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
+ rb = list_first_entry_or_null(&cl->in_process_list.list,
+ struct ishtp_cl_rb, list);
+ if (rb)
+ list_del_init(&rb->list);
+ spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);
+
+ return rb;
+}
+EXPORT_SYMBOL(ishtp_cl_rx_get_rb);
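
ishtp_cl_rx_get_rb() folds the lock/list_del dance into one call, which is what lets ish_cl_event_cb() above collapse into a plain while loop. The consumer pattern, sketched with a hypothetical process() handler:

struct ishtp_cl_rb *rb;

while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
	process(rb->buffer.data, rb->buf_idx); /* buf_idx: bytes received */
	ishtp_cl_io_rb_recycle(rb);            /* return rb to the ring */
}
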
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 007443ef5fca..faeccdb1475b 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -22,6 +22,25 @@
#include "hbm.h"
#include "client.h"
+int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
+{
+ unsigned long tx_free_flags;
+ int size;
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+
+ return size;
+}
+EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
+
+int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
+{
+ return cl->tx_ring_free_size;
+}
+EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
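
tx_ring_free_size is kept in step with every list_add/list_del on tx_free_list, so ishtp_cl_get_tx_free_buffer_size() can report free capacity in bytes (free rings times the fw client's max message length) without walking the list under the lock. A client could use it for simple flow control, roughly:

/* Hypothetical check before queueing a message of @length bytes. */
if (ishtp_cl_get_tx_free_buffer_size(cl) < length)
	return -ENOSPC;	/* or wait until ishtp_cl_tx_empty(cl) */
return ishtp_cl_send(cl, buf, length);
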
+
/**
* ishtp_read_list_flush() - Flush read queue
* @cl: ishtp client instance
@@ -90,6 +109,7 @@ static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
+ cl->tx_ring_free_size = cl->tx_ring_size;
/* dma */
cl->last_tx_path = CL_TX_PATH_IPC;
@@ -577,6 +597,8 @@ int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
* max ISHTP message size per client
*/
list_del_init(&cl_msg->list);
+ --cl->tx_ring_free_size;
+
spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
memcpy(cl_msg->send_buf.data, buf, length);
cl_msg->send_buf.size = length;
@@ -685,6 +707,7 @@ static void ipc_tx_callback(void *prm)
ishtp_write_message(dev, &ishtp_hdr, pmsg);
spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
tx_free_flags);
} else {
@@ -778,6 +801,7 @@ static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
++cl->send_msg_cnt_dma;
}
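With the free-entry counter maintained in the hunks above, a caller can apply back-pressure before queuing. A minimal sketch, assuming a caller-chosen rejection policy (only ishtp_cl_send() and the two new helpers come from this patch):

    static int my_try_send(struct ishtp_cl *cl, uint8_t *buf, size_t len)
    {
            if (!ishtp_cl_get_tx_free_rings(cl))
                    return -EAGAIN;         /* no free tx ring entry */
            if (len > ishtp_cl_get_tx_free_buffer_size(cl))
                    return -EMSGSIZE;       /* more than the free space can hold */
            return ishtp_cl_send(cl, buf, len);
    }

Note that ishtp_cl_get_tx_free_rings() reads tx_ring_free_size without taking the spinlock, so its result is only a snapshot; that is fine for advisory flow control but not for a hard guarantee.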
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
index 79eade547f5d..042f4c4853b1 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.h
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -84,6 +84,7 @@ struct ishtp_cl {
/* Client Tx buffers list */
unsigned int tx_ring_size;
struct ishtp_cl_tx_ring tx_list, tx_free_list;
+ int tx_ring_free_size;
spinlock_t tx_list_spinlock;
spinlock_t tx_free_list_spinlock;
size_t tx_offs; /* Offset in buffer at head of 'tx_list' */
@@ -137,6 +138,8 @@ int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
+int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl);
+int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl);
/* DMA I/F functions */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
@@ -178,5 +181,7 @@ int ishtp_cl_flush_queues(struct ishtp_cl *cl);
/* exported functions from ISHTP client buffer management scope */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
#endif /* _ISHTP_CLIENT_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index 6a6d927b78b0..e7c6bfefaf9e 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -207,7 +207,7 @@ struct ishtp_device {
struct work_struct bh_hbm_work;
/* IPC write queue */
- struct wr_msg_ctl_info wr_processing_list_head, wr_free_list_head;
+ struct list_head wr_processing_list, wr_free_list;
/* For both processing list and free list */
spinlock_t wr_processing_spinlock;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index e0a06be5ef5c..5dd3a8245f0f 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -3335,6 +3335,7 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
void wacom_setup_device_quirks(struct wacom *wacom)
{
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct wacom_features *features = &wacom->wacom_wac.features;
/* The pen and pad share the same interface on most devices */
@@ -3464,6 +3465,24 @@ void wacom_setup_device_quirks(struct wacom *wacom)
if (features->type == REMOTE)
features->device_type |= WACOM_DEVICETYPE_WL_MONITOR;
+
+ /* HID descriptor for DTK-2451 / DTH-2452 claims to report lots
+ * of things it shouldn't. Let's fix up the damage...
+ */
+ if (wacom->hdev->product == 0x382 || wacom->hdev->product == 0x37d) {
+ features->quirks &= ~WACOM_QUIRK_TOOLSERIAL;
+ __clear_bit(BTN_TOOL_BRUSH, wacom_wac->pen_input->keybit);
+ __clear_bit(BTN_TOOL_PENCIL, wacom_wac->pen_input->keybit);
+ __clear_bit(BTN_TOOL_AIRBRUSH, wacom_wac->pen_input->keybit);
+ __clear_bit(ABS_Z, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_DISTANCE, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_TILT_X, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_TILT_Y, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_WHEEL, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_MISC, wacom_wac->pen_input->absbit);
+ __clear_bit(MSC_SERIAL, wacom_wac->pen_input->mscbit);
+ __clear_bit(EV_MSC, wacom_wac->pen_input->evbit);
+ }
}
int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index ced041899456..f4d08c8ac7f8 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -76,6 +76,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
__u32 version)
{
int ret = 0;
+ unsigned int cur_cpu;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
@@ -118,9 +119,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
* the CPU attempting to connect may not be CPU 0.
*/
if (version >= VERSION_WIN8_1) {
- msg->target_vcpu =
- hv_cpu_number_to_vp_number(smp_processor_id());
- vmbus_connection.connect_cpu = smp_processor_id();
+ cur_cpu = get_cpu();
+ msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+ vmbus_connection.connect_cpu = cur_cpu;
+ put_cpu();
} else {
msg->target_vcpu = 0;
vmbus_connection.connect_cpu = 0;
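The pattern applied here is worth spelling out: smp_processor_id() is only stable while preemption is disabled, and the old code read it twice, so the two values could disagree after a migration. get_cpu() returns a CPU id that stays valid until the matching put_cpu(); the helper names below are hypothetical:

    unsigned int cpu = get_cpu();   /* preemption disabled from here */
    use_cpu_id(cpu);                /* both uses see the same id ... */
    use_cpu_id_again(cpu);          /* ... with no migration in between */
    put_cpu();                      /* preemption re-enabled */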
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 5e449eac788a..92de8139d398 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -852,7 +852,7 @@ static int aspeed_create_pwm_cooling(struct device *dev,
dev_err(dev, "Property 'cooling-levels' cannot be read.\n");
return ret;
}
- snprintf(cdev->name, MAX_CDEV_NAME_LEN, "%s%d", child->name, pwm_port);
+ snprintf(cdev->name, MAX_CDEV_NAME_LEN, "%pOFn%d", child, pwm_port);
cdev->tcdev = thermal_of_cooling_device_register(child,
cdev->name,
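Several hunks in this series switch from printing child->name to the %pOFn printk format, which prints the name of a struct device_node. A sketch of the effect, with the node name assumed:

    /* For a DT node "fan@3", %pOFn prints "fan" (the name without the
     * unit address), so with pwm_port == 3 this yields "fan3". */
    snprintf(cdev->name, MAX_CDEV_NAME_LEN, "%pOFn%d", child, pwm_port);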
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index a6636fe42189..a7cf00885c5d 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -1210,10 +1210,8 @@ static int atk_register_hwmon(struct atk_data *data)
data->hwmon_dev = hwmon_device_register_with_groups(dev, "atk0110",
data,
data->attr_groups);
- if (IS_ERR(data->hwmon_dev))
- return PTR_ERR(data->hwmon_dev);
- return 0;
+ return PTR_ERR_OR_ZERO(data->hwmon_dev);
}
static int atk_probe_if(struct atk_data *data)
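PTR_ERR_OR_ZERO() folds the common error-pointer epilogue into one line; the deleted branch and the helper are equivalent:

    /* PTR_ERR_OR_ZERO(ptr) behaves as: */
    if (IS_ERR(ptr))
            return PTR_ERR(ptr);    /* negative errno encoded in the pointer */
    return 0;                       /* valid pointer: report success */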
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 33d51281272b..975c95169884 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -24,6 +24,9 @@
#include <linux/string.h>
#include <linux/thermal.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/hwmon.h>
+
#define HWMON_ID_PREFIX "hwmon"
#define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
@@ -171,6 +174,13 @@ static int hwmon_thermal_add_sensor(struct device *dev,
}
#endif /* IS_REACHABLE(CONFIG_THERMAL) && ... */
+static int hwmon_attr_base(enum hwmon_sensor_types type)
+{
+ if (type == hwmon_in)
+ return 0;
+ return 1;
+}
+
/* sysfs attribute management */
static ssize_t hwmon_attr_show(struct device *dev,
@@ -185,6 +195,9 @@ static ssize_t hwmon_attr_show(struct device *dev,
if (ret < 0)
return ret;
+ trace_hwmon_attr_show(hattr->index + hwmon_attr_base(hattr->type),
+ hattr->name, val);
+
return sprintf(buf, "%ld\n", val);
}
@@ -193,6 +206,7 @@ static ssize_t hwmon_attr_show_string(struct device *dev,
char *buf)
{
struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ enum hwmon_sensor_types type = hattr->type;
const char *s;
int ret;
@@ -201,6 +215,9 @@ static ssize_t hwmon_attr_show_string(struct device *dev,
if (ret < 0)
return ret;
+ trace_hwmon_attr_show_string(hattr->index + hwmon_attr_base(type),
+ hattr->name, s);
+
return sprintf(buf, "%s\n", s);
}
@@ -221,14 +238,10 @@ static ssize_t hwmon_attr_store(struct device *dev,
if (ret < 0)
return ret;
- return count;
-}
+ trace_hwmon_attr_store(hattr->index + hwmon_attr_base(hattr->type),
+ hattr->name, val);
-static int hwmon_attr_base(enum hwmon_sensor_types type)
-{
- if (type == hwmon_in)
- return 0;
- return 1;
+ return count;
}
static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
@@ -356,6 +369,7 @@ static const char * const hwmon_in_attr_templates[] = {
[hwmon_in_max_alarm] = "in%d_max_alarm",
[hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm",
[hwmon_in_crit_alarm] = "in%d_crit_alarm",
+ [hwmon_in_enable] = "in%d_enable",
};
static const char * const hwmon_curr_attr_templates[] = {
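CREATE_TRACE_POINTS must be defined in exactly one translation unit before including the trace header, which instantiates the tracepoints declared there. The header itself is not part of this hunk; the following is only a hedged sketch of the shape its hwmon_attr_show event presumably has, inferred from the call sites above (the exact field layout is an assumption):

    TRACE_EVENT(hwmon_attr_show,
            TP_PROTO(int index, const char *attr_name, long val),
            TP_ARGS(index, attr_name, val),
            TP_STRUCT__entry(
                    __field(int, index)
                    __string(attr_name, attr_name)
                    __field(long, val)
            ),
            TP_fast_assign(
                    __entry->index = index;
                    __assign_str(attr_name, attr_name);
                    __entry->val = val;
            ),
            TP_printk("index=%d, attr_name=%s, val=%ld",
                      __entry->index, __get_str(attr_name), __entry->val)
    );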
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 1f643782ce04..9e92673f6913 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -101,7 +101,7 @@ static struct platform_driver aem_driver = {
struct aem_ipmi_data {
struct completion read_complete;
struct ipmi_addr address;
- ipmi_user_t user;
+ struct ipmi_user *user;
int interface;
struct kernel_ipmi_msg tx_message;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index ab72cabf5a95..bb17a29af64c 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -84,7 +84,7 @@ struct ibmpex_bmc_data {
struct ipmi_addr address;
struct completion read_complete;
- ipmi_user_t user;
+ struct ipmi_user *user;
int interface;
struct kernel_ipmi_msg tx_message;
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index 83472808c816..0ccca87f5271 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -458,9 +458,6 @@ static int populate_attr_groups(struct platform_device *pdev)
for_each_child_of_node(opal, np) {
const char *label;
- if (np->name == NULL)
- continue;
-
type = get_sensor_type(np);
if (type == MAX_SENSOR_TYPE)
continue;
@@ -589,9 +586,6 @@ static int create_device_attrs(struct platform_device *pdev)
const char *label;
enum sensors type;
- if (np->name == NULL)
- continue;
-
type = get_sensor_type(np);
if (type == MAX_SENSOR_TYPE)
continue;
@@ -603,8 +597,8 @@ static int create_device_attrs(struct platform_device *pdev)
if (of_property_read_u32(np, "sensor-id", &sensor_id) &&
of_property_read_u32(np, "sensor-data", &sensor_id)) {
dev_info(&pdev->dev,
- "'sensor-id' missing in the node '%s'\n",
- np->name);
+ "'sensor-id' missing in the node '%pOFn'\n",
+ np);
continue;
}
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 2f3f875c06ac..eed66e533ee2 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -65,13 +65,9 @@ static int iio_hwmon_probe(struct platform_device *pdev)
int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
enum iio_chan_type type;
struct iio_channel *channels;
- const char *name = "iio_hwmon";
struct device *hwmon_dev;
char *sname;
- if (dev->of_node && dev->of_node->name)
- name = dev->of_node->name;
-
channels = devm_iio_channel_get_all(dev);
if (IS_ERR(channels)) {
if (PTR_ERR(channels) == -ENODEV)
@@ -141,11 +137,15 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->attr_group.attrs = st->attrs;
st->groups[0] = &st->attr_group;
- sname = devm_kstrdup(dev, name, GFP_KERNEL);
- if (!sname)
- return -ENOMEM;
+ if (dev->of_node) {
+ sname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
+ if (!sname)
+ return -ENOMEM;
+ strreplace(sname, '-', '_');
+ } else {
+ sname = "iio_hwmon";
+ }
- strreplace(sname, '-', '_');
hwmon_dev = devm_hwmon_device_register_with_groups(dev, sname, st,
st->groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index e6b49500c52a..d61688f04594 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -38,9 +38,12 @@
#define INA3221_WARN3 0x0c
#define INA3221_MASK_ENABLE 0x0f
-#define INA3221_CONFIG_MODE_SHUNT BIT(1)
-#define INA3221_CONFIG_MODE_BUS BIT(2)
-#define INA3221_CONFIG_MODE_CONTINUOUS BIT(3)
+#define INA3221_CONFIG_MODE_MASK GENMASK(2, 0)
+#define INA3221_CONFIG_MODE_POWERDOWN 0
+#define INA3221_CONFIG_MODE_SHUNT BIT(0)
+#define INA3221_CONFIG_MODE_BUS BIT(1)
+#define INA3221_CONFIG_MODE_CONTINUOUS BIT(2)
+#define INA3221_CONFIG_CHx_EN(x) BIT(14 - (x))
#define INA3221_RSHUNT_DEFAULT 10000
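The channel-enable macro deserves a worked example: with channels numbered 0..2, BIT(14 - (x)) places the enables in CONFIG bits 14..12, matching the INA3221 register layout:

    INA3221_CONFIG_CHx_EN(0)    /* BIT(14) = 0x4000, channel 1 enable */
    INA3221_CONFIG_CHx_EN(1)    /* BIT(13) = 0x2000, channel 2 enable */
    INA3221_CONFIG_CHx_EN(2)    /* BIT(12) = 0x1000, channel 3 enable */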
@@ -74,30 +77,37 @@ enum ina3221_channels {
INA3221_NUM_CHANNELS
};
-static const unsigned int register_channel[] = {
- [INA3221_SHUNT1] = INA3221_CHANNEL1,
- [INA3221_SHUNT2] = INA3221_CHANNEL2,
- [INA3221_SHUNT3] = INA3221_CHANNEL3,
- [INA3221_CRIT1] = INA3221_CHANNEL1,
- [INA3221_CRIT2] = INA3221_CHANNEL2,
- [INA3221_CRIT3] = INA3221_CHANNEL3,
- [INA3221_WARN1] = INA3221_CHANNEL1,
- [INA3221_WARN2] = INA3221_CHANNEL2,
- [INA3221_WARN3] = INA3221_CHANNEL3,
+/**
+ * struct ina3221_input - channel input source specific information
+ * @label: label of channel input source
+ * @shunt_resistor: shunt resistor value of channel input source
+ * @disconnected: connection status of channel input source
+ */
+struct ina3221_input {
+ const char *label;
+ int shunt_resistor;
+ bool disconnected;
};
/**
* struct ina3221_data - device specific information
* @regmap: Register map of the device
* @fields: Register fields of the device
- * @shunt_resistors: Array of resistor values per channel
+ * @inputs: Array of channel input source specific structures
+ * @reg_config: Register value of INA3221_CONFIG
*/
struct ina3221_data {
struct regmap *regmap;
struct regmap_field *fields[F_MAX_FIELDS];
- int shunt_resistors[INA3221_NUM_CHANNELS];
+ struct ina3221_input inputs[INA3221_NUM_CHANNELS];
+ u32 reg_config;
};
+static inline bool ina3221_is_enabled(struct ina3221_data *ina, int channel)
+{
+ return ina->reg_config & INA3221_CONFIG_CHx_EN(channel);
+}
+
static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
int *val)
{
@@ -113,107 +123,284 @@ static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
return 0;
}
-static ssize_t ina3221_show_bus_voltage(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static const u8 ina3221_in_reg[] = {
+ INA3221_BUS1,
+ INA3221_BUS2,
+ INA3221_BUS3,
+ INA3221_SHUNT1,
+ INA3221_SHUNT2,
+ INA3221_SHUNT3,
+};
+
+static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
{
- struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ const bool is_shunt = channel > INA3221_CHANNEL3;
struct ina3221_data *ina = dev_get_drvdata(dev);
- unsigned int reg = sd_attr->index;
- int val, voltage_mv, ret;
-
- ret = ina3221_read_value(ina, reg, &val);
- if (ret)
- return ret;
+ u8 reg = ina3221_in_reg[channel];
+ int regval, ret;
+
+ /* Translate shunt channel index to sensor channel index */
+ channel %= INA3221_NUM_CHANNELS;
+
+ switch (attr) {
+ case hwmon_in_input:
+ if (!ina3221_is_enabled(ina, channel))
+ return -ENODATA;
+
+ ret = ina3221_read_value(ina, reg, &regval);
+ if (ret)
+ return ret;
+
+ /*
+ * Scale of shunt voltage (uV): LSB is 40uV
+ * Scale of bus voltage (mV): LSB is 8mV
+ */
+ *val = regval * (is_shunt ? 40 : 8);
+ return 0;
+ case hwmon_in_enable:
+ *val = ina3221_is_enabled(ina, channel);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
- voltage_mv = val * 8;
+static const u8 ina3221_curr_reg[][INA3221_NUM_CHANNELS] = {
+ [hwmon_curr_input] = { INA3221_SHUNT1, INA3221_SHUNT2, INA3221_SHUNT3 },
+ [hwmon_curr_max] = { INA3221_WARN1, INA3221_WARN2, INA3221_WARN3 },
+ [hwmon_curr_crit] = { INA3221_CRIT1, INA3221_CRIT2, INA3221_CRIT3 },
+ [hwmon_curr_max_alarm] = { F_WF1, F_WF2, F_WF3 },
+ [hwmon_curr_crit_alarm] = { F_CF1, F_CF2, F_CF3 },
+};
- return snprintf(buf, PAGE_SIZE, "%d\n", voltage_mv);
+static int ina3221_read_curr(struct device *dev, u32 attr,
+ int channel, long *val)
+{
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ struct ina3221_input *input = &ina->inputs[channel];
+ int resistance_uo = input->shunt_resistor;
+ u8 reg = ina3221_curr_reg[attr][channel];
+ int regval, voltage_nv, ret;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ if (!ina3221_is_enabled(ina, channel))
+ return -ENODATA;
+ /* fall through */
+ case hwmon_curr_crit:
+ case hwmon_curr_max:
+ ret = ina3221_read_value(ina, reg, &regval);
+ if (ret)
+ return ret;
+
+ /* Scale of shunt voltage: LSB is 40uV (40000nV) */
+ voltage_nv = regval * 40000;
+ /* Return current in mA */
+ *val = DIV_ROUND_CLOSEST(voltage_nv, resistance_uo);
+ return 0;
+ case hwmon_curr_crit_alarm:
+ case hwmon_curr_max_alarm:
+ ret = regmap_field_read(ina->fields[reg], &regval);
+ if (ret)
+ return ret;
+ *val = regval;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
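The unit handling in ina3221_read_curr() is easiest to check with numbers, assuming the default 10000 uOhm shunt and a raw shunt reading of 100:

    /* voltage_nv = 100 * 40000     = 4000000 nV  (4 mV across the shunt)
     * *val       = 4000000 / 10000 = 400 mA      (I = V / R)
     * Using nV over uOhm keeps the division integer-exact in mA. */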
-static ssize_t ina3221_show_shunt_voltage(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int ina3221_write_curr(struct device *dev, u32 attr,
+ int channel, long val)
{
- struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
struct ina3221_data *ina = dev_get_drvdata(dev);
- unsigned int reg = sd_attr->index;
- int val, voltage_uv, ret;
+ struct ina3221_input *input = &ina->inputs[channel];
+ int resistance_uo = input->shunt_resistor;
+ u8 reg = ina3221_curr_reg[attr][channel];
+ int regval, current_ma, voltage_uv;
- ret = ina3221_read_value(ina, reg, &val);
- if (ret)
- return ret;
- voltage_uv = val * 40;
+ /* clamp current */
+ current_ma = clamp_val(val,
+ INT_MIN / resistance_uo,
+ INT_MAX / resistance_uo);
+
+ voltage_uv = DIV_ROUND_CLOSEST(current_ma * resistance_uo, 1000);
- return snprintf(buf, PAGE_SIZE, "%d\n", voltage_uv);
+ /* clamp voltage */
+ voltage_uv = clamp_val(voltage_uv, -163800, 163800);
+
+ /* 1 / 40uV(scale) << 3(register shift) = 5 */
+ regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
+
+ return regmap_write(ina->regmap, reg, regval);
}
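ina3221_write_curr() is the inverse conversion; with the same assumed numbers (400 mA, 10000 uOhm shunt):

    /* voltage_uv = 400 * 10000 / 1000       = 4000 uV
     * regval     = round(4000 / 5) & 0xfff8 = 800 = 100 << 3
     * i.e. raw value 100 with the 40 uV LSB stored in bits [15:3],
     * the mirror image of the read path above. */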
-static ssize_t ina3221_show_current(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int ina3221_write_enable(struct device *dev, int channel, bool enable)
{
- struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
struct ina3221_data *ina = dev_get_drvdata(dev);
- unsigned int reg = sd_attr->index;
- unsigned int channel = register_channel[reg];
- int resistance_uo = ina->shunt_resistors[channel];
- int val, current_ma, voltage_nv, ret;
+ u16 config, mask = INA3221_CONFIG_CHx_EN(channel);
+ int ret;
- ret = ina3221_read_value(ina, reg, &val);
+ config = enable ? mask : 0;
+
+ /* Enable or disable the channel */
+ ret = regmap_update_bits(ina->regmap, INA3221_CONFIG, mask, config);
if (ret)
return ret;
- voltage_nv = val * 40000;
- current_ma = DIV_ROUND_CLOSEST(voltage_nv, resistance_uo);
+ /* Cache the latest config register value */
+ ret = regmap_read(ina->regmap, INA3221_CONFIG, &ina->reg_config);
+ if (ret)
+ return ret;
- return snprintf(buf, PAGE_SIZE, "%d\n", current_ma);
+ return 0;
}
-static ssize_t ina3221_set_current(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int ina3221_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ /* 0-align channel ID */
+ return ina3221_read_in(dev, attr, channel - 1, val);
+ case hwmon_curr:
+ return ina3221_read_curr(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ina3221_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_in:
+ /* 0-align channel ID */
+ return ina3221_write_enable(dev, channel - 1, val);
+ case hwmon_curr:
+ return ina3221_write_curr(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ina3221_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
{
- struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
struct ina3221_data *ina = dev_get_drvdata(dev);
- unsigned int reg = sd_attr->index;
- unsigned int channel = register_channel[reg];
- int resistance_uo = ina->shunt_resistors[channel];
- int val, current_ma, voltage_uv, ret;
+ int index = channel - 1;
- ret = kstrtoint(buf, 0, &current_ma);
- if (ret)
- return ret;
+ *str = ina->inputs[index].label;
- /* clamp current */
- current_ma = clamp_val(current_ma,
- INT_MIN / resistance_uo,
- INT_MAX / resistance_uo);
+ return 0;
+}
- voltage_uv = DIV_ROUND_CLOSEST(current_ma * resistance_uo, 1000);
+static umode_t ina3221_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct ina3221_data *ina = drvdata;
+ const struct ina3221_input *input = NULL;
+
+ switch (type) {
+ case hwmon_in:
+ /* Ignore in0_; channel 0 is a dummy entry */
+ if (channel == 0)
+ return 0;
+
+ switch (attr) {
+ case hwmon_in_label:
+ if (channel - 1 <= INA3221_CHANNEL3)
+ input = &ina->inputs[channel - 1];
+ /* Hide label node if label is not provided */
+ return (input && input->label) ? 0444 : 0;
+ case hwmon_in_input:
+ return 0444;
+ case hwmon_in_enable:
+ return 0644;
+ default:
+ return 0;
+ }
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_crit_alarm:
+ case hwmon_curr_max_alarm:
+ return 0444;
+ case hwmon_curr_crit:
+ case hwmon_curr_max:
+ return 0644;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
- /* clamp voltage */
- voltage_uv = clamp_val(voltage_uv, -163800, 163800);
+static const u32 ina3221_in_config[] = {
+ /* 0: dummy, skipped in is_visible */
+ HWMON_I_INPUT,
+ /* 1-3: bus voltage channels */
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ /* 4-6: shunt voltage channels */
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ 0
+};
- /* 1 / 40uV(scale) << 3(register shift) = 5 */
- val = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
+static const struct hwmon_channel_info ina3221_in = {
+ .type = hwmon_in,
+ .config = ina3221_in_config,
+};
- ret = regmap_write(ina->regmap, reg, val);
- if (ret)
- return ret;
+#define INA3221_HWMON_CURR_CONFIG (HWMON_C_INPUT | \
+ HWMON_C_CRIT | HWMON_C_CRIT_ALARM | \
+ HWMON_C_MAX | HWMON_C_MAX_ALARM)
- return count;
-}
+static const u32 ina3221_curr_config[] = {
+ INA3221_HWMON_CURR_CONFIG,
+ INA3221_HWMON_CURR_CONFIG,
+ INA3221_HWMON_CURR_CONFIG,
+ 0
+};
+
+static const struct hwmon_channel_info ina3221_curr = {
+ .type = hwmon_curr,
+ .config = ina3221_curr_config,
+};
+
+static const struct hwmon_channel_info *ina3221_info[] = {
+ &ina3221_in,
+ &ina3221_curr,
+ NULL
+};
+
+static const struct hwmon_ops ina3221_hwmon_ops = {
+ .is_visible = ina3221_is_visible,
+ .read_string = ina3221_read_string,
+ .read = ina3221_read,
+ .write = ina3221_write,
+};
+static const struct hwmon_chip_info ina3221_chip_info = {
+ .ops = &ina3221_hwmon_ops,
+ .info = ina3221_info,
+};
+
+/* Extra attribute groups */
static ssize_t ina3221_show_shunt(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
struct ina3221_data *ina = dev_get_drvdata(dev);
unsigned int channel = sd_attr->index;
- unsigned int resistance_uo;
-
- resistance_uo = ina->shunt_resistors[channel];
+ struct ina3221_input *input = &ina->inputs[channel];
- return snprintf(buf, PAGE_SIZE, "%d\n", resistance_uo);
+ return snprintf(buf, PAGE_SIZE, "%d\n", input->shunt_resistor);
}
static ssize_t ina3221_set_shunt(struct device *dev,
@@ -223,6 +410,7 @@ static ssize_t ina3221_set_shunt(struct device *dev,
struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
struct ina3221_data *ina = dev_get_drvdata(dev);
unsigned int channel = sd_attr->index;
+ struct ina3221_input *input = &ina->inputs[channel];
int val;
int ret;
@@ -232,43 +420,11 @@ static ssize_t ina3221_set_shunt(struct device *dev,
val = clamp_val(val, 1, INT_MAX);
- ina->shunt_resistors[channel] = val;
+ input->shunt_resistor = val;
return count;
}
-static ssize_t ina3221_show_alert(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
- struct ina3221_data *ina = dev_get_drvdata(dev);
- unsigned int field = sd_attr->index;
- unsigned int regval;
- int ret;
-
- ret = regmap_field_read(ina->fields[field], &regval);
- if (ret)
- return ret;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", regval);
-}
-
-/* bus voltage */
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO,
- ina3221_show_bus_voltage, NULL, INA3221_BUS1);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO,
- ina3221_show_bus_voltage, NULL, INA3221_BUS2);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO,
- ina3221_show_bus_voltage, NULL, INA3221_BUS3);
-
-/* calculated current */
-static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO,
- ina3221_show_current, NULL, INA3221_SHUNT1);
-static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO,
- ina3221_show_current, NULL, INA3221_SHUNT2);
-static SENSOR_DEVICE_ATTR(curr3_input, S_IRUGO,
- ina3221_show_current, NULL, INA3221_SHUNT3);
-
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt1_resistor, S_IRUGO | S_IWUSR,
ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL1);
@@ -277,83 +433,16 @@ static SENSOR_DEVICE_ATTR(shunt2_resistor, S_IRUGO | S_IWUSR,
static SENSOR_DEVICE_ATTR(shunt3_resistor, S_IRUGO | S_IWUSR,
ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL3);
-/* critical current */
-static SENSOR_DEVICE_ATTR(curr1_crit, S_IRUGO | S_IWUSR,
- ina3221_show_current, ina3221_set_current, INA3221_CRIT1);
-static SENSOR_DEVICE_ATTR(curr2_crit, S_IRUGO | S_IWUSR,
- ina3221_show_current, ina3221_set_current, INA3221_CRIT2);
-static SENSOR_DEVICE_ATTR(curr3_crit, S_IRUGO | S_IWUSR,
- ina3221_show_current, ina3221_set_current, INA3221_CRIT3);
-
-/* critical current alert */
-static SENSOR_DEVICE_ATTR(curr1_crit_alarm, S_IRUGO,
- ina3221_show_alert, NULL, F_CF1);
-static SENSOR_DEVICE_ATTR(curr2_crit_alarm, S_IRUGO,
- ina3221_show_alert, NULL, F_CF2);
-static SENSOR_DEVICE_ATTR(curr3_crit_alarm, S_IRUGO,
- ina3221_show_alert, NULL, F_CF3);
-
-/* warning current */
-static SENSOR_DEVICE_ATTR(curr1_max, S_IRUGO | S_IWUSR,
- ina3221_show_current, ina3221_set_current, INA3221_WARN1);
-static SENSOR_DEVICE_ATTR(curr2_max, S_IRUGO | S_IWUSR,
- ina3221_show_current, ina3221_set_current, INA3221_WARN2);
-static SENSOR_DEVICE_ATTR(curr3_max, S_IRUGO | S_IWUSR,
- ina3221_show_current, ina3221_set_current, INA3221_WARN3);
-
-/* warning current alert */
-static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO,
- ina3221_show_alert, NULL, F_WF1);
-static SENSOR_DEVICE_ATTR(curr2_max_alarm, S_IRUGO,
- ina3221_show_alert, NULL, F_WF2);
-static SENSOR_DEVICE_ATTR(curr3_max_alarm, S_IRUGO,
- ina3221_show_alert, NULL, F_WF3);
-
-/* shunt voltage */
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO,
- ina3221_show_shunt_voltage, NULL, INA3221_SHUNT1);
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO,
- ina3221_show_shunt_voltage, NULL, INA3221_SHUNT2);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO,
- ina3221_show_shunt_voltage, NULL, INA3221_SHUNT3);
-
static struct attribute *ina3221_attrs[] = {
- /* channel 1 */
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_curr1_input.dev_attr.attr,
&sensor_dev_attr_shunt1_resistor.dev_attr.attr,
- &sensor_dev_attr_curr1_crit.dev_attr.attr,
- &sensor_dev_attr_curr1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_curr1_max.dev_attr.attr,
- &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr,
-
- /* channel 2 */
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_curr2_input.dev_attr.attr,
&sensor_dev_attr_shunt2_resistor.dev_attr.attr,
- &sensor_dev_attr_curr2_crit.dev_attr.attr,
- &sensor_dev_attr_curr2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_curr2_max.dev_attr.attr,
- &sensor_dev_attr_curr2_max_alarm.dev_attr.attr,
- &sensor_dev_attr_in5_input.dev_attr.attr,
-
- /* channel 3 */
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_curr3_input.dev_attr.attr,
&sensor_dev_attr_shunt3_resistor.dev_attr.attr,
- &sensor_dev_attr_curr3_crit.dev_attr.attr,
- &sensor_dev_attr_curr3_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_curr3_max.dev_attr.attr,
- &sensor_dev_attr_curr3_max_alarm.dev_attr.attr,
- &sensor_dev_attr_in6_input.dev_attr.attr,
-
NULL,
};
ATTRIBUTE_GROUPS(ina3221);
static const struct regmap_range ina3221_yes_ranges[] = {
- regmap_reg_range(INA3221_SHUNT1, INA3221_BUS3),
+ regmap_reg_range(INA3221_CONFIG, INA3221_BUS3),
regmap_reg_range(INA3221_MASK_ENABLE, INA3221_MASK_ENABLE),
};
@@ -370,6 +459,66 @@ static const struct regmap_config ina3221_regmap_config = {
.volatile_table = &ina3221_volatile_table,
};
+static int ina3221_probe_child_from_dt(struct device *dev,
+ struct device_node *child,
+ struct ina3221_data *ina)
+{
+ struct ina3221_input *input;
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(child, "reg", &val);
+ if (ret) {
+ dev_err(dev, "missing reg property of %s\n", child->name);
+ return ret;
+ } else if (val > INA3221_CHANNEL3) {
+ dev_err(dev, "invalid reg %d of %s\n", val, child->name);
+ return -EINVAL;
+ }
+
+ input = &ina->inputs[val];
+
+ /* Mark the channel input as disconnected */
+ if (!of_device_is_available(child)) {
+ input->disconnected = true;
+ return 0;
+ }
+
+ /* Save the connected input label if available */
+ of_property_read_string(child, "label", &input->label);
+
+ /* Overwrite default shunt resistor value optionally */
+ if (!of_property_read_u32(child, "shunt-resistor-micro-ohms", &val)) {
+ if (val < 1 || val > INT_MAX) {
+ dev_err(dev, "invalid shunt resistor value %u of %s\n",
+ val, child->name);
+ return -EINVAL;
+ }
+ input->shunt_resistor = val;
+ }
+
+ return 0;
+}
+
+static int ina3221_probe_from_dt(struct device *dev, struct ina3221_data *ina)
+{
+ const struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int ret;
+
+ /* Compatible with non-DT platforms */
+ if (!np)
+ return 0;
+
+ for_each_child_of_node(np, child) {
+ ret = ina3221_probe_child_from_dt(dev, child, ina);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int ina3221_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -399,7 +548,13 @@ static int ina3221_probe(struct i2c_client *client,
}
for (i = 0; i < INA3221_NUM_CHANNELS; i++)
- ina->shunt_resistors[i] = INA3221_RSHUNT_DEFAULT;
+ ina->inputs[i].shunt_resistor = INA3221_RSHUNT_DEFAULT;
+
+ ret = ina3221_probe_from_dt(dev, ina);
+ if (ret) {
+ dev_err(dev, "Unable to probe from device tree\n");
+ return ret;
+ }
ret = regmap_field_write(ina->fields[F_RST], true);
if (ret) {
@@ -407,9 +562,25 @@ static int ina3221_probe(struct i2c_client *client,
return ret;
}
- hwmon_dev = devm_hwmon_device_register_with_groups(dev,
- client->name,
- ina, ina3221_groups);
+ /* Sync config register after reset */
+ ret = regmap_read(ina->regmap, INA3221_CONFIG, &ina->reg_config);
+ if (ret)
+ return ret;
+
+ /* Disable channels if their inputs are disconnected */
+ for (i = 0; i < INA3221_NUM_CHANNELS; i++) {
+ if (ina->inputs[i].disconnected)
+ ina->reg_config &= ~INA3221_CONFIG_CHx_EN(i);
+ }
+ ret = regmap_write(ina->regmap, INA3221_CONFIG, ina->reg_config);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, ina);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, ina,
+ &ina3221_chip_info,
+ ina3221_groups);
if (IS_ERR(hwmon_dev)) {
dev_err(dev, "Unable to register hwmon device\n");
return PTR_ERR(hwmon_dev);
@@ -418,6 +589,60 @@ static int ina3221_probe(struct i2c_client *client,
return 0;
}
+static int __maybe_unused ina3221_suspend(struct device *dev)
+{
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ int ret;
+
+ /* Save config register value and enable cache-only */
+ ret = regmap_read(ina->regmap, INA3221_CONFIG, &ina->reg_config);
+ if (ret)
+ return ret;
+
+ /* Set to power-down mode for power saving */
+ ret = regmap_update_bits(ina->regmap, INA3221_CONFIG,
+ INA3221_CONFIG_MODE_MASK,
+ INA3221_CONFIG_MODE_POWERDOWN);
+ if (ret)
+ return ret;
+
+ regcache_cache_only(ina->regmap, true);
+ regcache_mark_dirty(ina->regmap);
+
+ return 0;
+}
+
+static int __maybe_unused ina3221_resume(struct device *dev)
+{
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ int ret;
+
+ regcache_cache_only(ina->regmap, false);
+
+ /* Software reset the chip */
+ ret = regmap_field_write(ina->fields[F_RST], true);
+ if (ret) {
+ dev_err(dev, "Unable to reset device\n");
+ return ret;
+ }
+
+ /* Restore cached register values to hardware */
+ ret = regcache_sync(ina->regmap);
+ if (ret)
+ return ret;
+
+ /* Restore config register value to hardware */
+ ret = regmap_write(ina->regmap, INA3221_CONFIG, ina->reg_config);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct dev_pm_ops ina3221_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(ina3221_suspend, ina3221_resume)
+};
+
static const struct of_device_id ina3221_of_match_table[] = {
{ .compatible = "ti,ina3221", },
{ /* sentinel */ }
@@ -435,6 +660,7 @@ static struct i2c_driver ina3221_i2c_driver = {
.driver = {
.name = INA3221_DRIVER_NAME,
.of_match_table = ina3221_of_match_table,
+ .pm = &ina3221_pm,
},
.id_table = ina3221_ids,
};
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index bb15d7816a29..2cef0c37ff6f 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -325,8 +325,9 @@ static int k10temp_probe(struct pci_dev *pdev,
data->pdev = pdev;
- if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
- boot_cpu_data.x86_model == 0x70)) {
+ if (boot_cpu_data.x86 == 0x15 &&
+ ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
+ (boot_cpu_data.x86_model & 0xf0) == 0x70)) {
data->read_htcreg = read_htcreg_nb_f15;
data->read_tempreg = read_tempreg_nb_f15;
} else if (boot_cpu_data.x86 == 0x17) {
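Masking the model with 0xf0 widens the match from two exact models to the whole 0x60..0x7f range. For example, for a family 15h model 0x65 part (a model the exact comparison missed):

    /* old: 0x65 == 0x60 || 0x65 == 0x70  -> false, wrong register path
     * new: (0x65 & 0xf0) == 0x60         -> true, F15h NB path used   */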
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 49f4b33a5685..c7f20543b2bf 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -47,6 +47,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
lm75b,
max6625,
max6626,
+ max31725,
mcp980x,
stds75,
tcn75,
@@ -64,7 +65,6 @@ enum lm75_type { /* keep sorted in alphabetical order */
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
-
/* The LM75 registers */
#define LM75_REG_TEMP 0x00
#define LM75_REG_CONF 0x01
@@ -76,7 +76,7 @@ struct lm75_data {
struct i2c_client *client;
struct regmap *regmap;
u8 orig_conf;
- u8 resolution; /* In bits, between 9 and 12 */
+ u8 resolution; /* In bits, between 9 and 16 */
u8 resolution_limits;
unsigned int sample_time; /* In ms */
};
@@ -254,7 +254,8 @@ static const struct regmap_config lm75_regmap_config = {
.volatile_reg = lm75_is_volatile_reg,
.val_format_endian = REGMAP_ENDIAN_BIG,
.cache_type = REGCACHE_RBTREE,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
static void lm75_remove(void *data)
@@ -339,6 +340,10 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->resolution_limits = 9;
data->sample_time = MSEC_PER_SEC / 4;
break;
+ case max31725:
+ data->resolution = 16;
+ data->sample_time = MSEC_PER_SEC / 8;
+ break;
case tcn75:
data->resolution = 9;
data->sample_time = MSEC_PER_SEC / 8;
@@ -415,6 +420,8 @@ static const struct i2c_device_id lm75_ids[] = {
{ "lm75b", lm75b, },
{ "max6625", max6625, },
{ "max6626", max6626, },
+ { "max31725", max31725, },
+ { "max31726", max31725, },
{ "mcp980x", mcp980x, },
{ "stds75", stds75, },
{ "tcn75", tcn75, },
@@ -472,6 +479,14 @@ static const struct of_device_id lm75_of_match[] = {
.data = (void *)max6626
},
{
+ .compatible = "maxim,max31725",
+ .data = (void *)max31725
+ },
+ {
+ .compatible = "maxim,max31726",
+ .data = (void *)max31725
+ },
+ {
.compatible = "maxim,mcp980x",
.data = (void *)mcp980x
},
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index d40fe5122e94..e7333f8e185c 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -127,8 +127,8 @@ static struct lm92_data *lm92_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ)
- || !data->valid) {
+ if (time_after(jiffies, data->last_updated + HZ) ||
+ !data->valid) {
dev_dbg(&client->dev, "Updating lm92 data\n");
for (i = 0; i < t_num_regs; i++) {
data->temp[i] =
@@ -153,7 +153,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
}
static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm92_data *data = dev_get_drvdata(dev);
@@ -161,7 +161,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
int nr = attr->index;
long val;
int err;
-
+
err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -178,6 +178,7 @@ static ssize_t show_temp_hyst(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm92_data *data = lm92_update_device(dev);
+
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index])
- TEMP_FROM_REG(data->temp[t_hyst]));
}
@@ -186,6 +187,7 @@ static ssize_t temp1_min_hyst_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
+
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[t_min])
+ TEMP_FROM_REG(data->temp[t_hyst]));
}
@@ -206,7 +208,7 @@ static ssize_t set_temp_hyst(struct device *dev,
val = clamp_val(val, -120000, 220000);
mutex_lock(&data->update_lock);
- data->temp[t_hyst] =
+ data->temp[t_hyst] =
TEMP_TO_REG(TEMP_FROM_REG(data->temp[attr->index]) - val);
i2c_smbus_write_word_swapped(client, LM92_REG_TEMP_HYST,
data->temp[t_hyst]);
@@ -218,6 +220,7 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm92_data *data = lm92_update_device(dev);
+
return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->temp[t_input]));
}
@@ -324,7 +327,6 @@ static int lm92_probe(struct i2c_client *new_client,
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-
/*
* Module and driver stuff
*/
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 27cb06d65594..996b50246175 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -541,7 +541,8 @@ static const struct regmap_config lm95245_regmap_config = {
.writeable_reg = lm95245_is_writeable_reg,
.volatile_reg = lm95245_is_volatile_reg,
.cache_type = REGCACHE_RBTREE,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
static const u32 lm95245_chip_config[] = {
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index 78fe8759d2a9..825b922a3f92 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the ADC on Freescale Semiconductor MC13783 and MC13892 PMICs.
*
* Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2009 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 51
- * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/mfd/mc13xxx.h>
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 944f5b63aecd..c3040079b1cb 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -42,6 +42,10 @@
* nct6793d 15 6 6 2+6 0xd120 0xc1 0x5ca3
* nct6795d 14 6 6 2+6 0xd350 0xc1 0x5ca3
* nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3
+ * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3
+ * (0xd451)
+ * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3
+ * (0xd459)
*
* #temp lists the number of monitored temperature sources (first value) plus
* the number of directly connectable temperature sensors (second value).
@@ -69,7 +73,7 @@
#define USE_ALTERNATE
enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793,
- nct6795, nct6796 };
+ nct6795, nct6796, nct6797, nct6798 };
/* used to set data->name = nct6775_device_names[data->sio_kind] */
static const char * const nct6775_device_names[] = {
@@ -82,6 +86,8 @@ static const char * const nct6775_device_names[] = {
"nct6793",
"nct6795",
"nct6796",
+ "nct6797",
+ "nct6798",
};
static const char * const nct6775_sio_names[] __initconst = {
@@ -94,6 +100,8 @@ static const char * const nct6775_sio_names[] __initconst = {
"NCT6793D",
"NCT6795D",
"NCT6796D",
+ "NCT6797D",
+ "NCT6798D",
};
static unsigned short force_id;
@@ -129,7 +137,9 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_NCT6793_ID 0xd120
#define SIO_NCT6795_ID 0xd350
#define SIO_NCT6796_ID 0xd420
-#define SIO_ID_MASK 0xFFF0
+#define SIO_NCT6797_ID 0xd450
+#define SIO_NCT6798_ID 0xd458
+#define SIO_ID_MASK 0xFFF8
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
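The mask change is needed because the new chips use paired IDs. With the old 0xFFF0 mask the NCT6798D would collide with the NCT6797D; 0xFFF8 still folds each chip's ID pair together while keeping the chips apart:

    0xd451 & 0xFFF0 = 0xd450  but also  0xd458 & 0xFFF0 = 0xd450  (collision)
    0xd451 & 0xFFF8 = 0xd450  (NCT6797D)
    0xd459 & 0xFFF8 = 0xd458  (NCT6798D)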
@@ -207,8 +217,6 @@ superio_exit(int ioreg)
#define NUM_FAN 7
-#define TEMP_SOURCE_VIRTUAL 0x1f
-
/* Common and NCT6775 specific data */
/* Voltage min/max registers for nr=7..14 are in bank 5 */
@@ -299,8 +307,9 @@ static const u16 NCT6775_REG_PWM_READ[] = {
static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d };
-static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 };
-static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 };
+static const u16 NCT6775_REG_FAN_PULSES[NUM_FAN] = {
+ 0x641, 0x642, 0x643, 0x644 };
+static const u16 NCT6775_FAN_PULSE_SHIFT[NUM_FAN] = { };
static const u16 NCT6775_REG_TEMP[] = {
0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
@@ -373,6 +382,7 @@ static const char *const nct6775_temp_label[] = {
};
#define NCT6775_TEMP_MASK 0x001ffffe
+#define NCT6775_VIRT_TEMP_MASK 0x00000000
static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = {
[13] = 0x661,
@@ -425,8 +435,8 @@ static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
static const u16 NCT6776_REG_FAN_MIN[] = {
0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
-static const u16 NCT6776_REG_FAN_PULSES[] = {
- 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
+static const u16 NCT6776_REG_FAN_PULSES[NUM_FAN] = {
+ 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = {
0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e };
@@ -461,6 +471,7 @@ static const char *const nct6776_temp_label[] = {
};
#define NCT6776_TEMP_MASK 0x007ffffe
+#define NCT6776_VIRT_TEMP_MASK 0x00000000
static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = {
[14] = 0x401,
@@ -501,9 +512,9 @@ static const s8 NCT6779_BEEP_BITS[] = {
30, 31 }; /* intrusion0, intrusion1 */
static const u16 NCT6779_REG_FAN[] = {
- 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 };
-static const u16 NCT6779_REG_FAN_PULSES[] = {
- 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
+ 0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce };
+static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = {
+ 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0x64f };
static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = {
0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 };
@@ -559,7 +570,9 @@ static const char *const nct6779_temp_label[] = {
};
#define NCT6779_TEMP_MASK 0x07ffff7e
+#define NCT6779_VIRT_TEMP_MASK 0x00000000
#define NCT6791_TEMP_MASK 0x87ffff7e
+#define NCT6791_VIRT_TEMP_MASK 0x80000000
static const u16 NCT6779_REG_TEMP_ALTERNATE[32]
= { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0,
@@ -638,6 +651,7 @@ static const char *const nct6792_temp_label[] = {
};
#define NCT6792_TEMP_MASK 0x9fffff7e
+#define NCT6792_VIRT_TEMP_MASK 0x80000000
static const char *const nct6793_temp_label[] = {
"",
@@ -675,6 +689,7 @@ static const char *const nct6793_temp_label[] = {
};
#define NCT6793_TEMP_MASK 0xbfff037e
+#define NCT6793_VIRT_TEMP_MASK 0x80000000
static const char *const nct6795_temp_label[] = {
"",
@@ -699,10 +714,10 @@ static const char *const nct6795_temp_label[] = {
"PCH_CHIP_TEMP",
"PCH_CPU_TEMP",
"PCH_MCH_TEMP",
- "PCH_DIM0_TEMP",
- "PCH_DIM1_TEMP",
- "PCH_DIM2_TEMP",
- "PCH_DIM3_TEMP",
+ "Agent0 Dimm0",
+ "Agent0 Dimm1",
+ "Agent1 Dimm0",
+ "Agent1 Dimm1",
"BYTE_TEMP0",
"BYTE_TEMP1",
"PECI Agent 0 Calibration",
@@ -712,6 +727,7 @@ static const char *const nct6795_temp_label[] = {
};
#define NCT6795_TEMP_MASK 0xbfffff7e
+#define NCT6795_VIRT_TEMP_MASK 0x80000000
static const char *const nct6796_temp_label[] = {
"",
@@ -724,8 +740,8 @@ static const char *const nct6796_temp_label[] = {
"AUXTIN4",
"SMBUSMASTER 0",
"SMBUSMASTER 1",
- "",
- "",
+ "Virtual_TEMP",
+ "Virtual_TEMP",
"",
"",
"",
@@ -736,10 +752,10 @@ static const char *const nct6796_temp_label[] = {
"PCH_CHIP_TEMP",
"PCH_CPU_TEMP",
"PCH_MCH_TEMP",
- "PCH_DIM0_TEMP",
- "PCH_DIM1_TEMP",
- "PCH_DIM2_TEMP",
- "PCH_DIM3_TEMP",
+ "Agent0 Dimm0",
+ "Agent0 Dimm1",
+ "Agent1 Dimm0",
+ "Agent1 Dimm1",
"BYTE_TEMP0",
"BYTE_TEMP1",
"PECI Agent 0 Calibration",
@@ -748,7 +764,46 @@ static const char *const nct6796_temp_label[] = {
"Virtual_TEMP"
};
-#define NCT6796_TEMP_MASK 0xbfff03fe
+#define NCT6796_TEMP_MASK 0xbfff0ffe
+#define NCT6796_VIRT_TEMP_MASK 0x80000c00
+
+static const char *const nct6798_temp_label[] = {
+ "",
+ "SYSTIN",
+ "CPUTIN",
+ "AUXTIN0",
+ "AUXTIN1",
+ "AUXTIN2",
+ "AUXTIN3",
+ "AUXTIN4",
+ "SMBUSMASTER 0",
+ "SMBUSMASTER 1",
+ "Virtual_TEMP",
+ "Virtual_TEMP",
+ "",
+ "",
+ "",
+ "",
+ "PECI Agent 0",
+ "PECI Agent 1",
+ "PCH_CHIP_CPU_MAX_TEMP",
+ "PCH_CHIP_TEMP",
+ "PCH_CPU_TEMP",
+ "PCH_MCH_TEMP",
+ "Agent0 Dimm0",
+ "Agent0 Dimm1",
+ "Agent1 Dimm0",
+ "Agent1 Dimm1",
+ "BYTE_TEMP0",
+ "BYTE_TEMP1",
+ "",
+ "",
+ "",
+ "Virtual_TEMP"
+};
+
+#define NCT6798_TEMP_MASK 0x8fff0ffe
+#define NCT6798_VIRT_TEMP_MASK 0x80000c00
/* NCT6102D/NCT6106D specific data */
@@ -779,8 +834,8 @@ static const u16 NCT6106_REG_TEMP_CONFIG[] = {
static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 };
static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 };
-static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 };
-static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 };
+static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 };
+static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 };
static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
@@ -917,6 +972,11 @@ static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
return 1350000U / (reg << divreg);
}
+static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg)
+{
+ return reg;
+}
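fan_from_reg16() converts a clock-tick count to rpm, whereas the registers used by the newer chips already hold rpm, hence the identity helper. A worked example of the tick conversion:

    /* reg = 675 ticks, divreg = 1:
     *   1350000 / (675 << 1) = 1350000 / 1350 = 1000 rpm
     * fan_from_reg_rpm() simply returns reg, as the hardware reports rpm. */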
+
static u16 fan_to_reg(u32 fan, unsigned int divreg)
{
if (!fan)
@@ -969,6 +1029,7 @@ struct nct6775_data {
u16 reg_temp_config[NUM_TEMP];
const char * const *temp_label;
u32 temp_mask;
+ u32 virt_temp_mask;
u16 REG_CONFIG;
u16 REG_VBAT;
@@ -1275,12 +1336,14 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
case nct6793:
case nct6795:
case nct6796:
+ case nct6797:
+ case nct6798:
return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
- ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
+ (reg & 0xfff0) == 0x4c0 ||
reg == 0x402 ||
reg == 0x63a || reg == 0x63c || reg == 0x63e ||
reg == 0x640 || reg == 0x642 || reg == 0x64a ||
- reg == 0x64c || reg == 0x660 ||
+ reg == 0x64c ||
reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 ||
reg == 0x7b || reg == 0x7d;
}
@@ -1558,7 +1621,7 @@ static void nct6775_update_pwm(struct device *dev)
reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
data->pwm_weight_temp_sel[i] = reg & 0x1f;
/* If weight is disabled, report weight source as 0 */
- if (j == 1 && !(reg & 0x80))
+ if (!(reg & 0x80))
data->pwm_weight_temp_sel[i] = 0;
/* Weight temp data */
@@ -1630,6 +1693,8 @@ static void nct6775_update_pwm_limits(struct device *dev)
case nct6793:
case nct6795:
case nct6796:
+ case nct6797:
+ case nct6798:
reg = nct6775_read_value(data,
data->REG_CRITICAL_PWM_ENABLE[i]);
if (reg & data->CRITICAL_PWM_ENABLE_MASK)
@@ -1682,9 +1747,13 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
if (data->has_fan_min & BIT(i))
data->fan_min[i] = nct6775_read_value(data,
data->REG_FAN_MIN[i]);
- data->fan_pulses[i] =
- (nct6775_read_value(data, data->REG_FAN_PULSES[i])
- >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+
+ if (data->REG_FAN_PULSES[i]) {
+ data->fan_pulses[i] =
+ (nct6775_read_value(data,
+ data->REG_FAN_PULSES[i])
+ >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+ }
nct6775_select_fan_div(dev, data, i, reg);
}
@@ -2830,6 +2899,8 @@ store_temp_tolerance(struct device *dev, struct device_attribute *attr,
* Fan speed tolerance is a tricky beast, since the associated register is
* a tick counter, but the value is reported and configured as rpm.
* Compute resulting low and high rpm values and report the difference.
+ * A fan speed tolerance only makes sense if a fan target speed has been
+ * configured, so only report a non-zero tolerance in that case.
*/
static ssize_t
show_speed_tolerance(struct device *dev, struct device_attribute *attr,
@@ -2838,19 +2909,23 @@ show_speed_tolerance(struct device *dev, struct device_attribute *attr,
struct nct6775_data *data = nct6775_update_device(dev);
struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
int nr = sattr->index;
- int low = data->target_speed[nr] - data->target_speed_tolerance[nr];
- int high = data->target_speed[nr] + data->target_speed_tolerance[nr];
- int tolerance;
-
- if (low <= 0)
- low = 1;
- if (high > 0xffff)
- high = 0xffff;
- if (high < low)
- high = low;
-
- tolerance = (fan_from_reg16(low, data->fan_div[nr])
- - fan_from_reg16(high, data->fan_div[nr])) / 2;
+ int target = data->target_speed[nr];
+ int tolerance = 0;
+
+ if (target) {
+ int low = target - data->target_speed_tolerance[nr];
+ int high = target + data->target_speed_tolerance[nr];
+
+ if (low <= 0)
+ low = 1;
+ if (high > 0xffff)
+ high = 0xffff;
+ if (high < low)
+ high = low;
+
+ tolerance = (fan_from_reg16(low, data->fan_div[nr])
+ - fan_from_reg16(high, data->fan_div[nr])) / 2;
+ }
return sprintf(buf, "%d\n", tolerance);
}
@@ -3054,6 +3129,8 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
case nct6793:
case nct6795:
case nct6796:
+ case nct6797:
+ case nct6798:
nct6775_write_value(data, data->REG_CRITICAL_PWM[nr],
val);
reg = nct6775_read_value(data,
@@ -3413,7 +3490,6 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
bool pwm3pin = false, pwm4pin = false, pwm5pin = false;
bool pwm6pin = false, pwm7pin = false;
int sioreg = data->sioreg;
- int regval;
/* Store SIO_REG_ENABLE for use during resume */
superio_select(sioreg, NCT6775_LD_HWM);
@@ -3421,10 +3497,10 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
/* fan4 and fan5 share some pins with the GPIO and serial flash */
if (data->kind == nct6775) {
- regval = superio_inb(sioreg, 0x2c);
+ int cr2c = superio_inb(sioreg, 0x2c);
- fan3pin = regval & BIT(6);
- pwm3pin = regval & BIT(7);
+ fan3pin = cr2c & BIT(6);
+ pwm3pin = cr2c & BIT(7);
/* On NCT6775, fan4 shares pins with the fdc interface */
fan4pin = !(superio_inb(sioreg, 0x2A) & 0x80);
@@ -3469,85 +3545,130 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
fan4min = fan4pin;
pwm3pin = fan3pin;
} else if (data->kind == nct6106) {
- regval = superio_inb(sioreg, 0x24);
- fan3pin = !(regval & 0x80);
- pwm3pin = regval & 0x08;
- } else {
- /* NCT6779D, NCT6791D, NCT6792D, NCT6793D, NCT6795D, NCT6796D */
- int regval_1b, regval_2a, regval_2f;
- bool dsw_en;
-
- regval = superio_inb(sioreg, 0x1c);
-
- fan3pin = !(regval & BIT(5));
- fan4pin = !(regval & BIT(6));
- fan5pin = !(regval & BIT(7));
+ int cr24 = superio_inb(sioreg, 0x24);
- pwm3pin = !(regval & BIT(0));
- pwm4pin = !(regval & BIT(1));
- pwm5pin = !(regval & BIT(2));
+ fan3pin = !(cr24 & 0x80);
+ pwm3pin = cr24 & 0x08;
+ } else {
+ /*
+ * NCT6779D, NCT6791D, NCT6792D, NCT6793D, NCT6795D, NCT6796D,
+ * NCT6797D, NCT6798D
+ */
+ int cr1a = superio_inb(sioreg, 0x1a);
+ int cr1b = superio_inb(sioreg, 0x1b);
+ int cr1c = superio_inb(sioreg, 0x1c);
+ int cr1d = superio_inb(sioreg, 0x1d);
+ int cr2a = superio_inb(sioreg, 0x2a);
+ int cr2b = superio_inb(sioreg, 0x2b);
+ int cr2d = superio_inb(sioreg, 0x2d);
+ int cr2f = superio_inb(sioreg, 0x2f);
+ bool dsw_en = cr2f & BIT(3);
+ bool ddr4_en = cr2f & BIT(4);
+ int cre0;
+ int creb;
+ int cred;
+
+ superio_select(sioreg, NCT6775_LD_12);
+ cre0 = superio_inb(sioreg, 0xe0);
+ creb = superio_inb(sioreg, 0xeb);
+ cred = superio_inb(sioreg, 0xed);
+
+ fan3pin = !(cr1c & BIT(5));
+ fan4pin = !(cr1c & BIT(6));
+ fan5pin = !(cr1c & BIT(7));
+
+ pwm3pin = !(cr1c & BIT(0));
+ pwm4pin = !(cr1c & BIT(1));
+ pwm5pin = !(cr1c & BIT(2));
- regval = superio_inb(sioreg, 0x2d);
switch (data->kind) {
case nct6791:
+ fan6pin = cr2d & BIT(1);
+ pwm6pin = cr2d & BIT(0);
+ break;
case nct6792:
- fan6pin = regval & BIT(1);
- pwm6pin = regval & BIT(0);
+ fan6pin = !dsw_en && (cr2d & BIT(1));
+ pwm6pin = !dsw_en && (cr2d & BIT(0));
break;
case nct6793:
+ fan5pin |= cr1b & BIT(5);
+ fan5pin |= creb & BIT(5);
+
+ fan6pin = creb & BIT(3);
+
+ pwm5pin |= cr2d & BIT(7);
+ pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0));
+
+ pwm6pin = !dsw_en && (cr2d & BIT(0));
+ pwm6pin |= creb & BIT(2);
+ break;
case nct6795:
+ fan5pin |= cr1b & BIT(5);
+ fan5pin |= creb & BIT(5);
+
+ fan6pin = (cr2a & BIT(4)) &&
+ (!dsw_en || (cred & BIT(4)));
+ fan6pin |= creb & BIT(3);
+
+ pwm5pin |= cr2d & BIT(7);
+ pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0));
+
+ pwm6pin = (cr2a & BIT(3)) && (cred & BIT(2));
+ pwm6pin |= creb & BIT(2);
+ break;
case nct6796:
- regval_1b = superio_inb(sioreg, 0x1b);
- regval_2a = superio_inb(sioreg, 0x2a);
- regval_2f = superio_inb(sioreg, 0x2f);
- dsw_en = regval_2f & BIT(3);
+ fan5pin |= cr1b & BIT(5);
+ fan5pin |= (cre0 & BIT(3)) && !(cr1b & BIT(0));
+ fan5pin |= creb & BIT(5);
- if (!pwm5pin)
- pwm5pin = regval & BIT(7);
+ fan6pin = (cr2a & BIT(4)) &&
+ (!dsw_en || (cred & BIT(4)));
+ fan6pin |= creb & BIT(3);
- if (!fan5pin)
- fan5pin = regval_1b & BIT(5);
+ fan7pin = !(cr2b & BIT(2));
- superio_select(sioreg, NCT6775_LD_12);
- if (data->kind != nct6796) {
- int regval_eb = superio_inb(sioreg, 0xeb);
+ pwm5pin |= cr2d & BIT(7);
+ pwm5pin |= (cre0 & BIT(4)) && !(cr1b & BIT(0));
+ pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0));
- if (!dsw_en) {
- fan6pin = regval & BIT(1);
- pwm6pin = regval & BIT(0);
- }
+ pwm6pin = (cr2a & BIT(3)) && (cred & BIT(2));
+ pwm6pin |= creb & BIT(2);
- if (!fan5pin)
- fan5pin = regval_eb & BIT(5);
- if (!pwm5pin)
- pwm5pin = (regval_eb & BIT(4)) &&
- !(regval_2a & BIT(0));
- if (!fan6pin)
- fan6pin = regval_eb & BIT(3);
- if (!pwm6pin)
- pwm6pin = regval_eb & BIT(2);
- }
+ pwm7pin = !(cr1d & (BIT(2) | BIT(3)));
+ break;
+ case nct6797:
+ fan5pin |= !ddr4_en && (cr1b & BIT(5));
+ fan5pin |= creb & BIT(5);
- if (data->kind == nct6795 || data->kind == nct6796) {
- int regval_ed = superio_inb(sioreg, 0xed);
+ fan6pin = cr2a & BIT(4);
+ fan6pin |= creb & BIT(3);
- if (!fan6pin)
- fan6pin = (regval_2a & BIT(4)) &&
- (!dsw_en ||
- (dsw_en && (regval_ed & BIT(4))));
- if (!pwm6pin)
- pwm6pin = (regval_2a & BIT(3)) &&
- (regval_ed & BIT(2));
- }
+ fan7pin = cr1a & BIT(1);
- if (data->kind == nct6796) {
- int regval_1d = superio_inb(sioreg, 0x1d);
- int regval_2b = superio_inb(sioreg, 0x2b);
+ pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0));
+ pwm5pin |= !ddr4_en && (cr2d & BIT(7));
- fan7pin = !(regval_2b & BIT(2));
- pwm7pin = !(regval_1d & (BIT(2) | BIT(3)));
- }
+ pwm6pin = creb & BIT(2);
+ pwm6pin |= cred & BIT(2);
+ pwm7pin = cr1d & BIT(4);
+ break;
+ case nct6798:
+ fan6pin = !(cr1b & BIT(0)) && (cre0 & BIT(3));
+ fan6pin |= cr2a & BIT(4);
+ fan6pin |= creb & BIT(5);
+
+ fan7pin = cr1b & BIT(5);
+ fan7pin |= !(cr2b & BIT(2));
+ fan7pin |= creb & BIT(3);
+
+ pwm6pin = !(cr1b & BIT(0)) && (cre0 & BIT(4));
+ pwm6pin |= !(cred & BIT(2)) && (cr2a & BIT(3));
+ pwm6pin |= (creb & BIT(4)) && !(cr2a & BIT(0));
+
+ pwm7pin = !(cr1d & (BIT(2) | BIT(3)));
+ pwm7pin |= cr2d & BIT(7);
+ pwm7pin |= creb & BIT(2);
break;
default: /* NCT6779D */
break;
@@ -3639,6 +3760,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6776_temp_label;
data->temp_mask = NCT6776_TEMP_MASK;
+ data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
data->REG_VBAT = NCT6106_REG_VBAT;
data->REG_DIODE = NCT6106_REG_DIODE;
@@ -3717,6 +3839,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6775_temp_label;
data->temp_mask = NCT6775_TEMP_MASK;
+ data->virt_temp_mask = NCT6775_VIRT_TEMP_MASK;
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3789,6 +3912,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6776_temp_label;
data->temp_mask = NCT6776_TEMP_MASK;
+ data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3853,7 +3977,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->ALARM_BITS = NCT6779_ALARM_BITS;
data->BEEP_BITS = NCT6779_BEEP_BITS;
- data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg = fan_from_reg_rpm;
data->fan_from_reg_min = fan_from_reg13;
data->target_temp_mask = 0xff;
data->tolerance_mask = 0x07;
@@ -3861,6 +3985,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6779_temp_label;
data->temp_mask = NCT6779_TEMP_MASK;
+ data->virt_temp_mask = NCT6779_VIRT_TEMP_MASK;
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3922,8 +4047,12 @@ static int nct6775_probe(struct platform_device *pdev)
case nct6793:
case nct6795:
case nct6796:
+ case nct6797:
+ case nct6798:
data->in_num = 15;
- data->pwm_num = (data->kind == nct6796) ? 7 : 6;
+ data->pwm_num = (data->kind == nct6796 ||
+ data->kind == nct6797 ||
+ data->kind == nct6798) ? 7 : 6;
data->auto_pwm_num = 4;
data->has_fan_div = false;
data->temp_fixed_num = 6;
@@ -3933,7 +4062,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->ALARM_BITS = NCT6791_ALARM_BITS;
data->BEEP_BITS = NCT6779_BEEP_BITS;
- data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg = fan_from_reg_rpm;
data->fan_from_reg_min = fan_from_reg13;
data->target_temp_mask = 0xff;
data->tolerance_mask = 0x07;
@@ -3944,22 +4073,33 @@ static int nct6775_probe(struct platform_device *pdev)
case nct6791:
data->temp_label = nct6779_temp_label;
data->temp_mask = NCT6791_TEMP_MASK;
+ data->virt_temp_mask = NCT6791_VIRT_TEMP_MASK;
break;
case nct6792:
data->temp_label = nct6792_temp_label;
data->temp_mask = NCT6792_TEMP_MASK;
+ data->virt_temp_mask = NCT6792_VIRT_TEMP_MASK;
break;
case nct6793:
data->temp_label = nct6793_temp_label;
data->temp_mask = NCT6793_TEMP_MASK;
+ data->virt_temp_mask = NCT6793_VIRT_TEMP_MASK;
break;
case nct6795:
+ case nct6797:
data->temp_label = nct6795_temp_label;
data->temp_mask = NCT6795_TEMP_MASK;
+ data->virt_temp_mask = NCT6795_VIRT_TEMP_MASK;
break;
case nct6796:
data->temp_label = nct6796_temp_label;
data->temp_mask = NCT6796_TEMP_MASK;
+ data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK;
+ break;
+ case nct6798:
+ data->temp_label = nct6798_temp_label;
+ data->temp_mask = NCT6798_TEMP_MASK;
+ data->virt_temp_mask = NCT6798_VIRT_TEMP_MASK;
break;
}
@@ -4143,7 +4283,7 @@ static int nct6775_probe(struct platform_device *pdev)
* for each fan reflects a different temperature, and there
* are no duplicates.
*/
- if (src != TEMP_SOURCE_VIRTUAL) {
+ if (!(data->virt_temp_mask & BIT(src))) {
if (mask & BIT(src))
continue;
mask |= BIT(src);
@@ -4230,6 +4370,8 @@ static int nct6775_probe(struct platform_device *pdev)
case nct6793:
case nct6795:
case nct6796:
+ case nct6797:
+ case nct6798:
break;
}
@@ -4265,6 +4407,8 @@ static int nct6775_probe(struct platform_device *pdev)
case nct6793:
case nct6795:
case nct6796:
+ case nct6797:
+ case nct6798:
tmp |= 0x7e;
break;
}
@@ -4467,6 +4611,12 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
case SIO_NCT6796_ID:
sio_data->kind = nct6796;
break;
+ case SIO_NCT6797_ID:
+ sio_data->kind = nct6797;
+ break;
+ case SIO_NCT6798_ID:
+ sio_data->kind = nct6798;
+ break;
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index 8474d601aa63..b3b907bdfb63 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -52,7 +52,7 @@
/* Define the Counter Register, value = 100 for match 100% */
#define NPCM7XX_PWM_COUNTER_DEFAULT_NUM 255
-#define NPCM7XX_PWM_CMR_DEFAULT_NUM 127
+#define NPCM7XX_PWM_CMR_DEFAULT_NUM 255
#define NPCM7XX_PWM_CMR_MAX 255
/* default all PWM channels PRESCALE2 = 1 */
@@ -861,7 +861,7 @@ static int npcm7xx_create_pwm_cooling(struct device *dev,
dev_err(dev, "Property 'cooling-levels' cannot be read.\n");
return ret;
}
- snprintf(cdev->name, THERMAL_NAME_LENGTH, "%s%d", child->name,
+ snprintf(cdev->name, THERMAL_NAME_LENGTH, "%pOFn%d", child,
pwm_port);
cdev->tcdev = thermal_of_cooling_device_register(child,
@@ -908,7 +908,7 @@ static int npcm7xx_en_pwm_fan(struct device *dev,
if (fan_cnt < 1)
return -EINVAL;
- fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL);
+ fan_ch = devm_kcalloc(dev, fan_cnt, sizeof(*fan_ch), GFP_KERNEL);
if (!fan_ch)
return -ENOMEM;
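
The devm_kcalloc() conversion above deserves a short aside: unlike an open-coded
multiplication passed to devm_kzalloc(), kcalloc-style allocators reject products
that would overflow. A minimal sketch; the fan_state type and helper name are
hypothetical:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct fan_state { u32 rpm; };	/* hypothetical per-channel state */

	static struct fan_state *alloc_fan_states(struct device *dev, size_t fan_cnt)
	{
		/*
		 * devm_kcalloc() returns NULL if fan_cnt * sizeof(struct fan_state)
		 * would overflow, whereas devm_kzalloc(dev, sizeof(*st) * fan_cnt, ...)
		 * would silently wrap and under-allocate.
		 */
		return devm_kcalloc(dev, fan_cnt, sizeof(struct fan_state),
				    GFP_KERNEL);
	}
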
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index a82018aaf473..629cb45f8557 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -5,7 +5,6 @@
menuconfig PMBUS
tristate "PMBus support"
depends on I2C
- default n
help
Say yes here if you want to enable PMBus support.
@@ -28,7 +27,6 @@ config SENSORS_PMBUS
config SENSORS_ADM1275
tristate "Analog Devices ADM1275 and compatibles"
- default n
help
If you say yes here you get hardware monitoring support for Analog
Devices ADM1075, ADM1272, ADM1275, ADM1276, ADM1278, ADM1293,
@@ -49,7 +47,6 @@ config SENSORS_IBM_CFFPS
config SENSORS_IR35221
tristate "Infineon IR35221"
- default n
help
If you say yes here you get hardware monitoring support for the
Infineon IR35221 controller.
@@ -59,7 +56,6 @@ config SENSORS_IR35221
config SENSORS_LM25066
tristate "National Semiconductor LM25066 and compatibles"
- default n
help
If you say yes here you get hardware monitoring support for National
Semiconductor LM25056, LM25066, LM5064, and LM5066.
@@ -69,7 +65,6 @@ config SENSORS_LM25066
config SENSORS_LTC2978
tristate "Linear Technologies LTC2978 and compatibles"
- default n
help
If you say yes here you get hardware monitoring support for Linear
Technology LTC2974, LTC2975, LTC2977, LTC2978, LTC2980, LTC3880,
@@ -83,11 +78,11 @@ config SENSORS_LTC2978_REGULATOR
depends on SENSORS_LTC2978 && REGULATOR
help
If you say yes here you get regulator support for Linear
- Technology LTC2974, LTC2977, LTC2978, LTC3880, LTC3883, and LTM4676.
+ Technology LTC2974, LTC2977, LTC2978, LTC3880, LTC3883, LTM4676
+ and LTM4686.
config SENSORS_LTC3815
tristate "Linear Technologies LTC3815"
- default n
help
If you say yes here you get hardware monitoring support for Linear
Technology LTC3815.
@@ -97,7 +92,6 @@ config SENSORS_LTC3815
config SENSORS_MAX16064
tristate "Maxim MAX16064"
- default n
help
If you say yes here you get hardware monitoring support for Maxim
MAX16064.
@@ -107,7 +101,6 @@ config SENSORS_MAX16064
config SENSORS_MAX20751
tristate "Maxim MAX20751"
- default n
help
If you say yes here you get hardware monitoring support for Maxim
MAX20751.
@@ -117,7 +110,6 @@ config SENSORS_MAX20751
config SENSORS_MAX31785
tristate "Maxim MAX31785 and compatibles"
- default n
help
If you say yes here you get hardware monitoring support for Maxim
MAX31785.
@@ -127,7 +119,6 @@ config SENSORS_MAX31785
config SENSORS_MAX34440
tristate "Maxim MAX34440 and compatibles"
- default n
help
If you say yes here you get hardware monitoring support for Maxim
MAX34440, MAX34441, MAX34446, MAX34451, MAX34460, and MAX34461.
@@ -137,7 +128,6 @@ config SENSORS_MAX34440
config SENSORS_MAX8688
tristate "Maxim MAX8688"
- default n
help
If you say yes here you get hardware monitoring support for Maxim
MAX8688.
@@ -147,7 +137,6 @@ config SENSORS_MAX8688
config SENSORS_TPS40422
tristate "TI TPS40422"
- default n
help
If you say yes here you get hardware monitoring support for TI
TPS40422.
@@ -166,7 +155,6 @@ config SENSORS_TPS53679
config SENSORS_UCD9000
tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910"
- default n
help
If you say yes here you get hardware monitoring support for TI
UCD90120, UCD90124, UCD90160, UCD9090, UCD90910, Sequencer and System
@@ -177,7 +165,6 @@ config SENSORS_UCD9000
config SENSORS_UCD9200
tristate "TI UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, UCD9248"
- default n
help
If you say yes here you get hardware monitoring support for TI
UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, and UCD9248
@@ -188,7 +175,6 @@ config SENSORS_UCD9200
config SENSORS_ZL6100
tristate "Intersil ZL6100 and compatibles"
- default n
help
If you say yes here you get hardware monitoring support for Intersil
ZL2004, ZL2005, ZL2006, ZL2008, ZL2105, ZL2106, ZL6100, ZL6105,
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 58b789c28b48..07afb92bb36b 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011 Ericsson AB.
* Copyright (c) 2013, 2014, 2015 Guenter Roeck
* Copyright (c) 2015 Linear Technology
+ * Copyright (c) 2018 Analog Devices Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,7 +29,7 @@
#include "pmbus.h"
enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
- ltc3883, ltc3886, ltc3887, ltm2987, ltm4675, ltm4676 };
+ ltc3883, ltc3886, ltc3887, ltm2987, ltm4675, ltm4676, ltm4686 };
/* Common for all chips */
#define LTC2978_MFR_VOUT_PEAK 0xdd
@@ -81,6 +82,7 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
#define LTM4676_ID_REV1 0x4400
#define LTM4676_ID_REV2 0x4480
#define LTM4676A_ID 0x47e0
+#define LTM4686_ID 0x4770
#define LTC2974_NUM_PAGES 4
#define LTC2978_NUM_PAGES 8
@@ -512,6 +514,7 @@ static const struct i2c_device_id ltc2978_id[] = {
{"ltm2987", ltm2987},
{"ltm4675", ltm4675},
{"ltm4676", ltm4676},
+ {"ltm4686", ltm4686},
{}
};
MODULE_DEVICE_TABLE(i2c, ltc2978_id);
@@ -588,6 +591,8 @@ static int ltc2978_get_id(struct i2c_client *client)
else if (chip_id == LTM4676_ID_REV1 || chip_id == LTM4676_ID_REV2 ||
chip_id == LTM4676A_ID)
return ltm4676;
+ else if (chip_id == LTM4686_ID)
+ return ltm4686;
dev_err(&client->dev, "Unsupported chip ID 0x%x\n", chip_id);
return -ENODEV;
@@ -684,6 +689,7 @@ static int ltc2978_probe(struct i2c_client *client,
case ltc3887:
case ltm4675:
case ltm4676:
+ case ltm4686:
data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
info->read_word_data = ltc3880_read_word_data;
info->pages = LTC3880_NUM_PAGES;
@@ -770,6 +776,7 @@ static const struct of_device_id ltc2978_of_match[] = {
{ .compatible = "lltc,ltm2987" },
{ .compatible = "lltc,ltm4675" },
{ .compatible = "lltc,ltm4676" },
+ { .compatible = "lltc,ltm4686" },
{ }
};
MODULE_DEVICE_TABLE(of, ltc2978_of_match);
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 7718e58dbda5..7688dab32f6e 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -118,6 +118,8 @@ static int pmbus_identify(struct i2c_client *client,
} else {
info->pages = 1;
}
+
+ pmbus_clear_faults(client);
}
if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 82c3754e21e3..2e2b5851139c 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -2015,7 +2015,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
client->flags |= I2C_CLIENT_PEC;
- pmbus_clear_faults(client);
+ if (data->info->pages)
+ pmbus_clear_faults(client);
+ else
+ pmbus_clear_fault_page(client, -1);
if (info->identify) {
ret = (*info->identify)(client, info);
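
A sketch of the fault-clearing split above, under the assumption that
pmbus_clear_faults() iterates over the known page count (so it has nothing to do
while info->pages is still 0) and that pmbus_clear_fault_page() with a negative
page issues one CLEAR_FAULTS without a prior PAGE write. The helper name is
hypothetical:

	#include "pmbus.h"

	static void init_time_clear_faults(struct i2c_client *client,
					   const struct pmbus_driver_info *info)
	{
		if (info->pages)
			pmbus_clear_faults(client);	/* CLEAR_FAULTS per page */
		else
			pmbus_clear_fault_page(client, -1); /* one global clear */
	}
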
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 7838af58f92d..7da6a160d45a 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -221,8 +221,12 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL);
if (IS_ERR(ctx->pwm)) {
- dev_err(&pdev->dev, "Could not get PWM\n");
- return PTR_ERR(ctx->pwm);
+ ret = PTR_ERR(ctx->pwm);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get PWM: %d\n", ret);
+
+ return ret;
}
platform_set_drvdata(pdev, ctx);
@@ -290,9 +294,19 @@ static int pwm_fan_remove(struct platform_device *pdev)
static int pwm_fan_suspend(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+ struct pwm_args args;
+ int ret;
+
+ pwm_get_args(ctx->pwm, &args);
+
+ if (ctx->pwm_value) {
+ ret = pwm_config(ctx->pwm, 0, args.period);
+ if (ret < 0)
+ return ret;
- if (ctx->pwm_value)
pwm_disable(ctx->pwm);
+ }
+
return 0;
}
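
Two idioms in the pwm-fan hunks above are worth noting. On probe, -EPROBE_DEFER is
an expected, transient condition (the PWM provider may simply not be bound yet), so
it is not logged as an error. On suspend, the duty cycle is forced to 0 before
pwm_disable(), presumably because disabling alone may leave the output at its last
level on some controllers. A condensed sketch of the suspend half; fan_quiesce()
is a hypothetical name:

	#include <linux/pwm.h>

	static int fan_quiesce(struct pwm_device *pwm)
	{
		struct pwm_args args;
		int ret;

		pwm_get_args(pwm, &args);		/* board/DT-provided period */

		ret = pwm_config(pwm, 0, args.period);	/* force 0% duty first */
		if (ret < 0)
			return ret;

		pwm_disable(pwm);
		return 0;
	}
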
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 91976b6ca300..2e005edee0c9 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -56,7 +56,7 @@ scmi_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
const struct scmi_sensors *scmi_sensors = drvdata;
sensor = *(scmi_sensors->info[type] + channel);
- if (sensor && sensor->name)
+ if (sensor)
return S_IRUGO;
return 0;
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 7e49da50bc69..111d521e2189 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -286,10 +286,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
* any thermal zones or if the thermal subsystem is
* not configured.
*/
- if (IS_ERR(z)) {
+ if (IS_ERR(z))
devm_kfree(dev, zone);
- continue;
- }
}
return 0;
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 2be77752cd56..c878242f3486 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* sht15.c - support for the SHT15 Temperature and Humidity Sensor
*
@@ -9,10 +10,6 @@
*
* Copyright (c) 2007 Wouter Horre
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* For further information, see the Documentation/hwmon/sht15 file.
*/
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index dfc40c740d07..6778283e36f9 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -212,7 +212,8 @@ static const struct regmap_config tmp102_regmap_config = {
.volatile_reg = tmp102_is_volatile_reg,
.val_format_endian = REGMAP_ENDIAN_BIG,
.cache_type = REGCACHE_RBTREE,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
static int tmp102_probe(struct i2c_client *client,
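
The use_single_rw conversion seen here recurs in tmp108, apds9960, max44000 and
mlx90632 below: the single flag was split so bulk reads and bulk writes can be
restricted independently. A minimal sketch:

	#include <linux/regmap.h>

	static const struct regmap_config example_regmap_config = {
		.reg_bits = 8,
		.val_bits = 8,
		/* was: .use_single_rw = true, */
		.use_single_read = true,	/* no bulk/burst register reads */
		.use_single_write = true,	/* no bulk/burst register writes */
	};
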
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index 91bb94639286..429bfeae4ca8 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -345,7 +345,8 @@ static const struct regmap_config tmp108_regmap_config = {
.volatile_reg = tmp108_is_volatile_reg,
.val_format_endian = REGMAP_ENDIAN_BIG,
.cache_type = REGCACHE_RBTREE,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
static int tmp108_probe(struct i2c_client *client,
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index e36399213324..8844c9565d2a 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -226,8 +226,10 @@ static int tmp421_detect(struct i2c_client *client,
{
enum chips kind;
struct i2c_adapter *adapter = client->adapter;
- const char * const names[] = { "TMP421", "TMP422", "TMP423",
- "TMP441", "TMP442" };
+ static const char * const names[] = {
+ "TMP421", "TMP422", "TMP423",
+ "TMP441", "TMP442"
+ };
int addr = client->addr;
u8 reg;
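
The added static qualifier in the tmp421 hunk is more than style: without it, a
"const char * const" array local to tmp421_detect() is typically rebuilt on the
stack on every call; with it, the table lives in .rodata once and the function
only references it:

	/* One read-only copy in .rodata, shared by all calls. */
	static const char * const names[] = {
		"TMP421", "TMP422", "TMP423",
		"TMP441", "TMP442"
	};
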
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index da962aa2cef5..fc6b7f8b62fb 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev)
th->thdev[i] = NULL;
}
- th->num_thdevs = lowest;
+ if (lowest >= 0)
+ th->num_thdevs = lowest;
}
if (thdrv->attr_group)
@@ -487,7 +488,7 @@ static const struct intel_th_subdevice {
.flags = IORESOURCE_MEM,
},
{
- .start = TH_MMIO_SW,
+ .start = 1, /* use resource[1] */
.end = 0,
.flags = IORESOURCE_MEM,
},
@@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th,
struct intel_th_device *thdev;
struct resource res[3];
unsigned int req = 0;
+ bool is64bit = false;
int r, err;
thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th,
thdev->drvdata = th->drvdata;
+ for (r = 0; r < th->num_resources; r++)
+ if (th->resource[r].flags & IORESOURCE_MEM_64) {
+ is64bit = true;
+ break;
+ }
+
memcpy(res, subdev->res,
sizeof(struct resource) * subdev->nres);
for (r = 0; r < subdev->nres; r++) {
struct resource *devres = th->resource;
- int bar = TH_MMIO_CONFIG;
+ int bar = 0; /* cut subdevices' MMIO from resource[0] */
/*
* Take .end == 0 to mean 'take the whole bar',
@@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
*/
if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
bar = res[r].start;
+ if (is64bit)
+ bar *= 2;
res[r].start = 0;
res[r].end = resource_size(&devres[bar]) - 1;
}
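
The bar *= 2 fixup above follows from PCI BAR layout: a 64-bit memory BAR occupies
two consecutive 32-bit BAR slots, so when the parent device exposes
IORESOURCE_MEM_64 resources, logical BAR index N lands in resource slot 2*N.
A sketch of the two pieces, with hypothetical helper names:

	#include <linux/ioport.h>

	static bool th_resources_are_64bit(const struct resource *res, int num)
	{
		int r;

		for (r = 0; r < num; r++)
			if (res[r].flags & IORESOURCE_MEM_64)
				return true;
		return false;
	}

	static int th_bar_to_slot(int bar, bool is64bit)
	{
		return is64bit ? bar * 2 : bar;	/* 64-bit BARs take two slots */
	}
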
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index c2e55e5d97f6..1cf6290d6435 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Ice Lake PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 94d94b4a9a0d..18cc324f3ca9 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -34,11 +34,11 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
- u32 ic_clk = i2c_dw_clk_rate(dev);
const char *mode_str, *fp_str = "";
u32 comp_param1;
u32 sda_falling_time, scl_falling_time;
struct i2c_timings *t = &dev->timings;
+ u32 ic_clk;
int ret;
ret = i2c_dw_acquire_lock(dev);
@@ -53,6 +53,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
/* Calculate SCL timing parameters for standard mode if not set */
if (!dev->ss_hcnt || !dev->ss_lcnt) {
+ ic_clk = i2c_dw_clk_rate(dev);
dev->ss_hcnt =
i2c_dw_scl_hcnt(ic_clk,
4000, /* tHD;STA = tHIGH = 4.0 us */
@@ -89,6 +90,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
* needed also in high speed mode.
*/
if (!dev->fs_hcnt || !dev->fs_lcnt) {
+ ic_clk = i2c_dw_clk_rate(dev);
dev->fs_hcnt =
i2c_dw_scl_hcnt(ic_clk,
600, /* tHD;STA = tHIGH = 0.6 us */
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 0cf1379f4e80..5c754bf659e2 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -164,7 +164,7 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
* run ~75 kHz instead which should do no harm.
*/
dev_notice(&sch_adapter.dev,
- "Clock divider unitialized. Setting defaults\n");
+ "Clock divider uninitialized. Setting defaults\n");
outw(backbone_speed / (4 * 100), SMBHSTCLK);
}
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 36732eb688a4..9f2eb02481d3 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -367,20 +367,26 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_addr_t rx_dma;
enum geni_se_xfer_mode mode;
unsigned long time_left = XFER_TIMEOUT;
+ void *dma_buf;
gi2c->cur = msg;
- mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+ mode = GENI_SE_FIFO;
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ if (dma_buf)
+ mode = GENI_SE_DMA;
+
geni_se_select_mode(&gi2c->se, mode);
writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN);
geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param);
if (mode == GENI_SE_DMA) {
int ret;
- ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len,
+ ret = geni_se_rx_dma_prep(&gi2c->se, dma_buf, msg->len,
&rx_dma);
if (ret) {
mode = GENI_SE_FIFO;
geni_se_select_mode(&gi2c->se, mode);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
}
}
@@ -393,6 +399,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
if (gi2c->err)
geni_i2c_rx_fsm_rst(gi2c);
geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
}
return gi2c->err;
}
@@ -403,20 +410,26 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_addr_t tx_dma;
enum geni_se_xfer_mode mode;
unsigned long time_left;
+ void *dma_buf;
gi2c->cur = msg;
- mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+ mode = GENI_SE_FIFO;
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ if (dma_buf)
+ mode = GENI_SE_DMA;
+
geni_se_select_mode(&gi2c->se, mode);
writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN);
geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param);
if (mode == GENI_SE_DMA) {
int ret;
- ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len,
+ ret = geni_se_tx_dma_prep(&gi2c->se, dma_buf, msg->len,
&tx_dma);
if (ret) {
mode = GENI_SE_FIFO;
geni_se_select_mode(&gi2c->se, mode);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
}
}
@@ -432,6 +445,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
if (gi2c->err)
geni_i2c_tx_fsm_rst(gi2c);
geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
}
return gi2c->err;
}
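
The geni changes above adopt the generic DMA-safe message buffer helpers. A
condensed sketch of the pattern, with controller programming elided and a
hypothetical xfer_one() wrapper:

	#include <linux/i2c.h>

	static int xfer_one(struct i2c_msg *msg)
	{
		void *dma_buf;

		/*
		 * Returns NULL for messages shorter than the threshold (use
		 * PIO), msg->buf itself when it is flagged DMA safe, or a
		 * freshly allocated bounce buffer otherwise.
		 */
		dma_buf = i2c_get_dma_safe_msg_buf(msg, 32 /* byte threshold */);

		if (dma_buf) {
			/* ... program the controller in DMA mode on dma_buf ... */

			/*
			 * Third argument says whether the transfer succeeded; for
			 * reads it decides if the bounce buffer is copied back to
			 * msg->buf before being freed.
			 */
			i2c_put_dma_safe_msg_buf(dma_buf, msg, true);
		} else {
			/* ... fall back to FIFO/PIO directly on msg->buf ... */
		}
		return 0;
	}
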
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 52cf42b32f0a..4aa7dde876f3 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -806,8 +806,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
time_left = wait_event_timeout(priv->wait, priv->flags & ID_DONE,
num * adap->timeout);
- if (!time_left) {
+
+ /* cleanup DMA if it couldn't complete properly due to an error */
+ if (priv->dma_direction != DMA_NONE)
rcar_i2c_cleanup_dma(priv);
+
+ if (!time_left) {
rcar_i2c_init(priv);
ret = -ETIMEDOUT;
} else if (priv->flags & ID_NACK) {
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index a01389b85f13..7e9a2bbf5ddc 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
mt_params[3].type = ACPI_TYPE_INTEGER;
mt_params[3].integer.value = len;
mt_params[4].type = ACPI_TYPE_BUFFER;
+ mt_params[4].buffer.length = len;
mt_params[4].buffer.pointer = data->block + 1;
}
break;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 9ee9a15e7134..9200e349f29e 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2270,7 +2270,7 @@ EXPORT_SYMBOL(i2c_put_adapter);
*
* Return: NULL if a DMA safe buffer was not obtained. Use msg->buf with PIO.
* Or a valid pointer to be used with DMA. After use, release it by
- * calling i2c_release_dma_safe_msg_buf().
+ * calling i2c_put_dma_safe_msg_buf().
*
* This function must only be called from process context!
*/
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 401308e3d036..13882a2a4f60 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -22,18 +22,16 @@ struct gpiomux {
struct i2c_mux_gpio_platform_data data;
unsigned gpio_base;
struct gpio_desc **gpios;
- int *values;
};
static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
{
- int i;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(val));
- for (i = 0; i < mux->data.n_gpios; i++)
- mux->values[i] = (val >> i) & 1;
+ values[0] = val;
- gpiod_set_array_value_cansleep(mux->data.n_gpios,
- mux->gpios, mux->values);
+ gpiod_set_array_value_cansleep(mux->data.n_gpios, mux->gpios, NULL,
+ values);
}
static int i2c_mux_gpio_select(struct i2c_mux_core *muxc, u32 chan)
@@ -182,15 +180,13 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values,
- mux->data.n_gpios * sizeof(*mux->gpios) +
- mux->data.n_gpios * sizeof(*mux->values), 0,
+ mux->data.n_gpios * sizeof(*mux->gpios), 0,
i2c_mux_gpio_select, NULL);
if (!muxc) {
ret = -ENOMEM;
goto alloc_failed;
}
mux->gpios = muxc->priv;
- mux->values = (int *)(mux->gpios + mux->data.n_gpios);
muxc->priv = mux;
platform_set_drvdata(pdev, muxc);
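
The mux conversion above targets the bitmap-based gpiod array API: per-line values
are now a bitmap in which bit i drives descriptor i, which also removes the
per-mux int array. A minimal sketch (mux_set_lines() is a hypothetical name;
assumes at most BITS_PER_TYPE(val) lines):

	#include <linux/bitmap.h>
	#include <linux/bits.h>
	#include <linux/gpio/consumer.h>

	static void mux_set_lines(struct gpio_desc **gpios, unsigned int n_gpios,
				  unsigned int val)
	{
		DECLARE_BITMAP(values, BITS_PER_TYPE(val));

		values[0] = val;	/* bit i of val selects line i */

		/* NULL: no pre-cooked struct gpio_array fast path */
		gpiod_set_array_value_cansleep(n_gpios, gpios, NULL, values);
	}
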
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 44a7a255ef74..f9b59d41813f 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1784,7 +1784,7 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- device_add_disk(&drive->gendev, g);
+ device_add_disk(&drive->gendev, g, NULL);
return 0;
out_free_disk:
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index e823394ed543..04e008e8f6f9 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -416,7 +416,7 @@ static int ide_gd_probe(ide_drive_t *drive)
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
- device_add_disk(&drive->gendev, g);
+ device_add_disk(&drive->gendev, g, NULL);
return 0;
out_free_disk:
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index b2ccce5fb071..8b5d85c91e9d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1066,46 +1066,43 @@ static const struct idle_cpu idle_cpu_dnv = {
.disable_promotion_to_c1e = true,
};
-#define ICPU(model, cpu) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
-
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
- ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem),
- ICPU(INTEL_FAM6_NEHALEM, idle_cpu_nehalem),
- ICPU(INTEL_FAM6_NEHALEM_G, idle_cpu_nehalem),
- ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem),
- ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem),
- ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem),
- ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom),
- ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft),
- ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem),
- ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb),
- ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
- ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, idle_cpu_tangier),
- ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
- ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
- ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
- ICPU(INTEL_FAM6_HASWELL_CORE, idle_cpu_hsw),
- ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw),
- ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw),
- ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn),
- ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw),
- ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw),
- ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw),
- ICPU(INTEL_FAM6_BROADWELL_XEON_D, idle_cpu_bdw),
- ICPU(INTEL_FAM6_SKYLAKE_MOBILE, idle_cpu_skl),
- ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, idle_cpu_skl),
- ICPU(INTEL_FAM6_KABYLAKE_MOBILE, idle_cpu_skl),
- ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, idle_cpu_skl),
- ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx),
- ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
- ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl),
- ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, idle_cpu_bxt),
- ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
+ INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom),
+ INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft),
+ INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nehalem),
+ INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb),
+ INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snb),
+ INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom),
+ INTEL_CPU_FAM6(ATOM_SILVERMONT, idle_cpu_byt),
+ INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier),
+ INTEL_CPU_FAM6(ATOM_AIRMONT, idle_cpu_cht),
+ INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb),
+ INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt),
+ INTEL_CPU_FAM6(HASWELL_CORE, idle_cpu_hsw),
+ INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsw),
+ INTEL_CPU_FAM6(HASWELL_ULT, idle_cpu_hsw),
+ INTEL_CPU_FAM6(HASWELL_GT3E, idle_cpu_hsw),
+ INTEL_CPU_FAM6(ATOM_SILVERMONT_X, idle_cpu_avn),
+ INTEL_CPU_FAM6(BROADWELL_CORE, idle_cpu_bdw),
+ INTEL_CPU_FAM6(BROADWELL_GT3E, idle_cpu_bdw),
+ INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdw),
+ INTEL_CPU_FAM6(BROADWELL_XEON_D, idle_cpu_bdw),
+ INTEL_CPU_FAM6(SKYLAKE_MOBILE, idle_cpu_skl),
+ INTEL_CPU_FAM6(SKYLAKE_DESKTOP, idle_cpu_skl),
+ INTEL_CPU_FAM6(KABYLAKE_MOBILE, idle_cpu_skl),
+ INTEL_CPU_FAM6(KABYLAKE_DESKTOP, idle_cpu_skl),
+ INTEL_CPU_FAM6(SKYLAKE_X, idle_cpu_skx),
+ INTEL_CPU_FAM6(XEON_PHI_KNL, idle_cpu_knl),
+ INTEL_CPU_FAM6(XEON_PHI_KNM, idle_cpu_knl),
+ INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt),
+ INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt),
+ INTEL_CPU_FAM6(ATOM_GOLDMONT_X, idle_cpu_dnv),
{}
};
@@ -1322,7 +1319,7 @@ static void intel_idle_state_table_update(void)
ivt_idle_state_table_update();
break;
case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
bxt_idle_state_table_update();
break;
case INTEL_FAM6_SKYLAKE_DESKTOP:
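
For orientation: the local ICPU() macro deleted above expanded to
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }, and
INTEL_CPU_FAM6() is the shared helper of the same shape, so the per-driver
boilerplate goes away while matching works as usual. A sketch of the lookup side
(find_idle_cpu() is hypothetical; struct idle_cpu is the driver's own type):

	#include <asm/cpu_device_id.h>

	struct idle_cpu;

	static const struct idle_cpu *find_idle_cpu(const struct x86_cpu_id *ids)
	{
		const struct x86_cpu_id *id = x86_match_cpu(ids);

		return id ? (const struct idle_cpu *)id->driver_data : NULL;
	}
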
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 80df5a377d30..cafb1dcadc48 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -693,16 +693,12 @@ static int __maybe_unused tiadc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
- struct ti_tscadc_dev *tscadc_dev;
unsigned int idle;
- tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
- if (!device_may_wakeup(tscadc_dev->dev)) {
- idle = tiadc_readl(adc_dev, REG_CTRL);
- idle &= ~(CNTRLREG_TSCSSENB);
- tiadc_writel(adc_dev, REG_CTRL, (idle |
- CNTRLREG_POWERDOWN));
- }
+ idle = tiadc_readl(adc_dev, REG_CTRL);
+ idle &= ~(CNTRLREG_TSCSSENB);
+ tiadc_writel(adc_dev, REG_CTRL, (idle |
+ CNTRLREG_POWERDOWN));
return 0;
}
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 1f112ae15f3c..b09b8b60bd83 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -206,7 +206,8 @@ static const struct regmap_config apds9960_regmap_config = {
.name = APDS9960_REGMAP_NAME,
.reg_bits = 8,
.val_bits = 8,
- .use_single_rw = 1,
+ .use_single_read = true,
+ .use_single_write = true,
.volatile_table = &apds9960_volatile_table,
.precious_table = &apds9960_precious_table,
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index bcdb0eb9e537..4067dff2ff6a 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -473,17 +473,18 @@ static bool max44000_precious_reg(struct device *dev, unsigned int reg)
}
static const struct regmap_config max44000_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = MAX44000_REG_PRX_DATA,
- .readable_reg = max44000_readable_reg,
- .writeable_reg = max44000_writeable_reg,
- .volatile_reg = max44000_volatile_reg,
- .precious_reg = max44000_precious_reg,
-
- .use_single_rw = 1,
- .cache_type = REGCACHE_RBTREE,
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = MAX44000_REG_PRX_DATA,
+ .readable_reg = max44000_readable_reg,
+ .writeable_reg = max44000_writeable_reg,
+ .volatile_reg = max44000_volatile_reg,
+ .precious_reg = max44000_precious_reg,
+
+ .use_single_read = true,
+ .use_single_write = true,
+ .cache_type = REGCACHE_RBTREE,
};
static irqreturn_t max44000_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index 9851311aa3fd..be03be719efe 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -140,7 +140,8 @@ static const struct regmap_config mlx90632_regmap = {
.rd_table = &mlx90632_readable_regs_tbl,
.wr_table = &mlx90632_writeable_regs_tbl,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.reg_format_endian = REGMAP_ENDIAN_BIG,
.val_format_endian = REGMAP_ENDIAN_BIG,
.cache_type = REGCACHE_RBTREE,
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index abb6660c099c..0a3ec7c726ec 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -26,6 +26,7 @@ config INFINIBAND_USER_MAD
config INFINIBAND_USER_ACCESS
tristate "InfiniBand userspace access (verbs and CM)"
select ANON_INODES
+ depends on MMU
---help---
Userspace InfiniBand access support. This enables the
kernel side of userspace verbs and the userspace
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 46b855a42884..0dce94e3c495 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -45,6 +45,7 @@
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
+#include <rdma/ib_sa.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
@@ -61,6 +62,7 @@ struct addr_req {
struct rdma_dev_addr *addr, void *context);
unsigned long timeout;
struct delayed_work work;
+ bool resolve_by_gid_attr; /* Consider gid attr in resolve phase */
int status;
u32 seq;
};
@@ -219,60 +221,75 @@ int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
}
EXPORT_SYMBOL(rdma_addr_size_kss);
-void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
- const struct net_device *dev,
- const unsigned char *dst_dev_addr)
+/**
+ * rdma_copy_src_l2_addr - Copy netdevice source addresses
+ * @dev_addr: Destination address pointer where to copy the addresses
+ * @dev: Netdevice whose source addresses to copy
+ *
+ * rdma_copy_src_l2_addr() copies source addresses from the specified netdevice.
+ * This includes unicast address, broadcast address, device type and
+ * interface index.
+ */
+void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev)
{
dev_addr->dev_type = dev->type;
memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
- if (dst_dev_addr)
- memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
dev_addr->bound_dev_if = dev->ifindex;
}
-EXPORT_SYMBOL(rdma_copy_addr);
+EXPORT_SYMBOL(rdma_copy_src_l2_addr);
-int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr)
+static struct net_device *
+rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
{
- struct net_device *dev;
+ struct net_device *dev = NULL;
+ int ret = -EADDRNOTAVAIL;
- if (dev_addr->bound_dev_if) {
- dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
- if (!dev)
- return -ENODEV;
- rdma_copy_addr(dev_addr, dev, NULL);
- dev_put(dev);
- return 0;
- }
-
- switch (addr->sa_family) {
+ switch (src_in->sa_family) {
case AF_INET:
- dev = ip_dev_find(dev_addr->net,
- ((const struct sockaddr_in *)addr)->sin_addr.s_addr);
-
- if (!dev)
- return -EADDRNOTAVAIL;
-
- rdma_copy_addr(dev_addr, dev, NULL);
- dev_put(dev);
+ dev = __ip_dev_find(net,
+ ((const struct sockaddr_in *)src_in)->sin_addr.s_addr,
+ false);
+ if (dev)
+ ret = 0;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- rcu_read_lock();
- for_each_netdev_rcu(dev_addr->net, dev) {
- if (ipv6_chk_addr(dev_addr->net,
- &((const struct sockaddr_in6 *)addr)->sin6_addr,
+ for_each_netdev_rcu(net, dev) {
+ if (ipv6_chk_addr(net,
+ &((const struct sockaddr_in6 *)src_in)->sin6_addr,
dev, 1)) {
- rdma_copy_addr(dev_addr, dev, NULL);
+ ret = 0;
break;
}
}
- rcu_read_unlock();
break;
#endif
}
- return 0;
+ return ret ? ERR_PTR(ret) : dev;
+}
+
+int rdma_translate_ip(const struct sockaddr *addr,
+ struct rdma_dev_addr *dev_addr)
+{
+ struct net_device *dev;
+
+ if (dev_addr->bound_dev_if) {
+ dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ if (!dev)
+ return -ENODEV;
+ rdma_copy_src_l2_addr(dev_addr, dev);
+ dev_put(dev);
+ return 0;
+ }
+
+ rcu_read_lock();
+ dev = rdma_find_ndev_for_src_ip_rcu(dev_addr->net, addr);
+ if (!IS_ERR(dev))
+ rdma_copy_src_l2_addr(dev_addr, dev);
+ rcu_read_unlock();
+ return PTR_ERR_OR_ZERO(dev);
}
EXPORT_SYMBOL(rdma_translate_ip);
@@ -295,15 +312,12 @@ static void queue_req(struct addr_req *req)
spin_unlock_bh(&lock);
}
-static int ib_nl_fetch_ha(const struct dst_entry *dst,
- struct rdma_dev_addr *dev_addr,
+static int ib_nl_fetch_ha(struct rdma_dev_addr *dev_addr,
const void *daddr, u32 seq, u16 family)
{
- if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
+ if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
return -EADDRNOTAVAIL;
- /* We fill in what we can, the response will fill the rest */
- rdma_copy_addr(dev_addr, dst->dev, NULL);
return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
}
@@ -322,7 +336,7 @@ static int dst_fetch_ha(const struct dst_entry *dst,
neigh_event_send(n, NULL);
ret = -ENODATA;
} else {
- rdma_copy_addr(dev_addr, dst->dev, n->ha);
+ memcpy(dev_addr->dst_dev_addr, n->ha, MAX_ADDR_LEN);
}
neigh_release(n);
@@ -356,18 +370,22 @@ static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
(const void *)&dst_in6->sin6_addr;
sa_family_t family = dst_in->sa_family;
- /* Gateway + ARPHRD_INFINIBAND -> IB router */
- if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND)
- return ib_nl_fetch_ha(dst, dev_addr, daddr, seq, family);
+ /* If we have a gateway in IB mode then it must be an IB network */
+ if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
+ return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
else
return dst_fetch_ha(dst, dev_addr, daddr);
}
-static int addr4_resolve(struct sockaddr_in *src_in,
- const struct sockaddr_in *dst_in,
+static int addr4_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
struct rdma_dev_addr *addr,
struct rtable **prt)
{
+ struct sockaddr_in *src_in = (struct sockaddr_in *)src_sock;
+ const struct sockaddr_in *dst_in =
+ (const struct sockaddr_in *)dst_sock;
+
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
struct rtable *rt;
@@ -383,16 +401,8 @@ static int addr4_resolve(struct sockaddr_in *src_in,
if (ret)
return ret;
- src_in->sin_family = AF_INET;
src_in->sin_addr.s_addr = fl4.saddr;
- /* If there's a gateway and type of device not ARPHRD_INFINIBAND, we're
- * definitely in RoCE v2 (as RoCE v1 isn't routable) set the network
- * type accordingly.
- */
- if (rt->rt_uses_gateway && rt->dst.dev->type != ARPHRD_INFINIBAND)
- addr->network = RDMA_NETWORK_IPV4;
-
addr->hoplimit = ip4_dst_hoplimit(&rt->dst);
*prt = rt;
@@ -400,14 +410,16 @@ static int addr4_resolve(struct sockaddr_in *src_in,
}
#if IS_ENABLED(CONFIG_IPV6)
-static int addr6_resolve(struct sockaddr_in6 *src_in,
- const struct sockaddr_in6 *dst_in,
+static int addr6_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
struct rdma_dev_addr *addr,
struct dst_entry **pdst)
{
+ struct sockaddr_in6 *src_in = (struct sockaddr_in6 *)src_sock;
+ const struct sockaddr_in6 *dst_in =
+ (const struct sockaddr_in6 *)dst_sock;
struct flowi6 fl6;
struct dst_entry *dst;
- struct rt6_info *rt;
int ret;
memset(&fl6, 0, sizeof fl6);
@@ -419,19 +431,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
if (ret < 0)
return ret;
- rt = (struct rt6_info *)dst;
- if (ipv6_addr_any(&src_in->sin6_addr)) {
- src_in->sin6_family = AF_INET6;
+ if (ipv6_addr_any(&src_in->sin6_addr))
src_in->sin6_addr = fl6.saddr;
- }
-
- /* If there's a gateway and type of device not ARPHRD_INFINIBAND, we're
- * definitely in RoCE v2 (as RoCE v1 isn't routable) set the network
- * type accordingly.
- */
- if (rt->rt6i_flags & RTF_GATEWAY &&
- ip6_dst_idev(dst)->dev->type != ARPHRD_INFINIBAND)
- addr->network = RDMA_NETWORK_IPV6;
addr->hoplimit = ip6_dst_hoplimit(dst);
@@ -439,8 +440,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
return 0;
}
#else
-static int addr6_resolve(struct sockaddr_in6 *src_in,
- const struct sockaddr_in6 *dst_in,
+static int addr6_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
struct rdma_dev_addr *addr,
struct dst_entry **pdst)
{
@@ -451,36 +452,110 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
static int addr_resolve_neigh(const struct dst_entry *dst,
const struct sockaddr *dst_in,
struct rdma_dev_addr *addr,
+ unsigned int ndev_flags,
u32 seq)
{
- if (dst->dev->flags & IFF_LOOPBACK) {
- int ret;
+ int ret = 0;
- ret = rdma_translate_ip(dst_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, addr->src_dev_addr,
- MAX_ADDR_LEN);
+ if (ndev_flags & IFF_LOOPBACK) {
+ memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
+ } else {
+ if (!(ndev_flags & IFF_NOARP)) {
+ /* If the device doesn't do ARP internally */
+ ret = fetch_ha(dst, addr, dst_in, seq);
+ }
+ }
+ return ret;
+}
- return ret;
+static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+ const struct sockaddr *dst_in,
+ const struct dst_entry *dst,
+ const struct net_device *ndev)
+{
+ int ret = 0;
+
+ if (dst->dev->flags & IFF_LOOPBACK)
+ ret = rdma_translate_ip(dst_in, dev_addr);
+ else
+ rdma_copy_src_l2_addr(dev_addr, dst->dev);
+
+ /*
+ * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+ * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set
+ * the network type accordingly.
+ */
+ if (has_gateway(dst, dst_in->sa_family) &&
+ ndev->type != ARPHRD_INFINIBAND)
+ dev_addr->network = dst_in->sa_family == AF_INET ?
+ RDMA_NETWORK_IPV4 :
+ RDMA_NETWORK_IPV6;
+ else
+ dev_addr->network = RDMA_NETWORK_IB;
+
+ return ret;
+}
+
+static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
+ unsigned int *ndev_flags,
+ const struct sockaddr *dst_in,
+ const struct dst_entry *dst)
+{
+ struct net_device *ndev = READ_ONCE(dst->dev);
+
+ *ndev_flags = ndev->flags;
+ /* A physical device must be the RDMA device to use */
+ if (ndev->flags & IFF_LOOPBACK) {
+ /*
+ * RDMA (IB/RoCE, iWARP) doesn't run on the lo interface or a
+ * loopback IP address. So if the route resolved to the loopback
+ * interface, translate that to a real ndev based on a
+ * non-loopback IP address.
+ */
+ ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
+ if (IS_ERR(ndev))
+ return -ENODEV;
}
- /* If the device doesn't do ARP internally */
- if (!(dst->dev->flags & IFF_NOARP))
- return fetch_ha(dst, addr, dst_in, seq);
+ return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
+}
+
+static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
+{
+ struct net_device *ndev;
- rdma_copy_addr(addr, dst->dev, NULL);
+ ndev = rdma_read_gid_attr_ndev_rcu(addr->sgid_attr);
+ if (IS_ERR(ndev))
+ return PTR_ERR(ndev);
+ /*
+ * Since we are holding the RCU read lock, reading net and ifindex
+ * is safe without any additional reference, because
+ * change_net_namespace() in net/core/dev.c does rcu sync
+ * after it changes the state to IFF_DOWN and before
+ * updating netdev fields {net, ifindex}.
+ */
+ addr->net = dev_net(ndev);
+ addr->bound_dev_if = ndev->ifindex;
return 0;
}
+static void rdma_addr_set_net_defaults(struct rdma_dev_addr *addr)
+{
+ addr->net = &init_net;
+ addr->bound_dev_if = 0;
+}
+
static int addr_resolve(struct sockaddr *src_in,
const struct sockaddr *dst_in,
struct rdma_dev_addr *addr,
bool resolve_neigh,
+ bool resolve_by_gid_attr,
u32 seq)
{
- struct net_device *ndev;
- struct dst_entry *dst;
+ struct dst_entry *dst = NULL;
+ unsigned int ndev_flags = 0;
+ struct rtable *rt = NULL;
int ret;
if (!addr->net) {
@@ -488,58 +563,55 @@ static int addr_resolve(struct sockaddr *src_in,
return -EINVAL;
}
- if (src_in->sa_family == AF_INET) {
- struct rtable *rt = NULL;
- const struct sockaddr_in *dst_in4 =
- (const struct sockaddr_in *)dst_in;
-
- ret = addr4_resolve((struct sockaddr_in *)src_in,
- dst_in4, addr, &rt);
- if (ret)
- return ret;
-
- if (resolve_neigh)
- ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
-
- if (addr->bound_dev_if) {
- ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
- } else {
- ndev = rt->dst.dev;
- dev_hold(ndev);
+ rcu_read_lock();
+ if (resolve_by_gid_attr) {
+ if (!addr->sgid_attr) {
+ rcu_read_unlock();
+ pr_warn_ratelimited("%s: missing gid_attr\n", __func__);
+ return -EINVAL;
}
-
- ip_rt_put(rt);
- } else {
- const struct sockaddr_in6 *dst_in6 =
- (const struct sockaddr_in6 *)dst_in;
-
- ret = addr6_resolve((struct sockaddr_in6 *)src_in,
- dst_in6, addr,
- &dst);
- if (ret)
+ /*
+ * If the request is for a specific gid attribute of the
+ * rdma_dev_addr, derive net from the netdevice of the
+ * GID attribute.
+ */
+ ret = set_addr_netns_by_gid_rcu(addr);
+ if (ret) {
+ rcu_read_unlock();
return ret;
-
- if (resolve_neigh)
- ret = addr_resolve_neigh(dst, dst_in, addr, seq);
-
- if (addr->bound_dev_if) {
- ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
- } else {
- ndev = dst->dev;
- dev_hold(ndev);
}
-
- dst_release(dst);
}
-
- if (ndev) {
- if (ndev->flags & IFF_LOOPBACK)
- ret = rdma_translate_ip(dst_in, addr);
- else
- addr->bound_dev_if = ndev->ifindex;
- dev_put(ndev);
+ if (src_in->sa_family == AF_INET) {
+ ret = addr4_resolve(src_in, dst_in, addr, &rt);
+ dst = &rt->dst;
+ } else {
+ ret = addr6_resolve(src_in, dst_in, addr, &dst);
}
+ if (ret) {
+ rcu_read_unlock();
+ goto done;
+ }
+ ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
+ rcu_read_unlock();
+
+ /*
+ * Resolve neighbor destination address if requested and
+ * only if src addr translation didn't fail.
+ */
+ if (!ret && resolve_neigh)
+ ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);
+ if (src_in->sa_family == AF_INET)
+ ip_rt_put(rt);
+ else
+ dst_release(dst);
+done:
+ /*
+ * Clear the addr net to go back to its original state, only if it was
+ * derived from GID attribute in this context.
+ */
+ if (resolve_by_gid_attr)
+ rdma_addr_set_net_defaults(addr);
return ret;
}
@@ -554,7 +626,8 @@ static void process_one_req(struct work_struct *_work)
src_in = (struct sockaddr *)&req->src_addr;
dst_in = (struct sockaddr *)&req->dst_addr;
req->status = addr_resolve(src_in, dst_in, req->addr,
- true, req->seq);
+ true, req->resolve_by_gid_attr,
+ req->seq);
if (req->status && time_after_eq(jiffies, req->timeout)) {
req->status = -ETIMEDOUT;
} else if (req->status == -ENODATA) {
@@ -586,10 +659,10 @@ static void process_one_req(struct work_struct *_work)
}
int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr, int timeout_ms,
+ struct rdma_dev_addr *addr, unsigned long timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
- void *context)
+ bool resolve_by_gid_attr, void *context)
{
struct sockaddr *src_in, *dst_in;
struct addr_req *req;
@@ -617,10 +690,12 @@ int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
req->addr = addr;
req->callback = callback;
req->context = context;
+ req->resolve_by_gid_attr = resolve_by_gid_attr;
INIT_DELAYED_WORK(&req->work, process_one_req);
req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
- req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
+ req->status = addr_resolve(src_in, dst_in, addr, true,
+ req->resolve_by_gid_attr, req->seq);
switch (req->status) {
case 0:
req->timeout = jiffies;
@@ -641,25 +716,53 @@ err:
}
EXPORT_SYMBOL(rdma_resolve_ip);
-int rdma_resolve_ip_route(struct sockaddr *src_addr,
- const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr)
+int roce_resolve_route_from_path(struct sa_path_rec *rec,
+ const struct ib_gid_attr *attr)
{
- struct sockaddr_storage ssrc_addr = {};
- struct sockaddr *src_in = (struct sockaddr *)&ssrc_addr;
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid, dgid;
+ struct rdma_dev_addr dev_addr = {};
+ int ret;
- if (src_addr) {
- if (src_addr->sa_family != dst_addr->sa_family)
- return -EINVAL;
+ if (rec->roce.route_resolved)
+ return 0;
- memcpy(src_in, src_addr, rdma_addr_size(src_addr));
- } else {
- src_in->sa_family = dst_addr->sa_family;
- }
+ rdma_gid2ip(&sgid._sockaddr, &rec->sgid);
+ rdma_gid2ip(&dgid._sockaddr, &rec->dgid);
+
+ if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
+ return -EINVAL;
+
+ if (!attr || !attr->ndev)
+ return -EINVAL;
+
+ dev_addr.net = &init_net;
+ dev_addr.sgid_attr = attr;
+
+ ret = addr_resolve(&sgid._sockaddr, &dgid._sockaddr,
+ &dev_addr, false, true, 0);
+ if (ret)
+ return ret;
+
+ if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
+ dev_addr.network == RDMA_NETWORK_IPV6) &&
+ rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
+ return -EINVAL;
- return addr_resolve(src_in, dst_addr, addr, false, 0);
+ rec->roce.route_resolved = true;
+ return 0;
}
+/**
+ * rdma_addr_cancel - Cancel resolve ip request
+ * @addr: Pointer to address structure given previously
+ * during rdma_resolve_ip().
+ * rdma_addr_cancel() is a synchronous function that cancels any pending
+ * request if there is any.
+ */
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
struct addr_req *req, *temp_req;
@@ -687,11 +790,6 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
* guarantees no work is running and none will be started.
*/
cancel_delayed_work_sync(&found->work);
-
- if (found->callback)
- found->callback(-ECANCELED, (struct sockaddr *)&found->src_addr,
- found->addr, found->context);
-
kfree(found);
}
EXPORT_SYMBOL(rdma_addr_cancel);
@@ -710,7 +808,7 @@ static void resolve_cb(int status, struct sockaddr *src_addr,
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *dmac, const struct net_device *ndev,
+ u8 *dmac, const struct ib_gid_attr *sgid_attr,
int *hoplimit)
{
struct rdma_dev_addr dev_addr;
@@ -726,12 +824,12 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
rdma_gid2ip(&dgid_addr._sockaddr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
- dev_addr.bound_dev_if = ndev->ifindex;
dev_addr.net = &init_net;
+ dev_addr.sgid_attr = sgid_attr;
init_completion(&ctx.comp);
ret = rdma_resolve_ip(&sgid_addr._sockaddr, &dgid_addr._sockaddr,
- &dev_addr, 1000, resolve_cb, &ctx);
+ &dev_addr, 1000, resolve_cb, true, &ctx);
if (ret)
return ret;
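
The reworked resolver above leans on the kernel's ERR_PTR convention: a single
pointer return carries either a valid object or a negative errno, and
PTR_ERR_OR_ZERO() folds it back into an int. A minimal sketch with a hypothetical
stand-in for rdma_find_ndev_for_src_ip_rcu():

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/netdevice.h>
	#include <linux/rcupdate.h>

	static struct net_device *find_ndev_rcu(void)
	{
		return ERR_PTR(-EADDRNOTAVAIL);	/* hypothetical lookup stub */
	}

	static int translate_example(void)
	{
		struct net_device *dev;

		rcu_read_lock();	/* lookup result only valid under RCU */
		dev = find_ndev_rcu();
		if (!IS_ERR(dev))
			; /* ... copy source L2 addresses from dev ... */
		rcu_read_unlock();

		return PTR_ERR_OR_ZERO(dev);	/* 0 on success, -errno otherwise */
	}
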
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 0bee1f4b914e..5b2fce4a7091 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -212,9 +212,8 @@ static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
u8 port_num = entry->attr.port_num;
struct ib_gid_table *table = rdma_gid_table(device, port_num);
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- device->name, port_num, entry->attr.index,
- entry->attr.gid.raw);
+ dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__,
+ port_num, entry->attr.index, entry->attr.gid.raw);
if (rdma_cap_roce_gid_table(device, port_num) &&
entry->state != GID_TABLE_ENTRY_INVALID)
@@ -289,9 +288,9 @@ static void store_gid_entry(struct ib_gid_table *table,
{
entry->state = GID_TABLE_ENTRY_VALID;
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- entry->attr.device->name, entry->attr.port_num,
- entry->attr.index, entry->attr.gid.raw);
+ dev_dbg(&entry->attr.device->dev, "%s port=%d index=%d gid %pI6\n",
+ __func__, entry->attr.port_num, entry->attr.index,
+ entry->attr.gid.raw);
lockdep_assert_held(&table->lock);
write_lock_irq(&table->rwlock);
@@ -320,17 +319,16 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
int ret;
if (!attr->ndev) {
- pr_err("%s NULL netdev device=%s port=%d index=%d\n",
- __func__, attr->device->name, attr->port_num,
- attr->index);
+ dev_err(&attr->device->dev, "%s NULL netdev port=%d index=%d\n",
+ __func__, attr->port_num, attr->index);
return -EINVAL;
}
if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
ret = attr->device->add_gid(attr, &entry->context);
if (ret) {
- pr_err("%s GID add failed device=%s port=%d index=%d\n",
- __func__, attr->device->name, attr->port_num,
- attr->index);
+ dev_err(&attr->device->dev,
+ "%s GID add failed port=%d index=%d\n",
+ __func__, attr->port_num, attr->index);
return ret;
}
}
@@ -338,6 +336,38 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
}
/**
+ * del_gid - Delete GID table entry
+ *
+ * @ib_dev: IB device whose GID entry is to be deleted
+ * @port: Port number of the IB device
+ * @table: GID table of the IB device for a port
+ * @ix: GID entry index to delete
+ *
+ */
+static void del_gid(struct ib_device *ib_dev, u8 port,
+ struct ib_gid_table *table, int ix)
+{
+ struct ib_gid_table_entry *entry;
+
+ lockdep_assert_held(&table->lock);
+
+ dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
+ ix, table->data_vec[ix]->attr.gid.raw);
+
+ write_lock_irq(&table->rwlock);
+ entry = table->data_vec[ix];
+ entry->state = GID_TABLE_ENTRY_PENDING_DEL;
+ /*
+ * For non RoCE protocol, GID entry slot is ready to use.
+ */
+ if (!rdma_protocol_roce(ib_dev, port))
+ table->data_vec[ix] = NULL;
+ write_unlock_irq(&table->rwlock);
+
+ put_gid_entry_locked(entry);
+}
+
+/**
* add_modify_gid - Add or modify GID table entry
*
* @table: GID table in which GID to be added or modified
@@ -358,7 +388,7 @@ static int add_modify_gid(struct ib_gid_table *table,
* this index.
*/
if (is_gid_entry_valid(table->data_vec[attr->index]))
- put_gid_entry(table->data_vec[attr->index]);
+ del_gid(attr->device, attr->port_num, table, attr->index);
/*
* Some HCA's report multiple GID entries with only one valid GID, and
@@ -386,39 +416,6 @@ done:
return ret;
}
-/**
- * del_gid - Delete GID table entry
- *
- * @ib_dev: IB device whose GID entry to be deleted
- * @port: Port number of the IB device
- * @table: GID table of the IB device for a port
- * @ix: GID entry index to delete
- *
- */
-static void del_gid(struct ib_device *ib_dev, u8 port,
- struct ib_gid_table *table, int ix)
-{
- struct ib_gid_table_entry *entry;
-
- lockdep_assert_held(&table->lock);
-
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- ib_dev->name, port, ix,
- table->data_vec[ix]->attr.gid.raw);
-
- write_lock_irq(&table->rwlock);
- entry = table->data_vec[ix];
- entry->state = GID_TABLE_ENTRY_PENDING_DEL;
- /*
- * For non RoCE protocol, GID entry slot is ready to use.
- */
- if (!rdma_protocol_roce(ib_dev, port))
- table->data_vec[ix] = NULL;
- write_unlock_irq(&table->rwlock);
-
- put_gid_entry_locked(entry);
-}
-
/* rwlock should be read locked, or lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
const struct ib_gid_attr *val, bool default_gid,
@@ -782,9 +779,9 @@ static void release_gid_table(struct ib_device *device, u8 port,
if (is_gid_entry_free(table->data_vec[i]))
continue;
if (kref_read(&table->data_vec[i]->kref) > 1) {
- pr_err("GID entry ref leak for %s (index %d) ref=%d\n",
- device->name, i,
- kref_read(&table->data_vec[i]->kref));
+ dev_err(&device->dev,
+ "GID entry ref leak for index %d ref=%d\n", i,
+ kref_read(&table->data_vec[i]->kref));
leak = true;
}
}
@@ -1252,6 +1249,39 @@ void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
}
EXPORT_SYMBOL(rdma_hold_gid_attr);
+/**
+ * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
+ * which must be in UP state.
+ *
+ * @attr: Pointer to the GID attribute
+ *
+ * Returns a pointer to the netdevice if the netdevice was attached to the
+ * GID and the netdevice is in UP state. The caller must hold the RCU read
+ * lock, as this API reads netdev flags that can change while the netdevice
+ * migrates to a different net namespace. Returns an ERR_PTR with an error
+ * code otherwise.
+ *
+ */
+struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(attr, struct ib_gid_table_entry, attr);
+ struct ib_device *device = entry->attr.device;
+ struct net_device *ndev = ERR_PTR(-ENODEV);
+ u8 port_num = entry->attr.port_num;
+ struct ib_gid_table *table;
+ unsigned long flags;
+ bool valid;
+
+ table = rdma_gid_table(device, port_num);
+
+ read_lock_irqsave(&table->rwlock, flags);
+ valid = is_gid_entry_valid(table->data_vec[attr->index]);
+ if (valid && attr->ndev && (READ_ONCE(attr->ndev->flags) & IFF_UP))
+ ndev = attr->ndev;
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return ndev;
+}
+
static int config_non_roce_gid_cache(struct ib_device *device,
u8 port, int gid_tbl_len)
{
@@ -1270,8 +1300,9 @@ static int config_non_roce_gid_cache(struct ib_device *device,
continue;
ret = device->query_gid(device, port, i, &gid_attr.gid);
if (ret) {
- pr_warn("query_gid failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ dev_warn(&device->dev,
+ "query_gid failed (%d) for index %d\n", ret,
+ i);
goto err;
}
gid_attr.index = i;
@@ -1300,8 +1331,7 @@ static void ib_cache_update(struct ib_device *device,
ret = ib_query_port(device, port, tprops);
if (ret) {
- pr_warn("ib_query_port failed (%d) for %s\n",
- ret, device->name);
+ dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
goto err;
}
@@ -1323,8 +1353,9 @@ static void ib_cache_update(struct ib_device *device,
for (i = 0; i < pkey_cache->table_len; ++i) {
ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
if (ret) {
- pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ dev_warn(&device->dev,
+ "ib_query_pkey failed (%d) for index %d\n",
+ ret, i);
goto err;
}
}
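
The logging conversion in cache.c swaps pr_*() plus an explicit device->name for
the dev_*() helpers, which derive the message prefix from the struct device
itself. A minimal before/after sketch; report_gid_error() is a hypothetical name:

	#include <linux/device.h>

	static void report_gid_error(struct device *dev, u8 port, int index)
	{
		/* was: pr_err("... failed for %s port=%d index=%d\n", name, ...); */
		dev_err(dev, "GID add failed port=%d index=%d\n", port, index);
	}
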
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 6e39c27dca8e..edb2cb758be7 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3292,8 +3292,11 @@ static int cm_lap_handler(struct cm_work *work)
if (ret)
goto unlock;
- cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av,
- cm_id_priv);
+ ret = cm_init_av_by_path(param->alternate_path, NULL,
+ &cm_id_priv->alt_av, cm_id_priv);
+ if (ret)
+ goto unlock;
+
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
@@ -4367,7 +4370,7 @@ static void cm_add_one(struct ib_device *ib_device)
cm_dev->going_down = 0;
cm_dev->device = device_create(&cm_class, &ib_device->dev,
MKDEV(0, 0), NULL,
- "%s", ib_device->name);
+ "%s", dev_name(&ib_device->dev));
if (IS_ERR(cm_dev->device)) {
kfree(cm_dev);
return;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a36c94930c31..15d5bb7bf6bb 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -639,13 +639,21 @@ static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}
-static int cma_acquire_dev(struct rdma_id_private *id_priv,
- const struct rdma_id_private *listen_id_priv)
+/**
+ * cma_acquire_dev_by_src_ip - Acquire cma device, port and GID attribute
+ * based on the source IP address.
+ * @id_priv: cm_id which should be bound to a cma device
+ *
+ * cma_acquire_dev_by_src_ip() binds the cm_id to a cma device, port and GID
+ * attribute based on the source IP address. It returns 0 on success or an
+ * error code otherwise. It is applicable to both active and passive side
+ * cm_ids.
+ */
+static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
const struct ib_gid_attr *sgid_attr;
- struct cma_device *cma_dev;
union ib_gid gid, iboe_gid, *gidp;
+ struct cma_device *cma_dev;
enum ib_gid_type gid_type;
int ret = -ENODEV;
u8 port;
@@ -654,41 +662,125 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
id_priv->id.ps == RDMA_PS_IPOIB)
return -EINVAL;
- mutex_lock(&lock);
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&iboe_gid);
memcpy(&gid, dev_addr->src_dev_addr +
- rdma_addr_gid_offset(dev_addr), sizeof gid);
-
- if (listen_id_priv) {
- cma_dev = listen_id_priv->cma_dev;
- port = listen_id_priv->id.port_num;
- gidp = rdma_protocol_roce(cma_dev->device, port) ?
- &iboe_gid : &gid;
- gid_type = listen_id_priv->gid_type;
- sgid_attr = cma_validate_port(cma_dev->device, port,
- gid_type, gidp, id_priv);
- if (!IS_ERR(sgid_attr)) {
- id_priv->id.port_num = port;
- cma_bind_sgid_attr(id_priv, sgid_attr);
- ret = 0;
- goto out;
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ mutex_lock(&lock);
+ list_for_each_entry(cma_dev, &dev_list, list) {
+ for (port = rdma_start_port(cma_dev->device);
+ port <= rdma_end_port(cma_dev->device); port++) {
+ gidp = rdma_protocol_roce(cma_dev->device, port) ?
+ &iboe_gid : &gid;
+ gid_type = cma_dev->default_gid_type[port - 1];
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, gidp, id_priv);
+ if (!IS_ERR(sgid_attr)) {
+ id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ cma_attach_to_dev(id_priv, cma_dev);
+ ret = 0;
+ goto out;
+ }
}
}
+out:
+ mutex_unlock(&lock);
+ return ret;
+}
+
+/**
+ * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
+ * @id_priv: cm id to bind to cma device
+ * @listen_id_priv: listener cm id to match against
+ * @req: Pointer to req structure containing incoming
+ *	 request information
+ *
+ * cma_ib_acquire_dev() acquires the cma device, port and SGID attribute when
+ * the rdma device of the incoming request matches that of the listen_id. It
+ * also verifies that a GID table entry is present for the source address.
+ * Returns 0 on success, or an error code otherwise.
+ */
+static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
+ const struct rdma_id_private *listen_id_priv,
+ struct cma_req_info *req)
+{
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr;
+ enum ib_gid_type gid_type;
+ union ib_gid gid;
+
+ if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
+ id_priv->id.ps == RDMA_PS_IPOIB)
+ return -EINVAL;
+
+ if (rdma_protocol_roce(req->device, req->port))
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &gid);
+ else
+ memcpy(&gid, dev_addr->src_dev_addr +
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
+ sgid_attr = cma_validate_port(req->device, req->port,
+ gid_type, &gid, id_priv);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
+
+ id_priv->id.port_num = req->port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+	/* Need to acquire the lock to protect against readers
+	 * of cma_dev->id_list such as cma_netdev_callback() and
+	 * cma_process_remove().
+	 */
+ mutex_lock(&lock);
+ cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
+ mutex_unlock(&lock);
+ return 0;
+}
+
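The locking comment in cma_ib_acquire_dev() refers to readers that walk cma_dev->id_list under the same global mutex; in sketch form (handle_id() is a hypothetical stand-in for the per-id work):

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list)
			handle_id(id_priv);
	mutex_unlock(&lock);
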
+static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
+ const struct rdma_id_private *listen_id_priv)
+{
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr;
+ struct cma_device *cma_dev;
+ enum ib_gid_type gid_type;
+ int ret = -ENODEV;
+ union ib_gid gid;
+ u8 port;
+
+ if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
+ id_priv->id.ps == RDMA_PS_IPOIB)
+ return -EINVAL;
+
+ memcpy(&gid, dev_addr->src_dev_addr +
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ mutex_lock(&lock);
+
+ cma_dev = listen_id_priv->cma_dev;
+ port = listen_id_priv->id.port_num;
+ gid_type = listen_id_priv->gid_type;
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, &gid, id_priv);
+ if (!IS_ERR(sgid_attr)) {
+ id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ ret = 0;
+ goto out;
+ }
list_for_each_entry(cma_dev, &dev_list, list) {
for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
- if (listen_id_priv &&
- listen_id_priv->cma_dev == cma_dev &&
+ if (listen_id_priv->cma_dev == cma_dev &&
listen_id_priv->id.port_num == port)
continue;
- gidp = rdma_protocol_roce(cma_dev->device, port) ?
- &iboe_gid : &gid;
gid_type = cma_dev->default_gid_type[port - 1];
sgid_attr = cma_validate_port(cma_dev->device, port,
- gid_type, gidp, id_priv);
+ gid_type, &gid, id_priv);
if (!IS_ERR(sgid_attr)) {
id_priv->id.port_num = port;
cma_bind_sgid_attr(id_priv, sgid_attr);
@@ -785,10 +877,7 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
if (!id_priv)
return ERR_PTR(-ENOMEM);
- if (caller)
- id_priv->res.kern_name = caller;
- else
- rdma_restrack_set_task(&id_priv->res, current);
+ rdma_restrack_set_task(&id_priv->res, caller);
id_priv->res.type = RDMA_RESTRACK_CM_ID;
id_priv->state = RDMA_CM_IDLE;
id_priv->id.context = context;
@@ -1462,18 +1551,35 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id)
return rdma_protocol_roce(device, port_num);
}
+static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
+{
+ const struct sockaddr *daddr =
+ (const struct sockaddr *)&req->listen_addr_storage;
+ const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
+
+ /* Returns true if the req is for IPv6 link local */
+ return (daddr->sa_family == AF_INET6 &&
+ (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
+}
+
static bool cma_match_net_dev(const struct rdma_cm_id *id,
const struct net_device *net_dev,
- u8 port_num)
+ const struct cma_req_info *req)
{
const struct rdma_addr *addr = &id->route.addr;
if (!net_dev)
/* This request is an AF_IB request */
- return (!id->port_num || id->port_num == port_num) &&
+ return (!id->port_num || id->port_num == req->port) &&
(addr->src_addr.ss_family == AF_IB);
/*
+	 * If the request is not for IPv6 link local, allow matching
+	 * the request to any netdevice of the single or multiport
+	 * rdma device.
+	 */
+ if (!cma_is_req_ipv6_ll(req))
+ return true;
+ /*
	 * Net namespaces must match, and if the listener is listening
	 * on a specific netdevice then the netdevice must match as well.
*/
@@ -1500,13 +1606,14 @@ static struct rdma_id_private *cma_find_listener(
hlist_for_each_entry(id_priv, &bind_list->owners, node) {
if (cma_match_private_data(id_priv, ib_event->private_data)) {
if (id_priv->id.device == cm_id->device &&
- cma_match_net_dev(&id_priv->id, net_dev, req->port))
+ cma_match_net_dev(&id_priv->id, net_dev, req))
return id_priv;
list_for_each_entry(id_priv_dev,
&id_priv->listen_list,
listen_list) {
if (id_priv_dev->id.device == cm_id->device &&
- cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
+ cma_match_net_dev(&id_priv_dev->id,
+ net_dev, req))
return id_priv_dev;
}
}
@@ -1518,18 +1625,18 @@ static struct rdma_id_private *cma_find_listener(
static struct rdma_id_private *
cma_ib_id_from_event(struct ib_cm_id *cm_id,
const struct ib_cm_event *ib_event,
+ struct cma_req_info *req,
struct net_device **net_dev)
{
- struct cma_req_info req;
struct rdma_bind_list *bind_list;
struct rdma_id_private *id_priv;
int err;
- err = cma_save_req_info(ib_event, &req);
+ err = cma_save_req_info(ib_event, req);
if (err)
return ERR_PTR(err);
- *net_dev = cma_get_net_dev(ib_event, &req);
+ *net_dev = cma_get_net_dev(ib_event, req);
if (IS_ERR(*net_dev)) {
if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
/* Assuming the protocol is AF_IB */
@@ -1567,17 +1674,17 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
if (!validate_net_dev(*net_dev,
- (struct sockaddr *)&req.listen_addr_storage,
- (struct sockaddr *)&req.src_addr_storage)) {
+ (struct sockaddr *)&req->listen_addr_storage,
+ (struct sockaddr *)&req->src_addr_storage)) {
id_priv = ERR_PTR(-EHOSTUNREACH);
goto err;
}
}
bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
- rdma_ps_from_service_id(req.service_id),
- cma_port_from_service_id(req.service_id));
- id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
+ rdma_ps_from_service_id(req->service_id),
+ cma_port_from_service_id(req->service_id));
+ id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
err:
rcu_read_unlock();
if (IS_ERR(id_priv) && *net_dev) {
@@ -1710,8 +1817,8 @@ void rdma_destroy_id(struct rdma_cm_id *id)
mutex_lock(&id_priv->handler_mutex);
mutex_unlock(&id_priv->handler_mutex);
+ rdma_restrack_del(&id_priv->res);
if (id_priv->cma_dev) {
- rdma_restrack_del(&id_priv->res);
if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.ib)
ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -1902,7 +2009,7 @@ cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
if (net_dev) {
- rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
+ rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
} else {
if (!cma_protocol_roce(listen_id) &&
cma_any_addr(cma_src_addr(id_priv))) {
@@ -1952,7 +2059,7 @@ cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
goto err;
if (net_dev) {
- rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
+ rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
} else {
if (!cma_any_addr(cma_src_addr(id_priv))) {
ret = cma_translate_addr(cma_src_addr(id_priv),
@@ -1999,11 +2106,12 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
{
struct rdma_id_private *listen_id, *conn_id = NULL;
struct rdma_cm_event event = {};
+ struct cma_req_info req = {};
struct net_device *net_dev;
u8 offset;
int ret;
- listen_id = cma_ib_id_from_event(cm_id, ib_event, &net_dev);
+ listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
if (IS_ERR(listen_id))
return PTR_ERR(listen_id);
@@ -2036,7 +2144,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
}
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
- ret = cma_acquire_dev(conn_id, listen_id);
+ ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
if (ret)
goto err2;
@@ -2232,7 +2340,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
goto out;
}
- ret = cma_acquire_dev(conn_id, listen_id);
+ ret = cma_iw_acquire_dev(conn_id, listen_id);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -2354,8 +2462,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
ret = rdma_listen(id, id_priv->backlog);
if (ret)
- pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
- ret, cma_dev->device->name);
+ dev_warn(&cma_dev->device->dev,
+ "RDMA CMA: cma_listen_on_dev, error %d\n", ret);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -2402,8 +2510,8 @@ static void cma_query_handler(int status, struct sa_path_rec *path_rec,
queue_work(cma_wq, &work->work);
}
-static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
- struct cma_work *work)
+static int cma_query_ib_route(struct rdma_id_private *id_priv,
+ unsigned long timeout_ms, struct cma_work *work)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
struct sa_path_rec path_rec;
@@ -2521,7 +2629,8 @@ static void cma_init_resolve_addr_work(struct cma_work *work,
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
}
-static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
+static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
+ unsigned long timeout_ms)
{
struct rdma_route *route = &id_priv->id.route;
struct cma_work *work;
@@ -2643,7 +2752,7 @@ err:
}
EXPORT_SYMBOL(rdma_set_ib_path);
-static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
+static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
{
struct cma_work *work;
@@ -2744,7 +2853,7 @@ err1:
return ret;
}
-int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
+int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv;
int ret;
@@ -2759,7 +2868,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
else if (rdma_protocol_roce(id->device, id->port_num))
ret = cma_resolve_iboe_route(id_priv);
else if (rdma_protocol_iwarp(id->device, id->port_num))
- ret = cma_resolve_iw_route(id_priv, timeout_ms);
+ ret = cma_resolve_iw_route(id_priv);
else
ret = -ENOSYS;
@@ -2862,7 +2971,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
if (!status && !id_priv->cma_dev) {
- status = cma_acquire_dev(id_priv, NULL);
+ status = cma_acquire_dev_by_src_ip(id_priv);
if (status)
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
status);
@@ -2882,13 +2991,11 @@ static void addr_handler(int status, struct sockaddr *src_addr,
if (id_priv->id.event_handler(&id_priv->id, &event)) {
cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
rdma_destroy_id(&id_priv->id);
return;
}
out:
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
@@ -2966,7 +3073,7 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- const struct sockaddr *dst_addr, int timeout_ms)
+ const struct sockaddr *dst_addr, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv;
int ret;
@@ -2985,16 +3092,16 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
return -EINVAL;
memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
- atomic_inc(&id_priv->refcount);
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
} else {
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
- ret = rdma_resolve_ip(cma_src_addr(id_priv),
- dst_addr, &id->route.addr.dev_addr,
- timeout_ms, addr_handler, id_priv);
+ ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
+ &id->route.addr.dev_addr,
+ timeout_ms, addr_handler,
+ false, id_priv);
}
}
if (ret)
@@ -3003,7 +3110,6 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
return 0;
err:
cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
- cma_deref_id(id_priv);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
@@ -3414,7 +3520,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (ret)
goto err1;
- ret = cma_acquire_dev(id_priv, NULL);
+ ret = cma_acquire_dev_by_src_ip(id_priv);
if (ret)
goto err1;
}
@@ -3439,10 +3545,9 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
return 0;
err2:
- if (id_priv->cma_dev) {
- rdma_restrack_del(&id_priv->res);
+ rdma_restrack_del(&id_priv->res);
+ if (id_priv->cma_dev)
cma_release_dev(id_priv);
- }
err1:
cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
return ret;
@@ -3839,10 +3944,7 @@ int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
id_priv = container_of(id, struct rdma_id_private, id);
- if (caller)
- id_priv->res.kern_name = caller;
- else
- rdma_restrack_set_task(&id_priv->res, current);
+ rdma_restrack_set_task(&id_priv->res, caller);
if (!cma_comp(id_priv, RDMA_CM_CONNECT))
return -EINVAL;
@@ -4087,9 +4189,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
(!ib_sa_sendonly_fullmem_support(&sa_client,
id_priv->id.device,
id_priv->id.port_num))) {
- pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
- "RDMA CM: SM doesn't support Send Only Full Member option\n",
- id_priv->id.device->name, id_priv->id.port_num);
+ dev_warn(
+ &id_priv->id.device->dev,
+ "RDMA CM: port %u Unable to multicast join: SM doesn't support Send Only Full Member option\n",
+ id_priv->id.port_num);
return -EOPNOTSUPP;
}
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index eee38b40be99..8c2dfb3e294e 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -65,7 +65,7 @@ static struct cma_dev_port_group *to_dev_port_group(struct config_item *item)
static bool filter_by_name(struct ib_device *ib_dev, void *cookie)
{
- return !strcmp(ib_dev->name, cookie);
+ return !strcmp(dev_name(&ib_dev->dev), cookie);
}
static int cma_configfs_params_get(struct config_item *item,
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 77c7005c396c..bb9007a0cca7 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -44,7 +44,7 @@
#include "mad_priv.h"
/* Total number of ports combined across all struct ib_devices's */
-#define RDMA_MAX_PORTS 1024
+#define RDMA_MAX_PORTS 8192
struct pkey_index_qp_list {
struct list_head pkey_index_list;
@@ -87,6 +87,7 @@ int ib_device_register_sysfs(struct ib_device *device,
int (*port_callback)(struct ib_device *,
u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);
+int ib_device_rename(struct ib_device *ibdev, const char *name);
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
struct net_device *idev, void *cookie);
@@ -338,7 +339,14 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr,
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *dmac, const struct net_device *ndev,
+ u8 *dmac, const struct ib_gid_attr *sgid_attr,
int *hoplimit);
+void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev);
+struct sa_path_rec;
+int roce_resolve_route_from_path(struct sa_path_rec *rec,
+ const struct ib_gid_attr *attr);
+
+struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index af5ad6a56ae4..b1e5365ddafa 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -112,12 +112,12 @@ static void ib_cq_poll_work(struct work_struct *work)
IB_POLL_BATCH);
if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
- queue_work(ib_comp_wq, &cq->work);
+ queue_work(cq->comp_wq, &cq->work);
}
static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
- queue_work(ib_comp_wq, &cq->work);
+ queue_work(cq->comp_wq, &cq->work);
}
/**
@@ -161,7 +161,7 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
goto out_destroy_cq;
cq->res.type = RDMA_RESTRACK_CQ;
- cq->res.kern_name = caller;
+ rdma_restrack_set_task(&cq->res, caller);
rdma_restrack_add(&cq->res);
switch (cq->poll_ctx) {
@@ -175,9 +175,12 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
break;
case IB_POLL_WORKQUEUE:
+ case IB_POLL_UNBOUND_WORKQUEUE:
cq->comp_handler = ib_cq_completion_workqueue;
INIT_WORK(&cq->work, ib_cq_poll_work);
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
+ ib_comp_wq : ib_comp_unbound_wq;
break;
default:
ret = -EINVAL;
@@ -213,6 +216,7 @@ void ib_free_cq(struct ib_cq *cq)
irq_poll_disable(&cq->iop);
break;
case IB_POLL_WORKQUEUE:
+ case IB_POLL_UNBOUND_WORKQUEUE:
cancel_work_sync(&cq->work);
break;
default:
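IB_POLL_UNBOUND_WORKQUEUE routes completion work to the new ib_comp_unbound_wq instead of the per-CPU bound ib_comp_wq, which suits consumers whose handlers may run for a long time; the MAD layer switches to it later in this series. A sketch of allocating such a CQ, with hypothetical sizing:

	struct ib_cq *cq;

	cq = ib_alloc_cq(device, priv, 256 /* nr_cqe */, 0 /* comp_vector */,
			 IB_POLL_UNBOUND_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);
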
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index db3b6271f09d..87eb4f2cdd7d 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -61,6 +61,7 @@ struct ib_client_data {
};
struct workqueue_struct *ib_comp_wq;
+struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
@@ -122,8 +123,9 @@ static int ib_device_check_mandatory(struct ib_device *device)
for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
- pr_warn("Device %s is missing mandatory function %s\n",
- device->name, mandatory_table[i].name);
+ dev_warn(&device->dev,
+ "Device is missing mandatory function %s\n",
+ mandatory_table[i].name);
return -EINVAL;
}
}
@@ -163,16 +165,40 @@ static struct ib_device *__ib_device_get_by_name(const char *name)
struct ib_device *device;
list_for_each_entry(device, &device_list, core_list)
- if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
+ if (!strcmp(name, dev_name(&device->dev)))
return device;
return NULL;
}
-static int alloc_name(char *name)
+int ib_device_rename(struct ib_device *ibdev, const char *name)
+{
+ struct ib_device *device;
+ int ret = 0;
+
+ if (!strcmp(name, dev_name(&ibdev->dev)))
+ return ret;
+
+ mutex_lock(&device_mutex);
+ list_for_each_entry(device, &device_list, core_list) {
+ if (!strcmp(name, dev_name(&device->dev))) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+
+ ret = device_rename(&ibdev->dev, name);
+ if (ret)
+ goto out;
+ strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
+out:
+ mutex_unlock(&device_mutex);
+ return ret;
+}
+
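ib_device_rename() takes device_mutex, rejects duplicate names, and keeps the legacy ibdev->name field in sync with the kobject name; the RDMA_NLDEV_CMD_SET handler added in nldev.c below is its caller. A direct-call sketch with a hypothetical name:

	err = ib_device_rename(ibdev, "roce0");
	if (err == -EEXIST)
		pr_debug("device name already in use\n");
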
+static int alloc_name(struct ib_device *ibdev, const char *name)
{
unsigned long *inuse;
- char buf[IB_DEVICE_NAME_MAX];
struct ib_device *device;
int i;
@@ -181,24 +207,21 @@ static int alloc_name(char *name)
return -ENOMEM;
list_for_each_entry(device, &device_list, core_list) {
- if (!sscanf(device->name, name, &i))
+ char buf[IB_DEVICE_NAME_MAX];
+
+ if (sscanf(dev_name(&device->dev), name, &i) != 1)
continue;
if (i < 0 || i >= PAGE_SIZE * 8)
continue;
snprintf(buf, sizeof buf, name, i);
- if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
+ if (!strcmp(buf, dev_name(&device->dev)))
set_bit(i, inuse);
}
i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
free_page((unsigned long) inuse);
- snprintf(buf, sizeof buf, name, i);
- if (__ib_device_get_by_name(buf))
- return -ENFILE;
-
- strlcpy(name, buf, IB_DEVICE_NAME_MAX);
- return 0;
+ return dev_set_name(&ibdev->dev, name, i);
}
static void ib_device_release(struct device *device)
@@ -221,9 +244,7 @@ static void ib_device_release(struct device *device)
static int ib_device_uevent(struct device *device,
struct kobj_uevent_env *env)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
-
- if (add_uevent_var(env, "NAME=%s", dev->name))
+ if (add_uevent_var(env, "NAME=%s", dev_name(device)))
return -ENOMEM;
/*
@@ -269,7 +290,7 @@ struct ib_device *ib_alloc_device(size_t size)
INIT_LIST_HEAD(&device->event_handler_list);
spin_lock_init(&device->event_handler_lock);
- spin_lock_init(&device->client_data_lock);
+ rwlock_init(&device->client_data_lock);
INIT_LIST_HEAD(&device->client_data_list);
INIT_LIST_HEAD(&device->port_list);
@@ -285,6 +306,7 @@ EXPORT_SYMBOL(ib_alloc_device);
*/
void ib_dealloc_device(struct ib_device *device)
{
+ WARN_ON(!list_empty(&device->client_data_list));
WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
device->reg_state != IB_DEV_UNINITIALIZED);
rdma_restrack_clean(&device->res);
@@ -295,9 +317,8 @@ EXPORT_SYMBOL(ib_dealloc_device);
static int add_client_context(struct ib_device *device, struct ib_client *client)
{
struct ib_client_data *context;
- unsigned long flags;
- context = kmalloc(sizeof *context, GFP_KERNEL);
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
@@ -306,9 +327,9 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
context->going_down = false;
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
+ write_lock_irq(&device->client_data_lock);
list_add(&context->list, &device->client_data_list);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
up_write(&lists_rwsem);
return 0;
@@ -444,22 +465,8 @@ static u32 __dev_new_index(void)
}
}
-/**
- * ib_register_device - Register an IB device with IB core
- * @device:Device to register
- *
- * Low-level drivers use ib_register_device() to register their
- * devices with the IB core. All registered clients will receive a
- * callback for each device that is added. @device must be allocated
- * with ib_alloc_device().
- */
-int ib_register_device(struct ib_device *device,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *))
+static void setup_dma_device(struct ib_device *device)
{
- int ret;
- struct ib_client *client;
- struct ib_udata uhw = {.outlen = 0, .inlen = 0};
struct device *parent = device->dev.parent;
WARN_ON_ONCE(device->dma_device);
@@ -491,56 +498,113 @@ int ib_register_device(struct ib_device *device,
WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
+}
- mutex_lock(&device_mutex);
+static void cleanup_device(struct ib_device *device)
+{
+ ib_cache_cleanup_one(device);
+ ib_cache_release_one(device);
+ kfree(device->port_pkey_list);
+ kfree(device->port_immutable);
+}
- if (strchr(device->name, '%')) {
- ret = alloc_name(device->name);
- if (ret)
- goto out;
- }
+static int setup_device(struct ib_device *device)
+{
+ struct ib_udata uhw = {.outlen = 0, .inlen = 0};
+ int ret;
- if (ib_device_check_mandatory(device)) {
- ret = -EINVAL;
- goto out;
- }
+ ret = ib_device_check_mandatory(device);
+ if (ret)
+ return ret;
ret = read_port_immutable(device);
if (ret) {
- pr_warn("Couldn't create per port immutable data %s\n",
- device->name);
- goto out;
+ dev_warn(&device->dev,
+ "Couldn't create per port immutable data\n");
+ return ret;
}
- ret = setup_port_pkey_list(device);
+ memset(&device->attrs, 0, sizeof(device->attrs));
+ ret = device->query_device(device, &device->attrs, &uhw);
if (ret) {
- pr_warn("Couldn't create per port_pkey_list\n");
- goto out;
+ dev_warn(&device->dev,
+ "Couldn't query the device attributes\n");
+ goto port_cleanup;
}
- ret = ib_cache_setup_one(device);
+ ret = setup_port_pkey_list(device);
if (ret) {
- pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
+ dev_warn(&device->dev, "Couldn't create per port_pkey_list\n");
goto port_cleanup;
}
- ret = ib_device_register_rdmacg(device);
+ ret = ib_cache_setup_one(device);
if (ret) {
- pr_warn("Couldn't register device with rdma cgroup\n");
- goto cache_cleanup;
+ dev_warn(&device->dev,
+ "Couldn't set up InfiniBand P_Key/GID cache\n");
+ goto pkey_cleanup;
+ }
+ return 0;
+
+pkey_cleanup:
+ kfree(device->port_pkey_list);
+port_cleanup:
+ kfree(device->port_immutable);
+ return ret;
+}
+
+/**
+ * ib_register_device - Register an IB device with IB core
+ * @device: Device to register
+ * @name: unique string device name. This may include a '%' which will
+ * cause a unique index to be added to the passed device name.
+ * @port_callback: optional driver callback invoked for each port sysfs
+ * kobject during registration.
+ *
+ * Low-level drivers use ib_register_device() to register their
+ * devices with the IB core. All registered clients will receive a
+ * callback for each device that is added. @device must be allocated
+ * with ib_alloc_device().
+ */
+int ib_register_device(struct ib_device *device, const char *name,
+ int (*port_callback)(struct ib_device *, u8,
+ struct kobject *))
+{
+ int ret;
+ struct ib_client *client;
+
+ setup_dma_device(device);
+
+ mutex_lock(&device_mutex);
+
+ if (strchr(name, '%')) {
+ ret = alloc_name(device, name);
+ if (ret)
+ goto out;
+ } else {
+ ret = dev_set_name(&device->dev, name);
+ if (ret)
+ goto out;
+ }
+ if (__ib_device_get_by_name(dev_name(&device->dev))) {
+ ret = -ENFILE;
+ goto out;
}
+ strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
- memset(&device->attrs, 0, sizeof(device->attrs));
- ret = device->query_device(device, &device->attrs, &uhw);
+ ret = setup_device(device);
+ if (ret)
+ goto out;
+
+ device->index = __dev_new_index();
+
+ ret = ib_device_register_rdmacg(device);
if (ret) {
- pr_warn("Couldn't query the device attributes\n");
- goto cg_cleanup;
+ dev_warn(&device->dev,
+ "Couldn't register device with rdma cgroup\n");
+ goto dev_cleanup;
}
ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
- pr_warn("Couldn't register device %s with driver model\n",
- device->name);
+ dev_warn(&device->dev,
+ "Couldn't register device with driver model\n");
goto cg_cleanup;
}
@@ -550,7 +614,6 @@ int ib_register_device(struct ib_device *device,
if (!add_client_context(device, client) && client->add)
client->add(device);
- device->index = __dev_new_index();
down_write(&lists_rwsem);
list_add_tail(&device->core_list, &device_list);
up_write(&lists_rwsem);
@@ -559,11 +622,8 @@ int ib_register_device(struct ib_device *device,
cg_cleanup:
ib_device_unregister_rdmacg(device);
-cache_cleanup:
- ib_cache_cleanup_one(device);
- ib_cache_release_one(device);
-port_cleanup:
- kfree(device->port_immutable);
+dev_cleanup:
+ cleanup_device(device);
out:
mutex_unlock(&device_mutex);
return ret;
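With the reworked registration path, drivers hand the name to ib_register_device() instead of pre-filling device->name; a '%' in the string makes alloc_name() pick a free index. A sketch, assuming a driver-chosen "foo_%d" pattern:

	ret = ib_register_device(ibdev, "foo_%d", NULL);
	if (ret)
		goto err_dealloc;
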
@@ -585,21 +645,20 @@ void ib_unregister_device(struct ib_device *device)
down_write(&lists_rwsem);
list_del(&device->core_list);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ write_lock_irq(&device->client_data_lock);
+ list_for_each_entry(context, &device->client_data_list, list)
context->going_down = true;
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
downgrade_write(&lists_rwsem);
- list_for_each_entry_safe(context, tmp, &device->client_data_list,
- list) {
+ list_for_each_entry(context, &device->client_data_list, list) {
if (context->client->remove)
context->client->remove(device, context->data);
}
up_read(&lists_rwsem);
- ib_device_unregister_rdmacg(device);
ib_device_unregister_sysfs(device);
+ ib_device_unregister_rdmacg(device);
mutex_unlock(&device_mutex);
@@ -609,10 +668,13 @@ void ib_unregister_device(struct ib_device *device)
kfree(device->port_pkey_list);
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ write_lock_irqsave(&device->client_data_lock, flags);
+ list_for_each_entry_safe(context, tmp, &device->client_data_list,
+ list) {
+ list_del(&context->list);
kfree(context);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ }
+ write_unlock_irqrestore(&device->client_data_lock, flags);
up_write(&lists_rwsem);
device->reg_state = IB_DEV_UNREGISTERED;
@@ -662,9 +724,8 @@ EXPORT_SYMBOL(ib_register_client);
*/
void ib_unregister_client(struct ib_client *client)
{
- struct ib_client_data *context, *tmp;
+ struct ib_client_data *context;
struct ib_device *device;
- unsigned long flags;
mutex_lock(&device_mutex);
@@ -676,14 +737,14 @@ void ib_unregister_client(struct ib_client *client)
struct ib_client_data *found_context = NULL;
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ write_lock_irq(&device->client_data_lock);
+ list_for_each_entry(context, &device->client_data_list, list)
if (context->client == client) {
context->going_down = true;
found_context = context;
break;
}
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
up_write(&lists_rwsem);
if (client->remove)
@@ -691,17 +752,18 @@ void ib_unregister_client(struct ib_client *client)
found_context->data : NULL);
if (!found_context) {
- pr_warn("No client context found for %s/%s\n",
- device->name, client->name);
+ dev_warn(&device->dev,
+ "No client context found for %s\n",
+ client->name);
continue;
}
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
+ write_lock_irq(&device->client_data_lock);
list_del(&found_context->list);
- kfree(found_context);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
up_write(&lists_rwsem);
+ kfree(found_context);
}
mutex_unlock(&device_mutex);
@@ -722,13 +784,13 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
void *ret = NULL;
unsigned long flags;
- spin_lock_irqsave(&device->client_data_lock, flags);
+ read_lock_irqsave(&device->client_data_lock, flags);
list_for_each_entry(context, &device->client_data_list, list)
if (context->client == client) {
ret = context->data;
break;
}
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ read_unlock_irqrestore(&device->client_data_lock, flags);
return ret;
}
@@ -749,18 +811,18 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
struct ib_client_data *context;
unsigned long flags;
- spin_lock_irqsave(&device->client_data_lock, flags);
+ write_lock_irqsave(&device->client_data_lock, flags);
list_for_each_entry(context, &device->client_data_list, list)
if (context->client == client) {
context->data = data;
goto out;
}
- pr_warn("No client context found for %s/%s\n",
- device->name, client->name);
+ dev_warn(&device->dev, "No client context found for %s\n",
+ client->name);
out:
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
@@ -1166,10 +1228,19 @@ static int __init ib_core_init(void)
goto err;
}
+ ib_comp_unbound_wq =
+ alloc_workqueue("ib-comp-unb-wq",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
+ WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
+ if (!ib_comp_unbound_wq) {
+ ret = -ENOMEM;
+ goto err_comp;
+ }
+
ret = class_register(&ib_class);
if (ret) {
pr_warn("Couldn't create InfiniBand device class\n");
- goto err_comp;
+ goto err_comp_unbound;
}
ret = rdma_nl_init();
@@ -1218,6 +1289,8 @@ err_ibnl:
rdma_nl_exit();
err_sysfs:
class_unregister(&ib_class);
+err_comp_unbound:
+ destroy_workqueue(ib_comp_unbound_wq);
err_comp:
destroy_workqueue(ib_comp_wq);
err:
@@ -1236,6 +1309,7 @@ static void __exit ib_core_cleanup(void)
addr_cleanup();
rdma_nl_exit();
class_unregister(&ib_class);
+ destroy_workqueue(ib_comp_unbound_wq);
destroy_workqueue(ib_comp_wq);
/* Make sure that any pending umem accounting work is done. */
destroy_workqueue(ib_wq);
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index a077500f7f32..83ba0068e8bb 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -213,7 +213,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
device = pd->device;
if (!device->alloc_fmr || !device->dealloc_fmr ||
!device->map_phys_fmr || !device->unmap_fmr) {
- pr_info(PFX "Device %s does not support FMRs\n", device->name);
+ dev_info(&device->dev, "Device does not support FMRs\n");
return ERR_PTR(-ENOSYS);
}
@@ -257,7 +257,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
atomic_set(&pool->flush_ser, 0);
init_waitqueue_head(&pool->force_wait);
- pool->worker = kthread_create_worker(0, "ib_fmr(%s)", device->name);
+ pool->worker =
+ kthread_create_worker(0, "ib_fmr(%s)", dev_name(&device->dev));
if (IS_ERR(pool->worker)) {
pr_warn(PFX "couldn't start cleanup kthread worker\n");
ret = PTR_ERR(pool->worker);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 5d676cff41f4..ba668d49c751 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -509,7 +509,7 @@ static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
cm_id->m_local_addr = cm_id->local_addr;
cm_id->m_remote_addr = cm_id->remote_addr;
- memcpy(pm_reg_msg.dev_name, cm_id->device->name,
+ memcpy(pm_reg_msg.dev_name, dev_name(&cm_id->device->dev),
sizeof(pm_reg_msg.dev_name));
memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
sizeof(pm_reg_msg.if_name));
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef459f2f2eeb..d7025cd5be28 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -220,33 +220,37 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
int ret2, qpn;
u8 mgmt_class, vclass;
+ if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
+ (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
+ return ERR_PTR(-EPROTONOSUPPORT);
+
/* Validate parameters */
qpn = get_spl_qp_index(qp_type);
if (qpn == -1) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid QP Type %d\n",
- qp_type);
+ dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
+ __func__, qp_type);
goto error1;
}
if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid RMPP Version %u\n",
- rmpp_version);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: invalid RMPP Version %u\n",
+ __func__, rmpp_version);
goto error1;
}
/* Validate MAD registration request if supplied */
if (mad_reg_req) {
if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid Class Version %u\n",
- mad_reg_req->mgmt_class_version);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: invalid Class Version %u\n",
+ __func__,
+ mad_reg_req->mgmt_class_version);
goto error1;
}
if (!recv_handler) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: no recv_handler\n");
+ dev_dbg_ratelimited(&device->dev,
+ "%s: no recv_handler\n", __func__);
goto error1;
}
if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
@@ -256,9 +260,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
*/
if (mad_reg_req->mgmt_class !=
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid Mgmt Class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
} else if (mad_reg_req->mgmt_class == 0) {
@@ -266,8 +270,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
* Class 0 is reserved in IBA and is used for
* aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
*/
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid Mgmt Class 0\n");
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid Mgmt Class 0\n",
+ __func__);
goto error1;
} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
/*
@@ -275,18 +280,19 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
* ensure supplied OUI is not zero
*/
if (!is_vendor_oui(mad_reg_req->oui)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: No OUI specified for class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: No OUI specified for class 0x%x\n",
+ __func__,
+ mad_reg_req->mgmt_class);
goto error1;
}
}
/* Make sure class supplied is consistent with RMPP */
if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
if (rmpp_version) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: RMPP version for non-RMPP class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
}
@@ -297,9 +303,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
(mad_reg_req->mgmt_class !=
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid SM QP type: class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
} else {
@@ -307,9 +313,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
(mad_reg_req->mgmt_class ==
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid GS QP type: class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
}
@@ -324,18 +330,18 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
/* Validate device and port */
port_priv = ib_get_mad_port(device, port_num);
if (!port_priv) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid port %d\n",
- port_num);
+ dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
+ __func__, port_num);
ret = ERR_PTR(-ENODEV);
goto error1;
}
- /* Verify the QP requested is supported. For example, Ethernet devices
- * will not have QP0 */
+ /* Verify the QP requested is supported. For example, Ethernet devices
+ * will not have QP0.
+ */
if (!port_priv->qp_info[qpn].qp) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: QP %d not supported\n", qpn);
+ dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
+ __func__, qpn);
ret = ERR_PTR(-EPROTONOSUPPORT);
goto error1;
}
@@ -2408,7 +2414,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
- int timeout_ms)
+ unsigned long timeout_ms)
{
mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
wait_for_response(mad_send_wr);
@@ -3183,7 +3189,7 @@ static int ib_mad_port_open(struct ib_device *device,
cq_size *= 2;
port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
- IB_POLL_WORKQUEUE);
+ IB_POLL_UNBOUND_WORKQUEUE);
if (IS_ERR(port_priv->cq)) {
dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
ret = PTR_ERR(port_priv->cq);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d84ae1671898..216509036aa8 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -221,6 +221,6 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr);
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
- int timeout_ms);
+ unsigned long timeout_ms);
#endif /* __IB_MAD_PRIV_H__ */
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 3ccaae18ad75..724f5a62e82f 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -47,9 +47,9 @@ static struct {
const struct rdma_nl_cbs *cb_table;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
-int rdma_nl_chk_listeners(unsigned int group)
+bool rdma_nl_chk_listeners(unsigned int group)
{
- return (netlink_has_listeners(nls, group)) ? 0 : -1;
+ return netlink_has_listeners(nls, group);
}
EXPORT_SYMBOL(rdma_nl_chk_listeners);
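Because rdma_nl_chk_listeners() now returns bool with positive-truth semantics, call sites test it directly rather than comparing against 0; the sa_query.c hunk below flips its condition accordingly. In sketch form (queue_nl_request() is hypothetical):

	if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
		queue_nl_request();
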
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 0385ab438320..573399e3ccc1 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -179,7 +179,8 @@ static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
return -EMSGSIZE;
- if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
+ dev_name(&device->dev)))
return -EMSGSIZE;
return 0;
@@ -645,6 +646,36 @@ err:
return err;
}
+static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ u32 index;
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+ extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
+ char name[IB_DEVICE_NAME_MAX] = {};
+
+ nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
+ IB_DEVICE_NAME_MAX);
+ err = ib_device_rename(device, name);
+ }
+
+ put_device(&device->dev);
+ return err;
+}
+
static int _nldev_get_dumpit(struct ib_device *device,
struct sk_buff *skb,
struct netlink_callback *cb,
@@ -1077,6 +1108,10 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.doit = nldev_get_doit,
.dump = nldev_get_dumpit,
},
+ [RDMA_NLDEV_CMD_SET] = {
+ .doit = nldev_set_doit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
[RDMA_NLDEV_CMD_PORT_GET] = {
.doit = nldev_port_get_doit,
.dump = nldev_port_get_dumpit,
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index c4118bcd5103..752a55c6bdce 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -794,44 +794,6 @@ void uverbs_close_fd(struct file *f)
uverbs_uobject_put(uobj);
}
-static void ufile_disassociate_ucontext(struct ib_ucontext *ibcontext)
-{
- struct ib_device *ib_dev = ibcontext->device;
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
- if (!owning_process)
- return;
-
- owning_mm = get_task_mm(owning_process);
- if (!owning_mm) {
- pr_info("no mm, disassociate ucontext is pending task termination\n");
- while (1) {
- put_task_struct(owning_process);
- usleep_range(1000, 2000);
- owning_process = get_pid_task(ibcontext->tgid,
- PIDTYPE_PID);
- if (!owning_process ||
- owning_process->state == TASK_DEAD) {
- pr_info("disassociate ucontext done, task was terminated\n");
- /* in case task was dead need to release the
- * task struct.
- */
- if (owning_process)
- put_task_struct(owning_process);
- return;
- }
- }
- }
-
- down_write(&owning_mm->mmap_sem);
- ib_dev->disassociate_ucontext(ibcontext);
- up_write(&owning_mm->mmap_sem);
- mmput(owning_mm);
- put_task_struct(owning_process);
-}
-
/*
* Drop the ucontext off the ufile and completely disconnect it from the
* ib_device
@@ -840,20 +802,28 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
enum rdma_remove_reason reason)
{
struct ib_ucontext *ucontext = ufile->ucontext;
+ struct ib_device *ib_dev = ucontext->device;
int ret;
- if (reason == RDMA_REMOVE_DRIVER_REMOVE)
- ufile_disassociate_ucontext(ucontext);
+ /*
+ * If we are closing the FD then the user mmap VMAs must have
+ * already been destroyed as they hold on to the filep, otherwise
+	 * they need to be zapped.
+ */
+ if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
+ uverbs_user_mmap_disassociate(ufile);
+ if (ib_dev->disassociate_ucontext)
+ ib_dev->disassociate_ucontext(ucontext);
+ }
- put_pid(ucontext->tgid);
- ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
+ ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
RDMACG_RESOURCE_HCA_HANDLE);
/*
* FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
* the error return.
*/
- ret = ucontext->device->dealloc_ucontext(ucontext);
+ ret = ib_dev->dealloc_ucontext(ucontext);
WARN_ON(ret);
ufile->ucontext = NULL;
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index f962f2a593ba..4886d2bba7c7 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -160,5 +160,6 @@ void uverbs_disassociate_api(struct uverbs_api *uapi);
void uverbs_destroy_api(struct uverbs_api *uapi);
void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
unsigned int num_attrs);
+void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile);
#endif /* RDMA_CORE_H */
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 3b7fa0ccaa08..06d8657ce583 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -50,8 +50,7 @@ void rdma_restrack_clean(struct rdma_restrack_root *res)
dev = container_of(res, struct ib_device, res);
pr_err("restrack: %s", CUT_HERE);
- pr_err("restrack: BUG: RESTRACK detected leak of resources on %s\n",
- dev->name);
+ dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
hash_for_each(res->hash, bkt, e, node) {
if (rdma_is_kernel_res(e)) {
owner = e->kern_name;
@@ -156,6 +155,21 @@ static bool res_is_user(struct rdma_restrack_entry *res)
}
}
+void rdma_restrack_set_task(struct rdma_restrack_entry *res,
+ const char *caller)
+{
+ if (caller) {
+ res->kern_name = caller;
+ return;
+ }
+
+ if (res->task)
+ put_task_struct(res->task);
+ get_task_struct(current);
+ res->task = current;
+}
+EXPORT_SYMBOL(rdma_restrack_set_task);
+
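The reworked helper now covers both ownership models in one place; a sketch of the two call patterns under the new semantics (res stands for any struct rdma_restrack_entry):

	/* kernel-owned resource: record the caller string, no task reference */
	rdma_restrack_set_task(&res, "some_kernel_caller");

	/* user-owned resource: a NULL caller takes a reference on current */
	rdma_restrack_set_task(&res, NULL);
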
void rdma_restrack_add(struct rdma_restrack_entry *res)
{
struct ib_device *dev = res_to_dev(res);
@@ -168,7 +182,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
if (res_is_user(res)) {
if (!res->task)
- rdma_restrack_set_task(res, current);
+ rdma_restrack_set_task(res, NULL);
res->kern_name = NULL;
} else {
set_kern_name(res);
@@ -209,7 +223,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
struct ib_device *dev;
if (!res->valid)
- return;
+ goto out;
dev = res_to_dev(res);
if (!dev)
@@ -222,8 +236,12 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
down_write(&dev->res.rwsem);
hash_del(&res->node);
res->valid = false;
- if (res->task)
- put_task_struct(res->task);
up_write(&dev->res.rwsem);
+
+out:
+ if (res->task) {
+ put_task_struct(res->task);
+ res->task = NULL;
+ }
}
EXPORT_SYMBOL(rdma_restrack_del);
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 683e6d11a564..d22c4a2ebac6 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -12,6 +12,7 @@
*/
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>
@@ -280,7 +281,11 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
struct ib_device *dev = qp->pd->device;
int ret;
- ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+ if (is_pci_p2pdma_page(sg_page(sg)))
+ ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+ else
+ ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+
if (!ret)
return -ENOMEM;
sg_cnt = ret;
@@ -602,7 +607,9 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
break;
}
- ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ /* P2PDMA contexts do not need to be unmapped */
+ if (!is_pci_p2pdma_page(sg_page(sg)))
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
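Consumers of the rdma_rw API need no changes: they pass the SGL to rdma_rw_ctx_init() and the core selects the P2PDMA or regular DMA mapping path per the checks above. A call sketch with hypothetical arguments:

	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sgl, sg_cnt, 0 /* sg_offset */,
			       remote_addr, rkey, DMA_TO_DEVICE);
	if (ret < 0)
		return ret;
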
diff --git a/drivers/infiniband/core/sa.h b/drivers/infiniband/core/sa.h
index b1d4bbf4ce5c..cbaaaa92fff3 100644
--- a/drivers/infiniband/core/sa.h
+++ b/drivers/infiniband/core/sa.h
@@ -49,16 +49,14 @@ static inline void ib_sa_client_put(struct ib_sa_client *client)
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- u8 method,
+ struct ib_device *device, u8 port_num, u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
- void *context,
- struct ib_sa_query **sa_query);
+ void *context, struct ib_sa_query **sa_query);
int mcast_init(void);
void mcast_cleanup(void);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7b794a14d6e8..be5ba5e15496 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -761,7 +761,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
/* Construct the family header first */
header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
- memcpy(header->device_name, query->port->agent->device->name,
+ memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
LS_DEVICE_NAME_MAX);
header->port_num = query->port->port_num;
@@ -835,7 +835,6 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
void *data;
- int ret = 0;
struct ib_sa_mad *mad;
int len;
@@ -862,13 +861,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
- if (!ret)
- ret = len;
- else
- ret = 0;
-
- return ret;
+ return rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
}
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
@@ -891,14 +884,12 @@ static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
ret = ib_nl_send_msg(query, gfp_mask);
- if (ret <= 0) {
+ if (ret) {
ret = -EIO;
/* Remove the request */
spin_lock_irqsave(&ib_nl_request_lock, flags);
list_del(&query->list);
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
- } else {
- ret = 0;
}
return ret;
@@ -1227,46 +1218,6 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-static int roce_resolve_route_from_path(struct sa_path_rec *rec,
- const struct ib_gid_attr *attr)
-{
- struct rdma_dev_addr dev_addr = {};
- union {
- struct sockaddr _sockaddr;
- struct sockaddr_in _sockaddr_in;
- struct sockaddr_in6 _sockaddr_in6;
- } sgid_addr, dgid_addr;
- int ret;
-
- if (rec->roce.route_resolved)
- return 0;
- if (!attr || !attr->ndev)
- return -EINVAL;
-
- dev_addr.bound_dev_if = attr->ndev->ifindex;
- /* TODO: Use net from the ib_gid_attr once it is added to it,
- * until than, limit itself to init_net.
- */
- dev_addr.net = &init_net;
-
- rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
- rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
-
- /* validate the route */
- ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
- &dgid_addr._sockaddr, &dev_addr);
- if (ret)
- return ret;
-
- if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
- dev_addr.network == RDMA_NETWORK_IPV6) &&
- rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
- return -EINVAL;
-
- rec->roce.route_resolved = true;
- return 0;
-}
-
static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
struct sa_path_rec *rec,
struct rdma_ah_attr *ah_attr,
@@ -1409,7 +1360,8 @@ static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
spin_unlock_irqrestore(&tid_lock, flags);
}
-static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
+static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
+ gfp_t gfp_mask)
{
bool preload = gfpflags_allow_blocking(gfp_mask);
unsigned long flags;
@@ -1433,7 +1385,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
(!(query->flags & IB_SA_QUERY_OPA))) {
- if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
+ if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
if (!ib_nl_make_request(query, gfp_mask))
return id;
}
@@ -1599,7 +1551,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
struct sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct sa_path_rec *resp,
void *context),
@@ -1753,7 +1705,7 @@ int ib_sa_service_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num, u8 method,
struct ib_sa_service_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_service_rec *resp,
void *context),
@@ -1850,7 +1802,7 @@ int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
@@ -1941,7 +1893,7 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_guidinfo_rec *resp,
void *context),
@@ -2108,7 +2060,7 @@ static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
}
static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
- int timeout_ms,
+ unsigned long timeout_ms,
void (*callback)(void *context),
void *context,
struct ib_sa_query **sa_query)
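
The prototype changes above widen every SA query timeout from int to
unsigned long. A hedged sketch of the motivation (assumption: the timeout
is eventually converted to jiffies for the MAD retry timer); with an int,
a user-supplied value above INT_MAX milliseconds would already be negative
before the conversion starts:

#include <linux/jiffies.h>

/* Keeping the value unsigned long end to end avoids any signed
 * intermediate; the helper name is illustrative, not from the patch.
 */
static unsigned long sa_timeout_to_jiffies(unsigned long timeout_ms)
{
        return msecs_to_jiffies(timeout_ms);
}
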
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 9b0bea8303e0..1143c0448666 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -685,9 +685,8 @@ static int ib_mad_agent_security_change(struct notifier_block *nb,
if (event != LSM_POLICY_CHANGE)
return NOTIFY_DONE;
- ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
- ag->device->name,
- ag->port_num);
+ ag->smp_allowed = !security_ib_endport_manage_subnet(
+ ag->security, dev_name(&ag->device->dev), ag->port_num);
return NOTIFY_OK;
}
@@ -708,7 +707,7 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
return 0;
ret = security_ib_endport_manage_subnet(agent->security,
- agent->device->name,
+ dev_name(&agent->device->dev),
agent->port_num);
if (ret)
return ret;
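
Both hunks above drop the driver-private name field in favor of the core
device name. A minimal sketch of the accessor pattern, assuming struct
ib_device now embeds a registered struct device (the helper name is
illustrative):

#include <linux/device.h>

/* dev_name() returns the kobject name fixed at device_add() time, so
 * the canonical string lives in one place instead of a shadow field.
 */
static inline const char *ib_dev_label(struct ib_device *ibdev)
{
        return dev_name(&ibdev->dev);
}
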
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7fd14ead7b37..6fcce2c206c6 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -512,7 +512,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
40 + offset / 8, sizeof(data));
if (ret < 0)
- return sprintf(buf, "N/A (no PMA)\n");
+ return ret;
switch (width) {
case 4:
@@ -1036,7 +1036,7 @@ static int add_port(struct ib_device *device, int port_num,
p->port_num = port_num;
ret = kobject_init_and_add(&p->kobj, &port_type,
- device->ports_parent,
+ device->ports_kobj,
"%d", port_num);
if (ret) {
kfree(p);
@@ -1057,10 +1057,12 @@ static int add_port(struct ib_device *device, int port_num,
goto err_put;
}
- p->pma_table = get_counter_table(device, port_num);
- ret = sysfs_create_group(&p->kobj, p->pma_table);
- if (ret)
- goto err_put_gid_attrs;
+ if (device->process_mad) {
+ p->pma_table = get_counter_table(device, port_num);
+ ret = sysfs_create_group(&p->kobj, p->pma_table);
+ if (ret)
+ goto err_put_gid_attrs;
+ }
p->gid_group.name = "gids";
p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
@@ -1118,9 +1120,9 @@ static int add_port(struct ib_device *device, int port_num,
}
/*
- * If port == 0, it means we have only one port and the parent
- * device, not this port device, should be the holder of the
- * hw_counters
+ * If port == 0, it means hw_counters are per device and not per
+ * port, so the holder should be the device. Therefore skip the
+ * per-port counter initialization.
*/
if (device->alloc_hw_stats && port_num)
setup_hw_stats(device, p, port_num);
@@ -1173,7 +1175,8 @@ err_free_gid:
p->gid_group.attrs = NULL;
err_remove_pma:
- sysfs_remove_group(&p->kobj, p->pma_table);
+ if (p->pma_table)
+ sysfs_remove_group(&p->kobj, p->pma_table);
err_put_gid_attrs:
kobject_put(&p->gid_attr_group->kobj);
@@ -1183,7 +1186,7 @@ err_put:
return ret;
}
-static ssize_t show_node_type(struct device *device,
+static ssize_t node_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1198,8 +1201,9 @@ static ssize_t show_node_type(struct device *device,
default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
}
}
+static DEVICE_ATTR_RO(node_type);
-static ssize_t show_sys_image_guid(struct device *device,
+static ssize_t sys_image_guid_show(struct device *device,
struct device_attribute *dev_attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1210,8 +1214,9 @@ static ssize_t show_sys_image_guid(struct device *device,
be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[2]),
be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[3]));
}
+static DEVICE_ATTR_RO(sys_image_guid);
-static ssize_t show_node_guid(struct device *device,
+static ssize_t node_guid_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1222,8 +1227,9 @@ static ssize_t show_node_guid(struct device *device,
be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
}
+static DEVICE_ATTR_RO(node_guid);
-static ssize_t show_node_desc(struct device *device,
+static ssize_t node_desc_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1231,9 +1237,9 @@ static ssize_t show_node_desc(struct device *device,
return sprintf(buf, "%.64s\n", dev->node_desc);
}
-static ssize_t set_node_desc(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t node_desc_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
struct ib_device_modify desc = {};
@@ -1249,8 +1255,9 @@ static ssize_t set_node_desc(struct device *device,
return count;
}
+static DEVICE_ATTR_RW(node_desc);
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
+static ssize_t fw_ver_show(struct device *device, struct device_attribute *attr,
char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1259,19 +1266,19 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
strlcat(buf, "\n", IB_FW_VERSION_NAME_MAX);
return strlen(buf);
}
+static DEVICE_ATTR_RO(fw_ver);
+
+static struct attribute *ib_dev_attrs[] = {
+ &dev_attr_node_type.attr,
+ &dev_attr_node_guid.attr,
+ &dev_attr_sys_image_guid.attr,
+ &dev_attr_fw_ver.attr,
+ &dev_attr_node_desc.attr,
+ NULL,
+};
-static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
-static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
-static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
-static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-
-static struct device_attribute *ib_class_attributes[] = {
- &dev_attr_node_type,
- &dev_attr_sys_image_guid,
- &dev_attr_node_guid,
- &dev_attr_node_desc,
- &dev_attr_fw_ver,
+static const struct attribute_group dev_attr_group = {
+ .attrs = ib_dev_attrs,
};
static void free_port_list_attributes(struct ib_device *device)
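
The handler renames above (show_node_type to node_type_show, and so on)
are what make the DEVICE_ATTR_RO()/DEVICE_ATTR_RW() conversions possible,
because those macros derive the handler names from the attribute name. A
sketch of the convention, with foo as a purely illustrative attribute:

/* DEVICE_ATTR_RO(foo) wires a 0444 attribute to foo_show();
 * DEVICE_ATTR_RW(foo) uses mode 0644 and also expects foo_store().
 */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        return sprintf(buf, "example\n");
}
static DEVICE_ATTR_RO(foo);     /* == __ATTR(foo, 0444, foo_show, NULL) */
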
@@ -1285,7 +1292,9 @@ static void free_port_list_attributes(struct ib_device *device)
kfree(port->hw_stats);
free_hsag(&port->kobj, port->hw_stats_ag);
}
- sysfs_remove_group(p, port->pma_table);
+
+ if (port->pma_table)
+ sysfs_remove_group(p, port->pma_table);
sysfs_remove_group(p, &port->pkey_group);
sysfs_remove_group(p, &port->gid_group);
sysfs_remove_group(&port->gid_attr_group->kobj,
@@ -1296,7 +1305,7 @@ static void free_port_list_attributes(struct ib_device *device)
kobject_put(p);
}
- kobject_put(device->ports_parent);
+ kobject_put(device->ports_kobj);
}
int ib_device_register_sysfs(struct ib_device *device,
@@ -1307,23 +1316,15 @@ int ib_device_register_sysfs(struct ib_device *device,
int ret;
int i;
- ret = dev_set_name(class_dev, "%s", device->name);
- if (ret)
- return ret;
+ device->groups[0] = &dev_attr_group;
+ class_dev->groups = device->groups;
ret = device_add(class_dev);
if (ret)
goto err;
- for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
- ret = device_create_file(class_dev, ib_class_attributes[i]);
- if (ret)
- goto err_unregister;
- }
-
- device->ports_parent = kobject_create_and_add("ports",
- &class_dev->kobj);
- if (!device->ports_parent) {
+ device->ports_kobj = kobject_create_and_add("ports", &class_dev->kobj);
+ if (!device->ports_kobj) {
ret = -ENOMEM;
goto err_put;
}
@@ -1347,20 +1348,15 @@ int ib_device_register_sysfs(struct ib_device *device,
err_put:
free_port_list_attributes(device);
-
-err_unregister:
device_del(class_dev);
-
err:
return ret;
}
void ib_device_unregister_sysfs(struct ib_device *device)
{
- int i;
-
- /* Hold kobject until ib_dealloc_device() */
- kobject_get(&device->dev.kobj);
+ /* Hold device until ib_dealloc_device() */
+ get_device(&device->dev);
free_port_list_attributes(device);
@@ -1369,8 +1365,5 @@ void ib_device_unregister_sysfs(struct ib_device *device)
free_hsag(&device->dev.kobj, device->hw_stats_ag);
}
- for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
- device_remove_file(&device->dev, ib_class_attributes[i]);
-
device_unregister(&device->dev);
}
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index faa9e6116b2f..73332b9a25b5 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -46,6 +46,8 @@
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
+
#include <linux/uaccess.h>
#include <rdma/ib.h>
@@ -1120,6 +1122,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
return -EINVAL;
+ hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucm_cmd_table));
if (hdr.in + sizeof(hdr) > len)
return -EINVAL;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5f437d1570fb..01d68ed46c1b 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -44,6 +44,8 @@
#include <linux/module.h>
#include <linux/nsproxy.h>
+#include <linux/nospec.h>
+
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
@@ -1676,6 +1678,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
return -EINVAL;
+ hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));
if (hdr.in + sizeof(hdr) > len)
return -EINVAL;
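
Both write() handlers gain the same Spectre-v1 hardening: the bounds check
alone does not stop speculative execution from running ahead with an
out-of-range index, so the index is clamped before it is used to load a
handler from the table. A generic sketch of the pattern (the dispatch
helper and its types are illustrative):

#include <linux/nospec.h>

typedef ssize_t (*cmd_fn_t)(void *file, const char __user *buf,
                            int in_len, int out_len);

static ssize_t dispatch_cmd(cmd_fn_t *table, size_t nr_cmds, u32 cmd,
                            void *file, const char __user *buf,
                            int in_len, int out_len)
{
        if (cmd >= nr_cmds)
                return -EINVAL;
        /* Clamp under speculation as well, not just architecturally. */
        cmd = array_index_nospec(cmd, nr_cmds);
        return table[cmd](file, buf, in_len, out_len);
}
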
@@ -1759,6 +1762,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
mutex_lock(&mut);
if (!ctx->closing) {
mutex_unlock(&mut);
+ ucma_put_ctx(ctx);
+ wait_for_completion(&ctx->comp);
/* rdma_destroy_id ensures that no event handlers are
* inflight for that id before releasing it.
*/
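
The two lines added to ucma_close() establish a shutdown order: drop this
caller's own context reference first, then block until every concurrent
user has dropped theirs, and only then destroy the id. A hedged sketch of
the counterpart that makes the wait terminate (ucma_put_ctx() in this
driver behaves roughly like this):

static void ucma_put_ctx(struct ucma_context *ctx)
{
        if (atomic_dec_and_test(&ctx->ref))
                complete(&ctx->comp);   /* wakes wait_for_completion() */
}
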
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index a41792dbae1f..c6144df47ea4 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -85,7 +85,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
struct page **page_list;
struct vm_area_struct **vma_list;
unsigned long lock_limit;
+ unsigned long new_pinned;
unsigned long cur_base;
+ struct mm_struct *mm;
unsigned long npages;
int ret;
int i;
@@ -107,25 +109,32 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (!can_do_mlock())
return ERR_PTR(-EPERM);
- umem = kzalloc(sizeof *umem, GFP_KERNEL);
- if (!umem)
- return ERR_PTR(-ENOMEM);
+ if (access & IB_ACCESS_ON_DEMAND) {
+ umem = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
+ if (!umem)
+ return ERR_PTR(-ENOMEM);
+ umem->is_odp = 1;
+ } else {
+ umem = kzalloc(sizeof(*umem), GFP_KERNEL);
+ if (!umem)
+ return ERR_PTR(-ENOMEM);
+ }
umem->context = context;
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
umem->writable = ib_access_writable(access);
+ umem->owning_mm = mm = current->mm;
+ mmgrab(mm);
if (access & IB_ACCESS_ON_DEMAND) {
- ret = ib_umem_odp_get(context, umem, access);
+ ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
if (ret)
goto umem_kfree;
return umem;
}
- umem->odp_data = NULL;
-
/* We assume the memory is from hugetlb until proved otherwise */
umem->hugetlb = 1;
@@ -144,25 +153,25 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->hugetlb = 0;
npages = ib_umem_num_pages(umem);
+ if (npages == 0 || npages > UINT_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm += npages;
- if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
- up_write(&current->mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+ if (check_add_overflow(mm->pinned_vm, npages, &new_pinned) ||
+ (new_pinned > lock_limit && !capable(CAP_IPC_LOCK))) {
+ up_write(&mm->mmap_sem);
ret = -ENOMEM;
- goto vma;
+ goto out;
}
- up_write(&current->mm->mmap_sem);
+ mm->pinned_vm = new_pinned;
+ up_write(&mm->mmap_sem);
cur_base = addr & PAGE_MASK;
- if (npages == 0 || npages > UINT_MAX) {
- ret = -EINVAL;
- goto vma;
- }
-
ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
if (ret)
goto vma;
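
The reordered accounting above computes the prospective total into a
temporary, so pinned_vm is written only after both the overflow check and
the RLIMIT_MEMLOCK check have passed. A minimal sketch of the pattern
(helper name illustrative; the caller must hold mmap_sem for write):

#include <linux/overflow.h>

static int charge_pinned_pages(struct mm_struct *mm, unsigned long npages,
                               unsigned long lock_limit)
{
        unsigned long new_pinned;

        if (check_add_overflow(mm->pinned_vm, npages, &new_pinned) ||
            (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)))
                return -ENOMEM;

        mm->pinned_vm = new_pinned;     /* commit only after validation */
        return 0;
}
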
@@ -172,14 +181,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
sg_list_start = umem->sg_head.sgl;
- down_read(&current->mm->mmap_sem);
while (npages) {
+ down_read(&mm->mmap_sem);
ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
gup_flags, page_list, vma_list);
if (ret < 0) {
- up_read(&current->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
goto umem_release;
}
@@ -187,17 +196,20 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
cur_base += ret * PAGE_SIZE;
npages -= ret;
+ /* Continue to hold the mmap_sem as vma_list access
+ * needs to be protected.
+ */
for_each_sg(sg_list_start, sg, ret, i) {
if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
umem->hugetlb = 0;
sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
}
+ up_read(&mm->mmap_sem);
/* preparing for next loop */
sg_list_start = sg;
}
- up_read(&current->mm->mmap_sem);
umem->nmap = ib_dma_map_sg_attrs(context->device,
umem->sg_head.sgl,
@@ -216,29 +228,40 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem_release:
__ib_umem_release(context->device, umem, 0);
vma:
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm -= ib_umem_num_pages(umem);
- up_write(&current->mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+ mm->pinned_vm -= ib_umem_num_pages(umem);
+ up_write(&mm->mmap_sem);
out:
if (vma_list)
free_page((unsigned long) vma_list);
free_page((unsigned long) page_list);
umem_kfree:
- if (ret)
+ if (ret) {
+ mmdrop(umem->owning_mm);
kfree(umem);
+ }
return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
-static void ib_umem_account(struct work_struct *work)
+static void __ib_umem_release_tail(struct ib_umem *umem)
+{
+ mmdrop(umem->owning_mm);
+ if (umem->is_odp)
+ kfree(to_ib_umem_odp(umem));
+ else
+ kfree(umem);
+}
+
+static void ib_umem_release_defer(struct work_struct *work)
{
struct ib_umem *umem = container_of(work, struct ib_umem, work);
- down_write(&umem->mm->mmap_sem);
- umem->mm->pinned_vm -= umem->diff;
- up_write(&umem->mm->mmap_sem);
- mmput(umem->mm);
- kfree(umem);
+ down_write(&umem->owning_mm->mmap_sem);
+ umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
+ up_write(&umem->owning_mm->mmap_sem);
+
+ __ib_umem_release_tail(umem);
}
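
The release tail above pairs with the mmgrab() taken in ib_umem_get(). A
short sketch of the lifetime rule being applied, assuming only the
pinned_vm counter in struct mm_struct is needed at release time (helper
names are illustrative):

/* mmgrab()/mmdrop() pin the struct mm_struct itself, keeping owning_mm
 * a valid pointer for accounting even after the address space is torn
 * down; mmget()/mmput() would keep the whole address space alive,
 * which is not needed here.
 */
static void umem_take_mm(struct ib_umem *umem)
{
        umem->owning_mm = current->mm;
        mmgrab(umem->owning_mm);
}

static void umem_drop_mm(struct ib_umem *umem)
{
        mmdrop(umem->owning_mm);
}
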
/**
@@ -248,52 +271,36 @@ static void ib_umem_account(struct work_struct *work)
void ib_umem_release(struct ib_umem *umem)
{
struct ib_ucontext *context = umem->context;
- struct mm_struct *mm;
- struct task_struct *task;
- unsigned long diff;
- if (umem->odp_data) {
- ib_umem_odp_release(umem);
+ if (umem->is_odp) {
+ ib_umem_odp_release(to_ib_umem_odp(umem));
+ __ib_umem_release_tail(umem);
return;
}
__ib_umem_release(umem->context->device, umem, 1);
- task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
- if (!task)
- goto out;
- mm = get_task_mm(task);
- put_task_struct(task);
- if (!mm)
- goto out;
-
- diff = ib_umem_num_pages(umem);
-
/*
* We may be called with the mm's mmap_sem already held. This
* can happen when a userspace munmap() is the call that drops
* the last reference to our file and calls our release
* method. If there are memory regions to destroy, we'll end
* up here and not be able to take the mmap_sem. In that case
- * we defer the vm_locked accounting to the system workqueue.
+ * we defer the vm_locked accounting to a workqueue.
*/
if (context->closing) {
- if (!down_write_trylock(&mm->mmap_sem)) {
- INIT_WORK(&umem->work, ib_umem_account);
- umem->mm = mm;
- umem->diff = diff;
-
+ if (!down_write_trylock(&umem->owning_mm->mmap_sem)) {
+ INIT_WORK(&umem->work, ib_umem_release_defer);
queue_work(ib_wq, &umem->work);
return;
}
- } else
- down_write(&mm->mmap_sem);
+ } else {
+ down_write(&umem->owning_mm->mmap_sem);
+ }
+ umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
+ up_write(&umem->owning_mm->mmap_sem);
- mm->pinned_vm -= diff;
- up_write(&mm->mmap_sem);
- mmput(mm);
-out:
- kfree(umem);
+ __ib_umem_release_tail(umem);
}
EXPORT_SYMBOL(ib_umem_release);
@@ -303,7 +310,7 @@ int ib_umem_page_count(struct ib_umem *umem)
int n;
struct scatterlist *sg;
- if (umem->odp_data)
+ if (umem->is_odp)
return ib_umem_num_pages(umem);
n = 0;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 6ec748eccff7..2b4c5e7dd5a1 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -58,7 +58,7 @@ static u64 node_start(struct umem_odp_node *n)
struct ib_umem_odp *umem_odp =
container_of(n, struct ib_umem_odp, interval_tree);
- return ib_umem_start(umem_odp->umem);
+ return ib_umem_start(&umem_odp->umem);
}
/* Note that the representation of the intervals in the interval tree
@@ -71,140 +71,86 @@ static u64 node_last(struct umem_odp_node *n)
struct ib_umem_odp *umem_odp =
container_of(n, struct ib_umem_odp, interval_tree);
- return ib_umem_end(umem_odp->umem) - 1;
+ return ib_umem_end(&umem_odp->umem) - 1;
}
INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
node_start, node_last, static, rbt_ib_umem)
-static void ib_umem_notifier_start_account(struct ib_umem *item)
+static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
{
- mutex_lock(&item->odp_data->umem_mutex);
-
- /* Only update private counters for this umem if it has them.
- * Otherwise skip it. All page faults will be delayed for this umem. */
- if (item->odp_data->mn_counters_active) {
- int notifiers_count = item->odp_data->notifiers_count++;
-
- if (notifiers_count == 0)
- /* Initialize the completion object for waiting on
- * notifiers. Since notifier_count is zero, no one
- * should be waiting right now. */
- reinit_completion(&item->odp_data->notifier_completion);
- }
- mutex_unlock(&item->odp_data->umem_mutex);
-}
-
-static void ib_umem_notifier_end_account(struct ib_umem *item)
-{
- mutex_lock(&item->odp_data->umem_mutex);
-
- /* Only update private counters for this umem if it has them.
- * Otherwise skip it. All page faults will be delayed for this umem. */
- if (item->odp_data->mn_counters_active) {
+ mutex_lock(&umem_odp->umem_mutex);
+ if (umem_odp->notifiers_count++ == 0)
/*
- * This sequence increase will notify the QP page fault that
- * the page that is going to be mapped in the spte could have
- * been freed.
+ * Initialize the completion object for waiting on
+ * notifiers. Since notifier_count is zero, no one should be
+ * waiting right now.
*/
- ++item->odp_data->notifiers_seq;
- if (--item->odp_data->notifiers_count == 0)
- complete_all(&item->odp_data->notifier_completion);
- }
- mutex_unlock(&item->odp_data->umem_mutex);
+ reinit_completion(&umem_odp->notifier_completion);
+ mutex_unlock(&umem_odp->umem_mutex);
}
-/* Account for a new mmu notifier in an ib_ucontext. */
-static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
+static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
{
- atomic_inc(&context->notifier_count);
+ mutex_lock(&umem_odp->umem_mutex);
+ /*
+ * This sequence increase will notify the QP page fault that the page
+ * that is going to be mapped in the spte could have been freed.
+ */
+ ++umem_odp->notifiers_seq;
+ if (--umem_odp->notifiers_count == 0)
+ complete_all(&umem_odp->notifier_completion);
+ mutex_unlock(&umem_odp->umem_mutex);
}
-/* Account for a terminating mmu notifier in an ib_ucontext.
- *
- * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
- * the function takes the semaphore itself. */
-static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
+static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
+ u64 start, u64 end, void *cookie)
{
- int zero_notifiers = atomic_dec_and_test(&context->notifier_count);
-
- if (zero_notifiers &&
- !list_empty(&context->no_private_counters)) {
- /* No currently running mmu notifiers. Now is the chance to
- * add private accounting to all previously added umems. */
- struct ib_umem_odp *odp_data, *next;
-
- /* Prevent concurrent mmu notifiers from working on the
- * no_private_counters list. */
- down_write(&context->umem_rwsem);
-
- /* Read the notifier_count again, with the umem_rwsem
- * semaphore taken for write. */
- if (!atomic_read(&context->notifier_count)) {
- list_for_each_entry_safe(odp_data, next,
- &context->no_private_counters,
- no_private_counters) {
- mutex_lock(&odp_data->umem_mutex);
- odp_data->mn_counters_active = true;
- list_del(&odp_data->no_private_counters);
- complete_all(&odp_data->notifier_completion);
- mutex_unlock(&odp_data->umem_mutex);
- }
- }
-
- up_write(&context->umem_rwsem);
- }
-}
+ struct ib_umem *umem = &umem_odp->umem;
-static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
- u64 end, void *cookie) {
/*
* Increase the number of notifiers running, to
* prevent any further fault handling on this MR.
*/
- ib_umem_notifier_start_account(item);
- item->odp_data->dying = 1;
+ ib_umem_notifier_start_account(umem_odp);
+ umem_odp->dying = 1;
/* Make sure that the fact the umem is dying is visible before we
 * release all pending page faults. */
smp_wmb();
- complete_all(&item->odp_data->notifier_completion);
- item->context->invalidate_range(item, ib_umem_start(item),
- ib_umem_end(item));
+ complete_all(&umem_odp->notifier_completion);
+ umem->context->invalidate_range(umem_odp, ib_umem_start(umem),
+ ib_umem_end(umem));
return 0;
}
static void ib_umem_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
- if (!context->invalidate_range)
- return;
-
- ib_ucontext_notifier_start_account(context);
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
- ULLONG_MAX,
- ib_umem_notifier_release_trampoline,
- true,
- NULL);
- up_read(&context->umem_rwsem);
+ struct ib_ucontext_per_mm *per_mm =
+ container_of(mn, struct ib_ucontext_per_mm, mn);
+
+ down_read(&per_mm->umem_rwsem);
+ if (per_mm->active)
+ rbt_ib_umem_for_each_in_range(
+ &per_mm->umem_tree, 0, ULLONG_MAX,
+ ib_umem_notifier_release_trampoline, true, NULL);
+ up_read(&per_mm->umem_rwsem);
}
-static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
+static int invalidate_page_trampoline(struct ib_umem_odp *item, u64 start,
u64 end, void *cookie)
{
ib_umem_notifier_start_account(item);
- item->context->invalidate_range(item, start, start + PAGE_SIZE);
+ item->umem.context->invalidate_range(item, start, start + PAGE_SIZE);
ib_umem_notifier_end_account(item);
return 0;
}
-static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
- u64 end, void *cookie)
+static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
+ u64 start, u64 end, void *cookie)
{
ib_umem_notifier_start_account(item);
- item->context->invalidate_range(item, start, end);
+ item->umem.context->invalidate_range(item, start, end);
return 0;
}
@@ -214,28 +160,30 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
unsigned long end,
bool blockable)
{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
- int ret;
-
- if (!context->invalidate_range)
- return 0;
+ struct ib_ucontext_per_mm *per_mm =
+ container_of(mn, struct ib_ucontext_per_mm, mn);
if (blockable)
- down_read(&context->umem_rwsem);
- else if (!down_read_trylock(&context->umem_rwsem))
+ down_read(&per_mm->umem_rwsem);
+ else if (!down_read_trylock(&per_mm->umem_rwsem))
return -EAGAIN;
- ib_ucontext_notifier_start_account(context);
- ret = rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
- end,
- invalidate_range_start_trampoline,
- blockable, NULL);
- up_read(&context->umem_rwsem);
+ if (!per_mm->active) {
+ up_read(&per_mm->umem_rwsem);
+ /*
+ * At this point active is permanently set and visible to this
+ * CPU without a lock; that fact is relied on to skip the unlock
+ * in range_end.
+ */
+ return 0;
+ }
- return ret;
+ return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
+ invalidate_range_start_trampoline,
+ blockable, NULL);
}
-static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
+static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
u64 end, void *cookie)
{
ib_umem_notifier_end_account(item);
@@ -247,22 +195,16 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+ struct ib_ucontext_per_mm *per_mm =
+ container_of(mn, struct ib_ucontext_per_mm, mn);
- if (!context->invalidate_range)
+ if (unlikely(!per_mm->active))
return;
- /*
- * TODO: we currently bail out if there is any sleepable work to be done
- * in ib_umem_notifier_invalidate_range_start so we shouldn't really block
- * here. But this is ugly and fragile.
- */
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+ rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start,
end,
invalidate_range_end_trampoline, true, NULL);
- up_read(&context->umem_rwsem);
- ib_ucontext_notifier_end_account(context);
+ up_read(&per_mm->umem_rwsem);
}
static const struct mmu_notifier_ops ib_umem_notifiers = {
@@ -271,31 +213,158 @@ static const struct mmu_notifier_ops ib_umem_notifiers = {
.invalidate_range_end = ib_umem_notifier_invalidate_range_end,
};
-struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
- unsigned long addr,
- size_t size)
+static void add_umem_to_per_mm(struct ib_umem_odp *umem_odp)
{
- struct ib_umem *umem;
+ struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
+ struct ib_umem *umem = &umem_odp->umem;
+
+ down_write(&per_mm->umem_rwsem);
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_insert(&umem_odp->interval_tree,
+ &per_mm->umem_tree);
+ up_write(&per_mm->umem_rwsem);
+}
+
+static void remove_umem_from_per_mm(struct ib_umem_odp *umem_odp)
+{
+ struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
+ struct ib_umem *umem = &umem_odp->umem;
+
+ down_write(&per_mm->umem_rwsem);
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_remove(&umem_odp->interval_tree,
+ &per_mm->umem_tree);
+ complete_all(&umem_odp->notifier_completion);
+
+ up_write(&per_mm->umem_rwsem);
+}
+
+static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
+ struct mm_struct *mm)
+{
+ struct ib_ucontext_per_mm *per_mm;
+ int ret;
+
+ per_mm = kzalloc(sizeof(*per_mm), GFP_KERNEL);
+ if (!per_mm)
+ return ERR_PTR(-ENOMEM);
+
+ per_mm->context = ctx;
+ per_mm->mm = mm;
+ per_mm->umem_tree = RB_ROOT_CACHED;
+ init_rwsem(&per_mm->umem_rwsem);
+ per_mm->active = ctx->invalidate_range;
+
+ rcu_read_lock();
+ per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ rcu_read_unlock();
+
+ WARN_ON(mm != current->mm);
+
+ per_mm->mn.ops = &ib_umem_notifiers;
+ ret = mmu_notifier_register(&per_mm->mn, per_mm->mm);
+ if (ret) {
+ dev_err(&ctx->device->dev,
+ "Failed to register mmu_notifier %d\n", ret);
+ goto out_pid;
+ }
+
+ list_add(&per_mm->ucontext_list, &ctx->per_mm_list);
+ return per_mm;
+
+out_pid:
+ put_pid(per_mm->tgid);
+ kfree(per_mm);
+ return ERR_PTR(ret);
+}
+
+static int get_per_mm(struct ib_umem_odp *umem_odp)
+{
+ struct ib_ucontext *ctx = umem_odp->umem.context;
+ struct ib_ucontext_per_mm *per_mm;
+
+ /*
+ * Generally speaking we expect only one or two per_mm in this list,
+ * so no reason to optimize this search today.
+ */
+ mutex_lock(&ctx->per_mm_list_lock);
+ list_for_each_entry(per_mm, &ctx->per_mm_list, ucontext_list) {
+ if (per_mm->mm == umem_odp->umem.owning_mm)
+ goto found;
+ }
+
+ per_mm = alloc_per_mm(ctx, umem_odp->umem.owning_mm);
+ if (IS_ERR(per_mm)) {
+ mutex_unlock(&ctx->per_mm_list_lock);
+ return PTR_ERR(per_mm);
+ }
+
+found:
+ umem_odp->per_mm = per_mm;
+ per_mm->odp_mrs_count++;
+ mutex_unlock(&ctx->per_mm_list_lock);
+
+ return 0;
+}
+
+static void free_per_mm(struct rcu_head *rcu)
+{
+ kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
+}
+
+void put_per_mm(struct ib_umem_odp *umem_odp)
+{
+ struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
+ struct ib_ucontext *ctx = umem_odp->umem.context;
+ bool need_free;
+
+ mutex_lock(&ctx->per_mm_list_lock);
+ umem_odp->per_mm = NULL;
+ per_mm->odp_mrs_count--;
+ need_free = per_mm->odp_mrs_count == 0;
+ if (need_free)
+ list_del(&per_mm->ucontext_list);
+ mutex_unlock(&ctx->per_mm_list_lock);
+
+ if (!need_free)
+ return;
+
+ /*
+ * NOTE! mmu_notifier_unregister() can happen between a start/end
+ * callback pair, leaving a start without its matching end and thus an
+ * unbalanced lock. This doesn't really matter to us, since we are
+ * about to kfree the memory that holds the lock, but LOCKDEP doesn't
+ * like it.
+ */
+ down_write(&per_mm->umem_rwsem);
+ per_mm->active = false;
+ up_write(&per_mm->umem_rwsem);
+
+ WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
+ mmu_notifier_unregister_no_release(&per_mm->mn, per_mm->mm);
+ put_pid(per_mm->tgid);
+ mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
+}
+
+struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
+ unsigned long addr, size_t size)
+{
+ struct ib_ucontext *ctx = per_mm->context;
struct ib_umem_odp *odp_data;
+ struct ib_umem *umem;
int pages = size >> PAGE_SHIFT;
int ret;
- umem = kzalloc(sizeof(*umem), GFP_KERNEL);
- if (!umem)
+ odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
+ if (!odp_data)
return ERR_PTR(-ENOMEM);
-
- umem->context = context;
+ umem = &odp_data->umem;
+ umem->context = ctx;
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
umem->writable = 1;
-
- odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
- if (!odp_data) {
- ret = -ENOMEM;
- goto out_umem;
- }
- odp_data->umem = umem;
+ umem->is_odp = 1;
+ odp_data->per_mm = per_mm;
mutex_init(&odp_data->umem_mutex);
init_completion(&odp_data->notifier_completion);
@@ -314,39 +383,34 @@ struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
goto out_page_list;
}
- down_write(&context->umem_rwsem);
- context->odp_mrs_count++;
- rbt_ib_umem_insert(&odp_data->interval_tree, &context->umem_tree);
- if (likely(!atomic_read(&context->notifier_count)))
- odp_data->mn_counters_active = true;
- else
- list_add(&odp_data->no_private_counters,
- &context->no_private_counters);
- up_write(&context->umem_rwsem);
-
- umem->odp_data = odp_data;
+ /*
+ * Caller must ensure that the umem_odp that the per_mm came from
+ * cannot be freed during the call to ib_alloc_odp_umem.
+ */
+ mutex_lock(&ctx->per_mm_list_lock);
+ per_mm->odp_mrs_count++;
+ mutex_unlock(&ctx->per_mm_list_lock);
+ add_umem_to_per_mm(odp_data);
- return umem;
+ return odp_data;
out_page_list:
vfree(odp_data->page_list);
out_odp_data:
kfree(odp_data);
-out_umem:
- kfree(umem);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);
-int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
- int access)
+int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
+ struct ib_umem *umem = &umem_odp->umem;
+ /*
+ * NOTE: This must be called in a process context where umem->owning_mm
+ * == current->mm
+ */
+ struct mm_struct *mm = umem->owning_mm;
int ret_val;
- struct pid *our_pid;
- struct mm_struct *mm = get_task_mm(current);
-
- if (!mm)
- return -EINVAL;
if (access & IB_ACCESS_HUGETLB) {
struct vm_area_struct *vma;
@@ -366,111 +430,43 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
umem->hugetlb = 0;
}
- /* Prevent creating ODP MRs in child processes */
- rcu_read_lock();
- our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
- rcu_read_unlock();
- put_pid(our_pid);
- if (context->tgid != our_pid) {
- ret_val = -EINVAL;
- goto out_mm;
- }
-
- umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
- if (!umem->odp_data) {
- ret_val = -ENOMEM;
- goto out_mm;
- }
- umem->odp_data->umem = umem;
-
- mutex_init(&umem->odp_data->umem_mutex);
+ mutex_init(&umem_odp->umem_mutex);
- init_completion(&umem->odp_data->notifier_completion);
+ init_completion(&umem_odp->notifier_completion);
if (ib_umem_num_pages(umem)) {
- umem->odp_data->page_list =
- vzalloc(array_size(sizeof(*umem->odp_data->page_list),
+ umem_odp->page_list =
+ vzalloc(array_size(sizeof(*umem_odp->page_list),
ib_umem_num_pages(umem)));
- if (!umem->odp_data->page_list) {
- ret_val = -ENOMEM;
- goto out_odp_data;
- }
+ if (!umem_odp->page_list)
+ return -ENOMEM;
- umem->odp_data->dma_list =
- vzalloc(array_size(sizeof(*umem->odp_data->dma_list),
+ umem_odp->dma_list =
+ vzalloc(array_size(sizeof(*umem_odp->dma_list),
ib_umem_num_pages(umem)));
- if (!umem->odp_data->dma_list) {
+ if (!umem_odp->dma_list) {
ret_val = -ENOMEM;
goto out_page_list;
}
}
- /*
- * When using MMU notifiers, we will get a
- * notification before the "current" task (and MM) is
- * destroyed. We use the umem_rwsem semaphore to synchronize.
- */
- down_write(&context->umem_rwsem);
- context->odp_mrs_count++;
- if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
- rbt_ib_umem_insert(&umem->odp_data->interval_tree,
- &context->umem_tree);
- if (likely(!atomic_read(&context->notifier_count)) ||
- context->odp_mrs_count == 1)
- umem->odp_data->mn_counters_active = true;
- else
- list_add(&umem->odp_data->no_private_counters,
- &context->no_private_counters);
- downgrade_write(&context->umem_rwsem);
-
- if (context->odp_mrs_count == 1) {
- /*
- * Note that at this point, no MMU notifier is running
- * for this context!
- */
- atomic_set(&context->notifier_count, 0);
- INIT_HLIST_NODE(&context->mn.hlist);
- context->mn.ops = &ib_umem_notifiers;
- /*
- * Lock-dep detects a false positive for mmap_sem vs.
- * umem_rwsem, due to not grasping downgrade_write correctly.
- */
- lockdep_off();
- ret_val = mmu_notifier_register(&context->mn, mm);
- lockdep_on();
- if (ret_val) {
- pr_err("Failed to register mmu_notifier %d\n", ret_val);
- ret_val = -EBUSY;
- goto out_mutex;
- }
- }
-
- up_read(&context->umem_rwsem);
+ ret_val = get_per_mm(umem_odp);
+ if (ret_val)
+ goto out_dma_list;
+ add_umem_to_per_mm(umem_odp);
- /*
- * Note that doing an mmput can cause a notifier for the relevant mm.
- * If the notifier is called while we hold the umem_rwsem, this will
- * cause a deadlock. Therefore, we release the reference only after we
- * released the semaphore.
- */
- mmput(mm);
return 0;
-out_mutex:
- up_read(&context->umem_rwsem);
- vfree(umem->odp_data->dma_list);
+out_dma_list:
+ vfree(umem_odp->dma_list);
out_page_list:
- vfree(umem->odp_data->page_list);
-out_odp_data:
- kfree(umem->odp_data);
-out_mm:
- mmput(mm);
+ vfree(umem_odp->page_list);
return ret_val;
}
-void ib_umem_odp_release(struct ib_umem *umem)
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
- struct ib_ucontext *context = umem->context;
+ struct ib_umem *umem = &umem_odp->umem;
/*
* Ensure that no more pages are mapped in the umem.
@@ -478,61 +474,13 @@ void ib_umem_odp_release(struct ib_umem *umem)
* It is the driver's responsibility to ensure, before calling us,
* that the hardware will not attempt to access the MR any more.
*/
- ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
ib_umem_end(umem));
- down_write(&context->umem_rwsem);
- if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
- rbt_ib_umem_remove(&umem->odp_data->interval_tree,
- &context->umem_tree);
- context->odp_mrs_count--;
- if (!umem->odp_data->mn_counters_active) {
- list_del(&umem->odp_data->no_private_counters);
- complete_all(&umem->odp_data->notifier_completion);
- }
-
- /*
- * Downgrade the lock to a read lock. This ensures that the notifiers
- * (who lock the mutex for reading) will be able to finish, and we
- * will be able to enventually obtain the mmu notifiers SRCU. Note
- * that since we are doing it atomically, no other user could register
- * and unregister while we do the check.
- */
- downgrade_write(&context->umem_rwsem);
- if (!context->odp_mrs_count) {
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(context->tgid,
- PIDTYPE_PID);
- if (owning_process == NULL)
- /*
- * The process is already dead, notifier were removed
- * already.
- */
- goto out;
-
- owning_mm = get_task_mm(owning_process);
- if (owning_mm == NULL)
- /*
- * The process' mm is already dead, notifier were
- * removed already.
- */
- goto out_put_task;
- mmu_notifier_unregister(&context->mn, owning_mm);
-
- mmput(owning_mm);
-
-out_put_task:
- put_task_struct(owning_process);
- }
-out:
- up_read(&context->umem_rwsem);
-
- vfree(umem->odp_data->dma_list);
- vfree(umem->odp_data->page_list);
- kfree(umem->odp_data);
- kfree(umem);
+ remove_umem_from_per_mm(umem_odp);
+ put_per_mm(umem_odp);
+ vfree(umem_odp->dma_list);
+ vfree(umem_odp->page_list);
}
/*
@@ -544,7 +492,7 @@ out:
* @access_mask: access permissions needed for this page.
* @current_seq: sequence number for synchronization with invalidations.
 * The sequence number is taken from
- * umem->odp_data->notifiers_seq.
+ * umem_odp->notifiers_seq.
*
* The function returns -EFAULT if the DMA mapping operation fails. It returns
* -EAGAIN if a concurrent invalidation prevents us from updating the page.
@@ -554,12 +502,13 @@ out:
* umem.
*/
static int ib_umem_odp_map_dma_single_page(
- struct ib_umem *umem,
+ struct ib_umem_odp *umem_odp,
int page_index,
struct page *page,
u64 access_mask,
unsigned long current_seq)
{
+ struct ib_umem *umem = &umem_odp->umem;
struct ib_device *dev = umem->context->device;
dma_addr_t dma_addr;
int stored_page = 0;
@@ -571,11 +520,11 @@ static int ib_umem_odp_map_dma_single_page(
* handle case of a racing notifier. This check also allows us to bail
* early if we have a notifier running in parallel with us.
*/
- if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
+ if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
ret = -EAGAIN;
goto out;
}
- if (!(umem->odp_data->dma_list[page_index])) {
+ if (!(umem_odp->dma_list[page_index])) {
dma_addr = ib_dma_map_page(dev,
page,
0, BIT(umem->page_shift),
@@ -584,15 +533,15 @@ static int ib_umem_odp_map_dma_single_page(
ret = -EFAULT;
goto out;
}
- umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
- umem->odp_data->page_list[page_index] = page;
+ umem_odp->dma_list[page_index] = dma_addr | access_mask;
+ umem_odp->page_list[page_index] = page;
umem->npages++;
stored_page = 1;
- } else if (umem->odp_data->page_list[page_index] == page) {
- umem->odp_data->dma_list[page_index] |= access_mask;
+ } else if (umem_odp->page_list[page_index] == page) {
+ umem_odp->dma_list[page_index] |= access_mask;
} else {
pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
- umem->odp_data->page_list[page_index], page);
+ umem_odp->page_list[page_index], page);
/* Better remove the mapping now, to prevent any further
* damage. */
remove_existing_mapping = 1;
@@ -605,7 +554,7 @@ out:
if (remove_existing_mapping && umem->context->invalidate_range) {
invalidate_page_trampoline(
- umem,
+ umem_odp,
ib_umem_start(umem) + (page_index >> umem->page_shift),
ib_umem_start(umem) + ((page_index + 1) >>
umem->page_shift),
@@ -621,7 +570,7 @@ out:
*
* Pins the range of pages passed in the argument, and maps them to
* DMA addresses. The DMA addresses of the mapped pages is updated in
- * umem->odp_data->dma_list.
+ * umem_odp->dma_list.
*
* Returns the number of pages mapped in success, negative error code
* for failure.
@@ -629,7 +578,7 @@ out:
* the function from completing its task.
* An -ENOENT error code indicates that userspace process is being terminated
* and mm was already destroyed.
- * @umem: the umem to map and pin
+ * @umem_odp: the umem to map and pin
* @user_virt: the address from which we need to map.
* @bcnt: the minimal number of bytes to pin and map. The mapping might be
* bigger due to alignment, and may also be smaller in case of an error
@@ -639,13 +588,15 @@ out:
* range.
 * @current_seq: the MMU notifier's sequence value for synchronization with
 * invalidations. The sequence number is read from
- * umem->odp_data->notifiers_seq before calling this function
+ * umem_odp->notifiers_seq before calling this function
*/
-int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
- u64 access_mask, unsigned long current_seq)
+int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
+ u64 bcnt, u64 access_mask,
+ unsigned long current_seq)
{
+ struct ib_umem *umem = &umem_odp->umem;
struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
+ struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
struct page **local_page_list = NULL;
u64 page_mask, off;
int j, k, ret = 0, start_idx, npages = 0, page_shift;
@@ -669,15 +620,14 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
user_virt = user_virt & page_mask;
bcnt += off; /* Charge for the first page offset as well. */
- owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
- if (owning_process == NULL) {
+ /*
+ * owning_process is allowed to be NULL; this means the mm somehow
+ * outlives the originating process. Presumably mmget_not_zero() will
+ * fail in this case.
+ */
+ owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
+ if (WARN_ON(!mmget_not_zero(umem_odp->umem.owning_mm))) {
ret = -EINVAL;
- goto out_no_task;
- }
-
- owning_mm = get_task_mm(owning_process);
- if (owning_mm == NULL) {
- ret = -ENOENT;
goto out_put_task;
}
@@ -709,7 +659,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
break;
bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
- mutex_lock(&umem->odp_data->umem_mutex);
+ mutex_lock(&umem_odp->umem_mutex);
for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
if (user_virt & ~page_mask) {
p += PAGE_SIZE;
@@ -722,7 +672,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
}
ret = ib_umem_odp_map_dma_single_page(
- umem, k, local_page_list[j],
+ umem_odp, k, local_page_list[j],
access_mask, current_seq);
if (ret < 0)
break;
@@ -730,7 +680,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
p = page_to_phys(local_page_list[j]);
k++;
}
- mutex_unlock(&umem->odp_data->umem_mutex);
+ mutex_unlock(&umem_odp->umem_mutex);
if (ret < 0) {
/* Release left over pages when handling errors. */
@@ -749,16 +699,17 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
mmput(owning_mm);
out_put_task:
- put_task_struct(owning_process);
-out_no_task:
+ if (owning_process)
+ put_task_struct(owning_process);
free_page((unsigned long)local_page_list);
return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
-void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
+void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 bound)
{
+ struct ib_umem *umem = &umem_odp->umem;
int idx;
u64 addr;
struct ib_device *dev = umem->context->device;
@@ -770,12 +721,12 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
* faults from completion. We might be racing with other
* invalidations, so we must make sure we free each page only
* once. */
- mutex_lock(&umem->odp_data->umem_mutex);
+ mutex_lock(&umem_odp->umem_mutex);
for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
- if (umem->odp_data->page_list[idx]) {
- struct page *page = umem->odp_data->page_list[idx];
- dma_addr_t dma = umem->odp_data->dma_list[idx];
+ if (umem_odp->page_list[idx]) {
+ struct page *page = umem_odp->page_list[idx];
+ dma_addr_t dma = umem_odp->dma_list[idx];
dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
WARN_ON(!dma_addr);
@@ -798,12 +749,12 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
/* on demand pinning support */
if (!umem->context->invalidate_range)
put_page(page);
- umem->odp_data->page_list[idx] = NULL;
- umem->odp_data->dma_list[idx] = 0;
+ umem_odp->page_list[idx] = NULL;
+ umem_odp->dma_list[idx] = 0;
umem->npages--;
}
}
- mutex_unlock(&umem->odp_data->umem_mutex);
+ mutex_unlock(&umem_odp->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
@@ -830,7 +781,7 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
return -EAGAIN;
next = rbt_ib_umem_iter_next(node, start, last - 1);
umem = container_of(node, struct ib_umem_odp, interval_tree);
- ret_val = cb(umem->umem, start, last, cookie) || ret_val;
+ ret_val = cb(umem, start, last, cookie) || ret_val;
}
return ret_val;
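
The notifiers_count/notifiers_seq pair maintained by the accounting
helpers above implements a seqlock-style retry protocol for the ODP
page-fault path, much like KVM's mmu_notifier_retry. A hedged sketch of
the check a fault handler performs after sampling the sequence number
(assumes umem_mutex is held around the check):

static bool odp_notifier_retry(struct ib_umem_odp *umem_odp,
                               unsigned long sampled_seq)
{
        if (umem_odp->notifiers_count)
                return true;    /* an invalidation is in flight */
        if (umem_odp->notifiers_seq != sampled_seq)
                return true;    /* an invalidation completed meanwhile */
        return false;
}
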
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c34a6852d691..f55f48f6b272 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -138,7 +138,7 @@ static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
static dev_t dynamic_umad_dev;
static dev_t dynamic_issm_dev;
-static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
+static DEFINE_IDA(umad_ida);
static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);
@@ -1132,7 +1132,7 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
if (!port)
return -ENODEV;
- return sprintf(buf, "%s\n", port->ib_dev->name);
+ return sprintf(buf, "%s\n", dev_name(&port->ib_dev->dev));
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
@@ -1159,11 +1159,10 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
dev_t base_umad;
dev_t base_issm;
- devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
- if (devnum >= IB_UMAD_MAX_PORTS)
+ devnum = ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL);
+ if (devnum < 0)
return -1;
port->dev_num = devnum;
- set_bit(devnum, dev_map);
if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
@@ -1227,7 +1226,7 @@ err_dev:
err_cdev:
cdev_del(&port->cdev);
- clear_bit(devnum, dev_map);
+ ida_free(&umad_ida, devnum);
return -1;
}
@@ -1261,7 +1260,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
}
mutex_unlock(&port->file_mutex);
- clear_bit(port->dev_num, dev_map);
+ ida_free(&umad_ida, port->dev_num);
}
static void ib_umad_add_one(struct ib_device *device)
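
The bitmap-to-IDA conversion above removes the open-coded
find_first_zero_bit()/set_bit() search. A minimal sketch of the
replacement API (helper names illustrative; the IDA does its own locking):

#include <linux/idr.h>

static DEFINE_IDA(minor_ida);

/* Returns the lowest free minor in [0, IB_UMAD_MAX_PORTS - 1], or a
 * negative errno on exhaustion.
 */
static int umad_get_minor(void)
{
        return ida_alloc_max(&minor_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL);
}

static void umad_put_minor(int minor)
{
        ida_free(&minor_ida, minor);
}
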
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 5df8e548cc14..c97935a0c7c6 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -100,13 +100,14 @@ struct ib_uverbs_device {
atomic_t refcount;
int num_comp_vectors;
struct completion comp;
- struct device *dev;
+ struct device dev;
+ /* First group for device attributes, NULL terminated array */
+ const struct attribute_group *groups[2];
struct ib_device __rcu *ib_dev;
int devnum;
struct cdev cdev;
struct rb_root xrcd_tree;
struct mutex xrcd_tree_mutex;
- struct kobject kobj;
struct srcu_struct disassociate_srcu;
struct mutex lists_mutex; /* protect lists */
struct list_head uverbs_file_list;
@@ -146,7 +147,6 @@ struct ib_uverbs_file {
struct ib_event_handler event_handler;
struct ib_uverbs_async_event_file *async_file;
struct list_head list;
- int is_closed;
/*
* To access the uobjects list hw_destroy_rwsem must be held for write
@@ -158,6 +158,9 @@ struct ib_uverbs_file {
spinlock_t uobjects_lock;
struct list_head uobjects;
+ struct mutex umap_lock;
+ struct list_head umaps;
+
u64 uverbs_cmd_mask;
u64 uverbs_ex_cmd_mask;
@@ -218,12 +221,6 @@ struct ib_ucq_object {
u32 async_events_reported;
};
-struct ib_uflow_resources;
-struct ib_uflow_object {
- struct ib_uobject uobject;
- struct ib_uflow_resources *resources;
-};
-
extern const struct file_operations uverbs_event_fops;
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a21d5214afc3..a93853770e3c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -117,18 +117,12 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
/* ufile is required when some objects are released */
ucontext->ufile = file;
- rcu_read_lock();
- ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
- rcu_read_unlock();
- ucontext->closing = 0;
+ ucontext->closing = false;
ucontext->cleanup_retryable = false;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- ucontext->umem_tree = RB_ROOT_CACHED;
- init_rwsem(&ucontext->umem_rwsem);
- ucontext->odp_mrs_count = 0;
- INIT_LIST_HEAD(&ucontext->no_private_counters);
-
+ mutex_init(&ucontext->per_mm_list_lock);
+ INIT_LIST_HEAD(&ucontext->per_mm_list);
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
ucontext->invalidate_range = NULL;
@@ -172,7 +166,6 @@ err_fd:
put_unused_fd(resp.async_fd);
err_free:
- put_pid(ucontext->tgid);
ib_dev->dealloc_ucontext(ucontext);
err_alloc:
@@ -2027,33 +2020,55 @@ static int modify_qp(struct ib_uverbs_file *file,
if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
cmd->base.cur_qp_state > IB_QPS_ERR) ||
- cmd->base.qp_state > IB_QPS_ERR) {
+ (cmd->base.attr_mask & IB_QP_STATE &&
+ cmd->base.qp_state > IB_QPS_ERR)) {
ret = -EINVAL;
goto release_qp;
}
- attr->qp_state = cmd->base.qp_state;
- attr->cur_qp_state = cmd->base.cur_qp_state;
- attr->path_mtu = cmd->base.path_mtu;
- attr->path_mig_state = cmd->base.path_mig_state;
- attr->qkey = cmd->base.qkey;
- attr->rq_psn = cmd->base.rq_psn;
- attr->sq_psn = cmd->base.sq_psn;
- attr->dest_qp_num = cmd->base.dest_qp_num;
- attr->qp_access_flags = cmd->base.qp_access_flags;
- attr->pkey_index = cmd->base.pkey_index;
- attr->alt_pkey_index = cmd->base.alt_pkey_index;
- attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
- attr->max_rd_atomic = cmd->base.max_rd_atomic;
- attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
- attr->min_rnr_timer = cmd->base.min_rnr_timer;
- attr->port_num = cmd->base.port_num;
- attr->timeout = cmd->base.timeout;
- attr->retry_cnt = cmd->base.retry_cnt;
- attr->rnr_retry = cmd->base.rnr_retry;
- attr->alt_port_num = cmd->base.alt_port_num;
- attr->alt_timeout = cmd->base.alt_timeout;
- attr->rate_limit = cmd->rate_limit;
+ if (cmd->base.attr_mask & IB_QP_STATE)
+ attr->qp_state = cmd->base.qp_state;
+ if (cmd->base.attr_mask & IB_QP_CUR_STATE)
+ attr->cur_qp_state = cmd->base.cur_qp_state;
+ if (cmd->base.attr_mask & IB_QP_PATH_MTU)
+ attr->path_mtu = cmd->base.path_mtu;
+ if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
+ attr->path_mig_state = cmd->base.path_mig_state;
+ if (cmd->base.attr_mask & IB_QP_QKEY)
+ attr->qkey = cmd->base.qkey;
+ if (cmd->base.attr_mask & IB_QP_RQ_PSN)
+ attr->rq_psn = cmd->base.rq_psn;
+ if (cmd->base.attr_mask & IB_QP_SQ_PSN)
+ attr->sq_psn = cmd->base.sq_psn;
+ if (cmd->base.attr_mask & IB_QP_DEST_QPN)
+ attr->dest_qp_num = cmd->base.dest_qp_num;
+ if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
+ attr->qp_access_flags = cmd->base.qp_access_flags;
+ if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
+ attr->pkey_index = cmd->base.pkey_index;
+ if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+ attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+ if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+ attr->max_rd_atomic = cmd->base.max_rd_atomic;
+ if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+ if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
+ attr->min_rnr_timer = cmd->base.min_rnr_timer;
+ if (cmd->base.attr_mask & IB_QP_PORT)
+ attr->port_num = cmd->base.port_num;
+ if (cmd->base.attr_mask & IB_QP_TIMEOUT)
+ attr->timeout = cmd->base.timeout;
+ if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
+ attr->retry_cnt = cmd->base.retry_cnt;
+ if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
+ attr->rnr_retry = cmd->base.rnr_retry;
+ if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
+ attr->alt_port_num = cmd->base.alt_port_num;
+ attr->alt_timeout = cmd->base.alt_timeout;
+ attr->alt_pkey_index = cmd->base.alt_pkey_index;
+ }
+ if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
+ attr->rate_limit = cmd->rate_limit;
if (cmd->base.attr_mask & IB_QP_AV)
copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
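
The rewritten block above reads a command field only when its attr_mask
bit is set, because userspace leaves unselected fields uninitialized. The
repeated pattern could be captured in a macro such as the following
(purely illustrative, not part of the patch):

/* Copy cmd->field into attr->field only when bit is present in mask. */
#define QP_ATTR_COPY(attr, cmd, mask, bit, field)               \
        do {                                                    \
                if ((mask) & (bit))                             \
                        (attr)->field = (cmd)->field;           \
        } while (0)

/* e.g. QP_ATTR_COPY(attr, &cmd->base, cmd->base.attr_mask,
 *                   IB_QP_QKEY, qkey);
 */
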
@@ -2747,16 +2762,7 @@ out_put:
return ret ? ret : in_len;
}
-struct ib_uflow_resources {
- size_t max;
- size_t num;
- size_t collection_num;
- size_t counters_num;
- struct ib_counters **counters;
- struct ib_flow_action **collection;
-};
-
-static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
+struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
struct ib_uflow_resources *resources;
@@ -2786,6 +2792,7 @@ err:
return NULL;
}
+EXPORT_SYMBOL(flow_resources_alloc);
void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
@@ -2804,10 +2811,11 @@ void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
kfree(uflow_res->counters);
kfree(uflow_res);
}
+EXPORT_SYMBOL(ib_uverbs_flow_resources_free);
-static void flow_resources_add(struct ib_uflow_resources *uflow_res,
- enum ib_flow_spec_type type,
- void *ibobj)
+void flow_resources_add(struct ib_uflow_resources *uflow_res,
+ enum ib_flow_spec_type type,
+ void *ibobj)
{
WARN_ON(uflow_res->num >= uflow_res->max);
@@ -2828,6 +2836,7 @@ static void flow_resources_add(struct ib_uflow_resources *uflow_res,
uflow_res->num++;
}
+EXPORT_SYMBOL(flow_resources_add);
static int kern_spec_to_ib_spec_action(struct ib_uverbs_file *ufile,
struct ib_uverbs_flow_spec *kern_spec,
@@ -3462,7 +3471,6 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
struct ib_uverbs_create_flow cmd;
struct ib_uverbs_create_flow_resp resp;
struct ib_uobject *uobj;
- struct ib_uflow_object *uflow;
struct ib_flow *flow_id;
struct ib_uverbs_flow_attr *kern_flow_attr;
struct ib_flow_attr *flow_attr;
@@ -3601,13 +3609,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
err = PTR_ERR(flow_id);
goto err_free;
}
- atomic_inc(&qp->usecnt);
- flow_id->qp = qp;
- flow_id->device = qp->device;
- flow_id->uobject = uobj;
- uobj->object = flow_id;
- uflow = container_of(uobj, typeof(*uflow), uobject);
- uflow->resources = uflow_res;
+
+ ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
memset(&resp, 0, sizeof(resp));
resp.flow_handle = uobj->id;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 1a6b229e3db3..b0e493e8d860 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -57,6 +57,7 @@ struct bundle_priv {
struct ib_uverbs_attr *uattrs;
DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
+ DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);
/*
* Must be last. bundle ends in a flex array which overlaps
@@ -143,6 +144,86 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
0, uattr->len - len);
}
+static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
+ const struct uverbs_api_attr *attr_uapi,
+ struct uverbs_objs_arr_attr *attr,
+ struct ib_uverbs_attr *uattr,
+ u32 attr_bkey)
+{
+ const struct uverbs_attr_spec *spec = &attr_uapi->spec;
+ size_t array_len;
+ u32 *idr_vals;
+ int ret = 0;
+ size_t i;
+
+ if (uattr->attr_data.reserved)
+ return -EINVAL;
+
+ if (uattr->len % sizeof(u32))
+ return -EINVAL;
+
+ array_len = uattr->len / sizeof(u32);
+ if (array_len < spec->u2.objs_arr.min_len ||
+ array_len > spec->u2.objs_arr.max_len)
+ return -EINVAL;
+
+ attr->uobjects =
+ uverbs_alloc(&pbundle->bundle,
+ array_size(array_len, sizeof(*attr->uobjects)));
+ if (IS_ERR(attr->uobjects))
+ return PTR_ERR(attr->uobjects);
+
+ /*
+ * Since an idr is 4B and *uobjects is >= 4B, we can use attr->uobjects
+ * to store the idr array and avoid an extra memory allocation. The
+ * idr array is offset to the end of the uobjects array, so we can
+ * read each idr and replace it with a pointer.
+ */
+ idr_vals = (u32 *)(attr->uobjects + array_len) - array_len;
+
+ if (uattr->len > sizeof(uattr->data)) {
+ ret = copy_from_user(idr_vals, u64_to_user_ptr(uattr->data),
+ uattr->len);
+ if (ret)
+ return -EFAULT;
+ } else {
+ memcpy(idr_vals, &uattr->data, uattr->len);
+ }
+
+ for (i = 0; i != array_len; i++) {
+ attr->uobjects[i] = uverbs_get_uobject_from_file(
+ spec->u2.objs_arr.obj_type, pbundle->bundle.ufile,
+ spec->u2.objs_arr.access, idr_vals[i]);
+ if (IS_ERR(attr->uobjects[i])) {
+ ret = PTR_ERR(attr->uobjects[i]);
+ break;
+ }
+ }
+
+ attr->len = i;
+ __set_bit(attr_bkey, pbundle->spec_finalize);
+ return ret;
+}
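
The aliasing trick in the comment above is worth spelling out: ids[i] starts at or after the pointer slot objs[i], and the write to objs[i-1] ends exactly where it cannot yet reach ids[i], so a forward walk always reads each id before any write clobbers it. A standalone userspace sketch of the same layout (illustrative only; names are made up):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = 4;
	void **objs = calloc(n, sizeof(*objs));	/* 8n bytes on 64-bit */
	uint32_t *ids;
	size_t i;

	if (!objs)
		return 1;
	ids = (uint32_t *)(objs + n) - n;	/* ids packed at the tail */

	for (i = 0; i < n; i++)
		ids[i] = 100 + (uint32_t)i;

	for (i = 0; i < n; i++) {
		uint32_t id = ids[i];	/* read id i before slot i is overwritten */
		objs[i] = (void *)(uintptr_t)id;	/* stand-in for the idr lookup */
	}

	for (i = 0; i < n; i++)
		printf("obj[%zu] = %p\n", i, objs[i]);
	free(objs);
	return 0;
}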
+
+static int uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
+ struct uverbs_objs_arr_attr *attr,
+ bool commit)
+{
+ const struct uverbs_attr_spec *spec = &attr_uapi->spec;
+ int current_ret;
+ int ret = 0;
+ size_t i;
+
+ for (i = 0; i != attr->len; i++) {
+ current_ret = uverbs_finalize_object(
+ attr->uobjects[i], spec->u2.objs_arr.access, commit);
+ if (!ret)
+ ret = current_ret;
+ }
+
+ return ret;
+}
+
static int uverbs_process_attr(struct bundle_priv *pbundle,
const struct uverbs_api_attr *attr_uapi,
struct ib_uverbs_attr *uattr, u32 attr_bkey)
@@ -246,6 +327,11 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
}
break;
+
+ case UVERBS_ATTR_TYPE_IDRS_ARRAY:
+ return uverbs_process_idrs_array(pbundle, attr_uapi,
+ &e->objs_arr_attr, uattr,
+ attr_bkey);
default:
return -EOPNOTSUPP;
}
@@ -300,8 +386,7 @@ static int uverbs_set_attr(struct bundle_priv *pbundle,
return -EPROTONOSUPPORT;
return 0;
}
- attr = srcu_dereference(
- *slot, &pbundle->bundle.ufile->device->disassociate_srcu);
+ attr = rcu_dereference_protected(*slot, true);
/* Reject duplicate attributes from user-space */
if (test_bit(attr_bkey, pbundle->bundle.attr_present))
@@ -384,6 +469,7 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
unsigned int i;
int ret = 0;
+ /* fast path for simple uobjects */
i = -1;
while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
i + 1)) < key_bitmap_len) {
@@ -397,6 +483,30 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
ret = current_ret;
}
+ i = -1;
+ while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len,
+ i + 1)) < key_bitmap_len) {
+ struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
+ const struct uverbs_api_attr *attr_uapi;
+ void __rcu **slot;
+ int current_ret;
+
+ slot = uapi_get_attr_for_method(
+ pbundle,
+ pbundle->method_key | uapi_bkey_to_key_attr(i));
+ if (WARN_ON(!slot))
+ continue;
+
+ attr_uapi = rcu_dereference_protected(*slot, true);
+
+ if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
+ current_ret = uverbs_free_idrs_array(
+ attr_uapi, &attr->objs_arr_attr, commit);
+ if (!ret)
+ ret = current_ret;
+ }
+ }
+
for (memblock = pbundle->allocated_mem; memblock;) {
struct bundle_alloc_head *tmp = memblock;
@@ -429,7 +539,7 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
uapi_key_ioctl_method(hdr->method_id));
if (unlikely(!slot))
return -EPROTONOSUPPORT;
- method_elm = srcu_dereference(*slot, &ufile->device->disassociate_srcu);
+ method_elm = rcu_dereference_protected(*slot, true);
if (!method_elm->use_stack) {
pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL);
@@ -461,6 +571,7 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
memset(pbundle->bundle.attr_present, 0,
sizeof(pbundle->bundle.attr_present));
memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
+ memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));
ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
destroy_ret = bundle_destroy(pbundle, ret == 0);
@@ -611,3 +722,26 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
return 0;
}
EXPORT_SYMBOL(uverbs_copy_to);
+
+int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val)
+{
+ const struct uverbs_attr *attr;
+
+ attr = uverbs_attr_get(attrs_bundle, idx);
+ if (IS_ERR(attr)) {
+ if ((PTR_ERR(attr) != -ENOENT) || !def_val)
+ return PTR_ERR(attr);
+
+ *to = *def_val;
+ } else {
+ *to = attr->ptr_attr.data;
+ }
+
+ if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(_uverbs_get_const);
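
A hypothetical handler fragment showing the intended calling convention of _uverbs_get_const(); MY_ATTR_X and the bounds are invented for illustration, and in-tree callers would normally reach this through a convenience wrapper rather than the underscored function:

static int my_handler(struct uverbs_attr_bundle *attrs)
{
	s64 val;
	s64 def = 1;	/* used when userspace omits the attribute */
	int ret;

	/* Accept MY_ATTR_X in [0, 255], defaulting to 1 when absent */
	ret = _uverbs_get_const(&val, attrs, MY_ATTR_X, 0, 255, &def);
	if (ret)
		return ret;

	/* val is now range-checked and ready to use */
	return 0;
}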
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 6d974e2363df..6d373f5515b7 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -45,6 +45,7 @@
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
+#include <linux/sched/mm.h>
#include <linux/uaccess.h>
@@ -72,7 +73,7 @@ enum {
static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;
-static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
+static DEFINE_IDA(uverbs_ida);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
@@ -169,20 +170,16 @@ int uverbs_dealloc_mw(struct ib_mw *mw)
return ret;
}
-static void ib_uverbs_release_dev(struct kobject *kobj)
+static void ib_uverbs_release_dev(struct device *device)
{
struct ib_uverbs_device *dev =
- container_of(kobj, struct ib_uverbs_device, kobj);
+ container_of(device, struct ib_uverbs_device, dev);
uverbs_destroy_api(dev->uapi);
cleanup_srcu_struct(&dev->disassociate_srcu);
kfree(dev);
}
-static struct kobj_type ib_uverbs_dev_ktype = {
- .release = ib_uverbs_release_dev,
-};
-
static void ib_uverbs_release_async_event_file(struct kref *ref)
{
struct ib_uverbs_async_event_file *file =
@@ -265,7 +262,7 @@ void ib_uverbs_release_file(struct kref *ref)
if (atomic_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
- kobject_put(&file->device->kobj);
+ put_device(&file->device->dev);
kfree(file);
}
@@ -440,6 +437,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
list_del(&entry->obj_list);
kfree(entry);
}
+ file->ev_queue.is_closed = 1;
spin_unlock_irq(&file->ev_queue.lock);
uverbs_close_fd(filp);
@@ -816,6 +814,226 @@ out:
}
/*
+ * Each time we map IO memory into user space this keeps track of the mapping.
+ * When the device is hot-unplugged we 'zap' the mmaps in user space to point
+ * to the zero page and allow the hot unplug to proceed.
+ *
+ * This is necessary for cases like PCI physical hot unplug as the actual BAR
+ * memory may vanish after this and access to it from userspace could MCE.
+ *
+ * RDMA drivers supporting disassociation must have their user space designed
+ * to cope in some way with their IO pages going to the zero page.
+ */
+struct rdma_umap_priv {
+ struct vm_area_struct *vma;
+ struct list_head list;
+};
+
+static const struct vm_operations_struct rdma_umap_ops;
+
+static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+
+ priv->vma = vma;
+ vma->vm_private_data = priv;
+ vma->vm_ops = &rdma_umap_ops;
+
+ mutex_lock(&ufile->umap_lock);
+ list_add(&priv->list, &ufile->umaps);
+ mutex_unlock(&ufile->umap_lock);
+}
+
+/*
+ * The VMA has been dup'd, initialize the vm_private_data with a new tracking
+ * struct
+ */
+static void rdma_umap_open(struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+ struct rdma_umap_priv *opriv = vma->vm_private_data;
+ struct rdma_umap_priv *priv;
+
+ if (!opriv)
+ return;
+
+ /* We are racing with disassociation */
+ if (!down_read_trylock(&ufile->hw_destroy_rwsem))
+ goto out_zap;
+ /*
+ * Disassociation already completed, the VMA should already be zapped.
+ */
+ if (!ufile->ucontext)
+ goto out_unlock;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto out_unlock;
+ rdma_umap_priv_init(priv, vma);
+
+ up_read(&ufile->hw_destroy_rwsem);
+ return;
+
+out_unlock:
+ up_read(&ufile->hw_destroy_rwsem);
+out_zap:
+ /*
+ * We can't allow the VMA to be created with the actual IO pages, that
+ * would break our API contract, and it can't be stopped at this
+ * point, so zap it.
+ */
+ vma->vm_private_data = NULL;
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void rdma_umap_close(struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+ struct rdma_umap_priv *priv = vma->vm_private_data;
+
+ if (!priv)
+ return;
+
+ /*
+ * The vma holds a reference on the struct file that created it, which
+ * in turn means that the ib_uverbs_file is guaranteed to exist at
+ * this point.
+ */
+ mutex_lock(&ufile->umap_lock);
+ list_del(&priv->list);
+ mutex_unlock(&ufile->umap_lock);
+ kfree(priv);
+}
+
+static const struct vm_operations_struct rdma_umap_ops = {
+ .open = rdma_umap_open,
+ .close = rdma_umap_close,
+};
+
+static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma,
+ unsigned long size)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ struct rdma_umap_priv *priv;
+
+ if (vma->vm_end - vma->vm_start != size)
+ return ERR_PTR(-EINVAL);
+
+ /* Driver is using this wrong, must be called by ib_uverbs_mmap */
+ if (WARN_ON(!vma->vm_file ||
+ vma->vm_file->private_data != ufile))
+ return ERR_PTR(-EINVAL);
+ lockdep_assert_held(&ufile->device->disassociate_srcu);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+ return priv;
+}
+
+/*
+ * Map IO memory into a process. This is to be called by drivers as part of
+ * their mmap() functions if they wish to send something like PCI-E BAR memory
+ * to userspace.
+ */
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
+
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ vma->vm_page_prot = prot;
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
+ kfree(priv);
+ return -EAGAIN;
+ }
+
+ rdma_umap_priv_init(priv, vma);
+ return 0;
+}
+EXPORT_SYMBOL(rdma_user_mmap_io);
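
A hypothetical driver mmap handler sketching the intended use (the foo_* names and the doorbell-BAR lookup are assumptions, not part of this patch):

static int foo_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
{
	struct foo_ucontext *uctx = to_foo_ucontext(ucontext);
	unsigned long pfn = uctx->db_bar_addr >> PAGE_SHIFT;

	/* Map one page of doorbell BAR as uncached IO memory; the core
	 * tracks the VMA and can zap it on hot unplug. */
	return rdma_user_mmap_io(ucontext, vma, pfn, PAGE_SIZE,
				 pgprot_noncached(vma->vm_page_prot));
}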
+
+/*
+ * The page case is here for a slightly different reason, the driver expects
+ * to be able to free the page it is sharing to user space when it destroys
+ * its ucontext, which means we need to zap the user space references.
+ *
+ * We could handle this differently by providing an API to allocate a shared
+ * page and then only freeing the shared page when the last ufile is
+ * destroyed.
+ */
+int rdma_user_mmap_page(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma, struct page *page,
+ unsigned long size)
+{
+ struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
+
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
+ vma->vm_page_prot)) {
+ kfree(priv);
+ return -EAGAIN;
+ }
+
+ rdma_umap_priv_init(priv, vma);
+ return 0;
+}
+EXPORT_SYMBOL(rdma_user_mmap_page);
+
+void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
+{
+ struct rdma_umap_priv *priv, *next_priv;
+
+ lockdep_assert_held(&ufile->hw_destroy_rwsem);
+
+ while (1) {
+ struct mm_struct *mm = NULL;
+
+ /* Get an arbitrary mm pointer that hasn't been cleaned yet */
+ mutex_lock(&ufile->umap_lock);
+ if (!list_empty(&ufile->umaps)) {
+ mm = list_first_entry(&ufile->umaps,
+ struct rdma_umap_priv, list)
+ ->vma->vm_mm;
+ mmget(mm);
+ }
+ mutex_unlock(&ufile->umap_lock);
+ if (!mm)
+ return;
+
+ /*
+ * The umap_lock is nested under mmap_sem since it is used within
+ * the vma_ops callbacks, so we have to clean the list one mm
+ * at a time to get the lock ordering right. Typically there
+ * will only be one mm, so no big deal.
+ */
+ down_write(&mm->mmap_sem);
+ mutex_lock(&ufile->umap_lock);
+ list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
+ list) {
+ struct vm_area_struct *vma = priv->vma;
+
+ if (vma->vm_mm != mm)
+ continue;
+ list_del_init(&priv->list);
+
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+ vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
+ }
+ mutex_unlock(&ufile->umap_lock);
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+}
+
+/*
* ib_uverbs_open() does not need the BKL:
*
* - the ib_uverbs_device structures are properly reference counted and
@@ -838,6 +1056,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
if (!atomic_inc_not_zero(&dev->refcount))
return -ENXIO;
+ get_device(&dev->dev);
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
mutex_lock(&dev->lists_mutex);
ib_dev = srcu_dereference(dev->ib_dev,
@@ -875,9 +1094,10 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
spin_lock_init(&file->uobjects_lock);
INIT_LIST_HEAD(&file->uobjects);
init_rwsem(&file->hw_destroy_rwsem);
+ mutex_init(&file->umap_lock);
+ INIT_LIST_HEAD(&file->umaps);
filp->private_data = file;
- kobject_get(&dev->kobj);
list_add_tail(&file->list, &dev->uverbs_file_list);
mutex_unlock(&dev->lists_mutex);
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
@@ -898,6 +1118,7 @@ err:
if (atomic_dec_and_test(&dev->refcount))
ib_uverbs_comp_dev(dev);
+ put_device(&dev->dev);
return ret;
}
@@ -908,10 +1129,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);
mutex_lock(&file->device->lists_mutex);
- if (!file->is_closed) {
- list_del(&file->list);
- file->is_closed = 1;
- }
+ list_del_init(&file->list);
mutex_unlock(&file->device->lists_mutex);
if (file->async_file)
@@ -950,37 +1168,34 @@ static struct ib_client uverbs_client = {
.remove = ib_uverbs_remove_one
};
-static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
+static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
+ struct ib_uverbs_device *dev =
+ container_of(device, struct ib_uverbs_device, dev);
int ret = -ENODEV;
int srcu_key;
- struct ib_uverbs_device *dev = dev_get_drvdata(device);
struct ib_device *ib_dev;
- if (!dev)
- return -ENODEV;
-
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
if (ib_dev)
- ret = sprintf(buf, "%s\n", ib_dev->name);
+ ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
return ret;
}
-static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+static DEVICE_ATTR_RO(ibdev);
-static ssize_t show_dev_abi_version(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t abi_version_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- struct ib_uverbs_device *dev = dev_get_drvdata(device);
+ struct ib_uverbs_device *dev =
+ container_of(device, struct ib_uverbs_device, dev);
int ret = -ENODEV;
int srcu_key;
struct ib_device *ib_dev;
- if (!dev)
- return -ENODEV;
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
if (ib_dev)
@@ -989,7 +1204,17 @@ static ssize_t show_dev_abi_version(struct device *device,
return ret;
}
-static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
+static DEVICE_ATTR_RO(abi_version);
+
+static struct attribute *ib_dev_attrs[] = {
+ &dev_attr_abi_version.attr,
+ &dev_attr_ibdev.attr,
+ NULL,
+};
+
+static const struct attribute_group dev_attr_group = {
+ .attrs = ib_dev_attrs,
+};
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_VERBS_ABI_VERSION));
@@ -1027,65 +1252,56 @@ static void ib_uverbs_add_one(struct ib_device *device)
return;
}
+ device_initialize(&uverbs_dev->dev);
+ uverbs_dev->dev.class = uverbs_class;
+ uverbs_dev->dev.parent = device->dev.parent;
+ uverbs_dev->dev.release = ib_uverbs_release_dev;
+ uverbs_dev->groups[0] = &dev_attr_group;
+ uverbs_dev->dev.groups = uverbs_dev->groups;
atomic_set(&uverbs_dev->refcount, 1);
init_completion(&uverbs_dev->comp);
uverbs_dev->xrcd_tree = RB_ROOT;
mutex_init(&uverbs_dev->xrcd_tree_mutex);
- kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
mutex_init(&uverbs_dev->lists_mutex);
INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
+ rcu_assign_pointer(uverbs_dev->ib_dev, device);
+ uverbs_dev->num_comp_vectors = device->num_comp_vectors;
- devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
- if (devnum >= IB_UVERBS_MAX_DEVICES)
+ devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
+ GFP_KERNEL);
+ if (devnum < 0)
goto err;
uverbs_dev->devnum = devnum;
- set_bit(devnum, dev_map);
if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
else
base = IB_UVERBS_BASE_DEV + devnum;
- rcu_assign_pointer(uverbs_dev->ib_dev, device);
- uverbs_dev->num_comp_vectors = device->num_comp_vectors;
-
if (ib_uverbs_create_uapi(device, uverbs_dev))
goto err_uapi;
- cdev_init(&uverbs_dev->cdev, NULL);
+ uverbs_dev->dev.devt = base;
+ dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);
+
+ cdev_init(&uverbs_dev->cdev,
+ device->mmap ? &uverbs_mmap_fops : &uverbs_fops);
uverbs_dev->cdev.owner = THIS_MODULE;
- uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
- cdev_set_parent(&uverbs_dev->cdev, &uverbs_dev->kobj);
- kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
- if (cdev_add(&uverbs_dev->cdev, base, 1))
- goto err_cdev;
-
- uverbs_dev->dev = device_create(uverbs_class, device->dev.parent,
- uverbs_dev->cdev.dev, uverbs_dev,
- "uverbs%d", uverbs_dev->devnum);
- if (IS_ERR(uverbs_dev->dev))
- goto err_cdev;
-
- if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
- goto err_class;
- if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
- goto err_class;
- ib_set_client_data(device, &uverbs_client, uverbs_dev);
+ ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
+ if (ret)
+ goto err_uapi;
+ ib_set_client_data(device, &uverbs_client, uverbs_dev);
return;
-err_class:
- device_destroy(uverbs_class, uverbs_dev->cdev.dev);
-err_cdev:
- cdev_del(&uverbs_dev->cdev);
err_uapi:
- clear_bit(devnum, dev_map);
+ ida_free(&uverbs_ida, devnum);
err:
if (atomic_dec_and_test(&uverbs_dev->refcount))
ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
- kobject_put(&uverbs_dev->kobj);
+ put_device(&uverbs_dev->dev);
return;
}
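
The kobject is replaced here by an embedded struct device whose release callback frees the containing object, so the uverbs device now lives exactly as long as its last reference. A minimal sketch of that pattern, with made-up names:

struct my_dev {
	struct device dev;
	/* ... driver state ... */
};

static void my_release(struct device *device)
{
	kfree(container_of(device, struct my_dev, dev));
}

static struct my_dev *my_dev_alloc(void)
{
	struct my_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);

	if (!md)
		return NULL;
	device_initialize(&md->dev);
	md->dev.release = my_release;
	return md;	/* drop the reference with put_device(&md->dev) */
}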
@@ -1106,8 +1322,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
while (!list_empty(&uverbs_dev->uverbs_file_list)) {
file = list_first_entry(&uverbs_dev->uverbs_file_list,
struct ib_uverbs_file, list);
- file->is_closed = 1;
- list_del(&file->list);
+ list_del_init(&file->list);
kref_get(&file->ref);
/* We must release the mutex before going ahead and calling
@@ -1155,10 +1370,8 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
if (!uverbs_dev)
return;
- dev_set_drvdata(uverbs_dev->dev, NULL);
- device_destroy(uverbs_class, uverbs_dev->cdev.dev);
- cdev_del(&uverbs_dev->cdev);
- clear_bit(uverbs_dev->devnum, dev_map);
+ cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
+ ida_free(&uverbs_ida, uverbs_dev->devnum);
if (device->disassociate_ucontext) {
/* We disassociate HW resources and immediately return.
@@ -1181,7 +1394,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
if (wait_clients)
wait_for_completion(&uverbs_dev->comp);
- kobject_put(&uverbs_dev->kobj);
+ put_device(&uverbs_dev->dev);
}
static char *uverbs_devnode(struct device *dev, umode_t *mode)
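
The rename from show_ibdev()/show_dev_abi_version() above is not cosmetic: DEVICE_ATTR_RO(name) expands to a read-only attribute wired to a function literally called name_show(). A minimal sketch of the convention:

static ssize_t foo_show(struct device *device, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR_RO(foo);	/* defines dev_attr_foo, mode 0444 */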
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index d8cfafe23bd9..cb9486ad5c67 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -326,11 +326,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
if (IS_ERR(action))
return PTR_ERR(action);
- atomic_set(&action->usecnt, 0);
- action->device = ib_dev;
- action->type = IB_FLOW_ACTION_ESP;
- action->uobject = uobj;
- uobj->object = action;
+ uverbs_flow_action_fill_action(action, uobj, ib_dev,
+ IB_FLOW_ACTION_ESP);
return 0;
}
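
As with ib_set_flow() earlier, the new helper folds in the removed open-coded initialization. A sketch reconstructed from those removed lines (the real definition lives in a shared header outside this hunk):

static inline void
uverbs_flow_action_fill_action(struct ib_flow_action *action,
			       struct ib_uobject *uobj,
			       struct ib_device *ib_dev,
			       enum ib_flow_action_type type)
{
	atomic_set(&action->usecnt, 0);
	action->device = ib_dev;
	action->type = type;
	action->uobject = uobj;
	uobj->object = action;
}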
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 73ea6f0db88f..86f3fc5e04b4 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -73,6 +73,18 @@ static int uapi_merge_method(struct uverbs_api *uapi,
if (attr->attr.type == UVERBS_ATTR_TYPE_ENUM_IN)
method_elm->driver_method |= is_driver;
+ /*
+ * Like other uobject based things we only support a single
+ * uobject being NEW'd or DESTROY'd
+ */
+ if (attr->attr.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
+ u8 access = attr->attr.u2.objs_arr.access;
+
+ if (WARN_ON(access == UVERBS_ACCESS_NEW ||
+ access == UVERBS_ACCESS_DESTROY))
+ return -EINVAL;
+ }
+
attr_slot =
uapi_add_elm(uapi, method_key | uapi_key_attr(attr->id),
sizeof(*attr_slot));
@@ -248,6 +260,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
kfree(rcu_dereference_protected(*slot, true));
radix_tree_iter_delete(&uapi->radix, &iter, slot);
}
+ kfree(uapi);
}
struct uverbs_api *uverbs_alloc_api(
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 6ee03d6089eb..178899e3ce73 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -264,7 +264,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
}
pd->res.type = RDMA_RESTRACK_PD;
- pd->res.kern_name = caller;
+ rdma_restrack_set_task(&pd->res, caller);
rdma_restrack_add(&pd->res);
if (mr_access_flags) {
@@ -710,7 +710,7 @@ static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
ah_attr->roce.dmac,
- sgid_attr->ndev, &hop_limit);
+ sgid_attr, &hop_limit);
grh->hop_limit = hop_limit;
return ret;
@@ -1509,8 +1509,7 @@ static const struct {
};
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask,
- enum rdma_link_layer ll)
+ enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
enum ib_qp_attr_mask req_param, opt_param;
@@ -1629,14 +1628,16 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
if (rdma_ib_or_roce(qp->device, port)) {
if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
- pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
- __func__, qp->device->name);
+ dev_warn(&qp->device->dev,
+ "%s rq_psn overflow, masking to 24 bits\n",
+ __func__);
attr->rq_psn &= 0xffffff;
}
if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
- pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n",
- __func__, qp->device->name);
+ dev_warn(&qp->device->dev,
+ " %s sq_psn overflow, masking to 24 bits\n",
+ __func__);
attr->sq_psn &= 0xffffff;
}
}
@@ -1888,7 +1889,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device,
cq->cq_context = cq_context;
atomic_set(&cq->usecnt, 0);
cq->res.type = RDMA_RESTRACK_CQ;
- cq->res.kern_name = caller;
+ rdma_restrack_set_task(&cq->res, caller);
rdma_restrack_add(&cq->res);
}
@@ -2621,3 +2622,49 @@ void ib_drain_qp(struct ib_qp *qp)
ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
+
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *))
+{
+ struct rdma_netdev_alloc_params params;
+ struct net_device *netdev;
+ int rc;
+
+ if (!device->rdma_netdev_get_params)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ rc = device->rdma_netdev_get_params(device, port_num, type, &params);
+ if (rc)
+ return ERR_PTR(rc);
+
+ netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
+ setup, params.txqs, params.rxqs);
+ if (!netdev)
+ return ERR_PTR(-ENOMEM);
+
+ return netdev;
+}
+EXPORT_SYMBOL(rdma_alloc_netdev);
+
+int rdma_init_netdev(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *),
+ struct net_device *netdev)
+{
+ struct rdma_netdev_alloc_params params;
+ int rc;
+
+ if (!device->rdma_netdev_get_params)
+ return -EOPNOTSUPP;
+
+ rc = device->rdma_netdev_get_params(device, port_num, type, &params);
+ if (rc)
+ return rc;
+
+ return params.initialize_rdma_netdev(device, port_num,
+ netdev, params.param);
+}
+EXPORT_SYMBOL(rdma_init_netdev);
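
A hypothetical caller showing how the two new helpers compose: rdma_alloc_netdev() sizes and allocates the netdev from the driver-reported params, then rdma_init_netdev() lets the driver wire up its rdma_netdev hooks on a caller-provided netdev. The ipoib_setup_common callback name is assumed for illustration:

static struct net_device *my_rdma_netdev_create(struct ib_device *hca, u8 port)
{
	struct net_device *ndev;
	int rc;

	ndev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, "ib%d",
				 NET_NAME_UNKNOWN, ipoib_setup_common);
	if (IS_ERR(ndev))
		return ndev;

	rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, "ib%d",
			      NET_NAME_UNKNOWN, ipoib_setup_common, ndev);
	if (rc) {
		free_netdev(ndev);
		return ERR_PTR(rc);
	}
	return ndev;
}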
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 96f76896488d..31baa8939a4f 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -40,7 +40,6 @@
#ifndef __BNXT_RE_H__
#define __BNXT_RE_H__
#define ROCE_DRV_MODULE_NAME "bnxt_re"
-#define ROCE_DRV_MODULE_VERSION "1.0.0"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
#define BNXT_RE_PAGE_SHIFT_4K (12)
@@ -120,6 +119,8 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
+#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
+#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 77416bc61e6e..604b71875f5f 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -68,6 +68,8 @@ static const char * const bnxt_re_stat_name[] = {
[BNXT_RE_TX_PKTS] = "tx_pkts",
[BNXT_RE_TX_BYTES] = "tx_bytes",
[BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors",
+ [BNXT_RE_RX_DROPS] = "rx_roce_drops",
+ [BNXT_RE_RX_DISCARDS] = "rx_roce_discards",
[BNXT_RE_TO_RETRANSMITS] = "to_retransmits",
[BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd",
[BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded",
@@ -106,7 +108,8 @@ static const char * const bnxt_re_stat_name[] = {
[BNXT_RE_RES_CQ_LOAD_ERR] = "res_cq_load_err",
[BNXT_RE_RES_SRQ_LOAD_ERR] = "res_srq_load_err",
[BNXT_RE_RES_TX_PCI_ERR] = "res_tx_pci_err",
- [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err"
+ [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err",
+ [BNXT_RE_OUT_OF_SEQ_ERR] = "oos_drop_count"
};
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
@@ -128,6 +131,10 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
if (bnxt_re_stats) {
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
+ stats->value[BNXT_RE_RX_DROPS] =
+ le64_to_cpu(bnxt_re_stats->rx_drop_pkts);
+ stats->value[BNXT_RE_RX_DISCARDS] =
+ le64_to_cpu(bnxt_re_stats->rx_discard_pkts);
stats->value[BNXT_RE_RX_PKTS] =
le64_to_cpu(bnxt_re_stats->rx_ucast_pkts);
stats->value[BNXT_RE_RX_BYTES] =
@@ -220,6 +227,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
rdev->stats.res_tx_pci_err;
stats->value[BNXT_RE_RES_RX_PCI_ERR] =
rdev->stats.res_rx_pci_err;
+ stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
+ rdev->stats.res_oos_drop_count;
}
return ARRAY_SIZE(bnxt_re_stat_name);
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index a01a922717d5..76399f477e5c 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -51,6 +51,8 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_RX_DROPS,
+ BNXT_RE_RX_DISCARDS,
BNXT_RE_TO_RETRANSMITS,
BNXT_RE_SEQ_ERR_NAKS_RCVD,
BNXT_RE_MAX_RETRY_EXCEEDED,
@@ -90,6 +92,7 @@ enum bnxt_re_hw_stats {
BNXT_RE_RES_SRQ_LOAD_ERR,
BNXT_RE_RES_TX_PCI_ERR,
BNXT_RE_RES_RX_PCI_ERR,
+ BNXT_RE_OUT_OF_SEQ_ERR,
BNXT_RE_NUM_COUNTERS
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index bc2b9e038439..54fdd4cf5288 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1598,8 +1598,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
new_qp_state = qp_attr->qp_state;
if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
- ib_qp->qp_type, qp_attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ ib_qp->qp_type, qp_attr_mask)) {
dev_err(rdev_to_dev(rdev),
"Invalid attribute mask: %#x specified ",
qp_attr_mask);
@@ -2664,6 +2663,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
nq->budget++;
atomic_inc(&rdev->cq_count);
+ spin_lock_init(&cq->cq_lock);
if (context) {
struct bnxt_re_cq_resp resp;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 20b9f31052bf..cf2282654210 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -67,7 +67,7 @@
#include "hw_counters.h"
static char version[] =
- BNXT_RE_DESC " v" ROCE_DRV_MODULE_VERSION "\n";
+ BNXT_RE_DESC "\n";
MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
@@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
/* SR-IOV helper functions */
@@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p)
if (!rdev)
return;
- bnxt_re_ib_unreg(rdev, false);
+ bnxt_re_ib_unreg(rdev);
}
static void bnxt_re_stop_irq(void *handle)
@@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
/* Driver registration routines used to let the networking driver (bnxt_en)
* to know that the RoCE driver is now installed
*/
-static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev;
int rc;
@@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
return -EINVAL;
en_dev = rdev->en_dev;
- /* Acquire rtnl lock if it is not invoked from netdev event */
- if (lock_wait)
- rtnl_lock();
rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
BNXT_ROCE_ULP);
- if (lock_wait)
- rtnl_unlock();
return rc;
}
@@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
en_dev = rdev->en_dev;
- rtnl_lock();
rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
&bnxt_re_ulp_ops, rdev);
- rtnl_unlock();
return rc;
}
-static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev;
int rc;
@@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
en_dev = rdev->en_dev;
- if (lock_wait)
- rtnl_lock();
rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
- if (lock_wait)
- rtnl_unlock();
return rc;
}
@@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
- rtnl_lock();
num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
rdev->msix_entries,
num_msix_want);
@@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
}
rdev->num_msix = num_msix_got;
done:
- rtnl_unlock();
return rc;
}
@@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
fw_msg->timeout = timeout;
}
-static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
- bool lock_wait)
+static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ring_free_input req = {0};
struct hwrm_ring_free_output resp;
struct bnxt_fw_msg fw_msg;
- bool do_unlock = false;
int rc = -EINVAL;
if (!en_dev)
return rc;
memset(&fw_msg, 0, sizeof(fw_msg));
- if (lock_wait) {
- rtnl_lock();
- do_unlock = true;
- }
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
if (rc)
dev_err(rdev_to_dev(rdev),
"Failed to free HW ring:%d :%#x", req.ring_id, rc);
- if (do_unlock)
- rtnl_unlock();
return rc;
}
@@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
return rc;
memset(&fw_msg, 0, sizeof(fw_msg));
- rtnl_lock();
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
req.enables = 0;
req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
@@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
if (!rc)
*fw_ring_id = le16_to_cpu(resp.ring_id);
- rtnl_unlock();
return rc;
}
static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
- u32 fw_stats_ctx_id, bool lock_wait)
+ u32 fw_stats_ctx_id)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_stat_ctx_free_input req = {0};
struct bnxt_fw_msg fw_msg;
- bool do_unlock = false;
int rc = -EINVAL;
if (!en_dev)
return rc;
memset(&fw_msg, 0, sizeof(fw_msg));
- if (lock_wait) {
- rtnl_lock();
- do_unlock = true;
- }
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
@@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
dev_err(rdev_to_dev(rdev),
"Failed to free HW stats context %#x", rc);
- if (do_unlock)
- rtnl_unlock();
return rc;
}
@@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
return rc;
memset(&fw_msg, 0, sizeof(fw_msg));
- rtnl_lock();
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
@@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
if (!rc)
*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
- rtnl_unlock();
return rc;
}
@@ -567,6 +535,34 @@ static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
return en_dev;
}
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *bnxt_re_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group bnxt_re_dev_attr_group = {
+ .attrs = bnxt_re_attributes,
+};
+
static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
{
ib_unregister_device(&rdev->ibdev);
@@ -579,7 +575,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
/* ib device init */
ibdev->owner = THIS_MODULE;
ibdev->node_type = RDMA_NODE_IB_CA;
- strlcpy(ibdev->name, "bnxt_re%d", IB_DEVICE_NAME_MAX);
strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
strlen(BNXT_RE_DESC) + 5);
ibdev->phys_port_cnt = 1;
@@ -671,34 +666,11 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->get_hw_stats = bnxt_re_ib_get_hw_stats;
ibdev->alloc_hw_stats = bnxt_re_ib_alloc_hw_stats;
+ rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
ibdev->driver_id = RDMA_DRIVER_BNXT_RE;
- return ib_register_device(ibdev, NULL);
+ return ib_register_device(ibdev, "bnxt_re%d", NULL);
}
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
-}
-
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
-}
-
-static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
-static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);
-
-static struct device_attribute *bnxt_re_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type
-};
-
static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
dev_put(rdev->netdev);
@@ -896,10 +868,8 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
int i;
- if (rdev->nq[0].hwq.max_elements) {
- for (i = 1; i < rdev->num_msix; i++)
- bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
- }
+ for (i = 1; i < rdev->num_msix; i++)
+ bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
if (rdev->qplib_res.rcfw)
bnxt_qplib_cleanup_res(&rdev->qplib_res);
@@ -908,6 +878,7 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
int rc = 0, i;
+ int num_vec_enabled = 0;
bnxt_qplib_init_res(&rdev->qplib_res);
@@ -923,25 +894,29 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
"Failed to enable NQ with rc = 0x%x", rc);
goto fail;
}
+ num_vec_enabled++;
}
return 0;
fail:
+ for (i = num_vec_enabled; i >= 0; i--)
+ bnxt_qplib_disable_nq(&rdev->nq[i]);
+
return rc;
}
-static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
int i;
for (i = 0; i < rdev->num_msix - 1; i++) {
- bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait);
+ bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
bnxt_qplib_free_nq(&rdev->nq[i]);
}
}
-static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
- bnxt_re_free_nq_res(rdev, lock_wait);
+ bnxt_re_free_nq_res(rdev);
if (rdev->qplib_res.dpi_tbl.max) {
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
@@ -957,6 +932,7 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
int rc = 0, i;
+ int num_vec_created = 0;
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
@@ -983,7 +959,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
if (rc) {
dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
i, rc);
- goto dealloc_dpi;
+ goto free_nq;
}
rc = bnxt_re_net_ring_alloc
(rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
@@ -996,14 +972,17 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
dev_err(rdev_to_dev(rdev),
"Failed to allocate NQ fw id with rc = 0x%x",
rc);
+ bnxt_qplib_free_nq(&rdev->nq[i]);
goto free_nq;
}
+ num_vec_created++;
}
return 0;
free_nq:
- for (i = 0; i < rdev->num_msix - 1; i++)
+ for (i = num_vec_created; i >= 0; i--) {
+ bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
bnxt_qplib_free_nq(&rdev->nq[i]);
-dealloc_dpi:
+ }
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
&rdev->qplib_res.dpi_tbl,
&rdev->dpi_privileged);
@@ -1021,12 +1000,17 @@ static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
struct ib_event ib_event;
ib_event.device = ibdev;
- if (qp)
+ if (qp) {
ib_event.element.qp = qp;
- else
+ ib_event.event = event;
+ if (qp->event_handler)
+ qp->event_handler(&ib_event, qp->qp_context);
+
+ } else {
ib_event.element.port_num = port_num;
- ib_event.event = event;
- ib_dispatch_event(&ib_event);
+ ib_event.event = event;
+ ib_dispatch_event(&ib_event);
+ }
}
#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN 0x02
@@ -1219,43 +1203,42 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return 0;
}
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
{
- int i, rc;
+ int rc;
if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
- for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++)
- device_remove_file(&rdev->ibdev.dev,
- bnxt_re_attributes[i]);
/* Cleanup ib dev */
bnxt_re_unregister_ib(rdev);
}
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
- cancel_delayed_work(&rdev->worker);
+ cancel_delayed_work_sync(&rdev->worker);
- bnxt_re_cleanup_res(rdev);
- bnxt_re_free_res(rdev, lock_wait);
+ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
+ &rdev->flags))
+ bnxt_re_cleanup_res(rdev);
+ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
+ bnxt_re_free_res(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
if (rc)
dev_warn(rdev_to_dev(rdev),
"Failed to deinitialize RCFW: %#x", rc);
- bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id,
- lock_wait);
+ bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
- bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
- rc = bnxt_re_free_msix(rdev, lock_wait);
+ rc = bnxt_re_free_msix(rdev);
if (rc)
dev_warn(rdev_to_dev(rdev),
"Failed to free MSI-X vectors: %#x", rc);
}
if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
- rc = bnxt_re_unregister_netdev(rdev, lock_wait);
+ rc = bnxt_re_unregister_netdev(rdev);
if (rc)
dev_warn(rdev_to_dev(rdev),
"Failed to unregister with netdev: %#x", rc);
@@ -1274,7 +1257,13 @@ static void bnxt_re_worker(struct work_struct *work)
static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{
- int i, j, rc;
+ int rc;
+
+ bool locked;
+
+ /* Acquire rtnl lock throughout this function */
+ rtnl_lock();
+ locked = true;
/* Registered a new RoCE device instance to netdev */
rc = bnxt_re_register_netdev(rdev);
@@ -1358,12 +1347,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
pr_err("Failed to allocate resources: %#x\n", rc);
goto fail;
}
+ set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
rc = bnxt_re_init_res(rdev);
if (rc) {
pr_err("Failed to initialize resources: %#x\n", rc);
goto fail;
}
+ set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
+
if (!rdev->is_virtfn) {
rc = bnxt_re_setup_qos(rdev);
if (rc)
@@ -1374,28 +1366,17 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
+ rtnl_unlock();
+ locked = false;
+
/* Register ib dev */
rc = bnxt_re_register_ib(rdev);
if (rc) {
pr_err("Failed to register with IB: %#x\n", rc);
goto fail;
}
- dev_info(rdev_to_dev(rdev), "Device registered successfully");
- for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
- rc = device_create_file(&rdev->ibdev.dev,
- bnxt_re_attributes[i]);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to create IB sysfs: %#x", rc);
- /* Must clean up all created device files */
- for (j = 0; j < i; j++)
- device_remove_file(&rdev->ibdev.dev,
- bnxt_re_attributes[j]);
- bnxt_re_unregister_ib(rdev);
- goto fail;
- }
- }
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
+ dev_info(rdev_to_dev(rdev), "Device registered successfully");
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
@@ -1404,17 +1385,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
return 0;
free_sctx:
- bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true);
+ bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
disable_rcfw:
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
- bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
free_rcfw:
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
- bnxt_re_ib_unreg(rdev, true);
+ if (!locked)
+ rtnl_lock();
+ bnxt_re_ib_unreg(rdev);
+ rtnl_unlock();
+
return rc;
}
@@ -1567,7 +1552,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
*/
if (atomic_read(&rdev->sched_count) > 0)
goto exit;
- bnxt_re_ib_unreg(rdev, false);
+ bnxt_re_ib_unreg(rdev);
bnxt_re_remove_one(rdev);
bnxt_re_dev_unreg(rdev);
break;
@@ -1646,7 +1631,10 @@ static void __exit bnxt_re_mod_exit(void)
*/
flush_workqueue(bnxt_re_wq);
bnxt_re_dev_stop(rdev);
- bnxt_re_ib_unreg(rdev, true);
+ /* Acquire the rtnl_lock as the L2 resources are freed here */
+ rtnl_lock();
+ bnxt_re_ib_unreg(rdev);
+ rtnl_unlock();
bnxt_re_remove_one(rdev);
bnxt_re_dev_unreg(rdev);
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 6ad0d46ab879..b98b054148cd 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -36,6 +36,8 @@
* Description: Fast Path Operators
*/
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
@@ -71,8 +73,7 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
if (!qp->sq.flushed) {
dev_dbg(&scq->hwq.pdev->dev,
- "QPLIB: FP: Adding to SQ Flush list = %p",
- qp);
+ "FP: Adding to SQ Flush list = %p\n", qp);
bnxt_qplib_cancel_phantom_processing(qp);
list_add_tail(&qp->sq_flush, &scq->sqf_head);
qp->sq.flushed = true;
@@ -80,8 +81,7 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
if (!qp->srq) {
if (!qp->rq.flushed) {
dev_dbg(&rcq->hwq.pdev->dev,
- "QPLIB: FP: Adding to RQ Flush list = %p",
- qp);
+ "FP: Adding to RQ Flush list = %p\n", qp);
list_add_tail(&qp->rq_flush, &rcq->rqf_head);
qp->rq.flushed = true;
}
@@ -207,7 +207,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
if (!qp->sq_hdr_buf) {
rc = -ENOMEM;
dev_err(&res->pdev->dev,
- "QPLIB: Failed to create sq_hdr_buf");
+ "Failed to create sq_hdr_buf\n");
goto fail;
}
}
@@ -221,7 +221,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
if (!qp->rq_hdr_buf) {
rc = -ENOMEM;
dev_err(&res->pdev->dev,
- "QPLIB: Failed to create rq_hdr_buf");
+ "Failed to create rq_hdr_buf\n");
goto fail;
}
}
@@ -277,8 +277,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
num_cqne_processed++;
else
dev_warn(&nq->pdev->dev,
- "QPLIB: cqn - type 0x%x not handled",
- type);
+ "cqn - type 0x%x not handled\n", type);
spin_unlock_bh(&cq->compl_lock);
break;
}
@@ -298,7 +297,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
num_srqne_processed++;
else
dev_warn(&nq->pdev->dev,
- "QPLIB: SRQ event 0x%x not handled",
+ "SRQ event 0x%x not handled\n",
nqsrqe->event);
break;
}
@@ -306,8 +305,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
break;
default:
dev_warn(&nq->pdev->dev,
- "QPLIB: nqe with type = 0x%x not handled",
- type);
+ "nqe with type = 0x%x not handled\n", type);
break;
}
raw_cons++;
@@ -360,7 +358,8 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
}
/* Make sure the HW is stopped! */
- bnxt_qplib_nq_stop_irq(nq, true);
+ if (nq->requested)
+ bnxt_qplib_nq_stop_irq(nq, true);
if (nq->bar_reg_iomem)
iounmap(nq->bar_reg_iomem);
@@ -396,7 +395,7 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
rc = irq_set_affinity_hint(nq->vector, &nq->mask);
if (rc) {
dev_warn(&nq->pdev->dev,
- "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+ "set affinity failed; vector: %d nq_idx: %d\n",
nq->vector, nq_indx);
}
nq->requested = true;
@@ -443,7 +442,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
if (rc) {
dev_err(&nq->pdev->dev,
- "QPLIB: Failed to request irq for nq-idx %d", nq_idx);
+ "Failed to request irq for nq-idx %d\n", nq_idx);
goto fail;
}
@@ -662,8 +661,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
spin_lock(&srq_hwq->lock);
if (srq->start_idx == srq->last_idx) {
- dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!",
- srq->id);
+ dev_err(&srq_hwq->pdev->dev,
+ "FP: SRQ (0x%x) is full!\n", srq->id);
rc = -EINVAL;
spin_unlock(&srq_hwq->lock);
goto done;
@@ -1324,7 +1323,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
}
if (i == res->sgid_tbl.max)
- dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");
+ dev_warn(&res->pdev->dev, "SGID not found??\n");
qp->ah.hop_limit = sb->hop_limit;
qp->ah.traffic_class = sb->traffic_class;
@@ -1536,7 +1535,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
if (bnxt_qplib_queue_full(sq)) {
dev_err(&sq->hwq.pdev->dev,
- "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
+ "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
sq->q_full_delta);
rc = -ENOMEM;
@@ -1561,7 +1560,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
/* Copy the inline data */
if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
dev_warn(&sq->hwq.pdev->dev,
- "QPLIB: Inline data length > 96 detected");
+ "Inline data length > 96 detected\n");
data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
} else {
data_len = wqe->inline_len;
@@ -1776,7 +1775,7 @@ done:
queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
} else {
dev_err(&sq->hwq.pdev->dev,
- "QPLIB: FP: Failed to allocate SQ nq_work!");
+ "FP: Failed to allocate SQ nq_work!\n");
rc = -ENOMEM;
}
}
@@ -1815,13 +1814,12 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
sch_handler = true;
dev_dbg(&rq->hwq.pdev->dev,
- "%s Error QP. Scheduling for poll_cq\n",
- __func__);
+ "%s: Error QP. Scheduling for poll_cq\n", __func__);
goto queue_err;
}
if (bnxt_qplib_queue_full(rq)) {
dev_err(&rq->hwq.pdev->dev,
- "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
+ "FP: QP (0x%x) RQ is full!\n", qp->id);
rc = -EINVAL;
goto done;
}
@@ -1870,7 +1868,7 @@ queue_err:
queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
} else {
dev_err(&rq->hwq.pdev->dev,
- "QPLIB: FP: Failed to allocate RQ nq_work!");
+ "FP: Failed to allocate RQ nq_work!\n");
rc = -ENOMEM;
}
}
@@ -1932,7 +1930,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
if (!cq->dpi) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: FP: CREATE_CQ failed due to NULL DPI");
+ "FP: CREATE_CQ failed due to NULL DPI\n");
return -EINVAL;
}
req.dpi = cpu_to_le32(cq->dpi->dpi);
@@ -1969,6 +1967,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
INIT_LIST_HEAD(&cq->sqf_head);
INIT_LIST_HEAD(&cq->rqf_head);
spin_lock_init(&cq->compl_lock);
+ spin_lock_init(&cq->flush_lock);
bnxt_qplib_arm_cq_enable(cq);
return 0;
@@ -2172,7 +2171,7 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
* comes back
*/
dev_dbg(&cq->hwq.pdev->dev,
- "FP:Got Phantom CQE");
+ "FP: Got Phantom CQE\n");
sq->condition = false;
sq->single = true;
rc = 0;
@@ -2189,7 +2188,7 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
peek_raw_cq_cons++;
}
dev_err(&cq->hwq.pdev->dev,
- "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
+ "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
rc = -EINVAL;
}
@@ -2213,7 +2212,7 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: Process Req qp is NULL");
+ "FP: Process Req qp is NULL\n");
return -EINVAL;
}
sq = &qp->sq;
@@ -2221,16 +2220,14 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
if (cqe_sq_cons > sq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process req reported ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
+ "FP: CQ Process req reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
cqe_sq_cons, sq->hwq.max_elements);
return -EINVAL;
}
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
/* Require to walk the sq's swq to fabricate CQEs for all previously
@@ -2262,9 +2259,7 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
hwcqe->status != CQ_REQ_STATUS_OK) {
cqe->status = hwcqe->status;
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Processed Req ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
+ "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
sw_sq_cons, cqe->wr_id, cqe->status);
cqe++;
(*budget)--;
@@ -2330,12 +2325,12 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
+ dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
return -EINVAL;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
@@ -2356,9 +2351,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
return -EINVAL;
if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process RC ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
wr_id_idx, srq->hwq.max_elements);
return -EINVAL;
}
@@ -2371,9 +2364,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
rq = &qp->rq;
if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process RC ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
@@ -2409,12 +2400,12 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
+ dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
return -EINVAL;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
cqe = *pcqe;
@@ -2439,9 +2430,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process UD ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
wr_id_idx, srq->hwq.max_elements);
return -EINVAL;
}
@@ -2454,9 +2443,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
rq = &qp->rq;
if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process UD ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
@@ -2508,13 +2495,12 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cq Raw/QP1 qp is NULL");
+ dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
return -EINVAL;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
cqe = *pcqe;
@@ -2543,14 +2529,12 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
srq = qp->srq;
if (!srq) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: SRQ used but not defined??");
+ "FP: SRQ used but not defined??\n");
return -EINVAL;
}
if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Raw/QP1 ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
wr_id_idx, srq->hwq.max_elements);
return -EINVAL;
}
@@ -2563,9 +2547,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
rq = &qp->rq;
if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: ix 0x%x exceeded RQ max 0x%x",
+ "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
@@ -2600,14 +2582,14 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
/* Check the Status */
if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
dev_warn(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
+ "FP: CQ Process Terminal Error status = 0x%x\n",
hwcqe->status);
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process terminal qp is NULL");
+ "FP: CQ Process terminal qp is NULL\n");
return -EINVAL;
}
@@ -2623,16 +2605,14 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
if (cqe_cons > sq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process terminal reported ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
+ "FP: CQ Process terminal reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
cqe_cons, sq->hwq.max_elements);
goto do_rq;
}
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto sq_done;
}
@@ -2673,16 +2653,14 @@ do_rq:
goto done;
} else if (cqe_cons > rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Processed terminal ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
+ "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
cqe_cons, rq->hwq.max_elements);
goto done;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
rc = 0;
goto done;
}
@@ -2704,7 +2682,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
/* Check the Status */
if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
+ "FP: CQ Process Cutoff Error status = 0x%x\n",
hwcqe->status);
return -EINVAL;
}
@@ -2724,16 +2702,12 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
spin_lock_irqsave(&cq->flush_lock, flags);
list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
- dev_dbg(&cq->hwq.pdev->dev,
- "QPLIB: FP: Flushing SQ QP= %p",
- qp);
+ dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
__flush_sq(&qp->sq, qp, &cqe, &budget);
}
list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
- dev_dbg(&cq->hwq.pdev->dev,
- "QPLIB: FP: Flushing RQ QP= %p",
- qp);
+ dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
__flush_rq(&qp->rq, qp, &cqe, &budget);
}
spin_unlock_irqrestore(&cq->flush_lock, flags);
@@ -2801,7 +2775,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
goto exit;
default:
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cq unknown type 0x%lx",
+ "process_cq unknown type 0x%lx\n",
hw_cqe->cqe_type_toggle &
CQ_BASE_CQE_TYPE_MASK);
rc = -EINVAL;
@@ -2814,7 +2788,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
* next one
*/
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cqe error rc = 0x%x", rc);
+ "process_cqe error rc = 0x%x\n", rc);
}
raw_cons++;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 2852d350ada1..be4e33e9f962 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -35,6 +35,9 @@
*
* Description: RDMA Controller HW interface
*/
+
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
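The dev_fmt() define above is the mechanism that lets every "QPLIB: " literal be dropped from the messages in this series: when it is defined before the dev_printk helpers are pulled in, the core prepends the prefix to each dev_err()/dev_warn()/dev_dbg() in the file. A minimal sketch of the pattern (the example function and message are illustrative, not from the patch):

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/device.h>

static void report_failure(struct device *dev)
{
	/* Prints "<driver> <device>: QPLIB: something failed" */
	dev_err(dev, "something failed\n");
}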
@@ -96,14 +99,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW not initialized, reject opcode 0x%x",
- opcode);
+ "RCFW not initialized, reject opcode 0x%x\n", opcode);
return -EINVAL;
}
if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
- dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
+ dev_err(&rcfw->pdev->dev, "RCFW already initialized!\n");
return -EINVAL;
}
@@ -115,7 +117,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
*/
spin_lock_irqsave(&cmdq->lock, flags);
if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
+ dev_err(&rcfw->pdev->dev, "RCFW: CMDQ is full!\n");
spin_unlock_irqrestore(&cmdq->lock, flags);
return -EAGAIN;
}
@@ -154,7 +156,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
if (!cmdqe) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW request failed with no cmdqe!");
+ "RCFW request failed with no cmdqe!\n");
goto done;
}
/* Copy a segment of the req cmd to the cmdq */
@@ -210,7 +212,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
/* send failed */
- dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n",
cookie, opcode);
return rc;
}
@@ -224,7 +226,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
rc = __wait_for_resp(rcfw, cookie);
if (rc) {
/* timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timedout (%d)msec\n",
cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
return rc;
@@ -232,7 +234,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
if (evnt->status) {
/* failed with status */
- dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
cookie, opcode, evnt->status);
rc = -EFAULT;
}
@@ -298,9 +300,9 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
qp_id = le32_to_cpu(err_event->xid);
qp = rcfw->qp_tbl[qp_id].qp_handle;
dev_dbg(&rcfw->pdev->dev,
- "QPLIB: Received QP error notification");
+ "Received QP error notification\n");
dev_dbg(&rcfw->pdev->dev,
- "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
+ "qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
qp_id, err_event->req_err_state_reason,
err_event->res_err_state_reason);
if (!qp)
@@ -309,8 +311,17 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
rcfw->aeq_handler(rcfw, qp_event, qp);
break;
default:
- /* Command Response */
- spin_lock_irqsave(&cmdq->lock, flags);
+ /*
+ * Command Response
+ * cmdq->lock needs to be acquired to synchronize
+ * the command send and completion reaping. This function
+ * is always called with creq->lock held. Using
+ * the nested variant of spin_lock.
+ */
+ spin_lock_irqsave_nested(&cmdq->lock, flags,
+ SINGLE_DEPTH_NESTING);
cookie = le16_to_cpu(qp_event->cookie);
mcookie = qp_event->cookie;
blocked = cookie & RCFW_CMD_IS_BLOCKING;
@@ -322,14 +333,16 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
crsqe->resp = NULL;
} else {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
- crsqe->resp ? "mismatch" : "collision",
- crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
+ if (crsqe->resp && crsqe->resp->cookie)
+ dev_err(&rcfw->pdev->dev,
+ "CMD %s cookie sent=%#x, recd=%#x\n",
+ crsqe->resp ? "mismatch" : "collision",
+ crsqe->resp ? crsqe->resp->cookie : 0,
+ mcookie);
}
if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
dev_warn(&rcfw->pdev->dev,
- "QPLIB: CMD bit %d was not requested", cbit);
+ "CMD bit %d was not requested\n", cbit);
cmdq->cons += crsqe->req_size;
crsqe->req_size = 0;
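The comment above explains the locking change; the sketch below shows the shape of that pattern in isolation (lock names are illustrative): one spinlock is taken while another is already held, and the inner acquisition is annotated with SINGLE_DEPTH_NESTING so lockdep treats the intentional one-level nesting as such rather than flagging it.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* stands in for creq->lock */
static DEFINE_SPINLOCK(inner_lock);	/* stands in for cmdq->lock */

static void reap_completion(void)
{
	unsigned long oflags, iflags;

	spin_lock_irqsave(&outer_lock, oflags);
	spin_lock_irqsave_nested(&inner_lock, iflags, SINGLE_DEPTH_NESTING);
	/* ... consume the command completion under both locks ... */
	spin_unlock_irqrestore(&inner_lock, iflags);
	spin_unlock_irqrestore(&outer_lock, oflags);
}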
@@ -376,14 +389,14 @@ static void bnxt_qplib_service_creq(unsigned long data)
(rcfw, (struct creq_func_event *)creqe))
rcfw->creq_func_event_processed++;
else
- dev_warn
- (&rcfw->pdev->dev, "QPLIB:aeqe:%#x Not handled",
- type);
+ dev_warn(&rcfw->pdev->dev,
+ "aeqe:%#x Not handled\n", type);
break;
default:
- dev_warn(&rcfw->pdev->dev, "QPLIB: creqe with ");
- dev_warn(&rcfw->pdev->dev,
- "QPLIB: op_event = 0x%x not handled", type);
+ if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
+ dev_warn(&rcfw->pdev->dev,
+ "creqe with event 0x%x not handled\n",
+ type);
break;
}
raw_cons++;
@@ -551,7 +564,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
HWQ_TYPE_L2_CMPL)) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: HW channel CREQ allocation failed");
+ "HW channel CREQ allocation failed\n");
goto fail;
}
rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
@@ -560,7 +573,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
HWQ_TYPE_CTX)) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: HW channel CMDQ allocation failed");
+ "HW channel CMDQ allocation failed\n");
goto fail;
}
@@ -605,21 +618,18 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
bnxt_qplib_rcfw_stop_irq(rcfw, true);
- if (rcfw->cmdq_bar_reg_iomem)
- iounmap(rcfw->cmdq_bar_reg_iomem);
- rcfw->cmdq_bar_reg_iomem = NULL;
-
- if (rcfw->creq_bar_reg_iomem)
- iounmap(rcfw->creq_bar_reg_iomem);
- rcfw->creq_bar_reg_iomem = NULL;
+ iounmap(rcfw->cmdq_bar_reg_iomem);
+ iounmap(rcfw->creq_bar_reg_iomem);
indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
if (indx != rcfw->bmap_size)
dev_err(&rcfw->pdev->dev,
- "QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
+ "disabling RCFW with pending cmd-bit %lx\n", indx);
kfree(rcfw->cmdq_bitmap);
rcfw->bmap_size = 0;
+ rcfw->cmdq_bar_reg_iomem = NULL;
+ rcfw->creq_bar_reg_iomem = NULL;
rcfw->aeq_handler = NULL;
rcfw->vector = 0;
}
@@ -681,8 +691,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
RCFW_COMM_BASE_OFFSET,
RCFW_COMM_SIZE);
if (!rcfw->cmdq_bar_reg_iomem) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: CMDQ BAR region %d mapping failed",
+ dev_err(&rcfw->pdev->dev, "CMDQ BAR region %d mapping failed\n",
rcfw->cmdq_bar_reg);
return -ENOMEM;
}
@@ -697,14 +706,15 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
if (!res_base)
dev_err(&rcfw->pdev->dev,
- "QPLIB: CREQ BAR region %d resc start is 0!",
+ "CREQ BAR region %d resc start is 0!\n",
rcfw->creq_bar_reg);
rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
4);
if (!rcfw->creq_bar_reg_iomem) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: CREQ BAR region %d mapping failed",
+ dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
rcfw->creq_bar_reg);
+ iounmap(rcfw->cmdq_bar_reg_iomem);
+ rcfw->cmdq_bar_reg_iomem = NULL;
return -ENOMEM;
}
rcfw->creq_qp_event_processed = 0;
@@ -717,7 +727,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
if (rc) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
+ "Failed to request IRQ for CREQ rc = 0x%x\n", rc);
bnxt_qplib_disable_rcfw_channel(rcfw);
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 46416dfe8830..9a8687dc0a79 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -154,6 +154,8 @@ struct bnxt_qplib_qp_node {
void *qp_handle; /* ptr to qplib_qp */
};
+#define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF
+
/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
struct pci_dev *pdev;
@@ -190,6 +192,8 @@ struct bnxt_qplib_rcfw {
struct bnxt_qplib_crsq *crsqe_tbl;
int qp_tbl_size;
struct bnxt_qplib_qp_node *qp_tbl;
+ u64 oos_prev;
+ u32 init_oos_stats;
};
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 539a5d44e6db..59eeac55626f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -36,6 +36,8 @@
* Description: QPLib resource manager
*/
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -68,8 +70,7 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
pbl->pg_map_arr[i]);
else
dev_warn(&pdev->dev,
- "QPLIB: PBL free pg_arr[%d] empty?!",
- i);
+ "PBL free pg_arr[%d] empty?!\n", i);
pbl->pg_arr[i] = NULL;
}
}
@@ -537,7 +538,7 @@ static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
if (!pkey_tbl->tbl)
- dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present");
+ dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
else
kfree(pkey_tbl->tbl);
@@ -578,7 +579,7 @@ int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd *pd)
{
if (test_and_set_bit(pd->id, pdt->tbl)) {
- dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d",
+ dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
pd->id);
return -EINVAL;
}
@@ -639,11 +640,11 @@ int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi *dpi)
{
if (dpi->dpi >= dpit->max) {
- dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d", dpi->dpi);
+ dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
return -EINVAL;
}
if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
- dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d",
+ dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
dpi->dpi);
return -EINVAL;
}
@@ -673,22 +674,21 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
u32 dbr_len, bytes;
if (dpit->dbr_bar_reg_iomem) {
- dev_err(&res->pdev->dev,
- "QPLIB: DBR BAR region %d already mapped", dbr_bar_reg);
+ dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
+ dbr_bar_reg);
return -EALREADY;
}
bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
if (!bar_reg_base) {
- dev_err(&res->pdev->dev,
- "QPLIB: BAR region %d resc start failed", dbr_bar_reg);
+ dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
+ dbr_bar_reg);
return -ENOMEM;
}
dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
- dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %d",
- dbr_len);
+ dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
return -ENOMEM;
}
@@ -696,8 +696,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
dbr_len);
if (!dpit->dbr_bar_reg_iomem) {
dev_err(&res->pdev->dev,
- "QPLIB: FP: DBR BAR region %d mapping failed",
- dbr_bar_reg);
+ "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
return -ENOMEM;
}
@@ -767,7 +766,7 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
&stats->dma_map, GFP_KERNEL);
if (!stats->dma) {
- dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
+ dev_err(&pdev->dev, "Stats DMA allocation failed\n");
return -ENOMEM;
}
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 4097f3fa25c5..5216b5f844cc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -36,6 +36,8 @@
* Description: Slow Path Operators
*/
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
@@ -89,7 +91,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
if (!sbuf) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: SP: QUERY_FUNC alloc side buffer failed");
+ "SP: QUERY_FUNC alloc side buffer failed\n");
return -ENOMEM;
}
@@ -135,8 +137,16 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_srq = le16_to_cpu(sb->max_srq);
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
- /* Bono only reports 1 PKEY for now, but it can support > 1 */
attr->max_pkey = le32_to_cpu(sb->max_pkeys);
+ /*
+ * Some versions of FW reports more than 0xFFFF.
+ * Restrict it for now to 0xFFFF to avoid
+ * reporting trucated value
+ */
+ if (attr->max_pkey > 0xFFFF) {
+ /* ib_port_attr::pkey_tbl_len is u16 */
+ attr->max_pkey = 0xFFFF;
+ }
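The clamp exists because the IB core reports the table length through a u16 (ib_port_attr::pkey_tbl_len), so anything above 0xFFFF would truncate. An equivalent idiom, shown only as a sketch (helper name is hypothetical):

#include <linux/kernel.h>

/* Sketch: clamp a 32-bit firmware count to what a u16 consumer can hold. */
static u16 clamp_pkey_count(u32 max_pkeys)
{
	return min_t(u32, max_pkeys, 0xFFFF);
}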
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
attr->l2_db_size = (sb->l2_db_space_size + 1) *
@@ -186,8 +196,7 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
(void *)&resp,
NULL, 0);
if (rc) {
- dev_err(&res->pdev->dev,
- "QPLIB: Failed to set function resources");
+ dev_err(&res->pdev->dev, "Failed to set function resources\n");
}
return rc;
}
@@ -199,7 +208,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
{
if (index >= sgid_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: Index %d exceeded SGID table max (%d)",
+ "Index %d exceeded SGID table max (%d)\n",
index, sgid_tbl->max);
return -EINVAL;
}
@@ -217,13 +226,12 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int index;
if (!sgid_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
+ dev_err(&res->pdev->dev, "SGID table not allocated\n");
return -EINVAL;
}
/* Do we need a sgid_lock here? */
if (!sgid_tbl->active) {
- dev_err(&res->pdev->dev,
- "QPLIB: SGID table has no active entries");
+ dev_err(&res->pdev->dev, "SGID table has no active entries\n");
return -ENOMEM;
}
for (index = 0; index < sgid_tbl->max; index++) {
@@ -231,7 +239,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
break;
}
if (index == sgid_tbl->max) {
- dev_warn(&res->pdev->dev, "GID not found in the SGID table");
+ dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
return 0;
}
/* Remove GID from the SGID table */
@@ -244,7 +252,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
if (sgid_tbl->hw_id[index] == 0xFFFF) {
dev_err(&res->pdev->dev,
- "QPLIB: GID entry contains an invalid HW id");
+ "GID entry contains an invalid HW id\n");
return -EINVAL;
}
req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
@@ -258,7 +266,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
sgid_tbl->vlan[index] = 0;
sgid_tbl->active--;
dev_dbg(&res->pdev->dev,
- "QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x",
+ "SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
index, sgid_tbl->hw_id[index], sgid_tbl->active);
sgid_tbl->hw_id[index] = (u16)-1;
@@ -277,20 +285,19 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int i, free_idx;
if (!sgid_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
+ dev_err(&res->pdev->dev, "SGID table not allocated\n");
return -EINVAL;
}
/* Do we need a sgid_lock here? */
if (sgid_tbl->active == sgid_tbl->max) {
- dev_err(&res->pdev->dev, "QPLIB: SGID table is full");
+ dev_err(&res->pdev->dev, "SGID table is full\n");
return -ENOMEM;
}
free_idx = sgid_tbl->max;
for (i = 0; i < sgid_tbl->max; i++) {
if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
dev_dbg(&res->pdev->dev,
- "QPLIB: SGID entry already exist in entry %d!",
- i);
+ "SGID entry already exist in entry %d!\n", i);
*index = i;
return -EALREADY;
} else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
@@ -301,7 +308,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
if (free_idx == sgid_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: SGID table is FULL but count is not MAX??");
+ "SGID table is FULL but count is not MAX??\n");
return -ENOMEM;
}
if (update) {
@@ -348,7 +355,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
sgid_tbl->vlan[free_idx] = 1;
dev_dbg(&res->pdev->dev,
- "QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x",
+ "SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
*index = free_idx;
@@ -404,7 +411,7 @@ int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
}
if (index >= pkey_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: Index %d exceeded PKEY table max (%d)",
+ "Index %d exceeded PKEY table max (%d)\n",
index, pkey_tbl->max);
return -EINVAL;
}
@@ -419,14 +426,13 @@ int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
int i, rc = 0;
if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
+ dev_err(&res->pdev->dev, "PKEY table not allocated\n");
return -EINVAL;
}
/* Do we need a pkey_lock here? */
if (!pkey_tbl->active) {
- dev_err(&res->pdev->dev,
- "QPLIB: PKEY table has no active entries");
+ dev_err(&res->pdev->dev, "PKEY table has no active entries\n");
return -ENOMEM;
}
for (i = 0; i < pkey_tbl->max; i++) {
@@ -435,8 +441,7 @@ int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
}
if (i == pkey_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: PKEY 0x%04x not found in the pkey table",
- *pkey);
+ "PKEY 0x%04x not found in the pkey table\n", *pkey);
return -ENOMEM;
}
memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
@@ -453,13 +458,13 @@ int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
int i, free_idx, rc = 0;
if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
+ dev_err(&res->pdev->dev, "PKEY table not allocated\n");
return -EINVAL;
}
/* Do we need a pkey_lock here? */
if (pkey_tbl->active == pkey_tbl->max) {
- dev_err(&res->pdev->dev, "QPLIB: PKEY table is full");
+ dev_err(&res->pdev->dev, "PKEY table is full\n");
return -ENOMEM;
}
free_idx = pkey_tbl->max;
@@ -471,7 +476,7 @@ int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
}
if (free_idx == pkey_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: PKEY table is FULL but count is not MAX??");
+ "PKEY table is FULL but count is not MAX??\n");
return -ENOMEM;
}
/* Add PKEY to the pkey_tbl */
@@ -555,8 +560,7 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
int rc;
if (mrw->lkey == 0xFFFFFFFF) {
- dev_info(&res->pdev->dev,
- "QPLIB: SP: Free a reserved lkey MRW");
+ dev_info(&res->pdev->dev, "SP: Free a reserved lkey MRW\n");
return 0;
}
@@ -666,9 +670,8 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
pages++;
if (pages > MAX_PBL_LVL_1_PGS) {
- dev_err(&res->pdev->dev, "QPLIB: SP: Reg MR pages ");
dev_err(&res->pdev->dev,
- "requested (0x%x) exceeded max (0x%x)",
+ "SP: Reg MR pages requested (0x%x) exceeded max (0x%x)\n",
pages, MAX_PBL_LVL_1_PGS);
return -ENOMEM;
}
@@ -684,7 +687,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
HWQ_TYPE_CTX);
if (rc) {
dev_err(&res->pdev->dev,
- "SP: Reg MR memory allocation failed");
+ "SP: Reg MR memory allocation failed\n");
return -ENOMEM;
}
/* Write to the hwq */
@@ -795,7 +798,7 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
if (!sbuf) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: SP: QUERY_ROCE_STATS alloc side buffer failed");
+ "SP: QUERY_ROCE_STATS alloc side buffer failed\n");
return -ENOMEM;
}
@@ -845,6 +848,16 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
+ if (!rcfw->init_oos_stats) {
+ rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
+ rcfw->init_oos_stats = 1;
+ } else {
+ stats->res_oos_drop_count +=
+ (le64_to_cpu(sb->res_oos_drop_count) -
+ rcfw->oos_prev) & BNXT_QPLIB_OOS_COUNT_MASK;
+ rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
+ }
+
bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
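The new res_oos_drop_count handling accumulates a free-running hardware counter into a monotonic software total by adding only the masked delta since the previous read, so a firmware reset or counter wrap never shows up as a huge bogus jump. The pattern in isolation (names illustrative):

/* Sketch: accumulate wrapped deltas of a free-running HW counter. */
static void accumulate_counter(u64 *total, u64 *prev, u64 hw_now, u64 mask)
{
	*total += (hw_now - *prev) & mask;
	*prev = hw_now;
}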
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 9d3e8b994945..8079d7f5a008 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -205,6 +205,16 @@ struct bnxt_qplib_roce_stats {
/* res_tx_pci_err is 64 b */
u64 res_rx_pci_err;
/* res_rx_pci_err is 64 b */
+ u64 res_oos_drop_count;
+ /* res_oos_drop_count */
+ u64 active_qp_count_p0;
+ /* port 0 active qps */
+ u64 active_qp_count_p1;
+ /* port 1 active qps */
+ u64 active_qp_count_p2;
+ /* port 2 active qps */
+ u64 active_qp_count_p3;
+ /* port 3 active qps */
};
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 3e5a4f760d0e..8a9ead419ac2 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2929,6 +2929,11 @@ struct creq_query_roce_stats_resp_sb {
__le64 res_srq_load_err;
__le64 res_tx_pci_err;
__le64 res_rx_pci_err;
+ __le64 res_oos_drop_count;
+ __le64 active_qp_count_p0;
+ __le64 active_qp_count_p1;
+ __le64 active_qp_count_p2;
+ __le64 active_qp_count_p3;
};
/* QP error notification event (16 bytes) */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 1b9ff21aa1d5..ebbec02cebe0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1127,17 +1127,18 @@ static int iwch_query_port(struct ib_device *ibdev,
return 0;
}
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
ibdev.dev);
pr_debug("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
ibdev.dev);
@@ -1148,9 +1149,10 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
lldev->ethtool_ops->get_drvinfo(lldev, &info);
return sprintf(buf, "%s\n", info.driver);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
ibdev.dev);
@@ -1158,6 +1160,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
iwch_dev->rdev.rnic_info.pdev->device);
}
+static DEVICE_ATTR_RO(board_id);
enum counters {
IPINRECEIVES,
@@ -1274,14 +1277,15 @@ static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
return stats->num_counters;
}
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *iwch_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *iwch_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
+static const struct attribute_group iwch_attr_group = {
+ .attrs = iwch_class_attributes,
};
static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -1316,10 +1320,8 @@ static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
int iwch_register_device(struct iwch_dev *dev)
{
int ret;
- int i;
pr_debug("%s iwch_dev %p\n", __func__, dev);
- strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
dev->ibdev.owner = THIS_MODULE;
@@ -1402,33 +1404,16 @@ int iwch_register_device(struct iwch_dev *dev)
sizeof(dev->ibdev.iwcm->ifname));
dev->ibdev.driver_id = RDMA_DRIVER_CXGB3;
- ret = ib_register_device(&dev->ibdev, NULL);
+ rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
+ ret = ib_register_device(&dev->ibdev, "cxgb3_%d", NULL);
if (ret)
- goto bail1;
-
- for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
- ret = device_create_file(&dev->ibdev.dev,
- iwch_class_attributes[i]);
- if (ret) {
- goto bail2;
- }
- }
- return 0;
-bail2:
- ib_unregister_device(&dev->ibdev);
-bail1:
- kfree(dev->ibdev.iwcm);
+ kfree(dev->ibdev.iwcm);
return ret;
}
void iwch_unregister_device(struct iwch_dev *dev)
{
- int i;
-
pr_debug("%s iwch_dev %p\n", __func__, dev);
- for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
- device_remove_file(&dev->ibdev.dev,
- iwch_class_attributes[i]);
ib_unregister_device(&dev->ibdev);
kfree(dev->ibdev.iwcm);
return;
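The cxgb3 (and, below, cxgb4) conversion replaces per-attribute device_create_file() loops with DEVICE_ATTR_RO() plus an attribute group handed to rdma_set_device_sysfs_group(); the *_show() naming is what DEVICE_ATTR_RO() keys on. A generic sketch of that sysfs pattern (the "foo" attribute and its value are hypothetical):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* illustrative value */
}
static DEVICE_ATTR_RO(foo);			/* generates dev_attr_foo */

static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,
	NULL
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};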
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0f83cbec33f3..615413bd3e8d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -403,8 +403,7 @@ void _c4iw_free_ep(struct kref *kref)
ep->com.local_addr.ss_family);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
- if (ep->mpa_skb)
- kfree_skb(ep->mpa_skb);
+ kfree_skb(ep->mpa_skb);
}
if (!skb_queue_empty(&ep->com.ep_skb_list))
skb_queue_purge(&ep->com.ep_skb_list);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 6d3042794094..1fd8798d91a7 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -161,7 +161,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq->gts = rdev->lldi.gts_reg;
cq->rdev = rdev;
- cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
+ cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
if (user && !cq->bar2_pa) {
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 4eda6872e617..cbb3c0ddd990 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -373,8 +373,8 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
return 0;
}
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
@@ -382,9 +382,10 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n",
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
@@ -395,9 +396,10 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
lldev->ethtool_ops->get_drvinfo(lldev, &info);
return sprintf(buf, "%s\n", info.driver);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
@@ -405,6 +407,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
c4iw_dev->rdev.lldi.pdev->device);
}
+static DEVICE_ATTR_RO(board_id);
enum counters {
IP4INSEGS,
@@ -461,14 +464,15 @@ static int c4iw_get_mib(struct ib_device *ibdev,
return stats->num_counters;
}
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *c4iw_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *c4iw_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
+static const struct attribute_group c4iw_attr_group = {
+ .attrs = c4iw_class_attributes,
};
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -530,12 +534,10 @@ static int fill_res_entry(struct sk_buff *msg, struct rdma_restrack_entry *res)
void c4iw_register_device(struct work_struct *work)
{
int ret;
- int i;
struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
struct c4iw_dev *dev = ctx->dev;
pr_debug("c4iw_dev %p\n", dev);
- strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
dev->ibdev.owner = THIS_MODULE;
@@ -626,20 +628,13 @@ void c4iw_register_device(struct work_struct *work)
memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
sizeof(dev->ibdev.iwcm->ifname));
+ rdma_set_device_sysfs_group(&dev->ibdev, &c4iw_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_CXGB4;
- ret = ib_register_device(&dev->ibdev, NULL);
+ ret = ib_register_device(&dev->ibdev, "cxgb4_%d", NULL);
if (ret)
goto err_kfree_iwcm;
-
- for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
- ret = device_create_file(&dev->ibdev.dev,
- c4iw_class_attributes[i]);
- if (ret)
- goto err_unregister_device;
- }
return;
-err_unregister_device:
- ib_unregister_device(&dev->ibdev);
+
err_kfree_iwcm:
kfree(dev->ibdev.iwcm);
err_dealloc_ctx:
@@ -651,12 +646,7 @@ err_dealloc_ctx:
void c4iw_unregister_device(struct c4iw_dev *dev)
{
- int i;
-
pr_debug("c4iw_dev %p\n", dev);
- for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
- device_remove_file(&dev->ibdev.dev,
- c4iw_class_attributes[i]);
ib_unregister_device(&dev->ibdev);
kfree(dev->ibdev.iwcm);
return;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 347fe18b1a41..13478f3b7057 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -99,7 +99,7 @@ static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
- pci_unmap_addr(sq, mapping));
+ dma_unmap_addr(sq, mapping));
}
static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
@@ -132,7 +132,7 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
if (!sq->queue)
return -ENOMEM;
sq->phys_addr = virt_to_phys(sq->queue);
- pci_unmap_addr_set(sq, mapping, sq->dma_addr);
+ dma_unmap_addr_set(sq, mapping, sq->dma_addr);
return 0;
}
@@ -279,12 +279,13 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
wq->db = rdev->lldi.db_reg;
- wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
+ wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
+ CXGB4_BAR2_QTYPE_EGRESS,
&wq->sq.bar2_qid,
user ? &wq->sq.bar2_pa : NULL);
if (need_rq)
wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
- T4_BAR2_QTYPE_EGRESS,
+ CXGB4_BAR2_QTYPE_EGRESS,
&wq->rq.bar2_qid,
user ? &wq->rq.bar2_pa : NULL);
@@ -2521,7 +2522,7 @@ static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
dma_free_coherent(&rdev->lldi.pdev->dev,
wq->memsize, wq->queue,
- pci_unmap_addr(wq, mapping));
+ dma_unmap_addr(wq, mapping));
c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
kfree(wq->sw_rq);
c4iw_put_qpid(rdev, wq->qid, uctx);
@@ -2570,9 +2571,9 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
goto err_free_rqtpool;
memset(wq->queue, 0, wq->memsize);
- pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+ dma_unmap_addr_set(wq, mapping, wq->dma_addr);
- wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, T4_BAR2_QTYPE_EGRESS,
+ wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
&wq->bar2_qid,
user ? &wq->bar2_pa : NULL);
@@ -2649,7 +2650,7 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
err_free_queue:
dma_free_coherent(&rdev->lldi.pdev->dev,
wq->memsize, wq->queue,
- pci_unmap_addr(wq, mapping));
+ dma_unmap_addr(wq, mapping));
err_free_rqtpool:
c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
err_free_pending_wrs:
@@ -2813,8 +2814,7 @@ err_free_queue:
free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
srq->wr_waitp);
err_free_skb:
- if (srq->destroy_skb)
- kfree_skb(srq->destroy_skb);
+ kfree_skb(srq->destroy_skb);
err_free_srq_idx:
c4iw_free_srq_idx(&rhp->rdev, srq->idx);
err_free_wr_wait:
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e42021fd6fd6..fff6d48d262f 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -397,7 +397,7 @@ struct t4_srq_pending_wr {
struct t4_srq {
union t4_recv_wr *queue;
dma_addr_t dma_addr;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_ADDR(mapping);
struct t4_swrqe *sw_rq;
void __iomem *bar2_va;
u64 bar2_pa;
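The pci_unmap_addr*()/DECLARE_PCI_UNMAP_ADDR() helpers are legacy aliases of the dma_unmap_addr*() family used above; the field and its accessors compile away on configurations that do not need the stashed DMA address. A sketch of the idiom (struct and field names illustrative):

#include <linux/dma-mapping.h>

struct example_ring {
	void *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* may occupy zero bytes */
};

static void example_save(struct example_ring *r)
{
	dma_unmap_addr_set(r, mapping, r->dma_addr);
}

static dma_addr_t example_load(struct example_ring *r)
{
	return dma_unmap_addr(r, mapping);
}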
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index f451ba912f47..ff790390c91a 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -8,12 +8,42 @@
#
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
-hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
- eprom.o exp_rcv.o file_ops.o firmware.o \
- init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
- qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o \
- uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \
- verbs_txreq.o vnic_main.o vnic_sdma.o
+hfi1-y := \
+ affinity.o \
+ chip.o \
+ device.o \
+ driver.o \
+ efivar.o \
+ eprom.o \
+ exp_rcv.o \
+ file_ops.o \
+ firmware.o \
+ init.o \
+ intr.o \
+ iowait.o \
+ mad.o \
+ mmu_rb.o \
+ msix.o \
+ pcie.o \
+ pio.o \
+ pio_copy.o \
+ platform.o \
+ qp.o \
+ qsfp.o \
+ rc.o \
+ ruc.o \
+ sdma.o \
+ sysfs.o \
+ trace.o \
+ uc.o \
+ ud.o \
+ user_exp_rcv.o \
+ user_pages.o \
+ user_sdma.o \
+ verbs.o \
+ verbs_txreq.o \
+ vnic_main.o \
+ vnic_sdma.o
ifdef CONFIG_DEBUG_FS
hfi1-y += debugfs.o
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index bedd5fba33b0..2baf38cc1e23 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -817,10 +817,10 @@ static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
set = &entry->def_intr;
cpumask_set_cpu(cpu, &set->mask);
cpumask_set_cpu(cpu, &set->used);
- for (i = 0; i < dd->num_msix_entries; i++) {
+ for (i = 0; i < dd->msix_info.max_requested; i++) {
struct hfi1_msix_entry *other_msix;
- other_msix = &dd->msix_entries[i];
+ other_msix = &dd->msix_info.msix_entries[i];
if (other_msix->type != IRQ_SDMA || other_msix == msix)
continue;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2c19bf772451..9b20479dc710 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -67,8 +67,6 @@
#include "debugfs.h"
#include "fault.h"
-#define NUM_IB_PORTS 1
-
uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
@@ -1100,9 +1098,9 @@ struct err_reg_info {
const char *desc;
};
-#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
-#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
-#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
+#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
+#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
+#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
/*
* Helpers for building HFI and DC error interrupt table entries. Different
@@ -6733,6 +6731,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
struct hfi1_devdata *dd = ppd->dd;
struct send_context *sc;
int i;
+ int sc_flags;
if (flags & FREEZE_SELF)
write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
@@ -6743,11 +6742,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
/* notify all SDMA engines that they are going into a freeze */
sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
+ sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
+ SCF_LINK_DOWN : 0);
/* do halt pre-handling on all enabled send contexts */
for (i = 0; i < dd->num_send_contexts; i++) {
sc = dd->send_contexts[i].sc;
if (sc && (sc->flags & SCF_ENABLED))
- sc_stop(sc, SCF_FROZEN | SCF_HALTED);
+ sc_stop(sc, sc_flags);
}
/* Send context are frozen. Notify user space */
@@ -8178,7 +8179,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
/**
* is_rcv_urgent_int() - User receive context urgent IRQ handler
* @dd: valid dd
- * @source: logical IRQ source (ofse from IS_RCVURGENT_START)
+ * @source: logical IRQ source (offset from IS_RCVURGENT_START)
*
* RX block receive urgent interrupt. Source is < 160.
*
@@ -8228,7 +8229,7 @@ static const struct is_table is_table[] = {
is_sdma_eng_err_name, is_sdma_eng_err_int },
{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
is_sendctxt_err_name, is_sendctxt_err_int },
-{ IS_SDMA_START, IS_SDMA_END,
+{ IS_SDMA_START, IS_SDMA_IDLE_END,
is_sdma_eng_name, is_sdma_eng_int },
{ IS_VARIOUS_START, IS_VARIOUS_END,
is_various_name, is_various_int },
@@ -8254,7 +8255,7 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
/* avoids a double compare by walking the table in-order */
for (entry = &is_table[0]; entry->is_name; entry++) {
- if (source < entry->end) {
+ if (source <= entry->end) {
trace_hfi1_interrupt(dd, entry, source);
entry->is_int(dd, source - entry->start);
return;
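Together with the NUM_*_ERRS changes earlier in this file, the comparison switch makes the is_table ranges inclusive on both ends: counts become end + 1 - start and membership uses <=. A worked check with hypothetical values:

/* Sketch: inclusive-range membership, matching the new "<=" test. */
static bool src_in_range(unsigned int src, unsigned int start,
			 unsigned int end)
{
	/*
	 * start = 192, end = 207 covers 207 + 1 - 192 = 16 sources, and
	 * src == 207 now matches where the old "src < end" test missed it.
	 */
	return src >= start && src <= end;
}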
@@ -8273,7 +8274,7 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
* context DATA IRQs are threaded and are not supported by this handler.
*
*/
-static irqreturn_t general_interrupt(int irq, void *data)
+irqreturn_t general_interrupt(int irq, void *data)
{
struct hfi1_devdata *dd = data;
u64 regs[CCE_NUM_INT_CSRS];
@@ -8306,7 +8307,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
return handled;
}
-static irqreturn_t sdma_interrupt(int irq, void *data)
+irqreturn_t sdma_interrupt(int irq, void *data)
{
struct sdma_engine *sde = data;
struct hfi1_devdata *dd = sde->dd;
@@ -8398,7 +8399,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
* invoked) is finished. The intent is to avoid extra interrupts while we
* are processing packets anyway.
*/
-static irqreturn_t receive_context_interrupt(int irq, void *data)
+irqreturn_t receive_context_interrupt(int irq, void *data)
{
struct hfi1_ctxtdata *rcd = data;
struct hfi1_devdata *dd = rcd->dd;
@@ -8438,7 +8439,7 @@ static irqreturn_t receive_context_interrupt(int irq, void *data)
* Receive packet thread handler. This expects to be invoked with the
* receive interrupt still blocked.
*/
-static irqreturn_t receive_context_thread(int irq, void *data)
+irqreturn_t receive_context_thread(int irq, void *data)
{
struct hfi1_ctxtdata *rcd = data;
int present;
@@ -9648,30 +9649,10 @@ void qsfp_event(struct work_struct *work)
}
}
-static void init_qsfp_int(struct hfi1_devdata *dd)
+void init_qsfp_int(struct hfi1_devdata *dd)
{
struct hfi1_pportdata *ppd = dd->pport;
- u64 qsfp_mask, cce_int_mask;
- const int qsfp1_int_smask = QSFP1_INT % 64;
- const int qsfp2_int_smask = QSFP2_INT % 64;
-
- /*
- * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
- * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
- * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
- * the index of the appropriate CSR in the CCEIntMask CSR array
- */
- cce_int_mask = read_csr(dd, CCE_INT_MASK +
- (8 * (QSFP1_INT / 64)));
- if (dd->hfi1_id) {
- cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
- write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
- cce_int_mask);
- } else {
- cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
- write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
- cce_int_mask);
- }
+ u64 qsfp_mask;
qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
/* Clear current status to avoid spurious interrupts */
@@ -9688,6 +9669,12 @@ static void init_qsfp_int(struct hfi1_devdata *dd)
write_csr(dd,
dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
qsfp_mask);
+
+ /* Enable the appropriate QSFP IRQ source */
+ if (!dd->hfi1_id)
+ set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
+ else
+ set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
}
/*
@@ -10574,12 +10561,29 @@ void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
}
}
-/*
- * Verify if BCT for data VLs is non-zero.
+/**
+ * data_vls_operational() - Verify if data VL BCT credits and MTU
+ * are both set.
+ * @ppd: pointer to hfi1_pportdata structure
+ *
+ * Return: true - OK, false - otherwise.
*/
static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
{
- return !!ppd->actual_vls_operational;
+ int i;
+ u64 reg;
+
+ if (!ppd->actual_vls_operational)
+ return false;
+
+ for (i = 0; i < ppd->vls_supported; i++) {
+ reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
+ if ((reg && !ppd->dd->vld[i].mtu) ||
+ (!reg && ppd->dd->vld[i].mtu))
+ return false;
+ }
+
+ return true;
}
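Each arm of the per-VL condition above fires when exactly one of the credit register and the MTU is set; the sketch below shows the equivalent truth-value comparison (helper name illustrative):

static bool vl_misconfigured(u64 credits, u16 mtu)
{
	/* same as (credits && !mtu) || (!credits && mtu) */
	return !!credits != !!mtu;
}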
/*
@@ -10674,6 +10678,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
handle_linkup_change(dd, 1);
+ pio_kernel_linkup(dd);
/*
* After link up, a new link width will have been set.
@@ -10691,7 +10696,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
if (!data_vls_operational(ppd)) {
dd_dev_err(dd,
- "%s: data VLs not operational\n", __func__);
+ "%s: Invalid data VL credits or mtu\n",
+ __func__);
ret = -EINVAL;
break;
}
@@ -11928,10 +11934,16 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
}
- if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
+ if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
+ set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
+ IS_RCVAVAIL_START + rcd->ctxt, true);
rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
- if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
+ }
+ if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
+ set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
+ IS_RCVAVAIL_START + rcd->ctxt, false);
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
+ }
if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
@@ -11961,6 +11973,13 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
+ if (op & HFI1_RCVCTRL_URGENT_ENB)
+ set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
+ IS_RCVURGENT_START + rcd->ctxt, true);
+ if (op & HFI1_RCVCTRL_URGENT_DIS)
+ set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
+ IS_RCVURGENT_START + rcd->ctxt, false);
+
hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
@@ -12959,63 +12978,71 @@ int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
return ret;
}
+/* ========================================================================= */
+
/**
- * get_int_mask - get 64 bit int mask
- * @dd - the devdata
- * @i - the csr (relative to CCE_INT_MASK)
+ * read_mod_write() - Calculate the IRQ register index and set/clear the bits
+ * @dd: valid devdata
+ * @src: IRQ source to determine register index from
+ * @bits: the bits to set or clear
+ * @set: true == set the bits, false == clear the bits
*
- * Returns the mask with the urgent interrupt mask
- * bit clear for kernel receive contexts.
*/
-static u64 get_int_mask(struct hfi1_devdata *dd, u32 i)
+static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
+ bool set)
{
- u64 mask = U64_MAX; /* default to no change */
-
- if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) {
- int j = (i - (IS_RCVURGENT_START / 64)) * 64;
- int k = !j ? IS_RCVURGENT_START % 64 : 0;
+ u64 reg;
+ u16 idx = src / BITS_PER_REGISTER;
- if (j)
- j -= IS_RCVURGENT_START % 64;
- /* j = 0..dd->first_dyn_alloc_ctxt - 1,k = 0..63 */
- for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++)
- /* convert to bit in mask and clear */
- mask &= ~BIT_ULL(k);
- }
- return mask;
+ spin_lock(&dd->irq_src_lock);
+ reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
+ if (set)
+ reg |= bits;
+ else
+ reg &= ~bits;
+ write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
+ spin_unlock(&dd->irq_src_lock);
}
-/* ========================================================================= */
-
-/*
- * Enable/disable chip from delivering interrupts.
+/**
+ * set_intr_bits() - Enable/disable a range of (one or more) IRQ sources
+ * @dd: valid devdata
+ * @first: first IRQ source to set/clear
+ * @last: last IRQ source (inclusive) to set/clear
+ * @set: true == set the bits, false == clear the bits
+ *
+ * If first == last, set the exact source.
*/
-void set_intr_state(struct hfi1_devdata *dd, u32 enable)
+int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
{
- int i;
+ u64 bits = 0;
+ u64 bit;
+ u16 src;
- /*
- * In HFI, the mask needs to be 1 to allow interrupts.
- */
- if (enable) {
- /* enable all interrupts but urgent on kernel contexts */
- for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
- u64 mask = get_int_mask(dd, i);
+ if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
+ return -EINVAL;
- write_csr(dd, CCE_INT_MASK + (8 * i), mask);
- }
+ if (last < first)
+ return -ERANGE;
- init_qsfp_int(dd);
- } else {
- for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
+ for (src = first; src <= last; src++) {
+ bit = src % BITS_PER_REGISTER;
+ /* wrapped to next register? */
+ if (!bit && bits) {
+ read_mod_write(dd, src - 1, bits, set);
+ bits = 0;
+ }
+ bits |= BIT_ULL(bit);
}
+ read_mod_write(dd, last, bits, set);
+
+ return 0;
}
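set_intr_bits() batches its updates per 64-bit mask register: it accumulates bits while walking sources first..last and flushes through read_mod_write() each time the walk crosses a register boundary, plus once at the end. A self-contained sketch of the same batching over a plain u64 array (masks[] stands in for the CCE_INT_MASK CSRs; locking omitted):

static void set_mask_range(u64 *masks, u16 first, u16 last, bool set)
{
	u64 bits = 0;
	u16 src;

	for (src = first; src <= last; src++) {
		u64 bit = src % 64;

		/* wrapped into the next register: flush what we have */
		if (!bit && bits) {
			if (set)
				masks[(src - 1) / 64] |= bits;
			else
				masks[(src - 1) / 64] &= ~bits;
			bits = 0;
		}
		bits |= 1ULL << bit;
	}
	if (set)
		masks[last / 64] |= bits;
	else
		masks[last / 64] &= ~bits;
}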
/*
* Clear all interrupt sources on the chip.
*/
-static void clear_all_interrupts(struct hfi1_devdata *dd)
+void clear_all_interrupts(struct hfi1_devdata *dd)
{
int i;
@@ -13039,38 +13066,11 @@ static void clear_all_interrupts(struct hfi1_devdata *dd)
write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
}
-/**
- * hfi1_clean_up_interrupts() - Free all IRQ resources
- * @dd: valid device data data structure
- *
- * Free the MSIx and assoicated PCI resources, if they have been allocated.
- */
-void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
-{
- int i;
- struct hfi1_msix_entry *me = dd->msix_entries;
-
- /* remove irqs - must happen before disabling/turning off */
- for (i = 0; i < dd->num_msix_entries; i++, me++) {
- if (!me->arg) /* => no irq, no affinity */
- continue;
- hfi1_put_irq_affinity(dd, me);
- pci_free_irq(dd->pcidev, i, me->arg);
- }
-
- /* clean structures */
- kfree(dd->msix_entries);
- dd->msix_entries = NULL;
- dd->num_msix_entries = 0;
-
- pci_free_irq_vectors(dd->pcidev);
-}
-
/*
* Remap the interrupt source from the general handler to the given MSI-X
* interrupt.
*/
-static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
+void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
{
u64 reg;
int m, n;
@@ -13094,8 +13094,7 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
write_csr(dd, CCE_INT_MAP + (8 * m), reg);
}
-static void remap_sdma_interrupts(struct hfi1_devdata *dd,
- int engine, int msix_intr)
+void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
{
/*
* SDMA engine interrupt sources grouped by type, rather than
@@ -13104,204 +13103,16 @@ static void remap_sdma_interrupts(struct hfi1_devdata *dd,
* SDMAProgress
* SDMAIdle
*/
- remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
- remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
- remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
-}
-
-static int request_msix_irqs(struct hfi1_devdata *dd)
-{
- int first_general, last_general;
- int first_sdma, last_sdma;
- int first_rx, last_rx;
- int i, ret = 0;
-
- /* calculate the ranges we are going to use */
- first_general = 0;
- last_general = first_general + 1;
- first_sdma = last_general;
- last_sdma = first_sdma + dd->num_sdma;
- first_rx = last_sdma;
- last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
-
- /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
- dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
-
- /*
- * Sanity check - the code expects all SDMA chip source
- * interrupts to be in the same CSR, starting at bit 0. Verify
- * that this is true by checking the bit location of the start.
- */
- BUILD_BUG_ON(IS_SDMA_START % 64);
-
- for (i = 0; i < dd->num_msix_entries; i++) {
- struct hfi1_msix_entry *me = &dd->msix_entries[i];
- const char *err_info;
- irq_handler_t handler;
- irq_handler_t thread = NULL;
- void *arg = NULL;
- int idx;
- struct hfi1_ctxtdata *rcd = NULL;
- struct sdma_engine *sde = NULL;
- char name[MAX_NAME_SIZE];
-
- /* obtain the arguments to pci_request_irq */
- if (first_general <= i && i < last_general) {
- idx = i - first_general;
- handler = general_interrupt;
- arg = dd;
- snprintf(name, sizeof(name),
- DRIVER_NAME "_%d", dd->unit);
- err_info = "general";
- me->type = IRQ_GENERAL;
- } else if (first_sdma <= i && i < last_sdma) {
- idx = i - first_sdma;
- sde = &dd->per_sdma[idx];
- handler = sdma_interrupt;
- arg = sde;
- snprintf(name, sizeof(name),
- DRIVER_NAME "_%d sdma%d", dd->unit, idx);
- err_info = "sdma";
- remap_sdma_interrupts(dd, idx, i);
- me->type = IRQ_SDMA;
- } else if (first_rx <= i && i < last_rx) {
- idx = i - first_rx;
- rcd = hfi1_rcd_get_by_index_safe(dd, idx);
- if (rcd) {
- /*
- * Set the interrupt register and mask for this
- * context's interrupt.
- */
- rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
- rcd->imask = ((u64)1) <<
- ((IS_RCVAVAIL_START + idx) % 64);
- handler = receive_context_interrupt;
- thread = receive_context_thread;
- arg = rcd;
- snprintf(name, sizeof(name),
- DRIVER_NAME "_%d kctxt%d",
- dd->unit, idx);
- err_info = "receive context";
- remap_intr(dd, IS_RCVAVAIL_START + idx, i);
- me->type = IRQ_RCVCTXT;
- rcd->msix_intr = i;
- hfi1_rcd_put(rcd);
- }
- } else {
- /* not in our expected range - complain, then
- * ignore it
- */
- dd_dev_err(dd,
- "Unexpected extra MSI-X interrupt %d\n", i);
- continue;
- }
- /* no argument, no interrupt */
- if (!arg)
- continue;
- /* make sure the name is terminated */
- name[sizeof(name) - 1] = 0;
- me->irq = pci_irq_vector(dd->pcidev, i);
- ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
- name);
- if (ret) {
- dd_dev_err(dd,
- "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
- err_info, me->irq, idx, ret);
- return ret;
- }
- /*
- * assign arg after pci_request_irq call, so it will be
- * cleaned up
- */
- me->arg = arg;
-
- ret = hfi1_get_irq_affinity(dd, me);
- if (ret)
- dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
- }
-
- return ret;
-}
-
-void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
-{
- int i;
-
- for (i = 0; i < dd->vnic.num_ctxt; i++) {
- struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
- struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
-
- synchronize_irq(me->irq);
- }
-}
-
-void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_devdata *dd = rcd->dd;
- struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
-
- if (!me->arg) /* => no irq, no affinity */
- return;
-
- hfi1_put_irq_affinity(dd, me);
- pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
-
- me->arg = NULL;
-}
-
-void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_devdata *dd = rcd->dd;
- struct hfi1_msix_entry *me;
- int idx = rcd->ctxt;
- void *arg = rcd;
- int ret;
-
- rcd->msix_intr = dd->vnic.msix_idx++;
- me = &dd->msix_entries[rcd->msix_intr];
-
- /*
- * Set the interrupt register and mask for this
- * context's interrupt.
- */
- rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
- rcd->imask = ((u64)1) <<
- ((IS_RCVAVAIL_START + idx) % 64);
- me->type = IRQ_RCVCTXT;
- me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
- remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
-
- ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
- receive_context_interrupt,
- receive_context_thread, arg,
- DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
- if (ret) {
- dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
- me->irq, idx, ret);
- return;
- }
- /*
- * assign arg after pci_request_irq call, so it will be
- * cleaned up
- */
- me->arg = arg;
-
- ret = hfi1_get_irq_affinity(dd, me);
- if (ret) {
- dd_dev_err(dd,
- "unable to pin IRQ %d\n", ret);
- pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
- }
+ remap_intr(dd, IS_SDMA_START + engine, msix_intr);
+ remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
+ remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
}
/*
* Set the general handler to accept all interrupts, remap all
* chip interrupts back to MSI-X 0.
*/
-static void reset_interrupts(struct hfi1_devdata *dd)
+void reset_interrupts(struct hfi1_devdata *dd)
{
int i;
@@ -13314,54 +13125,33 @@ static void reset_interrupts(struct hfi1_devdata *dd)
write_csr(dd, CCE_INT_MAP + (8 * i), 0);
}
+/**
+ * set_up_interrupts() - Initialize the IRQ resources and state
+ * @dd: valid devdata
+ *
+ */
static int set_up_interrupts(struct hfi1_devdata *dd)
{
- u32 total;
- int ret, request;
-
- /*
- * Interrupt count:
- * 1 general, "slow path" interrupt (includes the SDMA engines
- * slow source, SDMACleanupDone)
- * N interrupts - one per used SDMA engine
- * M interrupt - one per kernel receive context
- * V interrupt - one for each VNIC context
- */
- total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
-
- /* ask for MSI-X interrupts */
- request = request_msix(dd, total);
- if (request < 0) {
- ret = request;
- goto fail;
- } else {
- dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
- GFP_KERNEL);
- if (!dd->msix_entries) {
- ret = -ENOMEM;
- goto fail;
- }
- /* using MSI-X */
- dd->num_msix_entries = total;
- dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
- }
+ int ret;
/* mask all interrupts */
- set_intr_state(dd, 0);
+ set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
+
/* clear all pending interrupts */
clear_all_interrupts(dd);
/* reset general handler mask, chip MSI-X mappings */
reset_interrupts(dd);
- ret = request_msix_irqs(dd);
+ /* ask for MSI-X interrupts */
+ ret = msix_initialize(dd);
if (ret)
- goto fail;
+ return ret;
- return 0;
+ ret = msix_request_irqs(dd);
+ if (ret)
+ msix_clean_up_interrupts(dd);
-fail:
- hfi1_clean_up_interrupts(dd);
return ret;
}
@@ -14914,20 +14704,16 @@ err_exit:
}
/**
- * Allocate and initialize the device structure for the hfi.
+ * hfi1_init_dd() - Initialize most of the dd structure.
* @dev: the pci_dev for hfi1_ib device
* @ent: pci_device_id struct for this dev
*
- * Also allocates, initializes, and returns the devdata struct for this
- * device instance
- *
* This is global, and is called directly at init to set up the
* chip-specific function pointers for later use.
*/
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+int hfi1_init_dd(struct hfi1_devdata *dd)
{
- struct hfi1_devdata *dd;
+ struct pci_dev *pdev = dd->pcidev;
struct hfi1_pportdata *ppd;
u64 reg;
int i, ret;
@@ -14938,13 +14724,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
"Functional simulator"
};
struct pci_dev *parent = pdev->bus->self;
- u32 sdma_engines;
+ u32 sdma_engines = chip_sdma_engines(dd);
- dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
- sizeof(struct hfi1_pportdata));
- if (IS_ERR(dd))
- goto bail;
- sdma_engines = chip_sdma_engines(dd);
ppd = dd->pport;
for (i = 0; i < dd->num_pports; i++, ppd++) {
int vl;
@@ -15123,6 +14904,12 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_cleanup;
+ /*
+ * This should probably occur in hfi1_pcie_init(), but historically
+ * occurs after the do_pcie_gen3_transition() code.
+ */
+ tune_pcie_caps(dd);
+
/* start setting dd values and adjusting CSRs */
init_early_variables(dd);
@@ -15235,14 +15022,13 @@ bail_free_cntrs:
free_cntrs(dd);
bail_clear_intr:
hfi1_comp_vectors_clean_up(dd);
- hfi1_clean_up_interrupts(dd);
+ msix_clean_up_interrupts(dd);
bail_cleanup:
hfi1_pcie_ddcleanup(dd);
bail_free:
hfi1_free_devdata(dd);
- dd = ERR_PTR(ret);
bail:
- return dd;
+ return ret;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 36b04d6300e5..6b9c8f12dff8 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -52,9 +52,7 @@
*/
/* sizes */
-#define CCE_NUM_MSIX_VECTORS 256
-#define CCE_NUM_INT_CSRS 12
-#define CCE_NUM_INT_MAP_CSRS 96
+#define BITS_PER_REGISTER (BITS_PER_BYTE * sizeof(u64))
#define NUM_INTERRUPT_SOURCES 768
#define RXE_NUM_CONTEXTS 160
#define RXE_PER_CONTEXT_SIZE 0x1000 /* 4k */
@@ -161,34 +159,49 @@
(CR_CREDIT_RETURN_DUE_TO_FORCE_MASK << \
CR_CREDIT_RETURN_DUE_TO_FORCE_SHIFT)
-/* interrupt source numbers */
-#define IS_GENERAL_ERR_START 0
-#define IS_SDMAENG_ERR_START 16
-#define IS_SENDCTXT_ERR_START 32
-#define IS_SDMA_START 192 /* includes SDmaProgress,SDmaIdle */
+/* Specific IRQ sources */
+#define CCE_ERR_INT 0
+#define RXE_ERR_INT 1
+#define MISC_ERR_INT 2
+#define PIO_ERR_INT 4
+#define SDMA_ERR_INT 5
+#define EGRESS_ERR_INT 6
+#define TXE_ERR_INT 7
+#define PBC_INT 240
+#define GPIO_ASSERT_INT 241
+#define QSFP1_INT 242
+#define QSFP2_INT 243
+#define TCRIT_INT 244
+
+/* interrupt source ranges */
+#define IS_FIRST_SOURCE CCE_ERR_INT
+#define IS_GENERAL_ERR_START 0
+#define IS_SDMAENG_ERR_START 16
+#define IS_SENDCTXT_ERR_START 32
+#define IS_SDMA_START 192
+#define IS_SDMA_PROGRESS_START 208
+#define IS_SDMA_IDLE_START 224
#define IS_VARIOUS_START 240
#define IS_DC_START 248
#define IS_RCVAVAIL_START 256
#define IS_RCVURGENT_START 416
#define IS_SENDCREDIT_START 576
#define IS_RESERVED_START 736
-#define IS_MAX_SOURCES 768
+#define IS_LAST_SOURCE 767
/* derived interrupt source values */
-#define IS_GENERAL_ERR_END IS_SDMAENG_ERR_START
-#define IS_SDMAENG_ERR_END IS_SENDCTXT_ERR_START
-#define IS_SENDCTXT_ERR_END IS_SDMA_START
-#define IS_SDMA_END IS_VARIOUS_START
-#define IS_VARIOUS_END IS_DC_START
-#define IS_DC_END IS_RCVAVAIL_START
-#define IS_RCVAVAIL_END IS_RCVURGENT_START
-#define IS_RCVURGENT_END IS_SENDCREDIT_START
-#define IS_SENDCREDIT_END IS_RESERVED_START
-#define IS_RESERVED_END IS_MAX_SOURCES
-
-/* absolute interrupt numbers for QSFP1Int and QSFP2Int */
-#define QSFP1_INT 242
-#define QSFP2_INT 243
+#define IS_GENERAL_ERR_END 7
+#define IS_SDMAENG_ERR_END 31
+#define IS_SENDCTXT_ERR_END 191
+#define IS_SDMA_END 207
+#define IS_SDMA_PROGRESS_END 223
+#define IS_SDMA_IDLE_END 239
+#define IS_VARIOUS_END 244
+#define IS_DC_END 255
+#define IS_RCVAVAIL_END 415
+#define IS_RCVURGENT_END 575
+#define IS_SENDCREDIT_END 735
+#define IS_RESERVED_END IS_LAST_SOURCE
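A note on the remapped ranges above: every *_END value is now an inclusive last source rather than the exclusive start of the next range, and each source occupies one bit of a 64-bit CSR. The following is a userspace stand-in for the range math set_intr_bits() is assumed to perform; everything outside the defines is illustrative.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_REGISTER 64
#define NUM_INT_CSRS 12

/* Userspace stand-in for set_intr_bits(): first..last is inclusive. */
static void set_range(uint64_t *csrs, int first, int last, int set)
{
	for (int src = first; src <= last; src++) {
		uint64_t bit = (uint64_t)1 << (src % BITS_PER_REGISTER);

		if (set)
			csrs[src / BITS_PER_REGISTER] |= bit;
		else
			csrs[src / BITS_PER_REGISTER] &= ~bit;
	}
}

int main(void)
{
	uint64_t csrs[NUM_INT_CSRS] = { 0 };

	/* IS_GENERAL_ERR_START..IS_GENERAL_ERR_END (0..7, inclusive) */
	set_range(csrs, 0, 7, 1);
	printf("CSR0 = 0x%llx\n", (unsigned long long)csrs[0]); /* 0xff */
	return 0;
}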
/* DCC_CFG_PORT_CONFIG logical link states */
#define LSTATE_DOWN 0x1
@@ -1416,6 +1429,18 @@ void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd);
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd);
+irqreturn_t general_interrupt(int irq, void *data);
+irqreturn_t sdma_interrupt(int irq, void *data);
+irqreturn_t receive_context_interrupt(int irq, void *data);
+irqreturn_t receive_context_thread(int irq, void *data);
+
+int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set);
+void init_qsfp_int(struct hfi1_devdata *dd);
+void clear_all_interrupts(struct hfi1_devdata *dd);
+void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
+void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
+void reset_interrupts(struct hfi1_devdata *dd);
+
/*
* Interrupt source table.
*
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index ee6dca5e2a2f..c6163a347e93 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -878,6 +878,10 @@
#define SEND_CTRL (TXE + 0x000000000000)
#define SEND_CTRL_CM_RESET_SMASK 0x4ull
#define SEND_CTRL_SEND_ENABLE_SMASK 0x1ull
+#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
+#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xFFull
+#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
+ << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
#define SEND_CTRL_VL_ARBITER_ENABLE_SMASK 0x2ull
#define SEND_CTXT_CHECK_ENABLE (TXE + 0x000000100080)
#define SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK 0x80ull
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 1fc75647e47b..c22ebc774a6a 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -681,7 +681,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
HFI1_RCVCTRL_TAILUPD_DIS |
HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
- HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
+ HFI1_RCVCTRL_NO_EGR_DROP_DIS |
+ HFI1_RCVCTRL_URGENT_DIS, uctxt);
/* Clear the context's J_KEY */
hfi1_clear_ctxt_jkey(dd, uctxt);
/*
@@ -1096,6 +1097,7 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);
rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
+ rcvctrl_ops |= HFI1_RCVCTRL_URGENT_ENB;
if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
/*
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index d9470317983f..1401b6ea4a28 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -80,6 +80,7 @@
#include "qsfp.h"
#include "platform.h"
#include "affinity.h"
+#include "msix.h"
/* bumped 1 from s/w major version of TrueScale */
#define HFI1_CHIP_VERS_MAJ 3U
@@ -620,6 +621,8 @@ struct rvt_sge_state;
#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
+#define HFI1_RCVCTRL_URGENT_ENB 0x40000
+#define HFI1_RCVCTRL_URGENT_DIS 0x80000
/* partition enforcement flags */
#define HFI1_PART_ENFORCE_IN 0x1
@@ -667,6 +670,14 @@ struct hfi1_msix_entry {
struct irq_affinity_notify notify;
};
+struct hfi1_msix_info {
+ /* lock to synchronize in_use_msix access */
+ spinlock_t msix_lock;
+ DECLARE_BITMAP(in_use_msix, CCE_NUM_MSIX_VECTORS);
+ struct hfi1_msix_entry *msix_entries;
+ u16 max_requested;
+};
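The bitmap plus msix_lock replaces the old monotonically increasing vnic msix_idx, so vectors can be freed and reused individually. Below is a minimal userspace approximation of the allocate/free protocol; the kernel code is assumed to do the equivalent with find_first_zero_bit()/__set_bit() under the spinlock, and all names here are illustrative.

#include <stdint.h>
#include <stdio.h>

#define MAX_VECTORS 64			/* stand-in for CCE_NUM_MSIX_VECTORS */

static uint64_t in_use;			/* stand-in for in_use_msix */

/* First clear bit wins; returns -1 when every vector is taken. */
static int vector_alloc(void)
{
	for (int nr = 0; nr < MAX_VECTORS; nr++)
		if (!(in_use & ((uint64_t)1 << nr))) {
			in_use |= (uint64_t)1 << nr;
			return nr;
		}
	return -1;
}

static void vector_free(int nr)
{
	in_use &= ~((uint64_t)1 << nr);
}

int main(void)
{
	int a = vector_alloc();		/* 0: reserved for the general IRQ */
	int b = vector_alloc();		/* 1 */

	vector_free(b);
	printf("a=%d reused=%d\n", a, vector_alloc());	/* 1 comes back */
	return 0;
}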
+
/* per-SL CCA information */
struct cca_timer {
struct hrtimer hrtimer;
@@ -992,7 +1003,6 @@ struct hfi1_vnic_data {
struct idr vesw_idr;
u8 rmt_start;
u8 num_ctxt;
- u32 msix_idx;
};
struct hfi1_vnic_vport_info;
@@ -1205,11 +1215,6 @@ struct hfi1_devdata {
struct diag_client *diag_client;
- /* MSI-X information */
- struct hfi1_msix_entry *msix_entries;
- u32 num_msix_entries;
- u32 first_dyn_msix_idx;
-
/* general interrupt: mask of handled interrupts */
u64 gi_mask[CCE_NUM_INT_CSRS];
@@ -1223,6 +1228,9 @@ struct hfi1_devdata {
*/
struct timer_list synth_stats_timer;
+ /* MSI-X information */
+ struct hfi1_msix_info msix_info;
+
/*
* device counters
*/
@@ -1349,6 +1357,8 @@ struct hfi1_devdata {
/* vnic data */
struct hfi1_vnic_data vnic;
+ /* Lock to protect IRQ SRC register access */
+ spinlock_t irq_src_lock;
};
static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
@@ -1431,9 +1441,6 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
void set_all_slowpath(struct hfi1_devdata *dd);
-void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
-void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
-void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);
extern const struct pci_device_id hfi1_pci_tbl[];
void hfi1_make_ud_req_9B(struct rvt_qp *qp,
@@ -1887,10 +1894,8 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
#define HFI1_CTXT_WAITING_URG 4
/* free up any allocated data at closes */
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
- const struct pci_device_id *ent);
+int hfi1_init_dd(struct hfi1_devdata *dd);
void hfi1_free_devdata(struct hfi1_devdata *dd);
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
/* LED beaconing functions */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
@@ -1963,6 +1968,7 @@ static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
*/
extern const char ib_hfi1_version[];
+extern const struct attribute_group ib_hfi1_attr_group;
int hfi1_device_create(struct hfi1_devdata *dd);
void hfi1_device_remove(struct hfi1_devdata *dd);
@@ -1974,16 +1980,15 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
/* Hook for sysfs read of QSFP */
int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
-void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
+int hfi1_pcie_init(struct hfi1_devdata *dd);
void hfi1_pcie_cleanup(struct pci_dev *pdev);
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *dd);
-int request_msix(struct hfi1_devdata *dd, u32 msireq);
int restore_pci_variables(struct hfi1_devdata *dd);
int save_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
+void tune_pcie_caps(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
enum platform_config_table_type_encoding
@@ -2124,19 +2129,6 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
return base_sdma_integrity;
}
-/*
- * hfi1_early_err is used (only!) to print early errors before devdata is
- * allocated, or when dd->pcidev may not be valid, and at the tail end of
- * cleanup when devdata may have been freed, etc. hfi1_dev_porterr is
- * the same as dd_dev_err, but is used when the message really needs
- * the IB port# to be definitive as to what's happening..
- */
-#define hfi1_early_err(dev, fmt, ...) \
- dev_err(dev, fmt, ##__VA_ARGS__)
-
-#define hfi1_early_info(dev, fmt, ...) \
- dev_info(dev, fmt, ##__VA_ARGS__)
-
#define dd_dev_emerg(dd, fmt, ...) \
dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 758d273c32cf..09044905284f 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -83,6 +83,8 @@
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
+#define NUM_IB_PORTS 1
+
/*
* Number of user receive contexts we are configured to use (to allow for more
* pio buffers per ctxt, etc.) Zero means use one user context per CPU.
@@ -654,9 +656,8 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
if (loopback) {
- hfi1_early_err(&pdev->dev,
- "Faking data partition 0x8001 in idx %u\n",
- !default_pkey_idx);
+ dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
+ !default_pkey_idx);
ppd->pkeys[!default_pkey_idx] = 0x8001;
}
@@ -702,9 +703,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
return;
bail:
-
- hfi1_early_err(&pdev->dev,
- "Congestion Control Agent disabled for port %d\n", port);
+ dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}
/*
@@ -833,6 +832,23 @@ wq_error:
}
/**
+ * enable_general_intr() - Enable the IRQs that will be handled by the
+ * general interrupt handler.
+ * @dd: valid devdata
+ *
+ */
+static void enable_general_intr(struct hfi1_devdata *dd)
+{
+ set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
+ set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
+ set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
+ set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
+ set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
+ set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
+ set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
+}
+
+/**
* hfi1_init - do the actual initialization sequence on the chip
* @dd: the hfi1_ib device
* @reinit: re-initializing, so don't allocate new memory
@@ -916,6 +932,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
ret = lastfail;
}
+ /* enable IRQ */
hfi1_rcd_put(rcd);
}
@@ -954,7 +971,8 @@ done:
HFI1_STATUS_INITTED;
if (!ret) {
/* enable all interrupts from the chip */
- set_intr_state(dd, 1);
+ enable_general_intr(dd);
+ init_qsfp_int(dd);
/* chip is OK for user apps; mark it as initialized */
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
@@ -1051,9 +1069,9 @@ static void shutdown_device(struct hfi1_devdata *dd)
}
dd->flags &= ~HFI1_INITTED;
- /* mask and clean up interrupts, but not errors */
- set_intr_state(dd, 0);
- hfi1_clean_up_interrupts(dd);
+ /* mask and clean up interrupts */
+ set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
+ msix_clean_up_interrupts(dd);
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
@@ -1246,15 +1264,19 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
kobject_put(&dd->kobj);
}
-/*
- * Allocate our primary per-unit data structure. Must be done via verbs
- * allocator, because the verbs cleanup process both does cleanup and
- * free of the data structure.
+/**
+ * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
+ * @pdev: Valid PCI device
+ * @extra: How many bytes to alloc past the default
+ *
+ * Must be done via verbs allocator, because the verbs cleanup process
+ * both does cleanup and free of the data structure.
* "extra" is for chip-specific data.
*
* Use the idr mechanism to get a unit number for this unit.
*/
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
+static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
+ size_t extra)
{
unsigned long flags;
struct hfi1_devdata *dd;
@@ -1287,8 +1309,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
idr_preload_end();
if (ret < 0) {
- hfi1_early_err(&pdev->dev,
- "Could not allocate unit ID: error %d\n", -ret);
+ dev_err(&pdev->dev,
+ "Could not allocate unit ID: error %d\n", -ret);
goto bail;
}
rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
@@ -1309,6 +1331,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
spin_lock_init(&dd->pio_map_lock);
mutex_init(&dd->dc8051_lock);
init_waitqueue_head(&dd->event_queue);
+ spin_lock_init(&dd->irq_src_lock);
dd->int_counter = alloc_percpu(u64);
if (!dd->int_counter) {
@@ -1481,9 +1504,6 @@ static int __init hfi1_mod_init(void)
idr_init(&hfi1_unit_table);
hfi1_dbg_init();
- ret = hfi1_wss_init();
- if (ret < 0)
- goto bail_wss;
ret = pci_register_driver(&hfi1_pci_driver);
if (ret < 0) {
pr_err("Unable to register driver: error %d\n", -ret);
@@ -1492,8 +1512,6 @@ static int __init hfi1_mod_init(void)
goto bail; /* all OK */
bail_dev:
- hfi1_wss_exit();
-bail_wss:
hfi1_dbg_exit();
idr_destroy(&hfi1_unit_table);
dev_cleanup();
@@ -1510,7 +1528,6 @@ static void __exit hfi1_mod_cleanup(void)
{
pci_unregister_driver(&hfi1_pci_driver);
node_affinity_destroy_all();
- hfi1_wss_exit();
hfi1_dbg_exit();
idr_destroy(&hfi1_unit_table);
@@ -1604,23 +1621,23 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
hfi1_free_devdata(dd);
}
-static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
+static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
{
if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(dev, "Receive header queue count too small\n");
+ dd_dev_err(dd, "Receive header queue count too small\n");
return -EINVAL;
}
if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(dev,
- "Receive header queue count cannot be greater than %u\n",
- HFI1_MAX_HDRQ_EGRBUF_CNT);
+ dd_dev_err(dd,
+ "Receive header queue count cannot be greater than %u\n",
+ HFI1_MAX_HDRQ_EGRBUF_CNT);
return -EINVAL;
}
if (thecnt % HDRQ_INCREMENT) {
- hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
- thecnt, HDRQ_INCREMENT);
+ dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
+ thecnt, HDRQ_INCREMENT);
return -EINVAL;
}
@@ -1639,22 +1656,29 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Validate dev ids */
if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
ent->device == PCI_DEVICE_ID_INTEL1)) {
- hfi1_early_err(&pdev->dev,
- "Failing on unknown Intel deviceid 0x%x\n",
- ent->device);
+ dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
+ ent->device);
ret = -ENODEV;
goto bail;
}
+ /* Allocate the dd so we can get to work */
+ dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
+ sizeof(struct hfi1_pportdata));
+ if (IS_ERR(dd)) {
+ ret = PTR_ERR(dd);
+ goto bail;
+ }
+
/* Validate some global module parameters */
- ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
+ ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
if (ret)
goto bail;
/* use the encoding function as a sanitization check */
if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
- hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
- hfi1_hdrq_entsize);
+ dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
+ hfi1_hdrq_entsize);
ret = -EINVAL;
goto bail;
}
@@ -1676,10 +1700,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
clamp_val(eager_buffer_size,
MIN_EAGER_BUFFER * 8,
MAX_EAGER_BUFFER_TOTAL);
- hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
- eager_buffer_size);
+ dd_dev_info(dd, "Eager buffer size %u\n",
+ eager_buffer_size);
} else {
- hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
+ dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
ret = -EINVAL;
goto bail;
}
@@ -1687,7 +1711,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* restrict value of hfi1_rcvarr_split */
hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
- ret = hfi1_pcie_init(pdev, ent);
+ ret = hfi1_pcie_init(dd);
if (ret)
goto bail;
@@ -1695,12 +1719,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* Do device-specific initialization, function table setup, dd
* allocation, etc.
*/
- dd = hfi1_init_dd(pdev, ent);
-
- if (IS_ERR(dd)) {
- ret = PTR_ERR(dd);
+ ret = hfi1_init_dd(dd);
+ if (ret)
goto clean_bail; /* error already printed */
- }
ret = create_workqueues(dd);
if (ret)
@@ -1731,7 +1752,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
if (initfail || ret) {
- hfi1_clean_up_interrupts(dd);
+ msix_clean_up_interrupts(dd);
stop_timers(dd);
flush_workqueue(ib_wq);
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
new file mode 100644
index 000000000000..582f1ba136ff
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/iowait.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#include "iowait.h"
+#include "trace_iowait.h"
+
+void iowait_set_flag(struct iowait *wait, u32 flag)
+{
+ trace_hfi1_iowait_set(wait, flag);
+ set_bit(flag, &wait->flags);
+}
+
+bool iowait_flag_set(struct iowait *wait, u32 flag)
+{
+ return test_bit(flag, &wait->flags);
+}
+
+inline void iowait_clear_flag(struct iowait *wait, u32 flag)
+{
+ trace_hfi1_iowait_clear(wait, flag);
+ clear_bit(flag, &wait->flags);
+}
+
+/**
+ * iowait_init() - initialize wait structure
+ * @wait: wait struct to initialize
+ * @tx_limit: limit for overflow queuing
+ * @func: restart function for workqueue
+ * @sleep: sleep function for no space
+ * @resume: wakeup function for no space
+ *
+ * This function initializes the iowait
+ * structure embedded in the QP or PQ.
+ *
+ */
+void iowait_init(struct iowait *wait, u32 tx_limit,
+ void (*func)(struct work_struct *work),
+ void (*tidfunc)(struct work_struct *work),
+ int (*sleep)(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ uint seq,
+ bool pkts_sent),
+ void (*wakeup)(struct iowait *wait, int reason),
+ void (*sdma_drained)(struct iowait *wait))
+{
+ int i;
+
+ wait->count = 0;
+ INIT_LIST_HEAD(&wait->list);
+ init_waitqueue_head(&wait->wait_dma);
+ init_waitqueue_head(&wait->wait_pio);
+ atomic_set(&wait->sdma_busy, 0);
+ atomic_set(&wait->pio_busy, 0);
+ wait->tx_limit = tx_limit;
+ wait->sleep = sleep;
+ wait->wakeup = wakeup;
+ wait->sdma_drained = sdma_drained;
+ wait->flags = 0;
+ for (i = 0; i < IOWAIT_SES; i++) {
+ wait->wait[i].iow = wait;
+ INIT_LIST_HEAD(&wait->wait[i].tx_head);
+ if (i == IOWAIT_IB_SE)
+ INIT_WORK(&wait->wait[i].iowork, func);
+ else
+ INIT_WORK(&wait->wait[i].iowork, tidfunc);
+ }
+}
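A hedged usage sketch of the new signature: the embedding QP private structure now supplies two work functions, one per send engine. All callback names below are placeholders, not driver API.

/* Sketch only: callbacks are placeholders for real driver functions. */
static void example_ib_work(struct work_struct *work);
static void example_tid_work(struct work_struct *work);
static int example_sleep(struct sdma_engine *sde, struct iowait_work *wait,
			 struct sdma_txreq *tx, uint seq, bool pkts_sent);
static void example_wakeup(struct iowait *wait, int reason);
static void example_drained(struct iowait *wait);

static void example_iowait_setup(struct iowait *wait, u32 tx_limit)
{
	iowait_init(wait, tx_limit,
		    example_ib_work,	/* IOWAIT_IB_SE work function */
		    example_tid_work,	/* IOWAIT_TID_SE work function */
		    example_sleep,	/* called when out of descriptors */
		    example_wakeup,	/* called when space frees up */
		    example_drained);	/* called when SDMA drains */
}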
+
+/**
+ * iowait_cancel_work - cancel all work in iowait
+ * @w: the iowait struct
+ */
+void iowait_cancel_work(struct iowait *w)
+{
+ cancel_work_sync(&iowait_get_ib_work(w)->iowork);
+ cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+}
+
+/**
+ * iowait_set_work_flag - set work flag based on leg
+ * @w: the iowait_work struct
+ */
+int iowait_set_work_flag(struct iowait_work *w)
+{
+ if (w == &w->iow->wait[IOWAIT_IB_SE]) {
+ iowait_set_flag(w->iow, IOWAIT_PENDING_IB);
+ return IOWAIT_IB_SE;
+ }
+ iowait_set_flag(w->iow, IOWAIT_PENDING_TID);
+ return IOWAIT_TID_SE;
+}
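Since IOWAIT_PENDING_IB/IOWAIT_PENDING_TID are bit numbers in iowait->flags, a caller can test-and-clear them to decide which leg to run. A sketch under that assumption; the dispatcher below is hypothetical, not part of this patch.

/* Hypothetical dispatcher: run whichever send-engine legs are pending. */
static void example_dispatch(struct iowait *w, struct workqueue_struct *wq)
{
	if (test_and_clear_bit(IOWAIT_PENDING_IB, &w->flags))
		queue_work(wq, &iowait_get_ib_work(w)->iowork);
	if (test_and_clear_bit(IOWAIT_PENDING_TID, &w->flags))
		queue_work(wq, &iowait_get_tid_work(w)->iowork);
}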
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 3d9c32c7c340..23a58ac0d47c 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_IOWAIT_H
#define _HFI1_IOWAIT_H
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -49,6 +49,7 @@
#include <linux/list.h>
#include <linux/workqueue.h>
+#include <linux/wait.h>
#include <linux/sched.h>
#include "sdma_txreq.h"
@@ -59,16 +60,47 @@
*/
typedef void (*restart_t)(struct work_struct *work);
+#define IOWAIT_PENDING_IB 0x0
+#define IOWAIT_PENDING_TID 0x1
+
+/*
+ * A QP can have multiple Send Engines (SEs).
+ *
+ * The current use case is for supporting a TID RDMA
+ * packet build/xmit mechanism independent from verbs.
+ */
+#define IOWAIT_SES 2
+#define IOWAIT_IB_SE 0
+#define IOWAIT_TID_SE 1
+
struct sdma_txreq;
struct sdma_engine;
/**
- * struct iowait - linkage for delayed progress/waiting
+ * struct iowait_work - per-SE work item and tx list
+ * @iowork: the work struct
+ * @tx_head: list of prebuilt packets
+ * @iow: the parent iowait structure
+ *
+ * This structure holds the work item (process) specific
+ * details associated with each of the two SEs of the
+ * QP.
+ *
+ * The workstruct and the queued TXs are unique to each
+ * SE.
+ */
+struct iowait;
+struct iowait_work {
+ struct work_struct iowork;
+ struct list_head tx_head;
+ struct iowait *iow;
+};
+
+/**
+ * struct iowait - linkage for delayed progress/waiting
* @list: used to add/insert into QP/PQ wait lists
- * @lock: uses to record the list head lock
* @tx_head: overflow list of sdma_txreq's
* @sleep: no space callback
* @wakeup: space callback wakeup
* @sdma_drained: sdma count drained
+ * @lock: lock protected head of wait queue
* @iowork: workqueue overhead
* @wait_dma: wait for sdma_busy == 0
* @wait_pio: wait for pio_busy == 0
@@ -76,6 +108,8 @@ struct sdma_engine;
* @count: total number of descriptors in tx_head'ed list
* @tx_limit: limit for overflow queuing
* @tx_count: number of tx entry's in tx_head'ed list
+ * @flags: wait flags (one per QP)
+ * @wait: SE array
*
* This is to be embedded in user's state structure
* (QP or PQ).
@@ -98,13 +132,11 @@ struct sdma_engine;
* Waiters explicity know that, but the destroy
* code that unwaits QPs does not.
*/
-
struct iowait {
struct list_head list;
- struct list_head tx_head;
int (*sleep)(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
uint seq,
bool pkts_sent
@@ -112,7 +144,6 @@ struct iowait {
void (*wakeup)(struct iowait *wait, int reason);
void (*sdma_drained)(struct iowait *wait);
seqlock_t *lock;
- struct work_struct iowork;
wait_queue_head_t wait_dma;
wait_queue_head_t wait_pio;
atomic_t sdma_busy;
@@ -121,63 +152,37 @@ struct iowait {
u32 tx_limit;
u32 tx_count;
u8 starved_cnt;
+ unsigned long flags;
+ struct iowait_work wait[IOWAIT_SES];
};
#define SDMA_AVAIL_REASON 0
-/**
- * iowait_init() - initialize wait structure
- * @wait: wait struct to initialize
- * @tx_limit: limit for overflow queuing
- * @func: restart function for workqueue
- * @sleep: sleep function for no space
- * @resume: wakeup function for no space
- *
- * This function initializes the iowait
- * structure embedded in the QP or PQ.
- *
- */
+void iowait_set_flag(struct iowait *wait, u32 flag);
+bool iowait_flag_set(struct iowait *wait, u32 flag);
+void iowait_clear_flag(struct iowait *wait, u32 flag);
-static inline void iowait_init(
- struct iowait *wait,
- u32 tx_limit,
- void (*func)(struct work_struct *work),
- int (*sleep)(
- struct sdma_engine *sde,
- struct iowait *wait,
- struct sdma_txreq *tx,
- uint seq,
- bool pkts_sent),
- void (*wakeup)(struct iowait *wait, int reason),
- void (*sdma_drained)(struct iowait *wait))
-{
- wait->count = 0;
- wait->lock = NULL;
- INIT_LIST_HEAD(&wait->list);
- INIT_LIST_HEAD(&wait->tx_head);
- INIT_WORK(&wait->iowork, func);
- init_waitqueue_head(&wait->wait_dma);
- init_waitqueue_head(&wait->wait_pio);
- atomic_set(&wait->sdma_busy, 0);
- atomic_set(&wait->pio_busy, 0);
- wait->tx_limit = tx_limit;
- wait->sleep = sleep;
- wait->wakeup = wakeup;
- wait->sdma_drained = sdma_drained;
-}
+void iowait_init(struct iowait *wait, u32 tx_limit,
+ void (*func)(struct work_struct *work),
+ void (*tidfunc)(struct work_struct *work),
+ int (*sleep)(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ uint seq,
+ bool pkts_sent),
+ void (*wakeup)(struct iowait *wait, int reason),
+ void (*sdma_drained)(struct iowait *wait));
/**
- * iowait_schedule() - initialize wait structure
+ * iowait_schedule() - schedule the default send engine work
* @wait: wait struct to schedule
* @wq: workqueue for schedule
* @cpu: cpu
*/
-static inline void iowait_schedule(
- struct iowait *wait,
- struct workqueue_struct *wq,
- int cpu)
+static inline bool iowait_schedule(struct iowait *wait,
+ struct workqueue_struct *wq, int cpu)
{
- queue_work_on(cpu, wq, &wait->iowork);
+ return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_IB_SE].iowork);
}
/**
@@ -228,6 +233,8 @@ static inline void iowait_sdma_add(struct iowait *wait, int count)
*/
static inline int iowait_sdma_dec(struct iowait *wait)
{
+ if (!wait)
+ return 0;
return atomic_dec_and_test(&wait->sdma_busy);
}
@@ -267,11 +274,13 @@ static inline void iowait_pio_inc(struct iowait *wait)
}
/**
- * iowait_sdma_dec - note pio complete
+ * iowait_pio_dec - note pio complete
* @wait: iowait structure
*/
static inline int iowait_pio_dec(struct iowait *wait)
{
+ if (!wait)
+ return 0;
return atomic_dec_and_test(&wait->pio_busy);
}
@@ -293,9 +302,9 @@ static inline void iowait_drain_wakeup(struct iowait *wait)
/**
* iowait_get_txhead() - get packet off of iowait list
*
- * @wait wait struture
+ * @wait: iowait_work structure
*/
-static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
+static inline struct sdma_txreq *iowait_get_txhead(struct iowait_work *wait)
{
struct sdma_txreq *tx = NULL;
@@ -309,6 +318,28 @@ static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
return tx;
}
+static inline u16 iowait_get_desc(struct iowait_work *w)
+{
+ u16 num_desc = 0;
+ struct sdma_txreq *tx = NULL;
+
+ if (!list_empty(&w->tx_head)) {
+ tx = list_first_entry(&w->tx_head, struct sdma_txreq,
+ list);
+ num_desc = tx->num_desc;
+ }
+ return num_desc;
+}
+
+static inline u32 iowait_get_all_desc(struct iowait *w)
+{
+ u32 num_desc = 0;
+
+ num_desc = iowait_get_desc(&w->wait[IOWAIT_IB_SE]);
+ num_desc += iowait_get_desc(&w->wait[IOWAIT_TID_SE]);
+ return num_desc;
+}
+
/**
* iowait_queue - Put the iowait on a wait queue
* @pkts_sent: have some packets been sent before queuing?
@@ -372,12 +403,57 @@ static inline void iowait_starve_find_max(struct iowait *w, u8 *max,
}
/**
- * iowait_packet_queued() - determine if a packet is already built
- * @wait: the wait structure
+ * iowait_packet_queued() - determine if a packet is queued
+ * @wait: the iowait_work structure
*/
-static inline bool iowait_packet_queued(struct iowait *wait)
+static inline bool iowait_packet_queued(struct iowait_work *wait)
{
return !list_empty(&wait->tx_head);
}
+/**
+ * iowait_inc_wait_count - increment wait counts
+ * @w: the iowait_work struct
+ * @n: the count
+ */
+static inline void iowait_inc_wait_count(struct iowait_work *w, u16 n)
+{
+ if (!w)
+ return;
+ w->iow->tx_count++;
+ w->iow->count += n;
+}
+
+/**
+ * iowait_get_tid_work - return iowait_work for tid SE
+ * @w: the iowait struct
+ */
+static inline struct iowait_work *iowait_get_tid_work(struct iowait *w)
+{
+ return &w->wait[IOWAIT_TID_SE];
+}
+
+/**
+ * iowait_get_ib_work - return iowait_work for ib SE
+ * @w: the iowait struct
+ */
+static inline struct iowait_work *iowait_get_ib_work(struct iowait *w)
+{
+ return &w->wait[IOWAIT_IB_SE];
+}
+
+/**
+ * iowait_ioww_to_iow - return iowait given iowait_work
+ * @w: the iowait_work struct
+ */
+static inline struct iowait *iowait_ioww_to_iow(struct iowait_work *w)
+{
+ if (likely(w))
+ return w->iow;
+ return NULL;
+}
+
+void iowait_cancel_work(struct iowait *w);
+int iowait_set_work_flag(struct iowait_work *w);
+
#endif
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 0307405491e0..88a0cf930136 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -4836,7 +4836,7 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
int ret;
int pkey_idx;
int local_mad = 0;
- u32 resp_len = 0;
+ u32 resp_len = in_wc->byte_len - sizeof(*in_grh);
struct hfi1_ibport *ibp = to_iport(ibdev, port);
pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
new file mode 100644
index 000000000000..d920b165d696
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "hfi.h"
+#include "affinity.h"
+#include "sdma.h"
+
+/**
+ * msix_initialize() - Calculate, request and configure MSIx IRQs
+ * @dd: valid hfi1 devdata
+ *
+ */
+int msix_initialize(struct hfi1_devdata *dd)
+{
+ u32 total;
+ int ret;
+ struct hfi1_msix_entry *entries;
+
+ /*
+ * MSIx interrupt count:
+ * one for the general, "slow path" interrupt
+ * one per used SDMA engine
+ * one per kernel receive context
+ * one for each VNIC context
+ * ...any new IRQs should be added here.
+ */
+ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
+
+ if (total >= CCE_NUM_MSIX_VECTORS)
+ return -EINVAL;
+
+ ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
+ return ret;
+ }
+
+ entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),
+ GFP_KERNEL);
+ if (!entries) {
+ pci_free_irq_vectors(dd->pcidev);
+ return -ENOMEM;
+ }
+
+ dd->msix_info.msix_entries = entries;
+ spin_lock_init(&dd->msix_info.msix_lock);
+ bitmap_zero(dd->msix_info.in_use_msix, total);
+ dd->msix_info.max_requested = total;
+ dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
+
+ return 0;
+}
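Because min_vecs equals max_vecs here, pci_alloc_irq_vectors() is all or nothing: it returns exactly 'total' vectors or a negative error, so no partial-allocation fallback remains. A condensed sketch of that call pattern (kernel context assumed):

/* Sketch: exact-count MSI-X allocation -- grant all of 'total' or fail. */
static int example_alloc_vectors(struct pci_dev *pdev, unsigned int total)
{
	int nvec = pci_alloc_irq_vectors(pdev, total, total, PCI_IRQ_MSIX);

	if (nvec < 0)
		return nvec;	/* fewer than 'total' vectors available */
	return 0;		/* undo with pci_free_irq_vectors(pdev) */
}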
+
+/**
+ * msix_request_irq() - Allocate a free MSIx IRQ
+ * @dd: valid devdata
+ * @arg: context information for the IRQ
+ * @handler: IRQ handler
+ * @thread: IRQ thread handler (could be NULL)
+ * @idx: zero-based index if multiple devices are needed
+ * @type: affinity IRQ type
+ *
+ * Allocates an MSIx vector if available, and then creates the appropriate
+ * metadata needed to keep track of the PCI IRQ request.
+ *
+ * Return:
+ * < 0 Error
+ * >= 0 MSIx vector
+ *
+ */
+static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
+ irq_handler_t handler, irq_handler_t thread,
+ u32 idx, enum irq_type type)
+{
+ unsigned long nr;
+ int irq;
+ int ret;
+ const char *err_info;
+ char name[MAX_NAME_SIZE];
+ struct hfi1_msix_entry *me;
+
+ /* Allocate an MSIx vector */
+ spin_lock(&dd->msix_info.msix_lock);
+ nr = find_first_zero_bit(dd->msix_info.in_use_msix,
+ dd->msix_info.max_requested);
+ if (nr < dd->msix_info.max_requested)
+ __set_bit(nr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+
+ if (nr == dd->msix_info.max_requested)
+ return -ENOSPC;
+
+ /* Type-specific verification and IRQ name selection */
+ switch (type) {
+ case IRQ_GENERAL:
+ /* general interrupt must be MSIx vector 0 */
+ if (nr) {
+ spin_lock(&dd->msix_info.msix_lock);
+ __clear_bit(nr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+ dd_dev_err(dd, "Invalid index %lu for GENERAL IRQ\n",
+ nr);
+ return -EINVAL;
+ }
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
+ err_info = "general";
+ break;
+ case IRQ_SDMA:
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
+ dd->unit, idx);
+ err_info = "sdma";
+ break;
+ case IRQ_RCVCTXT:
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
+ dd->unit, idx);
+ err_info = "receive context";
+ break;
+ case IRQ_OTHER:
+ default:
+ return -EINVAL;
+ }
+ name[sizeof(name) - 1] = 0;
+
+ irq = pci_irq_vector(dd->pcidev, nr);
+ ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: request for IRQ %d failed, MSIx %d, err %d\n",
+ err_info, irq, idx, ret);
+ spin_lock(&dd->msix_info.msix_lock);
+ __clear_bit(nr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+ return ret;
+ }
+
+ /*
+ * assign arg after pci_request_irq call, so it will be
+ * cleaned up
+ */
+ me = &dd->msix_info.msix_entries[nr];
+ me->irq = irq;
+ me->arg = arg;
+ me->type = type;
+
+ /* This is a request, so a failure is not fatal */
+ ret = hfi1_get_irq_affinity(dd, me);
+ if (ret)
+ dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
+
+ return nr;
+}
+
+/**
+ * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * @rcd: valid rcd context
+ *
+ */
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+{
+ int nr;
+
+ nr = msix_request_irq(rcd->dd, rcd, receive_context_interrupt,
+ receive_context_thread, rcd->ctxt, IRQ_RCVCTXT);
+ if (nr < 0)
+ return nr;
+
+ /*
+ * Set the interrupt register and mask for this
+ * context's interrupt.
+ */
+ rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64;
+ rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64);
+ rcd->msix_intr = nr;
+ remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);
+
+ return 0;
+}
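ireg/imask simply cache the CSR coordinates of this context's RcvAvail source. A standalone check of the arithmetic for context 3: source 256 + 3 = 259 lands in register 259 / 64 = 4 at bit 259 % 64 = 3.

#include <assert.h>
#include <stdint.h>

#define IS_RCVAVAIL_START 256

int main(void)
{
	int ctxt = 3;
	int src = IS_RCVAVAIL_START + ctxt;		/* 259 */
	int ireg = src / 64;				/* CSR index */
	uint64_t imask = (uint64_t)1 << (src % 64);	/* bit in that CSR */

	assert(ireg == 4);
	assert(imask == (uint64_t)1 << 3);
	return 0;
}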
+
+/**
+ * msix_request_sdma_irq() - Helper for getting SDMA IRQ resources
+ * @sde: valid sdma engine
+ *
+ */
+int msix_request_sdma_irq(struct sdma_engine *sde)
+{
+ int nr;
+
+ nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
+ sde->this_idx, IRQ_SDMA);
+ if (nr < 0)
+ return nr;
+ sde->msix_intr = nr;
+ remap_sdma_interrupts(sde->dd, sde->this_idx, nr);
+
+ return 0;
+}
+
+/**
+ * enable_sdma_srcs() - Helper to enable SDMA IRQ sources
+ * @dd: valid devdata structure
+ * @i: index of SDMA engine
+ */
+static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
+{
+ set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
+ set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
+ IS_SDMA_PROGRESS_START + i, true);
+ set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
+ set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
+ true);
+}
+
+/**
+ * msix_request_irqs() - Allocate all MSIx IRQs
+ * @dd: valid devdata structure
+ *
+ * Helper function to request the used MSIx IRQs.
+ *
+ */
+int msix_request_irqs(struct hfi1_devdata *dd)
+{
+ int i;
+ int ret;
+
+ ret = msix_request_irq(dd, dd, general_interrupt, NULL, 0, IRQ_GENERAL);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dd->num_sdma; i++) {
+ struct sdma_engine *sde = &dd->per_sdma[i];
+
+ ret = msix_request_sdma_irq(sde);
+ if (ret)
+ return ret;
+ enable_sdma_srcs(sde->dd, i);
+ }
+
+ for (i = 0; i < dd->n_krcv_queues; i++) {
+ struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);
+
+ if (rcd)
+ ret = msix_request_rcd_irq(rcd);
+ hfi1_rcd_put(rcd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * msix_free_irq() - Free the specified MSIx resources and IRQ
+ * @dd: valid devdata
+ * @msix_intr: MSIx vector to free.
+ *
+ */
+void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
+{
+ struct hfi1_msix_entry *me;
+
+ if (msix_intr >= dd->msix_info.max_requested)
+ return;
+
+ me = &dd->msix_info.msix_entries[msix_intr];
+
+ if (!me->arg) /* => no irq, no affinity */
+ return;
+
+ hfi1_put_irq_affinity(dd, me);
+ pci_free_irq(dd->pcidev, msix_intr, me->arg);
+
+ me->arg = NULL;
+
+ spin_lock(&dd->msix_info.msix_lock);
+ __clear_bit(msix_intr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+}
+
+/**
+ * msix_clean_up_interrupts() - Free all MSIx IRQ resources
+ * @dd: valid device data structure
+ *
+ * Free the MSIx and associated PCI resources, if they have been allocated.
+ */
+void msix_clean_up_interrupts(struct hfi1_devdata *dd)
+{
+ int i;
+ struct hfi1_msix_entry *me = dd->msix_info.msix_entries;
+
+ /* remove irqs - must happen before disabling/turning off */
+ for (i = 0; i < dd->msix_info.max_requested; i++, me++)
+ msix_free_irq(dd, i);
+
+ /* clean structures */
+ kfree(dd->msix_info.msix_entries);
+ dd->msix_info.msix_entries = NULL;
+ dd->msix_info.max_requested = 0;
+
+ pci_free_irq_vectors(dd->pcidev);
+}
+
+/**
+ * msix_vnic_synchronize_irq() - Synchronize the VNIC IRQs
+ * @dd: valid devdata
+ */
+void msix_vnic_synchronize_irq(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->vnic.num_ctxt; i++) {
+ struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
+ struct hfi1_msix_entry *me;
+
+ me = &dd->msix_info.msix_entries[rcd->msix_intr];
+
+ synchronize_irq(me->irq);
+ }
+}
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/drivers/infiniband/hw/hfi1/msix.h
index d08805032f01..a514881632a4 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
+++ b/drivers/infiniband/hw/hfi1/msix.h
@@ -1,13 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
- * Initialization code for multi buffer SHA256 algorithm for AVX2
+ * Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2016 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -17,26 +16,21 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@@ -49,21 +43,22 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*/
+#ifndef _HFI1_MSIX_H
+#define _HFI1_MSIX_H
-#include "sha512_mb_mgr.h"
+#include "hfi.h"
-void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
-{
- unsigned int j;
+/* MSIx interface */
+int msix_initialize(struct hfi1_devdata *dd);
+int msix_request_irqs(struct hfi1_devdata *dd);
+void msix_clean_up_interrupts(struct hfi1_devdata *dd);
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
+int msix_request_sdma_irq(struct sdma_engine *sde);
+void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr);
- /* initially all lanes are unused */
- state->lens[0] = 0xFFFFFFFF00000000;
- state->lens[1] = 0xFFFFFFFF00000001;
- state->lens[2] = 0xFFFFFFFF00000002;
- state->lens[3] = 0xFFFFFFFF00000003;
+/* VNIC interface */
+void msix_vnic_synchronize_irq(struct hfi1_devdata *dd);
- state->unused_lanes = 0xFF03020100;
- for (j = 0; j < 4; j++)
- state->ldata[j].job_in_lane = NULL;
-}
+#endif
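Taken together, the interface implies this call order at device bring-up; the function below is a sketch of the intended sequence under that assumption, not additional driver code.

/* Sketch of the intended call order (kernel context assumed). */
static int example_irq_bringup(struct hfi1_devdata *dd)
{
	int ret = msix_initialize(dd);		/* vectors + bookkeeping */

	if (ret)
		return ret;

	ret = msix_request_irqs(dd);		/* general, SDMA, rcv ctxts */
	if (ret)
		msix_clean_up_interrupts(dd);	/* frees IRQs and vectors */

	/* dynamic users later pair msix_request_rcd_irq()/msix_free_irq() */
	return ret;
}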
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 6c967dde58e7..c96d193bb236 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -61,19 +61,12 @@
*/
/*
- * Code to adjust PCIe capabilities.
- */
-static void tune_pcie_caps(struct hfi1_devdata *);
-
-/*
* Do all the common PCIe setup and initialization.
- * devdata is not yet allocated, and is not allocated until after this
- * routine returns success. Therefore dd_dev_err() can't be used for error
- * printing.
*/
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+int hfi1_pcie_init(struct hfi1_devdata *dd)
{
int ret;
+ struct pci_dev *pdev = dd->pcidev;
ret = pci_enable_device(pdev);
if (ret) {
@@ -89,15 +82,13 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
* about that, it appears. If the original BAR was retained
* in the kernel data structures, this may be OK.
*/
- hfi1_early_err(&pdev->dev, "pci enable failed: error %d\n",
- -ret);
- goto done;
+ dd_dev_err(dd, "pci enable failed: error %d\n", -ret);
+ return ret;
}
ret = pci_request_regions(pdev, DRIVER_NAME);
if (ret) {
- hfi1_early_err(&pdev->dev,
- "pci_request_regions fails: err %d\n", -ret);
+ dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);
goto bail;
}
@@ -110,8 +101,7 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- hfi1_early_err(&pdev->dev,
- "Unable to set DMA mask: %d\n", ret);
+ dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
goto bail;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -119,18 +109,16 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
if (ret) {
- hfi1_early_err(&pdev->dev,
- "Unable to set DMA consistent mask: %d\n", ret);
+ dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);
goto bail;
}
pci_set_master(pdev);
(void)pci_enable_pcie_error_reporting(pdev);
- goto done;
+ return 0;
bail:
hfi1_pcie_cleanup(pdev);
-done:
return ret;
}
@@ -206,7 +194,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
dd_dev_err(dd, "WC mapping of send buffers failed\n");
goto nomem;
}
- dd_dev_info(dd, "WC piobase: %p\n for %x", dd->piobase, TXE_PIO_SIZE);
+ dd_dev_info(dd, "WC piobase: %p for %x\n", dd->piobase, TXE_PIO_SIZE);
dd->physaddr = addr; /* used for io_remap, etc. */
@@ -344,26 +332,6 @@ int pcie_speeds(struct hfi1_devdata *dd)
return 0;
}
-/*
- * Returns:
- * - actual number of interrupts allocated or
- * - error
- */
-int request_msix(struct hfi1_devdata *dd, u32 msireq)
-{
- int nvec;
-
- nvec = pci_alloc_irq_vectors(dd->pcidev, msireq, msireq, PCI_IRQ_MSIX);
- if (nvec < 0) {
- dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec);
- return nvec;
- }
-
- tune_pcie_caps(dd);
-
- return nvec;
-}
-
/* restore command and BARs after a reset has wiped them out */
int restore_pci_variables(struct hfi1_devdata *dd)
{
@@ -479,14 +447,19 @@ error:
* Check and optionally adjust them to maximize our throughput.
*/
static int hfi1_pcie_caps;
-module_param_named(pcie_caps, hfi1_pcie_caps, int, S_IRUGO);
+module_param_named(pcie_caps, hfi1_pcie_caps, int, 0444);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
uint aspm_mode = ASPM_MODE_DISABLED;
-module_param_named(aspm, aspm_mode, uint, S_IRUGO);
+module_param_named(aspm, aspm_mode, uint, 0444);
MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
-static void tune_pcie_caps(struct hfi1_devdata *dd)
+/**
+ * tune_pcie_caps() - Code to adjust PCIe capabilities.
+ * @dd: Valid device data structure
+ *
+ */
+void tune_pcie_caps(struct hfi1_devdata *dd)
{
struct pci_dev *parent;
u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
@@ -650,7 +623,6 @@ pci_resume(struct pci_dev *pdev)
struct hfi1_devdata *dd = pci_get_drvdata(pdev);
dd_dev_info(dd, "HFI1 resume function called\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
/*
* Running jobs will fail, since it's asynchronous
* unlike sysfs-requested reset. Better than
@@ -1029,6 +1001,7 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
const u8 (*ctle_tunings)[4];
uint static_ctle_mode;
int return_error = 0;
+ u32 target_width;
/* PCIe Gen3 is for the ASIC only */
if (dd->icode != ICODE_RTL_SILICON)
@@ -1068,6 +1041,9 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
return 0;
}
+ /* Previous Gen1/Gen2 bus width */
+ target_width = dd->lbus_width;
+
/*
* Do the Gen3 transition. Steps are those of the PCIe Gen3
* recipe.
@@ -1436,11 +1412,12 @@ retry:
dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
dd->lbus_info);
- if (dd->lbus_speed != target_speed) { /* not target */
+ if (dd->lbus_speed != target_speed ||
+ dd->lbus_width < target_width) { /* not target */
/* maybe retry */
do_retry = retry_count < pcie_retry;
- dd_dev_err(dd, "PCIe link speed did not switch to Gen%d%s\n",
- pcie_target, do_retry ? ", retrying" : "");
+ dd_dev_err(dd, "PCIe link speed or width did not match target%s\n",
+ do_retry ? ", retrying" : "");
retry_count++;
if (do_retry) {
msleep(100); /* allow time to settle */
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index c2c1cba5b23b..9ab50d2308dc 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -71,14 +71,6 @@ void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
}
}
-/* defined in header release 48 and higher */
-#ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
-#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
-#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
-#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
- << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
-#endif
-
/* global control of PIO send */
void pio_send_control(struct hfi1_devdata *dd, int op)
{
@@ -86,6 +78,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
unsigned long flags;
int write = 1; /* write sendctrl back */
int flush = 0; /* re-read sendctrl to make sure it is flushed */
+ int i;
spin_lock_irqsave(&dd->sendctrl_lock, flags);
@@ -95,9 +88,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
reg |= SEND_CTRL_SEND_ENABLE_SMASK;
/* Fall through */
case PSC_DATA_VL_ENABLE:
+ mask = 0;
+ for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+ if (!dd->vld[i].mtu)
+ mask |= BIT_ULL(i);
/* Disallow sending on VLs not enabled */
- mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
- SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+ mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+ SEND_CTRL_UNSUPPORTED_VL_SHIFT;
reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
break;
case PSC_GLOBAL_DISABLE:
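The rewritten PSC_DATA_VL_ENABLE case derives the disabled set from per-VL MTUs instead of the num_vls parameter: any VL whose MTU is zero is marked unsupported. A standalone rendering of that mask math with illustrative MTU values:

#include <stdint.h>
#include <stdio.h>

#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xFFull

int main(void)
{
	/* illustrative per-VL MTUs: VLs 0-3 configured, VLs 4-7 unused */
	unsigned int mtu[8] = { 4096, 4096, 4096, 4096, 0, 0, 0, 0 };
	uint64_t mask = 0;

	for (int i = 0; i < 8; i++)
		if (!mtu[i])
			mask |= (uint64_t)1 << i;	/* no MTU: disable */

	mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
	       SEND_CTRL_UNSUPPORTED_VL_SHIFT;
	printf("unsupported VL field = 0x%llx\n",	/* prints 0x780 */
	       (unsigned long long)mask);
	return 0;
}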
@@ -921,20 +918,18 @@ void sc_free(struct send_context *sc)
void sc_disable(struct send_context *sc)
{
u64 reg;
- unsigned long flags;
struct pio_buf *pbuf;
if (!sc)
return;
/* do all steps, even if already disabled */
- spin_lock_irqsave(&sc->alloc_lock, flags);
+ spin_lock_irq(&sc->alloc_lock);
reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
sc->flags &= ~SCF_ENABLED;
sc_wait_for_packet_egress(sc, 1);
write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
- spin_unlock_irqrestore(&sc->alloc_lock, flags);
/*
* Flush any waiters. Once the context is disabled,
@@ -944,7 +939,7 @@ void sc_disable(struct send_context *sc)
* proceed with the flush.
*/
udelay(1);
- spin_lock_irqsave(&sc->release_lock, flags);
+ spin_lock(&sc->release_lock);
if (sc->sr) { /* this context has a shadow ring */
while (sc->sr_tail != sc->sr_head) {
pbuf = &sc->sr[sc->sr_tail].pbuf;
@@ -955,7 +950,8 @@ void sc_disable(struct send_context *sc)
sc->sr_tail = 0;
}
}
- spin_unlock_irqrestore(&sc->release_lock, flags);
+ spin_unlock(&sc->release_lock);
+ spin_unlock_irq(&sc->alloc_lock);
}
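sc_disable() now takes alloc_lock with interrupts disabled and nests release_lock inside it, so buffer allocation and the shadow-ring flush form one critical section; the inner lock can be a plain spin_lock because interrupts are already off. A schematic of the nesting (kernel context; work elided):

/* Schematic of the new lock nesting in sc_disable(); work is elided. */
static void example_nested_disable(struct send_context *sc)
{
	spin_lock_irq(&sc->alloc_lock);		/* blocks new PIO allocs */
	/* ... disable the context, wait for packet egress ... */
	spin_lock(&sc->release_lock);		/* irqs already disabled */
	/* ... flush the shadow ring, calling back any waiters ... */
	spin_unlock(&sc->release_lock);
	spin_unlock_irq(&sc->alloc_lock);
}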
/* return SendEgressCtxtStatus.PacketOccupancy */
@@ -1178,11 +1174,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
sc = dd->send_contexts[i].sc;
if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
continue;
+ if (sc->flags & SCF_LINK_DOWN)
+ continue;
sc_enable(sc); /* will clear the sc frozen flag */
}
}
+/**
+ * pio_kernel_linkup() - Re-enable send contexts after linkup event
+ * @dd: valid device data
+ *
+ * When the link goes down, the freeze path is taken. However, a link down
+ * event is different from a freeze: if the send context is re-enabled,
+ * whoever was sending data will start sending again, which will hang any
+ * QP that is sending data.
+ *
+ * The freeze path now looks at the type of event that occurred and takes
+ * this path for a link down event.
+ */
+void pio_kernel_linkup(struct hfi1_devdata *dd)
+{
+ struct send_context *sc;
+ int i;
+
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ sc = dd->send_contexts[i].sc;
+ if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
+ continue;
+
+ sc_enable(sc); /* will clear the sc link down flag */
+ }
+}
+
/*
* Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
* Returns:
@@ -1382,11 +1406,10 @@ void sc_stop(struct send_context *sc, int flag)
{
unsigned long flags;
- /* mark the context */
- sc->flags |= flag;
-
/* stop buffer allocations */
spin_lock_irqsave(&sc->alloc_lock, flags);
+ /* mark the context */
+ sc->flags |= flag;
sc->flags &= ~SCF_ENABLED;
spin_unlock_irqrestore(&sc->alloc_lock, flags);
wake_up(&sc->halt_wait);
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 058b08f459ab..aaf372c3e5d6 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -139,6 +139,7 @@ struct send_context {
#define SCF_IN_FREE 0x02
#define SCF_HALTED 0x04
#define SCF_FROZEN 0x08
+#define SCF_LINK_DOWN 0x10
struct send_context_info {
struct send_context *sc; /* allocated working context */
@@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc);
void pio_reset_all(struct hfi1_devdata *dd);
void pio_freeze(struct hfi1_devdata *dd);
void pio_kernel_unfreeze(struct hfi1_devdata *dd);
+void pio_kernel_linkup(struct hfi1_devdata *dd);
/* global PIO send control operations */
#define PSC_GLOBAL_ENABLE 0
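The new SCF_LINK_DOWN bit lets the two recovery paths tell a freeze from a link bounce: pio_kernel_unfreeze() now skips contexts marked link-down, and pio_kernel_linkup() re-enables only those. A compact sketch of that division of labor, using stand-in types rather than the real send_context:

/*
 * Sketch of how the two recovery paths divide work, with a stand-in
 * struct in place of send_context; not the driver's actual control
 * flow (sc_enable() does the real flag clearing).
 */
#define SCF_FROZEN    0x08
#define SCF_LINK_DOWN 0x10

struct ctx { unsigned int flags; };

static void reenable_after_unfreeze(struct ctx *sc)
{
	if ((sc->flags & SCF_FROZEN) && !(sc->flags & SCF_LINK_DOWN))
		sc->flags &= ~SCF_FROZEN;	/* freeze-only contexts recover here */
}

static void reenable_after_linkup(struct ctx *sc)
{
	if (sc->flags & SCF_LINK_DOWN)
		sc->flags &= ~SCF_LINK_DOWN;	/* deferred until the link is back */
}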
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 9b1e84a6b1cc..6f3bc4dab858 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(qp_table_size, "QP table size");
static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *stx,
unsigned int seq,
bool pkts_sent);
@@ -134,15 +134,13 @@ const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
};
-static void flush_tx_list(struct rvt_qp *qp)
+static void flush_list_head(struct list_head *l)
{
- struct hfi1_qp_priv *priv = qp->priv;
-
- while (!list_empty(&priv->s_iowait.tx_head)) {
+ while (!list_empty(l)) {
struct sdma_txreq *tx;
tx = list_first_entry(
- &priv->s_iowait.tx_head,
+ l,
struct sdma_txreq,
list);
list_del_init(&tx->list);
@@ -151,6 +149,14 @@ static void flush_tx_list(struct rvt_qp *qp)
}
}
+static void flush_tx_list(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ flush_list_head(&iowait_get_ib_work(&priv->s_iowait)->tx_head);
+ flush_list_head(&iowait_get_tid_work(&priv->s_iowait)->tx_head);
+}
+
static void flush_iowait(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
@@ -282,33 +288,46 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
}
/**
- * hfi1_check_send_wqe - validate wqe
+ * hfi1_setup_wqe - set up the wqe
* @qp - The qp
* @wqe - The built wqe
+ * @call_send - Determine if the send should be posted or scheduled.
*
- * validate wqe. This is called
- * prior to inserting the wqe into
- * the ring but after the wqe has been
- * setup.
+ * Perform setup of the wqe. This is called
+ * prior to inserting the wqe into the ring but after
+ * the wqe has been setup by RDMAVT. This function
+ * allows the driver the opportunity to perform
+ * validation and additional setup of the wqe.
*
* Returns 0 on success, -EINVAL on failure
*
*/
-int hfi1_check_send_wqe(struct rvt_qp *qp,
- struct rvt_swqe *wqe)
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct rvt_ah *ah;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
case IB_QPT_UC:
if (wqe->length > 0x80000000U)
return -EINVAL;
+ if (wqe->length > qp->pmtu)
+ *call_send = false;
break;
case IB_QPT_SMI:
- ah = ibah_to_rvtah(wqe->ud_wr.ah);
- if (wqe->length > (1 << ah->log_pmtu))
+ /*
+ * SM packets should exclusively use VL15 and their SL is
+ * ignored (IBTA v1.3, Section 3.5.8.2). Therefore, when ah
+ * is created, SL is 0 in most cases and as a result some
+ * fields (vl and pmtu) in ah may not be set correctly,
+ * depending on the SL2SC and SC2VL tables at the time.
+ */
+ ppd = ppd_from_ibp(ibp);
+ dd = dd_from_ppd(ppd);
+ if (wqe->length > dd->vld[15].mtu)
return -EINVAL;
break;
case IB_QPT_GSI:
@@ -321,7 +340,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp,
default:
break;
}
- return wqe->length <= piothreshold;
+ return 0;
}
/**
@@ -333,7 +352,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp,
* It is only used in the post send, which doesn't hold
* the s_lock.
*/
-void _hfi1_schedule_send(struct rvt_qp *qp)
+bool _hfi1_schedule_send(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibport *ibp =
@@ -341,10 +360,10 @@ void _hfi1_schedule_send(struct rvt_qp *qp)
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
- priv->s_sde ?
- priv->s_sde->cpu :
- cpumask_first(cpumask_of_node(dd->node)));
+ return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
+ priv->s_sde ?
+ priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(dd->node)));
}
static void qp_pio_drain(struct rvt_qp *qp)
@@ -372,12 +391,32 @@ static void qp_pio_drain(struct rvt_qp *qp)
*
* This schedules qp progress and caller should hold
* the s_lock.
+ * @return true if the first leg is scheduled;
+ * false if the first leg is not scheduled.
*/
-void hfi1_schedule_send(struct rvt_qp *qp)
+bool hfi1_schedule_send(struct rvt_qp *qp)
{
lockdep_assert_held(&qp->s_lock);
- if (hfi1_send_ok(qp))
+ if (hfi1_send_ok(qp)) {
_hfi1_schedule_send(qp);
+ return true;
+ }
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
+ IOWAIT_PENDING_IB);
+ return false;
+}
+
+static void hfi1_qp_schedule(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ bool ret;
+
+ if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) {
+ ret = hfi1_schedule_send(qp);
+ if (ret)
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+ }
}
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
@@ -388,16 +427,22 @@ void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
if (qp->s_flags & flag) {
qp->s_flags &= ~flag;
trace_hfi1_qpwakeup(qp, flag);
- hfi1_schedule_send(qp);
+ hfi1_qp_schedule(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
/* Notify hfi1_destroy_qp() if it is waiting. */
rvt_put_qp(qp);
}
+void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
+{
+ if (iowait_set_work_flag(wait) == IOWAIT_IB_SE)
+ qp->s_flags &= ~RVT_S_BUSY;
+}
+
static int iowait_sleep(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *stx,
uint seq,
bool pkts_sent)
@@ -438,7 +483,7 @@ static int iowait_sleep(
rvt_get_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_qp_unbusy(qp, wait);
spin_unlock_irqrestore(&qp->s_lock, flags);
ret = -EBUSY;
} else {
@@ -637,6 +682,7 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
&priv->s_iowait,
1,
_hfi1_do_send,
+ NULL,
iowait_sleep,
iowait_wakeup,
iowait_sdma_drained);
@@ -686,7 +732,7 @@ void stop_send_queue(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
- cancel_work_sync(&priv->s_iowait.iowork);
+ iowait_cancel_work(&priv->s_iowait);
}
void quiesce_qp(struct rvt_qp *qp)
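The scheduling rework above turns hfi1_schedule_send() into a predicate and adds an IOWAIT_PENDING_IB flag so a failed schedule is not lost: the send side records the deferred work, and the wakeup side retries and clears the flag only once a schedule actually succeeds. A standalone sketch of that handshake, with stub helpers in place of the iowait_*_flag() routines:

#include <stdbool.h>

#define IOWAIT_PENDING_IB 0x1UL

struct waiter { unsigned long flags; };

/* stands in for hfi1_schedule_send(); pretend it always succeeds */
static bool try_schedule(void) { return true; }

/* send side: could not make progress, so record the deferred work */
static void mark_pending(struct waiter *w)
{
	w->flags |= IOWAIT_PENDING_IB;
}

/* wakeup side: clear the flag only after a schedule actually lands */
static void qp_schedule(struct waiter *w)
{
	if ((w->flags & IOWAIT_PENDING_IB) && try_schedule())
		w->flags &= ~IOWAIT_PENDING_IB;
}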
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 078cff7560b6..7adb6dff6813 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -58,18 +58,6 @@ extern unsigned int hfi1_qp_table_size;
extern const struct rvt_operation_params hfi1_post_parms[];
/*
- * Send if not busy or waiting for I/O and either
- * a RC response is pending or we can process send work requests.
- */
-static inline int hfi1_send_ok(struct rvt_qp *qp)
-{
- return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
- (verbs_txreq_queued(qp) ||
- (qp->s_flags & RVT_S_RESP_PENDING) ||
- !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
-}
-
-/*
* Driver specific s_flags starting at bit 31 down to HFI1_S_MIN_BIT_MASK
*
* HFI1_S_AHG_VALID - ahg header valid on chip
@@ -90,6 +78,20 @@ static inline int hfi1_send_ok(struct rvt_qp *qp)
#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
/*
+ * Send if not busy or waiting for I/O and either
+ * a RC response is pending or we can process send work requests.
+ */
+static inline int hfi1_send_ok(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ return !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)) &&
+ (verbs_txreq_queued(iowait_get_ib_work(&priv->s_iowait)) ||
+ (qp->s_flags & RVT_S_RESP_PENDING) ||
+ !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
+}
+
+/*
* free_ahg - clear ahg from QP
*/
static inline void clear_ahg(struct rvt_qp *qp)
@@ -129,8 +131,8 @@ struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5);
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);
-void _hfi1_schedule_send(struct rvt_qp *qp);
-void hfi1_schedule_send(struct rvt_qp *qp);
+bool _hfi1_schedule_send(struct rvt_qp *qp);
+bool hfi1_schedule_send(struct rvt_qp *qp);
void hfi1_migrate_qp(struct rvt_qp *qp);
@@ -150,4 +152,5 @@ void quiesce_qp(struct rvt_qp *qp);
u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
int mtu_to_path_mtu(u32 mtu);
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl);
+void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait);
#endif /* _QP_H */
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 9bd63abb2dfe..188aa4f686a0 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -309,7 +309,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
clear_ahg(qp);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+ rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
/* will get called again */
goto done_free_tx;
@@ -378,9 +378,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
wqe->wr.ex.invalidate_rkey);
local_ops = 1;
}
- hfi1_send_complete(qp, wqe,
- err ? IB_WC_LOC_PROT_ERR
- : IB_WC_SUCCESS);
+ rvt_send_complete(qp, wqe,
+ err ? IB_WC_LOC_PROT_ERR
+ : IB_WC_SUCCESS);
if (local_ops)
atomic_dec(&qp->local_ops_pending);
goto done_free_tx;
@@ -1043,7 +1043,7 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
hfi1_migrate_qp(qp);
qp->s_retry = qp->s_retry_cnt;
} else if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
return;
} else { /* need to handle delayed completion */
@@ -1468,7 +1468,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
ibp->rvp.n_other_naks++;
class_b:
if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
break;
@@ -1644,7 +1644,8 @@ read_middle:
qp->s_rdma_read_len -= pmtu;
update_last_psn(qp, psn);
spin_unlock_irqrestore(&qp->s_lock, flags);
- hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, false, false);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, pmtu, false, false);
goto bail;
case OP(RDMA_READ_RESPONSE_ONLY):
@@ -1684,7 +1685,8 @@ read_last:
if (unlikely(tlen != qp->s_rdma_read_len))
goto ack_len_err;
aeth = be32_to_cpu(ohdr->u.aeth);
- hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, false, false);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, tlen, false, false);
WARN_ON(qp->s_rdma_read_sge.num_sge);
(void)do_rc_ack(qp, aeth, psn,
OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
@@ -1704,7 +1706,7 @@ ack_len_err:
status = IB_WC_LOC_LEN_ERR;
ack_err:
if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
ack_done:
@@ -2144,7 +2146,7 @@ send_middle:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto nack_inv;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -2200,7 +2202,7 @@ send_last:
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
- hfi1_copy_sge(&qp->r_sge, data, tlen, true, copy_last);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
rvt_put_ss(&qp->r_sge);
qp->r_msn++;
if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 5f56f3c1b4c4..7fb317c711df 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -156,333 +156,6 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
}
/**
- * ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from hfi1_do_send() to
- * forward a WQE addressed to the same HFI.
- * Note that although we are single threaded due to the send engine, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void ruc_loopback(struct rvt_qp *sqp)
-{
- struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct rvt_qp *qp;
- struct rvt_swqe *wqe;
- struct rvt_sge *sge;
- unsigned long flags;
- struct ib_wc wc;
- u64 sdata;
- atomic64_t *maddr;
- enum ib_wc_status send_status;
- bool release;
- int ret;
- bool copy_last = false;
- int local_ops = 0;
-
- rcu_read_lock();
-
- /*
- * Note that we check the responder QP state after
- * checking the requester's state.
- */
- qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
- sqp->remote_qpn);
-
- spin_lock_irqsave(&sqp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT)) ||
- !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- goto unlock;
-
- sqp->s_flags |= RVT_S_BUSY;
-
-again:
- if (sqp->s_last == READ_ONCE(sqp->s_head))
- goto clr_busy;
- wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
-
- /* Return if it is not OK to start a new work request. */
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
- goto clr_busy;
- /* We are in the error state, flush the work request. */
- send_status = IB_WC_WR_FLUSH_ERR;
- goto flush_send;
- }
-
- /*
- * We can rely on the entry not changing without the s_lock
- * being held until we update s_last.
- * We increment s_cur to indicate s_last is in progress.
- */
- if (sqp->s_last == sqp->s_cur) {
- if (++sqp->s_cur >= sqp->s_size)
- sqp->s_cur = 0;
- }
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-
- if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
- qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->rvp.n_pkt_drops++;
- /*
- * For RC, the requester would timeout and retry so
- * shortcut the timeouts and just signal too many retries.
- */
- if (sqp->ibqp.qp_type == IB_QPT_RC)
- send_status = IB_WC_RETRY_EXC_ERR;
- else
- send_status = IB_WC_SUCCESS;
- goto serr;
- }
-
- memset(&wc, 0, sizeof(wc));
- send_status = IB_WC_SUCCESS;
-
- release = true;
- sqp->s_sge.sge = wqe->sg_list[0];
- sqp->s_sge.sg_list = wqe->sg_list + 1;
- sqp->s_sge.num_sge = wqe->wr.num_sge;
- sqp->s_len = wqe->length;
- switch (wqe->wr.opcode) {
- case IB_WR_REG_MR:
- goto send_comp;
-
- case IB_WR_LOCAL_INV:
- if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
- if (rvt_invalidate_rkey(sqp,
- wqe->wr.ex.invalidate_rkey))
- send_status = IB_WC_LOC_PROT_ERR;
- local_ops = 1;
- }
- goto send_comp;
-
- case IB_WR_SEND_WITH_INV:
- if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
- wc.wc_flags = IB_WC_WITH_INVALIDATE;
- wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
- }
- goto send;
-
- case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
- case IB_WR_SEND:
-send:
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- break;
-
- case IB_WR_RDMA_WRITE_WITH_IMM:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- /* skip copy_last set and qp_access_flags recheck */
- goto do_write;
- case IB_WR_RDMA_WRITE:
- copy_last = rvt_is_user_qp(qp);
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
-do_write:
- if (wqe->length == 0)
- break;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_WRITE)))
- goto acc_err;
- qp->r_sge.sg_list = NULL;
- qp->r_sge.num_sge = 1;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_RDMA_READ:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_READ)))
- goto acc_err;
- release = false;
- sqp->s_sge.sg_list = NULL;
- sqp->s_sge.num_sge = 1;
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->wr.num_sge;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- wqe->atomic_wr.remote_addr,
- wqe->atomic_wr.rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
- sdata = wqe->atomic_wr.compare_add;
- *(u64 *)sqp->s_sge.sge.vaddr =
- (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
- (u64)atomic64_add_return(sdata, maddr) - sdata :
- (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
- sdata, wqe->atomic_wr.swap);
- rvt_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- goto send_comp;
-
- default:
- send_status = IB_WC_LOC_QP_OP_ERR;
- goto serr;
- }
-
- sge = &sqp->s_sge.sge;
- while (sqp->s_len) {
- u32 len = sqp->s_len;
-
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- WARN_ON_ONCE(len == 0);
- hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (!release)
- rvt_put_mr(sge->mr);
- if (--sqp->s_sge.num_sge)
- *sge = *sqp->s_sge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- sqp->s_len -= len;
- }
- if (release)
- rvt_put_ss(&qp->r_sge);
-
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- goto send_comp;
-
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
- wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
- spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->rvp.n_loop_pkts++;
-flush_send:
- sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
- hfi1_send_complete(sqp, wqe, send_status);
- if (local_ops) {
- atomic_dec(&sqp->local_ops_pending);
- local_ops = 0;
- }
- goto again;
-
-rnr_nak:
- /* Handle RNR NAK */
- if (qp->ibqp.qp_type == IB_QPT_UC)
- goto send_comp;
- ibp->rvp.n_rnr_naks++;
- /*
- * Note: we don't need the s_lock held since the BUSY flag
- * makes this single threaded.
- */
- if (sqp->s_rnr_retry == 0) {
- send_status = IB_WC_RNR_RETRY_EXC_ERR;
- goto serr;
- }
- if (sqp->s_rnr_retry_cnt < 7)
- sqp->s_rnr_retry--;
- spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
- goto clr_busy;
- rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
- IB_AETH_CREDIT_SHIFT);
- goto clr_busy;
-
-op_err:
- send_status = IB_WC_REM_OP_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-acc_err:
- send_status = IB_WC_REM_ACCESS_ERR;
- wc.status = IB_WC_LOC_PROT_ERR;
-err:
- /* responder goes to error state */
- rvt_rc_error(qp, wc.status);
-
-serr:
- spin_lock_irqsave(&sqp->s_lock, flags);
- hfi1_send_complete(sqp, wqe, send_status);
- if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
- sqp->s_flags &= ~RVT_S_BUSY;
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = sqp->ibqp.device;
- ev.element.qp = &sqp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
- }
- goto done;
- }
-clr_busy:
- sqp->s_flags &= ~RVT_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
- rcu_read_unlock();
-}
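Before this loopback code moves to rdmavt as rvt_ruc_loopback(), note how its atomic cases work: fetch-and-add hands back the pre-add value (the add_return result minus sdata), while compare-and-swap returns whatever cmpxchg() found. A standalone userspace sketch of the same semantics, with GCC builtins standing in for the kernel primitives:

/*
 * Sketch of the loopback atomic handling: both IB atomics must
 * return the prior memory contents to the requester.
 */
#include <stdint.h>

static uint64_t ib_atomic(uint64_t *vaddr, int fetch_add,
			  uint64_t compare_add, uint64_t swap)
{
	if (fetch_add)	/* pre-add value, same as add_return() - sdata */
		return __atomic_fetch_add(vaddr, compare_add, __ATOMIC_SEQ_CST);

	uint64_t expected = compare_add;

	__atomic_compare_exchange_n(vaddr, &expected, swap, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;	/* prior value, matching cmpxchg() */
}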
-
-/**
* hfi1_make_grh - construct a GRH header
* @ibp: a pointer to the IB port
* @hdr: a pointer to the GRH header being constructed
@@ -825,8 +498,8 @@ void hfi1_do_send_from_rvt(struct rvt_qp *qp)
void _hfi1_do_send(struct work_struct *work)
{
- struct iowait *wait = container_of(work, struct iowait, iowork);
- struct rvt_qp *qp = iowait_to_qp(wait);
+ struct iowait_work *w = container_of(work, struct iowait_work, iowork);
+ struct rvt_qp *qp = iowait_to_qp(w->iow);
hfi1_do_send(qp, true);
}
@@ -850,6 +523,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
ps.ppd = ppd_from_ibp(ps.ibp);
ps.in_thread = in_thread;
+ ps.wait = iowait_get_ib_work(&priv->s_iowait);
trace_hfi1_rc_do_send(qp, in_thread);
@@ -858,7 +532,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
~((1 << ps.ppd->lmc) - 1)) ==
ps.ppd->lid)) {
- ruc_loopback(qp);
+ rvt_ruc_loopback(qp);
return;
}
make_req = hfi1_make_rc_req;
@@ -868,7 +542,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
~((1 << ps.ppd->lmc) - 1)) ==
ps.ppd->lid)) {
- ruc_loopback(qp);
+ rvt_ruc_loopback(qp);
return;
}
make_req = hfi1_make_uc_req;
@@ -883,6 +557,8 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
/* Return if we are already busy processing a work request. */
if (!hfi1_send_ok(qp)) {
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
spin_unlock_irqrestore(&qp->s_lock, ps.flags);
return;
}
@@ -896,7 +572,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
ps.pkts_sent = false;
/* insure a pre-built packet is handled */
- ps.s_txreq = get_waiting_verbs_txreq(qp);
+ ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
do {
/* Check for a constructed packet to be sent. */
if (ps.s_txreq) {
@@ -907,6 +583,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
*/
if (hfi1_verbs_send(qp, &ps))
return;
+
/* allow other tasks to run */
if (schedule_send_yield(qp, &ps))
return;
@@ -917,44 +594,3 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}
-
-/*
- * This should be called with s_lock held.
- */
-void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status)
-{
- u32 old_last, last;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- return;
-
- last = qp->s_last;
- old_last = last;
- trace_hfi1_qp_send_completion(qp, wqe, last);
- if (++last >= qp->s_size)
- last = 0;
- trace_hfi1_qp_send_completion(qp, wqe, last);
- qp->s_last = last;
- /* See post_send() */
- barrier();
- rvt_put_swqe(wqe);
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
-
- rvt_qp_swqe_complete(qp,
- wqe,
- ib_hfi1_wc_opcode[wqe->wr.opcode],
- status);
-
- if (qp->s_acked == old_last)
- qp->s_acked = last;
- if (qp->s_cur == old_last)
- qp->s_cur = last;
- if (qp->s_tail == old_last)
- qp->s_tail = last;
- if (qp->state == IB_QPS_SQD && last == qp->s_cur)
- qp->s_draining = 0;
-}
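hfi1_send_complete() is deleted here in favor of rdmavt's rvt_send_complete(), so the ring bookkeeping it performed is worth keeping in view: advance s_last with wraparound, then drag any of s_acked/s_cur/s_tail that were parked on the completed slot along with it. A standalone sketch of that index advance, with field names following the removed code:

/*
 * Sketch of the completion-index advance that moved behind
 * rvt_send_complete(); a bare struct stands in for rvt_qp.
 */
#include <stdint.h>

struct sq { uint32_t s_size, s_last, s_acked, s_cur, s_tail; };

static void advance_send_complete(struct sq *qp)
{
	uint32_t old_last = qp->s_last;
	uint32_t last = old_last;

	if (++last >= qp->s_size)	/* wrap the ring index */
		last = 0;
	qp->s_last = last;

	/* drag any index still parked on the completed slot forward */
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
}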
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 88e326d6cc49..891d2386d1ca 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -378,7 +378,7 @@ static inline void complete_tx(struct sdma_engine *sde,
__sdma_txclean(sde->dd, tx);
if (complete)
(*complete)(tx, res);
- if (wait && iowait_sdma_dec(wait))
+ if (iowait_sdma_dec(wait))
iowait_drain_wakeup(wait);
}
@@ -1758,7 +1758,6 @@ static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
struct iowait *wait, *nw;
struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
uint i, n = 0, seq, max_idx = 0;
- struct sdma_txreq *stx;
struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
u8 max_starved_cnt = 0;
@@ -1779,19 +1778,13 @@ static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
nw,
&sde->dmawait,
list) {
- u16 num_desc = 0;
+ u32 num_desc;
if (!wait->wakeup)
continue;
if (n == ARRAY_SIZE(waits))
break;
- if (!list_empty(&wait->tx_head)) {
- stx = list_first_entry(
- &wait->tx_head,
- struct sdma_txreq,
- list);
- num_desc = stx->num_desc;
- }
+ num_desc = iowait_get_all_desc(wait);
if (num_desc > avail)
break;
avail -= num_desc;
@@ -2346,7 +2339,7 @@ static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
*/
static int sdma_check_progress(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
bool pkts_sent)
{
@@ -2356,12 +2349,12 @@ static int sdma_check_progress(
if (tx->num_desc <= sde->desc_avail)
return -EAGAIN;
/* pulse the head_lock */
- if (wait && wait->sleep) {
+ if (wait && iowait_ioww_to_iow(wait)->sleep) {
unsigned seq;
seq = raw_seqcount_begin(
(const seqcount_t *)&sde->head_lock.seqcount);
- ret = wait->sleep(sde, wait, tx, seq, pkts_sent);
+ ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent);
if (ret == -EAGAIN)
sde->desc_avail = sdma_descq_freecnt(sde);
} else {
@@ -2373,7 +2366,7 @@ static int sdma_check_progress(
/**
* sdma_send_txreq() - submit a tx req to ring
* @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
+ * @wait: SE wait structure to use when full (may be NULL)
* @tx: sdma_txreq to submit
* @pkts_sent: has any packet been sent yet?
*
@@ -2386,7 +2379,7 @@ static int sdma_check_progress(
* -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
*/
int sdma_send_txreq(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
bool pkts_sent)
{
@@ -2397,7 +2390,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
/* user should have supplied entire packet */
if (unlikely(tx->tlen))
return -EINVAL;
- tx->wait = wait;
+ tx->wait = iowait_ioww_to_iow(wait);
spin_lock_irqsave(&sde->tail_lock, flags);
retry:
if (unlikely(!__sdma_running(sde)))
@@ -2406,14 +2399,14 @@ retry:
goto nodesc;
tail = submit_tx(sde, tx);
if (wait)
- iowait_sdma_inc(wait);
+ iowait_sdma_inc(iowait_ioww_to_iow(wait));
sdma_update_tail(sde, tail);
unlock:
spin_unlock_irqrestore(&sde->tail_lock, flags);
return ret;
unlock_noconn:
if (wait)
- iowait_sdma_inc(wait);
+ iowait_sdma_inc(iowait_ioww_to_iow(wait));
tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
tx->sn = sde->tail_sn++;
@@ -2422,10 +2415,7 @@ unlock_noconn:
spin_lock(&sde->flushlist_lock);
list_add_tail(&tx->list, &sde->flushlist);
spin_unlock(&sde->flushlist_lock);
- if (wait) {
- wait->tx_count++;
- wait->count += tx->num_desc;
- }
+ iowait_inc_wait_count(wait, tx->num_desc);
schedule_work(&sde->flush_worker);
ret = -ECOMM;
goto unlock;
@@ -2442,9 +2432,9 @@ nodesc:
/**
* sdma_send_txlist() - submit a list of tx req to ring
* @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
+ * @wait: SE wait structure to use when full (may be NULL)
* @tx_list: list of sdma_txreqs to submit
- * @count: pointer to a u32 which, after return will contain the total number of
+ * @count_out: pointer to a u16 which, after return, will contain the total number of
* sdma_txreqs removed from the tx_list. This will include sdma_txreqs
* whose SDMA descriptors are submitted to the ring and the sdma_txreqs
* which are added to SDMA engine flush list if the SDMA engine state is
@@ -2467,8 +2457,8 @@ nodesc:
* -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
* -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
*/
-int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
- struct list_head *tx_list, u32 *count_out)
+int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
+ struct list_head *tx_list, u16 *count_out)
{
struct sdma_txreq *tx, *tx_next;
int ret = 0;
@@ -2479,7 +2469,7 @@ int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
spin_lock_irqsave(&sde->tail_lock, flags);
retry:
list_for_each_entry_safe(tx, tx_next, tx_list, list) {
- tx->wait = wait;
+ tx->wait = iowait_ioww_to_iow(wait);
if (unlikely(!__sdma_running(sde)))
goto unlock_noconn;
if (unlikely(tx->num_desc > sde->desc_avail))
@@ -2500,8 +2490,9 @@ retry:
update_tail:
total_count = submit_count + flush_count;
if (wait) {
- iowait_sdma_add(wait, total_count);
- iowait_starve_clear(submit_count > 0, wait);
+ iowait_sdma_add(iowait_ioww_to_iow(wait), total_count);
+ iowait_starve_clear(submit_count > 0,
+ iowait_ioww_to_iow(wait));
}
if (tail != INVALID_TAIL)
sdma_update_tail(sde, tail);
@@ -2511,7 +2502,7 @@ update_tail:
unlock_noconn:
spin_lock(&sde->flushlist_lock);
list_for_each_entry_safe(tx, tx_next, tx_list, list) {
- tx->wait = wait;
+ tx->wait = iowait_ioww_to_iow(wait);
list_del_init(&tx->list);
tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
@@ -2520,10 +2511,7 @@ unlock_noconn:
#endif
list_add_tail(&tx->list, &sde->flushlist);
flush_count++;
- if (wait) {
- wait->tx_count++;
- wait->count += tx->num_desc;
- }
+ iowait_inc_wait_count(wait, tx->num_desc);
}
spin_unlock(&sde->flushlist_lock);
schedule_work(&sde->flush_worker);
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 46c775f255d1..6dc63d7c5685 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -62,16 +62,6 @@
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)
-#define SDMA_TXREQ_S_OK 0
-#define SDMA_TXREQ_S_SENDERROR 1
-#define SDMA_TXREQ_S_ABORTED 2
-#define SDMA_TXREQ_S_SHUTDOWN 3
-
-/* flags bits */
-#define SDMA_TXREQ_F_URGENT 0x0001
-#define SDMA_TXREQ_F_AHG_COPY 0x0002
-#define SDMA_TXREQ_F_USE_AHG 0x0004
-
#define SDMA_MAP_NONE 0
#define SDMA_MAP_SINGLE 1
#define SDMA_MAP_PAGE 2
@@ -415,6 +405,7 @@ struct sdma_engine {
struct list_head flushlist;
struct cpumask cpu_mask;
struct kobject kobj;
+ u32 msix_intr;
};
int sdma_init(struct hfi1_devdata *dd, u8 port);
@@ -849,16 +840,16 @@ static inline int sdma_txadd_kvaddr(
dd, SDMA_MAP_SINGLE, tx, addr, len);
}
-struct iowait;
+struct iowait_work;
int sdma_send_txreq(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
bool pkts_sent);
int sdma_send_txlist(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct list_head *tx_list,
- u32 *count);
+ u16 *count_out);
int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
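With the prototypes above, every submission path now takes an iowait_work rather than a bare iowait. An illustrative caller fragment, not compilable on its own; iowait_get_ib_work() and the priv/tx field names are taken from the qp.c and user_sdma.c hunks in this patch:

	ret = sdma_send_txreq(sde,
			      iowait_get_ib_work(&priv->s_iowait),
			      &tx->txreq,
			      ps->pkts_sent);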
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 25e867393463..2be513d4c9da 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -494,17 +494,18 @@ static struct kobj_type hfi1_vl2mtu_ktype = {
* Start of per-unit (or driver, in some cases, but replicated
* per unit) functions (these get a device *)
*/
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
{
struct hfi1_ibdev *dev =
container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hfi(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
@@ -517,8 +518,9 @@ static ssize_t show_hfi(struct device *device, struct device_attribute *attr,
ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
return ret;
}
+static DEVICE_ATTR_RO(board_id);
-static ssize_t show_boardversion(struct device *device,
+static ssize_t boardversion_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -528,8 +530,9 @@ static ssize_t show_boardversion(struct device *device,
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}
+static DEVICE_ATTR_RO(boardversion);
-static ssize_t show_nctxts(struct device *device,
+static ssize_t nctxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -546,8 +549,9 @@ static ssize_t show_nctxts(struct device *device,
min(dd->num_user_contexts,
(u32)dd->sc_sizes[SC_USER].count));
}
+static DEVICE_ATTR_RO(nctxts);
-static ssize_t show_nfreectxts(struct device *device,
+static ssize_t nfreectxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -557,8 +561,9 @@ static ssize_t show_nfreectxts(struct device *device,
/* Return the number of free user ports (contexts) available. */
return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
}
+static DEVICE_ATTR_RO(nfreectxts);
-static ssize_t show_serial(struct device *device,
+static ssize_t serial_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -567,8 +572,9 @@ static ssize_t show_serial(struct device *device,
return scnprintf(buf, PAGE_SIZE, "%s", dd->serial);
}
+static DEVICE_ATTR_RO(serial);
-static ssize_t store_chip_reset(struct device *device,
+static ssize_t chip_reset_store(struct device *device,
struct device_attribute *attr, const char *buf,
size_t count)
{
@@ -586,6 +592,7 @@ static ssize_t store_chip_reset(struct device *device,
bail:
return ret < 0 ? ret : count;
}
+static DEVICE_ATTR_WO(chip_reset);
/*
* Convert the reported temperature from an integer (reported in
@@ -598,7 +605,7 @@ bail:
/*
* Dump tempsense values, in decimal, to ease shell-scripts.
*/
-static ssize_t show_tempsense(struct device *device,
+static ssize_t tempsense_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -622,6 +629,7 @@ static ssize_t show_tempsense(struct device *device,
}
return ret;
}
+static DEVICE_ATTR_RO(tempsense);
/*
* end of per-unit (or driver, in some cases, but replicated
@@ -629,24 +637,20 @@ static ssize_t show_tempsense(struct device *device,
*/
/* start of per-unit file structures and support code */
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hfi, NULL);
-static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
-static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
-static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-
-static struct device_attribute *hfi1_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_board_id,
- &dev_attr_nctxts,
- &dev_attr_nfreectxts,
- &dev_attr_serial,
- &dev_attr_boardversion,
- &dev_attr_tempsense,
- &dev_attr_chip_reset,
+static struct attribute *hfi1_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_nctxts.attr,
+ &dev_attr_nfreectxts.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_boardversion.attr,
+ &dev_attr_tempsense.attr,
+ &dev_attr_chip_reset.attr,
+ NULL,
+};
+
+const struct attribute_group ib_hfi1_attr_group = {
+ .attrs = hfi1_attributes,
};
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
@@ -832,12 +836,6 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
struct device *class_dev = &dev->dev;
int i, j, ret;
- for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i) {
- ret = device_create_file(&dev->dev, hfi1_attributes[i]);
- if (ret)
- goto bail;
- }
-
for (i = 0; i < dd->num_sdma; i++) {
ret = kobject_init_and_add(&dd->per_sdma[i].kobj,
&sde_ktype, &class_dev->kobj,
@@ -855,9 +853,6 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
return 0;
bail:
- for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i)
- device_remove_file(&dev->dev, hfi1_attributes[i]);
-
for (i = 0; i < dd->num_sdma; i++)
kobject_del(&dd->per_sdma[i].kobj);
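The sysfs conversion above is the stock driver-core pattern: DEVICE_ATTR_RO(name) expects a name_show() and emits dev_attr_name, and the attributes are published as one group instead of looped device_create_file()/device_remove_file() calls. A minimal sketch against that API (the "foo" attribute is a made-up example, not part of this driver):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	/* read-only attribute: DEVICE_ATTR_RO() wires up foo_show() */
	return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);

static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,
	NULL,
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,	/* registered once with the device */
};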
diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index 8540463ef3f7..84458f1325e1 100644
--- a/drivers/infiniband/hw/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -62,3 +62,4 @@ __print_symbolic(etype, \
#include "trace_rx.h"
#include "trace_tx.h"
#include "trace_mmu.h"
+#include "trace_iowait.h"
diff --git a/drivers/infiniband/hw/hfi1/trace_iowait.h b/drivers/infiniband/hw/hfi1/trace_iowait.h
new file mode 100644
index 000000000000..27f4334ece2b
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_iowait.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#if !defined(__HFI1_TRACE_IOWAIT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_IOWAIT_H
+
+#include <linux/tracepoint.h>
+#include "iowait.h"
+#include "verbs.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_iowait
+
+DECLARE_EVENT_CLASS(hfi1_iowait_template,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag),
+ TP_STRUCT__entry(/* entry */
+ __field(unsigned long, addr)
+ __field(unsigned long, flags)
+ __field(u32, flag)
+ __field(u32, qpn)
+ ),
+ TP_fast_assign(/* assign */
+ __entry->addr = (unsigned long)wait;
+ __entry->flags = wait->flags;
+ __entry->flag = (1 << flag);
+ __entry->qpn = iowait_to_qp(wait)->ibqp.qp_num;
+ ),
+ TP_printk(/* print */
+ "iowait 0x%lx qp %u flags 0x%lx flag 0x%x",
+ __entry->addr,
+ __entry->qpn,
+ __entry->flags,
+ __entry->flag
+ )
+ );
+
+DEFINE_EVENT(hfi1_iowait_template, hfi1_iowait_set,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag));
+
+DEFINE_EVENT(hfi1_iowait_template, hfi1_iowait_clear,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag));
+
+#endif /* __HFI1_TRACE_IOWAIT_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_iowait
+#include <trace/define_trace.h>
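Each DEFINE_EVENT() above generates a trace_hfi1_iowait_set()/trace_hfi1_iowait_clear() hook. The call sites are not in this excerpt, but presumably live in the iowait flag helpers, roughly along these lines (a sketch, not the driver's verbatim code):

static inline void iowait_set_flag(struct iowait *wait, u32 flag)
{
	trace_hfi1_iowait_set(wait, flag);	/* flag is a bit number */
	set_bit(flag, &wait->flags);
}

static inline void iowait_clear_flag(struct iowait *wait, u32 flag)
{
	trace_hfi1_iowait_clear(wait, flag);
	clear_bit(flag, &wait->flags);
}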
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index e254dcec6f64..6aca0c5a7f97 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -88,7 +88,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
clear_ahg(qp);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done_free_tx;
}
@@ -140,7 +140,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp, wqe->wr.ex.invalidate_rkey);
local_ops = 1;
}
- hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
+ rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
: IB_WC_SUCCESS);
if (local_ops)
atomic_dec(&qp->local_ops_pending);
@@ -426,7 +426,7 @@ send_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto rewind;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, false, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
break;
case OP(SEND_LAST_WITH_IMMEDIATE):
@@ -449,7 +449,7 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len))
goto rewind;
wc.opcode = IB_WC_RECV;
- hfi1_copy_sge(&qp->r_sge, data, tlen, false, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
wc.wr_id = qp->r_wr_id;
@@ -523,7 +523,7 @@ rdma_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto drop;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -550,7 +550,7 @@ rdma_last_imm:
}
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
goto last_imm;
@@ -564,7 +564,7 @@ rdma_last:
tlen -= (hdrsize + extra_bytes);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
break;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 70d39fc450a1..4baa8f4d49de 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -210,8 +210,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
}
hfi1_make_grh(ibp, &grh, &grd, 0, 0);
- hfi1_copy_sge(&qp->r_sge, &grh,
- sizeof(grh), true, false);
+ rvt_copy_sge(qp, &qp->r_sge, &grh,
+ sizeof(grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else {
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
@@ -228,7 +228,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (len > sge->sge_length)
len = sge->sge_length;
WARN_ON_ONCE(len == 0);
- hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
@@ -518,7 +518,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done_free_tx;
}
@@ -560,7 +560,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, tflags);
ps->flags = tflags;
- hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
goto done_free_tx;
}
}
@@ -1019,8 +1019,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
goto drop;
}
if (packet->grh) {
- hfi1_copy_sge(&qp->r_sge, packet->grh,
- sizeof(struct ib_grh), true, false);
+ rvt_copy_sge(qp, &qp->r_sge, packet->grh,
+ sizeof(struct ib_grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
struct ib_grh grh;
@@ -1030,14 +1030,14 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
* out when creating 16B, add back the GRH here.
*/
hfi1_make_ext_grh(packet, &grh, slid, dlid);
- hfi1_copy_sge(&qp->r_sge, &grh,
- sizeof(struct ib_grh), true, false);
+ rvt_copy_sge(qp, &qp->r_sge, &grh,
+ sizeof(struct ib_grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else {
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
}
- hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
- true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
+ true, false);
rvt_put_ss(&qp->r_sge);
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
return;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index a3a7b33196d6..3f0aadccd9f6 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -76,8 +76,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
static unsigned initial_pkt_count = 8;
-static int user_sdma_send_pkts(struct user_sdma_request *req,
- unsigned maxpkts);
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
@@ -101,7 +100,7 @@ static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
static int defer_packet_queue(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *txreq,
uint seq,
bool pkts_sent);
@@ -124,13 +123,13 @@ static struct mmu_rb_ops sdma_rb_ops = {
static int defer_packet_queue(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *txreq,
uint seq,
bool pkts_sent)
{
struct hfi1_user_sdma_pkt_q *pq =
- container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+ container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
struct user_sdma_txreq *tx =
container_of(txreq, struct user_sdma_txreq, txreq);
@@ -187,13 +186,12 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
pq->ctxt = uctxt->ctxt;
pq->subctxt = fd->subctxt;
pq->n_max_reqs = hfi1_sdma_comp_ring_size;
- pq->state = SDMA_PKT_Q_INACTIVE;
atomic_set(&pq->n_reqs, 0);
init_waitqueue_head(&pq->wait);
atomic_set(&pq->n_locked, 0);
pq->mm = fd->mm;
- iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
+ iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
activate_packet_queue, NULL);
pq->reqidx = 0;
@@ -276,7 +274,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
/* Wait until all requests have been freed. */
wait_event_interruptible(
pq->wait,
- (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+ !atomic_read(&pq->n_reqs));
kfree(pq->reqs);
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
@@ -312,6 +310,13 @@ static u8 dlid_to_selector(u16 dlid)
return mapping[hash];
}
+/**
+ * hfi1_user_sdma_process_request() - Process and start a user sdma request
+ * @fd: valid file descriptor
+ * @iovec: array of io vectors to process
+ * @dim: overall iovec array size
+ * @count: number of io vector array entries processed
+ */
int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
struct iovec *iovec, unsigned long dim,
unsigned long *count)
@@ -328,7 +333,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
u8 opcode, sc, vl;
u16 pkey;
u32 slid;
- int req_queued = 0;
u16 dlid;
u32 selector;
@@ -392,7 +396,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
req->data_len = 0;
req->pq = pq;
req->cq = cq;
- req->status = -1;
req->ahg_idx = -1;
req->iov_idx = 0;
req->sent = 0;
@@ -400,12 +403,14 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
req->seqcomp = 0;
req->seqsubmitted = 0;
req->tids = NULL;
- req->done = 0;
req->has_error = 0;
INIT_LIST_HEAD(&req->txps);
memcpy(&req->info, &info, sizeof(info));
+ /* The request is initialized, count it */
+ atomic_inc(&pq->n_reqs);
+
if (req_opcode(info.ctrl) == EXPECTED) {
/* expected must have a TID info and at least one data vector */
if (req->data_iovs < 2) {
@@ -500,7 +505,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
ret = pin_vector_pages(req, &req->iovs[i]);
if (ret) {
req->data_iovs = i;
- req->status = ret;
goto free_req;
}
req->data_len += req->iovs[i].iov.iov_len;
@@ -561,23 +565,11 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
req->ahg_idx = sdma_ahg_alloc(req->sde);
set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
- atomic_inc(&pq->n_reqs);
- req_queued = 1;
+ pq->state = SDMA_PKT_Q_ACTIVE;
/* Send the first N packets in the request to buy us some time */
ret = user_sdma_send_pkts(req, pcount);
- if (unlikely(ret < 0 && ret != -EBUSY)) {
- req->status = ret;
+ if (unlikely(ret < 0 && ret != -EBUSY))
goto free_req;
- }
-
- /*
- * It is possible that the SDMA engine would have processed all the
- * submitted packets by the time we get here. Therefore, only set
- * packet queue state to ACTIVE if there are still uncompleted
- * requests.
- */
- if (atomic_read(&pq->n_reqs))
- xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
/*
* This is a somewhat blocking send implementation.
@@ -588,14 +580,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
while (req->seqsubmitted != req->info.npkts) {
ret = user_sdma_send_pkts(req, pcount);
if (ret < 0) {
- if (ret != -EBUSY) {
- req->status = ret;
- WRITE_ONCE(req->has_error, 1);
- if (READ_ONCE(req->seqcomp) ==
- req->seqsubmitted - 1)
- goto free_req;
- return ret;
- }
+ if (ret != -EBUSY)
+ goto free_req;
wait_event_interruptible_timeout(
pq->busy.wait_dma,
(pq->state == SDMA_PKT_Q_ACTIVE),
@@ -606,10 +592,19 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
*count += idx;
return 0;
free_req:
- user_sdma_free_request(req, true);
- if (req_queued)
+ /*
+ * If seqsubmitted == npkts, the completion routine controls the
+ * final state. If seqsubmitted < npkts, wait for any outstanding
+ * packets to finish before cleaning up.
+ */
+ if (req->seqsubmitted < req->info.npkts) {
+ if (req->seqsubmitted)
+ wait_event(pq->busy.wait_dma,
+ (req->seqcomp == req->seqsubmitted - 1));
+ user_sdma_free_request(req, true);
pq_update(pq);
- set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
+ set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
+ }
return ret;
}
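The reworked error path encodes an ownership rule: once every packet has been submitted (seqsubmitted == npkts), the completion callback owns the request and will free it; a partial submission must first drain what is in flight. A standalone sketch of the two predicates involved, with a simplified request struct:

#include <stdbool.h>
#include <stdint.h>

struct sdma_req { uint16_t seqsubmitted, seqcomp, npkts; };

/* the submitter frees only when the callback will never get to it */
static bool submitter_must_free(const struct sdma_req *r)
{
	return r->seqsubmitted < r->npkts;
}

/*
 * all in-flight packets consumed: seqnums are 0-based, so the last
 * completed one is seqsubmitted - 1
 */
static bool drain_done(const struct sdma_req *r)
{
	return r->seqcomp == (uint16_t)(r->seqsubmitted - 1);
}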
@@ -760,9 +755,10 @@ static int user_sdma_txadd(struct user_sdma_request *req,
return ret;
}
-static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
{
- int ret = 0, count;
+ int ret = 0;
+ u16 count;
unsigned npkts = 0;
struct user_sdma_txreq *tx = NULL;
struct hfi1_user_sdma_pkt_q *pq = NULL;
@@ -828,7 +824,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT;
- goto free_txreq;
+ goto free_tx;
}
iovec = &req->iovs[req->iov_idx];
WARN_ON(iovec->offset);
@@ -864,8 +860,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
changes = set_txreq_header_ahg(req, tx,
datalen);
- if (changes < 0)
+ if (changes < 0) {
+ ret = changes;
goto free_tx;
+ }
}
} else {
ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
@@ -914,10 +912,11 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
npkts++;
}
dosend:
- ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
+ ret = sdma_send_txlist(req->sde,
+ iowait_get_ib_work(&pq->busy),
+ &req->txps, &count);
req->seqsubmitted += count;
if (req->seqsubmitted == req->info.npkts) {
- WRITE_ONCE(req->done, 1);
/*
* The txreq has already been submitted to the HW queue
* so we can free the AHG entry now. Corruption will not
@@ -1365,11 +1364,15 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
return idx;
}
-/*
- * SDMA tx request completion callback. Called when the SDMA progress
- * state machine gets notification that the SDMA descriptors for this
- * tx request have been processed by the DMA engine. Called in
- * interrupt context.
+/**
+ * user_sdma_txreq_cb() - SDMA tx request completion callback.
+ * @txreq: valid sdma tx request
+ * @status: success/failure of request
+ *
+ * Called when the SDMA progress state machine gets notification that
+ * the SDMA descriptors for this tx request have been processed by the
+ * DMA engine. Called in interrupt context.
+ * Only do work on completed sequences.
*/
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
{
@@ -1378,7 +1381,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
struct user_sdma_request *req;
struct hfi1_user_sdma_pkt_q *pq;
struct hfi1_user_sdma_comp_q *cq;
- u16 idx;
+ enum hfi1_sdma_comp_state state = COMPLETE;
if (!tx->req)
return;
@@ -1391,39 +1394,25 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
SDMA_DBG(req, "SDMA completion with error %d",
status);
WRITE_ONCE(req->has_error, 1);
+ state = ERROR;
}
req->seqcomp = tx->seqnum;
kmem_cache_free(pq->txreq_cache, tx);
- tx = NULL;
-
- idx = req->info.comp_idx;
- if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
- if (req->seqcomp == req->info.npkts - 1) {
- req->status = 0;
- user_sdma_free_request(req, false);
- pq_update(pq);
- set_comp_state(pq, cq, idx, COMPLETE, 0);
- }
- } else {
- if (status != SDMA_TXREQ_S_OK)
- req->status = status;
- if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
- (READ_ONCE(req->done) ||
- READ_ONCE(req->has_error))) {
- user_sdma_free_request(req, false);
- pq_update(pq);
- set_comp_state(pq, cq, idx, ERROR, req->status);
- }
- }
+
+ /* sequence isn't complete? We are done */
+ if (req->seqcomp != req->info.npkts - 1)
+ return;
+
+ user_sdma_free_request(req, false);
+ set_comp_state(pq, cq, req->info.comp_idx, state, status);
+ pq_update(pq);
}
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{
- if (atomic_dec_and_test(&pq->n_reqs)) {
- xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
+ if (atomic_dec_and_test(&pq->n_reqs))
wake_up(&pq->wait);
- }
}
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
@@ -1448,6 +1437,8 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
if (!node)
continue;
+ req->iovs[i].node = NULL;
+
if (unpin)
hfi1_mmu_rb_remove(req->pq->handler,
&node->rb);
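The callback simplification above drops the two-branch status tracking: an error just latches has_error and the final completion state, and teardown runs exactly once, on the last sequence number. A standalone sketch of that flow, with plain types in place of the txreq machinery:

#include <stdint.h>

enum comp_state { COMPLETE, ERROR_STATE };

struct cb_req { uint16_t seqcomp, npkts; int has_error; };

/* returns 1 when this was the final packet and teardown should run */
static int txreq_done(struct cb_req *r, uint16_t seqnum, int ok,
		      enum comp_state *final)
{
	enum comp_state state = COMPLETE;

	if (!ok) {
		r->has_error = 1;	/* visible to the submit side */
		state = ERROR_STATE;
	}
	r->seqcomp = seqnum;

	if (r->seqcomp != r->npkts - 1)	/* sequence isn't complete yet */
		return 0;

	*final = state;	/* free + set_comp_state happen here, once */
	return 1;
}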
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index d2bc77f75253..14dfd757dafd 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -105,9 +105,10 @@ static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
-#define SDMA_PKT_Q_INACTIVE BIT(0)
-#define SDMA_PKT_Q_ACTIVE BIT(1)
-#define SDMA_PKT_Q_DEFERRED BIT(2)
+enum pkt_q_sdma_state {
+ SDMA_PKT_Q_ACTIVE,
+ SDMA_PKT_Q_DEFERRED,
+};
/*
* Maximum retry attempts to submit a TX request
@@ -133,7 +134,7 @@ struct hfi1_user_sdma_pkt_q {
struct user_sdma_request *reqs;
unsigned long *req_in_use;
struct iowait busy;
- unsigned state;
+ enum pkt_q_sdma_state state;
wait_queue_head_t wait;
unsigned long unpinned;
struct mmu_rb_handler *handler;
@@ -203,14 +204,12 @@ struct user_sdma_request {
s8 ahg_idx;
/* Writeable fields shared with interrupt */
- u64 seqcomp ____cacheline_aligned_in_smp;
- u64 seqsubmitted;
- /* status of the last txreq completed */
- int status;
+ u16 seqcomp ____cacheline_aligned_in_smp;
+ u16 seqsubmitted;
/* Send side fields */
struct list_head txps ____cacheline_aligned_in_smp;
- u64 seqnum;
+ u16 seqnum;
/*
* KDETH.OFFSET (TID) field
* The offset can cover multiple packets, depending on the
@@ -228,7 +227,6 @@ struct user_sdma_request {
u16 tididx;
/* progress index moving along the iovs array */
u8 iov_idx;
- u8 done;
u8 has_error;
struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
@@ -248,7 +246,7 @@ struct user_sdma_txreq {
struct user_sdma_request *req;
u16 flags;
unsigned int busycount;
- u64 seqnum;
+ u16 seqnum;
};
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
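The header changes complete the state cleanup: SDMA_PKT_Q_INACTIVE is gone because idleness is now synonymous with the n_reqs refcount reaching zero, which is also what the free path waits on. A standalone sketch of that drain scheme, with C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>

struct pkt_q { atomic_int n_reqs; /* plus a wait queue in the driver */ };

/* returns 1 when the last request retires: time to wake the waiter */
static int pq_update(struct pkt_q *pq)
{
	return atomic_fetch_sub(&pq->n_reqs, 1) == 1;
}

/* teardown waits for !n_reqs instead of a state bit */
static int pq_idle(struct pkt_q *pq)
{
	return atomic_load(&pq->n_reqs) == 0;
}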
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 13374c727b14..48e11e510358 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -129,8 +129,6 @@ unsigned short piothreshold = 256;
module_param(piothreshold, ushort, S_IRUGO);
MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");
-#define COPY_CACHELESS 1
-#define COPY_ADAPTIVE 2
static unsigned int sge_copy_mode;
module_param(sge_copy_mode, uint, S_IRUGO);
MODULE_PARM_DESC(sge_copy_mode,
@@ -151,159 +149,13 @@ static int pio_wait(struct rvt_qp *qp,
/* 16B trailing buffer */
static const u8 trail_buf[MAX_16B_PADDING];
-static uint wss_threshold;
+static uint wss_threshold = 80;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
static uint wss_clean_period = 256;
module_param(wss_clean_period, uint, S_IRUGO);
MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");
-/* memory working set size */
-struct hfi1_wss {
- unsigned long *entries;
- atomic_t total_count;
- atomic_t clean_counter;
- atomic_t clean_entry;
-
- int threshold;
- int num_entries;
- long pages_mask;
-};
-
-static struct hfi1_wss wss;
-
-int hfi1_wss_init(void)
-{
- long llc_size;
- long llc_bits;
- long table_size;
- long table_bits;
-
- /* check for a valid percent range - default to 80 if none or invalid */
- if (wss_threshold < 1 || wss_threshold > 100)
- wss_threshold = 80;
- /* reject a wildly large period */
- if (wss_clean_period > 1000000)
- wss_clean_period = 256;
- /* reject a zero period */
- if (wss_clean_period == 0)
- wss_clean_period = 1;
-
- /*
- * Calculate the table size - the next power of 2 larger than the
- * LLC size. LLC size is in KiB.
- */
- llc_size = wss_llc_size() * 1024;
- table_size = roundup_pow_of_two(llc_size);
-
- /* one bit per page in rounded up table */
- llc_bits = llc_size / PAGE_SIZE;
- table_bits = table_size / PAGE_SIZE;
- wss.pages_mask = table_bits - 1;
- wss.num_entries = table_bits / BITS_PER_LONG;
-
- wss.threshold = (llc_bits * wss_threshold) / 100;
- if (wss.threshold == 0)
- wss.threshold = 1;
-
- atomic_set(&wss.clean_counter, wss_clean_period);
-
- wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
- GFP_KERNEL);
- if (!wss.entries) {
- hfi1_wss_exit();
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void hfi1_wss_exit(void)
-{
- /* coded to handle partially initialized and repeat callers */
- kfree(wss.entries);
- wss.entries = NULL;
-}
-
-/*
- * Advance the clean counter. When the clean period has expired,
- * clean an entry.
- *
- * This is implemented in atomics to avoid locking. Because multiple
- * variables are involved, it can be racy which can lead to slightly
- * inaccurate information. Since this is only a heuristic, this is
- * OK. Any innaccuracies will clean themselves out as the counter
- * advances. That said, it is unlikely the entry clean operation will
- * race - the next possible racer will not start until the next clean
- * period.
- *
- * The clean counter is implemented as a decrement to zero. When zero
- * is reached an entry is cleaned.
- */
-static void wss_advance_clean_counter(void)
-{
- int entry;
- int weight;
- unsigned long bits;
-
- /* become the cleaner if we decrement the counter to zero */
- if (atomic_dec_and_test(&wss.clean_counter)) {
- /*
- * Set, not add, the clean period. This avoids an issue
- * where the counter could decrement below the clean period.
- * Doing a set can result in lost decrements, slowing the
- * clean advance. Since this a heuristic, this possible
- * slowdown is OK.
- *
- * An alternative is to loop, advancing the counter by a
- * clean period until the result is > 0. However, this could
- * lead to several threads keeping another in the clean loop.
- * This could be mitigated by limiting the number of times
- * we stay in the loop.
- */
- atomic_set(&wss.clean_counter, wss_clean_period);
-
- /*
- * Uniquely grab the entry to clean and move to next.
- * The current entry is always the lower bits of
- * wss.clean_entry. The table size, wss.num_entries,
- * is always a power-of-2.
- */
- entry = (atomic_inc_return(&wss.clean_entry) - 1)
- & (wss.num_entries - 1);
-
- /* clear the entry and count the bits */
- bits = xchg(&wss.entries[entry], 0);
- weight = hweight64((u64)bits);
- /* only adjust the contended total count if needed */
- if (weight)
- atomic_sub(weight, &wss.total_count);
- }
-}
-
-/*
- * Insert the given address into the working set array.
- */
-static void wss_insert(void *address)
-{
- u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
- u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
- u32 nr = page & (BITS_PER_LONG - 1);
-
- if (!test_and_set_bit(nr, &wss.entries[entry]))
- atomic_inc(&wss.total_count);
-
- wss_advance_clean_counter();
-}
-
-/*
- * Is the working set larger than the threshold?
- */
-static inline bool wss_exceeds_threshold(void)
-{
- return atomic_read(&wss.total_count) >= wss.threshold;
-}
-
/*
* Translate ib_wr_opcode into ib_wc_opcode.
*/
@@ -438,79 +290,6 @@ static const u32 pio_opmask[BIT(3)] = {
*/
__be64 ib_hfi1_sys_image_guid;
-/**
- * hfi1_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- * @release: boolean to release MR
- * @copy_last: do a separate copy of the last 8 bytes
- */
-void hfi1_copy_sge(
- struct rvt_sge_state *ss,
- void *data, u32 length,
- bool release,
- bool copy_last)
-{
- struct rvt_sge *sge = &ss->sge;
- int i;
- bool in_last = false;
- bool cacheless_copy = false;
-
- if (sge_copy_mode == COPY_CACHELESS) {
- cacheless_copy = length >= PAGE_SIZE;
- } else if (sge_copy_mode == COPY_ADAPTIVE) {
- if (length >= PAGE_SIZE) {
- /*
- * NOTE: this *assumes*:
- * o The first vaddr is the dest.
- * o If multiple pages, then vaddr is sequential.
- */
- wss_insert(sge->vaddr);
- if (length >= (2 * PAGE_SIZE))
- wss_insert(sge->vaddr + PAGE_SIZE);
-
- cacheless_copy = wss_exceeds_threshold();
- } else {
- wss_advance_clean_counter();
- }
- }
- if (copy_last) {
- if (length > 8) {
- length -= 8;
- } else {
- copy_last = false;
- in_last = true;
- }
- }
-
-again:
- while (length) {
- u32 len = rvt_get_sge_length(sge, length);
-
- WARN_ON_ONCE(len == 0);
- if (unlikely(in_last)) {
- /* enforce byte transfer ordering */
- for (i = 0; i < len; i++)
- ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
- } else if (cacheless_copy) {
- cacheless_memcpy(sge->vaddr, data, len);
- } else {
- memcpy(sge->vaddr, data, len);
- }
- rvt_update_sge(ss, len, release);
- data += len;
- length -= len;
- }
-
- if (copy_last) {
- copy_last = false;
- in_last = true;
- length = 8;
- goto again;
- }
-}
-
/*
* Make sure the QP is ready and able to accept the given opcode.
*/
@@ -713,7 +492,7 @@ static void verbs_sdma_complete(
spin_lock(&qp->s_lock);
if (tx->wqe) {
- hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
struct hfi1_opa_header *hdr;
@@ -737,7 +516,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
write_seqlock(&dev->iowait_lock);
list_add_tail(&ps->s_txreq->txreq.list,
- &priv->s_iowait.tx_head);
+ &ps->wait->tx_head);
if (list_empty(&priv->s_iowait.list)) {
if (list_empty(&dev->memwait))
mod_timer(&dev->mem_timer, jiffies + 1);
@@ -748,7 +527,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
rvt_get_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_qp_unbusy(qp, ps->wait);
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -950,8 +729,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (unlikely(ret))
goto bail_build;
}
- ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq,
- ps->pkts_sent);
+ ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);
if (unlikely(ret < 0)) {
if (ret == -ECOMM)
goto bail_ecomm;
@@ -1001,7 +779,7 @@ static int pio_wait(struct rvt_qp *qp,
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
write_seqlock(&dev->iowait_lock);
list_add_tail(&ps->s_txreq->txreq.list,
- &priv->s_iowait.tx_head);
+ &ps->wait->tx_head);
if (list_empty(&priv->s_iowait.list)) {
struct hfi1_ibdev *dev = &dd->verbs_dev;
int was_empty;
@@ -1020,7 +798,7 @@ static int pio_wait(struct rvt_qp *qp,
hfi1_sc_wantpiobuf_intr(sc, 1);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_qp_unbusy(qp, ps->wait);
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1160,7 +938,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
pio_bail:
if (qp->s_wqe) {
spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(qp, qp->s_wqe, wc_status);
+ rvt_send_complete(qp, qp->s_wqe, wc_status);
spin_unlock_irqrestore(&qp->s_lock, flags);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
spin_lock_irqsave(&qp->s_lock, flags);
@@ -1367,7 +1145,7 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
hfi1_cdbg(PIO, "%s() Failed. Completing with err",
__func__);
spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+ rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
return -EINVAL;
@@ -1582,6 +1360,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
struct hfi1_pportdata *ppd;
struct hfi1_devdata *dd;
u8 sc5;
+ u8 sl;
if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
@@ -1590,8 +1369,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
/* test the mapping for validity */
ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
ppd = ppd_from_ibp(ibp);
- sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
dd = dd_from_ppd(ppd);
+
+ sl = rdma_ah_get_sl(ah_attr);
+ if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+ return -EINVAL;
+
+ sc5 = ibp->sl_to_sc[sl];
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
return -EINVAL;
return 0;
@@ -1937,7 +1721,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
- dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
+ dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe;
dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
hfi1_comp_vect_mappings_lookup;
@@ -1950,10 +1734,16 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
+ dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode;
+ dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold;
+ dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period;
/* post send table */
dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
+ /* opcode translation table */
+ dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode;
+
ppd = dd->pport;
for (i = 0; i < dd->num_pports; i++, ppd++)
rvt_init_port(&dd->verbs_dev.rdi,
@@ -1961,6 +1751,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
i,
ppd->pkeys);
+ rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev,
+ &ib_hfi1_attr_group);
+
ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_HFI1);
if (ret)
goto err_verbs_txreq;
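
The large removal above takes the working-set-size (wss) estimator and hfi1_copy_sge() out of hfi1; the registration hunk instead hands sge_copy_mode, wss_threshold and wss_clean_period down through rdi.dparms, and wss_threshold now defaults to 80 at its definition rather than being patched up inside hfi1_wss_init(). The estimator itself is a compact lock-free heuristic: one bit per page in a power-of-two table, a shared counter decremented on every insert, and whichever thread decrements it to zero cleans exactly one entry. A userspace analogue, assuming C11 atomics and GCC's __builtin_popcountl():

#include <stdatomic.h>

#define CLEAN_PERIOD 256
#define NUM_ENTRIES 64 /* power of two, so '& (NUM_ENTRIES - 1)' masks */

static atomic_uint clean_counter = CLEAN_PERIOD;
static atomic_uint clean_entry;
static atomic_ulong entries[NUM_ENTRIES];
static atomic_int total_count;

static void advance_clean_counter(void)
{
	/* the thread that decrements the counter to zero is the cleaner */
	if (atomic_fetch_sub(&clean_counter, 1) != 1)
		return;
	/* set (not add) the period, as in the removed code: lost
	 * decrements only slow cleaning down, they never corrupt it */
	atomic_store(&clean_counter, CLEAN_PERIOD);
	/* ever-growing index; the low bits select the entry to clean */
	unsigned e = atomic_fetch_add(&clean_entry, 1) & (NUM_ENTRIES - 1);
	unsigned long bits = atomic_exchange(&entries[e], 0);
	if (bits)
		atomic_fetch_sub(&total_count, __builtin_popcountl(bits));
}

int main(void)
{
	for (int i = 0; i < 4 * CLEAN_PERIOD; i++)
		advance_clean_counter(); /* ends up cleaning four entries */
	return 0;
}
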
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index a4d06502f06d..64c9054db5f3 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -166,11 +166,13 @@ struct hfi1_qp_priv {
* This structure is used to hold commonly lookedup and computed values during
* the send engine progress.
*/
+struct iowait_work;
struct hfi1_pkt_state {
struct hfi1_ibdev *dev;
struct hfi1_ibport *ibp;
struct hfi1_pportdata *ppd;
struct verbs_txreq *s_txreq;
+ struct iowait_work *wait;
unsigned long flags;
unsigned long timeout;
unsigned long timeout_int;
@@ -247,7 +249,7 @@ static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
return container_of(rdi, struct hfi1_ibdev, rdi);
}
-static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
+static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
{
struct hfi1_qp_priv *priv;
@@ -313,9 +315,6 @@ void hfi1_put_txreq(struct verbs_txreq *tx);
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-void hfi1_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
- bool release, bool copy_last);
-
void hfi1_cnp_rcv(struct hfi1_packet *packet);
void hfi1_uc_rcv(struct hfi1_packet *packet);
@@ -343,7 +342,8 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
-int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ bool *call_send);
extern const u32 rc_only_opcode;
extern const u32 uc_only_opcode;
@@ -363,9 +363,6 @@ void hfi1_do_send_from_rvt(struct rvt_qp *qp);
void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
-void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status);
-
void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn);
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
@@ -390,28 +387,6 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc);
-int hfi1_wss_init(void);
-void hfi1_wss_exit(void);
-
-/* platform specific: return the lowest level cache (llc) size, in KiB */
-static inline int wss_llc_size(void)
-{
- /* assume that the boot CPU value is universal for all CPUs */
- return boot_cpu_data.x86_cache_size;
-}
-
-/* platform specific: cacheless copy */
-static inline void cacheless_memcpy(void *dst, void *src, size_t n)
-{
- /*
- * Use the only available X64 cacheless copy. Add a __user cast
- * to quiet sparse. The src agument is already in the kernel so
- * there are no security issues. The extra fault recovery machinery
- * is not invoked.
- */
- __copy_user_nocache(dst, (void __user *)src, n, 0);
-}
-
static inline bool opa_bth_is_migration(struct ib_other_headers *ohdr)
{
return ohdr->bth[1] & cpu_to_be32(OPA_BTH_MIG_REQ);
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 1c19bbc764b2..2a77af26a231 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -102,22 +102,19 @@ static inline struct sdma_txreq *get_sdma_txreq(struct verbs_txreq *tx)
return &tx->txreq;
}
-static inline struct verbs_txreq *get_waiting_verbs_txreq(struct rvt_qp *qp)
+static inline struct verbs_txreq *get_waiting_verbs_txreq(struct iowait_work *w)
{
struct sdma_txreq *stx;
- struct hfi1_qp_priv *priv = qp->priv;
- stx = iowait_get_txhead(&priv->s_iowait);
+ stx = iowait_get_txhead(w);
if (stx)
return container_of(stx, struct verbs_txreq, txreq);
return NULL;
}
-static inline bool verbs_txreq_queued(struct rvt_qp *qp)
+static inline bool verbs_txreq_queued(struct iowait_work *w)
{
- struct hfi1_qp_priv *priv = qp->priv;
-
- return iowait_packet_queued(&priv->s_iowait);
+ return iowait_packet_queued(w);
}
void hfi1_put_txreq(struct verbs_txreq *tx);
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index c643d80c5a53..c9876d9e3cb9 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -120,7 +120,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
uctxt->seq_cnt = 1;
uctxt->is_vnic = true;
- hfi1_set_vnic_msix_info(uctxt);
+ msix_request_rcd_irq(uctxt);
hfi1_stats.sps_ctxts++;
dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
@@ -135,8 +135,6 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
flush_wc();
- hfi1_reset_vnic_msix_info(uctxt);
-
/*
* Disable receive context and interrupt available, reset all
* RcvCtxtCtrl bits to default values.
@@ -148,6 +146,10 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
+ /* msix_intr is non-zero only once an IRQ has been requested; only clean up in that case */
+ if (uctxt->msix_intr)
+ msix_free_irq(dd, uctxt->msix_intr);
+
uctxt->event_flags = 0;
hfi1_clear_tids(uctxt);
@@ -626,7 +628,7 @@ static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
idr_remove(&dd->vnic.vesw_idr, vinfo->vesw_id);
/* ensure irqs see the change */
- hfi1_vnic_synchronize_irq(dd);
+ msix_vnic_synchronize_irq(dd);
/* remove unread skbs */
for (i = 0; i < vinfo->num_rx_q; i++) {
@@ -690,8 +692,6 @@ static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
rc = hfi1_vnic_txreq_init(dd);
if (rc)
goto txreq_fail;
-
- dd->vnic.msix_idx = dd->first_dyn_msix_idx;
}
for (i = dd->vnic.num_ctxt; i < vinfo->num_rx_q; i++) {
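
deallocate_vnic_ctxt() now pairs the teardown with the msix_request_rcd_irq() call made at allocation, which presumably records the vector in uctxt->msix_intr; the guard means a context that never got as far as requesting an IRQ still tears down cleanly. The shape of that guard as a sketch, with hypothetical fields:

#include <stdlib.h>

struct ctxt { int msix_intr; void *irq_cookie; };

/* release only what was actually requested; clearing the fields
 * afterwards also makes a repeated teardown harmless (sketch) */
static void ctxt_teardown(struct ctxt *c)
{
	if (c->msix_intr) {
		free(c->irq_cookie);
		c->irq_cookie = NULL;
		c->msix_intr = 0;
	}
}

int main(void)
{
	struct ctxt c = { 0 };
	ctxt_teardown(&c); /* no-op: nothing was ever requested */
	return 0;
}
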
diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
index c3c96c5869ed..97bd940a056a 100644
--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
+++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -198,8 +198,8 @@ int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
goto free_desc;
tx->retry_count = 0;
- ret = sdma_send_txreq(sde, &vnic_sdma->wait, &tx->txreq,
- vnic_sdma->pkts_sent);
+ ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
+ &tx->txreq, vnic_sdma->pkts_sent);
/* When -ECOMM, sdma callback will be called with ABORT status */
if (unlikely(ret && unlikely(ret != -ECOMM)))
goto free_desc;
@@ -230,13 +230,13 @@ tx_err:
* become available.
*/
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *txreq,
uint seq,
bool pkts_sent)
{
struct hfi1_vnic_sdma *vnic_sdma =
- container_of(wait, struct hfi1_vnic_sdma, wait);
+ container_of(wait->iow, struct hfi1_vnic_sdma, wait);
struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
@@ -247,7 +247,7 @@ static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
write_seqlock(&dev->iowait_lock);
if (list_empty(&vnic_sdma->wait.list))
- iowait_queue(pkts_sent, wait, &sde->dmawait);
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
write_sequnlock(&dev->iowait_lock);
return -EBUSY;
}
@@ -285,7 +285,8 @@ void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
for (i = 0; i < vinfo->num_tx_q; i++) {
struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];
- iowait_init(&vnic_sdma->wait, 0, NULL, hfi1_vnic_sdma_sleep,
+ iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
+ hfi1_vnic_sdma_sleep,
hfi1_vnic_sdma_wakeup, NULL);
vnic_sdma->sde = &vinfo->dd->per_sdma[i];
vnic_sdma->dd = vinfo->dd;
@@ -295,10 +296,12 @@ void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
/* Add a free descriptor watermark for wakeups */
if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
+ struct iowait_work *work;
+
INIT_LIST_HEAD(&vnic_sdma->stx.list);
vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
- list_add_tail(&vnic_sdma->stx.list,
- &vnic_sdma->wait.tx_head);
+ work = iowait_get_ib_work(&vnic_sdma->wait);
+ list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
}
}
}
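
The vnic_sdma hunks show the new split between struct iowait and struct iowait_work: the sleep callback now receives the work leg and climbs back to the owning iowait through wait->iow, while helpers such as iowait_get_ib_work() hand out the embedded leg. A toy version of that back-pointer arrangement, assuming a simplified single-leg layout:

#include <stdio.h>

struct iowait;

struct iowait_work {
	struct iowait *iow; /* back-pointer, like wait->iow above */
};

struct iowait {
	struct iowait_work ib_work; /* the real struct has one per leg */
};

static struct iowait_work *iowait_get_ib_work(struct iowait *w)
{
	return &w->ib_work;
}

int main(void)
{
	struct iowait wait = { .ib_work = { .iow = &wait } };
	struct iowait_work *work = iowait_get_ib_work(&wait);
	printf("round trip ok: %d\n", work->iow == &wait); /* 1 */
	return 0;
}
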
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index fddb5fdf92de..21c2100b2ea9 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_HNS
tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
+ depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
depends on ARM64 || (COMPILE_TEST && 64BIT)
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
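
The added dependency looks like a tautology, but Kconfig logic is three-valued: !m evaluates to m, so INFINIBAND_USER_ACCESS=m makes the whole expression m and caps INFINIBAND_HNS at m too, which keeps a built-in driver from referencing symbols that live in a module. A toy model of the evaluation:

#include <stdio.h>

/* Kconfig tristate: negation maps y<->n and fixes m; OR is max */
enum tri { N = 0, M = 1, Y = 2 };

static enum tri tri_not(enum tri v) { return (enum tri)(Y - v); }
static enum tri tri_or(enum tri a, enum tri b) { return a > b ? a : b; }

int main(void)
{
	const char *name[] = { "n", "m", "y" };
	for (int foo = N; foo <= Y; foo++)
		printf("FOO=%s -> FOO || !FOO = %s\n", name[foo],
		       name[tri_or((enum tri)foo, tri_not((enum tri)foo))]);
	return 0;
}

This prints n->y, m->m, y->y: the only case the clause actually constrains is the modular one.
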
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 0d96c5bb38cd..9990dc9eb96a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -49,6 +49,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ bool vlan_en = false;
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
@@ -58,8 +59,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
gid_attr = ah_attr->grh.sgid_attr;
- if (is_vlan_dev(gid_attr->ndev))
+ if (is_vlan_dev(gid_attr->ndev)) {
vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
+ vlan_en = true;
+ }
if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) &
@@ -71,6 +74,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
HNS_ROCE_PORT_NUM_SHIFT));
ah->av.gid_index = grh->sgid_index;
ah->av.vlan = cpu_to_le16(vlan_tag);
+ ah->av.vlan_en = vlan_en;
dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
ah->av.vlan);
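
hns_roce_create_ah() now also records whether the source GID sits on a VLAN device, and, as before, folds the service level into the tag's upper bits only when a real VLAN ID is present; 0xffff stays the "no VLAN" sentinel, which is what the vlan_tag < 0x1000 test gates. The underlying 802.1Q TCI packing, where the priority code point occupies bits 15:13 and the VLAN ID bits 11:0, as a sketch (the driver's exact shift constant is elided above):

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT 13
#define VLAN_VID_MASK 0x0fff

static uint16_t make_tci(uint16_t vid, uint8_t prio)
{
	/* priority in bits 15:13, VLAN ID in bits 11:0 */
	return (uint16_t)((vid & VLAN_VID_MASK) |
			  ((prio & 0x7u) << VLAN_PRIO_SHIFT));
}

int main(void)
{
	/* VID 100 with a service level mapped to priority 5 -> 0xa064 */
	printf("tci=0x%04x\n", (unsigned)make_tci(100, 5));
	return 0;
}
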
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 9a24fd0ee3e7..d39bdfdb5de9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -88,8 +88,11 @@
#define BITMAP_RR 1
#define MR_TYPE_MR 0x00
+#define MR_TYPE_FRMR 0x01
#define MR_TYPE_DMA 0x03
+#define HNS_ROCE_FRMR_MAX_PA 512
+
#define PKEY_ID 0xffff
#define GUID_LEN 8
#define NODE_DESC_SIZE 64
@@ -193,6 +196,9 @@ enum {
HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
+ HNS_ROCE_CAP_FLAG_MW = BIT(7),
+ HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
+ HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
};
enum hns_roce_mtt_type {
@@ -219,19 +225,11 @@ struct hns_roce_uar {
unsigned long logic_idx;
};
-struct hns_roce_vma_data {
- struct list_head list;
- struct vm_area_struct *vma;
- struct mutex *vma_list_mutex;
-};
-
struct hns_roce_ucontext {
struct ib_ucontext ibucontext;
struct hns_roce_uar uar;
struct list_head page_list;
struct mutex page_mutex;
- struct list_head vma_list;
- struct mutex vma_list_mutex;
};
struct hns_roce_pd {
@@ -293,6 +291,16 @@ struct hns_roce_mtt {
enum hns_roce_mtt_type mtt_type;
};
+struct hns_roce_mw {
+ struct ib_mw ibmw;
+ u32 pdn;
+ u32 rkey;
+ int enabled; /* MW's active status */
+ u32 pbl_hop_num;
+ u32 pbl_ba_pg_sz;
+ u32 pbl_buf_pg_sz;
+};
+
/* Only support 4K page size for mr register */
#define MR_SIZE_4K 0
@@ -304,6 +312,7 @@ struct hns_roce_mr {
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
u32 access;/* Access permission of MR */
+ u32 npages;
int enabled; /* MR's active status */
int type; /* MR's register type */
u64 *pbl_buf;/* MR's PBL space */
@@ -457,6 +466,7 @@ struct hns_roce_av {
u8 dgid[HNS_ROCE_GID_SIZE];
u8 mac[6];
__le16 vlan;
+ bool vlan_en;
};
struct hns_roce_ah {
@@ -656,6 +666,7 @@ struct hns_roce_eq_table {
};
struct hns_roce_caps {
+ u64 fw_ver;
u8 num_ports;
int gid_table_len[HNS_ROCE_MAX_PORTS];
int pkey_table_len[HNS_ROCE_MAX_PORTS];
@@ -665,7 +676,9 @@ struct hns_roce_caps {
u32 max_sq_sg; /* 2 */
u32 max_sq_inline; /* 32 */
u32 max_rq_sg; /* 2 */
+ u32 max_extend_sg;
int num_qps; /* 256k */
+ int reserved_qps;
u32 max_wqes; /* 16k */
u32 max_sq_desc_sz; /* 64 */
u32 max_rq_desc_sz; /* 64 */
@@ -738,6 +751,7 @@ struct hns_roce_work {
struct hns_roce_dev *hr_dev;
struct work_struct work;
u32 qpn;
+ u32 cqn;
int event_type;
int sub_type;
};
@@ -764,6 +778,8 @@ struct hns_roce_hw {
struct hns_roce_mr *mr, int flags, u32 pdn,
int mr_access_flags, u64 iova, u64 size,
void *mb_buf);
+ int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+ int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle, int nent, u32 vector);
@@ -863,6 +879,11 @@ static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
return container_of(ibmr, struct hns_roce_mr, ibmr);
}
+static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct hns_roce_mw, ibmw);
+}
+
static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct hns_roce_qp, ibqp);
@@ -968,12 +989,20 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata);
+struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
+struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
+ struct ib_udata *udata);
+int hns_roce_dealloc_mw(struct ib_mw *ibmw);
+
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
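
hns_roce_device.h grows two optional hooks, frmr_write_mtpt and mw_write_mtpt, on the per-generation ops table, so the v2 hardware backend can supply fast-register and memory-window MPT writers without disturbing v1. The general shape of such an ops-table extension, heavily simplified and with hypothetical packing:

#include <stdio.h>

struct mw { unsigned rkey; };

struct hw_ops {
	/* optional hook: only generations that support MWs set it */
	int (*mw_write_mtpt)(void *mb_buf, struct mw *mw);
};

static int v2_mw_write_mtpt(void *mb_buf, struct mw *mw)
{
	*(unsigned *)mb_buf = mw->rkey; /* stand-in for real MPT packing */
	return 0;
}

static const struct hw_ops hw_v2 = {
	.mw_write_mtpt = v2_mw_write_mtpt,
};

int main(void)
{
	unsigned buf;
	struct mw mw = { .rkey = 0x1234 };

	if (hw_v2.mw_write_mtpt && hw_v2.mw_write_mtpt(&buf, &mw) == 0)
		printf("mpt rkey=0x%x\n", buf);
	return 0;
}
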
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 081aa91fc162..ca05810c92dc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -731,7 +731,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
cq_init_attr.comp_vector = 0;
cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
if (IS_ERR(cq)) {
- dev_err(dev, "Create cq for reseved loop qp failed!");
+ dev_err(dev, "Create cq for reserved loop qp failed!");
return -ENOMEM;
}
free_mr->mr_free_cq = to_hr_cq(cq);
@@ -744,7 +744,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
if (IS_ERR(pd)) {
- dev_err(dev, "Create pd for reseved loop qp failed!");
+ dev_err(dev, "Create pd for reserved loop qp failed!");
ret = -ENOMEM;
goto alloc_pd_failed;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 0218c0f8c2a7..a4c62ae23a9a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -54,6 +54,59 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
+static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ struct hns_roce_wqe_frmr_seg *fseg,
+ const struct ib_reg_wr *wr)
+{
+ struct hns_roce_mr *mr = to_hr_mr(wr->mr);
+
+ /* use ib_access_flags */
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
+ wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
+ wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_RR_S,
+ wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_RW_S,
+ wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_LW_S,
+ wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
+
+ /* The msg_len and inv_key fields are reused here to carry the PBL base address */
+ rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
+ rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
+
+ rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
+ rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
+ rc_sq_wqe->rkey = cpu_to_le32(wr->key);
+ rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
+
+ fseg->pbl_size = cpu_to_le32(mr->pbl_size);
+ roce_set_field(fseg->mode_buf_pg_sz,
+ V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
+ V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
+ mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+ roce_set_bit(fseg->mode_buf_pg_sz,
+ V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
+}
+
+static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
+ const struct ib_atomic_wr *wr)
+{
+ if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
+ aseg->cmp_data = cpu_to_le64(wr->compare_add);
+ } else {
+ aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
+ aseg->cmp_data = 0;
+ }
+}
+
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
unsigned int *sge_ind)
{
@@ -121,6 +174,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
}
if (wr->opcode == IB_WR_RDMA_READ) {
+ *bad_wr = wr;
dev_err(hr_dev->dev, "Not support inline data!\n");
return -EINVAL;
}
@@ -179,6 +233,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ struct hns_roce_wqe_frmr_seg *fseg;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
struct ib_qp_attr attr;
@@ -191,6 +246,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
int attr_mask;
u32 tmp_len;
int ret = 0;
+ u32 hr_op;
u8 *smac;
int nreq;
int i;
@@ -356,6 +412,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
V2_UD_SEND_WQE_BYTE_40_PORTN_S,
qp->port);
+ roce_set_bit(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
+ ah->av.vlan_en ? 1 : 0);
roce_set_field(ud_sq_wqe->byte_48,
V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
@@ -406,99 +465,100 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+ wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
switch (wr->opcode) {
case IB_WR_RDMA_READ:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_READ);
+ hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_RDMA_WRITE:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+ hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
+ hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_SEND:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND);
+ hr_op = HNS_ROCE_V2_WQE_OP_SEND;
break;
case IB_WR_SEND_WITH_INV:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
+ hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
break;
case IB_WR_SEND_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+ hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
break;
case IB_WR_LOCAL_INV:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+ hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
+ rc_sq_wqe->inv_key =
+ cpu_to_le32(wr->ex.invalidate_rkey);
+ break;
+ case IB_WR_REG_MR:
+ hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
+ fseg = wqe;
+ set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
break;
case IB_WR_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+ hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
+ rc_sq_wqe->rkey =
+ cpu_to_le32(atomic_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(atomic_wr(wr)->remote_addr);
break;
case IB_WR_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+ hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
+ rc_sq_wqe->rkey =
+ cpu_to_le32(atomic_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(atomic_wr(wr)->remote_addr);
break;
case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
+ hr_op =
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
break;
case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
+ hr_op =
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
break;
default:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_MASK);
+ hr_op = HNS_ROCE_V2_WQE_OP_MASK;
break;
}
- wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ struct hns_roce_v2_wqe_data_seg *dseg;
+
+ dseg = wqe;
+ set_data_seg_v2(dseg, wr->sg_list);
+ wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
+ set_atomic_seg(wqe, atomic_wr(wr));
+ roce_set_field(rc_sq_wqe->byte_16,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
+ wr->num_sge);
+ } else if (wr->opcode != IB_WR_REG_MR) {
+ ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
+ wqe, &sge_ind, bad_wr);
+ if (ret)
+ goto out;
+ }
- ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
- &sge_ind, bad_wr);
- if (ret)
- goto out;
ind++;
} else {
dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
@@ -935,7 +995,24 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
resp = (struct hns_roce_query_version *)desc.data;
hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
- hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);
+ hr_dev->vendor_id = hr_dev->pci_dev->vendor;
+
+ return 0;
+}
+
+static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_query_fw_info *resp;
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ resp = (struct hns_roce_query_fw_info *)desc.data;
+ hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
return 0;
}
@@ -1158,6 +1235,13 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
ret = hns_roce_cmq_query_hw_info(hr_dev);
if (ret) {
+ dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hns_roce_query_fw_ver(hr_dev);
+ if (ret) {
dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
ret);
return ret;
@@ -1185,14 +1269,16 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
- hr_dev->vendor_part_id = 0;
- hr_dev->sys_image_guid = 0;
+
+ hr_dev->vendor_part_id = hr_dev->pci_dev->device;
+ hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
+ caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
@@ -1222,6 +1308,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->reserved_mrws = 1;
caps->reserved_uars = 0;
caps->reserved_cqs = 0;
+ caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
caps->qpc_ba_pg_sz = 0;
caps->qpc_buf_pg_sz = 0;
@@ -1255,6 +1342,11 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
HNS_ROCE_CAP_FLAG_RQ_INLINE |
HNS_ROCE_CAP_FLAG_RECORD_DB |
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
+
+ if (hr_dev->pci_dev->revision == 0x21)
+ caps->flags |= HNS_ROCE_CAP_FLAG_MW |
+ HNS_ROCE_CAP_FLAG_FRMR;
+
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
@@ -1262,6 +1354,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
+ if (hr_dev->pci_dev->revision == 0x21)
+ caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC;
+
ret = hns_roce_v2_set_bt(hr_dev);
if (ret)
dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
@@ -1690,10 +1785,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
(mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
+ mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
(mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
@@ -1817,6 +1913,88 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
return 0;
}
+static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+ V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
+ mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, mr->pd);
+
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
+
+ mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+ roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
+ V2_MPT_BYTE_48_PBL_BA_H_S,
+ upper_32_bits(mr->pbl_ba >> 3));
+
+ roce_set_field(mpt_entry->byte_64_buf_pa1,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+ mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+
+ return 0;
+}
+
+static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, mw->pdn);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st,
+ V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+ V2_MPT_BYTE_4_PBL_HOP_NUM_S,
+ mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
+ 0 : mw->pbl_hop_num);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
+ mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
+ mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
+
+ roce_set_field(mpt_entry->byte_64_buf_pa1,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+ mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+
+ mpt_entry->lkey = cpu_to_le32(mw->rkey);
+
+ return 0;
+}
+
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
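
Both new MPT writers store the PBL base address 8-byte aligned: the address is shifted right by 3, the low 32 bits land in pbl_ba_l and the remainder in the PBL_BA_H field. The arithmetic as a standalone sketch (field widths are hypothetical):

#include <stdint.h>
#include <stdio.h>

static void split_pbl_ba(uint64_t pbl_ba, uint32_t *lo, uint32_t *hi)
{
	uint64_t shifted = pbl_ba >> 3; /* drop the 8-byte-aligned low bits */

	*lo = (uint32_t)(shifted & 0xffffffffu);
	*hi = (uint32_t)(shifted >> 32);
}

int main(void)
{
	uint32_t lo, hi;

	split_pbl_ba(0x1234567890ull, &lo, &hi);
	/* prints lo=0x468acf12 hi=0x00000002 */
	printf("lo=0x%08x hi=0x%08x\n", (unsigned)lo, (unsigned)hi);
	return 0;
}
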
@@ -2274,6 +2452,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->src_qp = (u8)roce_get_field(cqe->byte_32,
V2_CQE_BYTE_32_RMT_QPN_M,
V2_CQE_BYTE_32_RMT_QPN_S);
+ wc->slid = 0;
wc->wc_flags |= (roce_get_bit(cqe->byte_32,
V2_CQE_BYTE_32_GRH_S) ?
IB_WC_GRH : 0);
@@ -2287,7 +2466,14 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->smac[5] = roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_SMAC_5_M,
V2_CQE_BYTE_28_SMAC_5_S);
- wc->vlan_id = 0xffff;
+ if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
+ wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_VID_M,
+ V2_CQE_BYTE_28_VID_S);
+ } else {
+ wc->vlan_id = 0xffff;
+ }
+
wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
wc->network_hdr_type = roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_PORT_TYPE_M,
@@ -2589,21 +2775,16 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
- roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
- V2_QPC_BYTE_60_MAPID_S, 0);
+ roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
+ V2_QPC_BYTE_60_TEMPID_S, 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid,
- V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
- 0);
+ roce_set_field(qpc_mask->byte_60_qpst_tempid,
+ V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_tempid,
+ V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_tempid,
+ V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
@@ -2685,7 +2866,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
- roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
+ roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
+ 0);
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
@@ -2694,8 +2876,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_144_raq,
V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
- roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
- 0);
roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
@@ -2721,14 +2901,12 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
- roce_set_field(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
-
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
@@ -2746,6 +2924,9 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
0);
+ roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
+ roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
+
roce_set_field(qpc_mask->byte_176_msg_pktn,
V2_QPC_BYTE_176_MSG_USE_PKTN_M,
V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
@@ -2790,6 +2971,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
+ roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
+ roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
+ 0);
+
qpc_mask->irrl_cur_sge_offset = 0;
roce_set_field(qpc_mask->byte_240_irrl_tail,
@@ -2955,13 +3143,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_56_dqpn_err,
V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
}
- roce_set_field(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
}
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
@@ -3271,13 +3452,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
- roce_set_field(context->byte_60_qpst_mapid,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
- roce_set_field(qpc_mask->byte_60_qpst_mapid,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);
-
context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
@@ -3538,6 +3712,17 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
}
+ if (is_vlan_dev(gid_attr->ndev)) {
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
+ roce_set_bit(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
+ }
+
roce_set_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, vlan);
@@ -3584,8 +3769,15 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, grh->traffic_class);
+ if (hr_dev->pci_dev->revision == 0x21 &&
+ gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ roce_set_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
+ grh->traffic_class >> 2);
+ else
+ roce_set_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
+ grh->traffic_class);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
@@ -3606,9 +3798,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
/* Every status migrate must change state */
- roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+ roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, new_state);
- roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+ roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, 0);
/* SW pass context to HW */
@@ -3728,7 +3920,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
goto out;
}
- state = roce_get_field(context->byte_60_qpst_mapid,
+ state = roce_get_field(context->byte_60_qpst_tempid,
V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
if (tmp_qp_state == -1) {
@@ -3995,13 +4187,103 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
{
struct hns_roce_work *irq_work =
container_of(work, struct hns_roce_work, work);
+ struct device *dev = irq_work->hr_dev->dev;
u32 qpn = irq_work->qpn;
+ u32 cqn = irq_work->cqn;
switch (irq_work->event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_info(dev, "Path migrated succeeded.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "Path migration failed.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_info(dev, "Communication established.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "Send queue drained.\n");
+ break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ dev_err(dev, "Local work queue catastrophic error.\n");
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ switch (irq_work->sub_type) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_err(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_err(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_err(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_err(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_err(dev, "QP %d, WQE shift error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n",
+ irq_work->sub_type);
+ break;
+ }
+ break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_err(dev, "Invalid request local work queue error.\n");
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ dev_err(dev, "Local access violation work queue error.\n");
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ switch (irq_work->sub_type) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_err(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_err(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_err(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_err(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_err(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_err(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n",
+ irq_work->sub_type);
+ break;
+ }
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ dev_warn(dev, "SRQ limit reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ dev_warn(dev, "SRQ last wqe reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ dev_err(dev, "SRQ catas error.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_err(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ dev_warn(dev, "DB overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_FLR:
+ dev_warn(dev, "Function level reset.\n");
break;
default:
break;
@@ -4011,7 +4293,8 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
}
static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq, u32 qpn)
+ struct hns_roce_eq *eq,
+ u32 qpn, u32 cqn)
{
struct hns_roce_work *irq_work;
@@ -4022,6 +4305,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
irq_work->hr_dev = hr_dev;
irq_work->qpn = qpn;
+ irq_work->cqn = cqn;
irq_work->event_type = eq->event_type;
irq_work->sub_type = eq->sub_type;
queue_work(hr_dev->irq_workq, &(irq_work->work));
@@ -4058,124 +4342,6 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
hns_roce_write64_k(doorbell, eq->doorbell);
}
-static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- u32 qpn)
-{
- struct device *dev = hr_dev->dev;
- int sub_type;
-
- dev_warn(dev, "Local work queue catastrophic error.\n");
- sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
- HNS_ROCE_V2_AEQE_SUB_TYPE_S);
- switch (sub_type) {
- case HNS_ROCE_LWQCE_QPC_ERROR:
- dev_warn(dev, "QP %d, QPC error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_MTU_ERROR:
- dev_warn(dev, "QP %d, MTU error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
- dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
- break;
- default:
- dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
- break;
- }
-}
-
-static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe, u32 qpn)
-{
- struct device *dev = hr_dev->dev;
- int sub_type;
-
- dev_warn(dev, "Local access violation work queue error.\n");
- sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
- HNS_ROCE_V2_AEQE_SUB_TYPE_S);
- switch (sub_type) {
- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
- dev_warn(dev, "QP %d, R_key violation.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
- dev_warn(dev, "QP %d, length error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_VA_ERROR:
- dev_warn(dev, "QP %d, VA error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_PD_ERROR:
- dev_err(dev, "QP %d, PD error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
- dev_warn(dev, "QP %d, rw acc error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
- dev_warn(dev, "QP %d, key state error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
- dev_warn(dev, "QP %d, MR operation error.\n", qpn);
- break;
- default:
- dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
- break;
- }
-}
-
-static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type, u32 qpn)
-{
- struct device *dev = hr_dev->dev;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_COMM_EST:
- dev_warn(dev, "Communication established.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "Send queue drained.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "Invalid request local work queue error.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
- break;
- default:
- break;
- }
-
- hns_roce_qp_event(hr_dev, qpn, event_type);
-}
-
-static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type, u32 cqn)
-{
- struct device *dev = hr_dev->dev;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%x access err.\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
- break;
- default:
- break;
- }
-
- hns_roce_cq_event(hr_dev, cqn, event_type);
-}
-
static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
{
u32 buf_chk_sz;
@@ -4251,31 +4417,23 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_warn(dev, "Path migrated succeeded.\n");
- break;
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "Path migration failed.\n");
- break;
case HNS_ROCE_EVENT_TYPE_COMM_EST:
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type,
- qpn);
+ hns_roce_qp_event(hr_dev, qpn, event_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- dev_warn(dev, "SRQ not support.\n");
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type,
- cqn);
+ hns_roce_cq_event(hr_dev, cqn, event_type);
break;
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- dev_warn(dev, "DB overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_MB:
hns_roce_cmd_event(hr_dev,
@@ -4284,10 +4442,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
le64_to_cpu(aeqe->event.cmd.out_param));
break;
case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
- dev_warn(dev, "CEQ overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_FLR:
- dev_warn(dev, "Function level reset.\n");
break;
default:
dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
@@ -4304,7 +4460,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
dev_warn(dev, "cons_index overflow, set back to 0.\n");
eq->cons_index = 0;
}
- hns_roce_v2_init_irq_work(hr_dev, eq, qpn);
+ hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
}
set_eq_cons_index_v2(eq);
@@ -5125,6 +5281,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
create_singlethread_workqueue("hns_roce_irq_workqueue");
if (!hr_dev->irq_workq) {
dev_err(dev, "Create irq workqueue failed!\n");
+ ret = -ENOMEM;
goto err_request_irq_fail;
}
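The added "ret = -ENOMEM" closes a silent-failure path: ret still held 0 from the last successful step, so the goto unwound the EQ setup yet reported success to the caller. A minimal sketch of the fixed shape, with hypothetical setup_irqs()/teardown_irqs() helpers standing in for the surrounding hns_roce_v2_init_eq_table() flow:

static int init_eq_table_sketch(struct hns_roce_dev *hr_dev)
{
	int ret;

	ret = setup_irqs(hr_dev);	/* hypothetical; returns 0 on success */
	if (ret)
		return ret;

	hr_dev->irq_workq =
		create_singlethread_workqueue("hns_roce_irq_workqueue");
	if (!hr_dev->irq_workq) {
		ret = -ENOMEM;	/* without this line, the goto returns 0 */
		goto err_request_irq_fail;
	}

	return 0;

err_request_irq_fail:
	teardown_irqs(hr_dev);		/* hypothetical cleanup */
	return ret;
}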
@@ -5195,6 +5352,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.set_mac = hns_roce_v2_set_mac,
.write_mtpt = hns_roce_v2_write_mtpt,
.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
+ .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
+ .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
.write_cqc = hns_roce_v2_write_cqc,
.set_hem = hns_roce_v2_set_hem,
.clear_hem = hns_roce_v2_clear_hem,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 14aa308befef..8bc820635bbd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -50,6 +50,7 @@
#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
+#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
@@ -78,6 +79,7 @@
#define HNS_ROCE_INVALID_LKEY 0x100
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
+#define HNS_ROCE_V2_RSV_QPS 8
#define HNS_ROCE_CONTEXT_HOP_NUM 1
#define HNS_ROCE_MTT_HOP_NUM 1
@@ -201,6 +203,7 @@ enum {
/* CMQ command */
enum hns_roce_opcode_type {
+ HNS_QUERY_FW_VER = 0x0001,
HNS_ROCE_OPC_QUERY_HW_VER = 0x8000,
HNS_ROCE_OPC_CFG_GLOBAL_PARAM = 0x8001,
HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004,
@@ -324,6 +327,7 @@ struct hns_roce_v2_cq_context {
enum{
V2_MPT_ST_VALID = 0x1,
+ V2_MPT_ST_FREE = 0x2,
};
enum hns_roce_v2_qp_state {
@@ -350,7 +354,7 @@ struct hns_roce_v2_qp_context {
__le32 dmac;
__le32 byte_52_udpspn_dmac;
__le32 byte_56_dqpn_err;
- __le32 byte_60_qpst_mapid;
+ __le32 byte_60_qpst_tempid;
__le32 qkey_xrcd;
__le32 byte_68_rq_db;
__le32 rq_db_record_addr;
@@ -492,26 +496,15 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
-#define V2_QPC_BYTE_60_MAPID_S 0
-#define V2_QPC_BYTE_60_MAPID_M GENMASK(12, 0)
+#define V2_QPC_BYTE_60_TEMPID_S 0
+#define V2_QPC_BYTE_60_TEMPID_M GENMASK(7, 0)
-#define V2_QPC_BYTE_60_INNER_MAP_IND_S 13
+#define V2_QPC_BYTE_60_SCC_TOKEN_S 8
+#define V2_QPC_BYTE_60_SCC_TOKEN_M GENMASK(26, 8)
-#define V2_QPC_BYTE_60_SQ_MAP_IND_S 14
+#define V2_QPC_BYTE_60_SQ_DB_DOING_S 27
-#define V2_QPC_BYTE_60_RQ_MAP_IND_S 15
-
-#define V2_QPC_BYTE_60_TEMPID_S 16
-#define V2_QPC_BYTE_60_TEMPID_M GENMASK(22, 16)
-
-#define V2_QPC_BYTE_60_EXT_MAP_IND_S 23
-
-#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S 24
-#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M GENMASK(26, 24)
-
-#define V2_QPC_BYTE_60_SQ_RLS_IND_S 27
-
-#define V2_QPC_BYTE_60_SQ_EXT_IND_S 28
+#define V2_QPC_BYTE_60_RQ_DB_DOING_S 28
#define V2_QPC_BYTE_60_QP_ST_S 29
#define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29)
@@ -534,6 +527,7 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_76_RQIE_S 28
+#define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30
#define V2_QPC_BYTE_80_RX_CQN_S 0
#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
@@ -588,7 +582,7 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_140_RR_MAX_S 12
#define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12)
-#define V2_QPC_BYTE_140_RSVD_RAQ_MAP_S 15
+#define V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S 15
#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16
#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16)
@@ -599,8 +593,6 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0)
-#define V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S 24
-
#define V2_QPC_BYTE_144_RAQ_CREDIT_S 25
#define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25)
@@ -637,9 +629,10 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_168_LP_SGEN_INI_S 22
#define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 22)
-#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_S 24
-#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_M GENMASK(27, 24)
-
+#define V2_QPC_BYTE_168_SQ_VLAN_EN_S 24
+#define V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S 25
+#define V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S 26
+#define V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S 27
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28)
@@ -725,6 +718,10 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20)
+#define V2_QPC_BYTE_232_SO_LP_VLD_S 29
+#define V2_QPC_BYTE_232_FENCE_LP_VLD_S 30
+#define V2_QPC_BYTE_232_IRRL_LP_VLD_S 31
+
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0)
@@ -743,6 +740,9 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_244_RNR_CNT_S 27
#define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27)
+#define V2_QPC_BYTE_244_LCL_OP_FLG_S 30
+#define V2_QPC_BYTE_244_IRRL_RD_FLG_S 31
+
#define V2_QPC_BYTE_248_IRRL_PSN_S 0
#define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0)
@@ -818,6 +818,11 @@ struct hns_roce_v2_cqe {
#define V2_CQE_BYTE_28_PORT_TYPE_S 16
#define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16)
+#define V2_CQE_BYTE_28_VID_S 18
+#define V2_CQE_BYTE_28_VID_M GENMASK(29, 18)
+
+#define V2_CQE_BYTE_28_VID_VLD_S 30
+
#define V2_CQE_BYTE_32_RMT_QPN_S 0
#define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0)
@@ -878,8 +883,19 @@ struct hns_roce_v2_mpt_entry {
#define V2_MPT_BYTE_8_LW_EN_S 7
+#define V2_MPT_BYTE_8_MW_CNT_S 8
+#define V2_MPT_BYTE_8_MW_CNT_M GENMASK(31, 8)
+
+#define V2_MPT_BYTE_12_FRE_S 0
+
#define V2_MPT_BYTE_12_PA_S 1
+#define V2_MPT_BYTE_12_MR_MW_S 4
+
+#define V2_MPT_BYTE_12_BPD_S 5
+
+#define V2_MPT_BYTE_12_BQP_S 6
+
#define V2_MPT_BYTE_12_INNER_PA_VLD_S 7
#define V2_MPT_BYTE_12_MW_BIND_QPN_S 8
@@ -988,6 +1004,8 @@ struct hns_roce_v2_ud_send_wqe {
#define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24
#define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24)
+#define V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30
+
#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31
#define V2_UD_SEND_WQE_DMAC_0_S 0
@@ -1042,6 +1060,16 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
+#define V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S 19
+
+#define V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S 20
+
+#define V2_RC_FRMR_WQE_BYTE_4_RR_S 21
+
+#define V2_RC_FRMR_WQE_BYTE_4_RW_S 22
+
+#define V2_RC_FRMR_WQE_BYTE_4_LW_S 23
+
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0)
@@ -1051,6 +1079,16 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+struct hns_roce_wqe_frmr_seg {
+ __le32 pbl_size;
+ __le32 mode_buf_pg_sz;
+};
+
+#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S 4
+#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M GENMASK(7, 4)
+
+#define V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S 8
+
struct hns_roce_v2_wqe_data_seg {
__le32 len;
__le32 lkey;
@@ -1068,6 +1106,11 @@ struct hns_roce_query_version {
__le32 rsv[5];
};
+struct hns_roce_query_fw_info {
+ __le32 fw_ver;
+ __le32 rsv[5];
+};
+
struct hns_roce_cfg_llm_a {
__le32 base_addr_l;
__le32 base_addr_h;
@@ -1564,4 +1607,9 @@ struct hns_roce_eq_context {
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
+struct hns_roce_wqe_atomic_seg {
+ __le64 fetchadd_swap_data;
+ __le64 cmp_data;
+};
+
#endif
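All of the QPC/MPT/WQE field definitions above follow one convention: each _S macro is a bit shift and each _M macro is a GENMASK() covering the field's span, consumed by the driver's roce_set_field()/roce_get_field() helpers. A user-space sketch of the idiom using the reworked byte_60 fields (the setter below is a simplified stand-in; the real macros operate on __le32 context words, and GENMASK() is re-derived so the demo builds outside the kernel):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define V2_QPC_BYTE_60_TEMPID_S	0
#define V2_QPC_BYTE_60_TEMPID_M	GENMASK(7, 0)
#define V2_QPC_BYTE_60_QP_ST_S	29
#define V2_QPC_BYTE_60_QP_ST_M	GENMASK(31, 29)

/* Simplified stand-in for the driver's roce_set_field(). */
static void set_field(uint32_t *word, uint32_t mask, uint32_t shift,
		      uint32_t val)
{
	*word = (*word & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t byte_60 = 0;

	set_field(&byte_60, V2_QPC_BYTE_60_TEMPID_M,
		  V2_QPC_BYTE_60_TEMPID_S, 0x3c);
	set_field(&byte_60, V2_QPC_BYTE_60_QP_ST_M,
		  V2_QPC_BYTE_60_QP_ST_S, 0x5);
	printf("byte_60 = 0x%08x\n", byte_60);	/* prints 0xa000003c */
	return 0;
}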
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c5cae9a38c04..1b3ee514f2ef 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -196,6 +196,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
memset(props, 0, sizeof(*props));
+ props->fw_ver = hr_dev->caps.fw_ver;
props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
props->max_mr_size = (u64)(~(0ULL));
props->page_size_cap = hr_dev->caps.page_size_cap;
@@ -215,7 +216,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_pd = hr_dev->caps.num_pds;
props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
- props->atomic_cap = IB_ATOMIC_NONE;
+ props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE;
props->max_pkeys = 1;
props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
@@ -344,8 +346,6 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
if (ret)
goto error_fail_uar_alloc;
- INIT_LIST_HEAD(&context->vma_list);
- mutex_init(&context->vma_list_mutex);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
INIT_LIST_HEAD(&context->page_list);
mutex_init(&context->page_mutex);
@@ -376,76 +376,34 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
return 0;
}
-static void hns_roce_vma_open(struct vm_area_struct *vma)
-{
- vma->vm_ops = NULL;
-}
-
-static void hns_roce_vma_close(struct vm_area_struct *vma)
-{
- struct hns_roce_vma_data *vma_data;
-
- vma_data = (struct hns_roce_vma_data *)vma->vm_private_data;
- vma_data->vma = NULL;
- mutex_lock(vma_data->vma_list_mutex);
- list_del(&vma_data->list);
- mutex_unlock(vma_data->vma_list_mutex);
- kfree(vma_data);
-}
-
-static const struct vm_operations_struct hns_roce_vm_ops = {
- .open = hns_roce_vma_open,
- .close = hns_roce_vma_close,
-};
-
-static int hns_roce_set_vma_data(struct vm_area_struct *vma,
- struct hns_roce_ucontext *context)
-{
- struct list_head *vma_head = &context->vma_list;
- struct hns_roce_vma_data *vma_data;
-
- vma_data = kzalloc(sizeof(*vma_data), GFP_KERNEL);
- if (!vma_data)
- return -ENOMEM;
-
- vma_data->vma = vma;
- vma_data->vma_list_mutex = &context->vma_list_mutex;
- vma->vm_private_data = vma_data;
- vma->vm_ops = &hns_roce_vm_ops;
-
- mutex_lock(&context->vma_list_mutex);
- list_add(&vma_data->list, vma_head);
- mutex_unlock(&context->vma_list_mutex);
-
- return 0;
-}
-
static int hns_roce_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma)
{
struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
- if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
- return -EINVAL;
+ switch (vma->vm_pgoff) {
+ case 0:
+ return rdma_user_mmap_io(context, vma,
+ to_hr_ucontext(context)->uar.pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot));
+
+ /* vm_pgoff: 1 -- TPTR */
+ case 1:
+ if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
+ return -EINVAL;
+ /*
+ * FIXME: using io_remap_pfn_range on the dma address returned
+ * by dma_alloc_coherent is totally wrong.
+ */
+ return rdma_user_mmap_io(context, vma,
+ hr_dev->tptr_dma_addr >> PAGE_SHIFT,
+ hr_dev->tptr_size,
+ vma->vm_page_prot);
- if (vma->vm_pgoff == 0) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_hr_ucontext(context)->uar.pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
- } else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
- hr_dev->tptr_size) {
- /* vm_pgoff: 1 -- TPTR */
- if (io_remap_pfn_range(vma, vma->vm_start,
- hr_dev->tptr_dma_addr >> PAGE_SHIFT,
- hr_dev->tptr_size,
- vma->vm_page_prot))
- return -EAGAIN;
- } else
+ default:
return -EINVAL;
-
- return hns_roce_set_vma_data(vma, to_hr_ucontext(context));
+ }
}
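The deleted vma_open/vma_close/set_vma_data machinery existed only so disassociate_ucontext could find and zap live mappings on hot-unplug. rdma_user_mmap_io() moves that bookkeeping into the IB core: it validates the requested size, performs the io_remap_pfn_range(), and records the VMA so uverbs can zap the PTEs itself, which is why hns_roce_disassociate_ucontext() below becomes empty. A hedged sketch of the new idiom (my_ucontext, to_my_uctx() and uar_pfn are hypothetical names):

static int my_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct my_ucontext *uctx = to_my_uctx(context);

	switch (vma->vm_pgoff) {
	case 0:		/* UAR/doorbell page must bypass the cache */
		return rdma_user_mmap_io(context, vma, uctx->uar_pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot));
	default:
		return -EINVAL;
	}
}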
static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
@@ -471,21 +429,6 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
- struct hns_roce_vma_data *vma_data, *n;
- struct vm_area_struct *vma;
-
- mutex_lock(&context->vma_list_mutex);
- list_for_each_entry_safe(vma_data, n, &context->vma_list, list) {
- vma = vma_data->vma;
- zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
-
- vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
- vma->vm_ops = NULL;
- list_del(&vma_data->list);
- kfree(vma_data);
- }
- mutex_unlock(&context->vma_list_mutex);
}
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
@@ -508,7 +451,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
spin_lock_init(&iboe->lock);
ib_dev = &hr_dev->ib_dev;
- strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
ib_dev->owner = THIS_MODULE;
ib_dev->node_type = RDMA_NODE_IB_CA;
@@ -584,12 +526,27 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
}
+ /* MW */
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
+ ib_dev->alloc_mw = hns_roce_alloc_mw;
+ ib_dev->dealloc_mw = hns_roce_dealloc_mw;
+ ib_dev->uverbs_cmd_mask |=
+ (1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
+ (1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
+ }
+
+ /* FRMR */
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
+ ib_dev->alloc_mr = hns_roce_alloc_mr;
+ ib_dev->map_mr_sg = hns_roce_map_mr_sg;
+ }
+
/* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable;
ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext;
ib_dev->driver_id = RDMA_DRIVER_HNS;
- ret = ib_register_device(ib_dev, NULL);
+ ret = ib_register_device(ib_dev, "hns_%d", NULL);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
return ret;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index eb26a5f6fc58..521ad2aa3a4e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -329,7 +329,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
u64 bt_idx;
u64 size;
- mhop_num = hr_dev->caps.pbl_hop_num;
+ mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
@@ -351,7 +351,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr;
- mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+ mr->pbl_hop_num = mhop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
@@ -511,7 +511,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->key = hw_index_to_key(index); /* MR key */
if (size == ~0ull) {
- mr->type = MR_TYPE_DMA;
mr->pbl_buf = NULL;
mr->pbl_dma_addr = 0;
/* PBL multi-hop addressing parameters */
@@ -522,7 +521,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->pbl_l1_dma_addr = NULL;
mr->pbl_l0_dma_addr = 0;
} else {
- mr->type = MR_TYPE_MR;
if (!hr_dev->caps.pbl_hop_num) {
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
@@ -548,9 +546,9 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
u32 mhop_num;
u64 bt_idx;
- npages = ib_umem_page_count(mr->umem);
+ npages = mr->pbl_size;
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- mhop_num = hr_dev->caps.pbl_hop_num;
+ mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;
if (mhop_num == HNS_ROCE_HOP_NUM_0)
return;
@@ -636,7 +634,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
}
if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
+ if (mr->type == MR_TYPE_MR)
+ npages = ib_umem_page_count(mr->umem);
if (!hr_dev->caps.pbl_hop_num)
dma_free_coherent(dev, (unsigned int)(npages * 8),
@@ -674,7 +673,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
goto err_table;
}
- ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ if (mr->type != MR_TYPE_FRMR)
+ ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ else
+ ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
if (ret) {
dev_err(dev, "Write mtpt fail!\n");
goto err_page;
@@ -855,6 +857,8 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
if (mr == NULL)
return ERR_PTR(-ENOMEM);
+ mr->type = MR_TYPE_DMA;
+
/* Allocate memory region key */
ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
~0ULL, acc, 0, mr);
@@ -1031,6 +1035,8 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
}
+ mr->type = MR_TYPE_MR;
+
ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
access_flags, n, mr);
if (ret)
@@ -1201,3 +1207,193 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr)
return ret;
}
+
+struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_mr *mr;
+ u64 length;
+ u32 page_size;
+ int ret;
+
+ page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
+ length = max_num_sg * page_size;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
+ dev_err(dev, "max_num_sg larger than %d\n",
+ HNS_ROCE_FRMR_MAX_PA);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->type = MR_TYPE_FRMR;
+
+ /* Allocate memory region key */
+ ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
+ 0, max_num_sg, mr);
+ if (ret)
+ goto err_free;
+
+ ret = hns_roce_mr_enable(hr_dev, mr);
+ if (ret)
+ goto err_mr;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+err_mr:
+ hns_roce_mr_free(to_hr_dev(pd->device), mr);
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+
+ mr->pbl_buf[mr->npages++] = cpu_to_le64(addr);
+
+ return 0;
+}
+
+int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+
+ mr->npages = 0;
+
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+}
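From a kernel ULP's point of view the two new hooks are reached through the core verbs: ib_alloc_mr() lands in hns_roce_alloc_mr(), and ib_map_mr_sg() drives hns_roce_set_page() once per page. A hedged usage sketch, assuming a protection domain "pd" and a DMA-mapped scatterlist:

static int frmr_register_sketch(struct ib_pd *pd, struct scatterlist *sg,
				int nents)
{
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* The core walks the scatterlist and calls the driver's
	 * set_page hook for each page-sized chunk. */
	n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
	if (n != nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* The mapping then goes on the wire via an IB_WR_REG_MR work
	 * request before the MR is used as an RDMA target. */
	return 0;
}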
+
+static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mw *mw)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ if (mw->enabled) {
+ ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
+ & (hr_dev->caps.num_mtpts - 1));
+ if (ret)
+ dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);
+
+ hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
+ key_to_hw_index(mw->rkey));
+ }
+
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
+ key_to_hw_index(mw->rkey), BITMAP_NO_RR);
+}
+
+static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mw *mw)
+{
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct device *dev = hr_dev->dev;
+ unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
+ int ret;
+
+ /* prepare HEM entry memory */
+ ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
+ if (ret)
+ return ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ret = PTR_ERR(mailbox);
+ goto err_table;
+ }
+
+ ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
+ if (ret) {
+ dev_err(dev, "MW write mtpt fail!\n");
+ goto err_page;
+ }
+
+ ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ if (ret) {
+ dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
+ goto err_page;
+ }
+
+ mw->enabled = 1;
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+err_page:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+err_table:
+ hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
+
+ return ret;
+}
+
+struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
+ struct hns_roce_mw *mw;
+ unsigned long index = 0;
+ int ret;
+
+ mw = kmalloc(sizeof(*mw), GFP_KERNEL);
+ if (!mw)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate a key for mw from bitmap */
+ ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
+ if (ret)
+ goto err_bitmap;
+
+ mw->rkey = hw_index_to_key(index);
+
+ mw->ibmw.rkey = mw->rkey;
+ mw->ibmw.type = type;
+ mw->pdn = to_hr_pd(ib_pd)->pdn;
+ mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+ mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+ mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+
+ ret = hns_roce_mw_enable(hr_dev, mw);
+ if (ret)
+ goto err_mw;
+
+ return &mw->ibmw;
+
+err_mw:
+ hns_roce_mw_free(hr_dev, mw);
+
+err_bitmap:
+ kfree(mw);
+
+ return ERR_PTR(ret);
+}
+
+int hns_roce_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
+ struct hns_roce_mw *mw = to_hr_mw(ibmw);
+
+ hns_roce_mw_free(hr_dev, mw);
+ kfree(mw);
+
+ return 0;
+}
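Allocation is only half of the window's life cycle: a freshly allocated MW carries an rkey that is not yet usable, and a later bind ties the window to a registered MR and a byte range. A hedged lifecycle sketch through the core verbs, assuming an existing "pd":

static int mw_lifecycle_sketch(struct ib_pd *pd)
{
	struct ib_mw *mw;

	mw = ib_alloc_mw(pd, IB_MW_TYPE_1);	/* reaches hns_roce_alloc_mw() */
	if (IS_ERR(mw))
		return PTR_ERR(mw);

	/* A bind (MR + access flags + range) activates mw->rkey for
	 * remote access; the bind step is omitted from this sketch. */

	return ib_dealloc_mw(mw);	/* reaches hns_roce_dealloc_mw() */
}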
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index efb7e961ca65..5ebf481a39d9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
@@ -343,6 +344,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
{
u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
u8 max_sq_stride = ilog2(roundup_sq_stride);
+ u32 ex_sge_num;
u32 page_size;
u32 max_cnt;
@@ -372,7 +374,18 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
if (hr_qp->sq.max_gs > 2)
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2));
+
+ if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
+ if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
+ dev_err(hr_dev->dev,
+ "extended SGE count is invalid, sge_cnt = %d\n",
+ hr_qp->sge.sge_cnt);
+ return -EINVAL;
+ }
+ }
+
hr_qp->sge.sge_shift = 4;
+ ex_sge_num = hr_qp->sge.sge_cnt;
/* Get buf size, SQ and RQ are aligned to page_size */
if (hr_dev->caps.max_sq_sg <= 2) {
@@ -386,6 +399,8 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift), PAGE_SIZE);
} else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ hr_qp->sge.sge_cnt =
+ max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num);
hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) +
HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
@@ -394,7 +409,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift), page_size);
hr_qp->sq.offset = 0;
- if (hr_qp->sge.sge_cnt) {
+ if (ex_sge_num) {
hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
@@ -465,6 +480,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sge.sge_shift = 4;
}
+ if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
+ if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
+ dev_err(dev, "extended SGE count is invalid, sge_cnt = %d\n",
+ hr_qp->sge.sge_cnt);
+ return -EINVAL;
+ }
+ }
+
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
@@ -472,6 +495,8 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
page_size);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+ hr_qp->sge.sge_cnt = max(page_size / (1 << hr_qp->sge.sge_shift),
+ (u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift, page_size);
@@ -952,8 +977,8 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
}
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask)) {
dev_err(dev, "ib_modify_qp_is_ok failed\n");
goto out;
}
@@ -1106,14 +1131,20 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
int reserved_from_top = 0;
+ int reserved_from_bot;
int ret;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
- /* A port include two SQP, six port total 12 */
+ /* In hw v1, a port includes two SQPs, so six ports total 12 */
+ if (hr_dev->caps.max_sq_sg <= 2)
+ reserved_from_bot = SQP_NUM;
+ else
+ reserved_from_bot = hr_dev->caps.reserved_qps;
+
ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
- hr_dev->caps.num_qps - 1, SQP_NUM,
+ hr_dev->caps.num_qps - 1, reserved_from_bot,
reserved_from_top);
if (ret) {
dev_err(hr_dev->dev, "qp bitmap init failed! error=%d\n",
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 423818a7d333..771eb6bd0785 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1689,7 +1689,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
unsigned long flags;
rtnl_lock();
- for_each_netdev_rcu(&init_net, ip_dev) {
+ for_each_netdev(&init_net, ip_dev) {
if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
(rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
(ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index e2e6c74a7452..102875872bea 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -2135,10 +2135,10 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
}
/**
- * i40iw_show_rev
+ * hw_rev_show
*/
-static ssize_t i40iw_show_rev(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct i40iw_ib_device *iwibdev = container_of(dev,
struct i40iw_ib_device,
@@ -2147,34 +2147,37 @@ static ssize_t i40iw_show_rev(struct device *dev,
return sprintf(buf, "%x\n", hw_rev);
}
+static DEVICE_ATTR_RO(hw_rev);
/**
- * i40iw_show_hca
+ * hca_type_show
*/
-static ssize_t i40iw_show_hca(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "I40IW\n");
}
+static DEVICE_ATTR_RO(hca_type);
/**
- * i40iw_show_board
+ * board_id_show
*/
-static ssize_t i40iw_show_board(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
+static struct attribute *i40iw_dev_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *i40iw_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group i40iw_attr_group = {
+ .attrs = i40iw_dev_attributes,
};
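The rename from i40iw_show_* to *_show is not cosmetic: DEVICE_ATTR_RO(x) expands to a read-only dev_attr_x wired to a function that must literally be named x_show. Grouping the attributes and handing them to the core lets the sysfs files be created before the device is published and removed with it, replacing the hand-rolled device_create_file() loop deleted below. A generic sketch of the pattern:

static ssize_t hw_rev_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%x\n", 0x2);	/* placeholder value */
}
static DEVICE_ATTR_RO(hw_rev);	/* read-only (0444) dev_attr_hw_rev */

static struct attribute *my_attrs[] = {
	&dev_attr_hw_rev.attr,
	NULL,				/* list must be NULL-terminated */
};

static const struct attribute_group my_attr_group = {
	.attrs = my_attrs,
};

/* Registered once, before ib_register_device():
 *	rdma_set_device_sysfs_group(ibdev, &my_attr_group);
 */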
/**
@@ -2752,7 +2755,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
i40iw_pr_err("iwdev == NULL\n");
return NULL;
}
- strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
iwibdev->ibdev.owner = THIS_MODULE;
iwdev->iwibdev = iwibdev;
iwibdev->iwdev = iwdev;
@@ -2851,20 +2853,6 @@ void i40iw_port_ibevent(struct i40iw_device *iwdev)
}
/**
- * i40iw_unregister_rdma_device - unregister of iwarp from IB
- * @iwibdev: rdma device ptr
- */
-static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
- device_remove_file(&iwibdev->ibdev.dev,
- i40iw_dev_attributes[i]);
- ib_unregister_device(&iwibdev->ibdev);
-}
-
-/**
* i40iw_destroy_rdma_device - destroy rdma device and free resources
* @iwibdev: IB device ptr
*/
@@ -2873,7 +2861,7 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
if (!iwibdev)
return;
- i40iw_unregister_rdma_device(iwibdev);
+ ib_unregister_device(&iwibdev->ibdev);
kfree(iwibdev->ibdev.iwcm);
iwibdev->ibdev.iwcm = NULL;
wait_event_timeout(iwibdev->iwdev->close_wq,
@@ -2888,32 +2876,19 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
*/
int i40iw_register_rdma_device(struct i40iw_device *iwdev)
{
- int i, ret;
+ int ret;
struct i40iw_ib_device *iwibdev;
iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
if (!iwdev->iwibdev)
return -ENOMEM;
iwibdev = iwdev->iwibdev;
-
+ rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
iwibdev->ibdev.driver_id = RDMA_DRIVER_I40IW;
- ret = ib_register_device(&iwibdev->ibdev, NULL);
+ ret = ib_register_device(&iwibdev->ibdev, "i40iw%d", NULL);
if (ret)
goto error;
- for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
- ret =
- device_create_file(&iwibdev->ibdev.dev,
- i40iw_dev_attributes[i]);
- if (ret) {
- while (i > 0) {
- i--;
- device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
- }
- ib_unregister_device(&iwibdev->ibdev);
- goto error;
- }
- }
return 0;
error:
kfree(iwdev->iwibdev->ibdev.iwcm);
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index db4aa13ebae0..d1de3285fd88 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,7 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
depends on NETDEVICES && ETHERNET && PCI && INET
+ depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
depends on MAY_USE_DEVLINK
select NET_VENDOR_MELLANOX
select MLX4_CORE
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index e5466d786bb1..8942f5f7f04d 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -807,15 +807,17 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int err;
struct ib_port_attr pattr;
- if (in_wc && in_wc->qp->qp_num) {
- pr_debug("received MAD: slid:%d sqpn:%d "
- "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
- in_wc->slid, in_wc->src_qp,
- in_wc->dlid_path_bits,
- in_wc->qp->qp_num,
- in_wc->wc_flags,
- in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
- be16_to_cpu(in_mad->mad_hdr.attr_id));
+ if (in_wc && in_wc->qp) {
+ pr_debug("received MAD: port:%d slid:%d sqpn:%d "
+ "dlid_bits:%d dqpn:%d wc_flags:0x%x tid:%016llx cls:%x mtd:%x atr:%x\n",
+ port_num,
+ in_wc->slid, in_wc->src_qp,
+ in_wc->dlid_path_bits,
+ in_wc->qp->qp_num,
+ in_wc->wc_flags,
+ be64_to_cpu(in_mad->mad_hdr.tid),
+ in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
+ be16_to_cpu(in_mad->mad_hdr.attr_id));
if (in_wc->wc_flags & IB_WC_GRH) {
pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
be64_to_cpu(in_grh->sgid.global.subnet_prefix),
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0bbeaaae47e0..0def2323459c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1140,144 +1140,50 @@ static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
return 0;
}
-static void mlx4_ib_vma_open(struct vm_area_struct *area)
-{
- /* vma_open is called when a new VMA is created on top of our VMA.
- * This is done through either mremap flow or split_vma (usually due
- * to mlock, madvise, munmap, etc.). We do not support a clone of the
- * vma, as this VMA is strongly hardware related. Therefore we set the
- * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
- * calling us again and trying to do incorrect actions. We assume that
- * the original vma size is exactly a single page that there will be no
- * "splitting" operations on.
- */
- area->vm_ops = NULL;
-}
-
-static void mlx4_ib_vma_close(struct vm_area_struct *area)
-{
- struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
-
- /* It's guaranteed that all VMAs opened on a FD are closed before the
- * file itself is closed, therefore no sync is needed with the regular
- * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
- * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
- * The close operation is usually called under mm->mmap_sem except when
- * process is exiting. The exiting case is handled explicitly as part
- * of mlx4_ib_disassociate_ucontext.
- */
- mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
- area->vm_private_data;
-
- /* set the vma context pointer to null in the mlx4_ib driver's private
- * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
- */
- mlx4_ib_vma_priv_data->vma = NULL;
-}
-
-static const struct vm_operations_struct mlx4_ib_vm_ops = {
- .open = mlx4_ib_vma_open,
- .close = mlx4_ib_vma_close
-};
-
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- int i;
- struct vm_area_struct *vma;
- struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
-
- /* need to protect from a race on closing the vma as part of
- * mlx4_ib_vma_close().
- */
- for (i = 0; i < HW_BAR_COUNT; i++) {
- vma = context->hw_bar_info[i].vma;
- if (!vma)
- continue;
-
- zap_vma_ptes(context->hw_bar_info[i].vma,
- context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);
-
- context->hw_bar_info[i].vma->vm_flags &=
- ~(VM_SHARED | VM_MAYSHARE);
- /* context going to be destroyed, should not access ops any more */
- context->hw_bar_info[i].vma->vm_ops = NULL;
- }
-}
-
-static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
- struct mlx4_ib_vma_private_data *vma_private_data)
-{
- vma_private_data->vma = vma;
- vma->vm_private_data = vma_private_data;
- vma->vm_ops = &mlx4_ib_vm_ops;
}
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct mlx4_ib_dev *dev = to_mdev(context->device);
- struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
- return -EINVAL;
-
- if (vma->vm_pgoff == 0) {
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_DB].vma)
- return -EINVAL;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_mucontext(context)->uar.pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
+ switch (vma->vm_pgoff) {
+ case 0:
+ return rdma_user_mmap_io(context, vma,
+ to_mucontext(context)->uar.pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot));
- } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_BF].vma)
+ case 1:
+ if (dev->dev->caps.bf_reg_size == 0)
return -EINVAL;
+ return rdma_user_mmap_io(
+ context, vma,
+ to_mucontext(context)->uar.pfn +
+ dev->dev->caps.num_uars,
+ PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_mucontext(context)->uar.pfn +
- dev->dev->caps.num_uars,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
-
- } else if (vma->vm_pgoff == 3) {
+ case 3: {
struct mlx4_clock_params params;
int ret;
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
- return -EINVAL;
-
ret = mlx4_get_internal_clock_params(dev->dev, &params);
-
if (ret)
return ret;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start,
- (pci_resource_start(dev->dev->persist->pdev,
- params.bar) +
- params.offset)
- >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma,
- &mucontext->hw_bar_info[HW_BAR_CLOCK]);
- } else {
- return -EINVAL;
+ return rdma_user_mmap_io(
+ context, vma,
+ (pci_resource_start(dev->dev->persist->pdev,
+ params.bar) +
+ params.offset) >>
+ PAGE_SHIFT,
+ PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
}
- return 0;
+ default:
+ return -EINVAL;
+ }
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
@@ -2133,39 +2039,43 @@ out:
return err;
}
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "%x\n", dev->dev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
dev->dev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *mlx4_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *mlx4_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group mlx4_attr_group = {
+ .attrs = mlx4_class_attributes,
};
struct diag_counter {
@@ -2636,7 +2546,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->dev = dev;
ibdev->bond_next_port = 0;
- strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
ibdev->ib_dev.owner = THIS_MODULE;
ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
@@ -2898,8 +2807,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_ib_alloc_diag_counters(ibdev))
goto err_steer_free_bitmap;
+ rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
ibdev->ib_dev.driver_id = RDMA_DRIVER_MLX4;
- if (ib_register_device(&ibdev->ib_dev, NULL))
+ if (ib_register_device(&ibdev->ib_dev, "mlx4_%d", NULL))
goto err_diag_counters;
if (mlx4_ib_mad_init(ibdev))
@@ -2922,12 +2832,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_notif;
}
- for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
- if (device_create_file(&ibdev->ib_dev.dev,
- mlx4_class_attributes[j]))
- goto err_notif;
- }
-
ibdev->ib_active = true;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 81ffc007e0a1..d844831179cf 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -673,7 +673,7 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
if (!list_empty(&group->pending_list))
req = list_first_entry(&group->pending_list,
struct mcast_req, group_list);
- if ((method == IB_MGMT_METHOD_GET_RESP)) {
+ if (method == IB_MGMT_METHOD_GET_RESP) {
if (req) {
send_reply_to_slave(req->func, group, &req->sa_mad, status);
--group->func[req->func].num_pend_reqs;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e10dccc7958f..8850dfc3826d 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -80,16 +80,11 @@ enum hw_bar_type {
HW_BAR_COUNT
};
-struct mlx4_ib_vma_private_data {
- struct vm_area_struct *vma;
-};
-
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
struct list_head db_page_list;
struct mutex db_page_mutex;
- struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
struct list_head wqn_ranges_list;
struct mutex wqn_ranges_mutex; /* protect wqn_ranges_list */
};
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 6dd3cd2c2f80..0711ca1dfb8f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2629,7 +2629,6 @@ enum {
static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
- enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
struct mlx4_ib_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
@@ -2639,13 +2638,8 @@ static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (cur_state != new_state || cur_state != IB_QPS_RESET) {
- int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- ll = rdma_port_get_link_layer(&dev->ib_dev, port);
- }
-
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, ll)) {
+ attr_mask)) {
pr_debug("qpn 0x%x: invalid attribute mask specified "
"for transition %d to %d. qp_type %d,"
" attr_mask 0x%x\n",
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index e219093d2764..752bdd536130 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -818,9 +818,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
if (!mlx4_is_master(dev->dev))
return 0;
- dev->iov_parent =
- kobject_create_and_add("iov",
- kobject_get(dev->ib_dev.ports_parent->parent));
+ dev->iov_parent = kobject_create_and_add("iov", &dev->ib_dev.dev.kobj);
if (!dev->iov_parent) {
ret = -ENOMEM;
goto err;
@@ -850,7 +848,6 @@ err_add_entries:
err_ports:
kobject_put(dev->iov_parent);
err:
- kobject_put(dev->ib_dev.ports_parent->parent);
pr_err("mlx4_ib_device_register_sysfs error (%d)\n", ret);
return ret;
}
@@ -886,5 +883,4 @@ void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device)
kobject_put(device->ports_parent);
kobject_put(device->iov_parent);
kobject_put(device->iov_parent);
- kobject_put(device->ib_dev.ports_parent->parent);
}
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index c84fef9a8a08..ca060a2e2b36 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -197,3 +197,132 @@ int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPCNT,
0, 0);
}
+
+void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {};
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+ MLX5_SET(destroy_tir_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+ MLX5_SET(destroy_tis_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {};
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(destroy_rqt_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
+ u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+ int err;
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+
+ return err;
+}
+
+void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
+ u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+
+ MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, in, pd, pdn);
+ MLX5_SET(dealloc_pd_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
+ void *gid;
+
+ MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
+ MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
+ MLX5_SET(attach_to_mcg_in, in, uid, uid);
+ gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
+ void *gid;
+
+ MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
+ MLX5_SET(detach_from_mcg_in, in, uid, uid);
+ gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+ MLX5_SET(alloc_xrcd_in, in, uid, uid);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
+ return err;
+}
+
+int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
+
+ MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+ MLX5_SET(dealloc_xrcd_in, in, uid, uid);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
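Every wrapper above follows the same firmware-command idiom: fixed-size in/out dword arrays sized by MLX5_ST_SZ_DW(), fields packed with MLX5_SET(), results pulled with MLX5_GET(), and the new uid letting firmware attribute the object to the DEVX user context that created it. The shape, for a hypothetical object type "foo" (only the foo names are invented; the macros are the real ones from mlx5_ifc.h):

static void mlx5_cmd_destroy_foo(struct mlx5_core_dev *dev, u32 foon, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_foo_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_foo_out)] = {};

	MLX5_SET(destroy_foo_in, in, opcode, MLX5_CMD_OP_DESTROY_FOO);
	MLX5_SET(destroy_foo_in, in, foon, foon);
	MLX5_SET(destroy_foo_in, in, uid, uid);	/* ownership check in FW */
	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}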
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 88cbb1c41703..c03c56455534 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -47,4 +47,18 @@ int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
u64 length, u32 alignment);
int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length);
+void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
+void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
+void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
+void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid);
+int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
+ u16 uid);
+void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
+ u16 uid);
+int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid);
+int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid);
+int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
+int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 088205d7f1a1..7d769b5538b4 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -393,7 +393,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
- mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf);
+ mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
}
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -728,16 +728,11 @@ static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
int nent,
int cqe_size)
{
- struct mlx5_frag_buf_ctrl *c = &buf->fbc;
- struct mlx5_frag_buf *frag_buf = &c->frag_buf;
- u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0};
+ struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+ u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
+ u8 log_wq_sz = ilog2(cqe_size);
int err;
- MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size));
- MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0);
-
- mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff);
-
err = mlx5_frag_buf_alloc_node(dev->mdev,
nent * cqe_size,
frag_buf,
@@ -745,6 +740,8 @@ static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
if (err)
return err;
+ mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
+
buf->cqe_size = cqe_size;
buf->nent = nent;
@@ -877,6 +874,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
}
+ MLX5_SET(create_cq_in, *cqb, uid, to_mucontext(context)->devx_uid);
return 0;
err_cqb:
@@ -934,7 +932,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
- cq->buf.fbc.frag_buf.npages;
+ cq->buf.frag_buf.npages;
*cqb = kvzalloc(*inlen, GFP_KERNEL);
if (!*cqb) {
err = -ENOMEM;
@@ -942,11 +940,11 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
}
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
- mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas);
+ mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
- cq->buf.fbc.frag_buf.page_shift -
+ cq->buf.frag_buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
*index = dev->mdev->priv.uar->index;
@@ -1365,11 +1363,10 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
cqe_size = 64;
err = resize_kernel(dev, cq, entries, cqe_size);
if (!err) {
- struct mlx5_frag_buf_ctrl *c;
+ struct mlx5_frag_buf *frag_buf = &cq->resize_buf->frag_buf;
- c = &cq->resize_buf->fbc;
- npas = c->frag_buf.npages;
- page_shift = c->frag_buf.page_shift;
+ npas = frag_buf->npages;
+ page_shift = frag_buf->page_shift;
}
}
@@ -1390,8 +1387,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
pas, 0);
else
- mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf,
- pas);
+ mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);
MLX5_SET(modify_cq_in, in,
modify_field_select_resize_field_select.resize_field_select.resize_field_select,
@@ -1459,7 +1455,7 @@ ex:
return err;
}
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
{
struct mlx5_ib_cq *cq;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index ac116d63e466..61aab7c0c513 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -19,7 +19,7 @@
#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
struct mlx5_core_dev *mdev;
- u32 obj_id;
+ u64 obj_id;
u32 dinlen; /* destroy inbox length */
u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
};
@@ -45,13 +45,14 @@ static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
return to_mucontext(ib_uverbs_get_ucontext(file));
}
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
u64 general_obj_types;
void *hdr;
int err;
+ u16 uid;
hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
@@ -60,9 +61,6 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *contex
!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
return -EINVAL;
- if (!capable(CAP_NET_RAW))
- return -EPERM;
-
MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);
@@ -70,19 +68,18 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *contex
if (err)
return err;
- context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
- return 0;
+ uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return uid;
}
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context)
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, uid);
mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
@@ -109,150 +106,218 @@ bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
}
}
+/*
+ * As the obj_id in the firmware is not globally unique, the object type
+ * must be considered when checking for a valid object id.
+ * For that, the creator command's opcode is encoded as part of the obj_id.
+ */
+static u64 get_enc_obj_id(u16 opcode, u32 obj_id)
+{
+ return ((u64)opcode << 32) | obj_id;
+}
+
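Since the 32-bit firmware ids are per-type namespaces, folding the creator opcode into the upper half of a u64 gives every object a globally comparable id. A user-space replica that can be run directly (the opcode values below are illustrative, not taken from mlx5_ifc.h):

#include <stdio.h>
#include <stdint.h>

static uint64_t get_enc_obj_id(uint16_t opcode, uint32_t obj_id)
{
	return ((uint64_t)opcode << 32) | obj_id;
}

int main(void)
{
	/* Two different object types sharing firmware id 7 no longer
	 * compare equal once the creator opcode is encoded. */
	uint16_t create_tir = 0x900, create_tis = 0x912; /* illustrative */

	printf("tir: %#llx\ntis: %#llx\n",
	       (unsigned long long)get_enc_obj_id(create_tir, 7),
	       (unsigned long long)get_enc_obj_id(create_tis, 7));
	return 0;
}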
static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
- u32 obj_id;
+ u64 obj_id;
switch (opcode) {
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
- obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT,
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_id));
break;
case MLX5_CMD_OP_QUERY_MKEY:
- obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
+ MLX5_GET(query_mkey_in, in,
+ mkey_index));
break;
case MLX5_CMD_OP_QUERY_CQ:
- obj_id = MLX5_GET(query_cq_in, in, cqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ MLX5_GET(query_cq_in, in, cqn));
break;
case MLX5_CMD_OP_MODIFY_CQ:
- obj_id = MLX5_GET(modify_cq_in, in, cqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ MLX5_GET(modify_cq_in, in, cqn));
break;
case MLX5_CMD_OP_QUERY_SQ:
- obj_id = MLX5_GET(query_sq_in, in, sqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ MLX5_GET(query_sq_in, in, sqn));
break;
case MLX5_CMD_OP_MODIFY_SQ:
- obj_id = MLX5_GET(modify_sq_in, in, sqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ MLX5_GET(modify_sq_in, in, sqn));
break;
case MLX5_CMD_OP_QUERY_RQ:
- obj_id = MLX5_GET(query_rq_in, in, rqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(query_rq_in, in, rqn));
break;
case MLX5_CMD_OP_MODIFY_RQ:
- obj_id = MLX5_GET(modify_rq_in, in, rqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(modify_rq_in, in, rqn));
break;
case MLX5_CMD_OP_QUERY_RMP:
- obj_id = MLX5_GET(query_rmp_in, in, rmpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
+ MLX5_GET(query_rmp_in, in, rmpn));
break;
case MLX5_CMD_OP_MODIFY_RMP:
- obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
+ MLX5_GET(modify_rmp_in, in, rmpn));
break;
case MLX5_CMD_OP_QUERY_RQT:
- obj_id = MLX5_GET(query_rqt_in, in, rqtn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ MLX5_GET(query_rqt_in, in, rqtn));
break;
case MLX5_CMD_OP_MODIFY_RQT:
- obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ MLX5_GET(modify_rqt_in, in, rqtn));
break;
case MLX5_CMD_OP_QUERY_TIR:
- obj_id = MLX5_GET(query_tir_in, in, tirn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ MLX5_GET(query_tir_in, in, tirn));
break;
case MLX5_CMD_OP_MODIFY_TIR:
- obj_id = MLX5_GET(modify_tir_in, in, tirn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ MLX5_GET(modify_tir_in, in, tirn));
break;
case MLX5_CMD_OP_QUERY_TIS:
- obj_id = MLX5_GET(query_tis_in, in, tisn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ MLX5_GET(query_tis_in, in, tisn));
break;
case MLX5_CMD_OP_MODIFY_TIS:
- obj_id = MLX5_GET(modify_tis_in, in, tisn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ MLX5_GET(modify_tis_in, in, tisn));
break;
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
- obj_id = MLX5_GET(query_flow_table_in, in, table_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
+ MLX5_GET(query_flow_table_in, in,
+ table_id));
break;
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
- obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
+ MLX5_GET(modify_flow_table_in, in,
+ table_id));
break;
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
- obj_id = MLX5_GET(query_flow_group_in, in, group_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
+ MLX5_GET(query_flow_group_in, in,
+ group_id));
break;
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
- obj_id = MLX5_GET(query_fte_in, in, flow_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
+ MLX5_GET(query_fte_in, in,
+ flow_index));
break;
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
- obj_id = MLX5_GET(set_fte_in, in, flow_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
+ MLX5_GET(set_fte_in, in, flow_index));
break;
case MLX5_CMD_OP_QUERY_Q_COUNTER:
- obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
+ MLX5_GET(query_q_counter_in, in,
+ counter_set_id));
break;
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
- obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
+ MLX5_GET(query_flow_counter_in, in,
+ flow_counter_id));
break;
case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
- obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_id));
break;
case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
- obj_id = MLX5_GET(query_scheduling_element_in, in,
- scheduling_element_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
+ MLX5_GET(query_scheduling_element_in,
+ in, scheduling_element_id));
break;
case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
- obj_id = MLX5_GET(modify_scheduling_element_in, in,
- scheduling_element_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
+ MLX5_GET(modify_scheduling_element_in,
+ in, scheduling_element_id));
break;
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
- obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
+ MLX5_GET(add_vxlan_udp_dport_in, in,
+ vxlan_udp_port));
break;
case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
- obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
+ MLX5_GET(query_l2_table_entry_in, in,
+ table_index));
break;
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
- obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
+ MLX5_GET(set_l2_table_entry_in, in,
+ table_index));
break;
case MLX5_CMD_OP_QUERY_QP:
- obj_id = MLX5_GET(query_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(query_qp_in, in, qpn));
break;
case MLX5_CMD_OP_RST2INIT_QP:
- obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rst2init_qp_in, in, qpn));
break;
case MLX5_CMD_OP_INIT2RTR_QP:
- obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(init2rtr_qp_in, in, qpn));
break;
case MLX5_CMD_OP_RTR2RTS_QP:
- obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rtr2rts_qp_in, in, qpn));
break;
case MLX5_CMD_OP_RTS2RTS_QP:
- obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rts2rts_qp_in, in, qpn));
break;
case MLX5_CMD_OP_SQERR2RTS_QP:
- obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(sqerr2rts_qp_in, in, qpn));
break;
case MLX5_CMD_OP_2ERR_QP:
- obj_id = MLX5_GET(qp_2err_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(qp_2err_in, in, qpn));
break;
case MLX5_CMD_OP_2RST_QP:
- obj_id = MLX5_GET(qp_2rst_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(qp_2rst_in, in, qpn));
break;
case MLX5_CMD_OP_QUERY_DCT:
- obj_id = MLX5_GET(query_dct_in, in, dctn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ MLX5_GET(query_dct_in, in, dctn));
break;
case MLX5_CMD_OP_QUERY_XRQ:
- obj_id = MLX5_GET(query_xrq_in, in, xrqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
+ MLX5_GET(query_xrq_in, in, xrqn));
break;
case MLX5_CMD_OP_QUERY_XRC_SRQ:
- obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
+ MLX5_GET(query_xrc_srq_in, in,
+ xrc_srqn));
break;
case MLX5_CMD_OP_ARM_XRC_SRQ:
- obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
+ MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
break;
case MLX5_CMD_OP_QUERY_SRQ:
- obj_id = MLX5_GET(query_srq_in, in, srqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
+ MLX5_GET(query_srq_in, in, srqn));
break;
case MLX5_CMD_OP_ARM_RQ:
- obj_id = MLX5_GET(arm_rq_in, in, srq_number);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(arm_rq_in, in, srq_number));
break;
case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
- obj_id = MLX5_GET(drain_dct_in, in, dctn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ MLX5_GET(drain_dct_in, in, dctn));
break;
case MLX5_CMD_OP_ARM_XRQ:
- obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
+ MLX5_GET(arm_xrq_in, in, xrqn));
break;
default:
return false;
@@ -264,11 +329,102 @@ static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
return false;
}
-static bool devx_is_obj_create_cmd(const void *in)
+static void devx_set_umem_valid(const void *in)
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
switch (opcode) {
+ case MLX5_CMD_OP_CREATE_MKEY:
+ MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
+ break;
+ case MLX5_CMD_OP_CREATE_CQ:
+ {
+ void *cqc;
+
+ MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
+ break;
+ }
+ case MLX5_CMD_OP_CREATE_QP:
+ {
+ void *qpc;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
+ MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_RQ:
+ {
+ void *rqc, *wq;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_SQ:
+ {
+ void *sqc, *wq;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_MODIFY_CQ:
+ MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
+ break;
+
+ case MLX5_CMD_OP_CREATE_RMP:
+ {
+ void *rmpc, *wq;
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_XRQ:
+ {
+ void *xrqc, *wq;
+
+ xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
+ wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ {
+ void *xrc_srqc;
+
+ MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
+ xrc_srq_context_entry);
+ MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
+ break;
+ }
+
+ default:
+ return;
+ }
+}
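/*
 * A reading of the switch above: for create/modify commands whose queues
 * and doorbells may live in user memory, the *_umem_valid bits tell
 * firmware that those buffers are described by DEVX umem objects; opcodes
 * not listed hit the default case and are left untouched.
 */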
+
+static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
+{
+ *opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (*opcode) {
case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
case MLX5_CMD_OP_CREATE_MKEY:
case MLX5_CMD_OP_CREATE_CQ:
@@ -284,7 +440,7 @@ static bool devx_is_obj_create_cmd(const void *in)
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
- case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
@@ -385,12 +541,49 @@ static bool devx_is_obj_query_cmd(const void *in)
}
}
+static bool devx_is_whitelist_cmd(void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
+{
+ if (devx_is_whitelist_cmd(cmd_in)) {
+ struct mlx5_ib_dev *dev;
+
+ if (c->devx_uid)
+ return c->devx_uid;
+
+ dev = to_mdev(c->ibucontext.device);
+ if (dev->devx_whitelist_uid)
+ return dev->devx_whitelist_uid;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ return c->devx_uid;
+}
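/*
 * Resolution order implemented above, summarized:
 *
 *   whitelisted cmd, context has a uid    -> c->devx_uid
 *   whitelisted cmd, device-wide uid set  -> dev->devx_whitelist_uid
 *   whitelisted cmd, no uid at all        -> -EOPNOTSUPP
 *   other cmd, no context uid             -> -EINVAL
 *   other cmd, missing CAP_NET_RAW        -> -EPERM
 *   otherwise                             -> c->devx_uid
 */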
static bool devx_is_general_cmd(void *in)
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
switch (opcode) {
case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_VPORT_STATE:
case MLX5_CMD_OP_QUERY_ADAPTER:
case MLX5_CMD_OP_QUERY_ISSI:
@@ -498,14 +691,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
void *cmd_out;
int err;
+ int uid;
c = devx_ufile2uctx(file);
if (IS_ERR(c))
return PTR_ERR(c);
dev = to_mdev(c->ibucontext.device);
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
/* Only whitelisted general HCA commands are allowed for this method. */
if (!devx_is_general_cmd(cmd_in))
@@ -515,7 +710,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
if (IS_ERR(cmd_out))
return PTR_ERR(cmd_out);
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
err = mlx5_cmd_exec(dev->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
cmd_out, cmd_out_len);
@@ -627,9 +822,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
break;
- case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
- MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
break;
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
@@ -723,13 +918,18 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct devx_obj *obj;
int err;
+ int uid;
+ u32 obj_id;
+ u16 opcode;
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
- if (!devx_is_obj_create_cmd(cmd_in))
+ if (!devx_is_obj_create_cmd(cmd_in, &opcode))
return -EINVAL;
cmd_out = uverbs_zalloc(attrs, cmd_out_len);
@@ -740,7 +940,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (!obj)
return -ENOMEM;
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ devx_set_umem_valid(cmd_in);
+
err = mlx5_cmd_exec(dev->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
cmd_out, cmd_out_len);
@@ -749,15 +951,19 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
uobj->object = obj;
obj->mdev = dev->mdev;
- devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
+ devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
+ &obj_id);
WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
if (err)
- goto obj_free;
+ goto obj_destroy;
+ obj->obj_id = get_enc_obj_id(opcode, obj_id);
return 0;
+obj_destroy:
+ mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
obj_free:
kfree(obj);
return err;
@@ -775,9 +981,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
struct devx_obj *obj = uobj->object;
void *cmd_out;
int err;
+ int uid;
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
if (!devx_is_obj_modify_cmd(cmd_in))
return -EINVAL;
@@ -789,7 +997,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
if (IS_ERR(cmd_out))
return PTR_ERR(cmd_out);
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ devx_set_umem_valid(cmd_in);
+
err = mlx5_cmd_exec(obj->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
cmd_out, cmd_out_len);
@@ -812,9 +1022,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
struct devx_obj *obj = uobj->object;
void *cmd_out;
int err;
+ int uid;
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
if (!devx_is_obj_query_cmd(cmd_in))
return -EINVAL;
@@ -826,7 +1038,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
if (IS_ERR(cmd_out))
return PTR_ERR(cmd_out);
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
err = mlx5_cmd_exec(obj->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
cmd_out, cmd_out_len);
@@ -925,6 +1137,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
int err;
if (!c->devx_uid)
+ return -EINVAL;
+
+ if (!capable(CAP_NET_RAW))
return -EPERM;
obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index 1a29f47f836e..f86cdcafdafc 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -7,7 +7,9 @@
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
+#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
@@ -16,6 +18,24 @@
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
+static int
+mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
+ enum mlx5_flow_namespace_type *namespace)
+{
+ switch (table_type) {
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
+ *namespace = MLX5_FLOW_NAMESPACE_BYPASS;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
+ *namespace = MLX5_FLOW_NAMESPACE_EGRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
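/*
 * Illustrative call, mirroring the users below:
 *
 *     enum mlx5_flow_namespace_type ns;
 *
 *     if (!mlx5_ib_ft_type_to_namespace(MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
 *                                       &ns))
 *             ... ns == MLX5_FLOW_NAMESPACE_EGRESS ...
 */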
+
static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
[MLX5_IB_FLOW_TYPE_NORMAL] = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
@@ -38,11 +58,15 @@ static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
},
};
+#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
+ struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
struct mlx5_ib_flow_handler *flow_handler;
struct mlx5_ib_flow_matcher *fs_matcher;
+ struct ib_uobject **arr_flow_actions;
+ struct ib_uflow_resources *uflow_res;
void *devx_obj;
int dest_id, dest_type;
void *cmd_in;
@@ -52,6 +76,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct ib_uobject *uobj =
uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ int len, ret, i;
if (!capable(CAP_NET_RAW))
return -EPERM;
@@ -61,7 +86,14 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
dest_qp = uverbs_attr_is_valid(attrs,
MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
- if ((dest_devx && dest_qp) || (!dest_devx && !dest_qp))
+ fs_matcher = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS &&
+ ((dest_devx && dest_qp) || (!dest_devx && !dest_qp)))
+ return -EINVAL;
+
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS &&
+ (dest_devx || dest_qp))
return -EINVAL;
if (dest_devx) {
@@ -75,7 +107,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
*/
if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
return -EINVAL;
- } else {
+ } else if (dest_qp) {
struct mlx5_ib_qp *mqp;
qp = uverbs_attr_get_obj(attrs,
@@ -92,6 +124,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
else
dest_id = mqp->raw_packet_qp.rq.tirn;
dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ } else {
+ dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
}
if (dev->rep)
@@ -101,16 +135,48 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
inlen = uverbs_attr_get_len(attrs,
MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
- fs_matcher = uverbs_attr_get_obj(attrs,
- MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
- flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, cmd_in, inlen,
+
+ uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
+ if (!uflow_res)
+ return -ENOMEM;
+
+ len = uverbs_attr_get_uobjs_arr(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
+ for (i = 0; i < len; i++) {
+ struct mlx5_ib_flow_action *maction =
+ to_mflow_act(arr_flow_actions[i]->object);
+
+ ret = parse_flow_flow_action(maction, false, &flow_act);
+ if (ret)
+ goto err_out;
+ flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE,
+ arr_flow_actions[i]->object);
+ }
+
+ ret = uverbs_copy_from(&flow_act.flow_tag, attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_TAG);
+ if (!ret) {
+ if (flow_act.flow_tag >= BIT(24)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+ flow_act.flags |= FLOW_ACT_HAS_TAG;
+ }
+
+ flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, &flow_act,
+ cmd_in, inlen,
dest_id, dest_type);
- if (IS_ERR(flow_handler))
- return PTR_ERR(flow_handler);
+ if (IS_ERR(flow_handler)) {
+ ret = PTR_ERR(flow_handler);
+ goto err_out;
+ }
- ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev);
+ ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);
return 0;
+err_out:
+ ib_uverbs_flow_resources_free(uflow_res);
+ return ret;
}
static int flow_matcher_cleanup(struct ib_uobject *uobject,
@@ -134,12 +200,14 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
struct mlx5_ib_flow_matcher *obj;
+ u32 flags;
int err;
obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
if (!obj)
return -ENOMEM;
+ obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
obj->mask_len = uverbs_attr_get_len(
attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
err = uverbs_copy_from(&obj->matcher_mask,
@@ -165,6 +233,19 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
if (err)
goto end;
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ IB_FLOW_ATTR_FLAGS_EGRESS);
+ if (err)
+ goto end;
+
+ if (flags) {
+ err = mlx5_ib_ft_type_to_namespace(
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX, &obj->ns_type);
+ if (err)
+ goto end;
+ }
+
uobj->object = obj;
obj->mdev = dev->mdev;
atomic_set(&obj->usecnt, 0);
@@ -175,6 +256,248 @@ end:
return err;
}
+void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
+{
+ switch (maction->flow_action_raw.sub_type) {
+ case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
+ mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
+ maction->flow_action_raw.action_id);
+ break;
+ case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
+ mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
+ maction->flow_action_raw.action_id);
+ break;
+ case MLX5_IB_FLOW_ACTION_DECAP:
+ break;
+ default:
+ break;
+ }
+}
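/*
 * Note: the DECAP sub-type intentionally frees nothing -- decap is carried
 * as a flag on the flow rule rather than as a firmware object, so there is
 * no context to deallocate (inferred from the cases above).
 */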
+
+static struct ib_flow_action *
+mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
+ enum mlx5_ib_uapi_flow_table_type ft_type,
+ u8 num_actions, void *in)
+{
+ enum mlx5_flow_namespace_type namespace;
+ struct mlx5_ib_flow_action *maction;
+ int ret;
+
+ ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ maction = kzalloc(sizeof(*maction), GFP_KERNEL);
+ if (!maction)
+ return ERR_PTR(-ENOMEM);
+
+ ret = mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in,
+ &maction->flow_action_raw.action_id);
+
+ if (ret) {
+ kfree(maction);
+ return ERR_PTR(ret);
+ }
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
+ maction->flow_action_raw.dev = dev;
+
+ return &maction->ib_action;
+}
+
+static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
+{
+ return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ max_modify_header_actions) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, max_modify_header_actions);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
+ struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+ enum mlx5_ib_uapi_flow_table_type ft_type;
+ struct ib_flow_action *action;
+ size_t num_actions;
+ void *in;
+ int len;
+ int ret;
+
+ if (!mlx5_ib_modify_header_supported(mdev))
+ return -EOPNOTSUPP;
+
+ in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
+ len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
+
+ if (len % MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto))
+ return -EINVAL;
+
+ ret = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
+ if (ret)
+ return ret;
+
+ num_actions = len / MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
+ if (IS_ERR(action))
+ return PTR_ERR(action);
+
+ uverbs_flow_action_fill_action(action, uobj, uobj->context->device,
+ IB_FLOW_ACTION_UNSPECIFIED);
+
+ return 0;
+}
+
+static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
+ u8 packet_reformat_type,
+ u8 ft_type)
+{
+ switch (packet_reformat_type) {
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
+ return MLX5_CAP_FLOWTABLE(ibdev->mdev,
+ encap_general_header);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
+ return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
+ reformat_l2_to_l3_tunnel);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
+ return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
+ reformat_l3_tunnel_to_l2);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
+ return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int mlx5_ib_dv_to_prm_packet_reformat_type(u8 dv_prt, u8 *prm_prt)
+{
+ switch (dv_prt) {
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ *prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_flow_action_create_packet_reformat_ctx(
+ struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_action *maction,
+ u8 ft_type, u8 dv_prt,
+ void *in, size_t len)
+{
+ enum mlx5_flow_namespace_type namespace;
+ u8 prm_prt;
+ int ret;
+
+ ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
+ if (ret)
+ return ret;
+
+ ret = mlx5_ib_dv_to_prm_packet_reformat_type(dv_prt, &prm_prt);
+ if (ret)
+ return ret;
+
+ ret = mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
+ in, namespace,
+ &maction->flow_action_raw.action_id);
+ if (ret)
+ return ret;
+
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
+ maction->flow_action_raw.dev = dev;
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
+ struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+ enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
+ enum mlx5_ib_uapi_flow_table_type ft_type;
+ struct mlx5_ib_flow_action *maction;
+ int ret;
+
+ ret = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_const(&dv_prt, attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
+ if (ret)
+ return ret;
+
+ if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
+ return -EOPNOTSUPP;
+
+ maction = kzalloc(sizeof(*maction), GFP_KERNEL);
+ if (!maction)
+ return -ENOMEM;
+
+ if (dv_prt ==
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_DECAP;
+ maction->flow_action_raw.dev = mdev;
+ } else {
+ void *in;
+ int len;
+
+ in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
+ if (IS_ERR(in)) {
+ ret = PTR_ERR(in);
+ goto free_maction;
+ }
+
+ len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
+
+ ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
+ maction, ft_type, dv_prt, in, len);
+ if (ret)
+ goto free_maction;
+ }
+
+ uverbs_flow_action_fill_action(&maction->ib_action, uobj,
+ uobj->context->device,
+ IB_FLOW_ACTION_UNSPECIFIED);
+ return 0;
+
+free_maction:
+ kfree(maction);
+ return ret;
+}
+
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_CREATE_FLOW,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
@@ -195,7 +518,15 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ACCESS_READ),
UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
MLX5_IB_OBJECT_DEVX_OBJ,
- UVERBS_ACCESS_READ));
+ UVERBS_ACCESS_READ),
+ UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_READ, 1,
+ MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_DESTROY_FLOW,
@@ -210,6 +541,44 @@ ADD_UVERBS_METHODS(mlx5_ib_fs,
&UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
+ UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
+ set_action_in_add_action_in_auto)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
+ UVERBS_ATTR_MIN_SIZE(1),
+ UA_ALLOC_AND_COPY,
+ UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
+ enum mlx5_ib_uapi_flow_action_packet_reformat_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(
+ mlx5_ib_flow_actions,
+ UVERBS_OBJECT_FLOW_ACTION,
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));
+
+DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
MLX5_IB_OBJECT_FLOW_MATCHER,
@@ -224,7 +593,10 @@ DECLARE_UVERBS_NAMED_METHOD(
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
UVERBS_ATTR_TYPE(u8),
- UA_MANDATORY));
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ enum ib_flow_flags,
+ UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
@@ -247,6 +619,7 @@ int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
root[i++] = &flow_objects;
root[i++] = &mlx5_ib_fs;
+ root[i++] = &mlx5_ib_flow_actions;
return i;
}
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 35a0e04c38f2..584ff2ea7810 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -39,9 +39,6 @@ static const struct mlx5_ib_profile rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
mlx5_ib_stage_post_ib_reg_umr_init,
NULL),
- STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
- mlx5_ib_stage_class_attr_init,
- NULL),
};
static int
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c414f3809e5c..e9c428071df3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1571,14 +1571,57 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
}
-static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+{
+ int err = 0;
+
+ mutex_lock(&dev->lb.mutex);
+ if (td)
+ dev->lb.user_td++;
+ if (qp)
+ dev->lb.qps++;
+
+ if (dev->lb.user_td == 2 ||
+ dev->lb.qps == 1) {
+ if (!dev->lb.enabled) {
+ err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+ dev->lb.enabled = true;
+ }
+ }
+
+ mutex_unlock(&dev->lb.mutex);
+
+ return err;
+}
+
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+{
+ mutex_lock(&dev->lb.mutex);
+ if (td)
+ dev->lb.user_td--;
+ if (qp)
+ dev->lb.qps--;
+
+ if (dev->lb.user_td == 1 &&
+ dev->lb.qps == 0) {
+ if (dev->lb.enabled) {
+ mlx5_nic_vport_update_local_lb(dev->mdev, false);
+ dev->lb.enabled = false;
+ }
+ }
+
+ mutex_unlock(&dev->lb.mutex);
+}
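/*
 * On the thresholds above (a reading of the code, not spelled out in the
 * patch): transport domains and loopback QPs are refcounted separately;
 * local loopback is switched on when a second TD appears (two contexts
 * that may talk through the NIC) or when the first loopback-dependent QP
 * is created, and switched off again once the counts fall back to one TD
 * and zero QPs.
 */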
+
+static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
+ u16 uid)
{
int err;
if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
return 0;
- err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
+ err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
if (err)
return err;
@@ -1587,35 +1630,23 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
!MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return err;
- mutex_lock(&dev->lb_mutex);
- dev->user_td++;
-
- if (dev->user_td == 2)
- err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
-
- mutex_unlock(&dev->lb_mutex);
- return err;
+ return mlx5_ib_enable_lb(dev, true, false);
}
-static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
+static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
+ u16 uid)
{
if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
return;
- mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
+ mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
(!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
!MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return;
- mutex_lock(&dev->lb_mutex);
- dev->user_td--;
-
- if (dev->user_td < 2)
- mlx5_nic_vport_update_local_lb(dev->mdev, false);
-
- mutex_unlock(&dev->lb_mutex);
+ mlx5_ib_disable_lb(dev, true, false);
}
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
@@ -1727,30 +1758,24 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif
- err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
- if (err)
- goto out_uars;
-
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
- /* Block DEVX on Infiniband as of SELinux */
- if (mlx5_ib_port_link_layer(ibdev, 1) != IB_LINK_LAYER_ETHERNET) {
- err = -EPERM;
- goto out_td;
- }
-
- err = mlx5_ib_devx_create(dev, context);
- if (err)
- goto out_td;
+ err = mlx5_ib_devx_create(dev);
+ if (err < 0)
+ goto out_uars;
+ context->devx_uid = err;
}
+ err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
+ context->devx_uid);
+ if (err)
+ goto out_devx;
+
if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
if (err)
goto out_mdev;
}
- INIT_LIST_HEAD(&context->vma_private_list);
- mutex_init(&context->vma_private_list_mutex);
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -1826,13 +1851,21 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
context->lib_caps = req.lib_caps;
print_lib_caps(dev, context->lib_caps);
+ if (mlx5_lag_is_active(dev->mdev)) {
+ u8 port = mlx5_core_native_port_num(dev->mdev);
+
+ atomic_set(&context->tx_port_affinity,
+ atomic_add_return(
+ 1, &dev->roce[port].tx_port_affinity));
+ }
+
return &context->ibucontext;
out_mdev:
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+out_devx:
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
- mlx5_ib_devx_destroy(dev, context);
-out_td:
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
out_uars:
deallocate_uars(dev, context);
@@ -1855,11 +1888,18 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_bfreg_info *bfregi;
- if (context->devx_uid)
- mlx5_ib_devx_destroy(dev, context);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /* All umems must be destroyed before the ucontext is destroyed. */
+ mutex_lock(&ibcontext->per_mm_list_lock);
+ WARN_ON(!list_empty(&ibcontext->per_mm_list));
+ mutex_unlock(&ibcontext->per_mm_list_lock);
+#endif
bfregi = &context->bfregi;
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+
+ if (context->devx_uid)
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
deallocate_uars(dev, context);
kfree(bfregi->sys_pages);
@@ -1900,94 +1940,9 @@ static int get_extended_index(unsigned long offset)
return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
}
-static void mlx5_ib_vma_open(struct vm_area_struct *area)
-{
- /* vma_open is called when a new VMA is created on top of our VMA. This
- * is done through either mremap flow or split_vma (usually due to
- * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
- * as this VMA is strongly hardware related. Therefore we set the
- * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
- * calling us again and trying to do incorrect actions. We assume that
- * the original VMA size is exactly a single page, and therefore all
- * "splitting" operation will not happen to it.
- */
- area->vm_ops = NULL;
-}
-
-static void mlx5_ib_vma_close(struct vm_area_struct *area)
-{
- struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
-
- /* It's guaranteed that all VMAs opened on a FD are closed before the
- * file itself is closed, therefore no sync is needed with the regular
- * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
- * However need a sync with accessing the vma as part of
- * mlx5_ib_disassociate_ucontext.
- * The close operation is usually called under mm->mmap_sem except when
- * process is exiting.
- * The exiting case is handled explicitly as part of
- * mlx5_ib_disassociate_ucontext.
- */
- mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
-
- /* setting the vma context pointer to null in the mlx5_ib driver's
- * private data, to protect a race condition in
- * mlx5_ib_disassociate_ucontext().
- */
- mlx5_ib_vma_priv_data->vma = NULL;
- mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
- list_del(&mlx5_ib_vma_priv_data->list);
- mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
- kfree(mlx5_ib_vma_priv_data);
-}
-
-static const struct vm_operations_struct mlx5_ib_vm_ops = {
- .open = mlx5_ib_vma_open,
- .close = mlx5_ib_vma_close
-};
-
-static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
- struct mlx5_ib_ucontext *ctx)
-{
- struct mlx5_ib_vma_private_data *vma_prv;
- struct list_head *vma_head = &ctx->vma_private_list;
-
- vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
- if (!vma_prv)
- return -ENOMEM;
-
- vma_prv->vma = vma;
- vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
- vma->vm_private_data = vma_prv;
- vma->vm_ops = &mlx5_ib_vm_ops;
-
- mutex_lock(&ctx->vma_private_list_mutex);
- list_add(&vma_prv->list, vma_head);
- mutex_unlock(&ctx->vma_private_list_mutex);
-
- return 0;
-}
static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- struct vm_area_struct *vma;
- struct mlx5_ib_vma_private_data *vma_private, *n;
- struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
-
- mutex_lock(&context->vma_private_list_mutex);
- list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
- list) {
- vma = vma_private->vma;
- zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
- /* context going to be destroyed, should
- * not access ops any more.
- */
- vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
- vma->vm_ops = NULL;
- list_del(&vma_private->list);
- kfree(vma_private);
- }
- mutex_unlock(&context->vma_private_list_mutex);
}
static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
@@ -2010,9 +1965,6 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
{
- phys_addr_t pfn;
- int err;
-
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
@@ -2025,13 +1977,8 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
if (!dev->mdev->clock_info_page)
return -EOPNOTSUPP;
- pfn = page_to_pfn(dev->mdev->clock_info_page);
- err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
- vma->vm_page_prot);
- if (err)
- return err;
-
- return mlx5_ib_set_vma_data(vma, context);
+ return rdma_user_mmap_page(&context->ibucontext, vma,
+ dev->mdev->clock_info_page, PAGE_SIZE);
}
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
@@ -2121,21 +2068,15 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
pfn = uar_index2pfn(dev, uar_index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
- vma->vm_page_prot = prot;
- err = io_remap_pfn_range(vma, vma->vm_start, pfn,
- PAGE_SIZE, vma->vm_page_prot);
+ err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
+ prot);
if (err) {
mlx5_ib_err(dev,
- "io_remap_pfn_range failed with error=%d, mmap_cmd=%s\n",
+ "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
err, mmap_cmd2str(cmd));
- err = -EAGAIN;
goto err;
}
- err = mlx5_ib_set_vma_data(vma, context);
- if (err)
- goto err;
-
if (dyn_uar)
bfregi->sys_pages[idx] = uar_index;
return 0;
@@ -2160,7 +2101,6 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
size_t map_size = vma->vm_end - vma->vm_start;
u32 npages = map_size >> PAGE_SHIFT;
phys_addr_t pfn;
- pgprot_t prot;
if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
page_idx + npages)
@@ -2170,14 +2110,8 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
PAGE_SHIFT) +
page_idx;
- prot = pgprot_writecombine(vma->vm_page_prot);
- vma->vm_page_prot = prot;
-
- if (io_remap_pfn_range(vma, vma->vm_start, pfn, map_size,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return mlx5_ib_set_vma_data(vma, mctx);
+ return rdma_user_mmap_io(context, vma, pfn, map_size,
+ pgprot_writecombine(vma->vm_page_prot));
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2318,21 +2252,30 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
struct mlx5_ib_alloc_pd_resp resp;
struct mlx5_ib_pd *pd;
int err;
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+ u16 uid = 0;
pd = kmalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
- err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
+ uid = context ? to_mucontext(context)->devx_uid : 0;
+ MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+ MLX5_SET(alloc_pd_in, in, uid, uid);
+ err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
+ out, sizeof(out));
if (err) {
kfree(pd);
return ERR_PTR(err);
}
+ pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
+ pd->uid = uid;
if (context) {
resp.pdn = pd->pdn;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
+ mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
kfree(pd);
return ERR_PTR(-EFAULT);
}
@@ -2346,7 +2289,7 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd);
- mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
+ mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
kfree(mpd);
return 0;
@@ -2452,20 +2395,50 @@ static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
offsetof(typeof(filter), field) -\
sizeof(filter.field))
-static int parse_flow_flow_action(const union ib_flow_spec *ib_spec,
- const struct ib_flow_attr *flow_attr,
- struct mlx5_flow_act *action)
+int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
+ bool is_egress,
+ struct mlx5_flow_act *action)
{
- struct mlx5_ib_flow_action *maction = to_mflow_act(ib_spec->action.act);
switch (maction->ib_action.type) {
case IB_FLOW_ACTION_ESP:
+ if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
+ return -EINVAL;
/* Currently only AES_GCM keymat is supported by the driver */
action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
- action->action |= flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS ?
+ action->action |= is_egress ?
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
return 0;
+ case IB_FLOW_ACTION_UNSPECIFIED:
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ return -EINVAL;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ action->modify_id = maction->flow_action_raw.action_id;
+ return 0;
+ }
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_DECAP) {
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ return -EINVAL;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ return 0;
+ }
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
+ if (action->action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
+ return -EINVAL;
+ action->action |=
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ action->reformat_id =
+ maction->flow_action_raw.action_id;
+ return 0;
+ }
+ /* fall through */
default:
return -EOPNOTSUPP;
}
@@ -2793,7 +2766,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
return -EINVAL;
action->flow_tag = ib_spec->flow_tag.tag_id;
- action->has_flow_tag = true;
+ action->flags |= FLOW_ACT_HAS_TAG;
break;
case IB_FLOW_SPEC_ACTION_DROP:
if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
@@ -2802,7 +2775,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
break;
case IB_FLOW_SPEC_ACTION_HANDLE:
- ret = parse_flow_flow_action(ib_spec, flow_attr, action);
+ ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
+ flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
if (ret)
return ret;
break;
@@ -2883,10 +2857,10 @@ is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
* rules would be supported, always return VALID_SPEC_NA.
*/
if (!is_crypto)
- return egress ? VALID_SPEC_INVALID : VALID_SPEC_NA;
+ return VALID_SPEC_NA;
return is_crypto && is_ipsec &&
- (!egress || (!is_drop && !flow_act->has_flow_tag)) ?
+ (!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ?
VALID_SPEC_VALID : VALID_SPEC_INVALID;
}
@@ -3026,14 +3000,15 @@ enum flow_table_type {
static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
struct mlx5_ib_flow_prio *prio,
int priority,
- int num_entries, int num_groups)
+ int num_entries, int num_groups,
+ u32 flags)
{
struct mlx5_flow_table *ft;
ft = mlx5_create_auto_grouped_flow_table(ns, priority,
num_entries,
num_groups,
- 0, 0);
+ 0, flags);
if (IS_ERR(ft))
return ERR_CAST(ft);
@@ -3053,26 +3028,43 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
int max_table_size;
int num_entries;
int num_groups;
+ u32 flags = 0;
int priority;
max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
log_max_ft_size));
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- if (ft_type == MLX5_IB_FT_TX)
- priority = 0;
- else if (flow_is_multicast_only(flow_attr) &&
- !dont_trap)
+ enum mlx5_flow_namespace_type fn_type;
+
+ if (flow_is_multicast_only(flow_attr) &&
+ !dont_trap)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
priority = ib_prio_to_core_prio(flow_attr->priority,
dont_trap);
- ns = mlx5_get_flow_namespace(dev->mdev,
- ft_type == MLX5_IB_FT_TX ?
- MLX5_FLOW_NAMESPACE_EGRESS :
- MLX5_FLOW_NAMESPACE_BYPASS);
+ if (ft_type == MLX5_IB_FT_RX) {
+ fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
+ prio = &dev->flow_db->prios[priority];
+ if (!dev->rep &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (!dev->rep &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ reformat_l3_tunnel_to_l2))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ } else {
+ max_table_size =
+ BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
+ log_max_ft_size));
+ fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
+ prio = &dev->flow_db->egress_prios[priority];
+ if (!dev->rep &&
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ }
+ ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
num_entries = MLX5_FS_MAX_ENTRIES;
num_groups = MLX5_FS_MAX_TYPES;
- prio = &dev->flow_db->prios[priority];
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
ns = mlx5_get_flow_namespace(dev->mdev,
@@ -3104,7 +3096,8 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = prio->flow_table;
if (!ft)
- return _get_prio(ns, prio, priority, num_entries, num_groups);
+ return _get_prio(ns, prio, priority, num_entries, num_groups,
+ flags);
return prio;
}
@@ -3271,6 +3264,9 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
if (!is_valid_attr(dev->mdev, flow_attr))
return ERR_PTR(-EINVAL);
+ if (dev->rep && is_egress)
+ return ERR_PTR(-EINVAL);
+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
handler = kzalloc(sizeof(*handler), GFP_KERNEL);
if (!handler || !spec) {
@@ -3320,15 +3316,18 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ struct mlx5_ib_mcounters *mcounters;
+
err = flow_counters_set_data(flow_act.counters, ucmd);
if (err)
goto free;
+ mcounters = to_mcounters(flow_act.counters);
handler->ibcounters = flow_act.counters;
dest_arr[dest_num].type =
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest_arr[dest_num].counter =
- to_mcounters(flow_act.counters)->hw_cntrs_hndl;
+ dest_arr[dest_num].counter_id =
+ mlx5_fc_id(mcounters->hw_cntrs_hndl);
dest_num++;
}
@@ -3346,7 +3345,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
}
- if (flow_act.has_flow_tag &&
+ if ((flow_act.flags & FLOW_ACT_HAS_TAG) &&
(flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
@@ -3658,34 +3657,54 @@ free_ucmd:
return ERR_PTR(err);
}
-static struct mlx5_ib_flow_prio *_get_flow_table(struct mlx5_ib_dev *dev,
- int priority, bool mcast)
+static struct mlx5_ib_flow_prio *
+_get_flow_table(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_matcher *fs_matcher,
+ bool mcast)
{
- int max_table_size;
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_ib_flow_prio *prio;
+ int max_table_size;
+ u32 flags = 0;
+ int priority;
+
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ log_max_ft_size));
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ reformat_l3_tunnel_to_l2))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ } else { /* Can only be MLX5_FLOW_NAMESPACE_EGRESS */
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
+ log_max_ft_size));
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ }
- max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
- log_max_ft_size));
if (max_table_size < MLX5_FS_MAX_ENTRIES)
return ERR_PTR(-ENOMEM);
if (mcast)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
- priority = ib_prio_to_core_prio(priority, false);
+ priority = ib_prio_to_core_prio(fs_matcher->priority, false);
- ns = mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+ ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
if (!ns)
return ERR_PTR(-ENOTSUPP);
- prio = &dev->flow_db->prios[priority];
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
+ prio = &dev->flow_db->prios[priority];
+ else
+ prio = &dev->flow_db->egress_prios[priority];
if (prio->flow_table)
return prio;
return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
- MLX5_FS_MAX_TYPES);
+ MLX5_FS_MAX_TYPES, flags);
}
static struct mlx5_ib_flow_handler *
@@ -3693,10 +3712,10 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
struct mlx5_flow_destination *dst,
struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_act *flow_act,
void *cmd_in, int inlen)
{
struct mlx5_ib_flow_handler *handler;
- struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft = ft_prio->flow_table;
int err = 0;
@@ -3715,9 +3734,8 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
fs_matcher->mask_len);
spec->match_criteria_enable = fs_matcher->match_criteria_enable;
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
handler->rule = mlx5_add_flow_rules(ft, spec,
- &flow_act, dst, 1);
+ flow_act, dst, 1);
if (IS_ERR(handler->rule)) {
err = PTR_ERR(handler->rule);
@@ -3779,12 +3797,12 @@ static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
struct mlx5_ib_flow_handler *
mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_act *flow_act,
void *cmd_in, int inlen, int dest_id,
int dest_type)
{
struct mlx5_flow_destination *dst;
struct mlx5_ib_flow_prio *ft_prio;
- int priority = fs_matcher->priority;
struct mlx5_ib_flow_handler *handler;
bool mcast;
int err;
@@ -3802,7 +3820,7 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
mutex_lock(&dev->flow_db->lock);
- ft_prio = _get_flow_table(dev, priority, mcast);
+ ft_prio = _get_flow_table(dev, fs_matcher, mcast);
if (IS_ERR(ft_prio)) {
err = PTR_ERR(ft_prio);
goto unlock;
@@ -3811,13 +3829,18 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
dst->type = dest_type;
dst->tir_num = dest_id;
- } else {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
dst->ft_num = dest_id;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ } else {
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
}
- handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, cmd_in,
- inlen);
+ handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
+ cmd_in, inlen);
if (IS_ERR(handler)) {
err = PTR_ERR(handler);
@@ -3995,6 +4018,9 @@ static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
*/
mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
break;
+ case IB_FLOW_ACTION_UNSPECIFIED:
+ mlx5_ib_destroy_flow_action_raw(maction);
+ break;
default:
WARN_ON(true);
break;
@@ -4009,13 +4035,17 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_ib_qp *mqp = to_mqp(ibqp);
int err;
+ u16 uid;
+
+ uid = ibqp->pd ?
+ to_mpd(ibqp->pd)->uid : 0;
if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
return -EOPNOTSUPP;
}
- err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
+ err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
if (err)
mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
ibqp->qp_num, gid->raw);
@@ -4027,8 +4057,11 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
int err;
+ u16 uid;
- err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
+ uid = ibqp->pd ?
+ to_mpd(ibqp->pd)->uid : 0;
+ err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
if (err)
mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
ibqp->qp_num, gid->raw);
@@ -4049,16 +4082,17 @@ static int init_node_data(struct mlx5_ib_dev *dev)
return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}
-static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t fw_pages_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}
+static DEVICE_ATTR_RO(fw_pages);
-static ssize_t show_reg_pages(struct device *device,
+static ssize_t reg_pages_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
@@ -4066,44 +4100,47 @@ static ssize_t show_reg_pages(struct device *device,
return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
+static DEVICE_ATTR_RO(reg_pages);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "%x\n", dev->mdev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
dev->mdev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
-static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
+static struct attribute *mlx5_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_fw_pages.attr,
+ &dev_attr_reg_pages.attr,
+ NULL,
+};
-static struct device_attribute *mlx5_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
- &dev_attr_fw_pages,
- &dev_attr_reg_pages,
+static const struct attribute_group mlx5_attr_group = {
+ .attrs = mlx5_class_attributes,
};
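For reference, DEVICE_ATTR_RO(fw_pages) from linux/device.h expands to roughly the definition below, which is why each show routine is renamed to the <attr>_show pattern; collecting the attributes into one attribute_group lets the core create every sysfs file at registration time, replacing the per-file device_create_file() loop removed later in this patch:

struct device_attribute dev_attr_fw_pages = {
	.attr = { .name = "fw_pages", .mode = 0444 },	/* S_IRUGO */
	.show = fw_pages_show,
};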
static void pkey_change_handler(struct work_struct *work)
@@ -5163,22 +5200,14 @@ done:
return num_counters;
}
-static struct net_device*
-mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
- u8 port_num,
- enum rdma_netdev_t type,
- const char *name,
- unsigned char name_assign_type,
- void (*setup)(struct net_device *))
+static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params)
{
- struct net_device *netdev;
-
if (type != RDMA_NETDEV_IPOIB)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
- netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
- name, setup);
- return netdev;
+ return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
}
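With this split, netdev allocation moves into the RDMA core and the driver only reports sizing. A hedged sketch of the consumer side (field names per this era's rdma/ib_verbs.h; the surrounding logic is illustrative, not the exact core code):

struct rdma_netdev_alloc_params params;
struct net_device *netdev;
int rc;

rc = device->rdma_netdev_get_params(device, port_num, RDMA_NETDEV_IPOIB,
				    &params);
if (rc)
	return ERR_PTR(rc);
netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
			  setup, params.txqs, params.rxqs);
if (!netdev)
	return ERR_PTR(-ENOMEM);
rc = params.initialize_rdma_netdev(device, port_num, netdev, params.param);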
static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
@@ -5636,7 +5665,6 @@ void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
- const char *name;
int err;
int i;
@@ -5669,12 +5697,6 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
- if (!mlx5_lag_is_active(mdev))
- name = "mlx5_%d";
- else
- name = "mlx5_bond_%d";
-
- strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
@@ -5824,8 +5846,9 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
- if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
- dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
+ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
+ IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
+ dev->ib_dev.rdma_netdev_get_params = mlx5_ib_rn_get_params;
if (mlx5_core_is_pf(mdev)) {
dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
@@ -5880,7 +5903,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
- mutex_init(&dev->lb_mutex);
+ mutex_init(&dev->lb.mutex);
return 0;
}
@@ -6087,7 +6110,14 @@ static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev)
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
- return ib_register_device(&dev->ib_dev, NULL);
+ const char *name;
+
+ rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
+ if (!mlx5_lag_is_active(dev->mdev))
+ name = "mlx5_%d";
+ else
+ name = "mlx5_bond_%d";
+ return ib_register_device(&dev->ib_dev, name, NULL);
}
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
@@ -6117,21 +6147,6 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
cancel_delay_drop(dev);
}
-int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
-{
- int err;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
- err = device_create_file(&dev->ib_dev.dev,
- mlx5_class_attributes[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
{
mlx5_ib_register_vport_reps(dev);
@@ -6155,6 +6170,8 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
profile->stage[stage].cleanup(dev);
}
+ if (dev->devx_whitelist_uid)
+ mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
ib_dealloc_device((struct ib_device *)dev);
}
@@ -6163,8 +6180,7 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
{
int err;
int i;
-
- printk_once(KERN_INFO "%s", mlx5_version);
+ int uid;
for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
if (profile->stage[i].init) {
@@ -6174,6 +6190,10 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
}
}
+ uid = mlx5_ib_devx_create(dev);
+ if (uid > 0)
+ dev->devx_whitelist_uid = uid;
+
dev->profile = profile;
dev->ib_active = true;
@@ -6234,9 +6254,6 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
mlx5_ib_stage_delay_drop_init,
mlx5_ib_stage_delay_drop_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
- mlx5_ib_stage_class_attr_init,
- NULL),
};
static const struct mlx5_ib_profile nic_rep_profile = {
@@ -6279,9 +6296,6 @@ static const struct mlx5_ib_profile nic_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
mlx5_ib_stage_post_ib_reg_umr_init,
NULL),
- STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
- mlx5_ib_stage_class_attr_init,
- NULL),
STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
mlx5_ib_stage_rep_reg_init,
mlx5_ib_stage_rep_reg_cleanup),
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index f3dbd75a0a96..549234988bb4 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,7 +57,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
int entry;
unsigned long page_shift = umem->page_shift;
- if (umem->odp_data) {
+ if (umem->is_odp) {
*ncont = ib_umem_page_count(umem);
*count = *ncont << (page_shift - PAGE_SHIFT);
*shift = page_shift;
@@ -152,14 +152,13 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
struct scatterlist *sg;
int entry;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- const bool odp = umem->odp_data != NULL;
-
- if (odp) {
+ if (umem->is_odp) {
WARN_ON(shift != 0);
WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
for (i = 0; i < num_pages; ++i) {
- dma_addr_t pa = umem->odp_data->dma_list[offset + i];
+ dma_addr_t pa =
+ to_ib_umem_odp(umem)->dma_list[offset + i];
pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
}
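The is_odp flag and to_ib_umem_odp() used above come from this series' umem rework: the ODP state is now embedded in struct ib_umem_odp rather than hanging off umem->odp_data, so the accessor is a plain container_of cast (per rdma/ib_umem_odp.h):

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}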
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 320d4dfe8c2f..b651a7a6fde9 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -39,8 +39,9 @@
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
+#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
@@ -48,17 +50,17 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
-#define mlx5_ib_dbg(dev, format, arg...) \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+#define mlx5_ib_dbg(_dev, format, arg...) \
+ dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
-#define mlx5_ib_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+#define mlx5_ib_err(_dev, format, arg...) \
+ dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
-#define mlx5_ib_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+#define mlx5_ib_warn(_dev, format, arg...) \
+ dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
#define field_avail(type, fld, sz) (offsetof(type, fld) + \
sizeof(((type *)0)->fld) <= (sz))
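Call sites are untouched by the macro rewrite; only the sink changes from pr_*() with a hand-built name prefix to the device-bound dev_*() helpers. For example:

mlx5_ib_dbg(dev, "qpn 0x%x\n", qpn);

now logs through dev_dbg() against &ib_dev.dev, so the printed device name stays correct across the device-rename support added elsewhere in this series.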
@@ -114,13 +116,6 @@ enum {
MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
};
-struct mlx5_ib_vma_private_data {
- struct list_head list;
- struct vm_area_struct *vma;
- /* protect vma_private_list add/del */
- struct mutex *vma_private_list_mutex;
-};
-
struct mlx5_ib_ucontext {
struct ib_ucontext ibucontext;
struct list_head db_page_list;
@@ -132,13 +127,12 @@ struct mlx5_ib_ucontext {
u8 cqe_version;
/* Transport Domain number */
u32 tdn;
- struct list_head vma_private_list;
- /* protect vma_private_list add/del */
- struct mutex vma_private_list_mutex;
u64 lib_caps;
DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
u16 devx_uid;
+ /* For RoCE LAG TX affinity */
+ atomic_t tx_port_affinity;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -149,6 +143,13 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
struct mlx5_ib_pd {
struct ib_pd ibpd;
u32 pdn;
+ u16 uid;
+};
+
+enum {
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
+ MLX5_IB_FLOW_ACTION_DECAP,
};
#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
@@ -180,6 +181,7 @@ struct mlx5_ib_flow_matcher {
struct mlx5_ib_match_params matcher_mask;
int mask_len;
enum mlx5_ib_flow_type flow_type;
+ enum mlx5_flow_namespace_type ns_type;
u16 priority;
struct mlx5_core_dev *mdev;
atomic_t usecnt;
@@ -188,6 +190,7 @@ struct mlx5_ib_flow_matcher {
struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
struct mlx5_flow_table *lag_demux_ft;
@@ -322,6 +325,7 @@ enum {
struct mlx5_ib_rwq_ind_table {
struct ib_rwq_ind_table ib_rwq_ind_tbl;
u32 rqtn;
+ u16 uid;
};
struct mlx5_ib_ubuffer {
@@ -428,13 +432,14 @@ struct mlx5_ib_qp {
struct list_head cq_send_list;
struct mlx5_rate_limit rl;
u32 underlay_qpn;
- bool tunnel_offload_en;
+ u32 flags_en;
/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
enum ib_qp_type qp_sub_type;
};
struct mlx5_ib_cq_buf {
struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
struct ib_umem *umem;
int cqe_size;
int nent;
@@ -535,6 +540,7 @@ struct mlx5_ib_srq {
struct mlx5_ib_xrcd {
struct ib_xrcd ibxrcd;
u32 xrcdn;
+ u16 uid;
};
enum mlx5_ib_mtt_access_flags {
@@ -699,7 +705,7 @@ struct mlx5_roce {
rwlock_t netdev_lock;
struct net_device *netdev;
struct notifier_block nb;
- atomic_t next_port;
+ atomic_t tx_port_affinity;
enum ib_port_state last_port_state;
struct mlx5_ib_dev *dev;
u8 native_port_num;
@@ -814,6 +820,11 @@ struct mlx5_ib_flow_action {
u64 ib_flags;
struct mlx5_accel_esp_xfrm *ctx;
} esp_aes_gcm;
+ struct {
+ struct mlx5_ib_dev *dev;
+ u32 sub_type;
+ u32 action_id;
+ } flow_action_raw;
};
};
@@ -858,9 +869,20 @@ to_mcounters(struct ib_counters *ibcntrs)
return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}
+int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
+ bool is_egress,
+ struct mlx5_flow_act *action);
+struct mlx5_ib_lb_state {
+ /* protect the user_td */
+ struct mutex mutex;
+ u32 user_td;
+ int qps;
+ bool enabled;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
- const struct uverbs_object_tree_def *driver_trees[6];
+ const struct uverbs_object_tree_def *driver_trees[7];
struct mlx5_core_dev *mdev;
struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
@@ -899,13 +921,12 @@ struct mlx5_ib_dev {
const struct mlx5_ib_profile *profile;
struct mlx5_eswitch_rep *rep;
- /* protect the user_td */
- struct mutex lb_mutex;
- u32 user_td;
+ struct mlx5_ib_lb_state lb;
u8 umr_fence;
struct list_head ib_dev_list;
u64 sys_image_guid;
struct mlx5_memic memic;
+ u16 devx_whitelist_uid;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1016,6 +1037,8 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
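The mlx5_ib_lb_state added above backs this enable/disable pair. A plausible sketch of the refcounting, assuming mlx5_nic_vport_update_local_lb() from mlx5 core as the actuator; the real body lives in main.c and its thresholds may differ:

int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
	int err = 0;

	mutex_lock(&dev->lb.mutex);
	if (td)
		dev->lb.user_td++;
	if (qp)
		dev->lb.qps++;
	/* Loopback is only needed once a second transport domain or the
	 * first self-loopback QP appears. */
	if (dev->lb.user_td == 2 || dev->lb.qps == 1) {
		if (!dev->lb.enabled) {
			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
			if (!err)
				dev->lb.enabled = true;
		}
	}
	mutex_unlock(&dev->lb.mutex);
	return err;
}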
@@ -1105,7 +1128,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
@@ -1140,7 +1163,7 @@ void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
@@ -1179,7 +1202,6 @@ void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage);
@@ -1228,22 +1250,20 @@ void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
u8 port_num);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context);
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context);
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
- void *cmd_in, int inlen, int dest_id, int dest_type);
+ struct mlx5_flow_act *flow_act, void *cmd_in, int inlen,
+ int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
+void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
static inline int
-mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; };
-static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context) {}
+mlx5_ib_devx_create(struct mlx5_ib_dev *dev) { return -EOPNOTSUPP; }
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
static inline const struct uverbs_object_tree_def *
mlx5_ib_get_devx_tree(void) { return NULL; }
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
@@ -1256,6 +1276,11 @@ mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
{
return 0;
}
+static inline void
+mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
+{
+}
#endif
static inline void init_query_mad(struct ib_smp *mad)
{
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 9fb1d9cb9401..9b195d65a13e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -98,7 +98,7 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
- if (mr->umem->odp_data) {
+ if (mr->umem->is_odp) {
/*
* This barrier prevents the compiler from moving the
* setting of umem->odp_data->private to point to our
@@ -107,7 +107,7 @@ static void update_odp_mr(struct mlx5_ib_mr *mr)
* handle invalidations.
*/
smp_wmb();
- mr->umem->odp_data->private = mr;
+ to_ib_umem_odp(mr->umem)->private = mr;
/*
* Make sure we will see the new
* umem->odp_data->private value in the invalidation
@@ -544,6 +544,9 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
int shrink = 0;
int c;
+ if (!mr->allocated_from_cache)
+ return;
+
c = order2idx(dev, mr->order);
if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
@@ -688,7 +691,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
init_completion(&ent->compl);
INIT_WORK(&ent->work, cache_work_func);
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
- queue_work(cache->wq, &ent->work);
if (i > MR_CACHE_LAST_STD_ENTRY) {
mlx5_odp_init_mr_cache_entry(ent);
@@ -708,6 +710,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->limit = dev->mdev->profile->mr_cache[i].limit;
else
ent->limit = 0;
+ queue_work(cache->wq, &ent->work);
}
err = mlx5_mr_cache_debugfs_init(dev);
@@ -1624,14 +1627,16 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
struct ib_umem *umem = mr->umem;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (umem && umem->odp_data) {
+ if (umem && umem->is_odp) {
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
+
/* Prevent new page faults from succeeding */
mr->live = 0;
/* Wait for all running page-fault handlers to finish. */
synchronize_srcu(&dev->mr_srcu);
/* Destroy all page mappings */
- if (umem->odp_data->page_list)
- mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
+ if (umem_odp->page_list)
+ mlx5_ib_invalidate_range(umem_odp, ib_umem_start(umem),
ib_umem_end(umem));
else
mlx5_ib_free_implicit_mr(mr);
@@ -1647,18 +1652,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
umem = NULL;
}
#endif
-
clean_mr(dev, mr);
+ /*
+ * We should unregister the DMA addresses from the HCA before
+ * removing the DMA mapping.
+ */
+ mlx5_mr_cache_free(dev, mr);
if (umem) {
ib_umem_release(umem);
atomic_sub(npages, &dev->mdev->priv.reg_pages);
}
-
if (!mr->allocated_from_cache)
kfree(mr);
- else
- mlx5_mr_cache_free(dev, mr);
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d216e0d2921d..b04eb6775326 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -61,13 +61,21 @@ static int check_parent(struct ib_umem_odp *odp,
return mr && mr->parent == parent && !odp->dying;
}
+struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
+{
+ if (WARN_ON(!mr || !mr->umem || !mr->umem->is_odp))
+ return NULL;
+
+ return to_ib_umem_odp(mr->umem)->per_mm;
+}
+
static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
{
struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
- struct ib_ucontext *ctx = odp->umem->context;
+ struct ib_ucontext_per_mm *per_mm = odp->per_mm;
struct rb_node *rb;
- down_read(&ctx->umem_rwsem);
+ down_read(&per_mm->umem_rwsem);
while (1) {
rb = rb_next(&odp->interval_tree.rb);
if (!rb)
@@ -79,19 +87,19 @@ static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
not_found:
odp = NULL;
end:
- up_read(&ctx->umem_rwsem);
+ up_read(&per_mm->umem_rwsem);
return odp;
}
-static struct ib_umem_odp *odp_lookup(struct ib_ucontext *ctx,
- u64 start, u64 length,
+static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
struct mlx5_ib_mr *parent)
{
+ struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
struct ib_umem_odp *odp;
struct rb_node *rb;
- down_read(&ctx->umem_rwsem);
- odp = rbt_ib_umem_lookup(&ctx->umem_tree, start, length);
+ down_read(&per_mm->umem_rwsem);
+ odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
if (!odp)
goto end;
@@ -102,13 +110,13 @@ static struct ib_umem_odp *odp_lookup(struct ib_ucontext *ctx,
if (!rb)
goto not_found;
odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (ib_umem_start(odp->umem) > start + length)
+ if (ib_umem_start(&odp->umem) > start + length)
goto not_found;
}
not_found:
odp = NULL;
end:
- up_read(&ctx->umem_rwsem);
+ up_read(&per_mm->umem_rwsem);
return odp;
}
@@ -116,7 +124,6 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
size_t nentries, struct mlx5_ib_mr *mr, int flags)
{
struct ib_pd *pd = mr->ibmr.pd;
- struct ib_ucontext *ctx = pd->uobject->context;
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_umem_odp *odp;
unsigned long va;
@@ -131,13 +138,13 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
return;
}
- odp = odp_lookup(ctx, offset * MLX5_IMR_MTT_SIZE,
- nentries * MLX5_IMR_MTT_SIZE, mr);
+ odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
+ nentries * MLX5_IMR_MTT_SIZE, mr);
for (i = 0; i < nentries; i++, pklm++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
va = (offset + i) * MLX5_IMR_MTT_SIZE;
- if (odp && odp->umem->address == va) {
+ if (odp && odp->umem.address == va) {
struct mlx5_ib_mr *mtt = odp->private;
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
@@ -153,13 +160,13 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
static void mr_leaf_free_action(struct work_struct *work)
{
struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
- int idx = ib_umem_start(odp->umem) >> MLX5_IMR_MTT_SHIFT;
+ int idx = ib_umem_start(&odp->umem) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
mr->parent = NULL;
synchronize_srcu(&mr->dev->mr_srcu);
- ib_umem_release(odp->umem);
+ ib_umem_release(&odp->umem);
if (imr->live)
mlx5_ib_update_xlt(imr, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
@@ -170,22 +177,24 @@ static void mr_leaf_free_action(struct work_struct *work)
wake_up(&imr->q_leaf_free);
}
-void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
unsigned long end)
{
struct mlx5_ib_mr *mr;
const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
sizeof(struct mlx5_mtt)) - 1;
u64 idx = 0, blk_start_idx = 0;
+ struct ib_umem *umem;
int in_block = 0;
u64 addr;
- if (!umem || !umem->odp_data) {
+ if (!umem_odp) {
pr_err("invalidation called on NULL umem or non-ODP umem\n");
return;
}
+ umem = &umem_odp->umem;
- mr = umem->odp_data->private;
+ mr = umem_odp->private;
if (!mr || !mr->ibmr.pd)
return;
@@ -208,7 +217,7 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
* estimate the cost of another UMR vs. the cost of bigger
* UMR.
*/
- if (umem->odp_data->dma_list[idx] &
+ if (umem_odp->dma_list[idx] &
(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
if (!in_block) {
blk_start_idx = idx;
@@ -237,13 +246,13 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
* needed.
*/
- ib_umem_odp_unmap_dma_pages(umem, start, end);
+ ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
if (unlikely(!umem->npages && mr->parent &&
- !umem->odp_data->dying)) {
- WRITE_ONCE(umem->odp_data->dying, 1);
+ !umem_odp->dying)) {
+ WRITE_ONCE(umem_odp->dying, 1);
atomic_inc(&mr->parent->num_leaf_free);
- schedule_work(&umem->odp_data->work);
+ schedule_work(&umem_odp->work);
}
}
@@ -366,16 +375,15 @@ fail:
static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
u64 io_virt, size_t bcnt)
{
- struct ib_ucontext *ctx = mr->ibmr.pd->uobject->context;
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
struct ib_umem_odp *odp, *result = NULL;
+ struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
u64 addr = io_virt & MLX5_IMR_MTT_MASK;
int nentries = 0, start_idx = 0, ret;
struct mlx5_ib_mr *mtt;
- struct ib_umem *umem;
- mutex_lock(&mr->umem->odp_data->umem_mutex);
- odp = odp_lookup(ctx, addr, 1, mr);
+ mutex_lock(&odp_mr->umem_mutex);
+ odp = odp_lookup(addr, 1, mr);
mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
io_virt, bcnt, addr, odp);
@@ -385,22 +393,23 @@ next_mr:
if (nentries)
nentries++;
} else {
- umem = ib_alloc_odp_umem(ctx, addr, MLX5_IMR_MTT_SIZE);
- if (IS_ERR(umem)) {
- mutex_unlock(&mr->umem->odp_data->umem_mutex);
- return ERR_CAST(umem);
+ odp = ib_alloc_odp_umem(odp_mr->per_mm, addr,
+ MLX5_IMR_MTT_SIZE);
+ if (IS_ERR(odp)) {
+ mutex_unlock(&odp_mr->umem_mutex);
+ return ERR_CAST(odp);
}
- mtt = implicit_mr_alloc(mr->ibmr.pd, umem, 0, mr->access_flags);
+ mtt = implicit_mr_alloc(mr->ibmr.pd, &odp->umem, 0,
+ mr->access_flags);
if (IS_ERR(mtt)) {
- mutex_unlock(&mr->umem->odp_data->umem_mutex);
- ib_umem_release(umem);
+ mutex_unlock(&odp_mr->umem_mutex);
+ ib_umem_release(&odp->umem);
return ERR_CAST(mtt);
}
- odp = umem->odp_data;
odp->private = mtt;
- mtt->umem = umem;
+ mtt->umem = &odp->umem;
mtt->mmkey.iova = addr;
mtt->parent = mr;
INIT_WORK(&odp->work, mr_leaf_free_action);
@@ -417,7 +426,7 @@ next_mr:
addr += MLX5_IMR_MTT_SIZE;
if (unlikely(addr < io_virt + bcnt)) {
odp = odp_next(odp);
- if (odp && odp->umem->address != addr)
+ if (odp && odp->umem.address != addr)
odp = NULL;
goto next_mr;
}
@@ -432,7 +441,7 @@ next_mr:
}
}
- mutex_unlock(&mr->umem->odp_data->umem_mutex);
+ mutex_unlock(&odp_mr->umem_mutex);
return result;
}
@@ -460,36 +469,36 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
return imr;
}
-static int mr_leaf_free(struct ib_umem *umem, u64 start,
- u64 end, void *cookie)
+static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
+ void *cookie)
{
- struct mlx5_ib_mr *mr = umem->odp_data->private, *imr = cookie;
+ struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
+ struct ib_umem *umem = &umem_odp->umem;
if (mr->parent != imr)
return 0;
- ib_umem_odp_unmap_dma_pages(umem,
- ib_umem_start(umem),
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
ib_umem_end(umem));
- if (umem->odp_data->dying)
+ if (umem_odp->dying)
return 0;
- WRITE_ONCE(umem->odp_data->dying, 1);
+ WRITE_ONCE(umem_odp->dying, 1);
atomic_inc(&imr->num_leaf_free);
- schedule_work(&umem->odp_data->work);
+ schedule_work(&umem_odp->work);
return 0;
}
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
- struct ib_ucontext *ctx = imr->ibmr.pd->uobject->context;
+ struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
- down_read(&ctx->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&ctx->umem_tree, 0, ULLONG_MAX,
+ down_read(&per_mm->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
mr_leaf_free, true, imr);
- up_read(&ctx->umem_rwsem);
+ up_read(&per_mm->umem_rwsem);
wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
}
@@ -497,6 +506,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
u64 io_virt, size_t bcnt, u32 *bytes_mapped)
{
+ struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
u64 access_mask = ODP_READ_ALLOWED_BIT;
int npages = 0, page_shift, np;
u64 start_idx, page_mask;
@@ -505,7 +515,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
size_t size;
int ret;
- if (!mr->umem->odp_data->page_list) {
+ if (!odp_mr->page_list) {
odp = implicit_mr_get_data(mr, io_virt, bcnt);
if (IS_ERR(odp))
@@ -513,11 +523,11 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
mr = odp->private;
} else {
- odp = mr->umem->odp_data;
+ odp = odp_mr;
}
next_mr:
- size = min_t(size_t, bcnt, ib_umem_end(odp->umem) - io_virt);
+ size = min_t(size_t, bcnt, ib_umem_end(&odp->umem) - io_virt);
page_shift = mr->umem->page_shift;
page_mask = ~(BIT(page_shift) - 1);
@@ -533,7 +543,7 @@ next_mr:
*/
smp_rmb();
- ret = ib_umem_odp_map_dma_pages(mr->umem, io_virt, size,
+ ret = ib_umem_odp_map_dma_pages(to_ib_umem_odp(mr->umem), io_virt, size,
access_mask, current_seq);
if (ret < 0)
@@ -542,7 +552,8 @@ next_mr:
np = ret;
mutex_lock(&odp->umem_mutex);
- if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
+ if (!ib_umem_mmu_notifier_retry(to_ib_umem_odp(mr->umem),
+ current_seq)) {
/*
* No need to check whether the MTTs really belong to
* this MR, since ib_umem_odp_map_dma_pages already
@@ -575,7 +586,7 @@ next_mr:
io_virt += size;
next = odp_next(odp);
- if (unlikely(!next || next->umem->address != io_virt)) {
+ if (unlikely(!next || next->umem.address != io_virt)) {
mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
io_virt, next);
return -EAGAIN;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 6cba2a02d11b..6841c0f9237f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
+#include "cmd.h"
/* not supported currently */
static int wq_signature;
@@ -850,6 +851,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_umem;
}
+ MLX5_SET(create_qp_in, *in, uid, to_mpd(pd)->uid);
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
if (ubuffer->umem)
mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
@@ -1051,7 +1053,8 @@ static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
static int is_connected(enum ib_qp_type qp_type)
{
- if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
+ if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
+ qp_type == MLX5_IB_QPT_DCI)
return 1;
return 0;
@@ -1059,11 +1062,13 @@ static int is_connected(enum ib_qp_type qp_type)
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
- struct mlx5_ib_sq *sq, u32 tdn)
+ struct mlx5_ib_sq *sq, u32 tdn,
+ struct ib_pd *pd)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
MLX5_SET(tisc, tisc, transport_domain, tdn);
if (qp->flags & MLX5_IB_QP_UNDERLAY)
MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
@@ -1072,9 +1077,9 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
}
static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
- struct mlx5_ib_sq *sq)
+ struct mlx5_ib_sq *sq, struct ib_pd *pd)
{
- mlx5_core_destroy_tis(dev->mdev, sq->tisn);
+ mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
}
static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
@@ -1114,6 +1119,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
goto err_umem;
}
+ MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
@@ -1188,7 +1194,7 @@ static size_t get_rq_pas_size(void *qpc)
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, void *qpin,
- size_t qpinlen)
+ size_t qpinlen, struct ib_pd *pd)
{
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
@@ -1209,6 +1215,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
MLX5_SET(rqc, rqc, vsd, 1);
@@ -1256,10 +1263,23 @@ static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
}
+static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq,
+ u32 qp_flags_en,
+ struct ib_pd *pd)
+{
+ if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+ mlx5_ib_disable_lb(dev, false, true);
+ mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
+}
+
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, u32 tdn,
- bool tunnel_offload_en)
+ u32 *qp_flags_en,
+ struct ib_pd *pd)
{
+ u8 lb_flag = 0;
u32 *in;
void *tirc;
int inlen;
@@ -1270,33 +1290,45 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
MLX5_SET(tirc, tirc, transport_domain, tdn);
- if (tunnel_offload_en)
+ if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
- if (dev->rep)
- MLX5_SET(tirc, tirc, self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
+ if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+ if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+ if (dev->rep) {
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ *qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+ }
+
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
+ if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+ err = mlx5_ib_enable_lb(dev, false, true);
+
+ if (err)
+ destroy_raw_packet_qp_tir(dev, rq, 0, pd);
+ }
kvfree(in);
return err;
}
-static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq)
-{
- mlx5_core_destroy_tir(dev->mdev, rq->tirn);
-}
-
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 *in, size_t inlen,
- struct ib_pd *pd)
+ struct ib_pd *pd,
+ struct ib_udata *udata,
+ struct mlx5_ib_create_qp_resp *resp)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
@@ -1306,9 +1338,10 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
int err;
u32 tdn = mucontext->tdn;
+ u16 uid = to_mpd(pd)->uid;
if (qp->sq.wqe_cnt) {
- err = create_raw_packet_qp_tis(dev, qp, sq, tdn);
+ err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
if (err)
return err;
@@ -1316,6 +1349,13 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
goto err_destroy_tis;
+ if (uid) {
+ resp->tisn = sq->tisn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
+ resp->sqn = sq->base.mqp.qpn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
+ }
+
sq->base.container_mibqp = qp;
sq->base.mqp.event = mlx5_ib_qp_event;
}
@@ -1327,22 +1367,32 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
- err = create_raw_packet_qp_rq(dev, rq, in, inlen);
+ err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
if (err)
goto err_destroy_sq;
-
- err = create_raw_packet_qp_tir(dev, rq, tdn,
- qp->tunnel_offload_en);
+ err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd);
if (err)
goto err_destroy_rq;
+
+ if (uid) {
+ resp->rqn = rq->base.mqp.qpn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
+ resp->tirn = rq->tirn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ }
}
qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
rq->base.mqp.qpn;
+ err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
+ if (err)
+ goto err_destroy_tir;
return 0;
+err_destroy_tir:
+ destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
err_destroy_rq:
destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
@@ -1350,7 +1400,7 @@ err_destroy_sq:
return err;
destroy_raw_packet_qp_sq(dev, sq);
err_destroy_tis:
- destroy_raw_packet_qp_tis(dev, sq);
+ destroy_raw_packet_qp_tis(dev, sq, pd);
return err;
}
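The comp_mask bits set above follow the usual extensible-ABI handshake: the kernel raises a bit only when the matching response field is valid, and user space must test the bit before reading. A consumer-side sketch (constants from the mlx5 uapi header; ctx is a hypothetical caller structure):

if (resp.comp_mask & MLX5_IB_CREATE_QP_RESP_MASK_TIRN)
	ctx->tirn = resp.tirn;
if (resp.comp_mask & MLX5_IB_CREATE_QP_RESP_MASK_TISN)
	ctx->tisn = resp.tisn;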
@@ -1363,13 +1413,13 @@ static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
if (qp->rq.wqe_cnt) {
- destroy_raw_packet_qp_tir(dev, rq);
+ destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
destroy_raw_packet_qp_rq(dev, rq);
}
if (qp->sq.wqe_cnt) {
destroy_raw_packet_qp_sq(dev, sq);
- destroy_raw_packet_qp_tis(dev, sq);
+ destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
}
}
@@ -1387,7 +1437,11 @@ static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
- mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+ if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+ mlx5_ib_disable_lb(dev, false, true);
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
+ to_mpd(qp->ibqp.pd)->uid);
}
static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
@@ -1410,6 +1464,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 tdn = mucontext->tdn;
struct mlx5_ib_create_qp_rss ucmd = {};
size_t required_cmd_sz;
+ u8 lb_flag = 0;
if (init_attr->qp_type != IB_QPT_RAW_PACKET)
return -EOPNOTSUPP;
@@ -1444,7 +1499,9 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EOPNOTSUPP;
}
- if (ucmd.flags & ~MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+ if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
mlx5_ib_dbg(dev, "invalid flags\n");
return -EOPNOTSUPP;
}
@@ -1461,6 +1518,16 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EOPNOTSUPP;
}
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->rep) {
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
+ }
+
err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (err) {
mlx5_ib_dbg(dev, "copy failed\n");
@@ -1472,6 +1539,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type,
MLX5_TIRC_DISP_TYPE_INDIRECT);
@@ -1484,6 +1552,8 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
+
if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
else
@@ -1580,26 +1650,141 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
create_tir:
- if (dev->rep)
- MLX5_SET(tirc, tirc, self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
-
err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
+ if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+ err = mlx5_ib_enable_lb(dev, false, true);
+
+ if (err)
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
+ to_mpd(pd)->uid);
+ }
+
if (err)
goto err;
+ if (mucontext->devx_uid) {
+ resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ resp.tirn = qp->rss_qp.tirn;
+ }
+
+ err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
+ if (err)
+ goto err_copy;
+
kvfree(in);
/* qpn is reserved for that QP */
qp->trans_qp.base.mqp.qpn = 0;
qp->flags |= MLX5_IB_QP_RSS;
return 0;
+err_copy:
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
err:
kvfree(in);
return err;
}
+static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
+ void *qpc)
+{
+ int rcqe_sz;
+
+ if (init_attr->qp_type == MLX5_IB_QPT_DCI)
+ return;
+
+ rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
+
+ if (rcqe_sz == 128) {
+ MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+ return;
+ }
+
+ if (init_attr->qp_type != MLX5_IB_QPT_DCT)
+ MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
+}
+
+static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_create_qp *ucmd,
+ void *qpc)
+{
+ enum ib_qp_type qpt = init_attr->qp_type;
+ int scqe_sz;
+ bool allow_scat_cqe = false;
+
+ if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
+ return;
+
+ if (ucmd)
+ allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+
+ if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
+ return;
+
+ scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
+ if (scqe_sz == 128) {
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
+ return;
+ }
+
+ if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
+ MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
+}
+
+static int atomic_size_to_mode(int size_mask)
+{
+ /* driver does not support atomic_size > 256B
+ * and does not know how to translate bigger sizes
+ */
+ int supported_size_mask = size_mask & 0x1ff;
+ int log_max_size;
+
+ if (!supported_size_mask)
+ return -EOPNOTSUPP;
+
+ log_max_size = __fls(supported_size_mask);
+
+ if (log_max_size > 3)
+ return log_max_size;
+
+ return MLX5_ATOMIC_MODE_8B;
+}
+
+static int get_atomic_mode(struct mlx5_ib_dev *dev,
+ enum ib_qp_type qp_type)
+{
+ u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
+ u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
+ int atomic_mode = -EOPNOTSUPP;
+ int atomic_size_mask;
+
+ if (!atomic)
+ return -EOPNOTSUPP;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+ else
+ atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+ if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
+ (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
+ atomic_mode = atomic_size_to_mode(atomic_size_mask);
+
+ if (atomic_mode <= 0 &&
+ (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
+ atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
+ atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;
+
+ return atomic_mode;
+}
+
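Worked example of the two helpers above: a device supporting 4B..256B operands reports bits 2..8 in its size mask, so:

int mode = atomic_size_to_mode(0x1fc);	/* __fls(0x1fc) == 8, 8 > 3 -> 8 (256B) */
int tiny = atomic_size_to_mode(0x8);	/* __fls(0x8) == 3 -> MLX5_ATOMIC_MODE_8B */

get_atomic_mode() only uses that result when the extended-operation capability bits are present, otherwise falling back to MLX5_ATOMIC_MODE_IB_COMP when plain compare-swap and fetch-add are both supported.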
+static inline bool check_flags_mask(uint64_t input, uint64_t supported)
+{
+ return (input & ~supported) == 0;
+}
+
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, struct mlx5_ib_qp *qp)
@@ -1697,20 +1882,47 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EFAULT;
}
+ if (!check_flags_mask(ucmd.flags,
+ MLX5_QP_FLAG_SIGNATURE |
+ MLX5_QP_FLAG_SCATTER_CQE |
+ MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_BFREG_INDEX |
+ MLX5_QP_FLAG_TYPE_DCT |
+ MLX5_QP_FLAG_TYPE_DCI |
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE))
+ return -EINVAL;
+
err = get_qp_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
return err;
qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
- qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
+ if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
+ qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
!tunnel_offload_supported(mdev)) {
mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
return -EOPNOTSUPP;
}
- qp->tunnel_offload_en = true;
+ qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
}
if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
@@ -1811,23 +2023,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, cd_slave_receive, 1);
if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
- int rcqe_sz;
- int scqe_sz;
-
- rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
- scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
-
- if (rcqe_sz == 128)
- MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
- else
- MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
-
- if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
- if (scqe_sz == 128)
- MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
- else
- MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
- }
+ configure_responder_scat_cqe(init_attr, qpc);
+ configure_requester_scat_cqe(dev, init_attr,
+ (pd && pd->uobject) ? &ucmd : NULL,
+ qpc);
}
if (qp->rq.wqe_cnt) {
@@ -1911,7 +2110,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->flags & MLX5_IB_QP_UNDERLAY) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
- err = create_raw_packet_qp(dev, qp, in, inlen, pd);
+ err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
+ &resp);
} else {
err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
}
@@ -2192,6 +2392,7 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
goto err_free;
}
+ MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
qp->qp_sub_type = MLX5_IB_QPT_DCT;
MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
@@ -2200,6 +2401,9 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
MLX5_SET(dctc, dctc, user_index, uidx);
+ if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
+ configure_responder_scat_cqe(attr, dctc);
+
qp->state = IB_QPS_RESET;
return &qp->ibqp;
@@ -2405,13 +2609,15 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
return 0;
}
-static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
- int attr_mask)
+static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, __be32 *hw_access_flags)
{
- u32 hw_access_flags = 0;
u8 dest_rd_atomic;
u32 access_flags;
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
dest_rd_atomic = attr->max_dest_rd_atomic;
else
@@ -2426,13 +2632,25 @@ static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_att
access_flags &= IB_ACCESS_REMOTE_WRITE;
if (access_flags & IB_ACCESS_REMOTE_READ)
- hw_access_flags |= MLX5_QP_BIT_RRE;
- if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
- hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
+ *hw_access_flags |= MLX5_QP_BIT_RRE;
+ if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ qp->ibqp.qp_type == IB_QPT_RC) {
+ int atomic_mode;
+
+ atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
+ if (atomic_mode < 0)
+ return -EOPNOTSUPP;
+
+ *hw_access_flags |= MLX5_QP_BIT_RAE;
+ *hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
+ }
+
if (access_flags & IB_ACCESS_REMOTE_WRITE)
- hw_access_flags |= MLX5_QP_BIT_RWE;
+ *hw_access_flags |= MLX5_QP_BIT_RWE;
+
+ *hw_access_flags = cpu_to_be32(*hw_access_flags);
- return cpu_to_be32(hw_access_flags);
+ return 0;
}
enum {
@@ -2458,7 +2676,8 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
}
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
- struct mlx5_ib_sq *sq, u8 sl)
+ struct mlx5_ib_sq *sq, u8 sl,
+ struct ib_pd *pd)
{
void *in;
void *tisc;
@@ -2471,6 +2690,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
return -ENOMEM;
MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
+ MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
@@ -2483,7 +2703,8 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
}
static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
- struct mlx5_ib_sq *sq, u8 tx_affinity)
+ struct mlx5_ib_sq *sq, u8 tx_affinity,
+ struct ib_pd *pd)
{
void *in;
void *tisc;
@@ -2496,6 +2717,7 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
return -ENOMEM;
MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
+ MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
@@ -2580,7 +2802,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
&qp->raw_packet_qp.sq,
- sl & 0xf);
+ sl & 0xf, qp->ibqp.pd);
return 0;
}
@@ -2728,9 +2950,9 @@ static int ib_mask_to_mlx5_opt(int ib_mask)
return result;
}
-static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq, int new_state,
- const struct mlx5_modify_raw_qp_param *raw_qp_param)
+static int modify_raw_packet_qp_rq(
+ struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
void *in;
void *rqc;
@@ -2743,6 +2965,7 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
return -ENOMEM;
MLX5_SET(modify_rq_in, in, rq_state, rq->state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(rqc, rqc, state, new_state);
@@ -2753,8 +2976,9 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
} else
- pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n",
- dev->ib_dev.name);
+ dev_info_once(
+ &dev->ib_dev.dev,
+ "RAW PACKET QP counters are not supported on current FW\n");
}
err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
@@ -2768,10 +2992,9 @@ out:
return err;
}
-static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
- struct mlx5_ib_sq *sq,
- int new_state,
- const struct mlx5_modify_raw_qp_param *raw_qp_param)
+static int modify_raw_packet_qp_sq(
+ struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
struct mlx5_rate_limit old_rl = ibqp->rl;
@@ -2788,6 +3011,7 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
if (!in)
return -ENOMEM;
+ MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
MLX5_SET(modify_sq_in, in, sq_state, sq->state);
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
@@ -2890,7 +3114,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
if (modify_rq) {
- err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
+ err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
+ qp->ibqp.pd);
if (err)
return err;
}
@@ -2898,17 +3123,50 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (modify_sq) {
if (tx_affinity) {
err = modify_raw_packet_tx_affinity(dev->mdev, sq,
- tx_affinity);
+ tx_affinity,
+ qp->ibqp.pd);
if (err)
return err;
}
- return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
+ return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
+ raw_qp_param, qp->ibqp.pd);
}
return 0;
}
+static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_pd *pd,
+ struct mlx5_ib_qp_base *qp_base,
+ u8 port_num)
+{
+ struct mlx5_ib_ucontext *ucontext = NULL;
+ unsigned int tx_port_affinity;
+
+ if (pd && pd->ibpd.uobject && pd->ibpd.uobject->context)
+ ucontext = to_mucontext(pd->ibpd.uobject->context);
+
+ if (ucontext) {
+ tx_port_affinity = (unsigned int)atomic_add_return(
+ 1, &ucontext->tx_port_affinity) %
+ MLX5_MAX_PORTS +
+ 1;
+ mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
+ tx_port_affinity, qp_base->mqp.qpn, ucontext);
+ } else {
+ tx_port_affinity =
+ (unsigned int)atomic_add_return(
+ 1, &dev->roce[port_num].tx_port_affinity) %
+ MLX5_MAX_PORTS +
+ 1;
+ mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
+ tx_port_affinity, qp_base->mqp.qpn);
+ }
+
+ return tx_port_affinity;
+}
+
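Whichever counter is used, the computation reduces to a 1-based round robin over the physical ports; the point of the change is the counter's scope, per-ucontext when available, so each process spreads its own QPs evenly instead of sharing the global per-port counter. A hypothetical distilled helper:

static u8 next_tx_port(atomic_t *counter)
{
	/* 1, 2, 1, 2, ... for MLX5_MAX_PORTS == 2 */
	return (atomic_add_return(1, counter) % MLX5_MAX_PORTS) + 1;
}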
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, enum ib_qp_state new_state,
@@ -2974,6 +3232,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (!context)
return -ENOMEM;
+ pd = get_pd(qp);
context->flags = cpu_to_be32(mlx5_st << 16);
if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
@@ -3002,9 +3261,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (mlx5_lag_is_active(dev->mdev)) {
u8 p = mlx5_core_native_port_num(dev->mdev);
- tx_affinity = (unsigned int)atomic_add_return(1,
- &dev->roce[p].next_port) %
- MLX5_MAX_PORTS + 1;
+ tx_affinity = get_tx_affinity(dev, pd, base, p);
context->flags |= cpu_to_be32(tx_affinity << 24);
}
}
@@ -3062,7 +3319,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
goto out;
}
- pd = get_pd(qp);
get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
@@ -3092,8 +3348,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
}
- if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
- context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+ __be32 access_flags = 0;
+
+ err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
+ if (err)
+ goto out;
+
+ context->params2 |= access_flags;
+ }
if (attr_mask & IB_QP_MIN_RNR_TIMER)
context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
@@ -3243,7 +3506,9 @@ static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new
int req = IB_QP_STATE;
int opt = 0;
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ if (new_state == IB_QPS_RESET) {
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
return is_valid_mask(attr_mask, req, opt);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
@@ -3307,10 +3572,14 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
MLX5_SET(dctc, dctc, rwe, 1);
if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
- if (!mlx5_ib_dc_atomic_is_supported(dev))
+ int atomic_mode;
+
+ atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
+ if (atomic_mode < 0)
return -EOPNOTSUPP;
+
+ MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
MLX5_SET(dctc, dctc, rae, 1);
- MLX5_SET(dctc, dctc, atomic_mode, MLX5_ATOMIC_MODE_DCT_CX);
}
MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
MLX5_SET(dctc, dctc, port, attr->port_num);
@@ -3367,7 +3636,6 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
size_t required_cmd_sz;
int err = -EINVAL;
int port;
- enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
if (ibqp->rwq_ind_tbl)
return -ENOSYS;
@@ -3413,7 +3681,6 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
}
if (qp->flags & MLX5_IB_QP_UNDERLAY) {
@@ -3424,7 +3691,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
qp_type != MLX5_IB_QPT_DCI &&
- !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
+ !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
+ attr_mask)) {
mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
cur_state, new_state, ibqp->qp_type, attr_mask);
goto out;
@@ -4371,6 +4639,12 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
u8 next_fence = 0;
u8 fence;
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
@@ -4380,13 +4654,6 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
- err = -EIO;
- *bad_wr = wr;
- nreq = 0;
- goto out;
- }
-
for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
mlx5_ib_warn(dev, "\n");
@@ -4700,18 +4967,17 @@ static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int ind;
int i;
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
spin_lock_irqsave(&qp->rq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
- err = -EIO;
- *bad_wr = wr;
- nreq = 0;
- goto out;
- }
-
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; nreq++, wr = wr->next) {
@@ -5175,6 +5441,7 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_xrcd *xrcd;
int err;
+ u16 uid;
if (!MLX5_CAP_GEN(dev->mdev, xrc))
return ERR_PTR(-ENOSYS);
@@ -5183,12 +5450,14 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
if (!xrcd)
return ERR_PTR(-ENOMEM);
- err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
+ uid = context ? to_mucontext(context)->devx_uid : 0;
+ err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, uid);
if (err) {
kfree(xrcd);
return ERR_PTR(-ENOMEM);
}
+ xrcd->uid = uid;
return &xrcd->ibxrcd;
}
@@ -5196,9 +5465,10 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
+ u16 uid = to_mxrcd(xrcd)->uid;
int err;
- err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
+ err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, uid);
if (err)
mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
@@ -5268,6 +5538,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
MLX5_SET(rqc, rqc, mem_rq_type,
MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
@@ -5443,8 +5714,7 @@ static int prepare_user_rq(struct ib_pd *pd,
err = create_user_rq(dev, pd, rwq, &ucmd);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
- if (err)
- return err;
+ return err;
}
rwq->user_index = ucmd.user_index;
@@ -5573,6 +5843,9 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
for (i = 0; i < sz; i++)
MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
+ rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
+ MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
+
err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
kvfree(in);
@@ -5591,7 +5864,7 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
return &rwq_ind_tbl->ib_rwq_ind_tbl;
err_copy:
- mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+ mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
err:
kfree(rwq_ind_tbl);
return ERR_PTR(err);
@@ -5602,7 +5875,7 @@ int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
- mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+ mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
kfree(rwq_ind_tbl);
return 0;
@@ -5653,6 +5926,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
if (wq_state == IB_WQS_ERR)
wq_state = MLX5_RQC_STATE_ERR;
MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
MLX5_SET(rqc, rqc, state, wq_state);
if (wq_attr_mask & IB_WQ_FLAGS) {
@@ -5684,8 +5958,9 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
MLX5_SET(rqc, rqc, counter_set_id,
dev->port->cnts.set_id);
} else
- pr_info_once("%s: Receive WQ counters are not supported on current FW\n",
- dev->ib_dev.name);
+ dev_info_once(
+ &dev->ib_dev.dev,
+ "Receive WQ counters are not supported on current FW\n");
}
err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index d359fecf7a5b..d012e7dbcc38 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -144,6 +144,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->page_offset = offset;
+ in->uid = to_mpd(pd)->uid;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
in->type != IB_SRQT_BASIC)
in->user_index = uidx;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 093f7755c843..2e5dc0a67cfc 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -58,8 +58,9 @@ static int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
ret = ib_query_port(&dev->ib_dev, port_num, tprops);
if (ret) {
- printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n",
- ret, dev->ib_dev.name, port_num);
+ dev_warn(&dev->ib_dev.dev,
+ "ib_query_port failed (%d) forport %d\n", ret,
+ port_num);
goto out;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index f3e80dec1334..92c49bff22bc 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -986,7 +986,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
goto err_free_dev;
}
- if (mthca_cmd_init(mdev)) {
+ err = mthca_cmd_init(mdev);
+ if (err) {
mthca_err(mdev, "Failed to init command interface, aborting.\n");
goto err_free_dev;
}
@@ -1014,8 +1015,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
err = mthca_setup_hca(mdev);
if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
- if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
- pci_free_irq_vectors(pdev);
+ pci_free_irq_vectors(pdev);
mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
err = mthca_setup_hca(mdev);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 0d3473b4596e..691c6f048938 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1076,16 +1076,17 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
return err;
}
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
return sprintf(buf, "%x\n", dev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
@@ -1103,23 +1104,26 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
return sprintf(buf, "unknown\n");
}
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *mthca_dev_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *mthca_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group mthca_attr_group = {
+ .attrs = mthca_dev_attributes,
};
static int mthca_init_node_data(struct mthca_dev *dev)
@@ -1192,13 +1196,11 @@ static void get_dev_fw_str(struct ib_device *device, char *str)
int mthca_register_device(struct mthca_dev *dev)
{
int ret;
- int i;
ret = mthca_init_node_data(dev);
if (ret)
return ret;
- strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
@@ -1296,20 +1298,12 @@ int mthca_register_device(struct mthca_dev *dev)
mutex_init(&dev->cap_mask_mutex);
+ rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
dev->ib_dev.driver_id = RDMA_DRIVER_MTHCA;
- ret = ib_register_device(&dev->ib_dev, NULL);
+ ret = ib_register_device(&dev->ib_dev, "mthca%d", NULL);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
- ret = device_create_file(&dev->ib_dev.dev,
- mthca_dev_attributes[i]);
- if (ret) {
- ib_unregister_device(&dev->ib_dev);
- return ret;
- }
- }
-
mthca_start_catas_poll(dev);
return 0;
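This mthca conversion is the template the nes, ocrdma, qedr and qib hunks below all follow: show_*() callbacks are renamed to the <attr>_show() convention so DEVICE_ATTR_RO() applies, the attributes are collected into a NULL-terminated attribute_group, and the device_create_file() loop with its error unwinding is replaced by a single rdma_set_device_sysfs_group() call before ib_register_device(), which now also takes the "mthca%d" name format directly. Reduced to its parts, with one illustrative attribute:

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);

	return sprintf(buf, "%x\n", dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);		/* defines dev_attr_hw_rev */

static struct attribute *attrs[] = {
	&dev_attr_hw_rev.attr,
	NULL,				/* sysfs requires the terminator */
};

static const struct attribute_group attr_group = { .attrs = attrs };

static int register_dev(struct mthca_dev *dev)
{
	/* the IB core now creates and removes the files with the
	 * device, so no per-file error unwinding in the driver
	 */
	rdma_set_device_sysfs_group(&dev->ib_dev, &attr_group);
	return ib_register_device(&dev->ib_dev, "mthca%d", NULL);
}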
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 3d37f2373d63..9d178ee3c96a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -872,8 +872,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_UNSPECIFIED)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask)) {
mthca_dbg(dev, "Bad QP transition (transport %d) "
"%d->%d with attr 0x%08x\n",
qp->transport, cur_state, new_state,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 42b68aa999fc..e00add6d78ec 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -456,9 +456,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
void __iomem *mmio_regs = NULL;
u8 hw_rev;
- assert(pcidev != NULL);
- assert(ent != NULL);
-
printk(KERN_INFO PFX "NetEffect RNIC driver v%s loading. (%s)\n",
DRV_VERSION, pci_name(pcidev));
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bedaa02749fb..a895fe980d10 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -149,18 +149,9 @@ do { \
printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args); \
} while (0)
-#define assert(expr) \
-do { \
- if (!(expr)) { \
- printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-
#define NES_EVENT_TIMEOUT 1200000
#else
#define nes_debug(level, fmt, args...) no_printk(fmt, ##args)
-#define assert(expr) do {} while (0)
#define NES_EVENT_TIMEOUT 100000
#endif
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index bd0675d8f298..5517e392bc01 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1443,7 +1443,7 @@ static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_inde
mdelay(1);
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- } while ((temp_phy_data2 == temp_phy_data));
+ } while (temp_phy_data2 == temp_phy_data);
/* wait for tracking */
counter = 0;
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index 9bdb84dc225c..e96ffff61c3a 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -198,9 +198,9 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
if (skb) {
/* Continue processing fpdu */
- if (skb->next == (struct sk_buff *)&nesqp->pau_list)
+ skb = skb_peek_next(skb, &nesqp->pau_list);
+ if (!skb)
goto out;
- skb = skb->next;
processacks = false;
} else {
/* Starting a new one */
@@ -553,12 +553,10 @@ static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct ne
if (skb_queue_len(&nesqp->pau_list) == 0) {
skb_queue_head(&nesqp->pau_list, skb);
} else {
- tmpskb = nesqp->pau_list.next;
- while (tmpskb != (struct sk_buff *)&nesqp->pau_list) {
+ skb_queue_walk(&nesqp->pau_list, tmpskb) {
cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
if (before(seqnum, cb->seqnum))
break;
- tmpskb = tmpskb->next;
}
skb_insert(tmpskb, skb, &nesqp->pau_list);
}
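Both nes_mgt.c hunks swap open-coded sk_buff list walking, which compared node pointers against the queue head cast to an skb, for the stock helpers: skb_peek_next() for a single-step advance and skb_queue_walk() for the ordered-insert scan. The first conversion in isolation (advance_fpdu() is an illustrative wrapper):

static struct sk_buff *advance_fpdu(struct sk_buff *skb,
				    struct sk_buff_head *pau_list)
{
	/* skb_peek_next() hides the head-sentinel test the old code
	 * spelled out as skb->next == (struct sk_buff *)pau_list, and
	 * returns NULL at the end of the queue instead.
	 */
	return skb_peek_next(skb, pau_list);
}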
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 61014e251555..16f33454c198 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -146,8 +146,6 @@ static int nes_netdev_open(struct net_device *netdev)
struct list_head *list_pos, *list_temp;
unsigned long flags;
- assert(nesdev != NULL);
-
if (nesvnic->netdev_open == 1)
return 0;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 6940c7215961..92d1cadd4cfd 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -687,7 +687,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
}
nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n",
- nespd, nesvnic->nesibdev->ibdev.name);
+ nespd, dev_name(&nesvnic->nesibdev->ibdev.dev));
nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd;
@@ -2556,8 +2556,8 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
/**
* show_rev
*/
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct nes_ib_device *nesibdev =
container_of(dev, struct nes_ib_device, ibdev.dev);
@@ -2566,40 +2566,40 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
nes_debug(NES_DBG_INIT, "\n");
return sprintf(buf, "%x\n", nesvnic->nesdev->nesadapter->hw_rev);
}
-
+static DEVICE_ATTR_RO(hw_rev);
/**
* show_hca
*/
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
nes_debug(NES_DBG_INIT, "\n");
return sprintf(buf, "NES020\n");
}
-
+static DEVICE_ATTR_RO(hca_type);
/**
* show_board
*/
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
nes_debug(NES_DBG_INIT, "\n");
return sprintf(buf, "%.*s\n", 32, "NES020 Board ID");
}
+static DEVICE_ATTR_RO(board_id);
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-
-static struct device_attribute *nes_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static struct attribute *nes_dev_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
};
+static const struct attribute_group nes_attr_group = {
+ .attrs = nes_dev_attributes,
+};
/**
* nes_query_qp
@@ -3640,7 +3640,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
if (nesibdev == NULL) {
return NULL;
}
- strlcpy(nesibdev->ibdev.name, "nes%d", IB_DEVICE_NAME_MAX);
nesibdev->ibdev.owner = THIS_MODULE;
nesibdev->ibdev.node_type = RDMA_NODE_RNIC;
@@ -3795,10 +3794,11 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
struct nes_vnic *nesvnic = nesibdev->nesvnic;
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- int i, ret;
+ int ret;
+ rdma_set_device_sysfs_group(&nesvnic->nesibdev->ibdev, &nes_attr_group);
nesvnic->nesibdev->ibdev.driver_id = RDMA_DRIVER_NES;
- ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL);
+ ret = ib_register_device(&nesvnic->nesibdev->ibdev, "nes%d", NULL);
if (ret) {
return ret;
}
@@ -3809,19 +3809,6 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
nesibdev->max_qp = (nesadapter->max_qp-NES_FIRST_QPN) / nesadapter->port_count;
nesibdev->max_pd = nesadapter->max_pd / nesadapter->port_count;
- for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {
- ret = device_create_file(&nesibdev->ibdev.dev, nes_dev_attributes[i]);
- if (ret) {
- while (i > 0) {
- i--;
- device_remove_file(&nesibdev->ibdev.dev,
- nes_dev_attributes[i]);
- }
- ib_unregister_device(&nesibdev->ibdev);
- return ret;
- }
- }
-
nesvnic->of_device_registered = 1;
return 0;
@@ -3834,15 +3821,9 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
{
struct nes_vnic *nesvnic = nesibdev->nesvnic;
- int i;
- for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {
- device_remove_file(&nesibdev->ibdev.dev, nes_dev_attributes[i]);
- }
-
- if (nesvnic->of_device_registered) {
+ if (nesvnic->of_device_registered)
ib_unregister_device(&nesibdev->ibdev);
- }
nesvnic->of_device_registered = 0;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index e578281471af..241a57a07485 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -792,7 +792,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
qp->srq->ibsrq.
srq_context);
} else if (dev_event) {
- pr_err("%s: Fatal event received\n", dev->ibdev.name);
+ dev_err(&dev->ibdev.dev, "Fatal event received\n");
ib_dispatch_event(&ib_evt);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 7832ee3e0c84..873cc7f6fe61 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -114,9 +114,37 @@ static void get_dev_fw_str(struct ib_device *device, char *str)
snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", &dev->attr.fw_ver[0]);
}
+/* OCRDMA sysfs interface */
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *ocrdma_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group ocrdma_attr_group = {
+ .attrs = ocrdma_attributes,
+};
+
static int ocrdma_register_device(struct ocrdma_dev *dev)
{
- strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
@@ -213,8 +241,9 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.destroy_srq = ocrdma_destroy_srq;
dev->ibdev.post_srq_recv = ocrdma_post_srq_recv;
}
+ rdma_set_device_sysfs_group(&dev->ibdev, &ocrdma_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_OCRDMA;
- return ib_register_device(&dev->ibdev, NULL);
+ return ib_register_device(&dev->ibdev, "ocrdma%d", NULL);
}
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
@@ -260,42 +289,9 @@ static void ocrdma_free_resources(struct ocrdma_dev *dev)
kfree(dev->cq_tbl);
}
-/* OCRDMA sysfs interface */
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
-}
-
-static ssize_t show_hca_type(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
-
-static struct device_attribute *ocrdma_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type
-};
-
-static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
- device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
-}
-
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
- int status = 0, i;
+ int status = 0;
u8 lstate = 0;
struct ocrdma_dev *dev;
@@ -331,9 +327,6 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (!status)
ocrdma_update_link_state(dev, lstate);
- for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
- if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
- goto sysfs_err;
/* Init stats */
ocrdma_add_port_stats(dev);
/* Interrupt Moderation */
@@ -348,8 +341,6 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
dev_name(&dev->nic_info.pdev->dev), dev->id);
return dev;
-sysfs_err:
- ocrdma_remove_sysfiles(dev);
alloc_err:
ocrdma_free_resources(dev);
ocrdma_cleanup_hw(dev);
@@ -376,7 +367,6 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
* of the registered clients.
*/
cancel_delayed_work_sync(&dev->eqd_work);
- ocrdma_remove_sysfiles(dev);
ib_unregister_device(&dev->ibdev);
ocrdma_rem_port_stats(dev);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 24d20a4aa262..290d776edf48 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -764,7 +764,8 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
return;
/* Create post stats base dir */
- dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
+ dev->dir =
+ debugfs_create_dir(dev_name(&dev->ibdev.dev), ocrdma_dbgfs_dir);
if (!dev->dir)
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index c158ca9fde6d..06d2a7f3304c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1480,8 +1480,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_qps = old_qps;
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
"qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
__func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index a0af6d424aed..8d6ff9df49fe 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -133,6 +133,33 @@ static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+/* QEDR sysfs interface */
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qedr_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *qedr_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group qedr_attr_group = {
+ .attrs = qedr_attributes,
+};
+
static int qedr_iw_register_device(struct qedr_dev *dev)
{
dev->ibdev.node_type = RDMA_NODE_RNIC;
@@ -170,8 +197,6 @@ static int qedr_register_device(struct qedr_dev *dev)
{
int rc;
- strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
-
dev->ibdev.node_guid = dev->attr.node_guid;
memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
dev->ibdev.owner = THIS_MODULE;
@@ -262,9 +287,9 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.get_link_layer = qedr_link_layer;
dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;
-
+ rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_QEDR;
- return ib_register_device(&dev->ibdev, NULL);
+ return ib_register_device(&dev->ibdev, "qedr%d", NULL);
}
/* This function allocates fast-path status block memory */
@@ -404,37 +429,6 @@ err1:
return rc;
}
-/* QEDR sysfs interface */
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct qedr_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
-}
-
-static ssize_t show_hca_type(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
-
-static struct device_attribute *qedr_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type
-};
-
-static void qedr_remove_sysfiles(struct qedr_dev *dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
- device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
-}
-
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
int rc = pci_enable_atomic_ops_to_root(pdev,
@@ -855,7 +849,7 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
{
struct qed_dev_rdma_info dev_info;
struct qedr_dev *dev;
- int rc = 0, i;
+ int rc = 0;
dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
if (!dev) {
@@ -914,18 +908,12 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
goto reg_err;
}
- for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
- if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
- goto sysfs_err;
-
if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
return dev;
-sysfs_err:
- ib_unregister_device(&dev->ibdev);
reg_err:
qedr_sync_free_irqs(dev);
irq_err:
@@ -944,7 +932,6 @@ static void qedr_remove(struct qedr_dev *dev)
/* First unregister with stack to stop all the active traffic
* of the registered clients.
*/
- qedr_remove_sysfiles(dev);
ib_unregister_device(&dev->ibdev);
qedr_stop_hw(dev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index a2d708dceb8d..53bbe6b4e6e6 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -43,7 +43,7 @@
#include "qedr_hsi_rdma.h"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
-#define DP_NAME(dev) ((dev)->ibdev.name)
+#define DP_NAME(_dev) dev_name(&(_dev)->ibdev.dev)
#define IS_IWARP(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_IWARP)
#define IS_ROCE(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_ROCE)
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index 85578887421b..e1ac2fd60bb1 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -519,9 +519,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
}
if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
- packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+ packet->tx_dest = QED_LL2_TX_DEST_LB;
else
- packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+ packet->tx_dest = QED_LL2_TX_DEST_NW;
packet->roce_mode = roce_mode;
memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 8cc3df24e04e..82ee4b4a7084 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1447,7 +1447,6 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
u64 pbl_base_addr, phy_prod_pair_addr;
struct ib_ucontext *ib_ctx = NULL;
struct qedr_srq_hwq_info *hw_srq;
- struct qedr_ucontext *ctx = NULL;
u32 page_cnt, page_size;
struct qedr_srq *srq;
int rc = 0;
@@ -1473,7 +1472,6 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
if (udata && ibpd->uobject && ibpd->uobject->context) {
ib_ctx = ibpd->uobject->context;
- ctx = get_qedr_ucontext(ib_ctx);
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
DP_ERR(dev,
@@ -2240,8 +2238,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (rdma_protocol_roce(&dev->ibdev, 1)) {
if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
- ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ ibqp->qp_type, attr_mask)) {
DP_ERR(dev,
"modify qp: invalid attribute mask=0x%x specified for\n"
"qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 3461df002f81..83d2349188db 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1390,13 +1390,13 @@ static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
*/
extern const char ib_qib_version[];
+extern const struct attribute_group qib_attr_group;
int qib_device_create(struct qib_devdata *);
void qib_device_remove(struct qib_devdata *);
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
struct kobject *kobj);
-int qib_verbs_register_sysfs(struct qib_devdata *);
void qib_verbs_unregister_sysfs(struct qib_devdata *);
/* Hook for sysfs read of QSFP */
extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 5ac7b31c346b..30595b358d8f 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -597,7 +597,6 @@ qib_pci_resume(struct pci_dev *pdev)
struct qib_devdata *dd = pci_get_drvdata(pdev);
qib_devinfo(pdev, "QIB resume function called\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
/*
* Running jobs will fail, since it's asynchronous
* unlike sysfs-requested reset. Better than
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 344e401915f7..a81905df2d0f 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -378,25 +378,22 @@ void qib_flush_qp_waiters(struct rvt_qp *qp)
* qib_check_send_wqe - validate wr/wqe
* @qp - The qp
* @wqe - The built wqe
+ * @call_send - Determine if the send should be posted or scheduled
*
- * validate wr/wqe. This is called
- * prior to inserting the wqe into
- * the ring but after the wqe has been
- * setup.
- *
- * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure
+ * Returns 0 on success, -EINVAL on failure
*/
int qib_check_send_wqe(struct rvt_qp *qp,
- struct rvt_swqe *wqe)
+ struct rvt_swqe *wqe, bool *call_send)
{
struct rvt_ah *ah;
- int ret = 0;
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
case IB_QPT_UC:
if (wqe->length > 0x80000000U)
return -EINVAL;
+ if (wqe->length > qp->pmtu)
+ *call_send = false;
break;
case IB_QPT_SMI:
case IB_QPT_GSI:
@@ -405,12 +402,12 @@ int qib_check_send_wqe(struct rvt_qp *qp,
if (wqe->length > (1 << ah->log_pmtu))
return -EINVAL;
/* progress hint */
- ret = 1;
+ *call_send = true;
break;
default:
break;
}
- return ret;
+ return 0;
}
#ifdef CONFIG_DEBUG_FS
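qib_check_send_wqe() previously multiplexed validation and a progress hint into one tri-state return (1 to force direct progress, 0 to schedule, negative on error). The rework above splits them: the return value is purely pass/fail, and the scheduling hint travels through the new call_send out-parameter. A sketch of how the rdmavt post path is expected to consume it (validate_and_flag() is illustrative; the real caller lives in rdmavt's post-send code):

static int validate_and_flag(struct rvt_qp *qp, struct rvt_swqe *wqe,
			     bool *call_send)
{
	int err = qib_check_send_wqe(qp, wqe, call_send);

	if (err)
		return err;	/* WQE rejected, nothing was queued */

	/* *call_send now says whether to make progress inline (small
	 * UD/GSI sends) or defer to the send engine (large RC/UC).
	 */
	return 0;
}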
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index f35fdeb14347..6fa002940451 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -254,7 +254,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+ rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
/* will get called again */
goto done;
@@ -838,7 +838,7 @@ void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
qib_migrate_qp(qp);
qp->s_retry = qp->s_retry_cnt;
} else if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
return;
} else /* XXX need to handle delayed completion */
@@ -1221,7 +1221,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
ibp->rvp.n_other_naks++;
class_b:
if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
break;
@@ -1425,7 +1425,8 @@ read_middle:
qp->s_rdma_read_len -= pmtu;
update_last_psn(qp, psn);
spin_unlock_irqrestore(&qp->s_lock, flags);
- qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, pmtu, false, false);
goto bail;
case OP(RDMA_READ_RESPONSE_ONLY):
@@ -1471,7 +1472,8 @@ read_last:
if (unlikely(tlen != qp->s_rdma_read_len))
goto ack_len_err;
aeth = be32_to_cpu(ohdr->u.aeth);
- qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, tlen, false, false);
WARN_ON(qp->s_rdma_read_sge.num_sge);
(void) do_rc_ack(qp, aeth, psn,
OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
@@ -1490,7 +1492,7 @@ ack_len_err:
status = IB_WC_LOC_LEN_ERR;
ack_err:
if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
ack_done:
@@ -1844,7 +1846,7 @@ send_middle:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto nack_inv;
- qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -1890,7 +1892,7 @@ send_last:
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
qp->r_msn++;
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index f8a7de795beb..1fa21938f310 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -171,307 +171,6 @@ err:
}
/**
- * qib_ruc_loopback - handle UC and RC lookback requests
- * @sqp: the sending QP
- *
- * This is called from qib_do_send() to
- * forward a WQE addressed to the same HCA.
- * Note that although we are single threaded due to the tasklet, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void qib_ruc_loopback(struct rvt_qp *sqp)
-{
- struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = ppd->dd;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- struct rvt_qp *qp;
- struct rvt_swqe *wqe;
- struct rvt_sge *sge;
- unsigned long flags;
- struct ib_wc wc;
- u64 sdata;
- atomic64_t *maddr;
- enum ib_wc_status send_status;
- int release;
- int ret;
-
- rcu_read_lock();
- /*
- * Note that we check the responder QP state after
- * checking the requester's state.
- */
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn);
- if (!qp)
- goto done;
-
- spin_lock_irqsave(&sqp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
- !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- goto unlock;
-
- sqp->s_flags |= RVT_S_BUSY;
-
-again:
- if (sqp->s_last == READ_ONCE(sqp->s_head))
- goto clr_busy;
- wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
-
- /* Return if it is not OK to start a new work reqeust. */
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
- goto clr_busy;
- /* We are in the error state, flush the work request. */
- send_status = IB_WC_WR_FLUSH_ERR;
- goto flush_send;
- }
-
- /*
- * We can rely on the entry not changing without the s_lock
- * being held until we update s_last.
- * We increment s_cur to indicate s_last is in progress.
- */
- if (sqp->s_last == sqp->s_cur) {
- if (++sqp->s_cur >= sqp->s_size)
- sqp->s_cur = 0;
- }
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-
- if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
- qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->rvp.n_pkt_drops++;
- /*
- * For RC, the requester would timeout and retry so
- * shortcut the timeouts and just signal too many retries.
- */
- if (sqp->ibqp.qp_type == IB_QPT_RC)
- send_status = IB_WC_RETRY_EXC_ERR;
- else
- send_status = IB_WC_SUCCESS;
- goto serr;
- }
-
- memset(&wc, 0, sizeof(wc));
- send_status = IB_WC_SUCCESS;
-
- release = 1;
- sqp->s_sge.sge = wqe->sg_list[0];
- sqp->s_sge.sg_list = wqe->sg_list + 1;
- sqp->s_sge.num_sge = wqe->wr.num_sge;
- sqp->s_len = wqe->length;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
- case IB_WR_SEND:
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- break;
-
- case IB_WR_RDMA_WRITE_WITH_IMM:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- /* FALLTHROUGH */
- case IB_WR_RDMA_WRITE:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- if (wqe->length == 0)
- break;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_WRITE)))
- goto acc_err;
- qp->r_sge.sg_list = NULL;
- qp->r_sge.num_sge = 1;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_RDMA_READ:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_READ)))
- goto acc_err;
- release = 0;
- sqp->s_sge.sg_list = NULL;
- sqp->s_sge.num_sge = 1;
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->wr.num_sge;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- wqe->atomic_wr.remote_addr,
- wqe->atomic_wr.rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
- sdata = wqe->atomic_wr.compare_add;
- *(u64 *) sqp->s_sge.sge.vaddr =
- (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
- (u64) atomic64_add_return(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- sdata, wqe->atomic_wr.swap);
- rvt_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- goto send_comp;
-
- default:
- send_status = IB_WC_LOC_QP_OP_ERR;
- goto serr;
- }
-
- sge = &sqp->s_sge.sge;
- while (sqp->s_len) {
- u32 len = sqp->s_len;
-
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (!release)
- rvt_put_mr(sge->mr);
- if (--sqp->s_sge.num_sge)
- *sge = *sqp->s_sge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- sqp->s_len -= len;
- }
- if (release)
- rvt_put_ss(&qp->r_sge);
-
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- goto send_comp;
-
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
- wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
- spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->rvp.n_loop_pkts++;
-flush_send:
- sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
- qib_send_complete(sqp, wqe, send_status);
- goto again;
-
-rnr_nak:
- /* Handle RNR NAK */
- if (qp->ibqp.qp_type == IB_QPT_UC)
- goto send_comp;
- ibp->rvp.n_rnr_naks++;
- /*
- * Note: we don't need the s_lock held since the BUSY flag
- * makes this single threaded.
- */
- if (sqp->s_rnr_retry == 0) {
- send_status = IB_WC_RNR_RETRY_EXC_ERR;
- goto serr;
- }
- if (sqp->s_rnr_retry_cnt < 7)
- sqp->s_rnr_retry--;
- spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
- goto clr_busy;
- rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
- IB_AETH_CREDIT_SHIFT);
- goto clr_busy;
-
-op_err:
- send_status = IB_WC_REM_OP_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-acc_err:
- send_status = IB_WC_REM_ACCESS_ERR;
- wc.status = IB_WC_LOC_PROT_ERR;
-err:
- /* responder goes to error state */
- rvt_rc_error(qp, wc.status);
-
-serr:
- spin_lock_irqsave(&sqp->s_lock, flags);
- qib_send_complete(sqp, wqe, send_status);
- if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
- sqp->s_flags &= ~RVT_S_BUSY;
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = sqp->ibqp.device;
- ev.element.qp = &sqp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
- }
- goto done;
- }
-clr_busy:
- sqp->s_flags &= ~RVT_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
- rcu_read_unlock();
-}
-
-/**
* qib_make_grh - construct a GRH header
* @ibp: a pointer to the IB port
* @hdr: a pointer to the GRH header being constructed
@@ -573,7 +272,7 @@ void qib_do_send(struct rvt_qp *qp)
qp->ibqp.qp_type == IB_QPT_UC) &&
(rdma_ah_get_dlid(&qp->remote_ah_attr) &
~((1 << ppd->lmc) - 1)) == ppd->lid) {
- qib_ruc_loopback(qp);
+ rvt_ruc_loopback(qp);
return;
}
@@ -613,42 +312,3 @@ void qib_do_send(struct rvt_qp *qp)
spin_unlock_irqrestore(&qp->s_lock, flags);
}
-
-/*
- * This should be called with s_lock held.
- */
-void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status)
-{
- u32 old_last, last;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- return;
-
- last = qp->s_last;
- old_last = last;
- if (++last >= qp->s_size)
- last = 0;
- qp->s_last = last;
- /* See post_send() */
- barrier();
- rvt_put_swqe(wqe);
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
-
- rvt_qp_swqe_complete(qp,
- wqe,
- ib_qib_wc_opcode[wqe->wr.opcode],
- status);
-
- if (qp->s_acked == old_last)
- qp->s_acked = last;
- if (qp->s_cur == old_last)
- qp->s_cur = last;
- if (qp->s_tail == old_last)
- qp->s_tail = last;
- if (qp->state == IB_QPS_SQD && last == qp->s_cur)
- qp->s_draining = 0;
-}
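qib_ruc_loopback() and qib_send_complete(), removed above, were near copies of the same logic in other rdmavt drivers; they move into the rdmavt core as rvt_ruc_loopback() and rvt_send_complete(), and every qib call site below is repointed. What remains driver-side is only the dispatch, schematically (do_send_dispatch() is an illustrative reduction of qib_do_send()):

static void do_send_dispatch(struct rvt_qp *qp, struct qib_pportdata *ppd)
{
	u16 dlid = rdma_ah_get_dlid(&qp->remote_ah_attr);

	/* Intra-HCA RC/UC traffic short-circuits through the shared
	 * rdmavt loopback; everything else takes the wire path and is
	 * completed later via rvt_send_complete().
	 */
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		rvt_ruc_loopback(qp);
		return;
	}
	/* ...hardware send path... */
}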
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index d0723d4aef5c..757d4c9d713d 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -651,7 +651,7 @@ unmap:
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
rvt_error_qp(qp, IB_WC_GENERAL_ERR);
} else if (qp->s_wqe)
- qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+ rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock(&qp->s_lock);
spin_unlock(&qp->r_lock);
/* return zero to process the next send work request */
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index ca2638d8f35e..1cf4ca3f23e3 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -551,17 +551,18 @@ static struct kobj_type qib_diagc_ktype = {
* Start of per-unit (or driver, in some cases, but replicated
* per unit) functions (these get a device *)
*/
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
{
struct qib_ibdev *dev =
container_of(device, struct qib_ibdev, rdi.ibdev.dev);
return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
container_of(device, struct qib_ibdev, rdi.ibdev.dev);
@@ -574,15 +575,18 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
return ret;
}
+static DEVICE_ATTR_RO(hca_type);
+static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);
-static ssize_t show_version(struct device *device,
+static ssize_t version_show(struct device *device,
struct device_attribute *attr, char *buf)
{
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
}
+static DEVICE_ATTR_RO(version);
-static ssize_t show_boardversion(struct device *device,
+static ssize_t boardversion_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -592,9 +596,9 @@ static ssize_t show_boardversion(struct device *device,
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}
+static DEVICE_ATTR_RO(boardversion);
-
-static ssize_t show_localbus_info(struct device *device,
+static ssize_t localbus_info_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -604,9 +608,9 @@ static ssize_t show_localbus_info(struct device *device,
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
}
+static DEVICE_ATTR_RO(localbus_info);
-
-static ssize_t show_nctxts(struct device *device,
+static ssize_t nctxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -620,9 +624,10 @@ static ssize_t show_nctxts(struct device *device,
(dd->first_user_ctxt > dd->cfgctxts) ? 0 :
(dd->cfgctxts - dd->first_user_ctxt));
}
+static DEVICE_ATTR_RO(nctxts);
-static ssize_t show_nfreectxts(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t nfreectxts_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
container_of(device, struct qib_ibdev, rdi.ibdev.dev);
@@ -631,8 +636,9 @@ static ssize_t show_nfreectxts(struct device *device,
/* Return the number of free user ports (contexts) available. */
return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
}
+static DEVICE_ATTR_RO(nfreectxts);
-static ssize_t show_serial(struct device *device,
+static ssize_t serial_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -644,8 +650,9 @@ static ssize_t show_serial(struct device *device,
strcat(buf, "\n");
return strlen(buf);
}
+static DEVICE_ATTR_RO(serial);
-static ssize_t store_chip_reset(struct device *device,
+static ssize_t chip_reset_store(struct device *device,
struct device_attribute *attr, const char *buf,
size_t count)
{
@@ -663,11 +670,12 @@ static ssize_t store_chip_reset(struct device *device,
bail:
return ret < 0 ? ret : count;
}
+static DEVICE_ATTR_WO(chip_reset);
/*
* Dump tempsense regs. in decimal, to ease shell-scripts.
*/
-static ssize_t show_tempsense(struct device *device,
+static ssize_t tempsense_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -695,6 +703,7 @@ static ssize_t show_tempsense(struct device *device,
*(signed char *)(regvals + 7));
return ret;
}
+static DEVICE_ATTR_RO(tempsense);
/*
* end of per-unit (or driver, in some cases, but replicated
@@ -702,30 +711,23 @@ static ssize_t show_tempsense(struct device *device,
*/
/* start of per-unit file structures and support code */
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
-static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
-static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
-static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
-static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-
-static struct device_attribute *qib_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
- &dev_attr_version,
- &dev_attr_nctxts,
- &dev_attr_nfreectxts,
- &dev_attr_serial,
- &dev_attr_boardversion,
- &dev_attr_tempsense,
- &dev_attr_localbus_info,
- &dev_attr_chip_reset,
+static struct attribute *qib_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_version.attr,
+ &dev_attr_nctxts.attr,
+ &dev_attr_nfreectxts.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_boardversion.attr,
+ &dev_attr_tempsense.attr,
+ &dev_attr_localbus_info.attr,
+ &dev_attr_chip_reset.attr,
+ NULL,
+};
+
+const struct attribute_group qib_attr_group = {
+ .attrs = qib_attributes,
};
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
@@ -827,27 +829,6 @@ bail:
}
/*
- * Register and create our files in /sys/class/infiniband.
- */
-int qib_verbs_register_sysfs(struct qib_devdata *dd)
-{
- struct ib_device *dev = &dd->verbs_dev.rdi.ibdev;
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
- ret = device_create_file(&dev->dev, qib_attributes[i]);
- if (ret)
- goto bail;
- }
-
- return 0;
-bail:
- for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
- device_remove_file(&dev->dev, qib_attributes[i]);
- return ret;
-}
-
-/*
* Unregister and remove our files in /sys/class/infiniband.
*/
void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 3e54bc11e0ae..30c70ad0f4bf 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -68,7 +68,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
@@ -359,7 +359,7 @@ send_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto rewind;
- qib_copy_sge(&qp->r_sge, data, pmtu, 0);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
break;
case OP(SEND_LAST_WITH_IMMEDIATE):
@@ -385,7 +385,7 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len))
goto rewind;
wc.opcode = IB_WC_RECV;
- qib_copy_sge(&qp->r_sge, data, tlen, 0);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
wc.wr_id = qp->r_wr_id;
@@ -449,7 +449,7 @@ rdma_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto drop;
- qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -479,7 +479,7 @@ rdma_last_imm:
}
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
goto last_imm;
@@ -495,7 +495,7 @@ rdma_last:
tlen -= (hdrsize + pad + 4);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
break;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index f8d029a2390f..4d4c31ea4e2d 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -162,8 +162,8 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);
qib_make_grh(ibp, &grh, grd, 0, 0);
- qib_copy_sge(&qp->r_sge, &grh,
- sizeof(grh), 1);
+ rvt_copy_sge(qp, &qp->r_sge, &grh,
+ sizeof(grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
@@ -179,7 +179,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (len > sge->sge_length)
len = sge->sge_length;
BUG_ON(len == 0);
- qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
+ rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
@@ -260,7 +260,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
@@ -304,7 +304,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
qib_ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, tflags);
*flags = tflags;
- qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
goto done;
}
}
@@ -551,12 +551,13 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
goto drop;
}
if (has_grh) {
- qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
- sizeof(struct ib_grh), 1);
+ rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh,
+ sizeof(struct ib_grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
- qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
+ true, false);
rvt_put_ss(&qp->r_sge);
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
return;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 41babbc0db58..4b0f5761a646 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -131,27 +131,6 @@ const enum ib_wc_opcode ib_qib_wc_opcode[] = {
*/
__be64 ib_qib_sys_image_guid;
-/**
- * qib_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- */
-void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
-{
- struct rvt_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = rvt_get_sge_length(sge, length);
-
- WARN_ON_ONCE(len == 0);
- memcpy(sge->vaddr, data, len);
- rvt_update_sge(ss, len, release);
- data += len;
- length -= len;
- }
-}
-
/*
* Count the number of DMA descriptors needed to send length bytes of data.
* Don't modify the qib_sge_state to get the count.
@@ -752,7 +731,7 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
spin_lock(&qp->s_lock);
if (tx->wqe)
- qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
else if (qp->ibqp.qp_type == IB_QPT_RC) {
struct ib_header *hdr;
@@ -1025,7 +1004,7 @@ done:
}
if (qp->s_wqe) {
spin_lock_irqsave(&qp->s_lock, flags);
- qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
spin_unlock_irqrestore(&qp->s_lock, flags);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
spin_lock_irqsave(&qp->s_lock, flags);
@@ -1512,6 +1491,9 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_mcast_grp;
/* post send table */
dd->verbs_dev.rdi.post_parms = qib_post_parms;
+
+ /* opcode translation table */
+ dd->verbs_dev.rdi.wc_opcode = ib_qib_wc_opcode;
}
/**
@@ -1588,7 +1570,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
- dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
+ dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe;
dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
@@ -1631,6 +1613,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
+ dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
qib_fill_device_attr(dd);
@@ -1642,19 +1625,14 @@ int qib_register_ib_device(struct qib_devdata *dd)
i,
dd->rcd[ctxt]->pkeys);
}
+ rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev, &qib_attr_group);
ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_QIB);
if (ret)
goto err_tx;
- ret = qib_verbs_register_sysfs(dd);
- if (ret)
- goto err_class;
-
return ret;
-err_class:
- rvt_unregister_device(&dd->verbs_dev.rdi);
err_tx:
while (!list_empty(&dev->txreq_free)) {
struct list_head *l = dev->txreq_free.next;
@@ -1716,14 +1694,14 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
* It is only used in post send, which doesn't hold
* the s_lock.
*/
-void _qib_schedule_send(struct rvt_qp *qp)
+bool _qib_schedule_send(struct rvt_qp *qp)
{
struct qib_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_qp_priv *priv = qp->priv;
- queue_work(ppd->qib_wq, &priv->s_work);
+ return queue_work(ppd->qib_wq, &priv->s_work);
}
/**
@@ -1733,8 +1711,9 @@ void _qib_schedule_send(struct rvt_qp *qp)
* This schedules qp progress. The s_lock
* should be held.
*/
-void qib_schedule_send(struct rvt_qp *qp)
+bool qib_schedule_send(struct rvt_qp *qp)
{
if (qib_send_ok(qp))
- _qib_schedule_send(qp);
+ return _qib_schedule_send(qp);
+ return false;
}
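
The void-to-bool change above simply surfaces queue_work()'s return value, which is false when the work item was already pending, so callers can now tell a fresh schedule from a no-op. A minimal sketch of such a caller (the stat counter and helper are hypothetical, not part of this patch):

	/* hypothetical per-driver stat: sends that were newly scheduled */
	static unsigned long newly_scheduled;

	static void example_kick_send(struct rvt_qp *qp)
	{
		if (qib_schedule_send(qp))
			newly_scheduled++;	/* queue_work() queued fresh work */
		/* else: send not allowed, or progress work already pending */
	}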
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 666613eef88f..a4426c24b0d1 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012 - 2018 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -223,8 +223,8 @@ static inline int qib_send_ok(struct rvt_qp *qp)
!(qp->s_flags & RVT_S_ANY_WAIT_SEND));
}
-void _qib_schedule_send(struct rvt_qp *qp);
-void qib_schedule_send(struct rvt_qp *qp);
+bool _qib_schedule_send(struct rvt_qp *qp);
+bool qib_schedule_send(struct rvt_qp *qp);
static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
@@ -292,9 +292,6 @@ void qib_put_txreq(struct qib_verbs_txreq *tx);
int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
u32 hdrwords, struct rvt_sge_state *ss, u32 len);
-void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
- int release);
-
void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
@@ -303,7 +300,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
-int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ bool *call_send);
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
@@ -333,9 +331,6 @@ void _qib_do_send(struct work_struct *work);
void qib_do_send(struct rvt_qp *qp);
-void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status);
-
void qib_send_rc_ack(struct rvt_qp *qp);
int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags);
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c
index 92dc66cc2d50..a3115709fb03 100644
--- a/drivers/infiniband/hw/usnic/usnic_debugfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c
@@ -165,6 +165,5 @@ void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow)
void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow)
{
- if (!IS_ERR_OR_NULL(qp_flow->dbgfs_dentry))
- debugfs_remove(qp_flow->dbgfs_dentry);
+ debugfs_remove(qp_flow->dbgfs_dentry);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index f0538a460328..73bd00f8d2c8 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -76,7 +76,7 @@ static LIST_HEAD(usnic_ib_ibdev_list);
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
struct usnic_ib_vf *vf = obj;
- return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
+ return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));
}
/* End callback dump funcs */
@@ -138,7 +138,7 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
netdev = us_ibdev->netdev;
switch (event) {
case NETDEV_REBOOT:
- usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
ib_event.event = IB_EVENT_PORT_ERR;
ib_event.device = &us_ibdev->ib_dev;
@@ -151,7 +151,8 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
if (!us_ibdev->ufdev->link_up &&
netif_carrier_ok(netdev)) {
usnic_fwd_carrier_up(us_ibdev->ufdev);
- usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("Link UP on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
ib_event.event = IB_EVENT_PORT_ACTIVE;
ib_event.device = &us_ibdev->ib_dev;
ib_event.element.port_num = 1;
@@ -159,7 +160,8 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
} else if (us_ibdev->ufdev->link_up &&
!netif_carrier_ok(netdev)) {
usnic_fwd_carrier_down(us_ibdev->ufdev);
- usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("Link DOWN on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
ib_event.event = IB_EVENT_PORT_ERR;
ib_event.device = &us_ibdev->ib_dev;
@@ -168,17 +170,17 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
} else {
usnic_dbg("Ignoring %s on %s\n",
netdev_cmd_to_name(event),
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
break;
case NETDEV_CHANGEADDR:
if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
sizeof(us_ibdev->ufdev->mac))) {
usnic_dbg("Ignoring addr change on %s\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
} else {
usnic_info(" %s old mac: %pM new mac: %pM\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
us_ibdev->ufdev->mac,
netdev->dev_addr);
usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
@@ -193,19 +195,19 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
case NETDEV_CHANGEMTU:
if (us_ibdev->ufdev->mtu != netdev->mtu) {
usnic_info("MTU Change on %s old: %u new: %u\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
us_ibdev->ufdev->mtu, netdev->mtu);
usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
} else {
usnic_dbg("Ignoring MTU change on %s\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
break;
default:
usnic_dbg("Ignoring event %s on %s",
netdev_cmd_to_name(event),
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
mutex_unlock(&us_ibdev->usdev_lock);
}
@@ -267,7 +269,7 @@ static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
default:
usnic_info("Ignoring event %s on %s",
netdev_cmd_to_name(event),
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
mutex_unlock(&us_ibdev->usdev_lock);
@@ -364,7 +366,6 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
us_ibdev->ib_dev.dev.parent = &dev->dev;
us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
- strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);
us_ibdev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -416,7 +417,9 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.driver_id = RDMA_DRIVER_USNIC;
- if (ib_register_device(&us_ibdev->ib_dev, NULL))
+ rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);
+
+ if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", NULL))
goto err_fwd_dealloc;
usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
@@ -437,9 +440,9 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
kref_init(&us_ibdev->vf_cnt);
usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
- us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
- us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
- us_ibdev->ufdev->mtu);
+ dev_name(&us_ibdev->ib_dev.dev),
+ netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac,
+ us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu);
return us_ibdev;
err_fwd_dealloc:
@@ -452,7 +455,7 @@ err_dealloc:
static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
- usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
+ usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_sysfs_unregister_usdev(us_ibdev);
usnic_fwd_dev_free(us_ibdev->ufdev);
ib_unregister_device(&us_ibdev->ib_dev);
@@ -591,7 +594,7 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
mutex_unlock(&pf->usdev_lock);
usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
- pf->ib_dev.name);
+ dev_name(&pf->ib_dev.dev));
usnic_ib_log_vf(vf);
return 0;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 4210ca14014d..a7e4b2ccfaf8 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -46,9 +46,8 @@
#include "usnic_ib_sysfs.h"
#include "usnic_log.h"
-static ssize_t usnic_ib_show_board(struct device *device,
- struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev =
container_of(device, struct usnic_ib_dev, ib_dev.dev);
@@ -60,13 +59,13 @@ static ssize_t usnic_ib_show_board(struct device *device,
return scnprintf(buf, PAGE_SIZE, "%hu\n", subsystem_device_id);
}
+static DEVICE_ATTR_RO(board_id);
/*
* Report the configuration for this PF
*/
static ssize_t
-usnic_ib_show_config(struct device *device, struct device_attribute *attr,
- char *buf)
+config_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
char *ptr;
@@ -94,7 +93,7 @@ usnic_ib_show_config(struct device *device, struct device_attribute *attr,
n = scnprintf(ptr, left,
"%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
busname,
PCI_SLOT(us_ibdev->pdev->devfn),
PCI_FUNC(us_ibdev->pdev->devfn),
@@ -119,17 +118,17 @@ usnic_ib_show_config(struct device *device, struct device_attribute *attr,
UPDATE_PTR_LEFT(n, ptr, left);
} else {
n = scnprintf(ptr, left, "%s: no VFs\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
UPDATE_PTR_LEFT(n, ptr, left);
}
mutex_unlock(&us_ibdev->usdev_lock);
return ptr - buf;
}
+static DEVICE_ATTR_RO(config);
static ssize_t
-usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
- char *buf)
+iface_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
@@ -138,10 +137,10 @@ usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%s\n",
netdev_name(us_ibdev->netdev));
}
+static DEVICE_ATTR_RO(iface);
static ssize_t
-usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+max_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
@@ -150,10 +149,10 @@ usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%u\n",
kref_read(&us_ibdev->vf_cnt));
}
+static DEVICE_ATTR_RO(max_vf);
static ssize_t
-usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+qp_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
int qp_per_vf;
@@ -165,10 +164,10 @@ usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE,
"%d\n", qp_per_vf);
}
+static DEVICE_ATTR_RO(qp_per_vf);
static ssize_t
-usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+cq_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
@@ -177,21 +176,20 @@ usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%d\n",
us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
}
+static DEVICE_ATTR_RO(cq_per_vf);
+
+static struct attribute *usnic_class_attributes[] = {
+ &dev_attr_board_id.attr,
+ &dev_attr_config.attr,
+ &dev_attr_iface.attr,
+ &dev_attr_max_vf.attr,
+ &dev_attr_qp_per_vf.attr,
+ &dev_attr_cq_per_vf.attr,
+ NULL
+};
-static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL);
-static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL);
-static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL);
-static DEVICE_ATTR(max_vf, S_IRUGO, usnic_ib_show_max_vf, NULL);
-static DEVICE_ATTR(qp_per_vf, S_IRUGO, usnic_ib_show_qp_per_vf, NULL);
-static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL);
-
-static struct device_attribute *usnic_class_attributes[] = {
- &dev_attr_board_id,
- &dev_attr_config,
- &dev_attr_iface,
- &dev_attr_max_vf,
- &dev_attr_qp_per_vf,
- &dev_attr_cq_per_vf,
+const struct attribute_group usnic_attr_group = {
+ .attrs = usnic_class_attributes,
};
struct qpn_attribute {
@@ -278,18 +276,6 @@ static struct kobj_type usnic_ib_qpn_type = {
int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
{
- int i;
- int err;
- for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
- err = device_create_file(&us_ibdev->ib_dev.dev,
- usnic_class_attributes[i]);
- if (err) {
- usnic_err("Failed to create device file %d for %s eith err %d",
- i, us_ibdev->ib_dev.name, err);
- return -EINVAL;
- }
- }
-
/* create kernel object for looking at individual QPs */
kobject_get(&us_ibdev->ib_dev.dev.kobj);
us_ibdev->qpn_kobj = kobject_create_and_add("qpn",
@@ -304,12 +290,6 @@ int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
- device_remove_file(&us_ibdev->ib_dev.dev,
- usnic_class_attributes[i]);
- }
-
kobject_put(us_ibdev->qpn_kobj);
}
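
The conversion above replaces per-file device_create_file()/device_remove_file() loops with a single attribute_group that the IB core registers via rdma_set_device_sysfs_group(). The shape of the pattern, reduced to one attribute (a generic sketch, not code from this patch):

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
	}
	static DEVICE_ATTR_RO(example);	/* creates dev_attr_example, .show = example_show */

	static struct attribute *example_attrs[] = {
		&dev_attr_example.attr,
		NULL
	};

	static const struct attribute_group example_attr_group = {
		.attrs = example_attrs,
	};

Because the core creates and tears the group down together with the device, the error-unwind code the old per-file loop needed simply disappears.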
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
index 3d98e16cfeaf..b1f064cec850 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
@@ -41,4 +41,6 @@ void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev);
void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp);
void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp);
+extern const struct attribute_group usnic_attr_group;
+
#endif /* !USNIC_IB_SYSFS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 9973ac893635..0b91ff36768a 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -159,7 +159,8 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
err = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (err) {
- usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
+ usnic_err("Failed to copy udata for %s",
+ dev_name(&us_ibdev->ib_dev.dev));
return err;
}
@@ -197,7 +198,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
vnic = vf->vnic;
if (!usnic_vnic_check_room(vnic, res_spec)) {
usnic_dbg("Found used vnic %s from %s\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
pci_name(usnic_vnic_get_pdev(
vnic)));
qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
@@ -230,7 +231,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
spin_unlock(&vf->lock);
}
- usnic_info("No free qp grp found on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("No free qp grp found on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-ENOMEM);
qp_grp_check:
@@ -471,7 +473,7 @@ struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
}
usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
- pd, context, ibdev->name);
+ pd, context, dev_name(&ibdev->dev));
return &pd->ibpd;
}
@@ -508,20 +510,20 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
usnic_err("%s: cannot copy udata for create_qp\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-EINVAL);
}
err = create_qp_validate_user_data(cmd);
if (err) {
usnic_err("%s: Failed to validate user data\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-EINVAL);
}
if (init_attr->qp_type != IB_QPT_UD) {
usnic_err("%s asked to make a non-UD QP: %d\n",
- us_ibdev->ib_dev.name, init_attr->qp_type);
+ dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c
index e0a95538c364..82dd810bc000 100644
--- a/drivers/infiniband/hw/usnic/usnic_transport.c
+++ b/drivers/infiniband/hw/usnic/usnic_transport.c
@@ -121,7 +121,7 @@ void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num)
if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
spin_lock(&roce_bitmap_lock);
if (!port_num) {
- usnic_err("Unreserved unvalid port num 0 for %s\n",
+ usnic_err("Unreserved invalid port num 0 for %s\n",
usnic_transport_to_str(type));
goto out_roce_custom;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 9dd39daa602b..49275a548751 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -54,18 +54,6 @@ static struct workqueue_struct *usnic_uiom_wq;
((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
-static void usnic_uiom_reg_account(struct work_struct *work)
-{
- struct usnic_uiom_reg *umem = container_of(work,
- struct usnic_uiom_reg, work);
-
- down_write(&umem->mm->mmap_sem);
- umem->mm->locked_vm -= umem->diff;
- up_write(&umem->mm->mmap_sem);
- mmput(umem->mm);
- kfree(umem);
-}
-
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
struct device *dev,
unsigned long iova, int flags,
@@ -99,8 +87,9 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
}
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
- int dmasync, struct list_head *chunk_list)
+ int dmasync, struct usnic_uiom_reg *uiomr)
{
+ struct list_head *chunk_list = &uiomr->chunk_list;
struct page **page_list;
struct scatterlist *sg;
struct usnic_uiom_chunk *chunk;
@@ -114,6 +103,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int flags;
dma_addr_t pa;
unsigned int gup_flags;
+ struct mm_struct *mm;
/*
* If the combination of the addr and size requested for this memory
@@ -136,7 +126,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
- down_write(&current->mm->mmap_sem);
+ uiomr->owning_mm = mm = current->mm;
+ down_write(&mm->mmap_sem);
locked = npages + current->mm->pinned_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -196,10 +187,12 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
out:
if (ret < 0)
usnic_uiom_put_pages(chunk_list, 0);
- else
- current->mm->pinned_vm = locked;
+ else {
+ mm->pinned_vm = locked;
+ mmgrab(uiomr->owning_mm);
+ }
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
free_page((unsigned long) page_list);
return ret;
}
@@ -379,7 +372,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
uiomr->pd = pd;
err = usnic_uiom_get_pages(addr, size, writable, dmasync,
- &uiomr->chunk_list);
+ uiomr);
if (err) {
usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
vpn_start, vpn_last, err);
@@ -426,29 +419,39 @@ out_put_intervals:
out_put_pages:
usnic_uiom_put_pages(&uiomr->chunk_list, 0);
spin_unlock(&pd->lock);
+ mmdrop(uiomr->owning_mm);
out_free_uiomr:
kfree(uiomr);
return ERR_PTR(err);
}
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
- struct ib_ucontext *ucontext)
+static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
- struct task_struct *task;
- struct mm_struct *mm;
- unsigned long diff;
+ mmdrop(uiomr->owning_mm);
+ kfree(uiomr);
+}
- __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
+static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
+{
+ return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+}
- task = get_pid_task(ucontext->tgid, PIDTYPE_PID);
- if (!task)
- goto out;
- mm = get_task_mm(task);
- put_task_struct(task);
- if (!mm)
- goto out;
+static void usnic_uiom_release_defer(struct work_struct *work)
+{
+ struct usnic_uiom_reg *uiomr =
+ container_of(work, struct usnic_uiom_reg, work);
- diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+ down_write(&uiomr->owning_mm->mmap_sem);
+ uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+ up_write(&uiomr->owning_mm->mmap_sem);
+
+ __usnic_uiom_release_tail(uiomr);
+}
+
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
+ struct ib_ucontext *context)
+{
+ __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
/*
* We may be called with the mm's mmap_sem already held. This
@@ -456,25 +459,21 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
* the last reference to our file and calls our release
* method. If there are memory regions to destroy, we'll end
* up here and not be able to take the mmap_sem. In that case
- * we defer the vm_locked accounting to the system workqueue.
+ * we defer the vm_locked accounting to a workqueue.
*/
- if (ucontext->closing) {
- if (!down_write_trylock(&mm->mmap_sem)) {
- INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
- uiomr->mm = mm;
- uiomr->diff = diff;
-
+ if (context->closing) {
+ if (!down_write_trylock(&uiomr->owning_mm->mmap_sem)) {
+ INIT_WORK(&uiomr->work, usnic_uiom_release_defer);
queue_work(usnic_uiom_wq, &uiomr->work);
return;
}
- } else
- down_write(&mm->mmap_sem);
+ } else {
+ down_write(&uiomr->owning_mm->mmap_sem);
+ }
+ uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+ up_write(&uiomr->owning_mm->mmap_sem);
- mm->pinned_vm -= diff;
- up_write(&mm->mmap_sem);
- mmput(mm);
-out:
- kfree(uiomr);
+ __usnic_uiom_release_tail(uiomr);
}
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
index 8c096acff123..b86a9731071b 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -71,8 +71,7 @@ struct usnic_uiom_reg {
int writable;
struct list_head chunk_list;
struct work_struct work;
- struct mm_struct *mm;
- unsigned long diff;
+ struct mm_struct *owning_mm;
};
struct usnic_uiom_chunk {
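
The heart of the usnic_uiom rework is the mm lifetime rule: mmgrab() pins only the struct mm_struct (mm_count), not the address space (mm_users), so owning_mm stays valid for pinned_vm accounting even after the process exits, and mmdrop() releases it. Condensed from the hunks above (ordering illustrative):

	/* registration, inside usnic_uiom_get_pages() */
	uiomr->owning_mm = current->mm;
	mmgrab(uiomr->owning_mm);		/* pin struct mm_struct only */

	/* release, possibly deferred to a workqueue if mmap_sem is contended */
	down_write(&uiomr->owning_mm->mmap_sem);
	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
	up_write(&uiomr->owning_mm->mmap_sem);
	mmdrop(uiomr->owning_mm);		/* pairs with mmgrab() */

This replaces the old get_pid_task()/get_task_mm()/mmput() dance, which silently skipped the accounting when the task had already gone away.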
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index a5719899f49a..398443f43dc3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -65,32 +65,36 @@ static struct workqueue_struct *event_wq;
static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *pvrdma_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL,
+};
-static struct device_attribute *pvrdma_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group pvrdma_attr_group = {
+ .attrs = pvrdma_class_attributes,
};
static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
@@ -160,9 +164,7 @@ static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
static int pvrdma_register_device(struct pvrdma_dev *dev)
{
int ret = -1;
- int i = 0;
- strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
dev->flags = 0;
@@ -266,24 +268,16 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
}
dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
spin_lock_init(&dev->srq_tbl_lock);
+ rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
- ret = ib_register_device(&dev->ib_dev, NULL);
+ ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", NULL);
if (ret)
goto err_srq_free;
- for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
- ret = device_create_file(&dev->ib_dev.dev,
- pvrdma_class_attributes[i]);
- if (ret)
- goto err_class;
- }
-
dev->ib_active = true;
return 0;
-err_class:
- ib_unregister_device(&dev->ib_dev);
err_srq_free:
kfree(dev->srq_tbl);
err_qp_free:
@@ -735,7 +729,7 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
default:
dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
- event, dev->ib_dev.name);
+ event, dev_name(&dev->ib_dev.dev));
break;
}
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 60083c0363a5..cf22f57a9f0d 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -499,7 +499,7 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
- attr_mask, IB_LINK_LAYER_ETHERNET)) {
+ attr_mask)) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
index 98e798007f75..7df896a18d38 100644
--- a/drivers/infiniband/sw/rdmavt/Kconfig
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_RDMAVT
tristate "RDMA verbs transport library"
- depends on 64BIT && ARCH_DMA_ADDR_T_64BIT
+ depends on X86_64 && ARCH_DMA_ADDR_T_64BIT
depends on PCI
select DMA_VIRT_OPS
---help---
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 5ce403c6cddb..1735deb1a9d4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -118,6 +118,187 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
};
EXPORT_SYMBOL(ib_rvt_state_ops);
+/* platform specific: return the last level cache (llc) size, in KiB */
+static int rvt_wss_llc_size(void)
+{
+ /* assume that the boot CPU value is universal for all CPUs */
+ return boot_cpu_data.x86_cache_size;
+}
+
+/* platform specific: cacheless copy */
+static void cacheless_memcpy(void *dst, void *src, size_t n)
+{
+ /*
+ * Use the only available X64 cacheless copy. Add a __user cast
+ * to quiet sparse. The src argument is already in the kernel so
+ * there are no security issues. The extra fault recovery machinery
+ * is not invoked.
+ */
+ __copy_user_nocache(dst, (void __user *)src, n, 0);
+}
+
+void rvt_wss_exit(struct rvt_dev_info *rdi)
+{
+ struct rvt_wss *wss = rdi->wss;
+
+ if (!wss)
+ return;
+
+ /* coded to handle partially initialized and repeat callers */
+ kfree(wss->entries);
+ wss->entries = NULL;
+ kfree(rdi->wss);
+ rdi->wss = NULL;
+}
+
+/**
+ * rvt_wss_init - Init wss data structures
+ *
+ * Return: 0 on success
+ */
+int rvt_wss_init(struct rvt_dev_info *rdi)
+{
+ unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
+ unsigned int wss_threshold = rdi->dparms.wss_threshold;
+ unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
+ long llc_size;
+ long llc_bits;
+ long table_size;
+ long table_bits;
+ struct rvt_wss *wss;
+ int node = rdi->dparms.node;
+
+ if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
+ rdi->wss = NULL;
+ return 0;
+ }
+
+ rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
+ if (!rdi->wss)
+ return -ENOMEM;
+ wss = rdi->wss;
+
+ /* check for a valid percent range - default to 80 if none or invalid */
+ if (wss_threshold < 1 || wss_threshold > 100)
+ wss_threshold = 80;
+
+ /* reject a wildly large period */
+ if (wss_clean_period > 1000000)
+ wss_clean_period = 256;
+
+ /* reject a zero period */
+ if (wss_clean_period == 0)
+ wss_clean_period = 1;
+
+ /*
+ * Calculate the table size - the next power of 2 larger than the
+ * LLC size. LLC size is in KiB.
+ */
+ llc_size = rvt_wss_llc_size() * 1024;
+ table_size = roundup_pow_of_two(llc_size);
+
+ /* one bit per page in rounded up table */
+ llc_bits = llc_size / PAGE_SIZE;
+ table_bits = table_size / PAGE_SIZE;
+ wss->pages_mask = table_bits - 1;
+ wss->num_entries = table_bits / BITS_PER_LONG;
+
+ wss->threshold = (llc_bits * wss_threshold) / 100;
+ if (wss->threshold == 0)
+ wss->threshold = 1;
+
+ wss->clean_period = wss_clean_period;
+ atomic_set(&wss->clean_counter, wss_clean_period);
+
+ wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
+ GFP_KERNEL, node);
+ if (!wss->entries) {
+ rvt_wss_exit(rdi);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Advance the clean counter. When the clean period has expired,
+ * clean an entry.
+ *
+ * This is implemented in atomics to avoid locking. Because multiple
+ * variables are involved, it can be racy which can lead to slightly
+ * inaccurate information. Since this is only a heuristic, this is
+ * OK. Any inaccuracies will clean themselves out as the counter
+ * advances. That said, it is unlikely the entry clean operation will
+ * race - the next possible racer will not start until the next clean
+ * period.
+ *
+ * The clean counter is implemented as a decrement to zero. When zero
+ * is reached an entry is cleaned.
+ */
+static void wss_advance_clean_counter(struct rvt_wss *wss)
+{
+ int entry;
+ int weight;
+ unsigned long bits;
+
+ /* become the cleaner if we decrement the counter to zero */
+ if (atomic_dec_and_test(&wss->clean_counter)) {
+ /*
+ * Set, not add, the clean period. This avoids an issue
+ * where the counter could decrement below the clean period.
+ * Doing a set can result in lost decrements, slowing the
+ * clean advance. Since this is a heuristic, this possible
+ * slowdown is OK.
+ *
+ * An alternative is to loop, advancing the counter by a
+ * clean period until the result is > 0. However, this could
+ * lead to several threads keeping another in the clean loop.
+ * This could be mitigated by limiting the number of times
+ * we stay in the loop.
+ */
+ atomic_set(&wss->clean_counter, wss->clean_period);
+
+ /*
+ * Uniquely grab the entry to clean and move to next.
+ * The current entry is always the lower bits of
+ * wss.clean_entry. The table size, wss.num_entries,
+ * is always a power-of-2.
+ */
+ entry = (atomic_inc_return(&wss->clean_entry) - 1)
+ & (wss->num_entries - 1);
+
+ /* clear the entry and count the bits */
+ bits = xchg(&wss->entries[entry], 0);
+ weight = hweight64((u64)bits);
+ /* only adjust the contended total count if needed */
+ if (weight)
+ atomic_sub(weight, &wss->total_count);
+ }
+}
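
The decrement-to-zero pattern above ports cleanly to C11 atomics; a minimal user-space sketch of just the counter (names illustrative, not kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int clean_counter = 256;
	static const int clean_period = 256;

	/* Returns true when this caller won the race to become the cleaner. */
	static bool advance_clean_counter(void)
	{
		if (atomic_fetch_sub(&clean_counter, 1) == 1) {
			/* set, not add, so lost decrements only slow the advance */
			atomic_store(&clean_counter, clean_period);
			return true;
		}
		return false;
	}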
+
+/*
+ * Insert the given address into the working set array.
+ */
+static void wss_insert(struct rvt_wss *wss, void *address)
+{
+ u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
+ u32 entry = page / BITS_PER_LONG; /* assumes this ends up as a shift */
+ u32 nr = page & (BITS_PER_LONG - 1);
+
+ if (!test_and_set_bit(nr, &wss->entries[entry]))
+ atomic_inc(&wss->total_count);
+
+ wss_advance_clean_counter(wss);
+}
+
+/*
+ * Is the working set larger than the threshold?
+ */
+static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
+{
+ return atomic_read(&wss->total_count) >= wss->threshold;
+}
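
wss_insert() maps each page address to one bit in a flat bitmap; because rvt_wss_init() rounds the table to a power of two, the mask, divide, and modulo all reduce to bit operations. A standalone sketch of the arithmetic (4 KiB pages and a 32 MiB LLC are assumptions for illustration):

	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
	#define BITS_PER_LONG	(8 * sizeof(unsigned long))

	int main(void)
	{
		unsigned long pages_mask = 8192 - 1;	/* 32 MiB LLC / 4 KiB = 8192 page bits */
		unsigned long addr = 0x7f3a12345678UL;	/* arbitrary virtual address */

		unsigned long page  = (addr >> PAGE_SHIFT) & pages_mask;
		unsigned long entry = page / BITS_PER_LONG;	/* index into wss->entries[] */
		unsigned long nr    = page % BITS_PER_LONG;	/* bit within that word */

		printf("page %lu -> entries[%lu], bit %lu\n", page, entry, nr);
		return 0;
	}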
+
static void get_map_page(struct rvt_qpn_table *qpt,
struct rvt_qpn_map *map)
{
@@ -1164,11 +1345,8 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int lastwqe = 0;
int mig = 0;
int pmtu = 0; /* for gcc warning only */
- enum rdma_link_layer link;
int opa_ah;
- link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
-
spin_lock_irq(&qp->r_lock);
spin_lock(&qp->s_hlock);
spin_lock(&qp->s_lock);
@@ -1179,7 +1357,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, link))
+ attr_mask))
goto inval;
if (rdi->driver_f.check_modify_qp &&
@@ -1718,7 +1896,7 @@ static inline int rvt_qp_is_avail(
*/
static int rvt_post_one_wr(struct rvt_qp *qp,
const struct ib_send_wr *wr,
- int *call_send)
+ bool *call_send)
{
struct rvt_swqe *wqe;
u32 next;
@@ -1823,15 +2001,11 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
wqe->wr.num_sge = j;
}
- /* general part of wqe valid - allow for driver checks */
- if (rdi->driver_f.check_send_wqe) {
- ret = rdi->driver_f.check_send_wqe(qp, wqe);
- if (ret < 0)
- goto bail_inval_free;
- if (ret)
- *call_send = ret;
- }
-
+ /*
+ * Calculate and set SWQE PSN values prior to handing it off
+ * to the driver's check routine. This gives the driver the
+ * opportunity to adjust PSN values based on internal checks.
+ */
log_pmtu = qp->log_pmtu;
if (qp->ibqp.qp_type != IB_QPT_UC &&
qp->ibqp.qp_type != IB_QPT_RC) {
@@ -1856,8 +2030,18 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
(wqe->length ?
((wqe->length - 1) >> log_pmtu) :
0);
- qp->s_next_psn = wqe->lpsn + 1;
}
+
+ /* general part of wqe valid - allow for driver checks */
+ if (rdi->driver_f.setup_wqe) {
+ ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
+ if (ret < 0)
+ goto bail_inval_free_ref;
+ }
+
+ if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
+ qp->s_next_psn = wqe->lpsn + 1;
+
if (unlikely(reserved_op)) {
wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
rvt_qp_wqe_reserve(qp, wqe);
@@ -1871,6 +2055,10 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
return 0;
+bail_inval_free_ref:
+ if (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC)
+ atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
bail_inval_free:
/* release mr holds */
while (j) {
@@ -1897,7 +2085,7 @@ int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
unsigned long flags = 0;
- int call_send;
+ bool call_send;
unsigned nreq = 0;
int err = 0;
@@ -1930,7 +2118,11 @@ int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
bail:
spin_unlock_irqrestore(&qp->s_hlock, flags);
if (nreq) {
- if (call_send)
+ /*
+ * Only call do_send if there is exactly one packet, and the
+ * driver said it was ok.
+ */
+ if (nreq == 1 && call_send)
rdi->driver_f.do_send(qp);
else
rdi->driver_f.schedule_send_no_lock(qp);
@@ -2465,3 +2657,454 @@ void rvt_qp_iter(struct rvt_dev_info *rdi,
rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
+
+/*
+ * This should be called with s_lock held.
+ */
+void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ enum ib_wc_status status)
+{
+ u32 old_last, last;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+ return;
+
+ last = qp->s_last;
+ old_last = last;
+ trace_rvt_qp_send_completion(qp, wqe, last);
+ if (++last >= qp->s_size)
+ last = 0;
+ trace_rvt_qp_send_completion(qp, wqe, last);
+ qp->s_last = last;
+ /* See post_send() */
+ barrier();
+ rvt_put_swqe(wqe);
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
+
+ rvt_qp_swqe_complete(qp,
+ wqe,
+ rdi->wc_opcode[wqe->wr.opcode],
+ status);
+
+ if (qp->s_acked == old_last)
+ qp->s_acked = last;
+ if (qp->s_cur == old_last)
+ qp->s_cur = last;
+ if (qp->s_tail == old_last)
+ qp->s_tail = last;
+ if (qp->state == IB_QPS_SQD && last == qp->s_cur)
+ qp->s_draining = 0;
+}
+EXPORT_SYMBOL(rvt_send_complete);
+
+/**
+ * rvt_copy_sge - copy data to SGE memory
+ * @qp: associated QP
+ * @ss: the SGE state
+ * @data: the data to copy
+ * @length: the length of the data
+ * @release: boolean to release MR
+ * @copy_last: do a separate copy of the last 8 bytes
+ */
+void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
+ void *data, u32 length,
+ bool release, bool copy_last)
+{
+ struct rvt_sge *sge = &ss->sge;
+ int i;
+ bool in_last = false;
+ bool cacheless_copy = false;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ struct rvt_wss *wss = rdi->wss;
+ unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
+
+ if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
+ cacheless_copy = length >= PAGE_SIZE;
+ } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
+ if (length >= PAGE_SIZE) {
+ /*
+ * NOTE: this *assumes*:
+ * o The first vaddr is the dest.
+ * o If multiple pages, then vaddr is sequential.
+ */
+ wss_insert(wss, sge->vaddr);
+ if (length >= (2 * PAGE_SIZE))
+ wss_insert(wss, (sge->vaddr + PAGE_SIZE));
+
+ cacheless_copy = wss_exceeds_threshold(wss);
+ } else {
+ wss_advance_clean_counter(wss);
+ }
+ }
+
+ if (copy_last) {
+ if (length > 8) {
+ length -= 8;
+ } else {
+ copy_last = false;
+ in_last = true;
+ }
+ }
+
+again:
+ while (length) {
+ u32 len = rvt_get_sge_length(sge, length);
+
+ WARN_ON_ONCE(len == 0);
+ if (unlikely(in_last)) {
+ /* enforce byte transfer ordering */
+ for (i = 0; i < len; i++)
+ ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
+ } else if (cacheless_copy) {
+ cacheless_memcpy(sge->vaddr, data, len);
+ } else {
+ memcpy(sge->vaddr, data, len);
+ }
+ rvt_update_sge(ss, len, release);
+ data += len;
+ length -= len;
+ }
+
+ if (copy_last) {
+ copy_last = false;
+ in_last = true;
+ length = 8;
+ goto again;
+ }
+}
+EXPORT_SYMBOL(rvt_copy_sge);
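
The three sge_copy_mode policies in rvt_copy_sge() reduce to a small decision function; a hypothetical condensation (the RVT_SGE_COPY_* constants are real, the helper itself is not in the patch):

	/* hot_ws: wss_exceeds_threshold(), i.e. the working set has outgrown the LLC */
	static bool use_cacheless_copy(unsigned int mode, u32 length, bool hot_ws)
	{
		if (length < PAGE_SIZE)
			return false;		/* small copies always use memcpy */
		if (mode == RVT_SGE_COPY_CACHELESS)
			return true;		/* unconditional for large copies */
		if (mode == RVT_SGE_COPY_ADAPTIVE)
			return hot_ws;		/* bypass the cache only under pressure */
		return false;			/* RVT_SGE_COPY_MEMCPY */
	}

qib opts into RVT_SGE_COPY_MEMCPY above, so it keeps its old behavior, while drivers that benefit can select the adaptive mode.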
+
+/**
+ * ruc_loopback - handle UC and RC loopback requests
+ * @sqp: the sending QP
+ *
+ * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
+ * Note that although we are single threaded due to the send engine, we still
+ * have to protect against post_send(). We don't have to worry about
+ * receive interrupts since this is a connected protocol and all packets
+ * will pass through here.
+ */
+void rvt_ruc_loopback(struct rvt_qp *sqp)
+{
+ struct rvt_ibport *rvp = NULL;
+ struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
+ struct rvt_qp *qp;
+ struct rvt_swqe *wqe;
+ struct rvt_sge *sge;
+ unsigned long flags;
+ struct ib_wc wc;
+ u64 sdata;
+ atomic64_t *maddr;
+ enum ib_wc_status send_status;
+ bool release;
+ int ret;
+ bool copy_last = false;
+ int local_ops = 0;
+
+ rcu_read_lock();
+ rvp = rdi->ports[sqp->port_num - 1];
+
+ /*
+ * Note that we check the responder QP state after
+ * checking the requester's state.
+ */
+
+ qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
+ sqp->remote_qpn);
+
+ spin_lock_irqsave(&sqp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
+ !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+ goto unlock;
+
+ sqp->s_flags |= RVT_S_BUSY;
+
+again:
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
+ goto clr_busy;
+ wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
+
+ /* Return if it is not OK to start a new work request. */
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
+ goto clr_busy;
+ /* We are in the error state, flush the work request. */
+ send_status = IB_WC_WR_FLUSH_ERR;
+ goto flush_send;
+ }
+
+ /*
+ * We can rely on the entry not changing without the s_lock
+ * being held until we update s_last.
+ * We increment s_cur to indicate s_last is in progress.
+ */
+ if (sqp->s_last == sqp->s_cur) {
+ if (++sqp->s_cur >= sqp->s_size)
+ sqp->s_cur = 0;
+ }
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+
+ if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
+ qp->ibqp.qp_type != sqp->ibqp.qp_type) {
+ rvp->n_pkt_drops++;
+ /*
+ * For RC, the requester would timeout and retry so
+ * shortcut the timeouts and just signal too many retries.
+ */
+ if (sqp->ibqp.qp_type == IB_QPT_RC)
+ send_status = IB_WC_RETRY_EXC_ERR;
+ else
+ send_status = IB_WC_SUCCESS;
+ goto serr;
+ }
+
+ memset(&wc, 0, sizeof(wc));
+ send_status = IB_WC_SUCCESS;
+
+ release = true;
+ sqp->s_sge.sge = wqe->sg_list[0];
+ sqp->s_sge.sg_list = wqe->sg_list + 1;
+ sqp->s_sge.num_sge = wqe->wr.num_sge;
+ sqp->s_len = wqe->length;
+ switch (wqe->wr.opcode) {
+ case IB_WR_REG_MR:
+ goto send_comp;
+
+ case IB_WR_LOCAL_INV:
+ if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
+ if (rvt_invalidate_rkey(sqp,
+ wqe->wr.ex.invalidate_rkey))
+ send_status = IB_WC_LOC_PROT_ERR;
+ local_ops = 1;
+ }
+ goto send_comp;
+
+ case IB_WR_SEND_WITH_INV:
+ if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
+ wc.wc_flags = IB_WC_WITH_INVALIDATE;
+ wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
+ }
+ goto send;
+
+ case IB_WR_SEND_WITH_IMM:
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ /* FALLTHROUGH */
+ case IB_WR_SEND:
+send:
+ ret = rvt_get_rwqe(qp, false);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ ret = rvt_get_rwqe(qp, true);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ /* skip copy_last set and qp_access_flags recheck */
+ goto do_write;
+ case IB_WR_RDMA_WRITE:
+ copy_last = rvt_is_user_qp(qp);
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+do_write:
+ if (wqe->length == 0)
+ break;
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ wqe->rdma_wr.remote_addr,
+ wqe->rdma_wr.rkey,
+ IB_ACCESS_REMOTE_WRITE)))
+ goto acc_err;
+ qp->r_sge.sg_list = NULL;
+ qp->r_sge.num_sge = 1;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_RDMA_READ:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto inv_err;
+ if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ wqe->rdma_wr.remote_addr,
+ wqe->rdma_wr.rkey,
+ IB_ACCESS_REMOTE_READ)))
+ goto acc_err;
+ release = false;
+ sqp->s_sge.sg_list = NULL;
+ sqp->s_sge.num_sge = 1;
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->wr.num_sge;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto inv_err;
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ wqe->atomic_wr.remote_addr,
+ wqe->atomic_wr.rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
+ sdata = wqe->atomic_wr.compare_add;
+ *(u64 *)sqp->s_sge.sge.vaddr =
+ (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+ (u64)atomic64_add_return(sdata, maddr) - sdata :
+ (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
+ sdata, wqe->atomic_wr.swap);
+ rvt_put_mr(qp->r_sge.sge.mr);
+ qp->r_sge.num_sge = 0;
+ goto send_comp;
+
+ default:
+ send_status = IB_WC_LOC_QP_OP_ERR;
+ goto serr;
+ }
+
+ sge = &sqp->s_sge.sge;
+ while (sqp->s_len) {
+ u32 len = sqp->s_len;
+
+ if (len > sge->length)
+ len = sge->length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ WARN_ON_ONCE(len == 0);
+ rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
+ len, release, copy_last);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (!release)
+ rvt_put_mr(sge->mr);
+ if (--sqp->s_sge.num_sge)
+ *sge = *sqp->s_sge.sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= RVT_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ sqp->s_len -= len;
+ }
+ if (release)
+ rvt_put_ss(&qp->r_sge);
+
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
+ goto send_comp;
+
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
+ wc.port_num = 1;
+ /* Signal completion event if the solicited bit is set. */
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
+ wqe->wr.send_flags & IB_SEND_SOLICITED);
+
+send_comp:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ rvp->n_loop_pkts++;
+flush_send:
+ sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+ rvt_send_complete(sqp, wqe, send_status);
+ if (local_ops) {
+ atomic_dec(&sqp->local_ops_pending);
+ local_ops = 0;
+ }
+ goto again;
+
+rnr_nak:
+ /* Handle RNR NAK */
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ goto send_comp;
+ rvp->n_rnr_naks++;
+ /*
+ * Note: we don't need the s_lock held since the BUSY flag
+ * makes this single threaded.
+ */
+ if (sqp->s_rnr_retry == 0) {
+ send_status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto serr;
+ }
+ if (sqp->s_rnr_retry_cnt < 7)
+ sqp->s_rnr_retry--;
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
+ goto clr_busy;
+ rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
+ IB_AETH_CREDIT_SHIFT);
+ goto clr_busy;
+
+op_err:
+ send_status = IB_WC_REM_OP_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+inv_err:
+ send_status = IB_WC_REM_INV_REQ_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+acc_err:
+ send_status = IB_WC_REM_ACCESS_ERR;
+ wc.status = IB_WC_LOC_PROT_ERR;
+err:
+ /* responder goes to error state */
+ rvt_rc_error(qp, wc.status);
+
+serr:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ rvt_send_complete(sqp, wqe, send_status);
+ if (sqp->ibqp.qp_type == IB_QPT_RC) {
+ int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+
+ sqp->s_flags &= ~RVT_S_BUSY;
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = sqp->ibqp.device;
+ ev.element.qp = &sqp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
+ }
+ goto done;
+ }
+clr_busy:
+ sqp->s_flags &= ~RVT_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+done:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(rvt_ruc_loopback);
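
For reference, the atomic branch of the switch above emulates both IB atomics directly on responder memory; condensed here for readability (illustrative, with the surrounding checks elided):

	u64 old;
	u64 *vaddr = qp->r_sge.sge.vaddr;	/* rvt_rkey_ok() validated 8 bytes */
	u64 arg = wqe->atomic_wr.compare_add;

	if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		/* fetch-and-add returns the pre-add value */
		old = (u64)atomic64_add_return(arg, (atomic64_t *)vaddr) - arg;
	else	/* IB_WR_ATOMIC_CMP_AND_SWP */
		old = cmpxchg(vaddr, arg, wqe->atomic_wr.swap);

	*(u64 *)sqp->s_sge.sge.vaddr = old;	/* result back to the requester */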
diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h
index 264811fdc530..6d883972e0b8 100644
--- a/drivers/infiniband/sw/rdmavt/qp.h
+++ b/drivers/infiniband/sw/rdmavt/qp.h
@@ -66,4 +66,6 @@ int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
+int rvt_wss_init(struct rvt_dev_info *rdi);
+void rvt_wss_exit(struct rvt_dev_info *rdi);
#endif /* DEF_RVTQP_H */
diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h
index 0ef25fc49f25..d5df352eadb1 100644
--- a/drivers/infiniband/sw/rdmavt/trace_tx.h
+++ b/drivers/infiniband/sw/rdmavt/trace_tx.h
@@ -153,6 +153,48 @@ TRACE_EVENT(
)
);
+TRACE_EVENT(
+ rvt_qp_send_completion,
+ TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
+ TP_ARGS(qp, wqe, idx),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(struct rvt_swqe *, wqe)
+ __field(u64, wr_id)
+ __field(u32, qpn)
+ __field(u32, qpt)
+ __field(u32, length)
+ __field(u32, idx)
+ __field(u32, ssn)
+ __field(enum ib_wr_opcode, opcode)
+ __field(int, send_flags)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->wqe = wqe;
+ __entry->wr_id = wqe->wr.wr_id;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->qpt = qp->ibqp.qp_type;
+ __entry->length = wqe->length;
+ __entry->idx = idx;
+ __entry->ssn = wqe->ssn;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->send_flags = wqe->wr.send_flags;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->qpt,
+ __entry->wqe,
+ __entry->idx,
+ __entry->wr_id,
+ __entry->length,
+ __entry->ssn,
+ __entry->opcode,
+ __entry->send_flags
+ )
+);
#endif /* __RVT_TRACE_TX_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 17e4abc067af..723d3daf2eba 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -774,6 +774,13 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
goto bail_no_mr;
}
+ /* Memory Working Set Size */
+ ret = rvt_wss_init(rdi);
+ if (ret) {
+ rvt_pr_err(rdi, "Error in WSS init.\n");
+ goto bail_mr;
+ }
+
/* Completion queues */
spin_lock_init(&rdi->n_cqs_lock);
@@ -828,10 +835,11 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
rdi->ibdev.driver_id = driver_id;
/* We are now good to announce we exist */
- ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
+ ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev),
+ rdi->driver_f.port_callback);
if (ret) {
rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
- goto bail_mr;
+ goto bail_wss;
}
rvt_create_mad_agents(rdi);
@@ -839,6 +847,8 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
rvt_pr_info(rdi, "Registration with rdmavt done.\n");
return ret;
+bail_wss:
+ rvt_wss_exit(rdi);
bail_mr:
rvt_mr_exit(rdi);
@@ -862,6 +872,7 @@ void rvt_unregister_device(struct rvt_dev_info *rdi)
rvt_free_mad_agents(rdi);
ib_unregister_device(&rdi->ibdev);
+ rvt_wss_exit(rdi);
rvt_mr_exit(rdi);
rvt_qp_exit(rdi);
}
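
The new bail_wss label keeps the error unwind in strict reverse order of initialization, the usual kernel goto-cleanup idiom. Its generic shape, with stubbed stand-ins for the rdmavt init/exit pairs (illustrative names only):

	static int init_wss(void) { return 0; }		/* stands in for rvt_wss_init() */
	static int register_dev(void) { return 0; }	/* stands in for ib_register_device() */
	static void exit_wss(void) { }			/* stands in for rvt_wss_exit() */
	static void exit_mr(void) { }			/* stands in for rvt_mr_exit() */

	static int example_register(void)
	{
		int ret;

		ret = init_wss();
		if (ret)
			goto bail_mr;

		ret = register_dev();
		if (ret)
			goto bail_wss;

		return 0;

	bail_wss:
		exit_wss();
	bail_mr:
		exit_mr();
		return ret;
	}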
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 10999fa69281..383e65c7bbc0 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -103,7 +103,7 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
rxe->attr.max_ee_init_rd_atom = RXE_MAX_EE_INIT_RD_ATOM;
- rxe->attr.atomic_cap = RXE_ATOMIC_CAP;
+ rxe->attr.atomic_cap = IB_ATOMIC_HCA;
rxe->attr.max_ee = RXE_MAX_EE;
rxe->attr.max_rdd = RXE_MAX_RDD;
rxe->attr.max_mw = RXE_MAX_MW;
@@ -128,9 +128,9 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
/* initialize port attributes */
static int rxe_init_port_param(struct rxe_port *port)
{
- port->attr.state = RXE_PORT_STATE;
- port->attr.max_mtu = RXE_PORT_MAX_MTU;
- port->attr.active_mtu = RXE_PORT_ACTIVE_MTU;
+ port->attr.state = IB_PORT_DOWN;
+ port->attr.max_mtu = IB_MTU_4096;
+ port->attr.active_mtu = IB_MTU_256;
port->attr.gid_tbl_len = RXE_PORT_GID_TBL_LEN;
port->attr.port_cap_flags = RXE_PORT_PORT_CAP_FLAGS;
port->attr.max_msg_sz = RXE_PORT_MAX_MSG_SZ;
@@ -147,8 +147,7 @@ static int rxe_init_port_param(struct rxe_port *port)
port->attr.active_width = RXE_PORT_ACTIVE_WIDTH;
port->attr.active_speed = RXE_PORT_ACTIVE_SPEED;
port->attr.phys_state = RXE_PORT_PHYS_STATE;
- port->mtu_cap =
- ib_mtu_enum_to_int(RXE_PORT_ACTIVE_MTU);
+ port->mtu_cap = ib_mtu_enum_to_int(IB_MTU_256);
port->subnet_prefix = cpu_to_be64(RXE_PORT_SUBNET_PREFIX);
return 0;
@@ -300,7 +299,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
mtu = eth_mtu_int_to_enum(ndev_mtu);
/* Make sure that new MTU in range */
- mtu = mtu ? min_t(enum ib_mtu, mtu, RXE_PORT_MAX_MTU) : IB_MTU_256;
+ mtu = mtu ? min_t(enum ib_mtu, mtu, IB_MTU_4096) : IB_MTU_256;
port->attr.active_mtu = mtu;
port->mtu_cap = ib_mtu_enum_to_int(mtu);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 83311dd07019..ea089cb091ad 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -191,6 +191,7 @@ static inline void reset_retry_counters(struct rxe_qp *qp)
{
qp->comp.retry_cnt = qp->attr.retry_cnt;
qp->comp.rnr_retry = qp->attr.rnr_retry;
+ qp->comp.started_retry = 0;
}
static inline enum comp_state check_psn(struct rxe_qp *qp,
@@ -253,6 +254,17 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
+ /* read retries of partial data may restart from
+ * read response first or response only.
+ */
+ if ((pkt->psn == wqe->first_psn &&
+ pkt->opcode ==
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
+ (wqe->first_psn == wqe->last_psn &&
+ pkt->opcode ==
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
+ break;
+
return COMPST_ERROR;
}
break;
@@ -499,11 +511,11 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- qp->comp.opcode = -1;
-
- if (pkt) {
- if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
- qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ if (pkt && wqe->state == wqe_state_pending) {
+ if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
+ qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
+ qp->comp.opcode = -1;
+ }
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
@@ -676,6 +688,20 @@ int rxe_completer(void *arg)
goto exit;
}
+ /* if we've started a retry, don't start another
+ * retry sequence, unless this is a timeout.
+ */
+ if (qp->comp.started_retry &&
+ !qp->comp.timeout_retry) {
+ if (pkt) {
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ skb = NULL;
+ }
+
+ goto done;
+ }
+
if (qp->comp.retry_cnt > 0) {
if (qp->comp.retry_cnt != 7)
qp->comp.retry_cnt--;
@@ -692,6 +718,7 @@ int rxe_completer(void *arg)
rxe_counter_inc(rxe,
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
+ qp->comp.started_retry = 1;
rxe_run_task(&qp->req.task, 1);
}
@@ -701,7 +728,7 @@ int rxe_completer(void *arg)
skb = NULL;
}
- goto exit;
+ goto done;
} else {
rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
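
Both retry hunks lean on psn_compare(), which orders PSNs in rxe's circular 24-bit space by shifting the difference into the sign bit. A self-contained sketch of the trick (assuming the 24-bit PSN width implied by BTH_PSN_MASK):

	#include <stdint.h>

	/* <0: a behind b, 0: equal, >0: a ahead of b, modulo 2^24 */
	static int32_t psn_compare_sketch(uint32_t psn_a, uint32_t psn_b)
	{
		return (int32_t)((psn_a - psn_b) << 8);
	}

The comparison stays correct across wraparound as long as the two PSNs are within 2^23 of each other, which in-flight request limits are expected to keep true.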
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 2ee4b08b00ea..a57276f2cb84 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -30,7 +30,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
+#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
@@ -97,7 +97,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context,
cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
if (err) {
- kvfree(cq->queue->buf);
+ vfree(cq->queue->buf);
kfree(cq->queue);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 87d14f7ef21b..afd53f57a62b 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -144,8 +144,7 @@ void rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt);
-int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, u32 *crc);
+int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
@@ -196,7 +195,7 @@ static inline int qp_mtu(struct rxe_qp *qp)
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
return qp->attr.path_mtu;
else
- return RXE_PORT_MAX_MTU;
+ return IB_MTU_4096;
}
static inline int rcv_wqe_size(int max_sge)
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index dff605fdf60f..9d3916b93f23 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -573,33 +573,20 @@ struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
int index = key >> 8;
- if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
- mem = rxe_pool_get_index(&rxe->mr_pool, index);
- if (!mem)
- goto err1;
- } else {
- goto err1;
+ mem = rxe_pool_get_index(&rxe->mr_pool, index);
+ if (!mem)
+ return NULL;
+
+ if (unlikely((type == lookup_local && mem->lkey != key) ||
+ (type == lookup_remote && mem->rkey != key) ||
+ mem->pd != pd ||
+ (access && !(access & mem->access)) ||
+ mem->state != RXE_MEM_STATE_VALID)) {
+ rxe_drop_ref(mem);
+ mem = NULL;
}
- if ((type == lookup_local && mem->lkey != key) ||
- (type == lookup_remote && mem->rkey != key))
- goto err2;
-
- if (mem->pd != pd)
- goto err2;
-
- if (access && !(access & mem->access))
- goto err2;
-
- if (mem->state != RXE_MEM_STATE_VALID)
- goto err2;
-
return mem;
-
-err2:
- rxe_drop_ref(mem);
-err1:
- return NULL;
}
int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 8094cbaa54a9..40e82e0f6c2d 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -72,7 +72,7 @@ struct rxe_dev *get_rxe_by_name(const char *name)
spin_lock_bh(&dev_list_lock);
list_for_each_entry(rxe, &rxe_dev_list, list) {
- if (!strcmp(name, rxe->ib_dev.name)) {
+ if (!strcmp(name, dev_name(&rxe->ib_dev.dev))) {
found = rxe;
break;
}
@@ -182,19 +182,11 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
#endif
-static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
+static struct dst_entry *rxe_find_route(struct net_device *ndev,
struct rxe_qp *qp,
struct rxe_av *av)
{
- const struct ib_gid_attr *attr;
struct dst_entry *dst = NULL;
- struct net_device *ndev;
-
- attr = rdma_get_gid_attr(&rxe->ib_dev, qp->attr.port_num,
- av->grh.sgid_index);
- if (IS_ERR(attr))
- return NULL;
- ndev = attr->ndev;
if (qp_type(qp) == IB_QPT_RC)
dst = sk_dst_get(qp->sk->sk);
@@ -229,7 +221,6 @@ static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
sk_dst_set(qp->sk->sk, dst);
}
}
- rdma_put_gid_attr(attr);
return dst;
}
@@ -377,8 +368,8 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
-static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, struct rxe_av *av)
+static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb,
+ struct rxe_av *av)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
@@ -387,7 +378,7 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
- dst = rxe_find_route(rxe, qp, av);
+ dst = rxe_find_route(skb->dev, qp, av);
if (!dst) {
pr_err("Host not reachable\n");
return -EHOSTUNREACH;
@@ -396,8 +387,8 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
if (!memcmp(saddr, daddr, sizeof(*daddr)))
pkt->mask |= RXE_LOOPBACK_MASK;
- prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
- htons(ROCE_V2_UDP_DPORT));
+ prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
+ cpu_to_be16(ROCE_V2_UDP_DPORT));
prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
av->grh.traffic_class, av->grh.hop_limit, df, xnet);
@@ -406,15 +397,15 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return 0;
}
-static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, struct rxe_av *av)
+static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb,
+ struct rxe_av *av)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
- dst = rxe_find_route(rxe, qp, av);
+ dst = rxe_find_route(skb->dev, qp, av);
if (!dst) {
pr_err("Host not reachable\n");
return -EHOSTUNREACH;
@@ -423,8 +414,8 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
if (!memcmp(saddr, daddr, sizeof(*daddr)))
pkt->mask |= RXE_LOOPBACK_MASK;
- prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
- htons(ROCE_V2_UDP_DPORT));
+ prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
+ cpu_to_be16(ROCE_V2_UDP_DPORT));
prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
av->grh.traffic_class,
@@ -434,16 +425,15 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return 0;
}
-int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, u32 *crc)
+int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
{
int err = 0;
struct rxe_av *av = rxe_get_av(pkt);
if (av->network_type == RDMA_NETWORK_IPV4)
- err = prepare4(rxe, pkt, skb, av);
+ err = prepare4(pkt, skb, av);
else if (av->network_type == RDMA_NETWORK_IPV6)
- err = prepare6(rxe, pkt, skb, av);
+ err = prepare6(pkt, skb, av);
*crc = rxe_icrc_hdr(pkt, skb);
@@ -501,11 +491,6 @@ void rxe_loopback(struct sk_buff *skb)
rxe_rcv(skb);
}
-static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
-{
- return rxe->port.port_guid == av->grh.dgid.global.interface_id;
-}
-
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt)
{
@@ -625,7 +610,7 @@ void rxe_port_up(struct rxe_dev *rxe)
port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
- pr_info("set %s active\n", rxe->ib_dev.name);
+ dev_info(&rxe->ib_dev.dev, "set active\n");
}
/* Caller must hold net_info_lock */
@@ -638,7 +623,7 @@ void rxe_port_down(struct rxe_dev *rxe)
port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
rxe_port_event(rxe, IB_EVENT_PORT_ERR);
- pr_info("set %s down\n", rxe->ib_dev.name);
+ dev_info(&rxe->ib_dev.dev, "set down\n");
}
static int rxe_notify(struct notifier_block *not_blk,
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 4555510d86c4..bdea899a58ac 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -90,7 +90,6 @@ enum rxe_device_param {
RXE_MAX_RES_RD_ATOM = 0x3f000,
RXE_MAX_QP_INIT_RD_ATOM = 128,
RXE_MAX_EE_INIT_RD_ATOM = 0,
- RXE_ATOMIC_CAP = 1,
RXE_MAX_EE = 0,
RXE_MAX_RDD = 0,
RXE_MAX_MW = 0,
@@ -139,9 +138,6 @@ enum rxe_device_param {
/* default/initial rxe port parameters */
enum rxe_port_param {
- RXE_PORT_STATE = IB_PORT_DOWN,
- RXE_PORT_MAX_MTU = IB_MTU_4096,
- RXE_PORT_ACTIVE_MTU = IB_MTU_256,
RXE_PORT_GID_TBL_LEN = 1024,
RXE_PORT_PORT_CAP_FLAGS = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP,
RXE_PORT_MAX_MSG_SZ = 0x800000,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index b4a8acc7bb7d..36b53fb94a49 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -207,7 +207,7 @@ int rxe_pool_init(
kref_init(&pool->ref_cnt);
- spin_lock_init(&pool->pool_lock);
+ rwlock_init(&pool->pool_lock);
if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
err = rxe_pool_init_index(pool,
@@ -222,7 +222,7 @@ int rxe_pool_init(
pool->key_size = rxe_type_info[type].key_size;
}
- pool->state = rxe_pool_valid;
+ pool->state = RXE_POOL_STATE_VALID;
out:
return err;
@@ -232,7 +232,7 @@ static void rxe_pool_release(struct kref *kref)
{
struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
- pool->state = rxe_pool_invalid;
+ pool->state = RXE_POOL_STATE_INVALID;
kfree(pool->table);
}
@@ -245,12 +245,12 @@ int rxe_pool_cleanup(struct rxe_pool *pool)
{
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
- pool->state = rxe_pool_invalid;
+ write_lock_irqsave(&pool->pool_lock, flags);
+ pool->state = RXE_POOL_STATE_INVALID;
if (atomic_read(&pool->num_elem) > 0)
pr_warn("%s pool destroyed with unfree'd elem\n",
pool_name(pool));
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
rxe_pool_put(pool);
@@ -336,10 +336,10 @@ void rxe_add_key(void *arg, void *key)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
insert_key(pool, elem);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_drop_key(void *arg)
@@ -348,9 +348,9 @@ void rxe_drop_key(void *arg)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
rb_erase(&elem->node, &pool->tree);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_add_index(void *arg)
@@ -359,10 +359,10 @@ void rxe_add_index(void *arg)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
elem->index = alloc_index(pool);
insert_index(pool, elem);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_drop_index(void *arg)
@@ -371,10 +371,10 @@ void rxe_drop_index(void *arg)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
clear_bit(elem->index - pool->min_index, pool->table);
rb_erase(&elem->node, &pool->tree);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void *rxe_alloc(struct rxe_pool *pool)
@@ -384,13 +384,13 @@ void *rxe_alloc(struct rxe_pool *pool)
might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
- spin_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != rxe_pool_valid) {
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_lock_irqsave(&pool->pool_lock, flags);
+ if (pool->state != RXE_POOL_STATE_VALID) {
+ read_unlock_irqrestore(&pool->pool_lock, flags);
return NULL;
}
kref_get(&pool->ref_cnt);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
kref_get(&pool->rxe->ref_cnt);
@@ -436,9 +436,9 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
struct rxe_pool_entry *elem = NULL;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != rxe_pool_valid)
+ if (pool->state != RXE_POOL_STATE_VALID)
goto out;
node = pool->tree.rb_node;
@@ -450,15 +450,14 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
node = node->rb_left;
else if (elem->index < index)
node = node->rb_right;
- else
+ else {
+ kref_get(&elem->ref_cnt);
break;
+ }
}
- if (node)
- kref_get(&elem->ref_cnt);
-
out:
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
return node ? elem : NULL;
}
@@ -469,9 +468,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
int cmp;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != rxe_pool_valid)
+ if (pool->state != RXE_POOL_STATE_VALID)
goto out;
node = pool->tree.rb_node;
@@ -494,6 +493,6 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
kref_get(&elem->ref_cnt);
out:
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
return node ? elem : NULL;
}
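
The conversion from a spinlock to an rwlock above lets concurrent rxe_pool_get_index()/rxe_pool_get_key() lookups run in parallel while the add/remove paths still get exclusive access. A userspace analogue with pthreads (not the kernel locking API):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t pool_lock = PTHREAD_RWLOCK_INITIALIZER;
static int pool_valid = 1;

static int pool_lookup(void)
{
	int ok;

	pthread_rwlock_rdlock(&pool_lock);   /* readers may overlap */
	ok = pool_valid;
	pthread_rwlock_unlock(&pool_lock);
	return ok;
}

static void pool_teardown(void)
{
	pthread_rwlock_wrlock(&pool_lock);   /* excludes all readers */
	pool_valid = 0;
	pthread_rwlock_unlock(&pool_lock);
}

int main(void)
{
	printf("lookup before teardown: %d\n", pool_lookup());
	pool_teardown();
	printf("lookup after teardown:  %d\n", pool_lookup());
	return 0;
}
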
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 47df28e43acf..aa4ba307097b 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -74,8 +74,8 @@ struct rxe_type_info {
extern struct rxe_type_info rxe_type_info[];
enum rxe_pool_state {
- rxe_pool_invalid,
- rxe_pool_valid,
+ RXE_POOL_STATE_INVALID,
+ RXE_POOL_STATE_VALID,
};
struct rxe_pool_entry {
@@ -90,7 +90,7 @@ struct rxe_pool_entry {
struct rxe_pool {
struct rxe_dev *rxe;
- spinlock_t pool_lock; /* pool spinlock */
+ rwlock_t pool_lock; /* protects pool add/del/search */
size_t elem_size;
struct kref ref_cnt;
void (*cleanup)(struct rxe_pool_entry *obj);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index c58452daffc7..b9710907dac2 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -34,6 +34,7 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
+#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
@@ -227,6 +228,16 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
qp->sk->sk->sk_user_data = qp;
+ /* pick a source UDP port number for this QP based on
+ * the source QPN. this spreads traffic for different QPs
+ * across different NIC RX queues (while using a single
+ * flow for a given QP to maintain packet order).
+ * the port number must be in the Dynamic Ports range
+ * (0xc000 - 0xffff).
+ */
+ qp->src_port = RXE_ROCE_V2_SPORT +
+ (hash_32_generic(qp_num(qp), 14) & 0x3fff);
+
qp->sq.max_wr = init->cap.max_send_wr;
qp->sq.max_sge = init->cap.max_send_sge;
qp->sq.max_inline = init->cap.max_inline_data;
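
The comment above explains the intent; as a rough userspace illustration, the source port is derived by hashing the QPN into 14 bits and offsetting into the dynamic range 0xc000-0xffff. The hash below is a stand-in modelled on the kernel's hash_32(); the range arithmetic is the point:

#include <stdint.h>
#include <stdio.h>

#define ROCE_V2_SPORT_BASE 0xc000u  /* start of the dynamic port range */

static uint32_t hash32(uint32_t v, unsigned int bits)
{
	/* multiplicative hash, keep the top 'bits' bits */
	return (v * 0x61C88647u) >> (32 - bits);
}

static uint16_t pick_src_port(uint32_t qpn)
{
	/* 14 hash bits span exactly 0xc000..0xffff */
	return ROCE_V2_SPORT_BASE + (hash32(qpn, 14) & 0x3fff);
}

int main(void)
{
	for (uint32_t qpn = 17; qpn < 22; qpn++)
		printf("qpn %u -> sport 0x%04x\n", qpn, pick_src_port(qpn));
	return 0;
}

A given QP always hashes to the same port, so its packets stay in one flow (preserving order), while different QPs spread across NIC RX queues.
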
@@ -247,7 +258,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
&qp->sq.queue->ip);
if (err) {
- kvfree(qp->sq.queue->buf);
+ vfree(qp->sq.queue->buf);
kfree(qp->sq.queue);
return err;
}
@@ -300,7 +311,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->rq.queue->buf, qp->rq.queue->buf_size,
&qp->rq.queue->ip);
if (err) {
- kvfree(qp->rq.queue->buf);
+ vfree(qp->rq.queue->buf);
kfree(qp->rq.queue);
return err;
}
@@ -408,8 +419,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
- IB_LINK_LAYER_ETHERNET)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
pr_warn("invalid mask or state for qp\n");
goto err1;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index d30dbac24583..5c29a1bb575a 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -122,7 +122,7 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
set_bad_pkey_cntr(port);
goto err1;
}
- } else if (qpn != 0) {
+ } else {
if (unlikely(!pkey_match(pkey,
port->pkey_tbl[qp->attr.pkey_index]
))) {
@@ -134,7 +134,7 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
}
if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
- qpn != 0 && pkt->mask) {
+ pkt->mask) {
u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
if (unlikely(deth_qkey(pkt) != qkey)) {
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 8be27238a86e..6c361d70d7cd 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -73,9 +73,6 @@ static void req_retry(struct rxe_qp *qp)
int npsn;
int first = 1;
- wqe = queue_head(qp->sq.queue);
- npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;
-
qp->req.wqe_index = consumer_index(qp->sq.queue);
qp->req.psn = qp->comp.psn;
qp->req.opcode = -1;
@@ -107,11 +104,17 @@ static void req_retry(struct rxe_qp *qp)
if (first) {
first = 0;
- if (mask & WR_WRITE_OR_SEND_MASK)
+ if (mask & WR_WRITE_OR_SEND_MASK) {
+ npsn = (qp->comp.psn - wqe->first_psn) &
+ BTH_PSN_MASK;
retry_first_write_send(qp, wqe, mask, npsn);
+ }
- if (mask & WR_READ_MASK)
+ if (mask & WR_READ_MASK) {
+ npsn = (wqe->dma.length - wqe->dma.resid) /
+ qp->mtu;
wqe->iova += npsn * qp->mtu;
+ }
}
wqe->state = wqe_state_posted;
@@ -435,7 +438,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
if (pkt->mask & RXE_RETH_MASK) {
reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
reth_set_va(pkt, wqe->iova);
- reth_set_len(pkt, wqe->dma.length);
+ reth_set_len(pkt, wqe->dma.resid);
}
if (pkt->mask & RXE_IMMDT_MASK)
@@ -476,7 +479,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
u32 *p;
int err;
- err = rxe_prepare(rxe, pkt, skb, &crc);
+ err = rxe_prepare(pkt, skb, &crc);
if (err)
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index aa5833318372..c962160292f4 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -637,7 +637,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
if (ack->mask & RXE_ATMACK_MASK)
atmack_set_orig(ack, qp->resp.atomic_orig);
- err = rxe_prepare(rxe, ack, skb, &crc);
+ err = rxe_prepare(ack, skb, &crc);
if (err) {
kfree_skb(skb);
return NULL;
@@ -682,6 +682,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
rxe_advance_resp_resource(qp);
res->type = RXE_READ_MASK;
+ res->replay = 0;
res->read.va = qp->resp.va;
res->read.va_org = qp->resp.va;
@@ -752,7 +753,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
state = RESPST_DONE;
} else {
qp->resp.res = NULL;
- qp->resp.opcode = -1;
+ if (!res->replay)
+ qp->resp.opcode = -1;
if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
qp->resp.psn = res->cur_psn;
state = RESPST_CLEANUP;
@@ -814,6 +816,7 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
/* next expected psn, read handles this separately */
qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ qp->resp.ack_psn = qp->resp.psn;
qp->resp.opcode = pkt->opcode;
qp->resp.status = IB_WC_SUCCESS;
@@ -1065,7 +1068,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
enum resp_states rc;
- u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;
+ u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;
if (pkt->mask & RXE_SEND_MASK ||
pkt->mask & RXE_WRITE_MASK) {
@@ -1108,6 +1111,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
res->state = (pkt->psn == res->first_psn) ?
rdatm_res_state_new :
rdatm_res_state_replay;
+ res->replay = 1;
/* Reset the resource, except length. */
res->read.va_org = iova;
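
The ack_psn/replay changes above lean on 24-bit PSN comparison, which must stay correct across wraparound at 2^24. A standalone sketch mirroring the driver's psn_compare() helper:

#include <stdint.h>
#include <stdio.h>

#define BTH_PSN_MASK 0x00ffffff

static int32_t psn_compare(uint32_t psn_a, uint32_t psn_b)
{
	/* shift the 24-bit modular difference up to the sign bit, so
	 * the sign of the result gives the ordering even across wrap */
	return (int32_t)((psn_a - psn_b) << 8);
}

int main(void)
{
	printf("%d\n", psn_compare(5, 3) > 0);             /* 1: 5 after 3 */
	printf("%d\n", psn_compare(2, BTH_PSN_MASK) > 0);  /* 1: wrapped */
	return 0;
}
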
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 0d6c04ba7fc3..c41a5fee81f7 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
@@ -129,13 +130,18 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
q->buf_size, &q->ip);
- if (err)
+ if (err) {
+ vfree(q->buf);
+ kfree(q);
return err;
+ }
if (uresp) {
if (copy_to_user(&uresp->srq_num, &srq->srq_num,
- sizeof(uresp->srq_num)))
+ sizeof(uresp->srq_num))) {
+ rxe_queue_cleanup(q);
return -EFAULT;
+ }
}
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
index d5ed7571128f..73a19f808e1b 100644
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -105,7 +105,7 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
}
rxe_set_port_state(ndev);
- pr_info("added %s to %s\n", rxe->ib_dev.name, intf);
+ dev_info(&rxe->ib_dev.dev, "added %s\n", intf);
err:
if (ndev)
dev_put(ndev);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index f5b1e0ad6142..9c19f2027511 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1148,18 +1148,21 @@ static ssize_t parent_show(struct device *device,
static DEVICE_ATTR_RO(parent);
-static struct device_attribute *rxe_dev_attributes[] = {
- &dev_attr_parent,
+static struct attribute *rxe_dev_attributes[] = {
+ &dev_attr_parent.attr,
+ NULL
+};
+
+static const struct attribute_group rxe_attr_group = {
+ .attrs = rxe_dev_attributes,
};
int rxe_register_device(struct rxe_dev *rxe)
{
int err;
- int i;
struct ib_device *dev = &rxe->ib_dev;
struct crypto_shash *tfm;
- strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
dev->owner = THIS_MODULE;
@@ -1260,26 +1263,16 @@ int rxe_register_device(struct rxe_dev *rxe)
}
rxe->tfm = tfm;
+ rdma_set_device_sysfs_group(dev, &rxe_attr_group);
dev->driver_id = RDMA_DRIVER_RXE;
- err = ib_register_device(dev, NULL);
+ err = ib_register_device(dev, "rxe%d", NULL);
if (err) {
pr_warn("%s failed with error %d\n", __func__, err);
goto err1;
}
- for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
- err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
- if (err) {
- pr_warn("%s failed with error %d for attr number %d\n",
- __func__, err, i);
- goto err2;
- }
- }
-
return 0;
-err2:
- ib_unregister_device(dev);
err1:
crypto_free_shash(rxe->tfm);
@@ -1288,12 +1281,8 @@ err1:
int rxe_unregister_device(struct rxe_dev *rxe)
{
- int i;
struct ib_device *dev = &rxe->ib_dev;
- for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
- device_remove_file(&dev->dev, rxe_dev_attributes[i]);
-
ib_unregister_device(dev);
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index af1470d29391..82e670d6eeea 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -158,6 +158,7 @@ struct rxe_comp_info {
int opcode;
int timeout;
int timeout_retry;
+ int started_retry;
u32 retry_cnt;
u32 rnr_retry;
struct rxe_task task;
@@ -171,6 +172,7 @@ enum rdatm_res_state {
struct resp_res {
int type;
+ int replay;
u32 first_psn;
u32 last_psn;
u32 cur_psn;
@@ -195,6 +197,7 @@ struct rxe_resp_info {
enum rxe_qp_state state;
u32 msn;
u32 psn;
+ u32 ack_psn;
int opcode;
int drop_msg;
int goto_error;
@@ -248,6 +251,7 @@ struct rxe_qp {
struct socket *sk;
u32 dst_cookie;
+ u16 src_port;
struct rxe_av pri_av;
struct rxe_av alt_av;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1abe3c62f106..1da119d901a9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -499,8 +499,10 @@ void ipoib_reap_ah(struct work_struct *work);
struct ipoib_path *__path_find(struct net_device *dev, void *gid);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
-struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
- const char *format);
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
+ const char *format);
+int ipoib_intf_init(struct ib_device *hca, u8 port, const char *format,
+ struct net_device *dev);
void ipoib_ib_tx_timer_func(struct timer_list *t);
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
@@ -531,6 +533,8 @@ int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req);
+struct rtnl_link_ops *ipoib_get_link_ops(void);
+
static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req)
{
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 3d5424f335cb..0428e01e8f69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1438,11 +1438,15 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
- if (skb->protocol == htons(ETH_P_IP))
+ if (skb->protocol == htons(ETH_P_IP)) {
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ }
#if IS_ENABLED(CONFIG_IPV6)
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ }
#endif
dev_kfree_skb_any(skb);
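
The memsets added above clear the skb control block before the packet is handed to icmp_send()/icmpv6_send(), which interpret IPCB/IP6CB and could otherwise act on stale state left there by IPoIB's own use of the area. A standalone sketch of the pattern (types are illustrative):

#include <stdio.h>
#include <string.h>

struct cb { int flags; void *opt; };

union scratch {           /* per-buffer scratch area, overlaid per layer */
	char raw[48];
	struct cb cb;
};

int main(void)
{
	union scratch s;

	s.cb.flags = 0xdead;             /* state left by the previous layer */
	memset(s.raw, 0, sizeof(s.raw)); /* what the fix adds before reuse */
	printf("flags after clear: %#x\n", s.cb.flags);
	return 0;
}
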
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e3d28f9ad9c0..8710214594d8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -243,7 +243,8 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
- if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
+ if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
+ new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
return -EINVAL;
priv->admin_mtu = new_mtu;
@@ -1880,6 +1881,8 @@ static int ipoib_parent_init(struct net_device *ndev)
sizeof(union ib_gid));
SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
+ priv->dev->dev_port = priv->port - 1;
+ /* Let's set this one too for backwards compatibility. */
priv->dev->dev_id = priv->port - 1;
return 0;
@@ -2115,82 +2118,58 @@ static const struct net_device_ops ipoib_netdev_default_pf = {
.ndo_stop = ipoib_ib_dev_stop_default,
};
-static struct net_device
-*ipoib_create_netdev_default(struct ib_device *hca,
- const char *name,
- unsigned char name_assign_type,
- void (*setup)(struct net_device *))
+static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
+ const char *name)
{
struct net_device *dev;
- struct rdma_netdev *rn;
- dev = alloc_netdev((int)sizeof(struct rdma_netdev),
- name,
- name_assign_type, setup);
- if (!dev)
- return NULL;
-
- rn = netdev_priv(dev);
-
- rn->send = ipoib_send;
- rn->attach_mcast = ipoib_mcast_attach;
- rn->detach_mcast = ipoib_mcast_detach;
- rn->hca = hca;
- dev->netdev_ops = &ipoib_netdev_default_pf;
-
- return dev;
-}
-
-static struct net_device *ipoib_get_netdev(struct ib_device *hca, u8 port,
- const char *name)
-{
- struct net_device *dev;
-
- if (hca->alloc_rdma_netdev) {
- dev = hca->alloc_rdma_netdev(hca, port,
- RDMA_NETDEV_IPOIB, name,
- NET_NAME_UNKNOWN,
- ipoib_setup_common);
- if (IS_ERR_OR_NULL(dev) && PTR_ERR(dev) != -EOPNOTSUPP)
- return NULL;
- }
-
- if (!hca->alloc_rdma_netdev || PTR_ERR(dev) == -EOPNOTSUPP)
- dev = ipoib_create_netdev_default(hca, name, NET_NAME_UNKNOWN,
- ipoib_setup_common);
+ dev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
+ NET_NAME_UNKNOWN, ipoib_setup_common);
+ if (!IS_ERR(dev) || PTR_ERR(dev) != -EOPNOTSUPP)
+ return dev;
+ dev = alloc_netdev(sizeof(struct rdma_netdev), name, NET_NAME_UNKNOWN,
+ ipoib_setup_common);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
return dev;
}
-struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
- const char *name)
+int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
+ struct net_device *dev)
{
- struct net_device *dev;
+ struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_dev_priv *priv;
- struct rdma_netdev *rn;
+ int rc;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
- return NULL;
+ return -ENOMEM;
priv->ca = hca;
priv->port = port;
- dev = ipoib_get_netdev(hca, port, name);
- if (!dev)
- goto free_priv;
+ rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
+ NET_NAME_UNKNOWN, ipoib_setup_common, dev);
+ if (rc) {
+ if (rc != -EOPNOTSUPP)
+ goto out;
+
+ dev->netdev_ops = &ipoib_netdev_default_pf;
+ rn->send = ipoib_send;
+ rn->attach_mcast = ipoib_mcast_attach;
+ rn->detach_mcast = ipoib_mcast_detach;
+ rn->hca = hca;
+ }
priv->rn_ops = dev->netdev_ops;
- /* fixme : should be after the query_cap */
- if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
+ if (hca->attrs.device_cap_flags & IB_DEVICE_VIRTUAL_FUNCTION)
dev->netdev_ops = &ipoib_netdev_ops_vf;
else
dev->netdev_ops = &ipoib_netdev_ops_pf;
- rn = netdev_priv(dev);
rn->clnt_priv = priv;
-
/*
* Only the child register_netdev flows can handle priv_destructor
* being set, so we force it to NULL here and handle manually until it
@@ -2201,10 +2180,35 @@ struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
ipoib_build_priv(dev);
- return priv;
-free_priv:
+ return 0;
+
+out:
kfree(priv);
- return NULL;
+ return rc;
+}
+
+struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
+ const char *name)
+{
+ struct net_device *dev;
+ int rc;
+
+ dev = ipoib_alloc_netdev(hca, port, name);
+ if (IS_ERR(dev))
+ return dev;
+
+ rc = ipoib_intf_init(hca, port, name, dev);
+ if (rc) {
+ free_netdev(dev);
+ return ERR_PTR(rc);
+ }
+
+ /*
+ * Upon success the caller must ensure ipoib_intf_free is called or
+ * register_netdevice succeeded and priv_destructor is set to
+ * ipoib_intf_free.
+ */
+ return dev;
}
void ipoib_intf_free(struct net_device *dev)
@@ -2384,19 +2388,51 @@ int ipoib_add_pkey_attr(struct net_device *dev)
return device_create_file(&dev->dev, &dev_attr_pkey);
}
+/*
+ * We erroneously exposed the iface's port number in the dev_id
+ * sysfs field long after dev_port was introduced for that purpose[1],
+ * and we need to stop everyone from relying on that.
+ * Let's overload the show routine for the dev_id file here
+ * to gently bring the issue up.
+ *
+ * [1] https://www.spinics.net/lists/netdev/msg272123.html
+ */
+static ssize_t dev_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+
+ if (ndev->dev_id == ndev->dev_port)
+ netdev_info_once(ndev,
+ "\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
+ current->comm);
+
+ return sprintf(buf, "%#x\n", ndev->dev_id);
+}
+static DEVICE_ATTR_RO(dev_id);
+
+int ipoib_intercept_dev_id_attr(struct net_device *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_dev_id);
+ return device_create_file(&dev->dev, &dev_attr_dev_id);
+}
+
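
For consumers, the takeaway from the dev_id interception above is to read dev_port when identifying an interface's port. A small userspace sketch (the interface name "ib0" is a placeholder; the sysfs paths are standard):

#include <stdio.h>

static int read_attr(const char *ifname, const char *attr)
{
	char path[128];
	FILE *f;
	int val = -1;

	snprintf(path, sizeof(path), "/sys/class/net/%s/%s", ifname, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%i", &val) != 1)  /* %i accepts decimal or 0x-hex */
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	const char *ifname = "ib0"; /* hypothetical IPoIB interface */

	printf("dev_port = %#x\n", read_attr(ifname, "dev_port"));
	printf("dev_id   = %#x\n", read_attr(ifname, "dev_id"));
	return 0;
}
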
static struct net_device *ipoib_add_port(const char *format,
struct ib_device *hca, u8 port)
{
+ struct rtnl_link_ops *ops = ipoib_get_link_ops();
+ struct rdma_netdev_alloc_params params;
struct ipoib_dev_priv *priv;
struct net_device *ndev;
int result;
- priv = ipoib_intf_alloc(hca, port, format);
- if (!priv) {
- pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
- return ERR_PTR(-ENOMEM);
+ ndev = ipoib_intf_alloc(hca, port, format);
+ if (IS_ERR(ndev)) {
+ pr_warn("%s, %d: ipoib_intf_alloc failed %ld\n", hca->name, port,
+ PTR_ERR(ndev));
+ return ndev;
}
- ndev = priv->dev;
+ priv = ipoib_priv(ndev);
INIT_IB_EVENT_HANDLER(&priv->event_handler,
priv->ca, ipoib_event);
@@ -2417,6 +2453,14 @@ static struct net_device *ipoib_add_port(const char *format,
return ERR_PTR(result);
}
+ if (hca->rdma_netdev_get_params) {
+ int rc = hca->rdma_netdev_get_params(hca, port,
+ RDMA_NETDEV_IPOIB,
+ &params);
+
+ if (!rc && ops->priv_size < params.sizeof_priv)
+ ops->priv_size = params.sizeof_priv;
+ }
/*
* We cannot set priv_destructor before register_netdev because we
* need priv to be always valid during the error flow to execute
@@ -2425,6 +2469,8 @@ static struct net_device *ipoib_add_port(const char *format,
*/
ndev->priv_destructor = ipoib_intf_free;
+ if (ipoib_intercept_dev_id_attr(ndev))
+ goto sysfs_failed;
if (ipoib_cm_add_mode_attr(ndev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(ndev))
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index d4d553a51fa9..38c984d16996 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -122,12 +122,26 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
} else
child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
+ err = ipoib_intf_init(ppriv->ca, ppriv->port, dev->name, dev);
+ if (err) {
+ ipoib_warn(ppriv, "failed to initialize pkey device\n");
+ return err;
+ }
+
err = __ipoib_vlan_add(ppriv, ipoib_priv(dev),
child_pkey, IPOIB_RTNL_CHILD);
+ if (err)
+ return err;
- if (!err && data)
+ if (data) {
err = ipoib_changelink(dev, tb, data, extack);
- return err;
+ if (err) {
+ unregister_netdevice(dev);
+ return err;
+ }
+ }
+
+ return 0;
}
static size_t ipoib_get_size(const struct net_device *dev)
@@ -149,6 +163,11 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
.fill_info = ipoib_fill_info,
};
+struct rtnl_link_ops *ipoib_get_link_ops(void)
+{
+ return &ipoib_link_ops;
+}
+
int __init ipoib_netlink_init(void)
{
return rtnl_link_register(&ipoib_link_ops);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 9f36ca786df8..1e88213459f2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -277,7 +277,7 @@ void ipoib_event(struct ib_event_handler *handler,
return;
ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
- record->device->name, record->element.port_num);
+ dev_name(&record->device->dev), record->element.port_num);
if (record->event == IB_EVENT_SM_CHANGE ||
record->event == IB_EVENT_CLIENT_REREGISTER) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 341753fbda54..8ac8e18fbe0c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -85,7 +85,7 @@ static bool is_child_unique(struct ipoib_dev_priv *ppriv,
/*
* NOTE: If this function fails then the priv->dev will remain valid, however
- * priv can have been freed and must not be touched by caller in the error
+ * priv will have been freed and must not be touched by caller in the error
* case.
*
* If (ndev->reg_state == NETREG_UNINITIALIZED) then it is up to the caller to
@@ -101,6 +101,12 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
ASSERT_RTNL();
/*
+ * We do not need to touch priv if register_netdevice fails, so just
+ * always use this flow.
+ */
+ ndev->priv_destructor = ipoib_intf_free;
+
+ /*
* Racing with unregister of the parent must be prevented by the
* caller.
*/
@@ -120,9 +126,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
goto out_early;
}
- /* We do not need to touch priv if register_netdevice fails */
- ndev->priv_destructor = ipoib_intf_free;
-
result = register_netdevice(ndev);
if (result) {
ipoib_warn(priv, "failed to initialize; error %i", result);
@@ -182,12 +185,12 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
snprintf(intf_name, sizeof(intf_name), "%s.%04x",
ppriv->dev->name, pkey);
- priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
- if (!priv) {
- result = -ENOMEM;
+ ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
+ if (IS_ERR(ndev)) {
+ result = PTR_ERR(ndev);
goto out;
}
- ndev = priv->dev;
+ priv = ipoib_priv(ndev);
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 2f6388596f88..96af06cfe0af 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -589,13 +589,19 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
ib_conn->post_recv_buf_count--;
}
-static inline void
+static inline int
iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
- if (likely(rkey == desc->rsc.mr->rkey))
+ if (likely(rkey == desc->rsc.mr->rkey)) {
desc->rsc.mr_valid = 0;
- else if (likely(rkey == desc->pi_ctx->sig_mr->rkey))
+ } else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) {
desc->pi_ctx->sig_mr_valid = 0;
+ } else {
+ iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
+ return -EINVAL;
+ }
+
+ return 0;
}
static int
@@ -623,12 +629,14 @@ iser_check_remote_inv(struct iser_conn *iser_conn,
if (iser_task->dir[ISER_DIR_IN]) {
desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
- iser_inv_desc(desc, rkey);
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
}
if (iser_task->dir[ISER_DIR_OUT]) {
desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
- iser_inv_desc(desc, rkey);
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
}
} else {
iser_err("failed to get task for itt=%d\n", hdr->itt);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b686a4aaffe8..946b623ba5eb 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -55,7 +55,7 @@ static void iser_event_handler(struct ib_event_handler *handler,
{
iser_err("async event %s (%d) on device %s port %d\n",
ib_event_msg(event->event), event->event,
- event->device->name, event->element.port_num);
+ dev_name(&event->device->dev), event->element.port_num);
}
/**
@@ -85,7 +85,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);
iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
- device->comps_used, ib_dev->name,
+ device->comps_used, dev_name(&ib_dev->dev),
ib_dev->num_comp_vectors, max_cqe);
device->pd = ib_alloc_pd(ib_dev,
@@ -468,7 +468,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
iser_conn->max_cmds =
ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr);
iser_dbg("device %s supports max_send_wr %d\n",
- device->ib_device->name, ib_dev->attrs.max_qp_wr);
+ dev_name(&device->ib_device->dev),
+ ib_dev->attrs.max_qp_wr);
}
}
@@ -764,7 +765,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
IB_DEVICE_SIGNATURE_HANDOVER)) {
iser_warn("T10-PI requested but not supported on %s, "
"continue without T10-PI\n",
- ib_conn->device->ib_device->name);
+ dev_name(&ib_conn->device->ib_device->dev));
ib_conn->pi_support = false;
} else {
ib_conn->pi_support = true;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index f39670c5c25c..e3dd13798d79 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -262,7 +262,7 @@ isert_alloc_comps(struct isert_device *device)
isert_info("Using %d CQs, %s supports %d vectors support "
"pi_capable %d\n",
- device->comps_used, device->ib_device->name,
+ device->comps_used, dev_name(&device->ib_device->dev),
device->ib_device->num_comp_vectors,
device->pi_capable);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
index 267da8215e08..31cd361416ac 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -351,7 +351,8 @@ static uint32_t opa_vnic_get_dlid(struct opa_vnic_adapter *adapter,
if (unlikely(!dlid))
v_warn("Null dlid in MAC address\n");
} else if (def_port != OPA_VNIC_INVALID_PORT) {
- dlid = info->vesw.u_ucast_dlid[def_port];
+ if (def_port < OPA_VESW_MAX_NUM_DEF_PORT)
+ dlid = info->vesw.u_ucast_dlid[def_port];
}
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 15711dcc6f58..d119d9afa845 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -888,7 +888,8 @@ static void opa_vnic_event(struct ib_event_handler *handler,
return;
c_dbg("OPA_VNIC received event %d on device %s port %d\n",
- record->event, record->device->name, record->element.port_num);
+ record->event, dev_name(&record->device->dev),
+ record->element.port_num);
if (record->event == IB_EVENT_PORT_ERR)
idr_for_each(&port->vport_idr, vema_disable_vport, NULL);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 444d16520506..eed0eb3bb04c 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1330,17 +1330,8 @@ static void srp_terminate_io(struct srp_rport *rport)
{
struct srp_target_port *target = rport->lld_data;
struct srp_rdma_ch *ch;
- struct Scsi_Host *shost = target->scsi_host;
- struct scsi_device *sdev;
int i, j;
- /*
- * Invoking srp_terminate_io() while srp_queuecommand() is running
- * is not safe. Hence the warning statement below.
- */
- shost_for_each_device(sdev, shost)
- WARN_ON_ONCE(sdev->request_queue->request_fn_active);
-
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
@@ -2951,7 +2942,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
- int i;
+ int i, j;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2965,8 +2956,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
- for (i = 0; i < target->req_ring_size; ++i) {
- struct srp_request *req = &ch->req_ring[i];
+ for (j = 0; j < target->req_ring_size; ++j) {
+ struct srp_request *req = &ch->req_ring[j];
srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
}
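
The i/j change above fixes a classic shadowed-loop-counter bug: the inner loop reused i, so the outer loop over channels terminated after the first pass whenever the request ring was at least as large as the channel count. A standalone reproduction with illustrative sizes:

#include <stdio.h>

#define CH_COUNT 3
#define REQ_RING 4

int main(void)
{
	int i, j, visited = 0;

	/* buggy: inner loop clobbers i, so the outer loop runs once */
	for (i = 0; i < CH_COUNT; i++)
		for (i = 0; i < REQ_RING; i++)
			visited++;
	printf("buggy: visited %d (expected %d)\n",
	       visited, CH_COUNT * REQ_RING);

	/* fixed: distinct counters */
	visited = 0;
	for (i = 0; i < CH_COUNT; i++)
		for (j = 0; j < REQ_RING; j++)
			visited++;
	printf("fixed: visited %d\n", visited);
	return 0;
}
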
@@ -3124,7 +3115,8 @@ static ssize_t show_local_ib_device(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
+ return sprintf(buf, "%s\n",
+ dev_name(&target->srp_host->srp_dev->dev->dev));
}
static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
@@ -3987,7 +3979,7 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
{
struct srp_host *host = container_of(dev, struct srp_host, dev);
- return sprintf(buf, "%s\n", host->srp_dev->dev->name);
+ return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
@@ -4019,7 +4011,8 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
host->dev.class = &srp_class;
host->dev.parent = device->dev->dev.parent;
- dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
+ dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
+ port);
if (device_register(&host->dev))
goto free_host;
@@ -4095,7 +4088,7 @@ static void srp_add_one(struct ib_device *device)
srp_dev->mr_max_size = srp_dev->mr_page_size *
srp_dev->max_pages_per_mr;
pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
- device->name, mr_page_shift, attr->max_mr_size,
+ dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
attr->max_fast_reg_page_list_len,
srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index f37cbad022a2..2357aa727dcf 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -148,7 +148,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
return;
pr_debug("ASYNC event= %d on device= %s\n", event->event,
- sdev->device->name);
+ dev_name(&sdev->device->dev));
switch (event->event) {
case IB_EVENT_PORT_ERR:
@@ -1941,7 +1941,8 @@ static void __srpt_close_all_ch(struct srpt_port *sport)
if (srpt_disconnect_ch(ch) >= 0)
pr_info("Closing channel %s because target %s_%d has been disabled\n",
ch->sess_name,
- sport->sdev->device->name, sport->port);
+ dev_name(&sport->sdev->device->dev),
+ sport->port);
srpt_close_ch(ch);
}
}
@@ -2127,7 +2128,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
if (!sport->enabled) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
- sport->sdev->device->name, port_num);
+ dev_name(&sport->sdev->device->dev), port_num);
goto reject;
}
@@ -2267,7 +2268,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
rej->reason = cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
- sdev->device->name, port_num);
+ dev_name(&sdev->device->dev), port_num);
mutex_unlock(&sport->mutex);
goto reject;
}
@@ -2708,7 +2709,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
break;
}
- if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
+ if (WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))
return;
/* For read commands, transfer the data to the initiator. */
@@ -2842,7 +2843,7 @@ static int srpt_release_sport(struct srpt_port *sport)
while (wait_event_timeout(sport->ch_releaseQ,
srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
pr_info("%s_%d: waiting for session unregistration ...\n",
- sport->sdev->device->name, sport->port);
+ dev_name(&sport->sdev->device->dev), sport->port);
rcu_read_lock();
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
@@ -2932,7 +2933,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
}
pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
- sdev->device->attrs.max_srq_wr, device->name);
+ sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
sdev->ioctx_ring = (struct srpt_recv_ioctx **)
srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
@@ -2965,8 +2966,8 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
} else if (use_srq && !sdev->srq) {
ret = srpt_alloc_srq(sdev);
}
- pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name,
- sdev->use_srq, ret);
+ pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
+ dev_name(&device->dev), sdev->use_srq, ret);
return ret;
}
@@ -3052,7 +3053,7 @@ static void srpt_add_one(struct ib_device *device)
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
- sdev->device->name, i);
+ dev_name(&sdev->device->dev), i);
goto err_event;
}
}
@@ -3063,7 +3064,7 @@ static void srpt_add_one(struct ib_device *device)
out:
ib_set_client_data(device, &srpt_client, sdev);
- pr_debug("added %s.\n", device->name);
+ pr_debug("added %s.\n", dev_name(&device->dev));
return;
err_event:
@@ -3078,7 +3079,7 @@ free_dev:
kfree(sdev);
err:
sdev = NULL;
- pr_info("%s(%s) failed.\n", __func__, device->name);
+ pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
goto out;
}
@@ -3093,7 +3094,8 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
int i;
if (!sdev) {
- pr_info("%s(%s): nothing to do.\n", __func__, device->name);
+ pr_info("%s(%s): nothing to do.\n", __func__,
+ dev_name(&device->dev));
return;
}
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 370206f987f9..f48369d6f3a0 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -564,6 +564,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
input_inject_event(&evdev->handle,
event.type, event.code, event.value);
+ cond_resched();
}
out:
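
This series adds cond_resched() in the write loops of evdev, uinput, and mousedev (see the matching hunks below) so that a huge injected-event buffer cannot monopolize a CPU. A userspace analogue, with sched_yield() standing in for the kernel's cond_resched():

#include <sched.h>
#include <stdio.h>

int main(void)
{
	for (long i = 0; i < 1000000; i++) {
		/* ... handle one injected event ... */
		if ((i & 0xfff) == 0)
			sched_yield(); /* let other runnable tasks in */
	}
	printf("done\n");
	return 0;
}
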
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index cd620e009bad..d4b9db487b16 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -231,6 +231,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -530,6 +531,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index 6f62da2909ec..6caee807cafa 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -75,8 +75,7 @@ MODULE_LICENSE("GPL");
*/
-static unsigned char atakbd_keycode[0x72] = { /* American layout */
- [0] = KEY_GRAVE,
+static unsigned char atakbd_keycode[0x73] = { /* American layout */
[1] = KEY_ESC,
[2] = KEY_1,
[3] = KEY_2,
@@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
[38] = KEY_L,
[39] = KEY_SEMICOLON,
[40] = KEY_APOSTROPHE,
- [41] = KEY_BACKSLASH, /* FIXME, '#' */
+ [41] = KEY_GRAVE,
[42] = KEY_LEFTSHIFT,
- [43] = KEY_GRAVE, /* FIXME: '~' */
+ [43] = KEY_BACKSLASH,
[44] = KEY_Z,
[45] = KEY_X,
[46] = KEY_C,
@@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
[66] = KEY_F8,
[67] = KEY_F9,
[68] = KEY_F10,
- [69] = KEY_ESC,
- [70] = KEY_DELETE,
- [71] = KEY_KP7,
- [72] = KEY_KP8,
- [73] = KEY_KP9,
+ [71] = KEY_HOME,
+ [72] = KEY_UP,
[74] = KEY_KPMINUS,
- [75] = KEY_KP4,
- [76] = KEY_KP5,
- [77] = KEY_KP6,
+ [75] = KEY_LEFT,
+ [77] = KEY_RIGHT,
[78] = KEY_KPPLUS,
- [79] = KEY_KP1,
- [80] = KEY_KP2,
- [81] = KEY_KP3,
- [82] = KEY_KP0,
- [83] = KEY_KPDOT,
- [90] = KEY_KPLEFTPAREN,
- [91] = KEY_KPRIGHTPAREN,
- [92] = KEY_KPASTERISK, /* FIXME */
- [93] = KEY_KPASTERISK,
- [94] = KEY_KPPLUS,
- [95] = KEY_HELP,
+ [80] = KEY_DOWN,
+ [82] = KEY_INSERT,
+ [83] = KEY_DELETE,
[96] = KEY_102ND,
- [97] = KEY_KPASTERISK, /* FIXME */
- [98] = KEY_KPSLASH,
+ [97] = KEY_UNDO,
+ [98] = KEY_HELP,
[99] = KEY_KPLEFTPAREN,
[100] = KEY_KPRIGHTPAREN,
[101] = KEY_KPSLASH,
[102] = KEY_KPASTERISK,
- [103] = KEY_UP,
- [104] = KEY_KPASTERISK, /* FIXME */
- [105] = KEY_LEFT,
- [106] = KEY_RIGHT,
- [107] = KEY_KPASTERISK, /* FIXME */
- [108] = KEY_DOWN,
- [109] = KEY_KPASTERISK, /* FIXME */
- [110] = KEY_KPASTERISK, /* FIXME */
- [111] = KEY_KPASTERISK, /* FIXME */
- [112] = KEY_KPASTERISK, /* FIXME */
- [113] = KEY_KPASTERISK /* FIXME */
+ [103] = KEY_KP7,
+ [104] = KEY_KP8,
+ [105] = KEY_KP9,
+ [106] = KEY_KP4,
+ [107] = KEY_KP5,
+ [108] = KEY_KP6,
+ [109] = KEY_KP1,
+ [110] = KEY_KP2,
+ [111] = KEY_KP3,
+ [112] = KEY_KP0,
+ [113] = KEY_KPDOT,
+ [114] = KEY_KPENTER,
};
static struct input_dev *atakbd_dev;
@@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev;
static void atakbd_interrupt(unsigned char scancode, char down)
{
- if (scancode < 0x72) { /* scancodes < 0xf2 are keys */
+ if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
// report raw events here?
scancode = atakbd_keycode[scancode];
- if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
- input_report_key(atakbd_dev, scancode, 1);
- input_report_key(atakbd_dev, scancode, 0);
- input_sync(atakbd_dev);
- } else {
- input_report_key(atakbd_dev, scancode, down);
- input_sync(atakbd_dev);
- }
- } else /* scancodes >= 0xf2 are mouse data, most likely */
+ input_report_key(atakbd_dev, scancode, down);
+ input_sync(atakbd_dev);
+ } else /* scancodes >= 0xf3 are mouse data, most likely */
printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
return;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 96a887f33698..8ec483e8688b 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
min = abs->minimum;
max = abs->maximum;
- if ((min != 0 || max != 0) && max <= min) {
+ if ((min != 0 || max != 0) && max < min) {
printk(KERN_DEBUG
"%s: invalid abs[%02x] min:%d max:%d\n",
UINPUT_NAME, code, min, max);
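
Relaxing max <= min to max < min above means a degenerate axis with max == min is now accepted; only an inverted range is rejected. A standalone approximation of the check (not the uinput code):

#include <stdio.h>

static int validate_absinfo(int min, int max)
{
	if ((min != 0 || max != 0) && max < min) {
		fprintf(stderr, "invalid abs range min:%d max:%d\n", min, max);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("min=0 max=255 -> %d\n", validate_absinfo(0, 255)); /*  0 */
	printf("min=5 max=5   -> %d\n", validate_absinfo(5, 5));   /*  0 now */
	printf("min=5 max=1   -> %d\n", validate_absinfo(5, 1));   /* -1 */
	return 0;
}
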
@@ -598,6 +598,7 @@ static ssize_t uinput_inject_events(struct uinput_device *udev,
input_event(udev->dev, ev.type, ev.code, ev.value);
bytes += input_event_size();
+ cond_resched();
}
return bytes;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index f5ae24865355..b0f9d19b3410 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1346,6 +1346,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
{ "ELAN0618", 0 },
+ { "ELAN061C", 0 },
{ "ELAN061D", 0 },
{ "ELAN0622", 0 },
{ "ELAN1000", 0 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 44f57cf6675b..2d95e8d93cc7 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
static const char * const middle_button_pnp_ids[] = {
"LEN2131", /* ThinkPad P52 w/ NFC */
"LEN2132", /* ThinkPad P52 */
+ "LEN2133", /* ThinkPad P72 w/ NFC */
+ "LEN2134", /* ThinkPad P72 */
NULL
};
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index e08228061bcd..412fa71245af 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -707,6 +707,7 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer,
mousedev_generate_response(client, c);
spin_unlock_irq(&client->packet_lock);
+ cond_resched();
}
kill_fasync(&client->fasync, SIGIO, POLL_IN);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index b8bc71569349..95a78ccbd847 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1395,15 +1395,26 @@ static void __init i8042_register_ports(void)
for (i = 0; i < I8042_NUM_PORTS; i++) {
struct serio *serio = i8042_ports[i].serio;
- if (serio) {
- printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
- serio->name,
- (unsigned long) I8042_DATA_REG,
- (unsigned long) I8042_COMMAND_REG,
- i8042_ports[i].irq);
- serio_register_port(serio);
- device_set_wakeup_capable(&serio->dev, true);
- }
+ if (!serio)
+ continue;
+
+ printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
+ serio->name,
+ (unsigned long) I8042_DATA_REG,
+ (unsigned long) I8042_COMMAND_REG,
+ i8042_ports[i].irq);
+ serio_register_port(serio);
+ device_set_wakeup_capable(&serio->dev, true);
+
+ /*
+ * On platforms using suspend-to-idle, allow the keyboard to
+ * wake up the system from sleep by enabling keyboard wakeups
+ * by default. This is consistent with keyboard wakeup
+ * behavior on many platforms using suspend-to-RAM (ACPI S3)
+ * by default.
+ */
+ if (pm_suspend_via_s2idle() && i == I8042_KBD_PORT_NO)
+ device_set_wakeup_enable(&serio->dev, true);
}
}
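
The rationale above: on suspend-to-idle platforms the i8042 keyboard port is now wakeup-enabled by default, matching long-standing S3 behavior. A userspace check for whether s2idle is the platform default (standard sysfs path, simplified parsing):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[64] = "";
	FILE *f = fopen("/sys/power/mem_sleep", "r");

	if (f) {
		if (!fgets(buf, sizeof(buf), f))
			buf[0] = '\0';
		fclose(f);
	}
	/* the bracketed entry, e.g. "[s2idle] deep", marks the default */
	printf("mem_sleep: %s", buf[0] ? buf : "(unavailable)\n");
	printf("s2idle default: %s\n",
	       strstr(buf, "[s2idle]") ? "yes" : "no");
	return 0;
}
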
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index f8ead9f9c77e..5977b8a34ebe 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -226,7 +226,7 @@ static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
#ifdef CONFIG_COMPAT
#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t)
-static long serport_ldisc_compat_ioctl(struct tty_struct *tty,
+static int serport_ldisc_compat_ioctl(struct tty_struct *tty,
struct file *file,
unsigned int cmd, unsigned long arg)
{
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 80e69bb8283e..83ac8c128192 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
int ret;
+ if (device_may_wakeup(dev))
+ return enable_irq_wake(client->irq);
+
ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
return ret > 0 ? 0 : ret;
}
@@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
+ if (device_may_wakeup(dev))
+ return disable_irq_wake(client->irq);
+
return egalax_wake_up_device(client);
}
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index b86c1e5fbc11..9e8684ab48f4 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/sort.h>
+#include <linux/pm_wakeirq.h>
#include <linux/mfd/ti_am335x_tscadc.h>
@@ -46,6 +47,7 @@ static const int config_pins[] = {
struct titsc {
struct input_dev *input;
struct ti_tscadc_dev *mfd_tscadc;
+ struct device *dev;
unsigned int irq;
unsigned int wires;
unsigned int x_plate_resistance;
@@ -276,7 +278,7 @@ static irqreturn_t titsc_irq(int irq, void *dev)
if (status & IRQENB_HW_PEN) {
ts_dev->pen_down = true;
irqclr |= IRQENB_HW_PEN;
- pm_stay_awake(ts_dev->mfd_tscadc->dev);
+ pm_stay_awake(ts_dev->dev);
}
if (status & IRQENB_PENUP) {
@@ -286,7 +288,7 @@ static irqreturn_t titsc_irq(int irq, void *dev)
input_report_key(input_dev, BTN_TOUCH, 0);
input_report_abs(input_dev, ABS_PRESSURE, 0);
input_sync(input_dev);
- pm_relax(ts_dev->mfd_tscadc->dev);
+ pm_relax(ts_dev->dev);
} else {
ts_dev->pen_down = true;
}
@@ -422,6 +424,7 @@ static int titsc_probe(struct platform_device *pdev)
ts_dev->mfd_tscadc = tscadc_dev;
ts_dev->input = input_dev;
ts_dev->irq = tscadc_dev->irq;
+ ts_dev->dev = &pdev->dev;
err = titsc_parse_dt(pdev, ts_dev);
if (err) {
@@ -436,6 +439,11 @@ static int titsc_probe(struct platform_device *pdev)
goto err_free_mem;
}
+ device_init_wakeup(&pdev->dev, true);
+ err = dev_pm_set_wake_irq(&pdev->dev, ts_dev->irq);
+ if (err)
+ dev_err(&pdev->dev, "irq wake enable failed.\n");
+
titsc_writel(ts_dev, REG_IRQSTATUS, TSC_IRQENB_MASK);
titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO0THRES);
titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_EOS);
@@ -467,6 +475,8 @@ static int titsc_probe(struct platform_device *pdev)
return 0;
err_free_irq:
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
free_irq(ts_dev->irq, ts_dev);
err_free_mem:
input_free_device(input_dev);
@@ -479,6 +489,8 @@ static int titsc_remove(struct platform_device *pdev)
struct titsc *ts_dev = platform_get_drvdata(pdev);
u32 steps;
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
free_irq(ts_dev->irq, ts_dev);
/* total steps followed by the enable mask */
@@ -499,7 +511,7 @@ static int __maybe_unused titsc_suspend(struct device *dev)
unsigned int idle;
tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
- if (device_may_wakeup(tscadc_dev->dev)) {
+ if (device_may_wakeup(dev)) {
titsc_writel(ts_dev, REG_IRQSTATUS, TSC_IRQENB_MASK);
idle = titsc_readl(ts_dev, REG_IRQENABLE);
titsc_writel(ts_dev, REG_IRQENABLE,
@@ -515,11 +527,11 @@ static int __maybe_unused titsc_resume(struct device *dev)
struct ti_tscadc_dev *tscadc_dev;
tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
- if (device_may_wakeup(tscadc_dev->dev)) {
+ if (device_may_wakeup(dev)) {
titsc_writel(ts_dev, REG_IRQWAKEUP,
0x00);
titsc_writel(ts_dev, REG_IRQCLR, IRQENB_HW_PEN);
- pm_relax(ts_dev->mfd_tscadc->dev);
+ pm_relax(dev);
}
titsc_step_config(ts_dev);
titsc_writel(ts_dev, REG_FIFO0THR,
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index e0fde590df8e..62973ac01381 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -68,7 +68,8 @@ const struct regmap_config tsc200x_regmap_config = {
.read_flag_mask = TSC200X_REG_READ,
.write_flag_mask = TSC200X_REG_PND0,
.wr_table = &tsc200x_writable_table,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
EXPORT_SYMBOL_GPL(tsc200x_regmap_config);
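
The tsc200x hunk above tracks a regmap API change: the single use_single_rw flag was split into use_single_read and use_single_write so that bus limitations can be expressed per direction. A hedged sketch of a config for a hypothetical device that can burst-read but must write registers one at a time (device and field values are assumptions, not from the patch):

    #include <linux/regmap.h>

    static const struct regmap_config my_chip_regmap_config = {
            .reg_bits = 8,
            .val_bits = 16,
            /* Bulk reads are fine on this (hypothetical) bus... */
            .use_single_read = false,
            /* ...but every register must be written individually. */
            .use_single_write = true,
    };
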
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c60395b7470f..83e6d993fca5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -372,6 +372,14 @@ config S390_CCW_IOMMU
Enables bits of IOMMU API required by VFIO. The iommu_ops
is not implemented as it is not necessary for VFIO.
+config S390_AP_IOMMU
+ bool "S390 AP IOMMU Support"
+ depends on S390 && ZCRYPT
+ select IOMMU_API
+ help
+ Enables bits of IOMMU API required by VFIO. The iommu_ops
+ is not implemented as it is not necessary for VFIO.
+
config MTK_IOMMU
bool "MTK IOMMU Support"
depends on ARM || ARM64
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4e04fff23977..bee0dfb7b93b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev)
/* The callers make sure that get_device_id() does not fail here */
devid = get_device_id(dev);
+
+ /* For ACPI HID devices, we simply return the devid as such */
+ if (!dev_is_pci(dev))
+ return devid;
+
ivrs_alias = amd_iommu_alias_table[devid];
+
pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
if (ivrs_alias == pci_alias)
@@ -3063,7 +3069,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
return 0;
offset_mask = pte_pgsize - 1;
- __pte = *pte & PM_ADDR_MASK;
+ __pte = __sme_clr(*pte & PM_ADDR_MASK);
return (__pte & ~offset_mask) | (iova & offset_mask);
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 84b3e4445d46..3931c7de7c69 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -902,12 +902,22 @@ static bool copy_device_table(void)
}
}
- old_devtb_phys = entry & PAGE_MASK;
+ /*
+ * When SME is enabled in the first kernel, the entry includes the
+ * memory encryption mask (sme_me_mask), so we must remove it to
+ * obtain the true physical address in the kdump kernel.
+ */
+ old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
+
if (old_devtb_phys >= 0x100000000ULL) {
pr_err("The address of old device table is above 4G, not trustworthy!\n");
return false;
}
- old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
+ old_devtb = (sme_active() && is_kdump_kernel())
+ ? (__force void *)ioremap_encrypted(old_devtb_phys,
+ dev_table_size)
+ : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
+
if (!old_devtb)
return false;
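
Both AMD IOMMU hunks above deal with the same detail: with Secure Memory Encryption active, page-table entries and the saved device-table pointer carry the encryption bit (sme_me_mask), which must be stripped before the value can be used as a physical address. __sme_clr() is just a mask operation; a minimal sketch of the idea (the helper name and mask parameter are illustrative):

    #include <linux/mem_encrypt.h>  /* __sme_clr(), sme_me_mask */
    #include <linux/types.h>

    /* __sme_clr(x) expands to ((x) & ~sme_me_mask): drop the encryption
     * bit so the remaining bits form a real physical address. */
    static inline u64 entry_to_phys(u64 entry, u64 addr_mask)
    {
            return __sme_clr(entry) & addr_mask;
    }
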
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 5f3f10cf9d9d..76f0a5d16ed3 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev && dev_is_pci(dev) && info->pasid_supported) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- return NULL;
+ pr_warn("No pasid table for %s, pasid disabled\n",
+ dev_name(dev));
+ info->pasid_supported = 0;
}
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -3895,7 +3895,7 @@ static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
return !dma_addr;
}
-const struct dma_map_ops intel_dma_ops = {
+static const struct dma_map_ops intel_dma_ops = {
.alloc = intel_alloc_coherent,
.free = intel_free_coherent,
.map_sg = intel_map_sg,
@@ -3903,9 +3903,7 @@ const struct dma_map_ops intel_dma_ops = {
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
.mapping_error = intel_mapping_error,
-#ifdef CONFIG_X86
.dma_supported = dma_direct_supported,
-#endif
};
static inline int iommu_domain_cache_init(void)
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
index 1c05ed6fc5a5..1fb5e12b029a 100644
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -11,7 +11,7 @@
#define __INTEL_PASID_H
#define PASID_MIN 0x1
-#define PASID_MAX 0x100000
+#define PASID_MAX 0x20000
struct pasid_entry {
u64 val;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 258115b10fa9..ad3e2b97469e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1241,6 +1241,12 @@ err_unprepare_clocks:
static void rk_iommu_shutdown(struct platform_device *pdev)
{
+ struct rk_iommu *iommu = platform_get_drvdata(pdev);
+ int i = 0, irq;
+
+ while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
+ devm_free_irq(iommu->dev, irq, iommu);
+
pm_runtime_force_suspend(&pdev->dev);
}
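
The rockchip-iommu shutdown fix above releases every IRQ the driver requested before forcing runtime suspend, so a late interrupt can no longer run against powered-down hardware. The walk relies on platform_get_irq() returning -ENXIO once the index passes the last interrupt; a sketch of the same pattern for a hypothetical multi-IRQ platform device:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static void my_free_all_irqs(struct platform_device *pdev, void *dev_id)
    {
            int i = 0, irq;

            /* -ENXIO terminates the walk; other errors skip one index. */
            while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
                    if (irq < 0)
                            continue;
                    devm_free_irq(&pdev->dev, irq, dev_id);
            }
    }
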
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 383e7b70221d..96451b581452 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -310,6 +310,9 @@ config MVEBU_ODMI
config MVEBU_PIC
bool
+config MVEBU_SEI
+ bool
+
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
depends on PCI && PCI_MSI
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index fbd1ec8070ef..b822199445ff 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_MVEBU_GICP) += irq-mvebu-gicp.o
obj-$(CONFIG_MVEBU_ICU) += irq-mvebu-icu.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o
+obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c2df341ff6fa..db20e992a40f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -19,13 +19,16 @@
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
+#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
+#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/log2.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
@@ -52,6 +55,7 @@
#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
+#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
static u32 lpi_id_bits;
@@ -64,7 +68,7 @@ static u32 lpi_id_bits;
#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
-#define LPI_PROP_DEFAULT_PRIO 0xa0
+#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
/*
* Collection structure - just an ID, and a redistributor address to
@@ -173,6 +177,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
@@ -1028,7 +1033,7 @@ static inline u32 its_get_event_id(struct irq_data *d)
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
irq_hw_number_t hwirq;
- struct page *prop_page;
+ void *va;
u8 *cfg;
if (irqd_is_forwarded_to_vcpu(d)) {
@@ -1036,7 +1041,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
u32 event = its_get_event_id(d);
struct its_vlpi_map *map;
- prop_page = its_dev->event_map.vm->vprop_page;
+ va = page_address(its_dev->event_map.vm->vprop_page);
map = &its_dev->event_map.vlpi_maps[event];
hwirq = map->vintid;
@@ -1044,11 +1049,11 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
map->properties &= ~clr;
map->properties |= set | LPI_PROP_GROUP1;
} else {
- prop_page = gic_rdists->prop_page;
+ va = gic_rdists->prop_table_va;
hwirq = d->hwirq;
}
- cfg = page_address(prop_page) + hwirq - 8192;
+ cfg = va + hwirq - 8192;
*cfg &= ~clr;
*cfg |= set | LPI_PROP_GROUP1;
@@ -1597,6 +1602,15 @@ static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
kfree(bitmap);
}
+static void gic_reset_prop_table(void *va)
+{
+ /* Priority 0xa0, Group-1, disabled */
+ memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
+
+ /* Make sure the GIC will observe the written configuration */
+ gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
+}
+
static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
struct page *prop_page;
@@ -1605,13 +1619,7 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
if (!prop_page)
return NULL;
- /* Priority 0xa0, Group-1, disabled */
- memset(page_address(prop_page),
- LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
- LPI_PROPBASE_SZ);
-
- /* Make sure the GIC will observe the written configuration */
- gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
+ gic_reset_prop_table(page_address(prop_page));
return prop_page;
}
@@ -1622,20 +1630,74 @@ static void its_free_prop_table(struct page *prop_page)
get_order(LPI_PROPBASE_SZ));
}
-static int __init its_alloc_lpi_tables(void)
+static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
{
- phys_addr_t paddr;
+ phys_addr_t start, end, addr_end;
+ u64 i;
- lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
- ITS_MAX_LPI_NRBITS);
- gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
- if (!gic_rdists->prop_page) {
- pr_err("Failed to allocate PROPBASE\n");
- return -ENOMEM;
+ /*
+ * We don't bother checking for a kdump kernel as by
+ * construction, the LPI tables are out of this kernel's
+ * memory map.
+ */
+ if (is_kdump_kernel())
+ return true;
+
+ addr_end = addr + size - 1;
+
+ for_each_reserved_mem_region(i, &start, &end) {
+ if (addr >= start && addr_end <= end)
+ return true;
+ }
+
+ /* Not found, not a good sign... */
+ pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
+ &addr, &addr_end);
+ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+ return false;
+}
+
+static int gic_reserve_range(phys_addr_t addr, unsigned long size)
+{
+ if (efi_enabled(EFI_CONFIG_TABLES))
+ return efi_mem_reserve_persistent(addr, size);
+
+ return 0;
+}
+
+static int __init its_setup_lpi_prop_table(void)
+{
+ if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
+ u64 val;
+
+ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
+ lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
+
+ gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
+ gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
+ LPI_PROPBASE_SZ,
+ MEMREMAP_WB);
+ gic_reset_prop_table(gic_rdists->prop_table_va);
+ } else {
+ struct page *page;
+
+ lpi_id_bits = min_t(u32,
+ GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
+ ITS_MAX_LPI_NRBITS);
+ page = its_allocate_prop_table(GFP_NOWAIT);
+ if (!page) {
+ pr_err("Failed to allocate PROPBASE\n");
+ return -ENOMEM;
+ }
+
+ gic_rdists->prop_table_pa = page_to_phys(page);
+ gic_rdists->prop_table_va = page_address(page);
+ WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
+ LPI_PROPBASE_SZ));
}
- paddr = page_to_phys(gic_rdists->prop_page);
- pr_info("GIC: using LPI property table @%pa\n", &paddr);
+ pr_info("GICv3: using LPI property table @%pa\n",
+ &gic_rdists->prop_table_pa);
return its_lpi_init(lpi_id_bits);
}
@@ -1924,12 +1986,9 @@ static int its_alloc_collections(struct its_node *its)
static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
struct page *pend_page;
- /*
- * The pending pages have to be at least 64kB aligned,
- * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
- */
+
pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
- get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+ get_order(LPI_PENDBASE_SZ));
if (!pend_page)
return NULL;
@@ -1941,36 +2000,103 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
static void its_free_pending_table(struct page *pt)
{
- free_pages((unsigned long)page_address(pt),
- get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+ free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
+}
+
+/*
+ * Booting with kdump and LPIs enabled is generally fine. Any other
+ * case is wrong in the absence of firmware/EFI support.
+ */
+static bool enabled_lpis_allowed(void)
+{
+ phys_addr_t addr;
+ u64 val;
+
+ /* Check whether the property table is in a reserved region */
+ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
+ addr = val & GENMASK_ULL(51, 12);
+
+ return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
+}
+
+static int __init allocate_lpi_tables(void)
+{
+ u64 val;
+ int err, cpu;
+
+ /*
+ * If LPIs are enabled while we run this from the boot CPU,
+ * flag the RD tables as pre-allocated if the stars do align.
+ */
+ val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
+ if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
+ gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
+ RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
+ pr_info("GICv3: Using preallocated redistributor tables\n");
+ }
+
+ err = its_setup_lpi_prop_table();
+ if (err)
+ return err;
+
+ /*
+ * We allocate all the pending tables anyway, as we may have a
+ * mix of RDs that have had LPIs enabled, and some that
+ * haven't. We'll free the unused ones as each CPU comes online.
+ */
+ for_each_possible_cpu(cpu) {
+ struct page *pend_page;
+
+ pend_page = its_allocate_pending_table(GFP_NOWAIT);
+ if (!pend_page) {
+ pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
+ return -ENOMEM;
+ }
+
+ gic_data_rdist_cpu(cpu)->pend_page = pend_page;
+ }
+
+ return 0;
}
static void its_cpu_init_lpis(void)
{
void __iomem *rbase = gic_data_rdist_rd_base();
struct page *pend_page;
+ phys_addr_t paddr;
u64 val, tmp;
- /* If we didn't allocate the pending table yet, do it now */
- pend_page = gic_data_rdist()->pend_page;
- if (!pend_page) {
- phys_addr_t paddr;
+ if (gic_data_rdist()->lpi_enabled)
+ return;
- pend_page = its_allocate_pending_table(GFP_NOWAIT);
- if (!pend_page) {
- pr_err("Failed to allocate PENDBASE for CPU%d\n",
- smp_processor_id());
- return;
- }
+ val = readl_relaxed(rbase + GICR_CTLR);
+ if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
+ (val & GICR_CTLR_ENABLE_LPIS)) {
+ /*
+ * Check that we get the same property table on all
+ * RDs. If we don't, this is hopeless.
+ */
+ paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
+ paddr &= GENMASK_ULL(51, 12);
+ if (WARN_ON(gic_rdists->prop_table_pa != paddr))
+ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+
+ paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
+ paddr &= GENMASK_ULL(51, 16);
- paddr = page_to_phys(pend_page);
- pr_info("CPU%d: using LPI pending table @%pa\n",
- smp_processor_id(), &paddr);
- gic_data_rdist()->pend_page = pend_page;
+ WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
+ its_free_pending_table(gic_data_rdist()->pend_page);
+ gic_data_rdist()->pend_page = NULL;
+
+ goto out;
}
+ pend_page = gic_data_rdist()->pend_page;
+ paddr = page_to_phys(pend_page);
+ WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
+
/* set PROPBASE */
- val = (page_to_phys(gic_rdists->prop_page) |
+ val = (gic_rdists->prop_table_pa |
GICR_PROPBASER_InnerShareable |
GICR_PROPBASER_RaWaWb |
((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
@@ -2020,6 +2146,12 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
+out:
+ gic_data_rdist()->lpi_enabled = true;
+ pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
+ smp_processor_id(),
+ gic_data_rdist()->pend_page ? "allocated" : "reserved",
+ &paddr);
}
static void its_cpu_init_collection(struct its_node *its)
@@ -3498,16 +3630,6 @@ static int redist_disable_lpis(void)
u64 timeout = USEC_PER_SEC;
u64 val;
- /*
- * If coming via a CPU hotplug event, we don't need to disable
- * LPIs before trying to re-enable them. They are already
- * configured and all is well in the world. Detect this case
- * by checking the allocation of the pending table for the
- * current CPU.
- */
- if (gic_data_rdist()->pend_page)
- return 0;
-
if (!gic_rdists_supports_plpis()) {
pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
return -ENXIO;
@@ -3517,7 +3639,21 @@ static int redist_disable_lpis(void)
if (!(val & GICR_CTLR_ENABLE_LPIS))
return 0;
- pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
+ /*
+ * If coming via a CPU hotplug event, we don't need to disable
+ * LPIs before trying to re-enable them. They are already
+ * configured and all is well in the world.
+ *
+ * If running with preallocated tables, there is nothing to do.
+ */
+ if (gic_data_rdist()->lpi_enabled ||
+ (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
+ return 0;
+
+ /*
+ * From that point on, we only try to do some damage control.
+ */
+ pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
smp_processor_id());
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
@@ -3773,7 +3909,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
}
gic_rdists = rdists;
- err = its_alloc_lpi_tables();
+
+ err = allocate_lpi_tables();
if (err)
return err;
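
The ITS rework above reduces to one rule: if GICR_CTLR.EnableLPIs is already set when the kernel boots (firmware handed over with LPIs live, or this is a kexec/kdump kernel), the redistributor tables are in use and must be adopted rather than reallocated, because the architecture gives no reliable way to disable LPIs again. A condensed sketch of the detection step, using the names from the patch:

    /* Condensed from allocate_lpi_tables() above; not standalone code. */
    u64 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);

    if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
            /*
             * LPIs are live and the tables sit in memory we reserved
             * (via EFI) or inherited (kdump): adopt them as-is.
             */
            gic_rdists->flags |= RDIST_FLAGS_RD_TABLES_PREALLOCATED |
                                 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
    }
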
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d5912f1ec884..8f87f40c9460 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -348,48 +348,45 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
{
u32 irqnr;
- do {
- irqnr = gic_read_iar();
+ irqnr = gic_read_iar();
- if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
- int err;
+ if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
+ int err;
- if (static_branch_likely(&supports_deactivate_key))
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_write_eoir(irqnr);
+ else
+ isb();
+
+ err = handle_domain_irq(gic_data.domain, irqnr, regs);
+ if (err) {
+ WARN_ONCE(true, "Unexpected interrupt received!\n");
+ if (static_branch_likely(&supports_deactivate_key)) {
+ if (irqnr < 8192)
+ gic_write_dir(irqnr);
+ } else {
gic_write_eoir(irqnr);
- else
- isb();
-
- err = handle_domain_irq(gic_data.domain, irqnr, regs);
- if (err) {
- WARN_ONCE(true, "Unexpected interrupt received!\n");
- if (static_branch_likely(&supports_deactivate_key)) {
- if (irqnr < 8192)
- gic_write_dir(irqnr);
- } else {
- gic_write_eoir(irqnr);
- }
}
- continue;
}
- if (irqnr < 16) {
- gic_write_eoir(irqnr);
- if (static_branch_likely(&supports_deactivate_key))
- gic_write_dir(irqnr);
+ return;
+ }
+ if (irqnr < 16) {
+ gic_write_eoir(irqnr);
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_write_dir(irqnr);
#ifdef CONFIG_SMP
- /*
- * Unlike GICv2, we don't need an smp_rmb() here.
- * The control dependency from gic_read_iar to
- * the ISB in gic_write_eoir is enough to ensure
- * that any shared data read by handle_IPI will
- * be read after the ACK.
- */
- handle_IPI(irqnr, regs);
+ /*
+ * Unlike GICv2, we don't need an smp_rmb() here.
+ * The control dependency from gic_read_iar to
+ * the ISB in gic_write_eoir is enough to ensure
+ * that any shared data read by handle_IPI will
+ * be read after the ACK.
+ */
+ handle_IPI(irqnr, regs);
#else
- WARN_ONCE(true, "Unexpected SGI received!\n");
+ WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
- continue;
- }
- } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
+ }
}
static void __init gic_dist_init(void)
@@ -653,7 +650,9 @@ early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
static int gic_dist_supports_lpis(void)
{
- return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi;
+ return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
+ !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
+ !gicv3_nolpi);
}
static void gic_cpu_init(void)
@@ -673,10 +672,6 @@ static void gic_cpu_init(void)
gic_cpu_config(rbase, gic_redist_wait_for_rwp);
- /* Give LPIs a spin */
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
- its_cpu_init();
-
/* initialise system registers */
gic_cpu_sys_reg_init();
}
@@ -689,6 +684,10 @@ static void gic_cpu_init(void)
static int gic_starting_cpu(unsigned int cpu)
{
gic_cpu_init();
+
+ if (gic_dist_supports_lpis())
+ its_cpu_init();
+
return 0;
}
@@ -1127,14 +1126,16 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_update_vlpi_properties();
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
- its_init(handle, &gic_data.rdists, gic_data.domain);
-
gic_smp_init();
gic_dist_init();
gic_cpu_init();
gic_cpu_pm_init();
+ if (gic_dist_supports_lpis()) {
+ its_init(handle, &gic_data.rdists, gic_data.domain);
+ its_cpu_init();
+ }
+
return 0;
out_free:
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 13063339b416..547045d89c4b 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
@@ -26,6 +27,10 @@
#define ICU_SETSPI_NSR_AH 0x14
#define ICU_CLRSPI_NSR_AL 0x18
#define ICU_CLRSPI_NSR_AH 0x1c
+#define ICU_SET_SEI_AL 0x50
+#define ICU_SET_SEI_AH 0x54
+#define ICU_CLR_SEI_AL 0x58
+#define ICU_CLR_SEI_AH 0x5C
#define ICU_INT_CFG(x) (0x100 + 4 * (x))
#define ICU_INT_ENABLE BIT(24)
#define ICU_IS_EDGE BIT(28)
@@ -36,12 +41,23 @@
#define ICU_SATA0_ICU_ID 109
#define ICU_SATA1_ICU_ID 107
+struct mvebu_icu_subset_data {
+ unsigned int icu_group;
+ unsigned int offset_set_ah;
+ unsigned int offset_set_al;
+ unsigned int offset_clr_ah;
+ unsigned int offset_clr_al;
+};
+
struct mvebu_icu {
- struct irq_chip irq_chip;
void __iomem *base;
- struct irq_domain *domain;
struct device *dev;
+};
+
+struct mvebu_icu_msi_data {
+ struct mvebu_icu *icu;
atomic_t initialized;
+ const struct mvebu_icu_subset_data *subset_data;
};
struct mvebu_icu_irq_data {
@@ -50,28 +66,40 @@ struct mvebu_icu_irq_data {
unsigned int type;
};
-static void mvebu_icu_init(struct mvebu_icu *icu, struct msi_msg *msg)
+DEFINE_STATIC_KEY_FALSE(legacy_bindings);
+
+static void mvebu_icu_init(struct mvebu_icu *icu,
+ struct mvebu_icu_msi_data *msi_data,
+ struct msi_msg *msg)
{
- if (atomic_cmpxchg(&icu->initialized, false, true))
+ const struct mvebu_icu_subset_data *subset = msi_data->subset_data;
+
+ if (atomic_cmpxchg(&msi_data->initialized, false, true))
return;
- /* Set Clear/Set ICU SPI message address in AP */
- writel_relaxed(msg[0].address_hi, icu->base + ICU_SETSPI_NSR_AH);
- writel_relaxed(msg[0].address_lo, icu->base + ICU_SETSPI_NSR_AL);
- writel_relaxed(msg[1].address_hi, icu->base + ICU_CLRSPI_NSR_AH);
- writel_relaxed(msg[1].address_lo, icu->base + ICU_CLRSPI_NSR_AL);
+ /* Set 'SET' ICU SPI message address in AP */
+ writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
+ writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);
+
+ if (subset->icu_group != ICU_GRP_NSR)
+ return;
+
+ /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
+ writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
+ writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
}
static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
struct irq_data *d = irq_get_irq_data(desc->irq);
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d->domain);
struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
struct mvebu_icu *icu = icu_irqd->icu;
unsigned int icu_int;
if (msg->address_lo || msg->address_hi) {
- /* One off initialization */
- mvebu_icu_init(icu, msg);
+ /* One-off initialization per domain */
+ mvebu_icu_init(icu, msi_data, msg);
/* Configure the ICU with irq number & type */
icu_int = msg->data | ICU_INT_ENABLE;
if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
@@ -101,37 +129,66 @@ static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
}
}
+static struct irq_chip mvebu_icu_nsr_chip = {
+ .name = "ICU-NSR",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static struct irq_chip mvebu_icu_sei_chip = {
+ .name = "ICU-SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
static int
mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
- struct mvebu_icu *icu = d->host_data;
- unsigned int icu_group;
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d);
+ struct mvebu_icu *icu = msi_data->icu;
+ unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
/* Check the count of the parameters in dt */
- if (WARN_ON(fwspec->param_count < 3)) {
+ if (WARN_ON(fwspec->param_count != param_count)) {
dev_err(icu->dev, "wrong ICU parameter count %d\n",
fwspec->param_count);
return -EINVAL;
}
- /* Only ICU group type is handled */
- icu_group = fwspec->param[0];
- if (icu_group != ICU_GRP_NSR && icu_group != ICU_GRP_SR &&
- icu_group != ICU_GRP_SEI && icu_group != ICU_GRP_REI) {
- dev_err(icu->dev, "wrong ICU group type %x\n", icu_group);
- return -EINVAL;
+ if (static_branch_unlikely(&legacy_bindings)) {
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ if (fwspec->param[0] != ICU_GRP_NSR) {
+ dev_err(icu->dev, "wrong ICU group type %x\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+ } else {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ /*
+ * The ICU receives level interrupts. While the NSR are also
+ * level interrupts, SEI are edge interrupts. Force the type
+ * here in this case. Please note that this makes the interrupt
+ * handling unreliable.
+ */
+ if (msi_data->subset_data->icu_group == ICU_GRP_SEI)
+ *type = IRQ_TYPE_EDGE_RISING;
}
- *hwirq = fwspec->param[1];
if (*hwirq >= ICU_MAX_IRQS) {
dev_err(icu->dev, "invalid interrupt number %ld\n", *hwirq);
return -EINVAL;
}
- /* Mask the type to prevent wrong DT configuration */
- *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
-
return 0;
}
@@ -142,8 +199,10 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
int err;
unsigned long hwirq;
struct irq_fwspec *fwspec = args;
- struct mvebu_icu *icu = platform_msi_get_host_data(domain);
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain);
+ struct mvebu_icu *icu = msi_data->icu;
struct mvebu_icu_irq_data *icu_irqd;
+ struct irq_chip *chip = &mvebu_icu_nsr_chip;
icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
if (!icu_irqd)
@@ -156,7 +215,10 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
goto free_irqd;
}
- icu_irqd->icu_group = fwspec->param[0];
+ if (static_branch_unlikely(&legacy_bindings))
+ icu_irqd->icu_group = fwspec->param[0];
+ else
+ icu_irqd->icu_group = msi_data->subset_data->icu_group;
icu_irqd->icu = icu;
err = platform_msi_domain_alloc(domain, virq, nr_irqs);
@@ -170,8 +232,11 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (err)
goto free_msi;
+ if (icu_irqd->icu_group == ICU_GRP_SEI)
+ chip = &mvebu_icu_sei_chip;
+
err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- &icu->irq_chip, icu_irqd);
+ chip, icu_irqd);
if (err) {
dev_err(icu->dev, "failed to set the data to IRQ domain\n");
goto free_msi;
@@ -204,11 +269,84 @@ static const struct irq_domain_ops mvebu_icu_domain_ops = {
.free = mvebu_icu_irq_domain_free,
};
+static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
+ .icu_group = ICU_GRP_NSR,
+ .offset_set_ah = ICU_SETSPI_NSR_AH,
+ .offset_set_al = ICU_SETSPI_NSR_AL,
+ .offset_clr_ah = ICU_CLRSPI_NSR_AH,
+ .offset_clr_al = ICU_CLRSPI_NSR_AL,
+};
+
+static const struct mvebu_icu_subset_data mvebu_icu_sei_subset_data = {
+ .icu_group = ICU_GRP_SEI,
+ .offset_set_ah = ICU_SET_SEI_AH,
+ .offset_set_al = ICU_SET_SEI_AL,
+};
+
+static const struct of_device_id mvebu_icu_subset_of_match[] = {
+ {
+ .compatible = "marvell,cp110-icu-nsr",
+ .data = &mvebu_icu_nsr_subset_data,
+ },
+ {
+ .compatible = "marvell,cp110-icu-sei",
+ .data = &mvebu_icu_sei_subset_data,
+ },
+ {},
+};
+
+static int mvebu_icu_subset_probe(struct platform_device *pdev)
+{
+ struct mvebu_icu_msi_data *msi_data;
+ struct device_node *msi_parent_dn;
+ struct device *dev = &pdev->dev;
+ struct irq_domain *irq_domain;
+
+ msi_data = devm_kzalloc(dev, sizeof(*msi_data), GFP_KERNEL);
+ if (!msi_data)
+ return -ENOMEM;
+
+ if (static_branch_unlikely(&legacy_bindings)) {
+ msi_data->icu = dev_get_drvdata(dev);
+ msi_data->subset_data = &mvebu_icu_nsr_subset_data;
+ } else {
+ msi_data->icu = dev_get_drvdata(dev->parent);
+ msi_data->subset_data = of_device_get_match_data(dev);
+ }
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_PLATFORM_MSI);
+ if (!dev->msi_domain)
+ return -EPROBE_DEFER;
+
+ msi_parent_dn = irq_domain_get_of_node(dev->msi_domain);
+ if (!msi_parent_dn)
+ return -ENODEV;
+
+ irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS,
+ mvebu_icu_write_msg,
+ &mvebu_icu_domain_ops,
+ msi_data);
+ if (!irq_domain) {
+ dev_err(dev, "Failed to create ICU MSI domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct platform_driver mvebu_icu_subset_driver = {
+ .probe = mvebu_icu_subset_probe,
+ .driver = {
+ .name = "mvebu-icu-subset",
+ .of_match_table = mvebu_icu_subset_of_match,
+ },
+};
+builtin_platform_driver(mvebu_icu_subset_driver);
+
static int mvebu_icu_probe(struct platform_device *pdev)
{
struct mvebu_icu *icu;
- struct device_node *node = pdev->dev.of_node;
- struct device_node *gicp_dn;
struct resource *res;
int i;
@@ -226,53 +364,38 @@ static int mvebu_icu_probe(struct platform_device *pdev)
return PTR_ERR(icu->base);
}
- icu->irq_chip.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "ICU.%x",
- (unsigned int)res->start);
- if (!icu->irq_chip.name)
- return -ENOMEM;
-
- icu->irq_chip.irq_mask = irq_chip_mask_parent;
- icu->irq_chip.irq_unmask = irq_chip_unmask_parent;
- icu->irq_chip.irq_eoi = irq_chip_eoi_parent;
- icu->irq_chip.irq_set_type = irq_chip_set_type_parent;
-#ifdef CONFIG_SMP
- icu->irq_chip.irq_set_affinity = irq_chip_set_affinity_parent;
-#endif
-
/*
- * We're probed after MSI domains have been resolved, so force
- * resolution here.
+ * Legacy bindings: the ICU is a single node with one MSI parent; manually
+ * force the probe of the NSR interrupt side.
+ * New bindings: ICU node has children, one per interrupt controller
+ * having its own MSI parent: call platform_populate().
+ * All ICU instances should use the same bindings.
*/
- pdev->dev.msi_domain = of_msi_get_domain(&pdev->dev, node,
- DOMAIN_BUS_PLATFORM_MSI);
- if (!pdev->dev.msi_domain)
- return -EPROBE_DEFER;
-
- gicp_dn = irq_domain_get_of_node(pdev->dev.msi_domain);
- if (!gicp_dn)
- return -ENODEV;
+ if (!of_get_child_count(pdev->dev.of_node))
+ static_branch_enable(&legacy_bindings);
/*
- * Clean all ICU interrupts with type SPI_NSR, required to
+ * Clean all ICU interrupts of type NSR and SEI, required to
* avoid unpredictable SPI assignments done by firmware.
*/
for (i = 0 ; i < ICU_MAX_IRQS ; i++) {
- u32 icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i));
- if ((icu_int >> ICU_GROUP_SHIFT) == ICU_GRP_NSR)
+ u32 icu_int, icu_grp;
+
+ icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i));
+ icu_grp = icu_int >> ICU_GROUP_SHIFT;
+
+ if (icu_grp == ICU_GRP_NSR ||
+ (icu_grp == ICU_GRP_SEI &&
+ !static_branch_unlikely(&legacy_bindings)))
writel_relaxed(0x0, icu->base + ICU_INT_CFG(i));
}
- icu->domain =
- platform_msi_create_device_domain(&pdev->dev, ICU_MAX_IRQS,
- mvebu_icu_write_msg,
- &mvebu_icu_domain_ops, icu);
- if (!icu->domain) {
- dev_err(&pdev->dev, "Failed to create ICU domain\n");
- return -ENOMEM;
- }
+ platform_set_drvdata(pdev, icu);
- return 0;
+ if (static_branch_unlikely(&legacy_bindings))
+ return mvebu_icu_subset_probe(pdev);
+ else
+ return devm_of_platform_populate(&pdev->dev);
}
static const struct of_device_id mvebu_icu_of_match[] = {
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
new file mode 100644
index 000000000000..566d69a2edbc
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "mvebu-sei: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/* Cause register */
+#define GICP_SECR(idx) (0x0 + ((idx) * 0x4))
+/* Mask register */
+#define GICP_SEMR(idx) (0x20 + ((idx) * 0x4))
+#define GICP_SET_SEI_OFFSET 0x30
+
+#define SEI_IRQ_COUNT_PER_REG 32
+#define SEI_IRQ_REG_COUNT 2
+#define SEI_IRQ_COUNT (SEI_IRQ_COUNT_PER_REG * SEI_IRQ_REG_COUNT)
+#define SEI_IRQ_REG_IDX(irq_id) ((irq_id) / SEI_IRQ_COUNT_PER_REG)
+#define SEI_IRQ_REG_BIT(irq_id) ((irq_id) % SEI_IRQ_COUNT_PER_REG)
+
+struct mvebu_sei_interrupt_range {
+ u32 first;
+ u32 size;
+};
+
+struct mvebu_sei_caps {
+ struct mvebu_sei_interrupt_range ap_range;
+ struct mvebu_sei_interrupt_range cp_range;
+};
+
+struct mvebu_sei {
+ struct device *dev;
+ void __iomem *base;
+ struct resource *res;
+ struct irq_domain *sei_domain;
+ struct irq_domain *ap_domain;
+ struct irq_domain *cp_domain;
+ const struct mvebu_sei_caps *caps;
+
+ /* Lock on MSI allocations/releases */
+ struct mutex cp_msi_lock;
+ DECLARE_BITMAP(cp_msi_bitmap, SEI_IRQ_COUNT);
+
+ /* Lock on IRQ masking register */
+ raw_spinlock_t mask_lock;
+};
+
+static void mvebu_sei_ack_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+
+ writel_relaxed(BIT(SEI_IRQ_REG_BIT(d->hwirq)),
+ sei->base + GICP_SECR(reg_idx));
+}
+
+static void mvebu_sei_mask_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+ unsigned long flags;
+
+ /* 1 disables the interrupt */
+ raw_spin_lock_irqsave(&sei->mask_lock, flags);
+ reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
+ reg |= BIT(SEI_IRQ_REG_BIT(d->hwirq));
+ writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
+ raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
+}
+
+static void mvebu_sei_unmask_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+ unsigned long flags;
+
+ /* 0 enables the interrupt */
+ raw_spin_lock_irqsave(&sei->mask_lock, flags);
+ reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
+ reg &= ~BIT(SEI_IRQ_REG_BIT(d->hwirq));
+ writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
+ raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
+}
+
+static int mvebu_sei_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static int mvebu_sei_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ /* We can only clear the pending state by acking the interrupt */
+ if (which != IRQCHIP_STATE_PENDING || state)
+ return -EINVAL;
+
+ mvebu_sei_ack_irq(d);
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_irq_chip = {
+ .name = "SEI",
+ .irq_ack = mvebu_sei_ack_irq,
+ .irq_mask = mvebu_sei_mask_irq,
+ .irq_unmask = mvebu_sei_unmask_irq,
+ .irq_set_affinity = mvebu_sei_set_affinity,
+ .irq_set_irqchip_state = mvebu_sei_set_irqchip_state,
+};
+
+static int mvebu_sei_ap_set_type(struct irq_data *data, unsigned int type)
+{
+ if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_ap_irq_chip = {
+ .name = "AP SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = mvebu_sei_ap_set_type,
+};
+
+static void mvebu_sei_cp_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct mvebu_sei *sei = data->chip_data;
+ phys_addr_t set = sei->res->start + GICP_SET_SEI_OFFSET;
+
+ msg->data = data->hwirq + sei->caps->cp_range.first;
+ msg->address_lo = lower_32_bits(set);
+ msg->address_hi = upper_32_bits(set);
+}
+
+static int mvebu_sei_cp_set_type(struct irq_data *data, unsigned int type)
+{
+ if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_cp_irq_chip = {
+ .name = "CP SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = mvebu_sei_cp_set_type,
+ .irq_compose_msi_msg = mvebu_sei_cp_compose_msi_msg,
+};
+
+static int mvebu_sei_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec *fwspec = arg;
+
+ /* Not much to do, just setup the irqdata */
+ irq_domain_set_hwirq_and_chip(domain, virq, fwspec->param[0],
+ &mvebu_sei_irq_chip, sei);
+
+ return 0;
+}
+
+static void mvebu_sei_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops mvebu_sei_domain_ops = {
+ .alloc = mvebu_sei_domain_alloc,
+ .free = mvebu_sei_domain_free,
+};
+
+static int mvebu_sei_ap_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ *hwirq = fwspec->param[0];
+ *type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static int mvebu_sei_ap_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec fwspec;
+ unsigned long hwirq;
+ unsigned int type;
+ int err;
+
+ mvebu_sei_ap_translate(domain, arg, &hwirq, &type);
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq + sei->caps->ap_range.first;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, hwirq,
+ &mvebu_sei_ap_irq_chip, sei,
+ handle_level_irq, NULL, NULL);
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mvebu_sei_ap_domain_ops = {
+ .translate = mvebu_sei_ap_translate,
+ .alloc = mvebu_sei_ap_alloc,
+ .free = irq_domain_free_irqs_parent,
+};
+
+static void mvebu_sei_cp_release_irq(struct mvebu_sei *sei, unsigned long hwirq)
+{
+ mutex_lock(&sei->cp_msi_lock);
+ clear_bit(hwirq, sei->cp_msi_bitmap);
+ mutex_unlock(&sei->cp_msi_lock);
+}
+
+static int mvebu_sei_cp_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec fwspec;
+ unsigned long hwirq;
+ int ret;
+
+ /* The software only supports single allocations for now */
+ if (nr_irqs != 1)
+ return -ENOTSUPP;
+
+ mutex_lock(&sei->cp_msi_lock);
+ hwirq = find_first_zero_bit(sei->cp_msi_bitmap,
+ sei->caps->cp_range.size);
+ if (hwirq < sei->caps->cp_range.size)
+ set_bit(hwirq, sei->cp_msi_bitmap);
+ mutex_unlock(&sei->cp_msi_lock);
+
+ if (hwirq == sei->caps->cp_range.size)
+ return -ENOSPC;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq + sei->caps->cp_range.first;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ goto free_irq;
+
+ irq_domain_set_info(domain, virq, hwirq,
+ &mvebu_sei_cp_irq_chip, sei,
+ handle_edge_irq, NULL, NULL);
+
+ return 0;
+
+free_irq:
+ mvebu_sei_cp_release_irq(sei, hwirq);
+ return ret;
+}
+
+static void mvebu_sei_cp_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ if (nr_irqs != 1 || d->hwirq >= sei->caps->cp_range.size) {
+ dev_err(sei->dev, "Invalid hwirq %lu\n", d->hwirq);
+ return;
+ }
+
+ mvebu_sei_cp_release_irq(sei, d->hwirq);
+ irq_domain_free_irqs_parent(domain, virq, 1);
+}
+
+static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
+ .alloc = mvebu_sei_cp_domain_alloc,
+ .free = mvebu_sei_cp_domain_free,
+};
+
+static struct irq_chip mvebu_sei_msi_irq_chip = {
+ .name = "SEI pMSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+};
+
+static struct msi_domain_ops mvebu_sei_msi_ops = {
+};
+
+static struct msi_domain_info mvebu_sei_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
+ .ops = &mvebu_sei_msi_ops,
+ .chip = &mvebu_sei_msi_irq_chip,
+};
+
+static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
+{
+ struct mvebu_sei *sei = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 idx;
+
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < SEI_IRQ_REG_COUNT; idx++) {
+ unsigned long irqmap;
+ int bit;
+
+ irqmap = readl_relaxed(sei->base + GICP_SECR(idx));
+ for_each_set_bit(bit, &irqmap, SEI_IRQ_COUNT_PER_REG) {
+ unsigned long hwirq;
+ unsigned int virq;
+
+ hwirq = idx * SEI_IRQ_COUNT_PER_REG + bit;
+ virq = irq_find_mapping(sei->sei_domain, hwirq);
+ if (likely(virq)) {
+ generic_handle_irq(virq);
+ continue;
+ }
+
+ dev_warn(sei->dev,
+ "Spurious IRQ detected (hwirq %lu)\n", hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void mvebu_sei_reset(struct mvebu_sei *sei)
+{
+ u32 reg_idx;
+
+ /* Clear IRQ cause registers, mask all interrupts */
+ for (reg_idx = 0; reg_idx < SEI_IRQ_REG_COUNT; reg_idx++) {
+ writel_relaxed(0xFFFFFFFF, sei->base + GICP_SECR(reg_idx));
+ writel_relaxed(0xFFFFFFFF, sei->base + GICP_SEMR(reg_idx));
+ }
+}
+
+static int mvebu_sei_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct irq_domain *plat_domain;
+ struct mvebu_sei *sei;
+ u32 parent_irq;
+ int ret;
+
+ sei = devm_kzalloc(&pdev->dev, sizeof(*sei), GFP_KERNEL);
+ if (!sei)
+ return -ENOMEM;
+
+ sei->dev = &pdev->dev;
+
+ mutex_init(&sei->cp_msi_lock);
+ raw_spin_lock_init(&sei->mask_lock);
+
+ sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sei->base = devm_ioremap_resource(sei->dev, sei->res);
+ if (IS_ERR(sei->base)) {
+ dev_err(sei->dev, "Failed to remap SEI resource\n");
+ return PTR_ERR(sei->base);
+ }
+
+ /* Retrieve the SEI capabilities with the interrupt ranges */
+ sei->caps = of_device_get_match_data(&pdev->dev);
+ if (!sei->caps) {
+ dev_err(sei->dev,
+ "Could not retrieve controller capabilities\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Reserve the single (top-level) parent SPI IRQ from which all the
+ * interrupts handled by this driver will be signaled.
+ */
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ dev_err(sei->dev, "Failed to retrieve top-level SPI IRQ\n");
+ return -ENODEV;
+ }
+
+ /* Create the root SEI domain */
+ sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ (sei->caps->ap_range.size +
+ sei->caps->cp_range.size),
+ &mvebu_sei_domain_ops,
+ sei);
+ if (!sei->sei_domain) {
+ dev_err(sei->dev, "Failed to create SEI IRQ domain\n");
+ ret = -ENOMEM;
+ goto dispose_irq;
+ }
+
+ irq_domain_update_bus_token(sei->sei_domain, DOMAIN_BUS_NEXUS);
+
+ /* Create the 'wired' domain */
+ sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
+ sei->caps->ap_range.size,
+ of_node_to_fwnode(node),
+ &mvebu_sei_ap_domain_ops,
+ sei);
+ if (!sei->ap_domain) {
+ dev_err(sei->dev, "Failed to create AP IRQ domain\n");
+ ret = -ENOMEM;
+ goto remove_sei_domain;
+ }
+
+ irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED);
+
+ /* Create the 'MSI' domain */
+ sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
+ sei->caps->cp_range.size,
+ of_node_to_fwnode(node),
+ &mvebu_sei_cp_domain_ops,
+ sei);
+ if (!sei->cp_domain) {
+ pr_err("Failed to create CPs IRQ domain\n");
+ ret = -ENOMEM;
+ goto remove_ap_domain;
+ }
+
+ irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);
+
+ plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+ &mvebu_sei_msi_domain_info,
+ sei->cp_domain);
+ if (!plat_domain) {
+ pr_err("Failed to create CPs MSI domain\n");
+ ret = -ENOMEM;
+ goto remove_cp_domain;
+ }
+
+ mvebu_sei_reset(sei);
+
+ irq_set_chained_handler_and_data(parent_irq,
+ mvebu_sei_handle_cascade_irq,
+ sei);
+
+ return 0;
+
+remove_cp_domain:
+ irq_domain_remove(sei->cp_domain);
+remove_ap_domain:
+ irq_domain_remove(sei->ap_domain);
+remove_sei_domain:
+ irq_domain_remove(sei->sei_domain);
+dispose_irq:
+ irq_dispose_mapping(parent_irq);
+
+ return ret;
+}
+
+static const struct mvebu_sei_caps mvebu_sei_ap806_caps = {
+ .ap_range = {
+ .first = 0,
+ .size = 21,
+ },
+ .cp_range = {
+ .first = 21,
+ .size = 43,
+ },
+};
+
+static const struct of_device_id mvebu_sei_of_match[] = {
+ {
+ .compatible = "marvell,ap806-sei",
+ .data = &mvebu_sei_ap806_caps,
+ },
+ {},
+};
+
+static struct platform_driver mvebu_sei_driver = {
+ .probe = mvebu_sei_probe,
+ .driver = {
+ .name = "mvebu-sei",
+ .of_match_table = mvebu_sei_of_match,
+ },
+};
+builtin_platform_driver(mvebu_sei_driver);
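
In the new SEI driver above, the cause (GICP_SECR) and mask (GICP_SEMR) banks are two 32-bit registers covering 64 interrupts, so SEI_IRQ_REG_IDX()/SEI_IRQ_REG_BIT() are plain divide/modulo by 32. A worked example (the hwirq value is made up):

    /* hwirq 37: reg_idx = 37 / 32 = 1, bit = 37 % 32 = 5.
     * Acking it therefore writes BIT(5) to GICP_SECR(1), i.e. to
     * offset 0x0 + 1 * 0x4 = 0x4; masking it sets the same bit in
     * GICP_SEMR(1) at offset 0x20 + 1 * 0x4 = 0x24. */
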
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 532e9d68c704..357e9daf94ae 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -15,6 +15,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
+#include <asm/smp.h>
/*
* This driver implements a version of the RISC-V PLIC with the actual layout
@@ -176,7 +177,7 @@ static int plic_find_hart_id(struct device_node *node)
{
for (; node; node = node->parent) {
if (of_device_is_compatible(node, "riscv"))
- return riscv_of_processor_hart(node);
+ return riscv_of_processor_hartid(node);
}
return -1;
@@ -218,7 +219,7 @@ static int __init plic_init(struct device_node *node,
struct of_phandle_args parent;
struct plic_handler *handler;
irq_hw_number_t hwirq;
- int cpu;
+ int cpu, hartid;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
@@ -229,12 +230,13 @@ static int __init plic_init(struct device_node *node,
if (parent.args[0] == -1)
continue;
- cpu = plic_find_hart_id(parent.np);
- if (cpu < 0) {
+ hartid = plic_find_hart_id(parent.np);
+ if (hartid < 0) {
pr_warn("failed to parse hart ID for context %d.\n", i);
continue;
}
+ cpu = riscv_hartid_to_cpuid(hartid);
handler = per_cpu_ptr(&plic_handlers, cpu);
handler->present = true;
handler->ctxid = i;
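
The PLIC fix above separates two ID spaces that the old code conflated: RISC-V hart IDs from the devicetree, which may be sparse or start above zero, and the kernel's logical CPU numbers, which index per-CPU data. Condensed from plic_init() above (note that the patch, like this sketch, does not check riscv_hartid_to_cpuid() for failure):

    int hartid, cpu;
    struct plic_handler *handler;

    hartid = plic_find_hart_id(parent.np);
    if (hartid < 0)
            continue;       /* context not wired to a known hart */

    /* Per-CPU state must be indexed by logical CPU, not hart ID. */
    cpu = riscv_hartid_to_cpuid(hartid);
    handler = per_cpu_ptr(&plic_handlers, cpu);
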
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index b1b47a40a278..faa7d61b9d6c 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -124,6 +124,7 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
break;
case IRQ_TYPE_EDGE_BOTH:
pdc_type = PDC_EDGE_DUAL;
+ type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_LEVEL_HIGH:
pdc_type = PDC_LEVEL_HIGH;
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index ef5560b848ab..e1da70a9530c 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1155,12 +1155,6 @@ static int capinc_tty_chars_in_buffer(struct tty_struct *tty)
return mp->outbytes;
}
-static int capinc_tty_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
static void capinc_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
pr_debug("capinc_tty_set_termios\n");
@@ -1236,7 +1230,6 @@ static const struct tty_operations capinc_ops = {
.flush_chars = capinc_tty_flush_chars,
.write_room = capinc_tty_write_room,
.chars_in_buffer = capinc_tty_chars_in_buffer,
- .ioctl = capinc_tty_ioctl,
.set_termios = capinc_tty_set_termios,
.throttle = capinc_tty_throttle,
.unthrottle = capinc_tty_unthrottle,
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index bc208557f783..c0cbee06bc21 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -65,7 +65,7 @@ static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
cs->respdata[0] = 0;
break;
}
- /* --v-- fall through --v-- */
+ /* fall through */
case '\r':
/* end of message line, pass to response handler */
if (cbytes >= MAX_RESP_SIZE) {
@@ -100,7 +100,7 @@ static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
goto exit;
}
/* quoted or not in DLE mode: treat as regular data */
- /* --v-- fall through --v-- */
+ /* fall through */
default:
/* append to line buffer if possible */
if (cbytes < MAX_RESP_SIZE)
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 1cfcea62aed9..182826e9d07c 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -1036,7 +1036,7 @@ static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
break;
default:
dev_err(cs->dev, "internal error: disposition=%d\n", retval);
- /* --v-- fall through --v-- */
+ /* fall through */
case ICALL_IGNORE:
case ICALL_REJECT:
/* hang up actively
@@ -1319,7 +1319,7 @@ static void do_action(int action, struct cardstate *cs,
cs->commands_pending = 1;
break;
}
- /* bad cid: fall through */
+ /* fall through - bad cid */
case ACT_FAILCID:
cs->cur_at_seq = SEQ_NONE;
channel = cs->curchannel;
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 600c79b030cd..d9a578ac32cd 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -206,7 +206,7 @@ static int if_ioctl(struct tty_struct *tty,
? -EFAULT : 0;
if (retval >= 0) {
gigaset_dbg_buffer(DEBUG_IF, "GIGASET_BRKCHARS",
- 6, (const unsigned char *) arg);
+ 6, buf);
retval = cs->ops->brkchars(cs, buf);
}
break;
@@ -233,6 +233,14 @@ static int if_ioctl(struct tty_struct *tty,
return retval;
}
+#ifdef CONFIG_COMPAT
+static long if_compat_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ return if_ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
static int if_tiocmget(struct tty_struct *tty)
{
struct cardstate *cs = tty->driver_data;
@@ -472,6 +480,9 @@ static const struct tty_operations if_ops = {
.open = if_open,
.close = if_close,
.ioctl = if_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = if_compat_ioctl,
+#endif
.write = if_write,
.write_room = if_write_room,
.chars_in_buffer = if_chars_in_buffer,
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index 97e00118ccfe..f9264ba0fe77 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -906,7 +906,7 @@ static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf)
cs->respdata[0] = 0;
break;
}
- /* --v-- fall through --v-- */
+ /* fall through */
case '\r':
/* end of message line, pass to response handler */
if (cbytes >= MAX_RESP_SIZE) {
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index 77debda2221b..6c336366128c 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -625,7 +625,7 @@ Amd7930_l1hw(struct PStack *st, int pr, void *arg)
break;
case (HW_RESET | REQUEST):
spin_lock_irqsave(&cs->lock, flags);
- if ((cs->dc.amd7930.ph_state == 8)) {
+ if (cs->dc.amd7930.ph_state == 8) {
/* b-channels off, PH-AR cleared
* change to F3 */
Amd7930_ph_command(cs, 0x20, "HW_RESET REQUEST"); //LMR1 bit 5
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 8e5b03161b2f..ea0e4c6de3fb 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -86,7 +86,7 @@ release_io_hfcpci(struct IsdnCardState *cs)
pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
cs->hw.hfcpci.fifos = NULL;
- iounmap((void *)cs->hw.hfcpci.pci_io);
+ iounmap(cs->hw.hfcpci.pci_io);
}
/********************************************************************************/
@@ -128,7 +128,7 @@ reset_hfcpci(struct IsdnCardState *cs)
Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
/* Clear already pending ints */
- if (Read_hfc(cs, HFCPCI_INT_S1));
+ Read_hfc(cs, HFCPCI_INT_S1);
Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 2); /* HFC ST 2 */
udelay(10);
@@ -158,7 +158,7 @@ reset_hfcpci(struct IsdnCardState *cs)
/* Finally enable IRQ output */
cs->hw.hfcpci.int_m2 = HFCPCI_IRQ_ENABLE;
Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
- if (Read_hfc(cs, HFCPCI_INT_S1));
+ Read_hfc(cs, HFCPCI_INT_S1);
}
/***************************************************/
@@ -1537,7 +1537,7 @@ hfcpci_bh(struct work_struct *work)
cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
/* Clear already pending ints */
- if (Read_hfc(cs, HFCPCI_INT_S1));
+ Read_hfc(cs, HFCPCI_INT_S1);
Write_hfc(cs, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
udelay(10);
Write_hfc(cs, HFCPCI_STATES, 4);
@@ -1692,7 +1692,7 @@ setup_hfcpci(struct IsdnCard *card)
printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
return (0);
}
- cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
+ cs->hw.hfcpci.pci_io = ioremap(dev_hfcpci->resource[1].start, 256);
printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
if (!cs->hw.hfcpci.pci_io) {
@@ -1716,7 +1716,6 @@ setup_hfcpci(struct IsdnCard *card)
return 0;
}
pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u32)cs->hw.hfcpci.dma);
- cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
printk(KERN_INFO
"HFC-PCI: defined at mem %p fifo %p(%lx) IRQ %d HZ %d\n",
cs->hw.hfcpci.pci_io,
diff --git a/drivers/isdn/hisax/hfc_pci.h b/drivers/isdn/hisax/hfc_pci.h
index 4e58700a3e61..4c3b3ba35726 100644
--- a/drivers/isdn/hisax/hfc_pci.h
+++ b/drivers/isdn/hisax/hfc_pci.h
@@ -228,8 +228,8 @@ typedef union {
} fifo_area;
-#define Write_hfc(a, b, c) (*(((u_char *)a->hw.hfcpci.pci_io) + b) = c)
-#define Read_hfc(a, b) (*(((u_char *)a->hw.hfcpci.pci_io) + b))
+#define Write_hfc(a, b, c) (writeb(c, (a->hw.hfcpci.pci_io) + b))
+#define Read_hfc(a, b) (readb((a->hw.hfcpci.pci_io) + b))
extern void main_irq_hcpci(struct BCState *bcs);
extern void releasehfcpci(struct IsdnCardState *cs);
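
The hfc_pci conversion above replaces direct dereferences of a PCI BAR address with ioremap() plus readb()/writeb(), the required pattern for MMIO: __iomem pointers must not be dereferenced like normal pointers (sparse flags it, and it is not portable across architectures). A minimal sketch with hypothetical names:

    #include <linux/io.h>

    static u8 my_read_reg(void __iomem *base, unsigned int off)
    {
            return readb(base + off);       /* not: *((u8 *)base + off) */
    }

    static void my_write_reg(void __iomem *base, unsigned int off, u8 val)
    {
            writeb(val, base + off);
    }
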
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 4d3b4b2f2612..12af628d9b2c 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -381,7 +381,7 @@ reset_hfcsx(struct IsdnCardState *cs)
Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1);
/* Clear already pending ints */
- if (Read_hfc(cs, HFCSX_INT_S1));
+ Read_hfc(cs, HFCSX_INT_S1);
Write_hfc(cs, HFCSX_STATES, HFCSX_LOAD_STATE | 2); /* HFC ST 2 */
udelay(10);
@@ -411,7 +411,7 @@ reset_hfcsx(struct IsdnCardState *cs)
/* Finally enable IRQ output */
cs->hw.hfcsx.int_m2 = HFCSX_IRQ_ENABLE;
Write_hfc(cs, HFCSX_INT_M2, cs->hw.hfcsx.int_m2);
- if (Read_hfc(cs, HFCSX_INT_S2));
+ Read_hfc(cs, HFCSX_INT_S2);
}
/***************************************************/
@@ -1288,7 +1288,7 @@ hfcsx_bh(struct work_struct *work)
cs->hw.hfcsx.int_m1 &= ~HFCSX_INTS_TIMER;
Write_hfc(cs, HFCSX_INT_M1, cs->hw.hfcsx.int_m1);
/* Clear already pending ints */
- if (Read_hfc(cs, HFCSX_INT_S1));
+ Read_hfc(cs, HFCSX_INT_S1);
Write_hfc(cs, HFCSX_STATES, 4 | HFCSX_LOAD_STATE);
udelay(10);
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 338d0408b377..40080e06421c 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -703,7 +703,7 @@ struct hfcPCI_hw {
unsigned char nt_mode;
int nt_timer;
struct pci_dev *dev;
- unsigned char *pci_io; /* start of PCI IO memory */
+ void __iomem *pci_io; /* start of PCI IO memory */
dma_addr_t dma; /* dma handle for Fifos */
void *fifos; /* FIFO memory */
int last_bfifo_cnt[2]; /* marker saving last b-fifo frame count */
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index c4be1644f5bb..36eefaa3a7d9 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -72,7 +72,7 @@ W6692_new_ph(struct IsdnCardState *cs)
case (W_L1CMD_RST):
ph_command(cs, W_L1CMD_DRC);
l1_msg(cs, HW_RESET | INDICATION, NULL);
- /* fallthru */
+ /* fall through */
case (W_L1IND_CD):
l1_msg(cs, HW_DEACTIVATE | CONFIRM, NULL);
break;
@@ -624,7 +624,7 @@ W6692_l1hw(struct PStack *st, int pr, void *arg)
break;
case (HW_RESET | REQUEST):
spin_lock_irqsave(&cs->lock, flags);
- if ((cs->dc.w6692.ph_state == W_L1IND_DRD)) {
+ if (cs->dc.w6692.ph_state == W_L1IND_DRD) {
ph_command(cs, W_L1CMD_ECK);
spin_unlock_irqrestore(&cs->lock, flags);
} else {
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index b730037a0e2d..1b2239c1d569 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1412,31 +1412,12 @@ static int
isdn_tty_ioctl(struct tty_struct *tty, uint cmd, ulong arg)
{
modem_info *info = (modem_info *) tty->driver_data;
- int retval;
if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_ioctl"))
return -ENODEV;
if (tty_io_error(tty))
return -EIO;
switch (cmd) {
- case TCSBRK: /* SVID version: non-zero arg --> no break */
-#ifdef ISDN_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "ttyI%d ioctl TCSBRK\n", info->line);
-#endif
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- return 0;
- case TCSBRKP: /* support for POSIX tcsendbreak() */
-#ifdef ISDN_DEBUG_MODEM_IOCTL
- printk(KERN_DEBUG "ttyI%d ioctl TCSBRKP\n", info->line);
-#endif
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- return 0;
case TIOCSERGETLSR: /* Get line status register */
#ifdef ISDN_DEBUG_MODEM_IOCTL
printk(KERN_DEBUG "ttyI%d ioctl TIOCSERGETLSR\n", info->line);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 18c0a1281914..15d3ca37669a 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -236,8 +236,7 @@ mISDN_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
}
done:
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
release_sock(sk);
return err;
}
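The "if (skb)" guard can go because kfree_skb() already returns early for a NULL skb. A hedged plain-C illustration of the same NULL-tolerant-free convention:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative only: a NULL-tolerant free, mirroring how kfree_skb()
     * lets callers drop the per-exit-path NULL guard. */
    static void nullsafe_free(void *p)
    {
            if (!p)
                    return;
            free(p);
    }

    int main(void)
    {
            char *buf = malloc(16);

            nullsafe_free(buf);
            nullsafe_free(NULL);    /* harmless, like kfree_skb(NULL) */
            return 0;
    }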
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index 12d9e5f4beb1..58635b5f296f 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -1180,8 +1180,7 @@ static int
ctrl_teimanager(struct manager *mgr, void *arg)
{
/* currently we only have one option */
- int *val = (int *)arg;
- int ret = 0;
+ unsigned int *val = (unsigned int *)arg;
switch (val[0]) {
case IMCLEAR_L2:
@@ -1197,9 +1196,9 @@ ctrl_teimanager(struct manager *mgr, void *arg)
test_and_clear_bit(OPTION_L1_HOLD, &mgr->options);
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
- return ret;
+ return 0;
}
/* This function does create a L2 for fixed TEI in NT Mode */
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 44097a3e0fcc..a72f97fca57b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -58,6 +58,16 @@ config LEDS_AAT1290
help
This option enables support for the LEDs on the AAT1290.
+config LEDS_AN30259A
+ tristate "LED support for Panasonic AN30259A"
+ depends on LEDS_CLASS && I2C && OF
+ help
+ This option enables support for the AN30259A 3-channel
+ LED driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-an30259a.
+
config LEDS_APU
tristate "Front panel LED support for PC Engines APU/APU2/APU3 boards"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 420b5d2cfa62..4c1b0054f379 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
obj-$(CONFIG_LEDS_APU) += leds-apu.o
obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o
+obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o
obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o
obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
new file mode 100644
index 000000000000..1c1f0c8c56f4
--- /dev/null
+++ b/drivers/leds/leds-an30259a.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Driver for Panasonic AN30259A 3-channel LED driver
+//
+// Copyright (c) 2018 Simon Shields <simon@lineageos.org>
+//
+// Datasheet:
+// https://www.alliedelec.com/m/d/a9d2b3ee87c2d1a535a41dd747b1c247.pdf
+
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <uapi/linux/uleds.h>
+
+#define AN30259A_MAX_LEDS 3
+
+#define AN30259A_REG_SRESET 0x00
+#define AN30259A_LED_SRESET BIT(0)
+
+/* LED power registers */
+#define AN30259A_REG_LED_ON 0x01
+#define AN30259A_LED_EN(x) BIT((x) - 1)
+#define AN30259A_LED_SLOPE(x) BIT(((x) - 1) + 4)
+
+#define AN30259A_REG_LEDCC(x) (0x03 + ((x) - 1))
+
+/* slope control registers */
+#define AN30259A_REG_SLOPE(x) (0x06 + ((x) - 1))
+#define AN30259A_LED_SLOPETIME1(x) (x)
+#define AN30259A_LED_SLOPETIME2(x) ((x) << 4)
+
+#define AN30259A_REG_LEDCNT1(x) (0x09 + (4 * ((x) - 1)))
+#define AN30259A_LED_DUTYMAX(x) ((x) << 4)
+#define AN30259A_LED_DUTYMID(x) (x)
+
+#define AN30259A_REG_LEDCNT2(x) (0x0A + (4 * ((x) - 1)))
+#define AN30259A_LED_DELAY(x) ((x) << 4)
+#define AN30259A_LED_DUTYMIN(x) (x)
+
+/* detention time control (length of each slope step) */
+#define AN30259A_REG_LEDCNT3(x) (0x0B + (4 * ((x) - 1)))
+#define AN30259A_LED_DT1(x) (x)
+#define AN30259A_LED_DT2(x) ((x) << 4)
+
+#define AN30259A_REG_LEDCNT4(x) (0x0C + (4 * ((x) - 1)))
+#define AN30259A_LED_DT3(x) (x)
+#define AN30259A_LED_DT4(x) ((x) << 4)
+
+#define AN30259A_REG_MAX 0x14
+
+#define AN30259A_BLINK_MAX_TIME 7500 /* ms */
+#define AN30259A_SLOPE_RESOLUTION 500 /* ms */
+
+#define STATE_OFF 0
+#define STATE_KEEP 1
+#define STATE_ON 2
+
+struct an30259a;
+
+struct an30259a_led {
+ struct an30259a *chip;
+ struct led_classdev cdev;
+ u32 num;
+ u32 default_state;
+ bool sloping;
+ char label[LED_MAX_NAME_SIZE];
+};
+
+struct an30259a {
+ struct mutex mutex; /* held when writing to registers */
+ struct i2c_client *client;
+ struct an30259a_led leds[AN30259A_MAX_LEDS];
+ struct regmap *regmap;
+ int num_leds;
+};
+
+static int an30259a_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct an30259a_led *led;
+ int ret;
+ unsigned int led_on;
+
+ led = container_of(cdev, struct an30259a_led, cdev);
+ mutex_lock(&led->chip->mutex);
+
+ ret = regmap_read(led->chip->regmap, AN30259A_REG_LED_ON, &led_on);
+ if (ret)
+ goto error;
+
+ switch (brightness) {
+ case LED_OFF:
+ led_on &= ~AN30259A_LED_EN(led->num);
+ led_on &= ~AN30259A_LED_SLOPE(led->num);
+ led->sloping = false;
+ break;
+ default:
+ led_on |= AN30259A_LED_EN(led->num);
+ if (led->sloping)
+ led_on |= AN30259A_LED_SLOPE(led->num);
+ ret = regmap_write(led->chip->regmap,
+ AN30259A_REG_LEDCNT1(led->num),
+ AN30259A_LED_DUTYMAX(0xf) |
+ AN30259A_LED_DUTYMID(0xf));
+ if (ret)
+ goto error;
+ break;
+ }
+
+ ret = regmap_write(led->chip->regmap, AN30259A_REG_LED_ON, led_on);
+ if (ret)
+ goto error;
+
+ ret = regmap_write(led->chip->regmap, AN30259A_REG_LEDCC(led->num),
+ brightness);
+
+error:
+ mutex_unlock(&led->chip->mutex);
+
+ return ret;
+}
+
+static int an30259a_blink_set(struct led_classdev *cdev,
+ unsigned long *delay_off, unsigned long *delay_on)
+{
+ struct an30259a_led *led;
+ int ret, num;
+ unsigned int led_on;
+ unsigned long off = *delay_off, on = *delay_on;
+
+ led = container_of(cdev, struct an30259a_led, cdev);
+
+ mutex_lock(&led->chip->mutex);
+ num = led->num;
+
+ /* slope time can only be a multiple of 500ms. */
+ if (off % AN30259A_SLOPE_RESOLUTION || on % AN30259A_SLOPE_RESOLUTION) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* up to a maximum of 7500ms. */
+ if (off > AN30259A_BLINK_MAX_TIME || on > AN30259A_BLINK_MAX_TIME) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* if no blink specified, default to 1 Hz. */
+ if (!off && !on) {
+ *delay_off = off = 500;
+ *delay_on = on = 500;
+ }
+
+ /* convert into values the HW will understand. */
+ off /= AN30259A_SLOPE_RESOLUTION;
+ on /= AN30259A_SLOPE_RESOLUTION;
+
+ /* duty min should be zero (=off), delay should be zero. */
+ ret = regmap_write(led->chip->regmap, AN30259A_REG_LEDCNT2(num),
+ AN30259A_LED_DELAY(0) | AN30259A_LED_DUTYMIN(0));
+ if (ret)
+ goto error;
+
+ /* reset detention time (no "breathing" effect). */
+ ret = regmap_write(led->chip->regmap, AN30259A_REG_LEDCNT3(num),
+ AN30259A_LED_DT1(0) | AN30259A_LED_DT2(0));
+ if (ret)
+ goto error;
+ ret = regmap_write(led->chip->regmap, AN30259A_REG_LEDCNT4(num),
+ AN30259A_LED_DT3(0) | AN30259A_LED_DT4(0));
+ if (ret)
+ goto error;
+
+ /* slope time controls on/off cycle length. */
+ ret = regmap_write(led->chip->regmap, AN30259A_REG_SLOPE(num),
+ AN30259A_LED_SLOPETIME1(off) |
+ AN30259A_LED_SLOPETIME2(on));
+ if (ret)
+ goto error;
+
+ /* Finally, enable slope mode. */
+ ret = regmap_read(led->chip->regmap, AN30259A_REG_LED_ON, &led_on);
+ if (ret)
+ goto error;
+
+ led_on |= AN30259A_LED_SLOPE(num) | AN30259A_LED_EN(led->num);
+
+ ret = regmap_write(led->chip->regmap, AN30259A_REG_LED_ON, led_on);
+
+ if (!ret)
+ led->sloping = true;
+error:
+ mutex_unlock(&led->chip->mutex);
+
+ return ret;
+}
+
+static int an30259a_dt_init(struct i2c_client *client,
+ struct an30259a *chip)
+{
+ struct device_node *np = client->dev.of_node, *child;
+ int count, ret;
+ int i = 0;
+ const char *str;
+ struct an30259a_led *led;
+
+ count = of_get_child_count(np);
+ if (!count || count > AN30259A_MAX_LEDS)
+ return -EINVAL;
+
+ for_each_available_child_of_node(np, child) {
+ u32 source;
+
+ ret = of_property_read_u32(child, "reg", &source);
+ if (ret != 0 || !source || source > AN30259A_MAX_LEDS) {
+ dev_err(&client->dev, "Couldn't read LED address: %d\n",
+ ret);
+ count--;
+ continue;
+ }
+
+ led = &chip->leds[i];
+
+ led->num = source;
+ led->chip = chip;
+
+ if (of_property_read_string(child, "label", &str))
+ snprintf(led->label, sizeof(led->label), "an30259a::");
+ else
+ snprintf(led->label, sizeof(led->label), "an30259a:%s",
+ str);
+
+ led->cdev.name = led->label;
+
+ if (!of_property_read_string(child, "default-state", &str)) {
+ if (!strcmp(str, "on"))
+ led->default_state = STATE_ON;
+ else if (!strcmp(str, "keep"))
+ led->default_state = STATE_KEEP;
+ else
+ led->default_state = STATE_OFF;
+ }
+
+ of_property_read_string(child, "linux,default-trigger",
+ &led->cdev.default_trigger);
+
+ i++;
+ }
+
+ if (!count)
+ return -EINVAL;
+
+ chip->num_leds = i;
+
+ return 0;
+}
+
+static const struct regmap_config an30259a_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AN30259A_REG_MAX,
+};
+
+static void an30259a_init_default_state(struct an30259a_led *led)
+{
+ struct an30259a *chip = led->chip;
+ int led_on, err;
+
+ switch (led->default_state) {
+ case STATE_ON:
+ led->cdev.brightness = LED_FULL;
+ break;
+ case STATE_KEEP:
+ err = regmap_read(chip->regmap, AN30259A_REG_LED_ON, &led_on);
+ if (err)
+ break;
+
+ if (!(led_on & AN30259A_LED_EN(led->num))) {
+ led->cdev.brightness = LED_OFF;
+ break;
+ }
+ regmap_read(chip->regmap, AN30259A_REG_LEDCC(led->num),
+ &led->cdev.brightness);
+ break;
+ default:
+ led->cdev.brightness = LED_OFF;
+ }
+
+ an30259a_brightness_set(&led->cdev, led->cdev.brightness);
+}
+
+static int an30259a_probe(struct i2c_client *client)
+{
+ struct an30259a *chip;
+ int i, err;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ err = an30259a_dt_init(client, chip);
+ if (err < 0)
+ return err;
+
+ mutex_init(&chip->mutex);
+ chip->client = client;
+ i2c_set_clientdata(client, chip);
+
+ chip->regmap = devm_regmap_init_i2c(client, &an30259a_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ err = PTR_ERR(chip->regmap);
+ goto exit; /* don't touch registers with a bad regmap */
+ }
+
+ for (i = 0; i < chip->num_leds; i++) {
+ an30259a_init_default_state(&chip->leds[i]);
+ chip->leds[i].cdev.brightness_set_blocking =
+ an30259a_brightness_set;
+ chip->leds[i].cdev.blink_set = an30259a_blink_set;
+
+ err = devm_led_classdev_register(&client->dev,
+ &chip->leds[i].cdev);
+ if (err < 0)
+ goto exit;
+ }
+ return 0;
+
+exit:
+ mutex_destroy(&chip->mutex);
+ return err;
+}
+
+static int an30259a_remove(struct i2c_client *client)
+{
+ struct an30259a *chip = i2c_get_clientdata(client);
+
+ mutex_destroy(&chip->mutex);
+
+ return 0;
+}
+
+static const struct of_device_id an30259a_match_table[] = {
+ { .compatible = "panasonic,an30259a", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, an30259a_match_table);
+
+static const struct i2c_device_id an30259a_id[] = {
+ { "an30259a", 0 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(i2c, an30259a_id);
+
+static struct i2c_driver an30259a_driver = {
+ .driver = {
+ .name = "leds-an30259a",
+ .of_match_table = of_match_ptr(an30259a_match_table),
+ },
+ .probe_new = an30259a_probe,
+ .remove = an30259a_remove,
+ .id_table = an30259a_id,
+};
+
+module_i2c_driver(an30259a_driver);
+
+MODULE_AUTHOR("Simon Shields <simon@lineageos.org>");
+MODULE_DESCRIPTION("AN30259A LED driver");
+MODULE_LICENSE("GPL v2");
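A hedged userspace sketch of driving the new chip through the stock "timer" trigger, which ends up in an30259a_blink_set() above: delays must be multiples of 500 ms with a 7500 ms ceiling, and unaligned values make blink_set() return -EINVAL (the LED core then falls back to software blinking). The LED name is hypothetical:

    #include <stdio.h>

    static int sysfs_write(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* Hypothetical LED name; dt_init() above defaults to "an30259a::". */
            const char *led = "/sys/class/leds/an30259a::";
            char p[256];

            snprintf(p, sizeof(p), "%s/trigger", led);
            sysfs_write(p, "timer");
            snprintf(p, sizeof(p), "%s/delay_on", led);
            sysfs_write(p, "500");          /* must be a multiple of 500 ms */
            snprintf(p, sizeof(p), "%s/delay_off", led);
            sysfs_write(p, "1000");         /* up to 7500 ms */
            return 0;
    }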
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index f883616d9e60..98a69b1a43f9 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -529,7 +529,7 @@ static int as3645a_parse_node(struct as3645a *flash,
strlcpy(names->flash, name, sizeof(names->flash));
else
snprintf(names->flash, sizeof(names->flash),
- "%s:flash", node->name);
+ "%pOFn:flash", node);
rval = of_property_read_u32(flash->flash_node, "flash-timeout-us",
&cfg->flash_timeout_us);
@@ -573,7 +573,7 @@ static int as3645a_parse_node(struct as3645a *flash,
strlcpy(names->indicator, name, sizeof(names->indicator));
else
snprintf(names->indicator, sizeof(names->indicator),
- "%s:indicator", node->name);
+ "%pOFn:indicator", node);
rval = of_property_read_u32(flash->indicator_node, "led-max-microamp",
&cfg->indicator_max_ua);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 764c31301f90..32fa752565bc 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -81,35 +81,6 @@ static int create_gpio_led(const struct gpio_led *template,
{
int ret, state;
- led_dat->gpiod = template->gpiod;
- if (!led_dat->gpiod) {
- /*
- * This is the legacy code path for platform code that
- * still uses GPIO numbers. Ultimately we would like to get
- * rid of this block completely.
- */
- unsigned long flags = GPIOF_OUT_INIT_LOW;
-
- /* skip leds that aren't available */
- if (!gpio_is_valid(template->gpio)) {
- dev_info(parent, "Skipping unavailable LED gpio %d (%s)\n",
- template->gpio, template->name);
- return 0;
- }
-
- if (template->active_low)
- flags |= GPIOF_ACTIVE_LOW;
-
- ret = devm_gpio_request_one(parent, template->gpio, flags,
- template->name);
- if (ret < 0)
- return ret;
-
- led_dat->gpiod = gpio_to_desc(template->gpio);
- if (!led_dat->gpiod)
- return -EINVAL;
- }
-
led_dat->cdev.name = template->name;
led_dat->cdev.default_trigger = template->default_trigger;
led_dat->can_sleep = gpiod_cansleep(led_dat->gpiod);
@@ -231,6 +202,52 @@ static const struct of_device_id of_gpio_leds_match[] = {
MODULE_DEVICE_TABLE(of, of_gpio_leds_match);
+static struct gpio_desc *gpio_led_get_gpiod(struct device *dev, int idx,
+ const struct gpio_led *template)
+{
+ struct gpio_desc *gpiod;
+ unsigned long flags = GPIOF_OUT_INIT_LOW;
+ int ret;
+
+ /*
+ * This means the LED does not come from the device tree
+ * or ACPI, so let's try just getting it by index from the
+ * device; this will hit the board file, if any, and get
+ * the GPIO from there.
+ */
+ gpiod = devm_gpiod_get_index(dev, NULL, idx, flags);
+ if (!IS_ERR(gpiod)) {
+ gpiod_set_consumer_name(gpiod, template->name);
+ return gpiod;
+ }
+ if (PTR_ERR(gpiod) != -ENOENT)
+ return gpiod;
+
+ /*
+ * This is the legacy code path for platform code that
+ * still uses GPIO numbers. Ultimately we would like to get
+ * rid of this block completely.
+ */
+
+ /* skip leds that aren't available */
+ if (!gpio_is_valid(template->gpio))
+ return ERR_PTR(-ENOENT);
+
+ if (template->active_low)
+ flags |= GPIOF_ACTIVE_LOW;
+
+ ret = devm_gpio_request_one(dev, template->gpio, flags,
+ template->name);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ gpiod = gpio_to_desc(template->gpio);
+ if (!gpiod)
+ return ERR_PTR(-EINVAL);
+
+ return gpiod;
+}
+
static int gpio_led_probe(struct platform_device *pdev)
{
struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -246,7 +263,22 @@ static int gpio_led_probe(struct platform_device *pdev)
priv->num_leds = pdata->num_leds;
for (i = 0; i < priv->num_leds; i++) {
- ret = create_gpio_led(&pdata->leds[i], &priv->leds[i],
+ const struct gpio_led *template = &pdata->leds[i];
+ struct gpio_led_data *led_dat = &priv->leds[i];
+
+ if (template->gpiod)
+ led_dat->gpiod = template->gpiod;
+ else
+ led_dat->gpiod =
+ gpio_led_get_gpiod(&pdev->dev,
+ i, template);
+ if (IS_ERR(led_dat->gpiod)) {
+ dev_info(&pdev->dev, "Skipping unavailable LED gpio %d (%s)\n",
+ template->gpio, template->name);
+ continue;
+ }
+
+ ret = create_gpio_led(template, led_dat,
&pdev->dev, NULL,
pdata->gpio_blink_set);
if (ret < 0)
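gpio_led_get_gpiod() reports failure through the ERR_PTR convention, so the probe loop above can distinguish "skip this LED" from hard errors with a single IS_ERR() test. A minimal self-contained model of that idiom (get_gpiod is a stand-in for the helper, and MAX_ERRNO mirrors include/linux/err.h):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095  /* mirrors include/linux/err.h */

    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    /* Stand-in for gpio_led_get_gpiod(): an unavailable GPIO comes back
     * as ERR_PTR(-ENOENT) rather than NULL. */
    static void *get_gpiod(int available)
    {
            static int dummy;

            return available ? (void *)&dummy : ERR_PTR(-ENOENT);
    }

    int main(void)
    {
            void *gpiod = get_gpiod(0);

            if (IS_ERR(gpiod))
                    printf("skipping unavailable LED (%ld)\n", PTR_ERR(gpiod));
            return 0;
    }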
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index df80c89ebe7f..5d3faae51d59 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -100,8 +100,9 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
led_data->pwm = devm_pwm_get(dev, led->name);
if (IS_ERR(led_data->pwm)) {
ret = PTR_ERR(led_data->pwm);
- dev_err(dev, "unable to request PWM for %s: %d\n",
- led->name, ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "unable to request PWM for %s: %d\n",
+ led->name, ret);
return ret;
}
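-EPROBE_DEFER is an expected outcome that the driver core retries later, so the hunk above keeps quiet about it and logs only real failures. A hedged sketch of the convention, with EPROBE_DEFER's value taken from include/linux/errno.h:

    #include <stdio.h>

    #define EPROBE_DEFER 517        /* value from include/linux/errno.h */

    /* Stand-in for devm_pwm_get(): a not-yet-ready resource defers the probe. */
    static int request_pwm(int ready)
    {
            return ready ? 0 : -EPROBE_DEFER;
    }

    int main(void)
    {
            int ret = request_pwm(0);

            /* Deferral is expected and retried, so only log real failures. */
            if (ret && ret != -EPROBE_DEFER)
                    fprintf(stderr, "unable to request PWM: %d\n", ret);
            return ret ? 1 : 0;
    }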
diff --git a/drivers/leds/leds-sc27xx-bltc.c b/drivers/leds/leds-sc27xx-bltc.c
index 9d9b7aab843f..fecf27fb1cdc 100644
--- a/drivers/leds/leds-sc27xx-bltc.c
+++ b/drivers/leds/leds-sc27xx-bltc.c
@@ -32,8 +32,18 @@
#define SC27XX_DUTY_MASK GENMASK(15, 0)
#define SC27XX_MOD_MASK GENMASK(7, 0)
+#define SC27XX_CURVE_SHIFT 8
+#define SC27XX_CURVE_L_MASK GENMASK(7, 0)
+#define SC27XX_CURVE_H_MASK GENMASK(15, 8)
+
#define SC27XX_LEDS_OFFSET 0x10
#define SC27XX_LEDS_MAX 3
+#define SC27XX_LEDS_PATTERN_CNT 4
+/* Stage duration step, in milliseconds */
+#define SC27XX_LEDS_STEP 125
+/* Minimum and maximum duration, in milliseconds */
+#define SC27XX_DELTA_T_MIN SC27XX_LEDS_STEP
+#define SC27XX_DELTA_T_MAX (SC27XX_LEDS_STEP * 255)
struct sc27xx_led {
char name[LED_MAX_NAME_SIZE];
@@ -122,6 +132,113 @@ static int sc27xx_led_set(struct led_classdev *ldev, enum led_brightness value)
return err;
}
+static void sc27xx_led_clamp_align_delta_t(u32 *delta_t)
+{
+ u32 v, offset, t = *delta_t;
+
+ v = t + SC27XX_LEDS_STEP / 2;
+ v = clamp_t(u32, v, SC27XX_DELTA_T_MIN, SC27XX_DELTA_T_MAX);
+ offset = v - SC27XX_DELTA_T_MIN;
+ offset = SC27XX_LEDS_STEP * (offset / SC27XX_LEDS_STEP);
+
+ *delta_t = SC27XX_DELTA_T_MIN + offset;
+}
+
+static int sc27xx_led_pattern_clear(struct led_classdev *ldev)
+{
+ struct sc27xx_led *leds = to_sc27xx_led(ldev);
+ struct regmap *regmap = leds->priv->regmap;
+ u32 base = sc27xx_led_get_offset(leds);
+ u32 ctrl_base = leds->priv->base + SC27XX_LEDS_CTRL;
+ u8 ctrl_shift = SC27XX_CTRL_SHIFT * leds->line;
+ int err;
+
+ mutex_lock(&leds->priv->lock);
+
+ /* Reset the rise, high, fall and low time to zero. */
+ regmap_write(regmap, base + SC27XX_LEDS_CURVE0, 0);
+ regmap_write(regmap, base + SC27XX_LEDS_CURVE1, 0);
+
+ err = regmap_update_bits(regmap, ctrl_base,
+ (SC27XX_LED_RUN | SC27XX_LED_TYPE) << ctrl_shift, 0);
+
+ ldev->brightness = LED_OFF;
+
+ mutex_unlock(&leds->priv->lock);
+
+ return err;
+}
+
+static int sc27xx_led_pattern_set(struct led_classdev *ldev,
+ struct led_pattern *pattern,
+ u32 len, int repeat)
+{
+ struct sc27xx_led *leds = to_sc27xx_led(ldev);
+ u32 base = sc27xx_led_get_offset(leds);
+ u32 ctrl_base = leds->priv->base + SC27XX_LEDS_CTRL;
+ u8 ctrl_shift = SC27XX_CTRL_SHIFT * leds->line;
+ struct regmap *regmap = leds->priv->regmap;
+ int err;
+
+ /*
+ * Must contain 4 tuples to configure the rise time, high time, fall
+ * time and low time to enable the breathing mode.
+ */
+ if (len != SC27XX_LEDS_PATTERN_CNT)
+ return -EINVAL;
+
+ mutex_lock(&leds->priv->lock);
+
+ sc27xx_led_clamp_align_delta_t(&pattern[0].delta_t);
+ err = regmap_update_bits(regmap, base + SC27XX_LEDS_CURVE0,
+ SC27XX_CURVE_L_MASK,
+ pattern[0].delta_t / SC27XX_LEDS_STEP);
+ if (err)
+ goto out;
+
+ sc27xx_led_clamp_align_delta_t(&pattern[1].delta_t);
+ err = regmap_update_bits(regmap, base + SC27XX_LEDS_CURVE1,
+ SC27XX_CURVE_L_MASK,
+ pattern[1].delta_t / SC27XX_LEDS_STEP);
+ if (err)
+ goto out;
+
+ sc27xx_led_clamp_align_delta_t(&pattern[2].delta_t);
+ err = regmap_update_bits(regmap, base + SC27XX_LEDS_CURVE0,
+ SC27XX_CURVE_H_MASK,
+ (pattern[2].delta_t / SC27XX_LEDS_STEP) <<
+ SC27XX_CURVE_SHIFT);
+ if (err)
+ goto out;
+
+ sc27xx_led_clamp_align_delta_t(&pattern[3].delta_t);
+ err = regmap_update_bits(regmap, base + SC27XX_LEDS_CURVE1,
+ SC27XX_CURVE_H_MASK,
+ (pattern[3].delta_t / SC27XX_LEDS_STEP) <<
+ SC27XX_CURVE_SHIFT);
+ if (err)
+ goto out;
+
+ err = regmap_update_bits(regmap, base + SC27XX_LEDS_DUTY,
+ SC27XX_DUTY_MASK,
+ (pattern[1].brightness << SC27XX_DUTY_SHIFT) |
+ SC27XX_MOD_MASK);
+ if (err)
+ goto out;
+
+ /* Enable the LED breathing mode */
+ err = regmap_update_bits(regmap, ctrl_base,
+ SC27XX_LED_RUN << ctrl_shift,
+ SC27XX_LED_RUN << ctrl_shift);
+ if (!err)
+ ldev->brightness = pattern[1].brightness;
+
+out:
+ mutex_unlock(&leds->priv->lock);
+
+ return err;
+}
+
static int sc27xx_led_register(struct device *dev, struct sc27xx_led_priv *priv)
{
int i, err;
@@ -140,6 +257,9 @@ static int sc27xx_led_register(struct device *dev, struct sc27xx_led_priv *priv)
led->priv = priv;
led->ldev.name = led->name;
led->ldev.brightness_set_blocking = sc27xx_led_set;
+ led->ldev.pattern_set = sc27xx_led_pattern_set;
+ led->ldev.pattern_clear = sc27xx_led_pattern_clear;
+ led->ldev.default_trigger = "pattern";
err = devm_led_classdev_register(dev, &led->ldev);
if (err)
@@ -241,4 +361,5 @@ module_platform_driver(sc27xx_led_driver);
MODULE_DESCRIPTION("Spreadtrum SC27xx breathing light controller driver");
MODULE_AUTHOR("Xiaotong Lu <xiaotong.lu@spreadtrum.com>");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org>");
MODULE_LICENSE("GPL v2");
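sc27xx_led_clamp_align_delta_t() rounds each requested stage duration to the nearest 125 ms hardware step and clamps it to the 125-31875 ms range before it is divided down into a register value. A standalone demo of that arithmetic, using the constants defined above:

    #include <stdint.h>
    #include <stdio.h>

    #define STEP  125u              /* SC27XX_LEDS_STEP */
    #define T_MIN STEP              /* SC27XX_DELTA_T_MIN */
    #define T_MAX (STEP * 255u)     /* SC27XX_DELTA_T_MAX */

    static uint32_t clamp_align(uint32_t t)
    {
            uint32_t v = t + STEP / 2;      /* round to the nearest step */

            if (v < T_MIN)
                    v = T_MIN;
            if (v > T_MAX)
                    v = T_MAX;
            return T_MIN + STEP * ((v - T_MIN) / STEP);
    }

    int main(void)
    {
            printf("60    -> %u\n", clamp_align(60));       /* clamps up to 125 */
            printf("190   -> %u\n", clamp_align(190));      /* rounds to 250 */
            printf("99999 -> %u\n", clamp_align(99999));    /* clamps to 31875 */
            return 0;
    }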
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 4018af769969..b76fc3cdc8f8 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -129,4 +129,11 @@ config LEDS_TRIGGER_NETDEV
This allows LEDs to be controlled by network device activity.
If unsure, say Y.
+config LEDS_TRIGGER_PATTERN
+ tristate "LED Pattern Trigger"
+ help
+ This allows LEDs to be controlled by a software or hardware pattern
+ given as a series of brightness/duration (ms) tuples.
+ If unsure, say N.
+
endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index f3cfe1950538..9bcb64ee8123 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
obj-$(CONFIG_LEDS_TRIGGER_PANIC) += ledtrig-panic.o
obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
+obj-$(CONFIG_LEDS_TRIGGER_PATTERN) += ledtrig-pattern.o
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
new file mode 100644
index 000000000000..ce7acd115dd8
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * LED pattern trigger
+ *
+ * Idea discussed with Pavel Machek. Raphael Teysseyre implemented
+ * the first version, Baolin Wang simplified and improved the approach.
+ */
+
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#define MAX_PATTERNS 1024
+/*
+ * When doing gradual dimming, the led brightness will be updated
+ * every 50 milliseconds.
+ */
+#define UPDATE_INTERVAL 50
+
+struct pattern_trig_data {
+ struct led_classdev *led_cdev;
+ struct led_pattern patterns[MAX_PATTERNS];
+ struct led_pattern *curr;
+ struct led_pattern *next;
+ struct mutex lock;
+ u32 npatterns;
+ int repeat;
+ int last_repeat;
+ int delta_t;
+ bool is_indefinite;
+ bool is_hw_pattern;
+ struct timer_list timer;
+};
+
+static void pattern_trig_update_patterns(struct pattern_trig_data *data)
+{
+ data->curr = data->next;
+ if (!data->is_indefinite && data->curr == data->patterns)
+ data->repeat--;
+
+ if (data->next == data->patterns + data->npatterns - 1)
+ data->next = data->patterns;
+ else
+ data->next++;
+
+ data->delta_t = 0;
+}
+
+static int pattern_trig_compute_brightness(struct pattern_trig_data *data)
+{
+ int step_brightness;
+
+ /*
+ * If current tuple's duration is less than the dimming interval,
+ * we should treat it as a step change of brightness instead of
+ * doing gradual dimming.
+ */
+ if (data->delta_t == 0 || data->curr->delta_t < UPDATE_INTERVAL)
+ return data->curr->brightness;
+
+ step_brightness = abs(data->next->brightness - data->curr->brightness);
+ step_brightness = data->delta_t * step_brightness / data->curr->delta_t;
+
+ if (data->next->brightness > data->curr->brightness)
+ return data->curr->brightness + step_brightness;
+ else
+ return data->curr->brightness - step_brightness;
+}
+
+static void pattern_trig_timer_function(struct timer_list *t)
+{
+ struct pattern_trig_data *data = from_timer(data, t, timer);
+
+ mutex_lock(&data->lock);
+
+ for (;;) {
+ if (!data->is_indefinite && !data->repeat)
+ break;
+
+ if (data->curr->brightness == data->next->brightness) {
+ /* Step change of brightness */
+ led_set_brightness(data->led_cdev,
+ data->curr->brightness);
+ mod_timer(&data->timer,
+ jiffies + msecs_to_jiffies(data->curr->delta_t));
+
+ /* Skip the tuple with zero duration */
+ pattern_trig_update_patterns(data);
+ /* Select next tuple */
+ pattern_trig_update_patterns(data);
+ } else {
+ /* Gradual dimming */
+
+ /*
+ * If the accumulated time exceeds the current
+ * tuple's duration, move on to the next tuple and
+ * re-check whether the repeat count is exhausted.
+ */
+ if (data->delta_t > data->curr->delta_t) {
+ pattern_trig_update_patterns(data);
+ continue;
+ }
+
+ led_set_brightness(data->led_cdev,
+ pattern_trig_compute_brightness(data));
+ mod_timer(&data->timer,
+ jiffies + msecs_to_jiffies(UPDATE_INTERVAL));
+
+ /* Accumulate the gradual dimming time */
+ data->delta_t += UPDATE_INTERVAL;
+ }
+
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+}
+
+static int pattern_trig_start_pattern(struct led_classdev *led_cdev)
+{
+ struct pattern_trig_data *data = led_cdev->trigger_data;
+
+ if (!data->npatterns)
+ return 0;
+
+ if (data->is_hw_pattern) {
+ return led_cdev->pattern_set(led_cdev, data->patterns,
+ data->npatterns, data->repeat);
+ }
+
+ /* At least 2 tuples for software pattern. */
+ if (data->npatterns < 2)
+ return -EINVAL;
+
+ data->delta_t = 0;
+ data->curr = data->patterns;
+ data->next = data->patterns + 1;
+ data->timer.expires = jiffies;
+ add_timer(&data->timer);
+
+ return 0;
+}
+
+static ssize_t repeat_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct pattern_trig_data *data = led_cdev->trigger_data;
+ int repeat;
+
+ mutex_lock(&data->lock);
+
+ repeat = data->last_repeat;
+
+ mutex_unlock(&data->lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", repeat);
+}
+
+static ssize_t repeat_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct pattern_trig_data *data = led_cdev->trigger_data;
+ int err, res;
+
+ err = kstrtos32(buf, 10, &res);
+ if (err)
+ return err;
+
+ /* Zero and negative numbers other than -1 are invalid. */
+ if (res < -1 || res == 0)
+ return -EINVAL;
+
+ /*
+ * Clear the previous patterns first, and delete the timer
+ * without holding the mutex to avoid a deadlock.
+ */
+ del_timer_sync(&data->timer);
+
+ mutex_lock(&data->lock);
+
+ if (data->is_hw_pattern)
+ led_cdev->pattern_clear(led_cdev);
+
+ data->last_repeat = data->repeat = res;
+ /* -1 means repeat indefinitely */
+ if (data->repeat == -1)
+ data->is_indefinite = true;
+ else
+ data->is_indefinite = false;
+
+ err = pattern_trig_start_pattern(led_cdev);
+
+ mutex_unlock(&data->lock);
+ return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR_RW(repeat);
+
+static ssize_t pattern_trig_show_patterns(struct pattern_trig_data *data,
+ char *buf, bool hw_pattern)
+{
+ ssize_t count = 0;
+ int i;
+
+ mutex_lock(&data->lock);
+
+ if (!data->npatterns || (data->is_hw_pattern ^ hw_pattern))
+ goto out;
+
+ for (i = 0; i < data->npatterns; i++) {
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "%d %u ",
+ data->patterns[i].brightness,
+ data->patterns[i].delta_t);
+ }
+
+ buf[count - 1] = '\n';
+
+out:
+ mutex_unlock(&data->lock);
+ return count;
+}
+
+static ssize_t pattern_trig_store_patterns(struct led_classdev *led_cdev,
+ const char *buf, size_t count,
+ bool hw_pattern)
+{
+ struct pattern_trig_data *data = led_cdev->trigger_data;
+ int ccount, cr, offset = 0, err = 0;
+
+ /*
+ * Clear the previous patterns first, and delete the timer
+ * without holding the mutex to avoid a deadlock.
+ */
+ del_timer_sync(&data->timer);
+
+ mutex_lock(&data->lock);
+
+ if (data->is_hw_pattern)
+ led_cdev->pattern_clear(led_cdev);
+
+ data->is_hw_pattern = hw_pattern;
+ data->npatterns = 0;
+
+ while (offset < count - 1 && data->npatterns < MAX_PATTERNS) {
+ cr = 0;
+ ccount = sscanf(buf + offset, "%d %u %n",
+ &data->patterns[data->npatterns].brightness,
+ &data->patterns[data->npatterns].delta_t, &cr);
+ if (ccount != 2) {
+ data->npatterns = 0;
+ err = -EINVAL;
+ goto out;
+ }
+
+ offset += cr;
+ data->npatterns++;
+ }
+
+ err = pattern_trig_start_pattern(led_cdev);
+ if (err)
+ data->npatterns = 0;
+
+out:
+ mutex_unlock(&data->lock);
+ return err < 0 ? err : count;
+}
+
+static ssize_t pattern_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct pattern_trig_data *data = led_cdev->trigger_data;
+
+ return pattern_trig_show_patterns(data, buf, false);
+}
+
+static ssize_t pattern_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ return pattern_trig_store_patterns(led_cdev, buf, count, false);
+}
+
+static DEVICE_ATTR_RW(pattern);
+
+static ssize_t hw_pattern_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct pattern_trig_data *data = led_cdev->trigger_data;
+
+ return pattern_trig_show_patterns(data, buf, true);
+}
+
+static ssize_t hw_pattern_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ return pattern_trig_store_patterns(led_cdev, buf, count, true);
+}
+
+static DEVICE_ATTR_RW(hw_pattern);
+
+static umode_t pattern_trig_attrs_mode(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ if (attr == &dev_attr_repeat.attr || attr == &dev_attr_pattern.attr)
+ return attr->mode;
+ else if (attr == &dev_attr_hw_pattern.attr && led_cdev->pattern_set)
+ return attr->mode;
+
+ return 0;
+}
+
+static struct attribute *pattern_trig_attrs[] = {
+ &dev_attr_pattern.attr,
+ &dev_attr_hw_pattern.attr,
+ &dev_attr_repeat.attr,
+ NULL
+};
+
+static const struct attribute_group pattern_trig_group = {
+ .attrs = pattern_trig_attrs,
+ .is_visible = pattern_trig_attrs_mode,
+};
+
+static const struct attribute_group *pattern_trig_groups[] = {
+ &pattern_trig_group,
+ NULL,
+};
+
+static int pattern_trig_activate(struct led_classdev *led_cdev)
+{
+ struct pattern_trig_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (!!led_cdev->pattern_set ^ !!led_cdev->pattern_clear) {
+ dev_warn(led_cdev->dev,
+ "Hardware pattern ops validation failed\n");
+ led_cdev->pattern_set = NULL;
+ led_cdev->pattern_clear = NULL;
+ }
+
+ data->is_indefinite = true;
+ data->last_repeat = -1;
+ mutex_init(&data->lock);
+ data->led_cdev = led_cdev;
+ led_set_trigger_data(led_cdev, data);
+ timer_setup(&data->timer, pattern_trig_timer_function, 0);
+ led_cdev->activated = true;
+
+ return 0;
+}
+
+static void pattern_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct pattern_trig_data *data = led_cdev->trigger_data;
+
+ if (!led_cdev->activated)
+ return;
+
+ if (led_cdev->pattern_clear)
+ led_cdev->pattern_clear(led_cdev);
+
+ del_timer_sync(&data->timer);
+
+ led_set_brightness(led_cdev, LED_OFF);
+ kfree(data);
+ led_cdev->activated = false;
+}
+
+static struct led_trigger pattern_led_trigger = {
+ .name = "pattern",
+ .activate = pattern_trig_activate,
+ .deactivate = pattern_trig_deactivate,
+ .groups = pattern_trig_groups,
+};
+
+static int __init pattern_trig_init(void)
+{
+ return led_trigger_register(&pattern_led_trigger);
+}
+
+static void __exit pattern_trig_exit(void)
+{
+ led_trigger_unregister(&pattern_led_trigger);
+}
+
+module_init(pattern_trig_init);
+module_exit(pattern_trig_exit);
+
+MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org");
+MODULE_DESCRIPTION("LED Pattern trigger");
+MODULE_LICENSE("GPL v2");
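A hedged usage sketch for the new trigger: a software pattern is written as "brightness duration_ms" pairs (at least two), repeat = -1 loops indefinitely, and pattern_trig_timer_function() interpolates between tuples of differing brightness every 50 ms for the gradual-dimming effect. The LED name is hypothetical:

    #include <stdio.h>

    static int sysfs_write(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            const char *led = "/sys/class/leds/status";     /* hypothetical */
            char p[256];

            snprintf(p, sizeof(p), "%s/trigger", led);
            sysfs_write(p, "pattern");
            /* Two tuples: ramp 0 -> 255 over 1 s, then 255 -> 0 over 1 s. */
            snprintf(p, sizeof(p), "%s/pattern", led);
            sysfs_write(p, "0 1000 255 1000");
            snprintf(p, sizeof(p), "%s/repeat", led);
            sysfs_write(p, "-1");   /* -1 repeats indefinitely */
            return 0;
    }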
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 439bf90d084d..a872cd720967 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -4,8 +4,7 @@
menuconfig NVM
bool "Open-Channel SSD target support"
- depends on BLOCK && PCI
- select BLK_DEV_NVME
+ depends on BLOCK
help
Say Y here to enable Open-channel SSDs.
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 60aa7bc5a630..efb976a863d2 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -355,6 +355,11 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
return -EINVAL;
}
+ if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
+ pr_err("nvm: device is incompatible with target L2P type.\n");
+ return -EINVAL;
+ }
+
if (nvm_target_exists(create->tgtname)) {
pr_err("nvm: target name already exists (%s)\n",
create->tgtname);
@@ -598,22 +603,16 @@ static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
- if (rqd->nr_ppas == 1) {
- nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
- return;
- }
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
- nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+ nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}
static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
- if (rqd->nr_ppas == 1) {
- nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
- return;
- }
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
- nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+ nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
@@ -712,45 +711,23 @@ static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
-int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct nvm_chk_meta *meta,
- struct ppa_addr ppa, int nchks)
+static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
- struct nvm_dev *dev = tgt_dev->parent;
-
- nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
-
- return dev->ops->get_chk_meta(tgt_dev->parent, meta,
- (sector_t)ppa.ppa, nchks);
-}
-EXPORT_SYMBOL(nvm_get_chunk_meta);
-
-int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
- int nr_ppas, int type)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_rq rqd;
- int ret;
+ int flags = 0;
- if (nr_ppas > NVM_MAX_VLBA) {
- pr_err("nvm: unable to update all blocks atomically\n");
- return -EINVAL;
- }
+ if (geo->version == NVM_OCSSD_SPEC_20)
+ return 0;
- memset(&rqd, 0, sizeof(struct nvm_rq));
+ if (rqd->is_seq)
+ flags |= geo->pln_mode >> 1;
- nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
- nvm_rq_tgt_to_dev(tgt_dev, &rqd);
+ if (rqd->opcode == NVM_OP_PREAD)
+ flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
+ else if (rqd->opcode == NVM_OP_PWRITE)
+ flags |= NVM_IO_SCRAMBLE_ENABLE;
- ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
- nvm_free_rqd_ppalist(tgt_dev, &rqd);
- if (ret) {
- pr_err("nvm: failed bb mark\n");
- return -EINVAL;
- }
-
- return 0;
+ return flags;
}
-EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
@@ -763,6 +740,7 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
nvm_rq_tgt_to_dev(tgt_dev, rqd);
rqd->dev = tgt_dev;
+ rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
/* In case of error, fail with right address format */
ret = dev->ops->submit_io(dev, rqd);
@@ -783,6 +761,7 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
nvm_rq_tgt_to_dev(tgt_dev, rqd);
rqd->dev = tgt_dev;
+ rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
/* In case of error, fail with right address format */
ret = dev->ops->submit_io_sync(dev, rqd);
@@ -805,27 +784,159 @@ void nvm_end_io(struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_end_io);
+static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+ if (!dev->ops->submit_io_sync)
+ return -ENODEV;
+
+ rqd->flags = nvm_set_flags(&dev->geo, rqd);
+
+ return dev->ops->submit_io_sync(dev, rqd);
+}
+
+static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
+{
+ struct nvm_rq rqd = { NULL };
+ struct bio bio;
+ struct bio_vec bio_vec;
+ struct page *page;
+ int ret;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ bio_init(&bio, &bio_vec, 1);
+ bio_add_page(&bio, page, PAGE_SIZE, 0);
+ bio_set_op_attrs(&bio, REQ_OP_READ, 0);
+
+ rqd.bio = &bio;
+ rqd.opcode = NVM_OP_PREAD;
+ rqd.is_seq = 1;
+ rqd.nr_ppas = 1;
+ rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
+
+ ret = nvm_submit_io_sync_raw(dev, &rqd);
+ if (ret)
+ return ret;
+
+ __free_page(page);
+
+ return rqd.error;
+}
+
/*
- * folds a bad block list from its plane representation to its virtual
- * block representation. The fold is done in place and reduced size is
- * returned.
- *
- * If any of the planes status are bad or grown bad block, the virtual block
- * is marked bad. If not bad, the first plane state acts as the block state.
+ * Scans a 1.2 chunk's first and last page to determine its state.
+ * If the chunk is found to be open, also scan it to update the write
+ * pointer.
*/
-int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
+ struct nvm_chk_meta *meta)
{
struct nvm_geo *geo = &dev->geo;
- int blk, offset, pl, blktype;
+ int ret, pg, pl;
- if (nr_blks != geo->num_chk * geo->pln_mode)
- return -EINVAL;
+ /* sense first page */
+ ret = nvm_bb_chunk_sense(dev, ppa);
+ if (ret < 0) /* io error */
+ return ret;
+ else if (ret == 0) /* valid data */
+ meta->state = NVM_CHK_ST_OPEN;
+ else if (ret > 0) {
+ /*
+ * If empty page, the chunk is free, else it is an
+ * actual io error. In that case, mark it offline.
+ */
+ switch (ret) {
+ case NVM_RSP_ERR_EMPTYPAGE:
+ meta->state = NVM_CHK_ST_FREE;
+ return 0;
+ case NVM_RSP_ERR_FAILCRC:
+ case NVM_RSP_ERR_FAILECC:
+ case NVM_RSP_WARN_HIGHECC:
+ meta->state = NVM_CHK_ST_OPEN;
+ goto scan;
+ default:
+ return -ret; /* other io error */
+ }
+ }
+
+ /* sense last page */
+ ppa.g.pg = geo->num_pg - 1;
+ ppa.g.pl = geo->num_pln - 1;
+
+ ret = nvm_bb_chunk_sense(dev, ppa);
+ if (ret < 0) /* io error */
+ return ret;
+ else if (ret == 0) { /* Chunk fully written */
+ meta->state = NVM_CHK_ST_CLOSED;
+ meta->wp = geo->clba;
+ return 0;
+ } else if (ret > 0) {
+ switch (ret) {
+ case NVM_RSP_ERR_EMPTYPAGE:
+ case NVM_RSP_ERR_FAILCRC:
+ case NVM_RSP_ERR_FAILECC:
+ case NVM_RSP_WARN_HIGHECC:
+ meta->state = NVM_CHK_ST_OPEN;
+ break;
+ default:
+ return -ret; /* other io error */
+ }
+ }
+
+scan:
+ /*
+ * chunk is open, we scan sequentially to update the write pointer.
+ * We make the assumption that targets write data across all planes
+ * before moving to the next page.
+ */
+ for (pg = 0; pg < geo->num_pg; pg++) {
+ for (pl = 0; pl < geo->num_pln; pl++) {
+ ppa.g.pg = pg;
+ ppa.g.pl = pl;
+
+ ret = nvm_bb_chunk_sense(dev, ppa);
+ if (ret < 0) /* io error */
+ return ret;
+ else if (ret == 0) {
+ meta->wp += geo->ws_min;
+ } else if (ret > 0) {
+ switch (ret) {
+ case NVM_RSP_ERR_EMPTYPAGE:
+ return 0;
+ case NVM_RSP_ERR_FAILCRC:
+ case NVM_RSP_ERR_FAILECC:
+ case NVM_RSP_WARN_HIGHECC:
+ meta->wp += geo->ws_min;
+ break;
+ default:
+ return -ret; /* other io error */
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * folds a bad block list from its plane representation to its
+ * chunk representation.
+ *
+ * If any of the planes status are bad or grown bad, the chunk is marked
+ * offline. If not bad, the first plane state acts as the chunk state.
+ */
+static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
+{
+ struct nvm_geo *geo = &dev->geo;
+ int ret, blk, pl, offset, blktype;
for (blk = 0; blk < geo->num_chk; blk++) {
offset = blk * geo->pln_mode;
blktype = blks[offset];
- /* Bad blocks on any planes take precedence over other types */
for (pl = 0; pl < geo->pln_mode; pl++) {
if (blks[offset + pl] &
(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
@@ -834,23 +945,124 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
}
}
- blks[blk] = blktype;
+ ppa.g.blk = blk;
+
+ meta->wp = 0;
+ meta->type = NVM_CHK_TP_W_SEQ;
+ meta->wi = 0;
+ meta->slba = generic_to_dev_addr(dev, ppa).ppa;
+ meta->cnlb = dev->geo.clba;
+
+ if (blktype == NVM_BLK_T_FREE) {
+ ret = nvm_bb_chunk_scan(dev, ppa, meta);
+ if (ret)
+ return ret;
+ } else {
+ meta->state = NVM_CHK_ST_OFFLINE;
+ }
+
+ meta++;
}
- return geo->num_chk;
+ return 0;
+}
+
+static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
+ int nchks, struct nvm_chk_meta *meta)
+{
+ struct nvm_geo *geo = &dev->geo;
+ struct ppa_addr ppa;
+ u8 *blks;
+ int ch, lun, nr_blks;
+ int ret;
+
+ ppa.ppa = slba;
+ ppa = dev_to_generic_addr(dev, ppa);
+
+ if (ppa.g.blk != 0)
+ return -EINVAL;
+
+ if ((nchks % geo->num_chk) != 0)
+ return -EINVAL;
+
+ nr_blks = geo->num_chk * geo->pln_mode;
+
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
+
+ for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
+ for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
+ struct ppa_addr ppa_gen, ppa_dev;
+
+ if (!nchks)
+ goto done;
+
+ ppa_gen.ppa = 0;
+ ppa_gen.g.ch = ch;
+ ppa_gen.g.lun = lun;
+ ppa_dev = generic_to_dev_addr(dev, ppa_gen);
+
+ ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
+ if (ret)
+ goto done;
+
+ ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
+ meta);
+ if (ret)
+ goto done;
+
+ meta += geo->num_chk;
+ nchks -= geo->num_chk;
+ }
+ }
+done:
+ kfree(blks);
+ return ret;
}
-EXPORT_SYMBOL(nvm_bb_tbl_fold);
-int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
- u8 *blks)
+int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
+ int nchks, struct nvm_chk_meta *meta)
{
struct nvm_dev *dev = tgt_dev->parent;
nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
- return dev->ops->get_bb_tbl(dev, ppa, blks);
+ if (dev->geo.version == NVM_OCSSD_SPEC_12)
+ return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);
+
+ return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
+}
+EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);
+
+int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
+ int nr_ppas, int type)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_rq rqd;
+ int ret;
+
+ if (dev->geo.version == NVM_OCSSD_SPEC_20)
+ return 0;
+
+ if (nr_ppas > NVM_MAX_VLBA) {
+ pr_err("nvm: unable to update all blocks atomically\n");
+ return -EINVAL;
+ }
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
+ nvm_rq_tgt_to_dev(tgt_dev, &rqd);
+
+ ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+ nvm_free_rqd_ppalist(tgt_dev, &rqd);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
}
-EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
+EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
static int nvm_core_init(struct nvm_dev *dev)
{
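With get/set_bb_tbl folded behind nvm_get_chunk_meta()/nvm_set_chunk_meta(), a 1.2 device's bad-block table is translated into 2.0-style chunk metadata, and an open chunk's write pointer is recovered by sensing pages in order. A toy model of that scan under the stated assumptions (sense() is a stand-in for nvm_bb_chunk_sense(), returning 0 for valid data and 1 for an empty page; the geometry constants are made up):

    #include <stdio.h>

    #define WS_MIN 4        /* sectors advanced per written page, assumed */
    #define NUM_PG 8        /* pages per chunk in this toy geometry */

    /* Stand-in for nvm_bb_chunk_sense(): 0 = valid data, 1 = empty page. */
    static int sense(int pg)
    {
            return pg < 5 ? 0 : 1;  /* pretend the first 5 pages hold data */
    }

    int main(void)
    {
            unsigned int wp = 0;
            int pg;

            for (pg = 0; pg < NUM_PG; pg++) {
                    if (sense(pg))
                            break;          /* empty page: chunk is open, stop */
                    wp += WS_MIN;           /* valid data advances the pointer */
            }
            printf("recovered write pointer: %u sectors\n", wp);
            return 0;
    }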
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index f565a56b898a..c9fa26f95659 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 00984b486fea..6944aac43b01 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -16,7 +17,10 @@
*
*/
+#define CREATE_TRACE_POINTS
+
#include "pblk.h"
+#include "pblk-trace.h"
static void pblk_line_mark_bb(struct work_struct *work)
{
@@ -27,12 +31,12 @@ static void pblk_line_mark_bb(struct work_struct *work)
struct ppa_addr *ppa = line_ws->priv;
int ret;
- ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
+ ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
if (ret) {
struct pblk_line *line;
int pos;
- line = &pblk->lines[pblk_ppa_to_line(*ppa)];
+ line = pblk_ppa_to_line(pblk, *ppa);
pos = pblk_ppa_to_pos(&dev->geo, *ppa);
pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
@@ -80,19 +84,28 @@ static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
struct pblk_line *line;
int pos;
- line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
+ line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
chunk = &line->chks[pos];
atomic_dec(&line->left_seblks);
if (rqd->error) {
+ trace_pblk_chunk_reset(pblk_disk_name(pblk),
+ &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
+
chunk->state = NVM_CHK_ST_OFFLINE;
pblk_mark_bb(pblk, line, rqd->ppa_addr);
} else {
+ trace_pblk_chunk_reset(pblk_disk_name(pblk),
+ &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
+
chunk->state = NVM_CHK_ST_FREE;
}
+ trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
+ chunk->state);
+
atomic_dec(&pblk->inflight_io);
}
@@ -108,9 +121,9 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
/*
* Get information for all chunks from the device.
*
- * The caller is responsible for freeing the returned structure
+ * The caller is responsible for freeing the returned structure with vfree()
*/
-struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
+struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
@@ -122,11 +135,11 @@ struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
ppa.ppa = 0;
len = geo->all_chunks * sizeof(*meta);
- meta = kzalloc(len, GFP_KERNEL);
+ meta = vzalloc(len);
if (!meta)
return ERR_PTR(-ENOMEM);
- ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
+ ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
if (ret) {
- kfree(meta);
+ vfree(meta);
return ERR_PTR(-EIO);
@@ -192,7 +205,6 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
struct pblk_line *line;
u64 paddr;
- int line_id;
#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a device address */
@@ -200,8 +212,7 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
BUG_ON(pblk_ppa_empty(ppa));
#endif
- line_id = pblk_ppa_to_line(ppa);
- line = &pblk->lines[line_id];
+ line = pblk_ppa_to_line(pblk, ppa);
paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
__pblk_map_invalidate(pblk, line, paddr);
@@ -227,6 +238,33 @@ static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
spin_unlock(&pblk->trans_lock);
}
+int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+
+ rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+ &rqd->dma_meta_list);
+ if (!rqd->meta_list)
+ return -ENOMEM;
+
+ if (rqd->nr_ppas == 1)
+ return 0;
+
+ rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
+ rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
+
+ return 0;
+}
+
+void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+
+ if (rqd->meta_list)
+ nvm_dev_dma_free(dev->parent, rqd->meta_list,
+ rqd->dma_meta_list);
+}
+
/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
@@ -258,7 +296,6 @@ struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
- struct nvm_tgt_dev *dev = pblk->dev;
mempool_t *pool;
switch (type) {
@@ -279,9 +316,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
return;
}
- if (rqd->meta_list)
- nvm_dev_dma_free(dev->parent, rqd->meta_list,
- rqd->dma_meta_list);
+ pblk_free_rqd_meta(pblk, rqd);
mempool_free(rqd, pool);
}
@@ -409,6 +444,9 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
}
} else {
line->state = PBLK_LINESTATE_CORRUPT;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
+
line->gc_group = PBLK_LINEGC_NONE;
move_list = &l_mg->corrupt_list;
pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
@@ -479,9 +517,30 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
return nvm_submit_io(dev, rqd);
}
+void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
+
+ int i;
+
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ struct ppa_addr *ppa = &ppa_list[i];
+ struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
+ u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
+
+ if (caddr == 0)
+ trace_pblk_chunk_state(pblk_disk_name(pblk),
+ ppa, NVM_CHK_ST_OPEN);
+ else if (caddr == chunk->cnlb)
+ trace_pblk_chunk_state(pblk_disk_name(pblk),
+ ppa, NVM_CHK_ST_CLOSED);
+ }
+}
+
int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ int ret;
atomic_inc(&pblk->inflight_io);
@@ -490,7 +549,27 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
return NVM_IO_ERR;
#endif
- return nvm_submit_io_sync(dev, rqd);
+ ret = nvm_submit_io_sync(dev, rqd);
+
+ if (trace_pblk_chunk_state_enabled() && !ret &&
+ rqd->opcode == NVM_OP_PWRITE)
+ pblk_check_chunk_state_update(pblk, rqd);
+
+ return ret;
+}
+
+int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct ppa_addr *ppa_list;
+ int ret;
+
+ ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+
+ pblk_down_chunk(pblk, ppa_list[0]);
+ ret = pblk_submit_io_sync(pblk, rqd);
+ pblk_up_chunk(pblk, ppa_list[0]);
+
+ return ret;
}
static void pblk_bio_map_addr_endio(struct bio *bio)
@@ -621,262 +700,227 @@ u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
return paddr;
}
-/*
- * Submit emeta to one LUN in the raid line at the time to avoid a deadlock when
- * taking the per LUN semaphore.
- */
-static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
- void *emeta_buf, u64 paddr, int dir)
+u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
- void *ppa_list, *meta_list;
- struct bio *bio;
- struct nvm_rq rqd;
- dma_addr_t dma_ppa_list, dma_meta_list;
- int min = pblk->min_write_pgs;
- int left_ppas = lm->emeta_sec[0];
- int id = line->id;
- int rq_ppas, rq_len;
- int cmd_op, bio_op;
- int i, j;
- int ret;
+ int bit;
- if (dir == PBLK_WRITE) {
- bio_op = REQ_OP_WRITE;
- cmd_op = NVM_OP_PWRITE;
- } else if (dir == PBLK_READ) {
- bio_op = REQ_OP_READ;
- cmd_op = NVM_OP_PREAD;
- } else
- return -EINVAL;
+ /* This usually only happens on bad lines */
+ bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
+ if (bit >= lm->blk_per_line)
+ return -1;
- meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &dma_meta_list);
- if (!meta_list)
- return -ENOMEM;
+ return bit * geo->ws_opt;
+}
- ppa_list = meta_list + pblk_dma_meta_size;
- dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct pblk_line_meta *lm = &pblk->lm;
+ struct bio *bio;
+ struct nvm_rq rqd;
+ u64 paddr = pblk_line_smeta_start(pblk, line);
+ int i, ret;
-next_rq:
memset(&rqd, 0, sizeof(struct nvm_rq));
- rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
- rq_len = rq_ppas * geo->csecs;
+ ret = pblk_alloc_rqd_meta(pblk, &rqd);
+ if (ret)
+ return ret;
- bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
- l_mg->emeta_alloc_type, GFP_KERNEL);
+ bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto free_rqd_dma;
+ goto clear_rqd;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, bio_op, 0);
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
rqd.bio = bio;
- rqd.meta_list = meta_list;
- rqd.ppa_list = ppa_list;
- rqd.dma_meta_list = dma_meta_list;
- rqd.dma_ppa_list = dma_ppa_list;
- rqd.opcode = cmd_op;
- rqd.nr_ppas = rq_ppas;
-
- if (dir == PBLK_WRITE) {
- struct pblk_sec_meta *meta_list = rqd.meta_list;
-
- rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
- for (i = 0; i < rqd.nr_ppas; ) {
- spin_lock(&line->lock);
- paddr = __pblk_alloc_page(pblk, line, min);
- spin_unlock(&line->lock);
- for (j = 0; j < min; j++, i++, paddr++) {
- meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
- rqd.ppa_list[i] =
- addr_to_gen_ppa(pblk, paddr, id);
- }
- }
- } else {
- for (i = 0; i < rqd.nr_ppas; ) {
- struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
- int pos = pblk_ppa_to_pos(geo, ppa);
- int read_type = PBLK_READ_RANDOM;
-
- if (pblk_io_aligned(pblk, rq_ppas))
- read_type = PBLK_READ_SEQUENTIAL;
- rqd.flags = pblk_set_read_mode(pblk, read_type);
-
- while (test_bit(pos, line->blk_bitmap)) {
- paddr += min;
- if (pblk_boundary_paddr_checks(pblk, paddr)) {
- pblk_err(pblk, "corrupt emeta line:%d\n",
- line->id);
- bio_put(bio);
- ret = -EINTR;
- goto free_rqd_dma;
- }
-
- ppa = addr_to_gen_ppa(pblk, paddr, id);
- pos = pblk_ppa_to_pos(geo, ppa);
- }
-
- if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
- pblk_err(pblk, "corrupt emeta line:%d\n",
- line->id);
- bio_put(bio);
- ret = -EINTR;
- goto free_rqd_dma;
- }
+ rqd.opcode = NVM_OP_PREAD;
+ rqd.nr_ppas = lm->smeta_sec;
+ rqd.is_seq = 1;
- for (j = 0; j < min; j++, i++, paddr++)
- rqd.ppa_list[i] =
- addr_to_gen_ppa(pblk, paddr, line->id);
- }
- }
+ for (i = 0; i < lm->smeta_sec; i++, paddr++)
+ rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
- pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
bio_put(bio);
- goto free_rqd_dma;
+ goto clear_rqd;
}
atomic_dec(&pblk->inflight_io);
- if (rqd.error) {
- if (dir == PBLK_WRITE)
- pblk_log_write_err(pblk, &rqd);
- else
- pblk_log_read_err(pblk, &rqd);
- }
+ if (rqd.error)
+ pblk_log_read_err(pblk, &rqd);
- emeta_buf += rq_len;
- left_ppas -= rq_ppas;
- if (left_ppas)
- goto next_rq;
-free_rqd_dma:
- nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+clear_rqd:
+ pblk_free_rqd_meta(pblk, &rqd);
return ret;
}
-u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- struct pblk_line_meta *lm = &pblk->lm;
- int bit;
-
- /* This usually only happens on bad lines */
- bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
- if (bit >= lm->blk_per_line)
- return -1;
-
- return bit * geo->ws_opt;
-}
-
-static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
- u64 paddr, int dir)
+static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
+ u64 paddr)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
struct bio *bio;
struct nvm_rq rqd;
- __le64 *lba_list = NULL;
+ __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
+ __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
int i, ret;
- int cmd_op, bio_op;
- int flags;
-
- if (dir == PBLK_WRITE) {
- bio_op = REQ_OP_WRITE;
- cmd_op = NVM_OP_PWRITE;
- flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
- lba_list = emeta_to_lbas(pblk, line->emeta->buf);
- } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
- bio_op = REQ_OP_READ;
- cmd_op = NVM_OP_PREAD;
- flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
- } else
- return -EINVAL;
memset(&rqd, 0, sizeof(struct nvm_rq));
- rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd.dma_meta_list);
- if (!rqd.meta_list)
- return -ENOMEM;
-
- rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
- rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
+ ret = pblk_alloc_rqd_meta(pblk, &rqd);
+ if (ret)
+ return ret;
bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto free_ppa_list;
+ goto clear_rqd;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, bio_op, 0);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
rqd.bio = bio;
- rqd.opcode = cmd_op;
- rqd.flags = flags;
+ rqd.opcode = NVM_OP_PWRITE;
rqd.nr_ppas = lm->smeta_sec;
+ rqd.is_seq = 1;
for (i = 0; i < lm->smeta_sec; i++, paddr++) {
struct pblk_sec_meta *meta_list = rqd.meta_list;
rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
-
- if (dir == PBLK_WRITE) {
- __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
- meta_list[i].lba = lba_list[paddr] = addr_empty;
- }
+ meta_list[i].lba = lba_list[paddr] = addr_empty;
}
- /*
- * This I/O is sent by the write thread when a line is replace. Since
- * the write thread is the only one sending write and erase commands,
- * there is no need to take the LUN semaphore.
- */
- ret = pblk_submit_io_sync(pblk, &rqd);
+ ret = pblk_submit_io_sync_sem(pblk, &rqd);
if (ret) {
pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
bio_put(bio);
- goto free_ppa_list;
+ goto clear_rqd;
}
atomic_dec(&pblk->inflight_io);
if (rqd.error) {
- if (dir == PBLK_WRITE) {
- pblk_log_write_err(pblk, &rqd);
- ret = 1;
- } else if (dir == PBLK_READ)
- pblk_log_read_err(pblk, &rqd);
+ pblk_log_write_err(pblk, &rqd);
+ ret = -EIO;
}
-free_ppa_list:
- nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
-
+clear_rqd:
+ pblk_free_rqd_meta(pblk, &rqd);
return ret;
}
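
The two functions above are the first users in this file of the pblk_alloc_rqd_meta()/pblk_free_rqd_meta() pair that replaces the open-coded nvm_dev_dma_alloc() setup removed here. A minimal sketch of what the allocation helper has to do, inferred from the removed lines rather than quoted from the series:

    static int sketch_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
    {
            struct nvm_tgt_dev *dev = pblk->dev;

            /* one DMA buffer holds both the OOB metadata and the PPA list */
            rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                               &rqd->dma_meta_list);
            if (!rqd->meta_list)
                    return -ENOMEM;

            if (rqd->nr_ppas == 1)
                    return 0;

            /* the PPA list starts pblk_dma_meta_size bytes into the buffer */
            rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
            rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

            return 0;
    }

pblk_free_rqd_meta() would then undo this with a single nvm_dev_dma_free(), which is what the clear_rqd labels above rely on.
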
-int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
+int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
+ void *emeta_buf)
{
- u64 bpaddr = pblk_line_smeta_start(pblk, line);
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_line_meta *lm = &pblk->lm;
+ void *ppa_list, *meta_list;
+ struct bio *bio;
+ struct nvm_rq rqd;
+ u64 paddr = line->emeta_ssec;
+ dma_addr_t dma_ppa_list, dma_meta_list;
+ int min = pblk->min_write_pgs;
+ int left_ppas = lm->emeta_sec[0];
+ int line_id = line->id;
+ int rq_ppas, rq_len;
+ int i, j;
+ int ret;
- return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
-}
+ meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+ &dma_meta_list);
+ if (!meta_list)
+ return -ENOMEM;
-int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
- void *emeta_buf)
-{
- return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
- line->emeta_ssec, PBLK_READ);
+ ppa_list = meta_list + pblk_dma_meta_size;
+ dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+
+next_rq:
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
+ rq_len = rq_ppas * geo->csecs;
+
+ bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
+ l_mg->emeta_alloc_type, GFP_KERNEL);
+ if (IS_ERR(bio)) {
+ ret = PTR_ERR(bio);
+ goto free_rqd_dma;
+ }
+
+ bio->bi_iter.bi_sector = 0; /* internal bio */
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
+
+ rqd.bio = bio;
+ rqd.meta_list = meta_list;
+ rqd.ppa_list = ppa_list;
+ rqd.dma_meta_list = dma_meta_list;
+ rqd.dma_ppa_list = dma_ppa_list;
+ rqd.opcode = NVM_OP_PREAD;
+ rqd.nr_ppas = rq_ppas;
+
+ for (i = 0; i < rqd.nr_ppas; ) {
+ struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
+ int pos = pblk_ppa_to_pos(geo, ppa);
+
+ if (pblk_io_aligned(pblk, rq_ppas))
+ rqd.is_seq = 1;
+
+ while (test_bit(pos, line->blk_bitmap)) {
+ paddr += min;
+ if (pblk_boundary_paddr_checks(pblk, paddr)) {
+ bio_put(bio);
+ ret = -EINTR;
+ goto free_rqd_dma;
+ }
+
+ ppa = addr_to_gen_ppa(pblk, paddr, line_id);
+ pos = pblk_ppa_to_pos(geo, ppa);
+ }
+
+ if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
+ bio_put(bio);
+ ret = -EINTR;
+ goto free_rqd_dma;
+ }
+
+ for (j = 0; j < min; j++, i++, paddr++)
+ rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
+ }
+
+ ret = pblk_submit_io_sync(pblk, &rqd);
+ if (ret) {
+ pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
+ bio_put(bio);
+ goto free_rqd_dma;
+ }
+
+ atomic_dec(&pblk->inflight_io);
+
+ if (rqd.error)
+ pblk_log_read_err(pblk, &rqd);
+
+ emeta_buf += rq_len;
+ left_ppas -= rq_ppas;
+ if (left_ppas)
+ goto next_rq;
+
+free_rqd_dma:
+ nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+ return ret;
}
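
The PPA-list construction above steps the line-local address in min_write_pgs units and hops over chunks marked bad in line->blk_bitmap before the boundary checks run. A standalone toy of the same skip loop, with made-up sizes and a made-up address-to-chunk mapping (the real one goes through addr_to_gen_ppa()/pblk_ppa_to_pos()):

    #include <stdio.h>

    #define MIN_WRITE_PGS 4         /* hypothetical pblk->min_write_pgs */
    #define BLKS          8         /* hypothetical chunks per line */

    static int addr_to_pos(unsigned long paddr)
    {
            /* toy mapping: each group of MIN_WRITE_PGS lands in one chunk */
            return (paddr / MIN_WRITE_PGS) % BLKS;
    }

    int main(void)
    {
            unsigned char bad[BLKS] = { 0, 1, 0, 0, 1, 0, 0, 0 };
            unsigned long paddr = 0;
            int i, j;

            for (i = 0; i < 12; ) {
                    while (bad[addr_to_pos(paddr)])
                            paddr += MIN_WRITE_PGS; /* hop a bad chunk */

                    for (j = 0; j < MIN_WRITE_PGS; j++, i++, paddr++)
                            printf("ppa[%d] <- paddr %lu\n", i, paddr);
            }
            return 0;
    }
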
static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -885,16 +929,17 @@ static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd->opcode = NVM_OP_ERASE;
rqd->ppa_addr = ppa;
rqd->nr_ppas = 1;
- rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
+ rqd->is_seq = 1;
rqd->bio = NULL;
}
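
This hunk shows in miniature a change repeated throughout the patch: the per-request flag words built with pblk_set_progr_mode()/pblk_set_read_mode() give way to a single rqd->is_seq hint, with the device-specific encoding presumably moving into common lightnvm code outside this diff. In before/after form:

    /* before: the target computed a device-specific flag word itself */
    rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);

    /* after: the target records intent only; is_seq is a hint, not an
     * encoded flag word */
    rqd->is_seq = 1;
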
static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
- struct nvm_rq rqd;
- int ret = 0;
+ struct nvm_rq rqd = {NULL};
+ int ret;
- memset(&rqd, 0, sizeof(struct nvm_rq));
+ trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
+ PBLK_CHUNK_RESET_START);
pblk_setup_e_rq(pblk, &rqd, ppa);
@@ -902,19 +947,6 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
* with writes. Thus, there is no need to take the LUN semaphore.
*/
ret = pblk_submit_io_sync(pblk, &rqd);
- if (ret) {
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
-
- pblk_err(pblk, "could not sync erase line:%d,blk:%d\n",
- pblk_ppa_to_line(ppa),
- pblk_ppa_to_pos(geo, ppa));
-
- rqd.error = ret;
- goto out;
- }
-
-out:
rqd.private = pblk;
__pblk_end_io_erase(pblk, &rqd);
@@ -1008,6 +1040,8 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
spin_lock(&l_mg->free_lock);
spin_lock(&line->lock);
line->state = PBLK_LINESTATE_BAD;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
spin_unlock(&line->lock);
list_add_tail(&line->list, &l_mg->bad_list);
@@ -1071,15 +1105,18 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
struct pblk_line_meta *lm = &pblk->lm;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
- line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
+ line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
if (!line->map_bitmap)
return -ENOMEM;
+ memset(line->map_bitmap, 0, lm->sec_bitmap_len);
+
/* will be initialized using bb info from map_bitmap */
- line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
+ line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
if (!line->invalid_bitmap) {
- kfree(line->map_bitmap);
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
line->map_bitmap = NULL;
return -ENOMEM;
}
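
Moving the line bitmaps from kzalloc()/kmalloc() onto a shared mempool bounds allocation behavior under memory pressure once the pool is primed, at the cost of clearing by hand: mempool objects are recycled, never zeroed by the allocator, hence the explicit memset() above. The full pairing, pieced together from this hunk and the pool setup added later in this patch:

    /* setup (added in pblk_line_mg_init() below): one slab object per
     * bitmap, two bitmaps (map + invalid) per in-flight data line */
    l_mg->bitmap_cache = kmem_cache_create("pblk_lm_bitmap",
                                           lm->sec_bitmap_len, 0, 0, NULL);
    l_mg->bitmap_pool = mempool_create_slab_pool(PBLK_DATA_LINES * 2,
                                                 l_mg->bitmap_cache);

    /* allocation: recycled objects arrive dirty */
    line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
    memset(line->map_bitmap, 0, lm->sec_bitmap_len);

    /* release pairs with mempool_free(), not kfree() */
    mempool_free(line->map_bitmap, l_mg->bitmap_pool);
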
@@ -1122,7 +1159,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
line->smeta_ssec = off;
line->cur_sec = off + lm->smeta_sec;
- if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
+ if (init && pblk_line_smeta_write(pblk, line, off)) {
pblk_debug(pblk, "line smeta I/O failed. Retry\n");
return 0;
}
@@ -1152,6 +1189,8 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
spin_lock(&line->lock);
line->state = PBLK_LINESTATE_BAD;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
spin_unlock(&line->lock);
list_add_tail(&line->list, &l_mg->bad_list);
@@ -1204,6 +1243,8 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
if (line->state == PBLK_LINESTATE_NEW) {
blk_to_erase = pblk_prepare_new_line(pblk, line);
line->state = PBLK_LINESTATE_FREE;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
} else {
blk_to_erase = blk_in_line;
}
@@ -1221,6 +1262,8 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
}
line->state = PBLK_LINESTATE_OPEN;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
atomic_set(&line->left_eblks, blk_to_erase);
atomic_set(&line->left_seblks, blk_to_erase);
@@ -1265,7 +1308,9 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
- kfree(line->map_bitmap);
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
@@ -1283,8 +1328,11 @@ static void pblk_line_reinit(struct pblk_line *line)
void pblk_line_free(struct pblk_line *line)
{
- kfree(line->map_bitmap);
- kfree(line->invalid_bitmap);
+ struct pblk *pblk = line->pblk;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
+ mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);
pblk_line_reinit(line);
}
@@ -1312,6 +1360,8 @@ retry:
if (unlikely(bit >= lm->blk_per_line)) {
spin_lock(&line->lock);
line->state = PBLK_LINESTATE_BAD;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
spin_unlock(&line->lock);
list_add_tail(&line->list, &l_mg->bad_list);
@@ -1446,12 +1496,32 @@ retry_setup:
return line;
}
+void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
+{
+ struct pblk_line *line;
+
+ line = pblk_ppa_to_line(pblk, ppa);
+ kref_put(&line->ref, pblk_line_put_wq);
+}
+
+void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct ppa_addr *ppa_list;
+ int i;
+
+ ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+
+ for (i = 0; i < rqd->nr_ppas; i++)
+ pblk_ppa_to_line_put(pblk, ppa_list[i]);
+}
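
These helpers generalize the per-sector kref drop that the read path used to open-code (pblk_read_put_rqd_kref(), removed further down in pblk-read.c): any completion path holding one line reference per mapped sector can release them in one call. Usage as it appears in the read-completion hunk below:

    /* drop one line reference per sector in the request */
    if (put_line)
            pblk_rq_to_line_put(pblk, rqd);
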
+
static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
lockdep_assert_held(&pblk->l_mg.free_lock);
pblk_set_space_limit(pblk);
pblk->state = PBLK_STATE_STOPPING;
+ trace_pblk_state(pblk_disk_name(pblk), pblk->state);
}
static void pblk_line_close_meta_sync(struct pblk *pblk)
@@ -1501,6 +1571,7 @@ void __pblk_pipeline_flush(struct pblk *pblk)
return;
}
pblk->state = PBLK_STATE_RECOVERING;
+ trace_pblk_state(pblk_disk_name(pblk), pblk->state);
spin_unlock(&l_mg->free_lock);
pblk_flush_writer(pblk);
@@ -1522,6 +1593,7 @@ void __pblk_pipeline_stop(struct pblk *pblk)
spin_lock(&l_mg->free_lock);
pblk->state = PBLK_STATE_STOPPED;
+ trace_pblk_state(pblk_disk_name(pblk), pblk->state);
l_mg->data_line = NULL;
l_mg->data_next = NULL;
spin_unlock(&l_mg->free_lock);
@@ -1539,13 +1611,14 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
struct pblk_line *cur, *new = NULL;
unsigned int left_seblks;
- cur = l_mg->data_line;
new = l_mg->data_next;
if (!new)
goto out;
- l_mg->data_line = new;
spin_lock(&l_mg->free_lock);
+ cur = l_mg->data_line;
+ l_mg->data_line = new;
+
pblk_line_setup_metadata(new, l_mg, &pblk->lm);
spin_unlock(&l_mg->free_lock);
@@ -1612,6 +1685,8 @@ static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
line->state = PBLK_LINESTATE_FREE;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
line->gc_group = PBLK_LINEGC_NONE;
pblk_line_free(line);
@@ -1680,6 +1755,9 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
rqd->end_io = pblk_end_io_erase;
rqd->private = pblk;
+ trace_pblk_chunk_reset(pblk_disk_name(pblk),
+ &ppa, PBLK_CHUNK_RESET_START);
+
/* The write thread schedules erases so that it minimizes disturbances
* with writes. Thus, there is no need to take the LUN semaphore.
*/
@@ -1689,7 +1767,7 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
- pblk_ppa_to_line(ppa),
+ pblk_ppa_to_line_id(ppa),
pblk_ppa_to_pos(geo, ppa));
}
@@ -1741,10 +1819,9 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
WARN_ON(line->state != PBLK_LINESTATE_OPEN);
line->state = PBLK_LINESTATE_CLOSED;
move_list = pblk_line_gc_list(pblk, line);
-
list_add_tail(&line->list, move_list);
- kfree(line->map_bitmap);
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
@@ -1760,6 +1837,9 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
spin_unlock(&line->lock);
spin_unlock(&l_mg->gc_lock);
+
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
}
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
@@ -1778,6 +1858,17 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
+ if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
+ emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
+ memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
+ emeta_buf->header.id = cpu_to_le32(line->id);
+ emeta_buf->header.type = cpu_to_le16(line->type);
+ emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
+ emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
+ emeta_buf->header.crc = cpu_to_le32(
+ pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
+ }
+
emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
@@ -1795,8 +1886,6 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
spin_unlock(&l_mg->close_lock);
pblk_line_should_sync_meta(pblk);
-
-
}
static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
@@ -1847,8 +1936,7 @@ void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
queue_work(wq, &line_ws->ws);
}
-static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
- int nr_ppas, int pos)
+static void __pblk_down_chunk(struct pblk *pblk, int pos)
{
struct pblk_lun *rlun = &pblk->luns[pos];
int ret;
@@ -1857,13 +1945,6 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
* Only send one inflight I/O per LUN. Since we map at a page
 * granularity, all ppas in the I/O will map to the same LUN
*/
-#ifdef CONFIG_NVM_PBLK_DEBUG
- int i;
-
- for (i = 1; i < nr_ppas; i++)
- WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
- ppa_list[0].a.ch != ppa_list[i].a.ch);
-#endif
ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
if (ret == -ETIME || ret == -EINTR)
@@ -1871,21 +1952,21 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
-ret);
}
-void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
+void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+ int pos = pblk_ppa_to_pos(geo, ppa);
- __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
+ __pblk_down_chunk(pblk, pos);
}
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
+void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
unsigned long *lun_bitmap)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+ int pos = pblk_ppa_to_pos(geo, ppa);
/* If the LUN has been locked for this same request, do not attempt to
* lock it again
@@ -1893,30 +1974,21 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
if (test_and_set_bit(pos, lun_bitmap))
return;
- __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
+ __pblk_down_chunk(pblk, pos);
}
-void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
+void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_lun *rlun;
- int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
- int i;
-
- for (i = 1; i < nr_ppas; i++)
- WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
- ppa_list[0].a.ch != ppa_list[i].a.ch);
-#endif
+ int pos = pblk_ppa_to_pos(geo, ppa);
rlun = &pblk->luns[pos];
up(&rlun->wr_sem);
}
-void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
- unsigned long *lun_bitmap)
+void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
@@ -2060,8 +2132,7 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
/* If the L2P entry maps to a line, the reference is valid */
if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
- int line_id = pblk_ppa_to_line(ppa);
- struct pblk_line *line = &pblk->lines[line_id];
+ struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
kref_get(&line->ref);
}
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 157c2567c9e8..2fa118c8eb71 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -16,8 +17,10 @@
*/
#include "pblk.h"
+#include "pblk-trace.h"
#include <linux/delay.h>
+
static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
if (gc_rq->data)
@@ -64,6 +67,8 @@ static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
line->state = PBLK_LINESTATE_CLOSED;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
move_list = pblk_line_gc_list(pblk, line);
spin_unlock(&line->lock);
@@ -144,7 +149,7 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
if (!emeta_buf)
return NULL;
- ret = pblk_line_read_emeta(pblk, line, emeta_buf);
+ ret = pblk_line_emeta_read(pblk, line, emeta_buf);
if (ret) {
pblk_err(pblk, "line %d read emeta failed (%d)\n",
line->id, ret);
@@ -405,6 +410,8 @@ void pblk_gc_free_full_lines(struct pblk *pblk)
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
line->state = PBLK_LINESTATE_GC;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
spin_unlock(&line->lock);
list_del(&line->list);
@@ -451,6 +458,8 @@ next_gc_group:
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
line->state = PBLK_LINESTATE_GC;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
spin_unlock(&line->lock);
list_del(&line->list);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 537e98f2b24a..13822594647c 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
* Copyright (C) 2016 CNEX Labs
@@ -19,15 +20,31 @@
*/
#include "pblk.h"
+#include "pblk-trace.h"
static unsigned int write_buffer_size;
module_param(write_buffer_size, uint, 0644);
MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
-static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
- *pblk_w_rq_cache;
-static DECLARE_RWSEM(pblk_lock);
+struct pblk_global_caches {
+ struct kmem_cache *ws;
+ struct kmem_cache *rec;
+ struct kmem_cache *g_rq;
+ struct kmem_cache *w_rq;
+
+ struct kref kref;
+
+ struct mutex mutex; /* Ensures consistency between
+ * caches and kref
+ */
+};
+
+static struct pblk_global_caches pblk_caches = {
+ .mutex = __MUTEX_INITIALIZER(pblk_caches.mutex),
+ .kref = KREF_INIT(0),
+};
+
struct bio_set pblk_bio_set;
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
@@ -168,36 +185,26 @@ static void pblk_rwb_free(struct pblk *pblk)
if (pblk_rb_tear_down_check(&pblk->rwb))
pblk_err(pblk, "write buffer error on tear down\n");
- pblk_rb_data_free(&pblk->rwb);
- vfree(pblk_rb_entries_ref(&pblk->rwb));
+ pblk_rb_free(&pblk->rwb);
}
static int pblk_rwb_init(struct pblk *pblk)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- struct pblk_rb_entry *entries;
- unsigned long nr_entries, buffer_size;
- unsigned int power_size, power_seg_sz;
- int pgs_in_buffer;
+ unsigned long buffer_size;
+ int pgs_in_buffer, threshold;
- pgs_in_buffer = max(geo->mw_cunits, geo->ws_opt) * geo->all_luns;
+ threshold = geo->mw_cunits * geo->all_luns;
+ pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
+ * geo->all_luns;
if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
buffer_size = write_buffer_size;
else
buffer_size = pgs_in_buffer;
- nr_entries = pblk_rb_calculate_size(buffer_size);
-
- entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
- if (!entries)
- return -ENOMEM;
-
- power_size = get_count_order(nr_entries);
- power_seg_sz = get_count_order(geo->csecs);
-
- return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
+ return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
}
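
The rewritten sizing gives the ring buffer ws_opt extra pages per LUN beyond the old formula, and hands mw_cunits * all_luns down to pblk_rb_init() as a back-pressure threshold (rb->back_thres, see pblk-rb.c below). A worked example, assuming a hypothetical geometry:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical OCSSD geometry, for illustration only */
            int mw_cunits = 8;      /* geo->mw_cunits */
            int ws_opt    = 8;      /* geo->ws_opt    */
            int all_luns  = 64;     /* geo->all_luns  */

            int threshold = mw_cunits * all_luns;
            int pgs_in_buffer = ((mw_cunits > ws_opt ? mw_cunits : ws_opt)
                                 + ws_opt) * all_luns;

            /* 512 entries of headroom on top of a 1024-page buffer */
            printf("threshold=%d pgs_in_buffer=%d\n",
                   threshold, pgs_in_buffer);
            return 0;
    }
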
/* Minimum pages needed within a lun */
@@ -306,53 +313,80 @@ static int pblk_set_addrf(struct pblk *pblk)
return 0;
}
-static int pblk_init_global_caches(struct pblk *pblk)
+static int pblk_create_global_caches(void)
{
- down_write(&pblk_lock);
- pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
+
+ pblk_caches.ws = kmem_cache_create("pblk_blk_ws",
sizeof(struct pblk_line_ws), 0, 0, NULL);
- if (!pblk_ws_cache) {
- up_write(&pblk_lock);
+ if (!pblk_caches.ws)
return -ENOMEM;
- }
- pblk_rec_cache = kmem_cache_create("pblk_rec",
+ pblk_caches.rec = kmem_cache_create("pblk_rec",
sizeof(struct pblk_rec_ctx), 0, 0, NULL);
- if (!pblk_rec_cache) {
- kmem_cache_destroy(pblk_ws_cache);
- up_write(&pblk_lock);
- return -ENOMEM;
- }
+ if (!pblk_caches.rec)
+ goto fail_destroy_ws;
- pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
+ pblk_caches.g_rq = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
0, 0, NULL);
- if (!pblk_g_rq_cache) {
- kmem_cache_destroy(pblk_ws_cache);
- kmem_cache_destroy(pblk_rec_cache);
- up_write(&pblk_lock);
- return -ENOMEM;
- }
+ if (!pblk_caches.g_rq)
+ goto fail_destroy_rec;
- pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
+ pblk_caches.w_rq = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
0, 0, NULL);
- if (!pblk_w_rq_cache) {
- kmem_cache_destroy(pblk_ws_cache);
- kmem_cache_destroy(pblk_rec_cache);
- kmem_cache_destroy(pblk_g_rq_cache);
- up_write(&pblk_lock);
- return -ENOMEM;
- }
- up_write(&pblk_lock);
+ if (!pblk_caches.w_rq)
+ goto fail_destroy_g_rq;
return 0;
+
+fail_destroy_g_rq:
+ kmem_cache_destroy(pblk_caches.g_rq);
+fail_destroy_rec:
+ kmem_cache_destroy(pblk_caches.rec);
+fail_destroy_ws:
+ kmem_cache_destroy(pblk_caches.ws);
+
+ return -ENOMEM;
}
-static void pblk_free_global_caches(struct pblk *pblk)
+static int pblk_get_global_caches(void)
{
- kmem_cache_destroy(pblk_ws_cache);
- kmem_cache_destroy(pblk_rec_cache);
- kmem_cache_destroy(pblk_g_rq_cache);
- kmem_cache_destroy(pblk_w_rq_cache);
+ int ret;
+
+ mutex_lock(&pblk_caches.mutex);
+
+ if (kref_read(&pblk_caches.kref) > 0) {
+ kref_get(&pblk_caches.kref);
+ mutex_unlock(&pblk_caches.mutex);
+ return 0;
+ }
+
+ ret = pblk_create_global_caches();
+
+ if (!ret)
+ kref_get(&pblk_caches.kref);
+
+ mutex_unlock(&pblk_caches.mutex);
+
+ return ret;
+}
+
+static void pblk_destroy_global_caches(struct kref *ref)
+{
+ struct pblk_global_caches *c;
+
+ c = container_of(ref, struct pblk_global_caches, kref);
+
+ kmem_cache_destroy(c->ws);
+ kmem_cache_destroy(c->rec);
+ kmem_cache_destroy(c->g_rq);
+ kmem_cache_destroy(c->w_rq);
+}
+
+static void pblk_put_global_caches(void)
+{
+ mutex_lock(&pblk_caches.mutex);
+ kref_put(&pblk_caches.kref, pblk_destroy_global_caches);
+ mutex_unlock(&pblk_caches.mutex);
}
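
The kref/mutex pair turns the formerly file-scoped caches into a refcounted singleton: the first pblk instance creates the four kmem_caches, later instances only take a reference, and the last put destroys them. The mutex is what makes the kref_read()-then-kref_get() sequence safe against a concurrent first creator. A userspace toy of the same life cycle, with the kref modeled as a plain counter under the mutex:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int refs;                /* models pblk_caches.kref */
    static int caches_live;         /* models the four kmem_caches */

    static void get_caches(void)
    {
            pthread_mutex_lock(&lock);
            if (refs++ == 0)
                    caches_live = 1;        /* create_global_caches() */
            pthread_mutex_unlock(&lock);
    }

    static void put_caches(void)
    {
            pthread_mutex_lock(&lock);
            if (--refs == 0)
                    caches_live = 0;        /* destroy_global_caches() */
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            get_caches();           /* first instance creates */
            get_caches();           /* second instance reuses */
            put_caches();
            put_caches();           /* last put tears down */
            printf("live=%d refs=%d\n", caches_live, refs);
            return 0;
    }
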
static int pblk_core_init(struct pblk *pblk)
@@ -371,23 +405,19 @@ static int pblk_core_init(struct pblk *pblk)
atomic64_set(&pblk->nr_flush, 0);
pblk->nr_flush_rst = 0;
- pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
+ pblk->min_write_pgs = geo->ws_opt;
max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
+ pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
+ queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
- if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
- pblk_err(pblk, "vector list too big(%u > %u)\n",
- pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS);
- return -EINVAL;
- }
-
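
With the hard PBLK_MAX_REQ_ADDRS rejection gone, the maximum write size is instead clamped twice: by the NVM_MAX_VLBA vector limit and, new here, by what the request queue accepts per I/O. A worked example with made-up device numbers (assuming NVM_MAX_VLBA is 64, as used elsewhere in this patch):

    #include <stdio.h>

    #define NVM_MAX_VLBA 64                 /* assumed vector limit */

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
            int ws_opt = 8, all_luns = 32;  /* hypothetical geometry */
            int csecs = 4096;               /* sector size in bytes */
            int max_hw_sectors = 256;       /* queue limit, 512B units */

            int max_write_pgs = min_int(ws_opt * all_luns, NVM_MAX_VLBA);

            /* never build an I/O larger than the queue allows */
            max_write_pgs = min_int(max_write_pgs,
                                    max_hw_sectors / (csecs >> 9));

            printf("max_write_pgs=%d\n", max_write_pgs);    /* 32 */
            return 0;
    }
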
pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
GFP_KERNEL);
if (!pblk->pad_dist)
return -ENOMEM;
- if (pblk_init_global_caches(pblk))
+ if (pblk_get_global_caches())
goto fail_free_pad_dist;
/* Internal bios can be at most the sectors signaled by the device. */
@@ -396,27 +426,27 @@ static int pblk_core_init(struct pblk *pblk)
goto free_global_caches;
ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
- pblk_ws_cache);
+ pblk_caches.ws);
if (ret)
goto free_page_bio_pool;
ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
- pblk_rec_cache);
+ pblk_caches.rec);
if (ret)
goto free_gen_ws_pool;
ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
- pblk_g_rq_cache);
+ pblk_caches.g_rq);
if (ret)
goto free_rec_pool;
ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
- pblk_g_rq_cache);
+ pblk_caches.g_rq);
if (ret)
goto free_r_rq_pool;
ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
- pblk_w_rq_cache);
+ pblk_caches.w_rq);
if (ret)
goto free_e_rq_pool;
@@ -462,7 +492,7 @@ free_gen_ws_pool:
free_page_bio_pool:
mempool_exit(&pblk->page_bio_pool);
free_global_caches:
- pblk_free_global_caches(pblk);
+ pblk_put_global_caches();
fail_free_pad_dist:
kfree(pblk->pad_dist);
return -ENOMEM;
@@ -486,7 +516,7 @@ static void pblk_core_free(struct pblk *pblk)
mempool_exit(&pblk->e_rq_pool);
mempool_exit(&pblk->w_rq_pool);
- pblk_free_global_caches(pblk);
+ pblk_put_global_caches();
kfree(pblk->pad_dist);
}
@@ -504,6 +534,9 @@ static void pblk_line_mg_free(struct pblk *pblk)
pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
kfree(l_mg->eline_meta[i]);
}
+
+ mempool_destroy(l_mg->bitmap_pool);
+ kmem_cache_destroy(l_mg->bitmap_cache);
}
static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
@@ -540,67 +573,6 @@ static void pblk_lines_free(struct pblk *pblk)
kfree(pblk->lines);
}
-static int pblk_bb_get_tbl(struct nvm_tgt_dev *dev, struct pblk_lun *rlun,
- u8 *blks, int nr_blks)
-{
- struct ppa_addr ppa;
- int ret;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
-
- ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
- if (ret)
- return ret;
-
- nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
- if (nr_blks < 0)
- return -EIO;
-
- return 0;
-}
-
-static void *pblk_bb_get_meta(struct pblk *pblk)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- u8 *meta;
- int i, nr_blks, blk_per_lun;
- int ret;
-
- blk_per_lun = geo->num_chk * geo->pln_mode;
- nr_blks = blk_per_lun * geo->all_luns;
-
- meta = kmalloc(nr_blks, GFP_KERNEL);
- if (!meta)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < geo->all_luns; i++) {
- struct pblk_lun *rlun = &pblk->luns[i];
- u8 *meta_pos = meta + i * blk_per_lun;
-
- ret = pblk_bb_get_tbl(dev, rlun, meta_pos, blk_per_lun);
- if (ret) {
- kfree(meta);
- return ERR_PTR(-EIO);
- }
- }
-
- return meta;
-}
-
-static void *pblk_chunk_get_meta(struct pblk *pblk)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12)
- return pblk_bb_get_meta(pblk);
- else
- return pblk_chunk_get_info(pblk);
-}
-
static int pblk_luns_init(struct pblk *pblk)
{
struct nvm_tgt_dev *dev = pblk->dev;
@@ -699,51 +671,7 @@ static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}
-static int pblk_setup_line_meta_12(struct pblk *pblk, struct pblk_line *line,
- void *chunk_meta)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- struct pblk_line_meta *lm = &pblk->lm;
- int i, chk_per_lun, nr_bad_chks = 0;
-
- chk_per_lun = geo->num_chk * geo->pln_mode;
-
- for (i = 0; i < lm->blk_per_line; i++) {
- struct pblk_lun *rlun = &pblk->luns[i];
- struct nvm_chk_meta *chunk;
- int pos = pblk_ppa_to_pos(geo, rlun->bppa);
- u8 *lun_bb_meta = chunk_meta + pos * chk_per_lun;
-
- chunk = &line->chks[pos];
-
- /*
- * In 1.2 spec. chunk state is not persisted by the device. Thus
- * some of the values are reset each time pblk is instantiated,
- * so we have to assume that the block is closed.
- */
- if (lun_bb_meta[line->id] == NVM_BLK_T_FREE)
- chunk->state = NVM_CHK_ST_CLOSED;
- else
- chunk->state = NVM_CHK_ST_OFFLINE;
-
- chunk->type = NVM_CHK_TP_W_SEQ;
- chunk->wi = 0;
- chunk->slba = -1;
- chunk->cnlb = geo->clba;
- chunk->wp = 0;
-
- if (!(chunk->state & NVM_CHK_ST_OFFLINE))
- continue;
-
- set_bit(pos, line->blk_bitmap);
- nr_bad_chks++;
- }
-
- return nr_bad_chks;
-}
-
-static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line,
+static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
struct nvm_chk_meta *meta)
{
struct nvm_tgt_dev *dev = pblk->dev;
@@ -772,6 +700,9 @@ static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line,
chunk->cnlb = chunk_meta->cnlb;
chunk->wp = chunk_meta->wp;
+ trace_pblk_chunk_state(pblk_disk_name(pblk), &ppa,
+ chunk->state);
+
if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
continue;
@@ -790,8 +721,6 @@ static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line,
static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
void *chunk_meta, int line_id)
{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
long nr_bad_chks, chk_in_line;
@@ -804,10 +733,7 @@ static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
line->vsc = &l_mg->vsc_list[line_id];
spin_lock_init(&line->lock);
- if (geo->version == NVM_OCSSD_SPEC_12)
- nr_bad_chks = pblk_setup_line_meta_12(pblk, line, chunk_meta);
- else
- nr_bad_chks = pblk_setup_line_meta_20(pblk, line, chunk_meta);
+ nr_bad_chks = pblk_setup_line_meta_chk(pblk, line, chunk_meta);
chk_in_line = lm->blk_per_line - nr_bad_chks;
if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
@@ -913,6 +839,17 @@ static int pblk_line_mg_init(struct pblk *pblk)
goto fail_free_smeta;
}
+ l_mg->bitmap_cache = kmem_cache_create("pblk_lm_bitmap",
+ lm->sec_bitmap_len, 0, 0, NULL);
+ if (!l_mg->bitmap_cache)
+ goto fail_free_smeta;
+
+ /* the bitmap pool is used for both valid and map bitmaps */
+ l_mg->bitmap_pool = mempool_create_slab_pool(PBLK_DATA_LINES * 2,
+ l_mg->bitmap_cache);
+ if (!l_mg->bitmap_pool)
+ goto fail_destroy_bitmap_cache;
+
/* emeta allocates three different buffers for managing metadata with
* in-memory and in-media layouts
*/
@@ -965,6 +902,10 @@ fail_free_emeta:
kfree(l_mg->eline_meta[i]->buf);
kfree(l_mg->eline_meta[i]);
}
+
+ mempool_destroy(l_mg->bitmap_pool);
+fail_destroy_bitmap_cache:
+ kmem_cache_destroy(l_mg->bitmap_cache);
fail_free_smeta:
for (i = 0; i < PBLK_DATA_LINES; i++)
kfree(l_mg->sline_meta[i]);
@@ -1058,7 +999,7 @@ static int pblk_lines_init(struct pblk *pblk)
if (ret)
goto fail_free_meta;
- chunk_meta = pblk_chunk_get_meta(pblk);
+ chunk_meta = pblk_get_chunk_meta(pblk);
if (IS_ERR(chunk_meta)) {
ret = PTR_ERR(chunk_meta);
goto fail_free_luns;
@@ -1079,16 +1020,20 @@ static int pblk_lines_init(struct pblk *pblk)
goto fail_free_lines;
nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);
+
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
}
if (!nr_free_chks) {
pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
- return -EINTR;
+ ret = -EINTR;
+ goto fail_free_lines;
}
pblk_set_provision(pblk, nr_free_chks);
- kfree(chunk_meta);
+ vfree(chunk_meta);
return 0;
fail_free_lines:
@@ -1165,7 +1110,6 @@ static void pblk_exit(void *private, bool graceful)
{
struct pblk *pblk = private;
- down_write(&pblk_lock);
pblk_gc_exit(pblk, graceful);
pblk_tear_down(pblk, graceful);
@@ -1174,7 +1118,6 @@ static void pblk_exit(void *private, bool graceful)
#endif
pblk_free(pblk);
- up_write(&pblk_lock);
}
static sector_t pblk_capacity(void *private)
@@ -1200,6 +1143,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
pblk->dev = dev;
pblk->disk = tdisk;
pblk->state = PBLK_STATE_RUNNING;
+ trace_pblk_state(pblk_disk_name(pblk), pblk->state);
pblk->gc.gc_enabled = 0;
if (!(geo->version == NVM_OCSSD_SPEC_12 ||
@@ -1210,13 +1154,6 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
return ERR_PTR(-EINVAL);
}
- if (geo->version == NVM_OCSSD_SPEC_12 && geo->dom & NVM_RSP_L2P) {
- pblk_err(pblk, "host-side L2P table not supported. (%x)\n",
- geo->dom);
- kfree(pblk);
- return ERR_PTR(-EINVAL);
- }
-
spin_lock_init(&pblk->resubmit_lock);
spin_lock_init(&pblk->trans_lock);
spin_lock_init(&pblk->lock);
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 953ca31dda68..6dcbd44e3acb 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -79,7 +80,7 @@ static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
}
}
- pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
+ pblk_down_rq(pblk, ppa_list[0], lun_bitmap);
return 0;
}
@@ -88,13 +89,14 @@ void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
unsigned int off)
{
struct pblk_sec_meta *meta_list = rqd->meta_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
unsigned int map_secs;
int min = pblk->min_write_pgs;
int i;
for (i = off; i < rqd->nr_ppas; i += min) {
map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
- if (pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
+ if (pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
lun_bitmap, &meta_list[i], map_secs)) {
bio_put(rqd->bio);
pblk_free_rqd(pblk, rqd, PBLK_WRITE);
@@ -112,6 +114,7 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct nvm_geo *geo = &dev->geo;
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_sec_meta *meta_list = rqd->meta_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
struct pblk_line *e_line, *d_line;
unsigned int map_secs;
int min = pblk->min_write_pgs;
@@ -119,14 +122,14 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
for (i = 0; i < rqd->nr_ppas; i += min) {
map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
- if (pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
+ if (pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
lun_bitmap, &meta_list[i], map_secs)) {
bio_put(rqd->bio);
pblk_free_rqd(pblk, rqd, PBLK_WRITE);
pblk_pipeline_stop(pblk);
}
- erase_lun = pblk_ppa_to_pos(geo, rqd->ppa_list[i]);
+ erase_lun = pblk_ppa_to_pos(geo, ppa_list[i]);
/* line can change after page map. We might also be writing the
* last line.
@@ -141,7 +144,7 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
set_bit(erase_lun, e_line->erase_bitmap);
atomic_dec(&e_line->left_eblks);
- *erase_ppa = rqd->ppa_list[i];
+ *erase_ppa = ppa_list[i];
erase_ppa->a.blk = e_line->id;
spin_unlock(&e_line->lock);
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index f6eec0212dfc..b1f4b51783f4 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -22,7 +23,7 @@
static DECLARE_RWSEM(pblk_rb_lock);
-void pblk_rb_data_free(struct pblk_rb *rb)
+static void pblk_rb_data_free(struct pblk_rb *rb)
{
struct pblk_rb_pages *p, *t;
@@ -35,25 +36,51 @@ void pblk_rb_data_free(struct pblk_rb *rb)
up_write(&pblk_rb_lock);
}
+void pblk_rb_free(struct pblk_rb *rb)
+{
+ pblk_rb_data_free(rb);
+ vfree(rb->entries);
+}
+
+/*
+ * pblk_rb_calculate_size -- calculate the size of the write buffer
+ */
+static unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
+{
+ /* Alloc a write buffer that can at least fit 128 entries */
+ return (1 << max(get_count_order(nr_entries), 7));
+}
+
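
With the helper now static and only called from pblk_rb_init(), the sizing rule is easy to check in isolation: round the requested entry count up to a power of two, with a floor of 2^7 = 128 entries. A standalone reimplementation of the same arithmetic:

    #include <stdio.h>

    /* mirrors pblk_rb_calculate_size(); the loop stands in for
     * get_count_order() */
    static unsigned int rb_size(unsigned int nr_entries)
    {
            unsigned int order = 0;

            while ((1u << order) < nr_entries)
                    order++;
            return 1u << (order > 7 ? order : 7);
    }

    int main(void)
    {
            printf("%u %u %u\n", rb_size(100), rb_size(1000),
                   rb_size(1024));          /* 128 1024 1024 */
            return 0;
    }
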
/*
* Initialize ring buffer. The data and metadata buffers must be previously
* allocated and their size must be a power of two
* (Documentation/core-api/circular-buffers.rst)
*/
-int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
- unsigned int power_size, unsigned int power_seg_sz)
+int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
+ unsigned int seg_size)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
+ struct pblk_rb_entry *entries;
unsigned int init_entry = 0;
- unsigned int alloc_order = power_size;
unsigned int max_order = MAX_ORDER - 1;
- unsigned int order, iter;
+ unsigned int power_size, power_seg_sz;
+ unsigned int alloc_order, order, iter;
+ unsigned int nr_entries;
+
+ nr_entries = pblk_rb_calculate_size(size);
+ entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
+ if (!entries)
+ return -ENOMEM;
+
+ power_size = get_count_order(size);
+ power_seg_sz = get_count_order(seg_size);
down_write(&pblk_rb_lock);
- rb->entries = rb_entry_base;
+ rb->entries = entries;
rb->seg_size = (1 << power_seg_sz);
rb->nr_entries = (1 << power_size);
rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
+ rb->back_thres = threshold;
rb->flush_point = EMPTY_ENTRY;
spin_lock_init(&rb->w_lock);
@@ -61,6 +88,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
INIT_LIST_HEAD(&rb->pages);
+ alloc_order = power_size;
if (alloc_order >= max_order) {
order = max_order;
iter = (1 << (alloc_order - max_order));
@@ -79,6 +107,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
if (!page_set) {
up_write(&pblk_rb_lock);
+ vfree(entries);
return -ENOMEM;
}
@@ -88,6 +117,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
kfree(page_set);
pblk_rb_data_free(rb);
up_write(&pblk_rb_lock);
+ vfree(entries);
return -ENOMEM;
}
kaddr = page_address(page_set->pages);
@@ -124,20 +154,6 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
return 0;
}
-/*
- * pblk_rb_calculate_size -- calculate the size of the write buffer
- */
-unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
-{
- /* Alloc a write buffer that can at least fit 128 entries */
- return (1 << max(get_count_order(nr_entries), 7));
-}
-
-void *pblk_rb_entries_ref(struct pblk_rb *rb)
-{
- return rb->entries;
-}
-
static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
int flags;
@@ -168,6 +184,12 @@ static unsigned int pblk_rb_space(struct pblk_rb *rb)
return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}
+unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
+ unsigned int nr_entries)
+{
+ return (p + nr_entries) & (rb->nr_entries - 1);
+}
+
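
pblk_rb_ptr_wrap() centralizes the wrap-around arithmetic that the rest of this file previously repeated inline. It depends on rb->nr_entries being a power of two, which makes the AND mask equivalent to a modulo:

    #include <stdio.h>

    static unsigned int ptr_wrap(unsigned int p, unsigned int n,
                                 unsigned int nr_entries)
    {
            /* valid only when nr_entries is a power of two */
            return (p + n) & (nr_entries - 1);
    }

    int main(void)
    {
            printf("%u\n", ptr_wrap(1020, 8, 1024));        /* 4 */
            return 0;
    }
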
/*
* Buffer count is calculated with respect to the submission entry signaling the
* entries that are available to send to the media
@@ -194,8 +216,7 @@ unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
subm = READ_ONCE(rb->subm);
/* Commit read means updating submission pointer */
- smp_store_release(&rb->subm,
- (subm + nr_entries) & (rb->nr_entries - 1));
+ smp_store_release(&rb->subm, pblk_rb_ptr_wrap(rb, subm, nr_entries));
return subm;
}
@@ -225,10 +246,10 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
entry->cacheline);
- line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
+ line = pblk_ppa_to_line(pblk, w_ctx->ppa);
kref_put(&line->ref, pblk_line_put);
clean_wctx(w_ctx);
- rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
+ rb->l2p_update = pblk_rb_ptr_wrap(rb, rb->l2p_update, 1);
}
pblk_rl_out(&pblk->rl, user_io, gc_io);
@@ -385,11 +406,14 @@ static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
{
unsigned int mem;
unsigned int sync;
+ unsigned int threshold;
sync = READ_ONCE(rb->sync);
mem = READ_ONCE(rb->mem);
- if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries)
+ threshold = nr_entries + rb->back_thres;
+
+ if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < threshold)
return 0;
if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
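
The admission check now demands rb->back_thres free entries on top of the request itself, pairing with the enlarged buffer from pblk_rwb_init() above: user writes back off before the headroom reserved for flushing and padding is consumed. The check in isolation:

    #include <stdio.h>

    int main(void)
    {
            unsigned int nr_entries = 64;   /* incoming write */
            unsigned int back_thres = 512;  /* e.g. mw_cunits * all_luns */
            unsigned int space = 550;       /* free ring entries now */

            if (space < nr_entries + back_thres)
                    printf("back off: need %u, have %u\n",
                           nr_entries + back_thres, space);
            return 0;
    }
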
@@ -407,7 +431,7 @@ static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
return 0;
/* Protect from read count */
- smp_store_release(&rb->mem, (*pos + nr_entries) & (rb->nr_entries - 1));
+ smp_store_release(&rb->mem, pblk_rb_ptr_wrap(rb, *pos, nr_entries));
return 1;
}
@@ -431,7 +455,7 @@ static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
if (!__pblk_rb_may_write(rb, nr_entries, pos))
return 0;
- mem = (*pos + nr_entries) & (rb->nr_entries - 1);
+ mem = pblk_rb_ptr_wrap(rb, *pos, nr_entries);
*io_ret = NVM_IO_DONE;
if (bio->bi_opf & REQ_PREFLUSH) {
@@ -571,7 +595,7 @@ try:
/* Release flags on context. Protect from writes */
smp_store_release(&entry->w_ctx.flags, flags);
- pos = (pos + 1) & (rb->nr_entries - 1);
+ pos = pblk_rb_ptr_wrap(rb, pos, 1);
}
if (pad) {
@@ -651,7 +675,7 @@ out:
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
- unsigned int entry = pos & (rb->nr_entries - 1);
+ unsigned int entry = pblk_rb_ptr_wrap(rb, pos, 0);
return &rb->entries[entry].w_ctx;
}
@@ -697,7 +721,7 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
}
}
- sync = (sync + nr_entries) & (rb->nr_entries - 1);
+ sync = pblk_rb_ptr_wrap(rb, sync, nr_entries);
/* Protect from counts */
smp_store_release(&rb->sync, sync);
@@ -728,32 +752,6 @@ unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
-/*
- * Scan from the current position of the sync pointer to find the entry that
- * corresponds to the given ppa. This is necessary since write requests can be
- * completed out of order. The assumption is that the ppa is close to the sync
- * pointer thus the search will not take long.
- *
- * The caller of this function must guarantee that the sync pointer will no
- * reach the entry while it is using the metadata associated with it. With this
- * assumption in mind, there is no need to take the sync lock.
- */
-struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
- struct ppa_addr *ppa)
-{
- unsigned int sync, subm, count;
- unsigned int i;
-
- sync = READ_ONCE(rb->sync);
- subm = READ_ONCE(rb->subm);
- count = pblk_rb_ring_count(subm, sync, rb->nr_entries);
-
- for (i = 0; i < count; i++)
- sync = (sync + 1) & (rb->nr_entries - 1);
-
- return NULL;
-}
-
int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
struct pblk_rb_entry *entry;
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 5a46d7f9302f..9fba614adeeb 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -43,7 +44,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
unsigned long *read_bitmap)
{
struct pblk_sec_meta *meta_list = rqd->meta_list;
- struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
+ struct ppa_addr ppas[NVM_MAX_VLBA];
int nr_secs = rqd->nr_ppas;
bool advanced_bio = false;
int i, j = 0;
@@ -93,9 +94,7 @@ next:
}
if (pblk_io_aligned(pblk, nr_secs))
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
- else
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+ rqd->is_seq = 1;
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(nr_secs, &pblk->inflight_reads);
@@ -118,10 +117,9 @@ static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
- struct ppa_addr *p;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
- p = (nr_lbas == 1) ? &rqd->ppa_list[i] : &rqd->ppa_addr;
- print_ppa(pblk, p, "seq", i);
+ print_ppa(pblk, &ppa_list[i], "seq", i);
#endif
pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
lba, (u64)blba + i);
@@ -150,14 +148,12 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
- struct ppa_addr *p;
- int nr_ppas = rqd->nr_ppas;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
- p = (nr_ppas == 1) ? &rqd->ppa_list[j] : &rqd->ppa_addr;
- print_ppa(pblk, p, "seq", j);
+ print_ppa(pblk, &ppa_list[j], "rnd", j);
#endif
pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
- lba, meta_lba);
+ meta_lba, lba);
WARN_ON(1);
}
@@ -167,22 +163,6 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}
-static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
-{
- struct ppa_addr *ppa_list;
- int i;
-
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-
- for (i = 0; i < rqd->nr_ppas; i++) {
- struct ppa_addr ppa = ppa_list[i];
- struct pblk_line *line;
-
- line = &pblk->lines[pblk_ppa_to_line(ppa)];
- kref_put(&line->ref, pblk_line_put_wq);
- }
-}
-
static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
@@ -210,7 +190,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
bio_put(int_bio);
if (put_line)
- pblk_read_put_rqd_kref(pblk, rqd);
+ pblk_rq_to_line_put(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
@@ -270,9 +250,9 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
i = 0;
hole = find_first_zero_bit(read_bitmap, nr_secs);
do {
- int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
- struct pblk_line *line = &pblk->lines[line_id];
+ struct pblk_line *line;
+ line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
kref_put(&line->ref, pblk_line_put);
meta_list[hole].lba = lba_list_media[i];
@@ -344,7 +324,6 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
rqd->bio = new_bio;
rqd->nr_ppas = nr_holes;
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
pr_ctx->ppa_ptr = NULL;
pr_ctx->orig_bio = bio;
@@ -438,8 +417,6 @@ retry:
} else {
rqd->ppa_addr = ppa;
}
-
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
@@ -454,13 +431,6 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
int ret = NVM_IO_ERR;
- /* logic error: lba out-of-bounds. Ignore read request */
- if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
- WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
- (unsigned long long)blba, nr_secs);
- return NVM_IO_ERR;
- }
-
generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
&pblk->disk->part0);
@@ -484,21 +454,13 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
*/
bio_init_idx = pblk_get_bi_idx(bio);
- rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd->dma_meta_list);
- if (!rqd->meta_list) {
- pblk_err(pblk, "not able to allocate ppa list\n");
+ if (pblk_alloc_rqd_meta(pblk, rqd))
goto fail_rqd_free;
- }
-
- if (nr_secs > 1) {
- rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
- rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
+ if (nr_secs > 1)
pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
- } else {
+ else
pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
- }
if (bitmap_full(read_bitmap, nr_secs)) {
atomic_inc(&pblk->inflight_io);
@@ -552,7 +514,7 @@ static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_line *line, u64 *lba_list,
u64 *paddr_list_gc, unsigned int nr_secs)
{
- struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
+ struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
struct ppa_addr ppa_gc;
int valid_secs = 0;
int i;
@@ -625,15 +587,11 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
memset(&rqd, 0, sizeof(struct nvm_rq));
- rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd.dma_meta_list);
- if (!rqd.meta_list)
- return -ENOMEM;
+ ret = pblk_alloc_rqd_meta(pblk, &rqd);
+ if (ret)
+ return ret;
if (gc_rq->nr_secs > 1) {
- rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
- rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
-
gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
gc_rq->lba_list,
gc_rq->paddr_list,
@@ -654,7 +612,8 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
pblk_err(pblk, "could not allocate GC bio (%lu)\n",
- PTR_ERR(bio));
+ PTR_ERR(bio));
+ ret = PTR_ERR(bio);
goto err_free_dma;
}
@@ -663,7 +622,6 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = gc_rq->secs_to_gc;
- rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
rqd.bio = bio;
if (pblk_submit_io_sync(pblk, &rqd)) {
@@ -690,12 +648,12 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
#endif
out:
- nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+ pblk_free_rqd_meta(pblk, &rqd);
return ret;
err_free_bio:
bio_put(bio);
err_free_dma:
- nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+ pblk_free_rqd_meta(pblk, &rqd);
return ret;
}
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index e232e47e1353..5740b7509bd8 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial: Javier Gonzalez <javier@cnexlabs.com>
@@ -15,6 +16,7 @@
*/
#include "pblk.h"
+#include "pblk-trace.h"
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
@@ -85,15 +87,39 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
return 0;
}
-static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
+static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
+ u64 written_secs)
+{
+ int i;
+
+ for (i = 0; i < written_secs; i += pblk->min_write_pgs)
+ pblk_alloc_page(pblk, line, pblk->min_write_pgs);
+}
+
+static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
struct pblk_line_meta *lm = &pblk->lm;
int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
+ u64 written_secs = 0;
+ int valid_chunks = 0;
+ int i;
+
+ for (i = 0; i < lm->blk_per_line; i++) {
+ struct nvm_chk_meta *chunk = &line->chks[i];
+
+ if (chunk->state & NVM_CHK_ST_OFFLINE)
+ continue;
+
+ written_secs += chunk->wp;
+ valid_chunks++;
+ }
+
+ if (lm->blk_per_line - nr_bb != valid_chunks)
+ pblk_err(pblk, "recovery line %d is bad\n", line->id);
- return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
- nr_bb * geo->clba;
+ pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);
+
+ return written_secs;
}
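
Recovering an open line no longer requires scanning out-of-band metadata to find the write pointer: every usable chunk reports how far it was written (chunk->wp), the sum gives the line's written sector count, and page allocation is replayed up to that point. A toy of the accumulation, using a sentinel instead of the real NVM_CHK_ST_OFFLINE state bit:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical per-chunk write pointers; -1 marks offline */
            long wp[4] = { 4096, 4096, -1, 1024 };
            unsigned long written = 0;
            int i;

            for (i = 0; i < 4; i++)
                    if (wp[i] >= 0)
                            written += wp[i];

            printf("written_secs=%lu\n", written);  /* 9216 */
            return 0;
    }
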
struct pblk_recov_alloc {
@@ -105,115 +131,6 @@ struct pblk_recov_alloc {
dma_addr_t dma_meta_list;
};
-static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
- struct pblk_recov_alloc p, u64 r_ptr)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr *ppa_list;
- struct pblk_sec_meta *meta_list;
- struct nvm_rq *rqd;
- struct bio *bio;
- void *data;
- dma_addr_t dma_ppa_list, dma_meta_list;
- u64 r_ptr_int;
- int left_ppas;
- int rq_ppas, rq_len;
- int i, j;
- int ret = 0;
-
- ppa_list = p.ppa_list;
- meta_list = p.meta_list;
- rqd = p.rqd;
- data = p.data;
- dma_ppa_list = p.dma_ppa_list;
- dma_meta_list = p.dma_meta_list;
-
- left_ppas = line->cur_sec - r_ptr;
- if (!left_ppas)
- return 0;
-
- r_ptr_int = r_ptr;
-
-next_read_rq:
- memset(rqd, 0, pblk_g_rq_size);
-
- rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
- if (!rq_ppas)
- rq_ppas = pblk->min_write_pgs;
- rq_len = rq_ppas * geo->csecs;
-
- bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
-
- rqd->bio = bio;
- rqd->opcode = NVM_OP_PREAD;
- rqd->meta_list = meta_list;
- rqd->nr_ppas = rq_ppas;
- rqd->ppa_list = ppa_list;
- rqd->dma_ppa_list = dma_ppa_list;
- rqd->dma_meta_list = dma_meta_list;
-
- if (pblk_io_aligned(pblk, rq_ppas))
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
- else
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
-
- for (i = 0; i < rqd->nr_ppas; ) {
- struct ppa_addr ppa;
- int pos;
-
- ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_ppa_to_pos(geo, ppa);
-
- while (test_bit(pos, line->blk_bitmap)) {
- r_ptr_int += pblk->min_write_pgs;
- ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_ppa_to_pos(geo, ppa);
- }
-
- for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
- rqd->ppa_list[i] =
- addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- }
-
- /* If read fails, more padding is needed */
- ret = pblk_submit_io_sync(pblk, rqd);
- if (ret) {
- pblk_err(pblk, "I/O submission failed: %d\n", ret);
- return ret;
- }
-
- atomic_dec(&pblk->inflight_io);
-
- /* At this point, the read should not fail. If it does, it is a problem
- * we cannot recover from here. Need FTL log.
- */
- if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
- pblk_err(pblk, "L2P recovery failed (%d)\n", rqd->error);
- return -EINTR;
- }
-
- for (i = 0; i < rqd->nr_ppas; i++) {
- u64 lba = le64_to_cpu(meta_list[i].lba);
-
- if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
- continue;
-
- pblk_update_map(pblk, lba, rqd->ppa_list[i]);
- }
-
- left_ppas -= rq_ppas;
- if (left_ppas > 0)
- goto next_read_rq;
-
- return 0;
-}
-
static void pblk_recov_complete(struct kref *ref)
{
struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);
@@ -223,10 +140,11 @@ static void pblk_recov_complete(struct kref *ref)
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
struct pblk_pad_rq *pad_rq = rqd->private;
struct pblk *pblk = pad_rq->pblk;
- pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+ pblk_up_chunk(pblk, ppa_list[0]);
pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
@@ -234,18 +152,17 @@ static void pblk_end_io_recov(struct nvm_rq *rqd)
kref_put(&pad_rq->ref, pblk_recov_complete);
}
-static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
- int left_ppas)
+/* pad line using line bitmap. */
+static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
+ int left_ppas)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- struct ppa_addr *ppa_list;
struct pblk_sec_meta *meta_list;
struct pblk_pad_rq *pad_rq;
struct nvm_rq *rqd;
struct bio *bio;
void *data;
- dma_addr_t dma_ppa_list, dma_meta_list;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
u64 w_ptr = line->cur_sec;
int left_line_ppas, rq_ppas, rq_len;
@@ -279,20 +196,11 @@ next_pad_rq:
rq_len = rq_ppas * geo->csecs;
- meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
- if (!meta_list) {
- ret = -ENOMEM;
- goto fail_free_pad;
- }
-
- ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
- dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
-
bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto fail_free_meta;
+ goto fail_free_pad;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
@@ -300,17 +208,19 @@ next_pad_rq:
rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
+ ret = pblk_alloc_rqd_meta(pblk, rqd);
+ if (ret)
+ goto fail_free_rqd;
+
rqd->bio = bio;
rqd->opcode = NVM_OP_PWRITE;
- rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
- rqd->meta_list = meta_list;
+ rqd->is_seq = 1;
rqd->nr_ppas = rq_ppas;
- rqd->ppa_list = ppa_list;
- rqd->dma_ppa_list = dma_ppa_list;
- rqd->dma_meta_list = dma_meta_list;
rqd->end_io = pblk_end_io_recov;
rqd->private = pad_rq;
+ meta_list = rqd->meta_list;
+
for (i = 0; i < rqd->nr_ppas; ) {
struct ppa_addr ppa;
int pos;
@@ -338,13 +248,13 @@ next_pad_rq:
}
kref_get(&pad_rq->ref);
- pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+ pblk_down_chunk(pblk, rqd->ppa_list[0]);
ret = pblk_submit_io(pblk, rqd);
if (ret) {
pblk_err(pblk, "I/O submission failed: %d\n", ret);
- pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
- goto fail_free_bio;
+ pblk_up_chunk(pblk, rqd->ppa_list[0]);
+ goto fail_free_rqd;
}
left_line_ppas -= rq_ppas;
@@ -368,157 +278,60 @@ free_rq:
kfree(pad_rq);
return ret;
-fail_free_bio:
+fail_free_rqd:
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
bio_put(bio);
-fail_free_meta:
- nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
fail_free_pad:
kfree(pad_rq);
vfree(data);
return ret;
}
-/* When this function is called, it means that not all upper pages have been
- * written in a page that contains valid data. In order to recover this data, we
- * first find the write pointer on the device, then we pad all necessary
- * sectors, and finally attempt to read the valid data
- */
-static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
- struct pblk_recov_alloc p)
+static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- struct ppa_addr *ppa_list;
- struct pblk_sec_meta *meta_list;
- struct nvm_rq *rqd;
- struct bio *bio;
- void *data;
- dma_addr_t dma_ppa_list, dma_meta_list;
- u64 w_ptr = 0, r_ptr;
- int rq_ppas, rq_len;
- int i, j;
- int ret = 0;
- int rec_round;
- int left_ppas = pblk_calc_sec_in_line(pblk, line) - line->cur_sec;
-
- ppa_list = p.ppa_list;
- meta_list = p.meta_list;
- rqd = p.rqd;
- data = p.data;
- dma_ppa_list = p.dma_ppa_list;
- dma_meta_list = p.dma_meta_list;
-
- /* we could recover up until the line write pointer */
- r_ptr = line->cur_sec;
- rec_round = 0;
-
-next_rq:
- memset(rqd, 0, pblk_g_rq_size);
+ int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;
- rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
- if (!rq_ppas)
- rq_ppas = pblk->min_write_pgs;
- rq_len = rq_ppas * geo->csecs;
-
- bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ return (distance > line->left_msecs) ? line->left_msecs : distance;
+}
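
The replacement pblk_pad_distance() clamps the geometry-derived distance (mw_cunits * all_luns * ws_opt) to the sectors the line can still map. A minimal standalone sketch of that clamp, with made-up geometry values standing in for the geo fields in the diff:

/* Standalone sketch of the pad-distance clamp above; the values are
 * hypothetical stand-ins for geo->mw_cunits, geo->all_luns and
 * geo->ws_opt.
 */
#include <stdio.h>

struct geo { int mw_cunits, all_luns, ws_opt; };
struct line { int left_msecs; };

static int pad_distance(const struct geo *g, const struct line *l)
{
	int distance = g->mw_cunits * g->all_luns * g->ws_opt;

	/* Never pad past what the line can still map. */
	return distance > l->left_msecs ? l->left_msecs : distance;
}

int main(void)
{
	struct geo g = { .mw_cunits = 8, .all_luns = 4, .ws_opt = 12 };
	struct line l = { .left_msecs = 256 };

	printf("pad %d sectors\n", pad_distance(&g, &l)); /* 256, clamped */
	return 0;
}
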
- rqd->bio = bio;
- rqd->opcode = NVM_OP_PREAD;
- rqd->meta_list = meta_list;
- rqd->nr_ppas = rq_ppas;
- rqd->ppa_list = ppa_list;
- rqd->dma_ppa_list = dma_ppa_list;
- rqd->dma_meta_list = dma_meta_list;
+static int pblk_line_wp_is_unbalanced(struct pblk *pblk,
+ struct pblk_line *line)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line_meta *lm = &pblk->lm;
+ struct pblk_lun *rlun;
+ struct nvm_chk_meta *chunk;
+ struct ppa_addr ppa;
+ u64 line_wp;
+ int pos, i;
- if (pblk_io_aligned(pblk, rq_ppas))
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
- else
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+ rlun = &pblk->luns[0];
+ ppa = rlun->bppa;
+ pos = pblk_ppa_to_pos(geo, ppa);
+ chunk = &line->chks[pos];
- for (i = 0; i < rqd->nr_ppas; ) {
- struct ppa_addr ppa;
- int pos;
+ line_wp = chunk->wp;
- w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
- ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
+ for (i = 1; i < lm->blk_per_line; i++) {
+ rlun = &pblk->luns[i];
+ ppa = rlun->bppa;
pos = pblk_ppa_to_pos(geo, ppa);
+ chunk = &line->chks[pos];
- while (test_bit(pos, line->blk_bitmap)) {
- w_ptr += pblk->min_write_pgs;
- ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_ppa_to_pos(geo, ppa);
- }
-
- for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
- rqd->ppa_list[i] =
- addr_to_gen_ppa(pblk, w_ptr, line->id);
- }
-
- ret = pblk_submit_io_sync(pblk, rqd);
- if (ret) {
- pblk_err(pblk, "I/O submission failed: %d\n", ret);
- return ret;
- }
-
- atomic_dec(&pblk->inflight_io);
-
- /* This should not happen since the read failed during normal recovery,
- * but the media works funny sometimes...
- */
- if (!rec_round++ && !rqd->error) {
- rec_round = 0;
- for (i = 0; i < rqd->nr_ppas; i++, r_ptr++) {
- u64 lba = le64_to_cpu(meta_list[i].lba);
-
- if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
- continue;
-
- pblk_update_map(pblk, lba, rqd->ppa_list[i]);
- }
- }
-
- /* Reached the end of the written line */
- if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
- int pad_secs, nr_error_bits, bit;
- int ret;
-
- bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
- nr_error_bits = rqd->nr_ppas - bit;
-
- /* Roll back failed sectors */
- line->cur_sec -= nr_error_bits;
- line->left_msecs += nr_error_bits;
- bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);
-
- pad_secs = pblk_pad_distance(pblk);
- if (pad_secs > line->left_msecs)
- pad_secs = line->left_msecs;
-
- ret = pblk_recov_pad_oob(pblk, line, pad_secs);
- if (ret)
- pblk_err(pblk, "OOB padding failed (err:%d)\n", ret);
-
- ret = pblk_recov_read_oob(pblk, line, p, r_ptr);
- if (ret)
- pblk_err(pblk, "OOB read failed (err:%d)\n", ret);
-
- left_ppas = 0;
+ if (chunk->wp > line_wp)
+ return 1;
+ else if (chunk->wp < line_wp)
+ line_wp = chunk->wp;
}
- left_ppas -= rq_ppas;
- if (left_ppas > 0)
- goto next_rq;
-
- return ret;
+ return 0;
}
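
pblk_line_wp_is_unbalanced() walks each LUN's chunk in the line and declares the line unbalanced as soon as any chunk's write pointer exceeds the running minimum. A standalone model of the scan, with a made-up write-pointer array in place of the chunk metadata:

/* Model of the unbalanced-write-pointer scan: unbalanced if any
 * chunk's write pointer exceeds the minimum seen so far while walking
 * the LUNs in order. The wp values are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

static bool wp_is_unbalanced(const unsigned long long *wp, int nr_chunks)
{
	unsigned long long line_wp = wp[0];
	int i;

	for (i = 1; i < nr_chunks; i++) {
		if (wp[i] > line_wp)
			return true;		/* a chunk ran ahead */
		if (wp[i] < line_wp)
			line_wp = wp[i];	/* track the minimum */
	}
	return false;
}

int main(void)
{
	unsigned long long wp[] = { 100, 100, 96, 100 };

	/* wp[3] > min(100, 100, 96), so this reports unbalanced */
	printf("%s\n", wp_is_unbalanced(wp, 4) ? "unbalanced" : "balanced");
	return 0;
}
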
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
- struct pblk_recov_alloc p, int *done)
+ struct pblk_recov_alloc p)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
@@ -528,11 +341,16 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
struct bio *bio;
void *data;
dma_addr_t dma_ppa_list, dma_meta_list;
- u64 paddr;
+ __le64 *lba_list;
+ u64 paddr = 0;
+ bool padded = false;
int rq_ppas, rq_len;
int i, j;
- int ret = 0;
- int left_ppas = pblk_calc_sec_in_line(pblk, line);
+ int ret;
+ u64 left_ppas = pblk_sec_in_open_line(pblk, line);
+
+ if (pblk_line_wp_is_unbalanced(pblk, line))
+ pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);
ppa_list = p.ppa_list;
meta_list = p.meta_list;
@@ -541,7 +359,7 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
dma_ppa_list = p.dma_ppa_list;
dma_meta_list = p.dma_meta_list;
- *done = 1;
+ lba_list = emeta_to_lbas(pblk, line->emeta->buf);
next_rq:
memset(rqd, 0, pblk_g_rq_size);
@@ -567,15 +385,13 @@ next_rq:
rqd->dma_meta_list = dma_meta_list;
if (pblk_io_aligned(pblk, rq_ppas))
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
- else
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+ rqd->is_seq = 1;
+retry_rq:
for (i = 0; i < rqd->nr_ppas; ) {
struct ppa_addr ppa;
int pos;
- paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
@@ -585,9 +401,9 @@ next_rq:
pos = pblk_ppa_to_pos(geo, ppa);
}
- for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
+ for (j = 0; j < pblk->min_write_pgs; j++, i++)
rqd->ppa_list[i] =
- addr_to_gen_ppa(pblk, paddr, line->id);
+ addr_to_gen_ppa(pblk, paddr + j, line->id);
}
ret = pblk_submit_io_sync(pblk, rqd);
@@ -599,31 +415,33 @@ next_rq:
atomic_dec(&pblk->inflight_io);
- /* Reached the end of the written line */
+ /* If a read fails, make a best-effort recovery by padding the line and retrying */
if (rqd->error) {
- int nr_error_bits, bit;
+ int pad_distance, ret;
- bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
- nr_error_bits = rqd->nr_ppas - bit;
-
- /* Roll back failed sectors */
- line->cur_sec -= nr_error_bits;
- line->left_msecs += nr_error_bits;
- bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);
+ if (padded) {
+ pblk_log_read_err(pblk, rqd);
+ return -EINTR;
+ }
- left_ppas = 0;
- rqd->nr_ppas = bit;
+ pad_distance = pblk_pad_distance(pblk, line);
+ ret = pblk_recov_pad_line(pblk, line, pad_distance);
+ if (ret)
+ return ret;
- if (rqd->error != NVM_RSP_ERR_EMPTYPAGE)
- *done = 0;
+ padded = true;
+ goto retry_rq;
}
for (i = 0; i < rqd->nr_ppas; i++) {
u64 lba = le64_to_cpu(meta_list[i].lba);
+ lba_list[paddr++] = cpu_to_le64(lba);
+
if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
continue;
+ line->nr_valid_lbas++;
pblk_update_map(pblk, lba, rqd->ppa_list[i]);
}
@@ -631,7 +449,11 @@ next_rq:
if (left_ppas > 0)
goto next_rq;
- return ret;
+#ifdef CONFIG_NVM_PBLK_DEBUG
+ WARN_ON(padded && !pblk_line_is_full(line));
+#endif
+
+ return 0;
}
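
The rewritten scan path drops the old roll-back bookkeeping in favor of a single retry: on a read error, pad the line once and re-issue the same request; a second failure is fatal (-EINTR). A runnable model of that retry-once shape, where submit_read() and pad_line() are hypothetical stand-ins for the pblk helpers:

/* Shape of the pad-and-retry-once loop above. The first read fails,
 * padding succeeds, the retried read succeeds.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int reads_left_failing = 1;

static int submit_read(void)
{
	return reads_left_failing-- > 0 ? -EIO : 0;
}

static int pad_line(void)
{
	return 0;	/* pretend padding always succeeds */
}

static int recover_chunk(void)
{
	bool padded = false;
	int ret;

retry:
	ret = submit_read();
	if (ret) {
		if (padded)
			return -EINTR;	/* already padded once: give up */
		ret = pad_line();
		if (ret)
			return ret;
		padded = true;
		goto retry;
	}
	return 0;
}

int main(void)
{
	printf("recover: %d\n", recover_chunk());	/* 0 after one pad */
	return 0;
}
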
/* Scan line for lbas on out of bound area */
@@ -645,7 +467,7 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
struct pblk_recov_alloc p;
void *data;
dma_addr_t dma_ppa_list, dma_meta_list;
- int done, ret = 0;
+ int ret = 0;
meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
if (!meta_list)
@@ -660,7 +482,8 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
goto free_meta_list;
}
- rqd = pblk_alloc_rqd(pblk, PBLK_READ);
+ rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
+ memset(rqd, 0, pblk_g_rq_size);
p.ppa_list = ppa_list;
p.meta_list = meta_list;
@@ -669,24 +492,17 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
p.dma_ppa_list = dma_ppa_list;
p.dma_meta_list = dma_meta_list;
- ret = pblk_recov_scan_oob(pblk, line, p, &done);
+ ret = pblk_recov_scan_oob(pblk, line, p);
if (ret) {
- pblk_err(pblk, "could not recover L2P from OOB\n");
+ pblk_err(pblk, "could not recover L2P form OOB\n");
goto out;
}
- if (!done) {
- ret = pblk_recov_scan_all_oob(pblk, line, p);
- if (ret) {
- pblk_err(pblk, "could not recover L2P from OOB\n");
- goto out;
- }
- }
-
if (pblk_line_is_full(line))
pblk_line_recov_close(pblk, line);
out:
+ mempool_free(rqd, &pblk->r_rq_pool);
kfree(data);
free_meta_list:
nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
@@ -775,7 +591,7 @@ static void pblk_recov_wa_counters(struct pblk *pblk,
}
static int pblk_line_was_written(struct pblk_line *line,
- struct pblk *pblk)
+ struct pblk *pblk)
{
struct pblk_line_meta *lm = &pblk->lm;
@@ -801,6 +617,18 @@ static int pblk_line_was_written(struct pblk_line *line,
return 1;
}
+static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
+{
+ struct pblk_line_meta *lm = &pblk->lm;
+ int i;
+
+ for (i = 0; i < lm->blk_per_line; i++)
+ if (line->chks[i].state & NVM_CHK_ST_OPEN)
+ return true;
+
+ return false;
+}
+
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
struct pblk_line_meta *lm = &pblk->lm;
@@ -841,7 +669,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
continue;
/* Lines that cannot be read are assumed not to have been written here */
- if (pblk_line_read_smeta(pblk, line))
+ if (pblk_line_smeta_read(pblk, line))
continue;
crc = pblk_calc_smeta_crc(pblk, smeta_buf);
@@ -911,7 +739,12 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
line->emeta = emeta;
memset(line->emeta->buf, 0, lm->emeta_len[0]);
- if (pblk_line_read_emeta(pblk, line, line->emeta->buf)) {
+ if (pblk_line_is_open(pblk, line)) {
+ pblk_recov_l2p_from_oob(pblk, line);
+ goto next;
+ }
+
+ if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
pblk_recov_l2p_from_oob(pblk, line);
goto next;
}
@@ -935,6 +768,8 @@ next:
spin_lock(&line->lock);
line->state = PBLK_LINESTATE_CLOSED;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
move_list = pblk_line_gc_list(pblk, line);
spin_unlock(&line->lock);
@@ -942,26 +777,36 @@ next:
list_move_tail(&line->list, move_list);
spin_unlock(&l_mg->gc_lock);
- kfree(line->map_bitmap);
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
} else {
- if (open_lines > 1)
- pblk_err(pblk, "failed to recover L2P\n");
+ spin_lock(&line->lock);
+ line->state = PBLK_LINESTATE_OPEN;
+ spin_unlock(&line->lock);
+
+ line->emeta->mem = 0;
+ atomic_set(&line->emeta->sync, 0);
+
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
- open_lines++;
- line->meta_line = meta_line;
data_line = line;
+ line->meta_line = meta_line;
+
+ open_lines++;
}
}
- spin_lock(&l_mg->free_lock);
if (!open_lines) {
+ spin_lock(&l_mg->free_lock);
WARN_ON_ONCE(!test_and_clear_bit(meta_line,
&l_mg->meta_bitmap));
+ spin_unlock(&l_mg->free_lock);
pblk_line_replace_data(pblk);
} else {
+ spin_lock(&l_mg->free_lock);
/* Allocate next line for preparation */
l_mg->data_next = pblk_line_get(pblk);
if (l_mg->data_next) {
@@ -969,8 +814,8 @@ next:
l_mg->data_next->type = PBLK_LINETYPE_DATA;
is_next = 1;
}
+ spin_unlock(&l_mg->free_lock);
}
- spin_unlock(&l_mg->free_lock);
if (is_next)
pblk_line_erase(pblk, l_mg->data_next);
@@ -998,7 +843,7 @@ int pblk_recov_pad(struct pblk *pblk)
left_msecs = line->left_msecs;
spin_unlock(&l_mg->free_lock);
- ret = pblk_recov_pad_oob(pblk, line, left_msecs);
+ ret = pblk_recov_pad_line(pblk, line, left_msecs);
if (ret) {
pblk_err(pblk, "tear down padding failed (%d)\n", ret);
return ret;
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index 6a0616a6fcaf..db55a1c89997 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -127,7 +128,7 @@ static void __pblk_rl_update_rates(struct pblk_rl *rl,
} else if (free_blocks < rl->high) {
int shift = rl->high_pw - rl->rb_windows_pw;
int user_windows = free_blocks >> shift;
- int user_max = user_windows << PBLK_MAX_REQ_ADDRS_PW;
+ int user_max = user_windows << ilog2(NVM_MAX_VLBA);
rl->rb_user_max = user_max;
rl->rb_gc_max = max - user_max;
@@ -228,7 +229,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
rl->rsv_blocks = min_blocks;
/* This will always be a power-of-2 */
- rb_windows = budget / PBLK_MAX_REQ_ADDRS;
+ rb_windows = budget / NVM_MAX_VLBA;
rl->rb_windows_pw = get_count_order(rb_windows);
/* To start with, all buffer is available to user I/O writers */
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index 9fc3dfa168b4..2d2818155aa8 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -262,8 +263,14 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
sec_in_line = l_mg->data_line->sec_in_line;
meta_weight = bitmap_weight(&l_mg->meta_bitmap,
PBLK_DATA_LINES);
- map_weight = bitmap_weight(l_mg->data_line->map_bitmap,
+
+ spin_lock(&l_mg->data_line->lock);
+ if (l_mg->data_line->map_bitmap)
+ map_weight = bitmap_weight(l_mg->data_line->map_bitmap,
lm->sec_per_line);
+ else
+ map_weight = 0;
+ spin_unlock(&l_mg->data_line->lock);
}
spin_unlock(&l_mg->free_lock);
@@ -337,7 +344,6 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
{
int sz;
-
sz = snprintf(page, PAGE_SIZE,
"user:%lld gc:%lld pad:%lld WA:",
user, gc, pad);
@@ -349,7 +355,7 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
u32 wa_frac;
wa_int = (user + gc + pad) * 100000;
- wa_int = div_u64(wa_int, user);
+ wa_int = div64_u64(wa_int, user);
wa_int = div_u64_rem(wa_int, 100000, &wa_frac);
sz += snprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n",
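
The div_u64() to div64_u64() switch matters because the kernel's div_u64() takes a 32-bit divisor, while the user sector count here is a u64. A userspace model of the write-amplification arithmetic, assuming plain 64-by-64 division:

/* Userspace model of the WA computation above; in the kernel the u64
 * "user" divisor is what forces div64_u64().
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t user = 3000000000ULL, gc = 10, pad = 5;
	uint64_t wa = (user + gc + pad) * 100000 / user;

	printf("WA %" PRIu64 ".%05" PRIu64 "\n", wa / 100000, wa % 100000);
	return 0;
}
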
diff --git a/drivers/lightnvm/pblk-trace.h b/drivers/lightnvm/pblk-trace.h
new file mode 100644
index 000000000000..679e5c458ca6
--- /dev/null
+++ b/drivers/lightnvm/pblk-trace.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pblk
+
+#if !defined(_TRACE_PBLK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PBLK_H
+
+#include <linux/tracepoint.h>
+
+struct ppa_addr;
+
+#define show_chunk_flags(state) __print_flags(state, "", \
+ { NVM_CHK_ST_FREE, "FREE", }, \
+ { NVM_CHK_ST_CLOSED, "CLOSED", }, \
+ { NVM_CHK_ST_OPEN, "OPEN", }, \
+ { NVM_CHK_ST_OFFLINE, "OFFLINE", })
+
+#define show_line_state(state) __print_symbolic(state, \
+ { PBLK_LINESTATE_NEW, "NEW", }, \
+ { PBLK_LINESTATE_FREE, "FREE", }, \
+ { PBLK_LINESTATE_OPEN, "OPEN", }, \
+ { PBLK_LINESTATE_CLOSED, "CLOSED", }, \
+ { PBLK_LINESTATE_GC, "GC", }, \
+ { PBLK_LINESTATE_BAD, "BAD", }, \
+ { PBLK_LINESTATE_CORRUPT, "CORRUPT" })
+
+
+#define show_pblk_state(state) __print_symbolic(state, \
+ { PBLK_STATE_RUNNING, "RUNNING", }, \
+ { PBLK_STATE_STOPPING, "STOPPING", }, \
+ { PBLK_STATE_RECOVERING, "RECOVERING", }, \
+ { PBLK_STATE_STOPPED, "STOPPED" })
+
+#define show_chunk_erase_state(state) __print_symbolic(state, \
+ { PBLK_CHUNK_RESET_START, "START", }, \
+ { PBLK_CHUNK_RESET_DONE, "OK", }, \
+ { PBLK_CHUNK_RESET_FAILED, "FAILED" })
+
+
+TRACE_EVENT(pblk_chunk_reset,
+
+ TP_PROTO(const char *name, struct ppa_addr *ppa, int state),
+
+ TP_ARGS(name, ppa, state),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(u64, ppa)
+ __field(int, state);
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->ppa = ppa->ppa;
+ __entry->state = state;
+ ),
+
+ TP_printk("dev=%s grp=%llu pu=%llu chk=%llu state=%s", __get_str(name),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.grp),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.pu),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.chk),
+ show_chunk_erase_state((int)__entry->state))
+
+);
+
+TRACE_EVENT(pblk_chunk_state,
+
+ TP_PROTO(const char *name, struct ppa_addr *ppa, int state),
+
+ TP_ARGS(name, ppa, state),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(u64, ppa)
+ __field(int, state);
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->ppa = ppa->ppa;
+ __entry->state = state;
+ ),
+
+ TP_printk("dev=%s grp=%llu pu=%llu chk=%llu state=%s", __get_str(name),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.grp),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.pu),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.chk),
+ show_chunk_flags((int)__entry->state))
+
+);
+
+TRACE_EVENT(pblk_line_state,
+
+ TP_PROTO(const char *name, int line, int state),
+
+ TP_ARGS(name, line, state),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(int, line)
+ __field(int, state);
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->line = line;
+ __entry->state = state;
+ ),
+
+ TP_printk("dev=%s line=%d state=%s", __get_str(name),
+ (int)__entry->line,
+ show_line_state((int)__entry->state))
+
+);
+
+TRACE_EVENT(pblk_state,
+
+ TP_PROTO(const char *name, int state),
+
+ TP_ARGS(name, state),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(int, state);
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->state = state;
+ ),
+
+ TP_printk("dev=%s state=%s", __get_str(name),
+ show_pblk_state((int)__entry->state))
+
+);
+
+#endif /* !defined(_TRACE_PBLK_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../../drivers/lightnvm
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE pblk-trace
+#include <trace/define_trace.h>
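
The new header follows the standard kernel tracepoint layout: the events only exist once some compilation unit instantiates them. A hedged sketch of the instantiation and a call site; which pblk .c file actually defines CREATE_TRACE_POINTS is not shown in this hunk, and the call matches the TP_PROTO above:

/* Sketch (kernel context, not standalone): exactly one .c file defines
 * CREATE_TRACE_POINTS before including the header so the tracepoints
 * are emitted.
 */
#define CREATE_TRACE_POINTS
#include "pblk-trace.h"

/* ... and a caller, as seen in the pblk_recov_l2p() hunk earlier: */
trace_pblk_line_state(pblk_disk_name(pblk), line->id, line->state);
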
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index ee774a86cf1e..fa8726493b39 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -16,6 +17,7 @@
*/
#include "pblk.h"
+#include "pblk-trace.h"
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx)
@@ -81,8 +83,7 @@ static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
-
- pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);
+ pblk_up_rq(pblk, c_ctx->lun_bitmap);
pos = pblk_rb_sync_init(&pblk->rwb, &flags);
if (pos == c_ctx->sentry) {
@@ -106,14 +107,12 @@ retry:
/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
struct pblk_line *line;
struct ppa_addr map_ppa = *ppa;
u64 paddr;
int done = 0;
- line = &pblk->lines[pblk_ppa_to_line(*ppa)];
+ line = pblk_ppa_to_line(pblk, *ppa);
spin_lock(&line->lock);
while (!done) {
@@ -125,15 +124,7 @@ static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
if (!test_and_set_bit(paddr, line->invalid_bitmap))
le32_add_cpu(line->vsc, -1);
- if (geo->version == NVM_OCSSD_SPEC_12) {
- map_ppa.ppa++;
- if (map_ppa.g.pg == geo->num_pg)
- done = 1;
- } else {
- map_ppa.m.sec++;
- if (map_ppa.m.sec == geo->clba)
- done = 1;
- }
+ done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);
}
line->w_err_gc->has_write_err = 1;
@@ -149,12 +140,11 @@ static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
struct pblk_w_ctx *w_ctx;
struct ppa_addr ppa_l2p;
int flags;
- unsigned int pos, i;
+ unsigned int i;
spin_lock(&pblk->trans_lock);
- pos = sentry;
for (i = 0; i < nr_entries; i++) {
- entry = &rb->entries[pos];
+ entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
w_ctx = &entry->w_ctx;
/* Check if the lba has been overwritten */
@@ -168,13 +158,11 @@ static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
/* Release flags on write context. Protect from writes */
smp_store_release(&w_ctx->flags, flags);
- /* Decrese the reference count to the line as we will
+ /* Decrease the reference count to the line as we will
* re-map these entries
*/
- line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
+ line = pblk_ppa_to_line(pblk, w_ctx->ppa);
kref_put(&line->ref, pblk_line_put);
-
- pos = (pos + 1) & (rb->nr_entries - 1);
}
spin_unlock(&pblk->trans_lock);
}
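
The resubmit loop now delegates index wrapping to pblk_rb_ptr_wrap() instead of the open-coded mask. A one-function model of the wrap, valid only because the ring size is a power of two, exactly as the deleted "(pos + 1) & (rb->nr_entries - 1)" relied on:

/* Model of power-of-two ring-buffer index wrapping; pblk_rb_ptr_wrap()
 * presumably centralizes this, the exact implementation is not in this
 * hunk.
 */
#include <stdio.h>

static unsigned int ptr_wrap(unsigned int nr_entries, unsigned int p,
			     unsigned int nr)
{
	return (p + nr) & (nr_entries - 1);	/* nr_entries must be 2^k */
}

int main(void)
{
	printf("%u\n", ptr_wrap(8, 6, 3));	/* (6 + 3) mod 8 = 1 */
	return 0;
}
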
@@ -208,19 +196,14 @@ static void pblk_submit_rec(struct work_struct *work)
struct pblk *pblk = recovery->pblk;
struct nvm_rq *rqd = recovery->rqd;
struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
- struct ppa_addr *ppa_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
pblk_log_write_err(pblk, rqd);
- if (rqd->nr_ppas == 1)
- ppa_list = &rqd->ppa_addr;
- else
- ppa_list = rqd->ppa_list;
-
pblk_map_remaining(pblk, ppa_list);
pblk_queue_resubmit(pblk, c_ctx);
- pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);
+ pblk_up_rq(pblk, c_ctx->lun_bitmap);
if (c_ctx->nr_padded)
pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
c_ctx->nr_padded);
@@ -257,11 +240,13 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
if (rqd->error) {
pblk_end_w_fail(pblk, rqd);
return;
- }
+ } else {
+ if (trace_pblk_chunk_state_enabled())
+ pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
- else
WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif
+ }
pblk_complete_write(pblk, rqd, c_ctx);
atomic_dec(&pblk->inflight_io);
@@ -273,14 +258,18 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
struct pblk_line *line = m_ctx->private;
struct pblk_emeta *emeta = line->emeta;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
int sync;
- pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+ pblk_up_chunk(pblk, ppa_list[0]);
if (rqd->error) {
pblk_log_write_err(pblk, rqd);
pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
line->w_err_gc->has_write_err = 1;
+ } else {
+ if (trace_pblk_chunk_state_enabled())
+ pblk_check_chunk_state_update(pblk, rqd);
}
sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
@@ -294,27 +283,16 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
}
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int nr_secs,
- nvm_end_io_fn(*end_io))
+ unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
- struct nvm_tgt_dev *dev = pblk->dev;
-
/* Setup write request */
rqd->opcode = NVM_OP_PWRITE;
rqd->nr_ppas = nr_secs;
- rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
+ rqd->is_seq = 1;
rqd->private = pblk;
rqd->end_io = end_io;
- rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd->dma_meta_list);
- if (!rqd->meta_list)
- return -ENOMEM;
-
- rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
- rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
-
- return 0;
+ return pblk_alloc_rqd_meta(pblk, rqd);
}
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -375,6 +353,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_emeta *emeta = meta_line->emeta;
+ struct ppa_addr *ppa_list;
struct pblk_g_ctx *m_ctx;
struct bio *bio;
struct nvm_rq *rqd;
@@ -409,22 +388,22 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
if (ret)
goto fail_free_bio;
+ ppa_list = nvm_rq_to_ppa_list(rqd);
for (i = 0; i < rqd->nr_ppas; ) {
spin_lock(&meta_line->lock);
paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
spin_unlock(&meta_line->lock);
for (j = 0; j < rq_ppas; j++, i++, paddr++)
- rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
}
+ spin_lock(&l_mg->close_lock);
emeta->mem += rq_len;
- if (emeta->mem >= lm->emeta_len[0]) {
- spin_lock(&l_mg->close_lock);
+ if (emeta->mem >= lm->emeta_len[0])
list_del(&meta_line->list);
- spin_unlock(&l_mg->close_lock);
- }
+ spin_unlock(&l_mg->close_lock);
- pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+ pblk_down_chunk(pblk, ppa_list[0]);
ret = pblk_submit_io(pblk, rqd);
if (ret) {
@@ -435,7 +414,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
return NVM_IO_OK;
fail_rollback:
- pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+ pblk_up_chunk(pblk, ppa_list[0]);
spin_lock(&l_mg->close_lock);
pblk_dealloc_page(pblk, meta_line, rq_ppas);
list_add(&meta_line->list, &meta_line->list);
@@ -491,14 +470,15 @@ static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
struct pblk_line *meta_line;
spin_lock(&l_mg->close_lock);
-retry:
if (list_empty(&l_mg->emeta_list)) {
spin_unlock(&l_mg->close_lock);
return NULL;
}
meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
- if (meta_line->emeta->mem >= lm->emeta_len[0])
- goto retry;
+ if (meta_line->emeta->mem >= lm->emeta_len[0]) {
+ spin_unlock(&l_mg->close_lock);
+ return NULL;
+ }
spin_unlock(&l_mg->close_lock);
if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 4760af7b6499..02bb2e98f8a9 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
* Copyright (C) 2016 CNEX Labs
@@ -37,8 +38,6 @@
#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)
-#define PBLK_MAX_REQ_ADDRS (64)
-#define PBLK_MAX_REQ_ADDRS_PW (6)
#define PBLK_NR_CLOSE_JOBS (4)
@@ -81,6 +80,12 @@ enum {
PBLK_BLK_ST_CLOSED = 0x2,
};
+enum {
+ PBLK_CHUNK_RESET_START,
+ PBLK_CHUNK_RESET_DONE,
+ PBLK_CHUNK_RESET_FAILED,
+};
+
struct pblk_sec_meta {
u64 reserved;
__le64 lba;
@@ -99,8 +104,8 @@ enum {
PBLK_RL_LOW = 4
};
-#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
-#define pblk_dma_ppa_size (sizeof(u64) * PBLK_MAX_REQ_ADDRS)
+#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * NVM_MAX_VLBA)
+#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)
/* write buffer completion context */
struct pblk_c_ctx {
@@ -198,6 +203,11 @@ struct pblk_rb {
* will be 4KB
*/
+ unsigned int back_thres; /* Threshold that shall be maintained by
+ * the backpointer in order to respect
+ * geo->mw_cunits on a per chunk basis
+ */
+
struct list_head pages; /* List of data pages */
spinlock_t w_lock; /* Write lock */
@@ -218,8 +228,8 @@ struct pblk_lun {
struct pblk_gc_rq {
struct pblk_line *line;
void *data;
- u64 paddr_list[PBLK_MAX_REQ_ADDRS];
- u64 lba_list[PBLK_MAX_REQ_ADDRS];
+ u64 paddr_list[NVM_MAX_VLBA];
+ u64 lba_list[NVM_MAX_VLBA];
int nr_secs;
int secs_to_gc;
struct list_head list;
@@ -532,6 +542,10 @@ struct pblk_line_mgmt {
struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
unsigned long meta_bitmap;
+ /* Cache and mempool for map/invalid bitmaps */
+ struct kmem_cache *bitmap_cache;
+ mempool_t *bitmap_pool;
+
/* Helpers for fast bitmap calculations */
unsigned long *bb_template;
unsigned long *bb_aux;
@@ -725,10 +739,8 @@ struct pblk_line_ws {
/*
* pblk ring buffer operations
*/
-int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
- unsigned int power_size, unsigned int power_seg_sz);
-unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
-void *pblk_rb_entries_ref(struct pblk_rb *rb);
+int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
+ unsigned int seg_sz);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
@@ -751,8 +763,8 @@ unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
-struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
- struct ppa_addr *ppa);
+unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
+ unsigned int nr_entries);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
@@ -762,7 +774,7 @@ unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
-void pblk_rb_data_free(struct pblk_rb *rb);
+void pblk_rb_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
/*
@@ -770,11 +782,13 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
*/
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
+int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
+void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx);
void pblk_discard(struct pblk *pblk, struct bio *bio);
-struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk);
+struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk);
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
struct nvm_chk_meta *lp,
struct ppa_addr ppa);
@@ -782,13 +796,17 @@ void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
+int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
+void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
unsigned int nr_secs, unsigned int len,
int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
+void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa);
+void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
@@ -806,8 +824,8 @@ void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
void (*work)(struct work_struct *), gfp_t gfp_mask,
struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
-int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
-int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
+int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line);
+int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
@@ -819,12 +837,11 @@ u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
unsigned long secs_to_flush);
-void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
+void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
unsigned long *lun_bitmap);
-void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
-void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
- unsigned long *lun_bitmap);
+void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
+void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa);
+void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
@@ -976,17 +993,15 @@ static inline int pblk_line_vsc(struct pblk_line *line)
return le32_to_cpu(*line->vsc);
}
-static inline int pblk_pad_distance(struct pblk *pblk)
+static inline int pblk_ppa_to_line_id(struct ppa_addr p)
{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
-
- return geo->mw_cunits * geo->all_luns * geo->ws_opt;
+ return p.a.blk;
}
-static inline int pblk_ppa_to_line(struct ppa_addr p)
+static inline struct pblk_line *pblk_ppa_to_line(struct pblk *pblk,
+ struct ppa_addr p)
{
- return p.a.blk;
+ return &pblk->lines[pblk_ppa_to_line_id(p)];
}
static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
@@ -1034,6 +1049,25 @@ static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
return ppa;
}
+static inline struct nvm_chk_meta *pblk_dev_ppa_to_chunk(struct pblk *pblk,
+ struct ppa_addr p)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line *line = pblk_ppa_to_line(pblk, p);
+ int pos = pblk_ppa_to_pos(geo, p);
+
+ return &line->chks[pos];
+}
+
+static inline u64 pblk_dev_ppa_to_chunk_addr(struct pblk *pblk,
+ struct ppa_addr p)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+
+ return dev_to_chunk_addr(dev->parent, &pblk->addrf, p);
+}
+
static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
struct ppa_addr p)
{
@@ -1067,86 +1101,16 @@ static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
- struct ppa_addr ppa64;
-
- ppa64.ppa = 0;
-
- if (ppa32 == -1) {
- ppa64.ppa = ADDR_EMPTY;
- } else if (ppa32 & (1U << 31)) {
- ppa64.c.line = ppa32 & ((~0U) >> 1);
- ppa64.c.is_cached = 1;
- } else {
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf =
- (struct nvm_addrf_12 *)&pblk->addrf;
-
- ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
- ppaf->ch_offset;
- ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
- ppaf->lun_offset;
- ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
- ppaf->blk_offset;
- ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
- ppaf->pg_offset;
- ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
- ppaf->pln_offset;
- ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
- ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = &pblk->addrf;
-
- ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
- lbaf->ch_offset;
- ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
- lbaf->lun_offset;
- ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
- lbaf->chk_offset;
- ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
- lbaf->sec_offset;
- }
- }
+ struct nvm_tgt_dev *dev = pblk->dev;
- return ppa64;
+ return nvm_ppa32_to_ppa64(dev->parent, &pblk->addrf, ppa32);
}
static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
- u32 ppa32 = 0;
-
- if (ppa64.ppa == ADDR_EMPTY) {
- ppa32 = ~0U;
- } else if (ppa64.c.is_cached) {
- ppa32 |= ppa64.c.line;
- ppa32 |= 1U << 31;
- } else {
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf =
- (struct nvm_addrf_12 *)&pblk->addrf;
-
- ppa32 |= ppa64.g.ch << ppaf->ch_offset;
- ppa32 |= ppa64.g.lun << ppaf->lun_offset;
- ppa32 |= ppa64.g.blk << ppaf->blk_offset;
- ppa32 |= ppa64.g.pg << ppaf->pg_offset;
- ppa32 |= ppa64.g.pl << ppaf->pln_offset;
- ppa32 |= ppa64.g.sec << ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = &pblk->addrf;
-
- ppa32 |= ppa64.m.grp << lbaf->ch_offset;
- ppa32 |= ppa64.m.pu << lbaf->lun_offset;
- ppa32 |= ppa64.m.chk << lbaf->chk_offset;
- ppa32 |= ppa64.m.sec << lbaf->sec_offset;
- }
- }
+ struct nvm_tgt_dev *dev = pblk->dev;
- return ppa32;
+ return nvm_ppa64_to_ppa32(dev->parent, &pblk->addrf, ppa64);
}
static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
@@ -1255,44 +1219,6 @@ static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
return crc;
}
-static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- int flags;
-
- if (geo->version == NVM_OCSSD_SPEC_20)
- return 0;
-
- flags = geo->pln_mode >> 1;
-
- if (type == PBLK_WRITE)
- flags |= NVM_IO_SCRAMBLE_ENABLE;
-
- return flags;
-}
-
-enum {
- PBLK_READ_RANDOM = 0,
- PBLK_READ_SEQUENTIAL = 1,
-};
-
-static inline int pblk_set_read_mode(struct pblk *pblk, int type)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- int flags;
-
- if (geo->version == NVM_OCSSD_SPEC_20)
- return 0;
-
- flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
- if (type == PBLK_READ_SEQUENTIAL)
- flags |= geo->pln_mode >> 1;
-
- return flags;
-}
-
static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
return !(nr_secs % pblk->min_write_pgs);
@@ -1375,9 +1301,7 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
{
struct nvm_tgt_dev *dev = pblk->dev;
- struct ppa_addr *ppa_list;
-
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
WARN_ON(1);
@@ -1386,12 +1310,10 @@ static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
if (rqd->opcode == NVM_OP_PWRITE) {
struct pblk_line *line;
- struct ppa_addr ppa;
int i;
for (i = 0; i < rqd->nr_ppas; i++) {
- ppa = ppa_list[i];
- line = &pblk->lines[pblk_ppa_to_line(ppa)];
+ line = pblk_ppa_to_line(pblk, ppa_list[i]);
spin_lock(&line->lock);
if (line->state != PBLK_LINESTATE_OPEN) {
@@ -1441,4 +1363,11 @@ static inline void pblk_setup_uuid(struct pblk *pblk)
uuid_le_gen(&uuid);
memcpy(pblk->instance_uuid, uuid.b, 16);
}
+
+static inline char *pblk_disk_name(struct pblk *pblk)
+{
+ struct gendisk *disk = pblk->disk;
+
+ return disk->disk_name;
+}
#endif /* PBLK_H_ */
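
Several hunks above replace the open-coded single/multi-PPA distinction with nvm_rq_to_ppa_list(). A model of what that helper presumably does, inferred from the "(rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr" pattern this diff deletes; not the verbatim lightnvm implementation:

/* Model of the single-vs-list PPA resolution; struct fields mirror the
 * nvm_rq members used in the diff.
 */
#include <stdio.h>

struct ppa_addr { unsigned long long ppa; };

struct nvm_rq_model {
	unsigned int nr_ppas;
	struct ppa_addr ppa_addr;	/* inline address, nr_ppas == 1 */
	struct ppa_addr *ppa_list;	/* external list otherwise */
};

static struct ppa_addr *rq_to_ppa_list(struct nvm_rq_model *rqd)
{
	return rqd->nr_ppas > 1 ? rqd->ppa_list : &rqd->ppa_addr;
}

int main(void)
{
	struct nvm_rq_model one = { .nr_ppas = 1, .ppa_addr = { 42 } };

	printf("%llu\n", rq_to_ppa_list(&one)->ppa);	/* 42 */
	return 0;
}
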
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 311e91b1a14f..256f18b67e8a 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -461,8 +461,11 @@ static int __init acpi_pcc_probe(void)
count = acpi_table_parse_entries_array(ACPI_SIG_PCCT,
sizeof(struct acpi_table_pcct), proc,
ACPI_PCCT_TYPE_RESERVED, MAX_PCC_SUBSPACES);
- if (count == 0 || count > MAX_PCC_SUBSPACES) {
- pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
+ if (count <= 0 || count > MAX_PCC_SUBSPACES) {
+ if (count < 0)
+ pr_warn("Error parsing PCC subspaces from PCCT\n");
+ else
+ pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
return -EINVAL;
}
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 7a28232d868b..5002838ea476 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -484,7 +484,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
int i;
lockdep_assert_held(&c->bucket_lock);
- BUG_ON(!n || n > c->caches_loaded || n > 8);
+ BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
bkey_init(k);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 83504dd8100a..b61b83bbcfff 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;
@@ -1003,7 +1004,7 @@ void bch_open_buckets_free(struct cache_set *c);
int bch_cache_allocator_start(struct cache *ca);
void bch_debug_exit(void);
-void bch_debug_init(struct kobject *kobj);
+void bch_debug_init(void);
void bch_request_exit(void);
int bch_request_init(void);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index e7d4817681f2..3f4211b5cd33 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2434,7 +2434,7 @@ static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
struct keybuf *buf = refill->buf;
int ret = MAP_CONTINUE;
- if (bkey_cmp(k, refill->end) >= 0) {
+ if (bkey_cmp(k, refill->end) > 0) {
ret = MAP_DONE;
goto out;
}
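
The refill_keybuf_fn() change is a fencepost fix: a key exactly equal to refill->end must still be visited, so only strictly-greater keys end the scan. A standalone model with integers standing in for bkeys:

/* Model of the fencepost fix: ">" keeps the key at the boundary in
 * range, where the old ">=" dropped it.
 */
#include <stdio.h>

static int scan(const int *keys, int n, int end)
{
	int i, visited = 0;

	for (i = 0; i < n; i++) {
		if (keys[i] > end)	/* was: >= end */
			break;
		visited++;
	}
	return visited;
}

int main(void)
{
	int keys[] = { 1, 3, 5 };

	printf("%d\n", scan(keys, 3, 5));	/* 3 with ">", 2 with ">=" */
	return 0;
}
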
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index eca0d496b686..c88cdc4ae4ec 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -345,7 +345,8 @@ do { \
} while (0)
/**
- * closure_return - finish execution of a closure, with destructor
+ * closure_return_with_destructor - finish execution of a closure,
+ * with destructor
*
* Works like closure_return(), except @destructor will be called when all
* outstanding refs on @cl have been dropped; @destructor may be used to safely
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 06da66b2488a..8f448b9c96a1 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -253,7 +253,7 @@ void bch_debug_exit(void)
debugfs_remove_recursive(bcache_debug);
}
-void __init bch_debug_init(struct kobject *kobj)
+void __init bch_debug_init(void)
{
/*
* it is unnecessary to check return value of
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index c809724e6571..956004366699 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -553,7 +553,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
for (i = 0; i < KEY_PTRS(k); i++) {
stale = ptr_stale(b->c, k, i);
- btree_bug_on(stale > 96, b,
+ btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
"key too stale: %i, need_gc %u",
stale, b->c->need_gc);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6116bbf870d8..522c7426f3a0 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
closure_get(&ca->set->cl);
INIT_WORK(&ja->discard_work, journal_discard_work);
- schedule_work(&ja->discard_work);
+ queue_work(bch_journal_wq, &ja->discard_work);
}
}
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
: &j->w[0];
__closure_wake_up(&w->wait);
- continue_at_nobarrier(cl, journal_write, system_wq);
+ continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}
static void journal_write_unlock(struct closure *cl)
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
spin_unlock(&c->journal.lock);
btree_flush_write(c);
- continue_at(cl, journal_write, system_wq);
+ continue_at(cl, journal_write, bch_journal_wq);
return;
}
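
These hunks move journal work off system_wq onto a dedicated queue; the same series creates bch_journal_wq with WQ_MEM_RECLAIM (see the bcache_init() hunk below), so journal writes keep a rescuer thread available under memory pressure. The pattern, shown out of kernel context as a sketch:

/* Sketch of the dedicated-workqueue pattern adopted here (kernel
 * context, not standalone): create with WQ_MEM_RECLAIM at init, queue
 * the journal work to it instead of system_wq, destroy at exit.
 */
struct workqueue_struct *bch_journal_wq;

static int bcache_init_sketch(void)
{
	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
	if (!bch_journal_wq)
		return -ENOMEM;
	return 0;
}

static void bcache_exit_sketch(void)
{
	if (bch_journal_wq)
		destroy_workqueue(bch_journal_wq);
}

/* Call sites then use: queue_work(bch_journal_wq, &ja->discard_work); */
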
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 51be355a3309..3bf35914bb57 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -395,7 +395,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
* unless the read-ahead request is for metadata (eg, for gfs2).
*/
if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
- !(bio->bi_opf & REQ_META))
+ !(bio->bi_opf & REQ_PRIO))
goto skip;
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
@@ -850,7 +850,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
bch_mark_cache_accounting(s->iop.c, s->d,
!s->cache_missed, s->iop.bypass);
- trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
+ trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
if (s->iop.status)
continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
@@ -877,7 +877,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
}
if (!(bio->bi_opf & REQ_RAHEAD) &&
- !(bio->bi_opf & REQ_META) &&
+ !(bio->bi_opf & REQ_PRIO) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9,
get_capacity(bio->bi_disk) - bio_end_sector(bio));
@@ -1218,6 +1218,9 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+ if (dc->io_disable)
+ return -EIO;
+
return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index aa055cfeb099..721bf336ed1a 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -39,6 +39,6 @@ void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);
-extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
+extern struct kmem_cache *bch_search_cache;
#endif /* _BCACHE_REQUEST_H_ */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 94c756c66bd7..7bbd670a5a84 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -47,6 +47,7 @@ static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_journal_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */
@@ -417,6 +418,7 @@ static int __uuid_write(struct cache_set *c)
{
BKEY_PADDED(key) k;
struct closure cl;
+ struct cache *ca;
closure_init_stack(&cl);
lockdep_assert_held(&bch_register_lock);
@@ -428,6 +430,10 @@ static int __uuid_write(struct cache_set *c)
uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
closure_sync(&cl);
+ /* Only one bucket used for uuid write */
+ ca = PTR_CACHE(c, &k.key, 0);
+ atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
+
bkey_copy(&c->uuid_bucket, &k.key);
bkey_put(c, &k.key);
return 0;
@@ -642,10 +648,6 @@ static int ioctl_dev(struct block_device *b, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct bcache_device *d = b->bd_disk->private_data;
- struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-
- if (dc->io_disable)
- return -EIO;
return d->ioctl(d, mode, cmd, arg);
}
@@ -1007,6 +1009,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
bch_write_bdev_super(dc, &cl);
closure_sync(&cl);
+ calc_cached_dev_sectors(dc->disk.c);
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
@@ -1151,11 +1154,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
}
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
bch_writeback_queue(dc);
}
+ bch_sectors_dirty_init(&dc->disk);
+
bch_cached_dev_run(dc);
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);
@@ -2048,6 +2052,8 @@ static int cache_alloc(struct cache *ca)
size_t free;
size_t btree_buckets;
struct bucket *b;
+ int ret = -ENOMEM;
+ const char *err = NULL;
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
@@ -2065,27 +2071,93 @@ static int cache_alloc(struct cache *ca)
*/
btree_buckets = ca->sb.njournal_buckets ?: 8;
free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
+ if (!free) {
+ ret = -EPERM;
+ err = "ca->sb.nbuckets is too small";
+ goto err_free;
+ }
- if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
- !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
- !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
- !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
- !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
- !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
- !(ca->buckets = vzalloc(array_size(sizeof(struct bucket),
- ca->sb.nbuckets))) ||
- !(ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
- prio_buckets(ca), 2),
- GFP_KERNEL)) ||
- !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
- return -ENOMEM;
+ if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
+ GFP_KERNEL)) {
+ err = "ca->free[RESERVE_BTREE] alloc failed";
+ goto err_btree_alloc;
+ }
+
+ if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
+ GFP_KERNEL)) {
+ err = "ca->free[RESERVE_PRIO] alloc failed";
+ goto err_prio_alloc;
+ }
+
+ if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
+ err = "ca->free[RESERVE_MOVINGGC] alloc failed";
+ goto err_movinggc_alloc;
+ }
+
+ if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
+ err = "ca->free[RESERVE_NONE] alloc failed";
+ goto err_none_alloc;
+ }
+
+ if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
+ err = "ca->free_inc alloc failed";
+ goto err_free_inc_alloc;
+ }
+
+ if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
+ err = "ca->heap alloc failed";
+ goto err_heap_alloc;
+ }
+
+ ca->buckets = vzalloc(array_size(sizeof(struct bucket),
+ ca->sb.nbuckets));
+ if (!ca->buckets) {
+ err = "ca->buckets alloc failed";
+ goto err_buckets_alloc;
+ }
+
+ ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
+ prio_buckets(ca), 2),
+ GFP_KERNEL);
+ if (!ca->prio_buckets) {
+ err = "ca->prio_buckets alloc failed";
+ goto err_prio_buckets_alloc;
+ }
+
+ ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca);
+ if (!ca->disk_buckets) {
+ err = "ca->disk_buckets alloc failed";
+ goto err_disk_buckets_alloc;
+ }
ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
for_each_bucket(b, ca)
atomic_set(&b->pin, 0);
-
return 0;
+
+err_disk_buckets_alloc:
+ kfree(ca->prio_buckets);
+err_prio_buckets_alloc:
+ vfree(ca->buckets);
+err_buckets_alloc:
+ free_heap(&ca->heap);
+err_heap_alloc:
+ free_fifo(&ca->free_inc);
+err_free_inc_alloc:
+ free_fifo(&ca->free[RESERVE_NONE]);
+err_none_alloc:
+ free_fifo(&ca->free[RESERVE_MOVINGGC]);
+err_movinggc_alloc:
+ free_fifo(&ca->free[RESERVE_PRIO]);
+err_prio_alloc:
+ free_fifo(&ca->free[RESERVE_BTREE]);
+err_btree_alloc:
+err_free:
+ module_put(THIS_MODULE);
+ if (err)
+ pr_notice("error %s: %s", ca->cache_dev_name, err);
+ return ret;
}
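
The rewritten cache_alloc() replaces one giant "||" chain with a goto unwind ladder, so each failure frees exactly what was already allocated, in reverse order, before dropping the module reference. A minimal standalone model of the pattern:

/* Minimal model of the goto-unwind ladder: each failure jumps to a
 * label that frees everything allocated so far, newest first.
 */
#include <stdlib.h>

struct cache_model { void *a, *b, *c; };

static int model_alloc(struct cache_model *m)
{
	m->a = malloc(64);
	if (!m->a)
		goto err_a;

	m->b = malloc(64);
	if (!m->b)
		goto err_b;

	m->c = malloc(64);
	if (!m->c)
		goto err_c;

	return 0;

err_c:
	free(m->b);
err_b:
	free(m->a);
err_a:
	return -1;
}

int main(void)
{
	struct cache_model m;

	if (model_alloc(&m))
		return 1;
	free(m.c);
	free(m.b);
	free(m.a);
	return 0;
}
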
static int register_cache(struct cache_sb *sb, struct page *sb_page,
@@ -2111,6 +2183,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
if (ret == -ENOMEM)
err = "cache_alloc(): -ENOMEM";
+ else if (ret == -EPERM)
+ err = "cache_alloc(): cache device is too small";
else
err = "cache_alloc(): unknown error";
goto err;
@@ -2341,6 +2415,9 @@ static void bcache_exit(void)
kobject_put(bcache_kobj);
if (bcache_wq)
destroy_workqueue(bcache_wq);
+ if (bch_journal_wq)
+ destroy_workqueue(bch_journal_wq);
+
if (bcache_major)
unregister_blkdev(bcache_major, "bcache");
unregister_reboot_notifier(&reboot);
@@ -2370,6 +2447,10 @@ static int __init bcache_init(void)
if (!bcache_wq)
goto err;
+ bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+ if (!bch_journal_wq)
+ goto err;
+
bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
if (!bcache_kobj)
goto err;
@@ -2378,7 +2459,7 @@ static int __init bcache_init(void)
sysfs_create_files(bcache_kobj, files))
goto err;
- bch_debug_init(bcache_kobj);
+ bch_debug_init();
closure_debug_init();
return 0;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 150cf4f4cf74..26f035a0c5b9 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -285,6 +285,7 @@ STORE(__cached_dev)
1, WRITEBACK_RATE_UPDATE_SECS_MAX);
d_strtoul(writeback_rate_i_term_inverse);
d_strtoul_nonzero(writeback_rate_p_term_inverse);
+ d_strtoul_nonzero(writeback_rate_minimum);
sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);
@@ -412,6 +413,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_rate_update_seconds,
&sysfs_writeback_rate_i_term_inverse,
&sysfs_writeback_rate_p_term_inverse,
+ &sysfs_writeback_rate_minimum,
&sysfs_writeback_rate_debug,
&sysfs_errors,
&sysfs_io_error_limit,
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 69dddeab124c..5936de71883f 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1455,8 +1455,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
if (hints_valid) {
r = dm_array_cursor_next(&cmd->hint_cursor);
if (r) {
- DMERR("dm_array_cursor_next for hint failed");
- goto out;
+ dm_array_cursor_end(&cmd->hint_cursor);
+ hints_valid = false;
}
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index a53413371725..b29a8327eed1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3009,8 +3009,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
- if (from_cblock(new_size) > from_cblock(cache->cache_size))
- return true;
+ if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+ if (cache->sized) {
+ DMERR("%s: unable to extend cache due to missing cache table reload",
+ cache_device_name(cache));
+ return false;
+ }
+ }
/*
* We can't drop a dirty block when shrinking the cache.
@@ -3479,14 +3484,13 @@ static int __init dm_cache_init(void)
int r;
migration_cache = KMEM_CACHE(dm_cache_migration, 0);
- if (!migration_cache) {
- dm_unregister_target(&cache_target);
+ if (!migration_cache)
return -ENOMEM;
- }
r = dm_register_target(&cache_target);
if (r) {
DMERR("cache target registration failed: %d", r);
+ kmem_cache_destroy(migration_cache);
return r;
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 21d126a5078c..32aabe27b37c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -467,7 +467,9 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
.version = {1, 5, 0},
+#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_ZONED_HM,
+#endif
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 89ccb64342de..bb3096bf2cc6 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -559,7 +559,12 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
}
memset(result + size, 0, JOURNAL_MAC_SIZE - size);
} else {
- __u8 digest[size];
+ __u8 digest[HASH_MAX_DIGESTSIZE];
+
+ if (WARN_ON(size > sizeof(digest))) {
+ dm_integrity_io_error(ic, "digest_size", -EINVAL);
+ goto err;
+ }
r = crypto_shash_final(desc, digest);
if (unlikely(r)) {
dm_integrity_io_error(ic, "crypto_shash_final", r);
@@ -1324,7 +1329,7 @@ static void integrity_metadata(struct work_struct *w)
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- char checksums_onstack[ic->tag_size + extra_space];
+ char checksums_onstack[HASH_MAX_DIGESTSIZE];
unsigned sectors_to_process = dio->range.n_sectors;
sector_t sector = dio->range.logical_sector;
@@ -1333,8 +1338,14 @@ static void integrity_metadata(struct work_struct *w)
checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
- if (!checksums)
+ if (!checksums) {
checksums = checksums_onstack;
+ if (WARN_ON(extra_space &&
+ digest_size > sizeof(checksums_onstack))) {
+ r = -EINVAL;
+ goto error;
+ }
+ }
__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
unsigned pos;
@@ -1546,7 +1557,7 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
if (ic->internal_hash) {
- char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
+ char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
@@ -1596,7 +1607,7 @@ retry_kmap:
if (ic->internal_hash) {
unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
if (unlikely(digest_size > ic->tag_size)) {
- char checksums_onstack[digest_size];
+ char checksums_onstack[HASH_MAX_DIGESTSIZE];
integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
} else
@@ -2023,7 +2034,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
unlikely(from_replay) &&
#endif
ic->internal_hash) {
- char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
+ char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
(char *)access_journal_data(ic, i, l), test_tag);
@@ -3462,7 +3473,8 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- ic->recalc_tags = kvmalloc((RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size, GFP_KERNEL);
+ ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
+ ic->tag_size, GFP_KERNEL);
if (!ic->recalc_tags) {
ti->error = "Cannot allocate tags for recalculating";
r = -ENOMEM;
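The dm-integrity hunks above all follow one pattern: a variable-length stack array sized by the runtime digest size is replaced with a fixed buffer bounded by HASH_MAX_DIGESTSIZE, guarded by a WARN_ON for the never-expected oversize case. A hedged sketch of the pattern:

#include <crypto/hash.h>

/* Sketch: fixed worst-case buffer instead of a VLA. */
static int example_final(struct shash_desc *desc, unsigned int digest_size)
{
	u8 digest[HASH_MAX_DIGESTSIZE];		/* compile-time maximum */

	if (WARN_ON(digest_size > sizeof(digest)))
		return -EINVAL;			/* refuse, never overflow */

	return crypto_shash_final(desc, digest);
}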
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index d10964d41fd7..2f7c44a006c4 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -102,6 +102,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+#ifdef CONFIG_BLK_DEV_ZONED
static int linear_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
@@ -112,6 +113,7 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio,
return DM_ENDIO_DONE;
}
+#endif
static void linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -208,12 +210,16 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
static struct target_type linear_target = {
.name = "linear",
.version = {1, 4, 0},
+#ifdef CONFIG_BLK_DEV_ZONED
+ .end_io = linear_end_io,
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+#else
+ .features = DM_TARGET_PASSES_INTEGRITY,
+#endif
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
.map = linear_map,
- .end_io = linear_end_io,
.status = linear_status,
.prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
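Both dm-flakey and dm-linear above now expose their zoned-device support (the DM_TARGET_ZONED_HM feature and, for dm-linear, the end_io remapping hook) only under CONFIG_BLK_DEV_ZONED, so the targets build cleanly when zoned block device support is compiled out. The conditional-initializer pattern, sketched:

static struct target_type example_target = {
	.name    = "example",
	.version = {1, 0, 0},
#ifdef CONFIG_BLK_DEV_ZONED
	/* example_end_io assumed defined under the same #ifdef. */
	.end_io   = example_end_io,
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
#else
	.features = DM_TARGET_PASSES_INTEGRITY,
#endif
	.module  = THIS_MODULE,
};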
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d94ba6f72ff5..419362c2d8ac 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -806,19 +806,19 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
}
static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
- const char *attached_handler_name, char **error)
+ const char **attached_handler_name, char **error)
{
struct request_queue *q = bdev_get_queue(bdev);
int r;
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
- if (attached_handler_name) {
+ if (*attached_handler_name) {
/*
* Clear any hw_handler_params associated with a
* handler that isn't already attached.
*/
- if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+ if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
kfree(m->hw_handler_params);
m->hw_handler_params = NULL;
}
@@ -830,7 +830,8 @@ retain:
* handler instead of the original table passed in.
*/
kfree(m->hw_handler_name);
- m->hw_handler_name = attached_handler_name;
+ m->hw_handler_name = *attached_handler_name;
+ *attached_handler_name = NULL;
}
}
@@ -867,7 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
struct pgpath *p;
struct multipath *m = ti->private;
struct request_queue *q;
- const char *attached_handler_name;
+ const char *attached_handler_name = NULL;
/* we need at least a path arg */
if (as->argc < 1) {
@@ -890,7 +891,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
if (attached_handler_name || m->hw_handler_name) {
INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
- r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
+ r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
if (r) {
dm_put_device(ti, p->path.dev);
goto bad;
@@ -905,6 +906,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
return p;
bad:
+ kfree(attached_handler_name);
free_pgpath(p);
return ERR_PTR(r);
}
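The setup_scsi_dh() change passes attached_handler_name by reference so the callee can take ownership of the string and NULL the caller's pointer; the caller's single kfree() then stays correct on every path. A minimal sketch of the idiom, with hypothetical names:

#include <linux/slab.h>
#include <linux/string.h>

struct example_owner { const char *name; };

/* Consumes *name when present and NULLs it; kfree(NULL) is a no-op. */
static void example_adopt(struct example_owner *o, const char **name)
{
	if (*name) {
		kfree(o->name);
		o->name = *name;	/* ownership moves to @o */
		*name = NULL;		/* caller must not free it anymore */
	}
}

static int example_caller(struct example_owner *o)
{
	const char *name = kstrdup("handler", GFP_KERNEL);
	int r = 0;

	example_adopt(o, &name);
	/* ...error handling elided... */
	kfree(name);	/* NULL if adopted above, otherwise still ours */
	return r;
}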
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 5ba067fa0c72..c44925e4e481 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3353,7 +3353,7 @@ static const char *sync_str(enum sync_state state)
};
/* Return enum sync_state for @mddev derived from @recovery flags */
-static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
+static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
{
if (test_bit(MD_RECOVERY_FROZEN, &recovery))
return st_frozen;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 74f6770c70b1..20b0776e39ef 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -832,10 +832,8 @@ static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
if (r) {
DMERR("could not get size of metadata device");
pmd->metadata_reserve = max_blocks;
- } else {
- sector_div(total, 10);
- pmd->metadata_reserve = min(max_blocks, total);
- }
+ } else
+ pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
}
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
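__set_metadata_reserve() now computes the 10% reserve with div_u64() instead of sector_div(): div_u64() returns the quotient of a u64 divided by a u32 and is safe on 32-bit kernels (where a plain 64-bit '/' would pull in compiler helpers), and unlike sector_div() it does not modify its argument in place. A hedged sketch:

#include <linux/math64.h>

static u64 example_tenth(u64 total)
{
	/* u64 / u32 division that works on both 32- and 64-bit kernels. */
	return div_u64(total, 10);
}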
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 684af08d0747..0ce04e5b4afb 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -212,12 +212,15 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
struct dm_verity_fec_io *fio = fec_io(io);
u64 block, ileaved;
u8 *bbuf, *rs_block;
- u8 want_digest[v->digest_size];
+ u8 want_digest[HASH_MAX_DIGESTSIZE];
unsigned n, k;
if (neras)
*neras = 0;
+ if (WARN_ON(v->digest_size > sizeof(want_digest)))
+ return -EINVAL;
+
/*
* read each of the rsn data blocks that are part of the RS block, and
* interleave contents to available bufs
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 20f7e4ef5342..45abb54037fc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1155,12 +1155,14 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
/*
- * The zone descriptors obtained with a zone report indicate
- * zone positions within the target device. The zone descriptors
- * must be remapped to match their position within the dm device.
- * A target may call dm_remap_zone_report after completion of a
- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
- * from the target device mapping to the dm device.
+ * The zone descriptors obtained with a zone report indicate zone positions
+ * within the target backing device, regardless of whether that device is a
+ * partition and regardless of the target mapping start sector on the device
+ * or partition. The zone descriptors' start sector and write pointer position
+ * must be adjusted to match their relative position within the dm device.
+ * A target may call dm_remap_zone_report() after completion of a
+ * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
+ * backing device.
*/
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
@@ -1171,6 +1173,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
struct blk_zone *zone;
unsigned int nr_rep = 0;
unsigned int ofst;
+ sector_t part_offset;
struct bio_vec bvec;
struct bvec_iter iter;
void *addr;
@@ -1179,6 +1182,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
return;
/*
+ * bio sector was incremented by the request size on completion. Taking
+ * into account the original request sector, the target start offset on
+ * the backing device and the target mapping offset (ti->begin), this yields
+ * the start sector of the partition on the backing device. The partition
+ * offset is always 0 if the target uses a whole device.
+ */
+ part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
+
+ /*
* Remap the start sector of the reported zones. For sequential zones,
* also remap the write pointer position.
*/
@@ -1195,6 +1207,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
/* Set zones start sector */
while (hdr->nr_zones && ofst < bvec.bv_len) {
zone = addr + ofst;
+ zone->start -= part_offset;
if (zone->start >= start + ti->len) {
hdr->nr_zones = 0;
break;
@@ -1206,7 +1219,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
else if (zone->cond == BLK_ZONE_COND_EMPTY)
zone->wp = zone->start;
else
- zone->wp = zone->wp + ti->begin - start;
+ zone->wp = zone->wp + ti->begin - start - part_offset;
}
ofst += sizeof(struct blk_zone);
hdr->nr_zones--;
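A worked check of the part_offset arithmetic above, with assumed numbers: suppose the target maps its sector 0 (ti->begin = 0) to sector start = 1000 of a partition that itself begins at sector 4096 of the whole disk. A report bio issued for dm sector 0 with 8 sectors executes on the whole disk at sector 4096 + 1000 = 5096 and completes with bi_sector advanced by the request size to 5104; bio_end_sector(report_bio) is 0 + 8 = 8, so part_offset = 5104 + 0 - (1000 + 8) = 4096, recovering exactly the partition start that must be subtracted from each reported zone.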
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ac1cffd2a09b..f3fb5bb8c82a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -542,7 +542,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
!discard_bio)
continue;
bio_chain(discard_bio, bio);
- bio_clone_blkcg_association(discard_bio, bio);
+ bio_clone_blkg_association(discard_bio, bio);
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rdev->bdev),
discard_bio, disk_devt(mddev->gendisk),
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index b5410aeb5fe2..bb41bea950ac 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client)
V4L2_CID_AUTO_WHITE_BALANCE,
0, 1, 1,
V4L2_WHITE_BALANCE_AUTO);
- if (IS_ERR_OR_NULL(mt9v111->auto_awb)) {
- ret = PTR_ERR(mt9v111->auto_awb);
- goto error_free_ctrls;
- }
-
mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls,
&mt9v111_ctrl_ops,
V4L2_CID_EXPOSURE_AUTO,
V4L2_EXPOSURE_MANUAL,
0, V4L2_EXPOSURE_AUTO);
- if (IS_ERR_OR_NULL(mt9v111->auto_exp)) {
- ret = PTR_ERR(mt9v111->auto_exp);
- goto error_free_ctrls;
- }
-
- /* Initialize timings */
mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
V4L2_CID_HBLANK,
MT9V111_CORE_R05_MIN_HBLANK,
MT9V111_CORE_R05_MAX_HBLANK, 1,
MT9V111_CORE_R05_DEF_HBLANK);
- if (IS_ERR_OR_NULL(mt9v111->hblank)) {
- ret = PTR_ERR(mt9v111->hblank);
- goto error_free_ctrls;
- }
-
mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
V4L2_CID_VBLANK,
MT9V111_CORE_R06_MIN_VBLANK,
MT9V111_CORE_R06_MAX_VBLANK, 1,
MT9V111_CORE_R06_DEF_VBLANK);
- if (IS_ERR_OR_NULL(mt9v111->vblank)) {
- ret = PTR_ERR(mt9v111->vblank);
- goto error_free_ctrls;
- }
/* PIXEL_RATE is fixed: just expose it to user space. */
v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
@@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client)
DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1,
DIV_ROUND_CLOSEST(mt9v111->sysclk, 2));
+ if (mt9v111->ctrls.error) {
+ ret = mt9v111->ctrls.error;
+ goto error_free_ctrls;
+ }
mt9v111->sd.ctrl_handler = &mt9v111->ctrls;
/* Start with default configuration: 640x480 UYVY. */
@@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client)
mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
if (ret)
- goto error_free_ctrls;
+ goto error_free_entity;
#endif
ret = mt9v111_chip_probe(mt9v111);
if (ret)
- goto error_free_ctrls;
+ goto error_free_entity;
ret = v4l2_async_register_subdev(&mt9v111->sd);
if (ret)
- goto error_free_ctrls;
+ goto error_free_entity;
return 0;
-error_free_ctrls:
- v4l2_ctrl_handler_free(&mt9v111->ctrls);
-
+error_free_entity:
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&mt9v111->sd.entity);
#endif
+error_free_ctrls:
+ v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
mutex_destroy(&mt9v111->pwr_mutex);
mutex_destroy(&mt9v111->stream_mutex);
@@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
- v4l2_ctrl_handler_free(&mt9v111->ctrls);
-
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&sd->entity);
#endif
+ v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
mutex_destroy(&mt9v111->pwr_mutex);
mutex_destroy(&mt9v111->stream_mutex);
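The mt9v111 probe() rework relies on the v4l2 control framework's error accumulation: v4l2_ctrl_new_std() and friends latch the first failure in handler->error and return NULL thereafter, so a single check after the last control replaces the per-control checks. A hedged sketch:

#include <media/v4l2-ctrls.h>

/* Sketch: rely on the handler's latched error instead of per-call checks. */
static int example_init_ctrls(struct v4l2_ctrl_handler *hdl,
			      const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_handler_init(hdl, 2);
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK, 0, 1023, 1, 128);
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK, 0, 1023, 1, 45);

	/* The first failure is recorded here; one check covers them all. */
	if (hdl->error) {
		int ret = hdl->error;

		v4l2_ctrl_handler_free(hdl);
		return ret;
	}
	return 0;
}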
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 94c1fe0e9787..54fe90acb5b2 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC
depends on MFD_CROS_EC
select CEC_CORE
select CEC_NOTIFIER
+ select CHROME_PLATFORMS
+ select CROS_EC_PROTO
---help---
If you say yes here you will get support for the
ChromeOS Embedded Controller's CEC.
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 729b31891466..a5ae85674ffb 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
index c832539397d7..12bce391d71f 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index bcd0dfd33618..2e65caf1ecae 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6))
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 4559f3b1b38c..008afb85023b 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index 7f269021d08c..1f33b4eb198c 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
@@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
else
return -EINVAL;
- ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line),
- GFP_KERNEL);
+ ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line),
+ GFP_KERNEL);
if (!ispif->line)
return -ENOMEM;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index da3a9fed9f2d..174a36be6f5d 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -9,6 +9,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss-vfe.h"
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index 4c584bffd179..0dca8bf9281e 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -9,6 +9,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss-vfe.h"
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index dcc0c30ef1b1..669615fff6a0 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev)
return -EINVAL;
}
- camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy),
- GFP_KERNEL);
+ camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
+ sizeof(*camss->csiphy), GFP_KERNEL);
if (!camss->csiphy)
return -ENOMEM;
- camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid),
- GFP_KERNEL);
+ camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
+ GFP_KERNEL);
if (!camss->csid)
return -ENOMEM;
- camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL);
+ camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
+ GFP_KERNEL);
if (!camss->vfe)
return -ENOMEM;
@@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = {
MODULE_DEVICE_TABLE(of, camss_dt_match);
-static int camss_runtime_suspend(struct device *dev)
+static int __maybe_unused camss_runtime_suspend(struct device *dev)
{
return 0;
}
-static int camss_runtime_resume(struct device *dev)
+static int __maybe_unused camss_runtime_resume(struct device *dev)
{
return 0;
}
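camss_probe() (and msm_ispif_subdev_init() above) move their per-block arrays to devm_kcalloc(), tying the allocations' lifetime to the device so the bare "return -ENOMEM" paths no longer leak earlier allocations; the runtime-PM stubs gain __maybe_unused so they compile without warnings when CONFIG_PM is off. A sketch of the devm pattern:

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	u32 *a, *b;

	a = devm_kcalloc(dev, 4, sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = devm_kcalloc(dev, 8, sizeof(*b), GFP_KERNEL);
	if (!b)
		return -ENOMEM;	/* @a is released automatically with the device */

	return 0;
}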
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 666d319d3d1a..1f6c1eefe389 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
- ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
- msg[0].len - 3);
+ ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+ &msg[0].buf[3],
+ msg[0].len - 3)
+ : -EOPNOTSUPP;
} else {
/* I2C write */
u8 buf[MAX_XFER_SIZE];
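The af9035 change guards the "msg[0].len - 3" register write against unsigned underflow: a message shorter than its 3-byte register header would otherwise wrap to a huge length. The guard, as a hedged sketch (example_wr_regs and struct example_dev are hypothetical):

struct example_dev;
int example_wr_regs(struct example_dev *d, u32 reg, const u8 *val, int len);

/* Sketch: validate an unsigned length before subtracting a header size. */
static int example_write(struct example_dev *d, u32 reg,
			 const u8 *buf, u16 len)
{
	if (len < 3)
		return -EOPNOTSUPP;	/* no payload beyond the header */

	return example_wr_regs(d, reg, &buf[3], len - 3);
}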
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 8e799ae1df69..67481fc82445 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -116,6 +116,7 @@ static void em28xx_audio_isocirq(struct urb *urb)
stride = runtime->frame_bits >> 3;
for (i = 0; i < urb->number_of_packets; i++) {
+ unsigned long flags;
int length =
urb->iso_frame_desc[i].actual_length / stride;
cp = (unsigned char *)urb->transfer_buffer +
@@ -137,7 +138,7 @@ static void em28xx_audio_isocirq(struct urb *urb)
length * stride);
}
- snd_pcm_stream_lock(substream);
+ snd_pcm_stream_lock_irqsave(substream, flags);
dev->adev.hwptr_done_capture += length;
if (dev->adev.hwptr_done_capture >=
@@ -153,7 +154,7 @@ static void em28xx_audio_isocirq(struct urb *urb)
period_elapsed = 1;
}
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
}
if (period_elapsed)
snd_pcm_period_elapsed(substream);
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 5657f8710ca6..2b8c84a5c9a8 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -777,6 +777,7 @@ EXPORT_SYMBOL_GPL(em28xx_set_mode);
static void em28xx_irq_callback(struct urb *urb)
{
struct em28xx *dev = urb->context;
+ unsigned long flags;
int i;
switch (urb->status) {
@@ -793,9 +794,9 @@ static void em28xx_irq_callback(struct urb *urb)
}
/* Copy data from URB */
- spin_lock(&dev->slock);
+ spin_lock_irqsave(&dev->slock, flags);
dev->usb_ctl.urb_data_copy(dev, urb);
- spin_unlock(&dev->slock);
+ spin_unlock_irqrestore(&dev->slock, flags);
/* Reset urb buffers */
for (i = 0; i < urb->number_of_packets; i++) {
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 96055de6e8ce..7d268f2404e1 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -419,6 +419,7 @@ static void tm6000_irq_callback(struct urb *urb)
{
struct tm6000_dmaqueue *dma_q = urb->context;
struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ unsigned long flags;
int i;
switch (urb->status) {
@@ -436,9 +437,9 @@ static void tm6000_irq_callback(struct urb *urb)
break;
}
- spin_lock(&dev->slock);
+ spin_lock_irqsave(&dev->slock, flags);
tm6000_isoc_copy(urb);
- spin_unlock(&dev->slock);
+ spin_unlock_irqrestore(&dev->slock, flags);
/* Reset urb buffers */
for (i = 0; i < urb->number_of_packets; i++) {
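The em28xx and tm6000 hunks above convert URB-completion locking to the _irqsave variants. A URB completion handler may run in hard-IRQ, softirq or task context depending on the host controller driver, so saving and restoring the interrupt state is the only form that is safe in all of them. A hedged sketch:

#include <linux/usb.h>
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t lock;
	/* ... */
};

static void example_copy_data(struct example_dev *dev, struct urb *urb);

static void example_urb_complete(struct urb *urb)
{
	struct example_dev *dev = urb->context;
	unsigned long flags;

	/* Completion context is not guaranteed: always use irqsave here. */
	spin_lock_irqsave(&dev->lock, flags);
	example_copy_data(dev, urb);		/* hypothetical helper */
	spin_unlock_irqrestore(&dev->lock, flags);
}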
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 127fe6eb91d9..a3ef1f50a4b3 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
if (sev == NULL)
return;
- /*
- * If the event has been added to the fh->subscribed list, but its
- * add op has not completed yet elems will be 0, treat this as
- * not being subscribed.
- */
- if (!sev->elems)
- return;
-
/* Increase event sequence number on fh. */
fh->sequence++;
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
unsigned i;
+ int ret = 0;
if (sub->type == V4L2_EVENT_ALL)
return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
sev->flags = sub->flags;
sev->fh = fh;
sev->ops = ops;
+ sev->elems = elems;
+
+ mutex_lock(&fh->subscribe_lock);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
- if (!found_ev)
- list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
+ /* Already listening */
kvfree(sev);
- return 0; /* Already listening */
+ goto out_unlock;
}
if (sev->ops && sev->ops->add) {
- int ret = sev->ops->add(sev, elems);
+ ret = sev->ops->add(sev, elems);
if (ret) {
- sev->ops = NULL;
- v4l2_event_unsubscribe(fh, sub);
- return ret;
+ kvfree(sev);
+ goto out_unlock;
}
}
- /* Mark as ready for use */
- sev->elems = elems;
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add(&sev->list, &fh->subscribed);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- return 0;
+out_unlock:
+ mutex_unlock(&fh->subscribe_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
return 0;
}
+ mutex_lock(&fh->subscribe_lock);
+
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
if (sev && sev->ops && sev->ops->del)
sev->ops->del(sev);
+ mutex_unlock(&fh->subscribe_lock);
+
kvfree(sev);
return 0;
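The v4l2_event_subscribe() rework does two things: the new fh->subscribe_lock mutex serializes concurrent (un)subscribe calls, and the subscription is added to fh->subscribed only after ops->add() succeeds, so the event-queue path never sees a half-initialized entry (retiring the old "!sev->elems" test). The publish-only-when-ready idiom, sketched with hypothetical types:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_item {
	struct list_head list;
	/* ... */
};

static int example_item_setup(struct example_item *it);	/* may fail */

static int example_publish(struct example_item *it, struct list_head *head,
			   struct mutex *lock, spinlock_t *view_lock)
{
	int ret;

	mutex_lock(lock);			/* serialize subscribers */
	ret = example_item_setup(it);
	if (ret) {
		mutex_unlock(lock);
		kfree(it);			/* never visible: free directly */
		return ret;
	}

	spin_lock_irq(view_lock);
	list_add(&it->list, head);		/* readers see only ready items */
	spin_unlock_irq(view_lock);

	mutex_unlock(lock);
	return 0;
}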
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 3895999bf880..c91a7bd3ecfc 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
INIT_LIST_HEAD(&fh->available);
INIT_LIST_HEAD(&fh->subscribed);
fh->sequence = -1;
+ mutex_init(&fh->subscribe_lock);
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
return;
v4l_disable_media_source(fh->vdev);
v4l2_event_unsubscribe_all(fh);
+ mutex_destroy(&fh->subscribe_lock);
fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 716fc8ed31d3..8a02f11076f9 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2146,7 +2146,7 @@ static int msb_init_disk(struct memstick_dev *card)
set_disk_ro(msb->disk, 1);
msb_start(card);
- device_add_disk(&card->dev, msb->disk);
+ device_add_disk(&card->dev, msb->disk, NULL);
dbg("Disk added");
return 0;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 5ee932631fae..0cd30dcb6801 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1236,7 +1236,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dev_dbg(&card->dev, "capacity set %ld\n", capacity);
- device_add_disk(&card->dev, msb->disk);
+ device_add_disk(&card->dev, msb->disk, NULL);
msb->active = 1;
return 0;
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 059997f8ebce..178f414ea8f9 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -2004,7 +2004,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_6
U64 LinkFailureCount; /* 50h */
U64 LossOfSyncCount; /* 58h */
U64 LossOfSignalCount; /* 60h */
- U64 PrimativeSeqErrCount; /* 68h */
+ U64 PrimitiveSeqErrCount; /* 68h */
U64 InvalidTxWordCount; /* 70h */
U64 InvalidCrcCount; /* 78h */
U64 FcpInitiatorIoCount; /* 80h */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index e6b4ae558767..ba551d8dfba4 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -335,11 +335,11 @@ static int mpt_remove_dead_ioc_func(void *arg)
MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
struct pci_dev *pdev;
- if ((ioc == NULL))
+ if (!ioc)
return -1;
pdev = ioc->pcidev;
- if ((pdev == NULL))
+ if (!pdev)
return -1;
pci_stop_and_remove_bus_device_locked(pdev);
@@ -7570,11 +7570,11 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
u8 phy_num = (u8)(evData0);
u8 port_num = (u8)(evData0 >> 8);
u8 port_width = (u8)(evData0 >> 16);
- u8 primative = (u8)(evData0 >> 24);
+ u8 primitive = (u8)(evData0 >> 24);
snprintf(evStr, EVENT_DESCR_STR_SZ,
- "SAS Broadcase Primative: phy=%d port=%d "
- "width=%d primative=0x%02x",
- phy_num, port_num, port_width, primative);
+ "SAS Broadcast Primitive: phy=%d port=%d "
+ "width=%d primitive=0x%02x",
+ phy_num, port_num, port_width, primitive);
break;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b8cf2658649e..9b404fc69c90 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -129,7 +129,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc,
static void mptsas_send_expander_event(struct fw_event_work *fw_event);
static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
-static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
+static void mptsas_broadcast_primitive_work(struct fw_event_work *fw_event);
static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
void mptsas_schedule_target_reset(void *ioc);
@@ -1665,7 +1665,7 @@ mptsas_firmware_event_work(struct work_struct *work)
mptsas_free_fw_event(ioc, fw_event);
break;
case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
- mptsas_broadcast_primative_work(fw_event);
+ mptsas_broadcast_primitive_work(fw_event);
break;
case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
mptsas_send_expander_event(fw_event);
@@ -4826,13 +4826,13 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
}
/**
- * mptsas_broadcast_primative_work - Handle broadcast primitives
+ * mptsas_broadcast_primitive_work - Handle broadcast primitives
* @work: work queue payload containing info describing the event
*
* this will be handled in workqueue context.
*/
static void
-mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
+mptsas_broadcast_primitive_work(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc = fw_event->ioc;
MPT_FRAME_HDR *mf;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 11841f4b7b2b..8c5dfdce4326 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -99,6 +99,15 @@ config MFD_AAT2870_CORE
additional drivers must be enabled in order to use the
functionality of the device.
+config MFD_AT91_USART
+ tristate "AT91 USART Driver"
+ select MFD_CORE
+ help
+	  Select this to get support for the AT91 USART IP. This is a wrapper
+	  over the at91-usart-serial driver and the usart-spi driver. Only one
+	  function can be used at a time; the choice is made at boot time by the
+	  probe function of this MFD driver, according to a device tree property.
+
config MFD_ATMEL_FLEXCOM
tristate "Atmel Flexcom (Flexible Serial Communication Unit)"
select MFD_CORE
@@ -1023,16 +1032,23 @@ config MFD_RN5T618
functionality of the device.
config MFD_SEC_CORE
- bool "SAMSUNG Electronics PMIC Series Support"
+ tristate "SAMSUNG Electronics PMIC Series Support"
depends on I2C=y
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
help
- Support for the Samsung Electronics MFD series.
- This driver provides common support for accessing the device,
- additional drivers must be enabled in order to use the functionality
- of the device
+	  Support for the Samsung Electronics PMIC devices that usually
+	  accompany Samsung Exynos SoC chipsets.
+	  This driver provides common support for accessing the device;
+	  additional drivers must be enabled in order to use the
+	  functionality of the device.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sec-core.
+	  Keep in mind that important core drivers (such as the regulators)
+	  depend on this driver, so building it as a module may require a
+	  proper initial ramdisk, or the system may fail to boot in certain
+	  scenarios.
config MFD_SI476X_CORE
tristate "Silicon Laboratories 4761/64/68 AM/FM radio."
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 5856a9489cbd..12980a4ad460 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -196,6 +196,7 @@ obj-$(CONFIG_MFD_SPMI_PMIC) += qcom-spmi-pmic.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
obj-$(CONFIG_MFD_TPS65090) += tps65090.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
+obj-$(CONFIG_MFD_AT91_USART) += at91-usart.o
obj-$(CONFIG_MFD_ATMEL_FLEXCOM) += atmel-flexcom.o
obj-$(CONFIG_MFD_ATMEL_HLCDC) += atmel-hlcdc.o
obj-$(CONFIG_MFD_ATMEL_SMC) += atmel-smc.o
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index d817f202da5b..be0497b96720 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -360,6 +360,6 @@ static struct i2c_driver adp5520_driver = {
module_i2c_driver(adp5520_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP5520(01) PMIC-MFD Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/altera-a10sr.c b/drivers/mfd/altera-a10sr.c
index 96e7d2cb7b89..400e0b51844b 100644
--- a/drivers/mfd/altera-a10sr.c
+++ b/drivers/mfd/altera-a10sr.c
@@ -108,7 +108,8 @@ static const struct regmap_config altr_a10sr_regmap_config = {
.cache_type = REGCACHE_NONE,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.read_flag_mask = 1,
.write_flag_mask = 0,
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 5f1e37d23943..27b61639cdc7 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -52,8 +52,10 @@ int arizona_clk32k_enable(struct arizona *arizona)
if (ret != 0)
goto err_ref;
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
- if (ret != 0)
- goto err_pm;
+ if (ret != 0) {
+ pm_runtime_put_sync(arizona->dev);
+ goto err_ref;
+ }
break;
case ARIZONA_32KZ_MCLK2:
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]);
@@ -67,8 +69,6 @@ int arizona_clk32k_enable(struct arizona *arizona)
ARIZONA_CLK_32K_ENA);
}
-err_pm:
- pm_runtime_put_sync(arizona->dev);
err_ref:
if (ret != 0)
arizona->clk32k_ref--;
@@ -990,7 +990,7 @@ static const struct mfd_cell wm8998_devs[] = {
int arizona_dev_init(struct arizona *arizona)
{
- const char * const mclk_name[] = { "mclk1", "mclk2" };
+ static const char * const mclk_name[] = { "mclk1", "mclk2" };
struct device *dev = arizona->dev;
const char *type_name = NULL;
unsigned int reg, val;
diff --git a/drivers/mfd/at91-usart.c b/drivers/mfd/at91-usart.c
new file mode 100644
index 000000000000..d20747f612c1
--- /dev/null
+++ b/drivers/mfd/at91-usart.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for AT91 USART
+ *
+ * Copyright (C) 2018 Microchip Technology
+ *
+ * Author: Radu Pirea <radu.pirea@microchip.com>
+ *
+ */
+
+#include <dt-bindings/mfd/at91-usart.h>
+
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/of.h>
+#include <linux/property.h>
+
+static struct mfd_cell at91_usart_spi_subdev = {
+ .name = "at91_usart_spi",
+ .of_compatible = "microchip,at91sam9g45-usart-spi",
+ };
+
+static struct mfd_cell at91_usart_serial_subdev = {
+ .name = "atmel_usart_serial",
+ .of_compatible = "atmel,at91rm9200-usart-serial",
+ };
+
+static int at91_usart_mode_probe(struct platform_device *pdev)
+{
+ struct mfd_cell cell;
+ u32 opmode = AT91_USART_MODE_SERIAL;
+
+ device_property_read_u32(&pdev->dev, "atmel,usart-mode", &opmode);
+
+ switch (opmode) {
+ case AT91_USART_MODE_SPI:
+ cell = at91_usart_spi_subdev;
+ break;
+ case AT91_USART_MODE_SERIAL:
+ cell = at91_usart_serial_subdev;
+ break;
+ default:
+ dev_err(&pdev->dev, "atmel,usart-mode has an invalid value %u\n",
+ opmode);
+ return -EINVAL;
+ }
+
+ return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, &cell, 1,
+ NULL, 0, NULL);
+}
+
+static const struct of_device_id at91_usart_mode_of_match[] = {
+ { .compatible = "atmel,at91rm9200-usart" },
+ { .compatible = "atmel,at91sam9260-usart" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91_usart_mode_of_match);
+
+static struct platform_driver at91_usart_mfd = {
+ .probe = at91_usart_mode_probe,
+ .driver = {
+ .name = "at91_usart_mode",
+ .of_match_table = at91_usart_mode_of_match,
+ },
+};
+
+module_platform_driver(at91_usart_mfd);
+
+MODULE_AUTHOR("Radu Pirea <radu.pirea@microchip.com>");
+MODULE_DESCRIPTION("AT91 USART MFD driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index 65a9757a6d21..fe6f83766144 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -218,7 +218,8 @@ EXPORT_SYMBOL(cros_ec_suspend);
static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
{
- while (cros_ec_get_next_event(ec_dev, NULL) > 0)
+ while (ec_dev->mkbp_event_supported &&
+ cros_ec_get_next_event(ec_dev, NULL) > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier,
1, ec_dev);
}
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 999dac752bcc..8f9d6964173e 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -546,6 +546,7 @@ static struct platform_driver cros_ec_dev_driver = {
.name = DRV_NAME,
.pm = &cros_ec_dev_pm_ops,
},
+ .id_table = cros_ec_id,
.probe = ec_device_probe,
.remove = ec_device_remove,
.shutdown = ec_device_shutdown,
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index abfb11818fdc..fdae1288bc6d 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -46,7 +46,8 @@ static int da9052_spi_probe(struct spi_device *spi)
config.reg_bits = 7;
config.pad_bits = 1;
config.val_bits = 8;
- config.use_single_rw = 1;
+ config.use_single_read = true;
+ config.use_single_write = true;
da9052->regmap = devm_regmap_init_spi(spi, &config);
if (IS_ERR(da9052->regmap)) {
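This da9052 hunk and the altera-a10sr one above track a regmap API change: the single use_single_rw flag was split into use_single_read and use_single_write, letting a device forbid bulk transfers in one direction only. A sketch of the updated config:

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	/* Replaces the former single flag: .use_single_rw = true */
	.use_single_read  = true,
	.use_single_write = true,
};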
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 2017446c5b4b..bb24c2a07900 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Intel MSIC
*
* Copyright (C) 2011, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/err.h>
@@ -54,68 +51,44 @@ struct intel_msic {
};
static struct resource msic_touch_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_adc_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_battery_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_gpio_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_audio_resources[] = {
- {
- .name = "IRQ",
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(0, "IRQ"),
/*
* We will pass IRQ_BASE to the driver now but this can be removed
* when/if the driver starts to use intel_msic_irq_read().
*/
- {
- .name = "IRQ_BASE",
- .flags = IORESOURCE_MEM,
- .start = MSIC_IRQ_STATUS_ACCDET,
- .end = MSIC_IRQ_STATUS_ACCDET,
- },
+ DEFINE_RES_MEM_NAMED(MSIC_IRQ_STATUS_ACCDET, 1, "IRQ_BASE"),
};
static struct resource msic_hdmi_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_thermal_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_power_btn_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_ocd_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
/*
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 15bc052704a6..6310c3bdb991 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -1,27 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MFD core driver for Intel Broxton Whiskey Cove PMIC
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
-#include <linux/module.h>
#include <linux/acpi.h>
-#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/mfd/intel_soc_pmic_bxtwc.h>
+#include <linux/module.h>
+
#include <asm/intel_pmc_ipc.h>
/* PMIC device registers */
@@ -31,8 +24,8 @@
/* Interrupt Status Registers */
#define BXTWC_IRQLVL1 0x4E02
-#define BXTWC_PWRBTNIRQ 0x4E03
+#define BXTWC_PWRBTNIRQ 0x4E03
#define BXTWC_THRM0IRQ 0x4E04
#define BXTWC_THRM1IRQ 0x4E05
#define BXTWC_THRM2IRQ 0x4E06
@@ -47,10 +40,9 @@
/* Interrupt MASK Registers */
#define BXTWC_MIRQLVL1 0x4E0E
-#define BXTWC_MPWRTNIRQ 0x4E0F
-
#define BXTWC_MIRQLVL1_MCHGR BIT(5)
+#define BXTWC_MPWRBTNIRQ 0x4E0F
#define BXTWC_MTHRM0IRQ 0x4E12
#define BXTWC_MTHRM1IRQ 0x4E13
#define BXTWC_MTHRM2IRQ 0x4E14
@@ -66,9 +58,7 @@
/* Whiskey Cove PMIC share same ACPI ID between different platforms */
#define BROXTON_PMIC_WC_HRV 4
-/* Manage in two IRQ chips since mask registers are not consecutive */
enum bxtwc_irqs {
- /* Level 1 */
BXTWC_PWRBTN_LVL1_IRQ = 0,
BXTWC_TMU_LVL1_IRQ,
BXTWC_THRM_LVL1_IRQ,
@@ -77,9 +67,11 @@ enum bxtwc_irqs {
BXTWC_CHGR_LVL1_IRQ,
BXTWC_GPIO_LVL1_IRQ,
BXTWC_CRIT_LVL1_IRQ,
+};
- /* Level 2 */
- BXTWC_PWRBTN_IRQ,
+enum bxtwc_irqs_pwrbtn {
+ BXTWC_PWRBTN_IRQ = 0,
+ BXTWC_UIBTN_IRQ,
};
enum bxtwc_irqs_bcu {
@@ -113,7 +105,10 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = {
REGMAP_IRQ_REG(BXTWC_CHGR_LVL1_IRQ, 0, BIT(5)),
REGMAP_IRQ_REG(BXTWC_GPIO_LVL1_IRQ, 0, BIT(6)),
REGMAP_IRQ_REG(BXTWC_CRIT_LVL1_IRQ, 0, BIT(7)),
- REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 1, 0x03),
+};
+
+static const struct regmap_irq bxtwc_regmap_irqs_pwrbtn[] = {
+ REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, 0x01),
};
static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = {
@@ -125,7 +120,7 @@ static const struct regmap_irq bxtwc_regmap_irqs_adc[] = {
};
static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = {
- REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)),
+ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20),
REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f),
REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f),
};
@@ -144,7 +139,16 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.mask_base = BXTWC_MIRQLVL1,
.irqs = bxtwc_regmap_irqs,
.num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs),
- .num_regs = 2,
+ .num_regs = 1,
+};
+
+static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
+ .name = "bxtwc_irq_chip_pwrbtn",
+ .status_base = BXTWC_PWRBTNIRQ,
+ .mask_base = BXTWC_MPWRBTNIRQ,
+ .irqs = bxtwc_regmap_irqs_pwrbtn,
+ .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_pwrbtn),
+ .num_regs = 1,
};
static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
@@ -473,6 +477,16 @@ static int bxtwc_probe(struct platform_device *pdev)
}
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+ BXTWC_PWRBTN_LVL1_IRQ,
+ IRQF_ONESHOT,
+ &bxtwc_regmap_irq_chip_pwrbtn,
+ &pmic->irq_chip_data_pwrbtn);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add PWRBTN IRQ chip\n");
+ return ret;
+ }
+
+ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_TMU_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_tmu,
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 861277c6580a..64b5c3cc30e7 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Device access for Dollar Cove TI PMIC
*
@@ -6,10 +7,6 @@
*
* Cleanup and forward-ported
* Copyright (c) 2017 Takashi Iwai <tiwai@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index b35da01d5bcf..64a3aece9c5e 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MFD core driver for Intel Cherrytrail Whiskey Cove PMIC
*
@@ -5,10 +6,6 @@
*
* Based on various non upstream patches to support the CHT Whiskey Cove PMIC:
* Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 274306d98ac1..c9f35378d391 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -1,31 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_soc_pmic_core.c - Intel SoC PMIC MFD Driver
+ * Intel SoC PMIC MFD Driver
*
* Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
-#include <linux/module.h>
-#include <linux/mfd/core.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/gpio/consumer.h>
-#include <linux/acpi.h>
-#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
-#include <linux/gpio/machine.h>
#include <linux/pwm.h>
+#include <linux/regmap.h>
+
#include "intel_soc_pmic_core.h"
/* Crystal Cove PMIC shares same ACPI ID between different platforms */
diff --git a/drivers/mfd/intel_soc_pmic_core.h b/drivers/mfd/intel_soc_pmic_core.h
index 90a1416d4dac..d490685845eb 100644
--- a/drivers/mfd/intel_soc_pmic_core.h
+++ b/drivers/mfd/intel_soc_pmic_core.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * intel_soc_pmic_core.h - Intel SoC PMIC MFD Driver
+ * Intel SoC PMIC MFD Driver
*
* Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 6d19a6d0fb97..b6ab72fa0569 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -1,25 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_soc_pmic_crc.c - Device access for Crystal Cove PMIC
+ * Device access for Crystal Cove PMIC
*
* Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
-#include <linux/mfd/core.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
+
#include "intel_soc_pmic_core.h"
#define CRYSTAL_COVE_MAX_REGISTER 0xC6
@@ -36,48 +29,23 @@
#define CRYSTAL_COVE_IRQ_VHDMIOCP 6
static struct resource gpio_resources[] = {
- {
- .name = "GPIO",
- .start = CRYSTAL_COVE_IRQ_GPIO,
- .end = CRYSTAL_COVE_IRQ_GPIO,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_GPIO, "GPIO"),
};
static struct resource pwrsrc_resources[] = {
- {
- .name = "PWRSRC",
- .start = CRYSTAL_COVE_IRQ_PWRSRC,
- .end = CRYSTAL_COVE_IRQ_PWRSRC,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_PWRSRC, "PWRSRC"),
};
static struct resource adc_resources[] = {
- {
- .name = "ADC",
- .start = CRYSTAL_COVE_IRQ_ADC,
- .end = CRYSTAL_COVE_IRQ_ADC,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_ADC, "ADC"),
};
static struct resource thermal_resources[] = {
- {
- .name = "THERMAL",
- .start = CRYSTAL_COVE_IRQ_THRM,
- .end = CRYSTAL_COVE_IRQ_THRM,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_THRM, "THERMAL"),
};
static struct resource bcu_resources[] = {
- {
- .name = "BCU",
- .start = CRYSTAL_COVE_IRQ_BCU,
- .end = CRYSTAL_COVE_IRQ_BCU,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_BCU, "BCU"),
};
static struct mfd_cell crystal_cove_byt_dev[] = {
@@ -134,27 +102,13 @@ static const struct regmap_config crystal_cove_regmap_config = {
};
static const struct regmap_irq crystal_cove_irqs[] = {
- [CRYSTAL_COVE_IRQ_PWRSRC] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_PWRSRC),
- },
- [CRYSTAL_COVE_IRQ_THRM] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_THRM),
- },
- [CRYSTAL_COVE_IRQ_BCU] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_BCU),
- },
- [CRYSTAL_COVE_IRQ_ADC] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_ADC),
- },
- [CRYSTAL_COVE_IRQ_CHGR] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_CHGR),
- },
- [CRYSTAL_COVE_IRQ_GPIO] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_GPIO),
- },
- [CRYSTAL_COVE_IRQ_VHDMIOCP] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_VHDMIOCP),
- },
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_PWRSRC, 0, BIT(CRYSTAL_COVE_IRQ_PWRSRC)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_THRM, 0, BIT(CRYSTAL_COVE_IRQ_THRM)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_BCU, 0, BIT(CRYSTAL_COVE_IRQ_BCU)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_ADC, 0, BIT(CRYSTAL_COVE_IRQ_ADC)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_CHGR, 0, BIT(CRYSTAL_COVE_IRQ_CHGR)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_GPIO, 0, BIT(CRYSTAL_COVE_IRQ_GPIO)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_VHDMIOCP, 0, BIT(CRYSTAL_COVE_IRQ_VHDMIOCP)),
};
static const struct regmap_irq_chip crystal_cove_irq_chip = {
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 8cfea969b060..440030cecbbd 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -132,32 +132,39 @@ const char *madera_name_from_type(enum madera_type type)
}
EXPORT_SYMBOL_GPL(madera_name_from_type);
-#define MADERA_BOOT_POLL_MAX_INTERVAL_US 5000
-#define MADERA_BOOT_POLL_TIMEOUT_US 25000
+#define MADERA_BOOT_POLL_INTERVAL_USEC 5000
+#define MADERA_BOOT_POLL_TIMEOUT_USEC 25000
static int madera_wait_for_boot(struct madera *madera)
{
+ ktime_t timeout;
unsigned int val;
- int ret;
+ int ret = 0;
/*
* We can't use an interrupt as we need to runtime resume to do so,
* so we poll the status bit. This won't race with the interrupt
* handler because it will be blocked on runtime resume.
+ * The chip could NAK a read request while it is booting so ignore
+ * errors from regmap_read.
*/
- ret = regmap_read_poll_timeout(madera->regmap,
- MADERA_IRQ1_RAW_STATUS_1,
- val,
- (val & MADERA_BOOT_DONE_STS1),
- MADERA_BOOT_POLL_MAX_INTERVAL_US,
- MADERA_BOOT_POLL_TIMEOUT_US);
-
- if (ret)
- dev_err(madera->dev, "Polling BOOT_DONE_STS failed: %d\n", ret);
+ timeout = ktime_add_us(ktime_get(), MADERA_BOOT_POLL_TIMEOUT_USEC);
+ regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val);
+ while (!(val & MADERA_BOOT_DONE_STS1) &&
+ !ktime_after(ktime_get(), timeout)) {
+ usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2,
+ MADERA_BOOT_POLL_INTERVAL_USEC);
+ regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val);
+	}
+
+ if (!(val & MADERA_BOOT_DONE_STS1)) {
+ dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n");
+ ret = -ETIMEDOUT;
+ }
/*
* BOOT_DONE defaults to unmasked on boot so we must ack it.
- * Do this unconditionally to avoid interrupt storms.
+ * Do this even after a timeout to avoid interrupt storms.
*/
regmap_write(madera->regmap, MADERA_IRQ1_STATUS_1,
MADERA_BOOT_DONE_EINT1);
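The open-coded loop replaces regmap_read_poll_timeout() because that helper aborts on any read error, while this chip can NAK reads during boot; here read errors are deliberately ignored and only the timeout ends the wait. The ktime-bounded polling pattern, as a hedged sketch (register address, bit and intervals are assumptions):

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/regmap.h>

static int example_wait_ready(struct regmap *map)
{
	ktime_t timeout = ktime_add_us(ktime_get(), 25000);
	unsigned int val = 0;

	regmap_read(map, 0x00, &val);	/* errors ignored: chip may NAK */
	while (!(val & BIT(0)) && !ktime_after(ktime_get(), timeout)) {
		usleep_range(2500, 5000);
		regmap_read(map, 0x00, &val);
	}

	return (val & BIT(0)) ? 0 : -ETIMEDOUT;
}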
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 6cbe96b28f42..ebb13d5de530 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -1,22 +1,12 @@
-/*
- * max14577.c - mfd core driver for the Maxim 14577/77836
- *
- * Copyright (C) 2014 Samsung Electronics
- * Chanwoo Choi <cw00.choi@samsung.com>
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max14577.c - mfd core driver for the Maxim 14577/77836
+//
+// Copyright (C) 2014 Samsung Electronics
+// Chanwoo Choi <cw00.choi@samsung.com>
+// Krzysztof Kozlowski <krzk@kernel.org>
+//
+// This driver is based on max8997.c
#include <linux/err.h>
#include <linux/module.h>
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index b1700b5fa640..d8217366ed36 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -285,7 +285,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
}
if (fps_id == MAX77620_FPS_COUNT) {
- dev_err(dev, "FPS node name %s is not valid\n", fps_np->name);
+ dev_err(dev, "FPS node name %pOFn is not valid\n", fps_np);
return -EINVAL;
}
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index b0e8e13c0049..71faf503844b 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -1,26 +1,12 @@
-/*
- * max77686.c - mfd core driver for the Maxim 77686/802
- *
- * Copyright (C) 2012 Samsung Electronics
- * Chiwoong Byun <woong.byun@samsung.com>
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77686.c - mfd core driver for the Maxim 77686/802
+//
+// Copyright (C) 2012 Samsung Electronics
+// Chiwoong Byun <woong.byun@samsung.com>
+// Jonghwa Lee <jonghwa3.lee@samsung.com>
+//
+// This driver is based on max8997.c
#include <linux/export.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 1c05ea0cba61..901d99d65924 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -1,27 +1,13 @@
-/*
- * max77693.c - mfd core driver for the MAX 77693
- *
- * Copyright (C) 2012 Samsung Electronics
- * SangYoung Son <hello.son@samsung.com>
- *
- * This program is not provided / owned by Maxim Integrated Products.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77693.c - mfd core driver for the MAX 77693
+//
+// Copyright (C) 2012 Samsung Electronics
+// SangYoung Son <hello.son@samsung.com>
+//
+// This program is not provided / owned by Maxim Integrated Products.
+//
+// This driver is based on max8997.c
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index da9612dbb222..25cbb2242b26 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -1,15 +1,10 @@
-/*
- * MFD core driver for the Maxim MAX77843
- *
- * Copyright (C) 2015 Samsung Electronics
- * Author: Jaewon Kim <jaewon02.kim@samsung.com>
- * Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// MFD core driver for the Maxim MAX77843
+//
+// Copyright (C) 2015 Samsung Electronics
+// Author: Jaewon Kim <jaewon02.kim@samsung.com>
+// Author: Beomho Seo <beomho.seo@samsung.com>
#include <linux/err.h>
#include <linux/i2c.h>
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index 326f17b632a7..93a3b1698d9c 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -1,25 +1,11 @@
-/*
- * max8997-irq.c - Interrupt controller support for MAX8997
- *
- * Copyright (C) 2011 Samsung Electronics Co.Ltd
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8998-irq.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8997-irq.c - Interrupt controller support for MAX8997
+//
+// Copyright (C) 2011 Samsung Electronics Co.Ltd
+// MyungJoo Ham <myungjoo.ham@samsung.com>
+//
+// This driver is based on max8998-irq.c
#include <linux/err.h>
#include <linux/irq.h>
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 3f554c447521..8c06c09e36d1 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -1,25 +1,11 @@
-/*
- * max8997.c - mfd core driver for the Maxim 8966 and 8997
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8998.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8997.c - mfd core driver for the Maxim 8966 and 8997
+//
+// Copyright (C) 2011 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
+//
+// This driver is based on max8998.c
#include <linux/err.h>
#include <linux/slab.h>
@@ -153,12 +139,6 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
pd->ono = irq_of_parse_and_map(dev->of_node, 1);
- /*
- * ToDo: the 'wakeup' member in the platform data is more of a linux
- * specfic information. Hence, there is no binding for that yet and
- * not parsed here.
- */
-
return pd;
}
@@ -246,7 +226,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
*/
/* MAX8997 has a power button input. */
- device_init_wakeup(max8997->dev, pdata->wakeup);
+ device_init_wakeup(max8997->dev, true);
return ret;
@@ -468,6 +448,7 @@ static int max8997_suspend(struct device *dev)
struct i2c_client *i2c = to_i2c_client(dev);
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ disable_irq(max8997->irq);
if (device_may_wakeup(dev))
irq_set_irq_wake(max8997->irq, 1);
return 0;
@@ -480,6 +461,7 @@ static int max8997_resume(struct device *dev)
if (device_may_wakeup(dev))
irq_set_irq_wake(max8997->irq, 0);
+ enable_irq(max8997->irq);
return max8997_irq_resume(max8997);
}
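
Gating the IRQ across the suspend/resume window keeps the PMIC's threaded handler off the I2C bus while the bus controller itself is suspended. The calls must stay balanced and bracket the wake-source setup, roughly like this sketch (structure names follow the driver above):

    static int example_suspend(struct device *dev)
    {
            struct i2c_client *i2c = to_i2c_client(dev);
            struct max8997_dev *chip = i2c_get_clientdata(i2c);

            /* no interrupt handling while the I2C bus is asleep */
            disable_irq(chip->irq);
            if (device_may_wakeup(dev))
                    irq_set_irq_wake(chip->irq, 1);
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            struct i2c_client *i2c = to_i2c_client(dev);
            struct max8997_dev *chip = i2c_get_clientdata(i2c);

            if (device_may_wakeup(dev))
                    irq_set_irq_wake(chip->irq, 0);
            /* re-enable only once the bus is usable again */
            enable_irq(chip->irq);
            return max8997_irq_resume(chip);
    }
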
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 90bad9ffa7e2..83b6f510bc05 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -1,15 +1,9 @@
-/*
- * Interrupt controller support for MAX8998
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Interrupt controller support for MAX8998
+//
+// Copyright (C) 2010 Samsung Electronics Co.Ltd
+// Author: Joonyoung Shim <jy0922.shim@samsung.com>
#include <linux/device.h>
#include <linux/interrupt.h>
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index b1d3f70782d9..56409df120f8 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -1,24 +1,10 @@
-/*
- * max8998.c - mfd core driver for the Maxim 8998
- *
- * Copyright (C) 2009-2010 Samsung Electronics
- * Kyungmin Park <kyungmin.park@samsung.com>
- * Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8998.c - mfd core driver for the Maxim 8998
+//
+// Copyright (C) 2009-2010 Samsung Electronics
+// Kyungmin Park <kyungmin.park@samsung.com>
+// Marek Szyprowski <m.szyprowski@samsung.com>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index c63e331738c1..f475e848252f 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -276,7 +276,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
- adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
+ adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
+ MC13XXX_ADC0_CHRGRAWDIV;
adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
/*
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index cbc1e5ed599c..ee3411cc5ce4 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -57,7 +57,8 @@ static const struct regmap_config mc13xxx_regmap_spi_config = {
.max_register = MC13XXX_NUMREGS,
.cache_type = REGCACHE_NONE,
- .use_single_rw = 1,
+ .use_single_read = true,
+ .use_single_write = true,
};
static int mc13xxx_spi_read(void *context, const void *reg, size_t reg_size,
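
This hunk and the twl6040 one further down are the same mechanical rename: regmap split its use_single_rw flag into use_single_read and use_single_write so a device can declare the limitation per direction. A driver that cannot batch register accesses at all now sets both:

    static const struct regmap_config example_regmap_config = {
            .reg_bits = 8,
            .val_bits = 24,
            /* device cannot do bulk transfers in either direction */
            .use_single_read  = true,
            .use_single_write = true,
    };
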
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 29b7164a823b..d28ebe7ecd21 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1094,6 +1094,7 @@ static void menelaus_rtc_alarm_work(struct menelaus_chip *m)
static inline void menelaus_rtc_init(struct menelaus_chip *m)
{
int alarm = (m->client->irq > 0);
+ int err;
/* assume 32KDETEN pin is pulled high */
if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) {
@@ -1101,6 +1102,12 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
return;
}
+ m->rtc = devm_rtc_allocate_device(&m->client->dev);
+ if (IS_ERR(m->rtc))
+ return;
+
+ m->rtc->ops = &menelaus_rtc_ops;
+
/* support RTC alarm; it can issue wakeups */
if (alarm) {
if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ,
@@ -1125,10 +1132,8 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
}
- m->rtc = rtc_device_register(DRIVER_NAME,
- &m->client->dev,
- &menelaus_rtc_ops, THIS_MODULE);
- if (IS_ERR(m->rtc)) {
+ err = rtc_register_device(m->rtc);
+ if (err) {
if (alarm) {
menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
device_init_wakeup(&m->client->dev, 0);
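
rtc_device_register() allocated and exposed the device in one call, so anything configured afterwards raced against userspace opening /dev/rtcN. The devm_rtc_allocate_device()/rtc_register_device() pair used above splits allocation from registration. A sketch of the shape, with illustrative names:

    #include <linux/rtc.h>

    static int example_rtc_setup(struct device *dev,
                                 const struct rtc_class_ops *ops)
    {
            struct rtc_device *rtc;

            rtc = devm_rtc_allocate_device(dev);    /* allocated, not yet visible */
            if (IS_ERR(rtc))
                    return PTR_ERR(rtc);

            rtc->ops = ops;                 /* safe: no user can open it yet */

            return rtc_register_device(rtc);        /* now exposed to userspace */
    }
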
diff --git a/drivers/mfd/motorola-cpcap.c b/drivers/mfd/motorola-cpcap.c
index 5276911caaec..20d9692640e1 100644
--- a/drivers/mfd/motorola-cpcap.c
+++ b/drivers/mfd/motorola-cpcap.c
@@ -18,6 +18,7 @@
#include <linux/regmap.h>
#include <linux/sysfs.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/motorola-cpcap.h>
#include <linux/spi/spi.h>
@@ -216,6 +217,53 @@ static const struct regmap_config cpcap_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
+static const struct mfd_cell cpcap_mfd_devices[] = {
+ {
+ .name = "cpcap_adc",
+ .of_compatible = "motorola,mapphone-cpcap-adc",
+ }, {
+ .name = "cpcap_battery",
+ .of_compatible = "motorola,cpcap-battery",
+ }, {
+ .name = "cpcap-charger",
+ .of_compatible = "motorola,mapphone-cpcap-charger",
+ }, {
+ .name = "cpcap-regulator",
+ .of_compatible = "motorola,mapphone-cpcap-regulator",
+ }, {
+ .name = "cpcap-rtc",
+ .of_compatible = "motorola,cpcap-rtc",
+ }, {
+ .name = "cpcap-pwrbutton",
+ .of_compatible = "motorola,cpcap-pwrbutton",
+ }, {
+ .name = "cpcap-usb-phy",
+ .of_compatible = "motorola,mapphone-cpcap-usb-phy",
+ }, {
+ .name = "cpcap-led",
+ .id = 0,
+ .of_compatible = "motorola,cpcap-led-red",
+ }, {
+ .name = "cpcap-led",
+ .id = 1,
+ .of_compatible = "motorola,cpcap-led-green",
+ }, {
+ .name = "cpcap-led",
+ .id = 2,
+ .of_compatible = "motorola,cpcap-led-blue",
+ }, {
+ .name = "cpcap-led",
+ .id = 3,
+ .of_compatible = "motorola,cpcap-led-adl",
+ }, {
+ .name = "cpcap-led",
+ .id = 4,
+ .of_compatible = "motorola,cpcap-led-cp",
+ }, {
+ .name = "cpcap-codec",
+ }
+};
+
static int cpcap_probe(struct spi_device *spi)
{
const struct of_device_id *match;
@@ -260,7 +308,8 @@ static int cpcap_probe(struct spi_device *spi)
if (ret)
return ret;
- return devm_of_platform_populate(&cpcap->spi->dev);
+ return devm_mfd_add_devices(&spi->dev, 0, cpcap_mfd_devices,
+ ARRAY_SIZE(cpcap_mfd_devices), NULL, 0, NULL);
}
static struct spi_driver cpcap_driver = {
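
Registering the children as explicit mfd_cell entries instead of of_platform_populate() means each subdevice is a platform device created by the MFD core; a cell's .of_compatible string tells the core which child node of the parent's DT node to attach to it. A reduced sketch with hypothetical names:

    #include <linux/mfd/core.h>

    static const struct mfd_cell example_cells[] = {
            { .name = "example-adc", .of_compatible = "vendor,example-adc" },
            { .name = "example-rtc", .of_compatible = "vendor,example-rtc" },
            { .name = "example-codec" },    /* no DT node: still instantiated */
    };

    static int example_add_children(struct device *dev)
    {
            /* id base 0, no MEM resources, no IRQ base, no IRQ domain */
            return devm_mfd_add_devices(dev, 0, example_cells,
                                        ARRAY_SIZE(example_cells),
                                        NULL, 0, NULL);
    }
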
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index e11ab12fbdf2..800986a79704 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -528,8 +528,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
}
static const struct of_device_id usbhs_child_match_table[] = {
- { .compatible = "ti,omap-ehci", },
- { .compatible = "ti,omap-ohci", },
+ { .compatible = "ti,ehci-omap", },
+ { .compatible = "ti,ohci-omap3", },
{ }
};
@@ -855,6 +855,7 @@ static struct platform_driver usbhs_omap_driver = {
.pm = &usbhsomap_dev_pm_ops,
.of_match_table = usbhs_omap_dt_ids,
},
+ .probe = usbhs_omap_probe,
.remove = usbhs_omap_remove,
};
@@ -864,9 +865,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
-static int __init omap_usbhs_drvinit(void)
+static int omap_usbhs_drvinit(void)
{
- return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
+ return platform_driver_register(&usbhs_omap_driver);
}
/*
@@ -878,7 +879,7 @@ static int __init omap_usbhs_drvinit(void)
*/
fs_initcall_sync(omap_usbhs_drvinit);
-static void __exit omap_usbhs_drvexit(void)
+static void omap_usbhs_drvexit(void)
{
platform_driver_unregister(&usbhs_omap_driver);
}
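
platform_driver_probe() binds exactly once at registration, which is why the probe function could live in __init text, but it also turns any not-yet-ready dependency into a permanent failure. Registering through platform_driver_register() with .probe set in the driver structure, as done here, lets the driver core retry after -EPROBE_DEFER. The resulting skeleton:

    static int example_probe(struct platform_device *pdev)
    {
            /* may return -EPROBE_DEFER; the core re-attempts the bind later */
            return 0;
    }

    static struct platform_driver example_driver = {
            .driver = {
                    .name = "example",
            },
            .probe  = example_probe,        /* no __init: must outlive boot */
    };

    static int example_init(void)
    {
            return platform_driver_register(&example_driver);
    }
    fs_initcall_sync(example_init);
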
diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c
index 75c8ec659547..a29d529a96f4 100644
--- a/drivers/mfd/rohm-bd718x7.c
+++ b/drivers/mfd/rohm-bd718x7.c
@@ -2,26 +2,21 @@
//
// Copyright (C) 2018 ROHM Semiconductors
//
-// ROHM BD71837MWV PMIC driver
+// ROHM BD71837MWV and BD71847MWV PMIC driver
//
-// Datasheet available from
+// Datasheet for BD71837MWV available from
// https://www.rohm.com/datasheet/BD71837MWV/bd71837mwv-e
+#include <linux/gpio_keys.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mfd/rohm-bd718x7.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
-
-/*
- * gpio_keys.h requires definiton of bool. It is brought in
- * by above includes. Keep this as last until gpio_keys.h gets fixed.
- */
-#include <linux/gpio_keys.h>
-
-static const u8 supported_revisions[] = { 0xA2 /* BD71837 */ };
+#include <linux/types.h>
static struct gpio_keys_button button = {
.code = KEY_POWER,
@@ -35,42 +30,42 @@ static struct gpio_keys_platform_data bd718xx_powerkey_data = {
.name = "bd718xx-pwrkey",
};
-static struct mfd_cell bd71837_mfd_cells[] = {
+static struct mfd_cell bd718xx_mfd_cells[] = {
{
.name = "gpio-keys",
.platform_data = &bd718xx_powerkey_data,
.pdata_size = sizeof(bd718xx_powerkey_data),
},
- { .name = "bd71837-clk", },
- { .name = "bd71837-pmic", },
+ { .name = "bd718xx-clk", },
+ { .name = "bd718xx-pmic", },
};
-static const struct regmap_irq bd71837_irqs[] = {
- REGMAP_IRQ_REG(BD71837_INT_SWRST, 0, BD71837_INT_SWRST_MASK),
- REGMAP_IRQ_REG(BD71837_INT_PWRBTN_S, 0, BD71837_INT_PWRBTN_S_MASK),
- REGMAP_IRQ_REG(BD71837_INT_PWRBTN_L, 0, BD71837_INT_PWRBTN_L_MASK),
- REGMAP_IRQ_REG(BD71837_INT_PWRBTN, 0, BD71837_INT_PWRBTN_MASK),
- REGMAP_IRQ_REG(BD71837_INT_WDOG, 0, BD71837_INT_WDOG_MASK),
- REGMAP_IRQ_REG(BD71837_INT_ON_REQ, 0, BD71837_INT_ON_REQ_MASK),
- REGMAP_IRQ_REG(BD71837_INT_STBY_REQ, 0, BD71837_INT_STBY_REQ_MASK),
+static const struct regmap_irq bd718xx_irqs[] = {
+ REGMAP_IRQ_REG(BD718XX_INT_SWRST, 0, BD718XX_INT_SWRST_MASK),
+ REGMAP_IRQ_REG(BD718XX_INT_PWRBTN_S, 0, BD718XX_INT_PWRBTN_S_MASK),
+ REGMAP_IRQ_REG(BD718XX_INT_PWRBTN_L, 0, BD718XX_INT_PWRBTN_L_MASK),
+ REGMAP_IRQ_REG(BD718XX_INT_PWRBTN, 0, BD718XX_INT_PWRBTN_MASK),
+ REGMAP_IRQ_REG(BD718XX_INT_WDOG, 0, BD718XX_INT_WDOG_MASK),
+ REGMAP_IRQ_REG(BD718XX_INT_ON_REQ, 0, BD718XX_INT_ON_REQ_MASK),
+ REGMAP_IRQ_REG(BD718XX_INT_STBY_REQ, 0, BD718XX_INT_STBY_REQ_MASK),
};
-static struct regmap_irq_chip bd71837_irq_chip = {
- .name = "bd71837-irq",
- .irqs = bd71837_irqs,
- .num_irqs = ARRAY_SIZE(bd71837_irqs),
+static struct regmap_irq_chip bd718xx_irq_chip = {
+ .name = "bd718xx-irq",
+ .irqs = bd718xx_irqs,
+ .num_irqs = ARRAY_SIZE(bd718xx_irqs),
.num_regs = 1,
.irq_reg_stride = 1,
- .status_base = BD71837_REG_IRQ,
- .mask_base = BD71837_REG_MIRQ,
- .ack_base = BD71837_REG_IRQ,
+ .status_base = BD718XX_REG_IRQ,
+ .mask_base = BD718XX_REG_MIRQ,
+ .ack_base = BD718XX_REG_IRQ,
.init_ack_masked = true,
.mask_invert = false,
};
static const struct regmap_range pmic_status_range = {
- .range_min = BD71837_REG_IRQ,
- .range_max = BD71837_REG_POW_STATE,
+ .range_min = BD718XX_REG_IRQ,
+ .range_max = BD718XX_REG_POW_STATE,
};
static const struct regmap_access_table volatile_regs = {
@@ -78,67 +73,53 @@ static const struct regmap_access_table volatile_regs = {
.n_yes_ranges = 1,
};
-static const struct regmap_config bd71837_regmap_config = {
+static const struct regmap_config bd718xx_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_table = &volatile_regs,
- .max_register = BD71837_MAX_REGISTER - 1,
+ .max_register = BD718XX_MAX_REGISTER - 1,
.cache_type = REGCACHE_RBTREE,
};
-static int bd71837_i2c_probe(struct i2c_client *i2c,
+static int bd718xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- struct bd71837 *bd71837;
- int ret, i;
- unsigned int val;
-
- bd71837 = devm_kzalloc(&i2c->dev, sizeof(struct bd71837), GFP_KERNEL);
+ struct bd718xx *bd718xx;
+ int ret;
- if (!bd71837)
- return -ENOMEM;
-
- bd71837->chip_irq = i2c->irq;
-
- if (!bd71837->chip_irq) {
+ if (!i2c->irq) {
dev_err(&i2c->dev, "No IRQ configured\n");
return -EINVAL;
}
- bd71837->dev = &i2c->dev;
- dev_set_drvdata(&i2c->dev, bd71837);
+ bd718xx = devm_kzalloc(&i2c->dev, sizeof(struct bd718xx), GFP_KERNEL);
- bd71837->regmap = devm_regmap_init_i2c(i2c, &bd71837_regmap_config);
- if (IS_ERR(bd71837->regmap)) {
- dev_err(&i2c->dev, "regmap initialization failed\n");
- return PTR_ERR(bd71837->regmap);
- }
+ if (!bd718xx)
+ return -ENOMEM;
- ret = regmap_read(bd71837->regmap, BD71837_REG_REV, &val);
- if (ret) {
- dev_err(&i2c->dev, "Read BD71837_REG_DEVICE failed\n");
- return ret;
- }
- for (i = 0; i < ARRAY_SIZE(supported_revisions); i++)
- if (supported_revisions[i] == val)
- break;
+ bd718xx->chip_irq = i2c->irq;
+ bd718xx->chip_type = (unsigned int)(uintptr_t)
+ of_device_get_match_data(&i2c->dev);
+ bd718xx->dev = &i2c->dev;
+ dev_set_drvdata(&i2c->dev, bd718xx);
- if (i == ARRAY_SIZE(supported_revisions)) {
- dev_err(&i2c->dev, "Unsupported chip revision\n");
- return -ENODEV;
+ bd718xx->regmap = devm_regmap_init_i2c(i2c, &bd718xx_regmap_config);
+ if (IS_ERR(bd718xx->regmap)) {
+ dev_err(&i2c->dev, "regmap initialization failed\n");
+ return PTR_ERR(bd718xx->regmap);
}
- ret = devm_regmap_add_irq_chip(&i2c->dev, bd71837->regmap,
- bd71837->chip_irq, IRQF_ONESHOT, 0,
- &bd71837_irq_chip, &bd71837->irq_data);
+ ret = devm_regmap_add_irq_chip(&i2c->dev, bd718xx->regmap,
+ bd718xx->chip_irq, IRQF_ONESHOT, 0,
+ &bd718xx_irq_chip, &bd718xx->irq_data);
if (ret) {
dev_err(&i2c->dev, "Failed to add irq_chip\n");
return ret;
}
/* Configure short press to 10 milliseconds */
- ret = regmap_update_bits(bd71837->regmap,
- BD71837_REG_PWRONCONFIG0,
+ ret = regmap_update_bits(bd718xx->regmap,
+ BD718XX_REG_PWRONCONFIG0,
BD718XX_PWRBTN_PRESS_DURATION_MASK,
BD718XX_PWRBTN_SHORT_PRESS_10MS);
if (ret) {
@@ -148,8 +129,8 @@ static int bd71837_i2c_probe(struct i2c_client *i2c,
}
/* Configure long press to 10 seconds */
- ret = regmap_update_bits(bd71837->regmap,
- BD71837_REG_PWRONCONFIG1,
+ ret = regmap_update_bits(bd718xx->regmap,
+ BD718XX_REG_PWRONCONFIG1,
BD718XX_PWRBTN_PRESS_DURATION_MASK,
BD718XX_PWRBTN_LONG_PRESS_10S);
@@ -159,7 +140,7 @@ static int bd71837_i2c_probe(struct i2c_client *i2c,
return ret;
}
- ret = regmap_irq_get_virq(bd71837->irq_data, BD71837_INT_PWRBTN_S);
+ ret = regmap_irq_get_virq(bd718xx->irq_data, BD718XX_INT_PWRBTN_S);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to get the IRQ\n");
@@ -168,44 +149,51 @@ static int bd71837_i2c_probe(struct i2c_client *i2c,
button.irq = ret;
- ret = devm_mfd_add_devices(bd71837->dev, PLATFORM_DEVID_AUTO,
- bd71837_mfd_cells,
- ARRAY_SIZE(bd71837_mfd_cells), NULL, 0,
- regmap_irq_get_domain(bd71837->irq_data));
+ ret = devm_mfd_add_devices(bd718xx->dev, PLATFORM_DEVID_AUTO,
+ bd718xx_mfd_cells,
+ ARRAY_SIZE(bd718xx_mfd_cells), NULL, 0,
+ regmap_irq_get_domain(bd718xx->irq_data));
if (ret)
dev_err(&i2c->dev, "Failed to create subdevices\n");
return ret;
}
-static const struct of_device_id bd71837_of_match[] = {
- { .compatible = "rohm,bd71837", },
+static const struct of_device_id bd718xx_of_match[] = {
+ {
+ .compatible = "rohm,bd71837",
+ .data = (void *)BD718XX_TYPE_BD71837,
+ },
+ {
+ .compatible = "rohm,bd71847",
+ .data = (void *)BD718XX_TYPE_BD71847,
+ },
{ }
};
-MODULE_DEVICE_TABLE(of, bd71837_of_match);
+MODULE_DEVICE_TABLE(of, bd718xx_of_match);
-static struct i2c_driver bd71837_i2c_driver = {
+static struct i2c_driver bd718xx_i2c_driver = {
.driver = {
.name = "rohm-bd718x7",
- .of_match_table = bd71837_of_match,
+ .of_match_table = bd718xx_of_match,
},
- .probe = bd71837_i2c_probe,
+ .probe = bd718xx_i2c_probe,
};
-static int __init bd71837_i2c_init(void)
+static int __init bd718xx_i2c_init(void)
{
- return i2c_add_driver(&bd71837_i2c_driver);
+ return i2c_add_driver(&bd718xx_i2c_driver);
}
/* Initialise early so consumer devices can complete system boot */
-subsys_initcall(bd71837_i2c_init);
+subsys_initcall(bd718xx_i2c_init);
-static void __exit bd71837_i2c_exit(void)
+static void __exit bd718xx_i2c_exit(void)
{
- i2c_del_driver(&bd71837_i2c_driver);
+ i2c_del_driver(&bd718xx_i2c_driver);
}
-module_exit(bd71837_i2c_exit);
+module_exit(bd718xx_i2c_exit);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("ROHM BD71837 Power Management IC driver");
+MODULE_DESCRIPTION("ROHM BD71837/BD71847 Power Management IC driver");
MODULE_LICENSE("GPL");
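
The chip type travels in the .data member of the of_device_id table and comes back out through of_device_get_match_data(), so probe no longer needs an explicit of_match_device() lookup; the uintptr_t cast keeps the integer-in-pointer round trip well defined. Reduced to its skeleton, with illustrative names:

    #include <linux/of_device.h>

    enum example_chip { EXAMPLE_CHIP_A, EXAMPLE_CHIP_B };

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,chip-a", .data = (void *)EXAMPLE_CHIP_A },
            { .compatible = "vendor,chip-b", .data = (void *)EXAMPLE_CHIP_B },
            { }
    };

    static unsigned int example_get_type(struct i2c_client *i2c)
    {
            /* const void * back to the enum it was stored as */
            return (unsigned int)(uintptr_t)
                   of_device_get_match_data(&i2c->dev);
    }
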
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 9613b4257302..e0835c9df7a1 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -1,15 +1,7 @@
-/*
- * sec-core.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 5eb59c233d52..ad0099077e7e 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -1,19 +1,12 @@
-/*
- * sec-irq.c
- *
- * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/mfd/samsung/core.h>
@@ -501,3 +494,10 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
return 0;
}
+EXPORT_SYMBOL_GPL(sec_irq_init);
+
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_DESCRIPTION("Interrupt support for the S5M MFD");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ti-lmu.c b/drivers/mfd/ti-lmu.c
index cfb411cde51c..37d0bdb291c3 100644
--- a/drivers/mfd/ti-lmu.c
+++ b/drivers/mfd/ti-lmu.c
@@ -12,7 +12,7 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>
@@ -21,28 +21,18 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
struct ti_lmu_data {
- struct mfd_cell *cells;
+ const struct mfd_cell *cells;
int num_cells;
unsigned int max_register;
};
static int ti_lmu_enable_hw(struct ti_lmu *lmu, enum ti_lmu_id id)
{
- int ret;
-
- if (gpio_is_valid(lmu->en_gpio)) {
- ret = devm_gpio_request_one(lmu->dev, lmu->en_gpio,
- GPIOF_OUT_INIT_HIGH, "lmu_hwen");
- if (ret) {
- dev_err(lmu->dev, "Can not request enable GPIO: %d\n",
- ret);
- return ret;
- }
- }
+ if (lmu->en_gpio)
+ gpiod_set_value(lmu->en_gpio, 1);
/* Delay about 1ms after HW enable pin control */
usleep_range(1000, 1500);
@@ -57,13 +47,14 @@ static int ti_lmu_enable_hw(struct ti_lmu *lmu, enum ti_lmu_id id)
return 0;
}
-static void ti_lmu_disable_hw(struct ti_lmu *lmu)
+static void ti_lmu_disable_hw(void *data)
{
- if (gpio_is_valid(lmu->en_gpio))
- gpio_set_value(lmu->en_gpio, 0);
+ struct ti_lmu *lmu = data;
+ if (lmu->en_gpio)
+ gpiod_set_value(lmu->en_gpio, 0);
}
-static struct mfd_cell lm3532_devices[] = {
+static const struct mfd_cell lm3532_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3532,
@@ -78,7 +69,7 @@ static struct mfd_cell lm3532_devices[] = {
.of_compatible = "ti,lm363x-regulator", \
} \
-static struct mfd_cell lm3631_devices[] = {
+static const struct mfd_cell lm3631_devices[] = {
LM363X_REGULATOR(LM3631_BOOST),
LM363X_REGULATOR(LM3631_LDO_CONT),
LM363X_REGULATOR(LM3631_LDO_OREF),
@@ -91,7 +82,7 @@ static struct mfd_cell lm3631_devices[] = {
},
};
-static struct mfd_cell lm3632_devices[] = {
+static const struct mfd_cell lm3632_devices[] = {
LM363X_REGULATOR(LM3632_BOOST),
LM363X_REGULATOR(LM3632_LDO_POS),
LM363X_REGULATOR(LM3632_LDO_NEG),
@@ -102,7 +93,7 @@ static struct mfd_cell lm3632_devices[] = {
},
};
-static struct mfd_cell lm3633_devices[] = {
+static const struct mfd_cell lm3633_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3633,
@@ -120,7 +111,7 @@ static struct mfd_cell lm3633_devices[] = {
},
};
-static struct mfd_cell lm3695_devices[] = {
+static const struct mfd_cell lm3695_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3695,
@@ -128,7 +119,7 @@ static struct mfd_cell lm3695_devices[] = {
},
};
-static struct mfd_cell lm3697_devices[] = {
+static const struct mfd_cell lm3697_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3697,
@@ -157,34 +148,21 @@ TI_LMU_DATA(lm3633, LM3633_MAX_REG);
TI_LMU_DATA(lm3695, LM3695_MAX_REG);
TI_LMU_DATA(lm3697, LM3697_MAX_REG);
-static const struct of_device_id ti_lmu_of_match[] = {
- { .compatible = "ti,lm3532", .data = &lm3532_data },
- { .compatible = "ti,lm3631", .data = &lm3631_data },
- { .compatible = "ti,lm3632", .data = &lm3632_data },
- { .compatible = "ti,lm3633", .data = &lm3633_data },
- { .compatible = "ti,lm3695", .data = &lm3695_data },
- { .compatible = "ti,lm3697", .data = &lm3697_data },
- { }
-};
-MODULE_DEVICE_TABLE(of, ti_lmu_of_match);
-
static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
struct device *dev = &cl->dev;
- const struct of_device_id *match;
const struct ti_lmu_data *data;
struct regmap_config regmap_cfg;
struct ti_lmu *lmu;
int ret;
- match = of_match_device(ti_lmu_of_match, dev);
- if (!match)
- return -ENODEV;
/*
* Get device specific data from of_match table.
* This data is defined by using TI_LMU_DATA() macro.
*/
- data = (struct ti_lmu_data *)match->data;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
lmu = devm_kzalloc(dev, sizeof(*lmu), GFP_KERNEL);
if (!lmu)
@@ -204,11 +182,21 @@ static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
return PTR_ERR(lmu->regmap);
/* HW enable pin control and additional power up sequence if required */
- lmu->en_gpio = of_get_named_gpio(dev->of_node, "enable-gpios", 0);
+ lmu->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(lmu->en_gpio)) {
+ ret = PTR_ERR(lmu->en_gpio);
+ dev_err(dev, "Can not request enable GPIO: %d\n", ret);
+ return ret;
+ }
+
ret = ti_lmu_enable_hw(lmu, id->driver_data);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, ti_lmu_disable_hw, lmu);
+ if (ret)
+ return ret;
+
/*
* Fault circuit(open/short) can be detected by ti-lmu-fault-monitor.
* After fault detection is done, some devices should re-initialize
@@ -218,18 +206,20 @@ static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
i2c_set_clientdata(cl, lmu);
- return mfd_add_devices(lmu->dev, 0, data->cells,
- data->num_cells, NULL, 0, NULL);
+ return devm_mfd_add_devices(lmu->dev, 0, data->cells,
+ data->num_cells, NULL, 0, NULL);
}
-static int ti_lmu_remove(struct i2c_client *cl)
-{
- struct ti_lmu *lmu = i2c_get_clientdata(cl);
-
- ti_lmu_disable_hw(lmu);
- mfd_remove_devices(lmu->dev);
- return 0;
-}
+static const struct of_device_id ti_lmu_of_match[] = {
+ { .compatible = "ti,lm3532", .data = &lm3532_data },
+ { .compatible = "ti,lm3631", .data = &lm3631_data },
+ { .compatible = "ti,lm3632", .data = &lm3632_data },
+ { .compatible = "ti,lm3633", .data = &lm3633_data },
+ { .compatible = "ti,lm3695", .data = &lm3695_data },
+ { .compatible = "ti,lm3697", .data = &lm3697_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ti_lmu_of_match);
static const struct i2c_device_id ti_lmu_ids[] = {
{ "lm3532", LM3532 },
@@ -244,7 +234,6 @@ MODULE_DEVICE_TABLE(i2c, ti_lmu_ids);
static struct i2c_driver ti_lmu_driver = {
.probe = ti_lmu_probe,
- .remove = ti_lmu_remove,
.driver = {
.name = "ti-lmu",
.of_match_table = ti_lmu_of_match,
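
The descriptor-based GPIO API folds request and initial level into one call: devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH) claims the enable-gpios property, drives it high, and returns NULL when the property is simply absent. Registering the matching disable with devm_add_action_or_reset() is what makes the .remove callback redundant, since the action also runs on probe failure and on unbind. A sketch of the pair:

    #include <linux/gpio/consumer.h>

    static void example_hw_disable(void *data)
    {
            struct gpio_desc *en = data;

            gpiod_set_value(en, 0);
    }

    static int example_hw_enable(struct device *dev)
    {
            struct gpio_desc *en;

            en = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
            if (IS_ERR(en))
                    return PTR_ERR(en);     /* request failed */
            if (!en)
                    return 0;               /* property absent: nothing to undo */

            /* runs automatically on probe failure and driver unbind */
            return devm_add_action_or_reset(dev, example_hw_disable, en);
    }
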
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 7a30546880a4..c2d47d78705b 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -269,7 +269,6 @@ static int ti_tscadc_probe(struct platform_device *pdev)
if (err < 0)
goto err_disable_clk;
- device_init_wakeup(&pdev->dev, true);
platform_set_drvdata(pdev, tscadc);
return 0;
@@ -294,11 +293,24 @@ static int ti_tscadc_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused ti_tscadc_can_wakeup(struct device *dev, void *data)
+{
+ return device_may_wakeup(dev);
+}
+
static int __maybe_unused tscadc_suspend(struct device *dev)
{
struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev);
regmap_write(tscadc->regmap, REG_SE, 0x00);
+ if (device_for_each_child(dev, NULL, ti_tscadc_can_wakeup)) {
+ u32 ctrl;
+
+ regmap_read(tscadc->regmap, REG_CTRL, &ctrl);
+ ctrl &= ~(CNTRLREG_POWERDOWN);
+ ctrl |= CNTRLREG_TSCSSENB;
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
+ }
pm_runtime_put_sync(dev);
return 0;
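
device_for_each_child() aborts the walk and propagates the first non-zero return from its callback, which makes it a compact "does any child match" test; here the parent keeps the block powered in suspend only when some child is configured as a wakeup source. The idiom in isolation:

    static int child_may_wakeup(struct device *dev, void *unused)
    {
            return device_may_wakeup(dev);  /* non-zero stops the iteration */
    }

    static bool any_child_may_wakeup(struct device *parent)
    {
            return device_for_each_child(parent, NULL, child_may_wakeup) != 0;
    }
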
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index dd19f17a1b63..7c3c5fd5fcd0 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -613,7 +613,8 @@ static const struct regmap_config twl6040_regmap_config = {
.writeable_reg = twl6040_writeable_reg,
.cache_type = REGCACHE_RBTREE,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
static const struct regmap_irq twl6040_irqs[] = {
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 120738d6e58b..77ed3967c5b0 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -408,7 +408,7 @@ struct genwqe_file {
struct file *filp;
struct fasync_struct *async_queue;
- struct task_struct *owner;
+ struct pid *opener;
struct list_head list; /* entry in list of open files */
spinlock_t map_lock; /* lock for dma_mappings */
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index f453ab82f0d7..8c1b63a4337b 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -52,7 +52,7 @@ static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
unsigned long flags;
- cfile->owner = current;
+ cfile->opener = get_pid(task_tgid(current));
spin_lock_irqsave(&cd->file_lock, flags);
list_add(&cfile->list, &cd->file_list);
spin_unlock_irqrestore(&cd->file_lock, flags);
@@ -65,6 +65,7 @@ static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
spin_lock_irqsave(&cd->file_lock, flags);
list_del(&cfile->list);
spin_unlock_irqrestore(&cd->file_lock, flags);
+ put_pid(cfile->opener);
return 0;
}
@@ -275,7 +276,7 @@ static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
return files;
}
-static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
+static int genwqe_terminate(struct genwqe_dev *cd)
{
unsigned int files = 0;
unsigned long flags;
@@ -283,7 +284,7 @@ static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
spin_lock_irqsave(&cd->file_lock, flags);
list_for_each_entry(cfile, &cd->file_list, list) {
- force_sig(sig, cfile->owner);
+ kill_pid(cfile->opener, SIGKILL, 1);
files++;
}
spin_unlock_irqrestore(&cd->file_lock, flags);
@@ -1352,7 +1353,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
dev_warn(&pci_dev->dev,
"[%s] send SIGKILL and wait ...\n", __func__);
- rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
+ rc = genwqe_terminate(cd);
if (rc) {
/* Give kill_timout more seconds to end processes */
for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
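
Caching a struct task_struct pointer for later signalling is unsafe: the opener may exit while the file descriptor lives on (inherited or passed along), leaving a dangling pointer for force_sig(). Taking a counted reference on the thread-group pid instead keeps a stable handle that kill_pid() resolves at signal time; the reference is dropped on close. The lifecycle in miniature:

    #include <linux/pid.h>
    #include <linux/sched/signal.h>

    struct example_file {
            struct pid *opener;
    };

    static void example_open(struct example_file *f)
    {
            f->opener = get_pid(task_tgid(current));        /* +1 reference */
    }

    static void example_terminate(struct example_file *f)
    {
            kill_pid(f->opener, SIGKILL, 1);        /* 1: privileged sender */
    }

    static void example_release(struct example_file *f)
    {
            put_pid(f->opener);                     /* -1 reference */
    }
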
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 2154d1bfd18b..5a755590d3dc 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -183,6 +183,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
CRASHTYPE(USERCOPY_STACK_BEYOND),
CRASHTYPE(USERCOPY_KERNEL),
+ CRASHTYPE(USERCOPY_KERNEL_DS),
};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 9e513dcfd809..07db641d71d0 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -82,5 +82,6 @@ void lkdtm_USERCOPY_STACK_FRAME_TO(void);
void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
void lkdtm_USERCOPY_STACK_BEYOND(void);
void lkdtm_USERCOPY_KERNEL(void);
+void lkdtm_USERCOPY_KERNEL_DS(void);
#endif
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 9725aed305bb..389475b25bb7 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -322,6 +322,19 @@ free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
+void lkdtm_USERCOPY_KERNEL_DS(void)
+{
+ char __user *user_ptr = (char __user *)ERR_PTR(-EINVAL);
+ mm_segment_t old_fs = get_fs();
+ char buf[10] = {0};
+
+ pr_info("attempting copy_to_user on unmapped kernel address\n");
+ set_fs(KERNEL_DS);
+ if (copy_to_user(user_ptr, buf, sizeof(buf)))
+ pr_info("copy_to_user un unmapped kernel address failed\n");
+ set_fs(old_fs);
+}
+
void __init lkdtm_usercopy_init(void)
{
/* Prepare cache that lacks SLAB_USERCOPY flag. */
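
The new crashtype exercises the old set_fs() escape hatch: raising the thread's address limit to KERNEL_DS makes access_ok() accept kernel-space pointers, so the bogus copy_to_user() must be stopped by the page-fault path rather than by the limit check. The save/override/restore pattern it depends on (since removed from mainline along with set_fs() itself):

    static void example_kernel_ds_window(void)
    {
            mm_segment_t old_fs = get_fs(); /* remember the caller's limit */

            set_fs(KERNEL_DS);      /* access_ok() now accepts kernel range */
            /* ... uaccess calls that deliberately take kernel pointers ... */
            set_fs(old_fs);         /* always restore before returning */
    }
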
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 42e89060cd41..2f38a7ad07e0 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -14,7 +14,7 @@ config PWRSEQ_EMMC
config PWRSEQ_SD8787
tristate "HW reset support for SD8787 BT + Wifi module"
- depends on OF && (MWIFIEX || BT_MRVL_SDIO)
+ depends on OF && (MWIFIEX || BT_MRVL_SDIO || LIBERTAS_SDIO)
help
This selects hardware reset support for the SD8787 BT + Wifi
module. By default this option is set to n.
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index a0b9102c4c6e..c35b5b08bb33 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1371,6 +1371,16 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
if (brq->data.blocks > 1) {
/*
+ * Some SD cards in SPI mode return a CRC error or even lock up
+ * completely when trying to read the last block using a
+ * multiblock read command.
+ */
+ if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
+ (blk_rq_pos(req) + blk_rq_sectors(req) ==
+ get_capacity(md->disk)))
+ brq->data.blocks--;
+
+ /*
* After a read error, we redo the request one sector
* at a time in order to accurately determine which
* sectors can be read successfully.
@@ -2698,7 +2708,7 @@ static int mmc_add_disk(struct mmc_blk_data *md)
int ret;
struct mmc_card *card = md->queue.card;
- device_add_disk(md->parent, md->disk);
+ device_add_disk(md->parent, md->disk, NULL);
md->force_ro.show = force_ro_show;
md->force_ro.store = force_ro_store;
sysfs_attr_init(&md->force_ro.attr);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abf9e884386c..f57f5de54206 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host)
host->caps |= MMC_CAP_NEEDS_POLL;
ret = mmc_gpiod_request_cd(host, "cd", 0, true,
- cd_debounce_delay_ms,
+ cd_debounce_delay_ms * 1000,
&cd_gpio_invert);
if (!ret)
dev_info(host->parent, "Got CD GPIO\n");
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index a8b9fee4d62a..ece34c734693 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -40,17 +40,21 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
struct gpio_descs *reset_gpios = pwrseq->reset_gpios;
if (!IS_ERR(reset_gpios)) {
- int i, *values;
+ unsigned long *values;
int nvalues = reset_gpios->ndescs;
- values = kmalloc_array(nvalues, sizeof(int), GFP_KERNEL);
+ values = bitmap_alloc(nvalues, GFP_KERNEL);
if (!values)
return;
- for (i = 0; i < nvalues; i++)
- values[i] = value;
+ if (value)
+ bitmap_fill(values, nvalues);
+ else
+ bitmap_zero(values, nvalues);
+
+ gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc,
+ reset_gpios->info, values);
- gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc, values);
kfree(values);
}
}
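
gpiod_set_array_value_cansleep() now takes the per-line values as a bitmap together with the array's info cookie, letting gpiolib fast-path arrays that sit on a single bank. Driving every line to the same level therefore collapses to bitmap_fill()/bitmap_zero(), as above; in isolation, for an already-requested struct gpio_descs:

    #include <linux/bitmap.h>
    #include <linux/gpio/consumer.h>
    #include <linux/slab.h>

    static void example_set_all(struct gpio_descs *descs, int value)
    {
            unsigned long *values;

            values = bitmap_alloc(descs->ndescs, GFP_KERNEL);
            if (!values)
                    return;

            if (value)
                    bitmap_fill(values, descs->ndescs);
            else
                    bitmap_zero(values, descs->ndescs);

            gpiod_set_array_value_cansleep(descs->ndescs, descs->desc,
                                           descs->info, values);
            kfree(values);
    }
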
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 2a833686784b..86803a3a04dc 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
- ctx->cd_debounce_delay_ms = debounce;
+ ctx->cd_debounce_delay_ms = debounce / 1000;
}
if (gpio_invert)
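
The host.c and slot-gpio.c hunks are a matched pair of unit conversions: the slot context stores cd_debounce_delay_ms in milliseconds, while gpiod_set_debounce() expects microseconds. The caller multiplies on the way in; when the GPIO controller cannot debounce in hardware, the value is divided back so the software card-detect path keeps the same delay. A sketch, where ctx stands for the driver's private slot context:

    static void example_set_cd_debounce(struct mmc_gpio *ctx,
                                        struct gpio_desc *desc)
    {
            /* context stores ms; gpiod_set_debounce() takes us */
            unsigned int debounce = ctx->cd_debounce_delay_ms * 1000;

            if (gpiod_set_debounce(desc, debounce) < 0)
                    /* no hardware debounce: software fallback, back in ms */
                    ctx->cd_debounce_delay_ms = debounce / 1000;
    }
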
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 694d0828215d..1b58739d9744 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -34,6 +34,16 @@ config MMC_QCOM_DML
if unsure, say N.
+config MMC_STM32_SDMMC
+ bool "STMicroelectronics STM32 SDMMC Controller"
+ depends on MMC_ARMMMCI
+ default y
+ help
+ This selects the STMicroelectronics STM32 SDMMC host controller.
+ If you have an STM32 SDMMC host with internal DMA, say Y here.
+
+ If unsure, say N.
+
config MMC_PXA
tristate "Intel PXA25x/26x/27x Multimedia Card Interface support"
depends on ARCH_PXA
@@ -345,6 +355,7 @@ config MMC_SDHCI_IPROC
tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller"
depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
+ depends on OF || ACPI
default ARCH_BCM_IPROC
select MMC_SDHCI_IO_ACCESSORS
help
@@ -592,6 +603,19 @@ config MMC_SDRICOH_CS
To compile this driver as a module, choose M here: the
module will be called sdricoh_cs.
+config MMC_SDHCI_SPRD
+ tristate "Spreadtrum SDIO host Controller"
+ depends on ARCH_SPRD
+ depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This selects the SDIO Host Controller in Spreadtrum
+ SoCs. This driver supports R11 (IP version: R11P0).
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_TMIO_CORE
tristate
@@ -622,14 +646,24 @@ config MMC_SDHI_SYS_DMAC
config MMC_SDHI_INTERNAL_DMAC
tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering"
- depends on ARM64 || COMPILE_TEST
+ depends on ARM64 || ARCH_R8A77470 || COMPILE_TEST
depends on MMC_SDHI
- default MMC_SDHI if ARM64
+ default MMC_SDHI if (ARM64 || ARCH_R8A77470)
help
This provides DMA support for SDHI SD/SDIO controllers
using on-chip bus mastering. This supports the controllers
found in arm64 based SoCs.
+config MMC_UNIPHIER
+ tristate "UniPhier SD/eMMC Host Controller support"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF
+ select MMC_TMIO_CORE
+ help
+ This provides support for the SD/eMMC controller found in
+ UniPhier SoCs. The eMMC variant of this controller is used
+ only for 32-bit SoCs.
+
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
depends on PCI
@@ -772,7 +806,7 @@ config MMC_SH_MMCIF
config MMC_JZ4740
tristate "Ingenic JZ47xx SD/Multimedia Card Interface support"
- depends on MACH_JZ4740 || MACH_JZ4780
+ depends on MIPS
help
This selects support for the SD/MMC controller on Ingenic
JZ4740, JZ4750, JZ4770 and JZ4780 SoCs.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index ce8398e6f2c0..720d37777098 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_MMC_ARMMMCI) += armmmci.o
armmmci-y := mmci.o
armmmci-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o
+armmmci-$(CONFIG_MMC_STM32_SDMMC) += mmci_stm32_sdmmc.o
obj-$(CONFIG_MMC_PXA) += pxamci.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
@@ -42,6 +43,7 @@ obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o
obj-$(CONFIG_MMC_SDHI_SYS_DMAC) += renesas_sdhi_sys_dmac.o
obj-$(CONFIG_MMC_SDHI_INTERNAL_DMAC) += renesas_sdhi_internal_dmac.o
+obj-$(CONFIG_MMC_UNIPHIER) += uniphier-sd.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
octeon-mmc-objs := cavium.o cavium-octeon.o
@@ -91,6 +93,7 @@ obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
+obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o
obj-$(CONFIG_MMC_CQHCI) += cqhci.o
ifeq ($(CONFIG_CB710_DEBUG),y)
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index ab47b018716a..d46c3439b508 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -253,6 +253,8 @@ static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
if (timing == MMC_TIMING_MMC_HS400) {
dqs |= DATA_STROBE_EN;
strobe = DQS_CTRL_RD_DELAY(strobe, priv->dqs_delay);
+ } else if (timing == MMC_TIMING_UHS_SDR104) {
+ dqs &= 0xffffff00;
} else {
dqs &= ~DATA_STROBE_EN;
}
@@ -312,6 +314,15 @@ static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
if (ios->bus_width == MMC_BUS_WIDTH_8)
wanted <<= 1;
break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_UHS_SDR50:
+ clksel = (priv->sdr_timing & 0xfff8ffff) |
+ (priv->ciu_div << 16);
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ clksel = (priv->ddr_timing & 0xfff8ffff) |
+ (priv->ciu_div << 16);
+ break;
default:
clksel = priv->sdr_timing;
}
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index f9b333ff259e..bc51cef47c47 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -23,6 +23,12 @@ struct hi3798cv200_priv {
struct clk *drive_clk;
};
+static unsigned long dw_mci_hi3798cv200_caps[] = {
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23
+};
+
static void dw_mci_hi3798cv200_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
struct hi3798cv200_priv *priv = host->priv;
@@ -160,6 +166,8 @@ disable_sample_clk:
}
static const struct dw_mci_drv_data hi3798cv200_data = {
+ .caps = dw_mci_hi3798cv200_caps,
+ .num_caps = ARRAY_SIZE(dw_mci_hi3798cv200_caps),
.init = dw_mci_hi3798cv200_init,
.set_ios = dw_mci_hi3798cv200_set_ios,
.execute_tuning = dw_mci_hi3798cv200_execute_tuning,
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 993386c9ea50..0c1efd5100b7 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -115,7 +115,7 @@
enum jz4740_mmc_version {
JZ_MMC_JZ4740,
- JZ_MMC_JZ4750,
+ JZ_MMC_JZ4725B,
JZ_MMC_JZ4780,
};
@@ -176,7 +176,7 @@ struct jz4740_mmc_host {
static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
uint32_t val)
{
- if (host->version >= JZ_MMC_JZ4750)
+ if (host->version >= JZ_MMC_JZ4725B)
return writel(val, host->base + JZ_REG_MMC_IMASK);
else
return writew(val, host->base + JZ_REG_MMC_IMASK);
@@ -1012,6 +1012,7 @@ static void jz4740_mmc_free_gpios(struct platform_device *pdev)
static const struct of_device_id jz4740_mmc_of_match[] = {
{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
+ { .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
{},
};
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 2cfec33178c1..abe253c262a2 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -294,7 +294,7 @@ static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
vdd = 0;
- /* fall-through: */
+ /* fall through */
case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc)) {
host->error = mmc_regulator_set_ocr(mmc,
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 1841d250e9e2..82bab35fff41 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -28,8 +28,7 @@
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -37,6 +36,7 @@
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/reset.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -46,41 +46,77 @@
#define DRIVER_NAME "mmci-pl18x"
+#ifdef CONFIG_DMA_ENGINE
+void mmci_variant_init(struct mmci_host *host);
+#else
+static inline void mmci_variant_init(struct mmci_host *host) {}
+#endif
+
+#ifdef CONFIG_MMC_STM32_SDMMC
+void sdmmc_variant_init(struct mmci_host *host);
+#else
+static inline void sdmmc_variant_init(struct mmci_host *host) {}
+#endif
+
static unsigned int fmax = 515633;
static struct variant_data variant_arm = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
+ .datactrl_blocksz = 11,
+ .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.reversed_irq_handling = true,
.mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_ROD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_arm_extended_fifo = {
.fifosize = 128 * 4,
.fifohalfsize = 64 * 4,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
+ .datactrl_blocksz = 11,
+ .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_ROD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_arm_extended_fifo_hwfc = {
.fifosize = 128 * 4,
.fifohalfsize = 64 * 4,
.clkreg_enable = MCI_ARM_HWFCEN,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
+ .datactrl_blocksz = 11,
+ .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_ROD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_u300 = {
@@ -88,7 +124,13 @@ static struct variant_data variant_u300 = {
.fifohalfsize = 8 * 4,
.clkreg_enable = MCI_ST_U300_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 16,
+ .datactrl_blocksz = 11,
+ .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.pwrreg_powerup = MCI_PWR_ON,
@@ -97,8 +139,10 @@ static struct variant_data variant_u300 = {
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
.mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_nomadik = {
@@ -106,7 +150,13 @@ static struct variant_data variant_nomadik = {
.fifohalfsize = 8 * 4,
.clkreg = MCI_CLK_ENABLE,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -116,8 +166,10 @@ static struct variant_data variant_nomadik = {
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
.mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_ux500 = {
@@ -127,7 +179,13 @@ static struct variant_data variant_ux500 = {
.clkreg_enable = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -141,8 +199,10 @@ static struct variant_data variant_ux500 = {
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
.mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_ux500v2 = {
@@ -152,8 +212,14 @@ static struct variant_data variant_ux500v2 = {
.clkreg_enable = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
.datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -168,8 +234,10 @@ static struct variant_data variant_ux500v2 = {
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
.mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
+ .init = mmci_variant_init,
};
static struct variant_data variant_stm32 = {
@@ -179,7 +247,14 @@ static struct variant_data variant_stm32 = {
.clkreg_enable = MCI_ST_UX500_HWFCEN,
.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
.clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
.datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.st_sdio = true,
.st_clkdiv = true,
@@ -187,6 +262,26 @@ static struct variant_data variant_stm32 = {
.f_max = 48000000,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .init = mmci_variant_init,
+};
+
+static struct variant_data variant_stm32_sdmmc = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .f_max = 208000000,
+ .stm32_clkdiv = true,
+ .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
+ .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
+ .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
+ .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
+ .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
+ .datactrl_first = true,
+ .datacnt_useless = true,
+ .datalength_bits = 25,
+ .datactrl_blocksz = 14,
+ .stm32_idmabsize_mask = GENMASK(12, 5),
+ .init = sdmmc_variant_init,
};
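
The new cmdreg_* and datactrl_* variant fields exist because the STM32 SDMMC lays out its command and data-control registers differently from the classic PL180/PL181 bit positions; the core now composes register values from per-variant masks instead of hardcoded MCI_CPSM_* constants. Roughly, command setup becomes (a sketch of the shape, assuming cmd and host in scope, not the full function):

    u32 c = cmd->opcode | host->variant->cmdreg_cpsm_enable;

    if (cmd->flags & MMC_RSP_PRESENT) {
            if (cmd->flags & MMC_RSP_136)           /* long response */
                    c |= host->variant->cmdreg_lrsp_crc;
            else if (cmd->flags & MMC_RSP_CRC)      /* short, CRC-checked */
                    c |= host->variant->cmdreg_srsp_crc;
            else                                    /* short, no CRC */
                    c |= host->variant->cmdreg_srsp;
    }
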
static struct variant_data variant_qcom = {
@@ -197,15 +292,22 @@ static struct variant_data variant_qcom = {
MCI_QCOM_CLK_SELECT_IN_FBCLK,
.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
.datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
+ .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
+ .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
+ .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
+ .cmdreg_srsp = MCI_CPSM_RESPONSE,
.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
.blksz_datactrl4 = true,
.datalength_bits = 24,
+ .datactrl_blocksz = 11,
+ .datactrl_dpsm_enable = MCI_DPSM_ENABLE,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 208000000,
.explicit_mclk_control = true,
.qcom_fifo = true,
.qcom_dml = true,
.mmcimask1 = true,
+ .irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_ROD,
.init = qcom_variant_init,
@@ -226,24 +328,6 @@ static int mmci_card_busy(struct mmc_host *mmc)
return busy;
}
-/*
- * Validate mmc prerequisites
- */
-static int mmci_validate_data(struct mmci_host *host,
- struct mmc_data *data)
-{
- if (!data)
- return 0;
-
- if (!is_power_of_2(data->blksz)) {
- dev_err(mmc_dev(host->mmc),
- "unsupported block size (%d bytes)\n", data->blksz);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void mmci_reg_delay(struct mmci_host *host)
{
/*
@@ -262,7 +346,7 @@ static void mmci_reg_delay(struct mmci_host *host)
/*
* This must be called with host->lock held
*/
-static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
+void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
if (host->clk_reg != clk) {
host->clk_reg = clk;
@@ -273,7 +357,7 @@ static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
/*
* This must be called with host->lock held
*/
-static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
+void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
if (host->pwr_reg != pwr) {
host->pwr_reg = pwr;
@@ -357,6 +441,135 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
mmci_write_clkreg(host, clk);
}
+void mmci_dma_release(struct mmci_host *host)
+{
+ if (host->ops && host->ops->dma_release)
+ host->ops->dma_release(host);
+
+ host->use_dma = false;
+}
+
+void mmci_dma_setup(struct mmci_host *host)
+{
+ if (!host->ops || !host->ops->dma_setup)
+ return;
+
+ if (host->ops->dma_setup(host))
+ return;
+
+ /* initialize pre request cookie */
+ host->next_cookie = 1;
+
+ host->use_dma = true;
+}
+
+/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ if (!data)
+ return 0;
+
+ if (!is_power_of_2(data->blksz)) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported block size (%d bytes)\n", data->blksz);
+ return -EINVAL;
+ }
+
+ if (host->ops && host->ops->validate_data)
+ return host->ops->validate_data(host, data);
+
+ return 0;
+}
+
+int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
+{
+ int err;
+
+ if (!host->ops || !host->ops->prep_data)
+ return 0;
+
+ err = host->ops->prep_data(host, data, next);
+
+ if (next && !err)
+ data->host_cookie = ++host->next_cookie < 0 ?
+ 1 : host->next_cookie;
+
+ return err;
+}
+
+void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
+ int err)
+{
+ if (host->ops && host->ops->unprep_data)
+ host->ops->unprep_data(host, data, err);
+
+ data->host_cookie = 0;
+}
+
+void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+ WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
+
+ if (host->ops && host->ops->get_next_data)
+ host->ops->get_next_data(host, data);
+}
+
+int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
+{
+ struct mmc_data *data = host->data;
+ int ret;
+
+ if (!host->use_dma)
+ return -EINVAL;
+
+ ret = mmci_prep_data(host, data, false);
+ if (ret)
+ return ret;
+
+ if (!host->ops || !host->ops->dma_start)
+ return -EINVAL;
+
+ /* Okay, go for it. */
+ dev_vdbg(mmc_dev(host->mmc),
+ "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+ data->sg_len, data->blksz, data->blocks, data->flags);
+
+ host->ops->dma_start(host, &datactrl);
+
+ /* Trigger the DMA transfer */
+ mmci_write_datactrlreg(host, datactrl);
+
+ /*
+ * Let the MMCI say when the data is ended and it's time
+ * to fire next DMA request. When that happens, MMCI will
+ * call mmci_data_end()
+ */
+ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+ host->base + MMCIMASK0);
+ return 0;
+}
+
+void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+ if (!host->use_dma)
+ return;
+
+ if (host->ops && host->ops->dma_finalize)
+ host->ops->dma_finalize(host, data);
+}
+
+void mmci_dma_error(struct mmci_host *host)
+{
+ if (!host->use_dma)
+ return;
+
+ if (host->ops && host->ops->dma_error)
+ host->ops->dma_error(host);
+}
+
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
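
All of these wrappers funnel DMA work through a per-variant operations table so the legacy dmaengine backend and the STM32 SDMMC internal DMA can live behind one core; every call site tolerates a missing callback, which is how PIO-only variants keep working unchanged. The table the wrappers imply looks roughly like the following (reconstructed from the calls above, not quoted from the header):

    struct mmci_host_ops {
            int  (*validate_data)(struct mmci_host *host,
                                  struct mmc_data *data);
            int  (*prep_data)(struct mmci_host *host, struct mmc_data *data,
                              bool next);
            void (*unprep_data)(struct mmci_host *host, struct mmc_data *data,
                                int err);
            void (*get_next_data)(struct mmci_host *host,
                                  struct mmc_data *data);
            int  (*dma_setup)(struct mmci_host *host);
            void (*dma_release)(struct mmci_host *host);
            int  (*dma_start)(struct mmci_host *host, unsigned int *datactrl);
            void (*dma_finalize)(struct mmci_host *host,
                                 struct mmc_data *data);
            void (*dma_error)(struct mmci_host *host);
    };
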
@@ -378,7 +591,7 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
if (host->singleirq) {
unsigned int mask0 = readl(base + MMCIMASK0);
- mask0 &= ~MCI_IRQ1MASK;
+ mask0 &= ~variant->irq_pio_mask;
mask0 |= mask;
writel(mask0, base + MMCIMASK0);
@@ -415,31 +628,50 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
* no custom DMA interfaces are supported.
*/
#ifdef CONFIG_DMA_ENGINE
-static void mmci_dma_setup(struct mmci_host *host)
+struct mmci_dmae_next {
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan;
+};
+
+struct mmci_dmae_priv {
+ struct dma_chan *cur;
+ struct dma_chan *rx_channel;
+ struct dma_chan *tx_channel;
+ struct dma_async_tx_descriptor *desc_current;
+ struct mmci_dmae_next next_data;
+};
+
+int mmci_dmae_setup(struct mmci_host *host)
{
const char *rxname, *txname;
+ struct mmci_dmae_priv *dmae;
- host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
- host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
+ dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
+ if (!dmae)
+ return -ENOMEM;
- /* initialize pre request cookie */
- host->next_data.cookie = 1;
+ host->dma_priv = dmae;
+
+ dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
+ "rx");
+ dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
+ "tx");
/*
* If only an RX channel is specified, the driver will
* attempt to use it bidirectionally, however if it
* is specified but cannot be located, DMA will be disabled.
*/
- if (host->dma_rx_channel && !host->dma_tx_channel)
- host->dma_tx_channel = host->dma_rx_channel;
+ if (dmae->rx_channel && !dmae->tx_channel)
+ dmae->tx_channel = dmae->rx_channel;
- if (host->dma_rx_channel)
- rxname = dma_chan_name(host->dma_rx_channel);
+ if (dmae->rx_channel)
+ rxname = dma_chan_name(dmae->rx_channel);
else
rxname = "none";
- if (host->dma_tx_channel)
- txname = dma_chan_name(host->dma_tx_channel);
+ if (dmae->tx_channel)
+ txname = dma_chan_name(dmae->tx_channel);
else
txname = "none";
@@ -450,66 +682,84 @@ static void mmci_dma_setup(struct mmci_host *host)
* Limit the maximum segment size in any SG entry according to
* the parameters of the DMA engine device.
*/
- if (host->dma_tx_channel) {
- struct device *dev = host->dma_tx_channel->device->dev;
+ if (dmae->tx_channel) {
+ struct device *dev = dmae->tx_channel->device->dev;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
if (max_seg_size < host->mmc->max_seg_size)
host->mmc->max_seg_size = max_seg_size;
}
- if (host->dma_rx_channel) {
- struct device *dev = host->dma_rx_channel->device->dev;
+ if (dmae->rx_channel) {
+ struct device *dev = dmae->rx_channel->device->dev;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
if (max_seg_size < host->mmc->max_seg_size)
host->mmc->max_seg_size = max_seg_size;
}
- if (host->ops && host->ops->dma_setup)
- host->ops->dma_setup(host);
+ if (!dmae->tx_channel || !dmae->rx_channel) {
+ mmci_dmae_release(host);
+ return -EINVAL;
+ }
+
+ return 0;
}
/*
* This is used only in the probe/remove paths, so inline it
* so it can be discarded.
*/
-static inline void mmci_dma_release(struct mmci_host *host)
+void mmci_dmae_release(struct mmci_host *host)
{
- if (host->dma_rx_channel)
- dma_release_channel(host->dma_rx_channel);
- if (host->dma_tx_channel)
- dma_release_channel(host->dma_tx_channel);
- host->dma_rx_channel = host->dma_tx_channel = NULL;
-}
+ struct mmci_dmae_priv *dmae = host->dma_priv;
-static void mmci_dma_data_error(struct mmci_host *host)
-{
- dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
- dmaengine_terminate_all(host->dma_current);
- host->dma_in_progress = false;
- host->dma_current = NULL;
- host->dma_desc_current = NULL;
- host->data->host_cookie = 0;
+ if (dmae->rx_channel)
+ dma_release_channel(dmae->rx_channel);
+ if (dmae->tx_channel)
+ dma_release_channel(dmae->tx_channel);
+ dmae->rx_channel = dmae->tx_channel = NULL;
}
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
struct dma_chan *chan;
if (data->flags & MMC_DATA_READ)
- chan = host->dma_rx_channel;
+ chan = dmae->rx_channel;
else
- chan = host->dma_tx_channel;
+ chan = dmae->tx_channel;
dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
-static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+void mmci_dmae_error(struct mmci_host *host)
{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+
+ if (!dma_inprogress(host))
+ return;
+
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(dmae->cur);
+ host->dma_in_progress = false;
+ dmae->cur = NULL;
+ dmae->desc_current = NULL;
+ host->data->host_cookie = 0;
+
+ mmci_dma_unmap(host, host->data);
+}
+
+void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
u32 status;
int i;
+ if (!dma_inprogress(host))
+ return;
+
/* Wait up to 1ms for the DMA to complete */
for (i = 0; ; i++) {
status = readl(host->base + MMCISTATUS);
@@ -525,13 +775,12 @@ static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
* contiguous buffers. On TX, we'll get a FIFO underrun error.
*/
if (status & MCI_RXDATAAVLBLMASK) {
- mmci_dma_data_error(host);
+ mmci_dma_error(host);
if (!data->error)
data->error = -EIO;
- }
-
- if (!data->host_cookie)
+ } else if (!data->host_cookie) {
mmci_dma_unmap(host, data);
+ }
/*
* Use of DMA with scatter-gather is impossible.
@@ -543,15 +792,16 @@ static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
}
host->dma_in_progress = false;
- host->dma_current = NULL;
- host->dma_desc_current = NULL;
+ dmae->cur = NULL;
+ dmae->desc_current = NULL;
}
/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
-static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
struct dma_chan **dma_chan,
struct dma_async_tx_descriptor **dma_desc)
{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
.src_addr = host->phybase + MMCIFIFO,
@@ -570,10 +820,10 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_DEV_TO_MEM;
- chan = host->dma_rx_channel;
+ chan = dmae->rx_channel;
} else {
conf.direction = DMA_MEM_TO_DEV;
- chan = host->dma_tx_channel;
+ chan = dmae->tx_channel;
}
/* If there's no DMA channel, fall back to PIO */
@@ -610,160 +860,137 @@ static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
return -ENOMEM;
}
-static inline int mmci_dma_prep_data(struct mmci_host *host,
- struct mmc_data *data)
+int mmci_dmae_prep_data(struct mmci_host *host,
+ struct mmc_data *data,
+ bool next)
{
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+ struct mmci_dmae_next *nd = &dmae->next_data;
+
+ if (!host->use_dma)
+ return -EINVAL;
+
+ if (next)
+ return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
/* Check if next job is already prepared. */
- if (host->dma_current && host->dma_desc_current)
+ if (dmae->cur && dmae->desc_current)
return 0;
/* No job were prepared thus do it now. */
- return __mmci_dma_prep_data(host, data, &host->dma_current,
- &host->dma_desc_current);
-}
-
-static inline int mmci_dma_prep_next(struct mmci_host *host,
- struct mmc_data *data)
-{
- struct mmci_host_next *nd = &host->next_data;
- return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+ return _mmci_dmae_prep_data(host, data, &dmae->cur,
+ &dmae->desc_current);
}
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
- int ret;
+ struct mmci_dmae_priv *dmae = host->dma_priv;
struct mmc_data *data = host->data;
- ret = mmci_dma_prep_data(host, host->data);
- if (ret)
- return ret;
-
- /* Okay, go for it. */
- dev_vdbg(mmc_dev(host->mmc),
- "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
- data->sg_len, data->blksz, data->blocks, data->flags);
host->dma_in_progress = true;
- dmaengine_submit(host->dma_desc_current);
- dma_async_issue_pending(host->dma_current);
+ dmaengine_submit(dmae->desc_current);
+ dma_async_issue_pending(dmae->cur);
if (host->variant->qcom_dml)
dml_start_xfer(host, data);
- datactrl |= MCI_DPSM_DMAENABLE;
+ *datactrl |= MCI_DPSM_DMAENABLE;
- /* Trigger the DMA transfer */
- mmci_write_datactrlreg(host, datactrl);
-
- /*
- * Let the MMCI say when the data is ended and it's time
- * to fire next DMA request. When that happens, MMCI will
- * call mmci_data_end()
- */
- writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
- host->base + MMCIMASK0);
return 0;
}
-static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
- struct mmci_host_next *next = &host->next_data;
-
- WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
- WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
-
- host->dma_desc_current = next->dma_desc;
- host->dma_current = next->dma_chan;
- next->dma_desc = NULL;
- next->dma_chan = NULL;
-}
+ struct mmci_dmae_priv *dmae = host->dma_priv;
+ struct mmci_dmae_next *next = &dmae->next_data;
-static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct mmci_host *host = mmc_priv(mmc);
- struct mmc_data *data = mrq->data;
- struct mmci_host_next *nd = &host->next_data;
-
- if (!data)
+ if (!host->use_dma)
return;
- BUG_ON(data->host_cookie);
+ WARN_ON(!data->host_cookie && (next->desc || next->chan));
- if (mmci_validate_data(host, data))
- return;
-
- if (!mmci_dma_prep_next(host, data))
- data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
+ dmae->desc_current = next->desc;
+ dmae->cur = next->chan;
+ next->desc = NULL;
+ next->chan = NULL;
}
-static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
- int err)
+void mmci_dmae_unprep_data(struct mmci_host *host,
+ struct mmc_data *data, int err)
+
{
- struct mmci_host *host = mmc_priv(mmc);
- struct mmc_data *data = mrq->data;
+ struct mmci_dmae_priv *dmae = host->dma_priv;
- if (!data || !data->host_cookie)
+ if (!host->use_dma)
return;
mmci_dma_unmap(host, data);
if (err) {
- struct mmci_host_next *next = &host->next_data;
+ struct mmci_dmae_next *next = &dmae->next_data;
struct dma_chan *chan;
if (data->flags & MMC_DATA_READ)
- chan = host->dma_rx_channel;
+ chan = dmae->rx_channel;
else
- chan = host->dma_tx_channel;
+ chan = dmae->tx_channel;
dmaengine_terminate_all(chan);
- if (host->dma_desc_current == next->dma_desc)
- host->dma_desc_current = NULL;
+ if (dmae->desc_current == next->desc)
+ dmae->desc_current = NULL;
- if (host->dma_current == next->dma_chan) {
+ if (dmae->cur == next->chan) {
host->dma_in_progress = false;
- host->dma_current = NULL;
+ dmae->cur = NULL;
}
- next->dma_desc = NULL;
- next->dma_chan = NULL;
- data->host_cookie = 0;
+ next->desc = NULL;
+ next->chan = NULL;
}
}
-#else
-/* Blank functions if the DMA engine is not available */
-static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
-{
-}
-static inline void mmci_dma_setup(struct mmci_host *host)
-{
-}
+static struct mmci_host_ops mmci_variant_ops = {
+ .prep_data = mmci_dmae_prep_data,
+ .unprep_data = mmci_dmae_unprep_data,
+ .get_next_data = mmci_dmae_get_next_data,
+ .dma_setup = mmci_dmae_setup,
+ .dma_release = mmci_dmae_release,
+ .dma_start = mmci_dmae_start,
+ .dma_finalize = mmci_dmae_finalize,
+ .dma_error = mmci_dmae_error,
+};
-static inline void mmci_dma_release(struct mmci_host *host)
+void mmci_variant_init(struct mmci_host *host)
{
+ host->ops = &mmci_variant_ops;
}
+#endif
-static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
-}
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
-static inline void mmci_dma_finalize(struct mmci_host *host,
- struct mmc_data *data)
-{
-}
+ if (!data)
+ return;
-static inline void mmci_dma_data_error(struct mmci_host *host)
-{
+ WARN_ON(data->host_cookie);
+
+ if (mmci_validate_data(host, data))
+ return;
+
+ mmci_prep_data(host, data, true);
}
-static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
{
- return -ENOSYS;
-}
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
-#define mmci_pre_request NULL
-#define mmci_post_request NULL
+ if (!data || !data->host_cookie)
+ return;
-#endif
+ mmci_unprep_data(host, data, err);
+}
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
@@ -793,11 +1020,11 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
BUG_ON(1 << blksz_bits != data->blksz);
if (variant->blksz_datactrl16)
- datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
+ datactrl = variant->datactrl_dpsm_enable | (data->blksz << 16);
else if (variant->blksz_datactrl4)
- datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
+ datactrl = variant->datactrl_dpsm_enable | (data->blksz << 4);
else
- datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
+ datactrl = variant->datactrl_dpsm_enable | blksz_bits << 4;
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
@@ -831,7 +1058,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
*/
- if (!mmci_dma_start_data(host, datactrl))
+ if (!mmci_dma_start(host, datactrl))
return;
/* IRQ mode, map the SG list for CPU reading/writing */
@@ -868,16 +1095,19 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
cmd->opcode, cmd->arg, cmd->flags);
- if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
+ if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
writel(0, base + MMCICOMMAND);
mmci_reg_delay(host);
}
- c |= cmd->opcode | MCI_CPSM_ENABLE;
+ c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136)
- c |= MCI_CPSM_LONGRSP;
- c |= MCI_CPSM_RESPONSE;
+ c |= host->variant->cmdreg_lrsp_crc;
+ else if (cmd->flags & MMC_RSP_CRC)
+ c |= host->variant->cmdreg_srsp_crc;
+ else
+ c |= host->variant->cmdreg_srsp;
}
if (/*interrupt*/0)
c |= MCI_CPSM_INTERRUPT;
@@ -895,21 +1125,22 @@ static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
{
+ unsigned int status_err;
+
/* Make sure we have data to handle */
if (!data)
return;
/* First check for errors */
- if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
- host->variant->start_err |
- MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
+ status_err = status & (host->variant->start_err |
+ MCI_DATACRCFAIL | MCI_DATATIMEOUT |
+ MCI_TXUNDERRUN | MCI_RXOVERRUN);
+
+ if (status_err) {
u32 remain, success;
/* Terminate the DMA transfer */
- if (dma_inprogress(host)) {
- mmci_dma_data_error(host);
- mmci_dma_unmap(host, data);
- }
+ mmci_dma_error(host);
/*
* Calculate how far we are into the transfer. Note that
@@ -918,22 +1149,26 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
* can be as much as a FIFO-worth of data ahead. This
* matters for FIFO overruns only.
*/
- remain = readl(host->base + MMCIDATACNT);
- success = data->blksz * data->blocks - remain;
+ if (!host->variant->datacnt_useless) {
+ remain = readl(host->base + MMCIDATACNT);
+ success = data->blksz * data->blocks - remain;
+ } else {
+ success = 0;
+ }
dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
- status, success);
- if (status & MCI_DATACRCFAIL) {
+ status_err, success);
+ if (status_err & MCI_DATACRCFAIL) {
/* Last block was not successful */
success -= 1;
data->error = -EILSEQ;
- } else if (status & MCI_DATATIMEOUT) {
+ } else if (status_err & MCI_DATATIMEOUT) {
data->error = -ETIMEDOUT;
- } else if (status & MCI_STARTBITERR) {
+ } else if (status_err & MCI_STARTBITERR) {
data->error = -ECOMM;
- } else if (status & MCI_TXUNDERRUN) {
+ } else if (status_err & MCI_TXUNDERRUN) {
data->error = -EIO;
- } else if (status & MCI_RXOVERRUN) {
+ } else if (status_err & MCI_RXOVERRUN) {
if (success > host->variant->fifosize)
success -= host->variant->fifosize;
else
@@ -947,8 +1182,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
if (status & MCI_DATAEND || data->error) {
- if (dma_inprogress(host))
- mmci_dma_finalize(host, data);
+ mmci_dma_finalize(host, data);
+
mmci_stop_data(host);
if (!data->error)
@@ -1055,16 +1290,15 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
if ((!sbc && !cmd->data) || cmd->error) {
if (host->data) {
/* Terminate the DMA transfer */
- if (dma_inprogress(host)) {
- mmci_dma_data_error(host);
- mmci_dma_unmap(host, host->data);
- }
+ mmci_dma_error(host);
+
mmci_stop_data(host);
}
mmci_request_end(host, host->mrq);
} else if (sbc) {
mmci_start_command(host, host->mrq->cmd, 0);
- } else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ } else if (!host->variant->datactrl_first &&
+ !(cmd->data->flags & MMC_DATA_READ)) {
mmci_start_data(host, cmd->data);
}
}
@@ -1264,7 +1498,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
if (status & host->mask1_reg)
mmci_pio_irq(irq, dev_id);
- status &= ~MCI_IRQ1MASK;
+ status &= ~host->variant->irq_pio_mask;
}
/*
@@ -1328,7 +1562,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (mrq->data)
mmci_get_next_data(host, mrq->data);
- if (mrq->data && mrq->data->flags & MMC_DATA_READ)
+ if (mrq->data &&
+ (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
mmci_start_data(host, mrq->data);
if (mrq->sbc)
@@ -1438,8 +1673,16 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_lock_irqsave(&host->lock, flags);
- mmci_set_clkreg(host, ios->clock);
- mmci_write_pwrreg(host, pwr);
+ if (host->ops && host->ops->set_clkreg)
+ host->ops->set_clkreg(host, ios->clock);
+ else
+ mmci_set_clkreg(host, ios->clock);
+
+ if (host->ops && host->ops->set_pwrreg)
+ host->ops->set_pwrreg(host, pwr);
+ else
+ mmci_write_pwrreg(host, pwr);
+
mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
@@ -1518,6 +1761,12 @@ static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
host->pwr_reg_add |= MCI_ST_CMDDIREN;
if (of_get_property(np, "st,sig-pin-fbclk", NULL))
host->pwr_reg_add |= MCI_ST_FBCLKEN;
+ if (of_get_property(np, "st,sig-dir", NULL))
+ host->pwr_reg_add |= MCI_STM32_DIRPOL;
+ if (of_get_property(np, "st,neg-edge", NULL))
+ host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
+ if (of_get_property(np, "st,use-ckin", NULL))
+ host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;
if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
@@ -1644,6 +1893,8 @@ static int mmci_probe(struct amba_device *dev,
*/
if (variant->st_clkdiv)
mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
+ else if (variant->stm32_clkdiv)
+ mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
else if (variant->explicit_mclk_control)
mmc->f_min = clk_round_rate(host->clk, 100000);
else
@@ -1665,6 +1916,12 @@ static int mmci_probe(struct amba_device *dev,
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
+ host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
+ if (IS_ERR(host->rst)) {
+ ret = PTR_ERR(host->rst);
+ goto clk_disable;
+ }
+
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
if (ret)
@@ -1675,13 +1932,6 @@ static int mmci_probe(struct amba_device *dev,
else if (plat->ocr_mask)
dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
- /* DT takes precedence over platform data. */
- if (!np) {
- if (!plat->cd_invert)
- mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
- mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
- }
-
/* We support these capabilities. */
mmc->caps |= MMC_CAP_CMD23;
@@ -1727,13 +1977,13 @@ static int mmci_probe(struct amba_device *dev,
/*
* Block size can be up to 2048 bytes, but must be a power of two.
*/
- mmc->max_blk_size = 1 << 11;
+ mmc->max_blk_size = 1 << variant->datactrl_blocksz;
/*
* Limit the number of blocks transferred so that we don't overflow
* the maximum request size.
*/
- mmc->max_blk_count = mmc->max_req_size >> 11;
+ mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
spin_lock_init(&host->lock);
@@ -1749,30 +1999,16 @@ static int mmci_probe(struct amba_device *dev,
* - not using DT but using a descriptor table, or
* - using a table of descriptors ALONGSIDE DT, or
* look up these descriptors named "cd" and "wp" right here, fail
- * silently of these do not exist and proceed to try platform data
+ * silently if these do not exist
*/
if (!np) {
ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
- if (ret < 0) {
- if (ret == -EPROBE_DEFER)
- goto clk_disable;
- else if (gpio_is_valid(plat->gpio_cd)) {
- ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
- if (ret)
- goto clk_disable;
- }
- }
+ if (ret == -EPROBE_DEFER)
+ goto clk_disable;
ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
- if (ret < 0) {
- if (ret == -EPROBE_DEFER)
- goto clk_disable;
- else if (gpio_is_valid(plat->gpio_wp)) {
- ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
- if (ret)
- goto clk_disable;
- }
- }
+ if (ret == -EPROBE_DEFER)
+ goto clk_disable;
}
ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
@@ -1789,7 +2025,7 @@ static int mmci_probe(struct amba_device *dev,
goto clk_disable;
}
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
amba_set_drvdata(dev, mmc);
@@ -1876,7 +2112,8 @@ static void mmci_restore(struct mmci_host *host)
writel(host->datactrl_reg, host->base + MMCIDATACTRL);
writel(host->pwr_reg, host->base + MMCIPOWER);
}
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ writel(MCI_IRQENABLE | host->variant->start_err,
+ host->base + MMCIMASK0);
mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
@@ -1971,6 +2208,11 @@ static const struct amba_id mmci_ids[] = {
.mask = 0x00ffffff,
.data = &variant_stm32,
},
+ {
+ .id = 0x10153180,
+ .mask = 0xf0ffffff,
+ .data = &variant_stm32_sdmmc,
+ },
/* Qualcomm variants */
{
.id = 0x00051180,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 517591d219e9..550dd3914461 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -23,6 +23,14 @@
#define MCI_ST_DATA31DIREN (1 << 5)
#define MCI_ST_FBCLKEN (1 << 7)
#define MCI_ST_DATA74DIREN (1 << 8)
+/*
+ * The STM32 sdmmc does not have PWR_UP/OD/ROD
+ * and uses the power register for power cycling,
+ * voltage switching and signal direction polarity
+ */
+#define MCI_STM32_PWR_CYC 0x02
+#define MCI_STM32_VSWITCH BIT(2)
+#define MCI_STM32_VSWITCHEN BIT(3)
+#define MCI_STM32_DIRPOL BIT(4)
#define MMCICLOCK 0x004
#define MCI_CLK_ENABLE (1 << 8)
@@ -50,6 +58,19 @@
#define MCI_QCOM_CLK_SELECT_IN_FBCLK BIT(15)
#define MCI_QCOM_CLK_SELECT_IN_DDR_MODE (BIT(14) | BIT(15))
+/* Modified on STM32 sdmmc */
+#define MCI_STM32_CLK_CLKDIV_MSK GENMASK(9, 0)
+#define MCI_STM32_CLK_WIDEBUS_4 BIT(14)
+#define MCI_STM32_CLK_WIDEBUS_8 BIT(15)
+#define MCI_STM32_CLK_NEGEDGE BIT(16)
+#define MCI_STM32_CLK_HWFCEN BIT(17)
+#define MCI_STM32_CLK_DDR BIT(18)
+#define MCI_STM32_CLK_BUSSPEED BIT(19)
+#define MCI_STM32_CLK_SEL_MSK GENMASK(21, 20)
+#define MCI_STM32_CLK_SELCK (0 << 20)
+#define MCI_STM32_CLK_SELCKIN (1 << 20)
+#define MCI_STM32_CLK_SELFBCK (2 << 20)
+
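
A standalone sketch of how the fields above pack into one MMCICLOCK word (userspace C; BIT/GENMASK are reimplemented here and the divider/bus-width values are made up):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1U << (n))
#define GENMASK(h, l)	((~0U << (l)) & (~0U >> (31 - (h))))

#define CLK_CLKDIV_MSK	GENMASK(9, 0)	/* MCI_STM32_CLK_CLKDIV_MSK */
#define CLK_WIDEBUS_4	BIT(14)		/* MCI_STM32_CLK_WIDEBUS_4 */
#define CLK_HWFCEN	BIT(17)		/* MCI_STM32_CLK_HWFCEN */
#define CLK_SEL_MSK	GENMASK(21, 20)	/* MCI_STM32_CLK_SEL_MSK */
#define CLK_SELCKIN	(1U << 20)	/* MCI_STM32_CLK_SELCKIN */

int main(void)
{
	uint32_t clk = 0;

	clk |= 60 & CLK_CLKDIV_MSK;	/* cclk = mclk / (2 * 60) */
	clk |= CLK_WIDEBUS_4;		/* 4-bit bus */
	clk |= CLK_HWFCEN;		/* hardware flow control */
	clk = (clk & ~CLK_SEL_MSK) | CLK_SELCKIN;	/* clock from CKIN pad */

	printf("MMCICLOCK = 0x%08x\n", (unsigned int)clk);
	return 0;
}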
#define MMCIARGUMENT 0x008
/* The command register controls the Command Path State Machine (CPSM) */
@@ -72,6 +93,15 @@
#define MCI_CPSM_QCOM_CCSDISABLE BIT(15)
#define MCI_CPSM_QCOM_AUTO_CMD19 BIT(16)
#define MCI_CPSM_QCOM_AUTO_CMD21 BIT(21)
+/* Command register in STM32 sdmmc versions */
+#define MCI_CPSM_STM32_CMDTRANS BIT(6)
+#define MCI_CPSM_STM32_CMDSTOP BIT(7)
+#define MCI_CPSM_STM32_WAITRESP_MASK GENMASK(9, 8)
+#define MCI_CPSM_STM32_NORSP (0 << 8)
+#define MCI_CPSM_STM32_SRSP_CRC (1 << 8)
+#define MCI_CPSM_STM32_SRSP (2 << 8)
+#define MCI_CPSM_STM32_LRSP_CRC (3 << 8)
+#define MCI_CPSM_STM32_ENABLE BIT(12)
#define MMCIRESPCMD 0x010
#define MMCIRESPONSE0 0x014
@@ -130,6 +160,8 @@
#define MCI_ST_SDIOIT (1 << 22)
#define MCI_ST_CEATAEND (1 << 23)
#define MCI_ST_CARDBUSY (1 << 24)
+/* Extended status bits for the STM32 variants */
+#define MCI_STM32_BUSYD0 BIT(20)
#define MMCICLEAR 0x038
#define MCI_CMDCRCFAILCLR (1 << 0)
@@ -175,21 +207,45 @@
#define MCI_ST_SDIOITMASK (1 << 22)
#define MCI_ST_CEATAENDMASK (1 << 23)
#define MCI_ST_BUSYENDMASK (1 << 24)
+/* Extended status bits for the STM32 variants */
+#define MCI_STM32_BUSYD0ENDMASK BIT(21)
#define MMCIMASK1 0x040
#define MMCIFIFOCNT 0x048
#define MMCIFIFO 0x080 /* to 0x0bc */
+/* STM32 sdmmc registers for IDMA (Internal DMA) */
+#define MMCI_STM32_IDMACTRLR 0x050
+#define MMCI_STM32_IDMAEN BIT(0)
+#define MMCI_STM32_IDMALLIEN BIT(1)
+
+#define MMCI_STM32_IDMABSIZER 0x054
+#define MMCI_STM32_IDMABNDT_SHIFT 5
+#define MMCI_STM32_IDMABNDT_MASK GENMASK(12, 5)
+
+#define MMCI_STM32_IDMABASE0R 0x058
+
+#define MMCI_STM32_IDMALAR 0x64
+#define MMCI_STM32_IDMALA_MASK GENMASK(13, 0)
+#define MMCI_STM32_ABR BIT(29)
+#define MMCI_STM32_ULS BIT(30)
+#define MMCI_STM32_ULA BIT(31)
+
+#define MMCI_STM32_IDMABAR 0x68
+
#define MCI_IRQENABLE \
- (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
- MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
- MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK)
+ (MCI_CMDCRCFAILMASK | MCI_DATACRCFAILMASK | MCI_CMDTIMEOUTMASK | \
+ MCI_DATATIMEOUTMASK | MCI_TXUNDERRUNMASK | MCI_RXOVERRUNMASK | \
+ MCI_CMDRESPENDMASK | MCI_CMDSENTMASK)
/* These interrupts are directed to IRQ1 when two IRQ lines are available */
-#define MCI_IRQ1MASK \
+#define MCI_IRQ_PIO_MASK \
(MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \
MCI_TXFIFOHALFEMPTYMASK)
+#define MCI_IRQ_PIO_STM32_MASK \
+ (MCI_RXFIFOHALFFULLMASK | MCI_TXFIFOHALFEMPTYMASK)
+
#define NR_SG 128
#define MMCI_PINCTRL_STATE_OPENDRAIN "opendrain"
@@ -204,6 +260,10 @@ struct mmci_host;
* @clkreg_enable: enable value for MMCICLOCK register
* @clkreg_8bit_bus_enable: enable value for 8 bit bus
* @clkreg_neg_edge_enable: enable value for inverted data/cmd output
+ * @cmdreg_cpsm_enable: enable value for CPSM
+ * @cmdreg_lrsp_crc: enable value for long response with crc
+ * @cmdreg_srsp_crc: enable value for short response with crc
+ * @cmdreg_srsp: enable value for short response without crc
* @datalength_bits: number of bits in the MMCIDATALENGTH register
* @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
* is asserted (likewise for RX)
@@ -212,11 +272,17 @@ struct mmci_host;
* @data_cmd_enable: enable value for data commands.
* @st_sdio: enable ST specific SDIO logic
* @st_clkdiv: true if using a ST-specific clock divider algorithm
+ * @stm32_clkdiv: true if using an STM32-specific clock divider algorithm
* @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
* @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
* @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
* register
* @datactrl_mask_sdio: SDIO enable mask in datactrl register
+ * @datactrl_blocksz: block size as a power of two
+ * @datactrl_dpsm_enable: enable value for DPSM
+ * @datactrl_first: true if data must be set up before sending the command
+ * @datacnt_useless: true if the datacnt register cannot be used to read
+ *	remaining data
* @pwrreg_powerup: power up value for MMCIPOWER register
* @f_max: maximum clk frequency supported by the controller.
* @signal_direction: input/out direction of bus signals can be indicated
@@ -233,53 +299,75 @@ struct mmci_host;
* @qcom_dml: enables qcom specific dma glue for dma transfers.
* @reversed_irq_handling: handle data irq before cmd irq.
* @mmcimask1: true if variant have a MMCIMASK1 register.
+ * @irq_pio_mask: bitmask used to manage interrupt PIO transfers in the
+ *	mmcimask register
* @start_err: bitmask identifying the STARTBITERR bit inside MMCISTATUS
* register.
* @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register
+ * @dma_lli: true if variant has the DMA linked-list feature.
+ * @stm32_idmabsize_mask: stm32 sdmmc idma buffer size mask.
*/
struct variant_data {
unsigned int clkreg;
unsigned int clkreg_enable;
unsigned int clkreg_8bit_bus_enable;
unsigned int clkreg_neg_edge_enable;
+ unsigned int cmdreg_cpsm_enable;
+ unsigned int cmdreg_lrsp_crc;
+ unsigned int cmdreg_srsp_crc;
+ unsigned int cmdreg_srsp;
unsigned int datalength_bits;
unsigned int fifosize;
unsigned int fifohalfsize;
unsigned int data_cmd_enable;
unsigned int datactrl_mask_ddrmode;
unsigned int datactrl_mask_sdio;
- bool st_sdio;
- bool st_clkdiv;
- bool blksz_datactrl16;
- bool blksz_datactrl4;
+ unsigned int datactrl_blocksz;
+ unsigned int datactrl_dpsm_enable;
+ u8 datactrl_first:1;
+ u8 datacnt_useless:1;
+ u8 st_sdio:1;
+ u8 st_clkdiv:1;
+ u8 stm32_clkdiv:1;
+ u8 blksz_datactrl16:1;
+ u8 blksz_datactrl4:1;
u32 pwrreg_powerup;
u32 f_max;
- bool signal_direction;
- bool pwrreg_clkgate;
- bool busy_detect;
+ u8 signal_direction:1;
+ u8 pwrreg_clkgate:1;
+ u8 busy_detect:1;
u32 busy_dpsm_flag;
u32 busy_detect_flag;
u32 busy_detect_mask;
- bool pwrreg_nopower;
- bool explicit_mclk_control;
- bool qcom_fifo;
- bool qcom_dml;
- bool reversed_irq_handling;
- bool mmcimask1;
+ u8 pwrreg_nopower:1;
+ u8 explicit_mclk_control:1;
+ u8 qcom_fifo:1;
+ u8 qcom_dml:1;
+ u8 reversed_irq_handling:1;
+ u8 mmcimask1:1;
+ unsigned int irq_pio_mask;
u32 start_err;
u32 opendrain;
+ u8 dma_lli:1;
+ u32 stm32_idmabsize_mask;
void (*init)(struct mmci_host *host);
};
/* mmci variant callbacks */
struct mmci_host_ops {
- void (*dma_setup)(struct mmci_host *host);
-};
-
-struct mmci_host_next {
- struct dma_async_tx_descriptor *dma_desc;
- struct dma_chan *dma_chan;
- s32 cookie;
+ int (*validate_data)(struct mmci_host *host, struct mmc_data *data);
+ int (*prep_data)(struct mmci_host *host, struct mmc_data *data,
+ bool next);
+ void (*unprep_data)(struct mmci_host *host, struct mmc_data *data,
+ int err);
+ void (*get_next_data)(struct mmci_host *host, struct mmc_data *data);
+ int (*dma_setup)(struct mmci_host *host);
+ void (*dma_release)(struct mmci_host *host);
+ int (*dma_start)(struct mmci_host *host, unsigned int *datactrl);
+ void (*dma_finalize)(struct mmci_host *host, struct mmc_data *data);
+ void (*dma_error)(struct mmci_host *host);
+ void (*set_clkreg)(struct mmci_host *host, unsigned int desired);
+ void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr);
};
struct mmci_host {
@@ -290,7 +378,9 @@ struct mmci_host {
struct mmc_data *data;
struct mmc_host *mmc;
struct clk *clk;
- bool singleirq;
+ u8 singleirq:1;
+
+ struct reset_control *rst;
spinlock_t lock;
@@ -301,10 +391,11 @@ struct mmci_host {
u32 pwr_reg;
u32 pwr_reg_add;
u32 clk_reg;
+ u32 clk_reg_add;
u32 datactrl_reg;
u32 busy_status;
u32 mask1_reg;
- bool vqmmc_enabled;
+ u8 vqmmc_enabled:1;
struct mmci_platform_data *plat;
struct mmci_host_ops *ops;
struct variant_data *variant;
@@ -323,18 +414,25 @@ struct mmci_host {
unsigned int size;
int (*get_rx_fifocnt)(struct mmci_host *h, u32 status, int remain);
-#ifdef CONFIG_DMA_ENGINE
- /* DMA stuff */
- struct dma_chan *dma_current;
- struct dma_chan *dma_rx_channel;
- struct dma_chan *dma_tx_channel;
- struct dma_async_tx_descriptor *dma_desc_current;
- struct mmci_host_next next_data;
- bool dma_in_progress;
+ u8 use_dma:1;
+ u8 dma_in_progress:1;
+ void *dma_priv;
-#define dma_inprogress(host) ((host)->dma_in_progress)
-#else
-#define dma_inprogress(host) (0)
-#endif
+ s32 next_cookie;
};
+#define dma_inprogress(host) ((host)->dma_in_progress)
+
+void mmci_write_clkreg(struct mmci_host *host, u32 clk);
+void mmci_write_pwrreg(struct mmci_host *host, u32 pwr);
+
+int mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
+ bool next);
+void mmci_dmae_unprep_data(struct mmci_host *host, struct mmc_data *data,
+ int err);
+void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data);
+int mmci_dmae_setup(struct mmci_host *host);
+void mmci_dmae_release(struct mmci_host *host);
+int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl);
+void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data);
+void mmci_dmae_error(struct mmci_host *host);
diff --git a/drivers/mmc/host/mmci_qcom_dml.c b/drivers/mmc/host/mmci_qcom_dml.c
index be3fab5db83f..25d0a75533ea 100644
--- a/drivers/mmc/host/mmci_qcom_dml.c
+++ b/drivers/mmc/host/mmci_qcom_dml.c
@@ -119,19 +119,23 @@ static int of_get_dml_pipe_index(struct device_node *np, const char *name)
}
/* Initialize the dml hardware connected to SD Card controller */
-static void qcom_dma_setup(struct mmci_host *host)
+static int qcom_dma_setup(struct mmci_host *host)
{
u32 config;
void __iomem *base;
int consumer_id, producer_id;
struct device_node *np = host->mmc->parent->of_node;
+ if (mmci_dmae_setup(host))
+ return -EINVAL;
+
consumer_id = of_get_dml_pipe_index(np, "tx");
producer_id = of_get_dml_pipe_index(np, "rx");
if (producer_id < 0 || consumer_id < 0) {
host->variant->qcom_dml = false;
- return;
+ mmci_dmae_release(host);
+ return -EINVAL;
}
base = host->base + DML_OFFSET;
@@ -175,10 +179,19 @@ static void qcom_dma_setup(struct mmci_host *host)
/* Make sure dml initialization is finished */
mb();
+
+ return 0;
}
static struct mmci_host_ops qcom_variant_ops = {
+ .prep_data = mmci_dmae_prep_data,
+ .unprep_data = mmci_dmae_unprep_data,
+ .get_next_data = mmci_dmae_get_next_data,
.dma_setup = qcom_dma_setup,
+ .dma_release = mmci_dmae_release,
+ .dma_start = mmci_dmae_start,
+ .dma_finalize = mmci_dmae_finalize,
+ .dma_error = mmci_dmae_error,
};
void qcom_variant_init(struct mmci_host *host)
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
new file mode 100644
index 000000000000..cfbfc6f1048f
--- /dev/null
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Ludovic.barre@st.com for STMicroelectronics.
+ */
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/reset.h>
+#include <linux/scatterlist.h>
+#include "mmci.h"
+
+#define SDMMC_LLI_BUF_LEN PAGE_SIZE
+#define SDMMC_IDMA_BURST BIT(MMCI_STM32_IDMABNDT_SHIFT)
+
+struct sdmmc_lli_desc {
+ u32 idmalar;
+ u32 idmabase;
+ u32 idmasize;
+};
+
+struct sdmmc_priv {
+ dma_addr_t sg_dma;
+ void *sg_cpu;
+};
+
+static int sdmmc_idma_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ int i;
+
+ /*
+ * idma has constraints on idmabase & idmasize for each element,
+ * except the last element, which has no constraint on idmasize
+ */
+ for_each_sg(data->sg, sg, data->sg_len - 1, i) {
+ if (!IS_ALIGNED(sg_dma_address(sg), sizeof(u32)) ||
+ !IS_ALIGNED(sg_dma_len(sg), SDMMC_IDMA_BURST)) {
+ dev_err(mmc_dev(host->mmc),
+ "unaligned scatterlist: ofst:%x length:%d\n",
+ sg->offset, sg->length);
+ return -EINVAL;
+ }
+ }
+
+ if (!IS_ALIGNED(sg_dma_address(sg), sizeof(u32))) {
+ dev_err(mmc_dev(host->mmc),
+ "unaligned last scatterlist: ofst:%x length:%d\n",
+ sg->offset, sg->length);
+ return -EINVAL;
+ }
+
+ return 0;
+}
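
A standalone sketch of the rule the validator above enforces: every element except the last must start 32-bit aligned and have a length that is a multiple of the IDMA burst (userspace C, made-up address and length):

#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define SDMMC_IDMA_BURST	(1U << 5)	/* BIT(MMCI_STM32_IDMABNDT_SHIFT) */

int main(void)
{
	uint32_t addr = 0x20000040;	/* fake DMA address */
	uint32_t len = 512;		/* fake element length */

	if (IS_ALIGNED(addr, sizeof(uint32_t)) &&
	    IS_ALIGNED(len, SDMMC_IDMA_BURST))
		puts("element OK for IDMA");
	else
		puts("unaligned: validate_data would return -EINVAL");
	return 0;
}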
+
+static int _sdmmc_idma_prep_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ int n_elem;
+
+ n_elem = dma_map_sg(mmc_dev(host->mmc),
+ data->sg,
+ data->sg_len,
+ mmc_get_dma_dir(data));
+
+ if (!n_elem) {
+ dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sdmmc_idma_prep_data(struct mmci_host *host,
+ struct mmc_data *data, bool next)
+{
+ /* Check if job is already prepared. */
+ if (!next && data->host_cookie == host->next_cookie)
+ return 0;
+
+ return _sdmmc_idma_prep_data(host, data);
+}
+
+static void sdmmc_idma_unprep_data(struct mmci_host *host,
+ struct mmc_data *data, int err)
+{
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+}
+
+static int sdmmc_idma_setup(struct mmci_host *host)
+{
+ struct sdmmc_priv *idma;
+
+ idma = devm_kzalloc(mmc_dev(host->mmc), sizeof(*idma), GFP_KERNEL);
+ if (!idma)
+ return -ENOMEM;
+
+ host->dma_priv = idma;
+
+ if (host->variant->dma_lli) {
+ idma->sg_cpu = dmam_alloc_coherent(mmc_dev(host->mmc),
+ SDMMC_LLI_BUF_LEN,
+ &idma->sg_dma, GFP_KERNEL);
+ if (!idma->sg_cpu) {
+ dev_err(mmc_dev(host->mmc),
+ "Failed to alloc IDMA descriptor\n");
+ return -ENOMEM;
+ }
+ host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
+ sizeof(struct sdmmc_lli_desc);
+ host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;
+ } else {
+ host->mmc->max_segs = 1;
+ host->mmc->max_seg_size = host->mmc->max_req_size;
+ }
+
+ return 0;
+}
+
+static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
+
+{
+ struct sdmmc_priv *idma = host->dma_priv;
+ struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
+ struct mmc_data *data = host->data;
+ struct scatterlist *sg;
+ int i;
+
+ if (!host->variant->dma_lli || data->sg_len == 1) {
+ writel_relaxed(sg_dma_address(data->sg),
+ host->base + MMCI_STM32_IDMABASE0R);
+ writel_relaxed(MMCI_STM32_IDMAEN,
+ host->base + MMCI_STM32_IDMACTRLR);
+ return 0;
+ }
+
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ desc[i].idmalar = (i + 1) * sizeof(struct sdmmc_lli_desc);
+ desc[i].idmalar |= MMCI_STM32_ULA | MMCI_STM32_ULS
+ | MMCI_STM32_ABR;
+ desc[i].idmabase = sg_dma_address(sg);
+ desc[i].idmasize = sg_dma_len(sg);
+ }
+
+ /* mark the end of the linked list */
+ desc[data->sg_len - 1].idmalar &= ~MMCI_STM32_ULA;
+
+ dma_wmb();
+ writel_relaxed(idma->sg_dma, host->base + MMCI_STM32_IDMABAR);
+ writel_relaxed(desc[0].idmalar, host->base + MMCI_STM32_IDMALAR);
+ writel_relaxed(desc[0].idmabase, host->base + MMCI_STM32_IDMABASE0R);
+ writel_relaxed(desc[0].idmasize, host->base + MMCI_STM32_IDMABSIZER);
+ writel_relaxed(MMCI_STM32_IDMAEN | MMCI_STM32_IDMALLIEN,
+ host->base + MMCI_STM32_IDMACTRLR);
+
+ return 0;
+}
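
A standalone sketch of the linked-list layout programmed above: each descriptor's idmalar carries the byte offset of the next entry plus the ULA/ULS/ABR update flags, and the last entry clears ULA so the engine stops there (userspace C, fake buffer addresses):

#include <stdio.h>

#define ULA	(1U << 31)	/* MMCI_STM32_ULA: update link address */
#define ULS	(1U << 30)	/* MMCI_STM32_ULS: update link size */
#define ABR	(1U << 29)	/* MMCI_STM32_ABR: auto buffer relocation */

struct lli {
	unsigned int idmalar;
	unsigned int idmabase;
	unsigned int idmasize;
};

int main(void)
{
	struct lli desc[3];
	unsigned int i, n = 3;

	for (i = 0; i < n; i++) {
		desc[i].idmalar = (i + 1) * (unsigned int)sizeof(struct lli);
		desc[i].idmalar |= ULA | ULS | ABR;
		desc[i].idmabase = 0x20000000 + i * 0x1000;	/* fake buffer */
		desc[i].idmasize = 0x1000;
	}
	desc[n - 1].idmalar &= ~ULA;	/* last link: stop here */

	for (i = 0; i < n; i++)
		printf("desc[%u]: lar=0x%08x base=0x%08x size=%u\n", i,
		       desc[i].idmalar, desc[i].idmabase, desc[i].idmasize);
	return 0;
}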
+
+static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
+ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+}
+
+static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
+{
+ unsigned int clk = 0, ddr = 0;
+
+ if (host->mmc->ios.timing == MMC_TIMING_MMC_DDR52 ||
+ host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ ddr = MCI_STM32_CLK_DDR;
+
+ /*
+ * cclk = mclk / (2 * clkdiv)
+ * clkdiv 0 => bypass
+ * in ddr mode bypass is not possible
+ */
+ if (desired) {
+ if (desired >= host->mclk && !ddr) {
+ host->cclk = host->mclk;
+ } else {
+ clk = DIV_ROUND_UP(host->mclk, 2 * desired);
+ if (clk > MCI_STM32_CLK_CLKDIV_MSK)
+ clk = MCI_STM32_CLK_CLKDIV_MSK;
+ host->cclk = host->mclk / (2 * clk);
+ }
+ } else {
+ /*
+ * during the power-on phase the clock can't be set to 0;
+ * only power-off and power-cycle deactivate the clock.
+ * if the desired clock is 0, set the max divider
+ */
+ clk = MCI_STM32_CLK_CLKDIV_MSK;
+ host->cclk = host->mclk / (2 * clk);
+ }
+
+ /* Set actual clock for debug */
+ if (host->mmc->ios.power_mode == MMC_POWER_ON)
+ host->mmc->actual_clock = host->cclk;
+ else
+ host->mmc->actual_clock = 0;
+
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ clk |= MCI_STM32_CLK_WIDEBUS_4;
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+ clk |= MCI_STM32_CLK_WIDEBUS_8;
+
+ clk |= MCI_STM32_CLK_HWFCEN;
+ clk |= host->clk_reg_add;
+ clk |= ddr;
+
+ /*
+ * SDMMC_FBCK is selected when an external Delay Block is needed
+ * with SDR104.
+ */
+ if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50) {
+ clk |= MCI_STM32_CLK_BUSSPEED;
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) {
+ clk &= ~MCI_STM32_CLK_SEL_MSK;
+ clk |= MCI_STM32_CLK_SELFBCK;
+ }
+ }
+
+ mmci_write_clkreg(host, clk);
+}
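
A standalone sketch of the divider arithmetic above: clkdiv = DIV_ROUND_UP(mclk, 2 * desired) clamped to the 10-bit field, so cclk = mclk / (2 * clkdiv); this is also why probe computes f_min as mclk / 2046, i.e. twice the maximum divider of 1023 (userspace C, made-up frequencies):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define CLKDIV_MAX		0x3ffU	/* MCI_STM32_CLK_CLKDIV_MSK */

int main(void)
{
	unsigned int mclk = 48000000;	/* 48 MHz input clock */
	unsigned int desired = 400000;	/* 400 kHz card identification clock */
	unsigned int clkdiv, cclk;

	clkdiv = DIV_ROUND_UP(mclk, 2 * desired);
	if (clkdiv > CLKDIV_MAX)
		clkdiv = CLKDIV_MAX;
	cclk = mclk / (2 * clkdiv);

	printf("clkdiv=%u cclk=%u Hz\n", clkdiv, cclk);	/* 60, 400000 */
	return 0;
}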
+
+static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
+{
+ struct mmc_ios ios = host->mmc->ios;
+
+ pwr = host->pwr_reg_add;
+
+ if (ios.power_mode == MMC_POWER_OFF) {
+ /* Only a reset could power-off sdmmc */
+ reset_control_assert(host->rst);
+ udelay(2);
+ reset_control_deassert(host->rst);
+
+ /*
+ * Set the SDMMC in Power-cycle state.
+ * This ensures that SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK
+ * are driven low, to prevent the Card from being supplied
+ * through the signal lines.
+ */
+ mmci_write_pwrreg(host, MCI_STM32_PWR_CYC | pwr);
+ } else if (ios.power_mode == MMC_POWER_ON) {
+ /*
+ * After power-off (reset), the irq mask defined in the probe
+ * function is lost; the default irq mask from probe must be
+ * reactivated
+ */
+ writel(MCI_IRQENABLE | host->variant->start_err,
+ host->base + MMCIMASK0);
+
+ /*
+ * After a power-cycle state, we must set the SDMMC to
+ * Power-off; SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK are
+ * driven high. Then we can set the SDMMC to the Power-on state.
+ */
+ mmci_write_pwrreg(host, MCI_PWR_OFF | pwr);
+ mdelay(1);
+ mmci_write_pwrreg(host, MCI_PWR_ON | pwr);
+ }
+}
+
+static struct mmci_host_ops sdmmc_variant_ops = {
+ .validate_data = sdmmc_idma_validate_data,
+ .prep_data = sdmmc_idma_prep_data,
+ .unprep_data = sdmmc_idma_unprep_data,
+ .dma_setup = sdmmc_idma_setup,
+ .dma_start = sdmmc_idma_start,
+ .dma_finalize = sdmmc_idma_finalize,
+ .set_clkreg = mmci_sdmmc_set_clkreg,
+ .set_pwrreg = mmci_sdmmc_set_pwrreg,
+};
+
+void sdmmc_variant_init(struct mmci_host *host)
+{
+ host->ops = &sdmmc_variant_ops;
+}
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 04841386b65d..6334cc752d8b 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -87,6 +87,13 @@
#define SDC_FIFO_CFG 0x228
/*--------------------------------------------------------------------------*/
+/* Top Pad Register Offset */
+/*--------------------------------------------------------------------------*/
+#define EMMC_TOP_CONTROL 0x00
+#define EMMC_TOP_CMD 0x04
+#define EMMC50_PAD_DS_TUNE 0x0c
+
+/*--------------------------------------------------------------------------*/
/* Register Mask */
/*--------------------------------------------------------------------------*/
@@ -261,6 +268,23 @@
#define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */
#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */
+/* EMMC_TOP_CONTROL mask */
+#define PAD_RXDLY_SEL (0x1 << 0) /* RW */
+#define DELAY_EN (0x1 << 1) /* RW */
+#define PAD_DAT_RD_RXDLY2 (0x1f << 2) /* RW */
+#define PAD_DAT_RD_RXDLY (0x1f << 7) /* RW */
+#define PAD_DAT_RD_RXDLY2_SEL (0x1 << 12) /* RW */
+#define PAD_DAT_RD_RXDLY_SEL (0x1 << 13) /* RW */
+#define DATA_K_VALUE_SEL (0x1 << 14) /* RW */
+#define SDC_RX_ENH_EN (0x1 << 15) /* RW */
+
+/* EMMC_TOP_CMD mask */
+#define PAD_CMD_RXDLY2 (0x1f << 0) /* RW */
+#define PAD_CMD_RXDLY (0x1f << 5) /* RW */
+#define PAD_CMD_RD_RXDLY2_SEL (0x1 << 10) /* RW */
+#define PAD_CMD_RD_RXDLY_SEL (0x1 << 11) /* RW */
+#define PAD_CMD_TX_DLY (0x1f << 12) /* RW */
+
#define REQ_CMD_EIO (0x1 << 0)
#define REQ_CMD_TMO (0x1 << 1)
#define REQ_DAT_ERR (0x1 << 2)
@@ -333,6 +357,9 @@ struct msdc_save_para {
u32 emmc50_cfg0;
u32 emmc50_cfg3;
u32 sdc_fifo_cfg;
+ u32 emmc_top_control;
+ u32 emmc_top_cmd;
+ u32 emmc50_pad_ds_tune;
};
struct mtk_mmc_compatible {
@@ -351,6 +378,8 @@ struct msdc_tune_para {
u32 iocon;
u32 pad_tune;
u32 pad_cmd_tune;
+ u32 emmc_top_control;
+ u32 emmc_top_cmd;
};
struct msdc_delay_phase {
@@ -372,6 +401,7 @@ struct msdc_host {
int error;
void __iomem *base; /* host base address */
+ void __iomem *top_base; /* host top register base address */
struct msdc_dma dma; /* dma channel */
u64 dma_mask;
@@ -387,10 +417,10 @@ struct msdc_host {
struct clk *src_clk; /* msdc source clock */
struct clk *h_clk; /* msdc h_clk */
+ struct clk *bus_clk; /* bus clock which used to access register */
struct clk *src_clk_cg; /* msdc source clock control gate */
u32 mclk; /* mmc subsystem clock frequency */
u32 src_clk_freq; /* source clock frequency */
- u32 sclk; /* SD/MS bus clock frequency */
unsigned char timing;
bool vqmmc_enabled;
u32 latch_ck;
@@ -429,6 +459,18 @@ static const struct mtk_mmc_compatible mt8173_compat = {
.support_64g = false,
};
+static const struct mtk_mmc_compatible mt8183_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .enhance_rx = true,
+ .support_64g = true,
+};
+
static const struct mtk_mmc_compatible mt2701_compat = {
.clk_div_bits = 12,
.hs400_tune = false,
@@ -468,6 +510,7 @@ static const struct mtk_mmc_compatible mt7622_compat = {
static const struct of_device_id msdc_of_ids[] = {
{ .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
{ .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
+ { .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
{ .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
{ .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
@@ -635,10 +678,10 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
host->timeout_ns = ns;
host->timeout_clks = clks;
- if (host->sclk == 0) {
+ if (host->mmc->actual_clock == 0) {
timeout = 0;
} else {
- clk_ns = 1000000000UL / host->sclk;
+ clk_ns = 1000000000UL / host->mmc->actual_clock;
timeout = (ns + clk_ns - 1) / clk_ns + clks;
/* in 1048576 sclk cycle unit */
timeout = (timeout + (0x1 << 20) - 1) >> 20;
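
A standalone sketch of the conversion above: a nanosecond budget plus a raw clock count become cycles of the actual bus clock, rounded up into the controller's 1048576-cycle timeout units (userspace C, made-up clock and budget):

#include <stdio.h>

int main(void)
{
	unsigned int actual_clock = 50000000;	/* 50 MHz bus clock */
	unsigned int ns = 100000000;		/* 100 ms data timeout */
	unsigned int clks = 0;			/* extra raw clock count */
	unsigned int clk_ns, timeout;

	clk_ns = 1000000000UL / actual_clock;		/* 20 ns per cycle */
	timeout = (ns + clk_ns - 1) / clk_ns + clks;	/* 5000000 cycles */
	timeout = (timeout + (0x1 << 20) - 1) >> 20;	/* ceil: 5 units */

	printf("timeout = %u x 1048576 cycles\n", timeout);
	return 0;
}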
@@ -660,12 +703,14 @@ static void msdc_gate_clock(struct msdc_host *host)
{
clk_disable_unprepare(host->src_clk_cg);
clk_disable_unprepare(host->src_clk);
+ clk_disable_unprepare(host->bus_clk);
clk_disable_unprepare(host->h_clk);
}
static void msdc_ungate_clock(struct msdc_host *host)
{
clk_prepare_enable(host->h_clk);
+ clk_prepare_enable(host->bus_clk);
clk_prepare_enable(host->src_clk);
clk_prepare_enable(host->src_clk_cg);
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
@@ -683,6 +728,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
host->mclk = 0;
+ host->mmc->actual_clock = 0;
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
return;
}
@@ -761,7 +807,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
- host->sclk = sclk;
+ host->mmc->actual_clock = sclk;
host->mclk = hz;
host->timing = timing;
/* needed because clk changed. */
@@ -772,14 +818,30 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
* mmc_select_hs400() will drop to 50Mhz and High speed mode,
* tune result of hs200/200Mhz is not suitable for 50Mhz
*/
- if (host->sclk <= 52000000) {
+ if (host->mmc->actual_clock <= 52000000) {
writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->def_tune_para.pad_tune, host->base + tune_reg);
+ if (host->top_base) {
+ writel(host->def_tune_para.emmc_top_control,
+ host->top_base + EMMC_TOP_CONTROL);
+ writel(host->def_tune_para.emmc_top_cmd,
+ host->top_base + EMMC_TOP_CMD);
+ } else {
+ writel(host->def_tune_para.pad_tune,
+ host->base + tune_reg);
+ }
} else {
writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->saved_tune_para.pad_tune, host->base + tune_reg);
writel(host->saved_tune_para.pad_cmd_tune,
host->base + PAD_CMD_TUNE);
+ if (host->top_base) {
+ writel(host->saved_tune_para.emmc_top_control,
+ host->top_base + EMMC_TOP_CONTROL);
+ writel(host->saved_tune_para.emmc_top_cmd,
+ host->top_base + EMMC_TOP_CMD);
+ } else {
+ writel(host->saved_tune_para.pad_tune,
+ host->base + tune_reg);
+ }
}
if (timing == MMC_TIMING_MMC_HS400 &&
@@ -787,7 +849,8 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
sdr_set_field(host->base + PAD_CMD_TUNE,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
- dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
+ dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
+ timing);
}
static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
@@ -1055,6 +1118,7 @@ static void msdc_start_command(struct msdc_host *host,
WARN_ON(host->cmd);
host->cmd = cmd;
+ mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
if (!msdc_cmd_is_ready(host, mrq, cmd))
return;
@@ -1066,7 +1130,6 @@ static void msdc_start_command(struct msdc_host *host,
cmd->error = 0;
rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
- mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
writel(cmd->arg, host->base + SDC_ARG);
@@ -1351,7 +1414,12 @@ static void msdc_init_hw(struct msdc_host *host)
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
- writel(0, host->base + tune_reg);
+ if (host->top_base) {
+ writel(0, host->top_base + EMMC_TOP_CONTROL);
+ writel(0, host->top_base + EMMC_TOP_CMD);
+ } else {
+ writel(0, host->base + tune_reg);
+ }
writel(0, host->base + MSDC_IOCON);
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
writel(0x403c0046, host->base + MSDC_PATCH_BIT);
@@ -1375,8 +1443,12 @@ static void msdc_init_hw(struct msdc_host *host)
sdr_set_field(host->base + MSDC_PATCH_BIT2,
MSDC_PB2_RESPWAIT, 3);
if (host->dev_comp->enhance_rx) {
- sdr_set_bits(host->base + SDC_ADV_CFG0,
- SDC_RX_ENHANCE_EN);
+ if (host->top_base)
+ sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
+ SDC_RX_ENH_EN);
+ else
+ sdr_set_bits(host->base + SDC_ADV_CFG0,
+ SDC_RX_ENHANCE_EN);
} else {
sdr_set_field(host->base + MSDC_PATCH_BIT2,
MSDC_PB2_RESPSTSENSEL, 2);
@@ -1394,11 +1466,26 @@ static void msdc_init_hw(struct msdc_host *host)
sdr_set_bits(host->base + MSDC_PATCH_BIT2,
MSDC_PB2_SUPPORT_64G);
if (host->dev_comp->data_tune) {
- sdr_set_bits(host->base + tune_reg,
- MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL);
+ if (host->top_base) {
+ sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
+ PAD_DAT_RD_RXDLY_SEL);
+ sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL,
+ DATA_K_VALUE_SEL);
+ sdr_set_bits(host->top_base + EMMC_TOP_CMD,
+ PAD_CMD_RD_RXDLY_SEL);
+ } else {
+ sdr_set_bits(host->base + tune_reg,
+ MSDC_PAD_TUNE_RD_SEL |
+ MSDC_PAD_TUNE_CMD_SEL);
+ }
} else {
/* choose clock tune */
- sdr_set_bits(host->base + tune_reg, MSDC_PAD_TUNE_RXDLYSEL);
+ if (host->top_base)
+ sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
+ PAD_RXDLY_SEL);
+ else
+ sdr_set_bits(host->base + tune_reg,
+ MSDC_PAD_TUNE_RXDLYSEL);
}
/* Configure to enable SDIO mode.
@@ -1413,9 +1500,20 @@ static void msdc_init_hw(struct msdc_host *host)
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->def_tune_para.pad_tune = readl(host->base + tune_reg);
host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
+ if (host->top_base) {
+ host->def_tune_para.emmc_top_control =
+ readl(host->top_base + EMMC_TOP_CONTROL);
+ host->def_tune_para.emmc_top_cmd =
+ readl(host->top_base + EMMC_TOP_CMD);
+ host->saved_tune_para.emmc_top_control =
+ readl(host->top_base + EMMC_TOP_CONTROL);
+ host->saved_tune_para.emmc_top_cmd =
+ readl(host->top_base + EMMC_TOP_CMD);
+ } else {
+ host->def_tune_para.pad_tune = readl(host->base + tune_reg);
+ host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
+ }
dev_dbg(host->dev, "init hardware done!");
}
@@ -1563,6 +1661,30 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
return delay_phase;
}
+static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value)
+{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
+ if (host->top_base)
+ sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY,
+ value);
+ else
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
+ value);
+}
+
+static inline void msdc_set_data_delay(struct msdc_host *host, u32 value)
+{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
+ if (host->top_base)
+ sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
+ PAD_DAT_RD_RXDLY, value);
+ else
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY,
+ value);
+}
+
static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
@@ -1583,8 +1705,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + tune_reg,
- MSDC_PAD_TUNE_CMDRDLY, i);
+ msdc_set_cmd_delay(host, i);
/*
* Using the same parameters, it may sometimes pass the test,
* but sometimes it may fail. To make sure the parameters are
@@ -1608,8 +1729,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + tune_reg,
- MSDC_PAD_TUNE_CMDRDLY, i);
+ msdc_set_cmd_delay(host, i);
/*
* Using the same parameters, it may sometimes pass the test,
* but sometimes it may fail. To make sure the parameters are
@@ -1633,15 +1753,13 @@ skip_fall:
final_maxlen = final_fall_delay.maxlen;
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
- final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
- final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
}
+ msdc_set_cmd_delay(host, final_delay);
+
if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
goto skip_internal;
@@ -1716,7 +1834,6 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
u32 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
- u32 tune_reg = host->dev_comp->pad_tune_reg;
int i, ret;
sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
@@ -1724,8 +1841,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + tune_reg,
- MSDC_PAD_TUNE_DATRRDLY, i);
+ msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
rise_delay |= (1 << i);
@@ -1739,8 +1855,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + tune_reg,
- MSDC_PAD_TUNE_DATRRDLY, i);
+ msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
fall_delay |= (1 << i);
@@ -1752,29 +1867,97 @@ skip_fall:
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + tune_reg,
- MSDC_PAD_TUNE_DATRRDLY,
- final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + tune_reg,
- MSDC_PAD_TUNE_DATRRDLY,
- final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
}
+ msdc_set_data_delay(host, final_delay);
dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay);
return final_delay == 0xff ? -EIO : 0;
}
+/*
+ * MSDC IPs which support data tune + async fifo can do CMD/DAT tune
+ * together, which saves tuning time.
+ */
+static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ u32 rise_delay = 0, fall_delay = 0;
+ struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
+ u8 final_delay, final_maxlen;
+ int i, ret;
+
+ sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
+ host->latch_ck);
+
+ sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+ sdr_clr_bits(host->base + MSDC_IOCON,
+ MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+ for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+ msdc_set_cmd_delay(host, i);
+ msdc_set_data_delay(host, i);
+ ret = mmc_send_tuning(mmc, opcode, NULL);
+ if (!ret)
+ rise_delay |= (1 << i);
+ }
+ final_rise_delay = get_best_delay(host, rise_delay);
+ /* if rising edge has enough margin, then do not scan falling edge */
+ if (final_rise_delay.maxlen >= 12 ||
+ (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
+ goto skip_fall;
+
+ sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+ sdr_set_bits(host->base + MSDC_IOCON,
+ MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+ for (i = 0; i < PAD_DELAY_MAX; i++) {
+ msdc_set_cmd_delay(host, i);
+ msdc_set_data_delay(host, i);
+ ret = mmc_send_tuning(mmc, opcode, NULL);
+ if (!ret)
+ fall_delay |= (1 << i);
+ }
+ final_fall_delay = get_best_delay(host, fall_delay);
+
+skip_fall:
+ final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
+ if (final_maxlen == final_rise_delay.maxlen) {
+ sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+ sdr_clr_bits(host->base + MSDC_IOCON,
+ MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+ final_delay = final_rise_delay.final_phase;
+ } else {
+ sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
+ sdr_set_bits(host->base + MSDC_IOCON,
+ MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+ final_delay = final_fall_delay.final_phase;
+ }
+
+ msdc_set_cmd_delay(host, final_delay);
+ msdc_set_data_delay(host, final_delay);
+
+ dev_dbg(host->dev, "Final pad delay: %x\n", final_delay);
+ return final_delay == 0xff ? -EIO : 0;
+}
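/*
 * Illustrative sketch, not part of the patch: how a pad-delay window scan
 * like the loops above picks its final phase. The tuning loop sets bit i
 * of 'window' when pad delay i passes; get_best_delay() in this driver
 * then takes the midpoint of the longest run of consecutive set bits.
 * This standalone helper (hypothetical name) mirrors that idea.
 */
static unsigned char pick_mid_of_longest_run(unsigned int window)
{
	int i, len = 0, start = 0, best_len = 0, best_start = 0;

	for (i = 0; i < 32; i++) {
		if (window & (1U << i)) {
			if (!len)
				start = i;
			if (++len > best_len) {
				best_len = len;
				best_start = start;
			}
		} else {
			len = 0;
		}
	}
	/* e.g. window = 0x0003ff00: bits 8..17 pass, midpoint is 13 */
	return best_len ? best_start + best_len / 2 : 0xff;
}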
+
static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
int ret;
u32 tune_reg = host->dev_comp->pad_tune_reg;
+ if (host->dev_comp->data_tune && host->dev_comp->async_fifo) {
+ ret = msdc_tune_together(mmc, opcode);
+ if (host->hs400_mode) {
+ sdr_clr_bits(host->base + MSDC_IOCON,
+ MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
+ msdc_set_data_delay(host, 0);
+ }
+ goto tune_done;
+ }
if (host->hs400_mode &&
host->dev_comp->hs400_tune)
ret = hs400_tune_response(mmc, opcode);
@@ -1790,9 +1973,16 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
dev_err(host->dev, "Tune data fail!\n");
}
+tune_done:
host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
+ if (host->top_base) {
+ host->saved_tune_para.emmc_top_control = readl(host->top_base +
+ EMMC_TOP_CONTROL);
+ host->saved_tune_para.emmc_top_cmd = readl(host->top_base +
+ EMMC_TOP_CMD);
+ }
return ret;
}
@@ -1801,7 +1991,11 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
struct msdc_host *host = mmc_priv(mmc);
host->hs400_mode = true;
- writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ if (host->top_base)
+ writel(host->hs400_ds_delay,
+ host->top_base + EMMC50_PAD_DS_TUNE);
+ else
+ writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
/* hs400 mode must set it to 0 */
sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
/* to improve read performance, set outstanding to 2 */
@@ -1884,6 +2078,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
goto host_free;
}
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ host->top_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->top_base))
+ host->top_base = NULL;
+
ret = mmc_regulator_get_supply(mmc);
if (ret)
goto host_free;
@@ -1900,6 +2099,9 @@ static int msdc_drv_probe(struct platform_device *pdev)
goto host_free;
}
+ host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ if (IS_ERR(host->bus_clk))
+ host->bus_clk = NULL;
/* source clock control gate is an optional clock */
host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg");
if (IS_ERR(host->src_clk_cg))
@@ -2049,7 +2251,6 @@ static void msdc_save_reg(struct msdc_host *host)
host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
host->save_para.iocon = readl(host->base + MSDC_IOCON);
host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
- host->save_para.pad_tune = readl(host->base + tune_reg);
host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
@@ -2058,6 +2259,16 @@ static void msdc_save_reg(struct msdc_host *host)
host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
+ if (host->top_base) {
+ host->save_para.emmc_top_control =
+ readl(host->top_base + EMMC_TOP_CONTROL);
+ host->save_para.emmc_top_cmd =
+ readl(host->top_base + EMMC_TOP_CMD);
+ host->save_para.emmc50_pad_ds_tune =
+ readl(host->top_base + EMMC50_PAD_DS_TUNE);
+ } else {
+ host->save_para.pad_tune = readl(host->base + tune_reg);
+ }
}
static void msdc_restore_reg(struct msdc_host *host)
@@ -2067,7 +2278,6 @@ static void msdc_restore_reg(struct msdc_host *host)
writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
writel(host->save_para.iocon, host->base + MSDC_IOCON);
writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
- writel(host->save_para.pad_tune, host->base + tune_reg);
writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
@@ -2076,6 +2286,16 @@ static void msdc_restore_reg(struct msdc_host *host)
writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
+ if (host->top_base) {
+ writel(host->save_para.emmc_top_control,
+ host->top_base + EMMC_TOP_CONTROL);
+ writel(host->save_para.emmc_top_cmd,
+ host->top_base + EMMC_TOP_CMD);
+ writel(host->save_para.emmc50_pad_ds_tune,
+ host->top_base + EMMC50_PAD_DS_TUNE);
+ } else {
+ writel(host->save_para.pad_tune, host->base + tune_reg);
+ }
}
static int msdc_runtime_suspend(struct device *dev)
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index de4e6e5bf304..4d17032d15ee 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -728,7 +728,6 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
static irqreturn_t mxcmci_irq(int irq, void *devid)
{
struct mxcmci_host *host = devid;
- unsigned long flags;
bool sdio_irq;
u32 stat;
@@ -740,9 +739,9 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)
dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
- spin_lock_irqsave(&host->lock, flags);
+ spin_lock(&host->lock);
sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
- spin_unlock_irqrestore(&host->lock, flags);
+ spin_unlock(&host->lock);
if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE)))
mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 68760d4a5d3d..467d889a1638 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -30,7 +30,6 @@
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
@@ -38,7 +37,6 @@
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
@@ -198,7 +196,6 @@ struct omap_hsmmc_host {
struct dma_chan *rx_chan;
int response_busy;
int context_loss;
- int protect_card;
int reqs_blocked;
int req_in_progress;
unsigned long clk_rate;
@@ -207,16 +204,6 @@ struct omap_hsmmc_host {
#define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */
struct omap_hsmmc_next next_data;
struct omap_hsmmc_platform_data *pdata;
-
- /* return MMC cover switch state, can be NULL if not supported.
- *
- * possible return values:
- * 0 - closed
- * 1 - open
- */
- int (*get_cover_state)(struct device *dev);
-
- int (*card_detect)(struct device *dev);
};
struct omap_mmc_of_data {
@@ -226,20 +213,6 @@ struct omap_mmc_of_data {
static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host);
-static int omap_hsmmc_card_detect(struct device *dev)
-{
- struct omap_hsmmc_host *host = dev_get_drvdata(dev);
-
- return mmc_gpio_get_cd(host->mmc);
-}
-
-static int omap_hsmmc_get_cover_state(struct device *dev)
-{
- struct omap_hsmmc_host *host = dev_get_drvdata(dev);
-
- return mmc_gpio_get_cd(host->mmc);
-}
-
static int omap_hsmmc_enable_supply(struct mmc_host *mmc)
{
int ret;
@@ -484,38 +457,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
return 0;
}
-static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id);
-
-static int omap_hsmmc_gpio_init(struct mmc_host *mmc,
- struct omap_hsmmc_host *host,
- struct omap_hsmmc_platform_data *pdata)
-{
- int ret;
-
- if (gpio_is_valid(pdata->gpio_cod)) {
- ret = mmc_gpio_request_cd(mmc, pdata->gpio_cod, 0);
- if (ret)
- return ret;
-
- host->get_cover_state = omap_hsmmc_get_cover_state;
- mmc_gpio_set_cd_isr(mmc, omap_hsmmc_cover_irq);
- } else if (gpio_is_valid(pdata->gpio_cd)) {
- ret = mmc_gpio_request_cd(mmc, pdata->gpio_cd, 0);
- if (ret)
- return ret;
-
- host->card_detect = omap_hsmmc_card_detect;
- }
-
- if (gpio_is_valid(pdata->gpio_wp)) {
- ret = mmc_gpio_request_ro(mmc, pdata->gpio_wp);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
/*
* Start clock to the card
*/
@@ -781,9 +722,6 @@ static void send_init_stream(struct omap_hsmmc_host *host)
int reg = 0;
unsigned long timeout;
- if (host->protect_card)
- return;
-
disable_irq(host->irq);
OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
@@ -804,29 +742,6 @@ static void send_init_stream(struct omap_hsmmc_host *host)
enable_irq(host->irq);
}
-static inline
-int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
-{
- int r = 1;
-
- if (host->get_cover_state)
- r = host->get_cover_state(host->dev);
- return r;
-}
-
-static ssize_t
-omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
- struct omap_hsmmc_host *host = mmc_priv(mmc);
-
- return sprintf(buf, "%s\n",
- omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
-}
-
-static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);
-
static ssize_t
omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1247,44 +1162,6 @@ err:
return ret;
}
-/* Protect the card while the cover is open */
-static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
-{
- if (!host->get_cover_state)
- return;
-
- host->reqs_blocked = 0;
- if (host->get_cover_state(host->dev)) {
- if (host->protect_card) {
- dev_info(host->dev, "%s: cover is closed, "
- "card is now accessible\n",
- mmc_hostname(host->mmc));
- host->protect_card = 0;
- }
- } else {
- if (!host->protect_card) {
- dev_info(host->dev, "%s: cover is open, "
- "card is now inaccessible\n",
- mmc_hostname(host->mmc));
- host->protect_card = 1;
- }
- }
-}
-
-/*
- * irq handler when (cell-phone) cover is mounted/removed
- */
-static irqreturn_t omap_hsmmc_cover_irq(int irq, void *dev_id)
-{
- struct omap_hsmmc_host *host = dev_id;
-
- sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
-
- omap_hsmmc_protect_card(host);
- mmc_detect_change(host->mmc, (HZ * 200) / 1000);
- return IRQ_HANDLED;
-}
-
static void omap_hsmmc_dma_callback(void *param)
{
struct omap_hsmmc_host *host = param;
@@ -1555,24 +1432,7 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
BUG_ON(host->req_in_progress);
BUG_ON(host->dma_ch != -1);
- if (host->protect_card) {
- if (host->reqs_blocked < 3) {
- /*
- * Ensure the controller is left in a consistent
- * state by resetting the command and data state
- * machines.
- */
- omap_hsmmc_reset_controller_fsm(host, SRD);
- omap_hsmmc_reset_controller_fsm(host, SRC);
- host->reqs_blocked += 1;
- }
- req->cmd->error = -EBADF;
- if (req->data)
- req->data->error = -EBADF;
- req->cmd->retries = 0;
- mmc_request_done(mmc, req);
- return;
- } else if (host->reqs_blocked)
+ if (host->reqs_blocked)
host->reqs_blocked = 0;
WARN_ON(host->mrq != NULL);
host->mrq = req;
@@ -1646,15 +1506,6 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
omap_hsmmc_set_bus_mode(host);
}
-static int omap_hsmmc_get_cd(struct mmc_host *mmc)
-{
- struct omap_hsmmc_host *host = mmc_priv(mmc);
-
- if (!host->card_detect)
- return -ENOSYS;
- return host->card_detect(host->dev);
-}
-
static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
@@ -1793,7 +1644,7 @@ static struct mmc_host_ops omap_hsmmc_ops = {
.pre_req = omap_hsmmc_pre_req,
.request = omap_hsmmc_request,
.set_ios = omap_hsmmc_set_ios,
- .get_cd = omap_hsmmc_get_cd,
+ .get_cd = mmc_gpio_get_cd,
.get_ro = mmc_gpio_get_ro,
.init_card = omap_hsmmc_init_card,
.enable_sdio_irq = omap_hsmmc_enable_sdio_irq,
@@ -1920,10 +1771,6 @@ static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
if (of_find_property(np, "ti,dual-volt", NULL))
pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;
- pdata->gpio_cd = -EINVAL;
- pdata->gpio_cod = -EINVAL;
- pdata->gpio_wp = -EINVAL;
-
if (of_find_property(np, "ti,non-removable", NULL)) {
pdata->nonremovable = true;
pdata->no_regulator_off_init = true;
@@ -2008,10 +1855,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
host->pbias_enabled = 0;
host->vqmmc_enabled = 0;
- ret = omap_hsmmc_gpio_init(mmc, host, pdata);
- if (ret)
- goto err_gpio;
-
platform_set_drvdata(pdev, host);
if (pdev->dev.of_node)
@@ -2125,8 +1968,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
if (!ret)
mmc->caps |= MMC_CAP_SDIO_IRQ;
- omap_hsmmc_protect_card(host);
-
mmc_add_host(mmc);
if (mmc_pdata(host)->name != NULL) {
@@ -2134,12 +1975,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_slot_name;
}
- if (host->get_cover_state) {
- ret = device_create_file(&mmc->class_dev,
- &dev_attr_cover_switch);
- if (ret < 0)
- goto err_slot_name;
- }
omap_hsmmc_debugfs(mmc);
pm_runtime_mark_last_busy(host->dev);
@@ -2161,7 +1996,6 @@ err_irq:
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
err1:
-err_gpio:
mmc_free_host(mmc);
err:
return ret;
@@ -2231,7 +2065,6 @@ static int omap_hsmmc_resume(struct device *dev)
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
omap_hsmmc_conf_bus_power(host);
- omap_hsmmc_protect_card(host);
pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
return 0;
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index f13f798d8506..da1e49c45bec 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Renesas Mobile SDHI
*
* Copyright (C) 2017 Horms Solutions Ltd., Simon Horman
* Copyright (C) 2017 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef RENESAS_SDHI_H
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 777e32b0e410..d3ac43c3d0b6 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas SDHI
*
@@ -6,10 +7,6 @@
* Copyright (C) 2016-17 Horms Solutions, Simon Horman
* Copyright (C) 2009 Magnus Damm
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Based on "Compaq ASIC3 support":
*
* Copyright 2001 Compaq Computer Corporation.
@@ -155,6 +152,52 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
return ret == 0 ? best_freq : clk_get_rate(priv->clk);
}
+static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
+ unsigned int new_clock)
+{
+ u32 clk = 0, clock;
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ if (new_clock == 0)
+ goto out;
+
+ /*
+ * Both HS400 and HS200/SDR104 set 200MHz, but some devices need to
+ * set 400MHz to distinguish the CPG settings in HS400.
+ */
+ if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
+ host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400 &&
+ new_clock == 200000000)
+ new_clock = 400000000;
+
+ clock = renesas_sdhi_clk_update(host, new_clock) / 512;
+
+ for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
+ clock <<= 1;
+
+ /* the 1/1 clock is optional */
+ if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1)) {
+ if (!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400))
+ clk |= 0xff;
+ else
+ clk &= ~0xff;
+ }
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
+ if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
+ usleep_range(10000, 11000);
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+out:
+ /* HW engineers overrode docs: no sleep needed on R-Car2+ */
+ if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
+ usleep_range(10000, 11000);
+}
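/*
 * Illustrative sketch, not part of the patch: the divider search used by
 * renesas_sdhi_set_clock() above. Starting from base_rate / 512, the
 * achievable clock is doubled (and the divider pattern shifted right)
 * until one more doubling would overshoot the requested rate. With
 * base_rate = 200 MHz and target = 50 MHz, 390625 Hz is doubled seven
 * times to 50 MHz, leaving clk = 0x80000080 >> 7 = 0x01000001.
 */
static unsigned int pick_divider(unsigned int base_rate, unsigned int target)
{
	unsigned int clk, clock = base_rate / 512;

	for (clk = 0x80000080; target >= (clock << 1); clk >>= 1)
		clock <<= 1;

	return clk;	/* the low bits form the CLK_CTL_DIV field */
}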
+
static void renesas_sdhi_clk_disable(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
@@ -443,6 +486,19 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
{
struct renesas_sdhi *priv = host_to_priv(host);
+ bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400;
+
+ /*
+ * Skip checking SCC errors when running on 4 taps in HS400 mode as
+ * any retuning would still result in the same 4 taps being used.
+ */
+ if (!(host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) &&
+ !(host->mmc->ios.timing == MMC_TIMING_MMC_HS200) &&
+ !(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && !use_4tap))
+ return false;
+
+ if (mmc_doing_retune(host->mmc))
+ return false;
/* Check SCC error */
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
@@ -620,8 +676,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->write16_hook = renesas_sdhi_write16_hook;
host->clk_enable = renesas_sdhi_clk_enable;
- host->clk_update = renesas_sdhi_clk_update;
host->clk_disable = renesas_sdhi_clk_disable;
+ host->set_clock = renesas_sdhi_set_clock;
host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
host->dma_ops = dma_ops;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index ca0b43973769..b6f54102bfdd 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DMA support for Internal DMAC with SDHI SD/SDIO controller
*
* Copyright (C) 2016-17 Renesas Electronics Corporation
* Copyright (C) 2016-17 Horms Solutions, Simon Horman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
@@ -35,8 +32,8 @@
/* DM_CM_DTRAN_MODE */
#define DTRAN_MODE_CH_NUM_CH0 0 /* "downstream" = for write commands */
-#define DTRAN_MODE_CH_NUM_CH1 BIT(16) /* "uptream" = for read commands */
-#define DTRAN_MODE_BUS_WID_TH (BIT(5) | BIT(4))
+#define DTRAN_MODE_CH_NUM_CH1 BIT(16) /* "upstream" = for read commands */
+#define DTRAN_MODE_BUS_WIDTH (BIT(5) | BIT(4))
#define DTRAN_MODE_ADDR_MODE BIT(0) /* 1 = Increment address */
/* DM_CM_DTRAN_CTRL */
@@ -116,6 +113,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
};
static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
+ { .compatible = "renesas,sdhi-mmc-r8a77470", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_r8a7795_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_r8a7795_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
@@ -174,7 +172,7 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
struct scatterlist *sg = host->sg_ptr;
- u32 dtran_mode = DTRAN_MODE_BUS_WID_TH | DTRAN_MODE_ADDR_MODE;
+ u32 dtran_mode = DTRAN_MODE_BUS_WIDTH | DTRAN_MODE_ADDR_MODE;
if (!dma_map_sg(&host->pdev->dev, sg, host->sg_len,
mmc_get_dma_dir(data)))
@@ -201,13 +199,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR,
sg_dma_address(sg));
+ host->dma_on = true;
+
return;
force_pio_with_unmap:
dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data));
force_pio:
- host->force_pio = true;
renesas_sdhi_internal_dmac_enable_dma(host, false);
}
@@ -291,16 +290,19 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
 * Whitelist of specific SoC ES versions that should use this DMAC
 * implementation, as others may use a different one.
*/
-static const struct soc_device_attribute gen3_soc_whitelist[] = {
+static const struct soc_device_attribute soc_whitelist[] = {
/* specific ones */
{ .soc_id = "r8a7795", .revision = "ES1.*",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
{ .soc_id = "r8a7796", .revision = "ES1.0",
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
/* generic ones */
+ { .soc_id = "r8a774a1" },
+ { .soc_id = "r8a77470" },
{ .soc_id = "r8a7795" },
{ .soc_id = "r8a7796" },
{ .soc_id = "r8a77965" },
+ { .soc_id = "r8a77970" },
{ .soc_id = "r8a77980" },
{ .soc_id = "r8a77995" },
{ /* sentinel */ }
@@ -308,13 +310,21 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
- const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist);
+ const struct soc_device_attribute *soc = soc_device_match(soc_whitelist);
+ struct device *dev = &pdev->dev;
if (!soc)
return -ENODEV;
global_flags |= (unsigned long)soc->data;
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+
+ /* value is max of SD_SECCNT. Confirmed by HW engineers */
+ dma_set_max_seg_size(dev, 0xffffffff);
+
return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops);
}
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 890f192dedbd..1a4016f635d3 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DMA support use of SYS DMAC with SDHI SD/SDIO controller
*
@@ -5,10 +6,6 @@
* Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
* Copyright (C) 2017 Horms Solutions, Simon Horman
* Copyright (C) 2010-2011 Guennadi Liakhovetski
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/device.h>
@@ -213,10 +210,8 @@ static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
goto pio;
}
- if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
- host->force_pio = true;
+ if (sg->length < TMIO_MMC_MIN_DMA_LEN)
return;
- }
/* The only sg element can be unaligned, use our bounce buffer then */
if (!aligned) {
@@ -240,6 +235,7 @@ static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
desc = NULL;
ret = cookie;
}
+ host->dma_on = true;
}
pio:
if (!desc) {
@@ -286,10 +282,8 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
goto pio;
}
- if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
- host->force_pio = true;
+ if (sg->length < TMIO_MMC_MIN_DMA_LEN)
return;
- }
/* The only sg element can be unaligned, use our bounce buffer then */
if (!aligned) {
@@ -318,6 +312,7 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
desc = NULL;
ret = cookie;
}
+ host->dma_on = true;
}
pio:
if (!desc) {
@@ -498,7 +493,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
{
- if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
+ if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
+ of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
!soc_device_match(gen3_soc_whitelist))
return -ENODEV;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 32321bd596d8..057e24f4a620 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -76,6 +76,7 @@ struct sdhci_acpi_slot {
size_t priv_size;
int (*probe_slot)(struct platform_device *, const char *, const char *);
int (*remove_slot)(struct platform_device *);
+ int (*free_slot)(struct platform_device *pdev);
int (*setup_host)(struct platform_device *pdev);
};
@@ -246,7 +247,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
static bool sdhci_acpi_byt(void)
{
static const struct x86_cpu_id byt[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
{}
};
@@ -470,10 +471,70 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
.priv_size = sizeof(struct intel_host),
};
+#define VENDOR_SPECIFIC_PWRCTL_CLEAR_REG 0x1a8
+#define VENDOR_SPECIFIC_PWRCTL_CTL_REG 0x1ac
+static irqreturn_t sdhci_acpi_qcom_handler(int irq, void *ptr)
+{
+ struct sdhci_host *host = ptr;
+
+ sdhci_writel(host, 0x3, VENDOR_SPECIFIC_PWRCTL_CLEAR_REG);
+ sdhci_writel(host, 0x1, VENDOR_SPECIFIC_PWRCTL_CTL_REG);
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_probe_slot(struct platform_device *pdev, const char *hid,
+ const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host = c->host;
+ int *irq = sdhci_acpi_priv(c);
+
+ *irq = -EINVAL;
+
+ if (strcmp(hid, "QCOM8051"))
+ return 0;
+
+ *irq = platform_get_irq(pdev, 1);
+ if (*irq < 0)
+ return 0;
+
+ return request_threaded_irq(*irq, NULL, sdhci_acpi_qcom_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ "sdhci_qcom", host);
+}
+
+static int qcom_free_slot(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host = c->host;
+ struct acpi_device *adev;
+ int *irq = sdhci_acpi_priv(c);
+ const char *hid;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENODEV;
+
+ hid = acpi_device_hid(adev);
+ if (strcmp(hid, "QCOM8051"))
+ return 0;
+
+ if (*irq < 0)
+ return 0;
+
+ free_irq(*irq, host);
+ return 0;
+}
+
static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
.quirks2 = SDHCI_QUIRK2_NO_1_8_V,
.caps = MMC_CAP_NONREMOVABLE,
+ .priv_size = sizeof(int),
+ .probe_slot = qcom_probe_slot,
+ .free_slot = qcom_free_slot,
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
@@ -756,6 +817,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
err_cleanup:
sdhci_cleanup_host(c->host);
err_free:
+ if (c->slot && c->slot->free_slot)
+ c->slot->free_slot(pdev);
+
sdhci_free_host(c->host);
return err;
}
@@ -777,6 +841,10 @@ static int sdhci_acpi_remove(struct platform_device *pdev)
dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0);
sdhci_remove_host(c->host, dead);
+
+ if (c->slot && c->slot->free_slot)
+ c->slot->free_slot(pdev);
+
sdhci_free_host(c->host);
return 0;
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index dfa58f8b8dfa..3f16d9c90ba2 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -60,6 +60,7 @@
/* Tuning Block Control Register */
#define ESDHC_TBCTL 0x120
#define ESDHC_TB_EN 0x00000004
+#define ESDHC_TBPTR 0x128
/* Control Register for DMA transfer */
#define ESDHC_DMA_SYSCTL 0x40c
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index d0e83db42ae5..0db99057c44f 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -15,6 +15,7 @@
* iProc SDHCI platform driver
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
@@ -162,9 +163,19 @@ static void sdhci_iproc_writeb(struct sdhci_host *host, u8 val, int reg)
sdhci_iproc_writel(host, newval, reg & ~3);
}
+static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ if (pltfm_host->clk)
+ return sdhci_pltfm_clk_get_max_clock(host);
+ else
+ return pltfm_host->clock;
+}
+
static const struct sdhci_ops sdhci_iproc_ops = {
.set_clock = sdhci_set_clock,
- .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_max_clock = sdhci_iproc_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -178,7 +189,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
.write_w = sdhci_iproc_writew,
.write_b = sdhci_iproc_writeb,
.set_clock = sdhci_set_clock,
- .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_max_clock = sdhci_iproc_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -256,19 +267,25 @@ static const struct of_device_id sdhci_iproc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sdhci_iproc_of_match);
+static const struct acpi_device_id sdhci_iproc_acpi_ids[] = {
+ { .id = "BRCM5871", .driver_data = (kernel_ulong_t)&iproc_cygnus_data },
+ { .id = "BRCM5872", .driver_data = (kernel_ulong_t)&iproc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, sdhci_iproc_acpi_ids);
+
static int sdhci_iproc_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
- const struct sdhci_iproc_data *iproc_data;
+ struct device *dev = &pdev->dev;
+ const struct sdhci_iproc_data *iproc_data = NULL;
struct sdhci_host *host;
struct sdhci_iproc_host *iproc_host;
struct sdhci_pltfm_host *pltfm_host;
int ret;
- match = of_match_device(sdhci_iproc_of_match, &pdev->dev);
- if (!match)
- return -EINVAL;
- iproc_data = match->data;
+ iproc_data = device_get_match_data(dev);
+ if (!iproc_data)
+ return -ENODEV;
host = sdhci_pltfm_init(pdev, iproc_data->pdata, sizeof(*iproc_host));
if (IS_ERR(host))
@@ -280,19 +297,21 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
iproc_host->data = iproc_data;
mmc_of_parse(host->mmc);
- sdhci_get_of_property(pdev);
+ sdhci_get_property(pdev);
host->mmc->caps |= iproc_host->data->mmc_caps;
- pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pltfm_host->clk)) {
- ret = PTR_ERR(pltfm_host->clk);
- goto err;
- }
- ret = clk_prepare_enable(pltfm_host->clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable host clk\n");
- goto err;
+ if (dev->of_node) {
+ pltfm_host->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pltfm_host->clk)) {
+ ret = PTR_ERR(pltfm_host->clk);
+ goto err;
+ }
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable host clk\n");
+ goto err;
+ }
}
if (iproc_host->data->pdata->quirks & SDHCI_QUIRK_MISSING_CAPS) {
@@ -307,7 +326,8 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
return 0;
err_clk:
- clk_disable_unprepare(pltfm_host->clk);
+ if (dev->of_node)
+ clk_disable_unprepare(pltfm_host->clk);
err:
sdhci_pltfm_free(pdev);
return ret;
@@ -317,6 +337,7 @@ static struct platform_driver sdhci_iproc_driver = {
.driver = {
.name = "sdhci-iproc",
.of_match_table = sdhci_iproc_of_match,
+ .acpi_match_table = ACPI_PTR(sdhci_iproc_acpi_ids),
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_iproc_probe,
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index a40bcc27f187..142c4b802f31 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -107,6 +107,11 @@ struct sdhci_arasan_data {
#define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1)
};
+struct sdhci_arasan_of_data {
+ const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
+ const struct sdhci_pltfm_data *pdata;
+};
+
static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
.baseclkfreq = { .reg = 0xf000, .width = 8, .shift = 8 },
.clockmultiplier = { .reg = 0xf02c, .width = 8, .shift = 0},
@@ -226,6 +231,25 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
}
}
+static void sdhci_arasan_am654_set_clock(struct sdhci_host *host,
+ unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+
+ if (sdhci_arasan->is_phy_on) {
+ phy_power_off(sdhci_arasan->phy);
+ sdhci_arasan->is_phy_on = false;
+ }
+
+ sdhci_set_clock(host, clock);
+
+ if (clock > PHY_CLK_TOO_SLOW_HZ) {
+ phy_power_on(sdhci_arasan->phy);
+ sdhci_arasan->is_phy_on = true;
+ }
+}
+
static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -307,6 +331,33 @@ static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
SDHCI_QUIRK2_STOP_WITH_TC,
};
+static struct sdhci_arasan_of_data sdhci_arasan_data = {
+ .pdata = &sdhci_arasan_pdata,
+};
+
+static const struct sdhci_ops sdhci_arasan_am654_ops = {
+ .set_clock = sdhci_arasan_am654_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_arasan_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_pltfm_data sdhci_arasan_am654_pdata = {
+ .ops = &sdhci_arasan_am654_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
+ SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
+};
+
+static const struct sdhci_arasan_of_data sdhci_arasan_am654_data = {
+ .pdata = &sdhci_arasan_am654_pdata,
+};
+
static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
int cmd_error = 0;
@@ -363,6 +414,11 @@ static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
+static struct sdhci_arasan_of_data sdhci_arasan_rk3399_data = {
+ .soc_ctl_map = &rk3399_soc_ctl_map,
+ .pdata = &sdhci_arasan_cqe_pdata,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -462,14 +518,25 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
/* SoC-specific compatible strings w/ soc_ctl_map */
{
.compatible = "rockchip,rk3399-sdhci-5.1",
- .data = &rk3399_soc_ctl_map,
+ .data = &sdhci_arasan_rk3399_data,
+ },
+ {
+ .compatible = "ti,am654-sdhci-5.1",
+ .data = &sdhci_arasan_am654_data,
},
-
/* Generic compatible below here */
- { .compatible = "arasan,sdhci-8.9a" },
- { .compatible = "arasan,sdhci-5.1" },
- { .compatible = "arasan,sdhci-4.9a" },
-
+ {
+ .compatible = "arasan,sdhci-8.9a",
+ .data = &sdhci_arasan_data,
+ },
+ {
+ .compatible = "arasan,sdhci-5.1",
+ .data = &sdhci_arasan_data,
+ },
+ {
+ .compatible = "arasan,sdhci-4.9a",
+ .data = &sdhci_arasan_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
@@ -707,14 +774,11 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_arasan_data *sdhci_arasan;
struct device_node *np = pdev->dev.of_node;
- const struct sdhci_pltfm_data *pdata;
-
- if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-5.1"))
- pdata = &sdhci_arasan_cqe_pdata;
- else
- pdata = &sdhci_arasan_pdata;
+ const struct sdhci_arasan_of_data *data;
- host = sdhci_pltfm_init(pdev, pdata, sizeof(*sdhci_arasan));
+ match = of_match_node(sdhci_arasan_of_match, pdev->dev.of_node);
+ data = match->data;
+ host = sdhci_pltfm_init(pdev, data->pdata, sizeof(*sdhci_arasan));
if (IS_ERR(host))
return PTR_ERR(host);
@@ -723,8 +787,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
sdhci_arasan->host = host;
- match = of_match_node(sdhci_arasan_of_match, pdev->dev.of_node);
- sdhci_arasan->soc_ctl_map = match->data;
+ sdhci_arasan->soc_ctl_map = data->soc_ctl_map;
node = of_parse_phandle(pdev->dev.of_node, "arasan,soc-ctl-syscon", 0);
if (node) {
@@ -788,7 +851,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
ret = mmc_of_parse(host->mmc);
if (ret) {
- dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
goto unreg_clk;
}
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 1b7cd144fb01..a5137845a1c7 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -8,21 +8,51 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/sizes.h>
#include "sdhci-pltfm.h"
+#define BOUNDARY_OK(addr, len) \
+ ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
+
struct dwcmshc_priv {
struct clk *bus_clk;
};
+/*
+ * If the DMA address spans a 128 MB boundary, split the transfer into two
+ * so that neither part crosses the boundary.
+ */
+static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
+ dma_addr_t addr, int len, unsigned int cmd)
+{
+ int tmplen, offset;
+
+ if (likely(!len || BOUNDARY_OK(addr, len))) {
+ sdhci_adma_write_desc(host, desc, addr, len, cmd);
+ return;
+ }
+
+ offset = addr & (SZ_128M - 1);
+ tmplen = SZ_128M - offset;
+ sdhci_adma_write_desc(host, desc, addr, tmplen, cmd);
+
+ addr += tmplen;
+ len -= tmplen;
+ sdhci_adma_write_desc(host, desc, addr, len, cmd);
+}
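/*
 * Illustrative sketch, not part of the patch: the 128 MB boundary test
 * behind BOUNDARY_OK above. A transfer crosses a boundary iff its first
 * and last bytes fall into different 128 MB windows. For example,
 * addr = 0x07fffe00 with len = 0x400 ends at 0x080001ff, crossing
 * 0x08000000, so dwcmshc_adma_write_desc() splits it into 0x200 bytes
 * up to the boundary and the remaining 0x200 bytes after it.
 */
static int crosses_128m(unsigned long long addr, unsigned int len)
{
	const unsigned long long mask = 0x8000000ULL - 1; /* SZ_128M - 1 */

	return len && ((addr | mask) != ((addr + len - 1) | mask));
}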
+
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.reset = sdhci_reset,
+ .adma_write_desc = dwcmshc_adma_write_desc,
};
static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
@@ -36,12 +66,21 @@ static int dwcmshc_probe(struct platform_device *pdev)
struct sdhci_host *host;
struct dwcmshc_priv *priv;
int err;
+ u32 extra;
host = sdhci_pltfm_init(pdev, &sdhci_dwcmshc_pdata,
sizeof(struct dwcmshc_priv));
if (IS_ERR(host))
return PTR_ERR(host);
+ /*
+	 * Extra ADMA table entries for the 128 MB boundary splitting above.
+ */
+ extra = DIV_ROUND_UP_ULL(dma_get_required_mask(&pdev->dev), SZ_128M);
+ if (extra > SDHCI_MAX_SEGS)
+ extra = SDHCI_MAX_SEGS;
+ host->adma_table_cnt += extra;
+
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 9cb7554a463d..86fc9f022002 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -78,8 +78,10 @@ struct sdhci_esdhc {
u8 vendor_ver;
u8 spec_ver;
bool quirk_incorrect_hostver;
+ bool quirk_fixup_tuning;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
+ u32 div_ratio;
};
/**
@@ -580,6 +582,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
clock, host->max_clk / pre_div / div);
host->mmc->actual_clock = host->max_clk / pre_div / div;
+ esdhc->div_ratio = pre_div * div;
pre_div >>= 1;
div--;
@@ -712,9 +715,24 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
}
}
+static struct soc_device_attribute soc_fixup_tuning[] = {
+ { .family = "QorIQ T1040", .revision = "1.0", },
+ { .family = "QorIQ T2080", .revision = "1.0", },
+ { .family = "QorIQ T1023", .revision = "1.0", },
+ { .family = "QorIQ LS1021A", .revision = "1.0", },
+ { .family = "QorIQ LS1080A", .revision = "1.0", },
+ { .family = "QorIQ LS2080A", .revision = "1.0", },
+ { .family = "QorIQ LS1012A", .revision = "1.0", },
+ { .family = "QorIQ LS1043A", .revision = "1.*", },
+ { .family = "QorIQ LS1046A", .revision = "1.0", },
+ { },
+};
+
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u32 val;
/* Use tuning block for tuning procedure */
@@ -728,7 +746,26 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
sdhci_writel(host, val, ESDHC_TBCTL);
esdhc_clock_enable(host, true);
- return sdhci_execute_tuning(mmc, opcode);
+ sdhci_execute_tuning(mmc, opcode);
+ if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {
+
+ /*
+  * Program TBPTR[TB_WNDW_END_PTR] = 3 * DIV_RATIO and
+  * TBPTR[TB_WNDW_START_PTR] = 5 * DIV_RATIO.
+  */
+ val = sdhci_readl(host, ESDHC_TBPTR);
+ val = (val & ~((0x7f << 8) | 0x7f)) |
+ (3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
+ sdhci_writel(host, val, ESDHC_TBPTR);
+
+ /*
+  * Program the software tuning mode by setting
+  * TBCTL[TB_MODE] = 2'h3.
+  */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val |= 0x3;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+ sdhci_execute_tuning(mmc, opcode);
+ }
+ return 0;
}
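/*
 * Illustrative sketch, not part of the patch: the TBPTR window packing
 * performed above. The end pointer (3 * DIV_RATIO) goes into bits 6:0
 * and the start pointer (5 * DIV_RATIO) into bits 14:8, per the comment
 * in the code. With div_ratio = 4 this yields (20 << 8) | 12 = 0x140c.
 */
static unsigned int pack_tb_window(unsigned int div_ratio)
{
	unsigned int end = 3 * div_ratio;	/* TB_WNDW_END_PTR */
	unsigned int start = 5 * div_ratio;	/* TB_WNDW_START_PTR */

	return (start << 8) | end;
}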
#ifdef CONFIG_PM_SLEEP
@@ -903,6 +940,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
esdhc = sdhci_pltfm_priv(pltfm_host);
+ if (soc_device_match(soc_fixup_tuning))
+ esdhc->quirk_fixup_tuning = true;
+ else
+ esdhc->quirk_fixup_tuning = false;
+
if (esdhc->vendor_ver == VENDOR_V_22)
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 77e9bc4aaee9..cc3ffeffd7a2 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -490,6 +490,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
case PCI_DEVICE_ID_O2_SEABIRD0:
+ if (chip->pdev->revision == 0x01)
+ chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
+ /* fall through */
case PCI_DEVICE_ID_O2_SEABIRD1:
/* UnLock WP */
ret = pci_read_config_byte(chip->pdev,
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 02bea6159d79..b231c9a3f888 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -30,6 +30,7 @@
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/of.h>
#ifdef CONFIG_PPC
#include <asm/machdep.h>
@@ -51,11 +52,10 @@ static const struct sdhci_ops sdhci_pltfm_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
-#ifdef CONFIG_OF
-static bool sdhci_of_wp_inverted(struct device_node *np)
+static bool sdhci_wp_inverted(struct device *dev)
{
- if (of_get_property(np, "sdhci,wp-inverted", NULL) ||
- of_get_property(np, "wp-inverted", NULL))
+ if (device_property_present(dev, "sdhci,wp-inverted") ||
+ device_property_present(dev, "wp-inverted"))
return true;
/* Old device trees don't have the wp-inverted property. */
@@ -66,52 +66,64 @@ static bool sdhci_of_wp_inverted(struct device_node *np)
#endif /* CONFIG_PPC */
}
-void sdhci_get_of_property(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static void sdhci_get_compatibility(struct platform_device *pdev)
{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return;
+
+ if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc"))
+ host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
+
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc") ||
+ of_device_is_compatible(np, "fsl,p1010-esdhc") ||
+ of_device_is_compatible(np, "fsl,t4240-esdhc") ||
+ of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+}
+#else
+void sdhci_get_compatibility(struct platform_device *pdev) {}
+#endif /* CONFIG_OF */
+
+void sdhci_get_property(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
u32 bus_width;
- if (of_get_property(np, "sdhci,auto-cmd12", NULL))
+ if (device_property_present(dev, "sdhci,auto-cmd12"))
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
- if (of_get_property(np, "sdhci,1-bit-only", NULL) ||
- (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
+ if (device_property_present(dev, "sdhci,1-bit-only") ||
+ (device_property_read_u32(dev, "bus-width", &bus_width) == 0 &&
bus_width == 1))
host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
- if (sdhci_of_wp_inverted(np))
+ if (sdhci_wp_inverted(dev))
host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
- if (of_get_property(np, "broken-cd", NULL))
+ if (device_property_present(dev, "broken-cd"))
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
- if (of_get_property(np, "no-1-8-v", NULL))
+ if (device_property_present(dev, "no-1-8-v"))
host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
- if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc"))
- host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
-
- if (of_device_is_compatible(np, "fsl,p2020-esdhc") ||
- of_device_is_compatible(np, "fsl,p1010-esdhc") ||
- of_device_is_compatible(np, "fsl,t4240-esdhc") ||
- of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
- host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ sdhci_get_compatibility(pdev);
- of_property_read_u32(np, "clock-frequency", &pltfm_host->clock);
+ device_property_read_u32(dev, "clock-frequency", &pltfm_host->clock);
- if (of_find_property(np, "keep-power-in-suspend", NULL))
+ if (device_property_present(dev, "keep-power-in-suspend"))
host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
- if (of_property_read_bool(np, "wakeup-source") ||
- of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
+ if (device_property_read_bool(dev, "wakeup-source") ||
+ device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
}
-#else
-void sdhci_get_of_property(struct platform_device *pdev) {}
-#endif /* CONFIG_OF */
-EXPORT_SYMBOL_GPL(sdhci_get_of_property);
+EXPORT_SYMBOL_GPL(sdhci_get_property);
struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
const struct sdhci_pltfm_data *pdata,
@@ -184,7 +196,7 @@ int sdhci_pltfm_register(struct platform_device *pdev,
if (IS_ERR(host))
return PTR_ERR(host);
- sdhci_get_of_property(pdev);
+ sdhci_get_property(pdev);
ret = sdhci_add_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 1e91fb1c020e..6109987fc3b5 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -90,7 +90,12 @@ static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
}
#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
-extern void sdhci_get_of_property(struct platform_device *pdev);
+void sdhci_get_property(struct platform_device *pdev);
+
+static inline void sdhci_get_of_property(struct platform_device *pdev)
+{
+ return sdhci_get_property(pdev);
+}
extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
const struct sdhci_pltfm_data *pdata,
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index b8e96f392428..1783e29eae04 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -21,17 +21,14 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/slot-gpio.h>
#include <linux/platform_data/pxa_sdhci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/mbus.h>
@@ -452,16 +449,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
host->mmc->caps2 |= pdata->host_caps2;
if (pdata->pm_caps)
host->mmc->pm_caps |= pdata->pm_caps;
-
- if (gpio_is_valid(pdata->ext_cd_gpio)) {
- ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio,
- 0);
- if (ret) {
- dev_err(mmc_dev(host->mmc),
- "failed to allocate card detect gpio\n");
- goto err_cd_req;
- }
- }
}
pm_runtime_get_noresume(&pdev->dev);
@@ -486,7 +473,6 @@ err_add_host:
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
err_of_parse:
-err_cd_req:
err_mbus_win:
clk_disable_unprepare(pxa->clk_io);
clk_disable_unprepare(pxa->clk_core);
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 391d52b467ca..5eada6f87e60 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -11,7 +11,6 @@
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include "sdhci-pltfm.h"
@@ -19,10 +18,6 @@
#define SDHCI_SIRF_8BITBUS BIT(3)
#define SIRF_TUNING_COUNT 16384
-struct sdhci_sirf_priv {
- int gpio_cd;
-};
-
static void sdhci_sirf_set_bus_width(struct sdhci_host *host, int width)
{
u8 ctrl;
@@ -170,9 +165,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
- struct sdhci_sirf_priv *priv;
struct clk *clk;
- int gpio_cd;
int ret;
clk = devm_clk_get(&pdev->dev, NULL);
@@ -181,19 +174,12 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
return PTR_ERR(clk);
}
- if (pdev->dev.of_node)
- gpio_cd = of_get_named_gpio(pdev->dev.of_node, "cd-gpios", 0);
- else
- gpio_cd = -EINVAL;
-
- host = sdhci_pltfm_init(pdev, &sdhci_sirf_pdata, sizeof(struct sdhci_sirf_priv));
+ host = sdhci_pltfm_init(pdev, &sdhci_sirf_pdata, 0);
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
pltfm_host->clk = clk;
- priv = sdhci_pltfm_priv(pltfm_host);
- priv->gpio_cd = gpio_cd;
sdhci_get_of_property(pdev);
@@ -209,15 +195,11 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
* We must request the IRQ after sdhci_add_host(), as the tasklet only
* gets set up in sdhci_add_host(); otherwise we oops.
*/
- if (gpio_is_valid(priv->gpio_cd)) {
- ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd, 0);
- if (ret) {
- dev_err(&pdev->dev, "card detect irq request failed: %d\n",
- ret);
- goto err_request_cd;
- }
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ if (ret == -EPROBE_DEFER)
+ goto err_request_cd;
+ if (!ret)
mmc_gpiod_request_cd_irq(host->mmc);
- }
return 0;
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 9247d51f2eed..916b5b09c3d1 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -15,13 +15,11 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -32,7 +30,6 @@
struct spear_sdhci {
struct clk *clk;
- int card_int_gpio;
};
/* sdhci ops */
@@ -43,18 +40,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
-static void sdhci_probe_config_dt(struct device_node *np,
- struct spear_sdhci *host)
-{
- int cd_gpio;
-
- cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
- if (!gpio_is_valid(cd_gpio))
- cd_gpio = -1;
-
- host->card_int_gpio = cd_gpio;
-}
-
static int sdhci_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
@@ -109,21 +94,13 @@ static int sdhci_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Error setting desired clk, clk=%lu\n",
clk_get_rate(sdhci->clk));
- sdhci_probe_config_dt(pdev->dev.of_node, sdhci);
/*
- * It is optional to use GPIOs for sdhci card detection. If
- * sdhci->card_int_gpio < 0, then use original sdhci lines otherwise
- * GPIO lines. We use the built-in GPIO support for this.
+ * It is optional to use GPIOs for sdhci card detection. If the
+ * slot-GPIO helpers find a card-detect descriptor, we use it.
*/
- if (sdhci->card_int_gpio >= 0) {
- ret = mmc_gpio_request_cd(host->mmc, sdhci->card_int_gpio, 0);
- if (ret < 0) {
- dev_dbg(&pdev->dev,
- "failed to request card-detect gpio%d\n",
- sdhci->card_int_gpio);
- goto disable_clk;
- }
- }
+ ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL);
+ if (ret == -EPROBE_DEFER)
+ goto disable_clk;
ret = sdhci_add_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
new file mode 100644
index 000000000000..9a822e2e9f0b
--- /dev/null
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Secure Digital Host Controller
+//
+// Copyright (C) 2018 Spreadtrum, Inc.
+// Author: Chunyan Zhang <chunyan.zhang@unisoc.com>
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "sdhci-pltfm.h"
+
+/* high 16 bits of the SDHCI_ARGUMENT2 register */
+#define SDHCI_SPRD_ARG2_STUFF GENMASK(31, 16)
+
+#define SDHCI_SPRD_REG_32_DLL_DLY_OFFSET 0x208
+#define SDHCIBSPRD_IT_WR_DLY_INV BIT(5)
+#define SDHCI_SPRD_BIT_CMD_DLY_INV BIT(13)
+#define SDHCI_SPRD_BIT_POSRD_DLY_INV BIT(21)
+#define SDHCI_SPRD_BIT_NEGRD_DLY_INV BIT(29)
+
+#define SDHCI_SPRD_REG_32_BUSY_POSI 0x250
+#define SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN BIT(25)
+#define SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN BIT(24)
+
+#define SDHCI_SPRD_REG_DEBOUNCE 0x28C
+#define SDHCI_SPRD_BIT_DLL_BAK BIT(0)
+#define SDHCI_SPRD_BIT_DLL_VAL BIT(1)
+
+#define SDHCI_SPRD_INT_SIGNAL_MASK 0x1B7F410B
+
+/* SDHCI_HOST_CONTROL2 */
+#define SDHCI_SPRD_CTRL_HS200 0x0005
+#define SDHCI_SPRD_CTRL_HS400 0x0006
+
+/*
+ * According to the standard specification, BIT(3) of SDHCI_SOFTWARE_RESET is
+ * reserved; it is only used on Spreadtrum's design, where the hardware
+ * cannot work if this bit is cleared.
+ * 1 : normal operation
+ * 0 : hardware reset
+ */
+#define SDHCI_HW_RESET_CARD BIT(3)
+
+#define SDHCI_SPRD_MAX_CUR 0xFFFFFF
+#define SDHCI_SPRD_CLK_MAX_DIV 1023
+
+#define SDHCI_SPRD_CLK_DEF_RATE 26000000
+
+struct sdhci_sprd_host {
+ u32 version;
+ struct clk *clk_sdio;
+ struct clk *clk_enable;
+ u32 base_rate;
+ int flags; /* backup of host attribute */
+};
+
+#define TO_SPRD_HOST(host) sdhci_pltfm_priv(sdhci_priv(host))
+
+static void sdhci_sprd_init_config(struct sdhci_host *host)
+{
+ u16 val;
+
+ /* set dll backup mode */
+ val = sdhci_readl(host, SDHCI_SPRD_REG_DEBOUNCE);
+ val |= SDHCI_SPRD_BIT_DLL_BAK | SDHCI_SPRD_BIT_DLL_VAL;
+ sdhci_writel(host, val, SDHCI_SPRD_REG_DEBOUNCE);
+}
+
+static inline u32 sdhci_sprd_readl(struct sdhci_host *host, int reg)
+{
+ if (unlikely(reg == SDHCI_MAX_CURRENT))
+ return SDHCI_SPRD_MAX_CUR;
+
+ return readl_relaxed(host->ioaddr + reg);
+}
+
+static inline void sdhci_sprd_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ /* SDHCI_MAX_CURRENT is reserved on Spreadtrum's platform */
+ if (unlikely(reg == SDHCI_MAX_CURRENT))
+ return;
+
+ if (unlikely(reg == SDHCI_SIGNAL_ENABLE || reg == SDHCI_INT_ENABLE))
+ val = val & SDHCI_SPRD_INT_SIGNAL_MASK;
+
+ writel_relaxed(val, host->ioaddr + reg);
+}
+
+static inline void sdhci_sprd_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ /* SDHCI_BLOCK_COUNT is Read Only on Spreadtrum's platform */
+ if (unlikely(reg == SDHCI_BLOCK_COUNT))
+ return;
+
+ writew_relaxed(val, host->ioaddr + reg);
+}
+
+static inline void sdhci_sprd_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ /*
+ * Since BIT(3) of SDHCI_SOFTWARE_RESET is reserved according to the
+ * standard specification, sdhci_reset() writes this register directly
+ * without preserving reserved bits; that would clear BIT(3), which is
+ * defined as hardware reset on Spreadtrum's platform, and clearing it
+ * by mistake makes the card stop working. Work around that here by
+ * keeping the bit set.
+ */
+ if (unlikely(reg == SDHCI_SOFTWARE_RESET)) {
+ if (readb_relaxed(host->ioaddr + reg) & SDHCI_HW_RESET_CARD)
+ val |= SDHCI_HW_RESET_CARD;
+ }
+
+ writeb_relaxed(val, host->ioaddr + reg);
+}
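/*
 * Illustrative walkthrough, not part of the patch: the effect of the
 * workaround above. A generic sdhci_reset(host, SDHCI_RESET_ALL) writes
 * 0x01 to SDHCI_SOFTWARE_RESET; when BIT(3) is currently set,
 * sdhci_sprd_writeb() turns that into 0x09, so the Spreadtrum-specific
 * "normal work" bit survives the generic reset path.
 */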
+
+static inline void sdhci_sprd_sd_clk_off(struct sdhci_host *host)
+{
+ u16 ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+
+ ctrl &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+}
+
+static inline void
+sdhci_sprd_set_dll_invert(struct sdhci_host *host, u32 mask, bool en)
+{
+ u32 dll_dly_offset;
+
+ dll_dly_offset = sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_DLY_OFFSET);
+ if (en)
+ dll_dly_offset |= mask;
+ else
+ dll_dly_offset &= ~mask;
+ sdhci_writel(host, dll_dly_offset, SDHCI_SPRD_REG_32_DLL_DLY_OFFSET);
+}
+
+static inline u32 sdhci_sprd_calc_div(u32 base_clk, u32 clk)
+{
+ u32 div;
+
+ /* select 2x clock source */
+ if (base_clk <= clk * 2)
+ return 0;
+
+ div = (u32) (base_clk / (clk * 2));
+
+ if ((base_clk / div) > (clk * 2))
+ div++;
+
+ if (div > SDHCI_SPRD_CLK_MAX_DIV)
+ div = SDHCI_SPRD_CLK_MAX_DIV;
+
+ if (div % 2)
+ div = (div + 1) / 2;
+ else
+ div = div / 2;
+
+ return div;
+}
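+/*
+ * Worked example (illustrative, not from a datasheet): with the default
+ * 26 MHz base clock and a requested 400 kHz card clock, 26000000 / 800000
+ * gives div = 32; since 26000000 / 32 = 812500 still exceeds 800000, div
+ * is bumped to 33, then rounded up to even and halved, yielding 17 as the
+ * value programmed into the divider field.
+ */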
+
+static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
+ unsigned int clk)
+{
+ struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+ u32 div, val, mask;
+
+ div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
+
+ clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
+ sdhci_enable_clk(host, clk);
+
+ /* Enable the internal and output clock auto gating */
+ val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
+ mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
+ SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
+ if (mask != (val & mask)) {
+ val |= mask;
+ sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
+ }
+}
+
+static void sdhci_sprd_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ bool en = false;
+
+ if (clock == 0) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ } else if (clock != host->clock) {
+ sdhci_sprd_sd_clk_off(host);
+ _sdhci_sprd_set_clock(host, clock);
+
+ if (clock <= 400000)
+ en = true;
+ sdhci_sprd_set_dll_invert(host, SDHCI_SPRD_BIT_CMD_DLY_INV |
+ SDHCI_SPRD_BIT_POSRD_DLY_INV, en);
+ } else {
+ _sdhci_sprd_set_clock(host, clock);
+ }
+}
+
+static unsigned int sdhci_sprd_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+
+ return clk_round_rate(sprd_host->clk_sdio, ULONG_MAX);
+}
+
+static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host)
+{
+ return 400000;
+}
+
+static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ u16 ctrl_2;
+
+ if (timing == host->timing)
+ return;
+
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ switch (timing) {
+ case MMC_TIMING_UHS_SDR12:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ break;
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ ctrl_2 |= SDHCI_SPRD_CTRL_HS200;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ ctrl_2 |= SDHCI_SPRD_CTRL_HS400;
+ break;
+ default:
+ break;
+ }
+
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_sprd_hw_reset(struct sdhci_host *host)
+{
+ int val;
+
+ /*
+ * Note: don't use the sdhci_writeb() API here, since it is redirected
+ * to sdhci_sprd_writeb(), whose SDHCI_SOFTWARE_RESET workaround would
+ * prevent the SDHCI_HW_RESET_CARD bit from ever being cleared.
+ */
+ val = readb_relaxed(host->ioaddr + SDHCI_SOFTWARE_RESET);
+ val &= ~SDHCI_HW_RESET_CARD;
+ writeb_relaxed(val, host->ioaddr + SDHCI_SOFTWARE_RESET);
+ /* wait for 10 us */
+ usleep_range(10, 20);
+
+ val |= SDHCI_HW_RESET_CARD;
+ writeb_relaxed(val, host->ioaddr + SDHCI_SOFTWARE_RESET);
+ usleep_range(300, 500);
+}
+
+static const struct sdhci_ops sdhci_sprd_ops = {
+ .read_l = sdhci_sprd_readl,
+ .write_l = sdhci_sprd_writel,
+ .write_b = sdhci_sprd_writeb,
+ .set_clock = sdhci_sprd_set_clock,
+ .get_max_clock = sdhci_sprd_get_max_clock,
+ .get_min_clock = sdhci_sprd_get_min_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_sprd_set_uhs_signaling,
+ .hw_reset = sdhci_sprd_hw_reset,
+};
+
+static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+
+ host->flags |= sprd_host->flags & SDHCI_AUTO_CMD23;
+
+ /*
+ * From version 4.10 onward, the ARGUMENT2 register also acts as the
+ * 32-bit block count register, so Spreadtrum's SD host controller
+ * cannot carry the stuff bits of the CMD23 argument there.
+ */
+ if (host->version >= SDHCI_SPEC_410 &&
+ mrq->sbc && (mrq->sbc->arg & SDHCI_SPRD_ARG2_STUFF) &&
+ (host->flags & SDHCI_AUTO_CMD23))
+ host->flags &= ~SDHCI_AUTO_CMD23;
+
+ sdhci_request(mmc, mrq);
+}
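+/*
+ * Illustrative note: a plain CMD23 block count lives in the low 16 bits of
+ * the argument, so e.g. mrq->sbc->arg == 0x10 (16 blocks) keeps Auto-CMD23
+ * enabled, while a request that sets any of the upper bits (such as an
+ * eMMC reliable-write flag) forces a separately issued CMD23. This assumes
+ * SDHCI_SPRD_ARG2_STUFF, defined earlier in this file, masks the bits
+ * above the block count.
+ */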
+
+static const struct sdhci_pltfm_data sdhci_sprd_pdata = {
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
+ SDHCI_QUIRK2_USE_32BIT_BLK_CNT,
+ .ops = &sdhci_sprd_ops,
+};
+
+static int sdhci_sprd_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_sprd_host *sprd_host;
+ struct clk *clk;
+ int ret = 0;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_sprd_pdata, sizeof(*sprd_host));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ host->dma_mask = DMA_BIT_MASK(64);
+ pdev->dev.dma_mask = &host->dma_mask;
+ host->mmc_host_ops.request = sdhci_sprd_request;
+
+ host->mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_ERASE | MMC_CAP_CMD23;
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto pltfm_free;
+
+ sprd_host = TO_SPRD_HOST(host);
+
+ clk = devm_clk_get(&pdev->dev, "sdio");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto pltfm_free;
+ }
+ sprd_host->clk_sdio = clk;
+ sprd_host->base_rate = clk_get_rate(sprd_host->clk_sdio);
+ if (!sprd_host->base_rate)
+ sprd_host->base_rate = SDHCI_SPRD_CLK_DEF_RATE;
+
+ clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto pltfm_free;
+ }
+ sprd_host->clk_enable = clk;
+
+ ret = clk_prepare_enable(sprd_host->clk_sdio);
+ if (ret)
+ goto pltfm_free;
+
+ ret = clk_prepare_enable(sprd_host->clk_enable);
+ if (ret)
+ goto clk_disable;
+
+ sdhci_sprd_init_config(host);
+ host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
+ sprd_host->version = ((host->version & SDHCI_VENDOR_VER_MASK) >>
+ SDHCI_VENDOR_VER_SHIFT);
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_suspend_ignore_children(&pdev->dev, true);
+
+ sdhci_enable_v4_mode(host);
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ goto pm_runtime_disable;
+
+ sprd_host->flags = host->flags;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto err_cleanup_host;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+err_cleanup_host:
+ sdhci_cleanup_host(host);
+
+pm_runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ clk_disable_unprepare(sprd_host->clk_enable);
+
+clk_disable:
+ clk_disable_unprepare(sprd_host->clk_sdio);
+
+pltfm_free:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_sprd_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_remove_host(mmc);
+ clk_disable_unprepare(sprd_host->clk_sdio);
+ clk_disable_unprepare(sprd_host->clk_enable);
+
+ mmc_free_host(mmc);
+
+ return 0;
+}
+
+static const struct of_device_id sdhci_sprd_of_match[] = {
+ { .compatible = "sprd,sdhci-r11", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_sprd_of_match);
+
+#ifdef CONFIG_PM
+static int sdhci_sprd_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+
+ sdhci_runtime_suspend_host(host);
+
+ clk_disable_unprepare(sprd_host->clk_sdio);
+ clk_disable_unprepare(sprd_host->clk_enable);
+
+ return 0;
+}
+
+static int sdhci_sprd_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+ int ret;
+
+ ret = clk_prepare_enable(sprd_host->clk_enable);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(sprd_host->clk_sdio);
+ if (ret) {
+ clk_disable_unprepare(sprd_host->clk_enable);
+ return ret;
+ }
+
+ sdhci_runtime_resume_host(host);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops sdhci_sprd_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(sdhci_sprd_runtime_suspend,
+ sdhci_sprd_runtime_resume, NULL)
+};
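+/*
+ * Minimal usage sketch (not part of the driver; the function name is
+ * hypothetical): with the 50 ms autosuspend delay set in probe, a caller
+ * needing the controller powered would bracket its access like this.
+ */
+static int sdhci_sprd_io_example(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(dev); /* runs sdhci_sprd_runtime_resume() */
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ /* ... access registers while the clocks are guaranteed on ... */
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev); /* may trigger suspend after 50 ms */
+ return 0;
+}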
+
+static struct platform_driver sdhci_sprd_driver = {
+ .probe = sdhci_sprd_probe,
+ .remove = sdhci_sprd_remove,
+ .driver = {
+ .name = "sdhci_sprd_r11",
+ .of_match_table = of_match_ptr(sdhci_sprd_of_match),
+ .pm = &sdhci_sprd_pm_ops,
+ },
+};
+module_platform_driver(sdhci_sprd_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SDIO host controller r11 driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sdhci-sprd-r11");
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 908b23e6a03c..7b95d088fdef 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -16,17 +16,21 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/gpio/consumer.h>
+#include <linux/ktime.h>
#include "sdhci-pltfm.h"
@@ -34,40 +38,96 @@
#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100
#define SDHCI_CLOCK_CTRL_TAP_MASK 0x00ff0000
#define SDHCI_CLOCK_CTRL_TAP_SHIFT 16
+#define SDHCI_CLOCK_CTRL_TRIM_MASK 0x1f000000
+#define SDHCI_CLOCK_CTRL_TRIM_SHIFT 24
#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE BIT(5)
#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE BIT(3)
#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE BIT(2)
-#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
-#define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8
-#define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10
-#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
-#define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200
+#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL 0x104
+#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE BIT(31)
-#define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4
-#define SDHCI_AUTO_CAL_START BIT(31)
-#define SDHCI_AUTO_CAL_ENABLE BIT(29)
+#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES 0x10c
+#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK 0x00003f00
+#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT 8
-#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
-#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
-#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
-#define NVQUIRK_ENABLE_SDR50 BIT(3)
-#define NVQUIRK_ENABLE_SDR104 BIT(4)
-#define NVQUIRK_ENABLE_DDR50 BIT(5)
-#define NVQUIRK_HAS_PADCALIB BIT(6)
+#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
+#define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8
+#define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10
+#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
+#define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200
+
+#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG 0x1b0
+#define SDHCI_TEGRA_DLLCAL_CALIBRATE BIT(31)
+
+#define SDHCI_TEGRA_VENDOR_DLLCAL_STA 0x1bc
+#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE BIT(31)
+
+#define SDHCI_VNDR_TUN_CTRL0_0 0x1c0
+#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000
+
+#define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4
+#define SDHCI_AUTO_CAL_START BIT(31)
+#define SDHCI_AUTO_CAL_ENABLE BIT(29)
+#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK 0x0000ffff
+
+#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL 0x1e0
+#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f
+#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7
+#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31)
+
+#define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec
+#define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31)
+
+#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
+#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
+#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
+#define NVQUIRK_ENABLE_SDR50 BIT(3)
+#define NVQUIRK_ENABLE_SDR104 BIT(4)
+#define NVQUIRK_ENABLE_DDR50 BIT(5)
+#define NVQUIRK_HAS_PADCALIB BIT(6)
+#define NVQUIRK_NEEDS_PAD_CONTROL BIT(7)
+#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
struct sdhci_tegra_soc_data {
const struct sdhci_pltfm_data *pdata;
u32 nvquirks;
};
+/* Magic pull up and pull down pad calibration offsets */
+struct sdhci_tegra_autocal_offsets {
+ u32 pull_up_3v3;
+ u32 pull_down_3v3;
+ u32 pull_up_3v3_timeout;
+ u32 pull_down_3v3_timeout;
+ u32 pull_up_1v8;
+ u32 pull_down_1v8;
+ u32 pull_up_1v8_timeout;
+ u32 pull_down_1v8_timeout;
+ u32 pull_up_sdr104;
+ u32 pull_down_sdr104;
+ u32 pull_up_hs400;
+ u32 pull_down_hs400;
+};
+
struct sdhci_tegra {
const struct sdhci_tegra_soc_data *soc_data;
struct gpio_desc *power_gpio;
bool ddr_signaling;
bool pad_calib_required;
+ bool pad_control_available;
struct reset_control *rst;
+ struct pinctrl *pinctrl_sdmmc;
+ struct pinctrl_state *pinctrl_state_3v3;
+ struct pinctrl_state *pinctrl_state_1v8;
+
+ struct sdhci_tegra_autocal_offsets autocal_offsets;
+ ktime_t last_calib;
+
+ u32 default_tap;
+ u32 default_trim;
+ u32 dqs_trim;
};
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
@@ -133,23 +193,149 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
}
}
+static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
+{
+ bool status;
+ u32 reg;
+
+ reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ status = !!(reg & SDHCI_CLOCK_CARD_EN);
+
+ if (status == enable)
+ return status;
+
+ if (enable)
+ reg |= SDHCI_CLOCK_CARD_EN;
+ else
+ reg &= ~SDHCI_CLOCK_CARD_EN;
+
+ sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);
+
+ return status;
+}
+
+static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ bool is_tuning_cmd = false;
+ bool clk_enabled;
+ u8 cmd;
+
+ if (reg == SDHCI_COMMAND) {
+ cmd = SDHCI_GET_CMD(val);
+ is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
+ cmd == MMC_SEND_TUNING_BLOCK_HS200;
+ }
+
+ if (is_tuning_cmd)
+ clk_enabled = tegra_sdhci_configure_card_clk(host, false);
+
+ writew(val, host->ioaddr + reg);
+
+ if (is_tuning_cmd) {
+ udelay(1);
+ tegra_sdhci_configure_card_clk(host, clk_enabled);
+ }
+}
+
static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
{
return mmc_gpio_get_ro(host->mmc);
}
+static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ int has_1v8, has_3v3;
+
+ /*
+ * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
+ * voltage configuration in order to perform voltage switching. This
+ * means that valid pinctrl info is required on SDHCI instances capable
+ * of performing voltage switching. Whether or not an SDHCI instance is
+ * capable of voltage switching is determined based on the regulator.
+ */
+
+ if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
+ return true;
+
+ if (IS_ERR(host->mmc->supply.vqmmc))
+ return false;
+
+ has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
+ 1700000, 1950000);
+
+ has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
+ 2700000, 3600000);
+
+ if (has_1v8 == 1 && has_3v3 == 1)
+ return tegra_host->pad_control_available;
+
+ /* Fixed voltage, no pad control required. */
+ return true;
+}
+
+static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+ bool card_clk_enabled = false;
+ u32 reg;
+
+ /*
+ * Touching the tap values is a bit tricky on some SoC generations.
+ * The quirk enables a workaround for a glitch that sometimes occurs if
+ * the tap values are changed.
+ */
+
+ if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
+ card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
+
+ reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
+ reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
+ reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
+ sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
+
+ if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
+ card_clk_enabled) {
+ udelay(1);
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ tegra_sdhci_configure_card_clk(host, card_clk_enabled);
+ }
+}
+
+static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 val;
+
+ val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+
+ if (ios->enhanced_strobe)
+ val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+ else
+ val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+
+ sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+}
+
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
- u32 misc_ctrl, clk_ctrl;
+ u32 misc_ctrl, clk_ctrl, pad_ctrl;
sdhci_reset(host, mask);
if (!(mask & SDHCI_RESET_ALL))
return;
+ tegra_sdhci_set_tap(host, tegra_host->default_tap);
+
misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
@@ -158,15 +344,10 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
SDHCI_MISC_CTRL_ENABLE_DDR50 |
SDHCI_MISC_CTRL_ENABLE_SDR104);
- clk_ctrl &= ~SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE;
+ clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
+ SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);
- /*
- * If the board does not define a regulator for the SDHCI
- * IO voltage, then don't advertise support for UHS modes
- * even if the device supports it because the IO voltage
- * cannot be configured.
- */
- if (!IS_ERR(host->mmc->supply.vqmmc)) {
+ if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
/* Erratum: Enable SDHCI spec v3.00 support */
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
@@ -181,24 +362,237 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
}
+ clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;
+
sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
- if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
+ if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
+ pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
+ pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
+ pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
+ sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
+
tegra_host->pad_calib_required = true;
+ }
tegra_host->ddr_signaling = false;
}
-static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
+static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
{
u32 val;
- mdelay(1);
+ /*
+ * Enable or disable the additional I/O pad used by the drive strength
+ * calibration process.
+ */
+ val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
+
+ if (enable)
+ val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
+ else
+ val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
+
+ sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
+
+ if (enable)
+ usleep_range(1, 2);
+}
+
+static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
+ u16 pdpu)
+{
+ u32 reg;
+
+ reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
+ reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
+ reg |= pdpu;
+ sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
+}
+
+static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_tegra_autocal_offsets offsets =
+ tegra_host->autocal_offsets;
+ struct mmc_ios *ios = &host->mmc->ios;
+ bool card_clk_enabled;
+ u16 pdpu;
+ u32 reg;
+ int ret;
+
+ switch (ios->timing) {
+ case MMC_TIMING_UHS_SDR104:
+ pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
+ break;
+ case MMC_TIMING_MMC_HS400:
+ pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
+ break;
+ default:
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
+ pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
+ else
+ pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
+ }
+
+ tegra_sdhci_set_pad_autocal_offset(host, pdpu);
+
+ card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);
+
+ tegra_sdhci_configure_cal_pad(host, true);
+
+ reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
+ reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
+ sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
+
+ usleep_range(1, 2);
+ /* 10 ms timeout */
+ ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
+ reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
+ 1000, 10000);
+
+ tegra_sdhci_configure_cal_pad(host, false);
+
+ tegra_sdhci_configure_card_clk(host, card_clk_enabled);
+
+ if (ret) {
+ dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
+ pdpu = offsets.pull_down_1v8_timeout << 8 |
+ offsets.pull_up_1v8_timeout;
+ else
+ pdpu = offsets.pull_down_3v3_timeout << 8 |
+ offsets.pull_up_3v3_timeout;
+
+ /* Disable automatic calibration and use fixed offsets */
+ reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
+ reg &= ~SDHCI_AUTO_CAL_ENABLE;
+ sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
- val = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
- val |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
- sdhci_writel(host,val, SDHCI_TEGRA_AUTO_CAL_CONFIG);
+ tegra_sdhci_set_pad_autocal_offset(host, pdpu);
+ }
+}
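+/*
+ * Packing example (illustrative values): a pull-down offset of 0x7a and a
+ * pull-up offset of 0x76 yield pdpu = (0x7a << 8) | 0x76 = 0x7a76, which
+ * lands in the low 16 bits of SDHCI_TEGRA_AUTO_CAL_CONFIG selected by
+ * SDHCI_AUTO_CAL_PDPU_OFFSET_MASK.
+ */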
+
+static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_tegra_autocal_offsets *autocal =
+ &tegra_host->autocal_offsets;
+ int err;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-up-offset-3v3",
+ &autocal->pull_up_3v3);
+ if (err)
+ autocal->pull_up_3v3 = 0;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-down-offset-3v3",
+ &autocal->pull_down_3v3);
+ if (err)
+ autocal->pull_down_3v3 = 0;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-up-offset-1v8",
+ &autocal->pull_up_1v8);
+ if (err)
+ autocal->pull_up_1v8 = 0;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-down-offset-1v8",
+ &autocal->pull_down_1v8);
+ if (err)
+ autocal->pull_down_1v8 = 0;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-up-offset-3v3-timeout",
+ &autocal->pull_up_3v3_timeout);
+ if (err)
+ autocal->pull_up_3v3_timeout = 0;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-down-offset-3v3-timeout",
+ &autocal->pull_down_3v3_timeout);
+ if (err)
+ autocal->pull_down_3v3_timeout = 0;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-up-offset-1v8-timeout",
+ &autocal->pull_up_1v8_timeout);
+ if (err)
+ autocal->pull_up_1v8_timeout = 0;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-down-offset-1v8-timeout",
+ &autocal->pull_down_1v8_timeout);
+ if (err)
+ autocal->pull_down_1v8_timeout = 0;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-up-offset-sdr104",
+ &autocal->pull_up_sdr104);
+ if (err)
+ autocal->pull_up_sdr104 = autocal->pull_up_1v8;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-down-offset-sdr104",
+ &autocal->pull_down_sdr104);
+ if (err)
+ autocal->pull_down_sdr104 = autocal->pull_down_1v8;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-up-offset-hs400",
+ &autocal->pull_up_hs400);
+ if (err)
+ autocal->pull_up_hs400 = autocal->pull_up_1v8;
+
+ err = device_property_read_u32(host->mmc->parent,
+ "nvidia,pad-autocal-pull-down-offset-hs400",
+ &autocal->pull_down_hs400);
+ if (err)
+ autocal->pull_down_hs400 = autocal->pull_down_1v8;
+}
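+/*
+ * Example device-tree fragment (values and unit address are illustrative,
+ * not from a real board) for the optional properties parsed above; absent
+ * properties fall back to 0, and the SDR104/HS400 offsets default to the
+ * 1.8 V values:
+ *
+ * sdhci@700b0000 {
+ * nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>;
+ * nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>;
+ * nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>;
+ * nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>;
+ * };
+ */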
+
+static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);
+
+ /* 100 ms calibration interval is specified in the TRM */
+ if (ktime_to_ms(since_calib) > 100) {
+ tegra_sdhci_pad_autocalib(host);
+ tegra_host->last_calib = ktime_get();
+ }
+
+ sdhci_request(mmc, mrq);
+}
+
+static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ int err;
+
+ err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap",
+ &tegra_host->default_tap);
+ if (err)
+ tegra_host->default_tap = 0;
+
+ err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim",
+ &tegra_host->default_trim);
+ if (err)
+ tegra_host->default_trim = 0;
+
+ err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim",
+ &tegra_host->dqs_trim);
+ if (err)
+ tegra_host->dqs_trim = 0x11;
}
static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
@@ -237,34 +631,82 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
}
}
-static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
- unsigned timing)
+static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
- if (timing == MMC_TIMING_UHS_DDR50 ||
- timing == MMC_TIMING_MMC_DDR52)
- tegra_host->ddr_signaling = true;
-
- sdhci_set_uhs_signaling(host, timing);
+ return clk_round_rate(pltfm_host->clk, UINT_MAX);
}
-static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
+static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ u32 val;
- return clk_round_rate(pltfm_host->clk, UINT_MAX);
+ val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
+ val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
+ val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
+ sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
}
-static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
+static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
{
u32 reg;
+ int err;
+
+ reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
+ reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
+ sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
+
+ /* 1 ms sleep, 5 ms timeout */
+ err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
+ reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
+ 1000, 5000);
+ if (err)
+ dev_err(mmc_dev(host->mmc),
+ "HS400 delay line calibration timed out\n");
+}
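+/*
+ * Note on the poll above: readl_poll_timeout(addr, val, cond, sleep_us,
+ * timeout_us) from <linux/iopoll.h> rereads *addr into val, sleeping
+ * sleep_us between reads, until cond becomes true or timeout_us elapses,
+ * returning 0 on success and -ETIMEDOUT on timeout, which is the case the
+ * dev_err() above reports.
+ */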
- reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
- reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
- reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
- sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
+static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
+ unsigned timing)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ bool set_default_tap = false;
+ bool set_dqs_trim = false;
+ bool do_hs400_dll_cal = false;
+
+ switch (timing) {
+ case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /* Don't set default tap on tunable modes. */
+ break;
+ case MMC_TIMING_MMC_HS400:
+ set_dqs_trim = true;
+ do_hs400_dll_cal = true;
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_UHS_DDR50:
+ tegra_host->ddr_signaling = true;
+ set_default_tap = true;
+ break;
+ default:
+ set_default_tap = true;
+ break;
+ }
+
+ sdhci_set_uhs_signaling(host, timing);
+
+ tegra_sdhci_pad_autocalib(host);
+
+ if (set_default_tap)
+ tegra_sdhci_set_tap(host, tegra_host->default_tap);
+
+ if (set_dqs_trim)
+ tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
+
+ if (do_hs400_dll_cal)
+ tegra_sdhci_hs400_dll_cal(host);
}
static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
@@ -301,6 +743,89 @@ static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
return mmc_send_tuning(host->mmc, opcode, NULL);
}
+static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ if (!tegra_host->pad_control_available)
+ return 0;
+
+ if (voltage == MMC_SIGNAL_VOLTAGE_180) {
+ ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
+ tegra_host->pinctrl_state_1v8);
+ if (ret < 0)
+ dev_err(mmc_dev(host->mmc),
+ "setting 1.8V failed, ret: %d\n", ret);
+ } else {
+ ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
+ tegra_host->pinctrl_state_3v3);
+ if (ret < 0)
+ dev_err(mmc_dev(host->mmc),
+ "setting 3.3V failed, ret: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ int ret = 0;
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage);
+ if (ret < 0)
+ return ret;
+ ret = sdhci_start_signal_voltage_switch(mmc, ios);
+ } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ ret = sdhci_start_signal_voltage_switch(mmc, ios);
+ if (ret < 0)
+ return ret;
+ ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage);
+ }
+
+ if (tegra_host->pad_calib_required)
+ tegra_sdhci_pad_autocalib(host);
+
+ return ret;
+}
+
+static int tegra_sdhci_init_pinctrl_info(struct device *dev,
+ struct sdhci_tegra *tegra_host)
+{
+ tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
+ if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
+ dev_dbg(dev, "No pinctrl info, err: %ld\n",
+ PTR_ERR(tegra_host->pinctrl_sdmmc));
+ return -1;
+ }
+
+ tegra_host->pinctrl_state_3v3 =
+ pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
+ if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
+ dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
+ PTR_ERR(tegra_host->pinctrl_state_3v3));
+ return -1;
+ }
+
+ tegra_host->pinctrl_state_1v8 =
+ pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
+ if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
+ dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
+ PTR_ERR(tegra_host->pinctrl_state_1v8));
+ return -1;
+ }
+
+ tegra_host->pad_control_available = true;
+
+ return 0;
+}
+
static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -421,6 +946,19 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
.pdata = &sdhci_tegra124_pdata,
};
+static const struct sdhci_ops tegra210_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+ .read_w = tegra_sdhci_readw,
+ .write_w = tegra210_sdhci_writew,
+ .write_l = tegra_sdhci_writel,
+ .set_clock = tegra_sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = tegra_sdhci_reset,
+ .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
+ .voltage_switch = tegra_sdhci_voltage_switch,
+ .get_max_clock = tegra_sdhci_get_max_clock,
+};
+
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -429,11 +967,28 @@ static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
- .ops = &tegra114_sdhci_ops,
+ .ops = &tegra210_sdhci_ops,
};
static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
.pdata = &sdhci_tegra210_pdata,
+ .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
+ NVQUIRK_HAS_PADCALIB |
+ NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
+ NVQUIRK_ENABLE_SDR50 |
+ NVQUIRK_ENABLE_SDR104,
+};
+
+static const struct sdhci_ops tegra186_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+ .set_clock = tegra_sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = tegra_sdhci_reset,
+ .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
+ .voltage_switch = tegra_sdhci_voltage_switch,
+ .get_max_clock = tegra_sdhci_get_max_clock,
};
static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
@@ -452,11 +1007,16 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
* But it is not supported as of now.
*/
SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
- .ops = &tegra114_sdhci_ops,
+ .ops = &tegra186_sdhci_ops,
};
static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
.pdata = &sdhci_tegra186_pdata,
+ .nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
+ NVQUIRK_HAS_PADCALIB |
+ NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
+ NVQUIRK_ENABLE_SDR50 |
+ NVQUIRK_ENABLE_SDR104,
};
static const struct of_device_id sdhci_tegra_dt_match[] = {
@@ -493,8 +1053,23 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
tegra_host = sdhci_pltfm_priv(pltfm_host);
tegra_host->ddr_signaling = false;
tegra_host->pad_calib_required = false;
+ tegra_host->pad_control_available = false;
tegra_host->soc_data = soc_data;
+ if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
+ rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
+ if (rc == 0)
+ host->mmc_host_ops.start_signal_voltage_switch =
+ sdhci_tegra_start_signal_voltage_switch;
+ }
+
+ /* Hook to periodically rerun pad calibration */
+ if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
+ host->mmc_host_ops.request = tegra_sdhci_request;
+
+ host->mmc_host_ops.hs400_enhanced_strobe =
+ tegra_sdhci_hs400_enhanced_strobe;
+
rc = mmc_of_parse(host->mmc);
if (rc)
goto err_parse_dt;
@@ -502,6 +1077,10 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
host->mmc->caps |= MMC_CAP_1_8V_DDR;
+ tegra_sdhci_parse_pad_autocal_dt(host);
+
+ tegra_sdhci_parse_tap_and_trim(host);
+
tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
GPIOD_OUT_HIGH);
if (IS_ERR(tegra_host->power_gpio)) {
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index c335052d0c02..5956e90380e8 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -660,8 +660,8 @@ static int get_dt_pad_ctrl_data(struct sdhci_host *host,
return 0;
if (of_address_to_resource(np, 1, &iomem)) {
- dev_err(mmc_dev(host->mmc), "Unable to find SoC PAD ctrl register address for %s\n",
- np->name);
+ dev_err(mmc_dev(host->mmc), "Unable to find SoC PAD ctrl register address for %pOFn\n",
+ np);
return -EINVAL;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 1b3fbd9bd5c5..99bdae53fa2e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -123,6 +123,29 @@ EXPORT_SYMBOL_GPL(sdhci_dumpregs);
* *
\*****************************************************************************/
+static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
+{
+ u16 ctrl2;
+
+ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (ctrl2 & SDHCI_CTRL_V4_MODE)
+ return;
+
+ ctrl2 |= SDHCI_CTRL_V4_MODE;
+ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
+}
+
+/*
+ * This can be called before sdhci_add_host() by a vendor's host controller
+ * driver to enable v4 mode if supported.
+ */
+void sdhci_enable_v4_mode(struct sdhci_host *host)
+{
+ host->v4_mode = true;
+ sdhci_do_enable_v4_mode(host);
+}
+EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
+
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
return cmd->data || cmd->flags & MMC_RSP_BUSY;
@@ -243,6 +266,52 @@ static void sdhci_set_default_irqs(struct sdhci_host *host)
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
+static void sdhci_config_dma(struct sdhci_host *host)
+{
+ u8 ctrl;
+ u16 ctrl2;
+
+ if (host->version < SDHCI_SPEC_200)
+ return;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+ /*
+ * Always adjust the DMA selection as some controllers
+ * (e.g. JMicron) can't do PIO properly when the selection
+ * is ADMA.
+ */
+ ctrl &= ~SDHCI_CTRL_DMA_MASK;
+ if (!(host->flags & SDHCI_REQ_USE_DMA))
+ goto out;
+
+ /* Note if DMA Select is zero then SDMA is selected */
+ if (host->flags & SDHCI_USE_ADMA)
+ ctrl |= SDHCI_CTRL_ADMA32;
+
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+ /*
+ * In v4 mode, all supported DMA modes can use 64-bit
+ * addressing if the controller supports 64-bit system
+ * addressing; otherwise only ADMA can.
+ */
+ if (host->v4_mode) {
+ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
+ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
+ } else if (host->flags & SDHCI_USE_ADMA) {
+ /*
+ * Don't need to undo SDHCI_CTRL_ADMA32 in order to
+ * set SDHCI_CTRL_ADMA64.
+ */
+ ctrl |= SDHCI_CTRL_ADMA64;
+ }
+ }
+
+out:
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
static void sdhci_init(struct sdhci_host *host, int soft)
{
struct mmc_host *mmc = host->mmc;
@@ -252,6 +321,9 @@ static void sdhci_init(struct sdhci_host *host, int soft)
else
sdhci_do_reset(host, SDHCI_RESET_ALL);
+ if (host->v4_mode)
+ sdhci_do_enable_v4_mode(host);
+
sdhci_set_default_irqs(host);
host->cqe_on = false;
@@ -554,10 +626,10 @@ static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
local_irq_restore(*flags);
}
-static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
- dma_addr_t addr, int len, unsigned cmd)
+void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
+ dma_addr_t addr, int len, unsigned int cmd)
{
- struct sdhci_adma2_64_desc *dma_desc = desc;
+ struct sdhci_adma2_64_desc *dma_desc = *desc;
/* 32-bit and 64-bit descriptors have these members in same position */
dma_desc->cmd = cpu_to_le16(cmd);
@@ -566,6 +638,19 @@ static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
if (host->flags & SDHCI_USE_64_BIT_DMA)
dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
+
+ *desc += host->desc_sz;
+}
+EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
+
+static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
+ void **desc, dma_addr_t addr,
+ int len, unsigned int cmd)
+{
+ if (host->ops->adma_write_desc)
+ host->ops->adma_write_desc(host, desc, addr, len, cmd);
+ else
+ sdhci_adma_write_desc(host, desc, addr, len, cmd);
}
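+/*
+ * Sketch of how a vendor driver might use the new hook (the driver name,
+ * boundary, and ops below are hypothetical): emit the standard descriptor
+ * via the exported helper, splitting requests that cross an assumed 128 MB
+ * boundary the hardware cannot cross.
+ */
+static void foo_adma_write_desc(struct sdhci_host *host, void **desc,
+ dma_addr_t addr, int len, unsigned int cmd)
+{
+ int first = min_t(int, len, SZ_128M - (addr & (SZ_128M - 1)));
+
+ sdhci_adma_write_desc(host, desc, addr, first, cmd);
+ if (len > first)
+ sdhci_adma_write_desc(host, desc, addr + first, len - first, cmd);
+}
+
+static const struct sdhci_ops foo_sdhci_ops = {
+ .adma_write_desc = foo_adma_write_desc,
+};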
static void sdhci_adma_mark_end(void *desc)
@@ -618,28 +703,24 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
}
/* tran, valid */
- sdhci_adma_write_desc(host, desc, align_addr, offset,
- ADMA2_TRAN_VALID);
+ __sdhci_adma_write_desc(host, &desc, align_addr,
+ offset, ADMA2_TRAN_VALID);
BUG_ON(offset > 65536);
align += SDHCI_ADMA2_ALIGN;
align_addr += SDHCI_ADMA2_ALIGN;
- desc += host->desc_sz;
-
addr += offset;
len -= offset;
}
BUG_ON(len > 65536);
- if (len) {
- /* tran, valid */
- sdhci_adma_write_desc(host, desc, addr, len,
- ADMA2_TRAN_VALID);
- desc += host->desc_sz;
- }
+ /* tran, valid */
+ if (len)
+ __sdhci_adma_write_desc(host, &desc, addr, len,
+ ADMA2_TRAN_VALID);
/*
* If this triggers then we have a calculation bug
@@ -656,7 +737,7 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
}
} else {
/* Add a terminating entry - nop, end, valid */
- sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
+ __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
}
}
@@ -701,7 +782,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
}
}
-static u32 sdhci_sdma_address(struct sdhci_host *host)
+static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
if (host->bounce_buffer)
return host->bounce_addr;
@@ -709,6 +790,17 @@ static u32 sdhci_sdma_address(struct sdhci_host *host)
return sg_dma_address(host->data->sg);
}
+static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
+{
+ if (host->v4_mode) {
+ sdhci_writel(host, addr, SDHCI_ADMA_ADDRESS);
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ sdhci_writel(host, (u64)addr >> 32, SDHCI_ADMA_ADDRESS_HI);
+ } else {
+ sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
+ }
+}
+
static unsigned int sdhci_target_timeout(struct sdhci_host *host,
struct mmc_command *cmd,
struct mmc_data *data)
@@ -876,7 +968,6 @@ static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
- u8 ctrl;
struct mmc_data *data = cmd->data;
host->data_timeout = 0;
@@ -968,30 +1059,11 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
SDHCI_ADMA_ADDRESS_HI);
} else {
WARN_ON(sg_cnt != 1);
- sdhci_writel(host, sdhci_sdma_address(host),
- SDHCI_DMA_ADDRESS);
+ sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
}
}
- /*
- * Always adjust the DMA selection as some controllers
- * (e.g. JMicron) can't do PIO properly when the selection
- * is ADMA.
- */
- if (host->version >= SDHCI_SPEC_200) {
- ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
- ctrl &= ~SDHCI_CTRL_DMA_MASK;
- if ((host->flags & SDHCI_REQ_USE_DMA) &&
- (host->flags & SDHCI_USE_ADMA)) {
- if (host->flags & SDHCI_USE_64_BIT_DMA)
- ctrl |= SDHCI_CTRL_ADMA64;
- else
- ctrl |= SDHCI_CTRL_ADMA32;
- } else {
- ctrl |= SDHCI_CTRL_SDMA;
- }
- sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
- }
+ sdhci_config_dma(host);
if (!(host->flags & SDHCI_REQ_USE_DMA)) {
int flags;
@@ -1010,7 +1082,19 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
/* Set the DMA boundary value and block size */
sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
SDHCI_BLOCK_SIZE);
- sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+
+ /*
+ * From version 4.10 onward, if v4 mode is enabled, a 32-bit block
+ * count can be used; in that case the 16-bit block count register
+ * must be 0.
+ */
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
+ (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
+ if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
+ sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
+ sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
+ } else {
+ sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ }
}
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
@@ -1020,6 +1104,43 @@ static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
!mrq->cap_cmd_during_tfr;
}
+static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
+ struct mmc_command *cmd,
+ u16 *mode)
+{
+ bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
+ (cmd->opcode != SD_IO_RW_EXTENDED);
+ bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
+ u16 ctrl2;
+
+ /*
+ * In case of Version 4.10 or later, use of 'Auto CMD Auto
+ * Select' is recommended rather than use of 'Auto CMD12
+ * Enable' or 'Auto CMD23 Enable'.
+ */
+ if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
+ *mode |= SDHCI_TRNS_AUTO_SEL;
+
+ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (use_cmd23)
+ ctrl2 |= SDHCI_CMD23_ENABLE;
+ else
+ ctrl2 &= ~SDHCI_CMD23_ENABLE;
+ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
+
+ return;
+ }
+
+ /*
+ * If we are sending CMD23, CMD12 never gets sent
+ * on successful completion (so no Auto-CMD12).
+ */
+ if (use_cmd12)
+ *mode |= SDHCI_TRNS_AUTO_CMD12;
+ else if (use_cmd23)
+ *mode |= SDHCI_TRNS_AUTO_CMD23;
+}
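+/*
+ * Example of the selection above: a multi-block write with a preceding
+ * SET_BLOCK_COUNT (mrq->sbc set, SDHCI_AUTO_CMD23 available) gets
+ * SDHCI_TRNS_AUTO_SEL and sets SDHCI_CMD23_ENABLE in HOST_CONTROL2 on a
+ * v4.10+ host; on an older host the same request falls back to
+ * SDHCI_TRNS_AUTO_CMD23.
+ */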
+
static void sdhci_set_transfer_mode(struct sdhci_host *host,
struct mmc_command *cmd)
{
@@ -1048,17 +1169,9 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
- /*
- * If we are sending CMD23, CMD12 never gets sent
- * on successful completion (so no Auto-CMD12).
- */
- if (sdhci_auto_cmd12(host, cmd->mrq) &&
- (cmd->opcode != SD_IO_RW_EXTENDED))
- mode |= SDHCI_TRNS_AUTO_CMD12;
- else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
- mode |= SDHCI_TRNS_AUTO_CMD23;
+ sdhci_auto_cmd_select(host, cmd, &mode);
+ if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
- }
}
if (data->flags & MMC_DATA_READ)
@@ -1630,7 +1743,7 @@ EXPORT_SYMBOL_GPL(sdhci_set_power);
* *
\*****************************************************************************/
-static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host;
int present;
@@ -1669,6 +1782,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
+EXPORT_SYMBOL_GPL(sdhci_request);
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
@@ -2219,7 +2333,7 @@ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
}
EXPORT_SYMBOL_GPL(sdhci_send_tuning);
-static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
int i;
@@ -2236,13 +2350,13 @@ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
mmc_hostname(host->mmc));
sdhci_abort_tuning(host, opcode);
- return;
+ return -ETIMEDOUT;
}
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
if (ctrl & SDHCI_CTRL_TUNED_CLK)
- return; /* Success! */
+ return 0; /* Success! */
break;
}
@@ -2254,6 +2368,7 @@ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
mmc_hostname(host->mmc));
sdhci_reset_tuning(host);
+ return -EAGAIN;
}
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
@@ -2315,7 +2430,7 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
sdhci_start_tuning(host);
- __sdhci_execute_tuning(host, opcode);
+ host->tuning_err = __sdhci_execute_tuning(host, opcode);
sdhci_end_tuning(host);
out:
@@ -2802,7 +2917,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
* some controllers are faulty, don't trust them.
*/
if (intmask & SDHCI_INT_DMA_END) {
- u32 dmastart, dmanow;
+ dma_addr_t dmastart, dmanow;
dmastart = sdhci_sdma_address(host);
dmanow = dmastart + host->data->bytes_xfered;
@@ -2810,12 +2925,12 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
* Force update to the next DMA block boundary.
*/
dmanow = (dmanow &
- ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
+ ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
SDHCI_DEFAULT_BOUNDARY_SIZE;
host->data->bytes_xfered = dmanow - dmastart;
- DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
- dmastart, host->data->bytes_xfered, dmanow);
- sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
+ DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
+ &dmastart, host->data->bytes_xfered, &dmanow);
+ sdhci_set_sdma_addr(host, dmanow);
}
if (intmask & SDHCI_INT_DATA_END) {
@@ -3322,6 +3437,13 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
+ /*
+ * The DMA table descriptor count is calculated as the maximum
+ * number of segments times 2, to allow for an alignment
+ * descriptor for each segment, plus 1 for a nop end descriptor.
+ */
+ host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
+
return host;
}
@@ -3376,6 +3498,9 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
sdhci_do_reset(host, SDHCI_RESET_ALL);
+ if (host->v4_mode)
+ sdhci_do_enable_v4_mode(host);
+
of_property_read_u64(mmc_dev(host->mmc)->of_node,
"sdhci-caps-mask", &dt_caps_mask);
of_property_read_u64(mmc_dev(host->mmc)->of_node,
@@ -3470,6 +3595,19 @@ static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
return 0;
}
+static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
+{
+ /*
+ * According to SD Host Controller spec v4.10, bit[27] of the
+ * Capabilities register, added in version 4.10, indicates 64-bit
+ * System Address support for V4 mode.
+ */
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
+ return host->caps & SDHCI_CAN_64BIT_V4;
+
+ return host->caps & SDHCI_CAN_64BIT;
+}
+
int sdhci_setup_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
@@ -3506,7 +3644,7 @@ int sdhci_setup_host(struct sdhci_host *host)
override_timeout_clk = host->timeout_clk;
- if (host->version > SDHCI_SPEC_300) {
+ if (host->version > SDHCI_SPEC_420) {
pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
mmc_hostname(mmc), host->version);
}
@@ -3541,7 +3679,7 @@ int sdhci_setup_host(struct sdhci_host *host)
* SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
* implement.
*/
- if (host->caps & SDHCI_CAN_64BIT)
+ if (sdhci_can_64bit_dma(host))
host->flags |= SDHCI_USE_64_BIT_DMA;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
@@ -3559,32 +3697,30 @@ int sdhci_setup_host(struct sdhci_host *host)
}
}
- /* SDMA does not support 64-bit DMA */
- if (host->flags & SDHCI_USE_64_BIT_DMA)
+ /* SDMA does not support 64-bit DMA if v4 mode not set */
+ if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
host->flags &= ~SDHCI_USE_SDMA;
if (host->flags & SDHCI_USE_ADMA) {
dma_addr_t dma;
void *buf;
- /*
- * The DMA descriptor table size is calculated as the maximum
- * number of segments times 2, to allow for an alignment
- * descriptor for each segment, plus 1 for a nop end descriptor,
- * all multipled by the descriptor size.
- */
if (host->flags & SDHCI_USE_64_BIT_DMA) {
- host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
- SDHCI_ADMA2_64_DESC_SZ;
- host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
+ host->adma_table_sz = host->adma_table_cnt *
+ SDHCI_ADMA2_64_DESC_SZ(host);
+ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
} else {
- host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
+ host->adma_table_sz = host->adma_table_cnt *
SDHCI_ADMA2_32_DESC_SZ;
host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
}
host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
- buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ /*
+ * Use zalloc to zero the reserved high 32-bits of 128-bit
+ * descriptors so that they never need to be written.
+ */
+ buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
host->adma_table_sz, &dma, GFP_KERNEL);
if (!buf) {
pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
@@ -3708,10 +3844,13 @@ int sdhci_setup_host(struct sdhci_host *host)
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
host->flags |= SDHCI_AUTO_CMD12;
- /* Auto-CMD23 stuff only works in ADMA or PIO. */
+ /*
+ * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
+ * For v4 mode, SDMA may use Auto-CMD23 as well.
+ */
if ((host->version >= SDHCI_SPEC_300) &&
((host->flags & SDHCI_USE_ADMA) ||
- !(host->flags & SDHCI_USE_SDMA)) &&
+ !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
!(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
host->flags |= SDHCI_AUTO_CMD23;
DBG("Auto-CMD23 available\n");
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index f0bd36ce3817..b001cf4d3d7e 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -28,6 +28,7 @@
#define SDHCI_DMA_ADDRESS 0x00
#define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS
+#define SDHCI_32BIT_BLK_CNT SDHCI_DMA_ADDRESS
#define SDHCI_BLOCK_SIZE 0x04
#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
@@ -41,6 +42,7 @@
#define SDHCI_TRNS_BLK_CNT_EN 0x02
#define SDHCI_TRNS_AUTO_CMD12 0x04
#define SDHCI_TRNS_AUTO_CMD23 0x08
+#define SDHCI_TRNS_AUTO_SEL 0x0C
#define SDHCI_TRNS_READ 0x10
#define SDHCI_TRNS_MULTI 0x20
@@ -184,6 +186,9 @@
#define SDHCI_CTRL_DRV_TYPE_D 0x0030
#define SDHCI_CTRL_EXEC_TUNING 0x0040
#define SDHCI_CTRL_TUNED_CLK 0x0080
+#define SDHCI_CMD23_ENABLE 0x0800
+#define SDHCI_CTRL_V4_MODE 0x1000
+#define SDHCI_CTRL_64BIT_ADDR 0x2000
#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
#define SDHCI_CAPABILITIES 0x40
@@ -204,6 +209,7 @@
#define SDHCI_CAN_VDD_330 0x01000000
#define SDHCI_CAN_VDD_300 0x02000000
#define SDHCI_CAN_VDD_180 0x04000000
+#define SDHCI_CAN_64BIT_V4 0x08000000
#define SDHCI_CAN_64BIT 0x10000000
#define SDHCI_SUPPORT_SDR50 0x00000001
@@ -270,6 +276,9 @@
#define SDHCI_SPEC_100 0
#define SDHCI_SPEC_200 1
#define SDHCI_SPEC_300 2
+#define SDHCI_SPEC_400 3
+#define SDHCI_SPEC_410 4
+#define SDHCI_SPEC_420 5
/*
* End of controller registers.
@@ -305,8 +314,14 @@ struct sdhci_adma2_32_desc {
*/
#define SDHCI_ADMA2_DESC_ALIGN 8
-/* ADMA2 64-bit DMA descriptor size */
-#define SDHCI_ADMA2_64_DESC_SZ 12
+/*
+ * ADMA2 64-bit DMA descriptor size
+ * According to SD Host Controller spec v4.10, 64-bit addressing mode has
+ * two kinds of descriptor: 96-bit and 128-bit. When Host Version 4 Enable
+ * is set in the Host Control 2 register, the 128-bit descriptor is
+ * selected.
+ */
+#define SDHCI_ADMA2_64_DESC_SZ(host) ((host)->v4_mode ? 16 : 12)
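+/*
+ * For illustration only (the core keeps the 12-byte struct defined below
+ * and simply advances by desc_sz per entry): a 128-bit descriptor can be
+ * pictured as the 96-bit layout plus one reserved word, which stays zero
+ * because the table is allocated with dma_zalloc_coherent().
+ */
+struct sdhci_adma2_128_desc_example {
+ __le16 cmd; /* attribute bits: valid, end, int, act */
+ __le16 len;
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le32 reserved; /* upper 32 bits of the 128-bit layout */
+} __packed __aligned(4);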
/*
* ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
@@ -450,6 +465,13 @@ struct sdhci_host {
* obtainable timeout.
*/
#define SDHCI_QUIRK2_DISABLE_HW_TIMEOUT (1<<17)
+/*
+ * The 32-bit block count may not work for eMMC, where the upper bits of
+ * the CMD23 argument are used for other purposes; consequently, the 16-bit
+ * block count is supported by default. SDHCI_QUIRK2_USE_32BIT_BLK_CNT can
+ * be selected to use the 32-bit block count instead.
+ */
+#define SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1<<18)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -501,6 +523,7 @@ struct sdhci_host {
bool preset_enabled; /* Preset is enabled */
bool pending_reset; /* Cmd/data reset is pending */
bool irq_wake_enabled; /* IRQ wakeup is enabled */
+ bool v4_mode; /* Host Version 4 Enable */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
@@ -554,6 +577,7 @@ struct sdhci_host {
unsigned int tuning_count; /* Timer count for re-tuning */
unsigned int tuning_mode; /* Re-tuning mode supported by host */
+ unsigned int tuning_err; /* Error code for re-tuning */
#define SDHCI_TUNING_MODE_1 0
#define SDHCI_TUNING_MODE_2 1
#define SDHCI_TUNING_MODE_3 2
@@ -563,6 +587,9 @@ struct sdhci_host {
/* Host SDMA buffer boundary. */
u32 sdma_boundary;
+ /* Host ADMA table count */
+ u32 adma_table_cnt;
+
u64 data_timeout;
unsigned long private[0] ____cacheline_aligned;
@@ -603,6 +630,8 @@ struct sdhci_ops {
void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
void (*card_event)(struct sdhci_host *host);
void (*voltage_switch)(struct sdhci_host *host);
+ void (*adma_write_desc)(struct sdhci_host *host, void **desc,
+ dma_addr_t addr, int len, unsigned int cmd);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -725,6 +754,7 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
+void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
@@ -733,6 +763,8 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios);
void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
+void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
+ dma_addr_t addr, int len, unsigned int cmd);
#ifdef CONFIG_PM
int sdhci_suspend_host(struct sdhci_host *host);
@@ -747,6 +779,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
int *data_error);
void sdhci_dumpregs(struct sdhci_host *host);
+void sdhci_enable_v4_mode(struct sdhci_host *host);
void sdhci_start_tuning(struct sdhci_host *host);
void sdhci_end_tuning(struct sdhci_host *host);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 4c2a1f8ddbf3..81bd9afb0980 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MMCIF eMMC driver.
*
* Copyright (C) 2010 Renesas Solutions Corp.
* Yusuke Goda <yusuke.goda.sx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
/*
@@ -1573,6 +1570,6 @@ static struct platform_driver sh_mmcif_driver = {
module_platform_driver(sh_mmcif_driver);
MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 568349e1fbc2..279e326e397e 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -258,11 +258,16 @@ struct sunxi_mmc_cfg {
	/* Does DATA0 need to be masked while the clock is updated? */
bool mask_data0;
- /* hardware only supports new timing mode */
+	/*
+	 * The hardware only supports the new timing mode, either because
+	 * the clock controller lacks a mode switch, or because the MMC
+	 * controller is permanently configured in the new timing mode
+	 * without the NTSR mode switch.
+	 */
bool needs_new_timings;
- /* hardware can switch between old and new timing modes */
- bool has_timings_switch;
+ /* clock hardware can switch between old and new timing modes */
+ bool ccu_has_timings_switch;
};
struct sunxi_mmc_host {
@@ -787,7 +792,7 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
clock <<= 1;
}
- if (host->use_new_timings && host->cfg->has_timings_switch) {
+ if (host->use_new_timings && host->cfg->ccu_has_timings_switch) {
ret = sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true);
if (ret) {
dev_err(mmc_dev(mmc),
@@ -822,6 +827,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
/* update card clock rate to account for internal divider */
rate /= div;
+ /*
+ * Configure the controller to use the new timing mode if needed.
+ * On controllers that only support the new timing mode, such as
+ * the eMMC controller on the A64, this register does not exist,
+ * and any writes to it are ignored.
+ */
if (host->use_new_timings) {
/* Don't touch the delay bits */
rval = mmc_readl(host, REG_SD_NTSR);
@@ -1145,7 +1156,7 @@ static const struct sunxi_mmc_cfg sun8i_a83t_emmc_cfg = {
.idma_des_size_bits = 16,
.clk_delays = sunxi_mmc_clk_delays,
.can_calibrate = false,
- .has_timings_switch = true,
+ .ccu_has_timings_switch = true,
};
static const struct sunxi_mmc_cfg sun9i_a80_cfg = {
@@ -1166,6 +1177,7 @@ static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = {
.idma_des_size_bits = 13,
.clk_delays = NULL,
.can_calibrate = true,
+ .needs_new_timings = true,
};
static const struct of_device_id sunxi_mmc_of_match[] = {
@@ -1351,7 +1363,7 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
goto error_free_host;
}
- if (host->cfg->has_timings_switch) {
+ if (host->cfg->ccu_has_timings_switch) {
/*
* Supports both old and new timing modes.
* Try setting the clk to new timing mode.
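The rename clarifies the probe-time decision: only when the clock controller
itself has the mode switch does the driver ask the CCU to flip it; controllers
hard-wired to the new mode just set the flag. A rough sketch of the resulting
selection logic (simplified from the probe path above):

	if (host->cfg->ccu_has_timings_switch) {
		/* both modes available: prefer new timings via the CCU */
		if (!sunxi_ccu_set_mmc_timing_mode(host->clk_mmc, true))
			host->use_new_timings = true;
	} else if (host->cfg->needs_new_timings) {
		/* controller is permanently in the new timing mode */
		host->use_new_timings = true;
	}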
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index a3d8380ab480..b6644ce296b2 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -336,7 +336,8 @@ static unsigned int tifm_sd_op_flags(struct mmc_command *cmd)
rc |= TIFM_MMCSD_RSP_R0;
break;
case MMC_RSP_R1B:
- rc |= TIFM_MMCSD_RSP_BUSY; // deliberate fall-through
+ rc |= TIFM_MMCSD_RSP_BUSY;
+ /* fall-through */
case MMC_RSP_R1:
rc |= TIFM_MMCSD_RSP_R1;
break;
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 43a2ea5cff24..93e83ad25976 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the MMC / SD / SDIO cell found in:
*
@@ -7,12 +8,9 @@
* Copyright (C) 2017 Horms Solutions, Simon Horman
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
@@ -23,6 +21,76 @@
#include "tmio_mmc.h"
+/* Registers specific to this variant */
+#define CTL_SDIO_REGS 0x100
+#define CTL_CLK_AND_WAIT_CTL 0x138
+#define CTL_RESET_SDIO 0x1e0
+
+static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
+{
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ usleep_range(10000, 11000);
+ sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
+ usleep_range(10000, 11000);
+}
+
+static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
+{
+ sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
+ usleep_range(10000, 11000);
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ usleep_range(10000, 11000);
+}
+
+static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
+ unsigned int new_clock)
+{
+ unsigned int divisor;
+ u32 clk = 0;
+ int clk_sel;
+
+ if (new_clock == 0) {
+ tmio_mmc_clk_stop(host);
+ return;
+ }
+
+ divisor = host->pdata->hclk / new_clock;
+
+ /* bit7 set: 1/512, ... bit0 set: 1/4, all bits clear: 1/2 */
+ clk_sel = (divisor <= 1);
+ clk = clk_sel ? 0 : (roundup_pow_of_two(divisor) >> 2);
+
+ host->pdata->set_clk_div(host->pdev, clk_sel);
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
+ usleep_range(10000, 11000);
+
+ tmio_mmc_clk_start(host);
+}
+
+static void tmio_mmc_reset(struct tmio_mmc_host *host)
+{
+ /* FIXME - should we set stop clock reg here */
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
+ sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
+ usleep_range(10000, 11000);
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
+ sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
+ usleep_range(10000, 11000);
+
+ if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
+ sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
+ }
+}
+
#ifdef CONFIG_PM_SLEEP
static int tmio_mmc_suspend(struct device *dev)
{
@@ -90,8 +158,6 @@ static int tmio_mmc_probe(struct platform_device *pdev)
goto cell_disable;
}
- pdata->flags |= TMIO_MMC_HAVE_HIGH_REG;
-
host = tmio_mmc_host_alloc(pdev, pdata);
if (IS_ERR(host)) {
ret = PTR_ERR(host);
@@ -100,6 +166,8 @@ static int tmio_mmc_probe(struct platform_device *pdev)
/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
host->bus_shift = resource_size(res) >> 10;
+ host->set_clock = tmio_mmc_set_clock;
+ host->reset = tmio_mmc_reset;
host->mmc->f_max = pdata->hclk;
host->mmc->f_min = pdata->hclk / 512;
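The divider selection in tmio_mmc_set_clock() above is easiest to follow with
a concrete number; assuming, say, hclk = 24 MHz and a requested card clock of
400 kHz:

	/*
	 * divisor = 24000000 / 400000 = 60
	 * clk_sel = (60 <= 1) = 0, so the external divider stays off
	 * clk     = roundup_pow_of_two(60) >> 2 = 64 >> 2 = 0x10 (bit4)
	 * bit4 selects 1/64, giving 24 MHz / 64 = 375 kHz <= 400 kHz
	 */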
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 5d141f79e175..1e317027bf53 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the MMC / SD / SDIO cell found in:
*
@@ -8,11 +9,6 @@
* Copyright (C) 2016-17 Horms Solutions, Simon Horman
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#ifndef TMIO_MMC_H
@@ -47,9 +43,6 @@
#define CTL_RESET_SD 0xe0
#define CTL_VERSION 0xe2
#define CTL_SDIF_MODE 0xe6
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
/* Definitions for values the CTL_STOP_INTERNAL_ACTION register can take */
#define TMIO_STOP_STP BIT(0)
@@ -133,7 +126,6 @@ struct tmio_mmc_host {
/* Callbacks for clock / power control */
void (*set_pwr)(struct platform_device *host, int state);
- void (*set_clk_div)(struct platform_device *host, int state);
/* pio related stuff */
struct scatterlist *sg_ptr;
@@ -146,7 +138,7 @@ struct tmio_mmc_host {
struct tmio_mmc_data *pdata;
/* DMA support */
- bool force_pio;
+ bool dma_on;
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
struct tasklet_struct dma_issue;
@@ -170,14 +162,14 @@ struct tmio_mmc_host {
/* Mandatory callback */
int (*clk_enable)(struct tmio_mmc_host *host);
+ void (*set_clock)(struct tmio_mmc_host *host, unsigned int clock);
/* Optional callbacks */
- unsigned int (*clk_update)(struct tmio_mmc_host *host,
- unsigned int new_clock);
void (*clk_disable)(struct tmio_mmc_host *host);
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
+ void (*reset)(struct tmio_mmc_host *host);
void (*hw_reset)(struct tmio_mmc_host *host);
void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap);
bool (*check_scc_error)(struct tmio_mmc_host *host);
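After this reshuffle, set_clock joins clk_enable as a mandatory callback,
while reset is optional (the core falls back to its own tmio_mmc_reset() when
it is left NULL, as the core changes below show). A variant driver would now
wire itself up roughly like this (the foo_* callbacks are hypothetical):

	host->clk_enable  = foo_clk_enable;	/* mandatory */
	host->set_clock   = foo_set_clock;	/* mandatory, replaces clk_update */
	host->clk_disable = foo_clk_disable;	/* optional */
	host->reset       = foo_reset;		/* optional */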
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 261b4d62d2b1..8d64f6196f33 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the MMC / SD / SDIO IP found in:
*
@@ -10,10 +11,6 @@
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This driver draws mainly on scattered spec sheets, Reverse engineering
* of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
* support). (Further 4 bit support from a later datasheet).
@@ -160,100 +157,18 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
}
}
-static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
-{
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-
- /* HW engineers overrode docs: no sleep needed on R-Car2+ */
- if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- usleep_range(10000, 11000);
-
- if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
- sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
- usleep_range(10000, 11000);
- }
-}
-
-static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
-{
- if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
- sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
- usleep_range(10000, 11000);
- }
-
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-
- /* HW engineers overrode docs: no sleep needed on R-Car2+ */
- if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- usleep_range(10000, 11000);
-}
-
-static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
- unsigned int new_clock)
-{
- u32 clk = 0, clock;
-
- if (new_clock == 0) {
- tmio_mmc_clk_stop(host);
- return;
- }
- /*
- * Both HS400 and HS200/SD104 set 200MHz, but some devices need to
- * set 400MHz to distinguish the CPG settings in HS400.
- */
- if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
- host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400 &&
- new_clock == 200000000)
- new_clock = 400000000;
-
- if (host->clk_update)
- clock = host->clk_update(host, new_clock) / 512;
- else
- clock = host->mmc->f_min;
-
- for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
- clock <<= 1;
-
- /* 1/1 clock is option */
- if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
- ((clk >> 22) & 0x1)) {
- if (!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400))
- clk |= 0xff;
- else
- clk &= ~0xff;
- }
-
- if (host->set_clk_div)
- host->set_clk_div(host->pdev, (clk >> 22) & 1);
-
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
- if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- usleep_range(10000, 11000);
-
- tmio_mmc_clk_start(host);
-}
-
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
/* FIXME - should we set stop clock reg here */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
- if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
- sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
usleep_range(10000, 11000);
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
- if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
- sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
usleep_range(10000, 11000);
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
}
-
}
static void tmio_mmc_reset_work(struct work_struct *work)
@@ -294,7 +209,7 @@ static void tmio_mmc_reset_work(struct work_struct *work)
spin_unlock_irqrestore(&host->lock, flags);
- tmio_mmc_reset(host);
+ host->reset(host);
/* Ready for new calls */
host->mrq = NULL;
@@ -446,7 +361,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
unsigned int count;
unsigned long flags;
- if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
+ if (host->dma_on) {
pr_err("PIO IRQ in DMA mode!\n");
return;
} else if (!data) {
@@ -518,7 +433,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
*/
if (data->flags & MMC_DATA_READ) {
- if (host->chan_rx && !host->force_pio)
+ if (host->dma_on)
tmio_mmc_check_bounce_buffer(host);
dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
host->mrq);
@@ -555,7 +470,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
stat & TMIO_STAT_TXUNDERRUN)
data->error = -EILSEQ;
- if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
+ if (host->dma_on && (data->flags & MMC_DATA_WRITE)) {
u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
bool done = false;
@@ -579,7 +494,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
tmio_mmc_dataend_dma(host);
}
- } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
+ } else if (host->dma_on && (data->flags & MMC_DATA_READ)) {
tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
tmio_mmc_dataend_dma(host);
} else {
@@ -632,7 +547,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
*/
if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
if (host->data->flags & MMC_DATA_READ) {
- if (host->force_pio || !host->chan_rx) {
+ if (!host->dma_on) {
tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
} else {
tmio_mmc_disable_mmc_irqs(host,
@@ -640,7 +555,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
tasklet_schedule(&host->dma_issue);
}
} else {
- if (host->force_pio || !host->chan_tx) {
+ if (!host->dma_on) {
tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
} else {
tmio_mmc_disable_mmc_irqs(host,
@@ -770,7 +685,7 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
tmio_mmc_init_sg(host, data);
host->data = data;
- host->force_pio = false;
+ host->dma_on = false;
/* Set transfer length / blocksize */
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
@@ -919,8 +834,8 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
if (mrq->cmd->error || (mrq->data && mrq->data->error))
tmio_mmc_abort_dma(host);
- if (host->check_scc_error)
- host->check_scc_error(host);
+ if (host->check_scc_error && host->check_scc_error(host))
+ mrq->cmd->error = -EILSEQ;
/* If SET_BLOCK_COUNT, continue with main command */
if (host->mrq && !mrq->cmd->error) {
@@ -1043,15 +958,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
tmio_mmc_power_off(host);
- tmio_mmc_clk_stop(host);
+ host->set_clock(host, 0);
break;
case MMC_POWER_UP:
tmio_mmc_power_on(host, ios->vdd);
- tmio_mmc_set_clock(host, ios->clock);
+ host->set_clock(host, ios->clock);
tmio_mmc_set_bus_width(host, ios->bus_width);
break;
case MMC_POWER_ON:
- tmio_mmc_set_clock(host, ios->clock);
+ host->set_clock(host, ios->clock);
tmio_mmc_set_bus_width(host, ios->bus_width);
break;
}
@@ -1237,7 +1152,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
int ret;
/*
- * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
+ * Check the sanity of mmc->f_min to prevent host->set_clock() from
* looping forever...
*/
if (mmc->f_min == 0)
@@ -1247,7 +1162,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
_host->write16_hook = NULL;
_host->set_pwr = pdata->set_pwr;
- _host->set_clk_div = pdata->set_clk_div;
ret = tmio_mmc_init_ocr(_host);
if (ret < 0)
@@ -1290,6 +1204,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
mmc->caps & MMC_CAP_NEEDS_POLL ||
!mmc_card_is_removable(mmc));
+ if (!_host->reset)
+ _host->reset = tmio_mmc_reset;
+
/*
* On Gen2+, eMMC with NONREMOVABLE currently fails because native
* hotplug gets disabled. It seems RuntimePM related yet we need further
@@ -1310,8 +1227,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
- tmio_mmc_clk_stop(_host);
- tmio_mmc_reset(_host);
+ _host->set_clock(_host, 0);
+ _host->reset(_host);
_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
@@ -1394,7 +1311,7 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
if (host->clk_cache)
- tmio_mmc_clk_stop(host);
+ host->set_clock(host, 0);
tmio_mmc_clk_disable(host);
@@ -1411,11 +1328,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
{
struct tmio_mmc_host *host = dev_get_drvdata(dev);
- tmio_mmc_reset(host);
+ host->reset(host);
tmio_mmc_clk_enable(host);
if (host->clk_cache)
- tmio_mmc_set_clock(host, host->clk_cache);
+ host->set_clock(host, host->clk_cache);
if (host->native_hotplug)
tmio_mmc_enable_mmc_irqs(host,
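The force_pio -> dma_on inversion simplifies the IRQ-path checks: instead of
deriving the transfer mode from the channel pointers plus a negative flag, the
DMA start hook records the decision directly, leaving the flag false to fall
back to PIO. A sketch (foo_dma_setup() is a hypothetical helper):

	static void foo_dma_start(struct tmio_mmc_host *host,
				  struct mmc_data *data)
	{
		if (foo_dma_setup(host, data))
			return;		/* dma_on stays false: PIO path */

		host->dma_on = true;	/* IRQ handlers take the DMA path */
	}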
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
new file mode 100644
index 000000000000..91a2be41edf6
--- /dev/null
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2017-2018 Socionext Inc.
+// Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "tmio_mmc.h"
+
+#define UNIPHIER_SD_CLK_CTL_DIV1024 BIT(16)
+#define UNIPHIER_SD_CLK_CTL_DIV1 BIT(10)
+#define UNIPHIER_SD_CLKCTL_OFFEN BIT(9) // auto SDCLK stop
+#define UNIPHIER_SD_CC_EXT_MODE 0x1b0
+#define UNIPHIER_SD_CC_EXT_MODE_DMA BIT(1)
+#define UNIPHIER_SD_HOST_MODE 0x1c8
+#define UNIPHIER_SD_VOLT 0x1e4
+#define UNIPHIER_SD_VOLT_MASK GENMASK(1, 0)
+#define UNIPHIER_SD_VOLT_OFF 0
+#define UNIPHIER_SD_VOLT_330 1 // 3.3V signal
+#define UNIPHIER_SD_VOLT_180 2 // 1.8V signal
+#define UNIPHIER_SD_DMA_MODE 0x410
+#define UNIPHIER_SD_DMA_MODE_DIR_MASK GENMASK(17, 16)
+#define UNIPHIER_SD_DMA_MODE_DIR_TO_DEV 0
+#define UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV 1
+#define UNIPHIER_SD_DMA_MODE_WIDTH_MASK GENMASK(5, 4)
+#define UNIPHIER_SD_DMA_MODE_WIDTH_8 0
+#define UNIPHIER_SD_DMA_MODE_WIDTH_16 1
+#define UNIPHIER_SD_DMA_MODE_WIDTH_32 2
+#define UNIPHIER_SD_DMA_MODE_WIDTH_64 3
+#define UNIPHIER_SD_DMA_MODE_ADDR_INC BIT(0) // 1: inc, 0: fixed
+#define UNIPHIER_SD_DMA_CTL 0x414
+#define UNIPHIER_SD_DMA_CTL_START BIT(0) // start DMA (auto cleared)
+#define UNIPHIER_SD_DMA_RST 0x418
+#define UNIPHIER_SD_DMA_RST_CH1 BIT(9)
+#define UNIPHIER_SD_DMA_RST_CH0 BIT(8)
+#define UNIPHIER_SD_DMA_ADDR_L 0x440
+#define UNIPHIER_SD_DMA_ADDR_H 0x444
+
+/*
+ * The IP is extended to support various features: a built-in DMA engine,
+ * a 1/1024 divisor, etc.
+ */
+#define UNIPHIER_SD_CAP_EXTENDED_IP BIT(0)
+/* RX channel of the built-in DMA controller is broken (Pro5) */
+#define UNIPHIER_SD_CAP_BROKEN_DMA_RX BIT(1)
+
+struct uniphier_sd_priv {
+ struct tmio_mmc_data tmio_data;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pinstate_default;
+ struct pinctrl_state *pinstate_uhs;
+ struct clk *clk;
+ struct reset_control *rst;
+ struct reset_control *rst_br;
+ struct reset_control *rst_hw;
+ struct dma_chan *chan;
+ enum dma_data_direction dma_dir;
+ unsigned long clk_rate;
+ unsigned long caps;
+};
+
+static void *uniphier_sd_priv(struct tmio_mmc_host *host)
+{
+ return container_of(host->pdata, struct uniphier_sd_priv, tmio_data);
+}
+
+static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable)
+{
+ sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0);
+}
+
+/* external DMA engine */
+static void uniphier_sd_external_dma_issue(unsigned long arg)
+{
+ struct tmio_mmc_host *host = (void *)arg;
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+ uniphier_sd_dma_endisable(host, 1);
+ dma_async_issue_pending(priv->chan);
+}
+
+static void uniphier_sd_external_dma_callback(void *param,
+ const struct dmaengine_result *result)
+{
+ struct tmio_mmc_host *host = param;
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+ unsigned long flags;
+
+ dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
+ priv->dma_dir);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (result->result == DMA_TRANS_NOERROR) {
+ /*
+ * When the external DMA engine is enabled, strangely enough,
+ * the DATAEND flag can be asserted even if the DMA engine has
+ * not been kicked yet. Enable the TMIO_STAT_DATAEND irq only
+ * after we make sure the DMA engine finishes the transfer,
+ * hence, in this callback.
+ */
+ tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+ } else {
+ host->data->error = -ETIMEDOUT;
+ tmio_mmc_do_data_irq(host);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void uniphier_sd_external_dma_start(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+ enum dma_transfer_direction dma_tx_dir;
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ int sg_len;
+
+ if (!priv->chan)
+ goto force_pio;
+
+ if (data->flags & MMC_DATA_READ) {
+ priv->dma_dir = DMA_FROM_DEVICE;
+ dma_tx_dir = DMA_DEV_TO_MEM;
+ } else {
+ priv->dma_dir = DMA_TO_DEVICE;
+ dma_tx_dir = DMA_MEM_TO_DEV;
+ }
+
+ sg_len = dma_map_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
+ priv->dma_dir);
+ if (sg_len == 0)
+ goto force_pio;
+
+ desc = dmaengine_prep_slave_sg(priv->chan, host->sg_ptr, sg_len,
+ dma_tx_dir, DMA_CTRL_ACK);
+ if (!desc)
+ goto unmap_sg;
+
+ desc->callback_result = uniphier_sd_external_dma_callback;
+ desc->callback_param = host;
+
+ cookie = dmaengine_submit(desc);
+ if (cookie < 0)
+ goto unmap_sg;
+
+ host->dma_on = true;
+
+ return;
+
+unmap_sg:
+ dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
+ priv->dma_dir);
+force_pio:
+ uniphier_sd_dma_endisable(host, 0);
+}
+
+static void uniphier_sd_external_dma_enable(struct tmio_mmc_host *host,
+ bool enable)
+{
+}
+
+static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+ struct dma_chan *chan;
+
+ chan = dma_request_chan(mmc_dev(host->mmc), "rx-tx");
+ if (IS_ERR(chan)) {
+ dev_warn(mmc_dev(host->mmc),
+ "failed to request DMA channel. falling back to PIO\n");
+ return; /* just use PIO even for -EPROBE_DEFER */
+ }
+
+	/* this driver uses a single channel for both RX and TX */
+ priv->chan = chan;
+ host->chan_rx = chan;
+ host->chan_tx = chan;
+
+ tasklet_init(&host->dma_issue, uniphier_sd_external_dma_issue,
+ (unsigned long)host);
+}
+
+static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+ if (priv->chan)
+ dma_release_channel(priv->chan);
+}
+
+static void uniphier_sd_external_dma_abort(struct tmio_mmc_host *host)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+ uniphier_sd_dma_endisable(host, 0);
+
+ if (priv->chan)
+ dmaengine_terminate_sync(priv->chan);
+}
+
+static void uniphier_sd_external_dma_dataend(struct tmio_mmc_host *host)
+{
+ uniphier_sd_dma_endisable(host, 0);
+
+ tmio_mmc_do_data_irq(host);
+}
+
+static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = {
+ .start = uniphier_sd_external_dma_start,
+ .enable = uniphier_sd_external_dma_enable,
+ .request = uniphier_sd_external_dma_request,
+ .release = uniphier_sd_external_dma_release,
+ .abort = uniphier_sd_external_dma_abort,
+ .dataend = uniphier_sd_external_dma_dataend,
+};
+
+static void uniphier_sd_internal_dma_issue(unsigned long arg)
+{
+ struct tmio_mmc_host *host = (void *)arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ uniphier_sd_dma_endisable(host, 1);
+ writel(UNIPHIER_SD_DMA_CTL_START, host->ctl + UNIPHIER_SD_DMA_CTL);
+}
+
+static void uniphier_sd_internal_dma_start(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+ struct scatterlist *sg = host->sg_ptr;
+ dma_addr_t dma_addr;
+ unsigned int dma_mode_dir;
+ u32 dma_mode;
+ int sg_len;
+
+ if ((data->flags & MMC_DATA_READ) && !host->chan_rx)
+ goto force_pio;
+
+ if (WARN_ON(host->sg_len != 1))
+ goto force_pio;
+
+ if (!IS_ALIGNED(sg->offset, 8))
+ goto force_pio;
+
+ if (data->flags & MMC_DATA_READ) {
+ priv->dma_dir = DMA_FROM_DEVICE;
+ dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV;
+ } else {
+ priv->dma_dir = DMA_TO_DEVICE;
+ dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_TO_DEV;
+ }
+
+ sg_len = dma_map_sg(mmc_dev(host->mmc), sg, 1, priv->dma_dir);
+ if (sg_len == 0)
+ goto force_pio;
+
+ dma_mode = FIELD_PREP(UNIPHIER_SD_DMA_MODE_DIR_MASK, dma_mode_dir);
+ dma_mode |= FIELD_PREP(UNIPHIER_SD_DMA_MODE_WIDTH_MASK,
+ UNIPHIER_SD_DMA_MODE_WIDTH_64);
+ dma_mode |= UNIPHIER_SD_DMA_MODE_ADDR_INC;
+
+ writel(dma_mode, host->ctl + UNIPHIER_SD_DMA_MODE);
+
+ dma_addr = sg_dma_address(data->sg);
+ writel(lower_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_L);
+ writel(upper_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_H);
+
+ host->dma_on = true;
+
+ return;
+force_pio:
+ uniphier_sd_dma_endisable(host, 0);
+}
+
+static void uniphier_sd_internal_dma_enable(struct tmio_mmc_host *host,
+ bool enable)
+{
+}
+
+static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+ /*
+ * Due to a hardware bug, Pro5 cannot use DMA for RX.
+	 * We can still use DMA for TX, but must fall back to PIO for RX.
+ */
+ if (!(priv->caps & UNIPHIER_SD_CAP_BROKEN_DMA_RX))
+ host->chan_rx = (void *)0xdeadbeaf;
+
+ host->chan_tx = (void *)0xdeadbeaf;
+
+ tasklet_init(&host->dma_issue, uniphier_sd_internal_dma_issue,
+ (unsigned long)host);
+}
+
+static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host)
+{
+	/* Clear each channel pointer to mark its DMA as disabled */
+ host->chan_rx = NULL;
+ host->chan_tx = NULL;
+}
+
+static void uniphier_sd_internal_dma_abort(struct tmio_mmc_host *host)
+{
+ u32 tmp;
+
+ uniphier_sd_dma_endisable(host, 0);
+
+ tmp = readl(host->ctl + UNIPHIER_SD_DMA_RST);
+ tmp &= ~(UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0);
+ writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);
+
+ tmp |= UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0;
+ writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);
+}
+
+static void uniphier_sd_internal_dma_dataend(struct tmio_mmc_host *host)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+ uniphier_sd_dma_endisable(host, 0);
+ dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, 1, priv->dma_dir);
+
+ tmio_mmc_do_data_irq(host);
+}
+
+static const struct tmio_mmc_dma_ops uniphier_sd_internal_dma_ops = {
+ .start = uniphier_sd_internal_dma_start,
+ .enable = uniphier_sd_internal_dma_enable,
+ .request = uniphier_sd_internal_dma_request,
+ .release = uniphier_sd_internal_dma_release,
+ .abort = uniphier_sd_internal_dma_abort,
+ .dataend = uniphier_sd_internal_dma_dataend,
+};
+
+static int uniphier_sd_clk_enable(struct tmio_mmc_host *host)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+ struct mmc_host *mmc = host->mmc;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(priv->clk, ULONG_MAX);
+ if (ret)
+ goto disable_clk;
+
+ priv->clk_rate = clk_get_rate(priv->clk);
+
+	/* Use the max-frequency property if set, else the clock rate */
+ if (!mmc->f_max)
+ mmc->f_max = priv->clk_rate;
+
+ /*
+	 * 1/512 is the finest divisor in the original IP. Newer versions
+	 * also support a 1/1024 divisor (a UniPhier-specific extension).
+ */
+ if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
+ mmc->f_min = priv->clk_rate / 1024;
+ else
+ mmc->f_min = priv->clk_rate / 512;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto disable_clk;
+
+ ret = reset_control_deassert(priv->rst_br);
+ if (ret)
+ goto assert_rst;
+
+ return 0;
+
+assert_rst:
+ reset_control_assert(priv->rst);
+disable_clk:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static void uniphier_sd_clk_disable(struct tmio_mmc_host *host)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+ reset_control_assert(priv->rst_br);
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+}
+
+static void uniphier_sd_hw_reset(struct tmio_mmc_host *host)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+
+ reset_control_assert(priv->rst_hw);
+ /* For eMMC, minimum is 1us but give it 9us for good measure */
+ udelay(9);
+ reset_control_deassert(priv->rst_hw);
+ /* For eMMC, minimum is 200us but give it 300us for good measure */
+ usleep_range(300, 1000);
+}
+
+static void uniphier_sd_set_clock(struct tmio_mmc_host *host,
+ unsigned int clock)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+ unsigned long divisor;
+ u32 tmp;
+
+ tmp = readl(host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
+
+	/* stop the clock before changing its rate to avoid a clock glitch */
+ tmp &= ~CLK_CTL_SCLKEN;
+ writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
+
+ if (clock == 0)
+ return;
+
+ tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1024;
+ tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1;
+ tmp &= ~CLK_CTL_DIV_MASK;
+
+ divisor = priv->clk_rate / clock;
+
+ /*
+ * In the original IP, bit[7:0] represents the divisor.
+	 * bit7 set: 1/512, ... bit0 set: 1/4, all bits clear: 1/2
+ *
+ * The IP does not define a way to achieve 1/1. For UniPhier variants,
+ * bit10 is used for 1/1. Newer versions of UniPhier variants use
+ * bit16 for 1/1024.
+ */
+ if (divisor <= 1)
+ tmp |= UNIPHIER_SD_CLK_CTL_DIV1;
+ else if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP && divisor > 512)
+ tmp |= UNIPHIER_SD_CLK_CTL_DIV1024;
+ else
+ tmp |= roundup_pow_of_two(divisor) >> 2;
+
+ writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
+
+ tmp |= CLK_CTL_SCLKEN;
+ writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
+}
+
+static void uniphier_sd_host_init(struct tmio_mmc_host *host)
+{
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+ u32 val;
+
+ /*
+	 * Connected to a 32-bit AXI bus.
+	 * This register holds settings for the SoC-specific internal bus
+	 * connection. Worse, the register spec was changed, breaking
+	 * backward compatibility. Write an appropriate value depending
+	 * on a flag associated with the compatible string.
+ */
+ if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
+ val = 0x00000101;
+ else
+ val = 0x00000000;
+
+ writel(val, host->ctl + UNIPHIER_SD_HOST_MODE);
+
+ val = 0;
+ /*
+ * If supported, the controller can automatically
+ * enable/disable the clock line to the card.
+ */
+ if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
+ val |= UNIPHIER_SD_CLKCTL_OFFEN;
+
+ writel(val, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
+}
+
+static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
+ struct pinctrl_state *pinstate;
+ u32 val, tmp;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ val = UNIPHIER_SD_VOLT_330;
+ pinstate = priv->pinstate_default;
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ val = UNIPHIER_SD_VOLT_180;
+ pinstate = priv->pinstate_uhs;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ tmp = readl(host->ctl + UNIPHIER_SD_VOLT);
+ tmp &= ~UNIPHIER_SD_VOLT_MASK;
+ tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
+ writel(tmp, host->ctl + UNIPHIER_SD_VOLT);
+
+ pinctrl_select_state(priv->pinctrl, pinstate);
+
+ return 0;
+}
+
+static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
+ struct uniphier_sd_priv *priv)
+{
+ priv->pinctrl = devm_pinctrl_get(mmc_dev(host->mmc));
+ if (IS_ERR(priv->pinctrl))
+ return PTR_ERR(priv->pinctrl);
+
+ priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(priv->pinstate_default))
+ return PTR_ERR(priv->pinstate_default);
+
+ priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
+ if (IS_ERR(priv->pinstate_uhs))
+ return PTR_ERR(priv->pinstate_uhs);
+
+ host->ops.start_signal_voltage_switch =
+ uniphier_sd_start_signal_voltage_switch;
+
+ return 0;
+}
+
+static int uniphier_sd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_sd_priv *priv;
+ struct tmio_mmc_data *tmio_data;
+ struct tmio_mmc_host *host;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+		dev_err(dev, "failed to get IRQ number\n");
+ return irq;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->caps = (unsigned long)of_device_get_match_data(dev);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ priv->rst = devm_reset_control_get_shared(dev, "host");
+ if (IS_ERR(priv->rst)) {
+ dev_err(dev, "failed to get host reset\n");
+ return PTR_ERR(priv->rst);
+ }
+
+ /* old version has one more reset */
+ if (!(priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)) {
+ priv->rst_br = devm_reset_control_get_shared(dev, "bridge");
+ if (IS_ERR(priv->rst_br)) {
+ dev_err(dev, "failed to get bridge reset\n");
+ return PTR_ERR(priv->rst_br);
+ }
+ }
+
+ tmio_data = &priv->tmio_data;
+ tmio_data->flags |= TMIO_MMC_32BIT_DATA_PORT;
+
+ host = tmio_mmc_host_alloc(pdev, tmio_data);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ if (host->mmc->caps & MMC_CAP_HW_RESET) {
+ priv->rst_hw = devm_reset_control_get_exclusive(dev, "hw");
+ if (IS_ERR(priv->rst_hw)) {
+ dev_err(dev, "failed to get hw reset\n");
+ ret = PTR_ERR(priv->rst_hw);
+ goto free_host;
+ }
+ host->hw_reset = uniphier_sd_hw_reset;
+ }
+
+ if (host->mmc->caps & MMC_CAP_UHS) {
+ ret = uniphier_sd_uhs_init(host, priv);
+ if (ret) {
+ dev_warn(dev,
+			 "failed to setup UHS (error %d). Disabling UHS.\n",
+ ret);
+ host->mmc->caps &= ~MMC_CAP_UHS;
+ }
+ }
+
+ ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
+ dev_name(dev), host);
+ if (ret)
+ goto free_host;
+
+ if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
+ host->dma_ops = &uniphier_sd_internal_dma_ops;
+ else
+ host->dma_ops = &uniphier_sd_external_dma_ops;
+
+ host->bus_shift = 1;
+ host->clk_enable = uniphier_sd_clk_enable;
+ host->clk_disable = uniphier_sd_clk_disable;
+ host->set_clock = uniphier_sd_set_clock;
+
+ ret = uniphier_sd_clk_enable(host);
+ if (ret)
+ goto free_host;
+
+ uniphier_sd_host_init(host);
+
+ tmio_data->ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34;
+ if (host->mmc->caps & MMC_CAP_UHS)
+ tmio_data->ocr_mask |= MMC_VDD_165_195;
+
+ tmio_data->max_segs = 1;
+ tmio_data->max_blk_count = U16_MAX;
+
+ ret = tmio_mmc_host_probe(host);
+ if (ret)
+ goto free_host;
+
+ return 0;
+
+free_host:
+ tmio_mmc_host_free(host);
+
+ return ret;
+}
+
+static int uniphier_sd_remove(struct platform_device *pdev)
+{
+ struct tmio_mmc_host *host = platform_get_drvdata(pdev);
+
+ tmio_mmc_host_remove(host);
+ uniphier_sd_clk_disable(host);
+
+ return 0;
+}
+
+static const struct of_device_id uniphier_sd_match[] = {
+ {
+ .compatible = "socionext,uniphier-sd-v2.91",
+ },
+ {
+ .compatible = "socionext,uniphier-sd-v3.1",
+ .data = (void *)(UNIPHIER_SD_CAP_EXTENDED_IP |
+ UNIPHIER_SD_CAP_BROKEN_DMA_RX),
+ },
+ {
+ .compatible = "socionext,uniphier-sd-v3.1.1",
+ .data = (void *)UNIPHIER_SD_CAP_EXTENDED_IP,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_sd_match);
+
+static struct platform_driver uniphier_sd_driver = {
+ .probe = uniphier_sd_probe,
+ .remove = uniphier_sd_remove,
+ .driver = {
+ .name = "uniphier-sd",
+ .of_match_table = uniphier_sd_match,
+ },
+};
+module_platform_driver(uniphier_sd_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier SD/eMMC host controller driver");
+MODULE_LICENSE("GPL v2");
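As with the tmio core, a worked number helps for uniphier_sd_set_clock();
assuming clk_rate = 200 MHz and a requested clock of 400 kHz on the extended
IP:

	/*
	 * divisor = 200000000 / 400000 = 500
	 * 500 > 1 and 500 <= 512, so the plain divider field is used:
	 * roundup_pow_of_two(500) >> 2 = 512 >> 2 = 0x80 (bit7 -> 1/512),
	 * giving 200 MHz / 512 ~= 390.6 kHz. Only a divisor above 512
	 * selects UNIPHIER_SD_CLK_CTL_DIV1024, and only on the extended IP.
	 */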
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index cdfeb15b6f05..cd8b1b9d4d8a 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cbfafc453274..c4a1d04b8c80 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -39,13 +39,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(len, val, 1));
+ SPI_MEM_OP_DATA_IN(len, NULL, 1));
+ void *scratchbuf;
int ret;
+ scratchbuf = kmalloc(len, GFP_KERNEL);
+ if (!scratchbuf)
+ return -ENOMEM;
+
+ op.data.buf.in = scratchbuf;
ret = spi_mem_exec_op(flash->spimem, &op);
if (ret < 0)
dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret,
code);
+ else
+ memcpy(val, scratchbuf, len);
+
+ kfree(scratchbuf);
return ret;
}
@@ -56,9 +66,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, buf, 1));
+ SPI_MEM_OP_DATA_OUT(len, NULL, 1));
+ void *scratchbuf;
+ int ret;
+
+ scratchbuf = kmemdup(buf, len, GFP_KERNEL);
+ if (!scratchbuf)
+ return -ENOMEM;
- return spi_mem_exec_op(flash->spimem, &op);
+ op.data.buf.out = scratchbuf;
+ ret = spi_mem_exec_op(flash->spimem, &op);
+ kfree(scratchbuf);
+
+ return ret;
}
static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
@@ -70,7 +90,6 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(len, buf, 1));
- size_t remaining = len;
int ret;
/* get transfer protocols. */
@@ -81,22 +100,16 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
op.addr.nbytes = 0;
- while (remaining) {
- op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
- ret = spi_mem_adjust_op_size(flash->spimem, &op);
- if (ret)
- return ret;
-
- ret = spi_mem_exec_op(flash->spimem, &op);
- if (ret)
- return ret;
+ ret = spi_mem_adjust_op_size(flash->spimem, &op);
+ if (ret)
+ return ret;
+ op.data.nbytes = len < op.data.nbytes ? len : op.data.nbytes;
- op.addr.val += op.data.nbytes;
- remaining -= op.data.nbytes;
- op.data.buf.out += op.data.nbytes;
- }
+ ret = spi_mem_exec_op(flash->spimem, &op);
+ if (ret)
+ return ret;
- return len;
+ return op.data.nbytes;
}
/*
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 9d9723693217..a20e85aa770e 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -14,6 +14,7 @@
*/
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -25,28 +26,24 @@
#include <linux/slab.h>
#include <linux/types.h>
-#define pr_devinit(fmt, args...) \
- ({ static const char __fmt[] = fmt; printk(__fmt, ## args); })
+#define win_mask(x) ((BIT(x)) - 1)
#define DRIVER_NAME "gpio-addr-flash"
-#define PFX DRIVER_NAME ": "
/**
* struct async_state - keep GPIO flash state
* @mtd: MTD state for this mapping
* @map: MTD map state for this flash
- * @gpio_count: number of GPIOs used to address
- * @gpio_addrs: array of GPIOs to twiddle
+ * @gpios: Struct containing the array of GPIO descriptors
* @gpio_values: cached GPIO values
- * @win_size: dedicated memory size (if no GPIOs)
+ * @win_order: log2 of the dedicated memory window size (if no GPIOs)
*/
struct async_state {
struct mtd_info *mtd;
struct map_info map;
- size_t gpio_count;
- unsigned *gpio_addrs;
- int *gpio_values;
- unsigned long win_size;
+ struct gpio_descs *gpios;
+ unsigned int gpio_values;
+ unsigned int win_order;
};
#define gf_map_info_to_state(mi) ((struct async_state *)(mi)->map_priv_1)
@@ -57,21 +54,25 @@ struct async_state {
*
* Rather than call the GPIO framework every time, cache the last-programmed
* value. This speeds up sequential accesses (which are by far the most common
- * type). We rely on the GPIO framework to treat non-zero value as high so
- * that we don't have to normalize the bits.
+ * type).
*/
static void gf_set_gpios(struct async_state *state, unsigned long ofs)
{
- size_t i = 0;
- int value;
- ofs /= state->win_size;
- do {
- value = ofs & (1 << i);
- if (state->gpio_values[i] != value) {
- gpio_set_value(state->gpio_addrs[i], value);
- state->gpio_values[i] = value;
- }
- } while (++i < state->gpio_count);
+ int i;
+
+ ofs >>= state->win_order;
+
+ if (ofs == state->gpio_values)
+ return;
+
+ for (i = 0; i < state->gpios->ndescs; i++) {
+ if ((ofs & BIT(i)) == (state->gpio_values & BIT(i)))
+ continue;
+
+ gpiod_set_value(state->gpios->desc[i], !!(ofs & BIT(i)));
+ }
+
+ state->gpio_values = ofs;
}
/**
@@ -87,7 +88,7 @@ static map_word gf_read(struct map_info *map, unsigned long ofs)
gf_set_gpios(state, ofs);
- word = readw(map->virt + (ofs % state->win_size));
+ word = readw(map->virt + (ofs & win_mask(state->win_order)));
test.x[0] = word;
return test;
}
@@ -109,14 +110,14 @@ static void gf_copy_from(struct map_info *map, void *to, unsigned long from, ssi
int this_len;
while (len) {
- if ((from % state->win_size) + len > state->win_size)
- this_len = state->win_size - (from % state->win_size);
- else
- this_len = len;
+ this_len = from & win_mask(state->win_order);
+ this_len = BIT(state->win_order) - this_len;
+ this_len = min_t(int, len, this_len);
gf_set_gpios(state, from);
- memcpy_fromio(to, map->virt + (from % state->win_size),
- this_len);
+ memcpy_fromio(to,
+ map->virt + (from & win_mask(state->win_order)),
+ this_len);
len -= this_len;
from += this_len;
to += this_len;
@@ -136,7 +137,7 @@ static void gf_write(struct map_info *map, map_word d1, unsigned long ofs)
gf_set_gpios(state, ofs);
d = d1.x[0];
- writew(d, map->virt + (ofs % state->win_size));
+ writew(d, map->virt + (ofs & win_mask(state->win_order)));
}
/**
@@ -156,13 +157,13 @@ static void gf_copy_to(struct map_info *map, unsigned long to,
int this_len;
while (len) {
- if ((to % state->win_size) + len > state->win_size)
- this_len = state->win_size - (to % state->win_size);
- else
- this_len = len;
+ this_len = to & win_mask(state->win_order);
+ this_len = BIT(state->win_order) - this_len;
+ this_len = min_t(int, len, this_len);
gf_set_gpios(state, to);
- memcpy_toio(map->virt + (to % state->win_size), from, len);
+ memcpy_toio(map->virt + (to & win_mask(state->win_order)),
+			    from, this_len);
len -= this_len;
to += this_len;
@@ -180,18 +181,22 @@ static const char * const part_probe_types[] = {
* The platform resource layout expected looks something like:
* struct mtd_partition partitions[] = { ... };
* struct physmap_flash_data flash_data = { ... };
- * unsigned flash_gpios[] = { GPIO_XX, GPIO_XX, ... };
+ * static struct gpiod_lookup_table addr_flash_gpios = {
+ * .dev_id = "gpio-addr-flash.0",
+ * .table = {
+ * GPIO_LOOKUP_IDX("gpio.0", 15, "addr", 0, GPIO_ACTIVE_HIGH),
+ * GPIO_LOOKUP_IDX("gpio.0", 16, "addr", 1, GPIO_ACTIVE_HIGH),
+ *	},
+ * };
+ * gpiod_add_lookup_table(&addr_flash_gpios);
+ *
* struct resource flash_resource[] = {
* {
* .name = "cfi_probe",
* .start = 0x20000000,
* .end = 0x201fffff,
* .flags = IORESOURCE_MEM,
- * }, {
- * .start = (unsigned long)flash_gpios,
- * .end = ARRAY_SIZE(flash_gpios),
- * .flags = IORESOURCE_IRQ,
- * }
+ * },
* };
* struct platform_device flash_device = {
* .name = "gpio-addr-flash",
@@ -203,33 +208,25 @@ static const char * const part_probe_types[] = {
*/
static int gpio_flash_probe(struct platform_device *pdev)
{
- size_t i, arr_size;
struct physmap_flash_data *pdata;
struct resource *memory;
- struct resource *gpios;
struct async_state *state;
pdata = dev_get_platdata(&pdev->dev);
memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpios = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!memory || !gpios || !gpios->end)
+ if (!memory)
return -EINVAL;
- arr_size = sizeof(int) * gpios->end;
- state = kzalloc(sizeof(*state) + arr_size, GFP_KERNEL);
+ state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
- /*
- * We cast start/end to known types in the boards file, so cast
- * away their pointer types here to the known types (gpios->xxx).
- */
- state->gpio_count = gpios->end;
- state->gpio_addrs = (void *)(unsigned long)gpios->start;
- state->gpio_values = (void *)(state + 1);
- state->win_size = resource_size(memory);
- memset(state->gpio_values, 0xff, arr_size);
+ state->gpios = devm_gpiod_get_array(&pdev->dev, "addr", GPIOD_OUT_LOW);
+ if (IS_ERR(state->gpios))
+ return PTR_ERR(state->gpios);
+
+ state->win_order = get_bitmask_order(resource_size(memory)) - 1;
state->map.name = DRIVER_NAME;
state->map.read = gf_read;
@@ -237,38 +234,21 @@ static int gpio_flash_probe(struct platform_device *pdev)
state->map.write = gf_write;
state->map.copy_to = gf_copy_to;
state->map.bankwidth = pdata->width;
- state->map.size = state->win_size * (1 << state->gpio_count);
- state->map.virt = ioremap_nocache(memory->start, state->map.size);
- if (!state->map.virt)
- return -ENOMEM;
+ state->map.size = BIT(state->win_order + state->gpios->ndescs);
+ state->map.virt = devm_ioremap_resource(&pdev->dev, memory);
+ if (IS_ERR(state->map.virt))
+ return PTR_ERR(state->map.virt);
state->map.phys = NO_XIP;
state->map.map_priv_1 = (unsigned long)state;
platform_set_drvdata(pdev, state);
- i = 0;
- do {
- if (gpio_request(state->gpio_addrs[i], DRIVER_NAME)) {
- pr_devinit(KERN_ERR PFX "failed to request gpio %d\n",
- state->gpio_addrs[i]);
- while (i--)
- gpio_free(state->gpio_addrs[i]);
- kfree(state);
- return -EBUSY;
- }
- gpio_direction_output(state->gpio_addrs[i], 0);
- } while (++i < state->gpio_count);
-
- pr_devinit(KERN_NOTICE PFX "probing %d-bit flash bus\n",
- state->map.bankwidth * 8);
+ dev_notice(&pdev->dev, "probing %d-bit flash bus\n",
+ state->map.bankwidth * 8);
state->mtd = do_map_probe(memory->name, &state->map);
- if (!state->mtd) {
- for (i = 0; i < state->gpio_count; ++i)
- gpio_free(state->gpio_addrs[i]);
- kfree(state);
+ if (!state->mtd)
return -ENXIO;
- }
state->mtd->dev.parent = &pdev->dev;
mtd_device_parse_register(state->mtd, part_probe_types, NULL,
@@ -280,13 +260,9 @@ static int gpio_flash_probe(struct platform_device *pdev)
static int gpio_flash_remove(struct platform_device *pdev)
{
struct async_state *state = platform_get_drvdata(pdev);
- size_t i = 0;
- do {
- gpio_free(state->gpio_addrs[i]);
- } while (++i < state->gpio_count);
+
mtd_device_unregister(state->mtd);
map_destroy(state->mtd);
- kfree(state);
return 0;
}
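The win_order arithmetic is easiest to check against the 2 MiB window from
the example resource above (0x20000000 to 0x201fffff):

	/*
	 * resource_size() = 0x200000, so win_order = 21 and
	 * win_mask(21) = 0x1fffff.
	 * An MTD offset of 0x340000 then drives the address GPIOs with
	 * 0x340000 >> 21 = 1 and accesses window offset
	 * 0x340000 & 0x1fffff = 0x140000.
	 */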
diff --git a/drivers/mtd/maps/physmap_of_core.c b/drivers/mtd/maps/physmap_of_core.c
index 4129535b8e46..ece605d78c21 100644
--- a/drivers/mtd/maps/physmap_of_core.c
+++ b/drivers/mtd/maps/physmap_of_core.c
@@ -31,7 +31,6 @@
struct of_flash_list {
struct mtd_info *mtd;
struct map_info map;
- struct resource *res;
};
struct of_flash {
@@ -56,18 +55,10 @@ static int of_flash_remove(struct platform_device *dev)
mtd_concat_destroy(info->cmtd);
}
- for (i = 0; i < info->list_size; i++) {
+ for (i = 0; i < info->list_size; i++)
if (info->list[i].mtd)
map_destroy(info->list[i].mtd);
- if (info->list[i].map.virt)
- iounmap(info->list[i].map.virt);
-
- if (info->list[i].res) {
- release_resource(info->list[i].res);
- kfree(info->list[i].res);
- }
- }
return 0;
}
@@ -215,10 +206,11 @@ static int of_flash_probe(struct platform_device *dev)
err = -EBUSY;
res_size = resource_size(&res);
- info->list[i].res = request_mem_region(res.start, res_size,
- dev_name(&dev->dev));
- if (!info->list[i].res)
+ info->list[i].map.virt = devm_ioremap_resource(&dev->dev, &res);
+ if (IS_ERR(info->list[i].map.virt)) {
+ err = PTR_ERR(info->list[i].map.virt);
goto err_out;
+ }
err = -ENXIO;
width = of_get_property(dp, "bank-width", NULL);
@@ -246,15 +238,6 @@ static int of_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
- err = -ENOMEM;
- info->list[i].map.virt = ioremap(info->list[i].map.phys,
- info->list[i].map.size);
- if (!info->list[i].map.virt) {
- dev_err(&dev->dev, "Failed to ioremap() flash"
- " region\n");
- goto err_out;
- }
-
simple_map_init(&info->list[i].map);
/*
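The conversion relies on devm_ioremap_resource() folding three manual steps
(request_mem_region(), ioremap() and the error/removal cleanup) into one
managed call; the same shape applies to any platform driver:

	void __iomem *base;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* region busy or mapping failed */
	/* no iounmap()/release_resource() needed on remove */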
diff --git a/drivers/mtd/maps/physmap_of_gemini.c b/drivers/mtd/maps/physmap_of_gemini.c
index 830b1b7e702b..9df62ca721d5 100644
--- a/drivers/mtd/maps/physmap_of_gemini.c
+++ b/drivers/mtd/maps/physmap_of_gemini.c
@@ -44,11 +44,6 @@
#define FLASH_PARALLEL_HIGH_PIN_CNT (1 << 20) /* else low pin cnt */
-static const struct of_device_id syscon_match[] = {
- { .compatible = "cortina,gemini-syscon" },
- { },
-};
-
int of_flash_probe_gemini(struct platform_device *pdev,
struct device_node *np,
struct map_info *map)
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 29c0bfd74e8a..b0d44f9214b0 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -27,6 +27,7 @@
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
@@ -45,6 +46,8 @@ static void blktrans_dev_release(struct kref *kref)
dev->disk->private_data = NULL;
blk_cleanup_queue(dev->rq);
+ blk_mq_free_tag_set(dev->tag_set);
+ kfree(dev->tag_set);
put_disk(dev->disk);
list_del(&dev->list);
kfree(dev);
@@ -134,28 +137,39 @@ int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
-static void mtd_blktrans_work(struct work_struct *work)
+static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
+{
+ struct request *rq;
+
+ rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
+ if (rq) {
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
+ return rq;
+ }
+
+ return NULL;
+}
+
+static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
+ __releases(&dev->queue_lock)
+ __acquires(&dev->queue_lock)
{
- struct mtd_blktrans_dev *dev =
- container_of(work, struct mtd_blktrans_dev, work);
struct mtd_blktrans_ops *tr = dev->tr;
- struct request_queue *rq = dev->rq;
struct request *req = NULL;
int background_done = 0;
- spin_lock_irq(rq->queue_lock);
-
while (1) {
blk_status_t res;
dev->bg_stop = false;
- if (!req && !(req = blk_fetch_request(rq))) {
+ if (!req && !(req = mtd_next_request(dev))) {
if (tr->background && !background_done) {
- spin_unlock_irq(rq->queue_lock);
+ spin_unlock_irq(&dev->queue_lock);
mutex_lock(&dev->lock);
tr->background(dev);
mutex_unlock(&dev->lock);
- spin_lock_irq(rq->queue_lock);
+ spin_lock_irq(&dev->queue_lock);
/*
* Do background processing just once per idle
* period.
@@ -166,35 +180,39 @@ static void mtd_blktrans_work(struct work_struct *work)
break;
}
- spin_unlock_irq(rq->queue_lock);
+ spin_unlock_irq(&dev->queue_lock);
mutex_lock(&dev->lock);
res = do_blktrans_request(dev->tr, dev, req);
mutex_unlock(&dev->lock);
- spin_lock_irq(rq->queue_lock);
-
- if (!__blk_end_request_cur(req, res))
+ if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
+ __blk_mq_end_request(req, res);
req = NULL;
+ }
background_done = 0;
+ spin_lock_irq(&dev->queue_lock);
}
-
- spin_unlock_irq(rq->queue_lock);
}
-static void mtd_blktrans_request(struct request_queue *rq)
+static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
struct mtd_blktrans_dev *dev;
- struct request *req = NULL;
- dev = rq->queuedata;
+ dev = hctx->queue->queuedata;
+ if (!dev) {
+ blk_mq_start_request(bd->rq);
+ return BLK_STS_IOERR;
+ }
+
+ spin_lock_irq(&dev->queue_lock);
+ list_add_tail(&bd->rq->queuelist, &dev->rq_list);
+ mtd_blktrans_work(dev);
+ spin_unlock_irq(&dev->queue_lock);
- if (!dev)
- while ((req = blk_fetch_request(rq)) != NULL)
- __blk_end_request_all(req, BLK_STS_IOERR);
- else
- queue_work(dev->wq, &dev->work);
+ return BLK_STS_OK;
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
@@ -329,6 +347,10 @@ static const struct block_device_operations mtd_block_ops = {
.getgeo = blktrans_getgeo,
};
+static const struct blk_mq_ops mtd_mq_ops = {
+ .queue_rq = mtd_queue_rq,
+};
+
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
struct mtd_blktrans_ops *tr = new->tr;
@@ -416,11 +438,20 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
/* Create the request queue */
spin_lock_init(&new->queue_lock);
- new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
+ INIT_LIST_HEAD(&new->rq_list);
- if (!new->rq)
+ new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
+ if (!new->tag_set)
goto error3;
+ new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ if (IS_ERR(new->rq)) {
+ ret = PTR_ERR(new->rq);
+ new->rq = NULL;
+ goto error4;
+ }
+
if (tr->flush)
blk_queue_write_cache(new->rq, true, false);
@@ -437,17 +468,10 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->queue = new->rq;
- /* Create processing workqueue */
- new->wq = alloc_workqueue("%s%d", 0, 0,
- tr->name, new->mtd->index);
- if (!new->wq)
- goto error4;
- INIT_WORK(&new->work, mtd_blktrans_work);
-
if (new->readonly)
set_disk_ro(gd, 1);
- device_add_disk(&new->mtd->dev, gd);
+ device_add_disk(&new->mtd->dev, gd, NULL);
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
@@ -456,7 +480,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
return 0;
error4:
- blk_cleanup_queue(new->rq);
+ kfree(new->tag_set);
error3:
put_disk(new->disk);
error2:
@@ -481,15 +505,17 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
/* Stop new requests to arrive */
del_gendisk(old->disk);
- /* Stop workqueue. This will perform any pending request. */
- destroy_workqueue(old->wq);
-
/* Kill current requests */
spin_lock_irqsave(&old->queue_lock, flags);
old->rq->queuedata = NULL;
- blk_start_queue(old->rq);
spin_unlock_irqrestore(&old->queue_lock, flags);
+ /* freeze+quiesce queue to ensure all requests are flushed */
+ blk_mq_freeze_queue(old->rq);
+ blk_mq_quiesce_queue(old->rq);
+ blk_mq_unquiesce_queue(old->rq);
+ blk_mq_unfreeze_queue(old->rq);
+
/* If the device is currently open, tell trans driver to close it,
then put mtd device, and don't touch it again */
mutex_lock(&old->lock);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 52e2cb35fc79..99c460facd5e 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master,
int ret, err = 0;
np = mtd_get_of_node(master);
- if (!mtd_is_partition(master))
+ if (mtd_is_partition(master))
+ of_node_get(np);
+ else
np = of_get_child_by_name(np, "partitions");
+
of_property_for_each_string(np, "compatible", prop, compat) {
parser = mtd_part_get_compatible_parser(compat);
if (!parser)
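The fix makes both branches hand back np with a reference held:
of_get_child_by_name() returns a referenced node, and the partition case now
takes one explicitly, so the function's eventual of_node_put() (beyond this
hunk) is balanced either way. The general pattern:

	if (mtd_is_partition(master))
		of_node_get(np);	/* explicit reference */
	else
		np = of_get_child_by_name(np, "partitions"); /* implicit ref */

	/* ... use np ... */
	of_node_put(np);		/* single put balances either branch */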
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 5fc9a1bde4ac..c7efc31384d5 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -227,26 +227,6 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
load time (assuming you build diskonchip as a module) with the module
parameter "inftl_bbt_write=1".
-config MTD_NAND_DOCG4
- tristate "Support for DiskOnChip G4"
- depends on HAS_IOMEM
- select BCH
- select BITREVERSE
- help
- Support for diskonchip G4 nand flash, found in various smartphones and
- PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba
- Portege G900, Asus P526, and O2 XDA Zinc.
-
- With this driver you will be able to use UBI and create a ubifs on the
- device, so you may wish to consider enabling UBI and UBIFS as well.
-
- These devices ship with the Mys/Sandisk SAFTL formatting, for which
- there is currently no mtd parser, so you may want to use command line
- partitioning to segregate write-protected blocks. On the Treo680, the
- first five erase blocks (256KiB each) are write-protected, followed
- by the block containing the saftl partition table. This is probably
- typical.
-
config MTD_NAND_SHARPSL
tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
depends on ARCH_PXA || COMPILE_TEST
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index d5a5f9832b88..57159b349054 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
obj-$(CONFIG_MTD_NAND_TANGO) += tango_nand.o
obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
-obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
@@ -58,8 +57,11 @@ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
-nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
+nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
+nand-objs += nand_onfi.o
+nand-objs += nand_jedec.o
nand-objs += nand_amd.o
+nand-objs += nand_esmt.o
nand-objs += nand_hynix.o
nand-objs += nand_macronix.o
nand-objs += nand_micron.o
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index 37a3cc21c7bc..5ba180a291eb 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -20,23 +20,33 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/gpio.h>
#include <linux/platform_data/gpio-omap.h>
#include <asm/io.h>
#include <asm/sizes.h>
-#include <mach/board-ams-delta.h>
-
#include <mach/hardware.h>
/*
* MTD structure for E3 (Delta)
*/
-static struct mtd_info *ams_delta_mtd = NULL;
+
+struct ams_delta_nand {
+ struct nand_chip nand_chip;
+ struct gpio_desc *gpiod_rdy;
+ struct gpio_desc *gpiod_nce;
+ struct gpio_desc *gpiod_nre;
+ struct gpio_desc *gpiod_nwp;
+ struct gpio_desc *gpiod_nwe;
+ struct gpio_desc *gpiod_ale;
+ struct gpio_desc *gpiod_cle;
+ void __iomem *io_base;
+ bool data_in;
+};
/*
* Define partitions for flash devices
@@ -63,48 +73,64 @@ static const struct mtd_partition partition_info[] = {
.size = 3 * SZ_256K },
};
-static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
+static void ams_delta_io_write(struct ams_delta_nand *priv, u_char byte)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- void __iomem *io_base = (void __iomem *)nand_get_controller_data(this);
-
- writew(0, io_base + OMAP_MPUIO_IO_CNTL);
- writew(byte, this->IO_ADDR_W);
- gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 0);
+ writew(byte, priv->nand_chip.legacy.IO_ADDR_W);
+ gpiod_set_value(priv->gpiod_nwe, 0);
ndelay(40);
- gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 1);
+ gpiod_set_value(priv->gpiod_nwe, 1);
}
-static u_char ams_delta_read_byte(struct mtd_info *mtd)
+static u_char ams_delta_io_read(struct ams_delta_nand *priv)
{
u_char res;
- struct nand_chip *this = mtd_to_nand(mtd);
- void __iomem *io_base = (void __iomem *)nand_get_controller_data(this);
- gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 0);
+ gpiod_set_value(priv->gpiod_nre, 0);
ndelay(40);
- writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
- res = readw(this->IO_ADDR_R);
- gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 1);
+ res = readw(priv->nand_chip.legacy.IO_ADDR_R);
+ gpiod_set_value(priv->gpiod_nre, 1);
return res;
}
-static void ams_delta_write_buf(struct mtd_info *mtd, const u_char *buf,
+static void ams_delta_dir_input(struct ams_delta_nand *priv, bool in)
+{
+ writew(in ? ~0 : 0, priv->io_base + OMAP_MPUIO_IO_CNTL);
+ priv->data_in = in;
+}
+
+static void ams_delta_write_buf(struct nand_chip *this, const u_char *buf,
int len)
{
+ struct ams_delta_nand *priv = nand_get_controller_data(this);
int i;
- for (i=0; i<len; i++)
- ams_delta_write_byte(mtd, buf[i]);
+ if (priv->data_in)
+ ams_delta_dir_input(priv, false);
+
+ for (i = 0; i < len; i++)
+ ams_delta_io_write(priv, buf[i]);
}
-static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void ams_delta_read_buf(struct nand_chip *this, u_char *buf, int len)
{
+ struct ams_delta_nand *priv = nand_get_controller_data(this);
int i;
- for (i=0; i<len; i++)
- buf[i] = ams_delta_read_byte(mtd);
+ if (!priv->data_in)
+ ams_delta_dir_input(priv, true);
+
+ for (i = 0; i < len; i++)
+ buf[i] = ams_delta_io_read(priv);
+}
+
+static u_char ams_delta_read_byte(struct nand_chip *this)
+{
+ u_char res;
+
+ ams_delta_read_buf(this, &res, 1);
+
+ return res;
}
/*
@@ -115,67 +141,40 @@ static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len)
* NAND_CLE: bit 1 -> bit 7
* NAND_ALE: bit 2 -> bit 6
*/
-static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
+static void ams_delta_hwcontrol(struct nand_chip *this, int cmd,
unsigned int ctrl)
{
+ struct ams_delta_nand *priv = nand_get_controller_data(this);
if (ctrl & NAND_CTRL_CHANGE) {
- gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NCE,
- (ctrl & NAND_NCE) == 0);
- gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_CLE,
- (ctrl & NAND_CLE) != 0);
- gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_ALE,
- (ctrl & NAND_ALE) != 0);
+ gpiod_set_value(priv->gpiod_nce, !(ctrl & NAND_NCE));
+ gpiod_set_value(priv->gpiod_cle, !!(ctrl & NAND_CLE));
+ gpiod_set_value(priv->gpiod_ale, !!(ctrl & NAND_ALE));
}
- if (cmd != NAND_CMD_NONE)
- ams_delta_write_byte(mtd, cmd);
+ if (cmd != NAND_CMD_NONE) {
+ u_char byte = cmd;
+
+ ams_delta_write_buf(this, &byte, 1);
+ }
}
-static int ams_delta_nand_ready(struct mtd_info *mtd)
+static int ams_delta_nand_ready(struct nand_chip *this)
{
- return gpio_get_value(AMS_DELTA_GPIO_PIN_NAND_RB);
+ struct ams_delta_nand *priv = nand_get_controller_data(this);
+
+ return gpiod_get_value(priv->gpiod_rdy);
}
-static const struct gpio _mandatory_gpio[] = {
- {
- .gpio = AMS_DELTA_GPIO_PIN_NAND_NCE,
- .flags = GPIOF_OUT_INIT_HIGH,
- .label = "nand_nce",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_NAND_NRE,
- .flags = GPIOF_OUT_INIT_HIGH,
- .label = "nand_nre",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_NAND_NWP,
- .flags = GPIOF_OUT_INIT_HIGH,
- .label = "nand_nwp",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_NAND_NWE,
- .flags = GPIOF_OUT_INIT_HIGH,
- .label = "nand_nwe",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_NAND_ALE,
- .flags = GPIOF_OUT_INIT_LOW,
- .label = "nand_ale",
- },
- {
- .gpio = AMS_DELTA_GPIO_PIN_NAND_CLE,
- .flags = GPIOF_OUT_INIT_LOW,
- .label = "nand_cle",
- },
-};
/*
* Main initialization routine
*/
static int ams_delta_init(struct platform_device *pdev)
{
+ struct ams_delta_nand *priv;
struct nand_chip *this;
+ struct mtd_info *mtd;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *io_base;
int err = 0;
@@ -184,15 +183,16 @@ static int ams_delta_init(struct platform_device *pdev)
return -ENXIO;
/* Allocate memory for MTD device structure and private data */
- this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
- if (!this) {
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct ams_delta_nand),
+ GFP_KERNEL);
+ if (!priv) {
pr_warn("Unable to allocate E3 NAND MTD device structure.\n");
- err = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
+ this = &priv->nand_chip;
- ams_delta_mtd = nand_to_mtd(this);
- ams_delta_mtd->owner = THIS_MODULE;
+ mtd = nand_to_mtd(this);
+ mtd->dev.parent = &pdev->dev;
/*
* Don't try to request the memory region from here,
@@ -207,51 +207,93 @@ static int ams_delta_init(struct platform_device *pdev)
goto out_free;
}
- nand_set_controller_data(this, (void *)io_base);
+ priv->io_base = io_base;
+ nand_set_controller_data(this, priv);
/* Set address of NAND IO lines */
- this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
- this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
- this->read_byte = ams_delta_read_byte;
- this->write_buf = ams_delta_write_buf;
- this->read_buf = ams_delta_read_buf;
- this->cmd_ctrl = ams_delta_hwcontrol;
- if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) {
- this->dev_ready = ams_delta_nand_ready;
- } else {
- this->dev_ready = NULL;
- pr_notice("Couldn't request gpio for Delta NAND ready.\n");
+ this->legacy.IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
+ this->legacy.IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
+ this->legacy.read_byte = ams_delta_read_byte;
+ this->legacy.write_buf = ams_delta_write_buf;
+ this->legacy.read_buf = ams_delta_read_buf;
+ this->legacy.cmd_ctrl = ams_delta_hwcontrol;
+
+ priv->gpiod_rdy = devm_gpiod_get_optional(&pdev->dev, "rdy", GPIOD_IN);
+ if (IS_ERR(priv->gpiod_rdy)) {
+ err = PTR_ERR(priv->gpiod_rdy);
+ dev_warn(&pdev->dev, "RDY GPIO request failed (%d)\n", err);
+ goto out_mtd;
}
+
+ if (priv->gpiod_rdy)
+ this->legacy.dev_ready = ams_delta_nand_ready;
+
/* 25 us command delay time */
- this->chip_delay = 30;
+ this->legacy.chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
this->ecc.algo = NAND_ECC_HAMMING;
- platform_set_drvdata(pdev, io_base);
+ platform_set_drvdata(pdev, priv);
/* Set chip enabled, but */
- err = gpio_request_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
- if (err)
- goto out_gpio;
+ priv->gpiod_nwp = devm_gpiod_get(&pdev->dev, "nwp", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->gpiod_nwp)) {
+ err = PTR_ERR(priv->gpiod_nwp);
+ dev_err(&pdev->dev, "NWP GPIO request failed (%d)\n", err);
+ goto out_mtd;
+ }
+
+ priv->gpiod_nce = devm_gpiod_get(&pdev->dev, "nce", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->gpiod_nce)) {
+ err = PTR_ERR(priv->gpiod_nce);
+ dev_err(&pdev->dev, "NCE GPIO request failed (%d)\n", err);
+ goto out_mtd;
+ }
+
+ priv->gpiod_nre = devm_gpiod_get(&pdev->dev, "nre", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->gpiod_nre)) {
+ err = PTR_ERR(priv->gpiod_nre);
+ dev_err(&pdev->dev, "NRE GPIO request failed (%d)\n", err);
+ goto out_mtd;
+ }
+
+ priv->gpiod_nwe = devm_gpiod_get(&pdev->dev, "nwe", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->gpiod_nwe)) {
+ err = PTR_ERR(priv->gpiod_nwe);
+ dev_err(&pdev->dev, "NWE GPIO request failed (%d)\n", err);
+ goto out_mtd;
+ }
+
+ priv->gpiod_ale = devm_gpiod_get(&pdev->dev, "ale", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_ale)) {
+ err = PTR_ERR(priv->gpiod_ale);
+ dev_err(&pdev->dev, "ALE GPIO request failed (%d)\n", err);
+ goto out_mtd;
+ }
+
+ priv->gpiod_cle = devm_gpiod_get(&pdev->dev, "cle", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpiod_cle)) {
+ err = PTR_ERR(priv->gpiod_cle);
+ dev_err(&pdev->dev, "CLE GPIO request failed (%d)\n", err);
+ goto out_mtd;
+ }
+
+ /* Initialize data port direction to a known state */
+ ams_delta_dir_input(priv, true);
/* Scan to find existence of the device */
- err = nand_scan(ams_delta_mtd, 1);
+ err = nand_scan(this, 1);
if (err)
goto out_mtd;
/* Register the partitions */
- mtd_device_register(ams_delta_mtd, partition_info,
- ARRAY_SIZE(partition_info));
+ mtd_device_register(mtd, partition_info, ARRAY_SIZE(partition_info));
goto out;
out_mtd:
- gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
-out_gpio:
- gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
iounmap(io_base);
out_free:
- kfree(this);
out:
return err;
}
@@ -261,18 +303,15 @@ out_free:
*/
static int ams_delta_cleanup(struct platform_device *pdev)
{
- void __iomem *io_base = platform_get_drvdata(pdev);
+ struct ams_delta_nand *priv = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
+ void __iomem *io_base = priv->io_base;
/* Release resources, unregister device */
- nand_release(ams_delta_mtd);
+ nand_release(mtd_to_nand(mtd));
- gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
- gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
iounmap(io_base);
- /* Free the MTD device structure */
- kfree(mtd_to_nand(ams_delta_mtd));
-
return 0;
}
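The ams-delta rewrite above replaces numeric GPIOs (gpio_request/gpio_set_value with board-specific pin constants) by the descriptor-based consumer API. A minimal sketch of that API, assuming a hypothetical "nwe" line and caller:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Hypothetical consumer: the "nwe" function name and timing mirror the
 * driver above, but the mapping to a physical pin comes from a gpiod
 * lookup table or firmware description, not a hardcoded number. */
static int example_pulse_nwe(struct device *dev)
{
	struct gpio_desc *nwe;

	nwe = devm_gpiod_get(dev, "nwe", GPIOD_OUT_HIGH);
	if (IS_ERR(nwe))
		return PTR_ERR(nwe);

	gpiod_set_value(nwe, 0);	/* logical level; gpiolib handles polarity */
	ndelay(40);
	gpiod_set_value(nwe, 1);

	return 0;
}

Because devm_gpiod_get() is device-managed, the explicit gpio_free_array()/gpio_free() calls in the old error and cleanup paths simply disappear, which is why the diff above deletes them.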
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index a068b214ebaa..fb33f6be7c4f 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -410,25 +410,15 @@ err:
return -EIO;
}
-static u8 atmel_nand_read_byte(struct mtd_info *mtd)
+static u8 atmel_nand_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
return ioread8(nand->activecs->io.virt);
}
-static u16 atmel_nand_read_word(struct mtd_info *mtd)
+static void atmel_nand_write_byte(struct nand_chip *chip, u8 byte)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct atmel_nand *nand = to_atmel_nand(chip);
-
- return ioread16(nand->activecs->io.virt);
-}
-
-static void atmel_nand_write_byte(struct mtd_info *mtd, u8 byte)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
if (chip->options & NAND_BUSWIDTH_16)
@@ -437,9 +427,8 @@ static void atmel_nand_write_byte(struct mtd_info *mtd, u8 byte)
iowrite8(byte, nand->activecs->io.virt);
}
-static void atmel_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+static void atmel_nand_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_nand_controller *nc;
@@ -462,9 +451,8 @@ static void atmel_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
ioread8_rep(nand->activecs->io.virt, buf, len);
}
-static void atmel_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+static void atmel_nand_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_nand_controller *nc;
@@ -487,34 +475,31 @@ static void atmel_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
iowrite8_rep(nand->activecs->io.virt, buf, len);
}
-static int atmel_nand_dev_ready(struct mtd_info *mtd)
+static int atmel_nand_dev_ready(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
return gpiod_get_value(nand->activecs->rb.gpio);
}
-static void atmel_nand_select_chip(struct mtd_info *mtd, int cs)
+static void atmel_nand_select_chip(struct nand_chip *chip, int cs)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
if (cs < 0 || cs >= nand->numcs) {
nand->activecs = NULL;
- chip->dev_ready = NULL;
+ chip->legacy.dev_ready = NULL;
return;
}
nand->activecs = &nand->cs[cs];
if (nand->activecs->rb.type == ATMEL_NAND_GPIO_RB)
- chip->dev_ready = atmel_nand_dev_ready;
+ chip->legacy.dev_ready = atmel_nand_dev_ready;
}
-static int atmel_hsmc_nand_dev_ready(struct mtd_info *mtd)
+static int atmel_hsmc_nand_dev_ready(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_hsmc_nand_controller *nc;
u32 status;
@@ -526,15 +511,15 @@ static int atmel_hsmc_nand_dev_ready(struct mtd_info *mtd)
return status & ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
}
-static void atmel_hsmc_nand_select_chip(struct mtd_info *mtd, int cs)
+static void atmel_hsmc_nand_select_chip(struct nand_chip *chip, int cs)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_hsmc_nand_controller *nc;
nc = to_hsmc_nand_controller(chip->controller);
- atmel_nand_select_chip(mtd, cs);
+ atmel_nand_select_chip(chip, cs);
if (!nand->activecs) {
regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
@@ -543,7 +528,7 @@ static void atmel_hsmc_nand_select_chip(struct mtd_info *mtd, int cs)
}
if (nand->activecs->rb.type == ATMEL_NAND_NATIVE_RB)
- chip->dev_ready = atmel_hsmc_nand_dev_ready;
+ chip->legacy.dev_ready = atmel_hsmc_nand_dev_ready;
regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
@@ -607,10 +592,9 @@ static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
return ret;
}
-static void atmel_hsmc_nand_cmd_ctrl(struct mtd_info *mtd, int dat,
+static void atmel_hsmc_nand_cmd_ctrl(struct nand_chip *chip, int dat,
unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_hsmc_nand_controller *nc;
@@ -634,10 +618,9 @@ static void atmel_hsmc_nand_cmd_ctrl(struct mtd_info *mtd, int dat,
}
}
-static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+static void atmel_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_nand_controller *nc;
@@ -851,7 +834,7 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
if (ret)
return ret;
- atmel_nand_write_buf(mtd, buf, mtd->writesize);
+ atmel_nand_write_buf(chip, buf, mtd->writesize);
ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
if (ret) {
@@ -861,20 +844,18 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
atmel_nand_pmecc_disable(chip, raw);
- atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ atmel_nand_write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
-static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip, const u8 *buf,
+static int atmel_nand_pmecc_write_page(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
}
-static int atmel_nand_pmecc_write_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int atmel_nand_pmecc_write_page_raw(struct nand_chip *chip,
const u8 *buf, int oob_required,
int page)
{
@@ -893,8 +874,8 @@ static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
if (ret)
return ret;
- atmel_nand_read_buf(mtd, buf, mtd->writesize);
- atmel_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ atmel_nand_read_buf(chip, buf, mtd->writesize);
+ atmel_nand_read_buf(chip, chip->oob_poi, mtd->oobsize);
ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
@@ -903,15 +884,13 @@ static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
return ret;
}
-static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
- struct nand_chip *chip, u8 *buf,
+static int atmel_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
}
-static int atmel_nand_pmecc_read_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, u8 *buf,
+static int atmel_nand_pmecc_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
@@ -956,7 +935,7 @@ static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
if (ret)
return ret;
- atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ atmel_nand_write_buf(chip, chip->oob_poi, mtd->oobsize);
nc->op.cmds[0] = NAND_CMD_PAGEPROG;
nc->op.ncmds = 1;
@@ -966,15 +945,14 @@ static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n",
ret);
- status = chip->waitfunc(mtd, chip);
+ status = chip->legacy.waitfunc(chip);
if (status & NAND_STATUS_FAIL)
return -EIO;
return ret;
}
-static int atmel_hsmc_nand_pmecc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int atmel_hsmc_nand_pmecc_write_page(struct nand_chip *chip,
const u8 *buf, int oob_required,
int page)
{
@@ -982,8 +960,7 @@ static int atmel_hsmc_nand_pmecc_write_page(struct mtd_info *mtd,
false);
}
-static int atmel_hsmc_nand_pmecc_write_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int atmel_hsmc_nand_pmecc_write_page_raw(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
@@ -1045,16 +1022,14 @@ static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
return ret;
}
-static int atmel_hsmc_nand_pmecc_read_page(struct mtd_info *mtd,
- struct nand_chip *chip, u8 *buf,
+static int atmel_hsmc_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
false);
}
-static int atmel_hsmc_nand_pmecc_read_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int atmel_hsmc_nand_pmecc_read_page_raw(struct nand_chip *chip,
u8 *buf, int oob_required,
int page)
{
@@ -1473,10 +1448,9 @@ static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand,
return 0;
}
-static int atmel_nand_setup_data_interface(struct mtd_info *mtd, int csline,
+static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline,
const struct nand_data_interface *conf)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_nand_controller *nc;
@@ -1498,19 +1472,18 @@ static void atmel_nand_init(struct atmel_nand_controller *nc,
mtd->dev.parent = nc->dev;
nand->base.controller = &nc->base;
- chip->cmd_ctrl = atmel_nand_cmd_ctrl;
- chip->read_byte = atmel_nand_read_byte;
- chip->read_word = atmel_nand_read_word;
- chip->write_byte = atmel_nand_write_byte;
- chip->read_buf = atmel_nand_read_buf;
- chip->write_buf = atmel_nand_write_buf;
+ chip->legacy.cmd_ctrl = atmel_nand_cmd_ctrl;
+ chip->legacy.read_byte = atmel_nand_read_byte;
+ chip->legacy.write_byte = atmel_nand_write_byte;
+ chip->legacy.read_buf = atmel_nand_read_buf;
+ chip->legacy.write_buf = atmel_nand_write_buf;
chip->select_chip = atmel_nand_select_chip;
if (nc->mck && nc->caps->ops->setup_data_interface)
chip->setup_data_interface = atmel_nand_setup_data_interface;
/* Some NANDs require a longer delay than the default one (20us). */
- chip->chip_delay = 40;
+ chip->legacy.chip_delay = 40;
/*
* Use a bounce buffer when the buffer passed by the MTD user is not
@@ -1551,7 +1524,7 @@ static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
atmel_nand_init(nc, nand);
/* Overload some methods for the HSMC controller. */
- chip->cmd_ctrl = atmel_hsmc_nand_cmd_ctrl;
+ chip->legacy.cmd_ctrl = atmel_hsmc_nand_cmd_ctrl;
chip->select_chip = atmel_hsmc_nand_select_chip;
}
@@ -1586,9 +1559,7 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
return ERR_PTR(-EINVAL);
}
- nand = devm_kzalloc(nc->dev,
- sizeof(*nand) + (numcs * sizeof(*nand->cs)),
- GFP_KERNEL);
+ nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL);
if (!nand) {
dev_err(nc->dev, "Failed to allocate NAND object\n");
return ERR_PTR(-ENOMEM);
@@ -1694,7 +1665,7 @@ atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
nc->caps->ops->nand_init(nc, nand);
- ret = nand_scan(mtd, nand->numcs);
+ ret = nand_scan(chip, nand->numcs);
if (ret) {
dev_err(nc->dev, "NAND scan failed: %d\n", ret);
return ret;
@@ -2063,6 +2034,10 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
nand_np = dev->of_node;
nfc_np = of_find_compatible_node(dev->of_node, NULL,
"atmel,sama5d3-nfc");
+ if (!nfc_np) {
+ dev_err(dev, "Could not find device node for sama5d3-nfc\n");
+ return -ENODEV;
+ }
nc->clk = of_clk_get(nfc_np, 0);
if (IS_ERR(nc->clk)) {
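Besides the hook conversion, the atmel diff swaps open-coded size arithmetic for struct_size(). A short sketch of why that matters, using placeholder structs standing in for atmel_nand and its cs[] array:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_cs { int id; };
struct example_nand {
	int numcs;
	struct example_cs cs[];		/* flexible array member */
};

static struct example_nand *example_alloc(int numcs)
{
	struct example_nand *nand;

	/* struct_size(nand, cs, numcs) == sizeof(*nand) +
	 * numcs * sizeof(nand->cs[0]), but saturates to SIZE_MAX on
	 * overflow, so the allocation fails cleanly instead of
	 * returning an undersized buffer. */
	nand = kzalloc(struct_size(nand, cs, numcs), GFP_KERNEL);
	if (nand)
		nand->numcs = numcs;
	return nand;
}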
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index 35f5c84cd331..9731c1c487f6 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -24,134 +24,113 @@ struct au1550nd_ctx {
int cs;
void __iomem *base;
- void (*write_byte)(struct mtd_info *, u_char);
+ void (*write_byte)(struct nand_chip *, u_char);
};
/**
* au_read_byte - read one byte from the chip
- * @mtd: MTD device structure
+ * @this: NAND chip object
*
* read function for 8bit buswidth
*/
-static u_char au_read_byte(struct mtd_info *mtd)
+static u_char au_read_byte(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- u_char ret = readb(this->IO_ADDR_R);
+ u_char ret = readb(this->legacy.IO_ADDR_R);
wmb(); /* drain writebuffer */
return ret;
}
/**
* au_write_byte - write one byte to the chip
- * @mtd: MTD device structure
+ * @this: NAND chip object
 * @byte: data byte to write
 *
 * write function for 8bit buswidth
*/
-static void au_write_byte(struct mtd_info *mtd, u_char byte)
+static void au_write_byte(struct nand_chip *this, u_char byte)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- writeb(byte, this->IO_ADDR_W);
+ writeb(byte, this->legacy.IO_ADDR_W);
wmb(); /* drain writebuffer */
}
/**
* au_read_byte16 - read one byte endianness aware from the chip
- * @mtd: MTD device structure
+ * @this: NAND chip object
*
* read function for 16bit buswidth with endianness conversion
*/
-static u_char au_read_byte16(struct mtd_info *mtd)
+static u_char au_read_byte16(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- u_char ret = (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
+ u_char ret = (u_char) cpu_to_le16(readw(this->legacy.IO_ADDR_R));
wmb(); /* drain writebuffer */
return ret;
}
/**
* au_write_byte16 - write one byte endianness aware to the chip
- * @mtd: MTD device structure
+ * @this: NAND chip object
 * @byte: data byte to write
*
* write function for 16bit buswidth with endianness conversion
*/
-static void au_write_byte16(struct mtd_info *mtd, u_char byte)
+static void au_write_byte16(struct nand_chip *this, u_char byte)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
+ writew(le16_to_cpu((u16) byte), this->legacy.IO_ADDR_W);
wmb(); /* drain writebuffer */
}
/**
- * au_read_word - read one word from the chip
- * @mtd: MTD device structure
- *
- * read function for 16bit buswidth without endianness conversion
- */
-static u16 au_read_word(struct mtd_info *mtd)
-{
- struct nand_chip *this = mtd_to_nand(mtd);
- u16 ret = readw(this->IO_ADDR_R);
- wmb(); /* drain writebuffer */
- return ret;
-}
-
-/**
* au_write_buf - write buffer to chip
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
*
* write function for 8bit buswidth
*/
-static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+static void au_write_buf(struct nand_chip *this, const u_char *buf, int len)
{
int i;
- struct nand_chip *this = mtd_to_nand(mtd);
for (i = 0; i < len; i++) {
- writeb(buf[i], this->IO_ADDR_W);
+ writeb(buf[i], this->legacy.IO_ADDR_W);
wmb(); /* drain writebuffer */
}
}
/**
* au_read_buf - read chip data into buffer
- * @mtd: MTD device structure
+ * @this: NAND chip object
 * @buf: buffer to store data
* @len: number of bytes to read
*
* read function for 8bit buswidth
*/
-static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void au_read_buf(struct nand_chip *this, u_char *buf, int len)
{
int i;
- struct nand_chip *this = mtd_to_nand(mtd);
for (i = 0; i < len; i++) {
- buf[i] = readb(this->IO_ADDR_R);
+ buf[i] = readb(this->legacy.IO_ADDR_R);
wmb(); /* drain writebuffer */
}
}
/**
* au_write_buf16 - write buffer to chip
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
*
* write function for 16bit buswidth
*/
-static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
+static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len)
{
int i;
- struct nand_chip *this = mtd_to_nand(mtd);
u16 *p = (u16 *) buf;
len >>= 1;
for (i = 0; i < len; i++) {
- writew(p[i], this->IO_ADDR_W);
+ writew(p[i], this->legacy.IO_ADDR_W);
wmb(); /* drain writebuffer */
}
@@ -173,7 +152,7 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
len >>= 1;
for (i = 0; i < len; i++) {
- p[i] = readw(this->IO_ADDR_R);
+ p[i] = readw(this->legacy.IO_ADDR_R);
wmb(); /* drain writebuffer */
}
}
@@ -200,19 +179,19 @@ static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
switch (cmd) {
case NAND_CTL_SETCLE:
- this->IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
+ this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
break;
case NAND_CTL_CLRCLE:
- this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
+ this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
break;
case NAND_CTL_SETALE:
- this->IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
+ this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
break;
case NAND_CTL_CLRALE:
- this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
+ this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
/* FIXME: Nobody knows why this is necessary,
* but it works only that way */
udelay(1);
@@ -229,12 +208,12 @@ static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
break;
}
- this->IO_ADDR_R = this->IO_ADDR_W;
+ this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W;
wmb(); /* Drain the writebuffer */
}
-int au1550_device_ready(struct mtd_info *mtd)
+int au1550_device_ready(struct nand_chip *this)
{
return (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) ? 1 : 0;
}
@@ -248,23 +227,24 @@ int au1550_device_ready(struct mtd_info *mtd)
* chip needs it to be asserted during chip not ready time but the NAND
* controller keeps it released.
*
- * @mtd: MTD device structure
+ * @this: NAND chip object
 * @chip: chip number to select, -1 for deselect
*/
-static void au1550_select_chip(struct mtd_info *mtd, int chip)
+static void au1550_select_chip(struct nand_chip *this, int chip)
{
}
/**
* au1550_command - Send command to NAND device
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @command: the command to be sent
* @column: the column address for this command, -1 if none
* @page_addr: the page address for this command, -1 if none
*/
-static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+static void au1550_command(struct nand_chip *this, unsigned command,
+ int column, int page_addr)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx,
chip);
int ce_override = 0, i;
@@ -289,9 +269,9 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
column -= 256;
readcmd = NAND_CMD_READ1;
}
- ctx->write_byte(mtd, readcmd);
+ ctx->write_byte(this, readcmd);
}
- ctx->write_byte(mtd, command);
+ ctx->write_byte(this, command);
/* Set ALE and clear CLE to start address cycle */
au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
@@ -305,10 +285,10 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
if (this->options & NAND_BUSWIDTH_16 &&
!nand_opcode_8bits(command))
column >>= 1;
- ctx->write_byte(mtd, column);
+ ctx->write_byte(this, column);
}
if (page_addr != -1) {
- ctx->write_byte(mtd, (u8)(page_addr & 0xff));
+ ctx->write_byte(this, (u8)(page_addr & 0xff));
if (command == NAND_CMD_READ0 ||
command == NAND_CMD_READ1 ||
@@ -326,10 +306,10 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
}
- ctx->write_byte(mtd, (u8)(page_addr >> 8));
+ ctx->write_byte(this, (u8)(page_addr >> 8));
if (this->options & NAND_ROW_ADDR_3)
- ctx->write_byte(mtd,
+ ctx->write_byte(this,
((page_addr >> 16) & 0x0f));
}
/* Latch in address */
@@ -362,7 +342,8 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
/* Apply a short delay always to ensure that we do wait tWB. */
ndelay(100);
/* Wait for a chip to become ready... */
- for (i = this->chip_delay; !this->dev_ready(mtd) && i > 0; --i)
+ for (i = this->legacy.chip_delay;
+ !this->legacy.dev_ready(this) && i > 0; --i)
udelay(1);
/* Release -CE and re-enable interrupts. */
@@ -373,7 +354,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
/* Apply this short delay always to ensure that we do wait tWB. */
ndelay(100);
- while(!this->dev_ready(mtd));
+ while(!this->legacy.dev_ready(this));
}
static int find_nand_cs(unsigned long nand_base)
@@ -448,25 +429,24 @@ static int au1550nd_probe(struct platform_device *pdev)
}
ctx->cs = cs;
- this->dev_ready = au1550_device_ready;
+ this->legacy.dev_ready = au1550_device_ready;
this->select_chip = au1550_select_chip;
- this->cmdfunc = au1550_command;
+ this->legacy.cmdfunc = au1550_command;
/* 30 us command delay time */
- this->chip_delay = 30;
+ this->legacy.chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
this->ecc.algo = NAND_ECC_HAMMING;
if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
- this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
+ this->legacy.read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
- this->read_word = au_read_word;
- this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
- this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
+ this->legacy.write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
+ this->legacy.read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(this, 1);
if (ret) {
dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
goto out3;
@@ -492,7 +472,7 @@ static int au1550nd_remove(struct platform_device *pdev)
struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nand_release(nand_to_mtd(&ctx->chip));
+ nand_release(&ctx->chip);
iounmap(ctx->base);
release_mem_region(r->start, 0x1000);
kfree(ctx);
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/main.c b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
index fb31429b70a9..d79694160845 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
@@ -65,7 +65,7 @@ static int bcm47xxnflash_remove(struct platform_device *pdev)
{
struct bcm47xxnflash *nflash = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&nflash->nand_chip));
+ nand_release(&nflash->nand_chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 60874de430eb..9095a79ebc7d 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -170,10 +170,9 @@ static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
* NAND chip ops
**************************************************/
-static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
+static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
+ int cmd, unsigned int ctrl)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
u32 code = 0;
@@ -191,15 +190,14 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct mtd_info *mtd, int cmd,
}
/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
-static void bcm47xxnflash_ops_bcm4706_select_chip(struct mtd_info *mtd,
- int chip)
+static void bcm47xxnflash_ops_bcm4706_select_chip(struct nand_chip *chip,
+ int cs)
{
return;
}
-static int bcm47xxnflash_ops_bcm4706_dev_ready(struct mtd_info *mtd)
+static int bcm47xxnflash_ops_bcm4706_dev_ready(struct nand_chip *nand_chip)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
return !!(bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_CTL) & NCTL_READY);
@@ -212,11 +210,11 @@ static int bcm47xxnflash_ops_bcm4706_dev_ready(struct mtd_info *mtd)
* registers of ChipCommon core. Hacking cmd_ctrl to understand and convert
* standard commands would be much more complicated.
*/
-static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
+static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct nand_chip *nand_chip,
unsigned command, int column,
int page_addr)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
struct bcma_drv_cc *cc = b47n->cc;
u32 ctlcode;
@@ -229,10 +227,10 @@ static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
switch (command) {
case NAND_CMD_RESET:
- nand_chip->cmd_ctrl(mtd, command, NAND_CTRL_CLE);
+ nand_chip->legacy.cmd_ctrl(nand_chip, command, NAND_CTRL_CLE);
ndelay(100);
- nand_wait_ready(mtd);
+ nand_wait_ready(nand_chip);
break;
case NAND_CMD_READID:
ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
@@ -310,9 +308,9 @@ static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
b47n->curr_command = command;
}
-static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd)
+static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct nand_chip *nand_chip)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
struct bcma_drv_cc *cc = b47n->cc;
u32 tmp = 0;
@@ -338,31 +336,31 @@ static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd)
return 0;
}
-static void bcm47xxnflash_ops_bcm4706_read_buf(struct mtd_info *mtd,
+static void bcm47xxnflash_ops_bcm4706_read_buf(struct nand_chip *nand_chip,
uint8_t *buf, int len)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
switch (b47n->curr_command) {
case NAND_CMD_READ0:
case NAND_CMD_READOOB:
- bcm47xxnflash_ops_bcm4706_read(mtd, buf, len);
+ bcm47xxnflash_ops_bcm4706_read(nand_to_mtd(nand_chip), buf,
+ len);
return;
}
pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
}
-static void bcm47xxnflash_ops_bcm4706_write_buf(struct mtd_info *mtd,
+static void bcm47xxnflash_ops_bcm4706_write_buf(struct nand_chip *nand_chip,
const uint8_t *buf, int len)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
switch (b47n->curr_command) {
case NAND_CMD_SEQIN:
- bcm47xxnflash_ops_bcm4706_write(mtd, buf, len);
+ bcm47xxnflash_ops_bcm4706_write(nand_to_mtd(nand_chip), buf,
+ len);
return;
}
@@ -386,16 +384,16 @@ int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
u32 val;
b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
- nand_chip->cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
- nand_chip->dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
- b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
- b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
- b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
- b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
- b47n->nand_chip.set_features = nand_get_set_features_notsupp;
- b47n->nand_chip.get_features = nand_get_set_features_notsupp;
-
- nand_chip->chip_delay = 50;
+ nand_chip->legacy.cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
+ nand_chip->legacy.dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
+ b47n->nand_chip.legacy.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
+ b47n->nand_chip.legacy.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
+ b47n->nand_chip.legacy.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
+ b47n->nand_chip.legacy.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
+ b47n->nand_chip.legacy.set_features = nand_get_set_features_notsupp;
+ b47n->nand_chip.legacy.get_features = nand_get_set_features_notsupp;
+
+ nand_chip->legacy.chip_delay = 50;
b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
@@ -423,7 +421,7 @@ int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
(w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
/* Scan NAND */
- err = nand_scan(nand_to_mtd(&b47n->nand_chip), 1);
+ err = nand_scan(&b47n->nand_chip, 1);
if (err) {
pr_err("Could not scan NAND flash: %d\n", err);
goto exit;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 4b90d5b380c2..482c6f093f99 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -1231,15 +1231,14 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
* NAND MTD API: read/program/erase
***********************************************************************/
-static void brcmnand_cmd_ctrl(struct mtd_info *mtd, int dat,
- unsigned int ctrl)
+static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
+ unsigned int ctrl)
{
/* intentionally left blank */
}
-static int brcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
+static int brcmnand_waitfunc(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
unsigned long timeo = msecs_to_jiffies(100);
@@ -1274,7 +1273,6 @@ static int brcmnand_low_level_op(struct brcmnand_host *host,
enum brcmnand_llop_type type, u32 data,
bool last_op)
{
- struct mtd_info *mtd = nand_to_mtd(&host->chip);
struct nand_chip *chip = &host->chip;
struct brcmnand_controller *ctrl = host->ctrl;
u32 tmp;
@@ -1307,13 +1305,13 @@ static int brcmnand_low_level_op(struct brcmnand_host *host,
(void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
- return brcmnand_waitfunc(mtd, chip);
+ return brcmnand_waitfunc(chip);
}
-static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
+static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
int column, int page_addr)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
u64 addr = (u64)page_addr << chip->page_shift;
@@ -1383,7 +1381,7 @@ static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
brcmnand_send_cmd(host, native_cmd);
- brcmnand_waitfunc(mtd, chip);
+ brcmnand_waitfunc(chip);
if (native_cmd == CMD_PARAMETER_READ ||
native_cmd == CMD_PARAMETER_CHANGE_COL) {
@@ -1417,9 +1415,8 @@ static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
brcmnand_wp(mtd, 1);
}
-static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
+static uint8_t brcmnand_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
uint8_t ret = 0;
@@ -1474,19 +1471,18 @@ static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
return ret;
}
-static void brcmnand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
int i;
for (i = 0; i < len; i++, buf++)
- *buf = brcmnand_read_byte(mtd);
+ *buf = brcmnand_read_byte(chip);
}
-static void brcmnand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
- int len)
+static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
+ int len)
{
int i;
- struct nand_chip *chip = mtd_to_nand(mtd);
struct brcmnand_host *host = nand_get_controller_data(chip);
switch (host->last_cmd) {
@@ -1617,7 +1613,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
brcmnand_send_cmd(host, CMD_PAGE_READ);
- brcmnand_waitfunc(mtd, chip);
+ brcmnand_waitfunc(chip);
if (likely(buf)) {
brcmnand_soc_data_bus_prepare(ctrl->soc, false);
@@ -1689,7 +1685,7 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
sas = mtd->oobsize / chip->ecc.steps;
/* read without ecc for verification */
- ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
+ ret = chip->ecc.read_page_raw(chip, buf, true, page);
if (ret)
return ret;
@@ -1786,9 +1782,10 @@ try_dmaread:
return 0;
}
-static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
@@ -1798,10 +1795,11 @@ static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
}
-static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
int ret;
@@ -1814,17 +1812,18 @@ static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
}
-static int brcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int brcmnand_read_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
mtd->writesize >> FC_SHIFT,
NULL, (u8 *)chip->oob_poi);
}
-static int brcmnand_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int brcmnand_read_oob_raw(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
brcmnand_set_ecc_enabled(host, 0);
@@ -1892,7 +1891,7 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
/* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
- status = brcmnand_waitfunc(mtd, chip);
+ status = brcmnand_waitfunc(chip);
if (status & NAND_STATUS_FAIL) {
dev_info(ctrl->dev, "program failed at %llx\n",
@@ -1906,9 +1905,10 @@ out:
return ret;
}
-static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
@@ -1918,10 +1918,10 @@ static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
return nand_prog_page_end_op(chip);
}
-static int brcmnand_write_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf,
+static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
@@ -1933,16 +1933,16 @@ static int brcmnand_write_page_raw(struct mtd_info *mtd,
return nand_prog_page_end_op(chip);
}
-static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int brcmnand_write_oob(struct nand_chip *chip, int page)
{
- return brcmnand_write(mtd, chip, (u64)page << chip->page_shift,
- NULL, chip->oob_poi);
+ return brcmnand_write(nand_to_mtd(chip), chip,
+ (u64)page << chip->page_shift, NULL,
+ chip->oob_poi);
}
-static int brcmnand_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
int ret;
@@ -2270,15 +2270,12 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
- chip->IO_ADDR_R = (void __iomem *)0xdeadbeef;
- chip->IO_ADDR_W = (void __iomem *)0xdeadbeef;
-
- chip->cmd_ctrl = brcmnand_cmd_ctrl;
- chip->cmdfunc = brcmnand_cmdfunc;
- chip->waitfunc = brcmnand_waitfunc;
- chip->read_byte = brcmnand_read_byte;
- chip->read_buf = brcmnand_read_buf;
- chip->write_buf = brcmnand_write_buf;
+ chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
+ chip->legacy.cmdfunc = brcmnand_cmdfunc;
+ chip->legacy.waitfunc = brcmnand_waitfunc;
+ chip->legacy.read_byte = brcmnand_read_byte;
+ chip->legacy.read_buf = brcmnand_read_buf;
+ chip->legacy.write_buf = brcmnand_write_buf;
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.read_page = brcmnand_read_page;
@@ -2301,7 +2298,7 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
nand_writereg(ctrl, cfg_offs,
nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
return ret;
@@ -2616,7 +2613,7 @@ int brcmnand_remove(struct platform_device *pdev)
struct brcmnand_host *host;
list_for_each_entry(host, &ctrl->host_list, node)
- nand_release(nand_to_mtd(&host->chip));
+ nand_release(&host->chip);
clk_disable_unprepare(ctrl->clk);
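Every driver in this series follows the same mechanical conversion: NAND hooks take a struct nand_chip instead of a struct mtd_info, and move under chip->legacy. A hedged before/after sketch with a placeholder driver (the "old" variant uses the pre-series field layout):

#include <linux/io.h>
#include <linux/mtd/rawnand.h>

/* Old-style hook: mtd-first, chip recovered on every call
 * (pre-series API; chip->IO_ADDR_R no longer exists afterwards). */
static u8 mydrv_read_byte_old(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	return readb(chip->IO_ADDR_R);
}

/* New-style hook: chip-first; an mtd_info, where still needed, comes
 * from nand_to_mtd(chip), and I/O fields live under chip->legacy. */
static u8 mydrv_read_byte(struct nand_chip *chip)
{
	return readb(chip->legacy.IO_ADDR_R);
}

static void mydrv_init_hooks(struct nand_chip *chip)
{
	/* Registration also moves under the legacy sub-structure. */
	chip->legacy.read_byte = mydrv_read_byte;
}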
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index 1dbe43adcfe7..c1a745940d12 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -100,9 +100,8 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
#define cafe_readl(cafe, addr) readl((cafe)->mmio + CAFE_##addr)
#define cafe_writel(cafe, datum, addr) writel(datum, (cafe)->mmio + CAFE_##addr)
-static int cafe_device_ready(struct mtd_info *mtd)
+static int cafe_device_ready(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
@@ -117,9 +116,8 @@ static int cafe_device_ready(struct mtd_info *mtd)
}
-static void cafe_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void cafe_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
if (cafe->usedma)
@@ -133,9 +131,8 @@ static void cafe_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
len, cafe->datalen);
}
-static void cafe_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void cafe_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
if (cafe->usedma)
@@ -148,22 +145,21 @@ static void cafe_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
cafe->datalen += len;
}
-static uint8_t cafe_read_byte(struct mtd_info *mtd)
+static uint8_t cafe_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
uint8_t d;
- cafe_read_buf(mtd, &d, 1);
+ cafe_read_buf(chip, &d, 1);
cafe_dev_dbg(&cafe->pdev->dev, "Read %02x\n", d);
return d;
}
-static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+static void cafe_nand_cmdfunc(struct nand_chip *chip, unsigned command,
int column, int page_addr)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct cafe_priv *cafe = nand_get_controller_data(chip);
int adrbytes = 0;
uint32_t ctl1;
@@ -313,13 +309,12 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
return;
}
- nand_wait_ready(mtd);
+ nand_wait_ready(chip);
cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
}
-static void cafe_select_chip(struct mtd_info *mtd, int chipnr)
+static void cafe_select_chip(struct nand_chip *chip, int chipnr)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
@@ -346,17 +341,19 @@ static irqreturn_t cafe_nand_interrupt(int irq, void *id)
return IRQ_HANDLED;
}
-static int cafe_nand_write_oob(struct mtd_info *mtd,
- struct nand_chip *chip, int page)
+static int cafe_nand_write_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
mtd->oobsize);
}
/* Don't use -- use nand_read_oob_std for now */
-static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int cafe_nand_read_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/**
@@ -369,9 +366,10 @@ static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
* The hw generator calculates the error syndrome automatically. Therefore
* we need a special oob layout and handling.
*/
-static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int cafe_nand_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct cafe_priv *cafe = nand_get_controller_data(chip);
unsigned int max_bitflips = 0;
@@ -380,7 +378,7 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
cafe_readl(cafe, NAND_ECC_SYN01));
nand_read_page_op(chip, page, 0, buf, mtd->writesize);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
unsigned short syn[8], pat[4];
@@ -531,15 +529,15 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
};
-static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
+static int cafe_nand_write_page_lowlevel(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct cafe_priv *cafe = nand_get_controller_data(chip);
nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
/* Set up ECC autogeneration */
cafe->ctl2 |= (1<<30);
@@ -547,7 +545,7 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
return nand_prog_page_end_op(chip);
}
-static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
+static int cafe_nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
return 0;
}
@@ -705,23 +703,23 @@ static int cafe_nand_probe(struct pci_dev *pdev,
goto out_ior;
}
- cafe->nand.cmdfunc = cafe_nand_cmdfunc;
- cafe->nand.dev_ready = cafe_device_ready;
- cafe->nand.read_byte = cafe_read_byte;
- cafe->nand.read_buf = cafe_read_buf;
- cafe->nand.write_buf = cafe_write_buf;
+ cafe->nand.legacy.cmdfunc = cafe_nand_cmdfunc;
+ cafe->nand.legacy.dev_ready = cafe_device_ready;
+ cafe->nand.legacy.read_byte = cafe_read_byte;
+ cafe->nand.legacy.read_buf = cafe_read_buf;
+ cafe->nand.legacy.write_buf = cafe_write_buf;
cafe->nand.select_chip = cafe_select_chip;
- cafe->nand.set_features = nand_get_set_features_notsupp;
- cafe->nand.get_features = nand_get_set_features_notsupp;
+ cafe->nand.legacy.set_features = nand_get_set_features_notsupp;
+ cafe->nand.legacy.get_features = nand_get_set_features_notsupp;
- cafe->nand.chip_delay = 0;
+ cafe->nand.legacy.chip_delay = 0;
/* Enable the following for a flash based bad block table */
cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
if (skipbbt) {
cafe->nand.options |= NAND_SKIP_BBTSCAN;
- cafe->nand.block_bad = cafe_nand_block_bad;
+ cafe->nand.legacy.block_bad = cafe_nand_block_bad;
}
if (numtimings && numtimings != 3) {
@@ -783,7 +781,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
/* Scan to find existence of the device */
cafe->nand.dummy_controller.ops = &cafe_nand_controller_ops;
- err = nand_scan(mtd, 2);
+ err = nand_scan(&cafe->nand, 2);
if (err)
goto out_irq;
@@ -819,7 +817,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
- nand_release(mtd);
+ nand_release(chip);
free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
diff --git a/drivers/mtd/nand/raw/cmx270_nand.c b/drivers/mtd/nand/raw/cmx270_nand.c
index b66e254b6802..143e4acacaae 100644
--- a/drivers/mtd/nand/raw/cmx270_nand.c
+++ b/drivers/mtd/nand/raw/cmx270_nand.c
@@ -49,29 +49,26 @@ static const struct mtd_partition partition_info[] = {
};
#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
-static u_char cmx270_read_byte(struct mtd_info *mtd)
+static u_char cmx270_read_byte(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
-
- return (readl(this->IO_ADDR_R) >> 16);
+ return (readl(this->legacy.IO_ADDR_R) >> 16);
}
-static void cmx270_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+static void cmx270_write_buf(struct nand_chip *this, const u_char *buf,
+ int len)
{
int i;
- struct nand_chip *this = mtd_to_nand(mtd);
for (i=0; i<len; i++)
- writel((*buf++ << 16), this->IO_ADDR_W);
+ writel((*buf++ << 16), this->legacy.IO_ADDR_W);
}
-static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void cmx270_read_buf(struct nand_chip *this, u_char *buf, int len)
{
int i;
- struct nand_chip *this = mtd_to_nand(mtd);
for (i=0; i<len; i++)
- *buf++ = readl(this->IO_ADDR_R) >> 16;
+ *buf++ = readl(this->legacy.IO_ADDR_R) >> 16;
}
static inline void nand_cs_on(void)
@@ -89,11 +86,10 @@ static void nand_cs_off(void)
/*
* hardware specific access to control-lines
*/
-static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
+static void cmx270_hwcontrol(struct nand_chip *this, int dat,
unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- unsigned int nandaddr = (unsigned int)this->IO_ADDR_W;
+ unsigned int nandaddr = (unsigned int)this->legacy.IO_ADDR_W;
dsb();
@@ -113,9 +109,9 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
}
dsb();
- this->IO_ADDR_W = (void __iomem*)nandaddr;
+ this->legacy.IO_ADDR_W = (void __iomem*)nandaddr;
if (dat != NAND_CMD_NONE)
- writel((dat << 16), this->IO_ADDR_W);
+ writel((dat << 16), this->legacy.IO_ADDR_W);
dsb();
}
@@ -123,7 +119,7 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
/*
* read device ready pin
*/
-static int cmx270_device_ready(struct mtd_info *mtd)
+static int cmx270_device_ready(struct nand_chip *this)
{
dsb();
@@ -177,23 +173,23 @@ static int __init cmx270_init(void)
cmx270_nand_mtd->owner = THIS_MODULE;
/* insert callbacks */
- this->IO_ADDR_R = cmx270_nand_io;
- this->IO_ADDR_W = cmx270_nand_io;
- this->cmd_ctrl = cmx270_hwcontrol;
- this->dev_ready = cmx270_device_ready;
+ this->legacy.IO_ADDR_R = cmx270_nand_io;
+ this->legacy.IO_ADDR_W = cmx270_nand_io;
+ this->legacy.cmd_ctrl = cmx270_hwcontrol;
+ this->legacy.dev_ready = cmx270_device_ready;
/* 15 us command delay time */
- this->chip_delay = 20;
+ this->legacy.chip_delay = 20;
this->ecc.mode = NAND_ECC_SOFT;
this->ecc.algo = NAND_ECC_HAMMING;
/* read/write functions */
- this->read_byte = cmx270_read_byte;
- this->read_buf = cmx270_read_buf;
- this->write_buf = cmx270_write_buf;
+ this->legacy.read_byte = cmx270_read_byte;
+ this->legacy.read_buf = cmx270_read_buf;
+ this->legacy.write_buf = cmx270_write_buf;
/* Scan to find existence of the device */
- ret = nand_scan(cmx270_nand_mtd, 1);
+ ret = nand_scan(this, 1);
if (ret) {
pr_notice("No NAND device\n");
goto err_scan;
@@ -228,7 +224,7 @@ module_init(cmx270_init);
static void __exit cmx270_cleanup(void)
{
/* Release resources, unregister device */
- nand_release(cmx270_nand_mtd);
+ nand_release(mtd_to_nand(cmx270_nand_mtd));
gpio_free(GPIO_NAND_RB);
gpio_free(GPIO_NAND_CS);
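cmx270_cleanup() above keeps only an mtd pointer, so it converts with mtd_to_nand() before calling the chip-based nand_release(). Both conversions are cheap: in this kernel the mtd_info is embedded inside the nand_chip, so each direction is plain pointer arithmetic rather than a lookup. A sketch of the duality, inside any function that already holds one of the two pointers (names as in include/linux/mtd/rawnand.h):

	struct nand_chip *chip = mtd_to_nand(mtd);	/* container_of() on the embedded mtd */
	struct mtd_info *mtd2 = nand_to_mtd(chip);	/* &chip->mtd, so mtd2 == mtd */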
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index beafad62e7d5..c6f578aff5d9 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -93,83 +93,74 @@
#define CS_NAND_ECC_CLRECC (1<<1)
#define CS_NAND_ECC_ENECC (1<<0)
-static void cs553x_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void cs553x_read_buf(struct nand_chip *this, u_char *buf, int len)
{
- struct nand_chip *this = mtd_to_nand(mtd);
-
while (unlikely(len > 0x800)) {
- memcpy_fromio(buf, this->IO_ADDR_R, 0x800);
+ memcpy_fromio(buf, this->legacy.IO_ADDR_R, 0x800);
buf += 0x800;
len -= 0x800;
}
- memcpy_fromio(buf, this->IO_ADDR_R, len);
+ memcpy_fromio(buf, this->legacy.IO_ADDR_R, len);
}
-static void cs553x_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+static void cs553x_write_buf(struct nand_chip *this, const u_char *buf, int len)
{
- struct nand_chip *this = mtd_to_nand(mtd);
-
while (unlikely(len > 0x800)) {
- memcpy_toio(this->IO_ADDR_R, buf, 0x800);
+ memcpy_toio(this->legacy.IO_ADDR_R, buf, 0x800);
buf += 0x800;
len -= 0x800;
}
- memcpy_toio(this->IO_ADDR_R, buf, len);
+ memcpy_toio(this->legacy.IO_ADDR_R, buf, len);
}
-static unsigned char cs553x_read_byte(struct mtd_info *mtd)
+static unsigned char cs553x_read_byte(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- return readb(this->IO_ADDR_R);
+ return readb(this->legacy.IO_ADDR_R);
}
-static void cs553x_write_byte(struct mtd_info *mtd, u_char byte)
+static void cs553x_write_byte(struct nand_chip *this, u_char byte)
{
- struct nand_chip *this = mtd_to_nand(mtd);
int i = 100000;
- while (i && readb(this->IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) {
+ while (i && readb(this->legacy.IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) {
udelay(1);
i--;
}
- writeb(byte, this->IO_ADDR_W + 0x801);
+ writeb(byte, this->legacy.IO_ADDR_W + 0x801);
}
-static void cs553x_hwcontrol(struct mtd_info *mtd, int cmd,
+static void cs553x_hwcontrol(struct nand_chip *this, int cmd,
unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- void __iomem *mmio_base = this->IO_ADDR_R;
+ void __iomem *mmio_base = this->legacy.IO_ADDR_R;
if (ctrl & NAND_CTRL_CHANGE) {
unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE ) ^ 0x01;
writeb(ctl, mmio_base + MM_NAND_CTL);
}
if (cmd != NAND_CMD_NONE)
- cs553x_write_byte(mtd, cmd);
+ cs553x_write_byte(this, cmd);
}
-static int cs553x_device_ready(struct mtd_info *mtd)
+static int cs553x_device_ready(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- void __iomem *mmio_base = this->IO_ADDR_R;
+ void __iomem *mmio_base = this->legacy.IO_ADDR_R;
unsigned char foo = readb(mmio_base + MM_NAND_STS);
return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY);
}
-static void cs_enable_hwecc(struct mtd_info *mtd, int mode)
+static void cs_enable_hwecc(struct nand_chip *this, int mode)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- void __iomem *mmio_base = this->IO_ADDR_R;
+ void __iomem *mmio_base = this->legacy.IO_ADDR_R;
writeb(0x07, mmio_base + MM_NAND_ECC_CTL);
}
-static int cs_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
+ u_char *ecc_code)
{
uint32_t ecc;
- struct nand_chip *this = mtd_to_nand(mtd);
- void __iomem *mmio_base = this->IO_ADDR_R;
+ void __iomem *mmio_base = this->legacy.IO_ADDR_R;
ecc = readl(mmio_base + MM_NAND_STS);
@@ -208,20 +199,20 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
new_mtd->owner = THIS_MODULE;
/* map physical address */
- this->IO_ADDR_R = this->IO_ADDR_W = ioremap(adr, 4096);
- if (!this->IO_ADDR_R) {
+ this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = ioremap(adr, 4096);
+ if (!this->legacy.IO_ADDR_R) {
pr_warn("ioremap cs553x NAND @0x%08lx failed\n", adr);
err = -EIO;
goto out_mtd;
}
- this->cmd_ctrl = cs553x_hwcontrol;
- this->dev_ready = cs553x_device_ready;
- this->read_byte = cs553x_read_byte;
- this->read_buf = cs553x_read_buf;
- this->write_buf = cs553x_write_buf;
+ this->legacy.cmd_ctrl = cs553x_hwcontrol;
+ this->legacy.dev_ready = cs553x_device_ready;
+ this->legacy.read_byte = cs553x_read_byte;
+ this->legacy.read_buf = cs553x_read_buf;
+ this->legacy.write_buf = cs553x_write_buf;
- this->chip_delay = 0;
+ this->legacy.chip_delay = 0;
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 256;
@@ -241,7 +232,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
}
/* Scan to find existence of the device */
- err = nand_scan(new_mtd, 1);
+ err = nand_scan(this, 1);
if (err)
goto out_free;
@@ -251,7 +242,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
out_free:
kfree(new_mtd->name);
out_ior:
- iounmap(this->IO_ADDR_R);
+ iounmap(this->legacy.IO_ADDR_R);
out_mtd:
kfree(this);
out:
@@ -333,10 +324,10 @@ static void __exit cs553x_cleanup(void)
continue;
this = mtd_to_nand(mtd);
- mmio_base = this->IO_ADDR_R;
+ mmio_base = this->legacy.IO_ADDR_R;
/* Release resources, unregister device */
- nand_release(mtd);
+ nand_release(this);
kfree(mtd->name);
cs553x_mtd[i] = NULL;
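Two details of the cs553x hunks are worth noting. The buffer helpers copy through the window in 0x800-byte chunks, presumably because that is the size of the mapped region; and cs553x_write_buf() writing through IO_ADDR_R is harmless here because cs553x_init_one() assigns IO_ADDR_R and IO_ADDR_W the same ioremap()ed address. A hedged sketch of the chunked-copy idiom, with a hypothetical CHUNK constant standing in for the driver's 0x800 magic:

	#include <linux/io.h>

	#define CHUNK 0x800	/* assumed window size, matching the driver's constant */

	static void read_chunked(u8 *dst, const void __iomem *src, int len)
	{
		while (len > CHUNK) {
			memcpy_fromio(dst, src, CHUNK);
			dst += CHUNK;
			len -= CHUNK;
		}
		memcpy_fromio(dst, src, len);
	}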
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 40145e206a6b..80f228d23cd2 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -97,12 +97,11 @@ static inline void davinci_nand_writel(struct davinci_nand_info *info,
* Access to hardware control lines: ALE, CLE, secondary chipselect.
*/
-static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
+static void nand_davinci_hwcontrol(struct nand_chip *nand, int cmd,
unsigned int ctrl)
{
- struct davinci_nand_info *info = to_davinci_nand(mtd);
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand));
void __iomem *addr = info->current_cs;
- struct nand_chip *nand = mtd_to_nand(mtd);
/* Did the control lines change? */
if (ctrl & NAND_CTRL_CHANGE) {
@@ -111,16 +110,16 @@ static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
addr += info->mask_ale;
- nand->IO_ADDR_W = addr;
+ nand->legacy.IO_ADDR_W = addr;
}
if (cmd != NAND_CMD_NONE)
- iowrite8(cmd, nand->IO_ADDR_W);
+ iowrite8(cmd, nand->legacy.IO_ADDR_W);
}
-static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
+static void nand_davinci_select_chip(struct nand_chip *nand, int chip)
{
- struct davinci_nand_info *info = to_davinci_nand(mtd);
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand));
info->current_cs = info->vaddr;
@@ -128,8 +127,8 @@ static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
if (chip > 0)
info->current_cs += info->mask_chipsel;
- info->chip.IO_ADDR_W = info->current_cs;
- info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
+ info->chip.legacy.IO_ADDR_W = info->current_cs;
+ info->chip.legacy.IO_ADDR_R = info->chip.legacy.IO_ADDR_W;
}
/*----------------------------------------------------------------------*/
@@ -146,16 +145,16 @@ static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
+ 4 * info->core_chipsel);
}
-static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
+static void nand_davinci_hwctl_1bit(struct nand_chip *chip, int mode)
{
struct davinci_nand_info *info;
uint32_t nandcfr;
unsigned long flags;
- info = to_davinci_nand(mtd);
+ info = to_davinci_nand(nand_to_mtd(chip));
/* Reset ECC hardware */
- nand_davinci_readecc_1bit(mtd);
+ nand_davinci_readecc_1bit(nand_to_mtd(chip));
spin_lock_irqsave(&davinci_nand_lock, flags);
@@ -170,10 +169,10 @@ static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
/*
* Read hardware ECC value and pack into three bytes
*/
-static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
- const u_char *dat, u_char *ecc_code)
+static int nand_davinci_calculate_1bit(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
{
- unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
+ unsigned int ecc_val = nand_davinci_readecc_1bit(nand_to_mtd(chip));
unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
/* invert so that erased block ecc is correct */
@@ -185,10 +184,9 @@ static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
return 0;
}
-static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
+static int nand_davinci_correct_1bit(struct nand_chip *chip, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
(read_ecc[2] << 16);
uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
@@ -231,9 +229,9 @@ static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
* OOB without recomputing ECC.
*/
-static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+static void nand_davinci_hwctl_4bit(struct nand_chip *chip, int mode)
{
- struct davinci_nand_info *info = to_davinci_nand(mtd);
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
unsigned long flags;
u32 val;
@@ -266,10 +264,10 @@ nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
}
/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
-static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
- const u_char *dat, u_char *ecc_code)
+static int nand_davinci_calculate_4bit(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
{
- struct davinci_nand_info *info = to_davinci_nand(mtd);
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
u32 raw_ecc[4], *p;
unsigned i;
@@ -303,11 +301,11 @@ static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
/* Correct up to 4 bits in data we just read, using state left in the
* hardware plus the ecc_code computed when it was first written.
*/
-static int nand_davinci_correct_4bit(struct mtd_info *mtd,
- u_char *data, u_char *ecc_code, u_char *null)
+static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data,
+ u_char *ecc_code, u_char *null)
{
int i;
- struct davinci_nand_info *info = to_davinci_nand(mtd);
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
unsigned short ecc10[8];
unsigned short *ecc16;
u32 syndrome[4];
@@ -436,38 +434,35 @@ correct:
* the two LSBs for NAND access ... so we can issue 32-bit reads/writes
* and have that transparently morphed into multiple NAND operations.
*/
-static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void nand_davinci_read_buf(struct nand_chip *chip, uint8_t *buf,
+ int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
- ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
+ ioread32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2);
else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
- ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
+ ioread16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1);
else
- ioread8_rep(chip->IO_ADDR_R, buf, len);
+ ioread8_rep(chip->legacy.IO_ADDR_R, buf, len);
}
-static void nand_davinci_write_buf(struct mtd_info *mtd,
- const uint8_t *buf, int len)
+static void nand_davinci_write_buf(struct nand_chip *chip, const uint8_t *buf,
+ int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
- iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
+ iowrite32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2);
else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
- iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
+ iowrite16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1);
else
- iowrite8_rep(chip->IO_ADDR_R, buf, len);
+ iowrite8_rep(chip->legacy.IO_ADDR_R, buf, len);
}
/*
* Check hardware register for wait status. Returns 1 if device is ready,
* 0 if it is still busy.
*/
-static int nand_davinci_dev_ready(struct mtd_info *mtd)
+static int nand_davinci_dev_ready(struct nand_chip *chip)
{
- struct davinci_nand_info *info = to_davinci_nand(mtd);
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}
@@ -764,9 +759,9 @@ static int nand_davinci_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
nand_set_flash_node(&info->chip, pdev->dev.of_node);
- info->chip.IO_ADDR_R = vaddr;
- info->chip.IO_ADDR_W = vaddr;
- info->chip.chip_delay = 0;
+ info->chip.legacy.IO_ADDR_R = vaddr;
+ info->chip.legacy.IO_ADDR_W = vaddr;
+ info->chip.legacy.chip_delay = 0;
info->chip.select_chip = nand_davinci_select_chip;
/* options such as NAND_BBT_USE_FLASH */
@@ -786,12 +781,12 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->mask_cle = pdata->mask_cle ? : MASK_CLE;
/* Set address of hardware control function */
- info->chip.cmd_ctrl = nand_davinci_hwcontrol;
- info->chip.dev_ready = nand_davinci_dev_ready;
+ info->chip.legacy.cmd_ctrl = nand_davinci_hwcontrol;
+ info->chip.legacy.dev_ready = nand_davinci_dev_ready;
/* Speed up buffer I/O */
- info->chip.read_buf = nand_davinci_read_buf;
- info->chip.write_buf = nand_davinci_write_buf;
+ info->chip.legacy.read_buf = nand_davinci_read_buf;
+ info->chip.legacy.write_buf = nand_davinci_write_buf;
/* Use board-specific ECC config */
info->chip.ecc.mode = pdata->ecc_mode;
@@ -807,7 +802,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
/* Scan to find existence of the device(s) */
info->chip.dummy_controller.ops = &davinci_nand_controller_ops;
- ret = nand_scan(mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
return ret;
@@ -841,7 +836,7 @@ static int nand_davinci_remove(struct platform_device *pdev)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
- nand_release(nand_to_mtd(&info->chip));
+ nand_release(&info->chip);
return 0;
}
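nand_davinci_read_buf()/write_buf() above pick the widest MMIO access the buffer alignment allows, relying on the controller transparently morphing one 32-bit access into multiple NAND cycles. A self-contained sketch of that width-selection idiom, with a hypothetical fifo pointer:

	#include <linux/io.h>

	static void fifo_read(void __iomem *fifo, u8 *buf, int len)
	{
		if (((uintptr_t)buf & 0x03) == 0 && (len & 0x03) == 0)
			ioread32_rep(fifo, buf, len >> 2);	/* 4-byte aligned: word reads */
		else if (((uintptr_t)buf & 0x01) == 0 && (len & 0x01) == 0)
			ioread16_rep(fifo, buf, len >> 1);	/* 2-byte aligned: halfword reads */
		else
			ioread8_rep(fifo, buf, len);		/* unaligned: byte fallback */
	}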
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 67b2065e7a19..830ea247277b 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NAND Flash Controller Device Driver
* Copyright © 2009-2010, Intel Corporation and its suppliers.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
+ * Copyright (c) 2017 Socionext Inc.
+ * Reworked by Masahiro Yamada <yamada.masahiro@socionext.com>
*/
#include <linux/bitfield.h>
@@ -25,9 +20,8 @@
#include "denali.h"
-MODULE_LICENSE("GPL");
-
#define DENALI_NAND_NAME "denali-nand"
+#define DENALI_DEFAULT_OOB_SKIP_BYTES 8
/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL 0x00
@@ -222,8 +216,9 @@ static uint32_t denali_check_irq(struct denali_nand_info *denali)
return irq_status;
}
-static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void denali_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct denali_nand_info *denali = mtd_to_denali(mtd);
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
@@ -232,9 +227,10 @@ static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
buf[i] = denali->host_read(denali, addr);
}
-static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void denali_write_buf(struct nand_chip *chip, const uint8_t *buf,
+ int len)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
@@ -242,9 +238,9 @@ static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
denali->host_write(denali, addr, buf[i]);
}
-static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+static void denali_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
uint16_t *buf16 = (uint16_t *)buf;
int i;
@@ -253,10 +249,10 @@ static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
buf16[i] = denali->host_read(denali, addr);
}
-static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
+static void denali_write_buf16(struct nand_chip *chip, const uint8_t *buf,
int len)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
const uint16_t *buf16 = (const uint16_t *)buf;
int i;
@@ -265,32 +261,23 @@ static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
denali->host_write(denali, addr, buf16[i]);
}
-static uint8_t denali_read_byte(struct mtd_info *mtd)
+static uint8_t denali_read_byte(struct nand_chip *chip)
{
uint8_t byte;
- denali_read_buf(mtd, &byte, 1);
+ denali_read_buf(chip, &byte, 1);
return byte;
}
-static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
+static void denali_write_byte(struct nand_chip *chip, uint8_t byte)
{
- denali_write_buf(mtd, &byte, 1);
+ denali_write_buf(chip, &byte, 1);
}
-static uint16_t denali_read_word(struct mtd_info *mtd)
+static void denali_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
{
- uint16_t word;
-
- denali_read_buf16(mtd, (uint8_t *)&word, 2);
-
- return word;
-}
-
-static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
-{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
uint32_t type;
if (ctrl & NAND_CLE)
@@ -301,7 +288,8 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
return;
/*
- * Some commands are followed by chip->dev_ready or chip->waitfunc.
+ * Some commands are followed by chip->legacy.dev_ready or
+ * chip->legacy.waitfunc.
* irq_status must be cleared here to catch the R/B# interrupt later.
*/
if (ctrl & NAND_CTRL_CHANGE)
@@ -310,9 +298,9 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
-static int denali_dev_ready(struct mtd_info *mtd)
+static int denali_dev_ready(struct nand_chip *chip)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
return !!(denali_check_irq(denali) & INTR__INT_ACT);
}
@@ -596,6 +584,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
}
iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
+ /*
+ * The ->setup_dma() hook kicks DMA by using the data/command
+ * interface, which belongs to a different AXI port from the
+ * register interface. Read back the register to avoid a race.
+ */
+ ioread32(denali->reg + DMA_ENABLE);
denali_reset_irq(denali);
denali->setup_dma(denali, dma_addr, page, write);
@@ -692,9 +686,10 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
false);
}
-static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int denali_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct denali_nand_info *denali = mtd_to_denali(mtd);
int writesize = mtd->writesize;
int oobsize = mtd->oobsize;
@@ -767,17 +762,18 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
-static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int denali_read_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
denali_oob_xfer(mtd, chip, page, 0);
return 0;
}
-static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int denali_write_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct denali_nand_info *denali = mtd_to_denali(mtd);
denali_reset_irq(denali);
@@ -787,9 +783,10 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
return nand_prog_page_end_op(chip);
}
-static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int denali_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct denali_nand_info *denali = mtd_to_denali(mtd);
unsigned long uncor_ecc_flags = 0;
int stat = 0;
@@ -808,7 +805,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return stat;
if (uncor_ecc_flags) {
- ret = denali_read_oob(mtd, chip, page);
+ ret = denali_read_oob(chip, page);
if (ret)
return ret;
@@ -819,9 +816,10 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return stat;
}
-static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+static int denali_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct denali_nand_info *denali = mtd_to_denali(mtd);
int writesize = mtd->writesize;
int oobsize = mtd->oobsize;
@@ -897,25 +895,26 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
-static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+static int denali_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct denali_nand_info *denali = mtd_to_denali(mtd);
return denali_data_xfer(denali, (void *)buf, mtd->writesize,
page, 0, 1);
}
-static void denali_select_chip(struct mtd_info *mtd, int chip)
+static void denali_select_chip(struct nand_chip *chip, int cs)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
- denali->active_bank = chip;
+ denali->active_bank = cs;
}
-static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+static int denali_waitfunc(struct nand_chip *chip)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
uint32_t irq_status;
/* R/B# pin transitioned from low to high? */
@@ -924,9 +923,9 @@ static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
}
-static int denali_erase(struct mtd_info *mtd, int page)
+static int denali_erase(struct nand_chip *chip, int page)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
uint32_t irq_status;
denali_reset_irq(denali);
@@ -941,10 +940,10 @@ static int denali_erase(struct mtd_info *mtd, int page)
return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}
-static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
+static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
const struct nand_data_interface *conf)
{
- struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
const struct nand_sdr_timings *timings;
unsigned long t_x, mult_x;
int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
@@ -1099,12 +1098,17 @@ static void denali_hw_init(struct denali_nand_info *denali)
denali->revision = swab16(ioread32(denali->reg + REVISION));
/*
- * tell driver how many bit controller will skip before
- * writing ECC code in OOB, this register may be already
- * set by firmware. So we read this value out.
- * if this value is 0, just let it be.
+ * Set how many bytes should be skipped before writing data in OOB.
+ * If a non-zero value has already been set (by firmware or something),
+ * just use it. Otherwise, set the driver default.
*/
denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
+ if (!denali->oob_skip_bytes) {
+ denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
+ iowrite32(denali->oob_skip_bytes,
+ denali->reg + SPARE_AREA_SKIP_BYTES);
+ }
+
denali_detect_max_banks(denali);
iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
@@ -1271,11 +1275,11 @@ static int denali_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
if (chip->options & NAND_BUSWIDTH_16) {
- chip->read_buf = denali_read_buf16;
- chip->write_buf = denali_write_buf16;
+ chip->legacy.read_buf = denali_read_buf16;
+ chip->legacy.write_buf = denali_write_buf16;
} else {
- chip->read_buf = denali_read_buf;
- chip->write_buf = denali_write_buf;
+ chip->legacy.read_buf = denali_read_buf;
+ chip->legacy.write_buf = denali_write_buf;
}
chip->ecc.read_page = denali_read_page;
chip->ecc.read_page_raw = denali_read_page_raw;
@@ -1283,7 +1287,7 @@ static int denali_attach_chip(struct nand_chip *chip)
chip->ecc.write_page_raw = denali_write_page_raw;
chip->ecc.read_oob = denali_read_oob;
chip->ecc.write_oob = denali_write_oob;
- chip->erase = denali_erase;
+ chip->legacy.erase = denali_erase;
ret = denali_multidev_fixup(denali);
if (ret)
@@ -1352,12 +1356,11 @@ int denali_init(struct denali_nand_info *denali)
mtd->name = "denali-nand";
chip->select_chip = denali_select_chip;
- chip->read_byte = denali_read_byte;
- chip->write_byte = denali_write_byte;
- chip->read_word = denali_read_word;
- chip->cmd_ctrl = denali_cmd_ctrl;
- chip->dev_ready = denali_dev_ready;
- chip->waitfunc = denali_waitfunc;
+ chip->legacy.read_byte = denali_read_byte;
+ chip->legacy.write_byte = denali_write_byte;
+ chip->legacy.cmd_ctrl = denali_cmd_ctrl;
+ chip->legacy.dev_ready = denali_dev_ready;
+ chip->legacy.waitfunc = denali_waitfunc;
if (features & FEATURES__INDEX_ADDR) {
denali->host_read = denali_indexed_read;
@@ -1372,7 +1375,7 @@ int denali_init(struct denali_nand_info *denali)
chip->setup_data_interface = denali_setup_data_interface;
chip->dummy_controller.ops = &denali_controller_ops;
- ret = nand_scan(mtd, denali->max_banks);
+ ret = nand_scan(chip, denali->max_banks);
if (ret)
goto disable_irq;
@@ -1395,9 +1398,11 @@ EXPORT_SYMBOL(denali_init);
void denali_remove(struct denali_nand_info *denali)
{
- struct mtd_info *mtd = nand_to_mtd(&denali->nand);
-
- nand_release(mtd);
+ nand_release(&denali->nand);
denali_disable_irq(denali);
}
EXPORT_SYMBOL(denali_remove);
+
+MODULE_DESCRIPTION("Driver core for Denali NAND controller");
+MODULE_AUTHOR("Intel Corporation and its suppliers");
+MODULE_LICENSE("GPL v2");
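Besides the hook conversion, the denali.c hunks fix a real ordering hazard: the DMA_ENABLE write and the ->setup_dma() kick travel over different AXI ports, so the posted register write must be flushed with a read-back before DMA starts. The same read-back idiom as a generic, hedged sketch (hypothetical helper, not from this patch):

	#include <linux/io.h>

	/* write a device register and force the posted write to complete */
	static void reg_write_flush(void __iomem *reg, u32 val)
	{
		iowrite32(val, reg);
		ioread32(reg);	/* read-back orders the write before later traffic */
	}

The hw-init hunk similarly stops trusting SPARE_AREA_SKIP_BYTES blindly: if firmware left it zero, the driver now programs DENALI_DEFAULT_OOB_SKIP_BYTES (8) and writes it back, rather than silently skipping nothing.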
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index 1f8feaf924eb..57a5498f58bb 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __DENALI_H__
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 0faaad032e5f..7c6a8a426606 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NAND Flash Controller Device Driver for DT
*
* Copyright © 2011, Picochip.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/clk.h>
@@ -202,6 +194,6 @@ static struct platform_driver denali_dt_driver = {
};
module_platform_driver(denali_dt_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("DT driver for Denali NAND controller");
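denali_dt.c also tightens its runtime license string to agree with the new SPDX tag: per the include/linux/module.h conventions, MODULE_LICENSE("GPL") declares GPL v2 or later, while the SPDX identifier GPL-2.0 is v2 only, hence the switch to "GPL v2". The agreeing pair looks like:

	// SPDX-License-Identifier: GPL-2.0

	MODULE_LICENSE("GPL v2");	/* "GPL v2" = v2 only; "GPL" = v2 or later */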
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index 7c8efc4c7bdf..48e9ac54ad53 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NAND Flash Controller Device Driver
* Copyright © 2009-2010, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/errno.h>
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 3c46188dd6d2..3a4c373affab 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -83,9 +83,9 @@ static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
-static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
+static void doc200x_hwcontrol(struct nand_chip *this, int cmd,
unsigned int bitmask);
-static void doc200x_select_chip(struct mtd_info *mtd, int chip);
+static void doc200x_select_chip(struct nand_chip *this, int chip);
static int debug = 0;
module_param(debug, int, 0);
@@ -290,9 +290,8 @@ static inline int DoC_WaitReady(struct doc_priv *doc)
return ret;
}
-static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
+static void doc2000_write_byte(struct nand_chip *this, u_char datum)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
@@ -302,9 +301,8 @@ static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
WriteDOC(datum, docptr, 2k_CDSN_IO);
}
-static u_char doc2000_read_byte(struct mtd_info *mtd)
+static u_char doc2000_read_byte(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
u_char ret;
@@ -317,9 +315,9 @@ static u_char doc2000_read_byte(struct mtd_info *mtd)
return ret;
}
-static void doc2000_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+static void doc2000_writebuf(struct nand_chip *this, const u_char *buf,
+ int len)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
@@ -334,9 +332,8 @@ static void doc2000_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
printk("\n");
}
-static void doc2000_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+static void doc2000_readbuf(struct nand_chip *this, u_char *buf, int len)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
@@ -344,14 +341,12 @@ static void doc2000_readbuf(struct mtd_info *mtd, u_char *buf, int len)
if (debug)
printk("readbuf of %d bytes: ", len);
- for (i = 0; i < len; i++) {
+ for (i = 0; i < len; i++)
buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
- }
}
-static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len)
+static void doc2000_readbuf_dword(struct nand_chip *this, u_char *buf, int len)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
@@ -376,19 +371,19 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
struct doc_priv *doc = nand_get_controller_data(this);
uint16_t ret;
- doc200x_select_chip(mtd, nr);
- doc200x_hwcontrol(mtd, NAND_CMD_READID,
+ doc200x_select_chip(this, nr);
+ doc200x_hwcontrol(this, NAND_CMD_READID,
NAND_CTRL_CLE | NAND_CTRL_CHANGE);
- doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
- doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
/* We can't use dev_ready here, but at least we wait for the
* command to complete
*/
udelay(50);
- ret = this->read_byte(mtd) << 8;
- ret |= this->read_byte(mtd);
+ ret = this->legacy.read_byte(this) << 8;
+ ret |= this->legacy.read_byte(this);
if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
/* First chip probe. See if we get same results by 32-bit access */
@@ -398,10 +393,10 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
} ident;
void __iomem *docptr = doc->virtadr;
- doc200x_hwcontrol(mtd, NAND_CMD_READID,
+ doc200x_hwcontrol(this, NAND_CMD_READID,
NAND_CTRL_CLE | NAND_CTRL_CHANGE);
- doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
- doc200x_hwcontrol(mtd, NAND_CMD_NONE,
+ doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(this, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
udelay(50);
@@ -409,7 +404,7 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
ident.dword = readl(docptr + DoC_2k_CDSN_IO);
if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
pr_info("DiskOnChip 2000 responds to DWORD access\n");
- this->read_buf = &doc2000_readbuf_dword;
+ this->legacy.read_buf = &doc2000_readbuf_dword;
}
}
@@ -438,7 +433,7 @@ static void __init doc2000_count_chips(struct mtd_info *mtd)
pr_debug("Detected %d chips per floor.\n", i);
}
-static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
+static int doc200x_wait(struct nand_chip *this)
{
struct doc_priv *doc = nand_get_controller_data(this);
@@ -447,14 +442,13 @@ static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
DoC_WaitReady(doc);
nand_status_op(this, NULL);
DoC_WaitReady(doc);
- status = (int)this->read_byte(mtd);
+ status = (int)this->legacy.read_byte(this);
return status;
}
-static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
+static void doc2001_write_byte(struct nand_chip *this, u_char datum)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
@@ -463,9 +457,8 @@ static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
WriteDOC(datum, docptr, WritePipeTerm);
}
-static u_char doc2001_read_byte(struct mtd_info *mtd)
+static u_char doc2001_read_byte(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
@@ -477,9 +470,8 @@ static u_char doc2001_read_byte(struct mtd_info *mtd)
return ReadDOC(docptr, LastDataRead);
}
-static void doc2001_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+static void doc2001_writebuf(struct nand_chip *this, const u_char *buf, int len)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
@@ -490,9 +482,8 @@ static void doc2001_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
WriteDOC(0x00, docptr, WritePipeTerm);
}
-static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+static void doc2001_readbuf(struct nand_chip *this, u_char *buf, int len)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
@@ -507,9 +498,8 @@ static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
buf[i] = ReadDOC(docptr, LastDataRead);
}
-static u_char doc2001plus_read_byte(struct mtd_info *mtd)
+static u_char doc2001plus_read_byte(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
u_char ret;
@@ -522,9 +512,8 @@ static u_char doc2001plus_read_byte(struct mtd_info *mtd)
return ret;
}
-static void doc2001plus_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+static void doc2001plus_writebuf(struct nand_chip *this, const u_char *buf, int len)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
@@ -540,9 +529,8 @@ static void doc2001plus_writebuf(struct mtd_info *mtd, const u_char *buf, int le
printk("\n");
}
-static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
@@ -571,9 +559,8 @@ static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
printk("\n");
}
-static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
+static void doc2001plus_select_chip(struct nand_chip *this, int chip)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int floor = 0;
@@ -598,9 +585,8 @@ static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
doc->curfloor = floor;
}
-static void doc200x_select_chip(struct mtd_info *mtd, int chip)
+static void doc200x_select_chip(struct nand_chip *this, int chip)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int floor = 0;
@@ -615,12 +601,12 @@ static void doc200x_select_chip(struct mtd_info *mtd, int chip)
chip -= (floor * doc->chips_per_floor);
/* 11.4.4 -- deassert CE before changing chip */
- doc200x_hwcontrol(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(this, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
WriteDOC(floor, docptr, FloorSelect);
WriteDOC(chip, docptr, CDSNDeviceSelect);
- doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+ doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
doc->curchip = chip;
doc->curfloor = floor;
@@ -628,10 +614,9 @@ static void doc200x_select_chip(struct mtd_info *mtd, int chip)
#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)
-static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
+static void doc200x_hwcontrol(struct nand_chip *this, int cmd,
unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
@@ -646,15 +631,16 @@ static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
}
if (cmd != NAND_CMD_NONE) {
if (DoC_is_2000(doc))
- doc2000_write_byte(mtd, cmd);
+ doc2000_write_byte(this, cmd);
else
- doc2001_write_byte(mtd, cmd);
+ doc2001_write_byte(this, cmd);
}
}
-static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+static void doc2001plus_command(struct nand_chip *this, unsigned command,
+ int column, int page_addr)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
@@ -729,13 +715,13 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
return;
case NAND_CMD_RESET:
- if (this->dev_ready)
+ if (this->legacy.dev_ready)
break;
- udelay(this->chip_delay);
+ udelay(this->legacy.chip_delay);
WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
WriteDOC(0, docptr, Mplus_WritePipeTerm);
WriteDOC(0, docptr, Mplus_WritePipeTerm);
- while (!(this->read_byte(mtd) & 0x40)) ;
+ while (!(this->legacy.read_byte(this) & 0x40)) ;
return;
/* This applies to read commands */
@@ -744,8 +730,8 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
* If we don't have access to the busy pin, we apply the given
* command delay
*/
- if (!this->dev_ready) {
- udelay(this->chip_delay);
+ if (!this->legacy.dev_ready) {
+ udelay(this->legacy.chip_delay);
return;
}
}
@@ -754,12 +740,11 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
* any case on any machine. */
ndelay(100);
/* wait until command is processed */
- while (!this->dev_ready(mtd)) ;
+ while (!this->legacy.dev_ready(this)) ;
}
-static int doc200x_dev_ready(struct mtd_info *mtd)
+static int doc200x_dev_ready(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
@@ -790,16 +775,15 @@ static int doc200x_dev_ready(struct mtd_info *mtd)
}
}
-static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs)
+static int doc200x_block_bad(struct nand_chip *this, loff_t ofs)
{
/* This is our last resort if we couldn't find or create a BBT. Just
pretend all blocks are good. */
return 0;
}
-static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
+static void doc200x_enable_hwecc(struct nand_chip *this, int mode)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
@@ -816,9 +800,8 @@ static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
}
}
-static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
+static void doc2001plus_enable_hwecc(struct nand_chip *this, int mode)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
@@ -836,9 +819,9 @@ static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
}
/* This code is only called on write */
-static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, unsigned char *ecc_code)
+static int doc200x_calculate_ecc(struct nand_chip *this, const u_char *dat,
+ unsigned char *ecc_code)
{
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
int i;
@@ -895,11 +878,10 @@ static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, unsign
return 0;
}
-static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
+static int doc200x_correct_data(struct nand_chip *this, u_char *dat,
u_char *read_ecc, u_char *isnull)
{
int i, ret = 0;
- struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
uint8_t calc_ecc[6];
@@ -1357,9 +1339,9 @@ static inline int __init doc2000_init(struct mtd_info *mtd)
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
- this->read_byte = doc2000_read_byte;
- this->write_buf = doc2000_writebuf;
- this->read_buf = doc2000_readbuf;
+ this->legacy.read_byte = doc2000_read_byte;
+ this->legacy.write_buf = doc2000_writebuf;
+ this->legacy.read_buf = doc2000_readbuf;
doc->late_init = nftl_scan_bbt;
doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
@@ -1373,9 +1355,9 @@ static inline int __init doc2001_init(struct mtd_info *mtd)
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
- this->read_byte = doc2001_read_byte;
- this->write_buf = doc2001_writebuf;
- this->read_buf = doc2001_readbuf;
+ this->legacy.read_byte = doc2001_read_byte;
+ this->legacy.write_buf = doc2001_writebuf;
+ this->legacy.read_buf = doc2001_readbuf;
ReadDOC(doc->virtadr, ChipID);
ReadDOC(doc->virtadr, ChipID);
@@ -1403,13 +1385,13 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
- this->read_byte = doc2001plus_read_byte;
- this->write_buf = doc2001plus_writebuf;
- this->read_buf = doc2001plus_readbuf;
+ this->legacy.read_byte = doc2001plus_read_byte;
+ this->legacy.write_buf = doc2001plus_writebuf;
+ this->legacy.read_buf = doc2001plus_readbuf;
doc->late_init = inftl_scan_bbt;
- this->cmd_ctrl = NULL;
+ this->legacy.cmd_ctrl = NULL;
this->select_chip = doc2001plus_select_chip;
- this->cmdfunc = doc2001plus_command;
+ this->legacy.cmdfunc = doc2001plus_command;
this->ecc.hwctl = doc2001plus_enable_hwecc;
doc->chips_per_floor = 1;
@@ -1587,10 +1569,10 @@ static int __init doc_probe(unsigned long physadr)
nand_set_controller_data(nand, doc);
nand->select_chip = doc200x_select_chip;
- nand->cmd_ctrl = doc200x_hwcontrol;
- nand->dev_ready = doc200x_dev_ready;
- nand->waitfunc = doc200x_wait;
- nand->block_bad = doc200x_block_bad;
+ nand->legacy.cmd_ctrl = doc200x_hwcontrol;
+ nand->legacy.dev_ready = doc200x_dev_ready;
+ nand->legacy.waitfunc = doc200x_wait;
+ nand->legacy.block_bad = doc200x_block_bad;
nand->ecc.hwctl = doc200x_enable_hwecc;
nand->ecc.calculate = doc200x_calculate_ecc;
nand->ecc.correct = doc200x_correct_data;
@@ -1620,14 +1602,14 @@ static int __init doc_probe(unsigned long physadr)
else
numchips = doc2001_init(mtd);
- if ((ret = nand_scan(mtd, numchips)) || (ret = doc->late_init(mtd))) {
+ if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) {
/* DBB note: I believe nand_release is necessary here, as
buffers may have been allocated in nand_base. Check with
Thomas. FIX ME! */
/* nand_release will call mtd_device_unregister, but we
haven't yet added it. This is handled without incident by
mtd_device_unregister, as far as I can tell. */
- nand_release(mtd);
+ nand_release(nand);
goto fail;
}
@@ -1662,7 +1644,7 @@ static void release_nanddoc(void)
doc = nand_get_controller_data(nand);
nextmtd = doc->nextdoc;
- nand_release(mtd);
+ nand_release(nand);
iounmap(doc->virtadr);
release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
free_rs(doc->rs_decoder);
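The diskonchip hunks thread the chip pointer through a long chain of internal helpers (select_chip -> hwcontrol -> write_byte), and doc200x_ident_chip() now assembles its 16-bit ID from two chip-based legacy reads instead of mtd-based ones. A hedged sketch of that ID-read shape, with a hypothetical read_id16() wrapper:

	#include <linux/mtd/rawnand.h>

	static u16 read_id16(struct nand_chip *this)
	{
		u16 id;

		id = this->legacy.read_byte(this) << 8;
		id |= this->legacy.read_byte(this);
		return id;
	}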
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c
deleted file mode 100644
index 427fcbc1b71c..000000000000
--- a/drivers/mtd/nand/raw/docg4.c
+++ /dev/null
@@ -1,1442 +0,0 @@
-/*
- * Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
- *
- * mtd nand driver for M-Systems DiskOnChip G4
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Tested on the Palm Treo 680. The G4 is also present on Toshiba Portege, Asus
- * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
- * Should work on these as well. Let me know!
- *
- * TODO:
- *
- * Mechanism for management of password-protected areas
- *
- * Hamming ecc when reading oob only
- *
- * According to the M-Sys documentation, this device is also available in a
- * "dual-die" configuration having a 256MB capacity, but no mechanism for
- * detecting this variant is documented. Currently this driver assumes 128MB
- * capacity.
- *
- * Support for multiple cascaded devices ("floors"). Not sure which gadgets
- * contain multiple G4s in a cascaded configuration, if any.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/export.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/bch.h>
-#include <linux/bitrev.h>
-#include <linux/jiffies.h>
-
-/*
- * In "reliable mode" consecutive 2k pages are used in parallel (in some
- * fashion) to store the same data. The data can be read back from the
- * even-numbered pages in the normal manner; odd-numbered pages will appear to
- * contain junk. Systems that boot from the docg4 typically write the secondary
- * program loader (SPL) code in this mode. The SPL is loaded by the initial
- * program loader (IPL, stored in the docg4's 2k NOR-like region that is mapped
- * to the reset vector address). This module parameter enables you to use this
- * driver to write the SPL. When in this mode, no more than 2k of data can be
- * written at a time, because the addresses do not increment in the normal
- * manner, and the starting offset must be within an even-numbered 2k region;
- * i.e., invalid starting offsets are 0x800, 0xa00, 0xc00, 0xe00, 0x1800,
- * 0x1a00, ... Reliable mode is a special case and should not be used unless
- * you know what you're doing.
- */
-static bool reliable_mode;
-module_param(reliable_mode, bool, 0);
-MODULE_PARM_DESC(reliable_mode, "pages are programmed in reliable mode");
-
-/*
- * You'll want to ignore badblocks if you're reading a partition that contains
- * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
- * it does not use mtd nand's method for marking bad blocks (using oob area).
- * This will also skip the check of the "page written" flag.
- */
-static bool ignore_badblocks;
-module_param(ignore_badblocks, bool, 0);
-MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
-
-struct docg4_priv {
- struct mtd_info *mtd;
- struct device *dev;
- void __iomem *virtadr;
- int status;
- struct {
- unsigned int command;
- int column;
- int page;
- } last_command;
- uint8_t oob_buf[16];
- uint8_t ecc_buf[7];
- int oob_page;
- struct bch_control *bch;
-};
-
-/*
- * Defines prefixed with DOCG4 are unique to the diskonchip G4. All others are
- * shared with other diskonchip devices (P3, G3 at least).
- *
- * Functions with names prefixed with docg4_ are mtd / nand interface functions
- * (though they may also be called internally). All others are internal.
- */
-
-#define DOC_IOSPACE_DATA 0x0800
-
-/* register offsets */
-#define DOC_CHIPID 0x1000
-#define DOC_DEVICESELECT 0x100a
-#define DOC_ASICMODE 0x100c
-#define DOC_DATAEND 0x101e
-#define DOC_NOP 0x103e
-
-#define DOC_FLASHSEQUENCE 0x1032
-#define DOC_FLASHCOMMAND 0x1034
-#define DOC_FLASHADDRESS 0x1036
-#define DOC_FLASHCONTROL 0x1038
-#define DOC_ECCCONF0 0x1040
-#define DOC_ECCCONF1 0x1042
-#define DOC_HAMMINGPARITY 0x1046
-#define DOC_BCH_SYNDROM(idx) (0x1048 + idx)
-
-#define DOC_ASICMODECONFIRM 0x1072
-#define DOC_CHIPID_INV 0x1074
-#define DOC_POWERMODE 0x107c
-
-#define DOCG4_MYSTERY_REG 0x1050
-
-/* apparently used only to write oob bytes 6 and 7 */
-#define DOCG4_OOB_6_7 0x1052
-
-/* DOC_FLASHSEQUENCE register commands */
-#define DOC_SEQ_RESET 0x00
-#define DOCG4_SEQ_PAGE_READ 0x03
-#define DOCG4_SEQ_FLUSH 0x29
-#define DOCG4_SEQ_PAGEWRITE 0x16
-#define DOCG4_SEQ_PAGEPROG 0x1e
-#define DOCG4_SEQ_BLOCKERASE 0x24
-#define DOCG4_SEQ_SETMODE 0x45
-
-/* DOC_FLASHCOMMAND register commands */
-#define DOCG4_CMD_PAGE_READ 0x00
-#define DOC_CMD_ERASECYCLE2 0xd0
-#define DOCG4_CMD_FLUSH 0x70
-#define DOCG4_CMD_READ2 0x30
-#define DOC_CMD_PROG_BLOCK_ADDR 0x60
-#define DOCG4_CMD_PAGEWRITE 0x80
-#define DOC_CMD_PROG_CYCLE2 0x10
-#define DOCG4_CMD_FAST_MODE 0xa3 /* functionality guessed */
-#define DOC_CMD_RELIABLE_MODE 0x22
-#define DOC_CMD_RESET 0xff
-
-/* DOC_POWERMODE register bits */
-#define DOC_POWERDOWN_READY 0x80
-
-/* DOC_FLASHCONTROL register bits */
-#define DOC_CTRL_CE 0x10
-#define DOC_CTRL_UNKNOWN 0x40
-#define DOC_CTRL_FLASHREADY 0x01
-
-/* DOC_ECCCONF0 register bits */
-#define DOC_ECCCONF0_READ_MODE 0x8000
-#define DOC_ECCCONF0_UNKNOWN 0x2000
-#define DOC_ECCCONF0_ECC_ENABLE 0x1000
-#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
-
-/* DOC_ECCCONF1 register bits */
-#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
-#define DOC_ECCCONF1_ECC_ENABLE 0x07
-#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
-
-/* DOC_ASICMODE register bits */
-#define DOC_ASICMODE_RESET 0x00
-#define DOC_ASICMODE_NORMAL 0x01
-#define DOC_ASICMODE_POWERDOWN 0x02
-#define DOC_ASICMODE_MDWREN 0x04
-#define DOC_ASICMODE_BDETCT_RESET 0x08
-#define DOC_ASICMODE_RSTIN_RESET 0x10
-#define DOC_ASICMODE_RAM_WE 0x20
-
-/* good status values read after read/write/erase operations */
-#define DOCG4_PROGSTATUS_GOOD 0x51
-#define DOCG4_PROGSTATUS_GOOD_2 0xe0
-
-/*
- * On read operations (page and oob-only), the first byte read from I/O reg is a
- * status. On error, it reads 0x73; otherwise, it reads either 0x71 (first read
- * after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
- */
-#define DOCG4_READ_ERROR 0x02 /* bit 1 indicates read error */
-
-/* anatomy of the device */
-#define DOCG4_CHIP_SIZE 0x8000000
-#define DOCG4_PAGE_SIZE 0x200
-#define DOCG4_PAGES_PER_BLOCK 0x200
-#define DOCG4_BLOCK_SIZE (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
-#define DOCG4_NUMBLOCKS (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
-#define DOCG4_OOB_SIZE 0x10
-#define DOCG4_CHIP_SHIFT 27 /* log_2(DOCG4_CHIP_SIZE) */
-#define DOCG4_PAGE_SHIFT 9 /* log_2(DOCG4_PAGE_SIZE) */
-#define DOCG4_ERASE_SHIFT 18 /* log_2(DOCG4_BLOCK_SIZE) */
-
-/* all but the last byte is included in ecc calculation */
-#define DOCG4_BCH_SIZE (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
-
-#define DOCG4_USERDATA_LEN 520 /* 512 byte page plus 8 oob avail to user */
-
-/* expected values from the ID registers */
-#define DOCG4_IDREG1_VALUE 0x0400
-#define DOCG4_IDREG2_VALUE 0xfbff
-
-/* primitive polynomial used to build the Galois field used by hw ecc gen */
-#define DOCG4_PRIMITIVE_POLY 0x4443
-
-#define DOCG4_M 14 /* Galois field is of order 2^14 */
-#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
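-
-/*
- * A t-error-correcting binary BCH code over GF(2^m) needs up to m * t
- * parity bits; here that is 14 * 4 = 56 bits, which matches the 7 ecc
- * bytes read back from the hardware (see read_hw_ecc() below).
- */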
-
-#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
-#define DOCG4_REDUNDANT_BBT_PAGE 24 /* page where redundant factory bbt lives */
-
-/*
- * Bytes 0, 1 are used as badblock marker.
- * Bytes 2 - 6 are available to the user.
- * Byte 7 is hamming ecc for first 7 oob bytes only.
- * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
- * Byte 15 (the last) is used by the driver as a "page written" flag.
- */
-static int docg4_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- if (section)
- return -ERANGE;
-
- oobregion->offset = 7;
- oobregion->length = 9;
-
- return 0;
-}
-
-static int docg4_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- if (section)
- return -ERANGE;
-
- oobregion->offset = 2;
- oobregion->length = 5;
-
- return 0;
-}
-
-static const struct mtd_ooblayout_ops docg4_ooblayout_ops = {
- .ecc = docg4_ooblayout_ecc,
- .free = docg4_ooblayout_free,
-};
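-
-/*
- * Put differently: the "ecc" region claims oob bytes 7..15 (the hamming
- * byte, the 7 bch bytes, and the "page written" flag), the "free"
- * region is bytes 2..6, and bytes 0..1 remain the bad-block marker.
- */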
-
-/*
- * The device has a nop register which M-Sys claims is for the purpose of
- * inserting precise delays. But beware: at least some operations fail if the
- * nop writes are replaced with a generic delay!
- */
-static inline void write_nop(void __iomem *docptr)
-{
- writew(0, docptr + DOC_NOP);
-}
-
-static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
- int i;
- struct nand_chip *nand = mtd_to_nand(mtd);
- uint16_t *p = (uint16_t *) buf;
- len >>= 1;
-
- for (i = 0; i < len; i++)
- p[i] = readw(nand->IO_ADDR_R);
-}
-
-static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
- int i;
- struct nand_chip *nand = mtd_to_nand(mtd);
- uint16_t *p = (uint16_t *) buf;
- len >>= 1;
-
- for (i = 0; i < len; i++)
- writew(p[i], nand->IO_ADDR_W);
-}
-
-static int poll_status(struct docg4_priv *doc)
-{
- /*
- * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
- * register. Operations known to take a long time (e.g., block erase)
- * should sleep for a while before calling this.
- */
-
- uint16_t flash_status;
- unsigned long timeo;
- void __iomem *docptr = doc->virtadr;
-
- dev_dbg(doc->dev, "%s...\n", __func__);
-
- /* hardware quirk requires reading twice initially */
- flash_status = readw(docptr + DOC_FLASHCONTROL);
-
- timeo = jiffies + msecs_to_jiffies(200); /* generous timeout */
- do {
- cpu_relax();
- flash_status = readb(docptr + DOC_FLASHCONTROL);
- } while (!(flash_status & DOC_CTRL_FLASHREADY) &&
- time_before(jiffies, timeo));
-
- if (unlikely(!(flash_status & DOC_CTRL_FLASHREADY))) {
- dev_err(doc->dev, "%s: timed out!\n", __func__);
- return NAND_STATUS_FAIL;
- }
-
- return 0;
-}
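-
-/*
- * Note that callers expecting a long wait sleep before polling: block
- * erase, for example, sleeps 0.5 - 1 ms first (see docg4_erase_block()),
- * so the 200 ms budget above is a backstop rather than a typical wait.
- */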
-
-static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
-{
-
- struct docg4_priv *doc = nand_get_controller_data(nand);
- int status = NAND_STATUS_WP; /* inverse logic?? */
- dev_dbg(doc->dev, "%s...\n", __func__);
-
- /* report any previously unreported error */
- if (doc->status) {
- status |= doc->status;
- doc->status = 0;
- return status;
- }
-
- status |= poll_status(doc);
- return status;
-}
-
-static void docg4_select_chip(struct mtd_info *mtd, int chip)
-{
- /*
- * Select among multiple cascaded chips ("floors"). Multiple floors are
- * not yet supported, so the only valid non-negative value is 0.
- */
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
-
- dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
-
- if (chip < 0)
- return; /* deselected */
-
- if (chip > 0)
- dev_warn(doc->dev, "multiple floors currently unsupported\n");
-
- writew(0, docptr + DOC_DEVICESELECT);
-}
-
-static void reset(struct mtd_info *mtd)
-{
- /* full device reset */
-
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
-
- writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
- docptr + DOC_ASICMODE);
- writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
- docptr + DOC_ASICMODECONFIRM);
- write_nop(docptr);
-
- writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
- docptr + DOC_ASICMODE);
- writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
- docptr + DOC_ASICMODECONFIRM);
-
- writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
-
- poll_status(doc);
-}
-
-static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
-{
- /* read the 7 hw-generated ecc bytes */
-
- int i;
- for (i = 0; i < 7; i++) { /* hw quirk; read twice */
- ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
- ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
- }
-}
-
-static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
-{
- /*
- * Called after a page read when hardware reports bitflips.
- * Up to four bitflips can be corrected.
- */
-
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
- int i, numerrs, errpos[4];
- const uint8_t blank_read_hwecc[8] = {
- 0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };
-
- read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */
-
- /* check if read error is due to a blank page */
- if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
- return 0; /* yes */
-
- /* skip additional check of "written flag" if ignore_badblocks */
- if (!ignore_badblocks) {
-
- /*
- * If the hw ecc bytes are not those of a blank page, there's
- * still a chance that the page is blank, but was read with
- * errors. Check the "written flag" in last oob byte, which
- * is set to zero when a page is written. If more than half
- * the bits are set, assume a blank page. Unfortunately, the
- * bit flip(s) are not reported in stats.
- */
-
- if (nand->oob_poi[15]) {
- int bit, numsetbits = 0;
- unsigned long written_flag = nand->oob_poi[15];
- for_each_set_bit(bit, &written_flag, 8)
- numsetbits++;
- if (numsetbits > 4) { /* assume blank */
- dev_warn(doc->dev,
- "error(s) in blank page "
- "at offset %08x\n",
- page * DOCG4_PAGE_SIZE);
- return 0;
- }
- }
- }
-
- /*
- * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
- * algorithm is used to decode this. However the hw operates on page
- * data in a bit order that is the reverse of that of the bch alg,
- * requiring that the bits be reversed on the result. Thanks to Ivan
- * Djelic for his analysis!
- */
- for (i = 0; i < 7; i++)
- doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);
-
- numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
- doc->ecc_buf, NULL, errpos);
-
- if (numerrs == -EBADMSG) {
- dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
- page * DOCG4_PAGE_SIZE);
- return -EBADMSG;
- }
-
- BUG_ON(numerrs < 0); /* -EINVAL, or anything other than -EBADMSG */
-
- /* undo last step in BCH alg (modulo mirroring not needed) */
- for (i = 0; i < numerrs; i++)
- errpos[i] = (errpos[i] & ~7) | (7 - (errpos[i] & 7));
-
- /* fix the errors */
- for (i = 0; i < numerrs; i++) {
-
- /* ignore if error within oob ecc bytes */
- if (errpos[i] > DOCG4_USERDATA_LEN * 8)
- continue;
-
- /* if error within oob area preceding ecc bytes... */
- if (errpos[i] > DOCG4_PAGE_SIZE * 8)
- change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
- (unsigned long *)nand->oob_poi);
-
- else /* error in page data */
- change_bit(errpos[i], (unsigned long *)buf);
- }
-
- dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
- numerrs, page * DOCG4_PAGE_SIZE);
-
- return numerrs;
-}
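-
-/*
- * A worked example of the bit-order fixup above: an error reported by
- * decode_bch() at bit 10 (byte 1, bit 2 in the bch bit order) maps to
- * (10 & ~7) | (7 - (10 & 7)) = 8 | 5 = 13, i.e., byte 1, bit 5 as the
- * data is laid out by the hardware.
- */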
-
-static uint8_t docg4_read_byte(struct mtd_info *mtd)
-{
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
-
- dev_dbg(doc->dev, "%s\n", __func__);
-
- if (doc->last_command.command == NAND_CMD_STATUS) {
- int status;
-
- /*
- * Previous nand command was status request, so nand
- * infrastructure code expects to read the status here. If an
- * error occurred in a previous operation, report it.
- */
- doc->last_command.command = 0;
-
- if (doc->status) {
- status = doc->status;
- doc->status = 0;
- } else {
- /* why is NAND_STATUS_WP inverse logic?? */
- status = NAND_STATUS_WP | NAND_STATUS_READY;
- }
-
- return status;
- }
-
- dev_warn(doc->dev, "unexpected call to read_byte()\n");
-
- return 0;
-}
-
-static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
-{
- /* write the four address bytes packed in docg4_addr to the device */
-
- void __iomem *docptr = doc->virtadr;
- writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
- docg4_addr >>= 8;
- writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
- docg4_addr >>= 8;
- writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
- docg4_addr >>= 8;
- writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
-}
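-
-/*
- * The bytes go out least-significant first, so the example address
- * 0x00800108 (see mtd_to_docg4_address() below) is written as the byte
- * sequence 0x08, 0x01, 0x80, 0x00.
- */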
-
-static int read_progstatus(struct docg4_priv *doc)
-{
- /*
- * This apparently checks the status of programming. Done after an
- * erasure, and after page data is written. On error, the status is
- * saved, to be later retrieved by the nand infrastructure code.
- */
- void __iomem *docptr = doc->virtadr;
-
- /* status is read from the I/O reg */
- uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
- uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
- uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);
-
- dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
- __func__, status1, status2, status3);
-
- if (status1 != DOCG4_PROGSTATUS_GOOD
- || status2 != DOCG4_PROGSTATUS_GOOD_2
- || status3 != DOCG4_PROGSTATUS_GOOD_2) {
- doc->status = NAND_STATUS_FAIL;
- dev_warn(doc->dev, "read_progstatus failed: "
- "%02x, %02x, %02x\n", status1, status2, status3);
- return -EIO;
- }
- return 0;
-}
-
-static int pageprog(struct mtd_info *mtd)
-{
- /*
- * Final step in writing a page. Writes the contents of its
- * internal buffer out to the flash array, or some such.
- */
-
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
- int retval = 0;
-
- dev_dbg(doc->dev, "docg4: %s\n", __func__);
-
- writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
- writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
- write_nop(docptr);
- write_nop(docptr);
-
- /* Just busy-wait; usleep_range() slows things down noticeably. */
- poll_status(doc);
-
- writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
- writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
- writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
-
- retval = read_progstatus(doc);
- writew(0, docptr + DOC_DATAEND);
- write_nop(docptr);
- poll_status(doc);
- write_nop(docptr);
-
- return retval;
-}
-
-static void sequence_reset(struct mtd_info *mtd)
-{
- /* common starting sequence for all operations */
-
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
-
- writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
- writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
- writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
- write_nop(docptr);
- write_nop(docptr);
- poll_status(doc);
- write_nop(docptr);
-}
-
-static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
-{
- /* first step in reading a page */
-
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
-
- dev_dbg(doc->dev,
- "docg4: %s: g4 page %08x\n", __func__, docg4_addr);
-
- sequence_reset(mtd);
-
- writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
- writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
- write_nop(docptr);
-
- write_addr(doc, docg4_addr);
-
- write_nop(docptr);
- writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
- write_nop(docptr);
- write_nop(docptr);
-
- poll_status(doc);
-}
-
-static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
-{
- /* first step in writing a page */
-
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
-
- dev_dbg(doc->dev,
- "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
- sequence_reset(mtd);
-
- if (unlikely(reliable_mode)) {
- writew(DOCG4_SEQ_SETMODE, docptr + DOC_FLASHSEQUENCE);
- writew(DOCG4_CMD_FAST_MODE, docptr + DOC_FLASHCOMMAND);
- writew(DOC_CMD_RELIABLE_MODE, docptr + DOC_FLASHCOMMAND);
- write_nop(docptr);
- }
-
- writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
- writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
- write_nop(docptr);
- write_addr(doc, docg4_addr);
- write_nop(docptr);
- write_nop(docptr);
- poll_status(doc);
-}
-
-static uint32_t mtd_to_docg4_address(int page, int column)
-{
- /*
- * Convert mtd address to format used by the device, 32 bit packed.
- *
- * Some notes on G4 addressing... The M-Sys documentation on this device
- * claims that pages are 2K in length, and indeed, the format of the
- * address used by the device reflects that. But within each page are
- * four 512 byte "sub-pages", each with its own oob data that is
- * read/written immediately after the 512 bytes of page data. This oob
- * data contains the ecc bytes for the preceding 512 bytes.
- *
- * Rather than tell the mtd nand infrastructure that page size is 2k,
- * with four sub-pages each, we engage in a little subterfuge and tell
- * the infrastructure code that pages are 512 bytes in size. This is
- * done because during the course of reverse-engineering the device, I
- * never observed an instance where an entire 2K "page" was read or
- * written as a unit. Each "sub-page" is always addressed individually,
- * its data read/written, and ecc handled before the next "sub-page" is
- * addressed.
- *
- * This requires us to convert addresses passed by the mtd nand
- * infrastructure code to those used by the device.
- *
- * The address that is written to the device consists of four bytes: the
- * first two are the 2k page number, and the last two are the index into
- * the page. The index is in terms of 16-bit half-words and includes
- * the preceding oob data, so e.g., the index into the second
- * "sub-page" is 0x108, and the full device address of the start of mtd
- * page 0x201 is 0x00800108.
- */
- int g4_page = page / 4; /* device's 2K page */
- int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */
- return (g4_page << 16) | g4_index; /* pack */
-}
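-
-/*
- * Stepping through the example above: for mtd page 0x201, column 0,
- * g4_page = 0x201 / 4 = 0x80 and g4_index = (0x201 % 4) * 0x108 + 0 =
- * 0x108, which packs to (0x80 << 16) | 0x108 = 0x00800108.
- */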
-
-static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
- int page_addr)
-{
- /* handle standard nand commands */
-
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);
-
- dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
- __func__, command, page_addr, column);
-
- /*
- * Save the command and its arguments. This enables emulation of
- * standard flash devices, and also some optimizations.
- */
- doc->last_command.command = command;
- doc->last_command.column = column;
- doc->last_command.page = page_addr;
-
- switch (command) {
-
- case NAND_CMD_RESET:
- reset(mtd);
- break;
-
- case NAND_CMD_READ0:
- read_page_prologue(mtd, g4_addr);
- break;
-
- case NAND_CMD_STATUS:
- /* next call to read_byte() will expect a status */
- break;
-
- case NAND_CMD_SEQIN:
- if (unlikely(reliable_mode)) {
- uint16_t g4_page = g4_addr >> 16;
-
- /* writes to odd-numbered 2k pages are invalid */
- if (g4_page & 0x01)
- dev_warn(doc->dev,
- "invalid reliable mode address\n");
- }
-
- write_page_prologue(mtd, g4_addr);
-
- /* hack for deferred write of oob bytes */
- if (doc->oob_page == page_addr)
- memcpy(nand->oob_poi, doc->oob_buf, 16);
- break;
-
- case NAND_CMD_PAGEPROG:
- pageprog(mtd);
- break;
-
- /* we don't expect these, based on review of nand_base.c */
- case NAND_CMD_READOOB:
- case NAND_CMD_READID:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- dev_warn(doc->dev, "docg4_command: "
- "unexpected nand command 0x%x\n", command);
- break;
-
- }
-}
-
-static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
- uint8_t *buf, int page, bool use_ecc)
-{
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
- uint16_t status, edc_err, *buf16;
- int bits_corrected = 0;
-
- dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
-
- nand_read_page_op(nand, page, 0, NULL, 0);
-
- writew(DOC_ECCCONF0_READ_MODE |
- DOC_ECCCONF0_ECC_ENABLE |
- DOC_ECCCONF0_UNKNOWN |
- DOCG4_BCH_SIZE,
- docptr + DOC_ECCCONF0);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
-
- /* the 1st byte from the I/O reg is a status; the rest is page data */
- status = readw(docptr + DOC_IOSPACE_DATA);
- if (status & DOCG4_READ_ERROR) {
- dev_err(doc->dev,
- "docg4_read_page: bad status: 0x%02x\n", status);
- writew(0, docptr + DOC_DATAEND);
- return -EIO;
- }
-
- dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
-
- docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
-
- /* this device always reads oob after page data */
- /* first 14 oob bytes read from I/O reg */
- docg4_read_buf(mtd, nand->oob_poi, 14);
-
- /* last 2 read from another reg */
- buf16 = (uint16_t *)(nand->oob_poi + 14);
- *buf16 = readw(docptr + DOCG4_MYSTERY_REG);
-
- write_nop(docptr);
-
- if (likely(use_ecc)) {
-
- /* read the register that tells us if bitflip(s) detected */
- edc_err = readw(docptr + DOC_ECCCONF1);
- edc_err = readw(docptr + DOC_ECCCONF1); /* hw quirk; read twice */
- dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);
-
- /* If bitflips are reported, attempt to correct with ecc */
- if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
- bits_corrected = correct_data(mtd, buf, page);
- if (bits_corrected == -EBADMSG)
- mtd->ecc_stats.failed++;
- else
- mtd->ecc_stats.corrected += bits_corrected;
- }
- }
-
- writew(0, docptr + DOC_DATAEND);
- if (bits_corrected == -EBADMSG) /* uncorrectable errors */
- return 0;
- return bits_corrected;
-}
-
-static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
- uint8_t *buf, int oob_required, int page)
-{
- return read_page(mtd, nand, buf, page, false);
-}
-
-static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
- uint8_t *buf, int oob_required, int page)
-{
- return read_page(mtd, nand, buf, page, true);
-}
-
-static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
- int page)
-{
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
- uint16_t status;
-
- dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
-
- nand_read_page_op(nand, page, nand->ecc.size, NULL, 0);
-
- writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
-
- /* the 1st byte from the I/O reg is a status; the rest is oob data */
- status = readw(docptr + DOC_IOSPACE_DATA);
- if (status & DOCG4_READ_ERROR) {
- dev_warn(doc->dev,
- "docg4_read_oob failed: status = 0x%02x\n", status);
- return -EIO;
- }
-
- dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
-
- docg4_read_buf(mtd, nand->oob_poi, 16);
-
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
- writew(0, docptr + DOC_DATAEND);
- write_nop(docptr);
-
- return 0;
-}
-
-static int docg4_erase_block(struct mtd_info *mtd, int page)
-{
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
- uint16_t g4_page;
- int status;
-
- dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
-
- sequence_reset(mtd);
-
- writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
- writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
- write_nop(docptr);
-
- /* only 2 bytes of address are written to specify erase block */
- g4_page = (uint16_t)(page / 4); /* to g4's 2k page addressing */
- writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
- g4_page >>= 8;
- writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
- write_nop(docptr);
-
- /* start the erasure */
- writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
- write_nop(docptr);
- write_nop(docptr);
-
- usleep_range(500, 1000); /* erasure is long; take a snooze */
- poll_status(doc);
- writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
- writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
- writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
- write_nop(docptr);
-
- read_progstatus(doc);
-
- writew(0, docptr + DOC_DATAEND);
- write_nop(docptr);
- poll_status(doc);
- write_nop(docptr);
-
- status = nand->waitfunc(mtd, nand);
- if (status < 0)
- return status;
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf, int page, bool use_ecc)
-{
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
- uint8_t ecc_buf[8];
-
- dev_dbg(doc->dev, "%s...\n", __func__);
-
- nand_prog_page_begin_op(nand, page, 0, NULL, 0);
-
- writew(DOC_ECCCONF0_ECC_ENABLE |
- DOC_ECCCONF0_UNKNOWN |
- DOCG4_BCH_SIZE,
- docptr + DOC_ECCCONF0);
- write_nop(docptr);
-
- /* write the page data */
- docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);
-
- /* oob bytes 0 through 5 are written to I/O reg */
- docg4_write_buf16(mtd, nand->oob_poi, 6);
-
- /* oob byte 6 written to a separate reg */
- writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);
-
- write_nop(docptr);
- write_nop(docptr);
-
- /* write hw-generated ecc bytes to oob */
- if (likely(use_ecc)) {
- /* oob byte 7 is hamming code */
- uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
- hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
- writew(hamming, docptr + DOCG4_OOB_6_7);
- write_nop(docptr);
-
- /* read the 7 bch bytes from ecc regs */
- read_hw_ecc(docptr, ecc_buf);
- ecc_buf[7] = 0; /* clear the "page written" flag */
- }
-
- /* write user-supplied bytes to oob */
- else {
- writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
- write_nop(docptr);
- memcpy(ecc_buf, &nand->oob_poi[8], 8);
- }
-
- docg4_write_buf16(mtd, ecc_buf, 8);
- write_nop(docptr);
- write_nop(docptr);
- writew(0, docptr + DOC_DATAEND);
- write_nop(docptr);
-
- return nand_prog_page_end_op(nand);
-}
-
-static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf, int oob_required, int page)
-{
- return write_page(mtd, nand, buf, page, false);
-}
-
-static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf, int oob_required, int page)
-{
- return write_page(mtd, nand, buf, page, true);
-}
-
-static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
- int page)
-{
- /*
- * Writing oob-only is not really supported, because MLC nand must write
- * oob bytes at the same time as page data. Nonetheless, we save the
- * oob buffer contents here, and then write it along with the page data
- * if the same page is subsequently written. This allows user space
- * utilities that write the oob data prior to the page data to work
- * (e.g., nandwrite). The disadvantage is that, if the intention was to
- * write oob only, the operation is quietly ignored. Also, oob can get
- * corrupted if two concurrent processes are running nandwrite.
- */
-
- /* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
- struct docg4_priv *doc = nand_get_controller_data(nand);
- doc->oob_page = page;
- memcpy(doc->oob_buf, nand->oob_poi, 16);
- return 0;
-}
-
-static int __init read_factory_bbt(struct mtd_info *mtd)
-{
- /*
- * The device contains a read-only factory bad block table. Read it and
- * update the memory-based bbt accordingly.
- */
-
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
- uint8_t *buf;
- int i, block;
- __u32 eccfailed_stats = mtd->ecc_stats.failed;
-
- buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- read_page_prologue(mtd, g4_addr);
- docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
-
- /*
- * If no memory-based bbt was created, exit. This will happen if module
- * parameter ignore_badblocks is set. Then why even call this function?
- * For an unknown reason, block erase always fails if it's the first
- * operation after device power-up. The above read ensures it never is.
- * Ugly, I know.
- */
- if (nand->bbt == NULL) /* no memory-based bbt */
- goto exit;
-
- if (mtd->ecc_stats.failed > eccfailed_stats) {
- /*
- * Whoops, an ecc failure occurred reading the factory bbt.
- * It is stored redundantly, so we get another chance.
- */
- eccfailed_stats = mtd->ecc_stats.failed;
- docg4_read_page(mtd, nand, buf, 0, DOCG4_REDUNDANT_BBT_PAGE);
- if (mtd->ecc_stats.failed > eccfailed_stats) {
- dev_warn(doc->dev,
- "The factory bbt could not be read!\n");
- goto exit;
- }
- }
-
- /*
- * Parse factory bbt and update memory-based bbt. Factory bbt format is
- * simple: one bit per block, block numbers increase left to right (msb
- * to lsb). Bit clear means bad block.
- */
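- /*
- * For example, buf[0] == 0x7f (msb clear) marks block 0 bad: bits =
- * ~0x7f = 0x80 within the low byte, so bitnum = 7 and badblock =
- * 0 + 7 - 7 = 0. A clear lsb (buf[0] == 0xfe) would mark block 7.
- */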
- for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
- int bitnum;
- unsigned long bits = ~buf[i];
- for_each_set_bit(bitnum, &bits, 8) {
- int badblock = block + 7 - bitnum;
- nand->bbt[badblock / 4] |=
- 0x03 << ((badblock % 4) * 2);
- mtd->ecc_stats.badblocks++;
- dev_notice(doc->dev, "factory-marked bad block: %d\n",
- badblock);
- }
- }
- exit:
- kfree(buf);
- return 0;
-}
-
-static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- /*
- * Mark a block as bad. Bad blocks are marked in the oob area of the
- * first page of the block. The default scan_bbt() in the nand
- * infrastructure code works fine for building the memory-based bbt
- * during initialization, as does the nand infrastructure function that
- * checks if a block is bad by reading the bbt. This function replaces
- * the nand default because writes to oob-only are not supported.
- */
-
- int ret, i;
- uint8_t *buf;
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- struct nand_bbt_descr *bbtd = nand->badblock_pattern;
- int page = (int)(ofs >> nand->page_shift);
- uint32_t g4_addr = mtd_to_docg4_address(page, 0);
-
- dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);
-
- if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
- dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
- __func__, ofs);
-
- /* allocate blank buffer for page data */
- buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- /* write bit-wise negation of pattern to oob buffer */
- memset(nand->oob_poi, 0xff, mtd->oobsize);
- for (i = 0; i < bbtd->len; i++)
- nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];
-
- /* write first page of block */
- write_page_prologue(mtd, g4_addr);
- docg4_write_page(mtd, nand, buf, 1, page);
- ret = pageprog(mtd);
-
- kfree(buf);
-
- return ret;
-}
-
-static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs)
-{
- /* only called when module_param ignore_badblocks is set */
- return 0;
-}
-
-static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
-{
- /*
- * Put the device into "deep power-down" mode. Note that CE# must be
- * deasserted for this to take effect. The xscale, e.g., can be
- * configured to float this signal when the processor enters power-down,
- * and a suitable pull-up ensures its deassertion.
- */
-
- int i;
- uint8_t pwr_down;
- struct docg4_priv *doc = platform_get_drvdata(pdev);
- void __iomem *docptr = doc->virtadr;
-
- dev_dbg(doc->dev, "%s...\n", __func__);
-
- /* poll the register that tells us we're ready to go to sleep */
- for (i = 0; i < 10; i++) {
- pwr_down = readb(docptr + DOC_POWERMODE);
- if (pwr_down & DOC_POWERDOWN_READY)
- break;
- usleep_range(1000, 4000);
- }
-
- if (!(pwr_down & DOC_POWERDOWN_READY)) {
- dev_err(doc->dev, "suspend failed; "
- "timeout polling DOC_POWERDOWN_READY\n");
- return -EIO;
- }
-
- writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
- docptr + DOC_ASICMODE);
- writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
- docptr + DOC_ASICMODECONFIRM);
-
- write_nop(docptr);
-
- return 0;
-}
-
-static int docg4_resume(struct platform_device *pdev)
-{
-
- /*
- * Exit power-down. Twelve consecutive reads of the address below
- * accomplish this, assuming CE# has been asserted.
- */
-
- struct docg4_priv *doc = platform_get_drvdata(pdev);
- void __iomem *docptr = doc->virtadr;
- int i;
-
- dev_dbg(doc->dev, "%s...\n", __func__);
-
- for (i = 0; i < 12; i++)
- readb(docptr + 0x1fff);
-
- return 0;
-}
-
-static void init_mtd_structs(struct mtd_info *mtd)
-{
- /* initialize mtd and nand data structures */
-
- /*
- * Note that some of the following initializations are not usually
- * required within a nand driver because they are performed by the nand
- * infrastructure code as part of nand_scan(). In this case they need
- * to be initialized here because we skip the call to nand_scan_ident() (the
- * first half of nand_scan()). The call to nand_scan_ident() could be
- * skipped because for this device the chip id is not read in the manner
- * of a standard nand device.
- */
-
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
-
- mtd->size = DOCG4_CHIP_SIZE;
- mtd->name = "Msys_Diskonchip_G4";
- mtd->writesize = DOCG4_PAGE_SIZE;
- mtd->erasesize = DOCG4_BLOCK_SIZE;
- mtd->oobsize = DOCG4_OOB_SIZE;
- mtd_set_ooblayout(mtd, &docg4_ooblayout_ops);
- nand->chipsize = DOCG4_CHIP_SIZE;
- nand->chip_shift = DOCG4_CHIP_SHIFT;
- nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
- nand->chip_delay = 20;
- nand->page_shift = DOCG4_PAGE_SHIFT;
- nand->pagemask = 0x3ffff;
- nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
- nand->badblockbits = 8;
- nand->ecc.mode = NAND_ECC_HW_SYNDROME;
- nand->ecc.size = DOCG4_PAGE_SIZE;
- nand->ecc.prepad = 8;
- nand->ecc.bytes = 8;
- nand->ecc.strength = DOCG4_T;
- nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
- nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
- nand->controller = &nand->dummy_controller;
- nand_controller_init(nand->controller);
-
- /* methods */
- nand->cmdfunc = docg4_command;
- nand->waitfunc = docg4_wait;
- nand->select_chip = docg4_select_chip;
- nand->read_byte = docg4_read_byte;
- nand->block_markbad = docg4_block_markbad;
- nand->read_buf = docg4_read_buf;
- nand->write_buf = docg4_write_buf16;
- nand->erase = docg4_erase_block;
- nand->set_features = nand_get_set_features_notsupp;
- nand->get_features = nand_get_set_features_notsupp;
- nand->ecc.read_page = docg4_read_page;
- nand->ecc.write_page = docg4_write_page;
- nand->ecc.read_page_raw = docg4_read_page_raw;
- nand->ecc.write_page_raw = docg4_write_page_raw;
- nand->ecc.read_oob = docg4_read_oob;
- nand->ecc.write_oob = docg4_write_oob;
-
- /*
- * The way the nand infrastructure code is written, a memory-based bbt
- * is not created if NAND_SKIP_BBTSCAN is set. With no memory bbt,
- * nand->block_bad() is used. So when ignoring bad blocks, we skip the
- * scan and define a dummy block_bad() which always returns 0.
- */
- if (ignore_badblocks) {
- nand->options |= NAND_SKIP_BBTSCAN;
- nand->block_bad = docg4_block_neverbad;
- }
-
-}
-
-static int read_id_reg(struct mtd_info *mtd)
-{
- struct nand_chip *nand = mtd_to_nand(mtd);
- struct docg4_priv *doc = nand_get_controller_data(nand);
- void __iomem *docptr = doc->virtadr;
- uint16_t id1, id2;
-
- /* check for presence of g4 chip by reading id registers */
- id1 = readw(docptr + DOC_CHIPID);
- id1 = readw(docptr + DOCG4_MYSTERY_REG);
- id2 = readw(docptr + DOC_CHIPID_INV);
- id2 = readw(docptr + DOCG4_MYSTERY_REG);
-
- if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
- dev_info(doc->dev,
- "NAND device: 128MiB Diskonchip G4 detected\n");
- return 0;
- }
-
- return -ENODEV;
-}
-
-static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
-
-static int docg4_attach_chip(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct docg4_priv *doc = (struct docg4_priv *)(chip + 1);
- int ret;
-
- init_mtd_structs(mtd);
-
- /* Initialize kernel BCH algorithm */
- doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
- if (!doc->bch)
- return -EINVAL;
-
- reset(mtd);
-
- ret = read_id_reg(mtd);
- if (ret)
- free_bch(doc->bch);
-
- return ret;
-}
-
-static void docg4_detach_chip(struct nand_chip *chip)
-{
- struct docg4_priv *doc = (struct docg4_priv *)(chip + 1);
-
- free_bch(doc->bch);
-}
-
-static const struct nand_controller_ops docg4_controller_ops = {
- .attach_chip = docg4_attach_chip,
- .detach_chip = docg4_detach_chip,
-};
-
-static int __init probe_docg4(struct platform_device *pdev)
-{
- struct mtd_info *mtd;
- struct nand_chip *nand;
- void __iomem *virtadr;
- struct docg4_priv *doc;
- int len, retval;
- struct resource *r;
- struct device *dev = &pdev->dev;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(dev, "no io memory resource defined!\n");
- return -ENODEV;
- }
-
- virtadr = ioremap(r->start, resource_size(r));
- if (!virtadr) {
- dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
- return -EIO;
- }
-
- len = sizeof(struct nand_chip) + sizeof(struct docg4_priv);
- nand = kzalloc(len, GFP_KERNEL);
- if (nand == NULL) {
- retval = -ENOMEM;
- goto unmap;
- }
-
- mtd = nand_to_mtd(nand);
- doc = (struct docg4_priv *) (nand + 1);
- nand_set_controller_data(nand, doc);
- mtd->dev.parent = &pdev->dev;
- doc->virtadr = virtadr;
- doc->dev = dev;
- platform_set_drvdata(pdev, doc);
-
- /*
- * Running nand_scan() with maxchips == 0 will skip nand_scan_ident(),
- * since chip identification is specific to this driver and is done in
- * the ->attach_chip callback.
- */
- nand->dummy_controller.ops = &docg4_controller_ops;
- retval = nand_scan(mtd, 0);
- if (retval)
- goto free_nand;
-
- retval = read_factory_bbt(mtd);
- if (retval)
- goto cleanup_nand;
-
- retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
- if (retval)
- goto cleanup_nand;
-
- doc->mtd = mtd;
-
- return 0;
-
-cleanup_nand:
- nand_cleanup(nand);
-free_nand:
- kfree(nand);
-unmap:
- iounmap(virtadr);
-
- return retval;
-}
-
-static int __exit cleanup_docg4(struct platform_device *pdev)
-{
- struct docg4_priv *doc = platform_get_drvdata(pdev);
- nand_release(doc->mtd);
- kfree(mtd_to_nand(doc->mtd));
- iounmap(doc->virtadr);
- return 0;
-}
-
-static struct platform_driver docg4_driver = {
- .driver = {
- .name = "docg4",
- },
- .suspend = docg4_suspend,
- .resume = docg4_resume,
- .remove = __exit_p(cleanup_docg4),
-};
-
-module_platform_driver_probe(docg4_driver, probe_docg4);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mike Dunn");
-MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 55f449b711fd..d6ed697fcfe6 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -317,10 +317,10 @@ static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
}
/* cmdfunc send commands to the FCM */
-static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+static void fsl_elbc_cmdfunc(struct nand_chip *chip, unsigned int command,
int column, int page_addr)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
@@ -533,7 +533,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
}
}
-static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip)
+static void fsl_elbc_select_chip(struct nand_chip *chip, int cs)
{
/* The hardware does not seem to support multiple
* chips per bank.
@@ -543,9 +543,9 @@ static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip)
/*
* Write buf to the FCM Controller Data Buffer
*/
-static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+static void fsl_elbc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
unsigned int bufsize = mtd->writesize + mtd->oobsize;
@@ -581,9 +581,8 @@ static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
* read a byte from either the FCM hardware buffer if it has any data left,
* otherwise issue a command to read a single byte.
*/
-static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
+static u8 fsl_elbc_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
@@ -598,9 +597,8 @@ static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
/*
* Read from the FCM Controller Data Buffer
*/
-static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+static void fsl_elbc_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
int avail;
@@ -623,7 +621,7 @@ static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
/* This function is called after Program and Erase Operations to
* check for success or failure.
*/
-static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+static int fsl_elbc_wait(struct nand_chip *chip)
{
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
@@ -660,8 +658,8 @@ static int fsl_elbc_attach_chip(struct nand_chip *chip)
chip->chipsize);
dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
chip->pagemask);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_delay = %d\n",
- chip->chip_delay);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->legacy.chip_delay = %d\n",
+ chip->legacy.chip_delay);
dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
chip->badblockpos);
dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
@@ -710,18 +708,19 @@ static const struct nand_controller_ops fsl_elbc_controller_ops = {
.attach_chip = fsl_elbc_attach_chip,
};
-static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int fsl_elbc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
- fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ fsl_elbc_read_buf(chip, chip->oob_poi, mtd->oobsize);
- if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
+ if (fsl_elbc_wait(chip) & NAND_STATUS_FAIL)
mtd->ecc_stats.failed++;
return elbc_fcm_ctrl->max_bitflips;
@@ -730,11 +729,13 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
-static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+static int fsl_elbc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
- fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ fsl_elbc_write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
@@ -742,13 +743,15 @@ static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
-static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, uint32_t data_len,
- const uint8_t *buf, int oob_required, int page)
+static int fsl_elbc_write_subpage(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- fsl_elbc_write_buf(mtd, buf, mtd->writesize);
- fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ fsl_elbc_write_buf(chip, buf, mtd->writesize);
+ fsl_elbc_write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
@@ -773,14 +776,14 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
/* fill in nand_chip structure */
/* set up function call table */
- chip->read_byte = fsl_elbc_read_byte;
- chip->write_buf = fsl_elbc_write_buf;
- chip->read_buf = fsl_elbc_read_buf;
+ chip->legacy.read_byte = fsl_elbc_read_byte;
+ chip->legacy.write_buf = fsl_elbc_write_buf;
+ chip->legacy.read_buf = fsl_elbc_read_buf;
chip->select_chip = fsl_elbc_select_chip;
- chip->cmdfunc = fsl_elbc_cmdfunc;
- chip->waitfunc = fsl_elbc_wait;
- chip->set_features = nand_get_set_features_notsupp;
- chip->get_features = nand_get_set_features_notsupp;
+ chip->legacy.cmdfunc = fsl_elbc_cmdfunc;
+ chip->legacy.waitfunc = fsl_elbc_wait;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
@@ -915,7 +918,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
goto err;
priv->chip.controller->ops = &fsl_elbc_controller_ops;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(&priv->chip, 1);
if (ret)
goto err;
@@ -942,9 +945,8 @@ static int fsl_elbc_nand_remove(struct platform_device *pdev)
{
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
- struct mtd_info *mtd = nand_to_mtd(&priv->chip);
- nand_release(mtd);
+ nand_release(&priv->chip);
fsl_elbc_chip_remove(priv);
mutex_lock(&fsl_elbc_nand_mutex);
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 24f59d0066af..6f4afc44381a 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -30,6 +30,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/fsl_ifc.h>
+#include <linux/iopoll.h>
#define ERR_BYTE 0xFF /* Value returned for read
bytes when read failed */
@@ -300,9 +301,9 @@ static void fsl_ifc_do_read(struct nand_chip *chip,
}
/* cmdfunc send commands to the IFC NAND Machine */
-static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
- int column, int page_addr) {
- struct nand_chip *chip = mtd_to_nand(mtd);
+static void fsl_ifc_cmdfunc(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr) {
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
@@ -508,7 +509,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
}
}
-static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip)
+static void fsl_ifc_select_chip(struct nand_chip *chip, int cs)
{
/* The hardware does not seem to support multiple
* chips per bank.
@@ -518,9 +519,9 @@ static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip)
/*
* Write buf to the IFC NAND Controller Data Buffer
*/
-static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+static void fsl_ifc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
unsigned int bufsize = mtd->writesize + mtd->oobsize;
@@ -544,9 +545,8 @@ static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
* Read a byte from either the IFC hardware buffer
* read function for 8-bit buswidth
*/
-static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
+static uint8_t fsl_ifc_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
unsigned int offset;
@@ -567,9 +567,8 @@ static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
* Read two bytes from the IFC hardware buffer
* read function for 16-bit buswidth
*/
-static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
+static uint8_t fsl_ifc_read_byte16(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
uint16_t data;
@@ -590,9 +589,8 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
/*
* Read from the IFC Controller Data Buffer
*/
-static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+static void fsl_ifc_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
int avail;
@@ -616,8 +614,9 @@ static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
* This function is called after Program and Erase Operations to
* check for success or failure.
*/
-static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+static int fsl_ifc_wait(struct nand_chip *chip)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
@@ -678,20 +677,21 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf)
return bitflips;
}
-static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int fsl_ifc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
- fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);
if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) {
if (!oob_required)
- fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ fsl_ifc_read_buf(chip, chip->oob_poi, mtd->oobsize);
return check_erased_page(chip, buf);
}
@@ -705,11 +705,13 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
-static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+static int fsl_ifc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
- fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ fsl_ifc_write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
@@ -725,8 +727,8 @@ static int fsl_ifc_attach_chip(struct nand_chip *chip)
chip->chipsize);
dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
chip->pagemask);
- dev_dbg(priv->dev, "%s: nand->chip_delay = %d\n", __func__,
- chip->chip_delay);
+ dev_dbg(priv->dev, "%s: nand->legacy.chip_delay = %d\n", __func__,
+ chip->legacy.chip_delay);
dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
chip->badblockpos);
dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
@@ -761,7 +763,7 @@ static const struct nand_controller_ops fsl_ifc_controller_ops = {
.attach_chip = fsl_ifc_attach_chip,
};
-static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
+static int fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
{
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
@@ -769,6 +771,27 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
uint32_t cs = priv->bank;
+ if (ctrl->version < FSL_IFC_VERSION_1_1_0)
+ return 0;
+
+ if (ctrl->version > FSL_IFC_VERSION_1_1_0) {
+ u32 ncfgr, status;
+ int ret;
+
+ /* Trigger auto initialization */
+ ncfgr = ifc_in32(&ifc_runtime->ifc_nand.ncfgr);
+ ifc_out32(ncfgr | IFC_NAND_NCFGR_SRAM_INIT_EN, &ifc_runtime->ifc_nand.ncfgr);
+
+ /* Wait until done */
+ ret = readx_poll_timeout(ifc_in32, &ifc_runtime->ifc_nand.ncfgr,
+ status, !(status & IFC_NAND_NCFGR_SRAM_INIT_EN),
+ 10, IFC_TIMEOUT_MSECS * 1000);
+ if (ret)
+ dev_err(priv->dev, "Failed to initialize SRAM!\n");
+
+ return ret;
+ }
+
/* Save CSOR and CSOR_ext */
csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);
@@ -805,12 +828,16 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
msecs_to_jiffies(IFC_TIMEOUT_MSECS));
- if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) {
pr_err("fsl-ifc: Failed to Initialise SRAM\n");
+ return -ETIMEDOUT;
+ }
/* Restore CSOR and CSOR_ext */
ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);
+
+ return 0;
}
static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
@@ -821,6 +848,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
struct nand_chip *chip = &priv->chip;
struct mtd_info *mtd = nand_to_mtd(&priv->chip);
u32 csor;
+ int ret;
/* Fill in fsl_ifc_mtd structure */
mtd->dev.parent = priv->dev;
@@ -830,17 +858,17 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
/* set up function call table */
if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr))
& CSPR_PORT_SIZE_16)
- chip->read_byte = fsl_ifc_read_byte16;
+ chip->legacy.read_byte = fsl_ifc_read_byte16;
else
- chip->read_byte = fsl_ifc_read_byte;
+ chip->legacy.read_byte = fsl_ifc_read_byte;
- chip->write_buf = fsl_ifc_write_buf;
- chip->read_buf = fsl_ifc_read_buf;
+ chip->legacy.write_buf = fsl_ifc_write_buf;
+ chip->legacy.read_buf = fsl_ifc_read_buf;
chip->select_chip = fsl_ifc_select_chip;
- chip->cmdfunc = fsl_ifc_cmdfunc;
- chip->waitfunc = fsl_ifc_wait;
- chip->set_features = nand_get_set_features_notsupp;
- chip->get_features = nand_get_set_features_notsupp;
+ chip->legacy.cmdfunc = fsl_ifc_cmdfunc;
+ chip->legacy.waitfunc = fsl_ifc_wait;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
@@ -853,10 +881,10 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)
& CSPR_PORT_SIZE_16) {
- chip->read_byte = fsl_ifc_read_byte16;
+ chip->legacy.read_byte = fsl_ifc_read_byte16;
chip->options |= NAND_BUSWIDTH_16;
} else {
- chip->read_byte = fsl_ifc_read_byte;
+ chip->legacy.read_byte = fsl_ifc_read_byte;
}
chip->controller = &ifc_nand_ctrl->controller;
@@ -914,8 +942,9 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.algo = NAND_ECC_HAMMING;
}
- if (ctrl->version >= FSL_IFC_VERSION_1_1_0)
- fsl_ifc_sram_init(priv);
+ ret = fsl_ifc_sram_init(priv);
+ if (ret)
+ return ret;
/*
* As IFC version 2.0.0 has 16KB of internal SRAM as compared to older
@@ -1051,7 +1080,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
goto err;
priv->chip.controller->ops = &fsl_ifc_controller_ops;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(&priv->chip, 1);
if (ret)
goto err;
@@ -1077,9 +1106,8 @@ err:
static int fsl_ifc_nand_remove(struct platform_device *dev)
{
struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
- struct mtd_info *mtd = nand_to_mtd(&priv->chip);
- nand_release(mtd);
+ nand_release(&priv->chip);
fsl_ifc_chip_remove(priv);
mutex_lock(&fsl_ifc_nand_mutex);
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index a88e2cf66e0f..673c5a0c9345 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -52,9 +52,9 @@ static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
chip);
}
-static int fun_chip_ready(struct mtd_info *mtd)
+static int fun_chip_ready(struct nand_chip *chip)
{
- struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
if (gpio_get_value(fun->rnb_gpio[fun->mchip_number]))
return 1;
@@ -69,7 +69,7 @@ static void fun_wait_rnb(struct fsl_upm_nand *fun)
struct mtd_info *mtd = nand_to_mtd(&fun->chip);
int cnt = 1000000;
- while (--cnt && !fun_chip_ready(mtd))
+ while (--cnt && !fun_chip_ready(&fun->chip))
cpu_relax();
if (!cnt)
dev_err(fun->dev, "tired waiting for RNB\n");
@@ -78,10 +78,9 @@ static void fun_wait_rnb(struct fsl_upm_nand *fun)
}
}
-static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+static void fun_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
u32 mar;
if (!(ctrl & fun->last_ctrl)) {
@@ -102,51 +101,50 @@ static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
mar = (cmd << (32 - fun->upm.width)) |
fun->mchip_offsets[fun->mchip_number];
- fsl_upm_run_pattern(&fun->upm, chip->IO_ADDR_R, mar);
+ fsl_upm_run_pattern(&fun->upm, chip->legacy.IO_ADDR_R, mar);
if (fun->wait_flags & FSL_UPM_WAIT_RUN_PATTERN)
fun_wait_rnb(fun);
}
-static void fun_select_chip(struct mtd_info *mtd, int mchip_nr)
+static void fun_select_chip(struct nand_chip *chip, int mchip_nr)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
if (mchip_nr == -1) {
- chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
} else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) {
fun->mchip_number = mchip_nr;
- chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
- chip->IO_ADDR_W = chip->IO_ADDR_R;
+ chip->legacy.IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
+ chip->legacy.IO_ADDR_W = chip->legacy.IO_ADDR_R;
} else {
BUG();
}
}
-static uint8_t fun_read_byte(struct mtd_info *mtd)
+static uint8_t fun_read_byte(struct nand_chip *chip)
{
- struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
- return in_8(fun->chip.IO_ADDR_R);
+ return in_8(fun->chip.legacy.IO_ADDR_R);
}
-static void fun_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void fun_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
- struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
int i;
for (i = 0; i < len; i++)
- buf[i] = in_8(fun->chip.IO_ADDR_R);
+ buf[i] = in_8(fun->chip.legacy.IO_ADDR_R);
}
-static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void fun_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
- struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+ struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip));
int i;
for (i = 0; i < len; i++) {
- out_8(fun->chip.IO_ADDR_W, buf[i]);
+ out_8(fun->chip.legacy.IO_ADDR_W, buf[i]);
if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BYTE)
fun_wait_rnb(fun);
}
@@ -162,20 +160,20 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
int ret;
struct device_node *flash_np;
- fun->chip.IO_ADDR_R = fun->io_base;
- fun->chip.IO_ADDR_W = fun->io_base;
- fun->chip.cmd_ctrl = fun_cmd_ctrl;
- fun->chip.chip_delay = fun->chip_delay;
- fun->chip.read_byte = fun_read_byte;
- fun->chip.read_buf = fun_read_buf;
- fun->chip.write_buf = fun_write_buf;
+ fun->chip.legacy.IO_ADDR_R = fun->io_base;
+ fun->chip.legacy.IO_ADDR_W = fun->io_base;
+ fun->chip.legacy.cmd_ctrl = fun_cmd_ctrl;
+ fun->chip.legacy.chip_delay = fun->chip_delay;
+ fun->chip.legacy.read_byte = fun_read_byte;
+ fun->chip.legacy.read_buf = fun_read_buf;
+ fun->chip.legacy.write_buf = fun_write_buf;
fun->chip.ecc.mode = NAND_ECC_SOFT;
fun->chip.ecc.algo = NAND_ECC_HAMMING;
if (fun->mchip_count > 1)
fun->chip.select_chip = fun_select_chip;
if (fun->rnb_gpio[0] >= 0)
- fun->chip.dev_ready = fun_chip_ready;
+ fun->chip.legacy.dev_ready = fun_chip_ready;
mtd->dev.parent = fun->dev;
@@ -184,14 +182,14 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
return -ENODEV;
nand_set_flash_node(&fun->chip, flash_np);
- mtd->name = kasprintf(GFP_KERNEL, "0x%llx.%s", (u64)io_res->start,
- flash_np->name);
+ mtd->name = kasprintf(GFP_KERNEL, "0x%llx.%pOFn", (u64)io_res->start,
+ flash_np);
if (!mtd->name) {
ret = -ENOMEM;
goto err;
}
- ret = nand_scan(mtd, fun->mchip_count);
+ ret = nand_scan(&fun->chip, fun->mchip_count);
if (ret)
goto err;
@@ -326,7 +324,7 @@ static int fun_remove(struct platform_device *ofdev)
struct mtd_info *mtd = nand_to_mtd(&fun->chip);
int i;
- nand_release(mtd);
+ nand_release(&fun->chip);
kfree(mtd->name);
for (i = 0; i < fun->mchip_count; i++) {
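The fsl_upm conversion above is the template for every driver in this series: each legacy hook now receives the nand_chip directly, and the mtd_info (where still needed) is recovered with nand_to_mtd(). A minimal sketch of the pattern, with my_nand/my_cmd_ctrl as illustrative names rather than anything from the driver:

#include <linux/mtd/rawnand.h>

struct my_nand {
	struct nand_chip chip;
	int last_cmd;
};

static inline struct my_nand *to_my_nand(struct nand_chip *chip)
{
	/* the chip is embedded in the driver-private structure */
	return container_of(chip, struct my_nand, chip);
}

/* old prototype: static void my_cmd_ctrl(struct mtd_info *mtd, ...) */
static void my_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl)
{
	struct my_nand *priv = to_my_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);	/* still reachable */

	if (cmd != NAND_CMD_NONE) {
		priv->last_cmd = cmd;
		dev_dbg(mtd->dev.parent, "cmd 0x%x on %s\n", cmd, mtd->name);
	}
}

The same fsl_upm hunk also switches the mtd->name construction to the %pOFn printk specifier, which prints a device-tree node's name without dereferencing flash_np->name by hand.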
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index f418236fa020..70ac8d875218 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -340,10 +340,9 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
return 0;
}
-static int fsmc_setup_data_interface(struct mtd_info *mtd, int csline,
+static int fsmc_setup_data_interface(struct nand_chip *nand, int csline,
const struct nand_data_interface *conf)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct fsmc_nand_data *host = nand_get_controller_data(nand);
struct fsmc_nand_timings tims;
const struct nand_sdr_timings *sdrt;
@@ -368,9 +367,9 @@ static int fsmc_setup_data_interface(struct mtd_info *mtd, int csline,
/*
* fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
*/
-static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
+static void fsmc_enable_hwecc(struct nand_chip *chip, int mode)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+ struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCPLEN_256,
host->regs_va + FSMC_PC);
@@ -385,10 +384,10 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
* FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction up to
* a max of 8 bits)
*/
-static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
+static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const uint8_t *data,
uint8_t *ecc)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+ struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
uint32_t ecc_tmp;
unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
@@ -433,10 +432,10 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
* FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction up to
* a max of 1 bit)
*/
-static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
+static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const uint8_t *data,
uint8_t *ecc)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+ struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
uint32_t ecc_tmp;
ecc_tmp = readl_relaxed(host->regs_va + ECC1);
@@ -610,9 +609,9 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
}
/* fsmc_select_chip - assert or deassert nCE */
-static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
+static void fsmc_select_chip(struct nand_chip *chip, int chipnr)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+ struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
u32 pc;
/* Support only one CS */
@@ -707,7 +706,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
/*
* fsmc_read_page_hwecc
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller expects OOB data read to chip->oob_poi
@@ -719,9 +717,10 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
* After this read, fsmc hardware generates and reports error data bits (up to a
* max of 8 bits)
*/
-static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int fsmc_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int i, j, s, stat, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
@@ -740,7 +739,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
nand_read_page_op(chip, page, s * eccsize, NULL, 0);
- chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
nand_read_data_op(chip, p, eccsize, false);
for (j = 0; j < eccbytes;) {
@@ -767,9 +766,9 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
}
memcpy(&ecc_code[i], oob, chip->ecc.bytes);
- chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
- stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
@@ -791,11 +790,10 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* calc_ecc is 104 bits of information containing a maximum of 8 error
* offsets of 13 bits each for 512 bytes of read data.
*/
-static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
- uint8_t *read_ecc, uint8_t *calc_ecc)
+static int fsmc_bch8_correct_data(struct nand_chip *chip, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+ struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
uint32_t err_idx[8];
uint32_t num_err, i;
uint32_t ecc1, ecc2, ecc3, ecc4;
@@ -951,6 +949,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
nand->ecc.correct = nand_correct_data;
nand->ecc.bytes = 3;
nand->ecc.strength = 1;
+ nand->ecc.options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
break;
case NAND_ECC_SOFT:
@@ -1082,7 +1081,6 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
nand->exec_op = fsmc_exec_op;
nand->select_chip = fsmc_select_chip;
- nand->chip_delay = 30;
/*
* Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
@@ -1125,7 +1123,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* Scan to find existence of the device
*/
nand->dummy_controller.ops = &fsmc_nand_controller_ops;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(nand, 1);
if (ret)
goto release_dma_write_chan;
@@ -1161,7 +1159,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
struct fsmc_nand_data *host = platform_get_drvdata(pdev);
if (host) {
- nand_release(nand_to_mtd(&host->nand));
+ nand_release(&host->nand);
if (host->mode == USE_DMA_ACCESS) {
dma_release_channel(host->write_dma_chan);
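fsmc_read_page_hwecc() above also shows the knock-on change at ECC call sites: ecc.hwctl(), ecc.calculate() and ecc.correct() are all invoked with the chip now. A hedged sketch of a single-step, chip-based read path (buffer sizes and names are illustrative; a real driver loops over ecc.steps and fetches the stored ECC from OOB):

static int my_read_page(struct nand_chip *chip, uint8_t *buf,
			int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t ecc_calc[16] = { 0 }, ecc_code[16] = { 0 };
	int stat;

	nand_read_page_op(chip, page, 0, NULL, 0);
	chip->ecc.hwctl(chip, NAND_ECC_READ);		/* was (mtd, mode) */
	nand_read_data_op(chip, buf, chip->ecc.size, false);
	chip->ecc.calculate(chip, buf, ecc_calc);	/* was (mtd, ...) */

	/* a real driver reads ecc_code back from the OOB area here */
	stat = chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
	if (stat < 0)
		mtd->ecc_stats.failed++;
	else
		mtd->ecc_stats.corrected += stat;

	return stat < 0 ? 0 : stat;
}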
diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c
index 2780af26d9ab..a6c9a824a7d4 100644
--- a/drivers/mtd/nand/raw/gpio.c
+++ b/drivers/mtd/nand/raw/gpio.c
@@ -73,9 +73,10 @@ static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
#endif
-static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+static void gpio_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
{
- struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
+ struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
gpio_nand_dosync(gpiomtd);
@@ -89,13 +90,13 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
if (cmd == NAND_CMD_NONE)
return;
- writeb(cmd, gpiomtd->nand_chip.IO_ADDR_W);
+ writeb(cmd, gpiomtd->nand_chip.legacy.IO_ADDR_W);
gpio_nand_dosync(gpiomtd);
}
-static int gpio_nand_devready(struct mtd_info *mtd)
+static int gpio_nand_devready(struct nand_chip *chip)
{
- struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
+ struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
return gpiod_get_value(gpiomtd->rdy);
}
@@ -194,7 +195,7 @@ static int gpio_nand_remove(struct platform_device *pdev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&gpiomtd->nand_chip));
+ nand_release(&gpiomtd->nand_chip);
/* Enable write protection and disable the chip */
if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
@@ -224,9 +225,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
chip = &gpiomtd->nand_chip;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->IO_ADDR_R = devm_ioremap_resource(dev, res);
- if (IS_ERR(chip->IO_ADDR_R))
- return PTR_ERR(chip->IO_ADDR_R);
+ chip->legacy.IO_ADDR_R = devm_ioremap_resource(dev, res);
+ if (IS_ERR(chip->legacy.IO_ADDR_R))
+ return PTR_ERR(chip->legacy.IO_ADDR_R);
res = gpio_nand_get_io_sync(pdev);
if (res) {
@@ -270,15 +271,15 @@ static int gpio_nand_probe(struct platform_device *pdev)
}
/* Using RDY pin */
if (gpiomtd->rdy)
- chip->dev_ready = gpio_nand_devready;
+ chip->legacy.dev_ready = gpio_nand_devready;
nand_set_flash_node(chip, pdev->dev.of_node);
- chip->IO_ADDR_W = chip->IO_ADDR_R;
+ chip->legacy.IO_ADDR_W = chip->legacy.IO_ADDR_R;
chip->ecc.mode = NAND_ECC_SOFT;
chip->ecc.algo = NAND_ECC_HAMMING;
chip->options = gpiomtd->plat.options;
- chip->chip_delay = gpiomtd->plat.chip_delay;
- chip->cmd_ctrl = gpio_nand_cmd_ctrl;
+ chip->legacy.chip_delay = gpiomtd->plat.chip_delay;
+ chip->legacy.cmd_ctrl = gpio_nand_cmd_ctrl;
mtd = nand_to_mtd(chip);
mtd->dev.parent = dev;
@@ -289,7 +290,7 @@ static int gpio_nand_probe(struct platform_device *pdev)
if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
gpiod_direction_output(gpiomtd->nwp, 1);
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
goto err_wp;
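The gpio driver's probe/remove pair condenses the whole migration: legacy callbacks move under chip->legacy, and nand_scan()/nand_release() take the chip instead of the mtd. A minimal probe skeleton under those assumptions (the my_* names carry over from the earlier sketch):

#include <linux/mtd/rawnand.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct my_nand *priv;
	struct nand_chip *chip;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	chip = &priv->chip;

	chip->legacy.cmd_ctrl = my_cmd_ctrl;	/* was chip->cmd_ctrl */
	chip->legacy.chip_delay = 20;		/* was chip->chip_delay */

	ret = nand_scan(chip, 1);		/* was nand_scan(mtd, 1) */
	if (ret)
		return ret;

	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
	if (ret)
		nand_release(chip);		/* was nand_release(mtd) */

	return ret;
}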
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
index 88ea2203e263..bd4cfac6b5aa 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
@@ -471,10 +471,9 @@ void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
udelay(dll_wait_time_us);
}
-int gpmi_setup_data_interface(struct mtd_info *mtd, int chipnr,
+int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
const struct nand_data_interface *conf)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
const struct nand_sdr_timings *sdr;
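gpmi_setup_data_interface() above applies the same rule to the timing hook. A sketch of a chip-based ->setup_data_interface(), assuming the standard nand_get_sdr_timings() helper and the NAND_DATA_IFACE_CHECK_ONLY convention; the timing computation at the end is purely illustrative:

static int my_setup_data_interface(struct nand_chip *chip, int csline,
				   const struct nand_data_interface *conf)
{
	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
	unsigned int twp_ns;

	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* the first pass only checks whether the timings are achievable */
	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	/* SDR timings are expressed in picoseconds */
	twp_ns = DIV_ROUND_UP(sdr->tWP_min, 1000);

	/* a real driver programs its timing registers from twp_ns etc. */
	(void)twp_ns;

	return 0;
}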
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 1c1ebbc82824..94c2b7525c85 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -783,9 +783,8 @@ error_alloc:
return -ENOMEM;
}
-static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
+static void gpmi_cmd_ctrl(struct nand_chip *chip, int data, unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
int ret;
@@ -817,17 +816,15 @@ static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
this->command_length = 0;
}
-static int gpmi_dev_ready(struct mtd_info *mtd)
+static int gpmi_dev_ready(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
return gpmi_is_ready(this, this->current_chip);
}
-static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
+static void gpmi_select_chip(struct nand_chip *chip, int chipnr)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
int ret;
@@ -859,9 +856,8 @@ static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
this->current_chip = chipnr;
}
-static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void gpmi_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
dev_dbg(this->dev, "len is %d\n", len);
@@ -869,9 +865,8 @@ static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
gpmi_read_data(this, buf, len);
}
-static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void gpmi_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
dev_dbg(this->dev, "len is %d\n", len);
@@ -879,13 +874,12 @@ static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
gpmi_send_data(this, buf, len);
}
-static uint8_t gpmi_read_byte(struct mtd_info *mtd)
+static uint8_t gpmi_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
uint8_t *buf = this->data_buffer_dma;
- gpmi_read_buf(mtd, buf, 1);
+ gpmi_read_buf(chip, buf, 1);
return buf[0];
}
@@ -1085,8 +1079,8 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
return max_bitflips;
}
-static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
nand_read_page_op(chip, page, 0, NULL, 0);
@@ -1094,8 +1088,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
}
/* Fake a virtual small page for the subpage read */
-static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offs, uint32_t len, uint8_t *buf, int page)
+static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
+ uint32_t len, uint8_t *buf, int page)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
void __iomem *bch_regs = this->resources.bch_regs;
@@ -1130,7 +1124,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
dev_dbg(this->dev,
"page:%d, first:%d, last:%d, marker at:%d\n",
page, first, last, marker_pos);
- return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+ return gpmi_ecc_read_page(chip, buf, 0, page);
}
}
@@ -1182,9 +1176,10 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
return max_bitflips;
}
-static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
const void *payload_virt;
@@ -1324,9 +1319,9 @@ exit_auxiliary:
* ECC-based or raw view of the page is implicit in which function it calls
* (there is a similar pair of ECC-based/raw functions for writing).
*/
-static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
dev_dbg(this->dev, "page number is %d\n", page);
@@ -1335,7 +1330,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
/* Read out the conventional OOB. */
nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
/*
* Now, we want to make sure the block mark is correct. In the
@@ -1345,15 +1340,15 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
if (GPMI_IS_MX23(this)) {
/* Read the block mark into the first byte of the OOB buffer. */
nand_read_page_op(chip, page, 0, NULL, 0);
- chip->oob_poi[0] = chip->read_byte(mtd);
+ chip->oob_poi[0] = chip->legacy.read_byte(chip);
}
return 0;
}
-static int
-gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct mtd_oob_region of = { };
/* Do we have available oob area? */
@@ -1380,10 +1375,10 @@ gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
* See set_geometry_by_ecc_info inline comments for a full description
* of the layout used by the GPMI controller.
*/
-static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf,
+static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
int eccsize = nfc_geo->ecc_chunk_size;
@@ -1464,11 +1459,10 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
* See set_geometry_by_ecc_info inline comments for a full description
* of the layout used by the GPMI controller.
*/
-static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf,
+static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
int eccsize = nfc_geo->ecc_chunk_size;
@@ -1536,28 +1530,26 @@ static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
mtd->writesize + mtd->oobsize);
}
-static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
{
- return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
+ return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
}
-static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
{
- return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
+ return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
}
-static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
+static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct gpmi_nand_data *this = nand_get_controller_data(chip);
int ret = 0;
uint8_t *block_mark;
int column, page, chipnr;
chipnr = (int)(ofs >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
@@ -1570,7 +1562,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
ret = nand_prog_page_op(chip, page, column, block_mark, 1);
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
return ret;
}
@@ -1607,7 +1599,6 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
struct boot_rom_geometry *rom_geo = &this->rom_geometry;
struct device *dev = this->dev;
struct nand_chip *chip = &this->nand;
- struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int search_area_size_in_strides;
unsigned int stride;
unsigned int page;
@@ -1619,7 +1610,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
saved_chip_number = this->current_chip;
- chip->select_chip(mtd, 0);
+ chip->select_chip(chip, 0);
/*
* Loop through the first search area, looking for the NCB fingerprint.
@@ -1637,7 +1628,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
* and starts in the 12th byte of the page.
*/
nand_read_page_op(chip, page, 12, NULL, 0);
- chip->read_buf(mtd, buffer, strlen(fingerprint));
+ chip->legacy.read_buf(chip, buffer, strlen(fingerprint));
/* Look for the fingerprint. */
if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
@@ -1647,7 +1638,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
}
- chip->select_chip(mtd, saved_chip_number);
+ chip->select_chip(chip, saved_chip_number);
if (found_an_ncb_fingerprint)
dev_dbg(dev, "\tFound a fingerprint\n");
@@ -1690,7 +1681,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Select chip 0. */
saved_chip_number = this->current_chip;
- chip->select_chip(mtd, 0);
+ chip->select_chip(chip, 0);
/* Loop over blocks in the first search area, erasing them. */
dev_dbg(dev, "Erasing the search area...\n");
@@ -1716,13 +1707,13 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Write the first page of the current stride. */
dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
- status = chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
+ status = chip->ecc.write_page_raw(chip, buffer, 0, page);
if (status)
dev_err(dev, "[%s] Write failed.\n", __func__);
}
/* Deselect chip 0. */
- chip->select_chip(mtd, saved_chip_number);
+ chip->select_chip(chip, saved_chip_number);
return 0;
}
@@ -1771,10 +1762,10 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
byte = block << chip->phys_erase_shift;
/* Send the command to read the conventional block mark. */
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
- block_mark = chip->read_byte(mtd);
- chip->select_chip(mtd, -1);
+ block_mark = chip->legacy.read_byte(chip);
+ chip->select_chip(chip, -1);
/*
* Check if the block is marked bad. If so, we need to mark it
@@ -1783,7 +1774,7 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
*/
if (block_mark != 0xff) {
dev_dbg(dev, "Transcribing mark in block %u\n", block);
- ret = chip->block_markbad(mtd, byte);
+ ret = chip->legacy.block_markbad(chip, byte);
if (ret)
dev_err(dev,
"Failed to mark block bad with ret %d\n",
@@ -1911,13 +1902,13 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
nand_set_flash_node(chip, this->pdev->dev.of_node);
chip->select_chip = gpmi_select_chip;
chip->setup_data_interface = gpmi_setup_data_interface;
- chip->cmd_ctrl = gpmi_cmd_ctrl;
- chip->dev_ready = gpmi_dev_ready;
- chip->read_byte = gpmi_read_byte;
- chip->read_buf = gpmi_read_buf;
- chip->write_buf = gpmi_write_buf;
+ chip->legacy.cmd_ctrl = gpmi_cmd_ctrl;
+ chip->legacy.dev_ready = gpmi_dev_ready;
+ chip->legacy.read_byte = gpmi_read_byte;
+ chip->legacy.read_buf = gpmi_read_buf;
+ chip->legacy.write_buf = gpmi_write_buf;
chip->badblock_pattern = &gpmi_bbt_descr;
- chip->block_markbad = gpmi_block_markbad;
+ chip->legacy.block_markbad = gpmi_block_markbad;
chip->options |= NAND_NO_SUBPAGE_WRITE;
/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
@@ -1934,7 +1925,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
goto err_out;
chip->dummy_controller.ops = &gpmi_nand_controller_ops;
- ret = nand_scan(mtd, GPMI_IS_MX6(this) ? 2 : 1);
+ ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
if (ret)
goto err_out;
@@ -2026,7 +2017,7 @@ static int gpmi_nand_remove(struct platform_device *pdev)
{
struct gpmi_nand_data *this = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&this->nand));
+ nand_release(&this->nand);
gpmi_free_dma_buffer(this);
release_resources(this);
return 0;
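gpmi_block_markbad() above also shows the call-site half of the select_chip change: selection is now chip->select_chip(chip, n), with -1 deselecting. A condensed sketch of the select/operate/deselect bracket (bad-block-marker layout simplified):

static int my_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr = (int)(ofs >> chip->chip_shift);
	int page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	u8 bbm = 0;	/* any non-0xff byte marks the block bad */
	int ret;

	chip->select_chip(chip, chipnr);	/* was (mtd, chipnr) */
	ret = nand_prog_page_op(chip, page, mtd->writesize, &bbm, 1);
	chip->select_chip(chip, -1);		/* deselect */

	return ret;
}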
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
index 69cd0cbde4f2..d0b79bac2728 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -178,7 +178,7 @@ int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
int gpmi_send_command(struct gpmi_nand_data *);
int gpmi_enable_clk(struct gpmi_nand_data *this);
int gpmi_disable_clk(struct gpmi_nand_data *this);
-int gpmi_setup_data_interface(struct mtd_info *mtd, int chipnr,
+int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
const struct nand_data_interface *conf);
void gpmi_nfc_apply_timings(struct gpmi_nand_data *this);
int gpmi_read_data(struct gpmi_nand_data *, void *buf, int len);
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index 950dc7789296..f043938ee36b 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -353,9 +353,8 @@ static int hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect)
return 0;
}
-static void hisi_nfc_select_chip(struct mtd_info *mtd, int chipselect)
+static void hisi_nfc_select_chip(struct nand_chip *chip, int chipselect)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct hinfc_host *host = nand_get_controller_data(chip);
if (chipselect < 0)
@@ -364,9 +363,8 @@ static void hisi_nfc_select_chip(struct mtd_info *mtd, int chipselect)
host->chipselect = chipselect;
}
-static uint8_t hisi_nfc_read_byte(struct mtd_info *mtd)
+static uint8_t hisi_nfc_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct hinfc_host *host = nand_get_controller_data(chip);
if (host->command == NAND_CMD_STATUS)
@@ -380,28 +378,17 @@ static uint8_t hisi_nfc_read_byte(struct mtd_info *mtd)
return *(uint8_t *)(host->buffer + host->offset - 1);
}
-static u16 hisi_nfc_read_word(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct hinfc_host *host = nand_get_controller_data(chip);
-
- host->offset += 2;
- return *(u16 *)(host->buffer + host->offset - 2);
-}
-
static void
-hisi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+hisi_nfc_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct hinfc_host *host = nand_get_controller_data(chip);
memcpy(host->buffer + host->offset, buf, len);
host->offset += len;
}
-static void hisi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void hisi_nfc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct hinfc_host *host = nand_get_controller_data(chip);
memcpy(buf, host->buffer + host->offset, len);
@@ -442,10 +429,10 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr)
}
}
-static void hisi_nfc_cmdfunc(struct mtd_info *mtd, unsigned command, int column,
- int page_addr)
+static void hisi_nfc_cmdfunc(struct nand_chip *chip, unsigned command,
+ int column, int page_addr)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct hinfc_host *host = nand_get_controller_data(chip);
int is_cache_invalid = 1;
unsigned int flag = 0;
@@ -537,15 +524,16 @@ static irqreturn_t hinfc_irq_handle(int irq, void *devid)
return IRQ_HANDLED;
}
-static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+static int hisi_nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct hinfc_host *host = nand_get_controller_data(chip);
int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
int stat_1, stat_2;
nand_read_page_op(chip, page, 0, buf, mtd->writesize);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
/* errors which cannot be corrected by ECC */
if (host->irq_status & HINFC504_INTS_UE) {
@@ -569,9 +557,9 @@ static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
return max_bitflips;
}
-static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int hisi_nand_read_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct hinfc_host *host = nand_get_controller_data(chip);
nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
@@ -585,13 +573,15 @@ static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
-static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf, int oob_required,
- int page)
+static int hisi_nand_write_page_hwecc(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
@@ -792,15 +782,14 @@ static int hisi_nfc_probe(struct platform_device *pdev)
nand_set_controller_data(chip, host);
nand_set_flash_node(chip, np);
- chip->cmdfunc = hisi_nfc_cmdfunc;
+ chip->legacy.cmdfunc = hisi_nfc_cmdfunc;
chip->select_chip = hisi_nfc_select_chip;
- chip->read_byte = hisi_nfc_read_byte;
- chip->read_word = hisi_nfc_read_word;
- chip->write_buf = hisi_nfc_write_buf;
- chip->read_buf = hisi_nfc_read_buf;
- chip->chip_delay = HINFC504_CHIP_DELAY;
- chip->set_features = nand_get_set_features_notsupp;
- chip->get_features = nand_get_set_features_notsupp;
+ chip->legacy.read_byte = hisi_nfc_read_byte;
+ chip->legacy.write_buf = hisi_nfc_write_buf;
+ chip->legacy.read_buf = hisi_nfc_read_buf;
+ chip->legacy.chip_delay = HINFC504_CHIP_DELAY;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
hisi_nfc_host_init(host);
@@ -811,7 +800,7 @@ static int hisi_nfc_probe(struct platform_device *pdev)
}
chip->dummy_controller.ops = &hisi_nfc_controller_ops;
- ret = nand_scan(mtd, max_chips);
+ ret = nand_scan(chip, max_chips);
if (ret)
return ret;
@@ -828,9 +817,8 @@ static int hisi_nfc_probe(struct platform_device *pdev)
static int hisi_nfc_remove(struct platform_device *pdev)
{
struct hinfc_host *host = platform_get_drvdata(pdev);
- struct mtd_info *mtd = nand_to_mtd(&host->chip);
- nand_release(mtd);
+ nand_release(&host->chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
new file mode 100644
index 000000000000..04c2cf74eff3
--- /dev/null
+++ b/drivers/mtd/nand/raw/internals.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 - Bootlin
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ *
+ * Header containing internal definitions to be used only by core files.
+ * NAND controller drivers should not include this file.
+ */
+
+#ifndef __LINUX_RAWNAND_INTERNALS
+#define __LINUX_RAWNAND_INTERNALS
+
+#include <linux/mtd/rawnand.h>
+
+/*
+ * NAND Flash Manufacturer ID Codes
+ */
+#define NAND_MFR_AMD 0x01
+#define NAND_MFR_ATO 0x9b
+#define NAND_MFR_EON 0x92
+#define NAND_MFR_ESMT 0xc8
+#define NAND_MFR_FUJITSU 0x04
+#define NAND_MFR_HYNIX 0xad
+#define NAND_MFR_INTEL 0x89
+#define NAND_MFR_MACRONIX 0xc2
+#define NAND_MFR_MICRON 0x2c
+#define NAND_MFR_NATIONAL 0x8f
+#define NAND_MFR_RENESAS 0x07
+#define NAND_MFR_SAMSUNG 0xec
+#define NAND_MFR_SANDISK 0x45
+#define NAND_MFR_STMICRO 0x20
+#define NAND_MFR_TOSHIBA 0x98
+#define NAND_MFR_WINBOND 0xef
+
+/**
+ * struct nand_manufacturer_ops - NAND Manufacturer operations
+ * @detect: detect the NAND memory organization and capabilities
+ * @init: initialize all vendor specific fields (like the ->read_retry()
+ * implementation) if any.
+ * @cleanup: the ->init() function may have allocated resources; ->cleanup()
+ * is here to let vendor specific code release those resources.
+ * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
+ * page. This is called after the checksum is verified.
+ */
+struct nand_manufacturer_ops {
+ void (*detect)(struct nand_chip *chip);
+ int (*init)(struct nand_chip *chip);
+ void (*cleanup)(struct nand_chip *chip);
+ void (*fixup_onfi_param_page)(struct nand_chip *chip,
+ struct nand_onfi_params *p);
+};
+
+/**
+ * struct nand_manufacturer - NAND Flash Manufacturer structure
+ * @name: Manufacturer name
+ * @id: manufacturer ID code of device.
+ * @ops: manufacturer operations
+ */
+struct nand_manufacturer {
+ int id;
+ char *name;
+ const struct nand_manufacturer_ops *ops;
+};
+
+
+extern struct nand_flash_dev nand_flash_ids[];
+
+extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
+extern const struct nand_manufacturer_ops esmt_nand_manuf_ops;
+extern const struct nand_manufacturer_ops hynix_nand_manuf_ops;
+extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
+extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
+extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
+extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
+
+/* Core functions */
+const struct nand_manufacturer *nand_get_manufacturer(u8 id);
+int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs);
+int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
+ int allowbbt);
+int onfi_fill_data_interface(struct nand_chip *chip,
+ enum nand_data_interface_type type,
+ int timing_mode);
+int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page);
+int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page);
+int nand_exit_status_op(struct nand_chip *chip);
+int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len);
+void nand_decode_ext_id(struct nand_chip *chip);
+void panic_nand_wait(struct nand_chip *chip, unsigned long timeo);
+void sanitize_string(uint8_t *s, size_t len);
+
+/* BBT functions */
+int nand_markbad_bbt(struct nand_chip *chip, loff_t offs);
+int nand_isreserved_bbt(struct nand_chip *chip, loff_t offs);
+int nand_isbad_bbt(struct nand_chip *chip, loff_t offs, int allowbbt);
+
+/* Legacy */
+void nand_legacy_set_defaults(struct nand_chip *chip);
+void nand_legacy_adjust_cmdfunc(struct nand_chip *chip);
+int nand_legacy_check_hooks(struct nand_chip *chip);
+
+/* ONFI functions */
+u16 onfi_crc16(u16 crc, u8 const *p, size_t len);
+int nand_onfi_detect(struct nand_chip *chip);
+
+/* JEDEC functions */
+int nand_jedec_detect(struct nand_chip *chip);
+
+#endif /* __LINUX_RAWNAND_INTERNALS */
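internals.h makes the manufacturer hooks private to the NAND core. A hedged sketch of how a vendor table slots into struct nand_manufacturer_ops above; the acme_* names and the ID are hypothetical, and only core files such as nand_ids.c would reference the resulting ops:

/* hypothetical vendor detect hook, built on the core helper above */
static void acme_nand_decode_id(struct nand_chip *chip)
{
	/* fall back to the generic extended-ID decoding */
	nand_decode_ext_id(chip);
}

const struct nand_manufacturer_ops acme_nand_manuf_ops = {
	.detect = acme_nand_decode_id,
};

/*
 * The core's manufacturer table would then carry an entry such as
 * { 0x77, "Acme", &acme_nand_manuf_ops } (ID hypothetical).
 */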
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c
index a7515452bc59..fb59cfca11a7 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/jz4740_nand.c
@@ -78,10 +78,9 @@ static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd)
return container_of(mtd_to_nand(mtd), struct jz_nand, chip);
}
-static void jz_nand_select_chip(struct mtd_info *mtd, int chipnr)
+static void jz_nand_select_chip(struct nand_chip *chip, int chipnr)
{
- struct jz_nand *nand = mtd_to_jz_nand(mtd);
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip));
uint32_t ctrl;
int banknr;
@@ -92,18 +91,18 @@ static void jz_nand_select_chip(struct mtd_info *mtd, int chipnr)
banknr = -1;
} else {
banknr = nand->banks[chipnr] - 1;
- chip->IO_ADDR_R = nand->bank_base[banknr];
- chip->IO_ADDR_W = nand->bank_base[banknr];
+ chip->legacy.IO_ADDR_R = nand->bank_base[banknr];
+ chip->legacy.IO_ADDR_W = nand->bank_base[banknr];
}
writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
nand->selected_bank = banknr;
}
-static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+static void jz_nand_cmd_ctrl(struct nand_chip *chip, int dat,
+ unsigned int ctrl)
{
- struct jz_nand *nand = mtd_to_jz_nand(mtd);
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip));
uint32_t reg;
void __iomem *bank_base = nand->bank_base[nand->selected_bank];
@@ -115,7 +114,7 @@ static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
bank_base += JZ_NAND_MEM_ADDR_OFFSET;
else if (ctrl & NAND_CLE)
bank_base += JZ_NAND_MEM_CMD_OFFSET;
- chip->IO_ADDR_W = bank_base;
+ chip->legacy.IO_ADDR_W = bank_base;
reg = readl(nand->base + JZ_REG_NAND_CTRL);
if (ctrl & NAND_NCE)
@@ -125,18 +124,18 @@ static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
writel(reg, nand->base + JZ_REG_NAND_CTRL);
}
if (dat != NAND_CMD_NONE)
- writeb(dat, chip->IO_ADDR_W);
+ writeb(dat, chip->legacy.IO_ADDR_W);
}
-static int jz_nand_dev_ready(struct mtd_info *mtd)
+static int jz_nand_dev_ready(struct nand_chip *chip)
{
- struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip));
return gpiod_get_value_cansleep(nand->busy_gpio);
}
-static void jz_nand_hwctl(struct mtd_info *mtd, int mode)
+static void jz_nand_hwctl(struct nand_chip *chip, int mode)
{
- struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip));
uint32_t reg;
writel(0, nand->base + JZ_REG_NAND_IRQ_STAT);
@@ -162,10 +161,10 @@ static void jz_nand_hwctl(struct mtd_info *mtd, int mode)
writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
}
-static int jz_nand_calculate_ecc_rs(struct mtd_info *mtd, const uint8_t *dat,
- uint8_t *ecc_code)
+static int jz_nand_calculate_ecc_rs(struct nand_chip *chip, const uint8_t *dat,
+ uint8_t *ecc_code)
{
- struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip));
uint32_t reg, status;
int i;
unsigned int timeout = 1000;
@@ -215,10 +214,10 @@ static void jz_nand_correct_data(uint8_t *dat, int index, int mask)
dat[index+1] = (data >> 8) & 0xff;
}
-static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
- uint8_t *read_ecc, uint8_t *calc_ecc)
+static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
{
- struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ struct jz_nand *nand = mtd_to_jz_nand(nand_to_mtd(chip));
int i, error_count, index;
uint32_t reg, status, error;
unsigned int timeout = 1000;
@@ -331,19 +330,19 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
if (chipnr == 0) {
/* Detect first chip. */
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
goto notfound_id;
/* Retrieve the IDs from the first chip. */
- chip->select_chip(mtd, 0);
+ chip->select_chip(chip, 0);
nand_reset_op(chip);
nand_readid_op(chip, 0, id, sizeof(id));
*nand_maf_id = id[0];
*nand_dev_id = id[1];
} else {
/* Detect additional chip. */
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
nand_reset_op(chip);
nand_readid_op(chip, 0, id, sizeof(id));
if (*nand_maf_id != id[0] || *nand_dev_id != id[1]) {
@@ -426,13 +425,13 @@ static int jz_nand_probe(struct platform_device *pdev)
chip->ecc.strength = 4;
chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
- chip->chip_delay = 50;
- chip->cmd_ctrl = jz_nand_cmd_ctrl;
+ chip->legacy.chip_delay = 50;
+ chip->legacy.cmd_ctrl = jz_nand_cmd_ctrl;
chip->select_chip = jz_nand_select_chip;
chip->dummy_controller.ops = &jz_nand_controller_ops;
if (nand->busy_gpio)
- chip->dev_ready = jz_nand_dev_ready;
+ chip->legacy.dev_ready = jz_nand_dev_ready;
platform_set_drvdata(pdev, nand);
@@ -507,7 +506,7 @@ static int jz_nand_remove(struct platform_device *pdev)
struct jz_nand *nand = platform_get_drvdata(pdev);
size_t i;
- nand_release(nand_to_mtd(&nand->chip));
+ nand_release(&nand->chip);
/* Deassert and disable all chips */
writel(0, nand->base + JZ_REG_NAND_CTRL);
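jz4740's dev_ready hook above samples a busy GPIO through the gpiod API; whether the raw value needs inverting depends on the pin polarity declared in the device tree (compare the jz4780 hunk below, which negates it). A minimal sketch assuming an illustrative active-high busy line (priv->busy_gpio is a stand-in field):

#include <linux/gpio/consumer.h>

static int my_dev_ready(struct nand_chip *chip)
{
	struct my_nand *priv = to_my_nand(chip);

	/* busy high means not ready; DT gpio flags may fold this in */
	return !gpiod_get_value_cansleep(priv->busy_gpio);
}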
diff --git a/drivers/mtd/nand/raw/jz4780_nand.c b/drivers/mtd/nand/raw/jz4780_nand.c
index db4fa60bd52a..cdf22100ab77 100644
--- a/drivers/mtd/nand/raw/jz4780_nand.c
+++ b/drivers/mtd/nand/raw/jz4780_nand.c
@@ -71,9 +71,9 @@ static inline struct jz4780_nand_controller
return container_of(ctrl, struct jz4780_nand_controller, controller);
}
-static void jz4780_nand_select_chip(struct mtd_info *mtd, int chipnr)
+static void jz4780_nand_select_chip(struct nand_chip *chip, int chipnr)
{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+ struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
struct jz4780_nand_cs *cs;
@@ -86,10 +86,10 @@ static void jz4780_nand_select_chip(struct mtd_info *mtd, int chipnr)
nfc->selected = chipnr;
}
-static void jz4780_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+static void jz4780_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+ struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
struct jz4780_nand_cs *cs;
@@ -109,24 +109,24 @@ static void jz4780_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
writeb(cmd, cs->base + OFFSET_CMD);
}
-static int jz4780_nand_dev_ready(struct mtd_info *mtd)
+static int jz4780_nand_dev_ready(struct nand_chip *chip)
{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+ struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
return !gpiod_get_value_cansleep(nand->busy_gpio);
}
-static void jz4780_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
+static void jz4780_nand_ecc_hwctl(struct nand_chip *chip, int mode)
{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+ struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
nand->reading = (mode == NAND_ECC_READ);
}
-static int jz4780_nand_ecc_calculate(struct mtd_info *mtd, const u8 *dat,
+static int jz4780_nand_ecc_calculate(struct nand_chip *chip, const u8 *dat,
u8 *ecc_code)
{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+ struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
struct jz4780_bch_params params;
@@ -144,10 +144,10 @@ static int jz4780_nand_ecc_calculate(struct mtd_info *mtd, const u8 *dat,
return jz4780_bch_calculate(nfc->bch, &params, dat, ecc_code);
}
-static int jz4780_nand_ecc_correct(struct mtd_info *mtd, u8 *dat,
+static int jz4780_nand_ecc_correct(struct nand_chip *chip, u8 *dat,
u8 *read_ecc, u8 *calc_ecc)
{
- struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+ struct jz4780_nand_chip *nand = to_jz4780_nand_chip(nand_to_mtd(chip));
struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
struct jz4780_bch_params params;
@@ -256,7 +256,7 @@ static int jz4780_nand_init_chip(struct platform_device *pdev,
dev_err(dev, "failed to request busy GPIO: %d\n", ret);
return ret;
} else if (nand->busy_gpio) {
- nand->chip.dev_ready = jz4780_nand_dev_ready;
+ nand->chip.legacy.dev_ready = jz4780_nand_dev_ready;
}
nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
@@ -275,24 +275,24 @@ static int jz4780_nand_init_chip(struct platform_device *pdev,
return -ENOMEM;
mtd->dev.parent = dev;
- chip->IO_ADDR_R = cs->base + OFFSET_DATA;
- chip->IO_ADDR_W = cs->base + OFFSET_DATA;
- chip->chip_delay = RB_DELAY_US;
+ chip->legacy.IO_ADDR_R = cs->base + OFFSET_DATA;
+ chip->legacy.IO_ADDR_W = cs->base + OFFSET_DATA;
+ chip->legacy.chip_delay = RB_DELAY_US;
chip->options = NAND_NO_SUBPAGE_WRITE;
chip->select_chip = jz4780_nand_select_chip;
- chip->cmd_ctrl = jz4780_nand_cmd_ctrl;
+ chip->legacy.cmd_ctrl = jz4780_nand_cmd_ctrl;
chip->ecc.mode = NAND_ECC_HW;
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
chip->controller->ops = &jz4780_nand_controller_ops;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
- nand_release(mtd);
+ nand_release(chip);
return ret;
}
@@ -307,7 +307,7 @@ static void jz4780_nand_cleanup_chips(struct jz4780_nand_controller *nfc)
while (!list_empty(&nfc->chips)) {
chip = list_first_entry(&nfc->chips, struct jz4780_nand_chip, chip_list);
- nand_release(nand_to_mtd(&chip->chip));
+ nand_release(&chip->chip);
list_del(&chip->chip_list);
}
}
@@ -352,7 +352,7 @@ static int jz4780_nand_probe(struct platform_device *pdev)
return -ENODEV;
}
- nfc = devm_kzalloc(dev, sizeof(*nfc) + (sizeof(nfc->cs[0]) * num_banks), GFP_KERNEL);
+ nfc = devm_kzalloc(dev, struct_size(nfc, cs, num_banks), GFP_KERNEL);
if (!nfc)
return -ENOMEM;
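Besides the hook conversion, the jz4780 hunk replaces the open-coded trailing-array allocation with struct_size(), which computes sizeof(*nfc) + num_banks * sizeof(nfc->cs[0]) with overflow checking. The idiom, sketched with illustrative types:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/overflow.h>

struct my_cs {
	void __iomem *base;
};

struct my_ctrl {
	unsigned int num_cs;
	struct my_cs cs[];	/* flexible array member, must be last */
};

static struct my_ctrl *my_alloc_ctrl(struct device *dev,
				     unsigned int num_banks)
{
	struct my_ctrl *nfc;

	/* was: sizeof(*nfc) + sizeof(nfc->cs[0]) * num_banks */
	nfc = devm_kzalloc(dev, struct_size(nfc, cs, num_banks), GFP_KERNEL);
	if (nfc)
		nfc->num_cs = num_banks;

	return nfc;
}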
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index e82abada130a..abbb655fe154 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -286,10 +286,9 @@ static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
/*
* Hardware specific access to control lines
*/
-static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+static void lpc32xx_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
unsigned int ctrl)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
if (cmd != NAND_CMD_NONE) {
@@ -303,9 +302,8 @@ static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
/*
* Read Device Ready (NAND device _and_ controller ready)
*/
-static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
if ((readb(MLC_ISR(host->io_base)) &
@@ -330,8 +328,9 @@ static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
return IRQ_HANDLED;
}
-static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
+static int lpc32xx_waitfunc_nand(struct nand_chip *chip)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
@@ -349,9 +348,9 @@ exit:
return NAND_STATUS_READY;
}
-static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
- struct nand_chip *chip)
+static int lpc32xx_waitfunc_controller(struct nand_chip *chip)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
@@ -369,10 +368,10 @@ exit:
return NAND_STATUS_READY;
}
-static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+static int lpc32xx_waitfunc(struct nand_chip *chip)
{
- lpc32xx_waitfunc_nand(mtd, chip);
- lpc32xx_waitfunc_controller(mtd, chip);
+ lpc32xx_waitfunc_nand(chip);
+ lpc32xx_waitfunc_controller(chip);
return NAND_STATUS_READY;
}
@@ -442,9 +441,10 @@ out1:
return -ENXIO;
}
-static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int lpc32xx_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
int i, j;
uint8_t *oobbuf = chip->oob_poi;
@@ -470,7 +470,7 @@ static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));
/* Wait for Controller Ready */
- lpc32xx_waitfunc_controller(mtd, chip);
+ lpc32xx_waitfunc_controller(chip);
/* Check ECC Error status */
mlc_isr = readl(MLC_ISR(host->io_base));
@@ -507,11 +507,11 @@ static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
-static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int lpc32xx_write_page_lowlevel(struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
const uint8_t *oobbuf = chip->oob_poi;
uint8_t *dma_buf = (uint8_t *)buf;
@@ -551,32 +551,30 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));
/* Wait for Controller Ready */
- lpc32xx_waitfunc_controller(mtd, chip);
+ lpc32xx_waitfunc_controller(chip);
}
return nand_prog_page_end_op(chip);
}
-static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int lpc32xx_read_oob(struct nand_chip *chip, int page)
{
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
/* Read whole page - necessary with MLC controller! */
- lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);
+ lpc32xx_read_page(chip, host->dummy_buf, 1, page);
return 0;
}
-static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int lpc32xx_write_oob(struct nand_chip *chip, int page)
{
/* No-op: write_oob conflicts with the automatic LPC MLC ECC decoder! */
return 0;
}
/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
-static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
+static void lpc32xx_ecc_enable(struct nand_chip *chip, int mode)
{
/* Always enabled! */
}
@@ -741,11 +739,11 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
if (res)
goto put_clk;
- nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
- nand_chip->dev_ready = lpc32xx_nand_device_ready;
- nand_chip->chip_delay = 25; /* us */
- nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
- nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);
+ nand_chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ nand_chip->legacy.dev_ready = lpc32xx_nand_device_ready;
+ nand_chip->legacy.chip_delay = 25; /* us */
+ nand_chip->legacy.IO_ADDR_R = MLC_DATA(host->io_base);
+ nand_chip->legacy.IO_ADDR_W = MLC_DATA(host->io_base);
/* Init NAND controller */
lpc32xx_nand_setup(host);
@@ -762,7 +760,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
nand_chip->ecc.read_oob = lpc32xx_read_oob;
nand_chip->ecc.strength = 4;
nand_chip->ecc.bytes = 10;
- nand_chip->waitfunc = lpc32xx_waitfunc;
+ nand_chip->legacy.waitfunc = lpc32xx_waitfunc;
nand_chip->options = NAND_NO_SUBPAGE_WRITE;
nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
@@ -802,7 +800,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
* SMALL block or LARGE block.
*/
nand_chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
- res = nand_scan(mtd, 1);
+ res = nand_scan(nand_chip, 1);
if (res)
goto free_irq;
@@ -839,9 +837,8 @@ free_gpio:
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
- struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
- nand_release(mtd);
+ nand_release(&host->nand_chip);
free_irq(host->irq, host);
if (use_dma)
dma_release_channel(host->dma_chan);
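lpc32xx_mlc's waitfunc chain shows ->waitfunc moving under chip->legacy with the chip-only prototype. A condensed, hedged sketch, where priv->isr and MY_READY stand in for the driver's real status register and ready bit:

#include <linux/bits.h>
#include <linux/io.h>

#define MY_READY BIT(0)	/* illustrative ready bit */

static int my_waitfunc(struct nand_chip *chip)
{
	struct my_nand *priv = to_my_nand(chip);
	int timeout = 1000000;

	/* poll the controller ready bit; a real driver would also sleep */
	while (--timeout && !(readb(priv->isr) & MY_READY))
		cpu_relax();

	return timeout ? NAND_STATUS_READY : NAND_STATUS_FAIL;
}

/* wired up as: chip->legacy.waitfunc = my_waitfunc; */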
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index a4e8b7e75135..f2f2cdbb9d04 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -278,11 +278,10 @@ static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
/*
* Hardware specific access to control lines
*/
-static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
+static void lpc32xx_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
{
uint32_t tmp;
- struct nand_chip *chip = mtd_to_nand(mtd);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
/* Does CE state need to be changed? */
@@ -304,9 +303,8 @@ static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
/*
* Read the Device Ready pin
*/
-static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+static int lpc32xx_nand_device_ready(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
int rdy = 0;
@@ -337,7 +335,7 @@ static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
/*
* Prepares SLC for transfers with H/W ECC enabled
*/
-static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode)
+static void lpc32xx_nand_ecc_enable(struct nand_chip *chip, int mode)
{
/* Hardware ECC is enabled automatically as needed */
}
@@ -345,7 +343,7 @@ static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode)
/*
* Calculates the ECC for the data
*/
-static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd,
+static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
const unsigned char *buf,
unsigned char *code)
{
@@ -359,9 +357,8 @@ static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd,
/*
* Read a single byte from NAND device
*/
-static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd)
+static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
return (uint8_t)readl(SLC_DATA(host->io_base));
@@ -370,9 +367,8 @@ static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd)
/*
* Simple device read without ECC
*/
-static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void lpc32xx_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
/* Direct device read with no ECC */
@@ -383,9 +379,9 @@ static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
/*
* Simple device write without ECC
*/
-static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void lpc32xx_nand_write_buf(struct nand_chip *chip, const uint8_t *buf,
+ int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
/* Direct device write with no ECC */
@@ -396,18 +392,20 @@ static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int
/*
* Read the OOB data from the device without ECC using FIFO method
*/
-static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip, int page)
+static int lpc32xx_nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
* Write the OOB data to the device without ECC using FIFO method
*/
-static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip, int page)
+static int lpc32xx_nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
mtd->oobsize);
}
@@ -610,10 +608,10 @@ static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
* Read the data and OOB data from the device, use ECC correction with the
* data, disable ECC for the OOB data
*/
-static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf,
+static int lpc32xx_nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
struct mtd_oob_region oobregion = { };
int stat, i, status, error;
@@ -626,7 +624,7 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
/* Get OOB data */
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
/* Convert to stored ECC format */
lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
@@ -639,7 +637,7 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
oobecc = chip->oob_poi + oobregion.offset;
for (i = 0; i < chip->ecc.steps; i++) {
- stat = chip->ecc.correct(mtd, buf, oobecc,
+ stat = chip->ecc.correct(chip, buf, oobecc,
&tmpecc[i * chip->ecc.bytes]);
if (stat < 0)
mtd->ecc_stats.failed++;
@@ -657,17 +655,18 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
* Read the data and OOB data from the device, no ECC correction with the
* data or OOB data
*/
-static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int lpc32xx_nand_read_page_raw_syndrome(struct nand_chip *chip,
uint8_t *buf, int oob_required,
int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
/* Issue read command */
nand_read_page_op(chip, page, 0, NULL, 0);
/* Raw reads can just use the FIFO interface */
- chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.read_buf(chip, buf, chip->ecc.size * chip->ecc.steps);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
return 0;
}
@@ -676,11 +675,11 @@ static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
* Write the data and OOB data to the device, use ECC with the data,
* disable ECC for the OOB data
*/
-static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int lpc32xx_nand_write_page_syndrome(struct nand_chip *chip,
const uint8_t *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
struct mtd_oob_region oobregion = { };
uint8_t *pb;
@@ -705,7 +704,7 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
/* Write ECC data to device */
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
@@ -714,15 +713,16 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
* Write the data and OOB data to the device, no ECC correction with the
* data or OOB data
*/
-static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int lpc32xx_nand_write_page_raw_syndrome(struct nand_chip *chip,
const uint8_t *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
/* Raw writes can just use the FIFO interface */
nand_prog_page_begin_op(chip, page, 0, buf,
chip->ecc.size * chip->ecc.steps);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
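
All of the hook conversions in this patch follow the same mechanical pattern: the legacy callbacks lose their struct mtd_info parameter, take the struct nand_chip directly, and derive the mtd view with nand_to_mtd() only where one is still needed. A minimal sketch of the pattern (the foo_* name is hypothetical, not part of the patch):

    /* Before: the hook received the mtd device and looked up the chip. */
    static void foo_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
    {
            struct nand_chip *chip = mtd_to_nand(mtd);
            /* ... access controller state through chip ... */
    }

    /* After: the hook receives the chip; mtd is derived only on demand. */
    static void foo_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
    {
            struct mtd_info *mtd = nand_to_mtd(chip); /* only if oobsize etc. is used */
            /* ... access controller state through chip ... */
    }
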
@@ -878,11 +878,11 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
goto enable_wp;
/* Set NAND IO addresses and command/ready functions */
- chip->IO_ADDR_R = SLC_DATA(host->io_base);
- chip->IO_ADDR_W = SLC_DATA(host->io_base);
- chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
- chip->dev_ready = lpc32xx_nand_device_ready;
- chip->chip_delay = 20; /* 20us command delay time */
+ chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
+ chip->legacy.IO_ADDR_W = SLC_DATA(host->io_base);
+ chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ chip->legacy.dev_ready = lpc32xx_nand_device_ready;
+ chip->legacy.chip_delay = 20; /* 20us command delay time */
/* Init NAND controller */
lpc32xx_nand_setup(host);
@@ -891,9 +891,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
/* NAND callbacks for LPC32xx SLC hardware */
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
- chip->read_byte = lpc32xx_nand_read_byte;
- chip->read_buf = lpc32xx_nand_read_buf;
- chip->write_buf = lpc32xx_nand_write_buf;
+ chip->legacy.read_byte = lpc32xx_nand_read_byte;
+ chip->legacy.read_buf = lpc32xx_nand_read_buf;
+ chip->legacy.write_buf = lpc32xx_nand_write_buf;
chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
@@ -925,7 +925,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
/* Find NAND device */
chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
- res = nand_scan(mtd, 1);
+ res = nand_scan(chip, 1);
if (res)
goto release_dma;
@@ -956,9 +956,8 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
{
uint32_t tmp;
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
- struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
- nand_release(mtd);
+ nand_release(&host->nand_chip);
dma_release_channel(host->dma_chan);
/* Force CE high */
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 7af4d6213ee5..650f2b490a05 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -5,6 +5,73 @@
* Copyright (C) 2017 Marvell
* Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
*
+ *
+ * This NAND controller driver handles two versions of the hardware,
+ * one is called NFCv1 and is available on PXA SoCs and the other is
+ * called NFCv2 and is available on Armada SoCs.
+ *
+ * The main visible difference is that NFCv1 only has Hamming ECC
+ * capabilities, while NFCv2 also embeds a BCH ECC engine. Also, DMA
+ * is not used with NFCv2.
+ *
+ * The ECC layouts are depicted in detail in Marvell AN-379, but here
+ * is a brief description.
+ *
+ * When using Hamming, the data is split into 512B chunks (either 1, 2
+ * or 4) and each chunk has its own 6B ECC "digest" stored at the
+ * beginning of the OOB area, possibly followed by the remaining free
+ * OOB bytes (also called "spare" bytes in the driver). This engine
+ * corrects up to 1 bit per chunk and reliably detects an error when
+ * there are at most 2 bitflips. Here is the page layout used by the
+ * controller when Hamming is chosen:
+ *
+ * +-------------------------------------------------------------+
+ * | Data 1 | ... | Data N | ECC 1 | ... | ECC N | Free OOB bytes |
+ * +-------------------------------------------------------------+
+ *
+ * When using the BCH engine, there are N identical (data + free OOB +
+ * ECC) sections and potentially an extra one to deal with
+ * configurations where the chosen (data + free OOB + ECC) sizes do
+ * not align with the page (data + OOB) size. ECC bytes are always
+ * 30B per ECC chunk. Here is the page layout used by the controller
+ * when BCH is chosen:
+ *
+ * +-----------------------------------------
+ * | Data 1 | Free OOB bytes 1 | ECC 1 | ...
+ * +-----------------------------------------
+ *
+ * -------------------------------------------
+ * ... | Data N | Free OOB bytes N | ECC N |
+ * -------------------------------------------
+ *
+ * --------------------------------------------+
+ * | Last Data | Last Free OOB bytes | Last ECC |
+ * --------------------------------------------+
+ *
+ * In both cases, the layout seen by the user is always: all data
+ * first, then all free OOB bytes and finally all ECC bytes. With BCH,
+ * ECC bytes are 30B long and are padded with 0xFF to align on a
+ * 32-byte boundary.
+ *
+ * The controller has certain limitations that are handled by the
+ * driver:
+ * - It can only read 2k at a time. To overcome this limitation, the
+ * driver issues data cycles on the bus, without issuing new
+ * CMD + ADDR cycles. The Marvell term is "naked" operations.
+ * - The ECC strength in BCH mode cannot be tuned. It is fixed at
+ * 16 bits. What can be tuned is the ECC block size as long as it
+ * stays between 512B and 2kiB. It's usually chosen based on the
+ * chip ECC requirements. For instance, using 2kiB ECC chunks
+ * provides 4b/512B correctability.
+ * - The controller will always treat data bytes, free OOB bytes
+ * and ECC bytes in that order, no matter what the real layout is
+ * (which is usually all data then all OOB bytes). The
+ * marvell_nfc_layouts array below contains the currently
+ * supported layouts.
+ * - Because of these weird layouts, the Bad Block Markers can be
+ * located in the data section. In this case, the NAND_BBT_NO_OOB_BBM
+ * option must be set to prevent scanning/writing bad block
+ * markers.
*/
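
To make the BCH geometry above concrete, here is a small standalone sketch of the user-visible region sizes. The numbers are taken from the 4096B-page/4-bit entry in the layout table below, and the field names only mirror the driver's marvell_hw_ecc_layout by assumption:

    #include <stdio.h>

    int main(void)
    {
            /* 4096B page, 2 full chunks of 2048B data + 32B spare + 30B ECC */
            int nchunks = 2, data_bytes = 2048, spare_bytes = 32, ecc_bytes = 30;

            int data  = nchunks * data_bytes;                   /* 4096 */
            int spare = nchunks * spare_bytes;                  /* 64   */
            /* each 30B ECC chunk is padded with 0xFF up to 32B */
            int ecc   = nchunks * ((ecc_bytes + 31) / 32) * 32; /* 64   */

            printf("data=%d free-oob=%d ecc=%d\n", data, spare, ecc);
            return 0;
    }
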
#include <linux/module.h>
@@ -217,8 +284,11 @@ static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
MARVELL_LAYOUT( 512, 512, 1, 1, 1, 512, 8, 8, 0, 0, 0),
MARVELL_LAYOUT( 2048, 512, 1, 1, 1, 2048, 40, 24, 0, 0, 0),
MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30, 1024, 32, 30),
MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
+ MARVELL_LAYOUT( 8192, 512, 4, 4, 4, 2048, 0, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 8192, 512, 8, 9, 8, 1024, 0, 30, 0, 160, 30),
};
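
Judging by how the rows add up, the macro arguments appear to be (writesize, chunk size, strength, nchunks, full-chunk count, data, spare, ECC, last-data, last-spare, last-ECC); that reading is inferred from the values, not stated in the patch. Under that assumption, one quick consistency check is that per-chunk data must cover the page:

    /* Sketch: full chunks plus the trailing partial chunk must equal the
     * page size. For the new 2k/8-bit row: 1 * 1024 + 1024 == 2048. */
    static int layout_data_covers_page(int writesize, int full_chunk_cnt,
                                       int data_bytes, int last_data_bytes)
    {
            return full_chunk_cnt * data_bytes + last_data_bytes == writesize;
    }
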
/**
@@ -634,9 +704,8 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
return 0;
}
-static void marvell_nfc_select_chip(struct mtd_info *mtd, int die_nr)
+static void marvell_nfc_select_chip(struct nand_chip *chip, int die_nr)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 ndcr_generic;
@@ -686,7 +755,7 @@ static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
- if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ)))
+ if (st & (NDSR_RDY(0) | NDSR_RDY(1)))
complete(&nfc->complete);
return IRQ_HANDLED;
@@ -959,18 +1028,15 @@ static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
return ret;
}
-static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, u8 *buf,
+static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
true, page);
}
-static int marvell_nfc_hw_ecc_hmg_read_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- u8 *buf, int oob_required,
- int page)
+static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
@@ -1008,8 +1074,7 @@ static int marvell_nfc_hw_ecc_hmg_read_page(struct mtd_info *mtd,
* it appears before the ECC bytes when reading), the ->read_oob_raw() function
* also stands for ->read_oob().
*/
-static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct mtd_info *mtd,
- struct nand_chip *chip, int page)
+static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
{
/* Invalidate page cache */
chip->pagebuf = -1;
@@ -1073,8 +1138,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
return ret;
}
-static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
@@ -1082,8 +1146,7 @@ static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct mtd_info *mtd,
true, page);
}
-static int marvell_nfc_hw_ecc_hmg_write_page(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int marvell_nfc_hw_ecc_hmg_write_page(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
@@ -1102,10 +1165,11 @@ static int marvell_nfc_hw_ecc_hmg_write_page(struct mtd_info *mtd,
* it appears before the ECC bytes when reading), the ->write_oob_raw() function
* also stands for ->write_oob().
*/
-static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct nand_chip *chip,
int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
/* Invalidate page cache */
chip->pagebuf = -1;
@@ -1116,10 +1180,10 @@ static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct mtd_info *mtd,
}
/* BCH read helpers */
-static int marvell_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, u8 *buf,
+static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
u8 *oob = chip->oob_poi;
int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
@@ -1228,17 +1292,17 @@ static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
}
}
-static int marvell_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
u8 *buf, int oob_required,
int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
- int data_len = lt->data_bytes, spare_len = lt->spare_bytes, ecc_len;
- u8 *data = buf, *spare = chip->oob_poi, *ecc;
+ int data_len = lt->data_bytes, spare_len = lt->spare_bytes;
+ u8 *data = buf, *spare = chip->oob_poi;
int max_bitflips = 0;
u32 failure_mask = 0;
- int chunk, ecc_offset_in_page, ret;
+ int chunk, ret;
/*
* With BCH, OOB is not fully used (and thus not read entirely), not
@@ -1279,73 +1343,98 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd,
* the controller in normal mode and must be re-read in raw mode. To
* avoid dropping the performances, we prefer not to include them. The
* user should re-read the page in raw mode if ECC bytes are required.
+ */
+
+ /*
+ * In case a subpage read error is reported by ->correct(), we usually
+ * re-read only the ECC bytes in raw mode and check whether the whole
+ * page is empty. If it is, the ECC failure is expected and the error
+ * is simply ignored.
*
- * However, for any subpage read error reported by ->correct(), the ECC
- * bytes must be read in raw mode and the full subpage must be checked
- * to see if it is entirely empty of if there was an actual error.
+ * However, it has been empirically observed that for some layouts (e.g.
+ * 2k page, 8b strength per 512B chunk), the controller tries to correct
+ * bits and may itself introduce bitflips in the erased area. To overcome
+ * this strange behavior, the whole page is re-read in raw mode, not
+ * only the ECC bytes.
*/
for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ int data_off_in_page, spare_off_in_page, ecc_off_in_page;
+ int data_off, spare_off, ecc_off;
+ int data_len, spare_len, ecc_len;
+
/* No failure reported for this chunk, move to the next one */
if (!(failure_mask & BIT(chunk)))
continue;
- /* Derive ECC bytes positions (in page/buffer) and length */
- ecc = chip->oob_poi +
- (lt->full_chunk_cnt * lt->spare_bytes) +
- lt->last_spare_bytes +
- (chunk * ALIGN(lt->ecc_bytes, 32));
- ecc_offset_in_page =
- (chunk * (lt->data_bytes + lt->spare_bytes +
- lt->ecc_bytes)) +
- (chunk < lt->full_chunk_cnt ?
- lt->data_bytes + lt->spare_bytes :
- lt->last_data_bytes + lt->last_spare_bytes);
- ecc_len = chunk < lt->full_chunk_cnt ?
- lt->ecc_bytes : lt->last_ecc_bytes;
-
- /* Do the actual raw read of the ECC bytes */
- nand_change_read_column_op(chip, ecc_offset_in_page,
- ecc, ecc_len, false);
-
- /* Derive data/spare bytes positions (in buffer) and length */
- data = buf + (chunk * lt->data_bytes);
- data_len = chunk < lt->full_chunk_cnt ?
- lt->data_bytes : lt->last_data_bytes;
- spare = chip->oob_poi + (chunk * (lt->spare_bytes +
- lt->ecc_bytes));
- spare_len = chunk < lt->full_chunk_cnt ?
- lt->spare_bytes : lt->last_spare_bytes;
+ data_off_in_page = chunk * (lt->data_bytes + lt->spare_bytes +
+ lt->ecc_bytes);
+ spare_off_in_page = data_off_in_page +
+ (chunk < lt->full_chunk_cnt ? lt->data_bytes :
+ lt->last_data_bytes);
+ ecc_off_in_page = spare_off_in_page +
+ (chunk < lt->full_chunk_cnt ? lt->spare_bytes :
+ lt->last_spare_bytes);
+
+ data_off = chunk * lt->data_bytes;
+ spare_off = chunk * lt->spare_bytes;
+ ecc_off = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes +
+ (chunk * (lt->ecc_bytes + 2));
+
+ data_len = chunk < lt->full_chunk_cnt ? lt->data_bytes :
+ lt->last_data_bytes;
+ spare_len = chunk < lt->full_chunk_cnt ? lt->spare_bytes :
+ lt->last_spare_bytes;
+ ecc_len = chunk < lt->full_chunk_cnt ? lt->ecc_bytes :
+ lt->last_ecc_bytes;
+
+ /*
+ * Only re-read the ECC bytes, unless we are using the 2k/8b
+ * layout which is buggy in the sense that the ECC engine will
+ * try to correct data bytes anyway, creating bitflips. In this
+ * case, re-read the entire page.
+ */
+ if (lt->writesize == 2048 && lt->strength == 8) {
+ nand_change_read_column_op(chip, data_off_in_page,
+ buf + data_off, data_len,
+ false);
+ nand_change_read_column_op(chip, spare_off_in_page,
+ chip->oob_poi + spare_off, spare_len,
+ false);
+ }
+
+ nand_change_read_column_op(chip, ecc_off_in_page,
+ chip->oob_poi + ecc_off, ecc_len,
+ false);
/* Check the entire chunk (data + spare + ecc) for emptyness */
- marvell_nfc_check_empty_chunk(chip, data, data_len, spare,
- spare_len, ecc, ecc_len,
+ marvell_nfc_check_empty_chunk(chip, buf + data_off, data_len,
+ chip->oob_poi + spare_off, spare_len,
+ chip->oob_poi + ecc_off, ecc_len,
&max_bitflips);
}
return max_bitflips;
}
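
As a worked example of the offset arithmetic above, take the last chunk (chunk == 1) of the 2k/8-bit layout added by this patch; the per-field values come from its layout row, and the sketch simply replays the computations:

    static void chunk_offsets_example(void)
    {
            /* 2k/8-bit: nchunks=2, full_chunk_cnt=1, data=1024, spare=0,
             * ecc=30, last_data=1024, last_spare=32, last_ecc=30. */
            int chunk = 1;                                     /* the partial chunk */
            int data_off_in_page  = chunk * (1024 + 0 + 30);   /* 1054 */
            int spare_off_in_page = data_off_in_page + 1024;   /* 2078: last_data   */
            int ecc_off_in_page   = spare_off_in_page + 32;    /* 2110: last_spare  */

            /* Destination inside oob_poi; the "+ 2" pads 30B ECC to 32B: */
            int ecc_off = (1 * 0) + 32 + chunk * (30 + 2);     /* 64 */

            (void)ecc_off_in_page;
            (void)ecc_off;
    }
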
-static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd,
- struct nand_chip *chip, int page)
+static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct nand_chip *chip, int page)
{
/* Invalidate page cache */
chip->pagebuf = -1;
- return chip->ecc.read_page_raw(mtd, chip, chip->data_buf, true, page);
+ return chip->ecc.read_page_raw(chip, chip->data_buf, true, page);
}
-static int marvell_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd,
- struct nand_chip *chip, int page)
+static int marvell_nfc_hw_ecc_bch_read_oob(struct nand_chip *chip, int page)
{
/* Invalidate page cache */
chip->pagebuf = -1;
- return chip->ecc.read_page(mtd, chip, chip->data_buf, true, page);
+ return chip->ecc.read_page(chip, chip->data_buf, true, page);
}
/* BCH write helpers */
-static int marvell_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int marvell_nfc_hw_ecc_bch_write_page_raw(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
@@ -1458,11 +1547,11 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
return 0;
}
-static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
const u8 *data = buf;
const u8 *spare = chip->oob_poi;
@@ -1507,27 +1596,29 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
return 0;
}
-static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct nand_chip *chip,
int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
/* Invalidate page cache */
chip->pagebuf = -1;
memset(chip->data_buf, 0xFF, mtd->writesize);
- return chip->ecc.write_page_raw(mtd, chip, chip->data_buf, true, page);
+ return chip->ecc.write_page_raw(chip, chip->data_buf, true, page);
}
-static int marvell_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd,
- struct nand_chip *chip, int page)
+static int marvell_nfc_hw_ecc_bch_write_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
/* Invalidate page cache */
chip->pagebuf = -1;
memset(chip->data_buf, 0xFF, mtd->writesize);
- return chip->ecc.write_page(mtd, chip, chip->data_buf, true, page);
+ return chip->ecc.write_page(chip, chip->data_buf, true, page);
}
/* NAND framework ->exec_op() hooks and related helpers */
@@ -1547,7 +1638,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
unsigned int offset, naddrs;
const u8 *addrs;
- int len = nand_subop_get_data_len(subop, op_id);
+ int len;
instr = &subop->instrs[op_id];
@@ -1593,6 +1684,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
nfc_op->ndcb[0] |=
NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
}
nfc_op->data_delay_ns = instr->delay_ns;
@@ -1606,6 +1698,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
nfc_op->ndcb[0] |=
NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
}
nfc_op->data_delay_ns = instr->delay_ns;
@@ -2095,6 +2188,16 @@ static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
return -ENOTSUPP;
}
+ /* Special care for the layout 2k/8-bit/512B */
+ if (l->writesize == 2048 && l->strength == 8) {
+ if (mtd->oobsize < 128) {
+ dev_err(nfc->dev, "Requested layout needs at least 128 OOB bytes\n");
+ return -ENOTSUPP;
+ } else {
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ }
+ }
+
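The 128-byte floor can be sanity-checked with rough arithmetic over the 2k/8-bit layout row (a sketch under that assumed field order, not a statement from the patch):

    /* OOB consumed by the 2k/8-bit layout:
     *   spare: 1 * 0 + 32           = 32
     *   ECC:   2 * 32 (30B padded)  = 64
     *   total                       = 96 bytes
     * A 2k part with only 64B of OOB cannot host this; parts with 128B
     * can. And because physical offset 2048 (where the BBM of such a
     * part normally sits) falls inside the last chunk's data region,
     * NAND_BBT_NO_OOB_BBM is needed as well. */
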
mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
ecc->steps = l->nchunks;
ecc->size = l->data_bytes;
@@ -2190,11 +2293,10 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.pattern = bbt_mirror_pattern
};
-static int marvell_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
+static int marvell_nfc_setup_data_interface(struct nand_chip *chip, int chipnr,
const struct nand_data_interface
*conf)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
unsigned int period_ns = 1000000000 / clk_get_rate(nfc->core_clk) * 2;
@@ -2538,7 +2640,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
chip->options |= NAND_BUSWIDTH_AUTO;
- ret = nand_scan(mtd, marvell_nand->nsels);
+ ret = nand_scan(chip, marvell_nand->nsels);
if (ret) {
dev_err(dev, "could not scan the nand chip\n");
return ret;
@@ -2551,7 +2653,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register mtd device: %d\n", ret);
- nand_release(mtd);
+ nand_release(chip);
return ret;
}
@@ -2606,7 +2708,7 @@ static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
struct marvell_nand_chip *entry, *temp;
list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
- nand_release(nand_to_mtd(&entry->chip));
+ nand_release(&entry->chip);
list_del(&entry->node);
}
}
@@ -2697,24 +2799,23 @@ static int marvell_nfc_init(struct marvell_nfc *nfc)
struct regmap *sysctrl_base =
syscon_regmap_lookup_by_phandle(np,
"marvell,system-controller");
- u32 reg;
if (IS_ERR(sysctrl_base))
return PTR_ERR(sysctrl_base);
- reg = GENCONF_SOC_DEVICE_MUX_NFC_EN |
- GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
- GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
- GENCONF_SOC_DEVICE_MUX_NFC_INT_EN;
- regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX,
+ GENCONF_SOC_DEVICE_MUX_NFC_EN |
+ GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
+ GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
+ GENCONF_SOC_DEVICE_MUX_NFC_INT_EN);
- regmap_read(sysctrl_base, GENCONF_CLK_GATING_CTRL, &reg);
- reg |= GENCONF_CLK_GATING_CTRL_ND_GATE;
- regmap_write(sysctrl_base, GENCONF_CLK_GATING_CTRL, reg);
+ regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL,
+ GENCONF_CLK_GATING_CTRL_ND_GATE,
+ GENCONF_CLK_GATING_CTRL_ND_GATE);
- regmap_read(sysctrl_base, GENCONF_ND_CLK_CTRL, &reg);
- reg |= GENCONF_ND_CLK_CTRL_EN;
- regmap_write(sysctrl_base, GENCONF_ND_CLK_CTRL, reg);
+ regmap_update_bits(sysctrl_base, GENCONF_ND_CLK_CTRL,
+ GENCONF_ND_CLK_CTRL_EN,
+ GENCONF_ND_CLK_CTRL_EN);
}
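
The sysctrl hunks above replace open-coded read-modify-write sequences with regmap helpers; regmap_update_bits() performs the same masked update under the regmap lock. Equivalence sketch:

    /* Open-coded read-modify-write, as before: */
    u32 reg;
    regmap_read(sysctrl_base, GENCONF_CLK_GATING_CTRL, &reg);
    reg |= GENCONF_CLK_GATING_CTRL_ND_GATE;
    regmap_write(sysctrl_base, GENCONF_CLK_GATING_CTRL, reg);

    /* Single masked update, as after: */
    regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL,
                       GENCONF_CLK_GATING_CTRL_ND_GATE,   /* mask  */
                       GENCONF_CLK_GATING_CTRL_ND_GATE);  /* value */
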
/* Configure the DMA if appropriate */
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index 6d1740d54e0d..86a0aabe08df 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -263,8 +263,10 @@ static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
}
/* Control chip select signals */
-static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+static void mpc5121_nfc_select_chip(struct nand_chip *nand, int chip)
{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+
if (chip < 0) {
nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
return;
@@ -299,9 +301,9 @@ static int ads5121_chipselect_init(struct mtd_info *mtd)
}
/* Control chips select signal on ADS5121 board */
-static void ads5121_select_chip(struct mtd_info *mtd, int chip)
+static void ads5121_select_chip(struct nand_chip *nand, int chip)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
u8 v;
@@ -309,16 +311,16 @@ static void ads5121_select_chip(struct mtd_info *mtd, int chip)
v |= 0x0F;
if (chip >= 0) {
- mpc5121_nfc_select_chip(mtd, 0);
+ mpc5121_nfc_select_chip(nand, 0);
v &= ~(1 << chip);
} else
- mpc5121_nfc_select_chip(mtd, -1);
+ mpc5121_nfc_select_chip(nand, -1);
out_8(prv->csreg, v);
}
/* Read NAND Ready/Busy signal */
-static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
+static int mpc5121_nfc_dev_ready(struct nand_chip *nand)
{
/*
* NFC handles ready/busy signal internally. Therefore, this function
@@ -328,10 +330,10 @@ static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
}
/* Write command to NAND flash */
-static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
- int column, int page)
+static void mpc5121_nfc_command(struct nand_chip *chip, unsigned command,
+ int column, int page)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
prv->column = (column >= 0) ? column : 0;
@@ -362,7 +364,7 @@ static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
break;
case NAND_CMD_SEQIN:
- mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
+ mpc5121_nfc_command(chip, NAND_CMD_READ0, column, page);
column = 0;
break;
@@ -493,34 +495,24 @@ static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
}
/* Read data from NFC buffers */
-static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void mpc5121_nfc_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
- mpc5121_nfc_buf_copy(mtd, buf, len, 0);
+ mpc5121_nfc_buf_copy(nand_to_mtd(chip), buf, len, 0);
}
/* Write data to NFC buffers */
-static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
- const u_char *buf, int len)
+static void mpc5121_nfc_write_buf(struct nand_chip *chip, const u_char *buf,
+ int len)
{
- mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
+ mpc5121_nfc_buf_copy(nand_to_mtd(chip), (u_char *)buf, len, 1);
}
/* Read byte from NFC buffers */
-static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
+static u8 mpc5121_nfc_read_byte(struct nand_chip *chip)
{
u8 tmp;
- mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
-
- return tmp;
-}
-
-/* Read word from NFC buffers */
-static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
-{
- u16 tmp;
-
- mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+ mpc5121_nfc_read_buf(chip, &tmp, sizeof(tmp));
return tmp;
}
@@ -700,15 +692,14 @@ static int mpc5121_nfc_probe(struct platform_device *op)
}
mtd->name = "MPC5121 NAND";
- chip->dev_ready = mpc5121_nfc_dev_ready;
- chip->cmdfunc = mpc5121_nfc_command;
- chip->read_byte = mpc5121_nfc_read_byte;
- chip->read_word = mpc5121_nfc_read_word;
- chip->read_buf = mpc5121_nfc_read_buf;
- chip->write_buf = mpc5121_nfc_write_buf;
+ chip->legacy.dev_ready = mpc5121_nfc_dev_ready;
+ chip->legacy.cmdfunc = mpc5121_nfc_command;
+ chip->legacy.read_byte = mpc5121_nfc_read_byte;
+ chip->legacy.read_buf = mpc5121_nfc_read_buf;
+ chip->legacy.write_buf = mpc5121_nfc_write_buf;
chip->select_chip = mpc5121_nfc_select_chip;
- chip->set_features = nand_get_set_features_notsupp;
- chip->get_features = nand_get_set_features_notsupp;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->ecc.mode = NAND_ECC_SOFT;
chip->ecc.algo = NAND_ECC_HAMMING;
@@ -778,7 +769,7 @@ static int mpc5121_nfc_probe(struct platform_device *op)
}
/* Detect NAND chips */
- retval = nand_scan(mtd, be32_to_cpup(chips_no));
+ retval = nand_scan(chip, be32_to_cpup(chips_no));
if (retval) {
dev_err(dev, "NAND Flash not found !\n");
goto error;
@@ -828,7 +819,7 @@ static int mpc5121_nfc_remove(struct platform_device *op)
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
- nand_release(mtd);
+ nand_release(mtd_to_nand(mtd));
mpc5121_nfc_free(dev, mtd);
return 0;
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 57b5ed1699e3..2bb0df1b7244 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -389,23 +389,22 @@ static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
return 0;
}
-static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
+static void mtk_nfc_select_chip(struct nand_chip *nand, int chip)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct mtk_nfc *nfc = nand_get_controller_data(nand);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
if (chip < 0)
return;
- mtk_nfc_hw_runtime_config(mtd);
+ mtk_nfc_hw_runtime_config(nand_to_mtd(nand));
nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
}
-static int mtk_nfc_dev_ready(struct mtd_info *mtd)
+static int mtk_nfc_dev_ready(struct nand_chip *nand)
{
- struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
return 0;
@@ -413,9 +412,10 @@ static int mtk_nfc_dev_ready(struct mtd_info *mtd)
return 1;
}
-static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+static void mtk_nfc_cmd_ctrl(struct nand_chip *chip, int dat,
+ unsigned int ctrl)
{
- struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
if (ctrl & NAND_ALE) {
mtk_nfc_send_address(nfc, dat);
@@ -438,9 +438,8 @@ static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
dev_err(nfc->dev, "data not ready\n");
}
-static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
+static inline u8 mtk_nfc_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
u32 reg;
@@ -467,17 +466,17 @@ static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
return nfi_readb(nfc, NFI_DATAR);
}
-static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+static void mtk_nfc_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
int i;
for (i = 0; i < len; i++)
- buf[i] = mtk_nfc_read_byte(mtd);
+ buf[i] = mtk_nfc_read_byte(chip);
}
-static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
+static void mtk_nfc_write_byte(struct nand_chip *chip, u8 byte)
{
- struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
u32 reg;
reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
@@ -496,18 +495,18 @@ static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
nfi_writeb(nfc, byte, NFI_DATAW);
}
-static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+static void mtk_nfc_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
int i;
for (i = 0; i < len; i++)
- mtk_nfc_write_byte(mtd, buf[i]);
+ mtk_nfc_write_byte(chip, buf[i]);
}
-static int mtk_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
+static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
const struct nand_data_interface *conf)
{
- struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
const struct nand_sdr_timings *timings;
u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;
@@ -807,27 +806,27 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
return nand_prog_page_end_op(chip);
}
-static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const u8 *buf,
+static int mtk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
int oob_on, int page)
{
- return mtk_nfc_write_page(mtd, chip, buf, page, 0);
+ return mtk_nfc_write_page(nand_to_mtd(chip), chip, buf, page, 0);
}
-static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const u8 *buf, int oob_on, int pg)
+static int mtk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_on, int pg)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
mtk_nfc_format_page(mtd, buf);
return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
}
-static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, u32 offset,
+static int mtk_nfc_write_subpage_hwecc(struct nand_chip *chip, u32 offset,
u32 data_len, const u8 *buf,
int oob_on, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
int ret;
@@ -839,10 +838,9 @@ static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
}
-static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
{
- return mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
+ return mtk_nfc_write_page_raw(chip, NULL, 1, page);
}
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
@@ -969,23 +967,25 @@ done:
return bitflips;
}
-static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, u32 off,
+static int mtk_nfc_read_subpage_hwecc(struct nand_chip *chip, u32 off,
u32 len, u8 *p, int pg)
{
- return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
+ return mtk_nfc_read_subpage(nand_to_mtd(chip), chip, off, len, p, pg,
+ 0);
}
-static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, u8 *p,
- int oob_on, int pg)
+static int mtk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *p, int oob_on,
+ int pg)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
}
-static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- u8 *buf, int oob_on, int page)
+static int mtk_nfc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_on,
+ int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
@@ -1011,10 +1011,9 @@ static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
}
-static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int mtk_nfc_read_oob_std(struct nand_chip *chip, int page)
{
- return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
+ return mtk_nfc_read_page_raw(chip, NULL, 1, page);
}
static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
@@ -1333,13 +1332,13 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
nand_set_controller_data(nand, nfc);
nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
- nand->dev_ready = mtk_nfc_dev_ready;
+ nand->legacy.dev_ready = mtk_nfc_dev_ready;
nand->select_chip = mtk_nfc_select_chip;
- nand->write_byte = mtk_nfc_write_byte;
- nand->write_buf = mtk_nfc_write_buf;
- nand->read_byte = mtk_nfc_read_byte;
- nand->read_buf = mtk_nfc_read_buf;
- nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
+ nand->legacy.write_byte = mtk_nfc_write_byte;
+ nand->legacy.write_buf = mtk_nfc_write_buf;
+ nand->legacy.read_byte = mtk_nfc_read_byte;
+ nand->legacy.read_buf = mtk_nfc_read_buf;
+ nand->legacy.cmd_ctrl = mtk_nfc_cmd_ctrl;
nand->setup_data_interface = mtk_nfc_setup_data_interface;
/* set default mode in case dt entry is missing */
@@ -1365,14 +1364,14 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
mtk_nfc_hw_init(nfc);
- ret = nand_scan(mtd, nsels);
+ ret = nand_scan(nand, nsels);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "mtd parse partition error\n");
- nand_release(mtd);
+ nand_release(nand);
return ret;
}
@@ -1538,7 +1537,7 @@ static int mtk_nfc_remove(struct platform_device *pdev)
while (!list_empty(&nfc->chips)) {
chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
node);
- nand_release(nand_to_mtd(&chip->nand));
+ nand_release(&chip->nand);
list_del(&chip->node);
}
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 4c9214dea424..88bd3f6a499c 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -136,8 +136,8 @@ struct mxc_nand_devtype_data {
void (*irq_control)(struct mxc_nand_host *, int);
u32 (*get_ecc_status)(struct mxc_nand_host *);
const struct mtd_ooblayout_ops *ooblayout;
- void (*select_chip)(struct mtd_info *mtd, int chip);
- int (*setup_data_interface)(struct mtd_info *mtd, int csline,
+ void (*select_chip)(struct nand_chip *chip, int cs);
+ int (*setup_data_interface)(struct nand_chip *chip, int csline,
const struct nand_data_interface *conf);
void (*enable_hwecc)(struct nand_chip *chip, bool enable);
@@ -701,7 +701,7 @@ static void mxc_nand_enable_hwecc_v3(struct nand_chip *chip, bool enable)
}
/* This functions is used by upper layer to checks if device is ready */
-static int mxc_nand_dev_ready(struct mtd_info *mtd)
+static int mxc_nand_dev_ready(struct nand_chip *chip)
{
/*
* NFC handles R/B internally. Therefore, this function
@@ -816,8 +816,8 @@ static int mxc_nand_read_page_v2_v3(struct nand_chip *chip, void *buf,
return max_bitflips;
}
-static int mxc_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int mxc_nand_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
struct mxc_nand_host *host = nand_get_controller_data(chip);
void *oob_buf;
@@ -830,8 +830,8 @@ static int mxc_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return host->devtype_data->read_page(chip, buf, oob_buf, 1, page);
}
-static int mxc_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int mxc_nand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
struct mxc_nand_host *host = nand_get_controller_data(chip);
void *oob_buf;
@@ -844,8 +844,7 @@ static int mxc_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
return host->devtype_data->read_page(chip, buf, oob_buf, 0, page);
}
-static int mxc_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int mxc_nand_read_oob(struct nand_chip *chip, int page)
{
struct mxc_nand_host *host = nand_get_controller_data(chip);
@@ -874,22 +873,21 @@ static int mxc_nand_write_page(struct nand_chip *chip, const uint8_t *buf,
return 0;
}
-static int mxc_nand_write_page_ecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
+static int mxc_nand_write_page_ecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
return mxc_nand_write_page(chip, buf, true, page);
}
-static int mxc_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+static int mxc_nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
return mxc_nand_write_page(chip, buf, false, page);
}
-static int mxc_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int mxc_nand_write_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct mxc_nand_host *host = nand_get_controller_data(chip);
memset(host->data_buf, 0xff, mtd->writesize);
@@ -897,9 +895,8 @@ static int mxc_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
return mxc_nand_write_page(chip, host->data_buf, false, page);
}
-static u_char mxc_nand_read_byte(struct mtd_info *mtd)
+static u_char mxc_nand_read_byte(struct nand_chip *nand_chip)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
uint8_t ret;
@@ -921,25 +918,13 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
return ret;
}
-static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
-{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- uint16_t ret;
-
- ret = *(uint16_t *)(host->data_buf + host->buf_start);
- host->buf_start += 2;
-
- return ret;
-}
-
/* Write data of length len to buffer buf. The data to be
* written on NAND Flash is first copied to RAMbuffer. After the Data Input
* Operation by the NFC, the data is written to NAND Flash */
-static void mxc_nand_write_buf(struct mtd_info *mtd,
- const u_char *buf, int len)
+static void mxc_nand_write_buf(struct nand_chip *nand_chip, const u_char *buf,
+ int len)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
u16 col = host->buf_start;
int n = mtd->oobsize + mtd->writesize - col;
@@ -955,9 +940,10 @@ static void mxc_nand_write_buf(struct mtd_info *mtd,
* Flash first the data output cycle is initiated by the NFC, which copies
* the data to RAMbuffer. This data of length len is then copied to buffer buf.
*/
-static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void mxc_nand_read_buf(struct nand_chip *nand_chip, u_char *buf,
+ int len)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
u16 col = host->buf_start;
int n = mtd->oobsize + mtd->writesize - col;
@@ -971,9 +957,8 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
/* This function is used by upper layer for select and
* deselect of the NAND chip */
-static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
+static void mxc_nand_select_chip_v1_v3(struct nand_chip *nand_chip, int chip)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
if (chip == -1) {
@@ -992,9 +977,8 @@ static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
}
}
-static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
+static void mxc_nand_select_chip_v2(struct nand_chip *nand_chip, int chip)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
if (chip == -1) {
@@ -1155,11 +1139,10 @@ static void preset_v1(struct mtd_info *mtd)
writew(0x4, NFC_V1_V2_WRPROT);
}
-static int mxc_nand_v2_setup_data_interface(struct mtd_info *mtd, int csline,
+static int mxc_nand_v2_setup_data_interface(struct nand_chip *chip, int csline,
const struct nand_data_interface *conf)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
int tRC_min_ns, tRC_ps, ret;
unsigned long rate, rate_round;
const struct nand_sdr_timings *timings;
@@ -1349,10 +1332,10 @@ static void preset_v3(struct mtd_info *mtd)
/* Used by the upper layer to write command to NAND Flash for
* different operations to be carried out on NAND Flash */
-static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
- int column, int page_addr)
+static void mxc_nand_command(struct nand_chip *nand_chip, unsigned command,
+ int column, int page_addr)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
@@ -1409,17 +1392,17 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
}
}
-static int mxc_nand_set_features(struct mtd_info *mtd, struct nand_chip *chip,
- int addr, u8 *subfeature_param)
+static int mxc_nand_set_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
int i;
host->buf_start = 0;
for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- chip->write_byte(mtd, subfeature_param[i]);
+ chip->legacy.write_byte(chip, subfeature_param[i]);
memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
host->devtype_data->send_cmd(host, NAND_CMD_SET_FEATURES, false);
@@ -1429,11 +1412,11 @@ static int mxc_nand_set_features(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
-static int mxc_nand_get_features(struct mtd_info *mtd, struct nand_chip *chip,
- int addr, u8 *subfeature_param)
+static int mxc_nand_get_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
- struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
int i;
host->devtype_data->send_cmd(host, NAND_CMD_GET_FEATURES, false);
@@ -1443,7 +1426,7 @@ static int mxc_nand_get_features(struct mtd_info *mtd, struct nand_chip *chip,
host->buf_start = 0;
for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- *subfeature_param++ = chip->read_byte(mtd);
+ *subfeature_param++ = chip->legacy.read_byte(chip);
return 0;
}
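
For context, a hypothetical caller of the converted feature hooks; ONFI_FEATURE_ADDR_TIMING_MODE and ONFI_SUBFEATURE_PARAM_LEN are existing rawnand constants, but the call site itself is purely illustrative:

    u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { 0 };

    param[0] = 3;   /* e.g. request ONFI timing mode 3 */
    chip->legacy.set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, param);

    chip->legacy.get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, param);
    /* param[0] now reflects the mode the chip actually selected */
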
@@ -1786,18 +1769,17 @@ static int mxcnd_probe(struct platform_device *pdev)
mtd->name = DRIVER_NAME;
/* 50 us command delay time */
- this->chip_delay = 5;
+ this->legacy.chip_delay = 5;
nand_set_controller_data(this, host);
nand_set_flash_node(this, pdev->dev.of_node),
- this->dev_ready = mxc_nand_dev_ready;
- this->cmdfunc = mxc_nand_command;
- this->read_byte = mxc_nand_read_byte;
- this->read_word = mxc_nand_read_word;
- this->write_buf = mxc_nand_write_buf;
- this->read_buf = mxc_nand_read_buf;
- this->set_features = mxc_nand_set_features;
- this->get_features = mxc_nand_get_features;
+ this->legacy.dev_ready = mxc_nand_dev_ready;
+ this->legacy.cmdfunc = mxc_nand_command;
+ this->legacy.read_byte = mxc_nand_read_byte;
+ this->legacy.write_buf = mxc_nand_write_buf;
+ this->legacy.read_buf = mxc_nand_read_buf;
+ this->legacy.set_features = mxc_nand_set_features;
+ this->legacy.get_features = mxc_nand_get_features;
host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk))
@@ -1900,7 +1882,7 @@ static int mxcnd_probe(struct platform_device *pdev)
/* Scan the NAND device */
this->dummy_controller.ops = &mxcnd_controller_ops;
- err = nand_scan(mtd, is_imx25_nfc(host) ? 4 : 1);
+ err = nand_scan(this, is_imx25_nfc(host) ? 4 : 1);
if (err)
goto escan;
@@ -1928,7 +1910,7 @@ static int mxcnd_remove(struct platform_device *pdev)
{
struct mxc_nand_host *host = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&host->nand));
+ nand_release(&host->nand);
if (host->clk_act)
clk_disable_unprepare(host->clk);
diff --git a/drivers/mtd/nand/raw/nand_amd.c b/drivers/mtd/nand/raw/nand_amd.c
index 22f060f38123..890c5b43e03c 100644
--- a/drivers/mtd/nand/raw/nand_amd.c
+++ b/drivers/mtd/nand/raw/nand_amd.c
@@ -15,7 +15,7 @@
* GNU General Public License for more details.
*/
-#include <linux/mtd/rawnand.h>
+#include "internals.h"
static void amd_nand_decode_id(struct nand_chip *chip)
{
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index d527e448ce19..05bd0779fe9b 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -36,10 +36,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/nmi.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
@@ -48,6 +46,8 @@
#include <linux/mtd/partitions.h>
#include <linux/of.h>
+#include "internals.h"
+
static int nand_get_device(struct mtd_info *mtd, int new_state);
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
@@ -253,183 +253,16 @@ static void nand_release_device(struct mtd_info *mtd)
}
/**
- * nand_read_byte - [DEFAULT] read one byte from the chip
- * @mtd: MTD device structure
- *
- * Default read function for 8bit buswidth
- */
-static uint8_t nand_read_byte(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- return readb(chip->IO_ADDR_R);
-}
-
-/**
- * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
- * @mtd: MTD device structure
- *
- * Default read function for 16bit buswidth with endianness conversion.
- *
- */
-static uint8_t nand_read_byte16(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
-}
-
-/**
- * nand_read_word - [DEFAULT] read one word from the chip
- * @mtd: MTD device structure
- *
- * Default read function for 16bit buswidth without endianness conversion.
- */
-static u16 nand_read_word(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- return readw(chip->IO_ADDR_R);
-}
-
-/**
- * nand_select_chip - [DEFAULT] control CE line
- * @mtd: MTD device structure
- * @chipnr: chipnumber to select, -1 for deselect
- *
- * Default select function for 1 chip devices.
- */
-static void nand_select_chip(struct mtd_info *mtd, int chipnr)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
- switch (chipnr) {
- case -1:
- chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
- break;
- case 0:
- break;
-
- default:
- BUG();
- }
-}
-
-/**
- * nand_write_byte - [DEFAULT] write single byte to chip
- * @mtd: MTD device structure
- * @byte: value to write
- *
- * Default function to write a byte to I/O[7:0]
- */
-static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
- chip->write_buf(mtd, &byte, 1);
-}
-
-/**
- * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
- * @mtd: MTD device structure
- * @byte: value to write
- *
- * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
- */
-static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- uint16_t word = byte;
-
- /*
- * It's not entirely clear what should happen to I/O[15:8] when writing
- * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
- *
- * When the host supports a 16-bit bus width, only data is
- * transferred at the 16-bit width. All address and command line
- * transfers shall use only the lower 8-bits of the data bus. During
- * command transfers, the host may place any value on the upper
- * 8-bits of the data bus. During address transfers, the host shall
- * set the upper 8-bits of the data bus to 00h.
- *
- * One user of the write_byte callback is nand_set_features. The
- * four parameters are specified to be written to I/O[7:0], but this is
- * neither an address nor a command transfer. Let's assume a 0 on the
- * upper I/O lines is OK.
- */
- chip->write_buf(mtd, (uint8_t *)&word, 2);
-}
-
-/**
- * nand_write_buf - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- *
- * Default write function for 8bit buswidth.
- */
-static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
- iowrite8_rep(chip->IO_ADDR_W, buf, len);
-}
-
-/**
- * nand_read_buf - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- *
- * Default read function for 8bit buswidth.
- */
-static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
- ioread8_rep(chip->IO_ADDR_R, buf, len);
-}
-
-/**
- * nand_write_buf16 - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- *
- * Default write function for 16bit buswidth.
- */
-static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- u16 *p = (u16 *) buf;
-
- iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
-}
-
-/**
- * nand_read_buf16 - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- *
- * Default read function for 16bit buswidth.
- */
-static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- u16 *p = (u16 *) buf;
-
- ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
-}
-
-/**
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @ofs: offset from device start
*
* Check, if the block is bad.
*/
-static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
+static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int page, page_end, res;
- struct nand_chip *chip = mtd_to_nand(mtd);
u8 bad;
if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
@@ -439,7 +272,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
page_end = page + (chip->bbt_options & NAND_BBT_SCAN2NDPAGE ? 2 : 1);
for (; page < page_end; page++) {
- res = chip->ecc.read_oob(mtd, chip, page);
+ res = chip->ecc.read_oob(chip, page);
if (res < 0)
return res;
@@ -458,16 +291,16 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
/**
* nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @ofs: offset from device start
*
* This is the default implementation, which can be overridden by a hardware
* specific driver. It provides the details for writing a bad block marker to a
* block.
*/
-static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct mtd_oob_ops ops;
uint8_t buf[2] = { 0, 0 };
int ret = 0, res, i = 0;
@@ -499,13 +332,34 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
/**
+ * nand_markbad_bbm - mark a block by updating the BBM
+ * @chip: NAND chip object
+ * @ofs: offset of the block to mark bad
+ */
+int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
+{
+ if (chip->legacy.block_markbad)
+ return chip->legacy.block_markbad(chip, ofs);
+
+ return nand_default_block_markbad(chip, ofs);
+}
+
+static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
+{
+ if (chip->legacy.block_bad)
+ return chip->legacy.block_bad(chip, ofs);
+
+ return nand_block_bad(chip, ofs);
+}
+
+/**
* nand_block_markbad_lowlevel - mark a block bad
* @mtd: MTD device structure
* @ofs: offset from device start
*
* This function performs the generic NAND bad block marking steps (i.e., bad
* block table(s) and/or marker(s)). We only allow the hardware driver to
- * specify how to write bad block markers to OOB (chip->block_markbad).
+ * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
*
* We try operations in the following order:
*
@@ -529,17 +383,17 @@ static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
memset(&einfo, 0, sizeof(einfo));
einfo.addr = ofs;
einfo.len = 1ULL << chip->phys_erase_shift;
- nand_erase_nand(mtd, &einfo, 0);
+ nand_erase_nand(chip, &einfo, 0);
/* Write bad block marker to OOB */
nand_get_device(mtd, FL_WRITING);
- ret = chip->block_markbad(mtd, ofs);
+ ret = nand_markbad_bbm(chip, ofs);
nand_release_device(mtd);
}
/* Mark block bad in BBT */
if (chip->bbt) {
- res = nand_markbad_bbt(mtd, ofs);
+ res = nand_markbad_bbt(chip, ofs);
if (!ret)
ret = res;
}
@@ -589,7 +443,7 @@ static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
if (!chip->bbt)
return 0;
/* Return info from the table */
- return nand_isreserved_bbt(mtd, ofs);
+ return nand_isreserved_bbt(chip, ofs);
}
/**
@@ -605,89 +459,14 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
{
struct nand_chip *chip = mtd_to_nand(mtd);
- if (!chip->bbt)
- return chip->block_bad(mtd, ofs);
-
/* Return info from the table */
- return nand_isbad_bbt(mtd, ofs, allowbbt);
-}
+ if (chip->bbt)
+ return nand_isbad_bbt(chip, ofs, allowbbt);
-/**
- * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
- * @mtd: MTD device structure
- * @timeo: Timeout
- *
- * Helper function for nand_wait_ready used when needing to wait in interrupt
- * context.
- */
-static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int i;
-
- /* Wait for the device to get ready */
- for (i = 0; i < timeo; i++) {
- if (chip->dev_ready(mtd))
- break;
- touch_softlockup_watchdog();
- mdelay(1);
- }
+ return nand_isbad_bbm(chip, ofs);
}
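
Condensed restatement of the resulting lookup order (same logic as the code above, with nand_isbad_bbm() folded in for readability):

    static int check_block_bad(struct nand_chip *chip, loff_t ofs, int allowbbt)
    {
            if (chip->bbt)                       /* 1. trust the bad block table */
                    return nand_isbad_bbt(chip, ofs, allowbbt);
            if (chip->legacy.block_bad)          /* 2. driver-provided override  */
                    return chip->legacy.block_bad(chip, ofs);
            return nand_block_bad(chip, ofs);    /* 3. generic OOB marker scan   */
    }
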
/**
- * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
- * @mtd: MTD device structure
- *
- * Wait for the ready pin after a command, and warn if a timeout occurs.
- */
-void nand_wait_ready(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- unsigned long timeo = 400;
-
- if (in_interrupt() || oops_in_progress)
- return panic_nand_wait_ready(mtd, timeo);
-
- /* Wait until command is processed or timeout occurs */
- timeo = jiffies + msecs_to_jiffies(timeo);
- do {
- if (chip->dev_ready(mtd))
- return;
- cond_resched();
- } while (time_before(jiffies, timeo));
-
- if (!chip->dev_ready(mtd))
- pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
-}
-EXPORT_SYMBOL_GPL(nand_wait_ready);
-
-/**
- * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
- * @mtd: MTD device structure
- * @timeo: Timeout in ms
- *
- * Wait for status ready (i.e. command done) or timeout.
- */
-static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
-{
- register struct nand_chip *chip = mtd_to_nand(mtd);
- int ret;
-
- timeo = jiffies + msecs_to_jiffies(timeo);
- do {
- u8 status;
-
- ret = nand_read_data_op(chip, &status, sizeof(status), true);
- if (ret)
- return;
-
- if (status & NAND_STATUS_READY)
- break;
- touch_softlockup_watchdog();
- } while (time_before(jiffies, timeo));
-};
-
-/**
* nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
* @chip: NAND chip structure
* @timeout_ms: Timeout in ms
@@ -753,273 +532,6 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
/**
- * nand_command - [DEFAULT] Send command to NAND device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
- *
- * Send command to NAND device. This function is used for small page devices
- * (512 Bytes per page).
- */
-static void nand_command(struct mtd_info *mtd, unsigned int command,
- int column, int page_addr)
-{
- register struct nand_chip *chip = mtd_to_nand(mtd);
- int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
-
- /* Write out the command to the device */
- if (command == NAND_CMD_SEQIN) {
- int readcmd;
-
- if (column >= mtd->writesize) {
- /* OOB area */
- column -= mtd->writesize;
- readcmd = NAND_CMD_READOOB;
- } else if (column < 256) {
- /* First 256 bytes --> READ0 */
- readcmd = NAND_CMD_READ0;
- } else {
- column -= 256;
- readcmd = NAND_CMD_READ1;
- }
- chip->cmd_ctrl(mtd, readcmd, ctrl);
- ctrl &= ~NAND_CTRL_CHANGE;
- }
- if (command != NAND_CMD_NONE)
- chip->cmd_ctrl(mtd, command, ctrl);
-
- /* Address cycle, when necessary */
- ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
- /* Serially input address */
- if (column != -1) {
- /* Adjust columns for 16 bit buswidth */
- if (chip->options & NAND_BUSWIDTH_16 &&
- !nand_opcode_8bits(command))
- column >>= 1;
- chip->cmd_ctrl(mtd, column, ctrl);
- ctrl &= ~NAND_CTRL_CHANGE;
- }
- if (page_addr != -1) {
- chip->cmd_ctrl(mtd, page_addr, ctrl);
- ctrl &= ~NAND_CTRL_CHANGE;
- chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
- if (chip->options & NAND_ROW_ADDR_3)
- chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
- }
- chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
-
- /*
- * Program and erase have their own busy handlers status and sequential
- * in needs no delay
- */
- switch (command) {
-
- case NAND_CMD_NONE:
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_SEQIN:
- case NAND_CMD_STATUS:
- case NAND_CMD_READID:
- case NAND_CMD_SET_FEATURES:
- return;
-
- case NAND_CMD_RESET:
- if (chip->dev_ready)
- break;
- udelay(chip->chip_delay);
- chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
- NAND_CTRL_CLE | NAND_CTRL_CHANGE);
- chip->cmd_ctrl(mtd,
- NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
- /* EZ-NAND can take upto 250ms as per ONFi v4.0 */
- nand_wait_status_ready(mtd, 250);
- return;
-
- /* This applies to read commands */
- case NAND_CMD_READ0:
- /*
- * READ0 is sometimes used to exit GET STATUS mode. When this
- * is the case no address cycles are requested, and we can use
- * this information to detect that we should not wait for the
- * device to be ready.
- */
- if (column == -1 && page_addr == -1)
- return;
-
- default:
- /*
- * If we don't have access to the busy pin, we apply the given
- * command delay
- */
- if (!chip->dev_ready) {
- udelay(chip->chip_delay);
- return;
- }
- }
- /*
- * Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine.
- */
- ndelay(100);
-
- nand_wait_ready(mtd);
-}
-
-static void nand_ccs_delay(struct nand_chip *chip)
-{
- /*
- * The controller already takes care of waiting for tCCS when the RNDIN
- * or RNDOUT command is sent, return directly.
- */
- if (!(chip->options & NAND_WAIT_TCCS))
- return;
-
- /*
- * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
- * (which should be safe for all NANDs).
- */
- if (chip->setup_data_interface)
- ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
- else
- ndelay(500);
-}
-
-/**
- * nand_command_lp - [DEFAULT] Send command to NAND large page device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
- *
- * Send command to NAND device. This is the version for the new large page
- * devices. We don't have the separate regions as we have in the small page
- * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
- */
-static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
- int column, int page_addr)
-{
- register struct nand_chip *chip = mtd_to_nand(mtd);
-
- /* Emulate NAND_CMD_READOOB */
- if (command == NAND_CMD_READOOB) {
- column += mtd->writesize;
- command = NAND_CMD_READ0;
- }
-
- /* Command latch cycle */
- if (command != NAND_CMD_NONE)
- chip->cmd_ctrl(mtd, command,
- NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
-
- if (column != -1 || page_addr != -1) {
- int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
-
- /* Serially input address */
- if (column != -1) {
- /* Adjust columns for 16 bit buswidth */
- if (chip->options & NAND_BUSWIDTH_16 &&
- !nand_opcode_8bits(command))
- column >>= 1;
- chip->cmd_ctrl(mtd, column, ctrl);
- ctrl &= ~NAND_CTRL_CHANGE;
-
- /* Only output a single addr cycle for 8bits opcodes. */
- if (!nand_opcode_8bits(command))
- chip->cmd_ctrl(mtd, column >> 8, ctrl);
- }
- if (page_addr != -1) {
- chip->cmd_ctrl(mtd, page_addr, ctrl);
- chip->cmd_ctrl(mtd, page_addr >> 8,
- NAND_NCE | NAND_ALE);
- if (chip->options & NAND_ROW_ADDR_3)
- chip->cmd_ctrl(mtd, page_addr >> 16,
- NAND_NCE | NAND_ALE);
- }
- }
- chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
-
- /*
- * Program and erase have their own busy handlers status, sequential
- * in and status need no delay.
- */
- switch (command) {
-
- case NAND_CMD_NONE:
- case NAND_CMD_CACHEDPROG:
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_SEQIN:
- case NAND_CMD_STATUS:
- case NAND_CMD_READID:
- case NAND_CMD_SET_FEATURES:
- return;
-
- case NAND_CMD_RNDIN:
- nand_ccs_delay(chip);
- return;
-
- case NAND_CMD_RESET:
- if (chip->dev_ready)
- break;
- udelay(chip->chip_delay);
- chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
- NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
- chip->cmd_ctrl(mtd, NAND_CMD_NONE,
- NAND_NCE | NAND_CTRL_CHANGE);
- /* EZ-NAND can take upto 250ms as per ONFi v4.0 */
- nand_wait_status_ready(mtd, 250);
- return;
-
- case NAND_CMD_RNDOUT:
- /* No ready / busy check necessary */
- chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
- NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
- chip->cmd_ctrl(mtd, NAND_CMD_NONE,
- NAND_NCE | NAND_CTRL_CHANGE);
-
- nand_ccs_delay(chip);
- return;
-
- case NAND_CMD_READ0:
- /*
- * READ0 is sometimes used to exit GET STATUS mode. When this
- * is the case no address cycles are requested, and we can use
- * this information to detect that READSTART should not be
- * issued.
- */
- if (column == -1 && page_addr == -1)
- return;
-
- chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
- NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
- chip->cmd_ctrl(mtd, NAND_CMD_NONE,
- NAND_NCE | NAND_CTRL_CHANGE);
-
- /* This applies to read commands */
- default:
- /*
- * If we don't have access to the busy pin, we apply the given
- * command delay.
- */
- if (!chip->dev_ready) {
- udelay(chip->chip_delay);
- return;
- }
- }
-
- /*
- * Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine.
- */
- ndelay(100);
-
- nand_wait_ready(mtd);
-}
-
-/**
* panic_nand_get_device - [GENERIC] Get chip for selected access
* @chip: the nand chip descriptor
* @mtd: MTD device structure
@@ -1086,13 +598,12 @@ retry:
* we are in interrupt context. May happen when in panic and trying to write
* an oops through mtdoops.
*/
-static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
- unsigned long timeo)
+void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
int i;
for (i = 0; i < timeo; i++) {
- if (chip->dev_ready) {
- if (chip->dev_ready(mtd))
+ if (chip->legacy.dev_ready) {
+ if (chip->legacy.dev_ready(chip))
break;
} else {
int ret;
@@ -1110,60 +621,6 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
}
}
-/**
- * nand_wait - [DEFAULT] wait until the command is done
- * @mtd: MTD device structure
- * @chip: NAND chip structure
- *
- * Wait for command done. This applies to erase and program only.
- */
-static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
-{
-
- unsigned long timeo = 400;
- u8 status;
- int ret;
-
- /*
- * Apply this short delay always to ensure that we do wait tWB in any
- * case on any machine.
- */
- ndelay(100);
-
- ret = nand_status_op(chip, NULL);
- if (ret)
- return ret;
-
- if (in_interrupt() || oops_in_progress)
- panic_nand_wait(mtd, chip, timeo);
- else {
- timeo = jiffies + msecs_to_jiffies(timeo);
- do {
- if (chip->dev_ready) {
- if (chip->dev_ready(mtd))
- break;
- } else {
- ret = nand_read_data_op(chip, &status,
- sizeof(status), true);
- if (ret)
- return ret;
-
- if (status & NAND_STATUS_READY)
- break;
- }
- cond_resched();
- } while (time_before(jiffies, timeo));
- }
-
- ret = nand_read_data_op(chip, &status, sizeof(status), true);
- if (ret)
- return ret;
-
- /* This can happen if in case of timeout or buggy dev_ready */
- WARN_ON(!(status & NAND_STATUS_READY));
- return status;
-}
-
static bool nand_supports_get_features(struct nand_chip *chip, int addr)
{
return (chip->parameters.supports_set_get_features &&
@@ -1177,48 +634,6 @@ static bool nand_supports_set_features(struct nand_chip *chip, int addr)
}
/**
- * nand_get_features - wrapper to perform a GET_FEATURE
- * @chip: NAND chip info structure
- * @addr: feature address
- * @subfeature_param: the subfeature parameters, a four bytes array
- *
- * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
- * operation cannot be handled.
- */
-int nand_get_features(struct nand_chip *chip, int addr,
- u8 *subfeature_param)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
- if (!nand_supports_get_features(chip, addr))
- return -ENOTSUPP;
-
- return chip->get_features(mtd, chip, addr, subfeature_param);
-}
-EXPORT_SYMBOL_GPL(nand_get_features);
-
-/**
- * nand_set_features - wrapper to perform a SET_FEATURE
- * @chip: NAND chip info structure
- * @addr: feature address
- * @subfeature_param: the subfeature parameters, a four bytes array
- *
- * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
- * operation cannot be handled.
- */
-int nand_set_features(struct nand_chip *chip, int addr,
- u8 *subfeature_param)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
- if (!nand_supports_set_features(chip, addr))
- return -ENOTSUPP;
-
- return chip->set_features(mtd, chip, addr, subfeature_param);
-}
-EXPORT_SYMBOL_GPL(nand_set_features);
-
-/**
* nand_reset_data_interface - Reset data interface and timings
* @chip: The NAND chip
* @chipnr: Internal die id
@@ -1229,7 +644,6 @@ EXPORT_SYMBOL_GPL(nand_set_features);
*/
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
if (!chip->setup_data_interface)
@@ -1250,7 +664,7 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
*/
onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
- ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
+ ret = chip->setup_data_interface(chip, chipnr, &chip->data_interface);
if (ret)
pr_err("Failed to configure data interface to SDR timing mode 0\n");
@@ -1272,7 +686,6 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
*/
static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
chip->onfi_timing_mode_default,
};
@@ -1283,16 +696,16 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
/* Change the mode on the chip side (if supported by the NAND chip) */
if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
tmode_param);
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
if (ret)
return ret;
}
/* Change the mode on the controller side */
- ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
+ ret = chip->setup_data_interface(chip, chipnr, &chip->data_interface);
if (ret)
return ret;
@@ -1301,10 +714,10 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
return 0;
memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
tmode_param);
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
if (ret)
goto err_reset_chip;
@@ -1322,9 +735,9 @@ err_reset_chip:
* timing mode.
*/
nand_reset_data_interface(chip, chipnr);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
nand_reset_op(chip);
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
return ret;
}
@@ -1345,7 +758,6 @@ err_reset_chip:
*/
static int nand_init_data_interface(struct nand_chip *chip)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
int modes, mode, ret;
if (!chip->setup_data_interface)
@@ -1356,15 +768,15 @@ static int nand_init_data_interface(struct nand_chip *chip)
* if the NAND does not support ONFI, fallback to the default ONFI
* timing mode.
*/
- modes = onfi_get_async_timing_mode(chip);
- if (modes == ONFI_TIMING_MODE_UNKNOWN) {
+ if (chip->parameters.onfi) {
+ modes = chip->parameters.onfi->async_timing_mode;
+ } else {
if (!chip->onfi_timing_mode_default)
return 0;
modes = GENMASK(chip->onfi_timing_mode_default, 0);
}
-
for (mode = fls(modes) - 1; mode >= 0; mode--) {
ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
if (ret)
@@ -1374,7 +786,7 @@ static int nand_init_data_interface(struct nand_chip *chip)
* Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
* controller supports the requested timings.
*/
- ret = chip->setup_data_interface(mtd,
+ ret = chip->setup_data_interface(chip,
NAND_DATA_IFACE_CHECK_ONLY,
&chip->data_interface);
if (!ret) {
@@ -1554,9 +966,9 @@ int nand_read_page_op(struct nand_chip *chip, unsigned int page,
buf, len);
}
- chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
+ chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
if (len)
- chip->read_buf(mtd, buf, len);
+ chip->legacy.read_buf(chip, buf, len);
return 0;
}
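/*
 * The same conversion pattern repeats through the rest of this patch: the
 * legacy accessors move under chip->legacy and take a nand_chip instead of
 * an mtd_info. A minimal sketch of the driver-side counterpart, assuming a
 * memory-mapped data register; my_read_buf and the use of
 * chip->legacy.IO_ADDR_R are illustrative, not taken from this hunk.
 */
static void my_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
	ioread8_rep(chip->legacy.IO_ADDR_R, buf, len);
}

/* registered as: chip->legacy.read_buf = my_read_buf; */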
@@ -1574,10 +986,9 @@ EXPORT_SYMBOL_GPL(nand_read_page_op);
*
* Returns 0 on success, a negative error code otherwise.
*/
-static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
- unsigned int len)
+int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int i;
u8 *p = buf;
@@ -1603,9 +1014,9 @@ static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
return nand_exec_op(chip, &op);
}
- chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
+ chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
for (i = 0; i < len; i++)
- p[i] = chip->read_byte(mtd);
+ p[i] = chip->legacy.read_byte(chip);
return 0;
}
@@ -1666,9 +1077,9 @@ int nand_change_read_column_op(struct nand_chip *chip,
return nand_exec_op(chip, &op);
}
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
+ chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
if (len)
- chip->read_buf(mtd, buf, len);
+ chip->legacy.read_buf(chip, buf, len);
return 0;
}
@@ -1703,9 +1114,9 @@ int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
mtd->writesize + offset_in_oob,
buf, len);
- chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
+ chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
if (len)
- chip->read_buf(mtd, buf, len);
+ chip->legacy.read_buf(chip, buf, len);
return 0;
}
@@ -1815,10 +1226,10 @@ int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
len, false);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+ chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
if (buf)
- chip->write_buf(mtd, buf, len);
+ chip->legacy.write_buf(chip, buf, len);
return 0;
}
@@ -1835,7 +1246,6 @@ EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
*/
int nand_prog_page_end_op(struct nand_chip *chip)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
u8 status;
@@ -1857,8 +1267,8 @@ int nand_prog_page_end_op(struct nand_chip *chip)
if (ret)
return ret;
} else {
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- ret = chip->waitfunc(mtd, chip);
+ chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->legacy.waitfunc(chip);
if (ret < 0)
return ret;
@@ -1902,10 +1312,11 @@ int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
len, true);
} else {
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
- chip->write_buf(mtd, buf, len);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
+ page);
+ chip->legacy.write_buf(chip, buf, len);
+ chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->legacy.waitfunc(chip);
}
if (status & NAND_STATUS_FAIL)
@@ -1970,9 +1381,9 @@ int nand_change_write_column_op(struct nand_chip *chip,
return nand_exec_op(chip, &op);
}
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
+ chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
if (len)
- chip->write_buf(mtd, buf, len);
+ chip->legacy.write_buf(chip, buf, len);
return 0;
}
@@ -1994,7 +1405,6 @@ EXPORT_SYMBOL_GPL(nand_change_write_column_op);
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int i;
u8 *id = buf;
@@ -2018,10 +1428,10 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
return nand_exec_op(chip, &op);
}
- chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
+ chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
for (i = 0; i < len; i++)
- id[i] = chip->read_byte(mtd);
+ id[i] = chip->legacy.read_byte(chip);
return 0;
}
@@ -2040,8 +1450,6 @@ EXPORT_SYMBOL_GPL(nand_readid_op);
*/
int nand_status_op(struct nand_chip *chip, u8 *status)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
if (chip->exec_op) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
@@ -2058,9 +1466,9 @@ int nand_status_op(struct nand_chip *chip, u8 *status)
return nand_exec_op(chip, &op);
}
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
if (status)
- *status = chip->read_byte(mtd);
+ *status = chip->legacy.read_byte(chip);
return 0;
}
@@ -2079,8 +1487,6 @@ EXPORT_SYMBOL_GPL(nand_status_op);
*/
int nand_exit_status_op(struct nand_chip *chip)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
if (chip->exec_op) {
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
@@ -2090,11 +1496,10 @@ int nand_exit_status_op(struct nand_chip *chip)
return nand_exec_op(chip, &op);
}
- chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
+ chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
return 0;
}
-EXPORT_SYMBOL_GPL(nand_exit_status_op);
/**
* nand_erase_op - Do an erase operation
@@ -2109,7 +1514,6 @@ EXPORT_SYMBOL_GPL(nand_exit_status_op);
*/
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int page = eraseblock <<
(chip->phys_erase_shift - chip->page_shift);
int ret;
@@ -2139,10 +1543,10 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
if (ret)
return ret;
} else {
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+ chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
+ chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
- ret = chip->waitfunc(mtd, chip);
+ ret = chip->legacy.waitfunc(chip);
if (ret < 0)
return ret;
@@ -2171,7 +1575,6 @@ EXPORT_SYMBOL_GPL(nand_erase_op);
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
const void *data)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
const u8 *params = data;
int i, ret;
@@ -2190,11 +1593,11 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
return nand_exec_op(chip, &op);
}
- chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
+ chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- chip->write_byte(mtd, params[i]);
+ chip->legacy.write_byte(chip, params[i]);
- ret = chip->waitfunc(mtd, chip);
+ ret = chip->legacy.waitfunc(chip);
if (ret < 0)
return ret;
@@ -2219,7 +1622,6 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
void *data)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
u8 *params = data;
int i;
@@ -2239,9 +1641,31 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature,
return nand_exec_op(chip, &op);
}
- chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
+ chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- params[i] = chip->read_byte(mtd);
+ params[i] = chip->legacy.read_byte(chip);
+
+ return 0;
+}
+
+static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
+ unsigned int delay_ns)
+{
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_WAIT_RDY(timeout_ms, delay_ns),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ /* Apply delay or wait for ready/busy pin */
+ if (!chip->legacy.dev_ready)
+ udelay(chip->legacy.chip_delay);
+ else
+ nand_wait_ready(chip);
return 0;
}
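/*
 * The instrs[]/NAND_OPERATION() pattern used above is the generic way to
 * describe any ->exec_op() sequence. For comparison, a minimal sketch of a
 * RESET built the same way; the 250ms timeout is a placeholder only (the
 * real nand_reset_op() derives tWB_max/tRST_max from the SDR timings).
 */
static int my_reset_sketch(struct nand_chip *chip)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_RESET, 0),
		NAND_OP_WAIT_RDY(250, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);

	return nand_exec_op(chip, &op);
}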
@@ -2258,8 +1682,6 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature,
*/
int nand_reset_op(struct nand_chip *chip)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
if (chip->exec_op) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
@@ -2272,7 +1694,7 @@ int nand_reset_op(struct nand_chip *chip)
return nand_exec_op(chip, &op);
}
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
return 0;
}
@@ -2294,8 +1716,6 @@ EXPORT_SYMBOL_GPL(nand_reset_op);
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
bool force_8bit)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
if (!len || !buf)
return -EINVAL;
@@ -2315,9 +1735,9 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
unsigned int i;
for (i = 0; i < len; i++)
- p[i] = chip->read_byte(mtd);
+ p[i] = chip->legacy.read_byte(chip);
} else {
- chip->read_buf(mtd, buf, len);
+ chip->legacy.read_buf(chip, buf, len);
}
return 0;
@@ -2340,8 +1760,6 @@ EXPORT_SYMBOL_GPL(nand_read_data_op);
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
if (!len || !buf)
return -EINVAL;
@@ -2361,9 +1779,9 @@ int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int i;
for (i = 0; i < len; i++)
- chip->write_byte(mtd, p[i]);
+ chip->legacy.write_byte(chip, p[i]);
} else {
- chip->write_buf(mtd, buf, len);
+ chip->legacy.write_buf(chip, buf, len);
}
return 0;
@@ -2798,7 +2216,6 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
*/
int nand_reset(struct nand_chip *chip, int chipnr)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_data_interface saved_data_intf = chip->data_interface;
int ret;
@@ -2810,9 +2227,9 @@ int nand_reset(struct nand_chip *chip, int chipnr)
* The CS line has to be released before we can apply the new NAND
* interface settings, hence this weird ->select_chip() dance.
*/
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
ret = nand_reset_op(chip);
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
if (ret)
return ret;
@@ -2836,6 +2253,48 @@ int nand_reset(struct nand_chip *chip, int chipnr)
EXPORT_SYMBOL_GPL(nand_reset);
/**
+ * nand_get_features - wrapper to perform a GET_FEATURE
+ * @chip: NAND chip info structure
+ * @addr: feature address
+ * @subfeature_param: the subfeature parameters, a four-byte array
+ *
+ * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
+ * operation cannot be handled.
+ */
+int nand_get_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ if (!nand_supports_get_features(chip, addr))
+ return -ENOTSUPP;
+
+ if (chip->legacy.get_features)
+ return chip->legacy.get_features(chip, addr, subfeature_param);
+
+ return nand_get_features_op(chip, addr, subfeature_param);
+}
+
+/**
+ * nand_set_features - wrapper to perform a SET_FEATURE
+ * @chip: NAND chip info structure
+ * @addr: feature address
+ * @subfeature_param: the subfeature parameters, a four-byte array
+ *
+ * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
+ * operation cannot be handled.
+ */
+int nand_set_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ if (!nand_supports_set_features(chip, addr))
+ return -ENOTSUPP;
+
+ if (chip->legacy.set_features)
+ return chip->legacy.set_features(chip, addr, subfeature_param);
+
+ return nand_set_features_op(chip, addr, subfeature_param);
+}
+
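/*
 * Usage sketch for the relocated wrappers, mirroring what
 * nand_setup_data_interface() earlier in this file already does; the
 * feature address and parameter layout come from the ONFI definitions.
 * my_set_timing_mode is hypothetical.
 */
static int my_set_timing_mode(struct nand_chip *chip, u8 mode)
{
	u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, };

	return nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, param);
}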
+/**
* nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
* @buf: buffer to test
* @len: buffer length
@@ -2968,7 +2427,6 @@ EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
/**
* nand_read_page_raw_notsupp - dummy read raw page function
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
@@ -2976,16 +2434,14 @@ EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
*
* Returns -ENOTSUPP unconditionally.
*/
-int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
- u8 *buf, int oob_required, int page)
+int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
return -ENOTSUPP;
}
-EXPORT_SYMBOL(nand_read_page_raw_notsupp);
/**
* nand_read_page_raw - [INTERN] read raw page data without ecc
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
@@ -2993,9 +2449,10 @@ EXPORT_SYMBOL(nand_read_page_raw_notsupp);
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
-int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
@@ -3015,7 +2472,6 @@ EXPORT_SYMBOL(nand_read_page_raw);
/**
* nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
@@ -3023,10 +2479,10 @@ EXPORT_SYMBOL(nand_read_page_raw);
*
* We need a special oob layout and handling even when OOB isn't used.
*/
-static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf,
+static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
@@ -3080,15 +2536,15 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
/**
* nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*/
-static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
@@ -3097,10 +2553,10 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
- chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
+ chip->ecc.read_page_raw(chip, buf, 1, page);
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
- chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -3113,7 +2569,7 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
- stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
if (stat < 0) {
mtd->ecc_stats.failed++;
} else {
@@ -3126,17 +2582,16 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
/**
* nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @data_offs: offset of requested data within the page
* @readlen: data length
* @bufpoi: buffer to store read data
* @page: page number to read
*/
-static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
- int page)
+static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
+ uint32_t readlen, uint8_t *bufpoi, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int start_step, end_step, num_steps, ret;
uint8_t *p;
int data_col_addr, i, gaps = 0;
@@ -3165,7 +2620,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
/* Calculate ECC */
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
- chip->ecc.calculate(mtd, p, &chip->ecc.calc_buf[i]);
+ chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
/*
* The performance is faster if we position offsets according to
@@ -3214,7 +2669,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
int stat;
- stat = chip->ecc.correct(mtd, p, &chip->ecc.code_buf[i],
+ stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
&chip->ecc.calc_buf[i]);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
@@ -3238,7 +2693,6 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
/**
* nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
@@ -3246,9 +2700,10 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
*
* Not for syndrome calculating ECC controllers which need a special oob layout.
*/
-static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
@@ -3262,13 +2717,13 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
ret = nand_read_data_op(chip, p, eccsize, false);
if (ret)
return ret;
- chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
}
ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
@@ -3286,7 +2741,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
- stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
@@ -3308,7 +2763,6 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
/**
* nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
@@ -3320,9 +2774,10 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
* the data area, by overwriting the NAND manufacturer bad block markings.
*/
-static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
@@ -3348,15 +2803,15 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
- chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
ret = nand_read_data_op(chip, p, eccsize, false);
if (ret)
return ret;
- chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
- stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
@@ -3378,7 +2833,6 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
/**
* nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
@@ -3387,9 +2841,10 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
* The hw generator calculates the error syndrome automatically. Therefore we
* need a special oob layout and handling.
*/
-static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret, i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
@@ -3405,7 +2860,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
- chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
ret = nand_read_data_op(chip, p, eccsize, false);
if (ret)
@@ -3420,13 +2875,13 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
oob += chip->ecc.prepad;
}
- chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
+ chip->ecc.hwctl(chip, NAND_ECC_READSYN);
ret = nand_read_data_op(chip, oob, eccbytes, false);
if (ret)
return ret;
- stat = chip->ecc.correct(mtd, p, oob, NULL);
+ stat = chip->ecc.correct(chip, p, oob, NULL);
oob += eccbytes;
@@ -3502,17 +2957,15 @@ static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
/**
* nand_setup_read_retry - [INTERN] Set the READ RETRY mode
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @retry_mode: the retry mode to use
*
* Some vendors supply a special command to shift the Vt threshold, to be used
* when there are too many bitflips in a page (i.e., ECC error). After setting
* a new threshold, the host should retry reading the page.
*/
-static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
pr_debug("setting READ RETRY mode %d\n", retry_mode);
if (retry_mode >= chip->read_retries)
@@ -3521,7 +2974,18 @@ static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
if (!chip->setup_read_retry)
return -EOPNOTSUPP;
- return chip->setup_read_retry(mtd, retry_mode);
+ return chip->setup_read_retry(chip, retry_mode);
+}
+
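/*
 * Shape of a vendor ->setup_read_retry() hook after the prototype change;
 * the body is a sketch only (feature address 0x89 is the Micron READ RETRY
 * address used elsewhere in the tree, assumed here purely for illustration).
 */
static int my_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { retry_mode, };

	return nand_set_features(chip, 0x89, param);
}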
+static void nand_wait_readrdy(struct nand_chip *chip)
+{
+ const struct nand_sdr_timings *sdr;
+
+ if (!(chip->options & NAND_NEED_READRDY))
+ return;
+
+ sdr = nand_get_sdr_timings(&chip->data_interface);
+ WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
}
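/*
 * A hedged example of where the tR_max-based timeout above comes from:
 * struct nand_sdr_timings expresses tR_max in picoseconds, so a driver
 * computing its own wait would apply the same conversion (my_wait_tR is
 * hypothetical).
 */
static int my_wait_tR(struct nand_chip *chip)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);

	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	return nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0);
}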
/**
@@ -3549,7 +3013,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
bool ecc_fail = false;
chipnr = (int)(from >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
realpage = (int)(from >> chip->page_shift);
page = realpage & chip->pagemask;
@@ -3589,16 +3053,15 @@ read_retry:
* the read methods return max bitflips per ecc step.
*/
if (unlikely(ops->mode == MTD_OPS_RAW))
- ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
+ ret = chip->ecc.read_page_raw(chip, bufpoi,
oob_required,
page);
else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
!oob)
- ret = chip->ecc.read_subpage(mtd, chip,
- col, bytes, bufpoi,
- page);
+ ret = chip->ecc.read_subpage(chip, col, bytes,
+ bufpoi, page);
else
- ret = chip->ecc.read_page(mtd, chip, bufpoi,
+ ret = chip->ecc.read_page(chip, bufpoi,
oob_required, page);
if (ret < 0) {
if (use_bufpoi)
@@ -3631,18 +3094,12 @@ read_retry:
}
}
- if (chip->options & NAND_NEED_READRDY) {
- /* Apply delay or wait for ready/busy pin */
- if (!chip->dev_ready)
- udelay(chip->chip_delay);
- else
- nand_wait_ready(mtd);
- }
+ nand_wait_readrdy(chip);
if (mtd->ecc_stats.failed - ecc_failures) {
if (retry_mode + 1 < chip->read_retries) {
retry_mode++;
- ret = nand_setup_read_retry(mtd,
+ ret = nand_setup_read_retry(chip,
retry_mode);
if (ret < 0)
break;
@@ -3669,7 +3126,7 @@ read_retry:
/* Reset to retry mode 0 */
if (retry_mode) {
- ret = nand_setup_read_retry(mtd, 0);
+ ret = nand_setup_read_retry(chip, 0);
if (ret < 0)
break;
retry_mode = 0;
@@ -3687,11 +3144,11 @@ read_retry:
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
- chip->select_chip(mtd, -1);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, -1);
+ chip->select_chip(chip, chipnr);
}
}
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
ops->retlen = ops->len - (size_t) readlen;
if (oob)
@@ -3708,12 +3165,13 @@ read_retry:
/**
* nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @page: page number to read
*/
-int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
+int nand_read_oob_std(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
@@ -3721,13 +3179,12 @@ EXPORT_SYMBOL(nand_read_oob_std);
/**
* nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
* with syndromes
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @page: page number to read
*/
-int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int length = mtd->oobsize;
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size;
@@ -3772,16 +3229,16 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
-EXPORT_SYMBOL(nand_read_oob_syndrome);
/**
* nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @page: page number to write
*/
-int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
+int nand_write_oob_std(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
mtd->oobsize);
}
@@ -3790,13 +3247,12 @@ EXPORT_SYMBOL(nand_write_oob_std);
/**
* nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
* with syndrome - only for large page flash
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @page: page number to write
*/
-int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size, length = mtd->oobsize;
int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
@@ -3860,7 +3316,6 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
return nand_prog_page_end_op(chip);
}
-EXPORT_SYMBOL(nand_write_oob_syndrome);
/**
* nand_do_read_oob - [INTERN] NAND read out-of-band
@@ -3890,7 +3345,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
len = mtd_oobavail(mtd, ops);
chipnr = (int)(from >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
/* Shift to get page */
realpage = (int)(from >> chip->page_shift);
@@ -3898,9 +3353,9 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
while (1) {
if (ops->mode == MTD_OPS_RAW)
- ret = chip->ecc.read_oob_raw(mtd, chip, page);
+ ret = chip->ecc.read_oob_raw(chip, page);
else
- ret = chip->ecc.read_oob(mtd, chip, page);
+ ret = chip->ecc.read_oob(chip, page);
if (ret < 0)
break;
@@ -3908,13 +3363,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
len = min(len, readlen);
buf = nand_transfer_oob(mtd, buf, ops, len);
- if (chip->options & NAND_NEED_READRDY) {
- /* Apply delay or wait for ready/busy pin */
- if (!chip->dev_ready)
- udelay(chip->chip_delay);
- else
- nand_wait_ready(mtd);
- }
+ nand_wait_readrdy(chip);
max_bitflips = max_t(unsigned int, max_bitflips, ret);
@@ -3929,11 +3378,11 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
- chip->select_chip(mtd, -1);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, -1);
+ chip->select_chip(chip, chipnr);
}
}
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
ops->oobretlen = ops->ooblen - readlen;
@@ -3979,7 +3428,6 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
/**
* nand_write_page_raw_notsupp - dummy raw page write function
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
@@ -3987,16 +3435,14 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
*
* Returns -ENOTSUPP unconditionally.
*/
-int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
- const u8 *buf, int oob_required, int page)
+int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
{
return -ENOTSUPP;
}
-EXPORT_SYMBOL(nand_write_page_raw_notsupp);
/**
* nand_write_page_raw - [INTERN] raw page write function
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
@@ -4004,9 +3450,10 @@ EXPORT_SYMBOL(nand_write_page_raw_notsupp);
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
-int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
@@ -4026,7 +3473,6 @@ EXPORT_SYMBOL(nand_write_page_raw);
/**
* nand_write_page_raw_syndrome - [INTERN] raw page write function
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
@@ -4034,11 +3480,11 @@ EXPORT_SYMBOL(nand_write_page_raw);
*
* We need a special oob layout and handling even when ECC isn't checked.
*/
-static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int nand_write_page_raw_syndrome(struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
@@ -4091,16 +3537,15 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
}
/**
* nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*/
-static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
+static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
@@ -4109,28 +3554,27 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
/* Software ECC calculation */
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
- chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
- return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
+ return chip->ecc.write_page_raw(chip, buf, 1, page);
}
/**
* nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*/
-static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
+static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
@@ -4142,13 +3586,13 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
ret = nand_write_data_op(chip, p, eccsize, false);
if (ret)
return ret;
- chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
}
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
@@ -4166,7 +3610,6 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
/**
* nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @offset: column address of subpage within the page
* @data_len: data length
@@ -4174,11 +3617,11 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*/
-static int nand_write_subpage_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint32_t offset,
- uint32_t data_len, const uint8_t *buf,
- int oob_required, int page)
+static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
uint8_t *oob_buf = chip->oob_poi;
uint8_t *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
@@ -4195,7 +3638,7 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
for (step = 0; step < ecc_steps; step++) {
/* configure controller for WRITE access */
- chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* write data (untouched subpages already masked by 0xFF) */
ret = nand_write_data_op(chip, buf, ecc_size, false);
@@ -4206,7 +3649,7 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
if ((step < start_step) || (step > end_step))
memset(ecc_calc, 0xff, ecc_bytes);
else
- chip->ecc.calculate(mtd, buf, ecc_calc);
+ chip->ecc.calculate(chip, buf, ecc_calc);
/* mask OOB of un-touched subpages by padding 0xFF */
/* if oob_required, preserve OOB metadata of written subpage */
@@ -4237,7 +3680,6 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
/**
* nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
@@ -4246,11 +3688,10 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
* The hw generator calculates the error syndrome automatically. Therefore we
* need a special oob layout and handling.
*/
-static int nand_write_page_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
+static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
@@ -4263,7 +3704,7 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
return ret;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
ret = nand_write_data_op(chip, p, eccsize, false);
if (ret)
@@ -4278,7 +3719,7 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
oob += chip->ecc.prepad;
}
- chip->ecc.calculate(mtd, p, oob);
+ chip->ecc.calculate(chip, p, oob);
ret = nand_write_data_op(chip, oob, eccbytes, false);
if (ret)
@@ -4331,14 +3772,13 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
subpage = 0;
if (unlikely(raw))
- status = chip->ecc.write_page_raw(mtd, chip, buf,
- oob_required, page);
+ status = chip->ecc.write_page_raw(chip, buf, oob_required,
+ page);
else if (subpage)
- status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
- buf, oob_required, page);
+ status = chip->ecc.write_subpage(chip, offset, data_len, buf,
+ oob_required, page);
else
- status = chip->ecc.write_page(mtd, chip, buf, oob_required,
- page);
+ status = chip->ecc.write_page(chip, buf, oob_required, page);
if (status < 0)
return status;
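/*
 * The ECC page accessors dispatched above now share this prototype; a
 * driver overriding ->write_page() fetches the mtd_info itself when it
 * needs the geometry. A minimal raw-write sketch (my_write_page is
 * hypothetical):
 */
static int my_write_page(struct nand_chip *chip, const u8 *buf,
			 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return nand_prog_page_op(chip, page, 0, buf, mtd->writesize);
}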
@@ -4423,7 +3863,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
column = to & (mtd->writesize - 1);
chipnr = (int)(to >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
@@ -4499,8 +3939,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
- chip->select_chip(mtd, -1);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, -1);
+ chip->select_chip(chip, chipnr);
}
}
@@ -4509,7 +3949,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
ops->oobretlen = ops->ooblen;
err_out:
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
return ret;
}
@@ -4535,10 +3975,10 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
/* Wait for the device to get ready */
- panic_nand_wait(mtd, chip, 400);
+ panic_nand_wait(chip, 400);
memset(&ops, 0, sizeof(ops));
ops.len = len;
@@ -4587,14 +4027,14 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
*/
nand_reset(chip, chipnr);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
/* Shift to get page */
page = (int)(to >> chip->page_shift);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
return -EROFS;
}
@@ -4605,11 +4045,11 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
if (ops->mode == MTD_OPS_RAW)
- status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
+ status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
else
- status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
+ status = chip->ecc.write_oob(chip, page & chip->pagemask);
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
if (status)
return status;
@@ -4656,14 +4096,13 @@ out:
/**
* single_erase - [GENERIC] NAND standard block erase command function
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @page: the page address of the block which will be erased
*
* Standard erase command for NAND chips. Returns NAND status.
*/
-static int single_erase(struct mtd_info *mtd, int page)
+static int single_erase(struct nand_chip *chip, int page)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
unsigned int eraseblock;
/* Send commands to erase a block */
@@ -4681,22 +4120,22 @@ static int single_erase(struct mtd_info *mtd, int page)
*/
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
- return nand_erase_nand(mtd, instr, 0);
+ return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
}
/**
* nand_erase_nand - [INTERN] erase block(s)
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @instr: erase instruction
* @allowbbt: allow erasing the bbt area
*
* Erase one or more blocks.
*/
-int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
+int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
int allowbbt)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int page, status, pages_per_block, ret, chipnr;
- struct nand_chip *chip = mtd_to_nand(mtd);
loff_t len;
pr_debug("%s: start = 0x%012llx, len = %llu\n",
@@ -4717,7 +4156,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
/* Select the NAND device */
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
@@ -4748,7 +4187,11 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
(page + pages_per_block))
chip->pagebuf = -1;
- status = chip->erase(mtd, page & chip->pagemask);
+ if (chip->legacy.erase)
+ status = chip->legacy.erase(chip,
+ page & chip->pagemask);
+ else
+ status = single_erase(chip, page & chip->pagemask);
/* See if block erase succeeded */
if (status) {
@@ -4767,8 +4210,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
/* Check, if we cross a chip boundary */
if (len && !(page & chip->pagemask)) {
chipnr++;
- chip->select_chip(mtd, -1);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, -1);
+ chip->select_chip(chip, chipnr);
}
}
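/*
 * Driver-side counterpart of the erase dispatch above: only controllers
 * with a non-standard erase sequence set chip->legacy.erase; the default
 * path now goes through single_erase(). Sketch only, using the same
 * page-to-eraseblock math as single_erase() (my_erase is hypothetical).
 */
static int my_erase(struct nand_chip *chip, int page)
{
	unsigned int eb = page >> (chip->phys_erase_shift - chip->page_shift);

	/* a real driver would insert its controller-specific sequence here */
	return nand_erase_op(chip, eb);
}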
@@ -4776,7 +4219,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
erase_exit:
/* Deselect and wake up anyone waiting on the device */
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
nand_release_device(mtd);
/* Return more or less happy */
@@ -4812,11 +4255,11 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
/* Select the NAND device */
nand_get_device(mtd, FL_READING);
- chip->select_chip(mtd, chipnr);
+ chip->select_chip(chip, chipnr);
ret = nand_block_checkbad(mtd, offs, 0);
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
nand_release_device(mtd);
return ret;
@@ -4879,51 +4322,6 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
}
/**
- * nand_default_set_features- [REPLACEABLE] set NAND chip features
- * @mtd: MTD device structure
- * @chip: nand chip info structure
- * @addr: feature address.
- * @subfeature_param: the subfeature parameters, a four bytes array.
- */
-static int nand_default_set_features(struct mtd_info *mtd,
- struct nand_chip *chip, int addr,
- uint8_t *subfeature_param)
-{
- return nand_set_features_op(chip, addr, subfeature_param);
-}
-
-/**
- * nand_default_get_features- [REPLACEABLE] get NAND chip features
- * @mtd: MTD device structure
- * @chip: nand chip info structure
- * @addr: feature address.
- * @subfeature_param: the subfeature parameters, a four bytes array.
- */
-static int nand_default_get_features(struct mtd_info *mtd,
- struct nand_chip *chip, int addr,
- uint8_t *subfeature_param)
-{
- return nand_get_features_op(chip, addr, subfeature_param);
-}
-
-/**
- * nand_get_set_features_notsupp - set/get features stub returning -ENOTSUPP
- * @mtd: MTD device structure
- * @chip: nand chip info structure
- * @addr: feature address.
- * @subfeature_param: the subfeature parameters, a four bytes array.
- *
- * Should be used by NAND controller drivers that do not support the SET/GET
- * FEATURES operations.
- */
-int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
- int addr, u8 *subfeature_param)
-{
- return -ENOTSUPP;
-}
-EXPORT_SYMBOL(nand_get_set_features_notsupp);
-
-/**
* nand_suspend - [MTD Interface] Suspend the NAND flash
* @mtd: MTD device structure
*/
@@ -4960,44 +4358,7 @@ static void nand_shutdown(struct mtd_info *mtd)
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
- unsigned int busw = chip->options & NAND_BUSWIDTH_16;
-
- /* check for proper chip_delay setup, set 20us if not */
- if (!chip->chip_delay)
- chip->chip_delay = 20;
-
- /* check, if a user supplied command function given */
- if (!chip->cmdfunc && !chip->exec_op)
- chip->cmdfunc = nand_command;
-
- /* check, if a user supplied wait function given */
- if (chip->waitfunc == NULL)
- chip->waitfunc = nand_wait;
-
- if (!chip->select_chip)
- chip->select_chip = nand_select_chip;
-
- /* set for ONFI nand */
- if (!chip->set_features)
- chip->set_features = nand_default_set_features;
- if (!chip->get_features)
- chip->get_features = nand_default_get_features;
-
- /* If called twice, pointers that depend on busw may need to be reset */
- if (!chip->read_byte || chip->read_byte == nand_read_byte)
- chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
- if (!chip->read_word)
- chip->read_word = nand_read_word;
- if (!chip->block_bad)
- chip->block_bad = nand_block_bad;
- if (!chip->block_markbad)
- chip->block_markbad = nand_default_block_markbad;
- if (!chip->write_buf || chip->write_buf == nand_write_buf)
- chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
- if (!chip->write_byte || chip->write_byte == nand_write_byte)
- chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
- if (!chip->read_buf || chip->read_buf == nand_read_buf)
- chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
+ nand_legacy_set_defaults(chip);
if (!chip->controller) {
chip->controller = &chip->dummy_controller;
@@ -5009,7 +4370,7 @@ static void nand_set_defaults(struct nand_chip *chip)
}
/* Sanitize ONFI strings so we can safely print them */
-static void sanitize_string(uint8_t *s, size_t len)
+void sanitize_string(uint8_t *s, size_t len)
{
ssize_t i;
@@ -5026,390 +4387,6 @@ static void sanitize_string(uint8_t *s, size_t len)
strim(s);
}
-static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
-{
- int i;
- while (len--) {
- crc ^= *p++ << 8;
- for (i = 0; i < 8; i++)
- crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
- }
-
- return crc;
-}
-
-/* Parse the Extended Parameter Page. */
-static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
- struct nand_onfi_params *p)
-{
- struct onfi_ext_param_page *ep;
- struct onfi_ext_section *s;
- struct onfi_ext_ecc_info *ecc;
- uint8_t *cursor;
- int ret;
- int len;
- int i;
-
- len = le16_to_cpu(p->ext_param_page_length) * 16;
- ep = kmalloc(len, GFP_KERNEL);
- if (!ep)
- return -ENOMEM;
-
- /* Send our own NAND_CMD_PARAM. */
- ret = nand_read_param_page_op(chip, 0, NULL, 0);
- if (ret)
- goto ext_out;
-
- /* Use the Change Read Column command to skip the ONFI param pages. */
- ret = nand_change_read_column_op(chip,
- sizeof(*p) * p->num_of_param_pages,
- ep, len, true);
- if (ret)
- goto ext_out;
-
- ret = -EINVAL;
- if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
- != le16_to_cpu(ep->crc))) {
- pr_debug("fail in the CRC.\n");
- goto ext_out;
- }
-
- /*
- * Check the signature.
- * Do not strictly follow the ONFI spec, maybe changed in future.
- */
- if (strncmp(ep->sig, "EPPS", 4)) {
- pr_debug("The signature is invalid.\n");
- goto ext_out;
- }
-
- /* find the ECC section. */
- cursor = (uint8_t *)(ep + 1);
- for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
- s = ep->sections + i;
- if (s->type == ONFI_SECTION_TYPE_2)
- break;
- cursor += s->length * 16;
- }
- if (i == ONFI_EXT_SECTION_MAX) {
- pr_debug("We can not find the ECC section.\n");
- goto ext_out;
- }
-
- /* get the info we want. */
- ecc = (struct onfi_ext_ecc_info *)cursor;
-
- if (!ecc->codeword_size) {
- pr_debug("Invalid codeword size\n");
- goto ext_out;
- }
-
- chip->ecc_strength_ds = ecc->ecc_bits;
- chip->ecc_step_ds = 1 << ecc->codeword_size;
- ret = 0;
-
-ext_out:
- kfree(ep);
- return ret;
-}
-
-/*
- * Recover data with bit-wise majority
- */
-static void nand_bit_wise_majority(const void **srcbufs,
- unsigned int nsrcbufs,
- void *dstbuf,
- unsigned int bufsize)
-{
- int i, j, k;
-
- for (i = 0; i < bufsize; i++) {
- u8 val = 0;
-
- for (j = 0; j < 8; j++) {
- unsigned int cnt = 0;
-
- for (k = 0; k < nsrcbufs; k++) {
- const u8 *srcbuf = srcbufs[k];
-
- if (srcbuf[i] & BIT(j))
- cnt++;
- }
-
- if (cnt > nsrcbufs / 2)
- val |= BIT(j);
- }
-
- ((u8 *)dstbuf)[i] = val;
- }
-}
-
-/*
- * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
- */
-static int nand_flash_detect_onfi(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_onfi_params *p;
- struct onfi_params *onfi;
- int onfi_version = 0;
- char id[4];
- int i, ret, val;
-
- /* Try ONFI for unknown chip or LP */
- ret = nand_readid_op(chip, 0x20, id, sizeof(id));
- if (ret || strncmp(id, "ONFI", 4))
- return 0;
-
- /* ONFI chip: allocate a buffer to hold its parameter page */
- p = kzalloc((sizeof(*p) * 3), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- ret = nand_read_param_page_op(chip, 0, NULL, 0);
- if (ret) {
- ret = 0;
- goto free_onfi_param_page;
- }
-
- for (i = 0; i < 3; i++) {
- ret = nand_read_data_op(chip, &p[i], sizeof(*p), true);
- if (ret) {
- ret = 0;
- goto free_onfi_param_page;
- }
-
- if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
- le16_to_cpu(p->crc)) {
- if (i)
- memcpy(p, &p[i], sizeof(*p));
- break;
- }
- }
-
- if (i == 3) {
- const void *srcbufs[3] = {p, p + 1, p + 2};
-
- pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
- nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p,
- sizeof(*p));
-
- if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) !=
- le16_to_cpu(p->crc)) {
- pr_err("ONFI parameter recovery failed, aborting\n");
- goto free_onfi_param_page;
- }
- }
-
- if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
- chip->manufacturer.desc->ops->fixup_onfi_param_page)
- chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);
-
- /* Check version */
- val = le16_to_cpu(p->revision);
- if (val & ONFI_VERSION_2_3)
- onfi_version = 23;
- else if (val & ONFI_VERSION_2_2)
- onfi_version = 22;
- else if (val & ONFI_VERSION_2_1)
- onfi_version = 21;
- else if (val & ONFI_VERSION_2_0)
- onfi_version = 20;
- else if (val & ONFI_VERSION_1_0)
- onfi_version = 10;
-
- if (!onfi_version) {
- pr_info("unsupported ONFI version: %d\n", val);
- goto free_onfi_param_page;
- }
-
- sanitize_string(p->manufacturer, sizeof(p->manufacturer));
- sanitize_string(p->model, sizeof(p->model));
- chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
- if (!chip->parameters.model) {
- ret = -ENOMEM;
- goto free_onfi_param_page;
- }
-
- mtd->writesize = le32_to_cpu(p->byte_per_page);
-
- /*
- * pages_per_block and blocks_per_lun may not be a power-of-2 size
- * (don't ask me who thought of this...). MTD assumes that these
- * dimensions will be power-of-2, so just truncate the remaining area.
- */
- mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
- mtd->erasesize *= mtd->writesize;
-
- mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
-
- /* See erasesize comment */
- chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
- chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
- chip->bits_per_cell = p->bits_per_cell;
-
- chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
- chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
-
- if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
- chip->options |= NAND_BUSWIDTH_16;
-
- if (p->ecc_bits != 0xff) {
- chip->ecc_strength_ds = p->ecc_bits;
- chip->ecc_step_ds = 512;
- } else if (onfi_version >= 21 &&
- (le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
-
- /*
- * The nand_flash_detect_ext_param_page() uses the
- * Change Read Column command which maybe not supported
- * by the chip->cmdfunc. So try to update the chip->cmdfunc
- * now. We do not replace user supplied command function.
- */
- if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
- chip->cmdfunc = nand_command_lp;
-
- /* The Extended Parameter Page is supported since ONFI 2.1. */
- if (nand_flash_detect_ext_param_page(chip, p))
- pr_warn("Failed to detect ONFI extended param page\n");
- } else {
- pr_warn("Could not retrieve ONFI ECC requirements\n");
- }
-
- /* Save some parameters from the parameter page for future use */
- if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
- chip->parameters.supports_set_get_features = true;
- bitmap_set(chip->parameters.get_feature_list,
- ONFI_FEATURE_ADDR_TIMING_MODE, 1);
- bitmap_set(chip->parameters.set_feature_list,
- ONFI_FEATURE_ADDR_TIMING_MODE, 1);
- }
-
- onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
- if (!onfi) {
- ret = -ENOMEM;
- goto free_model;
- }
-
- onfi->version = onfi_version;
- onfi->tPROG = le16_to_cpu(p->t_prog);
- onfi->tBERS = le16_to_cpu(p->t_bers);
- onfi->tR = le16_to_cpu(p->t_r);
- onfi->tCCS = le16_to_cpu(p->t_ccs);
- onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
- onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
- memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
- chip->parameters.onfi = onfi;
-
- /* Identification done, free the full ONFI parameter page and exit */
- kfree(p);
-
- return 1;
-
-free_model:
- kfree(chip->parameters.model);
-free_onfi_param_page:
- kfree(p);
-
- return ret;
-}
-
-/*
- * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
- */
-static int nand_flash_detect_jedec(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_jedec_params *p;
- struct jedec_ecc_info *ecc;
- int jedec_version = 0;
- char id[5];
- int i, val, ret;
-
- /* Try JEDEC for unknown chip or LP */
- ret = nand_readid_op(chip, 0x40, id, sizeof(id));
- if (ret || strncmp(id, "JEDEC", sizeof(id)))
- return 0;
-
- /* JEDEC chip: allocate a buffer to hold its parameter page */
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
- if (ret) {
- ret = 0;
- goto free_jedec_param_page;
- }
-
- for (i = 0; i < 3; i++) {
- ret = nand_read_data_op(chip, p, sizeof(*p), true);
- if (ret) {
- ret = 0;
- goto free_jedec_param_page;
- }
-
- if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
- le16_to_cpu(p->crc))
- break;
- }
-
- if (i == 3) {
- pr_err("Could not find valid JEDEC parameter page; aborting\n");
- goto free_jedec_param_page;
- }
-
- /* Check version */
- val = le16_to_cpu(p->revision);
- if (val & (1 << 2))
- jedec_version = 10;
- else if (val & (1 << 1))
- jedec_version = 1; /* vendor specific version */
-
- if (!jedec_version) {
- pr_info("unsupported JEDEC version: %d\n", val);
- goto free_jedec_param_page;
- }
-
- sanitize_string(p->manufacturer, sizeof(p->manufacturer));
- sanitize_string(p->model, sizeof(p->model));
- chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
- if (!chip->parameters.model) {
- ret = -ENOMEM;
- goto free_jedec_param_page;
- }
-
- mtd->writesize = le32_to_cpu(p->byte_per_page);
-
- /* Please reference to the comment for nand_flash_detect_onfi. */
- mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
- mtd->erasesize *= mtd->writesize;
-
- mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
-
- /* Please reference to the comment for nand_flash_detect_onfi. */
- chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
- chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
- chip->bits_per_cell = p->bits_per_cell;
-
- if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
- chip->options |= NAND_BUSWIDTH_16;
-
- /* ECC info */
- ecc = &p->ecc_info[0];
-
- if (ecc->codeword_size >= 9) {
- chip->ecc_strength_ds = ecc->ecc_bits;
- chip->ecc_step_ds = 1 << ecc->codeword_size;
- } else {
- pr_warn("Invalid codeword size\n");
- }
-
-free_jedec_param_page:
- kfree(p);
- return ret;
-}
-
/*
* nand_id_has_period - Check if an ID string has a given wraparound period
* @id_data: the ID string
@@ -5625,6 +4602,12 @@ static void nand_manufacturer_cleanup(struct nand_chip *chip)
chip->manufacturer.desc->ops->cleanup(chip);
}
+static const char *
+nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
+{
+ return manufacturer ? manufacturer->name : "Unknown";
+}
+
/*
* Get the flash and manufacturer id and lookup if the type is supported.
*/
@@ -5645,7 +4628,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
return ret;
/* Select the device */
- chip->select_chip(mtd, 0);
+ chip->select_chip(chip, 0);
/* Send the command for reading device ID */
ret = nand_readid_op(chip, 0, id_data, 2);
@@ -5709,14 +4692,14 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
if (!type->name || !type->pagesize) {
/* Check if the chip is ONFI compliant */
- ret = nand_flash_detect_onfi(chip);
+ ret = nand_onfi_detect(chip);
if (ret < 0)
return ret;
else if (ret)
goto ident_done;
/* Check if the chip is JEDEC compliant */
- ret = nand_flash_detect_jedec(chip);
+ ret = nand_jedec_detect(chip);
if (ret < 0)
return ret;
else if (ret)
@@ -5783,11 +4766,8 @@ ident_done:
chip->options |= NAND_ROW_ADDR_3;
chip->badblockbits = 8;
- chip->erase = single_erase;
- /* Do not replace user supplied command function! */
- if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
- chip->cmdfunc = nand_command_lp;
+ nand_legacy_adjust_cmdfunc(chip);
pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
maf_id, dev_id);
@@ -5953,7 +4933,7 @@ static int nand_dt_init(struct nand_chip *chip)
/**
* nand_scan_ident - Scan for the NAND device
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @maxchips: number of chips to scan for
* @table: alternative NAND ID table
*
@@ -5965,11 +4945,12 @@ static int nand_dt_init(struct nand_chip *chip)
* prevented dynamic allocations during this phase, which was inconvenient and
* has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
*/
-static int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
struct nand_flash_dev *table)
{
- int i, nand_maf_id, nand_dev_id;
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int nand_maf_id, nand_dev_id;
+ unsigned int i;
int ret;
/* Enforce the right timings for reset/detection */
@@ -5982,21 +4963,15 @@ static int nand_scan_ident(struct mtd_info *mtd, int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
- /*
- * ->cmdfunc() is legacy and will only be used if ->exec_op() is not
- * populated.
- */
- if (!chip->exec_op) {
- /*
- * Default functions assigned for ->cmdfunc() and
- * ->select_chip() both expect ->cmd_ctrl() to be populated.
- */
- if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
- pr_err("->cmd_ctrl() should be provided\n");
- return -EINVAL;
- }
+ if (chip->exec_op && !chip->select_chip) {
+ pr_err("->select_chip() is mandatory when implementing ->exec_op()\n");
+ return -EINVAL;
}
+ ret = nand_legacy_check_hooks(chip);
+ if (ret)
+ return ret;
+
/* Set the default functions */
nand_set_defaults(chip);
@@ -6005,14 +4980,14 @@ static int nand_scan_ident(struct mtd_info *mtd, int maxchips,
if (ret) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
pr_warn("No NAND device found\n");
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
return ret;
}
nand_maf_id = chip->id.data[0];
nand_dev_id = chip->id.data[1];
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
@@ -6021,15 +4996,15 @@ static int nand_scan_ident(struct mtd_info *mtd, int maxchips,
/* See comment in nand_get_flash_type for reset */
nand_reset(chip, i);
- chip->select_chip(mtd, i);
+ chip->select_chip(chip, i);
/* Send the command for reading device ID */
nand_readid_op(chip, 0, id, sizeof(id));
/* Read manufacturer and device IDs */
if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
break;
}
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
}
if (i > 1)
pr_info("%d chips detected\n", i);
@@ -6070,6 +5045,10 @@ static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
ecc->size = 256;
ecc->bytes = 3;
ecc->strength = 1;
+
+ if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC))
+ ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
+
return 0;
case NAND_ECC_BCH:
if (!mtd_nand_has_bch()) {
@@ -6423,15 +5402,15 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
/**
* nand_scan_tail - Scan for the NAND device
- * @mtd: MTD device structure
+ * @chip: NAND chip object
*
* This is the second phase of the normal nand_scan() function. It fills out
* all the uninitialized function pointers with the defaults and scans for a
* bad block table if appropriate.
*/
-static int nand_scan_tail(struct mtd_info *mtd)
+static int nand_scan_tail(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i;
@@ -6451,9 +5430,9 @@ static int nand_scan_tail(struct mtd_info *mtd)
* to explicitly select the relevant die when interacting with the NAND
* chip.
*/
- chip->select_chip(mtd, 0);
+ chip->select_chip(chip, 0);
ret = nand_manufacturer_init(chip);
- chip->select_chip(mtd, -1);
+ chip->select_chip(chip, -1);
if (ret)
goto err_free_buf;
@@ -6770,33 +5749,31 @@ static void nand_detach(struct nand_chip *chip)
/**
* nand_scan_with_ids - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: number of chips to scan for. @nand_scan_ident() will not be run if
- * this parameter is zero (useful for specific drivers that must
- * handle this part of the process themselves, e.g docg4).
+ * @chip: NAND chip object
+ * @maxchips: number of chips to scan for.
* @ids: optional flash IDs table
*
* This fills out all the uninitialized function pointers with the defaults.
* The flash ID is read and the mtd/chip structures are filled with the
* appropriate values.
*/
-int nand_scan_with_ids(struct mtd_info *mtd, int maxchips,
+int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
struct nand_flash_dev *ids)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
- if (maxchips) {
- ret = nand_scan_ident(mtd, maxchips, ids);
- if (ret)
- return ret;
- }
+ if (!maxchips)
+ return -EINVAL;
+
+ ret = nand_scan_ident(chip, maxchips, ids);
+ if (ret)
+ return ret;
ret = nand_attach(chip);
if (ret)
goto cleanup_ident;
- ret = nand_scan_tail(mtd);
+ ret = nand_scan_tail(chip);
if (ret)
goto detach_chip;
@@ -6847,12 +5824,12 @@ EXPORT_SYMBOL_GPL(nand_cleanup);
/**
* nand_release - [NAND Interface] Unregister the MTD device and free resources
* held by the NAND device
- * @mtd: MTD device structure
+ * @chip: NAND chip object
*/
-void nand_release(struct mtd_info *mtd)
+void nand_release(struct nand_chip *chip)
{
- mtd_device_unregister(mtd);
- nand_cleanup(mtd_to_nand(mtd));
+ mtd_device_unregister(nand_to_mtd(chip));
+ nand_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_release);
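
A minimal sketch (editorial, not part of this patch) of what a controller driver looks like on the new chip-centric API shown above: every hook now receives the struct nand_chip directly instead of an mtd_info that had to be converted back with mtd_to_nand(). All my_* identifiers and the chip-enable register layout are hypothetical; only the prototypes and the core calls (nand_scan(), nand_cleanup(), mtd_device_register()) follow the patched API.

#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>

struct my_nand_ctrl {
	struct nand_chip chip;
	void __iomem *ce_reg;	/* hypothetical chip-enable register */
};

static void my_select_chip(struct nand_chip *chip, int cs)
{
	struct my_nand_ctrl *ctrl =
		container_of(chip, struct my_nand_ctrl, chip);

	/* cs == -1 requests deselection, any other value selects die 'cs' */
	writel(cs < 0 ? 0 : BIT(cs), ctrl->ce_reg);
}

static int my_nand_attach(struct my_nand_ctrl *ctrl)
{
	struct nand_chip *chip = &ctrl->chip;
	int ret;

	chip->select_chip = my_select_chip;

	/* nand_scan() now takes the chip and rejects maxchips == 0 */
	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
	if (ret)
		nand_cleanup(chip);

	return ret;
}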
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index 39db352f8757..98a826838b60 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -61,13 +61,14 @@
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/bbm.h>
-#include <linux/mtd/rawnand.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/string.h>
+#include "internals.h"
+
#define BBT_BLOCK_GOOD 0x00
#define BBT_BLOCK_WORN 0x01
#define BBT_BLOCK_RESERVED 0x02
@@ -683,14 +684,13 @@ static void mark_bbt_block_bad(struct nand_chip *this,
struct nand_bbt_descr *td,
int chip, int block)
{
- struct mtd_info *mtd = nand_to_mtd(this);
loff_t to;
int res;
bbt_mark_entry(this, block, BBT_BLOCK_WORN);
to = (loff_t)block << this->bbt_erase_shift;
- res = this->block_markbad(mtd, to);
+ res = nand_markbad_bbm(this, to);
if (res)
pr_warn("nand_bbt: error %d while marking block %d bad\n",
res, block);
@@ -854,7 +854,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
memset(&einfo, 0, sizeof(einfo));
einfo.addr = to;
einfo.len = 1 << this->bbt_erase_shift;
- res = nand_erase_nand(mtd, &einfo, 1);
+ res = nand_erase_nand(this, &einfo, 1);
if (res < 0) {
pr_warn("nand_bbt: error while erasing BBT block %d\n",
res);
@@ -1388,12 +1388,11 @@ EXPORT_SYMBOL(nand_create_bbt);
/**
* nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @offs: offset in the device
*/
-int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
+int nand_isreserved_bbt(struct nand_chip *this, loff_t offs)
{
- struct nand_chip *this = mtd_to_nand(mtd);
int block;
block = (int)(offs >> this->bbt_erase_shift);
@@ -1402,13 +1401,12 @@ int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
/**
* nand_isbad_bbt - [NAND Interface] Check if a block is bad
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @offs: offset in the device
* @allowbbt: allow access to bad block table region
*/
-int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
+int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt)
{
- struct nand_chip *this = mtd_to_nand(mtd);
int block, res;
block = (int)(offs >> this->bbt_erase_shift);
@@ -1430,12 +1428,12 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
/**
* nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @offs: offset of the bad block
*/
-int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
+int nand_markbad_bbt(struct nand_chip *this, loff_t offs)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
int block, ret = 0;
block = (int)(offs >> this->bbt_erase_shift);
diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c
index b7387ace567a..574c0ca16160 100644
--- a/drivers/mtd/nand/raw/nand_bch.c
+++ b/drivers/mtd/nand/raw/nand_bch.c
@@ -43,14 +43,13 @@ struct nand_bch_control {
/**
* nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
- * @mtd: MTD block structure
+ * @chip: NAND chip object
* @buf: input buffer with raw data
* @code: output buffer with ECC
*/
-int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+int nand_bch_calculate_ecc(struct nand_chip *chip, const unsigned char *buf,
unsigned char *code)
{
- const struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_bch_control *nbc = chip->ecc.priv;
unsigned int i;
@@ -67,17 +66,16 @@ EXPORT_SYMBOL(nand_bch_calculate_ecc);
/**
* nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @mtd: MTD block structure
+ * @chip: NAND chip object
* @buf: raw data read from the chip
* @read_ecc: ECC from the chip
* @calc_ecc: the ECC calculated from raw data
*
* Detect and correct bit errors for a data byte block
*/
-int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
+int nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
- const struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_bch_control *nbc = chip->ecc.priv;
unsigned int *errloc = nbc->errloc;
int i, count;
diff --git a/drivers/mtd/nand/raw/nand_ecc.c b/drivers/mtd/nand/raw/nand_ecc.c
index 8e132edbc5ce..4f4347533058 100644
--- a/drivers/mtd/nand/raw/nand_ecc.c
+++ b/drivers/mtd/nand/raw/nand_ecc.c
@@ -132,9 +132,10 @@ static const char addressbits[256] = {
* @buf: input buffer with raw data
* @eccsize: data bytes per ECC step (256 or 512)
* @code: output buffer with ECC
+ * @sm_order: Smart Media byte ordering
*/
void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
- unsigned char *code)
+ unsigned char *code, bool sm_order)
{
int i;
const uint32_t *bp = (uint32_t *)buf;
@@ -330,45 +331,26 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
* possible, but benchmarks showed that on the system this is developed
* the code below is the fastest
*/
-#ifdef CONFIG_MTD_NAND_ECC_SMC
- code[0] =
- (invparity[rp7] << 7) |
- (invparity[rp6] << 6) |
- (invparity[rp5] << 5) |
- (invparity[rp4] << 4) |
- (invparity[rp3] << 3) |
- (invparity[rp2] << 2) |
- (invparity[rp1] << 1) |
- (invparity[rp0]);
- code[1] =
- (invparity[rp15] << 7) |
- (invparity[rp14] << 6) |
- (invparity[rp13] << 5) |
- (invparity[rp12] << 4) |
- (invparity[rp11] << 3) |
- (invparity[rp10] << 2) |
- (invparity[rp9] << 1) |
- (invparity[rp8]);
-#else
- code[1] =
- (invparity[rp7] << 7) |
- (invparity[rp6] << 6) |
- (invparity[rp5] << 5) |
- (invparity[rp4] << 4) |
- (invparity[rp3] << 3) |
- (invparity[rp2] << 2) |
- (invparity[rp1] << 1) |
- (invparity[rp0]);
- code[0] =
- (invparity[rp15] << 7) |
- (invparity[rp14] << 6) |
- (invparity[rp13] << 5) |
- (invparity[rp12] << 4) |
- (invparity[rp11] << 3) |
- (invparity[rp10] << 2) |
- (invparity[rp9] << 1) |
- (invparity[rp8]);
-#endif
+ if (sm_order) {
+ code[0] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) | (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) | (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) | (invparity[rp0]);
+ code[1] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) | (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) | (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) | (invparity[rp8]);
+ } else {
+ code[1] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) | (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) | (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) | (invparity[rp0]);
+ code[0] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) | (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) | (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) | (invparity[rp8]);
+ }
+
if (eccsize_mult == 1)
code[2] =
(invparity[par & 0xf0] << 7) |
@@ -394,15 +376,16 @@ EXPORT_SYMBOL(__nand_calculate_ecc);
/**
* nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
* block
- * @mtd: MTD block structure
+ * @chip: NAND chip object
* @buf: input buffer with raw data
* @code: output buffer with ECC
*/
-int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+int nand_calculate_ecc(struct nand_chip *chip, const unsigned char *buf,
unsigned char *code)
{
- __nand_calculate_ecc(buf,
- mtd_to_nand(mtd)->ecc.size, code);
+ bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;
+
+ __nand_calculate_ecc(buf, chip->ecc.size, code, sm_order);
return 0;
}
@@ -414,12 +397,13 @@ EXPORT_SYMBOL(nand_calculate_ecc);
* @read_ecc: ECC from the chip
* @calc_ecc: the ECC calculated from raw data
* @eccsize: data bytes per ECC step (256 or 512)
+ * @sm_order: Smart Media byte order
*
* Detect and correct a 1 bit error for eccsize byte block
*/
int __nand_correct_data(unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc,
- unsigned int eccsize)
+ unsigned int eccsize, bool sm_order)
{
unsigned char b0, b1, b2, bit_addr;
unsigned int byte_addr;
@@ -431,13 +415,14 @@ int __nand_correct_data(unsigned char *buf,
* we might need the xor result more than once,
* so keep them in a local var
*/
-#ifdef CONFIG_MTD_NAND_ECC_SMC
- b0 = read_ecc[0] ^ calc_ecc[0];
- b1 = read_ecc[1] ^ calc_ecc[1];
-#else
- b0 = read_ecc[1] ^ calc_ecc[1];
- b1 = read_ecc[0] ^ calc_ecc[0];
-#endif
+ if (sm_order) {
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
+ } else {
+ b0 = read_ecc[1] ^ calc_ecc[1];
+ b1 = read_ecc[0] ^ calc_ecc[0];
+ }
+
b2 = read_ecc[2] ^ calc_ecc[2];
/* check if there are any bitfaults */
@@ -491,18 +476,20 @@ EXPORT_SYMBOL(__nand_correct_data);
/**
* nand_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @mtd: MTD block structure
+ * @chip: NAND chip object
* @buf: raw data read from the chip
* @read_ecc: ECC from the chip
* @calc_ecc: the ECC calculated from raw data
*
* Detect and correct a 1 bit error for 256/512 byte block
*/
-int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+int nand_correct_data(struct nand_chip *chip, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
- return __nand_correct_data(buf, read_ecc, calc_ecc,
- mtd_to_nand(mtd)->ecc.size);
+ bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;
+
+ return __nand_correct_data(buf, read_ecc, calc_ecc, chip->ecc.size,
+ sm_order);
}
EXPORT_SYMBOL(nand_correct_data);
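
As the hunks above show, the Smart Media byte order is now a per-chip runtime flag instead of the CONFIG_MTD_NAND_ECC_SMC compile-time switch. An editorial sketch of one 256-byte software Hamming step under the new signatures (my_hamming_step is a hypothetical helper, not from the patch):

#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/rawnand.h>

static int my_hamming_step(struct nand_chip *chip, u8 *buf, u8 *stored_ecc)
{
	bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;
	u8 calc[3];

	/* Recompute the 3 ECC bytes over the data just read back */
	__nand_calculate_ecc(buf, 256, calc, sm_order);

	/* 0: clean, 1: single bitflip corrected, negative: uncorrectable */
	return __nand_correct_data(buf, stored_ecc, calc, 256, sm_order);
}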
diff --git a/drivers/mtd/nand/raw/nand_esmt.c b/drivers/mtd/nand/raw/nand_esmt.c
new file mode 100644
index 000000000000..96f039a83bc8
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_esmt.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Toradex AG
+ *
+ * Author: Marcel Ziswiler <marcel.ziswiler@toradex.com>
+ */
+
+#include <linux/mtd/rawnand.h>
+#include "internals.h"
+
+static void esmt_nand_decode_id(struct nand_chip *chip)
+{
+ nand_decode_ext_id(chip);
+
+ /* Extract ECC requirements from 5th id byte. */
+ if (chip->id.len >= 5 && nand_is_slc(chip)) {
+ chip->ecc_step_ds = 512;
+ switch (chip->id.data[4] & 0x3) {
+ case 0x0:
+ chip->ecc_strength_ds = 4;
+ break;
+ case 0x1:
+ chip->ecc_strength_ds = 2;
+ break;
+ case 0x2:
+ chip->ecc_strength_ds = 1;
+ break;
+ default:
+ WARN(1, "Could not get ECC info");
+ chip->ecc_step_ds = 0;
+ break;
+ }
+ }
+}
+
+static int esmt_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops esmt_nand_manuf_ops = {
+ .detect = esmt_nand_decode_id,
+ .init = esmt_nand_init,
+};
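
A worked example of the decode above (the ID bytes are hypothetical): for id.data[] = { 0xc8, 0xd1, 0x80, 0x95, 0x42 }, the low two bits give 0x42 & 0x3 == 0x2, so the core records a requirement of 1 bit of correction per 512-byte codeword (ecc_strength_ds = 1, ecc_step_ds = 512).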
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index 4ffbb26e76d6..ac1b5c103968 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -15,10 +15,11 @@
* GNU General Public License for more details.
*/
-#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include "internals.h"
+
#define NAND_HYNIX_CMD_SET_PARAMS 0x36
#define NAND_HYNIX_CMD_APPLY_PARAMS 0x16
@@ -79,8 +80,6 @@ static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
if (chip->exec_op) {
struct nand_op_instr instrs[] = {
NAND_OP_CMD(cmd, 0),
@@ -90,14 +89,13 @@ static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
return nand_exec_op(chip, &op);
}
- chip->cmdfunc(mtd, cmd, -1, -1);
+ chip->legacy.cmdfunc(chip, cmd, -1, -1);
return 0;
}
static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
u16 column = ((u16)addr << 8) | addr;
if (chip->exec_op) {
@@ -110,15 +108,14 @@ static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
return nand_exec_op(chip, &op);
}
- chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
- chip->write_byte(mtd, val);
+ chip->legacy.cmdfunc(chip, NAND_CMD_NONE, column, -1);
+ chip->legacy.write_byte(chip, val);
return 0;
}
-static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+static int hynix_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
const u8 *values;
int i, ret;
diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c
index 5423c3bb388e..ea5a342cd91e 100644
--- a/drivers/mtd/nand/raw/nand_ids.c
+++ b/drivers/mtd/nand/raw/nand_ids.c
@@ -6,9 +6,11 @@
* published by the Free Software Foundation.
*
*/
-#include <linux/mtd/rawnand.h>
+
#include <linux/sizes.h>
+#include "internals.h"
+
#define LP_OPTIONS 0
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
@@ -169,21 +171,21 @@ struct nand_flash_dev nand_flash_ids[] = {
/* Manufacturer IDs */
static const struct nand_manufacturer nand_manufacturers[] = {
- {NAND_MFR_TOSHIBA, "Toshiba", &toshiba_nand_manuf_ops},
- {NAND_MFR_ESMT, "ESMT"},
- {NAND_MFR_SAMSUNG, "Samsung", &samsung_nand_manuf_ops},
+ {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops},
+ {NAND_MFR_ATO, "ATO"},
+ {NAND_MFR_EON, "Eon"},
+ {NAND_MFR_ESMT, "ESMT", &esmt_nand_manuf_ops},
{NAND_MFR_FUJITSU, "Fujitsu"},
- {NAND_MFR_NATIONAL, "National"},
- {NAND_MFR_RENESAS, "Renesas"},
- {NAND_MFR_STMICRO, "ST Micro"},
{NAND_MFR_HYNIX, "Hynix", &hynix_nand_manuf_ops},
- {NAND_MFR_MICRON, "Micron", &micron_nand_manuf_ops},
- {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops},
+ {NAND_MFR_INTEL, "Intel"},
{NAND_MFR_MACRONIX, "Macronix", &macronix_nand_manuf_ops},
- {NAND_MFR_EON, "Eon"},
+ {NAND_MFR_MICRON, "Micron", &micron_nand_manuf_ops},
+ {NAND_MFR_NATIONAL, "National"},
+ {NAND_MFR_RENESAS, "Renesas"},
+ {NAND_MFR_SAMSUNG, "Samsung", &samsung_nand_manuf_ops},
{NAND_MFR_SANDISK, "SanDisk"},
- {NAND_MFR_INTEL, "Intel"},
- {NAND_MFR_ATO, "ATO"},
+ {NAND_MFR_STMICRO, "ST Micro"},
+ {NAND_MFR_TOSHIBA, "Toshiba", &toshiba_nand_manuf_ops},
{NAND_MFR_WINBOND, "Winbond"},
};
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
new file mode 100644
index 000000000000..5c26492c841d
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all JEDEC helpers.
+ */
+
+#include <linux/slab.h>
+
+#include "internals.h"
+
+/*
+ * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
+ */
+int nand_jedec_detect(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_jedec_params *p;
+ struct jedec_ecc_info *ecc;
+ int jedec_version = 0;
+ char id[5];
+ int i, val, ret;
+
+ /* Try JEDEC for unknown chip or LP */
+ ret = nand_readid_op(chip, 0x40, id, sizeof(id));
+ if (ret || strncmp(id, "JEDEC", sizeof(id)))
+ return 0;
+
+ /* JEDEC chip: allocate a buffer to hold its parameter page */
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
+ if (ret) {
+ ret = 0;
+ goto free_jedec_param_page;
+ }
+
+ for (i = 0; i < 3; i++) {
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret) {
+ ret = 0;
+ goto free_jedec_param_page;
+ }
+
+ if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
+ le16_to_cpu(p->crc))
+ break;
+ }
+
+ if (i == 3) {
+ pr_err("Could not find valid JEDEC parameter page; aborting\n");
+ goto free_jedec_param_page;
+ }
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & (1 << 2))
+ jedec_version = 10;
+ else if (val & (1 << 1))
+ jedec_version = 1; /* vendor specific version */
+
+ if (!jedec_version) {
+ pr_info("unsupported JEDEC version: %d\n", val);
+ goto free_jedec_param_page;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_jedec_param_page;
+ }
+
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+
+ /* Please refer to the comment in nand_onfi_detect(). */
+ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize *= mtd->writesize;
+
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+
+ /* Please refer to the comment in nand_onfi_detect(). */
+ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ chip->bits_per_cell = p->bits_per_cell;
+
+ if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ /* ECC info */
+ ecc = &p->ecc_info[0];
+
+ if (ecc->codeword_size >= 9) {
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
+ } else {
+ pr_warn("Invalid codeword size\n");
+ }
+
+free_jedec_param_page:
+ kfree(p);
+ return ret;
+}
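
An editorial sketch of the return convention the core relies on when it chains the detectors in nand_detect() (1 means "identified", 0 means "not this standard, keep probing", a negative value is fatal). Note these helpers are private to the raw NAND core via internals.h; my_identify is hypothetical:

static int my_identify(struct nand_chip *chip)
{
	int ret;

	ret = nand_onfi_detect(chip);		/* 1: found, 0: not ONFI, <0: error */
	if (!ret)
		ret = nand_jedec_detect(chip);	/* same convention */
	if (ret < 0)
		return ret;

	/* neither standard matched: fall back to the static ID tables */
	return ret ? 0 : -ENODEV;
}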
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
new file mode 100644
index 000000000000..c5ddc86cd98c
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all legacy helpers/code that should be removed
+ * at some point.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+
+#include "internals.h"
+
+/**
+ * nand_read_byte - [DEFAULT] read one byte from the chip
+ * @chip: NAND chip object
+ *
+ * Default read function for 8bit buswidth
+ */
+static uint8_t nand_read_byte(struct nand_chip *chip)
+{
+ return readb(chip->legacy.IO_ADDR_R);
+}
+
+/**
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @chip: NAND chip object
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
+ *
+ */
+static uint8_t nand_read_byte16(struct nand_chip *chip)
+{
+ return (uint8_t) cpu_to_le16(readw(chip->legacy.IO_ADDR_R));
+}
+
+/**
+ * nand_select_chip - [DEFAULT] control CE line
+ * @chip: NAND chip object
+ * @chipnr: chipnumber to select, -1 for deselect
+ *
+ * Default select function for 1 chip devices.
+ */
+static void nand_select_chip(struct nand_chip *chip, int chipnr)
+{
+ switch (chipnr) {
+ case -1:
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ 0 | NAND_CTRL_CHANGE);
+ break;
+ case 0:
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/**
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @chip: NAND chip object
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0]
+ */
+static void nand_write_byte(struct nand_chip *chip, uint8_t byte)
+{
+ chip->legacy.write_buf(chip, &byte, 1);
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @chip: NAND chip object
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
+ */
+static void nand_write_byte16(struct nand_chip *chip, uint8_t byte)
+{
+ uint16_t word = byte;
+
+ /*
+ * It's not entirely clear what should happen to I/O[15:8] when writing
+ * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+ *
+ * When the host supports a 16-bit bus width, only data is
+ * transferred at the 16-bit width. All address and command line
+ * transfers shall use only the lower 8-bits of the data bus. During
+ * command transfers, the host may place any value on the upper
+ * 8-bits of the data bus. During address transfers, the host shall
+ * set the upper 8-bits of the data bus to 00h.
+ *
+ * One user of the write_byte callback is nand_set_features. The
+ * four parameters are specified to be written to I/O[7:0], but this is
+ * neither an address nor a command transfer. Let's assume a 0 on the
+ * upper I/O lines is OK.
+ */
+ chip->legacy.write_buf(chip, (uint8_t *)&word, 2);
+}
+
+/**
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @chip: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 8bit buswidth.
+ */
+static void nand_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ iowrite8_rep(chip->legacy.IO_ADDR_W, buf, len);
+}
+
+/**
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @chip: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 8bit buswidth.
+ */
+static void nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ ioread8_rep(chip->legacy.IO_ADDR_R, buf, len);
+}
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @chip: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16bit buswidth.
+ */
+static void nand_write_buf16(struct nand_chip *chip, const uint8_t *buf,
+ int len)
+{
+ u16 *p = (u16 *) buf;
+
+ iowrite16_rep(chip->legacy.IO_ADDR_W, p, len >> 1);
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @chip: NAND chip object
+ * @buf: buffer to store date
+ * @len: number of bytes to read
+ *
+ * Default read function for 16bit buswidth.
+ */
+static void nand_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ u16 *p = (u16 *) buf;
+
+ ioread16_rep(chip->legacy.IO_ADDR_R, p, len >> 1);
+}
+
+/**
+ * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout
+ *
+ * Helper function for nand_wait_ready used when needing to wait in interrupt
+ * context.
+ */
+static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int i;
+
+ /* Wait for the device to get ready */
+ for (i = 0; i < timeo; i++) {
+ if (chip->legacy.dev_ready(chip))
+ break;
+ touch_softlockup_watchdog();
+ mdelay(1);
+ }
+}
+
+/**
+ * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @chip: NAND chip object
+ *
+ * Wait for the ready pin after a command, and warn if a timeout occurs.
+ */
+void nand_wait_ready(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned long timeo = 400;
+
+ if (in_interrupt() || oops_in_progress)
+ return panic_nand_wait_ready(mtd, timeo);
+
+ /* Wait until command is processed or timeout occurs */
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ if (chip->legacy.dev_ready(chip))
+ return;
+ cond_resched();
+ } while (time_before(jiffies, timeo));
+
+ if (!chip->legacy.dev_ready(chip))
+ pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
+}
+EXPORT_SYMBOL_GPL(nand_wait_ready);
+
+/**
+ * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout in ms
+ *
+ * Wait for status ready (i.e. command done) or timeout.
+ */
+static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+ register struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
+
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ touch_softlockup_watchdog();
+ } while (time_before(jiffies, timeo));
+}
+
+/**
+ * nand_command - [DEFAULT] Send command to NAND device
+ * @chip: NAND chip object
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This function is used for small page devices
+ * (512 Bytes per page).
+ */
+static void nand_command(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
+
+ /* Write out the command to the device */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ chip->legacy.cmd_ctrl(chip, readcmd, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (command != NAND_CMD_NONE)
+ chip->legacy.cmd_ctrl(chip, command, ctrl);
+
+ /* Address cycle, when necessary */
+ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->legacy.cmd_ctrl(chip, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (page_addr != -1) {
+ chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->legacy.cmd_ctrl(chip, page_addr >> 8, ctrl);
+ if (chip->options & NAND_ROW_ADDR_3)
+ chip->legacy.cmd_ctrl(chip, page_addr >> 16, ctrl);
+ }
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers; status and sequential
+ * in need no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_NONE:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ case NAND_CMD_SET_FEATURES:
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->legacy.dev_ready)
+ break;
+ udelay(chip->legacy.chip_delay);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(mtd, 250);
+ return;
+
+ /* This applies to read commands */
+ case NAND_CMD_READ0:
+ /*
+ * READ0 is sometimes used to exit GET STATUS mode. When this
+ * is the case no address cycles are requested, and we can use
+ * this information to detect that we should not wait for the
+ * device to be ready.
+ */
+ if (column == -1 && page_addr == -1)
+ return;
+
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!chip->legacy.dev_ready) {
+ udelay(chip->legacy.chip_delay);
+ return;
+ }
+ }
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(chip);
+}
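
As an editorial trace (hypothetical page 0x1234, column 0, no third row-address byte): nand_command(chip, NAND_CMD_READ0, 0, 0x1234) above expands to roughly this ->legacy.cmd_ctrl() sequence, followed by the 100ns tWB delay and nand_wait_ready():

chip->legacy.cmd_ctrl(chip, NAND_CMD_READ0, NAND_CTRL_CLE | NAND_CTRL_CHANGE);
chip->legacy.cmd_ctrl(chip, 0x00, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
chip->legacy.cmd_ctrl(chip, 0x1234, NAND_CTRL_ALE);      /* bus sees 0x34 */
chip->legacy.cmd_ctrl(chip, 0x1234 >> 8, NAND_CTRL_ALE); /* bus sees 0x12 */
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);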
+
+static void nand_ccs_delay(struct nand_chip *chip)
+{
+ /*
+ * The controller already takes care of waiting for tCCS when the RNDIN
+ * or RNDOUT command is sent, return directly.
+ */
+ if (!(chip->options & NAND_WAIT_TCCS))
+ return;
+
+ /*
+ * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
+ * (which should be safe for all NANDs).
+ */
+ if (chip->setup_data_interface)
+ ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
+ else
+ ndelay(500);
+}
+
+/**
+ * nand_command_lp - [DEFAULT] Send command to NAND large page device
+ * @chip: NAND chip object
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ */
+static void nand_command_lp(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* Command latch cycle */
+ if (command != NAND_CMD_NONE)
+ chip->legacy.cmd_ctrl(chip, command,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+
+ if (column != -1 || page_addr != -1) {
+ int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->legacy.cmd_ctrl(chip, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+
+ /* Only output a single addr cycle for 8bits opcodes. */
+ if (!nand_opcode_8bits(command))
+ chip->legacy.cmd_ctrl(chip, column >> 8, ctrl);
+ }
+ if (page_addr != -1) {
+ chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
+ chip->legacy.cmd_ctrl(chip, page_addr >> 8,
+ NAND_NCE | NAND_ALE);
+ if (chip->options & NAND_ROW_ADDR_3)
+ chip->legacy.cmd_ctrl(chip, page_addr >> 16,
+ NAND_NCE | NAND_ALE);
+ }
+ }
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers; sequential in and
+ * status need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_NONE:
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ case NAND_CMD_SET_FEATURES:
+ return;
+
+ case NAND_CMD_RNDIN:
+ nand_ccs_delay(chip);
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->legacy.dev_ready)
+ break;
+ udelay(chip->legacy.chip_delay);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(mtd, 250);
+ return;
+
+ case NAND_CMD_RNDOUT:
+ /* No ready / busy check necessary */
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_RNDOUTSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ nand_ccs_delay(chip);
+ return;
+
+ case NAND_CMD_READ0:
+ /*
+ * READ0 is sometimes used to exit GET STATUS mode. When this
+ * is the case no address cycles are requested, and we can use
+ * this information to detect that READSTART should not be
+ * issued.
+ */
+ if (column == -1 && page_addr == -1)
+ return;
+
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_READSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay.
+ */
+ if (!chip->legacy.dev_ready) {
+ udelay(chip->legacy.chip_delay);
+ return;
+ }
+ }
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(chip);
+}
+
+/**
+ * nand_get_set_features_notsupp - set/get features stub returning -ENOTSUPP
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four bytes array.
+ *
+ * Should be used by NAND controller drivers that do not support the SET/GET
+ * FEATURES operations.
+ */
+int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(nand_get_set_features_notsupp);
+
+/**
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @chip: NAND chip structure
+ *
+ * Wait for command done. This applies to erase and program only.
+ */
+static int nand_wait(struct nand_chip *chip)
+{
+ unsigned long timeo = 400;
+ u8 status;
+ int ret;
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in any
+ * case on any machine.
+ */
+ ndelay(100);
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ if (in_interrupt() || oops_in_progress)
+ panic_nand_wait(chip, timeo);
+ else {
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ if (chip->legacy.dev_ready) {
+ if (chip->legacy.dev_ready(chip))
+ break;
+ } else {
+ ret = nand_read_data_op(chip, &status,
+ sizeof(status), true);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, timeo));
+ }
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return ret;
+
+ /* This can happen in case of timeout or buggy dev_ready */
+ WARN_ON(!(status & NAND_STATUS_READY));
+ return status;
+}
+
+void nand_legacy_set_defaults(struct nand_chip *chip)
+{
+ unsigned int busw = chip->options & NAND_BUSWIDTH_16;
+
+ if (chip->exec_op)
+ return;
+
+ /* check for proper chip_delay setup, set 20us if not */
+ if (!chip->legacy.chip_delay)
+ chip->legacy.chip_delay = 20;
+
+ /* check if a user-supplied command function was given */
+ if (!chip->legacy.cmdfunc && !chip->exec_op)
+ chip->legacy.cmdfunc = nand_command;
+
+ /* check if a user-supplied wait function was given */
+ if (chip->legacy.waitfunc == NULL)
+ chip->legacy.waitfunc = nand_wait;
+
+ if (!chip->select_chip)
+ chip->select_chip = nand_select_chip;
+
+ /* If called twice, pointers that depend on busw may need to be reset */
+ if (!chip->legacy.read_byte || chip->legacy.read_byte == nand_read_byte)
+ chip->legacy.read_byte = busw ? nand_read_byte16 : nand_read_byte;
+ if (!chip->legacy.write_buf || chip->legacy.write_buf == nand_write_buf)
+ chip->legacy.write_buf = busw ? nand_write_buf16 : nand_write_buf;
+ if (!chip->legacy.write_byte || chip->legacy.write_byte == nand_write_byte)
+ chip->legacy.write_byte = busw ? nand_write_byte16 : nand_write_byte;
+ if (!chip->legacy.read_buf || chip->legacy.read_buf == nand_read_buf)
+ chip->legacy.read_buf = busw ? nand_read_buf16 : nand_read_buf;
+}
+
+void nand_legacy_adjust_cmdfunc(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Do not replace user supplied command function! */
+ if (mtd->writesize > 512 && chip->legacy.cmdfunc == nand_command)
+ chip->legacy.cmdfunc = nand_command_lp;
+}
+
+int nand_legacy_check_hooks(struct nand_chip *chip)
+{
+ /*
+ * ->legacy.cmdfunc() is legacy and will only be used if ->exec_op() is
+ * not populated.
+ */
+ if (chip->exec_op)
+ return 0;
+
+ /*
+ * Default functions assigned for ->legacy.cmdfunc() and
+ * ->select_chip() both expect ->legacy.cmd_ctrl() to be populated.
+ */
+ if ((!chip->legacy.cmdfunc || !chip->select_chip) &&
+ !chip->legacy.cmd_ctrl) {
+ pr_err("->legacy.cmd_ctrl() should be provided\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
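
An editorial sketch of the minimal legacy contract nand_legacy_check_hooks() enforces above: with no ->exec_op(), a driver may provide only ->legacy.cmd_ctrl() plus the IO_ADDR pointers and let nand_legacy_set_defaults() fill in cmdfunc/waitfunc/read_buf/... during nand_scan(). The my_* base addresses and latch layout are hypothetical:

#include <linux/io.h>
#include <linux/mtd/rawnand.h>

static void __iomem *my_io_base, *my_cle_addr, *my_ale_addr;

static void my_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
{
	if (dat == NAND_CMD_NONE)
		return;

	if (ctrl & NAND_CLE)
		writeb(dat, my_cle_addr);	/* command latch cycle */
	else if (ctrl & NAND_ALE)
		writeb(dat, my_ale_addr);	/* address latch cycle */
}

static void my_init_legacy(struct nand_chip *chip)
{
	chip->legacy.IO_ADDR_R = my_io_base;
	chip->legacy.IO_ADDR_W = my_io_base;
	chip->legacy.cmd_ctrl = my_cmd_ctrl;
	/* everything else is defaulted by nand_legacy_set_defaults() */
}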
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 49c546c97c6f..358dcc957bb2 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -15,7 +15,7 @@
* GNU General Public License for more details.
*/
-#include <linux/mtd/rawnand.h>
+#include "internals.h"
/*
* Macronix AC series does not support using SET/GET_FEATURES to change
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index f5dc0a7a2456..b85e1c13b79e 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -15,9 +15,10 @@
* GNU General Public License for more details.
*/
-#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
+#include "internals.h"
+
/*
* Special Micron status bit 3 indicates that the block has been
* corrected by on-die ECC and should be rewritten.
@@ -74,9 +75,8 @@ struct micron_nand {
struct micron_on_die_ecc ecc;
};
-static int micron_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+static int micron_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
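As a counterpart to the setter above, the current retry mode could in principle be read back with the matching GET FEATURES call. A hedged sketch; whether the first subfeature parameter reflects the active mode is part-specific:

static int micron_read_retry_mode_get(struct nand_chip *chip, u8 *mode)
{
	u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0 };
	int ret;

	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
	if (ret)
		return ret;

	*mode = feature[0];
	return 0;
}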
@@ -290,10 +290,10 @@ static int micron_nand_on_die_ecc_status_8(struct nand_chip *chip, u8 status)
}
static int
-micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required,
- int page)
+micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
u8 status;
int ret, max_bitflips = 0;
@@ -332,9 +332,8 @@ out:
}
static int
-micron_nand_write_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
+micron_nand_write_page_on_die_ecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
int ret;
@@ -342,7 +341,7 @@ micron_nand_write_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
if (ret)
return ret;
- ret = nand_write_page_raw(mtd, chip, buf, oob_required, page);
+ ret = nand_write_page_raw(chip, buf, oob_required, page);
micron_nand_on_die_ecc_setup(chip, false);
return ret;
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
new file mode 100644
index 000000000000..d8184cf591ad
--- /dev/null
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all ONFI helpers.
+ */
+
+#include <linux/slab.h>
+
+#include "internals.h"
+
+u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 8;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
+ }
+
+ return crc;
+}
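This is CRC-16 with polynomial 0x8005 (x^16 + x^15 + x^2 + 1), seeded with ONFI_CRC_BASE. A sketch of how one 256-byte parameter page copy would be validated with it; the CRC sits in the last two bytes, little-endian, and the 0x4F4E seed value is an assumption quoted from the ONFI definition:

static bool onfi_param_page_crc_ok(const u8 *page)
{
	u16 crc = onfi_crc16(0x4F4E /* ONFI_CRC_BASE, assumed */, page, 254);

	/* bytes 254-255 hold the little-endian CRC of bytes 0-253 */
	return crc == (u16)(page[254] | (page[255] << 8));
}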
+
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ struct onfi_ext_param_page *ep;
+ struct onfi_ext_section *s;
+ struct onfi_ext_ecc_info *ecc;
+ uint8_t *cursor;
+ int ret;
+ int len;
+ int i;
+
+ len = le16_to_cpu(p->ext_param_page_length) * 16;
+ ep = kmalloc(len, GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ /* Send our own NAND_CMD_PARAM. */
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
+ goto ext_out;
+
+ /* Use the Change Read Column command to skip the ONFI param pages. */
+ ret = nand_change_read_column_op(chip,
+ sizeof(*p) * p->num_of_param_pages,
+ ep, len, true);
+ if (ret)
+ goto ext_out;
+
+ ret = -EINVAL;
+ if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+ != le16_to_cpu(ep->crc))) {
+ pr_debug("fail in the CRC.\n");
+ goto ext_out;
+ }
+
+ /*
+ * Check the signature.
+ * This check does not strictly follow the ONFI spec and may be changed in the future.
+ */
+ if (strncmp(ep->sig, "EPPS", 4)) {
+ pr_debug("The signature is invalid.\n");
+ goto ext_out;
+ }
+
+ /* find the ECC section. */
+ cursor = (uint8_t *)(ep + 1);
+ for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+ s = ep->sections + i;
+ if (s->type == ONFI_SECTION_TYPE_2)
+ break;
+ cursor += s->length * 16;
+ }
+ if (i == ONFI_EXT_SECTION_MAX) {
+ pr_debug("We can not find the ECC section.\n");
+ goto ext_out;
+ }
+
+ /* get the info we want. */
+ ecc = (struct onfi_ext_ecc_info *)cursor;
+
+ if (!ecc->codeword_size) {
+ pr_debug("Invalid codeword size\n");
+ goto ext_out;
+ }
+
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
+ ret = 0;
+
+ext_out:
+ kfree(ep);
+ return ret;
+}
+
+/*
+ * Recover data with bit-wise majority
+ */
+static void nand_bit_wise_majority(const void **srcbufs,
+ unsigned int nsrcbufs,
+ void *dstbuf,
+ unsigned int bufsize)
+{
+ int i, j, k;
+
+ for (i = 0; i < bufsize; i++) {
+ u8 val = 0;
+
+ for (j = 0; j < 8; j++) {
+ unsigned int cnt = 0;
+
+ for (k = 0; k < nsrcbufs; k++) {
+ const u8 *srcbuf = srcbufs[k];
+
+ if (srcbuf[i] & BIT(j))
+ cnt++;
+ }
+
+ if (cnt > nsrcbufs / 2)
+ val |= BIT(j);
+ }
+
+ ((u8 *)dstbuf)[i] = val;
+ }
+}
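A quick worked example of the majority vote with three noisy copies of a single byte; each flipped bit is outvoted two to one, so the original value is recovered:

static void nand_bit_wise_majority_demo(void)
{
	const u8 a[1] = { 0xA5 };	/* 1010 0101: reference copy */
	const u8 b[1] = { 0xA4 };	/* bit 0 flipped */
	const u8 c[1] = { 0xE5 };	/* bit 6 flipped */
	const void *srcs[3] = { a, b, c };
	u8 out;

	nand_bit_wise_majority(srcs, 3, &out, 1);
	/* out == 0xA5: every bit position has at least two good votes */
}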
+
+/*
+ * Check if the NAND chip is ONFI compliant; returns 1 if it is, 0 otherwise.
+ */
+int nand_onfi_detect(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_onfi_params *p;
+ struct onfi_params *onfi;
+ int onfi_version = 0;
+ char id[4];
+ int i, ret, val;
+
+ /* Try ONFI for an unknown chip or a large-page (LP) device */
+ ret = nand_readid_op(chip, 0x20, id, sizeof(id));
+ if (ret || strncmp(id, "ONFI", 4))
+ return 0;
+
+ /* ONFI chip: allocate a buffer to hold its parameter page */
+ p = kzalloc((sizeof(*p) * 3), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret) {
+ ret = 0;
+ goto free_onfi_param_page;
+ }
+
+ for (i = 0; i < 3; i++) {
+ ret = nand_read_data_op(chip, &p[i], sizeof(*p), true);
+ if (ret) {
+ ret = 0;
+ goto free_onfi_param_page;
+ }
+
+ if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
+ le16_to_cpu(p->crc)) {
+ if (i)
+ memcpy(p, &p[i], sizeof(*p));
+ break;
+ }
+ }
+
+ if (i == 3) {
+ const void *srcbufs[3] = {p, p + 1, p + 2};
+
+ pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
+ nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p,
+ sizeof(*p));
+
+ if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) !=
+ le16_to_cpu(p->crc)) {
+ pr_err("ONFI parameter recovery failed, aborting\n");
+ goto free_onfi_param_page;
+ }
+ }
+
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->fixup_onfi_param_page)
+ chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & ONFI_VERSION_2_3)
+ onfi_version = 23;
+ else if (val & ONFI_VERSION_2_2)
+ onfi_version = 22;
+ else if (val & ONFI_VERSION_2_1)
+ onfi_version = 21;
+ else if (val & ONFI_VERSION_2_0)
+ onfi_version = 20;
+ else if (val & ONFI_VERSION_1_0)
+ onfi_version = 10;
+
+ if (!onfi_version) {
+ pr_info("unsupported ONFI version: %d\n", val);
+ goto free_onfi_param_page;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_onfi_param_page;
+ }
+
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+
+ /*
+ * pages_per_block and blocks_per_lun may not be a power-of-2 size
+ * (don't ask me who thought of this...). MTD assumes that these
+ * dimensions will be power-of-2, so just truncate the remaining area.
+ */
+ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize *= mtd->writesize;
+
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+
+ /* See erasesize comment */
+ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ chip->bits_per_cell = p->bits_per_cell;
+
+ chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
+ chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);
+
+ if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (p->ecc_bits != 0xff) {
+ chip->ecc_strength_ds = p->ecc_bits;
+ chip->ecc_step_ds = 512;
+ } else if (onfi_version >= 21 &&
+ (le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+ /*
+ * nand_flash_detect_ext_param_page() uses the Change Read
+ * Column command, which may not be supported by the current
+ * chip->legacy.cmdfunc, so try to update chip->legacy.cmdfunc
+ * now. A user-supplied command function is never replaced.
+ */
+ nand_legacy_adjust_cmdfunc(chip);
+
+ /* The Extended Parameter Page is supported since ONFI 2.1. */
+ if (nand_flash_detect_ext_param_page(chip, p))
+ pr_warn("Failed to detect ONFI extended param page\n");
+ } else {
+ pr_warn("Could not retrieve ONFI ECC requirements\n");
+ }
+
+ /* Save some parameters from the parameter page for future use */
+ if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
+ chip->parameters.supports_set_get_features = true;
+ bitmap_set(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ bitmap_set(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ }
+
+ onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
+ if (!onfi) {
+ ret = -ENOMEM;
+ goto free_model;
+ }
+
+ onfi->version = onfi_version;
+ onfi->tPROG = le16_to_cpu(p->t_prog);
+ onfi->tBERS = le16_to_cpu(p->t_bers);
+ onfi->tR = le16_to_cpu(p->t_r);
+ onfi->tCCS = le16_to_cpu(p->t_ccs);
+ onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
+ onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
+ memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
+ chip->parameters.onfi = onfi;
+
+ /* Identification done, free the full ONFI parameter page and exit */
+ kfree(p);
+
+ return 1;
+
+free_model:
+ kfree(chip->parameters.model);
+free_onfi_param_page:
+ kfree(p);
+
+ return ret;
+}
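The geometry truncation in nand_onfi_detect() is easy to spot-check: fls() returns the 1-based index of the most significant set bit, so 1 << (fls(x) - 1) rounds x down to a power of two. A small sketch with hypothetical reported values:

static u32 onfi_round_down_pow2(u32 reported)
{
	return 1 << (fls(reported) - 1);
}

/*
 * onfi_round_down_pow2(96)   == 64   (fls(96)   == 7)
 * onfi_round_down_pow2(64)   == 64   (already a power of two)
 * onfi_round_down_pow2(1000) == 512  (fls(1000) == 10)
 */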
diff --git a/drivers/mtd/nand/raw/nand_samsung.c b/drivers/mtd/nand/raw/nand_samsung.c
index ef022f62f74c..e46d4c492ad8 100644
--- a/drivers/mtd/nand/raw/nand_samsung.c
+++ b/drivers/mtd/nand/raw/nand_samsung.c
@@ -15,7 +15,7 @@
* GNU General Public License for more details.
*/
-#include <linux/mtd/rawnand.h>
+#include "internals.h"
static void samsung_nand_decode_id(struct nand_chip *chip)
{
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
index ebc7b5f76f77..bea3062d71d6 100644
--- a/drivers/mtd/nand/raw/nand_timings.c
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -11,7 +11,8 @@
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/mtd/rawnand.h>
+
+#include "internals.h"
#define ONFI_DYN_TIMING_MAX U16_MAX
@@ -271,20 +272,6 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
};
/**
- * onfi_async_timing_mode_to_sdr_timings - [NAND Interface] Retrieve NAND
- * timings according to the given ONFI timing mode
- * @mode: ONFI timing mode
- */
-const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
-{
- if (mode < 0 || mode >= ARRAY_SIZE(onfi_sdr_timings))
- return ERR_PTR(-EINVAL);
-
- return &onfi_sdr_timings[mode].timings.sdr;
-}
-EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
-
-/**
* onfi_fill_data_interface - [NAND Interface] Initialize a data interface from
* given ONFI mode
* @mode: The ONFI timing mode
@@ -339,4 +326,3 @@ int onfi_fill_data_interface(struct nand_chip *chip,
return 0;
}
-EXPORT_SYMBOL(onfi_fill_data_interface);
diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c
index ab43f027cd23..d068163b64b3 100644
--- a/drivers/mtd/nand/raw/nand_toshiba.c
+++ b/drivers/mtd/nand/raw/nand_toshiba.c
@@ -15,7 +15,88 @@
* GNU General Public License for more details.
*/
-#include <linux/mtd/rawnand.h>
+#include "internals.h"
+
+/* Bit for detecting BENAND */
+#define TOSHIBA_NAND_ID4_IS_BENAND BIT(7)
+
+/* Recommended to rewrite for BENAND */
+#define TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED BIT(3)
+
+static int toshiba_nand_benand_eccstatus(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+ unsigned int max_bitflips = 0;
+ u8 status;
+
+ /* Check Status */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL) {
+ /* uncorrected */
+ mtd->ecc_stats.failed++;
+ } else if (status & TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED) {
+ /* corrected */
+ max_bitflips = mtd->bitflip_threshold;
+ mtd->ecc_stats.corrected += max_bitflips;
+ }
+
+ return max_bitflips;
+}
+
+static int
+toshiba_nand_read_page_benand(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = nand_read_page_raw(chip, buf, oob_required, page);
+ if (ret)
+ return ret;
+
+ return toshiba_nand_benand_eccstatus(chip);
+}
+
+static int
+toshiba_nand_read_subpage_benand(struct nand_chip *chip, uint32_t data_offs,
+ uint32_t readlen, uint8_t *bufpoi, int page)
+{
+ int ret;
+
+ ret = nand_read_page_op(chip, page, data_offs,
+ bufpoi + data_offs, readlen);
+ if (ret)
+ return ret;
+
+ return toshiba_nand_benand_eccstatus(chip);
+}
+
+static void toshiba_nand_benand_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /*
+ * On BENAND, the entire OOB region can be used by the MTD user.
+ * The calculated ECC bytes are stored in a separate,
+ * isolated area that is not accessible to users.
+ * This is why chip->ecc.bytes = 0.
+ */
+ chip->ecc.bytes = 0;
+ chip->ecc.size = 512;
+ chip->ecc.strength = 8;
+ chip->ecc.read_page = toshiba_nand_read_page_benand;
+ chip->ecc.read_subpage = toshiba_nand_read_subpage_benand;
+ chip->ecc.write_page = nand_write_page_raw;
+ chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
+ chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
+
+ chip->options |= NAND_SUBPAGE_READ;
+
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+}
static void toshiba_nand_decode_id(struct nand_chip *chip)
{
@@ -68,6 +149,11 @@ static int toshiba_nand_init(struct nand_chip *chip)
if (nand_is_slc(chip))
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+ /* Check that chip is BENAND and ECC mode is on-die */
+ if (nand_is_slc(chip) && chip->ecc.mode == NAND_ECC_ON_DIE &&
+ chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND)
+ toshiba_nand_benand_init(chip);
+
return 0;
}
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 71ac034aee9c..c452819f6123 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -656,7 +656,7 @@ static int __init init_nandsim(struct mtd_info *mtd)
}
/* Force mtd to not do delays */
- chip->chip_delay = 0;
+ chip->legacy.chip_delay = 0;
/* Initialize the NAND flash parameters */
ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
@@ -1872,9 +1872,8 @@ static void switch_state(struct nandsim *ns)
}
}
-static u_char ns_nand_read_byte(struct mtd_info *mtd)
+static u_char ns_nand_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct nandsim *ns = nand_get_controller_data(chip);
u_char outb = 0x00;
@@ -1934,9 +1933,8 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
return outb;
}
-static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
+static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct nandsim *ns = nand_get_controller_data(chip);
/* Sanity and correctness checks */
@@ -2089,9 +2087,8 @@ static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
return;
}
-static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
+static void ns_hwcontrol(struct nand_chip *chip, int cmd, unsigned int bitmask)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct nandsim *ns = nand_get_controller_data(chip);
ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
@@ -2099,27 +2096,18 @@ static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;
if (cmd != NAND_CMD_NONE)
- ns_nand_write_byte(mtd, cmd);
+ ns_nand_write_byte(chip, cmd);
}
-static int ns_device_ready(struct mtd_info *mtd)
+static int ns_device_ready(struct nand_chip *chip)
{
NS_DBG("device_ready\n");
return 1;
}
-static uint16_t ns_nand_read_word(struct mtd_info *mtd)
+static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf,
+ int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
- NS_DBG("read_word\n");
-
- return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
-}
-
-static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct nandsim *ns = nand_get_controller_data(chip);
/* Check that chip is expecting data input */
@@ -2145,9 +2133,8 @@ static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
}
}
-static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct nandsim *ns = nand_get_controller_data(chip);
/* Sanity and correctness checks */
@@ -2169,7 +2156,7 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
int i;
for (i = 0; i < len; i++)
- buf[i] = mtd_to_nand(mtd)->read_byte(mtd);
+ buf[i] = chip->legacy.read_byte(chip);
return;
}
@@ -2262,12 +2249,11 @@ static int __init ns_init_module(void)
/*
* Register simulator's callbacks.
*/
- chip->cmd_ctrl = ns_hwcontrol;
- chip->read_byte = ns_nand_read_byte;
- chip->dev_ready = ns_device_ready;
- chip->write_buf = ns_nand_write_buf;
- chip->read_buf = ns_nand_read_buf;
- chip->read_word = ns_nand_read_word;
+ chip->legacy.cmd_ctrl = ns_hwcontrol;
+ chip->legacy.read_byte = ns_nand_read_byte;
+ chip->legacy.dev_ready = ns_device_ready;
+ chip->legacy.write_buf = ns_nand_write_buf;
+ chip->legacy.read_buf = ns_nand_read_buf;
chip->ecc.mode = NAND_ECC_SOFT;
chip->ecc.algo = NAND_ECC_HAMMING;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
@@ -2319,7 +2305,7 @@ static int __init ns_init_module(void)
goto error;
chip->dummy_controller.ops = &ns_controller_ops;
- retval = nand_scan(nsmtd, 1);
+ retval = nand_scan(chip, 1);
if (retval) {
NS_ERR("Could not scan NAND Simulator device\n");
goto error;
@@ -2364,7 +2350,7 @@ static int __init ns_init_module(void)
err_exit:
free_nandsim(nand);
- nand_release(nsmtd);
+ nand_release(chip);
for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
kfree(nand->partitions[i].name);
error:
@@ -2386,7 +2372,7 @@ static void __exit ns_cleanup_module(void)
int i;
free_nandsim(ns); /* Free nandsim private resources */
- nand_release(nsmtd); /* Unregister driver */
+ nand_release(chip); /* Unregister driver */
for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
kfree(ns->partitions[i].name);
kfree(mtd_to_nand(nsmtd)); /* Free other structures */
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index 540fa1a0ea24..d49a7a17146c 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -44,10 +44,9 @@ struct ndfc_controller {
static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
-static void ndfc_select_chip(struct mtd_info *mtd, int chip)
+static void ndfc_select_chip(struct nand_chip *nchip, int chip)
{
uint32_t ccr;
- struct nand_chip *nchip = mtd_to_nand(mtd);
struct ndfc_controller *ndfc = nand_get_controller_data(nchip);
ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
@@ -59,9 +58,8 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip)
out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
}
-static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+static void ndfc_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
if (cmd == NAND_CMD_NONE)
@@ -73,18 +71,16 @@ static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE);
}
-static int ndfc_ready(struct mtd_info *mtd)
+static int ndfc_ready(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
}
-static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
+static void ndfc_enable_hwecc(struct nand_chip *chip, int mode)
{
uint32_t ccr;
- struct nand_chip *chip = mtd_to_nand(mtd);
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
@@ -93,10 +89,9 @@ static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
wmb();
}
-static int ndfc_calculate_ecc(struct mtd_info *mtd,
+static int ndfc_calculate_ecc(struct nand_chip *chip,
const u_char *dat, u_char *ecc_code)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
uint32_t ecc;
uint8_t *p = (uint8_t *)&ecc;
@@ -118,9 +113,8 @@ static int ndfc_calculate_ecc(struct mtd_info *mtd,
* functions. No further checking, as nand_base will always read/write
* page aligned.
*/
-static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void ndfc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
uint32_t *p = (uint32_t *) buf;
@@ -128,9 +122,8 @@ static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
*p++ = in_be32(ndfc->ndfcbase + NDFC_DATA);
}
-static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void ndfc_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct ndfc_controller *ndfc = nand_get_controller_data(chip);
uint32_t *p = (uint32_t *) buf;
@@ -149,15 +142,15 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
- chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
- chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
- chip->cmd_ctrl = ndfc_hwcontrol;
- chip->dev_ready = ndfc_ready;
+ chip->legacy.IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
+ chip->legacy.IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
+ chip->legacy.cmd_ctrl = ndfc_hwcontrol;
+ chip->legacy.dev_ready = ndfc_ready;
chip->select_chip = ndfc_select_chip;
- chip->chip_delay = 50;
+ chip->legacy.chip_delay = 50;
chip->controller = &ndfc->ndfc_control;
- chip->read_buf = ndfc_read_buf;
- chip->write_buf = ndfc_write_buf;
+ chip->legacy.read_buf = ndfc_read_buf;
+ chip->legacy.write_buf = ndfc_write_buf;
chip->ecc.correct = nand_correct_data;
chip->ecc.hwctl = ndfc_enable_hwecc;
chip->ecc.calculate = ndfc_calculate_ecc;
@@ -174,14 +167,14 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
return -ENODEV;
nand_set_flash_node(chip, flash_np);
- mtd->name = kasprintf(GFP_KERNEL, "%s.%s", dev_name(&ndfc->ofdev->dev),
- flash_np->name);
+ mtd->name = kasprintf(GFP_KERNEL, "%s.%pOFn", dev_name(&ndfc->ofdev->dev),
+ flash_np);
if (!mtd->name) {
ret = -ENOMEM;
goto err;
}
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
goto err;
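The kasprintf() change above relies on the %pOFn printk extension, which prints a device-tree node's name from its struct device_node pointer; the older code dereferenced flash_np->name directly. A minimal usage sketch, with %pOF (the full node path) shown alongside for contrast; foo_log_flash_node() is a hypothetical helper:

static void foo_log_flash_node(struct device *dev, struct device_node *np)
{
	/* "%pOFn" expands to the node name, "%pOF" to the full node path */
	dev_info(dev, "using flash node %pOFn (%pOF)\n", np, np);
}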
@@ -258,7 +251,7 @@ static int ndfc_remove(struct platform_device *ofdev)
struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
struct mtd_info *mtd = nand_to_mtd(&ndfc->chip);
- nand_release(mtd);
+ nand_release(&ndfc->chip);
kfree(mtd->name);
return 0;
diff --git a/drivers/mtd/nand/raw/nuc900_nand.c b/drivers/mtd/nand/raw/nuc900_nand.c
index af5b32c9a791..38b1994e7ed3 100644
--- a/drivers/mtd/nand/raw/nuc900_nand.c
+++ b/drivers/mtd/nand/raw/nuc900_nand.c
@@ -79,31 +79,31 @@ static const struct mtd_partition partitions[] = {
}
};
-static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
+static unsigned char nuc900_nand_read_byte(struct nand_chip *chip)
{
unsigned char ret;
- struct nuc900_nand *nand = mtd_to_nuc900(mtd);
+ struct nuc900_nand *nand = mtd_to_nuc900(nand_to_mtd(chip));
ret = (unsigned char)read_data_reg(nand);
return ret;
}
-static void nuc900_nand_read_buf(struct mtd_info *mtd,
+static void nuc900_nand_read_buf(struct nand_chip *chip,
unsigned char *buf, int len)
{
int i;
- struct nuc900_nand *nand = mtd_to_nuc900(mtd);
+ struct nuc900_nand *nand = mtd_to_nuc900(nand_to_mtd(chip));
for (i = 0; i < len; i++)
buf[i] = (unsigned char)read_data_reg(nand);
}
-static void nuc900_nand_write_buf(struct mtd_info *mtd,
+static void nuc900_nand_write_buf(struct nand_chip *chip,
const unsigned char *buf, int len)
{
int i;
- struct nuc900_nand *nand = mtd_to_nuc900(mtd);
+ struct nuc900_nand *nand = mtd_to_nuc900(nand_to_mtd(chip));
for (i = 0; i < len; i++)
write_data_reg(nand, buf[i]);
@@ -120,19 +120,20 @@ static int nuc900_check_rb(struct nuc900_nand *nand)
return val;
}
-static int nuc900_nand_devready(struct mtd_info *mtd)
+static int nuc900_nand_devready(struct nand_chip *chip)
{
- struct nuc900_nand *nand = mtd_to_nuc900(mtd);
+ struct nuc900_nand *nand = mtd_to_nuc900(nand_to_mtd(chip));
int ready;
ready = (nuc900_check_rb(nand)) ? 1 : 0;
return ready;
}
-static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
+static void nuc900_nand_command_lp(struct nand_chip *chip,
+ unsigned int command,
int column, int page_addr)
{
- register struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nuc900_nand *nand = mtd_to_nuc900(mtd);
if (command == NAND_CMD_READOOB) {
@@ -174,9 +175,9 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
return;
case NAND_CMD_RESET:
- if (chip->dev_ready)
+ if (chip->legacy.dev_ready)
break;
- udelay(chip->chip_delay);
+ udelay(chip->legacy.chip_delay);
write_cmd_reg(nand, NAND_CMD_STATUS);
write_cmd_reg(nand, command);
@@ -195,8 +196,8 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
write_cmd_reg(nand, NAND_CMD_READSTART);
default:
- if (!chip->dev_ready) {
- udelay(chip->chip_delay);
+ if (!chip->legacy.dev_ready) {
+ udelay(chip->legacy.chip_delay);
return;
}
}
@@ -205,7 +206,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
* any case on any machine. */
ndelay(100);
- while (!chip->dev_ready(mtd))
+ while (!chip->legacy.dev_ready(chip))
;
}
@@ -253,12 +254,12 @@ static int nuc900_nand_probe(struct platform_device *pdev)
return -ENOENT;
clk_enable(nuc900_nand->clk);
- chip->cmdfunc = nuc900_nand_command_lp;
- chip->dev_ready = nuc900_nand_devready;
- chip->read_byte = nuc900_nand_read_byte;
- chip->write_buf = nuc900_nand_write_buf;
- chip->read_buf = nuc900_nand_read_buf;
- chip->chip_delay = 50;
+ chip->legacy.cmdfunc = nuc900_nand_command_lp;
+ chip->legacy.dev_ready = nuc900_nand_devready;
+ chip->legacy.read_byte = nuc900_nand_read_byte;
+ chip->legacy.write_buf = nuc900_nand_write_buf;
+ chip->legacy.read_buf = nuc900_nand_read_buf;
+ chip->legacy.chip_delay = 50;
chip->options = 0;
chip->ecc.mode = NAND_ECC_SOFT;
chip->ecc.algo = NAND_ECC_HAMMING;
@@ -270,7 +271,7 @@ static int nuc900_nand_probe(struct platform_device *pdev)
nuc900_nand_enable(nuc900_nand);
- if (nand_scan(mtd, 1))
+ if (nand_scan(chip, 1))
return -ENXIO;
mtd_device_register(mtd, partitions, ARRAY_SIZE(partitions));
@@ -284,7 +285,7 @@ static int nuc900_nand_remove(struct platform_device *pdev)
{
struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&nuc900_nand->chip));
+ nand_release(&nuc900_nand->chip);
clk_disable(nuc900_nand->clk);
return 0;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 4546ac0bed4a..886d05c391ef 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -240,7 +240,7 @@ static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
/**
* omap_hwcontrol - hardware specific access to control-lines
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @cmd: command to device
* @ctrl:
* NAND_NCE: bit 0 -> don't care
@@ -249,9 +249,9 @@ static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
*
* NOTE: boards may use different bits for these!!
*/
-static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+static void omap_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
{
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
if (cmd != NAND_CMD_NONE) {
if (ctrl & NAND_CLE)
@@ -275,7 +275,7 @@ static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *nand = mtd_to_nand(mtd);
- ioread8_rep(nand->IO_ADDR_R, buf, len);
+ ioread8_rep(nand->legacy.IO_ADDR_R, buf, len);
}
/**
@@ -291,7 +291,7 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
bool status;
while (len--) {
- iowrite8(*p++, info->nand.IO_ADDR_W);
+ iowrite8(*p++, info->nand.legacy.IO_ADDR_W);
/* wait until buffer is available for write */
do {
status = info->ops->nand_writebuffer_empty();
@@ -309,7 +309,7 @@ static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *nand = mtd_to_nand(mtd);
- ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
+ ioread16_rep(nand->legacy.IO_ADDR_R, buf, len / 2);
}
/**
@@ -327,7 +327,7 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
len >>= 1;
while (len--) {
- iowrite16(*p++, info->nand.IO_ADDR_W);
+ iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
/* wait until buffer is available for write */
do {
status = info->ops->nand_writebuffer_empty();
@@ -337,12 +337,13 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
/**
* omap_read_buf_pref - read data from NAND controller into buffer
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @buf: buffer to store date
* @len: number of bytes to read
*/
-static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
+static void omap_read_buf_pref(struct nand_chip *chip, u_char *buf, int len)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
uint32_t r_count = 0;
int ret = 0;
@@ -372,7 +373,7 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
r_count = readl(info->reg.gpmc_prefetch_status);
r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
r_count = r_count >> 2;
- ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
+ ioread32_rep(info->nand.legacy.IO_ADDR_R, p, r_count);
p += r_count;
len -= r_count << 2;
} while (len);
@@ -383,13 +384,14 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
/**
* omap_write_buf_pref - write buffer to NAND controller
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
*/
-static void omap_write_buf_pref(struct mtd_info *mtd,
- const u_char *buf, int len)
+static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
+ int len)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
uint32_t w_count = 0;
int i = 0, ret = 0;
@@ -399,7 +401,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
/* take care of subpage writes */
if (len % 2 != 0) {
- writeb(*buf, info->nand.IO_ADDR_W);
+ writeb(*buf, info->nand.legacy.IO_ADDR_W);
p = (u16 *)(buf + 1);
len--;
}
@@ -419,7 +421,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
- iowrite16(*p++, info->nand.IO_ADDR_W);
+ iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
}
/* wait for data to flushed-out before reset the prefetch */
tim = 0;
@@ -528,14 +530,17 @@ out_copy:
/**
* omap_read_buf_dma_pref - read data from NAND controller into buffer
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @buf: buffer to store date
* @len: number of bytes to read
*/
-static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
+static void omap_read_buf_dma_pref(struct nand_chip *chip, u_char *buf,
+ int len)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
if (len <= mtd->oobsize)
- omap_read_buf_pref(mtd, buf, len);
+ omap_read_buf_pref(chip, buf, len);
else
/* start transfer in DMA mode */
omap_nand_dma_transfer(mtd, buf, len, 0x0);
@@ -543,18 +548,20 @@ static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
/**
* omap_write_buf_dma_pref - write buffer to NAND controller
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
*/
-static void omap_write_buf_dma_pref(struct mtd_info *mtd,
- const u_char *buf, int len)
+static void omap_write_buf_dma_pref(struct nand_chip *chip, const u_char *buf,
+ int len)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
if (len <= mtd->oobsize)
- omap_write_buf_pref(mtd, buf, len);
+ omap_write_buf_pref(chip, buf, len);
else
/* start transfer in DMA mode */
- omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
+ omap_nand_dma_transfer(mtd, (u_char *)buf, len, 0x1);
}
/*
@@ -578,14 +585,14 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
bytes = info->buf_len;
else if (!info->buf_len)
bytes = 0;
- iowrite32_rep(info->nand.IO_ADDR_W,
- (u32 *)info->buf, bytes >> 2);
+ iowrite32_rep(info->nand.legacy.IO_ADDR_W, (u32 *)info->buf,
+ bytes >> 2);
info->buf = info->buf + bytes;
info->buf_len -= bytes;
} else {
- ioread32_rep(info->nand.IO_ADDR_R,
- (u32 *)info->buf, bytes >> 2);
+ ioread32_rep(info->nand.legacy.IO_ADDR_R, (u32 *)info->buf,
+ bytes >> 2);
info->buf = info->buf + bytes;
if (this_irq == info->gpmc_irq_count)
@@ -605,17 +612,19 @@ done:
/*
* omap_read_buf_irq_pref - read data from NAND controller into buffer
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @buf: buffer to store date
* @len: number of bytes to read
*/
-static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
+ int len)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
int ret = 0;
if (len <= mtd->oobsize) {
- omap_read_buf_pref(mtd, buf, len);
+ omap_read_buf_pref(chip, buf, len);
return;
}
@@ -651,20 +660,21 @@ out_copy:
/*
* omap_write_buf_irq_pref - write buffer to NAND controller
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
*/
-static void omap_write_buf_irq_pref(struct mtd_info *mtd,
- const u_char *buf, int len)
+static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
+ int len)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct omap_nand_info *info = mtd_to_omap(mtd);
int ret = 0;
unsigned long tim, limit;
u32 val;
if (len <= mtd->oobsize) {
- omap_write_buf_pref(mtd, buf, len);
+ omap_write_buf_pref(chip, buf, len);
return;
}
@@ -857,7 +867,7 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
/**
* omap_correct_data - Compares the ECC read with HW generated ECC
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @dat: page data
* @read_ecc: ecc read from nand flash
* @calc_ecc: ecc read from HW ECC registers
@@ -869,10 +879,10 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
* corrected errors is returned. If uncorrectable errors exist, %-1 is
* returned.
*/
-static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
- u_char *read_ecc, u_char *calc_ecc)
+static int omap_correct_data(struct nand_chip *chip, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
{
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
int blockCnt = 0, i = 0, ret = 0;
int stat = 0;
@@ -900,7 +910,7 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
/**
* omap_calculate_ecc - Generate non-inverted ECC bytes.
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
*
@@ -910,10 +920,10 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
* an erased page will produce an ECC mismatch between generated and read
* ECC bytes that has to be dealt with separately.
*/
-static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code)
+static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat,
+ u_char *ecc_code)
{
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
u32 val;
val = readl(info->reg.gpmc_ecc_config);
@@ -935,10 +945,9 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
* @mtd: MTD device structure
* @mode: Read/Write mode
*/
-static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
+static void omap_enable_hwecc(struct nand_chip *chip, int mode)
{
- struct omap_nand_info *info = mtd_to_omap(mtd);
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
u32 val;
@@ -972,8 +981,7 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
/**
* omap_wait - wait until the command is done
- * @mtd: MTD device structure
- * @chip: NAND Chip structure
+ * @this: NAND Chip structure
*
* Wait function is called during Program and erase operations and
* the way it is called from MTD layer, we should wait till the NAND
@@ -982,10 +990,9 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
* Erase can take up to 400ms and program up to 20ms according to
* general NAND and SmartMedia specs
*/
-static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
+static int omap_wait(struct nand_chip *this)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this));
unsigned long timeo = jiffies;
int status, state = this->state;
@@ -1012,9 +1019,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
*
* Returns true if ready and false if busy.
*/
-static int omap_dev_ready(struct mtd_info *mtd)
+static int omap_dev_ready(struct nand_chip *chip)
{
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
return gpiod_get_value(info->ready_gpiod);
}
@@ -1030,13 +1037,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
* eccsize0 = 0 (no additional protected byte in spare area)
* eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
*/
-static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
+static void __maybe_unused omap_enable_hwecc_bch(struct nand_chip *chip,
+ int mode)
{
unsigned int bch_type;
unsigned int dev_width, nsectors;
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
enum omap_ecc ecc_opt = info->ecc_opt;
- struct nand_chip *chip = mtd_to_nand(mtd);
u32 val, wr_mode;
unsigned int ecc_size1, ecc_size0;
@@ -1256,7 +1263,7 @@ static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
/**
* omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
*
@@ -1264,10 +1271,10 @@ static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
* when SW based correction is required as ECC is required for one sector
* at a time.
*/
-static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
+static int omap_calculate_ecc_bch_sw(struct nand_chip *chip,
const u_char *dat, u_char *ecc_calc)
{
- return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
+ return _omap_calculate_ecc_bch(nand_to_mtd(chip), dat, ecc_calc, 0);
}
/**
@@ -1339,7 +1346,7 @@ static int erased_sector_bitflips(u_char *data, u_char *oob,
/**
* omap_elm_correct_data - corrects page data area in case error reported
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @data: page data
* @read_ecc: ecc read from nand flash
* @calc_ecc: ecc read from HW ECC registers
@@ -1348,10 +1355,10 @@ static int erased_sector_bitflips(u_char *data, u_char *oob,
* In case of non-zero ecc vector, first filter out erased-pages, and
* then process data via ELM to detect bit-flips.
*/
-static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
- u_char *read_ecc, u_char *calc_ecc)
+static int omap_elm_correct_data(struct nand_chip *chip, u_char *data,
+ u_char *read_ecc, u_char *calc_ecc)
{
- struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
struct nand_ecc_ctrl *ecc = &info->nand.ecc;
int eccsteps = info->nand.ecc.steps;
int i , j, stat = 0;
@@ -1512,7 +1519,6 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
/**
* omap_write_page_bch - BCH ecc based write page function for entire page
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
@@ -1520,19 +1526,20 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
*
* Custom write page method evolved to support multi sector writing in one shot
*/
-static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
uint8_t *ecc_calc = chip->ecc.calc_buf;
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ecc engine */
- chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
- chip->write_buf(mtd, buf, mtd->writesize);
+ chip->legacy.write_buf(chip, buf, mtd->writesize);
/* Update ecc vector from GPMC result registers */
omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
@@ -1543,14 +1550,13 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
/* Write ecc vector to OOB area */
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
/**
* omap_write_subpage_bch - BCH hardware ECC based subpage write
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @offset: column address of subpage within the page
* @data_len: data length
@@ -1560,11 +1566,11 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
*
* OMAP optimized subpage write method.
*/
-static int omap_write_subpage_bch(struct mtd_info *mtd,
- struct nand_chip *chip, u32 offset,
+static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
u32 data_len, const u8 *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
u8 *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
@@ -1582,10 +1588,10 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ECC engine */
- chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
/* Write data */
- chip->write_buf(mtd, buf, mtd->writesize);
+ chip->legacy.write_buf(chip, buf, mtd->writesize);
for (step = 0; step < ecc_steps; step++) {
/* mask ECC of un-touched subpages by padding 0xFF */
@@ -1610,14 +1616,13 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
return ret;
/* write OOB buffer to NAND device */
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
/**
* omap_read_page_bch - BCH ecc based page read function for entire page
- * @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller requires OOB data read to chip->oob_poi
@@ -1630,9 +1635,10 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
* ecc engine enabled. ecc vector updated after read of OOB data.
* For non error pages ecc vector reported as zero.
*/
-static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
uint8_t *ecc_calc = chip->ecc.calc_buf;
uint8_t *ecc_code = chip->ecc.code_buf;
int stat, ret;
@@ -1641,10 +1647,10 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
nand_read_page_op(chip, page, 0, NULL, 0);
/* Enable GPMC ecc engine */
- chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
/* Read data */
- chip->read_buf(mtd, buf, mtd->writesize);
+ chip->legacy.read_buf(chip, buf, mtd->writesize);
/* Read oob bytes */
nand_change_read_column_op(chip,
@@ -1660,7 +1666,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
if (ret)
return ret;
- stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
+ stat = chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
if (stat < 0) {
mtd->ecc_stats.failed++;
@@ -1927,8 +1933,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
/* Re-populate low-level callbacks based on xfer modes */
switch (info->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
- chip->read_buf = omap_read_buf_pref;
- chip->write_buf = omap_write_buf_pref;
+ chip->legacy.read_buf = omap_read_buf_pref;
+ chip->legacy.write_buf = omap_write_buf_pref;
break;
case NAND_OMAP_POLLED:
@@ -1960,8 +1966,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
err);
return err;
}
- chip->read_buf = omap_read_buf_dma_pref;
- chip->write_buf = omap_write_buf_dma_pref;
+ chip->legacy.read_buf = omap_read_buf_dma_pref;
+ chip->legacy.write_buf = omap_write_buf_dma_pref;
}
break;
@@ -1996,8 +2002,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
return err;
}
- chip->read_buf = omap_read_buf_irq_pref;
- chip->write_buf = omap_write_buf_irq_pref;
+ chip->legacy.read_buf = omap_read_buf_irq_pref;
+ chip->legacy.write_buf = omap_write_buf_irq_pref;
break;
@@ -2215,16 +2221,16 @@ static int omap_nand_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(nand_chip->IO_ADDR_R))
- return PTR_ERR(nand_chip->IO_ADDR_R);
+ nand_chip->legacy.IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nand_chip->legacy.IO_ADDR_R))
+ return PTR_ERR(nand_chip->legacy.IO_ADDR_R);
info->phys_base = res->start;
nand_chip->controller = &omap_gpmc_controller;
- nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
- nand_chip->cmd_ctrl = omap_hwcontrol;
+ nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R;
+ nand_chip->legacy.cmd_ctrl = omap_hwcontrol;
info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
GPIOD_IN);
@@ -2241,11 +2247,11 @@ static int omap_nand_probe(struct platform_device *pdev)
* device and read status register until you get a failure or success
*/
if (info->ready_gpiod) {
- nand_chip->dev_ready = omap_dev_ready;
- nand_chip->chip_delay = 0;
+ nand_chip->legacy.dev_ready = omap_dev_ready;
+ nand_chip->legacy.chip_delay = 0;
} else {
- nand_chip->waitfunc = omap_wait;
- nand_chip->chip_delay = 50;
+ nand_chip->legacy.waitfunc = omap_wait;
+ nand_chip->legacy.chip_delay = 50;
}
if (info->flash_bbt)
@@ -2254,7 +2260,7 @@ static int omap_nand_probe(struct platform_device *pdev)
/* scan NAND device connected to chip controller */
nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
- err = nand_scan(mtd, 1);
+ err = nand_scan(nand_chip, 1);
if (err)
goto return_error;
@@ -2290,7 +2296,7 @@ static int omap_nand_remove(struct platform_device *pdev)
}
if (info->dma)
dma_release_channel(info->dma);
- nand_release(mtd);
+ nand_release(nand_chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index 52d435285a3f..d27b39a7223c 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -26,9 +26,9 @@ struct orion_nand_info {
struct clk *clk;
};
-static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+static void orion_nand_cmd_ctrl(struct nand_chip *nc, int cmd,
+ unsigned int ctrl)
{
- struct nand_chip *nc = mtd_to_nand(mtd);
struct orion_nand_data *board = nand_get_controller_data(nc);
u32 offs;
@@ -45,13 +45,12 @@ static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl
if (nc->options & NAND_BUSWIDTH_16)
offs <<= 1;
- writeb(cmd, nc->IO_ADDR_W + offs);
+ writeb(cmd, nc->legacy.IO_ADDR_W + offs);
}
-static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void orion_nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- void __iomem *io_base = chip->IO_ADDR_R;
+ void __iomem *io_base = chip->legacy.IO_ADDR_R;
#if defined(__LINUX_ARM_ARCH__) && __LINUX_ARM_ARCH__ >= 5
uint64_t *buf64;
#endif
@@ -137,14 +136,14 @@ static int __init orion_nand_probe(struct platform_device *pdev)
nand_set_controller_data(nc, board);
nand_set_flash_node(nc, pdev->dev.of_node);
- nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
- nc->cmd_ctrl = orion_nand_cmd_ctrl;
- nc->read_buf = orion_nand_read_buf;
+ nc->legacy.IO_ADDR_R = nc->legacy.IO_ADDR_W = io_base;
+ nc->legacy.cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->legacy.read_buf = orion_nand_read_buf;
nc->ecc.mode = NAND_ECC_SOFT;
nc->ecc.algo = NAND_ECC_HAMMING;
if (board->chip_delay)
- nc->chip_delay = board->chip_delay;
+ nc->legacy.chip_delay = board->chip_delay;
WARN(board->width > 16,
"%d bit bus width out of range",
@@ -174,14 +173,14 @@ static int __init orion_nand_probe(struct platform_device *pdev)
return ret;
}
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(nc, 1);
if (ret)
goto no_dev;
mtd->name = "orion_nand";
ret = mtd_device_register(mtd, board->parts, board->nr_parts);
if (ret) {
- nand_release(mtd);
+ nand_release(nc);
goto no_dev;
}
@@ -196,9 +195,8 @@ static int orion_nand_remove(struct platform_device *pdev)
{
struct orion_nand_info *info = platform_get_drvdata(pdev);
struct nand_chip *chip = &info->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
- nand_release(mtd);
+ nand_release(chip);
clk_disable_unprepare(info->clk);
diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
index 01b00bb69c1e..0e52dc29141c 100644
--- a/drivers/mtd/nand/raw/oxnas_nand.c
+++ b/drivers/mtd/nand/raw/oxnas_nand.c
@@ -38,35 +38,32 @@ struct oxnas_nand_ctrl {
struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
};
-static uint8_t oxnas_nand_read_byte(struct mtd_info *mtd)
+static uint8_t oxnas_nand_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
return readb(oxnas->io_base);
}
-static void oxnas_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+static void oxnas_nand_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
ioread8_rep(oxnas->io_base, buf, len);
}
-static void oxnas_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+static void oxnas_nand_write_buf(struct nand_chip *chip, const u8 *buf,
+ int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
iowrite8_rep(oxnas->io_base, buf, len);
}
/* Single CS command control */
-static void oxnas_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+static void oxnas_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
if (ctrl & NAND_CLE)
@@ -135,20 +132,20 @@ static int oxnas_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
mtd->priv = chip;
- chip->cmd_ctrl = oxnas_nand_cmd_ctrl;
- chip->read_buf = oxnas_nand_read_buf;
- chip->read_byte = oxnas_nand_read_byte;
- chip->write_buf = oxnas_nand_write_buf;
- chip->chip_delay = 30;
+ chip->legacy.cmd_ctrl = oxnas_nand_cmd_ctrl;
+ chip->legacy.read_buf = oxnas_nand_read_buf;
+ chip->legacy.read_byte = oxnas_nand_read_byte;
+ chip->legacy.write_buf = oxnas_nand_write_buf;
+ chip->legacy.chip_delay = 30;
/* Scan to find existence of the device */
- err = nand_scan(mtd, 1);
+ err = nand_scan(chip, 1);
if (err)
goto err_clk_unprepare;
err = mtd_device_register(mtd, NULL, 0);
if (err) {
- nand_release(mtd);
+ nand_release(chip);
goto err_clk_unprepare;
}
@@ -176,7 +173,7 @@ static int oxnas_nand_remove(struct platform_device *pdev)
struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev);
if (oxnas->chips[0])
- nand_release(nand_to_mtd(oxnas->chips[0]));
+ nand_release(oxnas->chips[0]);
clk_disable_unprepare(oxnas->clk);
diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
index a47a7e4bd25a..643cd22af009 100644
--- a/drivers/mtd/nand/raw/pasemi_nand.c
+++ b/drivers/mtd/nand/raw/pasemi_nand.c
@@ -43,49 +43,44 @@ static unsigned int lpcctl;
static struct mtd_info *pasemi_nand_mtd;
static const char driver_name[] = "pasemi-nand";
-static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void pasemi_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
while (len > 0x800) {
- memcpy_fromio(buf, chip->IO_ADDR_R, 0x800);
+ memcpy_fromio(buf, chip->legacy.IO_ADDR_R, 0x800);
buf += 0x800;
len -= 0x800;
}
- memcpy_fromio(buf, chip->IO_ADDR_R, len);
+ memcpy_fromio(buf, chip->legacy.IO_ADDR_R, len);
}
-static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+static void pasemi_write_buf(struct nand_chip *chip, const u_char *buf,
+ int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
while (len > 0x800) {
- memcpy_toio(chip->IO_ADDR_R, buf, 0x800);
+ memcpy_toio(chip->legacy.IO_ADDR_R, buf, 0x800);
buf += 0x800;
len -= 0x800;
}
- memcpy_toio(chip->IO_ADDR_R, buf, len);
+ memcpy_toio(chip->legacy.IO_ADDR_R, buf, len);
}
-static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd,
+static void pasemi_hwcontrol(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
- out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
+ out_8(chip->legacy.IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
else
- out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
+ out_8(chip->legacy.IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
/* Push out posted writes */
eieio();
inl(lpcctl);
}
-int pasemi_device_ready(struct mtd_info *mtd)
+int pasemi_device_ready(struct nand_chip *chip)
{
return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
}
@@ -122,10 +117,10 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
/* Link the private data with the MTD structure */
pasemi_nand_mtd->dev.parent = dev;
- chip->IO_ADDR_R = of_iomap(np, 0);
- chip->IO_ADDR_W = chip->IO_ADDR_R;
+ chip->legacy.IO_ADDR_R = of_iomap(np, 0);
+ chip->legacy.IO_ADDR_W = chip->legacy.IO_ADDR_R;
- if (!chip->IO_ADDR_R) {
+ if (!chip->legacy.IO_ADDR_R) {
err = -EIO;
goto out_mtd;
}
@@ -144,11 +139,11 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
goto out_ior;
}
- chip->cmd_ctrl = pasemi_hwcontrol;
- chip->dev_ready = pasemi_device_ready;
- chip->read_buf = pasemi_read_buf;
- chip->write_buf = pasemi_write_buf;
- chip->chip_delay = 0;
+ chip->legacy.cmd_ctrl = pasemi_hwcontrol;
+ chip->legacy.dev_ready = pasemi_device_ready;
+ chip->legacy.read_buf = pasemi_read_buf;
+ chip->legacy.write_buf = pasemi_write_buf;
+ chip->legacy.chip_delay = 0;
chip->ecc.mode = NAND_ECC_SOFT;
chip->ecc.algo = NAND_ECC_HAMMING;
@@ -156,7 +151,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
chip->bbt_options = NAND_BBT_USE_FLASH;
/* Scan to find existence of the device */
- err = nand_scan(pasemi_nand_mtd, 1);
+ err = nand_scan(chip, 1);
if (err)
goto out_lpc;
@@ -174,7 +169,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
out_lpc:
release_region(lpcctl, 4);
out_ior:
- iounmap(chip->IO_ADDR_R);
+ iounmap(chip->legacy.IO_ADDR_R);
out_mtd:
kfree(chip);
out:
@@ -191,11 +186,11 @@ static int pasemi_nand_remove(struct platform_device *ofdev)
chip = mtd_to_nand(pasemi_nand_mtd);
/* Release resources, unregister device */
- nand_release(pasemi_nand_mtd);
+ nand_release(chip);
release_region(lpcctl, 4);
- iounmap(chip->IO_ADDR_R);
+ iounmap(chip->legacy.IO_ADDR_R);
/* Free the MTD device structure */
kfree(chip);
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index 222626df4b96..86c536ddaf24 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -15,8 +15,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
+#include <linux/mtd/platnand.h>
struct plat_nand_data {
struct nand_chip chip;
@@ -60,14 +59,14 @@ static int plat_nand_probe(struct platform_device *pdev)
mtd = nand_to_mtd(&data->chip);
mtd->dev.parent = &pdev->dev;
- data->chip.IO_ADDR_R = data->io_base;
- data->chip.IO_ADDR_W = data->io_base;
- data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
- data->chip.dev_ready = pdata->ctrl.dev_ready;
+ data->chip.legacy.IO_ADDR_R = data->io_base;
+ data->chip.legacy.IO_ADDR_W = data->io_base;
+ data->chip.legacy.cmd_ctrl = pdata->ctrl.cmd_ctrl;
+ data->chip.legacy.dev_ready = pdata->ctrl.dev_ready;
data->chip.select_chip = pdata->ctrl.select_chip;
- data->chip.write_buf = pdata->ctrl.write_buf;
- data->chip.read_buf = pdata->ctrl.read_buf;
- data->chip.chip_delay = pdata->chip.chip_delay;
+ data->chip.legacy.write_buf = pdata->ctrl.write_buf;
+ data->chip.legacy.read_buf = pdata->ctrl.read_buf;
+ data->chip.legacy.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
data->chip.bbt_options |= pdata->chip.bbt_options;
@@ -84,7 +83,7 @@ static int plat_nand_probe(struct platform_device *pdev)
}
/* Scan to find existence of the device */
- err = nand_scan(mtd, pdata->chip.nr_chips);
+ err = nand_scan(&data->chip, pdata->chip.nr_chips);
if (err)
goto out;
@@ -97,7 +96,7 @@ static int plat_nand_probe(struct platform_device *pdev)
if (!err)
return err;
- nand_release(mtd);
+ nand_release(&data->chip);
out:
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
@@ -112,7 +111,7 @@ static int plat_nand_remove(struct platform_device *pdev)
struct plat_nand_data *data = platform_get_drvdata(pdev);
struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
- nand_release(nand_to_mtd(&data->chip));
+ nand_release(&data->chip);
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
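
plat_nand also demonstrates the new entry points: nand_scan() and nand_release() take the chip, and nand_to_mtd() derives the mtd only where the MTD layer needs one. A trimmed probe sketch under those assumptions (the example_* names are hypothetical):

static int example_probe(struct platform_device *pdev)
{
	struct example_nand_data *data;
	struct nand_chip *chip;
	int err;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	chip = &data->chip;
	chip->legacy.cmd_ctrl = example_cmd_ctrl;
	chip->legacy.dev_ready = example_dev_ready;

	err = nand_scan(chip, 1);		/* chip-first, not mtd */
	if (err)
		return err;

	err = mtd_device_register(nand_to_mtd(chip), NULL, 0);
	if (err)
		nand_release(chip);		/* mirrors the error path above */

	return err;
}
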
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index d1d470bb32e4..ef75dfa62a4f 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -23,7 +23,6 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
-#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -350,7 +349,8 @@ struct nandc_regs {
* @data_buffer: our local DMA buffer for page read/writes,
* used when we can't use the buffer provided
* by upper layers directly
- * @buf_size/count/start: markers for chip->read_buf/write_buf functions
+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+ * functions
* @reg_read_buf: local buffer for reading back registers via DMA
* @reg_read_dma: contains dma address for register read buffer
* @reg_read_pos: marker for data read in reg_read_buf
@@ -1155,8 +1155,8 @@ static void config_nand_cw_write(struct qcom_nand_controller *nandc)
}
/*
- * the following functions are used within chip->cmdfunc() to perform different
- * NAND_CMD_* commands
+ * the following functions are used within chip->legacy.cmdfunc() to
+ * perform different NAND_CMD_* commands
*/
/* sets up descriptors for NAND_CMD_PARAM */
@@ -1436,15 +1436,14 @@ static void post_command(struct qcom_nand_host *host, int command)
}
/*
- * Implements chip->cmdfunc. It's only used for a limited set of commands.
- * The rest of the commands wouldn't be called by upper layers. For example,
- * NAND_CMD_READOOB would never be called because we have our own versions
- * of read_oob ops for nand_ecc_ctrl.
+ * Implements chip->legacy.cmdfunc. It's only used for a limited set of
+ * commands. The rest of the commands wouldn't be called by upper layers.
+ * For example, NAND_CMD_READOOB would never be called because we have our own
+ * versions of read_oob ops for nand_ecc_ctrl.
*/
-static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
+static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
int column, int page_addr)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
@@ -1949,8 +1948,8 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
}
/* implements ecc->read_page() */
-static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
@@ -1966,10 +1965,10 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
}
/* implements ecc->read_page_raw() */
-static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf,
+static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int cw, ret;
@@ -1989,8 +1988,7 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
}
/* implements ecc->read_oob() */
-static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
@@ -2007,8 +2005,8 @@ static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
}
/* implements ecc->write_page() */
-static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
@@ -2077,10 +2075,11 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
}
/* implements ecc->write_page_raw() */
-static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf,
- int oob_required, int page)
+static int qcom_nandc_write_page_raw(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -2155,9 +2154,9 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
* since ECC is calculated for the combined codeword. So update the OOB from
 * chip->oob_poi, and pad the data area with 0xFF before writing.
*/
-static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -2197,9 +2196,9 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
return nand_prog_page_end_op(chip);
}
-static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
+static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -2235,9 +2234,8 @@ err:
return bad;
}
-static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
+static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -2278,14 +2276,13 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
/*
- * the three functions below implement chip->read_byte(), chip->read_buf()
- * and chip->write_buf() respectively. these aren't used for
- * reading/writing page data, they are used for smaller data like reading
- * id, status etc
+ * the three functions below implement chip->legacy.read_byte(),
+ * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these
+ * aren't used for reading/writing page data, they are used for smaller data
+ * like reading id, status etc
*/
-static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
+static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
u8 *buf = nandc->data_buffer;
@@ -2305,9 +2302,8 @@ static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
return ret;
}
-static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
@@ -2315,10 +2311,9 @@ static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
nandc->buf_start += real_len;
}
-static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
int len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
@@ -2328,9 +2323,8 @@ static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
}
/* we support only one external chip for now */
-static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
+static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
if (chipnr <= 0)
@@ -2809,13 +2803,13 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
- chip->cmdfunc = qcom_nandc_command;
+ chip->legacy.cmdfunc = qcom_nandc_command;
chip->select_chip = qcom_nandc_select_chip;
- chip->read_byte = qcom_nandc_read_byte;
- chip->read_buf = qcom_nandc_read_buf;
- chip->write_buf = qcom_nandc_write_buf;
- chip->set_features = nand_get_set_features_notsupp;
- chip->get_features = nand_get_set_features_notsupp;
+ chip->legacy.read_byte = qcom_nandc_read_byte;
+ chip->legacy.read_buf = qcom_nandc_read_buf;
+ chip->legacy.write_buf = qcom_nandc_write_buf;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
/*
* the bad block marker is readable only when we read the last codeword
@@ -2825,8 +2819,8 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
* and block_markbad helpers until we permanently switch to using
* MTD_OPS_RAW for all drivers (with the help of badblockbits)
*/
- chip->block_bad = qcom_nandc_block_bad;
- chip->block_markbad = qcom_nandc_block_markbad;
+ chip->legacy.block_bad = qcom_nandc_block_bad;
+ chip->legacy.block_markbad = qcom_nandc_block_markbad;
chip->controller = &nandc->controller;
chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
@@ -2835,7 +2829,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
/* set up initial status value */
host->status = NAND_STATUS_READY | NAND_STATUS_WP;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
return ret;
@@ -3000,7 +2994,7 @@ static int qcom_nandc_remove(struct platform_device *pdev)
struct qcom_nand_host *host;
list_for_each_entry(host, &nandc->host_list, node)
- nand_release(nand_to_mtd(&host->chip));
+ nand_release(&host->chip);
qcom_nandc_unalloc(nandc);
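
With mtd_to_nand() gone from the hook bodies, qcom-style drivers reach their private state straight from the chip via container_of(), the same trick to_qcom_nand_host() uses. A sketch with hypothetical names:

struct example_host {
	struct nand_chip chip;
	void __iomem *regs;
};

static inline struct example_host *to_example_host(struct nand_chip *chip)
{
	return container_of(chip, struct example_host, chip);
}

/* chip-first read_byte: no mtd round-trip needed anymore */
static uint8_t example_read_byte(struct nand_chip *chip)
{
	struct example_host *host = to_example_host(chip);

	return readb(host->regs + EXAMPLE_DATA_REG);	/* offset is made up */
}
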
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index dcdeb0660e5e..39be65b35ac2 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -232,9 +232,9 @@ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
/*
* Program data lines of the nand chip to send data to it
*/
-static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void r852_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
- struct r852_device *dev = r852_get_dev(mtd);
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
uint32_t reg;
/* Don't allow any access to hardware if we suspect card removal */
@@ -266,9 +266,9 @@ static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
/*
* Read data lines of the nand chip to retrieve data
*/
-static void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void r852_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
- struct r852_device *dev = r852_get_dev(mtd);
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
uint32_t reg;
if (dev->card_unstable) {
@@ -303,9 +303,9 @@ static void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
/*
* Read one byte from nand chip
*/
-static uint8_t r852_read_byte(struct mtd_info *mtd)
+static uint8_t r852_read_byte(struct nand_chip *chip)
{
- struct r852_device *dev = r852_get_dev(mtd);
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
/* Same problem as in r852_read_buf... */
if (dev->card_unstable)
@@ -317,9 +317,9 @@ static uint8_t r852_read_byte(struct mtd_info *mtd)
/*
* Control several chip lines & send commands
*/
-static void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+static void r852_cmdctl(struct nand_chip *chip, int dat, unsigned int ctrl)
{
- struct r852_device *dev = r852_get_dev(mtd);
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
if (dev->card_unstable)
return;
@@ -362,7 +362,7 @@ static void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
* Wait till card is ready.
* based on nand_wait, but returns errors on DMA error
*/
-static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
+static int r852_wait(struct nand_chip *chip)
{
struct r852_device *dev = nand_get_controller_data(chip);
@@ -373,7 +373,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
msecs_to_jiffies(400) : msecs_to_jiffies(20));
while (time_before(jiffies, timeout))
- if (chip->dev_ready(mtd))
+ if (chip->legacy.dev_ready(chip))
break;
nand_status_op(chip, &status);
@@ -390,9 +390,9 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
* Check if card is ready
*/
-static int r852_ready(struct mtd_info *mtd)
+static int r852_ready(struct nand_chip *chip)
{
- struct r852_device *dev = r852_get_dev(mtd);
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
}
@@ -401,9 +401,9 @@ static int r852_ready(struct mtd_info *mtd)
* Set ECC engine mode
*/
-static void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
+static void r852_ecc_hwctl(struct nand_chip *chip, int mode)
{
- struct r852_device *dev = r852_get_dev(mtd);
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
if (dev->card_unstable)
return;
@@ -433,10 +433,10 @@ static void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
* Calculate ECC, only used for writes
*/
-static int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
- uint8_t *ecc_code)
+static int r852_ecc_calculate(struct nand_chip *chip, const uint8_t *dat,
+ uint8_t *ecc_code)
{
- struct r852_device *dev = r852_get_dev(mtd);
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
struct sm_oob *oob = (struct sm_oob *)ecc_code;
uint32_t ecc1, ecc2;
@@ -465,14 +465,14 @@ static int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
* Correct the data using ECC, hw did almost everything for us
*/
-static int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
- uint8_t *read_ecc, uint8_t *calc_ecc)
+static int r852_ecc_correct(struct nand_chip *chip, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
{
uint32_t ecc_reg;
uint8_t ecc_status, err_byte;
int i, error = 0;
- struct r852_device *dev = r852_get_dev(mtd);
+ struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
if (dev->card_unstable)
return 0;
@@ -521,9 +521,10 @@ exit:
* This is a copy of nand_read_oob_std;
* nand_read_oob_syndrome assumes we can send a column address - we can't
*/
-static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int r852_read_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
@@ -636,7 +637,7 @@ static int r852_register_nand_device(struct r852_device *dev)
{
struct mtd_info *mtd = nand_to_mtd(dev->chip);
- WARN_ON(dev->card_registred);
+ WARN_ON(dev->card_registered);
mtd->dev.parent = &dev->pci_dev->dev;
@@ -653,10 +654,10 @@ static int r852_register_nand_device(struct r852_device *dev)
goto error3;
}
- dev->card_registred = 1;
+ dev->card_registered = 1;
return 0;
error3:
- nand_release(mtd);
+ nand_release(dev->chip);
error1:
/* Force card redetect */
dev->card_detected = 0;
@@ -671,13 +672,13 @@ static void r852_unregister_nand_device(struct r852_device *dev)
{
struct mtd_info *mtd = nand_to_mtd(dev->chip);
- if (!dev->card_registred)
+ if (!dev->card_registered)
return;
device_remove_file(&mtd->dev, &dev_attr_media_type);
- nand_release(mtd);
+ nand_release(dev->chip);
r852_engine_disable(dev);
- dev->card_registred = 0;
+ dev->card_registered = 0;
}
/* Card state updater */
@@ -691,7 +692,7 @@ static void r852_card_detect_work(struct work_struct *work)
dev->card_unstable = 0;
/* False alarm */
- if (dev->card_detected == dev->card_registred)
+ if (dev->card_detected == dev->card_registered)
goto exit;
/* Read media properties */
@@ -852,14 +853,14 @@ static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
goto error4;
/* commands */
- chip->cmd_ctrl = r852_cmdctl;
- chip->waitfunc = r852_wait;
- chip->dev_ready = r852_ready;
+ chip->legacy.cmd_ctrl = r852_cmdctl;
+ chip->legacy.waitfunc = r852_wait;
+ chip->legacy.dev_ready = r852_ready;
/* I/O */
- chip->read_byte = r852_read_byte;
- chip->read_buf = r852_read_buf;
- chip->write_buf = r852_write_buf;
+ chip->legacy.read_byte = r852_read_byte;
+ chip->legacy.read_buf = r852_read_buf;
+ chip->legacy.write_buf = r852_write_buf;
/* ecc */
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
@@ -1025,7 +1026,6 @@ static int r852_suspend(struct device *device)
static int r852_resume(struct device *device)
{
struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
- struct mtd_info *mtd = nand_to_mtd(dev->chip);
r852_disable_irqs(dev);
r852_card_update_present(dev);
@@ -1033,7 +1033,7 @@ static int r852_resume(struct device *device)
/* If card status changed, just do the work */
- if (dev->card_detected != dev->card_registred) {
+ if (dev->card_detected != dev->card_registered) {
dbg("card was %s during low power state",
dev->card_detected ? "added" : "removed");
@@ -1043,11 +1043,11 @@ static int r852_resume(struct device *device)
}
/* Otherwise, initialize the card */
- if (dev->card_registred) {
+ if (dev->card_registered) {
r852_engine_enable(dev);
- dev->chip->select_chip(mtd, 0);
+ dev->chip->select_chip(dev->chip, 0);
nand_reset_op(dev->chip);
- dev->chip->select_chip(mtd, -1);
+ dev->chip->select_chip(dev->chip, -1);
}
/* Program card detection IRQ */
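
r852_wait() above shows how a waitfunc now invokes the chip's own legacy.dev_ready hook. A generic ready-poll sketch along the same lines; it assumes the hook is populated and is only an illustration, not core API:

static int example_wait_ready(struct nand_chip *chip, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (chip->legacy.dev_ready(chip))
			return 0;
		cond_resched();		/* don't hog the CPU while polling */
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}
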
diff --git a/drivers/mtd/nand/raw/r852.h b/drivers/mtd/nand/raw/r852.h
index 1eed2fc2fa42..bc67f5bf67e8 100644
--- a/drivers/mtd/nand/raw/r852.h
+++ b/drivers/mtd/nand/raw/r852.h
@@ -129,7 +129,7 @@ struct r852_device {
/* card status area */
struct delayed_work card_detect_work;
struct workqueue_struct *card_workqueue;
- int card_registred; /* card registered with mtd */
+ int card_registered; /* card registered with mtd */
int card_detected; /* card detected in slot */
int card_unstable; /* a card was just inserted and its
state is not known yet */
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index c21e8892394a..d2e42e9d0e8c 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -404,7 +404,7 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
/**
* s3c2410_nand_select_chip - select the given nand chip
- * @mtd: The MTD instance for this chip.
+ * @this: NAND chip object.
* @chip: The chip number.
*
* This is called by the MTD layer to either select a given chip for the
@@ -415,11 +415,10 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
* platform specific selection code is called to route nFCE to the specific
* chip.
*/
-static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
+static void s3c2410_nand_select_chip(struct nand_chip *this, int chip)
{
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
- struct nand_chip *this = mtd_to_nand(mtd);
unsigned long cur;
nmtd = nand_get_controller_data(this);
@@ -457,9 +456,10 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
* Issue command and address cycles to the chip
*/
-static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+static void s3c2410_nand_hwcontrol(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
if (cmd == NAND_CMD_NONE)
@@ -473,9 +473,10 @@ static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd,
/* command and control functions */
-static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+static void s3c2440_nand_hwcontrol(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
if (cmd == NAND_CMD_NONE)
@@ -492,29 +493,33 @@ static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd,
* returns 0 if the nand is busy, 1 if it is ready
*/
-static int s3c2410_nand_devready(struct mtd_info *mtd)
+static int s3c2410_nand_devready(struct nand_chip *chip)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
}
-static int s3c2440_nand_devready(struct mtd_info *mtd)
+static int s3c2440_nand_devready(struct nand_chip *chip)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
}
-static int s3c2412_nand_devready(struct mtd_info *mtd)
+static int s3c2412_nand_devready(struct nand_chip *chip)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
}
/* ECC handling functions */
-static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+static int s3c2410_nand_correct_data(struct nand_chip *chip, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned int diff0, diff1, diff2;
unsigned int bit, byte;
@@ -591,38 +596,42 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
* generator block to ECC the data as it passes through]
*/
-static void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+static void s3c2410_nand_enable_hwecc(struct nand_chip *chip, int mode)
{
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ struct s3c2410_nand_info *info;
unsigned long ctrl;
+ info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
ctrl = readl(info->regs + S3C2410_NFCONF);
ctrl |= S3C2410_NFCONF_INITECC;
writel(ctrl, info->regs + S3C2410_NFCONF);
}
-static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+static void s3c2412_nand_enable_hwecc(struct nand_chip *chip, int mode)
{
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ struct s3c2410_nand_info *info;
unsigned long ctrl;
+ info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
ctrl = readl(info->regs + S3C2440_NFCONT);
writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
info->regs + S3C2440_NFCONT);
}
-static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+static void s3c2440_nand_enable_hwecc(struct nand_chip *chip, int mode)
{
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ struct s3c2410_nand_info *info;
unsigned long ctrl;
+ info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
ctrl = readl(info->regs + S3C2440_NFCONT);
writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
}
-static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code)
+static int s3c2410_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
@@ -634,9 +643,10 @@ static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
return 0;
}
-static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code)
+static int s3c2412_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
@@ -649,9 +659,10 @@ static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
return 0;
}
-static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code)
+static int s3c2440_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char *dat, u_char *ecc_code)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
@@ -668,14 +679,14 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
* use read/write block to move the data buffers to/from the controller
*/
-static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void s3c2410_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- readsb(this->IO_ADDR_R, buf, len);
+ readsb(this->legacy.IO_ADDR_R, buf, len);
}
-static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void s3c2440_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
@@ -689,16 +700,16 @@ static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
}
}
-static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+static void s3c2410_nand_write_buf(struct nand_chip *this, const u_char *buf,
int len)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- writesb(this->IO_ADDR_W, buf, len);
+ writesb(this->legacy.IO_ADDR_W, buf, len);
}
-static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+static void s3c2440_nand_write_buf(struct nand_chip *this, const u_char *buf,
int len)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
@@ -781,7 +792,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
- nand_release(nand_to_mtd(&ptr->chip));
+ nand_release(&ptr->chip);
}
}
@@ -809,9 +820,10 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
return -ENODEV;
}
-static int s3c2410_nand_setup_data_interface(struct mtd_info *mtd, int csline,
+static int s3c2410_nand_setup_data_interface(struct nand_chip *chip, int csline,
const struct nand_data_interface *conf)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
struct s3c2410_platform_nand *pdata = info->platform;
const struct nand_sdr_timings *timings;
@@ -852,10 +864,10 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
nand_set_flash_node(chip, set->of_node);
- chip->write_buf = s3c2410_nand_write_buf;
- chip->read_buf = s3c2410_nand_read_buf;
+ chip->legacy.write_buf = s3c2410_nand_write_buf;
+ chip->legacy.read_buf = s3c2410_nand_read_buf;
chip->select_chip = s3c2410_nand_select_chip;
- chip->chip_delay = 50;
+ chip->legacy.chip_delay = 50;
nand_set_controller_data(chip, nmtd);
chip->options = set->options;
chip->controller = &info->controller;
@@ -869,29 +881,29 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
switch (info->cpu_type) {
case TYPE_S3C2410:
- chip->IO_ADDR_W = regs + S3C2410_NFDATA;
+ chip->legacy.IO_ADDR_W = regs + S3C2410_NFDATA;
info->sel_reg = regs + S3C2410_NFCONF;
info->sel_bit = S3C2410_NFCONF_nFCE;
- chip->cmd_ctrl = s3c2410_nand_hwcontrol;
- chip->dev_ready = s3c2410_nand_devready;
+ chip->legacy.cmd_ctrl = s3c2410_nand_hwcontrol;
+ chip->legacy.dev_ready = s3c2410_nand_devready;
break;
case TYPE_S3C2440:
- chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+ chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
info->sel_reg = regs + S3C2440_NFCONT;
info->sel_bit = S3C2440_NFCONT_nFCE;
- chip->cmd_ctrl = s3c2440_nand_hwcontrol;
- chip->dev_ready = s3c2440_nand_devready;
- chip->read_buf = s3c2440_nand_read_buf;
- chip->write_buf = s3c2440_nand_write_buf;
+ chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->legacy.dev_ready = s3c2440_nand_devready;
+ chip->legacy.read_buf = s3c2440_nand_read_buf;
+ chip->legacy.write_buf = s3c2440_nand_write_buf;
break;
case TYPE_S3C2412:
- chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+ chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
info->sel_reg = regs + S3C2440_NFCONT;
info->sel_bit = S3C2412_NFCONT_nFCE0;
- chip->cmd_ctrl = s3c2440_nand_hwcontrol;
- chip->dev_ready = s3c2412_nand_devready;
+ chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->legacy.dev_ready = s3c2412_nand_devready;
if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
dev_info(info->device, "System booted from NAND\n");
@@ -899,7 +911,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
break;
}
- chip->IO_ADDR_R = chip->IO_ADDR_W;
+ chip->legacy.IO_ADDR_R = chip->legacy.IO_ADDR_W;
nmtd->info = info;
nmtd->set = set;
@@ -1170,7 +1182,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
s3c2410_nand_init_chip(info, nmtd, sets);
- err = nand_scan(mtd, sets ? sets->nr_chips : 1);
+ err = nand_scan(&nmtd->chip, sets ? sets->nr_chips : 1);
if (err)
goto exit_error;
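
The s3c24xx hooks recover driver state either through nand_to_mtd() or, where the driver stashed it with nand_set_controller_data(), straight from the chip. A sketch of the latter, with hypothetical register names:

static int example_devready(struct nand_chip *chip)
{
	struct example_info *info = nand_get_controller_data(chip);

	return readb(info->regs + EXAMPLE_NFSTAT) & EXAMPLE_NFSTAT_READY;
}
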
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index bb8866e05ff7..4d20d033de7b 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -480,7 +480,7 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
/* initiate DMA transfer */
if (flctl->chan_fifo0_rx && rlen >= 32 &&
- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
goto convert; /* DMA success */
/* do polling transfer */
@@ -539,7 +539,7 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
/* initiate DMA transfer */
if (flctl->chan_fifo0_tx && rlen >= 32 &&
- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
return; /* DMA success */
/* do polling transfer */
@@ -611,21 +611,24 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
writel(flcmcdr_val, FLCMCDR(flctl));
}
-static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int flctl_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
return 0;
}
-static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
+static int flctl_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
return nand_prog_page_end_op(chip);
}
@@ -747,9 +750,10 @@ static void execmd_write_oob(struct mtd_info *mtd)
}
}
-static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
+static void flctl_cmdfunc(struct nand_chip *chip, unsigned int command,
int column, int page_addr)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct sh_flctl *flctl = mtd_to_flctl(mtd);
uint32_t read_cmd = 0;
@@ -923,9 +927,9 @@ runtime_exit:
return;
}
-static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
+static void flctl_select_chip(struct nand_chip *chip, int chipnr)
{
- struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
int ret;
switch (chipnr) {
@@ -967,17 +971,17 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
}
}
-static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void flctl_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
{
- struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
memcpy(&flctl->done_buff[flctl->index], buf, len);
flctl->index += len;
}
-static uint8_t flctl_read_byte(struct mtd_info *mtd)
+static uint8_t flctl_read_byte(struct nand_chip *chip)
{
- struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
uint8_t data;
data = flctl->done_buff[flctl->index];
@@ -985,18 +989,9 @@ static uint8_t flctl_read_byte(struct mtd_info *mtd)
return data;
}
-static uint16_t flctl_read_word(struct mtd_info *mtd)
+static void flctl_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
- struct sh_flctl *flctl = mtd_to_flctl(mtd);
- uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
-
- flctl->index += 2;
- return *buf;
-}
-
-static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
- struct sh_flctl *flctl = mtd_to_flctl(mtd);
+ struct sh_flctl *flctl = mtd_to_flctl(nand_to_mtd(chip));
memcpy(buf, &flctl->done_buff[flctl->index], len);
flctl->index += len;
@@ -1183,16 +1178,15 @@ static int flctl_probe(struct platform_device *pdev)
/* Set address of hardware control function */
/* 20 us command delay time */
- nand->chip_delay = 20;
+ nand->legacy.chip_delay = 20;
- nand->read_byte = flctl_read_byte;
- nand->read_word = flctl_read_word;
- nand->write_buf = flctl_write_buf;
- nand->read_buf = flctl_read_buf;
+ nand->legacy.read_byte = flctl_read_byte;
+ nand->legacy.write_buf = flctl_write_buf;
+ nand->legacy.read_buf = flctl_read_buf;
nand->select_chip = flctl_select_chip;
- nand->cmdfunc = flctl_cmdfunc;
- nand->set_features = nand_get_set_features_notsupp;
- nand->get_features = nand_get_set_features_notsupp;
+ nand->legacy.cmdfunc = flctl_cmdfunc;
+ nand->legacy.set_features = nand_get_set_features_notsupp;
+ nand->legacy.get_features = nand_get_set_features_notsupp;
if (pdata->flcmncr_val & SEL_16BIT)
nand->options |= NAND_BUSWIDTH_16;
@@ -1203,7 +1197,7 @@ static int flctl_probe(struct platform_device *pdev)
flctl_setup_dma(flctl);
nand->dummy_controller.ops = &flctl_nand_controller_ops;
- ret = nand_scan(flctl_mtd, 1);
+ ret = nand_scan(nand, 1);
if (ret)
goto err_chip;
@@ -1226,7 +1220,7 @@ static int flctl_remove(struct platform_device *pdev)
struct sh_flctl *flctl = platform_get_drvdata(pdev);
flctl_release_dma(flctl);
- nand_release(nand_to_mtd(&flctl->chip));
+ nand_release(&flctl->chip);
pm_runtime_disable(&pdev->dev);
return 0;
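
Note that flctl_read_word() is deleted outright rather than converted, which indicates the core no longer calls a ->read_word() hook. What remains is the controller's buffered-read style: cmdfunc fills done_buff, and the byte/buf hooks merely drain it. A sketch of that pattern (the example_* names are invented):

static uint8_t example_read_byte(struct nand_chip *chip)
{
	struct example_flctl *flctl = to_example_flctl(nand_to_mtd(chip));

	return flctl->done_buff[flctl->index++];
}

static void example_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct example_flctl *flctl = to_example_flctl(nand_to_mtd(chip));

	memcpy(buf, &flctl->done_buff[flctl->index], len);
	flctl->index += len;
}
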
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index fc171b17a39b..c82f26c8b58c 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -59,11 +59,10 @@ static inline struct sharpsl_nand *mtd_to_sharpsl(struct mtd_info *mtd)
* NAND_ALE: bit 2 -> bit 2
*
*/
-static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+static void sharpsl_nand_hwcontrol(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
- struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
if (ctrl & NAND_CTRL_CHANGE) {
unsigned char bits = ctrl & 0x07;
@@ -76,24 +75,25 @@ static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
}
if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
+ writeb(cmd, chip->legacy.IO_ADDR_W);
}
-static int sharpsl_nand_dev_ready(struct mtd_info *mtd)
+static int sharpsl_nand_dev_ready(struct nand_chip *chip)
{
- struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0);
}
-static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+static void sharpsl_nand_enable_hwecc(struct nand_chip *chip, int mode)
{
- struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
writeb(0, sharpsl->io + ECCCLRR);
}
-static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code)
+static int sharpsl_nand_calculate_ecc(struct nand_chip *chip,
+ const u_char * dat, u_char * ecc_code)
{
- struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+ struct sharpsl_nand *sharpsl = mtd_to_sharpsl(nand_to_mtd(chip));
ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
@@ -153,13 +153,13 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
/* Set address of NAND IO lines */
- this->IO_ADDR_R = sharpsl->io + FLASHIO;
- this->IO_ADDR_W = sharpsl->io + FLASHIO;
+ this->legacy.IO_ADDR_R = sharpsl->io + FLASHIO;
+ this->legacy.IO_ADDR_W = sharpsl->io + FLASHIO;
/* Set address of hardware control function */
- this->cmd_ctrl = sharpsl_nand_hwcontrol;
- this->dev_ready = sharpsl_nand_dev_ready;
+ this->legacy.cmd_ctrl = sharpsl_nand_hwcontrol;
+ this->legacy.dev_ready = sharpsl_nand_dev_ready;
/* 15 us command delay time */
- this->chip_delay = 15;
+ this->legacy.chip_delay = 15;
/* set eccmode using hardware ECC */
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 256;
@@ -171,7 +171,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
this->ecc.correct = nand_correct_data;
/* Scan to find existence of the device */
- err = nand_scan(mtd, 1);
+ err = nand_scan(this, 1);
if (err)
goto err_scan;
@@ -187,7 +187,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
return 0;
err_add:
- nand_release(mtd);
+ nand_release(this);
err_scan:
iounmap(sharpsl->io);
@@ -205,7 +205,7 @@ static int sharpsl_nand_remove(struct platform_device *pdev)
struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
/* Release resources, unregister device */
- nand_release(nand_to_mtd(&sharpsl->chip));
+ nand_release(&sharpsl->chip);
iounmap(sharpsl->io);
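
sharpsl's hardware-ECC hooks get the same chip-first prototypes. For reference, a sketch of a converted ecc.hwctl/ecc.calculate pair; the register names are made up and the readout logic only approximates what such an engine does:

static void example_enable_hwecc(struct nand_chip *chip, int mode)
{
	struct example_nand *priv = to_example_nand(nand_to_mtd(chip));

	writeb(0, priv->io + EXAMPLE_ECC_CLEAR);	/* reset the engine */
}

static int example_calculate_ecc(struct nand_chip *chip, const u_char *dat,
				 u_char *ecc_code)
{
	struct example_nand *priv = to_example_nand(nand_to_mtd(chip));

	/* the engine computed the code while data streamed past it */
	ecc_code[0] = ~readb(priv->io + EXAMPLE_ECC_LP_U);
	ecc_code[1] = ~readb(priv->io + EXAMPLE_ECC_LP_L);
	ecc_code[2] = (~readb(priv->io + EXAMPLE_ECC_CP) << 2) | 0x03;

	return 0;
}
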
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index 73aafe8c3ef3..6f063ef57640 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -99,8 +99,9 @@ static const struct mtd_ooblayout_ops oob_sm_small_ops = {
.free = oob_sm_small_ooblayout_free,
};
-static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
+static int sm_block_markbad(struct nand_chip *chip, loff_t ofs)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct mtd_oob_ops ops;
struct sm_oob oob;
int ret;
@@ -167,7 +168,7 @@ static int sm_attach_chip(struct nand_chip *chip)
/* Bad block marker position */
chip->badblockpos = 0x05;
chip->badblockbits = 7;
- chip->block_markbad = sm_block_markbad;
+ chip->legacy.block_markbad = sm_block_markbad;
/* ECC layout */
if (mtd->writesize == SM_SECTOR_SIZE)
@@ -195,7 +196,7 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
/* Scan for card properties */
chip->dummy_controller.ops = &sm_controller_ops;
flash_ids = smartmedia ? nand_smartmedia_flash_ids : nand_xd_flash_ids;
- ret = nand_scan_with_ids(mtd, 1, flash_ids);
+ ret = nand_scan_with_ids(chip, 1, flash_ids);
if (ret)
return ret;
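
sm_block_markbad() is the template for a chip-first legacy.block_markbad hook: derive the mtd, then write a marker through the OOB path. A condensed sketch (the two-byte marker and its offset are hypothetical; the real code fills a full struct sm_oob):

static int example_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 marker[2] = { 0x00, 0x00 };
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_RAW,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf = marker,
	};

	/* a real hook would target the first page(s) of the block at ofs */
	return mtd_write_oob(mtd, ofs, &ops);
}
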
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
index 9824a9923583..8be9a50c7880 100644
--- a/drivers/mtd/nand/raw/socrates_nand.c
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -34,15 +34,14 @@ struct socrates_nand_host {
/**
* socrates_nand_write_buf - write buffer to chip
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: data buffer
* @len: number of bytes to write
*/
-static void socrates_nand_write_buf(struct mtd_info *mtd,
- const uint8_t *buf, int len)
+static void socrates_nand_write_buf(struct nand_chip *this, const uint8_t *buf,
+ int len)
{
int i;
- struct nand_chip *this = mtd_to_nand(mtd);
struct socrates_nand_host *host = nand_get_controller_data(this);
for (i = 0; i < len; i++) {
@@ -54,14 +53,14 @@ static void socrates_nand_write_buf(struct mtd_info *mtd,
/**
* socrates_nand_read_buf - read chip data into buffer
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: buffer to store data
* @len: number of bytes to read
*/
-static void socrates_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void socrates_nand_read_buf(struct nand_chip *this, uint8_t *buf,
+ int len)
{
int i;
- struct nand_chip *this = mtd_to_nand(mtd);
struct socrates_nand_host *host = nand_get_controller_data(this);
uint32_t val;
@@ -78,31 +77,19 @@ static void socrates_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
* socrates_nand_read_byte - read one byte from the chip
- * @mtd: MTD device structure
+ * @this: NAND chip object
*/
-static uint8_t socrates_nand_read_byte(struct mtd_info *mtd)
+static uint8_t socrates_nand_read_byte(struct nand_chip *this)
{
uint8_t byte;
- socrates_nand_read_buf(mtd, &byte, sizeof(byte));
+ socrates_nand_read_buf(this, &byte, sizeof(byte));
return byte;
}
-/**
- * socrates_nand_read_word - read one word from the chip
- * @mtd: MTD device structure
- */
-static uint16_t socrates_nand_read_word(struct mtd_info *mtd)
-{
- uint16_t word;
- socrates_nand_read_buf(mtd, (uint8_t *)&word, sizeof(word));
- return word;
-}
-
/*
* Hardware specific access to control-lines
*/
-static void socrates_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
+static void socrates_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
+ unsigned int ctrl)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
uint32_t val;
@@ -125,9 +112,8 @@ static void socrates_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
/*
* Read the Device Ready pin.
*/
-static int socrates_nand_device_ready(struct mtd_info *mtd)
+static int socrates_nand_device_ready(struct nand_chip *nand_chip)
{
- struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
if (in_be32(host->io_base) & FPGA_NAND_BUSY)
@@ -166,26 +152,21 @@ static int socrates_nand_probe(struct platform_device *ofdev)
mtd->name = "socrates_nand";
mtd->dev.parent = &ofdev->dev;
- /*should never be accessed directly */
- nand_chip->IO_ADDR_R = (void *)0xdeadbeef;
- nand_chip->IO_ADDR_W = (void *)0xdeadbeef;
-
- nand_chip->cmd_ctrl = socrates_nand_cmd_ctrl;
- nand_chip->read_byte = socrates_nand_read_byte;
- nand_chip->read_word = socrates_nand_read_word;
- nand_chip->write_buf = socrates_nand_write_buf;
- nand_chip->read_buf = socrates_nand_read_buf;
- nand_chip->dev_ready = socrates_nand_device_ready;
+ nand_chip->legacy.cmd_ctrl = socrates_nand_cmd_ctrl;
+ nand_chip->legacy.read_byte = socrates_nand_read_byte;
+ nand_chip->legacy.write_buf = socrates_nand_write_buf;
+ nand_chip->legacy.read_buf = socrates_nand_read_buf;
+ nand_chip->legacy.dev_ready = socrates_nand_device_ready;
nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
nand_chip->ecc.algo = NAND_ECC_HAMMING;
/* TODO: I have no idea what the real delay is. */
- nand_chip->chip_delay = 20; /* 20us command delay time */
+ nand_chip->legacy.chip_delay = 20; /* 20us command delay time */
dev_set_drvdata(&ofdev->dev, host);
- res = nand_scan(mtd, 1);
+ res = nand_scan(nand_chip, 1);
if (res)
goto out;
@@ -193,7 +174,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)
if (!res)
return res;
- nand_release(mtd);
+ nand_release(nand_chip);
out:
iounmap(host->io_base);
@@ -206,9 +187,8 @@ out:
static int socrates_nand_remove(struct platform_device *ofdev)
{
struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
- struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
- nand_release(mtd);
+ nand_release(&host->nand_chip);
iounmap(host->io_base);
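
socrates drops its read_word() implementation for the same reason as flctl and keeps read_buf() as the single I/O primitive, with read_byte() composed on top. A miniature of that composition:

/* with ->read_word() gone, single-byte reads compose over read_buf */
static uint8_t example_read_byte(struct nand_chip *chip)
{
	uint8_t byte;

	example_read_buf(chip, &byte, sizeof(byte));
	return byte;
}

Keeping one primitive avoids duplicating the bus access sequence per access width.
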
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 1f0b7ee38df5..51b1a548064b 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -400,9 +400,8 @@ static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
nfc->regs + NFC_REG_CTL);
}
-static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
+static int sunxi_nfc_dev_ready(struct nand_chip *nand)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
u32 mask;
@@ -420,9 +419,9 @@ static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
return !!(readl(nfc->regs + NFC_REG_ST) & mask);
}
-static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
+static void sunxi_nfc_select_chip(struct nand_chip *nand, int chip)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(nand);
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
struct sunxi_nand_chip_sel *sel;
@@ -443,9 +442,9 @@ static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
NFC_PAGE_SHIFT(nand->page_shift);
if (sel->rb < 0) {
- nand->dev_ready = NULL;
+ nand->legacy.dev_ready = NULL;
} else {
- nand->dev_ready = sunxi_nfc_dev_ready;
+ nand->legacy.dev_ready = sunxi_nfc_dev_ready;
ctl |= NFC_RB_SEL(sel->rb);
}
@@ -464,9 +463,8 @@ static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
sunxi_nand->selected = chip;
}
-static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void sunxi_nfc_read_buf(struct nand_chip *nand, uint8_t *buf, int len)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
int ret;
@@ -502,10 +500,9 @@ static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
}
}
-static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+static void sunxi_nfc_write_buf(struct nand_chip *nand, const uint8_t *buf,
int len)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
int ret;
@@ -540,19 +537,18 @@ static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
}
}
-static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
+static uint8_t sunxi_nfc_read_byte(struct nand_chip *nand)
{
uint8_t ret = 0;
- sunxi_nfc_read_buf(mtd, &ret, 1);
+ sunxi_nfc_read_buf(nand, &ret, 1);
return ret;
}
-static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
+static void sunxi_nfc_cmd_ctrl(struct nand_chip *nand, int dat,
unsigned int ctrl)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
int ret;
@@ -761,7 +757,7 @@ static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
{
sunxi_nfc_randomizer_config(mtd, page, ecc);
sunxi_nfc_randomizer_enable(mtd);
- sunxi_nfc_write_buf(mtd, buf, len);
+ sunxi_nfc_write_buf(mtd_to_nand(mtd), buf, len);
sunxi_nfc_randomizer_disable(mtd);
}
@@ -770,7 +766,7 @@ static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
{
sunxi_nfc_randomizer_config(mtd, page, ecc);
sunxi_nfc_randomizer_enable(mtd);
- sunxi_nfc_read_buf(mtd, buf, len);
+ sunxi_nfc_read_buf(mtd_to_nand(mtd), buf, len);
sunxi_nfc_randomizer_disable(mtd);
}
@@ -995,7 +991,7 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
false);
if (!randomize)
- sunxi_nfc_read_buf(mtd, oob + offset, len);
+ sunxi_nfc_read_buf(nand, oob + offset, len);
else
sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
false, page);
@@ -1189,10 +1185,10 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
*cur_off = mtd->oobsize + mtd->writesize;
}
-static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf,
+static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
unsigned int max_bitflips = 0;
int ret, i, cur_off = 0;
@@ -1227,10 +1223,10 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
return max_bitflips;
}
-static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
- struct nand_chip *chip, u8 *buf,
+static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
nand_read_page_op(chip, page, 0, NULL, 0);
@@ -1241,14 +1237,14 @@ static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
return ret;
/* Fallback to PIO mode */
- return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
+ return sunxi_nfc_hw_ecc_read_page(chip, buf, oob_required, page);
}
-static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *chip,
u32 data_offs, u32 readlen,
u8 *bufpoi, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
unsigned int max_bitflips = 0;
@@ -1278,11 +1274,11 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
return max_bitflips;
}
-static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *chip,
u32 data_offs, u32 readlen,
u8 *buf, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
int ret;
@@ -1293,15 +1289,15 @@ static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
return ret;
/* Fallback to PIO mode */
- return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
+ return sunxi_nfc_hw_ecc_read_subpage(chip, data_offs, readlen,
buf, page);
}
-static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
@@ -1331,12 +1327,12 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
return nand_prog_page_end_op(chip);
}
-static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *chip,
u32 data_offs, u32 data_len,
const u8 *buf, int oob_required,
int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
@@ -1363,12 +1359,12 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
return nand_prog_page_end_op(chip);
}
-static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip,
const u8 *buf,
int oob_required,
int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
@@ -1425,28 +1421,25 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
return nand_prog_page_end_op(chip);
pio_fallback:
- return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
+ return sunxi_nfc_hw_ecc_write_page(chip, buf, oob_required, page);
}
-static int sunxi_nfc_hw_ecc_read_oob(struct mtd_info *mtd,
- struct nand_chip *chip,
- int page)
+static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *chip, int page)
{
chip->pagebuf = -1;
- return chip->ecc.read_page(mtd, chip, chip->data_buf, 1, page);
+ return chip->ecc.read_page(chip, chip->data_buf, 1, page);
}
-static int sunxi_nfc_hw_ecc_write_oob(struct mtd_info *mtd,
- struct nand_chip *chip,
- int page)
+static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
chip->pagebuf = -1;
memset(chip->data_buf, 0xff, mtd->writesize);
- ret = chip->ecc.write_page(mtd, chip, chip->data_buf, 1, page);
+ ret = chip->ecc.write_page(chip, chip->data_buf, 1, page);
if (ret)
return ret;
@@ -1475,10 +1468,9 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
#define sunxi_nand_lookup_timing(l, p, c) \
_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
-static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
+static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline,
const struct nand_data_interface *conf)
{
- struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
const struct nand_sdr_timings *timings;
@@ -1920,7 +1912,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
nand = &chip->nand;
/* Default tR value specified in the ONFI spec (chapter 4.15.1) */
- nand->chip_delay = 200;
+ nand->legacy.chip_delay = 200;
nand->controller = &nfc->controller;
nand->controller->ops = &sunxi_nand_controller_ops;
@@ -1931,23 +1923,23 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
nand->ecc.mode = NAND_ECC_HW;
nand_set_flash_node(nand, np);
nand->select_chip = sunxi_nfc_select_chip;
- nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
- nand->read_buf = sunxi_nfc_read_buf;
- nand->write_buf = sunxi_nfc_write_buf;
- nand->read_byte = sunxi_nfc_read_byte;
+ nand->legacy.cmd_ctrl = sunxi_nfc_cmd_ctrl;
+ nand->legacy.read_buf = sunxi_nfc_read_buf;
+ nand->legacy.write_buf = sunxi_nfc_write_buf;
+ nand->legacy.read_byte = sunxi_nfc_read_byte;
nand->setup_data_interface = sunxi_nfc_setup_data_interface;
mtd = nand_to_mtd(nand);
mtd->dev.parent = dev;
- ret = nand_scan(mtd, nsels);
+ ret = nand_scan(nand, nsels);
if (ret)
return ret;
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register mtd device: %d\n", ret);
- nand_release(mtd);
+ nand_release(nand);
return ret;
}
@@ -1986,7 +1978,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
while (!list_empty(&nfc->chips)) {
chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
node);
- nand_release(nand_to_mtd(&chip->nand));
+ nand_release(&chip->nand);
sunxi_nand_ecc_cleanup(&chip->nand.ecc);
list_del(&chip->node);
}
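
The sunxi select_chip conversion keeps its per-CS trick: a chip select without a ready/busy line simply clears legacy.dev_ready so the core falls back to fixed delays. A sketch of that idiom with invented types:

static void example_select_chip(struct nand_chip *chip, int cs)
{
	struct example_chip *echip = to_example_chip(chip);
	struct example_cs_sel *sel;

	if (cs < 0 || cs >= echip->nsels)
		return;		/* -1 deselects; a real driver drops CE here */

	sel = &echip->sels[cs];

	/* only expose dev_ready when this CS wires up an R/B pin */
	chip->legacy.dev_ready = sel->rb < 0 ? NULL : example_dev_ready;
}
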
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index 72698691727d..8818f893f300 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -116,9 +116,9 @@ struct tango_chip {
#define TIMING(t0, t1, t2, t3) ((t0) << 24 | (t1) << 16 | (t2) << 8 | (t3))
-static void tango_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+static void tango_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
{
- struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+ struct tango_chip *tchip = to_tango_chip(chip);
if (ctrl & NAND_CLE)
writeb_relaxed(dat, tchip->base + PBUS_CMD);
@@ -127,38 +127,36 @@ static void tango_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
writeb_relaxed(dat, tchip->base + PBUS_ADDR);
}
-static int tango_dev_ready(struct mtd_info *mtd)
+static int tango_dev_ready(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct tango_nfc *nfc = to_tango_nfc(chip->controller);
return readl_relaxed(nfc->pbus_base + PBUS_CS_CTRL) & PBUS_IORDY;
}
-static u8 tango_read_byte(struct mtd_info *mtd)
+static u8 tango_read_byte(struct nand_chip *chip)
{
- struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+ struct tango_chip *tchip = to_tango_chip(chip);
return readb_relaxed(tchip->base + PBUS_DATA);
}
-static void tango_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+static void tango_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
- struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+ struct tango_chip *tchip = to_tango_chip(chip);
ioread8_rep(tchip->base + PBUS_DATA, buf, len);
}
-static void tango_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+static void tango_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
- struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+ struct tango_chip *tchip = to_tango_chip(chip);
iowrite8_rep(tchip->base + PBUS_DATA, buf, len);
}
-static void tango_select_chip(struct mtd_info *mtd, int idx)
+static void tango_select_chip(struct nand_chip *chip, int idx)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct tango_nfc *nfc = to_tango_nfc(chip->controller);
struct tango_chip *tchip = to_tango_chip(chip);
@@ -277,14 +275,15 @@ dma_unmap:
return err;
}
-static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- u8 *buf, int oob_required, int page)
+static int tango_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct tango_nfc *nfc = to_tango_nfc(chip->controller);
int err, res, len = mtd->writesize;
if (oob_required)
- chip->ecc.read_oob(mtd, chip, page);
+ chip->ecc.read_oob(chip, page);
err = do_dma(nfc, DMA_FROM_DEVICE, NFC_READ, buf, len, page);
if (err)
@@ -292,16 +291,17 @@ static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip,
res = decode_error_report(chip);
if (res < 0) {
- chip->ecc.read_oob_raw(mtd, chip, page);
+ chip->ecc.read_oob_raw(chip, page);
res = check_erased_page(chip, buf);
}
return res;
}
-static int tango_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const u8 *buf, int oob_required, int page)
+static int tango_write_page(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct tango_nfc *nfc = to_tango_nfc(chip->controller);
int err, status, len = mtd->writesize;
@@ -314,7 +314,7 @@ static int tango_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (err)
return err;
- status = chip->waitfunc(mtd, chip);
+ status = chip->legacy.waitfunc(chip);
if (status & NAND_STATUS_FAIL)
return -EIO;
@@ -323,30 +323,26 @@ static int tango_write_page(struct mtd_info *mtd, struct nand_chip *chip,
static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
*pos += len;
if (!*buf) {
/* skip over "len" bytes */
nand_change_read_column_op(chip, *pos, NULL, 0, false);
} else {
- tango_read_buf(mtd, *buf, len);
+ tango_read_buf(chip, *buf, len);
*buf += len;
}
}
static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
*pos += len;
if (!*buf) {
/* skip over "len" bytes */
nand_change_write_column_op(chip, *pos, NULL, 0, false);
} else {
- tango_write_buf(mtd, *buf, len);
+ tango_write_buf(chip, *buf, len);
*buf += len;
}
}
@@ -424,32 +420,30 @@ static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
aux_write(chip, &oob, ecc_size, &pos);
}
-static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- u8 *buf, int oob_required, int page)
+static int tango_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, buf, chip->oob_poi);
return 0;
}
-static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const u8 *buf, int oob_required, int page)
+static int tango_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
{
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, buf, chip->oob_poi);
return nand_prog_page_end_op(chip);
}
-static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int tango_read_oob(struct nand_chip *chip, int page)
{
nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, NULL, chip->oob_poi);
return 0;
}
-static int tango_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int tango_write_oob(struct nand_chip *chip, int page)
{
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, NULL, chip->oob_poi);
@@ -485,11 +479,10 @@ static u32 to_ticks(int kHz, int ps)
return DIV_ROUND_UP_ULL((u64)kHz * ps, NSEC_PER_SEC);
}
-static int tango_set_timings(struct mtd_info *mtd, int csline,
+static int tango_set_timings(struct nand_chip *chip, int csline,
const struct nand_data_interface *conf)
{
const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
- struct nand_chip *chip = mtd_to_nand(mtd);
struct tango_nfc *nfc = to_tango_nfc(chip->controller);
struct tango_chip *tchip = to_tango_chip(chip);
u32 Trdy, Textw, Twc, Twpw, Tacc, Thold, Trpw, Textr;
@@ -571,12 +564,12 @@ static int chip_init(struct device *dev, struct device_node *np)
ecc = &chip->ecc;
mtd = nand_to_mtd(chip);
- chip->read_byte = tango_read_byte;
- chip->write_buf = tango_write_buf;
- chip->read_buf = tango_read_buf;
+ chip->legacy.read_byte = tango_read_byte;
+ chip->legacy.write_buf = tango_write_buf;
+ chip->legacy.read_buf = tango_read_buf;
chip->select_chip = tango_select_chip;
- chip->cmd_ctrl = tango_cmd_ctrl;
- chip->dev_ready = tango_dev_ready;
+ chip->legacy.cmd_ctrl = tango_cmd_ctrl;
+ chip->legacy.dev_ready = tango_dev_ready;
chip->setup_data_interface = tango_set_timings;
chip->options = NAND_USE_BOUNCE_BUFFER |
NAND_NO_SUBPAGE_WRITE |
@@ -588,7 +581,7 @@ static int chip_init(struct device *dev, struct device_node *np)
mtd_set_ooblayout(mtd, &tango_nand_ooblayout_ops);
mtd->dev.parent = dev;
- err = nand_scan(mtd, 1);
+ err = nand_scan(chip, 1);
if (err)
return err;
@@ -617,7 +610,7 @@ static int tango_nand_remove(struct platform_device *pdev)
for (cs = 0; cs < MAX_CS; ++cs) {
if (nfc->chips[cs])
- nand_release(nand_to_mtd(&nfc->chips[cs]->nand_chip));
+ nand_release(&nfc->chips[cs]->nand_chip);
}
return 0;
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 79da1efc88d1..9767e29d74e2 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -462,9 +462,8 @@ static int tegra_nand_exec_op(struct nand_chip *chip,
check_only);
}
-static void tegra_nand_select_chip(struct mtd_info *mtd, int die_nr)
+static void tegra_nand_select_chip(struct nand_chip *chip, int die_nr)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct tegra_nand_chip *nand = to_tegra_chip(chip);
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
@@ -615,44 +614,46 @@ err_unmap_dma_page:
return ret;
}
-static int tegra_nand_read_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, u8 *buf,
+static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
void *oob_buf = oob_required ? chip->oob_poi : NULL;
return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
mtd->oobsize, page, true);
}
-static int tegra_nand_write_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, const u8 *buf,
+static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
void *oob_buf = oob_required ? chip->oob_poi : NULL;
return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
mtd->oobsize, page, false);
}
-static int tegra_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int tegra_nand_read_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
mtd->oobsize, page, true);
}
-static int tegra_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int tegra_nand_write_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
mtd->oobsize, page, false);
}
-static int tegra_nand_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, u8 *buf,
+static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
struct tegra_nand_chip *nand = to_tegra_chip(chip);
void *oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -716,7 +717,7 @@ static int tegra_nand_read_page_hwecc(struct mtd_info *mtd,
* erased or if error correction just failed for all sub-
* pages.
*/
- ret = tegra_nand_read_oob(mtd, chip, page);
+ ret = tegra_nand_read_oob(chip, page);
if (ret < 0)
return ret;
@@ -759,10 +760,10 @@ static int tegra_nand_read_page_hwecc(struct mtd_info *mtd,
}
}
-static int tegra_nand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const u8 *buf,
+static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
void *oob_buf = oob_required ? chip->oob_poi : NULL;
int ret;
@@ -813,10 +814,9 @@ static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
writel_relaxed(reg, ctrl->regs + TIMING_2);
}
-static int tegra_nand_setup_data_interface(struct mtd_info *mtd, int csline,
+static int tegra_nand_setup_data_interface(struct nand_chip *chip, int csline,
const struct nand_data_interface *conf)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
const struct nand_sdr_timings *timings;
@@ -1119,7 +1119,7 @@ static int tegra_nand_chips_init(struct device *dev,
chip->select_chip = tegra_nand_select_chip;
chip->setup_data_interface = tegra_nand_setup_data_interface;
- ret = nand_scan(mtd, 1);
+ ret = nand_scan(chip, 1);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index dcaa924502de..f3b59e649b7d 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -126,11 +126,10 @@ static inline struct tmio_nand *mtd_to_tmio(struct mtd_info *mtd)
/*--------------------------------------------------------------------------*/
-static void tmio_nand_hwcontrol(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
+static void tmio_nand_hwcontrol(struct nand_chip *chip, int cmd,
+ unsigned int ctrl)
{
- struct tmio_nand *tmio = mtd_to_tmio(mtd);
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
if (ctrl & NAND_CTRL_CHANGE) {
u8 mode;
@@ -156,12 +155,12 @@ static void tmio_nand_hwcontrol(struct mtd_info *mtd, int cmd,
}
if (cmd != NAND_CMD_NONE)
- tmio_iowrite8(cmd, chip->IO_ADDR_W);
+ tmio_iowrite8(cmd, chip->legacy.IO_ADDR_W);
}
-static int tmio_nand_dev_ready(struct mtd_info *mtd)
+static int tmio_nand_dev_ready(struct nand_chip *chip)
{
- struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
return !(tmio_ioread8(tmio->fcr + FCR_STATUS) & FCR_STATUS_BUSY);
}
@@ -187,10 +186,9 @@ static irqreturn_t tmio_irq(int irq, void *__tmio)
* erase and write, we enable it to wake us up. The irq handler
* disables the interrupt.
*/
-static int
-tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
+static int tmio_nand_wait(struct nand_chip *nand_chip)
{
- struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(nand_chip));
long timeout;
u8 status;
@@ -199,10 +197,10 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);
timeout = wait_event_timeout(nand_chip->controller->wq,
- tmio_nand_dev_ready(mtd),
+ tmio_nand_dev_ready(nand_chip),
msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20));
- if (unlikely(!tmio_nand_dev_ready(mtd))) {
+ if (unlikely(!tmio_nand_dev_ready(nand_chip))) {
tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n",
nand_chip->state == FL_ERASING ? "erase" : "program",
@@ -225,9 +223,9 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
* To prevent stale data from being read, tmio_nand_hwcontrol() clears
* tmio->read_good.
*/
-static u_char tmio_nand_read_byte(struct mtd_info *mtd)
+static u_char tmio_nand_read_byte(struct nand_chip *chip)
{
- struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
unsigned int data;
if (tmio->read_good--)
@@ -245,33 +243,33 @@ static u_char tmio_nand_read_byte(struct mtd_info *mtd)
* buffer functions.
*/
static void
-tmio_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+tmio_nand_write_buf(struct nand_chip *chip, const u_char *buf, int len)
{
- struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
tmio_iowrite16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
}
-static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void tmio_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
- struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
}
-static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+static void tmio_nand_enable_hwecc(struct nand_chip *chip, int mode)
{
- struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
tmio_iowrite8(FCR_MODE_HWECC_RESET, tmio->fcr + FCR_MODE);
tmio_ioread8(tmio->fcr + FCR_DATA); /* dummy read */
tmio_iowrite8(FCR_MODE_HWECC_CALC, tmio->fcr + FCR_MODE);
}
-static int tmio_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
- u_char *ecc_code)
+static int tmio_nand_calculate_ecc(struct nand_chip *chip, const u_char *dat,
+ u_char *ecc_code)
{
- struct tmio_nand *tmio = mtd_to_tmio(mtd);
+ struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip));
unsigned int ecc;
tmio_iowrite8(FCR_MODE_HWECC_RESULT, tmio->fcr + FCR_MODE);
@@ -290,16 +288,18 @@ static int tmio_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
return 0;
}
-static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
- unsigned char *read_ecc, unsigned char *calc_ecc)
+static int tmio_nand_correct_data(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
{
int r0, r1;
/* assume ecc.size = 512 and ecc.bytes = 6 */
- r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
+ r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256, false);
if (r0 < 0)
return r0;
- r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256);
+ r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256,
+ false);
if (r1 < 0)
return r1;
return r0 + r1;
@@ -400,15 +400,15 @@ static int tmio_probe(struct platform_device *dev)
return retval;
/* Set address of NAND IO lines */
- nand_chip->IO_ADDR_R = tmio->fcr;
- nand_chip->IO_ADDR_W = tmio->fcr;
+ nand_chip->legacy.IO_ADDR_R = tmio->fcr;
+ nand_chip->legacy.IO_ADDR_W = tmio->fcr;
/* Set address of hardware control function */
- nand_chip->cmd_ctrl = tmio_nand_hwcontrol;
- nand_chip->dev_ready = tmio_nand_dev_ready;
- nand_chip->read_byte = tmio_nand_read_byte;
- nand_chip->write_buf = tmio_nand_write_buf;
- nand_chip->read_buf = tmio_nand_read_buf;
+ nand_chip->legacy.cmd_ctrl = tmio_nand_hwcontrol;
+ nand_chip->legacy.dev_ready = tmio_nand_dev_ready;
+ nand_chip->legacy.read_byte = tmio_nand_read_byte;
+ nand_chip->legacy.write_buf = tmio_nand_write_buf;
+ nand_chip->legacy.read_buf = tmio_nand_read_buf;
/* set eccmode using hardware ECC */
nand_chip->ecc.mode = NAND_ECC_HW;
@@ -423,7 +423,7 @@ static int tmio_probe(struct platform_device *dev)
nand_chip->badblock_pattern = data->badblock_pattern;
/* 15 us command delay time */
- nand_chip->chip_delay = 15;
+ nand_chip->legacy.chip_delay = 15;
retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0,
dev_name(&dev->dev), tmio);
@@ -433,10 +433,10 @@ static int tmio_probe(struct platform_device *dev)
}
tmio->irq = irq;
- nand_chip->waitfunc = tmio_nand_wait;
+ nand_chip->legacy.waitfunc = tmio_nand_wait;
/* Scan to find existence of the device */
- retval = nand_scan(mtd, 1);
+ retval = nand_scan(nand_chip, 1);
if (retval)
goto err_irq;
@@ -449,7 +449,7 @@ static int tmio_probe(struct platform_device *dev)
if (!retval)
return retval;
- nand_release(mtd);
+ nand_release(nand_chip);
err_irq:
tmio_hw_stop(dev, tmio);
@@ -460,7 +460,7 @@ static int tmio_remove(struct platform_device *dev)
{
struct tmio_nand *tmio = platform_get_drvdata(dev);
- nand_release(nand_to_mtd(&tmio->chip));
+ nand_release(&tmio->chip);
tmio_hw_stop(dev, tmio);
return 0;
}
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index 4d61a14fcb65..ddf0420c0997 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -102,17 +102,17 @@ static void txx9ndfmc_write(struct platform_device *dev,
__raw_writel(val, ndregaddr(dev, reg));
}
-static uint8_t txx9ndfmc_read_byte(struct mtd_info *mtd)
+static uint8_t txx9ndfmc_read_byte(struct nand_chip *chip)
{
- struct platform_device *dev = mtd_to_platdev(mtd);
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
return txx9ndfmc_read(dev, TXX9_NDFDTR);
}
-static void txx9ndfmc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+static void txx9ndfmc_write_buf(struct nand_chip *chip, const uint8_t *buf,
int len)
{
- struct platform_device *dev = mtd_to_platdev(mtd);
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
@@ -122,19 +122,18 @@ static void txx9ndfmc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
}
-static void txx9ndfmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void txx9ndfmc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
- struct platform_device *dev = mtd_to_platdev(mtd);
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
while (len--)
*buf++ = __raw_readl(ndfdtr);
}
-static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
+static void txx9ndfmc_cmd_ctrl(struct nand_chip *chip, int cmd,
unsigned int ctrl)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct txx9ndfmc_priv *txx9_priv = nand_get_controller_data(chip);
struct platform_device *dev = txx9_priv->dev;
struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
@@ -163,18 +162,17 @@ static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
mmiowb();
}
-static int txx9ndfmc_dev_ready(struct mtd_info *mtd)
+static int txx9ndfmc_dev_ready(struct nand_chip *chip)
{
- struct platform_device *dev = mtd_to_platdev(mtd);
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
return !(txx9ndfmc_read(dev, TXX9_NDFSR) & TXX9_NDFSR_BUSY);
}
-static int txx9ndfmc_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat,
+static int txx9ndfmc_calculate_ecc(struct nand_chip *chip, const uint8_t *dat,
uint8_t *ecc_code)
{
- struct platform_device *dev = mtd_to_platdev(mtd);
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
int eccbytes;
u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
@@ -191,16 +189,17 @@ static int txx9ndfmc_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat,
return 0;
}
-static int txx9ndfmc_correct_data(struct mtd_info *mtd, unsigned char *buf,
- unsigned char *read_ecc, unsigned char *calc_ecc)
+static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
int eccsize;
int corrected = 0;
int stat;
for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
- stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
+ stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256,
+ false);
if (stat < 0)
return stat;
corrected += stat;
@@ -211,9 +210,9 @@ static int txx9ndfmc_correct_data(struct mtd_info *mtd, unsigned char *buf,
return corrected;
}
-static void txx9ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
+static void txx9ndfmc_enable_hwecc(struct nand_chip *chip, int mode)
{
- struct platform_device *dev = mtd_to_platdev(mtd);
+ struct platform_device *dev = mtd_to_platdev(nand_to_mtd(chip));
u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
mcr &= ~TXX9_NDFMCR_ECC_ALL;
@@ -326,17 +325,17 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
mtd = nand_to_mtd(chip);
mtd->dev.parent = &dev->dev;
- chip->read_byte = txx9ndfmc_read_byte;
- chip->read_buf = txx9ndfmc_read_buf;
- chip->write_buf = txx9ndfmc_write_buf;
- chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
- chip->dev_ready = txx9ndfmc_dev_ready;
+ chip->legacy.read_byte = txx9ndfmc_read_byte;
+ chip->legacy.read_buf = txx9ndfmc_read_buf;
+ chip->legacy.write_buf = txx9ndfmc_write_buf;
+ chip->legacy.cmd_ctrl = txx9ndfmc_cmd_ctrl;
+ chip->legacy.dev_ready = txx9ndfmc_dev_ready;
chip->ecc.calculate = txx9ndfmc_calculate_ecc;
chip->ecc.correct = txx9ndfmc_correct_data;
chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.strength = 1;
- chip->chip_delay = 100;
+ chip->legacy.chip_delay = 100;
chip->controller = &drvdata->controller;
nand_set_controller_data(chip, txx9_priv);
@@ -359,7 +358,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
if (plat->wide_mask & (1 << i))
chip->options |= NAND_BUSWIDTH_16;
- if (nand_scan(mtd, 1)) {
+ if (nand_scan(chip, 1)) {
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
continue;
@@ -390,7 +389,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
chip = mtd_to_nand(mtd);
txx9_priv = nand_get_controller_data(chip);
- nand_release(mtd);
+ nand_release(chip);
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index 6f6dcbf9095b..9814fd4a84cf 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -498,9 +498,9 @@ static int vf610_nfc_exec_op(struct nand_chip *chip,
/*
* This function supports Vybrid only (MPC5125 would have full RB and four CS)
*/
-static void vf610_nfc_select_chip(struct mtd_info *mtd, int chip)
+static void vf610_nfc_select_chip(struct nand_chip *chip, int cs)
{
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = mtd_to_nfc(nand_to_mtd(chip));
u32 tmp = vf610_nfc_read(nfc, NFC_ROW_ADDR);
/* Vybrid only (MPC5125 would have full RB and four CS) */
@@ -509,9 +509,9 @@ static void vf610_nfc_select_chip(struct mtd_info *mtd, int chip)
tmp &= ~(ROW_ADDR_CHIP_SEL_RB_MASK | ROW_ADDR_CHIP_SEL_MASK);
- if (chip >= 0) {
+ if (cs >= 0) {
tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
- tmp |= BIT(chip) << ROW_ADDR_CHIP_SEL_SHIFT;
+ tmp |= BIT(cs) << ROW_ADDR_CHIP_SEL_SHIFT;
}
vf610_nfc_write(nfc, NFC_ROW_ADDR, tmp);
@@ -557,9 +557,10 @@ static void vf610_nfc_fill_row(struct nand_chip *chip, int page, u32 *code,
}
}
-static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int vf610_nfc_read_page(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
int trfr_sz = mtd->writesize + mtd->oobsize;
u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
@@ -602,9 +603,10 @@ static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
}
}
-static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page)
+static int vf610_nfc_write_page(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
int trfr_sz = mtd->writesize + mtd->oobsize;
u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
@@ -643,24 +645,24 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
-static int vf610_nfc_read_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, u8 *buf,
+static int vf610_nfc_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
int ret;
nfc->data_access = true;
- ret = nand_read_page_raw(mtd, chip, buf, oob_required, page);
+ ret = nand_read_page_raw(chip, buf, oob_required, page);
nfc->data_access = false;
return ret;
}
-static int vf610_nfc_write_page_raw(struct mtd_info *mtd,
- struct nand_chip *chip, const u8 *buf,
+static int vf610_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
int ret;
@@ -677,22 +679,21 @@ static int vf610_nfc_write_page_raw(struct mtd_info *mtd,
return nand_prog_page_end_op(chip);
}
-static int vf610_nfc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int vf610_nfc_read_oob(struct nand_chip *chip, int page)
{
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = mtd_to_nfc(nand_to_mtd(chip));
int ret;
nfc->data_access = true;
- ret = nand_read_oob_std(mtd, chip, page);
+ ret = nand_read_oob_std(chip, page);
nfc->data_access = false;
return ret;
}
-static int vf610_nfc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+static int vf610_nfc_write_oob(struct nand_chip *chip, int page)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
int ret;
@@ -892,7 +893,7 @@ static int vf610_nfc_probe(struct platform_device *pdev)
/* Scan the NAND chip */
chip->dummy_controller.ops = &vf610_nfc_controller_ops;
- err = nand_scan(mtd, 1);
+ err = nand_scan(chip, 1);
if (err)
goto err_disable_clk;
@@ -916,7 +917,7 @@ static int vf610_nfc_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
- nand_release(mtd);
+ nand_release(mtd_to_nand(mtd));
clk_disable_unprepare(nfc->clk);
return 0;
}
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index 9926b4e3d69d..a234a5cb4868 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -85,9 +85,8 @@ static void xway_writeb(struct mtd_info *mtd, int op, u8 value)
writeb(value, data->nandaddr + op);
}
-static void xway_select_chip(struct mtd_info *mtd, int select)
+static void xway_select_chip(struct nand_chip *chip, int select)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
struct xway_nand_data *data = nand_get_controller_data(chip);
switch (select) {
@@ -106,8 +105,10 @@ static void xway_select_chip(struct mtd_info *mtd, int select)
}
}
-static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+static void xway_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
if (cmd == NAND_CMD_NONE)
return;
@@ -120,30 +121,30 @@ static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
;
}
-static int xway_dev_ready(struct mtd_info *mtd)
+static int xway_dev_ready(struct nand_chip *chip)
{
return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
}
-static unsigned char xway_read_byte(struct mtd_info *mtd)
+static unsigned char xway_read_byte(struct nand_chip *chip)
{
- return xway_readb(mtd, NAND_READ_DATA);
+ return xway_readb(nand_to_mtd(chip), NAND_READ_DATA);
}
-static void xway_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+static void xway_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
int i;
for (i = 0; i < len; i++)
- buf[i] = xway_readb(mtd, NAND_WRITE_DATA);
+ buf[i] = xway_readb(nand_to_mtd(chip), NAND_WRITE_DATA);
}
-static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+static void xway_write_buf(struct nand_chip *chip, const u_char *buf, int len)
{
int i;
for (i = 0; i < len; i++)
- xway_writeb(mtd, NAND_WRITE_DATA, buf[i]);
+ xway_writeb(nand_to_mtd(chip), NAND_WRITE_DATA, buf[i]);
}
/*
@@ -173,13 +174,13 @@ static int xway_nand_probe(struct platform_device *pdev)
mtd = nand_to_mtd(&data->chip);
mtd->dev.parent = &pdev->dev;
- data->chip.cmd_ctrl = xway_cmd_ctrl;
- data->chip.dev_ready = xway_dev_ready;
+ data->chip.legacy.cmd_ctrl = xway_cmd_ctrl;
+ data->chip.legacy.dev_ready = xway_dev_ready;
data->chip.select_chip = xway_select_chip;
- data->chip.write_buf = xway_write_buf;
- data->chip.read_buf = xway_read_buf;
- data->chip.read_byte = xway_read_byte;
- data->chip.chip_delay = 30;
+ data->chip.legacy.write_buf = xway_write_buf;
+ data->chip.legacy.read_buf = xway_read_buf;
+ data->chip.legacy.read_byte = xway_read_byte;
+ data->chip.legacy.chip_delay = 30;
data->chip.ecc.mode = NAND_ECC_SOFT;
data->chip.ecc.algo = NAND_ECC_HAMMING;
@@ -205,13 +206,13 @@ static int xway_nand_probe(struct platform_device *pdev)
| cs_flag, EBU_NAND_CON);
/* Scan to find existence of the device */
- err = nand_scan(mtd, 1);
+ err = nand_scan(&data->chip, 1);
if (err)
return err;
err = mtd_device_register(mtd, NULL, 0);
if (err)
- nand_release(mtd);
+ nand_release(&data->chip);
return err;
}
@@ -223,7 +224,7 @@ static int xway_nand_remove(struct platform_device *pdev)
{
struct xway_nand_data *data = platform_get_drvdata(pdev);
- nand_release(nand_to_mtd(&data->chip));
+ nand_release(&data->chip);
return 0;
}
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index f3bd86e13603..89227b1d036a 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -221,14 +221,18 @@ static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
{
uint8_t ecc[3];
- __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
- if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC)) < 0)
return -EIO;
buffer += SM_SMALL_PAGE;
- __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
- if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
+ __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC)) < 0)
return -EIO;
return 0;
}
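
The IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC) arguments above reflect an API change made elsewhere in this series: __nand_calculate_ecc() and __nand_correct_data() grew a trailing bool that selects the Smart Media ECC byte order at run time instead of baking it in at compile time, so sm_ftl can follow its Kconfig option while other callers (tmio, txx9) simply pass false. A toy sketch of what such a flag typically controls; the exact byte layout below is illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same ECC payload, two on-flash byte orders. */
static void store_ecc(uint8_t code[3], uint32_t raw, bool sm_order)
{
	if (sm_order) {            /* Smart Media ordering */
		code[0] = raw >> 16;
		code[1] = raw >> 8;
	} else {                   /* traditional ordering */
		code[0] = raw >> 8;
		code[1] = raw >> 16;
	}
	code[2] = raw & 0xff;
}

int main(void)
{
	uint8_t a[3], b[3];

	store_ecc(a, 0x123456, true);
	store_ecc(b, 0x123456, false);
	printf("%02x%02x%02x %02x%02x%02x\n",
	       a[0], a[1], a[2], b[0], b[1], b[2]);
	return 0;
}
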
@@ -393,11 +397,13 @@ restart:
}
if (ftl->smallpagenand) {
- __nand_calculate_ecc(buf + boffset,
- SM_SMALL_PAGE, oob.ecc1);
+ __nand_calculate_ecc(buf + boffset, SM_SMALL_PAGE,
+ oob.ecc1,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
__nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
- SM_SMALL_PAGE, oob.ecc2);
+ SM_SMALL_PAGE, oob.ecc2,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
}
if (!sm_write_sector(ftl, zone, block, boffset,
buf + boffset, &oob))
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 8e714fbfa521..e24db817154e 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -959,7 +959,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
return 0;
}
- dma_dst = dma_map_single(nor->dev, buf, len, DMA_DEV_TO_MEM);
+ dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(nor->dev, dma_dst)) {
dev_err(nor->dev, "dma mapping failed\n");
return -ENOMEM;
@@ -994,7 +994,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
}
err_unmap:
- dma_unmap_single(nor->dev, dma_dst, len, DMA_DEV_TO_MEM);
+ dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
return 0;
}
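
The cadence-quadspi change above is a type-correctness fix: dma_map_single()/dma_unmap_single() take an enum dma_data_direction, while DMA_DEV_TO_MEM belongs to enum dma_transfer_direction, the dmaengine slave API. The two constants happen to share the value 2, so the old code worked by coincidence; DMA_FROM_DEVICE is the correct constant for a device-to-memory read. Mirrors of the two kernel enums (as declared in include/linux/dma-direction.h and include/linux/dmaengine.h):

#include <stdio.h>

enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};

enum dma_transfer_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,
};

int main(void)
{
	/* Same numeric value, different type and meaning. */
	printf("%d %d\n", DMA_FROM_DEVICE, DMA_DEV_TO_MEM);  /* 2 2 */
	return 0;
}
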
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 7d9620c7ff6c..1ff3430f82c8 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -478,6 +478,7 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
{
switch (cmd) {
case SPINOR_OP_READ_1_1_4:
+ case SPINOR_OP_READ_1_1_4_4B:
return SEQID_READ;
case SPINOR_OP_WREN:
return SEQID_WREN;
@@ -543,6 +544,9 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
/* trigger the LUT now */
seqid = fsl_qspi_get_seqid(q, cmd);
+ if (seqid < 0)
+ return seqid;
+
qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len,
base + QUADSPI_IPCR);
@@ -671,7 +675,7 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
* causes the controller to clear the buffer, and use the sequence pointed
* to by QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
*/
-static void fsl_qspi_init_ahb_read(struct fsl_qspi *q)
+static int fsl_qspi_init_ahb_read(struct fsl_qspi *q)
{
void __iomem *base = q->iobase;
int seqid;
@@ -696,8 +700,13 @@ static void fsl_qspi_init_ahb_read(struct fsl_qspi *q)
/* Set the default lut sequence for AHB Read. */
seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
+ if (seqid < 0)
+ return seqid;
+
qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
q->iobase + QUADSPI_BFGENCR);
+
+ return 0;
}
/* This function is used to prepare and enable the QSPI clock */
@@ -805,9 +814,7 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
fsl_qspi_init_lut(q);
/* Init for AHB read */
- fsl_qspi_init_ahb_read(q);
-
- return 0;
+ return fsl_qspi_init_ahb_read(q);
}
static const struct of_device_id fsl_qspi_dt_ids[] = {
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index c0976f2e3dd1..872b40922608 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -65,6 +65,7 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
{ },
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index f028277fb1ce..9407ca5f9443 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -18,6 +18,7 @@
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/sort.h>
#include <linux/mtd/mtd.h>
#include <linux/of_platform.h>
@@ -260,6 +261,18 @@ static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+
+ if (!spi_nor_has_uniform_erase(nor)) {
+ struct spi_nor_erase_map *map = &nor->erase_map;
+ struct spi_nor_erase_type *erase;
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ erase = &map->erase_type[i];
+ erase->opcode =
+ spi_nor_convert_3to4_erase(erase->opcode);
+ }
+ }
}
/* Enable/disable 4-byte addressing mode. */
@@ -497,6 +510,277 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
}
+/**
+ * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @dividend: dividend value
+ * @remainder: pointer to u32 remainder (will be updated)
+ *
+ * Return: the result of the division
+ */
+static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
+ u64 dividend, u32 *remainder)
+{
+ /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
+ *remainder = (u32)dividend & erase->size_mask;
+ return dividend >> erase->size_shift;
+}
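
A worked example of the helper above, assuming a 64 KiB erase type (size_shift = 16 and size_mask = 0xffff, as computed later by spi_nor_set_erase_type()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x12345;          /* offset into the flash */
	uint32_t rem  = addr & 0xffff;    /* 0x2345: not 64K-aligned */
	uint64_t q    = addr >> 16;       /* 0x1: whole 64 KiB blocks */

	printf("q=%#llx rem=%#x\n", (unsigned long long)q, rem);
	return 0;
}
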
+
+/**
+ * spi_nor_find_best_erase_type() - find the best erase type for the given
+ * offset in the serial flash memory and the
+ * number of bytes to erase. The region in
+ * which the address fits is expected to be
+ * provided.
+ * @map: the erase map of the SPI NOR
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Return: a pointer to the best fitted erase type, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
+ const struct spi_nor_erase_region *region,
+ u64 addr, u32 len)
+{
+ const struct spi_nor_erase_type *erase;
+ u32 rem;
+ int i;
+ u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+ /*
+ * Erase types are ordered by size, with the smallest erase type at
+ * index 0.
+ */
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ /* Does the erase region support the tested erase type? */
+ if (!(erase_mask & BIT(i)))
+ continue;
+
+ erase = &map->erase_type[i];
+
+ /* Don't erase more than what the user has asked for. */
+ if (erase->size > len)
+ continue;
+
+ /* Alignment is not mandatory for overlaid regions */
+ if (region->offset & SNOR_OVERLAID_REGION)
+ return erase;
+
+ spi_nor_div_by_erase_size(erase, addr, &rem);
+ if (rem)
+ continue;
+ else
+ return erase;
+ }
+
+ return NULL;
+}
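
To see the loop in action, assume two erase types sorted ascending (4 KiB in slot 0, 64 KiB in slot 1) and a region whose mask permits both. A compact re-creation of the selection logic, with a hypothetical slot layout:

#include <stdint.h>
#include <stdio.h>

static const uint32_t sizes[2] = { 0x1000, 0x10000 };  /* 4K, 64K */

static int best_type(uint64_t addr, uint32_t len)
{
	int i;

	for (i = 1; i >= 0; i--) {        /* biggest type first */
		if (sizes[i] > len)
			continue;         /* don't over-erase */
		if (addr & (sizes[i] - 1))
			continue;         /* need an aligned start */
		return i;
	}
	return -1;                        /* no type fits */
}

int main(void)
{
	/* Erasing 68 KiB at offset 0 picks 64K first, then 4K. */
	printf("%d\n", best_type(0x00000, 0x11000));  /* 1 */
	printf("%d\n", best_type(0x10000, 0x01000));  /* 0 */
	return 0;
}
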
+
+/**
+ * spi_nor_region_next() - get the next spi nor region
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ *
+ * Return: the next spi nor region or NULL if last region.
+ */
+static struct spi_nor_erase_region *
+spi_nor_region_next(struct spi_nor_erase_region *region)
+{
+ if (spi_nor_region_is_last(region))
+ return NULL;
+ region++;
+ return region;
+}
+
+/**
+ * spi_nor_find_erase_region() - find the region of the serial flash memory in
+ * which the offset fits
+ * @map: the erase map of the SPI NOR
+ * @addr: offset in the serial flash memory
+ *
+ * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
+ * otherwise.
+ */
+static struct spi_nor_erase_region *
+spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
+{
+ struct spi_nor_erase_region *region = map->regions;
+ u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+ u64 region_end = region_start + region->size;
+
+ while (addr < region_start || addr >= region_end) {
+ region = spi_nor_region_next(region);
+ if (!region)
+ return ERR_PTR(-EINVAL);
+
+ region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
+ region_end = region_start + region->size;
+ }
+
+ return region;
+}
+
+/**
+ * spi_nor_init_erase_cmd() - initialize an erase command
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ *
+ * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
+ * otherwise.
+ */
+static struct spi_nor_erase_command *
+spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase)
+{
+ struct spi_nor_erase_command *cmd;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&cmd->list);
+ cmd->opcode = erase->opcode;
+ cmd->count = 1;
+
+ if (region->offset & SNOR_OVERLAID_REGION)
+ cmd->size = region->size;
+ else
+ cmd->size = erase->size;
+
+ return cmd;
+}
+
+/**
+ * spi_nor_destroy_erase_cmd_list() - destroy erase command list
+ * @erase_list: list of erase commands
+ */
+static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
+{
+ struct spi_nor_erase_command *cmd, *next;
+
+ list_for_each_entry_safe(cmd, next, erase_list, list) {
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+}
+
+/**
+ * spi_nor_init_erase_cmd_list() - initialize erase command list
+ * @nor: pointer to a 'struct spi_nor'
+ * @erase_list: list of erase commands to be executed once we validate that the
+ * erase can be performed
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Builds the list of best fitted erase commands and verifies if the erase can
+ * be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
+ struct list_head *erase_list,
+ u64 addr, u32 len)
+{
+ const struct spi_nor_erase_map *map = &nor->erase_map;
+ const struct spi_nor_erase_type *erase, *prev_erase = NULL;
+ struct spi_nor_erase_region *region;
+ struct spi_nor_erase_command *cmd = NULL;
+ u64 region_end;
+ int ret = -EINVAL;
+
+ region = spi_nor_find_erase_region(map, addr);
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ region_end = spi_nor_region_end(region);
+
+ while (len) {
+ erase = spi_nor_find_best_erase_type(map, region, addr, len);
+ if (!erase)
+ goto destroy_erase_cmd_list;
+
+ if (prev_erase != erase ||
+ region->offset & SNOR_OVERLAID_REGION) {
+ cmd = spi_nor_init_erase_cmd(region, erase);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
+ goto destroy_erase_cmd_list;
+ }
+
+ list_add_tail(&cmd->list, erase_list);
+ } else {
+ cmd->count++;
+ }
+
+ addr += cmd->size;
+ len -= cmd->size;
+
+ if (len && addr >= region_end) {
+ region = spi_nor_region_next(region);
+ if (!region)
+ goto destroy_erase_cmd_list;
+ region_end = spi_nor_region_end(region);
+ }
+
+ prev_erase = erase;
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(erase_list);
+ return ret;
+}
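
Concretely, the list builder coalesces runs of identical erase types into {opcode, size, count} commands. A standalone sketch of that coalescing for a uniform map with 64 KiB and 4 KiB types, erasing 132 KiB from offset 0; the opcodes 0xd8 and 0x20 are the conventional SPI NOR ones, used here only for illustration:

#include <stdint.h>
#include <stdio.h>

struct cmd { uint8_t opcode; uint32_t size; uint32_t count; };

int main(void)
{
	struct cmd cmds[8];
	int n = 0;
	uint64_t addr = 0;
	uint32_t len = 0x21000;                 /* 132 KiB */

	while (len) {
		/* pick 64K when it fits and is aligned, else 4K */
		uint32_t size = (len >= 0x10000 && !(addr & 0xffff))
				? 0x10000 : 0x1000;
		uint8_t op = (size == 0x10000) ? 0xd8 : 0x20;

		if (n && cmds[n - 1].opcode == op) {
			cmds[n - 1].count++;    /* same type: coalesce */
		} else {
			cmds[n].opcode = op;
			cmds[n].size = size;
			cmds[n].count = 1;
			n++;
		}
		addr += size;
		len -= size;
	}

	for (int i = 0; i < n; i++)             /* 0xd8 x2, then 0x20 x1 */
		printf("op=%#x size=%#x count=%u\n",
		       cmds[i].opcode, cmds[i].size, cmds[i].count);
	return 0;
}
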
+
+/**
+ * spi_nor_erase_multi_sectors() - perform a non-uniform erase
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to erase
+ *
+ * Build a list of best fitted erase commands and execute it once we validate
+ * that the erase can be performed.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
+{
+ LIST_HEAD(erase_list);
+ struct spi_nor_erase_command *cmd, *next;
+ int ret;
+
+ ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
+ if (ret)
+ return ret;
+
+ list_for_each_entry_safe(cmd, next, &erase_list, list) {
+ nor->erase_opcode = cmd->opcode;
+ while (cmd->count) {
+ write_enable(nor);
+
+ ret = spi_nor_erase_sector(nor, addr);
+ if (ret)
+ goto destroy_erase_cmd_list;
+
+ addr += cmd->size;
+ cmd->count--;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto destroy_erase_cmd_list;
+ }
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+
+ return 0;
+
+destroy_erase_cmd_list:
+ spi_nor_destroy_erase_cmd_list(&erase_list);
+ return ret;
+}
+
/*
* Erase an address range on the nor chip. The address range may extend
* across one or more erase sectors. Return an error if there is a problem
* erasing.
@@ -511,9 +795,11 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
(long long)instr->len);
- div_u64_rem(instr->len, mtd->erasesize, &rem);
- if (rem)
- return -EINVAL;
+ if (spi_nor_has_uniform_erase(nor)) {
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
+ return -EINVAL;
+ }
addr = instr->addr;
len = instr->len;
@@ -552,7 +838,7 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
*/
/* "sector"-at-a-time erase */
- } else {
+ } else if (spi_nor_has_uniform_erase(nor)) {
while (len) {
write_enable(nor);
@@ -567,6 +853,12 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
if (ret)
goto erase_err;
}
+
+ /* erase multiple sectors */
+ } else {
+ ret = spi_nor_erase_multi_sectors(nor, addr, len);
+ if (ret)
+ goto erase_err;
}
write_disable(nor);
@@ -1464,13 +1756,6 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
goto write_err;
*retlen += written;
i += written;
- if (written != page_remain) {
- dev_err(nor->dev,
- "While writing %zu bytes written %zd bytes\n",
- page_remain, written);
- ret = -EIO;
- goto write_err;
- }
}
write_err:
@@ -1864,6 +2149,36 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
*/
/**
+ * spi_nor_read_raw() - raw read of serial flash memory. read_opcode,
+ * addr_width and read_dummy members of the struct spi_nor
+ * should be previously set.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the serial flash memory
+ * @len: number of bytes to read
+ * @buf: buffer where the data is copied into
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
+{
+ int ret;
+
+ while (len) {
+ ret = nor->read(nor, addr, len, buf);
+ if (!ret || ret > len)
+ return -EIO;
+ if (ret < 0)
+ return ret;
+
+ buf += ret;
+ addr += ret;
+ len -= ret;
+ }
+ return 0;
+}
+
+/**
* spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
* @nor: pointer to a 'struct spi_nor'
* @addr: offset in the SFDP area to start reading data from
@@ -1890,22 +2205,8 @@ static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
nor->addr_width = 3;
nor->read_dummy = 8;
- while (len) {
- ret = nor->read(nor, addr, len, (u8 *)buf);
- if (!ret || ret > len) {
- ret = -EIO;
- goto read_err;
- }
- if (ret < 0)
- goto read_err;
-
- buf += ret;
- addr += ret;
- len -= ret;
- }
- ret = 0;
+ ret = spi_nor_read_raw(nor, addr, len, buf);
-read_err:
nor->read_opcode = read_opcode;
nor->addr_width = addr_width;
nor->read_dummy = read_dummy;
@@ -2166,6 +2467,116 @@ static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
static int spi_nor_hwcaps_read2cmd(u32 hwcaps);
/**
+ * spi_nor_set_erase_type() - set a SPI NOR erase type
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type
+ * @opcode: the SPI command op code to erase the sector/block
+ */
+static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
+ u32 size, u8 opcode)
+{
+ erase->size = size;
+ erase->opcode = opcode;
+ /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
+ erase->size_shift = ffs(erase->size) - 1;
+ erase->size_mask = (1 << erase->size_shift) - 1;
+}
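
Since JESD216B guarantees power-of-2 erase sizes, ffs() is enough to precompute the shift and mask that spi_nor_div_by_erase_size() relies on. For example:

#include <stdio.h>
#include <strings.h>   /* ffs() */

int main(void)
{
	unsigned int size = 0x10000;            /* 64 KiB erase type */
	unsigned int shift = ffs(size) - 1;     /* 16 */
	unsigned int mask = (1u << shift) - 1;  /* 0xffff */

	printf("shift=%u mask=%#x\n", shift, mask);
	return 0;
}
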
+
+/**
+ * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type
+ * @opcode: the SPI command op code to erase the sector/block
+ * @i: erase type index as sorted in the Basic Flash Parameter Table
+ *
+ * The supported Erase Types will be sorted at init in ascending order, with
+ * the smallest Erase Type size being the first member in the erase_type array
+ * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
+ * the Basic Flash Parameter Table since it will be used later on to
+ * synchronize with the supported Erase Types defined in SFDP optional tables.
+ */
+static void
+spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
+ u32 size, u8 opcode, u8 i)
+{
+ erase->idx = i;
+ spi_nor_set_erase_type(erase, size, opcode);
+}
+
+/**
+ * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
+ * @l: member in the left half of the map's erase_type array
+ * @r: member in the right half of the map's erase_type array
+ *
+ * Comparison function used in the sort() call to sort in ascending order the
+ * map's erase types, the smallest erase type size being the first member in the
+ * sorted erase_type array.
+ *
+ * Return: the result of @l->size - @r->size
+ */
+static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
+{
+ const struct spi_nor_erase_type *left = l, *right = r;
+
+ return left->size - right->size;
+}
+
+/**
+ * spi_nor_regions_sort_erase_types() - sort erase types in each region
+ * @map: the erase map of the SPI NOR
+ *
+ * Function assumes that the erase types defined in the erase map are already
+ * sorted in ascending order, with the smallest erase type size being the first
+ * member in the erase_type array. It replicates the sort done for the map's
+ * erase types. Each region's erase bitmask will indicate which erase types are
+ * supported from the sorted erase types defined in the erase map.
+ * Sort all the regions' erase types at init in order to speed up the process of
+ * finding the best erase command at runtime.
+ */
+static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
+{
+ struct spi_nor_erase_region *region = map->regions;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ int i;
+ u8 region_erase_mask, sorted_erase_mask;
+
+ while (region) {
+ region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
+
+ /* Replicate the sort done for the map's erase types. */
+ sorted_erase_mask = 0;
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (erase_type[i].size &&
+ region_erase_mask & BIT(erase_type[i].idx))
+ sorted_erase_mask |= BIT(i);
+
+ /* Overwrite erase mask. */
+ region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
+ sorted_erase_mask;
+
+ region = spi_nor_region_next(region);
+ }
+}
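
A small example of that bitmask remap. Suppose the BFPT listed the types as idx 0 = 64 KiB, idx 1 = 32 KiB, idx 2 = 4 KiB (a hypothetical layout), so the ascending sort leaves erase_type[] = {4K, 32K, 64K} with idx = {2, 1, 0}. A region whose mask was 0b011 (64K and 32K in BFPT numbering) must become 0b110 in sorted numbering:

#include <stdio.h>

int main(void)
{
	unsigned char idx[3] = { 2, 1, 0 };  /* erase_type[i].idx */
	unsigned char region_mask = 0x3;     /* BFPT bits: 64K | 32K */
	unsigned char sorted = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (region_mask & (1 << idx[i]))
			sorted |= 1 << i;

	printf("%#x\n", sorted);             /* 0x6: sorted slots 1, 2 */
	return 0;
}
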
+
+/**
+ * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
+ * @map: the erase map of the SPI NOR
+ * @erase_mask: bitmask encoding erase types that can erase the entire
+ * flash memory
+ * @flash_size: the spi nor flash memory size
+ */
+static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+ u8 erase_mask, u64 flash_size)
+{
+ /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
+ map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
+ SNOR_LAST_REGION;
+ map->uniform_region.size = flash_size;
+ map->regions = &map->uniform_region;
+ map->uniform_erase_type = erase_mask;
+}
+
+/**
* spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
* @nor: pointer to a 'struct spi_nor'
* @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
@@ -2199,12 +2610,14 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
struct spi_nor_flash_parameter *params)
{
- struct mtd_info *mtd = &nor->mtd;
+ struct spi_nor_erase_map *map = &nor->erase_map;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
struct sfdp_bfpt bfpt;
size_t len;
int i, cmd, err;
u32 addr;
u16 half;
+ u8 erase_mask;
/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
@@ -2273,7 +2686,12 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
}
- /* Sector Erase settings. */
+ /*
+ * Sector Erase settings. Reinitialize the uniform erase map using the
+ * Erase Types defined in the bfpt table.
+ */
+ erase_mask = 0;
+ memset(&nor->erase_map, 0, sizeof(nor->erase_map));
for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
u32 erasesize;
@@ -2288,18 +2706,25 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
erasesize = 1U << erasesize;
opcode = (half >> 8) & 0xff;
-#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
- if (erasesize == SZ_4K) {
- nor->erase_opcode = opcode;
- mtd->erasesize = erasesize;
- break;
- }
-#endif
- if (!mtd->erasesize || mtd->erasesize < erasesize) {
- nor->erase_opcode = opcode;
- mtd->erasesize = erasesize;
- }
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
+ opcode, i);
}
+ spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+ /*
+ * Sort all the map's Erase Types in ascending order with the smallest
+ * erase size being the first member in the erase_type array.
+ */
+ sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
+ spi_nor_map_cmp_erase_type, NULL);
+ /*
+ * Sort the erase types in the uniform region in order to update the
+ * uniform_erase_type bitmask. The bitmask will be used later on when
+ * selecting the uniform erase.
+ */
+ spi_nor_regions_sort_erase_types(map);
+ map->uniform_erase_type = map->uniform_region.offset &
+ SNOR_ERASE_TYPE_MASK;
/* Stop here if not JESD216 rev A or later. */
if (bfpt_header->length < BFPT_DWORD_MAX)
@@ -2341,6 +2766,277 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
return 0;
}
+#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
+#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
+#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)
+
+#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
+#define SMPT_CMD_READ_DUMMY_SHIFT 16
+#define SMPT_CMD_READ_DUMMY(_cmd) \
+ (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
+#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL
+
+#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
+#define SMPT_CMD_READ_DATA_SHIFT 24
+#define SMPT_CMD_READ_DATA(_cmd) \
+ (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
+
+#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
+#define SMPT_CMD_OPCODE_SHIFT 8
+#define SMPT_CMD_OPCODE(_cmd) \
+ (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
+
+#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
+#define SMPT_MAP_REGION_COUNT_SHIFT 16
+#define SMPT_MAP_REGION_COUNT(_header) \
+ ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
+ SMPT_MAP_REGION_COUNT_SHIFT) + 1)
+
+#define SMPT_MAP_ID_MASK GENMASK(15, 8)
+#define SMPT_MAP_ID_SHIFT 8
+#define SMPT_MAP_ID(_header) \
+ (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
+
+#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
+#define SMPT_MAP_REGION_SIZE_SHIFT 8
+#define SMPT_MAP_REGION_SIZE(_region) \
+ (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
+ SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
+
+#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
+#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
+ ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
+
+#define SMPT_DESC_TYPE_MAP BIT(1)
+#define SMPT_DESC_END BIT(0)
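
A decoding example for the region DWORDs described by the macros above: bits 31:8 encode (size / 256) - 1 and bits 3:0 the erase-type bitmask, so a hypothetical region word of 0x0001ff03 describes a 128 KiB region erasable by types 0 and 1:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t region = 0x0001ff03;
	uint32_t size = (((region & 0xffffff00) >> 8) + 1) * 256;
	uint32_t types = region & 0xf;

	printf("size=%u types=%#x\n", size, types);  /* 131072 0x3 */
	return 0;
}
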
+
+/**
+ * spi_nor_smpt_addr_width() - return the address width used in the
+ * configuration detection command.
+ * @nor: pointer to a 'struct spi_nor'
+ * @settings: configuration detection command descriptor, dword1
+ */
+static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
+{
+ switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
+ case SMPT_CMD_ADDRESS_LEN_0:
+ return 0;
+ case SMPT_CMD_ADDRESS_LEN_3:
+ return 3;
+ case SMPT_CMD_ADDRESS_LEN_4:
+ return 4;
+ case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
+ /* fall through */
+ default:
+ return nor->addr_width;
+ }
+}
+
+/**
+ * spi_nor_smpt_read_dummy() - return the configuration detection command read
+ * latency, in clock cycles.
+ * @nor: pointer to a 'struct spi_nor'
+ * @settings: configuration detection command descriptor, dword1
+ *
+ * Return: the number of dummy cycles for an SMPT read
+ */
+static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
+{
+ u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
+
+ if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
+ return nor->read_dummy;
+ return read_dummy;
+}
+
+/**
+ * spi_nor_get_map_in_use() - get the configuration map in use
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt: pointer to the sector map parameter table
+ */
+static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt)
+{
+ const u32 *ret = NULL;
+ u32 i, addr;
+ int err;
+ u8 addr_width, read_opcode, read_dummy;
+ u8 read_data_mask, data_byte, map_id;
+
+ addr_width = nor->addr_width;
+ read_dummy = nor->read_dummy;
+ read_opcode = nor->read_opcode;
+
+ map_id = 0;
+ i = 0;
+ /* Determine if there are any optional Detection Command Descriptors */
+ while (!(smpt[i] & SMPT_DESC_TYPE_MAP)) {
+ read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
+ nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
+ nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
+ nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
+ addr = smpt[i + 1];
+
+ err = spi_nor_read_raw(nor, addr, 1, &data_byte);
+ if (err)
+ goto out;
+
+ /*
+ * Build an index value that is used to select the Sector Map
+ * Configuration that is currently in use.
+ */
+ map_id = map_id << 1 | !!(data_byte & read_data_mask);
+ i = i + 2;
+ }
+
+ /* Find the matching configuration map */
+ while (SMPT_MAP_ID(smpt[i]) != map_id) {
+ if (smpt[i] & SMPT_DESC_END)
+ goto out;
+ /* increment the table index to the next map */
+ i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
+ }
+
+ ret = smpt + i;
+ /* fall through */
+out:
+ nor->addr_width = addr_width;
+ nor->read_dummy = read_dummy;
+ nor->read_opcode = read_opcode;
+ return ret;
+}
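
The map_id accumulation above builds one selector bit per detection command, most significant bit first. For instance, with two detection descriptors whose reads return 0x04 (mask 0x04) and 0x00 (mask 0x01), the flash is in configuration 0b10 = 2 and the scan that follows looks for the map header whose SMPT_MAP_ID is 2. The values here are made up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned char data[2] = { 0x04, 0x00 };  /* bytes read back */
	unsigned char mask[2] = { 0x04, 0x01 };  /* read_data_mask */
	unsigned int map_id = 0;
	int i;

	for (i = 0; i < 2; i++)
		map_id = map_id << 1 | !!(data[i] & mask[i]);

	printf("%u\n", map_id);                  /* 2 */
	return 0;
}
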
+
+/**
+ * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
+ * @region: pointer to a structure that describes a SPI NOR erase region
+ * @erase: pointer to a structure that describes a SPI NOR erase type
+ * @erase_type: erase type bitmask
+ */
+static void
+spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
+ const struct spi_nor_erase_type *erase,
+ const u8 erase_type)
+{
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ if (!(erase_type & BIT(i)))
+ continue;
+ if (region->size & erase[i].size_mask) {
+ spi_nor_region_mark_overlay(region);
+ return;
+ }
+ }
+}
+
+/**
+ * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt: pointer to the sector map parameter table
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ const u32 *smpt)
+{
+ struct spi_nor_erase_map *map = &nor->erase_map;
+ const struct spi_nor_erase_type *erase = map->erase_type;
+ struct spi_nor_erase_region *region;
+ u64 offset;
+ u32 region_count;
+ int i, j;
+ u8 erase_type;
+
+ region_count = SMPT_MAP_REGION_COUNT(*smpt);
+ /*
+ * The regions will be freed when the driver detaches from the
+ * device.
+ */
+ region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+ map->regions = region;
+
+ map->uniform_erase_type = 0xff;
+ offset = 0;
+ /* Populate regions. */
+ for (i = 0; i < region_count; i++) {
+ j = i + 1; /* index for the region dword */
+ region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
+ erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
+ region[i].offset = offset | erase_type;
+
+ spi_nor_region_check_overlay(&region[i], erase, erase_type);
+
+ /*
+ * Save the erase types that are supported in all regions and
+ * can erase the entire flash memory.
+ */
+ map->uniform_erase_type &= erase_type;
+
+ offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
+ region[i].size;
+ }
+
+ spi_nor_region_mark_end(&region[i - 1]);
+
+ return 0;
+}
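
The region dwords pack the supported erase-type bitmask into the low bits of the region offset, which is why the offset is later masked with ~SNOR_ERASE_FLAGS_MASK. A standalone sketch of the pack/unpack step, with an invented mask standing in for the kernel's:

#include <stdint.h>
#include <stdio.h>

/* stand-in for SNOR_ERASE_FLAGS_MASK; the real mask lives in spi-nor.h */
#define ERASE_FLAGS_MASK	0xffULL

int main(void)
{
	uint64_t offset = 0x10000;	/* invented 64 KiB region offset */
	uint8_t erase_type = 0x5;	/* erase types 0 and 2 supported */
	uint64_t packed = offset | erase_type;

	printf("offset: 0x%llx, erase types: 0x%llx\n",
	       (unsigned long long)(packed & ~ERASE_FLAGS_MASK),
	       (unsigned long long)(packed & ERASE_FLAGS_MASK));
	return 0;
}
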
+
+/**
+ * spi_nor_parse_smpt() - parse Sector Map Parameter Table
+ * @nor: pointer to a 'struct spi_nor'
+ * @smpt_header: sector map parameter table header
+ *
+ * This table is optional, but when available, we parse it to identify the
+ * location and size of sectors within the main data array of the flash memory
+ * device and to identify which Erase Types are supported by each sector.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_smpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *smpt_header)
+{
+ const u32 *sector_map;
+ u32 *smpt;
+ size_t len;
+ u32 addr;
+ int i, ret;
+
+ /* Read the Sector Map Parameter Table. */
+ len = smpt_header->length * sizeof(*smpt);
+ smpt = kzalloc(len, GFP_KERNEL);
+ if (!smpt)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(smpt_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, smpt);
+ if (ret)
+ goto out;
+
+ /* Fix endianness of the SMPT DWORDs. */
+ for (i = 0; i < smpt_header->length; i++)
+ smpt[i] = le32_to_cpu(smpt[i]);
+
+ sector_map = spi_nor_get_map_in_use(nor, smpt);
+ if (!sector_map) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = spi_nor_init_non_uniform_erase_map(nor, sector_map);
+ if (ret)
+ goto out;
+
+ spi_nor_regions_sort_erase_types(&nor->erase_map);
+ /* fall through */
+out:
+ kfree(smpt);
+ return ret;
+}
+
/**
* spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
* @nor: pointer to a 'struct spi_nor'
@@ -2435,7 +3131,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
switch (SFDP_PARAM_HEADER_ID(param_header)) {
case SFDP_SECTOR_MAP_ID:
- dev_info(dev, "non-uniform erase sector maps are not supported yet.\n");
+ err = spi_nor_parse_smpt(nor, param_header);
break;
default:
@@ -2455,6 +3151,9 @@ static int spi_nor_init_params(struct spi_nor *nor,
const struct flash_info *info,
struct spi_nor_flash_parameter *params)
{
+ struct spi_nor_erase_map *map = &nor->erase_map;
+ u8 i, erase_mask;
+
/* Set legacy flash parameters as default. */
memset(params, 0, sizeof(*params));
@@ -2494,6 +3193,28 @@ static int spi_nor_init_params(struct spi_nor *nor,
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+ /*
+ * Sector Erase settings. Sort Erase Types in ascending order, with the
+ * smallest erase size starting at BIT(0).
+ */
+ erase_mask = 0;
+ i = 0;
+ if (info->flags & SECT_4K_PMC) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+ SPINOR_OP_BE_4K_PMC);
+ i++;
+ } else if (info->flags & SECT_4K) {
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], 4096u,
+ SPINOR_OP_BE_4K);
+ i++;
+ }
+ erase_mask |= BIT(i);
+ spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
+ SPINOR_OP_SE);
+ spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
+
/* Select the procedure to set the Quad Enable bit. */
if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
SNOR_HWCAPS_PP_QUAD)) {
@@ -2521,20 +3242,20 @@ static int spi_nor_init_params(struct spi_nor *nor,
params->quad_enable = info->quad_enable;
}
- /* Override the parameters with data read from SFDP tables. */
- nor->addr_width = 0;
- nor->mtd.erasesize = 0;
if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
!(info->flags & SPI_NOR_SKIP_SFDP)) {
struct spi_nor_flash_parameter sfdp_params;
+ struct spi_nor_erase_map prev_map;
memcpy(&sfdp_params, params, sizeof(sfdp_params));
- if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
- nor->addr_width = 0;
- nor->mtd.erasesize = 0;
- } else {
+ memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));
+
+ if (spi_nor_parse_sfdp(nor, &sfdp_params))
+ /* restore previous erase map */
+ memcpy(&nor->erase_map, &prev_map,
+ sizeof(nor->erase_map));
+ else
memcpy(params, &sfdp_params, sizeof(*params));
- }
}
return 0;
@@ -2643,29 +3364,103 @@ static int spi_nor_select_pp(struct spi_nor *nor,
return 0;
}
-static int spi_nor_select_erase(struct spi_nor *nor,
- const struct flash_info *info)
+/**
+ * spi_nor_select_uniform_erase() - select optimum uniform erase type
+ * @map: the erase map of the SPI NOR
+ * @wanted_size: the erase type size to search for. Contains the value of
+ * info->sector_size or of the "small sector" size in case
+ * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
+ *
+ * Once the optimum uniform sector erase command is found, disable all the
+ * others.
+ *
+ * Return: pointer to erase type on success, NULL otherwise.
+ */
+static const struct spi_nor_erase_type *
+spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
+ const u32 wanted_size)
{
- struct mtd_info *mtd = &nor->mtd;
+ const struct spi_nor_erase_type *tested_erase, *erase = NULL;
+ int i;
+ u8 uniform_erase_type = map->uniform_erase_type;
- /* Do nothing if already configured from SFDP. */
- if (mtd->erasesize)
- return 0;
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (!(uniform_erase_type & BIT(i)))
+ continue;
+
+ tested_erase = &map->erase_type[i];
+
+ /*
+		 * If the current erase size is the wanted one, stop here:
+ * we have found the right uniform Sector Erase command.
+ */
+ if (tested_erase->size == wanted_size) {
+ erase = tested_erase;
+ break;
+ }
+
+ /*
+		 * Otherwise, the current erase size is still a valid candidate.
+ * Select the biggest valid candidate.
+ */
+ if (!erase && tested_erase->size)
+ erase = tested_erase;
+ /* keep iterating to find the wanted_size */
+ }
+
+ if (!erase)
+ return NULL;
+ /* Disable all other Sector Erase commands. */
+ map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
+ map->uniform_erase_type |= BIT(erase - map->erase_type);
+ return erase;
+}
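
BIT(erase - map->erase_type) above relies on pointer subtraction over the erase-type array: the element index doubles as the erase type's bit position. A standalone sketch with invented sizes:

#include <stdio.h>

struct erase_type { unsigned int size; };

int main(void)
{
	struct erase_type types[4] = { { 4096 }, { 32768 }, { 65536 }, { 0 } };
	struct erase_type *chosen = &types[2];	/* pretend 64 KiB won */
	unsigned int bit = 1u << (chosen - types);

	printf("uniform_erase_type bit: 0x%x\n", bit);	/* 0x4 */
	return 0;
}
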
+
+static int spi_nor_select_erase(struct spi_nor *nor, u32 wanted_size)
+{
+ struct spi_nor_erase_map *map = &nor->erase_map;
+ const struct spi_nor_erase_type *erase = NULL;
+ struct mtd_info *mtd = &nor->mtd;
+ int i;
+
+ /*
+ * The previous implementation handling Sector Erase commands assumed
+	 * that the SPI flash memory has a uniform layout and then used only
+	 * one of the supported erase sizes for all Sector Erase commands.
+ * So to be backward compatible, the new implementation also tries to
+ * manage the SPI flash memory as uniform with a single erase sector
+ * size, when possible.
+ */
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
/* prefer "small sector" erase if possible */
- if (info->flags & SECT_4K) {
- nor->erase_opcode = SPINOR_OP_BE_4K;
- mtd->erasesize = 4096;
- } else if (info->flags & SECT_4K_PMC) {
- nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
- mtd->erasesize = 4096;
- } else
+ wanted_size = 4096u;
#endif
- {
- nor->erase_opcode = SPINOR_OP_SE;
- mtd->erasesize = info->sector_size;
+
+ if (spi_nor_has_uniform_erase(nor)) {
+ erase = spi_nor_select_uniform_erase(map, wanted_size);
+ if (!erase)
+ return -EINVAL;
+ nor->erase_opcode = erase->opcode;
+ mtd->erasesize = erase->size;
+ return 0;
}
+
+ /*
+ * For non-uniform SPI flash memory, set mtd->erasesize to the
+ * maximum erase sector size. No need to set nor->erase_opcode.
+ */
+ for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
+ if (map->erase_type[i].size) {
+ erase = &map->erase_type[i];
+ break;
+ }
+ }
+
+ if (!erase)
+ return -EINVAL;
+
+ mtd->erasesize = erase->size;
return 0;
}
@@ -2712,7 +3507,7 @@ static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
}
/* Select the Sector Erase command. */
- err = spi_nor_select_erase(nor, info);
+ err = spi_nor_select_erase(nor, info->sector_size);
if (err) {
dev_err(nor->dev,
"can't select erase settings supported by both the SPI controller and memory.\n");
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 88b6c81cebbe..c71523e94580 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -121,8 +121,10 @@ static int no_bit_error_verify(void *error_data, void *error_ecc,
unsigned char calc_ecc[3];
int ret;
- __nand_calculate_ecc(error_data, size, calc_ecc);
- ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
+ __nand_calculate_ecc(error_data, size, calc_ecc,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
if (ret == 0 && !memcmp(correct_data, error_data, size))
return 0;
@@ -149,8 +151,10 @@ static int single_bit_error_correct(void *error_data, void *error_ecc,
unsigned char calc_ecc[3];
int ret;
- __nand_calculate_ecc(error_data, size, calc_ecc);
- ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
+ __nand_calculate_ecc(error_data, size, calc_ecc,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
if (ret == 1 && !memcmp(correct_data, error_data, size))
return 0;
@@ -184,8 +188,10 @@ static int double_bit_error_detect(void *error_data, void *error_ecc,
unsigned char calc_ecc[3];
int ret;
- __nand_calculate_ecc(error_data, size, calc_ecc);
- ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
+ __nand_calculate_ecc(error_data, size, calc_ecc,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
+ ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
return (ret == -EBADMSG) ? 0 : -EINVAL;
}
@@ -259,7 +265,8 @@ static int nand_ecc_test_run(const size_t size)
}
prandom_bytes(correct_data, size);
- __nand_calculate_ecc(correct_data, size, correct_ecc);
+ __nand_calculate_ecc(correct_data, size, correct_ecc,
+ IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
nand_ecc_test[i].prepare(error_data, error_ecc,
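
The new byte-order argument is computed with IS_ENABLED(), which collapses to a 0/1 constant at compile time, so the test gains no run-time cost. A grossly simplified standalone sketch (the real kernel macro also handles =m options):

#include <stdio.h>

#define CONFIG_MTD_NAND_ECC_SMC 1	/* pretend the option is =y */
#define IS_ENABLED(option)	(option)	/* grossly simplified */

int main(void)
{
	/* the flag is a compile-time constant 0 or 1 */
	printf("Smart Media byte order: %d\n",
	       IS_ENABLED(CONFIG_MTD_NAND_ECC_SMC));
	return 0;
}
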
diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c
index 0f7cf54e3234..89096f10f4c4 100644
--- a/drivers/mux/adgs1408.c
+++ b/drivers/mux/adgs1408.c
@@ -128,4 +128,4 @@ module_spi_driver(adgs1408_driver);
MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mux/gpio.c b/drivers/mux/gpio.c
index 6fdd9316db8b..02c1f2c014e8 100644
--- a/drivers/mux/gpio.c
+++ b/drivers/mux/gpio.c
@@ -17,20 +17,18 @@
struct mux_gpio {
struct gpio_descs *gpios;
- int *val;
};
static int mux_gpio_set(struct mux_control *mux, int state)
{
struct mux_gpio *mux_gpio = mux_chip_priv(mux->chip);
- int i;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(state));
- for (i = 0; i < mux_gpio->gpios->ndescs; i++)
- mux_gpio->val[i] = (state >> i) & 1;
+ values[0] = state;
gpiod_set_array_value_cansleep(mux_gpio->gpios->ndescs,
mux_gpio->gpios->desc,
- mux_gpio->val);
+ mux_gpio->gpios->info, values);
return 0;
}
@@ -58,13 +56,11 @@ static int mux_gpio_probe(struct platform_device *pdev)
if (pins < 0)
return pins;
- mux_chip = devm_mux_chip_alloc(dev, 1, sizeof(*mux_gpio) +
- pins * sizeof(*mux_gpio->val));
+ mux_chip = devm_mux_chip_alloc(dev, 1, sizeof(*mux_gpio));
if (IS_ERR(mux_chip))
return PTR_ERR(mux_chip);
mux_gpio = mux_chip_priv(mux_chip);
- mux_gpio->val = (int *)(mux_gpio + 1);
mux_chip->ops = &mux_gpio_ops;
mux_gpio->gpios = devm_gpiod_get_array(dev, "mux", GPIOD_OUT_LOW);
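
The rework replaces the per-pin int array with a bitmap whose first word carries the whole mux state, assuming the number of mux pins fits in one word. A standalone sketch of the bit unpacking the GPIO core then performs; the state value is invented:

#include <stdio.h>

int main(void)
{
	unsigned long values[1];	/* DECLARE_BITMAP(values, 32) stand-in */
	int state = 0x5;		/* invented: pins 0 and 2 high */
	int i, ndescs = 3;

	values[0] = state;		/* what mux_gpio_set() now does */
	for (i = 0; i < ndescs; i++)
		printf("pin %d -> %lu\n", i, (values[0] >> i) & 1);
	return 0;
}
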
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 9375cef22420..3d27616d9c85 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCFINDIPDDPRT:
spin_lock_bh(&ipddp_route_lock);
rp = __ipddp_find_route(&rcp);
- if (rp)
- memcpy(&rcp2, rp, sizeof(rcp2));
+ if (rp) {
+ memset(&rcp2, 0, sizeof(rcp2));
+ rcp2.ip = rp->ip;
+ rcp2.at = rp->at;
+ rcp2.flags = rp->flags;
+ }
spin_unlock_bh(&ipddp_route_lock);
if (rp) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a764a83f99da..ffa37adb7681 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
/*---------------------------- General routines -----------------------------*/
@@ -962,7 +963,8 @@ static inline void slave_disable_netpoll(struct slave *slave)
return;
slave->np = NULL;
- __netpoll_free_async(np);
+
+ __netpoll_free(np);
}
static void bond_poll_controller(struct net_device *bond_dev)
@@ -971,16 +973,13 @@ static void bond_poll_controller(struct net_device *bond_dev)
struct slave *slave = NULL;
struct list_head *iter;
struct ad_info ad_info;
- struct netpoll_info *ni;
- const struct net_device_ops *ops;
if (BOND_MODE(bond) == BOND_MODE_8023AD)
if (bond_3ad_get_active_agg_info(bond, &ad_info))
return;
bond_for_each_slave_rcu(bond, slave, iter) {
- ops = slave->dev->netdev_ops;
- if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
+ if (!bond_slave_is_up(slave))
continue;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -992,11 +991,7 @@ static void bond_poll_controller(struct net_device *bond_dev)
continue;
}
- ni = rcu_dereference_bh(slave->dev->npinfo);
- if (down_trylock(&ni->dev_lock))
- continue;
- ops->ndo_poll_controller(slave->dev);
- up(&ni->dev_lock);
+ netpoll_poll_dev(slave->dev);
}
}
@@ -1177,9 +1172,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
}
- /* don't change skb->dev for link-local packets */
- if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+	/* Link-local multicast packets should be passed to the stack on
+	 * the link they arrive on, as well as to the bond-master device.
+	 * These packets are mostly useful when the stack receives them on
+	 * the link they arrived on (e.g. LLDP), but they must also be
+	 * available on the master. Use cases include (but are not limited
+	 * to): LLDP agents that must be able to operate both on enslaved
+	 * interfaces and on bonds themselves; Linux bridges that must be
+	 * able to process/pass BPDUs from attached bonds when any kind of
+	 * STP version is enabled on the network.
+ */
+ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (nskb) {
+ nskb->dev = bond->dev;
+ nskb->queue_mapping = 0;
+ netif_rx(nskb);
+ }
return RX_HANDLER_PASS;
+ }
if (bond_should_deliver_exact_match(skb, slave, bond))
return RX_HANDLER_EXACT;
@@ -1276,6 +1289,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
return NULL;
}
}
+ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
return slave;
}
@@ -1283,6 +1298,7 @@ static void bond_free_slave(struct slave *slave)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
+ cancel_delayed_work_sync(&slave->notify_work);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
kfree(SLAVE_AD_INFO(slave));
@@ -1304,39 +1320,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
info->link_failure_count = slave->link_failure_count;
}
-static void bond_netdev_notify(struct net_device *dev,
- struct netdev_bonding_info *info)
-{
- rtnl_lock();
- netdev_bonding_info_change(dev, info);
- rtnl_unlock();
-}
-
static void bond_netdev_notify_work(struct work_struct *_work)
{
- struct netdev_notify_work *w =
- container_of(_work, struct netdev_notify_work, work.work);
+ struct slave *slave = container_of(_work, struct slave,
+ notify_work.work);
+
+ if (rtnl_trylock()) {
+ struct netdev_bonding_info binfo;
- bond_netdev_notify(w->dev, &w->bonding_info);
- dev_put(w->dev);
- kfree(w);
+ bond_fill_ifslave(slave, &binfo.slave);
+ bond_fill_ifbond(slave->bond, &binfo.master);
+ netdev_bonding_info_change(slave->dev, &binfo);
+ rtnl_unlock();
+ } else {
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+ }
}
void bond_queue_slave_event(struct slave *slave)
{
- struct bonding *bond = slave->bond;
- struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
- if (!nnw)
- return;
-
- dev_hold(slave->dev);
- nnw->dev = slave->dev;
- bond_fill_ifslave(slave, &nnw->bonding_info.slave);
- bond_fill_ifbond(bond, &nnw->bonding_info.master);
- INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
- queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}
void bond_lower_state_changed(struct slave *slave)
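
The reworked notifier takes rtnl via trylock and, on contention, requeues itself one jiffy later instead of blocking the workqueue. A standalone sketch of the trylock-or-back-off pattern, with pthread stand-ins for rtnl_trylock() and queue_delayed_work():

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void notify_work(void)
{
	while (pthread_mutex_trylock(&rtnl)) {
		/* lock busy: "requeue" by backing off and retrying */
		usleep(1000);
	}
	printf("bonding info change sent under lock\n");
	pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	notify_work();	/* uncontended here, so it runs immediately */
	return 0;
}
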
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index d94dae216820..c7d05027a7a0 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -79,7 +79,7 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
- struct sk_buff *pos, *insert = (struct sk_buff *)head;
+ struct sk_buff *pos, *insert = NULL;
skb_queue_reverse_walk(head, pos) {
const struct can_rx_offload_cb *cb_pos, *cb_new;
@@ -99,8 +99,10 @@ static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buf
insert = pos;
break;
}
-
- __skb_queue_after(head, insert, new);
+ if (!insert)
+ __skb_queue_head(head, new);
+ else
+ __skb_queue_after(head, insert, new);
}
static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
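
The fix above stops abusing a cast of the list head as a fake element: the insertion point is now tracked as NULL, and an explicit push-to-front handles the smallest key. A standalone sketch of the same pattern on a plain singly-linked list:

#include <stdio.h>

struct node { int key; struct node *next; };

static void insert_sorted(struct node **head, struct node *new)
{
	struct node *pos, *insert = NULL;

	for (pos = *head; pos; pos = pos->next)
		if (pos->key <= new->key)
			insert = pos;	/* last node not greater than new */

	if (!insert) {			/* smallest key: new list head */
		new->next = *head;
		*head = new;
	} else {			/* splice in after 'insert' */
		new->next = insert->next;
		insert->next = new;
	}
}

int main(void)
{
	struct node n1 = { 2, NULL }, n2 = { 1, NULL }, n3 = { 3, NULL };
	struct node *head = NULL, *pos;

	insert_sorted(&head, &n1);
	insert_sorted(&head, &n2);
	insert_sorted(&head, &n3);
	for (pos = head; pos; pos = pos->next)
		printf("%d ", pos->key);	/* 1 2 3 */
	printf("\n");
	return 0;
}
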
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index d3ce1e4cb4d3..71bb3aebded4 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -23,6 +23,14 @@ config NET_DSA_LOOP
This enables support for a fake mock-up switch chip which
exercises the DSA APIs.
+config NET_DSA_LANTIQ_GSWIP
+ tristate "Lantiq / Intel GSWIP"
+ depends on HAS_IOMEM && NET_DSA
+ select NET_DSA_TAG_GSWIP
+ ---help---
+ This enables support for the Lantiq / Intel GSWIP 2.1 found in
+ the xrx200 / VR9 SoC.
+
config NET_DSA_MT7530
tristate "Mediatek MT7530 Ethernet switch support"
depends on NET_DSA
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 46c1cba91ffe..82e5d794c41f 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
ifdef CONFIG_NET_DSA_LOOP
obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
endif
+obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index 2f988216dab9..d32469283f97 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -23,6 +23,7 @@ config B53_MDIO_DRIVER
config B53_MMAP_DRIVER
tristate "B53 MMAP connected switch driver"
depends on B53 && HAS_IOMEM
+ default BCM63XX || BMIPS_GENERIC
help
Select to enable support for memory-mapped switches like the BCM63XX
integrated switches.
@@ -30,6 +31,15 @@ config B53_MMAP_DRIVER
config B53_SRAB_DRIVER
tristate "B53 SRAB connected switch driver"
depends on B53 && HAS_IOMEM
+ depends on B53_SERDES || !B53_SERDES
+ default ARCH_BCM_IPROC
help
Select to enable support for memory-mapped Switch Register Access
	  Bridge Registers (SRAB), as found on the BCM53010
+
+config B53_SERDES
+ tristate "B53 SerDes support"
+ depends on B53
+ default ARCH_BCM_NSP
+ help
+	  Select to enable support for SerDes on, e.g., Northstar Plus SoCs.
diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile
index 4256fb42a4dd..b1be13023ae4 100644
--- a/drivers/net/dsa/b53/Makefile
+++ b/drivers/net/dsa/b53/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o
obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o
obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o
obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o
+obj-$(CONFIG_B53_SERDES) += b53_serdes.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index d93c790bfbe8..0e4bbdcc614f 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/platform_data/b53.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <net/dsa.h>
@@ -502,8 +503,14 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
unsigned int cpu_port = ds->ports[port].cpu_dp->index;
+ int ret = 0;
u16 pvlan;
+ if (dev->ops->irq_enable)
+ ret = dev->ops->irq_enable(dev, port);
+ if (ret)
+ return ret;
+
/* Clear the Rx and Tx disable bits and set to no spanning tree */
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
@@ -536,6 +543,9 @@ void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+
+ if (dev->ops->irq_disable)
+ dev->ops->irq_disable(dev, port);
}
EXPORT_SYMBOL(b53_disable_port);
@@ -755,6 +765,8 @@ static int b53_reset_switch(struct b53_device *priv)
memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
+ priv->serdes_lane = B53_INVALID_LANE;
+
return b53_switch_reset(priv);
}
@@ -938,33 +950,50 @@ static int b53_setup(struct dsa_switch *ds)
return ret;
}
-static void b53_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void b53_force_link(struct b53_device *dev, int port, int link)
{
- struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
- u8 rgmii_ctrl = 0, reg = 0, off;
-
- if (!phy_is_pseudo_fixed_link(phydev))
- return;
+ u8 reg, val, off;
/* Override the port settings */
if (port == dev->cpu_port) {
off = B53_PORT_OVERRIDE_CTRL;
- reg = PORT_OVERRIDE_EN;
+ val = PORT_OVERRIDE_EN;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
- reg = GMII_PO_EN;
+ val = GMII_PO_EN;
}
- /* Set the link UP */
- if (phydev->link)
+ b53_read8(dev, B53_CTRL_PAGE, off, &reg);
+ reg |= val;
+ if (link)
reg |= PORT_OVERRIDE_LINK;
+ else
+ reg &= ~PORT_OVERRIDE_LINK;
+ b53_write8(dev, B53_CTRL_PAGE, off, reg);
+}
- if (phydev->duplex == DUPLEX_FULL)
+static void b53_force_port_config(struct b53_device *dev, int port,
+ int speed, int duplex, int pause)
+{
+ u8 reg, val, off;
+
+ /* Override the port settings */
+ if (port == dev->cpu_port) {
+ off = B53_PORT_OVERRIDE_CTRL;
+ val = PORT_OVERRIDE_EN;
+ } else {
+ off = B53_GMII_PORT_OVERRIDE_CTRL(port);
+ val = GMII_PO_EN;
+ }
+
+ b53_read8(dev, B53_CTRL_PAGE, off, &reg);
+ reg |= val;
+ if (duplex == DUPLEX_FULL)
reg |= PORT_OVERRIDE_FULL_DUPLEX;
+ else
+ reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
- switch (phydev->speed) {
+ switch (speed) {
case 2000:
reg |= PORT_OVERRIDE_SPEED_2000M;
/* fallthrough */
@@ -978,21 +1007,41 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
reg |= PORT_OVERRIDE_SPEED_10M;
break;
default:
- dev_err(ds->dev, "unknown speed: %d\n", phydev->speed);
+ dev_err(dev->dev, "unknown speed: %d\n", speed);
return;
}
+ if (pause & MLO_PAUSE_RX)
+ reg |= PORT_OVERRIDE_RX_FLOW;
+ if (pause & MLO_PAUSE_TX)
+ reg |= PORT_OVERRIDE_TX_FLOW;
+
+ b53_write8(dev, B53_CTRL_PAGE, off, reg);
+}
+
+static void b53_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct b53_device *dev = ds->priv;
+ struct ethtool_eee *p = &dev->ports[port].eee;
+ u8 rgmii_ctrl = 0, reg = 0, off;
+ int pause = 0;
+
+ if (!phy_is_pseudo_fixed_link(phydev))
+ return;
+
/* Enable flow control on BCM5301x's CPU port */
if (is5301x(dev) && port == dev->cpu_port)
- reg |= PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW;
+ pause = MLO_PAUSE_TXRX_MASK;
if (phydev->pause) {
if (phydev->asym_pause)
- reg |= PORT_OVERRIDE_TX_FLOW;
- reg |= PORT_OVERRIDE_RX_FLOW;
+ pause |= MLO_PAUSE_TX;
+ pause |= MLO_PAUSE_RX;
}
- b53_write8(dev, B53_CTRL_PAGE, off, reg);
+ b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause);
+ b53_force_link(dev, port, phydev->link);
if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
if (port == 8)
@@ -1052,16 +1101,9 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
}
} else if (is5301x(dev)) {
if (port != dev->cpu_port) {
- u8 po_reg = B53_GMII_PORT_OVERRIDE_CTRL(dev->cpu_port);
- u8 gmii_po;
-
- b53_read8(dev, B53_CTRL_PAGE, po_reg, &gmii_po);
- gmii_po |= GMII_PO_LINK |
- GMII_PO_RX_FLOW |
- GMII_PO_TX_FLOW |
- GMII_PO_EN |
- GMII_PO_SPEED_2000M;
- b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po);
+ b53_force_port_config(dev, dev->cpu_port, 2000,
+ DUPLEX_FULL, MLO_PAUSE_TXRX_MASK);
+ b53_force_link(dev, dev->cpu_port, 1);
}
}
@@ -1069,6 +1111,148 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
p->eee_enabled = b53_eee_init(ds, port, phydev);
}
+void b53_port_event(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ bool link;
+ u16 sts;
+
+ b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts);
+ link = !!(sts & BIT(port));
+ dsa_port_phylink_mac_change(ds, port, link);
+}
+EXPORT_SYMBOL(b53_port_event);
+
+void b53_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct b53_device *dev = ds->priv;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (dev->ops->serdes_phylink_validate)
+ dev->ops->serdes_phylink_validate(dev, port, mask, state);
+
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+	/* Except on 5325/5365 devices and for MII, Reverse MII and 802.3z
+	 * interfaces, we support Gigabit, including half duplex.
+ */
+ if (state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ !phy_interface_mode_is_8023z(state->interface) &&
+ !(is5325(dev) || is5365(dev))) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ if (!phy_interface_mode_is_8023z(state->interface)) {
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ phylink_helper_basex_speed(state);
+}
+EXPORT_SYMBOL(b53_phylink_validate);
+
+int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct b53_device *dev = ds->priv;
+ int ret = -EOPNOTSUPP;
+
+ if ((phy_interface_mode_is_8023z(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII) &&
+ dev->ops->serdes_link_state)
+ ret = dev->ops->serdes_link_state(dev, port, state);
+
+ return ret;
+}
+EXPORT_SYMBOL(b53_phylink_mac_link_state);
+
+void b53_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (mode == MLO_AN_PHY)
+ return;
+
+ if (mode == MLO_AN_FIXED) {
+ b53_force_port_config(dev, port, state->speed,
+ state->duplex, state->pause);
+ return;
+ }
+
+ if ((phy_interface_mode_is_8023z(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII) &&
+ dev->ops->serdes_config)
+ dev->ops->serdes_config(dev, port, mode, state);
+}
+EXPORT_SYMBOL(b53_phylink_mac_config);
+
+void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (dev->ops->serdes_an_restart)
+ dev->ops->serdes_an_restart(dev, port);
+}
+EXPORT_SYMBOL(b53_phylink_mac_an_restart);
+
+void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (mode == MLO_AN_PHY)
+ return;
+
+ if (mode == MLO_AN_FIXED) {
+ b53_force_link(dev, port, false);
+ return;
+ }
+
+ if (phy_interface_mode_is_8023z(interface) &&
+ dev->ops->serdes_link_set)
+ dev->ops->serdes_link_set(dev, port, mode, interface, false);
+}
+EXPORT_SYMBOL(b53_phylink_mac_link_down);
+
+void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (mode == MLO_AN_PHY)
+ return;
+
+ if (mode == MLO_AN_FIXED) {
+ b53_force_link(dev, port, true);
+ return;
+ }
+
+ if (phy_interface_mode_is_8023z(interface) &&
+ dev->ops->serdes_link_set)
+ dev->ops->serdes_link_set(dev, port, mode, interface, true);
+}
+EXPORT_SYMBOL(b53_phylink_mac_link_up);
+
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
return 0;
@@ -1107,7 +1291,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
b53_get_vlan_entry(dev, vid, vl);
vl->members |= BIT(port);
- if (untagged)
+ if (untagged && !dsa_is_cpu_port(ds, port))
vl->untag |= BIT(port);
else
vl->untag &= ~BIT(port);
@@ -1149,7 +1333,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
pvid = 0;
}
- if (untagged)
+ if (untagged && !dsa_is_cpu_port(ds, port))
vl->untag &= ~(BIT(port));
b53_set_vlan_entry(dev, vid, vl);
@@ -1710,6 +1894,12 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
.adjust_link = b53_adjust_link,
+ .phylink_validate = b53_phylink_validate,
+ .phylink_mac_link_state = b53_phylink_mac_link_state,
+ .phylink_mac_config = b53_phylink_mac_config,
+ .phylink_mac_an_restart = b53_phylink_mac_an_restart,
+ .phylink_mac_link_down = b53_phylink_mac_link_down,
+ .phylink_mac_link_up = b53_phylink_mac_link_up,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
.get_mac_eee = b53_get_mac_eee,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index df149756c282..ec796482792d 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -29,6 +29,7 @@
struct b53_device;
struct net_device;
+struct phylink_link_state;
struct b53_io_ops {
int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
@@ -43,8 +44,25 @@ struct b53_io_ops {
int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value);
int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value);
int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
+ int (*irq_enable)(struct b53_device *dev, int port);
+ void (*irq_disable)(struct b53_device *dev, int port);
+ u8 (*serdes_map_lane)(struct b53_device *dev, int port);
+ int (*serdes_link_state)(struct b53_device *dev, int port,
+ struct phylink_link_state *state);
+ void (*serdes_config)(struct b53_device *dev, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+ void (*serdes_an_restart)(struct b53_device *dev, int port);
+ void (*serdes_link_set)(struct b53_device *dev, int port,
+ unsigned int mode, phy_interface_t interface,
+ bool link_up);
+ void (*serdes_phylink_validate)(struct b53_device *dev, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state);
};
+#define B53_INVALID_LANE 0xff
+
enum {
BCM5325_DEVICE_ID = 0x25,
BCM5365_DEVICE_ID = 0x65,
@@ -107,6 +125,7 @@ struct b53_device {
/* connect specific data */
u8 current_page;
struct device *dev;
+ u8 serdes_lane;
/* Master MDIO bus we got probed from */
struct mii_bus *bus;
@@ -298,6 +317,23 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
+void b53_port_event(struct dsa_switch *ds, int port);
+void b53_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state);
+int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state);
+void b53_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port);
+void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
+void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
int b53_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
new file mode 100644
index 000000000000..629bf14128a2
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
+/*
+ * Northstar Plus switch SerDes/SGMII PHY main logic
+ *
+ * Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <net/dsa.h>
+
+#include "b53_priv.h"
+#include "b53_serdes.h"
+#include "b53_regs.h"
+
+static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block,
+ u16 value)
+{
+ b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block);
+ b53_write16(dev, B53_SERDES_PAGE, offset, value);
+}
+
+static u16 b53_serdes_read_blk(struct b53_device *dev, u8 offset, u16 block)
+{
+ u16 value;
+
+ b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block);
+ b53_read16(dev, B53_SERDES_PAGE, offset, &value);
+
+ return value;
+}
+
+static void b53_serdes_set_lane(struct b53_device *dev, u8 lane)
+{
+ if (dev->serdes_lane == lane)
+ return;
+
+ WARN_ON(lane > 1);
+
+ b53_serdes_write_blk(dev, B53_SERDES_LANE,
+ SERDES_XGXSBLK0_BLOCKADDRESS, lane);
+ dev->serdes_lane = lane;
+}
+
+static void b53_serdes_write(struct b53_device *dev, u8 lane,
+ u8 offset, u16 block, u16 value)
+{
+ b53_serdes_set_lane(dev, lane);
+ b53_serdes_write_blk(dev, offset, block, value);
+}
+
+static u16 b53_serdes_read(struct b53_device *dev, u8 lane,
+ u8 offset, u16 block)
+{
+ b53_serdes_set_lane(dev, lane);
+ return b53_serdes_read_blk(dev, offset, block);
+}
+
+void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+ u16 reg;
+
+ if (lane == B53_INVALID_LANE)
+ return;
+
+ reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
+ SERDES_DIGITAL_BLK);
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ reg |= FIBER_MODE_1000X;
+ else
+ reg &= ~FIBER_MODE_1000X;
+ b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
+ SERDES_DIGITAL_BLK, reg);
+}
+EXPORT_SYMBOL(b53_serdes_config);
+
+void b53_serdes_an_restart(struct b53_device *dev, int port)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+ u16 reg;
+
+ if (lane == B53_INVALID_LANE)
+ return;
+
+ reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
+ SERDES_MII_BLK);
+ reg |= BMCR_ANRESTART;
+ b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
+ SERDES_MII_BLK, reg);
+}
+EXPORT_SYMBOL(b53_serdes_an_restart);
+
+int b53_serdes_link_state(struct b53_device *dev, int port,
+ struct phylink_link_state *state)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+ u16 dig, bmsr;
+
+ if (lane == B53_INVALID_LANE)
+ return 1;
+
+ dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS,
+ SERDES_DIGITAL_BLK);
+ bmsr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMSR),
+ SERDES_MII_BLK);
+
+ switch ((dig >> SPEED_STATUS_SHIFT) & SPEED_STATUS_MASK) {
+ case SPEED_STATUS_10:
+ state->speed = SPEED_10;
+ break;
+ case SPEED_STATUS_100:
+ state->speed = SPEED_100;
+ break;
+ case SPEED_STATUS_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ case SPEED_STATUS_2500:
+ state->speed = SPEED_2500;
+ break;
+ }
+
+ state->duplex = dig & DUPLEX_STATUS ? DUPLEX_FULL : DUPLEX_HALF;
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
+ state->link = !!(dig & LINK_STATUS);
+ if (dig & PAUSE_RESOLUTION_RX_SIDE)
+ state->pause |= MLO_PAUSE_RX;
+ if (dig & PAUSE_RESOLUTION_TX_SIDE)
+ state->pause |= MLO_PAUSE_TX;
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_serdes_link_state);
+
+void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
+ phy_interface_t interface, bool link_up)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+ u16 reg;
+
+ if (lane == B53_INVALID_LANE)
+ return;
+
+ reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
+ SERDES_MII_BLK);
+ if (link_up)
+ reg &= ~BMCR_PDOWN;
+ else
+ reg |= BMCR_PDOWN;
+ b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
+ SERDES_MII_BLK, reg);
+}
+EXPORT_SYMBOL(b53_serdes_link_set);
+
+void b53_serdes_phylink_validate(struct b53_device *dev, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+
+ if (lane == B53_INVALID_LANE)
+ return;
+
+ switch (lane) {
+ case 0:
+ phylink_set(supported, 2500baseX_Full);
+ /* fallthrough */
+ case 1:
+ phylink_set(supported, 1000baseX_Full);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(b53_serdes_phylink_validate);
+
+int b53_serdes_init(struct b53_device *dev, int port)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+ u16 id0, msb, lsb;
+
+ if (lane == B53_INVALID_LANE)
+ return -EINVAL;
+
+ id0 = b53_serdes_read(dev, lane, B53_SERDES_ID0, SERDES_ID0);
+ msb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID1),
+ SERDES_MII_BLK);
+ lsb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID2),
+ SERDES_MII_BLK);
+ if (id0 == 0 || id0 == 0xffff) {
+ dev_err(dev->dev, "SerDes not initialized, check settings\n");
+ return -ENODEV;
+ }
+
+ dev_info(dev->dev,
+ "SerDes lane %d, model: %d, rev %c%d (OUI: 0x%08x)\n",
+ lane, id0 & SERDES_ID0_MODEL_MASK,
+ (id0 >> SERDES_ID0_REV_LETTER_SHIFT) + 0x41,
+ (id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK,
+ (u32)msb << 16 | lsb);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_serdes_init);
+
+MODULE_AUTHOR("Florian Fainelli <f.fainelli@gmail.com>");
+MODULE_DESCRIPTION("B53 Switch SerDes driver");
+MODULE_LICENSE("Dual BSD/GPL");
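
b53_serdes_read_blk()/b53_serdes_write_blk() above implement a classic banked register access: latch a block address first, then touch an offset within that block. A standalone sketch against a fake register file, with invented dimensions and values:

#include <stdint.h>
#include <stdio.h>

static uint16_t blkaddr;	/* latched block select */
static uint16_t regs[2][8];	/* fake: 2 blocks x 8 registers */

static void write_blk(uint8_t offset, uint16_t block, uint16_t val)
{
	blkaddr = block;		/* select block */
	regs[blkaddr][offset] = val;	/* access within block */
}

static uint16_t read_blk(uint8_t offset, uint16_t block)
{
	blkaddr = block;
	return regs[blkaddr][offset];
}

int main(void)
{
	write_blk(3, 1, 0xbeef);
	printf("0x%04x\n", read_blk(3, 1));
	return 0;
}
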
diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h
new file mode 100644
index 000000000000..3bb4f91aec9e
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_serdes.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
+ *
+ * Northstar Plus switch SerDes/SGMII PHY definitions
+ *
+ * Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com>
+ */
+
+#include <linux/phy.h>
+#include <linux/types.h>
+
+/* Non-standard page used to access SerDes PHY registers on NorthStar Plus */
+#define B53_SERDES_PAGE 0x16
+#define B53_SERDES_BLKADDR 0x3e
+#define B53_SERDES_LANE 0x3c
+
+#define B53_SERDES_ID0 0x20
+#define SERDES_ID0_MODEL_MASK 0x3f
+#define SERDES_ID0_REV_NUM_SHIFT 11
+#define SERDES_ID0_REV_NUM_MASK 0x7
+#define SERDES_ID0_REV_LETTER_SHIFT 14
+
+#define B53_SERDES_MII_REG(x) (0x20 + (x) * 2)
+#define B53_SERDES_DIGITAL_CONTROL(x) (0x1e + (x) * 2)
+#define B53_SERDES_DIGITAL_STATUS 0x28
+
+/* SERDES_DIGITAL_CONTROL1 */
+#define FIBER_MODE_1000X BIT(0)
+#define TBI_INTERFACE BIT(1)
+#define SIGNAL_DETECT_EN BIT(2)
+#define INVERT_SIGNAL_DETECT BIT(3)
+#define AUTODET_EN BIT(4)
+#define SGMII_MASTER_MODE BIT(5)
+#define DISABLE_DLL_PWRDOWN BIT(6)
+#define CRC_CHECKER_DIS BIT(7)
+#define COMMA_DET_EN BIT(8)
+#define ZERO_COMMA_DET_EN BIT(9)
+#define REMOTE_LOOPBACK BIT(10)
+#define SEL_RX_PKTS_FOR_CNTR BIT(11)
+#define MASTER_MDIO_PHY_SEL BIT(13)
+#define DISABLE_SIGNAL_DETECT_FLT BIT(14)
+
+/* SERDES_DIGITAL_CONTROL2 */
+#define EN_PARALLEL_DET BIT(0)
+#define DIS_FALSE_LINK BIT(1)
+#define FLT_FORCE_LINK BIT(2)
+#define EN_AUTONEG_ERR_TIMER BIT(3)
+#define DIS_REMOTE_FAULT_SENSING BIT(4)
+#define FORCE_XMIT_DATA BIT(5)
+#define AUTONEG_FAST_TIMERS BIT(6)
+#define DIS_CARRIER_EXTEND BIT(7)
+#define DIS_TRRR_GENERATION BIT(8)
+#define BYPASS_PCS_RX BIT(9)
+#define BYPASS_PCS_TX BIT(10)
+#define TEST_CNTR_EN BIT(11)
+#define TX_PACKET_SEQ_TEST BIT(12)
+#define TX_IDLE_JAM_SEQ_TEST BIT(13)
+#define CLR_BER_CNTR BIT(14)
+
+/* SERDES_DIGITAL_CONTROL3 */
+#define TX_FIFO_RST BIT(0)
+#define FIFO_ELAST_TX_RX_SHIFT 1
+#define FIFO_ELAST_TX_RX_5K 0
+#define FIFO_ELAST_TX_RX_10K 1
+#define FIFO_ELAST_TX_RX_13_5K 2
+#define FIFO_ELAST_TX_RX_18_5K 3
+#define BLOCK_TXEN_MODE BIT(9)
+#define JAM_FALSE_CARRIER_MODE BIT(10)
+#define EXT_PHY_CRS_MODE BIT(11)
+#define INVERT_EXT_PHY_CRS BIT(12)
+#define DISABLE_TX_CRS BIT(13)
+
+/* SERDES_DIGITAL_STATUS */
+#define SGMII_MODE BIT(0)
+#define LINK_STATUS BIT(1)
+#define DUPLEX_STATUS BIT(2)
+#define SPEED_STATUS_SHIFT 3
+#define SPEED_STATUS_10 0
+#define SPEED_STATUS_100 1
+#define SPEED_STATUS_1000 2
+#define SPEED_STATUS_2500 3
+#define SPEED_STATUS_MASK SPEED_STATUS_2500
+#define PAUSE_RESOLUTION_TX_SIDE BIT(5)
+#define PAUSE_RESOLUTION_RX_SIDE BIT(6)
+#define LINK_STATUS_CHANGE BIT(7)
+#define EARLY_END_EXT_DET BIT(8)
+#define CARRIER_EXT_ERR_DET BIT(9)
+#define RX_ERR_DET BIT(10)
+#define TX_ERR_DET BIT(11)
+#define CRC_ERR_DET BIT(12)
+#define FALSE_CARRIER_ERR_DET BIT(13)
+#define RXFIFO_ERR_DET BIT(14)
+#define TXFIFO_ERR_DET BIT(15)
+
+/* Block offsets */
+#define SERDES_DIGITAL_BLK 0x8300
+#define SERDES_ID0 0x8310
+#define SERDES_MII_BLK 0xffe0
+#define SERDES_XGXSBLK0_BLOCKADDRESS 0xffd0
+
+struct phylink_link_state;
+
+static inline u8 b53_serdes_map_lane(struct b53_device *dev, int port)
+{
+ if (!dev->ops->serdes_map_lane)
+ return B53_INVALID_LANE;
+
+ return dev->ops->serdes_map_lane(dev, port);
+}
+
+int b53_serdes_get_link(struct b53_device *dev, int port);
+int b53_serdes_link_state(struct b53_device *dev, int port,
+ struct phylink_link_state *state);
+void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
+ const struct phylink_link_state *state);
+void b53_serdes_an_restart(struct b53_device *dev, int port);
+void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
+ phy_interface_t interface, bool link_up);
+void b53_serdes_phylink_validate(struct b53_device *dev, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state);
+#if IS_ENABLED(CONFIG_B53_SERDES)
+int b53_serdes_init(struct b53_device *dev, int port);
+#else
+static inline int b53_serdes_init(struct b53_device *dev, int port)
+{
+ return -ENODEV;
+}
+#endif
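
The #if IS_ENABLED() block above is the usual conditional-stub pattern: when SerDes support is compiled out, callers still build and link against an inline stub that reports -ENODEV. A standalone sketch with an invented config switch:

#include <errno.h>
#include <stdio.h>

#define FEATURE_X 0	/* pretend the config option is off */

#if FEATURE_X
int feature_init(void) { return 0; }
#else
static inline int feature_init(void) { return -ENODEV; }
#endif

int main(void)
{
	printf("feature_init() = %d\n", feature_init());
	return 0;
}
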
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 91de2ba99ad1..90f514252987 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -19,11 +19,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/platform_data/b53.h>
#include <linux/of.h>
#include "b53_priv.h"
+#include "b53_serdes.h"
/* command and status register of the SRAB */
#define B53_SRAB_CMDSTAT 0x2c
@@ -47,6 +49,7 @@
/* command and status register of the SRAB */
#define B53_SRAB_CTRLS 0x40
+#define B53_SRAB_CTRLS_HOST_INTR BIT(1)
#define B53_SRAB_CTRLS_RCAREQ BIT(3)
#define B53_SRAB_CTRLS_RCAGNT BIT(4)
#define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6)
@@ -60,8 +63,29 @@
#define B53_SRAB_P7_SLEEP_TIMER BIT(11)
#define B53_SRAB_IMP0_SLEEP_TIMER BIT(12)
+/* Port mux configuration registers */
+#define B53_MUX_CONFIG_P5 0x00
+#define MUX_CONFIG_SGMII 0
+#define MUX_CONFIG_MII_LITE 1
+#define MUX_CONFIG_RGMII 2
+#define MUX_CONFIG_GMII 3
+#define MUX_CONFIG_GPHY 4
+#define MUX_CONFIG_INTERNAL 5
+#define MUX_CONFIG_MASK 0x7
+#define B53_MUX_CONFIG_P4 0x04
+
+struct b53_srab_port_priv {
+ int irq;
+ bool irq_enabled;
+ struct b53_device *dev;
+ unsigned int num;
+ phy_interface_t mode;
+};
+
struct b53_srab_priv {
void __iomem *regs;
+ void __iomem *mux_config;
+ struct b53_srab_port_priv port_intrs[B53_N_PORTS];
};
static int b53_srab_request_grant(struct b53_device *dev)
@@ -344,6 +368,81 @@ err:
return ret;
}
+static irqreturn_t b53_srab_port_thread(int irq, void *dev_id)
+{
+ struct b53_srab_port_priv *port = dev_id;
+ struct b53_device *dev = port->dev;
+
+ if (port->mode == PHY_INTERFACE_MODE_SGMII)
+ b53_port_event(dev->ds, port->num);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t b53_srab_port_isr(int irq, void *dev_id)
+{
+ struct b53_srab_port_priv *port = dev_id;
+ struct b53_device *dev = port->dev;
+ struct b53_srab_priv *priv = dev->priv;
+
+ /* Acknowledge the interrupt */
+ writel(BIT(port->num), priv->regs + B53_SRAB_INTR);
+
+ return IRQ_WAKE_THREAD;
+}
+
+#if IS_ENABLED(CONFIG_B53_SERDES)
+static u8 b53_srab_serdes_map_lane(struct b53_device *dev, int port)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+
+ if (p->mode != PHY_INTERFACE_MODE_SGMII)
+ return B53_INVALID_LANE;
+
+ switch (port) {
+ case 5:
+ return 0;
+ case 4:
+ return 1;
+ default:
+ return B53_INVALID_LANE;
+ }
+}
+#endif
+
+static int b53_srab_irq_enable(struct b53_device *dev, int port)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+ int ret = 0;
+
+	/* The interrupt is optional; if it was not specified, do not
+	 * treat this as fatal
+ */
+ if (p->irq == -ENXIO)
+ return ret;
+
+ ret = request_threaded_irq(p->irq, b53_srab_port_isr,
+ b53_srab_port_thread, 0,
+ dev_name(dev->dev), p);
+ if (!ret)
+ p->irq_enabled = true;
+
+ return ret;
+}
+
+static void b53_srab_irq_disable(struct b53_device *dev, int port)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+
+ if (p->irq_enabled) {
+ free_irq(p->irq, p);
+ p->irq_enabled = false;
+ }
+}
+
static const struct b53_io_ops b53_srab_ops = {
.read8 = b53_srab_read8,
.read16 = b53_srab_read16,
@@ -355,6 +454,16 @@ static const struct b53_io_ops b53_srab_ops = {
.write32 = b53_srab_write32,
.write48 = b53_srab_write48,
.write64 = b53_srab_write64,
+ .irq_enable = b53_srab_irq_enable,
+ .irq_disable = b53_srab_irq_disable,
+#if IS_ENABLED(CONFIG_B53_SERDES)
+ .serdes_map_lane = b53_srab_serdes_map_lane,
+ .serdes_link_state = b53_serdes_link_state,
+ .serdes_config = b53_serdes_config,
+ .serdes_an_restart = b53_serdes_an_restart,
+ .serdes_link_set = b53_serdes_link_set,
+ .serdes_phylink_validate = b53_serdes_phylink_validate,
+#endif
};
static const struct of_device_id b53_srab_of_match[] = {
@@ -379,6 +488,107 @@ static const struct of_device_id b53_srab_of_match[] = {
};
MODULE_DEVICE_TABLE(of, b53_srab_of_match);
+static void b53_srab_intr_set(struct b53_srab_priv *priv, bool set)
+{
+ u32 reg;
+
+ reg = readl(priv->regs + B53_SRAB_CTRLS);
+ if (set)
+ reg |= B53_SRAB_CTRLS_HOST_INTR;
+ else
+ reg &= ~B53_SRAB_CTRLS_HOST_INTR;
+ writel(reg, priv->regs + B53_SRAB_CTRLS);
+}
+
+static void b53_srab_prepare_irq(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *port;
+ unsigned int i;
+ char *name;
+
+ /* Clear all pending interrupts */
+ writel(0xffffffff, priv->regs + B53_SRAB_INTR);
+
+ if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
+ return;
+
+ for (i = 0; i < B53_N_PORTS; i++) {
+ port = &priv->port_intrs[i];
+
+ /* There is no port 6 */
+ if (i == 6)
+ continue;
+
+ name = kasprintf(GFP_KERNEL, "link_state_p%d", i);
+ if (!name)
+ return;
+
+ port->num = i;
+ port->dev = dev;
+ port->irq = platform_get_irq_byname(pdev, name);
+ kfree(name);
+ }
+
+ b53_srab_intr_set(priv, true);
+}
+
+static void b53_srab_mux_init(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p;
+ struct resource *r;
+ unsigned int port;
+ u32 reg, off = 0;
+ int ret;
+
+ if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
+ return;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->mux_config = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->mux_config))
+ return;
+
+	/* Obtain the port mux configuration so we know which ports
+ * actually map to SerDes lanes
+ */
+ for (port = 5; port > 3; port--, off += 4) {
+ p = &priv->port_intrs[port];
+
+ reg = readl(priv->mux_config + B53_MUX_CONFIG_P5 + off);
+ switch (reg & MUX_CONFIG_MASK) {
+ case MUX_CONFIG_SGMII:
+ p->mode = PHY_INTERFACE_MODE_SGMII;
+ ret = b53_serdes_init(dev, port);
+ if (ret)
+ continue;
+ break;
+ case MUX_CONFIG_MII_LITE:
+ p->mode = PHY_INTERFACE_MODE_MII;
+ break;
+ case MUX_CONFIG_GMII:
+ p->mode = PHY_INTERFACE_MODE_GMII;
+ break;
+ case MUX_CONFIG_RGMII:
+ p->mode = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case MUX_CONFIG_INTERNAL:
+ p->mode = PHY_INTERFACE_MODE_INTERNAL;
+ break;
+ default:
+ p->mode = PHY_INTERFACE_MODE_NA;
+ break;
+ }
+
+ if (p->mode != PHY_INTERFACE_MODE_NA)
+ dev_info(&pdev->dev, "Port %d mode: %s\n",
+ port, phy_modes(p->mode));
+ }
+}
+
static int b53_srab_probe(struct platform_device *pdev)
{
struct b53_platform_data *pdata = pdev->dev.platform_data;
@@ -417,13 +627,18 @@ static int b53_srab_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
+ b53_srab_prepare_irq(pdev);
+ b53_srab_mux_init(pdev);
+
return b53_switch_register(dev);
}
static int b53_srab_remove(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
+ struct b53_srab_priv *priv = dev->priv;
+ b53_srab_intr_set(priv, false);
if (dev)
b53_switch_remove(dev);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e0066adcd2f3..2eb68769562c 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -465,8 +465,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
mdiobus_unregister(priv->slave_mii_bus);
- if (priv->master_mii_dn)
- of_node_put(priv->master_mii_dn);
+ of_node_put(priv->master_mii_dn);
}
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
@@ -703,7 +702,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- unsigned int port;
int ret;
ret = bcm_sf2_sw_rst(priv);
@@ -715,14 +713,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, true);
- for (port = 0; port < DSA_MAX_PORTS; port++) {
- if (dsa_is_user_port(ds, port))
- bcm_sf2_port_setup(ds, port, NULL);
- else if (dsa_is_cpu_port(ds, port))
- bcm_sf2_imp_setup(ds, port);
- }
-
- bcm_sf2_enable_acb(ds);
+ ds->ops->setup(ds);
return 0;
}
@@ -1173,10 +1164,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
- /* Disable all ports and interrupts */
priv->wol_ports_mask = 0;
- bcm_sf2_sw_suspend(priv->dev->ds);
dsa_unregister_switch(priv->dev->ds);
+ /* Disable all ports and interrupts */
+ bcm_sf2_sw_suspend(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
return 0;
@@ -1199,16 +1190,14 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+ struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
return dsa_switch_suspend(priv->dev->ds);
}
static int bcm_sf2_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+ struct bcm_sf2_priv *priv = dev_get_drvdata(dev);
return dsa_switch_resume(priv->dev->ds);
}
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
new file mode 100644
index 000000000000..693a67f45bef
--- /dev/null
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -0,0 +1,1167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lantiq / Intel GSWIP switch driver for VRX200 SoCs
+ *
+ * Copyright (C) 2010 Lantiq Deutschland
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+#include <dt-bindings/mips/lantiq_rcu_gphy.h>
+
+#include "lantiq_pce.h"
+
+/* GSWIP MDIO Registers */
+#define GSWIP_MDIO_GLOB 0x00
+#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
+#define GSWIP_MDIO_CTRL 0x08
+#define GSWIP_MDIO_CTRL_BUSY BIT(12)
+#define GSWIP_MDIO_CTRL_RD BIT(11)
+#define GSWIP_MDIO_CTRL_WR BIT(10)
+#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
+#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
+#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
+#define GSWIP_MDIO_READ 0x09
+#define GSWIP_MDIO_WRITE 0x0A
+#define GSWIP_MDIO_MDC_CFG0 0x0B
+#define GSWIP_MDIO_MDC_CFG1 0x0C
+#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
+#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
+#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
+#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
+#define GSWIP_MDIO_PHY_LINK_UP 0x2000
+#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
+#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
+#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
+#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
+#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
+#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
+#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
+#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
+#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
+#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
+#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
+#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
+#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
+#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
+#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
+#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
+ GSWIP_MDIO_PHY_FCONRX_MASK | \
+ GSWIP_MDIO_PHY_FCONTX_MASK | \
+ GSWIP_MDIO_PHY_LINK_MASK | \
+ GSWIP_MDIO_PHY_SPEED_MASK | \
+ GSWIP_MDIO_PHY_FDUP_MASK)
+
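
Several GSWIP register banks are laid out in descending address order, hence subtracting macros such as GSWIP_MDIO_PHYp(p) above. A standalone sketch printing the per-PHY addresses counting down from 0x15:

#include <stdio.h>

#define GSWIP_MDIO_PHYp(p)	(0x15 - (p))

int main(void)
{
	int p;

	for (p = 0; p < 6; p++)
		printf("PHY %d register at 0x%02x\n", p, GSWIP_MDIO_PHYp(p));
	return 0;
}
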
+/* GSWIP MII Registers */
+#define GSWIP_MII_CFG0 0x00
+#define GSWIP_MII_CFG1 0x02
+#define GSWIP_MII_CFG5 0x04
+#define GSWIP_MII_CFG_EN BIT(14)
+#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
+#define GSWIP_MII_CFG_MODE_MIIP 0x0
+#define GSWIP_MII_CFG_MODE_MIIM 0x1
+#define GSWIP_MII_CFG_MODE_RMIIP 0x2
+#define GSWIP_MII_CFG_MODE_RMIIM 0x3
+#define GSWIP_MII_CFG_MODE_RGMII 0x4
+#define GSWIP_MII_CFG_MODE_MASK 0xf
+#define GSWIP_MII_CFG_RATE_M2P5 0x00
+#define GSWIP_MII_CFG_RATE_M25 0x10
+#define GSWIP_MII_CFG_RATE_M125 0x20
+#define GSWIP_MII_CFG_RATE_M50 0x30
+#define GSWIP_MII_CFG_RATE_AUTO 0x40
+#define GSWIP_MII_CFG_RATE_MASK 0x70
+#define GSWIP_MII_PCDU0 0x01
+#define GSWIP_MII_PCDU1 0x03
+#define GSWIP_MII_PCDU5 0x05
+#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
+#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
+
+/* GSWIP Core Registers */
+#define GSWIP_SWRES 0x000
+#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
+#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
+#define GSWIP_VERSION 0x013
+#define GSWIP_VERSION_REV_SHIFT 0
+#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
+#define GSWIP_VERSION_MOD_SHIFT 8
+#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
+#define GSWIP_VERSION_2_0 0x100
+#define GSWIP_VERSION_2_1 0x021
+#define GSWIP_VERSION_2_2 0x122
+#define GSWIP_VERSION_2_2_ETC 0x022
+
+#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
+#define GSWIP_BM_RAM_ADDR 0x044
+#define GSWIP_BM_RAM_CTRL 0x045
+#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
+#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
+#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_BM_QUEUE_GCTRL 0x04A
+#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
+/* buffer management Port Configuration Register */
+#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
+#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
+#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */
+/* buffer management Port Control Register */
+#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
+#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
+#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
+
+/* PCE */
+#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
+#define GSWIP_PCE_TBL_MASK 0x448
+#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
+#define GSWIP_PCE_TBL_ADDR 0x44E
+#define GSWIP_PCE_TBL_CTRL 0x44F
+#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
+#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
+#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
+#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
+#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
+#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
+#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
+#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
+#define GSWIP_PCE_GCTRL_0 0x456
+#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
+#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
+#define GSWIP_PCE_GCTRL_1 0x457
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
+#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
+#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11)
+#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
+#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
+#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
+#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
+#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
+#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
+
+#define GSWIP_MAC_FLEN 0x8C5
+#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
+
+/* Ethernet Switch Fetch DMA Port Control Register */
+#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
+#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
+#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
+#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+
+/* Ethernet Switch Store DMA Port Control Register */
+#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
+#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
+#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
+
+#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+
+struct gswip_hw_info {
+ int max_ports;
+ int cpu_port;
+};
+
+struct xway_gphy_match_data {
+ char *fe_firmware_name;
+ char *ge_firmware_name;
+};
+
+struct gswip_gphy_fw {
+ struct clk *clk_gate;
+ struct reset_control *reset;
+ u32 fw_addr_offset;
+ char *fw_name;
+};
+
+struct gswip_priv {
+ void __iomem *gswip;
+ void __iomem *mdio;
+ void __iomem *mii;
+ const struct gswip_hw_info *hw_info;
+ const struct xway_gphy_match_data *gphy_fw_name_cfg;
+ struct dsa_switch *ds;
+ struct device *dev;
+ struct regmap *rcu_regmap;
+ int num_gphy_fw;
+ struct gswip_gphy_fw *gphy_fw;
+};
+
+struct gswip_rmon_cnt_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name}
+
+static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = {
+ /** Receive Packet Count (only packets that are accepted and not discarded). */
+ MIB_DESC(1, 0x1F, "RxGoodPkts"),
+ MIB_DESC(1, 0x23, "RxUnicastPkts"),
+ MIB_DESC(1, 0x22, "RxMulticastPkts"),
+ MIB_DESC(1, 0x21, "RxFCSErrorPkts"),
+ MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"),
+ MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"),
+ MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"),
+ MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"),
+ MIB_DESC(1, 0x20, "RxGoodPausePkts"),
+ MIB_DESC(1, 0x1A, "RxAlignErrorPkts"),
+ MIB_DESC(1, 0x12, "Rx64BytePkts"),
+ MIB_DESC(1, 0x13, "Rx127BytePkts"),
+ MIB_DESC(1, 0x14, "Rx255BytePkts"),
+ MIB_DESC(1, 0x15, "Rx511BytePkts"),
+ MIB_DESC(1, 0x16, "Rx1023BytePkts"),
+ /** Receive Size 1024-1522 (or more, if configured) Packet Count. */
+ MIB_DESC(1, 0x17, "RxMaxBytePkts"),
+ MIB_DESC(1, 0x18, "RxDroppedPkts"),
+ MIB_DESC(1, 0x19, "RxFilteredPkts"),
+ MIB_DESC(2, 0x24, "RxGoodBytes"),
+ MIB_DESC(2, 0x26, "RxBadBytes"),
+ MIB_DESC(1, 0x11, "TxAcmDroppedPkts"),
+ MIB_DESC(1, 0x0C, "TxGoodPkts"),
+ MIB_DESC(1, 0x06, "TxUnicastPkts"),
+ MIB_DESC(1, 0x07, "TxMulticastPkts"),
+ MIB_DESC(1, 0x00, "Tx64BytePkts"),
+ MIB_DESC(1, 0x01, "Tx127BytePkts"),
+ MIB_DESC(1, 0x02, "Tx255BytePkts"),
+ MIB_DESC(1, 0x03, "Tx511BytePkts"),
+ MIB_DESC(1, 0x04, "Tx1023BytePkts"),
+ /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */
+ MIB_DESC(1, 0x05, "TxMaxBytePkts"),
+ MIB_DESC(1, 0x08, "TxSingleCollCount"),
+ MIB_DESC(1, 0x09, "TxMultCollCount"),
+ MIB_DESC(1, 0x0A, "TxLateCollCount"),
+ MIB_DESC(1, 0x0B, "TxExcessCollCount"),
+ MIB_DESC(1, 0x0D, "TxPauseCount"),
+ MIB_DESC(1, 0x10, "TxDroppedPkts"),
+ MIB_DESC(2, 0x0E, "TxGoodBytes"),
+};
+
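+/* Register offsets in this driver are 32-bit word offsets; the accessors
+ * below convert them to byte addresses for the MMIO access (offset * 4).
+ */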
+static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset)
+{
+ return __raw_readl(priv->gswip + (offset * 4));
+}
+
+static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset)
+{
+ __raw_writel(val, priv->gswip + (offset * 4));
+}
+
+static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set,
+ u32 offset)
+{
+ u32 val = gswip_switch_r(priv, offset);
+
+ val &= ~(clear);
+ val |= set;
+ gswip_switch_w(priv, val, offset);
+}
+
+static int gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset,
+ u32 cleared)
+{
+ u32 val;
+
+ return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val,
+ (val & cleared) == 0, 20, 50000);
+}
+
+static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset)
+{
+ return __raw_readl(priv->mdio + (offset * 4));
+}
+
+static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset)
+{
+ __raw_writel(val, priv->mdio + (offset * 4));
+}
+
+static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set,
+ u32 offset)
+{
+ u32 val = gswip_mdio_r(priv, offset);
+
+ val &= ~(clear);
+ val |= set;
+ gswip_mdio_w(priv, val, offset);
+}
+
+static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset)
+{
+ return __raw_readl(priv->mii + (offset * 4));
+}
+
+static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset)
+{
+ __raw_writel(val, priv->mii + (offset * 4));
+}
+
+static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
+ u32 offset)
+{
+ u32 val = gswip_mii_r(priv, offset);
+
+ val &= ~(clear);
+ val |= set;
+ gswip_mii_w(priv, val, offset);
+}
+
+static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
+ int port)
+{
+ switch (port) {
+ case 0:
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
+ break;
+ case 1:
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
+ break;
+ case 5:
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
+ break;
+ }
+}
+
+static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
+ int port)
+{
+ switch (port) {
+ case 0:
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
+ break;
+ case 1:
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1);
+ break;
+ case 5:
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5);
+ break;
+ }
+}
+
+static int gswip_mdio_poll(struct gswip_priv *priv)
+{
+ int cnt = 100;
+
+ while (likely(cnt--)) {
+ u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL);
+
+ if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0)
+ return 0;
+ usleep_range(20, 40);
+ }
+
+ return -ETIMEDOUT;
+}
+
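+/* An MDIO access first waits for any previous transaction to finish
+ * (GSWIP_MDIO_CTRL_BUSY cleared), then programs the data and command
+ * registers; the controller clears the busy bit again when it is done.
+ */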
+static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct gswip_priv *priv = bus->priv;
+ int err;
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE);
+ gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR |
+ ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
+ (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
+ GSWIP_MDIO_CTRL);
+
+ return 0;
+}
+
+static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
+{
+ struct gswip_priv *priv = bus->priv;
+ int err;
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD |
+ ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) |
+ (reg & GSWIP_MDIO_CTRL_REGAD_MASK),
+ GSWIP_MDIO_CTRL);
+
+ err = gswip_mdio_poll(priv);
+ if (err) {
+ dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n");
+ return err;
+ }
+
+ return gswip_mdio_r(priv, GSWIP_MDIO_READ);
+}
+
+static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
+{
+ struct dsa_switch *ds = priv->ds;
+
+ ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ if (!ds->slave_mii_bus)
+ return -ENOMEM;
+
+ ds->slave_mii_bus->priv = priv;
+ ds->slave_mii_bus->read = gswip_mdio_rd;
+ ds->slave_mii_bus->write = gswip_mdio_wr;
+ ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
+ snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
+ dev_name(priv->dev));
+ ds->slave_mii_bus->parent = priv->dev;
+ ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
+
+ return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+}
+
+static int gswip_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ /* RMON Counter Enable for port */
+ gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
+
+ /* enable port fetch/store dma & VLAN Modification */
+ gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN |
+ GSWIP_FDMA_PCTRL_VLANMOD_BOTH,
+ GSWIP_FDMA_PCTRLp(port));
+ gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
+ GSWIP_SDMA_PCTRLp(port));
+ gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
+ GSWIP_PCE_PCTRL_0p(port));
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
+ GSWIP_MDIO_PHY_SPEED_AUTO |
+ GSWIP_MDIO_PHY_FDUP_AUTO |
+ GSWIP_MDIO_PHY_FCONTX_AUTO |
+ GSWIP_MDIO_PHY_FCONRX_AUTO |
+ (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);
+
+ gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
+ /* Activate MDIO auto polling */
+ gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
+ }
+
+ return 0;
+}
+
+static void gswip_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
+ GSWIP_MDIO_PHY_LINK_MASK,
+ GSWIP_MDIO_PHYp(port));
+ /* Deactivate MDIO auto polling */
+ gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
+ }
+
+ gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
+ GSWIP_FDMA_PCTRLp(port));
+ gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
+ GSWIP_SDMA_PCTRLp(port));
+}
+
+static int gswip_pce_load_microcode(struct gswip_priv *priv)
+{
+ int i;
+ int err;
+
+ gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
+ gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
+ gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
+ gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
+ GSWIP_PCE_TBL_VAL(0));
+ gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
+ GSWIP_PCE_TBL_VAL(1));
+ gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
+ GSWIP_PCE_TBL_VAL(2));
+ gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
+ GSWIP_PCE_TBL_VAL(3));
+
+ /* start the table access: */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS,
+ GSWIP_PCE_TBL_CTRL);
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ return err;
+ }
+
+ /* tell the switch that the microcode is loaded */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID,
+ GSWIP_PCE_GCTRL_0);
+
+ return 0;
+}
+
+static int gswip_setup(struct dsa_switch *ds)
+{
+ struct gswip_priv *priv = ds->priv;
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ int i;
+ int err;
+
+ gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
+ usleep_range(5000, 10000);
+ gswip_switch_w(priv, 0, GSWIP_SWRES);
+
+ /* disable port fetch/store dma on all ports */
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_port_disable(ds, i, NULL);
+
+ /* enable Switch */
+ gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
+
+ err = gswip_pce_load_microcode(priv);
+ if (err) {
+ dev_err(priv->dev, "writing PCE microcode failed, %i", err);
+ return err;
+ }
+
+ /* Default unknown Broadcast/Multicast/Unicast port maps */
+ gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
+ gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
+ gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
+
+ /* disable PHY auto polling */
+ gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
+ /* Configure the MDIO Clock 2.5 MHz */
+ gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
+
+ /* Disable the xMII link */
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
+
+ /* enable special tag insertion on cpu port */
+ gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
+ GSWIP_FDMA_PCTRLp(cpu_port));
+
+ gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
+ GSWIP_MAC_CTRL_2p(cpu_port));
+ gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
+ gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
+ GSWIP_BM_QUEUE_GCTRL);
+
+ /* VLAN aware Switching */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
+
+ /* Mac Address Table Lock */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_1_MAC_GLOCK |
+ GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD,
+ GSWIP_PCE_GCTRL_1);
+
+ gswip_port_enable(ds, cpu_port, NULL);
+ return 0;
+}
+
+static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ return DSA_TAG_PROTO_GSWIP;
+}
+
+static void gswip_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ switch (port) {
+ case 0:
+ case 1:
+ if (!phy_interface_mode_is_rgmii(state->interface) &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_RMII)
+ goto unsupported;
+ break;
+ case 2:
+ case 3:
+ case 4:
+ if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
+ goto unsupported;
+ break;
+ case 5:
+ if (!phy_interface_mode_is_rgmii(state->interface) &&
+ state->interface != PHY_INTERFACE_MODE_INTERNAL)
+ goto unsupported;
+ break;
+ default:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported port: %i\n", port);
+ return;
+ }
+
+ /* Allow all the expected bits */
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ /* Except for MII and Reverse MII, we support Gigabit, including
+ * half duplex
+ */
+ if (state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_REVMII) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+
+unsupported:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported interface: %d\n", state->interface);
+ return;
+}
+
+static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct gswip_priv *priv = ds->priv;
+ u32 miicfg = 0;
+
+ miicfg |= GSWIP_MII_CFG_LDCLKDIS;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_INTERNAL:
+ miicfg |= GSWIP_MII_CFG_MODE_MIIM;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ miicfg |= GSWIP_MII_CFG_MODE_MIIP;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ miicfg |= GSWIP_MII_CFG_MODE_RGMII;
+ break;
+ default:
+ dev_err(ds->dev,
+ "Unsupported interface: %d\n", state->interface);
+ return;
+ }
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);
+
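+ /* In the RGMII internal delay modes the PHY is expected to add the
+ * delays, so clear the corresponding MAC side PCDU delay fields.
+ */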
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
+ GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
+ break;
+ default:
+ break;
+ }
+}
+
+static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
+}
+
+static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ /* Enable the xMII interface only for the external PHY */
+ if (interface != PHY_INTERFACE_MODE_INTERNAL)
+ gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+}
+
+static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
+ strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
+ u32 index)
+{
+ u32 result;
+ int err;
+
+ gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR);
+ gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK |
+ GSWIP_BM_RAM_CTRL_OPMOD,
+ table | GSWIP_BM_RAM_CTRL_BAS,
+ GSWIP_BM_RAM_CTRL);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
+ GSWIP_BM_RAM_CTRL_BAS);
+ if (err) {
+ dev_err(priv->dev, "timeout while reading table: %u, index: %u",
+ table, index);
+ return 0;
+ }
+
+ result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0));
+ result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16;
+
+ return result;
+}
+
+static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct gswip_priv *priv = ds->priv;
+ const struct gswip_rmon_cnt_desc *rmon_cnt;
+ int i;
+ u64 high;
+
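+ /* 64-bit counters are spread over two consecutive BM RAM entries:
+ * the low 32 bits at rmon_cnt->offset, the high 32 bits right after.
+ */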
+ for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) {
+ rmon_cnt = &gswip_rmon_cnt[i];
+
+ data[i] = gswip_bcm_ram_entry_read(priv, port,
+ rmon_cnt->offset);
+ if (rmon_cnt->size == 2) {
+ high = gswip_bcm_ram_entry_read(priv, port,
+ rmon_cnt->offset + 1);
+ data[i] |= high << 32;
+ }
+ }
+}
+
+static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(gswip_rmon_cnt);
+}
+
+static const struct dsa_switch_ops gswip_switch_ops = {
+ .get_tag_protocol = gswip_get_tag_protocol,
+ .setup = gswip_setup,
+ .port_enable = gswip_port_enable,
+ .port_disable = gswip_port_disable,
+ .phylink_validate = gswip_phylink_validate,
+ .phylink_mac_config = gswip_phylink_mac_config,
+ .phylink_mac_link_down = gswip_phylink_mac_link_down,
+ .phylink_mac_link_up = gswip_phylink_mac_link_up,
+ .get_strings = gswip_get_strings,
+ .get_ethtool_stats = gswip_get_ethtool_stats,
+ .get_sset_count = gswip_get_sset_count,
+};
+
+static const struct xway_gphy_match_data xrx200a1x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin",
+};
+
+static const struct xway_gphy_match_data xrx200a2x_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin",
+ .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin",
+};
+
+static const struct xway_gphy_match_data xrx300_gphy_data = {
+ .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin",
+ .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
+};
+
+static const struct of_device_id xway_gphy_match[] = {
+ { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
+ { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
+ { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
+ { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data },
+ { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data },
+ {},
+};
+
+static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw)
+{
+ struct device *dev = priv->dev;
+ const struct firmware *fw;
+ void *fw_addr;
+ dma_addr_t dma_addr;
+ dma_addr_t dev_addr;
+ size_t size;
+ int ret;
+
+ ret = clk_prepare_enable(gphy_fw->clk_gate);
+ if (ret)
+ return ret;
+
+ reset_control_assert(gphy_fw->reset);
+
+ ret = request_firmware(&fw, gphy_fw->fw_name, dev);
+ if (ret) {
+ dev_err(dev, "failed to load firmware: %s, error: %i\n",
+ gphy_fw->fw_name, ret);
+ return ret;
+ }
+
+ /* GPHY cores need the firmware code in a persistent and contiguous
+ * memory area with a 16 kB boundary aligned start address.
+ */
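+ /* dmam_alloc_coherent() typically only guarantees page alignment, so
+ * over-allocate by the alignment and round both the CPU and the DMA
+ * address up to the next 16 kB boundary before copying the firmware.
+ */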
+ size = fw->size + XRX200_GPHY_FW_ALIGN;
+
+ fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
+ if (fw_addr) {
+ fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
+ dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
+ memcpy(fw_addr, fw->data, fw->size);
+ } else {
+ dev_err(dev, "failed to alloc firmware memory\n");
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ release_firmware(fw);
+
+ ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
+ if (ret)
+ return ret;
+
+ reset_control_deassert(gphy_fw->reset);
+
+ return ret;
+}
+
+static int gswip_gphy_fw_probe(struct gswip_priv *priv,
+ struct gswip_gphy_fw *gphy_fw,
+ struct device_node *gphy_fw_np, int i)
+{
+ struct device *dev = priv->dev;
+ u32 gphy_mode;
+ int ret;
+ char gphyname[10];
+
+ snprintf(gphyname, sizeof(gphyname), "gphy%d", i);
+
+ gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
+ if (IS_ERR(gphy_fw->clk_gate)) {
+ dev_err(dev, "Failed to lookup gate clock\n");
+ return PTR_ERR(gphy_fw->clk_gate);
+ }
+
+ ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
+ /* Default to GE mode */
+ if (ret)
+ gphy_mode = GPHY_MODE_GE;
+
+ switch (gphy_mode) {
+ case GPHY_MODE_FE:
+ gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
+ break;
+ case GPHY_MODE_GE:
+ gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
+ break;
+ default:
+ dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
+ return -EINVAL;
+ }
+
+ gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
+ if (IS_ERR(gphy_fw->reset)) {
+ if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to lookup gphy reset\n");
+ return PTR_ERR(gphy_fw->reset);
+ }
+
+ return gswip_gphy_fw_load(priv, gphy_fw);
+}
+
+static void gswip_gphy_fw_remove(struct gswip_priv *priv,
+ struct gswip_gphy_fw *gphy_fw)
+{
+ int ret;
+
+ /* check if the device was fully probed */
+ if (!gphy_fw->fw_name)
+ return;
+
+ ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
+ if (ret)
+ dev_err(priv->dev, "can not reset GPHY FW pointer");
+
+ clk_disable_unprepare(gphy_fw->clk_gate);
+
+ reset_control_put(gphy_fw->reset);
+}
+
+static int gswip_gphy_fw_list(struct gswip_priv *priv,
+ struct device_node *gphy_fw_list_np, u32 version)
+{
+ struct device *dev = priv->dev;
+ struct device_node *gphy_fw_np;
+ const struct of_device_id *match;
+ int err;
+ int i = 0;
+
+ /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
+ * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
+ * needs a different GPHY firmware.
+ */
+ if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
+ switch (version) {
+ case GSWIP_VERSION_2_0:
+ priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
+ break;
+ case GSWIP_VERSION_2_1:
+ priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
+ break;
+ default:
+ dev_err(dev, "unknown GSWIP version: 0x%x", version);
+ return -ENOENT;
+ }
+ }
+
+ match = of_match_node(xway_gphy_match, gphy_fw_list_np);
+ if (match && match->data)
+ priv->gphy_fw_name_cfg = match->data;
+
+ if (!priv->gphy_fw_name_cfg) {
+ dev_err(dev, "GPHY compatible type not supported");
+ return -ENOENT;
+ }
+
+ priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
+ if (!priv->num_gphy_fw)
+ return -ENOENT;
+
+ priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
+ "lantiq,rcu");
+ if (IS_ERR(priv->rcu_regmap))
+ return PTR_ERR(priv->rcu_regmap);
+
+ priv->gphy_fw = devm_kcalloc(dev, priv->num_gphy_fw,
+ sizeof(*priv->gphy_fw), GFP_KERNEL);
+ if (!priv->gphy_fw)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
+ err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
+ gphy_fw_np, i);
+ if (err)
+ goto remove_gphy;
+ i++;
+ }
+
+ return 0;
+
+remove_gphy:
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ return err;
+}
+
+static int gswip_probe(struct platform_device *pdev)
+{
+ struct gswip_priv *priv;
+ struct resource *gswip_res, *mdio_res, *mii_res;
+ struct device_node *mdio_np, *gphy_fw_np;
+ struct device *dev = &pdev->dev;
+ int err;
+ int i;
+ u32 version;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ gswip_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->gswip = devm_ioremap_resource(dev, gswip_res);
+ if (IS_ERR(priv->gswip))
+ return PTR_ERR(priv->gswip);
+
+ mdio_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->mdio = devm_ioremap_resource(dev, mdio_res);
+ if (IS_ERR(priv->mdio))
+ return PTR_ERR(priv->mdio);
+
+ mii_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ priv->mii = devm_ioremap_resource(dev, mii_res);
+ if (IS_ERR(priv->mii))
+ return PTR_ERR(priv->mii);
+
+ priv->hw_info = of_device_get_match_data(dev);
+ if (!priv->hw_info)
+ return -EINVAL;
+
+ priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->priv = priv;
+ priv->ds->ops = &gswip_switch_ops;
+ priv->dev = dev;
+ version = gswip_switch_r(priv, GSWIP_VERSION);
+
+ /* load the GPHY firmware */
+ gphy_fw_np = of_find_compatible_node(pdev->dev.of_node, NULL,
+ "lantiq,gphy-fw");
+ if (gphy_fw_np) {
+ err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
+ if (err) {
+ dev_err(dev, "gphy fw probe failed\n");
+ return err;
+ }
+ }
+
+ /* bring up the mdio bus */
+ mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
+ "lantiq,xrx200-mdio");
+ if (mdio_np) {
+ err = gswip_mdio(priv, mdio_np);
+ if (err) {
+ dev_err(dev, "mdio probe failed\n");
+ goto gphy_fw;
+ }
+ }
+
+ err = dsa_register_switch(priv->ds);
+ if (err) {
+ dev_err(dev, "dsa switch register failed: %i\n", err);
+ goto mdio_bus;
+ }
+ if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
+ dev_err(dev, "wrong CPU port defined, HW only supports port: %i\n",
+ priv->hw_info->cpu_port);
+ err = -EINVAL;
+ dsa_unregister_switch(priv->ds);
+ goto mdio_bus;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(dev, "probed GSWIP version %lx mod %lx\n",
+ (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
+ (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
+ return 0;
+
+mdio_bus:
+ if (mdio_np)
+ mdiobus_unregister(priv->ds->slave_mii_bus);
+gphy_fw:
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ return err;
+}
+
+static int gswip_remove(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ if (!priv)
+ return 0;
+
+ /* disable the switch */
+ gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
+
+ dsa_unregister_switch(priv->ds);
+
+ if (priv->ds->slave_mii_bus)
+ mdiobus_unregister(priv->ds->slave_mii_bus);
+
+ for (i = 0; i < priv->num_gphy_fw; i++)
+ gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+
+ return 0;
+}
+
+static const struct gswip_hw_info gswip_xrx200 = {
+ .max_ports = 7,
+ .cpu_port = 6,
+};
+
+static const struct of_device_id gswip_of_match[] = {
+ { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gswip_of_match);
+
+static struct platform_driver gswip_driver = {
+ .probe = gswip_probe,
+ .remove = gswip_remove,
+ .driver = {
+ .name = "gswip",
+ .of_match_table = gswip_of_match,
+ },
+};
+
+module_platform_driver(gswip_driver);
+
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lantiq_pce.h b/drivers/net/dsa/lantiq_pce.h
new file mode 100644
index 000000000000..180663138e75
--- /dev/null
+++ b/drivers/net/dsa/lantiq_pce.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCE microcode extracted from UGW 7.1.1 switch api
+ *
+ * Copyright (c) 2012, 2014, 2015 Lantiq Deutschland GmbH
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+enum {
+ OUT_MAC0 = 0,
+ OUT_MAC1,
+ OUT_MAC2,
+ OUT_MAC3,
+ OUT_MAC4,
+ OUT_MAC5,
+ OUT_ETHTYP,
+ OUT_VTAG0,
+ OUT_VTAG1,
+ OUT_ITAG0,
+ OUT_ITAG1, /*10 */
+ OUT_ITAG2,
+ OUT_ITAG3,
+ OUT_IP0,
+ OUT_IP1,
+ OUT_IP2,
+ OUT_IP3,
+ OUT_SIP0,
+ OUT_SIP1,
+ OUT_SIP2,
+ OUT_SIP3, /*20*/
+ OUT_SIP4,
+ OUT_SIP5,
+ OUT_SIP6,
+ OUT_SIP7,
+ OUT_DIP0,
+ OUT_DIP1,
+ OUT_DIP2,
+ OUT_DIP3,
+ OUT_DIP4,
+ OUT_DIP5, /*30*/
+ OUT_DIP6,
+ OUT_DIP7,
+ OUT_SESID,
+ OUT_PROT,
+ OUT_APP0,
+ OUT_APP1,
+ OUT_IGMP0,
+ OUT_IGMP1,
+ OUT_IPOFF, /*39*/
+ OUT_NONE = 63,
+};
+
+/* parser's microcode length type */
+#define INSTR 0
+#define IPV6 1
+#define LENACCU 2
+
+/* parser's microcode flag type */
+enum {
+ FLAG_ITAG = 0,
+ FLAG_VLAN,
+ FLAG_SNAP,
+ FLAG_PPPOE,
+ FLAG_IPV6,
+ FLAG_IPV6FL,
+ FLAG_IPV4,
+ FLAG_IGMP,
+ FLAG_TU,
+ FLAG_HOP,
+ FLAG_NN1, /*10 */
+ FLAG_NN2,
+ FLAG_END,
+ FLAG_NO, /*13*/
+};
+
+struct gswip_pce_microcode {
+ u16 val_3;
+ u16 val_2;
+ u16 val_1;
+ u16 val_0;
+};
+
+#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
+ { val, msk, ((ns) << 10 | (out) << 4 | (len) >> 1),\
+ ((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }
+static const struct gswip_pce_microcode gswip_pce_microcode[] = {
+ /* value mask ns fields L type flags ipv4_len */
+ MC_ENTRY(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0),
+ MC_ENTRY(0x8100, 0xFFFF, 2, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ MC_ENTRY(0x88A8, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ MC_ENTRY(0x8100, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
+ MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x86DD, 0xFFFF, 22, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0600, 0x0600, 40, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0300, 0xFF00, 41, OUT_NONE, 0, INSTR, FLAG_SNAP, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_DIP7, 3, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0),
+ MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0, 4, INSTR, FLAG_IPV4, 1),
+ MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0, 3, INSTR, FLAG_IPV6, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3, 2, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0, 4, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, LENACCU, FLAG_NO, 0),
+ MC_ENTRY(0x1100, 0xFF00, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0600, 0xFF00, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_HOP, 0),
+ MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN1, 0),
+ MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN2, 0),
+ MC_ENTRY(0x0000, 0x0000, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x00E0, 35, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_HOP, 0),
+ MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN1, 0),
+ MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN2, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_PROT, 1, IPV6, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 40, OUT_SIP0, 16, INSTR, FLAG_NO, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_APP0, 4, INSTR, FLAG_IGMP, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+ MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0),
+};
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 62e486652e62..a5de9bffe5be 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -658,11 +658,7 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- if (phydev->advertising & ADVERTISED_Pause)
- lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (phydev->advertising & ADVERTISED_Asym_Pause)
- lcl_adv |= ADVERTISE_PAUSE_ASYM;
-
+ lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8da3d39e3218..e05d4eddc935 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -434,7 +434,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
err = request_threaded_irq(chip->irq, NULL,
mv88e6xxx_g1_irq_thread_fn,
- IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_SHARED,
dev_name(chip->dev), chip);
if (err)
mv88e6xxx_g1_irq_free_common(chip);
@@ -575,6 +575,13 @@ restore_link:
return err;
}
+static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ return port < chip->info->num_internal_phys;
+}
+
/* We expect the switch to perform auto negotiation if there is a real
* phy. However, in the case of a fixed link phy, we force the port
* settings from the fixed link settings.
@@ -585,7 +592,8 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!phy_is_pseudo_fixed_link(phydev))
+ if (!phy_is_pseudo_fixed_link(phydev) &&
+ mv88e6xxx_phy_is_internal(ds, port))
return;
mutex_lock(&chip->reg_lock);
@@ -709,13 +717,17 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int speed, duplex, link, pause, err;
- if (mode == MLO_AN_PHY)
+ if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
return;
if (mode == MLO_AN_FIXED) {
link = LINK_FORCED_UP;
speed = state->speed;
duplex = state->duplex;
+ } else if (!mv88e6xxx_phy_is_internal(ds, port)) {
+ link = state->link;
+ speed = state->speed;
+ duplex = state->duplex;
} else {
speed = SPEED_UNFORCED;
duplex = DUPLEX_UNFORCED;
@@ -2895,7 +2907,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed = mv88e6341_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3160,6 +3172,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
+ .serdes_irq_setup = mv88e6352_serdes_irq_setup,
+ .serdes_irq_free = mv88e6352_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6352_phylink_validate,
};
@@ -3366,6 +3380,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
+ .serdes_irq_setup = mv88e6352_serdes_irq_setup,
+ .serdes_irq_free = mv88e6352_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -3512,7 +3528,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed = mv88e6341_port_set_speed,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3664,6 +3680,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
+ .serdes_irq_setup = mv88e6352_serdes_irq_setup,
+ .serdes_irq_free = mv88e6352_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 7c791c1da4b9..bef01331266f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -128,7 +128,7 @@
#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000
#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7)
#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6)
-#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION BIT(5)
+#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5)
#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4)
/* Offset 0x0C: ATU Data Register */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 307410898fc9..5200e4bdce93 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
chip->ports[entry.portvec].atu_member_violation++;
}
- if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
+ if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU miss violation for %pM portvec %x\n",
entry.mac, entry.portvec);
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 46af8052e535..152a65d46e0b 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -110,6 +110,9 @@ int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
err = mv88e6xxx_phy_page_get(chip, phy, page);
if (!err) {
err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
+ if (!err)
+ err = mv88e6xxx_phy_write(chip, phy, reg, val);
+
mv88e6xxx_phy_page_put(chip, phy);
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 92945841c8e8..cd7db60a508b 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -228,8 +228,11 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
ctrl = MV88E6XXX_PORT_MAC_CTL_SPEED_1000;
break;
case 2500:
- ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
- MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ if (alt_bit)
+ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000 |
+ MV88E6390_PORT_MAC_CTL_ALTSPEED;
+ else
+ ctrl = MV88E6390_PORT_MAC_CTL_SPEED_10000;
break;
case 10000:
/* all bits set, fall through... */
@@ -291,6 +294,24 @@ int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
}
+/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */
+int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+{
+ if (speed == SPEED_MAX)
+ speed = port < 5 ? 1000 : 2500;
+
+ if (speed > 2500)
+ return -EOPNOTSUPP;
+
+ if (speed == 200 && port != 0)
+ return -EOPNOTSUPP;
+
+ if (speed == 2500 && port < 5)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
+}
+
/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
{
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index f32f56af8e35..36904c9bf955 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -269,6 +269,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup);
int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index e82983975754..bb69650ff772 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -185,6 +185,111 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
+static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
+{
+ struct dsa_switch *ds = chip->ds;
+ u16 status;
+ bool up;
+
+ mv88e6352_serdes_read(chip, MII_BMSR, &status);
+
+ /* Status must be read twice in order to give the current link
+ * status. Otherwise the change in link status since the last
+ * read of the register is returned.
+ */
+ mv88e6352_serdes_read(chip, MII_BMSR, &status);
+
+ up = status & BMSR_LSTATUS;
+
+ dsa_port_phylink_mac_change(ds, port, up);
+}
+
+static irqreturn_t mv88e6352_serdes_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_port *port = dev_id;
+ struct mv88e6xxx_chip *chip = port->chip;
+ irqreturn_t ret = IRQ_NONE;
+ u16 status;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+
+ err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_INT_STATUS, &status);
+ if (err)
+ goto out;
+
+ if (status & MV88E6352_SERDES_INT_LINK_CHANGE) {
+ ret = IRQ_HANDLED;
+ mv88e6352_serdes_irq_link(chip, port->port);
+ }
+out:
+ mutex_unlock(&chip->reg_lock);
+
+ return ret;
+}
+
+static int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE,
+ MV88E6352_SERDES_INT_LINK_CHANGE);
+}
+
+static int mv88e6352_serdes_irq_disable(struct mv88e6xxx_chip *chip)
+{
+ return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, 0);
+}
+
+int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
+{
+ int err;
+
+ if (!mv88e6352_port_has_serdes(chip, port))
+ return 0;
+
+ chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain,
+ MV88E6352_SERDES_IRQ);
+ /* irq_find_mapping() returns 0, not a negative errno, when no
+ * mapping exists.
+ */
+ if (!chip->ports[port].serdes_irq) {
+ dev_err(chip->dev, "Unable to map SERDES irq\n");
+ return -ENODEV;
+ }
+
+ /* Requesting the IRQ will trigger irq callbacks. So we cannot
+ * hold the reg_lock.
+ */
+ mutex_unlock(&chip->reg_lock);
+ err = request_threaded_irq(chip->ports[port].serdes_irq, NULL,
+ mv88e6352_serdes_thread_fn,
+ IRQF_ONESHOT, "mv88e6xxx-serdes",
+ &chip->ports[port]);
+ mutex_lock(&chip->reg_lock);
+
+ if (err) {
+ dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n",
+ err);
+ return err;
+ }
+
+ return mv88e6352_serdes_irq_enable(chip);
+}
+
+void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+{
+ if (!mv88e6352_port_has_serdes(chip, port))
+ return;
+
+ mv88e6352_serdes_irq_disable(chip);
+
+ /* Freeing the IRQ will trigger irq callbacks. So we cannot
+ * hold the reg_lock.
+ */
+ mutex_unlock(&chip->reg_lock);
+ free_irq(chip->ports[port].serdes_irq, &chip->ports[port]);
+ mutex_lock(&chip->reg_lock);
+
+ chip->ports[port].serdes_irq = 0;
+}
+
/* Return the SERDES lane address a port is using. Only Ports 9 and 10
* have SERDES lanes. Returns -ENODEV if a port does not have a lane.
*/
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index b1496de9c6fe..7870c5a9ef12 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -18,6 +18,19 @@
#define MV88E6352_ADDR_SERDES 0x0f
#define MV88E6352_SERDES_PAGE_FIBER 0x01
+#define MV88E6352_SERDES_IRQ 0x0b
+#define MV88E6352_SERDES_INT_ENABLE 0x12
+#define MV88E6352_SERDES_INT_SPEED_CHANGE BIT(14)
+#define MV88E6352_SERDES_INT_DUPLEX_CHANGE BIT(13)
+#define MV88E6352_SERDES_INT_PAGE_RX BIT(12)
+#define MV88E6352_SERDES_INT_AN_COMPLETE BIT(11)
+#define MV88E6352_SERDES_INT_LINK_CHANGE BIT(10)
+#define MV88E6352_SERDES_INT_SYMBOL_ERROR BIT(9)
+#define MV88E6352_SERDES_INT_FALSE_CARRIER BIT(8)
+#define MV88E6352_SERDES_INT_FIFO_OVER_UNDER BIT(7)
+#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4)
+#define MV88E6352_SERDES_INT_STATUS 0x13
+
#define MV88E6341_ADDR_SERDES 0x15
@@ -73,5 +86,8 @@ int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
int lane);
int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port,
int lane);
+int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
+void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
+
#endif
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index cdcde7f8e0b2..7e97e620bd44 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -955,8 +955,7 @@ qca8k_set_pm(struct qca8k_priv *priv, int enable)
static int qca8k_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct qca8k_priv *priv = platform_get_drvdata(pdev);
+ struct qca8k_priv *priv = dev_get_drvdata(dev);
qca8k_set_pm(priv, 0);
@@ -965,8 +964,7 @@ static int qca8k_suspend(struct device *dev)
static int qca8k_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct qca8k_priv *priv = platform_get_drvdata(pdev);
+ struct qca8k_priv *priv = dev_get_drvdata(dev);
qca8k_set_pm(priv, 1);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 2a0ddec1dd56..3dcc61821ed5 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -377,9 +377,7 @@ static int ax_mii_probe(struct net_device *dev)
return ret;
}
- /* mask with MAC supported features */
- phy_dev->supported &= PHY_BASIC_FEATURES;
- phy_dev->advertising = phy_dev->supported;
+ phy_set_max_speed(phy_dev, SPEED_100);
netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq);
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 32e9627e3880..77191a281866 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -564,26 +564,29 @@ static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
sizeof(info->bus_info));
}
-static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int etherh_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->supported = etherh_priv(dev)->supported;
- ethtool_cmd_speed_set(cmd, SPEED_10);
- cmd->duplex = DUPLEX_HALF;
- cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC;
- cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ?
- AUTONEG_ENABLE : AUTONEG_DISABLE);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ etherh_priv(dev)->supported);
+ cmd->base.speed = SPEED_10;
+ cmd->base.duplex = DUPLEX_HALF;
+ cmd->base.port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC;
+ cmd->base.autoneg = (dev->flags & IFF_AUTOMEDIA ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE);
return 0;
}
-static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int etherh_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
- switch (cmd->autoneg) {
+ switch (cmd->base.autoneg) {
case AUTONEG_ENABLE:
dev->flags |= IFF_AUTOMEDIA;
break;
case AUTONEG_DISABLE:
- switch (cmd->port) {
+ switch (cmd->base.port) {
case PORT_TP:
dev->if_port = IF_PORT_10BASET;
break;
@@ -622,12 +625,12 @@ static void etherh_set_msglevel(struct net_device *dev, u32 v)
}
static const struct ethtool_ops etherh_ethtool_ops = {
- .get_settings = etherh_get_settings,
- .set_settings = etherh_set_settings,
- .get_drvinfo = etherh_get_drvinfo,
- .get_ts_info = ethtool_op_get_ts_info,
- .get_msglevel = etherh_get_msglevel,
- .set_msglevel = etherh_set_msglevel,
+ .get_drvinfo = etherh_get_drvinfo,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_msglevel = etherh_get_msglevel,
+ .set_msglevel = etherh_set_msglevel,
+ .get_link_ksettings = etherh_get_link_ksettings,
+ .set_link_ksettings = etherh_set_link_ksettings,
};
static const struct net_device_ops etherh_netdev_ops = {
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 6fde68aa13a4..885e00d17807 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -108,6 +108,13 @@ config LANTIQ_ETOP
---help---
Support for the MII0 inside the Lantiq SoC
+config LANTIQ_XRX200
+ tristate "Lantiq / Intel xRX200 PMAC network driver"
+ depends on SOC_TYPE_XWAY
+ ---help---
+ Support for the PMAC of the Gigabit switch (GSWIP) inside the
+ Lantiq / Intel VRX200 VDSL SoC
+
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index b45d5f626b59..7b5bf9682066 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_KORINA) += korina.o
obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
+obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o
obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 4309be3724ad..7c9348a26cbb 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1279,9 +1279,9 @@ static int greth_mdio_probe(struct net_device *dev)
}
if (greth->gbit_mac)
- phy->supported &= PHY_GBIT_FEATURES;
+ phy_set_max_speed(phy, SPEED_1000);
else
- phy->supported &= PHY_BASIC_FEATURES;
+ phy_set_max_speed(phy, SPEED_100);
phy->advertising = phy->supported;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 48220b6c600d..ea34bcb868b5 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3258,19 +3258,11 @@ static int et131x_mii_probe(struct net_device *netdev)
return PTR_ERR(phydev);
}
- phydev->supported &= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_MII |
- SUPPORTED_TP);
+ phy_set_max_speed(phydev, SPEED_100);
if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
- phydev->supported |= SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full;
+ phy_set_max_speed(phydev, SPEED_1000);
- phydev->advertising = phydev->supported;
phydev->autoneg = AUTONEG_ENABLE;
phy_attached_info(phydev);
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
index d0c388cfd52f..3add305d34b4 100644
--- a/drivers/net/ethernet/alacritech/slic.h
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -8,7 +8,6 @@
#include <linux/spinlock_types.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
-#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/u64_stats_sync.h>
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 3143de45baaa..e1acafa82214 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -172,8 +172,7 @@ static int emac_mdio_probe(struct net_device *dev)
}
/* mask with MAC supported features */
- phydev->supported &= PHY_BASIC_FEATURES;
- phydev->advertising = phydev->supported;
+ phy_set_max_speed(phydev, SPEED_100);
db->link = 0;
db->speed = 0;
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index baca8f704a45..02921d877c08 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -835,13 +835,10 @@ static int init_phy(struct net_device *dev)
}
/* Stop Advertising 1000BASE Capability if interface is not GMII
- * Note: Checkpatch throws CHECKs for the camel case defines below,
- * it's ok to ignore.
*/
if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
(priv->phy_iface == PHY_INTERFACE_MODE_RMII))
- phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full);
+ phy_set_max_speed(phydev, SPEED_100);
/* Broken HW is sometimes missing the pull-up resistor on the
* MDIO line, which results in reads to non-existent devices returning
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
index 99b30353541a..9e87d7b8360f 100644
--- a/drivers/net/ethernet/amazon/Kconfig
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_AMAZON
config ENA_ETHERNET
tristate "Elastic Network Adapter (ENA) support"
- depends on (PCI_MSI && X86)
+ depends on PCI_MSI && !CPU_BIG_ENDIAN
---help---
This driver supports Elastic Network Adapter (ENA)
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 4532e574ebcd..9f80b73f90b1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -32,115 +32,81 @@
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_
-enum ena_admin_aq_opcode {
- ENA_ADMIN_CREATE_SQ = 1,
-
- ENA_ADMIN_DESTROY_SQ = 2,
-
- ENA_ADMIN_CREATE_CQ = 3,
-
- ENA_ADMIN_DESTROY_CQ = 4,
- ENA_ADMIN_GET_FEATURE = 8,
-
- ENA_ADMIN_SET_FEATURE = 9,
-
- ENA_ADMIN_GET_STATS = 11,
+enum ena_admin_aq_opcode {
+ ENA_ADMIN_CREATE_SQ = 1,
+ ENA_ADMIN_DESTROY_SQ = 2,
+ ENA_ADMIN_CREATE_CQ = 3,
+ ENA_ADMIN_DESTROY_CQ = 4,
+ ENA_ADMIN_GET_FEATURE = 8,
+ ENA_ADMIN_SET_FEATURE = 9,
+ ENA_ADMIN_GET_STATS = 11,
};
enum ena_admin_aq_completion_status {
- ENA_ADMIN_SUCCESS = 0,
-
- ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
-
- ENA_ADMIN_BAD_OPCODE = 2,
-
- ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
-
- ENA_ADMIN_MALFORMED_REQUEST = 4,
-
+ ENA_ADMIN_SUCCESS = 0,
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+ ENA_ADMIN_BAD_OPCODE = 2,
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
/* Additional status is provided in ACQ entry extended_status */
- ENA_ADMIN_ILLEGAL_PARAMETER = 5,
-
- ENA_ADMIN_UNKNOWN_ERROR = 6,
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
+ ENA_ADMIN_RESOURCE_BUSY = 7,
};
enum ena_admin_aq_feature_id {
- ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
-
- ENA_ADMIN_MAX_QUEUES_NUM = 2,
-
- ENA_ADMIN_HW_HINTS = 3,
-
- ENA_ADMIN_RSS_HASH_FUNCTION = 10,
-
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
-
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
-
- ENA_ADMIN_MTU = 14,
-
- ENA_ADMIN_RSS_HASH_INPUT = 18,
-
- ENA_ADMIN_INTERRUPT_MODERATION = 20,
-
- ENA_ADMIN_AENQ_CONFIG = 26,
-
- ENA_ADMIN_LINK_CONFIG = 27,
-
- ENA_ADMIN_HOST_ATTR_CONFIG = 28,
-
- ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
+ ENA_ADMIN_HW_HINTS = 3,
+ ENA_ADMIN_LLQ = 4,
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+ ENA_ADMIN_MTU = 14,
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
+ ENA_ADMIN_AENQ_CONFIG = 26,
+ ENA_ADMIN_LINK_CONFIG = 27,
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
enum ena_admin_placement_policy_type {
/* descriptors and headers are in host memory */
- ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
-
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
/* descriptors and headers are in device memory (a.k.a Low Latency
* Queue)
*/
- ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
};
enum ena_admin_link_types {
- ENA_ADMIN_LINK_SPEED_1G = 0x1,
-
- ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
-
- ENA_ADMIN_LINK_SPEED_5G = 0x4,
-
- ENA_ADMIN_LINK_SPEED_10G = 0x8,
-
- ENA_ADMIN_LINK_SPEED_25G = 0x10,
-
- ENA_ADMIN_LINK_SPEED_40G = 0x20,
-
- ENA_ADMIN_LINK_SPEED_50G = 0x40,
-
- ENA_ADMIN_LINK_SPEED_100G = 0x80,
-
- ENA_ADMIN_LINK_SPEED_200G = 0x100,
-
- ENA_ADMIN_LINK_SPEED_400G = 0x200,
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
};
enum ena_admin_completion_policy_type {
/* completion queue entry for each sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
-
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
/* completion queue entry upon request in sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
-
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
/* current queue head pointer is updated in OS memory upon sq
* descriptor request
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
-
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
/* current queue head pointer is updated in OS memory for each sq
* descriptor
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
};
/* basic stats return ena_admin_basic_stats while extended stats return a
@@ -148,15 +114,13 @@ enum ena_admin_completion_policy_type {
* device id
*/
enum ena_admin_get_stats_type {
- ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
-
- ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
};
enum ena_admin_get_stats_scope {
- ENA_ADMIN_SPECIFIC_QUEUE = 0,
-
- ENA_ADMIN_ETH_TRAFFIC = 1,
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
+ ENA_ADMIN_ETH_TRAFFIC = 1,
};
struct ena_admin_aq_common_desc {
@@ -227,7 +191,9 @@ struct ena_admin_acq_common_desc {
u16 extended_status;
- /* serves as a hint what AQ entries can be revoked */
+ /* indicates to the driver which AQ entry has been consumed by the
+ * device and could be reused
+ */
u16 sq_head_indx;
};
@@ -296,9 +262,8 @@ struct ena_admin_aq_create_sq_cmd {
};
enum ena_admin_sq_direction {
- ENA_ADMIN_SQ_DIRECTION_TX = 1,
-
- ENA_ADMIN_SQ_DIRECTION_RX = 2,
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
};
struct ena_admin_acq_create_sq_resp_desc {
@@ -483,8 +448,85 @@ struct ena_admin_device_attr_feature_desc {
u32 max_mtu;
};
+enum ena_admin_llq_header_location {
+ /* header is in descriptor list */
+ ENA_ADMIN_INLINE_HEADER = 1,
+ /* header in a separate ring, implies 16B descriptor list entry */
+ ENA_ADMIN_HEADER_RING = 2,
+};
+
+enum ena_admin_llq_ring_entry_size {
+ ENA_ADMIN_LIST_ENTRY_SIZE_128B = 1,
+ ENA_ADMIN_LIST_ENTRY_SIZE_192B = 2,
+ ENA_ADMIN_LIST_ENTRY_SIZE_256B = 4,
+};
+
+enum ena_admin_llq_num_descs_before_header {
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4,
+ ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8,
+};
+
+/* A packet's descriptor list entry always starts with one or more
+ * descriptors, followed by the header. Any remaining descriptors are
+ * placed at the beginning of the subsequent entry. Stride controls how
+ * those remaining descriptors are placed. This field is relevant only
+ * for inline header mode.
+ */
+enum ena_admin_llq_stride_ctrl {
+ ENA_ADMIN_SINGLE_DESC_PER_ENTRY = 1,
+ ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2,
+};
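To make the inline-header layout concrete, here is a small standalone sketch of where the header lands within one descriptor list entry; the sizes are example values matching the enums above:

	#include <stdio.h>

	int main(void)
	{
		unsigned int desc_entry_size = 16;	/* one TX descriptor, example */
		unsigned int descs_before_header = 2;	/* ..._BEFORE_HEADER_2 */
		unsigned int entry_size = 128;		/* ..._LIST_ENTRY_SIZE_128B */

		/* descriptors come first; the header starts right after them */
		unsigned int header_offset = descs_before_header * desc_entry_size;

		printf("header at byte %u of a %u-byte entry\n",
		       header_offset, entry_size);	/* byte 32 of 128 */
		return 0;
	}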
+
+struct ena_admin_feature_llq_desc {
+ u32 max_llq_num;
+
+ u32 max_llq_depth;
+
+	/* specifies the header locations the device supports; a bitfield
+	 * of enum ena_admin_llq_header_location.
+	 */
+ u16 header_location_ctrl_supported;
+
+ /* the header location the driver selected to use. */
+ u16 header_location_ctrl_enabled;
+
+	/* if inline header is specified, this is the size of a descriptor
+	 * list entry; if a separate header ring is specified, this is the
+	 * size of a header ring entry. A bitfield of enum
+	 * ena_admin_llq_ring_entry_size, specifying the entry sizes the
+	 * device supports.
+	 */
+ u16 entry_size_ctrl_supported;
+
+ /* the entry size the driver selected to use. */
+ u16 entry_size_ctrl_enabled;
+
+	/* valid only if inline header is specified. The first entry
+	 * associated with the packet includes descriptors and the header;
+	 * the rest of the entries are occupied by descriptors. This
+	 * parameter defines the max number of descriptors preceding the
+	 * header in the first entry. A bitfield of enum
+	 * ena_admin_llq_num_descs_before_header, specifying the values
+	 * the device supports.
+	 */
+ u16 desc_num_before_header_supported;
+
+	/* the value the driver selected to use */
+ u16 desc_num_before_header_enabled;
+
+	/* valid only if inline header was chosen; a bitfield of enum
+	 * ena_admin_llq_stride_ctrl.
+	 */
+ u16 descriptors_stride_ctrl_supported;
+
+ /* the stride control the driver selected to use */
+ u16 descriptors_stride_ctrl_enabled;
+};
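The supported/enabled field pairs in this struct follow a negotiation pattern that ena_com_config_llq_info(), later in this patch, implements: prefer the driver's default, otherwise fall back to any value the device offers. A hypothetical helper distilling that pattern:

	/* Hypothetical helper; the real logic is in ena_com_config_llq_info(). */
	static int pick_enabled(unsigned int supported, unsigned int preferred,
				const unsigned int *fallbacks, int n)
	{
		int i;

		if (supported & preferred)		/* driver default wins */
			return (int)preferred;

		for (i = 0; i < n; i++)			/* else take what is offered */
			if (supported & fallbacks[i])
				return (int)fallbacks[i];

		return -1;				/* caller reports -EINVAL */
	}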
+
struct ena_admin_queue_feature_desc {
- /* including LLQs */
u32 max_sq_num;
u32 max_sq_depth;
@@ -493,9 +535,9 @@ struct ena_admin_queue_feature_desc {
u32 max_cq_depth;
- u32 max_llq_num;
+ u32 max_legacy_llq_num;
- u32 max_llq_depth;
+ u32 max_legacy_llq_depth;
u32 max_header_size;
@@ -583,9 +625,8 @@ struct ena_admin_feature_offload_desc {
};
enum ena_admin_hash_functions {
- ENA_ADMIN_TOEPLITZ = 1,
-
- ENA_ADMIN_CRC32 = 2,
+ ENA_ADMIN_TOEPLITZ = 1,
+ ENA_ADMIN_CRC32 = 2,
};
struct ena_admin_feature_rss_flow_hash_control {
@@ -611,50 +652,35 @@ struct ena_admin_feature_rss_flow_hash_function {
/* RSS flow hash protocols */
enum ena_admin_flow_hash_proto {
- ENA_ADMIN_RSS_TCP4 = 0,
-
- ENA_ADMIN_RSS_UDP4 = 1,
-
- ENA_ADMIN_RSS_TCP6 = 2,
-
- ENA_ADMIN_RSS_UDP6 = 3,
-
- ENA_ADMIN_RSS_IP4 = 4,
-
- ENA_ADMIN_RSS_IP6 = 5,
-
- ENA_ADMIN_RSS_IP4_FRAG = 6,
-
- ENA_ADMIN_RSS_NOT_IP = 7,
-
+ ENA_ADMIN_RSS_TCP4 = 0,
+ ENA_ADMIN_RSS_UDP4 = 1,
+ ENA_ADMIN_RSS_TCP6 = 2,
+ ENA_ADMIN_RSS_UDP6 = 3,
+ ENA_ADMIN_RSS_IP4 = 4,
+ ENA_ADMIN_RSS_IP6 = 5,
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
+ ENA_ADMIN_RSS_NOT_IP = 7,
/* TCPv6 with extension header */
- ENA_ADMIN_RSS_TCP6_EX = 8,
-
+ ENA_ADMIN_RSS_TCP6_EX = 8,
/* IPv6 with extension header */
- ENA_ADMIN_RSS_IP6_EX = 9,
-
- ENA_ADMIN_RSS_PROTO_NUM = 16,
+ ENA_ADMIN_RSS_IP6_EX = 9,
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
};
/* RSS flow hash fields */
enum ena_admin_flow_hash_fields {
/* Ethernet Dest Addr */
- ENA_ADMIN_RSS_L2_DA = BIT(0),
-
+ ENA_ADMIN_RSS_L2_DA = BIT(0),
/* Ethernet Src Addr */
- ENA_ADMIN_RSS_L2_SA = BIT(1),
-
+ ENA_ADMIN_RSS_L2_SA = BIT(1),
/* ipv4/6 Dest Addr */
- ENA_ADMIN_RSS_L3_DA = BIT(2),
-
+ ENA_ADMIN_RSS_L3_DA = BIT(2),
/* ipv4/6 Src Addr */
- ENA_ADMIN_RSS_L3_SA = BIT(3),
-
+ ENA_ADMIN_RSS_L3_SA = BIT(3),
/* tcp/udp Dest Port */
- ENA_ADMIN_RSS_L4_DP = BIT(4),
-
+ ENA_ADMIN_RSS_L4_DP = BIT(4),
/* tcp/udp Src Port */
- ENA_ADMIN_RSS_L4_SP = BIT(5),
+ ENA_ADMIN_RSS_L4_SP = BIT(5),
};
struct ena_admin_proto_input {
@@ -693,15 +719,13 @@ struct ena_admin_feature_rss_flow_hash_input {
};
enum ena_admin_os_type {
- ENA_ADMIN_OS_LINUX = 1,
-
- ENA_ADMIN_OS_WIN = 2,
-
- ENA_ADMIN_OS_DPDK = 3,
-
- ENA_ADMIN_OS_FREEBSD = 4,
-
- ENA_ADMIN_OS_IPXE = 5,
+ ENA_ADMIN_OS_LINUX = 1,
+ ENA_ADMIN_OS_WIN = 2,
+ ENA_ADMIN_OS_DPDK = 3,
+ ENA_ADMIN_OS_FREEBSD = 4,
+ ENA_ADMIN_OS_IPXE = 5,
+ ENA_ADMIN_OS_ESXI = 6,
+ ENA_ADMIN_OS_GROUPS_NUM = 6,
};
struct ena_admin_host_info {
@@ -723,11 +747,27 @@ struct ena_admin_host_info {
/* 7:0 : major
* 15:8 : minor
* 23:16 : sub_minor
+ * 31:24 : module_type
*/
u32 driver_version;
/* features bitmap */
- u32 supported_network_features[4];
+ u32 supported_network_features[2];
+
+ /* ENA spec version of driver */
+ u16 ena_spec_version;
+
+ /* ENA device's Bus, Device and Function
+ * 2:0 : function
+ * 7:3 : device
+ * 15:8 : bus
+ */
+ u16 bdf;
+
+ /* Number of CPUs */
+ u16 num_cpus;
+
+ u16 reserved;
};
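A worked example of the bdf layout documented above; the shifts and widths mirror the ENA_ADMIN_HOST_INFO_{FUNCTION,DEVICE,BUS}_* masks defined later in this header:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t bus = 0x3a, dev = 0x1f, fn = 0x5;	/* example BDF */
		uint16_t bdf = (uint16_t)((bus << 8) | (dev << 3) | fn);

		/* unpack with the same layout: bus 15:8, device 7:3, fn 2:0 */
		printf("bdf=0x%04x bus=0x%x dev=0x%x fn=0x%x\n", bdf,
		       (bdf >> 8) & 0xff, (bdf >> 3) & 0x1f, bdf & 0x7);
		return 0;
	}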
struct ena_admin_rss_ind_table_entry {
@@ -800,6 +840,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_device_attr_feature_desc dev_attr;
+ struct ena_admin_feature_llq_desc llq;
+
struct ena_admin_queue_feature_desc max_queue;
struct ena_admin_feature_aenq_desc aenq;
@@ -847,6 +889,9 @@ struct ena_admin_set_feat_cmd {
/* rss indirection table */
struct ena_admin_feature_rss_ind_table ind_table;
+
+ /* LLQ configuration */
+ struct ena_admin_feature_llq_desc llq;
} u;
};
@@ -875,25 +920,18 @@ struct ena_admin_aenq_common_desc {
/* asynchronous event notification groups */
enum ena_admin_aenq_group {
- ENA_ADMIN_LINK_CHANGE = 0,
-
- ENA_ADMIN_FATAL_ERROR = 1,
-
- ENA_ADMIN_WARNING = 2,
-
- ENA_ADMIN_NOTIFICATION = 3,
-
- ENA_ADMIN_KEEP_ALIVE = 4,
-
- ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+ ENA_ADMIN_LINK_CHANGE = 0,
+ ENA_ADMIN_FATAL_ERROR = 1,
+ ENA_ADMIN_WARNING = 2,
+ ENA_ADMIN_NOTIFICATION = 3,
+ ENA_ADMIN_KEEP_ALIVE = 4,
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
};
enum ena_admin_aenq_notification_syndrom {
- ENA_ADMIN_SUSPEND = 0,
-
- ENA_ADMIN_RESUME = 1,
-
- ENA_ADMIN_UPDATE_HINTS = 2,
+ ENA_ADMIN_SUSPEND = 0,
+ ENA_ADMIN_RESUME = 1,
+ ENA_ADMIN_UPDATE_HINTS = 2,
};
struct ena_admin_aenq_entry {
@@ -928,27 +966,27 @@ struct ena_admin_ena_mmio_req_read_less_resp {
};
/* aq_common_desc */
-#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
-#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
/* sq */
-#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
-#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
/* acq_common_desc */
-#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
-#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
/* aq_create_sq_cmd */
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0)
/* aq_create_cq_cmd */
@@ -957,12 +995,12 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
/* get_set_feature_common_desc */
-#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
/* get_feature_link_desc */
-#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
-#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
-#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0)
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1
+#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1)
/* feature_offload_desc */
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0)
@@ -974,19 +1012,19 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1)
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2
#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2)
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
-#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3
+#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3)
/* feature_rss_flow_hash_function */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
@@ -994,25 +1032,32 @@ struct ena_admin_ena_mmio_req_read_less_resp {
/* feature_rss_flow_hash_input */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
-#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1)
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2
-#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2)
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1)
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2)
/* host_info */
-#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
-#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
-#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
-#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
-#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0)
+#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
+#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24
+#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24)
+#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0)
+#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3
+#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
+#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
/* aenq_common_desc */
-#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
/* aenq_link_change_desc */
-#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
+#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
#endif /*_ENA_ADMIN_H_ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 7635c38e77dd..420cede41ca4 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,9 +41,6 @@
#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
-#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
- | (ENA_COMMON_SPEC_VERSION_MINOR))
#define ENA_CTRL_MAJOR 0
#define ENA_CTRL_MINOR 0
@@ -61,6 +58,8 @@
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
+
#define ENA_REGS_ADMIN_INTR_MASK 1
#define ENA_POLL_MS 5
@@ -236,7 +235,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
tail_masked = admin_queue->sq.tail & queue_size_mask;
/* In case of queue FULL */
- cnt = atomic_read(&admin_queue->outstanding_cmds);
+ cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
pr_debug("admin queue is full.\n");
admin_queue->stats.out_of_space++;
@@ -305,7 +304,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue
struct ena_admin_acq_entry *comp,
size_t comp_size_in_bytes)
{
- unsigned long flags;
+ unsigned long flags = 0;
struct ena_comp_ctx *comp_ctx;
spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -333,7 +332,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
- io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
+ io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
sizeof(struct ena_eth_io_tx_desc) :
@@ -355,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
&io_sq->desc_addr.phys_addr,
GFP_KERNEL);
}
- } else {
+
+ if (!io_sq->desc_addr.virt_addr) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+ }
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Allocate bounce buffers */
+ io_sq->bounce_buf_ctrl.buffer_size =
+ ena_dev->llq_info.desc_list_entry_size;
+ io_sq->bounce_buf_ctrl.buffers_num =
+ ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+ io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+ size = io_sq->bounce_buf_ctrl.buffer_size *
+ io_sq->bounce_buf_ctrl.buffers_num;
+
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
- io_sq->desc_addr.virt_addr =
+ io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
- if (!io_sq->desc_addr.virt_addr) {
- io_sq->desc_addr.virt_addr =
+ if (!io_sq->bounce_buf_ctrl.base_buffer)
+ io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+
+ if (!io_sq->bounce_buf_ctrl.base_buffer) {
+ pr_err("bounce buffer memory allocation failed");
+ return -ENOMEM;
}
- }
- if (!io_sq->desc_addr.virt_addr) {
- pr_err("memory allocation failed");
- return -ENOMEM;
+ memcpy(&io_sq->llq_info, &ena_dev->llq_info,
+ sizeof(io_sq->llq_info));
+
+ /* Initiate the first bounce buffer */
+ io_sq->llq_buf_ctrl.curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, io_sq->llq_info.desc_list_entry_size);
+ io_sq->llq_buf_ctrl.descs_left_in_line =
+ io_sq->llq_info.descs_num_before_header;
}
io_sq->tail = 0;
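The bounce-buffer allocation above uses a NUMA-preference-then-fallback pattern: bias the allocation toward the queue's node, restore the device node, and retry without the hint on failure. A sketch of that pattern in isolation, using the same kernel helpers (this fragment is not part of the patch):

	/* Sketch only; dev_to_node()/set_dev_node()/devm_kzalloc() are the
	 * real kernel helpers, used here outside the patch for illustration.
	 */
	static void *alloc_near_queue(struct device *dmadev, size_t size,
				      int numa_node)
	{
		int orig_node = dev_to_node(dmadev);
		void *buf;

		set_dev_node(dmadev, numa_node);	/* bias toward the queue's node */
		buf = devm_kzalloc(dmadev, size, GFP_KERNEL);
		set_dev_node(dmadev, orig_node);	/* always restore */

		if (!buf)				/* retry without the node hint */
			buf = devm_kzalloc(dmadev, size, GFP_KERNEL);

		return buf;
	}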
@@ -460,7 +486,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
/* Go over all the completions */
while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
- ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+ ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Do not read the rest of the completion entry before the
* phase bit was validated
*/
@@ -511,7 +537,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
- unsigned long flags, timeout;
+ unsigned long flags = 0;
+ unsigned long timeout;
int ret;
timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
@@ -557,10 +584,160 @@ err:
return ret;
}
+/**
+ * Set the LLQ configuration in the device.
+ *
+ * The driver provides only the enabled feature values to the device,
+ * which, in turn, checks whether they are supported.
+ */
+static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_admin_queue *admin_queue;
+ struct ena_admin_set_feat_cmd cmd;
+ struct ena_admin_set_feat_resp resp;
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+ int ret;
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ admin_queue = &ena_dev->admin_queue;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
+
+ cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
+ cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
+ cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
+ cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+
+ if (unlikely(ret))
+ pr_err("Failed to set LLQ configurations: %d\n", ret);
+
+ return ret;
+}
+
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq_features,
+ struct ena_llq_configurations *llq_default_cfg)
+{
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+ u16 supported_feat;
+ int rc;
+
+ memset(llq_info, 0, sizeof(*llq_info));
+
+ supported_feat = llq_features->header_location_ctrl_supported;
+
+ if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
+ llq_info->header_location_ctrl =
+ llq_default_cfg->llq_header_location;
+ } else {
+ pr_err("Invalid header location control, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
+ supported_feat = llq_features->descriptors_stride_ctrl_supported;
+ if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
+ llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
+ } else {
+ if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
+ llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+ } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
+ llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+ } else {
+ pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_stride_ctrl, supported_feat,
+ llq_info->desc_stride_ctrl);
+ }
+ } else {
+ llq_info->desc_stride_ctrl = 0;
+ }
+
+ supported_feat = llq_features->entry_size_ctrl_supported;
+ if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
+ llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
+ llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
+ } else {
+ if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+ llq_info->desc_list_entry_size = 128;
+ } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
+ llq_info->desc_list_entry_size = 192;
+ } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+ llq_info->desc_list_entry_size = 256;
+ } else {
+ pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_ring_entry_size, supported_feat,
+ llq_info->desc_list_entry_size);
+ }
+ if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
+		/* The desc list entry size should be a whole multiple of 8.
+		 * This requirement comes from __iowrite64_copy()
+ */
+ pr_err("illegal entry size %d\n",
+ llq_info->desc_list_entry_size);
+ return -EINVAL;
+ }
+
+ if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
+ llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+ sizeof(struct ena_eth_io_tx_desc);
+ else
+ llq_info->descs_per_entry = 1;
+
+ supported_feat = llq_features->desc_num_before_header_supported;
+ if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
+ llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
+ } else {
+ if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
+ } else {
+ pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
+ supported_feat);
+ return -EINVAL;
+ }
+
+ pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_num_decs_before_header,
+ supported_feat, llq_info->descs_num_before_header);
+ }
+
+ rc = ena_com_set_llq(ena_dev);
+ if (rc)
+ pr_err("Cannot set LLQ configuration: %d\n", rc);
+
+ return 0;
+}
+
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
- unsigned long flags;
+ unsigned long flags = 0;
int ret;
wait_for_completion_timeout(&comp_ctx->wait_event,
@@ -606,7 +783,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp;
u32 mmio_read_reg, ret, i;
- unsigned long flags;
+ unsigned long flags = 0;
u32 timeout = mmio_read->reg_read_to;
might_sleep();
@@ -728,15 +905,17 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_sq->desc_addr.virt_addr) {
size = io_sq->desc_entry_size * io_sq->q_depth;
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- dma_free_coherent(ena_dev->dmadev, size,
- io_sq->desc_addr.virt_addr,
- io_sq->desc_addr.phys_addr);
- else
- devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+ dma_free_coherent(ena_dev->dmadev, size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr);
io_sq->desc_addr.virt_addr = NULL;
}
+
+ if (io_sq->bounce_buf_ctrl.base_buffer) {
+ devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+ io_sq->bounce_buf_ctrl.base_buffer = NULL;
+ }
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
@@ -1248,7 +1427,7 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- unsigned long flags;
+ unsigned long flags = 0;
spin_lock_irqsave(&admin_queue->q_lock, flags);
while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
@@ -1292,7 +1471,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
- unsigned long flags;
+ unsigned long flags = 0;
spin_lock_irqsave(&admin_queue->q_lock, flags);
ena_dev->admin_queue.running_state = state;
@@ -1326,7 +1505,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
}
if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
- pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+ pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
get_resp.u.aenq.supported_groups, groups_flag);
return -EOPNOTSUPP;
}
@@ -1400,11 +1579,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- if (ver < MIN_ENA_VER) {
- pr_err("ENA version is lower than the minimal version the driver supports\n");
- return -1;
- }
-
pr_info("ena controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
@@ -1479,7 +1653,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
sizeof(*mmio_read->read_resp),
&mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
- return -ENOMEM;
+ goto err;
ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
@@ -1488,6 +1662,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
mmio_read->readless_supported = true;
return 0;
+
+err:
+
+ return -ENOMEM;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
@@ -1523,8 +1701,7 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
- struct ena_aenq_handlers *aenq_handlers,
- bool init_spinlock)
+ struct ena_aenq_handlers *aenq_handlers)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
@@ -1550,8 +1727,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
atomic_set(&admin_queue->outstanding_cmds, 0);
- if (init_spinlock)
- spin_lock_init(&admin_queue->q_lock);
+ spin_lock_init(&admin_queue->q_lock);
ret = ena_com_init_comp_ctxt(admin_queue);
if (ret)
@@ -1748,6 +1924,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
else
return rc;
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+ if (!rc)
+ memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+ sizeof(get_resp.u.llq));
+ else if (rc == -EOPNOTSUPP)
+ memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+ else
+ return rc;
+
return 0;
}
@@ -1779,6 +1964,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
struct ena_admin_aenq_entry *aenq_e;
struct ena_admin_aenq_common_desc *aenq_common;
struct ena_com_aenq *aenq = &dev->aenq;
+ unsigned long long timestamp;
ena_aenq_handler handler_cb;
u16 masked_head, processed = 0;
u8 phase;
@@ -1796,10 +1982,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
*/
dma_rmb();
+ timestamp =
+ (unsigned long long)aenq_common->timestamp_low |
+ ((unsigned long long)aenq_common->timestamp_high << 32);
pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
- aenq_common->group, aenq_common->syndrom,
- (u64)aenq_common->timestamp_low +
- ((u64)aenq_common->timestamp_high << 32));
+ aenq_common->group, aenq_common->syndrom, timestamp);
/* Handle specific event*/
handler_cb = ena_com_get_specific_aenq_cb(dev,
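The timestamp reconstruction in the hunk above is plain 32-to-64-bit recombination; a standalone check of the expression:

	#include <stdio.h>

	int main(void)
	{
		unsigned int lo = 0xdeadbeef, hi = 0x1;		/* example halves */
		unsigned long long ts = (unsigned long long)lo |
					((unsigned long long)hi << 32);

		printf("timestamp=0x%llx\n", ts);		/* 0x1deadbeef */
		return 0;
	}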
@@ -2441,6 +2628,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
if (unlikely(!host_attr->host_info))
return -ENOMEM;
+ host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
+ (ENA_COMMON_SPEC_VERSION_MINOR));
+
return 0;
}
@@ -2712,3 +2903,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
intr_moder_tbl[level].pkts_per_interval;
entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}
+
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq_features,
+ struct ena_llq_configurations *llq_default_cfg)
+{
+ int rc;
+ int size;
+
+ if (!llq_features->max_llq_num) {
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
+ if (rc)
+ return rc;
+
+	/* Validate that the descriptors and header fit in one LLQ entry */
+ size = ena_dev->tx_max_header_size;
+ size += ena_dev->llq_info.descs_num_before_header *
+ sizeof(struct ena_eth_io_tx_desc);
+
+ if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+ pr_err("the size of the LLQ entry is smaller than needed\n");
+ return -EINVAL;
+ }
+
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+
+ return 0;
+}
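A worked example of the entry-size validation above, with illustrative sizes (assuming a 16-byte TX descriptor): the configured entry must hold the pre-header descriptors plus the largest pushed header.

	#include <stdio.h>

	int main(void)
	{
		int tx_max_header_size = 96;	/* example max pushed header */
		int descs_before_header = 2;
		int tx_desc_size = 16;		/* assumed TX descriptor size */
		int entry_size = 128;		/* configured desc_list_entry_size */

		int needed = tx_max_header_size +
			     descs_before_header * tx_desc_size;	/* 128 */

		printf("needed=%d entry=%d -> %s\n", needed, entry_size,
		       entry_size < needed ? "-EINVAL" : "LLQ mode usable");
		return 0;
	}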
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 7b784f8a06a6..078d6f2b4f39 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -37,6 +37,8 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
@@ -108,6 +110,14 @@ enum ena_intr_moder_level {
ENA_INTR_MAX_NUM_OF_LEVELS,
};
+struct ena_llq_configurations {
+ enum ena_admin_llq_header_location llq_header_location;
+ enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
+ enum ena_admin_llq_stride_ctrl llq_stride_ctrl;
+ enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
+ u16 llq_ring_entry_size_value;
+};
+
struct ena_intr_moder_entry {
unsigned int intr_moder_interval;
unsigned int pkts_per_interval;
@@ -142,6 +152,15 @@ struct ena_com_tx_meta {
u16 l4_hdr_len; /* In words */
};
+struct ena_com_llq_info {
+ u16 header_location_ctrl;
+ u16 desc_stride_ctrl;
+ u16 desc_list_entry_size_ctrl;
+ u16 desc_list_entry_size;
+ u16 descs_num_before_header;
+ u16 descs_per_entry;
+};
+
struct ena_com_io_cq {
struct ena_com_io_desc_addr cdesc_addr;
@@ -179,6 +198,20 @@ struct ena_com_io_cq {
} ____cacheline_aligned;
+struct ena_com_io_bounce_buffer_control {
+ u8 *base_buffer;
+ u16 next_to_use;
+ u16 buffer_size;
+ u16 buffers_num; /* Must be a power of 2 */
+};
+
+/* This struct tracks the current location of the next LLQ entry */
+struct ena_com_llq_pkt_ctrl {
+ u8 *curr_bounce_buf;
+ u16 idx;
+ u16 descs_left_in_line;
+};
+
struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
@@ -190,6 +223,9 @@ struct ena_com_io_sq {
u32 msix_vector;
struct ena_com_tx_meta cached_tx_meta;
+ struct ena_com_llq_info llq_info;
+ struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
+ struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
u16 q_depth;
u16 qid;
@@ -197,6 +233,7 @@ struct ena_com_io_sq {
u16 idx;
u16 tail;
u16 next_to_comp;
+ u16 llq_last_copy_tail;
u32 tx_max_header_size;
u8 phase;
u8 desc_entry_size;
@@ -334,6 +371,8 @@ struct ena_com_dev {
u16 intr_delay_resolution;
u32 intr_moder_tx_interval;
struct ena_intr_moder_entry *intr_moder_tbl;
+
+ struct ena_com_llq_info llq_info;
};
struct ena_com_dev_get_features_ctx {
@@ -342,6 +381,7 @@ struct ena_com_dev_get_features_ctx {
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload;
struct ena_admin_ena_hw_hints hw_hints;
+ struct ena_admin_feature_llq_desc llq;
};
struct ena_com_create_io_ctx {
@@ -397,8 +437,6 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
/* ena_com_admin_init - Init the admin and the async queues
* @ena_dev: ENA communication layer struct
* @aenq_handlers: Those handlers to be called upon event.
- * @init_spinlock: Indicate if this method should init the admin spinlock or
- * the spinlock was init before (for example, in a case of FLR).
*
* Initialize the admin submission and completion queues.
* Initialize the asynchronous events notification queues.
@@ -406,8 +444,7 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
* @return - 0 on success, negative value on failure.
*/
int ena_com_admin_init(struct ena_com_dev *ena_dev,
- struct ena_aenq_handlers *aenq_handlers,
- bool init_spinlock);
+ struct ena_aenq_handlers *aenq_handlers);
/* ena_com_admin_destroy - Destroy the admin and the async events queues.
* @ena_dev: ENA communication layer struct
@@ -935,6 +972,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
enum ena_intr_moder_level level,
struct ena_intr_moder_entry *entry);
+/* ena_com_config_dev_mode - Configure the placement policy of the device.
+ * @ena_dev: ENA communication layer struct
+ * @llq_features: LLQ feature descriptor, retrieved via
+ *	ena_com_get_dev_attr_feat.
+ * @llq_default_config: the driver's default LLQ parameter configuration
+ */
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq_features,
+ struct ena_llq_configurations *llq_default_config);
+
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
@@ -1044,4 +1091,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
+static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
+{
+ u16 size, buffers_num;
+ u8 *buf;
+
+ size = bounce_buf_ctrl->buffer_size;
+ buffers_num = bounce_buf_ctrl->buffers_num;
+
+ buf = bounce_buf_ctrl->base_buffer +
+ (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
+
+ prefetchw(bounce_buf_ctrl->base_buffer +
+ (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
+
+ return buf;
+}
+
#endif /* !(ENA_COM) */
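ena_com_get_next_bounce_buffer() above relies on buffers_num being a power of two so that increment-and-mask wraps around cleanly; a standalone model of the offset cycle:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t next_to_use = 0, buffers_num = 4, buffer_size = 128;
		int i;

		for (i = 0; i < 6; i++) {
			/* increment-and-mask: offsets cycle 0,128,256,384,0,128 */
			uint16_t off = (next_to_use++ & (buffers_num - 1)) *
				       buffer_size;
			printf("buffer %d at offset %u\n", i, off);
		}
		return 0;
	}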
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
index bb8d73676eab..23beb7e7ed7b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -32,8 +32,8 @@
#ifndef _ENA_COMMON_H_
#define _ENA_COMMON_H_
-#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */
-#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */
+#define ENA_COMMON_SPEC_VERSION_MAJOR 2
+#define ENA_COMMON_SPEC_VERSION_MINOR 0
/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
struct ena_common_mem_addr {
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 1c682b76190f..f6c2d3855be8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -59,16 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
return cdesc;
}
-static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
-{
- io_cq->head++;
-
- /* Switch phase bit in case of wrap around */
- if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
- io_cq->phase ^= 1;
-}
-
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
u16 tail_masked;
u32 offset;
@@ -80,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
-static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+ u8 *bounce_buffer)
{
- u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
- u32 offset = tail_masked * io_sq->desc_entry_size;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
- /* In case this queue isn't a LLQ */
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- return;
+ u16 dst_tail_mask;
+ u32 dst_offset;
- memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
- io_sq->desc_addr.virt_addr + offset,
- io_sq->desc_entry_size);
-}
+ dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
+ dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
+
+ /* Make sure everything was written into the bounce buffer before
+ * writing the bounce buffer to the device
+ */
+ wmb();
+
+ /* The line is completed. Copy it to dev */
+ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+ bounce_buffer, (llq_info->desc_list_entry_size) / 8);
-static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
-{
io_sq->tail++;
/* Switch phase bit in case of wrap around */
if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
io_sq->phase ^= 1;
+
+ return 0;
}
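__iowrite64_copy() above takes a count of 64-bit words, which is why the length is entry_size / 8 and why the config code earlier in this patch rejects entry sizes that are not multiples of 8. A standalone check of that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int entry_size = 192;	/* ..._LIST_ENTRY_SIZE_192B */

		if (entry_size & 0x7)		/* must be a multiple of 8 bytes */
			printf("illegal entry size %u\n", entry_size);
		else
			printf("%u bytes -> %u 64-bit copies\n",
			       entry_size, entry_size / 8);	/* 24 */
		return 0;
	}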
-static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
- u8 *head_src, u16 header_len)
+static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+ u8 *header_src,
+ u16 header_len)
{
- u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
- u8 __iomem *dev_head_addr =
- io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+ u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
+ u16 header_offset;
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
return 0;
- if (unlikely(!io_sq->header_addr)) {
- pr_err("Push buffer header ptr is NULL\n");
- return -EINVAL;
+ header_offset =
+ llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+ if (unlikely((header_offset + header_len) >
+ llq_info->desc_list_entry_size)) {
+ pr_err("trying to write header larger than llq entry can accommodate\n");
+ return -EFAULT;
+ }
+
+ if (unlikely(!bounce_buffer)) {
+ pr_err("bounce buffer is NULL\n");
+ return -EFAULT;
+ }
+
+ memcpy(bounce_buffer + header_offset, header_src, header_len);
+
+ return 0;
+}
+
+static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ u8 *bounce_buffer;
+ void *sq_desc;
+
+ bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+ if (unlikely(!bounce_buffer)) {
+ pr_err("bounce buffer is NULL\n");
+ return NULL;
+ }
+
+ sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
+ pkt_ctrl->idx++;
+ pkt_ctrl->descs_left_in_line--;
+
+ return sq_desc;
+}
+
+static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+ int rc;
+
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
+ return 0;
+
+ /* bounce buffer was used, so write it and get a new one */
+ if (pkt_ctrl->idx) {
+ rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+ pkt_ctrl->curr_bounce_buf);
+ if (unlikely(rc))
+ return rc;
+
+ pkt_ctrl->curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, llq_info->desc_list_entry_size);
+ }
+
+ pkt_ctrl->idx = 0;
+ pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
+ return 0;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ return get_sq_desc_llq(io_sq);
+
+ return get_sq_desc_regular_queue(io_sq);
+}
+
+static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+ int rc;
+
+ if (!pkt_ctrl->descs_left_in_line) {
+ rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+ pkt_ctrl->curr_bounce_buf);
+ if (unlikely(rc))
+ return rc;
+
+ pkt_ctrl->curr_bounce_buf =
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, llq_info->desc_list_entry_size);
+
+ pkt_ctrl->idx = 0;
+ if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
+ pkt_ctrl->descs_left_in_line = 1;
+ else
+ pkt_ctrl->descs_left_in_line =
+ llq_info->desc_list_entry_size / io_sq->desc_entry_size;
}
- memcpy_toio(dev_head_addr, head_src, header_len);
+ return 0;
+}
+
+static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ return ena_com_sq_update_llq_tail(io_sq);
+
+ io_sq->tail++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+ io_sq->phase ^= 1;
return 0;
}
@@ -186,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
return false;
}
-static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
- struct ena_com_tx_ctx *ena_tx_ctx)
+static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -232,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i
memcpy(&io_sq->cached_tx_meta, ena_meta,
sizeof(struct ena_com_tx_meta));
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
- ena_com_sq_update_tail(io_sq);
+ return ena_com_sq_update_tail(io_sq);
}
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
@@ -245,11 +349,14 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
ena_rx_ctx->l3_csum_err =
- (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
ena_rx_ctx->l4_csum_err =
- (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
+ ena_rx_ctx->l4_csum_checked =
+ !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
ena_rx_ctx->hash = cdesc->hash;
ena_rx_ctx->frag =
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
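The !! added in the hunk above forces the extracted status bit to exactly 0 or 1 before it lands in a narrow field. With the shift included the value is already 0/1, so the idiom mainly guards the variant where the shift is omitted; a standalone demonstration with an illustrative mask:

	#include <stdio.h>

	#define CSUM_ERR_MASK (1u << 14)	/* illustrative mask, bit 14 */

	int main(void)
	{
		unsigned int status = 0x4000;	/* bit 14 set */

		/* without !!: 0 or 0x4000; with !!: exactly 0 or 1 */
		printf("masked=0x%x normalized=%u\n",
		       status & CSUM_ERR_MASK, !!(status & CSUM_ERR_MASK));
		return 0;
	}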
@@ -271,18 +378,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
{
struct ena_eth_io_tx_desc *desc = NULL;
struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
- void *push_header = ena_tx_ctx->push_header;
+ void *buffer_to_push = ena_tx_ctx->push_header;
u16 header_len = ena_tx_ctx->header_len;
u16 num_bufs = ena_tx_ctx->num_bufs;
- int total_desc, i, rc;
+ u16 start_tail = io_sq->tail;
+ int i, rc;
bool have_meta;
u64 addr_hi;
WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
/* num_bufs +1 for potential meta desc */
- if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
- pr_err("Not enough space in the tx queue\n");
+ if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
+ pr_debug("Not enough space in the tx queue\n");
return -ENOMEM;
}
@@ -292,23 +400,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
return -EINVAL;
}
- /* start with pushing the header (if needed) */
- rc = ena_com_write_header(io_sq, push_header, header_len);
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+ !buffer_to_push))
+ return -EINVAL;
+
+ rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
if (unlikely(rc))
return rc;
have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
ena_tx_ctx);
- if (have_meta)
- ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+ if (have_meta) {
+ rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+ if (unlikely(rc))
+ return rc;
+ }
- /* If the caller doesn't want send packets */
+ /* If the caller doesn't want to send packets */
if (unlikely(!num_bufs && !header_len)) {
- *nb_hw_desc = have_meta ? 0 : 1;
- return 0;
+ rc = ena_com_close_bounce_buffer(io_sq);
+ *nb_hw_desc = io_sq->tail - start_tail;
+ return rc;
}
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
/* Set first desc when we don't have meta descriptor */
@@ -360,10 +477,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
for (i = 0; i < num_bufs; i++) {
/* The first desc share the same desc as the header */
if (likely(i != 0)) {
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
- ena_com_sq_update_tail(io_sq);
+ rc = ena_com_sq_update_tail(io_sq);
+ if (unlikely(rc))
+ return rc;
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
+
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
desc->len_ctrl |= (io_sq->phase <<
@@ -386,15 +507,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* set the last desc indicator */
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
-
- ena_com_sq_update_tail(io_sq);
+ rc = ena_com_sq_update_tail(io_sq);
+ if (unlikely(rc))
+ return rc;
- total_desc = max_t(u16, num_bufs, 1);
- total_desc += have_meta ? 1 : 0;
+ rc = ena_com_close_bounce_buffer(io_sq);
- *nb_hw_desc = total_desc;
- return 0;
+ *nb_hw_desc = io_sq->tail - start_tail;
+ return rc;
}
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
@@ -453,15 +573,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
- if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+ if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
return -ENOSPC;
desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
+
memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
desc->length = ena_buf->len;
- desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
@@ -472,43 +595,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->buff_addr_hi =
((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
- ena_com_sq_update_tail(io_sq);
-
- return 0;
-}
-
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
-{
- u8 expected_phase, cdesc_phase;
- struct ena_eth_io_tx_cdesc *cdesc;
- u16 masked_head;
-
- masked_head = io_cq->head & (io_cq->q_depth - 1);
- expected_phase = io_cq->phase;
-
- cdesc = (struct ena_eth_io_tx_cdesc *)
- ((uintptr_t)io_cq->cdesc_addr.virt_addr +
- (masked_head * io_cq->cdesc_entry_size_in_bytes));
-
- /* When the current completion descriptor phase isn't the same as the
- * expected, it mean that the device still didn't update
- * this completion.
- */
- cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
- if (cdesc_phase != expected_phase)
- return -EAGAIN;
-
- dma_rmb();
- if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
- pr_err("Invalid req id %d\n", cdesc->req_id);
- return -EINVAL;
- }
-
- ena_com_cq_inc_head(io_cq);
-
- *req_id = READ_ONCE(cdesc->req_id);
-
- return 0;
+ return ena_com_sq_update_tail(io_sq);
}
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2f7657227cfe..340d02b64ca6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -67,6 +67,7 @@ struct ena_com_rx_ctx {
enum ena_eth_io_l4_proto_index l4_proto;
bool l3_csum_err;
bool l4_csum_err;
+ u8 l4_csum_checked;
/* fragmented packet */
bool frag;
u32 hash;
@@ -86,8 +87,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
struct ena_com_buf *ena_buf,
u16 req_id);
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
-
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
@@ -96,7 +95,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
writel(intr_reg->intr_control, io_cq->unmask_reg);
}
-static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
u16 tail, next_to_comp, cnt;
@@ -107,11 +106,28 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
return io_sq->q_depth - 1 - cnt;
}
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+/* Check if the submission queue has enough space to hold required_buffers */
+static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
+ u16 required_buffers)
{
- u16 tail;
+ int temp;
- tail = io_sq->tail;
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return ena_com_free_desc(io_sq) >= required_buffers;
+
+	/* This calculation doesn't need to be 100% accurate. To reduce
+	 * the calculation overhead, just subtract 2 lines from the free
+	 * descs: one for the header line and one to compensate for the
+	 * integer division rounding down.
+	 */
+ temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
+
+ return ena_com_free_desc(io_sq) > temp;
+}
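A worked example of the LLQ space estimate above, assuming 128-byte entries and 16-byte descriptors (descs_per_entry = 8):

	#include <stdio.h>

	int main(void)
	{
		int descs_per_entry = 8;	/* 128B entry / 16B descriptor */
		int required_buffers = 17;
		int free_entries = 5;		/* from ena_com_free_desc() */

		/* entries needed (rounded down) plus the 2 slack lines */
		int temp = required_buffers / descs_per_entry + 2;	/* 4 */

		printf("need > %d free entries, have %d -> %s\n",
		       temp, free_entries,
		       free_entries > temp ? "enough" : "not enough");
		return 0;
	}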
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+ u16 tail = io_sq->tail;
pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
@@ -159,4 +175,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
io_sq->next_to_comp += elem;
}
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+ io_cq->head++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+ io_cq->phase ^= 1;
+}
+
+static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+ u16 *req_id)
+{
+ u8 expected_phase, cdesc_phase;
+ struct ena_eth_io_tx_cdesc *cdesc;
+ u16 masked_head;
+
+ masked_head = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_tx_cdesc *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+ /* When the current completion descriptor phase isn't the same as the
+ * expected phase, it means that the device hasn't updated
+ * this completion yet.
+ */
+ cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ if (cdesc_phase != expected_phase)
+ return -EAGAIN;
+
+ dma_rmb();
+
+ *req_id = READ_ONCE(cdesc->req_id);
+ if (unlikely(*req_id >= io_cq->q_depth)) {
+ pr_err("Invalid req id %d\n", *req_id);
+ return -EINVAL;
+ }
+
+ ena_com_cq_inc_head(io_cq);
+
+ return 0;
+}
+
#endif /* ENA_ETH_COM_H_ */
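ena_com_cq_inc_head() and ena_com_tx_comp_req_id_get(), now inlined above, implement the usual phase-bit completion protocol: the device toggles the phase it writes on every wrap of the completion queue, the driver toggles the phase it expects in lockstep, and a mismatch simply means "no new completion yet". A sketch of a consumer built on them (the loop shape and handle_tx_completion() are illustrative placeholders, not the driver's actual TX clean path):

/* Hypothetical polling loop on top of ena_com_tx_comp_req_id_get(). */
static int poll_tx_completions(struct ena_com_io_cq *io_cq, int budget)
{
	u16 req_id;
	int handled = 0;

	while (handled < budget) {
		int rc = ena_com_tx_comp_req_id_get(io_cq, &req_id);

		if (rc == -EAGAIN)	/* phase mismatch: nothing new yet */
			break;
		if (rc)			/* -EINVAL: out-of-range req_id */
			return rc;

		handle_tx_completion(req_id);	/* illustrative placeholder */
		handled++;
	}

	return handled;
}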
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
index f320c58793a5..00e0f056a741 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -33,25 +33,18 @@
#define _ENA_ETH_IO_H_
enum ena_eth_io_l3_proto_index {
- ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
-
- ENA_ETH_IO_L3_PROTO_IPV4 = 8,
-
- ENA_ETH_IO_L3_PROTO_IPV6 = 11,
-
- ENA_ETH_IO_L3_PROTO_FCOE = 21,
-
- ENA_ETH_IO_L3_PROTO_ROCE = 22,
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
};
enum ena_eth_io_l4_proto_index {
- ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
-
- ENA_ETH_IO_L4_PROTO_TCP = 12,
-
- ENA_ETH_IO_L4_PROTO_UDP = 13,
-
- ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
};
struct ena_eth_io_tx_desc {
@@ -242,9 +235,13 @@ struct ena_eth_io_rx_cdesc_base {
* checksum error detected, or, the controller didn't
* validate the checksum. This bit is valid only when
* l4_proto_idx indicates TCP/UDP packet, and,
- * ipv4_frag is not set
+ * ipv4_frag is not set. This bit is valid only when
+ * l4_csum_checked below is set.
* 15 : ipv4_frag - Indicates IPv4 fragmented packet
- * 23:16 : reserved16
+ * 16 : l4_csum_checked - L4 checksum was verified
+ * (could be OK or error); when cleared, the status of
+ * the checksum is unknown
+ * 23:17 : reserved17 - MBZ
* 24 : phase
* 25 : l3_csum2 - second checksum engine result
* 26 : first - Indicates first descriptor in
@@ -303,114 +300,116 @@ struct ena_eth_io_numa_node_cfg_reg {
};
/* tx_desc */
-#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
-#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
-#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
-#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
-#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
-#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
-#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
-#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
-#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
-#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
-#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
-#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
-#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
-#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
-#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
-#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
-#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
-#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
-#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
-#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
-#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
-#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
-#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
-#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
-#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
-#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
-#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
-#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
-#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
-#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
+#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16
+#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0)
+#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4
+#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4)
+#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7
+#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7)
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13
+#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14)
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15
+#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15)
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17
+#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17)
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22
+#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0)
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24
+#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24)
/* tx_meta_desc */
-#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
-#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
-#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
-#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
-#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
-#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
-#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
-#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
-#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
-#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
-#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
-#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
-#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
-#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
-#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
-#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
-#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
-#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
-#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
-#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
-#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
-#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
-#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
-#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
-#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
+#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
+#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
+#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21)
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23)
+#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24
+#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26
+#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27
+#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27)
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28
+#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28)
+#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8
+#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8)
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22
+#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22)
/* tx_cdesc */
-#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0)
/* rx_desc */
-#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
-#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
-#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
-#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
-#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
-#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
-#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
+#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0)
+#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2
+#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2)
+#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3
+#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3)
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4
+#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4)
/* rx_cdesc_base */
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
-#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
-#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
-#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
-#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
-#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
-#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
-#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
-#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
-#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
-#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
-#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
-#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
-#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
-#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0)
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5
+#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
+#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16)
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
+#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
+#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25)
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26
+#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26)
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27
+#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27)
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30
+#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30)
/* intr_reg */
-#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
-#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
-#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
-#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
-#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
+#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0)
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15
+#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15)
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30
+#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30)
/* numa_node_cfg_reg */
-#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
-#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
-#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0)
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31
+#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
#endif /*_ENA_ETH_IO_H_ */
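The one functional change among these realigned defines is the new bit 16 pair, ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT/_MASK, matching the descriptor documentation earlier in this file: l4_csum_err is only meaningful when l4_csum_checked is set. A sketch of decoding the two bits from a status word (the helper name is illustrative):

/* Illustrative decode of an rx cdesc status word with the masks above. */
static inline void rx_cdesc_csum_bits(u32 status, bool *csum_checked,
				      bool *csum_err)
{
	*csum_checked = !!(status &
			   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK);
	/* Only meaningful when *csum_checked is true */
	*csum_err = !!(status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK);
}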
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 521607bc4393..f3a5a384e6e8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(doorbells),
ENA_STAT_TX_ENTRY(prepare_ctx_err),
ENA_STAT_TX_ENTRY(bad_req_id),
+ ENA_STAT_TX_ENTRY(llq_buffer_copy),
ENA_STAT_TX_ENTRY(missed_tx),
};
@@ -96,6 +97,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
ENA_STAT_RX_ENTRY(bad_req_id),
ENA_STAT_RX_ENTRY(empty_rx_ring),
+ ENA_STAT_RX_ENTRY(csum_unchecked),
};
static const struct ena_stats ena_stats_ena_com_strings[] = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 29b5774dd32d..18956e7604a3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -39,7 +39,6 @@
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
@@ -238,6 +237,17 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
}
}
+ size = tx_ring->tx_max_header_size;
+ tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
+ if (!tx_ring->push_buf_intermediate_buf) {
+ tx_ring->push_buf_intermediate_buf = vzalloc(size);
+ if (!tx_ring->push_buf_intermediate_buf) {
+ vfree(tx_ring->tx_buffer_info);
+ vfree(tx_ring->free_tx_ids);
+ return -ENOMEM;
+ }
+ }
+
/* Req id ring for TX out of order completions */
for (i = 0; i < tx_ring->ring_size; i++)
tx_ring->free_tx_ids[i] = i;
@@ -266,6 +276,9 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
vfree(tx_ring->free_tx_ids);
tx_ring->free_tx_ids = NULL;
+
+ vfree(tx_ring->push_buf_intermediate_buf);
+ tx_ring->push_buf_intermediate_buf = NULL;
}
/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
@@ -603,6 +616,36 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}
+static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info)
+{
+ struct ena_com_buf *ena_buf;
+ u32 cnt;
+ int i;
+
+ ena_buf = tx_info->bufs;
+ cnt = tx_info->num_of_bufs;
+
+ if (unlikely(!cnt))
+ return;
+
+ if (tx_info->map_linear_data) {
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len),
+ DMA_TO_DEVICE);
+ ena_buf++;
+ cnt--;
+ }
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < cnt; i++) {
+ dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
+ ena_buf++;
+ }
+}
+
/* ena_free_tx_bufs - Free Tx Buffers per Queue
* @tx_ring: TX ring for which buffers be freed
*/
@@ -613,9 +656,6 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
- struct ena_com_buf *ena_buf;
- int nr_frags;
- int j;
if (!tx_info->skb)
continue;
@@ -631,21 +671,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
tx_ring->qid, i);
}
- ena_buf = tx_info->bufs;
- dma_unmap_single(tx_ring->dev,
- ena_buf->paddr,
- ena_buf->len,
- DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- nr_frags = tx_info->num_of_bufs - 1;
- for (j = 0; j < nr_frags; j++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev,
- ena_buf->paddr,
- ena_buf->len,
- DMA_TO_DEVICE);
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);
dev_kfree_skb_any(tx_info->skb);
}
@@ -736,8 +762,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
while (tx_pkts < budget) {
struct ena_tx_buffer *tx_info;
struct sk_buff *skb;
- struct ena_com_buf *ena_buf;
- int i, nr_frags;
rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
&req_id);
@@ -757,24 +781,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_info->skb = NULL;
tx_info->last_jiffies = 0;
- if (likely(tx_info->num_of_bufs != 0)) {
- ena_buf = tx_info->bufs;
-
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len),
- DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- nr_frags = tx_info->num_of_bufs - 1;
- for (i = 0; i < nr_frags; i++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len),
- DMA_TO_DEVICE);
- }
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
@@ -805,12 +812,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
*/
smp_mb();
- above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
- ENA_TX_WAKEUP_THRESH;
+ above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH);
if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
__netif_tx_lock(txq, smp_processor_id());
- above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
- ENA_TX_WAKEUP_THRESH;
+ above_thresh =
+ ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH);
if (netif_tx_queue_stopped(txq) && above_thresh) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
@@ -986,8 +994,19 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
return;
}
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (likely(ena_rx_ctx->l4_csum_checked)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->rx_stats.csum_unchecked++;
+ u64_stats_update_end(&rx_ring->syncp);
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
}
+
}
static void ena_set_rx_hash(struct ena_ring *rx_ring,
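The rewritten tail of ena_rx_checksum() turns the old two-way decision into a three-way one: verified-good maps to CHECKSUM_UNNECESSARY, never-verified maps to CHECKSUM_NONE and bumps the new csum_unchecked counter, and verified-bad is still handled by the error branch earlier in the function (outside this hunk). A condensed restatement, with the stats bookkeeping omitted:

/* Illustrative summary of the L4 outcome only; the real function also
 * handles non-TCP/UDP protocols, fragments and L3 errors before this.
 */
static int l4_csum_outcome(bool csum_err, bool csum_checked)
{
	if (!csum_checked)
		return CHECKSUM_NONE;	/* device never verified it */

	return csum_err ? CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}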
@@ -1102,8 +1121,10 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
rx_ring->next_to_clean = next_to_clean;
- refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
- refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
+ refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
+ refill_threshold =
+ min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
+ ENA_RX_REFILL_THRESH_PACKET);
/* Optimization, try to batch new rx buffers */
if (refill_required > refill_threshold) {
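Clamping the refill threshold with ENA_RX_REFILL_THRESH_PACKET (defined as 256 later in this diff) only changes behavior for large rings; the point is that batching should not wait for hundreds of missing buffers just because the ring is big:

    refill_threshold = min(ring_size / ENA_RX_REFILL_THRESH_DIVIDER, 256)
    ring_size = 1024  ->  min(128, 256) = 128   (unchanged)
    ring_size = 4096  ->  min(512, 256) = 256   (clamped)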
@@ -1300,7 +1321,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
/* Reserved the max msix vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
-
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);
@@ -1575,8 +1595,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
if (rc)
return rc;
- ena_init_napi(adapter);
-
ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
ena_refill_all_rx_bufs(adapter);
@@ -1593,7 +1611,7 @@ static int ena_up_complete(struct ena_adapter *adapter)
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
- struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_com_create_io_ctx ctx;
struct ena_com_dev *ena_dev;
struct ena_ring *tx_ring;
u32 msix_vector;
@@ -1606,6 +1624,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
msix_vector = ENA_IO_IRQ_IDX(qid);
ena_qid = ENA_IO_TXQ_IDX(qid);
+ memset(&ctx, 0x0, sizeof(ctx));
+
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
ctx.qid = ena_qid;
ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
@@ -1659,7 +1679,7 @@ create_err:
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
struct ena_com_dev *ena_dev;
- struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_com_create_io_ctx ctx;
struct ena_ring *rx_ring;
u32 msix_vector;
u16 ena_qid;
@@ -1671,6 +1691,8 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
msix_vector = ENA_IO_IRQ_IDX(qid);
ena_qid = ENA_IO_RXQ_IDX(qid);
+ memset(&ctx, 0x0, sizeof(ctx));
+
ctx.qid = ena_qid;
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -1730,6 +1752,13 @@ static int ena_up(struct ena_adapter *adapter)
ena_setup_io_intr(adapter);
+ /* napi poll functions should be initialized before running
+ * request_irq(), to handle a rare condition where there is a pending
+ * interrupt that causes the ISR to fire immediately while the poll
+ * function wasn't set yet, leading to a null dereference
+ */
+ ena_init_napi(adapter);
+
rc = ena_request_io_irq(adapter);
if (rc)
goto err_req_irq;
@@ -1981,73 +2010,70 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
return rc;
}
-/* Called with netif_tx_lock. */
-static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int ena_tx_map_skb(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info,
+ struct sk_buff *skb,
+ void **push_hdr,
+ u16 *header_len)
{
- struct ena_adapter *adapter = netdev_priv(dev);
- struct ena_tx_buffer *tx_info;
- struct ena_com_tx_ctx ena_tx_ctx;
- struct ena_ring *tx_ring;
- struct netdev_queue *txq;
+ struct ena_adapter *adapter = tx_ring->adapter;
struct ena_com_buf *ena_buf;
- void *push_hdr;
- u32 len, last_frag;
- u16 next_to_use;
- u16 req_id;
- u16 push_len;
- u16 header_len;
dma_addr_t dma;
- int qid, rc, nb_hw_desc;
- int i = -1;
-
- netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
- /* Determine which tx ring we will be placed on */
- qid = skb_get_queue_mapping(skb);
- tx_ring = &adapter->tx_ring[qid];
- txq = netdev_get_tx_queue(dev, qid);
-
- rc = ena_check_and_linearize_skb(tx_ring, skb);
- if (unlikely(rc))
- goto error_drop_packet;
+ u32 skb_head_len, frag_len, last_frag;
+ u16 push_len = 0;
+ u16 delta = 0;
+ int i = 0;
- skb_tx_timestamp(skb);
- len = skb_headlen(skb);
-
- next_to_use = tx_ring->next_to_use;
- req_id = tx_ring->free_tx_ids[next_to_use];
- tx_info = &tx_ring->tx_buffer_info[req_id];
- tx_info->num_of_bufs = 0;
-
- WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
- ena_buf = tx_info->bufs;
+ skb_head_len = skb_headlen(skb);
tx_info->skb = skb;
+ ena_buf = tx_info->bufs;
if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- /* prepared the push buffer */
- push_len = min_t(u32, len, tx_ring->tx_max_header_size);
- header_len = push_len;
- push_hdr = skb->data;
+ /* When the device is in LLQ mode, the driver will copy
+ * the header into the device memory space.
+ * The ena_com layer assumes the header is in a linear
+ * memory space.
+ * This assumption might be wrong since part of the header
+ * can be in the fragmented buffers.
+ * Use skb_header_pointer to make sure the header is in a
+ * linear memory space.
+ */
+
+ push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
+ *push_hdr = skb_header_pointer(skb, 0, push_len,
+ tx_ring->push_buf_intermediate_buf);
+ *header_len = push_len;
+ if (unlikely(skb->data != *push_hdr)) {
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.llq_buffer_copy++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ delta = push_len - skb_head_len;
+ }
} else {
- push_len = 0;
- header_len = min_t(u32, len, tx_ring->tx_max_header_size);
- push_hdr = NULL;
+ *push_hdr = NULL;
+ *header_len = min_t(u32, skb_head_len,
+ tx_ring->tx_max_header_size);
}
- netif_dbg(adapter, tx_queued, dev,
+ netif_dbg(adapter, tx_queued, adapter->netdev,
"skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
- push_hdr, push_len);
+ *push_hdr, push_len);
- if (len > push_len) {
+ if (skb_head_len > push_len) {
dma = dma_map_single(tx_ring->dev, skb->data + push_len,
- len - push_len, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ skb_head_len - push_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
goto error_report_dma_error;
ena_buf->paddr = dma;
- ena_buf->len = len - push_len;
+ ena_buf->len = skb_head_len - push_len;
ena_buf++;
tx_info->num_of_bufs++;
+ tx_info->map_linear_data = 1;
+ } else {
+ tx_info->map_linear_data = 0;
}
last_frag = skb_shinfo(skb)->nr_frags;
@@ -2055,18 +2081,75 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < last_frag; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = skb_frag_size(frag);
- dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ frag_len = skb_frag_size(frag);
+
+ if (unlikely(delta >= frag_len)) {
+ delta -= frag_len;
+ continue;
+ }
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
+ frag_len - delta, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
goto error_report_dma_error;
ena_buf->paddr = dma;
- ena_buf->len = len;
+ ena_buf->len = frag_len - delta;
ena_buf++;
+ tx_info->num_of_bufs++;
+ delta = 0;
}
- tx_info->num_of_bufs += last_frag;
+ return 0;
+
+error_report_dma_error:
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.dma_mapping_err++;
+ u64_stats_update_end(&tx_ring->syncp);
+ netdev_warn(adapter->netdev, "failed to map skb\n");
+
+ tx_info->skb = NULL;
+
+ tx_info->num_of_bufs += i;
+ ena_unmap_tx_skb(tx_ring, tx_info);
+
+ return -EINVAL;
+}
+
+/* Called with netif_tx_lock. */
+static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_tx_buffer *tx_info;
+ struct ena_com_tx_ctx ena_tx_ctx;
+ struct ena_ring *tx_ring;
+ struct netdev_queue *txq;
+ void *push_hdr;
+ u16 next_to_use, req_id, header_len;
+ int qid, rc, nb_hw_desc;
+
+ netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
+ /* Determine which tx ring we will be placed on */
+ qid = skb_get_queue_mapping(skb);
+ tx_ring = &adapter->tx_ring[qid];
+ txq = netdev_get_tx_queue(dev, qid);
+
+ rc = ena_check_and_linearize_skb(tx_ring, skb);
+ if (unlikely(rc))
+ goto error_drop_packet;
+
+ skb_tx_timestamp(skb);
+
+ next_to_use = tx_ring->next_to_use;
+ req_id = tx_ring->free_tx_ids[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+ WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
+
+ rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
+ if (unlikely(rc))
+ goto error_drop_packet;
memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
ena_tx_ctx.ena_bufs = tx_info->bufs;
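The delta bookkeeping in ena_tx_map_skb() above handles a push header that is longer than the skb's linear part: the first delta = push_len - skb_head_len bytes of fragment data were already copied into the push buffer, so the frag loop skips whole fragments consumed by the header and maps the first partially consumed one starting at offset delta. A worked example with illustrative sizes:

    push_len = 96, skb_headlen = 64   ->  delta = 32
    frag 0: 20 bytes,  20 <= 32       ->  skipped, delta = 12
    frag 1: 100 bytes, 100 > 12       ->  mapped from offset 12,
                                          length 100 - 12 = 88, delta = 0
    remaining frags                   ->  mapped in full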
@@ -2082,14 +2165,22 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
&nb_hw_desc);
+ /* ena_com_prepare_tx() can't fail due to a tx queue overflow,
+ * since the number of free descriptors in the queue is checked
+ * after sending the previous packet. If there isn't enough
+ * space in the queue for the next packet, the queue is stopped
+ * until enough space is available again.
+ * All other failure reasons of ena_com_prepare_tx() are fatal
+ * and therefore require a device reset.
+ */
if (unlikely(rc)) {
netif_err(adapter, tx_queued, dev,
"failed to prepare tx bufs\n");
u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.queue_stop++;
tx_ring->tx_stats.prepare_ctx_err++;
u64_stats_update_end(&tx_ring->syncp);
- netif_tx_stop_queue(txq);
+ adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
goto error_unmap_dma;
}
@@ -2111,8 +2202,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
* to sgl_size + 2. one for the meta descriptor and one for header
* (if the header is larger than tx_max_header_size).
*/
- if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
- (tx_ring->sgl_size + 2))) {
+ if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ tx_ring->sgl_size + 2))) {
netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
__func__, qid);
@@ -2131,8 +2222,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_mb();
- if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
- > ENA_TX_WAKEUP_THRESH) {
+ if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH)) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.queue_wakeup++;
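Both converted call sites take part in the same lost-wakeup avoidance protocol: the xmit side stops the queue, issues smp_mb(), then re-checks free space; the completion side advances next_to_comp, issues smp_mb(), then re-checks the stopped bit. Whichever way the two threads interleave, at least one of them observes the other's update. Schematically:

    xmit path (producer)            completion path (consumer)
    --------------------            --------------------------
    netif_tx_stop_queue(txq)        advance next_to_comp
    smp_mb()                        smp_mb()
    re-check free space;            re-check stopped + threshold;
    wake queue if space appeared    wake queue under the tx lock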
@@ -2152,58 +2243,15 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
-error_report_dma_error:
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.dma_mapping_err++;
- u64_stats_update_end(&tx_ring->syncp);
- netdev_warn(adapter->netdev, "failed to map skb\n");
-
- tx_info->skb = NULL;
-
error_unmap_dma:
- if (i >= 0) {
- /* save value of frag that failed */
- last_frag = i;
-
- /* start back at beginning and unmap skb */
- tx_info->skb = NULL;
- ena_buf = tx_info->bufs;
- dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- for (i = 0; i < last_frag; i++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
- }
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);
+ tx_info->skb = NULL;
error_drop_packet:
-
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- int i;
-
- /* Dont schedule NAPI if the driver is in the middle of reset
- * or netdev is down.
- */
-
- if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
- test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
- return;
-
- for (i = 0; i < adapter->num_queues; i++)
- napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
@@ -2221,7 +2269,8 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
return qid;
}
-static void ena_config_host_info(struct ena_com_dev *ena_dev)
+static void ena_config_host_info(struct ena_com_dev *ena_dev,
+ struct pci_dev *pdev)
{
struct ena_admin_host_info *host_info;
int rc;
@@ -2235,6 +2284,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
host_info = ena_dev->host_attr.host_info;
+ host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
strncpy(host_info->kernel_ver_str, utsname()->version,
@@ -2245,7 +2295,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
host_info->driver_version =
(DRV_MODULE_VER_MAJOR) |
(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
- (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
+ (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
+ ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
+ host_info->num_cpus = num_online_cpus();
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
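"K"[0] is simply the character 'K' (kernel driver) spelled as an array index; together with the new num_cpus field it extends the host info the device sees. Assuming the usual 8/16/24 values for the MINOR/SUB_MINOR/MODULE_TYPE shifts (the admin defs are not shown in this hunk) and the 2.0.1 version set later in this diff, the packed word would be:

    2 | (0 << 8) | (1 << 16) | ('K' << 24) = 0x4B010002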
@@ -2369,9 +2421,6 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_change_mtu = ena_change_mtu,
.ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
};
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -2459,7 +2508,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
}
/* ENA admin level init */
- rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
+ rc = ena_com_admin_init(ena_dev, &aenq_handlers);
if (rc) {
dev_err(dev,
"Can not initialize ena admin queue with device\n");
@@ -2472,7 +2521,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
*/
ena_com_set_admin_polling_mode(ena_dev, true);
- ena_config_host_info(ena_dev);
+ ena_config_host_info(ena_dev, pdev);
/* Get Device Attributes*/
rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
@@ -2557,15 +2606,14 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
adapter->dev_up_before_reset = dev_up;
-
if (!graceful)
ena_com_set_admin_running_state(ena_dev, false);
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
- /* Before releasing the ENA resources, a device reset is required.
- * (to prevent the device from accessing them).
+ /* Stop the device from sending AENQ events (in case the reset flag is set
+ * and the device is up, ena_close() has already reset the device).
 * In case the reset flag is set and the device is up, ena_down()
 * already performs the reset, so it can be skipped.
*/
@@ -2634,14 +2682,20 @@ static int ena_restore_device(struct ena_adapter *adapter)
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- dev_err(&pdev->dev, "Device reset completed successfully\n");
+ dev_err(&pdev->dev,
+ "Device reset completed successfully, Driver info: %s\n",
+ version);
return rc;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_device_destroy:
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+ ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -2823,7 +2877,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
rx_ring = &adapter->rx_ring[i];
refill_required =
- ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+ ena_com_free_desc(rx_ring->ena_com_io_sq);
if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
rx_ring->empty_rx_queue++;
@@ -2968,20 +3022,10 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
int io_sq_num, io_queue_num;
/* In case of LLQ use the llq number in the get feature cmd */
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- io_sq_num = get_feat_ctx->max_queues.max_llq_num;
-
- if (io_sq_num == 0) {
- dev_err(&pdev->dev,
- "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");
-
- ena_dev->tx_mem_queue_type =
- ENA_ADMIN_PLACEMENT_POLICY_HOST;
- io_sq_num = get_feat_ctx->max_queues.max_sq_num;
- }
- } else {
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ io_sq_num = get_feat_ctx->llq.max_llq_num;
+ else
io_sq_num = get_feat_ctx->max_queues.max_sq_num;
- }
io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
io_queue_num = min_t(int, io_queue_num, io_sq_num);
@@ -2997,18 +3041,52 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
return io_queue_num;
}
-static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_set_queues_placement_policy(struct pci_dev *pdev,
+ struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq,
+ struct ena_llq_configurations *llq_default_configurations)
{
bool has_mem_bar;
+ int rc;
+ u32 llq_feature_mask;
+
+ llq_feature_mask = 1 << ENA_ADMIN_LLQ;
+ if (!(ena_dev->supported_features & llq_feature_mask)) {
+ dev_err(&pdev->dev,
+ "LLQ is not supported. Fallback to host mode policy.\n");
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
- /* Enable push mode if device supports LLQ */
- if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
- else
+ rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
+ if (unlikely(rc)) {
+ dev_err(&pdev->dev,
+ "Failed to configure the device mode. Fallback to host mode policy.\n");
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ /* Nothing to config, exit */
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return 0;
+
+ if (!has_mem_bar) {
+ dev_err(&pdev->dev,
+ "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, ENA_MEM_BAR),
+ pci_resource_len(pdev, ENA_MEM_BAR));
+
+ if (!ena_dev->mem_bar)
+ return -EFAULT;
+
+ return 0;
}
static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
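ena_set_queues_placement_policy() above fails hard only on the final ioremap; every capability problem just degrades to host-mode placement. The ladder, in order:

    1. ENA_ADMIN_LLQ not in supported_features   -> host mode, return 0
    2. ena_com_config_dev_mode() fails           -> host mode, return 0
    3. negotiation itself chose host mode        -> nothing to map, return 0
    4. LLQ memory BAR not exposed                -> host mode, return 0
    5. devm_ioremap_wc() of the LLQ BAR fails    -> return -EFAULT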
@@ -3121,18 +3199,20 @@ err_rss_init:
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
- int release_bars;
-
- if (ena_dev->mem_bar)
- devm_iounmap(&pdev->dev, ena_dev->mem_bar);
-
- if (ena_dev->reg_bar)
- devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+ int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
- release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
pci_release_selected_regions(pdev, release_bars);
}
+static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+{
+ llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+ llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+ llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+ llq_config->llq_ring_entry_size_value = 128;
+}
+
static int ena_calc_queue_size(struct pci_dev *pdev,
struct ena_com_dev *ena_dev,
u16 *max_tx_sgl_size,
@@ -3148,7 +3228,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev,
if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
queue_size = min_t(u32, queue_size,
- get_feat_ctx->max_queues.max_llq_depth);
+ get_feat_ctx->llq.max_llq_depth);
queue_size = rounddown_pow_of_two(queue_size);
@@ -3181,7 +3261,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int version_printed;
struct net_device *netdev;
struct ena_adapter *adapter;
+ struct ena_llq_configurations llq_config;
struct ena_com_dev *ena_dev = NULL;
+ char *queue_type_str;
static int adapters_found;
int io_queue_num, bars, rc;
int queue_size;
@@ -3235,16 +3317,13 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_region;
}
- ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
+ set_default_llq_configurations(&llq_config);
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
- pci_resource_start(pdev, ENA_MEM_BAR),
- pci_resource_len(pdev, ENA_MEM_BAR));
- if (!ena_dev->mem_bar) {
- rc = -EFAULT;
- goto err_device_destroy;
- }
+ rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
+ &llq_config);
+ if (rc) {
+ dev_err(&pdev->dev, "ena device init failed\n");
+ goto err_device_destroy;
}
/* initial Tx interrupt delay, Assumes 1 usec granularity.
@@ -3259,8 +3338,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_device_destroy;
}
- dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
- io_queue_num, queue_size);
+ dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n",
+ io_queue_num, queue_size,
+ (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
+ "ENABLED" : "DISABLED");
/* dev zeroed in init_etherdev */
netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
@@ -3350,9 +3431,15 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&adapter->timer_service, ena_timer_service, 0);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ queue_type_str = "Regular";
+ else
+ queue_type_str = "Low Latency";
+
+ dev_info(&pdev->dev,
+ "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
DEVICE_NAME, (long)pci_resource_start(pdev, 0),
- netdev->dev_addr, io_queue_num);
+ netdev->dev_addr, io_queue_num, queue_type_str);
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 7c7ae56c52cf..521873642339 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -43,9 +43,9 @@
#include "ena_com.h"
#include "ena_eth_com.h"
-#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 5
-#define DRV_MODULE_VER_SUBMINOR 0
+#define DRV_MODULE_VER_MAJOR 2
+#define DRV_MODULE_VER_MINOR 0
+#define DRV_MODULE_VER_SUBMINOR 1
#define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION
@@ -61,6 +61,17 @@
#define ENA_ADMIN_MSIX_VEC 1
#define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues))
+/* The ENA buffer length field is 16 bits wide, so when PAGE_SIZE == 64kB the
+ * driver would pass 0.
+ * Since the max packet size the ENA handles is ~9kB, limit the buffer length
+ * to 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
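The truncation the comment above warns about is plain 16-bit arithmetic: a 64kB length does not fit in a u16 and would reach the device as zero, while the 16kB cap still fits:

    (u16)SZ_64K == 0x0000,  (u16)SZ_16K == 0x4000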
#define ENA_MIN_MSIX_VEC 2
#define ENA_REG_BAR 0
@@ -70,7 +81,7 @@
#define ENA_DEFAULT_RING_SIZE (1024)
#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
-#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN)
+#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
/* limit the buffer size to 600 bytes to handle MTU changes from very
* small to very large, in which case the number of buffers per packet
@@ -95,10 +106,11 @@
*/
#define ENA_TX_POLL_BUDGET_DIVIDER 4
-/* Refill Rx queue when number of available descriptors is below
- * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER
+/* Refill Rx queue when the number of required descriptors is above
+ * min(QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER, ENA_RX_REFILL_THRESH_PACKET)
*/
#define ENA_RX_REFILL_THRESH_DIVIDER 8
+#define ENA_RX_REFILL_THRESH_PACKET 256
/* Number of queues to check for missing queues per timer service */
#define ENA_MONITORED_TX_QUEUES 4
@@ -151,6 +163,9 @@ struct ena_tx_buffer {
/* num of buffers used by this skb */
u32 num_of_bufs;
+ /* Indicates whether bufs[0] maps the linear data of the skb. */
+ u8 map_linear_data;
+
/* Used for detect missing tx packets to limit the number of prints */
u32 print_once;
/* Save the last jiffies to detect missing tx packets
@@ -186,6 +201,7 @@ struct ena_stats_tx {
u64 tx_poll;
u64 doorbells;
u64 bad_req_id;
+ u64 llq_buffer_copy;
u64 missed_tx;
};
@@ -201,6 +217,7 @@ struct ena_stats_rx {
u64 rx_copybreak_pkt;
u64 bad_req_id;
u64 empty_rx_ring;
+ u64 csum_unchecked;
};
struct ena_ring {
@@ -257,6 +274,8 @@ struct ena_ring {
struct ena_stats_tx tx_stats;
struct ena_stats_rx rx_stats;
};
+
+ u8 *push_buf_intermediate_buf;
int empty_rx_queue;
} ____cacheline_aligned;
@@ -355,15 +374,4 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_get_sset_count(struct net_device *netdev, int sset);
-/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
- * driver passas 0.
- * Since the max packet size the ENA handles is ~9kB limit the buffer length to
- * 16kB.
- */
-#if PAGE_SIZE > SZ_16K
-#define ENA_PAGE_SIZE SZ_16K
-#else
-#define ENA_PAGE_SIZE PAGE_SIZE
-#endif
-
#endif /* !(ENA_H) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 48ca97fbe7bc..04fcafcc059c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -33,137 +33,125 @@
#define _ENA_REGS_H_
enum ena_regs_reset_reason_types {
- ENA_REGS_RESET_NORMAL = 0,
-
- ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
-
- ENA_REGS_RESET_ADMIN_TO = 2,
-
- ENA_REGS_RESET_MISS_TX_CMPL = 3,
-
- ENA_REGS_RESET_INV_RX_REQ_ID = 4,
-
- ENA_REGS_RESET_INV_TX_REQ_ID = 5,
-
- ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
-
- ENA_REGS_RESET_INIT_ERR = 7,
-
- ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
-
- ENA_REGS_RESET_OS_TRIGGER = 9,
-
- ENA_REGS_RESET_OS_NETDEV_WD = 10,
-
- ENA_REGS_RESET_SHUTDOWN = 11,
-
- ENA_REGS_RESET_USER_TRIGGER = 12,
-
- ENA_REGS_RESET_GENERIC = 13,
-
- ENA_REGS_RESET_MISS_INTERRUPT = 14,
+ ENA_REGS_RESET_NORMAL = 0,
+ ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
+ ENA_REGS_RESET_ADMIN_TO = 2,
+ ENA_REGS_RESET_MISS_TX_CMPL = 3,
+ ENA_REGS_RESET_INV_RX_REQ_ID = 4,
+ ENA_REGS_RESET_INV_TX_REQ_ID = 5,
+ ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
+ ENA_REGS_RESET_INIT_ERR = 7,
+ ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
+ ENA_REGS_RESET_OS_TRIGGER = 9,
+ ENA_REGS_RESET_OS_NETDEV_WD = 10,
+ ENA_REGS_RESET_SHUTDOWN = 11,
+ ENA_REGS_RESET_USER_TRIGGER = 12,
+ ENA_REGS_RESET_GENERIC = 13,
+ ENA_REGS_RESET_MISS_INTERRUPT = 14,
};
/* ena_registers offsets */
-#define ENA_REGS_VERSION_OFF 0x0
-#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
-#define ENA_REGS_CAPS_OFF 0x8
-#define ENA_REGS_CAPS_EXT_OFF 0xc
-#define ENA_REGS_AQ_BASE_LO_OFF 0x10
-#define ENA_REGS_AQ_BASE_HI_OFF 0x14
-#define ENA_REGS_AQ_CAPS_OFF 0x18
-#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
-#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
-#define ENA_REGS_ACQ_CAPS_OFF 0x28
-#define ENA_REGS_AQ_DB_OFF 0x2c
-#define ENA_REGS_ACQ_TAIL_OFF 0x30
-#define ENA_REGS_AENQ_CAPS_OFF 0x34
-#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
-#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
-#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
-#define ENA_REGS_AENQ_TAIL_OFF 0x44
-#define ENA_REGS_INTR_MASK_OFF 0x4c
-#define ENA_REGS_DEV_CTL_OFF 0x54
-#define ENA_REGS_DEV_STS_OFF 0x58
-#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
-#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
-#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+
+/* 0 base */
+#define ENA_REGS_VERSION_OFF 0x0
+#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
+#define ENA_REGS_CAPS_OFF 0x8
+#define ENA_REGS_CAPS_EXT_OFF 0xc
+#define ENA_REGS_AQ_BASE_LO_OFF 0x10
+#define ENA_REGS_AQ_BASE_HI_OFF 0x14
+#define ENA_REGS_AQ_CAPS_OFF 0x18
+#define ENA_REGS_ACQ_BASE_LO_OFF 0x20
+#define ENA_REGS_ACQ_BASE_HI_OFF 0x24
+#define ENA_REGS_ACQ_CAPS_OFF 0x28
+#define ENA_REGS_AQ_DB_OFF 0x2c
+#define ENA_REGS_ACQ_TAIL_OFF 0x30
+#define ENA_REGS_AENQ_CAPS_OFF 0x34
+#define ENA_REGS_AENQ_BASE_LO_OFF 0x38
+#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c
+#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40
+#define ENA_REGS_AENQ_TAIL_OFF 0x44
+#define ENA_REGS_INTR_MASK_OFF 0x4c
+#define ENA_REGS_DEV_CTL_OFF 0x54
+#define ENA_REGS_DEV_STS_OFF 0x58
+#define ENA_REGS_MMIO_REG_READ_OFF 0x5c
+#define ENA_REGS_MMIO_RESP_LO_OFF 0x60
+#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
/* version register */
-#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
-#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
-#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
+#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
+#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
+#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00
/* controller_version register */
-#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
-#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
-#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
-#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
-#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
-#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
-#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
+#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8
+#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16
+#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24
+#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000
/* caps register */
-#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
-#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
-#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
-#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
-#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
-#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
-#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
+#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1
+#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
+#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
/* aq_caps register */
-#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
-#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
-#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
+#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000
/* acq_caps register */
-#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
-#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
-#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
+#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000
/* aenq_caps register */
-#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
-#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
-#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
+#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16
+#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000
/* dev_ctl register */
-#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
-#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
-#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
-#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
-#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
-#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
-#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
-#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
-#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
+#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1
+#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2
+#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2
+#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
+#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
+#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
+#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
/* dev_sts register */
-#define ENA_REGS_DEV_STS_READY_MASK 0x1
-#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
-#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
-#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
-#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
-#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
-#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
-#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
-#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
-#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
-#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
-#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
+#define ENA_REGS_DEV_STS_READY_MASK 0x1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1
+#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2
+#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3
+#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8
+#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4
+#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10
+#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5
+#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7
+#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80
/* mmio_reg_read register */
-#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
-#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
-#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16
+#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
/* rss_ind_entry_update register */
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
-#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
+#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
#endif /*_ENA_REGS_H_ */
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 01d132c02ff9..265039c57023 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -440,7 +440,7 @@ static void am79c961_timeout(struct net_device *dev)
/*
* Transmit a packet
*/
-static int
+static netdev_tx_t
am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
{
struct dev_priv *priv = netdev_priv(dev);
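This and the following AMD driver hunks are one mechanical cleanup: ndo_start_xmit is declared to return netdev_tx_t, and returning a plain int bypasses that type checking. The expected shape, sketched for a hypothetical driver:

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* ... queue skb for transmission ... */
	return NETDEV_TX_OK;	/* or NETDEV_TX_BUSY to have the stack retry */
}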
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index c5b81268c284..d3d44e07afbc 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -339,7 +339,8 @@ static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
*init_rec );
static int lance_open( struct net_device *dev );
static void lance_init_ring( struct net_device *dev );
-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t lance_interrupt( int irq, void *dev_id );
static int lance_rx( struct net_device *dev );
static int lance_close( struct net_device *dev );
@@ -769,7 +770,8 @@ static void lance_tx_timeout (struct net_device *dev)
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+static netdev_tx_t
+lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
struct lance_ioreg *IO = lp->iobase;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 73ca8879ada7..7c1eb304c27e 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -564,17 +564,7 @@ static int au1000_mii_probe(struct net_device *dev)
return PTR_ERR(phydev);
}
- /* mask with MAC supported features */
- phydev->supported &= (SUPPORTED_10baseT_Half
- | SUPPORTED_10baseT_Full
- | SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full
- | SUPPORTED_Autoneg
- /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
- | SUPPORTED_MII
- | SUPPORTED_TP);
-
- phydev->advertising = phydev->supported;
+ phy_set_max_speed(phydev, SPEED_100);
aup->old_link = 0;
aup->old_speed = 0;
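phy_set_max_speed(phydev, SPEED_100) replaces the hand-rolled mask: it trims phydev->supported down to modes at or below 100 Mb/s and then derives the advertising mask from the result, which is what the removed block did by hand (minus the commented-out pause bits). Roughly equivalent, as a sketch:

	/* approximate equivalent of the removed block */
	phydev->supported &= PHY_BASIC_FEATURES;	/* 10/100, autoneg, TP, MII */
	phydev->advertising = phydev->supported;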
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 116997a8b593..9f23703dd509 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -894,7 +894,7 @@ static void lance_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
int i, ret;
unsigned long esar_base;
unsigned char *esar;
+ const char *desc;
if (dec_lance_debug && version_printed++ == 0)
printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
*/
switch (type) {
case ASIC_LANCE:
- printk("%s: IOASIC onboard LANCE", name);
+ desc = "IOASIC onboard LANCE";
break;
case PMAD_LANCE:
- printk("%s: PMAD-AA", name);
+ desc = "PMAD-AA";
break;
case PMAX_LANCE:
- printk("%s: PMAX onboard LANCE", name);
+ desc = "PMAX onboard LANCE";
break;
}
for (i = 0; i < 6; i++)
dev->dev_addr[i] = esar[i * 4];
- printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+ printk("%s: %s, addr = %pM, irq = %d\n",
+ name, desc, dev->dev_addr, dev->irq);
dev->netdev_ops = &lance_netdev_ops;
dev->watchdog_timeo = 5*HZ;
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index e248d1ab3e47..8931ce6bab7b 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -435,10 +435,8 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
}
if(cards[i].vendor_id) {
for(j=0;j<3;j++)
- if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) {
+ if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
release_region(ioaddr, cards[i].total_size);
- continue;
- }
}
break;
}
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 77b1db267730..da7e3d4f4166 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -236,7 +236,8 @@ struct lance_private {
static int lance_probe( struct net_device *dev);
static int lance_open( struct net_device *dev );
static void lance_init_ring( struct net_device *dev );
-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t lance_interrupt( int irq, void *dev_id);
static int lance_rx( struct net_device *dev );
static int lance_close( struct net_device *dev );
@@ -511,7 +512,8 @@ static void lance_init_ring( struct net_device *dev )
}
-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+static netdev_tx_t
+lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
int entry, len;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index cdd7a611479b..b4fc0ed5bce8 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1106,7 +1106,7 @@ static void lance_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
int entry, skblen, len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 24f1053b8785..d96a84a62d78 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2009,7 +2009,7 @@ static int xgbe_close(struct net_device *netdev)
return 0;
}
-static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -2018,7 +2018,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
struct xgbe_ring *ring;
struct xgbe_packet_data *packet;
struct netdev_queue *txq;
- int ret;
+ netdev_tx_t ret;
DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 3ceb4f95ca7c..151bdb629e8a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -878,9 +878,10 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- phy_data->phydev->supported = PHY_GBIT_FEATURES;
- phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phy_data->phydev->advertising = phy_data->phydev->supported;
+ phy_data->phydev->supported = PHY_10BT_FEATURES |
+ PHY_100BT_FEATURES |
+ PHY_1000BT_FEATURES;
+ phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
"Finisar PHY quirk in place\n");
@@ -950,9 +951,10 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- phy_data->phydev->supported = PHY_GBIT_FEATURES;
- phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phy_data->phydev->advertising = phy_data->phydev->supported;
+ phy_data->phydev->supported = (PHY_10BT_FEATURES |
+ PHY_100BT_FEATURES |
+ PHY_1000BT_FEATURES);
+ phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
"BelFuse PHY quirk in place\n");
@@ -1495,10 +1497,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
if (!phy_data->phydev)
return;
- if (phy_data->phydev->advertising & ADVERTISED_Pause)
- lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (phy_data->phydev->advertising & ADVERTISED_Asym_Pause)
- lcl_adv |= ADVERTISE_PAUSE_ASYM;
+ lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising);
if (phy_data->phydev->pause) {
XGBE_SET_LP_ADV(lks, Pause);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 4f50f11718f4..78dd09b5beeb 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -306,45 +306,25 @@ static int xgene_set_pauseparam(struct net_device *ndev,
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
- u32 oldadv, newadv;
if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
if (!phydev)
return -EINVAL;
- if (!(phydev->supported & SUPPORTED_Pause) ||
- (!(phydev->supported & SUPPORTED_Asym_Pause) &&
- pp->rx_pause != pp->tx_pause))
+ if (!phy_validate_pause(phydev, pp))
return -EINVAL;
pdata->pause_autoneg = pp->autoneg;
pdata->tx_pause = pp->tx_pause;
pdata->rx_pause = pp->rx_pause;
- oldadv = phydev->advertising;
- newadv = oldadv & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause);
- if (pp->rx_pause)
- newadv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-
- if (pp->tx_pause)
- newadv ^= ADVERTISED_Asym_Pause;
-
- if (oldadv ^ newadv) {
- phydev->advertising = newadv;
-
- if (phydev->autoneg)
- return phy_start_aneg(phydev);
-
- if (!pp->autoneg) {
- pdata->mac_ops->flowctl_tx(pdata,
- pdata->tx_pause);
- pdata->mac_ops->flowctl_rx(pdata,
- pdata->rx_pause);
- }
+ if (!pp->autoneg) {
+ pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
+ pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
}
-
} else {
if (pp->autoneg)
return -EINVAL;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 078a04dc1182..e3560311711a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -895,12 +895,10 @@ int xgene_enet_phy_connect(struct net_device *ndev)
}
pdata->phy_speed = SPEED_UNKNOWN;
- phy_dev->supported &= ~SUPPORTED_10baseT_Half &
- ~SUPPORTED_100baseT_Half &
- ~SUPPORTED_1000baseT_Half;
- phy_dev->supported |= SUPPORTED_Pause |
- SUPPORTED_Asym_Pause;
- phy_dev->advertising = phy_dev->supported;
+ phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_support_asym_pause(phy_dev);
return 0;
}
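xgene trims the half-duplex modes one bit at a time because the new helpers take ethtool link-mode bit numbers rather than a SUPPORTED_* word. A hedged sketch of the same trim for a hypothetical full-duplex-only MAC:

static void foo_setup_phy_modes(struct phy_device *phydev)
{
	/* MAC has no half-duplex support at any speed. */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* MAC can generate and honour pause frames independently. */
	phy_support_asym_pause(phydev);
}

phy_support_asym_pause() replaces the old "supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; advertising = supported" dance, keeping the two masks consistent in one place.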
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 024998d6d8c6..6a8e2567f2bd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(struct timer_list *t);
-static int bmac_output(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);
#define DBDMA_SET(x) ( ((x) | (x) << 16) )
@@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
}
-static int
+static netdev_tx_t
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
struct bmac_data *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 0b5429d76bcf..68b9ee489489 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -78,7 +78,7 @@ struct mace_data {
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
@@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
mp->timeout_active = 1;
}
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 137cbb470af2..376f2c2613e7 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -89,7 +89,7 @@ struct mace_frame {
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
@@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev)
* Transmit a frame
*/
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index d52b088ff8f0..becb578211ed 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -57,4 +57,9 @@
#define AQ_NIC_RATE_1G BIT(4)
#define AQ_NIC_RATE_100M BIT(5)
+#define AQ_NIC_RATE_EEE_10G BIT(6)
+#define AQ_NIC_RATE_EEE_5G BIT(7)
+#define AQ_NIC_RATE_EEE_2GS BIT(8)
+#define AQ_NIC_RATE_EEE_1G BIT(9)
+
#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 08c9fa6ca71f..6a633c70f603 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -98,8 +98,8 @@ static void aq_ethtool_stats(struct net_device *ndev,
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
- ARRAY_SIZE(aq_ethtool_queue_stat_names) *
- cfg->vecs) * sizeof(u64));
+ ARRAY_SIZE(aq_ethtool_queue_stat_names) *
+ cfg->vecs) * sizeof(u64));
aq_nic_get_stats(aq_nic, data);
}
@@ -285,6 +285,111 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
return aq_nic_update_interrupt_moderation_settings(aq_nic);
}
+static void aq_ethtool_get_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ if (cfg->wol)
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int aq_ethtool_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ int err = 0;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ cfg->wol |= AQ_NIC_WOL_ENABLED;
+ else
+ cfg->wol &= ~AQ_NIC_WOL_ENABLED;
+ err = device_set_wakeup_enable(&pdev->dev, wol->wolopts);
+
+ return err;
+}
+
+static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
+{
+ u32 rate = 0;
+
+ if (speed & AQ_NIC_RATE_EEE_10G)
+ rate |= SUPPORTED_10000baseT_Full;
+
+ if (speed & AQ_NIC_RATE_EEE_2GS)
+ rate |= SUPPORTED_2500baseX_Full;
+
+ if (speed & AQ_NIC_RATE_EEE_1G)
+ rate |= SUPPORTED_1000baseT_Full;
+
+ return rate;
+}
+
+static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 rate, supported_rates;
+ int err = 0;
+
+ if (!aq_nic->aq_fw_ops->get_eee_rate)
+ return -EOPNOTSUPP;
+
+ err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate,
+ &supported_rates);
+ if (err < 0)
+ return err;
+
+ eee->supported = eee_mask_to_ethtool_mask(supported_rates);
+
+ if (aq_nic->aq_nic_cfg.eee_speeds)
+ eee->advertised = eee->supported;
+
+ eee->lp_advertised = eee_mask_to_ethtool_mask(rate);
+
+ eee->eee_enabled = !!eee->advertised;
+
+ eee->tx_lpi_enabled = eee->eee_enabled;
+ if (eee->advertised & eee->lp_advertised)
+ eee->eee_active = true;
+
+ return 0;
+}
+
+static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 rate, supported_rates;
+ struct aq_nic_cfg_s *cfg;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+
+ if (unlikely(!aq_nic->aq_fw_ops->get_eee_rate ||
+ !aq_nic->aq_fw_ops->set_eee_rate))
+ return -EOPNOTSUPP;
+
+ err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate,
+ &supported_rates);
+ if (err < 0)
+ return err;
+
+ if (eee->eee_enabled) {
+ rate = supported_rates;
+ cfg->eee_speeds = rate;
+ } else {
+ rate = 0;
+ cfg->eee_speeds = 0;
+ }
+
+ return aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate);
+}
+
static int aq_ethtool_nway_reset(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
@@ -403,9 +508,13 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_drvinfo = aq_ethtool_get_drvinfo,
.get_strings = aq_ethtool_get_strings,
.get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
+ .get_wol = aq_ethtool_get_wol,
+ .set_wol = aq_ethtool_set_wol,
.nway_reset = aq_ethtool_nway_reset,
.get_ringparam = aq_get_ringparam,
.set_ringparam = aq_set_ringparam,
+ .get_eee = aq_ethtool_get_eee,
+ .set_eee = aq_ethtool_set_eee,
.get_pauseparam = aq_ethtool_get_pauseparam,
.set_pauseparam = aq_ethtool_set_pauseparam,
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
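With get_wol/set_wol wired into aq_ethtool_ops, the standard ethtool plumbing (ethtool -s <if> wol g, plain ethtool <if>) now reaches the driver. For illustration, a minimal user-space query over the classic SIOCETHTOOL ioctl — a sketch, with "eth0" as a placeholder interface name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	ifr.ifr_data = (void *)&wol;			/* kernel fills wol */

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("supported %#x wolopts %#x\n",
		       wol.supported, wol.wolopts);
	close(fd);
	return 0;
}

Against this driver the query should report WAKE_MAGIC in supported, with wolopts tracking the AQ_NIC_WOL_ENABLED bit kept in the NIC config.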
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 5c00671f248d..e8689241204e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -112,7 +112,7 @@ struct aq_hw_s {
const struct aq_fw_ops *aq_fw_ops;
void __iomem *mmio;
struct aq_hw_link_status_s aq_link_status;
- struct hw_aq_atl_utils_mbox mbox;
+ struct hw_atl_utils_mbox mbox;
struct hw_atl_stats_s last_stats;
struct aq_stats_s curr_stats;
u64 speed;
@@ -124,7 +124,7 @@ struct aq_hw_s {
u32 mbox_addr;
u32 rpc_addr;
u32 rpc_tid;
- struct hw_aq_atl_utils_fw_rpc rpc;
+ struct hw_atl_utils_fw_rpc rpc;
};
struct aq_ring_s;
@@ -204,7 +204,6 @@ struct aq_hw_ops {
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
- int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
};
struct aq_fw_ops {
@@ -228,6 +227,14 @@ struct aq_fw_ops {
int (*update_stats)(struct aq_hw_s *self);
int (*set_flow_control)(struct aq_hw_s *self);
+
+ int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
+ u8 *mac);
+
+ int (*set_eee_rate)(struct aq_hw_s *self, u32 speed);
+
+ int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
+ u32 *supported_rates);
};
#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 26dc6782b475..5fed24446687 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -189,7 +189,7 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
aq_vec_isr(i, (void *)aq_vec);
mod_timer(&self->polling_timer, jiffies +
- AQ_CFG_POLLING_TIMER_INTERVAL);
+ AQ_CFG_POLLING_TIMER_INTERVAL);
}
int aq_nic_ndev_register(struct aq_nic_s *self)
@@ -301,13 +301,13 @@ int aq_nic_start(struct aq_nic_s *self)
unsigned int i = 0U;
err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
- self->mc_list.ar,
- self->mc_list.count);
+ self->mc_list.ar,
+ self->mc_list.count);
if (err < 0)
goto err_exit;
err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
- self->packet_filter);
+ self->packet_filter);
if (err < 0)
goto err_exit;
@@ -327,7 +327,7 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
mod_timer(&self->service_timer, jiffies +
- AQ_CFG_SERVICE_TIMER_INTERVAL);
+ AQ_CFG_SERVICE_TIMER_INTERVAL);
if (self->aq_nic_cfg.is_polling) {
timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
@@ -344,7 +344,7 @@ int aq_nic_start(struct aq_nic_s *self)
}
err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
- AQ_CFG_IRQ_MASK);
+ AQ_CFG_IRQ_MASK);
if (err < 0)
goto err_exit;
}
@@ -889,11 +889,13 @@ void aq_nic_deinit(struct aq_nic_s *self)
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_deinit(aq_vec);
- if (self->power_state == AQ_HW_POWER_STATE_D0) {
- (void)self->aq_fw_ops->deinit(self->aq_hw);
- } else {
- (void)self->aq_hw_ops->hw_set_power(self->aq_hw,
- self->power_state);
+ self->aq_fw_ops->deinit(self->aq_hw);
+
+ if (self->power_state != AQ_HW_POWER_STATE_D0 ||
+ self->aq_hw->aq_nic_cfg->wol) {
+ self->aq_fw_ops->set_power(self->aq_hw,
+ self->power_state,
+ self->ndev->dev_addr);
}
err_exit:;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index fecfc401f95d..c1582f4e8e1b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -36,6 +36,7 @@ struct aq_nic_cfg_s {
u32 flow_control;
u32 link_speed_msk;
u32 vlan_id;
+ u32 wol;
u16 is_mc_list_enabled;
u16 mc_list_count;
bool is_autoneg;
@@ -44,6 +45,7 @@ struct aq_nic_cfg_s {
bool is_lro;
u8 tcs;
struct aq_rss_parameters aq_rss;
+ u32 eee_speeds;
};
#define AQ_NIC_FLAG_STARTED 0x00000004U
@@ -54,6 +56,8 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U
+#define AQ_NIC_WOL_ENABLED BIT(0)
+
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 750007513f9d..1d5d6b8df855 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -84,7 +84,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
const struct aq_hw_ops **ops,
const struct aq_hw_caps_s **caps)
{
- int i = 0;
+ int i;
if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
return -EINVAL;
@@ -107,7 +107,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
int aq_pci_func_init(struct pci_dev *pdev)
{
- int err = 0;
+ int err;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
@@ -141,7 +141,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
char *name, void *aq_vec, cpumask_t *affinity_mask)
{
struct pci_dev *pdev = self->pdev;
- int err = 0;
+ int err;
if (pdev->msix_enabled || pdev->msi_enabled)
err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
@@ -164,7 +164,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
struct pci_dev *pdev = self->pdev;
- unsigned int i = 0U;
+ unsigned int i;
for (i = 32U; i--;) {
if (!((1U << i) & self->msix_entry_mask))
@@ -194,8 +194,8 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
static int aq_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
- struct aq_nic_s *self = NULL;
- int err = 0;
+ struct aq_nic_s *self;
+ int err;
struct net_device *ndev;
resource_size_t mmio_pa;
u32 bar;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index b5f1f62e8e25..3db91446cc67 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -29,8 +29,8 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
goto err_exit;
}
self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
- self->size * self->dx_size,
- &self->dx_ring_pa, GFP_KERNEL);
+ self->size * self->dx_size,
+ &self->dx_ring_pa, GFP_KERNEL);
if (!self->dx_ring) {
err = -ENOMEM;
goto err_exit;
@@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
/* for single fragment packets use build_skb() */
- if (buff->is_eop) {
+ if (buff->is_eop &&
+ buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
skb = build_skb(page_address(buff->page),
- buff->len + AQ_SKB_ALIGN);
+ AQ_CFG_RX_FRAME_MAX);
if (unlikely(!skb)) {
err = -ENOMEM;
goto err_exit;
@@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff->len - ETH_HLEN,
SKB_TRUESIZE(buff->len - ETH_HLEN));
- for (i = 1U, next_ = buff->next,
- buff_ = &self->buff_ring[next_]; true;
- next_ = buff_->next,
- buff_ = &self->buff_ring[next_], ++i) {
- skb_add_rx_frag(skb, i, buff_->page, 0,
- buff_->len,
- SKB_TRUESIZE(buff->len -
- ETH_HLEN));
- buff_->is_cleaned = 1;
-
- if (buff_->is_eop)
- break;
+ if (!buff->is_eop) {
+ for (i = 1U, next_ = buff->next,
+ buff_ = &self->buff_ring[next_];
+ true; next_ = buff_->next,
+ buff_ = &self->buff_ring[next_], ++i) {
+ skb_add_rx_frag(skb, i,
+ buff_->page, 0,
+ buff_->len,
+ SKB_TRUESIZE(buff->len -
+ ETH_HLEN));
+ buff_->is_cleaned = 1;
+
+ if (buff_->is_eop)
+ break;
+ }
}
}
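The rx-clean rework restricts the build_skb() fast path: build_skb()'s frag_size must cover both the payload and the struct skb_shared_info the stack places at the end of the buffer, so a frame close to the full AQ_CFG_RX_FRAME_MAX page budget must take the frag path instead. A sketch of the invariant being enforced (assuming AQ_SKB_ALIGN, defined elsewhere in this file, reserves the shared-info tailroom):

/* Hedged sketch: when handing a whole page to build_skb() is safe. */
static inline bool foo_can_build_skb(unsigned int len)
{
	/* Payload plus skb_shared_info tail must fit the frame budget. */
	return len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN;
}

Without this check, a maximum-sized frame would let skb_shared_info overlap packet data — the corruption case this hunk closes; the new !buff->is_eop guard likewise keeps the frag-append loop off single-fragment packets.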
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 97addfa6f895..2469ed4d86b9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -49,37 +49,37 @@
const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
- .link_speed_msk = HW_ATL_A0_RATE_5G |
- HW_ATL_A0_RATE_2G5 |
- HW_ATL_A0_RATE_1G |
- HW_ATL_A0_RATE_100M,
+ .link_speed_msk = AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
};
const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = HW_ATL_A0_RATE_10G |
- HW_ATL_A0_RATE_5G |
- HW_ATL_A0_RATE_2G5 |
- HW_ATL_A0_RATE_1G |
- HW_ATL_A0_RATE_100M,
+ .link_speed_msk = AQ_NIC_RATE_10G |
+ AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
};
const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = HW_ATL_A0_RATE_5G |
- HW_ATL_A0_RATE_2G5 |
- HW_ATL_A0_RATE_1G |
- HW_ATL_A0_RATE_100M,
+ .link_speed_msk = AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
};
const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = HW_ATL_A0_RATE_2G5 |
- HW_ATL_A0_RATE_1G |
- HW_ATL_A0_RATE_100M,
+ .link_speed_msk = AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
};
static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
@@ -284,7 +284,7 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
/* RSS Ring selection */
hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
- 0xB3333333U : 0x00000000U);
+ 0xB3333333U : 0x00000000U);
/* Multicast filters */
for (i = HW_ATL_A0_MAC_MAX; i--;) {
@@ -325,7 +325,7 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
}
h = (mac_addr[0] << 8) | (mac_addr[1]);
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
- (mac_addr[4] << 8) | mac_addr[5];
+ (mac_addr[4] << 8) | mac_addr[5];
hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
@@ -519,7 +519,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
hw_atl_rdm_rx_desc_data_buff_size_set(self,
AQ_CFG_RX_FRAME_MAX / 1024U,
- aq_ring->idx);
+ aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
@@ -758,7 +758,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
hw_atl_rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled &&
(i <= self->aq_nic_cfg->mc_list_count)) ?
- 1U : 0U, i);
+ 1U : 0U, i);
return aq_hw_err_from_flags(self);
}
@@ -877,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
const struct aq_hw_ops hw_atl_ops_a0 = {
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
.hw_init = hw_atl_a0_hw_init,
- .hw_set_power = hw_atl_utils_hw_set_power,
.hw_reset = hw_atl_a0_hw_reset,
.hw_start = hw_atl_a0_hw_start,
.hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 3c94cff57876..a021dc431ef7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -62,12 +62,6 @@
#define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU
#define HW_ATL_A0_MPI_SPEED_SHIFT 16U
-#define HW_ATL_A0_RATE_10G BIT(0)
-#define HW_ATL_A0_RATE_5G BIT(1)
-#define HW_ATL_A0_RATE_2G5 BIT(3)
-#define HW_ATL_A0_RATE_1G BIT(4)
-#define HW_ATL_A0_RATE_100M BIT(5)
-
#define HW_ATL_A0_TXBUF_MAX 160U
#define HW_ATL_A0_RXBUF_MAX 320U
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 1d44a386e7d3..76d25d594a0f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -51,38 +51,38 @@
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
- .link_speed_msk = HW_ATL_B0_RATE_10G |
- HW_ATL_B0_RATE_5G |
- HW_ATL_B0_RATE_2G5 |
- HW_ATL_B0_RATE_1G |
- HW_ATL_B0_RATE_100M,
+ .link_speed_msk = AQ_NIC_RATE_10G |
+ AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
};
const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = HW_ATL_B0_RATE_10G |
- HW_ATL_B0_RATE_5G |
- HW_ATL_B0_RATE_2G5 |
- HW_ATL_B0_RATE_1G |
- HW_ATL_B0_RATE_100M,
+ .link_speed_msk = AQ_NIC_RATE_10G |
+ AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
};
const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = HW_ATL_B0_RATE_5G |
- HW_ATL_B0_RATE_2G5 |
- HW_ATL_B0_RATE_1G |
- HW_ATL_B0_RATE_100M,
+ .link_speed_msk = AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
};
const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
- .link_speed_msk = HW_ATL_B0_RATE_2G5 |
- HW_ATL_B0_RATE_1G |
- HW_ATL_B0_RATE_100M,
+ .link_speed_msk = AQ_NIC_RATE_2GS |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
};
static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
@@ -935,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
- .hw_set_power = hw_atl_utils_hw_set_power,
.hw_reset = hw_atl_b0_hw_reset,
.hw_start = hw_atl_b0_hw_start,
.hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 28568f5fa74b..b318eefd36ae 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -67,12 +67,6 @@
#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU
#define HW_ATL_B0_MPI_SPEED_SHIFT 16U
-#define HW_ATL_B0_RATE_10G BIT(0)
-#define HW_ATL_B0_RATE_5G BIT(1)
-#define HW_ATL_B0_RATE_2G5 BIT(3)
-#define HW_ATL_B0_RATE_1G BIT(4)
-#define HW_ATL_B0_RATE_100M BIT(5)
-
#define HW_ATL_B0_TXBUF_MAX 160U
#define HW_ATL_B0_RXBUF_MAX 320U
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 10ba035dadb1..be0a3a90dfad 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -1460,3 +1460,11 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp),
glb_cpu_scratch_scp);
}
+
+void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR,
+ HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK,
+ HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT,
+ up_force_intr);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index dfb426f2dc2c..7056c7342afc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -698,4 +698,7 @@ void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
/* set pci register reset disable */
void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+/* set uP Force Interrupt */
+void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
+
#endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index e0cf70120f1d..716674a9b729 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -2387,4 +2387,17 @@
#define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \
(0x00000300u + (scratch_scp) * 0x4)
+/* register address for bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR 0x00000404
+/* bitmask for bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK 0x00000002
+/* inverted bitmask for bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSKN 0xFFFFFFFD
+/* lower bit position of bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT 1
+/* width of bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_WIDTH 1
+/* default value of bitfield uP Force Interrupt */
+#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
+
#endif /* HW_ATL_LLH_INTERNAL_H */
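Every bitfield in these _internal.h headers follows the same macro convention — ADR (register address), MSK/MSKN (mask and its inverse), SHIFT, WIDTH, and a DEFAULT — consumed by a read-modify-write accessor. A hedged sketch of what a setter like hw_atl_mcp_up_force_intr_set() amounts to (the real helper is aq_hw_write_reg_bit(), defined elsewhere in the driver):

/* Generic RMW over one bitfield; names are illustrative. */
static void foo_write_reg_bit(struct aq_hw_s *hw, u32 addr, u32 msk,
			      u32 shift, u32 val)
{
	u32 reg = aq_hw_read_reg(hw, addr);

	reg &= ~msk;			/* clear the field */
	reg |= (val << shift) & msk;	/* insert the new value */
	aq_hw_write_reg(hw, addr, reg);
}

So hw_atl_mcp_up_force_intr_set(self, 1) sets bit 1 of register 0x404, which per the comment forces an interrupt to the on-chip microprocessor; the B1 firmware upload path added later in this patch relies on it.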
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index c965e65d07db..7def1cb8ab9d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -49,6 +49,7 @@
#define FORCE_FLASHLESS 0
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state);
@@ -69,10 +70,10 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
self->fw_ver_actual) == 0) {
*fw_ops = &aq_fw_1x_ops;
} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X,
- self->fw_ver_actual) == 0) {
+ self->fw_ver_actual) == 0) {
*fw_ops = &aq_fw_2x_ops;
} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X,
- self->fw_ver_actual) == 0) {
+ self->fw_ver_actual) == 0) {
*fw_ops = &aq_fw_2x_ops;
} else {
aq_pr_err("Bad FW version detected: %x\n",
@@ -260,7 +261,7 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
- HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
+ HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
10, 1000U);
}
@@ -277,7 +278,7 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self,
HW_ATL_FW_SM_RAM) == 1U,
- 1U, 10000U);
+ 1U, 10000U);
if (err < 0) {
bool is_locked;
@@ -325,17 +326,31 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
err = -ETIME;
goto err_exit;
}
+ if (IS_CHIP_FEATURE(REVISION_B1)) {
+ u32 offset = 0;
+
+ for (; offset < cnt; ++offset) {
+ aq_hw_write_reg(self, 0x328, p[offset]);
+ aq_hw_write_reg(self, 0x32C,
+ (0x80000000 | (0xFFFF & (offset * 4))));
+ hw_atl_mcp_up_force_intr_set(self, 1);
+ /* 1000 times by 10us = 10ms */
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self,
+ 0x32C) & 0xF0000000) !=
+ 0x80000000,
+ 10, 1000);
+ }
+ } else {
+ u32 offset = 0;
- aq_hw_write_reg(self, 0x00000208U, a);
-
- for (++cnt; --cnt;) {
- u32 i = 0U;
+ aq_hw_write_reg(self, 0x208, a);
- aq_hw_write_reg(self, 0x0000020CU, *(p++));
- aq_hw_write_reg(self, 0x00000200U, 0xC000U);
+ for (; offset < cnt; ++offset) {
+ aq_hw_write_reg(self, 0x20C, p[offset]);
+ aq_hw_write_reg(self, 0x200, 0xC000);
- for (i = 1024U;
- (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) {
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, 0x200U) &
+ 0x100) == 0, 10, 1000);
}
}
@@ -379,7 +394,7 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
/* check 10 times by 1ms */
AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
- aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
+ aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
return err;
}
@@ -399,7 +414,7 @@ struct aq_hw_atl_utils_fw_rpc_tid_s {
#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL)
-static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
+int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
{
int err = 0;
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
@@ -411,7 +426,7 @@ static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr,
(u32 *)(void *)&self->rpc,
(rpc_size + sizeof(u32) -
- sizeof(u8)) / sizeof(u32));
+ sizeof(u8)) / sizeof(u32));
if (err < 0)
goto err_exit;
@@ -423,8 +438,8 @@ err_exit:
return err;
}
-static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
- struct hw_aq_atl_utils_fw_rpc **rpc)
+int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
+ struct hw_atl_utils_fw_rpc **rpc)
{
int err = 0;
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
@@ -436,7 +451,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
self->rpc_tid = sw.tid;
AQ_HW_WAIT_FOR(sw.tid ==
- (fw.val =
+ (fw.val =
aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
fw.tid), 1000U, 100U);
if (err < 0)
@@ -459,7 +474,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
(u32 *)(void *)
&self->rpc,
(fw.len + sizeof(u32) -
- sizeof(u8)) /
+ sizeof(u8)) /
sizeof(u32));
if (err < 0)
goto err_exit;
@@ -489,16 +504,16 @@ err_exit:
}
int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
- struct hw_aq_atl_utils_mbox_header *pmbox)
+ struct hw_atl_utils_mbox_header *pmbox)
{
return hw_atl_utils_fw_downld_dwords(self,
- self->mbox_addr,
- (u32 *)(void *)pmbox,
- sizeof(*pmbox) / sizeof(u32));
+ self->mbox_addr,
+ (u32 *)(void *)pmbox,
+ sizeof(*pmbox) / sizeof(u32));
}
void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
- struct hw_aq_atl_utils_mbox *pmbox)
+ struct hw_atl_utils_mbox *pmbox)
{
int err = 0;
@@ -538,7 +553,7 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
{
int err = 0;
u32 transaction_id = 0;
- struct hw_aq_atl_utils_mbox_header mbox;
+ struct hw_atl_utils_mbox_header mbox;
u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
if (state == MPI_RESET) {
@@ -547,8 +562,8 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
transaction_id = mbox.transaction_id;
AQ_HW_WAIT_FOR(transaction_id !=
- (hw_atl_utils_mpi_read_mbox(self, &mbox),
- mbox.transaction_id),
+ (hw_atl_utils_mpi_read_mbox(self, &mbox),
+ mbox.transaction_id),
1000U, 100U);
if (err < 0)
goto err_exit;
@@ -645,9 +660,9 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
/* chip revision */
- l = 0xE3000000U
- | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG))
- | (0x00 << 16);
+ l = 0xE3000000U |
+ (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) |
+ (0x00 << 16);
h = 0x8001300EU;
mac[5] = (u8)(0xFFU & l);
@@ -730,17 +745,9 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
return 0;
}
-int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
- unsigned int power_state)
-{
- hw_atl_utils_mpi_set_speed(self, 0);
- hw_atl_utils_mpi_set_state(self, MPI_POWER);
- return 0;
-}
-
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
- struct hw_aq_atl_utils_mbox mbox;
+ struct hw_atl_utils_mbox mbox;
hw_atl_utils_mpi_read_stats(self, &mbox);
@@ -825,6 +832,81 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
return 0;
}
+static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac)
+{
+ struct hw_atl_utils_fw_rpc *prpc = NULL;
+ unsigned int rpc_size = 0U;
+ int err = 0;
+
+ err = hw_atl_utils_fw_rpc_wait(self, &prpc);
+ if (err < 0)
+ goto err_exit;
+
+ memset(prpc, 0, sizeof(*prpc));
+
+ if (wol_enabled) {
+ rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_wol);
+
+ prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD;
+ prpc->msg_wol.priority =
+ HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR;
+ prpc->msg_wol.pattern_id =
+ HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
+ prpc->msg_wol.wol_packet_type =
+ HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT;
+
+ ether_addr_copy((u8 *)&prpc->msg_wol.wol_pattern, mac);
+ } else {
+ rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_del_id);
+
+ prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL;
+ prpc->msg_wol.pattern_id =
+ HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
+ }
+
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+
+err_exit:
+ return err;
+}
+
+static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
+ u8 *mac)
+{
+ struct hw_atl_utils_fw_rpc *prpc = NULL;
+ unsigned int rpc_size = 0U;
+ int err = 0;
+
+ if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
+ err = aq_fw1x_set_wol(self, 1, mac);
+
+ if (err < 0)
+ goto err_exit;
+
+ rpc_size = sizeof(prpc->msg_id) +
+ sizeof(prpc->msg_enable_wakeup);
+
+ err = hw_atl_utils_fw_rpc_wait(self, &prpc);
+
+ if (err < 0)
+ goto err_exit;
+
+ memset(prpc, 0, rpc_size);
+
+ prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP;
+ prpc->msg_enable_wakeup.pattern_mask = 0x00000002;
+
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+ if (err < 0)
+ goto err_exit;
+ }
+ hw_atl_utils_mpi_set_speed(self, 0);
+ hw_atl_utils_mpi_set_state(self, MPI_POWER);
+
+err_exit:
+ return err;
+}
+
const struct aq_fw_ops aq_fw_1x_ops = {
.init = hw_atl_utils_mpi_create,
.deinit = hw_atl_fw1x_deinit,
@@ -834,5 +916,8 @@ const struct aq_fw_ops aq_fw_1x_ops = {
.set_state = hw_atl_utils_mpi_set_state,
.update_link_status = hw_atl_utils_mpi_get_link_status,
.update_stats = hw_atl_utils_update_stats,
+ .set_power = aq_fw1x_set_power,
+ .set_eee_rate = NULL,
+ .get_eee_rate = NULL,
.set_flow_control = NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index b875590efcbd..3613fca64b58 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -75,7 +75,7 @@ union __packed ip_addr {
} v4;
};
-struct __packed hw_aq_atl_utils_fw_rpc {
+struct __packed hw_atl_utils_fw_rpc {
u32 msg_id;
union {
@@ -101,8 +101,6 @@ struct __packed hw_aq_atl_utils_fw_rpc {
struct {
u32 priority;
u32 wol_packet_type;
- u16 friendly_name_len;
- u16 friendly_name[65];
u32 pattern_id;
u32 next_wol_pattern_offset;
@@ -134,25 +132,112 @@ struct __packed hw_aq_atl_utils_fw_rpc {
u32 pattern_offset;
u32 pattern_size;
} wol_bit_map_pattern;
+
+ struct {
+ u8 mac_addr[ETH_ALEN];
+ } wol_magic_packet_patter;
} wol_pattern;
} msg_wol;
struct {
- u32 is_wake_on_link_down;
- u32 is_wake_on_link_up;
- } msg_wolink;
+ union {
+ u32 pattern_mask;
+
+ struct {
+ u32 reason_arp_v4_pkt : 1;
+ u32 reason_ipv4_ping_pkt : 1;
+ u32 reason_ipv6_ns_pkt : 1;
+ u32 reason_ipv6_ping_pkt : 1;
+ u32 reason_link_up : 1;
+ u32 reason_link_down : 1;
+ u32 reason_maximum : 1;
+ };
+ };
+
+ union {
+ u32 offload_mask;
+ };
+ } msg_enable_wakeup;
+
+ struct {
+ u32 id;
+ } msg_del_id;
};
};
-struct __packed hw_aq_atl_utils_mbox_header {
+struct __packed hw_atl_utils_mbox_header {
u32 version;
u32 transaction_id;
u32 error;
};
-struct __packed hw_aq_atl_utils_mbox {
- struct hw_aq_atl_utils_mbox_header header;
+struct __packed hw_aq_info {
+ u8 reserved[6];
+ u16 phy_fault_code;
+ u16 phy_temperature;
+ u8 cable_len;
+ u8 reserved1;
+ u32 cable_diag_data[4];
+ u8 reserved2[32];
+ u32 caps_lo;
+ u32 caps_hi;
+};
+
+struct __packed hw_atl_utils_mbox {
+ struct hw_atl_utils_mbox_header header;
struct hw_atl_stats_s stats;
+ struct hw_aq_info info;
+};
+
+/* fw2x */
+typedef u32 fw_offset_t;
+
+struct __packed offload_ip_info {
+ u8 v4_local_addr_count;
+ u8 v4_addr_count;
+ u8 v6_local_addr_count;
+ u8 v6_addr_count;
+ fw_offset_t v4_addr;
+ fw_offset_t v4_prefix;
+ fw_offset_t v6_addr;
+ fw_offset_t v6_prefix;
+};
+
+struct __packed offload_port_info {
+ u16 udp_port_count;
+ u16 tcp_port_count;
+ fw_offset_t udp_port;
+ fw_offset_t tcp_port;
+};
+
+struct __packed offload_ka_info {
+ u16 v4_ka_count;
+ u16 v6_ka_count;
+ u32 retry_count;
+ u32 retry_interval;
+ fw_offset_t v4_ka;
+ fw_offset_t v6_ka;
+};
+
+struct __packed offload_rr_info {
+ u32 rr_count;
+ u32 rr_buf_len;
+ fw_offset_t rr_id_x;
+ fw_offset_t rr_buf;
+};
+
+struct __packed offload_info {
+ u32 version;
+ u32 len;
+ u8 mac_addr[ETH_ALEN];
+
+ u8 reserved[2];
+
+ struct offload_ip_info ips;
+ struct offload_port_info ports;
+ struct offload_ka_info kas;
+ struct offload_rr_info rrs;
+ u8 buf[0];
};
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
@@ -181,6 +266,21 @@ enum hal_atl_utils_fw_state_e {
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
+#define HAL_ATLANTIC_UTILS_FW_MSG_PING 0x1U
+#define HAL_ATLANTIC_UTILS_FW_MSG_ARP 0x2U
+#define HAL_ATLANTIC_UTILS_FW_MSG_INJECT 0x3U
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 0x4U
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR 0x10000000U
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN 0x1U
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT 0x2U
+#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 0x5U
+#define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 0x6U
+#define HAL_ATLANTIC_UTILS_FW_MSG_MSM_PFC 0x7U
+#define HAL_ATLANTIC_UTILS_FW_MSG_PROVISIONING 0x8U
+#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_ADD 0x9U
+#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 0xAU
+#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 0xDU
+
enum hw_atl_fw2x_rate {
FW2X_RATE_100M = 0x20,
FW2X_RATE_1G = 0x100,
@@ -286,10 +386,10 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self);
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
- struct hw_aq_atl_utils_mbox_header *pmbox);
+ struct hw_atl_utils_mbox_header *pmbox);
void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
- struct hw_aq_atl_utils_mbox *pmbox);
+ struct hw_atl_utils_mbox *pmbox);
void hw_atl_utils_mpi_set(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state,
@@ -316,9 +416,17 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
int hw_atl_utils_update_stats(struct aq_hw_s *self);
struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
+
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
u32 *p, u32 cnt);
+int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac);
+
+int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
+
+int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
+ struct hw_atl_utils_fw_rpc **rpc);
+
extern const struct aq_fw_ops aq_fw_1x_ops;
extern const struct aq_fw_ops aq_fw_2x_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index e37943760a58..096ca5730887 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -16,11 +16,13 @@
#include "../aq_pci_func.h"
#include "../aq_ring.h"
#include "../aq_vec.h"
+#include "../aq_nic.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364
#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360
+#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334
#define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368
#define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C
@@ -28,6 +30,42 @@
#define HW_ATL_FW2X_MPI_STATE_ADDR 0x370
#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374
+#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
+#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL)
+
+#define HW_ATL_FW2X_CTRL_SLEEP_PROXY BIT(CTRL_SLEEP_PROXY)
+#define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL)
+#define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP)
+#define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE)
+#define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE)
+#define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT)
+
+#define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE)
+#define HW_ATL_FW2X_CAP_EEE_2G5_MASK BIT(CAPS_HI_2P5GBASET_FD_EEE)
+#define HW_ATL_FW2X_CAP_EEE_5G_MASK BIT(CAPS_HI_5GBASET_FD_EEE)
+#define HW_ATL_FW2X_CAP_EEE_10G_MASK BIT(CAPS_HI_10GBASET_FD_EEE)
+
+#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
+#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
+
+struct __packed fw2x_msg_wol_pattern {
+ u8 mask[16];
+ u32 crc;
+};
+
+struct __packed fw2x_msg_wol {
+ u32 msg_id;
+ u8 hw_addr[ETH_ALEN];
+ u8 magic_packet_enabled;
+ u8 filter_count;
+ struct fw2x_msg_wol_pattern filter[HAL_ATLANTIC_WOL_FILTERS_COUNT];
+ u8 link_up_enabled;
+ u8 link_down_enabled;
+ u16 reserved;
+ u32 link_up_timeout;
+ u32 link_down_timeout;
+};
+
static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed);
static int aq_fw2x_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state);
@@ -38,8 +76,12 @@ static int aq_fw2x_init(struct aq_hw_s *self)
/* check 10 times by 1ms */
AQ_HW_WAIT_FOR(0U != (self->mbox_addr =
- aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)),
+ aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)),
1000U, 10U);
+ AQ_HW_WAIT_FOR(0U != (self->rpc_addr =
+ aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR)),
+ 1000U, 100U);
+
return err;
}
@@ -78,6 +120,38 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
return rate;
}
+static u32 fw2x_to_eee_mask(u32 speed)
+{
+ u32 rate = 0;
+
+ if (speed & HW_ATL_FW2X_CAP_EEE_10G_MASK)
+ rate |= AQ_NIC_RATE_EEE_10G;
+ if (speed & HW_ATL_FW2X_CAP_EEE_5G_MASK)
+ rate |= AQ_NIC_RATE_EEE_5G;
+ if (speed & HW_ATL_FW2X_CAP_EEE_2G5_MASK)
+ rate |= AQ_NIC_RATE_EEE_2GS;
+ if (speed & HW_ATL_FW2X_CAP_EEE_1G_MASK)
+ rate |= AQ_NIC_RATE_EEE_1G;
+
+ return rate;
+}
+
+static u32 eee_mask_to_fw2x(u32 speed)
+{
+ u32 rate = 0;
+
+ if (speed & AQ_NIC_RATE_EEE_10G)
+ rate |= HW_ATL_FW2X_CAP_EEE_10G_MASK;
+ if (speed & AQ_NIC_RATE_EEE_5G)
+ rate |= HW_ATL_FW2X_CAP_EEE_5G_MASK;
+ if (speed & AQ_NIC_RATE_EEE_2GS)
+ rate |= HW_ATL_FW2X_CAP_EEE_2G5_MASK;
+ if (speed & AQ_NIC_RATE_EEE_1G)
+ rate |= HW_ATL_FW2X_CAP_EEE_1G_MASK;
+
+ return rate;
+}
+
static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed)
{
u32 val = link_speed_mask_2fw2x_ratemask(speed);
@@ -100,14 +174,27 @@ static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state)
*mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE);
}
+static void aq_fw2x_upd_eee_rate_bits(struct aq_hw_s *self, u32 *mpi_opts,
+ u32 eee_speeds)
+{
+ *mpi_opts &= ~(HW_ATL_FW2X_CAP_EEE_1G_MASK |
+ HW_ATL_FW2X_CAP_EEE_2G5_MASK |
+ HW_ATL_FW2X_CAP_EEE_5G_MASK |
+ HW_ATL_FW2X_CAP_EEE_10G_MASK);
+
+ *mpi_opts |= eee_mask_to_fw2x(eee_speeds);
+}
+
static int aq_fw2x_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state)
{
u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
switch (state) {
case MPI_INIT:
mpi_state &= ~BIT(CAPS_HI_LINK_DROP);
+ aq_fw2x_upd_eee_rate_bits(self, &mpi_state, cfg->eee_speeds);
aq_fw2x_set_mpi_flow_control(self, &mpi_state);
break;
case MPI_DEINIT:
@@ -126,7 +213,7 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self)
{
u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G |
- FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G);
+ FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G);
struct aq_hw_link_status_s *link_status = &self->aq_link_status;
if (speed) {
@@ -175,9 +262,7 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
get_random_bytes(&rnd, sizeof(unsigned int));
- l = 0xE3000000U
- | (0xFFFFU & rnd)
- | (0x00 << 16);
+ l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16);
h = 0x8001300EU;
mac[5] = (u8)(0xFFU & l);
@@ -207,7 +292,7 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
/* Wait FW to report back */
AQ_HW_WAIT_FOR(orig_stats_val !=
(aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
- BIT(CAPS_HI_STATISTICS)),
+ BIT(CAPS_HI_STATISTICS)),
1U, 10000U);
if (err)
return err;
@@ -215,6 +300,135 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
return hw_atl_utils_update_stats(self);
}
+static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
+{
+ struct hw_atl_utils_fw_rpc *rpc = NULL;
+ struct offload_info *cfg = NULL;
+ unsigned int rpc_size = 0U;
+ u32 mpi_opts;
+ int err = 0;
+
+ rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg);
+
+ err = hw_atl_utils_fw_rpc_wait(self, &rpc);
+ if (err < 0)
+ goto err_exit;
+
+ memset(rpc, 0, rpc_size);
+ cfg = (struct offload_info *)(&rpc->msg_id + 1);
+
+ memcpy(cfg->mac_addr, mac, ETH_ALEN);
+ cfg->len = sizeof(*cfg);
+
+ /* Clear bit 0x36C.23 and 0x36C.22 */
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_SLEEP_PROXY;
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_LINK_DROP;
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ err = hw_atl_utils_fw_rpc_call(self, rpc_size);
+ if (err < 0)
+ goto err_exit;
+
+ /* Set bit 0x36C.23 */
+ mpi_opts |= HW_ATL_FW2X_CTRL_SLEEP_PROXY;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
+ HW_ATL_FW2X_CTRL_SLEEP_PROXY), 1U, 10000U);
+
+err_exit:
+ return err;
+}
+
+static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
+{
+ struct hw_atl_utils_fw_rpc *rpc = NULL;
+ struct fw2x_msg_wol *msg = NULL;
+ u32 mpi_opts;
+ int err = 0;
+
+ err = hw_atl_utils_fw_rpc_wait(self, &rpc);
+ if (err < 0)
+ goto err_exit;
+
+ msg = (struct fw2x_msg_wol *)rpc;
+
+ msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL;
+ msg->magic_packet_enabled = true;
+ memcpy(msg->hw_addr, mac, ETH_ALEN);
+
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ mpi_opts &= ~(HW_ATL_FW2X_CTRL_SLEEP_PROXY | HW_ATL_FW2X_CTRL_WOL);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ err = hw_atl_utils_fw_rpc_call(self, sizeof(*msg));
+ if (err < 0)
+ goto err_exit;
+
+ /* Set bit 0x36C.24 */
+ mpi_opts |= HW_ATL_FW2X_CTRL_WOL;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) &
+ HW_ATL_FW2X_CTRL_WOL), 1U, 10000U);
+
+err_exit:
+ return err;
+}
+
+static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
+ u8 *mac)
+{
+ int err = 0;
+
+ if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) {
+ err = aq_fw2x_set_sleep_proxy(self, mac);
+ if (err < 0)
+ goto err_exit;
+ err = aq_fw2x_set_wol_params(self, mac);
+ }
+
+err_exit:
+ return err;
+}
+
+static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+
+ aq_fw2x_upd_eee_rate_bits(self, &mpi_opts, speed);
+
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ return 0;
+}
+
+static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate,
+ u32 *supported_rates)
+{
+ u32 mpi_state;
+ u32 caps_hi;
+ int err = 0;
+ u32 addr = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, info) +
+ offsetof(struct hw_aq_info, caps_hi);
+
+ err = hw_atl_utils_fw_downld_dwords(self, addr, &caps_hi,
+ sizeof(caps_hi) / sizeof(u32));
+
+ if (err)
+ return err;
+
+ *supported_rates = fw2x_to_eee_mask(caps_hi);
+
+ mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+ *rate = fw2x_to_eee_mask(mpi_state);
+
+ return err;
+}
+
static int aq_fw2x_renegotiate(struct aq_hw_s *self)
{
u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
@@ -247,5 +461,8 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.set_state = aq_fw2x_set_state,
.update_link_status = aq_fw2x_update_link_status,
.update_stats = aq_fw2x_update_stats,
- .set_flow_control = aq_fw2x_set_flow_control,
+ .set_power = aq_fw2x_set_power,
+ .set_eee_rate = aq_fw2x_set_eee_rate,
+ .get_eee_rate = aq_fw2x_get_eee_rate,
+ .set_flow_control = aq_fw2x_set_flow_control,
};
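Both aq_fw2x_set_sleep_proxy() and aq_fw2x_set_wol_params() drive the same firmware handshake: stage the request through the RPC area (hw_atl_utils_fw_rpc_wait() to get the buffer, hw_atl_utils_fw_rpc_call() to upload it), then toggle a control bit in 0x36C and poll its mirror in 0x374 until firmware acknowledges. Distilled into one hedged sketch (constants and helpers as in this patch; the RPC buffer is assumed already filled by the caller):

static int foo_fw2x_kick(struct aq_hw_s *hw, u32 ctrl_bit,
			 unsigned int rpc_size)
{
	u32 mpi_opts = aq_hw_read_reg(hw, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
	int err;

	/* Arm: the control bit must make a 0 -> 1 transition. */
	mpi_opts &= ~ctrl_bit;
	aq_hw_write_reg(hw, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);

	err = hw_atl_utils_fw_rpc_call(hw, rpc_size);
	if (err < 0)
		return err;

	/* Fire, then wait for the STATE2 mirror of the same bit. */
	mpi_opts |= ctrl_bit;
	aq_hw_write_reg(hw, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);

	AQ_HW_WAIT_FOR(aq_hw_read_reg(hw, HW_ATL_FW2X_MPI_STATE2_ADDR) &
		       ctrl_bit, 1U, 10000U);	/* sets err on timeout */
	return err;
}

The EEE plumbing is simpler because the rates live directly in CONTROL2: aq_fw2x_set_eee_rate() just rewrites the four CAP_EEE_* bits, and aq_fw2x_get_eee_rate() reads supported rates out of the caps_hi word of the firmware mailbox.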
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 94efc6477bdc..b48260114da3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -12,7 +12,7 @@
#define NIC_MAJOR_DRIVER_VERSION 2
#define NIC_MINOR_DRIVER_VERSION 0
-#define NIC_BUILD_DRIVER_VERSION 3
+#define NIC_BUILD_DRIVER_VERSION 4
#define NIC_REVISION_DRIVER_VERSION 0
#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 6d3221134927..7968c644ad86 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1964,8 +1964,6 @@ static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
if (!alx_reset_mac(hw))
rc = PCI_ERS_RESULT_RECOVERED;
out:
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
rtnl_unlock();
return rc;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b81fbf119bce..63edc5706c09 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -63,7 +63,6 @@
#include <linux/jiffies.h>
#include <linux/mii.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
@@ -3278,7 +3277,6 @@ static int atl1_set_link_ksettings(struct net_device *netdev,
u16 phy_data;
int ret_val = 0;
u16 old_media_type = hw->media_type;
- u32 advertising;
if (netif_running(adapter->netdev)) {
if (netif_msg_link(adapter))
@@ -3312,25 +3310,7 @@ static int atl1_set_link_ksettings(struct net_device *netdev,
hw->media_type = MEDIA_TYPE_10M_HALF;
}
}
- switch (hw->media_type) {
- case MEDIA_TYPE_AUTO_SENSOR:
- advertising =
- ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_Autoneg | ADVERTISED_TP;
- break;
- case MEDIA_TYPE_1000M_FULL:
- advertising =
- ADVERTISED_1000baseT_Full |
- ADVERTISED_Autoneg | ADVERTISED_TP;
- break;
- default:
- advertising = 0;
- break;
- }
+
if (atl1_phy_setup_autoneg_adv(hw)) {
ret_val = -EINVAL;
if (netif_msg_link(adapter))
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index c8d1f8fa4713..6f56276015a4 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -935,18 +935,11 @@ static void nb8800_pause_adv(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
- u32 adv = 0;
if (!phydev)
return;
- if (priv->pause_rx)
- adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
- if (priv->pause_tx)
- adv ^= ADVERTISED_Asym_Pause;
-
- phydev->supported |= adv;
- phydev->advertising |= adv;
+ phy_set_asym_pause(phydev, priv->pause_rx, priv->pause_tx);
}
static int nb8800_open(struct net_device *dev)
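
The hunk above is one of several in this series converting MAC drivers to phylib's pause helpers. A minimal sketch of the same pattern in a generic driver, assuming only that the MAC's rx/tx pause knobs are booleans:

    #include <linux/phy.h>

    /* Let phylib own pause advertisement instead of hand-editing the
     * phydev->supported / phydev->advertising bitmasks.
     */
    static void example_pause_adv(struct phy_device *phydev,
                                  bool rx_pause, bool tx_pause)
    {
            if (!phydev)
                    return;

            /* advertise (possibly asymmetric) pause per direction */
            phy_set_asym_pause(phydev, rx_pause, tx_pause);
    }
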
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 897302adc38e..6bae973d4dce 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -568,12 +568,13 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
/*
* tx request callback
*/
-static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct bcm_enet_desc *desc;
u32 len_stat;
- int ret;
+ netdev_tx_t ret;
priv = netdev_priv(dev);
@@ -890,19 +891,10 @@ static int bcm_enet_open(struct net_device *dev)
}
/* mask with MAC supported features */
- phydev->supported &= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_Pause |
- SUPPORTED_MII);
- phydev->advertising = phydev->supported;
-
- if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
- phydev->advertising |= SUPPORTED_Pause;
- else
- phydev->advertising &= ~SUPPORTED_Pause;
+ phy_support_sym_pause(phydev);
+ phy_set_max_speed(phydev, SPEED_100);
+ phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
+ priv->pause_auto);
phy_attached_info(phydev);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 147045757b10..4122553e224b 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -126,8 +126,8 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
}
/* Ethtool operations */
-static int bcm_sysport_set_rx_csum(struct net_device *dev,
- netdev_features_t wanted)
+static void bcm_sysport_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
u32 reg;
@@ -157,12 +157,10 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
reg &= ~RXCHK_BRCM_TAG_EN;
rxchk_writel(priv, reg, RXCHK_CONTROL);
-
- return 0;
}
-static int bcm_sysport_set_tx_csum(struct net_device *dev,
- netdev_features_t wanted)
+static void bcm_sysport_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
u32 reg;
@@ -177,23 +175,24 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev,
else
reg &= ~tdma_control_bit(priv, TSB_EN);
tdma_writel(priv, reg, TDMA_CONTROL);
-
- return 0;
}
static int bcm_sysport_set_features(struct net_device *dev,
netdev_features_t features)
{
- netdev_features_t changed = features ^ dev->features;
- netdev_features_t wanted = dev->wanted_features;
- int ret = 0;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
- if (changed & NETIF_F_RXCSUM)
- ret = bcm_sysport_set_rx_csum(dev, wanted);
- if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
- ret = bcm_sysport_set_tx_csum(dev, wanted);
+ /* Read CRC forward */
+ if (!priv->is_lite)
+ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+ else
+ priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
+ GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
- return ret;
+ bcm_sysport_set_rx_csum(dev, features);
+ bcm_sysport_set_tx_csum(dev, features);
+
+ return 0;
}
/* Hardware counters must be kept in sync because the order/offset
@@ -285,6 +284,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
+ STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
+ STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
/* Per TX-queue statistics are dynamically appended */
};
@@ -1069,9 +1070,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
u32 reg;
- /* Stop monitoring MPD interrupt */
- intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
/* Disable RXCHK, active filters and Broadcom tag matching */
reg = rxchk_readl(priv, RXCHK_CONTROL);
reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1081,6 +1079,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
/* Clear the MagicPacket detection logic */
mpd_enable_set(priv, false);
+ reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+ if (reg & INTRL2_0_MPD)
+ netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+ if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+ RXCHK_BRCM_TAG_MATCH_MASK;
+ netdev_info(priv->netdev,
+ "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+ }
+
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
@@ -1105,7 +1114,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *txr;
unsigned int ring, ring_bit;
- u32 reg;
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1131,16 +1139,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
- if (priv->irq0_stat & INTRL2_0_MPD)
- netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
-
- if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
- reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
- RXCHK_BRCM_TAG_MATCH_MASK;
- netdev_info(priv->netdev,
- "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
- }
-
if (!priv->is_lite)
goto out;
@@ -1221,6 +1219,7 @@ static void bcm_sysport_poll_controller(struct net_device *dev)
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
struct net_device *dev)
{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct sk_buff *nskb;
struct bcm_tsb *tsb;
u32 csum_info;
@@ -1231,13 +1230,16 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
/* Re-allocate SKB if needed */
if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
nskb = skb_realloc_headroom(skb, sizeof(*tsb));
- dev_kfree_skb(skb);
if (!nskb) {
+ dev_kfree_skb_any(skb);
+ priv->mib.tx_realloc_tsb_failed++;
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
return NULL;
}
+ dev_consume_skb_any(skb);
skb = nskb;
+ priv->mib.tx_realloc_tsb++;
}
tsb = skb_push(skb, sizeof(*tsb));
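
The reordering above frees the original skb only once the reallocation outcome is known, switches to the *_any variants (safe from hard-irq context), and distinguishes a genuine drop (dev_kfree_skb_any) from normal consumption (dev_consume_skb_any) so drop monitors stay accurate. The bare pattern, as a sketch:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Sketch: grow an skb's headroom by 'needed' bytes, with correct
     * skb accounting on both the failure and the success path.
     */
    static struct sk_buff *example_expand_headroom(struct sk_buff *skb,
                                                   unsigned int needed)
    {
            struct sk_buff *nskb;

            if (skb_headroom(skb) >= needed)
                    return skb;

            nskb = skb_realloc_headroom(skb, needed);
            if (!nskb) {
                    dev_kfree_skb_any(skb);         /* a genuine drop */
                    return NULL;
            }
            dev_consume_skb_any(skb);               /* replaced, not dropped */
            return nskb;
    }
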
@@ -1973,16 +1975,14 @@ static int bcm_sysport_open(struct net_device *dev)
else
gib_set_pad_extension(priv);
+ /* Apply features again in case we changed them while interface was
+ * down
+ */
+ bcm_sysport_set_features(dev, dev->features);
+
/* Set MAC address */
umac_set_hw_addr(priv, dev->dev_addr);
- /* Read CRC forward */
- if (!priv->is_lite)
- priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
- else
- priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
- GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
-
phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
0, priv->phy_interface);
if (!phydev) {
@@ -2511,9 +2511,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
dev->netdev_ops = &bcm_sysport_netdev_ops;
netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
- /* HW supported features, none enabled by default */
- dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= dev->features;
+ dev->vlan_features |= dev->features;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = 1;
@@ -2641,9 +2642,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
/* UniMAC receive needs to be turned on */
umac_enable_set(priv, CMD_RX_EN, 1);
- /* Enable the interrupt wake-up source */
- intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
return 0;
@@ -2716,7 +2714,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
struct net_device *dev = dev_get_drvdata(d);
struct bcm_sysport_priv *priv = netdev_priv(dev);
unsigned int i;
- u32 reg;
int ret;
if (!netif_running(dev))
@@ -2760,12 +2757,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
goto out_free_rx_ring;
}
- /* Enable rxhck */
- if (priv->rx_chk_en) {
- reg = rxchk_readl(priv, RXCHK_CONTROL);
- reg |= RXCHK_EN;
- rxchk_writel(priv, reg, RXCHK_CONTROL);
- }
+ /* Restore enabled features */
+ bcm_sysport_set_features(dev, dev->features);
rbuf_init(priv);
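
Taken together, the bcmsysport hunks funnel all checksum-offload programming through bcm_sysport_set_features(), which open() and resume() now call to reapply dev->features after a hardware reset. The shape of that pattern, sketched with hypothetical helpers:

    #include <linux/netdevice.h>

    /* Hypothetical stand-ins for the RX/TX checksum programming; each
     * derives the hardware state purely from the requested features.
     */
    static void example_set_rx_csum(struct net_device *dev,
                                    netdev_features_t features) { }
    static void example_set_tx_csum(struct net_device *dev,
                                    netdev_features_t features) { }

    static int example_set_features(struct net_device *dev,
                                    netdev_features_t features)
    {
            /* refresh state the offloads depend on, then apply both */
            example_set_rx_csum(dev, features);
            example_set_tx_csum(dev, features);
            return 0;
    }

    /* open()/resume() then restore offloads with:
     *      example_set_features(dev, dev->features);
     */
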
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 046c6c1d97fd..a7a230884a87 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -607,6 +607,8 @@ struct bcm_sysport_mib {
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
u32 tx_dma_failed;
+ u32 tx_realloc_tsb;
+ u32 tx_realloc_tsb_failed;
};
/* HW maintains a large list of counters */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 4c94d9218bba..cabc8e49ad24 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -616,7 +616,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
int size; /* ring size: different for Tx and Rx */
- int err;
int i;
BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
@@ -666,7 +665,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
if (!ring->cpu_base) {
dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
ring->mmio_base);
- err = -ENOMEM;
goto err_dma_free;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 122fdb80a789..bbb247116045 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8793,13 +8793,6 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
return result;
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err); /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0e508e5defce..142bc11b9fbb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -494,6 +494,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vf,
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto);
+int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 71362b7f6040..95309b27c7d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3536,6 +3536,16 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
*/
static void bnx2x_config_mf_bw(struct bnx2x *bp)
{
+ /* Workaround for MFW bug.
+ * MFW is not supposed to generate BW attention in
+ * single function mode.
+ */
+ if (!IS_MF(bp)) {
+ DP(BNX2X_MSG_MCP,
+ "Ignoring MF BW config in single function mode\n");
+ return;
+ }
+
if (bp->link_vars.link_up) {
bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
bnx2x_link_sync_notify(bp);
@@ -12894,19 +12904,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void poll_bnx2x(struct net_device *dev)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int i;
-
- for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- napi_schedule(&bnx2x_fp(bp, fp->index, napi));
- }
-}
-#endif
-
static int bnx2x_validate_addr(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -13113,14 +13110,12 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_tx_timeout = bnx2x_tx_timeout,
.ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = poll_bnx2x,
-#endif
.ndo_setup_tc = __bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac,
.ndo_set_vf_vlan = bnx2x_set_vf_vlan,
.ndo_get_vf_config = bnx2x_get_vf_config,
+ .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk,
#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
@@ -14385,14 +14380,6 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
- /* If AER, perform cleanup of the PCIe registers */
- if (bp->flags & AER_ENABLED) {
- if (pci_cleanup_aer_uncorrect_error_status(pdev))
- BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
- else
- DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
- }
-
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 62da46537734..c835f6c7ecd0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -209,7 +209,10 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
*/
__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
- __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
+ if (vf->spoofchk)
+ __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
+ else
+ __clear_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
/* Setup-op rx parameters */
if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
@@ -1269,6 +1272,8 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
bnx2x_vf(bp, i, state) = VF_FREE;
mutex_init(&bnx2x_vf(bp, i, op_mutex));
bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
+ /* enable spoofchk by default */
+ bnx2x_vf(bp, i, spoofchk) = 1;
}
/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
@@ -2632,7 +2637,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
ivi->qos = 0;
ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
ivi->min_tx_rate = 0;
- ivi->spoofchk = 1; /*always enabled */
+ ivi->spoofchk = vf->spoofchk ? 1 : 0;
+ ivi->linkstate = vf->link_cfg;
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
@@ -2950,6 +2956,77 @@ out:
return rc;
}
+int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_virtf *vf;
+ int i, rc = 0;
+
+ vf = BP_VF(bp, idx);
+ if (!vf)
+ return -EINVAL;
+
+ /* nothing to do */
+ if (vf->spoofchk == val)
+ return 0;
+
+ vf->spoofchk = val ? 1 : 0;
+
+ DP(BNX2X_MSG_IOV, "%s spoofchk for VF %d\n",
+ val ? "enabling" : "disabling", idx);
+
+ /* is vf initialized and queue set up? */
+ if (vf->state != VF_ENABLED ||
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ return rc;
+
+	/* validate with logging enabled so the failure is visible in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ /* send queue update ramrods to configure spoofchk */
+ for_each_vfq(vf, i) {
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_update_params *update_params;
+
+ q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+ /* validate the Q is UP */
+ if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ continue;
+
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
+ &update_params->update_flags);
+ if (val) {
+ __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
+ &update_params->update_flags);
+ } else {
+ __clear_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
+ &update_params->update_flags);
+ }
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to %s spoofchk on VF %d - vfq %d\n",
+ val ? "enable" : "disable", idx, i);
+ goto out;
+ }
+ }
+out:
+ if (!rc)
+ DP(BNX2X_MSG_IOV,
+ "%s spoofchk for VF[%d]\n", val ? "Enabled" : "Disabled",
+ idx);
+
+ return rc;
+}
+
/* crc is the first field in the bulletin board. Compute the crc over the
* entire bulletin board excluding the crc field itself. Use the length field
* as the Bulletin Board was posted by a PF with possibly a different version
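
bnx2x_set_vf_spoofchk() above follows the usual .ndo_set_vf_spoofchk shape: validate the VF index, short-circuit when nothing changes, record the setting, and push it to the device only while the VF is live. A stripped-down sketch of that contract — every example_* type and helper here is invented for illustration:

    #include <linux/errno.h>
    #include <linux/netdevice.h>

    struct example_vf {
            bool spoofchk;
            bool enabled;   /* queues up and running */
    };

    struct example_priv {
            int num_vfs;
            struct example_vf *vfs;
    };

    /* Hypothetical hardware hook; a real driver issues device-specific
     * commands (here: queue-update ramrods) for each active queue.
     */
    static int example_push_spoofchk(struct example_priv *p, int vf, bool val)
    {
            return 0;
    }

    static int example_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
    {
            struct example_priv *priv = netdev_priv(dev);

            if (vf < 0 || vf >= priv->num_vfs)
                    return -EINVAL;

            if (priv->vfs[vf].spoofchk == val)
                    return 0;       /* nothing to do */

            priv->vfs[vf].spoofchk = val;

            /* apply immediately only if the VF is initialized and active */
            if (!priv->vfs[vf].enabled)
                    return 0;

            return example_push_spoofchk(priv, vf, val);
    }
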
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index eb814c65152f..b6ebd92ec565 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -142,6 +142,8 @@ struct bnx2x_virtf {
bool flr_clnup_stage; /* true during flr cleanup */
bool malicious; /* true if FW indicated so, until FLR */
+	/* true (1) when spoof checking is enabled */
+ u8 spoofchk;
/* dma */
dma_addr_t fw_stat_map;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index cecbb1d1f587..dd85d790f638 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -111,6 +111,7 @@ enum board_idx {
BCM57452,
BCM57454,
BCM5745x_NPAR,
+ BCM57508,
BCM58802,
BCM58804,
BCM58808,
@@ -152,6 +153,7 @@ static const struct {
[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
+ [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -196,6 +198,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
+ { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -241,15 +244,46 @@ static bool bnxt_vf_pciid(enum board_idx idx)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
-#define BNXT_CP_DB_REARM(db, raw_cons) \
- writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
-
-#define BNXT_CP_DB(db, raw_cons) \
- writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
-
#define BNXT_CP_DB_IRQ_DIS(db) \
writel(DB_CP_IRQ_DIS_FLAGS, db)
+#define BNXT_DB_CQ(db, idx) \
+ writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
+
+#define BNXT_DB_NQ_P5(db, idx) \
+ writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
+
+#define BNXT_DB_CQ_ARM(db, idx) \
+ writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
+
+#define BNXT_DB_NQ_ARM_P5(db, idx) \
+ writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
+
+static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ BNXT_DB_NQ_P5(db, idx);
+ else
+ BNXT_DB_CQ(db, idx);
+}
+
+static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ BNXT_DB_NQ_ARM_P5(db, idx);
+ else
+ BNXT_DB_CQ_ARM(db, idx);
+}
+
+static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
+ db->doorbell);
+ else
+ BNXT_DB_CQ(db, idx);
+}
+
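
The macros and helpers above hide the chip split behind one call site: legacy chips take a 32-bit completion doorbell, while 57500 (P5) chips take a single 64-bit write whose pre-built key encodes the doorbell type and ring id. Reduced to its essence — the struct layout here is an assumption, not the driver's exact bnxt_db_info:

    #include <linux/io.h>

    struct example_db {
            void __iomem *doorbell;
            u64 db_key64;   /* pre-built type/ring-id key for P5 chips */
    };

    static void example_db_ring(struct example_db *db, bool chip_p5,
                                u64 dbr_type, u32 idx)
    {
            if (chip_p5)    /* one 64-bit write; assumes writeq is usable */
                    writeq(db->db_key64 | dbr_type | idx, db->doorbell);
            else
                    writel(idx, db->doorbell);      /* legacy 32-bit doorbell */
    }
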
const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_512_AND_SMALLER,
TX_BD_FLAGS_LHINT_512_TO_1023,
@@ -341,6 +375,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
+ void __iomem *db = txr->tx_db.doorbell;
void *pdata = tx_push_buf->data;
u64 *end;
int j, push_len;
@@ -398,12 +433,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
- __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
- __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+ __iowrite64_copy(db, tx_push_buf, 16);
+ __iowrite32_copy(db + 4, tx_push_buf + 1,
(push_len - 16) << 1);
} else {
- __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
- push_len);
+ __iowrite64_copy(db, tx_push_buf, push_len);
}
goto tx_done;
@@ -505,7 +539,7 @@ normal_tx:
txr->tx_prod = prod;
if (!skb->xmit_more || netif_xmit_stopped(txq))
- bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
+ bnxt_db_write(bp, &txr->tx_db, prod);
tx_done:
@@ -513,7 +547,7 @@ tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (skb->xmit_more && !tx_buf->is_push)
- bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
+ bnxt_db_write(bp, &txr->tx_db, prod);
netif_tx_stop_queue(txq);
@@ -776,11 +810,11 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
return 0;
}
-static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
u32 agg_bufs)
{
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt *bp = bnapi->bp;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u16 sw_prod = rxr->rx_sw_agg_prod;
@@ -903,12 +937,13 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return skb;
}
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
struct sk_buff *skb, u16 cp_cons,
u32 agg_bufs)
{
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u32 i;
@@ -955,7 +990,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
* allocated already.
*/
rxr->rx_agg_prod = prod;
- bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
return NULL;
}
@@ -1012,10 +1047,9 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
return skb;
}
-static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
+static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 *raw_cons, void *cmp)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct rx_cmp *rxcmp = cmp;
u32 tmp_raw_cons = *raw_cons;
u8 cmp_type, agg_bufs = 0;
@@ -1141,11 +1175,11 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons_rx_buf->data = NULL;
}
-static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
- u16 cp_cons, u32 agg_bufs)
+static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
+ u32 agg_bufs)
{
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
}
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
@@ -1339,13 +1373,13 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
}
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
- struct bnxt_napi *bnapi,
+ struct bnxt_cp_ring_info *cpr,
u32 *raw_cons,
struct rx_tpa_end_cmp *tpa_end,
struct rx_tpa_end_cmp_ext *tpa_end1,
u8 *event)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u8 agg_id = TPA_END_AGG_ID(tpa_end);
u8 *data_ptr, agg_bufs;
@@ -1357,7 +1391,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
void *data;
if (unlikely(bnapi->in_reset)) {
- int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
+ int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
if (rc < 0)
return ERR_PTR(-EBUSY);
@@ -1383,7 +1417,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
if (agg_bufs > MAX_SKB_FRAGS)
netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
agg_bufs, (int)MAX_SKB_FRAGS);
@@ -1393,7 +1427,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
return NULL;
}
} else {
@@ -1402,7 +1436,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
return NULL;
}
@@ -1417,7 +1451,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
kfree(data);
- bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1425,7 +1459,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
return NULL;
@@ -1479,10 +1513,10 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
* -ENOMEM - packet aborted due to out of memory
* -EIO - packet aborted due to hw error indicated in BD
*/
-static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
- u8 *event)
+static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ u32 *raw_cons, u8 *event)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
struct net_device *dev = bp->dev;
struct rx_cmp *rxcmp;
@@ -1521,7 +1555,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
goto next_rx_no_prod_no_len;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
- skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
+ skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
(struct rx_tpa_end_cmp *)rxcmp,
(struct rx_tpa_end_cmp_ext *)rxcmp1, event);
@@ -1542,7 +1576,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
data = rx_buf->data;
data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
- int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+ int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
bnxt_sched_reset(bp, rxr);
return rc1;
@@ -1565,7 +1599,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
rc = -EIO;
goto next_rx;
@@ -1602,7 +1636,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
@@ -1664,10 +1698,10 @@ next_rx_no_prod_no_len:
/* In netpoll mode, if we are using a combined completion ring, we need to
* discard the rx packets and recycle the buffers.
*/
-static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
+static int bnxt_force_rx_discard(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
u32 *raw_cons, u8 *event)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 tmp_raw_cons = *raw_cons;
struct rx_cmp_ext *rxcmp1;
struct rx_cmp *rxcmp;
@@ -1697,7 +1731,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
tpa_end1->rx_tpa_end_cmp_errors_v2 |=
cpu_to_le32(RX_TPA_END_CMP_ERRORS);
}
- return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
+ return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}
#define BNXT_GET_EVENT_PORT(data) \
@@ -1848,7 +1882,7 @@ static irqreturn_t bnxt_inta(int irq, void *dev_instance)
}
/* disable ring IRQ */
- BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
+ BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
/* Return here if interrupt is shared and is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
@@ -1858,9 +1892,10 @@ static irqreturn_t bnxt_inta(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ int budget)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
u32 cons;
int tx_pkts = 0;
@@ -1868,6 +1903,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
u8 event = 0;
struct tx_cmp *txcmp;
+ cpr->has_more_work = 0;
while (1) {
int rc;
@@ -1881,16 +1917,22 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
* reading any further.
*/
dma_rmb();
+ cpr->had_work_done = 1;
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh))
+ if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
rx_pkts = budget;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ if (budget)
+ cpr->has_more_work = 1;
+ break;
+ }
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
if (likely(budget))
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+ rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
else
- rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
+ rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
if (likely(rc >= 0))
rx_pkts += rc;
@@ -1913,39 +1955,60 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (rx_pkts == budget)
+ if (rx_pkts && rx_pkts == budget) {
+ cpr->has_more_work = 1;
break;
+ }
}
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
- void __iomem *db = txr->tx_doorbell;
u16 prod = txr->tx_prod;
/* Sync BD data before updating doorbell */
wmb();
- bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod);
+ bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
}
cpr->cp_raw_cons = raw_cons;
- /* ACK completion ring before freeing tx ring and producing new
- * buffers in rx/agg rings to prevent overflowing the completion
- * ring.
- */
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnapi->tx_pkts += tx_pkts;
+ bnapi->events |= event;
+ return rx_pkts;
+}
- if (tx_pkts)
- bnapi->tx_int(bp, bnapi, tx_pkts);
+static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
+{
+ if (bnapi->tx_pkts) {
+ bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
+ bnapi->tx_pkts = 0;
+ }
- if (event & BNXT_RX_EVENT) {
+ if (bnapi->events & BNXT_RX_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
- if (event & BNXT_AGG_EVENT)
- bnxt_db_write(bp, rxr->rx_agg_doorbell,
- DB_KEY_RX | rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ if (bnapi->events & BNXT_AGG_EVENT)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
}
+ bnapi->events = 0;
+}
+
+static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ int budget)
+{
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ int rx_pkts;
+
+ rx_pkts = __bnxt_poll_work(bp, cpr, budget);
+
+ /* ACK completion ring before freeing tx ring and producing new
+ * buffers in rx/agg rings to prevent overflowing the completion
+ * ring.
+ */
+ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
+ __bnxt_poll_work_done(bp, bnapi);
return rx_pkts;
}
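
Splitting __bnxt_poll_work() (gather) from __bnxt_poll_work_done() (flush) lets the new P5 path batch several child completion rings before flushing once, while the legacy bnxt_poll_work() above preserves the old ordering: ACK the completion ring before producing new buffers. That ordering, sketched with placeholder helpers:

    /* Placeholder helpers; a real driver reaps completions, writes the
     * CQ doorbell, reclaims TX descriptors and replenishes RX rings.
     */
    static int example_gather(void *ring, int budget) { return 0; }
    static void example_ack_cq(void *ring) { }
    static void example_flush_tx_rx(void *ring) { }

    static int example_poll_work(void *ring, int budget)
    {
            int done = example_gather(ring, budget);

            /* ACK the completion ring first so that freeing TX buffers
             * and posting new RX/agg buffers cannot overflow it.
             */
            example_ack_cq(ring);
            example_flush_tx_rx(ring);
            return done;
    }
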
@@ -1984,7 +2047,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
- rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+ rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
if (likely(rc == -EIO) && budget)
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
@@ -2003,16 +2066,15 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
}
cpr->cp_raw_cons = raw_cons;
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
- bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
+ BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (event & BNXT_AGG_EVENT)
- bnxt_db_write(bp, rxr->rx_agg_doorbell,
- DB_KEY_RX | rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
napi_complete_done(napi, rx_pkts);
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
}
return rx_pkts;
}
@@ -2025,15 +2087,17 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
int work_done = 0;
while (1) {
- work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+ work_done += bnxt_poll_work(bp, cpr, budget - work_done);
- if (work_done >= budget)
+ if (work_done >= budget) {
+ if (!budget)
+ BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
break;
+ }
if (!bnxt_has_work(bp, cpr)) {
if (napi_complete_done(napi, work_done))
- BNXT_CP_DB_REARM(cpr->cp_doorbell,
- cpr->cp_raw_cons);
+ BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
break;
}
}
@@ -2050,6 +2114,104 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int i, work_done = 0;
+
+ for (i = 0; i < 2; i++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+
+ if (cpr2) {
+ work_done += __bnxt_poll_work(bp, cpr2,
+ budget - work_done);
+ cpr->has_more_work |= cpr2->has_more_work;
+ }
+ }
+ return work_done;
+}
+
+static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
+ u64 dbr_type, bool all)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+ struct bnxt_db_info *db;
+
+ if (cpr2 && (all || cpr2->had_work_done)) {
+ db = &cpr2->cp_db;
+ writeq(db->db_key64 | dbr_type |
+ RING_CMP(cpr2->cp_raw_cons), db->doorbell);
+ cpr2->had_work_done = 0;
+ }
+ }
+ __bnxt_poll_work_done(bp, bnapi);
+}
+
+static int bnxt_poll_p5(struct napi_struct *napi, int budget)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 raw_cons = cpr->cp_raw_cons;
+ struct bnxt *bp = bnapi->bp;
+ struct nqe_cn *nqcmp;
+ int work_done = 0;
+ u32 cons;
+
+ if (cpr->has_more_work) {
+ cpr->has_more_work = 0;
+ work_done = __bnxt_poll_cqs(bp, bnapi, budget);
+ if (cpr->has_more_work) {
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
+ return work_done;
+ }
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
+ if (napi_complete_done(napi, work_done))
+ BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
+ return work_done;
+ }
+ while (1) {
+ cons = RING_CMP(raw_cons);
+ nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
+ false);
+ cpr->cp_raw_cons = raw_cons;
+ if (napi_complete_done(napi, work_done))
+ BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
+ cpr->cp_raw_cons);
+ return work_done;
+ }
+
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+
+ if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
+ u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
+ struct bnxt_cp_ring_info *cpr2;
+
+ cpr2 = cpr->cp_ring_arr[idx];
+ work_done += __bnxt_poll_work(bp, cpr2,
+ budget - work_done);
+ cpr->has_more_work = cpr2->has_more_work;
+ } else {
+ bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ if (cpr->has_more_work)
+ break;
+ }
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
+ cpr->cp_raw_cons = raw_cons;
+ return work_done;
+}
+
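
bnxt_poll_p5() above adds a second level of indirection: NAPI services a notification queue (NQ), and each NQ entry names, via cq_handle_low, the child completion ring to poll. A compressed sketch of that dispatch loop, with illustrative types and stubbed valid/poll helpers:

    #include <linux/types.h>

    #define EXAMPLE_NQ_CN_CQ        1       /* hypothetical "CQ notification" type */

    struct example_nqe {
            u16 type;
            u32 cq_handle;
    };

    /* Stubs: a real driver checks a valid bit against the consumer
     * count, and polls the named completion ring up to the budget.
     */
    static bool example_nqe_valid(const struct example_nqe *e, u32 cons)
    {
            return false;
    }

    static int example_poll_cq(u32 cq_handle, int budget)
    {
            return 0;
    }

    static int example_poll_nq(struct example_nqe *nq, u32 nq_size,
                               u32 *cons, int budget)
    {
            int work = 0;

            while (work < budget) {
                    struct example_nqe *e = &nq[*cons % nq_size];

                    if (!example_nqe_valid(e, *cons))
                            break;          /* NQ empty: arm and exit */
                    if (e->type == EXAMPLE_NQ_CN_CQ)
                            work += example_poll_cq(e->cq_handle,
                                                    budget - work);
                    (*cons)++;
            }
            return work;
    }
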
static void bnxt_free_tx_skbs(struct bnxt *bp)
{
int i, max_idx;
@@ -2195,60 +2357,73 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
-static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
int i;
- for (i = 0; i < ring->nr_pages; i++) {
- if (!ring->pg_arr[i])
+ for (i = 0; i < rmem->nr_pages; i++) {
+ if (!rmem->pg_arr[i])
continue;
- dma_free_coherent(&pdev->dev, ring->page_size,
- ring->pg_arr[i], ring->dma_arr[i]);
+ dma_free_coherent(&pdev->dev, rmem->page_size,
+ rmem->pg_arr[i], rmem->dma_arr[i]);
- ring->pg_arr[i] = NULL;
+ rmem->pg_arr[i] = NULL;
}
- if (ring->pg_tbl) {
- dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
- ring->pg_tbl, ring->pg_tbl_map);
- ring->pg_tbl = NULL;
+ if (rmem->pg_tbl) {
+ dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
+ rmem->pg_tbl, rmem->pg_tbl_map);
+ rmem->pg_tbl = NULL;
}
- if (ring->vmem_size && *ring->vmem) {
- vfree(*ring->vmem);
- *ring->vmem = NULL;
+ if (rmem->vmem_size && *rmem->vmem) {
+ vfree(*rmem->vmem);
+ *rmem->vmem = NULL;
}
}
-static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
- int i;
struct pci_dev *pdev = bp->pdev;
+ u64 valid_bit = 0;
+ int i;
- if (ring->nr_pages > 1) {
- ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
- ring->nr_pages * 8,
- &ring->pg_tbl_map,
+ if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
+ valid_bit = PTU_PTE_VALID;
+ if (rmem->nr_pages > 1) {
+ rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
+ rmem->nr_pages * 8,
+ &rmem->pg_tbl_map,
GFP_KERNEL);
- if (!ring->pg_tbl)
+ if (!rmem->pg_tbl)
return -ENOMEM;
}
- for (i = 0; i < ring->nr_pages; i++) {
- ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
- ring->page_size,
- &ring->dma_arr[i],
+ for (i = 0; i < rmem->nr_pages; i++) {
+ u64 extra_bits = valid_bit;
+
+ rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ rmem->page_size,
+ &rmem->dma_arr[i],
GFP_KERNEL);
- if (!ring->pg_arr[i])
+ if (!rmem->pg_arr[i])
return -ENOMEM;
- if (ring->nr_pages > 1)
- ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
+ if (rmem->nr_pages > 1) {
+ if (i == rmem->nr_pages - 2 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_NEXT_TO_LAST;
+ else if (i == rmem->nr_pages - 1 &&
+ (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+ extra_bits |= PTU_PTE_LAST;
+ rmem->pg_tbl[i] =
+ cpu_to_le64(rmem->dma_arr[i] | extra_bits);
+ }
}
- if (ring->vmem_size) {
- *ring->vmem = vzalloc(ring->vmem_size);
- if (!(*ring->vmem))
+ if (rmem->vmem_size) {
+ *rmem->vmem = vzalloc(rmem->vmem_size);
+ if (!(*rmem->vmem))
return -ENOMEM;
}
return 0;
@@ -2278,10 +2453,10 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
rxr->rx_agg_bmap = NULL;
ring = &rxr->rx_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
ring = &rxr->rx_agg_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
}
}
@@ -2308,15 +2483,16 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
if (rc < 0)
return rc;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
+ ring->grp_idx = i;
if (agg_rings) {
u16 mem_size;
ring = &rxr->rx_agg_ring_struct;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
@@ -2359,7 +2535,7 @@ static void bnxt_free_tx_rings(struct bnxt *bp)
ring = &txr->tx_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
}
}
@@ -2390,7 +2566,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
ring = &txr->tx_ring_struct;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
@@ -2436,6 +2612,7 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
struct bnxt_ring_struct *ring;
+ int j;
if (!bnapi)
continue;
@@ -2443,12 +2620,51 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
cpr = &bnapi->cp_ring;
ring = &cpr->cp_ring_struct;
- bnxt_free_ring(bp, ring);
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ for (j = 0; j < 2; j++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+
+ if (cpr2) {
+ ring = &cpr2->cp_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+ kfree(cpr2);
+ cpr->cp_ring_arr[j] = NULL;
+ }
+ }
}
}
+static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+ struct bnxt_cp_ring_info *cpr;
+ int rc;
+
+ cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
+ if (!cpr)
+ return NULL;
+
+ ring = &cpr->cp_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->cp_desc_ring;
+ rmem->dma_arr = cpr->cp_desc_mapping;
+ rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
+ rc = bnxt_alloc_ring(bp, rmem);
+ if (rc) {
+ bnxt_free_ring(bp, rmem);
+ kfree(cpr);
+ cpr = NULL;
+ }
+ return cpr;
+}
+
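
With BNXT_RMEM_RING_PTE_FLAG set, the rewritten bnxt_alloc_ring() earlier in this file marks every page-table entry PTU_PTE_VALID and tags the final two NEXT_TO_LAST/LAST so the chip can detect the ring boundary. The tagging loop in isolation, assuming the PTU_PTE_* defines this series adds to bnxt.h:

    /* Sketch of the PTE tagging applied to a ring's page table;
     * dma_arr[] holds per-page DMA addresses, pg_tbl[] the PTEs the
     * hardware walks. PTU_PTE_* come from bnxt.h as added here.
     */
    static void example_fill_pg_tbl(__le64 *pg_tbl, const u64 *dma_arr,
                                    int nr_pages, bool ring_pte)
    {
            int i;

            for (i = 0; i < nr_pages; i++) {
                    u64 extra = PTU_PTE_VALID;

                    if (ring_pte && i == nr_pages - 2)
                            extra |= PTU_PTE_NEXT_TO_LAST;
                    else if (ring_pte && i == nr_pages - 1)
                            extra |= PTU_PTE_LAST;
                    pg_tbl[i] = cpu_to_le64(dma_arr[i] | extra);
            }
    }
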
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
+ bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
int i, rc, ulp_base_vec, ulp_msix;
ulp_msix = bnxt_get_ulp_msix_num(bp);
@@ -2462,9 +2678,10 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
continue;
cpr = &bnapi->cp_ring;
+ cpr->bnapi = bnapi;
ring = &cpr->cp_ring_struct;
- rc = bnxt_alloc_ring(bp, ring);
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
@@ -2472,6 +2689,29 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
ring->map_idx = i + ulp_msix;
else
ring->map_idx = i;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ continue;
+
+ if (i < bp->rx_nr_rings) {
+ struct bnxt_cp_ring_info *cpr2 =
+ bnxt_alloc_cp_sub_ring(bp);
+
+ cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
+ if (!cpr2)
+ return -ENOMEM;
+ cpr2->bnapi = bnapi;
+ }
+ if ((sh && i < bp->tx_nr_rings) ||
+ (!sh && i >= bp->rx_nr_rings)) {
+ struct bnxt_cp_ring_info *cpr2 =
+ bnxt_alloc_cp_sub_ring(bp);
+
+ cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
+ if (!cpr2)
+ return -ENOMEM;
+ cpr2->bnapi = bnapi;
+ }
}
return 0;
}
@@ -2482,6 +2722,7 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_ring_mem_info *rmem;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_tx_ring_info *txr;
@@ -2492,31 +2733,34 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
cpr = &bnapi->cp_ring;
ring = &cpr->cp_ring_struct;
- ring->nr_pages = bp->cp_nr_pages;
- ring->page_size = HW_CMPD_RING_SIZE;
- ring->pg_arr = (void **)cpr->cp_desc_ring;
- ring->dma_arr = cpr->cp_desc_mapping;
- ring->vmem_size = 0;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->cp_desc_ring;
+ rmem->dma_arr = cpr->cp_desc_mapping;
+ rmem->vmem_size = 0;
rxr = bnapi->rx_ring;
if (!rxr)
goto skip_rx;
ring = &rxr->rx_ring_struct;
- ring->nr_pages = bp->rx_nr_pages;
- ring->page_size = HW_RXBD_RING_SIZE;
- ring->pg_arr = (void **)rxr->rx_desc_ring;
- ring->dma_arr = rxr->rx_desc_mapping;
- ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
- ring->vmem = (void **)&rxr->rx_buf_ring;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
ring = &rxr->rx_agg_ring_struct;
- ring->nr_pages = bp->rx_agg_nr_pages;
- ring->page_size = HW_RXBD_RING_SIZE;
- ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
- ring->dma_arr = rxr->rx_agg_desc_mapping;
- ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
- ring->vmem = (void **)&rxr->rx_agg_ring;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_ring;
skip_rx:
txr = bnapi->tx_ring;
@@ -2524,12 +2768,13 @@ skip_rx:
continue;
ring = &txr->tx_ring_struct;
- ring->nr_pages = bp->tx_nr_pages;
- ring->page_size = HW_RXBD_RING_SIZE;
- ring->pg_arr = (void **)txr->tx_desc_ring;
- ring->dma_arr = txr->tx_desc_mapping;
- ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
- ring->vmem = (void **)&txr->tx_buf_ring;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->tx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
}
}
@@ -2539,8 +2784,8 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
u32 prod;
struct rx_bd **rx_buf_ring;
- rx_buf_ring = (struct rx_bd **)ring->pg_arr;
- for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+ rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
+ for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
int j;
struct rx_bd *rxbd;
@@ -2642,7 +2887,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
static void bnxt_init_cp_rings(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
@@ -2651,6 +2896,17 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
ring->fw_ring_id = INVALID_HW_RING_ID;
cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
+ for (j = 0; j < 2; j++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+
+ if (!cpr2)
+ continue;
+
+ ring = &cpr2->cp_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
+ cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
+ }
}
}
@@ -2754,10 +3010,12 @@ static void bnxt_init_vnics(struct bnxt *bp)
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ int j;
vnic->fw_vnic_id = INVALID_HW_RING_ID;
- vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
- vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
+ for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
+ vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
+
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
@@ -2971,6 +3229,9 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
}
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ goto vnic_skip_grps;
+
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
max_rings = bp->rx_nr_rings;
else
@@ -2981,7 +3242,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
rc = -ENOMEM;
goto out;
}
-
+vnic_skip_grps:
if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
!(vnic->flags & BNXT_VNIC_RSS_FLAG))
continue;
@@ -3010,10 +3271,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
- dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
- bp->hwrm_cmd_resp_dma_addr);
-
- bp->hwrm_cmd_resp_addr = NULL;
+ if (bp->hwrm_cmd_resp_addr) {
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+ bp->hwrm_cmd_resp_dma_addr);
+ bp->hwrm_cmd_resp_addr = NULL;
+ }
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -3034,7 +3296,7 @@ static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
if (bp->hwrm_short_cmd_req_addr) {
struct pci_dev *pdev = bp->pdev;
- dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
bp->hwrm_short_cmd_req_addr,
bp->hwrm_short_cmd_req_dma_addr);
bp->hwrm_short_cmd_req_addr = NULL;
@@ -3046,7 +3308,7 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
struct pci_dev *pdev = bp->pdev;
bp->hwrm_short_cmd_req_addr =
- dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
+ dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
&bp->hwrm_short_cmd_req_dma_addr,
GFP_KERNEL);
if (!bp->hwrm_short_cmd_req_addr)
@@ -3070,6 +3332,13 @@ static void bnxt_free_stats(struct bnxt *bp)
bp->hw_rx_port_stats = NULL;
}
+ if (bp->hw_tx_port_stats_ext) {
+ dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
+ bp->hw_tx_port_stats_ext,
+ bp->hw_tx_port_stats_ext_map);
+ bp->hw_tx_port_stats_ext = NULL;
+ }
+
if (bp->hw_rx_port_stats_ext) {
dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
bp->hw_rx_port_stats_ext,
@@ -3144,6 +3413,13 @@ static int bnxt_alloc_stats(struct bnxt *bp)
if (!bp->hw_rx_port_stats_ext)
return 0;
+ if (bp->hwrm_spec_code >= 0x10902) {
+ bp->hw_tx_port_stats_ext =
+ dma_zalloc_coherent(&pdev->dev,
+ sizeof(struct tx_port_stats_ext),
+ &bp->hw_tx_port_stats_ext_map,
+ GFP_KERNEL);
+ }
bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
}
return 0;
@@ -3282,6 +3558,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
bp->bnapi[i] = bnapi;
bp->bnapi[i]->index = i;
bp->bnapi[i]->bp = bp;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_cp_ring_info *cpr =
+ &bp->bnapi[i]->cp_ring;
+
+ cpr->cp_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ }
}
bp->rx_ring = kcalloc(bp->rx_nr_rings,
@@ -3291,7 +3574,15 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
return -ENOMEM;
for (i = 0; i < bp->rx_nr_rings; i++) {
- bp->rx_ring[i].bnapi = bp->bnapi[i];
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ rxr->rx_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ rxr->rx_agg_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ }
+ rxr->bnapi = bp->bnapi[i];
bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
}
@@ -3313,12 +3604,16 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
j = bp->rx_nr_rings;
for (i = 0; i < bp->tx_nr_rings; i++, j++) {
- bp->tx_ring[i].bnapi = bp->bnapi[j];
- bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ txr->tx_ring_struct.ring_mem.flags =
+ BNXT_RMEM_RING_PTE_FLAG;
+ txr->bnapi = bp->bnapi[j];
+ bp->bnapi[j]->tx_ring = txr;
bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
if (i >= bp->tx_nr_rings_xdp) {
- bp->tx_ring[i].txq_index = i -
- bp->tx_nr_rings_xdp;
+ txr->txq_index = i - bp->tx_nr_rings_xdp;
bp->bnapi[j]->tx_int = bnxt_tx_int;
} else {
bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
@@ -3378,7 +3673,7 @@ static void bnxt_disable_int(struct bnxt *bp)
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID)
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
}
}
@@ -3414,7 +3709,7 @@ static void bnxt_enable_int(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
}
}
@@ -3447,12 +3742,27 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
- if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
+ if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
+ if (msg_len > bp->hwrm_max_ext_req_len ||
+ !bp->hwrm_short_cmd_req_addr)
+ return -EINVAL;
+ }
+
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+ msg_len > BNXT_HWRM_MAX_REQ_LEN) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
+ u16 max_msg_len;
+
+		/* Bound the maximum extended request length for the short
+		 * cmd format; hwrm_max_ext_req_len holds either the value
+		 * reported by the device or the maximum request length the
+		 * driver supports internally.
+		 */
+ max_msg_len = bp->hwrm_max_ext_req_len;
memcpy(short_cmd_req, req, msg_len);
- memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
- msg_len);
+ if (msg_len < max_msg_len)
+ memset(short_cmd_req + msg_len, 0,
+ max_msg_len - msg_len);
short_input.req_type = req->req_type;
short_input.signature =
@@ -3981,13 +4291,48 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+ struct bnxt_ring_grp_info *grp_info;
+
+ grp_info = &bp->grp_info[ring->grp_idx];
+ return grp_info->cp_fw_ring_id;
+}
+
+static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ struct bnxt_cp_ring_info *cpr;
+
+ cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
+ return cpr->cp_ring_struct.fw_ring_id;
+ } else {
+ return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
+ }
+}
+
+static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_napi *bnapi = txr->bnapi;
+ struct bnxt_cp_ring_info *cpr;
+
+ cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
+ return cpr->cp_ring_struct.fw_ring_id;
+ } else {
+ return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
+ }
+}
+
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
u32 i, j, max_rings;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input req = {0};
- if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
+ vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
@@ -4018,6 +4363,51 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
+ struct hwrm_vnic_rss_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ if (!set_rss) {
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return 0;
+ }
+ req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+ nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
+ for (i = 0, k = 0; i < nr_ctxs; i++) {
+ __le16 *ring_tbl = vnic->rss_table;
+ int rc;
+
+ req.ring_table_pair_index = i;
+ req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+ for (j = 0; j < 64; j++) {
+ u16 ring_id;
+
+ ring_id = rxr->rx_ring_struct.fw_ring_id;
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ rxr++;
+ k++;
+ if (k == max_rings) {
+ k = 0;
+ rxr = &bp->rx_ring[0];
+ }
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -EIO;
+ }
+ return 0;
+}
+
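
bnxt_hwrm_vnic_set_rss_p5() above writes the indirection table as (RX ring id, companion CQ id) pairs, 64 pairs per RSS context, hence nr_ctxs = DIV_ROUND_UP(rx_nr_rings, 64) — e.g. 200 RX rings need 4 contexts. The fill, sketched with illustrative buffers and the wrap position carried between contexts:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Fill one context's 64 (rx_ring, cq_ring) pairs, wrapping over the
     * real ring population when nr_rings < 64.
     */
    static int example_fill_rss_ctx(__le16 *tbl, const u16 *rx_ids,
                                    const u16 *cq_ids, int nr_rings, int k)
    {
            int j;

            for (j = 0; j < 64; j++) {
                    *tbl++ = cpu_to_le16(rx_ids[k]);
                    *tbl++ = cpu_to_le16(cq_ids[k]);
                    if (++k == nr_rings)
                            k = 0;
            }
            return k;       /* carry the wrap position into the next context */
    }
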
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -4101,6 +4491,18 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
+
+ req.default_rx_ring_id =
+ cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
+ req.default_cmpl_ring_id =
+ cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
+ req.enables =
+ cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
+ VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
+ goto vnic_mru;
+ }
req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
/* Only RSS support for now TBD: COS & LB */
if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
@@ -4133,13 +4535,13 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
- req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
-
req.lb_rule = cpu_to_le16(0xffff);
+vnic_mru:
req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
VLAN_HLEN);
+ req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp))
def_vlan = bp->vf.vlan;
@@ -4187,6 +4589,10 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
struct hwrm_vnic_alloc_input req = {0};
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ goto vnic_no_ring_grps;
/* map ring groups to this vnic */
for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
@@ -4196,12 +4602,12 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
j, nr_rings);
break;
}
- bp->vnic_info[vnic_id].fw_grp_ids[j] =
- bp->grp_info[grp_idx].fw_grp_id;
+ vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
}
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
+vnic_no_ring_grps:
+ for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
+ vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
if (vnic_id == 0)
req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
@@ -4210,7 +4616,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
- bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -4230,7 +4636,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (!rc) {
u32 flags = le32_to_cpu(resp->flags);
- if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
@@ -4245,6 +4652,9 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
u16 i;
u32 rc = 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return 0;
+
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->rx_nr_rings; i++) {
struct hwrm_ring_grp_alloc_input req = {0};
@@ -4277,7 +4687,7 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
u32 rc = 0;
struct hwrm_ring_grp_free_input req = {0};
- if (!bp->grp_info)
+ if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
@@ -4306,45 +4716,90 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
int rc = 0, err = 0;
struct hwrm_ring_alloc_input req = {0};
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
struct bnxt_ring_grp_info *grp_info;
u16 ring_id;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
req.enables = 0;
- if (ring->nr_pages > 1) {
- req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+ if (rmem->nr_pages > 1) {
+ req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
/* Page size is in log2 units */
req.page_size = BNXT_PAGE_SHIFT;
req.page_tbl_depth = 1;
} else {
- req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
+ req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
}
req.fbo = 0;
/* Association of ring index with doorbell index and MSIX number */
req.logical_id = cpu_to_le16(map_index);
switch (ring_type) {
- case HWRM_RING_ALLOC_TX:
+ case HWRM_RING_ALLOC_TX: {
+ struct bnxt_tx_ring_info *txr;
+
+ txr = container_of(ring, struct bnxt_tx_ring_info,
+ tx_ring_struct);
req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
/* Association of transmit ring with completion ring */
grp_info = &bp->grp_info[ring->grp_idx];
- req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
req.length = cpu_to_le32(bp->tx_ring_mask + 1);
req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
req.queue_id = cpu_to_le16(ring->queue_id);
break;
+ }
case HWRM_RING_ALLOC_RX:
req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ u16 flags = 0;
+
+ /* Association of rx ring with stats context */
+ grp_info = &bp->grp_info[ring->grp_idx];
+ req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+ req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req.enables |= cpu_to_le32(
+ RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ if (NET_IP_ALIGN == 2)
+ flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
+ req.flags = cpu_to_le16(flags);
+ }
break;
case HWRM_RING_ALLOC_AGG:
- req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ /* Association of agg ring with rx ring */
+ grp_info = &bp->grp_info[ring->grp_idx];
+ req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req.enables |= cpu_to_le32(
+ RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
+ RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ } else {
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ }
req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
break;
case HWRM_RING_ALLOC_CMPL:
req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ /* Association of cp ring with nq */
+ grp_info = &bp->grp_info[map_index];
+ req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req.cq_handle = cpu_to_le64(ring->handle);
+ req.enables |= cpu_to_le32(
+ RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
+ } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ }
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
+ req.length = cpu_to_le32(bp->cp_ring_mask + 1);
if (bp->flags & BNXT_FLAG_USING_MSIX)
req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
break;
@@ -4393,22 +4848,67 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
return rc;
}
+static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
+ u32 map_idx, u32 xid)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_PF(bp))
+ db->doorbell = bp->bar1 + 0x10000;
+ else
+ db->doorbell = bp->bar1 + 0x4000;
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ case HWRM_RING_ALLOC_AGG:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ }
+ db->db_key64 |= (u64)xid << DBR_XID_SFT;
+ } else {
+ db->doorbell = bp->bar1 + map_idx * 0x80;
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_key32 = DB_KEY_TX;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ case HWRM_RING_ALLOC_AGG:
+ db->db_key32 = DB_KEY_RX;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ db->db_key32 = DB_KEY_CP;
+ break;
+ }
+ }
+}
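
bnxt_set_db() precomputes the static part of the doorbell: on P5 the 64-bit value packs the L2 path, the ring type, and the firmware ring id (xid) into the upper bits using the DBR_* layout this patch adds to bnxt.h, so each subsequent write only ORs in the 24-bit producer/consumer index. A standalone sketch of the composition:

/* Standalone model of composing a P5 64-bit doorbell from the DBR_*
 * layout introduced in bnxt.h by this patch; the ids are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define DBR_INDEX_MASK	0x0000000000ffffffULL
#define DBR_XID_SFT	32
#define DBR_PATH_L2	(0x1ULL << 56)
#define DBR_TYPE_SQ	(0x0ULL << 60)

int main(void)
{
	uint32_t fw_ring_id = 0x123;	/* xid returned by HWRM_RING_ALLOC */
	uint32_t prod = 0x45;		/* current tx producer index */

	/* Static part, computed once in bnxt_set_db(). */
	uint64_t key = DBR_PATH_L2 | DBR_TYPE_SQ |
		       ((uint64_t)fw_ring_id << DBR_XID_SFT);

	/* Per-write part: OR in the 24-bit ring index. */
	uint64_t db = key | (prod & DBR_INDEX_MASK);

	printf("doorbell = %#018llx\n", (unsigned long long)db);
	return 0;
}
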
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
int i, rc = 0;
+ u32 type;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ type = HWRM_RING_ALLOC_NQ;
+ else
+ type = HWRM_RING_ALLOC_CMPL;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
u32 map_idx = ring->map_idx;
- cpr->cp_doorbell = bp->bar1 + map_idx * 0x80;
- rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL,
- map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
if (!i) {
@@ -4418,33 +4918,69 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
}
+ type = HWRM_RING_ALLOC_TX;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u32 map_idx = i;
-
- rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
- map_idx);
+ struct bnxt_ring_struct *ring;
+ u32 map_idx;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_napi *bnapi = txr->bnapi;
+ struct bnxt_cp_ring_info *cpr, *cpr2;
+ u32 type2 = HWRM_RING_ALLOC_CMPL;
+
+ cpr = &bnapi->cp_ring;
+ cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
+ ring = &cpr2->cp_ring_struct;
+ ring->handle = BNXT_TX_HDL;
+ map_idx = bnapi->index;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ if (rc)
+ goto err_out;
+ bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
+ }
+ ring = &txr->tx_ring_struct;
+ map_idx = i;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
+ bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
+ type = HWRM_RING_ALLOC_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- u32 map_idx = rxr->bnapi->index;
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ u32 map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
- map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
- writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ u32 type2 = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_cp_ring_info *cpr2;
+
+ cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
+ ring = &cpr2->cp_ring_struct;
+ ring->handle = BNXT_RX_HDL;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ if (rc)
+ goto err_out;
+ bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
+ }
}
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ type = HWRM_RING_ALLOC_AGG;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring =
@@ -4452,15 +4988,13 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
u32 grp_idx = ring->grp_idx;
u32 map_idx = grp_idx + bp->rx_nr_rings;
- rc = hwrm_ring_alloc_send_msg(bp, ring,
- HWRM_RING_ALLOC_AGG,
- map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
goto err_out;
- rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
- writel(DB_KEY_RX | rxr->rx_agg_prod,
- rxr->rx_agg_doorbell);
+ bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
}
}
@@ -4496,6 +5030,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
+ u32 type;
int i;
if (!bp->bnapi)
@@ -4504,9 +5039,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u32 grp_idx = txr->bnapi->index;
- u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+ u32 cmpl_ring_id;
+ cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_TX,
@@ -4520,8 +5055,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+ u32 cmpl_ring_id;
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring,
RING_FREE_REQ_RING_TYPE_RX,
@@ -4533,15 +5069,19 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ type = RING_FREE_REQ_RING_TYPE_RX_AGG;
+ else
+ type = RING_FREE_REQ_RING_TYPE_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
u32 grp_idx = rxr->bnapi->index;
- u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
+ u32 cmpl_ring_id;
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_RX,
+ hwrm_ring_free_send_msg(bp, ring, type,
close_path ? cmpl_ring_id :
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
@@ -4556,14 +5096,32 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
*/
bnxt_disable_int_sync(bp);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ type = RING_FREE_REQ_RING_TYPE_NQ;
+ else
+ type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ struct bnxt_ring_struct *ring;
+ int j;
+
+ for (j = 0; j < 2; j++) {
+ struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ if (cpr2) {
+ ring = &cpr2->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ continue;
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+ ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ hwrm_ring_free_send_msg(bp, ring, type,
INVALID_HW_RING_ID);
ring->fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
@@ -4571,6 +5129,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
+
static int bnxt_hwrm_get_rings(struct bnxt *bp)
{
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
@@ -4601,6 +5162,22 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
cp = min_t(u16, cp, stats);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ int rx = hw_resc->resv_rx_rings;
+ int tx = hw_resc->resv_tx_rings;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx >>= 1;
+ if (cp < (rx + tx)) {
+ bnxt_trim_rings(bp, &rx, &tx, cp, false);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+ hw_resc->resv_rx_rings = rx;
+ hw_resc->resv_tx_rings = tx;
+ }
+ cp = le16_to_cpu(resp->alloc_msix);
+ hw_resc->resv_hw_ring_grps = rx;
+ }
hw_resc->resv_cp_rings = cp;
}
mutex_unlock(&bp->hwrm_cmd_lock);
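
On P5 the firmware hands back completion rings and MSI-X separately, so if the reserved completion rings cannot cover one per rx ring plus one per tx ring, the driver shrinks rx and tx to fit; with aggregation rings enabled the reserved rx count includes the agg rings, hence the halving before the comparison and the doubling after. A worked sketch of that arithmetic; trim() is a hypothetical stand-in, since bnxt_trim_rings()'s body is outside this hunk:

/* Worked example of the P5 completion-ring budget check above.
 * trim() is a made-up stand-in for bnxt_trim_rings().
 */
#include <stdio.h>

static void trim(int *rx, int *tx, int max)
{
	/* Hypothetical: shrink the larger side until rx + tx fits. */
	while (*rx + *tx > max) {
		if (*rx >= *tx)
			(*rx)--;
		else
			(*tx)--;
	}
}

int main(void)
{
	int resv_rx = 16, resv_tx = 8, cp = 12; /* rx includes agg rings */
	int rx = resv_rx >> 1, tx = resv_tx;    /* halve: 8 real rx rings */

	if (cp < rx + tx) {
		trim(&rx, &tx, cp);
		rx <<= 1;                       /* double back to rx + agg */
	}
	printf("resv_rx=%d resv_tx=%d\n", rx, tx);
	return 0;
}
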
@@ -4626,6 +5203,8 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
return rc;
}
+static bool bnxt_rfs_supported(struct bnxt *bp);
+
static void
__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
int tx_rings, int rx_rings, int ring_grps,
@@ -4639,15 +5218,38 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->num_tx_rings = cpu_to_le16(tx_rings);
if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- enables |= ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= tx_rings + ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= rx_rings ?
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ } else {
+ enables |= cp_rings ?
+ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ }
+ enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
req->num_rx_rings = cpu_to_le16(rx_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
+ req->num_msix = cpu_to_le16(cp_rings);
+ req->num_rsscos_ctxs =
+ cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ } else {
+ req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req->num_rsscos_ctxs = cpu_to_le16(1);
+ if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ bnxt_rfs_supported(bp))
+ req->num_rsscos_ctxs =
+ cpu_to_le16(ring_grps + 1);
+ }
req->num_stat_ctxs = req->num_cmpl_rings;
req->num_vnics = cpu_to_le16(vnics);
}
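
Worked example of the P5 accounting above: with tx_rings = 8, ring_grps = 8 and cp_rings = 8, the request carries num_cmpl_rings = 16 (one completion ring per tx ring plus one per rx ring group), num_msix = 8 (the notification queues), and num_rsscos_ctxs = DIV_ROUND_UP(8, 64) = 1; num_stat_ctxs then tracks the completion-ring count.
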
@@ -4664,16 +5266,33 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ enables |= tx_rings + ring_grps ?
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ } else {
+ enables |= cp_rings ?
+ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= ring_grps ?
+ FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ }
enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
+ req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req->num_tx_rings = cpu_to_le16(tx_rings);
req->num_rx_rings = cpu_to_le16(rx_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
+ req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ } else {
+ req->num_cmpl_rings = cpu_to_le16(cp_rings);
+ req->num_hw_ring_grps = cpu_to_le16(ring_grps);
+ req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ }
req->num_stat_ctxs = req->num_cmpl_rings;
req->num_vnics = cpu_to_le16(vnics);
@@ -4717,10 +5336,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
- req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS);
- req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
- req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
@@ -4766,20 +5381,19 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
return true;
- if (bp->flags & BNXT_FLAG_RFS)
+ if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
+ hw_resc->resv_vnics != vnic ||
+ (hw_resc->resv_hw_ring_grps != grp &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5))))
return true;
return false;
}
-static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
- bool shared);
-
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -4795,7 +5409,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if (bp->flags & BNXT_FLAG_RFS)
+ if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
@@ -4858,9 +5472,11 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
- FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
- FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -4879,12 +5495,16 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, vnics);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
- if (BNXT_NEW_RM(bp))
+ if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
- FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
+ else
+ flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
+ }
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -4907,46 +5527,140 @@ static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
cp_rings, vnics);
}
-static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
+static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
+{
+ struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ struct hwrm_ring_aggint_qcaps_input req = {0};
+ int rc;
+
+ coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
+ coal_cap->num_cmpl_dma_aggr_max = 63;
+ coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
+ coal_cap->cmpl_aggr_dma_tmr_max = 65535;
+ coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
+ coal_cap->int_lat_tmr_min_max = 65535;
+ coal_cap->int_lat_tmr_max_max = 65535;
+ coal_cap->num_cmpl_aggr_int_max = 65535;
+ coal_cap->timer_units = 80;
+
+ if (bp->hwrm_spec_code < 0x10902)
+ return;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
+ coal_cap->nq_params = le32_to_cpu(resp->nq_params);
+ coal_cap->num_cmpl_dma_aggr_max =
+ le16_to_cpu(resp->num_cmpl_dma_aggr_max);
+ coal_cap->num_cmpl_dma_aggr_during_int_max =
+ le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
+ coal_cap->cmpl_aggr_dma_tmr_max =
+ le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
+ coal_cap->cmpl_aggr_dma_tmr_during_int_max =
+ le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
+ coal_cap->int_lat_tmr_min_max =
+ le16_to_cpu(resp->int_lat_tmr_min_max);
+ coal_cap->int_lat_tmr_max_max =
+ le16_to_cpu(resp->int_lat_tmr_max_max);
+ coal_cap->num_cmpl_aggr_int_max =
+ le16_to_cpu(resp->num_cmpl_aggr_int_max);
+ coal_cap->timer_units = le16_to_cpu(resp->timer_units);
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
+{
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+
+ return usec * 1000 / coal_cap->timer_units;
+}
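
bnxt_usec_to_coal_tmr() treats coal_cap->timer_units as nanoseconds per hardware timer tick, so with the legacy fallback of 80 set in bnxt_hwrm_coal_params_qcaps() above, a 100 usec coalescing interval becomes 100 * 1000 / 80 = 1250 timer units.
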
+
+static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
+ struct bnxt_coal *hw_coal,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
- u16 val, tmr, max, flags;
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ u32 cmpl_params = coal_cap->cmpl_params;
+ u16 val, tmr, max, flags = 0;
max = hw_coal->bufs_per_record * 128;
if (hw_coal->budget)
max = hw_coal->bufs_per_record * hw_coal->budget;
+ max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
req->num_cmpl_aggr_int = cpu_to_le16(val);
- /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
- val = min_t(u16, val, 63);
+ val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
req->num_cmpl_dma_aggr = cpu_to_le16(val);
- /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
- val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63);
+ val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
+ coal_cap->num_cmpl_dma_aggr_during_int_max);
req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
- tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks);
- tmr = max_t(u16, tmr, 1);
+ tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
+ tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
req->int_lat_tmr_max = cpu_to_le16(tmr);
/* min timer set to 1/2 of interrupt timer */
- val = tmr / 2;
- req->int_lat_tmr_min = cpu_to_le16(val);
+ if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
+ val = tmr / 2;
+ val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
+ req->int_lat_tmr_min = cpu_to_le16(val);
+ req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
+ }
/* buf timer set to 1/4 of interrupt timer */
- val = max_t(u16, tmr / 4, 1);
+ val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
- tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq);
- tmr = max_t(u16, tmr, 1);
- req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
+ if (cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
+ tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
+ val = clamp_t(u16, tmr, 1,
+ coal_cap->cmpl_aggr_dma_tmr_during_int_max);
+ req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
+ req->enables |=
+ cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
+ }
- flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
- if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
+ if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
+ hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
req->flags = cpu_to_le16(flags);
+ req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
+}
+
+/* Caller holds bp->hwrm_cmd_lock */
+static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct bnxt_coal *hw_coal)
+{
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ u32 nq_params = coal_cap->nq_params;
+ u16 tmr;
+
+ if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+ -1, -1);
+ req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
+ req.flags =
+ cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
+
+ tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
+ tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
+ req.int_lat_tmr_min = cpu_to_le16(tmr);
+ req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
+ return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
@@ -4954,7 +5668,6 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_coal coal;
- unsigned int grp_idx;
/* Tick values in microseconds.
* 1 coal_buf x bufs_per_record = 1 completion record.
@@ -4970,10 +5683,9 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
- bnxt_hwrm_set_coal_params(&coal, &req_rx);
+ bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
- grp_idx = bnapi->index;
- req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+ req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
HWRM_CMD_TIMEOUT);
@@ -4990,22 +5702,46 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
- bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx);
- bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx);
+ bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
+ bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_coal *hw_coal;
+ u16 ring_id;
req = &req_rx;
- if (!bnapi->rx_ring)
+ if (!bnapi->rx_ring) {
+ ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
req = &req_tx;
- req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+ } else {
+ ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
+ }
+ req->ring_id = cpu_to_le16(ring_id);
rc = _hwrm_send_message(bp, req, sizeof(*req),
HWRM_CMD_TIMEOUT);
if (rc)
break;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ continue;
+
+ if (bnapi->rx_ring && bnapi->tx_ring) {
+ req = &req_tx;
+ ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
+ req->ring_id = cpu_to_le16(ring_id);
+ rc = _hwrm_send_message(bp, req, sizeof(*req),
+ HWRM_CMD_TIMEOUT);
+ if (rc)
+ break;
+ }
+ if (bnapi->rx_ring)
+ hw_coal = &bp->rx_coal;
+ else
+ hw_coal = &bp->tx_coal;
+ __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -5132,6 +5868,304 @@ func_qcfg_exit:
return rc;
}
+static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
+{
+ struct hwrm_func_backing_store_qcaps_input req = {0};
+ struct hwrm_func_backing_store_qcaps_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ int i;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ ctx_pg = kcalloc(bp->max_q + 1, sizeof(*ctx_pg), GFP_KERNEL);
+ if (!ctx_pg) {
+ kfree(ctx);
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
+ ctx->tqm_mem[i] = ctx_pg;
+
+ bp->ctx = ctx;
+ ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
+ ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
+ ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+ ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
+ ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
+ ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
+ ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
+ ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
+ ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
+ ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
+ ctx->vnic_max_vnic_entries =
+ le16_to_cpu(resp->vnic_max_vnic_entries);
+ ctx->vnic_max_ring_table_entries =
+ le16_to_cpu(resp->vnic_max_ring_table_entries);
+ ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
+ ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
+ ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
+ ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
+ ctx->tqm_min_entries_per_ring =
+ le32_to_cpu(resp->tqm_min_entries_per_ring);
+ ctx->tqm_max_entries_per_ring =
+ le32_to_cpu(resp->tqm_max_entries_per_ring);
+ ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
+ if (!ctx->tqm_entries_multiple)
+ ctx->tqm_entries_multiple = 1;
+ ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
+ ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
+ ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
+ ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
+ } else {
+ rc = 0;
+ }
+ctx_err:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
+ __le64 *pg_dir)
+{
+ u8 pg_size = 0;
+
+ if (BNXT_PAGE_SHIFT == 13)
+ pg_size = 1 << 4;
+ else if (BNXT_PAGE_SHIFT == 16)
+ pg_size = 2 << 4;
+
+ *pg_attr = pg_size;
+ if (rmem->nr_pages > 1) {
+ *pg_attr |= 1;
+ *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
+ } else {
+ *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
+ }
+}
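
bnxt_hwrm_set_pg_attr() encodes the backing-store geometry for firmware: the upper nibble of pg_attr carries the page size (0 = 4K, 1 = 8K, 2 = 64K), bit 0 selects one level of indirection, and pg_dir points at either the page directory or the single data page. A standalone sketch with fake DMA addresses:

/* Standalone model of the pg_attr/pg_dir encoding used above; the
 * DMA addresses are fake.
 */
#include <stdio.h>
#include <stdint.h>

static void set_pg_attr(int page_shift, int nr_pages, uint64_t pg_tbl_map,
			uint64_t page0_map, uint8_t *pg_attr, uint64_t *pg_dir)
{
	uint8_t pg_size = 0;		/* 4K pages */

	if (page_shift == 13)
		pg_size = 1 << 4;	/* 8K pages */
	else if (page_shift == 16)
		pg_size = 2 << 4;	/* 64K pages */

	*pg_attr = pg_size;
	if (nr_pages > 1) {
		*pg_attr |= 1;		/* one level of indirection */
		*pg_dir = pg_tbl_map;	/* point at the page directory */
	} else {
		*pg_dir = page0_map;	/* single page, direct pointer */
	}
}

int main(void)
{
	uint8_t attr;
	uint64_t dir;

	set_pg_attr(12, 4, 0x1000, 0x2000, &attr, &dir);
	printf("attr=%#x dir=%#llx\n", attr, (unsigned long long)dir);
	return 0;
}
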
+
+#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
+ (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
+ FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
+
+static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
+{
+ struct hwrm_func_backing_store_cfg_input req = {0};
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ struct bnxt_ctx_pg_info *ctx_pg;
+ __le32 *num_entries;
+ __le64 *pg_dir;
+ u8 *pg_attr;
+ int i, rc;
+ u32 ena;
+
+ if (!ctx)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
+ req.enables = cpu_to_le32(enables);
+
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
+ ctx_pg = &ctx->qp_mem;
+ req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
+ req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
+ req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.qpc_pg_size_qpc_lvl,
+ &req.qpc_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
+ ctx_pg = &ctx->srq_mem;
+ req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
+ req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.srq_pg_size_srq_lvl,
+ &req.srq_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
+ ctx_pg = &ctx->cq_mem;
+ req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
+ req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
+ &req.cq_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
+ ctx_pg = &ctx->vnic_mem;
+ req.vnic_num_vnic_entries =
+ cpu_to_le16(ctx->vnic_max_vnic_entries);
+ req.vnic_num_ring_table_entries =
+ cpu_to_le16(ctx->vnic_max_ring_table_entries);
+ req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.vnic_pg_size_vnic_lvl,
+ &req.vnic_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
+ ctx_pg = &ctx->stat_mem;
+ req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
+ req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.stat_pg_size_stat_lvl,
+ &req.stat_page_dir);
+ }
+ for (i = 0, num_entries = &req.tqm_sp_num_entries,
+ pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
+ pg_dir = &req.tqm_sp_page_dir,
+ ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
+ i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ if (!(enables & ena))
+ continue;
+
+ req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
+ ctx_pg = ctx->tqm_mem[i];
+ *num_entries = cpu_to_le32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
+
+static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ if (!mem_size)
+ return 0;
+
+ rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
+ if (rmem->nr_pages > MAX_CTX_PAGES) {
+ rmem->nr_pages = 0;
+ return -EINVAL;
+ }
+ rmem->page_size = BNXT_PAGE_SIZE;
+ rmem->pg_arr = ctx_pg->ctx_pg_arr;
+ rmem->dma_arr = ctx_pg->ctx_dma_arr;
+ rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+ return bnxt_alloc_ring(bp, rmem);
+}
+
+static void bnxt_free_ctx_mem(struct bnxt *bp)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ int i;
+
+ if (!ctx)
+ return;
+
+ if (ctx->tqm_mem[0]) {
+ for (i = 0; i < bp->max_q + 1; i++)
+ bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem);
+ kfree(ctx->tqm_mem[0]);
+ ctx->tqm_mem[0] = NULL;
+ }
+
+ bnxt_free_ring(bp, &ctx->stat_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->cq_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->srq_mem.ring_mem);
+ bnxt_free_ring(bp, &ctx->qp_mem.ring_mem);
+ ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+}
+
+static int bnxt_alloc_ctx_mem(struct bnxt *bp)
+{
+ struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_info *ctx;
+ u32 mem_size, ena, entries;
+ int i, rc;
+
+ rc = bnxt_hwrm_func_backing_store_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
+ rc);
+ return rc;
+ }
+ ctx = bp->ctx;
+ if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
+ return 0;
+
+ ctx_pg = &ctx->qp_mem;
+ ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
+ mem_size = ctx->qp_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->srq_mem;
+ ctx_pg->entries = ctx->srq_max_l2_entries;
+ mem_size = ctx->srq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->cq_mem;
+ ctx_pg->entries = ctx->cq_max_l2_entries;
+ mem_size = ctx->cq_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->vnic_mem;
+ ctx_pg->entries = ctx->vnic_max_vnic_entries +
+ ctx->vnic_max_ring_table_entries;
+ mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ ctx_pg = &ctx->stat_mem;
+ ctx_pg->entries = ctx->stat_max_entries;
+ mem_size = ctx->stat_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+
+ entries = ctx->qp_max_l2_entries;
+ entries = roundup(entries, ctx->tqm_entries_multiple);
+ entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
+ ctx->tqm_max_entries_per_ring);
+ for (i = 0, ena = 0; i < bp->max_q + 1; i++) {
+ ctx_pg = ctx->tqm_mem[i];
+ ctx_pg->entries = entries;
+ mem_size = ctx->tqm_entry_size * entries;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
+ }
+ ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+ rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+ if (rc)
+ netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
+ rc);
+ else
+ ctx->flags |= BNXT_CTX_FLAG_INITED;
+
+ return 0;
+}
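
Worked example of the TQM sizing above: with qp_max_l2_entries = 10000, tqm_entries_multiple = 64, and a firmware-reported per-ring range of [4096, 8192], roundup(10000, 64) = 10048 is clamped down to 8192 entries, so each of the bp->max_q + 1 TQM rings gets 8192 * tqm_entry_size bytes of backing store.
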
+
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
@@ -5170,6 +6204,13 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ u16 max_msix = le16_to_cpu(resp->max_msix);
+
+ hw_resc->max_irqs = min_t(u16, hw_resc->max_irqs, max_msix);
+ hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
+ }
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -5259,6 +6300,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
return rc;
if (bp->hwrm_spec_code >= 0x10803) {
+ rc = bnxt_alloc_ctx_mem(bp);
+ if (rc)
+ return rc;
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
if (!rc)
bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
@@ -5303,13 +6347,15 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
qptr = &resp->queue_id0;
for (i = 0, j = 0; i < bp->max_tc; i++) {
- bp->q_info[j].queue_id = *qptr++;
+ bp->q_info[j].queue_id = *qptr;
+ bp->q_ids[i] = *qptr++;
bp->q_info[j].queue_profile = *qptr++;
bp->tc_to_qidx[j] = j;
if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
(no_rdma && BNXT_PF(bp)))
j++;
}
+ bp->max_q = bp->max_tc;
bp->max_tc = max_t(u8, j, 1);
if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
@@ -5359,8 +6405,12 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
- if (resp->hwrm_intf_maj_8b >= 1)
+ if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+ bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
+ }
+ if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
+ bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
bp->chip_num = le16_to_cpu(resp->chip_num);
if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
@@ -5417,8 +6467,10 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
{
+ struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_port_qstats_ext_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
return 0;
@@ -5427,7 +6479,19 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
req.port_id = cpu_to_le16(pf->port_id);
req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
+ req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
+ bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
+ } else {
+ bp->fw_rx_stats_ext_size = 0;
+ bp->fw_tx_stats_ext_size = 0;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
}
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
@@ -5532,7 +6596,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
return rc;
}
-static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
@@ -5588,6 +6652,53 @@ vnic_setup_err:
return rc;
}
+static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
+{
+ int rc, i, nr_ctxs;
+
+ nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
+ for (i = 0; i < nr_ctxs; i++) {
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
+ vnic_id, i, rc);
+ break;
+ }
+ bp->rsscos_nr_ctxs++;
+ }
+ if (i < nr_ctxs)
+ return -ENOMEM;
+
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic_id, rc);
+ return rc;
+ }
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic_id, rc);
+ return rc;
+ }
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+ vnic_id, rc);
+ }
+ }
+ return rc;
+}
+
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return __bnxt_setup_vnic_p5(bp, vnic_id);
+ else
+ return __bnxt_setup_vnic(bp, vnic_id);
+}
+
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
@@ -6206,12 +7317,15 @@ static void bnxt_init_napi(struct bnxt *bp)
struct bnxt_napi *bnapi;
if (bp->flags & BNXT_FLAG_USING_MSIX) {
- if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ poll_fn = bnxt_poll_p5;
+ else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
@@ -6968,10 +8082,10 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
netdev_err(bp->dev, "Failed to reserve default rings at open\n");
return rc;
}
- rc = bnxt_reserve_rings(bp);
- if (rc)
- return rc;
}
+ rc = bnxt_reserve_rings(bp);
+ if (rc)
+ return rc;
if ((bp->flags & BNXT_FLAG_RFS) &&
!(bp->flags & BNXT_FLAG_USING_MSIX)) {
/* disable RFS if falling back to INTA */
@@ -7443,6 +8557,8 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
/* If the chip and firmware support RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
@@ -7456,6 +8572,8 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
int vnics, max_vnics, max_rss_ctxs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return false;
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
return false;
@@ -7672,21 +8790,6 @@ static void bnxt_tx_timeout(struct net_device *dev)
bnxt_queue_sp_work(bp);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void bnxt_poll_controller(struct net_device *dev)
-{
- struct bnxt *bp = netdev_priv(dev);
- int i;
-
- /* Only process tx rings/combined rings in netpoll mode. */
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
-
- napi_schedule(&txr->bnapi->napi);
- }
-}
-#endif
-
static void bnxt_timer(struct timer_list *t)
{
struct bnxt *bp = from_timer(bp, t, timer);
@@ -7991,6 +9094,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
INIT_WORK(&bp->sp_task, bnxt_sp_task);
spin_lock_init(&bp->ntp_fltr_lock);
+#if BITS_PER_LONG == 32
+ spin_lock_init(&bp->db_lock);
+#endif
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
@@ -8027,7 +9133,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (ether_addr_equal(addr->sa_data, dev->dev_addr))
return 0;
- rc = bnxt_approve_mac(bp, addr->sa_data);
+ rc = bnxt_approve_mac(bp, addr->sa_data, true);
if (rc)
return rc;
@@ -8520,9 +9626,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
.ndo_set_vf_trust = bnxt_set_vf_trust,
#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = bnxt_poll_controller,
-#endif
.ndo_setup_tc = bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
@@ -8559,6 +9662,9 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
bnxt_cleanup_pci(bp);
free_netdev(dev);
}
@@ -8632,7 +9738,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_tx = hw_resc->max_tx_rings;
*max_rx = hw_resc->max_rx_rings;
*max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
- hw_resc->max_irqs);
+ hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -8827,14 +9933,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
+ bool strict_approval = true;
if (is_valid_ether_addr(vf->mac_addr)) {
/* overwrite netdev dev_addr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ /* Older PF driver or firmware may not approve this
+ * correctly.
+ */
+ strict_approval = false;
} else {
eth_hw_addr_random(bp->dev);
}
- rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
}
return rc;
@@ -8859,6 +9970,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
bp = netdev_priv(dev);
+ bnxt_set_max_func_irqs(bp, max_irqs);
if (bnxt_vf_pciid(ent->driver_data))
bp->flags |= BNXT_FLAG_VF;
@@ -8885,12 +9997,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- if (bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) {
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+ bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
rc = bnxt_alloc_hwrm_short_cmd_req(bp);
if (rc)
goto init_err_pci_clean;
}
+ if (BNXT_CHIP_P5(bp))
+ bp->flags |= BNXT_FLAG_CHIP_P5;
+
rc = bnxt_hwrm_func_reset(bp);
if (rc)
goto init_err_pci_clean;
@@ -8905,7 +10021,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_GRO;
- if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_LRO;
dev->hw_enc_features =
@@ -8919,7 +10035,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
- if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_GRO_HW;
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
if (dev->features & NETIF_F_GRO_HW)
@@ -8930,10 +10046,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
init_waitqueue_head(&bp->sriov_cfg_wait);
mutex_init(&bp->sriov_lock);
#endif
- bp->gro_func = bnxt_gro_func_5730x;
- if (BNXT_CHIP_P4_PLUS(bp))
- bp->gro_func = bnxt_gro_func_5731x;
- else
+ if (BNXT_SUPPORTS_TPA(bp)) {
+ bp->gro_func = bnxt_gro_func_5730x;
+ if (BNXT_CHIP_P4(bp))
+ bp->gro_func = bnxt_gro_func_5731x;
+ }
+ if (!BNXT_CHIP_P4_PLUS(bp))
bp->flags |= BNXT_FLAG_DOUBLE_DB;
rc = bnxt_hwrm_func_drv_rgtr(bp);
@@ -8946,6 +10064,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->ulp_probe = bnxt_ulp_probe;
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
+ rc);
+ rc = -1;
+ goto init_err_pci_clean;
+ }
/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
@@ -8960,13 +10085,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -EADDRNOTAVAIL;
goto init_err_pci_clean;
}
- rc = bnxt_hwrm_queue_qportcfg(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
- rc);
- rc = -1;
- goto init_err_pci_clean;
- }
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_port_led_qcaps(bp);
@@ -8984,7 +10102,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
- bnxt_set_max_func_irqs(bp, max_irqs);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
netdev_err(bp->dev, "Not enough rings available.\n");
@@ -8997,7 +10114,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
+ if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
@@ -9032,6 +10149,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+ bnxt_hwrm_coal_params_qcaps(bp);
+
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
@@ -9063,6 +10182,10 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
+ bnxt_free_hwrm_resources(bp);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
bnxt_cleanup_pci(bp);
init_err_free:
@@ -9231,13 +10354,6 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err); /* non-fatal, continue */
- }
-
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index bde384630a75..498b373c992d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.9.2"
+#define DRV_MODULE_VERSION "1.10.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 9
-#define DRV_VER_UPD 2
+#define DRV_VER_MIN 10
+#define DRV_VER_UPD 0
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -403,6 +403,19 @@ struct rx_tpa_end_cmp_ext {
((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \
cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+struct nqe_cn {
+ __le16 type;
+ #define NQ_CN_TYPE_MASK 0x3fUL
+ #define NQ_CN_TYPE_SFT 0
+ #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
+ #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+ __le16 reserved16;
+ __le32 cq_handle_low;
+ __le32 v;
+ #define NQ_CN_V 0x1UL
+ __le32 cq_handle_high;
+};
+
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
#define DB_IRQ_DIS (0x1 << 27)
@@ -416,6 +429,25 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
+/* 64-bit doorbell */
+#define DBR_INDEX_MASK 0x0000000000ffffffULL
+#define DBR_XID_MASK 0x000fffff00000000ULL
+#define DBR_XID_SFT 32
+#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_TYPE_SQ (0x0ULL << 60)
+#define DBR_TYPE_RQ (0x1ULL << 60)
+#define DBR_TYPE_SRQ (0x2ULL << 60)
+#define DBR_TYPE_SRQ_ARM (0x3ULL << 60)
+#define DBR_TYPE_CQ (0x4ULL << 60)
+#define DBR_TYPE_CQ_ARMSE (0x5ULL << 60)
+#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60)
+#define DBR_TYPE_CQ_ARMENA (0x7ULL << 60)
+#define DBR_TYPE_SRQ_ARMENA (0x8ULL << 60)
+#define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60)
+#define DBR_TYPE_NQ (0xaULL << 60)
+#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NULL (0xfULL << 60)
+
#define INVALID_HW_RING_ID ((u16)-1)
/* The hardware supports certain page sizes. Use the supported page sizes
@@ -505,6 +537,9 @@ struct rx_tpa_end_cmp_ext {
(!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
!((raw_cons) & bp->cp_bit))
+#define NQ_CMP_VALID(nqcmp, raw_cons) \
+ (!!((nqcmp)->v & cpu_to_le32(NQ_CN_V)) == !((raw_cons) & bp->cp_bit))
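
NQ_CMP_VALID is the usual valid-bit phase protocol: the producer toggles the V bit each time it wraps the ring, and the consumer compares V against the phase implied by the wrap bit of its raw consumer index, so stale entries from the previous pass read as invalid without the ring ever being cleared. A standalone sketch:

/* Standalone model of the valid-bit phase check behind NQ_CMP_VALID.
 * RING_SIZE must be a power of two; CP_BIT is the wrap (phase) bit.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE	8
#define CP_BIT		RING_SIZE	/* bit that toggles on each wrap */

static bool entry_valid(uint32_t v_flag, uint32_t raw_cons)
{
	/* Valid when the entry's V bit matches the expected phase. */
	return !!v_flag == !(raw_cons & CP_BIT);
}

int main(void)
{
	/* First pass: hardware wrote V=1, wrap bit clear -> valid. */
	printf("%d\n", entry_valid(1, 3));		/* prints 1 */
	/* After one wrap the same V=1 entry reads as stale. */
	printf("%d\n", entry_valid(1, 3 + CP_BIT));	/* prints 0 */
	return 0;
}
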
+
#define TX_CMP_TYPE(txcmp) \
(le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
@@ -577,9 +612,13 @@ struct bnxt_sw_rx_agg_bd {
dma_addr_t mapping;
};
-struct bnxt_ring_struct {
+struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
+ u32 flags;
+#define BNXT_RMEM_VALID_PTE_FLAG 1
+#define BNXT_RMEM_RING_PTE_FLAG 2
+
void **pg_arr;
dma_addr_t *dma_arr;
@@ -588,12 +627,17 @@ struct bnxt_ring_struct {
int vmem_size;
void **vmem;
+};
+
+struct bnxt_ring_struct {
+ struct bnxt_ring_mem_info ring_mem;
u16 fw_ring_id; /* Ring id filled by Chimp FW */
union {
u16 grp_idx;
u16 map_idx; /* Used by cmpl rings */
};
+ u32 handle;
u8 queue_id;
};
@@ -609,12 +653,20 @@ struct tx_push_buffer {
u32 data[25];
};
+struct bnxt_db_info {
+ void __iomem *doorbell;
+ union {
+ u64 db_key64;
+ u32 db_key32;
+ };
+};
+
struct bnxt_tx_ring_info {
struct bnxt_napi *bnapi;
u16 tx_prod;
u16 tx_cons;
u16 txq_index;
- void __iomem *tx_doorbell;
+ struct bnxt_db_info tx_db;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
struct bnxt_sw_tx_bd *tx_buf_ring;
@@ -631,6 +683,42 @@ struct bnxt_tx_ring_info {
struct bnxt_ring_struct tx_ring_struct;
};
+#define BNXT_LEGACY_COAL_CMPL_PARAMS \
+ (RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT | \
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT)
+
+#define BNXT_COAL_CMPL_ENABLES \
+ (RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR | \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR | \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX | \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT)
+
+#define BNXT_COAL_CMPL_MIN_TMR_ENABLE \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN
+
+#define BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE \
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT
+
+struct bnxt_coal_cap {
+ u32 cmpl_params;
+ u32 nq_params;
+ u16 num_cmpl_dma_aggr_max;
+ u16 num_cmpl_dma_aggr_during_int_max;
+ u16 cmpl_aggr_dma_tmr_max;
+ u16 cmpl_aggr_dma_tmr_during_int_max;
+ u16 int_lat_tmr_min_max;
+ u16 int_lat_tmr_max_max;
+ u16 num_cmpl_aggr_int_max;
+ u16 timer_units;
+};
+
struct bnxt_coal {
u16 coal_ticks;
u16 coal_ticks_irq;
@@ -675,8 +763,8 @@ struct bnxt_rx_ring_info {
u16 rx_agg_prod;
u16 rx_sw_agg_prod;
u16 rx_next_cons;
- void __iomem *rx_doorbell;
- void __iomem *rx_agg_doorbell;
+ struct bnxt_db_info rx_db;
+ struct bnxt_db_info rx_agg_db;
struct bpf_prog *xdp_prog;
@@ -703,8 +791,12 @@ struct bnxt_rx_ring_info {
};
struct bnxt_cp_ring_info {
+ struct bnxt_napi *bnapi;
u32 cp_raw_cons;
- void __iomem *cp_doorbell;
+ struct bnxt_db_info cp_db;
+
+ u8 had_work_done:1;
+ u8 has_more_work:1;
struct bnxt_coal rx_ring_coal;
u64 rx_packets;
@@ -713,7 +805,10 @@ struct bnxt_cp_ring_info {
struct net_dim dim;
- struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
+ union {
+ struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
+ struct nqe_cn *nq_desc_ring[MAX_CP_PAGES];
+ };
dma_addr_t cp_desc_mapping[MAX_CP_PAGES];
@@ -723,6 +818,10 @@ struct bnxt_cp_ring_info {
u64 rx_l4_csum_errors;
struct bnxt_ring_struct cp_ring_struct;
+
+ struct bnxt_cp_ring_info *cp_ring_arr[2];
+#define BNXT_RX_HDL 0
+#define BNXT_TX_HDL 1
};
struct bnxt_napi {
@@ -736,6 +835,9 @@ struct bnxt_napi {
void (*tx_int)(struct bnxt *, struct bnxt_napi *,
int);
+ int tx_pkts;
+ u8 events;
+
u32 flags;
#define BNXT_NAPI_FLAG_XDP 0x1
@@ -755,6 +857,7 @@ struct bnxt_irq {
#define HWRM_RING_ALLOC_RX 0x2
#define HWRM_RING_ALLOC_AGG 0x4
#define HWRM_RING_ALLOC_CMPL 0x8
+#define HWRM_RING_ALLOC_NQ 0x10
#define INVALID_STATS_CTX_ID -1
@@ -768,7 +871,7 @@ struct bnxt_ring_grp_info {
struct bnxt_vnic_info {
u16 fw_vnic_id; /* returned by Chimp during alloc */
-#define BNXT_MAX_CTX_PER_VNIC 2
+#define BNXT_MAX_CTX_PER_VNIC 8
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
#define BNXT_MAX_UC_ADDRS 4
@@ -1069,6 +1172,55 @@ struct bnxt_vf_rep {
struct bnxt_vf_rep_stats tx_stats;
};
+#define PTU_PTE_VALID 0x1UL
+#define PTU_PTE_LAST 0x2UL
+#define PTU_PTE_NEXT_TO_LAST 0x4UL
+
+#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
+
+struct bnxt_ctx_pg_info {
+ u32 entries;
+ void *ctx_pg_arr[MAX_CTX_PAGES];
+ dma_addr_t ctx_dma_arr[MAX_CTX_PAGES];
+ struct bnxt_ring_mem_info ring_mem;
+};
+
+struct bnxt_ctx_mem_info {
+ u32 qp_max_entries;
+ u16 qp_min_qp1_entries;
+ u16 qp_max_l2_entries;
+ u16 qp_entry_size;
+ u16 srq_max_l2_entries;
+ u32 srq_max_entries;
+ u16 srq_entry_size;
+ u16 cq_max_l2_entries;
+ u32 cq_max_entries;
+ u16 cq_entry_size;
+ u16 vnic_max_vnic_entries;
+ u16 vnic_max_ring_table_entries;
+ u16 vnic_entry_size;
+ u32 stat_max_entries;
+ u16 stat_entry_size;
+ u16 tqm_entry_size;
+ u32 tqm_min_entries_per_ring;
+ u32 tqm_max_entries_per_ring;
+ u32 mrav_max_entries;
+ u16 mrav_entry_size;
+ u16 tim_entry_size;
+ u32 tim_max_entries;
+ u8 tqm_entries_multiple;
+
+ u32 flags;
+ #define BNXT_CTX_FLAG_INITED 0x01
+
+ struct bnxt_ctx_pg_info qp_mem;
+ struct bnxt_ctx_pg_info srq_mem;
+ struct bnxt_ctx_pg_info cq_mem;
+ struct bnxt_ctx_pg_info vnic_mem;
+ struct bnxt_ctx_pg_info stat_mem;
+ struct bnxt_ctx_pg_info *tqm_mem[9];
+};
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1098,6 +1250,8 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
+#define CHIP_NUM_57500 0x1750
+
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808
@@ -1144,6 +1298,7 @@ struct bnxt {
atomic_t intr_sem;
u32 flags;
+ #define BNXT_FLAG_CHIP_P5 0x1
#define BNXT_FLAG_VF 0x2
#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
@@ -1190,15 +1345,24 @@ struct bnxt {
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
+#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
+ !(bp->flags & BNXT_FLAG_CHIP_P5))
-/* Chip class phase 4 and later */
-#define BNXT_CHIP_P4_PLUS(bp) \
+/* Chip class phase 5 */
+#define BNXT_CHIP_P5(bp) \
+ ((bp)->chip_num == CHIP_NUM_57500)
+
+/* Chip class phase 4.x */
+#define BNXT_CHIP_P4(bp) \
(BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
BNXT_CHIP_NUM_5745X((bp)->chip_num) || \
BNXT_CHIP_NUM_588XX((bp)->chip_num) || \
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
+#define BNXT_CHIP_P4_PLUS(bp) \
+ (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+
struct bnxt_en_dev *edev;
struct bnxt_en_dev * (*ulp_probe)(struct net_device *);
@@ -1261,6 +1425,8 @@ struct bnxt {
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
u8 tc_to_qidx[BNXT_MAX_QUEUE];
+ u8 q_ids[BNXT_MAX_QUEUE];
+ u8 max_q;
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -1305,12 +1471,17 @@ struct bnxt {
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext;
+ struct tx_port_stats_ext *hw_tx_port_stats_ext;
dma_addr_t hw_rx_port_stats_map;
dma_addr_t hw_tx_port_stats_map;
dma_addr_t hw_rx_port_stats_ext_map;
+ dma_addr_t hw_tx_port_stats_ext_map;
int hw_port_stats_size;
+ u16 fw_rx_stats_ext_size;
+ u16 fw_tx_stats_ext_size;
u16 hwrm_max_req_len;
+ u16 hwrm_max_ext_req_len;
int hwrm_cmd_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
@@ -1328,11 +1499,10 @@ struct bnxt {
u8 port_count;
u16 br_mode;
+ struct bnxt_coal_cap coal_cap;
struct bnxt_coal rx_coal;
struct bnxt_coal tx_coal;
-#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
-
u32 stats_coal_ticks;
#define BNXT_DEF_STATS_COAL_TICKS 1000000
#define BNXT_MIN_STATS_COAL_TICKS 250000
@@ -1360,6 +1530,7 @@ struct bnxt {
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
+ struct bnxt_ctx_mem_info *ctx;
#ifdef CONFIG_BNXT_SRIOV
int nr_vfs;
struct bnxt_vf_info vf;
@@ -1374,6 +1545,11 @@ struct bnxt {
struct mutex sriov_lock;
#endif
+#if BITS_PER_LONG == 32
+ /* ensure atomic 64-bit doorbell writes on 32-bit systems. */
+ spinlock_t db_lock;
+#endif
+
#define BNXT_NTP_FLTR_MAX_FLTR 4096
#define BNXT_NTP_FLTR_HASH_SIZE 512
#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
@@ -1425,6 +1601,9 @@ struct bnxt {
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
+#define BNXT_TX_STATS_EXT_OFFSET(counter) \
+ (offsetof(struct tx_port_stats_ext, counter) / 8)
+
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFF_DIAG_SUPPORT_OFFSET 0x5c
@@ -1443,21 +1622,46 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
+#if BITS_PER_LONG == 32
+#define writeq(val64, db) \
+do { \
+ spin_lock(&bp->db_lock); \
+ writel((val64) & 0xffffffff, db); \
+ writel((val64) >> 32, (db) + 4); \
+ spin_unlock(&bp->db_lock); \
+} while (0)
+
+#define writeq_relaxed writeq
+#endif
+
/* For TX and RX ring doorbells with no ordering guarantee */
-static inline void bnxt_db_write_relaxed(struct bnxt *bp, void __iomem *db,
- u32 val)
+static inline void bnxt_db_write_relaxed(struct bnxt *bp,
+ struct bnxt_db_info *db, u32 idx)
{
- writel_relaxed(val, db);
- if (bp->flags & BNXT_FLAG_DOUBLE_DB)
- writel_relaxed(val, db);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ writeq_relaxed(db->db_key64 | idx, db->doorbell);
+ } else {
+ u32 db_val = db->db_key32 | idx;
+
+ writel_relaxed(db_val, db->doorbell);
+ if (bp->flags & BNXT_FLAG_DOUBLE_DB)
+ writel_relaxed(db_val, db->doorbell);
+ }
}
/* For TX and RX ring doorbells */
-static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val)
+static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
+ u32 idx)
{
- writel(val, db);
- if (bp->flags & BNXT_FLAG_DOUBLE_DB)
- writel(val, db);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ writeq(db->db_key64 | idx, db->doorbell);
+ } else {
+ u32 db_val = db->db_key32 | idx;
+
+ writel(db_val, db->doorbell);
+ if (bp->flags & BNXT_FLAG_DOUBLE_DB)
+ writel(db_val, db->doorbell);
+ }
}
extern const u16 bnxt_lhint_arr[];
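
The struct bnxt_db_info introduced above carries a pre-computed doorbell key so that hot-path callers only supply the ring index. Below is a minimal sketch of how a P5 64-bit key could be assembled from the DBR_* fields defined in this header; the helper name bnxt_make_db_key64 is hypothetical and not part of the patch.

/* Hypothetical helper: compose a 57500-class doorbell key.
 * Bits 63:60 select the doorbell type (DBR_TYPE_*), bit 56 the
 * L2 path, bits 51:32 the ring XID; bits 23:0 stay clear so
 * bnxt_db_write() can OR in the producer/consumer index.
 */
static inline u64 bnxt_make_db_key64(u64 type, u32 ring_xid)
{
	return type | DBR_PATH_L2 | ((u64)ring_xid << DBR_XID_SFT);
}

/* e.g. for a TX (send) ring, once at ring-allocation time:
 *	txr->tx_db.db_key64 = bnxt_make_db_key64(DBR_TYPE_SQ, fw_ring_id);
 * and then per packet:
 *	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
 */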
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index ddc98c359488..a85d2be986af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
for (i = 0; i < max_tc; i++) {
- u8 qidx;
+ u8 qidx = bp->tc_to_qidx[i];
req.enables |= cpu_to_le32(
- QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+ QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
+ qidx);
memset(&cos2bw, 0, sizeof(cos2bw));
- qidx = bp->tc_to_qidx[i];
cos2bw.queue_id = bp->q_info[qidx].queue_id;
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
cos2bw.tsa =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index f3b9fbcc705b..140dbd62106d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -21,9 +21,22 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
};
+enum bnxt_dl_param_id {
+ BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
+};
+
static const struct bnxt_dl_nvm_param nvm_params[] = {
{DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
BNXT_NVM_SHARED_CFG, 1},
+ {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
+ BNXT_NVM_SHARED_CFG, 1},
+ {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10},
+ {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7},
+ {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
+ BNXT_NVM_SHARED_CFG, 1},
};
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
@@ -46,14 +59,31 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
}
+ if (i == ARRAY_SIZE(nvm_params))
+ return -EOPNOTSUPP;
+
if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
- if (nvm_param.num_bits == 1)
- buf = &val->vbool;
+ switch (bytesize) {
+ case 1:
+ if (nvm_param.num_bits == 1)
+ buf = &val->vbool;
+ else
+ buf = &val->vu8;
+ break;
+ case 2:
+ buf = &val->vu16;
+ break;
+ case 4:
+ buf = &val->vu32;
+ break;
+ default:
+ return -EFAULT;
+ }
data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize,
&data_dma_addr, GFP_KERNEL);
@@ -75,8 +105,12 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
memcpy(buf, data_addr, bytesize);
dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
- if (rc)
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
+ return -EACCES;
+ } else if (rc) {
return -EIO;
+ }
return 0;
}
@@ -85,9 +119,15 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
{
struct hwrm_nvm_get_variable_input req = {0};
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
- return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+ rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
+ if (!rc)
+ if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
+ ctx->val.vbool = !ctx->val.vbool;
+
+ return rc;
}
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
@@ -97,14 +137,55 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
+
+ if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
+ ctx->val.vbool = !ctx->val.vbool;
+
return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
}
+static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ int max_val = -1;
+
+ if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX)
+ max_val = BNXT_MSIX_VEC_MAX;
+
+ if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN)
+ max_val = BNXT_MSIX_VEC_MIN_MAX;
+
+ if (val.vu32 > max_val) {
+ NL_SET_ERR_MSG_MOD(extack, "MSIX value exceeds the allowed range");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct devlink_param bnxt_dl_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
NULL),
+ DEVLINK_PARAM_GENERIC(IGNORE_ARI,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ NULL),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_msix_validate),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_msix_validate),
+ DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
+ "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ NULL),
};
int bnxt_dl_register(struct bnxt *bp)
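
The gre_ver_check parameter is stored in NVM with inverted polarity (NVM_OFF_DIS_GRE_VER_CHECK is a disable bit), which is why both the get and set paths above flip the boolean. A sketch of the round-trip, under that assumption:

/* gre_ver_check inversion, end to end:
 *	user sets vbool = true  ("check GRE version")
 *	set path stores !true  -> NVM disable bit = 0
 *	get path reads 0, returns !0 -> true again
 * The inversion applies only to BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK;
 * every other param passes through unchanged.
 */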
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 2f68dc048390..5b6b2c7d97cf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -33,8 +33,15 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
}
}
+#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
+#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
+#define NVM_OFF_IGNORE_ARI 164
+#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define BNXT_MSIX_VEC_MAX 1280
+#define BNXT_MSIX_VEC_MIN_MAX 128
+
enum bnxt_nvm_dir_type {
BNXT_NVM_SHARED_CFG = 40,
BNXT_NVM_PORT_CFG,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index e52d7af3ab3e..48078564f025 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -148,6 +148,65 @@ reset_coalesce:
#define BNXT_RX_STATS_EXT_ENTRY(counter) \
{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
+#define BNXT_TX_STATS_EXT_ENTRY(counter) \
+ { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }
+
+#define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \
+ BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \
+ BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)
+
+#define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \
+ BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \
+ BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)
+
+#define BNXT_RX_STATS_EXT_PFC_ENTRIES \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(0), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(1), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(2), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(3), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(4), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(5), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(6), \
+ BNXT_RX_STATS_EXT_PFC_ENTRY(7)
+
+#define BNXT_TX_STATS_EXT_PFC_ENTRIES \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(0), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(1), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(2), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(3), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(4), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(5), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(6), \
+ BNXT_TX_STATS_EXT_PFC_ENTRY(7)
+
+#define BNXT_RX_STATS_EXT_COS_ENTRY(n) \
+ BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \
+ BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)
+
+#define BNXT_TX_STATS_EXT_COS_ENTRY(n) \
+ BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \
+ BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)
+
+#define BNXT_RX_STATS_EXT_COS_ENTRIES \
+ BNXT_RX_STATS_EXT_COS_ENTRY(0), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(1), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(2), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(3), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(4), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(5), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(6), \
+ BNXT_RX_STATS_EXT_COS_ENTRY(7) \
+
+#define BNXT_TX_STATS_EXT_COS_ENTRIES \
+ BNXT_TX_STATS_EXT_COS_ENTRY(0), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(1), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(2), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(3), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(4), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(5), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(6), \
+ BNXT_TX_STATS_EXT_COS_ENTRY(7) \
+
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
@@ -256,11 +315,20 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
+ BNXT_RX_STATS_EXT_COS_ENTRIES,
+ BNXT_RX_STATS_EXT_PFC_ENTRIES,
+};
+
+static const struct {
+ long offset;
+ char string[ETH_GSTRING_LEN];
+} bnxt_tx_port_stats_ext_arr[] = {
+ BNXT_TX_STATS_EXT_COS_ENTRIES,
+ BNXT_TX_STATS_EXT_PFC_ENTRIES,
};
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
-#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
static int bnxt_get_num_stats(struct bnxt *bp)
{
@@ -272,7 +340,8 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_PORT_STATS;
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT)
- num_stats += BNXT_NUM_PORT_STATS_EXT;
+ num_stats += bp->fw_rx_stats_ext_size +
+ bp->fw_tx_stats_ext_size;
return num_stats;
}
@@ -334,12 +403,17 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
}
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- __le64 *port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
+ __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
+ __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;
- for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++, j++) {
- buf[j] = le64_to_cpu(*(port_stats_ext +
+ for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
+ buf[j] = le64_to_cpu(*(rx_port_stats_ext +
bnxt_port_stats_ext_arr[i].offset));
}
+ for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
+ buf[j] = le64_to_cpu(*(tx_port_stats_ext +
+ bnxt_tx_port_stats_ext_arr[i].offset));
+ }
}
}
@@ -407,10 +481,15 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++) {
+ for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
strcpy(buf, bnxt_port_stats_ext_arr[i].string);
buf += ETH_GSTRING_LEN;
}
+ for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
+ strcpy(buf,
+ bnxt_tx_port_stats_ext_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
}
break;
case ETH_SS_TEST:
@@ -2419,11 +2498,11 @@ static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
+static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 raw_cons, int pkt_size)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ struct bnxt_rx_ring_info *rxr;
struct bnxt_sw_rx_bd *rx_buf;
struct rx_cmp *rxcmp;
u16 cp_cons, cons;
@@ -2431,6 +2510,7 @@ static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
u32 len;
int i;
+ rxr = bnapi->rx_ring;
cp_cons = RING_CMP(raw_cons);
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -2451,17 +2531,15 @@ static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
return 0;
}
-static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
+static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ int pkt_size)
{
- struct bnxt_napi *bnapi = bp->bnapi[0];
- struct bnxt_cp_ring_info *cpr;
struct tx_cmp *txcmp;
int rc = -EIO;
u32 raw_cons;
u32 cons;
int i;
- cpr = &bnapi->cp_ring;
raw_cons = cpr->cp_raw_cons;
for (i = 0; i < 200; i++) {
cons = RING_CMP(raw_cons);
@@ -2477,7 +2555,7 @@ static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
*/
dma_rmb();
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
- rc = bnxt_rx_loopback(bp, bnapi, raw_cons, pkt_size);
+ rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
raw_cons = NEXT_RAW_CMP(raw_cons);
raw_cons = NEXT_RAW_CMP(raw_cons);
break;
@@ -2491,12 +2569,14 @@ static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
static int bnxt_run_loopback(struct bnxt *bp)
{
struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+ struct bnxt_cp_ring_info *cpr;
int pkt_size, i = 0;
struct sk_buff *skb;
dma_addr_t map;
u8 *data;
int rc;
+ cpr = &txr->bnapi->cp_ring;
pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
@@ -2520,8 +2600,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
/* Sync BD data before updating doorbell */
wmb();
- bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | txr->tx_prod);
- rc = bnxt_poll_loopback(bp, pkt_size);
+ bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
+ rc = bnxt_poll_loopback(bp, cpr, pkt_size);
dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
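
The extended port counters arrive from firmware as a flat array of __le64 values; the *_OFFSET macros turn a struct field into an index into that array, and fw_rx_stats_ext_size/fw_tx_stats_ext_size (reported by firmware) bound how many entries are actually valid. A minimal sketch of reading one TX counter, assuming the DMA buffers are already allocated and populated:

/* Sketch: pull one extended TX counter out of the firmware
 * stats buffer. BNXT_TX_STATS_EXT_OFFSET() divides the struct
 * offset by 8, i.e. it indexes an array of __le64.
 */
__le64 *tx_ext = (__le64 *)bp->hw_tx_port_stats_ext;
u64 cos0_bytes = 0;

if (BNXT_TX_STATS_EXT_OFFSET(tx_bytes_cos0) < bp->fw_tx_stats_ext_size)
	cos0_bytes = le64_to_cpu(tx_ext[BNXT_TX_STATS_EXT_OFFSET(tx_bytes_cos0)]);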
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 971ace5d0d4a..5dd086059568 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -37,6 +37,8 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_HWRM_REQUEST 0x1UL
#define TLV_TYPE_HWRM_RESPONSE 0x2UL
#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
@@ -186,6 +188,7 @@ struct cmd_nums {
#define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
#define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
#define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
#define HWRM_STAT_CTX_ALLOC 0xb0UL
#define HWRM_STAT_CTX_FREE 0xb1UL
#define HWRM_STAT_CTX_QUERY 0xb2UL
@@ -235,6 +238,7 @@ struct cmd_nums {
#define HWRM_CFA_PAIR_INFO 0x10fUL
#define HWRM_FW_IPC_MSG 0x110UL
#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
#define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
@@ -295,6 +299,7 @@ struct cmd_nums {
#define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
#define HWRM_DBG_FW_CLI 0xff1aUL
#define HWRM_DBG_I2C_CMD 0xff1bUL
+ #define HWRM_DBG_RING_INFO_GET 0xff1cUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -320,20 +325,21 @@ struct cmd_nums {
/* ret_codes (size:64b/8B) */
struct ret_codes {
__le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS 0x0UL
- #define HWRM_ERR_CODE_FAIL 0x1UL
- #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
- #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
- #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
- #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
- #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
- #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
- #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
- #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
- #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
__le16 unused_0[3];
};
@@ -355,10 +361,10 @@ struct hwrm_err_output {
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 9
-#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 25
-#define HWRM_VERSION_STR "1.9.2.25"
+#define HWRM_VERSION_MINOR 10
+#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_RSVD 3
+#define HWRM_VERSION_STR "1.10.0.3"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -396,10 +402,15 @@ struct hwrm_ver_get_output {
u8 netctrl_fw_bld_8b;
u8 netctrl_fw_rsvd_8b;
__le32 dev_caps_cfg;
- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -528,6 +539,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
@@ -539,6 +551,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
@@ -652,10 +665,11 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
};
/* hwrm_func_reset_input (size:192b/24B) */
@@ -852,6 +866,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
#define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
#define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -903,6 +918,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1014,6 +1030,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
#define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
#define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1214,9 +1231,10 @@ struct hwrm_func_drv_rgtr_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1416,7 +1434,9 @@ struct hwrm_func_resource_qcaps_output {
__le16 min_hw_ring_grps;
__le16 max_hw_ring_grps;
__le16 max_tx_scheduler_inputs;
- u8 unused_0[7];
+ __le16 flags;
+ #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ u8 unused_0[5];
u8 valid;
};
@@ -1445,7 +1465,9 @@ struct hwrm_func_vf_resource_cfg_input {
__le16 max_stat_ctx;
__le16 min_hw_ring_grps;
__le16 max_hw_ring_grps;
- u8 unused_0[4];
+ __le16 flags;
+ #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ u8 unused_0[2];
};
/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
@@ -1503,7 +1525,8 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 mrav_entry_size;
__le16 tim_entry_size;
__le32 tim_max_entries;
- u8 unused_0[3];
+ u8 unused_0[2];
+ u8 tqm_entries_multiple;
u8 valid;
};
@@ -1917,6 +1940,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -1947,6 +1971,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -1964,6 +1989,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
#define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
@@ -2048,6 +2074,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -2072,6 +2099,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL
__le16 force_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
@@ -2083,6 +2111,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
u8 auto_mode;
@@ -2107,6 +2136,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
__le16 auto_link_speed_mask;
@@ -2124,6 +2154,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL
u8 wirespeed;
#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
@@ -2178,7 +2209,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -2644,7 +2679,8 @@ struct hwrm_port_qstats_ext_output {
__le16 tx_stat_size;
__le16 rx_stat_size;
__le16 total_active_cos_queues;
- u8 unused_0;
+ u8 flags;
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
u8 valid;
};
@@ -2685,7 +2721,9 @@ struct hwrm_port_clr_stats_input {
__le16 target_id;
__le64 resp_addr;
__le16 port_id;
- u8 unused_0[6];
+ u8 flags;
+ #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
+ u8 unused_0[5];
};
/* hwrm_port_clr_stats_output (size:128b/16B) */
@@ -4574,7 +4612,9 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
#define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
- u8 unused_0[3];
+ u8 unused_0;
+ __le16 flags;
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
__le64 page_tbl_addr;
__le32 fbo;
u8 page_size;
@@ -4838,13 +4878,19 @@ struct hwrm_cfa_l2_filter_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
__le32 enables;
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
@@ -4901,6 +4947,8 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_4;
@@ -4958,11 +5006,17 @@ struct hwrm_cfa_l2_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
@@ -5064,6 +5118,8 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 tunnel_flags;
@@ -5140,7 +5196,7 @@ struct hwrm_vxlan_ipv6_hdr {
__be32 dest_ip_addr[4];
};
-/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
struct hwrm_cfa_encap_data_vxlan {
u8 src_mac_addr[6];
__le16 unused_0;
@@ -5159,6 +5215,10 @@ struct hwrm_cfa_encap_data_vxlan {
__be16 src_port;
__be16 dst_port;
__be32 vni;
+ u8 hdr_rsvd0[3];
+ u8 hdr_rsvd1;
+ u8 hdr_flags;
+ u8 unused[3];
};
/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
@@ -5171,15 +5231,18 @@ struct hwrm_cfa_encap_record_alloc_input {
__le32 flags;
#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE
u8 unused_0[3];
__le32 encap_data[20];
};
@@ -5273,6 +5336,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 pri_hint;
@@ -5404,6 +5469,8 @@ struct hwrm_cfa_decap_filter_alloc_input {
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_0;
@@ -5476,19 +5543,22 @@ struct hwrm_cfa_flow_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le16 flags;
- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
__le16 src_fid;
__le32 tunnel_handle;
__le16 action_flags;
@@ -5502,6 +5572,7 @@ struct hwrm_cfa_flow_alloc_input {
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
#define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
__le16 dst_fid;
__be16 l2_rewrite_vlan_tpid;
__be16 l2_rewrite_vlan_tci;
@@ -5525,21 +5596,38 @@ struct hwrm_cfa_flow_alloc_input {
__be16 nat_port;
__be16 l2_rewrite_smac[3];
u8 ip_proto;
- u8 unused_0;
-};
-
-/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
+ u8 tunnel_type;
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+};
+
+/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
struct hwrm_cfa_flow_alloc_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le16 flow_handle;
- u8 unused_0[5];
+ u8 unused_0[2];
+ __le32 flow_id;
+ __le64 ext_flow_handle;
+ u8 unused_1[7];
u8 valid;
};
-/* hwrm_cfa_flow_free_input (size:192b/24B) */
+/* hwrm_cfa_flow_free_input (size:256b/32B) */
struct hwrm_cfa_flow_free_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5548,6 +5636,7 @@ struct hwrm_cfa_flow_free_input {
__le64 resp_addr;
__le16 flow_handle;
u8 unused_0[6];
+ __le64 ext_flow_handle;
};
/* hwrm_cfa_flow_free_output (size:256b/32B) */
@@ -5562,7 +5651,7 @@ struct hwrm_cfa_flow_free_output {
u8 valid;
};
-/* hwrm_cfa_flow_stats_input (size:320b/40B) */
+/* hwrm_cfa_flow_stats_input (size:640b/80B) */
struct hwrm_cfa_flow_stats_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5581,6 +5670,16 @@ struct hwrm_cfa_flow_stats_input {
__le16 flow_handle_8;
__le16 flow_handle_9;
u8 unused_0[2];
+ __le32 flow_id_0;
+ __le32 flow_id_1;
+ __le32 flow_id_2;
+ __le32 flow_id_3;
+ __le32 flow_id_4;
+ __le32 flow_id_5;
+ __le32 flow_id_6;
+ __le32 flow_id_7;
+ __le32 flow_id_8;
+ __le32 flow_id_9;
};
/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
@@ -5670,7 +5769,8 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE
u8 unused_0[7];
};
@@ -5698,7 +5798,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -5727,7 +5828,8 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -5932,10 +6034,11 @@ struct hwrm_fw_reset_input {
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT
u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
u8 host_idx;
u8 unused_0[5];
};
@@ -5947,10 +6050,11 @@ struct hwrm_fw_reset_output {
__le16 seq_id;
__le16 resp_len;
u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
u8 unused_0[6];
u8 valid;
};
@@ -6498,6 +6602,34 @@ struct hwrm_dbg_coredump_retrieve_output {
u8 valid;
};
+/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX
+ u8 unused_0[3];
+ __le32 fw_ring_id;
+};
+
+/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 producer_index;
+ __le32 consumer_index;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
__le16 req_type;
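
The new HWRM_DBG_RING_INFO_GET message follows the driver's usual request/response pattern. A sketch of querying a completion ring's hardware producer and consumer indices, reusing the HWRM helpers already visible in this patch (bnxt_hwrm_cmd_hdr_init, hwrm_cmd_lock); fw_ring_id and the response buffer field are assumed to be in scope:

/* Sketch: ask firmware for a ring's current prod/cons indices. */
struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_dbg_ring_info_get_input req = {0};
int rc;

bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
req.ring_type = DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL;
req.fw_ring_id = cpu_to_le32(fw_ring_id);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
	netdev_info(bp->dev, "fw ring %u: prod %u cons %u\n", fw_ring_id,
		    le32_to_cpu(resp->producer_index),
		    le32_to_cpu(resp->consumer_index));
mutex_unlock(&bp->hwrm_cmd_lock);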
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index fcd085a9853a..3962f6fd543c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1104,7 +1104,7 @@ update_vf_mac_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
struct hwrm_func_vf_cfg_input req = {0};
int rc = 0;
@@ -1122,12 +1122,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
- if (rc) {
+ if (rc && strict) {
rc = -EADDRNOTAVAIL;
netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
mac);
+ return rc;
}
- return rc;
+ return 0;
}
#else
@@ -1144,7 +1145,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
{
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
return 0;
}
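
With the new strict argument, a PF rejection of the VF MAC is fatal only when the caller insists on it; best-effort callers can proceed with the existing address. A sketch of the intended call-site split; the function name and context below are illustrative, not part of this hunk:

/* Hypothetical caller: an explicit MAC change must be strict. */
static int bnxt_change_mac_sketch(struct bnxt *bp, struct sockaddr *addr)
{
	int rc = bnxt_approve_mac(bp, addr->sa_data, true);

	if (rc)			/* -EADDRNOTAVAIL if the PF refuses */
		return rc;
	memcpy(bp->dev->dev_addr, addr->sa_data, ETH_ALEN);
	return 0;
}
/* Whereas device open would call bnxt_approve_mac(bp, addr, false)
 * and carry on even if approval fails. */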
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index e9b20cd19881..2eed9eda1195 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
-int bnxt_approve_mac(struct bnxt *, u8 *);
+int bnxt_approve_mac(struct bnxt *, u8 *, bool);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 092c817f8f11..749f63beddd8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
return 0;
}
-static void bnxt_tc_parse_vlan(struct bnxt *bp,
- struct bnxt_tc_actions *actions,
- const struct tc_action *tc_act)
+static int bnxt_tc_parse_vlan(struct bnxt *bp,
+ struct bnxt_tc_actions *actions,
+ const struct tc_action *tc_act)
{
- if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
+ switch (tcf_vlan_action(tc_act)) {
+ case TCA_VLAN_ACT_POP:
actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
- } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
+ break;
+ case TCA_VLAN_ACT_PUSH:
actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+ break;
+ default:
+ return -EOPNOTSUPP;
}
+ return 0;
}
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
@@ -134,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
/* Push/pop VLAN */
if (is_tcf_vlan(tc_act)) {
- bnxt_tc_parse_vlan(bp, actions, tc_act);
+ rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+ if (rc)
+ return rc;
continue;
}
@@ -181,7 +189,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
struct bnxt_tc_flow *flow)
{
struct flow_dissector *dissector = tc_flow_cmd->dissector;
- u16 addr_type = 0;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
@@ -191,13 +198,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
return -EOPNOTSUPP;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_dissector_key_control *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);
-
- addr_type = key->addr_type;
- }
-
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_dissector_key_basic *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
@@ -293,13 +293,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
flow->l4_mask.icmp.code = mask->code;
}
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
- struct flow_dissector_key_control *key =
- GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);
-
- addr_type = key->addr_type;
- }
-
if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
struct flow_dissector_key_ipv4_addrs *key =
GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
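
Returning -EOPNOTSUPP from bnxt_tc_parse_vlan means VLAN sub-actions other than push and pop (for example TCA_VLAN_ACT_MODIFY) now abort the flow offload at parse time instead of being silently ignored:

/* Sketch: a "vlan modify" action hits the default: case above,
 * bnxt_tc_parse_actions() propagates -EOPNOTSUPP, and the flow
 * stays in the software datapath rather than being half-offloaded.
 */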
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index e31f5d803c13..9a25c05aa571 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -209,9 +209,7 @@ struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev);
- struct bnxt_vf_rep_stats *rx_stats;
- rx_stats = &vf_rep->rx_stats;
vf_rep->rx_stats.bytes += skb->len;
vf_rep->rx_stats.packets++;
@@ -523,7 +521,8 @@ int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return 0;
}
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
int rc = 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index 38b9a75ad724..d7287651422f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -30,7 +30,8 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
bool bnxt_dev_is_vf_rep(struct net_device *dev);
int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
#else
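
Plumbing netlink_ext_ack into the eswitch mode-set path lets the driver hand a human-readable reason back to "devlink dev eswitch set". A sketch of how the new argument might be used inside bnxt_dl_eswitch_mode_set; the condition and message are illustrative assumptions:

/* Illustrative use of the new extack argument: */
if (mode_change_is_busy) {	/* hypothetical condition */
	NL_SET_ERR_MSG_MOD(extack, "eswitch mode change already in progress");
	return -EBUSY;
}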
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 0584d07c8c33..bf6de02be396 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -63,7 +63,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
tx_buf = &txr->tx_buf_ring[last_tx_cons];
rx_prod = tx_buf->rx_prod;
}
- bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rx_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rx_prod);
}
/* returns the following:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 4241ae928d4a..35564a8a48f9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -214,7 +214,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
case PHY_INTERFACE_MODE_MII:
phy_name = "external MII";
- phydev->supported &= PHY_BASIC_FEATURES;
+ phy_set_max_speed(phydev, SPEED_100);
bcmgenet_sys_writel(priv,
PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
break;
@@ -226,11 +226,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* capabilities, use that knowledge to also configure the
* Reverse MII interface correctly.
*/
- if ((dev->phydev->supported & PHY_BASIC_FEATURES) ==
- PHY_BASIC_FEATURES)
- port_ctrl = PORT_MODE_EXT_RVMII_25;
- else
+ if (dev->phydev->supported & PHY_1000BT_FEATURES)
port_ctrl = PORT_MODE_EXT_RVMII_50;
+ else
+ port_ctrl = PORT_MODE_EXT_RVMII_25;
bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
break;
@@ -321,9 +320,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
phydev->advertising = phydev->supported;
/* The internal PHY has its link interrupts routed to the
- * Ethernet MAC ISRs
+ * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+ * that prevents the signaling of link UP interrupts when
+ * the link operates at 10Mbps, so fall back to polling for
+ * those versions of GENET.
*/
- if (priv->internal_phy)
+ if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_IGNORE_INTERRUPT;
return 0;
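
Note: several conversions in this series replace hand-masking of phydev->supported with phylib helpers. phy_set_max_speed() removes every link mode above the given speed from both the supported and advertised sets, which is why the explicit `phydev->advertising = phydev->supported;` copies elsewhere in the series can also go away. Roughly:

    /* before: open-coded 10/100 restriction */
    phydev->supported &= PHY_BASIC_FEATURES;
    phydev->advertising = phydev->supported;

    /* after: phylib drops the >100Mbit/s modes for us */
    phy_set_max_speed(phydev, SPEED_100);
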
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index ef4a0c326736..5db9f4158e62 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -156,7 +156,7 @@ enum sbmac_state {
(d)->sbdma_dscrtable : (d)->f+1)
-#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
+#define NUMCACHEBLKS(x) DIV_ROUND_UP(x, SMP_CACHE_BYTES)
#define SBMAC_MAX_TXDESCR 256
#define SBMAC_MAX_RXDESCR 256
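
Note: DIV_ROUND_UP() from <linux/kernel.h> performs exactly the ((x) + n - 1) / n arithmetic the old macro open-coded, so this change is purely cosmetic; with 64-byte cache lines, NUMCACHEBLKS(100) is DIV_ROUND_UP(100, 64) == 2 either way:

    #include <linux/kernel.h>

    /* #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d)) */
    #define NUMCACHEBLKS(x) DIV_ROUND_UP(x, SMP_CACHE_BYTES)
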
@@ -299,7 +299,7 @@ static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *,
static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff);
static uint64_t sbmac_addr2reg(unsigned char *ptr);
static irqreturn_t sbmac_intr(int irq, void *dev_instance);
-static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
static void sbmac_setmulti(struct sbmac_softc *sc);
static int sbmac_init(struct platform_device *pldev, long long base);
static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed);
@@ -2028,7 +2028,7 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance)
* Return value:
* nothing
********************************************************************* */
-static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct sbmac_softc *sc = netdev_priv(dev);
unsigned long flags;
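
Note: the prototype change above is about type correctness: .ndo_start_xmit is declared to return netdev_tx_t, whose only meaningful values are NETDEV_TX_OK and NETDEV_TX_BUSY. A minimal sketch of the contract (the ring-full test is a placeholder, not the driver's real check):

    static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
    {
            bool ring_full = false;         /* placeholder for the descriptor check */

            if (ring_full) {
                    netif_stop_queue(dev);
                    return NETDEV_TX_BUSY;  /* core will requeue the skb */
            }
            /* ... hand the skb to the hardware ring ... */
            return NETDEV_TX_OK;
    }
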
@@ -2357,21 +2357,11 @@ static int sbmac_mii_probe(struct net_device *dev)
}
/* Remove any features not supported by the controller */
- phy_dev->supported &= SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_MII |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause;
+ phy_set_max_speed(phy_dev, SPEED_1000);
+ phy_support_asym_pause(phy_dev);
phy_attached_info(phy_dev);
- phy_dev->advertising = phy_dev->supported;
-
sc->phy_dev = phy_dev;
return 0;
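
Note: phy_support_asym_pause() is the helper form of the SUPPORTED_Pause | SUPPORTED_Asym_Pause bits deleted above: it records that the MAC can do symmetric and asymmetric flow control and fixes up the advertised modes, so the manual advertising copy is no longer needed. The whole block collapses to:

    phy_set_max_speed(phy_dev, SPEED_1000);  /* controller tops out at 1Gbit/s */
    phy_support_asym_pause(phy_dev);         /* rx/tx pause, independently */
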
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e6f28c7942ab..89295306f161 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1598,7 +1598,7 @@ static int tg3_mdio_init(struct tg3 *tp)
phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
- /* fallthru */
+ /* fall through */
case PHY_ID_RTL8211C:
phydev->interface = PHY_INTERFACE_MODE_RGMII;
break;
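
Note: these comment-only hunks matter because GCC's -Wimplicit-fallthrough, at the level the kernel enables, only recognizes certain comment spellings; the hunks normalize tg3 to the kernel's preferred "fall through" form. The pattern being fixed up:

    switch (cmd) {
    case SIOCGMIIPHY:
            data->phy_id = tp->phy_addr;
            /* fall through */              /* spelling matched by the warning */
    case SIOCGMIIREG:
            /* ... read the MII register ... */
            break;
    }
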
@@ -2122,16 +2122,14 @@ static int tg3_phy_init(struct tg3 *tp)
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_RGMII:
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
- phydev->supported &= (PHY_GBIT_FEATURES |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
+ phy_set_max_speed(phydev, SPEED_1000);
+ phy_support_asym_pause(phydev);
break;
}
- /* fallthru */
+ /* fall through */
case PHY_INTERFACE_MODE_MII:
- phydev->supported &= (PHY_BASIC_FEATURES |
- SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
+ phy_set_max_speed(phydev, SPEED_100);
+ phy_support_asym_pause(phydev);
break;
default:
phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
@@ -2140,8 +2138,6 @@ static int tg3_phy_init(struct tg3 *tp)
tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
- phydev->advertising = phydev->supported;
-
phy_attached_info(phydev);
return 0;
@@ -5215,7 +5211,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
ap->state = ANEG_STATE_AN_ENABLE;
- /* fallthru */
+ /* fall through */
case ANEG_STATE_AN_ENABLE:
ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
if (ap->flags & MR_AN_ENABLE) {
@@ -5245,7 +5241,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
ret = ANEG_TIMER_ENAB;
ap->state = ANEG_STATE_RESTART;
- /* fallthru */
+ /* fall through */
case ANEG_STATE_RESTART:
delta = ap->cur_time - ap->link_time;
if (delta > ANEG_STATE_SETTLE_TIME)
@@ -5288,7 +5284,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
ap->state = ANEG_STATE_ACK_DETECT;
- /* fallthru */
+ /* fall through */
case ANEG_STATE_ACK_DETECT:
if (ap->ack_match != 0) {
if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
@@ -12496,31 +12492,24 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
tg3_warn_mgmt_link_flap(tp);
if (tg3_flag(tp, USE_PHYLIB)) {
- u32 newadv;
struct phy_device *phydev;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
- if (!(phydev->supported & SUPPORTED_Pause) ||
- (!(phydev->supported & SUPPORTED_Asym_Pause) &&
- (epause->rx_pause != epause->tx_pause)))
+ if (!phy_validate_pause(phydev, epause))
return -EINVAL;
tp->link_config.flowctrl = 0;
+ phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
if (epause->rx_pause) {
tp->link_config.flowctrl |= FLOW_CTRL_RX;
if (epause->tx_pause) {
tp->link_config.flowctrl |= FLOW_CTRL_TX;
- newadv = ADVERTISED_Pause;
- } else
- newadv = ADVERTISED_Pause |
- ADVERTISED_Asym_Pause;
+ }
} else if (epause->tx_pause) {
tp->link_config.flowctrl |= FLOW_CTRL_TX;
- newadv = ADVERTISED_Asym_Pause;
- } else
- newadv = 0;
+ }
if (epause->autoneg)
tg3_flag_set(tp, PAUSE_AUTONEG);
@@ -12528,33 +12517,19 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
tg3_flag_clear(tp, PAUSE_AUTONEG);
if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
- u32 oldadv = phydev->advertising &
- (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
- if (oldadv != newadv) {
- phydev->advertising &=
- ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- phydev->advertising |= newadv;
- if (phydev->autoneg) {
- /*
- * Always renegotiate the link to
- * inform our link partner of our
- * flow control settings, even if the
- * flow control is forced. Let
- * tg3_adjust_link() do the final
- * flow control setup.
- */
- return phy_start_aneg(phydev);
- }
+ if (phydev->autoneg) {
+ /* phy_set_asym_pause() will
+ * renegotiate the link to inform our
+ * link partner of our flow control
+ * settings, even if the flow control
+ * is forced. Let tg3_adjust_link()
+ * do the final flow control setup.
+ */
+ return 0;
}
if (!epause->autoneg)
tg3_setup_flow_control(tp, 0, 0);
- } else {
- tp->link_config.advertising &=
- ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- tp->link_config.advertising |= newadv;
}
} else {
int irq_sync = 0;
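
Note: the tg3 pause path now leans on two phylib helpers: phy_validate_pause() rejects combinations the PHY cannot advertise (such as tx-only pause without asym-pause support), and phy_set_asym_pause() rewrites the pause advertisement and restarts autonegotiation itself when it is enabled, which is why the explicit phy_start_aneg() call above could be dropped. A hedged sketch with a hypothetical driver hook:

    static int foo_set_pauseparam(struct net_device *dev,
                                  struct ethtool_pauseparam *epause)
    {
            struct phy_device *phydev = dev->phydev;

            if (!phy_validate_pause(phydev, epause))
                    return -EINVAL;

            /* updates the advertisement; renegotiates if aneg is on */
            phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
            return 0;
    }
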
@@ -14013,7 +13988,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIPHY:
data->phy_id = tp->phy_addr;
- /* fallthru */
+ /* fall through */
case SIOCGMIIREG: {
u32 mii_regval;
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index bba81735ce87..6d2d4527357c 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1797,7 +1797,7 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
/* A separate queue to allow synchronous setting of a list of MACs */
INIT_LIST_HEAD(&ucam_mod->del_q);
- for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++)
+ for (; i < (bna->ioceth.attr.num_ucmac * 2); i++)
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
ucam_mod->bna = bna;
@@ -1832,7 +1832,7 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
/* A separate queue to allow synchronous setting of a list of MACs */
INIT_LIST_HEAD(&mcam_mod->del_q);
- for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++)
+ for (; i < (bna->ioceth.attr.num_mcmac * 2); i++)
list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
mcam_mod->bna = bna;
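
Note: `for (i = i; ...)` assigns i to itself, a no-op that newer compilers flag; the intent was to keep counting from wherever the previous loop left off, which the empty first clause expresses. Schematically (the first loop is paraphrased from the surrounding driver code):

    int i;

    for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
            list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);

    /* continue from the value i already holds */
    for (; i < (bna->ioceth.attr.num_ucmac * 2); i++)
            list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
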
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 16e4ef7d7185..8f5bf9166c11 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -544,14 +544,13 @@ static int macb_mii_probe(struct net_device *dev)
/* mask with MAC supported features */
if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
- phydev->supported &= PHY_GBIT_FEATURES;
+ phy_set_max_speed(phydev, SPEED_1000);
else
- phydev->supported &= PHY_BASIC_FEATURES;
+ phy_set_max_speed(phydev, SPEED_100);
if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
- phydev->supported &= ~SUPPORTED_1000baseT_Half;
-
- phydev->advertising = phydev->supported;
+ phy_remove_link_mode(phydev,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
bp->link = 0;
bp->speed = 0;
@@ -2160,6 +2159,7 @@ static void macb_configure_dma(struct macb *bp)
else
dmacfg &= ~GEM_BIT(TXCOEN);
+ dmacfg &= ~GEM_BIT(ADDR64);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
dmacfg |= GEM_BIT(ADDR64);
@@ -3837,6 +3837,13 @@ static const struct macb_config at91sam9260_config = {
.init = macb_init,
};
+static const struct macb_config sama5d3macb_config = {
+ .caps = MACB_CAPS_SG_DISABLED
+ | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+};
+
static const struct macb_config pc302gem_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
.dma_burst_length = 16,
@@ -3904,6 +3911,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,gem", .data = &pc302gem_config },
{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+ { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
@@ -4148,8 +4156,7 @@ static int macb_remove(struct platform_device *pdev)
static int __maybe_unused macb_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *netdev = platform_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
netif_carrier_off(netdev);
@@ -4171,8 +4178,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
static int __maybe_unused macb_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *netdev = platform_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
if (bp->wol & MACB_WOL_ENABLED) {
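
Note: for a platform device, dev_get_drvdata(dev) returns the same pointer as platform_get_drvdata(to_platform_device(dev)); platform_get_drvdata() is just dev_get_drvdata() on the embedded struct device, so the suspend/resume hooks can take the direct route:

    /* before */
    struct platform_device *pdev = to_platform_device(dev);
    struct net_device *netdev = platform_get_drvdata(pdev);

    /* after: identical drvdata, one indirection fewer */
    struct net_device *netdev = dev_get_drvdata(dev);
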
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index 962bb62933db..fda49404968c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -616,7 +616,7 @@ static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
{
struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
- u32 rings_per_vf, ring_flag;
+ u32 rings_per_vf;
u64 reg_val;
if (octeon_map_pci_barx(oct, 0, 0))
@@ -634,8 +634,6 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
- ring_flag = 0;
-
cn23xx->conf = oct_get_config_info(oct, LIO_23XX);
if (!cn23xx->conf) {
dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n",
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 8093c5eafea2..825a28e5b544 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -32,38 +32,6 @@
#define OCTNIC_MAX_SG MAX_SKB_FRAGS
/**
- * \brief Callback for getting interface configuration
- * @param status status of request
- * @param buf pointer to resp structure
- */
-void lio_if_cfg_callback(struct octeon_device *oct,
- u32 status __attribute__((unused)), void *buf)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- struct liquidio_if_cfg_context *ctx;
- struct liquidio_if_cfg_resp *resp;
-
- resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
-
- oct = lio_get_device(ctx->octeon_id);
- if (resp->status)
- dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
- CVM_CAST64(resp->status));
- WRITE_ONCE(ctx->cond, 1);
-
- snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
- resp->cfg_info.liquidio_firmware_version);
-
- /* This barrier is required to be sure that the response has been
- * written fully before waking up the handler
- */
- wmb();
-
- wake_up_interruptible(&ctx->wc);
-}
-
-/**
* \brief Delete gather lists
* @param lio per-network private data
*/
@@ -198,14 +166,15 @@ int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
nctrl.ncmd.s.cmd = cmd;
nctrl.ncmd.s.param1 = param1;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
ret);
+ if (ret > 0)
+ ret = -EIO;
}
return ret;
}
@@ -285,15 +254,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
struct octeon_device *oct = lio->oct_dev;
u8 *mac;
- if (nctrl->completion && nctrl->response_code) {
- /* Signal whoever is interested that the response code from the
- * firmware has arrived.
- */
- WRITE_ONCE(*nctrl->response_code, nctrl->status);
- complete(nctrl->completion);
- }
-
- if (nctrl->status)
+ if (nctrl->sc_status)
return;
switch (nctrl->ncmd.s.cmd) {
@@ -464,56 +425,73 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
*/
}
+void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ struct net_device *netdev = oct->props[0].netdev;
+ struct lio *lio = GET_LIO(netdev);
+ struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];
+
+ queue_delayed_work(wq->wq, &wq->wk.work,
+ msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+}
+
static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
{
struct cavium_wk *wk = (struct cavium_wk *)work;
struct lio *lio = (struct lio *)wk->ctxptr;
struct octeon_device *oct = lio->oct_dev;
- struct octeon_droq *droq;
- int q, q_no = 0;
+ int q_no = wk->ctxul;
+ struct octeon_droq *droq = oct->droq[q_no];
- if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
- for (q = 0; q < lio->linfo.num_rxpciq; q++) {
- q_no = lio->linfo.rxpciq[q].s.q_no;
- droq = oct->droq[q_no];
- if (!droq)
- continue;
- octeon_droq_check_oom(droq);
- }
- }
- queue_delayed_work(lio->rxq_status_wq.wq,
- &lio->rxq_status_wq.wk.work,
- msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
+ return;
+
+ if (octeon_retry_droq_refill(droq))
+ octeon_schedule_rxq_oom_work(oct, droq);
}
int setup_rx_oom_poll_fn(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ struct cavium_wq *wq;
+ int q, q_no;
- lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
- WQ_MEM_RECLAIM, 0);
- if (!lio->rxq_status_wq.wq) {
- dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
- return -ENOMEM;
+ for (q = 0; q < oct->num_oqs; q++) {
+ q_no = lio->linfo.rxpciq[q].s.q_no;
+ wq = &lio->rxq_status_wq[q_no];
+ wq->wq = alloc_workqueue("rxq-oom-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!wq->wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&wq->wk.work,
+ octnet_poll_check_rxq_oom_status);
+ wq->wk.ctxptr = lio;
+ wq->wk.ctxul = q_no;
}
- INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
- octnet_poll_check_rxq_oom_status);
- lio->rxq_status_wq.wk.ctxptr = lio;
- queue_delayed_work(lio->rxq_status_wq.wq,
- &lio->rxq_status_wq.wk.work,
- msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+
return 0;
}
void cleanup_rx_oom_poll_fn(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
-
- if (lio->rxq_status_wq.wq) {
- cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
- flush_workqueue(lio->rxq_status_wq.wq);
- destroy_workqueue(lio->rxq_status_wq.wq);
+ struct octeon_device *oct = lio->oct_dev;
+ struct cavium_wq *wq;
+ int q_no;
+
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ wq = &lio->rxq_status_wq[q_no];
+ if (wq->wq) {
+ cancel_delayed_work_sync(&wq->wk.work);
+ flush_workqueue(wq->wq);
+ destroy_workqueue(wq->wq);
+ wq->wq = NULL;
+ }
}
}
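
Note: the OOM poller thus moves from one delayed work that scanned every RX queue to one work item per queue, re-armed only when a refill attempt actually fails. Condensed to the bare workqueue lifecycle (struct names as in the driver):

    /* setup, per RX queue */
    wq->wq = alloc_workqueue("rxq-oom-status", WQ_MEM_RECLAIM, 0);
    INIT_DELAYED_WORK(&wq->wk.work, octnet_poll_check_rxq_oom_status);

    /* armed only when octeon_retry_droq_refill() reports a failed refill */
    queue_delayed_work(wq->wq, &wq->wk.work,
                       msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));

    /* teardown, per RX queue */
    cancel_delayed_work_sync(&wq->wk.work);
    destroy_workqueue(wq->wq);
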
@@ -1218,30 +1196,6 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
return 0;
}
-static void liquidio_change_mtu_completion(struct octeon_device *oct,
- u32 status, void *buf)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- struct liquidio_if_cfg_context *ctx;
-
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
-
- if (status) {
- dev_err(&oct->pci_dev->dev, "MTU change failed. Status: %llx\n",
- CVM_CAST64(status));
- WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_FAIL);
- } else {
- WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_SUCCESS);
- }
-
- /* This barrier is required to be sure that the response has been
- * written fully before waking up the handler
- */
- wmb();
-
- wake_up_interruptible(&ctx->wc);
-}
-
/**
* \brief Net device change_mtu
* @param netdev network device
@@ -1250,22 +1204,17 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- struct liquidio_if_cfg_context *ctx;
struct octeon_soft_command *sc;
union octnet_cmd *ncmd;
- int ctx_size;
int ret = 0;
- ctx_size = sizeof(struct liquidio_if_cfg_context);
sc = (struct octeon_soft_command *)
- octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, ctx_size);
+ octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
ncmd = (union octnet_cmd *)sc->virtdptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
- WRITE_ONCE(ctx->cond, 0);
- ctx->octeon_id = lio_get_device_id(oct);
- init_waitqueue_head(&ctx->wc);
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
ncmd->u64 = 0;
ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
@@ -1278,28 +1227,28 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_CMD, 0, 0, 0);
- sc->callback = liquidio_change_mtu_completion;
- sc->callback_arg = sc;
- sc->wait_time = 100;
-
ret = octeon_send_soft_command(oct, sc);
if (ret == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
+ octeon_free_soft_command(oct, sc);
return -EINVAL;
}
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out.
*/
- if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR ||
- ctx->cond == LIO_CHANGE_MTU_FAIL) {
- octeon_free_soft_command(oct, sc);
+ ret = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (ret)
+ return ret;
+
+ if (sc->sc_status) {
+ WRITE_ONCE(sc->caller_is_done, true);
return -EINVAL;
}
netdev->mtu = new_mtu;
lio->mtu = new_mtu;
- octeon_free_soft_command(oct, sc);
+ WRITE_ONCE(sc->caller_is_done, true);
return 0;
}
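
Note: throughout this series, per-command context structs with private wait queues give way to a struct completion embedded in the soft command. Stripped of the liquidio-specific wait_for_sc_completion_timeout() wrapper, the underlying kernel pattern is:

    #include <linux/completion.h>

    struct completion done;

    init_completion(&done);
    /* ... submit the request; the response handler calls complete(&done) ... */

    if (!wait_for_completion_timeout(&done, msecs_to_jiffies(100)))
            return -ETIMEDOUT;      /* returns 0 when the timeout elapses */
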
@@ -1333,8 +1282,6 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev,
struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
struct oct_nic_stats_resp *resp =
(struct oct_nic_stats_resp *)sc->virtrptr;
- struct oct_nic_stats_ctrl *ctrl =
- (struct oct_nic_stats_ctrl *)sc->ctxptr;
struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
@@ -1422,93 +1369,148 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev,
resp->status = 1;
} else {
+ dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
resp->status = -1;
}
- complete(&ctrl->complete);
}
-int octnet_get_link_stats(struct net_device *netdev)
+static int lio_fetch_vf_stats(struct lio *lio)
{
- struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
struct octeon_soft_command *sc;
- struct oct_nic_stats_ctrl *ctrl;
- struct oct_nic_stats_resp *resp;
+ struct oct_nic_vf_stats_resp *resp;
+
int retval;
/* Alloc soft command */
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct_dev,
0,
- sizeof(struct oct_nic_stats_resp),
- sizeof(struct octnic_ctrl_pkt));
+ sizeof(struct oct_nic_vf_stats_resp),
+ 0);
- if (!sc)
- return -ENOMEM;
+ if (!sc) {
+ dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
+ retval = -ENOMEM;
+ goto lio_fetch_vf_stats_exit;
+ }
- resp = (struct oct_nic_stats_resp *)sc->virtrptr;
- memset(resp, 0, sizeof(struct oct_nic_stats_resp));
+ resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));
- ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
- memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
- ctrl->netdev = netdev;
- init_completion(&ctrl->complete);
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
sc->iq_no = lio->linfo.txpciq[0].s.q_no;
octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
- OPCODE_NIC_PORT_STATS, 0, 0, 0);
-
- sc->callback = octnet_nic_stats_callback;
- sc->callback_arg = sc;
- sc->wait_time = 500; /*in milli seconds*/
+ OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);
retval = octeon_send_soft_command(oct_dev, sc);
if (retval == IQ_SEND_FAILED) {
octeon_free_soft_command(oct_dev, sc);
- return -EINVAL;
+ goto lio_fetch_vf_stats_exit;
}
- wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
+ retval =
+ wait_for_sc_completion_timeout(oct_dev, sc,
+ (2 * LIO_SC_MAX_TMO_MS));
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev,
+ "sc OPCODE_NIC_VF_PORT_STATS command failed\n");
+ goto lio_fetch_vf_stats_exit;
+ }
- if (resp->status != 1) {
- octeon_free_soft_command(oct_dev, sc);
+ if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
+ octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
+ (sizeof(u64)) >> 3);
- return -EINVAL;
+ if (resp->spoofmac_cnt != 0) {
+ dev_warn(&oct_dev->pci_dev->dev,
+ "%llu Spoofed packets detected\n",
+ resp->spoofmac_cnt);
+ }
}
+ WRITE_ONCE(sc->caller_is_done, 1);
- octeon_free_soft_command(oct_dev, sc);
-
- return 0;
+lio_fetch_vf_stats_exit:
+ return retval;
}
-static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct,
- u32 status,
- void *buf)
+void lio_fetch_stats(struct work_struct *work)
{
- struct liquidio_nic_seapi_ctl_context *ctx;
- struct octeon_soft_command *sc = buf;
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = wk->ctxptr;
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_nic_stats_resp *resp;
+ unsigned long time_in_jiffies;
+ int retval;
+
+ if (OCTEON_CN23XX_PF(oct_dev)) {
+ /* report spoofchk every 2 seconds */
+ if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
+ (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
+ oct_dev->sriov_info.num_vfs_alloced) {
+ lio_fetch_vf_stats(lio);
+ }
- ctx = sc->ctxptr;
+ oct_dev->vfstats_poll++;
+ }
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_nic_stats_resp),
+ 0);
+
+ if (!sc) {
+ dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
+ goto lio_fetch_stats_exit;
+ }
+
+ resp = (struct oct_nic_stats_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_stats_resp));
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
- oct = lio_get_device(ctx->octeon_id);
- if (status) {
- dev_err(&oct->pci_dev->dev, "%s: instruction failed. Status: %llx\n",
- __func__,
- CVM_CAST64(status));
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_PORT_STATS, 0, 0, 0);
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ goto lio_fetch_stats_exit;
+ }
+
+ retval = wait_for_sc_completion_timeout(oct_dev, sc,
+ (2 * LIO_SC_MAX_TMO_MS));
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
+ goto lio_fetch_stats_exit;
}
- ctx->status = status;
- complete(&ctx->complete);
+
+ octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
+ WRITE_ONCE(sc->caller_is_done, true);
+
+lio_fetch_stats_exit:
+ time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
+ if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
+ schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);
+
+ return;
}
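
Note: lio_fetch_stats() re-arms itself at the end of each run, the usual way to build a poll loop out of delayed work without a dedicated thread; polling stops on its own once the interface leaves the RUNNING state. Schematically (names other than schedule_delayed_work() are placeholders):

    static void stats_poll_fn(struct work_struct *work)
    {
            /* ... fetch one round of statistics from firmware ... */

            if (still_running)      /* e.g. ifstate_check(lio, LIO_IFSTATE_RUNNING) */
                    schedule_delayed_work(&stats_work,
                                          msecs_to_jiffies(POLL_MS));
    }
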
int liquidio_set_speed(struct lio *lio, int speed)
{
- struct liquidio_nic_seapi_ctl_context *ctx;
struct octeon_device *oct = lio->oct_dev;
struct oct_nic_seapi_resp *resp;
struct octeon_soft_command *sc;
union octnet_cmd *ncmd;
- u32 ctx_size;
int retval;
u32 var;
@@ -1521,21 +1523,18 @@ int liquidio_set_speed(struct lio *lio, int speed)
return -EOPNOTSUPP;
}
- ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
sizeof(struct oct_nic_seapi_resp),
- ctx_size);
+ 0);
if (!sc)
return -ENOMEM;
ncmd = sc->virtdptr;
- ctx = sc->ctxptr;
resp = sc->virtrptr;
memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
- ctx->octeon_id = lio_get_device_id(oct);
- ctx->status = 0;
- init_completion(&ctx->complete);
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
ncmd->u64 = 0;
ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
@@ -1548,30 +1547,24 @@ int liquidio_set_speed(struct lio *lio, int speed)
octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
- sc->callback = liquidio_nic_seapi_ctl_callback;
- sc->callback_arg = sc;
- sc->wait_time = 5000;
-
retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) {
dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
+ octeon_free_soft_command(oct, sc);
retval = -EBUSY;
} else {
/* Wait for response or timeout */
- if (wait_for_completion_timeout(&ctx->complete,
- msecs_to_jiffies(10000)) == 0) {
- dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
- __func__);
- octeon_free_soft_command(oct, sc);
- return -EINTR;
- }
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return retval;
retval = resp->status;
if (retval) {
dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
__func__, retval);
- octeon_free_soft_command(oct, sc);
+ WRITE_ONCE(sc->caller_is_done, true);
+
return -EIO;
}
@@ -1583,38 +1576,32 @@ int liquidio_set_speed(struct lio *lio, int speed)
}
oct->speed_setting = var;
+ WRITE_ONCE(sc->caller_is_done, true);
}
- octeon_free_soft_command(oct, sc);
-
return retval;
}
int liquidio_get_speed(struct lio *lio)
{
- struct liquidio_nic_seapi_ctl_context *ctx;
struct octeon_device *oct = lio->oct_dev;
struct oct_nic_seapi_resp *resp;
struct octeon_soft_command *sc;
union octnet_cmd *ncmd;
- u32 ctx_size;
int retval;
- ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
sizeof(struct oct_nic_seapi_resp),
- ctx_size);
+ 0);
if (!sc)
return -ENOMEM;
ncmd = sc->virtdptr;
- ctx = sc->ctxptr;
resp = sc->virtrptr;
memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
- ctx->octeon_id = lio_get_device_id(oct);
- ctx->status = 0;
- init_completion(&ctx->complete);
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
ncmd->u64 = 0;
ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
@@ -1626,37 +1613,20 @@ int liquidio_get_speed(struct lio *lio)
octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
- sc->callback = liquidio_nic_seapi_ctl_callback;
- sc->callback_arg = sc;
- sc->wait_time = 5000;
-
retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) {
dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
- oct->no_speed_setting = 1;
- oct->speed_setting = 25;
-
- retval = -EBUSY;
+ octeon_free_soft_command(oct, sc);
+ retval = -EIO;
} else {
- if (wait_for_completion_timeout(&ctx->complete,
- msecs_to_jiffies(10000)) == 0) {
- dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
- __func__);
-
- oct->speed_setting = 25;
- oct->no_speed_setting = 1;
-
- octeon_free_soft_command(oct, sc);
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return retval;
- return -EINTR;
- }
retval = resp->status;
if (retval) {
dev_err(&oct->pci_dev->dev,
"%s failed retval=%d\n", __func__, retval);
- oct->no_speed_setting = 1;
- oct->speed_setting = 25;
- octeon_free_soft_command(oct, sc);
retval = -EIO;
} else {
u32 var;
@@ -1664,16 +1634,171 @@ int liquidio_get_speed(struct lio *lio)
var = be32_to_cpu((__force __be32)resp->speed);
oct->speed_setting = var;
if (var == 0xffff) {
- oct->no_speed_setting = 1;
/* unable to access boot variables
* get the default value based on the NIC type
*/
- oct->speed_setting = 25;
+ if (oct->subsystem_id ==
+ OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id ==
+ OCTEON_CN2360_25GB_SUBSYS_ID) {
+ oct->no_speed_setting = 1;
+ oct->speed_setting = 25;
+ } else {
+ oct->speed_setting = 10;
+ }
}
+
}
+ WRITE_ONCE(sc->caller_is_done, true);
+ }
+
+ return retval;
+}
+
+int liquidio_set_fec(struct lio *lio, int on_off)
+{
+ struct oct_nic_seapi_resp *resp;
+ struct octeon_soft_command *sc;
+ struct octeon_device *oct;
+ union octnet_cmd *ncmd;
+ int retval;
+ u32 var;
+
+ oct = lio->oct_dev;
+
+ if (oct->props[lio->ifidx].fec == on_off)
+ return 0;
+
+ if (!OCTEON_CN23XX_PF(oct)) {
+ dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n",
+ __func__);
+ return -1;
+ }
+
+ if (oct->speed_boot != 25) {
+ dev_err(&oct->pci_dev->dev,
+ "Set FEC only when link speed is 25G during insmod\n");
+ return -1;
+ }
+
+ sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ sizeof(struct oct_nic_seapi_resp), 0);
+
+ ncmd = sc->virtdptr;
+ resp = sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = SEAPI_CMD_FEC_SET;
+ ncmd->s.param1 = on_off;
+ /* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
+ octeon_free_soft_command(oct, sc);
+ return -EIO;
+ }
+
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return -EIO;
+
+ var = be32_to_cpu(resp->fec_setting);
+ resp->fec_setting = var;
+ if (var != on_off) {
+ dev_err(&oct->pci_dev->dev,
+ "Setting failed fec= %x, expect %x\n",
+ var, on_off);
+ oct->props[lio->ifidx].fec = var;
+ if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
+ oct->props[lio->ifidx].fec = 1;
+ else
+ oct->props[lio->ifidx].fec = 0;
+ }
+
+ WRITE_ONCE(sc->caller_is_done, true);
+
+ if (oct->props[lio->ifidx].fec !=
+ oct->props[lio->ifidx].fec_boot) {
+ dev_dbg(&oct->pci_dev->dev,
+ "Reload driver to change fec to %s\n",
+ oct->props[lio->ifidx].fec ? "on" : "off");
+ }
+
+ return retval;
+}
+
+int liquidio_get_fec(struct lio *lio)
+{
+ struct oct_nic_seapi_resp *resp;
+ struct octeon_soft_command *sc;
+ struct octeon_device *oct;
+ union octnet_cmd *ncmd;
+ int retval;
+ u32 var;
+
+ oct = lio->oct_dev;
+
+ sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ sizeof(struct oct_nic_seapi_resp), 0);
+ if (!sc)
+ return -ENOMEM;
+
+ ncmd = sc->virtdptr;
+ resp = sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = SEAPI_CMD_FEC_GET;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_info(&oct->pci_dev->dev,
+ "%s: Failed to send soft command\n", __func__);
+ octeon_free_soft_command(oct, sc);
+ return -EIO;
}
- octeon_free_soft_command(oct, sc);
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return retval;
+
+ var = be32_to_cpu(resp->fec_setting);
+ resp->fec_setting = var;
+ if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
+ oct->props[lio->ifidx].fec = 1;
+ else
+ oct->props[lio->ifidx].fec = 0;
+
+ WRITE_ONCE(sc->caller_is_done, true);
+
+ if (oct->props[lio->ifidx].fec !=
+ oct->props[lio->ifidx].fec_boot) {
+ dev_dbg(&oct->pci_dev->dev,
+ "Reload driver to change fec to %s\n",
+ oct->props[lio->ifidx].fec ? "on" : "off");
+ }
return retval;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 8e05afd5e39c..4c3925af53bc 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -33,25 +33,12 @@
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
-struct oct_intrmod_context {
- int octeon_id;
- wait_queue_head_t wc;
- int cond;
- int status;
-};
-
struct oct_intrmod_resp {
u64 rh;
struct oct_intrmod_cfg intrmod;
u64 status;
};
-struct oct_mdio_cmd_context {
- int octeon_id;
- wait_queue_head_t wc;
- int cond;
-};
-
struct oct_mdio_cmd_resp {
u64 rh;
struct oct_mdio_cmd resp;
@@ -257,6 +244,7 @@ static int lio_get_link_ksettings(struct net_device *netdev,
linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
+ ecmd->base.transceiver = XCVR_EXTERNAL;
} else {
dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
linfo->link.s.if_mode);
@@ -290,10 +278,12 @@ static int lio_get_link_ksettings(struct net_device *netdev,
10000baseCR_Full);
}
- if (oct->no_speed_setting == 0)
+ if (oct->no_speed_setting == 0) {
liquidio_get_speed(lio);
- else
+ liquidio_get_fec(lio);
+ } else {
oct->speed_setting = 25;
+ }
if (oct->speed_setting == 10) {
ethtool_link_ksettings_add_link_mode
@@ -317,6 +307,24 @@ static int lio_get_link_ksettings(struct net_device *netdev,
(ecmd, advertising,
25000baseCR_Full);
}
+
+ if (oct->no_speed_setting)
+ break;
+
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, FEC_NONE);
+ /* FEC_OFF */
+ if (oct->props[lio->ifidx].fec == 1) {
+ /* ETHTOOL_FEC_RS */
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising, FEC_RS);
+ } else {
+ /* ETHTOOL_FEC_OFF */
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising, FEC_NONE);
+ }
} else { /* VF */
if (linfo->link.s.speed == 10000) {
ethtool_link_ksettings_add_link_mode
@@ -472,12 +480,11 @@ lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
nctrl.ncmd.s.param1 = num_queues;
nctrl.ncmd.s.param2 = num_queues;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
ret);
return -1;
@@ -708,13 +715,13 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
nctrl.ncmd.s.param1 = addr;
nctrl.ncmd.s.param2 = val;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to configure gpio value, ret=%d\n", ret);
return -EINVAL;
}
@@ -734,41 +741,19 @@ static int octnet_id_active(struct net_device *netdev, int val)
nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
nctrl.ncmd.s.param1 = val;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to configure gpio value, ret=%d\n", ret);
return -EINVAL;
}
return 0;
}
-/* Callback for when mdio command response arrives
- */
-static void octnet_mdio_resp_callback(struct octeon_device *oct,
- u32 status,
- void *buf)
-{
- struct oct_mdio_cmd_context *mdio_cmd_ctx;
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
-
- mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
-
- oct = lio_get_device(mdio_cmd_ctx->octeon_id);
- if (status) {
- dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
- CVM_CAST64(status));
- WRITE_ONCE(mdio_cmd_ctx->cond, -1);
- } else {
- WRITE_ONCE(mdio_cmd_ctx->cond, 1);
- }
- wake_up_interruptible(&mdio_cmd_ctx->wc);
-}
-
/* This routine provides PHY access routines for
* mdio clause45 .
*/
@@ -778,25 +763,20 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
struct octeon_device *oct_dev = lio->oct_dev;
struct octeon_soft_command *sc;
struct oct_mdio_cmd_resp *mdio_cmd_rsp;
- struct oct_mdio_cmd_context *mdio_cmd_ctx;
struct oct_mdio_cmd *mdio_cmd;
int retval = 0;
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct_dev,
sizeof(struct oct_mdio_cmd),
- sizeof(struct oct_mdio_cmd_resp),
- sizeof(struct oct_mdio_cmd_context));
+ sizeof(struct oct_mdio_cmd_resp), 0);
if (!sc)
return -ENOMEM;
- mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
- WRITE_ONCE(mdio_cmd_ctx->cond, 0);
- mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
mdio_cmd->op = op;
mdio_cmd->mdio_addr = loc;
if (op)
@@ -808,42 +788,40 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
0, 0, 0);
- sc->wait_time = 1000;
- sc->callback = octnet_mdio_resp_callback;
- sc->callback_arg = sc;
-
- init_waitqueue_head(&mdio_cmd_ctx->wc);
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
retval = octeon_send_soft_command(oct_dev, sc);
-
if (retval == IQ_SEND_FAILED) {
dev_err(&oct_dev->pci_dev->dev,
"octnet_mdio45_access instruction failed status: %x\n",
retval);
- retval = -EBUSY;
+ octeon_free_soft_command(oct_dev, sc);
+ return -EBUSY;
} else {
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived
*/
- sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
+ retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
+ if (retval)
+ return retval;
+
retval = mdio_cmd_rsp->status;
if (retval) {
- dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
- retval = -EBUSY;
- } else {
- octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
- sizeof(struct oct_mdio_cmd) / 8);
-
- if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
- if (!op)
- *value = mdio_cmd_rsp->resp.value1;
- } else {
- retval = -EINVAL;
- }
+ dev_err(&oct_dev->pci_dev->dev,
+ "octnet mdio45 access failed: %x\n", retval);
+ WRITE_ONCE(sc->caller_is_done, true);
+ return -EBUSY;
}
- }
- octeon_free_soft_command(oct_dev, sc);
+ octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
+ sizeof(struct oct_mdio_cmd) / 8);
+
+ if (!op)
+ *value = mdio_cmd_rsp->resp.value1;
+
+ WRITE_ONCE(sc->caller_is_done, true);
+ }
return retval;
}
@@ -1007,8 +985,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
static int lio_23xx_reconfigure_queue_count(struct lio *lio)
{
struct octeon_device *oct = lio->oct_dev;
- struct liquidio_if_cfg_context *ctx;
- u32 resp_size, ctx_size, data_size;
+ u32 resp_size, data_size;
struct liquidio_if_cfg_resp *resp;
struct octeon_soft_command *sc;
union oct_nic_if_cfg if_cfg;
@@ -1018,11 +995,10 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio)
int j;
resp_size = sizeof(struct liquidio_if_cfg_resp);
- ctx_size = sizeof(struct liquidio_if_cfg_context);
data_size = sizeof(struct lio_version);
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, data_size,
- resp_size, ctx_size);
+ resp_size, 0);
if (!sc) {
dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
__func__);
@@ -1030,7 +1006,6 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio)
}
resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
vdata = (struct lio_version *)sc->virtdptr;
vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
@@ -1038,9 +1013,6 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio)
vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
ifidx_or_pfnum = oct->pf_num;
- WRITE_ONCE(ctx->cond, 0);
- ctx->octeon_id = lio_get_device_id(oct);
- init_waitqueue_head(&ctx->wc);
if_cfg.u64 = 0;
if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
@@ -1052,27 +1024,29 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio)
octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_QCOUNT_UPDATE, 0,
if_cfg.u64, 0);
- sc->callback = lio_if_cfg_callback;
- sc->callback_arg = sc;
- sc->wait_time = LIO_IFCFG_WAIT_TIME;
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) {
dev_err(&oct->pci_dev->dev,
- "iq/oq config failed status: %x\n",
+ "Sending iq/oq config failed status: %x\n",
retval);
- goto qcount_update_fail;
+ octeon_free_soft_command(oct, sc);
+ return -EIO;
}
- if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
- dev_err(&oct->pci_dev->dev, "Wait interrupted\n");
- return -1;
- }
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return retval;
retval = resp->status;
if (retval) {
- dev_err(&oct->pci_dev->dev, "iq/oq config failed\n");
- goto qcount_update_fail;
+ dev_err(&oct->pci_dev->dev,
+ "iq/oq config failed: %x\n", retval);
+ WRITE_ONCE(sc->caller_is_done, true);
+ return -1;
}
octeon_swap_8B_data((u64 *)(&resp->cfg_info),
@@ -1097,16 +1071,12 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio)
lio->txq = lio->linfo.txpciq[0].s.q_no;
lio->rxq = lio->linfo.rxpciq[0].s.q_no;
- octeon_free_soft_command(oct, sc);
dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
lio->linfo.num_rxpciq);
- return 0;
-
-qcount_update_fail:
- octeon_free_soft_command(oct, sc);
+ WRITE_ONCE(sc->caller_is_done, true);
- return -1;
+ return 0;
}
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
@@ -1166,6 +1136,8 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
* steps like updating sriov_info for the octeon device need to be done.
*/
if (queue_count_update) {
+ cleanup_rx_oom_poll_fn(netdev);
+
lio_delete_glists(lio);
/* Delete mbox for PF which is SRIOV disabled because sriov_info
@@ -1265,6 +1237,11 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
return -1;
}
+ if (setup_rx_oom_poll_fn(netdev)) {
+ dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n");
+ return 1;
+ }
+
/* Send firmware the information about new number of queues
* if the interface is a VF or a PF that is SRIOV enabled.
*/
@@ -1412,7 +1389,6 @@ lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
@@ -1433,8 +1409,9 @@ lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
}
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
- dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to set pause parameter, ret=%d\n", ret);
return -EINVAL;
}
@@ -1764,7 +1741,8 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
*/
data[i++] = lstats.rx_dropped;
/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
- data[i++] = lstats.tx_dropped;
+ data[i++] = lstats.tx_dropped +
+ oct_dev->link_stats.fromhost.fw_err_drop;
data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
@@ -2013,34 +1991,11 @@ static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
}
}
-/* Callback function for intrmod */
-static void octnet_intrmod_callback(struct octeon_device *oct_dev,
- u32 status,
- void *ptr)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
- struct oct_intrmod_context *ctx;
-
- ctx = (struct oct_intrmod_context *)sc->ctxptr;
-
- ctx->status = status;
-
- WRITE_ONCE(ctx->cond, 1);
-
- /* This barrier is required to be sure that the response has been
- * written fully before waking up the handler
- */
- wmb();
-
- wake_up_interruptible(&ctx->wc);
-}
-
/* get interrupt moderation parameters */
static int octnet_get_intrmod_cfg(struct lio *lio,
struct oct_intrmod_cfg *intr_cfg)
{
struct octeon_soft_command *sc;
- struct oct_intrmod_context *ctx;
struct oct_intrmod_resp *resp;
int retval;
struct octeon_device *oct_dev = lio->oct_dev;
@@ -2049,8 +2004,7 @@ static int octnet_get_intrmod_cfg(struct lio *lio,
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct_dev,
0,
- sizeof(struct oct_intrmod_resp),
- sizeof(struct oct_intrmod_context));
+ sizeof(struct oct_intrmod_resp), 0);
if (!sc)
return -ENOMEM;
@@ -2058,20 +2012,13 @@ static int octnet_get_intrmod_cfg(struct lio *lio,
resp = (struct oct_intrmod_resp *)sc->virtrptr;
memset(resp, 0, sizeof(struct oct_intrmod_resp));
- ctx = (struct oct_intrmod_context *)sc->ctxptr;
- memset(ctx, 0, sizeof(struct oct_intrmod_context));
- WRITE_ONCE(ctx->cond, 0);
- ctx->octeon_id = lio_get_device_id(oct_dev);
- init_waitqueue_head(&ctx->wc);
-
sc->iq_no = lio->linfo.txpciq[0].s.q_no;
octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
- sc->callback = octnet_intrmod_callback;
- sc->callback_arg = sc;
- sc->wait_time = 1000;
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
retval = octeon_send_soft_command(oct_dev, sc);
if (retval == IQ_SEND_FAILED) {
@@ -2082,32 +2029,23 @@ static int octnet_get_intrmod_cfg(struct lio *lio,
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out.
*/
- if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
- dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
- goto intrmod_info_wait_intr;
- }
+ retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
+ if (retval)
+ return -ENODEV;
- retval = ctx->status || resp->status;
- if (retval) {
+ if (resp->status) {
dev_err(&oct_dev->pci_dev->dev,
"Get interrupt moderation parameters failed\n");
- goto intrmod_info_wait_fail;
+ WRITE_ONCE(sc->caller_is_done, true);
+ return -ENODEV;
}
octeon_swap_8B_data((u64 *)&resp->intrmod,
(sizeof(struct oct_intrmod_cfg)) / 8);
memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
- octeon_free_soft_command(oct_dev, sc);
+ WRITE_ONCE(sc->caller_is_done, true);
return 0;
-
-intrmod_info_wait_fail:
-
- octeon_free_soft_command(oct_dev, sc);
-
-intrmod_info_wait_intr:
-
- return -ENODEV;
}
/* Configure interrupt moderation parameters */
@@ -2115,7 +2053,6 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
struct oct_intrmod_cfg *intr_cfg)
{
struct octeon_soft_command *sc;
- struct oct_intrmod_context *ctx;
struct oct_intrmod_cfg *cfg;
int retval;
struct octeon_device *oct_dev = lio->oct_dev;
@@ -2124,18 +2061,11 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct_dev,
sizeof(struct oct_intrmod_cfg),
- 0,
- sizeof(struct oct_intrmod_context));
+ 16, 0);
if (!sc)
return -ENOMEM;
- ctx = (struct oct_intrmod_context *)sc->ctxptr;
-
- WRITE_ONCE(ctx->cond, 0);
- ctx->octeon_id = lio_get_device_id(oct_dev);
- init_waitqueue_head(&ctx->wc);
-
cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
@@ -2146,9 +2076,8 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
- sc->callback = octnet_intrmod_callback;
- sc->callback_arg = sc;
- sc->wait_time = 1000;
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
retval = octeon_send_soft_command(oct_dev, sc);
if (retval == IQ_SEND_FAILED) {
@@ -2159,26 +2088,24 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out.
*/
- if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
- retval = ctx->status;
- if (retval)
- dev_err(&oct_dev->pci_dev->dev,
- "intrmod config failed. Status: %llx\n",
- CVM_CAST64(retval));
- else
- dev_info(&oct_dev->pci_dev->dev,
- "Rx-Adaptive Interrupt moderation %s\n",
- (intr_cfg->rx_enable) ?
- "enabled" : "disabled");
-
- octeon_free_soft_command(oct_dev, sc);
-
- return ((retval) ? -ENODEV : 0);
+ retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
+ if (retval)
+ return retval;
+
+ retval = sc->sc_status;
+ if (retval == 0) {
+ dev_info(&oct_dev->pci_dev->dev,
+ "Rx-Adaptive Interrupt moderation %s\n",
+ (intr_cfg->rx_enable) ?
+ "enabled" : "disabled");
+ WRITE_ONCE(sc->caller_is_done, true);
+ return 0;
}
- dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n");
-
- return -EINTR;
+ dev_err(&oct_dev->pci_dev->dev,
+ "intrmod config failed. Status: %x\n", retval);
+ WRITE_ONCE(sc->caller_is_done, true);
+ return -ENODEV;
}
static int lio_get_intr_coalesce(struct net_device *netdev,
@@ -3123,9 +3050,60 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
return 0;
}
+static int lio_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ fec->active_fec = ETHTOOL_FEC_NONE;
+ fec->fec = ETHTOOL_FEC_NONE;
+
+ if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
+ if (oct->no_speed_setting == 1)
+ return 0;
+
+ liquidio_get_fec(lio);
+ fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF);
+ if (oct->props[lio->ifidx].fec == 1)
+ fec->active_fec = ETHTOOL_FEC_RS;
+ else
+ fec->active_fec = ETHTOOL_FEC_OFF;
+ }
+
+ return 0;
+}
+
+static int lio_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
+ if (oct->no_speed_setting == 1)
+ return -EOPNOTSUPP;
+
+ if (fec->fec & ETHTOOL_FEC_OFF)
+ liquidio_set_fec(lio, 0);
+ else if (fec->fec & ETHTOOL_FEC_RS)
+ liquidio_set_fec(lio, 1);
+ else
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
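
Note: these two hooks back `ethtool --show-fec` and `ethtool --set-fec`: in struct ethtool_fecparam, .fec is a bitmask of configurable modes while .active_fec names the one in use. Condensed from the functions above (fec_enabled stands in for the driver's oct->props[ifidx].fec flag):

    /* get: RS and OFF are configurable, exactly one is active */
    fec->fec = ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF;
    fec->active_fec = fec_enabled ? ETHTOOL_FEC_RS : ETHTOOL_FEC_OFF;

    /* set: honor whichever supported mode userspace requested */
    if (fec->fec & ETHTOOL_FEC_OFF)
            err = liquidio_set_fec(lio, 0);
    else if (fec->fec & ETHTOOL_FEC_RS)
            err = liquidio_set_fec(lio, 1);
    else
            err = -EOPNOTSUPP;
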
static const struct ethtool_ops lio_ethtool_ops = {
.get_link_ksettings = lio_get_link_ksettings,
.set_link_ksettings = lio_set_link_ksettings,
+ .get_fecparam = lio_get_fecparam,
+ .set_fecparam = lio_set_fecparam,
.get_link = ethtool_op_get_link,
.get_drvinfo = lio_get_drvinfo,
.get_ringparam = lio_ethtool_get_ringparam,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 6fb13fa73b27..3d24133e5e49 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -99,14 +99,6 @@ struct lio_trusted_vf_ctx {
int status;
};
-struct liquidio_rx_ctl_context {
- int octeon_id;
-
- wait_queue_head_t wc;
-
- int cond;
-};
-
struct oct_link_status_resp {
u64 rh;
struct oct_link_info link_info;
@@ -642,26 +634,6 @@ static inline void update_link_status(struct net_device *netdev,
}
/**
- * lio_sync_octeon_time_cb - callback that is invoked when soft command
- * sent by lio_sync_octeon_time() has completed successfully or failed
- *
- * @oct - octeon device structure
- * @status - indicates success or failure
- * @buf - pointer to the command that was sent to firmware
- **/
-static void lio_sync_octeon_time_cb(struct octeon_device *oct,
- u32 status, void *buf)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
-
- if (status)
- dev_err(&oct->pci_dev->dev,
- "Failed to sync time to octeon; error=%d\n", status);
-
- octeon_free_soft_command(oct, sc);
-}
-
-/**
* lio_sync_octeon_time - send latest localtime to octeon firmware so that
* firmware will correct it's time, in case there is a time skew
*
@@ -677,7 +649,7 @@ static void lio_sync_octeon_time(struct work_struct *work)
struct lio_time *lt;
int ret;
- sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0);
+ sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
if (!sc) {
dev_err(&oct->pci_dev->dev,
"Failed to sync time to octeon: soft command allocation failed\n");
@@ -696,15 +668,16 @@ static void lio_sync_octeon_time(struct work_struct *work)
octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
- sc->callback = lio_sync_octeon_time_cb;
- sc->callback_arg = sc;
- sc->wait_time = 1000;
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
ret = octeon_send_soft_command(oct, sc);
if (ret == IQ_SEND_FAILED) {
dev_err(&oct->pci_dev->dev,
"Failed to sync time to octeon: failed to send soft command\n");
octeon_free_soft_command(oct, sc);
+ } else {
+ WRITE_ONCE(sc->caller_is_done, true);
}
queue_delayed_work(lio->sync_octeon_time_wq.wq,
@@ -1037,12 +1010,12 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_IO_QUEUES_DONE:
- if (wait_for_pending_requests(oct))
- dev_err(&oct->pci_dev->dev, "There were pending requests\n");
-
if (lio_wait_for_instr_fetch(oct))
dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
/* Disable the input and output queues now. No more packets will
* arrive from Octeon, but we should wait for all packet
* processing to finish.
@@ -1052,6 +1025,31 @@ static void octeon_destroy_resources(struct octeon_device *oct)
if (lio_wait_for_oq_pkts(oct))
dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
+ /* Force all requests waiting to be fetched by OCTEON to
+ * complete.
+ */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ struct octeon_instr_queue *iq;
+
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ iq = oct->instr_queue[i];
+
+ if (atomic_read(&iq->instr_pending)) {
+ spin_lock_bh(&iq->lock);
+ iq->fill_cnt = 0;
+ iq->octeon_read_index = iq->host_write_index;
+ iq->stats.instr_processed +=
+ atomic_read(&iq->instr_pending);
+ lio_process_iq_request_list(oct, iq, 0);
+ spin_unlock_bh(&iq->lock);
+ }
+ }
+
+ lio_process_ordered_list(oct, 1);
+ octeon_free_sc_done_list(oct);
+ octeon_free_sc_zombie_list(oct);
+
/* fallthrough */
case OCT_DEV_INTR_SET_DONE:
/* Disable interrupts */
@@ -1178,34 +1176,6 @@ static void octeon_destroy_resources(struct octeon_device *oct)
}
/**
- * \brief Callback for rx ctrl
- * @param status status of request
- * @param buf pointer to resp structure
- */
-static void rx_ctl_callback(struct octeon_device *oct,
- u32 status,
- void *buf)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- struct liquidio_rx_ctl_context *ctx;
-
- ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
-
- oct = lio_get_device(ctx->octeon_id);
- if (status)
- dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
- CVM_CAST64(status));
- WRITE_ONCE(ctx->cond, 1);
-
- /* This barrier is required to be sure that the response has been
- * written fully before waking up the handler
- */
- wmb();
-
- wake_up_interruptible(&ctx->wc);
-}
-
-/**
* \brief Send Rx control command
* @param lio per-network private data
* @param start_stop whether to start or stop
@@ -1213,9 +1183,7 @@ static void rx_ctl_callback(struct octeon_device *oct,
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octeon_soft_command *sc;
- struct liquidio_rx_ctl_context *ctx;
union octnet_cmd *ncmd;
- int ctx_size = sizeof(struct liquidio_rx_ctl_context);
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
int retval;
@@ -1224,14 +1192,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
- 16, ctx_size);
+ 16, 0);
ncmd = (union octnet_cmd *)sc->virtdptr;
- ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
-
- WRITE_ONCE(ctx->cond, 0);
- ctx->octeon_id = lio_get_device_id(oct);
- init_waitqueue_head(&ctx->wc);
ncmd->u64 = 0;
ncmd->s.cmd = OCTNET_CMD_RX_CTL;
@@ -1244,23 +1207,25 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_CMD, 0, 0, 0);
- sc->callback = rx_ctl_callback;
- sc->callback_arg = sc;
- sc->wait_time = 5000;
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+ octeon_free_soft_command(oct, sc);
+ return;
} else {
 /* Wait till the response arrives or the request times out. */
- if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
return;
+
oct->props[lio->ifidx].rx_on = start_stop;
+ WRITE_ONCE(sc->caller_is_done, true);
}
-
- octeon_free_soft_command(oct, sc);
}
/**
@@ -1274,8 +1239,10 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
struct net_device *netdev = oct->props[ifidx].netdev;
- struct lio *lio;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ struct lio *lio;
if (!netdev) {
dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
@@ -1304,6 +1271,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
netif_napi_del(napi);
+ tasklet_enable(&oct_priv->droq_tasklet);
+
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
@@ -1840,9 +1809,13 @@ static int liquidio_open(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
if (oct->props[lio->ifidx].napi_enabled == 0) {
+ tasklet_disable(&oct_priv->droq_tasklet);
+
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_enable(napi);
@@ -1876,6 +1849,12 @@ static int liquidio_open(struct net_device *netdev)
/* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1);
+ /* start periodic statistics fetching */
+ INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
+ lio->stats_wk.ctxptr = lio;
+ schedule_delayed_work(&lio->stats_wk.work,
+  msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
+
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
@@ -1890,6 +1869,8 @@ static int liquidio_stop(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
ifstate_reset(lio, LIO_IFSTATE_RUNNING);
@@ -1916,6 +1897,8 @@ static int liquidio_stop(struct net_device *netdev)
cleanup_tx_poll_fn(netdev);
}
+ cancel_delayed_work_sync(&lio->stats_wk.work);
+
if (lio->ptp_clock) {
ptp_clock_unregister(lio->ptp_clock);
lio->ptp_clock = NULL;
@@ -1934,6 +1917,8 @@ static int liquidio_stop(struct net_device *netdev)
if (OCTEON_CN23XX_PF(oct))
oct->droq[0]->ops.poll_mode = 0;
+
+ tasklet_enable(&oct_priv->droq_tasklet);
}
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
@@ -2014,10 +1999,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
/* Apparently, any activity in this call from the kernel has to
* be atomic. So we won't wait for response.
*/
- nctrl.wait_time = 0;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
ret);
}
@@ -2046,8 +2030,6 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
nctrl.ncmd.s.more = 1;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)netdev;
- nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nctrl.wait_time = 100;
nctrl.udd[0] = 0;
/* The MAC Address is presented in network byte order. */
@@ -2058,6 +2040,14 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
return -ENOMEM;
}
+
+ if (nctrl.sc_status) {
+ dev_err(&oct->pci_dev->dev,
+ "%s: MAC Address change failed. sc return=%x\n",
+ __func__, nctrl.sc_status);
+ return -EIO;
+ }
+
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
@@ -2111,7 +2101,6 @@ liquidio_get_stats64(struct net_device *netdev,
lstats->rx_packets = pkts;
lstats->rx_dropped = drop;
- octnet_get_link_stats(netdev);
lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
lstats->collisions = oct->link_stats.fromhost.total_collisions;
@@ -2324,7 +2313,7 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
* @returns whether the packet was transmitted to the device okay or not
* (NETDEV_TX_OK or NETDEV_TX_BUSY)
*/
-static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct lio *lio;
struct octnet_buf_free_info *finfo;
@@ -2598,14 +2587,15 @@ static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
nctrl.ncmd.s.param1 = vid;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
ret);
+ if (ret > 0)
+ ret = -EIO;
}
return ret;
@@ -2626,14 +2616,15 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
nctrl.ncmd.s.param1 = vid;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
ret);
+ if (ret > 0)
+ ret = -EIO;
}
return ret;
}
@@ -2659,15 +2650,16 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.param1 = rx_cmd;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev,
"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
ret);
+ if (ret > 0)
+ ret = -EIO;
}
return ret;
}
@@ -2695,15 +2687,16 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
nctrl.ncmd.s.more = vxlan_cmd_bit;
nctrl.ncmd.s.param1 = vxlan_port;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev,
"VxLAN port add/delete failed in core (ret:0x%x)\n",
ret);
+ if (ret > 0)
+ ret = -EIO;
}
return ret;
}
@@ -2826,6 +2819,7 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
if (!is_valid_ether_addr(mac))
return -EINVAL;
@@ -2839,12 +2833,13 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
/* vfidx is 0 based, but vf_num (param1) is 1 based */
nctrl.ncmd.s.param1 = vfidx + 1;
- nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
nctrl.ncmd.s.more = 1;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)netdev;
- nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nctrl.wait_time = LIO_CMD_WAIT_TM;
+ if (is_admin_assigned) {
+ nctrl.ncmd.s.param2 = true;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+ }
nctrl.udd[0] = 0;
/* The MAC Address is presented in network byte order. */
@@ -2852,9 +2847,11 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
- octnet_send_nic_ctrl_pkt(oct, &nctrl);
+ ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
+ if (ret > 0)
+ ret = -EIO;
- return 0;
+ return ret;
}
static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
@@ -2873,6 +2870,62 @@ static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
return retval;
}
+static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
+ bool enable)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int retval;
+
+ if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
+ netif_info(lio, drv, lio->netdev,
+ "firmware does not support spoofchk\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
+ netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
+ return -EINVAL;
+ }
+
+ if (enable) {
+ if (oct->sriov_info.vf_spoofchk[vfidx])
+ return 0;
+ } else {
+ /* Clear */
+ if (!oct->sriov_info.vf_spoofchk[vfidx])
+ return 0;
+ }
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+ nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
+ /* vfidx is 0-based, but vf_num (param1) is 1-based */
+ nctrl.ncmd.s.param1 = vfidx + 1;
+ nctrl.ncmd.s.param2 = enable;
+ nctrl.ncmd.s.more = 0;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.cb_fn = NULL;
+
+ retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
+
+ if (retval) {
+ netif_info(lio, drv, lio->netdev,
+ "Failed to set VF %d spoofchk %s\n", vfidx,
+ enable ? "on" : "off");
+ return -1;
+ }
+
+ oct->sriov_info.vf_spoofchk[vfidx] = enable;
+ netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
+ enable ? "on" : "off");
+
+ return 0;
+}
+
static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
u16 vlan, u8 qos, __be16 vlan_proto)
{
@@ -2880,6 +2933,7 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
u16 vlantci;
+ int ret = 0;
if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
return -EINVAL;
@@ -2911,13 +2965,17 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
nctrl.ncmd.s.more = 0;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.cb_fn = NULL;
- nctrl.wait_time = LIO_CMD_WAIT_TM;
- octnet_send_nic_ctrl_pkt(oct, &nctrl);
+ ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
+ if (ret) {
+ if (ret > 0)
+ ret = -EIO;
+ return ret;
+ }
oct->sriov_info.vf_vlantci[vfidx] = vlantci;
- return 0;
+ return ret;
}
static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
@@ -2930,6 +2988,8 @@ static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
return -EINVAL;
+ memset(ivi, 0, sizeof(struct ifla_vf_info));
+
ivi->vf = vfidx;
macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
ether_addr_copy(&ivi->mac[0], macaddr);
@@ -2941,33 +3001,22 @@ static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
else
ivi->trusted = false;
ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
- return 0;
-}
-
-static void trusted_vf_callback(struct octeon_device *oct_dev,
- u32 status, void *ptr)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
- struct lio_trusted_vf_ctx *ctx;
+ ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
+ ivi->max_tx_rate = lio->linfo.link.s.speed;
+ ivi->min_tx_rate = 0;
- ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
- ctx->status = status;
-
- complete(&ctx->complete);
+ return 0;
}
static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
{
struct octeon_device *oct = lio->oct_dev;
- struct lio_trusted_vf_ctx *ctx;
struct octeon_soft_command *sc;
- int ctx_size, retval;
-
- ctx_size = sizeof(struct lio_trusted_vf_ctx);
- sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size);
+ int retval;
- ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
- init_completion(&ctx->complete);
+ sc = octeon_alloc_soft_command(oct, 0, 16, 0);
+ if (!sc)
+ return -ENOMEM;
sc->iq_no = lio->linfo.txpciq[0].s.q_no;
@@ -2976,23 +3025,21 @@ static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
trusted);
- sc->callback = trusted_vf_callback;
- sc->callback_arg = sc;
- sc->wait_time = 1000;
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct, sc);
retval = -1;
} else {
/* Wait for response or timeout */
- if (wait_for_completion_timeout(&ctx->complete,
- msecs_to_jiffies(2000)))
- retval = ctx->status;
- else
- retval = -1;
- }
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return retval;
- octeon_free_soft_command(oct, sc);
+ WRITE_ONCE(sc->caller_is_done, true);
+ }
return retval;
}
@@ -3055,6 +3102,7 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
return -EINVAL;
@@ -3070,13 +3118,15 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
nctrl.ncmd.s.more = 0;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.cb_fn = NULL;
- nctrl.wait_time = LIO_CMD_WAIT_TM;
- octnet_send_nic_ctrl_pkt(oct, &nctrl);
+ ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
- oct->sriov_info.vf_linkstate[vfidx] = linkstate;
+ if (!ret)
+ oct->sriov_info.vf_linkstate[vfidx] = linkstate;
+ else if (ret > 0)
+ ret = -EIO;
- return 0;
+ return ret;
}
static int
@@ -3094,7 +3144,8 @@ liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
}
static int
-liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
+liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct lio_devlink_priv *priv;
struct octeon_device *oct;
@@ -3204,6 +3255,7 @@ static const struct net_device_ops lionetdevops = {
.ndo_set_vf_mac = liquidio_set_vf_mac,
.ndo_set_vf_vlan = liquidio_set_vf_vlan,
.ndo_get_vf_config = liquidio_get_vf_config,
+ .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk,
.ndo_set_vf_trust = liquidio_set_vf_trust,
.ndo_set_vf_link_state = liquidio_set_vf_link_state,
.ndo_get_vf_stats = liquidio_get_vf_stats,
@@ -3307,7 +3359,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
unsigned long micro;
u32 cur_ver;
struct octeon_soft_command *sc;
- struct liquidio_if_cfg_context *ctx;
struct liquidio_if_cfg_resp *resp;
struct octdev_props *props;
int retval, num_iqueues, num_oqueues;
@@ -3315,7 +3366,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
union oct_nic_if_cfg if_cfg;
unsigned int base_queue;
unsigned int gmx_port_id;
- u32 resp_size, ctx_size, data_size;
+ u32 resp_size, data_size;
u32 ifidx_or_pfnum;
struct lio_version *vdata;
struct devlink *devlink;
@@ -3340,13 +3391,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
for (i = 0; i < octeon_dev->ifcount; i++) {
resp_size = sizeof(struct liquidio_if_cfg_resp);
- ctx_size = sizeof(struct liquidio_if_cfg_context);
data_size = sizeof(struct lio_version);
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(octeon_dev, data_size,
- resp_size, ctx_size);
+ resp_size, 0);
resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
vdata = (struct lio_version *)sc->virtdptr;
*((u64 *)vdata) = 0;
@@ -3376,9 +3425,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
dev_dbg(&octeon_dev->pci_dev->dev,
"requesting config for interface %d, iqs %d, oqs %d\n",
ifidx_or_pfnum, num_iqueues, num_oqueues);
- WRITE_ONCE(ctx->cond, 0);
- ctx->octeon_id = lio_get_device_id(octeon_dev);
- init_waitqueue_head(&ctx->wc);
if_cfg.u64 = 0;
if_cfg.s.num_iqueues = num_iqueues;
@@ -3392,9 +3438,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OPCODE_NIC_IF_CFG, 0,
if_cfg.u64, 0);
- sc->callback = lio_if_cfg_callback;
- sc->callback_arg = sc;
- sc->wait_time = LIO_IFCFG_WAIT_TIME;
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
retval = octeon_send_soft_command(octeon_dev, sc);
if (retval == IQ_SEND_FAILED) {
@@ -3402,22 +3447,26 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
"iq/oq config failed status: %x\n",
retval);
/* Soft instr is freed by driver in case of failure. */
- goto setup_nic_dev_fail;
+ octeon_free_soft_command(octeon_dev, sc);
+ return -EIO;
}
 /* Wait till the response arrives or the request times out. */
- if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
- dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
- goto setup_nic_wait_intr;
- }
+ retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
+ if (retval)
+ return retval;
retval = resp->status;
if (retval) {
dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
- goto setup_nic_dev_fail;
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
}
+ snprintf(octeon_dev->fw_info.liquidio_firmware_version,
+ 32, "%s",
+ resp->cfg_info.liquidio_firmware_version);
/* Verify f/w version (in case of 'auto' loading from flash) */
fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
@@ -3427,7 +3476,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
dev_err(&octeon_dev->pci_dev->dev,
"Unmatched firmware version. Expected %s.x, got %s.\n",
LIQUIDIO_BASE_VERSION, fw_ver);
- goto setup_nic_dev_fail;
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
} else if (atomic_read(octeon_dev->adapter_fw_state) ==
FW_IS_PRELOADED) {
dev_info(&octeon_dev->pci_dev->dev,
@@ -3454,7 +3504,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
resp->cfg_info.iqmask,
resp->cfg_info.oqmask);
- goto setup_nic_dev_fail;
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
}
if (OCTEON_CN6XXX(octeon_dev)) {
@@ -3473,7 +3524,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (!netdev) {
dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
- goto setup_nic_dev_fail;
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
}
SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
@@ -3488,14 +3540,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (retval) {
dev_err(&octeon_dev->pci_dev->dev,
"setting real number rx failed\n");
- goto setup_nic_dev_fail;
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_free;
}
retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
if (retval) {
dev_err(&octeon_dev->pci_dev->dev,
"setting real number tx failed\n");
- goto setup_nic_dev_fail;
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_free;
}
lio = GET_LIO(netdev);
@@ -3522,6 +3576,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+ WRITE_ONCE(sc->caller_is_done, true);
+
lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
if (OCTEON_CN23XX_PF(octeon_dev) ||
@@ -3588,7 +3644,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
dev_err(&octeon_dev->pci_dev->dev,
"Error setting VF%d MAC address\n",
j);
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
}
}
@@ -3610,7 +3666,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->linfo.num_txpciq,
lio->linfo.num_rxpciq)) {
dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
}
ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
@@ -3621,7 +3677,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
dev_err(&octeon_dev->pci_dev->dev,
"Gather list allocation failed\n");
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
}
/* Register ethtool support */
@@ -3643,20 +3699,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OCTNET_CMD_VERBOSE_ENABLE, 0);
if (setup_link_status_change_wq(netdev))
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
if ((octeon_dev->fw_info.app_cap_flags &
LIQUIDIO_TIME_SYNC_CAP) &&
setup_sync_octeon_time_wq(netdev))
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
if (setup_rx_oom_poll_fn(netdev))
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
/* Register the network device with the OS */
if (register_netdev(netdev)) {
dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
}
dev_dbg(&octeon_dev->pci_dev->dev,
@@ -3679,8 +3735,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
dev_dbg(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup successful\n", i);
- octeon_free_soft_command(octeon_dev, sc);
-
if (octeon_dev->subsystem_id ==
OCTEON_CN2350_25GB_SUBSYS_ID ||
octeon_dev->subsystem_id ==
@@ -3709,13 +3763,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
}
octeon_dev->speed_boot = octeon_dev->speed_setting;
+ /* don't read FEC setting if unsupported by f/w (see above) */
+ if (octeon_dev->speed_boot == 25 &&
+ !octeon_dev->no_speed_setting) {
+ liquidio_get_fec(lio);
+ octeon_dev->props[lio->ifidx].fec_boot =
+ octeon_dev->props[lio->ifidx].fec;
+ }
}
devlink = devlink_alloc(&liquidio_devlink_ops,
sizeof(struct lio_devlink_priv));
if (!devlink) {
dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
- goto setup_nic_wait_intr;
+ goto setup_nic_dev_free;
}
lio_devlink = devlink_priv(devlink);
@@ -3725,7 +3786,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
devlink_free(devlink);
dev_err(&octeon_dev->pci_dev->dev,
"devlink registration failed\n");
- goto setup_nic_wait_intr;
+ goto setup_nic_dev_free;
}
octeon_dev->devlink = devlink;
@@ -3733,17 +3794,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
return 0;
-setup_nic_dev_fail:
-
- octeon_free_soft_command(octeon_dev, sc);
-
-setup_nic_wait_intr:
+setup_nic_dev_free:
while (i--) {
dev_err(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup failed\n", i);
liquidio_destroy_nic_device(octeon_dev, i);
}
+
+setup_nic_dev_done:
+
return -ENODEV;
}
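
The hunks above complete lio_main.c's move from per-command callback contexts (liquidio_rx_ctl_context, lio_trusted_vf_ctx, liquidio_if_cfg_context) to a completion embedded in the soft command itself. A minimal sketch of the new calling convention, using only calls that appear in this patch, with the command payload and response handling reduced to placeholders:

/* Sketch of the completion-based soft-command lifecycle introduced by
 * this patch; buffer sizes and the payload are illustrative.
 */
static int lio_send_blocking_sc(struct octeon_device *oct)
{
	struct octeon_soft_command *sc;
	int retval;

	/* ctxptr is gone: allocate data/response space only (16-byte rptr pad) */
	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
	if (!sc)
		return -ENOMEM;

	/* ... fill sc->virtdptr with the command payload here ... */

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
				    0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		/* never queued: the caller still owns sc and must free it */
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return retval;	/* sc now lives on the done/zombie lists */

	/* ... consume the response at sc->virtrptr here ... */

	/* hand sc back to the response-list machinery for freeing */
	WRITE_ONCE(sc->caller_is_done, true);
	return 0;
}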
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index b77835724dc8..54b245797d2e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -40,14 +40,6 @@ MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
-struct liquidio_rx_ctl_context {
- int octeon_id;
-
- wait_queue_head_t wc;
-
- int cond;
-};
-
struct oct_timestamp_resp {
u64 rh;
u64 timestamp;
@@ -452,6 +444,8 @@ static void octeon_pci_flr(struct octeon_device *oct)
*/
static void octeon_destroy_resources(struct octeon_device *oct)
{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
struct msix_entry *msix_entries;
int i;
@@ -471,12 +465,12 @@ static void octeon_destroy_resources(struct octeon_device *oct)
case OCT_DEV_HOST_OK:
/* fallthrough */
case OCT_DEV_IO_QUEUES_DONE:
- if (wait_for_pending_requests(oct))
- dev_err(&oct->pci_dev->dev, "There were pending requests\n");
-
if (lio_wait_for_instr_fetch(oct))
dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
/* Disable the input and output queues now. No more packets will
* arrive from Octeon, but we should wait for all packet
* processing to finish.
@@ -485,7 +479,33 @@ static void octeon_destroy_resources(struct octeon_device *oct)
if (lio_wait_for_oq_pkts(oct))
dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
- /* fall through */
+
+ /* Force all requests waiting to be fetched by OCTEON to
+ * complete.
+ */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ struct octeon_instr_queue *iq;
+
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ iq = oct->instr_queue[i];
+
+ if (atomic_read(&iq->instr_pending)) {
+ spin_lock_bh(&iq->lock);
+ iq->fill_cnt = 0;
+ iq->octeon_read_index = iq->host_write_index;
+ iq->stats.instr_processed +=
+ atomic_read(&iq->instr_pending);
+ lio_process_iq_request_list(oct, iq, 0);
+ spin_unlock_bh(&iq->lock);
+ }
+ }
+
+ lio_process_ordered_list(oct, 1);
+ octeon_free_sc_done_list(oct);
+ octeon_free_sc_zombie_list(oct);
+
+ /* fall through */
case OCT_DEV_INTR_SET_DONE:
/* Disable interrupts */
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
@@ -569,33 +589,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* Nothing to be done here either */
break;
}
-}
-
-/**
- * \brief Callback for rx ctrl
- * @param status status of request
- * @param buf pointer to resp structure
- */
-static void rx_ctl_callback(struct octeon_device *oct,
- u32 status, void *buf)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- struct liquidio_rx_ctl_context *ctx;
-
- ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
- oct = lio_get_device(ctx->octeon_id);
- if (status)
- dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
- CVM_CAST64(status));
- WRITE_ONCE(ctx->cond, 1);
-
- /* This barrier is required to be sure that the response has been
- * written fully before waking up the handler
- */
- wmb();
-
- wake_up_interruptible(&ctx->wc);
+ tasklet_kill(&oct_priv->droq_tasklet);
}
/**
@@ -606,8 +601,6 @@ static void rx_ctl_callback(struct octeon_device *oct,
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
- int ctx_size = sizeof(struct liquidio_rx_ctl_context);
- struct liquidio_rx_ctl_context *ctx;
struct octeon_soft_command *sc;
union octnet_cmd *ncmd;
int retval;
@@ -617,14 +610,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
- 16, ctx_size);
+ 16, 0);
ncmd = (union octnet_cmd *)sc->virtdptr;
- ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
-
- WRITE_ONCE(ctx->cond, 0);
- ctx->octeon_id = lio_get_device_id(oct);
- init_waitqueue_head(&ctx->wc);
ncmd->u64 = 0;
ncmd->s.cmd = OCTNET_CMD_RX_CTL;
@@ -637,23 +625,24 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_CMD, 0, 0, 0);
- sc->callback = rx_ctl_callback;
- sc->callback_arg = sc;
- sc->wait_time = 5000;
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
retval = octeon_send_soft_command(oct, sc);
if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+ octeon_free_soft_command(oct, sc);
} else {
 /* Wait till the response arrives or the request times out. */
- if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
return;
+
oct->props[lio->ifidx].rx_on = start_stop;
+ WRITE_ONCE(sc->caller_is_done, true);
}
-
- octeon_free_soft_command(oct, sc);
}
/**
@@ -667,6 +656,8 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
struct net_device *netdev = oct->props[ifidx].netdev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
struct lio *lio;
@@ -696,6 +687,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
netif_napi_del(napi);
+ tasklet_enable(&oct_priv->droq_tasklet);
+
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
@@ -913,9 +906,13 @@ static int liquidio_open(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
if (!oct->props[lio->ifidx].napi_enabled) {
+ tasklet_disable(&oct_priv->droq_tasklet);
+
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_enable(napi);
@@ -932,6 +929,11 @@ static int liquidio_open(struct net_device *netdev)
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
start_txqs(netdev);
+ INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
+ lio->stats_wk.ctxptr = lio;
+ schedule_delayed_work(&lio->stats_wk.work,
+  msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
+
/* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1);
@@ -948,6 +950,8 @@ static int liquidio_stop(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
/* tell Octeon to stop forwarding packets to host */
@@ -977,8 +981,12 @@ static int liquidio_stop(struct net_device *netdev)
oct->props[lio->ifidx].napi_enabled = 0;
oct->droq[0]->ops.poll_mode = 0;
+
+ tasklet_enable(&oct_priv->droq_tasklet);
}
+ cancel_delayed_work_sync(&lio->stats_wk.work);
+
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
return 0;
@@ -1093,10 +1101,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
/* Apparently, any activity in this call from the kernel has to
* be atomic. So we won't wait for response.
*/
- nctrl.wait_time = 0;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
ret);
}
@@ -1133,8 +1140,6 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
nctrl.ncmd.s.more = 1;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)netdev;
- nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nctrl.wait_time = 100;
nctrl.udd[0] = 0;
/* The MAC Address is presented in network byte order. */
@@ -1145,6 +1150,13 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
return -ENOMEM;
}
+
+ if (nctrl.sc_status ==
+ FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) {
+ dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n");
+ return -EPERM;
+ }
+
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
@@ -1198,7 +1210,6 @@ liquidio_get_stats64(struct net_device *netdev,
lstats->rx_packets = pkts;
lstats->rx_dropped = drop;
- octnet_get_link_stats(netdev);
lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
/* detailed rx_errors: */
@@ -1390,7 +1401,7 @@ static int send_nic_timestamp_pkt(struct octeon_device *oct,
* @returns whether the packet was transmitted to the device okay or not
* (NETDEV_TX_OK or NETDEV_TX_BUSY)
*/
-static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct octnet_buf_free_info *finfo;
union octnic_cmd_setup cmdsetup;
@@ -1638,8 +1649,6 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct completion compl;
- u16 response_code;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
@@ -1648,26 +1657,15 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev,
nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
nctrl.ncmd.s.param1 = vid;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- init_completion(&compl);
- nctrl.completion = &compl;
- nctrl.response_code = &response_code;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
ret);
- return -EIO;
- }
-
- if (!wait_for_completion_timeout(&compl,
- msecs_to_jiffies(nctrl.wait_time)))
- return -EPERM;
-
- if (READ_ONCE(response_code))
return -EPERM;
+ }
return 0;
}
@@ -1687,14 +1685,15 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev,
nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
nctrl.ncmd.s.param1 = vid;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
ret);
+ if (ret > 0)
+ ret = -EIO;
}
return ret;
}
@@ -1720,14 +1719,15 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
nctrl.ncmd.s.cmd = command;
nctrl.ncmd.s.param1 = rx_cmd;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
ret);
+ if (ret > 0)
+ ret = -EIO;
}
return ret;
}
@@ -1755,15 +1755,16 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
nctrl.ncmd.s.more = vxlan_cmd_bit;
nctrl.ncmd.s.param1 = vxlan_port;
nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
- nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
- if (ret < 0) {
+ if (ret) {
dev_err(&oct->pci_dev->dev,
"DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
ret);
+ if (ret > 0)
+ ret = -EIO;
}
return ret;
}
@@ -1924,8 +1925,7 @@ nic_info_err:
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
int retval, num_iqueues, num_oqueues;
- struct liquidio_if_cfg_context *ctx;
- u32 resp_size, ctx_size, data_size;
+ u32 resp_size, data_size;
struct liquidio_if_cfg_resp *resp;
struct octeon_soft_command *sc;
union oct_nic_if_cfg if_cfg;
@@ -1956,13 +1956,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
for (i = 0; i < octeon_dev->ifcount; i++) {
resp_size = sizeof(struct liquidio_if_cfg_resp);
- ctx_size = sizeof(struct liquidio_if_cfg_context);
data_size = sizeof(struct lio_version);
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(octeon_dev, data_size,
- resp_size, ctx_size);
+ resp_size, 0);
resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
- ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
vdata = (struct lio_version *)sc->virtdptr;
*((u64 *)vdata) = 0;
@@ -1970,10 +1968,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
- WRITE_ONCE(ctx->cond, 0);
- ctx->octeon_id = lio_get_device_id(octeon_dev);
- init_waitqueue_head(&ctx->wc);
-
if_cfg.u64 = 0;
if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
@@ -1986,32 +1980,37 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
0);
- sc->callback = lio_if_cfg_callback;
- sc->callback_arg = sc;
- sc->wait_time = 5000;
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
retval = octeon_send_soft_command(octeon_dev, sc);
if (retval == IQ_SEND_FAILED) {
dev_err(&octeon_dev->pci_dev->dev,
"iq/oq config failed status: %x\n", retval);
/* Soft instr is freed by driver in case of failure. */
- goto setup_nic_dev_fail;
+ octeon_free_soft_command(octeon_dev, sc);
+ return -EIO;
}
 /* Wait till the response arrives or the request times out. */
- if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
- dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
- goto setup_nic_wait_intr;
- }
+ retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
+ if (retval)
+ return retval;
retval = resp->status;
if (retval) {
- dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
- goto setup_nic_dev_fail;
+ dev_err(&octeon_dev->pci_dev->dev,
+ "iq/oq config failed, retval = %d\n", retval);
+ WRITE_ONCE(sc->caller_is_done, true);
+ return -EIO;
}
+ snprintf(octeon_dev->fw_info.liquidio_firmware_version,
+ 32, "%s",
+ resp->cfg_info.liquidio_firmware_version);
+
octeon_swap_8B_data((u64 *)(&resp->cfg_info),
(sizeof(struct liquidio_if_cfg_info)) >> 3);
@@ -2022,7 +2021,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
dev_err(&octeon_dev->pci_dev->dev,
"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
resp->cfg_info.iqmask, resp->cfg_info.oqmask);
- goto setup_nic_dev_fail;
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
}
dev_dbg(&octeon_dev->pci_dev->dev,
"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
@@ -2033,7 +2033,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (!netdev) {
dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
- goto setup_nic_dev_fail;
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
}
SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
@@ -2070,6 +2071,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
lio->linfo.macaddr_is_admin_asgnd =
resp->cfg_info.linfo.macaddr_is_admin_asgnd;
+ lio->linfo.macaddr_spoofchk =
+ resp->cfg_info.linfo.macaddr_spoofchk;
lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
@@ -2109,6 +2112,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
netdev->min_mtu = LIO_MIN_MTU_SIZE;
netdev->max_mtu = LIO_MAX_MTU_SIZE;
+ WRITE_ONCE(sc->caller_is_done, true);
+
/* Point to the properties for octeon device to which this
* interface belongs.
*/
@@ -2132,7 +2137,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->linfo.num_txpciq,
lio->linfo.num_rxpciq)) {
dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
}
ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
@@ -2155,7 +2160,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
dev_err(&octeon_dev->pci_dev->dev,
"Gather list allocation failed\n");
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
}
/* Register ethtool support */
@@ -2170,15 +2175,15 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
if (setup_link_status_change_wq(netdev))
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
if (setup_rx_oom_poll_fn(netdev))
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
/* Register the network device with the OS */
if (register_netdev(netdev)) {
dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
- goto setup_nic_dev_fail;
+ goto setup_nic_dev_free;
}
dev_dbg(&octeon_dev->pci_dev->dev,
@@ -2201,24 +2206,21 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
dev_dbg(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup successful\n", i);
- octeon_free_soft_command(octeon_dev, sc);
-
octeon_dev->no_speed_setting = 1;
}
return 0;
-setup_nic_dev_fail:
-
- octeon_free_soft_command(octeon_dev, sc);
-
-setup_nic_wait_intr:
+setup_nic_dev_free:
while (i--) {
dev_err(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup failed\n", i);
liquidio_destroy_nic_device(octeon_dev, i);
}
+
+setup_nic_dev_done:
+
return -ENODEV;
}
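
The force-completion loop is added verbatim to this VF teardown and to the PF teardown in lio_main.c above. Factored into a helper it reads as follows; this is a hypothetical refactoring shown for clarity, the patch itself open-codes the loop twice:

/* Hypothetical helper equivalent to the loop added in both
 * octeon_destroy_resources() variants: mark every instruction still
 * sitting in an IQ as processed so its pending requests complete,
 * then drain the ordered, done and zombie soft-command lists.
 */
static void lio_force_pending_requests_done(struct octeon_device *oct)
{
	int i;

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	lio_process_ordered_list(oct, 1);
	octeon_free_sc_done_list(oct);
	octeon_free_sc_zombie_list(oct);
}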
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index ddd7431579f4..ea9859e028d4 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -27,11 +27,11 @@
#include "octeon_network.h"
#include <net/switchdev.h>
#include "lio_vf_rep.h"
-#include "octeon_network.h"
static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
-static int lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev);
+static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
+ struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *netdev);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
char *buf, size_t len);
@@ -49,44 +49,25 @@ static const struct net_device_ops lio_vf_rep_ndev_ops = {
.ndo_change_mtu = lio_vf_rep_change_mtu,
};
-static void
-lio_vf_rep_send_sc_complete(struct octeon_device *oct,
- u32 status, void *ptr)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
- struct lio_vf_rep_sc_ctx *ctx =
- (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
- struct lio_vf_rep_resp *resp =
- (struct lio_vf_rep_resp *)sc->virtrptr;
-
- if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status))
- WRITE_ONCE(resp->status, 0);
-
- complete(&ctx->complete);
-}
-
static int
lio_vf_rep_send_soft_command(struct octeon_device *oct,
void *req, int req_size,
void *resp, int resp_size)
{
int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
- int ctx_size = sizeof(struct lio_vf_rep_sc_ctx);
struct octeon_soft_command *sc = NULL;
struct lio_vf_rep_resp *rep_resp;
- struct lio_vf_rep_sc_ctx *ctx;
void *sc_req;
int err;
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, req_size,
- tot_resp_size, ctx_size);
+ tot_resp_size, 0);
if (!sc)
return -ENOMEM;
- ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
- memset(ctx, 0, ctx_size);
- init_completion(&ctx->complete);
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
memcpy(sc_req, req, req_size);
@@ -98,23 +79,24 @@ lio_vf_rep_send_soft_command(struct octeon_device *oct,
sc->iq_no = 0;
octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
OPCODE_NIC_VF_REP_CMD, 0, 0, 0);
- sc->callback = lio_vf_rep_send_sc_complete;
- sc->callback_arg = sc;
- sc->wait_time = LIO_VF_REP_REQ_TMO_MS;
err = octeon_send_soft_command(oct, sc);
if (err == IQ_SEND_FAILED)
goto free_buff;
- wait_for_completion_timeout(&ctx->complete,
- msecs_to_jiffies
- (2 * LIO_VF_REP_REQ_TMO_MS));
+ err = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (err)
+ return err;
+
err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
if (err)
dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
-
- if (resp)
+ else if (resp)
memcpy(resp, (rep_resp + 1), resp_size);
+
+ WRITE_ONCE(sc->caller_is_done, true);
+ return err;
+
free_buff:
octeon_free_soft_command(oct, sc);
@@ -380,7 +362,7 @@ lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
netif_wake_queue(ndev);
}
-static int
+static netdev_tx_t
lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
@@ -404,7 +386,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
}
sc = (struct octeon_soft_command *)
- octeon_alloc_soft_command(oct, 0, 0, 0);
+ octeon_alloc_soft_command(oct, 0, 16, 0);
if (!sc) {
dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
goto xmit_failed;
@@ -413,6 +395,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Multiple buffers are not used for vf_rep packets. */
if (skb_shinfo(skb)->nr_frags != 0) {
dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
+ octeon_free_soft_command(oct, sc);
goto xmit_failed;
}
@@ -420,6 +403,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
+ octeon_free_soft_command(oct, sc);
goto xmit_failed;
}
@@ -440,6 +424,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
if (status == IQ_SEND_FAILED) {
dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
sc->datasize, DMA_TO_DEVICE);
+ octeon_free_soft_command(oct, sc);
goto xmit_failed;
}
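
With nctrl.wait_time removed, octnet_send_nic_ctrl_pkt() now returns zero on success, a negative errno on a host-side failure, or a positive firmware status code; lio_main.c and lio_vf_main.c above repeatedly collapse the positive case to -EIO by hand before returning to the network stack. A hypothetical one-line helper that names the repeated pattern (not part of the patch):

/* Positive values are firmware status codes, which must never be
 * returned to the network stack as-is; map them to -EIO.
 */
static inline int lio_fw_status_to_errno(int ret)
{
	return ret > 0 ? -EIO : ret;
}

With it, the VLAN, RXCSUM and VXLAN call sites would reduce to ret = lio_fw_status_to_errno(octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl));.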
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 7407fcd338e9..a5e0e9f17959 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -118,6 +118,10 @@ enum octeon_tag_type {
/* App specific capabilities from firmware to pf driver */
#define LIQUIDIO_TIME_SYNC_CAP 0x1
#define LIQUIDIO_SWITCHDEV_CAP 0x2
+#define LIQUIDIO_SPOOFCHK_CAP 0x4
+
+/* error status return from firmware */
+#define OCTEON_REQUEST_NO_PERMISSION 0xc
static inline u32 incr_index(u32 index, u32 count, u32 max)
{
@@ -241,6 +245,10 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_QUEUE_COUNT_CTL 0x1f
+#define OCTNET_CMD_GROUP1 1
+#define OCTNET_CMD_SET_VF_SPOOFCHK 0x1
+#define OCTNET_GROUP1_LAST_CMD OCTNET_CMD_SET_VF_SPOOFCHK
+
#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
#define OCTNET_CMD_RXCSUM_ENABLE 0x0
@@ -250,9 +258,18 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1
#define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0
+#define OCTNET_CMD_FAIL 0x1
+
+#define SEAPI_CMD_FEC_SET 0x0
+#define SEAPI_CMD_FEC_SET_DISABLE 0x0
+#define SEAPI_CMD_FEC_SET_RS 0x1
+#define SEAPI_CMD_FEC_GET 0x1
+
#define SEAPI_CMD_SPEED_SET 0x2
#define SEAPI_CMD_SPEED_GET 0x3
+#define OPCODE_NIC_VF_PORT_STATS 0x22
+
#define LIO_CMD_WAIT_TM 100
/* RX(packets coming from wire) Checksum verification flags */
@@ -301,7 +318,8 @@ union octnet_cmd {
u64 more:6; /* How many udd words follow the command */
- u64 reserved:29;
+ u64 cmdgroup:8;
+ u64 reserved:21;
u64 param1:16;
@@ -313,7 +331,8 @@ union octnet_cmd {
u64 param1:16;
- u64 reserved:29;
+ u64 reserved:21;
+ u64 cmdgroup:8;
u64 more:6;
@@ -757,13 +776,17 @@ struct oct_link_info {
#ifdef __BIG_ENDIAN_BITFIELD
u64 gmxport:16;
u64 macaddr_is_admin_asgnd:1;
- u64 rsvd:31;
+ u64 rsvd:13;
+ u64 macaddr_spoofchk:1;
+ u64 rsvd1:17;
u64 num_txpciq:8;
u64 num_rxpciq:8;
#else
u64 num_rxpciq:8;
u64 num_txpciq:8;
- u64 rsvd:31;
+ u64 rsvd1:17;
+ u64 macaddr_spoofchk:1;
+ u64 rsvd:13;
u64 macaddr_is_admin_asgnd:1;
u64 gmxport:16;
#endif
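
The eight cmdgroup bits carved out of the reserved field above let group-1 commands reuse low command numbers without colliding with the legacy flat command space (OCTNET_CMD_SET_VF_SPOOFCHK is 0x1 inside group 1). A minimal sketch of issuing a group-1 command, mirroring liquidio_set_vf_spoofchk() earlier in this patch; the function name and parameters are illustrative:

/* Sketch: issuing a group-1 command through the new cmdgroup field.
 * vf_num is the 1-based VF number expected by the firmware.
 */
static int lio_send_group1_cmd(struct lio *lio, struct octeon_device *oct,
			       int vf_num, bool enable)
{
	struct octnic_ctrl_pkt nctrl;

	memset(&nctrl, 0, sizeof(nctrl));
	nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
	nctrl.ncmd.s.param1 = vf_num;
	nctrl.ncmd.s.param2 = enable;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = NULL;

	return octnet_send_nic_ctrl_pkt(oct, &nctrl);
}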
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index ceac74388e09..24c212001212 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -438,9 +438,10 @@ struct octeon_config {
#define MAX_BAR1_IOREMAP_SIZE (16 * OCTEON_BAR1_ENTRY_SIZE)
/* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking
+ * 1 done list, 1 zombie list (for timed-out soft commands)
* NoResponse Lists are now maintained with each IQ. (Dec' 2007).
*/
-#define MAX_RESPONSE_LISTS 4
+#define MAX_RESPONSE_LISTS 6
/* Opcode hash bits. The opcode is hashed on the lower 6-bits to lookup the
* dispatch table.
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index f878a552fef3..ce8c3f818666 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1044,8 +1044,7 @@ void octeon_delete_dispatch_list(struct octeon_device *oct)
dispatch = &oct->dispatch.dlist[i].list;
while (dispatch->next != dispatch) {
temp = dispatch->next;
- list_del(temp);
- list_add_tail(temp, &freelist);
+ list_move_tail(temp, &freelist);
}
oct->dispatch.dlist[i].opcode = 0;
@@ -1440,18 +1439,15 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
/* the whole thing needs to be atomic, ideally */
if (droq) {
pkts_pend = (u32)atomic_read(&droq->pkts_pending);
- spin_lock_bh(&droq->lock);
writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg);
droq->pkt_count = pkts_pend;
- /* this write needs to be flushed before we release the lock */
- mmiowb();
- spin_unlock_bh(&droq->lock);
oct = droq->oct_dev;
}
if (iq) {
spin_lock_bh(&iq->lock);
- writel(iq->pkt_in_done, iq->inst_cnt_reg);
- iq->pkt_in_done = 0;
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
/* this write needs to be flushed before we release the lock */
mmiowb();
spin_unlock_bh(&iq->lock);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index d99ca6ba23a4..3d01d3602d8f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -316,6 +316,8 @@ struct octdev_props {
* device pointer (used for OS specific calls).
*/
int rx_on;
+ int fec;
+ int fec_boot;
int napi_enabled;
int gmxport;
struct net_device *netdev;
@@ -397,6 +399,8 @@ struct octeon_sriov_info {
int vf_linkstate[MAX_POSSIBLE_VFS];
+ bool vf_spoofchk[MAX_POSSIBLE_VFS];
+
u64 vf_drv_loaded_mask;
};
@@ -607,6 +611,9 @@ struct octeon_device {
u8 speed_boot;
u8 speed_setting;
u8 no_speed_setting;
+
+ u32 vfstats_poll;
+#define LIO_VFSTATS_POLL 10
};
#define OCT_DRV_ONLINE 1
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index a71dbb7ab6af..a0c099f71524 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -301,8 +301,6 @@ int octeon_init_droq(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
droq->max_empty_descs);
- spin_lock_init(&droq->lock);
-
INIT_LIST_HEAD(&droq->dispatch_list);
/* For 56xx Pass1, this function won't be called, so no checks. */
@@ -333,8 +331,6 @@ init_droq_fail:
* Returns:
* Success: Pointer to recv_info_t
* Failure: NULL.
- * Locks:
- * The droq->lock is held when this routine is called.
*/
static inline struct octeon_recv_info *octeon_create_recv_info(
struct octeon_device *octeon_dev,
@@ -433,8 +429,6 @@ octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
* up buffers (that were not dispatched) to form a contiguous ring.
* Returns:
* No of descriptors refilled.
- * Locks:
- * This routine is called with droq->lock held.
*/
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
@@ -449,8 +443,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
while (droq->refill_count && (desc_refilled < droq->max_count)) {
/* If a valid buffer exists (happens if there is no dispatch),
- * reuse
- * the buffer, else allocate.
+ * reuse the buffer, else allocate.
*/
if (!droq->recv_buf_list[droq->refill_idx].buffer) {
pg_info =
@@ -503,34 +496,37 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
/** check if we can allocate packets to get out of oom.
* @param droq - Droq being checked.
- * @return does not return anything
+ * @return 1 if it fails to refill to the minimum credit level
*/
-void octeon_droq_check_oom(struct octeon_droq *droq)
+int octeon_retry_droq_refill(struct octeon_droq *droq)
{
- int desc_refilled;
struct octeon_device *oct = droq->oct_dev;
+ int desc_refilled, reschedule = 1;
+ u32 pkts_credit;
+
+ pkts_credit = readl(droq->pkts_credit_reg);
+ desc_refilled = octeon_droq_refill(oct, droq);
+ if (desc_refilled) {
+ /* Flush the droq descriptor data to memory to be sure
+ * that when we update the credits the data in memory
+ * is accurate.
+ */
+ wmb();
+ writel(desc_refilled, droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ mmiowb();
- if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) {
- spin_lock_bh(&droq->lock);
- desc_refilled = octeon_droq_refill(oct, droq);
- if (desc_refilled) {
- /* Flush the droq descriptor data to memory to be sure
- * that when we update the credits the data in memory
- * is accurate.
- */
- wmb();
- writel(desc_refilled, droq->pkts_credit_reg);
- /* make sure mmio write completes */
- mmiowb();
- }
- spin_unlock_bh(&droq->lock);
+ if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
+ reschedule = 0;
}
+
+ return reschedule;
}
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
- return ((total_len + buf_size - 1) / buf_size);
+ return DIV_ROUND_UP(total_len, buf_size);
}
static int
@@ -603,9 +599,9 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
struct octeon_droq *droq,
u32 pkts_to_process)
{
+ u32 pkt, total_len = 0, pkt_count, retval;
struct octeon_droq_info *info;
union octeon_rh *rh;
- u32 pkt, total_len = 0, pkt_count;
pkt_count = pkts_to_process;
@@ -709,30 +705,43 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
if (droq->refill_count >= droq->refill_threshold) {
int desc_refilled = octeon_droq_refill(oct, droq);
- /* Flush the droq descriptor data to memory to be sure
- * that when we update the credits the data in memory
- * is accurate.
- */
- wmb();
- writel((desc_refilled), droq->pkts_credit_reg);
- /* make sure mmio write completes */
- mmiowb();
+ if (desc_refilled) {
+ /* Flush the droq descriptor data to memory to
+ * be sure that when we update the credits the
+ * data in memory is accurate.
+ */
+ wmb();
+ writel(desc_refilled, droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ mmiowb();
+ }
}
-
} /* for (each packet)... */
/* Increment refill_count by the number of buffers processed. */
droq->stats.pkts_received += pkt;
droq->stats.bytes_received += total_len;
+ retval = pkt;
if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));
droq->stats.dropped_toomany += (pkts_to_process - pkt);
- return pkts_to_process;
+ retval = pkts_to_process;
+ }
+
+ atomic_sub(retval, &droq->pkts_pending);
+
+ if (droq->refill_count >= droq->refill_threshold &&
+ readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) {
+ octeon_droq_check_hw_for_pkts(droq);
+
+ /* Make sure there are no pkts_pending */
+ if (!atomic_read(&droq->pkts_pending))
+ octeon_schedule_rxq_oom_work(oct, droq);
}
- return pkt;
+ return retval;
}
int
@@ -740,29 +749,19 @@ octeon_droq_process_packets(struct octeon_device *oct,
struct octeon_droq *droq,
u32 budget)
{
- u32 pkt_count = 0, pkts_processed = 0;
+ u32 pkt_count = 0;
struct list_head *tmp, *tmp2;
- /* Grab the droq lock */
- spin_lock(&droq->lock);
-
octeon_droq_check_hw_for_pkts(droq);
pkt_count = atomic_read(&droq->pkts_pending);
- if (!pkt_count) {
- spin_unlock(&droq->lock);
+ if (!pkt_count)
return 0;
- }
if (pkt_count > budget)
pkt_count = budget;
- pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
-
- atomic_sub(pkts_processed, &droq->pkts_pending);
-
- /* Release the spin lock */
- spin_unlock(&droq->lock);
+ octeon_droq_fast_process_packets(oct, droq, pkt_count);
list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
struct __dispatch *rdisp = (struct __dispatch *)tmp;
@@ -798,8 +797,6 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
if (budget > droq->max_count)
budget = droq->max_count;
- spin_lock(&droq->lock);
-
while (total_pkts_processed < budget) {
octeon_droq_check_hw_for_pkts(droq);
@@ -813,13 +810,9 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
octeon_droq_fast_process_packets(oct, droq,
pkts_available);
- atomic_sub(pkts_processed, &droq->pkts_pending);
-
total_pkts_processed += pkts_processed;
}
- spin_unlock(&droq->lock);
-
list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
struct __dispatch *rdisp = (struct __dispatch *)tmp;
@@ -879,9 +872,8 @@ octeon_enable_irq(struct octeon_device *oct, u32 q_no)
int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
struct octeon_droq_ops *ops)
{
- struct octeon_droq *droq;
- unsigned long flags;
struct octeon_config *oct_cfg = NULL;
+ struct octeon_droq *droq;
oct_cfg = octeon_get_conf(oct);
@@ -901,21 +893,15 @@ int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
}
droq = oct->droq[q_no];
-
- spin_lock_irqsave(&droq->lock, flags);
-
memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));
- spin_unlock_irqrestore(&droq->lock, flags);
-
return 0;
}
int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
- unsigned long flags;
- struct octeon_droq *droq;
struct octeon_config *oct_cfg = NULL;
+ struct octeon_droq *droq;
oct_cfg = octeon_get_conf(oct);
@@ -936,14 +922,10 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
return 0;
}
- spin_lock_irqsave(&droq->lock, flags);
-
droq->ops.fptr = NULL;
droq->ops.farg = NULL;
droq->ops.drop_on_max = 0;
- spin_unlock_irqrestore(&droq->lock, flags);
-
return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index f28f262d4ab6..c9b19e624dce 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -245,9 +245,6 @@ struct octeon_droq_ops {
* Octeon DROQ.
*/
struct octeon_droq {
- /** A spinlock to protect access to this ring. */
- spinlock_t lock;
-
u32 q_no;
u32 pkt_count;
@@ -414,6 +411,6 @@ int octeon_droq_process_poll_pkts(struct octeon_device *oct,
int octeon_enable_irq(struct octeon_device *oct, u32 q_no);
-void octeon_droq_check_oom(struct octeon_droq *droq);
+int octeon_retry_droq_refill(struct octeon_droq *droq);
#endif /*__OCTEON_DROQ_H__ */
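The int return value of octeon_retry_droq_refill() tells the OOM worker whether it needs to run again. A hedged sketch of a per-queue work handler consuming it; only the refill call and the rxq_status_wq[] array come from this patch, the handler name and the surrounding field uses are illustrative:

static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	int q_no = (int)wk->ctxul;
	struct octeon_droq *droq = oct->droq[q_no];

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
		return;

	/* nonzero: the ring is still below CN23XX_SLI_DEF_BP credits,
	 * so reschedule and retry shortly
	 */
	if (octeon_retry_droq_refill(droq))
		queue_delayed_work(lio->rxq_status_wq[q_no].wq,
				   &lio->rxq_status_wq[q_no].wk.work,
				   msecs_to_jiffies(1));
}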
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 2327062e8af6..bebf3bd349c6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -94,6 +94,8 @@ struct octeon_instr_queue {
u32 pkt_in_done;
+ u32 pkts_processed;
+
/** A spinlock to protect access to the input ring.*/
spinlock_t iq_flush_running_lock;
@@ -290,13 +292,19 @@ struct octeon_soft_command {
u32 ctxsize;
/** Time out and callback */
- size_t wait_time;
- size_t timeout;
+ size_t expiry_time;
u32 iq_no;
void (*callback)(struct octeon_device *, u32, void *);
void *callback_arg;
+
+ int caller_is_done;
+ u32 sc_status;
+ struct completion complete;
};
+/* max timeout (in milliseconds) for a soft request */
+#define LIO_SC_MAX_TMO_MS 60000
+
/** Maximum number of buffers to allocate into soft command buffer pool
*/
#define MAX_SOFT_COMMAND_BUFFERS 256
@@ -317,6 +325,8 @@ struct octeon_sc_buffer_pool {
(((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count)
int octeon_setup_sc_buffer_pool(struct octeon_device *oct);
+int octeon_free_sc_done_list(struct octeon_device *oct);
+int octeon_free_sc_zombie_list(struct octeon_device *oct);
int octeon_free_sc_buffer_pool(struct octeon_device *oct);
struct octeon_soft_command *
octeon_alloc_soft_command(struct octeon_device *oct,
@@ -368,6 +378,9 @@ int octeon_send_command(struct octeon_device *oct, u32 iq_no,
u32 force_db, void *cmd, void *buf,
u32 datasize, u32 reqtype);
+void octeon_dump_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc);
+
void octeon_prepare_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc,
u8 opcode, u8 subcode,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index c846eec11a45..073d0647b439 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -70,6 +70,10 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype,
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
unsigned int bytes_compl);
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);
+
+void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
+ struct octeon_droq *droq);
+
/** Swap 8B blocks */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
@@ -146,46 +150,70 @@ err_release_region:
return 1;
}
+/* Input parameters:
+ * sc: pointer to a soft request
+ * timeout: time in milliseconds that the caller wants to wait for
+ *          the response to the request.
+ *          0: wait for the response to come back from the firmware,
+ *             up to LIO_SC_MAX_TMO_MS milliseconds. If the response
+ *             does not arrive within LIO_SC_MAX_TMO_MS milliseconds,
+ *             lio_process_ordered_list() moves the request to the
+ *             zombie response list.
+ *
+ * Return value:
+ * 0:      got the response from the firmware for the sc request.
+ * -EINTR: the user aborted the command.
+ * -ETIME: the user-specified timeout expired.
+ * -EBUSY: the response did not arrive within a reasonable time
+ *         (LIO_SC_MAX_TMO_MS); lio_process_ordered_list() will move
+ *         the sc to the zombie response list.
+ *
+ * On a non-zero return value, sc->caller_is_done has already been
+ * marked 1 here.
+ * On a zero return value, the requestor must mark sc->caller_is_done
+ * with 1 after examining the response of the sc; only then will
+ * lio_process_ordered_list() free the soft command on behalf of the
+ * requestor.
+ * This closes the race in which both the timeout path and
+ * lio_process_ordered_list()/the callback function try to free the
+ * same sc structure.
+ */
static inline int
-sleep_cond(wait_queue_head_t *wait_queue, int *condition)
+wait_for_sc_completion_timeout(struct octeon_device *oct_dev,
+ struct octeon_soft_command *sc,
+ unsigned long timeout)
{
int errno = 0;
- wait_queue_entry_t we;
-
- init_waitqueue_entry(&we, current);
- add_wait_queue(wait_queue, &we);
- while (!(READ_ONCE(*condition))) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (signal_pending(current)) {
- errno = -EINTR;
- goto out;
- }
- schedule();
+ long timeout_jiff;
+
+ if (timeout)
+ timeout_jiff = msecs_to_jiffies(timeout);
+ else
+ timeout_jiff = MAX_SCHEDULE_TIMEOUT;
+
+ timeout_jiff =
+ wait_for_completion_interruptible_timeout(&sc->complete,
+ timeout_jiff);
+ if (timeout_jiff == 0) {
+ dev_err(&oct_dev->pci_dev->dev, "%s: sc is timeout\n",
+ __func__);
+ WRITE_ONCE(sc->caller_is_done, true);
+ errno = -ETIME;
+ } else if (timeout_jiff == -ERESTARTSYS) {
+ dev_err(&oct_dev->pci_dev->dev, "%s: sc is interrupted\n",
+ __func__);
+ WRITE_ONCE(sc->caller_is_done, true);
+ errno = -EINTR;
+ } else if (sc->sc_status == OCTEON_REQUEST_TIMEOUT) {
+ dev_err(&oct_dev->pci_dev->dev, "%s: sc has fatal timeout\n",
+ __func__);
+ WRITE_ONCE(sc->caller_is_done, true);
+ errno = -EBUSY;
}
-out:
- set_current_state(TASK_RUNNING);
- remove_wait_queue(wait_queue, &we);
- return errno;
-}
-/* Gives up the CPU for a timeout period.
- * Check that the condition is not true before we go to sleep for a
- * timeout period.
- */
-static inline void
-sleep_timeout_cond(wait_queue_head_t *wait_queue,
- int *condition,
- int timeout)
-{
- wait_queue_entry_t we;
-
- init_waitqueue_entry(&we, current);
- add_wait_queue(wait_queue, &we);
- set_current_state(TASK_INTERRUPTIBLE);
- if (!(*condition))
- schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
- remove_wait_queue(wait_queue, &we);
+ return errno;
}
#ifndef ROUNDUP4
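A caller-side sketch of the contract documented above: on any non-zero return the helper has already marked the sc done, so only the success path examines the response and sets caller_is_done itself. The function name here is hypothetical; the calls and fields come from this patch:

static int example_if_cfg_wait(struct octeon_device *oct,
			       struct octeon_soft_command *sc)
{
	struct liquidio_if_cfg_resp *resp;
	int retval;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	/* 0 = wait up to LIO_SC_MAX_TMO_MS for the firmware */
	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return retval;	/* helper already marked sc done */

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	if (resp->status)
		retval = -EIO;

	/* hand the sc to octeon_free_sc_done_list() for freeing */
	WRITE_ONCE(sc->caller_is_done, true);
	return retval;
}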
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index d7a3916fe877..50201fc86dcf 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -35,12 +35,6 @@
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
#define LIO_IFSTATE_RESETTING 0x10
-struct liquidio_if_cfg_context {
- u32 octeon_id;
- wait_queue_head_t wc;
- int cond;
-};
-
struct liquidio_if_cfg_resp {
u64 rh;
struct liquidio_if_cfg_info cfg_info;
@@ -48,6 +42,7 @@ struct liquidio_if_cfg_resp {
};
#define LIO_IFCFG_WAIT_TIME 3000 /* In milli seconds */
+#define LIQUIDIO_NDEV_STATS_POLL_TIME_MS 200
/* Structure of a node in list of gather components maintained by
* NIC driver for each network device.
@@ -76,6 +71,12 @@ struct oct_nic_stats_resp {
u64 status;
};
+struct oct_nic_vf_stats_resp {
+ u64 rh;
+ u64 spoofmac_cnt;
+ u64 status;
+};
+
struct oct_nic_stats_ctrl {
struct completion complete;
struct net_device *netdev;
@@ -83,16 +84,13 @@ struct oct_nic_stats_ctrl {
struct oct_nic_seapi_resp {
u64 rh;
- u32 speed;
+ union {
+ u32 fec_setting;
+ u32 speed;
+ };
u64 status;
};
-struct liquidio_nic_seapi_ctl_context {
- int octeon_id;
- u32 status;
- struct completion complete;
-};
-
/** LiquidIO per-interface network private data */
struct lio {
/** State of the interface. Rx/Tx happens only in the RUNNING state. */
@@ -178,7 +176,7 @@ struct lio {
struct cavium_wq txq_status_wq;
/* work queue for rxq oom status */
- struct cavium_wq rxq_status_wq;
+ struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];
/* work queue for link status */
struct cavium_wq link_status_wq;
@@ -187,6 +185,7 @@ struct lio {
struct cavium_wq sync_octeon_time_wq;
int netdev_uc_count;
+ struct cavium_wk stats_wk;
};
#define LIO_SIZE (sizeof(struct lio))
@@ -225,7 +224,7 @@ irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);
-int octnet_get_link_stats(struct net_device *netdev);
+void lio_fetch_stats(struct work_struct *work);
int lio_wait_for_clean_oq(struct octeon_device *oct);
/**
@@ -234,16 +233,14 @@ int lio_wait_for_clean_oq(struct octeon_device *oct);
*/
void liquidio_set_ethtool_ops(struct net_device *netdev);
-void lio_if_cfg_callback(struct octeon_device *oct,
- u32 status __attribute__((unused)),
- void *buf);
-
void lio_delete_glists(struct lio *lio);
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);
int liquidio_get_speed(struct lio *lio);
int liquidio_set_speed(struct lio *lio, int speed);
+int liquidio_get_fec(struct lio *lio);
+int liquidio_set_fec(struct lio *lio, int on_off);
/**
* \brief Net device change_mtu
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 150609bd8849..1a706f81bbb0 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -75,8 +75,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
else
sc->cmd.cmd2.rptr = sc->dmarptr;
- sc->wait_time = 1000;
- sc->timeout = jiffies + sc->wait_time;
+ sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
return sc;
}
@@ -92,29 +91,6 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
ndata->reqtype);
}
-static void octnet_link_ctrl_callback(struct octeon_device *oct,
- u32 status,
- void *sc_ptr)
-{
- struct octeon_soft_command *sc = (struct octeon_soft_command *)sc_ptr;
- struct octnic_ctrl_pkt *nctrl;
-
- nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr;
-
- /* Call the callback function if status is zero (meaning OK) or status
- * contains a firmware status code bigger than zero (meaning the
- * firmware is reporting an error).
- * If no response was expected, status is OK if the command was posted
- * successfully.
- */
- if ((!status || status > FIRMWARE_STATUS_CODE(0)) && nctrl->cb_fn) {
- nctrl->status = status;
- nctrl->cb_fn(nctrl);
- }
-
- octeon_free_soft_command(oct, sc);
-}
-
static inline struct octeon_soft_command
*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
struct octnic_ctrl_pkt *nctrl)
@@ -127,17 +103,14 @@ static inline struct octeon_soft_command
uddsize = (u32)(nctrl->ncmd.s.more * 8);
datasize = OCTNET_CMD_SIZE + uddsize;
- rdatasize = (nctrl->wait_time) ? 16 : 0;
+ rdatasize = 16;
sc = (struct octeon_soft_command *)
- octeon_alloc_soft_command(oct, datasize, rdatasize,
- sizeof(struct octnic_ctrl_pkt));
+ octeon_alloc_soft_command(oct, datasize, rdatasize, 0);
if (!sc)
return NULL;
- memcpy(sc->ctxptr, nctrl, sizeof(struct octnic_ctrl_pkt));
-
data = (u8 *)sc->virtdptr;
memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
@@ -154,9 +127,8 @@ static inline struct octeon_soft_command
octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
0, 0, 0);
- sc->callback = octnet_link_ctrl_callback;
- sc->callback_arg = sc;
- sc->wait_time = nctrl->wait_time;
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
return sc;
}
@@ -199,5 +171,28 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
}
spin_unlock_bh(&oct->cmd_resp_wqlock);
+
+ if (nctrl->ncmd.s.cmdgroup == 0) {
+ switch (nctrl->ncmd.s.cmd) {
+ /* caller holds a lock and cannot sleep waiting for the response */
+ case OCTNET_CMD_CHANGE_DEVFLAGS:
+ case OCTNET_CMD_SET_MULTI_LIST:
+ case OCTNET_CMD_SET_UC_LIST:
+ WRITE_ONCE(sc->caller_is_done, true);
+ return retval;
+ }
+ }
+
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return (retval);
+
+ nctrl->sc_status = sc->sc_status;
+ retval = nctrl->sc_status;
+ if (nctrl->cb_fn)
+ nctrl->cb_fn(nctrl);
+
+ WRITE_ONCE(sc->caller_is_done, true);
+
return retval;
}
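The early return for CHANGE_DEVFLAGS/SET_MULTI_LIST/SET_UC_LIST exists because those commands are issued from atomic context (ndo_set_rx_mode runs with the address list lock held), so this function must not sleep in wait_for_sc_completion_timeout(). Such a command completes asynchronously:

/* Fire-and-forget timeline for the three atomic-context commands:
 *
 *   octnet_send_nic_ctrl_pkt()   marks sc->caller_is_done up front
 *   lio_process_ordered_list()   sees no callback and parks the sc
 *                                on OCTEON_DONE_SC_LIST
 *   octeon_free_sc_done_list()   sees caller_is_done set and frees it
 */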
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index de4130d26a98..87dd6f89ce51 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -52,20 +52,13 @@ struct octnic_ctrl_pkt {
/** Input queue to use to send this command. */
u64 iq_no;
- /** Time to wait for Octeon software to respond to this control command.
- * If wait_time is 0, OSI assumes no response is expected.
- */
- size_t wait_time;
-
/** The network device that issued the control command. */
u64 netpndev;
/** Callback function called when the command has been fetched */
octnic_ctrl_pkt_cb_fn_t cb_fn;
- u32 status;
- u16 *response_code;
- struct completion *completion;
+ u32 sc_status;
};
#define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd))
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 8f746e1348d4..c6f4cbda040f 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -123,6 +123,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
iq->do_auto_flush = 1;
iq->db_timeout = (u32)conf->db_timeout;
atomic_set(&iq->instr_pending, 0);
+ iq->pkts_processed = 0;
/* Initialize the spinlock for this instruction queue */
spin_lock_init(&iq->lock);
@@ -379,7 +380,6 @@ lio_process_iq_request_list(struct octeon_device *oct,
u32 inst_count = 0;
unsigned int pkts_compl = 0, bytes_compl = 0;
struct octeon_soft_command *sc;
- struct octeon_instr_irh *irh;
unsigned long flags;
while (old != iq->octeon_read_index) {
@@ -401,40 +401,21 @@ lio_process_iq_request_list(struct octeon_device *oct,
case REQTYPE_RESP_NET:
case REQTYPE_SOFT_COMMAND:
sc = buf;
-
- if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
- irh = (struct octeon_instr_irh *)
- &sc->cmd.cmd3.irh;
- else
- irh = (struct octeon_instr_irh *)
- &sc->cmd.cmd2.irh;
- if (irh->rflag) {
- /* We're expecting a response from Octeon.
- * It's up to lio_process_ordered_list() to
- * process sc. Add sc to the ordered soft
- * command response list because we expect
- * a response from Octeon.
- */
- spin_lock_irqsave
- (&oct->response_list
- [OCTEON_ORDERED_SC_LIST].lock,
- flags);
- atomic_inc(&oct->response_list
- [OCTEON_ORDERED_SC_LIST].
- pending_req_count);
- list_add_tail(&sc->node, &oct->response_list
- [OCTEON_ORDERED_SC_LIST].head);
- spin_unlock_irqrestore
- (&oct->response_list
- [OCTEON_ORDERED_SC_LIST].lock,
- flags);
- } else {
- if (sc->callback) {
- /* This callback must not sleep */
- sc->callback(oct, OCTEON_REQUEST_DONE,
- sc->callback_arg);
- }
- }
+ /* We're expecting a response from Octeon.
+ * It's up to lio_process_ordered_list() to
+ * process sc. Add sc to the ordered soft
+ * command response list because we expect
+ * a response from Octeon.
+ */
+ spin_lock_irqsave(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock, flags);
+ atomic_inc(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].pending_req_count);
+ list_add_tail(&sc->node, &oct->response_list
+ [OCTEON_ORDERED_SC_LIST].head);
+ spin_unlock_irqrestore(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock,
+ flags);
break;
default:
dev_err(&oct->pci_dev->dev,
@@ -459,7 +440,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
if (atomic_read(&oct->response_list
[OCTEON_ORDERED_SC_LIST].pending_req_count))
- queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
+ queue_work(cwq->wq, &cwq->wk.work.work);
return inst_count;
}
@@ -495,6 +476,7 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
lio_process_iq_request_list(oct, iq, 0);
if (inst_processed) {
+ iq->pkts_processed += inst_processed;
atomic_sub(inst_processed, &iq->instr_pending);
iq->stats.instr_processed += inst_processed;
}
@@ -753,8 +735,7 @@ int octeon_send_soft_command(struct octeon_device *oct,
len = (u32)ih2->dlengsz;
}
- if (sc->wait_time)
- sc->timeout = jiffies + sc->wait_time;
+ sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
len, REQTYPE_SOFT_COMMAND));
@@ -789,11 +770,76 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
return 0;
}
+int octeon_free_sc_done_list(struct octeon_device *oct)
+{
+ struct octeon_response_list *done_sc_list, *zombie_sc_list;
+ struct octeon_soft_command *sc;
+ struct list_head *tmp, *tmp2;
+ spinlock_t *sc_lists_lock; /* lock for response_list */
+
+ done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
+ zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+
+ if (!atomic_read(&done_sc_list->pending_req_count))
+ return 0;
+
+ sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+ spin_lock_bh(sc_lists_lock);
+
+ list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
+ sc = list_entry(tmp, struct octeon_soft_command, node);
+
+ if (READ_ONCE(sc->caller_is_done)) {
+ list_del(&sc->node);
+ atomic_dec(&done_sc_list->pending_req_count);
+
+ if (*sc->status_word == COMPLETION_WORD_INIT) {
+ /* timeout; move sc to zombie list */
+ list_add_tail(&sc->node, &zombie_sc_list->head);
+ atomic_inc(&zombie_sc_list->pending_req_count);
+ } else {
+ octeon_free_soft_command(oct, sc);
+ }
+ }
+ }
+
+ spin_unlock_bh(sc_lists_lock);
+
+ return 0;
+}
+
+int octeon_free_sc_zombie_list(struct octeon_device *oct)
+{
+ struct octeon_response_list *zombie_sc_list;
+ struct octeon_soft_command *sc;
+ struct list_head *tmp, *tmp2;
+ spinlock_t *sc_lists_lock; /* lock for response_list */
+
+ zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+ sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+ spin_lock_bh(sc_lists_lock);
+
+ list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
+ list_del(tmp);
+ atomic_dec(&zombie_sc_list->pending_req_count);
+ sc = list_entry(tmp, struct octeon_soft_command, node);
+ octeon_free_soft_command(oct, sc);
+ }
+
+ spin_unlock_bh(sc_lists_lock);
+
+ return 0;
+}
+
int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
struct list_head *tmp, *tmp2;
struct octeon_soft_command *sc;
+ octeon_free_sc_zombie_list(oct);
+
spin_lock_bh(&oct->sc_buf_pool.lock);
list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
@@ -822,6 +868,9 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc = NULL;
struct list_head *tmp;
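+	/* every soft command now carries a response buffer; reserve at
+	 * least enough room for the 8-byte completion status word
+	 */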
+ if (!rdatasize)
+ rdatasize = 16;
+
WARN_ON((offset + datasize + rdatasize + ctxsize) >
SOFT_COMMAND_BUFFER_SIZE);
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index fe5b53700576..ac7747ccf56a 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -69,6 +69,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
u32 status;
u64 status64;
+ octeon_free_sc_done_list(octeon_dev);
+
ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
do {
@@ -111,26 +113,88 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
}
}
}
- } else if (force_quit || (sc->timeout &&
- time_after(jiffies, (unsigned long)sc->timeout))) {
- dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n",
- __func__, (long)jiffies, (long)sc->timeout);
+ } else if (unlikely(force_quit) || (sc->expiry_time &&
+ time_after(jiffies, (unsigned long)sc->expiry_time))) {
+ struct octeon_instr_irh *irh =
+ (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+
+ dev_err(&octeon_dev->pci_dev->dev, "%s: ", __func__);
+ dev_err(&octeon_dev->pci_dev->dev,
+ "cmd %x/%x/%llx/%llx failed, ",
+ irh->opcode, irh->subcode,
+ sc->cmd.cmd3.ossp[0], sc->cmd.cmd3.ossp[1]);
+ dev_err(&octeon_dev->pci_dev->dev,
+ "timeout (%ld, %ld)\n",
+ (long)jiffies, (long)sc->expiry_time);
status = OCTEON_REQUEST_TIMEOUT;
}
if (status != OCTEON_REQUEST_PENDING) {
+ sc->sc_status = status;
+
/* we have received a response or we have timed out */
/* remove node from linked list */
list_del(&sc->node);
atomic_dec(&octeon_dev->response_list
- [OCTEON_ORDERED_SC_LIST].
- pending_req_count);
- spin_unlock_bh
- (&ordered_sc_list->lock);
+ [OCTEON_ORDERED_SC_LIST].
+ pending_req_count);
+
+ if (!sc->callback) {
+ atomic_inc(&octeon_dev->response_list
+ [OCTEON_DONE_SC_LIST].
+ pending_req_count);
+ list_add_tail(&sc->node,
+ &octeon_dev->response_list
+ [OCTEON_DONE_SC_LIST].head);
+
+ if (unlikely(READ_ONCE(sc->caller_is_done))) {
+ /* caller does not wait for response
+ * from firmware
+ */
+ if (status != OCTEON_REQUEST_DONE) {
+ struct octeon_instr_irh *irh;
+
+ irh =
+ (struct octeon_instr_irh *)
+ &sc->cmd.cmd3.irh;
+ dev_dbg
+ (&octeon_dev->pci_dev->dev,
+ "%s: sc failed: opcode=%x, ",
+ __func__, irh->opcode);
+ dev_dbg
+ (&octeon_dev->pci_dev->dev,
+ "subcode=%x, ossp[0]=%llx, ",
+ irh->subcode,
+ sc->cmd.cmd3.ossp[0]);
+ dev_dbg
+ (&octeon_dev->pci_dev->dev,
+ "ossp[1]=%llx, status=%d\n",
+ sc->cmd.cmd3.ossp[1],
+ status);
+ }
+ } else {
+ complete(&sc->complete);
+ }
+
+ spin_unlock_bh(&ordered_sc_list->lock);
+ } else {
+ /* sc with callback function */
+ if (status == OCTEON_REQUEST_TIMEOUT) {
+ atomic_inc(&octeon_dev->response_list
+ [OCTEON_ZOMBIE_SC_LIST].
+ pending_req_count);
+ list_add_tail(&sc->node,
+ &octeon_dev->response_list
+ [OCTEON_ZOMBIE_SC_LIST].
+ head);
+ }
+
+ spin_unlock_bh(&ordered_sc_list->lock);
- if (sc->callback)
sc->callback(octeon_dev, status,
sc->callback_arg);
+ /* sc is freed by caller */
+ }
request_complete++;
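Taken together, the branches above give each finished sc exactly one owner. A condensed map of the dispatch:

/* After an sc leaves OCTEON_ORDERED_SC_LIST:
 *
 *   no callback  -> parked on OCTEON_DONE_SC_LIST; a still-blocked
 *                   waiter is woken via complete(&sc->complete), and
 *                   once caller_is_done is set
 *                   octeon_free_sc_done_list() frees the sc (or moves
 *                   a timed-out one to the zombie list).
 *   callback set -> a timed-out sc is first parked on
 *                   OCTEON_ZOMBIE_SC_LIST for
 *                   octeon_free_sc_zombie_list() to reclaim; the
 *                   callback itself runs outside the list lock and
 *                   owns freeing the sc on normal completion.
 */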
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
index 9169c2815dba..ed4020d26fae 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.h
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -53,7 +53,9 @@ enum {
OCTEON_ORDERED_LIST = 0,
OCTEON_UNORDERED_NONBLOCKING_LIST = 1,
OCTEON_UNORDERED_BLOCKING_LIST = 2,
- OCTEON_ORDERED_SC_LIST = 3
+ OCTEON_ORDERED_SC_LIST = 3,
+ OCTEON_DONE_SC_LIST = 4,
+ OCTEON_ZOMBIE_SC_LIST = 5
};
/** Response Order values for a Octeon Request. */
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index bb43ddb7539e..4b3aecf98f2a 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1268,12 +1268,13 @@ static int octeon_mgmt_stop(struct net_device *netdev)
return 0;
}
-static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t
+octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
union mgmt_port_ring_entry re;
unsigned long flags;
- int rv = NETDEV_TX_BUSY;
+ netdev_tx_t rv = NETDEV_TX_BUSY;
re.d64 = 0;
re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index e2cdfa75673f..75c1c5ed2387 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,6 +67,7 @@ config CHELSIO_T3
config CHELSIO_T4
tristate "Chelsio Communications T4/T5/T6 Ethernet support"
depends on PCI && (IPV6 || IPV6=n)
+ depends on THERMAL || !THERMAL
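+ # allows THERMAL=y or =n with any CHELSIO_T4, but forbids
+ # CHELSIO_T4=y with THERMAL=m, where the built-in driver would
+ # reference thermal symbols that only exist in a module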
select FW_LOADER
select MDIO
select ZLIB_DEFLATE
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index a19172dbe6be..1e82b9efe447 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -33,7 +33,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -2159,6 +2158,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+ return -EINVAL;
if (t.qset_idx >= SGE_QSETS)
return -EINVAL;
if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2258,6 +2259,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+ return -EINVAL;
+
/* Display qsets for all ports when offload enabled */
if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
q1 = 0;
@@ -2303,6 +2307,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
+ if (edata.cmd != CHELSIO_SET_QSET_NUM)
+ return -EINVAL;
if (edata.val < 1 ||
(edata.val > 1 && !(adapter->flags & USING_MSIX)))
return -EINVAL;
@@ -2343,6 +2349,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_LOAD_FW)
+ return -EINVAL;
/* Check t.len sanity ? */
fw_data = memdup_user(useraddr + sizeof(t), t.len);
if (IS_ERR(fw_data))
@@ -2366,6 +2374,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
+ if (m.cmd != CHELSIO_SETMTUTAB)
+ return -EINVAL;
if (m.nmtus != NMTUS)
return -EINVAL;
if (m.mtus[0] < 81) /* accommodate SACK */
@@ -2407,6 +2417,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
+ if (m.cmd != CHELSIO_SET_PM)
+ return -EINVAL;
if (!is_power_of_2(m.rx_pg_sz) ||
!is_power_of_2(m.tx_pg_sz))
return -EINVAL; /* not power of 2 */
@@ -2440,6 +2452,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_GET_MEM)
+ return -EINVAL;
if ((t.addr & 7) || (t.len & 7))
return -EINVAL;
if (t.mem_id == MEM_CM)
@@ -2492,6 +2506,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EAGAIN;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+ return -EINVAL;
tp = (const struct trace_params *)&t.sip;
if (t.config_tx)
@@ -3423,8 +3439,7 @@ static void remove_one(struct pci_dev *pdev)
free_netdev(adapter->port[i]);
iounmap(adapter->regs);
- if (adapter->nofail_skb)
- kfree_skb(adapter->nofail_skb);
+ kfree_skb(adapter->nofail_skb);
kfree(adapter);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 50cd660732c5..84604aff53ce 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1302,8 +1302,7 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
rcu_read_unlock();
RCU_INIT_POINTER(tdev->l2opt, NULL);
call_rcu(&d->rcu_head, clean_l2_data);
- if (t->nofail_skb)
- kfree_skb(t->nofail_skb);
+ kfree_skb(t->nofail_skb);
kfree(t);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index bea6a059a8f1..78e5d17a1d5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -12,3 +12,6 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
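+# cxgb4_thermal.o is built whenever THERMAL is enabled (y or m); the
+# THERMAL || !THERMAL dependency in Kconfig rules out cxgb4=y with
+# THERMAL=m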
+ifdef CONFIG_THERMAL
+cxgb4-objs += cxgb4_thermal.o
+endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 36d25883d123..b2d617abcf49 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -315,6 +315,48 @@ struct cudbg_pbt_tables {
u32 pbt_data[CUDBG_PBT_DATA_ENTRIES];
};
+enum cudbg_qdesc_qtype {
+ CUDBG_QTYPE_UNKNOWN = 0,
+ CUDBG_QTYPE_NIC_TXQ,
+ CUDBG_QTYPE_NIC_RXQ,
+ CUDBG_QTYPE_NIC_FLQ,
+ CUDBG_QTYPE_CTRLQ,
+ CUDBG_QTYPE_FWEVTQ,
+ CUDBG_QTYPE_INTRQ,
+ CUDBG_QTYPE_PTP_TXQ,
+ CUDBG_QTYPE_OFLD_TXQ,
+ CUDBG_QTYPE_RDMA_RXQ,
+ CUDBG_QTYPE_RDMA_FLQ,
+ CUDBG_QTYPE_RDMA_CIQ,
+ CUDBG_QTYPE_ISCSI_RXQ,
+ CUDBG_QTYPE_ISCSI_FLQ,
+ CUDBG_QTYPE_ISCSIT_RXQ,
+ CUDBG_QTYPE_ISCSIT_FLQ,
+ CUDBG_QTYPE_CRYPTO_TXQ,
+ CUDBG_QTYPE_CRYPTO_RXQ,
+ CUDBG_QTYPE_CRYPTO_FLQ,
+ CUDBG_QTYPE_TLS_RXQ,
+ CUDBG_QTYPE_TLS_FLQ,
+ CUDBG_QTYPE_MAX,
+};
+
+#define CUDBG_QDESC_REV 1
+
+struct cudbg_qdesc_entry {
+ u32 data_size;
+ u32 qtype;
+ u32 qid;
+ u32 desc_size;
+ u32 num_desc;
+ u8 data[0]; /* Must be last */
+};
+
+struct cudbg_qdesc_info {
+ u32 qdesc_entry_size;
+ u32 num_queues;
+ u8 data[0]; /* Must be last */
+};
+
#define IREG_NUM_ELEM 4
static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index 215fe6260fd7..dec63c15c0ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -81,7 +81,8 @@ enum cudbg_dbg_entity_type {
CUDBG_MBOX_LOG = 66,
CUDBG_HMA_INDIRECT = 67,
CUDBG_HMA = 68,
- CUDBG_MAX_ENTITY = 70,
+ CUDBG_QDESC = 70,
+ CUDBG_MAX_ENTITY = 71,
};
struct cudbg_init {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index d97e0d7e541a..7c49681407ad 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -19,6 +19,7 @@
#include "t4_regs.h"
#include "cxgb4.h"
+#include "cxgb4_cudbg.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
@@ -2890,3 +2891,240 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
}
return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
+
+void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
+ u32 *num, u32 *size)
+{
+ u32 tot_entries = 0, tot_size = 0;
+
+ /* NIC TXQ, RXQ, FLQ, and CTRLQ */
+ tot_entries += MAX_ETH_QSETS * 3;
+ tot_entries += MAX_CTRL_QUEUES;
+
+ tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
+ tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
+ tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE;
+ tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES *
+ MAX_CTRL_TXQ_DESC_SIZE;
+
+ /* FW_EVTQ and INTRQ */
+ tot_entries += INGQ_EXTRAS;
+ tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
+
+ /* PTP_TXQ */
+ tot_entries += 1;
+ tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
+
+ /* ULD TXQ, RXQ, and FLQ */
+ tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS;
+ tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2;
+
+ tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES *
+ MAX_TXQ_DESC_SIZE;
+ tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES *
+ MAX_RXQ_DESC_SIZE;
+ tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS *
+ MAX_FL_DESC_SIZE;
+
+ /* ULD CIQ */
+ tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS;
+ tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
+ MAX_RXQ_DESC_SIZE;
+
+ tot_size += sizeof(struct cudbg_ver_hdr) +
+ sizeof(struct cudbg_qdesc_info) +
+ sizeof(struct cudbg_qdesc_entry) * tot_entries;
+
+ if (num)
+ *num = tot_entries;
+
+ if (size)
+ *size = tot_size;
+}
+
+int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ u32 num_queues = 0, tot_entries = 0, size = 0;
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_qdesc_entry *qdesc_entry;
+ struct cudbg_qdesc_info *qdesc_info;
+ struct cudbg_ver_hdr *ver_hdr;
+ struct sge *s = &padap->sge;
+ u32 i, j, cur_off, tot_len;
+ u8 *data;
+ int rc;
+
+ cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size);
+ size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE);
+ tot_len = size;
+ data = kvzalloc(size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ver_hdr = (struct cudbg_ver_hdr *)data;
+ ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
+ ver_hdr->revision = CUDBG_QDESC_REV;
+ ver_hdr->size = sizeof(struct cudbg_qdesc_info);
+ size -= sizeof(*ver_hdr);
+
+ qdesc_info = (struct cudbg_qdesc_info *)(data +
+ sizeof(*ver_hdr));
+ size -= sizeof(*qdesc_info);
+ qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data;
+
+#define QDESC_GET(q, desc, type, label) do { \
+ if (size <= 0) { \
+ goto label; \
+ } \
+ if (desc) { \
+ cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
+ size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \
+ num_queues++; \
+ qdesc_entry = cudbg_next_qdesc(qdesc_entry); \
+ } \
+} while (0)
+
+#define QDESC_GET_TXQ(q, type, label) do { \
+ struct sge_txq *txq = (struct sge_txq *)q; \
+ QDESC_GET(txq, txq->desc, type, label); \
+} while (0)
+
+#define QDESC_GET_RXQ(q, type, label) do { \
+ struct sge_rspq *rxq = (struct sge_rspq *)q; \
+ QDESC_GET(rxq, rxq->desc, type, label); \
+} while (0)
+
+#define QDESC_GET_FLQ(q, type, label) do { \
+ struct sge_fl *flq = (struct sge_fl *)q; \
+ QDESC_GET(flq, flq->desc, type, label); \
+} while (0)
+
+ /* NIC TXQ */
+ for (i = 0; i < s->ethqsets; i++)
+ QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out);
+
+ /* NIC RXQ */
+ for (i = 0; i < s->ethqsets; i++)
+ QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out);
+
+ /* NIC FLQ */
+ for (i = 0; i < s->ethqsets; i++)
+ QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out);
+
+ /* NIC CTRLQ */
+ for (i = 0; i < padap->params.nports; i++)
+ QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out);
+
+ /* FW_EVTQ */
+ QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out);
+
+ /* INTRQ */
+ QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out);
+
+ /* PTP_TXQ */
+ QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out);
+
+ /* ULD Queues */
+ mutex_lock(&uld_mutex);
+
+ if (s->uld_txq_info) {
+ struct sge_uld_txq_info *utxq;
+
+ /* ULD TXQ */
+ for (j = 0; j < CXGB4_TX_MAX; j++) {
+ if (!s->uld_txq_info[j])
+ continue;
+
+ utxq = s->uld_txq_info[j];
+ for (i = 0; i < utxq->ntxq; i++)
+ QDESC_GET_TXQ(&utxq->uldtxq[i].q,
+ cudbg_uld_txq_to_qtype(j),
+ out_unlock);
+ }
+ }
+
+ if (s->uld_rxq_info) {
+ struct sge_uld_rxq_info *urxq;
+ u32 base;
+
+ /* ULD RXQ */
+ for (j = 0; j < CXGB4_ULD_MAX; j++) {
+ if (!s->uld_rxq_info[j])
+ continue;
+
+ urxq = s->uld_rxq_info[j];
+ for (i = 0; i < urxq->nrxq; i++)
+ QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
+ cudbg_uld_rxq_to_qtype(j),
+ out_unlock);
+ }
+
+ /* ULD FLQ */
+ for (j = 0; j < CXGB4_ULD_MAX; j++) {
+ if (!s->uld_rxq_info[j])
+ continue;
+
+ urxq = s->uld_rxq_info[j];
+ for (i = 0; i < urxq->nrxq; i++)
+ QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
+ cudbg_uld_flq_to_qtype(j),
+ out_unlock);
+ }
+
+ /* ULD CIQ */
+ for (j = 0; j < CXGB4_ULD_MAX; j++) {
+ if (!s->uld_rxq_info[j])
+ continue;
+
+ urxq = s->uld_rxq_info[j];
+ base = urxq->nrxq;
+ for (i = 0; i < urxq->nciq; i++)
+ QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
+ cudbg_uld_ciq_to_qtype(j),
+ out_unlock);
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+
+out:
+ qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
+ qdesc_info->num_queues = num_queues;
+ cur_off = 0;
+ while (tot_len) {
+ u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE);
+
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size,
+ &temp_buff);
+ if (rc) {
+ cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
+ goto out_free;
+ }
+
+ memcpy(temp_buff.data, data + cur_off, chunk_size);
+ tot_len -= chunk_size;
+ cur_off += chunk_size;
+ rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
+ dbg_buff);
+ if (rc) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
+ goto out_free;
+ }
+ }
+
+out_free:
+ if (data)
+ kvfree(data);
+
+#undef QDESC_GET_FLQ
+#undef QDESC_GET_RXQ
+#undef QDESC_GET_TXQ
+#undef QDESC_GET
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
index eebefe7cd18e..f047a01a3e5b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
@@ -171,6 +171,9 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
@@ -182,4 +185,107 @@ int cudbg_fill_meminfo(struct adapter *padap,
struct cudbg_meminfo *meminfo_buff);
void cudbg_fill_le_tcam_info(struct adapter *padap,
struct cudbg_tcam *tcam_region);
+void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
+ u32 *num, u32 *size);
+
+static inline u32 cudbg_uld_txq_to_qtype(u32 uld)
+{
+ switch (uld) {
+ case CXGB4_TX_OFLD:
+ return CUDBG_QTYPE_OFLD_TXQ;
+ case CXGB4_TX_CRYPTO:
+ return CUDBG_QTYPE_CRYPTO_TXQ;
+ }
+
+ return CUDBG_QTYPE_UNKNOWN;
+}
+
+static inline u32 cudbg_uld_rxq_to_qtype(u32 uld)
+{
+ switch (uld) {
+ case CXGB4_ULD_RDMA:
+ return CUDBG_QTYPE_RDMA_RXQ;
+ case CXGB4_ULD_ISCSI:
+ return CUDBG_QTYPE_ISCSI_RXQ;
+ case CXGB4_ULD_ISCSIT:
+ return CUDBG_QTYPE_ISCSIT_RXQ;
+ case CXGB4_ULD_CRYPTO:
+ return CUDBG_QTYPE_CRYPTO_RXQ;
+ case CXGB4_ULD_TLS:
+ return CUDBG_QTYPE_TLS_RXQ;
+ }
+
+ return CUDBG_QTYPE_UNKNOWN;
+}
+
+static inline u32 cudbg_uld_flq_to_qtype(u32 uld)
+{
+ switch (uld) {
+ case CXGB4_ULD_RDMA:
+ return CUDBG_QTYPE_RDMA_FLQ;
+ case CXGB4_ULD_ISCSI:
+ return CUDBG_QTYPE_ISCSI_FLQ;
+ case CXGB4_ULD_ISCSIT:
+ return CUDBG_QTYPE_ISCSIT_FLQ;
+ case CXGB4_ULD_CRYPTO:
+ return CUDBG_QTYPE_CRYPTO_FLQ;
+ case CXGB4_ULD_TLS:
+ return CUDBG_QTYPE_TLS_FLQ;
+ }
+
+ return CUDBG_QTYPE_UNKNOWN;
+}
+
+static inline u32 cudbg_uld_ciq_to_qtype(u32 uld)
+{
+ switch (uld) {
+ case CXGB4_ULD_RDMA:
+ return CUDBG_QTYPE_RDMA_CIQ;
+ }
+
+ return CUDBG_QTYPE_UNKNOWN;
+}
+
+static inline void cudbg_fill_qdesc_txq(const struct sge_txq *txq,
+ enum cudbg_qdesc_qtype type,
+ struct cudbg_qdesc_entry *entry)
+{
+ entry->qtype = type;
+ entry->qid = txq->cntxt_id;
+ entry->desc_size = sizeof(struct tx_desc);
+ entry->num_desc = txq->size;
+ entry->data_size = txq->size * sizeof(struct tx_desc);
+ memcpy(entry->data, txq->desc, entry->data_size);
+}
+
+static inline void cudbg_fill_qdesc_rxq(const struct sge_rspq *rxq,
+ enum cudbg_qdesc_qtype type,
+ struct cudbg_qdesc_entry *entry)
+{
+ entry->qtype = type;
+ entry->qid = rxq->cntxt_id;
+ entry->desc_size = rxq->iqe_len;
+ entry->num_desc = rxq->size;
+ entry->data_size = rxq->size * rxq->iqe_len;
+ memcpy(entry->data, rxq->desc, entry->data_size);
+}
+
+static inline void cudbg_fill_qdesc_flq(const struct sge_fl *flq,
+ enum cudbg_qdesc_qtype type,
+ struct cudbg_qdesc_entry *entry)
+{
+ entry->qtype = type;
+ entry->qid = flq->cntxt_id;
+ entry->desc_size = sizeof(__be64);
+ entry->num_desc = flq->size;
+ entry->data_size = flq->size * sizeof(__be64);
+ memcpy(entry->data, flq->desc, entry->data_size);
+}
+
+static inline
+struct cudbg_qdesc_entry *cudbg_next_qdesc(struct cudbg_qdesc_entry *e)
+{
+ return (struct cudbg_qdesc_entry *)
+ ((u8 *)e + sizeof(*e) + e->data_size);
+}
#endif /* __CUDBG_LIB_H__ */
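Entries of different queue types are packed back to back, which is why cudbg_next_qdesc() advances by the entry header plus the variable-length data[] payload. A sketch of a consumer walking a collected dump; the function name and dump pointer are hypothetical, the layout follows the structures above:

static void walk_qdescs(u8 *dump)
{
	struct cudbg_ver_hdr *ver = (struct cudbg_ver_hdr *)dump;
	struct cudbg_qdesc_info *info =
		(struct cudbg_qdesc_info *)(dump + sizeof(*ver));
	struct cudbg_qdesc_entry *e =
		(struct cudbg_qdesc_entry *)info->data;
	u32 i;

	for (i = 0; i < info->num_queues; i++) {
		pr_info("qid %u type %u: %u descs of %u bytes\n",
			e->qid, e->qtype, e->num_desc, e->desc_size);
		e = cudbg_next_qdesc(e);
	}
}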
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 76d16747f513..b16f4b3ef4c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -52,6 +52,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/crash_dump.h>
+#include <linux/thermal.h>
#include <asm/io.h>
#include "t4_chip_type.h"
#include "cxgb4_uld.h"
@@ -533,6 +534,13 @@ enum {
};
enum {
+ MAX_TXQ_DESC_SIZE = 64,
+ MAX_RXQ_DESC_SIZE = 128,
+ MAX_FL_DESC_SIZE = 8,
+ MAX_CTRL_TXQ_DESC_SIZE = 64,
+};
+
+enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
@@ -685,6 +693,7 @@ struct sge_eth_stats { /* Ethernet queue statistics */
unsigned long rx_cso; /* # of Rx checksum offloads */
unsigned long vlan_ex; /* # of Rx VLAN extractions */
unsigned long rx_drops; /* # of packets dropped due to no mem */
+ unsigned long bad_rx_pkts; /* # of packets with err_vec!=0 */
};
struct sge_eth_rxq { /* SW Ethernet Rx queue */
@@ -882,6 +891,14 @@ struct mps_encap_entry {
atomic_t refcnt;
};
+#if IS_ENABLED(CONFIG_THERMAL)
+struct ch_thermal {
+ struct thermal_zone_device *tzdev;
+ int trip_temp;
+ int trip_type;
+};
+#endif
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -1000,6 +1017,9 @@ struct adapter {
/* Dump buffer for collecting logs in kdump kernel */
struct vmcoredd_data vmcoredd;
+#if IS_ENABLED(CONFIG_THERMAL)
+ struct ch_thermal ch_thermal;
+#endif
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1854,4 +1874,8 @@ void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
int cxgb4_dcb_enabled(const struct net_device *dev);
+
+int cxgb4_thermal_init(struct adapter *adap);
+int cxgb4_thermal_remove(struct adapter *adap);
+
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 5f01c0a7fd98..972f0a124714 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -30,6 +30,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
{ CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
+ { CUDBG_QDESC, cudbg_collect_qdesc },
{ CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
{ CUDBG_REG_DUMP, cudbg_collect_reg_dump },
{ CUDBG_CIM_LA, cudbg_collect_cim_la },
@@ -311,6 +312,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
}
len = cudbg_mbytes_to_bytes(len);
break;
+ case CUDBG_QDESC:
+ cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index b34f0f077a31..9bd5f755a0e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -114,6 +114,24 @@ void cxgb4_dcb_reset(struct net_device *dev)
cxgb4_dcb_state_init(dev);
}
+/* Update the DCB port support: if the negotiated version is IEEE,
+ * advertise DCB_CAP_DCBX_VER_IEEE and clear DCB_CAP_DCBX_VER_CEE if
+ * it was set; if the negotiated version is CEE, advertise
+ * DCB_CAP_DCBX_VER_CEE and clear DCB_CAP_DCBX_VER_IEEE if it was set.
+ */
+static inline void cxgb4_dcb_update_support(struct port_dcb_info *dcb)
+{
+ if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+ if (dcb->supported & DCB_CAP_DCBX_VER_CEE)
+ dcb->supported &= ~DCB_CAP_DCBX_VER_CEE;
+ dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+ } else if (dcb->dcb_version == FW_PORT_DCB_VER_CEE1D01) {
+ if (dcb->supported & DCB_CAP_DCBX_VER_IEEE)
+ dcb->supported &= ~DCB_CAP_DCBX_VER_IEEE;
+ dcb->supported |= DCB_CAP_DCBX_VER_CEE;
+ }
+}
+
/* Finite State machine for Data Center Bridging.
*/
void cxgb4_dcb_state_fsm(struct net_device *dev,
@@ -165,6 +183,15 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
}
case CXGB4_DCB_STATE_FW_INCOMPLETE: {
+ if (transition_to != CXGB4_DCB_INPUT_FW_DISABLED) {
+ /* while in the CXGB4_DCB_STATE_FW_INCOMPLETE state, check
+ * whether the DCB version has changed (the default config can
+ * differ from the configuration the FW negotiated with the
+ * switch) and update the DCB support accordingly.
+ */
+ cxgb4_dcb_update_support(dcb);
+ }
switch (transition_to) {
case CXGB4_DCB_INPUT_FW_ENABLED: {
/* we're already in firmware DCB mode */
@@ -273,8 +300,8 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
enum cxgb4_dcb_state_input input =
((pcmd->u.dcb.control.all_syncd_pkd &
FW_PORT_CMD_ALL_SYNCD_F)
- ? CXGB4_DCB_STATE_FW_ALLSYNCED
- : CXGB4_DCB_STATE_FW_INCOMPLETE);
+ ? CXGB4_DCB_INPUT_FW_ALLSYNCED
+ : CXGB4_DCB_INPUT_FW_INCOMPLETE);
if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) {
dcb_running_version = FW_PORT_CMD_DCB_VERSION_G(
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index 02040b99c78a..484ee8290090 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -67,7 +67,7 @@
do { \
if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \
cxgb4_dcb_state_fsm((__dev), \
- CXGB4_DCB_STATE_FW_ALLSYNCED); \
+ CXGB4_DCB_INPUT_FW_ALLSYNCED); \
} while (0)
/* States we can be in for a port's Data Center Bridging.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0f72f9c4ec74..cab492ec8f59 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2784,6 +2784,7 @@ do { \
RL("LROmerged:", stats.lro_merged);
RL("LROpackets:", stats.lro_pkts);
RL("RxDrops:", stats.rx_drops);
+ RL("RxBadPkts:", stats.bad_rx_pkts);
TL("TSO:", tso);
TL("TxCSO:", tx_cso);
TL("VLANins:", vlan_ins);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 961e3087d1d3..05a46926016a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -62,7 +62,6 @@
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
-#include <net/addrconf.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
@@ -2749,6 +2748,27 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
return -EINVAL;
}
+ if (max_tx_rate == 0) {
+ /* unbind the VF from any Traffic Class */
+ fw_pfvf =
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
+ fw_class = 0xffffffff;
+ ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
+ &fw_pfvf, &fw_class);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
+ ret, adap->pf, vf);
+ return -EINVAL;
+ }
+ dev_info(adap->pdev_dev,
+ "PF %d VF %d is unbound from TX Rate Limiting\n",
+ adap->pf, vf);
+ adap->vfinfo[vf].tx_rate = 0;
+ return 0;
+ }
+
ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
if (ret != FW_SUCCESS) {
dev_err(adap->pdev_dev,
@@ -2798,8 +2818,8 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
&fw_class);
if (ret) {
dev_err(adap->pdev_dev,
- "Err %d in binding VF %d to Traffic Class %d\n",
- ret, vf, class_id);
+ "Err %d in binding PF %d VF %d to Traffic Class %d\n",
+ ret, adap->pf, vf, class_id);
return -EINVAL;
}
dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
@@ -4747,7 +4767,6 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
@@ -5844,6 +5863,10 @@ fw_attach_fail:
if (!is_t4(adapter->params.chip))
cxgb4_ptp_init(adapter);
+ if (IS_ENABLED(CONFIG_THERMAL) &&
+ !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
+ cxgb4_thermal_init(adapter);
+
print_adapter_info(adapter);
return 0;
@@ -5909,6 +5932,8 @@ static void remove_one(struct pci_dev *pdev)
if (!is_t4(adapter->params.chip))
cxgb4_ptp_stop(adapter);
+ if (IS_ENABLED(CONFIG_THERMAL))
+ cxgb4_thermal_remove(adapter);
/* If we allocated filters, free up state associated with any
* valid filters ...
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
new file mode 100644
index 000000000000..28052e7504e5
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2017 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Written by: Ganesh Goudar (ganeshgr@chelsio.com)
+ */
+
+#include "cxgb4.h"
+
+#define CXGB4_NUM_TRIPS 1
+
+static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
+ int *temp)
+{
+ struct adapter *adap = tzdev->devdata;
+ u32 param, val;
+ int ret;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+ FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP));
+
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret < 0 || val == 0)
+ return -1;
+
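+	/* firmware reports degrees C; the thermal core expects millidegrees */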
+ *temp = val * 1000;
+ return 0;
+}
+
+static int cxgb4_thermal_get_trip_type(struct thermal_zone_device *tzdev,
+ int trip, enum thermal_trip_type *type)
+{
+ struct adapter *adap = tzdev->devdata;
+
+ if (!adap->ch_thermal.trip_temp)
+ return -EINVAL;
+
+ *type = adap->ch_thermal.trip_type;
+ return 0;
+}
+
+static int cxgb4_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
+ int trip, int *temp)
+{
+ struct adapter *adap = tzdev->devdata;
+
+ if (!adap->ch_thermal.trip_temp)
+ return -EINVAL;
+
+ *temp = adap->ch_thermal.trip_temp;
+ return 0;
+}
+
+static struct thermal_zone_device_ops cxgb4_thermal_ops = {
+ .get_temp = cxgb4_thermal_get_temp,
+ .get_trip_type = cxgb4_thermal_get_trip_type,
+ .get_trip_temp = cxgb4_thermal_get_trip_temp,
+};
+
+int cxgb4_thermal_init(struct adapter *adap)
+{
+ struct ch_thermal *ch_thermal = &adap->ch_thermal;
+ int num_trip = CXGB4_NUM_TRIPS;
+ u32 param, val;
+ int ret;
+
+	/* on older firmware we may not be able to read the trip
+	 * temperature; in that case set the number of trips to 0.
+	 */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
+ FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_MAXTMPTHRESH));
+
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret < 0) {
+ num_trip = 0; /* could not get trip temperature */
+ } else {
+ ch_thermal->trip_temp = val * 1000;
+ ch_thermal->trip_type = THERMAL_TRIP_CRITICAL;
+ }
+
+ ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip,
+ 0, adap,
+ &cxgb4_thermal_ops,
+ NULL, 0, 0);
+ if (IS_ERR(ch_thermal->tzdev)) {
+ ret = PTR_ERR(ch_thermal->tzdev);
+ dev_err(adap->pdev_dev, "Failed to register thermal zone\n");
+ ch_thermal->tzdev = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+int cxgb4_thermal_remove(struct adapter *adap)
+{
+ if (adap->ch_thermal.tzdev)
+ thermal_zone_device_unregister(adap->ch_thermal.tzdev);
+ return 0;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 4bc211093c98..9a6065a3fa46 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -520,10 +520,20 @@ setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
if (!txq_info)
return -ENOMEM;
+ if (uld_type == CXGB4_ULD_CRYPTO) {
+ i = min_t(int, adap->vres.ncrypto_fc,
+ num_online_cpus());
+ txq_info->ntxq = rounddown(i, adap->params.nports);
+ if (txq_info->ntxq <= 0) {
+ dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
+ kfree(txq_info);
+ return -EINVAL;
+ }
- i = min_t(int, uld_info->ntxq, num_online_cpus());
- txq_info->ntxq = roundup(i, adap->params.nports);
-
+ } else {
+ i = min_t(int, uld_info->ntxq, num_online_cpus());
+ txq_info->ntxq = roundup(i, adap->params.nports);
+ }
txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
GFP_KERNEL);
if (!txq_info->uldtxq) {
@@ -546,11 +556,14 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
struct cxgb4_lld_info *lli)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int tx_uld_type = TX_ULD(uld_type);
+ struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];
lli->rxq_ids = rxq_info->rspq_id;
lli->nrxq = rxq_info->nrxq;
lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
lli->nciq = rxq_info->nciq;
+ lli->ntxq = txq_info->ntxq;
}
int t4_uld_mem_alloc(struct adapter *adap)
@@ -634,7 +647,6 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->ports = adap->port;
lld->vr = &adap->vres;
lld->mtus = adap->params.mtus;
- lld->ntxq = adap->sge.ofldqsets;
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
@@ -702,15 +714,14 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
* about any presently available devices that support its type. Returns
* %-EBUSY if a ULD of the same type is already registered.
*/
-int cxgb4_register_uld(enum cxgb4_uld type,
- const struct cxgb4_uld_info *p)
+void cxgb4_register_uld(enum cxgb4_uld type,
+ const struct cxgb4_uld_info *p)
{
int ret = 0;
- unsigned int adap_idx = 0;
struct adapter *adap;
if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
+ return;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
@@ -733,52 +744,29 @@ int cxgb4_register_uld(enum cxgb4_uld type,
}
if (adap->flags & FULL_INIT_DONE)
enable_rx_uld(adap, type);
- if (adap->uld[type].add) {
- ret = -EBUSY;
+ if (adap->uld[type].add)
goto free_irq;
- }
ret = setup_sge_txq_uld(adap, type, p);
if (ret)
goto free_irq;
adap->uld[type] = *p;
uld_attach(adap, type);
- adap_idx++;
- }
- mutex_unlock(&uld_mutex);
- return 0;
-
+ continue;
free_irq:
- if (adap->flags & FULL_INIT_DONE)
- quiesce_rx_uld(adap, type);
- if (adap->flags & USING_MSIX)
- free_msix_queue_irqs_uld(adap, type);
-free_rxq:
- free_sge_queues_uld(adap, type);
-free_queues:
- free_queues_uld(adap, type);
-out:
-
- list_for_each_entry(adap, &adapter_list, list_node) {
- if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
- (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
- continue;
- if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
- continue;
- if (!adap_idx)
- break;
- adap->uld[type].handle = NULL;
- adap->uld[type].add = NULL;
- release_sge_txq_uld(adap, type);
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
+free_rxq:
free_sge_queues_uld(adap, type);
+free_queues:
free_queues_uld(adap, type);
- adap_idx--;
+out:
+ dev_warn(adap->pdev_dev,
+ "ULD registration failed for uld type %d\n", type);
}
mutex_unlock(&uld_mutex);
- return ret;
+ return;
}
EXPORT_SYMBOL(cxgb4_register_uld);
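
The rewrite above makes registration best-effort per adapter: a failure on one adapter unwinds only that adapter's resources and the loop continues, which is why the function can drop its int return. As a hedged illustration of the goto-ladder idiom it keeps (helper names hypothetical):

static int setup_one(struct adapter *adap)
{
	int ret;

	ret = alloc_rxqs(adap);		/* hypothetical helpers */
	if (ret)
		return ret;
	ret = request_irqs(adap);
	if (ret)
		goto err_free_rxqs;
	return 0;

err_free_rxqs:
	free_rxqs(adap);
	return ret;
}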
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index de9ad311dacd..5fa9a2d5fc4b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -384,7 +384,7 @@ struct cxgb4_uld_info {
int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
};
-int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
+void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 301c4df8a566..99022c0898b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -433,10 +433,12 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
else
lport = netdev2pinfo(physdev)->lport;
- if (is_vlan_dev(neigh->dev))
+ if (is_vlan_dev(neigh->dev)) {
vlan = vlan_dev_vlan_id(neigh->dev);
- else
+ vlan |= vlan_dev_get_egress_qos_mask(neigh->dev, priority);
+ } else {
vlan = VLAN_NONE;
+ }
write_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
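
As an aside on the tag layout the l2t change relies on (illustrative, not from the patch): the 16-bit VLAN TCI carries the VID in bits 0-11 and the priority code point in bits 13-15, and vlan_dev_get_egress_qos_mask() returns the priority bits mapped from the packet priority, so OR-ing it with the VID yields a complete TCI:

	u16 tci = vlan_dev_vlan_id(vlan_dev) |
		  vlan_dev_get_egress_qos_mask(vlan_dev, skb->priority);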
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 7fc656680299..52edb688942b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -38,7 +38,6 @@
#include "cxgb4.h"
#include "sched.h"
-/* Spinlock must be held by caller */
static int t4_sched_class_fw_cmd(struct port_info *pi,
struct ch_sched_params *p,
enum sched_fw_ops op)
@@ -67,7 +66,6 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
return err;
}
-/* Spinlock must be held by caller */
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
enum sched_bind_type type, bool bind)
{
@@ -163,7 +161,6 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
if (e && index >= 0) {
int i = 0;
- spin_lock(&e->lock);
list_for_each_entry(qe, &e->queue_list, list) {
if (i == index)
break;
@@ -171,10 +168,8 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
}
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
false);
- if (err) {
- spin_unlock(&e->lock);
- goto out;
- }
+ if (err)
+ return err;
list_del(&qe->list);
kvfree(qe);
@@ -182,9 +177,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
e->state = SCHED_STATE_UNUSED;
memset(&e->info, 0, sizeof(e->info));
}
- spin_unlock(&e->lock);
}
-out:
return err;
}
@@ -210,10 +203,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
/* Unbind queue from any existing class */
err = t4_sched_queue_unbind(pi, p);
- if (err) {
- kvfree(qe);
- goto out;
- }
+ if (err)
+ goto out_err;
/* Bind queue to specified class */
memset(qe, 0, sizeof(*qe));
@@ -221,18 +212,16 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
memcpy(&qe->param, p, sizeof(qe->param));
e = &s->tab[qe->param.class];
- spin_lock(&e->lock);
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
- if (err) {
- kvfree(qe);
- spin_unlock(&e->lock);
- goto out;
- }
+ if (err)
+ goto out_err;
list_add_tail(&qe->list, &e->queue_list);
atomic_inc(&e->refcnt);
- spin_unlock(&e->lock);
-out:
+ return err;
+
+out_err:
+ kvfree(qe);
return err;
}
@@ -296,8 +285,6 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type)
{
struct port_info *pi = netdev2pinfo(dev);
- struct sched_table *s;
- int err = 0;
u8 class_id;
if (!can_sched(dev))
@@ -323,12 +310,8 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
if (class_id == SCHED_CLS_NONE)
return -ENOTSUPP;
- s = pi->sched_tbl;
- write_lock(&s->rw_lock);
- err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
- write_unlock(&s->rw_lock);
+ return t4_sched_class_bind_unbind_op(pi, arg, type, true);
- return err;
}
/**
@@ -343,8 +326,6 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
enum sched_bind_type type)
{
struct port_info *pi = netdev2pinfo(dev);
- struct sched_table *s;
- int err = 0;
u8 class_id;
if (!can_sched(dev))
@@ -367,12 +348,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
if (!valid_class_id(dev, class_id))
return -EINVAL;
- s = pi->sched_tbl;
- write_lock(&s->rw_lock);
- err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
- write_unlock(&s->rw_lock);
-
- return err;
+ return t4_sched_class_bind_unbind_op(pi, arg, type, false);
}
/* If @p is NULL, fetch any available unused class */
@@ -425,7 +401,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
struct ch_sched_params *p)
{
- struct sched_table *s = pi->sched_tbl;
struct sched_class *e;
u8 class_id;
int err;
@@ -441,7 +416,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
if (class_id != SCHED_CLS_NONE)
return NULL;
- write_lock(&s->rw_lock);
/* See if there's an existing class with the same
* requested sched params
*/
@@ -452,27 +426,19 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
/* Fetch any available unused class */
e = t4_sched_class_lookup(pi, NULL);
if (!e)
- goto out;
+ return NULL;
memcpy(&np, p, sizeof(np));
np.u.params.class = e->idx;
-
- spin_lock(&e->lock);
/* New class */
err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
- if (err) {
- spin_unlock(&e->lock);
- e = NULL;
- goto out;
- }
+ if (err)
+ return NULL;
memcpy(&e->info, &np, sizeof(e->info));
atomic_set(&e->refcnt, 0);
e->state = SCHED_STATE_ACTIVE;
- spin_unlock(&e->lock);
}
-out:
- write_unlock(&s->rw_lock);
return e;
}
@@ -517,14 +483,12 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
return NULL;
s->sched_size = sched_size;
- rwlock_init(&s->rw_lock);
for (i = 0; i < s->sched_size; i++) {
memset(&s->tab[i], 0, sizeof(struct sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
INIT_LIST_HEAD(&s->tab[i].queue_list);
- spin_lock_init(&s->tab[i].lock);
atomic_set(&s->tab[i].refcnt, 0);
}
return s;
@@ -545,11 +509,9 @@ void t4_cleanup_sched(struct adapter *adap)
for (i = 0; i < s->sched_size; i++) {
struct sched_class *e;
- write_lock(&s->rw_lock);
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
t4_sched_class_free(pi, e);
- write_unlock(&s->rw_lock);
}
kvfree(s);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 3a49e00a38a1..168fb4ce3759 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -69,13 +69,11 @@ struct sched_class {
u8 idx;
struct ch_sched_params info;
struct list_head queue_list;
- spinlock_t lock; /* Per class lock */
atomic_t refcnt;
};
struct sched_table { /* per port scheduling table */
u8 sched_size;
- rwlock_t rw_lock; /* Table lock */
struct sched_class tab[0];
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6807bc3a44fb..b90188401d4a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2830,6 +2830,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
csum_ok = pkt->csum_calc && !err_vec &&
(q->netdev->features & NETIF_F_RXCSUM);
+
+ if (err_vec)
+ rxq->stats.bad_rx_pkts++;
+
if (((pkt->l2info & htonl(RXF_TCP_F)) ||
tnl_hdr_len) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 5fe5d16dee72..cb523949c812 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3889,7 +3889,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
c.param[0].mnem =
cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
- c.param[0].val = (__force __be32)op;
+ c.param[0].val = cpu_to_be32(op);
return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}
@@ -4204,6 +4204,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
*/
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
+ unsigned int fw_caps = adap->params.fw_caps_support;
struct fw_port_cmd c;
memset(&c, 0, sizeof(c));
@@ -4211,9 +4212,14 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
FW_PORT_CMD_PORTID_V(port));
c.action_to_len16 =
- cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
+ ? FW_PORT_ACTION_L1_CFG
+ : FW_PORT_ACTION_L1_CFG32) |
FW_LEN16(c));
- c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG);
+ if (fw_caps == FW_CAPS16)
+ c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
+ else
+ c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -10209,7 +10215,9 @@ int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
FW_ACL_VLAN_CMD_VFN_V(vf));
vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
/* Drop all packets that do not match the VLAN id */
- vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F;
+ vlan_cmd.dropnovlan_fm = (enable
+ ? (FW_ACL_VLAN_CMD_DROPNOVLAN_F |
+ FW_ACL_VLAN_CMD_FM_F) : 0);
if (enable != 0) {
vlan_cmd.nvlan = 1;
vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
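
The t4_fwcache() change above is an endianness fix, not a cosmetic one. On a little-endian host a __force cast only silences sparse without swapping bytes, while cpu_to_be32() performs the swap the big-endian firmware expects; an illustrative comparison:

	u32 op = 1;
	__be32 cast = (__force __be32)op; /* bytes 01 00 00 00: firmware reads 0x01000000 */
	__be32 swab = cpu_to_be32(op);    /* bytes 00 00 00 01: firmware reads 1 */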
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index b8f75a22fb6c..f152da1ce046 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
};
struct cpl_abort_req_rss6 {
- WR_HDR;
union opcode_tid ot;
__be32 srqidx_status;
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 5dc6c4154af8..57584ab32043 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1332,6 +1332,7 @@ enum fw_params_param_dev_phyfw {
enum fw_params_param_dev_diag {
FW_PARAM_DEV_DIAG_TMP = 0x00,
FW_PARAM_DEV_DIAG_VDD = 0x01,
+ FW_PARAM_DEV_DIAG_MAXTMPTHRESH = 0x02,
};
enum fw_params_param_dev_fwcache {
@@ -2464,6 +2465,7 @@ struct fw_acl_vlan_cmd {
#define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7
#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S)
+#define FW_ACL_VLAN_CMD_DROPNOVLAN_F FW_ACL_VLAN_CMD_DROPNOVLAN_V(1U)
#define FW_ACL_VLAN_CMD_FM_S 6
#define FW_ACL_VLAN_CMD_FM_M 0x1
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index e2a702996db4..13dfdfca49fc 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
return rx;
}
-static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ep93xx_priv *ep = netdev_priv(dev);
struct ep93xx_tdesc *txd;
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 3f8fe8fd79cc..6324e80960c3 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -113,7 +113,7 @@ struct net_local {
/* Index to functions, as function prototypes. */
static int net_open(struct net_device *dev);
-static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t net_interrupt(int irq, void *dev_id);
static void set_multicast_list(struct net_device *dev);
static void net_rx(struct net_device *dev);
@@ -324,7 +324,7 @@ net_open(struct net_device *dev)
return 0;
}
-static int
+static netdev_tx_t
net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
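
Several drivers in this series receive the same mechanical fix: ndo_start_xmit implementations must return netdev_tx_t rather than int. A minimal sketch of the expected shape (the ring-state helper is hypothetical):

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (my_ring_full(dev)) {	/* hypothetical */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* core keeps the skb and retries */
	}
	/* ... hand the skb to hardware ... */
	return NETDEV_TX_OK;
}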
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 1c9ad3630c77..ceec467f590d 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -372,9 +372,8 @@ static int gmac_setup_phy(struct net_device *netdev)
return -ENODEV;
netdev->phydev = phy;
- phy->supported &= PHY_GBIT_FEATURES;
- phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
- phy->advertising = phy->supported;
+ phy_set_max_speed(phy, SPEED_1000);
+ phy_support_asym_pause(phy);
/* set PHY interface type */
switch (phy->interface) {
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 50222b7b81f3..0a82fcf16d35 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1722,8 +1722,7 @@ out:
static int
dm9000_drv_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct board_info *db;
if (ndev) {
@@ -1745,8 +1744,7 @@ dm9000_drv_suspend(struct device *dev)
static int
dm9000_drv_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct board_info *db = netdev_priv(ndev);
if (ndev) {
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 5a847941c46b..79521e27f0d1 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -284,13 +284,11 @@ static int dnet_mii_probe(struct net_device *dev)
/* mask with MAC supported features */
if (bp->capabilities & DNET_HAS_GIGABIT)
- phydev->supported &= PHY_GBIT_FEATURES;
+ phy_set_max_speed(phydev, SPEED_1000);
else
- phydev->supported &= PHY_BASIC_FEATURES;
+ phy_set_max_speed(phydev, SPEED_100);
- phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
-
- phydev->advertising = phydev->supported;
+ phy_support_asym_pause(phydev);
bp->link = 0;
bp->speed = 0;
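
dnet, gemini, ethoc and several drivers below all converge on the same two phylib helpers instead of masking phydev->supported/advertising by hand; a sketch of the replacement (assuming a valid phydev):

	phy_set_max_speed(phydev, SPEED_1000);	/* strip link modes above 1G */
	phy_support_asym_pause(phydev);		/* allow Pause and Asym_Pause */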
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 58bcee8f0a58..ce041c90adb0 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -185,6 +185,7 @@ static inline void queue_tail_inc(struct be_queue_info *q)
struct be_eq_obj {
struct be_queue_info q;
+ char desc[32];
struct be_adapter *adapter;
struct napi_struct napi;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 74d122616e76..c5ad7a4f4d83 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3488,11 +3488,9 @@ static int be_msix_register(struct be_adapter *adapter)
int status, i, vec;
for_all_evt_queues(adapter, eqo, i) {
- char irq_name[IFNAMSIZ+4];
-
- snprintf(irq_name, sizeof(irq_name), "%s-q%d", netdev->name, i);
+ sprintf(eqo->desc, "%s-q%d", netdev->name, i);
vec = be_msix_vec_get(adapter, eqo);
- status = request_irq(vec, be_msix, 0, irq_name, eqo);
+ status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
if (status)
goto err_msix;
@@ -4002,8 +4000,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL;
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
be16_to_cpu(port));
@@ -4025,8 +4021,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
adapter->vxlan_port = 0;
netdev->hw_enc_features = 0;
- netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
- netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
@@ -5320,6 +5314,7 @@ static void be_netdev_init(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX;
if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
@@ -6151,7 +6146,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
if (status)
return PCI_ERS_RESULT_DISCONNECT;
- pci_cleanup_aer_uncorrect_error_status(pdev);
be_clear_error(adapter, BE_CLEAR_ALL);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 60da0499ad66..0f3e7f21c6fa 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -721,10 +721,7 @@ static int ethoc_mdio_probe(struct net_device *dev)
return err;
}
- phy->advertising &= ~(ADVERTISED_1000baseT_Full |
- ADVERTISED_1000baseT_Half);
- phy->supported &= ~(SUPPORTED_1000baseT_Full |
- SUPPORTED_1000baseT_Half);
+ phy_set_max_speed(phy, SPEED_100);
return 0;
}
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index ed6c76d20b45..4d673225ed3e 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -712,8 +712,8 @@ static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
return skb_checksum_help(skb) == 0;
}
-static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct ftgmac100_txdes *txdes, *first;
@@ -1079,8 +1079,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
/* Indicate that we support PAUSE frames (see comment in
* Documentation/networking/phy.txt)
*/
- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phydev->advertising = phydev->supported;
+ phy_support_asym_pause(phydev);
/* Display what we found */
phy_attached_info(phydev);
@@ -1220,22 +1219,11 @@ static int ftgmac100_set_pauseparam(struct net_device *netdev,
priv->tx_pause = pause->tx_pause;
priv->rx_pause = pause->rx_pause;
- if (phydev) {
- phydev->advertising &= ~ADVERTISED_Pause;
- phydev->advertising &= ~ADVERTISED_Asym_Pause;
+ if (phydev)
+ phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
- if (pause->rx_pause) {
- phydev->advertising |= ADVERTISED_Pause;
- phydev->advertising |= ADVERTISED_Asym_Pause;
- }
-
- if (pause->tx_pause)
- phydev->advertising ^= ADVERTISED_Asym_Pause;
- }
if (netif_running(netdev)) {
- if (phydev && priv->aneg_pause)
- phy_start_aneg(phydev);
- else
+ if (!(phydev && priv->aneg_pause))
ftgmac100_config_pause(priv);
}
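
A hedged sketch of an ethtool pause handler after this style of conversion; phy_set_asym_pause() rewrites the Pause/Asym_Pause advertisement and renegotiates when it actually changed:

static int my_set_pauseparam(struct net_device *dev,
			     struct ethtool_pauseparam *pause)
{
	struct phy_device *phydev = dev->phydev;

	if (phydev)
		phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
	return 0;
}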
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index a1197d3adbe0..570caeb8ee9e 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -634,8 +634,8 @@ static void ftmac100_tx_complete(struct ftmac100 *priv)
;
}
-static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb,
- dma_addr_t map)
+static netdev_tx_t ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb,
+ dma_addr_t map)
{
struct net_device *netdev = priv->netdev;
struct ftmac100_txdes *txdes;
@@ -1016,7 +1016,8 @@ static int ftmac100_stop(struct net_device *netdev)
return 0;
}
-static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t
+ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ftmac100 *priv = netdev_priv(netdev);
dma_addr_t map;
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index a580a3dcbe59..d3a62bc1f1c6 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -96,5 +96,6 @@ config GIANFAR
on the 8540.
source "drivers/net/ethernet/freescale/dpaa/Kconfig"
+source "drivers/net/ethernet/freescale/dpaa2/Kconfig"
endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 0914a3ea4405..3b4ff08e3841 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -21,3 +21,5 @@ ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
obj-$(CONFIG_FSL_FMAN) += fman/
obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
+
+obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 65a22cd9aef2..6e0f47f2c8a3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1280,7 +1280,7 @@ static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
err = bman_release(dpaa_bp->pool, bmb, cnt);
/* Should never occur, address anyway to avoid leaking the buffers */
- if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
+ if (WARN_ON(err) && dpaa_bp->free_buf_cb)
while (cnt-- > 0)
dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
@@ -1704,10 +1704,8 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
skb = build_skb(vaddr, dpaa_bp->size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- if (unlikely(!skb)) {
- WARN_ONCE(1, "Build skb failure on Rx\n");
+ if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
goto free_buffer;
- }
WARN_ON(fd_off != priv->rx_headroom);
skb_reserve(skb, fd_off);
skb_put(skb, qm_fd_get_length(fd));
@@ -1770,7 +1768,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
sz = dpaa_bp->size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
skb = build_skb(sg_vaddr, sz);
- if (WARN_ON(unlikely(!skb)))
+ if (WARN_ON(!skb))
goto free_buffers;
skb->ip_summed = rx_csum_offload(priv, fd);
@@ -2046,7 +2044,8 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
return 0;
}
-static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+static netdev_tx_t
+dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
const int queue_mapping = skb_get_queue_mapping(skb);
bool nonlinear = skb_is_nonlinear(skb);
@@ -2493,8 +2492,7 @@ static int dpaa_phy_init(struct net_device *net_dev)
/* Remove any features not supported by the controller */
phy_dev->supported &= mac_dev->if_support;
- phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
- phy_dev->advertising = phy_dev->supported;
+ phy_support_asym_pause(phy_dev);
mac_dev->phy_dev = phy_dev;
net_dev->phydev = phy_dev;
@@ -2733,8 +2731,6 @@ out_error:
return err;
}
-static const struct of_device_id dpaa_match[];
-
static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
{
u16 headroom;
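
The dropped unlikely() wrappers above are redundant rather than lost: WARN_ON() and WARN_ONCE() already wrap their condition in unlikely() and evaluate to it, so they can gate the error path directly:

	/* warns (once, for WARN_ONCE) and still returns the condition */
	if (WARN_ON(err))
		goto unwind;	/* hypothetical label */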
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 3184c8f7cdd0..13d6e2272ece 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -182,7 +182,6 @@ static int dpaa_set_pauseparam(struct net_device *net_dev,
struct phy_device *phydev;
bool rx_pause, tx_pause;
struct dpaa_priv *priv;
- u32 newadv, oldadv;
int err;
priv = netdev_priv(net_dev);
@@ -194,9 +193,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev,
return -ENODEV;
}
- if (!(phydev->supported & SUPPORTED_Pause) ||
- (!(phydev->supported & SUPPORTED_Asym_Pause) &&
- (epause->rx_pause != epause->tx_pause)))
+ if (!phy_validate_pause(phydev, epause))
return -EINVAL;
/* The MAC should know how to handle PAUSE frame autonegotiation before
@@ -210,29 +207,8 @@ static int dpaa_set_pauseparam(struct net_device *net_dev,
/* Determine the sym/asym advertised PAUSE capabilities from the desired
* rx/tx pause settings.
*/
- newadv = 0;
- if (epause->rx_pause)
- newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
- if (epause->tx_pause)
- newadv ^= ADVERTISED_Asym_Pause;
- oldadv = phydev->advertising &
- (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
-
- /* If there are differences between the old and the new advertised
- * values, restart PHY autonegotiation and advertise the new values.
- */
- if (oldadv != newadv) {
- phydev->advertising &= ~(ADVERTISED_Pause
- | ADVERTISED_Asym_Pause);
- phydev->advertising |= newadv;
- if (phydev->autoneg) {
- err = phy_start_aneg(phydev);
- if (err < 0)
- netdev_err(net_dev, "phy_start_aneg() = %d\n",
- err);
- }
- }
+ phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
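
For reference, phy_validate_pause() encapsulates exactly the open-coded check removed above: it fails if the PHY cannot do Pause at all, or if rx_pause != tx_pause while Asym_Pause is unsupported, so callers reduce to:

	if (!phy_validate_pause(phydev, epause))
		return -EINVAL;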
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
new file mode 100644
index 000000000000..809a155eb193
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -0,0 +1,16 @@
+config FSL_DPAA2_ETH
+ tristate "Freescale DPAA2 Ethernet"
+ depends on FSL_MC_BUS && FSL_MC_DPIO
+ help
+ This is the DPAA2 Ethernet driver supporting Freescale SoCs
+ with DPAA2 (DataPath Acceleration Architecture v2).
+ The driver manages network objects discovered on the Freescale
+ MC bus.
+
+config FSL_DPAA2_PTP_CLOCK
+ tristate "Freescale DPAA2 PTP Clock"
+ depends on FSL_DPAA2_ETH && POSIX_TIMERS
+ select PTP_1588_CLOCK
+ help
+ This driver adds support for using the DPAA2 1588 timer module
+ as a PTP clock.
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
new file mode 100644
index 000000000000..2f424e0a8225
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Freescale DPAA2 Ethernet controller
+#
+
+obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
+obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
+
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
+
+# Needed by the tracing framework
+CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
index 9801528db2a5..9801528db2a5 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 9329fcad95ac..88f7acce38dc 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -98,8 +98,7 @@ free_buf:
}
/* Build a linear skb based on a single-buffer frame descriptor */
-static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
+static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
void *fd_vaddr)
{
@@ -233,7 +232,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) {
- skb = build_linear_skb(priv, ch, fd, vaddr);
+ skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
skb = build_frag_skb(priv, ch, buf_data);
skb_free_frag(vaddr);
@@ -289,10 +288,11 @@ err_frame_format:
*
* Observance of NAPI budget is not our concern, leaving that to the caller.
*/
-static int consume_frames(struct dpaa2_eth_channel *ch)
+static int consume_frames(struct dpaa2_eth_channel *ch,
+ enum dpaa2_eth_fq_type *type)
{
struct dpaa2_eth_priv *priv = ch->priv;
- struct dpaa2_eth_fq *fq;
+ struct dpaa2_eth_fq *fq = NULL;
struct dpaa2_dq *dq;
const struct dpaa2_fd *fd;
int cleaned = 0;
@@ -311,12 +311,23 @@ static int consume_frames(struct dpaa2_eth_channel *ch)
fd = dpaa2_dq_fd(dq);
fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
- fq->stats.frames++;
fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
cleaned++;
} while (!is_last);
+ if (!cleaned)
+ return 0;
+
+ fq->stats.frames += cleaned;
+ ch->stats.frames += cleaned;
+
+ /* A dequeue operation only pulls frames from a single queue
+ * into the store. Return the frame queue type as an out param.
+ */
+ if (type)
+ *type = fq->type;
+
return cleaned;
}
@@ -426,7 +437,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_len(fd, skb->len);
- dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
enable_tx_tstamp(fd, sgt_buf);
@@ -479,7 +490,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_format(fd, dpaa2_fd_single);
- dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
enable_tx_tstamp(fd, buffer_start);
@@ -648,7 +659,7 @@ err_alloc_headroom:
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_channel *ch __always_unused,
const struct dpaa2_fd *fd,
struct napi_struct *napi __always_unused,
u16 queue_id __always_unused)
@@ -921,14 +932,16 @@ static int pull_channel(struct dpaa2_eth_channel *ch)
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
struct dpaa2_eth_channel *ch;
- int cleaned = 0, store_cleaned;
struct dpaa2_eth_priv *priv;
+ int rx_cleaned = 0, txconf_cleaned = 0;
+ enum dpaa2_eth_fq_type type = 0;
+ int store_cleaned;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
priv = ch->priv;
- while (cleaned < budget) {
+ do {
err = pull_channel(ch);
if (unlikely(err))
break;
@@ -936,30 +949,32 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
/* Refill pool if appropriate */
refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch);
- cleaned += store_cleaned;
+ store_cleaned = consume_frames(ch, &type);
+ if (type == DPAA2_RX_FQ)
+ rx_cleaned += store_cleaned;
+ else
+ txconf_cleaned += store_cleaned;
- /* If we have enough budget left for a full store,
- * try a new pull dequeue, otherwise we're done here
+ /* If we either consumed the whole NAPI budget with Rx frames
+ * or we reached the Tx confirmations threshold, we're done.
*/
- if (store_cleaned == 0 ||
- cleaned > budget - DPAA2_ETH_STORE_SIZE)
- break;
- }
+ if (rx_cleaned >= budget ||
+ txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI)
+ return budget;
+ } while (store_cleaned);
- if (cleaned < budget && napi_complete_done(napi, cleaned)) {
- /* Re-enable data available notifications */
- do {
- err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
- cpu_relax();
- } while (err == -EBUSY);
- WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
- ch->nctx.desired_cpu);
- }
-
- ch->stats.frames += cleaned;
+ /* We didn't consume the entire budget, so finish napi and
+ * re-enable data availability notifications
+ */
+ napi_complete_done(napi, rx_cleaned);
+ do {
+ err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
+ cpu_relax();
+ } while (err == -EBUSY);
+ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
+ ch->nctx.desired_cpu);
- return cleaned;
+ return max(rx_cleaned, 1);
}
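
The rewritten poller above follows the standard NAPI contract: return the full budget to stay scheduled, or report the work done via napi_complete_done() before re-enabling event notifications. A minimal sketch (helpers hypothetical):

static int my_poll(struct napi_struct *napi, int budget)
{
	int done = my_clean_rx(napi, budget);	/* hypothetical */

	if (done < budget && napi_complete_done(napi, done))
		my_rearm_events(napi);		/* hypothetical */
	return done;
}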
static void enable_ch_napi(struct dpaa2_eth_priv *priv)
@@ -986,7 +1001,7 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
static int link_state_update(struct dpaa2_eth_priv *priv)
{
- struct dpni_link_state state;
+ struct dpni_link_state state = {0};
int err;
err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
@@ -1069,14 +1084,13 @@ enable_err:
/* The DPIO store must be empty when we call this,
* at the end of every NAPI cycle.
*/
-static u32 drain_channel(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch)
+static u32 drain_channel(struct dpaa2_eth_channel *ch)
{
u32 drained = 0, total = 0;
do {
pull_channel(ch);
- drained = consume_frames(ch);
+ drained = consume_frames(ch, NULL);
total += drained;
} while (drained);
@@ -1091,7 +1105,7 @@ static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- drained += drain_channel(priv, ch);
+ drained += drain_channel(ch);
}
return drained;
@@ -1100,7 +1114,7 @@ static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
static int dpaa2_eth_stop(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int dpni_enabled;
+ int dpni_enabled = 0;
int retries = 10;
u32 drained;
@@ -1143,34 +1157,6 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
return 0;
}
-static int dpaa2_eth_init(struct net_device *net_dev)
-{
- u64 supported = 0;
- u64 not_supported = 0;
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- u32 options = priv->dpni_attrs.options;
-
- /* Capabilities listing */
- supported |= IFF_LIVE_ADDR_CHANGE;
-
- if (options & DPNI_OPT_NO_MAC_FILTER)
- not_supported |= IFF_UNICAST_FLT;
- else
- supported |= IFF_UNICAST_FLT;
-
- net_dev->priv_flags |= supported;
- net_dev->priv_flags &= ~not_supported;
-
- /* Features */
- net_dev->features = NETIF_F_RXCSUM |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_LLTX;
- net_dev->hw_features = net_dev->features;
-
- return 0;
-}
-
static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -1418,7 +1404,6 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_open = dpaa2_eth_open,
.ndo_start_xmit = dpaa2_eth_tx,
.ndo_stop = dpaa2_eth_stop,
- .ndo_init = dpaa2_eth_init,
.ndo_set_mac_address = dpaa2_eth_set_addr,
.ndo_get_stats64 = dpaa2_eth_get_stats,
.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
@@ -1926,6 +1911,11 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
if (err)
goto close;
+ priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
+ dpaa2_eth_fs_count(priv), GFP_KERNEL);
+ if (!priv->cls_rules) {
+ err = -ENOMEM;
+ goto close;
+ }
+
return 0;
close:
@@ -2032,9 +2022,33 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
return 0;
}
-/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
-static const struct dpaa2_eth_hash_fields hash_fields[] = {
+/* Supported header fields for Rx hash distribution key */
+static const struct dpaa2_eth_dist_fields dist_fields[] = {
{
+ /* L2 header */
+ .rxnfc_field = RXH_L2DA,
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_DA,
+ .size = 6,
+ }, {
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_SA,
+ .size = 6,
+ }, {
+ /* This is the last ethertype field parsed:
+ * depending on frame format, it can be the MAC ethertype
+ * or the VLAN etype.
+ */
+ .cls_prot = NET_PROT_ETH,
+ .cls_field = NH_FLD_ETH_TYPE,
+ .size = 2,
+ }, {
+ /* VLAN header */
+ .rxnfc_field = RXH_VLAN,
+ .cls_prot = NET_PROT_VLAN,
+ .cls_field = NH_FLD_VLAN_TCI,
+ .size = 2,
+ }, {
/* IP header */
.rxnfc_field = RXH_IP_SRC,
.cls_prot = NET_PROT_IP,
@@ -2066,32 +2080,122 @@ static const struct dpaa2_eth_hash_fields hash_fields[] = {
},
};
-/* Set RX hash options
+/* Configure the Rx hash key using the legacy API */
+static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_tc_dist_cfg dist_cfg;
+ int err;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
+ if (err)
+ dev_err(dev, "dpni_set_rx_tc_dist failed\n");
+
+ return err;
+}
+
+/* Configure the Rx hash key using the new API */
+static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_dist_cfg dist_cfg;
+ int err;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.enable = 1;
+
+ err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
+ if (err)
+ dev_err(dev, "dpni_set_rx_hash_dist failed\n");
+
+ return err;
+}
+
+/* Configure the Rx flow classification key */
+static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_rx_dist_cfg dist_cfg;
+ int err;
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ dist_cfg.key_cfg_iova = key;
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.enable = 1;
+
+ err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
+ if (err)
+ dev_err(dev, "dpni_set_rx_fs_dist failed\n");
+
+ return err;
+}
+
+/* Size of the Rx flow classification key */
+int dpaa2_eth_cls_key_size(void)
+{
+ int i, size = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ size += dist_fields[i].size;
+
+ return size;
+}
+
+/* Offset of header field in Rx classification key */
+int dpaa2_eth_cls_fld_off(int prot, int field)
+{
+ int i, off = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ if (dist_fields[i].cls_prot == prot &&
+ dist_fields[i].cls_field == field)
+ return off;
+ off += dist_fields[i].size;
+ }
+
+ WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
+ return 0;
+}
+
+/* Set Rx distribution (hash or flow classification) key
* flags is a combination of RXH_ bits
*/
-static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
+ enum dpaa2_eth_rx_dist type, u64 flags)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpkg_profile_cfg cls_cfg;
- struct dpni_rx_tc_dist_cfg dist_cfg;
+ u32 rx_hash_fields = 0;
+ dma_addr_t key_iova;
u8 *dma_mem;
int i;
int err = 0;
- if (!dpaa2_eth_hash_enabled(priv)) {
- dev_dbg(dev, "Hashing support is not enabled\n");
- return 0;
- }
-
memset(&cls_cfg, 0, sizeof(cls_cfg));
- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
struct dpkg_extract *key =
&cls_cfg.extracts[cls_cfg.num_extracts];
- if (!(flags & hash_fields[i].rxnfc_field))
- continue;
+ /* For Rx hashing key we set only the selected fields.
+ * For Rx flow classification key we set all supported fields
+ */
+ if (type == DPAA2_ETH_RX_DIST_HASH) {
+ if (!(flags & dist_fields[i].rxnfc_field))
+ continue;
+ rx_hash_fields |= dist_fields[i].rxnfc_field;
+ }
if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
dev_err(dev, "error adding key extraction rule, too many rules?\n");
@@ -2099,12 +2203,10 @@ static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
}
key->type = DPKG_EXTRACT_FROM_HDR;
- key->extract.from_hdr.prot = hash_fields[i].cls_prot;
+ key->extract.from_hdr.prot = dist_fields[i].cls_prot;
key->extract.from_hdr.type = DPKG_FULL_FIELD;
- key->extract.from_hdr.field = hash_fields[i].cls_field;
+ key->extract.from_hdr.field = dist_fields[i].cls_field;
cls_cfg.num_extracts++;
-
- priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
}
dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
@@ -2114,36 +2216,73 @@ static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
if (err) {
dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
- goto err_prep_key;
+ goto free_key;
}
- memset(&dist_cfg, 0, sizeof(dist_cfg));
-
/* Prepare for setting the rx dist */
- dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
- DPAA2_CLASSIFIER_DMA_SIZE,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
+ key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_iova)) {
dev_err(dev, "DMA mapping failed\n");
err = -ENOMEM;
- goto err_dma_map;
+ goto free_key;
}
- dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
- dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+ if (type == DPAA2_ETH_RX_DIST_HASH) {
+ if (dpaa2_eth_has_legacy_dist(priv))
+ err = config_legacy_hash_key(priv, key_iova);
+ else
+ err = config_hash_key(priv, key_iova);
+ } else {
+ err = config_cls_key(priv, key_iova);
+ }
- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
- dma_unmap_single(dev, dist_cfg.key_cfg_iova,
- DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
- if (err)
- dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
+ dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (!err && type == DPAA2_ETH_RX_DIST_HASH)
+ priv->rx_hash_fields = rx_hash_fields;
-err_dma_map:
-err_prep_key:
+free_key:
kfree(dma_mem);
return err;
}
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (!dpaa2_eth_hash_enabled(priv))
+ return -EOPNOTSUPP;
+
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
+}
+
+static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+
+ /* Check if we actually support Rx flow classification */
+ if (dpaa2_eth_has_legacy_dist(priv)) {
+ dev_dbg(dev, "Rx cls not supported by current MC version\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
+ !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
+ dev_dbg(dev, "Rx cls disabled in DPNI options\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!dpaa2_eth_hash_enabled(priv)) {
+ dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
+ return -EOPNOTSUPP;
+ }
+
+ priv->rx_cls_enabled = 1;
+
+ return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
+}
+
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
* frame queues and channels
*/
@@ -2170,9 +2309,16 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
* the default hash key
*/
err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
- if (err)
+ if (err && err != -EOPNOTSUPP)
dev_err(dev, "Failed to configure hashing\n");
+ /* Configure the flow classification key; it includes all
+ * supported header fields and cannot be modified at runtime
+ */
+ err = dpaa2_eth_set_cls(priv);
+ if (err && err != -EOPNOTSUPP)
+ dev_err(dev, "Failed to configure Rx classification key\n");
+
/* Configure handling of error frames */
err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
err_cfg.set_frame_annotation = 1;
@@ -2316,11 +2462,14 @@ static int netdev_init(struct net_device *net_dev)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u32 options = priv->dpni_attrs.options;
+ u64 supported = 0, not_supported = 0;
u8 bcast_addr[ETH_ALEN];
u8 num_queues;
int err;
net_dev->netdev_ops = &dpaa2_eth_ops;
+ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
err = set_mac_addr(priv);
if (err)
@@ -2356,12 +2505,23 @@ static int netdev_init(struct net_device *net_dev)
return err;
}
- /* Our .ndo_init will be called herein */
- err = register_netdev(net_dev);
- if (err < 0) {
- dev_err(dev, "register_netdev() failed\n");
- return err;
- }
+ /* Capabilities listing */
+ supported |= IFF_LIVE_ADDR_CHANGE;
+
+ if (options & DPNI_OPT_NO_MAC_FILTER)
+ not_supported |= IFF_UNICAST_FLT;
+ else
+ supported |= IFF_UNICAST_FLT;
+
+ net_dev->priv_flags |= supported;
+ net_dev->priv_flags &= ~not_supported;
+
+ /* Features */
+ net_dev->features = NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_LLTX;
+ net_dev->hw_features = net_dev->features;
return 0;
}
@@ -2561,28 +2721,36 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_alloc_rings;
- net_dev->ethtool_ops = &dpaa2_ethtool_ops;
-
err = setup_irqs(dpni_dev);
if (err) {
netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
priv->poll_thread = kthread_run(poll_link_state, priv,
"%s_poll_link", net_dev->name);
if (IS_ERR(priv->poll_thread)) {
- netdev_err(net_dev, "Error starting polling thread\n");
+ dev_err(dev, "Error starting polling thread\n");
goto err_poll_thread;
}
priv->do_link_poll = true;
}
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() failed\n");
+ goto err_netdev_reg;
+ }
+
dev_info(dev, "Probed interface %s\n", net_dev->name);
return 0;
+err_netdev_reg:
+ if (priv->do_link_poll)
+ kthread_stop(priv->poll_thread);
+ else
+ fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
free_rings(priv);
err_alloc_rings:
err_csum:
- unregister_netdev(net_dev);
err_netdev_init:
free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
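
The probe reordering above encodes a general rule: register_netdev() must be the last step of probe, since ndo callbacks may run the moment it returns; conversely, anything set up before it needs an unwind path after it. A hedged sketch of the resulting probe tail:

	/* everything else is ready; make the netdev visible last */
	err = register_netdev(net_dev);
	if (err)
		goto err_unwind;	/* hypothetical: undo irqs, rings, ... */
	return 0;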
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index d54cb0b99d08..452a8e9c4f0e 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -40,6 +40,11 @@
*/
#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
+/* Maximum number of Tx confirmation frames to be processed
+ * in a single NAPI call
+ */
+#define DPAA2_ETH_TXCONF_PER_NAPI 256
+
/* Buffer quota per queue. Must be large enough such that for minimum sized
* frames taildrop kicks in before the bpool gets depleted, so we compute
* how many 64B frames fit inside the taildrop threshold and add a margin
@@ -290,13 +295,18 @@ struct dpaa2_eth_channel {
struct dpaa2_eth_ch_stats stats;
};
-struct dpaa2_eth_hash_fields {
+struct dpaa2_eth_dist_fields {
u64 rxnfc_field;
enum net_prot cls_prot;
int cls_field;
int size;
};
+struct dpaa2_eth_cls_rule {
+ struct ethtool_rx_flow_spec fs;
+ u8 in_use;
+};
+
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -340,6 +350,8 @@ struct dpaa2_eth_priv {
/* enabled ethtool hashing bits */
u64 rx_hash_fields;
+ struct dpaa2_eth_cls_rule *cls_rules;
+ u8 rx_cls_enabled;
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
@@ -367,6 +379,24 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
return priv->dpni_ver_major - ver_major;
}
+/* Minimum firmware version that supports a more flexible API
+ * for configuring the Rx flow hash key
+ */
+#define DPNI_RX_DIST_KEY_VER_MAJOR 7
+#define DPNI_RX_DIST_KEY_VER_MINOR 5
+
+#define dpaa2_eth_has_legacy_dist(priv) \
+ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
+ DPNI_RX_DIST_KEY_VER_MINOR) < 0)
+
+#define dpaa2_eth_fs_count(priv) \
+ ((priv)->dpni_attrs.fs_entries)
+
+enum dpaa2_eth_rx_dist {
+ DPAA2_ETH_RX_DIST_HASH,
+ DPAA2_ETH_RX_DIST_CLS
+};
+
/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
* the buffer also needs space for its shared info struct, and we need
* to allocate enough to accommodate hardware alignment restrictions
@@ -409,4 +439,8 @@ static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
return priv->dpni_attrs.num_queues;
}
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
+int dpaa2_eth_cls_key_size(void);
+int dpaa2_eth_cls_fld_off(int prot, int field);
+
#endif /* __DPAA2_H */
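
A hypothetical usage sketch for the two classification helpers declared above, filling one field of a key buffer (the address value is an arbitrary example):

	int size = dpaa2_eth_cls_key_size();
	u8 *key = kzalloc(size, GFP_KERNEL);
	int off;

	if (!key)
		return -ENOMEM;
	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
	*(__be32 *)(key + off) = htonl(0xc0a80001);	/* 192.168.0.1 */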
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
new file mode 100644
index 000000000000..26bd5a2bd8ed
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ */
+
+#include <linux/net_tstamp.h>
+
+#include "dpni.h" /* DPNI_LINK_OPT_* */
+#include "dpaa2-eth.h"
+
+/* To be kept in sync with DPNI statistics */
+static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
+ "[hw] rx frames",
+ "[hw] rx bytes",
+ "[hw] rx mcast frames",
+ "[hw] rx mcast bytes",
+ "[hw] rx bcast frames",
+ "[hw] rx bcast bytes",
+ "[hw] tx frames",
+ "[hw] tx bytes",
+ "[hw] tx mcast frames",
+ "[hw] tx mcast bytes",
+ "[hw] tx bcast frames",
+ "[hw] tx bcast bytes",
+ "[hw] rx filtered frames",
+ "[hw] rx discarded frames",
+ "[hw] rx nobuffer discards",
+ "[hw] tx discarded frames",
+ "[hw] tx confirmed frames",
+};
+
+#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
+
+static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
+ /* per-cpu stats */
+ "[drv] tx conf frames",
+ "[drv] tx conf bytes",
+ "[drv] tx sg frames",
+ "[drv] tx sg bytes",
+ "[drv] tx realloc frames",
+ "[drv] rx sg frames",
+ "[drv] rx sg bytes",
+ "[drv] enqueue portal busy",
+ /* Channel stats */
+ "[drv] dequeue portal busy",
+ "[drv] channel pull errors",
+ "[drv] cdan",
+};
+
+#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
+
+static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
+
+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static int
+dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *link_settings)
+{
+ struct dpni_link_state state = {0};
+ int err = 0;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (err) {
+ netdev_err(net_dev, "ERROR %d getting link state\n", err);
+ goto out;
+ }
+
+ /* At the moment, we have no way of interrogating the DPMAC
+ * from the DPNI side - and for that matter there may exist
+ * no DPMAC at all. So for now we just don't report anything
+ * beyond the DPNI attributes.
+ */
+ if (state.options & DPNI_LINK_OPT_AUTONEG)
+ link_settings->base.autoneg = AUTONEG_ENABLE;
+ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
+ link_settings->base.duplex = DUPLEX_FULL;
+ link_settings->base.speed = state.rate;
+
+out:
+ return err;
+}
+
+#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7
+#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1
+static int
+dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *link_settings)
+{
+ struct dpni_link_cfg cfg = {0};
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = 0;
+
+ /* If using an older MC version, the DPNI must be down
+ * in order to change link settings, so let the user know.
+ */
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
+ DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
+ if (netif_running(net_dev)) {
+ netdev_info(net_dev, "Interface must be brought down first.\n");
+ return -EACCES;
+ }
+ }
+
+ cfg.rate = link_settings->base.speed;
+ if (link_settings->base.autoneg == AUTONEG_ENABLE)
+ cfg.options |= DPNI_LINK_OPT_AUTONEG;
+ else
+ cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
+ if (link_settings->base.duplex == DUPLEX_HALF)
+ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
+ else
+ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
+
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+ if (err)
+ /* ethtool will be loud enough if we return an error; no point
+ * in putting our own error message on the console by default
+ */
+ netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
+
+ return err;
+}
+
+static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
+ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
+ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
+ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/** Fill in hardware counters, as returned by MC.
+ */
+static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ int i = 0;
+ int j, k, err;
+ int num_cnt;
+ union dpni_statistics dpni_stats;
+ u64 cdan = 0;
+ u64 portal_busy = 0, pull_err = 0;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_eth_drv_stats *extras;
+ struct dpaa2_eth_ch_stats *ch_stats;
+
+ memset(data, 0,
+ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
+
+ /* Print standard counters, from DPNI statistics */
+ for (j = 0; j <= 2; j++) {
+ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
+ j, &dpni_stats);
+ if (err != 0)
+ netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
+ switch (j) {
+ case 0:
+ num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
+ break;
+ case 1:
+ num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64);
+ break;
+ case 2:
+ num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
+ break;
+ }
+ for (k = 0; k < num_cnt; k++)
+ *(data + i++) = dpni_stats.raw.counter[k];
+ }
+
+ /* Print per-cpu extra stats */
+ for_each_online_cpu(k) {
+ extras = per_cpu_ptr(priv->percpu_extras, k);
+ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
+ *((__u64 *)data + i + j) += *((__u64 *)extras + j);
+ }
+ i += j;
+
+ for (j = 0; j < priv->num_channels; j++) {
+ ch_stats = &priv->channel[j]->stats;
+ cdan += ch_stats->cdan;
+ portal_busy += ch_stats->dequeue_portal_busy;
+ pull_err += ch_stats->pull_err;
+ }
+
+ *(data + i++) = portal_busy;
+ *(data + i++) = pull_err;
+ *(data + i++) = cdan;
+}
+
+static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
+ void *key, void *mask)
+{
+ int off;
+
+ if (eth_mask->h_proto) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = eth_value->h_proto;
+ *(__be16 *)(mask + off) = eth_mask->h_proto;
+ }
+
+ if (!is_zero_ether_addr(eth_mask->h_source)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
+ ether_addr_copy(key + off, eth_value->h_source);
+ ether_addr_copy(mask + off, eth_mask->h_source);
+ }
+
+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+ ether_addr_copy(key + off, eth_value->h_dest);
+ ether_addr_copy(mask + off, eth_mask->h_dest);
+ }
+
+ return 0;
+}
+
+static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
+ struct ethtool_usrip4_spec *uip_mask,
+ void *key, void *mask)
+{
+ int off;
+ u32 tmp_value, tmp_mask;
+
+ if (uip_mask->tos || uip_mask->ip_ver)
+ return -EOPNOTSUPP;
+
+ if (uip_mask->ip4src) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+ *(__be32 *)(key + off) = uip_value->ip4src;
+ *(__be32 *)(mask + off) = uip_mask->ip4src;
+ }
+
+ if (uip_mask->ip4dst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+ *(__be32 *)(key + off) = uip_value->ip4dst;
+ *(__be32 *)(mask + off) = uip_mask->ip4dst;
+ }
+
+ if (uip_mask->proto) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+ *(u8 *)(key + off) = uip_value->proto;
+ *(u8 *)(mask + off) = uip_mask->proto;
+ }
+
+ if (uip_mask->l4_4_bytes) {
+ tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
+ tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+ *(__be16 *)(key + off) = htons(tmp_value >> 16);
+ *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+ *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
+ *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+ }
+
+ /* Only apply the rule for IPv4 frames */
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = htons(ETH_P_IP);
+ *(__be16 *)(mask + off) = htons(0xFFFF);
+
+ return 0;
+}
+
+static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
+ struct ethtool_tcpip4_spec *l4_mask,
+ void *key, void *mask, u8 l4_proto)
+{
+ int off;
+
+ if (l4_mask->tos)
+ return -EOPNOTSUPP;
+
+ if (l4_mask->ip4src) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
+ *(__be32 *)(key + off) = l4_value->ip4src;
+ *(__be32 *)(mask + off) = l4_mask->ip4src;
+ }
+
+ if (l4_mask->ip4dst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
+ *(__be32 *)(key + off) = l4_value->ip4dst;
+ *(__be32 *)(mask + off) = l4_mask->ip4dst;
+ }
+
+ if (l4_mask->psrc) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+ *(__be16 *)(key + off) = l4_value->psrc;
+ *(__be16 *)(mask + off) = l4_mask->psrc;
+ }
+
+ if (l4_mask->pdst) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+ *(__be16 *)(key + off) = l4_value->pdst;
+ *(__be16 *)(mask + off) = l4_mask->pdst;
+ }
+
+ /* Only apply the rule for IPv4 frames with the specified L4 proto */
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ *(__be16 *)(key + off) = htons(ETH_P_IP);
+ *(__be16 *)(mask + off) = htons(0xFFFF);
+
+ off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
+ *(u8 *)(key + off) = l4_proto;
+ *(u8 *)(mask + off) = 0xFF;
+
+ return 0;
+}
+
+static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask)
+{
+ int off;
+
+ if (ext_mask->vlan_etype)
+ return -EOPNOTSUPP;
+
+ if (ext_mask->vlan_tci) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
+ *(__be16 *)(key + off) = ext_value->vlan_tci;
+ *(__be16 *)(mask + off) = ext_mask->vlan_tci;
+ }
+
+ return 0;
+}
+
+static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask)
+{
+ int off;
+
+ if (!is_zero_ether_addr(ext_mask->h_dest)) {
+ off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
+ ether_addr_copy(key + off, ext_value->h_dest);
+ ether_addr_copy(mask + off, ext_mask->h_dest);
+ }
+
+ return 0;
+}
+
+static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
+{
+ int err;
+
+ switch (fs->flow_type & 0xFF) {
+ case ETHER_FLOW:
+ err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
+ key, mask);
+ break;
+ case IP_USER_FLOW:
+ err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec, key, mask);
+ break;
+ case TCP_V4_FLOW:
+ err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
+ key, mask, IPPROTO_TCP);
+ break;
+ case UDP_V4_FLOW:
+ err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
+ key, mask, IPPROTO_UDP);
+ break;
+ case SCTP_V4_FLOW:
+ err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
+ &fs->m_u.sctp_ip4_spec, key, mask,
+ IPPROTO_SCTP);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+
+ if (fs->flow_type & FLOW_EXT) {
+ err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+ if (err)
+ return err;
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int do_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *fs,
+ bool add)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ struct dpni_rule_cfg rule_cfg = { 0 };
+ struct dpni_fs_action_cfg fs_act = { 0 };
+ dma_addr_t key_iova;
+ void *key_buf;
+ int err;
+
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+ fs->ring_cookie >= dpaa2_eth_queue_count(priv))
+ return -EINVAL;
+
+ rule_cfg.key_size = dpaa2_eth_cls_key_size();
+
+ /* allocate twice the key size, for the actual key and for the mask */
+ key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
+ if (!key_buf)
+ return -ENOMEM;
+
+ /* Fill the key and mask memory areas */
+ err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size);
+ if (err)
+ goto free_mem;
+
+ key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_iova)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ rule_cfg.key_iova = key_iova;
+ rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+
+ if (add) {
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ fs_act.options |= DPNI_FS_OPT_DISCARD;
+ else
+ fs_act.flow_id = fs->ring_cookie;
+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
+ fs->location, &rule_cfg, &fs_act);
+ } else {
+ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
+ &rule_cfg);
+ }
+
+ dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
+
+free_mem:
+ kfree(key_buf);
+
+ return err;
+}
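
The single allocation above keeps the key and the mask back to back, which
is what lets one dma_map_single() call cover both halves; a sketch of the
layout handed to the MC firmware:

	/*
	 * key_buf, 2 * key_size bytes, one DMA mapping:
	 *
	 *   offset 0                key_size               2 * key_size
	 *   |------- key bytes ------|------- mask bytes -------|
	 *
	 * rule_cfg.key_iova  = key_iova
	 * rule_cfg.mask_iova = key_iova + key_size
	 */
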
+
+static int update_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *new_fs,
+ int location)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_eth_cls_rule *rule;
+ int err = -EINVAL;
+
+ if (!priv->rx_cls_enabled)
+ return -EOPNOTSUPP;
+
+ if (location >= dpaa2_eth_fs_count(priv))
+ return -EINVAL;
+
+ rule = &priv->cls_rules[location];
+
+ /* If a rule is present at the specified location, delete it. */
+ if (rule->in_use) {
+ err = do_cls_rule(net_dev, &rule->fs, false);
+ if (err)
+ return err;
+
+ rule->in_use = 0;
+ }
+
+ /* If no new entry to add, return here */
+ if (!new_fs)
+ return err;
+
+ err = do_cls_rule(net_dev, new_fs, true);
+ if (err)
+ return err;
+
+ rule->in_use = 1;
+ rule->fs = *new_fs;
+
+ return 0;
+}
+
+static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int max_rules = dpaa2_eth_fs_count(priv);
+ int i, j = 0;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXFH:
+ /* we purposely ignore cmd->flow_type for now, because the
+ * classifier only supports a single set of fields for all
+ * protocols
+ */
+ rxnfc->data = priv->rx_hash_fields;
+ break;
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = dpaa2_eth_queue_count(priv);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ rxnfc->rule_cnt = 0;
+ for (i = 0; i < max_rules; i++)
+ if (priv->cls_rules[i].in_use)
+ rxnfc->rule_cnt++;
+ rxnfc->data = max_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (rxnfc->fs.location >= max_rules)
+ return -EINVAL;
+ if (!priv->cls_rules[rxnfc->fs.location].in_use)
+ return -EINVAL;
+ rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ for (i = 0; i < max_rules; i++) {
+ if (!priv->cls_rules[i].in_use)
+ continue;
+ if (j == rxnfc->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[j++] = i;
+ }
+ rxnfc->rule_cnt = j;
+ rxnfc->data = max_rules;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *rxnfc)
+{
+ int err = 0;
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_SRXFH:
+ if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
+ return -EOPNOTSUPP;
+ err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
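
These two handlers back the standard ethtool flow classification commands;
assuming a hypothetical interface name eni0, the userspace equivalents
would look roughly like:

	# insert: steer UDP dst port 4789 to queue 2 (ETHTOOL_SRXCLSRLINS)
	ethtool -N eni0 flow-type udp4 dst-port 4789 action 2 loc 0
	# list configured rules (ETHTOOL_GRXCLSRLALL)
	ethtool -n eni0
	# delete the rule at location 0 (ETHTOOL_SRXCLSRLDEL)
	ethtool -N eni0 delete 0
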
+
+int dpaa2_phc_index = -1;
+EXPORT_SYMBOL(dpaa2_phc_index);
+
+static int dpaa2_eth_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = dpaa2_phc_index;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+}
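
The tx_types and rx_filters advertised here are what a SIOCSHWTSTAMP request
may ask for; a minimal userspace sketch, with the interface name hypothetical:

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int enable_hw_timestamping(int sock)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eni0", IFNAMSIZ - 1); /* hypothetical */
		ifr.ifr_data = (char *)&cfg;

		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}
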
+
+const struct ethtool_ops dpaa2_ethtool_ops = {
+ .get_drvinfo = dpaa2_eth_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = dpaa2_eth_get_link_ksettings,
+ .set_link_ksettings = dpaa2_eth_set_link_ksettings,
+ .get_sset_count = dpaa2_eth_get_sset_count,
+ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
+ .get_strings = dpaa2_eth_get_strings,
+ .get_rxnfc = dpaa2_eth_get_rxnfc,
+ .set_rxnfc = dpaa2_eth_set_rxnfc,
+ .get_ts_info = dpaa2_eth_get_ts_info,
+};
diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 0d52cb85441f..84b942b1eccc 100644
--- a/drivers/staging/fsl-dpaa2/rtc/rtc.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -9,10 +9,10 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/fsl/mc.h>
-#include "rtc.h"
+#include "dpaa2-ptp.h"
struct ptp_dpaa2_priv {
- struct fsl_mc_device *rtc_mc_dev;
+ struct fsl_mc_device *ptp_mc_dev;
struct ptp_clock *clock;
struct ptp_clock_info caps;
u32 freq_comp;
@@ -23,7 +23,7 @@ static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
struct ptp_dpaa2_priv *ptp_dpaa2 =
container_of(ptp, struct ptp_dpaa2_priv, caps);
- struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
struct device *dev = &mc_dev->dev;
u64 adj;
u32 diff, tmr_add;
@@ -46,14 +46,14 @@ static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
mc_dev->mc_handle, tmr_add);
if (err)
dev_err(dev, "dprtc_set_freq_compensation err %d\n", err);
- return 0;
+ return err;
}
static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct ptp_dpaa2_priv *ptp_dpaa2 =
container_of(ptp, struct ptp_dpaa2_priv, caps);
- struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
struct device *dev = &mc_dev->dev;
s64 now;
int err = 0;
@@ -61,24 +61,22 @@ static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta)
err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now);
if (err) {
dev_err(dev, "dprtc_get_time err %d\n", err);
- return 0;
+ return err;
}
now += delta;
err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now);
- if (err) {
+ if (err)
dev_err(dev, "dprtc_set_time err %d\n", err);
- return 0;
- }
- return 0;
+ return err;
}
static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct ptp_dpaa2_priv *ptp_dpaa2 =
container_of(ptp, struct ptp_dpaa2_priv, caps);
- struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
struct device *dev = &mc_dev->dev;
u64 ns;
u32 remainder;
@@ -87,12 +85,12 @@ static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns);
if (err) {
dev_err(dev, "dprtc_get_time err %d\n", err);
- return 0;
+ return err;
}
ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
ts->tv_nsec = remainder;
- return 0;
+ return err;
}
static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
@@ -100,7 +98,7 @@ static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
{
struct ptp_dpaa2_priv *ptp_dpaa2 =
container_of(ptp, struct ptp_dpaa2_priv, caps);
- struct fsl_mc_device *mc_dev = ptp_dpaa2->rtc_mc_dev;
+ struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev;
struct device *dev = &mc_dev->dev;
u64 ns;
int err = 0;
@@ -111,10 +109,10 @@ static int ptp_dpaa2_settime(struct ptp_clock_info *ptp,
err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns);
if (err)
dev_err(dev, "dprtc_set_time err %d\n", err);
- return 0;
+ return err;
}
-static struct ptp_clock_info ptp_dpaa2_caps = {
+static const struct ptp_clock_info ptp_dpaa2_caps = {
.owner = THIS_MODULE,
.name = "DPAA2 PTP Clock",
.max_adj = 512000,
@@ -129,14 +127,14 @@ static struct ptp_clock_info ptp_dpaa2_caps = {
.settime64 = ptp_dpaa2_settime,
};
-static int rtc_probe(struct fsl_mc_device *mc_dev)
+static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
{
struct device *dev = &mc_dev->dev;
struct ptp_dpaa2_priv *ptp_dpaa2;
u32 tmr_add = 0;
int err;
- ptp_dpaa2 = kzalloc(sizeof(*ptp_dpaa2), GFP_KERNEL);
+ ptp_dpaa2 = devm_kzalloc(dev, sizeof(*ptp_dpaa2), GFP_KERNEL);
if (!ptp_dpaa2)
return -ENOMEM;
@@ -153,7 +151,7 @@ static int rtc_probe(struct fsl_mc_device *mc_dev)
goto err_free_mcp;
}
- ptp_dpaa2->rtc_mc_dev = mc_dev;
+ ptp_dpaa2->ptp_mc_dev = mc_dev;
err = dprtc_get_freq_compensation(mc_dev->mc_io, 0,
mc_dev->mc_handle, &tmr_add);
@@ -182,12 +180,10 @@ err_close:
err_free_mcp:
fsl_mc_portal_free(mc_dev->mc_io);
err_exit:
- kfree(ptp_dpaa2);
- dev_set_drvdata(dev, NULL);
return err;
}
-static int rtc_remove(struct fsl_mc_device *mc_dev)
+static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev)
{
struct ptp_dpaa2_priv *ptp_dpaa2;
struct device *dev = &mc_dev->dev;
@@ -198,32 +194,29 @@ static int rtc_remove(struct fsl_mc_device *mc_dev)
dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
fsl_mc_portal_free(mc_dev->mc_io);
- kfree(ptp_dpaa2);
- dev_set_drvdata(dev, NULL);
-
return 0;
}
-static const struct fsl_mc_device_id rtc_match_id_table[] = {
+static const struct fsl_mc_device_id dpaa2_ptp_match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dprtc",
},
{}
};
-MODULE_DEVICE_TABLE(fslmc, rtc_match_id_table);
+MODULE_DEVICE_TABLE(fslmc, dpaa2_ptp_match_id_table);
-static struct fsl_mc_driver rtc_drv = {
+static struct fsl_mc_driver dpaa2_ptp_drv = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
- .probe = rtc_probe,
- .remove = rtc_remove,
- .match_id_table = rtc_match_id_table,
+ .probe = dpaa2_ptp_probe,
+ .remove = dpaa2_ptp_remove,
+ .match_id_table = dpaa2_ptp_match_id_table,
};
-module_fsl_mc_driver(rtc_drv);
+module_fsl_mc_driver(dpaa2_ptp_drv);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DPAA2 PTP Clock Driver");
diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
index ff2e177395d4..ff2e177395d4 100644
--- a/drivers/staging/fsl-dpaa2/rtc/rtc.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
index 6de613b13e4d..6de613b13e4d 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpkg.h
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 83698abce8b4..7b44d7d9b19a 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -82,6 +82,9 @@
#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
+#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
+#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
+
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
@@ -515,4 +518,52 @@ struct dpni_rsp_get_api_version {
__le16 minor;
};
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_fs_dist {
+ __le16 dist_size;
+ u8 enable;
+ u8 tc;
+ __le16 miss_flow_id;
+ __le16 pad;
+ __le64 key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_hash_dist {
+ __le16 dist_size;
+ u8 enable;
+ u8 tc;
+ __le32 pad;
+ __le64 key_cfg_iova;
+};
+
+struct dpni_cmd_add_fs_entry {
+ /* cmd word 0 */
+ __le16 options;
+ u8 tc_id;
+ u8 key_size;
+ __le16 index;
+ __le16 flow_id;
+ /* cmd word 1 */
+ __le64 key_iova;
+ /* cmd word 2 */
+ __le64 mask_iova;
+ /* cmd word 3 */
+ __le64 flc;
+};
+
+struct dpni_cmd_remove_fs_entry {
+ /* cmd word 0 */
+ __le16 pad0;
+ u8 tc_id;
+ u8 key_size;
+ __le32 pad1;
+ /* cmd word 1 */
+ __le64 key_iova;
+ /* cmd word 2 */
+ __le64 mask_iova;
+};
+
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index d6ac26797cec..220dfc806a24 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1598,3 +1598,155 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dpni_set_rx_fs_dist() - Set Rx flow steering distribution
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Distribution configuration
+ *
+ * If FS was already enabled by a previous call, the classification
+ * key is changed but all existing table rules are kept. If the
+ * existing rules do not match the new key, the results are not
+ * predictable; it is the user's responsibility to keep the key
+ * consistent.
+ * If cfg.enable is set to 1, the command creates a flow steering
+ * table and packets are classified according to this table. Packets
+ * that miss all table rules are classified according to the settings
+ * made in dpni_set_rx_hash_dist().
+ * If cfg.enable is set to 0, the command clears the flow steering
+ * table and packets are classified according to the settings made in
+ * dpni_set_rx_hash_dist().
+ */
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg)
+{
+ struct dpni_cmd_set_rx_fs_dist *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable);
+ cmd_params->tc = cfg->tc;
+ cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id);
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
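
A minimal caller sketch, assuming a key configuration already laid out at
key_iova by dpni_prepare_key_cfg() and a hypothetical distribution size of
8 queues:

	struct dpni_rx_dist_cfg dist = {
		.dist_size       = 8,
		.key_cfg_iova    = key_iova,
		.enable          = 1,
		.tc              = 0,
		.fs_miss_flow_id = DPNI_FS_MISS_DROP, /* drop unclassified frames */
	};
	int err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist);
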
+
+/**
+ * dpni_set_rx_hash_dist() - Set Rx hash distribution
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Distribution configuration
+ *
+ * If cfg.enable is set to 1, packets are classified using a hash
+ * function based on the key received in the cfg.key_cfg_iova parameter.
+ * If cfg.enable is set to 0, packets are sent to the default queue.
+ */
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg)
+{
+ struct dpni_cmd_set_rx_hash_dist *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ dpni_set_field(cmd_params->enable, RX_HASH_DIST_ENABLE, cfg->enable);
+ cmd_params->tc = cfg->tc;
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
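
Hash distribution takes the same dpni_rx_dist_cfg; mirroring the sketch
above, with fs_miss_flow_id left at zero since it is not used here:

	struct dpni_rx_dist_cfg hash = {
		.dist_size    = 8,           /* hypothetical */
		.key_cfg_iova = key_iova,
		.enable       = 1,
		.tc           = 0,
	};
	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &hash);
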
+
+/**
+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class
+ * (to select a flow ID)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @index: Location in the FS table at which to insert the entry.
+ * Only relevant if MASKING is enabled for FS
+ * classification on this DPNI; ignored for exact match.
+ * @cfg: Flow steering rule to add
+ * @action: Action to be taken as result of a classification hit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ u16 index,
+ const struct dpni_rule_cfg *cfg,
+ const struct dpni_fs_action_cfg *action)
+{
+ struct dpni_cmd_add_fs_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->index = cpu_to_le16(index);
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+ cmd_params->options = cpu_to_le16(action->options);
+ cmd_params->flow_id = cpu_to_le16(action->flow_id);
+ cmd_params->flc = cpu_to_le64(action->flc);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
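
Tying the pieces together, a hedged sketch of one exact-match insertion,
with the key/mask buffer DMA-mapped as in do_cls_rule() above and key_size
as returned by dpaa2_eth_cls_key_size():

	struct dpni_rule_cfg rule = {
		.key_iova  = key_iova,
		.mask_iova = key_iova + key_size,
		.key_size  = key_size,
	};
	struct dpni_fs_action_cfg act = {
		.flow_id = 2, /* steer hits to Rx queue 2 (hypothetical) */
	};
	err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
				0 /* tc */, 0 /* index */, &rule, &act);
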
+
+/**
+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific
+ * traffic class
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Flow steering rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rule_cfg *cfg)
+{
+ struct dpni_cmd_remove_fs_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params;
+ cmd_params->tc_id = tc_id;
+ cmd_params->key_size = cfg->key_size;
+ cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index b378a00c7c53..a521242e2353 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -629,6 +629,45 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
const struct dpni_rx_tc_dist_cfg *cfg);
/**
+ * When used for fs_miss_flow_id in dpni_set_rx_fs_dist(), signals the
+ * DPNI to drop all unclassified frames.
+ */
+#define DPNI_FS_MISS_DROP ((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - Rx distribution configuration
+ * @dist_size: distribution size
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key, by calling
+ * dpni_prepare_key_cfg(); relevant only when enable != 0,
+ * otherwise it can be '0'
+ * @enable: enable/disable the distribution
+ * @tc: TC id for which the distribution is set
+ * @fs_miss_flow_id: when a packet misses all flow steering table rules and
+ * hash distribution is disabled, it is put on this queue id; use
+ * DPNI_FS_MISS_DROP to drop such frames. This field is used only
+ * when flow steering distribution is enabled and hash distribution
+ * is disabled
+ */
+struct dpni_rx_dist_cfg {
+ u16 dist_size;
+ u64 key_cfg_iova;
+ u8 enable;
+ u8 tc;
+ u16 fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_rx_dist_cfg *cfg);
+
+/**
* enum dpni_dest - DPNI destination types
* @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
* does not generate FQDAN notifications; user is expected to
@@ -816,6 +855,64 @@ struct dpni_rule_cfg {
u8 key_size;
};
+/**
+ * Discard matching traffic. If set, this takes precedence over any other
+ * configuration and matching traffic is always discarded.
+ */
+#define DPNI_FS_OPT_DISCARD 0x1
+
+/**
+ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
+ * override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC 0x2
+
+/**
+ * Indicates whether the 6 least significant bits of FLC are used for stash
+ * control. If set, the 6 least significant bits of the FLC value are
+ * interpreted as follows:
+ * - bits 0-1: indicates the number of 64 byte units of context that are
+ * stashed. FLC value is interpreted as a memory address in this case,
+ * excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame annotation
+ * to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame data to be
+ * stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
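
As a worked example of the bit layout described above, stashing one 64-byte
unit of context and two 64-byte units of frame data from a 64-byte-aligned
context buffer at ctx_addr (hypothetical) would encode:

	/* bits 0-1 = 1 (context units), bits 2-3 = 0 (annotation units),
	 * bits 4-5 = 2 (frame data units), upper bits = context address
	 */
	act.flc     = (ctx_addr & ~0x3FULL) | (2 << 4) | 1;
	act.options = DPNI_FS_OPT_SET_FLC | DPNI_FS_OPT_SET_STASH_CONTROL;
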
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc: FLC value for traffic matching this rule. Please check the
+ * Frame Descriptor section in the hardware documentation for
+ * more information.
+ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
+ * values are in range 0 to num_queue-1.
+ * @options: Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+ u64 flc;
+ u16 flow_id;
+ u16 options;
+};
+
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ u16 index,
+ const struct dpni_rule_cfg *cfg,
+ const struct dpni_fs_action_cfg *action);
+
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rule_cfg *cfg);
+
int dpni_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
new file mode 100644
index 000000000000..9af4ac71f347
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef _FSL_DPRTC_CMD_H
+#define _FSL_DPRTC_CMD_H
+
+/* Command versioning */
+#define DPRTC_CMD_BASE_VERSION 1
+#define DPRTC_CMD_ID_OFFSET 4
+
+#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
+#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
+
+#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
+#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
+#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
+#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
+
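
The DPRTC_CMD() macro folds the command base version into the low nibble of
every ID, so by the definitions above:

	/* DPRTC_CMDID_OPEN     = (0x810 << 4) | 1 = 0x8101 */
	/* DPRTC_CMDID_GET_TIME = (0x1d3 << 4) | 1 = 0x1d31 */
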
+#pragma pack(push, 1)
+struct dprtc_cmd_open {
+ __le32 dprtc_id;
+};
+
+struct dprtc_get_freq_compensation {
+ __le32 freq_compensation;
+};
+
+struct dprtc_time {
+ __le64 time;
+};
+
+#pragma pack(pop)
+
+#endif /* _FSL_DPRTC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.c b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
new file mode 100644
index 000000000000..c13e09bc7b9d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+
+#include "dprtc.h"
+#include "dprtc-cmd.h"
+
+/**
+ * dprtc_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dprtc_id: DPRTC unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dprtc_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dprtc_id,
+ u16 *token)
+{
+ struct dprtc_cmd_open *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dprtc_cmd_open *)cmd.params;
+ cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dprtc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @freq_compensation: The new frequency compensation value to set.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 freq_compensation)
+{
+ struct dprtc_get_freq_compensation *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
+ cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @freq_compensation: Frequency compensation value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 *freq_compensation)
+{
+ struct dprtc_get_freq_compensation *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
+ *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
+
+ return 0;
+}
+
+/**
+ * dprtc_get_time() - Returns the current RTC time.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @time: Current RTC time.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_get_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t *time)
+{
+ struct dprtc_time *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dprtc_time *)cmd.params;
+ *time = le64_to_cpu(rsp_params->time);
+
+ return 0;
+}
+
+/**
+ * dprtc_set_time() - Updates current RTC time.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRTC object
+ * @time: New RTC time.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprtc_set_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t time)
+{
+ struct dprtc_time *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprtc_time *)cmd.params;
+ cmd_params->time = cpu_to_le64(time);
+
+ return mc_send_command(mc_io, &cmd);
+}
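
An end-to-end sketch of the API this file adds, mirroring how the PTP driver
above drives it (error handling trimmed, delta hypothetical):

	u16 token;
	u64 ns;

	err = dprtc_open(mc_io, 0, dprtc_id, &token);
	if (err)
		return err;

	err = dprtc_get_time(mc_io, 0, token, &ns);   /* current time, in ns */
	if (!err)
		err = dprtc_set_time(mc_io, 0, token, ns + delta);

	dprtc_close(mc_io, 0, token);
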
diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
new file mode 100644
index 000000000000..fe19618d6cdf
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2018 NXP
+ */
+
+#ifndef __FSL_DPRTC_H
+#define __FSL_DPRTC_H
+
+/* Data Path Real Time Counter API
+ * Contains initialization APIs and runtime control APIs for the RTC.
+ */
+
+struct fsl_mc_io;
+
+int dprtc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dprtc_id,
+ u16 *token);
+
+int dprtc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 freq_compensation);
+
+int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 *freq_compensation);
+
+int dprtc_get_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t *time);
+
+int dprtc_set_time(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ uint64_t time);
+
+#endif /* __FSL_DPRTC_H */
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 4778b663653e..bf80855dd0dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -452,6 +452,10 @@ struct bufdesc_ex {
* initialisation.
*/
#define FEC_QUIRK_MIB_CLEAR (1 << 15)
+/* Only the i.MX25/i.MX27/i.MX28 controllers support the FRBR and FRSR
+ * registers; those FIFO receive registers are reserved on other platforms.
+ */
+#define FEC_QUIRK_HAS_FRREG (1 << 16)
struct bufdesc_prop {
int qid;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2708297e7795..6db69ba30dcd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -91,14 +91,16 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
+ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+ FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx27-fec",
- .driver_data = FEC_QUIRK_MIB_CLEAR,
+ .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
- FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
+ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -1158,7 +1160,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
@@ -1273,7 +1275,7 @@ skb_done:
/* Since we have freed up a buffer, the ring is no longer full
*/
- if (netif_queue_stopped(ndev)) {
+ if (netif_tx_queue_stopped(nq)) {
entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free >= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
@@ -1746,7 +1748,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
@@ -1946,16 +1948,15 @@ static int fec_enet_mii_probe(struct net_device *ndev)
/* mask with MAC supported features */
if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
- phy_dev->supported &= PHY_GBIT_FEATURES;
- phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
+ phy_set_max_speed(phy_dev, 1000);
+ phy_remove_link_mode(phy_dev,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
#if !defined(CONFIG_M5272)
- phy_dev->supported |= SUPPORTED_Pause;
+ phy_support_sym_pause(phy_dev);
#endif
}
else
- phy_dev->supported &= PHY_BASIC_FEATURES;
-
- phy_dev->advertising = phy_dev->supported;
+ phy_set_max_speed(phy_dev, 100);
fep->link = 0;
fep->full_duplex = 0;
@@ -2055,8 +2056,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
node = of_get_child_by_name(pdev->dev.of_node, "mdio");
err = of_mdiobus_register(fep->mii_bus, node);
- if (node)
- of_node_put(node);
+ of_node_put(node);
if (err)
goto err_out_free_mdiobus;
@@ -2164,7 +2164,13 @@ static void fec_enet_get_regs(struct net_device *ndev,
memset(buf, 0, regs->len);
for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
- off = fec_enet_register_offset[i] / 4;
+ off = fec_enet_register_offset[i];
+
+ if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
+ !(fep->quirks & FEC_QUIRK_HAS_FRREG))
+ continue;
+
+ off >>= 2;
buf[off] = readl(&theregs[off]);
}
}
@@ -2230,13 +2236,8 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
- if (pause->rx_pause || pause->autoneg) {
- ndev->phydev->supported |= ADVERTISED_Pause;
- ndev->phydev->advertising |= ADVERTISED_Pause;
- } else {
- ndev->phydev->supported &= ~ADVERTISED_Pause;
- ndev->phydev->advertising &= ~ADVERTISED_Pause;
- }
+ phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
+ pause->autoneg);
if (pause->autoneg) {
if (netif_running(ndev))
@@ -2247,7 +2248,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 6d7269d87a85..b90bab72efdb 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -305,7 +305,8 @@ static int mpc52xx_fec_close(struct net_device *dev)
* invariant will hold if you make sure that the netif_*_queue()
* calls are done at the proper times.
*/
-static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct bcom_fec_bd *bd;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index a847b9c3b31a..d79e4e009d63 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -393,11 +393,7 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
*/
/* get local capabilities */
- lcl_adv = 0;
- if (phy_dev->advertising & ADVERTISED_Pause)
- lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (phy_dev->advertising & ADVERTISED_Asym_Pause)
- lcl_adv |= ADVERTISE_PAUSE_ASYM;
+ lcl_adv = ethtool_adv_to_lcl_adv_t(phy_dev->advertising);
/* get link partner capabilities */
rmt_adv = 0;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 2c2976a2dda6..7c548ed535da 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -481,7 +481,8 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
}
#endif
-static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
cbd_t __iomem *bdp;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index ac2c3f6a12bc..82722d05fedb 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -446,8 +446,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
goto error;
}
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
- (unsigned long long)res.start);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%pOFn@%llx", np,
+ (unsigned long long)res.start);
priv->map = of_iomap(np, 0);
if (!priv->map) {
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index f27f9bae1a4a..3c8da1a18ba0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -102,8 +102,6 @@
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include "gianfar.h"
@@ -112,7 +110,7 @@
const char gfar_driver_version[] = "2.0";
static int gfar_enet_open(struct net_device *dev);
-static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
@@ -1814,8 +1812,8 @@ static int init_phy(struct net_device *dev)
phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
phydev->advertising = phydev->supported;
- /* Add support for flow control, but don't advertise it by default */
- phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ /* Add support for flow control */
+ phy_support_asym_pause(phydev);
/* disable EEE autoneg, EEE not supported by eTSEC */
memset(&edata, 0, sizeof(struct ethtool_eee));
@@ -2334,7 +2332,7 @@ static inline bool gfar_csum_errata_76(struct gfar_private *priv,
/* This is called by the kernel when a frame is ready for transmission.
* It is pointed to by the dev->hard_start_xmit function pointer
*/
-static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -3658,12 +3656,7 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = 0;
- if (phydev->advertising & ADVERTISED_Pause)
- lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (phydev->advertising & ADVERTISED_Asym_Pause)
- lcl_adv |= ADVERTISE_PAUSE_ASYM;
-
+ lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
val |= MACCFG1_TX_FLOW;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 395a5266ea30..0d76e15cd6dd 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -230,7 +230,7 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
/* Make sure we return a number greater than 0
* if usecs > 0 */
- return (usecs * 1000 + count - 1) / count;
+ return DIV_ROUND_UP(usecs * 1000, count);
}
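
DIV_ROUND_UP(n, d) expands to (n + d - 1) / d, so this is a
behavior-preserving cleanup; for example, with usecs = 3 and count = 2000:

	/* old: (3 * 1000 + 2000 - 1) / 2000 = 4999 / 2000 = 2 */
	/* new: DIV_ROUND_UP(3000, 2000)     = 4999 / 2000 = 2 */
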
/* Convert ethernet clock ticks to microseconds */
@@ -503,65 +503,44 @@ static int gfar_spauseparam(struct net_device *dev,
struct gfar_private *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u32 oldadv, newadv;
if (!phydev)
return -ENODEV;
- if (!(phydev->supported & SUPPORTED_Pause) ||
- (!(phydev->supported & SUPPORTED_Asym_Pause) &&
- (epause->rx_pause != epause->tx_pause)))
+ if (!phy_validate_pause(phydev, epause))
return -EINVAL;
priv->rx_pause_en = priv->tx_pause_en = 0;
+ phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
if (epause->rx_pause) {
priv->rx_pause_en = 1;
if (epause->tx_pause) {
priv->tx_pause_en = 1;
- /* FLOW_CTRL_RX & TX */
- newadv = ADVERTISED_Pause;
- } else /* FLOW_CTLR_RX */
- newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ }
} else if (epause->tx_pause) {
priv->tx_pause_en = 1;
- /* FLOW_CTLR_TX */
- newadv = ADVERTISED_Asym_Pause;
- } else
- newadv = 0;
+ }
if (epause->autoneg)
priv->pause_aneg_en = 1;
else
priv->pause_aneg_en = 0;
- oldadv = phydev->advertising &
- (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
- if (oldadv != newadv) {
- phydev->advertising &=
- ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
- phydev->advertising |= newadv;
- if (phydev->autoneg)
- /* inform link partner of our
- * new flow ctrl settings
- */
- return phy_start_aneg(phydev);
-
- if (!epause->autoneg) {
- u32 tempval;
- tempval = gfar_read(&regs->maccfg1);
- tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-
- priv->tx_actual_en = 0;
- if (priv->tx_pause_en) {
- priv->tx_actual_en = 1;
- tempval |= MACCFG1_TX_FLOW;
- }
+ if (!epause->autoneg) {
+ u32 tempval = gfar_read(&regs->maccfg1);
- if (priv->rx_pause_en)
- tempval |= MACCFG1_RX_FLOW;
- gfar_write(&regs->maccfg1, tempval);
+ tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+
+ priv->tx_actual_en = 0;
+ if (priv->tx_pause_en) {
+ priv->tx_actual_en = 1;
+ tempval |= MACCFG1_TX_FLOW;
}
+
+ if (priv->rx_pause_en)
+ tempval |= MACCFG1_RX_FLOW;
+ gfar_write(&regs->maccfg1, tempval);
}
return 0;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 22a817da861e..32e02700feaa 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1742,12 +1742,7 @@ static int init_phy(struct net_device *dev)
if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
uec_configure_serdes(dev);
- phydev->supported &= (SUPPORTED_MII |
- SUPPORTED_Autoneg |
- ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full);
+ phy_set_max_speed(phydev, SPEED_100);
if (priv->max_speed == SPEED_1000)
phydev->supported |= ADVERTISED_1000baseT_Full;
@@ -3083,7 +3078,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
-static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
#ifdef CONFIG_UGETH_TX_ON_DEMAND
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 14374a856d30..be268dcde8fa 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -422,7 +422,8 @@ static void hip04_start_tx_timer(struct hip04_priv *priv)
ns, HRTIMER_MODE_REL);
}
-static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t
+hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct hip04_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index c5727003af8c..471805ea363b 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -736,7 +736,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
return 0;
}
-static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
struct hix5hd2_desc *desc;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index a051e582d541..79d03f8ee7b1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
if (cb->type == DESC_TYPE_SKB)
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
- else
+ else if (cb->length)
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 09e4061d1fa6..aaf72c055711 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -319,7 +319,7 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en)
hns_gmac_set_uc_match(mac_drv, en);
}
-int hns_gmac_wait_fifo_clean(void *mac_drv)
+static int hns_gmac_wait_fifo_clean(void *mac_drv)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
int wait_cnt;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 6ed6f142427e..3613e400e816 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -837,8 +837,8 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
*/
put_device(&mac_cb->phy_dev->mdio.dev);
- dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
- mac_cb->mac_id, np->name);
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n",
+ mac_cb->mac_id, np);
}
of_node_put(np);
@@ -855,8 +855,8 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
* if the phy_dev is found
*/
put_device(&mac_cb->phy_dev->mdio.dev);
- dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
- mac_cb->mac_id, np->name);
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n",
+ mac_cb->mac_id, np);
}
of_node_put(np);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index f56855e63c96..28e907831b0e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -40,9 +40,9 @@
#define SKB_TMP_LEN(SKB) \
(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
- int size, dma_addr_t dma, int frag_end,
- int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+ int send_sz, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
{
struct hnae_desc *desc = &ring->desc[ring->next_to_use];
struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
- desc->tx.send_size = cpu_to_le16((u16)size);
+ desc->tx.send_size = cpu_to_le16((u16)send_sz);
/* config bd buffer end */
hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
ring_ptr_move_fw(ring, next_to_use);
}
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+ buf_num, type, mtu);
+}
+
static const struct acpi_device_id hns_enet_acpi_match[] = {
{ "HISI00C1", 0 },
{ "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
/* when the frag size is bigger than hardware, split this frag */
for (k = 0; k < frag_buf_num; k++)
- fill_v2_desc(ring, priv,
- (k == frag_buf_num - 1) ?
+ fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+ (k == frag_buf_num - 1) ?
sizeoflast : BD_MAX_SEND_SIZE,
- dma + BD_MAX_SEND_SIZE * k,
- frag_end && (k == frag_buf_num - 1) ? 1 : 0,
- buf_num,
- (type == DESC_TYPE_SKB && !k) ?
+ dma + BD_MAX_SEND_SIZE * k,
+ frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+ buf_num,
+ (type == DESC_TYPE_SKB && !k) ?
DESC_TYPE_SKB : DESC_TYPE_PAGE,
- mtu);
+ mtu);
}
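
With fill_v2_desc_hw() the mapped length and the hardware send size can
diverge: each BD's send_size is its own chunk, while the full DMA length is
recorded only on the first BD of a split frag, so the eventual unmap matches
what was actually mapped (consistent with the hnae_unmap_buffer() change
earlier in this patch). A worked example with hypothetical numbers, frag
size 20000 and BD_MAX_SEND_SIZE 8192:

	/* frag_buf_num = 3 BDs:
	 *   k = 0: size (cb->length) = 20000, send_size = 8192
	 *   k = 1: size (cb->length) = 0,     send_size = 8192
	 *   k = 2: size (cb->length) = 0,     send_size = sizeoflast = 3616
	 */
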
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
return phy_mii_ioctl(phy_dev, ifr, cmd);
}
-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
- struct hns_nic_priv *priv = netdev_priv(ndev);
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for (i = 0; i < priv->ae_handle->q_num * 2; i++)
- napi_schedule(&priv->ring_data[i].napi);
- local_irq_restore(flags);
-}
-#endif
-
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_set_features = hns_nic_set_features,
.ndo_fix_features = hns_nic_fix_features,
.ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = hns_nic_poll_controller,
-#endif
.ndo_set_rx_mode = hns_nic_set_rx_mode,
.ndo_select_queue = hns_nic_select_queue,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index be9dc08ccf67..038326cfda93 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -46,9 +46,6 @@ enum hclge_mbx_mac_vlan_subcode {
HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
- HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */
- HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */
- HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */
};
/* below are per-VF vlan cfg subcodes */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index fff5be8078ac..781e5dee3c70 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -29,8 +29,8 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
return false;
}
-static void hnae3_set_client_init_flag(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev, int inited)
+void hnae3_set_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev, int inited)
{
switch (client->type) {
case HNAE3_CLIENT_KNIC:
@@ -46,6 +46,7 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
break;
}
}
+EXPORT_SYMBOL(hnae3_set_client_init_flag);
static int hnae3_get_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
@@ -86,14 +87,11 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
/* now, (un-)instantiate client by calling lower layer */
if (is_reg) {
ret = ae_dev->ops->init_client_instance(client, ae_dev);
- if (ret) {
+ if (ret)
dev_err(&ae_dev->pdev->dev,
"fail to instantiate client, ret = %d\n", ret);
- return ret;
- }
- hnae3_set_client_init_flag(client, ae_dev, 1);
- return 0;
+ return ret;
}
if (hnae3_get_client_init_flag(client, ae_dev)) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 67befff0bfc5..e82e4ca20620 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -51,6 +51,7 @@
#define HNAE3_KNIC_CLIENT_INITED_B 0x3
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
+#define HNAE3_DEV_SUPPORT_FD_B 0x6
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -61,6 +62,9 @@
#define hnae3_dev_dcb_supported(hdev) \
hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+#define hnae3_dev_fd_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
@@ -84,10 +88,11 @@ struct hnae3_queue {
/*hnae3 loop mode*/
enum hnae3_loop {
- HNAE3_MAC_INTER_LOOP_MAC,
- HNAE3_MAC_INTER_LOOP_SERDES,
- HNAE3_MAC_INTER_LOOP_PHY,
- HNAE3_MAC_LOOP_NONE,
+ HNAE3_LOOP_APP,
+ HNAE3_LOOP_SERIAL_SERDES,
+ HNAE3_LOOP_PARALLEL_SERDES,
+ HNAE3_LOOP_PHY,
+ HNAE3_LOOP_NONE,
};
enum hnae3_client_type {
@@ -107,6 +112,7 @@ enum hnae3_media_type {
HNAE3_MEDIA_TYPE_FIBER,
HNAE3_MEDIA_TYPE_COPPER,
HNAE3_MEDIA_TYPE_BACKPLANE,
+ HNAE3_MEDIA_TYPE_NONE,
};
enum hnae3_reset_notify_type {
@@ -173,6 +179,7 @@ struct hnae3_ae_dev {
struct list_head node;
u32 flag;
enum hnae3_dev_type dev_type;
+ enum hnae3_reset_type reset_type;
void *priv;
};
@@ -337,6 +344,8 @@ struct hnae3_ae_ops {
void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
int (*set_mac_addr)(struct hnae3_handle *handle, void *p,
bool is_first);
+ int (*do_ioctl)(struct hnae3_handle *handle,
+ struct ifreq *ifr, int cmd);
int (*add_uc_addr)(struct hnae3_handle *handle,
const unsigned char *addr);
int (*rm_uc_addr)(struct hnae3_handle *handle,
@@ -346,8 +355,6 @@ struct hnae3_ae_ops {
const unsigned char *addr);
int (*rm_mc_addr)(struct hnae3_handle *handle,
const unsigned char *addr);
- int (*update_mta_status)(struct hnae3_handle *handle);
-
void (*set_tso_stats)(struct hnae3_handle *handle, int enable);
void (*update_stats)(struct hnae3_handle *handle,
struct net_device_stats *net_stats);
@@ -395,11 +402,11 @@ struct hnae3_ae_ops {
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
- void (*reset_event)(struct hnae3_handle *handle);
+ void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
void (*get_channels)(struct hnae3_handle *handle,
struct ethtool_channels *ch);
void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
- u16 *free_tqps, u16 *max_rss_size);
+ u16 *alloc_tqps, u16 *max_rss_size);
int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
void (*get_flowctrl_adv)(struct hnae3_handle *handle,
u32 *flowctrl_adv);
@@ -408,7 +415,21 @@ struct hnae3_ae_ops {
void (*get_link_mode)(struct hnae3_handle *handle,
unsigned long *supported,
unsigned long *advertising);
- void (*get_port_type)(struct hnae3_handle *handle, u8 *port_type);
+ int (*add_fd_entry)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*del_fd_entry)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ void (*del_all_fd_entries)(struct hnae3_handle *handle,
+ bool clear_list);
+ int (*get_fd_rule_cnt)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_fd_rule_info)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_fd_all_rules)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs);
+ int (*restore_fd_rules)(struct hnae3_handle *handle);
+ void (*enable_fd)(struct hnae3_handle *handle, bool enable);
+ pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev);
};
struct hnae3_dcb_ops {
@@ -459,6 +480,7 @@ struct hnae3_knic_private_info {
const struct hnae3_dcb_ops *dcb_ops;
u16 int_rl_setting;
+ enum pkt_hash_types rss_type;
};
struct hnae3_roce_private_info {
@@ -476,10 +498,20 @@ struct hnae3_unic_private_info {
struct hnae3_queue **tqp; /* array base of all TQPs of this instance */
};
-#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0)
+#define HNAE3_SUPPORT_APP_LOOPBACK BIT(0)
#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1)
-#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2)
+#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
#define HNAE3_SUPPORT_VF BIT(3)
+#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
+
+#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
+#define HNAE3_USER_MPE BIT(1) /* multicast promisc enabled by user */
+#define HNAE3_BPE BIT(2) /* broadcast promisc enable */
+#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */
+#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */
+#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */
+#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE)
+#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE)
struct hnae3_handle {
struct hnae3_client *client;
@@ -499,6 +531,8 @@ struct hnae3_handle {
};
u32 numa_node_mask; /* for multi-chip support */
+
+ u8 netdev_flags;
};
#define hnae3_set_field(origin, mask, shift, val) \
@@ -521,4 +555,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_client(struct hnae3_client *client);
int hnae3_register_client(struct hnae3_client *client);
+
+void hnae3_set_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev, int inited);
#endif
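The new HNAE3_DEV_SUPPORT_FD_B flag bit and its hnae3_dev_fd_supported() wrapper gate every flow-director path on a hardware capability probe. A minimal sketch of the intended call pattern follows, assuming a hypothetical hclge_add_fd_entry_sketch() callee; the function body and the -EOPNOTSUPP convention are illustrative, not taken from this patch.

/* Illustrative sketch: every FD op checks the capability bit first.
 * hclge_dev and hnae3_get_bit() follow the driver; the body is a stub.
 */
static int hclge_add_fd_entry_sketch(struct hclge_dev *hdev,
				     struct ethtool_rxnfc *cmd)
{
	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;	/* rev < 0x21 never sets the bit */

	/* ... parse cmd->fs and program the FD TCAM here ... */
	return 0;
}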
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 955c4ab18b03..32f3aca814e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -9,6 +9,7 @@
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
@@ -21,6 +22,7 @@
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
+static void hns3_remove_hw_addr(struct net_device *netdev);
static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
@@ -66,6 +68,23 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
return IRQ_HANDLED;
}
+/* This callback is registered through irq_set_affinity_notifier() and
+ * records irq affinity changes into the TQP vector's cached affinity mask.
+ */
+static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct hns3_enet_tqp_vector *tqp_vectors =
+ container_of(notify, struct hns3_enet_tqp_vector,
+ affinity_notify);
+
+ tqp_vectors->affinity_mask = *mask;
+}
+
+static void hns3_nic_irq_affinity_release(struct kref *ref)
+{
+}
+
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
struct hns3_enet_tqp_vector *tqp_vectors;
@@ -77,6 +96,10 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
continue;
+ /* clear the affinity notifier and affinity mask */
+ irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
+ irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
+
/* release the irq resource */
free_irq(tqp_vectors->vector_irq, tqp_vectors);
tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -127,6 +150,15 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
return ret;
}
+ tqp_vectors->affinity_notify.notify =
+ hns3_nic_irq_affinity_notify;
+ tqp_vectors->affinity_notify.release =
+ hns3_nic_irq_affinity_release;
+ irq_set_affinity_notifier(tqp_vectors->vector_irq,
+ &tqp_vectors->affinity_notify);
+ irq_set_affinity_hint(tqp_vectors->vector_irq,
+ &tqp_vectors->affinity_mask);
+
tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
}
@@ -195,8 +227,6 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hns3_nic_priv *priv)
{
- struct hnae3_handle *h = priv->ae_handle;
-
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
@@ -209,9 +239,6 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
- /* Default: disable RL */
- h->kinfo.int_rl_setting = 0;
-
tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
@@ -277,12 +304,12 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
- u16 free_tqps, max_rss_size, max_tqps;
+ u16 alloc_tqps, max_rss_size, rss_size;
- h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
- max_tqps = h->kinfo.num_tc * max_rss_size;
+ h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
+ rss_size = alloc_tqps / h->kinfo.num_tc;
- return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
+ return min_t(u16, rss_size, max_rss_size);
}
static int hns3_nic_net_up(struct net_device *netdev)
@@ -433,26 +460,81 @@ static int hns3_nic_mc_unsync(struct net_device *netdev,
return 0;
}
+static u8 hns3_get_netdev_flags(struct net_device *netdev)
+{
+ u8 flags = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
+ } else {
+ flags |= HNAE3_VLAN_FLTR;
+ if (netdev->flags & IFF_ALLMULTI)
+ flags |= HNAE3_USER_MPE;
+ }
+
+ return flags;
+}
+
static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 new_flags;
+ int ret;
- if (h->ae_algo->ops->set_promisc_mode) {
- if (netdev->flags & IFF_PROMISC)
- h->ae_algo->ops->set_promisc_mode(h, true, true);
- else if (netdev->flags & IFF_ALLMULTI)
- h->ae_algo->ops->set_promisc_mode(h, false, true);
- else
- h->ae_algo->ops->set_promisc_mode(h, false, false);
- }
- if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
+ new_flags = hns3_get_netdev_flags(netdev);
+
+ ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
+ if (ret) {
netdev_err(netdev, "sync uc address fail\n");
+ if (ret == -ENOSPC)
+ new_flags |= HNAE3_OVERFLOW_UPE;
+ }
+
if (netdev->flags & IFF_MULTICAST) {
- if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
+ ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
+ hns3_nic_mc_unsync);
+ if (ret) {
netdev_err(netdev, "sync mc address fail\n");
+ if (ret == -ENOSPC)
+ new_flags |= HNAE3_OVERFLOW_MPE;
+ }
+ }
- if (h->ae_algo->ops->update_mta_status)
- h->ae_algo->ops->update_mta_status(h);
+ hns3_update_promisc_mode(netdev, new_flags);
+ /* When user-mode promisc is enabled, vlan filtering is disabled to
+ * let all packets in; when promisc is enabled only by MAC-VLAN table
+ * overflow, vlan filtering stays enabled.
+ */
+ hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
+ h->netdev_flags = new_flags;
+}
+
+void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (h->ae_algo->ops->set_promisc_mode) {
+ h->ae_algo->ops->set_promisc_mode(h,
+ promisc_flags & HNAE3_UPE,
+ promisc_flags & HNAE3_MPE);
+ }
+}
+
+void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ bool last_state;
+
+ if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
+ last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
+ if (enable != last_state) {
+ netdev_info(netdev,
+ "%s vlan filter\n",
+ enable ? "enable" : "disable");
+ h->ae_algo->ops->enable_vlan_filter(h, enable);
+ }
}
}
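The netdev_flags encoding above compresses three inputs (IFF_PROMISC, IFF_ALLMULTI, address-table overflow) into one byte that hns3_update_promisc_mode() and hns3_enable_vlan_filter() then consume. A small standalone check of the mapping, mirroring only the BIT() values defined in hnae3.h; the test harness itself is illustrative:

#include <assert.h>

/* Mirror of the hnae3.h flag values for a standalone check */
#define HNAE3_USER_UPE  (1U << 0)
#define HNAE3_USER_MPE  (1U << 1)
#define HNAE3_VLAN_FLTR (1U << 5)

#define IFF_PROMISC  0x100
#define IFF_ALLMULTI 0x200

static unsigned char get_netdev_flags_sketch(unsigned int ifflags)
{
	unsigned char flags = 0;

	if (ifflags & IFF_PROMISC) {
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
	} else {
		flags |= HNAE3_VLAN_FLTR;
		if (ifflags & IFF_ALLMULTI)
			flags |= HNAE3_USER_MPE;
	}
	return flags;
}

int main(void)
{
	/* promisc: unicast and multicast promisc, no vlan filter */
	assert(get_netdev_flags_sketch(IFF_PROMISC) ==
	       (HNAE3_USER_UPE | HNAE3_USER_MPE));
	/* allmulti only: multicast promisc, vlan filter stays on */
	assert(get_netdev_flags_sketch(IFF_ALLMULTI) ==
	       (HNAE3_VLAN_FLTR | HNAE3_USER_MPE));
	/* neither: vlan filter only */
	assert(get_netdev_flags_sketch(0) == HNAE3_VLAN_FLTR);
	return 0;
}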
@@ -896,35 +978,28 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
- int size, dma_addr_t dma, int frag_end,
- enum hns_desc_type type)
+ int size, int frag_end, enum hns_desc_type type)
{
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+ struct device *dev = ring_to_dev(ring);
u32 ol_type_vlan_len_msec = 0;
u16 bdtp_fe_sc_vld_ra_ri = 0;
+ struct skb_frag_struct *frag;
+ unsigned int frag_buf_num;
u32 type_cs_vlan_tso = 0;
struct sk_buff *skb;
u16 inner_vtag = 0;
u16 out_vtag = 0;
+ unsigned int k;
+ int sizeoflast;
u32 paylen = 0;
+ dma_addr_t dma;
u16 mss = 0;
u8 ol4_proto;
u8 il4_proto;
int ret;
- /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
- desc_cb->priv = priv;
- desc_cb->length = size;
- desc_cb->dma = dma;
- desc_cb->type = type;
-
- /* now, fill the descriptor */
- desc->addr = cpu_to_le64(dma);
- desc->tx.send_size = cpu_to_le16((u16)size);
- hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
- desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
-
if (type == DESC_TYPE_SKB) {
skb = (struct sk_buff *)priv;
paylen = skb->len;
@@ -965,38 +1040,47 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc->tx.mss = cpu_to_le16(mss);
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
- }
- /* move ring pointer to next.*/
- ring_ptr_move_fw(ring, next_to_use);
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ } else {
+ frag = (struct skb_frag_struct *)priv;
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ }
- return 0;
-}
+ if (dma_mapping_error(ring->dev, dma)) {
+ ring->stats.sw_err_cnt++;
+ return -ENOMEM;
+ }
-static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
- int size, dma_addr_t dma, int frag_end,
- enum hns_desc_type type)
-{
- unsigned int frag_buf_num;
- unsigned int k;
- int sizeoflast;
- int ret;
+ desc_cb->length = size;
frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
sizeoflast = size % HNS3_MAX_BD_SIZE;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
- /* When the frag size is bigger than hardware, split this frag */
+ /* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
- ret = hns3_fill_desc(ring, priv,
- (k == frag_buf_num - 1) ?
- sizeoflast : HNS3_MAX_BD_SIZE,
- dma + HNS3_MAX_BD_SIZE * k,
- frag_end && (k == frag_buf_num - 1) ? 1 : 0,
- (type == DESC_TYPE_SKB && !k) ?
- DESC_TYPE_SKB : DESC_TYPE_PAGE);
- if (ret)
- return ret;
+ /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
+ desc_cb->priv = priv;
+ desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
+ desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
+ DESC_TYPE_SKB : DESC_TYPE_PAGE;
+
+ /* now, fill the descriptor */
+ desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
+ desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
+ (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
+ hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
+ frag_end && (k == frag_buf_num - 1) ?
+ 1 : 0);
+ desc->tx.bdtp_fe_sc_vld_ra_ri =
+ cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+ /* move ring pointer to next */
+ ring_ptr_move_fw(ring, next_to_use);
+
+ desc_cb = &ring->desc_cb[ring->next_to_use];
+ desc = &ring->desc[ring->next_to_use];
}
return 0;
@@ -1044,7 +1128,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
/* No. of segments (plus a header) */
buf_num = skb_shinfo(skb)->nr_frags + 1;
- if (buf_num > ring_space(ring))
+ if (unlikely(ring_space(ring) < buf_num))
return -EBUSY;
*bnum = buf_num;
@@ -1052,7 +1136,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
return 0;
}
-static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
+static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
{
struct device *dev = ring_to_dev(ring);
unsigned int i;
@@ -1068,12 +1152,14 @@ static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
ring->desc_cb[ring->next_to_use].dma,
ring->desc_cb[ring->next_to_use].length,
DMA_TO_DEVICE);
- else
+ else if (ring->desc_cb[ring->next_to_use].length)
dma_unmap_page(dev,
ring->desc_cb[ring->next_to_use].dma,
ring->desc_cb[ring->next_to_use].length,
DMA_TO_DEVICE);
+ ring->desc_cb[ring->next_to_use].length = 0;
+
/* rollback one */
ring_ptr_move_bw(ring, next_to_use);
}
@@ -1085,12 +1171,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
struct hns3_nic_ring_data *ring_data =
&tx_ring_data(priv, skb->queue_mapping);
struct hns3_enet_ring *ring = ring_data->ring;
- struct device *dev = priv->dev;
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int next_to_use_head;
int next_to_use_frag;
- dma_addr_t dma;
int buf_num;
int seg_num;
int size;
@@ -1125,35 +1209,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
next_to_use_head = ring->next_to_use;
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma)) {
- netdev_err(netdev, "TX head DMA map failed\n");
- ring->stats.sw_err_cnt++;
- goto out_err_tx_ok;
- }
-
- ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
- DESC_TYPE_SKB);
+ ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
+ DESC_TYPE_SKB);
if (ret)
- goto head_dma_map_err;
+ goto head_fill_err;
next_to_use_frag = ring->next_to_use;
/* Fill the fragments */
for (i = 1; i < seg_num; i++) {
frag = &skb_shinfo(skb)->frags[i - 1];
size = skb_frag_size(frag);
- dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma)) {
- netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
- ring->stats.sw_err_cnt++;
- goto frag_dma_map_err;
- }
- ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
- seg_num - 1 == i ? 1 : 0,
- DESC_TYPE_PAGE);
+
+ ret = priv->ops.fill_desc(ring, frag, size,
+ seg_num - 1 == i ? 1 : 0,
+ DESC_TYPE_PAGE);
if (ret)
- goto frag_dma_map_err;
+ goto frag_fill_err;
}
/* Complete translate all packets */
@@ -1166,11 +1238,11 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
-frag_dma_map_err:
- hns_nic_dma_unmap(ring, next_to_use_frag);
+frag_fill_err:
+ hns3_clear_desc(ring, next_to_use_frag);
-head_dma_map_err:
- hns_nic_dma_unmap(ring, next_to_use_head);
+head_fill_err:
+ hns3_clear_desc(ring, next_to_use_head);
out_err_tx_ok:
dev_kfree_skb_any(skb);
@@ -1209,6 +1281,20 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
+static int hns3_nic_do_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!h->ae_algo->ops->do_ioctl)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
+}
+
static int hns3_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -1218,13 +1304,10 @@ static int hns3_nic_set_features(struct net_device *netdev,
int ret;
if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
- if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
- priv->ops.fill_desc = hns3_fill_desc_tso;
+ if (features & (NETIF_F_TSO | NETIF_F_TSO6))
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- } else {
- priv->ops.fill_desc = hns3_fill_desc;
+ else
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
- }
}
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
@@ -1246,6 +1329,13 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
+ if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
+ if (features & NETIF_F_NTUPLE)
+ h->ae_algo->ops->enable_fd(h, true);
+ else
+ h->ae_algo->ops->enable_fd(h, false);
+ }
+
netdev->features = features;
return 0;
}
@@ -1447,13 +1537,11 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
}
ret = h->ae_algo->ops->set_mtu(h, new_mtu);
- if (ret) {
+ if (ret)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
ret);
- return ret;
- }
-
- netdev->mtu = new_mtu;
+ else
+ netdev->mtu = new_mtu;
/* if the netdev was running earlier, bring it up again */
if (if_running && hns3_nic_net_open(netdev))
@@ -1526,7 +1614,7 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
/* request the reset */
if (h->ae_algo->ops->reset_event)
- h->ae_algo->ops->reset_event(h);
+ h->ae_algo->ops->reset_event(h->pdev, h);
}
static const struct net_device_ops hns3_nic_netdev_ops = {
@@ -1535,6 +1623,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_start_xmit = hns3_nic_net_xmit,
.ndo_tx_timeout = hns3_nic_net_timeout,
.ndo_set_mac_address = hns3_nic_net_set_mac_address,
+ .ndo_do_ioctl = hns3_nic_do_ioctl,
.ndo_change_mtu = hns3_nic_change_mtu,
.ndo_set_features = hns3_nic_set_features,
.ndo_get_stats64 = hns3_nic_get_stats64,
@@ -1584,6 +1673,13 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
}
+static void hns3_get_dev_capability(struct pci_dev *pdev,
+ struct hnae3_ae_dev *ae_dev)
+{
+ if (pdev->revision >= 0x21)
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
+}
+
/* hns3_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in hns3_pci_tbl
@@ -1609,6 +1705,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
ae_dev->dev_type = HNAE3_DEV_KNIC;
+ ae_dev->reset_type = HNAE3_NONE_RESET;
+ hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
hnae3_register_ae_dev(ae_dev);
@@ -1662,12 +1760,72 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
return 0;
}
+static void hns3_shutdown(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ hnae3_unregister_ae_dev(ae_dev);
+ devm_kfree(&pdev->dev, ae_dev);
+ pci_set_drvdata(pdev, NULL);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ pci_ers_result_t ret;
+
+ dev_info(&pdev->dev, "PCI error detected, state = %d\n", state);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (!ae_dev) {
+ dev_err(&pdev->dev,
+ "Can't recover - error happened during device init\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ if (ae_dev->ops->process_hw_error)
+ ret = ae_dev->ops->process_hw_error(ae_dev);
+ else
+ return PCI_ERS_RESULT_NONE;
+
+ return ret;
+}
+
+static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ dev_info(dev, "requesting reset due to PCI error\n");
+
+ /* request the reset */
+ if (ae_dev->ops->reset_event) {
+ ae_dev->ops->reset_event(pdev, NULL);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static const struct pci_error_handlers hns3_err_handler = {
+ .error_detected = hns3_error_detected,
+ .slot_reset = hns3_slot_reset,
+};
+
static struct pci_driver hns3_driver = {
.name = hns3_driver_name,
.id_table = hns3_pci_tbl,
.probe = hns3_probe,
.remove = hns3_remove,
+ .shutdown = hns3_shutdown,
.sriov_configure = hns3_pci_sriov_configure,
+ .err_handler = &hns3_err_handler,
};
/* set default feature to hns3 */
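The err_handler added above plugs hns3 into the PCI AER recovery sequence: the core calls error_detected() first to classify the fault, and only when a reset is requested does it proceed to slot_reset(), which here funnels into the same reset_event() path the TX-timeout handler uses. A standalone sketch of that two-step flow, using simplified stand-ins for the pci_ers_result_t values and a toy decision model (illustrative, not the driver's logic):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the pci_ers_result_t values used above */
enum ers_result { ERS_NONE, ERS_RECOVERED, ERS_NEED_RESET, ERS_DISCONNECT };

static enum ers_result error_detected_sketch(bool perm_failure,
					     bool have_hw_handler)
{
	if (perm_failure)
		return ERS_DISCONNECT;	/* link is gone for good */
	if (!have_hw_handler)
		return ERS_NONE;	/* no hardware layer to consult */
	/* ask the hardware layer, as process_hw_error() does */
	return ERS_NEED_RESET;
}

static enum ers_result slot_reset_sketch(bool have_reset_event)
{
	/* recovery requires the reset_event() hook; otherwise give up */
	return have_reset_event ? ERS_RECOVERED : ERS_DISCONNECT;
}

int main(void)
{
	if (error_detected_sketch(false, true) == ERS_NEED_RESET)
		printf("core calls slot_reset(): %s\n",
		       slot_reset_sketch(true) == ERS_RECOVERED ?
		       "recovered" : "disconnect");
	return 0;
}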
@@ -1682,7 +1840,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
@@ -1694,24 +1852,30 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
netdev->vlan_features |=
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
- if (pdev->revision != 0x20)
+ if (pdev->revision >= 0x21) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (!(h->flags & HNAE3_SUPPORT_VF)) {
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->features |= NETIF_F_NTUPLE;
+ }
+ }
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -1749,7 +1913,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
cb->length, ring_to_dma_dir(ring));
- if (dma_mapping_error(ring_to_dev(ring), cb->dma))
+ if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
return -EIO;
return 0;
@@ -1761,7 +1925,7 @@ static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
if (cb->type == DESC_TYPE_SKB)
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
- else
+ else if (cb->length)
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
}
@@ -1912,9 +2076,10 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
-bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
struct netdev_queue *dev_queue;
int bytes, pkts;
int head;
@@ -1923,7 +2088,7 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
rmb(); /* Make sure head is ready before touch any data */
if (is_ring_empty(ring) || head == ring->next_to_clean)
- return true; /* no data to poll */
+ return; /* no data to poll */
if (unlikely(!is_valid_clean_head(ring, head))) {
netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
@@ -1932,16 +2097,15 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
u64_stats_update_begin(&ring->syncp);
ring->stats.io_err_cnt++;
u64_stats_update_end(&ring->syncp);
- return true;
+ return;
}
bytes = 0;
pkts = 0;
- while (head != ring->next_to_clean && budget) {
+ while (head != ring->next_to_clean) {
hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
/* Issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ring->next_to_clean]);
- budget--;
}
ring->tqp_vector->tx_group.total_bytes += bytes;
@@ -1961,13 +2125,12 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
* sees the new next_to_clean.
*/
smp_mb();
- if (netif_tx_queue_stopped(dev_queue)) {
+ if (netif_tx_queue_stopped(dev_queue) &&
+ !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
netif_tx_wake_queue(dev_queue);
ring->stats.restart_queue++;
}
}
-
- return !!budget;
}
static int hns3_desc_unused(struct hns3_enet_ring *ring)
@@ -2092,7 +2255,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
- netdev_err(netdev, "L3/L4 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l3l4_csum_err++;
u64_stats_update_end(&ring->syncp);
@@ -2121,6 +2283,8 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
l4_type == HNS3_L4_TYPE_SCTP))
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
+ default:
+ break;
}
}
@@ -2129,18 +2293,18 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
-static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
- struct hns3_desc *desc, u32 l234info)
+static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+ struct hns3_desc *desc, u32 l234info,
+ u16 *vlan_tag)
{
struct pci_dev *pdev = ring->tqp->handle->pdev;
- u16 vlan_tag;
if (pdev->revision == 0x20) {
- vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
- if (!(vlan_tag & VLAN_VID_MASK))
- vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ if (!(*vlan_tag & VLAN_VID_MASK))
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
- return vlan_tag;
+ return (*vlan_tag != 0);
}
#define HNS3_STRP_OUTER_VLAN 0x1
@@ -2149,17 +2313,29 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
- vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
- break;
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ return true;
case HNS3_STRP_INNER_VLAN:
- vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
- break;
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ return true;
default:
- vlan_tag = 0;
- break;
+ return false;
}
+}
+
+static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ struct hnae3_handle *handle = ring->tqp->handle;
+ enum pkt_hash_types rss_type;
- return vlan_tag;
+ if (le32_to_cpu(desc->rx.rss_hash))
+ rss_type = handle->kinfo.rss_type;
+ else
+ rss_type = PKT_HASH_TYPE_NONE;
+
+ skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
@@ -2261,16 +2437,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
u16 vlan_tag;
- vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
- if (vlan_tag & VLAN_VID_MASK)
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
__vlan_hwaccel_put_tag(skb,
htons(ETH_P_8021Q),
vlan_tag);
}
if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
- netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
- ((u64 *)desc)[0], ((u64 *)desc)[1]);
u64_stats_update_begin(&ring->syncp);
ring->stats.non_vld_descs++;
u64_stats_update_end(&ring->syncp);
@@ -2281,7 +2454,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
if (unlikely((!desc->rx.pkt_len) ||
hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
- netdev_err(netdev, "truncated pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.err_pkt_len++;
u64_stats_update_end(&ring->syncp);
@@ -2291,7 +2463,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
}
if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
- netdev_err(netdev, "L2 error pkt\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.l2_err++;
u64_stats_update_end(&ring->syncp);
@@ -2308,6 +2479,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->tqp_vector->rx_group.total_bytes += skb->len;
hns3_rx_checksum(ring, skb, desc);
+ hns3_set_rx_skb_rss_type(ring, skb);
+
return 0;
}
@@ -2501,10 +2674,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- hns3_for_each_ring(ring, tqp_vector->tx_group) {
- if (!hns3_clean_tx_ring(ring, budget))
- clean_complete = false;
- }
+ hns3_for_each_ring(ring, tqp_vector->tx_group)
+ hns3_clean_tx_ring(ring);
/* make sure rx ring budget not smaller than 1 */
rx_budget = max(budget / tqp_vector->num_tqps, 1);
@@ -2627,6 +2798,23 @@ static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
group->count++;
}
+static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
+{
+ struct pci_dev *pdev = priv->ae_handle->pdev;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int num_vectors = priv->vector_num;
+ int numa_node;
+ int vector_i;
+
+ numa_node = dev_to_node(&pdev->dev);
+
+ for (vector_i = 0; vector_i < num_vectors; vector_i++) {
+ tqp_vector = &priv->tqp_vector[vector_i];
+ cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
+ &tqp_vector->affinity_mask);
+ }
+}
+
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_ring_chain_node vector_ring_chain;
@@ -2635,6 +2823,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
int ret = 0;
u16 i;
+ hns3_nic_set_cpumask(priv);
+
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
hns3_vector_gl_rl_init_hw(tqp_vector, priv);
@@ -3069,38 +3259,48 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init)
}
-static void hns3_uninit_mac_addr(struct net_device *netdev)
+static int hns3_restore_fd_rules(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret = 0;
- if (h->ae_algo->ops->rm_uc_addr)
- h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
+ if (h->ae_algo->ops->restore_fd_rules)
+ ret = h->ae_algo->ops->restore_fd_rules(h);
+
+ return ret;
+}
+
+static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->del_all_fd_entries)
+ h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}
static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ priv->ops.fill_desc = hns3_fill_desc;
if ((netdev->features & NETIF_F_TSO) ||
- (netdev->features & NETIF_F_TSO6)) {
- priv->ops.fill_desc = hns3_fill_desc_tso;
+ (netdev->features & NETIF_F_TSO6))
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- } else {
- priv->ops.fill_desc = hns3_fill_desc;
+ else
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
- }
}
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
+ u16 alloc_tqps, max_rss_size;
struct hns3_nic_priv *priv;
struct net_device *netdev;
int ret;
- netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
- hns3_get_max_available_channels(handle));
+ handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
+ &max_rss_size);
+ netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
if (!netdev)
return -ENOMEM;
@@ -3189,9 +3389,13 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ hns3_remove_hw_addr(netdev);
+
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ hns3_del_all_fd_rules(netdev, true);
+
hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
@@ -3210,8 +3414,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
priv->ring_data = NULL;
- hns3_uninit_mac_addr(netdev);
-
free_netdev(netdev);
}
@@ -3283,6 +3485,25 @@ static void hns3_recover_hw_addr(struct net_device *ndev)
hns3_nic_mc_sync(ndev, ha->addr);
}
+static void hns3_remove_hw_addr(struct net_device *netdev)
+{
+ struct netdev_hw_addr_list *list;
+ struct netdev_hw_addr *ha, *tmp;
+
+ hns3_nic_uc_unsync(netdev, netdev->dev_addr);
+
+ /* go through and unsync uc_addr entries from the device */
+ list = &netdev->uc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list)
+ hns3_nic_uc_unsync(netdev, ha->addr);
+
+ /* go through and unsync mc_addr entries from the device */
+ list = &netdev->mc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list)
+ if (ha->refcount > 1)
+ hns3_nic_mc_unsync(netdev, ha->addr);
+}
+
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_clean != ring->next_to_use) {
@@ -3419,6 +3640,31 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
return 0;
}
+static void hns3_store_coal(struct hns3_nic_priv *priv)
+{
+ /* ethtool only supports setting and querying one coalesce
+ * configuration for now, so save vector 0's coalesce
+ * configuration here in order to restore it later.
+ */
+ memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
+ sizeof(struct hns3_enet_coalesce));
+ memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
+ sizeof(struct hns3_enet_coalesce));
+}
+
+static void hns3_restore_coal(struct hns3_nic_priv *priv)
+{
+ u16 vector_num = priv->vector_num;
+ int i;
+
+ for (i = 0; i < vector_num; i++) {
+ memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
+ sizeof(struct hns3_enet_coalesce));
+ memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
+ sizeof(struct hns3_enet_coalesce));
+ }
+}
+
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -3452,19 +3698,27 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ bool vlan_filter_enable;
int ret;
hns3_init_mac_addr(netdev, false);
- hns3_nic_set_rx_mode(netdev);
hns3_recover_hw_addr(netdev);
+ hns3_update_promisc_mode(netdev, handle->netdev_flags);
+ vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
+ hns3_enable_vlan_filter(netdev, vlan_filter_enable);
+
/* Hardware table is only clear when pf resets */
if (!(handle->flags & HNAE3_SUPPORT_VF))
hns3_restore_vlan(netdev);
+ hns3_restore_fd_rules(netdev);
+
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ hns3_restore_coal(priv);
+
ret = hns3_nic_init_vector_data(priv);
if (ret)
return ret;
@@ -3480,6 +3734,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
@@ -3492,11 +3747,20 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return ret;
}
+ hns3_store_coal(priv);
+
ret = hns3_uninit_all_ring(priv);
if (ret)
netdev_err(netdev, "uninit ring error\n");
- hns3_uninit_mac_addr(netdev);
+ /* It is cumbersome for hardware to pick-and-choose entries for
+ * deletion from the table space. Hence, for a function reset,
+ * software intervention is required to delete the entries.
+ */
+ if (hns3_dev_ongoing_func_reset(ae_dev)) {
+ hns3_remove_hw_addr(netdev);
+ hns3_del_all_fd_rules(netdev, false);
+ }
return ret;
}
@@ -3526,24 +3790,7 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
return ret;
}
-static void hns3_restore_coal(struct hns3_nic_priv *priv,
- struct hns3_enet_coalesce *tx,
- struct hns3_enet_coalesce *rx)
-{
- u16 vector_num = priv->vector_num;
- int i;
-
- for (i = 0; i < vector_num; i++) {
- memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
- sizeof(struct hns3_enet_coalesce));
- }
-}
-
-static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
- struct hns3_enet_coalesce *tx,
- struct hns3_enet_coalesce *rx)
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -3561,7 +3808,7 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
if (ret)
goto err_alloc_vector;
- hns3_restore_coal(priv, tx, rx);
+ hns3_restore_coal(priv);
ret = hns3_nic_init_vector_data(priv);
if (ret)
@@ -3593,7 +3840,6 @@ int hns3_set_channels(struct net_device *netdev,
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
- struct hns3_enet_coalesce tx_coal, rx_coal;
bool if_running = netif_running(netdev);
u32 new_tqp_num = ch->combined_count;
u16 org_tqp_num;
@@ -3625,15 +3871,7 @@ int hns3_set_channels(struct net_device *netdev,
goto open_netdev;
}
- /* Changing the tqp num may also change the vector num,
- * ethtool only support setting and querying one coal
- * configuation for now, so save the vector 0' coal
- * configuation here in order to restore it.
- */
- memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
- sizeof(struct hns3_enet_coalesce));
+ hns3_store_coal(priv);
hns3_nic_dealloc_vector_data(priv);
@@ -3641,10 +3879,9 @@ int hns3_set_channels(struct net_device *netdev,
hns3_put_ring_config(priv);
org_tqp_num = h->kinfo.num_tqps;
- ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
+ ret = hns3_modify_tqp_num(netdev, new_tqp_num);
if (ret) {
- ret = hns3_modify_tqp_num(netdev, org_tqp_num,
- &tx_coal, &rx_coal);
+ ret = hns3_modify_tqp_num(netdev, org_tqp_num);
if (ret) {
/* If revert to old tqp failed, fatal error occurred */
dev_err(&netdev->dev,
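The merged hns3_fill_desc() above now maps the DMA itself and splits any buffer larger than HNS3_MAX_BD_SIZE across several descriptors, with the last BD carrying the remainder. A standalone check of that split arithmetic; the MAX_BD_SIZE value below is an illustrative stand-in, not the driver's constant:

#include <assert.h>

#define MAX_BD_SIZE 4096 /* illustrative stand-in for HNS3_MAX_BD_SIZE */

static void split_sketch(int size, int *bd_num, int *last_bd)
{
	*bd_num = (size + MAX_BD_SIZE - 1) / MAX_BD_SIZE;
	*last_bd = size % MAX_BD_SIZE;
	*last_bd = *last_bd ? *last_bd : MAX_BD_SIZE;
}

int main(void)
{
	int n, last;

	split_sketch(4096, &n, &last);	/* exact multiple: one full BD */
	assert(n == 1 && last == 4096);

	split_sketch(4097, &n, &last);	/* one byte spills into a 2nd BD */
	assert(n == 2 && last == 1);

	split_sketch(100, &n, &last);	/* small buffer: one short BD */
	assert(n == 1 && last == 100);
	return 0;
}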
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index cb450d7ec8c1..71cfca132d0b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -419,8 +419,7 @@ struct hns3_nic_ring_data {
struct hns3_nic_ops {
int (*fill_desc)(struct hns3_enet_ring *ring, void *priv,
- int size, dma_addr_t dma, int frag_end,
- enum hns_desc_type type);
+ int size, int frag_end, enum hns_desc_type type);
int (*maybe_stop_tx)(struct sk_buff **out_skb,
int *bnum, struct hns3_enet_ring *ring);
void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
@@ -491,7 +490,9 @@ struct hns3_enet_tqp_vector {
struct hns3_enet_ring_group rx_group;
struct hns3_enet_ring_group tx_group;
+ cpumask_t affinity_mask;
u16 num_tqps; /* total number of tqps in TQP vector */
+ struct irq_affinity_notify affinity_notify;
char name[HNAE3_INT_NAME_LEN];
@@ -541,6 +542,8 @@ struct hns3_nic_priv {
/* Vxlan/Geneve information */
struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ struct hns3_enet_coalesce tx_coal;
+ struct hns3_enet_coalesce rx_coal;
};
union l3_hdr_info {
@@ -581,6 +584,11 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
writel(value, reg_addr + reg);
}
+static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
+{
+ return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
+}
+
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
@@ -615,7 +623,7 @@ void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch);
-bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
@@ -631,6 +639,9 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value);
+void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
+void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
+
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
#else
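The affinity_mask/affinity_notify pair added to hns3_enet_tqp_vector follows the generic irq_affinity_notify pattern: register a notifier so user-space changes to /proc/irq/*/smp_affinity are reflected back into the driver's cached mask. A minimal sketch of that kernel API usage, with a hypothetical my_vector type standing in for the TQP vector:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

struct my_vector {
	int irq;
	cpumask_t affinity_mask;
	struct irq_affinity_notify notify;
};

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	struct my_vector *v = container_of(notify, struct my_vector, notify);

	v->affinity_mask = *mask;	/* cache the new mask */
}

/* release() must exist even if empty; the core drops the kref */
static void my_affinity_release(struct kref *ref) { }

static void my_vector_setup(struct my_vector *v)
{
	v->notify.notify = my_affinity_notify;
	v->notify.release = my_affinity_release;
	irq_set_affinity_notifier(v->irq, &v->notify);
	irq_set_affinity_hint(v->irq, &v->affinity_mask);
}

static void my_vector_teardown(struct my_vector *v)
{
	/* NULL unregisters both, mirroring hns3_nic_uninit_irq() */
	irq_set_affinity_notifier(v->irq, NULL);
	irq_set_affinity_hint(v->irq, NULL);
}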
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index f70ee6910ee2..a4762c2b8ba1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -22,13 +22,13 @@ struct hns3_stats {
static const struct hns3_stats hns3_txq_stats[] = {
/* Tx per-queue statistics */
HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("dropped", sw_err_cnt),
HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
HNS3_TQP_STAT("packets", tx_pkts),
HNS3_TQP_STAT("bytes", tx_bytes),
HNS3_TQP_STAT("errors", tx_err_cnt),
- HNS3_TQP_STAT("tx_wake", restart_queue),
- HNS3_TQP_STAT("tx_busy", tx_busy),
+ HNS3_TQP_STAT("wake", restart_queue),
+ HNS3_TQP_STAT("busy", tx_busy),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
@@ -36,7 +36,7 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = {
/* Rx per-queue statistics */
HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
+ HNS3_TQP_STAT("dropped", sw_err_cnt),
HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
HNS3_TQP_STAT("packets", rx_pkts),
HNS3_TQP_STAT("bytes", rx_bytes),
@@ -53,7 +53,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TYPE_NUM 2
+#define HNS3_SELF_TEST_TYPE_NUM 3
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -71,6 +71,7 @@ struct hns3_link_mode_mapping {
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ bool vlan_filter_enable;
int ret;
if (!h->ae_algo->ops->set_loopback ||
@@ -78,8 +79,9 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
return -EOPNOTSUPP;
switch (loop) {
- case HNAE3_MAC_INTER_LOOP_SERDES:
- case HNAE3_MAC_INTER_LOOP_MAC:
+ case HNAE3_LOOP_SERIAL_SERDES:
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ case HNAE3_LOOP_APP:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
@@ -90,7 +92,14 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
if (ret)
return ret;
- h->ae_algo->ops->set_promisc_mode(h, en, en);
+ if (en) {
+ h->ae_algo->ops->set_promisc_mode(h, true, true);
+ } else {
+ /* recover promisc mode before loopback test */
+ hns3_update_promisc_mode(ndev, h->netdev_flags);
+ vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
+ hns3_enable_vlan_filter(ndev, vlan_filter_enable);
+ }
return ret;
}
@@ -100,41 +109,26 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
struct hnae3_handle *h = hns3_get_handle(ndev);
int ret;
- if (!h->ae_algo->ops->start)
- return -EOPNOTSUPP;
-
ret = hns3_nic_reset_all_ring(h);
if (ret)
return ret;
- ret = h->ae_algo->ops->start(h);
- if (ret) {
- netdev_err(ndev,
- "hns3_lb_up ae start return error: %d\n", ret);
- return ret;
- }
-
ret = hns3_lp_setup(ndev, loop_mode, true);
usleep_range(10000, 20000);
- return ret;
+ return 0;
}
static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
{
- struct hnae3_handle *h = hns3_get_handle(ndev);
int ret;
- if (!h->ae_algo->ops->stop)
- return -EOPNOTSUPP;
-
ret = hns3_lp_setup(ndev, loop_mode, false);
if (ret) {
netdev_err(ndev, "lb_setup return error: %d\n", ret);
return ret;
}
- h->ae_algo->ops->stop(h);
usleep_range(10000, 20000);
return 0;
@@ -152,6 +146,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
+ ethh->h_dest[5] += 0x1f;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
skb_reset_mac_header(skb);
@@ -214,7 +209,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
for (i = start_ringid; i <= end_ringid; i++) {
struct hns3_enet_ring *ring = priv->ring_data[i].ring;
- hns3_clean_tx_ring(ring, budget);
+ hns3_clean_tx_ring(ring);
}
}
@@ -300,16 +295,21 @@ static void hns3_self_test(struct net_device *ndev,
if (eth_test->flags != ETH_TEST_FL_OFFLINE)
return;
- st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC;
- st_param[HNAE3_MAC_INTER_LOOP_MAC][1] =
- h->flags & HNAE3_SUPPORT_MAC_LOOPBACK;
+ st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
+ st_param[HNAE3_LOOP_APP][1] =
+ h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
- st_param[HNAE3_MAC_INTER_LOOP_SERDES][0] = HNAE3_MAC_INTER_LOOP_SERDES;
- st_param[HNAE3_MAC_INTER_LOOP_SERDES][1] =
- h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK;
+ st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES;
+ st_param[HNAE3_LOOP_SERIAL_SERDES][1] =
+ h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+
+ st_param[HNAE3_LOOP_PARALLEL_SERDES][0] =
+ HNAE3_LOOP_PARALLEL_SERDES;
+ st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
+ h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
if (if_running)
- dev_close(ndev);
+ ndev->netdev_ops->ndo_stop(ndev);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Disable the vlan filter for selftest does not support it */
@@ -347,7 +347,7 @@ static void hns3_self_test(struct net_device *ndev,
#endif
if (if_running)
- dev_open(ndev);
+ ndev->netdev_ops->ndo_open(ndev);
}
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
@@ -365,9 +365,10 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
case ETH_SS_TEST:
return ops->get_sset_count(h, stringset);
- }
- return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
@@ -383,7 +384,7 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
+ n1 = snprintf(data, MAX_PREFIX_SIZE, "%s%d_",
prefix, i);
n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
size_left = (ETH_GSTRING_LEN - 1) - n1;
@@ -431,6 +432,8 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_TEST:
ops->get_strings(h, stringset, data);
break;
+ default:
+ break;
}
}
@@ -556,30 +559,72 @@ static int hns3_set_pauseparam(struct net_device *netdev,
return -EOPNOTSUPP;
}
+static void hns3_get_ksettings(struct hnae3_handle *h,
+ struct ethtool_link_ksettings *cmd)
+{
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+
+ /* 1. auto_neg, speed and duplex from cmd */
+ if (ops->get_ksettings_an_result)
+ ops->get_ksettings_an_result(h,
+ &cmd->base.autoneg,
+ &cmd->base.speed,
+ &cmd->base.duplex);
+
+ /* 2. get link mode */
+ if (ops->get_link_mode)
+ ops->get_link_mode(h,
+ cmd->link_modes.supported,
+ cmd->link_modes.advertising);
+
+ /* 3. mdix_ctrl and mdix from phy reg */
+ if (ops->get_mdix_mode)
+ ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
+ &cmd->base.eth_tp_mdix);
+}
+
static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- u32 flowctrl_adv = 0;
+ const struct hnae3_ae_ops *ops;
+ u8 media_type;
u8 link_stat;
if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
- /* 1.auto_neg & speed & duplex from cmd */
- if (netdev->phydev) {
+ ops = h->ae_algo->ops;
+ if (ops->get_media_type)
+ ops->get_media_type(h, &media_type);
+ else
+ return -EOPNOTSUPP;
+
+ switch (media_type) {
+ case HNAE3_MEDIA_TYPE_NONE:
+ cmd->base.port = PORT_NONE;
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_FIBER:
+ cmd->base.port = PORT_FIBRE;
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_COPPER:
+ if (!netdev->phydev)
+ return -EOPNOTSUPP;
+
+ cmd->base.port = PORT_TP;
phy_ethtool_ksettings_get(netdev->phydev, cmd);
+ break;
+ default:
+ netdev_warn(netdev, "Unknown media type\n");
return 0;
}
- if (h->ae_algo->ops->get_ksettings_an_result)
- h->ae_algo->ops->get_ksettings_an_result(h,
- &cmd->base.autoneg,
- &cmd->base.speed,
- &cmd->base.duplex);
- else
- return -EOPNOTSUPP;
+ /* mdio_support */
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
link_stat = hns3_get_link(netdev);
if (!link_stat) {
@@ -587,36 +632,6 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN;
}
- /* 2.get link mode and port type*/
- if (h->ae_algo->ops->get_link_mode)
- h->ae_algo->ops->get_link_mode(h,
- cmd->link_modes.supported,
- cmd->link_modes.advertising);
-
- cmd->base.port = PORT_NONE;
- if (h->ae_algo->ops->get_port_type)
- h->ae_algo->ops->get_port_type(h,
- &cmd->base.port);
-
- /* 3.mdix_ctrl&mdix get from phy reg */
- if (h->ae_algo->ops->get_mdix_mode)
- h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
- &cmd->base.eth_tp_mdix);
- /* 4.mdio_support */
- cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
-
- /* 5.get flow control setttings */
- if (h->ae_algo->ops->get_flowctrl_adv)
- h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
-
- if (flowctrl_adv & ADVERTISED_Pause)
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Pause);
-
- if (flowctrl_adv & ADVERTISED_Asym_Pause)
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Asym_Pause);
-
return 0;
}
@@ -671,12 +686,13 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
- /* currently we only support Toeplitz hash */
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) {
- netdev_err(netdev,
- "hash func not supported (only Toeplitz hash)\n");
+ if ((h->pdev->revision == 0x20 &&
+ hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
+ netdev_err(netdev, "hash func not supported\n");
return -EOPNOTSUPP;
}
+
if (!indir) {
netdev_err(netdev,
"set rss failed for indir is empty\n");
@@ -692,20 +708,33 @@ static int hns3_get_rxnfc(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple)
+ if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = h->kinfo.rss_size;
- break;
+ cmd->data = h->kinfo.num_tqps;
+ return 0;
case ETHTOOL_GRXFH:
- return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ if (h->ae_algo->ops->get_rss_tuple)
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (h->ae_algo->ops->get_fd_rule_cnt)
+ return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRULE:
+ if (h->ae_algo->ops->get_fd_rule_info)
+ return h->ae_algo->ops->get_fd_rule_info(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRLALL:
+ if (h->ae_algo->ops->get_fd_all_rules)
+ return h->ae_algo->ops->get_fd_all_rules(h, cmd,
+ rule_locs);
+ return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
@@ -788,12 +817,22 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple)
+ if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
- return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ if (h->ae_algo->ops->set_rss_tuple)
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_SRXCLSRLINS:
+ if (h->ae_algo->ops->add_fd_entry)
+ return h->ae_algo->ops->add_fd_entry(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (h->ae_algo->ops->del_fd_entry)
+ return h->ae_algo->ops->del_fd_entry(h, cmd);
+ return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
@@ -1047,6 +1086,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
.get_rxnfc = hns3_get_rxnfc,
+ .set_rxnfc = hns3_set_rxnfc,
.get_rxfh_key_size = hns3_get_rss_key_size,
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
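With set_rxnfc/get_rxnfc now dispatching ETHTOOL_SRXCLSRLINS and friends, flow-director rules become reachable through the standard ethtool ioctl. A user-space sketch inserting a TCP/IPv4 rule that steers flows with destination port 80 to queue 3; the device name, port, queue and rule slot are example values:

#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int insert_fd_rule(int sock, const char *ifname)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);	/* match dport 80 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);	/* exact match */
	nfc.fs.ring_cookie = 3;				/* steer to queue 3 */
	nfc.fs.location = 0;				/* rule slot 0 */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	return ioctl(sock, SIOCETHTOOL, &ifr);
}

This corresponds to the CLI form "ethtool -N <dev> flow-type tcp4 dst-port 80 action 3 loc 0".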
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index cb8ddd043476..580e81743681 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -6,6 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 821d4c2f84bd..872cd4bdd70d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -175,21 +175,22 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
+ HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
- HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
-
- /* Multicast linear table commands */
- HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
- HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021,
- HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022,
- HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023,
/* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
+ /* Flow Director commands */
+ HCLGE_OPC_FD_MODE_CTRL = 0x1200,
+ HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
+ HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
+ HCLGE_OPC_FD_TCAM_OP = 0x1203,
+ HCLGE_OPC_FD_AD_OP = 0x1204,
+
/* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900,
@@ -208,6 +209,28 @@ enum hclge_opcode_type {
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+
+ /* Error INT commands */
+ HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CMD = 0x082d,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CE = 0x082f,
+ HCLGE_TM_SCH_ECC_ERR_RINT_NFE = 0x0830,
+ HCLGE_TM_SCH_ECC_ERR_RINT_FE = 0x0831,
+ HCLGE_TM_SCH_MBIT_ECC_INFO_CMD = 0x0833,
+ HCLGE_COMMON_ECC_INT_CFG = 0x1505,
+ HCLGE_IGU_EGU_TNL_INT_QUERY = 0x1802,
+ HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
+ HCLGE_IGU_EGU_TNL_INT_CLR = 0x1804,
+ HCLGE_IGU_COMMON_INT_QUERY = 0x1805,
+ HCLGE_IGU_COMMON_INT_EN = 0x1806,
+ HCLGE_IGU_COMMON_INT_CLR = 0x1807,
+ HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
+ HCLGE_TM_QCN_MEM_INT_INFO_CMD = 0x1A17,
+ HCLGE_PPP_CMD0_INT_CMD = 0x2100,
+ HCLGE_PPP_CMD1_INT_CMD = 0x2101,
+ HCLGE_NCSI_INT_QUERY = 0x2400,
+ HCLGE_NCSI_INT_EN = 0x2401,
+ HCLGE_NCSI_INT_CLR = 0x2402,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
@@ -395,6 +418,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
#define HCLGE_CFG_SPEED_ABILITY_S 0
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
+#define HCLGE_CFG_UMV_TBL_SPACE_S 16
+#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
struct hclge_cfg_param_cmd {
__le32 offset;
@@ -584,13 +609,12 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
-#define HCLGE_VLAN_MASK_EN_B 0
-struct hclge_mac_vlan_mask_entry_cmd {
- u8 rsv0[2];
- u8 vlan_mask;
- u8 rsv1;
- u8 mac_mask[6];
- u8 rsv2[14];
+#define HCLGE_UMV_SPC_ALC_B 0
+struct hclge_umv_spc_alc_cmd {
+ u8 allocate;
+ u8 rsv1[3];
+ __le32 space_size;
+ u8 rsv2[16];
};
#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
@@ -615,30 +639,6 @@ struct hclge_mac_mgr_tbl_entry_cmd {
u8 rsv3[2];
};
-#define HCLGE_CFG_MTA_MAC_SEL_S 0
-#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
-#define HCLGE_CFG_MTA_MAC_EN_B 7
-struct hclge_mta_filter_mode_cmd {
- u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */
- u8 rsv[23];
-};
-
-#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0
-struct hclge_cfg_func_mta_filter_cmd {
- u8 accept; /* Only used lowest 1 bit */
- u8 function_id;
- u8 rsv[22];
-};
-
-#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0
-#define HCLGE_CFG_MTA_ITEM_IDX_S 0
-#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0)
-struct hclge_cfg_func_mta_item_cmd {
- __le16 item_idx; /* Only used lowest 12 bit */
- u8 accept; /* Only used lowest 1 bit */
- u8 rsv[21];
-};
-
struct hclge_mac_vlan_add_cmd {
__le16 flags;
__le16 mac_addr_hi16;
@@ -778,6 +778,7 @@ struct hclge_reset_cmd {
};
#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
+#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
#define HCLGE_CMD_SERDES_DONE_B BIT(0)
#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1)
struct hclge_serdes_lb_cmd {
@@ -818,6 +819,76 @@ struct hclge_set_led_state_cmd {
u8 rsv2[20];
};
+struct hclge_get_fd_mode_cmd {
+ u8 mode;
+ u8 enable;
+ u8 rsv[22];
+};
+
+struct hclge_get_fd_allocation_cmd {
+ __le32 stage1_entry_num;
+ __le32 stage2_entry_num;
+ __le16 stage1_counter_num;
+ __le16 stage2_counter_num;
+ u8 rsv[12];
+};
+
+struct hclge_set_fd_key_config_cmd {
+ u8 stage;
+ u8 key_select;
+ u8 inner_sipv6_word_en;
+ u8 inner_dipv6_word_en;
+ u8 outer_sipv6_word_en;
+ u8 outer_dipv6_word_en;
+ u8 rsv1[2];
+ __le32 tuple_mask;
+ __le32 meta_data_mask;
+ u8 rsv2[8];
+};
+
+#define HCLGE_FD_EPORT_SW_EN_B 0
+struct hclge_fd_tcam_config_1_cmd {
+ u8 stage;
+ u8 xy_sel;
+ u8 port_info;
+ u8 rsv1[1];
+ __le32 index;
+ u8 entry_vld;
+ u8 rsv2[7];
+ u8 tcam_data[8];
+};
+
+struct hclge_fd_tcam_config_2_cmd {
+ u8 tcam_data[24];
+};
+
+struct hclge_fd_tcam_config_3_cmd {
+ u8 tcam_data[20];
+ u8 rsv[4];
+};
+
+#define HCLGE_FD_AD_DROP_B 0
+#define HCLGE_FD_AD_DIRECT_QID_B 1
+#define HCLGE_FD_AD_QID_S 2
+#define HCLGE_FD_AD_QID_M GENMASK(12, 2)
+#define HCLGE_FD_AD_USE_COUNTER_B 12
+#define HCLGE_FD_AD_COUNTER_NUM_S 13
+#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
+#define HCLGE_FD_AD_NXT_STEP_B 20
+#define HCLGE_FD_AD_NXT_KEY_S 21
+#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21)
+#define HCLGE_FD_AD_WR_RULE_ID_B 0
+#define HCLGE_FD_AD_RULE_ID_S 1
+#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1)
+
+struct hclge_fd_ad_config_cmd {
+ u8 stage;
+ u8 rsv1[3];
+ __le32 index;
+ __le64 ad_data;
+ u8 rsv2[8];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
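
For reference, the HCLGE_CFG_UMV_TBL_SPACE_{S,M} pair added above follows the
driver's usual shift/mask convention: the GENMASK() value selects the field in
a 32-bit config word and the _S value shifts it down to bit 0. A minimal,
hedged sketch of that extraction pattern (get_cfg_field is an illustrative
helper, not a driver symbol):

    #include <linux/bits.h>	/* GENMASK() */

    #define CFG_UMV_TBL_SPACE_S	16
    #define CFG_UMV_TBL_SPACE_M	GENMASK(31, 16)

    /* Mask out the field, then shift it down to bit 0. */
    static inline u32 get_cfg_field(u32 word, u32 mask, u32 shift)
    {
    	return (word & mask) >> shift;
    }

This mirrors how hclge_parse_cfg() later in this patch reads the UMV table
space from req->param[1] with hnae3_get_field().
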
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index f08ebb7caaaf..e72f724123d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -73,6 +73,7 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
u8 *tc, bool *changed)
{
+ bool has_ets_tc = false;
u32 total_ets_bw = 0;
u8 max_tc = 0;
u8 i;
@@ -100,13 +101,14 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
*changed = true;
total_ets_bw += ets->tc_tx_bw[i];
- break;
+ has_ets_tc = true;
+ break;
default:
return -EINVAL;
}
}
- if (total_ets_bw != BW_PERCENT)
+ if (has_ets_tc && total_ets_bw != BW_PERCENT)
return -EINVAL;
*tc = max_tc + 1;
@@ -182,7 +184,9 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
return ret;
- hclge_tm_schd_info_update(hdev, num_tc);
+ ret = hclge_tm_schd_info_update(hdev, num_tc);
+ if (ret)
+ return ret;
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -308,7 +312,9 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
return -EINVAL;
}
- hclge_tm_schd_info_update(hdev, tc);
+ ret = hclge_tm_schd_info_update(hdev, tc);
+ if (ret)
+ return ret;
ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
if (ret)
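
The hclge_ets_validate() change above relaxes the bandwidth check: the per-TC
bandwidths only need to sum to 100% (BW_PERCENT) when at least one TC is
actually configured for ETS scheduling. A standalone sketch of that rule
(validate_ets_bw and its parameters are illustrative, not driver symbols):

    /* Return 0 when the ETS bandwidth assignment is acceptable. */
    static int validate_ets_bw(const u8 *tc_tx_bw, u8 num_tc, bool has_ets_tc)
    {
    	u32 total = 0;
    	u8 i;

    	for (i = 0; i < num_tc; i++)
    		total += tc_tx_bw[i];

    	/* Only ETS TCs are bound by the 100% budget. */
    	if (has_ets_tc && total != 100)
    		return -EINVAL;

    	return 0;
    }
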
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
new file mode 100644
index 000000000000..f7e363b90fe0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#include "hclge_err.h"
+
+static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
+ { .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" },
+ { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
+ { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
+ { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
+ { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_igu_com_err_int[] = {
+ { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
+ { .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = {
+ { .int_msk = BIT(0), .msg = "rx_buf_overflow" },
+ { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
+ { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
+ { .int_msk = BIT(3), .msg = "tx_buf_overflow" },
+ { .int_msk = BIT(4), .msg = "tx_buf_underrun" },
+ { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ncsi_err_int[] = {
+ { .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int0[] = {
+ { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" },
+ { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" },
+ { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" },
+ { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" },
+ { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" },
+ { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" },
+ { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" },
+ { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" },
+ { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" },
+ { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" },
+ { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" },
+ { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" },
+ { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" },
+ { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" },
+ { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_1bit_err" },
+ { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" },
+ { .int_msk = BIT(27),
+ .msg = "flow_director_ad_mem0_ecc_1bit_err" },
+ { .int_msk = BIT(28),
+ .msg = "flow_director_ad_mem1_ecc_1bit_err" },
+ { .int_msk = BIT(29),
+ .msg = "rx_vlan_tag_memory_ecc_1bit_err" },
+ { .int_msk = BIT(30),
+ .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int1[] = {
+ { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
+ { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" },
+ { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
+ { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
+ { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
+ { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
+ { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
+ { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
+ { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" },
+ { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
+ { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
+ { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
+ { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
+ { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
+ { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
+ { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
+ { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
+ { .int_msk = BIT(27),
+ .msg = "flow_director_ad_mem0_ecc_mbit_err" },
+ { .int_msk = BIT(28),
+ .msg = "flow_director_ad_mem1_ecc_mbit_err" },
+ { .int_msk = BIT(29),
+ .msg = "rx_vlan_tag_memory_ecc_mbit_err" },
+ { .int_msk = BIT(30),
+ .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_pf_int[] = {
+ { .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" },
+ { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int2[] = {
+ { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" },
+ { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_int3[] = {
+ { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
+ { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
+ { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+struct hclge_tm_sch_ecc_info {
+ const char *name;
+};
+
+static const struct hclge_tm_sch_ecc_info hclge_tm_sch_ecc_err[7][15] = {
+ {
+ { .name = "QSET_QUEUE_CTRL:PRI_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPA_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPB_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRA_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRB_LEN TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPA_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPB_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRA_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRB_HPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:QS_LINKLIST TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPA_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:SPB_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRA_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:WRRB_TPTR TAB" },
+ { .name = "QSET_QUEUE_CTRL:QS_DEFICITCNT TAB" },
+ },
+ {
+ { .name = "ROCE_QUEUE_CTRL:QS_LEN TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QS_TPTR TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QS_HPTR TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QLINKLIST TAB" },
+ { .name = "ROCE_QUEUE_CTRL:QCLEN TAB" },
+ },
+ {
+ { .name = "NIC_QUEUE_CTRL:QS_LEN TAB" },
+ { .name = "NIC_QUEUE_CTRL:QS_TPTR TAB" },
+ { .name = "NIC_QUEUE_CTRL:QS_HPTR TAB" },
+ { .name = "NIC_QUEUE_CTRL:QLINKLIST TAB" },
+ { .name = "NIC_QUEUE_CTRL:QCLEN TAB" },
+ },
+ {
+ { .name = "RAM_CFG_CTRL:CSHAP TAB" },
+ { .name = "RAM_CFG_CTRL:PSHAP TAB" },
+ },
+ {
+ { .name = "SHAPER_CTRL:PSHAP TAB" },
+ },
+ {
+ { .name = "MSCH_CTRL" },
+ },
+ {
+ { .name = "TOP_CTRL" },
+ },
+};
+
+static const struct hclge_hw_error hclge_tm_sch_err_int[] = {
+ { .int_msk = BIT(0), .msg = "tm_sch_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_full_err" },
+ { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(12),
+ .msg = "tm_sch_port_shap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(13),
+ .msg = "tm_sch_port_shap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(14),
+ .msg = "tm_sch_pg_pshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(15),
+ .msg = "tm_sch_pg_pshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(16),
+ .msg = "tm_sch_pg_cshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(17),
+ .msg = "tm_sch_pg_cshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(18),
+ .msg = "tm_sch_pri_pshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(19),
+ .msg = "tm_sch_pri_pshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(20),
+ .msg = "tm_sch_pri_cshap_offset_fifo_wr_full_err" },
+ { .int_msk = BIT(21),
+ .msg = "tm_sch_pri_cshap_offset_fifo_rd_empty_err" },
+ { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_full_err" },
+ { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_empty_err" },
+ { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_full_err" },
+ { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_empty_err" },
+ { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_full_err" },
+ { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_empty_err" },
+ { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_full_err" },
+ { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_empty_err" },
+ { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_full_err" },
+ { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_empty_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_qcn_ecc_err_int[] = {
+ { .int_msk = BIT(0), .msg = "qcn_byte_mem_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "qcn_time_mem_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "qcn_fb_mem_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "qcn_link_mem_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
+ { .int_msk = BIT(8), .msg = "qcn_rate_mem_ecc_1bit_err" },
+ { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
+ { .int_msk = BIT(10), .msg = "qcn_tmplt_mem_ecc_1bit_err" },
+ { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
+ { .int_msk = BIT(12), .msg = "qcn_shap_cfg_mem_ecc_1bit_err" },
+ { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "qcn_gp0_barrel_mem_ecc_1bit_err" },
+ { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
+ { .int_msk = BIT(16), .msg = "qcn_gp1_barrel_mem_ecc_1bit_err" },
+ { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
+ { .int_msk = BIT(18), .msg = "qcn_gp2_barrel_mem_ecc_1bit_err" },
+ { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
+ { .int_msk = BIT(20), .msg = "qcn_gp3_barral_mem_ecc_1bit_err" },
+ { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static void hclge_log_error(struct device *dev,
+ const struct hclge_hw_error *err_list,
+ u32 err_sts)
+{
+ const struct hclge_hw_error *err;
+ int i = 0;
+
+ while (err_list[i].msg) {
+ err = &err_list[i];
+ if (!(err->int_msk & err_sts)) {
+ i++;
+ continue;
+ }
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg, err_sts);
+ i++;
+ }
+}
+
+/* hclge_cmd_query_error: read the error information
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @cmd: command opcode
+ * @flag: flag for extended command structure
+ * @w_num: word offset used to set the interrupt type to be read.
+ * @int_type: interrupt type for which the error info will be
+ * read (RAS-CE/RAS-NFE/RAS-FE etc).
+ *
+ * This function queries the error info from the hw register(s) using command
+ */
+static int hclge_cmd_query_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 cmd,
+ u16 flag, u8 w_num,
+ enum hclge_err_int_type int_type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int num = 1;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ if (flag) {
+ desc[0].flag |= cpu_to_le16(flag);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ num = 2;
+ }
+ if (w_num)
+ desc[0].data[w_num] = cpu_to_le32(int_type);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "query error cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+/* hclge_cmd_clear_error: clear the error status
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @desc_src: prefilled descriptor from the previous command for reuse
+ * @cmd: command opcode
+ * @flag: flag for extended command structure
+ *
+ * This function clears the error status in the hw register(s) using command
+ */
+static int hclge_cmd_clear_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ struct hclge_desc *desc_src,
+ u32 cmd, u16 flag)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int num = 1;
+ int ret, i;
+
+ if (cmd) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ if (flag) {
+ desc[0].flag |= cpu_to_le16(flag);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+ num = 2;
+ }
+ if (desc_src) {
+ for (i = 0; i < 6; i++) {
+ desc[0].data[i] = desc_src[0].data[i];
+ if (flag)
+ desc[1].data[i] = desc_src[1].data[i];
+ }
+ }
+ } else {
+ hclge_cmd_reuse_desc(&desc[0], false);
+ if (flag) {
+ desc[0].flag |= cpu_to_le16(flag);
+ hclge_cmd_reuse_desc(&desc[1], false);
+ num = 2;
+ }
+ }
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "clear error cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int hclge_enable_common_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
+
+ if (en) {
+ /* enable COMMON error interrupts */
+ desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
+ desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
+ HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
+ desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN);
+ desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
+ } else {
+ /* disable COMMON error interrupts */
+ desc[0].data[0] = 0;
+ desc[0].data[2] = 0;
+ desc[0].data[3] = 0;
+ desc[0].data[4] = 0;
+ desc[0].data[5] = 0;
+ }
+ desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
+ desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
+ HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
+ desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
+ desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK);
+ desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable COMMON err interrupts\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->pdev->revision < 0x21)
+ return 0;
+
+ /* enable/disable NCSI error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable NCSI error interrupts\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* enable/disable error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+ desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev,
+ "failed(%d) to enable/disable IGU common interrupts\n",
+ ret);
+ return ret;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+ desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev,
+ "failed(%d) to enable/disable IGU-EGU TNL interrupts\n",
+ ret);
+ return ret;
+ }
+
+ ret = hclge_enable_ncsi_error(hdev, en);
+ if (ret)
+ dev_err(dev, "fail(%d) to en/disable err int\n", ret);
+
+ return ret;
+}
+
+static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
+ bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* enable/disable PPP error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+
+ if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
+ } else {
+ desc[0].data[0] = 0;
+ desc[0].data[1] = 0;
+ }
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
+ } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
+ } else {
+ desc[0].data[0] = 0;
+ desc[0].data[1] = 0;
+ }
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable PPP error interrupts\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
+ en);
+ if (ret) {
+ dev_err(dev,
+ "failed(%d) to enable/disable PPP error intr 0,1\n",
+ ret);
+ return ret;
+ }
+
+ ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
+ en);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to enable/disable PPP error intr 2,3\n",
+ ret);
+
+ return ret;
+}
+
+int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* enable TM SCH hw errors */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);
+ else
+ desc.data[0] = 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev, "failed(%d) to configure TM SCH errors\n", ret);
+ return ret;
+ }
+
+ /* enable TM QCN hw errors */
+ ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
+ 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read TM QCN CFG status\n", ret);
+ return ret;
+ }
+
+ hclge_cmd_reuse_desc(&desc, false);
+ if (en)
+ desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+ else
+ desc.data[1] = 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to configure TM QCN mem errors\n", ret);
+
+ return ret;
+}
+
+static void hclge_process_common_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ u32 err_sts;
+ int ret;
+
+ /* read err sts */
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_COMMON_ECC_INT_CFG,
+ HCLGE_CMD_FLAG_NEXT, 0, 0);
+ if (ret) {
+ dev_err(dev,
+ "failed(=%d) to query COMMON error interrupt status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts);
+
+ err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts);
+
+ err_sts = (le32_to_cpu(desc[0].data[1]) >> HCLGE_CMDQ_ROC_ECC_INT_SHIFT)
+ & HCLGE_CMDQ_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts);
+
+ if ((le32_to_cpu(desc[0].data[3])) & BIT(0))
+ dev_warn(dev, "imp_rd_data_poison_err found\n");
+
+ err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) &
+ HCLGE_TQP_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts);
+
+ err_sts = (le32_to_cpu(desc[0].data[5])) &
+ HCLGE_IMP_ITCM4_ECC_INT_MASK;
+ hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts);
+
+ /* clear error interrupts */
+ desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK);
+ desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK |
+ HCLGE_CMDQ_ROCEE_ECC_CLR_MASK);
+ desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK);
+ desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK);
+
+ ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
+ HCLGE_CMD_FLAG_NEXT);
+ if (ret)
+ dev_err(dev,
+ "failed(%d) to clear COMMON error interrupt status\n",
+ ret);
+}
+
+static void hclge_process_ncsi_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc_rd;
+ struct hclge_desc desc_wr;
+ u32 err_sts;
+ int ret;
+
+ if (hdev->pdev->revision < 0x21)
+ return;
+
+ /* read NCSI error status */
+ ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY,
+ 0, 1, HCLGE_NCSI_ERR_INT_TYPE);
+ if (ret) {
+ dev_err(dev,
+ "failed(=%d) to query NCSI error interrupt status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = le32_to_cpu(desc_rd.data[0]);
+ hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts);
+
+ /* clear err int */
+ ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
+ HCLGE_NCSI_INT_CLR, 0);
+ if (ret)
+ dev_err(dev, "failed(=%d) to clear NCSI intrerrupt status\n",
+ ret);
+}
+
+static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type int_type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc_rd;
+ struct hclge_desc desc_wr;
+ u32 err_sts;
+ int ret;
+
+ /* read IGU common err sts */
+ ret = hclge_cmd_query_error(hdev, &desc_rd,
+ HCLGE_IGU_COMMON_INT_QUERY,
+ 0, 1, int_type);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to query IGU common int status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = le32_to_cpu(desc_rd.data[0]) &
+ HCLGE_IGU_COM_INT_MASK;
+ hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts);
+
+ /* clear err int */
+ ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
+ HCLGE_IGU_COMMON_INT_CLR, 0);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to clear IGU common int status\n",
+ ret);
+ return;
+ }
+
+ /* read IGU-EGU TNL err sts */
+ ret = hclge_cmd_query_error(hdev, &desc_rd,
+ HCLGE_IGU_EGU_TNL_INT_QUERY,
+ 0, 1, int_type);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n",
+ ret);
+ return;
+ }
+
+ /* log err */
+ err_sts = le32_to_cpu(desc_rd.data[0]) &
+ HCLGE_IGU_EGU_TNL_INT_MASK;
+ hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts);
+
+ /* clear err int */
+ ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
+ HCLGE_IGU_EGU_TNL_INT_CLR, 0);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n",
+ ret);
+ return;
+ }
+
+ hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE);
+}
+
+static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
+ enum hclge_err_int_type int_type)
+{
+ enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
+ struct device *dev = &hdev->pdev->dev;
+ const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3 = NULL;
+ struct hclge_desc desc[2];
+ u32 err_sts;
+ int ret;
+
+ /* read PPP INT sts */
+ ret = hclge_cmd_query_error(hdev, &desc[0], cmd,
+ HCLGE_CMD_FLAG_NEXT, 5, int_type);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to query PPP interrupt status\n",
+ ret);
+ return -EIO;
+ }
+
+ /* log error */
+ if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
+ hw_err_lst1 = &hclge_ppp_mpf_int0[0];
+ hw_err_lst2 = &hclge_ppp_mpf_int1[0];
+ hw_err_lst3 = &hclge_ppp_pf_int[0];
+ } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
+ hw_err_lst1 = &hclge_ppp_mpf_int2[0];
+ hw_err_lst2 = &hclge_ppp_mpf_int3[0];
+ } else {
+ dev_err(dev, "invalid command(=%d)\n", cmd);
+ return -EINVAL;
+ }
+
+ err_sts = le32_to_cpu(desc[0].data[2]);
+ if (err_sts) {
+ hclge_log_error(dev, hw_err_lst1, err_sts);
+ reset_level = HNAE3_FUNC_RESET;
+ }
+
+ err_sts = le32_to_cpu(desc[0].data[3]);
+ if (err_sts) {
+ hclge_log_error(dev, hw_err_lst2, err_sts);
+ reset_level = HNAE3_FUNC_RESET;
+ }
+
+ err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
+ if (err_sts && hw_err_lst3) {
+ hclge_log_error(dev, hw_err_lst3, err_sts);
+ reset_level = HNAE3_FUNC_RESET;
+ }
+
+ /* clear PPP INT */
+ ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
+ HCLGE_CMD_FLAG_NEXT);
+ if (ret) {
+ dev_err(dev, "failed(=%d) to clear PPP interrupt status\n",
+ ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void hclge_process_ppp_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type int_type)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ /* read PPP INT0,1 sts */
+ ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD,
+ int_type);
+ if (ret < 0) {
+ dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n",
+ ret);
+ return;
+ }
+
+ /* read err PPP INT2,3 sts */
+ ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD,
+ int_type);
+ if (ret < 0)
+ dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n",
+ ret);
+}
+
+static void hclge_process_tm_sch_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ const struct hclge_tm_sch_ecc_info *tm_sch_ecc_info;
+ struct hclge_desc desc;
+ u32 ecc_info;
+ u8 module_no;
+ u8 ram_no;
+ int ret;
+
+ /* read TM scheduler errors */
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_MBIT_ECC_INFO_CMD, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH mbit ECC err info\n", ret);
+ return;
+ }
+ ecc_info = le32_to_cpu(desc.data[0]);
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CMD, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH ECC err status\n", ret);
+ return;
+ }
+
+ /* log TM scheduler errors */
+ if (le32_to_cpu(desc.data[0])) {
+ hclge_log_error(dev, &hclge_tm_sch_err_int[0],
+ le32_to_cpu(desc.data[0]));
+ if (le32_to_cpu(desc.data[0]) & 0x2) {
+ module_no = (ecc_info >> 20) & 0xF;
+ ram_no = (ecc_info >> 16) & 0xF;
+ tm_sch_ecc_info =
+ &hclge_tm_sch_ecc_err[module_no][ram_no];
+ dev_warn(dev, "ecc err module:ram=%s\n",
+ tm_sch_ecc_info->name);
+ dev_warn(dev, "ecc memory address = 0x%x\n",
+ ecc_info & 0xFFFF);
+ }
+ }
+
+ /* clear TM scheduler errors */
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to clear TM SCH error status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_CE, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH CE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to clear TM SCH CE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_NFE, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH NFE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to clear TM SCH NFE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_SCH_ECC_ERR_RINT_FE, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read SCH FE status\n", ret);
+ return;
+ }
+
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret)
+ dev_err(dev, "failed(%d) to clear TM SCH FE status\n", ret);
+}
+
+static void hclge_process_tm_qcn_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* read QCN errors */
+ ret = hclge_cmd_query_error(hdev, &desc,
+ HCLGE_TM_QCN_MEM_INT_INFO_CMD, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to read QCN ECC err status\n", ret);
+ return;
+ }
+
+ /* log QCN errors */
+ if (le32_to_cpu(desc.data[0]))
+ hclge_log_error(dev, &hclge_qcn_ecc_err_int[0],
+ le32_to_cpu(desc.data[0]));
+
+ /* clear QCN errors */
+ ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ if (ret)
+ dev_err(dev, "failed(%d) to clear QCN error status\n", ret);
+}
+
+static void hclge_process_tm_error(struct hclge_dev *hdev,
+ enum hclge_err_int_type type)
+{
+ hclge_process_tm_sch_error(hdev);
+ hclge_process_tm_qcn_error(hdev);
+}
+
+static const struct hclge_hw_blk hw_blk[] = {
+ { .msk = BIT(0), .name = "IGU_EGU",
+ .enable_error = hclge_enable_igu_egu_error,
+ .process_error = hclge_process_igu_egu_error, },
+ { .msk = BIT(5), .name = "COMMON",
+ .enable_error = hclge_enable_common_error,
+ .process_error = hclge_process_common_error, },
+ { .msk = BIT(4), .name = "TM",
+ .enable_error = hclge_enable_tm_hw_error,
+ .process_error = hclge_process_tm_error, },
+ { .msk = BIT(1), .name = "PPP",
+ .enable_error = hclge_enable_ppp_error,
+ .process_error = hclge_process_ppp_error, },
+ { /* sentinel */ }
+};
+
+int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret = 0;
+ int i = 0;
+
+ while (hw_blk[i].name) {
+ if (!hw_blk[i].enable_error) {
+ i++;
+ continue;
+ }
+ ret = hw_blk[i].enable_error(hdev, state);
+ if (ret) {
+ dev_err(dev, "fail(%d) to en/disable err int\n", ret);
+ return ret;
+ }
+ i++;
+ }
+
+ return ret;
+}
+
+pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ u32 sts, val;
+ int i = 0;
+
+ sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
+ /* Processing Non-fatal errors */
+ if (sts & HCLGE_RAS_REG_NFE_MASK) {
+ val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF;
+ i = 0;
+ while (hw_blk[i].name) {
+ if (!(hw_blk[i].msk & val)) {
+ i++;
+ continue;
+ }
+ dev_warn(dev, "%s ras non-fatal error identified\n",
+ hw_blk[i].name);
+ if (hw_blk[i].process_error)
+ hw_blk[i].process_error(hdev,
+ HCLGE_ERR_INT_RAS_NFE);
+ i++;
+ }
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
new file mode 100644
index 000000000000..e0e3b5861495
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_ERR_H
+#define __HCLGE_ERR_H
+
+#include "hclge_main.h"
+
+#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
+#define HCLGE_RAS_REG_FE_MASK 0xFF
+#define HCLGE_RAS_REG_NFE_MASK 0xFF00
+#define HCLGE_RAS_REG_NFE_SHIFT 8
+
+#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
+#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000
+#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN 0x300
+#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK 0x300
+#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN 0xFFFF
+#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK 0xFFFF
+#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN 0xFFFF0000
+#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK 0xFFFF0000
+#define HCLGE_IMP_RD_POISON_ERR_INT_EN 0x0100
+#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100
+#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF
+#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF
+#define HCLGE_IGU_ERR_INT_EN 0x0000066F
+#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F
+#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF
+#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK 0xFFFFFFFF
+#define HCLGE_PPP_PF_ERR_INT_EN 0x0003
+#define HCLGE_PPP_PF_ERR_INT_EN_MASK 0x0003
+#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
+#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
+#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
+#define HCLGE_NCSI_ERR_INT_EN 0x3
+#define HCLGE_NCSI_ERR_INT_TYPE 0x9
+
+#define HCLGE_IMP_TCM_ECC_INT_MASK 0xFFFF
+#define HCLGE_IMP_ITCM4_ECC_INT_MASK 0x3
+#define HCLGE_CMDQ_ECC_INT_MASK 0xFFFF
+#define HCLGE_CMDQ_ROC_ECC_INT_SHIFT 16
+#define HCLGE_TQP_ECC_INT_MASK 0xFFF
+#define HCLGE_TQP_ECC_INT_SHIFT 16
+#define HCLGE_IMP_TCM_ECC_CLR_MASK 0xFFFF
+#define HCLGE_IMP_ITCM4_ECC_CLR_MASK 0x3
+#define HCLGE_CMDQ_NIC_ECC_CLR_MASK 0xFFFF
+#define HCLGE_CMDQ_ROCEE_ECC_CLR_MASK 0xFFFF0000
+#define HCLGE_TQP_IMP_ERR_CLR_MASK 0x0FFF0001
+#define HCLGE_IGU_COM_INT_MASK 0xF
+#define HCLGE_IGU_EGU_TNL_INT_MASK 0x3F
+#define HCLGE_PPP_PF_INT_MASK 0x100
+
+enum hclge_err_int_type {
+ HCLGE_ERR_INT_MSIX = 0,
+ HCLGE_ERR_INT_RAS_CE = 1,
+ HCLGE_ERR_INT_RAS_NFE = 2,
+ HCLGE_ERR_INT_RAS_FE = 3,
+};
+
+struct hclge_hw_blk {
+ u32 msk;
+ const char *name;
+ int (*enable_error)(struct hclge_dev *hdev, bool en);
+ void (*process_error)(struct hclge_dev *hdev,
+ enum hclge_err_int_type type);
+};
+
+struct hclge_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
+int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en);
+pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev);
+#endif
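
The HCLGE_RAS_REG_* definitions above encode the layout of
HCLGE_RAS_PF_OTHER_INT_STS_REG: fatal-error block bits sit in bits 7:0
(HCLGE_RAS_REG_FE_MASK) and non-fatal block bits in bits 15:8
(HCLGE_RAS_REG_NFE_MASK), with each decoded bit matched against hw_blk[].msk.
A worked sketch of the decode, with a made-up status value for illustration:

    u32 sts = 0x0300;	/* example: NFE bits 0 and 1 set, no FE bits */
    u32 nfe = (sts & HCLGE_RAS_REG_NFE_MASK) >> HCLGE_RAS_REG_NFE_SHIFT;
    u32 fe  = sts & HCLGE_RAS_REG_FE_MASK;

    /* nfe == 0x03: BIT(0) and BIT(1) match the IGU_EGU and PPP entries
     * in hw_blk[], so those two process_error handlers would run;
     * fe == 0, so no fatal-error block is flagged.
     */
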
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 8577dfc799ad..5234b5373ed3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -19,20 +19,18 @@
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
+#include "hclge_err.h"
#include "hnae3.h"
#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
-#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
- enum hclge_mta_dmac_sel_type mta_mac_sel,
- bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+ u16 *allocated_size, bool is_alloc);
static struct hnae3_ae_algo ae_algo;
@@ -51,175 +49,12 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
- "Mac Loopback test",
- "Serdes Loopback test",
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
"Phy Loopback test"
};
-static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
- {"igu_rx_oversize_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
- {"igu_rx_undersize_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
- {"igu_rx_out_all_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
- {"igu_rx_uni_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
- {"igu_rx_multi_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
- {"igu_rx_broad_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
- {"egu_tx_out_all_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
- {"egu_tx_uni_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
- {"egu_tx_multi_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
- {"egu_tx_broad_pkt",
- HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
- {"ssu_ppp_mac_key_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
- {"ssu_ppp_host_key_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
- {"ppp_ssu_mac_rlt_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
- {"ppp_ssu_host_rlt_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
- {"ssu_tx_in_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
- {"ssu_tx_out_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
- {"ssu_rx_in_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
- {"ssu_rx_out_num",
- HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
-};
-
-static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
- {"igu_rx_err_pkt",
- HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
- {"igu_rx_no_eof_pkt",
- HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
- {"igu_rx_no_sof_pkt",
- HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
- {"egu_tx_1588_pkt",
- HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
- {"ssu_full_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
- {"ssu_part_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
- {"ppp_key_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
- {"ppp_rlt_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
- {"ssu_key_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
- {"pkt_curr_buf_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
- {"qcn_fb_rcv_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
- {"qcn_fb_drop_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
- {"qcn_fb_invaild_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
- {"rx_packet_tc0_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
- {"rx_packet_tc1_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
- {"rx_packet_tc2_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
- {"rx_packet_tc3_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
- {"rx_packet_tc4_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
- {"rx_packet_tc5_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
- {"rx_packet_tc6_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
- {"rx_packet_tc7_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
- {"rx_packet_tc0_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
- {"rx_packet_tc1_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
- {"rx_packet_tc2_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
- {"rx_packet_tc3_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
- {"rx_packet_tc4_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
- {"rx_packet_tc5_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
- {"rx_packet_tc6_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
- {"rx_packet_tc7_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
- {"tx_packet_tc0_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
- {"tx_packet_tc1_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
- {"tx_packet_tc2_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
- {"tx_packet_tc3_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
- {"tx_packet_tc4_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
- {"tx_packet_tc5_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
- {"tx_packet_tc6_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
- {"tx_packet_tc7_in_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
- {"tx_packet_tc0_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
- {"tx_packet_tc1_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
- {"tx_packet_tc2_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
- {"tx_packet_tc3_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
- {"tx_packet_tc4_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
- {"tx_packet_tc5_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
- {"tx_packet_tc6_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
- {"tx_packet_tc7_out_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
- {"pkt_curr_buf_tc0_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
- {"pkt_curr_buf_tc1_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
- {"pkt_curr_buf_tc2_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
- {"pkt_curr_buf_tc3_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
- {"pkt_curr_buf_tc4_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
- {"pkt_curr_buf_tc5_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
- {"pkt_curr_buf_tc6_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
- {"pkt_curr_buf_tc7_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
- {"mb_uncopy_num",
- HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
- {"lo_pri_unicast_rlt_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
- {"hi_pri_multicast_rlt_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
- {"lo_pri_multicast_rlt_drop_num",
- HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
- {"rx_oq_drop_pkt_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
- {"tx_oq_drop_pkt_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
- {"nic_l2_err_drop_pkt_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
- {"roc_l2_err_drop_pkt_cnt",
- HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
-};
-
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
{"mac_tx_mac_pause_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
@@ -394,109 +229,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
-{
-#define HCLGE_64_BIT_CMD_NUM 5
-#define HCLGE_64_BIT_RTN_DATANUM 4
- u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
- struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
- __le64 *desc_data;
- int i, k, n;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
- ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get 64 bit pkt stats fail, status = %d.\n", ret);
- return ret;
- }
-
- for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
- if (unlikely(i == 0)) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_64_BIT_RTN_DATANUM - 1;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_64_BIT_RTN_DATANUM;
- }
- for (k = 0; k < n; k++) {
- *data++ += le64_to_cpu(*desc_data);
- desc_data++;
- }
- }
-
- return 0;
-}
-
-static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
-{
- stats->pkt_curr_buf_cnt = 0;
- stats->pkt_curr_buf_tc0_cnt = 0;
- stats->pkt_curr_buf_tc1_cnt = 0;
- stats->pkt_curr_buf_tc2_cnt = 0;
- stats->pkt_curr_buf_tc3_cnt = 0;
- stats->pkt_curr_buf_tc4_cnt = 0;
- stats->pkt_curr_buf_tc5_cnt = 0;
- stats->pkt_curr_buf_tc6_cnt = 0;
- stats->pkt_curr_buf_tc7_cnt = 0;
-}
-
-static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
-{
-#define HCLGE_32_BIT_CMD_NUM 8
-#define HCLGE_32_BIT_RTN_DATANUM 8
-
- struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
- struct hclge_32_bit_stats *all_32_bit_stats;
- __le32 *desc_data;
- int i, k, n;
- u64 *data;
- int ret;
-
- all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
- data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);
-
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
- ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get 32 bit pkt stats fail, status = %d.\n", ret);
-
- return ret;
- }
-
- hclge_reset_partial_32bit_counter(all_32_bit_stats);
- for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
- if (unlikely(i == 0)) {
- __le16 *desc_data_16bit;
-
- all_32_bit_stats->igu_rx_err_pkt +=
- le32_to_cpu(desc[i].data[0]);
-
- desc_data_16bit = (__le16 *)&desc[i].data[1];
- all_32_bit_stats->igu_rx_no_eof_pkt +=
- le16_to_cpu(*desc_data_16bit);
-
- desc_data_16bit++;
- all_32_bit_stats->igu_rx_no_sof_pkt +=
- le16_to_cpu(*desc_data_16bit);
-
- desc_data = &desc[i].data[2];
- n = HCLGE_32_BIT_RTN_DATANUM - 4;
- } else {
- desc_data = (__le32 *)&desc[i];
- n = HCLGE_32_BIT_RTN_DATANUM;
- }
- for (k = 0; k < n; k++) {
- *data++ += le32_to_cpu(*desc_data);
- desc_data++;
- }
- }
-
- return 0;
-}
-
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -623,7 +355,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -631,7 +363,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -675,14 +407,8 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
struct net_device_stats *net_stats)
{
net_stats->tx_dropped = 0;
- net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
- net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
- net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
-
net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
@@ -717,12 +443,6 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
dev_err(&hdev->pdev->dev,
"Update MAC stats fail, status = %d.\n", status);
- status = hclge_32_bit_update_stats(hdev);
- if (status)
- dev_err(&hdev->pdev->dev,
- "Update 32 bit stats fail, status = %d.\n",
- status);
-
hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}
@@ -743,18 +463,6 @@ static void hclge_update_stats(struct hnae3_handle *handle,
"Update MAC stats fail, status = %d.\n",
status);
- status = hclge_32_bit_update_stats(hdev);
- if (status)
- dev_err(&hdev->pdev->dev,
- "Update 32 bit stats fail, status = %d.\n",
- status);
-
- status = hclge_64_bit_update_stats(hdev);
- if (status)
- dev_err(&hdev->pdev->dev,
- "Update 64 bit stats fail, status = %d.\n",
- status);
-
status = hclge_tqps_update_stats(handle);
if (status)
dev_err(&hdev->pdev->dev,
@@ -768,7 +476,10 @@ static void hclge_update_stats(struct hnae3_handle *handle,
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
-#define HCLGE_LOOPBACK_TEST_FLAGS 0x7
+#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
+ HNAE3_SUPPORT_PHY_LOOPBACK |\
+ HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -782,19 +493,19 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
if (stringset == ETH_SS_TEST) {
/* clear loopback bit flags at first */
handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
- if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
+ if (hdev->pdev->revision >= 0x21 ||
+ hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
count += 1;
- handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
+ handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count++;
- handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
+ count += 2;
+ handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
} else if (stringset == ETH_SS_STATS) {
count = ARRAY_SIZE(g_mac_stats_string) +
- ARRAY_SIZE(g_all_32bit_stats_string) +
- ARRAY_SIZE(g_all_64bit_stats_string) +
hclge_tqps_get_sset_count(handle, stringset);
}
@@ -814,33 +525,29 @@ static void hclge_get_strings(struct hnae3_handle *handle,
g_mac_stats_string,
size,
p);
- size = ARRAY_SIZE(g_all_32bit_stats_string);
- p = hclge_comm_get_strings(stringset,
- g_all_32bit_stats_string,
- size,
- p);
- size = ARRAY_SIZE(g_all_64bit_stats_string);
- p = hclge_comm_get_strings(stringset,
- g_all_64bit_stats_string,
- size,
- p);
p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
- if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
+ if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
+ memcpy(p,
+ hns3_nic_test_strs[HNAE3_LOOP_APP],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
memcpy(p,
- hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
+ hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
+ if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
memcpy(p,
- hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
+ hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
memcpy(p,
- hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
+ hns3_nic_test_strs[HNAE3_LOOP_PHY],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -857,14 +564,6 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string),
data);
- p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
- g_all_32bit_stats_string,
- ARRAY_SIZE(g_all_32bit_stats_string),
- p);
- p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
- g_all_64bit_stats_string,
- ARRAY_SIZE(g_all_64bit_stats_string),
- p);
p = hclge_tqps_get_stats(handle, p);
}
@@ -1079,6 +778,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_SPEED_ABILITY_M,
HCLGE_CFG_SPEED_ABILITY_S);
+ cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_UMV_TBL_SPACE_M,
+ HCLGE_CFG_UMV_TBL_SPACE_S);
+ if (!cfg->umv_space)
+ cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
/* hclge_get_cfg: query the static parameter from flash
@@ -1157,6 +861,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
+ hdev->wanted_umv_size = cfg.umv_space;
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
@@ -1657,11 +1362,13 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
- u32 rx_all = hdev->pkt_buf_size;
+#define HCLGE_BUF_SIZE_UNIT 128
+ u32 rx_all = hdev->pkt_buf_size, aligned_mps;
int no_pfc_priv_num, pfc_priv_num;
struct hclge_priv_buf *priv;
int i;
+ aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
/* When DCB is not supported, rx private
@@ -1680,13 +1387,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
if (hdev->hw_tc_map & BIT(i)) {
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
- priv->wl.low = hdev->mps;
- priv->wl.high = priv->wl.low + hdev->mps;
+ priv->wl.low = aligned_mps;
+ priv->wl.high = priv->wl.low + aligned_mps;
priv->buf_size = priv->wl.high +
HCLGE_DEFAULT_DV;
} else {
priv->wl.low = 0;
- priv->wl.high = 2 * hdev->mps;
+ priv->wl.high = 2 * aligned_mps;
priv->buf_size = priv->wl.high;
}
} else {
@@ -1718,11 +1425,11 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
priv->wl.low = 128;
- priv->wl.high = priv->wl.low + hdev->mps;
+ priv->wl.high = priv->wl.low + aligned_mps;
priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
} else {
priv->wl.low = 0;
- priv->wl.high = hdev->mps;
+ priv->wl.high = aligned_mps;
priv->buf_size = priv->wl.high;
}
}
@@ -2066,19 +1773,17 @@ static int hclge_init_msi(struct hclge_dev *hdev)
return 0;
}
-static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
+static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
- struct hclge_mac *mac = &hdev->hw.mac;
- if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
- mac->duplex = (u8)duplex;
- else
- mac->duplex = HCLGE_MAC_FULL;
+ if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
+ duplex = HCLGE_MAC_FULL;
- mac->speed = speed;
+ return duplex;
}
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
+ u8 duplex)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
@@ -2138,7 +1843,23 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
return ret;
}
- hclge_check_speed_dup(hdev, duplex, speed);
+ return 0;
+}
+
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+{
+ int ret;
+
+ duplex = hclge_check_speed_dup(duplex, speed);
+ if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
+ return 0;
+
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
+ if (ret)
+ return ret;
+
+ hdev->hw.mac.speed = speed;
+ hdev->hw.mac.duplex = duplex;
return 0;
}
@@ -2224,42 +1945,17 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
return hdev->hw.mac.autoneg;
}
-static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
- bool mask_vlan,
- u8 *mac_mask)
-{
- struct hclge_mac_vlan_mask_entry_cmd *req;
- struct hclge_desc desc;
- int status;
-
- req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
-
- hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
- mask_vlan ? 1 : 0);
- ether_addr_copy(req->mac_mask, mac_mask);
-
- status = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (status)
- dev_err(&hdev->pdev->dev,
- "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
- status);
-
- return status;
-}
-
static int hclge_mac_init(struct hclge_dev *hdev)
{
struct hnae3_handle *handle = &hdev->vport[0].nic;
struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
- u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
- struct hclge_vport *vport;
int mtu;
int ret;
- int i;
- ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
+ hdev->hw.mac.duplex);
if (ret) {
dev_err(&hdev->pdev->dev,
"Config mac speed dup fail ret=%d\n", ret);
@@ -2268,39 +1964,6 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
- /* Initialize the MTA table work mode */
- hdev->enable_mta = true;
- hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
-
- ret = hclge_set_mta_filter_mode(hdev,
- hdev->mta_mac_sel_type,
- hdev->enable_mta);
- if (ret) {
- dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
- ret);
- return ret;
- }
-
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- vport->accept_mta_mc = false;
-
- memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
- ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "set mta filter mode fail ret=%d\n", ret);
- return ret;
- }
- }
-
- ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "set default mac_vlan_mask fail ret=%d\n", ret);
- return ret;
- }
-
if (netdev)
mtu = netdev->mtu;
else
@@ -2360,10 +2023,13 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
int mac_state;
int link_stat;
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
+ return 0;
+
mac_state = hclge_get_mac_link_status(hdev);
if (hdev->hw.mac.phydev) {
- if (!genphy_read_status(hdev->hw.mac.phydev))
+ if (hdev->hw.mac.phydev->state == PHY_RUNNING)
link_stat = mac_state &
hdev->hw.mac.phydev->link;
else
@@ -2415,13 +2081,11 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev)
return ret;
}
- if ((mac.speed != speed) || (mac.duplex != duplex)) {
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac speed/duplex config failed %d\n", ret);
- return ret;
- }
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac speed/duplex config failed %d\n", ret);
+ return ret;
}
return 0;
@@ -2520,6 +2184,8 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
case HCLGE_VECTOR0_EVENT_MBX:
hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
break;
+ default:
+ break;
}
}
@@ -2793,8 +2459,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
static void hclge_reset(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hnae3_handle *handle;
+ /* Initialize the ae_dev reset status as well, in case the enet layer
+ * wants to know whether the device is undergoing a reset
+ */
+ ae_dev->reset_type = hdev->reset_type;
/* perform reset of the stack & ae device for a client */
handle = &hdev->vport[0].nic;
rtnl_lock();
@@ -2815,14 +2486,21 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_notify_client(hdev, HNAE3_UP_CLIENT);
handle->last_reset_time = jiffies;
rtnl_unlock();
+ ae_dev->reset_type = HNAE3_NONE_RESET;
}
-static void hclge_reset_event(struct hnae3_handle *handle)
+static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclge_dev *hdev = ae_dev->priv;
- /* check if this is a new reset request and we are not here just because
+ /* We might end up getting called broadly for the two cases below:
+ * 1. A recoverable error was conveyed through APEI, and the only way
+ * to bring back normalcy is to reset.
+ * 2. A new reset request from the stack due to a timeout.
+ *
+ * For the first case, the error event might not have an ae handle
+ * available. Check whether this is a new reset request and we are not
* here just because the last reset attempt did not succeed and the
* watchdog hit us again. We will know this if the last reset request
* did not occur very recently (watchdog timer = 5*HZ; let us check
* after a sufficiently large time, say 4*5*HZ)
@@ -2831,6 +2509,9 @@ static void hclge_reset_event(struct hnae3_handle *handle)
* want to make sure we throttle the reset request. Therefore, we will
* not allow it again before 3*HZ times.
*/
+ if (!handle)
+ handle = &hdev->vport[0].nic;
+
if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
return;
else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
@@ -3102,6 +2783,22 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
return ret;
}
+static void hclge_get_rss_type(struct hclge_vport *vport)
+{
+ if (vport->rss_tuple_sets.ipv4_tcp_en ||
+ vport->rss_tuple_sets.ipv4_udp_en ||
+ vport->rss_tuple_sets.ipv4_sctp_en ||
+ vport->rss_tuple_sets.ipv6_tcp_en ||
+ vport->rss_tuple_sets.ipv6_udp_en ||
+ vport->rss_tuple_sets.ipv6_sctp_en)
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
+ else if (vport->rss_tuple_sets.ipv4_fragment_en ||
+ vport->rss_tuple_sets.ipv6_fragment_en)
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
+ else
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
+}
+
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
struct hclge_rss_input_tuple_cmd *req;
@@ -3121,6 +2818,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
+ hclge_get_rss_type(&hdev->vport[0]);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
@@ -3135,8 +2833,19 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
int i;
/* Get hash algorithm */
- if (hfunc)
- *hfunc = vport->rss_algo;
+ if (hfunc) {
+ switch (vport->rss_algo) {
+ case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGE_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
+ }
/* Get the RSS Key required by the user */
if (key)
@@ -3160,12 +2869,20 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
/* Set the RSS Hash Key if specified by the user */
if (key) {
-
- if (hfunc == ETH_RSS_HASH_TOP ||
- hfunc == ETH_RSS_HASH_NO_CHANGE)
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- else
+ break;
+ case ETH_RSS_HASH_XOR:
+ hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+ break;
+ case ETH_RSS_HASH_NO_CHANGE:
+ hash_algo = vport->rss_algo;
+ break;
+ default:
return -EINVAL;
+ }
+
ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
if (ret)
return ret;
@@ -3283,6 +3000,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ hclge_get_rss_type(vport);
return 0;
}
@@ -3608,6 +3326,1281 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
hclge_cmd_set_promisc_mode(hdev, &param);
}
+static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
+{
+ struct hclge_get_fd_mode_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
+
+ req = (struct hclge_get_fd_mode_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ *fd_mode = req->mode;
+
+ return ret;
+}
+
+static int hclge_get_fd_allocation(struct hclge_dev *hdev,
+ u32 *stage1_entry_num,
+ u32 *stage2_entry_num,
+ u16 *stage1_counter_num,
+ u16 *stage2_counter_num)
+{
+ struct hclge_get_fd_allocation_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
+
+ req = (struct hclge_get_fd_allocation_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
+ *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
+ *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
+ *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
+
+ return ret;
+}
+
+static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
+{
+ struct hclge_set_fd_key_config_cmd *req;
+ struct hclge_fd_key_cfg *stage;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
+
+ req = (struct hclge_set_fd_key_config_cmd *)desc.data;
+ stage = &hdev->fd_cfg.key_cfg[stage_num];
+ req->stage = stage_num;
+ req->key_select = stage->key_sel;
+ req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
+ req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
+ req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
+ req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
+ req->tuple_mask = cpu_to_le32(~stage->tuple_active);
+ req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
+
+ return ret;
+}
+
+static int hclge_init_fd_config(struct hclge_dev *hdev)
+{
+#define LOW_2_WORDS 0x03
+ struct hclge_fd_key_cfg *key_cfg;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return 0;
+
+ ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
+ if (ret)
+ return ret;
+
+ switch (hdev->fd_cfg.fd_mode) {
+ case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
+ break;
+ case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "Unsupported flow director mode %d\n",
+ hdev->fd_cfg.fd_mode);
+ return -EOPNOTSUPP;
+ }
+
+ hdev->fd_cfg.fd_en = true;
+ hdev->fd_cfg.proto_support =
+ TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
+ UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
+ key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
+ key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
+ key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
+ key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
+ key_cfg->outer_sipv6_word_en = 0;
+ key_cfg->outer_dipv6_word_en = 0;
+
+ key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
+ BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
+ BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+ /* If the max 400-bit key is used, tuples for ether type can also be supported */
+ if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
+ hdev->fd_cfg.proto_support |= ETHER_FLOW;
+ key_cfg->tuple_active |=
+ BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
+ }
+
+ /* roce_type is used to filter roce frames
+ * dst_vport is used to specify the rule
+ */
+ key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
+
+ ret = hclge_get_fd_allocation(hdev,
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
+ if (ret)
+ return ret;
+
+ return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
+}
+
+static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
+ int loc, u8 *key, bool is_add)
+{
+ struct hclge_fd_tcam_config_1_cmd *req1;
+ struct hclge_fd_tcam_config_2_cmd *req2;
+ struct hclge_fd_tcam_config_3_cmd *req3;
+ struct hclge_desc desc[3];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
+
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
+ req1->stage = stage;
+ req1->xy_sel = sel_x ? 1 : 0;
+ hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
+ req1->index = cpu_to_le32(loc);
+ req1->entry_vld = sel_x ? is_add : 0;
+
+ if (key) {
+ memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
+ memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
+ sizeof(req2->tcam_data));
+ memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
+ sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "config tcam key fail, ret=%d\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
+ struct hclge_fd_ad_data *action)
+{
+ struct hclge_fd_ad_config_cmd *req;
+ struct hclge_desc desc;
+ u64 ad_data = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
+
+ req = (struct hclge_fd_ad_config_cmd *)desc.data;
+ req->index = cpu_to_le32(loc);
+ req->stage = stage;
+
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
+ action->write_rule_id_to_bd);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
+ action->rule_id);
+ ad_data <<= 32;
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
+ action->forward_to_direct_queue);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
+ action->queue_id);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
+ HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
+ action->next_input_key);
+
+ req->ad_data = cpu_to_le64(ad_data);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
+
+ return ret;
+}
+
+static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
+ struct hclge_fd_rule *rule)
+{
+ u16 tmp_x_s, tmp_y_s;
+ u32 tmp_x_l, tmp_y_l;
+ int i;
+
+ if (rule->unused_tuple & tuple_bit)
+ return true;
+
+ switch (tuple_bit) {
+ case 0:
+ return false;
+ case BIT(INNER_DST_MAC):
+ for (i = 0; i < 6; i++) {
+ calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
+ rule->tuples_mask.dst_mac[i]);
+ calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
+ rule->tuples_mask.dst_mac[i]);
+ }
+
+ return true;
+ case BIT(INNER_SRC_MAC):
+ for (i = 0; i < 6; i++) {
+ calc_x(key_x[5 - i], rule->tuples.src_mac[i],
+ rule->tuples_mask.src_mac[i]);
+ calc_y(key_y[5 - i], rule->tuples.src_mac[i],
+ rule->tuples_mask.src_mac[i]);
+ }
+
+ return true;
+ case BIT(INNER_VLAN_TAG_FST):
+ calc_x(tmp_x_s, rule->tuples.vlan_tag1,
+ rule->tuples_mask.vlan_tag1);
+ calc_y(tmp_y_s, rule->tuples.vlan_tag1,
+ rule->tuples_mask.vlan_tag1);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_ETH_TYPE):
+ calc_x(tmp_x_s, rule->tuples.ether_proto,
+ rule->tuples_mask.ether_proto);
+ calc_y(tmp_y_s, rule->tuples.ether_proto,
+ rule->tuples_mask.ether_proto);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_IP_TOS):
+ calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+ calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+
+ return true;
+ case BIT(INNER_IP_PROTO):
+ calc_x(*key_x, rule->tuples.ip_proto,
+ rule->tuples_mask.ip_proto);
+ calc_y(*key_y, rule->tuples.ip_proto,
+ rule->tuples_mask.ip_proto);
+
+ return true;
+ case BIT(INNER_SRC_IP):
+ calc_x(tmp_x_l, rule->tuples.src_ip[3],
+ rule->tuples_mask.src_ip[3]);
+ calc_y(tmp_y_l, rule->tuples.src_ip[3],
+ rule->tuples_mask.src_ip[3]);
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+ return true;
+ case BIT(INNER_DST_IP):
+ calc_x(tmp_x_l, rule->tuples.dst_ip[3],
+ rule->tuples_mask.dst_ip[3]);
+ calc_y(tmp_y_l, rule->tuples.dst_ip[3],
+ rule->tuples_mask.dst_ip[3]);
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+ return true;
+ case BIT(INNER_SRC_PORT):
+ calc_x(tmp_x_s, rule->tuples.src_port,
+ rule->tuples_mask.src_port);
+ calc_y(tmp_y_s, rule->tuples.src_port,
+ rule->tuples_mask.src_port);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_DST_PORT):
+ calc_x(tmp_x_s, rule->tuples.dst_port,
+ rule->tuples_mask.dst_port);
+ calc_y(tmp_y_s, rule->tuples.dst_port,
+ rule->tuples_mask.dst_port);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
+ u8 vf_id, u8 network_port_id)
+{
+ u32 port_number = 0;
+
+ if (port_type == HOST_PORT) {
+ hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
+ pf_id);
+ hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
+ vf_id);
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
+ } else {
+ hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
+ HCLGE_NETWORK_PORT_ID_S, network_port_id);
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
+ }
+
+ return port_number;
+}
+
+static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
+ __le32 *key_x, __le32 *key_y,
+ struct hclge_fd_rule *rule)
+{
+ u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
+ u8 cur_pos = 0, tuple_size, shift_bits;
+ int i;
+
+ for (i = 0; i < MAX_META_DATA; i++) {
+ tuple_size = meta_data_key_info[i].key_length;
+ tuple_bit = key_cfg->meta_data_active & BIT(i);
+
+ switch (tuple_bit) {
+ case BIT(ROCE_TYPE):
+ hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
+ cur_pos += tuple_size;
+ break;
+ case BIT(DST_VPORT):
+ port_number = hclge_get_port_number(HOST_PORT, 0,
+ rule->vf_id, 0);
+ hnae3_set_field(meta_data,
+ GENMASK(cur_pos + tuple_size - 1, cur_pos),
+ cur_pos, port_number);
+ cur_pos += tuple_size;
+ break;
+ default:
+ break;
+ }
+ }
+
+ calc_x(tmp_x, meta_data, 0xFFFFFFFF);
+ calc_y(tmp_y, meta_data, 0xFFFFFFFF);
+ shift_bits = sizeof(meta_data) * 8 - cur_pos;
+
+ *key_x = cpu_to_le32(tmp_x << shift_bits);
+ *key_y = cpu_to_le32(tmp_y << shift_bits);
+}
+
+/* A complete key consists of a meta data key and a tuple key.
+ * The meta data key is stored in the MSB region, the tuple key in the
+ * LSB region, and unused bits are filled with 0.
+ */
+static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
+ u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
+ u8 *cur_key_x, *cur_key_y;
+ int i, ret, tuple_size;
+ u8 meta_data_region;
+
+ memset(key_x, 0, sizeof(key_x));
+ memset(key_y, 0, sizeof(key_y));
+ cur_key_x = key_x;
+ cur_key_y = key_y;
+
+ for (i = 0; i < MAX_TUPLE; i++) {
+ bool tuple_valid;
+ u32 check_tuple;
+
+ tuple_size = tuple_key_info[i].key_length / 8;
+ check_tuple = key_cfg->tuple_active & BIT(i);
+
+ tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
+ cur_key_y, rule);
+ if (tuple_valid) {
+ cur_key_x += tuple_size;
+ cur_key_y += tuple_size;
+ }
+ }
+
+ meta_data_region = hdev->fd_cfg.max_key_length / 8 -
+ MAX_META_DATA_LENGTH / 8;
+
+ hclge_fd_convert_meta_data(key_cfg,
+ (__le32 *)(key_x + meta_data_region),
+ (__le32 *)(key_y + meta_data_region),
+ rule);
+
+ ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
+ true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "fd key_y config fail, loc=%d, ret=%d\n",
+ rule->location, ret);
+ return ret;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
+ true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "fd key_x config fail, loc=%d, ret=%d\n",
+ rule->location, ret);
+ return ret;
+}
+
+static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_ad_data ad_data;
+
+ ad_data.ad_id = rule->location;
+
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ ad_data.drop_packet = true;
+ ad_data.forward_to_direct_queue = false;
+ ad_data.queue_id = 0;
+ } else {
+ ad_data.drop_packet = false;
+ ad_data.forward_to_direct_queue = true;
+ ad_data.queue_id = rule->queue_id;
+ }
+
+ ad_data.use_counter = false;
+ ad_data.counter_id = 0;
+
+ ad_data.use_next_stage = false;
+ ad_data.next_input_key = 0;
+
+ ad_data.write_rule_id_to_bd = true;
+ ad_data.rule_id = rule->location;
+
+ return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
+}
+
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs, u32 *unused)
+{
+ struct ethtool_tcpip4_spec *tcp_ip4_spec;
+ struct ethtool_usrip4_spec *usr_ip4_spec;
+ struct ethtool_tcpip6_spec *tcp_ip6_spec;
+ struct ethtool_usrip6_spec *usr_ip6_spec;
+ struct ethhdr *ether_spec;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return -EINVAL;
+
+ if (!(fs->flow_type & hdev->fd_cfg.proto_support))
+ return -EOPNOTSUPP;
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
+
+ if (!tcp_ip4_spec->ip4src)
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!tcp_ip4_spec->ip4dst)
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!tcp_ip4_spec->psrc)
+ *unused |= BIT(INNER_SRC_PORT);
+
+ if (!tcp_ip4_spec->pdst)
+ *unused |= BIT(INNER_DST_PORT);
+
+ if (!tcp_ip4_spec->tos)
+ *unused |= BIT(INNER_IP_TOS);
+
+ break;
+ case IP_USER_FLOW:
+ usr_ip4_spec = &fs->h_u.usr_ip4_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+ if (!usr_ip4_spec->ip4src)
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!usr_ip4_spec->ip4dst)
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!usr_ip4_spec->tos)
+ *unused |= BIT(INNER_IP_TOS);
+
+ if (!usr_ip4_spec->proto)
+ *unused |= BIT(INNER_IP_PROTO);
+
+ if (usr_ip4_spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
+ return -EOPNOTSUPP;
+
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS);
+
+ if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
+ !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
+ !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!tcp_ip6_spec->psrc)
+ *unused |= BIT(INNER_SRC_PORT);
+
+ if (!tcp_ip6_spec->pdst)
+ *unused |= BIT(INNER_DST_PORT);
+
+ if (tcp_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ break;
+ case IPV6_USER_FLOW:
+ usr_ip6_spec = &fs->h_u.usr_ip6_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
+ BIT(INNER_DST_PORT);
+
+ if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
+ !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
+ !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!usr_ip6_spec->l4_proto)
+ *unused |= BIT(INNER_IP_PROTO);
+
+ if (usr_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ if (usr_ip6_spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ break;
+ case ETHER_FLOW:
+ ether_spec = &fs->h_u.ether_spec;
+ *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
+ BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+
+ if (is_zero_ether_addr(ether_spec->h_source))
+ *unused |= BIT(INNER_SRC_MAC);
+
+ if (is_zero_ether_addr(ether_spec->h_dest))
+ *unused |= BIT(INNER_DST_MAC);
+
+ if (!ether_spec->h_proto)
+ *unused |= BIT(INNER_ETH_TYPE);
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->flow_type & FLOW_EXT)) {
+ if (fs->h_ext.vlan_etype)
+ return -EOPNOTSUPP;
+ if (!fs->h_ext.vlan_tci)
+ *unused |= BIT(INNER_VLAN_TAG_FST);
+
+ if (fs->m_ext.vlan_tci) {
+ if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+ }
+ } else {
+ *unused |= BIT(INNER_VLAN_TAG_FST);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
+ return -EOPNOTSUPP;
+
+ if (is_zero_ether_addr(fs->h_ext.h_dest))
+ *unused |= BIT(INNER_DST_MAC);
+ else
+ *unused &= ~(BIT(INNER_DST_MAC));
+ }
+
+ return 0;
+}
+
+static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= location)
+ break;
+ }
+
+ return rule && rule->location == location;
+}
+
+static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
+ struct hclge_fd_rule *new_rule,
+ u16 location,
+ bool is_add)
+{
+ struct hclge_fd_rule *rule = NULL, *parent = NULL;
+ struct hlist_node *node2;
+
+ if (is_add && !new_rule)
+ return -EINVAL;
+
+ hlist_for_each_entry_safe(rule, node2,
+ &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= location)
+ break;
+ parent = rule;
+ }
+
+ if (rule && rule->location == location) {
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hdev->hclge_fd_rule_num--;
+
+ if (!is_add)
+ return 0;
+
+ } else if (!is_add) {
+ dev_err(&hdev->pdev->dev,
+ "delete fail, rule %d is inexistent\n",
+ location);
+ return -EINVAL;
+ }
+
+ INIT_HLIST_NODE(&new_rule->rule_node);
+
+ if (parent)
+ hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
+ else
+ hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
+
+ hdev->hclge_fd_rule_num++;
+
+ return 0;
+}
+
+static int hclge_fd_get_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ rule->tuples.src_ip[3] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[3] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
+
+ rule->tuples.dst_ip[3] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[3] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
+
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
+ rule->tuples_mask.src_port =
+ be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
+
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
+ rule->tuples_mask.dst_port =
+ be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
+
+ rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
+
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case IP_USER_FLOW:
+ rule->tuples.src_ip[3] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[3] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
+
+ rule->tuples.dst_ip[3] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[3] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
+
+ rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
+
+ rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
+
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ be32_to_cpu_array(rule->tuples.src_ip,
+ fs->h_u.tcp_ip6_spec.ip6src, 4);
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
+ fs->m_u.tcp_ip6_spec.ip6src, 4);
+
+ be32_to_cpu_array(rule->tuples.dst_ip,
+ fs->h_u.tcp_ip6_spec.ip6dst, 4);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
+ fs->m_u.tcp_ip6_spec.ip6dst, 4);
+
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
+ rule->tuples_mask.src_port =
+ be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
+
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
+ rule->tuples_mask.dst_port =
+ be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
+
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case IPV6_USER_FLOW:
+ be32_to_cpu_array(rule->tuples.src_ip,
+ fs->h_u.usr_ip6_spec.ip6src, 4);
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
+ fs->m_u.usr_ip6_spec.ip6src, 4);
+
+ be32_to_cpu_array(rule->tuples.dst_ip,
+ fs->h_u.usr_ip6_spec.ip6dst, 4);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
+ fs->m_u.usr_ip6_spec.ip6dst, 4);
+
+ rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
+
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case ETHER_FLOW:
+ ether_addr_copy(rule->tuples.src_mac,
+ fs->h_u.ether_spec.h_source);
+ ether_addr_copy(rule->tuples_mask.src_mac,
+ fs->m_u.ether_spec.h_source);
+
+ ether_addr_copy(rule->tuples.dst_mac,
+ fs->h_u.ether_spec.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac,
+ fs->m_u.ether_spec.h_dest);
+
+ rule->tuples.ether_proto =
+ be16_to_cpu(fs->h_u.ether_spec.h_proto);
+ rule->tuples_mask.ether_proto =
+ be16_to_cpu(fs->m_u.ether_spec.h_proto);
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_SCTP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_TCP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_UDP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ default:
+ break;
+ }
+
+ if ((fs->flow_type & FLOW_EXT)) {
+ rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
+ rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
+ }
+
+ return 0;
+}
+
+static int hclge_add_fd_entry(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u16 dst_vport_id = 0, q_index = 0;
+ struct ethtool_rx_flow_spec *fs;
+ struct hclge_fd_rule *rule;
+ u32 unused = 0;
+ u8 action;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ if (!hdev->fd_cfg.fd_en) {
+ dev_warn(&hdev->pdev->dev,
+ "Please enable flow director first\n");
+ return -EOPNOTSUPP;
+ }
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ ret = hclge_fd_check_spec(hdev, fs, &unused);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
+ return ret;
+ }
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+ action = HCLGE_FD_ACTION_DROP_PACKET;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ u16 tqps;
+
+ if (vf > hdev->num_req_vfs) {
+ dev_err(&hdev->pdev->dev,
+ "Error: vf id (%d) > max vf num (%d)\n",
+ vf, hdev->num_req_vfs);
+ return -EINVAL;
+ }
+
+ dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
+ tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
+
+ if (ring >= tqps) {
+ dev_err(&hdev->pdev->dev,
+ "Error: queue id (%d) > max tqp num (%d)\n",
+ ring, tqps - 1);
+ return -EINVAL;
+ }
+
+ action = HCLGE_FD_ACTION_ACCEPT_PACKET;
+ q_index = ring;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = hclge_fd_get_tuple(hdev, fs, rule);
+ if (ret)
+ goto free_rule;
+
+ rule->flow_type = fs->flow_type;
+
+ rule->location = fs->location;
+ rule->unused_tuple = unused;
+ rule->vf_id = dst_vport_id;
+ rule->queue_id = q_index;
+ rule->action = action;
+
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto free_rule;
+
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto free_rule;
+
+ ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
+ if (ret)
+ goto free_rule;
+
+ return ret;
+
+free_rule:
+ kfree(rule);
+ return ret;
+}
+
+static int hclge_del_fd_entry(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct ethtool_rx_flow_spec *fs;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return -EINVAL;
+
+ if (!hclge_fd_rule_exist(hdev, fs->location)) {
+ dev_err(&hdev->pdev->dev,
+ "Delete fail, rule %d is inexistent\n",
+ fs->location);
+ return -ENOENT;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ fs->location, NULL, false);
+ if (ret)
+ return ret;
+
+ return hclge_fd_update_rule_list(hdev, NULL, fs->location,
+ false);
+}
+
+static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
+ bool clear_list)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return;
+
+ if (clear_list) {
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
+ rule_node) {
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hdev->hclge_fd_rule_num--;
+ }
+ } else {
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
+ rule_node)
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ }
+}
+
+static int hclge_restore_fd_entries(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (!ret)
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+
+ if (ret) {
+ dev_warn(&hdev->pdev->dev,
+ "Restore rule %d failed, remove it\n",
+ rule->location);
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hdev->hclge_fd_rule_num--;
+ }
+ }
+ return 0;
+}
+
+static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ cmd->rule_cnt = hdev->hclge_fd_rule_num;
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+ return 0;
+}
+
+static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_fd_rule *rule = NULL;
+ struct hclge_dev *hdev = vport->back;
+ struct ethtool_rx_flow_spec *fs;
+ struct hlist_node *node2;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= fs->location)
+ break;
+ }
+
+ if (!rule || fs->location != rule->location)
+ return -ENOENT;
+
+ fs->flow_type = rule->flow_type;
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs->h_u.tcp_ip4_spec.ip4src =
+ cpu_to_be32(rule->tuples.src_ip[3]);
+ fs->m_u.tcp_ip4_spec.ip4src =
+ rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+
+ fs->h_u.tcp_ip4_spec.ip4dst =
+ cpu_to_be32(rule->tuples.dst_ip[3]);
+ fs->m_u.tcp_ip4_spec.ip4dst =
+ rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+
+ fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
+ fs->m_u.tcp_ip4_spec.psrc =
+ rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
+ fs->m_u.tcp_ip4_spec.pdst =
+ rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+ fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
+ fs->m_u.tcp_ip4_spec.tos =
+ rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ break;
+ case IP_USER_FLOW:
+ fs->h_u.usr_ip4_spec.ip4src =
+ cpu_to_be32(rule->tuples.src_ip[3]);
+ fs->m_u.usr_ip4_spec.ip4src =
+ rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+
+ fs->h_u.usr_ip4_spec.ip4dst =
+ cpu_to_be32(rule->tuples.dst_ip[3]);
+ fs->m_u.usr_ip4_spec.ip4dst =
+ rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+
+ fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
+ fs->m_u.usr_ip4_spec.tos =
+ rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
+ fs->m_u.usr_ip4_spec.proto =
+ rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
+ rule->tuples.src_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
+ rule->tuples_mask.src_ip, 4);
+
+ cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
+ rule->tuples.dst_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
+ rule->tuples_mask.dst_ip, 4);
+
+ fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
+ fs->m_u.tcp_ip6_spec.psrc =
+ rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
+ fs->m_u.tcp_ip6_spec.pdst =
+ rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+ break;
+ case IPV6_USER_FLOW:
+ cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
+ rule->tuples.src_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
+ rule->tuples_mask.src_ip, 4);
+
+ cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
+ rule->tuples.dst_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
+ rule->tuples_mask.dst_ip, 4);
+
+ fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
+ fs->m_u.usr_ip6_spec.l4_proto =
+ rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+
+ break;
+ case ETHER_FLOW:
+ ether_addr_copy(fs->h_u.ether_spec.h_source,
+ rule->tuples.src_mac);
+ if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+ eth_zero_addr(fs->m_u.ether_spec.h_source);
+ else
+ ether_addr_copy(fs->m_u.ether_spec.h_source,
+ rule->tuples_mask.src_mac);
+
+ ether_addr_copy(fs->h_u.ether_spec.h_dest,
+ rule->tuples.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(fs->m_u.ether_spec.h_dest);
+ else
+ ether_addr_copy(fs->m_u.ether_spec.h_dest,
+ rule->tuples_mask.dst_mac);
+
+ fs->h_u.ether_spec.h_proto =
+ cpu_to_be16(rule->tuples.ether_proto);
+ fs->m_u.ether_spec.h_proto =
+ rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+ 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (fs->flow_type & FLOW_EXT) {
+ fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
+ fs->m_ext.vlan_tci =
+ rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
+ cpu_to_be16(VLAN_VID_MASK) :
+ cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(fs->m_ext.h_dest);
+ else
+ ether_addr_copy(fs->m_ext.h_dest,
+ rule->tuples_mask.dst_mac);
+ }
+
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else {
+ u64 vf_id;
+
+ fs->ring_cookie = rule->queue_id;
+ vf_id = rule->vf_id;
+ vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fs->ring_cookie |= vf_id;
+ }
+
+ return 0;
+}
+
+static int hclge_get_all_rules(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node2;
+ int cnt = 0;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+ hlist_for_each_entry_safe(rule, node2,
+ &hdev->fd_rule_list, rule_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = rule->location;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hdev->fd_cfg.fd_en = enable;
+ if (!enable)
+ hclge_del_all_fd_entries(handle, false);
+ else
+ hclge_restore_fd_entries(handle);
+}
+
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
struct hclge_desc desc;
@@ -3639,7 +4632,7 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
"mac enable fail, ret =%d.\n", ret);
}
-static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
+static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
@@ -3659,6 +4652,8 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
/* 2 Then setup the loopback flag */
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
@@ -3673,22 +4668,37 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
+static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
{
#define HCLGE_SERDES_RETRY_MS 10
#define HCLGE_SERDES_RETRY_NUM 100
struct hclge_serdes_lb_cmd *req;
struct hclge_desc desc;
int ret, i = 0;
+ u8 loop_mode_b;
- req = (struct hclge_serdes_lb_cmd *)&desc.data[0];
+ req = (struct hclge_serdes_lb_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+ switch (loop_mode) {
+ case HNAE3_LOOP_SERIAL_SERDES:
+ loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ break;
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "unsupported serdes loopback mode %d\n", loop_mode);
+ return -ENOTSUPP;
+ }
+
if (en) {
- req->enable = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
- req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ req->enable = loop_mode_b;
+ req->mask = loop_mode_b;
} else {
- req->mask = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ req->mask = loop_mode_b;
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3719,33 +4729,10 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en)
return -EIO;
}
+ hclge_cfg_mac_mode(hdev, en);
return 0;
}
-static int hclge_set_loopback(struct hnae3_handle *handle,
- enum hnae3_loop loop_mode, bool en)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- int ret;
-
- switch (loop_mode) {
- case HNAE3_MAC_INTER_LOOP_MAC:
- ret = hclge_set_mac_loopback(hdev, en);
- break;
- case HNAE3_MAC_INTER_LOOP_SERDES:
- ret = hclge_set_serdes_loopback(hdev, en);
- break;
- default:
- ret = -ENOTSUPP;
- dev_err(&hdev->pdev->dev,
- "loop_mode %d is not supported\n", loop_mode);
- break;
- }
-
- return ret;
-}
-
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
int stream_id, bool enable)
{
@@ -3766,6 +4753,37 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
return ret;
}
+static int hclge_set_loopback(struct hnae3_handle *handle,
+ enum hnae3_loop loop_mode, bool en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int i, ret;
+
+ switch (loop_mode) {
+ case HNAE3_LOOP_APP:
+ ret = hclge_set_app_loopback(hdev, en);
+ break;
+ case HNAE3_LOOP_SERIAL_SERDES:
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(&hdev->pdev->dev,
+ "loop_mode %d is not supported\n", loop_mode);
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < vport->alloc_tqps; i++) {
+ ret = hclge_tqp_enable(hdev, i, 0, en);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -3809,6 +4827,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
struct hclge_dev *hdev = vport->back;
int i;
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
del_timer_sync(&hdev->service_timer);
cancel_work_sync(&hdev->service_task);
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
@@ -3950,174 +4970,6 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
-static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
- const u8 *addr)
-{
- u16 high_val = addr[1] | (addr[0] << 8);
- struct hclge_dev *hdev = vport->back;
- u32 rsh = 4 - hdev->mta_mac_sel_type;
- u16 ret_val = (high_val >> rsh) & 0xfff;
-
- return ret_val;
-}
-
-static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
- enum hclge_mta_dmac_sel_type mta_mac_sel,
- bool enable)
-{
- struct hclge_mta_filter_mode_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_mta_filter_mode_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
-
- hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
- enable);
- hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
- HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Config mat filter mode failed for cmd_send, ret =%d.\n",
- ret);
-
- return ret;
-}
-
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
- u8 func_id,
- bool enable)
-{
- struct hclge_cfg_func_mta_filter_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
-
- hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
- enable);
- req->function_id = func_id;
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Config func_id enable failed for cmd_send, ret =%d.\n",
- ret);
-
- return ret;
-}
-
-static int hclge_set_mta_table_item(struct hclge_vport *vport,
- u16 idx,
- bool enable)
-{
- struct hclge_dev *hdev = vport->back;
- struct hclge_cfg_func_mta_item_cmd *req;
- struct hclge_desc desc;
- u16 item_idx = 0;
- int ret;
-
- req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
- hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
-
- hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
- HCLGE_CFG_MTA_ITEM_IDX_S, idx);
- req->item_idx = cpu_to_le16(item_idx);
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Config mta table item failed for cmd_send, ret =%d.\n",
- ret);
- return ret;
- }
-
- if (enable)
- set_bit(idx, vport->mta_shadow);
- else
- clear_bit(idx, vport->mta_shadow);
-
- return 0;
-}
-
-static int hclge_update_mta_status(struct hnae3_handle *handle)
-{
- unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct net_device *netdev = handle->kinfo.netdev;
- struct netdev_hw_addr *ha;
- u16 tbl_idx;
-
- memset(mta_status, 0, sizeof(mta_status));
-
- /* update mta_status from mc addr list */
- netdev_for_each_mc_addr(ha, netdev) {
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
- set_bit(tbl_idx, mta_status);
- }
-
- return hclge_update_mta_status_common(vport, mta_status,
- 0, HCLGE_MTA_TBL_SIZE, true);
-}
-
-int hclge_update_mta_status_common(struct hclge_vport *vport,
- unsigned long *status,
- u16 idx,
- u16 count,
- bool update_filter)
-{
- struct hclge_dev *hdev = vport->back;
- u16 update_max = idx + count;
- u16 check_max;
- int ret = 0;
- bool used;
- u16 i;
-
- /* setup mta check range */
- if (update_filter) {
- i = 0;
- check_max = HCLGE_MTA_TBL_SIZE;
- } else {
- i = idx;
- check_max = update_max;
- }
-
- used = false;
- /* check and update all mta item */
- for (; i < check_max; i++) {
- /* ignore unused item */
- if (!test_bit(i, vport->mta_shadow))
- continue;
-
- /* if i in update range then update it */
- if (i >= idx && i < update_max)
- if (!test_bit(i - idx, status))
- hclge_set_mta_table_item(vport, i, false);
-
- if (!used && test_bit(i, vport->mta_shadow))
- used = true;
- }
-
- /* no longer use mta, disable it */
- if (vport->accept_mta_mc && update_filter && !used) {
- ret = hclge_cfg_func_mta_filter(hdev,
- vport->vport_id,
- false);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "disable func mta filter fail ret=%d\n",
- ret);
- else
- vport->accept_mta_mc = false;
- }
-
- return ret;
-}
-
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
struct hclge_mac_vlan_tbl_entry_cmd *req)
{
@@ -4241,6 +5093,118 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
return cfg_status;
}
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+ u16 allocated_size = 0;
+ int ret;
+
+ ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
+ true);
+ if (ret)
+ return ret;
+
+ if (allocated_size < hdev->wanted_umv_size)
+ dev_warn(&hdev->pdev->dev,
+ "Alloc umv space failed, want %d, get %d\n",
+ hdev->wanted_umv_size, allocated_size);
+
+ mutex_init(&hdev->umv_mutex);
+ hdev->max_umv_size = allocated_size;
+ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_req_vfs + 2);
+
+ return 0;
+}
+
+static int hclge_uninit_umv_space(struct hclge_dev *hdev)
+{
+ int ret;
+
+ if (hdev->max_umv_size > 0) {
+ ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
+ false);
+ if (ret)
+ return ret;
+ hdev->max_umv_size = 0;
+ }
+ mutex_destroy(&hdev->umv_mutex);
+
+ return 0;
+}
+
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+ u16 *allocated_size, bool is_alloc)
+{
+ struct hclge_umv_spc_alc_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_umv_spc_alc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
+ hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
+ req->space_size = cpu_to_le32(space_size);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "%s umv space failed for cmd_send, ret =%d\n",
+ is_alloc ? "allocate" : "free", ret);
+ return ret;
+ }
+
+ if (is_alloc && allocated_size)
+ *allocated_size = le32_to_cpu(desc.data[1]);
+
+ return 0;
+}
+
+static void hclge_reset_umv_space(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->used_umv_num = 0;
+ }
+
+ mutex_lock(&hdev->umv_mutex);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_req_vfs + 2);
+ mutex_unlock(&hdev->umv_mutex);
+}
+
+static bool hclge_is_umv_space_full(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+ bool is_full;
+
+ mutex_lock(&hdev->umv_mutex);
+ is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
+ hdev->share_umv_size == 0);
+ mutex_unlock(&hdev->umv_mutex);
+
+ return is_full;
+}
+
+static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ mutex_lock(&hdev->umv_mutex);
+ if (is_free) {
+ if (vport->used_umv_num > hdev->priv_umv_size)
+ hdev->share_umv_size++;
+ vport->used_umv_num--;
+ } else {
+ if (vport->used_umv_num >= hdev->priv_umv_size)
+ hdev->share_umv_size--;
+ vport->used_umv_num++;
+ }
+ mutex_unlock(&hdev->umv_mutex);
+}
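
For reference, a minimal standalone sketch (not driver code) of the split
computed in hclge_init_umv_space() above: every function (the PF and each
requested VF) gets an equal private quota, and the +2 in the divisor appears
to reserve one extra slice that, together with the division remainder, seeds
the shared pool drawn on once a function exhausts its private slice (see
hclge_update_umv_space() and hclge_is_umv_space_full()). Values below are
assumed for illustration:

#include <stdio.h>

int main(void)
{
	/* Assumed: a 3072-entry table split across 8 PFs gives 384
	 * entries per PF (HCLGE_DEFAULT_UMV_SPACE_PER_PF). */
	unsigned int max_umv_size = 384;
	unsigned int num_req_vfs = 6;

	unsigned int priv_umv_size = max_umv_size / (num_req_vfs + 2);
	unsigned int share_umv_size = priv_umv_size +
				      max_umv_size % (num_req_vfs + 2);

	/* 384 / 8 = 48 private entries per function, 48 + 0 shared */
	printf("priv=%u share=%u\n", priv_umv_size, share_umv_size);
	return 0;
}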
+
static int hclge_add_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
@@ -4286,8 +5250,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
* is not allowed in the mac vlan table.
*/
ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
- if (ret == -ENOENT)
- return hclge_add_mac_vlan_tbl(vport, &req, NULL);
+ if (ret == -ENOENT) {
+ if (!hclge_is_umv_space_full(vport)) {
+ ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
+ if (!ret)
+ hclge_update_umv_space(vport, false);
+ return ret;
+ }
+
+ dev_err(&hdev->pdev->dev, "UC MAC table full (%u)\n",
+ hdev->priv_umv_size);
+
+ return -ENOSPC;
+ }
/* check if we just hit the duplicate */
if (!ret)
@@ -4330,6 +5305,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
+ if (!ret)
+ hclge_update_umv_space(vport, true);
return ret;
}
@@ -4348,7 +5325,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
- u16 tbl_idx;
int status;
/* mac addr check */
@@ -4362,7 +5338,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4378,25 +5354,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
}
- /* If mc mac vlan table is full, use MTA table */
- if (status == -ENOSPC) {
- if (!vport->accept_mta_mc) {
- status = hclge_cfg_func_mta_filter(hdev,
- vport->vport_id,
- true);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "set mta filter mode fail ret=%d\n",
- status);
- return status;
- }
- vport->accept_mta_mc = true;
- }
-
- /* Set MTA table for this MAC address */
- tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
- status = hclge_set_mta_table_item(vport, tbl_idx, true);
- }
+ if (status == -ENOSPC)
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
return status;
}
@@ -4429,7 +5388,7 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
- hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -4598,8 +5557,20 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
return 0;
}
+static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
+ int cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!hdev->hw.mac.phydev)
+ return -EOPNOTSUPP;
+
+ return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
+}
+
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
- bool filter_en)
+ u8 fe_type, bool filter_en)
{
struct hclge_vlan_filter_ctrl_cmd *req;
struct hclge_desc desc;
@@ -4609,7 +5580,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req->vlan_type = vlan_type;
- req->vlan_fe = filter_en;
+ req->vlan_fe = filter_en ? fe_type : 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -4621,13 +5592,34 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
#define HCLGE_FILTER_TYPE_VF 0
#define HCLGE_FILTER_TYPE_PORT 1
+#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
+#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_EGRESS_B)
+#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_INGRESS_B)
static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+ if (hdev->pdev->revision >= 0x21) {
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, enable);
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, enable);
+ } else {
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B, enable);
+ }
+ if (enable)
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
+ else
+ handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
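
The filter-enable masks used above resolve to small constants; a quick
standalone check (the values follow directly from the BIT() definitions in
this hunk, matching what hclge_enable_vlan_filter() writes for rev >= 0x21
parts versus the single V1 egress bit on older silicon):

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int fe_egress  = BIT(1) | BIT(3); /* NIC + RoCE egress  */
	unsigned int fe_ingress = BIT(0) | BIT(2); /* NIC + RoCE ingress */

	/* prints FE_EGRESS=0x0a FE_INGRESS=0x05; V1 egress is BIT(0) = 0x01 */
	printf("FE_EGRESS=0x%02x FE_INGRESS=0x%02x\n", fe_egress, fe_ingress);
	return 0;
}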
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
@@ -4686,9 +5678,17 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
"Add vf vlan filter fail, ret =%d.\n",
req0->resp_code);
} else {
+#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
if (!req0->resp_code)
return 0;
+ if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
+ dev_warn(&hdev->pdev->dev,
+ "vlan %d filter is not in vf vlan table\n",
+ vlan);
+ return 0;
+ }
+
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%d.\n",
req0->resp_code);
@@ -4732,6 +5732,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
u16 vport_idx, vport_num = 0;
int ret;
+ if (is_kill && !vlan_id)
+ return 0;
+
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
0, proto);
if (ret) {
@@ -4761,7 +5764,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return -EINVAL;
}
- for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID)
+ for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
vport_num++;
if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
@@ -4896,7 +5899,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
- tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
+ tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
@@ -4913,18 +5916,30 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_DEF_VLAN_TYPE 0x8100
- struct hnae3_handle *handle;
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_vport *vport;
int ret;
int i;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
- if (ret)
- return ret;
+ if (hdev->pdev->revision >= 0x21) {
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, true);
+ if (ret)
+ return ret;
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
- if (ret)
- return ret;
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true);
+ if (ret)
+ return ret;
+ } else {
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true);
+ if (ret)
+ return ret;
+ }
+
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
@@ -4970,7 +5985,6 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return ret;
}
- handle = &hdev->vport[0].nic;
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
@@ -5187,20 +6201,6 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
return hdev->fw_version;
}
-static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
- u32 *flowctrl_adv)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- struct phy_device *phydev = hdev->hw.mac.phydev;
-
- if (!phydev)
- return;
-
- *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
- (phydev->advertising & ADVERTISED_Asym_Pause);
-}
-
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
@@ -5208,13 +6208,7 @@ static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
if (!phydev)
return;
- phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
-
- if (rx_en)
- phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-
- if (tx_en)
- phydev->advertising ^= ADVERTISED_Asym_Pause;
+ phy_set_asym_pause(phydev, rx_en, tx_en);
}
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
@@ -5256,11 +6250,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
if (!phydev->link || !phydev->autoneg)
return 0;
- if (phydev->advertising & ADVERTISED_Pause)
- local_advertising = ADVERTISE_PAUSE_CAP;
-
- if (phydev->advertising & ADVERTISED_Asym_Pause)
- local_advertising |= ADVERTISE_PAUSE_ASYM;
+ local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
if (phydev->pause)
remote_advertising = LPA_PAUSE_CAP;
@@ -5444,26 +6434,31 @@ static int hclge_init_client_instance(struct hnae3_client *client,
vport->nic.client = client;
ret = client->ops->init_instance(&vport->nic);
if (ret)
- return ret;
+ goto clear_nic;
ret = hclge_init_instance_hw(hdev);
if (ret) {
client->ops->uninit_instance(&vport->nic,
0);
- return ret;
+ goto clear_nic;
}
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
ret = hclge_init_roce_base_info(vport);
if (ret)
- return ret;
+ goto clear_roce;
ret = rc->ops->init_instance(&vport->roce);
if (ret)
- return ret;
+ goto clear_roce;
+
+ hnae3_set_client_init_flag(hdev->roce_client,
+ ae_dev, 1);
}
break;
@@ -5473,7 +6468,9 @@ static int hclge_init_client_instance(struct hnae3_client *client,
ret = client->ops->init_instance(&vport->nic);
if (ret)
- return ret;
+ goto clear_nic;
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
break;
case HNAE3_CLIENT_ROCE:
@@ -5485,16 +6482,31 @@ static int hclge_init_client_instance(struct hnae3_client *client,
if (hdev->roce_client && hdev->nic_client) {
ret = hclge_init_roce_base_info(vport);
if (ret)
- return ret;
+ goto clear_roce;
ret = client->ops->init_instance(&vport->roce);
if (ret)
- return ret;
+ goto clear_roce;
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
}
+
+ break;
+ default:
+ return -EINVAL;
}
}
return 0;
+
+clear_nic:
+ hdev->nic_client = NULL;
+ vport->nic.client = NULL;
+ return ret;
+clear_roce:
+ hdev->roce_client = NULL;
+ vport->roce.client = NULL;
+ return ret;
}
static void hclge_uninit_client_instance(struct hnae3_client *client,
@@ -5514,7 +6526,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
}
if (client->type == HNAE3_CLIENT_ROCE)
return;
- if (client->ops->uninit_instance) {
+ if (hdev->nic_client && client->ops->uninit_instance) {
hclge_uninit_instance_hw(hdev);
client->ops->uninit_instance(&vport->nic, 0);
hdev->nic_client = NULL;
@@ -5697,6 +6709,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
}
+ ret = hclge_init_umv_space(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
+ goto err_msi_irq_uninit;
+ }
+
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -5734,6 +6752,20 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_init_fd_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fd table init fail, ret=%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_hw_error_set_state(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "hw error interrupts enable failed, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
@@ -5810,6 +6842,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ hclge_reset_umv_space(hdev);
+
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -5840,6 +6874,19 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_init_fd_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fd table init fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Re-enable the TM hw error interrupts because
+ * they get disabled on core/global reset.
+ */
+ if (hclge_enable_tm_hw_error(hdev, true))
+ dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
+
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -5856,10 +6903,13 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
+ hclge_uninit_umv_space(hdev);
+
/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
+ hclge_hw_error_set_state(hdev, false);
hclge_destroy_cmd_queue(&hdev->hw);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
@@ -5887,18 +6937,12 @@ static void hclge_get_channels(struct hnae3_handle *handle,
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
- u16 *free_tqps, u16 *max_rss_size)
+ u16 *alloc_tqps, u16 *max_rss_size)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u16 temp_tqps = 0;
- int i;
- for (i = 0; i < hdev->num_tqps; i++) {
- if (!hdev->htqp[i].alloced)
- temp_tqps++;
- }
- *free_tqps = temp_tqps;
+ *alloc_tqps = vport->alloc_tqps;
*max_rss_size = hdev->rss_size_max;
}
@@ -6228,27 +7272,6 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
}
}
-static void hclge_get_port_type(struct hnae3_handle *handle,
- u8 *port_type)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u8 media_type = hdev->hw.mac.media_type;
-
- switch (media_type) {
- case HNAE3_MEDIA_TYPE_FIBER:
- *port_type = PORT_FIBRE;
- break;
- case HNAE3_MEDIA_TYPE_COPPER:
- *port_type = PORT_TP;
- break;
- case HNAE3_MEDIA_TYPE_UNKNOWN:
- default:
- *port_type = PORT_OTHER;
- break;
- }
-}
-
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -6276,11 +7299,11 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_tc_size = hclge_get_tc_size,
.get_mac_addr = hclge_get_mac_addr,
.set_mac_addr = hclge_set_mac_addr,
+ .do_ioctl = hclge_do_ioctl,
.add_uc_addr = hclge_add_uc_addr,
.rm_uc_addr = hclge_rm_uc_addr,
.add_mc_addr = hclge_add_mc_addr,
.rm_mc_addr = hclge_rm_mc_addr,
- .update_mta_status = hclge_update_mta_status,
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
.get_pauseparam = hclge_get_pauseparam,
@@ -6301,12 +7324,19 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
.set_channels = hclge_set_channels,
.get_channels = hclge_get_channels,
- .get_flowctrl_adv = hclge_get_flowctrl_adv,
.get_regs_len = hclge_get_regs_len,
.get_regs = hclge_get_regs,
.set_led_id = hclge_set_led_id,
.get_link_mode = hclge_get_link_mode,
- .get_port_type = hclge_get_port_type,
+ .add_fd_entry = hclge_add_fd_entry,
+ .del_fd_entry = hclge_del_fd_entry,
+ .del_all_fd_entries = hclge_del_all_fd_entries,
+ .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
+ .get_fd_rule_info = hclge_get_fd_rule_info,
+ .get_fd_all_rules = hclge_get_all_rules,
+ .restore_fd_rules = hclge_restore_fd_entries,
+ .enable_fd = hclge_enable_fd,
+ .process_hw_error = hclge_process_ras_hw_error,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 1528fb3fa6be..e3dfd654eca9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -14,6 +14,8 @@
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
+#define HCLGE_MAX_PF_NUM 8
+
#define HCLGE_INVALID_VPORT 0xffff
#define HCLGE_PF_CFG_BLOCK_SIZE 32
@@ -53,7 +55,9 @@
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128
-#define HCLGE_MTA_TBL_SIZE 4096
+#define HCLGE_UMV_TBL_SIZE 3072
+#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
+ (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
#define HCLGE_TQP_RESET_TRY_TIMES 10
@@ -79,6 +83,19 @@
#define HCLGE_VF_NUM_PER_CMD 64
#define HCLGE_VF_NUM_PER_BYTE 8
+enum HLCGE_PORT_TYPE {
+ HOST_PORT,
+ NETWORK_PORT
+};
+
+#define HCLGE_PF_ID_S 0
+#define HCLGE_PF_ID_M GENMASK(2, 0)
+#define HCLGE_VF_ID_S 3
+#define HCLGE_VF_ID_M GENMASK(10, 3)
+#define HCLGE_PORT_TYPE_B 11
+#define HCLGE_NETWORK_PORT_ID_S 0
+#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
+
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
@@ -149,13 +166,6 @@ enum HCLGE_MAC_DUPLEX {
HCLGE_MAC_FULL
};
-enum hclge_mta_dmac_sel_type {
- HCLGE_MAC_ADDR_47_36,
- HCLGE_MAC_ADDR_46_35,
- HCLGE_MAC_ADDR_45_34,
- HCLGE_MAC_ADDR_44_33,
-};
-
struct hclge_mac {
u8 phy_addr;
u8 flag;
@@ -238,6 +248,7 @@ struct hclge_cfg {
u8 default_speed;
u32 numa_node_map;
u8 speed_ability;
+ u16 umv_space;
};
struct hclge_tm_info {
@@ -256,109 +267,6 @@ struct hclge_comm_stats_str {
unsigned long offset;
};
-/* all 64bit stats, opcode id: 0x0030 */
-struct hclge_64_bit_stats {
- /* query_igu_stat */
- u64 igu_rx_oversize_pkt;
- u64 igu_rx_undersize_pkt;
- u64 igu_rx_out_all_pkt;
- u64 igu_rx_uni_pkt;
- u64 igu_rx_multi_pkt;
- u64 igu_rx_broad_pkt;
- u64 rsv0;
-
- /* query_egu_stat */
- u64 egu_tx_out_all_pkt;
- u64 egu_tx_uni_pkt;
- u64 egu_tx_multi_pkt;
- u64 egu_tx_broad_pkt;
-
- /* ssu_ppp packet stats */
- u64 ssu_ppp_mac_key_num;
- u64 ssu_ppp_host_key_num;
- u64 ppp_ssu_mac_rlt_num;
- u64 ppp_ssu_host_rlt_num;
-
- /* ssu_tx_in_out_dfx_stats */
- u64 ssu_tx_in_num;
- u64 ssu_tx_out_num;
- /* ssu_rx_in_out_dfx_stats */
- u64 ssu_rx_in_num;
- u64 ssu_rx_out_num;
-};
-
-/* all 32bit stats, opcode id: 0x0031 */
-struct hclge_32_bit_stats {
- u64 igu_rx_err_pkt;
- u64 igu_rx_no_eof_pkt;
- u64 igu_rx_no_sof_pkt;
- u64 egu_tx_1588_pkt;
- u64 egu_tx_err_pkt;
- u64 ssu_full_drop_num;
- u64 ssu_part_drop_num;
- u64 ppp_key_drop_num;
- u64 ppp_rlt_drop_num;
- u64 ssu_key_drop_num;
- u64 pkt_curr_buf_cnt;
- u64 qcn_fb_rcv_cnt;
- u64 qcn_fb_drop_cnt;
- u64 qcn_fb_invaild_cnt;
- u64 rsv0;
- u64 rx_packet_tc0_in_cnt;
- u64 rx_packet_tc1_in_cnt;
- u64 rx_packet_tc2_in_cnt;
- u64 rx_packet_tc3_in_cnt;
- u64 rx_packet_tc4_in_cnt;
- u64 rx_packet_tc5_in_cnt;
- u64 rx_packet_tc6_in_cnt;
- u64 rx_packet_tc7_in_cnt;
- u64 rx_packet_tc0_out_cnt;
- u64 rx_packet_tc1_out_cnt;
- u64 rx_packet_tc2_out_cnt;
- u64 rx_packet_tc3_out_cnt;
- u64 rx_packet_tc4_out_cnt;
- u64 rx_packet_tc5_out_cnt;
- u64 rx_packet_tc6_out_cnt;
- u64 rx_packet_tc7_out_cnt;
-
- /* Tx packet level statistics */
- u64 tx_packet_tc0_in_cnt;
- u64 tx_packet_tc1_in_cnt;
- u64 tx_packet_tc2_in_cnt;
- u64 tx_packet_tc3_in_cnt;
- u64 tx_packet_tc4_in_cnt;
- u64 tx_packet_tc5_in_cnt;
- u64 tx_packet_tc6_in_cnt;
- u64 tx_packet_tc7_in_cnt;
- u64 tx_packet_tc0_out_cnt;
- u64 tx_packet_tc1_out_cnt;
- u64 tx_packet_tc2_out_cnt;
- u64 tx_packet_tc3_out_cnt;
- u64 tx_packet_tc4_out_cnt;
- u64 tx_packet_tc5_out_cnt;
- u64 tx_packet_tc6_out_cnt;
- u64 tx_packet_tc7_out_cnt;
-
- /* packet buffer statistics */
- u64 pkt_curr_buf_tc0_cnt;
- u64 pkt_curr_buf_tc1_cnt;
- u64 pkt_curr_buf_tc2_cnt;
- u64 pkt_curr_buf_tc3_cnt;
- u64 pkt_curr_buf_tc4_cnt;
- u64 pkt_curr_buf_tc5_cnt;
- u64 pkt_curr_buf_tc6_cnt;
- u64 pkt_curr_buf_tc7_cnt;
-
- u64 mb_uncopy_num;
- u64 lo_pri_unicast_rlt_drop_num;
- u64 hi_pri_multicast_rlt_drop_num;
- u64 lo_pri_multicast_rlt_drop_num;
- u64 rx_oq_drop_pkt_cnt;
- u64 tx_oq_drop_pkt_cnt;
- u64 nic_l2_err_drop_pkt_cnt;
- u64 roc_l2_err_drop_pkt_cnt;
-};
-
/* mac stats, opcode id: 0x0032 */
struct hclge_mac_stats {
u64 mac_tx_mac_pause_num;
@@ -450,8 +358,6 @@ struct hclge_mac_stats {
#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
struct hclge_hw_stats {
struct hclge_mac_stats mac_stats;
- struct hclge_64_bit_stats all_64_bit_stats;
- struct hclge_32_bit_stats all_32_bit_stats;
u32 stats_timer;
};
@@ -464,6 +370,221 @@ struct hclge_vlan_type_cfg {
u16 tx_in_vlan_type;
};
+enum HCLGE_FD_MODE {
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
+ HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
+ HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
+};
+
+enum HCLGE_FD_KEY_TYPE {
+ HCLGE_FD_KEY_BASE_ON_PTYPE,
+ HCLGE_FD_KEY_BASE_ON_TUPLE,
+};
+
+enum HCLGE_FD_STAGE {
+ HCLGE_FD_STAGE_1,
+ HCLGE_FD_STAGE_2,
+};
+
+/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet.
+ * INNER_XXX indicates tuples in the tunneled (inner) header of a tunnel
+ * packet, or the tuples of a non-tunnel packet.
+ */
+enum HCLGE_FD_TUPLE {
+ OUTER_DST_MAC,
+ OUTER_SRC_MAC,
+ OUTER_VLAN_TAG_FST,
+ OUTER_VLAN_TAG_SEC,
+ OUTER_ETH_TYPE,
+ OUTER_L2_RSV,
+ OUTER_IP_TOS,
+ OUTER_IP_PROTO,
+ OUTER_SRC_IP,
+ OUTER_DST_IP,
+ OUTER_L3_RSV,
+ OUTER_SRC_PORT,
+ OUTER_DST_PORT,
+ OUTER_L4_RSV,
+ OUTER_TUN_VNI,
+ OUTER_TUN_FLOW_ID,
+ INNER_DST_MAC,
+ INNER_SRC_MAC,
+ INNER_VLAN_TAG_FST,
+ INNER_VLAN_TAG_SEC,
+ INNER_ETH_TYPE,
+ INNER_L2_RSV,
+ INNER_IP_TOS,
+ INNER_IP_PROTO,
+ INNER_SRC_IP,
+ INNER_DST_IP,
+ INNER_L3_RSV,
+ INNER_SRC_PORT,
+ INNER_DST_PORT,
+ INNER_L4_RSV,
+ MAX_TUPLE,
+};
+
+enum HCLGE_FD_META_DATA {
+ PACKET_TYPE_ID,
+ IP_FRAGEMENT,
+ ROCE_TYPE,
+ NEXT_KEY,
+ VLAN_NUMBER,
+ SRC_VPORT,
+ DST_VPORT,
+ TUNNEL_PACKET,
+ MAX_META_DATA,
+};
+
+struct key_info {
+ u8 key_type;
+ u8 key_length;
+};
+
+static const struct key_info meta_data_key_info[] = {
+ { PACKET_TYPE_ID, 6},
+ { IP_FRAGEMENT, 1},
+ { ROCE_TYPE, 1},
+ { NEXT_KEY, 5},
+ { VLAN_NUMBER, 2},
+ { SRC_VPORT, 12},
+ { DST_VPORT, 12},
+ { TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+ { OUTER_DST_MAC, 48},
+ { OUTER_SRC_MAC, 48},
+ { OUTER_VLAN_TAG_FST, 16},
+ { OUTER_VLAN_TAG_SEC, 16},
+ { OUTER_ETH_TYPE, 16},
+ { OUTER_L2_RSV, 16},
+ { OUTER_IP_TOS, 8},
+ { OUTER_IP_PROTO, 8},
+ { OUTER_SRC_IP, 32},
+ { OUTER_DST_IP, 32},
+ { OUTER_L3_RSV, 16},
+ { OUTER_SRC_PORT, 16},
+ { OUTER_DST_PORT, 16},
+ { OUTER_L4_RSV, 32},
+ { OUTER_TUN_VNI, 24},
+ { OUTER_TUN_FLOW_ID, 8},
+ { INNER_DST_MAC, 48},
+ { INNER_SRC_MAC, 48},
+ { INNER_VLAN_TAG_FST, 16},
+ { INNER_VLAN_TAG_SEC, 16},
+ { INNER_ETH_TYPE, 16},
+ { INNER_L2_RSV, 16},
+ { INNER_IP_TOS, 8},
+ { INNER_IP_PROTO, 8},
+ { INNER_SRC_IP, 32},
+ { INNER_DST_IP, 32},
+ { INNER_L3_RSV, 16},
+ { INNER_SRC_PORT, 16},
+ { INNER_DST_PORT, 16},
+ { INNER_L4_RSV, 32},
+};
+
+#define MAX_KEY_LENGTH 400
+#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
+#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
+#define MAX_META_DATA_LENGTH 32
+
+enum HCLGE_FD_PACKET_TYPE {
+ NIC_PACKET,
+ ROCE_PACKET,
+};
+
+enum HCLGE_FD_ACTION {
+ HCLGE_FD_ACTION_ACCEPT_PACKET,
+ HCLGE_FD_ACTION_DROP_PACKET,
+};
+
+struct hclge_fd_key_cfg {
+ u8 key_sel;
+ u8 inner_sipv6_word_en;
+ u8 inner_dipv6_word_en;
+ u8 outer_sipv6_word_en;
+ u8 outer_dipv6_word_en;
+ u32 tuple_active;
+ u32 meta_data_active;
+};
+
+struct hclge_fd_cfg {
+ u8 fd_mode;
+ u8 fd_en;
+ u16 max_key_length;
+ u32 proto_support;
+ u32 rule_num[2]; /* rule entry number */
+ u16 cnt_num[2]; /* rule hit counter number */
+ struct hclge_fd_key_cfg key_cfg[2];
+};
+
+struct hclge_fd_rule_tuples {
+ u8 src_mac[6];
+ u8 dst_mac[6];
+ u32 src_ip[4];
+ u32 dst_ip[4];
+ u16 src_port;
+ u16 dst_port;
+ u16 vlan_tag1;
+ u16 ether_proto;
+ u8 ip_tos;
+ u8 ip_proto;
+};
+
+struct hclge_fd_rule {
+ struct hlist_node rule_node;
+ struct hclge_fd_rule_tuples tuples;
+ struct hclge_fd_rule_tuples tuples_mask;
+ u32 unused_tuple;
+ u32 flow_type;
+ u8 action;
+ u16 vf_id;
+ u16 queue_id;
+ u16 location;
+};
+
+struct hclge_fd_ad_data {
+ u16 ad_id;
+ u8 drop_packet;
+ u8 forward_to_direct_queue;
+ u16 queue_id;
+ u8 use_counter;
+ u8 counter_id;
+ u8 use_next_stage;
+ u8 write_rule_id_to_bd;
+ u8 next_input_key;
+ u16 rule_id;
+};
+
+/* For each bit of TCAM entry, it uses a pair of 'x' and
+ * 'y' to indicate which value to match, like below:
+ * ----------------------------------
+ * | bit x | bit y | search value |
+ * ----------------------------------
+ * | 0 | 0 | always hit |
+ * ----------------------------------
+ * | 1 | 0 | match '0' |
+ * ----------------------------------
+ * | 0 | 1 | match '1' |
+ * ----------------------------------
+ * | 1 | 1 | invalid |
+ * ----------------------------------
+ * Then for input key(k) and mask(v), we can calculate the value by
+ * the formulae:
+ * x = (~k) & v
+ * y = (k ^ ~v) & k
+ */
+#define calc_x(x, k, v) ((x) = (~(k) & (v)))
+#define calc_y(y, k, v) \
+ do { \
+ const typeof(k) _k_ = (k); \
+ const typeof(v) _v_ = (v); \
+ (y) = (_k_ ^ ~_v_) & (_k_); \
+ } while (0)
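
A worked example of the x/y encoding, as a standalone sketch: one key byte
k under mask v, where a set mask bit means the bit must match. For masked-in
bits exactly one of x/y is set (selecting match-'0' or match-'1'); masked-out
bits get x = y = 0, i.e. "always hit".

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t k = 0xa5;	/* key:  1010 0101 */
	uint8_t v = 0xf0;	/* mask: high nibble must match */
	uint8_t x = (uint8_t)(~k & v);		/* x = (~k) & v     */
	uint8_t y = (uint8_t)((k ^ ~v) & k);	/* y = (k ^ ~v) & k */

	/* prints x=50 y=a0: bits 7..4 split between match-'1' (y) and
	 * match-'0' (x); bits 3..0 are 0/0, always hit */
	printf("x=%02x y=%02x\n", x, y);
	return 0;
}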
+
#define HCLGE_VPORT_NUM 256
struct hclge_dev {
struct pci_dev *pdev;
@@ -547,12 +668,22 @@ struct hclge_dev {
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
u32 mps; /* Max packet size */
- enum hclge_mta_dmac_sel_type mta_mac_sel_type;
- bool enable_mta; /* Multicast filter enable */
-
struct hclge_vlan_type_cfg vlan_type_cfg;
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
+ struct hclge_fd_cfg fd_cfg;
+ struct hlist_head fd_rule_list;
+ u16 hclge_fd_rule_num;
+
+ u16 wanted_umv_size;
+ /* max available unicast mac vlan space */
+ u16 max_umv_size;
+ /* private unicast mac vlan space, it's the same for PF and its VFs */
+ u16 priv_umv_size;
+ /* unicast mac vlan space shared by PF and its VFs */
+ u16 share_umv_size;
+ struct mutex umv_mutex; /* protect share_umv_size */
};
/* VPort level vlan tag configuration for TX direction */
@@ -605,13 +736,12 @@ struct hclge_vport {
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
+ u16 used_umv_num;
+
int vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
-
- bool accept_mta_mc; /* whether to accept mta filter multicast */
- unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -626,15 +756,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr);
-int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
- u8 func_id,
- bool enable);
-int hclge_update_mta_status_common(struct hclge_vport *vport,
- unsigned long *status,
- u16 idx,
- u16 count,
- bool update_filter);
-
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f34851c91eb3..04462a347a94 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -233,43 +233,6 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
return 0;
}
-static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport,
- u8 *msg, u8 idx, bool is_end)
-{
-#define HCLGE_MTA_STATUS_MSG_SIZE 13
-#define HCLGE_MTA_STATUS_MSG_BITS \
- (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
-#define HCLGE_MTA_STATUS_MSG_END_BITS \
- (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS)
- unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)];
- u16 tbl_cnt;
- u16 tbl_idx;
- u8 msg_ofs;
- u8 msg_bit;
-
- tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS :
- HCLGE_MTA_STATUS_MSG_BITS;
-
- /* set msg field */
- msg_ofs = 0;
- msg_bit = 0;
- memset(status, 0, sizeof(status));
- for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) {
- if (msg[msg_ofs] & BIT(msg_bit))
- set_bit(tbl_idx, status);
-
- msg_bit++;
- if (msg_bit == BITS_PER_BYTE) {
- msg_bit = 0;
- msg_ofs++;
- }
- }
-
- return hclge_update_mta_status_common(vport,
- status, idx * HCLGE_MTA_STATUS_MSG_BITS,
- tbl_cnt, is_end);
-}
-
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -284,27 +247,6 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
status = hclge_add_mc_addr_common(vport, mac_addr);
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
status = hclge_rm_mc_addr_common(vport, mac_addr);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
- u8 func_id = vport->vport_id;
- bool enable = mbx_req->msg[2];
-
- status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) {
- resp_data = hdev->mta_mac_sel_type;
- resp_len = sizeof(u8);
- gen_resp = true;
- status = 0;
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) {
- /* mta status update msg format
- * msg[2.6 : 2.0] msg index
- * msg[2.7] msg is end
- * msg[15 : 3] mta status bits[103 : 0]
- */
- bool is_end = (mbx_req->msg[2] & 0x80) ? true : false;
-
- status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3],
- mbx_req->msg[2] & 0x7F,
- is_end);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 398971a062f4..24b1f2a0c32a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -10,8 +10,6 @@
#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \
SUPPORTED_TP | \
- SUPPORTED_Pause | \
- SUPPORTED_Asym_Pause | \
PHY_10BT_FEATURES | \
PHY_100BT_FEATURES | \
PHY_1000BT_FEATURES)
@@ -213,7 +211,7 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
}
phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
- phydev->advertising = phydev->supported;
+ phy_support_asym_pause(phydev);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 5db70a1451c5..aa5cb9834d73 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -172,7 +172,7 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
u8 pfc_bitmap)
{
struct hclge_desc desc;
- struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data;
+ struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
@@ -188,11 +188,12 @@ static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
struct hclge_cfg_pause_param_cmd *pause_param;
struct hclge_desc desc;
- pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
ether_addr_copy(pause_param->mac_addr, addr);
+ ether_addr_copy(pause_param->mac_addr_extra, addr);
pause_param->pause_trans_gap = pause_trans_gap;
pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
@@ -207,7 +208,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
u8 trans_gap;
int ret;
- pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
@@ -297,7 +298,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
}
static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
- u8 q_id, u16 qs_id)
+ u16 q_id, u16 qs_id)
{
struct hclge_nq_to_qs_link_cmd *map;
struct hclge_desc desc;
@@ -1279,10 +1280,15 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
return 0;
}
-void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
+int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
u8 i, bit_map = 0;
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ if (num_tc > hdev->vport[i].alloc_tqps)
+ return -EINVAL;
+ }
+
hdev->tm_info.num_tc = num_tc;
for (i = 0; i < hdev->tm_info.num_tc; i++)
@@ -1296,6 +1302,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hdev->hw_tc_map = bit_map;
hclge_tm_schd_info_init(hdev);
+
+ return 0;
}
int hclge_tm_init_hw(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index dd4c194747c1..25eef13a3e14 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -106,6 +106,10 @@ struct hclge_cfg_pause_param_cmd {
u8 pause_trans_gap;
u8 rsvd;
__le16 pause_trans_time;
+ u8 rsvd1[6];
+ /* extra mac address to do double check for pause frame */
+ u8 mac_addr_extra[ETH_ALEN];
+ u16 rsvd2;
};
struct hclge_pfc_stats_cmd {
@@ -128,7 +132,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_pause_setup_hw(struct hclge_dev *hdev);
int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
-void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index fb471fe2c494..0d3b445f6799 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -132,9 +132,9 @@ static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- break;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ return 0;
case HCLGEVF_TYPE_CRQ:
reg_val = (u32)ring->desc_dma_addr;
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
@@ -145,12 +145,12 @@ static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- break;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+ return 0;
+ default:
+ return -EINVAL;
}
-
- return 0;
}
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 19b32860309c..bc294b0c8b62 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -89,6 +89,7 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
/* Mailbox cmd */
@@ -148,7 +149,8 @@ struct hclgevf_query_res_cmd {
__le16 rsv[7];
};
-#define HCLGEVF_RSS_HASH_KEY_OFFSET 4
+#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
+#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
struct hclgevf_rss_config_cmd {
u8 hash_config;
@@ -159,11 +161,11 @@ struct hclgevf_rss_config_cmd {
struct hclgevf_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
- u8 ipv4_stcp_en;
+ u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
- u8 ipv6_stcp_en;
+ u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
u8 rsv[16];
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 9c0091f2addf..e0a86a58342c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -31,16 +31,15 @@ static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hnae3_queue *queue;
struct hclgevf_desc desc;
struct hclgevf_tqp *tqp;
int status;
int i;
- for (i = 0; i < hdev->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclgevf_tqp, q);
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
hclgevf_cmd_setup_basic_desc(&desc,
HCLGEVF_OPC_QUERY_RX_STATUS,
true);
@@ -77,17 +76,16 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_tqp *tqp;
u64 *buff = data;
int i;
- for (i = 0; i < hdev->num_tqps; i++) {
- tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
}
for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+ tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
}
@@ -96,29 +94,29 @@ static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- return hdev->num_tqps * 2;
+ return kinfo->num_tqps * 2;
}
static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
int i = 0;
- for (i = 0; i < hdev->num_tqps; i++) {
- struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
- struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
- for (i = 0; i < hdev->num_tqps; i++) {
- struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
- struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
@@ -182,7 +180,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
return 0;
}
-static int hclge_get_queue_info(struct hclgevf_dev *hdev)
+static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 8
u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
@@ -299,6 +297,9 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
client = handle->client;
+ link_state =
+ test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
+
if (link_state != hdev->hw.mac.link) {
client->ops->link_status_change(handle, !!link_state);
hdev->hw.mac.link = link_state;
@@ -385,6 +386,47 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
return -EINVAL;
}
+static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
+ const u8 hfunc, const u8 *key)
+{
+ struct hclgevf_rss_config_cmd *req;
+ struct hclgevf_desc desc;
+ int key_offset;
+ int key_size;
+ int ret;
+
+ req = (struct hclgevf_rss_config_cmd *)desc.data;
+
+ for (key_offset = 0; key_offset < 3; key_offset++) {
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG,
+ false);
+
+ req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
+ req->hash_config |=
+ (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
+
+ if (key_offset == 2)
+ key_size =
+ HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
+ else
+ key_size = HCLGEVF_RSS_HASH_KEY_NUM;
+
+ memcpy(req->hash_key,
+ key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Configure RSS config fail, status = %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
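
hclgevf_set_rss_algo_key() above streams the hash key to hardware in three
descriptors of HCLGEVF_RSS_HASH_KEY_NUM (16) bytes each, with the last chunk
truncated. Assuming the usual 40-byte Toeplitz key (HCLGEVF_RSS_KEY_SIZE),
the chunks work out as in this sketch:

#include <stdio.h>

#define RSS_KEY_SIZE      40	/* assumed HCLGEVF_RSS_KEY_SIZE */
#define KEY_BYTES_PER_CMD 16	/* HCLGEVF_RSS_HASH_KEY_NUM */

int main(void)
{
	int off;

	for (off = 0; off < 3; off++) {
		int size = (off == 2) ?
			   RSS_KEY_SIZE - 2 * KEY_BYTES_PER_CMD :
			   KEY_BYTES_PER_CMD;
		/* descriptors carry key bytes [0,16), [16,32), [32,40) */
		printf("desc %d: bytes [%d, %d)\n", off,
		       off * KEY_BYTES_PER_CMD,
		       off * KEY_BYTES_PER_CMD + size);
	}
	return 0;
}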
+
static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
return HCLGEVF_RSS_KEY_SIZE;
@@ -465,68 +507,40 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
return status;
}
-static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
- u8 *key)
+static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_config_cmd *req;
- int lkup_times = key ? 3 : 1;
- struct hclgevf_desc desc;
- int key_offset;
- int key_size;
- int status;
-
- req = (struct hclgevf_rss_config_cmd *)desc.data;
- lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
-
- for (key_offset = 0; key_offset < lkup_times; key_offset++) {
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_RSS_GENERIC_CONFIG,
- true);
- req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i;
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "failed to get hardware RSS cfg, status = %d\n",
- status);
- return status;
+ if (handle->pdev->revision >= 0x21) {
+ /* Get hash algorithm */
+ if (hfunc) {
+ switch (rss_cfg->hash_algo) {
+ case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
}
- if (key_offset == 2)
- key_size =
- HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
- else
- key_size = HCLGEVF_RSS_HASH_KEY_NUM;
-
+ /* Get the RSS Key requested by the user */
if (key)
- memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
- req->hash_key,
- key_size);
- }
-
- if (hash) {
- if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
- *hash = ETH_RSS_HASH_TOP;
- else
- *hash = ETH_RSS_HASH_UNKNOWN;
+ memcpy(key, rss_cfg->rss_hash_key,
+ HCLGEVF_RSS_KEY_SIZE);
}
- return 0;
-}
-
-static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int i;
-
if (indir)
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
- return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
+ return 0;
}
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
@@ -534,7 +548,36 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int i;
+ int ret, i;
+
+ if (handle->pdev->revision >= 0x21) {
+ /* Set the RSS Hash Key if specified by the user */
+ if (key) {
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ rss_cfg->hash_algo =
+ HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ break;
+ case ETH_RSS_HASH_XOR:
+ rss_cfg->hash_algo =
+ HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+ break;
+ case ETH_RSS_HASH_NO_CHANGE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
+ key);
+ if (ret)
+ return ret;
+
+ /* Update the shadow RSS key with the user specified key */
+ memcpy(rss_cfg->rss_hash_key, key,
+ HCLGEVF_RSS_KEY_SIZE);
+ }
+ }
/* update the shadow RSS table with user specified qids */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
@@ -544,6 +587,193 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
return hclgevf_set_rss_indir_table(hdev);
}
+static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+{
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
+
+ if (nfc->data & RXH_L4_B_2_3)
+ hash_sets |= HCLGEVF_D_PORT_BIT;
+ else
+ hash_sets &= ~HCLGEVF_D_PORT_BIT;
+
+ if (nfc->data & RXH_IP_SRC)
+ hash_sets |= HCLGEVF_S_IP_BIT;
+ else
+ hash_sets &= ~HCLGEVF_S_IP_BIT;
+
+ if (nfc->data & RXH_IP_DST)
+ hash_sets |= HCLGEVF_D_IP_BIT;
+ else
+ hash_sets &= ~HCLGEVF_D_IP_BIT;
+
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
+ hash_sets |= HCLGEVF_V_TAG_BIT;
+
+ return hash_sets;
+}
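
hclgevf_get_rss_hash_bits() above folds the ethtool RXH_* request into the
per-flow-type tuple byte written to hardware. A standalone sketch of that
folding, using placeholder bit positions (the driver's real HCLGEVF_*_BIT
values are defined elsewhere in hclgevf_main.h and may differ):

#include <stdio.h>

/* Placeholder bit assignments, for illustration only */
#define S_PORT_BIT 0x01
#define D_PORT_BIT 0x02
#define S_IP_BIT   0x04
#define D_IP_BIT   0x08

/* RXH_* values as in include/uapi/linux/ethtool.h */
#define RXH_IP_SRC   (1 << 4)
#define RXH_IP_DST   (1 << 5)
#define RXH_L4_B_0_1 (1 << 6)
#define RXH_L4_B_2_3 (1 << 7)

static unsigned char hash_bits(unsigned int data)
{
	unsigned char sets = 0;

	if (data & RXH_L4_B_0_1)
		sets |= S_PORT_BIT;
	if (data & RXH_L4_B_2_3)
		sets |= D_PORT_BIT;
	if (data & RXH_IP_SRC)
		sets |= S_IP_BIT;
	if (data & RXH_IP_DST)
		sets |= D_IP_BIT;
	return sets;
}

int main(void)
{
	/* hash on src/dst IP plus both halves of the L4 ports */
	printf("tuple byte = 0x%02x\n",
	       hash_bits(RXH_IP_SRC | RXH_IP_DST |
			 RXH_L4_B_0_1 | RXH_L4_B_2_3));	/* 0x0f */
	return 0;
}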
+
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ u8 tuple_sets;
+ int ret;
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ tuple_sets = hclgevf_get_rss_hash_bits(nfc);
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ req->ipv4_tcp_en = tuple_sets;
+ break;
+ case TCP_V6_FLOW:
+ req->ipv6_tcp_en = tuple_sets;
+ break;
+ case UDP_V4_FLOW:
+ req->ipv4_udp_en = tuple_sets;
+ break;
+ case UDP_V6_FLOW:
+ req->ipv6_udp_en = tuple_sets;
+ break;
+ case SCTP_V4_FLOW:
+ req->ipv4_sctp_en = tuple_sets;
+ break;
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req->ipv6_sctp_en = tuple_sets;
+ break;
+ case IPV4_FLOW:
+ req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ break;
+ case IPV6_FLOW:
+ req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set rss tuple fail, status = %d\n", ret);
+ return ret;
+ }
+
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ return 0;
+}
+
+static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 tuple_sets;
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ nfc->data = 0;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ break;
+ case UDP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ break;
+ case TCP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ break;
+ case UDP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ break;
+ case SCTP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ break;
+ case SCTP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!tuple_sets)
+ return 0;
+
+ if (tuple_sets & HCLGEVF_D_PORT_BIT)
+ nfc->data |= RXH_L4_B_2_3;
+ if (tuple_sets & HCLGEVF_S_PORT_BIT)
+ nfc->data |= RXH_L4_B_0_1;
+ if (tuple_sets & HCLGEVF_D_IP_BIT)
+ nfc->data |= RXH_IP_DST;
+ if (tuple_sets & HCLGEVF_S_IP_BIT)
+ nfc->data |= RXH_IP_SRC;
+
+ return 0;
+}
+
+static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
+ struct hclgevf_rss_cfg *rss_cfg)
+{
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Configure rss input fail, status = %d\n", ret);
+ return ret;
+}
+
static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -735,138 +965,16 @@ static int hclgevf_get_queue_id(struct hnae3_queue *queue)
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hnae3_queue *queue;
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_tqp *tqp;
int i;
- for (i = 0; i < hdev->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclgevf_tqp, q);
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
}
}
-static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
-{
- u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
- int ret;
-
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
- NULL, 0, true, &resp_msg, sizeof(u8));
-
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Read mta type fail, ret=%d.\n", ret);
- return ret;
- }
-
- if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
- dev_err(&hdev->pdev->dev,
- "Read mta type invalid, resp=%d.\n", resp_msg);
- return -EINVAL;
- }
-
- hdev->mta_mac_sel_type = resp_msg;
-
- return 0;
-}
-
-static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
- const u8 *addr)
-{
- u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
- u16 high_val = addr[1] | (addr[0] << 8);
-
- return (high_val >> rsh) & 0xfff;
-}
-
-static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
- unsigned long *status)
-{
-#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
-#define HCLGEVF_MTA_STATUS_MSG_BITS \
- (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
-#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
- (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
- u16 tbl_cnt;
- u16 tbl_idx;
- u8 msg_cnt;
- u8 msg_idx;
- int ret;
-
- msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
- HCLGEVF_MTA_STATUS_MSG_BITS);
- tbl_idx = 0;
- msg_idx = 0;
- while (msg_cnt--) {
- u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
- u8 *p = &msg[1];
- u8 msg_ofs;
- u8 msg_bit;
-
- memset(msg, 0, sizeof(msg));
-
- /* set index field */
- msg[0] = 0x7F & msg_idx;
-
- /* set end flag field */
- if (msg_cnt == 0) {
- msg[0] |= 0x80;
- tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
- } else {
- tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
- }
-
- /* set status field */
- msg_ofs = 0;
- msg_bit = 0;
- while (tbl_cnt--) {
- if (test_bit(tbl_idx, status))
- p[msg_ofs] |= BIT(msg_bit);
-
- tbl_idx++;
-
- msg_bit++;
- if (msg_bit == BITS_PER_BYTE) {
- msg_bit = 0;
- msg_ofs++;
- }
- }
-
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
- msg, sizeof(msg), false, NULL, 0);
- if (ret)
- break;
-
- msg_idx++;
- }
-
- return ret;
-}
-
-static int hclgevf_update_mta_status(struct hnae3_handle *handle)
-{
- unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct net_device *netdev = hdev->nic.kinfo.netdev;
- struct netdev_hw_addr *ha;
- u16 tbl_idx;
-
- /* clear status */
- memset(mta_status, 0, sizeof(mta_status));
-
- /* update status from mc addr list */
- netdev_for_each_mc_addr(ha, netdev) {
- tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
- set_bit(tbl_idx, mta_status);
- }
-
- return hclgevf_do_update_mta_status(hdev, mta_status);
-}
-
static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1106,7 +1214,8 @@ static int hclgevf_do_reset(struct hclgevf_dev *hdev)
return status;
}
-static void hclgevf_reset_event(struct hnae3_handle *handle)
+static void hclgevf_reset_event(struct pci_dev *pdev,
+ struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -1341,8 +1450,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
{
int ret;
+ hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;
+
/* get queue configuration from PF */
- ret = hclge_get_queue_info(hdev);
+ ret = hclgevf_get_queue_info(hdev);
if (ret)
return ret;
/* get tc configuration from PF */
@@ -1395,6 +1506,39 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
rss_cfg->rss_size = hdev->rss_size_max;
+ if (hdev->pdev->revision >= 0x21) {
+ rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ netdev_rss_key_fill(rss_cfg->rss_hash_key,
+ HCLGEVF_RSS_KEY_SIZE);
+
+ ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
+
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv4_udp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv6_udp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en =
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en =
+ HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+
+ ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
+ if (ret)
+ return ret;
+ }
+
/* Initialize RSS indirect table for each vport */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
@@ -1417,12 +1561,13 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
static int hclgevf_ae_start(struct hnae3_handle *handle)
{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int i, queue_id;
- for (i = 0; i < handle->kinfo.num_tqps; i++) {
+ for (i = 0; i < kinfo->num_tqps; i++) {
/* ring enable */
- queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+ queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
if (queue_id < 0) {
dev_warn(&hdev->pdev->dev,
"Get invalid queue id, ignore it\n");
@@ -1445,12 +1590,15 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int i, queue_id;
- for (i = 0; i < hdev->num_tqps; i++) {
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
/* Ring disable */
- queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+ queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
if (queue_id < 0) {
dev_warn(&hdev->pdev->dev,
"Get invalid queue id, ignore it\n");
@@ -1619,17 +1767,22 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
ret = client->ops->init_instance(&hdev->nic);
if (ret)
- return ret;
+ goto clear_nic;
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
ret = hclgevf_init_roce_base_info(hdev);
if (ret)
- return ret;
+ goto clear_roce;
ret = rc->ops->init_instance(&hdev->roce);
if (ret)
- return ret;
+ goto clear_roce;
+
+ hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
+ 1);
}
break;
case HNAE3_CLIENT_UNIC:
@@ -1638,7 +1791,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
ret = client->ops->init_instance(&hdev->nic);
if (ret)
- return ret;
+ goto clear_nic;
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
break;
case HNAE3_CLIENT_ROCE:
if (hnae3_dev_roce_supported(hdev)) {
@@ -1649,15 +1804,29 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
if (hdev->roce_client && hdev->nic_client) {
ret = hclgevf_init_roce_base_info(hdev);
if (ret)
- return ret;
+ goto clear_roce;
ret = client->ops->init_instance(&hdev->roce);
if (ret)
- return ret;
+ goto clear_roce;
}
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+ break;
+ default:
+ return -EINVAL;
}
return 0;
+
+clear_nic:
+ hdev->nic_client = NULL;
+ hdev->nic.client = NULL;
+ return ret;
+clear_roce:
+ hdev->roce_client = NULL;
+ hdev->roce.client = NULL;
+ return ret;
}
static void hclgevf_uninit_client_instance(struct hnae3_client *client,
@@ -1666,13 +1835,19 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
struct hclgevf_dev *hdev = ae_dev->priv;
/* un-init roce, if it exists */
- if (hdev->roce_client)
+ if (hdev->roce_client) {
hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
+ hdev->roce_client = NULL;
+ hdev->roce.client = NULL;
+ }
/* un-init nic/unic, if this was not called by roce client */
- if ((client->ops->uninit_instance) &&
- (client->type != HNAE3_CLIENT_ROCE))
+ if (client->ops->uninit_instance && hdev->nic_client &&
+ client->type != HNAE3_CLIENT_ROCE) {
client->ops->uninit_instance(&hdev->nic, 0);
+ hdev->nic_client = NULL;
+ hdev->nic.client = NULL;
+ }
}
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
@@ -1839,14 +2014,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- /* Initialize mta type for this VF */
- ret = hclgevf_cfg_func_mta_type(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize MTA type\n", ret);
- goto err_config;
- }
-
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -1943,11 +2110,11 @@ static void hclgevf_get_channels(struct hnae3_handle *handle,
}
static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
- u16 *free_tqps, u16 *max_rss_size)
+ u16 *alloc_tqps, u16 *max_rss_size)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- *free_tqps = 0;
+ *alloc_tqps = hdev->num_tqps;
*max_rss_size = hdev->rss_size_max;
}
@@ -1979,6 +2146,14 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
hdev->hw.mac.duplex = duplex;
}
+static void hclgevf_get_media_type(struct hnae3_handle *handle,
+ u8 *media_type)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ if (media_type)
+ *media_type = hdev->hw.mac.media_type;
+}
+
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
@@ -1998,7 +2173,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.rm_uc_addr = hclgevf_rm_uc_addr,
.add_mc_addr = hclgevf_add_mc_addr,
.rm_mc_addr = hclgevf_rm_mc_addr,
- .update_mta_status = hclgevf_update_mta_status,
.get_stats = hclgevf_get_stats,
.update_stats = hclgevf_update_stats,
.get_strings = hclgevf_get_strings,
@@ -2007,6 +2181,8 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_rss_indir_size = hclgevf_get_rss_indir_size,
.get_rss = hclgevf_get_rss,
.set_rss = hclgevf_set_rss,
+ .get_rss_tuple = hclgevf_get_rss_tuple,
+ .set_rss_tuple = hclgevf_set_rss_tuple,
.get_tc_size = hclgevf_get_tc_size,
.get_fw_version = hclgevf_get_fw_version,
.set_vlan_filter = hclgevf_set_vlan_filter,
@@ -2016,6 +2192,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
.get_status = hclgevf_get_status,
.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
+ .get_media_type = hclgevf_get_media_type,
};
static struct hnae3_ae_algo ae_algovf = {
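The reworked hclgevf loops above recover the driver-private tqp from the
generic queue pointer held in kinfo via container_of. A standalone sketch
of that pattern (the struct layout here is illustrative, not the driver's
real definition):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct hnae3_queue { int queue_id; };

	struct tqp_example {
		unsigned long stats[4];
		struct hnae3_queue q;	/* embedded generic queue */
	};

	static void reset_stats(struct hnae3_queue *queue)
	{
		struct tqp_example *tqp =
			container_of(queue, struct tqp_example, q);
		unsigned int i;

		for (i = 0; i < 4; i++)
			tqp->stats[i] = 0;	/* same effect as the memset above */
	}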
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index b23ba171473c..aed241e8ffab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -46,9 +46,13 @@
#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
#define HCLGEVF_RSS_CFG_TBL_NUM \
(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
-
-#define HCLGEVF_MTA_TBL_SIZE 4096
-#define HCLGEVF_MTA_TYPE_SEL_MAX 4
+#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+#define HCLGEVF_D_PORT_BIT BIT(0)
+#define HCLGEVF_S_PORT_BIT BIT(1)
+#define HCLGEVF_D_IP_BIT BIT(2)
+#define HCLGEVF_S_IP_BIT BIT(3)
+#define HCLGEVF_V_TAG_BIT BIT(4)
/* states of hclgevf device & tasks */
enum hclgevf_states {
@@ -66,6 +70,7 @@ enum hclgevf_states {
#define HCLGEVF_MPF_ENBALE 1
struct hclgevf_mac {
+ u8 media_type;
u8 mac_addr[ETH_ALEN];
int link;
u8 duplex;
@@ -108,12 +113,24 @@ struct hclgevf_cfg {
u32 numa_node_map;
};
+struct hclgevf_rss_tuple_cfg {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+};
+
struct hclgevf_rss_cfg {
u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
u32 hash_algo;
u32 rss_size;
u8 hw_tc_map;
u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+ struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};
struct hclgevf_misc_vector {
@@ -156,8 +173,6 @@ struct hclgevf_dev {
u16 *vector_status;
int *vector_irq;
- bool accept_mta_mc; /* whether to accept mta filter multicast */
- u8 mta_mac_sel_type;
bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
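The new RSS tuple masks are unions of the per-field bits defined beneath
them. A compile-time check of that decomposition (BIT/GENMASK re-derived
here so the snippet stands alone; V_TAG is presumably the SCTP
verification tag):

	#define BIT(n)		(1UL << (n))
	#define GENMASK(h, l) \
		(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

	_Static_assert(GENMASK(3, 0) ==
		       (BIT(0) | BIT(1) | BIT(2) | BIT(3)),
		       "TUPLE_OTHER = dst/src port + dst/src IP bits");
	_Static_assert(GENMASK(4, 0) == (GENMASK(3, 0) | BIT(4)),
		       "TUPLE_SCTP additionally includes the V_TAG bit");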
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index c8c7ad2eff77..9b5a68b65432 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
/* Wait for link to drop */
time = jiffies + (HZ / 10);
do {
- if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+ if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
break;
if (!in_interrupt())
schedule_timeout_interruptible(1);
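The hp100 one-liner fixes a classic bit-test bug: for a byte-sized
status, ~(status & FLAG) is virtually always non-zero, so the old loop
broke out immediately whether or not the link had dropped; !(status &
FLAG) tests "flag clear" as intended. A small demonstration:

	#include <assert.h>

	int main(void)
	{
		unsigned char flag = 0x40;	/* link-up bit */
		unsigned char status = 0x42;	/* link still up */

		assert(~(status & flag) != 0);	/* old test: true anyway */
		assert(!(status & flag) == 0);	/* new test: correctly false */

		status = 0x02;			/* link dropped */
		assert(!(status & flag) == 1);	/* new test fires only now */
		return 0;
	}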
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 0f5563f3b779..097b5502603f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -58,6 +58,8 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
+ HINIC_PORT_CMD_SET_TSO = 112,
+
HINIC_PORT_CMD_GET_CAP = 170,
};
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index cb239627770f..967c993d5303 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -70,8 +70,6 @@
#define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
-#define TX_MAX_MSS_DEFAULT 0x3E00
-
enum sq_wqe_type {
SQ_NORMAL_WQE = 0,
};
@@ -494,33 +492,16 @@ static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
HINIC_SQ_CTRL_SET(ctrl_size, LEN);
- ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT,
- QUEUE_INFO_MSS);
+ ctrl->queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT,
+ QUEUE_INFO_MSS) |
+ HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC);
}
static void sq_prepare_task(struct hinic_sq_task *task)
{
- task->pkt_info0 =
- HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
- INNER_L3TYPE) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE,
- VLAN_OFFLOAD) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG);
-
- task->pkt_info1 =
- HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) |
- HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) |
- HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN);
-
- task->pkt_info2 =
- HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) |
- HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) |
- HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN,
- TUNNEL_L4TYPE) |
- HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
- OUTER_L3TYPE);
+ task->pkt_info0 = 0;
+ task->pkt_info1 = 0;
+ task->pkt_info2 = 0;
task->ufo_v6_identify = 0;
@@ -529,6 +510,86 @@ static void sq_prepare_task(struct hinic_sq_task *task)
task->zero_pad = 0;
}
+void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len)
+{
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN);
+}
+
+void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len)
+{
+ task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
+ HINIC_SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
+}
+
+void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len)
+{
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
+ task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
+}
+
+void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+ enum hinic_l4_offload_type l4_type,
+ u32 tunnel_len)
+{
+ task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
+ HINIC_SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
+}
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len, u32 offset)
+{
+ u32 tcp_udp_cs = 0, sctp = 0;
+ u32 mss = HINIC_MSS_DEFAULT;
+
+ if (l4_offload == TCP_OFFLOAD_ENABLE ||
+ l4_offload == UDP_OFFLOAD_ENABLE)
+ tcp_udp_cs = 1;
+ else if (l4_offload == SCTP_OFFLOAD_ENABLE)
+ sctp = 1;
+
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
+ task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+ *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
+ HINIC_SQ_CTRL_SET(tcp_udp_cs, QUEUE_INFO_TCPUDP_CS) |
+ HINIC_SQ_CTRL_SET(sctp, QUEUE_INFO_SCTP);
+
+ *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+ *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
+}
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len, u32 offset, u32 ip_ident, u32 mss)
+{
+ u32 tso = 0, ufo = 0;
+
+ if (l4_offload == TCP_OFFLOAD_ENABLE)
+ tso = 1;
+ else if (l4_offload == UDP_OFFLOAD_ENABLE)
+ ufo = 1;
+
+ task->ufo_v6_identify = ip_ident;
+
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
+ task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG);
+ task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+ *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
+ HINIC_SQ_CTRL_SET(tso, QUEUE_INFO_TSO) |
+ HINIC_SQ_CTRL_SET(ufo, QUEUE_INFO_UFO) |
+ HINIC_SQ_CTRL_SET(!!l4_offload, QUEUE_INFO_TCPUDP_CS);
+
+ /* set MSS value */
+ *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+ *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
+}
+
/**
* hinic_sq_prepare_wqe - prepare wqe before insert to the queue
* @sq: send queue
@@ -613,6 +674,16 @@ struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
}
/**
+ * hinic_sq_return_wqe - return the wqe to the sq
+ * @sq: send queue
+ * @wqe_size: the size of the wqe
+ **/
+void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size)
+{
+ hinic_return_wqe(sq->wq, wqe_size);
+}
+
+/**
* hinic_sq_write_wqe - write the wqe to the sq
* @sq: send queue
* @prod_idx: pi of the wqe
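hinic_set_cs_inner_l4() and hinic_set_tso_inner_l4() both clear the MSS
field before re-setting it, because queue_info is accumulated with |=
and a plain OR could leave stale bits behind. A minimal sketch of that
read-modify-write (field layout copied from the header):

	#define MSS_SHIFT	13
	#define MSS_MASK	0x3FFF

	static unsigned int update_mss(unsigned int queue_info, unsigned int mss)
	{
		queue_info &= ~(MSS_MASK << MSS_SHIFT);		/* CLEAR */
		queue_info |= (mss & MSS_MASK) << MSS_SHIFT;	/* SET */
		return queue_info;
	}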
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index 6c84f83ec283..a0dc63a4bfc7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -149,6 +149,31 @@ int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
+void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len);
+
+void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len);
+
+void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+ enum hinic_l3_offload_type l3_type,
+ u32 network_len);
+
+void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+ enum hinic_l4_offload_type l4_type,
+ u32 tunnel_len);
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
+ u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len, u32 offset);
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
+ u32 *queue_info,
+ enum hinic_l4_offload_type l4_offload,
+ u32 l4_len,
+ u32 offset, u32 ip_ident, u32 mss);
+
void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
int nr_sges);
@@ -159,6 +184,8 @@ void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
unsigned int wqe_size, u16 *prod_idx);
+void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size);
+
void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
struct hinic_sq_wqe *wqe, struct sk_buff *skb,
unsigned int wqe_size);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 3e3181c089bd..f92f1bf3901a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -775,6 +775,20 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
}
/**
+ * hinic_return_wqe - return the wqe when transmit failed
+ * @wq: wq to return wqe
+ * @wqe_size: wqe size
+ **/
+void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
+{
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+
+ atomic_sub(num_wqebbs, &wq->prod_idx);
+
+ atomic_add(num_wqebbs, &wq->delta);
+}
+
+/**
* hinic_put_wqe - return the wqe place to use for a new wqe
* @wq: wq to return wqe
* @wqe_size: wqe size
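hinic_return_wqe() hands back exactly the number of work-queue basic
blocks (wqebbs) the wqe occupied, i.e. its size rounded up to whole
blocks. A sketch of that computation with an open-coded ALIGN:

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

	static unsigned int wqe_to_wqebbs(unsigned int wqe_size,
					  unsigned int wqebb_size)
	{
		return ALIGN_UP(wqe_size, wqebb_size) / wqebb_size;
	}
	/* e.g. wqe_to_wqebbs(96, 64) == 2: a 96-byte wqe spans two blocks */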
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 9c030a0f035e..9b66545ba563 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -104,6 +104,8 @@ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
u16 *prod_idx);
+void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size);
+
void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size);
struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index bc73485483c5..9754d6ed5f4a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -62,19 +62,33 @@
(((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \
& HINIC_CMDQ_WQE_HEADER_##member##_MASK)
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
-#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16
-#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22
-#define HINIC_SQ_CTRL_LEN_SHIFT 29
-
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
-#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F
-#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1
-#define HINIC_SQ_CTRL_LEN_MASK 0x3
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF
+#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
+#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16
+#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22
+#define HINIC_SQ_CTRL_LEN_SHIFT 29
+
+#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
+#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F
+#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1
+#define HINIC_SQ_CTRL_LEN_MASK 0x3
+
+#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2
+#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10
+#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11
+#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12
+#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
+#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27
+#define HINIC_SQ_CTRL_QUEUE_INFO_UC_SHIFT 28
+#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29
+
+#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFF
+#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF
+#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_UC_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7
#define HINIC_SQ_CTRL_SET(val, member) \
(((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \
@@ -84,6 +98,10 @@
(((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \
& HINIC_SQ_CTRL_##member##_MASK)
+#define HINIC_SQ_CTRL_CLEAR(val, member) \
+ ((u32)(val) & (~(HINIC_SQ_CTRL_##member##_MASK \
+ << HINIC_SQ_CTRL_##member##_SHIFT)))
+
#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0
#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8
#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10
@@ -108,28 +126,28 @@
/* 8 bits reserved */
#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_SHIFT 16
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_SHIFT 24
+#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_SHIFT 16
+#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_SHIFT 24
/* 8 bits reserved */
#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_MASK 0xFF
#define HINIC_SQ_TASK_INFO1_SET(val, member) \
(((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \
HINIC_SQ_TASK_INFO1_##member##_SHIFT)
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_SHIFT 0
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_SHIFT 12
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 19
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT 0
+#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_SHIFT 8
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16
/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 22
+#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 24
/* 8 bits reserved */
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_MASK 0xFFF
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_MASK 0x7F
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x3
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_MASK 0xFF
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x7
/* 1 bit reserved */
#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3
/* 8 bits reserved */
@@ -187,12 +205,15 @@
sizeof(struct hinic_sq_task) + \
(nr_sges) * sizeof(struct hinic_sq_bufdesc))
-#define HINIC_SCMD_DATA_LEN 16
+#define HINIC_SCMD_DATA_LEN 16
+
+#define HINIC_MAX_SQ_BUFDESCS 17
-#define HINIC_MAX_SQ_BUFDESCS 17
+#define HINIC_SQ_WQE_MAX_SIZE 320
+#define HINIC_RQ_WQE_SIZE 32
-#define HINIC_SQ_WQE_MAX_SIZE 320
-#define HINIC_RQ_WQE_SIZE 32
+#define HINIC_MSS_DEFAULT 0x3E00
+#define HINIC_MSS_MIN 0x50
enum hinic_l4offload_type {
HINIC_L4_OFF_DISABLE = 0,
@@ -211,6 +232,26 @@ enum hinic_pkt_parsed {
HINIC_PKT_PARSED = 1,
};
+enum hinic_l3_offload_type {
+ L3TYPE_UNKNOWN = 0,
+ IPV6_PKT = 1,
+ IPV4_PKT_NO_CHKSUM_OFFLOAD = 2,
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3,
+};
+
+enum hinic_l4_offload_type {
+ OFFLOAD_DISABLE = 0,
+ TCP_OFFLOAD_ENABLE = 1,
+ SCTP_OFFLOAD_ENABLE = 2,
+ UDP_OFFLOAD_ENABLE = 3,
+};
+
+enum hinic_l4_tunnel_type {
+ NOT_TUNNEL,
+ TUNNEL_UDP_NO_CSUM,
+ TUNNEL_UDP_CSUM,
+};
+
enum hinic_outer_l3type {
HINIC_OUTER_L3TYPE_UNKNOWN = 0,
HINIC_OUTER_L3TYPE_IPV6 = 1,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 09e9da10b786..fdf2bdb6b0d0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
stats->tx_errors = nic_tx_stats->tx_dropped;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int i, num_qps;
-
- num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
- for (i = 0; i < num_qps; i++) {
- struct hinic_txq *txq = &nic_dev->txqs[i];
- struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
- napi_schedule(&txq->napi);
- napi_schedule(&rxq->napi);
- }
-}
-#endif
-
static const struct net_device_ops hinic_netdev_ops = {
.ndo_open = hinic_open,
.ndo_stop = hinic_close,
@@ -818,14 +801,12 @@ static const struct net_device_ops hinic_netdev_ops = {
.ndo_start_xmit = hinic_xmit_frame,
.ndo_tx_timeout = hinic_tx_timeout,
.ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = hinic_netpoll,
-#endif
};
static void netdev_features_init(struct net_device *netdev)
{
- netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
netdev->vlan_features = netdev->hw_features;
@@ -883,6 +864,20 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
*out_size = sizeof(*ret_link_status);
}
+static int set_features(struct hinic_dev *nic_dev,
+ netdev_features_t pre_features,
+ netdev_features_t features, bool force_change)
+{
+ netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
+ int err = 0;
+
+ if (changed & NETIF_F_TSO)
+ err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
+ HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
+
+ return err;
+}
+
/**
* nic_dev_init - Initialize the NIC device
* @pdev: the NIC pci device
@@ -983,7 +978,12 @@ static int nic_dev_init(struct pci_dev *pdev)
hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
nic_dev, link_status_event_handler);
+ err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
+ if (err)
+ goto err_set_features;
+
SET_NETDEV_DEV(netdev, &pdev->dev);
+
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
@@ -993,6 +993,7 @@ static int nic_dev_init(struct pci_dev *pdev)
return 0;
err_reg_netdev:
+err_set_features:
hinic_hwdev_cb_unregister(nic_dev->hwdev,
HINIC_MGMT_MSG_CMD_LINK_STATUS);
cancel_work_sync(&rx_mode_work->work);
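set_features() uses the usual changed-bits idiom: XOR of the old and new
feature masks yields exactly the toggled bits, and force_change widens
that to all-ones so every feature is programmed once at init time. In
isolation:

	typedef unsigned long long features_t;

	static features_t changed_bits(features_t old, features_t new_f, int force)
	{
		return force ? ~0ULL : old ^ new_f;
	}
	/* changed_bits(0x6, 0x3, 0) == 0x5: bits 0 and 2 toggled */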
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 4d4e3f05fb5f..7575a7d3bd9f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -377,3 +377,35 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
return 0;
}
+
+/**
+ * hinic_port_set_tso - set port tso configuration
+ * @nic_dev: nic device
+ * @state: the tso state to set
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct hinic_tso_config tso_cfg = {0};
+ struct pci_dev *pdev = hwif->pdev;
+ u16 out_size;
+ int err;
+
+ tso_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ tso_cfg.tso_en = state;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_TSO,
+ &tso_cfg, sizeof(tso_cfg),
+ &tso_cfg, &out_size);
+ if (err || out_size != sizeof(tso_cfg) || tso_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set port tso, ret = %d\n",
+ tso_cfg.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index 9404365195dd..f6e3220fe28f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -72,6 +72,11 @@ enum hinic_speed {
HINIC_SPEED_UNKNOWN = 0xFF,
};
+enum hinic_tso_state {
+ HINIC_TSO_DISABLE = 0,
+ HINIC_TSO_ENABLE = 1,
+};
+
struct hinic_port_mac_cmd {
u8 status;
u8 version;
@@ -167,6 +172,17 @@ struct hinic_port_cap {
u8 rsvd2[3];
};
+struct hinic_tso_config {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u8 tso_en;
+ u8 resv2[3];
+};
+
int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id);
@@ -195,4 +211,6 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev,
int hinic_port_get_cap(struct hinic_dev *nic_dev,
struct hinic_port_cap *port_cap);
+int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c5fca0356c9c..11e73e67358d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -26,6 +26,13 @@
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
#include "hinic_common.h"
#include "hinic_hw_if.h"
@@ -45,9 +52,31 @@
#define CI_UPDATE_NO_PENDING 0
#define CI_UPDATE_NO_COALESC 0
-#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
+#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
-#define MIN_SKB_LEN 64
+#define MIN_SKB_LEN 17
+
+#define MAX_PAYLOAD_OFFSET 221
+#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))
+
+union hinic_l3 {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union hinic_l4 {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+};
+
+enum hinic_offload_type {
+ TX_OFFLOAD_TSO = BIT(0),
+ TX_OFFLOAD_CSUM = BIT(1),
+ TX_OFFLOAD_VLAN = BIT(2),
+ TX_OFFLOAD_INVALID = BIT(3),
+};
/**
* hinic_txq_clean_stats - Clean the statistics of specific queue
@@ -175,18 +204,263 @@ static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
DMA_TO_DEVICE);
}
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
+ union hinic_l4 *l4,
+ enum hinic_offload_type offload_type,
+ enum hinic_l3_offload_type *l3_type,
+ u8 *l4_proto)
+{
+ u8 *exthdr;
+
+ if (ip->v4->version == 4) {
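+ /* the 4-bit version field sits at the same offset in the IPv4
+ * and IPv6 headers, so reading it through the v4 view of the
+ * union is safe for either protocol
+ */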
+ *l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
+ IPV4_PKT_NO_CHKSUM_OFFLOAD :
+ IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ *l4_proto = ip->v4->protocol;
+ } else if (ip->v4->version == 6) {
+ *l3_type = IPV6_PKT;
+ exthdr = ip->hdr + sizeof(*ip->v6);
+ *l4_proto = ip->v6->nexthdr;
+ if (exthdr != l4->hdr) {
+ int start = exthdr - skb->data;
+ __be16 frag_off;
+
+ ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
+ }
+ } else {
+ *l3_type = L3TYPE_UNKNOWN;
+ *l4_proto = 0;
+ }
+}
+
+static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
+ enum hinic_offload_type offload_type, u8 l4_proto,
+ enum hinic_l4_offload_type *l4_offload,
+ u32 *l4_len, u32 *offset)
+{
+ *l4_offload = OFFLOAD_DISABLE;
+ *offset = 0;
+ *l4_len = 0;
+
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ *l4_offload = TCP_OFFLOAD_ENABLE;
+ /* doff counts the TCP header length in 4-byte units */
+ *l4_len = l4->tcp->doff * 4;
+ *offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_UDP:
+ *l4_offload = UDP_OFFLOAD_ENABLE;
+ *l4_len = sizeof(struct udphdr);
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_SCTP:
+ /* only csum offload supports sctp */
+ if (offload_type != TX_OFFLOAD_CSUM)
+ break;
+
+ *l4_offload = SCTP_OFFLOAD_ENABLE;
+ *l4_len = sizeof(struct sctphdr);
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
+{
+ return (ip->v4->version == 4) ?
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+ csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
+
+static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ u32 offset, l4_len, ip_identify, network_hdr_len;
+ enum hinic_l3_offload_type l3_offload;
+ enum hinic_l4_offload_type l4_offload;
+ union hinic_l3 ip;
+ union hinic_l4 l4;
+ u8 l4_proto;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_cow_head(skb, 0) < 0)
+ return -EPROTONOSUPPORT;
+
+ if (skb->encapsulation) {
+ u32 gso_type = skb_shinfo(skb)->gso_type;
+ u32 tunnel_type = 0;
+ u32 l4_tunnel_len;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+ } else if (ip.v4->version == 6) {
+ l3_offload = IPV6_PKT;
+ } else {
+ l3_offload = L3TYPE_UNKNOWN;
+ }
+
+ hinic_task_set_outter_l3(task, l3_offload,
+ skb_network_header_len(skb));
+
+ if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+ tunnel_type = TUNNEL_UDP_CSUM;
+ } else if (gso_type & SKB_GSO_UDP_TUNNEL) {
+ tunnel_type = TUNNEL_UDP_NO_CSUM;
+ }
+
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ }
+
+ /* initialize inner IP header fields */
+ if (ip.v4->version == 4)
+ ip.v4->tot_len = 0;
+ else
+ ip.v6->payload_len = 0;
+
+ get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
+ &l4_proto);
+
+ hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);
+
+ ip_identify = 0;
+ if (l4_proto == IPPROTO_TCP)
+ l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+ get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
+ &l4_len, &offset);
+
+ hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
+ ip_identify, skb_shinfo(skb)->gso_size);
+
+ return 1;
+}
+
+static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ enum hinic_l4_offload_type l4_offload;
+ u32 offset, l4_len, network_hdr_len;
+ enum hinic_l3_offload_type l3_type;
+ union hinic_l3 ip;
+ union hinic_l4 l4;
+ u8 l4_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->encapsulation) {
+ u32 l4_tunnel_len;
+
+ ip.hdr = skb_network_header(skb);
+
+ if (ip.v4->version == 4)
+ l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
+ else if (ip.v4->version == 6)
+ l3_type = IPV6_PKT;
+ else
+ l3_type = L3TYPE_UNKNOWN;
+
+ hinic_task_set_outter_l3(task, l3_type,
+ skb_network_header_len(skb));
+
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+
+ hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
+ l4_tunnel_len);
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ }
+
+ get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
+ &l4_proto);
+
+ hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
+
+ get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
+ &l4_len, &offset);
+
+ hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);
+
+ return 1;
+}
+
+static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
+ u32 *queue_info)
+{
+ enum hinic_offload_type offload = 0;
+ int enabled;
+
+ enabled = offload_tso(task, queue_info, skb);
+ if (enabled > 0) {
+ offload |= TX_OFFLOAD_TSO;
+ } else if (enabled == 0) {
+ enabled = offload_csum(task, queue_info, skb);
+ if (enabled)
+ offload |= TX_OFFLOAD_CSUM;
+ } else {
+ return -EPROTONOSUPPORT;
+ }
+
+ if (offload)
+ hinic_task_set_l2hdr(task, skb_network_offset(skb));
+
+ /* the payload offset must not exceed MAX_PAYLOAD_OFFSET (221) */
+ if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
+ MAX_PAYLOAD_OFFSET) {
+ return -EPROTONOSUPPORT;
+ }
+
+ /* clamp the MSS to at least HINIC_MSS_MIN (80) */
+ if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
+ *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+ *queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
+ }
+
+ return 0;
+}
+
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
+ u16 prod_idx, q_id = skb->queue_mapping;
struct netdev_queue *netdev_txq;
int nr_sges, err = NETDEV_TX_OK;
struct hinic_sq_wqe *sq_wqe;
unsigned int wqe_size;
struct hinic_txq *txq;
struct hinic_qp *qp;
- u16 prod_idx;
- txq = &nic_dev->txqs[skb->queue_mapping];
+ txq = &nic_dev->txqs[q_id];
qp = container_of(txq->sq, struct hinic_qp, sq);
if (skb->len < MIN_SKB_LEN) {
@@ -236,15 +510,23 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
process_sq_wqe:
hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
+ if (err)
+ goto offload_error;
+
hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
flush_skbs:
- netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping);
+ netdev_txq = netdev_get_tx_queue(netdev, q_id);
if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
return err;
+offload_error:
+ hinic_sq_return_wqe(txq->sq, wqe_size);
+ tx_unmap_skb(nic_dev, skb, txq->sges);
+
skb_error:
dev_kfree_skb_any(skb);
@@ -252,7 +534,8 @@ update_error_stats:
u64_stats_update_begin(&txq->txq_stats.syncp);
txq->txq_stats.tx_dropped++;
u64_stats_update_end(&txq->txq_stats.syncp);
- return err;
+
+ return NETDEV_TX_OK;
}
/**
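In get_inner_l4_info() above, the TCP payload offset handed to hardware
is the transport-header offset plus the header length (doff in 4-byte
units), and hinic_tx_offload() rejects anything beyond
MAX_PAYLOAD_OFFSET. Worked through for a plain Ethernet + IPv4 frame:

	#include <assert.h>

	#define MAX_PAYLOAD_OFFSET 221

	int main(void)
	{
		unsigned int transport_off = 14 + 20;	/* Ethernet + IPv4 header */
		unsigned int doff = 5;			/* TCP header, no options */
		unsigned int offset = transport_off + doff * 4;

		assert(offset == 54);			/* start of TCP payload */
		assert(offset <= MAX_PAYLOAD_OFFSET);
		return 0;
	}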
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index dc983450354b..35f6291a3672 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG;
#define RX_AREA_END 0x0fc00
static int ether1_open(struct net_device *dev);
-static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t ether1_interrupt(int irq, void *dev_id);
static int ether1_close(struct net_device *dev);
static void ether1_setmulticastlist(struct net_device *dev);
@@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int
+static netdev_tx_t
ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
{
int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f00a1dc2128c..2f7ae118217f 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -347,7 +347,7 @@ static const char init_setup[] =
0x7f /* *multi IA */ };
static int i596_open(struct net_device *dev);
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
@@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev)
}
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct i596_private *lp = netdev_priv(dev);
struct tx_cmd *tx_cmd;
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 8bb15a8c2a40..1a86184d44c0 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -121,7 +121,8 @@ static int sun3_82586_probe1(struct net_device *dev,int ioaddr);
static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
static int sun3_82586_open(struct net_device *dev);
static int sun3_82586_close(struct net_device *dev);
-static int sun3_82586_send_packet(struct sk_buff *,struct net_device *);
+static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
+ struct net_device *);
static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void sun3_82586_timeout(struct net_device *dev);
@@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev)
* send frame
*/
-static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
{
int len,i;
#ifndef NO_NOPCOMMANDS
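Several drivers in this series (ether1, lib82596, sun3_82586, ehea,
emac, ibmvnic) are converted from int to netdev_tx_t for their xmit
handlers; the dedicated return type restricts them to
NETDEV_TX_OK/NETDEV_TX_BUSY and lets the compiler flag stray error
codes. A minimal post-conversion shape (hypothetical driver, stub
helpers only for illustration):

	#include <linux/netdevice.h>

	static bool example_hw_has_room(struct net_device *dev)
	{
		return true;	/* stub */
	}

	static void example_hw_queue_skb(struct net_device *dev,
					 struct sk_buff *skb)
	{
		dev_kfree_skb_any(skb);	/* stub: just consume the skb */
	}

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		if (!example_hw_has_room(dev)) {
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;	/* core will requeue */
		}

		example_hw_queue_skb(dev, skb);
		return NETDEV_TX_OK;
	}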
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index ba580bfae512..3baabdc89726 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -778,12 +778,11 @@ static void check_sqs(struct ehea_port *port)
{
struct ehea_swqe *swqe;
int swqe_index;
- int i, k;
+ int i;
for (i = 0; i < port->num_def_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
int ret;
- k = 0;
swqe = ehea_get_swqe(pr->qp, &swqe_index);
memset(swqe, 0, SWQE_HEADER_SIZE);
atomic_dec(&pr->swqe_avail);
@@ -921,17 +920,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
return rx;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
- struct ehea_port *port = netdev_priv(dev);
- int i;
-
- for (i = 0; i < port->num_def_qps; i++)
- napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
struct ehea_port_res *pr = param;
@@ -2038,7 +2026,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
dev_consume_skb_any(skb);
}
-static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_swqe *swqe;
@@ -2953,9 +2941,6 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_open = ehea_open,
.ndo_stop = ehea_stop,
.ndo_start_xmit = ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ehea_netpoll,
-#endif
.ndo_get_stats64 = ehea_get_stats64,
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index a0820f72b25c..5e4e37132bf2 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -125,7 +125,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
struct ehea_cq *cq;
struct h_epa epa;
u64 *cq_handle_ref, hret, rpage;
- u32 act_nr_of_entries, act_pages, counter;
+ u32 counter;
int ret;
void *vpage;
@@ -140,8 +140,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
cq->adapter = adapter;
cq_handle_ref = &cq->fw_handle;
- act_nr_of_entries = 0;
- act_pages = 0;
hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
&cq->fw_handle, &cq->epas);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 372664686309..760b2ad8e295 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -423,7 +423,7 @@ static void emac_hash_mc(struct emac_instance *dev)
{
const int regs = EMAC_XAHT_REGS(dev);
u32 *gaht_base = emac_gaht_base(dev);
- u32 gaht_temp[regs];
+ u32 gaht_temp[EMAC_XAHT_MAX_REGS];
struct netdev_hw_addr *ha;
int i;
@@ -1409,7 +1409,7 @@ static inline u16 emac_tx_csum(struct emac_instance *dev,
return 0;
}
-static inline int emac_xmit_finish(struct emac_instance *dev, int len)
+static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len)
{
struct emac_regs __iomem *p = dev->emacp;
struct net_device *ndev = dev->ndev;
@@ -1436,7 +1436,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len)
}
/* Tx lock BH */
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
unsigned int len = skb->len;
@@ -1494,7 +1494,8 @@ static inline int emac_xmit_split(struct emac_instance *dev, int slot,
}
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
-static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t
+emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -2677,12 +2678,17 @@ static int emac_init_phy(struct emac_instance *dev)
if (of_phy_is_fixed_link(np)) {
int res = emac_dt_mdio_probe(dev);
- if (!res) {
- res = of_phy_register_fixed_link(np);
- if (res)
- mdiobus_unregister(dev->mii_bus);
+ if (res)
+ return res;
+
+ res = of_phy_register_fixed_link(np);
+ dev->phy_dev = of_phy_find_device(np);
+ if (res || !dev->phy_dev) {
+ mdiobus_unregister(dev->mii_bus);
+ return res ? res : -EINVAL;
}
- return res;
+ emac_adjust_link(dev->ndev);
+ put_device(&dev->phy_dev->mdio.dev);
}
return 0;
}
@@ -2964,6 +2970,10 @@ static int emac_init_config(struct emac_instance *dev)
dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
}
+ /* This should never happen */
+ if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS))
+ return -ENXIO;
+
DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 369de2cfb15b..84caa4a3fc52 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -390,6 +390,9 @@ static inline int emac_has_feature(struct emac_instance *dev,
#define EMAC4SYNC_XAHT_SLOTS_SHIFT 8
#define EMAC4SYNC_XAHT_WIDTH_SHIFT 5
+/* The largest span between slots and widths above is 3 */
+#define EMAC_XAHT_MAX_REGS (1 << 3)
+
#define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift)
#define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift)
#define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index eeade2ea8334..e4c20f0024f6 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -136,7 +136,7 @@ static inline int mal_rx_size(int len)
static inline int mal_tx_chunks(int len)
{
- return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE;
+ return DIV_ROUND_UP(len, MAL_MAX_TX_SIZE);
}
#define MAL_CHAN_MASK(n) (0x80000000 >> (n))
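The mal.h change swaps an open-coded round-up division for the kernel's
DIV_ROUND_UP; the two are identical for positive operands. Compile-time
spot checks (illustrative divisor, not necessarily MAL_MAX_TX_SIZE):

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	_Static_assert(DIV_ROUND_UP(4080, 4080) == 1, "exact multiple");
	_Static_assert(DIV_ROUND_UP(4081, 4080) == 2, "one byte over rounds up");
	_Static_assert(DIV_ROUND_UP(1, 4080) == 1, "short frame still needs a chunk");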
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 525d8b89187b..a4681780a55d 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -24,7 +24,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f0daf67b18d..7893beffcc71 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1428,7 +1428,7 @@ static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
return 0;
}
-static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int queue_num = skb_get_queue_mapping(skb);
@@ -1452,7 +1452,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
u64 *handle_array;
int index = 0;
u8 proto = 0;
- int ret = 0;
+ netdev_tx_t ret = NETDEV_TX_OK;
if (adapter->resetting) {
if (!netif_subqueue_stopped(netdev, skb))
@@ -2207,19 +2207,6 @@ restart_poll:
return frames_processed;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(dev);
- int i;
-
- replenish_pools(netdev_priv(dev));
- for (i = 0; i < adapter->req_rx_queues; i++)
- ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
- adapter->rx_scrq[i]);
-}
-#endif
-
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
int rc, ret;
@@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_set_mac_address = ibmvnic_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ibmvnic_netpoll_controller,
-#endif
.ndo_change_mtu = ibmvnic_change_mtu,
.ndo_features_check = ibmvnic_features_check,
};
@@ -2364,8 +2348,13 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
- ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+ if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+ ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+ ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+ } else {
+ ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+ ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+ }
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -2378,21 +2367,23 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int ret;
- if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
- ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
- netdev_err(netdev, "Invalid request.\n");
- netdev_err(netdev, "Max tx buffers = %llu\n",
- adapter->max_rx_add_entries_per_subcrq);
- netdev_err(netdev, "Max rx buffers = %llu\n",
- adapter->max_tx_entries_per_subcrq);
- return -EINVAL;
- }
-
+
adapter->desired.rx_entries = ring->rx_pending;
adapter->desired.tx_entries = ring->tx_pending;
- return wait_for_reset(adapter);
+ ret = wait_for_reset(adapter);
+
+ if (!ret &&
+ (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
+ adapter->req_tx_entries_per_subcrq != ring->tx_pending))
+ netdev_info(netdev,
+ "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+ ring->rx_pending, ring->tx_pending,
+ adapter->req_rx_add_entries_per_subcrq,
+ adapter->req_tx_entries_per_subcrq);
+ return ret;
}
static void ibmvnic_get_channels(struct net_device *netdev,
@@ -2400,8 +2391,14 @@ static void ibmvnic_get_channels(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- channels->max_rx = adapter->max_rx_queues;
- channels->max_tx = adapter->max_tx_queues;
+ if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+ channels->max_rx = adapter->max_rx_queues;
+ channels->max_tx = adapter->max_tx_queues;
+ } else {
+ channels->max_rx = IBMVNIC_MAX_QUEUES;
+ channels->max_tx = IBMVNIC_MAX_QUEUES;
+ }
+
channels->max_other = 0;
channels->max_combined = 0;
channels->rx_count = adapter->req_rx_queues;
@@ -2414,11 +2411,23 @@ static int ibmvnic_set_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
adapter->desired.rx_queues = channels->rx_count;
adapter->desired.tx_queues = channels->tx_count;
- return wait_for_reset(adapter);
+ ret = wait_for_reset(adapter);
+
+ if (!ret &&
+ (adapter->req_rx_queues != channels->rx_count ||
+ adapter->req_tx_queues != channels->tx_count))
+ netdev_info(netdev,
+ "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+ channels->rx_count, channels->tx_count,
+ adapter->req_rx_queues, adapter->req_tx_queues);
+ return ret;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2426,32 +2435,43 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct ibmvnic_adapter *adapter = netdev_priv(dev);
int i;
- if (stringset != ETH_SS_STATS)
- return;
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
+ i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
- for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
- memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- for (i = 0; i < adapter->req_tx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "tx%d_dropped_packets", i);
+ data += ETH_GSTRING_LEN;
+ }
- snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- for (i = 0; i < adapter->req_rx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
- snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
- data += ETH_GSTRING_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
+ strcpy(data + i * ETH_GSTRING_LEN,
+ ibmvnic_priv_flags[i]);
+ break;
+ default:
+ return;
}
}
@@ -2464,6 +2484,8 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
return ARRAY_SIZE(ibmvnic_stats) +
adapter->req_tx_queues * NUM_TX_STATS +
adapter->req_rx_queues * NUM_RX_STATS;
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(ibmvnic_priv_flags);
default:
return -EOPNOTSUPP;
}
@@ -2514,6 +2536,25 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
}
}
+static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->priv_flags;
+}
+
+static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
+
+ if (which_maxes)
+ adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
+ else
+ adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
+
+ return 0;
+}
+
static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
@@ -2527,6 +2568,8 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
.get_link_ksettings = ibmvnic_get_link_ksettings,
+ .get_priv_flags = ibmvnic_get_priv_flags,
+ .set_priv_flags = ibmvnic_set_priv_flags,
};
/* Routines for managing CRQs/sCRQs */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f06eec145ca6..18103b811d4d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -39,7 +39,8 @@
#define IBMVNIC_RX_WEIGHT 16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL 100
-#define IBMVNIC_MAX_QUEUES 10
+#define IBMVNIC_MAX_QUEUES 16
+#define IBMVNIC_MAX_QUEUE_SZ 4096
#define IBMVNIC_TSO_BUF_SZ 65536
#define IBMVNIC_TSO_BUFS 64
@@ -48,6 +49,11 @@
#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
#define IBMVNIC_BUFFER_HLEN 500
+static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
+#define IBMVNIC_USE_SERVER_MAXES 0x1
+ "use-server-maxes"
+};
+
struct ibmvnic_login_buffer {
__be32 len;
__be32 version;
@@ -969,6 +975,7 @@ struct ibmvnic_adapter {
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
+ u32 priv_flags;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
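The new use-server-maxes knob follows the standard ethtool private-flags
contract: bit i of the flags word pairs with string i of the
ETH_SS_PRIV_FLAGS table, and get_sset_count must report the table size.
A reduced sketch of that pairing:

	static const char priv_flag_names[][32] = {
	#define USE_SERVER_MAXES 0x1
		"use-server-maxes"
	};

	static int flag_is_set(unsigned int priv_flags, unsigned int i)
	{
		return !!(priv_flags & (1U << i));
	}
	/* flag_is_set(USE_SERVER_MAXES, 0) == 1 */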
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 1ab613eb5796..fd3373d82a9e 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -68,6 +68,9 @@ config E1000E
<http://support.intel.com>
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000e.rst>.
+
To compile this driver as a module, choose M here. The module
will be called e1000e.
@@ -94,7 +97,7 @@ config IGB
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.rst>.
+ <file:Documentation/networking/igb.rst>.
To compile this driver as a module, choose M here. The module
will be called igb.
@@ -130,7 +133,7 @@ config IGBVF
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.rst>.
+ <file:Documentation/networking/igbvf.rst>.
To compile this driver as a module, choose M here. The module
will be called igbvf.
@@ -147,7 +150,7 @@ config IXGB
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ixgb.txt>.
+ <file:Documentation/networking/ixgb.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgb.
@@ -164,6 +167,9 @@ config IXGBE
<http://support.intel.com>
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbe.rst>.
+
To compile this driver as a module, choose M here. The module
will be called ixgbe.
@@ -205,7 +211,7 @@ config IXGBEVF
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ixgbevf.txt>.
+ <file:Documentation/networking/ixgbevf.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgbevf. MSI-X interrupt support is required
@@ -222,6 +228,9 @@ config I40E
<http://support.intel.com>
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/i40e.rst>.
+
To compile this driver as a module, choose M here. The module
will be called i40e.
@@ -235,20 +244,30 @@ config I40E_DCB
If unsure, say N.
+# This hidden symbol exists to allow seamless migration from the I40EVF
+# name to IAVF: CONFIG_IAVF always mirrors the state of CONFIG_I40EVF.
+config IAVF
+ tristate
config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
+ select IAVF
depends on PCI_MSI
---help---
This driver supports virtual functions for Intel XL710,
- X710, X722, and all devices advertising support for Intel
- Ethernet Adaptive Virtual Function devices. For more
+ X710, X722, XXV710, and all devices advertising support for
+ Intel Ethernet Adaptive Virtual Function devices. For more
information on how to identify your adapter, go to the Adapter
& Driver ID Guide that can be located at:
- <http://support.intel.com>
+ <https://support.intel.com>
+
+ This driver was formerly named i40evf.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/iavf.rst>.
To compile this driver as a module, choose M here. The module
- will be called i40evf. MSI-X interrupt support is required
+ will be called iavf. MSI-X interrupt support is required
for this driver to work correctly.
config ICE
@@ -262,6 +281,9 @@ config ICE
<http://support.intel.com>
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ice.rst>.
+
To compile this driver as a module, choose M here. The module
will be called ice.
@@ -277,7 +299,26 @@ config FM10K
<http://support.intel.com>
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/fm10k.rst>.
+
To compile this driver as a module, choose M here. The module
will be called fm10k. MSI-X interrupt support is required
+config IGC
+ tristate "Intel(R) Ethernet Controller I225-LM/I225-V support"
+ default n
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
+ family of adapters.
+
+ For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide that can be located at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called igc.
+
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 807a4f8c7e4e..3075290063f6 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -7,11 +7,12 @@ obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
obj-$(CONFIG_IGB) += igb/
+obj-$(CONFIG_IGC) += igc/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
-obj-$(CONFIG_I40EVF) += i40evf/
+obj-$(CONFIG_IAVF) += iavf/
obj-$(CONFIG_FM10K) += fm10k/
obj-$(CONFIG_ICE) += ice/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 27d5f27163d2..7c4b55482f72 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -164,7 +164,7 @@
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 2110d5f2da19..43b6d3cec3b3 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -195,7 +195,7 @@ static struct pci_driver e1000_driver = {
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
@@ -2433,7 +2433,6 @@ static void e1000_watchdog(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
- bool txb2b = true;
/* update snapshot of PHY registers on LSC */
e1000_get_speed_and_duplex(hw,
&adapter->link_speed,
@@ -2455,11 +2454,9 @@ static void e1000_watchdog(struct work_struct *work)
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
- txb2b = false;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
- txb2b = false;
/* maybe add some timeout factor ? */
break;
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3ba0c90e7055..16a73bd9f4cb 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6854,8 +6854,6 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
return result;
}
@@ -7592,7 +7590,7 @@ module_exit(e1000_exit_module);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
/* netdev.c */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index a903a0ba45e1..7d42582ed48d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface);
void fm10k_service_event_schedule(struct fm10k_intfc *interface);
void fm10k_macvlan_schedule(struct fm10k_intfc *interface);
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-void fm10k_netpoll(struct net_device *netdev);
-#endif
/* Netdev */
struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 3f536541f45f..503bbc017792 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -21,7 +21,7 @@ static const char fm10k_copyright[] =
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
/* single workqueue for entire fm10k driver */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 929f538d28bc..538a8467f434 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = fm10k_netpoll,
-#endif
.ndo_features_check = fm10k_features_check,
};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 15071e4adb98..02345d381303 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fm10k_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-void fm10k_netpoll(struct net_device *netdev)
-{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__FM10K_DOWN, interface->state))
- return;
-
- for (i = 0; i < interface->num_q_vectors; i++)
- fm10k_msix_clean_rings(0, interface->q_vector[i]);
-}
-
-#endif
#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
struct fm10k_fault *fault)
@@ -2462,8 +2440,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
return result;
}
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 14397e7e9925..50590e8d1fd1 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -22,6 +22,7 @@ i40e-objs := i40e_main.o \
i40e_txrx.o \
i40e_ptp.o \
i40e_client.o \
- i40e_virtchnl_pf.o
+ i40e_virtchnl_pf.o \
+ i40e_xsk.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 7a80652e2500..876cac317e79 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -786,6 +786,11 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
+
+ /* AF_XDP zero-copy */
+ struct xdp_umem **xsk_umems;
+ u16 num_xsk_umems_used;
+ u16 num_xsk_umems;
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -1090,6 +1095,20 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
return !!vsi->xdp_prog;
}
+static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+ bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+ int qid = ring->queue_index;
+
+ if (ring_is_xdp(ring))
+ qid -= ring->vsi->alloc_queue_pairs;
+
+ if (!ring->vsi->xsk_umems || !ring->vsi->xsk_umems[qid] || !xdp_on)
+ return NULL;
+
+ return ring->vsi->xsk_umems[qid];
+}
+
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
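Annotation: the new i40e_xsk_umem() helper above relies on the convention that XDP Tx rings are allocated after the regular queue pairs, so an XDP ring's queue_index must be folded back into its pair index before indexing xsk_umems. A worked instance of the mapping, assuming vsi->alloc_queue_pairs == 4:

/* Rx/Tx pair 1 and the XDP Tx ring with queue_index 5 share a UMEM slot:
 *   ring_is_xdp(ring): qid = 5 - vsi->alloc_queue_pairs = 5 - 4 = 1
 *   -> both resolve to vsi->xsk_umems[1]
 * If no UMEM is attached to that slot, or XDP is not enabled on the VSI,
 * the helper returns NULL and the ring stays on the regular page path.
 */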
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 56b911a5dd8b..a20d1cf058ad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -132,8 +132,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
(unsigned long int)nd->vlan_features);
}
- dev_info(&pf->pdev->dev, " active_vlans is %s\n",
- vsi->active_vlans ? "<valid>" : "<null>");
dev_info(&pf->pdev->dev,
" flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 5ff6caa83948..9f8464f80783 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5,26 +5,227 @@
#include "i40e.h"
#include "i40e_diag.h"
+#include "i40e_txrx_common.h"
+/* ethtool statistics helpers */
+
+/**
+ * struct i40e_stats - definition for an ethtool statistic
+ * @stat_string: statistic name to display in ethtool -S output
+ * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
+ * @stat_offset: offsetof() the stat from a base pointer
+ *
+ * This structure defines a statistic to be added to the ethtool stats buffer.
+ * It defines a statistic as offset from a common base pointer. Stats should
+ * be defined in constant arrays using the I40E_STAT macro, with every element
+ * of the array using the same _type for calculating the sizeof_stat and
+ * stat_offset.
+ *
+ * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
+ * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
+ * the i40e_add_one_ethtool_stat() helper function.
+ *
+ * The @stat_string is interpreted as a format string, allowing formatted
+ * values to be inserted while looping over multiple structures for a given
+ * statistics array. Thus, every statistic string in an array should have the
+ * same type and number of format specifiers, to be formatted by variadic
+ * arguments to the i40e_add_stat_string() helper function.
+ **/
struct i40e_stats {
- /* The stat_string is expected to be a format string formatted using
- * vsnprintf by i40e_add_stat_strings. Every member of a stats array
- * should use the same format specifiers as they will be formatted
- * using the same variadic arguments.
- */
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
};
+/* Helper macro to define an i40e_stat structure with proper size and type.
+ * Use this when defining constant statistics arrays. Note that @_type expects
+ * only a type name and is used multiple times.
+ */
#define I40E_STAT(_type, _name, _stat) { \
.stat_string = _name, \
.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
.stat_offset = offsetof(_type, _stat) \
}
+/* Helper macro for defining some statistics directly copied from the netdev
+ * stats structure.
+ */
#define I40E_NETDEV_STAT(_net_stat) \
I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
+
+/* Helper macro for defining some statistics related to queues */
+#define I40E_QUEUE_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_ring, _name, _stat)
+
+/* Stats associated with a Tx or Rx ring */
+static const struct i40e_stats i40e_gstrings_queue_stats[] = {
+ I40E_QUEUE_STAT("%s-%u.packets", stats.packets),
+ I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes),
+};
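Annotation: the I40E_QUEUE_STAT entries above are ordinary I40E_STAT instances; expanding the first element shows how the format string, size, and offset travel together:

/* I40E_QUEUE_STAT("%s-%u.packets", stats.packets) expands to: */
{
	.stat_string = "%s-%u.packets",
	.sizeof_stat = FIELD_SIZEOF(struct i40e_ring, stats.packets),
	.stat_offset = offsetof(struct i40e_ring, stats.packets),
}
/* "%s-%u" is later filled with "tx"/"rx" and the queue index by
 * i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, "tx", i). */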
+
+/**
+ * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer
+ * @data: location to store the stat value
+ * @pointer: basis for where to copy from
+ * @stat: the stat definition
+ *
+ * Copies the stat data defined by the pointer and stat structure pair into
+ * the memory supplied as data. Used to implement i40e_add_ethtool_stats and
+ * i40e_add_queue_stats. If the pointer is null, data will be zero'd.
+ */
+static void
+i40e_add_one_ethtool_stat(u64 *data, void *pointer,
+ const struct i40e_stats *stat)
+{
+ char *p;
+
+ if (!pointer) {
+ /* ensure that the ethtool data buffer is zero'd for any stats
+ * which don't have a valid pointer.
+ */
+ *data = 0;
+ return;
+ }
+
+ p = (char *)pointer + stat->stat_offset;
+ switch (stat->sizeof_stat) {
+ case sizeof(u64):
+ *data = *((u64 *)p);
+ break;
+ case sizeof(u32):
+ *data = *((u32 *)p);
+ break;
+ case sizeof(u16):
+ *data = *((u16 *)p);
+ break;
+ case sizeof(u8):
+ *data = *((u8 *)p);
+ break;
+ default:
+ WARN_ONCE(1, "unexpected stat size for %s",
+ stat->stat_string);
+ *data = 0;
+ }
+}
+
+/**
+ * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location to copy stats from
+ * @stats: array of stats to copy
+ * @size: the size of the stats definition
+ *
+ * Copy the stats defined by the stats array using the pointer as a base into
+ * the data buffer supplied by ethtool. Updates the data pointer to point to
+ * the next empty location for successive calls to __i40e_add_ethtool_stats.
+ * If pointer is null, set the data values to zero and update the pointer to
+ * skip these stats.
+ **/
+static void
+__i40e_add_ethtool_stats(u64 **data, void *pointer,
+ const struct i40e_stats stats[],
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
+}
+
+/**
+ * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location where stats are stored
+ * @stats: static const array of stat definitions
+ *
+ * Macro to ease the use of __i40e_add_ethtool_stats by taking a static
+ * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
+ * ensuring that we pass the size associated with the given stats array.
+ *
+ * The parameter @stats is evaluated twice, so parameters with side effects
+ * should be avoided.
+ **/
+#define i40e_add_ethtool_stats(data, pointer, stats) \
+ __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
+
+/**
+ * i40e_add_queue_stats - copy queue statistics into supplied buffer
+ * @data: ethtool stats buffer
+ * @ring: the ring to copy
+ *
+ * Queue statistics must be copied while protected by
+ * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.
+ * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the
+ * ring pointer is null, zero out the queue stat values and update the data
+ * pointer. Otherwise safely copy the stats from the ring into the supplied
+ * buffer and update the data pointer when finished.
+ *
+ * This function expects to be called while under rcu_read_lock().
+ **/
+static void
+i40e_add_queue_stats(u64 **data, struct i40e_ring *ring)
+{
+ const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats);
+ const struct i40e_stats *stats = i40e_gstrings_queue_stats;
+ unsigned int start;
+ unsigned int i;
+
+ /* To avoid invalid statistics values, ensure that we keep retrying
+ * the copy until we get a consistent value according to
+ * u64_stats_fetch_retry_irq. But first, make sure our ring is
+ * non-null before attempting to access its syncp.
+ */
+ do {
+ start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
+ for (i = 0; i < size; i++) {
+ i40e_add_one_ethtool_stat(&(*data)[i], ring,
+ &stats[i]);
+ }
+ } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
+
+ /* Once we successfully copy the stats in, update the data pointer */
+ *data += size;
+}
+
+/**
+ * __i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ * @size: size of the stats array
+ *
+ * Format and copy the strings described by stats into the buffer pointed at
+ * by p.
+ **/
+static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
+ const unsigned int size, ...)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ va_list args;
+
+ va_start(args, size);
+ vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
+ *p += ETH_GSTRING_LEN;
+ va_end(args);
+ }
+}
+
+/**
+ * i40e_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ *
+ * Format and copy the strings described by the static const stats array into
+ * the buffer pointed at by p.
+ *
+ * The parameter @stats is evaluated twice, so parameters with side effects
+ * should be avoided. Additionally, stats must be an array such that
+ * ARRAY_SIZE can be called on it.
+ **/
+#define i40e_add_stat_strings(p, stats, ...) \
+ __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
+
#define I40E_PF_STAT(_name, _stat) \
I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
@@ -33,6 +234,8 @@ struct i40e_stats {
I40E_STAT(struct i40e_veb, _name, _stat)
#define I40E_PFC_STAT(_name, _stat) \
I40E_STAT(struct i40e_pfc_stats, _name, _stat)
+#define I40E_QUEUE_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_ring, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -171,20 +374,11 @@ static const struct i40e_stats i40e_gstrings_pfc_stats[] = {
I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff),
};
-/* We use num_tx_queues here as a proxy for the maximum number of queues
- * available because we always allocate queues symmetrically.
- */
-#define I40E_MAX_NUM_QUEUES(n) ((n)->num_tx_queues)
-#define I40E_QUEUE_STATS_LEN(n) \
- (I40E_MAX_NUM_QUEUES(n) \
- * 2 /* Tx and Rx together */ \
- * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
-#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
+
#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
-#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
- I40E_MISC_STATS_LEN + \
- I40E_QUEUE_STATS_LEN((n)))
+
+#define I40E_VSI_STATS_LEN (I40E_NETDEV_STATS_LEN + I40E_MISC_STATS_LEN)
#define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \
I40E_MAX_USER_PRIORITY)
@@ -193,10 +387,15 @@ static const struct i40e_stats i40e_gstrings_pfc_stats[] = {
(ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \
I40E_MAX_TRAFFIC_CLASS))
-#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
+#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
+
+#define I40E_PF_STATS_LEN (I40E_GLOBAL_STATS_LEN + \
I40E_PFC_STATS_LEN + \
I40E_VEB_STATS_LEN + \
- I40E_VSI_STATS_LEN((n)))
+ I40E_VSI_STATS_LEN)
+
+/* Length of stats for a single queue */
+#define I40E_QUEUE_STATS_LEN ARRAY_SIZE(i40e_gstrings_queue_stats)
enum i40e_ethtool_test_id {
I40E_ETH_TEST_REG = 0,
@@ -1512,6 +1711,13 @@ static int i40e_set_ringparam(struct net_device *netdev,
(new_rx_count == vsi->rx_rings[0]->count))
return 0;
+ /* If an AF_XDP UMEM is attached to any of the Rx rings,
+ * disallow changing the number of descriptors, regardless
+ * of whether the netdev is running.
+ */
+ if (i40e_xsk_any_rx_ring_enabled(vsi))
+ return -EBUSY;
+
while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
timeout--;
if (!timeout)
@@ -1701,11 +1907,30 @@ static int i40e_get_stats_count(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ int stats_len;
if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
- return I40E_PF_STATS_LEN(netdev);
+ stats_len = I40E_PF_STATS_LEN;
else
- return I40E_VSI_STATS_LEN(netdev);
+ stats_len = I40E_VSI_STATS_LEN;
+
+ /* The number of stats reported for a given net_device must remain
+ * constant throughout the life of that device.
+ *
+ * This is because the API for obtaining the size, strings, and stats
+ * is spread out over three separate ethtool ioctls. There is no safe
+ * way to lock the number of stats across these calls, so we must
+ * assume that they will never change.
+ *
+ * Due to this, we report the maximum number of queues, even if not
+ * every queue is currently configured. Since we always allocate
+ * queues in pairs, we'll just use netdev->num_tx_queues * 2. This
+ * works because the num_tx_queues is set at device creation and never
+ * changes.
+ */
+ stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues;
+
+ return stats_len;
}
static int i40e_get_sset_count(struct net_device *netdev, int sset)
@@ -1728,89 +1953,6 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
}
/**
- * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer
- * @data: location to store the stat value
- * @pointer: basis for where to copy from
- * @stat: the stat definition
- *
- * Copies the stat data defined by the pointer and stat structure pair into
- * the memory supplied as data. Used to implement i40e_add_ethtool_stats.
- * If the pointer is null, data will be zero'd.
- */
-static inline void
-i40e_add_one_ethtool_stat(u64 *data, void *pointer,
- const struct i40e_stats *stat)
-{
- char *p;
-
- if (!pointer) {
- /* ensure that the ethtool data buffer is zero'd for any stats
- * which don't have a valid pointer.
- */
- *data = 0;
- return;
- }
-
- p = (char *)pointer + stat->stat_offset;
- switch (stat->sizeof_stat) {
- case sizeof(u64):
- *data = *((u64 *)p);
- break;
- case sizeof(u32):
- *data = *((u32 *)p);
- break;
- case sizeof(u16):
- *data = *((u16 *)p);
- break;
- case sizeof(u8):
- *data = *((u8 *)p);
- break;
- default:
- WARN_ONCE(1, "unexpected stat size for %s",
- stat->stat_string);
- *data = 0;
- }
-}
-
-/**
- * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer
- * @data: ethtool stats buffer
- * @pointer: location to copy stats from
- * @stats: array of stats to copy
- * @size: the size of the stats definition
- *
- * Copy the stats defined by the stats array using the pointer as a base into
- * the data buffer supplied by ethtool. Updates the data pointer to point to
- * the next empty location for successive calls to __i40e_add_ethtool_stats.
- * If pointer is null, set the data values to zero and update the pointer to
- * skip these stats.
- **/
-static inline void
-__i40e_add_ethtool_stats(u64 **data, void *pointer,
- const struct i40e_stats stats[],
- const unsigned int size)
-{
- unsigned int i;
-
- for (i = 0; i < size; i++)
- i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
-}
-
-/**
- * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer
- * @data: ethtool stats buffer
- * @pointer: location where stats are stored
- * @stats: static const array of stat definitions
- *
- * Macro to ease the use of __i40e_add_ethtool_stats by taking a static
- * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
- * ensuring that we pass the size associated with the given stats array.
- * Assumes that stats is an array.
- **/
-#define i40e_add_ethtool_stats(data, pointer, stats) \
- __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
-
-/**
* i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
* @pf: the PF device structure
* @i: the priority value to copy
@@ -1853,12 +1995,10 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_veb *veb = pf->veb[pf->lan_veb];
unsigned int i;
- unsigned int start;
bool veb_stats;
u64 *p = data;
@@ -1870,38 +2010,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats);
rcu_read_lock();
- for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev) ; i++) {
- tx_ring = READ_ONCE(vsi->tx_rings[i]);
-
- if (!tx_ring) {
- /* Bump the stat counter to skip these stats, and make
- * sure the memory is zero'd
- */
- *(data++) = 0;
- *(data++) = 0;
- *(data++) = 0;
- *(data++) = 0;
- continue;
- }
-
- /* process Tx ring statistics */
- do {
- start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
- data[0] = tx_ring->stats.packets;
- data[1] = tx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
- data += 2;
-
- /* Rx ring is the 2nd half of the queue pair */
- rx_ring = &tx_ring[1];
- do {
- start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
- data[0] = rx_ring->stats.packets;
- data[1] = rx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
- data += 2;
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i]));
+ i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i]));
}
rcu_read_unlock();
+
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
goto check_data_pointer;
@@ -1933,42 +2047,6 @@ check_data_pointer:
}
/**
- * __i40e_add_stat_strings - copy stat strings into ethtool buffer
- * @p: ethtool supplied buffer
- * @stats: stat definitions array
- * @size: size of the stats array
- *
- * Format and copy the strings described by stats into the buffer pointed at
- * by p.
- **/
-static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
- const unsigned int size, ...)
-{
- unsigned int i;
-
- for (i = 0; i < size; i++) {
- va_list args;
-
- va_start(args, size);
- vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
- *p += ETH_GSTRING_LEN;
- va_end(args);
- }
-}
-
-/**
- * 40e_add_stat_strings - copy stat strings into ethtool buffer
- * @p: ethtool supplied buffer
- * @stats: stat definitions array
- *
- * Format and copy the strings described by the const static stats value into
- * the buffer pointed at by p. Assumes that stats can have ARRAY_SIZE called
- * for it.
- **/
-#define i40e_add_stat_strings(p, stats, ...) \
- __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
-
-/**
* i40e_get_stat_strings - copy stat strings into supplied buffer
* @netdev: the netdev to collect strings for
* @data: supplied buffer to copy strings into
@@ -1990,16 +2068,13 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
i40e_add_stat_strings(&data, i40e_gstrings_misc_stats);
- for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
- data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ i40e_add_stat_strings(&data, i40e_gstrings_queue_stats,
+ "tx", i);
+ i40e_add_stat_strings(&data, i40e_gstrings_queue_stats,
+ "rx", i);
}
+
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ac685ad4d877..bc71a21c1dc2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -9,7 +9,9 @@
/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
+#include "i40e_xsk.h"
#include <net/udp_tunnel.h>
+#include <net/xdp_sock.h>
/* All i40e tracepoints are defined by the include below, which
* must be included exactly once across the whole kernel with
* CREATE_TRACE_POINTS defined
@@ -89,7 +91,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *i40e_wq;
@@ -420,9 +422,9 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+ struct i40e_ring *ring;
int i;
if (test_bit(__I40E_VSI_DOWN, vsi->state))
@@ -436,24 +438,26 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
u64 bytes, packets;
unsigned int start;
- tx_ring = READ_ONCE(vsi->tx_rings[i]);
- if (!tx_ring)
+ ring = READ_ONCE(vsi->tx_rings[i]);
+ if (!ring)
continue;
- i40e_get_netdev_stats_struct_tx(tx_ring, stats);
+ i40e_get_netdev_stats_struct_tx(ring, stats);
- rx_ring = &tx_ring[1];
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ ring++;
+ i40e_get_netdev_stats_struct_tx(ring, stats);
+ }
+ ring++;
do {
- start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
- packets = rx_ring->stats.packets;
- bytes = rx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
- if (i40e_enabled_xdp_vsi(vsi))
- i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
}
rcu_read_unlock();
@@ -1528,8 +1532,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
return 0;
}
- if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
- test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
+ if (test_bit(__I40E_DOWN, pf->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
return -EADDRNOTAVAIL;
if (ether_addr_equal(hw->mac.addr, addr->sa_data))
@@ -1553,8 +1557,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
- ret = i40e_aq_mac_address_write(&vsi->back->hw,
- I40E_AQC_WRITE_TYPE_LAA_WOL,
+ ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
if (ret)
netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
@@ -1565,7 +1568,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
/* schedule our worker thread which will take care of
* applying the new filter changes
*/
- i40e_service_event_schedule(vsi->back);
+ i40e_service_event_schedule(pf);
return 0;
}
@@ -3072,6 +3075,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
i40e_status err = 0;
u32 qtx_ctl = 0;
+ if (ring_is_xdp(ring))
+ ring->xsk_umem = i40e_xsk_umem(ring);
+
/* some ATR related tx ring init */
if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
ring->atr_sample_rate = vsi->back->atr_sample_rate;
@@ -3181,13 +3187,46 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
struct i40e_hw *hw = &vsi->back->hw;
struct i40e_hmc_obj_rxq rx_ctx;
i40e_status err = 0;
+ bool ok;
+ int ret;
bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(rx_ctx));
- ring->rx_buf_len = vsi->rx_buf_len;
+ if (ring->vsi->type == I40E_VSI_MAIN)
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+
+ ring->xsk_umem = i40e_xsk_umem(ring);
+ if (ring->xsk_umem) {
+ ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
+ XDP_PACKET_HEADROOM;
+ /* For AF_XDP ZC, we disallow packets spanning
+ * multiple buffers, which lets us skip that
+ * handling in the fast path.
+ */
+ chain_len = 1;
+ ring->zca.free = i40e_zca_free;
+ ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_ZERO_COPY,
+ &ring->zca);
+ if (ret)
+ return ret;
+ dev_info(&vsi->back->pdev->dev,
+ "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ ring->queue_index);
+
+ } else {
+ ring->rx_buf_len = vsi->rx_buf_len;
+ if (ring->vsi->type == I40E_VSI_MAIN) {
+ ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (ret)
+ return ret;
+ }
+ }
rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
@@ -3243,7 +3282,15 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+ ok = ring->xsk_umem ?
+ i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
+ !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+ if (!ok) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ ring->xsk_umem ? "UMEM enabled " : "",
+ ring->queue_index, pf_q);
+ }
return 0;
}
@@ -6384,7 +6431,10 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
char *req_fec = "";
char *an = "";
- new_speed = pf->hw.phy.link_info.link_speed;
+ if (isup)
+ new_speed = pf->hw.phy.link_info.link_speed;
+ else
+ new_speed = I40E_LINK_SPEED_UNKNOWN;
if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
return;
@@ -6568,6 +6618,24 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
struct i40e_hw *hw = &pf->hw;
i40e_status err;
u64 mask;
+ u8 speed;
+
+ /* The card might have been put in an unstable state by other
+ * drivers and applications, which can cause incorrect speed values
+ * to be set on startup. In order to clear the speed registers, we
+ * call get_phy_capabilities twice: once to get the initial state of
+ * available speeds, and once to get the current PHY config.
+ */
+ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
+ NULL);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "failed to get phy cap., ret = %s last_status = %s\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return err;
+ }
+ speed = abilities.link_speed;
/* Get the current phy config */
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
@@ -6581,9 +6649,9 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
}
/* If link needs to go up, but was not forced to go down,
- * no need for a flap
+ * and its speed values are OK, no need for a flap
*/
- if (is_up && abilities.phy_type != 0)
+ if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
return I40E_SUCCESS;
/* To force link we need to set bits for all supported PHY types,
@@ -6595,7 +6663,10 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
/* Copy the old settings, except of phy_type */
config.abilities = abilities.abilities;
- config.link_speed = abilities.link_speed;
+ if (abilities.link_speed != 0)
+ config.link_speed = abilities.link_speed;
+ else
+ config.link_speed = speed;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
@@ -8440,14 +8511,9 @@ static void i40e_link_event(struct i40e_pf *pf)
i40e_status status;
bool new_link, old_link;
- /* save off old link status information */
- pf->hw.phy.link_info_old = pf->hw.phy.link_info;
-
/* set this to force the get_link_status call to refresh state */
pf->hw.phy.get_link_info = true;
-
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
-
status = i40e_get_link_status(&pf->hw, &new_link);
/* On success, disable temp link polling */
@@ -11828,6 +11894,256 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
}
/**
+ * i40e_enter_busy_conf - Enters busy config state
+ * @vsi: vsi
+ *
+ * Returns 0 on success, <0 for failure.
+ **/
+static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int timeout = 50;
+
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_exit_busy_conf - Exits busy config state
+ * @vsi: vsi
+ **/
+static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ clear_bit(__I40E_CONFIG_BUSY, pf->state);
+}
+
+/**
+ * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ **/
+static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
+{
+ memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
+ sizeof(vsi->rx_rings[queue_pair]->rx_stats));
+ memset(&vsi->tx_rings[queue_pair]->stats, 0,
+ sizeof(vsi->tx_rings[queue_pair]->stats));
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ memset(&vsi->xdp_rings[queue_pair]->stats, 0,
+ sizeof(vsi->xdp_rings[queue_pair]->stats));
+ }
+}
+
+/**
+ * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ **/
+static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
+{
+ i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
+ if (i40e_enabled_xdp_vsi(vsi))
+ i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
+}
+
+/**
+ * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ * @enable: true for enable, false for disable
+ **/
+static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
+ bool enable)
+{
+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
+ struct i40e_q_vector *q_vector = rxr->q_vector;
+
+ if (!vsi->netdev)
+ return;
+
+ /* All rings in a qp belong to the same qvector. */
+ if (q_vector->rx.ring || q_vector->tx.ring) {
+ if (enable)
+ napi_enable(&q_vector->napi);
+ else
+ napi_disable(&q_vector->napi);
+ }
+}
+
+/**
+ * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ * @enable: true for enable, false for disable
+ *
+ * Returns 0 on success, <0 on failure.
+ **/
+static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
+ bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ int pf_q, ret = 0;
+
+ pf_q = vsi->base_queue + queue_pair;
+ ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
+ false /*is xdp*/, enable);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Tx ring %d %sable timeout\n",
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
+ return ret;
+ }
+
+ i40e_control_rx_q(pf, pf_q, enable);
+ ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Rx ring %d %sable timeout\n",
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
+ return ret;
+ }
+
+ /* Due to HW errata, on Rx disable only, the register can
+ * indicate done before it really is; wait 50ms to be sure.
+ */
+ if (!enable)
+ mdelay(50);
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return ret;
+
+ ret = i40e_control_wait_tx_q(vsi->seid, pf,
+ pf_q + vsi->alloc_queue_pairs,
+ true /*is xdp*/, enable);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d XDP Tx ring %d %sable timeout\n",
+ vsi->seid, pf_q, (enable ? "en" : "dis"));
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue_pair
+ **/
+static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
+{
+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ /* All rings in a qp belong to the same qvector. */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
+ else
+ i40e_irq_dynamic_enable_icr0(pf);
+
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue_pair
+ **/
+static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
+{
+ struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ /* For simplicity, instead of removing the qp interrupt causes
+ * from the interrupt linked list, we simply disable the interrupt, and
+ * leave the list intact.
+ *
+ * All rings in a qp belong to the same qvector.
+ */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
+
+ wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
+ i40e_flush(hw);
+ synchronize_irq(pf->msix_entries[intpf].vector);
+ } else {
+ /* Legacy and MSI mode - this stops all interrupt handling */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+ i40e_flush(hw);
+ synchronize_irq(pf->pdev->irq);
+ }
+}
+
+/**
+ * i40e_queue_pair_disable - Disables a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ *
+ * Returns 0 on success, <0 on failure.
+ **/
+int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
+{
+ int err;
+
+ err = i40e_enter_busy_conf(vsi);
+ if (err)
+ return err;
+
+ i40e_queue_pair_disable_irq(vsi, queue_pair);
+ err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
+ i40e_queue_pair_clean_rings(vsi, queue_pair);
+ i40e_queue_pair_reset_stats(vsi, queue_pair);
+
+ return err;
+}
+
+/**
+ * i40e_queue_pair_enable - Enables a queue pair
+ * @vsi: vsi
+ * @queue_pair: queue pair
+ *
+ * Returns 0 on success, <0 on failure.
+ **/
+int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
+{
+ int err;
+
+ err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
+ if (err)
+ return err;
+
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
+ if (err)
+ return err;
+ }
+
+ err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
+ if (err)
+ return err;
+
+ err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
+ i40e_queue_pair_enable_irq(vsi, queue_pair);
+
+ i40e_exit_busy_conf(vsi);
+
+ return err;
+}
+
+/**
* i40e_xdp - implements ndo_bpf for i40e
* @dev: netdevice
* @xdp: XDP command
@@ -11847,6 +12163,12 @@ static int i40e_xdp(struct net_device *dev,
case XDP_QUERY_PROG:
xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
return 0;
+ case XDP_QUERY_XSK_UMEM:
+ return i40e_xsk_umem_query(vsi, &xdp->xsk.umem,
+ xdp->xsk.queue_id);
+ case XDP_SETUP_XSK_UMEM:
+ return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
+ xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -11886,6 +12208,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
.ndo_bpf = i40e_xdp,
.ndo_xdp_xmit = i40e_xdp_xmit,
+ .ndo_xsk_async_xmit = i40e_xsk_async_xmit,
};
/**
@@ -13033,7 +13356,7 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
break;
- if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
+ if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
vsi_seid);
return NULL;
@@ -14159,6 +14482,7 @@ static void i40e_remove(struct pci_dev *pdev)
mutex_destroy(&hw->aq.asq_mutex);
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
+ rtnl_lock();
i40e_clear_interrupt_scheme(pf);
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i]) {
@@ -14167,6 +14491,7 @@ static void i40e_remove(struct pci_dev *pdev)
pf->vsi[i] = NULL;
}
}
+ rtnl_unlock();
for (i = 0; i < I40E_MAX_VEB; i++) {
kfree(pf->veb[i]);
@@ -14227,7 +14552,6 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
pci_ers_result_t result;
- int err;
u32 reg;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -14248,14 +14572,6 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_info(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err);
- /* non-fatal, continue */
- }
-
return result;
}
@@ -14378,7 +14694,13 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_WUFC,
(pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ /* Since we're going to destroy queues during
+ * i40e_clear_interrupt_scheme(), we should hold the RTNL lock for
+ * this whole section.
+ */
+ rtnl_lock();
i40e_clear_interrupt_scheme(pf);
+ rtnl_unlock();
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, pf->wol_en);
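Annotation: i40e_queue_pair_disable()/i40e_queue_pair_enable() above are deliberately non-static so a per-queue user can quiesce a single pair without a full VSI rebuild. The expected caller is the AF_XDP UMEM setup path in the new i40e_xsk.c, which is not in this excerpt; the following is a sketch of that pattern under those assumptions, with i40e_xsk_umem_enable_sketch() as a hypothetical name:

/* sketch: attach a UMEM to queue 'qid' while the netdev may be running */
static int i40e_xsk_umem_enable_sketch(struct i40e_vsi *vsi,
				       struct xdp_umem *umem, u16 qid)
{
	bool if_running = netif_running(vsi->netdev) &&
			  i40e_enabled_xdp_vsi(vsi);
	int err;

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	/* ... record the UMEM in vsi->xsk_umems[qid] (assumed step) ... */

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}
	return 0;
}

Note the ordering the helpers encode: disable tears down irq, then rings, then NAPI, then cleans and resets stats, while enable reconfigures the rings first and re-arms the interrupt last; i40e_enter_busy_conf() is taken in disable and released in enable, pairing across the two calls.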
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 35f2866b38c6..1199f0502d6d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -694,7 +694,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
- strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name));
+ strncpy(pf->ptp_caps.name, i40e_driver_name,
+ sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
pf->ptp_caps.n_ext_ts = 0;
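Annotation: the ptp hunk above is a classic strncpy() termination fix. strncpy() does not NUL-terminate when the source is at least as long as the bound, so copying with the full sizeof() can leave ptp_caps.name unterminated; copying sizeof() - 1 bytes preserves the trailing NUL in the zeroed field. The same behavior in isolation, as a standalone sketch:

#include <string.h>

static void strncpy_termination_sketch(const char *src)
{
	char name[16] = "";

	strncpy(name, src, sizeof(name));     /* unterminated if strlen(src) >= 16 */
	strncpy(name, src, sizeof(name) - 1); /* name[15] is still '\0' */
}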
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b5042d1a63c0..740ea58ba938 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -8,16 +8,8 @@
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
-
-static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
- u32 td_tag)
-{
- return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
- ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
- ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
- ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
- ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
-}
+#include "i40e_txrx_common.h"
+#include "i40e_xsk.h"
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
@@ -536,8 +528,8 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
* This is used to verify if the FD programming or invalidation
* requested by SW to the HW is successful or not and take actions accordingly.
**/
-static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, u8 prog_id)
+void i40e_fd_handle_status(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, u8 prog_id)
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct pci_dev *pdev = pf->pdev;
@@ -644,13 +636,18 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
unsigned long bi_size;
u16 i;
- /* ring already cleared, nothing to do */
- if (!tx_ring->tx_bi)
- return;
+ if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ i40e_xsk_clean_tx_ring(tx_ring);
+ } else {
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_bi)
+ return;
- /* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++)
- i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++)
+ i40e_unmap_and_free_tx_resource(tx_ring,
+ &tx_ring->tx_bi[i]);
+ }
bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_bi, 0, bi_size);
@@ -767,8 +764,6 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi)
}
}
-#define WB_STRIDE 4
-
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
* @vsi: the VSI we care about
@@ -873,27 +868,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- tx_ring->q_vector->tx.total_bytes += total_bytes;
- tx_ring->q_vector->tx.total_packets += total_packets;
-
- if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
- /* check to see if there are < 4 descriptors
- * waiting to be written back, then kick the hardware to force
- * them to be written back in case we stay in NAPI.
- * In this mode on X722 we do not enable Interrupt.
- */
- unsigned int j = i40e_get_tx_pending(tx_ring, false);
-
- if (budget &&
- ((j / WB_STRIDE) == 0) && (j > 0) &&
- !test_bit(__I40E_VSI_DOWN, vsi->state) &&
- (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
- tx_ring->arm_wb = true;
- }
+ i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
+ i40e_arm_wb(tx_ring, vsi, budget);
if (ring_is_xdp(tx_ring))
return !!budget;
@@ -1244,6 +1220,11 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
new_buff->page = old_buff->page;
new_buff->page_offset = old_buff->page_offset;
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+
+ rx_ring->rx_stats.page_reuse_count++;
+
+ /* clear contents of buffer_info */
+ old_buff->page = NULL;
}
/**
@@ -1266,7 +1247,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
}
/**
- * i40e_clean_programming_status - clean the programming status descriptor
+ * i40e_clean_programming_status - try clean the programming status descriptor
* @rx_ring: the rx ring that has this descriptor
* @rx_desc: the rx descriptor written back by HW
* @qw: qword representing status_error_len in CPU ordering
@@ -1275,15 +1256,22 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
* status being successful or not and take actions accordingly. FCoE should
* handle its context/filter programming/invalidation status and take actions.
*
+ * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL.
**/
-static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- u64 qw)
+struct i40e_rx_buffer *i40e_clean_programming_status(
+ struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc,
+ u64 qw)
{
struct i40e_rx_buffer *rx_buffer;
- u32 ntc = rx_ring->next_to_clean;
+ u32 ntc;
u8 id;
+ if (!i40e_rx_is_programming_status(qw))
+ return NULL;
+
+ ntc = rx_ring->next_to_clean;
+
/* fetch, update, and store next to clean */
rx_buffer = &rx_ring->rx_bi[ntc++];
ntc = (ntc < rx_ring->count) ? ntc : 0;
@@ -1291,18 +1279,13 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
prefetch(I40E_RX_DESC(rx_ring, ntc));
- /* place unused page back on the ring */
- i40e_reuse_rx_page(rx_ring, rx_buffer);
- rx_ring->rx_stats.page_reuse_count++;
-
- /* clear contents of buffer_info */
- rx_buffer->page = NULL;
-
id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
i40e_fd_handle_status(rx_ring, rx_desc, id);
+
+ return rx_buffer;
}
/**
@@ -1372,6 +1355,11 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
rx_ring->skb = NULL;
}
+ if (rx_ring->xsk_umem) {
+ i40e_xsk_clean_rx_ring(rx_ring);
+ goto skip_free;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
@@ -1400,6 +1388,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
rx_bi->page_offset = 0;
}
+skip_free:
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
memset(rx_ring->rx_bi, 0, bi_size);
@@ -1492,7 +1481,7 @@ err:
* @rx_ring: ring to bump
* @val: new head index
**/
-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
@@ -1576,8 +1565,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
* @skb: packet to send up
* @vlan_tag: vlan tag for packet
**/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
+void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
{
struct i40e_q_vector *q_vector = rx_ring->q_vector;
@@ -1804,7 +1793,6 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
**/
-static inline
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, struct sk_buff *skb,
u8 rx_ptype)
@@ -2152,7 +2140,6 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
if (i40e_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
- rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
@@ -2160,10 +2147,9 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
}
-
- /* clear contents of buffer_info */
- rx_buffer->page = NULL;
}
/**
@@ -2199,16 +2185,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
return true;
}
-#define I40E_XDP_PASS 0
-#define I40E_XDP_CONSUMED BIT(0)
-#define I40E_XDP_TX BIT(1)
-#define I40E_XDP_REDIR BIT(2)
-
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
struct i40e_ring *xdp_ring);
-static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp,
- struct i40e_ring *xdp_ring)
+int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
{
struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
@@ -2287,7 +2267,13 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
#endif
}
-static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
+/**
+ * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
+ * @xdp_ring: XDP Tx ring
+ *
+ * This function updates the XDP Tx ring tail register.
+ **/
+void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
{
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch.
@@ -2297,6 +2283,48 @@ static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
}
/**
+ * i40e_update_rx_stats - Update Rx ring statistics
+ * @rx_ring: rx descriptor ring
+ * @total_rx_bytes: number of bytes received
+ * @total_rx_packets: number of packets received
+ *
+ * This function updates the Rx ring statistics.
+ **/
+void i40e_update_rx_stats(struct i40e_ring *rx_ring,
+ unsigned int total_rx_bytes,
+ unsigned int total_rx_packets)
+{
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+}
+
+/**
+ * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
+ * @rx_ring: Rx ring
+ * @xdp_res: Result of the receive batch
+ *
+ * This function bumps the XDP Tx tail and/or flushes the redirect map, and
+ * should be called when a batch of packets has been processed in the
+ * napi loop.
+ **/
+void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
+{
+ if (xdp_res & I40E_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_res & I40E_XDP_TX) {
+ struct i40e_ring *xdp_ring =
+ rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+
+ i40e_xdp_ring_update_tail(xdp_ring);
+ }
+}
+
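The accumulate-then-finalize pattern above is shared by the copy-based
Rx path below and the zero-copy path added later in this patch. In
outline (a sketch; i40e_run_xdp_zc is the zero-copy verdict helper
defined further down):

	unsigned int xdp_xmit = 0, xdp_res;

	/* per received frame, inside the NAPI loop */
	xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
	if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
		xdp_xmit |= xdp_res;

	/* once per poll, after the loop */
	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);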
+/**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -2349,11 +2377,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
dma_rmb();
- if (unlikely(i40e_rx_is_programming_status(qword))) {
- i40e_clean_programming_status(rx_ring, rx_desc, qword);
+ rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc,
+ qword);
+ if (unlikely(rx_buffer)) {
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
cleaned_count++;
continue;
}
+
size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
if (!size)
@@ -2432,24 +2463,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
total_rx_packets++;
}
- if (xdp_xmit & I40E_XDP_REDIR)
- xdp_do_flush_map();
-
- if (xdp_xmit & I40E_XDP_TX) {
- struct i40e_ring *xdp_ring =
- rx_ring->vsi->xdp_rings[rx_ring->queue_index];
-
- i40e_xdp_ring_update_tail(xdp_ring);
- }
-
+ i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
rx_ring->skb = skb;
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_packets += total_rx_packets;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_packets;
@@ -2587,7 +2604,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+ bool wd = ring->xsk_umem ?
+ i40e_clean_xdp_tx_irq(vsi, ring, budget) :
+ i40e_clean_tx_irq(vsi, ring, budget);
+
+ if (!wd) {
clean_complete = false;
continue;
}
@@ -2605,7 +2626,9 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
+ int cleaned = ring->xsk_umem ?
+ i40e_clean_rx_irq_zc(ring, budget_per_ring) :
+ i40e_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index bb04f6a731fe..100e92d2982f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -296,13 +296,17 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
dma_addr_t dma;
- struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
-#else
- __u16 page_offset;
-#endif
- __u16 pagecnt_bias;
+ union {
+ struct {
+ struct page *page;
+ __u32 page_offset;
+ __u16 pagecnt_bias;
+ };
+ struct {
+ void *addr;
+ u64 handle;
+ };
+ };
};
struct i40e_queue_stats {
@@ -414,6 +418,8 @@ struct i40e_ring {
struct i40e_channel *ch;
struct xdp_rxq_info xdp_rxq;
+ struct xdp_umem *xsk_umem;
+ struct zero_copy_allocator zca; /* ZC allocator anchor */
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
new file mode 100644
index 000000000000..09809dffe399
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2018 Intel Corporation. */
+
+#ifndef I40E_TXRX_COMMON_
+#define I40E_TXRX_COMMON_
+
+void i40e_fd_handle_status(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, u8 prog_id);
+int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
+struct i40e_rx_buffer *i40e_clean_programming_status(
+ struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc,
+ u64 qw);
+void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype);
+void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag);
+void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
+void i40e_update_rx_stats(struct i40e_ring *rx_ring,
+ unsigned int total_rx_bytes,
+ unsigned int total_rx_packets);
+void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res);
+void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
+
+#define I40E_XDP_PASS 0
+#define I40E_XDP_CONSUMED BIT(0)
+#define I40E_XDP_TX BIT(1)
+#define I40E_XDP_REDIR BIT(2)
+
+/**
+ * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
+ * @td_cmd: Tx descriptor command
+ * @td_offset: Tx descriptor header offsets
+ * @size: size of the Tx data buffer
+ * @td_tag: Tx descriptor VLAN (L2TAG1) tag
+ **/
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
+{
+ return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
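As a concrete example of the qword layout (an illustrative fragment;
dma and len describe an already-mapped buffer, mirroring the zero-copy
Tx path later in this patch):

	/* one-buffer frame: insert L2 CRC and mark end-of-packet */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz =
		build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
			   0, len, 0);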
+/**
+ * i40e_update_tx_stats - Update the egress statistics for the Tx ring
+ * @tx_ring: Tx ring to update
+ * @total_packets: total packets sent
+ * @total_bytes: total bytes sent
+ **/
+static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
+ unsigned int total_packets,
+ unsigned int total_bytes)
+{
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_packets += total_packets;
+}
+
+#define WB_STRIDE 4
+
+/**
+ * i40e_arm_wb - (Possibly) arms Tx write-back
+ * @tx_ring: Tx ring to update
+ * @vsi: the VSI
+ * @budget: the NAPI budget left
+ **/
+static inline void i40e_arm_wb(struct i40e_ring *tx_ring,
+ struct i40e_vsi *vsi,
+ int budget)
+{
+ if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ /* check to see if there are < 4 descriptors
+ * waiting to be written back, then kick the hardware to force
+ * them to be written back in case we stay in NAPI.
+ * In this mode on X722 we do not enable Interrupt.
+ */
+ unsigned int j = i40e_get_tx_pending(tx_ring, false);
+
+ if (budget &&
+ ((j / WB_STRIDE) == 0) && j > 0 &&
+ !test_bit(__I40E_VSI_DOWN, vsi->state) &&
+ (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ tx_ring->arm_wb = true;
+ }
+}
+
+void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
+bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
+
+#endif /* I40E_TXRX_COMMON_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index c6d24eaede18..81b0e1f8d14b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1084,6 +1084,136 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
return -EIO;
}
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
+
+/**
+ * i40e_config_vf_promiscuous_mode
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @allmulti: whether to enable MAC L2 layer multicast promiscuous mode
+ * @alluni: whether to enable MAC L2 layer unicast promiscuous mode
+ *
+ * Called from the VF to configure the promiscuous mode of
+ * VF VSIs and from the VF reset path to reset promiscuous mode.
+ **/
+static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+ u16 vsi_id,
+ bool allmulti,
+ bool alluni)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_mac_filter *f;
+ i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
+ int bkt;
+
+ vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
+ return I40E_ERR_PARAM;
+
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+ /* Lie to the VF on purpose. */
+ return 0;
+ }
+
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
+ allmulti,
+ vf->port_vlan_id,
+ NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ return aq_ret;
+ }
+
+ aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
+ alluni,
+ vf->port_vlan_id,
+ NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+ return aq_ret;
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+ if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+ continue;
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
+ vsi->seid,
+ allmulti,
+ f->vlan,
+ NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
+ f->vlan,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+
+ aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
+ vsi->seid,
+ alluni,
+ f->vlan,
+ NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
+ f->vlan,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+ }
+ return aq_ret;
+ }
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
+ NULL);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ return aq_ret;
+ }
+
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
+ NULL, true);
+ if (aq_ret) {
+ int aq_err = pf->hw.aq.asq_last_status;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+
+ return aq_ret;
+}
+
/**
* i40e_trigger_vf_reset
* @vf: pointer to the VF structure
@@ -1145,6 +1275,9 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
struct i40e_hw *hw = &pf->hw;
u32 reg;
+ /* disable promisc modes in case they were enabled */
+ i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
+
/* free VF resources to begin resetting the VSI state */
i40e_free_vf_res(vf);
@@ -1840,143 +1973,55 @@ static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
* i40e_vc_config_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to configure the promiscuous mode of
* VF vsis
**/
-static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
- u8 *msg, u16 msglen)
+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_mac_filter *f;
i40e_status aq_ret = 0;
bool allmulti = false;
- struct i40e_vsi *vsi;
bool alluni = false;
- int aq_err = 0;
- int bkt;
- vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
- !vsi) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
- dev_err(&pf->pdev->dev,
- "Unprivileged VF %d is attempting to configure promiscuous mode\n",
- vf->vf_id);
- /* Lie to the VF on purpose. */
- aq_ret = 0;
- goto error_param;
- }
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+ return I40E_ERR_PARAM;
+
/* Multicast promiscuous handling*/
if (info->flags & FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
- if (vf->port_vlan_id) {
- aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
- allmulti,
- vf->port_vlan_id,
- NULL);
- } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
- if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
- continue;
- aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
- vsi->seid,
- allmulti,
- f->vlan,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
- if (aq_ret) {
- dev_err(&pf->pdev->dev,
- "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
- f->vlan,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- break;
- }
- }
- } else {
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
- allmulti, NULL);
- aq_err = pf->hw.aq.asq_last_status;
- if (aq_ret) {
- dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
- vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- goto error_param;
- }
- }
-
+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
+ alluni = true;
+ aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
+ alluni);
if (!aq_ret) {
- dev_info(&pf->pdev->dev,
- "VF %d successfully set multicast promiscuous mode\n",
- vf->vf_id);
- if (allmulti)
+ if (allmulti) {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set multicast promiscuous mode\n",
+ vf->vf_id);
set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- else
+ } else {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset multicast promiscuous mode\n",
+ vf->vf_id);
clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- }
-
- if (info->flags & FLAG_VF_UNICAST_PROMISC)
- alluni = true;
- if (vf->port_vlan_id) {
- aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
- alluni,
- vf->port_vlan_id,
- NULL);
- } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
- if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
- continue;
- aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
- vsi->seid,
- alluni,
- f->vlan,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
- if (aq_ret)
- dev_err(&pf->pdev->dev,
- "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
- f->vlan,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- }
- } else {
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
- alluni, NULL,
- true);
- aq_err = pf->hw.aq.asq_last_status;
- if (aq_ret) {
- dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
- vf->vf_id, info->flags,
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- goto error_param;
}
- }
-
- if (!aq_ret) {
- dev_info(&pf->pdev->dev,
- "VF %d successfully set unicast promiscuous mode\n",
- vf->vf_id);
- if (alluni)
+ if (alluni) {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set unicast promiscuous mode\n",
+ vf->vf_id);
set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
- else
+ } else {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset unicast promiscuous mode\n",
+ vf->vf_id);
clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
+ }
}
-error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
@@ -1987,12 +2032,11 @@ error_param:
* i40e_vc_config_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to configure the rx/tx
* queues
**/
-static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
@@ -2105,12 +2149,11 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
* i40e_vc_config_irq_map_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to configure the irq to
* queue map
**/
-static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
@@ -2202,11 +2245,10 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
* i40e_vc_enable_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to enable all or specific queue(s)
**/
-static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
@@ -2261,12 +2303,11 @@ error_param:
* i40e_vc_disable_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to disable all or specific
* queue(s)
**/
-static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
@@ -2309,14 +2350,13 @@ error_param:
* i40e_vc_request_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* VFs get a default number of queues but can use this message to request a
* different number. If the request is successful, PF will reset the VF and
* return 0. If unsuccessful, PF will send message informing VF of number of
* available queues and return result of sending VF a message.
**/
-static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
+static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
@@ -2360,11 +2400,10 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
* i40e_vc_get_stats_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* called from the VF to get vsi stats
**/
-static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
@@ -2458,7 +2497,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
!is_multicast_ether_addr(addr) && vf->pf_set_mac &&
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
- "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
+ "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
return -EPERM;
}
}
@@ -2470,11 +2509,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* i40e_vc_add_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* add guest mac address filter
**/
-static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
@@ -2541,11 +2579,10 @@ error_param:
* i40e_vc_del_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* remove guest mac address filter
**/
-static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
@@ -2569,6 +2606,16 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
ret = I40E_ERR_INVALID_MAC_ADDR;
goto error_param;
}
+
+ if (vf->pf_set_mac &&
+ ether_addr_equal(al->list[i].addr,
+ vf->default_lan_addr.addr)) {
+ dev_err(&pf->pdev->dev,
+ "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
+ vf->default_lan_addr.addr, vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
}
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2601,11 +2648,10 @@ error_param:
* i40e_vc_add_vlan_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* program guest vlan id
**/
-static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
@@ -2674,11 +2720,10 @@ error_param:
* i40e_vc_remove_vlan_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* remove programmed guest vlan id
**/
-static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
@@ -2761,13 +2806,11 @@ error_param:
* i40e_vc_iwarp_qvmap_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
* @config: config qvmap or release it
*
* called from the VF for the iwarp msgs
**/
-static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
- bool config)
+static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
{
struct virtchnl_iwarp_qvlist_info *qvlist_info =
(struct virtchnl_iwarp_qvlist_info *)msg;
@@ -2798,11 +2841,10 @@ error_param:
* i40e_vc_config_rss_key
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Configure the VF's RSS key
**/
-static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
@@ -2830,11 +2872,10 @@ err:
* i40e_vc_config_rss_lut
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Configure the VF's RSS LUT
**/
-static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_lut *vrl =
(struct virtchnl_rss_lut *)msg;
@@ -2862,11 +2903,10 @@ err:
* i40e_vc_get_rss_hena
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Return the RSS HENA bits allowed by the hardware
**/
-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_hena *vrh = NULL;
struct i40e_pf *pf = vf->pf;
@@ -2898,11 +2938,10 @@ err:
* i40e_vc_set_rss_hena
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Set the RSS HENA bits for the VF
**/
-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_hena *vrh =
(struct virtchnl_rss_hena *)msg;
@@ -2927,12 +2966,10 @@ err:
* i40e_vc_enable_vlan_stripping
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Enable vlan header stripping for the VF
**/
-static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
- u16 msglen)
+static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
i40e_status aq_ret = 0;
@@ -2954,12 +2991,10 @@ err:
* i40e_vc_disable_vlan_stripping
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @msglen: msg length
*
* Disable vlan header stripping for the VF
**/
-static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
- u16 msglen)
+static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
i40e_status aq_ret = 0;
@@ -3659,65 +3694,65 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
ret = 0;
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
+ ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- ret = i40e_vc_config_queues_msg(vf, msg, msglen);
+ ret = i40e_vc_config_queues_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
+ ret = i40e_vc_config_irq_map_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
- ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+ ret = i40e_vc_enable_queues_msg(vf, msg);
i40e_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_DISABLE_QUEUES:
- ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
+ ret = i40e_vc_disable_queues_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
- ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
+ ret = i40e_vc_add_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_ETH_ADDR:
- ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
+ ret = i40e_vc_del_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_VLAN:
- ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
+ ret = i40e_vc_add_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_VLAN:
- ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
+ ret = i40e_vc_remove_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_GET_STATS:
- ret = i40e_vc_get_stats_msg(vf, msg, msglen);
+ ret = i40e_vc_get_stats_msg(vf, msg);
break;
case VIRTCHNL_OP_IWARP:
ret = i40e_vc_iwarp_msg(vf, msg, msglen);
break;
case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
+ ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
break;
case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
+ ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
- ret = i40e_vc_config_rss_key(vf, msg, msglen);
+ ret = i40e_vc_config_rss_key(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
- ret = i40e_vc_config_rss_lut(vf, msg, msglen);
+ ret = i40e_vc_config_rss_lut(vf, msg);
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- ret = i40e_vc_get_rss_hena(vf, msg, msglen);
+ ret = i40e_vc_get_rss_hena(vf, msg);
break;
case VIRTCHNL_OP_SET_RSS_HENA:
- ret = i40e_vc_set_rss_hena(vf, msg, msglen);
+ ret = i40e_vc_set_rss_hena(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
- ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
+ ret = i40e_vc_enable_vlan_stripping(vf, msg);
break;
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
- ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
+ ret = i40e_vc_disable_vlan_stripping(vf, msg);
break;
case VIRTCHNL_OP_REQUEST_QUEUES:
- ret = i40e_vc_request_queues_msg(vf, msg, msglen);
+ ret = i40e_vc_request_queues_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_CHANNELS:
ret = i40e_vc_add_qch_msg(vf, msg);
@@ -3786,6 +3821,35 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
}
/**
+ * i40e_validate_vf
+ * @pf: the physical function
+ * @vf_id: VF identifier
+ *
+ * Check that the VF is enabled and the VSI exists.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
+{
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev,
+ "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto err_out;
+ }
+ vf = &pf->vf[vf_id];
+ vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
+ if (!vsi)
+ ret = -EINVAL;
+err_out:
+ return ret;
+}
+
+/**
* i40e_ndo_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -3806,14 +3870,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
u8 i;
/* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev,
- "Invalid VF Identifier %d\n", vf_id);
- ret = -EINVAL;
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
goto error_param;
- }
- vf = &(pf->vf[vf_id]);
+ vf = &pf->vf[vf_id];
vsi = pf->vsi[vf->lan_vsi_idx];
/* When the VF is resetting wait until it is done.
@@ -3873,9 +3934,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
mac, vf_id);
}
- /* Force the VF driver stop so it has to reload with new MAC address */
+	/* Force the VF interface down so it has to be brought back up
+	 * with the new MAC address
+ */
i40e_vc_disable_vf(vf);
- dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
+ dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
error_param:
return ret;
@@ -3930,11 +3993,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
int ret = 0;
/* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
- ret = -EINVAL;
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
goto error_pvid;
- }
if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
@@ -3948,7 +4009,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
goto error_pvid;
}
- vf = &(pf->vf[vf_id]);
+ vf = &pf->vf[vf_id];
vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
@@ -4068,11 +4129,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int ret = 0;
/* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
- ret = -EINVAL;
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
goto error;
- }
if (min_tx_rate) {
dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
@@ -4080,7 +4139,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
return -EINVAL;
}
- vf = &(pf->vf[vf_id]);
+ vf = &pf->vf[vf_id];
vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
@@ -4116,13 +4175,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
int ret = 0;
/* validate the request */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
- ret = -EINVAL;
+ ret = i40e_validate_vf(pf, vf_id);
+ if (ret)
goto error_param;
- }
- vf = &(pf->vf[vf_id]);
+ vf = &pf->vf[vf_id];
/* first vsi is always the LAN vsi */
vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
@@ -4199,7 +4256,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
vf->link_forced = true;
vf->link_up = true;
pfe.event_data.link_event.link_status = true;
- pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
break;
case IFLA_VF_LINK_STATE_DISABLE:
vf->link_forced = true;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
new file mode 100644
index 000000000000..add1e457886d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -0,0 +1,967 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+
+#include "i40e.h"
+#include "i40e_txrx_common.h"
+#include "i40e_xsk.h"
+
+/**
+ * i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs
+ * @vsi: Current VSI
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
+{
+ if (vsi->xsk_umems)
+ return 0;
+
+ vsi->num_xsk_umems_used = 0;
+ vsi->num_xsk_umems = vsi->alloc_queue_pairs;
+ vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+ GFP_KERNEL);
+ if (!vsi->xsk_umems) {
+ vsi->num_xsk_umems = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_add_xsk_umem - Store an UMEM for a certain ring/qid
+ * @vsi: Current VSI
+ * @umem: UMEM to store
+ * @qid: Ring/qid to associate with the UMEM
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
+ u16 qid)
+{
+ int err;
+
+ err = i40e_alloc_xsk_umems(vsi);
+ if (err)
+ return err;
+
+ vsi->xsk_umems[qid] = umem;
+ vsi->num_xsk_umems_used++;
+
+ return 0;
+}
+
+/**
+ * i40e_remove_xsk_umem - Remove an UMEM for a certain ring/qid
+ * @vsi: Current VSI
+ * @qid: Ring/qid associated with the UMEM
+ **/
+static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid)
+{
+ vsi->xsk_umems[qid] = NULL;
+ vsi->num_xsk_umems_used--;
+
+ if (vsi->num_xsk_umems == 0) {
+ kfree(vsi->xsk_umems);
+ vsi->xsk_umems = NULL;
+ vsi->num_xsk_umems = 0;
+ }
+}
+
+/**
+ * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
+ * @vsi: Current VSI
+ * @umem: UMEM to DMA map
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct device *dev;
+ unsigned int i, j;
+ dma_addr_t dma;
+
+ dev = &pf->pdev->dev;
+ for (i = 0; i < umem->npgs; i++) {
+ dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
+ if (dma_mapping_error(dev, dma))
+ goto out_unmap;
+
+ umem->pages[i].dma = dma;
+ }
+
+ return 0;
+
+out_unmap:
+ for (j = 0; j < i; j++) {
+		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
+				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
+		umem->pages[j].dma = 0;
+ }
+
+	return -ENOMEM;
+}
+
+/**
+ * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev
+ * @vsi: Current VSI
+ * @umem: UMEM to DMA unmap
+ **/
+static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct device *dev;
+ unsigned int i;
+
+ dev = &pf->pdev->dev;
+
+ for (i = 0; i < umem->npgs; i++) {
+ dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
+
+ umem->pages[i].dma = 0;
+ }
+}
+
+/**
+ * i40e_xsk_umem_enable - Enable/associate an UMEM to a certain ring/qid
+ * @vsi: Current VSI
+ * @umem: UMEM
+ * @qid: Rx ring to associate UMEM to
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
+ u16 qid)
+{
+ struct xdp_umem_fq_reuse *reuseq;
+ bool if_running;
+ int err;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ if (qid >= vsi->num_queue_pairs)
+ return -EINVAL;
+
+ if (vsi->xsk_umems) {
+ if (qid >= vsi->num_xsk_umems)
+ return -EINVAL;
+ if (vsi->xsk_umems[qid])
+ return -EBUSY;
+ }
+
+ reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
+ if (!reuseq)
+ return -ENOMEM;
+
+ xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+ err = i40e_xsk_umem_dma_map(vsi, umem);
+ if (err)
+ return err;
+
+ if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
+
+ if (if_running) {
+ err = i40e_queue_pair_disable(vsi, qid);
+ if (err)
+ return err;
+ }
+
+ err = i40e_add_xsk_umem(vsi, umem, qid);
+ if (err)
+ return err;
+
+ if (if_running) {
+ err = i40e_queue_pair_enable(vsi, qid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_xsk_umem_disable - Disassociate an UMEM from a certain ring/qid
+ * @vsi: Current VSI
+ * @qid: Rx ring to disassociate the UMEM from
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
+{
+ bool if_running;
+ int err;
+
+ if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
+ !vsi->xsk_umems[qid])
+ return -EINVAL;
+
+ if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
+
+ if (if_running) {
+ err = i40e_queue_pair_disable(vsi, qid);
+ if (err)
+ return err;
+ }
+
+ i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
+ i40e_remove_xsk_umem(vsi, qid);
+
+ if (if_running) {
+ err = i40e_queue_pair_enable(vsi, qid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM
+ * @vsi: Current VSI
+ * @umem: out parameter; the UMEM associated with the ring, if any
+ * @qid: Rx ring to query for an UMEM
+ *
+ * This function stores, via @umem, the UMEM associated with a certain
+ * ring, if any.
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
+ u16 qid)
+{
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ if (qid >= vsi->num_queue_pairs)
+ return -EINVAL;
+
+ if (vsi->xsk_umems) {
+ if (qid >= vsi->num_xsk_umems)
+ return -EINVAL;
+ *umem = vsi->xsk_umems[qid];
+ return 0;
+ }
+
+ *umem = NULL;
+ return 0;
+}
+
+/**
+ * i40e_xsk_umem_setup - Enable or disable an UMEM for a certain ring/qid
+ * @vsi: Current VSI
+ * @umem: UMEM to enable/associate to a ring, or NULL to disable
+ * @qid: Rx ring to (dis)associate the UMEM with
+ *
+ * This function enables or disables an UMEM for a certain ring.
+ *
+ * Returns 0 on success, <0 on failure
+ **/
+int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+ u16 qid)
+{
+ return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
+ i40e_xsk_umem_disable(vsi, qid);
+}
+
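Neither entry point is called directly; both are reached through the
driver's ndo_bpf handler. A sketch of that dispatch (the switch lives
in i40e_main.c elsewhere in this series; XDP_QUERY_XSK_UMEM,
XDP_SETUP_XSK_UMEM and the xsk fields belong to struct netdev_bpf):

	case XDP_QUERY_XSK_UMEM:
		return i40e_xsk_umem_query(vsi, &xdp->xsk.umem,
					   xdp->xsk.queue_id);
	case XDP_SETUP_XSK_UMEM:
		return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
					   xdp->xsk.queue_id);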
+/**
+ * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
+ * @rx_ring: Rx ring
+ * @xdp: xdp_buff used as input to the XDP program
+ *
+ * This function runs the XDP program on the received frame and
+ * returns the verdict.
+ *
+ * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
+ **/
+static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+{
+ int err, result = I40E_XDP_PASS;
+ struct i40e_ring *xdp_ring;
+ struct bpf_prog *xdp_prog;
+ u32 act;
+
+ rcu_read_lock();
+	/* NB! xdp_prog will always be !NULL, because this path is only
+	 * enabled by setting an XDP program.
+ */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
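+	/* The program may have moved xdp->data (bpf_xdp_adjust_head());
+	 * fold the new data offset into the UMEM handle so a redirect
+	 * to an AF_XDP socket sees the adjusted packet start.
+	 */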
+ xdp->handle += xdp->data - xdp->data_hard_start;
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping packet */
+ case XDP_DROP:
+ result = I40E_XDP_CONSUMED;
+ break;
+ }
+ rcu_read_unlock();
+ return result;
+}
+
+/**
+ * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer
+ * @rx_ring: Rx ring
+ * @bi: Rx buffer to populate
+ *
+ * This function allocates an Rx buffer. The buffer can come from the
+ * fill queue, or via the recycle queue (next_to_alloc).
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ void *addr = bi->addr;
+ u64 handle, hr;
+
+ if (addr) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
+
+ if (!xsk_umem_peek_addr(umem, &handle)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ bi->dma = xdp_umem_get_dma(umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(umem, handle);
+ bi->addr += hr;
+
+ bi->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr(umem);
+ return true;
+}
+
+/**
+ * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
+ * @rx_ring: Rx ring
+ * @bi: Rx buffer to populate
+ *
+ * This function allocates an Rx buffer. The buffer can come from the
+ * fill queue, or via the reuse queue.
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ u64 handle, hr;
+
+ if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ handle &= rx_ring->xsk_umem->chunk_mask;
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ bi->dma = xdp_umem_get_dma(umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(umem, handle);
+ bi->addr += hr;
+
+ bi->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr_rq(umem);
+ return true;
+}
+
+static __always_inline bool
+__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
+ bool alloc(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi))
+{
+ u16 ntu = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+ bool ok = true;
+
+ rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ bi = &rx_ring->rx_bi[ntu];
+ do {
+ if (!alloc(rx_ring, bi)) {
+ ok = false;
+ goto no_buffers;
+ }
+
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,
+ rx_ring->rx_buf_len,
+ DMA_BIDIRECTIONAL);
+
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+
+ rx_desc++;
+ bi++;
+ ntu++;
+
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_bi;
+ ntu = 0;
+ }
+
+ rx_desc->wb.qword1.status_error_len = 0;
+ count--;
+ } while (count);
+
+no_buffers:
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
+
+ return ok;
+}
+
+/**
+ * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * This function allocates a number of Rx buffers from the reuse queue
+ * or fill ring and places them on the Rx ring.
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
+{
+ return __i40e_alloc_rx_buffers_zc(rx_ring, count,
+ i40e_alloc_buffer_slow_zc);
+}
+
+/**
+ * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * This function allocates a number of Rx buffers from the fill ring
+ * or the internal recycle mechanism and places them on the Rx ring.
+ *
+ * Returns true for a successful allocation, false otherwise
+ **/
+static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
+{
+ return __i40e_alloc_rx_buffers_zc(rx_ring, count,
+ i40e_alloc_buffer_zc);
+}
+
+/**
+ * i40e_get_rx_buffer_zc - Return the current Rx buffer
+ * @rx_ring: Rx ring
+ * @size: The size of the rx buffer (read from descriptor)
+ *
+ * This function returns the current, received Rx buffer and also
+ * performs the DMA synchronization needed for CPU access.
+ *
+ * Returns the received Rx buffer
+ **/
+static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
+ const unsigned int size)
+{
+ struct i40e_rx_buffer *bi;
+
+ bi = &rx_ring->rx_bi[rx_ring->next_to_clean];
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ bi->dma, 0,
+ size,
+ DMA_BIDIRECTIONAL);
+
+ return bi;
+}
+
+/**
+ * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer
+ * @rx_ring: Rx ring
+ * @old_bi: The Rx buffer to recycle
+ *
+ * This function recycles a finished Rx buffer, and places it on the
+ * recycle queue (next_to_alloc).
+ **/
+static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_bi)
+{
+ struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
+ unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
+ u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ u16 nta = rx_ring->next_to_alloc;
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ new_bi->dma = old_bi->dma & mask;
+ new_bi->dma += hr;
+
+ new_bi->addr = (void *)((unsigned long)old_bi->addr & mask);
+ new_bi->addr += hr;
+
+ new_bi->handle = old_bi->handle & mask;
+ new_bi->handle += rx_ring->xsk_umem->headroom;
+
+ old_bi->addr = NULL;
+}
+
+/**
+ * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
+ * @alloc: Zero-copy allocator
+ * @handle: Buffer handle
+ **/
+void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
+{
+ struct i40e_rx_buffer *bi;
+ struct i40e_ring *rx_ring;
+ u64 hr, mask;
+ u16 nta;
+
+ rx_ring = container_of(alloc, struct i40e_ring, zca);
+ hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ mask = rx_ring->xsk_umem->chunk_mask;
+
+ nta = rx_ring->next_to_alloc;
+ bi = &rx_ring->rx_bi[nta];
+
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ handle &= mask;
+
+ bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
+ bi->addr += hr;
+
+ bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
+}
+
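i40e_zca_free is the free callback of the zero-copy allocator anchored
in the ring. A sketch of the registration, which is done at Rx ring
configuration time elsewhere in this series:

	rx_ring->zca.free = i40e_zca_free;
	err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
					 MEM_TYPE_ZERO_COPY,
					 &rx_ring->zca);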
+/**
+ * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
+ * @rx_ring: Rx ring
+ * @bi: Rx buffer
+ * @xdp: xdp_buff
+ *
+ * This function allocates a new skb from a zero-copy Rx buffer.
+ *
+ * Returns the skb, or NULL on failure.
+ **/
+static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ struct sk_buff *skb;
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ xdp->data_end - xdp->data_hard_start,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ i40e_reuse_rx_buffer_zc(rx_ring, bi);
+ return skb;
+}
+
+/**
+ * i40e_inc_ntc - Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+}
+
+/**
+ * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
+ * @rx_ring: Rx ring
+ * @budget: NAPI budget
+ *
+ * Returns amount of work completed
+ **/
+int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ unsigned int xdp_res, xdp_xmit = 0;
+ bool failure = false;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+ while (likely(total_rx_packets < (unsigned int)budget)) {
+ struct i40e_rx_buffer *bi;
+ union i40e_rx_desc *rx_desc;
+ unsigned int size;
+ u16 vlan_tag;
+ u8 rx_ptype;
+ u64 qword;
+
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ failure = failure ||
+ !i40e_alloc_rx_buffers_fast_zc(rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
+ */
+ dma_rmb();
+
+ bi = i40e_clean_programming_status(rx_ring, rx_desc,
+ qword);
+ if (unlikely(bi)) {
+ i40e_reuse_rx_buffer_zc(rx_ring, bi);
+ cleaned_count++;
+ continue;
+ }
+
+ size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ if (!size)
+ break;
+
+ bi = i40e_get_rx_buffer_zc(rx_ring, size);
+ xdp.data = bi->addr;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp.data_end = xdp.data + size;
+ xdp.handle = bi->handle;
+
+ xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
+ if (xdp_res) {
+ if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ bi->addr = NULL;
+ } else {
+ i40e_reuse_rx_buffer_zc(rx_ring, bi);
+ }
+
+ total_rx_bytes += size;
+ total_rx_packets++;
+
+ cleaned_count++;
+ i40e_inc_ntc(rx_ring);
+ continue;
+ }
+
+ /* XDP_PASS path */
+
+		/* NB! We are not checking for errors using
+		 * i40e_test_staterr with
+		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
+		 * SBP is *not* set in PRT_SBPVSI (the default).
+ */
+ skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ break;
+ }
+
+ cleaned_count++;
+ i40e_inc_ntc(rx_ring);
+
+ if (eth_skb_pad(skb))
+ continue;
+
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+ i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+
+ vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+ le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+ }
+
+ i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
+ i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
+ return failure ? budget : (int)total_rx_packets;
+}
+
+/**
+ * i40e_xmit_zc - Performs zero-copy AF_XDP Tx
+ * @xdp_ring: XDP Tx ring
+ * @budget: NAPI budget
+ *
+ * Returns true if the work is finished.
+ **/
+static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
+{
+ struct i40e_tx_desc *tx_desc = NULL;
+ struct i40e_tx_buffer *tx_bi;
+ bool work_done = true;
+ dma_addr_t dma;
+ u32 len;
+
+ while (budget-- > 0) {
+ if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
+ xdp_ring->tx_stats.tx_busy++;
+ work_done = false;
+ break;
+ }
+
+ if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+ break;
+
+ dma_sync_single_for_device(xdp_ring->dev, dma, len,
+ DMA_BIDIRECTIONAL);
+
+ tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
+ tx_bi->bytecount = len;
+
+ tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(I40E_TX_DESC_CMD_ICRC
+ | I40E_TX_DESC_CMD_EOP,
+ 0, len, 0);
+
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
+ }
+
+ if (tx_desc) {
+ /* Request an interrupt for the last frame and bump tail ptr. */
+ tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ i40e_xdp_ring_update_tail(xdp_ring);
+
+ xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ }
+
+ return !!budget && work_done;
+}
+
+/**
+ * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
+ * @tx_ring: XDP Tx ring
+ * @tx_bi: Tx buffer info to clean
+ **/
+static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
+ struct i40e_tx_buffer *tx_bi)
+{
+ xdp_return_frame(tx_bi->xdpf);
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_bi, dma),
+ dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_bi, len, 0);
+}
+
+/**
+ * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
+ * @vsi: Current VSI
+ * @tx_ring: XDP Tx ring
+ * @napi_budget: NAPI budget
+ *
+ * Returns true if cleanup/transmission is done.
+ **/
+bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget)
+{
+ unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
+ u32 i, completed_frames, frames_ready, xsk_frames = 0;
+ struct xdp_umem *umem = tx_ring->xsk_umem;
+ u32 head_idx = i40e_get_head(tx_ring);
+ bool work_done = true, xmit_done;
+ struct i40e_tx_buffer *tx_bi;
+
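+	/* head_idx is the DMA head write-back position; unwrap it so
+	 * the completed-frames count below stays monotonic.
+	 */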
+ if (head_idx < tx_ring->next_to_clean)
+ head_idx += tx_ring->count;
+ frames_ready = head_idx - tx_ring->next_to_clean;
+
+ if (frames_ready == 0) {
+ goto out_xmit;
+ } else if (frames_ready > budget) {
+ completed_frames = budget;
+ work_done = false;
+ } else {
+ completed_frames = frames_ready;
+ }
+
+ ntc = tx_ring->next_to_clean;
+
+ for (i = 0; i < completed_frames; i++) {
+ tx_bi = &tx_ring->tx_bi[ntc];
+
+ if (tx_bi->xdpf)
+ i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
+ else
+ xsk_frames++;
+
+ tx_bi->xdpf = NULL;
+ total_bytes += tx_bi->bytecount;
+
+ if (++ntc >= tx_ring->count)
+ ntc = 0;
+ }
+
+ tx_ring->next_to_clean += completed_frames;
+ if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
+ tx_ring->next_to_clean -= tx_ring->count;
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(umem, xsk_frames);
+
+ i40e_arm_wb(tx_ring, vsi, budget);
+ i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
+
+out_xmit:
+ xmit_done = i40e_xmit_zc(tx_ring, budget);
+
+ return work_done && xmit_done;
+}
+
+/**
+ * i40e_xsk_async_xmit - Implements the ndo_xsk_async_xmit
+ * @dev: the netdevice
+ * @queue_id: queue id to wake up
+ *
+ * Returns <0 for errors, 0 otherwise.
+ **/
+int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_ring *ring;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!i40e_enabled_xdp_vsi(vsi))
+ return -ENXIO;
+
+ if (queue_id >= vsi->num_queue_pairs)
+ return -ENXIO;
+
+ if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ return -ENXIO;
+
+ ring = vsi->xdp_rings[queue_id];
+
+ /* The idea here is that if NAPI is running, mark a miss, so
+ * it will run again. If not, trigger an interrupt and
+ * schedule the NAPI from interrupt context. If NAPI would be
+ * scheduled here, the interrupt affinity would not be
+ * honored.
+ */
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
+ i40e_force_wb(vsi, ring->q_vector);
+
+ return 0;
+}
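In user space, this hook is what a Tx kick on an AF_XDP socket bound to
this queue ends up invoking; e.g. (illustrative, xsk_fd being such a
socket):

	/* wake the driver to process the AF_XDP Tx ring */
	sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);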
+
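+/**
+ * i40e_xsk_clean_rx_ring - Clean the Rx ring on shutdown
+ * @rx_ring: Rx ring
+ *
+ * Hands any outstanding buffers back to the UMEM fill queue reuse list.
+ **/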
+void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+ u16 i;
+
+ for (i = 0; i < rx_ring->count; i++) {
+ struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
+ if (!rx_bi->addr)
+ continue;
+
+ xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
+ rx_bi->addr = NULL;
+ }
+}
+
+/**
+ * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
+ * @tx_ring: XDP Tx ring
+ **/
+void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+ u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+ struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct i40e_tx_buffer *tx_bi;
+ u32 xsk_frames = 0;
+
+ while (ntc != ntu) {
+ tx_bi = &tx_ring->tx_bi[ntc];
+
+ if (tx_bi->xdpf)
+ i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
+ else
+ xsk_frames++;
+
+ tx_bi->xdpf = NULL;
+
+ ntc++;
+ if (ntc >= tx_ring->count)
+ ntc = 0;
+ }
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(umem, xsk_frames);
+}
+
+/**
+ * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
+ * @vsi: vsi
+ *
+ * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ **/
+bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->xsk_umems)
+ return false;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ if (vsi->xsk_umems[i])
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
new file mode 100644
index 000000000000..9038c5d5cf08
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2018 Intel Corporation. */
+
+#ifndef _I40E_XSK_H_
+#define _I40E_XSK_H_
+
+struct i40e_vsi;
+struct xdp_umem;
+struct zero_copy_allocator;
+
+int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
+int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
+int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
+ u16 qid);
+int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+ u16 qid);
+void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
+bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
+int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
+
+bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget);
+int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id);
+
+#endif /* _I40E_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
deleted file mode 100644
index 3c5c6e962280..000000000000
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 2013 - 2018 Intel Corporation.
-
-#
-## Makefile for the Intel(R) 40GbE VF driver
-#
-#
-
-ccflags-y += -I$(src)
-subdir-ccflags-y += -I$(src)
-
-obj-$(CONFIG_I40EVF) += i40evf.o
-
-i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
- i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o
-
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
deleted file mode 100644
index 5fd8529465d4..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ /dev/null
@@ -1,2717 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_ADMINQ_CMD_H_
-#define _I40E_ADMINQ_CMD_H_
-
-/* This header file defines the i40e Admin Queue commands and is shared between
- * i40e Firmware and Software.
- *
- * This file needs to comply with the Linux Kernel coding style.
- */
-
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0007
-
-#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
- I40E_FW_API_VERSION_MINOR_X710 : \
- I40E_FW_API_VERSION_MINOR_X722)
-
-/* API version 1.7 implements additional link and PHY-specific APIs */
-#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
-
-struct i40e_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- struct {
- __le32 param0;
- __le32 param1;
- __le32 param2;
- __le32 param3;
- } internal;
- struct {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
- } external;
- u8 raw[16];
- } params;
-};
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT 0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-#define I40E_AQ_FLAG_EI_SHIFT 14
-#define I40E_AQ_FLAG_FE_SHIFT 15
-
-#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
-
-/* error codes */
-enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
-};
-
-/* Admin Queue command opcodes */
-enum i40e_admin_queue_opc {
- /* aq commands */
- i40e_aqc_opc_get_version = 0x0001,
- i40e_aqc_opc_driver_version = 0x0002,
- i40e_aqc_opc_queue_shutdown = 0x0003,
- i40e_aqc_opc_set_pf_context = 0x0004,
-
- /* resource ownership */
- i40e_aqc_opc_request_resource = 0x0008,
- i40e_aqc_opc_release_resource = 0x0009,
-
- i40e_aqc_opc_list_func_capabilities = 0x000A,
- i40e_aqc_opc_list_dev_capabilities = 0x000B,
-
- /* Proxy commands */
- i40e_aqc_opc_set_proxy_config = 0x0104,
- i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
-
- /* LAA */
- i40e_aqc_opc_mac_address_read = 0x0107,
- i40e_aqc_opc_mac_address_write = 0x0108,
-
- /* PXE */
- i40e_aqc_opc_clear_pxe_mode = 0x0110,
-
- /* WoL commands */
- i40e_aqc_opc_set_wol_filter = 0x0120,
- i40e_aqc_opc_get_wake_reason = 0x0121,
-
- /* internal switch commands */
- i40e_aqc_opc_get_switch_config = 0x0200,
- i40e_aqc_opc_add_statistics = 0x0201,
- i40e_aqc_opc_remove_statistics = 0x0202,
- i40e_aqc_opc_set_port_parameters = 0x0203,
- i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
- i40e_aqc_opc_set_switch_config = 0x0205,
- i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
- i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
-
- i40e_aqc_opc_add_vsi = 0x0210,
- i40e_aqc_opc_update_vsi_parameters = 0x0211,
- i40e_aqc_opc_get_vsi_parameters = 0x0212,
-
- i40e_aqc_opc_add_pv = 0x0220,
- i40e_aqc_opc_update_pv_parameters = 0x0221,
- i40e_aqc_opc_get_pv_parameters = 0x0222,
-
- i40e_aqc_opc_add_veb = 0x0230,
- i40e_aqc_opc_update_veb_parameters = 0x0231,
- i40e_aqc_opc_get_veb_parameters = 0x0232,
-
- i40e_aqc_opc_delete_element = 0x0243,
-
- i40e_aqc_opc_add_macvlan = 0x0250,
- i40e_aqc_opc_remove_macvlan = 0x0251,
- i40e_aqc_opc_add_vlan = 0x0252,
- i40e_aqc_opc_remove_vlan = 0x0253,
- i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
- i40e_aqc_opc_add_tag = 0x0255,
- i40e_aqc_opc_remove_tag = 0x0256,
- i40e_aqc_opc_add_multicast_etag = 0x0257,
- i40e_aqc_opc_remove_multicast_etag = 0x0258,
- i40e_aqc_opc_update_tag = 0x0259,
- i40e_aqc_opc_add_control_packet_filter = 0x025A,
- i40e_aqc_opc_remove_control_packet_filter = 0x025B,
- i40e_aqc_opc_add_cloud_filters = 0x025C,
- i40e_aqc_opc_remove_cloud_filters = 0x025D,
- i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
-
- i40e_aqc_opc_add_mirror_rule = 0x0260,
- i40e_aqc_opc_delete_mirror_rule = 0x0261,
-
- /* Dynamic Device Personalization */
- i40e_aqc_opc_write_personalization_profile = 0x0270,
- i40e_aqc_opc_get_personalization_profile_list = 0x0271,
-
- /* DCB commands */
- i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
- i40e_aqc_opc_dcb_updated = 0x0302,
- i40e_aqc_opc_set_dcb_parameters = 0x0303,
-
- /* TX scheduler */
- i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
- i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
- i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
- i40e_aqc_opc_query_vsi_bw_config = 0x0408,
- i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
- i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
-
- i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
- i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
- i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
- i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
- i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
- i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
- i40e_aqc_opc_query_port_ets_config = 0x0419,
- i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
- i40e_aqc_opc_suspend_port_tx = 0x041B,
- i40e_aqc_opc_resume_port_tx = 0x041C,
- i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
- /* phy commands*/
- i40e_aqc_opc_get_phy_abilities = 0x0600,
- i40e_aqc_opc_set_phy_config = 0x0601,
- i40e_aqc_opc_set_mac_config = 0x0603,
- i40e_aqc_opc_set_link_restart_an = 0x0605,
- i40e_aqc_opc_get_link_status = 0x0607,
- i40e_aqc_opc_set_phy_int_mask = 0x0613,
- i40e_aqc_opc_get_local_advt_reg = 0x0614,
- i40e_aqc_opc_set_local_advt_reg = 0x0615,
- i40e_aqc_opc_get_partner_advt = 0x0616,
- i40e_aqc_opc_set_lb_modes = 0x0618,
- i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_debug = 0x0622,
- i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
- i40e_aqc_opc_run_phy_activity = 0x0626,
- i40e_aqc_opc_set_phy_register = 0x0628,
- i40e_aqc_opc_get_phy_register = 0x0629,
-
- /* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
- i40e_aqc_opc_nvm_config_read = 0x0704,
- i40e_aqc_opc_nvm_config_write = 0x0705,
- i40e_aqc_opc_oem_post_update = 0x0720,
- i40e_aqc_opc_thermal_sensor = 0x0721,
-
- /* virtualization commands */
- i40e_aqc_opc_send_msg_to_pf = 0x0801,
- i40e_aqc_opc_send_msg_to_vf = 0x0802,
- i40e_aqc_opc_send_msg_to_peer = 0x0803,
-
- /* alternate structure */
- i40e_aqc_opc_alternate_write = 0x0900,
- i40e_aqc_opc_alternate_write_indirect = 0x0901,
- i40e_aqc_opc_alternate_read = 0x0902,
- i40e_aqc_opc_alternate_read_indirect = 0x0903,
- i40e_aqc_opc_alternate_write_done = 0x0904,
- i40e_aqc_opc_alternate_set_mode = 0x0905,
- i40e_aqc_opc_alternate_clear_port = 0x0906,
-
- /* LLDP commands */
- i40e_aqc_opc_lldp_get_mib = 0x0A00,
- i40e_aqc_opc_lldp_update_mib = 0x0A01,
- i40e_aqc_opc_lldp_add_tlv = 0x0A02,
- i40e_aqc_opc_lldp_update_tlv = 0x0A03,
- i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
- i40e_aqc_opc_lldp_stop = 0x0A05,
- i40e_aqc_opc_lldp_start = 0x0A06,
-
- /* Tunnel commands */
- i40e_aqc_opc_add_udp_tunnel = 0x0B00,
- i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_set_rss_key = 0x0B02,
- i40e_aqc_opc_set_rss_lut = 0x0B03,
- i40e_aqc_opc_get_rss_key = 0x0B04,
- i40e_aqc_opc_get_rss_lut = 0x0B05,
-
- /* Async Events */
- i40e_aqc_opc_event_lan_overflow = 0x1001,
-
- /* OEM commands */
- i40e_aqc_opc_oem_parameter_change = 0xFE00,
- i40e_aqc_opc_oem_device_status_change = 0xFE01,
- i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
- i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
-
- /* debug commands */
- i40e_aqc_opc_debug_read_reg = 0xFF03,
- i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_modify_reg = 0xFF07,
- i40e_aqc_opc_debug_dump_internals = 0xFF08,
-};
-
-/* command structures and indirect data structures */
-
-/* Structure naming conventions:
- * - no suffix for direct command descriptor structures
- * - _data for indirect sent data
- * - _resp for indirect return data (data which is both will use _data)
- * - _completion for direct return data
- * - _element_ for repeated elements (may also be _data or _resp)
- *
- * Command structures are expected to overlay the params.raw member of the basic
- * descriptor, and as such cannot exceed 16 bytes in length.
- */
-
-/* This macro is used to generate a compilation error if a structure
- * is not exactly the correct length. It gives a divide by zero error if the
- * structure is not of the correct size, otherwise it creates an enum that is
- * never used.
- */
-#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
- { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
-
-/* This macro is used extensively to ensure that command structures are 16
- * bytes in length as they have to map to the raw array of that size.
- */
-#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
-
-/* internal (0x00XX) commands */
-
-/* Get version (direct 0x0001) */
-struct i40e_aqc_get_version {
- __le32 rom_ver;
- __le32 fw_build;
- __le16 fw_major;
- __le16 fw_minor;
- __le16 api_major;
- __le16 api_minor;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-
-/* Send driver version (indirect 0x0002) */
-struct i40e_aqc_driver_version {
- u8 driver_major_ver;
- u8 driver_minor_ver;
- u8 driver_build_ver;
- u8 driver_subbuild_ver;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
-
-/* Queue Shutdown (direct 0x0003) */
-struct i40e_aqc_queue_shutdown {
- __le32 driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING 0x1
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
-
-/* Set PF context (0x0004, direct) */
-struct i40e_aqc_set_pf_context {
- u8 pf_id;
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
-
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-#define I40E_AQ_RESOURCE_NVM 1
-#define I40E_AQ_RESOURCE_SDP 2
-#define I40E_AQ_RESOURCE_ACCESS_READ 1
-#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
-#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
-#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
-
-struct i40e_aqc_request_resource {
- __le16 resource_id;
- __le16 access_type;
- __le32 timeout;
- __le32 resource_number;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct i40e_aqc_list_capabilites {
- u8 command_flags;
-#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
- u8 pf_index;
- u8 reserved[2];
- __le32 count;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
-
-struct i40e_aqc_list_capabilities_element_resp {
- __le16 id;
- u8 major_rev;
- u8 minor_rev;
- __le32 number;
- __le32 logical_id;
- __le32 phys_id;
- u8 reserved[16];
-};
-
-/* list of caps */
-
-#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
-#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
-#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
-#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
-#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
-#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008
-#define I40E_AQ_CAP_ID_SRIOV 0x0012
-#define I40E_AQ_CAP_ID_VF 0x0013
-#define I40E_AQ_CAP_ID_VMDQ 0x0014
-#define I40E_AQ_CAP_ID_8021QBG 0x0015
-#define I40E_AQ_CAP_ID_8021QBR 0x0016
-#define I40E_AQ_CAP_ID_VSI 0x0017
-#define I40E_AQ_CAP_ID_DCB 0x0018
-#define I40E_AQ_CAP_ID_FCOE 0x0021
-#define I40E_AQ_CAP_ID_ISCSI 0x0022
-#define I40E_AQ_CAP_ID_RSS 0x0040
-#define I40E_AQ_CAP_ID_RXQ 0x0041
-#define I40E_AQ_CAP_ID_TXQ 0x0042
-#define I40E_AQ_CAP_ID_MSIX 0x0043
-#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
-#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
-#define I40E_AQ_CAP_ID_1588 0x0046
-#define I40E_AQ_CAP_ID_IWARP 0x0051
-#define I40E_AQ_CAP_ID_LED 0x0061
-#define I40E_AQ_CAP_ID_SDP 0x0062
-#define I40E_AQ_CAP_ID_MDIO 0x0063
-#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
-#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
-#define I40E_AQ_CAP_ID_FLEX10 0x00F1
-#define I40E_AQ_CAP_ID_CEM 0x00F2
-
-/* Set CPPM Configuration (direct 0x0103) */
-struct i40e_aqc_cppm_configuration {
- __le16 command_flags;
-#define I40E_AQ_CPPM_EN_LTRC 0x0800
-#define I40E_AQ_CPPM_EN_DMCTH 0x1000
-#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
-#define I40E_AQ_CPPM_EN_HPTC 0x4000
-#define I40E_AQ_CPPM_EN_DMARC 0x8000
- __le16 ttlx;
- __le32 dmacr;
- __le16 dmcth;
- u8 hptc;
- u8 reserved;
- __le32 pfltrc;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
-
-/* Set ARP Proxy command / response (indirect 0x0104) */
-struct i40e_aqc_arp_proxy_data {
- __le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0800
-#define I40E_AQ_ARP_UNSUP_CTL 0x1000
-#define I40E_AQ_ARP_ENA 0x2000
-#define I40E_AQ_ARP_ADD_IPV4 0x4000
-#define I40E_AQ_ARP_DEL_IPV4 0x8000
- __le16 table_id;
- __le32 enabled_offloads;
-#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
-#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
- __le32 ip_addr;
- u8 mac_addr[6];
- u8 reserved[2];
-};
-
-I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
-
-/* Set NS Proxy Table Entry Command (indirect 0x0105) */
-struct i40e_aqc_ns_proxy_data {
- __le16 table_idx_mac_addr_0;
- __le16 table_idx_mac_addr_1;
- __le16 table_idx_ipv6_0;
- __le16 table_idx_ipv6_1;
- __le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0001
-#define I40E_AQ_NS_PROXY_DEL_0 0x0002
-#define I40E_AQ_NS_PROXY_ADD_1 0x0004
-#define I40E_AQ_NS_PROXY_DEL_1 0x0008
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
-#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
-#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
- u8 mac_addr_0[6];
- u8 mac_addr_1[6];
- u8 local_mac_addr[6];
- u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
- u8 ipv6_addr_1[16];
-};
-
-I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
-
-/* Manage LAA Command (0x0106) - obsolete */
-struct i40e_aqc_mng_laa {
- __le16 command_flags;
-#define I40E_AQ_LAA_FLAG_WR 0x8000
- u8 reserved[2];
- __le32 sal;
- __le16 sah;
- u8 reserved2[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
-
-/* Manage MAC Address Read Command (indirect 0x0107) */
-struct i40e_aqc_mac_address_read {
- __le16 command_flags;
-#define I40E_AQC_LAN_ADDR_VALID 0x10
-#define I40E_AQC_SAN_ADDR_VALID 0x20
-#define I40E_AQC_PORT_ADDR_VALID 0x40
-#define I40E_AQC_WOL_ADDR_VALID 0x80
-#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x3F0
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
-
-struct i40e_aqc_mac_address_read_data {
- u8 pf_lan_mac[6];
- u8 pf_san_mac[6];
- u8 port_mac[6];
- u8 pf_wol_mac[6];
-};
-
-I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
-
-/* Manage MAC Address Write Command (0x0108) */
-struct i40e_aqc_mac_address_write {
- __le16 command_flags;
-#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
-#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
-#define I40E_AQC_WRITE_TYPE_PORT 0x8000
-#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000
-#define I40E_AQC_WRITE_TYPE_MASK 0xC000
-
- __le16 mac_sah;
- __le32 mac_sal;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
-
-/* PXE commands (0x011x) */
-
-/* Clear PXE Command and response (direct 0x0110) */
-struct i40e_aqc_clear_pxe {
- u8 rx_cnt;
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
-
-/* Set WoL Filter (0x0120) */
-
-struct i40e_aqc_set_wol_filter {
- __le16 filter_index;
-#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
-#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
-#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
- I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
-
-#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
-#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
- I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
- __le16 cmd_flags;
-#define I40E_AQC_SET_WOL_FILTER 0x8000
-#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
-#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
-#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
-#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
- __le16 valid_flags;
-#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
-#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
- u8 reserved[2];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
-
-struct i40e_aqc_set_wol_filter_data {
- u8 filter[128];
- u8 mask[16];
-};
-
-I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
-
-/* Get Wake Reason (0x0121) */
-
-struct i40e_aqc_get_wake_reason_completion {
- u8 reserved_1[2];
- __le16 wake_reason;
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
- I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
- I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
- u8 reserved_2[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
-
-/* Switch configuration commands (0x02xx) */
-
-/* Used by many indirect commands that only pass an seid and a buffer in the
- * command
- */
-struct i40e_aqc_switch_seid {
- __le16 seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
-
-/* Get Switch Configuration command (indirect 0x0200)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-struct i40e_aqc_get_switch_config_header_resp {
- __le16 num_reported;
- __le16 num_total;
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
-
-struct i40e_aqc_switch_config_element_resp {
- u8 element_type;
-#define I40E_AQ_SW_ELEM_TYPE_MAC 1
-#define I40E_AQ_SW_ELEM_TYPE_PF 2
-#define I40E_AQ_SW_ELEM_TYPE_VF 3
-#define I40E_AQ_SW_ELEM_TYPE_EMP 4
-#define I40E_AQ_SW_ELEM_TYPE_BMC 5
-#define I40E_AQ_SW_ELEM_TYPE_PV 16
-#define I40E_AQ_SW_ELEM_TYPE_VEB 17
-#define I40E_AQ_SW_ELEM_TYPE_PA 18
-#define I40E_AQ_SW_ELEM_TYPE_VSI 19
- u8 revision;
-#define I40E_AQ_SW_ELEM_REV_1 1
- __le16 seid;
- __le16 uplink_seid;
- __le16 downlink_seid;
- u8 reserved[3];
- u8 connection_type;
-#define I40E_AQ_CONN_TYPE_REGULAR 0x1
-#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_CONN_TYPE_CASCADED 0x3
- __le16 scheduler_id;
- __le16 element_info;
-};
-
-I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
-
-/* Get Switch Configuration (indirect 0x0200)
- * an array of elements are returned in the response buffer
- * the first in the array is the header, remainder are elements
- */
-struct i40e_aqc_get_switch_config_resp {
- struct i40e_aqc_get_switch_config_header_resp header;
- struct i40e_aqc_switch_config_element_resp element[1];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
-
-/* Add Statistics (direct 0x0201)
- * Remove Statistics (direct 0x0202)
- */
-struct i40e_aqc_add_remove_statistics {
- __le16 seid;
- __le16 vlan;
- __le16 stat_index;
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
-
-/* Set Port Parameters command (direct 0x0203) */
-struct i40e_aqc_set_port_parameters {
- __le16 command_flags;
-#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
-#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
-#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
- __le16 bad_frame_vsi;
-#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
-#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
- __le16 default_seid; /* reserved for command */
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
-
-/* Get Switch Resource Allocation (indirect 0x0204) */
-struct i40e_aqc_get_switch_resource_alloc {
- u8 num_entries; /* reserved for command */
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
-
-/* expect an array of these structs in the response buffer */
-struct i40e_aqc_switch_resource_alloc_element_resp {
- u8 resource_type;
-#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
-#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
-#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
-#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
-#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
-#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
-#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
-#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
-#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
-#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
-#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
-#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
-#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
-#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
-#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
-#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
-#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
-#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
-#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
- u8 reserved1;
- __le16 guaranteed;
- __le16 total;
- __le16 used;
- __le16 total_unalloced;
- u8 reserved2[6];
-};
-
-I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
-
-/* Set Switch Configuration (direct 0x0205) */
-struct i40e_aqc_set_switch_config {
- __le16 flags;
-/* flags used for both fields below */
-#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
-#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
- __le16 valid_flags;
- /* The ethertype in switch_tag is dropped on ingress and used
- * internally by the switch. Set this to zero for the default
- * of 0x88a8 (802.1ad). Should be zero for firmware API
- * versions lower than 1.7.
- */
- __le16 switch_tag;
- /* The ethertypes in first_tag and second_tag are used to
- * match the outer and inner VLAN tags (respectively) when HW
- * double VLAN tagging is enabled via the set port parameters
- * AQ command. Otherwise these are both ignored. Set them to
- * zero for their defaults of 0x8100 (802.1Q). Should be zero
- * for firmware API versions lower than 1.7.
- */
- __le16 first_tag;
- __le16 second_tag;
- u8 reserved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
-
-/* Read Receive control registers (direct 0x0206)
- * Write Receive control registers (direct 0x0207)
- * used for accessing Rx control registers that can be
- * slow and need special handling when under high Rx load
- */
-struct i40e_aqc_rx_ctl_reg_read_write {
- __le32 reserved1;
- __le32 address;
- __le32 reserved2;
- __le32 value;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
-
-/* Add VSI (indirect 0x0210)
- * this indirect command uses struct i40e_aqc_vsi_properties_data
- * as the indirect buffer (128 bytes)
- *
- * Update VSI (indirect 0x211)
- * uses the same data structure as Add VSI
- *
- * Get VSI (indirect 0x0212)
- * uses the same completion and data structure as Add VSI
- */
-struct i40e_aqc_add_get_update_vsi {
- __le16 uplink_seid;
- u8 connection_type;
-#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
- u8 reserved1;
- u8 vf_id;
- u8 reserved2;
- __le16 vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT 0x0
-#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
-#define I40E_AQ_VSI_TYPE_VF 0x0
-#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
-#define I40E_AQ_VSI_TYPE_PF 0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
-
-struct i40e_aqc_add_get_update_vsi_completion {
- __le16 seid;
- __le16 vsi_number;
- __le16 vsi_used;
- __le16 vsi_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
-
-struct i40e_aqc_vsi_properties_data {
- /* first 96 byte are written by SW */
- __le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
- /* switch section */
- __le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
-#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
- u8 sw_reserved[2];
- /* security section */
- u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
- u8 sec_reserved;
- /* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- __le16 fcoe_pvid;
- u8 port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
- I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
- I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
- u8 pvlan_reserved[3];
- /* ingress egress up sections */
- __le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
- __le32 egress_table; /* same defines as for ingress table */
- /* cascaded PV section */
- __le16 cas_pv_tag;
- u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
- u8 cas_pv_reserved;
- /* queue mapping section */
- __le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
- __le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
- __le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
- /* queueing option section */
- u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
-#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
- u8 queueing_opt_reserved[3];
- /* scheduler section */
- u8 up_enable_bits;
- u8 sched_reserved;
- /* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress tbl */
- u8 cmd_reserved[8];
- /* last 32 bytes are written by FW */
- __le16 qs_handle[8];
-#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
- __le16 stat_counter_idx;
- __le16 sched_id;
- u8 resp_reserved[12];
-};
-
-I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
-
-/* Add Port Virtualizer (direct 0x0220)
- * also used for update PV (direct 0x0221) but only flags are used
- * (IS_CTRL_PORT only works on add PV)
- */
-struct i40e_aqc_add_update_pv {
- __le16 command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
- __le16 uplink_seid;
- __le16 connected_seid;
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
-
-struct i40e_aqc_add_update_pv_completion {
- /* reserved for update; for add also encodes error if rc == ENOSPC */
- __le16 pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
-
-/* Get PV Params (direct 0x0222)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-
-struct i40e_aqc_get_pv_params_completion {
- __le16 seid;
- __le16 default_stag;
- __le16 pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE 0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
- u8 reserved[8];
- __le16 default_port_seid;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
-
-/* Add VEB (direct 0x0230) */
-struct i40e_aqc_add_veb {
- __le16 uplink_seid;
- __le16 downlink_seid;
- __le16 veb_flags;
-#define I40E_AQC_ADD_VEB_FLOATING 0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
- I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
-#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */
-#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10
- u8 enable_tcs;
- u8 reserved[9];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
-
-struct i40e_aqc_add_veb_completion {
- u8 reserved[6];
- __le16 switch_seid;
- /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
- __le16 veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
-
-/* Get VEB Parameters (direct 0x0232)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-struct i40e_aqc_get_veb_parameters_completion {
- __le16 seid;
- __le16 switch_id;
- __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
-
-/* Delete Element (direct 0x0243)
- * uses the generic i40e_aqc_switch_seid
- */
-
-/* Add MAC-VLAN (indirect 0x0250) */
-
-/* used for the command for most vlan commands */
-struct i40e_aqc_macvlan {
- __le16 num_addresses;
- __le16 seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
-#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
-
-/* indirect data for command and response */
-struct i40e_aqc_add_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- __le16 flags;
-#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
-#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
-#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010
- __le16 queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
- I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
- /* response section */
- u8 match_method;
-#define I40E_AQC_MM_PERFECT_MATCH 0x01
-#define I40E_AQC_MM_HASH_MATCH 0x02
-#define I40E_AQC_MM_ERR_NO_RES 0xFF
- u8 reserved1[3];
-};
-
-struct i40e_aqc_add_remove_macvlan_completion {
- __le16 perfect_mac_used;
- __le16 perfect_mac_free;
- __le16 unicast_hash_free;
- __le16 multicast_hash_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
-
-/* Remove MAC-VLAN (indirect 0x0251)
- * uses i40e_aqc_macvlan for the descriptor
- * data points to an array of num_addresses of elements
- */
-
-struct i40e_aqc_remove_macvlan_element_data {
- u8 mac_addr[6];
- __le16 vlan_tag;
- u8 flags;
-#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
-#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
- u8 reserved[3];
- /* reply section */
- u8 error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
- u8 reply_reserved[3];
-};
-
-/* Add VLAN (indirect 0x0252)
- * Remove VLAN (indirect 0x0253)
- * use the generic i40e_aqc_macvlan for the command
- */
-struct i40e_aqc_add_remove_vlan_element_data {
- __le16 vlan_tag;
- u8 vlan_flags;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL 0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT 3
-#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL 0x1
- u8 reserved;
- u8 result;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
- u8 reserved1[3];
-};
-
-struct i40e_aqc_add_remove_vlan_completion {
- u8 reserved[4];
- __le16 vlans_used;
- __le16 vlans_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-/* Set VSI Promiscuous Modes (direct 0x0254) */
-struct i40e_aqc_set_vsi_promiscuous_modes {
- __le16 promiscuous_flags;
- __le16 valid_flags;
-/* flags used for both fields above */
-#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
-#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
-#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
-#define I40E_AQC_SET_VSI_DEFAULT 0x08
-#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
- __le16 seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- __le16 vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF
-#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
-
-/* Add S/E-tag command (direct 0x0255)
- * Uses generic i40e_aqc_add_remove_tag_completion for completion
- */
-struct i40e_aqc_add_tag {
- __le16 flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
- __le16 seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- __le16 queue_number;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
-
-struct i40e_aqc_add_remove_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
-
-/* Remove S/E-tag command (direct 0x0256)
- * Uses generic i40e_aqc_add_remove_tag_completion for completion
- */
-struct i40e_aqc_remove_tag {
- __le16 seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 tag;
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
-
-/* Add multicast E-Tag (direct 0x0257)
- * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
- * and no external data
- */
-struct i40e_aqc_add_remove_mcast_etag {
- __le16 pv_seid;
- __le16 etag;
- u8 num_unicast_etags;
- u8 reserved[3];
- __le32 addr_high; /* address of array of 2-byte s-tags */
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
-
-struct i40e_aqc_add_remove_mcast_etag_completion {
- u8 reserved[4];
- __le16 mcast_etags_used;
- __le16 mcast_etags_free;
- __le32 addr_high;
- __le32 addr_low;
-
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
-
-/* Update S/E-Tag (direct 0x0259) */
-struct i40e_aqc_update_tag {
- __le16 seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
- __le16 old_tag;
- __le16 new_tag;
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
-
-struct i40e_aqc_update_tag_completion {
- u8 reserved[12];
- __le16 tags_used;
- __le16 tags_free;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
-
-/* Add Control Packet filter (direct 0x025A)
- * Remove Control Packet filter (direct 0x025B)
- * uses the i40e_aqc_add_oveb_cloud,
- * and the generic direct completion structure
- */
-struct i40e_aqc_add_remove_control_packet_filter {
- u8 mac[6];
- __le16 etype;
- __le16 flags;
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
- __le16 seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
- __le16 queue;
- u8 reserved[2];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
-
-struct i40e_aqc_add_remove_control_packet_filter_completion {
- __le16 mac_etype_used;
- __le16 etype_used;
- __le16 mac_etype_free;
- __le16 etype_free;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
-
-/* Add Cloud filters (indirect 0x025C)
- * Remove Cloud filters (indirect 0x025D)
- * uses the i40e_aqc_add_remove_cloud_filters,
- * and the generic indirect completion structure
- */
-struct i40e_aqc_add_remove_cloud_filters {
- u8 num_filters;
- u8 reserved;
- __le16 seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
- I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 big_buffer_flag;
-#define I40E_AQC_ADD_CLOUD_CMD_BB 1
- u8 reserved2[3];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
-
-struct i40e_aqc_cloud_filters_element_data {
- u8 outer_mac[6];
- u8 inner_mac[6];
- __le16 inner_vlan;
- union {
- struct {
- u8 reserved[12];
- u8 data[4];
- } v4;
- struct {
- u8 data[16];
- } v6;
- struct {
- __le16 data[8];
- } raw_v6;
- } ipaddr;
- __le16 flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
- I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
-/* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
-/* 0x0002 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
-/* 0x0005 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
-/* 0x0007 reserved */
-/* 0x0008 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
-#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
-#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
-/* 0x0010 to 0x0017 is for custom filters */
-#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
-#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
-#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
-#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
-
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5
-
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000
-
- __le32 tenant_id;
- u8 reserved[4];
- __le16 queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
- I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved2[14];
- /* response section */
- u8 allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
- u8 response_reserved[7];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
-
-/* i40e_aqc_cloud_filters_element_bb is used when
- * I40E_AQC_ADD_CLOUD_CMD_BB flag is set.
- */
-struct i40e_aqc_cloud_filters_element_bb {
- struct i40e_aqc_cloud_filters_element_data element;
- u16 general_fields[32];
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
-};
-
-I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
-
-struct i40e_aqc_remove_cloud_filters_completion {
- __le16 perfect_ovlan_used;
- __le16 perfect_ovlan_free;
- __le16 vlan_used;
- __le16 vlan_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
-
-/* Replace filter Command 0x025F
- * uses the i40e_aqc_replace_cloud_filters,
- * and the generic indirect completion structure
- */
-struct i40e_filter_data {
- u8 filter_type;
- u8 input[3];
-};
-
-I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
-
-struct i40e_aqc_replace_cloud_filters_cmd {
- u8 valid_flags;
-#define I40E_AQC_REPLACE_L1_FILTER 0x0
-#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
-#define I40E_AQC_GET_CLOUD_FILTERS 0x2
-#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
-#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
- u8 old_filter_type;
- u8 new_filter_type;
- u8 tr_bit;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
-
-struct i40e_aqc_replace_cloud_filters_cmd_buf {
- u8 data[32];
-/* Filter type INPUT codes*/
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7)
-
-/* Field Vector offsets */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
-
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
- struct i40e_filter_data filters[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
-
-/* Add Mirror Rule (indirect or direct 0x0260)
- * Delete Mirror Rule (indirect or direct 0x0261)
- * note: some rule types (4,5) do not use an external buffer.
- * take care to set the flags correctly.
- */
-struct i40e_aqc_add_delete_mirror_rule {
- __le16 seid;
- __le16 rule_type;
-#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
-#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
- I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
-#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
-#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
- __le16 num_entries;
- __le16 destination; /* VSI for add, rule id for delete */
- __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
-
-struct i40e_aqc_add_delete_mirror_rule_completion {
- u8 reserved[2];
- __le16 rule_id; /* only used on add */
- __le16 mirror_rules_used;
- __le16 mirror_rules_free;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-
-/* Dynamic Device Personalization */
-struct i40e_aqc_write_personalization_profile {
- u8 flags;
- u8 reserved[3];
- __le32 profile_track_id;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
-
-struct i40e_aqc_write_ddp_resp {
- __le32 error_offset;
- __le32 error_info;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-struct i40e_aqc_get_applied_profiles {
- u8 flags;
-#define I40E_AQC_GET_DDP_GET_CONF 0x1
-#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
- u8 rsv[3];
- __le32 reserved;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
-
-/* DCB 0x03xx*/
-
-/* PFC Ignore (direct 0x0301)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_pfc_ignore {
- u8 tc_bitmap;
- u8 command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET 0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
-
-/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
- * with no parameters
- */
-
-/* TX scheduler 0x04xx */
-
-/* Almost all the indirect commands use
- * this generic struct to pass the SEID in param0
- */
-struct i40e_aqc_tx_sched_ind {
- __le16 vsi_seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
-
-/* Several commands respond with a set of queue set handles */
-struct i40e_aqc_qs_handles_resp {
- __le16 qs_handles[8];
-};
-
-/* Configure VSI BW limits (direct 0x0400) */
-struct i40e_aqc_configure_vsi_bw_limit {
- __le16 vsi_seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_credit; /* 0-3, limit = 2^max */
- u8 reserved2[7];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
-
-/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
- * responds with i40e_aqc_qs_handles_resp
- */
-struct i40e_aqc_configure_vsi_ets_sla_bw_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
-
-/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
- * responds with i40e_aqc_qs_handles_resp
- */
-struct i40e_aqc_configure_vsi_tc_bw_data {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 tc_bw_credits[8];
- u8 reserved1[4];
- __le16 qs_handles[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
-
-/* Query vsi bw configuration (indirect 0x0408) */
-struct i40e_aqc_query_vsi_bw_config_resp {
- u8 tc_valid_bits;
- u8 tc_suspended_bits;
- u8 reserved[14];
- __le16 qs_handles[8];
- u8 reserved1[4];
- __le16 port_bw_limit;
- u8 reserved2[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved3[23];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
-
-/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
-struct i40e_aqc_query_vsi_ets_sla_config_resp {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 share_credits[8];
- __le16 credits[8];
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
-
-/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
-struct i40e_aqc_configure_switching_comp_bw_limit {
- __le16 seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved2[7];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
-
-/* Enable Physical Port ETS (indirect 0x0413)
- * Modify Physical Port ETS (indirect 0x0414)
- * Disable Physical Port ETS (indirect 0x0415)
- */
-struct i40e_aqc_configure_switching_comp_ets_data {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
- u8 tc_strict_priority_flags;
- u8 reserved1[17];
- u8 tc_bw_share_credits[8];
- u8 reserved2[96];
-};
-
-I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
-
-/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
-struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credit[8];
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40,
- i40e_aqc_configure_switching_comp_ets_bw_limit_data);
-
-/* Configure Switching Component Bandwidth Allocation per Tc
- * (indirect 0x0417)
- */
-struct i40e_aqc_configure_switching_comp_bw_config_data {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits; /* bool */
- u8 tc_bw_share_credits[8];
- u8 reserved1[20];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
-
-/* Query Switching Component Configuration (indirect 0x0418) */
-struct i40e_aqc_query_switching_comp_ets_config_resp {
- u8 tc_valid_bits;
- u8 reserved[35];
- __le16 port_bw_limit;
- u8 reserved1[2];
- u8 tc_bw_max; /* 0-3, limit = 2^max */
- u8 reserved2[23];
-};
-
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
-
-/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
-struct i40e_aqc_query_port_ets_config_resp {
- u8 reserved[4];
- u8 tc_valid_bits;
- u8 reserved1;
- u8 tc_strict_priority_bits;
- u8 reserved2;
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
-
- /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved3[32];
-};
-
-I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
-
-/* Query Switching Component Bandwidth Allocation per Traffic Type
- * (indirect 0x041A)
- */
-struct i40e_aqc_query_switching_comp_bw_config_resp {
- u8 tc_valid_bits;
- u8 reserved[2];
- u8 absolute_credits_enable; /* bool */
- u8 tc_bw_share_credits[8];
- __le16 tc_bw_limits[8];
-
- /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
-
-/* Suspend/resume port TX traffic
- * (direct 0x041B and 0x041C) uses the generic SEID struct
- */
-
-/* Configure partition BW
- * (indirect 0x041D)
- */
-struct i40e_aqc_configure_partition_bw_data {
- __le16 pf_valid_bits;
- u8 min_bw[16]; /* guaranteed bandwidth */
- u8 max_bw[16]; /* bandwidth limit */
-};
-
-I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
-
-/* set in param0 for get phy abilities to report qualified modules */
-#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
-#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
-
-enum i40e_aq_phy_type {
- I40E_PHY_TYPE_SGMII = 0x0,
- I40E_PHY_TYPE_1000BASE_KX = 0x1,
- I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
- I40E_PHY_TYPE_10GBASE_KR = 0x3,
- I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
- I40E_PHY_TYPE_XAUI = 0x5,
- I40E_PHY_TYPE_XFI = 0x6,
- I40E_PHY_TYPE_SFI = 0x7,
- I40E_PHY_TYPE_XLAUI = 0x8,
- I40E_PHY_TYPE_XLPPI = 0x9,
- I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
- I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
- I40E_PHY_TYPE_10GBASE_AOC = 0xC,
- I40E_PHY_TYPE_40GBASE_AOC = 0xD,
- I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
- I40E_PHY_TYPE_UNSUPPORTED = 0xF,
- I40E_PHY_TYPE_100BASE_TX = 0x11,
- I40E_PHY_TYPE_1000BASE_T = 0x12,
- I40E_PHY_TYPE_10GBASE_T = 0x13,
- I40E_PHY_TYPE_10GBASE_SR = 0x14,
- I40E_PHY_TYPE_10GBASE_LR = 0x15,
- I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
- I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
- I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
- I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
- I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
- I40E_PHY_TYPE_1000BASE_SX = 0x1B,
- I40E_PHY_TYPE_1000BASE_LX = 0x1C,
- I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
- I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
- I40E_PHY_TYPE_25GBASE_KR = 0x1F,
- I40E_PHY_TYPE_25GBASE_CR = 0x20,
- I40E_PHY_TYPE_25GBASE_SR = 0x21,
- I40E_PHY_TYPE_25GBASE_LR = 0x22,
- I40E_PHY_TYPE_25GBASE_AOC = 0x23,
- I40E_PHY_TYPE_25GBASE_ACC = 0x24,
- I40E_PHY_TYPE_MAX,
- I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
- I40E_PHY_TYPE_EMPTY = 0xFE,
- I40E_PHY_TYPE_DEFAULT = 0xFF,
-};
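
A note on how these values are consumed: in the Get PHY Abilities response below, phy_type is a 32-bit bitmap indexed by this enum ("bitmap using the above enum for offsets"), so types 0x20 and up cannot be represented there and are reported through the separate phy_type_ext byte instead. A hedged standalone sketch of the bit test, illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Test one PHY type bit in the 32-bit ability bitmap; enum values >= 32
 * would be looked up in phy_type_ext instead. */
static int phy_type_supported(uint32_t phy_type_bitmap, unsigned int type)
{
    if (type >= 32)
        return 0; /* checked against phy_type_ext in the real driver */
    return (int)((phy_type_bitmap >> type) & 1u);
}

int main(void)
{
    /* 0x13 = 10GBASE_T, 0x14 = 10GBASE_SR per the enum above */
    uint32_t bitmap = (1u << 0x13) | (1u << 0x14);

    printf("10GBASE_SR supported: %d\n", phy_type_supported(bitmap, 0x14));
    return 0;
}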
-
-#define I40E_LINK_SPEED_100MB_SHIFT 0x1
-#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
-#define I40E_LINK_SPEED_10GB_SHIFT 0x3
-#define I40E_LINK_SPEED_40GB_SHIFT 0x4
-#define I40E_LINK_SPEED_20GB_SHIFT 0x5
-#define I40E_LINK_SPEED_25GB_SHIFT 0x6
-
-enum i40e_aq_link_speed {
- I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
- I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
-};
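
The link_speed bytes used throughout this header are one-hot bitmaps built from the shifts above. A hedged standalone sketch that decodes such a byte to Mb/s (numeric speeds inferred from the enum names):

#include <stdint.h>
#include <stdio.h>

static unsigned int link_speed_mbps(uint8_t speed)
{
    switch (speed) {
    case 1u << 1: return 100;    /* I40E_LINK_SPEED_100MB */
    case 1u << 2: return 1000;   /* I40E_LINK_SPEED_1GB */
    case 1u << 3: return 10000;  /* I40E_LINK_SPEED_10GB */
    case 1u << 4: return 40000;  /* I40E_LINK_SPEED_40GB */
    case 1u << 5: return 20000;  /* I40E_LINK_SPEED_20GB */
    case 1u << 6: return 25000;  /* I40E_LINK_SPEED_25GB */
    default:      return 0;      /* I40E_LINK_SPEED_UNKNOWN */
    }
}

int main(void)
{
    printf("%u Mb/s\n", link_speed_mbps(1u << 3)); /* prints 10000 */
    return 0;
}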
-
-struct i40e_aqc_module_desc {
- u8 oui[3];
- u8 reserved1;
- u8 part_number[16];
- u8 revision[4];
- u8 reserved2[8];
-};
-
-I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
-
-struct i40e_aq_get_phy_abilities_resp {
- __le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum bit patterns */
- u8 abilities;
-#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
-#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_LINK_ENABLED 0x08
-#define I40E_AQ_PHY_AN_ENABLED 0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
-#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
-#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
- __le16 eee_capability;
-#define I40E_AQ_EEE_100BASE_TX 0x0002
-#define I40E_AQ_EEE_1000BASE_T 0x0004
-#define I40E_AQ_EEE_10GBASE_T 0x0008
-#define I40E_AQ_EEE_1000BASE_KX 0x0010
-#define I40E_AQ_EEE_10GBASE_KX4 0x0020
-#define I40E_AQ_EEE_10GBASE_KR 0x0040
- __le32 eeer_val;
- u8 d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
-#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
-#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
- u8 fec_cfg_curr_mod_ext_info;
-#define I40E_AQ_ENABLE_FEC_KR 0x01
-#define I40E_AQ_ENABLE_FEC_RS 0x02
-#define I40E_AQ_REQUEST_FEC_KR 0x04
-#define I40E_AQ_REQUEST_FEC_RS 0x08
-#define I40E_AQ_ENABLE_FEC_AUTO 0x10
-#define I40E_AQ_FEC
-#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
-#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
-
- u8 ext_comp_code;
- u8 phy_id[4];
- u8 module_type[3];
- u8 qualified_module_count;
-#define I40E_AQ_PHY_MAX_QMS 16
- struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
-};
-
-I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
-
-/* Set PHY Config (direct 0x0601) */
-struct i40e_aq_set_phy_config { /* same bits as above in all */
- __le32 phy_type;
- u8 link_speed;
- u8 abilities;
-/* bits 0-2 use the values from get_phy_abilities_resp */
-#define I40E_AQ_PHY_ENABLE_LINK 0x08
-#define I40E_AQ_PHY_ENABLE_AN 0x10
-#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
- __le16 eee_capability;
- __le32 eeer;
- u8 low_power_ctrl;
- u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
- u8 fec_config;
-#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
-#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
-#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
-#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
-#define I40E_AQ_SET_FEC_AUTO BIT(4)
-#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
-#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
- u8 reserved;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
-
-/* Set MAC Config command data structure (direct 0x0603) */
-struct i40e_aq_set_mac_config {
- __le16 max_frame_size;
- u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
- u8 tx_timer_priority; /* bitmap */
- __le16 tx_timer_value;
- __le16 fc_refresh_threshold;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
-
-/* Restart Auto-Negotiation (direct 0x605) */
-struct i40e_aqc_set_link_restart_an {
- u8 command;
-#define I40E_AQ_PHY_RESTART_AN 0x02
-#define I40E_AQ_PHY_LINK_ENABLE 0x04
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
-
-/* Get Link Status cmd & response data structure (direct 0x0607) */
-struct i40e_aqc_get_link_status {
- __le16 command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK 0x3
-#define I40E_AQ_LSE_NOP 0x0
-#define I40E_AQ_LSE_DISABLE 0x2
-#define I40E_AQ_LSE_ENABLE 0x3
-/* only response uses this flag */
-#define I40E_AQ_LSE_IS_ENABLED 0x1
- u8 phy_type; /* i40e_aq_phy_type */
- u8 link_speed; /* i40e_aq_link_speed */
- u8 link_info;
-#define I40E_AQ_LINK_UP 0x01 /* obsolete */
-#define I40E_AQ_LINK_UP_FUNCTION 0x01
-#define I40E_AQ_LINK_FAULT 0x02
-#define I40E_AQ_LINK_FAULT_TX 0x04
-#define I40E_AQ_LINK_FAULT_RX 0x08
-#define I40E_AQ_LINK_FAULT_REMOTE 0x10
-#define I40E_AQ_LINK_UP_PORT 0x20
-#define I40E_AQ_MEDIA_AVAILABLE 0x40
-#define I40E_AQ_SIGNAL_DETECT 0x80
- u8 an_info;
-#define I40E_AQ_AN_COMPLETED 0x01
-#define I40E_AQ_LP_AN_ABILITY 0x02
-#define I40E_AQ_PD_FAULT 0x04
-#define I40E_AQ_FEC_EN 0x08
-#define I40E_AQ_PHY_LOW_POWER 0x10
-#define I40E_AQ_LINK_PAUSE_TX 0x20
-#define I40E_AQ_LINK_PAUSE_RX 0x40
-#define I40E_AQ_QUALIFIED_MODULE 0x80
- u8 ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
-#define I40E_AQ_LINK_EXCESSIVE_ERRORS 0x02
-#define I40E_AQ_LINK_TX_SHIFT 0x02
-#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE 0x00
-#define I40E_AQ_LINK_TX_DRAINED 0x01
-#define I40E_AQ_LINK_TX_FLUSHED 0x03
-#define I40E_AQ_LINK_FORCED_40G 0x10
-/* 25G Error Codes */
-#define I40E_AQ_25G_NO_ERR 0x00
-#define I40E_AQ_25G_NOT_PRESENT 0x01
-#define I40E_AQ_25G_NVM_CRC_ERR 0x02
-#define I40E_AQ_25G_SBUS_UCODE_ERR 0x03
-#define I40E_AQ_25G_SERDES_UCODE_ERR 0x04
-#define I40E_AQ_25G_NIMB_UCODE_ERR 0x05
- u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
-/* Since firmware API 1.7 loopback field keeps power class info as well */
-#define I40E_AQ_LOOPBACK_MASK 0x07
-#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
-#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
- __le16 max_frame_size;
- u8 config;
-#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
-#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
-#define I40E_AQ_CONFIG_CRC_ENA 0x04
-#define I40E_AQ_CONFIG_PACING_MASK 0x78
- union {
- struct {
- u8 power_desc;
-#define I40E_AQ_LINK_POWER_CLASS_1 0x00
-#define I40E_AQ_LINK_POWER_CLASS_2 0x01
-#define I40E_AQ_LINK_POWER_CLASS_3 0x02
-#define I40E_AQ_LINK_POWER_CLASS_4 0x03
-#define I40E_AQ_PWR_CLASS_MASK 0x03
- u8 reserved[4];
- };
- struct {
- u8 link_type[4];
- u8 link_type_ext;
- };
- };
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
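
Since firmware API 1.7 the loopback byte above doubles as a power-class report. A hedged standalone sketch of splitting it with the masks just defined:

#include <stdint.h>
#include <stdio.h>

#define LOOPBACK_MASK      0x07u /* I40E_AQ_LOOPBACK_MASK */
#define PWR_CLASS_SHIFT_LB 6     /* I40E_AQ_PWR_CLASS_SHIFT_LB */
#define PWR_CLASS_MASK_LB  (0x03u << PWR_CLASS_SHIFT_LB)

int main(void)
{
    uint8_t lb = 0x81; /* example raw byte: loopback mode 1, power class field 2 */

    printf("loopback mode %u, power class %u\n",
           lb & LOOPBACK_MASK,
           (lb & PWR_CLASS_MASK_LB) >> PWR_CLASS_SHIFT_LB);
    return 0;
}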
-
-/* Set event mask command (direct 0x613) */
-struct i40e_aqc_set_phy_int_mask {
- u8 reserved[8];
- __le16 event_mask;
-#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
-#define I40E_AQ_EVENT_MEDIA_NA 0x0004
-#define I40E_AQ_EVENT_LINK_FAULT 0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
-#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
- u8 reserved1[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
-
-/* Get Local AN advt register (direct 0x0614)
- * Set Local AN advt register (direct 0x0615)
- * Get Link Partner AN advt register (direct 0x0616)
- */
-struct i40e_aqc_an_advt_reg {
- __le32 local_an_reg0;
- __le16 local_an_reg1;
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
-
-/* Set Loopback mode (0x0618) */
-struct i40e_aqc_set_lb_mode {
- __le16 lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL 0x01
-#define I40E_AQ_LB_PHY_REMOTE 0x02
-#define I40E_AQ_LB_MAC_LOCAL 0x04
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-
-/* Set PHY Debug command (0x0622) */
-struct i40e_aqc_set_phy_debug {
- u8 command_flags;
-#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
- I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
-#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
-
-enum i40e_aq_phy_reg_type {
- I40E_AQC_PHY_REG_INTERNAL = 0x1,
- I40E_AQC_PHY_REG_EXTERNAL_BASET = 0x2,
- I40E_AQC_PHY_REG_EXTERNAL_MODULE = 0x3
-};
-
-/* Run PHY Activity (0x0626) */
-struct i40e_aqc_run_phy_activity {
- __le16 activity_id;
- u8 flags;
- u8 reserved1;
- __le32 control;
- __le32 data;
- u8 reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
-
-/* Set PHY Register command (0x0628) */
-/* Get PHY Register command (0x0629) */
-struct i40e_aqc_phy_register_access {
- u8 phy_interface;
-#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
-#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
-#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
- u8 dev_address;
- u8 reserved1[2];
- __le32 reg_address;
- __le32 reg_value;
- u8 reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
-
-/* NVM Read command (indirect 0x0701)
- * NVM Erase commands (direct 0x0702)
- * NVM Update commands (indirect 0x0703)
- */
-struct i40e_aqc_nvm_update {
- u8 command_flags;
-#define I40E_AQ_NVM_LAST_CMD 0x01
-#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
-#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
-#define I40E_AQ_NVM_FLASH_ONLY 0x80
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
- u8 module_pointer;
- __le16 length;
- __le32 offset;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
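
Like most indirect commands in this header, nvm_update carries the DMA address of its data buffer split across addr_high/addr_low. A hedged sketch of the split (the kernel typically uses upper_32_bits()/lower_32_bits()-style helpers; plain shifts shown here):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void aq_set_buf_addr(uint32_t *addr_high, uint32_t *addr_low,
                            uint64_t dma)
{
    *addr_high = (uint32_t)(dma >> 32);
    *addr_low = (uint32_t)(dma & 0xFFFFFFFFu);
}

int main(void)
{
    uint32_t hi, lo;

    aq_set_buf_addr(&hi, &lo, 0x123456789ABCDEF0ull);
    printf("addr_high=0x%08" PRIX32 " addr_low=0x%08" PRIX32 "\n", hi, lo);
    return 0;
}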
-
-/* NVM Config Read (indirect 0x0704) */
-struct i40e_aqc_nvm_config_read {
- __le16 cmd_flags;
-#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
-#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
-#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
- __le16 element_count;
- __le16 element_id; /* Feature/field ID */
- __le16 element_id_msw; /* MSWord of field ID */
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
-
-/* NVM Config Write (indirect 0x0705) */
-struct i40e_aqc_nvm_config_write {
- __le16 cmd_flags;
- __le16 element_count;
- u8 reserved[4];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
-
-/* Used for 0x0704 as well as for 0x0705 commands */
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
-#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
-struct i40e_aqc_nvm_config_data_feature {
- __le16 feature_id;
-#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
-#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
-#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
- __le16 feature_options;
- __le16 feature_selection;
-};
-
-I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
-
-struct i40e_aqc_nvm_config_data_immediate_field {
- __le32 field_id;
- __le32 field_value;
- __le16 field_options;
- __le16 reserved;
-};
-
-I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
-
-/* OEM Post Update (indirect 0x0720)
- * no command data struct used
- */
-struct i40e_aqc_nvm_oem_post_update {
-#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
- u8 sel_data;
- u8 reserved[7];
-};
-
-I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
-
-struct i40e_aqc_nvm_oem_post_update_buffer {
- u8 str_len;
- u8 dev_addr;
- __le16 eeprom_addr;
- u8 data[36];
-};
-
-I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
-
-/* Thermal Sensor (indirect 0x0721)
- * read or set thermal sensor configs and values
- * takes a sensor and command specific data buffer, not detailed here
- */
-struct i40e_aqc_thermal_sensor {
- u8 sensor_action;
-#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0
-#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1
-#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor);
-
-/* Send to PF command (indirect 0x0801); id is only used by PF
- * Send to VF command (indirect 0x0802); id is only used by PF
- * Send to Peer PF command (indirect 0x0803)
- */
-struct i40e_aqc_pf_vf_message {
- __le32 id;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
-
-/* Alternate structure */
-
-/* Direct write (direct 0x0900)
- * Direct read (direct 0x0902)
- */
-struct i40e_aqc_alternate_write {
- __le32 address0;
- __le32 data0;
- __le32 address1;
- __le32 data1;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
-
-/* Indirect write (indirect 0x0901)
- * Indirect read (indirect 0x0903)
- */
-
-struct i40e_aqc_alternate_ind_write {
- __le32 address;
- __le32 length;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
-
-/* Done alternate write (direct 0x0904)
- * uses i40e_aq_desc
- */
-struct i40e_aqc_alternate_write_done {
- __le16 cmd_flags;
-#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
-#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
-#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
-#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
-
-/* Set OEM mode (direct 0x0905) */
-struct i40e_aqc_alternate_set_mode {
- __le32 mode;
-#define I40E_AQ_ALTERNATE_MODE_NONE 0
-#define I40E_AQ_ALTERNATE_MODE_OEM 1
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
-
-/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
-
-/* async events 0x10xx */
-
-/* LAN Queue Overflow Event (direct, 0x1001) */
-struct i40e_aqc_lan_overflow {
- __le32 prtdcb_rupto;
- __le32 otx_ctl;
- u8 reserved[8];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
-
-/* Get LLDP MIB (indirect 0x0A00) */
-struct i40e_aqc_lldp_get_mib {
- u8 type;
- u8 reserved1;
-#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
-#define I40E_AQ_LLDP_MIB_LOCAL 0x0
-#define I40E_AQ_LLDP_MIB_REMOTE 0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
-#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
-#define I40E_AQ_LLDP_TX_SHIFT 0x4
-#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
-/* TX pause flags use I40E_AQ_LINK_TX_* above */
- __le16 local_len;
- __le16 remote_len;
- u8 reserved2[2];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
-
-/* Configure LLDP MIB Change Event (direct 0x0A01)
- * also used for the event (with type in the command field)
- */
-struct i40e_aqc_lldp_update_mib {
- u8 command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
-#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
- u8 reserved[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
-
-/* Add LLDP TLV (indirect 0x0A02)
- * Delete LLDP TLV (indirect 0x0A04)
- */
-struct i40e_aqc_lldp_add_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved1[1];
- __le16 len;
- u8 reserved2[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
-
-/* Update LLDP TLV (indirect 0x0A03) */
-struct i40e_aqc_lldp_update_tlv {
- u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
- u8 reserved;
- __le16 old_len;
- __le16 new_offset;
- __le16 new_len;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
-
-/* Stop LLDP (direct 0x0A05) */
-struct i40e_aqc_lldp_stop {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
-
-/* Start LLDP (direct 0x0A06) */
-
-struct i40e_aqc_lldp_start {
- u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
- u8 reserved[15];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
-
-/* Set DCB (direct 0x0303) */
-struct i40e_aqc_set_dcb_parameters {
- u8 command;
-#define I40E_AQ_DCB_SET_AGENT 0x1
-#define I40E_DCB_VALID 0x1
- u8 valid_flags;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
-
-/* Apply MIB changes (0x0A07)
- * uses the generic struct as it contains no data
- */
-
-/* Add UDP Tunnel command and completion (direct 0x0B00) */
-struct i40e_aqc_add_udp_tunnel {
- __le16 udp_port;
- u8 reserved0[3];
- u8 protocol_type;
-#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
-#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
-#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
-#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
- u8 reserved1[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
-
-struct i40e_aqc_add_udp_tunnel_completion {
- __le16 udp_port;
- u8 filter_entry_index;
- u8 multiple_pfs;
-#define I40E_AQC_SINGLE_PF 0x0
-#define I40E_AQC_MULTIPLE_PFS 0x1
- u8 total_filters;
- u8 reserved[11];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
-
-/* Remove UDP Tunnel command (0x0B01) */
-struct i40e_aqc_remove_udp_tunnel {
- u8 reserved[2];
- u8 index; /* 0 to 15 */
- u8 reserved2[13];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
-
-struct i40e_aqc_del_udp_tunnel_completion {
- __le16 udp_port;
- u8 index; /* 0 to 15 */
- u8 multiple_pfs;
- u8 total_filters_used;
- u8 reserved1[11];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
-
-struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
- __le16 vsi_id;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
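
The vsi_id field here packs a 10-bit VSI index plus a valid flag in bit 15; the driver code for i40e_aq_get_set_rss_key later in this file performs exactly this packing with cpu_to_le16(). A hedged host-side sketch with plain uint16_t:

#include <stdint.h>
#include <stdio.h>

#define RSS_VSI_VALID   (1u << 15) /* I40E_AQC_SET_RSS_KEY_VSI_VALID */
#define RSS_VSI_ID_MASK 0x3FFu     /* I40E_AQC_SET_RSS_KEY_VSI_ID_MASK */

int main(void)
{
    uint16_t vsi_id = (uint16_t)((5u & RSS_VSI_ID_MASK) | RSS_VSI_VALID);

    printf("vsi_id field = 0x%04X\n", (unsigned int)vsi_id);
    return 0;
}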
-
-struct i40e_aqc_get_set_rss_key_data {
- u8 standard_rss_key[0x28];
- u8 extended_hash_key[0xc];
-};
-
-I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
-
-struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
- __le16 vsi_id;
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
- BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
-
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
- __le16 flags;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
-
-/* tunnel key structure 0x0B10 */
-
-struct i40e_aqc_tunnel_key_structure_A0 {
- __le16 key1_off;
- __le16 key1_len;
- __le16 key2_off;
- __le16 key2_len;
- __le16 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
-/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
- u8 reserved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);
-
-struct i40e_aqc_tunnel_key_structure {
- u8 key1_off;
- u8 key2_off;
- u8 key1_len; /* 0 to 15 */
- u8 key2_len; /* 0 to 15 */
- u8 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
-/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
- u8 network_key_index;
-#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
-#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
-#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
-#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
- u8 reserved[10];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
-
-/* OEM mode commands (direct 0xFE0x) */
-struct i40e_aqc_oem_param_change {
- __le32 param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
-#define I40E_AQ_OEM_PARAM_MAC 2
- __le32 param_value1;
- __le16 param_value2;
- u8 reserved[6];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
-
-struct i40e_aqc_oem_state_change {
- __le32 state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
-#define I40E_AQ_OEM_STATE_LINK_UP 0x1
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
-
-/* Initialize OCSD (0xFE02, direct) */
-struct i40e_aqc_opc_oem_ocsd_initialize {
- u8 type_status;
- u8 reserved1[3];
- __le32 ocsd_memory_block_addr_high;
- __le32 ocsd_memory_block_addr_low;
- __le32 requested_update_interval;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
-
-/* Initialize OCBB (0xFE03, direct) */
-struct i40e_aqc_opc_oem_ocbb_initialize {
- u8 type_status;
- u8 reserved1[3];
- __le32 ocbb_memory_block_addr_high;
- __le32 ocbb_memory_block_addr_low;
- u8 reserved2[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
-
-/* debug commands */
-
-/* get device id (0xFF00) uses the generic structure */
-
-/* set test mode (0xFF01, internal) */
-
-struct i40e_acq_set_test_mode {
- u8 mode;
-#define I40E_AQ_TEST_PARTIAL 0
-#define I40E_AQ_TEST_FULL 1
-#define I40E_AQ_TEST_NVM 2
- u8 reserved[3];
- u8 command;
-#define I40E_AQ_TEST_OPEN 0
-#define I40E_AQ_TEST_CLOSE 1
-#define I40E_AQ_TEST_INC 2
- u8 reserved2[3];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
-
-/* Debug Read Register command (0xFF03)
- * Debug Write Register command (0xFF04)
- */
-struct i40e_aqc_debug_reg_read_write {
- __le32 reserved;
- __le32 address;
- __le32 value_high;
- __le32 value_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
-
-/* Scatter/gather Reg Read (indirect 0xFF05)
- * Scatter/gather Reg Write (indirect 0xFF06)
- */
-
-/* i40e_aq_desc is used for the command */
-struct i40e_aqc_debug_reg_sg_element_data {
- __le32 address;
- __le32 value;
-};
-
-/* Debug Modify register (direct 0xFF07) */
-struct i40e_aqc_debug_modify_reg {
- __le32 address;
- __le32 value;
- __le32 clear_mask;
- __le32 set_mask;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
-
-/* dump internal data (0xFF08, indirect) */
-
-#define I40E_AQ_CLUSTER_ID_AUX 0
-#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
-#define I40E_AQ_CLUSTER_ID_TXSCHED 2
-#define I40E_AQ_CLUSTER_ID_HMC 3
-#define I40E_AQ_CLUSTER_ID_MAC0 4
-#define I40E_AQ_CLUSTER_ID_MAC1 5
-#define I40E_AQ_CLUSTER_ID_MAC2 6
-#define I40E_AQ_CLUSTER_ID_MAC3 7
-#define I40E_AQ_CLUSTER_ID_DCB 8
-#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
-#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
-#define I40E_AQ_CLUSTER_ID_ALTRAM 11
-
-struct i40e_aqc_debug_dump_internals {
- u8 cluster_id;
- u8 table_id;
- __le16 data_size;
- __le32 idx;
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
-
-struct i40e_aqc_debug_modify_internals {
- u8 cluster_id;
- u8 cluster_specific_params[7];
- __le32 address_high;
- __le32 address_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
-
-#endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
deleted file mode 100644
index cb8689222c8b..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_ALLOC_H_
-#define _I40E_ALLOC_H_
-
-struct i40e_hw;
-
-/* Memory allocation types */
-enum i40e_memory_type {
- i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
- i40e_mem_asq_buf = 1,
- i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
- i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
- i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
- i40e_mem_pd = 5, /* Page Descriptor */
- i40e_mem_bp = 6, /* Backing Page - 4KB */
- i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
- i40e_mem_reserved
-};
-
-/* prototypes for functions used for dynamic memory allocation */
-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem,
- enum i40e_memory_type type,
- u64 size, u32 alignment);
-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem);
-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem,
- u32 size);
-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem);
-
-#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
deleted file mode 100644
index eea280ba411e..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ /dev/null
@@ -1,1320 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#include "i40e_type.h"
-#include "i40e_adminq.h"
-#include "i40e_prototype.h"
-#include <linux/avf/virtchnl.h>
-
-/**
- * i40e_set_mac_type - Sets MAC type
- * @hw: pointer to the HW structure
- *
- * This function sets the mac type of the adapter based on the
- * vendor ID and device ID stored in the hw structure.
- **/
-i40e_status i40e_set_mac_type(struct i40e_hw *hw)
-{
- i40e_status status = 0;
-
- if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
- switch (hw->device_id) {
- case I40E_DEV_ID_SFP_XL710:
- case I40E_DEV_ID_QEMU:
- case I40E_DEV_ID_KX_B:
- case I40E_DEV_ID_KX_C:
- case I40E_DEV_ID_QSFP_A:
- case I40E_DEV_ID_QSFP_B:
- case I40E_DEV_ID_QSFP_C:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_20G_KR2:
- case I40E_DEV_ID_20G_KR2_A:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- hw->mac.type = I40E_MAC_XL710;
- break;
- case I40E_DEV_ID_SFP_X722:
- case I40E_DEV_ID_1G_BASE_T_X722:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_SFP_I_X722:
- hw->mac.type = I40E_MAC_X722;
- break;
- case I40E_DEV_ID_X722_VF:
- hw->mac.type = I40E_MAC_X722_VF;
- break;
- case I40E_DEV_ID_VF:
- case I40E_DEV_ID_VF_HV:
- case I40E_DEV_ID_ADAPTIVE_VF:
- hw->mac.type = I40E_MAC_VF;
- break;
- default:
- hw->mac.type = I40E_MAC_GENERIC;
- break;
- }
- } else {
- status = I40E_ERR_DEVICE_NOT_SUPPORTED;
- }
-
- hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
- hw->mac.type, status);
- return status;
-}
-
-/**
- * i40evf_aq_str - convert AQ err code to a string
- * @hw: pointer to the HW structure
- * @aq_err: the AQ error code to convert
- **/
-const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
-{
- switch (aq_err) {
- case I40E_AQ_RC_OK:
- return "OK";
- case I40E_AQ_RC_EPERM:
- return "I40E_AQ_RC_EPERM";
- case I40E_AQ_RC_ENOENT:
- return "I40E_AQ_RC_ENOENT";
- case I40E_AQ_RC_ESRCH:
- return "I40E_AQ_RC_ESRCH";
- case I40E_AQ_RC_EINTR:
- return "I40E_AQ_RC_EINTR";
- case I40E_AQ_RC_EIO:
- return "I40E_AQ_RC_EIO";
- case I40E_AQ_RC_ENXIO:
- return "I40E_AQ_RC_ENXIO";
- case I40E_AQ_RC_E2BIG:
- return "I40E_AQ_RC_E2BIG";
- case I40E_AQ_RC_EAGAIN:
- return "I40E_AQ_RC_EAGAIN";
- case I40E_AQ_RC_ENOMEM:
- return "I40E_AQ_RC_ENOMEM";
- case I40E_AQ_RC_EACCES:
- return "I40E_AQ_RC_EACCES";
- case I40E_AQ_RC_EFAULT:
- return "I40E_AQ_RC_EFAULT";
- case I40E_AQ_RC_EBUSY:
- return "I40E_AQ_RC_EBUSY";
- case I40E_AQ_RC_EEXIST:
- return "I40E_AQ_RC_EEXIST";
- case I40E_AQ_RC_EINVAL:
- return "I40E_AQ_RC_EINVAL";
- case I40E_AQ_RC_ENOTTY:
- return "I40E_AQ_RC_ENOTTY";
- case I40E_AQ_RC_ENOSPC:
- return "I40E_AQ_RC_ENOSPC";
- case I40E_AQ_RC_ENOSYS:
- return "I40E_AQ_RC_ENOSYS";
- case I40E_AQ_RC_ERANGE:
- return "I40E_AQ_RC_ERANGE";
- case I40E_AQ_RC_EFLUSHED:
- return "I40E_AQ_RC_EFLUSHED";
- case I40E_AQ_RC_BAD_ADDR:
- return "I40E_AQ_RC_BAD_ADDR";
- case I40E_AQ_RC_EMODE:
- return "I40E_AQ_RC_EMODE";
- case I40E_AQ_RC_EFBIG:
- return "I40E_AQ_RC_EFBIG";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
- return hw->err_str;
-}
-
-/**
- * i40evf_stat_str - convert status err code to a string
- * @hw: pointer to the HW structure
- * @stat_err: the status error code to convert
- **/
-const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
-{
- switch (stat_err) {
- case 0:
- return "OK";
- case I40E_ERR_NVM:
- return "I40E_ERR_NVM";
- case I40E_ERR_NVM_CHECKSUM:
- return "I40E_ERR_NVM_CHECKSUM";
- case I40E_ERR_PHY:
- return "I40E_ERR_PHY";
- case I40E_ERR_CONFIG:
- return "I40E_ERR_CONFIG";
- case I40E_ERR_PARAM:
- return "I40E_ERR_PARAM";
- case I40E_ERR_MAC_TYPE:
- return "I40E_ERR_MAC_TYPE";
- case I40E_ERR_UNKNOWN_PHY:
- return "I40E_ERR_UNKNOWN_PHY";
- case I40E_ERR_LINK_SETUP:
- return "I40E_ERR_LINK_SETUP";
- case I40E_ERR_ADAPTER_STOPPED:
- return "I40E_ERR_ADAPTER_STOPPED";
- case I40E_ERR_INVALID_MAC_ADDR:
- return "I40E_ERR_INVALID_MAC_ADDR";
- case I40E_ERR_DEVICE_NOT_SUPPORTED:
- return "I40E_ERR_DEVICE_NOT_SUPPORTED";
- case I40E_ERR_MASTER_REQUESTS_PENDING:
- return "I40E_ERR_MASTER_REQUESTS_PENDING";
- case I40E_ERR_INVALID_LINK_SETTINGS:
- return "I40E_ERR_INVALID_LINK_SETTINGS";
- case I40E_ERR_AUTONEG_NOT_COMPLETE:
- return "I40E_ERR_AUTONEG_NOT_COMPLETE";
- case I40E_ERR_RESET_FAILED:
- return "I40E_ERR_RESET_FAILED";
- case I40E_ERR_SWFW_SYNC:
- return "I40E_ERR_SWFW_SYNC";
- case I40E_ERR_NO_AVAILABLE_VSI:
- return "I40E_ERR_NO_AVAILABLE_VSI";
- case I40E_ERR_NO_MEMORY:
- return "I40E_ERR_NO_MEMORY";
- case I40E_ERR_BAD_PTR:
- return "I40E_ERR_BAD_PTR";
- case I40E_ERR_RING_FULL:
- return "I40E_ERR_RING_FULL";
- case I40E_ERR_INVALID_PD_ID:
- return "I40E_ERR_INVALID_PD_ID";
- case I40E_ERR_INVALID_QP_ID:
- return "I40E_ERR_INVALID_QP_ID";
- case I40E_ERR_INVALID_CQ_ID:
- return "I40E_ERR_INVALID_CQ_ID";
- case I40E_ERR_INVALID_CEQ_ID:
- return "I40E_ERR_INVALID_CEQ_ID";
- case I40E_ERR_INVALID_AEQ_ID:
- return "I40E_ERR_INVALID_AEQ_ID";
- case I40E_ERR_INVALID_SIZE:
- return "I40E_ERR_INVALID_SIZE";
- case I40E_ERR_INVALID_ARP_INDEX:
- return "I40E_ERR_INVALID_ARP_INDEX";
- case I40E_ERR_INVALID_FPM_FUNC_ID:
- return "I40E_ERR_INVALID_FPM_FUNC_ID";
- case I40E_ERR_QP_INVALID_MSG_SIZE:
- return "I40E_ERR_QP_INVALID_MSG_SIZE";
- case I40E_ERR_QP_TOOMANY_WRS_POSTED:
- return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
- case I40E_ERR_INVALID_FRAG_COUNT:
- return "I40E_ERR_INVALID_FRAG_COUNT";
- case I40E_ERR_QUEUE_EMPTY:
- return "I40E_ERR_QUEUE_EMPTY";
- case I40E_ERR_INVALID_ALIGNMENT:
- return "I40E_ERR_INVALID_ALIGNMENT";
- case I40E_ERR_FLUSHED_QUEUE:
- return "I40E_ERR_FLUSHED_QUEUE";
- case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
- return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
- case I40E_ERR_INVALID_IMM_DATA_SIZE:
- return "I40E_ERR_INVALID_IMM_DATA_SIZE";
- case I40E_ERR_TIMEOUT:
- return "I40E_ERR_TIMEOUT";
- case I40E_ERR_OPCODE_MISMATCH:
- return "I40E_ERR_OPCODE_MISMATCH";
- case I40E_ERR_CQP_COMPL_ERROR:
- return "I40E_ERR_CQP_COMPL_ERROR";
- case I40E_ERR_INVALID_VF_ID:
- return "I40E_ERR_INVALID_VF_ID";
- case I40E_ERR_INVALID_HMCFN_ID:
- return "I40E_ERR_INVALID_HMCFN_ID";
- case I40E_ERR_BACKING_PAGE_ERROR:
- return "I40E_ERR_BACKING_PAGE_ERROR";
- case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
- return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
- case I40E_ERR_INVALID_PBLE_INDEX:
- return "I40E_ERR_INVALID_PBLE_INDEX";
- case I40E_ERR_INVALID_SD_INDEX:
- return "I40E_ERR_INVALID_SD_INDEX";
- case I40E_ERR_INVALID_PAGE_DESC_INDEX:
- return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
- case I40E_ERR_INVALID_SD_TYPE:
- return "I40E_ERR_INVALID_SD_TYPE";
- case I40E_ERR_MEMCPY_FAILED:
- return "I40E_ERR_MEMCPY_FAILED";
- case I40E_ERR_INVALID_HMC_OBJ_INDEX:
- return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
- case I40E_ERR_INVALID_HMC_OBJ_COUNT:
- return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
- case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
- return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
- case I40E_ERR_SRQ_ENABLED:
- return "I40E_ERR_SRQ_ENABLED";
- case I40E_ERR_ADMIN_QUEUE_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_ERROR";
- case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
- return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
- case I40E_ERR_BUF_TOO_SHORT:
- return "I40E_ERR_BUF_TOO_SHORT";
- case I40E_ERR_ADMIN_QUEUE_FULL:
- return "I40E_ERR_ADMIN_QUEUE_FULL";
- case I40E_ERR_ADMIN_QUEUE_NO_WORK:
- return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
- case I40E_ERR_BAD_IWARP_CQE:
- return "I40E_ERR_BAD_IWARP_CQE";
- case I40E_ERR_NVM_BLANK_MODE:
- return "I40E_ERR_NVM_BLANK_MODE";
- case I40E_ERR_NOT_IMPLEMENTED:
- return "I40E_ERR_NOT_IMPLEMENTED";
- case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
- return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
- case I40E_ERR_DIAG_TEST_FAILED:
- return "I40E_ERR_DIAG_TEST_FAILED";
- case I40E_ERR_NOT_READY:
- return "I40E_ERR_NOT_READY";
- case I40E_NOT_SUPPORTED:
- return "I40E_NOT_SUPPORTED";
- case I40E_ERR_FIRMWARE_API_VERSION:
- return "I40E_ERR_FIRMWARE_API_VERSION";
- case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
- return hw->err_str;
-}
-
-/**
- * i40evf_debug_aq
- * @hw: pointer to the hw struct
- * @mask: debug mask
- * @desc: pointer to admin queue descriptor
- * @buffer: pointer to command buffer
- * @buf_len: max length of buffer
- *
- * Dumps debug log about adminq command with descriptor contents.
- **/
-void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
- void *buffer, u16 buf_len)
-{
- struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u8 *buf = (u8 *)buffer;
-
- if ((!(mask & hw->debug_mask)) || (desc == NULL))
- return;
-
- i40e_debug(hw, mask,
- "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- le16_to_cpu(aq_desc->opcode),
- le16_to_cpu(aq_desc->flags),
- le16_to_cpu(aq_desc->datalen),
- le16_to_cpu(aq_desc->retval));
- i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->cookie_high),
- le32_to_cpu(aq_desc->cookie_low));
- i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.internal.param0),
- le32_to_cpu(aq_desc->params.internal.param1));
- i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
- le32_to_cpu(aq_desc->params.external.addr_high),
- le32_to_cpu(aq_desc->params.external.addr_low));
-
- if ((buffer != NULL) && (aq_desc->datalen != 0)) {
- u16 len = le16_to_cpu(aq_desc->datalen);
-
- i40e_debug(hw, mask, "AQ CMD Buffer:\n");
- if (buf_len < len)
- len = buf_len;
- /* write the full 16-byte chunks */
- if (hw->debug_mask & mask) {
- char prefix[27];
-
- snprintf(prefix, sizeof(prefix),
- "i40evf %02x:%02x.%x: \t0x",
- hw->bus.bus_id,
- hw->bus.device,
- hw->bus.func);
-
- print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
- 16, 1, buf, len, false);
- }
- }
-}
-
-/**
- * i40evf_check_asq_alive
- * @hw: pointer to the hw struct
- *
- * Returns true if the admin send queue (ASQ) is enabled, else false.
- **/
-bool i40evf_check_asq_alive(struct i40e_hw *hw)
-{
- if (hw->aq.asq.len)
- return !!(rd32(hw, hw->aq.asq.len) &
- I40E_VF_ATQLEN1_ATQENABLE_MASK);
- else
- return false;
-}
-
-/**
- * i40evf_aq_queue_shutdown
- * @hw: pointer to the hw struct
- * @unloading: true if the driver is unloading itself
- *
- * Tell the Firmware that we're shutting down the AdminQ and whether
- * or not the driver is unloading as well.
- **/
-i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_queue_shutdown);
-
- if (unloading)
- cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
- status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
-
- return status;
-}
-
-/**
- * i40e_aq_get_set_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- * @set: set true to set the table, false to get the table
- *
- * Internal function to get or set the RSS lookup table
- **/
-static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
- u16 vsi_id, bool pf_lut,
- u8 *lut, u16 lut_size,
- bool set)
-{
- i40e_status status;
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_lut *cmd_resp =
- (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
-
- if (set)
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_rss_lut);
- else
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_rss_lut);
-
- /* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
-
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
-
- if (pf_lut)
- cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
- else
- cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
-
- status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
-
- return status;
-}
-
-/**
- * i40evf_aq_get_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- *
- * get the RSS lookup table, PF or VSI type
- **/
-i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
-{
- return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
- false);
-}
-
-/**
- * i40evf_aq_set_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- *
- * set the RSS lookup table, PF or VSI type
- **/
-i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
-{
- return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
-}
-
-/**
- * i40e_aq_get_set_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- * @set: set true to set the key, false to get the key
- *
- * get or set the RSS key per VSI
- **/
-static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key,
- bool set)
-{
- i40e_status status;
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_set_rss_key *cmd_resp =
- (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
- u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
-
- if (set)
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_rss_key);
- else
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_rss_key);
-
- /* Indirect command */
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
-
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
-
- status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
-
- return status;
-}
-
-/**
- * i40evf_aq_get_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- *
- * get the RSS key per VSI
- **/
-i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
-{
- return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
-}
-
-/**
- * i40evf_aq_set_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- *
- * set the RSS key per VSI
- **/
-i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
-{
- return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
-}
-
-
-/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT i40evf_ptype_lookup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum i40e_rx_l2_ptype to decode the packet type
- * ENDIF
- */
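
A hedged standalone sketch of the decode workflow described above, with a simplified stand-in for the decoded structure (field names here are illustrative; the kernel's struct i40e_rx_ptype_decoded carries more fields):

#include <stdint.h>
#include <stdio.h>

struct ptype_decoded {
    uint8_t ptype;      /* should equal the table row index */
    uint8_t known;      /* 0 = row unused / packet type unknown */
    uint8_t outer_ip;   /* nonzero when the outer header is IP */
    uint8_t inner_prot; /* innermost protocol code, e.g. TCP/UDP */
};

static void classify(const struct ptype_decoded *d)
{
    if (!d->known)
        printf("ptype %d: unknown packet\n", d->ptype);
    else if (d->outer_ip)
        printf("ptype %d: IP packet, inner protocol %d\n",
               d->ptype, d->inner_prot);
    else
        printf("ptype %d: plain L2 packet\n", d->ptype);
}

int main(void)
{
    struct ptype_decoded tcp4 = { .ptype = 26, .known = 1,
                                  .outer_ip = 1, .inner_prot = 4 };

    classify(&tcp4);
    return 0;
}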
-
-/* macro to make the table lines short */
-#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- { PTYPE, \
- 1, \
- I40E_RX_PTYPE_OUTER_##OUTER_IP, \
- I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- I40E_RX_PTYPE_##OUTER_FRAG, \
- I40E_RX_PTYPE_TUNNEL_##T, \
- I40E_RX_PTYPE_TUNNEL_END_##TE, \
- I40E_RX_PTYPE_##TEF, \
- I40E_RX_PTYPE_INNER_PROT_##I, \
- I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
- { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros make the table fit but are terse */
-#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
-#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
-#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
-
-/* Lookup table mapping the HW PTYPE to the bit field for decoding */
-struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
- /* L2 Packet types */
- I40E_PTT_UNUSED_ENTRY(0),
- I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
- I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT_UNUSED_ENTRY(4),
- I40E_PTT_UNUSED_ENTRY(5),
- I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT_UNUSED_ENTRY(8),
- I40E_PTT_UNUSED_ENTRY(9),
- I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-
- /* Non Tunneled IPv4 */
- I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(25),
- I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(32),
- I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(39),
- I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(47),
- I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(54),
- I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(62),
- I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(69),
- I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(77),
- I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(84),
- I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
- I40E_PTT_UNUSED_ENTRY(91),
- I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(98),
- I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(105),
- I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(113),
- I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(120),
- I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(128),
- I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(135),
- I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(143),
- I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(150),
- I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* unused entries */
- I40E_PTT_UNUSED_ENTRY(154),
- I40E_PTT_UNUSED_ENTRY(155),
- I40E_PTT_UNUSED_ENTRY(156),
- I40E_PTT_UNUSED_ENTRY(157),
- I40E_PTT_UNUSED_ENTRY(158),
- I40E_PTT_UNUSED_ENTRY(159),
-
- I40E_PTT_UNUSED_ENTRY(160),
- I40E_PTT_UNUSED_ENTRY(161),
- I40E_PTT_UNUSED_ENTRY(162),
- I40E_PTT_UNUSED_ENTRY(163),
- I40E_PTT_UNUSED_ENTRY(164),
- I40E_PTT_UNUSED_ENTRY(165),
- I40E_PTT_UNUSED_ENTRY(166),
- I40E_PTT_UNUSED_ENTRY(167),
- I40E_PTT_UNUSED_ENTRY(168),
- I40E_PTT_UNUSED_ENTRY(169),
-
- I40E_PTT_UNUSED_ENTRY(170),
- I40E_PTT_UNUSED_ENTRY(171),
- I40E_PTT_UNUSED_ENTRY(172),
- I40E_PTT_UNUSED_ENTRY(173),
- I40E_PTT_UNUSED_ENTRY(174),
- I40E_PTT_UNUSED_ENTRY(175),
- I40E_PTT_UNUSED_ENTRY(176),
- I40E_PTT_UNUSED_ENTRY(177),
- I40E_PTT_UNUSED_ENTRY(178),
- I40E_PTT_UNUSED_ENTRY(179),
-
- I40E_PTT_UNUSED_ENTRY(180),
- I40E_PTT_UNUSED_ENTRY(181),
- I40E_PTT_UNUSED_ENTRY(182),
- I40E_PTT_UNUSED_ENTRY(183),
- I40E_PTT_UNUSED_ENTRY(184),
- I40E_PTT_UNUSED_ENTRY(185),
- I40E_PTT_UNUSED_ENTRY(186),
- I40E_PTT_UNUSED_ENTRY(187),
- I40E_PTT_UNUSED_ENTRY(188),
- I40E_PTT_UNUSED_ENTRY(189),
-
- I40E_PTT_UNUSED_ENTRY(190),
- I40E_PTT_UNUSED_ENTRY(191),
- I40E_PTT_UNUSED_ENTRY(192),
- I40E_PTT_UNUSED_ENTRY(193),
- I40E_PTT_UNUSED_ENTRY(194),
- I40E_PTT_UNUSED_ENTRY(195),
- I40E_PTT_UNUSED_ENTRY(196),
- I40E_PTT_UNUSED_ENTRY(197),
- I40E_PTT_UNUSED_ENTRY(198),
- I40E_PTT_UNUSED_ENTRY(199),
-
- I40E_PTT_UNUSED_ENTRY(200),
- I40E_PTT_UNUSED_ENTRY(201),
- I40E_PTT_UNUSED_ENTRY(202),
- I40E_PTT_UNUSED_ENTRY(203),
- I40E_PTT_UNUSED_ENTRY(204),
- I40E_PTT_UNUSED_ENTRY(205),
- I40E_PTT_UNUSED_ENTRY(206),
- I40E_PTT_UNUSED_ENTRY(207),
- I40E_PTT_UNUSED_ENTRY(208),
- I40E_PTT_UNUSED_ENTRY(209),
-
- I40E_PTT_UNUSED_ENTRY(210),
- I40E_PTT_UNUSED_ENTRY(211),
- I40E_PTT_UNUSED_ENTRY(212),
- I40E_PTT_UNUSED_ENTRY(213),
- I40E_PTT_UNUSED_ENTRY(214),
- I40E_PTT_UNUSED_ENTRY(215),
- I40E_PTT_UNUSED_ENTRY(216),
- I40E_PTT_UNUSED_ENTRY(217),
- I40E_PTT_UNUSED_ENTRY(218),
- I40E_PTT_UNUSED_ENTRY(219),
-
- I40E_PTT_UNUSED_ENTRY(220),
- I40E_PTT_UNUSED_ENTRY(221),
- I40E_PTT_UNUSED_ENTRY(222),
- I40E_PTT_UNUSED_ENTRY(223),
- I40E_PTT_UNUSED_ENTRY(224),
- I40E_PTT_UNUSED_ENTRY(225),
- I40E_PTT_UNUSED_ENTRY(226),
- I40E_PTT_UNUSED_ENTRY(227),
- I40E_PTT_UNUSED_ENTRY(228),
- I40E_PTT_UNUSED_ENTRY(229),
-
- I40E_PTT_UNUSED_ENTRY(230),
- I40E_PTT_UNUSED_ENTRY(231),
- I40E_PTT_UNUSED_ENTRY(232),
- I40E_PTT_UNUSED_ENTRY(233),
- I40E_PTT_UNUSED_ENTRY(234),
- I40E_PTT_UNUSED_ENTRY(235),
- I40E_PTT_UNUSED_ENTRY(236),
- I40E_PTT_UNUSED_ENTRY(237),
- I40E_PTT_UNUSED_ENTRY(238),
- I40E_PTT_UNUSED_ENTRY(239),
-
- I40E_PTT_UNUSED_ENTRY(240),
- I40E_PTT_UNUSED_ENTRY(241),
- I40E_PTT_UNUSED_ENTRY(242),
- I40E_PTT_UNUSED_ENTRY(243),
- I40E_PTT_UNUSED_ENTRY(244),
- I40E_PTT_UNUSED_ENTRY(245),
- I40E_PTT_UNUSED_ENTRY(246),
- I40E_PTT_UNUSED_ENTRY(247),
- I40E_PTT_UNUSED_ENTRY(248),
- I40E_PTT_UNUSED_ENTRY(249),
-
- I40E_PTT_UNUSED_ENTRY(250),
- I40E_PTT_UNUSED_ENTRY(251),
- I40E_PTT_UNUSED_ENTRY(252),
- I40E_PTT_UNUSED_ENTRY(253),
- I40E_PTT_UNUSED_ENTRY(254),
- I40E_PTT_UNUSED_ENTRY(255)
-};
-
-/**
- * i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- * @reg_val: ptr to register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Use the firmware to read the Rx control register,
- * especially useful if the Rx unit is under heavy pressure
- **/
-i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
-
- if (!reg_val)
- return I40E_ERR_PARAM;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_rx_ctl_reg_read);
-
- cmd_resp->address = cpu_to_le32(reg_addr);
-
- status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (status == 0)
- *reg_val = le32_to_cpu(cmd_resp->value);
-
- return status;
-}
-
-/**
- * i40evf_read_rx_ctl - read from an Rx control register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- **/
-u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
-{
- i40e_status status = 0;
- bool use_register;
- int retry = 5;
- u32 val = 0;
-
- use_register = (((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver < 5)) ||
- (hw->mac.type == I40E_MAC_X722));
- if (!use_register) {
-do_retry:
- status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
- &val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
- usleep_range(1000, 2000);
- retry--;
- goto do_retry;
- }
- }
-
- /* if the AQ access failed, try the old-fashioned way */
- if (status || use_register)
- val = rd32(hw, reg_addr);
-
- return val;
-}
-
-/**
- * i40evf_aq_rx_ctl_write_register - use FW to write to an Rx control register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- * @reg_val: register value
- * @cmd_details: pointer to command details structure or NULL
- *
- * Use the firmware to write to an Rx control register,
- * especially useful if the Rx unit is under heavy pressure
- **/
-i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_rx_ctl_reg_read_write *cmd =
- (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_rx_ctl_reg_write);
-
- cmd->address = cpu_to_le32(reg_addr);
- cmd->value = cpu_to_le32(reg_val);
-
- status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
- * i40evf_write_rx_ctl - write to an Rx control register
- * @hw: pointer to the hw struct
- * @reg_addr: register address
- * @reg_val: register value
- **/
-void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
-{
- i40e_status status = 0;
- bool use_register;
- int retry = 5;
-
- use_register = (((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver < 5)) ||
- (hw->mac.type == I40E_MAC_X722));
- if (!use_register) {
-do_retry:
- status = i40evf_aq_rx_ctl_write_register(hw, reg_addr,
- reg_val, NULL);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
- usleep_range(1000, 2000);
- retry--;
- goto do_retry;
- }
- }
-
- /* if the AQ access failed, try the old-fashioned way */
- if (status || use_register)
- wr32(hw, reg_addr, reg_val);
-}
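
The read/write pair above gives callers a single entry point that prefers the
firmware path and quietly degrades to direct MMIO. A minimal usage sketch (the
register choice and the flow-type bit are assumptions for illustration;
I40E_VFQF_HENA is defined in the i40e_register.h hunk later in this patch):

	/* Read-modify-write an Rx control register; the wrappers retry on
	 * EAGAIN and fall back to rd32()/wr32() on old firmware or X722.
	 */
	u32 hena = i40evf_read_rx_ctl(hw, I40E_VFQF_HENA(0));

	hena |= BIT(2);		/* assumed: enable one RSS hash flow type */
	i40evf_write_rx_ctl(hw, I40E_VFQF_HENA(0), hena);
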
-
-/**
- * i40e_aq_send_msg_to_pf - send a message to the PF through the admin queue
- * @hw: pointer to the hardware structure
- * @v_opcode: opcodes for VF-PF communication
- * @v_retval: return error code
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @cmd_details: pointer to command details
- *
- * Send message to PF driver using admin queue. By default, this message
- * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
- * completion before returning.
- **/
-i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum virtchnl_ops v_opcode,
- i40e_status v_retval,
- u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_asq_cmd_details details;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
- desc.cookie_high = cpu_to_le32(v_opcode);
- desc.cookie_low = cpu_to_le32(v_retval);
- if (msglen) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
- | I40E_AQ_FLAG_RD));
- if (msglen > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = cpu_to_le16(msglen);
- }
- if (!cmd_details) {
- memset(&details, 0, sizeof(details));
- details.async = true;
- cmd_details = &details;
- }
- status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
- return status;
-}
-
-/**
- * i40e_vf_parse_hw_config - parse the VF resource message into the hw struct
- * @hw: pointer to the hardware structure
- * @msg: pointer to the virtual channel VF resource structure
- *
- * Given a VF resource message from the PF, populate the hw struct
- * with appropriate information.
- **/
-void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct virtchnl_vf_resource *msg)
-{
- struct virtchnl_vsi_resource *vsi_res;
- int i;
-
- vsi_res = &msg->vsi_res[0];
-
- hw->dev_caps.num_vsis = msg->num_vsis;
- hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
- hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
- hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
- hw->dev_caps.dcb = msg->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_L2;
- hw->dev_caps.fcoe = 0;
- for (i = 0; i < msg->num_vsis; i++) {
- if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
- ether_addr_copy(hw->mac.perm_addr,
- vsi_res->default_mac_addr);
- ether_addr_copy(hw->mac.addr,
- vsi_res->default_mac_addr);
- }
- vsi_res++;
- }
-}
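
A sketch of where the parser sits in VF bring-up (the event variable and
message plumbing are assumed; only the call itself comes from this file):

	/* After the PF answers VIRTCHNL_OP_GET_VF_RESOURCES, hand the
	 * payload to the parser so hw->dev_caps and hw->mac reflect what
	 * the PF actually granted.
	 */
	struct virtchnl_vf_resource *res =
		(struct virtchnl_vf_resource *)event.msg_buf;

	i40e_vf_parse_hw_config(hw, res);
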
-
-/**
- * i40e_vf_reset - request a VF reset from the PF
- * @hw: pointer to the hardware structure
- *
- * Send a VF_RESET message to the PF. Does not wait for response from PF
- * as none will be forthcoming. Immediately after calling this function,
- * the admin queue should be shut down and (optionally) reinitialized.
- **/
-i40e_status i40e_vf_reset(struct i40e_hw *hw)
-{
- return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
- 0, NULL, 0, NULL);
-}
-
-/**
- * i40evf_aq_write_ddp - Write dynamic device personalization (ddp)
- * @hw: pointer to the hw struct
- * @buff: command buffer (size in bytes = buff_size)
- * @buff_size: buffer size in bytes
- * @track_id: package tracking id
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code
-i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
-		    u16 buff_size, u32 track_id,
-		    u32 *error_offset, u32 *error_info,
-		    struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_write_personalization_profile *cmd =
- (struct i40e_aqc_write_personalization_profile *)
- &desc.params.raw;
- struct i40e_aqc_write_ddp_resp *resp;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_write_personalization_profile);
-
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- desc.datalen = cpu_to_le16(buff_size);
-
- cmd->profile_track_id = cpu_to_le32(track_id);
-
- status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- if (!status) {
- resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
- if (error_offset)
- *error_offset = le32_to_cpu(resp->error_offset);
- if (error_info)
- *error_info = le32_to_cpu(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp)
- * @hw: pointer to the hw struct
- * @buff: command buffer (size in bytes = buff_size)
- * @buff_size: buffer size in bytes
- * @flags: AdminQ command flags
- * @cmd_details: pointer to command details structure or NULL
- **/
-enum i40e_status_code
-i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
-		       u16 buff_size, u8 flags,
-		       struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_get_applied_profiles *cmd =
- (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
- i40e_status status;
-
- i40evf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_personalization_profile_list);
-
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = cpu_to_le16(buff_size);
-
- cmd->flags = flags;
-
- status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
-
- return status;
-}
-
-/**
- * i40evf_find_segment_in_package
- * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
- * @pkg_hdr: pointer to the package header to be searched
- *
- * This function searches a package file for a particular segment type. On
- * success it returns a pointer to the segment header, otherwise it will
- * return NULL.
- **/
-struct i40e_generic_seg_header *
-i40evf_find_segment_in_package(u32 segment_type,
- struct i40e_package_header *pkg_hdr)
-{
- struct i40e_generic_seg_header *segment;
- u32 i;
-
- /* Search all package segments for the requested segment type */
- for (i = 0; i < pkg_hdr->segment_count; i++) {
- segment =
- (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
- pkg_hdr->segment_offset[i]);
-
- if (segment->type == segment_type)
- return segment;
- }
-
- return NULL;
-}
-
-/**
- * i40evf_write_profile
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package to be downloaded
- * @track_id: package tracking id
- *
- * Handles the download of a complete package.
- */
-enum i40e_status_code
-i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
- u32 track_id)
-{
- i40e_status status = 0;
- struct i40e_section_table *sec_tbl;
- struct i40e_profile_section_header *sec = NULL;
- u32 dev_cnt;
- u32 vendor_dev_id;
- u32 *nvm;
- u32 section_size = 0;
- u32 offset = 0, info = 0;
- u32 i;
-
- dev_cnt = profile->device_table_count;
-
- for (i = 0; i < dev_cnt; i++) {
- vendor_dev_id = profile->device_table[i].vendor_dev_id;
- if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
- if (hw->device_id == (vendor_dev_id & 0xFFFF))
- break;
- }
- if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
- }
-
- nvm = (u32 *)&profile->device_table[dev_cnt];
- sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
-
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec = (struct i40e_profile_section_header *)((u8 *)profile +
- sec_tbl->section_offset[i]);
-
- /* Skip 'AQ', 'note' and 'name' sections */
- if (sec->section.type != SECTION_TYPE_MMIO)
- continue;
-
- section_size = sec->section.size +
- sizeof(struct i40e_profile_section_header);
-
- /* Write profile */
- status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
- track_id, &offset, &info, NULL);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE,
- "Failed to write profile: offset %d, info %d",
- offset, info);
- break;
- }
- }
- return status;
-}
-
-/**
- * i40evf_add_pinfo_to_list
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package
- * @profile_info_sec: buffer for information section
- * @track_id: package tracking id
- *
- * Register a profile to the list of loaded profiles.
- */
-enum i40e_status_code
-i40evf_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id)
-{
- i40e_status status = 0;
- struct i40e_profile_section_header *sec = NULL;
- struct i40e_profile_info *pinfo;
- u32 offset = 0, info = 0;
-
- sec = (struct i40e_profile_section_header *)profile_info_sec;
- sec->tbl_size = 1;
- sec->data_end = sizeof(struct i40e_profile_section_header) +
- sizeof(struct i40e_profile_info);
- sec->section.type = SECTION_TYPE_INFO;
- sec->section.offset = sizeof(struct i40e_profile_section_header);
- sec->section.size = sizeof(struct i40e_profile_info);
- pinfo = (struct i40e_profile_info *)(profile_info_sec +
- sec->section.offset);
- pinfo->track_id = track_id;
- pinfo->version = profile->version;
- pinfo->op = I40E_DDP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
-
- status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end,
- track_id, &offset, &info, NULL);
- return status;
-}
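
Taken together, the last three helpers form the usual DDP load sequence. An
illustrative chain (pkg_hdr, track_id and the profile_info_sec buffer are
assumed to come from the caller, e.g. an ethtool flash request):

	struct i40e_profile_segment *seg;

	seg = (struct i40e_profile_segment *)
	      i40evf_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
	if (seg && !i40evf_write_profile(hw, seg, track_id))
		/* record the profile as applied so it is not loaded twice */
		i40evf_add_pinfo_to_list(hw, seg, profile_info_sec, track_id);
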
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
deleted file mode 100644
index f300bf271824..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_DEVIDS_H_
-#define _I40E_DEVIDS_H_
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_QEMU 0x1574
-#define I40E_DEV_ID_KX_B 0x1580
-#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_QSFP_A 0x1583
-#define I40E_DEV_ID_QSFP_B 0x1584
-#define I40E_DEV_ID_QSFP_C 0x1585
-#define I40E_DEV_ID_10G_BASE_T 0x1586
-#define I40E_DEV_ID_20G_KR2 0x1587
-#define I40E_DEV_ID_20G_KR2_A 0x1588
-#define I40E_DEV_ID_10G_BASE_T4 0x1589
-#define I40E_DEV_ID_25G_B 0x158A
-#define I40E_DEV_ID_25G_SFP28 0x158B
-#define I40E_DEV_ID_VF 0x154C
-#define I40E_DEV_ID_VF_HV 0x1571
-#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
-#define I40E_DEV_ID_SFP_X722 0x37D0
-#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
-#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
-#define I40E_DEV_ID_SFP_I_X722 0x37D3
-#define I40E_DEV_ID_X722_VF 0x37CD
-
-#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
- (d) == I40E_DEV_ID_QSFP_B || \
- (d) == I40E_DEV_ID_QSFP_C)
-
-#endif /* _I40E_DEVIDS_H_ */
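
As a quick illustration of the predicate above (the surrounding check is an
assumption, not driver code):

	if (i40e_is_40G_device(hw->device_id))
		/* only the QSFP_A/B/C parts reach this branch */
		set_up_40g_link(hw);	/* hypothetical helper */
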
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
deleted file mode 100644
index 1c78de838857..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_HMC_H_
-#define _I40E_HMC_H_
-
-#define I40E_HMC_MAX_BP_COUNT 512
-
-/* forward-declare the HW struct for the compiler */
-struct i40e_hw;
-
-#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
-#define I40E_HMC_PD_CNT_IN_SD 512
-#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
-#define I40E_HMC_PAGED_BP_SIZE 4096
-#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
-#define I40E_FIRST_VF_FPM_ID 16
-
-struct i40e_hmc_obj_info {
- u64 base; /* base addr in FPM */
- u32 max_cnt; /* max count available for this hmc func */
- u32 cnt; /* count of objects driver actually wants to create */
- u64 size; /* size in bytes of one object */
-};
-
-enum i40e_sd_entry_type {
- I40E_SD_TYPE_INVALID = 0,
- I40E_SD_TYPE_PAGED = 1,
- I40E_SD_TYPE_DIRECT = 2
-};
-
-struct i40e_hmc_bp {
- enum i40e_sd_entry_type entry_type;
- struct i40e_dma_mem addr; /* populate to be used by hw */
- u32 sd_pd_index;
- u32 ref_cnt;
-};
-
-struct i40e_hmc_pd_entry {
- struct i40e_hmc_bp bp;
- u32 sd_index;
- bool rsrc_pg;
- bool valid;
-};
-
-struct i40e_hmc_pd_table {
- struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
-	struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw bookkeeping */
- struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
-
- u32 ref_cnt;
- u32 sd_index;
-};
-
-struct i40e_hmc_sd_entry {
- enum i40e_sd_entry_type entry_type;
- bool valid;
-
- union {
- struct i40e_hmc_pd_table pd_table;
- struct i40e_hmc_bp bp;
- } u;
-};
-
-struct i40e_hmc_sd_table {
- struct i40e_virt_mem addr; /* used to track sd_entry allocations */
- u32 sd_cnt;
- u32 ref_cnt;
- struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
-};
-
-struct i40e_hmc_info {
- u32 signature;
-	/* equals the pci func num for the PF; dynamically allocated for VFs */
- u8 hmc_fn_id;
- u16 first_sd_index; /* index of the first available SD */
-
- /* hmc objects */
- struct i40e_hmc_obj_info *hmc_obj;
- struct i40e_virt_mem hmc_obj_virt_mem;
- struct i40e_hmc_sd_table sd_table;
-};
-
-#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
-#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
-#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
-
-#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
-#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
-#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
-
-/**
- * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
- * @hw: pointer to our hw struct
- * @pa: pointer to physical address
- * @sd_index: segment descriptor index
- * @type: whether the sd entry is direct or paged
- **/
-#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
-{ \
- u32 val1, val2, val3; \
- val1 = (u32)(upper_32_bits(pa)); \
- val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
- I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
- ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
- I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
- BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
- val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
- wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
- wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
- wr32((hw), I40E_PFHMC_SDCMD, val3); \
-}
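
The macro performs the three-register programming sequence the hardware
expects: data high, then data low, then the command register. A hedged pairing
with the clear macro defined just below (the index and backing-page source are
illustrative only):

	/* Program segment descriptor 0 as a 2MB direct backing page... */
	I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa, 0,
			     I40E_SD_TYPE_DIRECT);

	/* ...and invalidate it again at HMC teardown */
	I40E_CLEAR_PF_SD_ENTRY(hw, 0, I40E_SD_TYPE_DIRECT);
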
-
-/**
- * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
- * @hw: pointer to our hw struct
- * @sd_index: segment descriptor index
- * @type: whether the sd entry is direct or paged
- **/
-#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
-{ \
- u32 val2, val3; \
- val2 = (I40E_HMC_MAX_BP_COUNT << \
- I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
- ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
- I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
- val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
- wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
- wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
- wr32((hw), I40E_PFHMC_SDCMD, val3); \
-}
-
-/**
- * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
- * @hw: pointer to our hw struct
- * @sd_idx: segment descriptor index
- * @pd_idx: page descriptor index
- **/
-#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
- wr32((hw), I40E_PFHMC_PDINV, \
- (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
- ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
-/**
- * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
- * @hmc_info: pointer to the HMC configuration information structure
- * @type: type of HMC resources we're searching
- * @index: starting index for the object
- * @cnt: number of objects we're trying to create
- * @sd_idx: pointer to return index of the segment descriptor in question
- * @sd_limit: pointer to return the maximum number of segment descriptors
- *
- * This function calculates the segment descriptor index and index limit
- * for the resource defined by i40e_hmc_rsrc_type.
- **/
-#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
-{ \
- u64 fpm_addr, fpm_limit; \
- fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
- (hmc_info)->hmc_obj[(type)].size * (index); \
- fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
- *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
- *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
- /* add one more to the limit to correct our range */ \
- *(sd_limit) += 1; \
-}
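
A worked pass through the arithmetic (object size and count are assumed for
illustration): with hmc_obj[type].base = 0, .size = 128 bytes and a request
for index = 0, cnt = 32768 objects,

	fpm_addr  = 0
	fpm_limit = 32768 * 128                   = 0x400000 (4MB)
	sd_idx    = 0 / 0x200000                  = 0
	sd_limit  = (0x400000 - 1) / 0x200000 + 1 = 2

so the range spans segment descriptors 0 and 1, i.e. two 2MB direct backing
pages; the final "+ 1" turns the inclusive last index into an exclusive limit.
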
-
-/**
- * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
- * @hmc_info: pointer to the HMC configuration information struct
- * @type: HMC resource type we're examining
- * @idx: starting index for the object
- * @cnt: number of objects we're trying to create
- * @pd_index: pointer to return page descriptor index
- * @pd_limit: pointer to return page descriptor index limit
- *
- * Calculates the page descriptor index and index limit for the resource
- * defined by i40e_hmc_rsrc_type.
- **/
-#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
-{ \
- u64 fpm_adr, fpm_limit; \
- fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
- (hmc_info)->hmc_obj[(type)].size * (idx); \
- fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
- *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
- *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
- /* add one more to the limit to correct our range */ \
- *(pd_limit) += 1; \
-}
-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 sd_index,
- enum i40e_sd_entry_type type,
- u64 direct_mode_sz);
-
-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 pd_index,
- struct i40e_dma_mem *rsrc_pg);
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
-
-#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
deleted file mode 100644
index 82b00f70a632..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_LAN_HMC_H_
-#define _I40E_LAN_HMC_H_
-
-/* forward-declare the HW struct for the compiler */
-struct i40e_hw;
-
-/* HMC element context information */
-
-/* Rx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
-struct i40e_hmc_obj_rxq {
- u16 head;
- u16 cpuid; /* bigger than needed, see above for reason */
- u64 base;
- u16 qlen;
-#define I40E_RXQ_CTX_DBUFF_SHIFT 7
- u16 dbuff; /* bigger than needed, see above for reason */
-#define I40E_RXQ_CTX_HBUFF_SHIFT 6
- u16 hbuff; /* bigger than needed, see above for reason */
- u8 dtype;
- u8 dsize;
- u8 crcstrip;
- u8 fc_ena;
- u8 l2tsel;
- u8 hsplit_0;
- u8 hsplit_1;
- u8 showiv;
- u32 rxmax; /* bigger than needed, see above for reason */
- u8 tphrdesc_ena;
- u8 tphwdesc_ena;
- u8 tphdata_ena;
- u8 tphhead_ena;
- u16 lrxqthresh; /* bigger than needed, see above for reason */
- u8 prefena; /* NOTE: normally must be set to 1 at init */
-};
-
-/* Tx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
-struct i40e_hmc_obj_txq {
- u16 head;
- u8 new_context;
- u64 base;
- u8 fc_ena;
- u8 timesync_ena;
- u8 fd_ena;
- u8 alt_vlan_ena;
- u16 thead_wb;
- u8 cpuid;
- u8 head_wb_ena;
- u16 qlen;
- u8 tphrdesc_ena;
- u8 tphrpacket_ena;
- u8 tphwdesc_ena;
- u64 head_wb_addr;
- u32 crc;
- u16 rdylist;
- u8 rdylist_act;
-};
-
-/* for hsplit_0 field of Rx HMC context */
-enum i40e_hmc_obj_rx_hsplit_0 {
- I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
-};
-
-/* fcoe_cntx and fcoe_filt are for debugging purpose only */
-struct i40e_hmc_obj_fcoe_cntx {
- u32 rsv[32];
-};
-
-struct i40e_hmc_obj_fcoe_filt {
- u32 rsv[8];
-};
-
-/* Context sizes for LAN objects */
-enum i40e_hmc_lan_object_size {
- I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
- I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
- I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
- I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
- I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
- I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
- I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
-};
-
-#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
-#define I40E_HMC_OBJ_SIZE_TXQ 128
-#define I40E_HMC_OBJ_SIZE_RXQ 32
-#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
-#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
-
-enum i40e_hmc_lan_rsrc_type {
- I40E_HMC_LAN_FULL = 0,
- I40E_HMC_LAN_TX = 1,
- I40E_HMC_LAN_RX = 2,
- I40E_HMC_FCOE_CTX = 3,
- I40E_HMC_FCOE_FILT = 4,
- I40E_HMC_LAN_MAX = 5
-};
-
-enum i40e_hmc_model {
- I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
- I40E_HMC_MODEL_DIRECT_ONLY = 1,
- I40E_HMC_MODEL_PAGED_ONLY = 2,
- I40E_HMC_MODEL_UNKNOWN,
-};
-
-struct i40e_hmc_lan_create_obj_info {
- struct i40e_hmc_info *hmc_info;
- u32 rsrc_type;
- u32 start_idx;
- u32 count;
- enum i40e_sd_entry_type entry_type;
- u64 direct_mode_sz;
-};
-
-struct i40e_hmc_lan_delete_obj_info {
- struct i40e_hmc_info *hmc_info;
- u32 rsrc_type;
- u32 start_idx;
- u32 count;
-};
-
-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
- u32 rxq_num, u32 fcoe_cntx_num,
- u32 fcoe_filt_num);
-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
- enum i40e_hmc_model model);
-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
-
-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s);
-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s);
-
-#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
deleted file mode 100644
index a358f4b9d5aa..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_PROTOTYPE_H_
-#define _I40E_PROTOTYPE_H_
-
-#include "i40e_type.h"
-#include "i40e_alloc.h"
-#include <linux/avf/virtchnl.h>
-
-/* Prototypes for shared code functions that are not in
- * the standard function pointer structures. These exist
- * mostly because they are needed even before init has
- * happened, and they assist in the early SW and FW setup.
- */
-
-/* adminq functions */
-i40e_status i40evf_init_adminq(struct i40e_hw *hw);
-i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
-void i40e_adminq_init_ring_data(struct i40e_hw *hw);
-i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *events_pending);
-i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details);
-bool i40evf_asq_done(struct i40e_hw *hw);
-
-/* debug function for adminq */
-void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
- void *desc, void *buffer, u16 buf_len);
-
-void i40e_idle_aq(struct i40e_hw *hw);
-void i40evf_resume_aq(struct i40e_hw *hw);
-bool i40evf_check_asq_alive(struct i40e_hw *hw);
-i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
-
-i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
-i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
-
-i40e_status i40e_set_mac_type(struct i40e_hw *hw);
-
-extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
-
-static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
-{
- return i40evf_ptype_lookup[ptype];
-}
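
A sketch of the decode helper in an Rx hot path (the field and enum names on
the decoded struct follow the full i40e_type.h, which this patch also removes;
the checksum helper is hypothetical):

	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return;	/* unrecognized ptype: skip offload decisions */

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
		check_rx_csum(skb, &decoded);	/* hypothetical helper */
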
-
-/* prototypes for functions used for SW locks */
-
-/* i40e_common for VF drivers */
-void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct virtchnl_vf_resource *msg);
-i40e_status i40e_vf_reset(struct i40e_hw *hw);
-i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum virtchnl_ops v_opcode,
- i40e_status v_retval,
- u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings);
-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
- u8 *mac_addr, u16 ethtype, u16 flags,
- u16 vsi_seid, u16 queue, bool is_add,
- struct i40e_control_filter_stats *stats,
- struct i40e_asq_cmd_details *cmd_details);
-void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
- u16 vsi_seid);
-i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
-i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 value);
-u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
-i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *
- cmd_details);
-i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *
- cmd_details);
-struct i40e_generic_seg_header *
-i40evf_find_segment_in_package(u32 segment_type,
- struct i40e_package_header *pkg_header);
-enum i40e_status_code
-i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
- u32 track_id);
-enum i40e_status_code
-i40evf_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id);
-#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
deleted file mode 100644
index 49e1f57d99cc..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ /dev/null
@@ -1,313 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_REGISTER_H_
-#define _I40E_REGISTER_H_
-
-#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
-#define I40E_VFMSIX_PBA1_MAX_INDEX 19
-#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
-#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
-#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
-#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
-#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
-#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
-#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
-#define I40E_VF_ARQH1_ARQH_SHIFT 0
-#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
-#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
-#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
-#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
-#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
-#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
-#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
-#define I40E_VF_ARQT1_ARQT_SHIFT 0
-#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
-#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
-#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
-#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
-#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
-#define I40E_VF_ATQH1_ATQH_SHIFT 0
-#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
-#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
-#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
-#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
-#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
-#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
-#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
-#define I40E_VF_ATQT1_ATQT_SHIFT 0
-#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
-#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
-#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
-#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
-#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _INTVF=0...15 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
-#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
-#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
-#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR01_SWINT_SHIFT 31
-#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
-#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
-#define I40E_VFINT_ITR01_MAX_INDEX 2
-#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
-#define I40E_VFINT_ITRN1_MAX_INDEX 2
-#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
-#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: CORER */
-#define I40E_QRX_TAIL1_MAX_INDEX 15
-#define I40E_QRX_TAIL1_TAIL_SHIFT 0
-#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
-#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: PFR */
-#define I40E_QTX_TAIL1_MAX_INDEX 15
-#define I40E_QTX_TAIL1_TAIL_SHIFT 0
-#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
-#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
-#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TADD_MAX_INDEX 16
-#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TMSG_MAX_INDEX 16
-#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TUADD_MAX_INDEX 16
-#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
-#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_VFQF_HENA_MAX_INDEX 1
-#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_VFQF_HKEY_MAX_INDEX 12
-#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_VFQF_HLUT_MAX_INDEX 15
-#define I40E_VFQF_HLUT_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
-#define I40E_VFQF_HLUT_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
-#define I40E_VFQF_HLUT_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
-#define I40E_VFQF_HLUT_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
-#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION_MAX_INDEX 7
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
-#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
-#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
-#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
-#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
-#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
-#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
-#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
-#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
-#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
-#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
deleted file mode 100644
index 094387db3c11..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ /dev/null
@@ -1,1496 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_TYPE_H_
-#define _I40E_TYPE_H_
-
-#include "i40e_status.h"
-#include "i40e_osdep.h"
-#include "i40e_register.h"
-#include "i40e_adminq.h"
-#include "i40e_hmc.h"
-#include "i40e_lan_hmc.h"
-#include "i40e_devids.h"
-
-/* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
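
A minimal sketch of how the masks generated from I40E_MASK are consumed (the
register and field names come from the i40e_register.h hunk above):

	u32 arqlen = rd32(hw, I40E_VF_ARQLEN1);
	u32 nents  = (arqlen & I40E_VF_ARQLEN1_ARQLEN_MASK) >>
		     I40E_VF_ARQLEN1_ARQLEN_SHIFT;
	bool enabled = arqlen & I40E_VF_ARQLEN1_ARQENABLE_MASK;
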
-
-#define I40E_MAX_VSI_QP 16
-#define I40E_MAX_VF_VSI 3
-#define I40E_MAX_CHAINED_RX_BUFFERS 5
-#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
-
-/* Max default timeout in ms */
-#define I40E_MAX_NVM_TIMEOUT 18000
-
-/* Max timeout in ms for the phy to respond */
-#define I40E_MAX_PHY_TIMEOUT 500
-
-/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
-#define I40E_MS_TO_GTIME(time) ((time) * 1000)
-
-/* forward declaration */
-struct i40e_hw;
-typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-
-/* Data type manipulation macros. */
-
-#define I40E_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
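
A quick numeric check of the ring math (values assumed): with count = 512,
next_to_use = 10 and next_to_clean = 5, the clean index does not exceed the
use index, so the macro yields

	512 + 5 - 10 - 1 = 506 free descriptors;

one slot always stays unused so a full ring can be told apart from an empty one.
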
-
-/* bitfields for Tx queue mapping in QTX_CTL */
-#define I40E_QTX_CTL_VF_QUEUE 0x0
-#define I40E_QTX_CTL_VM_QUEUE 0x1
-#define I40E_QTX_CTL_PF_QUEUE 0x2
-
-/* debug masks - set these bits in hw->debug_mask to control output */
-enum i40e_debug_mask {
- I40E_DEBUG_INIT = 0x00000001,
- I40E_DEBUG_RELEASE = 0x00000002,
-
- I40E_DEBUG_LINK = 0x00000010,
- I40E_DEBUG_PHY = 0x00000020,
- I40E_DEBUG_HMC = 0x00000040,
- I40E_DEBUG_NVM = 0x00000080,
- I40E_DEBUG_LAN = 0x00000100,
- I40E_DEBUG_FLOW = 0x00000200,
- I40E_DEBUG_DCB = 0x00000400,
- I40E_DEBUG_DIAG = 0x00000800,
- I40E_DEBUG_FD = 0x00001000,
- I40E_DEBUG_PACKAGE = 0x00002000,
-
- I40E_DEBUG_AQ_MESSAGE = 0x01000000,
- I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
- I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
- I40E_DEBUG_AQ_COMMAND = 0x06000000,
- I40E_DEBUG_AQ = 0x0F000000,
-
- I40E_DEBUG_USER = 0xF0000000,
-
- I40E_DEBUG_ALL = 0xFFFFFFFF
-};
-
-/* These are structs for managing the hardware information and the operations.
- * The structures of function pointers are filled out at init time when we
- * know for sure exactly which hardware we're working with. This gives us the
- * flexibility of using the same main driver code but adapting to slightly
- * different hardware needs as new parts are developed. For this architecture,
- * the Firmware and AdminQ are intended to insulate the driver from most of the
- * future changes, but these structures will also do part of the job.
- */
-enum i40e_mac_type {
- I40E_MAC_UNKNOWN = 0,
- I40E_MAC_XL710,
- I40E_MAC_VF,
- I40E_MAC_X722,
- I40E_MAC_X722_VF,
- I40E_MAC_GENERIC,
-};
-
-enum i40e_media_type {
- I40E_MEDIA_TYPE_UNKNOWN = 0,
- I40E_MEDIA_TYPE_FIBER,
- I40E_MEDIA_TYPE_BASET,
- I40E_MEDIA_TYPE_BACKPLANE,
- I40E_MEDIA_TYPE_CX4,
- I40E_MEDIA_TYPE_DA,
- I40E_MEDIA_TYPE_VIRTUAL
-};
-
-enum i40e_fc_mode {
- I40E_FC_NONE = 0,
- I40E_FC_RX_PAUSE,
- I40E_FC_TX_PAUSE,
- I40E_FC_FULL,
- I40E_FC_PFC,
- I40E_FC_DEFAULT
-};
-
-enum i40e_set_fc_aq_failures {
- I40E_SET_FC_AQ_FAIL_NONE = 0,
- I40E_SET_FC_AQ_FAIL_GET = 1,
- I40E_SET_FC_AQ_FAIL_SET = 2,
- I40E_SET_FC_AQ_FAIL_UPDATE = 4,
- I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
-};
-
-enum i40e_vsi_type {
- I40E_VSI_MAIN = 0,
- I40E_VSI_VMDQ1 = 1,
- I40E_VSI_VMDQ2 = 2,
- I40E_VSI_CTRL = 3,
- I40E_VSI_FCOE = 4,
- I40E_VSI_MIRROR = 5,
- I40E_VSI_SRIOV = 6,
- I40E_VSI_FDIR = 7,
- I40E_VSI_TYPE_UNKNOWN
-};
-
-enum i40e_queue_type {
- I40E_QUEUE_TYPE_RX = 0,
- I40E_QUEUE_TYPE_TX,
- I40E_QUEUE_TYPE_PE_CEQ,
- I40E_QUEUE_TYPE_UNKNOWN
-};
-
-struct i40e_link_status {
- enum i40e_aq_phy_type phy_type;
- enum i40e_aq_link_speed link_speed;
- u8 link_info;
- u8 an_info;
- u8 req_fec_info;
- u8 fec_info;
- u8 ext_info;
- u8 loopback;
- /* is Link Status Event notification to SW enabled */
- bool lse_enable;
- u16 max_frame_size;
- bool crc_enable;
- u8 pacing;
- u8 requested_speeds;
- u8 module_type[3];
- /* 1st byte: module identifier */
-#define I40E_MODULE_TYPE_SFP 0x03
-#define I40E_MODULE_TYPE_QSFP 0x0D
- /* 2nd byte: ethernet compliance codes for 10/40G */
-#define I40E_MODULE_TYPE_40G_ACTIVE 0x01
-#define I40E_MODULE_TYPE_40G_LR4 0x02
-#define I40E_MODULE_TYPE_40G_SR4 0x04
-#define I40E_MODULE_TYPE_40G_CR4 0x08
-#define I40E_MODULE_TYPE_10G_BASE_SR 0x10
-#define I40E_MODULE_TYPE_10G_BASE_LR 0x20
-#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40
-#define I40E_MODULE_TYPE_10G_BASE_ER 0x80
- /* 3rd byte: ethernet compliance codes for 1G */
-#define I40E_MODULE_TYPE_1000BASE_SX 0x01
-#define I40E_MODULE_TYPE_1000BASE_LX 0x02
-#define I40E_MODULE_TYPE_1000BASE_CX 0x04
-#define I40E_MODULE_TYPE_1000BASE_T 0x08
-};
-
-struct i40e_phy_info {
- struct i40e_link_status link_info;
- struct i40e_link_status link_info_old;
- bool get_link_info;
- enum i40e_media_type media_type;
- /* all the phy types the NVM is capable of */
- u64 phy_types;
-};
-
-#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
-#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
-#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
-#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
-#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
-#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
-#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
-#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
-#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
-#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
-#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
-#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
-#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
-#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
-#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
-#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
-#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
-#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
-#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
-#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
-#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
-#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
-#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
-#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
-#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
-#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
-#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
- BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
-#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
-/* The macro I40E_PHY_TYPE_OFFSET implements a bit shift for some PHY
- * types. Bit 31 is unused in the I40E_CAP_PHY_TYPE_* bit fields, but the
- * i40e_aq_phy_type enumeration has no corresponding gap, so the affected
- * enum values must be shifted up by one to skip it. The only affected
- * values are I40E_PHY_TYPE_25GBASE_*.
- */
-#define I40E_PHY_TYPE_OFFSET 1
-#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
- I40E_PHY_TYPE_OFFSET)
-#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
- I40E_PHY_TYPE_OFFSET)
-#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
- I40E_PHY_TYPE_OFFSET)
-#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
- I40E_PHY_TYPE_OFFSET)
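
A worked instance of the offset, assuming I40E_PHY_TYPE_25GBASE_KR carries the enum value 31 (the enumeration is defined earlier in this header and is not shown in this hunk):

/* Without the offset, BIT_ULL(31) would land on the unused bit 31;
 * with it, 25GBASE_KR occupies bit 32 of the 64-bit capability word:
 *
 *   I40E_CAP_PHY_TYPE_25GBASE_KR == BIT_ULL(31 + 1) == 1ULL << 32
 */
bool kr_25g = !!(hw->phy.phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR);
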
-#define I40E_HW_CAP_MAX_GPIO 30
-/* Capabilities of a PF or a VF or the whole device */
-struct i40e_hw_capabilities {
- u32 switch_mode;
-#define I40E_NVM_IMAGE_TYPE_EVB 0x0
-#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
-#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
-
- u32 management_mode;
- u32 mng_protocols_over_mctp;
-#define I40E_MNG_PROTOCOL_PLDM 0x2
-#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
-#define I40E_MNG_PROTOCOL_NCSI 0x8
- u32 npar_enable;
- u32 os2bmc;
- u32 valid_functions;
- bool sr_iov_1_1;
- bool vmdq;
- bool evb_802_1_qbg; /* Edge Virtual Bridging */
- bool evb_802_1_qbh; /* Bridge Port Extension */
- bool dcb;
- bool fcoe;
- bool iscsi; /* Indicates iSCSI enabled */
- bool flex10_enable;
- bool flex10_capable;
- u32 flex10_mode;
-#define I40E_FLEX10_MODE_UNKNOWN 0x0
-#define I40E_FLEX10_MODE_DCC 0x1
-#define I40E_FLEX10_MODE_DCI 0x2
-
- u32 flex10_status;
-#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
-#define I40E_FLEX10_STATUS_VC_MODE 0x2
-
- bool sec_rev_disabled;
- bool update_disabled;
-#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
-#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
-
- bool mgmt_cem;
- bool ieee_1588;
- bool iwarp;
- bool fd;
- u32 fd_filters_guaranteed;
- u32 fd_filters_best_effort;
- bool rss;
- u32 rss_table_size;
- u32 rss_table_entry_width;
- bool led[I40E_HW_CAP_MAX_GPIO];
- bool sdp[I40E_HW_CAP_MAX_GPIO];
- u32 nvm_image_type;
- u32 num_flow_director_filters;
- u32 num_vfs;
- u32 vf_base_id;
- u32 num_vsis;
- u32 num_rx_qp;
- u32 num_tx_qp;
- u32 base_queue;
- u32 num_msix_vectors;
- u32 num_msix_vectors_vf;
- u32 led_pin_num;
- u32 sdp_pin_num;
- u32 mdio_port_num;
- u32 mdio_port_mode;
- u8 rx_buf_chain_len;
- u32 enabled_tcmap;
- u32 maxtc;
- u64 wr_csr_prot;
-};
-
-struct i40e_mac_info {
- enum i40e_mac_type type;
- u8 addr[ETH_ALEN];
- u8 perm_addr[ETH_ALEN];
- u8 san_addr[ETH_ALEN];
- u16 max_fcoeq;
-};
-
-enum i40e_aq_resources_ids {
- I40E_NVM_RESOURCE_ID = 1
-};
-
-enum i40e_aq_resource_access_type {
- I40E_RESOURCE_READ = 1,
- I40E_RESOURCE_WRITE
-};
-
-struct i40e_nvm_info {
- u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
- u32 timeout; /* [ms] */
- u16 sr_size; /* Shadow RAM size in words */
-	bool blank_nvm_mode;	/* is NVM empty (no FW present) */
- u16 version; /* NVM package version */
- u32 eetrack; /* NVM data version */
- u32 oem_ver; /* OEM version info */
-};
-
-/* definitions used in NVM update support */
-
-enum i40e_nvmupd_cmd {
- I40E_NVMUPD_INVALID,
- I40E_NVMUPD_READ_CON,
- I40E_NVMUPD_READ_SNT,
- I40E_NVMUPD_READ_LCB,
- I40E_NVMUPD_READ_SA,
- I40E_NVMUPD_WRITE_ERA,
- I40E_NVMUPD_WRITE_CON,
- I40E_NVMUPD_WRITE_SNT,
- I40E_NVMUPD_WRITE_LCB,
- I40E_NVMUPD_WRITE_SA,
- I40E_NVMUPD_CSUM_CON,
- I40E_NVMUPD_CSUM_SA,
- I40E_NVMUPD_CSUM_LCB,
- I40E_NVMUPD_STATUS,
- I40E_NVMUPD_EXEC_AQ,
- I40E_NVMUPD_GET_AQ_RESULT,
- I40E_NVMUPD_GET_AQ_EVENT,
-};
-
-enum i40e_nvmupd_state {
- I40E_NVMUPD_STATE_INIT,
- I40E_NVMUPD_STATE_READING,
- I40E_NVMUPD_STATE_WRITING,
- I40E_NVMUPD_STATE_INIT_WAIT,
- I40E_NVMUPD_STATE_WRITE_WAIT,
- I40E_NVMUPD_STATE_ERROR
-};
-
-/* nvm_access definition and its masks/shifts need to be accessible to
- * application, core driver, and shared code. Where is the right file?
- */
-#define I40E_NVM_READ 0xB
-#define I40E_NVM_WRITE 0xC
-
-#define I40E_NVM_MOD_PNT_MASK 0xFF
-
-#define I40E_NVM_TRANS_SHIFT 8
-#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
-#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
-#define I40E_NVM_PRESERVATION_FLAGS_MASK \
- (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
-#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
-#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
-#define I40E_NVM_CON 0x0
-#define I40E_NVM_SNT 0x1
-#define I40E_NVM_LCB 0x2
-#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
-#define I40E_NVM_ERA 0x4
-#define I40E_NVM_CSUM 0x8
-#define I40E_NVM_AQE 0xe
-#define I40E_NVM_EXEC 0xf
-
-#define I40E_NVM_ADAPT_SHIFT 16
-#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT)
-
-#define I40E_NVMUPD_MAX_DATA 4096
-#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
-
-struct i40e_nvm_access {
- u32 command;
- u32 config;
- u32 offset; /* in bytes */
- u32 data_size; /* in bytes */
- u8 data[1];
-};
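
A minimal sketch of filling the request header for a one-shot ("SA", start + finish in a single call) read, using the masks above; allocating room for the trailing data[] payload and the plumbing that carries the request are omitted:

struct i40e_nvm_access req = {
        .command   = I40E_NVM_READ,
        /* module pointer 0 in bits 0-7, SA transaction in bits 8-11 */
        .config    = (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) &
                     I40E_NVM_TRANS_MASK,
        .offset    = 0,         /* byte offset into the NVM */
        .data_size = 16,        /* bytes to read back into data[] */
};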
-
-/* (Q)SFP module access definitions */
-#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
-#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
-#define I40E_MODULE_TYPE_ADDR 0x00
-#define I40E_MODULE_REVISION_ADDR 0x01
-#define I40E_MODULE_SFF_8472_COMP 0x5E
-#define I40E_MODULE_SFF_8472_SWAP 0x5C
-#define I40E_MODULE_SFF_ADDR_MODE 0x04
-#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
-#define I40E_MODULE_TYPE_QSFP28 0x11
-#define I40E_MODULE_QSFP_MAX_LEN 640
-
-/* PCI bus types */
-enum i40e_bus_type {
- i40e_bus_type_unknown = 0,
- i40e_bus_type_pci,
- i40e_bus_type_pcix,
- i40e_bus_type_pci_express,
- i40e_bus_type_reserved
-};
-
-/* PCI bus speeds */
-enum i40e_bus_speed {
- i40e_bus_speed_unknown = 0,
- i40e_bus_speed_33 = 33,
- i40e_bus_speed_66 = 66,
- i40e_bus_speed_100 = 100,
- i40e_bus_speed_120 = 120,
- i40e_bus_speed_133 = 133,
- i40e_bus_speed_2500 = 2500,
- i40e_bus_speed_5000 = 5000,
- i40e_bus_speed_8000 = 8000,
- i40e_bus_speed_reserved
-};
-
-/* PCI bus widths */
-enum i40e_bus_width {
- i40e_bus_width_unknown = 0,
- i40e_bus_width_pcie_x1 = 1,
- i40e_bus_width_pcie_x2 = 2,
- i40e_bus_width_pcie_x4 = 4,
- i40e_bus_width_pcie_x8 = 8,
- i40e_bus_width_32 = 32,
- i40e_bus_width_64 = 64,
- i40e_bus_width_reserved
-};
-
-/* Bus parameters */
-struct i40e_bus_info {
- enum i40e_bus_speed speed;
- enum i40e_bus_width width;
- enum i40e_bus_type type;
-
- u16 func;
- u16 device;
- u16 lan_id;
- u16 bus_id;
-};
-
-/* Flow control (FC) parameters */
-struct i40e_fc_info {
- enum i40e_fc_mode current_mode; /* FC mode in effect */
- enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
-};
-
-#define I40E_MAX_TRAFFIC_CLASS 8
-#define I40E_MAX_USER_PRIORITY 8
-#define I40E_DCBX_MAX_APPS 32
-#define I40E_LLDPDU_SIZE 1500
-
-/* IEEE 802.1Qaz ETS Configuration data */
-struct i40e_ieee_ets_config {
- u8 willing;
- u8 cbs;
- u8 maxtcs;
- u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
- u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
- u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
-};
-
-/* IEEE 802.1Qaz ETS Recommendation data */
-struct i40e_ieee_ets_recommend {
- u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
- u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
- u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
-};
-
-/* IEEE 802.1Qaz PFC Configuration data */
-struct i40e_ieee_pfc_config {
- u8 willing;
- u8 mbc;
- u8 pfccap;
- u8 pfcenable;
-};
-
-/* IEEE 802.1Qaz Application Priority data */
-struct i40e_ieee_app_priority_table {
- u8 priority;
- u8 selector;
- u16 protocolid;
-};
-
-struct i40e_dcbx_config {
- u32 numapps;
- u32 tlv_status; /* CEE mode TLV status */
- struct i40e_ieee_ets_config etscfg;
- struct i40e_ieee_ets_recommend etsrec;
- struct i40e_ieee_pfc_config pfc;
- struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
-};
-
-/* Port hardware description */
-struct i40e_hw {
- u8 __iomem *hw_addr;
- void *back;
-
- /* subsystem structs */
- struct i40e_phy_info phy;
- struct i40e_mac_info mac;
- struct i40e_bus_info bus;
- struct i40e_nvm_info nvm;
- struct i40e_fc_info fc;
-
- /* pci info */
- u16 device_id;
- u16 vendor_id;
- u16 subsystem_device_id;
- u16 subsystem_vendor_id;
- u8 revision_id;
- u8 port;
- bool adapter_stopped;
-
- /* capabilities for entire device and PCI func */
- struct i40e_hw_capabilities dev_caps;
- struct i40e_hw_capabilities func_caps;
-
- /* Flow Director shared filter space */
- u16 fdir_shared_filter_count;
-
- /* device profile info */
- u8 pf_id;
- u16 main_vsi_seid;
-
- /* for multi-function MACs */
- u16 partition_id;
- u16 num_partitions;
- u16 num_ports;
-
- /* Closest numa node to the device */
- u16 numa_node;
-
- /* Admin Queue info */
- struct i40e_adminq_info aq;
-
- /* state of nvm update process */
- enum i40e_nvmupd_state nvmupd_state;
- struct i40e_aq_desc nvm_wb_desc;
- struct i40e_aq_desc nvm_aq_event_desc;
- struct i40e_virt_mem nvm_buff;
- bool nvm_release_on_done;
- u16 nvm_wait_opcode;
-
- /* HMC info */
- struct i40e_hmc_info hmc; /* HMC info struct */
-
- /* LLDP/DCBX Status */
- u16 dcbx_status;
-
-#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
-#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
-
- /* DCBX info */
- struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
- struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
- struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
-
- /* Used in set switch config AQ command */
- u16 switch_tag;
- u16 first_tag;
- u16 second_tag;
-
- /* debug mask */
- u32 debug_mask;
- char err_str[16];
-};
-
-static inline bool i40e_is_vf(struct i40e_hw *hw)
-{
- return (hw->mac.type == I40E_MAC_VF ||
- hw->mac.type == I40E_MAC_X722_VF);
-}
-
-struct i40e_driver_version {
- u8 major_version;
- u8 minor_version;
- u8 build_version;
- u8 subbuild_version;
- u8 driver_string[32];
-};
-
-/* RX Descriptors */
-union i40e_16byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow director filter id */
- __le32 fcoe_param; /* FCoE DDP Context id */
- } hi_dword;
- } qword0;
- struct {
- /* ext status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- } wb; /* writeback */
-};
-
-union i40e_32byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
-		/* bit 0 of hdr_addr is the DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fcoe_param; /* FCoE DDP Context id */
- /* Flow director filter id in case of
- * Programming status desc WB
- */
- __le32 fd_id;
- } hi_dword;
- } qword0;
- struct {
- /* status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- struct {
- __le16 ext_status; /* extended status */
- __le16 rsvd;
- __le16 l2tag2_1;
- __le16 l2tag2_2;
- } qword2;
- struct {
- union {
- __le32 flex_bytes_lo;
- __le32 pe_status;
- } lo_dword;
- union {
- __le32 flex_bytes_hi;
- __le32 fd_id;
- } hi_dword;
- } qword3;
- } wb; /* writeback */
-};
-
-enum i40e_rx_desc_status_bits {
- /* Note: These are predefined bit offsets */
- I40E_RX_DESC_STATUS_DD_SHIFT = 0,
- I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
- I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
- I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
- I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
- I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
- I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- /* Note: Bit 8 is reserved in X710 and XL710 */
- I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
- I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
- I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
- I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
- I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
-	/* Note: for non-tunnel packets, INT_UDP_0 is the correct status
-	 * bit for the UDP header
-	 */
- I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
- I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
-};
-
-#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
- << I40E_RXD_QW1_STATUS_SHIFT)
-
-#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
-
-#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
- BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
-
-enum i40e_rx_desc_fltstat_values {
- I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
- I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
- I40E_RX_DESC_FLTSTAT_RSV = 2,
- I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
-};
-
-#define I40E_RXD_QW1_ERROR_SHIFT 19
-#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
-
-enum i40e_rx_desc_error_bits {
- /* Note: These are predefined bit offsets */
- I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
- I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
- I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
- I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
- I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
- I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
- I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
- I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
- I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
-};
-
-enum i40e_rx_desc_error_l3l4e_fcoe_masks {
- I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
- I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
- I40E_RX_DESC_ERROR_L3L4E_FC = 2,
- I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
- I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
-};
-
-#define I40E_RXD_QW1_PTYPE_SHIFT 30
-#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
-
-/* Packet type non-IP values */
-enum i40e_rx_l2_ptype {
- I40E_RX_PTYPE_L2_RESERVED = 0,
- I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
- I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
- I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
- I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
- I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
- I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
- I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
- I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- I40E_RX_PTYPE_L2_ARP = 11,
- I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
- I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
- I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
- I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
- I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
- I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
-};
-
-struct i40e_rx_ptype_decoded {
- u32 ptype:8;
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:1;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum i40e_rx_ptype_outer_ip {
- I40E_RX_PTYPE_OUTER_L2 = 0,
- I40E_RX_PTYPE_OUTER_IP = 1
-};
-
-enum i40e_rx_ptype_outer_ip_ver {
- I40E_RX_PTYPE_OUTER_NONE = 0,
- I40E_RX_PTYPE_OUTER_IPV4 = 0,
- I40E_RX_PTYPE_OUTER_IPV6 = 1
-};
-
-enum i40e_rx_ptype_outer_fragmented {
- I40E_RX_PTYPE_NOT_FRAG = 0,
- I40E_RX_PTYPE_FRAG = 1
-};
-
-enum i40e_rx_ptype_tunnel_type {
- I40E_RX_PTYPE_TUNNEL_NONE = 0,
- I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum i40e_rx_ptype_tunnel_end_prot {
- I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
- I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum i40e_rx_ptype_inner_prot {
- I40E_RX_PTYPE_INNER_PROT_NONE = 0,
- I40E_RX_PTYPE_INNER_PROT_UDP = 1,
- I40E_RX_PTYPE_INNER_PROT_TCP = 2,
- I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
- I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
- I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
-};
-
-enum i40e_rx_ptype_payload_layer {
- I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
-#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
-#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
-
-#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
-#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
- I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
-
-#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
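
Taken together, the qword1 masks above carve up the 64-bit writeback word; a sketch of the decode (qw1 comes from the descriptor's status_error_len field):

u64 qw1 = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

u32 status = (qw1 & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT;
u32 error  = (qw1 & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT;
u8  ptype  = (qw1 & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
u16 len    = (qw1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
             I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

/* nothing in the descriptor is valid until the DD bit is set */
if (status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))
        /* ... hand 'len' bytes of packet data up the stack ... */;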
-
-enum i40e_rx_desc_ext_status_bits {
- /* Note: These are predefined bit offsets */
- I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
- I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
- I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
- I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
- I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
- I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
- I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
-};
-
-enum i40e_rx_desc_pe_status_bits {
- /* Note: These are predefined bit offsets */
- I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
- I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
- I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
- I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
- I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
- I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
- I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
- I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
- I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
-};
-
-#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
-#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
-
-#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
-#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
- I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
-
-#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
-#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
- I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
-
-enum i40e_rx_prog_status_desc_status_bits {
- /* Note: These are predefined bit offsets */
- I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
- I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
-};
-
-enum i40e_rx_prog_status_desc_prog_id_masks {
- I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
- I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
- I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
-};
-
-enum i40e_rx_prog_status_desc_error_bits {
- /* Note: These are predefined bit offsets */
- I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
- I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
- I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
- I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
-};
-
-/* TX Descriptor */
-struct i40e_tx_desc {
- __le64 buffer_addr; /* Address of descriptor's data buf */
- __le64 cmd_type_offset_bsz;
-};
-
-#define I40E_TXD_QW1_DTYPE_SHIFT 0
-#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
-
-enum i40e_tx_desc_dtype_value {
- I40E_TX_DESC_DTYPE_DATA = 0x0,
- I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
- I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
- I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
- I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
- I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
- I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
- I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
- I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
- I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
-};
-
-#define I40E_TXD_QW1_CMD_SHIFT 4
-#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
-
-enum i40e_tx_desc_cmd_bits {
- I40E_TX_DESC_CMD_EOP = 0x0001,
- I40E_TX_DESC_CMD_RS = 0x0002,
- I40E_TX_DESC_CMD_ICRC = 0x0004,
- I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
- I40E_TX_DESC_CMD_DUMMY = 0x0010,
- I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
- I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
- I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
- I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
- I40E_TX_DESC_CMD_FCOET = 0x0080,
- I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
- I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
- I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
- I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
- I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
- I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
- I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
- I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
-};
-
-#define I40E_TXD_QW1_OFFSET_SHIFT 16
-#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
- I40E_TXD_QW1_OFFSET_SHIFT)
-
-enum i40e_tx_desc_length_fields {
- /* Note: These are predefined bit offsets */
- I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
- I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
-};
-
-#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
-#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
- I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
-
-#define I40E_TXD_QW1_L2TAG1_SHIFT 48
-#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
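
The data-descriptor fields compose into cmd_type_offset_bsz with the shifts above; this mirrors the build_ctob() helper pattern used in the driver's transmit path (DMA mapping and ring bookkeeping omitted):

static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
                         u32 td_tag)
{
        return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
                           ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
                           ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
                           ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
                           ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

/* e.g. last fragment of a packet, requesting CRC insertion: */
tx_desc->buffer_addr = cpu_to_le64(dma_addr);
tx_desc->cmd_type_offset_bsz =
        build_ctob(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_ICRC,
                   td_offset, size, 0);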
-
-/* Context descriptors */
-struct i40e_tx_context_desc {
- __le32 tunneling_params;
- __le16 l2tag2;
- __le16 rsvd;
- __le64 type_cmd_tso_mss;
-};
-
-#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
-#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
-
-#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
-#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
-
-enum i40e_tx_ctx_desc_cmd_bits {
- I40E_TX_CTX_DESC_TSO = 0x01,
- I40E_TX_CTX_DESC_TSYN = 0x02,
- I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
- I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
- I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
- I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
- I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
- I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
- I40E_TX_CTX_DESC_SWPE = 0x40
-};
-
-#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
-#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
- I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
-
-#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
-#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
- I40E_TXD_CTX_QW1_MSS_SHIFT)
-
-#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
-#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
-
-#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
-#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
- I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
-
-enum i40e_tx_ctx_desc_eipt_offload {
- I40E_TX_CTX_EXT_IP_NONE = 0x0,
- I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
- I40E_TX_CTX_EXT_IP_IPV4 = 0x3
-};
-
-#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
-#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
-
-#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
-#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-
-#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
-#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
-
-#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
- BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
-
-#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
-
-#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
-#define I40E_TXD_CTX_QW0_NATLEN_MASK	(0x7FULL << \
- I40E_TXD_CTX_QW0_NATLEN_SHIFT)
-
-#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
-#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
- I40E_TXD_CTX_QW0_DECTTL_SHIFT)
-
-#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
-#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
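
The context-descriptor quadword composes the same way; a sketch of a plain (non-tunneled) TSO context, assuming skb and hdr_len come from the transmit path:

u64 cd_cmd     = I40E_TX_CTX_DESC_TSO;
u64 cd_tso_len = skb->len - hdr_len;            /* TSO payload bytes */
u64 cd_mss     = skb_shinfo(skb)->gso_size;

context_desc->tunneling_params = 0;
context_desc->l2tag2 = 0;
context_desc->type_cmd_tso_mss =
        cpu_to_le64(I40E_TX_DESC_DTYPE_CONTEXT |
                    (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
                    (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
                    (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT));
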
-struct i40e_filter_program_desc {
- __le32 qindex_flex_ptype_vsi;
- __le32 rsvd;
- __le32 dtype_cmd_cntindex;
- __le32 fd_id;
-};
-#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
-#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
- I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
-#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
-#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
- I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
-#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
-#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
- I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
-
-/* Packet Classifier Types for filters */
-enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use.
-	 * Values 29, 30, and 32 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
- I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
- I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use.
-	 * Values 39, 40, and 42 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
- I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
- I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
- /* Note: Value 47 is reserved for future use */
- I40E_FILTER_PCTYPE_FCOE_OX = 48,
- I40E_FILTER_PCTYPE_FCOE_RX = 49,
- I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
- /* Note: Values 51-62 are reserved for future use */
- I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
-};
-
-enum i40e_filter_program_desc_dest {
- I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
- I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
- I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
-};
-
-enum i40e_filter_program_desc_fd_status {
- I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
- I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
- I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
- I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
-};
-
-#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
-
-#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
-#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
- I40E_TXD_FLTR_QW1_CMD_SHIFT)
-
-#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
-
-enum i40e_filter_program_desc_pcmd {
- I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
- I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
-};
-
-#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
-
-#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
-
-#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
- I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
- I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
-
-#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
-#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
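
A condensed sketch of programming a Flow Director "add" rule that steers IPv4/TCP to a given queue, composed from the QW0/QW1 masks above (the sideband-vs-ATR distinction and ring handling are omitted; q_index and sw_fd_id are assumed inputs):

u32 flex_ptype, dtype_cmd;

flex_ptype  = (q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
              I40E_TXD_FLTR_QW0_QINDEX_MASK;
flex_ptype |= (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
               I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
              I40E_TXD_FLTR_QW0_PCTYPE_MASK;

dtype_cmd  = I40E_TX_DESC_DTYPE_FILTER_PROG;
dtype_cmd |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
             I40E_TXD_FLTR_QW1_PCMD_SHIFT;
dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
             I40E_TXD_FLTR_QW1_DEST_SHIFT;

fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->dtype_cmd_cntindex    = cpu_to_le32(dtype_cmd);
fdir_desc->fd_id                 = cpu_to_le32(sw_fd_id);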
-
-enum i40e_filter_type {
- I40E_FLOW_DIRECTOR_FLTR = 0,
- I40E_PE_QUAD_HASH_FLTR = 1,
- I40E_ETHERTYPE_FLTR,
- I40E_FCOE_CTX_FLTR,
- I40E_MAC_VLAN_FLTR,
- I40E_HASH_FLTR
-};
-
-struct i40e_vsi_context {
- u16 seid;
- u16 uplink_seid;
- u16 vsi_number;
- u16 vsis_allocated;
- u16 vsis_unallocated;
- u16 flags;
- u8 pf_num;
- u8 vf_num;
- u8 connection_type;
- struct i40e_aqc_vsi_properties_data info;
-};
-
-struct i40e_veb_context {
- u16 seid;
- u16 uplink_seid;
- u16 veb_number;
- u16 vebs_allocated;
- u16 vebs_unallocated;
- u16 flags;
- struct i40e_aqc_get_veb_parameters_completion info;
-};
-
-/* Statistics collected by each port, VSI, VEB, and S-channel */
-struct i40e_eth_stats {
- u64 rx_bytes; /* gorc */
- u64 rx_unicast; /* uprc */
- u64 rx_multicast; /* mprc */
- u64 rx_broadcast; /* bprc */
- u64 rx_discards; /* rdpc */
- u64 rx_unknown_protocol; /* rupp */
- u64 tx_bytes; /* gotc */
- u64 tx_unicast; /* uptc */
- u64 tx_multicast; /* mptc */
- u64 tx_broadcast; /* bptc */
- u64 tx_discards; /* tdpc */
- u64 tx_errors; /* tepc */
-};
-
-/* Statistics collected per VEB per TC */
-struct i40e_veb_tc_stats {
- u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
- u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
- u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
- u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
-};
-
-/* Statistics collected by the MAC */
-struct i40e_hw_port_stats {
- /* eth stats collected by the port */
- struct i40e_eth_stats eth;
-
- /* additional port specific stats */
- u64 tx_dropped_link_down; /* tdold */
- u64 crc_errors; /* crcerrs */
- u64 illegal_bytes; /* illerrc */
- u64 error_bytes; /* errbc */
- u64 mac_local_faults; /* mlfc */
- u64 mac_remote_faults; /* mrfc */
- u64 rx_length_errors; /* rlec */
- u64 link_xon_rx; /* lxonrxc */
- u64 link_xoff_rx; /* lxoffrxc */
- u64 priority_xon_rx[8]; /* pxonrxc[8] */
- u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
- u64 link_xon_tx; /* lxontxc */
- u64 link_xoff_tx; /* lxofftxc */
- u64 priority_xon_tx[8]; /* pxontxc[8] */
- u64 priority_xoff_tx[8]; /* pxofftxc[8] */
- u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
- u64 rx_size_64; /* prc64 */
- u64 rx_size_127; /* prc127 */
- u64 rx_size_255; /* prc255 */
- u64 rx_size_511; /* prc511 */
- u64 rx_size_1023; /* prc1023 */
- u64 rx_size_1522; /* prc1522 */
- u64 rx_size_big; /* prc9522 */
- u64 rx_undersize; /* ruc */
- u64 rx_fragments; /* rfc */
- u64 rx_oversize; /* roc */
- u64 rx_jabber; /* rjc */
- u64 tx_size_64; /* ptc64 */
- u64 tx_size_127; /* ptc127 */
- u64 tx_size_255; /* ptc255 */
- u64 tx_size_511; /* ptc511 */
- u64 tx_size_1023; /* ptc1023 */
- u64 tx_size_1522; /* ptc1522 */
- u64 tx_size_big; /* ptc9522 */
- u64 mac_short_packet_dropped; /* mspdc */
- u64 checksum_error; /* xec */
- /* flow director stats */
- u64 fd_atr_match;
- u64 fd_sb_match;
- u64 fd_atr_tunnel_match;
- u32 fd_atr_status;
- u32 fd_sb_status;
- /* EEE LPI */
- u32 tx_lpi_status;
- u32 rx_lpi_status;
- u64 tx_lpi_count; /* etlpic */
- u64 rx_lpi_count; /* erlpic */
-};
-
-/* Checksum and Shadow RAM pointers */
-#define I40E_SR_NVM_CONTROL_WORD 0x00
-#define I40E_EMP_MODULE_PTR 0x0F
-#define I40E_SR_EMP_MODULE_PTR 0x48
-#define I40E_NVM_OEM_VER_OFF 0x83
-#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
-#define I40E_SR_NVM_WAKE_ON_LAN 0x19
-#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
-#define I40E_SR_NVM_EETRACK_LO 0x2D
-#define I40E_SR_NVM_EETRACK_HI 0x2E
-#define I40E_SR_VPD_PTR 0x2F
-#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
-#define I40E_SR_SW_CHECKSUM_WORD 0x3F
-
-/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
-#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
-#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
-#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
-#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
-#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
-#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
-#define I40E_PTR_TYPE BIT(15)
-
-/* Shadow RAM related */
-#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
-#define I40E_SR_WORDS_IN_1KB 512
-/* Checksum should be calculated such that after adding all the words,
- * including the checksum word itself, the sum should be 0xBABA.
- */
-#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
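
In code, the rule above is plain 16-bit modular arithmetic. A sketch, with a hypothetical sr_word() accessor standing in for the Shadow RAM read (the real calculation also special-cases the VPD and PCIe-ALT modules, skipped here):

u16 sum = 0, i;

/* Sum every word except the checksum word itself ... */
for (i = 0; i < sr_size; i++)
        if (i != I40E_SR_SW_CHECKSUM_WORD)
                sum += sr_word(i);

/* ... then choose the checksum so the grand total wraps to 0xBABA */
u16 checksum = (u16)(I40E_SR_SW_CHECKSUM_BASE - sum);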
-
-#define I40E_SRRD_SRCTL_ATTEMPTS 100000
-
-enum i40e_switch_element_types {
- I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
- I40E_SWITCH_ELEMENT_TYPE_PF = 2,
- I40E_SWITCH_ELEMENT_TYPE_VF = 3,
- I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
- I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
- I40E_SWITCH_ELEMENT_TYPE_PE = 16,
- I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
- I40E_SWITCH_ELEMENT_TYPE_PA = 18,
- I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
-};
-
-/* Supported EtherType filters */
-enum i40e_ether_type_index {
- I40E_ETHER_TYPE_1588 = 0,
- I40E_ETHER_TYPE_FIP = 1,
- I40E_ETHER_TYPE_OUI_EXTENDED = 2,
- I40E_ETHER_TYPE_MAC_CONTROL = 3,
- I40E_ETHER_TYPE_LLDP = 4,
- I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
- I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
- I40E_ETHER_TYPE_QCN_CNM = 7,
- I40E_ETHER_TYPE_8021X = 8,
- I40E_ETHER_TYPE_ARP = 9,
- I40E_ETHER_TYPE_RSV1 = 10,
- I40E_ETHER_TYPE_RSV2 = 11,
-};
-
-/* Filter context base size is 1K */
-#define I40E_HASH_FILTER_BASE_SIZE 1024
-/* Supported Hash filter values */
-enum i40e_hash_filter_size {
- I40E_HASH_FILTER_SIZE_1K = 0,
- I40E_HASH_FILTER_SIZE_2K = 1,
- I40E_HASH_FILTER_SIZE_4K = 2,
- I40E_HASH_FILTER_SIZE_8K = 3,
- I40E_HASH_FILTER_SIZE_16K = 4,
- I40E_HASH_FILTER_SIZE_32K = 5,
- I40E_HASH_FILTER_SIZE_64K = 6,
- I40E_HASH_FILTER_SIZE_128K = 7,
- I40E_HASH_FILTER_SIZE_256K = 8,
- I40E_HASH_FILTER_SIZE_512K = 9,
- I40E_HASH_FILTER_SIZE_1M = 10,
-};
-
-/* DMA context base size is 0.5K */
-#define I40E_DMA_CNTX_BASE_SIZE 512
-/* Supported DMA context values */
-enum i40e_dma_cntx_size {
- I40E_DMA_CNTX_SIZE_512 = 0,
- I40E_DMA_CNTX_SIZE_1K = 1,
- I40E_DMA_CNTX_SIZE_2K = 2,
- I40E_DMA_CNTX_SIZE_4K = 3,
- I40E_DMA_CNTX_SIZE_8K = 4,
- I40E_DMA_CNTX_SIZE_16K = 5,
- I40E_DMA_CNTX_SIZE_32K = 6,
- I40E_DMA_CNTX_SIZE_64K = 7,
- I40E_DMA_CNTX_SIZE_128K = 8,
- I40E_DMA_CNTX_SIZE_256K = 9,
-};
-
-/* Supported Hash look up table (LUT) sizes */
-enum i40e_hash_lut_size {
- I40E_HASH_LUT_SIZE_128 = 0,
- I40E_HASH_LUT_SIZE_512 = 1,
-};
-
-/* Structure to hold per-PF filter control settings */
-struct i40e_filter_control_settings {
- /* number of PE Quad Hash filter buckets */
- enum i40e_hash_filter_size pe_filt_num;
- /* number of PE Quad Hash contexts */
- enum i40e_dma_cntx_size pe_cntx_num;
- /* number of FCoE filter buckets */
- enum i40e_hash_filter_size fcoe_filt_num;
- /* number of FCoE DDP contexts */
- enum i40e_dma_cntx_size fcoe_cntx_num;
- /* size of the Hash LUT */
- enum i40e_hash_lut_size hash_lut_size;
- /* enable FDIR filters for PF and its VFs */
- bool enable_fdir;
- /* enable Ethertype filters for PF and its VFs */
- bool enable_ethtype;
- /* enable MAC/VLAN filters for PF and its VFs */
- bool enable_macvlan;
-};
-
-/* Structure to hold device level control filter counts */
-struct i40e_control_filter_stats {
- u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
- u16 etype_used; /* Used perfect EtherType filters */
-	u16 mac_etype_free;   /* Unused perfect match MAC/EtherType filters */
-	u16 etype_free;       /* Unused perfect EtherType filters */
-};
-
-enum i40e_reset_type {
- I40E_RESET_POR = 0,
- I40E_RESET_CORER = 1,
- I40E_RESET_GLOBR = 2,
- I40E_RESET_EMPR = 3,
-};
-
-/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR 0x06
-#define I40E_SR_LLDP_CFG_PTR 0x31
-
-/* RSS Hash Table Size */
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
-
-/* INPUT SET MASK for RSS, flow director and flexible payload */
-#define I40E_FD_INSET_L3_SRC_SHIFT 47
-#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \
- I40E_FD_INSET_L3_SRC_SHIFT)
-#define I40E_FD_INSET_L3_DST_SHIFT 35
-#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \
- I40E_FD_INSET_L3_DST_SHIFT)
-#define I40E_FD_INSET_L4_SRC_SHIFT 34
-#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \
- I40E_FD_INSET_L4_SRC_SHIFT)
-#define I40E_FD_INSET_L4_DST_SHIFT 33
-#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \
- I40E_FD_INSET_L4_DST_SHIFT)
-#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31
-#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \
- I40E_FD_INSET_VERIFY_TAG_SHIFT)
-
-#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17
-#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \
- I40E_FD_INSET_FLEX_WORD50_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16
-#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \
- I40E_FD_INSET_FLEX_WORD51_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15
-#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \
- I40E_FD_INSET_FLEX_WORD52_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14
-#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \
- I40E_FD_INSET_FLEX_WORD53_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13
-#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \
- I40E_FD_INSET_FLEX_WORD54_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12
-#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \
- I40E_FD_INSET_FLEX_WORD55_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11
-#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \
- I40E_FD_INSET_FLEX_WORD56_SHIFT)
-#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10
-#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
- I40E_FD_INSET_FLEX_WORD57_SHIFT)
-
-/* Version format for Dynamic Device Personalization (DDP) */
-struct i40e_ddp_version {
- u8 major;
- u8 minor;
- u8 update;
- u8 draft;
-};
-
-#define I40E_DDP_NAME_SIZE 32
-
-/* Package header */
-struct i40e_package_header {
- struct i40e_ddp_version version;
- u32 segment_count;
- u32 segment_offset[1];
-};
-
-/* Generic segment header */
-struct i40e_generic_seg_header {
-#define SEGMENT_TYPE_METADATA 0x00000001
-#define SEGMENT_TYPE_NOTES 0x00000002
-#define SEGMENT_TYPE_I40E 0x00000011
-#define SEGMENT_TYPE_X722 0x00000012
- u32 type;
- struct i40e_ddp_version version;
- u32 size;
- char name[I40E_DDP_NAME_SIZE];
-};
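
segment_offset[] above is a variable-length table of byte offsets from the start of the package, so finding a segment is a linear scan; a sketch of roughly how the DDP loader walks a package:

static struct i40e_generic_seg_header *
find_segment(u32 type, struct i40e_package_header *pkg)
{
        u32 i;

        for (i = 0; i < pkg->segment_count; i++) {
                struct i40e_generic_seg_header *seg =
                        (struct i40e_generic_seg_header *)
                        ((u8 *)pkg + pkg->segment_offset[i]);

                if (seg->type == type) /* e.g. SEGMENT_TYPE_I40E */
                        return seg;
        }
        return NULL;
}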
-
-struct i40e_metadata_segment {
- struct i40e_generic_seg_header header;
- struct i40e_ddp_version version;
- u32 track_id;
- char name[I40E_DDP_NAME_SIZE];
-};
-
-struct i40e_device_id_entry {
- u32 vendor_dev_id;
- u32 sub_vendor_dev_id;
-};
-
-struct i40e_profile_segment {
- struct i40e_generic_seg_header header;
- struct i40e_ddp_version version;
- char name[I40E_DDP_NAME_SIZE];
- u32 device_table_count;
- struct i40e_device_id_entry device_table[1];
-};
-
-struct i40e_section_table {
- u32 section_count;
- u32 section_offset[1];
-};
-
-struct i40e_profile_section_header {
- u16 tbl_size;
- u16 data_end;
- struct {
-#define SECTION_TYPE_INFO 0x00000010
-#define SECTION_TYPE_MMIO 0x00000800
-#define SECTION_TYPE_AQ 0x00000801
-#define SECTION_TYPE_NOTE 0x80000000
-#define SECTION_TYPE_NAME 0x80000001
- u32 type;
- u32 offset;
- u32 size;
- } section;
-};
-
-struct i40e_profile_info {
- u32 track_id;
- struct i40e_ddp_version version;
- u8 op;
-#define I40E_DDP_ADD_TRACKID 0x01
-#define I40E_DDP_REMOVE_TRACKID 0x02
- u8 reserved[7];
- u8 name[I40E_DDP_NAME_SIZE];
-};
-#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
deleted file mode 100644
index 96e537a35000..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ /dev/null
@@ -1,427 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40EVF_H_
-#define _I40EVF_H_
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/aer.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/sctp.h>
-#include <linux/ipv6.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/wait.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/skbuff.h>
-#include <linux/dma-mapping.h>
-#include <linux/etherdevice.h>
-#include <linux/socket.h>
-#include <linux/jiffies.h>
-#include <net/ip6_checksum.h>
-#include <net/pkt_cls.h>
-#include <net/udp.h>
-#include <net/tc_act/tc_gact.h>
-#include <net/tc_act/tc_mirred.h>
-
-#include "i40e_type.h"
-#include <linux/avf/virtchnl.h>
-#include "i40e_txrx.h"
-
-#define DEFAULT_DEBUG_LEVEL_SHIFT 3
-#define PFX "i40evf: "
-
-/* VSI state flags shared with common code */
-enum i40evf_vsi_state_t {
- __I40E_VSI_DOWN,
- /* This must be last as it determines the size of the BITMAP */
- __I40E_VSI_STATE_SIZE__,
-};
-
-/* dummy struct to make common code less painful */
-struct i40e_vsi {
- struct i40evf_adapter *back;
- struct net_device *netdev;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u16 seid;
- u16 id;
- DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
- int base_vector;
- u16 work_limit;
- u16 qs_handle;
- void *priv; /* client driver data reference. */
-};
-
-/* How many Rx buffers do we bundle into one write to the hardware? */
-#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define I40EVF_DEFAULT_TXD 512
-#define I40EVF_DEFAULT_RXD 512
-#define I40EVF_MAX_TXD 4096
-#define I40EVF_MIN_TXD 64
-#define I40EVF_MAX_RXD 4096
-#define I40EVF_MIN_RXD 64
-#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
-#define I40EVF_MAX_AQ_BUF_SIZE 4096
-#define I40EVF_AQ_LEN 32
-#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
-
-#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-
-#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
-#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
-#define I40E_TX_CTXTDESC(R, i) \
- (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
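
These simply index the ring's raw descriptor memory by descriptor size; e.g.:

/* i-th descriptors of hypothetical rx_ring/tx_ring ring structures */
union i40e_32byte_rx_desc *rxd = I40E_RX_DESC(rx_ring, i);
struct i40e_tx_desc *txd = I40E_TX_DESC(tx_ring, i);
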
-#define I40EVF_MAX_REQ_QUEUES 4
-
-#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
-#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4)
-#define I40EVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
-
-/* MAX_MSIX_Q_VECTORS of these are allocated,
- * but we only use one per queue-specific vector.
- */
-struct i40e_q_vector {
- struct i40evf_adapter *adapter;
- struct i40e_vsi *vsi;
- struct napi_struct napi;
- struct i40e_ring_container rx;
- struct i40e_ring_container tx;
- u32 ring_mask;
- u8 itr_countdown; /* when 0 should adjust adaptive ITR */
- u8 num_ringpairs; /* total number of ring pairs in vector */
- u16 v_idx; /* index in the vsi->q_vector array. */
- u16 reg_idx; /* register index of the interrupt */
- char name[IFNAMSIZ + 15];
- bool arm_wb_state;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
-};
-
-/* Helper macros to switch between ints/sec and what the register uses.
- * And yes, it's the same math going both ways. The lowest value
- * supported by all of the i40e hardware is 8.
- */
-#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
- ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
-#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
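
A worked round trip through the conversion (integer division, so it is approximate):

/* 20000 interrupts/sec -> register value:
 *      1000000000 / (20000 * 256) = 195
 * and the same formula back again:
 *      1000000000 / (195 * 256) ~= 20032 ints/sec
 * A zero request falls through to the hardware minimum of 8.
 */
u32 reg = EITR_INTS_PER_SEC_TO_REG(20000);      /* == 195 */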
-
-#define I40EVF_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
-
-#define I40EVF_RX_DESC_ADV(R, i) \
- (&(((union i40e_adv_rx_desc *)((R).desc))[i]))
-#define I40EVF_TX_DESC_ADV(R, i) \
- (&(((union i40e_adv_tx_desc *)((R).desc))[i]))
-#define I40EVF_TX_CTXTDESC_ADV(R, i) \
- (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
-
-#define OTHER_VECTOR 1
-#define NONQ_VECS (OTHER_VECTOR)
-
-#define MIN_MSIX_Q_VECTORS 1
-#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)
-
-#define I40EVF_QUEUE_END_OF_LIST 0x7FF
-#define I40EVF_FREE_VECTOR 0x7FFF
-struct i40evf_mac_filter {
- struct list_head list;
- u8 macaddr[ETH_ALEN];
- bool remove; /* filter needs to be removed */
- bool add; /* filter needs to be added */
-};
-
-struct i40evf_vlan_filter {
- struct list_head list;
- u16 vlan;
- bool remove; /* filter needs to be removed */
- bool add; /* filter needs to be added */
-};
-
-#define I40EVF_MAX_TRAFFIC_CLASS 4
-/* State of traffic class creation */
-enum i40evf_tc_state_t {
- __I40EVF_TC_INVALID, /* no traffic class, default state */
- __I40EVF_TC_RUNNING, /* traffic classes have been created */
-};
-
-/* channel info */
-struct i40evf_channel_config {
- struct virtchnl_channel_info ch_info[I40EVF_MAX_TRAFFIC_CLASS];
- enum i40evf_tc_state_t state;
- u8 total_qps;
-};
-
-/* State of cloud filter */
-enum i40evf_cloud_filter_state_t {
- __I40EVF_CF_INVALID, /* cloud filter not added */
- __I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */
- __I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */
- __I40EVF_CF_ACTIVE, /* cloud filter is active */
-};
-
-/* Driver state. The order of these is important! */
-enum i40evf_state_t {
- __I40EVF_STARTUP, /* driver loaded, probe complete */
- __I40EVF_REMOVE, /* driver is being unloaded */
- __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
- __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
- __I40EVF_INIT_SW, /* got resources, setting up structs */
- __I40EVF_RESETTING, /* in reset */
- /* Below here, watchdog is running */
- __I40EVF_DOWN, /* ready, can be opened */
- __I40EVF_DOWN_PENDING, /* descending, waiting for watchdog */
- __I40EVF_TESTING, /* in ethtool self-test */
- __I40EVF_RUNNING, /* opened, working */
-};
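
Because the enumerators are ordered, state tests can be ordered comparisons; an illustrative (hypothetical) check:

/* every state from __I40EVF_DOWN onward has the watchdog running */
if (adapter->state >= __I40EVF_DOWN)
        mod_timer(&adapter->watchdog_timer, jiffies + HZ);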
-
-enum i40evf_critical_section_t {
- __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
- __I40EVF_IN_CLIENT_TASK,
- __I40EVF_IN_REMOVE_TASK, /* device being removed */
-};
-
-#define I40EVF_CLOUD_FIELD_OMAC 0x01
-#define I40EVF_CLOUD_FIELD_IMAC 0x02
-#define I40EVF_CLOUD_FIELD_IVLAN 0x04
-#define I40EVF_CLOUD_FIELD_TEN_ID 0x08
-#define I40EVF_CLOUD_FIELD_IIP 0x10
-
-#define I40EVF_CF_FLAGS_OMAC I40EVF_CLOUD_FIELD_OMAC
-#define I40EVF_CF_FLAGS_IMAC I40EVF_CLOUD_FIELD_IMAC
-#define I40EVF_CF_FLAGS_IMAC_IVLAN (I40EVF_CLOUD_FIELD_IMAC |\
- I40EVF_CLOUD_FIELD_IVLAN)
-#define I40EVF_CF_FLAGS_IMAC_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\
- I40EVF_CLOUD_FIELD_TEN_ID)
-#define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC (I40EVF_CLOUD_FIELD_OMAC |\
- I40EVF_CLOUD_FIELD_IMAC |\
- I40EVF_CLOUD_FIELD_TEN_ID)
-#define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\
- I40EVF_CLOUD_FIELD_IVLAN |\
- I40EVF_CLOUD_FIELD_TEN_ID)
-#define I40EVF_CF_FLAGS_IIP I40E_CLOUD_FIELD_IIP
-
-/* bookkeeping of cloud filters */
-struct i40evf_cloud_filter {
- enum i40evf_cloud_filter_state_t state;
- struct list_head list;
- struct virtchnl_filter f;
- unsigned long cookie;
- bool del; /* filter needs to be deleted */
- bool add; /* filter needs to be added */
-};
-
-/* board specific private data structure */
-struct i40evf_adapter {
- struct timer_list watchdog_timer;
- struct work_struct reset_task;
- struct work_struct adminq_task;
- struct delayed_work client_task;
- struct delayed_work init_task;
- wait_queue_head_t down_waitqueue;
- struct i40e_q_vector *q_vectors;
- struct list_head vlan_filter_list;
- struct list_head mac_filter_list;
- /* Lock to protect accesses to MAC and VLAN lists */
- spinlock_t mac_vlan_list_lock;
- char misc_vector_name[IFNAMSIZ + 9];
- int num_active_queues;
- int num_req_queues;
-
- /* TX */
- struct i40e_ring *tx_rings;
- u32 tx_timeout_count;
- u32 tx_desc_count;
-
- /* RX */
- struct i40e_ring *rx_rings;
- u64 hw_csum_rx_error;
- u32 rx_desc_count;
- int num_msix_vectors;
- int num_iwarp_msix;
- int iwarp_base_vector;
- u32 client_pending;
- struct i40e_client_instance *cinst;
- struct msix_entry *msix_entries;
-
- u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_PF_COMMS_FAILED BIT(3)
-#define I40EVF_FLAG_RESET_PENDING BIT(4)
-#define I40EVF_FLAG_RESET_NEEDED BIT(5)
-#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(6)
-#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(8)
-#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9)
-#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(10)
-#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11)
-#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12)
-#define I40EVF_FLAG_PROMISC_ON BIT(13)
-#define I40EVF_FLAG_ALLMULTI_ON BIT(14)
-#define I40EVF_FLAG_LEGACY_RX BIT(15)
-#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(16)
-#define I40EVF_FLAG_QUEUES_DISABLED BIT(17)
-/* duplicates for common code */
-#define I40E_FLAG_DCB_ENABLED 0
-#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
-#define I40E_FLAG_LEGACY_RX I40EVF_FLAG_LEGACY_RX
- /* flags for admin queue service task */
- u32 aq_required;
-#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
-#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1)
-#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2)
-#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3)
-#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4)
-#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5)
-#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
-#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
-#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
-#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
-#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
-/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
-#define I40EVF_FLAG_AQ_GET_HENA BIT(11)
-#define I40EVF_FLAG_AQ_SET_HENA BIT(12)
-#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13)
-#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14)
-#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15)
-#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16)
-#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17)
-#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18)
-#define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19)
-#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20)
-#define I40EVF_FLAG_AQ_ENABLE_CHANNELS BIT(21)
-#define I40EVF_FLAG_AQ_DISABLE_CHANNELS BIT(22)
-#define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23)
-#define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
-
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
-
- struct i40e_hw hw; /* defined in i40e_type.h */
-
- enum i40evf_state_t state;
- unsigned long crit_section;
-
- struct work_struct watchdog_task;
- bool netdev_registered;
- bool link_up;
- enum virtchnl_link_speed link_speed;
- enum virtchnl_ops current_op;
-#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
- (_a)->vf_res->vf_cap_flags & \
- VIRTCHNL_VF_OFFLOAD_IWARP : \
- 0)
-#define CLIENT_ENABLED(_a) ((_a)->cinst)
-/* RSS by the PF should be preferred over RSS via other methods. */
-#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \
- VIRTCHNL_VF_OFFLOAD_RSS_PF)
-#define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \
- VIRTCHNL_VF_OFFLOAD_RSS_AQ)
-#define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \
- (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
- VIRTCHNL_VF_OFFLOAD_RSS_PF)))
-#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
- VIRTCHNL_VF_OFFLOAD_VLAN)
- struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
- struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
- struct virtchnl_version_info pf_version;
-#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
- ((_a)->pf_version.minor == 1))
- u16 msg_enable;
- struct i40e_eth_stats current_stats;
- struct i40e_vsi vsi;
- u32 aq_wait_count;
- /* RSS stuff */
- u64 hena;
- u16 rss_key_size;
- u16 rss_lut_size;
- u8 *rss_key;
- u8 *rss_lut;
- /* ADQ related members */
- struct i40evf_channel_config ch_config;
- u8 num_tc;
- struct list_head cloud_filter_list;
-	/* lock to protect access to the cloud filter list */
- spinlock_t cloud_filter_list_lock;
- u16 num_cloud_filters;
-};
-
-
-/* Ethtool Private Flags */
-
-/* lan device */
-struct i40e_device {
- struct list_head list;
- struct i40evf_adapter *vf;
-};
-
-/* needed by i40evf_ethtool.c */
-extern char i40evf_driver_name[];
-extern const char i40evf_driver_version[];
-
-int i40evf_up(struct i40evf_adapter *adapter);
-void i40evf_down(struct i40evf_adapter *adapter);
-int i40evf_process_config(struct i40evf_adapter *adapter);
-void i40evf_schedule_reset(struct i40evf_adapter *adapter);
-void i40evf_reset(struct i40evf_adapter *adapter);
-void i40evf_set_ethtool_ops(struct net_device *netdev);
-void i40evf_update_stats(struct i40evf_adapter *adapter);
-void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
-int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
-void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
-void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
-void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
-
-void i40e_napi_add_all(struct i40evf_adapter *adapter);
-void i40e_napi_del_all(struct i40evf_adapter *adapter);
-
-int i40evf_send_api_ver(struct i40evf_adapter *adapter);
-int i40evf_verify_api_ver(struct i40evf_adapter *adapter);
-int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter);
-int i40evf_get_vf_config(struct i40evf_adapter *adapter);
-void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush);
-void i40evf_configure_queues(struct i40evf_adapter *adapter);
-void i40evf_deconfigure_queues(struct i40evf_adapter *adapter);
-void i40evf_enable_queues(struct i40evf_adapter *adapter);
-void i40evf_disable_queues(struct i40evf_adapter *adapter);
-void i40evf_map_queues(struct i40evf_adapter *adapter);
-int i40evf_request_queues(struct i40evf_adapter *adapter, int num);
-void i40evf_add_ether_addrs(struct i40evf_adapter *adapter);
-void i40evf_del_ether_addrs(struct i40evf_adapter *adapter);
-void i40evf_add_vlans(struct i40evf_adapter *adapter);
-void i40evf_del_vlans(struct i40evf_adapter *adapter);
-void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
-void i40evf_request_stats(struct i40evf_adapter *adapter);
-void i40evf_request_reset(struct i40evf_adapter *adapter);
-void i40evf_get_hena(struct i40evf_adapter *adapter);
-void i40evf_set_hena(struct i40evf_adapter *adapter);
-void i40evf_set_rss_key(struct i40evf_adapter *adapter);
-void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
-void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter);
-void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter);
-void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
- enum virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg, u16 msglen);
-int i40evf_config_rss(struct i40evf_adapter *adapter);
-int i40evf_lan_add_device(struct i40evf_adapter *adapter);
-int i40evf_lan_del_device(struct i40evf_adapter *adapter);
-void i40evf_client_subtask(struct i40evf_adapter *adapter);
-void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
-void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
-void i40evf_notify_client_open(struct i40e_vsi *vsi);
-void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
-void i40evf_enable_channels(struct i40evf_adapter *adapter);
-void i40evf_disable_channels(struct i40evf_adapter *adapter);
-void i40evf_add_cloud_filter(struct i40evf_adapter *adapter);
-void i40evf_del_cloud_filter(struct i40evf_adapter *adapter);
-#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
deleted file mode 100644
index 69efe0aec76a..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ /dev/null
@@ -1,820 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-/* ethtool support for i40evf */
-#include "i40evf.h"
-
-#include <linux/uaccess.h>
-
-struct i40evf_stats {
- char stat_string[ETH_GSTRING_LEN];
- int stat_offset;
-};
-
-#define I40EVF_STAT(_name, _stat) { \
- .stat_string = _name, \
- .stat_offset = offsetof(struct i40evf_adapter, _stat) \
-}
-
-/* All stats are u64, so we don't need to track the size of the field. */
-static const struct i40evf_stats i40evf_gstrings_stats[] = {
- I40EVF_STAT("rx_bytes", current_stats.rx_bytes),
- I40EVF_STAT("rx_unicast", current_stats.rx_unicast),
- I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
- I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
- I40EVF_STAT("rx_discards", current_stats.rx_discards),
- I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
- I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
- I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
- I40EVF_STAT("tx_multicast", current_stats.tx_multicast),
- I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast),
- I40EVF_STAT("tx_discards", current_stats.tx_discards),
- I40EVF_STAT("tx_errors", current_stats.tx_errors),
-};
-
-#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
-#define I40EVF_QUEUE_STATS_LEN(_dev) \
- (((struct i40evf_adapter *)\
- netdev_priv(_dev))->num_active_queues \
- * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
-#define I40EVF_STATS_LEN(_dev) \
- (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
-
-/* For now we have one and only one private flag and it is only defined
- * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
- * of leaving all this code sitting around empty we will strip it unless
- * our one private flag is actually available.
- */
-struct i40evf_priv_flags {
- char flag_string[ETH_GSTRING_LEN];
- u32 flag;
- bool read_only;
-};
-
-#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \
- .flag_string = _name, \
- .flag = _flag, \
- .read_only = _read_only, \
-}
-
-static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = {
- I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0),
-};
-
-#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags)
-
-/**
- * i40evf_get_link_ksettings - Get Link Speed and Duplex settings
- * @netdev: network interface device structure
- * @cmd: ethtool command
- *
- * Reports speed/duplex settings. Because this is a VF, we don't know what
- * kind of link we really have, so we fake it.
- **/
-static int i40evf_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
-
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- cmd->base.autoneg = AUTONEG_DISABLE;
- cmd->base.port = PORT_NONE;
- /* Set speed and duplex */
- switch (adapter->link_speed) {
- case I40E_LINK_SPEED_40GB:
- cmd->base.speed = SPEED_40000;
- break;
- case I40E_LINK_SPEED_25GB:
-#ifdef SPEED_25000
- cmd->base.speed = SPEED_25000;
-#else
- netdev_info(netdev,
- "Speed is 25G, display not supported by this version of ethtool.\n");
-#endif
- break;
- case I40E_LINK_SPEED_20GB:
- cmd->base.speed = SPEED_20000;
- break;
- case I40E_LINK_SPEED_10GB:
- cmd->base.speed = SPEED_10000;
- break;
- case I40E_LINK_SPEED_1GB:
- cmd->base.speed = SPEED_1000;
- break;
- case I40E_LINK_SPEED_100MB:
- cmd->base.speed = SPEED_100;
- break;
- default:
- break;
- }
- cmd->base.duplex = DUPLEX_FULL;
-
- return 0;
-}
-
-/**
- * i40evf_get_sset_count - Get length of string set
- * @netdev: network interface device structure
- * @sset: id of string set
- *
- * Reports size of string table. This driver only supports
- * strings for statistics.
- **/
-static int i40evf_get_sset_count(struct net_device *netdev, int sset)
-{
- if (sset == ETH_SS_STATS)
- return I40EVF_STATS_LEN(netdev);
- else if (sset == ETH_SS_PRIV_FLAGS)
- return I40EVF_PRIV_FLAGS_STR_LEN;
- else
- return -EINVAL;
-}
-
-/**
- * i40evf_get_ethtool_stats - report device statistics
- * @netdev: network interface device structure
- * @stats: ethtool statistics structure
- * @data: pointer to data buffer
- *
- * All statistics are added to the data buffer as an array of u64.
- **/
-static void i40evf_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- unsigned int i, j;
- char *p;
-
- for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
- p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
- data[i] = *(u64 *)p;
- }
- for (j = 0; j < adapter->num_active_queues; j++) {
- data[i++] = adapter->tx_rings[j].stats.packets;
- data[i++] = adapter->tx_rings[j].stats.bytes;
- }
- for (j = 0; j < adapter->num_active_queues; j++) {
- data[i++] = adapter->rx_rings[j].stats.packets;
- data[i++] = adapter->rx_rings[j].stats.bytes;
- }
-}
-
-/**
- * i40evf_get_strings - Get string set
- * @netdev: network interface device structure
- * @sset: id of string set
- * @data: buffer for string data
- *
- * Builds stats string table.
- **/
-static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- u8 *p = data;
- int i;
-
- if (sset == ETH_SS_STATS) {
- for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) {
- memcpy(p, i40evf_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < adapter->num_active_queues; i++) {
- snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < adapter->num_active_queues; i++) {
- snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
- p += ETH_GSTRING_LEN;
- }
- } else if (sset == ETH_SS_PRIV_FLAGS) {
- for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "%s",
- i40evf_gstrings_priv_flags[i].flag_string);
- p += ETH_GSTRING_LEN;
- }
- }
-}
-
-/**
- * i40evf_get_priv_flags - report device private flags
- * @netdev: network interface device structure
- *
- * The count reported by get_sset_count() for ETH_SS_PRIV_FLAGS and the
- * strings returned here must stay in sync, one entry per flag. Add a string
- * to the i40evf_gstrings_priv_flags array for each new flag.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 i40evf_get_priv_flags(struct net_device *netdev)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- u32 i, ret_flags = 0;
-
- for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct i40evf_priv_flags *priv_flags;
-
- priv_flags = &i40evf_gstrings_priv_flags[i];
-
- if (priv_flags->flag & adapter->flags)
- ret_flags |= BIT(i);
- }
-
- return ret_flags;
-}
-
-/**
- * i40evf_set_priv_flags - set private flags
- * @netdev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- u32 orig_flags, new_flags, changed_flags;
- u32 i;
-
- orig_flags = READ_ONCE(adapter->flags);
- new_flags = orig_flags;
-
- for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct i40evf_priv_flags *priv_flags;
-
- priv_flags = &i40evf_gstrings_priv_flags[i];
-
- if (flags & BIT(i))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
-
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
- return -EOPNOTSUPP;
- }
-
- /* Before we finalize any flag changes, any checks which we need to
- * perform to determine if the new flags will be supported should go
- * here...
- */
-
-	/* Compare and exchange the new flags into place. If cmpxchg returns
-	 * anything other than the old value, another thread modified the
-	 * flags variable after we copied it, so punt with an error and log
-	 * a message.
-	 */
- if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
- dev_warn(&adapter->pdev->dev,
- "Unable to update adapter->flags as it was modified by another thread...\n");
- return -EAGAIN;
- }
-
- changed_flags = orig_flags ^ new_flags;
-
- /* Process any additional changes needed as a result of flag changes.
- * The changed_flags value reflects the list of bits that were changed
- * in the code above.
- */
-
- /* issue a reset to force legacy-rx change to take effect */
- if (changed_flags & I40EVF_FLAG_LEGACY_RX) {
- if (netif_running(netdev)) {
- adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
- schedule_work(&adapter->reset_task);
- }
- }
-
- return 0;
-}
-
-/**
- * i40evf_get_msglevel - Get debug message level
- * @netdev: network interface device structure
- *
- * Returns current debug message level.
- **/
-static u32 i40evf_get_msglevel(struct net_device *netdev)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
-
- return adapter->msg_enable;
-}
-
-/**
- * i40evf_set_msglevel - Set debug message level
- * @netdev: network interface device structure
- * @data: message level
- *
- * Set current debug message level. Higher values cause the driver to
- * be noisier.
- **/
-static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
-
- if (I40E_DEBUG_USER & data)
- adapter->hw.debug_mask = data;
- adapter->msg_enable = data;
-}
-
-/**
- * i40evf_get_drvinfo - Get driver info
- * @netdev: network interface device structure
- * @drvinfo: ethtool driver info structure
- *
- * Returns information about the driver and device for display to the user.
- **/
-static void i40evf_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
-
- strlcpy(drvinfo->driver, i40evf_driver_name, 32);
- strlcpy(drvinfo->version, i40evf_driver_version, 32);
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
-}
-
-/**
- * i40evf_get_ringparam - Get ring parameters
- * @netdev: network interface device structure
- * @ring: ethtool ringparam structure
- *
- * Returns current ring parameters. TX and RX rings are reported separately,
- * but the number of rings is not reported.
- **/
-static void i40evf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
-
- ring->rx_max_pending = I40EVF_MAX_RXD;
- ring->tx_max_pending = I40EVF_MAX_TXD;
- ring->rx_pending = adapter->rx_desc_count;
- ring->tx_pending = adapter->tx_desc_count;
-}
-
-/**
- * i40evf_set_ringparam - Set ring parameters
- * @netdev: network interface device structure
- * @ring: ethtool ringparam structure
- *
- * Sets ring parameters. TX and RX rings are controlled separately, but the
- * number of rings is not specified, so all rings get the same settings.
- **/
-static int i40evf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- u32 new_rx_count, new_tx_count;
-
- if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
- return -EINVAL;
-
- new_tx_count = clamp_t(u32, ring->tx_pending,
- I40EVF_MIN_TXD,
- I40EVF_MAX_TXD);
- new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
-
- new_rx_count = clamp_t(u32, ring->rx_pending,
- I40EVF_MIN_RXD,
- I40EVF_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
-
- /* if nothing to do return success */
- if ((new_tx_count == adapter->tx_desc_count) &&
- (new_rx_count == adapter->rx_desc_count))
- return 0;
-
- adapter->tx_desc_count = new_tx_count;
- adapter->rx_desc_count = new_rx_count;
-
- if (netif_running(netdev)) {
- adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
- schedule_work(&adapter->reset_task);
- }
-
- return 0;
-}
-
-/**
- * __i40evf_get_coalesce - get per-queue coalesce settings
- * @netdev: the netdev to check
- * @ec: ethtool coalesce data structure
- * @queue: which queue to pick
- *
- * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
- * are per queue. If queue is <0 then we default to queue 0 as the
- * representative value.
- **/
-static int __i40evf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec,
- int queue)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_vsi *vsi = &adapter->vsi;
- struct i40e_ring *rx_ring, *tx_ring;
-
- ec->tx_max_coalesced_frames = vsi->work_limit;
- ec->rx_max_coalesced_frames = vsi->work_limit;
-
-	/* Rx and Tx usecs are per-queue values. If the user doesn't specify
-	 * a queue, return queue 0's value as the representative.
-	 */
- if (queue < 0)
- queue = 0;
- else if (queue >= adapter->num_active_queues)
- return -EINVAL;
-
- rx_ring = &adapter->rx_rings[queue];
- tx_ring = &adapter->tx_rings[queue];
-
- if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
- ec->use_adaptive_rx_coalesce = 1;
-
- if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
- ec->use_adaptive_tx_coalesce = 1;
-
- ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
-
- return 0;
-}
-
-/**
- * i40evf_get_coalesce - Get interrupt coalescing settings
- * @netdev: network interface device structure
- * @ec: ethtool coalesce structure
- *
- * Returns current coalescing settings. This is referred to elsewhere in the
- * driver as Interrupt Throttle Rate, as this is how the hardware describes
- * this functionality. Note that if per-queue settings have been modified this
- * only represents the settings of queue 0.
- **/
-static int i40evf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- return __i40evf_get_coalesce(netdev, ec, -1);
-}
-
-/**
- * i40evf_get_per_queue_coalesce - get coalesce values for specific queue
- * @netdev: netdev to read
- * @ec: coalesce settings from ethtool
- * @queue: the queue to read
- *
- * Read specific queue's coalesce settings.
- **/
-static int i40evf_get_per_queue_coalesce(struct net_device *netdev,
- u32 queue,
- struct ethtool_coalesce *ec)
-{
- return __i40evf_get_coalesce(netdev, ec, queue);
-}
-
-/**
- * i40evf_set_itr_per_queue - set ITR values for specific queue
- * @adapter: the VF adapter struct to set values for
- * @ec: coalesce settings from ethtool
- * @queue: the queue to modify
- *
- * Change the ITR settings for a specific queue.
- **/
-static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
- struct ethtool_coalesce *ec,
- int queue)
-{
- struct i40e_ring *rx_ring = &adapter->rx_rings[queue];
- struct i40e_ring *tx_ring = &adapter->tx_rings[queue];
- struct i40e_q_vector *q_vector;
-
- rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
- tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
-
- rx_ring->itr_setting |= I40E_ITR_DYNAMIC;
- if (!ec->use_adaptive_rx_coalesce)
- rx_ring->itr_setting ^= I40E_ITR_DYNAMIC;
-
- tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
- if (!ec->use_adaptive_tx_coalesce)
- tx_ring->itr_setting ^= I40E_ITR_DYNAMIC;
-
- q_vector = rx_ring->q_vector;
- q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
-
- q_vector = tx_ring->q_vector;
- q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
-
- /* The interrupt handler itself will take care of programming
- * the Tx and Rx ITR values based on the values we have entered
- * into the q_vector, no need to write the values now.
- */
-}
-
-/**
- * __i40evf_set_coalesce - set coalesce settings for particular queue
- * @netdev: the netdev to change
- * @ec: ethtool coalesce settings
- * @queue: the queue to change
- *
- * Sets the coalesce settings for a particular queue.
- **/
-static int __i40evf_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec,
- int queue)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_vsi *vsi = &adapter->vsi;
- int i;
-
- if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
- vsi->work_limit = ec->tx_max_coalesced_frames_irq;
-
- if (ec->rx_coalesce_usecs == 0) {
- if (ec->use_adaptive_rx_coalesce)
- netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
- } else if ((ec->rx_coalesce_usecs < I40E_MIN_ITR) ||
- (ec->rx_coalesce_usecs > I40E_MAX_ITR)) {
- netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
- return -EINVAL;
-	} else if (ec->tx_coalesce_usecs == 0) {
- if (ec->use_adaptive_tx_coalesce)
- netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
- } else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) ||
- (ec->tx_coalesce_usecs > I40E_MAX_ITR)) {
- netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
- return -EINVAL;
- }
-
-	/* Rx and Tx usecs are per-queue values. If the user doesn't specify
-	 * a queue, apply the settings to all queues.
-	 */
- if (queue < 0) {
- for (i = 0; i < adapter->num_active_queues; i++)
- i40evf_set_itr_per_queue(adapter, ec, i);
- } else if (queue < adapter->num_active_queues) {
- i40evf_set_itr_per_queue(adapter, ec, queue);
- } else {
- netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
- adapter->num_active_queues - 1);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * i40evf_set_coalesce - Set interrupt coalescing settings
- * @netdev: network interface device structure
- * @ec: ethtool coalesce structure
- *
- * Change current coalescing settings for every queue.
- **/
-static int i40evf_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- return __i40evf_set_coalesce(netdev, ec, -1);
-}
-
-/**
- * i40evf_set_per_queue_coalesce - set specific queue's coalesce settings
- * @netdev: the netdev to change
- * @ec: ethtool's coalesce settings
- * @queue: the queue to modify
- *
- * Modifies a specific queue's coalesce settings.
- **/
-static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
- u32 queue,
- struct ethtool_coalesce *ec)
-{
- return __i40evf_set_coalesce(netdev, ec, queue);
-}
-
-/**
- * i40evf_get_rxnfc - command to get RX flow classification rules
- * @netdev: network interface device structure
- * @cmd: ethtool rxnfc command
- * @rule_locs: pointer to store rule locations
- *
- * Returns Success if the command is supported.
- **/
-static int i40evf_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_active_queues;
- ret = 0;
- break;
- case ETHTOOL_GRXFH:
- netdev_info(netdev,
- "RSS hash info is not available to vf, use pf.\n");
- break;
- default:
- break;
- }
-
- return ret;
-}
-/**
- * i40evf_get_channels: get the number of channels supported by the device
- * @netdev: network interface device structure
- * @ch: channel information structure
- *
- * For the purposes of our device, we only use combined channels, i.e. a tx/rx
- * queue pair. Report one extra channel to match our "other" MSI-X vector.
- **/
-static void i40evf_get_channels(struct net_device *netdev,
- struct ethtool_channels *ch)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
-
- /* Report maximum channels */
- ch->max_combined = I40EVF_MAX_REQ_QUEUES;
-
- ch->max_other = NONQ_VECS;
- ch->other_count = NONQ_VECS;
-
- ch->combined_count = adapter->num_active_queues;
-}
-
-/**
- * i40evf_set_channels: set the new channel count
- * @netdev: network interface device structure
- * @ch: channel information structure
- *
- * Negotiate a new number of channels with the PF then do a reset. During
- * reset we'll realloc queues and fix the RSS table. Returns 0 on success,
- * negative on failure.
- **/
-static int i40evf_set_channels(struct net_device *netdev,
- struct ethtool_channels *ch)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- int num_req = ch->combined_count;
-
- if (num_req != adapter->num_active_queues &&
- !(adapter->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
- dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
- return -EINVAL;
- }
-
- if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
- adapter->num_tc) {
- dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
- return -EINVAL;
- }
-
- /* All of these should have already been checked by ethtool before this
- * even gets to us, but just to be sure.
- */
- if (num_req <= 0 || num_req > I40EVF_MAX_REQ_QUEUES)
- return -EINVAL;
-
- if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
- return -EINVAL;
-
- adapter->num_req_queues = num_req;
- return i40evf_request_queues(adapter, num_req);
-}
-
-/**
- * i40evf_get_rxfh_key_size - get the RSS hash key size
- * @netdev: network interface device structure
- *
- * Returns the size of the RSS hash key.
- **/
-static u32 i40evf_get_rxfh_key_size(struct net_device *netdev)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
-
- return adapter->rss_key_size;
-}
-
-/**
- * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
- * @netdev: network interface device structure
- *
- * Returns the table size.
- **/
-static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
-
- return adapter->rss_lut_size;
-}
-
-/**
- * i40evf_get_rxfh - get the rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function in use
- *
- * Reads the indirection table and hash key from the driver's cached copies;
- * always returns 0.
- **/
-static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- u16 i;
-
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
- return 0;
-
-	if (key)
-		memcpy(key, adapter->rss_key, adapter->rss_key_size);
-
-	/* Each LUT entry is copied out as one 32-bit word of 'indir' */
- for (i = 0; i < adapter->rss_lut_size; i++)
- indir[i] = (u32)adapter->rss_lut[i];
-
- return 0;
-}
-
-/**
- * i40evf_set_rxfh - set the rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
- *
- * Returns -EINVAL if the table specifies an invalid queue id, otherwise
- * returns 0 after programming the table.
- **/
-static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- u16 i;
-
- /* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
- return -EOPNOTSUPP;
- if (!indir)
- return 0;
-
-	/* Each 32-bit word of 'indir' is stored back as one LUT entry */
- for (i = 0; i < adapter->rss_lut_size; i++)
- adapter->rss_lut[i] = (u8)(indir[i]);
-
- return i40evf_config_rss(adapter);
-}
-
-static const struct ethtool_ops i40evf_ethtool_ops = {
- .get_drvinfo = i40evf_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_ringparam = i40evf_get_ringparam,
- .set_ringparam = i40evf_set_ringparam,
- .get_strings = i40evf_get_strings,
- .get_ethtool_stats = i40evf_get_ethtool_stats,
- .get_sset_count = i40evf_get_sset_count,
- .get_priv_flags = i40evf_get_priv_flags,
- .set_priv_flags = i40evf_set_priv_flags,
- .get_msglevel = i40evf_get_msglevel,
- .set_msglevel = i40evf_set_msglevel,
- .get_coalesce = i40evf_get_coalesce,
- .set_coalesce = i40evf_set_coalesce,
- .get_per_queue_coalesce = i40evf_get_per_queue_coalesce,
- .set_per_queue_coalesce = i40evf_set_per_queue_coalesce,
- .get_rxnfc = i40evf_get_rxnfc,
- .get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
- .get_rxfh = i40evf_get_rxfh,
- .set_rxfh = i40evf_set_rxfh,
- .get_channels = i40evf_get_channels,
- .set_channels = i40evf_set_channels,
- .get_rxfh_key_size = i40evf_get_rxfh_key_size,
- .get_link_ksettings = i40evf_get_link_ksettings,
-};
-
-/**
- * i40evf_set_ethtool_ops - Initialize ethtool ops struct
- * @netdev: network interface device structure
- *
- * Sets ethtool ops struct in our netdev so that ethtool can call
- * our functions.
- **/
-void i40evf_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &i40evf_ethtool_ops;
-}
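
The deleted ethtool file above is built around a common driver idiom: a const
table of { name, offsetof() } pairs (the I40EVF_STAT() entries) that
i40evf_get_ethtool_stats() walks by adding each stored offset to the adapter
base pointer. A minimal, self-contained sketch of that idiom in plain C;
struct demo_adapter, struct demo_stat and the field names are hypothetical
stand-ins, not driver structures:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical adapter with a few u64 counters, standing in for the
	 * current_stats fields referenced by the I40EVF_STAT() entries.
	 */
	struct demo_adapter {
		uint64_t rx_bytes;
		uint64_t tx_bytes;
		uint64_t tx_errors;
	};

	struct demo_stat {
		const char *name;
		size_t offset;	/* offset of the counter inside demo_adapter */
	};

	#define DEMO_STAT(_name, _field) \
		{ .name = _name, .offset = offsetof(struct demo_adapter, _field) }

	static const struct demo_stat demo_stats[] = {
		DEMO_STAT("rx_bytes", rx_bytes),
		DEMO_STAT("tx_bytes", tx_bytes),
		DEMO_STAT("tx_errors", tx_errors),
	};

	int main(void)
	{
		struct demo_adapter a = { .rx_bytes = 1500, .tx_bytes = 900 };
		size_t i;

		/* Same walk as i40evf_get_ethtool_stats(): base pointer plus
		 * offset, then read the counter through a u64 pointer.
		 */
		for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
			const char *p = (const char *)&a + demo_stats[i].offset;

			printf("%s: %llu\n", demo_stats[i].name,
			       (unsigned long long)*(const uint64_t *)p);
		}
		return 0;
	}

The same walk is why the string loop in i40evf_get_strings() and the value
loop in i40evf_get_ethtool_stats() must emit entries in the same order: the
N-th string labels the N-th u64 written into the buffer.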
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
new file mode 100644
index 000000000000..9cbb5743ed12
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2013 - 2018 Intel Corporation.
+#
+# Makefile for the Intel(R) Ethernet Adaptive Virtual Function (iavf)
+# driver
+#
+#
+
+ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(src)
+
+obj-$(CONFIG_IAVF) += iavf.o
+
+iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \
+ iavf_txrx.o iavf_common.o i40e_adminq.o iavf_client.o
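
The new Makefile uses the standard kbuild composite-object pattern:
obj-$(CONFIG_IAVF) += iavf.o selects the module, and the iavf-objs list names
the objects linked into it. However many objects that list grows to, the
module still has exactly one init/exit pair, defined in one of the listed
files (iavf_main.o here). A minimal sketch of what such an entry point looks
like, using hypothetical demo_* names rather than the real iavf_main.c code:

	// SPDX-License-Identifier: GPL-2.0
	#include <linux/module.h>
	#include <linux/init.h>

	/* One of the objects named in <module>-objs provides the single
	 * module_init()/module_exit() pair for the whole composite module.
	 */
	static int __init demo_module_init(void)
	{
		pr_info("demo: loaded\n");
		return 0;
	}

	static void __exit demo_module_exit(void)
	{
		pr_info("demo: unloaded\n");
	}

	module_init(demo_module_init);
	module_exit(demo_module_exit);
	MODULE_LICENSE("GPL v2");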
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/i40e_adminq.c
index 21a0dbf6ccf6..fca1ecfd9f71 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.c
@@ -1,21 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#include "i40e_status.h"
-#include "i40e_type.h"
-#include "i40e_register.h"
+#include "iavf_status.h"
+#include "iavf_type.h"
+#include "iavf_register.h"
#include "i40e_adminq.h"
-#include "i40e_prototype.h"
-
-/**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
- return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
- (desc->opcode == i40e_aqc_opc_nvm_update);
-}
+#include "iavf_prototype.h"
/**
* i40e_adminq_init_regs - Initialize AdminQ registers
@@ -23,44 +13,42 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
*
* This assumes the alloc_asq and alloc_arq functions have already been called
**/
-static void i40e_adminq_init_regs(struct i40e_hw *hw)
+static void i40e_adminq_init_regs(struct iavf_hw *hw)
{
/* set head and tail registers in our local struct */
- if (i40e_is_vf(hw)) {
- hw->aq.asq.tail = I40E_VF_ATQT1;
- hw->aq.asq.head = I40E_VF_ATQH1;
- hw->aq.asq.len = I40E_VF_ATQLEN1;
- hw->aq.asq.bal = I40E_VF_ATQBAL1;
- hw->aq.asq.bah = I40E_VF_ATQBAH1;
- hw->aq.arq.tail = I40E_VF_ARQT1;
- hw->aq.arq.head = I40E_VF_ARQH1;
- hw->aq.arq.len = I40E_VF_ARQLEN1;
- hw->aq.arq.bal = I40E_VF_ARQBAL1;
- hw->aq.arq.bah = I40E_VF_ARQBAH1;
- }
+ hw->aq.asq.tail = IAVF_VF_ATQT1;
+ hw->aq.asq.head = IAVF_VF_ATQH1;
+ hw->aq.asq.len = IAVF_VF_ATQLEN1;
+ hw->aq.asq.bal = IAVF_VF_ATQBAL1;
+ hw->aq.asq.bah = IAVF_VF_ATQBAH1;
+ hw->aq.arq.tail = IAVF_VF_ARQT1;
+ hw->aq.arq.head = IAVF_VF_ARQH1;
+ hw->aq.arq.len = IAVF_VF_ARQLEN1;
+ hw->aq.arq.bal = IAVF_VF_ARQBAL1;
+ hw->aq.arq.bah = IAVF_VF_ARQBAH1;
}
/**
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
- i40e_status ret_code;
+ iavf_status ret_code;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct i40e_aq_desc)),
- I40E_ADMINQ_DESC_ALIGNMENT);
+ IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
return ret_code;
- ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
sizeof(struct i40e_asq_cmd_details)));
if (ret_code) {
- i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
}
@@ -71,15 +59,15 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
* i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
- i40e_status ret_code;
+ iavf_status ret_code;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct i40e_aq_desc)),
- I40E_ADMINQ_DESC_ALIGNMENT);
+ IAVF_ADMINQ_DESC_ALIGNMENT);
return ret_code;
}
@@ -91,9 +79,9 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
* This assumes the posted send buffers have already been cleaned
* and de-allocated
**/
-static void i40e_free_adminq_asq(struct i40e_hw *hw)
+static void i40e_free_adminq_asq(struct iavf_hw *hw)
{
- i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
@@ -103,20 +91,20 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)
* This assumes the posted receive buffers have already been cleaned
* and de-allocated
**/
-static void i40e_free_adminq_arq(struct i40e_hw *hw)
+static void i40e_free_adminq_arq(struct iavf_hw *hw)
{
- i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+ iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
{
- i40e_status ret_code;
struct i40e_aq_desc *desc;
- struct i40e_dma_mem *bi;
+ struct iavf_dma_mem *bi;
+ iavf_status ret_code;
int i;
/* We'll be allocating the buffer info memory first, then we can
@@ -124,24 +112,25 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
*/
/* buffer_info structures do not need alignment */
- ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
- (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries *
+ sizeof(struct iavf_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
- hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+ hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) {
bi = &hw->aq.arq.r.arq_bi[i];
- ret_code = i40e_allocate_dma_mem(hw, bi,
+ ret_code = iavf_allocate_dma_mem(hw, bi,
i40e_mem_arq_buf,
hw->aq.arq_buf_size,
- I40E_ADMINQ_DESC_ALIGNMENT);
+ IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
goto unwind_alloc_arq_bufs;
/* now configure the descriptors for use */
- desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+ desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
@@ -169,8 +158,8 @@ unwind_alloc_arq_bufs:
/* don't try to free the one that failed... */
i--;
for (; i >= 0; i--)
- i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
return ret_code;
}
@@ -179,26 +168,27 @@ unwind_alloc_arq_bufs:
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)
{
- i40e_status ret_code;
- struct i40e_dma_mem *bi;
+ struct iavf_dma_mem *bi;
+ iavf_status ret_code;
int i;
/* No mapped memory needed yet, just the buffer info structures */
- ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
- (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries *
+ sizeof(struct iavf_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
- hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+ hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) {
bi = &hw->aq.asq.r.asq_bi[i];
- ret_code = i40e_allocate_dma_mem(hw, bi,
+ ret_code = iavf_allocate_dma_mem(hw, bi,
i40e_mem_asq_buf,
hw->aq.asq_buf_size,
- I40E_ADMINQ_DESC_ALIGNMENT);
+ IAVF_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
goto unwind_alloc_asq_bufs;
}
@@ -209,8 +199,8 @@ unwind_alloc_asq_bufs:
/* don't try to free the one that failed... */
i--;
for (; i >= 0; i--)
- i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
return ret_code;
}
@@ -219,42 +209,42 @@ unwind_alloc_asq_bufs:
* i40e_free_arq_bufs - Free receive queue buffer info elements
* @hw: pointer to the hardware structure
**/
-static void i40e_free_arq_bufs(struct i40e_hw *hw)
+static void i40e_free_arq_bufs(struct iavf_hw *hw)
{
int i;
/* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++)
- i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
/* free the descriptor memory */
- i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+ iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
/* free the dma header */
- i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
* i40e_free_asq_bufs - Free send queue buffer info elements
* @hw: pointer to the hardware structure
**/
-static void i40e_free_asq_bufs(struct i40e_hw *hw)
+static void i40e_free_asq_bufs(struct iavf_hw *hw)
{
int i;
/* only unmap if the address is non-NULL */
for (i = 0; i < hw->aq.num_asq_entries; i++)
if (hw->aq.asq.r.asq_bi[i].pa)
- i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
/* free the buffer info list */
- i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+ iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
/* free the descriptor memory */
- i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
/* free the dma header */
- i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
@@ -263,9 +253,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
*
* Configure base address and length registers for the transmit queue
**/
-static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+static iavf_status i40e_config_asq_regs(struct iavf_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -274,7 +264,7 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
/* set starting point */
wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
- I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ IAVF_VF_ATQLEN1_ATQENABLE_MASK));
wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
@@ -292,9 +282,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
*
 * Configure base address and length registers for the receive (event) queue
**/
-static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+static iavf_status i40e_config_arq_regs(struct iavf_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -303,7 +293,7 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
/* set starting point */
wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
- I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ IAVF_VF_ARQLEN1_ARQENABLE_MASK));
wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
@@ -331,9 +321,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_asq(struct i40e_hw *hw)
+static iavf_status i40e_init_asq(struct iavf_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -390,9 +380,9 @@ init_adminq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_arq(struct i40e_hw *hw)
+static iavf_status i40e_init_arq(struct iavf_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -442,9 +432,9 @@ init_adminq_exit:
*
* The main shutdown routine for the Admin Send Queue
**/
-static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+static iavf_status i40e_shutdown_asq(struct iavf_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
mutex_lock(&hw->aq.asq_mutex);
@@ -476,9 +466,9 @@ shutdown_asq_out:
*
* The main shutdown routine for the Admin Receive Queue
**/
-static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+static iavf_status i40e_shutdown_arq(struct iavf_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
mutex_lock(&hw->aq.arq_mutex);
@@ -505,7 +495,7 @@ shutdown_arq_out:
}
/**
- * i40evf_init_adminq - main initialization routine for Admin Queue
+ * iavf_init_adminq - main initialization routine for Admin Queue
* @hw: pointer to the hardware structure
*
* Prior to calling this function, drivers *MUST* set the following fields
@@ -515,9 +505,9 @@ shutdown_arq_out:
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
-i40e_status i40evf_init_adminq(struct i40e_hw *hw)
+iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
- i40e_status ret_code;
+ iavf_status ret_code;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
@@ -556,22 +546,19 @@ init_adminq_exit:
}
/**
- * i40evf_shutdown_adminq - shutdown routine for the Admin Queue
+ * iavf_shutdown_adminq - shutdown routine for the Admin Queue
* @hw: pointer to the hardware structure
**/
-i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
+iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
- if (i40evf_check_asq_alive(hw))
- i40evf_aq_queue_shutdown(hw, true);
+ if (iavf_check_asq_alive(hw))
+ iavf_aq_queue_shutdown(hw, true);
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
- if (hw->nvm_buff.va)
- i40e_free_virt_mem(hw, &hw->nvm_buff);
-
return ret_code;
}
@@ -581,18 +568,18 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
*
* returns the number of free desc
**/
-static u16 i40e_clean_asq(struct i40e_hw *hw)
+static u16 i40e_clean_asq(struct iavf_hw *hw)
{
- struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct iavf_adminq_ring *asq = &hw->aq.asq;
struct i40e_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
struct i40e_aq_desc desc_cb;
struct i40e_aq_desc *desc;
- desc = I40E_ADMINQ_DESC(*asq, ntc);
+ desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
@@ -607,33 +594,32 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
ntc++;
if (ntc == asq->count)
ntc = 0;
- desc = I40E_ADMINQ_DESC(*asq, ntc);
+ desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
}
asq->next_to_clean = ntc;
- return I40E_DESC_UNUSED(asq);
+ return IAVF_DESC_UNUSED(asq);
}
/**
- * i40evf_asq_done - check if FW has processed the Admin Send Queue
+ * iavf_asq_done - check if FW has processed the Admin Send Queue
* @hw: pointer to the hw struct
*
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
-bool i40evf_asq_done(struct i40e_hw *hw)
+bool iavf_asq_done(struct iavf_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
-
}
/**
- * i40evf_asq_send_command - send command to Admin Queue
+ * iavf_asq_send_command - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
@@ -643,24 +629,23 @@ bool i40evf_asq_done(struct i40e_hw *hw)
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
-i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details)
+iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
{
- i40e_status status = 0;
- struct i40e_dma_mem *dma_buff = NULL;
+ struct iavf_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
struct i40e_aq_desc *desc_on_ring;
bool cmd_completed = false;
+ iavf_status status = 0;
u16 retval = 0;
u32 val = 0;
mutex_lock(&hw->aq.asq_mutex);
if (hw->aq.asq.count == 0) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
status = I40E_ERR_QUEUE_EMPTY;
goto asq_send_command_error;
@@ -670,7 +655,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
val = rd32(hw, hw->aq.asq.head);
if (val >= hw->aq.num_asq_entries) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
status = I40E_ERR_QUEUE_EMPTY;
goto asq_send_command_error;
@@ -699,8 +684,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
desc->flags |= cpu_to_le16(details->flags_ena);
if (buff_size > hw->aq.asq_buf_size) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Invalid buffer size: %d.\n",
buff_size);
status = I40E_ERR_INVALID_SIZE;
@@ -708,8 +693,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
}
if (details->postpone && !details->async) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Async flag not set along with postpone flag");
status = I40E_ERR_PARAM;
goto asq_send_command_error;
@@ -723,22 +708,22 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
* in case of asynchronous completions
*/
if (i40e_clean_asq(hw) == 0) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Error queue is full.\n");
status = I40E_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
/* initialize the temp desc pointer with the right desc */
- desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+ desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
/* if the desc is available copy the temp desc to the right place */
*desc_on_ring = *desc;
/* if buff is not NULL assume indirect command */
- if (buff != NULL) {
- dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ if (buff) {
+ dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
/* copy the user buff into the respective DMA buff */
memcpy(dma_buff->va, buff, buff_size);
desc_on_ring->datalen = cpu_to_le16(buff_size);
@@ -753,9 +738,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
}
/* bump the tail */
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
- i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
- buff, buff_size);
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
@@ -772,7 +757,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- if (i40evf_asq_done(hw))
+ if (iavf_asq_done(hw))
break;
udelay(50);
total_delay += 50;
@@ -780,14 +765,14 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
}
/* if ready, copy the desc back to temp */
- if (i40evf_asq_done(hw)) {
+ if (iavf_asq_done(hw)) {
*desc = *desc_on_ring;
- if (buff != NULL)
+ if (buff)
memcpy(buff, dma_buff->va, buff_size);
retval = le16_to_cpu(desc->retval);
if (retval != 0) {
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Command completed with error 0x%X.\n",
retval);
@@ -804,10 +789,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
- i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
- buff_size);
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
/* save writeback aq if requested */
if (details->wb_desc)
@@ -816,12 +800,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
} else {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
}
@@ -833,14 +817,13 @@ asq_send_command_error:
}
/**
- * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
* @opcode: the opcode can be used to decide which flags to turn off or on
*
* Fill the desc with default values
**/
-void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
- u16 opcode)
+void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
@@ -849,7 +832,7 @@ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
}
/**
- * i40evf_clean_arq_element
+ * iavf_clean_arq_element
* @hw: pointer to the hw struct
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
@@ -858,14 +841,14 @@ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
-i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *pending)
+iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
{
- i40e_status ret_code = 0;
u16 ntc = hw->aq.arq.next_to_clean;
struct i40e_aq_desc *desc;
- struct i40e_dma_mem *bi;
+ iavf_status ret_code = 0;
+ struct iavf_dma_mem *bi;
u16 desc_idx;
u16 datalen;
u16 flags;
@@ -878,14 +861,14 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
mutex_lock(&hw->aq.arq_mutex);
if (hw->aq.arq.count == 0) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Admin queue not initialized.\n");
ret_code = I40E_ERR_QUEUE_EMPTY;
goto clean_arq_element_err;
}
/* set next_to_use to head */
- ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
+ ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -893,7 +876,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
}
/* now clean the next descriptor */
- desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
hw->aq.arq_last_status =
@@ -901,8 +884,8 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- i40e_debug(hw,
- I40E_DEBUG_AQ_MESSAGE,
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
hw->aq.arq_last_status);
}
@@ -910,13 +893,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
e->msg_len = min(datalen, e->buf_len);
- if (e->msg_buf != NULL && (e->msg_len != 0))
+ if (e->msg_buf && (e->msg_len != 0))
memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
e->msg_len);
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
- i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
- hw->aq.arq_buf_size);
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
@@ -943,7 +926,7 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
clean_arq_element_out:
/* Set pending if needed, unlock and return */
- if (pending != NULL)
+ if (pending)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
@@ -951,17 +934,3 @@ clean_arq_element_err:
return ret_code;
}
-
-void i40evf_resume_aq(struct i40e_hw *hw)
-{
- /* Registers are reset after PF reset */
- hw->aq.asq.next_to_use = 0;
- hw->aq.asq.next_to_clean = 0;
-
- i40e_config_asq_regs(hw);
-
- hw->aq.arq.next_to_use = 0;
- hw->aq.arq.next_to_clean = 0;
-
- i40e_config_arq_regs(hw);
-}
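
The send-queue cleanup above ends with return IAVF_DESC_UNUSED(asq), the
usual circular-ring computation of free slots from next_to_use (where
software posts descriptors) and next_to_clean (the first descriptor firmware
has not yet completed), keeping one slot in reserve so a full ring can be
told apart from an empty one. A standalone sketch of the same arithmetic;
struct demo_ring is a hypothetical mirror of the ring bookkeeping, not a
driver type:

	#include <stdio.h>
	#include <stdint.h>

	struct demo_ring {
		uint16_t count;		/* number of descriptors in the ring */
		uint16_t next_to_use;	/* next slot software will fill */
		uint16_t next_to_clean;	/* first slot not yet completed */
	};

	/* Same arithmetic as the IAVF_DESC_UNUSED() macro: wrap the
	 * difference around the ring and reserve one slot.
	 */
	static uint16_t demo_desc_unused(const struct demo_ring *r)
	{
		return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
		       r->next_to_clean - r->next_to_use - 1;
	}

	int main(void)
	{
		struct demo_ring r = {
			.count = 32, .next_to_use = 30, .next_to_clean = 5,
		};

		printf("unused descriptors: %u\n", demo_desc_unused(&r)); /* 6 */
		return 0;
	}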
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/iavf/i40e_adminq.h
index 1f264b9b6805..ee983889eab0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.h
@@ -1,26 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#ifndef _I40E_ADMINQ_H_
-#define _I40E_ADMINQ_H_
+#ifndef _IAVF_ADMINQ_H_
+#define _IAVF_ADMINQ_H_
-#include "i40e_osdep.h"
-#include "i40e_status.h"
+#include "iavf_osdep.h"
+#include "iavf_status.h"
#include "i40e_adminq_cmd.h"
-#define I40E_ADMINQ_DESC(R, i) \
+#define IAVF_ADMINQ_DESC(R, i) \
(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
-#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
-struct i40e_adminq_ring {
- struct i40e_virt_mem dma_head; /* space for dma structures */
- struct i40e_dma_mem desc_buf; /* descriptor ring memory */
- struct i40e_virt_mem cmd_buf; /* command buffer memory */
+struct iavf_adminq_ring {
+ struct iavf_virt_mem dma_head; /* space for dma structures */
+ struct iavf_dma_mem desc_buf; /* descriptor ring memory */
+ struct iavf_virt_mem cmd_buf; /* command buffer memory */
union {
- struct i40e_dma_mem *asq_bi;
- struct i40e_dma_mem *arq_bi;
+ struct iavf_dma_mem *asq_bi;
+ struct iavf_dma_mem *arq_bi;
} r;
u16 count; /* Number of descriptors */
@@ -61,9 +61,9 @@ struct i40e_arq_event_info {
};
/* Admin Queue information */
-struct i40e_adminq_info {
- struct i40e_adminq_ring arq; /* receive queue */
- struct i40e_adminq_ring asq; /* send queue */
+struct iavf_adminq_info {
+ struct iavf_adminq_ring arq; /* receive queue */
+ struct iavf_adminq_ring asq; /* send queue */
	u32 asq_cmd_timeout;		/* send queue cmd write back timeout */
u16 num_arq_entries; /* receive queue depth */
u16 num_asq_entries; /* send queue depth */
@@ -130,7 +130,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
-void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
- u16 opcode);
+void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode);
-#endif /* _I40E_ADMINQ_H_ */
+#endif /* _IAVF_ADMINQ_H_ */
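
IAVF_ADMINQ_DESC(R, i) above is plain typed indexing into the ring's raw DMA
buffer: cast desc_buf.va to an array of descriptors and take the address of
element i. A small user-space sketch of the same macro shape; struct
demo_desc and struct demo_ring are hypothetical stand-ins, with calloc()
standing in for the coherent DMA allocation:

	#include <stdio.h>
	#include <stdlib.h>

	struct demo_desc {
		unsigned short flags;
		unsigned short opcode;
	};

	struct demo_ring {
		void *desc_va;	/* would be the DMA-coherent mapping */
	};

	/* Cast the untyped buffer to a descriptor array, then index it. */
	#define DEMO_ADMINQ_DESC(R, i) \
		(&(((struct demo_desc *)((R).desc_va))[i]))

	int main(void)
	{
		struct demo_ring ring;
		struct demo_desc *d;

		ring.desc_va = calloc(16, sizeof(struct demo_desc));
		if (!ring.desc_va)
			return 1;

		d = DEMO_ADMINQ_DESC(ring, 3);	/* pointer to descriptor #3 */
		d->opcode = 0x0001;
		printf("desc 3 opcode: 0x%04x\n", DEMO_ADMINQ_DESC(ring, 3)->opcode);

		free(ring.desc_va);
		return 0;
	}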
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
new file mode 100644
index 000000000000..af4f94a6541e
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
@@ -0,0 +1,530 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software. Do not change the names in this file to IAVF
+ * because this file should be diff-able against the i40e version, even
+ * though many parts have been removed in this VF version.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+ I40E_FW_API_VERSION_MINOR_X710 : \
+ I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+ i40e_aqc_opc_set_switch_config = 0x0205,
+ i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
+ i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* Dynamic Device Personalization */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+	/* phy commands */
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+ i40e_aqc_opc_run_phy_activity = 0x0626,
+ i40e_aqc_opc_set_phy_register = 0x0628,
+ i40e_aqc_opc_get_phy_register = 0x0629,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_oem_post_update = 0x0720,
+ i40e_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (a structure used both to send and
+ *   return data keeps the _data suffix)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
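+
+/* For illustration (a sketch of the macro mechanics, using a hypothetical
+ * struct foo): I40E_CHECK_CMD_LENGTH(foo) expands to
+ *
+ *	enum i40e_static_assert_enum_foo {
+ *		i40e_static_assert_foo = 16 / ((sizeof(struct foo) == 16) ? 1 : 0)
+ *	};
+ *
+ * If struct foo is not exactly 16 bytes the divisor becomes 0 and the
+ * compiler rejects the constant expression; otherwise the enum is simply
+ * never referenced again.
+ */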
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+struct i40e_aqc_vsi_properties_data {
+	/* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+	__le16	switch_id; /* 12-bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+	__le16	pvid; /* VLANs include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
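+	/* Note: the EMOD values above are already shifted into bit position;
+	 * they are the 2-bit field values 0..3 placed at EMOD_SHIFT
+	 * (0x08 = 1 << 3, 0x10 = 2 << 3, 0x18 = 3 << 3), so they can be
+	 * ORed in directly without a further shift.
+	 */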
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
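+	/* Illustration (hypothetical local variable, not part of this
+	 * header): each user priority occupies a 3-bit field, so extracting
+	 * UP2 from the table is
+	 *
+	 *	up2 = (table & I40E_AQ_VSI_UP_TABLE_UP2_MASK) >>
+	 *	      I40E_AQ_VSI_UP_TABLE_UP2_SHIFT;
+	 */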
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
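+
+/* The 128-byte check above is consistent with the layout comments in the
+ * structure itself: the first 96 bytes are written by software and the last
+ * 32 bytes by firmware, 96 + 32 = 128.
+ */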
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+	__le16	veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
+};
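+
+/* For reference, the resulting bit values are 0x02, 0x04, 0x08, 0x10, 0x20
+ * and 0x40 for 100MB, 1GB, 10GB, 40GB, 20GB and 25GB respectively, so the
+ * encoding allows several speeds to be ORed together in an abilities mask.
+ */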
+
+/* Send to PF command (indirect 0x0801); id is only used by PF
+ * Send to VF command (indirect 0x0802); id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
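+
+/* 0x28 + 0xc = 0x34: a 40-byte standard RSS key plus a 12-byte extended
+ * hash key, 52 bytes in total, matching the length check above.
+ */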
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+ BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+#endif /* _I40E_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
new file mode 100644
index 000000000000..272d76b733aa
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -0,0 +1,418 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _IAVF_H_
+#define _IAVF_H_
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/socket.h>
+#include <linux/jiffies.h>
+#include <net/ip6_checksum.h>
+#include <net/pkt_cls.h>
+#include <net/udp.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "iavf_type.h"
+#include <linux/avf/virtchnl.h>
+#include "iavf_txrx.h"
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+#define PFX "iavf: "
+
+/* VSI state flags shared with common code */
+enum iavf_vsi_state_t {
+ __IAVF_VSI_DOWN,
+ /* This must be last as it determines the size of the BITMAP */
+ __IAVF_VSI_STATE_SIZE__,
+};
+
+/* dummy struct to make common code less painful */
+struct iavf_vsi {
+ struct iavf_adapter *back;
+ struct net_device *netdev;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 seid;
+ u16 id;
+ DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
+ int base_vector;
+ u16 work_limit;
+ u16 qs_handle;
+ void *priv; /* client driver data reference. */
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define IAVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define IAVF_DEFAULT_TXD 512
+#define IAVF_DEFAULT_RXD 512
+#define IAVF_MAX_TXD 4096
+#define IAVF_MIN_TXD 64
+#define IAVF_MAX_RXD 4096
+#define IAVF_MIN_RXD 64
+#define IAVF_REQ_DESCRIPTOR_MULTIPLE 32
+#define IAVF_MAX_AQ_BUF_SIZE 4096
+#define IAVF_AQ_LEN 32
+#define IAVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i]))
+#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
+#define IAVF_TX_CTXTDESC(R, i) \
+ (&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
+#define IAVF_MAX_REQ_QUEUES 4
+
+#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
+#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
+#define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
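+/* 125,000 bytes/s equals 1,000,000 bits/s, so dividing a rate by this
+ * constant yields megabits per second (assuming the input rate is
+ * expressed in bytes per second).
+ */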
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct iavf_q_vector {
+ struct iavf_adapter *adapter;
+ struct iavf_vsi *vsi;
+ struct napi_struct napi;
+ struct iavf_ring_container rx;
+ struct iavf_ring_container tx;
+ u32 ring_mask;
+	u8 itr_countdown;	/* when 0, adjust the adaptive ITR */
+ u8 num_ringpairs; /* total number of ring pairs in vector */
+ u16 v_idx; /* index in the vsi->q_vector array. */
+ u16 reg_idx; /* register index of the interrupt */
+ char name[IFNAMSIZ + 15];
+ bool arm_wb_state;
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the i40e hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
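+/* Worked example (illustrative values): EITR_INTS_PER_SEC_TO_REG(20000)
+ * = 1000000000 / (20000 * 256) = 195, and applying the same math to 195
+ * gives 1000000000 / (195 * 256) = 20032, i.e. roughly the original rate,
+ * which is why one macro serves both directions.
+ */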
+
+#define IAVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
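+/* Worked example (illustrative ring state): with count = 512,
+ * next_to_clean = 10 and next_to_use = 500, the expression evaluates to
+ * (512 + 10) - 500 - 1 = 21 free descriptors; when next_to_clean is ahead
+ * of next_to_use the count term drops out and the plain difference
+ * (minus one) is used.
+ */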
+
+#define OTHER_VECTOR 1
+#define NONQ_VECS (OTHER_VECTOR)
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)
+
+#define IAVF_QUEUE_END_OF_LIST 0x7FF
+#define IAVF_FREE_VECTOR 0x7FFF
+struct iavf_mac_filter {
+ struct list_head list;
+ u8 macaddr[ETH_ALEN];
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+struct iavf_vlan_filter {
+ struct list_head list;
+ u16 vlan;
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+#define IAVF_MAX_TRAFFIC_CLASS 4
+/* State of traffic class creation */
+enum iavf_tc_state_t {
+ __IAVF_TC_INVALID, /* no traffic class, default state */
+ __IAVF_TC_RUNNING, /* traffic classes have been created */
+};
+
+/* channel info */
+struct iavf_channel_config {
+ struct virtchnl_channel_info ch_info[IAVF_MAX_TRAFFIC_CLASS];
+ enum iavf_tc_state_t state;
+ u8 total_qps;
+};
+
+/* State of cloud filter */
+enum iavf_cloud_filter_state_t {
+ __IAVF_CF_INVALID, /* cloud filter not added */
+ __IAVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */
+ __IAVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */
+ __IAVF_CF_ACTIVE, /* cloud filter is active */
+};
+
+/* Driver state. The order of these is important! */
+enum iavf_state_t {
+ __IAVF_STARTUP, /* driver loaded, probe complete */
+ __IAVF_REMOVE, /* driver is being unloaded */
+ __IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
+ __IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
+ __IAVF_INIT_SW, /* got resources, setting up structs */
+ __IAVF_RESETTING, /* in reset */
+ /* Below here, watchdog is running */
+ __IAVF_DOWN, /* ready, can be opened */
+ __IAVF_DOWN_PENDING, /* descending, waiting for watchdog */
+ __IAVF_TESTING, /* in ethtool self-test */
+ __IAVF_RUNNING, /* opened, working */
+};
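+
+/* The ordering matters because the driver compares states numerically;
+ * iavf_client_subtask() in iavf_client.c, for instance, bails out with
+ *
+ *	if (adapter->state < __IAVF_DOWN)
+ *		return;
+ */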
+
+enum iavf_critical_section_t {
+ __IAVF_IN_CRITICAL_TASK, /* cannot be interrupted */
+ __IAVF_IN_CLIENT_TASK,
+ __IAVF_IN_REMOVE_TASK, /* device being removed */
+};
+
+#define IAVF_CLOUD_FIELD_OMAC 0x01
+#define IAVF_CLOUD_FIELD_IMAC 0x02
+#define IAVF_CLOUD_FIELD_IVLAN 0x04
+#define IAVF_CLOUD_FIELD_TEN_ID 0x08
+#define IAVF_CLOUD_FIELD_IIP 0x10
+
+#define IAVF_CF_FLAGS_OMAC IAVF_CLOUD_FIELD_OMAC
+#define IAVF_CF_FLAGS_IMAC IAVF_CLOUD_FIELD_IMAC
+#define IAVF_CF_FLAGS_IMAC_IVLAN (IAVF_CLOUD_FIELD_IMAC |\
+ IAVF_CLOUD_FIELD_IVLAN)
+#define IAVF_CF_FLAGS_IMAC_TEN_ID (IAVF_CLOUD_FIELD_IMAC |\
+ IAVF_CLOUD_FIELD_TEN_ID)
+#define IAVF_CF_FLAGS_OMAC_TEN_ID_IMAC (IAVF_CLOUD_FIELD_OMAC |\
+ IAVF_CLOUD_FIELD_IMAC |\
+ IAVF_CLOUD_FIELD_TEN_ID)
+#define IAVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (IAVF_CLOUD_FIELD_IMAC |\
+ IAVF_CLOUD_FIELD_IVLAN |\
+ IAVF_CLOUD_FIELD_TEN_ID)
+#define IAVF_CF_FLAGS_IIP IAVF_CLOUD_FIELD_IIP
+
+/* bookkeeping of cloud filters */
+struct iavf_cloud_filter {
+ enum iavf_cloud_filter_state_t state;
+ struct list_head list;
+ struct virtchnl_filter f;
+ unsigned long cookie;
+ bool del; /* filter needs to be deleted */
+ bool add; /* filter needs to be added */
+};
+
+/* board specific private data structure */
+struct iavf_adapter {
+ struct timer_list watchdog_timer;
+ struct work_struct reset_task;
+ struct work_struct adminq_task;
+ struct delayed_work client_task;
+ struct delayed_work init_task;
+ wait_queue_head_t down_waitqueue;
+ struct iavf_q_vector *q_vectors;
+ struct list_head vlan_filter_list;
+ struct list_head mac_filter_list;
+ /* Lock to protect accesses to MAC and VLAN lists */
+ spinlock_t mac_vlan_list_lock;
+ char misc_vector_name[IFNAMSIZ + 9];
+ int num_active_queues;
+ int num_req_queues;
+
+ /* TX */
+ struct iavf_ring *tx_rings;
+ u32 tx_timeout_count;
+ u32 tx_desc_count;
+
+ /* RX */
+ struct iavf_ring *rx_rings;
+ u64 hw_csum_rx_error;
+ u32 rx_desc_count;
+ int num_msix_vectors;
+ int num_iwarp_msix;
+ int iwarp_base_vector;
+ u32 client_pending;
+ struct i40e_client_instance *cinst;
+ struct msix_entry *msix_entries;
+
+ u32 flags;
+#define IAVF_FLAG_RX_CSUM_ENABLED BIT(0)
+#define IAVF_FLAG_PF_COMMS_FAILED BIT(3)
+#define IAVF_FLAG_RESET_PENDING BIT(4)
+#define IAVF_FLAG_RESET_NEEDED BIT(5)
+#define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6)
+#define IAVF_FLAG_ADDR_SET_BY_PF BIT(8)
+#define IAVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9)
+#define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10)
+#define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11)
+#define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12)
+#define IAVF_FLAG_PROMISC_ON BIT(13)
+#define IAVF_FLAG_ALLMULTI_ON BIT(14)
+#define IAVF_FLAG_LEGACY_RX BIT(15)
+#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
+#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
+/* duplicates for common code */
+#define IAVF_FLAG_DCB_ENABLED 0
+ /* flags for admin queue service task */
+ u32 aq_required;
+#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
+#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT(1)
+#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT(2)
+#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3)
+#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT(4)
+#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5)
+#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
+#define IAVF_FLAG_AQ_MAP_VECTORS BIT(7)
+#define IAVF_FLAG_AQ_HANDLE_RESET BIT(8)
+#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
+#define IAVF_FLAG_AQ_GET_CONFIG BIT(10)
+/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
+#define IAVF_FLAG_AQ_GET_HENA BIT(11)
+#define IAVF_FLAG_AQ_SET_HENA BIT(12)
+#define IAVF_FLAG_AQ_SET_RSS_KEY BIT(13)
+#define IAVF_FLAG_AQ_SET_RSS_LUT BIT(14)
+#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT(15)
+#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT(16)
+#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17)
+#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18)
+#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19)
+#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20)
+#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT(21)
+#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22)
+#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23)
+#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ struct iavf_hw hw; /* defined in iavf_type.h */
+
+ enum iavf_state_t state;
+ unsigned long crit_section;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool link_up;
+ enum virtchnl_link_speed link_speed;
+ enum virtchnl_ops current_op;
+#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
+ (_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_IWARP : \
+ 0)
+#define CLIENT_ENABLED(_a) ((_a)->cinst)
+/* RSS by the PF should be preferred over RSS via other methods. */
+#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+#define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \
+ (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)))
+#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_VLAN)
+ struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
+ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ struct virtchnl_version_info pf_version;
+#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
+ ((_a)->pf_version.minor == 1))
+ u16 msg_enable;
+ struct iavf_eth_stats current_stats;
+ struct iavf_vsi vsi;
+ u32 aq_wait_count;
+ /* RSS stuff */
+ u64 hena;
+ u16 rss_key_size;
+ u16 rss_lut_size;
+ u8 *rss_key;
+ u8 *rss_lut;
+ /* ADQ related members */
+ struct iavf_channel_config ch_config;
+ u8 num_tc;
+ struct list_head cloud_filter_list;
+ /* lock to protect access to the cloud filter list */
+ spinlock_t cloud_filter_list_lock;
+ u16 num_cloud_filters;
+};
+
+/* Ethtool Private Flags */
+
+/* lan device, used by client interface */
+struct i40e_device {
+ struct list_head list;
+ struct iavf_adapter *vf;
+};
+
+/* needed by iavf_ethtool.c */
+extern char iavf_driver_name[];
+extern const char iavf_driver_version[];
+
+int iavf_up(struct iavf_adapter *adapter);
+void iavf_down(struct iavf_adapter *adapter);
+int iavf_process_config(struct iavf_adapter *adapter);
+void iavf_schedule_reset(struct iavf_adapter *adapter);
+void iavf_reset(struct iavf_adapter *adapter);
+void iavf_set_ethtool_ops(struct net_device *netdev);
+void iavf_update_stats(struct iavf_adapter *adapter);
+void iavf_reset_interrupt_capability(struct iavf_adapter *adapter);
+int iavf_init_interrupt_scheme(struct iavf_adapter *adapter);
+void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask);
+void iavf_free_all_tx_resources(struct iavf_adapter *adapter);
+void iavf_free_all_rx_resources(struct iavf_adapter *adapter);
+
+void iavf_napi_add_all(struct iavf_adapter *adapter);
+void iavf_napi_del_all(struct iavf_adapter *adapter);
+
+int iavf_send_api_ver(struct iavf_adapter *adapter);
+int iavf_verify_api_ver(struct iavf_adapter *adapter);
+int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_config(struct iavf_adapter *adapter);
+void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
+void iavf_configure_queues(struct iavf_adapter *adapter);
+void iavf_deconfigure_queues(struct iavf_adapter *adapter);
+void iavf_enable_queues(struct iavf_adapter *adapter);
+void iavf_disable_queues(struct iavf_adapter *adapter);
+void iavf_map_queues(struct iavf_adapter *adapter);
+int iavf_request_queues(struct iavf_adapter *adapter, int num);
+void iavf_add_ether_addrs(struct iavf_adapter *adapter);
+void iavf_del_ether_addrs(struct iavf_adapter *adapter);
+void iavf_add_vlans(struct iavf_adapter *adapter);
+void iavf_del_vlans(struct iavf_adapter *adapter);
+void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
+void iavf_request_stats(struct iavf_adapter *adapter);
+void iavf_request_reset(struct iavf_adapter *adapter);
+void iavf_get_hena(struct iavf_adapter *adapter);
+void iavf_set_hena(struct iavf_adapter *adapter);
+void iavf_set_rss_key(struct iavf_adapter *adapter);
+void iavf_set_rss_lut(struct iavf_adapter *adapter);
+void iavf_enable_vlan_stripping(struct iavf_adapter *adapter);
+void iavf_disable_vlan_stripping(struct iavf_adapter *adapter);
+void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ enum virtchnl_ops v_opcode,
+ iavf_status v_retval, u8 *msg, u16 msglen);
+int iavf_config_rss(struct iavf_adapter *adapter);
+int iavf_lan_add_device(struct iavf_adapter *adapter);
+int iavf_lan_del_device(struct iavf_adapter *adapter);
+void iavf_client_subtask(struct iavf_adapter *adapter);
+void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len);
+void iavf_notify_client_l2_params(struct iavf_vsi *vsi);
+void iavf_notify_client_open(struct iavf_vsi *vsi);
+void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset);
+void iavf_enable_channels(struct iavf_adapter *adapter);
+void iavf_disable_channels(struct iavf_adapter *adapter);
+void iavf_add_cloud_filter(struct iavf_adapter *adapter);
+void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+#endif /* _IAVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_alloc.h b/drivers/net/ethernet/intel/iavf/iavf_alloc.h
new file mode 100644
index 000000000000..bf2753146f30
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_alloc.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _IAVF_ALLOC_H_
+#define _IAVF_ALLOC_H_
+
+struct iavf_hw;
+
+/* Memory allocation types */
+enum iavf_memory_type {
+ iavf_mem_arq_buf = 0, /* ARQ indirect command buffer */
+	iavf_mem_asq_buf = 1, /* ASQ indirect command buffer */
+ iavf_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ iavf_mem_arq_ring = 3, /* ARQ descriptor ring */
+ iavf_mem_atq_ring = 4, /* ATQ descriptor ring */
+ iavf_mem_pd = 5, /* Page Descriptor */
+ iavf_mem_bp = 6, /* Backing Page - 4KB */
+ iavf_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ iavf_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+iavf_status iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem,
+ enum iavf_memory_type type,
+ u64 size, u32 alignment);
+iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem);
+iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem, u32 size);
+iavf_status iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem);
+
+#endif /* _IAVF_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c
index 3cc9d60d0d72..aea45364fd1c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.c
@@ -4,36 +4,36 @@
#include <linux/list.h>
#include <linux/errno.h>
-#include "i40evf.h"
-#include "i40e_prototype.h"
-#include "i40evf_client.h"
+#include "iavf.h"
+#include "iavf_prototype.h"
+#include "iavf_client.h"
static
-const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
+const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR;
static struct i40e_client *vf_registered_client;
-static LIST_HEAD(i40evf_devices);
-static DEFINE_MUTEX(i40evf_device_mutex);
+static LIST_HEAD(i40e_devices);
+static DEFINE_MUTEX(iavf_device_mutex);
-static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
- struct i40e_client *client,
- u8 *msg, u16 len);
+static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len);
-static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
- struct i40e_client *client,
- struct i40e_qvlist_info *qvlist_info);
+static int iavf_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info);
-static struct i40e_ops i40evf_lan_ops = {
- .virtchnl_send = i40evf_client_virtchnl_send,
- .setup_qvlist = i40evf_client_setup_qvlist,
+static struct i40e_ops iavf_lan_ops = {
+ .virtchnl_send = iavf_client_virtchnl_send,
+ .setup_qvlist = iavf_client_setup_qvlist,
};
/**
- * i40evf_client_get_params - retrieve relevant client parameters
+ * iavf_client_get_params - retrieve relevant client parameters
* @vsi: VSI with parameters
* @params: client param struct
**/
static
-void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+void iavf_client_get_params(struct iavf_vsi *vsi, struct i40e_params *params)
{
int i;
@@ -41,21 +41,21 @@ void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
params->mtu = vsi->netdev->mtu;
params->link_up = vsi->back->link_up;
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ for (i = 0; i < IAVF_MAX_USER_PRIORITY; i++) {
params->qos.prio_qos[i].tc = 0;
params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
}
}
/**
- * i40evf_notify_client_message - call the client message receive callback
+ * iavf_notify_client_message - call the client message receive callback
* @vsi: the VSI associated with this client
* @msg: message buffer
* @len: length of message
*
* If there is a client to this VSI, call the client
**/
-void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
+void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len)
{
struct i40e_client_instance *cinst;
@@ -74,12 +74,12 @@ void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
}
/**
- * i40evf_notify_client_l2_params - call the client notify callback
+ * iavf_notify_client_l2_params - call the client notify callback
* @vsi: the VSI with l2 param changes
*
* If there is a client to this VSI, call the client
**/
-void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
+void iavf_notify_client_l2_params(struct iavf_vsi *vsi)
{
struct i40e_client_instance *cinst;
struct i40e_params params;
@@ -95,21 +95,21 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
"Cannot locate client instance l2_param_change function\n");
return;
}
- i40evf_client_get_params(vsi, &params);
+ iavf_client_get_params(vsi, &params);
cinst->lan_info.params = params;
cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
&params);
}
/**
- * i40evf_notify_client_open - call the client open callback
+ * iavf_notify_client_open - call the client open callback
* @vsi: the VSI with netdev opened
*
* If there is a client to this netdev, call the client with open
**/
-void i40evf_notify_client_open(struct i40e_vsi *vsi)
+void iavf_notify_client_open(struct iavf_vsi *vsi)
{
- struct i40evf_adapter *adapter = vsi->back;
+ struct iavf_adapter *adapter = vsi->back;
struct i40e_client_instance *cinst = adapter->cinst;
int ret;
@@ -127,22 +127,22 @@ void i40evf_notify_client_open(struct i40e_vsi *vsi)
}
/**
- * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
+ * iavf_client_release_qvlist - send a message to the PF to release iwarp qv map
* @ldev: pointer to L2 context.
*
* Return 0 on success or < 0 on error
**/
-static int i40evf_client_release_qvlist(struct i40e_info *ldev)
+static int iavf_client_release_qvlist(struct i40e_info *ldev)
{
- struct i40evf_adapter *adapter = ldev->vf;
- i40e_status err;
+ struct iavf_adapter *adapter = ldev->vf;
+ iavf_status err;
if (adapter->aq_required)
return -EAGAIN;
- err = i40e_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
- I40E_SUCCESS, NULL, 0, NULL);
+ err = iavf_aq_send_msg_to_pf(&adapter->hw,
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ I40E_SUCCESS, NULL, 0, NULL);
if (err)
dev_err(&adapter->pdev->dev,
@@ -153,15 +153,15 @@ static int i40evf_client_release_qvlist(struct i40e_info *ldev)
}
/**
- * i40evf_notify_client_close - call the client close callback
+ * iavf_notify_client_close - call the client close callback
* @vsi: the VSI with netdev closed
* @reset: true when close called due to reset pending
*
* If there is a client to this netdev, call the client with close
**/
-void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
+void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset)
{
- struct i40evf_adapter *adapter = vsi->back;
+ struct iavf_adapter *adapter = vsi->back;
struct i40e_client_instance *cinst = adapter->cinst;
if (!cinst || !cinst->client || !cinst->client->ops ||
@@ -171,21 +171,21 @@ void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
return;
}
cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
- i40evf_client_release_qvlist(&cinst->lan_info);
+ iavf_client_release_qvlist(&cinst->lan_info);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
}
/**
- * i40evf_client_add_instance - add a client instance to the instance list
+ * iavf_client_add_instance - add a client instance to the instance list
* @adapter: pointer to the board struct
*
* Returns cinst ptr on success, NULL on failure
**/
static struct i40e_client_instance *
-i40evf_client_add_instance(struct i40evf_adapter *adapter)
+iavf_client_add_instance(struct iavf_adapter *adapter)
{
struct i40e_client_instance *cinst = NULL;
- struct i40e_vsi *vsi = &adapter->vsi;
+ struct iavf_vsi *vsi = &adapter->vsi;
struct netdev_hw_addr *mac = NULL;
struct i40e_params params;
@@ -207,11 +207,11 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter)
cinst->lan_info.fid = 0;
cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
cinst->lan_info.hw_addr = adapter->hw.hw_addr;
- cinst->lan_info.ops = &i40evf_lan_ops;
- cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
- cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
- cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
- i40evf_client_get_params(vsi, &params);
+ cinst->lan_info.ops = &iavf_lan_ops;
+ cinst->lan_info.version.major = IAVF_CLIENT_VERSION_MAJOR;
+ cinst->lan_info.version.minor = IAVF_CLIENT_VERSION_MINOR;
+ cinst->lan_info.version.build = IAVF_CLIENT_VERSION_BUILD;
+ iavf_client_get_params(vsi, &params);
cinst->lan_info.params = params;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
@@ -233,28 +233,28 @@ out:
}
/**
- * i40evf_client_del_instance - removes a client instance from the list
+ * iavf_client_del_instance - removes a client instance from the list
* @adapter: pointer to the board struct
*
**/
static
-void i40evf_client_del_instance(struct i40evf_adapter *adapter)
+void iavf_client_del_instance(struct iavf_adapter *adapter)
{
kfree(adapter->cinst);
adapter->cinst = NULL;
}
/**
- * i40evf_client_subtask - client maintenance work
+ * iavf_client_subtask - client maintenance work
* @adapter: board private structure
**/
-void i40evf_client_subtask(struct i40evf_adapter *adapter)
+void iavf_client_subtask(struct iavf_adapter *adapter)
{
struct i40e_client *client = vf_registered_client;
struct i40e_client_instance *cinst;
int ret = 0;
- if (adapter->state < __I40EVF_DOWN)
+ if (adapter->state < __IAVF_DOWN)
return;
/* first check client is registered */
@@ -262,7 +262,7 @@ void i40evf_client_subtask(struct i40evf_adapter *adapter)
return;
/* Add the client instance to the instance list */
- cinst = i40evf_client_add_instance(adapter);
+ cinst = iavf_client_add_instance(adapter);
if (!cinst)
return;
@@ -279,23 +279,23 @@ void i40evf_client_subtask(struct i40evf_adapter *adapter)
&cinst->state);
else
/* remove client instance */
- i40evf_client_del_instance(adapter);
+ iavf_client_del_instance(adapter);
}
}
/**
- * i40evf_lan_add_device - add a lan device struct to the list of lan devices
+ * iavf_lan_add_device - add a lan device struct to the list of lan devices
* @adapter: pointer to the board struct
*
 * Returns 0 on success or non-0 on error
**/
-int i40evf_lan_add_device(struct i40evf_adapter *adapter)
+int iavf_lan_add_device(struct iavf_adapter *adapter)
{
struct i40e_device *ldev;
int ret = 0;
- mutex_lock(&i40evf_device_mutex);
- list_for_each_entry(ldev, &i40evf_devices, list) {
+ mutex_lock(&iavf_device_mutex);
+ list_for_each_entry(ldev, &i40e_devices, list) {
if (ldev->vf == adapter) {
ret = -EEXIST;
goto out;
@@ -308,7 +308,7 @@ int i40evf_lan_add_device(struct i40evf_adapter *adapter)
}
ldev->vf = adapter;
INIT_LIST_HEAD(&ldev->list);
- list_add(&ldev->list, &i40evf_devices);
+ list_add(&ldev->list, &i40e_devices);
dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
adapter->hw.bus.bus_id, adapter->hw.bus.device,
adapter->hw.bus.func);
@@ -316,26 +316,26 @@ int i40evf_lan_add_device(struct i40evf_adapter *adapter)
/* Since in some cases register may have happened before a device gets
* added, we can schedule a subtask to go initiate the clients.
*/
- adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
out:
- mutex_unlock(&i40evf_device_mutex);
+ mutex_unlock(&iavf_device_mutex);
return ret;
}
/**
- * i40evf_lan_del_device - removes a lan device from the device list
+ * iavf_lan_del_device - removes a lan device from the device list
* @adapter: pointer to the board struct
*
* Returns 0 on success or non-0 on error
**/
-int i40evf_lan_del_device(struct i40evf_adapter *adapter)
+int iavf_lan_del_device(struct iavf_adapter *adapter)
{
struct i40e_device *ldev, *tmp;
int ret = -ENODEV;
- mutex_lock(&i40evf_device_mutex);
- list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) {
+ mutex_lock(&iavf_device_mutex);
+ list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
if (ldev->vf == adapter) {
dev_info(&adapter->pdev->dev,
"Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
@@ -348,23 +348,23 @@ int i40evf_lan_del_device(struct i40evf_adapter *adapter)
}
}
- mutex_unlock(&i40evf_device_mutex);
+ mutex_unlock(&iavf_device_mutex);
return ret;
}
/**
- * i40evf_client_release - release client specific resources
+ * iavf_client_release - release client specific resources
* @client: pointer to the registered client
*
**/
-static void i40evf_client_release(struct i40e_client *client)
+static void iavf_client_release(struct i40e_client *client)
{
struct i40e_client_instance *cinst;
struct i40e_device *ldev;
- struct i40evf_adapter *adapter;
+ struct iavf_adapter *adapter;
- mutex_lock(&i40evf_device_mutex);
- list_for_each_entry(ldev, &i40evf_devices, list) {
+ mutex_lock(&iavf_device_mutex);
+ list_for_each_entry(ldev, &i40e_devices, list) {
adapter = ldev->vf;
cinst = adapter->cinst;
if (!cinst)
@@ -373,41 +373,41 @@ static void i40evf_client_release(struct i40e_client *client)
if (client->ops && client->ops->close)
client->ops->close(&cinst->lan_info, client,
false);
- i40evf_client_release_qvlist(&cinst->lan_info);
+ iavf_client_release_qvlist(&cinst->lan_info);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
dev_warn(&adapter->pdev->dev,
"Client %s instance closed\n", client->name);
}
/* delete the client instance */
- i40evf_client_del_instance(adapter);
+ iavf_client_del_instance(adapter);
dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
}
- mutex_unlock(&i40evf_device_mutex);
+ mutex_unlock(&iavf_device_mutex);
}
/**
- * i40evf_client_prepare - prepare client specific resources
+ * iavf_client_prepare - prepare client specific resources
* @client: pointer to the registered client
*
**/
-static void i40evf_client_prepare(struct i40e_client *client)
+static void iavf_client_prepare(struct i40e_client *client)
{
struct i40e_device *ldev;
- struct i40evf_adapter *adapter;
+ struct iavf_adapter *adapter;
- mutex_lock(&i40evf_device_mutex);
- list_for_each_entry(ldev, &i40evf_devices, list) {
+ mutex_lock(&iavf_device_mutex);
+ list_for_each_entry(ldev, &i40e_devices, list) {
adapter = ldev->vf;
/* Signal the watchdog to service the client */
- adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
}
- mutex_unlock(&i40evf_device_mutex);
+ mutex_unlock(&iavf_device_mutex);
}
/**
- * i40evf_client_virtchnl_send - send a message to the PF instance
+ * iavf_client_virtchnl_send - send a message to the PF instance
* @ldev: pointer to L2 context.
* @client: Client pointer.
* @msg: pointer to message buffer
@@ -415,17 +415,17 @@ static void i40evf_client_prepare(struct i40e_client *client)
*
* Return 0 on success or < 0 on error
**/
-static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
- struct i40e_client *client,
- u8 *msg, u16 len)
+static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len)
{
- struct i40evf_adapter *adapter = ldev->vf;
- i40e_status err;
+ struct iavf_adapter *adapter = ldev->vf;
+ iavf_status err;
if (adapter->aq_required)
return -EAGAIN;
- err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
+ err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
I40E_SUCCESS, msg, len, NULL);
if (err)
dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
@@ -435,21 +435,21 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
}
/**
- * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
+ * iavf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
* @ldev: pointer to L2 context.
* @client: Client pointer.
* @qvlist_info: queue and vector list
*
* Return 0 on success or < 0 on error
**/
-static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
- struct i40e_client *client,
- struct i40e_qvlist_info *qvlist_info)
+static int iavf_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info)
{
struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
- struct i40evf_adapter *adapter = ldev->vf;
+ struct iavf_adapter *adapter = ldev->vf;
struct i40e_qv_info *qv_info;
- i40e_status err;
+ iavf_status err;
u32 v_idx, i;
u32 msg_size;
@@ -474,9 +474,9 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
(v_qvlist_info->num_vectors - 1));
adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
- err = i40e_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
- I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
+ err = iavf_aq_send_msg_to_pf(&adapter->hw,
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, I40E_SUCCESS,
+ (u8 *)v_qvlist_info, msg_size, NULL);
if (err) {
dev_err(&adapter->pdev->dev,
@@ -499,12 +499,12 @@ out:
}
/**
- * i40evf_register_client - Register a i40e client driver with the L2 driver
+ * iavf_register_client - Register an i40e client driver with the L2 driver
* @client: pointer to the i40e_client struct
*
* Returns 0 on success or non-0 on error
**/
-int i40evf_register_client(struct i40e_client *client)
+int iavf_register_client(struct i40e_client *client)
{
int ret = 0;
@@ -514,48 +514,48 @@ int i40evf_register_client(struct i40e_client *client)
}
if (strlen(client->name) == 0) {
- pr_info("i40evf: Failed to register client with no name\n");
+ pr_info("iavf: Failed to register client with no name\n");
ret = -EIO;
goto out;
}
if (vf_registered_client) {
- pr_info("i40evf: Client %s has already been registered!\n",
+ pr_info("iavf: Client %s has already been registered!\n",
client->name);
ret = -EEXIST;
goto out;
}
- if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) ||
- (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) {
- pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n",
+ if ((client->version.major != IAVF_CLIENT_VERSION_MAJOR) ||
+ (client->version.minor != IAVF_CLIENT_VERSION_MINOR)) {
+ pr_info("iavf: Failed to register client %s due to mismatched client interface version\n",
client->name);
pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
client->version.major, client->version.minor,
client->version.build,
- i40evf_client_interface_version_str);
+ iavf_client_interface_version_str);
ret = -EIO;
goto out;
}
vf_registered_client = client;
- i40evf_client_prepare(client);
+ iavf_client_prepare(client);
- pr_info("i40evf: Registered client %s with return code %d\n",
+ pr_info("iavf: Registered client %s with return code %d\n",
client->name, ret);
out:
return ret;
}
-EXPORT_SYMBOL(i40evf_register_client);
+EXPORT_SYMBOL(iavf_register_client);
/**
- * i40evf_unregister_client - Unregister a i40e client driver with the L2 driver
+ * iavf_unregister_client - Unregister an i40e client driver with the L2 driver
* @client: pointer to the i40e_client struct
*
* Returns 0 on success or non-0 on error
**/
-int i40evf_unregister_client(struct i40e_client *client)
+int iavf_unregister_client(struct i40e_client *client)
{
int ret = 0;
@@ -563,17 +563,17 @@ int i40evf_unregister_client(struct i40e_client *client)
* a close for each of the client instances that were opened.
* client_release function is called to handle this.
*/
- i40evf_client_release(client);
+ iavf_client_release(client);
if (vf_registered_client != client) {
- pr_info("i40evf: Client %s has not been registered\n",
+ pr_info("iavf: Client %s has not been registered\n",
client->name);
ret = -ENODEV;
goto out;
}
vf_registered_client = NULL;
- pr_info("i40evf: Unregistered client %s\n", client->name);
+ pr_info("iavf: Unregistered client %s\n", client->name);
out:
return ret;
}
-EXPORT_SYMBOL(i40evf_unregister_client);
+EXPORT_SYMBOL(iavf_unregister_client);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h
index 5585f362048a..e216fc9dfd81 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.h
@@ -1,21 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#ifndef _I40EVF_CLIENT_H_
-#define _I40EVF_CLIENT_H_
+#ifndef _IAVF_CLIENT_H_
+#define _IAVF_CLIENT_H_
-#define I40EVF_CLIENT_STR_LENGTH 10
+#define IAVF_CLIENT_STR_LENGTH 10
/* Client interface version should be updated anytime there is a change in the
* existing APIs or data structures.
*/
-#define I40EVF_CLIENT_VERSION_MAJOR 0
-#define I40EVF_CLIENT_VERSION_MINOR 01
-#define I40EVF_CLIENT_VERSION_BUILD 00
-#define I40EVF_CLIENT_VERSION_STR \
- __stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \
- __stringify(I40EVF_CLIENT_VERSION_MINOR) "." \
- __stringify(I40EVF_CLIENT_VERSION_BUILD)
+#define IAVF_CLIENT_VERSION_MAJOR 0
+#define IAVF_CLIENT_VERSION_MINOR 01
+#define IAVF_CLIENT_VERSION_BUILD 00
+#define IAVF_CLIENT_VERSION_STR \
+ __stringify(IAVF_CLIENT_VERSION_MAJOR) "." \
+ __stringify(IAVF_CLIENT_VERSION_MINOR) "." \
+ __stringify(IAVF_CLIENT_VERSION_BUILD)
struct i40e_client_version {
u8 major;
@@ -90,7 +90,7 @@ struct i40e_info {
#define I40E_CLIENT_FTYPE_PF 0
#define I40E_CLIENT_FTYPE_VF 1
u8 ftype; /* function type, PF or VF */
- void *vf; /* cast to i40evf_adapter */
+ void *vf; /* cast to iavf_adapter */
/* All L2 params that could change during the life span of the device
* and needs to be communicated to the client when they change
@@ -151,7 +151,7 @@ struct i40e_client_instance {
struct i40e_client {
struct list_head list; /* list of registered clients */
- char name[I40EVF_CLIENT_STR_LENGTH];
+ char name[IAVF_CLIENT_STR_LENGTH];
struct i40e_client_version version;
unsigned long state; /* client state */
atomic_t ref_cnt; /* Count of all the client devices of this kind */
@@ -164,6 +164,6 @@ struct i40e_client {
};
/* used by clients */
-int i40evf_register_client(struct i40e_client *client);
-int i40evf_unregister_client(struct i40e_client *client);
-#endif /* _I40EVF_CLIENT_H_ */
+int iavf_register_client(struct i40e_client *client);
+int iavf_unregister_client(struct i40e_client *client);
+#endif /* _IAVF_CLIENT_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
new file mode 100644
index 000000000000..768369c89e77
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -0,0 +1,955 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "iavf_type.h"
+#include "i40e_adminq.h"
+#include "iavf_prototype.h"
+#include <linux/avf/virtchnl.h>
+
+/**
+ * iavf_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the MAC type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+iavf_status iavf_set_mac_type(struct iavf_hw *hw)
+{
+ iavf_status status = 0;
+
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (hw->device_id) {
+ case IAVF_DEV_ID_X722_VF:
+ hw->mac.type = IAVF_MAC_X722_VF;
+ break;
+ case IAVF_DEV_ID_VF:
+ case IAVF_DEV_ID_VF_HV:
+ case IAVF_DEV_ID_ADAPTIVE_VF:
+ hw->mac.type = IAVF_MAC_VF;
+ break;
+ default:
+ hw->mac.type = IAVF_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "found mac: %d, returns: %d\n", hw->mac.type, status);
+ return status;
+}
+
+/**
+ * iavf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+ switch (aq_err) {
+ case I40E_AQ_RC_OK:
+ return "OK";
+ case I40E_AQ_RC_EPERM:
+ return "I40E_AQ_RC_EPERM";
+ case I40E_AQ_RC_ENOENT:
+ return "I40E_AQ_RC_ENOENT";
+ case I40E_AQ_RC_ESRCH:
+ return "I40E_AQ_RC_ESRCH";
+ case I40E_AQ_RC_EINTR:
+ return "I40E_AQ_RC_EINTR";
+ case I40E_AQ_RC_EIO:
+ return "I40E_AQ_RC_EIO";
+ case I40E_AQ_RC_ENXIO:
+ return "I40E_AQ_RC_ENXIO";
+ case I40E_AQ_RC_E2BIG:
+ return "I40E_AQ_RC_E2BIG";
+ case I40E_AQ_RC_EAGAIN:
+ return "I40E_AQ_RC_EAGAIN";
+ case I40E_AQ_RC_ENOMEM:
+ return "I40E_AQ_RC_ENOMEM";
+ case I40E_AQ_RC_EACCES:
+ return "I40E_AQ_RC_EACCES";
+ case I40E_AQ_RC_EFAULT:
+ return "I40E_AQ_RC_EFAULT";
+ case I40E_AQ_RC_EBUSY:
+ return "I40E_AQ_RC_EBUSY";
+ case I40E_AQ_RC_EEXIST:
+ return "I40E_AQ_RC_EEXIST";
+ case I40E_AQ_RC_EINVAL:
+ return "I40E_AQ_RC_EINVAL";
+ case I40E_AQ_RC_ENOTTY:
+ return "I40E_AQ_RC_ENOTTY";
+ case I40E_AQ_RC_ENOSPC:
+ return "I40E_AQ_RC_ENOSPC";
+ case I40E_AQ_RC_ENOSYS:
+ return "I40E_AQ_RC_ENOSYS";
+ case I40E_AQ_RC_ERANGE:
+ return "I40E_AQ_RC_ERANGE";
+ case I40E_AQ_RC_EFLUSHED:
+ return "I40E_AQ_RC_EFLUSHED";
+ case I40E_AQ_RC_BAD_ADDR:
+ return "I40E_AQ_RC_BAD_ADDR";
+ case I40E_AQ_RC_EMODE:
+ return "I40E_AQ_RC_EMODE";
+ case I40E_AQ_RC_EFBIG:
+ return "I40E_AQ_RC_EFBIG";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+ return hw->err_str;
+}
+
+/**
+ * iavf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err)
+{
+ switch (stat_err) {
+ case 0:
+ return "OK";
+ case I40E_ERR_NVM:
+ return "I40E_ERR_NVM";
+ case I40E_ERR_NVM_CHECKSUM:
+ return "I40E_ERR_NVM_CHECKSUM";
+ case I40E_ERR_PHY:
+ return "I40E_ERR_PHY";
+ case I40E_ERR_CONFIG:
+ return "I40E_ERR_CONFIG";
+ case I40E_ERR_PARAM:
+ return "I40E_ERR_PARAM";
+ case I40E_ERR_MAC_TYPE:
+ return "I40E_ERR_MAC_TYPE";
+ case I40E_ERR_UNKNOWN_PHY:
+ return "I40E_ERR_UNKNOWN_PHY";
+ case I40E_ERR_LINK_SETUP:
+ return "I40E_ERR_LINK_SETUP";
+ case I40E_ERR_ADAPTER_STOPPED:
+ return "I40E_ERR_ADAPTER_STOPPED";
+ case I40E_ERR_INVALID_MAC_ADDR:
+ return "I40E_ERR_INVALID_MAC_ADDR";
+ case I40E_ERR_DEVICE_NOT_SUPPORTED:
+ return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+ case I40E_ERR_MASTER_REQUESTS_PENDING:
+ return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_INVALID_LINK_SETTINGS:
+ return "I40E_ERR_INVALID_LINK_SETTINGS";
+ case I40E_ERR_AUTONEG_NOT_COMPLETE:
+ return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+ case I40E_ERR_RESET_FAILED:
+ return "I40E_ERR_RESET_FAILED";
+ case I40E_ERR_SWFW_SYNC:
+ return "I40E_ERR_SWFW_SYNC";
+ case I40E_ERR_NO_AVAILABLE_VSI:
+ return "I40E_ERR_NO_AVAILABLE_VSI";
+ case I40E_ERR_NO_MEMORY:
+ return "I40E_ERR_NO_MEMORY";
+ case I40E_ERR_BAD_PTR:
+ return "I40E_ERR_BAD_PTR";
+ case I40E_ERR_RING_FULL:
+ return "I40E_ERR_RING_FULL";
+ case I40E_ERR_INVALID_PD_ID:
+ return "I40E_ERR_INVALID_PD_ID";
+ case I40E_ERR_INVALID_QP_ID:
+ return "I40E_ERR_INVALID_QP_ID";
+ case I40E_ERR_INVALID_CQ_ID:
+ return "I40E_ERR_INVALID_CQ_ID";
+ case I40E_ERR_INVALID_CEQ_ID:
+ return "I40E_ERR_INVALID_CEQ_ID";
+ case I40E_ERR_INVALID_AEQ_ID:
+ return "I40E_ERR_INVALID_AEQ_ID";
+ case I40E_ERR_INVALID_SIZE:
+ return "I40E_ERR_INVALID_SIZE";
+ case I40E_ERR_INVALID_ARP_INDEX:
+ return "I40E_ERR_INVALID_ARP_INDEX";
+ case I40E_ERR_INVALID_FPM_FUNC_ID:
+ return "I40E_ERR_INVALID_FPM_FUNC_ID";
+ case I40E_ERR_QP_INVALID_MSG_SIZE:
+ return "I40E_ERR_QP_INVALID_MSG_SIZE";
+ case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+ return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+ case I40E_ERR_INVALID_FRAG_COUNT:
+ return "I40E_ERR_INVALID_FRAG_COUNT";
+ case I40E_ERR_QUEUE_EMPTY:
+ return "I40E_ERR_QUEUE_EMPTY";
+ case I40E_ERR_INVALID_ALIGNMENT:
+ return "I40E_ERR_INVALID_ALIGNMENT";
+ case I40E_ERR_FLUSHED_QUEUE:
+ return "I40E_ERR_FLUSHED_QUEUE";
+ case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+ return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+ case I40E_ERR_INVALID_IMM_DATA_SIZE:
+ return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+ case I40E_ERR_TIMEOUT:
+ return "I40E_ERR_TIMEOUT";
+ case I40E_ERR_OPCODE_MISMATCH:
+ return "I40E_ERR_OPCODE_MISMATCH";
+ case I40E_ERR_CQP_COMPL_ERROR:
+ return "I40E_ERR_CQP_COMPL_ERROR";
+ case I40E_ERR_INVALID_VF_ID:
+ return "I40E_ERR_INVALID_VF_ID";
+ case I40E_ERR_INVALID_HMCFN_ID:
+ return "I40E_ERR_INVALID_HMCFN_ID";
+ case I40E_ERR_BACKING_PAGE_ERROR:
+ return "I40E_ERR_BACKING_PAGE_ERROR";
+ case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+ return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+ case I40E_ERR_INVALID_PBLE_INDEX:
+ return "I40E_ERR_INVALID_PBLE_INDEX";
+ case I40E_ERR_INVALID_SD_INDEX:
+ return "I40E_ERR_INVALID_SD_INDEX";
+ case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+ return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+ case I40E_ERR_INVALID_SD_TYPE:
+ return "I40E_ERR_INVALID_SD_TYPE";
+ case I40E_ERR_MEMCPY_FAILED:
+ return "I40E_ERR_MEMCPY_FAILED";
+ case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+ return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+ case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+ return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+ case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+ return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+ case I40E_ERR_SRQ_ENABLED:
+ return "I40E_ERR_SRQ_ENABLED";
+ case I40E_ERR_ADMIN_QUEUE_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_ERROR";
+ case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+ return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+ case I40E_ERR_BUF_TOO_SHORT:
+ return "I40E_ERR_BUF_TOO_SHORT";
+ case I40E_ERR_ADMIN_QUEUE_FULL:
+ return "I40E_ERR_ADMIN_QUEUE_FULL";
+ case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+ return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+ case I40E_ERR_BAD_IWARP_CQE:
+ return "I40E_ERR_BAD_IWARP_CQE";
+ case I40E_ERR_NVM_BLANK_MODE:
+ return "I40E_ERR_NVM_BLANK_MODE";
+ case I40E_ERR_NOT_IMPLEMENTED:
+ return "I40E_ERR_NOT_IMPLEMENTED";
+ case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+ return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+ case I40E_ERR_DIAG_TEST_FAILED:
+ return "I40E_ERR_DIAG_TEST_FAILED";
+ case I40E_ERR_NOT_READY:
+ return "I40E_ERR_NOT_READY";
+ case I40E_NOT_SUPPORTED:
+ return "I40E_NOT_SUPPORTED";
+ case I40E_ERR_FIRMWARE_API_VERSION:
+ return "I40E_ERR_FIRMWARE_API_VERSION";
+ case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+ }
+
+ snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+ return hw->err_str;
+}
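+
+/* Illustrative sketch (not part of the driver): how a caller might combine
+ * iavf_stat_str() with the AQ return-code helper above when logging an admin
+ * queue failure. The helper below is hypothetical and kept under #if 0 so it
+ * cannot affect the build.
+ */
+#if 0
+static void example_log_aq_failure(struct iavf_hw *hw, iavf_status status)
+{
+	/* Translate the driver status and the firmware's last AQ return
+	 * code into readable strings for the log.
+	 */
+	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+		   "AQ command failed: %s, fw reports %s\n",
+		   iavf_stat_str(hw, status),
+		   iavf_aq_str(hw, hw->aq.asq_last_status));
+}
+#endif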
+
+/**
+ * iavf_debug_aq
+ * @hw: pointer to the hardware structure
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps a debug log of the adminq command, including descriptor contents.
+ **/
+void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
+ void *buffer, u16 buf_len)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u8 *buf = (u8 *)buffer;
+
+ if ((!(mask & hw->debug_mask)) || !desc)
+ return;
+
+ iavf_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ le16_to_cpu(aq_desc->opcode),
+ le16_to_cpu(aq_desc->flags),
+ le16_to_cpu(aq_desc->datalen),
+ le16_to_cpu(aq_desc->retval));
+ iavf_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(aq_desc->cookie_high),
+ le32_to_cpu(aq_desc->cookie_low));
+ iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ le32_to_cpu(aq_desc->params.internal.param0),
+ le32_to_cpu(aq_desc->params.internal.param1));
+ iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(aq_desc->params.external.addr_high),
+ le32_to_cpu(aq_desc->params.external.addr_low));
+
+ if (buffer && aq_desc->datalen) {
+ u16 len = le16_to_cpu(aq_desc->datalen);
+
+ iavf_debug(hw, mask, "AQ CMD Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+ /* write the full 16-byte chunks */
+ if (hw->debug_mask & mask) {
+ char prefix[27];
+
+ snprintf(prefix, sizeof(prefix),
+ "iavf %02x:%02x.%x: \t0x",
+ hw->bus.bus_id,
+ hw->bus.device,
+ hw->bus.func);
+
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
+ }
+ }
+}
+
+/**
+ * iavf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the send queue (ASQ) is enabled, false otherwise.
+ **/
+bool iavf_check_asq_alive(struct iavf_hw *hw)
+{
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ IAVF_VF_ATQLEN1_ATQENABLE_MASK);
+ else
+ return false;
+}
+
+/**
+ * iavf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ iavf_status status;
+
+ iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set the RSS lookup table
+ **/
+static iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ iavf_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ iavf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ iavf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= cpu_to_le16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ false);
+}
+
+/**
+ * iavf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * iavf_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key for a VSI
+ **/
+static
+iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ iavf_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ iavf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ iavf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ cpu_to_le16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * iavf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key for a VSI
+ **/
+iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return iavf_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * iavf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
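+
+/* Illustrative sketch (hypothetical helper, under #if 0 so it cannot affect
+ * the build): programming both the RSS key and the VSI lookup table with the
+ * wrappers above, returning the first failure.
+ */
+#if 0
+static iavf_status example_config_rss(struct iavf_hw *hw, u16 vsi_id,
+				      struct i40e_aqc_get_set_rss_key_data *key,
+				      u8 *lut, u16 lut_size)
+{
+	iavf_status status;
+
+	/* Program the hash key first, then the VSI indirection table */
+	status = iavf_aq_set_rss_key(hw, vsi_id, key);
+	if (status)
+		return status;
+	return iavf_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
+}
+#endif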
+
+/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT iavf_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum iavf_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
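+
+/* A minimal C rendering of that flow (illustrative only; the real consumer
+ * is the Rx hot path, e.g. checksum handling in iavf_txrx.c):
+ *
+ *	struct iavf_rx_ptype_decoded decoded = iavf_ptype_lookup[ptype];
+ *
+ *	if (!decoded.known)
+ *		return;			// unknown packet, nothing to decode
+ *	if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP)
+ *		...;			// tunnel/inner protocol fields valid
+ *	else
+ *		...;			// decode via enum iavf_rx_l2_ptype
+ */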
+
+/* macro to make the table lines short */
+#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ IAVF_RX_PTYPE_OUTER_##OUTER_IP, \
+ IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ IAVF_RX_PTYPE_##OUTER_FRAG, \
+ IAVF_RX_PTYPE_TUNNEL_##T, \
+ IAVF_RX_PTYPE_TUNNEL_END_##TE, \
+ IAVF_RX_PTYPE_##TEF, \
+ IAVF_RX_PTYPE_INNER_PROT_##I, \
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define IAVF_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros make the table fit but stay terse */
+#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG
+#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG
+#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
+ /* L2 Packet types */
+ IAVF_PTT_UNUSED_ENTRY(0),
+ IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT_UNUSED_ENTRY(4),
+ IAVF_PTT_UNUSED_ENTRY(5),
+ IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT_UNUSED_ENTRY(8),
+ IAVF_PTT_UNUSED_ENTRY(9),
+ IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(25),
+ IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(32),
+ IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(39),
+ IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(47),
+ IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(54),
+ IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(62),
+ IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(69),
+ IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(77),
+ IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(84),
+ IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ IAVF_PTT_UNUSED_ENTRY(91),
+ IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(98),
+ IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(105),
+ IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(113),
+ IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(120),
+ IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(128),
+ IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(135),
+ IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(143),
+ IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ IAVF_PTT_UNUSED_ENTRY(150),
+ IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ IAVF_PTT_UNUSED_ENTRY(154),
+ IAVF_PTT_UNUSED_ENTRY(155),
+ IAVF_PTT_UNUSED_ENTRY(156),
+ IAVF_PTT_UNUSED_ENTRY(157),
+ IAVF_PTT_UNUSED_ENTRY(158),
+ IAVF_PTT_UNUSED_ENTRY(159),
+
+ IAVF_PTT_UNUSED_ENTRY(160),
+ IAVF_PTT_UNUSED_ENTRY(161),
+ IAVF_PTT_UNUSED_ENTRY(162),
+ IAVF_PTT_UNUSED_ENTRY(163),
+ IAVF_PTT_UNUSED_ENTRY(164),
+ IAVF_PTT_UNUSED_ENTRY(165),
+ IAVF_PTT_UNUSED_ENTRY(166),
+ IAVF_PTT_UNUSED_ENTRY(167),
+ IAVF_PTT_UNUSED_ENTRY(168),
+ IAVF_PTT_UNUSED_ENTRY(169),
+
+ IAVF_PTT_UNUSED_ENTRY(170),
+ IAVF_PTT_UNUSED_ENTRY(171),
+ IAVF_PTT_UNUSED_ENTRY(172),
+ IAVF_PTT_UNUSED_ENTRY(173),
+ IAVF_PTT_UNUSED_ENTRY(174),
+ IAVF_PTT_UNUSED_ENTRY(175),
+ IAVF_PTT_UNUSED_ENTRY(176),
+ IAVF_PTT_UNUSED_ENTRY(177),
+ IAVF_PTT_UNUSED_ENTRY(178),
+ IAVF_PTT_UNUSED_ENTRY(179),
+
+ IAVF_PTT_UNUSED_ENTRY(180),
+ IAVF_PTT_UNUSED_ENTRY(181),
+ IAVF_PTT_UNUSED_ENTRY(182),
+ IAVF_PTT_UNUSED_ENTRY(183),
+ IAVF_PTT_UNUSED_ENTRY(184),
+ IAVF_PTT_UNUSED_ENTRY(185),
+ IAVF_PTT_UNUSED_ENTRY(186),
+ IAVF_PTT_UNUSED_ENTRY(187),
+ IAVF_PTT_UNUSED_ENTRY(188),
+ IAVF_PTT_UNUSED_ENTRY(189),
+
+ IAVF_PTT_UNUSED_ENTRY(190),
+ IAVF_PTT_UNUSED_ENTRY(191),
+ IAVF_PTT_UNUSED_ENTRY(192),
+ IAVF_PTT_UNUSED_ENTRY(193),
+ IAVF_PTT_UNUSED_ENTRY(194),
+ IAVF_PTT_UNUSED_ENTRY(195),
+ IAVF_PTT_UNUSED_ENTRY(196),
+ IAVF_PTT_UNUSED_ENTRY(197),
+ IAVF_PTT_UNUSED_ENTRY(198),
+ IAVF_PTT_UNUSED_ENTRY(199),
+
+ IAVF_PTT_UNUSED_ENTRY(200),
+ IAVF_PTT_UNUSED_ENTRY(201),
+ IAVF_PTT_UNUSED_ENTRY(202),
+ IAVF_PTT_UNUSED_ENTRY(203),
+ IAVF_PTT_UNUSED_ENTRY(204),
+ IAVF_PTT_UNUSED_ENTRY(205),
+ IAVF_PTT_UNUSED_ENTRY(206),
+ IAVF_PTT_UNUSED_ENTRY(207),
+ IAVF_PTT_UNUSED_ENTRY(208),
+ IAVF_PTT_UNUSED_ENTRY(209),
+
+ IAVF_PTT_UNUSED_ENTRY(210),
+ IAVF_PTT_UNUSED_ENTRY(211),
+ IAVF_PTT_UNUSED_ENTRY(212),
+ IAVF_PTT_UNUSED_ENTRY(213),
+ IAVF_PTT_UNUSED_ENTRY(214),
+ IAVF_PTT_UNUSED_ENTRY(215),
+ IAVF_PTT_UNUSED_ENTRY(216),
+ IAVF_PTT_UNUSED_ENTRY(217),
+ IAVF_PTT_UNUSED_ENTRY(218),
+ IAVF_PTT_UNUSED_ENTRY(219),
+
+ IAVF_PTT_UNUSED_ENTRY(220),
+ IAVF_PTT_UNUSED_ENTRY(221),
+ IAVF_PTT_UNUSED_ENTRY(222),
+ IAVF_PTT_UNUSED_ENTRY(223),
+ IAVF_PTT_UNUSED_ENTRY(224),
+ IAVF_PTT_UNUSED_ENTRY(225),
+ IAVF_PTT_UNUSED_ENTRY(226),
+ IAVF_PTT_UNUSED_ENTRY(227),
+ IAVF_PTT_UNUSED_ENTRY(228),
+ IAVF_PTT_UNUSED_ENTRY(229),
+
+ IAVF_PTT_UNUSED_ENTRY(230),
+ IAVF_PTT_UNUSED_ENTRY(231),
+ IAVF_PTT_UNUSED_ENTRY(232),
+ IAVF_PTT_UNUSED_ENTRY(233),
+ IAVF_PTT_UNUSED_ENTRY(234),
+ IAVF_PTT_UNUSED_ENTRY(235),
+ IAVF_PTT_UNUSED_ENTRY(236),
+ IAVF_PTT_UNUSED_ENTRY(237),
+ IAVF_PTT_UNUSED_ENTRY(238),
+ IAVF_PTT_UNUSED_ENTRY(239),
+
+ IAVF_PTT_UNUSED_ENTRY(240),
+ IAVF_PTT_UNUSED_ENTRY(241),
+ IAVF_PTT_UNUSED_ENTRY(242),
+ IAVF_PTT_UNUSED_ENTRY(243),
+ IAVF_PTT_UNUSED_ENTRY(244),
+ IAVF_PTT_UNUSED_ENTRY(245),
+ IAVF_PTT_UNUSED_ENTRY(246),
+ IAVF_PTT_UNUSED_ENTRY(247),
+ IAVF_PTT_UNUSED_ENTRY(248),
+ IAVF_PTT_UNUSED_ENTRY(249),
+
+ IAVF_PTT_UNUSED_ENTRY(250),
+ IAVF_PTT_UNUSED_ENTRY(251),
+ IAVF_PTT_UNUSED_ENTRY(252),
+ IAVF_PTT_UNUSED_ENTRY(253),
+ IAVF_PTT_UNUSED_ENTRY(254),
+ IAVF_PTT_UNUSED_ENTRY(255)
+};
+
+/**
+ * iavf_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ iavf_status v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_asq_cmd_details details;
+ struct i40e_aq_desc desc;
+ iavf_status status;
+
+ iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
+ | I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ }
+ if (!cmd_details) {
+ memset(&details, 0, sizeof(details));
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = iavf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+ return status;
+}
+
+/**
+ * iavf_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void iavf_vf_parse_hw_config(struct iavf_hw *hw,
+ struct virtchnl_vf_resource *msg)
+{
+ struct virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
+ ether_addr_copy(hw->mac.perm_addr,
+ vsi_res->default_mac_addr);
+ ether_addr_copy(hw->mac.addr,
+ vsi_res->default_mac_addr);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * iavf_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+iavf_status iavf_vf_reset(struct iavf_hw *hw)
+{
+ return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
+ 0, NULL, 0, NULL);
+}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_devids.h b/drivers/net/ethernet/intel/iavf/iavf_devids.h
new file mode 100644
index 000000000000..8eb7b697e96c
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_devids.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _IAVF_DEVIDS_H_
+#define _IAVF_DEVIDS_H_
+
+/* Device IDs for the VF driver */
+#define IAVF_DEV_ID_VF 0x154C
+#define IAVF_DEV_ID_VF_HV 0x1571
+#define IAVF_DEV_ID_ADAPTIVE_VF 0x1889
+#define IAVF_DEV_ID_X722_VF 0x37CD
+#endif /* _IAVF_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
new file mode 100644
index 000000000000..9f87304109fe
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -0,0 +1,1036 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+/* ethtool support for iavf */
+#include "iavf.h"
+
+#include <linux/uaccess.h>
+
+/* ethtool statistics helpers */
+
+/**
+ * struct iavf_stats - definition for an ethtool statistic
+ * @stat_string: statistic name to display in ethtool -S output
+ * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
+ * @stat_offset: offsetof() the stat from a base pointer
+ *
+ * This structure defines a statistic to be added to the ethtool stats buffer.
+ * It defines a statistic as offset from a common base pointer. Stats should
+ * be defined in constant arrays using the IAVF_STAT macro, with every element
+ * of the array using the same _type for calculating the sizeof_stat and
+ * stat_offset.
+ *
+ * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
+ * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
+ * the iavf_add_ethtool_stat() helper function.
+ *
+ * The @stat_string is interpreted as a format string, allowing formatted
+ * values to be inserted while looping over multiple structures for a given
+ * statistics array. Thus, every statistic string in an array should have the
+ * same type and number of format specifiers, to be formatted by variadic
+ * arguments to the iavf_add_stat_string() helper function.
+ **/
+struct iavf_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+/* Helper macro to define an iavf_stat structure with proper size and type.
+ * Use this when defining constant statistics arrays. Note that @_type expects
+ * only a type name and is used multiple times.
+ */
+#define IAVF_STAT(_type, _name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .stat_offset = offsetof(_type, _stat) \
+}
+
+/* Helper macro for defining some statistics related to queues */
+#define IAVF_QUEUE_STAT(_name, _stat) \
+ IAVF_STAT(struct iavf_ring, _name, _stat)
+
+/* Stats associated with a Tx or Rx ring */
+static const struct iavf_stats iavf_gstrings_queue_stats[] = {
+ IAVF_QUEUE_STAT("%s-%u.packets", stats.packets),
+ IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes),
+};
+
+/**
+ * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer
+ * @data: location to store the stat value
+ * @pointer: basis for where to copy from
+ * @stat: the stat definition
+ *
+ * Copies the stat data defined by the pointer and stat structure pair into
+ * the memory supplied as data. Used to implement iavf_add_ethtool_stats and
+ * iavf_add_queue_stats. If the pointer is null, data will be zeroed.
+ */
+static void
+iavf_add_one_ethtool_stat(u64 *data, void *pointer,
+ const struct iavf_stats *stat)
+{
+ char *p;
+
+ if (!pointer) {
+		/* ensure that the ethtool data buffer is zeroed for any stats
+ * which don't have a valid pointer.
+ */
+ *data = 0;
+ return;
+ }
+
+ p = (char *)pointer + stat->stat_offset;
+ switch (stat->sizeof_stat) {
+ case sizeof(u64):
+ *data = *((u64 *)p);
+ break;
+ case sizeof(u32):
+ *data = *((u32 *)p);
+ break;
+ case sizeof(u16):
+ *data = *((u16 *)p);
+ break;
+ case sizeof(u8):
+ *data = *((u8 *)p);
+ break;
+ default:
+ WARN_ONCE(1, "unexpected stat size for %s",
+ stat->stat_string);
+ *data = 0;
+ }
+}
+
+/**
+ * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location to copy stats from
+ * @stats: array of stats to copy
+ * @size: the size of the stats definition
+ *
+ * Copy the stats defined by the stats array using the pointer as a base into
+ * the data buffer supplied by ethtool. Updates the data pointer to point to
+ * the next empty location for successive calls to __iavf_add_ethtool_stats.
+ * If pointer is null, set the data values to zero and update the pointer to
+ * skip these stats.
+ **/
+static void
+__iavf_add_ethtool_stats(u64 **data, void *pointer,
+ const struct iavf_stats stats[],
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
+}
+
+/**
+ * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer
+ * @data: ethtool stats buffer
+ * @pointer: location where stats are stored
+ * @stats: static const array of stat definitions
+ *
+ * Macro to ease the use of __iavf_add_ethtool_stats by taking a static
+ * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
+ * ensuring that we pass the size associated with the given stats array.
+ *
+ * The parameter @stats is evaluated twice, so parameters with side effects
+ * should be avoided.
+ **/
+#define iavf_add_ethtool_stats(data, pointer, stats) \
+ __iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
+
+/**
+ * iavf_add_queue_stats - copy queue statistics into supplied buffer
+ * @data: ethtool stats buffer
+ * @ring: the ring to copy
+ *
+ * Queue statistics must be copied while protected by
+ * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats.
+ * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
+ * ring pointer is null, zero out the queue stat values and update the data
+ * pointer. Otherwise safely copy the stats from the ring into the supplied
+ * buffer and update the data pointer when finished.
+ *
+ * This function expects to be called while under rcu_read_lock().
+ **/
+static void
+iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
+{
+ const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats);
+ const struct iavf_stats *stats = iavf_gstrings_queue_stats;
+ unsigned int start;
+ unsigned int i;
+
+ /* To avoid invalid statistics values, ensure that we keep retrying
+ * the copy until we get a consistent value according to
+ * u64_stats_fetch_retry_irq. But first, make sure our ring is
+ * non-null before attempting to access its syncp.
+ */
+ do {
+ start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
+ for (i = 0; i < size; i++)
+ iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
+ } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
+
+ /* Once we successfully copy the stats in, update the data pointer */
+ *data += size;
+}
+
+/**
+ * __iavf_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ * @size: size of the stats array
+ *
+ * Format and copy the strings described by stats into the buffer pointed at
+ * by p.
+ **/
+static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],
+ const unsigned int size, ...)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ va_list args;
+
+ va_start(args, size);
+ vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
+ *p += ETH_GSTRING_LEN;
+ va_end(args);
+ }
+}
+
+/**
+ * iavf_add_stat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ *
+ * Format and copy the strings described by the const static stats value into
+ * the buffer pointed at by p.
+ *
+ * The parameter @stats is evaluated twice, so parameters with side effects
+ * should be avoided. Additionally, stats must be an array such that
+ * ARRAY_SIZE can be called on it.
+ **/
+#define iavf_add_stat_strings(p, stats, ...) \
+ __iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
+
+#define VF_STAT(_name, _stat) \
+ IAVF_STAT(struct iavf_adapter, _name, _stat)
+
+static const struct iavf_stats iavf_gstrings_stats[] = {
+ VF_STAT("rx_bytes", current_stats.rx_bytes),
+ VF_STAT("rx_unicast", current_stats.rx_unicast),
+ VF_STAT("rx_multicast", current_stats.rx_multicast),
+ VF_STAT("rx_broadcast", current_stats.rx_broadcast),
+ VF_STAT("rx_discards", current_stats.rx_discards),
+ VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
+ VF_STAT("tx_bytes", current_stats.tx_bytes),
+ VF_STAT("tx_unicast", current_stats.tx_unicast),
+ VF_STAT("tx_multicast", current_stats.tx_multicast),
+ VF_STAT("tx_broadcast", current_stats.tx_broadcast),
+ VF_STAT("tx_discards", current_stats.tx_discards),
+ VF_STAT("tx_errors", current_stats.tx_errors),
+};
+
+#define IAVF_STATS_LEN ARRAY_SIZE(iavf_gstrings_stats)
+
+#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats)
+
+/* For now we have one and only one private flag and it is only defined
+ * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
+ * of leaving all this code sitting around empty we will strip it unless
+ * our one private flag is actually available.
+ */
+struct iavf_priv_flags {
+ char flag_string[ETH_GSTRING_LEN];
+ u32 flag;
+ bool read_only;
+};
+
+#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \
+ .flag_string = _name, \
+ .flag = _flag, \
+ .read_only = _read_only, \
+}
+
+static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = {
+ IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0),
+};
+
+#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags)
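+
+/* From userspace the flag defined above is inspected and toggled with
+ * ethtool, for example (device name is illustrative):
+ *
+ *	ethtool --show-priv-flags eth0
+ *	ethtool --set-priv-flags eth0 legacy-rx on
+ */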
+
+/**
+ * iavf_get_link_ksettings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @cmd: ethtool command
+ *
+ * Reports speed/duplex settings. Because this is a VF, we don't know what
+ * kind of link we really have, so we fake it.
+ **/
+static int iavf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = PORT_NONE;
+ /* Set speed and duplex */
+ switch (adapter->link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ cmd->base.speed = SPEED_40000;
+ break;
+ case I40E_LINK_SPEED_25GB:
+#ifdef SPEED_25000
+ cmd->base.speed = SPEED_25000;
+#else
+ netdev_info(netdev,
+ "Speed is 25G, display not supported by this version of ethtool.\n");
+#endif
+ break;
+ case I40E_LINK_SPEED_20GB:
+ cmd->base.speed = SPEED_20000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ cmd->base.speed = SPEED_10000;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ cmd->base.speed = SPEED_1000;
+ break;
+ case I40E_LINK_SPEED_100MB:
+ cmd->base.speed = SPEED_100;
+ break;
+ default:
+ break;
+ }
+ cmd->base.duplex = DUPLEX_FULL;
+
+ return 0;
+}
+
+/**
+ * iavf_get_sset_count - Get length of string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ *
+ * Reports size of various string tables.
+ **/
+static int iavf_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return IAVF_STATS_LEN +
+ (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES);
+ else if (sset == ETH_SS_PRIV_FLAGS)
+ return IAVF_PRIV_FLAGS_STR_LEN;
+ else
+ return -EINVAL;
+}
+
+/**
+ * iavf_get_ethtool_stats - report device statistics
+ * @netdev: network interface device structure
+ * @stats: ethtool statistics structure
+ * @data: pointer to data buffer
+ *
+ * All statistics are added to the data buffer as an array of u64.
+ **/
+static void iavf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ unsigned int i;
+
+ iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
+
+ rcu_read_lock();
+ for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) {
+ struct iavf_ring *ring;
+
+ /* Avoid accessing un-allocated queues */
+ ring = (i < adapter->num_active_queues ?
+ &adapter->tx_rings[i] : NULL);
+ iavf_add_queue_stats(&data, ring);
+
+ /* Avoid accessing un-allocated queues */
+ ring = (i < adapter->num_active_queues ?
+ &adapter->rx_rings[i] : NULL);
+ iavf_add_queue_stats(&data, ring);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * iavf_get_priv_flag_strings - Get private flag strings
+ * @netdev: network interface device structure
+ * @data: buffer for string data
+ *
+ * Builds the private flags string table
+ **/
+static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "%s",
+ iavf_gstrings_priv_flags[i].flag_string);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+/**
+ * iavf_get_stat_strings - Get stat strings
+ * @netdev: network interface device structure
+ * @data: buffer for string data
+ *
+ * Builds the statistics string table
+ **/
+static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
+{
+ unsigned int i;
+
+ iavf_add_stat_strings(&data, iavf_gstrings_stats);
+
+ /* Queues are always allocated in pairs, so we just use num_tx_queues
+ * for both Tx and Rx queues.
+ */
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
+ "tx", i);
+ iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
+ "rx", i);
+ }
+}
+
+/**
+ * iavf_get_strings - Get string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ * @data: buffer for string data
+ *
+ * Builds string tables for various string sets
+ **/
+static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ iavf_get_stat_strings(netdev, data);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ iavf_get_priv_flag_strings(netdev, data);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * iavf_get_priv_flags - report device private flags
+ * @netdev: network interface device structure
+ *
+ * The string set count and the string set must stay in sync with the flags
+ * returned here; add a new string to the iavf_gstrings_priv_flags array for
+ * each new flag.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 iavf_get_priv_flags(struct net_device *netdev)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ u32 i, ret_flags = 0;
+
+ for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
+ const struct iavf_priv_flags *priv_flags;
+
+ priv_flags = &iavf_gstrings_priv_flags[i];
+
+ if (priv_flags->flag & adapter->flags)
+ ret_flags |= BIT(i);
+ }
+
+ return ret_flags;
+}
+
+/**
+ * iavf_set_priv_flags - set private flags
+ * @netdev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ u32 orig_flags, new_flags, changed_flags;
+ u32 i;
+
+ orig_flags = READ_ONCE(adapter->flags);
+ new_flags = orig_flags;
+
+ for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
+ const struct iavf_priv_flags *priv_flags;
+
+ priv_flags = &iavf_gstrings_priv_flags[i];
+
+ if (flags & BIT(i))
+ new_flags |= priv_flags->flag;
+ else
+ new_flags &= ~(priv_flags->flag);
+
+ if (priv_flags->read_only &&
+ ((orig_flags ^ new_flags) & ~BIT(i)))
+ return -EOPNOTSUPP;
+ }
+
+ /* Before we finalize any flag changes, any checks which we need to
+ * perform to determine if the new flags will be supported should go
+ * here...
+ */
+
+	/* Compare and exchange the new flags into place. If cmpxchg returns
+	 * anything but the old value, another thread modified the flags
+	 * since we copied them; punt with an error and log a warning.
+	 */
+ if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
+ dev_warn(&adapter->pdev->dev,
+ "Unable to update adapter->flags as it was modified by another thread...\n");
+ return -EAGAIN;
+ }
+
+ changed_flags = orig_flags ^ new_flags;
+
+ /* Process any additional changes needed as a result of flag changes.
+ * The changed_flags value reflects the list of bits that were changed
+ * in the code above.
+ */
+
+ /* issue a reset to force legacy-rx change to take effect */
+ if (changed_flags & IAVF_FLAG_LEGACY_RX) {
+ if (netif_running(netdev)) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+ schedule_work(&adapter->reset_task);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_get_msglevel - Get debug message level
+ * @netdev: network interface device structure
+ *
+ * Returns current debug message level.
+ **/
+static u32 iavf_get_msglevel(struct net_device *netdev)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+/**
+ * iavf_set_msglevel - Set debug message level
+ * @netdev: network interface device structure
+ * @data: message level
+ *
+ * Set current debug message level. Higher values cause the driver to
+ * be noisier.
+ **/
+static void iavf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ if (IAVF_DEBUG_USER & data)
+ adapter->hw.debug_mask = data;
+ adapter->msg_enable = data;
+}
+
+/**
+ * iavf_get_drvinfo - Get driver info
+ * @netdev: network interface device structure
+ * @drvinfo: ethtool driver info structure
+ *
+ * Returns information about the driver and device for display to the user.
+ **/
+static void iavf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, iavf_driver_name, 32);
+ strlcpy(drvinfo->version, iavf_driver_version, 32);
+ strlcpy(drvinfo->fw_version, "N/A", 4);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
+}
+
+/**
+ * iavf_get_ringparam - Get ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Returns current ring parameters. TX and RX rings are reported separately,
+ * but the number of rings is not reported.
+ **/
+static void iavf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ ring->rx_max_pending = IAVF_MAX_RXD;
+ ring->tx_max_pending = IAVF_MAX_TXD;
+ ring->rx_pending = adapter->rx_desc_count;
+ ring->tx_pending = adapter->tx_desc_count;
+}
+
+/**
+ * iavf_set_ringparam - Set ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Sets ring parameters. TX and RX rings are controlled separately, but the
+ * number of rings is not specified, so all rings get the same settings.
+ **/
+static int iavf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ u32 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ IAVF_MIN_TXD,
+ IAVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ IAVF_MIN_RXD,
+ IAVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == adapter->tx_desc_count) &&
+ (new_rx_count == adapter->rx_desc_count))
+ return 0;
+
+ adapter->tx_desc_count = new_tx_count;
+ adapter->rx_desc_count = new_rx_count;
+
+ if (netif_running(netdev)) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+ schedule_work(&adapter->reset_task);
+ }
+
+ return 0;
+}
+
+/**
+ * __iavf_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
+ *
+ * Gets the per-queue interrupt coalescing settings; Rx and Tx usecs are
+ * per queue. If queue is < 0, queue 0's values are reported as the
+ * representative value.
+ **/
+static int __iavf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec, int queue)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_vsi *vsi = &adapter->vsi;
+ struct iavf_ring *rx_ring, *tx_ring;
+
+ ec->tx_max_coalesced_frames = vsi->work_limit;
+ ec->rx_max_coalesced_frames = vsi->work_limit;
+
+	/* Rx and Tx usecs are per-queue values. If the user doesn't specify
+	 * a queue, return queue 0's values as representative.
+ */
+ if (queue < 0)
+ queue = 0;
+ else if (queue >= adapter->num_active_queues)
+ return -EINVAL;
+
+ rx_ring = &adapter->rx_rings[queue];
+ tx_ring = &adapter->tx_rings[queue];
+
+ if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
+ ec->use_adaptive_rx_coalesce = 1;
+
+ if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
+ ec->use_adaptive_tx_coalesce = 1;
+
+ ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+
+ return 0;
+}
+
+/**
+ * iavf_get_coalesce - Get interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality. Note that if per-queue settings have been modified,
+ * this only represents the settings of queue 0.
+ **/
+static int iavf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ return __iavf_get_coalesce(netdev, ec, -1);
+}
+
+/**
+ * iavf_get_per_queue_coalesce - get coalesce values for specific queue
+ * @netdev: netdev to read
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to read
+ *
+ * Read specific queue's coalesce settings.
+ **/
+static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __iavf_get_coalesce(netdev, ec, queue);
+}
+
+/**
+ * iavf_set_itr_per_queue - set ITR values for specific queue
+ * @adapter: the VF adapter struct to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
+static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
+ struct ethtool_coalesce *ec, int queue)
+{
+ struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
+ struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
+ struct iavf_q_vector *q_vector;
+
+ rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
+ tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
+
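+	/* Set the dynamic (adaptive) ITR bit unconditionally, then toggle
+	 * it back off when adaptive coalescing was not requested.
+	 */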
+ rx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
+ if (!ec->use_adaptive_rx_coalesce)
+ rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
+
+ tx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
+ if (!ec->use_adaptive_tx_coalesce)
+ tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
+
+ q_vector = rx_ring->q_vector;
+ q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
+
+ q_vector = tx_ring->q_vector;
+ q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
+
+ /* The interrupt handler itself will take care of programming
+ * the Tx and Rx ITR values based on the values we have entered
+ * into the q_vector, no need to write the values now.
+ */
+}
+
+/**
+ * __iavf_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue.
+ **/
+static int __iavf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec, int queue)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_vsi *vsi = &adapter->vsi;
+ int i;
+
+ if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+ if (ec->rx_coalesce_usecs == 0) {
+ if (ec->use_adaptive_rx_coalesce)
+ netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
+ } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
+ (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
+ netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
+ return -EINVAL;
+ } else if (ec->tx_coalesce_usecs == 0) {
+ if (ec->use_adaptive_tx_coalesce)
+ netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+ } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
+ (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
+ netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
+ return -EINVAL;
+ }
+
+	/* Rx and Tx usecs have per-queue values. If the user doesn't specify
+	 * a queue, apply to all queues.
+ */
+ if (queue < 0) {
+ for (i = 0; i < adapter->num_active_queues; i++)
+ iavf_set_itr_per_queue(adapter, ec, i);
+ } else if (queue < adapter->num_active_queues) {
+ iavf_set_itr_per_queue(adapter, ec, queue);
+ } else {
+ netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
+ adapter->num_active_queues - 1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings for every queue.
+ **/
+static int iavf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ return __iavf_set_coalesce(netdev, ec, -1);
+}
+
+/**
+ * iavf_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to modify
+ *
+ * Modifies a specific queue's coalesce settings.
+ */
+static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __iavf_set_coalesce(netdev, ec, queue);
+}
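+
+/* From userspace, per-queue coalescing is driven with ethtool's --per-queue
+ * option, e.g. (device name and values are illustrative):
+ *
+ *	ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 50
+ */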
+
+/**
+ * iavf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule locations
+ *
+ * Returns 0 if the command is supported, -EOPNOTSUPP otherwise.
+ **/
+static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_active_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ netdev_info(netdev,
+ "RSS hash info is not available to vf, use pf.\n");
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * iavf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
+ **/
+static void iavf_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = IAVF_MAX_REQ_QUEUES;
+
+ ch->max_other = NONQ_VECS;
+ ch->other_count = NONQ_VECS;
+
+ ch->combined_count = adapter->num_active_queues;
+}
+
+/**
+ * iavf_set_channels: set the new channel count
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * Negotiate a new number of channels with the PF then do a reset. During
+ * reset we'll realloc queues and fix the RSS table. Returns 0 on success,
+ * negative on failure.
+ **/
+static int iavf_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ int num_req = ch->combined_count;
+
+ if (num_req != adapter->num_active_queues &&
+ !(adapter->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
+ dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
+ return -EINVAL;
+ }
+
+ if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+ adapter->num_tc) {
+ dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
+ return -EINVAL;
+ }
+
+ /* All of these should have already been checked by ethtool before this
+ * even gets to us, but just to be sure.
+ */
+ if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES)
+ return -EINVAL;
+
+ if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
+ return -EINVAL;
+
+ adapter->num_req_queues = num_req;
+ return iavf_request_queues(adapter, num_req);
+}
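+
+/* The channel count negotiated above maps to ethtool's combined channels,
+ * e.g. (illustrative):
+ *
+ *	ethtool -L eth0 combined 4
+ */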
+
+/**
+ * iavf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the RSS hash key size.
+ **/
+static u32 iavf_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->rss_key_size;
+}
+
+/**
+ * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->rss_lut_size;
+}
+
+/**
+ * iavf_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function in use
+ *
+ * Reads the indirection table from the driver's software copy. Always
+ * returns 0.
+ **/
+static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ u16 i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+	if (key)
+		memcpy(key, adapter->rss_key, adapter->rss_key_size);
+
+	if (!indir)
+		return 0;
+
+ /* Each 32-bit word pointed to by 'indir' holds one LUT entry */
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ indir[i] = (u32)adapter->rss_lut[i];
+
+ return 0;
+}
+
+/**
+ * iavf_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function to use
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ u16 i;
+
+ /* We do not allow change in unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!indir)
+ return 0;
+
+ /* Each 32-bit word pointed to by 'indir' holds one LUT entry */
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = (u8)(indir[i]);
+
+ return iavf_config_rss(adapter);
+}
+
+static const struct ethtool_ops iavf_ethtool_ops = {
+ .get_drvinfo = iavf_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = iavf_get_ringparam,
+ .set_ringparam = iavf_set_ringparam,
+ .get_strings = iavf_get_strings,
+ .get_ethtool_stats = iavf_get_ethtool_stats,
+ .get_sset_count = iavf_get_sset_count,
+ .get_priv_flags = iavf_get_priv_flags,
+ .set_priv_flags = iavf_set_priv_flags,
+ .get_msglevel = iavf_get_msglevel,
+ .set_msglevel = iavf_set_msglevel,
+ .get_coalesce = iavf_get_coalesce,
+ .set_coalesce = iavf_set_coalesce,
+ .get_per_queue_coalesce = iavf_get_per_queue_coalesce,
+ .set_per_queue_coalesce = iavf_set_per_queue_coalesce,
+ .get_rxnfc = iavf_get_rxnfc,
+ .get_rxfh_indir_size = iavf_get_rxfh_indir_size,
+ .get_rxfh = iavf_get_rxfh,
+ .set_rxfh = iavf_set_rxfh,
+ .get_channels = iavf_get_channels,
+ .set_channels = iavf_set_channels,
+ .get_rxfh_key_size = iavf_get_rxfh_key_size,
+ .get_link_ksettings = iavf_get_link_ksettings,
+};
+
+/**
+ * iavf_set_ethtool_ops - Initialize ethtool ops struct
+ * @netdev: network interface device structure
+ *
+ * Sets ethtool ops struct in our netdev so that ethtool can call
+ * our functions.
+ **/
+void iavf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &iavf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 5906c1c1d19d..9f2b7b7adf6b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1,38 +1,38 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#include "i40evf.h"
-#include "i40e_prototype.h"
-#include "i40evf_client.h"
-/* All i40evf tracepoints are defined by the include below, which must
+#include "iavf.h"
+#include "iavf_prototype.h"
+#include "iavf_client.h"
+/* All iavf tracepoints are defined by the include below, which must
* be included exactly once across the whole kernel with
* CREATE_TRACE_POINTS defined
*/
#define CREATE_TRACE_POINTS
-#include "i40e_trace.h"
+#include "iavf_trace.h"
-static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
-static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
-static int i40evf_close(struct net_device *netdev);
+static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
+static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
+static int iavf_close(struct net_device *netdev);
-char i40evf_driver_name[] = "i40evf";
-static const char i40evf_driver_string[] =
- "Intel(R) 40-10 Gigabit Virtual Function Network Driver";
+char iavf_driver_name[] = "iavf";
+static const char iavf_driver_string[] =
+ "Intel(R) Ethernet Adaptive Virtual Function Network Driver";
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 3
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
DRV_KERN
-const char i40evf_driver_version[] = DRV_VERSION;
-static const char i40evf_copyright[] =
- "Copyright (c) 2013 - 2015 Intel Corporation.";
+const char iavf_driver_version[] = DRV_VERSION;
+static const char iavf_copyright[] =
+ "Copyright (c) 2013 - 2018 Intel Corporation.";
-/* i40evf_pci_tbl - PCI Device ID Table
+/* iavf_pci_tbl - PCI Device ID Table
*
* Wildcard entries (PCI_ANY_ID) should come last
* Last entry must be all 0s
@@ -40,36 +40,37 @@ static const char i40evf_copyright[] =
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static const struct pci_device_id i40evf_pci_tbl[] = {
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
+static const struct pci_device_id iavf_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
+ {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
+ {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
+ {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
/* required last entry */
{0, }
};
-MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
+MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
+MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
-static struct workqueue_struct *i40evf_wq;
+static struct workqueue_struct *iavf_wq;
/**
- * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to fill out
* @size: size of memory requested
* @alignment: what to align the allocation to
**/
-i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
- struct i40e_dma_mem *mem,
- u64 size, u32 alignment)
+iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
+ struct iavf_dma_mem *mem,
+ u64 size, u32 alignment)
{
- struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+ struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
if (!mem)
return I40E_ERR_PARAM;
@@ -84,13 +85,13 @@ i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
}
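The hunk elides the allocation itself; in this driver family it is the
standard coherent-DMA pattern. A sketch of the elided body, reconstructed
from the surrounding shared-code conventions (not verbatim):

    /* Align the request, then delegate to the DMA API; mem->pa holds
     * the bus address handed back to the shared code.
     */
    mem->size = ALIGN(size, alignment);
    mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
                                 (dma_addr_t *)&mem->pa, GFP_KERNEL);
    return mem->va ? 0 : I40E_ERR_NO_MEMORY;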
/**
- * i40evf_free_dma_mem_d - OS specific memory free for shared code
+ * iavf_free_dma_mem_d - OS specific memory free for shared code
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{
- struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+ struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
if (!mem || !mem->va)
return I40E_ERR_PARAM;
@@ -100,13 +101,13 @@ i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
}
/**
- * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to fill out
* @size: size of memory requested
**/
-i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
- struct i40e_virt_mem *mem, u32 size)
+iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
+ struct iavf_virt_mem *mem, u32 size)
{
if (!mem)
return I40E_ERR_PARAM;
@@ -121,12 +122,11 @@ i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
}
/**
- * i40evf_free_virt_mem_d - OS specific memory free for shared code
+ * iavf_free_virt_mem_d - OS specific memory free for shared code
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
- struct i40e_virt_mem *mem)
+iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, struct iavf_virt_mem *mem)
{
if (!mem)
return I40E_ERR_PARAM;
@@ -138,17 +138,17 @@ i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
}
/**
- * i40evf_debug_d - OS dependent version of debug printing
+ * iavf_debug_d - OS dependent version of debug printing
* @hw: pointer to the HW structure
* @mask: debug level mask
* @fmt_str: printf-type format description
**/
-void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
{
char buf[512];
va_list argptr;
- if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
+ if (!(mask & ((struct iavf_hw *)hw)->debug_mask))
return;
va_start(argptr, fmt_str);
@@ -160,134 +160,131 @@ void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
}
/**
- * i40evf_schedule_reset - Set the flags and schedule a reset event
+ * iavf_schedule_reset - Set the flags and schedule a reset event
* @adapter: board private structure
**/
-void i40evf_schedule_reset(struct i40evf_adapter *adapter)
+void iavf_schedule_reset(struct iavf_adapter *adapter)
{
if (!(adapter->flags &
- (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
- adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+ (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
schedule_work(&adapter->reset_task);
}
}
/**
- * i40evf_tx_timeout - Respond to a Tx Hang
+ * iavf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-static void i40evf_tx_timeout(struct net_device *netdev)
+static void iavf_tx_timeout(struct net_device *netdev)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
- i40evf_schedule_reset(adapter);
+ iavf_schedule_reset(adapter);
}
/**
- * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
+ * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
-static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
+static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
if (!adapter->msix_entries)
return;
- wr32(hw, I40E_VFINT_DYN_CTL01, 0);
+ wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
- /* read flush */
- rd32(hw, I40E_VFGEN_RSTAT);
+ iavf_flush(hw);
synchronize_irq(adapter->msix_entries[0].vector);
}
/**
- * i40evf_misc_irq_enable - Enable default interrupt generation settings
+ * iavf_misc_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
-static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
+static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
- wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
- I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
- wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
+ wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+ IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
- /* read flush */
- rd32(hw, I40E_VFGEN_RSTAT);
+ iavf_flush(hw);
}
/**
- * i40evf_irq_disable - Mask off interrupt generation on the NIC
+ * iavf_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
-static void i40evf_irq_disable(struct i40evf_adapter *adapter)
+static void iavf_irq_disable(struct iavf_adapter *adapter)
{
int i;
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
if (!adapter->msix_entries)
return;
for (i = 1; i < adapter->num_msix_vectors; i++) {
- wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
+ wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
synchronize_irq(adapter->msix_entries[i].vector);
}
- /* read flush */
- rd32(hw, I40E_VFGEN_RSTAT);
+ iavf_flush(hw);
}
/**
- * i40evf_irq_enable_queues - Enable interrupt for specified queues
+ * iavf_irq_enable_queues - Enable interrupt for specified queues
* @adapter: board private structure
* @mask: bitmap of queues to enable
**/
-void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
+void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
int i;
for (i = 1; i < adapter->num_msix_vectors; i++) {
if (mask & BIT(i - 1)) {
- wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
- I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
+ wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
+ IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
}
}
}
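Callers pass a bitmap in which bit (i - 1) selects queue vector i, so ~0
enables every queue vector, as iavf_irq_enable() below does. For example:

    /* Usage sketch: enable interrupts for queue vectors 0 and 2 only. */
    iavf_irq_enable_queues(adapter, BIT(0) | BIT(2));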
/**
- * i40evf_irq_enable - Enable default interrupt generation settings
+ * iavf_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
 * @flush: whether to flush pending writes with a register read
**/
-void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
+void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
- i40evf_misc_irq_enable(adapter);
- i40evf_irq_enable_queues(adapter, ~0);
+ iavf_misc_irq_enable(adapter);
+ iavf_irq_enable_queues(adapter, ~0);
if (flush)
- rd32(hw, I40E_VFGEN_RSTAT);
+ iavf_flush(hw);
}
/**
- * i40evf_msix_aq - Interrupt handler for vector 0
+ * iavf_msix_aq - Interrupt handler for vector 0
* @irq: interrupt number
* @data: pointer to netdev
**/
-static irqreturn_t i40evf_msix_aq(int irq, void *data)
+static irqreturn_t iavf_msix_aq(int irq, void *data)
{
struct net_device *netdev = data;
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_hw *hw = &adapter->hw;
/* handle non-queue interrupts, these reads clear the registers */
- rd32(hw, I40E_VFINT_ICR01);
- rd32(hw, I40E_VFINT_ICR0_ENA1);
+ rd32(hw, IAVF_VFINT_ICR01);
+ rd32(hw, IAVF_VFINT_ICR0_ENA1);
/* schedule work on the private workqueue */
schedule_work(&adapter->adminq_task);
@@ -296,13 +293,13 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
}
/**
- * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
+ * iavf_msix_clean_rings - MSIX mode Interrupt Handler
* @irq: interrupt number
* @data: pointer to a q_vector
**/
-static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
+static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
- struct i40e_q_vector *q_vector = data;
+ struct iavf_q_vector *q_vector = data;
if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
@@ -313,17 +310,17 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
}
/**
- * i40evf_map_vector_to_rxq - associate irqs with rx queues
+ * iavf_map_vector_to_rxq - associate irqs with rx queues
* @adapter: board private structure
* @v_idx: interrupt number
* @r_idx: queue number
**/
static void
-i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
+iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
- struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
- struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
+ struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
+ struct iavf_hw *hw = &adapter->hw;
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;
@@ -333,23 +330,23 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.next_update = jiffies + 1;
q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
q_vector->ring_mask |= BIT(r_idx);
- wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx),
+ wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
q_vector->rx.current_itr);
q_vector->rx.current_itr = q_vector->rx.target_itr;
}
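The two assignments visible at the top of this hunk, together with the
head update the hunk elides, are a plain head-insert onto the vector's
singly linked ring list:

    /* Head-insert (sketch; the second line sits in elided context). */
    rx_ring->next = q_vector->rx.ring;
    q_vector->rx.ring = rx_ring;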
/**
- * i40evf_map_vector_to_txq - associate irqs with tx queues
+ * iavf_map_vector_to_txq - associate irqs with tx queues
* @adapter: board private structure
* @v_idx: interrupt number
* @t_idx: queue number
**/
static void
-i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
+iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
- struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
- struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
+ struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
+ struct iavf_hw *hw = &adapter->hw;
tx_ring->q_vector = q_vector;
tx_ring->next = q_vector->tx.ring;
@@ -359,13 +356,13 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.next_update = jiffies + 1;
q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
q_vector->num_ringpairs++;
- wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx),
+ wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
q_vector->tx.target_itr);
q_vector->tx.current_itr = q_vector->tx.target_itr;
}
/**
- * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
* @adapter: board private structure to initialize
*
* This function maps descriptor rings to the queue-specific vectors
@@ -374,7 +371,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
* group the rings as "efficiently" as possible. You would add new
* mapping configurations in here.
**/
-static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
+static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
int rings_remaining = adapter->num_active_queues;
int ridx = 0, vidx = 0;
@@ -383,8 +380,8 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (; ridx < rings_remaining; ridx++) {
- i40evf_map_vector_to_rxq(adapter, vidx, ridx);
- i40evf_map_vector_to_txq(adapter, vidx, ridx);
+ iavf_map_vector_to_rxq(adapter, vidx, ridx);
+ iavf_map_vector_to_txq(adapter, vidx, ridx);
/* In the case where we have more queues than vectors, continue
* round-robin on vectors until all queues are mapped.
@@ -393,61 +390,38 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
vidx = 0;
}
- adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}
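With more queues than vectors, the wrap in the elided context makes the
assignment a plain modulus: 8 queues over 3 vectors gives vector 0 the
queues {0, 3, 6}, vector 1 {1, 4, 7} and vector 2 {2, 5}. Equivalently:

    /* Equivalent formulation of the loop above: queue r is served by
     * vector r % q_vectors.
     */
    for (ridx = 0; ridx < rings_remaining; ridx++) {
            vidx = ridx % q_vectors;
            iavf_map_vector_to_rxq(adapter, vidx, ridx);
            iavf_map_vector_to_txq(adapter, vidx, ridx);
    }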
-#ifdef CONFIG_NET_POLL_CONTROLLER
/**
- * i40evf_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-static void i40evf_netpoll(struct net_device *netdev)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
- return;
-
- for (i = 0; i < q_vectors; i++)
- i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
-}
-
-#endif
-/**
- * i40evf_irq_affinity_notify - Callback for affinity changes
+ * iavf_irq_affinity_notify - Callback for affinity changes
* @notify: context as to what irq was changed
* @mask: the new affinity mask
*
* This is a callback function used by the irq_set_affinity_notifier function
* so that we may register to receive changes to the irq affinity masks.
**/
-static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
+static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
{
- struct i40e_q_vector *q_vector =
- container_of(notify, struct i40e_q_vector, affinity_notify);
+ struct iavf_q_vector *q_vector =
+ container_of(notify, struct iavf_q_vector, affinity_notify);
cpumask_copy(&q_vector->affinity_mask, mask);
}
/**
- * i40evf_irq_affinity_release - Callback for affinity notifier release
+ * iavf_irq_affinity_release - Callback for affinity notifier release
* @ref: internal core kernel usage
*
* This is a callback function used by the irq_set_affinity_notifier function
* to inform the current notification subscriber that they will no longer
* receive notifications.
**/
-static void i40evf_irq_affinity_release(struct kref *ref) {}
+static void iavf_irq_affinity_release(struct kref *ref) {}
/**
- * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
+ * iavf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
* @basename: device basename
*
@@ -455,37 +429,38 @@ static void i40evf_irq_affinity_release(struct kref *ref) {}
* interrupts from the kernel.
**/
static int
-i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
+iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
unsigned int vector, q_vectors;
unsigned int rx_int_idx = 0, tx_int_idx = 0;
int irq_num, err;
int cpu;
- i40evf_irq_disable(adapter);
+ iavf_irq_disable(adapter);
/* Decrement for Other and TCP Timer vectors */
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (vector = 0; vector < q_vectors; vector++) {
- struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
+ struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];
+
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "i40evf-%s-TxRx-%d", basename, rx_int_idx++);
+ "iavf-%s-TxRx-%d", basename, rx_int_idx++);
tx_int_idx++;
} else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "i40evf-%s-rx-%d", basename, rx_int_idx++);
+ "iavf-%s-rx-%d", basename, rx_int_idx++);
} else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "i40evf-%s-tx-%d", basename, tx_int_idx++);
+ "iavf-%s-tx-%d", basename, tx_int_idx++);
} else {
/* skip this unused q_vector */
continue;
}
err = request_irq(irq_num,
- i40evf_msix_clean_rings,
+ iavf_msix_clean_rings,
0,
q_vector->name,
q_vector);
@@ -495,9 +470,9 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
goto free_queue_irqs;
}
/* register for affinity change notifications */
- q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
+ q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
q_vector->affinity_notify.release =
- i40evf_irq_affinity_release;
+ iavf_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* Spread the IRQ affinity hints across online CPUs. Note that
* get_cpu_mask returns a mask with a permanent lifetime so
@@ -521,23 +496,23 @@ free_queue_irqs:
}
/**
- * i40evf_request_misc_irq - Initialize MSI-X interrupts
+ * iavf_request_misc_irq - Initialize MSI-X interrupts
* @adapter: board private structure
*
* Allocates MSI-X vector 0 and requests interrupts from the kernel. This
* vector is only for the admin queue, and stays active even when the netdev
* is closed.
**/
-static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
+static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
snprintf(adapter->misc_vector_name,
- sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
+ sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
dev_name(&adapter->pdev->dev));
err = request_irq(adapter->msix_entries[0].vector,
- &i40evf_msix_aq, 0,
+ &iavf_msix_aq, 0,
adapter->misc_vector_name, netdev);
if (err) {
dev_err(&adapter->pdev->dev,
@@ -549,12 +524,12 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
}
/**
- * i40evf_free_traffic_irqs - Free MSI-X interrupts
+ * iavf_free_traffic_irqs - Free MSI-X interrupts
* @adapter: board private structure
*
* Frees all MSI-X vectors other than 0.
**/
-static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
+static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
int vector, irq_num, q_vectors;
@@ -572,12 +547,12 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
}
/**
- * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
+ * iavf_free_misc_irq - Free MSI-X miscellaneous vector
* @adapter: board private structure
*
* Frees MSI-X vector 0.
**/
-static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
+static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -588,58 +563,58 @@ static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
}
/**
- * i40evf_configure_tx - Configure Transmit Unit after Reset
+ * iavf_configure_tx - Configure Transmit Unit after Reset
* @adapter: board private structure
*
* Configure the Tx unit of the MAC after a reset.
**/
-static void i40evf_configure_tx(struct i40evf_adapter *adapter)
+static void iavf_configure_tx(struct iavf_adapter *adapter)
{
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
int i;
for (i = 0; i < adapter->num_active_queues; i++)
- adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+ adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}
/**
- * i40evf_configure_rx - Configure Receive Unit after Reset
+ * iavf_configure_rx - Configure Receive Unit after Reset
* @adapter: board private structure
*
* Configure the Rx unit of the MAC after a reset.
**/
-static void i40evf_configure_rx(struct i40evf_adapter *adapter)
+static void iavf_configure_rx(struct iavf_adapter *adapter)
{
- unsigned int rx_buf_len = I40E_RXBUFFER_2048;
- struct i40e_hw *hw = &adapter->hw;
+ unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
+ struct iavf_hw *hw = &adapter->hw;
int i;
/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
- if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
+ if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
struct net_device *netdev = adapter->netdev;
/* For jumbo frames on systems with 4K pages we have to use
* an order 1 page, so we might as well increase the size
* of our Rx buffer to make better use of the available space
*/
- rx_buf_len = I40E_RXBUFFER_3072;
+ rx_buf_len = IAVF_RXBUFFER_3072;
/* We use a 1536 buffer size for configurations with
* standard Ethernet mtu. On x86 this gives us enough room
* for shared info and 192 bytes of padding.
*/
- if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+ if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
(netdev->mtu <= ETH_DATA_LEN))
- rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
}
#endif
for (i = 0; i < adapter->num_active_queues; i++) {
- adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
+ adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
adapter->rx_rings[i].rx_buf_len = rx_buf_len;
- if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
+ if (adapter->flags & IAVF_FLAG_LEGACY_RX)
clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
else
set_ring_build_skb_enabled(&adapter->rx_rings[i]);
@@ -647,7 +622,7 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
}
/**
- * i40evf_find_vlan - Search filter list for specific vlan filter
+ * iavf_find_vlan - Search filter list for specific vlan filter
* @adapter: board private structure
* @vlan: vlan tag
*
@@ -655,9 +630,9 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
* mac_vlan_list_lock.
**/
static struct
-i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
+iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
- struct i40evf_vlan_filter *f;
+ struct iavf_vlan_filter *f;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (vlan == f->vlan)
@@ -667,20 +642,20 @@ i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
}
/**
- * i40evf_add_vlan - Add a vlan filter to the list
+ * iavf_add_vlan - Add a vlan filter to the list
* @adapter: board private structure
* @vlan: VLAN tag
*
* Returns ptr to the filter object or NULL when no memory available.
**/
static struct
-i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
+iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
- struct i40evf_vlan_filter *f = NULL;
+ struct iavf_vlan_filter *f = NULL;
spin_lock_bh(&adapter->mac_vlan_list_lock);
- f = i40evf_find_vlan(adapter, vlan);
+ f = iavf_find_vlan(adapter, vlan);
if (!f) {
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
@@ -691,7 +666,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
INIT_LIST_HEAD(&f->list);
list_add(&f->list, &adapter->vlan_filter_list);
f->add = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
clearout:
@@ -700,63 +675,63 @@ clearout:
}
/**
- * i40evf_del_vlan - Remove a vlan filter from the list
+ * iavf_del_vlan - Remove a vlan filter from the list
* @adapter: board private structure
* @vlan: VLAN tag
**/
-static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
+static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
- struct i40evf_vlan_filter *f;
+ struct iavf_vlan_filter *f;
spin_lock_bh(&adapter->mac_vlan_list_lock);
- f = i40evf_find_vlan(adapter, vlan);
+ f = iavf_find_vlan(adapter, vlan);
if (f) {
f->remove = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
- * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
+ * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
* @netdev: network device struct
* @proto: unused protocol data
* @vid: VLAN tag
**/
-static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+static int iavf_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
if (!VLAN_ALLOWED(adapter))
return -EIO;
- if (i40evf_add_vlan(adapter, vid) == NULL)
+ if (iavf_add_vlan(adapter, vid) == NULL)
return -ENOMEM;
return 0;
}
/**
- * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
+ * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
* @netdev: network device struct
* @proto: unused protocol data
* @vid: VLAN tag
**/
-static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
if (VLAN_ALLOWED(adapter)) {
- i40evf_del_vlan(adapter, vid);
+ iavf_del_vlan(adapter, vid);
return 0;
}
return -EIO;
}
/**
- * i40evf_find_filter - Search filter list for specific mac filter
+ * iavf_find_filter - Search filter list for specific mac filter
* @adapter: board private structure
* @macaddr: the MAC address
*
@@ -764,10 +739,10 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
* mac_vlan_list_lock.
**/
static struct
-i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
- const u8 *macaddr)
+iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr)
{
- struct i40evf_mac_filter *f;
+ struct iavf_mac_filter *f;
if (!macaddr)
return NULL;
@@ -780,22 +755,22 @@ i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
}
/**
- * i40e_add_filter - Add a mac filter to the filter list
+ * iavf_add_filter - Add a mac filter to the filter list
* @adapter: board private structure
* @macaddr: the MAC address
*
* Returns ptr to the filter object or NULL when no memory available.
**/
static struct
-i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
- const u8 *macaddr)
+iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
+ const u8 *macaddr)
{
- struct i40evf_mac_filter *f;
+ struct iavf_mac_filter *f;
if (!macaddr)
return NULL;
- f = i40evf_find_filter(adapter, macaddr);
+ f = iavf_find_filter(adapter, macaddr);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
@@ -805,7 +780,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;
}
@@ -814,17 +789,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
}
/**
- * i40evf_set_mac - NDO callback to set port mac address
+ * iavf_set_mac - NDO callback to set port mac address
* @netdev: network interface device structure
* @p: pointer to an address structure
*
* Returns 0 on success, negative on failure
**/
-static int i40evf_set_mac(struct net_device *netdev, void *p)
+static int iavf_set_mac(struct net_device *netdev, void *p)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_hw *hw = &adapter->hw;
- struct i40evf_mac_filter *f;
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_hw *hw = &adapter->hw;
+ struct iavf_mac_filter *f;
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
@@ -833,18 +808,18 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
return 0;
- if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
+ if (adapter->flags & IAVF_FLAG_ADDR_SET_BY_PF)
return -EPERM;
spin_lock_bh(&adapter->mac_vlan_list_lock);
- f = i40evf_find_filter(adapter, hw->mac.addr);
+ f = iavf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
}
- f = i40evf_add_filter(adapter, addr->sa_data);
+ f = iavf_add_filter(adapter, addr->sa_data);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -857,35 +832,35 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
}
/**
- * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address
+ * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
* @netdev: the netdevice
* @addr: address to add
*
* Called by __dev_(mc|uc)_sync when an address needs to be added. We call
* __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
*/
-static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr)
+static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
- if (i40evf_add_filter(adapter, addr))
+ if (iavf_add_filter(adapter, addr))
return 0;
else
return -ENOMEM;
}
/**
- * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
+ * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
* @netdev: the netdevice
 * @addr: address to remove
*
* Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
* __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
*/
-static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr)
+static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40evf_mac_filter *f;
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_mac_filter *f;
/* Under some circumstances, we might receive a request to delete
* our own device address from our uc list. Because we store the
@@ -895,50 +870,50 @@ static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr)
if (ether_addr_equal(addr, netdev->dev_addr))
return 0;
- f = i40evf_find_filter(adapter, addr);
+ f = iavf_find_filter(adapter, addr);
if (f) {
f->remove = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
}
return 0;
}
/**
- * i40evf_set_rx_mode - NDO callback to set the netdev filters
+ * iavf_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
**/
-static void i40evf_set_rx_mode(struct net_device *netdev)
+static void iavf_set_rx_mode(struct net_device *netdev)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
spin_lock_bh(&adapter->mac_vlan_list_lock);
- __dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
- __dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
+ __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
+ __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
if (netdev->flags & IFF_PROMISC &&
- !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
- adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
+ !(adapter->flags & IAVF_FLAG_PROMISC_ON))
+ adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
else if (!(netdev->flags & IFF_PROMISC) &&
- adapter->flags & I40EVF_FLAG_PROMISC_ON)
- adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
+ adapter->flags & IAVF_FLAG_PROMISC_ON)
+ adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
if (netdev->flags & IFF_ALLMULTI &&
- !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
- adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+ !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
+ adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
else if (!(netdev->flags & IFF_ALLMULTI) &&
- adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
- adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
+ adapter->flags & IAVF_FLAG_ALLMULTI_ON)
+ adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}
/**
- * i40evf_napi_enable_all - enable NAPI on all queue vectors
+ * iavf_napi_enable_all - enable NAPI on all queue vectors
* @adapter: board private structure
**/
-static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
+static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
int q_idx;
- struct i40e_q_vector *q_vector;
+ struct iavf_q_vector *q_vector;
int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
@@ -951,13 +926,13 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
}
/**
- * i40evf_napi_disable_all - disable NAPI on all queue vectors
+ * iavf_napi_disable_all - disable NAPI on all queue vectors
* @adapter: board private structure
**/
-static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
+static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
int q_idx;
- struct i40e_q_vector *q_vector;
+ struct iavf_q_vector *q_vector;
int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
@@ -967,67 +942,67 @@ static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
}
/**
- * i40evf_configure - set up transmit and receive data structures
+ * iavf_configure - set up transmit and receive data structures
* @adapter: board private structure
**/
-static void i40evf_configure(struct i40evf_adapter *adapter)
+static void iavf_configure(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int i;
- i40evf_set_rx_mode(netdev);
+ iavf_set_rx_mode(netdev);
- i40evf_configure_tx(adapter);
- i40evf_configure_rx(adapter);
- adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+ iavf_configure_tx(adapter);
+ iavf_configure_rx(adapter);
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
for (i = 0; i < adapter->num_active_queues; i++) {
- struct i40e_ring *ring = &adapter->rx_rings[i];
+ struct iavf_ring *ring = &adapter->rx_rings[i];
- i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+ iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
}
}
/**
- * i40evf_up_complete - Finish the last steps of bringing up a connection
+ * iavf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
*
- * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
+ * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
**/
-static void i40evf_up_complete(struct i40evf_adapter *adapter)
+static void iavf_up_complete(struct iavf_adapter *adapter)
{
- adapter->state = __I40EVF_RUNNING;
- clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+ adapter->state = __IAVF_RUNNING;
+ clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
- i40evf_napi_enable_all(adapter);
+ iavf_napi_enable_all(adapter);
- adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
if (CLIENT_ENABLED(adapter))
- adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
+ adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
/**
- * i40e_down - Shutdown the connection processing
+ * iavf_down - Shutdown the connection processing
* @adapter: board private structure
*
- * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
+ * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
**/
-void i40evf_down(struct i40evf_adapter *adapter)
+void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct i40evf_vlan_filter *vlf;
- struct i40evf_mac_filter *f;
- struct i40evf_cloud_filter *cf;
+ struct iavf_vlan_filter *vlf;
+ struct iavf_mac_filter *f;
+ struct iavf_cloud_filter *cf;
- if (adapter->state <= __I40EVF_DOWN_PENDING)
+ if (adapter->state <= __IAVF_DOWN_PENDING)
return;
netif_carrier_off(netdev);
netif_tx_disable(netdev);
adapter->link_up = false;
- i40evf_napi_disable_all(adapter);
- i40evf_irq_disable(adapter);
+ iavf_napi_disable_all(adapter);
+ iavf_irq_disable(adapter);
spin_lock_bh(&adapter->mac_vlan_list_lock);
@@ -1054,25 +1029,25 @@ void i40evf_down(struct i40evf_adapter *adapter)
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
- if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
- adapter->state != __I40EVF_RESETTING) {
+ if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
+ adapter->state != __IAVF_RESETTING) {
/* cancel any current operation */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
* here for this to complete. The watchdog is still running
* and it will take care of this.
*/
- adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
- adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
/**
- * i40evf_acquire_msix_vectors - Setup the MSIX capability
+ * iavf_acquire_msix_vectors - Setup the MSIX capability
* @adapter: board private structure
* @vectors: number of vectors to request
*
@@ -1081,7 +1056,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
* Returns 0 on success, negative on failure
**/
static int
-i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
+iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
int err, vector_threshold;
@@ -1115,12 +1090,12 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
}
/**
- * i40evf_free_queues - Free memory for all rings
+ * iavf_free_queues - Free memory for all rings
* @adapter: board private structure to initialize
*
* Free all of the memory associated with queue pairs.
**/
-static void i40evf_free_queues(struct i40evf_adapter *adapter)
+static void iavf_free_queues(struct iavf_adapter *adapter)
{
if (!adapter->vsi_res)
return;
@@ -1132,14 +1107,14 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
}
/**
- * i40evf_alloc_queues - Allocate memory for all rings
+ * iavf_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
* We allocate one ring per queue at run-time since we don't know the
* number of queues at compile-time. The polling_netdev array is
* intended for Multiqueue, but should work fine with a single queue.
**/
-static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
+static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
int i, num_active_queues;
@@ -1160,17 +1135,17 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
adapter->tx_rings = kcalloc(num_active_queues,
- sizeof(struct i40e_ring), GFP_KERNEL);
+ sizeof(struct iavf_ring), GFP_KERNEL);
if (!adapter->tx_rings)
goto err_out;
adapter->rx_rings = kcalloc(num_active_queues,
- sizeof(struct i40e_ring), GFP_KERNEL);
+ sizeof(struct iavf_ring), GFP_KERNEL);
if (!adapter->rx_rings)
goto err_out;
for (i = 0; i < num_active_queues; i++) {
- struct i40e_ring *tx_ring;
- struct i40e_ring *rx_ring;
+ struct iavf_ring *tx_ring;
+ struct iavf_ring *rx_ring;
tx_ring = &adapter->tx_rings[i];
@@ -1178,16 +1153,16 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
- tx_ring->itr_setting = I40E_ITR_TX_DEF;
- if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
- tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
+ tx_ring->itr_setting = IAVF_ITR_TX_DEF;
+ if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
+ tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
rx_ring = &adapter->rx_rings[i];
rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
- rx_ring->itr_setting = I40E_ITR_RX_DEF;
+ rx_ring->itr_setting = IAVF_ITR_RX_DEF;
}
adapter->num_active_queues = num_active_queues;
@@ -1195,18 +1170,18 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
return 0;
err_out:
- i40evf_free_queues(adapter);
+ iavf_free_queues(adapter);
return -ENOMEM;
}
/**
- * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
* @adapter: board private structure to initialize
*
* Attempt to configure the interrupts using the best available
* capabilities of the hardware and the kernel.
**/
-static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
+static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
int vector, v_budget;
int pairs = 0;
@@ -1236,7 +1211,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
- err = i40evf_acquire_msix_vectors(adapter, v_budget);
+ err = iavf_acquire_msix_vectors(adapter, v_budget);
out:
netif_set_real_num_rx_queues(adapter->netdev, pairs);
@@ -1245,16 +1220,16 @@ out:
}
/**
- * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands
+ * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
* @adapter: board private structure
*
* Return 0 on success, negative on failure
**/
-static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
+static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
struct i40e_aqc_get_set_rss_key_data *rss_key =
(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
int ret = 0;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
@@ -1264,21 +1239,21 @@ static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
return -EBUSY;
}
- ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+ ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
if (ret) {
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
+ iavf_stat_str(hw, ret),
+ iavf_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
- ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
- adapter->rss_lut, adapter->rss_lut_size);
+ ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+ adapter->rss_lut, adapter->rss_lut_size);
if (ret) {
dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
+ iavf_stat_str(hw, ret),
+ iavf_aq_str(hw, hw->aq.asq_last_status));
}
return ret;
@@ -1286,55 +1261,55 @@ static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
}
/**
- * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
+ * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
* @adapter: board private structure
*
* Returns 0 on success, negative on failure
**/
-static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
+static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
u32 *dw;
u16 i;
dw = (u32 *)adapter->rss_key;
for (i = 0; i <= adapter->rss_key_size / 4; i++)
- wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
+ wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
dw = (u32 *)adapter->rss_lut;
for (i = 0; i <= adapter->rss_lut_size / 4; i++)
- wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
+ wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
- i40e_flush(hw);
+ iavf_flush(hw);
return 0;
}
/**
- * i40evf_config_rss - Configure RSS keys and lut
+ * iavf_config_rss - Configure RSS keys and lut
* @adapter: board private structure
*
* Returns 0 on success, negative on failure
**/
-int i40evf_config_rss(struct i40evf_adapter *adapter)
+int iavf_config_rss(struct iavf_adapter *adapter)
{
if (RSS_PF(adapter)) {
- adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
- I40EVF_FLAG_AQ_SET_RSS_KEY;
+ adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
+ IAVF_FLAG_AQ_SET_RSS_KEY;
return 0;
} else if (RSS_AQ(adapter)) {
- return i40evf_config_rss_aq(adapter);
+ return iavf_config_rss_aq(adapter);
} else {
- return i40evf_config_rss_reg(adapter);
+ return iavf_config_rss_reg(adapter);
}
}
/**
- * i40evf_fill_rss_lut - Fill the lut with default values
+ * iavf_fill_rss_lut - Fill the lut with default values
* @adapter: board private structure
**/
-static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
+static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
u16 i;
@@ -1343,47 +1318,46 @@ static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
}
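The fill itself is elided by the hunk; it spreads LUT entries evenly
across the active queues, presumably the usual modulo pattern (a sketch,
same shape as the kernel's ethtool_rxfh_indir_default() helper):

    /* Even spread of LUT entries over the active queues. */
    for (i = 0; i < adapter->rss_lut_size; i++)
            adapter->rss_lut[i] = i % adapter->num_active_queues;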
/**
- * i40evf_init_rss - Prepare for RSS
+ * iavf_init_rss - Prepare for RSS
* @adapter: board private structure
*
* Return 0 on success, negative on failure
**/
-static int i40evf_init_rss(struct i40evf_adapter *adapter)
+static int iavf_init_rss(struct iavf_adapter *adapter)
{
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
int ret;
if (!RSS_PF(adapter)) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
if (adapter->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+ adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
else
- adapter->hena = I40E_DEFAULT_RSS_HENA;
+ adapter->hena = IAVF_DEFAULT_RSS_HENA;
- wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+ wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
+ wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
}
- i40evf_fill_rss_lut(adapter);
-
+ iavf_fill_rss_lut(adapter);
netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
- ret = i40evf_config_rss(adapter);
+ ret = iavf_config_rss(adapter);
return ret;
}
/**
- * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
* @adapter: board private structure to initialize
*
* We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
**/
-static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
+static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
int q_idx = 0, num_q_vectors;
- struct i40e_q_vector *q_vector;
+ struct iavf_q_vector *q_vector;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
@@ -1399,21 +1373,21 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi,
- i40evf_napi_poll, NAPI_POLL_WEIGHT);
+ iavf_napi_poll, NAPI_POLL_WEIGHT);
}
return 0;
}
/**
- * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
+ * iavf_free_q_vectors - Free memory allocated for interrupt vectors
* @adapter: board private structure to initialize
*
* This function frees the memory allocated to the q_vectors. In addition if
* NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector.
**/
-static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
+static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
int q_idx, num_q_vectors;
int napi_vectors;
@@ -1425,7 +1399,8 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
napi_vectors = adapter->num_active_queues;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];
+ struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
+
if (q_idx < napi_vectors)
netif_napi_del(&q_vector->napi);
}
@@ -1434,11 +1409,11 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
}
/**
- * i40evf_reset_interrupt_capability - Reset MSIX setup
+ * iavf_reset_interrupt_capability - Reset MSIX setup
* @adapter: board private structure
*
**/
-void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
+void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
if (!adapter->msix_entries)
return;
@@ -1449,15 +1424,15 @@ void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
}
/**
- * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
* @adapter: board private structure to initialize
*
**/
-int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
+int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
int err;
- err = i40evf_alloc_queues(adapter);
+ err = iavf_alloc_queues(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"Unable to allocate memory for queues\n");
@@ -1465,7 +1440,7 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
}
rtnl_lock();
- err = i40evf_set_interrupt_capability(adapter);
+ err = iavf_set_interrupt_capability(adapter);
rtnl_unlock();
if (err) {
dev_err(&adapter->pdev->dev,
@@ -1473,7 +1448,7 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
goto err_set_interrupt;
}
- err = i40evf_alloc_q_vectors(adapter);
+ err = iavf_alloc_q_vectors(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"Unable to allocate memory for queue vectors\n");
@@ -1496,18 +1471,18 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
return 0;
err_alloc_q_vectors:
- i40evf_reset_interrupt_capability(adapter);
+ iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
- i40evf_free_queues(adapter);
+ iavf_free_queues(adapter);
err_alloc_queues:
return err;
}
/**
- * i40evf_free_rss - Free memory used by RSS structs
+ * iavf_free_rss - Free memory used by RSS structs
* @adapter: board private structure
**/
-static void i40evf_free_rss(struct i40evf_adapter *adapter)
+static void iavf_free_rss(struct iavf_adapter *adapter)
{
kfree(adapter->rss_key);
adapter->rss_key = NULL;
@@ -1517,52 +1492,52 @@ static void i40evf_free_rss(struct i40evf_adapter *adapter)
}
/**
- * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors
+ * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
* @adapter: board private structure
*
* Returns 0 on success, negative on failure
**/
-static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter)
+static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
if (netif_running(netdev))
- i40evf_free_traffic_irqs(adapter);
- i40evf_free_misc_irq(adapter);
- i40evf_reset_interrupt_capability(adapter);
- i40evf_free_q_vectors(adapter);
- i40evf_free_queues(adapter);
+ iavf_free_traffic_irqs(adapter);
+ iavf_free_misc_irq(adapter);
+ iavf_reset_interrupt_capability(adapter);
+ iavf_free_q_vectors(adapter);
+ iavf_free_queues(adapter);
- err = i40evf_init_interrupt_scheme(adapter);
+ err = iavf_init_interrupt_scheme(adapter);
if (err)
goto err;
netif_tx_stop_all_queues(netdev);
- err = i40evf_request_misc_irq(adapter);
+ err = iavf_request_misc_irq(adapter);
if (err)
goto err;
- set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
- i40evf_map_rings_to_vectors(adapter);
+ iavf_map_rings_to_vectors(adapter);
if (RSS_AQ(adapter))
- adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
else
- err = i40evf_init_rss(adapter);
+ err = iavf_init_rss(adapter);
err:
return err;
}
/**
- * i40evf_watchdog_timer - Periodic call-back timer
+ * iavf_watchdog_timer - Periodic call-back timer
- * @data: pointer to adapter disguised as unsigned long
+ * @t: pointer to the timer_list from which the adapter is recovered
**/
-static void i40evf_watchdog_timer(struct timer_list *t)
+static void iavf_watchdog_timer(struct timer_list *t)
{
- struct i40evf_adapter *adapter = from_timer(adapter, t,
+ struct iavf_adapter *adapter = from_timer(adapter, t,
watchdog_timer);
schedule_work(&adapter->watchdog_task);
@@ -1570,31 +1545,31 @@ static void i40evf_watchdog_timer(struct timer_list *t)
}
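The callback here leans on the timer_list API: the handler receives the struct timer_list itself, and from_timer(), a container_of() wrapper keyed on the timer member, recovers the enclosing adapter, which is why the old unsigned-long data argument is gone. A minimal sketch of the pairing, using a hypothetical my_adapter structure:

	#include <linux/timer.h>
	#include <linux/workqueue.h>

	struct my_adapter {
		struct timer_list watchdog_timer;	/* embedded timer */
		struct work_struct watchdog_task;
	};

	static void my_watchdog_cb(struct timer_list *t)
	{
		struct my_adapter *a = from_timer(a, t, watchdog_timer);

		schedule_work(&a->watchdog_task);
	}

	static void my_init(struct my_adapter *a)
	{
		timer_setup(&a->watchdog_timer, my_watchdog_cb, 0);
		mod_timer(&a->watchdog_timer, jiffies + msecs_to_jiffies(100));
	}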
/**
- * i40evf_watchdog_task - Periodic call-back task
+ * iavf_watchdog_task - Periodic call-back task
* @work: pointer to work_struct
**/
-static void i40evf_watchdog_task(struct work_struct *work)
+static void iavf_watchdog_task(struct work_struct *work)
{
- struct i40evf_adapter *adapter = container_of(work,
- struct i40evf_adapter,
+ struct iavf_adapter *adapter = container_of(work,
+ struct iavf_adapter,
watchdog_task);
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
goto restart_watchdog;
- if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
- reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
+ reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
+ IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
(reg_val == VIRTCHNL_VFR_COMPLETED)) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
- adapter->state = __I40EVF_STARTUP;
- adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+ adapter->state = __IAVF_STARTUP;
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
schedule_delayed_work(&adapter->init_task, 10);
- clear_bit(__I40EVF_IN_CRITICAL_TASK,
+ clear_bit(__IAVF_IN_CRITICAL_TASK,
&adapter->crit_section);
/* Don't reschedule the watchdog, since we've restarted
* the init task. When init_task contacts the PF and
@@ -1608,15 +1583,15 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto watchdog_done;
}
- if ((adapter->state < __I40EVF_DOWN) ||
- (adapter->flags & I40EVF_FLAG_RESET_PENDING))
+ if ((adapter->state < __IAVF_DOWN) ||
+ (adapter->flags & IAVF_FLAG_RESET_PENDING))
goto watchdog_done;
/* check for reset */
- reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
- if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
- adapter->state = __I40EVF_RESETTING;
- adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
+ if (!(adapter->flags & IAVF_FLAG_RESET_PENDING) && !reg_val) {
+ adapter->state = __IAVF_RESETTING;
+ adapter->flags |= IAVF_FLAG_RESET_PENDING;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
schedule_work(&adapter->reset_task);
adapter->aq_required = 0;
@@ -1628,140 +1603,140 @@ static void i40evf_watchdog_task(struct work_struct *work)
* here so we don't race on the admin queue.
*/
if (adapter->current_op) {
- if (!i40evf_asq_done(hw)) {
+ if (!iavf_asq_done(hw)) {
dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
- i40evf_send_api_ver(adapter);
+ iavf_send_api_ver(adapter);
}
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
- i40evf_send_vf_config_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) {
+ iavf_send_vf_config_msg(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
- i40evf_disable_queues(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
+ iavf_disable_queues(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
- i40evf_map_queues(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
+ iavf_map_queues(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
- i40evf_add_ether_addrs(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
+ iavf_add_ether_addrs(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
- i40evf_add_vlans(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
+ iavf_add_vlans(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
- i40evf_del_ether_addrs(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
+ iavf_del_ether_addrs(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
- i40evf_del_vlans(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
+ iavf_del_vlans(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
- i40evf_enable_vlan_stripping(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
+ iavf_enable_vlan_stripping(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
- i40evf_disable_vlan_stripping(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
+ iavf_disable_vlan_stripping(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
- i40evf_configure_queues(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
+ iavf_configure_queues(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
- i40evf_enable_queues(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
+ iavf_enable_queues(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
+ if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
/* This message goes straight to the firmware, not the
* PF, so we don't have to set current_op as we will
* not get a response through the ARQ.
*/
- i40evf_init_rss(adapter);
- adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
+ iavf_init_rss(adapter);
+ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
- i40evf_get_hena(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
+ iavf_get_hena(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
- i40evf_set_hena(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
+ iavf_set_hena(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
- i40evf_set_rss_key(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
+ iavf_set_rss_key(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
- i40evf_set_rss_lut(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
+ iavf_set_rss_lut(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
- i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
+ if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
+ iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
FLAG_VF_MULTICAST_PROMISC);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
- i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
+ if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
+ iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
goto watchdog_done;
}
- if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
- (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
- i40evf_set_promiscuous(adapter, 0);
+ if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
+ (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+ iavf_set_promiscuous(adapter, 0);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) {
- i40evf_enable_channels(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
+ iavf_enable_channels(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) {
- i40evf_disable_channels(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
+ iavf_disable_channels(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) {
- i40evf_add_cloud_filter(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
+ iavf_add_cloud_filter(adapter);
goto watchdog_done;
}
- if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) {
- i40evf_del_cloud_filter(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
+ iavf_del_cloud_filter(adapter);
goto watchdog_done;
}
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
- if (adapter->state == __I40EVF_RUNNING)
- i40evf_request_stats(adapter);
+ if (adapter->state == __IAVF_RUNNING)
+ iavf_request_stats(adapter);
watchdog_done:
- if (adapter->state == __I40EVF_RUNNING)
- i40evf_detect_recover_hung(&adapter->vsi);
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ if (adapter->state == __IAVF_RUNNING)
+ iavf_detect_recover_hung(&adapter->vsi);
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
- if (adapter->state == __I40EVF_REMOVE)
+ if (adapter->state == __IAVF_REMOVE)
return;
if (adapter->aq_required)
mod_timer(&adapter->watchdog_timer,
@@ -1771,28 +1746,28 @@ restart_watchdog:
schedule_work(&adapter->adminq_task);
}
-static void i40evf_disable_vf(struct i40evf_adapter *adapter)
+static void iavf_disable_vf(struct iavf_adapter *adapter)
{
- struct i40evf_mac_filter *f, *ftmp;
- struct i40evf_vlan_filter *fv, *fvtmp;
- struct i40evf_cloud_filter *cf, *cftmp;
+ struct iavf_mac_filter *f, *ftmp;
+ struct iavf_vlan_filter *fv, *fvtmp;
+ struct iavf_cloud_filter *cf, *cftmp;
- adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+ adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
/* We don't use netif_running() because it may be true prior to
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
*/
- if (adapter->state == __I40EVF_RUNNING) {
- set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+ if (adapter->state == __IAVF_RUNNING) {
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
adapter->link_up = false;
- i40evf_napi_disable_all(adapter);
- i40evf_irq_disable(adapter);
- i40evf_free_traffic_irqs(adapter);
- i40evf_free_all_tx_resources(adapter);
- i40evf_free_all_rx_resources(adapter);
+ iavf_napi_disable_all(adapter);
+ iavf_irq_disable(adapter);
+ iavf_free_traffic_irqs(adapter);
+ iavf_free_all_tx_resources(adapter);
+ iavf_free_all_rx_resources(adapter);
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
@@ -1818,41 +1793,41 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
- i40evf_free_misc_irq(adapter);
- i40evf_reset_interrupt_capability(adapter);
- i40evf_free_queues(adapter);
- i40evf_free_q_vectors(adapter);
+ iavf_free_misc_irq(adapter);
+ iavf_reset_interrupt_capability(adapter);
+ iavf_free_queues(adapter);
+ iavf_free_q_vectors(adapter);
kfree(adapter->vf_res);
- i40evf_shutdown_adminq(&adapter->hw);
+ iavf_shutdown_adminq(&adapter->hw);
adapter->netdev->flags &= ~IFF_UP;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
- adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
- adapter->state = __I40EVF_DOWN;
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
+ adapter->state = __IAVF_DOWN;
wake_up(&adapter->down_waitqueue);
dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}
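Throughout these deferred tasks the crit_section bits act as crude bit-locks: the blocking flavor spins on test_and_set_bit() until it owns the bit, the trylock flavor (used by the client task) bails out when the bit is already held, and clear_bit() releases either way. In sketch form:

	/* blocking acquire, as in the open/close and reset paths */
	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
		usleep_range(500, 1000);
	/* ... work that must not race with the other deferred tasks ... */
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	/* trylock, as in the client task: give up and get rescheduled later */
	if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
		return;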
-#define I40EVF_RESET_WAIT_MS 10
-#define I40EVF_RESET_WAIT_COUNT 500
+#define IAVF_RESET_WAIT_MS 10
+#define IAVF_RESET_WAIT_COUNT 500
/**
- * i40evf_reset_task - Call-back task to handle hardware reset
+ * iavf_reset_task - Call-back task to handle hardware reset
* @work: pointer to work_struct
*
* During reset we need to shut down and reinitialize the admin queue
* before we can use it to communicate with the PF again. We also clear
* and reinit the rings because that context is lost as well.
**/
-static void i40evf_reset_task(struct work_struct *work)
+static void iavf_reset_task(struct work_struct *work)
{
- struct i40evf_adapter *adapter = container_of(work,
- struct i40evf_adapter,
+ struct iavf_adapter *adapter = container_of(work,
+ struct iavf_adapter,
reset_task);
struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
- struct i40e_hw *hw = &adapter->hw;
- struct i40evf_vlan_filter *vlf;
- struct i40evf_cloud_filter *cf;
- struct i40evf_mac_filter *f;
+ struct iavf_hw *hw = &adapter->hw;
+ struct iavf_vlan_filter *vlf;
+ struct iavf_cloud_filter *cf;
+ struct iavf_mac_filter *f;
u32 reg_val;
int i = 0, err;
bool running;
@@ -1860,63 +1835,63 @@ static void i40evf_reset_task(struct work_struct *work)
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
- if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
+ if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return;
- while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
+ while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
&adapter->crit_section))
usleep_range(500, 1000);
if (CLIENT_ENABLED(adapter)) {
- adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
- I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
- I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
- I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
+ adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
+ IAVF_FLAG_CLIENT_NEEDS_CLOSE |
+ IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
+ IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
cancel_delayed_work_sync(&adapter->client_task);
- i40evf_notify_client_close(&adapter->vsi, true);
+ iavf_notify_client_close(&adapter->vsi, true);
}
- i40evf_misc_irq_disable(adapter);
- if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
- adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
+ iavf_misc_irq_disable(adapter);
+ if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
+ adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
/* Restart the AQ here. If we have been reset but didn't
* detect it, or if the PF had to reinit, our AQ will be hosed.
*/
- i40evf_shutdown_adminq(hw);
- i40evf_init_adminq(hw);
- i40evf_request_reset(adapter);
+ iavf_shutdown_adminq(hw);
+ iavf_init_adminq(hw);
+ iavf_request_reset(adapter);
}
- adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ adapter->flags |= IAVF_FLAG_RESET_PENDING;
/* poll until we see the reset actually happen */
- for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
- reg_val = rd32(hw, I40E_VF_ARQLEN1) &
- I40E_VF_ARQLEN1_ARQENABLE_MASK;
+ for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
+ reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
+ IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val)
break;
usleep_range(5000, 10000);
}
- if (i == I40EVF_RESET_WAIT_COUNT) {
+ if (i == IAVF_RESET_WAIT_COUNT) {
dev_info(&adapter->pdev->dev, "Never saw reset\n");
goto continue_reset; /* act like the reset happened */
}
/* wait until the reset is complete and the PF is responding to us */
- for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+ for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
/* sleep first to make sure a minimum wait time is met */
- msleep(I40EVF_RESET_WAIT_MS);
+ msleep(IAVF_RESET_WAIT_MS);
- reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
+ IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == VIRTCHNL_VFR_VFACTIVE)
break;
}
pci_set_master(adapter->pdev);
- if (i == I40EVF_RESET_WAIT_COUNT) {
+ if (i == IAVF_RESET_WAIT_COUNT) {
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
- i40evf_disable_vf(adapter);
- clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ iavf_disable_vf(adapter);
+ clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -1925,44 +1900,44 @@ continue_reset:
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
*/
- running = ((adapter->state == __I40EVF_RUNNING) ||
- (adapter->state == __I40EVF_RESETTING));
+ running = ((adapter->state == __IAVF_RUNNING) ||
+ (adapter->state == __IAVF_RESETTING));
if (running) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
- i40evf_napi_disable_all(adapter);
+ iavf_napi_disable_all(adapter);
}
- i40evf_irq_disable(adapter);
+ iavf_irq_disable(adapter);
- adapter->state = __I40EVF_RESETTING;
- adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ adapter->state = __IAVF_RESETTING;
+ adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
/* free the Tx/Rx rings and descriptors, might be better to just
* re-use them sometime in the future
*/
- i40evf_free_all_rx_resources(adapter);
- i40evf_free_all_tx_resources(adapter);
+ iavf_free_all_rx_resources(adapter);
+ iavf_free_all_tx_resources(adapter);
- adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED;
+ adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
/* kill and reinit the admin queue */
- i40evf_shutdown_adminq(hw);
+ iavf_shutdown_adminq(hw);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- err = i40evf_init_adminq(hw);
+ err = iavf_init_adminq(hw);
if (err)
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
err);
adapter->aq_required = 0;
- if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
- err = i40evf_reinit_interrupt_scheme(adapter);
+ if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+ err = iavf_reinit_interrupt_scheme(adapter);
if (err)
goto reset_err;
}
- adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
- adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
+ adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
spin_lock_bh(&adapter->mac_vlan_list_lock);
@@ -1987,10 +1962,10 @@ continue_reset:
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
- adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
- adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
- i40evf_misc_irq_enable(adapter);
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
+ iavf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -1999,84 +1974,83 @@ continue_reset:
*/
if (running) {
/* allocate transmit descriptors */
- err = i40evf_setup_all_tx_resources(adapter);
+ err = iavf_setup_all_tx_resources(adapter);
if (err)
goto reset_err;
/* allocate receive descriptors */
- err = i40evf_setup_all_rx_resources(adapter);
+ err = iavf_setup_all_rx_resources(adapter);
if (err)
goto reset_err;
- if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
- err = i40evf_request_traffic_irqs(adapter,
- netdev->name);
+ if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+ err = iavf_request_traffic_irqs(adapter, netdev->name);
if (err)
goto reset_err;
- adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
}
- i40evf_configure(adapter);
+ iavf_configure(adapter);
- i40evf_up_complete(adapter);
+ iavf_up_complete(adapter);
- i40evf_irq_enable(adapter, true);
+ iavf_irq_enable(adapter, true);
} else {
- adapter->state = __I40EVF_DOWN;
+ adapter->state = __IAVF_DOWN;
wake_up(&adapter->down_waitqueue);
}
- clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
return;
reset_err:
- clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- i40evf_close(netdev);
+ iavf_close(netdev);
}
/**
- * i40evf_adminq_task - worker thread to clean the admin queue
+ * iavf_adminq_task - worker thread to clean the admin queue
* @work: pointer to work_struct containing our data
**/
-static void i40evf_adminq_task(struct work_struct *work)
+static void iavf_adminq_task(struct work_struct *work)
{
- struct i40evf_adapter *adapter =
- container_of(work, struct i40evf_adapter, adminq_task);
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_adapter *adapter =
+ container_of(work, struct iavf_adapter, adminq_task);
+ struct iavf_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
enum virtchnl_ops v_op;
- i40e_status ret, v_ret;
+ iavf_status ret, v_ret;
u32 val, oldval;
u16 pending;
- if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
goto out;
- event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
+ event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
goto out;
do {
- ret = i40evf_clean_arq_element(hw, &event, &pending);
+ ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low);
if (ret || !v_op)
break; /* No event to process or error cleaning ARQ */
- i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
- event.msg_len);
+ iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
+ event.msg_len);
if (pending != 0)
- memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
+ memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
} while (pending);
if ((adapter->flags &
- (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
- adapter->state == __I40EVF_RESETTING)
+ (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
+ adapter->state == __IAVF_RESETTING)
goto freedom;
/* check for error indications */
@@ -2084,34 +2058,34 @@ static void i40evf_adminq_task(struct work_struct *work)
if (val == 0xdeadbeef) /* indicates device in reset */
goto freedom;
oldval = val;
- if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
+ if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
- val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
+ val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
}
- if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
+ if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
- val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
+ val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
}
- if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
+ if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
- val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
+ val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
}
if (oldval != val)
wr32(hw, hw->aq.arq.len, val);
val = rd32(hw, hw->aq.asq.len);
oldval = val;
- if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
+ if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
- val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
+ val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
}
- if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
+ if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
- val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
+ val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
}
- if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
+ if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
- val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
+ val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
}
if (oldval != val)
wr32(hw, hw->aq.asq.len, val);
@@ -2120,58 +2094,58 @@ freedom:
kfree(event.msg_buf);
out:
/* re-enable Admin queue interrupt cause */
- i40evf_misc_irq_enable(adapter);
+ iavf_misc_irq_enable(adapter);
}
/**
- * i40evf_client_task - worker thread to perform client work
+ * iavf_client_task - worker thread to perform client work
* @work: pointer to work_struct containing our data
*
* This task handles client interactions. Because client calls can be
* reentrant, we can't handle them in the watchdog.
**/
-static void i40evf_client_task(struct work_struct *work)
+static void iavf_client_task(struct work_struct *work)
{
- struct i40evf_adapter *adapter =
- container_of(work, struct i40evf_adapter, client_task.work);
+ struct iavf_adapter *adapter =
+ container_of(work, struct iavf_adapter, client_task.work);
/* If we can't get the client bit, just give up. We'll be rescheduled
* later.
*/
- if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
+ if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
return;
- if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
- i40evf_client_subtask(adapter);
- adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
+ iavf_client_subtask(adapter);
+ adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
goto out;
}
- if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
- i40evf_notify_client_l2_params(&adapter->vsi);
- adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+ if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+ iavf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
goto out;
}
- if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
- i40evf_notify_client_close(&adapter->vsi, false);
- adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
+ if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
+ iavf_notify_client_close(&adapter->vsi, false);
+ adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
goto out;
}
- if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
- i40evf_notify_client_open(&adapter->vsi);
- adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
+ if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
+ iavf_notify_client_open(&adapter->vsi);
+ adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
}
out:
- clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+ clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
}
/**
- * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
+ * iavf_free_all_tx_resources - Free Tx Resources for All Queues
* @adapter: board private structure
*
* Free all transmit software resources
**/
-void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
+void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
{
int i;
@@ -2180,11 +2154,11 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
for (i = 0; i < adapter->num_active_queues; i++)
if (adapter->tx_rings[i].desc)
- i40evf_free_tx_resources(&adapter->tx_rings[i]);
+ iavf_free_tx_resources(&adapter->tx_rings[i]);
}
/**
- * i40evf_setup_all_tx_resources - allocate all queues Tx resources
+ * iavf_setup_all_tx_resources - allocate all queues Tx resources
* @adapter: board private structure
*
* If this function returns with an error, then it's possible one or
@@ -2193,13 +2167,13 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
*
* Return 0 on success, negative on failure
**/
-static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
+static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
{
int i, err = 0;
for (i = 0; i < adapter->num_active_queues; i++) {
adapter->tx_rings[i].count = adapter->tx_desc_count;
- err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
+ err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
if (!err)
continue;
dev_err(&adapter->pdev->dev,
@@ -2211,7 +2185,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
}
/**
- * i40evf_setup_all_rx_resources - allocate all queues Rx resources
+ * iavf_setup_all_rx_resources - allocate all queues Rx resources
* @adapter: board private structure
*
* If this function returns with an error, then it's possible one or
@@ -2220,13 +2194,13 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
*
* Return 0 on success, negative on failure
**/
-static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
+static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
{
int i, err = 0;
for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].count = adapter->rx_desc_count;
- err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
+ err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
if (!err)
continue;
dev_err(&adapter->pdev->dev,
@@ -2237,12 +2211,12 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
}
/**
- * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
+ * iavf_free_all_rx_resources - Free Rx Resources for All Queues
* @adapter: board private structure
*
* Free all receive software resources
**/
-void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
+void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
{
int i;
@@ -2251,16 +2225,16 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
for (i = 0; i < adapter->num_active_queues; i++)
if (adapter->rx_rings[i].desc)
- i40evf_free_rx_resources(&adapter->rx_rings[i]);
+ iavf_free_rx_resources(&adapter->rx_rings[i]);
}
/**
- * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth
+ * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
* @adapter: board private structure
* @max_tx_rate: max Tx bw for a tc
**/
-static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
- u64 max_tx_rate)
+static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
+ u64 max_tx_rate)
{
int speed = 0, ret = 0;
@@ -2297,7 +2271,7 @@ static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
}
/**
- * i40evf_validate_channel_config - validate queue mapping info
+ * iavf_validate_channel_config - validate queue mapping info
* @adapter: board private structure
* @mqprio_qopt: queue parameters
*
@@ -2305,15 +2279,15 @@ static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
* configure queue channels is valid or not. Returns 0 on a valid
* config.
**/
-static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
- struct tc_mqprio_qopt_offload *mqprio_qopt)
+static int iavf_validate_ch_config(struct iavf_adapter *adapter,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
{
u64 total_max_rate = 0;
int i, num_qps = 0;
u64 tx_rate = 0;
int ret = 0;
- if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS ||
+ if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
mqprio_qopt->qopt.num_tc < 1)
return -EINVAL;
@@ -2328,24 +2302,24 @@ static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
}
- /*convert to Mbps */
+ /* convert to Mbps */
tx_rate = div_u64(mqprio_qopt->max_rate[i],
- I40EVF_MBPS_DIVISOR);
+ IAVF_MBPS_DIVISOR);
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
- if (num_qps > I40EVF_MAX_REQ_QUEUES)
+ if (num_qps > IAVF_MAX_REQ_QUEUES)
return -EINVAL;
- ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
+ ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
return ret;
}
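The Mbps conversion above works because tc hands max_rate down in bytes per second, and 1 Mbit/s equals 125,000 bytes/s, which is what the divisor encodes (assuming IAVF_MBPS_DIVISOR keeps the old I40EVF_MBPS_DIVISOR value of 125000). A worked example:

	/* 12,500,000 bytes/s / 125,000 = 100 Mbit/s */
	u64 tx_rate = div_u64(12500000ULL, IAVF_MBPS_DIVISOR);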
/**
- * i40evf_del_all_cloud_filters - delete all cloud filters
- * on the traffic classes
+ * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
+ * @adapter: board private structure
**/
-static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
+static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
{
- struct i40evf_cloud_filter *cf, *cftmp;
+ struct iavf_cloud_filter *cf, *cftmp;
spin_lock_bh(&adapter->cloud_filter_list_lock);
list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
@@ -2358,7 +2332,7 @@ static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
}
/**
- * __i40evf_setup_tc - configure multiple traffic classes
+ * __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
- * @type_date: tc offload data
+ * @type_data: tc offload data
*
@@ -2368,10 +2342,10 @@ static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
*
* Returns 0 on success.
**/
-static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
+static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
{
struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
struct virtchnl_vf_resource *vfres = adapter->vf_res;
u8 num_tc = 0, total_qps = 0;
int ret = 0, netdev_tc = 0;
@@ -2384,14 +2358,14 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
/* delete queue_channel */
if (!mqprio_qopt->qopt.hw) {
- if (adapter->ch_config.state == __I40EVF_TC_RUNNING) {
+ if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
/* reset the tc configuration */
netdev_reset_tc(netdev);
adapter->num_tc = 0;
netif_tx_stop_all_queues(netdev);
netif_tx_disable(netdev);
- i40evf_del_all_cloud_filters(adapter);
- adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
+ iavf_del_all_cloud_filters(adapter);
+ adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
goto exit;
} else {
return -EINVAL;
@@ -2404,12 +2378,12 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
dev_err(&adapter->pdev->dev, "ADq not supported\n");
return -EOPNOTSUPP;
}
- if (adapter->ch_config.state != __I40EVF_TC_INVALID) {
+ if (adapter->ch_config.state != __IAVF_TC_INVALID) {
dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
return -EINVAL;
}
- ret = i40evf_validate_ch_config(adapter, mqprio_qopt);
+ ret = iavf_validate_ch_config(adapter, mqprio_qopt);
if (ret)
return ret;
/* Return if same TC config is requested */
@@ -2417,7 +2391,7 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
return 0;
adapter->num_tc = num_tc;
- for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
if (i < num_tc) {
adapter->ch_config.ch_info[i].count =
mqprio_qopt->qopt.count[i];
@@ -2427,7 +2401,7 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
max_tx_rate = mqprio_qopt->max_rate[i];
/* convert to Mbps */
max_tx_rate = div_u64(max_tx_rate,
- I40EVF_MBPS_DIVISOR);
+ IAVF_MBPS_DIVISOR);
adapter->ch_config.ch_info[i].max_tx_rate =
max_tx_rate;
} else {
@@ -2438,11 +2412,11 @@ static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
adapter->ch_config.total_qps = total_qps;
netif_tx_stop_all_queues(netdev);
netif_tx_disable(netdev);
- adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS;
+ adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
netdev_reset_tc(netdev);
/* Report the tc mapping up the stack */
netdev_set_num_tc(adapter->netdev, num_tc);
- for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
u16 qcount = mqprio_qopt->qopt.count[i];
u16 qoffset = mqprio_qopt->qopt.offset[i];
@@ -2456,14 +2430,14 @@ exit:
}
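Reporting the mapping up the stack pairs one netdev_set_num_tc() call with a netdev_set_tc_queue() call per class, each naming a queue count and the first queue of that class. A sketch for a hypothetical two-class split over six queues:

	netdev_reset_tc(netdev);
	netdev_set_num_tc(netdev, 2);
	netdev_set_tc_queue(netdev, 0, 4, 0);	/* tc0: 4 queues from offset 0 */
	netdev_set_tc_queue(netdev, 1, 2, 4);	/* tc1: 2 queues from offset 4 */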
/**
- * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel
+ * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
* @adapter: board private structure
- * @cls_flower: pointer to struct tc_cls_flower_offload
+ * @f: pointer to struct tc_cls_flower_offload
* @filter: pointer to cloud filter structure
*/
-static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
- struct tc_cls_flower_offload *f,
- struct i40evf_cloud_filter *filter)
+static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
+ struct tc_cls_flower_offload *f,
+ struct iavf_cloud_filter *filter)
{
u16 n_proto_mask = 0;
u16 n_proto_key = 0;
@@ -2494,7 +2468,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
f->mask);
if (mask->keyid != 0)
- field_flags |= I40EVF_CLOUD_FIELD_TEN_ID;
+ field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -2541,7 +2515,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
/* use is_broadcast and is_zero to check for all 0xf or 0 */
if (!is_zero_ether_addr(mask->dst)) {
if (is_broadcast_ether_addr(mask->dst)) {
- field_flags |= I40EVF_CLOUD_FIELD_OMAC;
+ field_flags |= IAVF_CLOUD_FIELD_OMAC;
} else {
dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
mask->dst);
@@ -2551,7 +2525,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
if (!is_zero_ether_addr(mask->src)) {
if (is_broadcast_ether_addr(mask->src)) {
- field_flags |= I40EVF_CLOUD_FIELD_IMAC;
+ field_flags |= IAVF_CLOUD_FIELD_IMAC;
} else {
dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
mask->src);
@@ -2592,7 +2566,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
if (mask->vlan_id) {
if (mask->vlan_id == VLAN_VID_MASK) {
- field_flags |= I40EVF_CLOUD_FIELD_IVLAN;
+ field_flags |= IAVF_CLOUD_FIELD_IVLAN;
} else {
dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
mask->vlan_id);
@@ -2624,7 +2598,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
if (mask->dst) {
if (mask->dst == cpu_to_be32(0xffffffff)) {
- field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
be32_to_cpu(mask->dst));
@@ -2634,7 +2608,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
if (mask->src) {
if (mask->src == cpu_to_be32(0xffffffff)) {
- field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
- be32_to_cpu(mask->dst));
+ be32_to_cpu(mask->src));
@@ -2642,7 +2616,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
}
}
- if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) {
+ if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
return I40E_ERR_CONFIG;
}
@@ -2683,7 +2657,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
return I40E_ERR_CONFIG;
}
if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
- field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ field_flags |= IAVF_CLOUD_FIELD_IIP;
for (i = 0; i < 4; i++)
vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
@@ -2706,7 +2680,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
if (mask->src) {
if (mask->src == cpu_to_be16(0xffff)) {
- field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
be16_to_cpu(mask->src));
@@ -2716,7 +2690,7 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
if (mask->dst) {
if (mask->dst == cpu_to_be16(0xffff)) {
- field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
be16_to_cpu(mask->dst));
@@ -2739,13 +2713,13 @@ static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
}
/**
- * i40evf_handle_tclass - Forward to a traffic class on the device
+ * iavf_handle_tclass - Forward to a traffic class on the device
* @adapter: board private structure
* @tc: traffic class index on the device
* @filter: pointer to cloud filter structure
*/
-static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc,
- struct i40evf_cloud_filter *filter)
+static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
+ struct iavf_cloud_filter *filter)
{
if (tc == 0)
return 0;
@@ -2763,15 +2737,15 @@ static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc,
}
/**
- * i40evf_configure_clsflower - Add tc flower filters
+ * iavf_configure_clsflower - Add tc flower filters
* @adapter: board private structure
* @cls_flower: Pointer to struct tc_cls_flower_offload
*/
-static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
- struct tc_cls_flower_offload *cls_flower)
+static int iavf_configure_clsflower(struct iavf_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
{
int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
- struct i40evf_cloud_filter *filter = NULL;
+ struct iavf_cloud_filter *filter = NULL;
int err = -EINVAL, count = 50;
if (tc < 0) {
@@ -2783,7 +2757,7 @@ static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
if (!filter)
return -ENOMEM;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
&adapter->crit_section)) {
if (--count == 0)
goto err;
@@ -2796,11 +2770,11 @@ static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
/* start out with flow type and eth type IPv4 to begin with */
filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
- err = i40evf_parse_cls_flower(adapter, cls_flower, filter);
+ err = iavf_parse_cls_flower(adapter, cls_flower, filter);
if (err < 0)
goto err;
- err = i40evf_handle_tclass(adapter, tc, filter);
+ err = iavf_handle_tclass(adapter, tc, filter);
if (err < 0)
goto err;
@@ -2809,27 +2783,27 @@ static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
list_add_tail(&filter->list, &adapter->cloud_filter_list);
adapter->num_cloud_filters++;
filter->add = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
if (err)
kfree(filter);
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
return err;
}
-/* i40evf_find_cf - Find the cloud filter in the list
+/* iavf_find_cf - Find the cloud filter in the list
* @adapter: Board private structure
* @cookie: filter specific cookie
*
* Returns ptr to the filter object or NULL. Must be called while holding the
* cloud_filter_list_lock.
*/
-static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter,
- unsigned long *cookie)
+static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
+ unsigned long *cookie)
{
- struct i40evf_cloud_filter *filter = NULL;
+ struct iavf_cloud_filter *filter = NULL;
if (!cookie)
return NULL;
@@ -2842,21 +2816,21 @@ static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter
}
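Since several tasks reshape the filter list, the lookup is only valid while cloud_filter_list_lock is held, so callers wrap both the find and whatever they do with the result, as iavf_delete_clsflower() below does. The usage pattern in sketch form:

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	filter = iavf_find_cf(adapter, &cookie);
	if (filter)
		filter->del = true;	/* marked; the watchdog sends the AQ request */
	spin_unlock_bh(&adapter->cloud_filter_list_lock);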
/**
- * i40evf_delete_clsflower - Remove tc flower filters
+ * iavf_delete_clsflower - Remove tc flower filters
* @adapter: board private structure
* @cls_flower: Pointer to struct tc_cls_flower_offload
*/
-static int i40evf_delete_clsflower(struct i40evf_adapter *adapter,
- struct tc_cls_flower_offload *cls_flower)
+static int iavf_delete_clsflower(struct iavf_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
{
- struct i40evf_cloud_filter *filter = NULL;
+ struct iavf_cloud_filter *filter = NULL;
int err = 0;
spin_lock_bh(&adapter->cloud_filter_list_lock);
- filter = i40evf_find_cf(adapter, &cls_flower->cookie);
+ filter = iavf_find_cf(adapter, &cls_flower->cookie);
if (filter) {
filter->del = true;
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
} else {
err = -EINVAL;
}
@@ -2866,21 +2840,21 @@ static int i40evf_delete_clsflower(struct i40evf_adapter *adapter,
}
/**
- * i40evf_setup_tc_cls_flower - flower classifier offloads
+ * iavf_setup_tc_cls_flower - flower classifier offloads
- * @netdev: net device to configure
- * @type_data: offload data
+ * @adapter: board private structure
+ * @cls_flower: pointer to struct tc_cls_flower_offload
*/
-static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
- struct tc_cls_flower_offload *cls_flower)
+static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
{
if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- return i40evf_configure_clsflower(adapter, cls_flower);
+ return iavf_configure_clsflower(adapter, cls_flower);
case TC_CLSFLOWER_DESTROY:
- return i40evf_delete_clsflower(adapter, cls_flower);
+ return iavf_delete_clsflower(adapter, cls_flower);
case TC_CLSFLOWER_STATS:
return -EOPNOTSUPP;
default:
@@ -2889,46 +2863,46 @@ static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
}
/**
- * i40evf_setup_tc_block_cb - block callback for tc
+ * iavf_setup_tc_block_cb - block callback for tc
* @type: type of offload
* @type_data: offload data
- * @cb_priv:
+ * @cb_priv: adapter private data, registered with the block callback
*
* This function is the block callback for traffic classes
**/
-static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
+static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
switch (type) {
case TC_SETUP_CLSFLOWER:
- return i40evf_setup_tc_cls_flower(cb_priv, type_data);
+ return iavf_setup_tc_cls_flower(cb_priv, type_data);
default:
return -EOPNOTSUPP;
}
}
/**
- * i40evf_setup_tc_block - register callbacks for tc
+ * iavf_setup_tc_block - register callbacks for tc
- * @netdev: network interface device structure
+ * @dev: network interface device structure
* @f: tc offload data
*
* This function registers block callbacks for tc
* offloads
**/
-static int i40evf_setup_tc_block(struct net_device *dev,
- struct tc_block_offload *f)
+static int iavf_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
{
- struct i40evf_adapter *adapter = netdev_priv(dev);
+ struct iavf_adapter *adapter = netdev_priv(dev);
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
switch (f->command) {
case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
+ return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb,
adapter, adapter, f->extack);
case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
+ tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb,
adapter);
return 0;
default:
@@ -2937,7 +2911,7 @@ static int i40evf_setup_tc_block(struct net_device *dev,
}
/**
- * i40evf_setup_tc - configure multiple traffic classes
+ * iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type: type of offload
- * @type_date: tc offload data
+ * @type_data: tc offload data
@@ -2947,21 +2921,21 @@ static int i40evf_setup_tc_block(struct net_device *dev,
*
* Returns 0 on success
**/
-static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
- void *type_data)
+static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
{
switch (type) {
case TC_SETUP_QDISC_MQPRIO:
- return __i40evf_setup_tc(netdev, type_data);
+ return __iavf_setup_tc(netdev, type_data);
case TC_SETUP_BLOCK:
- return i40evf_setup_tc_block(netdev, type_data);
+ return iavf_setup_tc_block(netdev, type_data);
default:
return -EOPNOTSUPP;
}
}
/**
- * i40evf_open - Called when a network interface is made active
+ * iavf_open - Called when a network interface is made active
* @netdev: network interface device structure
*
* Returns 0 on success, negative value on failure
@@ -2972,71 +2946,71 @@ static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int i40evf_open(struct net_device *netdev)
+static int iavf_open(struct net_device *netdev)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
int err;
- if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+ if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
return -EIO;
}
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
&adapter->crit_section))
usleep_range(500, 1000);
- if (adapter->state != __I40EVF_DOWN) {
+ if (adapter->state != __IAVF_DOWN) {
err = -EBUSY;
goto err_unlock;
}
/* allocate transmit descriptors */
- err = i40evf_setup_all_tx_resources(adapter);
+ err = iavf_setup_all_tx_resources(adapter);
if (err)
goto err_setup_tx;
/* allocate receive descriptors */
- err = i40evf_setup_all_rx_resources(adapter);
+ err = iavf_setup_all_rx_resources(adapter);
if (err)
goto err_setup_rx;
/* clear any pending interrupts, may auto mask */
- err = i40evf_request_traffic_irqs(adapter, netdev->name);
+ err = iavf_request_traffic_irqs(adapter, netdev->name);
if (err)
goto err_req_irq;
spin_lock_bh(&adapter->mac_vlan_list_lock);
- i40evf_add_filter(adapter, adapter->hw.mac.addr);
+ iavf_add_filter(adapter, adapter->hw.mac.addr);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- i40evf_configure(adapter);
+ iavf_configure(adapter);
- i40evf_up_complete(adapter);
+ iavf_up_complete(adapter);
- i40evf_irq_enable(adapter, true);
+ iavf_irq_enable(adapter, true);
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
return 0;
err_req_irq:
- i40evf_down(adapter);
- i40evf_free_traffic_irqs(adapter);
+ iavf_down(adapter);
+ iavf_free_traffic_irqs(adapter);
err_setup_rx:
- i40evf_free_all_rx_resources(adapter);
+ iavf_free_all_rx_resources(adapter);
err_setup_tx:
- i40evf_free_all_tx_resources(adapter);
+ iavf_free_all_tx_resources(adapter);
err_unlock:
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
return err;
}
/**
- * i40evf_close - Disables a network interface
+ * iavf_close - Disables a network interface
* @netdev: network interface device structure
*
* Returns 0, this is not allowed to fail
@@ -3046,41 +3020,41 @@ err_unlock:
* needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
* are freed, along with all transmit and receive resources.
**/
-static int i40evf_close(struct net_device *netdev)
+static int iavf_close(struct net_device *netdev)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
int status;
- if (adapter->state <= __I40EVF_DOWN_PENDING)
+ if (adapter->state <= __IAVF_DOWN_PENDING)
return 0;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
&adapter->crit_section))
usleep_range(500, 1000);
- set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
- adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
+ adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
- i40evf_down(adapter);
- adapter->state = __I40EVF_DOWN_PENDING;
- i40evf_free_traffic_irqs(adapter);
+ iavf_down(adapter);
+ adapter->state = __IAVF_DOWN_PENDING;
+ iavf_free_traffic_irqs(adapter);
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
- * i40evf_virtchnl_completion() after we get confirmation from the PF
+ * iavf_virtchnl_completion() after we get confirmation from the PF
* driver that the rings have been stopped.
*
- * Also, we wait for state to transition to __I40EVF_DOWN before
- * returning. State change occurs in i40evf_virtchnl_completion() after
+ * Also, we wait for state to transition to __IAVF_DOWN before
+ * returning. State change occurs in iavf_virtchnl_completion() after
* VF resources are released (which occurs after PF driver processes and
* responds to admin queue commands).
*/
status = wait_event_timeout(adapter->down_waitqueue,
- adapter->state == __I40EVF_DOWN,
+ adapter->state == __IAVF_DOWN,
msecs_to_jiffies(200));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
@@ -3088,64 +3062,65 @@ static int i40evf_close(struct net_device *netdev)
}
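The down handshake above is a plain waitqueue: wait_event_timeout() returns 0 when the timeout elapses with the condition still false and a positive jiffies count otherwise, and whichever path moves the state to __IAVF_DOWN must issue the matching wake_up(). Both halves in sketch form:

	/* waiter, as in the close path */
	if (!wait_event_timeout(adapter->down_waitqueue,
				adapter->state == __IAVF_DOWN,
				msecs_to_jiffies(200)))
		netdev_warn(netdev, "timed out waiting for __IAVF_DOWN\n");

	/* waker, as in the virtchnl completion and reset paths */
	adapter->state = __IAVF_DOWN;
	wake_up(&adapter->down_waitqueue);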
/**
- * i40evf_change_mtu - Change the Maximum Transfer Unit
+ * iavf_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
*
* Returns 0 on success, negative on failure
**/
-static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
+static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
netdev->mtu = new_mtu;
if (CLIENT_ENABLED(adapter)) {
- i40evf_notify_client_l2_params(&adapter->vsi);
- adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ iavf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
}
- adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
schedule_work(&adapter->reset_task);
return 0;
}
/**
- * i40e_set_features - set the netdev feature flags
+ * iavf_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
* Note: expects to be called while under rtnl_lock()
**/
-static int i40evf_set_features(struct net_device *netdev,
- netdev_features_t features)
+static int iavf_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
- /* Don't allow changing VLAN_RX flag when VLAN is set for VF
- * and return an error in this case
+ /* Don't allow changing VLAN_RX flag when adapter is not capable
+ * of VLAN offload
*/
- if (VLAN_ALLOWED(adapter)) {
+ if (!VLAN_ALLOWED(adapter)) {
+ if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
+ return -EINVAL;
+ } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
adapter->aq_required |=
- I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+ IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
else
adapter->aq_required |=
- I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
- } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
- return -EINVAL;
+ IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
}
return 0;
}
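The (netdev->features ^ features) test above is the standard toggle check: XOR leaves a bit set only where the old and new feature sets disagree, so masking with NETIF_F_HW_VLAN_CTAG_RX isolates a change to that single flag. For instance:

	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		/* stripping was toggled; the requested state is in
		 * (features & NETIF_F_HW_VLAN_CTAG_RX)
		 */
	}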
/**
- * i40evf_features_check - Validate encapsulated packet conforms to limits
+ * iavf_features_check - Validate encapsulated packet conforms to limits
* @skb: skb buff
* @dev: This physical port's netdev
* @features: Offload features that the stack believes apply
**/
-static netdev_features_t i40evf_features_check(struct sk_buff *skb,
- struct net_device *dev,
- netdev_features_t features)
+static netdev_features_t iavf_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
size_t len;
@@ -3196,16 +3171,16 @@ out_err:
}
/**
- * i40evf_fix_features - fix up the netdev feature bits
+ * iavf_fix_features - fix up the netdev feature bits
* @netdev: our net device
* @features: desired feature bits
*
* Returns fixed-up features bits
**/
-static netdev_features_t i40evf_fix_features(struct net_device *netdev,
- netdev_features_t features)
+static netdev_features_t iavf_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
@@ -3215,40 +3190,37 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev,
return features;
}
-static const struct net_device_ops i40evf_netdev_ops = {
- .ndo_open = i40evf_open,
- .ndo_stop = i40evf_close,
- .ndo_start_xmit = i40evf_xmit_frame,
- .ndo_set_rx_mode = i40evf_set_rx_mode,
+static const struct net_device_ops iavf_netdev_ops = {
+ .ndo_open = iavf_open,
+ .ndo_stop = iavf_close,
+ .ndo_start_xmit = iavf_xmit_frame,
+ .ndo_set_rx_mode = iavf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = i40evf_set_mac,
- .ndo_change_mtu = i40evf_change_mtu,
- .ndo_tx_timeout = i40evf_tx_timeout,
- .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
- .ndo_features_check = i40evf_features_check,
- .ndo_fix_features = i40evf_fix_features,
- .ndo_set_features = i40evf_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = i40evf_netpoll,
-#endif
- .ndo_setup_tc = i40evf_setup_tc,
+ .ndo_set_mac_address = iavf_set_mac,
+ .ndo_change_mtu = iavf_change_mtu,
+ .ndo_tx_timeout = iavf_tx_timeout,
+ .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
+ .ndo_features_check = iavf_features_check,
+ .ndo_fix_features = iavf_fix_features,
+ .ndo_set_features = iavf_set_features,
+ .ndo_setup_tc = iavf_setup_tc,
};
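The ops table only takes effect once probe points the net_device at it before registration; that code lies outside this hunk, but the usual shape (a sketch, not the driver's exact probe path) is:

	netdev->netdev_ops = &iavf_netdev_ops;
	/* ... remaining netdev setup: features, MTU bounds, MAC address ... */
	err = register_netdev(netdev);
	if (err)
		goto err_register;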
/**
- * i40evf_check_reset_complete - check that VF reset is complete
+ * iavf_check_reset_complete - check that VF reset is complete
* @hw: pointer to hw struct
*
* Returns 0 if device is ready to use, or -EBUSY if it's in reset.
**/
-static int i40evf_check_reset_complete(struct i40e_hw *hw)
+static int iavf_check_reset_complete(struct iavf_hw *hw)
{
u32 rstat;
int i;
for (i = 0; i < 100; i++) {
- rstat = rd32(hw, I40E_VFGEN_RSTAT) &
- I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
+ IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
(rstat == VIRTCHNL_VFR_COMPLETED))
return 0;
@@ -3258,18 +3230,18 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
}
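The hunk above cuts the loop off before its exit paths; a plausible completion, offered only as a sketch (the poll delay and the -EBUSY fall-through are inferred from the "Returns 0 ... or -EBUSY" contract in the kernel-doc, not shown in this patch):

	for (i = 0; i < 100; i++) {
		rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
			IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
		    (rstat == VIRTCHNL_VFR_COMPLETED))
			return 0;
		usleep_range(10, 20);		/* assumed inter-poll delay */
	}
	return -EBUSY;				/* still in reset after 100 polls */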
/**
- * i40evf_process_config - Process the config information we got from the PF
+ * iavf_process_config - Process the config information we got from the PF
* @adapter: board private structure
*
* Verify that we have a valid config struct, and set up our netdev features
* and our VSI struct.
**/
-int i40evf_process_config(struct i40evf_adapter *adapter)
+int iavf_process_config(struct iavf_adapter *adapter)
{
struct virtchnl_vf_resource *vfres = adapter->vf_res;
int i, num_req_queues = adapter->num_req_queues;
struct net_device *netdev = adapter->netdev;
- struct i40e_vsi *vsi = &adapter->vsi;
+ struct iavf_vsi *vsi = &adapter->vsi;
netdev_features_t hw_enc_features;
netdev_features_t hw_features;
@@ -3293,9 +3265,9 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
"Requested %d queues, but PF only gave us %d.\n",
num_req_queues,
adapter->vsi_res->num_queue_pairs);
- adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
- i40evf_schedule_reset(adapter);
+ iavf_schedule_reset(adapter);
return -ENODEV;
}
adapter->num_req_queues = 0;
@@ -3358,6 +3330,8 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
/* Do not turn on offloads when they are requested to be turned off.
* TSO needs minimum 576 bytes to work correctly.
*/
@@ -3380,22 +3354,22 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
- adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+ adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
adapter->rss_key_size = vfres->rss_key_size;
adapter->rss_lut_size = vfres->rss_lut_size;
} else {
- adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
- adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
+ adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
+ adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
}
return 0;
}
/**
- * i40evf_init_task - worker thread to perform delayed initialization
+ * iavf_init_task - worker thread to perform delayed initialization
* @work: pointer to work_struct containing our data
*
* This task completes the work that was begun in probe. Due to the nature
@@ -3406,65 +3380,65 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
* communications with the PF driver and set up our netdev, the watchdog
* takes over.
**/
-static void i40evf_init_task(struct work_struct *work)
+static void iavf_init_task(struct work_struct *work)
{
- struct i40evf_adapter *adapter = container_of(work,
- struct i40evf_adapter,
+ struct iavf_adapter *adapter = container_of(work,
+ struct iavf_adapter,
init_task.work);
struct net_device *netdev = adapter->netdev;
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
int err, bufsz;
switch (adapter->state) {
- case __I40EVF_STARTUP:
+ case __IAVF_STARTUP:
/* driver loaded, probe complete */
- adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
- adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
- err = i40e_set_mac_type(hw);
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
+ adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
+ err = iavf_set_mac_type(hw);
if (err) {
dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
err);
goto err;
}
- err = i40evf_check_reset_complete(hw);
+ err = iavf_check_reset_complete(hw);
if (err) {
dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
err);
goto err;
}
- hw->aq.num_arq_entries = I40EVF_AQ_LEN;
- hw->aq.num_asq_entries = I40EVF_AQ_LEN;
- hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
- hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+ hw->aq.num_arq_entries = IAVF_AQ_LEN;
+ hw->aq.num_asq_entries = IAVF_AQ_LEN;
+ hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
+ hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
- err = i40evf_init_adminq(hw);
+ err = iavf_init_adminq(hw);
if (err) {
dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
err);
goto err;
}
- err = i40evf_send_api_ver(adapter);
+ err = iavf_send_api_ver(adapter);
if (err) {
dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
- i40evf_shutdown_adminq(hw);
+ iavf_shutdown_adminq(hw);
goto err;
}
- adapter->state = __I40EVF_INIT_VERSION_CHECK;
+ adapter->state = __IAVF_INIT_VERSION_CHECK;
goto restart;
- case __I40EVF_INIT_VERSION_CHECK:
- if (!i40evf_asq_done(hw)) {
+ case __IAVF_INIT_VERSION_CHECK:
+ if (!iavf_asq_done(hw)) {
dev_err(&pdev->dev, "Admin queue command never completed\n");
- i40evf_shutdown_adminq(hw);
- adapter->state = __I40EVF_STARTUP;
+ iavf_shutdown_adminq(hw);
+ adapter->state = __IAVF_STARTUP;
goto err;
}
/* aq msg sent, awaiting reply */
- err = i40evf_verify_api_ver(adapter);
+ err = iavf_verify_api_ver(adapter);
if (err) {
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
- err = i40evf_send_api_ver(adapter);
+ err = iavf_send_api_ver(adapter);
else
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
adapter->pf_version.major,
@@ -3473,34 +3447,34 @@ static void i40evf_init_task(struct work_struct *work)
VIRTCHNL_VERSION_MINOR);
goto err;
}
- err = i40evf_send_vf_config_msg(adapter);
+ err = iavf_send_vf_config_msg(adapter);
if (err) {
dev_err(&pdev->dev, "Unable to send config request (%d)\n",
err);
goto err;
}
- adapter->state = __I40EVF_INIT_GET_RESOURCES;
+ adapter->state = __IAVF_INIT_GET_RESOURCES;
goto restart;
- case __I40EVF_INIT_GET_RESOURCES:
+ case __IAVF_INIT_GET_RESOURCES:
/* aq msg sent, awaiting reply */
if (!adapter->vf_res) {
bufsz = sizeof(struct virtchnl_vf_resource) +
- (I40E_MAX_VF_VSI *
+ (IAVF_MAX_VF_VSI *
sizeof(struct virtchnl_vsi_resource));
adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
if (!adapter->vf_res)
goto err;
}
- err = i40evf_get_vf_config(adapter);
+ err = iavf_get_vf_config(adapter);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
- err = i40evf_send_vf_config_msg(adapter);
+ err = iavf_send_vf_config_msg(adapter);
goto err;
} else if (err == I40E_ERR_PARAM) {
/* We only get ERR_PARAM if the device is in a very bad
* state or if we've been disabled for previous bad
* behavior. Either way, we're done now.
*/
- i40evf_shutdown_adminq(hw);
+ iavf_shutdown_adminq(hw);
dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
return;
}
@@ -3509,25 +3483,25 @@ static void i40evf_init_task(struct work_struct *work)
err);
goto err_alloc;
}
- adapter->state = __I40EVF_INIT_SW;
+ adapter->state = __IAVF_INIT_SW;
break;
default:
goto err_alloc;
}
- if (i40evf_process_config(adapter))
+ if (iavf_process_config(adapter))
goto err_alloc;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
+ adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
- netdev->netdev_ops = &i40evf_netdev_ops;
- i40evf_set_ethtool_ops(netdev);
+ netdev->netdev_ops = &iavf_netdev_ops;
+ iavf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
/* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
+ netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -3535,25 +3509,25 @@ static void i40evf_init_task(struct work_struct *work)
eth_hw_addr_random(netdev);
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
- adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
+ adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF;
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
- timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0);
+ timer_setup(&adapter->watchdog_timer, iavf_watchdog_timer, 0);
mod_timer(&adapter->watchdog_timer, jiffies + 1);
- adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
- adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
- err = i40evf_init_interrupt_scheme(adapter);
+ adapter->tx_desc_count = IAVF_DEFAULT_TXD;
+ adapter->rx_desc_count = IAVF_DEFAULT_RXD;
+ err = iavf_init_interrupt_scheme(adapter);
if (err)
goto err_sw_init;
- i40evf_map_rings_to_vectors(adapter);
+ iavf_map_rings_to_vectors(adapter);
if (adapter->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
- adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
+ adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
- err = i40evf_request_misc_irq(adapter);
+ err = iavf_request_misc_irq(adapter);
if (err)
goto err_sw_init;
@@ -3570,7 +3544,7 @@ static void i40evf_init_task(struct work_struct *work)
netif_tx_stop_all_queues(netdev);
if (CLIENT_ALLOWED(adapter)) {
- err = i40evf_lan_add_device(adapter);
+ err = iavf_lan_add_device(adapter);
if (err)
dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
err);
@@ -3580,9 +3554,9 @@ static void i40evf_init_task(struct work_struct *work)
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
- adapter->state = __I40EVF_DOWN;
- set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
- i40evf_misc_irq_enable(adapter);
+ adapter->state = __IAVF_DOWN;
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+ iavf_misc_irq_enable(adapter);
wake_up(&adapter->down_waitqueue);
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
@@ -3591,31 +3565,31 @@ static void i40evf_init_task(struct work_struct *work)
goto err_mem;
if (RSS_AQ(adapter)) {
- adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
} else {
- i40evf_init_rss(adapter);
+ iavf_init_rss(adapter);
}
return;
restart:
schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
return;
err_mem:
- i40evf_free_rss(adapter);
+ iavf_free_rss(adapter);
err_register:
- i40evf_free_misc_irq(adapter);
+ iavf_free_misc_irq(adapter);
err_sw_init:
- i40evf_reset_interrupt_capability(adapter);
+ iavf_reset_interrupt_capability(adapter);
err_alloc:
kfree(adapter->vf_res);
adapter->vf_res = NULL;
err:
/* Things went into the weeds, so try again later */
- if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
+ if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
- adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- i40evf_shutdown_adminq(hw);
- adapter->state = __I40EVF_STARTUP;
+ adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
+ iavf_shutdown_adminq(hw);
+ adapter->state = __IAVF_STARTUP;
schedule_delayed_work(&adapter->init_task, HZ * 5);
return;
}
@@ -3623,21 +3597,21 @@ err:
}
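Pulling the case labels above together, the delayed-work init path is a small state machine; a summary of the progression (state names from this patch, ordering taken from the switch):

/* __IAVF_STARTUP            : bring up the adminq, send the API version
 * __IAVF_INIT_VERSION_CHECK : verify the PF's reply, request the VF config
 * __IAVF_INIT_GET_RESOURCES : receive VF resources, fall through to SW init
 * __IAVF_INIT_SW            : set up netdev + interrupts, end in __IAVF_DOWN
 *
 * Each step reschedules the work item via the "restart" label; persistent
 * failures count up to IAVF_AQ_MAX_ERR before the driver backs off for 5s.
 */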
/**
- * i40evf_shutdown - Shutdown the device in preparation for a reboot
+ * iavf_shutdown - Shutdown the device in preparation for a reboot
* @pdev: pci device structure
**/
-static void i40evf_shutdown(struct pci_dev *pdev)
+static void iavf_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
if (netif_running(netdev))
- i40evf_close(netdev);
+ iavf_close(netdev);
/* Prevent the watchdog from running. */
- adapter->state = __I40EVF_REMOVE;
+ adapter->state = __IAVF_REMOVE;
adapter->aq_required = 0;
#ifdef CONFIG_PM
@@ -3648,21 +3622,21 @@ static void i40evf_shutdown(struct pci_dev *pdev)
}
/**
- * i40evf_probe - Device Initialization Routine
+ * iavf_probe - Device Initialization Routine
* @pdev: PCI device information struct
- * @ent: entry in i40evf_pci_tbl
+ * @ent: entry in iavf_pci_tbl
*
* Returns 0 on success, negative on failure
*
- * i40evf_probe initializes an adapter identified by a pci_dev structure.
+ * iavf_probe initializes an adapter identified by a pci_dev structure.
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
- struct i40evf_adapter *adapter = NULL;
- struct i40e_hw *hw = NULL;
+ struct iavf_adapter *adapter = NULL;
+ struct iavf_hw *hw = NULL;
int err;
err = pci_enable_device(pdev);
@@ -3679,7 +3653,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- err = pci_request_regions(pdev, i40evf_driver_name);
+ err = pci_request_regions(pdev, iavf_driver_name);
if (err) {
dev_err(&pdev->dev,
"pci_request_regions failed 0x%x\n", err);
@@ -3690,8 +3664,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
- I40EVF_MAX_REQ_QUEUES);
+ netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
+ IAVF_MAX_REQ_QUEUES);
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -3709,7 +3683,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter;
adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
- adapter->state = __I40EVF_STARTUP;
+ adapter->state = __IAVF_STARTUP;
/* Call save state here because it relies on the adapter struct. */
pci_save_state(pdev);
@@ -3742,11 +3716,11 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&adapter->vlan_filter_list);
INIT_LIST_HEAD(&adapter->cloud_filter_list);
- INIT_WORK(&adapter->reset_task, i40evf_reset_task);
- INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
- INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
- INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
- INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
+ INIT_WORK(&adapter->reset_task, iavf_reset_task);
+ INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
+ INIT_WORK(&adapter->watchdog_task, iavf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
+ INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
schedule_delayed_work(&adapter->init_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
@@ -3767,33 +3741,33 @@ err_dma:
#ifdef CONFIG_PM
/**
- * i40evf_suspend - Power management suspend routine
+ * iavf_suspend - Power management suspend routine
* @pdev: PCI device information struct
* @state: unused
*
* Called when the system (VM) is entering sleep/suspend.
**/
-static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
+static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = netdev_priv(netdev);
int retval = 0;
netif_device_detach(netdev);
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
&adapter->crit_section))
usleep_range(500, 1000);
if (netif_running(netdev)) {
rtnl_lock();
- i40evf_down(adapter);
+ iavf_down(adapter);
rtnl_unlock();
}
- i40evf_free_misc_irq(adapter);
- i40evf_reset_interrupt_capability(adapter);
+ iavf_free_misc_irq(adapter);
+ iavf_reset_interrupt_capability(adapter);
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
retval = pci_save_state(pdev);
if (retval)
@@ -3805,14 +3779,14 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
}
/**
- * i40evf_resume - Power management resume routine
+ * iavf_resume - Power management resume routine
* @pdev: PCI device information struct
*
* Called when the system (VM) is resumed from sleep/suspend.
**/
-static int i40evf_resume(struct pci_dev *pdev)
+static int iavf_resume(struct pci_dev *pdev)
{
- struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
+ struct iavf_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
u32 err;
@@ -3831,13 +3805,13 @@ static int i40evf_resume(struct pci_dev *pdev)
pci_set_master(pdev);
rtnl_lock();
- err = i40evf_set_interrupt_capability(adapter);
+ err = iavf_set_interrupt_capability(adapter);
if (err) {
rtnl_unlock();
dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
return err;
}
- err = i40evf_request_misc_irq(adapter);
+ err = iavf_request_misc_irq(adapter);
rtnl_unlock();
if (err) {
dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
@@ -3853,25 +3827,25 @@ static int i40evf_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
/**
- * i40evf_remove - Device Removal Routine
+ * iavf_remove - Device Removal Routine
* @pdev: PCI device information struct
*
- * i40evf_remove is called by the PCI subsystem to alert the driver
+ * iavf_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device. This could be caused by a
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void i40evf_remove(struct pci_dev *pdev)
+static void iavf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40evf_vlan_filter *vlf, *vlftmp;
- struct i40evf_mac_filter *f, *ftmp;
- struct i40evf_cloud_filter *cf, *cftmp;
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_vlan_filter *vlf, *vlftmp;
+ struct iavf_mac_filter *f, *ftmp;
+ struct iavf_cloud_filter *cf, *cftmp;
+ struct iavf_hw *hw = &adapter->hw;
int err;
/* Indicate we are in remove and not to run reset_task */
- set_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section);
+ set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->client_task);
@@ -3880,37 +3854,39 @@ static void i40evf_remove(struct pci_dev *pdev)
adapter->netdev_registered = false;
}
if (CLIENT_ALLOWED(adapter)) {
- err = i40evf_lan_del_device(adapter);
+ err = iavf_lan_del_device(adapter);
if (err)
dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
err);
}
/* Shut down all the garbage mashers on the detention level */
- adapter->state = __I40EVF_REMOVE;
+ adapter->state = __IAVF_REMOVE;
adapter->aq_required = 0;
- adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
- i40evf_request_reset(adapter);
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+ iavf_request_reset(adapter);
msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
- if (!i40evf_asq_done(hw)) {
- i40evf_request_reset(adapter);
+ if (!iavf_asq_done(hw)) {
+ iavf_request_reset(adapter);
msleep(50);
}
- i40evf_free_all_tx_resources(adapter);
- i40evf_free_all_rx_resources(adapter);
- i40evf_misc_irq_disable(adapter);
- i40evf_free_misc_irq(adapter);
- i40evf_reset_interrupt_capability(adapter);
- i40evf_free_q_vectors(adapter);
+ iavf_free_all_tx_resources(adapter);
+ iavf_free_all_rx_resources(adapter);
+ iavf_misc_irq_disable(adapter);
+ iavf_free_misc_irq(adapter);
+ iavf_reset_interrupt_capability(adapter);
+ iavf_free_q_vectors(adapter);
if (adapter->watchdog_timer.function)
del_timer_sync(&adapter->watchdog_timer);
- i40evf_free_rss(adapter);
+ cancel_work_sync(&adapter->adminq_task);
+
+ iavf_free_rss(adapter);
if (hw->aq.asq.count)
- i40evf_shutdown_adminq(hw);
+ iavf_shutdown_adminq(hw);
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
@@ -3918,9 +3894,9 @@ static void i40evf_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
pci_release_regions(pdev);
- i40evf_free_all_tx_resources(adapter);
- i40evf_free_all_rx_resources(adapter);
- i40evf_free_queues(adapter);
+ iavf_free_all_tx_resources(adapter);
+ iavf_free_all_rx_resources(adapter);
+ iavf_free_queues(adapter);
kfree(adapter->vf_res);
spin_lock_bh(&adapter->mac_vlan_list_lock);
/* If we got removed before an up/down sequence, we've got a filter
@@ -3952,57 +3928,57 @@ static void i40evf_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_driver i40evf_driver = {
- .name = i40evf_driver_name,
- .id_table = i40evf_pci_tbl,
- .probe = i40evf_probe,
- .remove = i40evf_remove,
+static struct pci_driver iavf_driver = {
+ .name = iavf_driver_name,
+ .id_table = iavf_pci_tbl,
+ .probe = iavf_probe,
+ .remove = iavf_remove,
#ifdef CONFIG_PM
- .suspend = i40evf_suspend,
- .resume = i40evf_resume,
+ .suspend = iavf_suspend,
+ .resume = iavf_resume,
#endif
- .shutdown = i40evf_shutdown,
+ .shutdown = iavf_shutdown,
};
/**
- * i40e_init_module - Driver Registration Routine
+ * iavf_init_module - Driver Registration Routine
*
- * i40e_init_module is the first routine called when the driver is
+ * iavf_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
**/
-static int __init i40evf_init_module(void)
+static int __init iavf_init_module(void)
{
int ret;
- pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
- i40evf_driver_version);
+ pr_info("iavf: %s - version %s\n", iavf_driver_string,
+ iavf_driver_version);
- pr_info("%s\n", i40evf_copyright);
+ pr_info("%s\n", iavf_copyright);
- i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
- i40evf_driver_name);
- if (!i40evf_wq) {
- pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
+ iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
+ iavf_driver_name);
+ if (!iavf_wq) {
+ pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
return -ENOMEM;
}
- ret = pci_register_driver(&i40evf_driver);
+ ret = pci_register_driver(&iavf_driver);
return ret;
}
-module_init(i40evf_init_module);
+module_init(iavf_init_module);
/**
- * i40e_exit_module - Driver Exit Cleanup Routine
+ * iavf_exit_module - Driver Exit Cleanup Routine
*
- * i40e_exit_module is called just before the driver is removed
+ * iavf_exit_module is called just before the driver is removed
* from memory.
**/
-static void __exit i40evf_exit_module(void)
+static void __exit iavf_exit_module(void)
{
- pci_unregister_driver(&i40evf_driver);
- destroy_workqueue(i40evf_wq);
+ pci_unregister_driver(&iavf_driver);
+ destroy_workqueue(iavf_wq);
}
-module_exit(i40evf_exit_module);
+module_exit(iavf_exit_module);
-/* i40evf_main.c */
+/* iavf_main.c */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/iavf/iavf_osdep.h
index 3ddddb46455b..e6e0b0328706 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_osdep.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#ifndef _I40E_OSDEP_H_
-#define _I40E_OSDEP_H_
+#ifndef _IAVF_OSDEP_H_
+#define _IAVF_OSDEP_H_
#include <linux/types.h>
#include <linux/if_ether.h>
@@ -24,29 +24,29 @@
#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
-#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT)
+#define iavf_flush(a) readl((a)->hw_addr + IAVF_VFGEN_RSTAT)
/* memory allocation tracking */
-struct i40e_dma_mem {
+struct iavf_dma_mem {
void *va;
dma_addr_t pa;
u32 size;
};
-#define i40e_allocate_dma_mem(h, m, unused, s, a) \
- i40evf_allocate_dma_mem_d(h, m, s, a)
-#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m)
+#define iavf_allocate_dma_mem(h, m, unused, s, a) \
+ iavf_allocate_dma_mem_d(h, m, s, a)
+#define iavf_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m)
-struct i40e_virt_mem {
+struct iavf_virt_mem {
void *va;
u32 size;
};
-#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
-#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
+#define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s)
+#define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m)
-#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__)
-extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+#define iavf_debug(h, m, s, ...) iavf_debug_d(h, m, s, ##__VA_ARGS__)
+extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
__attribute__ ((format(gnu_printf, 3, 4)));
-typedef enum i40e_status_code i40e_status;
-#endif /* _I40E_OSDEP_H_ */
+typedef enum iavf_status_code iavf_status;
+#endif /* _IAVF_OSDEP_H_ */
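The iavf_allocate_dma_mem() macro above forwards to iavf_allocate_dma_mem_d(), whose definition is not part of this patch. As a sketch of what such a helper conventionally does, assuming it wraps dma_alloc_coherent() (hypothetical body, not the driver's verbatim implementation):

iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
				    struct iavf_dma_mem *mem,
				    u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	mem->size = ALIGN(size, alignment);
	/* coherent mapping: 'va' for the CPU, 'pa' handed to the device */
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     &mem->pa, GFP_KERNEL);
	return mem->va ? 0 : I40E_ERR_NO_MEMORY;
}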
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
new file mode 100644
index 000000000000..d6685103af39
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _IAVF_PROTOTYPE_H_
+#define _IAVF_PROTOTYPE_H_
+
+#include "iavf_type.h"
+#include "iavf_alloc.h"
+#include <linux/avf/virtchnl.h>
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. They exist
+ * mostly because they are needed even before init has
+ * happened, and they assist in the early SW and FW
+ * setup.
+ */
+
+/* adminq functions */
+iavf_status iavf_init_adminq(struct iavf_hw *hw);
+iavf_status iavf_shutdown_adminq(struct iavf_hw *hw);
+void i40e_adminq_init_ring_data(struct iavf_hw *hw);
+iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool iavf_asq_done(struct iavf_hw *hw);
+
+/* debug function for adminq */
+void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask,
+ void *desc, void *buffer, u16 buf_len);
+
+void i40e_idle_aq(struct iavf_hw *hw);
+void iavf_resume_aq(struct iavf_hw *hw);
+bool iavf_check_asq_alive(struct iavf_hw *hw);
+iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading);
+const char *iavf_aq_str(struct iavf_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *iavf_stat_str(struct iavf_hw *hw, iavf_status stat_err);
+
+iavf_status iavf_aq_get_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+iavf_status iavf_aq_get_rss_key(struct iavf_hw *hw, u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+
+iavf_status iavf_set_mac_type(struct iavf_hw *hw);
+
+extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
+
+static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return iavf_ptype_lookup[ptype];
+}
+
+void iavf_vf_parse_hw_config(struct iavf_hw *hw,
+ struct virtchnl_vf_resource *msg);
+iavf_status iavf_vf_reset(struct iavf_hw *hw);
+iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
+ enum virtchnl_ops v_opcode,
+ iavf_status v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+#endif /* _IAVF_PROTOTYPE_H_ */
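decode_rx_desc_ptype() above turns the 8-bit packet type carried in an Rx descriptor into a decoded layout entry. A short usage sketch (the 'known' and 'payload_layer' fields follow the i40e-family decode struct and are assumptions here, since iavf_type.h is not shown in this hunk):

struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);

if (decoded.known)		/* lookup-table entry is populated */
	payload = decoded.payload_layer;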
diff --git a/drivers/net/ethernet/intel/iavf/iavf_register.h b/drivers/net/ethernet/intel/iavf/iavf_register.h
new file mode 100644
index 000000000000..bf793332fc9d
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_register.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _IAVF_REGISTER_H_
+#define _IAVF_REGISTER_H_
+
+#define IAVF_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
+#define IAVF_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
+#define IAVF_VF_ARQH1 0x00007400 /* Reset: EMPR */
+#define IAVF_VF_ARQH1_ARQH_SHIFT 0
+#define IAVF_VF_ARQH1_ARQH_MASK IAVF_MASK(0x3FF, IAVF_VF_ARQH1_ARQH_SHIFT)
+#define IAVF_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
+#define IAVF_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define IAVF_VF_ARQLEN1_ARQVFE_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQVFE_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define IAVF_VF_ARQLEN1_ARQOVFL_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define IAVF_VF_ARQLEN1_ARQCRIT_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define IAVF_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define IAVF_VF_ARQLEN1_ARQENABLE_MASK IAVF_MASK(0x1, IAVF_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define IAVF_VF_ARQT1 0x00007000 /* Reset: EMPR */
+#define IAVF_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
+#define IAVF_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
+#define IAVF_VF_ATQH1 0x00006400 /* Reset: EMPR */
+#define IAVF_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
+#define IAVF_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define IAVF_VF_ATQLEN1_ATQVFE_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQVFE_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define IAVF_VF_ATQLEN1_ATQOVFL_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define IAVF_VF_ATQLEN1_ATQCRIT_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define IAVF_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define IAVF_VF_ATQLEN1_ATQENABLE_MASK IAVF_MASK(0x1, IAVF_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define IAVF_VF_ATQT1 0x00008400 /* Reset: EMPR */
+#define IAVF_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
+#define IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define IAVF_VFGEN_RSTAT_VFR_STATE_MASK IAVF_MASK(0x3, IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define IAVF_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define IAVF_VFINT_DYN_CTL01_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define IAVF_VFINT_DYN_CTLN1_INTENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK IAVF_MASK(0x3, IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define IAVF_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
+#define IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK IAVF_MASK(0x1, IAVF_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define IAVF_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define IAVF_VFINT_ICR01 0x00004800 /* Reset: CORER */
+#define IAVF_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define IAVF_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define IAVF_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
+#define IAVF_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define IAVF_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define IAVF_VFQF_HKEY_MAX_INDEX 12
+#define IAVF_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define IAVF_VFQF_HLUT_MAX_INDEX 15
+#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK IAVF_MASK(0x1, IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#endif /* _IAVF_REGISTER_H_ */
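IAVF_MASK() itself is not defined in this header; in the i40e family it is the plain shift-a-mask helper, assumed here to mirror I40E_MASK. A register field is then extracted by masking and shifting back down:

#define IAVF_MASK(mask, shift) ((u32)(mask) << (shift))	/* assumed definition */

/* pull the 2-bit VF reset state out of VFGEN_RSTAT */
u32 rstat = rd32(hw, IAVF_VFGEN_RSTAT);
u32 vfr_state = (rstat & IAVF_VFGEN_RSTAT_VFR_STATE_MASK) >>
		IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;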
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h
index 77be0702d07c..46742fab7b8c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_status.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#ifndef _I40E_STATUS_H_
-#define _I40E_STATUS_H_
+#ifndef _IAVF_STATUS_H_
+#define _IAVF_STATUS_H_
/* Error Codes */
-enum i40e_status_code {
+enum iavf_status_code {
I40E_SUCCESS = 0,
I40E_ERR_NVM = -1,
I40E_ERR_NVM_CHECKSUM = -2,
@@ -75,4 +75,4 @@ enum i40e_status_code {
I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
-#endif /* _I40E_STATUS_H_ */
+#endif /* _IAVF_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index d7a4e68820a8..1474f5539751 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -3,16 +3,16 @@
/* Modeled on trace-events-sample.h */
-/* The trace subsystem name for i40evf will be "i40evf".
+/* The trace subsystem name for iavf will be "iavf".
*
- * This file is named i40e_trace.h.
+ * This file is named iavf_trace.h.
*
* Since this include file's name is different from the trace
* subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
* of this file.
*/
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM i40evf
+#define TRACE_SYSTEM iavf
/* See trace-events-sample.h for a detailed description of why this
* guard clause is different from most normal include files.
@@ -23,14 +23,14 @@
#include <linux/tracepoint.h>
/**
- * i40e_trace() macro enables shared code to refer to trace points
+ * iavf_trace() macro enables shared code to refer to trace points
* like:
*
- * trace_i40e{,vf}_example(args...)
+ * trace_iavf_example(args...)
*
* ... as:
*
- * i40e_trace(example, args...)
+ * iavf_trace(example, args...)
*
* ... to resolve to the PF or VF version of the tracepoint without
* ifdefs, and to allow tracepoints to be disabled entirely at build
@@ -39,29 +39,29 @@
* Trace point should always be referred to in the driver via this
* macro.
*
- * Similarly, i40e_trace_enabled(trace_name) wraps references to
- * trace_i40e{,vf}_<trace_name>_enabled() functions.
+ * Similarly, iavf_trace_enabled(trace_name) wraps references to
+ * trace_iavf_<trace_name>_enabled() functions.
*/
-#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name)
-#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name)
+#define _IAVF_TRACE_NAME(trace_name) (trace_ ## iavf ## _ ## trace_name)
+#define IAVF_TRACE_NAME(trace_name) _IAVF_TRACE_NAME(trace_name)
-#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args)
+#define iavf_trace(trace_name, args...) IAVF_TRACE_NAME(trace_name)(args)
-#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
+#define iavf_trace_enabled(trace_name) IAVF_TRACE_NAME(trace_name##_enabled)()
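Concretely, the two-step token pasting after this rename resolves as follows (expansion shown for illustration only):

iavf_trace(clean_tx_irq, ring, desc, buf)
	/* -> IAVF_TRACE_NAME(clean_tx_irq)(ring, desc, buf)	*/
	/* -> trace_iavf_clean_tx_irq(ring, desc, buf)		*/

iavf_trace_enabled(clean_tx_irq)
	/* -> trace_iavf_clean_tx_irq_enabled()			*/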
/* Events common to PF and VF. Corresponding versions will be defined
- * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
+ * for both, named trace_i40e_* and trace_iavf_*. The iavf_trace()
* macro above will select the right trace point name for the driver
* being built from shared code.
*/
/* Events related to a vsi & ring */
DECLARE_EVENT_CLASS(
- i40evf_tx_template,
+ iavf_tx_template,
- TP_PROTO(struct i40e_ring *ring,
- struct i40e_tx_desc *desc,
- struct i40e_tx_buffer *buf),
+ TP_PROTO(struct iavf_ring *ring,
+ struct iavf_tx_desc *desc,
+ struct iavf_tx_buffer *buf),
TP_ARGS(ring, desc, buf),
@@ -93,26 +93,26 @@ DECLARE_EVENT_CLASS(
);
DEFINE_EVENT(
- i40evf_tx_template, i40evf_clean_tx_irq,
- TP_PROTO(struct i40e_ring *ring,
- struct i40e_tx_desc *desc,
- struct i40e_tx_buffer *buf),
+ iavf_tx_template, iavf_clean_tx_irq,
+ TP_PROTO(struct iavf_ring *ring,
+ struct iavf_tx_desc *desc,
+ struct iavf_tx_buffer *buf),
TP_ARGS(ring, desc, buf));
DEFINE_EVENT(
- i40evf_tx_template, i40evf_clean_tx_irq_unmap,
- TP_PROTO(struct i40e_ring *ring,
- struct i40e_tx_desc *desc,
- struct i40e_tx_buffer *buf),
+ iavf_tx_template, iavf_clean_tx_irq_unmap,
+ TP_PROTO(struct iavf_ring *ring,
+ struct iavf_tx_desc *desc,
+ struct iavf_tx_buffer *buf),
TP_ARGS(ring, desc, buf));
DECLARE_EVENT_CLASS(
- i40evf_rx_template,
+ iavf_rx_template,
- TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ TP_PROTO(struct iavf_ring *ring,
+ union iavf_32byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -138,26 +138,26 @@ DECLARE_EVENT_CLASS(
);
DEFINE_EVENT(
- i40evf_rx_template, i40evf_clean_rx_irq,
- TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ iavf_rx_template, iavf_clean_rx_irq,
+ TP_PROTO(struct iavf_ring *ring,
+ union iavf_32byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
DEFINE_EVENT(
- i40evf_rx_template, i40evf_clean_rx_irq_rx,
- TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ iavf_rx_template, iavf_clean_rx_irq_rx,
+ TP_PROTO(struct iavf_ring *ring,
+ union iavf_32byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
DECLARE_EVENT_CLASS(
- i40evf_xmit_template,
+ iavf_xmit_template,
TP_PROTO(struct sk_buff *skb,
- struct i40e_ring *ring),
+ struct iavf_ring *ring),
TP_ARGS(skb, ring),
@@ -180,23 +180,23 @@ DECLARE_EVENT_CLASS(
);
DEFINE_EVENT(
- i40evf_xmit_template, i40evf_xmit_frame_ring,
+ iavf_xmit_template, iavf_xmit_frame_ring,
TP_PROTO(struct sk_buff *skb,
- struct i40e_ring *ring),
+ struct iavf_ring *ring),
TP_ARGS(skb, ring));
DEFINE_EVENT(
- i40evf_xmit_template, i40evf_xmit_frame_ring_drop,
+ iavf_xmit_template, iavf_xmit_frame_ring_drop,
TP_PROTO(struct sk_buff *skb,
- struct i40e_ring *ring),
+ struct iavf_ring *ring),
TP_ARGS(skb, ring));
/* Events unique to the VF. */
-#endif /* _I40E_TRACE_H_ */
-/* This must be outside ifdef _I40E_TRACE_H */
+#endif /* _IAVF_TRACE_H_ */
+/* This must be outside ifdef _IAVF_TRACE_H */
/* This trace include file is not located in the .../include/trace
* with the kernel tracepoint definitions, because we're a loadable
@@ -205,5 +205,5 @@ DEFINE_EVENT(
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE i40e_trace
+#define TRACE_INCLUDE_FILE iavf_trace
#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index a9730711e257..edc349f49748 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -4,32 +4,32 @@
#include <linux/prefetch.h>
#include <net/busy_poll.h>
-#include "i40evf.h"
-#include "i40e_trace.h"
-#include "i40e_prototype.h"
+#include "iavf.h"
+#include "iavf_trace.h"
+#include "iavf_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
{
- return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
- ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
- ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
- ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
- ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+ return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
}
-#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
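build_ctob() packs the command, header offset, buffer size, and L2 tag into the descriptor's 64-bit cmd_type_offset_bsz word. A typical call when finishing a frame might look like this (td_offset and td_tag stand for whatever the checksum/VLAN setup produced; illustrative, not lifted from this patch):

/* last descriptor of the frame: EOP + RS so HW writes back a completion */
tx_desc->cmd_type_offset_bsz =
	build_ctob(IAVF_TXD_CMD, td_offset, skb_headlen(skb), td_tag);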
/**
- * i40e_unmap_and_free_tx_resource - Release a Tx buffer
+ * iavf_unmap_and_free_tx_resource - Release a Tx buffer
* @ring: the ring that owns the buffer
* @tx_buffer: the buffer to free
**/
-static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
- struct i40e_tx_buffer *tx_buffer)
+static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
+ struct iavf_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
kfree(tx_buffer->raw_buf);
else
dev_kfree_skb_any(tx_buffer->skb);
@@ -52,10 +52,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
}
/**
- * i40evf_clean_tx_ring - Free any empty Tx buffers
+ * iavf_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
**/
-void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
+void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
{
unsigned long bi_size;
u16 i;
@@ -66,9 +66,9 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++)
- i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+ iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
- bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_bi, 0, bi_size);
/* Zero out the descriptor ring */
@@ -85,14 +85,14 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
}
/**
- * i40evf_free_tx_resources - Free Tx resources per queue
+ * iavf_free_tx_resources - Free Tx resources per queue
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
-void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
+void iavf_free_tx_resources(struct iavf_ring *tx_ring)
{
- i40evf_clean_tx_ring(tx_ring);
+ iavf_clean_tx_ring(tx_ring);
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
@@ -104,14 +104,14 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
}
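The hunk elides the tail of iavf_free_tx_resources(); in the i40e family the remainder releases the coherent descriptor ring, sketched here as an assumption:

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}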
/**
- * i40evf_get_tx_pending - how many Tx descriptors not processed
+ * iavf_get_tx_pending - how many Tx descriptors not processed
* @ring: the ring of descriptors
* @in_sw: is tx_pending being checked in SW or HW
*
* Since there is no access to the ring head register
* in XL710, we need to use our local copies
**/
-u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
+u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
u32 head, tail;
@@ -126,15 +126,15 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
}
/**
- * i40evf_detect_recover_hung - Function to detect and recover hung_queues
+ * iavf_detect_recover_hung - Function to detect and recover hung_queues
* @vsi: pointer to vsi struct with tx queues
*
* VSI has netdev and netdev has TX queues. This function checks each of
* those TX queues and, if a queue is hung, triggers recovery by issuing a SW interrupt.
**/
-void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
+void iavf_detect_recover_hung(struct iavf_vsi *vsi)
{
- struct i40e_ring *tx_ring = NULL;
+ struct iavf_ring *tx_ring = NULL;
struct net_device *netdev;
unsigned int i;
int packets;
@@ -142,7 +142,7 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
if (!vsi)
return;
- if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ if (test_bit(__IAVF_VSI_DOWN, vsi->state))
return;
netdev = vsi->netdev;
@@ -164,16 +164,16 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
*/
packets = tx_ring->stats.packets & INT_MAX;
if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
- i40evf_force_wb(vsi, tx_ring->q_vector);
+ iavf_force_wb(vsi, tx_ring->q_vector);
continue;
}
/* Memory barrier between read of packet count and call
- * to i40evf_get_tx_pending()
+ * to iavf_get_tx_pending()
*/
smp_rmb();
tx_ring->tx_stats.prev_pkt_ctr =
- i40evf_get_tx_pending(tx_ring, true) ? packets : -1;
+ iavf_get_tx_pending(tx_ring, true) ? packets : -1;
}
}
}
@@ -181,28 +181,28 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
#define WB_STRIDE 4
/**
- * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * iavf_clean_tx_irq - Reclaim resources after transmit completes
* @vsi: the VSI we care about
* @tx_ring: Tx ring to clean
* @napi_budget: Used to determine if we are in netpoll
*
* Returns true if there's any budget left (i.e. the clean is finished)
**/
-static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
- struct i40e_ring *tx_ring, int napi_budget)
+static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
+ struct iavf_ring *tx_ring, int napi_budget)
{
u16 i = tx_ring->next_to_clean;
- struct i40e_tx_buffer *tx_buf;
- struct i40e_tx_desc *tx_desc;
+ struct iavf_tx_buffer *tx_buf;
+ struct iavf_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = vsi->work_limit;
tx_buf = &tx_ring->tx_bi[i];
- tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_desc = IAVF_TX_DESC(tx_ring, i);
i -= tx_ring->count;
do {
- struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+ struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
@@ -211,10 +211,10 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* prevent any other reads prior to eop_desc */
smp_rmb();
- i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
- cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
break;
/* clear next_to_watch to prevent false hangs */
@@ -239,7 +239,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
- i40e_trace(clean_tx_irq_unmap,
+ iavf_trace(clean_tx_irq_unmap,
tx_ring, tx_desc, tx_buf);
tx_buf++;
@@ -248,7 +248,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (unlikely(!i)) {
i -= tx_ring->count;
tx_buf = tx_ring->tx_bi;
- tx_desc = I40E_TX_DESC(tx_ring, 0);
+ tx_desc = IAVF_TX_DESC(tx_ring, 0);
}
/* unmap any remaining paged data */
@@ -268,7 +268,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (unlikely(!i)) {
i -= tx_ring->count;
tx_buf = tx_ring->tx_bi;
- tx_desc = I40E_TX_DESC(tx_ring, 0);
+ tx_desc = IAVF_TX_DESC(tx_ring, 0);
}
prefetch(tx_desc);
@@ -286,18 +286,18 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
- if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
/* check to see if there are < 4 descriptors
* waiting to be written back, then kick the hardware to force
* them to be written back in case we stay in NAPI.
* In this mode on X722 we do not enable interrupts.
*/
- unsigned int j = i40evf_get_tx_pending(tx_ring, false);
+ unsigned int j = iavf_get_tx_pending(tx_ring, false);
if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) &&
- !test_bit(__I40E_VSI_DOWN, vsi->state) &&
- (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
+ (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -307,14 +307,14 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
- (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_VSI_DOWN, vsi->state)) {
+ !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -325,75 +325,75 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
}
/**
- * i40evf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
+ * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
* @vsi: the VSI we care about
* @q_vector: the vector on which to enable writeback
*
**/
-static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
- struct i40e_q_vector *q_vector)
+static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
+ struct iavf_q_vector *q_vector)
{
u16 flags = q_vector->tx.ring[0].flags;
u32 val;
- if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
+ if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
return;
if (q_vector->arm_wb_state)
return;
- val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
+ val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
+ IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
q_vector->arm_wb_state = true;
}
/**
- * i40evf_force_wb - Issue SW Interrupt so HW does a wb
+ * iavf_force_wb - Issue SW Interrupt so HW does a wb
* @vsi: the VSI we care about
* @q_vector: the vector on which to force writeback
*
**/
-void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
{
- u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
- I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
+ u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+ IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+ IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+ IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
/* allow 00 to be written to the index */;
wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->reg_idx),
+ IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
val);
}
-static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
- struct i40e_ring_container *rc)
+static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
+ struct iavf_ring_container *rc)
{
return &q_vector->rx == rc;
}
-static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
+static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
{
unsigned int divisor;
switch (q_vector->adapter->link_speed) {
case I40E_LINK_SPEED_40GB:
- divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
+ divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
break;
case I40E_LINK_SPEED_25GB:
case I40E_LINK_SPEED_20GB:
- divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
+ divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
break;
default:
case I40E_LINK_SPEED_10GB:
- divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
+ divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
break;
case I40E_LINK_SPEED_1GB:
case I40E_LINK_SPEED_100MB:
- divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
+ divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
break;
}
@@ -401,7 +401,7 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
}
/**
- * i40e_update_itr - update the dynamic ITR value based on statistics
+ * iavf_update_itr - update the dynamic ITR value based on statistics
* @q_vector: structure containing interrupt and ring information
* @rc: structure containing ring performance data
*
@@ -413,8 +413,8 @@ static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static void i40e_update_itr(struct i40e_q_vector *q_vector,
- struct i40e_ring_container *rc)
+static void iavf_update_itr(struct iavf_q_vector *q_vector,
+ struct iavf_ring_container *rc)
{
unsigned int avg_wire_size, packets, bytes, itr;
unsigned long next_update = jiffies;
@@ -428,9 +428,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
/* For Rx we want to push the delay up and default to low latency.
* for Tx we want to pull the delay down and default to high latency.
*/
- itr = i40e_container_is_rx(q_vector, rc) ?
- I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
- I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
+ itr = iavf_container_is_rx(q_vector, rc) ?
+ IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
+ IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;
/* If we didn't update within up to 1 - 2 jiffies we can assume
* that either packets are coming in so slow there hasn't been
@@ -454,15 +454,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
packets = rc->total_packets;
bytes = rc->total_bytes;
- if (i40e_container_is_rx(q_vector, rc)) {
+ if (iavf_container_is_rx(q_vector, rc)) {
/* If Rx there are 1 to 4 packets and bytes are less than
* 9000 assume insufficient data to use bulk rate limiting
* approach unless Tx is already in bulk rate limiting. We
* are likely latency driven.
*/
if (packets && packets < 4 && bytes < 9000 &&
- (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
- itr = I40E_ITR_ADAPTIVE_LATENCY;
+ (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
+ itr = IAVF_ITR_ADAPTIVE_LATENCY;
goto adjust_by_size;
}
} else if (packets < 4) {
@@ -471,15 +471,15 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
* that the Rx can relax.
*/
- if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
- (q_vector->rx.target_itr & I40E_ITR_MASK) ==
- I40E_ITR_ADAPTIVE_MAX_USECS)
+ if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
+ (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
+ IAVF_ITR_ADAPTIVE_MAX_USECS)
goto clear_counts;
} else if (packets > 32) {
/* If we have processed over 32 packets in a single interrupt
* for Tx assume we need to switch over to "bulk" mode.
*/
- rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
+ rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
}
/* We have no packets to actually measure against. This means
@@ -491,17 +491,17 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* fixed amount.
*/
if (packets < 56) {
- itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
- if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
- itr &= I40E_ITR_ADAPTIVE_LATENCY;
- itr += I40E_ITR_ADAPTIVE_MAX_USECS;
+ itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
+ if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= IAVF_ITR_ADAPTIVE_LATENCY;
+ itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
}
goto clear_counts;
}
if (packets <= 256) {
itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
- itr &= I40E_ITR_MASK;
+ itr &= IAVF_ITR_MASK;
/* Between 56 and 112 is our "goldilocks" zone where we are
* working out "just right". Just report that our current
@@ -516,9 +516,9 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* in half per interrupt.
*/
itr /= 2;
- itr &= I40E_ITR_MASK;
- if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
- itr = I40E_ITR_ADAPTIVE_MIN_USECS;
+ itr &= IAVF_ITR_MASK;
+ if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
+ itr = IAVF_ITR_ADAPTIVE_MIN_USECS;
goto clear_counts;
}
@@ -529,7 +529,7 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
* though for smaller packet sizes there isn't much we can do as
* NAPI polling will likely be kicking in sooner rather than later.
*/
- itr = I40E_ITR_ADAPTIVE_BULK;
+ itr = IAVF_ITR_ADAPTIVE_BULK;
adjust_by_size:
/* If packet counts are 256 or greater we can assume we have a gross
@@ -577,7 +577,7 @@ adjust_by_size:
/* If we are in low latency mode halve our delay which doubles the
* rate to somewhere between 100K to 16K ints/sec
*/
- if (itr & I40E_ITR_ADAPTIVE_LATENCY)
+ if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
avg_wire_size /= 2;
/* Resultant value is 256 times larger than it needs to be. This
@@ -587,12 +587,12 @@ adjust_by_size:
* Use addition as we have already recorded the new latency flag
* for the ITR value.
*/
- itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
- I40E_ITR_ADAPTIVE_MIN_INC;
+ itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
+ IAVF_ITR_ADAPTIVE_MIN_INC;
- if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
- itr &= I40E_ITR_ADAPTIVE_LATENCY;
- itr += I40E_ITR_ADAPTIVE_MAX_USECS;
+ if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= IAVF_ITR_ADAPTIVE_LATENCY;
+ itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
}
clear_counts:
@@ -607,12 +607,12 @@ clear_counts:
}
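To make the "moving the decimal point" bookkeeping concrete, a hedged worked example, assuming IAVF_ITR_ADAPTIVE_MIN_INC == 2 (its value in the i40e sources; not shown in this patch) and a 10GB link:

/* 10GB: divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256 = 512
 *
 * If the size adjustments above left avg_wire_size = 1024, then
 *
 *	itr += DIV_ROUND_UP(1024, 512) * 2;	-> itr += 4
 *
 * so each 512 bytes of average wire size at this link speed contributes
 * one minimum ITR increment, cancelling the extra factor of 256 that the
 * in-code comment describes.
 */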
/**
- * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
+ * iavf_setup_tx_descriptors - Allocate the Tx descriptors
* @tx_ring: the tx ring to set up
*
* Return 0 on success, negative on error
**/
-int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
+int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
int bi_size;
@@ -622,13 +622,13 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(tx_ring->tx_bi);
- bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!tx_ring->tx_bi)
goto err;
/* round up to nearest 4K */
- tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
@@ -650,10 +650,10 @@ err:
}
/**
- * i40evf_clean_rx_ring - Free Rx buffers
+ * iavf_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
**/
-void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
+void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
unsigned long bi_size;
u16 i;
@@ -669,7 +669,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
if (!rx_bi->page)
continue;
@@ -685,9 +685,9 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
/* free resources associated with mapping */
dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
- i40e_rx_pg_size(rx_ring),
+ iavf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
- I40E_RX_DMA_ATTR);
+ IAVF_RX_DMA_ATTR);
__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
@@ -695,7 +695,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
rx_bi->page_offset = 0;
}
- bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
memset(rx_ring->rx_bi, 0, bi_size);
/* Zero out the descriptor ring */
@@ -707,14 +707,14 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
}
/**
- * i40evf_free_rx_resources - Free Rx resources
+ * iavf_free_rx_resources - Free Rx resources
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
-void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
+void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
- i40evf_clean_rx_ring(rx_ring);
+ iavf_clean_rx_ring(rx_ring);
kfree(rx_ring->rx_bi);
rx_ring->rx_bi = NULL;
@@ -726,19 +726,19 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
- * i40evf_setup_rx_descriptors - Allocate Rx descriptors
+ * iavf_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
-int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
+int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int bi_size;
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_bi);
- bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
if (!rx_ring->rx_bi)
goto err;
@@ -746,7 +746,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -769,11 +769,11 @@ err:
}
/**
- * i40e_release_rx_desc - Store the new tail and head values
+ * iavf_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump
* @val: new head index
**/
-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
@@ -790,26 +790,26 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40e_rx_offset - Return expected offset into page to access data
+ * iavf_rx_offset - Return expected offset into page to access data
* @rx_ring: Ring we are requesting offset of
*
* Returns the offset value for ring into the data buffer.
*/
-static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
{
- return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+ return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
}
/**
- * i40e_alloc_mapped_page - recycle or make a new page
+ * iavf_alloc_mapped_page - recycle or make a new page
* @rx_ring: ring to use
* @bi: rx_buffer struct to modify
*
* Returns true if the page was successfully allocated or
* reused.
**/
-static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *bi)
+static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
@@ -821,7 +821,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/* alloc new page for storage */
- page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
+ page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
return false;
@@ -829,22 +829,22 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
/* map page for use */
dma = dma_map_page_attrs(rx_ring->dev, page, 0,
- i40e_rx_pg_size(rx_ring),
+ iavf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
- I40E_RX_DMA_ATTR);
+ IAVF_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, i40e_rx_pg_order(rx_ring));
+ __free_pages(page, iavf_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
- bi->page_offset = i40e_rx_offset(rx_ring);
+ bi->page_offset = iavf_rx_offset(rx_ring);
/* initialize pagecnt_bias to 1 representing we fully own page */
bi->pagecnt_bias = 1;
@@ -853,15 +853,15 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_receive_skb - Send a completed packet up the stack
+ * iavf_receive_skb - Send a completed packet up the stack
* @rx_ring: rx ring in play
* @skb: packet to send up
* @vlan_tag: vlan tag for packet
**/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
+static void iavf_receive_skb(struct iavf_ring *rx_ring,
struct sk_buff *skb, u16 vlan_tag)
{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
+ struct iavf_q_vector *q_vector = rx_ring->q_vector;
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
@@ -871,27 +871,27 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
}
/**
- * i40evf_alloc_rx_buffers - Replace used receive buffers
+ * iavf_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
* Returns false if all allocations were successful, true if any fail
**/
-bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{
u16 ntu = rx_ring->next_to_use;
- union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
+ union iavf_rx_desc *rx_desc;
+ struct iavf_rx_buffer *bi;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
- rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ rx_desc = IAVF_RX_DESC(rx_ring, ntu);
bi = &rx_ring->rx_bi[ntu];
do {
- if (!i40e_alloc_mapped_page(rx_ring, bi))
+ if (!iavf_alloc_mapped_page(rx_ring, bi))
goto no_buffers;
/* sync the buffer for use by the device */
@@ -909,7 +909,7 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
bi++;
ntu++;
if (unlikely(ntu == rx_ring->count)) {
- rx_desc = I40E_RX_DESC(rx_ring, 0);
+ rx_desc = IAVF_RX_DESC(rx_ring, 0);
bi = rx_ring->rx_bi;
ntu = 0;
}
@@ -921,13 +921,13 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
} while (cleaned_count);
if (rx_ring->next_to_use != ntu)
- i40e_release_rx_desc(rx_ring, ntu);
+ iavf_release_rx_desc(rx_ring, ntu);
return false;
no_buffers:
if (rx_ring->next_to_use != ntu)
- i40e_release_rx_desc(rx_ring, ntu);
+ iavf_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after
* allocation failure
@@ -936,27 +936,27 @@ no_buffers:
}
/**
- * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
**/
-static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
struct sk_buff *skb,
- union i40e_rx_desc *rx_desc)
+ union iavf_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded;
+ struct iavf_rx_ptype_decoded decoded;
u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype;
u64 qword;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
+ rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
+ IAVF_RXD_QW1_ERROR_SHIFT;
+ rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
+ IAVF_RXD_QW1_STATUS_SHIFT;
decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
@@ -968,45 +968,45 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
return;
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
/* both known and outer_ip must be set for the below code to work */
if (!(decoded.known && decoded.outer_ip))
return;
- ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
+ ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
+ ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
if (ipv4 &&
- (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
- BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
+ BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
if (ipv6 &&
- rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
/* don't increment checksum err here, non-fatal err */
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
+ if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed; in this case the stack can compute
* the csum.
*/
- if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
+ if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
return;
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
- case I40E_RX_PTYPE_INNER_PROT_TCP:
- case I40E_RX_PTYPE_INNER_PROT_UDP:
- case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ case IAVF_RX_PTYPE_INNER_PROT_TCP:
+ case IAVF_RX_PTYPE_INNER_PROT_UDP:
+ case IAVF_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* fall through */
default:
@@ -1020,56 +1020,56 @@ checksum_fail:
}
/**
- * i40e_ptype_to_htype - get a hash type
+ * iavf_ptype_to_htype - get a hash type
* @ptype: the ptype value from the descriptor
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline int i40e_ptype_to_htype(u8 ptype)
+static inline int iavf_ptype_to_htype(u8 ptype)
{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+ struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
if (!decoded.known)
return PKT_HASH_TYPE_NONE;
- if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
return PKT_HASH_TYPE_L4;
- else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+ else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
return PKT_HASH_TYPE_L3;
else
return PKT_HASH_TYPE_L2;
}
/**
- * i40e_rx_hash - set the hash value in the skb
+ * iavf_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
* @skb: skb currently being received and modified
* @rx_ptype: Rx packet type
**/
-static inline void i40e_rx_hash(struct i40e_ring *ring,
- union i40e_rx_desc *rx_desc,
+static inline void iavf_rx_hash(struct iavf_ring *ring,
+ union iavf_rx_desc *rx_desc,
struct sk_buff *skb,
u8 rx_ptype)
{
u32 hash;
const __le64 rss_mask =
- cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+ cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
+ IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
if (ring->netdev->features & NETIF_F_RXHASH)
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+ skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
}
}
/**
- * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
@@ -1080,13 +1080,13 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* other fields within the skb.
**/
static inline
-void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+void iavf_process_skb_fields(struct iavf_ring *rx_ring,
+ union iavf_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype)
{
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
+ iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -1095,7 +1095,7 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
}
/**
- * i40e_cleanup_headers - Correct empty headers
+ * iavf_cleanup_headers - Correct empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being fixed
*
@@ -1107,7 +1107,7 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
*
* Returns true if an error was encountered and skb was freed.
**/
-static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
{
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
@@ -1117,16 +1117,16 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
}
/**
- * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * iavf_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
* @old_buff: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
**/
-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *old_buff)
+static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *old_buff)
{
- struct i40e_rx_buffer *new_buff;
+ struct iavf_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;
new_buff = &rx_ring->rx_bi[nta];
@@ -1143,20 +1143,20 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_page_is_reusable - check if any reuse is possible
+ * iavf_page_is_reusable - check if any reuse is possible
* @page: page struct to check
*
* A page is not reusable if it was allocated under low memory
* conditions, or it's not in the same NUMA node as this CPU.
*/
-static inline bool i40e_page_is_reusable(struct page *page)
+static inline bool iavf_page_is_reusable(struct page *page)
{
return (page_to_nid(page) == numa_mem_id()) &&
!page_is_pfmemalloc(page);
}
/**
- * i40e_can_reuse_rx_page - Determine if this page can be reused by
+ * iavf_can_reuse_rx_page - Determine if this page can be reused by
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
@@ -1182,13 +1182,13 @@ static inline bool i40e_page_is_reusable(struct page *page)
*
* In either case, if the page is reusable its refcount is increased.
**/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
+static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
/* Is any reuse possible? */
- if (unlikely(!i40e_page_is_reusable(page)))
+ if (unlikely(!iavf_page_is_reusable(page)))
return false;
#if (PAGE_SIZE < 8192)
@@ -1196,9 +1196,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
#else
-#define I40E_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
- if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+#define IAVF_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
+ if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
return false;
#endif
@@ -1215,7 +1215,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
}
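A user-space model of the reuse test above, under the assumption that page_count() and pagecnt_bias behave as plain counters: the page can be flipped and reused only while the driver's bias accounts for every reference except at most one still held by the stack.

#include <stdbool.h>
#include <stdio.h>

struct model_buf {
	unsigned int page_count;    /* stands in for page_count(page) */
	unsigned int pagecnt_bias;  /* references the driver still owns */
};

static bool can_reuse(const struct model_buf *b)
{
	/* PAGE_SIZE < 8192 branch: fail if anyone else holds a reference */
	return (b->page_count - b->pagecnt_bias) <= 1;
}

int main(void)
{
	struct model_buf fresh = { .page_count = 1, .pagecnt_bias = 1 };
	struct model_buf held  = { .page_count = 3, .pagecnt_bias = 1 };

	printf("fresh page: %d, stack still holds frags: %d\n",
	       can_reuse(&fresh), can_reuse(&held)); /* 1, 0 */
	return 0;
}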
/**
- * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
* @skb: sk_buff to place the data into
@@ -1226,15 +1226,15 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
*
* The function will then update the page offset.
**/
-static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
+static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *rx_buffer,
struct sk_buff *skb,
unsigned int size)
{
#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
+ unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
#endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
@@ -1249,17 +1249,17 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
}
/**
- * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
+ * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
-static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
const unsigned int size)
{
- struct i40e_rx_buffer *rx_buffer;
+ struct iavf_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);
@@ -1278,7 +1278,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
}
/**
- * i40e_construct_skb - Allocate skb and populate it
+ * iavf_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
* @size: size of buffer to add to skb
@@ -1287,13 +1287,13 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
* data from the current receive descriptor, taking care to set up the
* skb correctly.
*/
-static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
+static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *rx_buffer,
unsigned int size)
{
void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
@@ -1308,15 +1308,15 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- I40E_RX_HDR_SIZE,
+ IAVF_RX_HDR_SIZE,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
/* Determine available headroom for copy */
headlen = size;
- if (headlen > I40E_RX_HDR_SIZE)
- headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+ if (headlen > IAVF_RX_HDR_SIZE)
+ headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
@@ -1343,7 +1343,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
}
/**
- * i40e_build_skb - Build skb around an existing buffer
+ * iavf_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buffer: Rx buffer to pull data from
* @size: size of buffer to add to skb
@@ -1351,16 +1351,16 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
-static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
+static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *rx_buffer,
unsigned int size)
{
void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+ unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(I40E_SKB_PAD + size);
+ SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
#endif
struct sk_buff *skb;
@@ -1370,12 +1370,12 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
prefetch(va + L1_CACHE_BYTES);
#endif
/* build an skb around the page buffer */
- skb = build_skb(va - I40E_SKB_PAD, truesize);
+ skb = build_skb(va - IAVF_SKB_PAD, truesize);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, I40E_SKB_PAD);
+ skb_reserve(skb, IAVF_SKB_PAD);
__skb_put(skb, size);
/* buffer is used by skb, update page_offset */
@@ -1389,25 +1389,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
}
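A back-of-envelope check of the truesize computed above for the PAGE_SIZE >= 8192 path. SKB_DATA_ALIGN rounds to the cache line; IAVF_SKB_PAD is assumed to be NET_SKB_PAD + NET_IP_ALIGN as in i40e, and the shared-info size below is purely illustrative.

#include <stdio.h>

#define CACHE_LINE        64
#define ALIGN_UP(x)       (((x) + CACHE_LINE - 1) & ~(CACHE_LINE - 1))
#define SKB_PAD           (32 + 2)  /* assumed NET_SKB_PAD + NET_IP_ALIGN */
#define SHARED_INFO_SIZE  320       /* illustrative sizeof(skb_shared_info) */

int main(void)
{
	unsigned int size = 1500;  /* received frame length */
	unsigned int truesize = ALIGN_UP(SHARED_INFO_SIZE) +
				ALIGN_UP(SKB_PAD + size);

	printf("truesize for a %u byte frame: %u\n", size, truesize);
	return 0;
}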
/**
- * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
+ * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
*
* This function will clean up the contents of the rx_buffer. It will
* either recycle the buffer or unmap it and free the associated resources.
*/
-static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer)
+static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *rx_buffer)
{
- if (i40e_can_reuse_rx_page(rx_buffer)) {
+ if (iavf_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
- i40e_reuse_rx_page(rx_ring, rx_buffer);
+ iavf_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- i40e_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+ iavf_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
}
@@ -1417,7 +1417,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
}
/**
- * i40e_is_non_eop - process handling of non-EOP buffers
+ * iavf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
@@ -1427,8 +1427,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
**/
-static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
+static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
+ union iavf_rx_desc *rx_desc,
struct sk_buff *skb)
{
u32 ntc = rx_ring->next_to_clean + 1;
@@ -1437,11 +1437,11 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring->next_to_clean = ntc;
- prefetch(I40E_RX_DESC(rx_ring, ntc));
+ prefetch(IAVF_RX_DESC(rx_ring, ntc));
/* if we are the last buffer then there is nothing else to do */
-#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
- if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
+ if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
return false;
rx_ring->rx_stats.non_eop_descs++;
@@ -1450,7 +1450,7 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
}
/**
- * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
*
@@ -1461,29 +1461,29 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
*
* Returns amount of work completed
**/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
- struct i40e_rx_buffer *rx_buffer;
- union i40e_rx_desc *rx_desc;
+ struct iavf_rx_buffer *rx_buffer;
+ union iavf_rx_desc *rx_desc;
unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
/* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
failure = failure ||
- i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+ iavf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
@@ -1498,21 +1498,21 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
dma_rmb();
- size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
if (!size)
break;
- i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+ iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
+ rx_buffer = iavf_get_rx_buffer(rx_ring, size);
/* retrieve a buffer from the ring */
if (skb)
- i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = i40e_build_skb(rx_ring, rx_buffer, size);
+ skb = iavf_build_skb(rx_ring, rx_buffer, size);
else
- skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+ skb = iavf_construct_skb(rx_ring, rx_buffer, size);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1521,24 +1521,24 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break;
}
- i40e_put_rx_buffer(rx_ring, rx_buffer);
+ iavf_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++;
- if (i40e_is_non_eop(rx_ring, rx_desc, skb))
+ if (iavf_is_non_eop(rx_ring, rx_desc, skb))
continue;
/* ERR_MASK will only have valid bits if EOP set, and
* what we are doing here is actually checking
- * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
* the error field
*/
- if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
+ if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
skb = NULL;
continue;
}
- if (i40e_cleanup_headers(rx_ring, skb)) {
+ if (iavf_cleanup_headers(rx_ring, skb)) {
skb = NULL;
continue;
}
@@ -1547,18 +1547,18 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
total_rx_bytes += skb->len;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
+ IAVF_RXD_QW1_PTYPE_SHIFT;
/* populate checksum, VLAN, and protocol */
- i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
- vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+ vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
- i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- i40e_receive_skb(rx_ring, skb, vlan_tag);
+ iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
+ iavf_receive_skb(rx_ring, skb, vlan_tag);
skb = NULL;
/* update budget accounting */
@@ -1578,7 +1578,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
return failure ? budget : (int)total_rx_packets;
}
-static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+static inline u32 iavf_buildreg_itr(const int type, u16 itr)
{
u32 val;
@@ -1597,17 +1597,17 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
* only need to shift by the interval shift - 1 instead of the
* full value.
*/
- itr &= I40E_ITR_MASK;
+ itr &= IAVF_ITR_MASK;
- val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
- (itr << (I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
+ val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
+ (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+ (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
return val;
}
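A standalone model of iavf_buildreg_itr(). The register layout (INTENA in bit 0, ITR_INDX at bits 3-4, INTERVAL starting at bit 5) is an assumption taken from the i40e-family register definitions; the point, as the comment above explains, is that shifting the usec value by INTERVAL_SHIFT - 1 writes usecs/2, matching the register's 2 usec resolution without an explicit divide.

#include <stdio.h>
#include <stdint.h>

#define INTENA_MASK     0x1u
#define ITR_INDX_SHIFT  3   /* assumed */
#define INTERVAL_SHIFT  5   /* assumed */
#define ITR_MASK        0x1FFE

static uint32_t buildreg_itr(int type, uint16_t itr)
{
	itr &= ITR_MASK;
	return INTENA_MASK |
	       ((uint32_t)type << ITR_INDX_SHIFT) |
	       ((uint32_t)itr << (INTERVAL_SHIFT - 1));
}

int main(void)
{
	/* Rx ITR index 0, 50 usec: the 20K ints/sec default */
	printf("reg = 0x%08x\n", buildreg_itr(0, 50)); /* 0x00000321 */
	return 0;
}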
/* a small macro to shorten up some long lines */
-#define INTREG I40E_VFINT_DYN_CTLN1
+#define INTREG IAVF_VFINT_DYN_CTLN1
/* The act of updating the ITR will cause it to immediately trigger. In order
* to prevent this from throwing off adaptive update statistics we defer the
@@ -1619,20 +1619,20 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
#define ITR_COUNTDOWN_START 3
/**
- * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt
* @vsi: the VSI we care about
* @q_vector: q_vector for which itr is being updated and interrupt enabled
*
**/
-static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
- struct i40e_q_vector *q_vector)
+static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
+ struct iavf_q_vector *q_vector)
{
- struct i40e_hw *hw = &vsi->back->hw;
+ struct iavf_hw *hw = &vsi->back->hw;
u32 intval;
/* These will do nothing if dynamic updates are not enabled */
- i40e_update_itr(q_vector, &q_vector->tx);
- i40e_update_itr(q_vector, &q_vector->rx);
+ iavf_update_itr(q_vector, &q_vector->tx);
+ iavf_update_itr(q_vector, &q_vector->rx);
/* This block of logic allows us to get away with only updating
* one ITR value with each interrupt. The idea is to perform a
@@ -1644,7 +1644,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
/* Rx ITR needs to be reduced, this is highest priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
+ intval = iavf_buildreg_itr(IAVF_RX_ITR,
q_vector->rx.target_itr);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
@@ -1654,29 +1654,29 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
/* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority
*/
- intval = i40e_buildreg_itr(I40E_TX_ITR,
+ intval = iavf_buildreg_itr(IAVF_TX_ITR,
q_vector->tx.target_itr);
q_vector->tx.current_itr = q_vector->tx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
/* Rx ITR needs to be increased, third priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
+ intval = iavf_buildreg_itr(IAVF_RX_ITR,
q_vector->rx.target_itr);
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else {
/* No ITR update, lowest priority */
- intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
}
- if (!test_bit(__I40E_VSI_DOWN, vsi->state))
+ if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
wr32(hw, INTREG(q_vector->reg_idx), intval);
}
/**
- * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
*
@@ -1684,18 +1684,18 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*
* Returns the amount of work done
**/
-int i40evf_napi_poll(struct napi_struct *napi, int budget)
+int iavf_napi_poll(struct napi_struct *napi, int budget)
{
- struct i40e_q_vector *q_vector =
- container_of(napi, struct i40e_q_vector, napi);
- struct i40e_vsi *vsi = q_vector->vsi;
- struct i40e_ring *ring;
+ struct iavf_q_vector *q_vector =
+ container_of(napi, struct iavf_q_vector, napi);
+ struct iavf_vsi *vsi = q_vector->vsi;
+ struct iavf_ring *ring;
bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring;
int work_done = 0;
- if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
+ if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
napi_complete(napi);
return 0;
}
@@ -1703,8 +1703,8 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- i40e_for_each_ring(ring, q_vector->tx) {
- if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+ iavf_for_each_ring(ring, q_vector->tx) {
+ if (!iavf_clean_tx_irq(vsi, ring, budget)) {
clean_complete = false;
continue;
}
@@ -1721,8 +1721,8 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
- i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
+ iavf_for_each_ring(ring, q_vector->rx) {
+ int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
@@ -1746,7 +1746,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
napi_complete_done(napi, work_done);
/* Force an interrupt */
- i40evf_force_wb(vsi, q_vector);
+ iavf_force_wb(vsi, q_vector);
/* Return budget-1 so that polling stops */
return budget - 1;
@@ -1754,24 +1754,24 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
tx_only:
if (arm_wb) {
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
- i40e_enable_wb_on_itr(vsi, q_vector);
+ iavf_enable_wb_on_itr(vsi, q_vector);
}
return budget;
}
- if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete_done(napi, work_done);
- i40e_update_enable_itr(vsi, q_vector);
+ iavf_update_enable_itr(vsi, q_vector);
return min(work_done, budget - 1);
}
/**
- * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* @skb: send buffer
* @tx_ring: ring to send buffer on
* @flags: the tx flags to be set
@@ -1782,9 +1782,9 @@ tx_only:
* Returns an error code to indicate the frame should be dropped upon error,
* otherwise returns 0 to indicate the flags have been set properly.
**/
-static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring,
- u32 *flags)
+static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct iavf_ring *tx_ring,
+ u32 *flags)
{
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
@@ -1804,8 +1804,8 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
/* if we have a HW VLAN tag being added, default to the HW one */
if (skb_vlan_tag_present(skb)) {
- tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
@@ -1815,8 +1815,8 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
return -EINVAL;
protocol = vhdr->h_vlan_encapsulated_proto;
- tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
}
out:
@@ -1825,14 +1825,14 @@ out:
}
/**
- * i40e_tso - set up the tso context descriptor
+ * iavf_tso - set up the tso context descriptor
* @first: pointer to first Tx buffer for xmit
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if TSO is in progress, or a negative error
**/
-static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
+static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
u64 *cd_type_cmd_tso_mss)
{
struct sk_buff *skb = first->skb;
@@ -1923,17 +1923,17 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* find the field values */
- cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_cmd = IAVF_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
cd_mss = gso_size;
- *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
- (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
- (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
+ (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
return 1;
}
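A sketch of the Quad Word 1 packing done at the end of iavf_tso(). The shift positions (CMD at bit 4, TSO_LEN at bit 30, MSS at bit 50) and the TSO command value of 0x1 are assumptions based on the i40e-family context descriptor layout.

#include <stdio.h>
#include <stdint.h>

#define CTX_QW1_CMD_SHIFT      4   /* assumed */
#define CTX_QW1_TSO_LEN_SHIFT  30  /* assumed */
#define CTX_QW1_MSS_SHIFT      50  /* assumed */
#define CTX_DESC_TSO           0x1 /* assumed */

int main(void)
{
	uint64_t cd_cmd = CTX_DESC_TSO;
	uint64_t cd_tso_len = 64000 - 54;  /* skb->len minus header length */
	uint64_t cd_mss = 1448;            /* gso_size */

	uint64_t qw1 = (cd_cmd << CTX_QW1_CMD_SHIFT) |
		       (cd_tso_len << CTX_QW1_TSO_LEN_SHIFT) |
		       (cd_mss << CTX_QW1_MSS_SHIFT);

	printf("cd_type_cmd_tso_mss = 0x%016llx\n",
	       (unsigned long long)qw1);
	return 0;
}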
/**
- * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * iavf_tx_enable_csum - Enable Tx checksum offloads
* @skb: send buffer
* @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
@@ -1941,9 +1941,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
* @tx_ring: Tx descriptor ring
* @cd_tunneling: ptr to context desc bits
**/
-static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
u32 *td_cmd, u32 *td_offset,
- struct i40e_ring *tx_ring,
+ struct iavf_ring *tx_ring,
u32 *cd_tunneling)
{
union {
@@ -1968,19 +1968,19 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
l4.hdr = skb_transport_header(skb);
/* compute outer L2 header size */
- offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+ offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
if (skb->encapsulation) {
u32 tunnel = 0;
/* define outer network header type */
- if (*tx_flags & I40E_TX_FLAGS_IPV4) {
- tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
- I40E_TX_CTX_EXT_IP_IPV4 :
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
+ tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
+ IAVF_TX_CTX_EXT_IP_IPV4 :
+ IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
l4_proto = ip.v4->protocol;
- } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
- tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
+ } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
+ tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
@@ -1992,20 +1992,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
/* define outer transport */
switch (l4_proto) {
case IPPROTO_UDP:
- tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
- *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+ tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
+ *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
break;
case IPPROTO_GRE:
- tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
- *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+ tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
+ *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
break;
case IPPROTO_IPIP:
case IPPROTO_IPV6:
- *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+ *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
l4.hdr = skb_inner_network_header(skb);
break;
default:
- if (*tx_flags & I40E_TX_FLAGS_TSO)
+ if (*tx_flags & IAVF_TX_FLAGS_TSO)
return -1;
skb_checksum_help(skb);
@@ -2014,20 +2014,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
/* compute outer L3 header size */
tunnel |= ((l4.hdr - ip.hdr) / 4) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+ IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
/* switch IP header pointer from outer to inner header */
ip.hdr = skb_inner_network_header(skb);
/* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
- I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+ IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
/* indicate if we need to offload outer UDP header */
- if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
- tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+ tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
/* record tunnel offload values */
*cd_tunneling |= tunnel;
@@ -2037,24 +2037,24 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
l4_proto = 0;
/* reset type as we transition from outer to inner headers */
- *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
+ *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
if (ip.v4->version == 4)
- *tx_flags |= I40E_TX_FLAGS_IPV4;
+ *tx_flags |= IAVF_TX_FLAGS_IPV4;
if (ip.v6->version == 6)
- *tx_flags |= I40E_TX_FLAGS_IPV6;
+ *tx_flags |= IAVF_TX_FLAGS_IPV6;
}
/* Enable IP checksum offloads */
- if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
l4_proto = ip.v4->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
- cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
- I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
- I40E_TX_DESC_CMD_IIPT_IPV4;
- } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
- cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
+ IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
+ IAVF_TX_DESC_CMD_IIPT_IPV4;
+ } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
+ cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
@@ -2064,29 +2064,29 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
}
/* compute inner L3 header size */
- offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
/* Enable L4 checksum offloads */
switch (l4_proto) {
case IPPROTO_TCP:
/* enable checksum offloads */
- cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
- offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+ offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case IPPROTO_SCTP:
/* enable SCTP checksum offload */
- cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
offset |= (sizeof(struct sctphdr) >> 2) <<
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case IPPROTO_UDP:
/* enable UDP checksum offload */
- cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
offset |= (sizeof(struct udphdr) >> 2) <<
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
default:
- if (*tx_flags & I40E_TX_FLAGS_TSO)
+ if (*tx_flags & IAVF_TX_FLAGS_TSO)
return -1;
skb_checksum_help(skb);
return 0;
@@ -2099,25 +2099,25 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
}
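A worked example of the td_offset packing built up in iavf_tx_enable_csum(). All lengths go in as word or dword counts; the field positions (MACLEN at bit 0, IPLEN at bit 7, L4LEN at bit 14) are assumptions based on the i40e-family Tx descriptor layout.

#include <stdio.h>
#include <stdint.h>

#define MACLEN_SHIFT  0   /* assumed, length in 2-byte words */
#define IPLEN_SHIFT   7   /* assumed, length in 4-byte dwords */
#define L4LEN_SHIFT   14  /* assumed, length in 4-byte dwords */

int main(void)
{
	uint32_t offset = 0;

	offset |= (14 / 2) << MACLEN_SHIFT; /* plain Ethernet header */
	offset |= (20 / 4) << IPLEN_SHIFT;  /* IPv4 header, no options */
	offset |= 5 << L4LEN_SHIFT;         /* TCP doff = 5 dwords */

	printf("td_offset = 0x%05x\n", offset); /* 0x14287 */
	return 0;
}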
/**
- * i40e_create_tx_ctx Build the Tx context descriptor
+ * iavf_create_tx_ctx - Build the Tx context descriptor
* @tx_ring: ring to create the descriptor on
* @cd_type_cmd_tso_mss: Quad Word 1
* @cd_tunneling: Quad Word 0 - bits 0-31
* @cd_l2tag2: Quad Word 0 - bits 32-63
**/
-static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
const u64 cd_type_cmd_tso_mss,
const u32 cd_tunneling, const u32 cd_l2tag2)
{
- struct i40e_tx_context_desc *context_desc;
+ struct iavf_tx_context_desc *context_desc;
int i = tx_ring->next_to_use;
- if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+ if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
!cd_tunneling && !cd_l2tag2)
return;
/* grab the next descriptor */
- context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+ context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
@@ -2130,7 +2130,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
}
/**
- * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
+ * __iavf_chk_linearize - Check if there are more than 8 buffers per packet
* @skb: send buffer
*
* Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
@@ -2142,20 +2142,20 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
* the segment payload in the first descriptor, and another 7 for the
* fragments.
**/
-bool __i40evf_chk_linearize(struct sk_buff *skb)
+bool __iavf_chk_linearize(struct sk_buff *skb)
{
const struct skb_frag_struct *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
nr_frags = skb_shinfo(skb)->nr_frags;
- if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
+ if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
return false;
/* We need to walk through the list and validate that each group
* of 6 fragments totals at least gso_size.
*/
- nr_frags -= I40E_MAX_BUFFER_TXD - 2;
+ nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
@@ -2187,17 +2187,17 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
* figure out what the remainder would be in the last
* descriptor associated with the fragment.
*/
- if (stale_size > I40E_MAX_DATA_PER_TXD) {
+ if (stale_size > IAVF_MAX_DATA_PER_TXD) {
int align_pad = -(stale->page_offset) &
- (I40E_MAX_READ_REQ_SIZE - 1);
+ (IAVF_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
stale_size -= align_pad;
do {
- sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
- stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
- } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > IAVF_MAX_DATA_PER_TXD);
}
/* if sum is negative we failed to make sufficient progress */
@@ -2214,20 +2214,20 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
}
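A user-space model of the check __iavf_chk_linearize() performs: with at most 8 data buffers per wire packet (one for the headers plus seven fragments, IAVF_MAX_BUFFER_TXD assumed to be 8), every window of six consecutive fragments must cover at least gso_size bytes, otherwise the skb has to be linearized. This is a simplified sketch, not the driver's exact sliding-window bookkeeping.

#include <stdbool.h>
#include <stdio.h>

static bool needs_linearize(const unsigned int *frag, int nr_frags,
			    unsigned int gso_size)
{
	if (nr_frags < 7)  /* IAVF_MAX_BUFFER_TXD - 1 */
		return false;

	for (int i = 0; i + 6 <= nr_frags; i++) {
		unsigned int sum = 0;

		for (int j = i; j < i + 6; j++)
			sum += frag[j];
		if (sum < gso_size)  /* window can't carry one segment */
			return true;
	}
	return false;
}

int main(void)
{
	unsigned int tiny[9] = { 100, 100, 100, 100, 100, 100, 100, 100, 100 };

	printf("nine 100-byte frags, mss 1448: %d\n",
	       needs_linearize(tiny, 9, 1448)); /* 1: must linearize */
	return 0;
}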
/**
- * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
* Returns -EBUSY if a stop is needed, else 0
**/
-int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Memory barrier before checking head and tail */
smp_mb();
/* Check again in a case another CPU has just made room available. */
- if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -2237,7 +2237,7 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
- * i40evf_tx_map - Build the Tx descriptor
+ * iavf_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
* @first: first buffer info buffer to use
@@ -2246,34 +2246,34 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
**/
-static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
+ struct iavf_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
struct skb_frag_struct *frag;
- struct i40e_tx_buffer *tx_bi;
- struct i40e_tx_desc *tx_desc;
+ struct iavf_tx_buffer *tx_bi;
+ struct iavf_tx_desc *tx_desc;
u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
- if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
- td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
- td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
- I40E_TX_FLAGS_VLAN_SHIFT;
+ if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
+ td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
+ IAVF_TX_FLAGS_VLAN_SHIFT;
}
first->tx_flags = tx_flags;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_desc = IAVF_TX_DESC(tx_ring, i);
tx_bi = first;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
- unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+ unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
@@ -2283,10 +2283,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
dma_unmap_addr_set(tx_bi, dma, dma);
/* align size to end of page */
- max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
+ max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
tx_desc->buffer_addr = cpu_to_le64(dma);
- while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
+ while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
max_data, td_tag);
@@ -2295,14 +2295,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i++;
if (i == tx_ring->count) {
- tx_desc = I40E_TX_DESC(tx_ring, 0);
+ tx_desc = IAVF_TX_DESC(tx_ring, 0);
i = 0;
}
dma += max_data;
size -= max_data;
- max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+ max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
tx_desc->buffer_addr = cpu_to_le64(dma);
}
@@ -2316,7 +2316,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i++;
if (i == tx_ring->count) {
- tx_desc = I40E_TX_DESC(tx_ring, 0);
+ tx_desc = IAVF_TX_DESC(tx_ring, 0);
i = 0;
}
@@ -2337,10 +2337,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* write last descriptor with RS and EOP bits */
- td_cmd |= I40E_TXD_CMD;
+ td_cmd |= IAVF_TXD_CMD;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
@@ -2373,7 +2373,7 @@ dma_error:
/* clear dma mappings for failed tx_bi map */
for (;;) {
tx_bi = &tx_ring->tx_bi[i];
- i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
+ iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
if (tx_bi == first)
break;
if (i == 0)
@@ -2385,18 +2385,18 @@ dma_error:
}
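The "align size to end of page" step above in numbers: the first DMA chunk is stretched so that it ends on a read-request boundary, after which every chunk is IAVF_MAX_DATA_PER_TXD_ALIGNED bytes. The 16K-1 and 4K constants are assumptions carried over from the i40e headers.

#include <stdio.h>
#include <stdint.h>

#define MAX_READ_REQ_SIZE  4096u
#define MAX_DATA_PER_TXD   (16u * 1024u - 1u)  /* assumed, as in i40e */
#define MAX_DATA_PER_TXD_ALIGNED \
	(MAX_DATA_PER_TXD & ~(MAX_READ_REQ_SIZE - 1u))  /* 12288 */

int main(void)
{
	uint64_t dma = 0x1000ull + 0x234;  /* mapping not 4K aligned */
	unsigned int max_data = MAX_DATA_PER_TXD_ALIGNED;

	/* stretch the first chunk to a 4K boundary, exactly as above */
	max_data += (unsigned int)(-dma & (MAX_READ_REQ_SIZE - 1));
	printf("first chunk: %u bytes, ends at 0x%llx\n", max_data,
	       (unsigned long long)(dma + max_data)); /* end is 4K aligned */
	return 0;
}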
/**
- * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * iavf_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer
* @tx_ring: ring to send buffer on
*
* Returns NETDEV_TX_OK if sent, else an error code
**/
-static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
+static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
+ struct iavf_ring *tx_ring)
{
- u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+ u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
u32 cd_tunneling = 0, cd_l2tag2 = 0;
- struct i40e_tx_buffer *first;
+ struct iavf_tx_buffer *first;
u32 td_offset = 0;
u32 tx_flags = 0;
__be16 protocol;
@@ -2407,25 +2407,25 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* prefetch the data, we'll need it later */
prefetch(skb->data);
- i40e_trace(xmit_frame_ring, skb, tx_ring);
+ iavf_trace(xmit_frame_ring, skb, tx_ring);
- count = i40e_xmit_descriptor_count(skb);
- if (i40e_chk_linearize(skb, count)) {
+ count = iavf_xmit_descriptor_count(skb);
+ if (iavf_chk_linearize(skb, count)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- count = i40e_txd_use_count(skb->len);
+ count = iavf_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
- /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
- * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ /* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD,
* + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor,
* otherwise try next time
*/
- if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
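The stop-queue budget above in numbers, for an assumed 64KB TSO skb whose head fits one descriptor and whose data sits in sixteen 4KB page fragments (each well under IAVF_MAX_DATA_PER_TXD, so one descriptor apiece): count = 1 + 16, and adding the 4-descriptor gap plus 1 context descriptor means 22 free descriptors are required before transmit proceeds.

#include <stdio.h>

int main(void)
{
	int head_desc = 1, frag_desc = 16;
	int count = head_desc + frag_desc;

	printf("need %d free descriptors\n", count + 4 + 1); /* 22 */
	return 0;
}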
@@ -2437,7 +2437,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
first->gso_segs = 1;
/* prepare the xmit flags */
- if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
/* obtain protocol of skb */
@@ -2445,19 +2445,19 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* setup IPv4/IPv6 offloads */
if (protocol == htons(ETH_P_IP))
- tx_flags |= I40E_TX_FLAGS_IPV4;
+ tx_flags |= IAVF_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6))
- tx_flags |= I40E_TX_FLAGS_IPV6;
+ tx_flags |= IAVF_TX_FLAGS_IPV6;
- tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
else if (tso)
- tx_flags |= I40E_TX_FLAGS_TSO;
+ tx_flags |= IAVF_TX_FLAGS_TSO;
/* Always offload the checksum, since it's in the data descriptor */
- tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
+ tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
if (tso < 0)
goto out_drop;
@@ -2465,44 +2465,44 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
- td_cmd |= I40E_TX_DESC_CMD_ICRC;
+ td_cmd |= IAVF_TX_DESC_CMD_ICRC;
- i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+ iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
- i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
- td_cmd, td_offset);
+ iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
return NETDEV_TX_OK;
out_drop:
- i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
+ iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
dev_kfree_skb_any(first->skb);
first->skb = NULL;
return NETDEV_TX_OK;
}
/**
- * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
* @skb: send buffer
* @netdev: network interface device structure
*
* Returns NETDEV_TX_OK if sent, else an error code
**/
-netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
/* hardware can't handle really short frames; hardware padding works
* beyond this point
*/
- if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
- if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+ if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
+ if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
return NETDEV_TX_OK;
- skb->len = I40E_MIN_TX_LEN;
- skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+ skb->len = IAVF_MIN_TX_LEN;
+ skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
}
- return i40e_xmit_frame_ring(skb, tx_ring);
+ return iavf_xmit_frame_ring(skb, tx_ring);
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 3b5a63b3236e..71e7d090f8db 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#ifndef _I40E_TXRX_H_
-#define _I40E_TXRX_H_
+#ifndef _IAVF_TXRX_H_
+#define _IAVF_TXRX_H_
/* Interrupt Throttling and Rate Limiting Goodies */
-#define I40E_DEFAULT_IRQ_WORK 256
+#define IAVF_DEFAULT_IRQ_WORK 256
/* The datasheet for the X710 and XL710 indicates that the maximum value for
* the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
@@ -13,80 +13,80 @@
* the register value which is divided by 2, let's use the actual values and
* avoid an excessive amount of translation.
*/
-#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
-#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */
-#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */
-#define I40E_ITR_100K 10 /* all values below must be even */
-#define I40E_ITR_50K 20
-#define I40E_ITR_20K 50
-#define I40E_ITR_18K 60
-#define I40E_ITR_8K 122
-#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */
-#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
-#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
-#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
-
-#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
-#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
+#define IAVF_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define IAVF_ITR_MASK 0x1FFE /* mask for ITR register value */
+#define IAVF_MIN_ITR 2 /* reg uses 2 usec resolution */
+#define IAVF_ITR_100K 10 /* all values below must be even */
+#define IAVF_ITR_50K 20
+#define IAVF_ITR_20K 50
+#define IAVF_ITR_18K 60
+#define IAVF_ITR_8K 122
+#define IAVF_MAX_ITR 8160 /* maximum value as per datasheet */
+#define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC)
+#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK)
+#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))
+
+#define IAVF_ITR_RX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
+#define IAVF_ITR_TX_DEF (IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
* the value of the rate limit is non-zero
*/
#define INTRL_ENA BIT(6)
-#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
+#define IAVF_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
-#define I40E_INTRL_8K 125 /* 8000 ints/sec */
-#define I40E_INTRL_62K 16 /* 62500 ints/sec */
-#define I40E_INTRL_83K 12 /* 83333 ints/sec */
+#define IAVF_INTRL_8K 125 /* 8000 ints/sec */
+#define IAVF_INTRL_62K 16 /* 62500 ints/sec */
+#define IAVF_INTRL_83K 12 /* 83333 ints/sec */
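For concreteness, a quick sanity check of the two conversion macros above (worked numbers only, not driver code):

/* INTRL_USEC_TO_REG(125) = (125 >> 2) | INTRL_ENA = 0x1F | 0x40 = 0x5F,
 * so IAVF_INTRL_8K programs roughly 8000 interrupts/sec with the rate
 * limiter enabled. The reverse, INTRL_REG_TO_USEC(0x5F) =
 * (0x5F & ~INTRL_ENA) << 2 = 124 usec: the low two bits are lost because
 * the register only has 4 usec resolution.
 */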
-#define I40E_QUEUE_END_OF_LIST 0x7FF
+#define IAVF_QUEUE_END_OF_LIST 0x7FF
/* This enum matches hardware bits and is meant to be used by the DYN_CTLN
* and QINT registers, or more generally anywhere in the manual that
* mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
* register; instead it is a special value meaning "don't update" ITR0/1/2.
*/
-enum i40e_dyn_idx_t {
- I40E_IDX_ITR0 = 0,
- I40E_IDX_ITR1 = 1,
- I40E_IDX_ITR2 = 2,
- I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+enum iavf_dyn_idx_t {
+ IAVF_IDX_ITR0 = 0,
+ IAVF_IDX_ITR1 = 1,
+ IAVF_IDX_ITR2 = 2,
+ IAVF_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};
/* these are indexes into ITRN registers */
-#define I40E_RX_ITR I40E_IDX_ITR0
-#define I40E_TX_ITR I40E_IDX_ITR1
-#define I40E_PE_ITR I40E_IDX_ITR2
+#define IAVF_RX_ITR IAVF_IDX_ITR0
+#define IAVF_TX_ITR IAVF_IDX_ITR1
+#define IAVF_PE_ITR IAVF_IDX_ITR2
/* Supported RSS offloads */
-#define I40E_DEFAULT_RSS_HENA ( \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+#define IAVF_DEFAULT_RSS_HENA ( \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
/* Supported Rx Buffer Sizes (a multiple of 128) */
-#define I40E_RXBUFFER_256 256
-#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
-#define I40E_RXBUFFER_2048 2048
-#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
-#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
+#define IAVF_RXBUFFER_256 256
+#define IAVF_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
+#define IAVF_RXBUFFER_2048 2048
+#define IAVF_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
+#define IAVF_MAX_RXBUFFER 9728 /* largest size for single descriptor */
/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
@@ -95,11 +95,11 @@ enum i40e_dyn_idx_t {
* e.g. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
* e.g. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
-#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
-#define i40e_rx_desc i40e_32byte_rx_desc
+#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
+#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+#define iavf_rx_desc iavf_32byte_rx_desc
-#define I40E_RX_DMA_ATTR \
+#define IAVF_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
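These attributes are handed to the DMA API whenever an Rx page is mapped; a minimal sketch of that call, assuming the page and ring variables from the Rx buffer allocation path:

	/* map the page for receive; defer CPU sync and allow weak ordering */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 iavf_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
	if (dma_mapping_error(rx_ring->dev, dma))
		__free_pages(page, iavf_rx_pg_order(rx_ring));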
/* Attempt to maximize the headroom available for incoming frames. We
@@ -113,10 +113,10 @@ enum i40e_dyn_idx_t {
* receive path.
*/
#if (PAGE_SIZE < 8192)
-#define I40E_2K_TOO_SMALL_WITH_PADDING \
-((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
+#define IAVF_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))
-static inline int i40e_compute_pad(int rx_buf_len)
+static inline int iavf_compute_pad(int rx_buf_len)
{
int page_size, pad_size;
@@ -126,7 +126,7 @@ static inline int i40e_compute_pad(int rx_buf_len)
return pad_size;
}
-static inline int i40e_skb_pad(void)
+static inline int iavf_skb_pad(void)
{
int rx_buf_len;
@@ -137,25 +137,25 @@ static inline int i40e_skb_pad(void)
* tailroom due to NET_IP_ALIGN possibly shifting us out of
* cache-line alignment.
*/
- if (I40E_2K_TOO_SMALL_WITH_PADDING)
- rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ if (IAVF_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
else
- rx_buf_len = I40E_RXBUFFER_1536;
+ rx_buf_len = IAVF_RXBUFFER_1536;
/* if needed make room for NET_IP_ALIGN */
rx_buf_len -= NET_IP_ALIGN;
- return i40e_compute_pad(rx_buf_len);
+ return iavf_compute_pad(rx_buf_len);
}
-#define I40E_SKB_PAD i40e_skb_pad()
+#define IAVF_SKB_PAD iavf_skb_pad()
#else
-#define I40E_2K_TOO_SMALL_WITH_PADDING false
-#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define IAVF_2K_TOO_SMALL_WITH_PADDING false
+#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
/**
- * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * iavf_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
* @stat_err_bits: value to mask
*
@@ -164,7 +164,7 @@ static inline int i40e_skb_pad(void)
* The status_error_len doesn't need to be shifted because it begins
* at offset zero.
*/
-static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
const u64 stat_err_bits)
{
return !!(rx_desc->wb.qword1.status_error_len &
@@ -172,8 +172,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
}
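Callers pass the bit(s) of interest pre-shifted; for example, the Rx clean path checks descriptor ownership roughly like this (a sketch, not a verbatim quote of the driver):

	/* if the DD bit is clear, hardware still owns this descriptor */
	if (!iavf_test_staterr(rx_desc, BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)))
		break;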
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */
-#define I40E_RX_INCREMENT(r, i) \
+#define IAVF_RX_INCREMENT(r, i) \
do { \
(i)++; \
if ((i) == (r)->count) \
@@ -181,34 +180,34 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
r->next_to_clean = i; \
} while (0)
-#define I40E_RX_NEXT_DESC(r, i, n) \
+#define IAVF_RX_NEXT_DESC(r, i, n) \
do { \
(i)++; \
if ((i) == (r)->count) \
i = 0; \
- (n) = I40E_RX_DESC((r), (i)); \
+ (n) = IAVF_RX_DESC((r), (i)); \
} while (0)
-#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \
+#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n) \
do { \
- I40E_RX_NEXT_DESC((r), (i), (n)); \
+ IAVF_RX_NEXT_DESC((r), (i), (n)); \
prefetch((n)); \
} while (0)
-#define I40E_MAX_BUFFER_TXD 8
-#define I40E_MIN_TX_LEN 17
+#define IAVF_MAX_BUFFER_TXD 8
+#define IAVF_MIN_TX_LEN 17
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
* the nearest 4K which represents our maximum read request size.
*/
-#define I40E_MAX_READ_REQ_SIZE 4096
-#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
-#define I40E_MAX_DATA_PER_TXD_ALIGNED \
- (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+#define IAVF_MAX_READ_REQ_SIZE 4096
+#define IAVF_MAX_DATA_PER_TXD (16 * 1024 - 1)
+#define IAVF_MAX_DATA_PER_TXD_ALIGNED \
+ (IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))
/**
- * i40e_txd_use_count - estimate the number of descriptors needed for Tx
+ * iavf_txd_use_count - estimate the number of descriptors needed for Tx
* @size: transmit request size in bytes
*
* Due to hardware alignment restrictions (4K alignment), we need to
@@ -235,31 +234,31 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
* operations into:
* return ((size * 85) >> 20) + 1;
*/
-static inline unsigned int i40e_txd_use_count(unsigned int size)
+static inline unsigned int iavf_txd_use_count(unsigned int size)
{
return ((size * 85) >> 20) + 1;
}
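A worked example of the multiply-and-shift approximation (85 / 2^20 is just under 1 / 12288, so the +1 covers the remainder):

/* size = 32768 bytes (a 32K fragment):
 *   exact:  DIV_ROUND_UP(32768, IAVF_MAX_DATA_PER_TXD_ALIGNED)
 *           = DIV_ROUND_UP(32768, 12288) = 3
 *   approx: ((32768 * 85) >> 20) + 1 = (2785280 >> 20) + 1 = 2 + 1 = 3
 */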
/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
-#define I40E_MIN_DESC_PENDING 4
-
-#define I40E_TX_FLAGS_HW_VLAN BIT(1)
-#define I40E_TX_FLAGS_SW_VLAN BIT(2)
-#define I40E_TX_FLAGS_TSO BIT(3)
-#define I40E_TX_FLAGS_IPV4 BIT(4)
-#define I40E_TX_FLAGS_IPV6 BIT(5)
-#define I40E_TX_FLAGS_FCCRC BIT(6)
-#define I40E_TX_FLAGS_FSO BIT(7)
-#define I40E_TX_FLAGS_FD_SB BIT(9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
-#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
-#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
-#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
-#define I40E_TX_FLAGS_VLAN_SHIFT 16
-
-struct i40e_tx_buffer {
- struct i40e_tx_desc *next_to_watch;
+#define IAVF_MIN_DESC_PENDING 4
+
+#define IAVF_TX_FLAGS_HW_VLAN BIT(1)
+#define IAVF_TX_FLAGS_SW_VLAN BIT(2)
+#define IAVF_TX_FLAGS_TSO BIT(3)
+#define IAVF_TX_FLAGS_IPV4 BIT(4)
+#define IAVF_TX_FLAGS_IPV6 BIT(5)
+#define IAVF_TX_FLAGS_FCCRC BIT(6)
+#define IAVF_TX_FLAGS_FSO BIT(7)
+#define IAVF_TX_FLAGS_FD_SB BIT(9)
+#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10)
+#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define IAVF_TX_FLAGS_VLAN_SHIFT 16
+
+struct iavf_tx_buffer {
+ struct iavf_tx_desc *next_to_watch;
union {
struct sk_buff *skb;
void *raw_buf;
@@ -272,7 +271,7 @@ struct i40e_tx_buffer {
u32 tx_flags;
};
-struct i40e_rx_buffer {
+struct iavf_rx_buffer {
dma_addr_t dma;
struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
@@ -283,12 +282,12 @@ struct i40e_rx_buffer {
__u16 pagecnt_bias;
};
-struct i40e_queue_stats {
+struct iavf_queue_stats {
u64 packets;
u64 bytes;
};
-struct i40e_tx_queue_stats {
+struct iavf_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_done_old;
@@ -298,7 +297,7 @@ struct i40e_tx_queue_stats {
u64 tx_lost_interrupt;
};
-struct i40e_rx_queue_stats {
+struct iavf_rx_queue_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buff_failed;
@@ -306,34 +305,34 @@ struct i40e_rx_queue_stats {
u64 realloc_count;
};
-enum i40e_ring_state_t {
- __I40E_TX_FDIR_INIT_DONE,
- __I40E_TX_XPS_INIT_DONE,
- __I40E_RING_STATE_NBITS /* must be last */
+enum iavf_ring_state_t {
+ __IAVF_TX_FDIR_INIT_DONE,
+ __IAVF_TX_XPS_INIT_DONE,
+ __IAVF_RING_STATE_NBITS /* must be last */
};
/* some useful defines for virtchannel interface, which
* is the only remaining user of header split
*/
-#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_HEADER_SPLIT 1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
-#define I40E_RX_SPLIT_L2 0x1
-#define I40E_RX_SPLIT_IP 0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP 0x8
+#define IAVF_RX_DTYPE_NO_SPLIT 0
+#define IAVF_RX_DTYPE_HEADER_SPLIT 1
+#define IAVF_RX_DTYPE_SPLIT_ALWAYS 2
+#define IAVF_RX_SPLIT_L2 0x1
+#define IAVF_RX_SPLIT_IP 0x2
+#define IAVF_RX_SPLIT_TCP_UDP 0x4
+#define IAVF_RX_SPLIT_SCTP 0x8
/* struct that defines a descriptor ring, associated with a VSI */
-struct i40e_ring {
- struct i40e_ring *next; /* pointer to next ring in q_vector */
+struct iavf_ring {
+ struct iavf_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
union {
- struct i40e_tx_buffer *tx_bi;
- struct i40e_rx_buffer *rx_bi;
+ struct iavf_tx_buffer *tx_bi;
+ struct iavf_rx_buffer *rx_bi;
};
- DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
+ DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
u16 queue_index; /* Queue number of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
@@ -361,59 +360,59 @@ struct i40e_ring {
u8 packet_stride;
u16 flags;
-#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
-#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
/* stats structs */
- struct i40e_queue_stats stats;
+ struct iavf_queue_stats stats;
struct u64_stats_sync syncp;
union {
- struct i40e_tx_queue_stats tx_stats;
- struct i40e_rx_queue_stats rx_stats;
+ struct iavf_tx_queue_stats tx_stats;
+ struct iavf_rx_queue_stats rx_stats;
};
unsigned int size; /* length of descriptor ring in bytes */
dma_addr_t dma; /* physical address of ring */
- struct i40e_vsi *vsi; /* Backreference to associated VSI */
- struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+ struct iavf_vsi *vsi; /* Backreference to associated VSI */
+ struct iavf_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
- struct sk_buff *skb; /* When i40evf_clean_rx_ring_irq() must
+ struct sk_buff *skb; /* When iavf_clean_rx_ring_irq() must
* return before it sees the EOP for
* the current packet, we save that skb
* here and resume receiving this
* packet the next time
- * i40evf_clean_rx_ring_irq() is called
+ * iavf_clean_rx_ring_irq() is called
* for this ring.
*/
} ____cacheline_internodealigned_in_smp;
-static inline bool ring_uses_build_skb(struct i40e_ring *ring)
+static inline bool ring_uses_build_skb(struct iavf_ring *ring)
{
- return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
+ return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
}
-static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
+static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
{
- ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+ ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}
-static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
+static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
{
- ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+ ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}
-#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002
-#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002
-#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e
-#define I40E_ITR_ADAPTIVE_LATENCY 0x8000
-#define I40E_ITR_ADAPTIVE_BULK 0x0000
-#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))
+#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002
+#define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002
+#define IAVF_ITR_ADAPTIVE_MAX_USECS 0x007e
+#define IAVF_ITR_ADAPTIVE_LATENCY 0x8000
+#define IAVF_ITR_ADAPTIVE_BULK 0x0000
+#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))
-struct i40e_ring_container {
- struct i40e_ring *ring; /* pointer to linked list of ring(s) */
+struct iavf_ring_container {
+ struct iavf_ring *ring; /* pointer to linked list of ring(s) */
unsigned long next_update; /* jiffies value of next update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
@@ -423,10 +422,10 @@ struct i40e_ring_container {
};
/* iterator for handling rings in ring container */
-#define i40e_for_each_ring(pos, head) \
+#define iavf_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
+static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring->rx_buf_len > (PAGE_SIZE / 2))
@@ -435,25 +434,25 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
return 0;
}
-#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
-
-bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
-netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
-void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
-int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
-int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
-void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
-void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
-int i40evf_napi_poll(struct napi_struct *napi, int budget);
-void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
-void i40evf_detect_recover_hung(struct i40e_vsi *vsi);
-int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
-bool __i40evf_chk_linearize(struct sk_buff *skb);
+#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
+
+bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
+netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void iavf_clean_tx_ring(struct iavf_ring *tx_ring);
+void iavf_clean_rx_ring(struct iavf_ring *rx_ring);
+int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
+int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
+void iavf_free_tx_resources(struct iavf_ring *tx_ring);
+void iavf_free_rx_resources(struct iavf_ring *rx_ring);
+int iavf_napi_poll(struct napi_struct *napi, int budget);
+void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector);
+u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);
+void iavf_detect_recover_hung(struct iavf_vsi *vsi);
+int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
+bool __iavf_chk_linearize(struct sk_buff *skb);
/**
- * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
+ * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
*
@@ -461,14 +460,14 @@ bool __i40evf_chk_linearize(struct sk_buff *skb);
* there are not enough descriptors available in this ring since we need at least
* one descriptor.
**/
-static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
+static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb);
for (;;) {
- count += i40e_txd_use_count(size);
+ count += iavf_txd_use_count(size);
if (!nr_frags--)
break;
@@ -480,21 +479,21 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
}
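Worked example, using the helper above on a TSO skb with a 254-byte linear header and four 32K page fragments:

/* count = iavf_txd_use_count(254)            ->  1
 *       + 4 * iavf_txd_use_count(32768)      -> 12
 *       = 13 data descriptors
 *
 * iavf_xmit_frame_ring() then reserves count + 4 + 1 slots: a gap of 4
 * descriptors to stay off the cache line holding the ring head, plus one
 * slot for a possible context descriptor.
 */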
/**
- * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
+ * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the number of descriptors we want to ensure are available
*
* Returns 0 if stop is not needed
**/
-static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
- if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
return 0;
- return __i40evf_maybe_stop_tx(tx_ring, size);
+ return __iavf_maybe_stop_tx(tx_ring, size);
}
/**
- * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * iavf_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer
* @count: number of buffers used
*
@@ -502,23 +501,23 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb.
**/
-static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
+static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{
/* Both TSO and single send will work if count is less than 8 */
- if (likely(count < I40E_MAX_BUFFER_TXD))
+ if (likely(count < IAVF_MAX_BUFFER_TXD))
return false;
if (skb_is_gso(skb))
- return __i40evf_chk_linearize(skb);
+ return __iavf_chk_linearize(skb);
/* we can support up to 8 data buffers for a single send */
- return count != I40E_MAX_BUFFER_TXD;
+ return count != IAVF_MAX_BUFFER_TXD;
}
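Taken together, the three inline helpers gate admission at the top of the transmit path; a condensed sketch of how they combine, matching the hunks at the top of this patch:

	count = iavf_xmit_descriptor_count(skb);
	if (iavf_chk_linearize(skb, count)) {
		/* more than 8 fragments for one packet: flatten and recount */
		if (__skb_linearize(skb))
			goto out_drop;
		count = iavf_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}
	if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}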
/**
* txring_txq - Find the netdev Tx ring based on the iavf Tx ring
* @ring: Tx ring to find the netdev equivalent of
**/
-static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
-#endif /* _I40E_TXRX_H_ */
+#endif /* _IAVF_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
new file mode 100644
index 000000000000..ca89583613fb
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -0,0 +1,688 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _IAVF_TYPE_H_
+#define _IAVF_TYPE_H_
+
+#include "iavf_status.h"
+#include "iavf_osdep.h"
+#include "iavf_register.h"
+#include "i40e_adminq.h"
+#include "iavf_devids.h"
+
+#define IAVF_RXQ_CTX_DBUFF_SHIFT 7
+
+/* IAVF_MASK is a macro used on 32 bit registers */
+#define IAVF_MASK(mask, shift) ((u32)(mask) << (shift))
+
+#define IAVF_MAX_VSI_QP 16
+#define IAVF_MAX_VF_VSI 3
+#define IAVF_MAX_CHAINED_RX_BUFFERS 5
+
+/* forward declaration */
+struct iavf_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct iavf_hw *, struct i40e_aq_desc *);
+
+/* Data type manipulation macros. */
+
+#define IAVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
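The wrap handling is easiest to verify with numbers (a worked example only):

/* 512-descriptor ring:
 *   next_to_clean = 5,  next_to_use = 10  ->  (512 + 5) - 10 - 1 = 506 free
 *   next_to_clean = 10, next_to_use = 5   ->  (0 + 10) -  5 - 1 =   4 free
 * One slot always stays unused so that a completely full ring can be
 * distinguished from an empty one.
 */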
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define IAVF_QTX_CTL_VF_QUEUE 0x0
+#define IAVF_QTX_CTL_VM_QUEUE 0x1
+#define IAVF_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum iavf_debug_mask {
+ IAVF_DEBUG_INIT = 0x00000001,
+ IAVF_DEBUG_RELEASE = 0x00000002,
+
+ IAVF_DEBUG_LINK = 0x00000010,
+ IAVF_DEBUG_PHY = 0x00000020,
+ IAVF_DEBUG_HMC = 0x00000040,
+ IAVF_DEBUG_NVM = 0x00000080,
+ IAVF_DEBUG_LAN = 0x00000100,
+ IAVF_DEBUG_FLOW = 0x00000200,
+ IAVF_DEBUG_DCB = 0x00000400,
+ IAVF_DEBUG_DIAG = 0x00000800,
+ IAVF_DEBUG_FD = 0x00001000,
+ IAVF_DEBUG_PACKAGE = 0x00002000,
+
+ IAVF_DEBUG_AQ_MESSAGE = 0x01000000,
+ IAVF_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ IAVF_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ IAVF_DEBUG_AQ_COMMAND = 0x06000000,
+ IAVF_DEBUG_AQ = 0x0F000000,
+
+ IAVF_DEBUG_USER = 0xF0000000,
+
+ IAVF_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum iavf_mac_type {
+ IAVF_MAC_UNKNOWN = 0,
+ IAVF_MAC_XL710,
+ IAVF_MAC_VF,
+ IAVF_MAC_X722,
+ IAVF_MAC_X722_VF,
+ IAVF_MAC_GENERIC,
+};
+
+enum iavf_vsi_type {
+ IAVF_VSI_MAIN = 0,
+ IAVF_VSI_VMDQ1 = 1,
+ IAVF_VSI_VMDQ2 = 2,
+ IAVF_VSI_CTRL = 3,
+ IAVF_VSI_FCOE = 4,
+ IAVF_VSI_MIRROR = 5,
+ IAVF_VSI_SRIOV = 6,
+ IAVF_VSI_FDIR = 7,
+ IAVF_VSI_TYPE_UNKNOWN
+};
+
+enum iavf_queue_type {
+ IAVF_QUEUE_TYPE_RX = 0,
+ IAVF_QUEUE_TYPE_TX,
+ IAVF_QUEUE_TYPE_PE_CEQ,
+ IAVF_QUEUE_TYPE_UNKNOWN
+};
+
+#define IAVF_HW_CAP_MAX_GPIO 30
+/* Capabilities of a PF or a VF or the whole device */
+struct iavf_hw_capabilities {
+ bool dcb;
+ bool fcoe;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors_vf;
+};
+
+struct iavf_mac_info {
+ enum iavf_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+/* PCI bus types */
+enum iavf_bus_type {
+ iavf_bus_type_unknown = 0,
+ iavf_bus_type_pci,
+ iavf_bus_type_pcix,
+ iavf_bus_type_pci_express,
+ iavf_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum iavf_bus_speed {
+ iavf_bus_speed_unknown = 0,
+ iavf_bus_speed_33 = 33,
+ iavf_bus_speed_66 = 66,
+ iavf_bus_speed_100 = 100,
+ iavf_bus_speed_120 = 120,
+ iavf_bus_speed_133 = 133,
+ iavf_bus_speed_2500 = 2500,
+ iavf_bus_speed_5000 = 5000,
+ iavf_bus_speed_8000 = 8000,
+ iavf_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum iavf_bus_width {
+ iavf_bus_width_unknown = 0,
+ iavf_bus_width_pcie_x1 = 1,
+ iavf_bus_width_pcie_x2 = 2,
+ iavf_bus_width_pcie_x4 = 4,
+ iavf_bus_width_pcie_x8 = 8,
+ iavf_bus_width_32 = 32,
+ iavf_bus_width_64 = 64,
+ iavf_bus_width_reserved
+};
+
+/* Bus parameters */
+struct iavf_bus_info {
+ enum iavf_bus_speed speed;
+ enum iavf_bus_width width;
+ enum iavf_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+ u16 bus_id;
+};
+
+#define IAVF_MAX_USER_PRIORITY 8
+/* Port hardware description */
+struct iavf_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+
+ /* subsystem structs */
+ struct iavf_mac_info mac;
+ struct iavf_bus_info bus;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+
+ /* capabilities for entire device and PCI func */
+ struct iavf_hw_capabilities dev_caps;
+
+ /* Admin Queue info */
+ struct iavf_adminq_info aq;
+
+ /* debug mask */
+ u32 debug_mask;
+ char err_str[16];
+};
+
+struct iavf_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union iavf_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union iavf_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+enum iavf_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_STATUS_DD_SHIFT = 0,
+ IAVF_RX_DESC_STATUS_EOF_SHIFT = 1,
+ IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ /* Note: Bit 8 is reserved in X710 and XL710 */
+ IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
+ IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_FLM_SHIFT = 11,
+ IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ /* Note: For non-tunnel packets INT_UDP_0 is the right status bit
+ * for the UDP header
+ */
+ IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
+ IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define IAVF_RXD_QW1_STATUS_SHIFT 0
+#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
+ << IAVF_RXD_QW1_STATUS_SHIFT)
+
+#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define IAVF_RXD_QW1_STATUS_TSYNVALID_MASK \
+ BIT_ULL(IAVF_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+enum iavf_rx_desc_fltstat_values {
+ IAVF_RX_DESC_FLTSTAT_NO_DATA = 0,
+ IAVF_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ IAVF_RX_DESC_FLTSTAT_RSV = 2,
+ IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define IAVF_RXD_QW1_ERROR_SHIFT 19
+#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
+
+enum iavf_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_ERROR_RXE_SHIFT = 0,
+ IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ IAVF_RX_DESC_ERROR_HBO_SHIFT = 2,
+ IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ IAVF_RX_DESC_ERROR_IPE_SHIFT = 3,
+ IAVF_RX_DESC_ERROR_L4E_SHIFT = 4,
+ IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum iavf_rx_desc_error_l3l4e_fcoe_masks {
+ IAVF_RX_DESC_ERROR_L3L4E_NONE = 0,
+ IAVF_RX_DESC_ERROR_L3L4E_PROT = 1,
+ IAVF_RX_DESC_ERROR_L3L4E_FC = 2,
+ IAVF_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define IAVF_RXD_QW1_PTYPE_SHIFT 30
+#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum iavf_rx_l2_ptype {
+ IAVF_RX_PTYPE_L2_RESERVED = 0,
+ IAVF_RX_PTYPE_L2_MAC_PAY2 = 1,
+ IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ IAVF_RX_PTYPE_L2_FIP_PAY2 = 3,
+ IAVF_RX_PTYPE_L2_OUI_PAY2 = 4,
+ IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ IAVF_RX_PTYPE_L2_ECP_PAY2 = 7,
+ IAVF_RX_PTYPE_L2_EVB_PAY2 = 8,
+ IAVF_RX_PTYPE_L2_QCN_PAY2 = 9,
+ IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ IAVF_RX_PTYPE_L2_ARP = 11,
+ IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct iavf_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum iavf_rx_ptype_outer_ip {
+ IAVF_RX_PTYPE_OUTER_L2 = 0,
+ IAVF_RX_PTYPE_OUTER_IP = 1
+};
+
+enum iavf_rx_ptype_outer_ip_ver {
+ IAVF_RX_PTYPE_OUTER_NONE = 0,
+ IAVF_RX_PTYPE_OUTER_IPV4 = 0,
+ IAVF_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum iavf_rx_ptype_outer_fragmented {
+ IAVF_RX_PTYPE_NOT_FRAG = 0,
+ IAVF_RX_PTYPE_FRAG = 1
+};
+
+enum iavf_rx_ptype_tunnel_type {
+ IAVF_RX_PTYPE_TUNNEL_NONE = 0,
+ IAVF_RX_PTYPE_TUNNEL_IP_IP = 1,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum iavf_rx_ptype_tunnel_end_prot {
+ IAVF_RX_PTYPE_TUNNEL_END_NONE = 0,
+ IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum iavf_rx_ptype_inner_prot {
+ IAVF_RX_PTYPE_INNER_PROT_NONE = 0,
+ IAVF_RX_PTYPE_INNER_PROT_UDP = 1,
+ IAVF_RX_PTYPE_INNER_PROT_TCP = 2,
+ IAVF_RX_PTYPE_INNER_PROT_SCTP = 3,
+ IAVF_RX_PTYPE_INNER_PROT_ICMP = 4,
+ IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum iavf_rx_ptype_payload_layer {
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define IAVF_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define IAVF_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(IAVF_RXD_QW1_LENGTH_SPH_SHIFT)
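The QW1 masks above are all applied to the same little-endian quadword; the Rx hot path decodes it roughly as follows (a sketch mirroring the driver's descriptor parsing):

	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
			IAVF_RXD_QW1_STATUS_SHIFT;
	u32 rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
		       IAVF_RXD_QW1_ERROR_SHIFT;
	u8 rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
		      IAVF_RXD_QW1_PTYPE_SHIFT;
	unsigned int size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
			    IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;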
+
+enum iavf_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ IAVF_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ IAVF_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ IAVF_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ IAVF_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+enum iavf_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ IAVF_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ IAVF_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ IAVF_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ IAVF_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ IAVF_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ IAVF_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ IAVF_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ IAVF_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define IAVF_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define IAVF_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ IAVF_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ IAVF_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum iavf_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ IAVF_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum iavf_rx_prog_status_desc_prog_id_masks {
+ IAVF_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum iavf_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ IAVF_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ IAVF_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+/* TX Descriptor */
+struct iavf_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define IAVF_TXD_QW1_DTYPE_SHIFT 0
+#define IAVF_TXD_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+enum iavf_tx_desc_dtype_value {
+ IAVF_TX_DESC_DTYPE_DATA = 0x0,
+ IAVF_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ IAVF_TX_DESC_DTYPE_CONTEXT = 0x1,
+ IAVF_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ IAVF_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ IAVF_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ IAVF_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ IAVF_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ IAVF_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ IAVF_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define IAVF_TXD_QW1_CMD_SHIFT 4
+#define IAVF_TXD_QW1_CMD_MASK (0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT)
+
+enum iavf_tx_desc_cmd_bits {
+ IAVF_TX_DESC_CMD_EOP = 0x0001,
+ IAVF_TX_DESC_CMD_RS = 0x0002,
+ IAVF_TX_DESC_CMD_ICRC = 0x0004,
+ IAVF_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ IAVF_TX_DESC_CMD_DUMMY = 0x0010,
+ IAVF_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ IAVF_TX_DESC_CMD_FCOET = 0x0080,
+ IAVF_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ IAVF_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define IAVF_TXD_QW1_OFFSET_SHIFT 16
+#define IAVF_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ IAVF_TXD_QW1_OFFSET_SHIFT)
+
+enum iavf_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ IAVF_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ IAVF_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define IAVF_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define IAVF_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_QW1_L2TAG1_SHIFT 48
+#define IAVF_TXD_QW1_L2TAG1_MASK (0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT)
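The DTYPE/CMD/OFFSET/BUF_SZ/L2TAG1 fields all pack into the data descriptor's second quadword; the Tx path composes it with a helper along these lines (mirrors build_ctob() in the .c file):

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
}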
+
+/* Context descriptors */
+struct iavf_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define IAVF_TXD_CTX_QW1_CMD_SHIFT 4
+#define IAVF_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT)
+
+enum iavf_tx_ctx_desc_cmd_bits {
+ IAVF_TX_CTX_DESC_TSO = 0x01,
+ IAVF_TX_CTX_DESC_TSYN = 0x02,
+ IAVF_TX_CTX_DESC_IL2TAG2 = 0x04,
+ IAVF_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ IAVF_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ IAVF_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ IAVF_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ IAVF_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ IAVF_TX_CTX_DESC_SWPE = 0x40
+};
+
+/* Packet Classifier Types for filters */
+enum iavf_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use.
+ * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use.
+ * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ IAVF_FILTER_PCTYPE_FCOE_OX = 48,
+ IAVF_FILTER_PCTYPE_FCOE_RX = 49,
+ IAVF_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_MSS_SHIFT 50
+#define IAVF_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ IAVF_TXD_CTX_QW1_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_VSI_SHIFT 50
+#define IAVF_TXD_CTX_QW1_VSI_MASK (0x1FFULL << IAVF_TXD_CTX_QW1_VSI_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define IAVF_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ IAVF_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum iavf_tx_ctx_desc_eipt_offload {
+ IAVF_TX_CTX_EXT_IP_NONE = 0x0,
+ IAVF_TX_CTX_EXT_IP_IPV6 = 0x1,
+ IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ IAVF_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define IAVF_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_NATT_SHIFT 9
+#define IAVF_TXD_CTX_QW0_NATT_MASK (0x3ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define IAVF_TXD_CTX_UDP_TUNNELING BIT_ULL(IAVF_TXD_CTX_QW0_NATT_SHIFT)
+#define IAVF_TXD_CTX_GRE_TUNNELING (0x2ULL << IAVF_TXD_CTX_QW0_NATT_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define IAVF_TXD_CTX_QW0_EIP_NOINC_MASK \
+ BIT_ULL(IAVF_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define IAVF_TXD_CTX_EIP_NOINC_IPID_CONST IAVF_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define IAVF_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define IAVF_TXD_CTX_QW0_NATLEN_MASK (0x7FULL << \
+ IAVF_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define IAVF_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ IAVF_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define IAVF_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT)
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct iavf_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+#endif /* _IAVF_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 565677de5ba3..e64751da0921 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1,16 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#include "i40evf.h"
-#include "i40e_prototype.h"
-#include "i40evf_client.h"
+#include "iavf.h"
+#include "iavf_prototype.h"
+#include "iavf_client.h"
/* busy wait delay in msec */
-#define I40EVF_BUSY_WAIT_DELAY 10
-#define I40EVF_BUSY_WAIT_COUNT 50
+#define IAVF_BUSY_WAIT_DELAY 10
+#define IAVF_BUSY_WAIT_COUNT 50
/**
- * i40evf_send_pf_msg
+ * iavf_send_pf_msg
* @adapter: adapter structure
* @op: virtual channel opcode
* @msg: pointer to message buffer
@@ -18,44 +18,44 @@
*
* Send message to PF and print status on failure.
**/
-static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
- enum virtchnl_ops op, u8 *msg, u16 len)
+static int iavf_send_pf_msg(struct iavf_adapter *adapter,
+ enum virtchnl_ops op, u8 *msg, u16 len)
{
- struct i40e_hw *hw = &adapter->hw;
- i40e_status err;
+ struct iavf_hw *hw = &adapter->hw;
+ iavf_status err;
- if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
return 0; /* nothing to see here, move along */
- err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+ err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
if (err)
dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
- op, i40evf_stat_str(hw, err),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
+ op, iavf_stat_str(hw, err),
+ iavf_aq_str(hw, hw->aq.asq_last_status));
return err;
}
/**
- * i40evf_send_api_ver
+ * iavf_send_api_ver
* @adapter: adapter structure
*
* Send API version admin queue message to the PF. The reply is not checked
* in this function. Returns 0 if the message was successfully
* sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
**/
-int i40evf_send_api_ver(struct i40evf_adapter *adapter)
+int iavf_send_api_ver(struct iavf_adapter *adapter)
{
struct virtchnl_version_info vvi;
vvi.major = VIRTCHNL_VERSION_MAJOR;
vvi.minor = VIRTCHNL_VERSION_MINOR;
- return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
- sizeof(vvi));
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
+ sizeof(vvi));
}
/**
- * i40evf_verify_api_ver
+ * iavf_verify_api_ver
* @adapter: adapter structure
*
* Compare API versions with the PF. Must be called after admin queue is
@@ -63,15 +63,15 @@ int i40evf_send_api_ver(struct i40evf_adapter *adapter)
* I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
* from the firmware are propagated.
**/
-int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
+int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
struct virtchnl_version_info *pf_vvi;
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
enum virtchnl_ops op;
- i40e_status err;
+ iavf_status err;
- event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
+ event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
err = -ENOMEM;
@@ -79,8 +79,8 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
}
while (1) {
- err = i40evf_clean_arq_element(hw, &event, NULL);
- /* When the AQ is empty, i40evf_clean_arq_element will return
+ err = iavf_clean_arq_element(hw, &event, NULL);
+ /* When the AQ is empty, iavf_clean_arq_element will return
* nonzero and this loop will terminate.
*/
if (err)
@@ -92,7 +92,7 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
}
- err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ err = (iavf_status)le32_to_cpu(event.desc.cookie_low);
if (err)
goto out_alloc;
@@ -118,14 +118,14 @@ out:
}
/**
- * i40evf_send_vf_config_msg
+ * iavf_send_vf_config_msg
* @adapter: adapter structure
*
* Send VF configuration request admin queue message to the PF. The reply
* is not checked in this function. Returns 0 if the message was
* successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
**/
-int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
+int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
{
u32 caps;
@@ -142,19 +142,43 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ADQ;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
if (PF_IS_V11(adapter))
- return i40evf_send_pf_msg(adapter,
- VIRTCHNL_OP_GET_VF_RESOURCES,
- (u8 *)&caps, sizeof(caps));
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
+ (u8 *)&caps, sizeof(caps));
else
- return i40evf_send_pf_msg(adapter,
- VIRTCHNL_OP_GET_VF_RESOURCES,
- NULL, 0);
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
}
/**
- * i40evf_get_vf_config
+ * iavf_validate_num_queues
+ * @adapter: adapter structure
+ *
+ * Validate that the number of queues the PF has sent in
+ * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
+ **/
+static void iavf_validate_num_queues(struct iavf_adapter *adapter)
+{
+ if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
+ struct virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
+ adapter->vf_res->num_queue_pairs,
+ IAVF_MAX_REQ_QUEUES);
+ dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
+ IAVF_MAX_REQ_QUEUES);
+ adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ vsi_res = &adapter->vf_res->vsi_res[i];
+ vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
+ }
+ }
+}
+
+/**
+ * iavf_get_vf_config
* @adapter: private adapter structure
*
* Get VF configuration from PF and populate hw structure. Must be called after
@@ -162,16 +186,16 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
* with maximum timeout. Response from PF is returned in the buffer for further
* processing by the caller.
**/
-int i40evf_get_vf_config(struct i40evf_adapter *adapter)
+int iavf_get_vf_config(struct iavf_adapter *adapter)
{
- struct i40e_hw *hw = &adapter->hw;
+ struct iavf_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
enum virtchnl_ops op;
- i40e_status err;
+ iavf_status err;
u16 len;
len = sizeof(struct virtchnl_vf_resource) +
- I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
+ IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
event.buf_len = len;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
@@ -180,10 +204,10 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
}
while (1) {
- /* When the AQ is empty, i40evf_clean_arq_element will return
+ /* When the AQ is empty, iavf_clean_arq_element will return
* nonzero and this loop will terminate.
*/
- err = i40evf_clean_arq_element(hw, &event, NULL);
+ err = iavf_clean_arq_element(hw, &event, NULL);
if (err)
goto out_alloc;
op =
@@ -192,10 +216,15 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
break;
}
- err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ err = (iavf_status)le32_to_cpu(event.desc.cookie_low);
memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
- i40e_vf_parse_hw_config(hw, adapter->vf_res);
+ /* some PFs send more queues than we should have, so validate that
+ * we aren't getting too many queues
+ */
+ if (!err)
+ iavf_validate_num_queues(adapter);
+ iavf_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
kfree(event.msg_buf);
out:
@@ -203,17 +232,17 @@ out:
}
/**
- * i40evf_configure_queues
+ * iavf_configure_queues
* @adapter: adapter structure
*
* Request that the PF set up our (previously allocated) queues.
**/
-void i40evf_configure_queues(struct i40evf_adapter *adapter)
+void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
struct virtchnl_queue_pair_info *vqpi;
int pairs = adapter->num_active_queues;
- int i, len, max_frame = I40E_MAX_RXBUFFER;
+ int i, len, max_frame = IAVF_MAX_RXBUFFER;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -229,9 +258,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
return;
/* Limit maximum frame size when jumbo frames is not enabled */
- if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
+ if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
(adapter->netdev->mtu <= ETH_DATA_LEN))
- max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
@@ -251,23 +280,23 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->rxq.max_pkt_size = max_frame;
vqpi->rxq.databuffer_size =
ALIGN(adapter->rx_rings[i].rx_buf_len,
- BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
+ BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
vqpi++;
}
- adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- (u8 *)vqci, len);
+ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ (u8 *)vqci, len);
kfree(vqci);
}
/**
- * i40evf_enable_queues
+ * iavf_enable_queues
* @adapter: adapter structure
*
* Request that the PF enable all of our queues.
**/
-void i40evf_enable_queues(struct i40evf_adapter *adapter)
+void iavf_enable_queues(struct iavf_adapter *adapter)
{
struct virtchnl_queue_select vqs;
@@ -281,18 +310,18 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
- (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
}
/**
- * i40evf_disable_queues
+ * iavf_disable_queues
* @adapter: adapter structure
*
* Request that the PF disable all of our queues.
**/
-void i40evf_disable_queues(struct i40evf_adapter *adapter)
+void iavf_disable_queues(struct iavf_adapter *adapter)
{
struct virtchnl_queue_select vqs;
@@ -306,24 +335,24 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
- (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
}
/**
- * i40evf_map_queues
+ * iavf_map_queues
* @adapter: adapter structure
*
* Request that the PF map queues to interrupt vectors. Misc causes, including
* admin queue, are always mapped to vector 0.
**/
-void i40evf_map_queues(struct i40evf_adapter *adapter)
+void iavf_map_queues(struct iavf_adapter *adapter)
{
struct virtchnl_irq_map_info *vimi;
struct virtchnl_vector_map *vecmap;
int v_idx, q_vectors, len;
- struct i40e_q_vector *q_vector;
+ struct iavf_q_vector *q_vector;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -352,8 +381,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
vecmap->vector_id = v_idx + NONQ_VECS;
vecmap->txq_map = q_vector->ring_mask;
vecmap->rxq_map = q_vector->ring_mask;
- vecmap->rxitr_idx = I40E_RX_ITR;
- vecmap->txitr_idx = I40E_TX_ITR;
+ vecmap->rxitr_idx = IAVF_RX_ITR;
+ vecmap->txitr_idx = IAVF_TX_ITR;
}
/* Misc vector last - this is only for AdminQ messages */
vecmap = &vimi->vecmap[v_idx];
@@ -362,21 +391,21 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
vecmap->txq_map = 0;
vecmap->rxq_map = 0;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
- (u8 *)vimi, len);
+ adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ (u8 *)vimi, len);
kfree(vimi);
}
/**
- * i40evf_request_queues
+ * iavf_request_queues
* @adapter: adapter structure
* @num: number of requested queues
*
* We get a default number of queues from the PF. This enables us to request a
* different number. Returns 0 on success, negative on failure
**/
-int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
+int iavf_request_queues(struct iavf_adapter *adapter, int num)
{
struct virtchnl_vf_res_request vfres;
@@ -390,22 +419,22 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
vfres.num_queue_pairs = num;
adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
- adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
- return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
- (u8 *)&vfres, sizeof(vfres));
+ adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
+ (u8 *)&vfres, sizeof(vfres));
}
/**
- * i40evf_add_ether_addrs
+ * iavf_add_ether_addrs
* @adapter: adapter structure
*
* Request that the PF add one or more addresses to our filters.
**/
-void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
+void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
struct virtchnl_ether_addr_list *veal;
int len, i = 0, count = 0;
- struct i40evf_mac_filter *f;
+ struct iavf_mac_filter *f;
bool more = false;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
@@ -422,7 +451,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
count++;
}
if (!count) {
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
@@ -430,9 +459,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct virtchnl_ether_addr_list) +
(count * sizeof(struct virtchnl_ether_addr));
- if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
- count = (I40EVF_MAX_AQ_BUF_SIZE -
+ count = (IAVF_MAX_AQ_BUF_SIZE -
sizeof(struct virtchnl_ether_addr_list)) /
sizeof(struct virtchnl_ether_addr);
len = sizeof(struct virtchnl_ether_addr_list) +
@@ -458,25 +487,24 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
}
if (!more)
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
- (u8 *)veal, len);
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
kfree(veal);
}
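When the pending filter list would overflow a single admin-queue buffer, count is clamped so the message fits, and the leftover filters are sent on a later pass via the more flag. The clamp arithmetic in isolation, with illustrative sizes standing in for IAVF_MAX_AQ_BUF_SIZE and the virtchnl struct sizes:

    #include <stdio.h>

    #define MAX_AQ_BUF_SIZE 4096 /* illustrative, not the driver's value */
    #define LIST_HDR_SIZE   8    /* stand-in for sizeof(struct virtchnl_ether_addr_list) */
    #define ADDR_ENTRY_SIZE 8    /* stand-in for sizeof(struct virtchnl_ether_addr) */

    int main(void)
    {
            int count = 1000, len;

            len = LIST_HDR_SIZE + count * ADDR_ENTRY_SIZE;
            if (len > MAX_AQ_BUF_SIZE) {
                    /* clamp to what fits; the rest goes in the next message */
                    count = (MAX_AQ_BUF_SIZE - LIST_HDR_SIZE) / ADDR_ENTRY_SIZE;
                    len = LIST_HDR_SIZE + count * ADDR_ENTRY_SIZE;
            }
            printf("sending %d filters in %d bytes\n", count, len);
            return 0;
    }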
/**
- * i40evf_del_ether_addrs
+ * iavf_del_ether_addrs
* @adapter: adapter structure
*
* Request that the PF remove one or more addresses from our filters.
**/
-void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
+void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
struct virtchnl_ether_addr_list *veal;
- struct i40evf_mac_filter *f, *ftmp;
+ struct iavf_mac_filter *f, *ftmp;
int len, i = 0, count = 0;
bool more = false;
@@ -494,7 +522,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
count++;
}
if (!count) {
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
@@ -502,9 +530,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct virtchnl_ether_addr_list) +
(count * sizeof(struct virtchnl_ether_addr));
- if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
- count = (I40EVF_MAX_AQ_BUF_SIZE -
+ count = (IAVF_MAX_AQ_BUF_SIZE -
sizeof(struct virtchnl_ether_addr_list)) /
sizeof(struct virtchnl_ether_addr);
len = sizeof(struct virtchnl_ether_addr_list) +
@@ -530,26 +558,25 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
}
}
if (!more)
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
- (u8 *)veal, len);
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
kfree(veal);
}
/**
- * i40evf_add_vlans
+ * iavf_add_vlans
* @adapter: adapter structure
*
* Request that the PF add one or more VLAN filters to our VSI.
**/
-void i40evf_add_vlans(struct i40evf_adapter *adapter)
+void iavf_add_vlans(struct iavf_adapter *adapter)
{
struct virtchnl_vlan_filter_list *vvfl;
int len, i = 0, count = 0;
- struct i40evf_vlan_filter *f;
+ struct iavf_vlan_filter *f;
bool more = false;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
@@ -566,7 +593,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
count++;
}
if (!count) {
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
@@ -574,9 +601,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
- if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
- count = (I40EVF_MAX_AQ_BUF_SIZE -
+ count = (IAVF_MAX_AQ_BUF_SIZE -
sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16);
len = sizeof(struct virtchnl_vlan_filter_list) +
@@ -601,24 +628,24 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
}
}
if (!more)
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
/**
- * i40evf_del_vlans
+ * iavf_del_vlans
* @adapter: adapter structure
*
* Request that the PF remove one or more VLAN filters from our VSI.
**/
-void i40evf_del_vlans(struct i40evf_adapter *adapter)
+void iavf_del_vlans(struct iavf_adapter *adapter)
{
struct virtchnl_vlan_filter_list *vvfl;
- struct i40evf_vlan_filter *f, *ftmp;
+ struct iavf_vlan_filter *f, *ftmp;
int len, i = 0, count = 0;
bool more = false;
@@ -636,7 +663,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
count++;
}
if (!count) {
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
@@ -644,9 +671,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
- if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
- count = (I40EVF_MAX_AQ_BUF_SIZE -
+ count = (IAVF_MAX_AQ_BUF_SIZE -
sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16);
len = sizeof(struct virtchnl_vlan_filter_list) +
@@ -672,22 +699,22 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
}
}
if (!more)
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
/**
- * i40evf_set_promiscuous
+ * iavf_set_promiscuous
* @adapter: adapter structure
 * @flags: bitmask to control unicast/multicast promiscuous mode.
*
* Request that the PF enable promiscuous mode for our VSI.
**/
-void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
+void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
{
struct virtchnl_promisc_info vpi;
int promisc_all;
@@ -702,39 +729,39 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
promisc_all = FLAG_VF_UNICAST_PROMISC |
FLAG_VF_MULTICAST_PROMISC;
if ((flags & promisc_all) == promisc_all) {
- adapter->flags |= I40EVF_FLAG_PROMISC_ON;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
+ adapter->flags |= IAVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
}
if (flags & FLAG_VF_MULTICAST_PROMISC) {
- adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+ adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
}
if (!flags) {
- adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
- I40EVF_FLAG_ALLMULTI_ON);
- adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
- I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
+ adapter->flags &= ~(IAVF_FLAG_PROMISC_ON |
+ IAVF_FLAG_ALLMULTI_ON);
+ adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC |
+ IAVF_FLAG_AQ_RELEASE_ALLMULTI);
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
}
adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id;
vpi.flags = flags;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
- (u8 *)&vpi, sizeof(vpi));
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ (u8 *)&vpi, sizeof(vpi));
}
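The flags argument selects one of three transitions: both bits set enters full promiscuous mode, the multicast bit alone enters allmulti, and zero leaves both modes. The same decision tree as a standalone sketch (the bit values are placeholders, not the virtchnl constants):

    #include <stdio.h>

    #define UNICAST_PROMISC   0x1 /* stand-in for FLAG_VF_UNICAST_PROMISC */
    #define MULTICAST_PROMISC 0x2 /* stand-in for FLAG_VF_MULTICAST_PROMISC */

    static const char *promisc_action(int flags)
    {
            int all = UNICAST_PROMISC | MULTICAST_PROMISC;

            if ((flags & all) == all)
                    return "entering promiscuous mode";
            if (flags & MULTICAST_PROMISC)
                    return "entering multicast promiscuous mode";
            if (!flags)
                    return "leaving promiscuous mode";
            return "no state change";
    }

    int main(void)
    {
            printf("%s\n", promisc_action(UNICAST_PROMISC | MULTICAST_PROMISC));
            printf("%s\n", promisc_action(MULTICAST_PROMISC));
            printf("%s\n", promisc_action(0));
            return 0;
    }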
/**
- * i40evf_request_stats
+ * iavf_request_stats
* @adapter: adapter structure
*
* Request VSI statistics from PF.
**/
-void i40evf_request_stats(struct i40evf_adapter *adapter)
+void iavf_request_stats(struct iavf_adapter *adapter)
{
struct virtchnl_queue_select vqs;
@@ -745,19 +772,19 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
adapter->current_op = VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = adapter->vsi_res->vsi_id;
/* queue maps are ignored for this message - only the vsi is used */
- if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
- (u8 *)&vqs, sizeof(vqs)))
+ if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
+ sizeof(vqs)))
/* if the request failed, don't lock out others */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
/**
- * i40evf_get_hena
+ * iavf_get_hena
* @adapter: adapter structure
*
* Request hash enable capabilities from PF
**/
-void i40evf_get_hena(struct i40evf_adapter *adapter)
+void iavf_get_hena(struct iavf_adapter *adapter)
{
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -766,18 +793,17 @@ void i40evf_get_hena(struct i40evf_adapter *adapter)
return;
}
adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
- NULL, 0);
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
}
/**
- * i40evf_set_hena
+ * iavf_set_hena
* @adapter: adapter structure
*
* Request the PF to set our RSS hash capabilities
**/
-void i40evf_set_hena(struct i40evf_adapter *adapter)
+void iavf_set_hena(struct iavf_adapter *adapter)
{
struct virtchnl_rss_hena vrh;
@@ -789,18 +815,18 @@ void i40evf_set_hena(struct i40evf_adapter *adapter)
}
vrh.hena = adapter->hena;
adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA,
- (u8 *)&vrh, sizeof(vrh));
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
+ sizeof(vrh));
}
/**
- * i40evf_set_rss_key
+ * iavf_set_rss_key
* @adapter: adapter structure
*
* Request the PF to set our RSS hash key
**/
-void i40evf_set_rss_key(struct i40evf_adapter *adapter)
+void iavf_set_rss_key(struct iavf_adapter *adapter)
{
struct virtchnl_rss_key *vrk;
int len;
@@ -821,19 +847,18 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter)
memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY,
- (u8 *)vrk, len);
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
kfree(vrk);
}
/**
- * i40evf_set_rss_lut
+ * iavf_set_rss_lut
* @adapter: adapter structure
*
* Request the PF to set our RSS lookup table
**/
-void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
+void iavf_set_rss_lut(struct iavf_adapter *adapter)
{
struct virtchnl_rss_lut *vrl;
int len;
@@ -853,19 +878,18 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
vrl->lut_entries = adapter->rss_lut_size;
memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
- (u8 *)vrl, len);
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
kfree(vrl);
}
/**
- * i40evf_enable_vlan_stripping
+ * iavf_enable_vlan_stripping
* @adapter: adapter structure
*
* Request VLAN header stripping to be enabled
**/
-void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter)
+void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
{
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -874,18 +898,17 @@ void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter)
return;
}
adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
- NULL, 0);
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
}
/**
- * i40evf_disable_vlan_stripping
+ * iavf_disable_vlan_stripping
* @adapter: adapter structure
*
* Request VLAN header stripping to be disabled
**/
-void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter)
+void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
{
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -894,18 +917,17 @@ void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter)
return;
}
adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
- NULL, 0);
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
}
/**
- * i40evf_print_link_message - print link up or down
+ * iavf_print_link_message - print link up or down
* @adapter: adapter structure
*
 * Log a message telling the world of our wondrous link status
*/
-static void i40evf_print_link_message(struct i40evf_adapter *adapter)
+static void iavf_print_link_message(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
char *speed = "Unknown ";
@@ -942,13 +964,13 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter)
}
/**
- * i40evf_enable_channel
+ * iavf_enable_channels
* @adapter: adapter structure
*
* Request that the PF enable channels as specified by
* the user via tc tool.
**/
-void i40evf_enable_channels(struct i40evf_adapter *adapter)
+void iavf_enable_channels(struct iavf_adapter *adapter)
{
struct virtchnl_tc_info *vti = NULL;
u16 len;
@@ -976,22 +998,21 @@ void i40evf_enable_channels(struct i40evf_adapter *adapter)
adapter->ch_config.ch_info[i].max_tx_rate;
}
- adapter->ch_config.state = __I40EVF_TC_RUNNING;
- adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->ch_config.state = __IAVF_TC_RUNNING;
+ adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS,
- (u8 *)vti, len);
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
kfree(vti);
}
/**
- * i40evf_disable_channel
+ * iavf_disable_channels
* @adapter: adapter structure
*
* Request that the PF disable channels that are configured
**/
-void i40evf_disable_channels(struct i40evf_adapter *adapter)
+void iavf_disable_channels(struct iavf_adapter *adapter)
{
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1000,23 +1021,22 @@ void i40evf_disable_channels(struct i40evf_adapter *adapter)
return;
}
- adapter->ch_config.state = __I40EVF_TC_INVALID;
- adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->ch_config.state = __IAVF_TC_INVALID;
+ adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS;
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS,
- NULL, 0);
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
}
/**
- * i40evf_print_cloud_filter
+ * iavf_print_cloud_filter
* @adapter: adapter structure
* @f: cloud filter to print
*
* Print the cloud filter
**/
-static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter,
- struct virtchnl_filter *f)
+static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
+ struct virtchnl_filter *f)
{
switch (f->flow_type) {
case VIRTCHNL_TCP_V4_FLOW:
@@ -1043,15 +1063,15 @@ static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter,
}
/**
- * i40evf_add_cloud_filter
+ * iavf_add_cloud_filter
* @adapter: adapter structure
*
* Request that the PF add cloud filters as specified
* by the user via tc tool.
**/
-void i40evf_add_cloud_filter(struct i40evf_adapter *adapter)
+void iavf_add_cloud_filter(struct iavf_adapter *adapter)
{
- struct i40evf_cloud_filter *cf;
+ struct iavf_cloud_filter *cf;
struct virtchnl_filter *f;
int len = 0, count = 0;
@@ -1068,7 +1088,7 @@ void i40evf_add_cloud_filter(struct i40evf_adapter *adapter)
}
}
if (!count) {
- adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
@@ -1082,25 +1102,24 @@ void i40evf_add_cloud_filter(struct i40evf_adapter *adapter)
if (cf->add) {
memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
cf->add = false;
- cf->state = __I40EVF_CF_ADD_PENDING;
- i40evf_send_pf_msg(adapter,
- VIRTCHNL_OP_ADD_CLOUD_FILTER,
- (u8 *)f, len);
+ cf->state = __IAVF_CF_ADD_PENDING;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
+ (u8 *)f, len);
}
}
kfree(f);
}
/**
- * i40evf_del_cloud_filter
+ * iavf_del_cloud_filter
* @adapter: adapter structure
*
* Request that the PF delete cloud filters as specified
* by the user via tc tool.
**/
-void i40evf_del_cloud_filter(struct i40evf_adapter *adapter)
+void iavf_del_cloud_filter(struct iavf_adapter *adapter)
{
- struct i40evf_cloud_filter *cf, *cftmp;
+ struct iavf_cloud_filter *cf, *cftmp;
struct virtchnl_filter *f;
int len = 0, count = 0;
@@ -1117,7 +1136,7 @@ void i40evf_del_cloud_filter(struct i40evf_adapter *adapter)
}
}
if (!count) {
- adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
@@ -1131,30 +1150,29 @@ void i40evf_del_cloud_filter(struct i40evf_adapter *adapter)
if (cf->del) {
memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
cf->del = false;
- cf->state = __I40EVF_CF_DEL_PENDING;
- i40evf_send_pf_msg(adapter,
- VIRTCHNL_OP_DEL_CLOUD_FILTER,
- (u8 *)f, len);
+ cf->state = __IAVF_CF_DEL_PENDING;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER,
+ (u8 *)f, len);
}
}
kfree(f);
}
/**
- * i40evf_request_reset
+ * iavf_request_reset
* @adapter: adapter structure
*
* Request that the PF reset this VF. No response is expected.
**/
-void i40evf_request_reset(struct i40evf_adapter *adapter)
+void iavf_request_reset(struct iavf_adapter *adapter)
{
/* Don't check CURRENT_OP - this is always higher priority */
- i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
/**
- * i40evf_virtchnl_completion
+ * iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
* @v_retval: retval sent by PF
@@ -1165,10 +1183,9 @@ void i40evf_request_reset(struct i40evf_adapter *adapter)
* wait, we fire off our requests and assume that no errors will be returned.
* This function handles the reply messages.
**/
-void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
- enum virtchnl_ops v_opcode,
- i40e_status v_retval,
- u8 *msg, u16 msglen)
+void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ enum virtchnl_ops v_opcode, iavf_status v_retval,
+ u8 *msg, u16 msglen)
{
struct net_device *netdev = adapter->netdev;
@@ -1176,6 +1193,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
struct virtchnl_pf_event *vpe =
(struct virtchnl_pf_event *)msg;
bool link_up = vpe->event_data.link_event.link_status;
+
switch (vpe->event) {
case VIRTCHNL_EVENT_LINK_CHANGE:
adapter->link_speed =
@@ -1193,7 +1211,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
* after we enable queues and actually prepared
* to send traffic.
*/
- if (adapter->state != __I40EVF_RUNNING)
+ if (adapter->state != __IAVF_RUNNING)
break;
/* For ADq enabled VF, we reconfigure VSIs and
@@ -1201,7 +1219,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
* queues are enabled.
*/
if (adapter->flags &
- I40EVF_FLAG_QUEUES_DISABLED)
+ IAVF_FLAG_QUEUES_DISABLED)
break;
}
@@ -1213,12 +1231,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
}
- i40evf_print_link_message(adapter);
+ iavf_print_link_message(adapter);
break;
case VIRTCHNL_EVENT_RESET_IMPENDING:
dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
- if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
- adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
+ adapter->flags |= IAVF_FLAG_RESET_PENDING;
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
schedule_work(&adapter->reset_task);
}
@@ -1234,48 +1252,48 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
switch (v_opcode) {
case VIRTCHNL_OP_ADD_VLAN:
dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
- i40evf_stat_str(&adapter->hw, v_retval));
+ iavf_stat_str(&adapter->hw, v_retval));
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
- i40evf_stat_str(&adapter->hw, v_retval));
+ iavf_stat_str(&adapter->hw, v_retval));
break;
case VIRTCHNL_OP_DEL_VLAN:
dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
- i40evf_stat_str(&adapter->hw, v_retval));
+ iavf_stat_str(&adapter->hw, v_retval));
break;
case VIRTCHNL_OP_DEL_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
- i40evf_stat_str(&adapter->hw, v_retval));
+ iavf_stat_str(&adapter->hw, v_retval));
break;
case VIRTCHNL_OP_ENABLE_CHANNELS:
dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
- i40evf_stat_str(&adapter->hw, v_retval));
- adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
- adapter->ch_config.state = __I40EVF_TC_INVALID;
+ iavf_stat_str(&adapter->hw, v_retval));
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->ch_config.state = __IAVF_TC_INVALID;
netdev_reset_tc(netdev);
netif_tx_start_all_queues(netdev);
break;
case VIRTCHNL_OP_DISABLE_CHANNELS:
dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
- i40evf_stat_str(&adapter->hw, v_retval));
- adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
- adapter->ch_config.state = __I40EVF_TC_RUNNING;
+ iavf_stat_str(&adapter->hw, v_retval));
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->ch_config.state = __IAVF_TC_RUNNING;
netif_tx_start_all_queues(netdev);
break;
case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
- struct i40evf_cloud_filter *cf, *cftmp;
+ struct iavf_cloud_filter *cf, *cftmp;
list_for_each_entry_safe(cf, cftmp,
&adapter->cloud_filter_list,
list) {
- if (cf->state == __I40EVF_CF_ADD_PENDING) {
- cf->state = __I40EVF_CF_INVALID;
+ if (cf->state == __IAVF_CF_ADD_PENDING) {
+ cf->state = __IAVF_CF_INVALID;
dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
- i40evf_stat_str(&adapter->hw,
- v_retval));
- i40evf_print_cloud_filter(adapter,
- &cf->f);
+ iavf_stat_str(&adapter->hw,
+ v_retval));
+ iavf_print_cloud_filter(adapter,
+ &cf->f);
list_del(&cf->list);
kfree(cf);
adapter->num_cloud_filters--;
@@ -1284,32 +1302,31 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
- struct i40evf_cloud_filter *cf;
+ struct iavf_cloud_filter *cf;
list_for_each_entry(cf, &adapter->cloud_filter_list,
list) {
- if (cf->state == __I40EVF_CF_DEL_PENDING) {
- cf->state = __I40EVF_CF_ACTIVE;
+ if (cf->state == __IAVF_CF_DEL_PENDING) {
+ cf->state = __IAVF_CF_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
- i40evf_stat_str(&adapter->hw,
- v_retval));
- i40evf_print_cloud_filter(adapter,
- &cf->f);
+ iavf_stat_str(&adapter->hw,
+ v_retval));
+ iavf_print_cloud_filter(adapter,
+ &cf->f);
}
}
}
break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
- v_retval,
- i40evf_stat_str(&adapter->hw, v_retval),
+ v_retval, iavf_stat_str(&adapter->hw, v_retval),
v_opcode);
}
}
switch (v_opcode) {
case VIRTCHNL_OP_GET_STATS: {
- struct i40e_eth_stats *stats =
- (struct i40e_eth_stats *)msg;
+ struct iavf_eth_stats *stats =
+ (struct iavf_eth_stats *)msg;
netdev->stats.rx_packets = stats->rx_unicast +
stats->rx_multicast +
stats->rx_broadcast;
@@ -1326,25 +1343,33 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
break;
case VIRTCHNL_OP_GET_VF_RESOURCES: {
u16 len = sizeof(struct virtchnl_vf_resource) +
- I40E_MAX_VF_VSI *
+ IAVF_MAX_VF_VSI *
sizeof(struct virtchnl_vsi_resource);
memcpy(adapter->vf_res, msg, min(msglen, len));
- i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
- /* restore current mac address */
- ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
- i40evf_process_config(adapter);
+ iavf_validate_num_queues(adapter);
+ iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+ if (is_zero_ether_addr(adapter->hw.mac.addr)) {
+ /* restore current mac address */
+ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+ } else {
+ /* refresh current mac address if changed */
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ ether_addr_copy(netdev->perm_addr,
+ adapter->hw.mac.addr);
+ }
+ iavf_process_config(adapter);
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
- i40evf_irq_enable(adapter, true);
- adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED;
+ iavf_irq_enable(adapter, true);
+ adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
break;
case VIRTCHNL_OP_DISABLE_QUEUES:
- i40evf_free_all_tx_resources(adapter);
- i40evf_free_all_rx_resources(adapter);
- if (adapter->state == __I40EVF_DOWN_PENDING) {
- adapter->state = __I40EVF_DOWN;
+ iavf_free_all_tx_resources(adapter);
+ iavf_free_all_rx_resources(adapter);
+ if (adapter->state == __IAVF_DOWN_PENDING) {
+ adapter->state = __IAVF_DOWN;
wake_up(&adapter->down_waitqueue);
}
break;
@@ -1363,8 +1388,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
* care about that.
*/
if (msglen && CLIENT_ENABLED(adapter))
- i40evf_notify_client_message(&adapter->vsi,
- msg, msglen);
+ iavf_notify_client_message(&adapter->vsi, msg, msglen);
break;
case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
@@ -1373,6 +1397,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+
if (msglen == sizeof(*vrh))
adapter->hena = vrh->hena;
else
@@ -1383,32 +1408,33 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
case VIRTCHNL_OP_REQUEST_QUEUES: {
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
+
if (vfres->num_queue_pairs != adapter->num_req_queues) {
dev_info(&adapter->pdev->dev,
"Requested %d queues, PF can support %d\n",
adapter->num_req_queues,
vfres->num_queue_pairs);
adapter->num_req_queues = 0;
- adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
}
}
break;
case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
- struct i40evf_cloud_filter *cf;
+ struct iavf_cloud_filter *cf;
list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
- if (cf->state == __I40EVF_CF_ADD_PENDING)
- cf->state = __I40EVF_CF_ACTIVE;
+ if (cf->state == __IAVF_CF_ADD_PENDING)
+ cf->state = __IAVF_CF_ACTIVE;
}
}
break;
case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
- struct i40evf_cloud_filter *cf, *cftmp;
+ struct iavf_cloud_filter *cf, *cftmp;
list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
list) {
- if (cf->state == __I40EVF_CF_DEL_PENDING) {
- cf->state = __I40EVF_CF_INVALID;
+ if (cf->state == __IAVF_CF_DEL_PENDING) {
+ cf->state = __IAVF_CF_INVALID;
list_del(&cf->list);
kfree(cf);
adapter->num_cloud_filters--;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 4058673fd853..e5d6f684437e 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -13,5 +13,7 @@ ice-y := ice_main.o \
ice_nvm.o \
ice_switch.o \
ice_sched.o \
+ ice_lib.o \
ice_txrx.o \
ice_ethtool.o
+ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 868f4a1d0f72..4c4b5717a627 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -28,6 +28,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
+#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
#include "ice_devids.h"
#include "ice_type.h"
@@ -35,17 +36,20 @@
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
+#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
extern const char ice_drv_ver[];
#define ICE_BAR0 0
#define ICE_DFLT_NUM_DESC 128
-#define ICE_MIN_NUM_DESC 8
-#define ICE_MAX_NUM_DESC 8160
#define ICE_REQ_DESC_MULTIPLE 32
+#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
+#define ICE_MAX_NUM_DESC 8160
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
+#define ICE_MBXQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_MAX_VSI_ALLOC 130
@@ -62,6 +66,15 @@ extern const char ice_drv_ver[];
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
+#define ICE_INVAL_VFID 256
+#define ICE_MAX_VF_COUNT 256
+#define ICE_MAX_QS_PER_VF 256
+#define ICE_MIN_QS_PER_VF 1
+#define ICE_DFLT_QS_PER_VF 4
+#define ICE_MAX_BASE_QS_PER_VF 16
+#define ICE_MAX_INTR_PER_VF 65
+#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
+#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
@@ -122,7 +135,8 @@ struct ice_sw {
enum ice_state {
__ICE_DOWN,
__ICE_NEEDS_RESTART,
- __ICE_RESET_RECOVERY_PENDING, /* set by driver when reset starts */
+ __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
+ __ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
__ICE_PFR_REQ, /* set by driver and peers */
__ICE_CORER_REQ, /* set by driver and peers */
__ICE_GLOBR_REQ, /* set by driver and peers */
@@ -131,10 +145,24 @@ enum ice_state {
__ICE_EMPR_RECV, /* set by OICR handler */
__ICE_SUSPENDED, /* set on module remove path */
__ICE_RESET_FAILED, /* set by reset/rebuild */
+	/* When checking for the PF to be in a nominal operating state, only
+	 * the bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS are
+	 * examined. If a bit should factor into the nominal operating state,
+	 * it must be added before __ICE_STATE_NOMINAL_CHECK_BITS. Do not move
+	 * this entry's position without appropriate consideration.
+	 */
+ __ICE_STATE_NOMINAL_CHECK_BITS,
__ICE_ADMINQ_EVENT_PENDING,
+ __ICE_MAILBOXQ_EVENT_PENDING,
+ __ICE_MDD_EVENT_PENDING,
+ __ICE_VFLR_EVENT_PENDING,
__ICE_FLTR_OVERFLOW_PROMISC,
+ __ICE_VF_DIS,
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
+ __ICE_SERVICE_DIS,
__ICE_STATE_NBITS /* must be last */
};
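Because of that ordering rule, a nominal-state test only has to examine the bits declared before the marker. One way such a check could look, as a userspace sketch (the real driver would use the kernel bitmap helpers on pf->state):

    #include <stdbool.h>
    #include <stdio.h>

    enum state_sketch {
            STATE_DOWN,
            STATE_RESET_FAILED,
            STATE_NOMINAL_CHECK_BITS, /* marker: entries above are checked */
            STATE_SERVICE_SCHED,      /* entries below are ignored by the check */
    };

    static bool is_nominal(unsigned long state)
    {
            unsigned long mask = (1UL << STATE_NOMINAL_CHECK_BITS) - 1;

            return (state & mask) == 0;
    }

    int main(void)
    {
            printf("%d\n", is_nominal(0));                          /* 1: nominal */
            printf("%d\n", is_nominal(1UL << STATE_SERVICE_SCHED)); /* 1: bit ignored */
            printf("%d\n", is_nominal(1UL << STATE_DOWN));          /* 0: not nominal */
            return 0;
    }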
@@ -168,7 +196,8 @@ struct ice_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;
int num_q_vectors;
- int base_vector;
+ int sw_base_vector; /* Irq base for OS reserved vectors */
+ int hw_base_vector; /* HW (absolute) index of a vector */
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -176,6 +205,8 @@ struct ice_vsi {
/* Interrupt thresholds */
u16 work_lmt;
+ s16 vf_id; /* VF ID for SR-IOV VSIs */
+
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
@@ -225,21 +256,39 @@ struct ice_q_vector {
u8 num_ring_tx; /* total number of tx rings in vector */
u8 num_ring_rx; /* total number of rx rings in vector */
char name[ICE_INT_NAME_STR_LEN];
+	/* in usecs; use ice_intrl_to_usecs_reg() before writing this value to
+	 * the device
+	 */
+ u8 intrl;
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
ICE_FLAG_MSIX_ENA,
ICE_FLAG_FLTR_SYNC,
ICE_FLAG_RSS_ENA,
+ ICE_FLAG_SRIOV_ENA,
+ ICE_FLAG_SRIOV_CAPABLE,
ICE_PF_FLAGS_NBITS /* must be last */
};
struct ice_pf {
struct pci_dev *pdev;
+
+ /* OS reserved IRQ details */
struct msix_entry *msix_entries;
- struct ice_res_tracker *irq_tracker;
+ struct ice_res_tracker *sw_irq_tracker;
+
+ /* HW reserved Interrupts for this PF */
+ struct ice_res_tracker *hw_irq_tracker;
+
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
+ /* Virtchnl/SR-IOV config info */
+ struct ice_vf *vf;
+ int num_alloc_vfs; /* actual number of VFs allocated */
+ u16 num_vfs_supported; /* num VFs supported for this PF */
+ u16 num_vf_qps; /* num queue pairs per VF */
+ u16 num_vf_msix; /* num vectors per VF */
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
@@ -252,9 +301,11 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
u32 msg_enable;
u32 hw_csum_rx_error;
- u32 oicr_idx; /* Other interrupt cause vector index */
+ u32 sw_oicr_idx; /* Other interrupt cause SW vector index */
+ u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
+ u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
+ u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
- u32 num_avail_msix; /* remaining MSIX vectors left unclaimed */
u16 num_lan_tx; /* num lan tx queues setup */
u16 num_lan_rx; /* num lan rx queues setup */
u16 q_left_tx; /* remaining num tx queues left unclaimed */
@@ -270,6 +321,9 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded; /* has previous stats been loaded */
+ u32 tx_timeout_count;
+ unsigned long tx_timeout_last_recovery;
+ u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
};
@@ -286,8 +340,8 @@ struct ice_netdev_priv {
static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
struct ice_q_vector *q_vector)
{
- u32 vector = (vsi && q_vector) ? vsi->base_vector + q_vector->v_idx :
- ((struct ice_pf *)hw->back)->oicr_idx;
+ u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
+ ((struct ice_pf *)hw->back)->hw_oicr_idx;
int itr = ICE_ITR_NONE;
u32 val;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index a0614f472658..6653555f55dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -87,6 +87,8 @@ struct ice_aqc_list_caps {
/* Device/Function buffer entry, repeated per reported capability */
struct ice_aqc_list_caps_elem {
__le16 cap;
+#define ICE_AQC_CAPS_SRIOV 0x0012
+#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VSI 0x0017
#define ICE_AQC_CAPS_RSS 0x0040
#define ICE_AQC_CAPS_RXQS 0x0041
@@ -443,6 +445,8 @@ struct ice_aqc_vsi_props {
u8 reserved[24];
};
+#define ICE_MAX_NUM_RECIPES 64
+
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
struct ice_aqc_sw_rules {
@@ -734,6 +738,10 @@ struct ice_aqc_add_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
+struct ice_aqc_get_elem {
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
struct ice_aqc_get_topo_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
struct ice_aqc_txsched_elem_data
@@ -771,9 +779,8 @@ struct ice_aqc_layer_props {
u8 chunk_size;
__le16 max_device_nodes;
__le16 max_pf_nodes;
- u8 rsvd0[2];
- __le16 max_shared_rate_lmtr;
- __le16 max_children;
+ u8 rsvd0[4];
+ __le16 max_sibl_grp_sz;
__le16 max_cir_rl_profiles;
__le16 max_eir_rl_profiles;
__le16 max_srl_profiles;
@@ -919,9 +926,11 @@ struct ice_aqc_set_phy_cfg_data {
u8 caps;
#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
-#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
-#define ICE_AQ_PHY_ENA_LINK BIT(3)
-#define ICE_AQ_PHY_ENA_ATOMIC_LINK BIT(5)
+#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
+#define ICE_AQ_PHY_ENA_LINK BIT(3)
+#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define ICE_AQ_PHY_ENA_LESM BIT(6)
+#define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7)
u8 low_power_ctrl;
__le16 eee_cap; /* Value from ice_aqc_get_phy_caps */
__le16 eeer_value;
@@ -1068,6 +1077,19 @@ struct ice_aqc_nvm {
__le32 addr_low;
};
+/**
+ * Send to PF command (indirect 0x0801); id is only used by PF
+ *
+ * Send to VF command (indirect 0x0802); id is only used by PF
+ */
+struct ice_aqc_pf_vf_msg {
+ __le32 id;
+ u32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1203,6 +1225,84 @@ struct ice_aqc_dis_txq {
struct ice_aqc_dis_txq_item qgrps[1];
};
+/* Configure Firmware Logging Command (indirect 0xFF09)
+ * Logging Information Read Response (indirect 0xFF10)
+ * Note: The 0xFF10 command has no input parameters.
+ */
+struct ice_aqc_fw_logging {
+ u8 log_ctrl;
+#define ICE_AQC_FW_LOG_AQ_EN BIT(0)
+#define ICE_AQC_FW_LOG_UART_EN BIT(1)
+ u8 rsvd0;
+ u8 log_ctrl_valid; /* Not used by 0xFF10 Response */
+#define ICE_AQC_FW_LOG_AQ_VALID BIT(0)
+#define ICE_AQC_FW_LOG_UART_VALID BIT(1)
+ u8 rsvd1[5];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+enum ice_aqc_fw_logging_mod {
+ ICE_AQC_FW_LOG_ID_GENERAL = 0,
+ ICE_AQC_FW_LOG_ID_CTRL,
+ ICE_AQC_FW_LOG_ID_LINK,
+ ICE_AQC_FW_LOG_ID_LINK_TOPO,
+ ICE_AQC_FW_LOG_ID_DNL,
+ ICE_AQC_FW_LOG_ID_I2C,
+ ICE_AQC_FW_LOG_ID_SDP,
+ ICE_AQC_FW_LOG_ID_MDIO,
+ ICE_AQC_FW_LOG_ID_ADMINQ,
+ ICE_AQC_FW_LOG_ID_HDMA,
+ ICE_AQC_FW_LOG_ID_LLDP,
+ ICE_AQC_FW_LOG_ID_DCBX,
+ ICE_AQC_FW_LOG_ID_DCB,
+ ICE_AQC_FW_LOG_ID_NETPROXY,
+ ICE_AQC_FW_LOG_ID_NVM,
+ ICE_AQC_FW_LOG_ID_AUTH,
+ ICE_AQC_FW_LOG_ID_VPD,
+ ICE_AQC_FW_LOG_ID_IOSF,
+ ICE_AQC_FW_LOG_ID_PARSER,
+ ICE_AQC_FW_LOG_ID_SW,
+ ICE_AQC_FW_LOG_ID_SCHEDULER,
+ ICE_AQC_FW_LOG_ID_TXQ,
+ ICE_AQC_FW_LOG_ID_RSVD,
+ ICE_AQC_FW_LOG_ID_POST,
+ ICE_AQC_FW_LOG_ID_WATCHDOG,
+ ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
+ ICE_AQC_FW_LOG_ID_MNG,
+ ICE_AQC_FW_LOG_ID_MAX,
+};
+
+/* This is the buffer for both of the logging commands.
+ * The entry array size depends on the datalen parameter in the descriptor.
+ * There will be a total of datalen / 2 entries.
+ */
+struct ice_aqc_fw_logging_data {
+ __le16 entry[1];
+#define ICE_AQC_FW_LOG_ID_S 0
+#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S)
+
+#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */
+#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */
+
+#define ICE_AQC_FW_LOG_EN_S 12
+#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S)
+#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */
+#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */
+#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */
+#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */
+};
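Each 16-bit entry therefore packs a module ID in bits 0-11 and its enable flags in bits 12-15, which is also why the buffer holds datalen / 2 entries. Building one entry with the same shifts (the module ID and flag choice are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define FW_LOG_ID_S 0
    #define FW_LOG_EN_S 12

    int main(void)
    {
            uint16_t module_id = 2;   /* e.g. the link module in the enum above */
            uint16_t cfg = 0x2 | 0x8; /* init + error events, pre-shift */
            uint16_t entry = (uint16_t)((module_id << FW_LOG_ID_S) |
                                        (cfg << FW_LOG_EN_S));

            printf("entry = 0x%04x\n", entry); /* 0xa002 */
            return 0;
    }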
+
+/* Get/Clear FW Log (indirect 0xFF11) */
+struct ice_aqc_get_clear_fw_log {
+ u8 flags;
+#define ICE_AQC_FW_LOG_CLEAR BIT(0)
+#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1)
+ u8 rsvd1[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -1247,11 +1347,15 @@ struct ice_aq_desc {
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_add_move_delete_elem add_move_delete_elem;
struct ice_aqc_nvm nvm;
+ struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
+ struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
+ struct ice_aqc_fw_logging fw_logging;
+ struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
@@ -1325,6 +1429,7 @@ enum ice_adminq_opc {
/* transmit scheduler commands */
ice_aqc_opc_get_dflt_topo = 0x0400,
ice_aqc_opc_add_sched_elems = 0x0401,
+ ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
ice_aqc_opc_delete_sched_elems = 0x040F,
@@ -1340,6 +1445,10 @@ enum ice_adminq_opc {
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
+ /* PF/VF mailbox commands */
+ ice_mbx_opc_send_msg_to_pf = 0x0801,
+ ice_mbx_opc_send_msg_to_vf = 0x0802,
+
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
ice_aqc_opc_set_rss_lut = 0x0B03,
@@ -1349,6 +1458,9 @@ enum ice_adminq_opc {
/* TX queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
+
+ /* debug commands */
+ ice_aqc_opc_fw_logging = 0xFF09,
};
#endif /* _ICE_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 661beea6af79..c52f450f2c0d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -7,16 +7,16 @@
#define ICE_PF_RESET_WAIT_COUNT 200
-#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \
- wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \
+#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
+ wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
((ICE_RX_OPC_MDID << \
GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
(((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
-#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \
- wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \
+#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
+ wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
(((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
(((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
@@ -125,7 +125,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
*
* Returns the various PHY capabilities supported on the Port (0x0600)
*/
-static enum ice_status
+enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *pcaps,
struct ice_sq_cd *cd)
@@ -290,30 +290,85 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
}
/**
- * ice_init_flex_parser - initialize rx flex parser
+ * ice_init_flex_flags
* @hw: pointer to the hardware structure
+ * @prof_id: Rx Descriptor Builder profile ID
*
- * Function to initialize flex descriptors
+ * Function to initialize Rx flex flags
*/
-static void ice_init_flex_parser(struct ice_hw *hw)
+static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
u8 idx = 0;
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);
- ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,
- idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,
- ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);
- ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+ /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
+ * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
+ * flexiflags1[3:0] - Not used for flag programming
+ * flexiflags2[7:0] - Tunnel and VLAN types
+ * 2 invalid fields in last index
+ */
+ switch (prof_id) {
+ /* Rx flex flags are currently programmed for the NIC profiles only.
+ * Different flag bit programming configurations can be added per
+ * profile as needed.
+ */
+ case ICE_RXDID_FLEX_NIC:
+ case ICE_RXDID_FLEX_NIC_2:
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
+ ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
+ ICE_RXFLG_FIN, idx++);
+ /* flex flag 1 is not used for flexi-flag programming, skipping
+ * these four FLG64 bits.
+ */
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
+ ICE_RXFLG_EVLAN_x9100, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
+ ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
+ ICE_RXFLG_TNL0, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+ break;
+
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "Flag programming for profile ID %d not supported\n",
+ prof_id);
+ }
+}
+
+/**
+ * ice_init_flex_flds
+ * @hw: pointer to the hardware structure
+ * @prof_id: Rx Descriptor Builder profile ID
+ *
+ * Function to initialize flex descriptors
+ */
+static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
+{
+ enum ice_flex_rx_mdid mdid;
+
+ switch (prof_id) {
+ case ICE_RXDID_FLEX_NIC:
+ case ICE_RXDID_FLEX_NIC_2:
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
+
+ mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
+ ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
+
+ ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
+
+ ice_init_flex_flags(hw, prof_id);
+ break;
+
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "Field init for profile ID %d not supported\n",
+ prof_id);
+ }
}
/**
@@ -333,20 +388,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
- mutex_init(&sw->mac_list_lock);
- INIT_LIST_HEAD(&sw->mac_list_head);
-
- mutex_init(&sw->vlan_list_lock);
- INIT_LIST_HEAD(&sw->vlan_list_head);
-
- mutex_init(&sw->eth_m_list_lock);
- INIT_LIST_HEAD(&sw->eth_m_list_head);
-
- mutex_init(&sw->promisc_list_lock);
- INIT_LIST_HEAD(&sw->promisc_list_head);
-
- mutex_init(&sw->mac_vlan_list_lock);
- INIT_LIST_HEAD(&sw->mac_vlan_list_head);
+ ice_init_def_sw_recp(hw);
return 0;
}
@@ -360,20 +402,232 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
struct ice_switch_info *sw = hw->switch_info;
struct ice_vsi_list_map_info *v_pos_map;
struct ice_vsi_list_map_info *v_tmp_map;
+ struct ice_sw_recipe *recps;
+ u8 i;
list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
list_entry) {
list_del(&v_pos_map->list_entry);
devm_kfree(ice_hw_to_dev(hw), v_pos_map);
}
+ recps = hw->switch_info->recp_list;
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+
+ recps[i].root_rid = i;
+ mutex_destroy(&recps[i].filt_rule_lock);
+ list_for_each_entry_safe(lst_itr, tmp_entry,
+ &recps[i].filt_rules, list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
+ }
+ ice_rm_all_sw_replay_rule_info(hw);
+ devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
+ devm_kfree(ice_hw_to_dev(hw), sw);
+}
- mutex_destroy(&sw->mac_list_lock);
- mutex_destroy(&sw->vlan_list_lock);
- mutex_destroy(&sw->eth_m_list_lock);
- mutex_destroy(&sw->promisc_list_lock);
- mutex_destroy(&sw->mac_vlan_list_lock);
+#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
+ (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
+#define ICE_FW_LOG_DESC_SIZE_MAX \
+ ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
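The (n - 1) term accounts for the single entry[] element already included in sizeof(struct ice_aqc_fw_logging_data). A quick check of the arithmetic with a reduced stand-in struct:

    #include <stdint.h>
    #include <stdio.h>

    struct logging_data_sketch {
            uint16_t entry[1]; /* one element is already part of sizeof() */
    };

    #define DESC_SIZE(n) (sizeof(struct logging_data_sketch) + \
                          (((n) - 1) * sizeof(((struct logging_data_sketch *)0)->entry)))

    int main(void)
    {
            /* 27 modules -> 2 + 26 * 2 = 54 bytes */
            printf("%zu\n", DESC_SIZE(27));
            return 0;
    }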
- devm_kfree(ice_hw_to_dev(hw), sw);
+/**
+ * ice_cfg_fw_log - configure FW logging
+ * @hw: pointer to the hw struct
+ * @enable: enable certain FW logging events if true, disable all if false
+ *
+ * This function enables/disables the FW logging via Rx CQ events and a UART
+ * port based on predetermined configurations. FW logging via the Rx CQ can be
+ * enabled/disabled for individual PFs. However, FW logging via the UART can
+ * only be enabled/disabled for all PFs on the same device.
+ *
+ * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
+ * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
+ * before initializing the device.
+ *
+ * When re/configuring FW logging, callers need to update the "cfg" elements of
+ * the hw->fw_log.evnts array with the desired logging event configurations for
+ * modules of interest. When disabling FW logging completely, the callers can
+ * just pass false in the "enable" parameter. On completion, the function will
+ * update the "cur" element of the hw->fw_log.evnts array with the resulting
+ * logging event configurations of the modules that are being re/configured. FW
+ * logging modules that are not part of a reconfiguration operation retain their
+ * previous states.
+ *
+ * Before resetting the device, it is recommended that the driver disables FW
+ * logging before shutting down the control queue. When disabling FW logging
+ * ("enable" = false), the latest configurations of FW logging events stored in
+ * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
+ * a device reset.
+ *
+ * When enabling FW logging to emit log messages via the Rx CQ during the
+ * device's initialization phase, a mechanism alternative to interrupt handlers
+ * needs to be used to extract FW log messages from the Rx CQ periodically and
+ * to prevent the Rx CQ from being full and stalling other types of control
+ * messages from FW to SW. Interrupts are typically disabled during the device's
+ * initialization phase.
+ */
+static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
+{
+ struct ice_aqc_fw_logging_data *data = NULL;
+ struct ice_aqc_fw_logging *cmd;
+ enum ice_status status = 0;
+ u16 i, chgs = 0, len = 0;
+ struct ice_aq_desc desc;
+ u8 actv_evnts = 0;
+ void *buf = NULL;
+
+ if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
+ return 0;
+
+ /* Disable FW logging only when the control queue is still responsive */
+ if (!enable &&
+ (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
+ return 0;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
+ cmd = &desc.params.fw_logging;
+
+ /* Indicate which controls are valid */
+ if (hw->fw_log.cq_en)
+ cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
+
+ if (hw->fw_log.uart_en)
+ cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
+
+ if (enable) {
+ /* Fill in an array of entries with FW logging modules and
+ * logging events being reconfigured.
+ */
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
+ u16 val;
+
+ /* Keep track of enabled event types */
+ actv_evnts |= hw->fw_log.evnts[i].cfg;
+
+ if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
+ continue;
+
+ if (!data) {
+ data = devm_kzalloc(ice_hw_to_dev(hw),
+ ICE_FW_LOG_DESC_SIZE_MAX,
+ GFP_KERNEL);
+ if (!data)
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ val = i << ICE_AQC_FW_LOG_ID_S;
+ val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
+ data->entry[chgs++] = cpu_to_le16(val);
+ }
+
+ /* Only enable FW logging if at least one module is specified.
+ * If FW logging is currently enabled but all modules are not
+ * enabled to emit log messages, disable FW logging altogether.
+ */
+ if (actv_evnts) {
+ /* Leave if there is effectively no change */
+ if (!chgs)
+ goto out;
+
+ if (hw->fw_log.cq_en)
+ cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
+
+ if (hw->fw_log.uart_en)
+ cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
+
+ buf = data;
+ len = ICE_FW_LOG_DESC_SIZE(chgs);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ }
+ }
+
+ status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
+ if (!status) {
+ /* Update the current configuration to reflect events enabled.
+ * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
+ * logging mode is enabled for the device. They do not reflect
+ * actual modules being enabled to emit log messages. So, their
+ * values remain unchanged even when all modules are disabled.
+ */
+ u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
+
+ hw->fw_log.actv_evnts = actv_evnts;
+ for (i = 0; i < cnt; i++) {
+ u16 v, m;
+
+ if (!enable) {
+ /* When disabling all FW logging events as part
+ * of device's de-initialization, the original
+ * configurations are retained, and can be used
+ * to reconfigure FW logging later if the device
+ * is re-initialized.
+ */
+ hw->fw_log.evnts[i].cur = 0;
+ continue;
+ }
+
+ v = le16_to_cpu(data->entry[i]);
+ m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
+ hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
+ }
+ }
+
+out:
+ if (data)
+ devm_kfree(ice_hw_to_dev(hw), data);
+
+ return status;
+}
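Per the kernel-doc above, a caller enables logging by setting the global cq_en/uart_en switches and the per-module cfg values before device init. A plausible fragment (the module index and cfg bits are assumptions; cfg holds pre-shift event bits, which ice_cfg_fw_log() shifts by ICE_AQC_FW_LOG_EN_S when encoding):

    /* hypothetical pre-init setup in the probe path */
    hw->fw_log.cq_en = true;  /* emit FW logs via Rx CQ events */
    hw->fw_log.uart_en = false;
    hw->fw_log.evnts[ICE_AQC_FW_LOG_ID_LINK].cfg = 0x2 | 0x8; /* init + error */

    if (ice_cfg_fw_log(hw, true))
            /* not fatal, mirroring ice_init_hw() below */
            ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");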
+
+/**
+ * ice_output_fw_log
+ * @hw: pointer to the hw struct
+ * @desc: pointer to the AQ message descriptor
+ * @buf: pointer to the buffer accompanying the AQ message
+ *
+ * Formats a FW Log message and outputs it via the standard driver logs.
+ */
+void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
+{
+ ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
+ ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
+ le16_to_cpu(desc->datalen));
+ ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
+}
+
+/**
+ * ice_get_itr_intrl_gran - determine int/intrl granularity
+ * @hw: pointer to the hw struct
+ *
+ * Determines the itr/intrl granularities based on the maximum aggregate
+ * bandwidth according to the device's configuration during power-on.
+ */
+static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
+{
+ u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
+ GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
+ GL_PWR_MODE_CTL_CAR_MAX_BW_S;
+
+ switch (max_agg_bw) {
+ case ICE_MAX_AGG_BW_200G:
+ case ICE_MAX_AGG_BW_100G:
+ case ICE_MAX_AGG_BW_50G:
+ hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
+ hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
+ break;
+ case ICE_MAX_AGG_BW_25G:
+ hw->itr_gran = ICE_ITR_GRAN_MAX_25;
+ hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to determine itr/intrl granularity\n");
+ return ICE_ERR_CFG;
+ }
+
+ return 0;
}
/**
@@ -400,16 +654,19 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
return status;
- /* set these values to minimum allowed */
- hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
- hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
- hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
- hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;
+ status = ice_get_itr_intrl_gran(hw);
+ if (status)
+ return status;
status = ice_init_all_ctrlq(hw);
if (status)
goto err_unroll_cqinit;
+ /* Enable FW logging. Not fatal if this fails. */
+ status = ice_cfg_fw_log(hw, true);
+ if (status)
+ ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
+
status = ice_clear_pf_cfg(hw);
if (status)
goto err_unroll_cqinit;
@@ -472,6 +729,13 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
+ /* need a valid SW entry point to build a Tx tree */
+ if (!hw->sw_entry_point_layer) {
+ ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
+ status = ICE_ERR_CFG;
+ goto err_unroll_sched;
+ }
+
status = ice_init_fltr_mgmt_struct(hw);
if (status)
goto err_unroll_sched;
@@ -494,7 +758,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
- ice_init_flex_parser(hw);
+ ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
+ ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
return 0;
@@ -515,15 +780,18 @@ err_unroll_cqinit:
*/
void ice_deinit_hw(struct ice_hw *hw)
{
+ ice_cleanup_fltr_mgmt_struct(hw);
+
ice_sched_cleanup_all(hw);
- ice_shutdown_all_ctrlq(hw);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
hw->port_info = NULL;
}
- ice_cleanup_fltr_mgmt_struct(hw);
+ /* Attempt to disable FW logging before shutting down control queues */
+ ice_cfg_fw_log(hw, false);
+ ice_shutdown_all_ctrlq(hw);
}
/**
@@ -652,6 +920,8 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
val = GLGEN_RTRIG_GLOBR_M;
break;
+ default:
+ return ICE_ERR_PARAM;
}
val |= rd32(hw, GLGEN_RTRIG);
@@ -904,7 +1174,22 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* @timeout: the maximum time in ms that the driver may hold the resource
* @cd: pointer to command details structure or NULL
*
- * requests common resource using the admin queue commands (0x0008)
+ * Requests common resource using the admin queue commands (0x0008).
+ * When attempting to acquire the Global Config Lock, the driver can
+ * learn of three states:
+ * 1) ICE_SUCCESS - acquired lock, and can perform download package
+ * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
+ * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
+ * successfully downloaded the package; the driver does
+ * not have to download the package and can continue
+ * loading
+ *
+ * Note that if the caller is in an acquire lock, perform action, release lock
+ * phase of operation, it is possible that the FW may detect a timeout and issue
+ * a CORER. In this case, the driver will receive a CORER interrupt and will
+ * have to determine its cause. The calling thread that is handling this flow
+ * will likely get an error propagated back to it indicating the Download
+ * Package, Update Package or the Release Resource AQ commands timed out.
*/
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
@@ -922,13 +1207,43 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
cmd_resp->res_id = cpu_to_le16(res);
cmd_resp->access_type = cpu_to_le16(access);
cmd_resp->res_number = cpu_to_le32(sdp_number);
+ cmd_resp->timeout = cpu_to_le32(*timeout);
+ *timeout = 0;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
/* The completion specifies the maximum time in ms that the driver
* may hold the resource in the Timeout field.
- * If the resource is held by someone else, the command completes with
- * busy return value and the timeout field indicates the maximum time
- * the current owner of the resource has to free it.
+ */
+
+ /* Global config lock response utilizes an additional status field.
+ *
+ * If the Global config lock resource is held by some other driver, the
+ * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
+ * and the timeout field indicates the maximum time the current owner
+ * of the resource has to free it.
+ */
+ if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
+ if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+ return 0;
+ } else if (le16_to_cpu(cmd_resp->status) ==
+ ICE_AQ_RES_GLBL_IN_PROG) {
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+ return ICE_ERR_AQ_ERROR;
+ } else if (le16_to_cpu(cmd_resp->status) ==
+ ICE_AQ_RES_GLBL_DONE) {
+ return ICE_ERR_AQ_NO_WORK;
+ }
+
+ /* invalid FW response, force a timeout immediately */
+ *timeout = 0;
+ return ICE_ERR_AQ_ERROR;
+ }
+
+ /* If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
*/
if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
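
A compilable sketch of how a caller might branch on the three Global Config Lock outcomes described in the comment above; the enum values and the stub are local stand-ins, not the driver's ice_status codes:

#include <stdio.h>

enum status { OK = 0, ERR_AQ_ERROR = 1, ERR_AQ_NO_WORK = 2 };

/* Stub standing in for acquiring the Global Config Lock */
static enum status acquire_global_cfg_lock(void) { return ERR_AQ_NO_WORK; }

int main(void)
{
	switch (acquire_global_cfg_lock()) {
	case OK:	     /* lock held: download the package, then release */
		puts("download package");
		break;
	case ERR_AQ_NO_WORK: /* another PF already downloaded it: keep loading */
		puts("skip download, continue load");
		break;
	default:	     /* could not get the lock: fail the load */
		puts("fail to load");
		break;
	}
	return 0;
}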
@@ -967,30 +1282,28 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
* @hw: pointer to the HW structure
* @res: resource id
* @access: access type (read or write)
+ * @timeout: timeout in milliseconds
*
* This function will attempt to acquire the ownership of a resource.
*/
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
- enum ice_aq_res_access_type access)
+ enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
u32 delay = ICE_RES_POLLING_DELAY_MS;
+ u32 time_left = timeout;
enum ice_status status;
- u32 time_left = 0;
- u32 timeout;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- /* An admin queue return code of ICE_AQ_RC_EEXIST means that another
- * driver has previously acquired the resource and performed any
- * necessary updates; in this case the caller does not obtain the
- * resource and has no further work to do.
+ /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
*/
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
- status = ICE_ERR_AQ_NO_WORK;
+ if (status == ICE_ERR_AQ_NO_WORK)
goto ice_acquire_res_exit;
- }
if (status)
ice_debug(hw, ICE_DBG_RES,
@@ -1003,11 +1316,9 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
timeout = (timeout > delay) ? timeout - delay : 0;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
+ if (status == ICE_ERR_AQ_NO_WORK)
/* lock free, but no work to do */
- status = ICE_ERR_AQ_NO_WORK;
break;
- }
if (!status)
/* lock acquired */
@@ -1095,6 +1406,28 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
u16 cap = le16_to_cpu(cap_resp->cap);
switch (cap) {
+ case ICE_AQC_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
+ break;
+ case ICE_AQC_CAPS_VF:
+ if (dev_p) {
+ dev_p->num_vfs_exposed = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VFs exposed = %d\n",
+ dev_p->num_vfs_exposed);
+ } else if (func_p) {
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VFs allocated = %d\n",
+ func_p->num_allocd_vfs);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: VF base_id = %d\n",
+ func_p->vf_base_id);
+ }
+ break;
case ICE_AQC_CAPS_VSI:
if (dev_p) {
dev_p->num_vsi_allocd_to_host = number;
@@ -1171,7 +1504,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
* @hw: pointer to the hw struct
* @buf: a virtual buffer to hold the capabilities
* @buf_size: Size of the virtual buffer
- * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
+ * @cap_count: cap count needed if AQ err==ENOMEM
* @opc: capabilities type to discover - pass in the command opcode
* @cd: pointer to command details structure or NULL
*
@@ -1179,7 +1512,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
* the firmware.
*/
static enum ice_status
-ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
+ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_list_caps *cmd;
@@ -1197,59 +1530,77 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status)
ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
- *data_size = le16_to_cpu(desc.datalen);
-
+ else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
+ *cap_count =
+ DIV_ROUND_UP(le16_to_cpu(desc.datalen),
+ sizeof(struct ice_aqc_list_caps_elem));
return status;
}
/**
- * ice_get_caps - get info about the HW
+ * ice_discover_caps - get info about the HW
* @hw: pointer to the hardware structure
+ * @opc: capabilities type to discover - pass in the command opcode
*/
-enum ice_status ice_get_caps(struct ice_hw *hw)
+static enum ice_status ice_discover_caps(struct ice_hw *hw,
+ enum ice_adminq_opc opc)
{
enum ice_status status;
- u16 data_size = 0;
+ u32 cap_count;
u16 cbuf_len;
u8 retries;
/* The driver doesn't know how many capabilities the device will return
* so the buffer size required isn't known ahead of time. The driver
* starts with cbuf_len and if this turns out to be insufficient, the
- * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
- * The driver then allocates the buffer of this size and retries the
- * operation. So it follows that the retry count is 2.
+ * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
+ * The driver then allocates the buffer based on the count and retries
+ * the operation. So it follows that the retry count is 2.
*/
#define ICE_GET_CAP_BUF_COUNT 40
#define ICE_GET_CAP_RETRY_COUNT 2
- cbuf_len = ICE_GET_CAP_BUF_COUNT *
- sizeof(struct ice_aqc_list_caps_elem);
-
+ cap_count = ICE_GET_CAP_BUF_COUNT;
retries = ICE_GET_CAP_RETRY_COUNT;
do {
void *cbuf;
+ cbuf_len = (u16)(cap_count *
+ sizeof(struct ice_aqc_list_caps_elem));
cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
if (!cbuf)
return ICE_ERR_NO_MEMORY;
- status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
- ice_aqc_opc_list_func_caps, NULL);
+ status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
+ opc, NULL);
devm_kfree(ice_hw_to_dev(hw), cbuf);
if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
break;
/* If ENOMEM is returned, try again with bigger buffer */
- cbuf_len = data_size;
} while (--retries);
return status;
}
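
The two-pass discovery above is a common grow-and-retry allocation pattern: issue the command with a guessed buffer, and if the firmware answers ENOMEM it also reports the element count actually needed, so the second pass is exact. A self-contained model of that control flow (query_caps() is a stub, not the driver's AQ path):

#include <stdlib.h>
#include <stdio.h>

#define NEEDED 100	/* what the "firmware" really requires */
#define FIRST_GUESS 40
#define RETRIES 2

/* Stub: fails with the required count the first time, like ICE_AQ_RC_ENOMEM */
static int query_caps(void *buf, size_t count, size_t *needed)
{
	(void)buf;	/* a real AQ command would fill this */
	if (count < NEEDED) {
		*needed = NEEDED;
		return -1;	/* "ENOMEM": buffer too small */
	}
	return 0;
}

int main(void)
{
	size_t count = FIRST_GUESS;
	int retries = RETRIES, rc = -1;

	while (retries--) {
		void *buf = calloc(count, 16);	/* 16 = pretend element size */
		size_t needed = 0;

		if (!buf)
			return 1;
		rc = query_caps(buf, count, &needed);
		free(buf);
		if (rc == 0)
			break;
		count = needed;		/* retry with the exact size */
	}
	printf("rc=%d after sizing to %zu elements\n", rc, count);
	return rc;
}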
/**
+ * ice_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_get_caps(struct ice_hw *hw)
+{
+ enum ice_status status;
+
+ status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
+ if (!status)
+ status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
+
+ return status;
+}
+
+/**
* ice_aq_manage_mac_write - manage MAC address write command
* @hw: pointer to the hw struct
* @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
@@ -1307,6 +1658,110 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
}
/**
+ * ice_get_link_speed_based_on_phy_type - returns link speed
+ * @phy_type_low: lower part of phy_type
+ *
+ * This helper function will convert a phy_type_low to its corresponding link
+ * speed.
+ * Note: exactly one bit should be set in phy_type_low, as this function
+ * converts one PHY type to its speed.
+ * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN is returned.
+ * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN is returned.
+ */
+static u16
+ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+{
+ u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ switch (phy_type_low) {
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ case ICE_PHY_TYPE_LOW_100M_SGMII:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ case ICE_PHY_TYPE_LOW_1G_SGMII:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
+ break;
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
+ break;
+ default:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed_phy_type_low;
+}
+
+/**
+ * ice_update_phy_type
+ * @phy_type_low: pointer to the lower part of phy_type
+ * @link_speeds_bitmap: targeted link speeds bitmap
+ *
+ * Note: For the link_speeds_bitmap structure, see
+ * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
+ * link_speeds_bitmap that includes multiple speeds.
+ *
+ * Each set bit in phy_type_low represents a link speed. This helper
+ * function turns on the bits in phy_type_low that correspond to the
+ * speeds set in the link_speeds_bitmap input parameter.
+ */
+void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
+{
+ u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+ u64 pt_low;
+ int index;
+
+ /* We first check with low part of phy_type */
+ for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
+ pt_low = BIT_ULL(index);
+ speed = ice_get_link_speed_based_on_phy_type(pt_low);
+
+ if (link_speeds_bitmap & speed)
+ *phy_type_low |= BIT_ULL(index);
+ }
+}
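
ice_update_phy_type() is essentially a reverse lookup: for every candidate bit, translate it to a speed and keep the bit if that speed is requested. A small stand-alone model of the loop, with a toy speed table standing in for the full PHY type list:

#include <stdint.h>
#include <stdio.h>

#define MAX_INDEX 3

/* Toy mapping of phy-type bit index -> link speed bit (an assumption) */
static const uint16_t speed_of_bit[MAX_INDEX + 1] = {
	0x1 /* 100M */, 0x2 /* 1G */, 0x2 /* 1G */, 0x4 /* 10G */
};

int main(void)
{
	uint16_t wanted = 0x2 | 0x4;	/* request 1G and 10G */
	uint64_t phy_type_low = 0;

	for (int i = 0; i <= MAX_INDEX; i++)
		if (wanted & speed_of_bit[i])
			phy_type_low |= 1ULL << i;

	printf("phy_type_low = 0x%llx\n", (unsigned long long)phy_type_low);
	return 0;	/* prints 0xe: bits 1, 2 and 3 */
}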
+
+/**
* ice_aq_set_phy_cfg
* @hw: pointer to the hw struct
* @lport: logical port number
@@ -1318,19 +1773,18 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* mode as the PF may not have the privilege to set some of the PHY Config
* parameters. This status will be indicated by the command response (0x0601).
*/
-static enum ice_status
+enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
- struct ice_aqc_set_phy_cfg *cmd;
struct ice_aq_desc desc;
if (!cfg)
return ICE_ERR_PARAM;
- cmd = &desc.params.set_phy;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
- cmd->lport_num = lport;
+ desc.params.set_phy.lport_num = lport;
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}
@@ -1339,8 +1793,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
* ice_update_link_info - update status of the HW network link
* @pi: port info structure of the interested logical port
*/
-static enum ice_status
-ice_update_link_info(struct ice_port_info *pi)
+enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_phy_info *phy_info;
@@ -1379,12 +1832,12 @@ out:
* ice_set_fc
* @pi: port information structure
* @aq_failures: pointer to status code, specific to ice_set_fc routine
- * @atomic_restart: enable automatic link update
+ * @ena_auto_link_update: enable automatic link update
*
* Set the requested flow control mode.
*/
enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_aqc_get_phy_caps_data *pcaps;
@@ -1434,8 +1887,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
int retry_count, retry_max = 10;
/* Auto restart link so settings take effect */
- if (atomic_restart)
- cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK;
+ if (ena_auto_link_update)
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
/* Copy over all the old settings */
cfg.phy_type_low = pcaps->phy_type_low;
cfg.low_power_ctrl = pcaps->low_power_ctrl;
@@ -1654,7 +2107,7 @@ ice_aq_get_set_rss_lut_exit:
/**
* ice_aq_get_rss_lut
* @hw: pointer to the hardware structure
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @lut_type: LUT table type
* @lut: pointer to the LUT buffer provided by the caller
* @lut_size: size of the LUT buffer
@@ -1662,17 +2115,20 @@ ice_aq_get_set_rss_lut_exit:
* get the RSS lookup table, PF or VSI type
*/
enum ice_status
-ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
- u16 lut_size)
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+ u8 *lut, u16 lut_size)
{
- return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
- false);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ lut_type, lut, lut_size, 0, false);
}
/**
* ice_aq_set_rss_lut
* @hw: pointer to the hardware structure
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @lut_type: LUT table type
* @lut: pointer to the LUT buffer provided by the caller
* @lut_size: size of the LUT buffer
@@ -1680,11 +2136,14 @@ ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
* set the RSS lookup table, PF or VSI type
*/
enum ice_status
-ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
- u16 lut_size)
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+ u8 *lut, u16 lut_size)
{
- return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
- true);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ lut_type, lut, lut_size, 0, true);
}
/**
@@ -1725,31 +2184,39 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
/**
* ice_aq_get_rss_key
* @hw: pointer to the hw struct
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @key: pointer to key info struct
*
* get the RSS key per VSI
*/
enum ice_status
-ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *key)
{
- return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ key, false);
}
/**
* ice_aq_set_rss_key
* @hw: pointer to the hw struct
- * @vsi_id: VSI FW index
+ * @vsi_handle: software VSI handle
* @keys: pointer to key info struct
*
* set the RSS key per VSI
*/
enum ice_status
-ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys)
{
- return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ keys, true);
}
/**
@@ -1820,6 +2287,8 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
* @num_qgrps: number of groups in the list
* @qg_list: the list of groups to disable
* @buf_size: the total size of the qg_list buffer in bytes
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* Disable LAN Tx queue (0x0C31)
@@ -1827,6 +2296,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
struct ice_aqc_dis_txqs *cmd;
@@ -1836,14 +2306,45 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
cmd = &desc.params.dis_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
- if (!qg_list)
+ /* qg_list can be NULL only in VM/VF reset flow */
+ if (!qg_list && !rst_src)
return ICE_ERR_PARAM;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
return ICE_ERR_PARAM;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
cmd->num_entries = num_qgrps;
+ cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
+ ICE_AQC_Q_DIS_TIMEOUT_M);
+
+ switch (rst_src) {
+ case ICE_VM_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
+ cmd->vmvf_and_timeout |=
+ cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_VF_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
+ /* In this case, FW expects vmvf_num to be an absolute VF ID */
+ cmd->vmvf_and_timeout |=
+ cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
+ ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_NO_RESET:
+ default:
+ break;
+ }
+
+ /* If no queue group info, we are in a reset flow. Issue the AQ */
+ if (!qg_list)
+ goto do_aq;
+
+ /* set RD bit to indicate that command buffer is provided by the driver
+ * and it needs to be read by the firmware
+ */
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
for (i = 0; i < num_qgrps; ++i) {
/* Calculate the size taken up by the queue IDs in this group */
sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
@@ -1859,6 +2360,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
if (buf_size != sz)
return ICE_ERR_PARAM;
+do_aq:
return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
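
The NULL-list rule above ("qg_list can be NULL only in VM/VF reset flow") gives the function two shapes: a buffer-carrying command in the normal path and a bare direct command during reset. A compilable toy version of that argument validation (names and return values are local stand-ins, not the driver's):

#include <stdio.h>
#include <stddef.h>

enum rst_src { NO_RESET = 0, VM_RESET, VF_RESET };

static int dis_txq(const void *qg_list, size_t buf_size, enum rst_src rst)
{
	/* a queue list may be omitted only when completing a VM/VF reset */
	if (!qg_list && rst == NO_RESET)
		return -1;	/* ICE_ERR_PARAM analogue */
	if (!qg_list)
		return printf("direct cmd, rst=%d\n", rst), 0;
	return printf("cmd with %zu-byte buffer\n", buf_size), 0;
}

int main(void)
{
	char buf[16];

	dis_txq(buf, sizeof(buf), NO_RESET);	/* normal queue disable */
	dis_txq(NULL, 0, VF_RESET);		/* VF reset completion */
	return dis_txq(NULL, 0, NO_RESET) == -1 ? 0 : 1;
}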
@@ -2088,7 +2590,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/**
* ice_ena_vsi_txq
* @pi: port information structure
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
* @tc: tc number
* @num_qgrps: Number of added queue groups
* @buf: list of queue groups to be added
@@ -2098,7 +2600,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
 * This function adds one LAN queue
*/
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
@@ -2115,15 +2617,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
hw = pi->hw;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
mutex_lock(&pi->sched_lock);
/* find a parent node */
- parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
+ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_LAN);
if (!parent) {
status = ICE_ERR_PARAM;
goto ena_txq_exit;
}
+
buf->parent_teid = parent->info.node_teid;
node.parent_teid = parent->info.node_teid;
/* Mark that the values in the "generic" section as valid. The default
@@ -2161,13 +2667,16 @@ ena_txq_exit:
* @num_queues: number of queues
* @q_ids: pointer to the q_id array
* @q_teids: pointer to queue node teids
+ * @rst_src: if called due to reset, specifies the RST source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* This function removes queues and their corresponding nodes in SW DB
*/
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, struct ice_sq_cd *cd)
+ u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item qg_list;
@@ -2176,6 +2685,15 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
+ /* if the queues are already disabled but the disable queue command
+ * still has to be sent to complete the VF reset, then call
+ * ice_aq_dis_lan_txq without any queue information
+ */
+
+ if (!num_queues && rst_src)
+ return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
+ NULL);
+
mutex_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
@@ -2188,7 +2706,8 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
qg_list.num_qs = 1;
qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
- sizeof(qg_list), cd);
+ sizeof(qg_list), rst_src, vmvf_num,
+ cd);
if (status)
break;
@@ -2201,7 +2720,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @maxqs: max queues array per TC
* @owner: lan or rdma
@@ -2209,7 +2728,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
* This function adds/updates the VSI queues per TC.
*/
static enum ice_status
-ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *maxqs, u8 owner)
{
enum ice_status status = 0;
@@ -2218,6 +2737,9 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
mutex_lock(&pi->sched_lock);
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
@@ -2225,7 +2747,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
if (!ice_sched_get_tc_node(pi, i))
continue;
- status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
+ status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
ice_is_tc_ena(tc_bitmap, i));
if (status)
break;
@@ -2238,16 +2760,140 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
/**
* ice_cfg_vsi_lan - configure VSI lan queues
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @max_lanqs: max lan queues array per TC
*
* This function adds/updates the VSI lan queues per TC.
*/
enum ice_status
-ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs)
{
- return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
+ return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
ICE_SCHED_NODE_OWNER_LAN);
}
+
+/**
+ * ice_replay_pre_init - replay pre initialization
+ * @hw: pointer to the hw struct
+ *
+ * Initializes required config data for VSI, FD, ACL, and RSS before replay.
+ */
+static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ u8 i;
+
+ /* Delete old entries from replay filter list head if there is any */
+ ice_rm_all_sw_replay_rule_info(hw);
+ /* At the start of replay, move entries into the replay_rules list;
+ * this allows rule entries to be added back to the filt_rules list,
+ * which is the operational list.
+ */
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++)
+ list_replace_init(&sw->recp_list[i].filt_rules,
+ &sw->recp_list[i].filt_replay_rules);
+
+ return 0;
+}
+
+/**
+ * ice_replay_vsi - replay VSI configuration
+ * @hw: pointer to the hw struct
+ * @vsi_handle: driver VSI handle
+ *
+ * Restore all VSI configuration after reset. It is required to call this
+ * function with main VSI first.
+ */
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+{
+ enum ice_status status;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ /* Replay pre-initialization if there is any */
+ if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
+ status = ice_replay_pre_init(hw);
+ if (status)
+ return status;
+ }
+
+ /* Replay per VSI all filters */
+ status = ice_replay_vsi_all_fltr(hw, vsi_handle);
+ return status;
+}
+
+/**
+ * ice_replay_post - post replay configuration cleanup
+ * @hw: pointer to the hw struct
+ *
+ * Post replay cleanup.
+ */
+void ice_replay_post(struct ice_hw *hw)
+{
+ /* Delete old entries from replay filter list head */
+ ice_rm_all_sw_replay_rule_info(hw);
+}
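
Taken together, the three functions above imply an ordering contract for reset recovery: pre-init work is keyed off the main VSI, every VSI is then replayed, and the replay list is torn down afterwards. A sketch of a caller honoring that contract (the handle values and loop bounds are illustrative, not the driver's):

#include <stdio.h>

#define MAIN_VSI_HANDLE 0	/* assumption: main VSI is replayed first */
#define NUM_VSI 3

static int replay_vsi(int handle)
{
	if (handle == MAIN_VSI_HANDLE)
		puts("pre-init: move filters to replay list");
	printf("replay filters for VSI %d\n", handle);
	return 0;
}

int main(void)
{
	/* main VSI first, as ice_replay_vsi() requires */
	for (int h = 0; h < NUM_VSI; h++)
		if (replay_vsi(h))
			return 1;
	puts("post: drop replay list");	/* ice_replay_post() analogue */
	return 0;
}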
+
+/**
+ * ice_stat_update40 - read 40 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @hireg: high 32 bit HW register to read from
+ * @loreg: low 32 bit HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+{
+ u64 new_data;
+
+ new_data = rd32(hw, loreg);
+ new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+
+ /* device stats are not reset at PFR, they likely will not be zeroed
+ * when the driver starts. So save the first values read and use them as
+ * offsets to be subtracted from the raw values in order to report stats
+ * that count from zero.
+ */
+ if (!prev_stat_loaded)
+ *prev_stat = new_data;
+ if (new_data >= *prev_stat)
+ *cur_stat = new_data - *prev_stat;
+ else
+ /* to manage the potential roll-over */
+ *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
+ *cur_stat &= 0xFFFFFFFFFFULL;
+}
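
The rollover handling above is easiest to check with numbers: if the previous 40-bit reading was near the top of the counter and the new raw value is small, the true delta is new + 2^40 - prev. A self-contained check of that arithmetic:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define BIT40 (1ULL << 40)

static uint64_t stat_update40(uint64_t new_data, uint64_t prev)
{
	uint64_t cur;

	if (new_data >= prev)
		cur = new_data - prev;
	else
		cur = (new_data + BIT40) - prev;	/* counter wrapped */
	return cur & 0xFFFFFFFFFFULL;			/* keep 40 bits */
}

int main(void)
{
	/* prev near the top of the 40-bit range, new value has wrapped */
	uint64_t prev = BIT40 - 5;
	uint64_t new_data = 10;

	assert(stat_update40(new_data, prev) == 15);
	puts("wrap-around delta computed correctly");
	return 0;
}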
+
+/**
+ * ice_stat_update32 - read 32 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @reg: HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+
+ /* device stats are not reset at PFR, they likely will not be zeroed
+ * when the driver starts. So save the first values read and use them as
+ * offsets to be subtracted from the raw values in order to report stats
+ * that count from zero.
+ */
+ if (!prev_stat_loaded)
+ *prev_stat = new_data;
+ if (new_data >= *prev_stat)
+ *cur_stat = new_data - *prev_stat;
+ else
+ /* to manage the potential roll-over */
+ *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 9a5519130af1..1900681289a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -7,6 +7,7 @@
#include "ice.h"
#include "ice_type.h"
#include "ice_switch.h"
+#include <linux/avf/virtchnl.h>
void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
u16 buf_len);
@@ -21,9 +22,10 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
enum ice_status
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
+enum ice_status ice_update_link_info(struct ice_port_info *pi);
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
- enum ice_aq_res_access_type access);
+ enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status
@@ -37,17 +39,18 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
enum ice_status
-ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
u16 lut_size);
enum ice_status
-ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
u16 lut_size);
enum ice_status
-ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
enum ice_status
-ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
+
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
@@ -58,12 +61,24 @@ enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
+ struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_sq_cd *cd);
+void
+ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap);
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart);
+ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
+ struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
+ bool ena_auto_link_update);
+
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
@@ -75,12 +90,20 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, struct ice_sq_cd *cmd_details);
+ u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cmd_details);
enum ice_status
-ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+void ice_replay_post(struct ice_hw *hw);
+void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
+void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
+void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 62be72fdc8f3..84c967294eaf 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -33,6 +33,36 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
}
/**
+ * ice_mailbox_init_regs - Initialize Mailbox registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_mailbox_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+
+ /* set head and tail registers in our local struct */
+ cq->sq.head = PF_MBX_ATQH;
+ cq->sq.tail = PF_MBX_ATQT;
+ cq->sq.len = PF_MBX_ATQLEN;
+ cq->sq.bah = PF_MBX_ATQBAH;
+ cq->sq.bal = PF_MBX_ATQBAL;
+ cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
+ cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
+ cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
+
+ cq->rq.head = PF_MBX_ARQH;
+ cq->rq.tail = PF_MBX_ARQT;
+ cq->rq.len = PF_MBX_ARQLEN;
+ cq->rq.bah = PF_MBX_ARQBAH;
+ cq->rq.bal = PF_MBX_ARQBAL;
+ cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
+ cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
+ cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
+}
+
+/**
* ice_check_sq_alive
* @hw: pointer to the hw struct
* @cq: pointer to the specific Control queue
@@ -518,22 +548,31 @@ shutdown_sq_out:
/**
* ice_aq_ver_check - Check the reported AQ API version.
- * @fw_branch: The "branch" of FW, typically describes the device type
- * @fw_major: The major version of the FW API
- * @fw_minor: The minor version increment of the FW API
+ * @hw: pointer to the hardware structure
*
* Checks if the driver should load on a given AQ API version.
*
* Return: 'true' iff the driver should attempt to load. 'false' otherwise.
*/
-static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor)
+static bool ice_aq_ver_check(struct ice_hw *hw)
{
- if (fw_branch != EXP_FW_API_VER_BRANCH)
- return false;
- if (fw_major != EXP_FW_API_VER_MAJOR)
- return false;
- if (fw_minor != EXP_FW_API_VER_MINOR)
+ if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+ /* Major API version is newer than expected, don't load */
+ dev_warn(ice_hw_to_dev(hw),
+ "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
return false;
+ } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
+ if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+ dev_info(ice_hw_to_dev(hw),
+ "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+ dev_info(ice_hw_to_dev(hw),
+ "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ } else {
+ /* Major API version is older than expected, log a warning */
+ dev_info(ice_hw_to_dev(hw),
+ "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ }
return true;
}
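
The policy above only hard-fails on a newer major API version; minor skew is tolerated, with an informational message once it drifts by more than two in either direction. A compilable distillation of that decision table (the EXP_* values are placeholders, not the driver's expected version):

#include <stdbool.h>
#include <stdio.h>

#define EXP_MAJOR 1
#define EXP_MINOR 3	/* placeholder expected API version */

static bool ver_check(unsigned int maj, unsigned int min)
{
	if (maj > EXP_MAJOR) {
		puts("NVM too new: refuse to load");
		return false;
	}
	if (maj == EXP_MAJOR) {
		if (min > EXP_MINOR + 2)
			puts("newer NVM than expected: update the driver");
		else if (min + 2 < EXP_MINOR)
			puts("older NVM than expected: update the NVM");
	} else {
		puts("older major API: update the NVM");
	}
	return true;	/* load in every case except a newer major */
}

int main(void)
{
	return ver_check(1, 6) && !ver_check(2, 0) ? 0 : 1;
}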
@@ -588,8 +627,7 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
if (status)
goto init_ctrlq_free_rq;
- if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver,
- hw->api_min_ver)) {
+ if (!ice_aq_ver_check(hw)) {
status = ICE_ERR_FW_API_VER;
goto init_ctrlq_free_rq;
}
@@ -597,11 +635,11 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
return 0;
init_ctrlq_free_rq:
- if (cq->rq.head) {
+ if (cq->rq.count) {
ice_shutdown_rq(hw, cq);
mutex_destroy(&cq->rq_lock);
}
- if (cq->sq.head) {
+ if (cq->sq.count) {
ice_shutdown_sq(hw, cq);
mutex_destroy(&cq->sq_lock);
}
@@ -631,6 +669,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
ice_adminq_init_regs(hw);
cq = &hw->adminq;
break;
+ case ICE_CTL_Q_MAILBOX:
+ ice_mailbox_init_regs(hw);
+ cq = &hw->mailboxq;
+ break;
default:
return ICE_ERR_PARAM;
}
@@ -688,7 +730,12 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
if (ret_code)
return ret_code;
- return ice_init_check_adminq(hw);
+ ret_code = ice_init_check_adminq(hw);
+ if (ret_code)
+ return ret_code;
+
+ /* Init Mailbox queue */
+ return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
@@ -706,15 +753,18 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
if (ice_check_sq_alive(hw, cq))
ice_aq_q_shutdown(hw, true);
break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ break;
default:
return;
}
- if (cq->sq.head) {
+ if (cq->sq.count) {
ice_shutdown_sq(hw, cq);
mutex_destroy(&cq->sq_lock);
}
- if (cq->rq.head) {
+ if (cq->rq.count) {
ice_shutdown_rq(hw, cq);
mutex_destroy(&cq->rq_lock);
}
@@ -728,6 +778,8 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ /* Shutdown PF-VF Mailbox */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
@@ -806,6 +858,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
u16 retval = 0;
u32 val = 0;
+ /* if reset is in progress return a soft error */
+ if (hw->reset_ongoing)
+ return ICE_ERR_RESET_ONGOING;
mutex_lock(&cq->sq_lock);
cq->sq_last_status = ICE_AQ_RC_OK;
@@ -847,7 +902,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
if (cd)
- memcpy(details, cd, sizeof(*details));
+ *details = *cd;
else
memset(details, 0, sizeof(*details));
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index ea02b89243e2..437f832fd7c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -8,6 +8,7 @@
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
+#define ICE_MBXQ_MAX_BUF_LEN 4096
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -28,6 +29,7 @@
enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
+ ICE_CTL_Q_MAILBOX,
};
/* Control Queue default settings */
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 0e14d7215a6e..a6f0a5c0c305 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -5,15 +5,11 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
-/* Intel(R) Ethernet Controller C810 for backplane */
+/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_C810_BACKPLANE 0x1591
-/* Intel(R) Ethernet Controller C810 for QSFP */
+/* Intel(R) Ethernet Controller E810-C for QSFP */
#define ICE_DEV_ID_C810_QSFP 0x1592
-/* Intel(R) Ethernet Controller C810 for SFP */
+/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_C810_SFP 0x1593
-/* Intel(R) Ethernet Controller C810/X557-AT 10GBASE-T */
-#define ICE_DEV_ID_C810_10G_BASE_T 0x1594
-/* Intel(R) Ethernet Controller C810 1GbE */
-#define ICE_DEV_ID_C810_SGMII 0x1595
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index c71a9b528d6d..96923580f2a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -332,58 +332,473 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
}
-static int
-ice_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *ks)
+/**
+ * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
+ * @netdev: network interface device structure
+ * @ks: ethtool link ksettings struct to fill out
+ */
+static void ice_phy_type_to_ethtool(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
- bool link_up;
+ u64 phy_types_low;
hw_link_info = &vsi->port_info->phy.link_info;
- link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+ phy_types_low = vsi->port_info->phy.phy_type_low;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseKX_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseKX_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseX_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseX_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X ||
+ phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseX_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseX_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseSR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseSR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseLR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseSR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseSR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseKR_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseKR_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseKR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseKR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseSR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseSR4_Full);
+ }
+ if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
+ if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
+ }
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseT_Full);
+ /* Autoneg PHY types */
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
+ phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
+ phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
+ phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Autoneg);
+ }
+}
- /* set speed and duplex */
- if (link_up) {
- switch (hw_link_info->link_speed) {
- case ICE_AQ_LINK_SPEED_100MB:
- ks->base.speed = SPEED_100;
- break;
- case ICE_AQ_LINK_SPEED_2500MB:
- ks->base.speed = SPEED_2500;
- break;
- case ICE_AQ_LINK_SPEED_5GB:
- ks->base.speed = SPEED_5000;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- ks->base.speed = SPEED_10000;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- ks->base.speed = SPEED_25000;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- ks->base.speed = SPEED_40000;
- break;
- default:
- ks->base.speed = SPEED_UNKNOWN;
- break;
- }
+#define TEST_SET_BITS_TIMEOUT 50
+#define TEST_SET_BITS_SLEEP_MAX 2000
+#define TEST_SET_BITS_SLEEP_MIN 1000
- ks->base.duplex = DUPLEX_FULL;
- } else {
- ks->base.speed = SPEED_UNKNOWN;
- ks->base.duplex = DUPLEX_UNKNOWN;
+/**
+ * ice_get_settings_link_up - Get Link settings for when link is up
+ * @ks: ethtool ksettings to fill in
+ * @netdev: network interface device structure
+ */
+static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
+ struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ethtool_link_ksettings cap_ksettings;
+ struct ice_link_status *link_info;
+ struct ice_vsi *vsi = np->vsi;
+ bool unrecog_phy_low = false;
+
+ link_info = &vsi->port_info->phy.link_info;
+
+ /* Initialize supported and advertised settings based on phy settings */
+ switch (link_info->phy_type_low) {
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_100M_SGMII:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1G_SGMII:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseKX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseKX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseX_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseSR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseSR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseKR_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseSR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseKR4_Full);
+ break;
+ default:
+ unrecog_phy_low = true;
+ }
+
+ if (unrecog_phy_low) {
+ /* if we got here and link is up something bad is afoot */
+ netdev_info(netdev, "WARNING: Unrecognized PHY_Low (0x%llx).\n",
+ (u64)link_info->phy_type_low);
}
+ /* Now that we've worked out everything that could be supported by the
+ * current PHY type, get what is supported by the NVM and intersect
+ * them to get what is truly supported
+ */
+ memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings));
+ ice_phy_type_to_ethtool(netdev, &cap_ksettings);
+ ethtool_intersect_link_masks(ks, &cap_ksettings);
+
+ switch (link_info->link_speed) {
+ case ICE_AQ_LINK_SPEED_40GB:
+ ks->base.speed = SPEED_40000;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ ks->base.speed = SPEED_25000;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ ks->base.speed = SPEED_20000;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ ks->base.speed = SPEED_10000;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ ks->base.speed = SPEED_5000;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ ks->base.speed = SPEED_2500;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ ks->base.speed = SPEED_1000;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ ks->base.speed = SPEED_100;
+ break;
+ default:
+ netdev_info(netdev,
+ "WARNING: Unrecognized link_speed (0x%x).\n",
+ link_info->link_speed);
+ break;
+ }
+ ks->base.duplex = DUPLEX_FULL;
+}
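
The "intersect" step above reduces to a bitwise AND of link-mode masks: modes derivable from the active PHY type survive only if the NVM-capability pass also reports them. A toy model with plain bitmasks in place of ethtool's linkmode arrays (the mode bits here are assumptions, not ethtool's numbering):

#include <stdint.h>
#include <stdio.h>

#define MODE_10G_T  (1u << 0)
#define MODE_10G_SR (1u << 1)
#define MODE_25G_CR (1u << 2)

int main(void)
{
	uint32_t from_phy_type = MODE_10G_T | MODE_10G_SR; /* current PHY */
	uint32_t from_nvm      = MODE_10G_T | MODE_25G_CR; /* NVM caps   */
	uint32_t supported     = from_phy_type & from_nvm; /* intersect  */

	printf("supported mask = 0x%x\n", supported);	   /* prints 0x1 */
	return 0;
}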
+
+/**
+ * ice_get_settings_link_down - Get the Link settings when link is down
+ * @ks: ethtool ksettings to fill in
+ * @netdev: network interface device structure
+ *
+ * Reports link settings that can be determined when link is down
+ */
+static void
+ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
+ struct net_device __always_unused *netdev)
+{
+ /* link is down and the driver needs to fall back on
+ * supported phy types to figure out what info to display
+ */
+ ice_phy_type_to_ethtool(netdev, ks);
+
+ /* With no link, speed and duplex are unknown */
+ ks->base.speed = SPEED_UNKNOWN;
+ ks->base.duplex = DUPLEX_UNKNOWN;
+}
+
+/**
+ * ice_get_link_ksettings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ks: ethtool ksettings
+ *
+ * Reports speed/duplex settings based on media_type
+ */
+static int ice_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_link_status *hw_link_info;
+ struct ice_vsi *vsi = np->vsi;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+ hw_link_info = &vsi->port_info->phy.link_info;
+
+ /* set speed and duplex */
+ if (hw_link_info->link_info & ICE_AQ_LINK_UP)
+ ice_get_settings_link_up(ks, netdev);
+ else
+ ice_get_settings_link_down(ks, netdev);
+
/* set autoneg settings */
- ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE);
+ ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
/* set media type settings */
switch (vsi->port_info->phy.media_type) {
@@ -442,6 +857,311 @@ ice_get_link_ksettings(struct net_device *netdev,
}
/**
+ * ice_ksettings_find_adv_link_speed - Find advertising link speed
+ * @ks: ethtool ksettings
+ */
+static u16
+ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
+{
+ u16 adv_link_speed = 0;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100baseT_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseKX_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseT_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseX_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 5000baseT_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_5GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseKR_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseSR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseLR_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseCR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseSR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseKR_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_25GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseCR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseSR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseLR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseKR4_Full))
+ adv_link_speed |= ICE_AQ_LINK_SPEED_40GB;
+
+ return adv_link_speed;
+}
+
+/**
+ * ice_setup_autoneg
+ * @p: port info
+ * @ks: ethtool_link_ksettings
+ * @config: configuration that will be sent down to FW
+ * @autoneg_enabled: whether autonegotiation is enabled
+ * @autoneg_changed: will there be a change in autonegotiation
+ * @netdev: network interface device structure
+ *
+ * Set up the PHY autonegotiation feature
+ */
+static int
+ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
+ struct ice_aqc_set_phy_cfg_data *config,
+ u8 autoneg_enabled, u8 *autoneg_changed,
+ struct net_device *netdev)
+{
+ int err = 0;
+
+ *autoneg_changed = 0;
+
+ /* Check autoneg */
+ if (autoneg_enabled == AUTONEG_ENABLE) {
+ /* If autoneg was not already enabled */
+ if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) {
+ /* If autoneg is not supported, return error */
+ if (!ethtool_link_ksettings_test_link_mode(ks,
+ supported,
+ Autoneg)) {
+ netdev_info(netdev, "Autoneg not supported on this phy.\n");
+ err = -EINVAL;
+ } else {
+ /* Autoneg is allowed to change */
+ config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ *autoneg_changed = 1;
+ }
+ }
+ } else {
+ /* If autoneg is currently enabled */
+ if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
+ /* If autoneg is supported, 10GBASE_T is the only PHY
+ * that can disable it; otherwise return an error
+ */
+ if (ethtool_link_ksettings_test_link_mode(ks,
+ supported,
+ Autoneg)) {
+ netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+ err = -EINVAL;
+ } else {
+ /* Autoneg is allowed to change */
+ config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ *autoneg_changed = 1;
+ }
+ }
+ }
+
+ return err;
+}
+
+/**
+ * ice_set_link_ksettings - Set Speed and Duplex
+ * @netdev: network interface device structure
+ * @ks: ethtool ksettings
+ *
+ * Set speed/duplex per media_types advertised/forced
+ */
+static int ice_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
+{
+ u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ethtool_link_ksettings safe_ks, copy_ks;
+ struct ice_aqc_get_phy_caps_data *abilities;
+ u16 adv_link_speed, curr_link_speed, idx;
+ struct ice_aqc_set_phy_cfg_data config;
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_port_info *p;
+ u8 autoneg_changed = 0;
+ enum ice_status status;
+ u64 phy_type_low;
+ int err = 0;
+ bool linkup;
+
+ p = np->vsi->port_info;
+
+ if (!p)
+ return -EOPNOTSUPP;
+
+ /* Check if this is the LAN VSI */
+ for (idx = 0; idx < pf->num_alloc_vsi; idx++) {
+ if (pf->vsi[idx]->type == ICE_VSI_PF) {
+ if (np->vsi != pf->vsi[idx])
+ return -EOPNOTSUPP;
+ break;
+ }
+ }
+
+ if (p->phy.media_type != ICE_MEDIA_BASET &&
+ p->phy.media_type != ICE_MEDIA_FIBER &&
+ p->phy.media_type != ICE_MEDIA_BACKPLANE &&
+ p->phy.media_type != ICE_MEDIA_DA &&
+ p->phy.link_info.link_info & ICE_AQ_LINK_UP)
+ return -EOPNOTSUPP;
+
+ /* copy the ksettings to copy_ks to avoid modifying the original */
+ memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings));
+
+ /* save autoneg out of ksettings */
+ autoneg = copy_ks.base.autoneg;
+
+ memset(&safe_ks, 0, sizeof(safe_ks));
+
+ /* Get link modes supported by hardware. */
+ ice_phy_type_to_ethtool(netdev, &safe_ks);
+
+ /* and check against the modes requested by the user.
+ * Return an error if an unsupported mode was requested.
+ */
+ if (!bitmap_subset(copy_ks.link_modes.advertising,
+ safe_ks.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ /* get our own copy of the bits to check against */
+ memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
+ safe_ks.base.cmd = copy_ks.base.cmd;
+ safe_ks.base.link_mode_masks_nwords =
+ copy_ks.base.link_mode_masks_nwords;
+ ice_get_link_ksettings(netdev, &safe_ks);
+
+ /* set autoneg back to what it currently is */
+ copy_ks.base.autoneg = safe_ks.base.autoneg;
+ /* we don't compare the speed */
+ copy_ks.base.speed = safe_ks.base.speed;
+
+ /* If copy_ks.base and safe_ks.base are not the same now, then they are
+ * trying to set something that we do not support.
+ */
+ if (memcmp(&copy_ks.base, &safe_ks.base,
+ sizeof(struct ethtool_link_settings)))
+ return -EOPNOTSUPP;
+
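+	/* serialize configuration changes: spin on the software "config busy"
+	 * bit, sleeping between attempts, and give up after a bounded number
+	 * of retries
+	 */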
+ while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
+ }
+
+ abilities = devm_kzalloc(&pf->pdev->dev, sizeof(*abilities),
+ GFP_KERNEL);
+ if (!abilities) {
+ clear_bit(__ICE_CFG_BUSY, pf->state);
+ return -ENOMEM;
+ }
+
+ /* Get the current phy config */
+ status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
+ NULL);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ /* Copy abilities to config in case autoneg is not set below */
+ memset(&config, 0, sizeof(struct ice_aqc_set_phy_cfg_data));
+ config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE;
+ if (abilities->caps & ICE_AQC_PHY_AN_MODE)
+ config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ /* Check autoneg */
+ err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed,
+ netdev);
+
+ if (err)
+ goto done;
+
+ /* Call to get the current link speed */
+ p->phy.get_link_info = true;
+ status = ice_get_link_status(p, &linkup);
+ if (status) {
+ err = -EAGAIN;
+ goto done;
+ }
+
+ curr_link_speed = p->phy.link_info.link_speed;
+ adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
+
+ /* If speed didn't get set, set it to what it currently is.
+ * This is needed because if advertise is 0 (as it is when autoneg
+ * is disabled) then speed won't get set.
+ */
+ if (!adv_link_speed)
+ adv_link_speed = curr_link_speed;
+
+ /* Convert the advertised link speeds to their corresponding PHY_TYPE */
+ ice_update_phy_type(&phy_type_low, adv_link_speed);
+
+ if (!autoneg_changed && adv_link_speed == curr_link_speed) {
+ netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
+ goto done;
+ }
+
+ /* copy over the rest of the abilities */
+ config.low_power_ctrl = abilities->low_power_ctrl;
+ config.eee_cap = abilities->eee_cap;
+ config.eeer_value = abilities->eeer_value;
+ config.link_fec_opt = abilities->link_fec_options;
+
+ /* save the requested speeds */
+ p->phy.link_info.req_speeds = adv_link_speed;
+
+ /* set link and auto negotiation so changes take effect */
+ config.caps |= ICE_AQ_PHY_ENA_LINK;
+
+ if (phy_type_low) {
+ config.phy_type_low = cpu_to_le64(phy_type_low) &
+ abilities->phy_type_low;
+ } else {
+ err = -EAGAIN;
+ netdev_info(netdev, "Nothing changed. No PHY_TYPE is corresponded to advertised link speed.\n");
+ goto done;
+ }
+
+ /* If link is up, take it down */
+ if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) {
+ /* Tell the OS the link is going down; it will come back up
+ * asynchronously once the FW reports it is ready
+ */
+ ice_print_link_msg(np->vsi, false);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+
+ /* make the aq call */
+ status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL);
+ if (status) {
+ netdev_info(netdev, "Set phy config failed,\n");
+ err = -EAGAIN;
+ }
+
+done:
+ devm_kfree(&pf->pdev->dev, abilities);
+ clear_bit(__ICE_CFG_BUSY, pf->state);
+
+ return err;
+}
+
+/**
* ice_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -478,9 +1198,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
ring->tx_max_pending = ICE_MAX_NUM_DESC;
ring->rx_pending = vsi->rx_rings[0]->count;
ring->tx_pending = vsi->tx_rings[0]->count;
- ring->rx_mini_pending = ICE_MIN_NUM_DESC;
+
+ /* Rx mini and jumbo rings are not supported */
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
+ ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
@@ -498,14 +1220,23 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
ring->tx_pending < ICE_MIN_NUM_DESC ||
ring->rx_pending > ICE_MAX_NUM_DESC ||
ring->rx_pending < ICE_MIN_NUM_DESC) {
- netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+ netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
ring->tx_pending, ring->rx_pending,
- ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
+ ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
+ ICE_REQ_DESC_MULTIPLE);
return -EINVAL;
}
new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
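+	/* ALIGN() rounds up to the next multiple, so e.g. a request of 500
+	 * becomes 512, assuming ICE_REQ_DESC_MULTIPLE is 32
+	 */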
+ if (new_tx_cnt != ring->tx_pending)
+ netdev_info(netdev,
+ "Requested Tx descriptor count rounded up to %d\n",
+ new_tx_cnt);
new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
+ if (new_rx_cnt != ring->rx_pending)
+ netdev_info(netdev,
+ "Requested Rx descriptor count rounded up to %d\n",
+ new_rx_cnt);
/* if nothing to do return success */
if (new_tx_cnt == vsi->tx_rings[0]->count &&
@@ -933,6 +1664,7 @@ static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
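+	/* the set hook below is reached from userspace via commands such as
+	 * "ethtool -s <ifname> speed 25000 duplex full autoneg on"
+	 */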
+ .set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6076fc87df9d..a6679a9bfd3a 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -6,251 +6,323 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
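+
+/* The _M masks below are built with ICE_M(m, s), which is assumed to expand
+ * to ((m) << (s)) (see ice_type.h), so e.g. ICE_M(0x3FF, 0) is a 10 bit
+ * field mask at bit offset 0.
+ */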
-#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
-#define PF_FW_ARQBAH 0x00080180
-#define PF_FW_ARQBAL 0x00080080
-#define PF_FW_ARQH 0x00080380
-#define PF_FW_ARQH_ARQH_S 0
-#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, PF_FW_ARQH_ARQH_S)
-#define PF_FW_ARQLEN 0x00080280
-#define PF_FW_ARQLEN_ARQLEN_S 0
-#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, PF_FW_ARQLEN_ARQLEN_S)
-#define PF_FW_ARQLEN_ARQVFE_S 28
-#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S)
-#define PF_FW_ARQLEN_ARQOVFL_S 29
-#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S)
-#define PF_FW_ARQLEN_ARQCRIT_S 30
-#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S)
-#define PF_FW_ARQLEN_ARQENABLE_S 31
-#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S)
-#define PF_FW_ARQT 0x00080480
-#define PF_FW_ATQBAH 0x00080100
-#define PF_FW_ATQBAL 0x00080000
-#define PF_FW_ATQH 0x00080300
-#define PF_FW_ATQH_ATQH_S 0
-#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, PF_FW_ATQH_ATQH_S)
-#define PF_FW_ATQLEN 0x00080200
-#define PF_FW_ATQLEN_ATQLEN_S 0
-#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S)
-#define PF_FW_ATQLEN_ATQVFE_S 28
-#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S)
-#define PF_FW_ATQLEN_ATQOVFL_S 29
-#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S)
-#define PF_FW_ATQLEN_ATQCRIT_S 30
-#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S)
-#define PF_FW_ATQLEN_ATQENABLE_S 31
-#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S)
-#define PF_FW_ATQT 0x00080400
-
+#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
+#define PF_FW_ARQBAH 0x00080180
+#define PF_FW_ARQBAL 0x00080080
+#define PF_FW_ARQH 0x00080380
+#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, 0)
+#define PF_FW_ARQLEN 0x00080280
+#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_FW_ARQLEN_ARQVFE_M BIT(28)
+#define PF_FW_ARQLEN_ARQOVFL_M BIT(29)
+#define PF_FW_ARQLEN_ARQCRIT_M BIT(30)
+#define PF_FW_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_FW_ARQT 0x00080480
+#define PF_FW_ATQBAH 0x00080100
+#define PF_FW_ATQBAL 0x00080000
+#define PF_FW_ATQH 0x00080300
+#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, 0)
+#define PF_FW_ATQLEN 0x00080200
+#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_FW_ATQLEN_ATQVFE_M BIT(28)
+#define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
+#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
+#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_FW_ATQT 0x00080400
+#define PF_MBX_ARQBAH 0x0022E400
+#define PF_MBX_ARQBAL 0x0022E380
+#define PF_MBX_ARQH 0x0022E500
+#define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN 0x0022E480
+#define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_MBX_ARQT 0x0022E580
+#define PF_MBX_ATQBAH 0x0022E180
+#define PF_MBX_ATQBAL 0x0022E100
+#define PF_MBX_ATQH 0x0022E280
+#define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN 0x0022E200
+#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_MBX_ATQT 0x0022E300
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, 8)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, 16)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24
-#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, 24)
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, 30)
#define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045c900 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, 30)
#define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045ca00 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, 30)
#define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045cb00 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0
-#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, 0)
#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30
-#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S)
-
-#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4))
-#define QRXFLXP_CNTXT_RXDID_IDX_S 0
-#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, QRXFLXP_CNTXT_RXDID_IDX_S)
-#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
-#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, QRXFLXP_CNTXT_RXDID_PRIO_S)
-#define QRXFLXP_CNTXT_TS_S 11
-#define QRXFLXP_CNTXT_TS_M BIT(QRXFLXP_CNTXT_TS_S)
-#define GLGEN_RSTAT 0x000B8188
-#define GLGEN_RSTAT_DEVSTATE_S 0
-#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S)
-#define GLGEN_RSTCTL 0x000B8180
-#define GLGEN_RSTCTL_GRSTDEL_S 0
-#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S)
-#define GLGEN_RSTAT_RESET_TYPE_S 2
-#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, GLGEN_RSTAT_RESET_TYPE_S)
-#define GLGEN_RTRIG 0x000B8190
-#define GLGEN_RTRIG_CORER_S 0
-#define GLGEN_RTRIG_CORER_M BIT(GLGEN_RTRIG_CORER_S)
-#define GLGEN_RTRIG_GLOBR_S 1
-#define GLGEN_RTRIG_GLOBR_M BIT(GLGEN_RTRIG_GLOBR_S)
-#define GLGEN_STAT 0x000B612C
-#define PFGEN_CTRL 0x00091000
-#define PFGEN_CTRL_PFSWR_S 0
-#define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S)
-#define PFGEN_STATE 0x00088000
-#define PRTGEN_STATUS 0x000B8100
-#define PFHMC_ERRORDATA 0x00520500
-#define PFHMC_ERRORINFO 0x00520400
-#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
-#define GLINT_DYN_CTL_INTENA_S 0
-#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S)
-#define GLINT_DYN_CTL_CLEARPBA_S 1
-#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S)
-#define GLINT_DYN_CTL_SWINT_TRIG_S 2
-#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(GLINT_DYN_CTL_SWINT_TRIG_S)
-#define GLINT_DYN_CTL_ITR_INDX_S 3
-#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
-#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S)
-#define GLINT_DYN_CTL_INTENA_MSK_S 31
-#define GLINT_DYN_CTL_INTENA_MSK_M BIT(GLINT_DYN_CTL_INTENA_MSK_S)
-#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
-#define PFINT_FW_CTL 0x0016C800
-#define PFINT_FW_CTL_MSIX_INDX_S 0
-#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_FW_CTL_MSIX_INDX_S)
-#define PFINT_FW_CTL_ITR_INDX_S 11
-#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, PFINT_FW_CTL_ITR_INDX_S)
-#define PFINT_FW_CTL_CAUSE_ENA_S 30
-#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
-#define PFINT_OICR 0x0016CA00
-#define PFINT_OICR_ECC_ERR_S 16
-#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S)
-#define PFINT_OICR_MAL_DETECT_S 19
-#define PFINT_OICR_MAL_DETECT_M BIT(PFINT_OICR_MAL_DETECT_S)
-#define PFINT_OICR_GRST_S 20
-#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S)
-#define PFINT_OICR_PCI_EXCEPTION_S 21
-#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_HMC_ERR_S 26
-#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S)
-#define PFINT_OICR_PE_CRITERR_S 28
-#define PFINT_OICR_PE_CRITERR_M BIT(PFINT_OICR_PE_CRITERR_S)
-#define PFINT_OICR_CTL 0x0016CA80
-#define PFINT_OICR_CTL_MSIX_INDX_S 0
-#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_OICR_CTL_MSIX_INDX_S)
-#define PFINT_OICR_CTL_ITR_INDX_S 11
-#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, PFINT_OICR_CTL_ITR_INDX_S)
-#define PFINT_OICR_CTL_CAUSE_ENA_S 30
-#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S)
-#define PFINT_OICR_ENA 0x0016C900
-#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
-#define QINT_RQCTL_MSIX_INDX_S 0
-#define QINT_RQCTL_ITR_INDX_S 11
-#define QINT_RQCTL_CAUSE_ENA_S 30
-#define QINT_RQCTL_CAUSE_ENA_M BIT(QINT_RQCTL_CAUSE_ENA_S)
-#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
-#define QINT_TQCTL_MSIX_INDX_S 0
-#define QINT_TQCTL_ITR_INDX_S 11
-#define QINT_TQCTL_CAUSE_ENA_S 30
-#define QINT_TQCTL_CAUSE_ENA_M BIT(QINT_TQCTL_CAUSE_ENA_S)
-#define GLLAN_RCTL_0 0x002941F8
-#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
-#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
-#define QRX_CTRL_MAX_INDEX 2047
-#define QRX_CTRL_QENA_REQ_S 0
-#define QRX_CTRL_QENA_REQ_M BIT(QRX_CTRL_QENA_REQ_S)
-#define QRX_CTRL_QENA_STAT_S 2
-#define QRX_CTRL_QENA_STAT_M BIT(QRX_CTRL_QENA_STAT_S)
-#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4))
-#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4))
-#define GLNVM_FLA 0x000B6108
-#define GLNVM_FLA_LOCKED_S 6
-#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S)
-#define GLNVM_GENS 0x000B6100
-#define GLNVM_GENS_SR_SIZE_S 5
-#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, GLNVM_GENS_SR_SIZE_S)
-#define GLNVM_ULD 0x000B6008
-#define GLNVM_ULD_CORER_DONE_S 3
-#define GLNVM_ULD_CORER_DONE_M BIT(GLNVM_ULD_CORER_DONE_S)
-#define GLNVM_ULD_GLOBR_DONE_S 4
-#define GLNVM_ULD_GLOBR_DONE_M BIT(GLNVM_ULD_GLOBR_DONE_S)
-#define PF_FUNC_RID 0x0009E880
-#define PF_FUNC_RID_FUNC_NUM_S 0
-#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S)
-#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
-#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
-#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
-#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
-#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
-#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
-#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
-#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
-#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
-#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
-#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
-#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8))
-#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
-#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
-#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
-#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
-#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
-#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
-#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
-#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
-#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
-#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
-#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
-#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
-#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
-#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
-#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
-#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
-#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
-#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
-#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
-#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
-#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
-#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
-#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
-#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
-#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
-#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
-#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
-#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
-#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
-#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
-#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
-#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
-#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
-#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
-#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
-#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
-#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
-#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
-#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
-#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
-#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
-#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
-#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
-#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
-#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
-#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
-#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
-#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
-#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
-#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
-#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
-#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
-#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
-#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
-#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
-#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
-#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
-#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
-#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
-#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
-#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
-#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
-#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
-#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
-#define VSIQF_HKEY_MAX_INDEX 12
+#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, 30)
+#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4))
+#define QRXFLXP_CNTXT_RXDID_IDX_S 0
+#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0)
+#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
+#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8)
+#define GLGEN_RSTAT 0x000B8188
+#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0)
+#define GLGEN_RSTCTL 0x000B8180
+#define GLGEN_RSTCTL_GRSTDEL_S 0
+#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, 0)
+#define GLGEN_RSTAT_RESET_TYPE_S 2
+#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, 2)
+#define GLGEN_RTRIG 0x000B8190
+#define GLGEN_RTRIG_CORER_M BIT(0)
+#define GLGEN_RTRIG_GLOBR_M BIT(1)
+#define GLGEN_STAT 0x000B612C
+#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4))
+#define PFGEN_CTRL 0x00091000
+#define PFGEN_CTRL_PFSWR_M BIT(0)
+#define PFGEN_STATE 0x00088000
+#define PRTGEN_STATUS 0x000B8100
+#define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4))
+#define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4))
+#define VPGEN_VFRSTAT_VFRD_M BIT(0)
+#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4))
+#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
+#define PFHMC_ERRORDATA 0x00520500
+#define PFHMC_ERRORINFO 0x00520400
+#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
+#define GLINT_DYN_CTL_INTENA_M BIT(0)
+#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
+#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2)
+#define GLINT_DYN_CTL_ITR_INDX_S 3
+#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
+#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
+#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
+#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4))
+#define GLINT_RATE_INTRL_ENA_M BIT(6)
+#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4))
+#define GLINT_VECT2FUNC_VF_NUM_S 0
+#define GLINT_VECT2FUNC_VF_NUM_M ICE_M(0xFF, 0)
+#define GLINT_VECT2FUNC_PF_NUM_S 12
+#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12)
+#define GLINT_VECT2FUNC_IS_PF_S 16
+#define GLINT_VECT2FUNC_IS_PF_M BIT(16)
+#define PFINT_FW_CTL 0x0016C800
+#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_FW_CTL_ITR_INDX_S 11
+#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, 11)
+#define PFINT_FW_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_MBX_CTL 0x0016B280
+#define PFINT_MBX_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_MBX_CTL_ITR_INDX_S 11
+#define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11)
+#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_OICR 0x0016CA00
+#define PFINT_OICR_ECC_ERR_M BIT(16)
+#define PFINT_OICR_MAL_DETECT_M BIT(19)
+#define PFINT_OICR_GRST_M BIT(20)
+#define PFINT_OICR_PCI_EXCEPTION_M BIT(21)
+#define PFINT_OICR_HMC_ERR_M BIT(26)
+#define PFINT_OICR_PE_CRITERR_M BIT(28)
+#define PFINT_OICR_VFLR_M BIT(29)
+#define PFINT_OICR_CTL 0x0016CA80
+#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_OICR_CTL_ITR_INDX_S 11
+#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, 11)
+#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_OICR_ENA 0x0016C900
+#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
+#define QINT_RQCTL_MSIX_INDX_S 0
+#define QINT_RQCTL_ITR_INDX_S 11
+#define QINT_RQCTL_CAUSE_ENA_M BIT(30)
+#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
+#define QINT_TQCTL_MSIX_INDX_S 0
+#define QINT_TQCTL_ITR_INDX_S 11
+#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
+#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4))
+#define VPINT_ALLOC_FIRST_S 0
+#define VPINT_ALLOC_FIRST_M ICE_M(0x7FF, 0)
+#define VPINT_ALLOC_LAST_S 12
+#define VPINT_ALLOC_LAST_M ICE_M(0x7FF, 12)
+#define VPINT_ALLOC_VALID_M BIT(31)
+#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
+#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
+#define QRX_CTRL_MAX_INDEX 2047
+#define QRX_CTRL_QENA_REQ_S 0
+#define QRX_CTRL_QENA_REQ_M BIT(0)
+#define QRX_CTRL_QENA_STAT_S 2
+#define QRX_CTRL_QENA_STAT_M BIT(2)
+#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4))
+#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4))
+#define QRX_TAIL_MAX_INDEX 2047
+#define QRX_TAIL_TAIL_S 0
+#define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0)
+#define VPLAN_RX_QBASE(_VF) (0x00072000 + ((_VF) * 4))
+#define VPLAN_RX_QBASE_VFFIRSTQ_S 0
+#define VPLAN_RX_QBASE_VFFIRSTQ_M ICE_M(0x7FF, 0)
+#define VPLAN_RX_QBASE_VFNUMQ_S 16
+#define VPLAN_RX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
+#define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4))
+#define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0)
+#define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4))
+#define VPLAN_TX_QBASE_VFFIRSTQ_S 0
+#define VPLAN_TX_QBASE_VFFIRSTQ_M ICE_M(0x3FFF, 0)
+#define VPLAN_TX_QBASE_VFNUMQ_S 16
+#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
+#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
+#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
+#define GL_MDET_RX 0x00294C00
+#define GL_MDET_RX_QNUM_S 0
+#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
+#define GL_MDET_RX_VF_NUM_S 15
+#define GL_MDET_RX_VF_NUM_M ICE_M(0xFF, 15)
+#define GL_MDET_RX_PF_NUM_S 23
+#define GL_MDET_RX_PF_NUM_M ICE_M(0x7, 23)
+#define GL_MDET_RX_MAL_TYPE_S 26
+#define GL_MDET_RX_MAL_TYPE_M ICE_M(0x1F, 26)
+#define GL_MDET_RX_VALID_M BIT(31)
+#define GL_MDET_TX_PQM 0x002D2E00
+#define GL_MDET_TX_PQM_PF_NUM_S 0
+#define GL_MDET_TX_PQM_PF_NUM_M ICE_M(0x7, 0)
+#define GL_MDET_TX_PQM_VF_NUM_S 4
+#define GL_MDET_TX_PQM_VF_NUM_M ICE_M(0xFF, 4)
+#define GL_MDET_TX_PQM_QNUM_S 12
+#define GL_MDET_TX_PQM_QNUM_M ICE_M(0x3FFF, 12)
+#define GL_MDET_TX_PQM_MAL_TYPE_S 26
+#define GL_MDET_TX_PQM_MAL_TYPE_M ICE_M(0x1F, 26)
+#define GL_MDET_TX_PQM_VALID_M BIT(31)
+#define GL_MDET_TX_TCLAN 0x000FC068
+#define GL_MDET_TX_TCLAN_QNUM_S 0
+#define GL_MDET_TX_TCLAN_QNUM_M ICE_M(0x7FFF, 0)
+#define GL_MDET_TX_TCLAN_VF_NUM_S 15
+#define GL_MDET_TX_TCLAN_VF_NUM_M ICE_M(0xFF, 15)
+#define GL_MDET_TX_TCLAN_PF_NUM_S 23
+#define GL_MDET_TX_TCLAN_PF_NUM_M ICE_M(0x7, 23)
+#define GL_MDET_TX_TCLAN_MAL_TYPE_S 26
+#define GL_MDET_TX_TCLAN_MAL_TYPE_M ICE_M(0x1F, 26)
+#define GL_MDET_TX_TCLAN_VALID_M BIT(31)
+#define PF_MDET_RX 0x00294280
+#define PF_MDET_RX_VALID_M BIT(0)
+#define PF_MDET_TX_PQM 0x002D2C80
+#define PF_MDET_TX_PQM_VALID_M BIT(0)
+#define PF_MDET_TX_TCLAN 0x000FC000
+#define PF_MDET_TX_TCLAN_VALID_M BIT(0)
+#define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4))
+#define VP_MDET_RX_VALID_M BIT(0)
+#define VP_MDET_TX_PQM(_VF) (0x002D2000 + ((_VF) * 4))
+#define VP_MDET_TX_PQM_VALID_M BIT(0)
+#define VP_MDET_TX_TCLAN(_VF) (0x000FB800 + ((_VF) * 4))
+#define VP_MDET_TX_TCLAN_VALID_M BIT(0)
+#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4))
+#define VP_MDET_TX_TDPU_VALID_M BIT(0)
+#define GLNVM_FLA 0x000B6108
+#define GLNVM_FLA_LOCKED_M BIT(6)
+#define GLNVM_GENS 0x000B6100
+#define GLNVM_GENS_SR_SIZE_S 5
+#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5)
+#define GLNVM_ULD 0x000B6008
+#define GLNVM_ULD_CORER_DONE_M BIT(3)
+#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define PF_FUNC_RID 0x0009E880
+#define PF_FUNC_RID_FUNC_NUM_S 0
+#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0)
+#define PF_PCI_CIAA 0x0009E580
+#define PF_PCI_CIAA_VF_NUM_S 12
+#define PF_PCI_CIAD 0x0009E500
+#define GL_PWR_MODE_CTL 0x000B820C
+#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
+#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
+#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
+#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
+#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
+#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
+#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
+#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
+#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
+#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
+#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
+#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
+#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
+#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8))
+#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
+#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
+#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
+#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
+#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
+#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
+#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
+#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
+#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
+#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
+#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
+#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
+#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
+#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
+#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
+#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
+#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
+#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
+#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
+#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
+#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
+#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
+#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
+#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
+#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
+#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
+#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
+#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
+#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
+#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
+#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
+#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
+#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
+#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
+#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
+#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
+#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
+#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
+#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
+#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
+#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
+#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
+#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
+#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
+#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
+#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
+#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
+#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
+#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
+#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
+#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
+#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
+#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
+#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
+#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
+#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
+#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
+#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
+#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
+#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
+#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
+#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
+#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
+#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define VSIQF_HKEY_MAX_INDEX 12
+#define VSIQF_HLUT_MAX_INDEX 15
+#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
+#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 068dbc740b76..7d2a66739e3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -188,23 +188,25 @@ struct ice_32b_rx_flex_desc_nic {
* with a specific metadata (profile 7 reserved for HW)
*/
enum ice_rxdid {
- ICE_RXDID_START = 0,
- ICE_RXDID_LEGACY_0 = ICE_RXDID_START,
- ICE_RXDID_LEGACY_1,
- ICE_RXDID_FLX_START,
- ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START,
- ICE_RXDID_FLX_LAST = 63,
- ICE_RXDID_LAST = ICE_RXDID_FLX_LAST
+ ICE_RXDID_LEGACY_0 = 0,
+ ICE_RXDID_LEGACY_1 = 1,
+ ICE_RXDID_FLEX_NIC = 2,
+ ICE_RXDID_FLEX_NIC_2 = 6,
+ ICE_RXDID_HW = 7,
+ ICE_RXDID_LAST = 63,
};
/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01
/* Receive Descriptor MDID values */
-#define ICE_RX_MDID_FLOW_ID_LOWER 5
-#define ICE_RX_MDID_FLOW_ID_HIGH 6
-#define ICE_RX_MDID_HASH_LOW 56
-#define ICE_RX_MDID_HASH_HIGH 57
+enum ice_flex_rx_mdid {
+ ICE_RX_MDID_FLOW_ID_LOWER = 5,
+ ICE_RX_MDID_FLOW_ID_HIGH,
+ ICE_RX_MDID_SRC_VSI = 19,
+ ICE_RX_MDID_HASH_LOW = 56,
+ ICE_RX_MDID_HASH_HIGH,
+};
/* Rx Flag64 packet flag bits */
enum ice_rx_flg64_bits {
@@ -416,6 +418,7 @@ struct ice_tlan_ctx {
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
+#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
u16 src_vsi;
@@ -471,4 +474,16 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
{
return ice_ptype_lkup[ptype];
}
+
+#define ICE_LINK_SPEED_UNKNOWN 0
+#define ICE_LINK_SPEED_10MBPS 10
+#define ICE_LINK_SPEED_100MBPS 100
+#define ICE_LINK_SPEED_1000MBPS 1000
+#define ICE_LINK_SPEED_2500MBPS 2500
+#define ICE_LINK_SPEED_5000MBPS 5000
+#define ICE_LINK_SPEED_10000MBPS 10000
+#define ICE_LINK_SPEED_20000MBPS 20000
+#define ICE_LINK_SPEED_25000MBPS 25000
+#define ICE_LINK_SPEED_40000MBPS 40000
+
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
new file mode 100644
index 000000000000..49f1940772ed
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -0,0 +1,2619 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+
+/**
+ * ice_setup_rx_ctx - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in RLAN context.
+ */
+static int ice_setup_rx_ctx(struct ice_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 rxdid = ICE_RXDID_FLEX_NIC;
+ struct ice_rlan_ctx rlan_ctx;
+ u32 regval;
+ u16 pf_q;
+ int err;
+
+ /* which Rx queue number this is in the global space of 2K Rx queues */
+ pf_q = vsi->rxq_map[ring->q_index];
+
+ /* clear the context structure first */
+ memset(&rlan_ctx, 0, sizeof(rlan_ctx));
+
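+	/* the Rx ring base address is programmed in 128 byte (1 << 7) units */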
+ rlan_ctx.base = ring->dma >> 7;
+
+ rlan_ctx.qlen = ring->count;
+
+ /* Receive Packet Data Buffer Size.
+ * The Packet Data Buffer Size is defined in 128 byte units.
+ */
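+	/* e.g. a 2048 byte rx_buf_len is programmed as 2048 >> 7 == 16,
+	 * assuming ICE_RLAN_CTX_DBUF_S is 7
+	 */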
+ rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+
+ /* use 32 byte descriptors */
+ rlan_ctx.dsize = 1;
+
+ /* Strip the Ethernet CRC bytes before the packet is posted to host
+ * memory.
+ */
+ rlan_ctx.crcstrip = 1;
+
+ /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
+ rlan_ctx.l2tsel = 1;
+
+ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
+
+ /* This controls whether VLAN is stripped from inner headers.
+ * If this flag is set, the VLAN in the inner L2 header is stripped
+ * into the receive descriptor.
+ */
+ rlan_ctx.showiv = 0;
+
+ /* Max packet size for this queue - must not be set to a larger value
+ * than 5 x DBUF
+ */
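+	/* e.g. with a 2048 byte rx_buf_len the cap is 5 * 2048 = 10240 bytes,
+	 * assuming ICE_MAX_CHAINED_RX_BUFS is 5
+	 */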
+ rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+ ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
+
+ /* Rx queue threshold in units of 64 */
+ rlan_ctx.lrxqthresh = 1;
+
+ /* Enable Flexible Descriptors in the queue context which
+ * allows this driver to select a specific receive descriptor format
+ */
+ if (vsi->type != ICE_VSI_VF) {
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ /* increase context priority to pick up the profile id;
+ * the default is 0x01; setting it to 0x03 ensures the profile
+ * is programmed even if the previous context had the same priority
+ */
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ }
+
+ /* Absolute queue number out of 2K needs to be passed */
+ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+ if (err) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ pf_q, err);
+ return -EIO;
+ }
+
+ if (vsi->type == ICE_VSI_VF)
+ return 0;
+
+ /* init queue specific tail register */
+ ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
+ writel(0, ring->tail);
+ ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
+
+ return 0;
+}
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: The Tx ring to configure
+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ */
+static void
+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+
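+	/* the Tx ring base address is programmed in 1 << ICE_TLAN_CTX_BASE_S
+	 * byte units (128 byte units, assuming ICE_TLAN_CTX_BASE_S is 7)
+	 */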
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ /* queue belongs to a specific VSI type
+ * VF / VM index should be programmed per vmvf_type setting:
+ * for vmvf_type = VF, it is the VF number (0-255)
+ * for vmvf_type = VM, it is the VM number (0-767)
+ * for PF or EMP this field should be set to zero
+ */
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ break;
+ case ICE_VSI_VF:
+ /* Firmware expects vmvf_num to be absolute VF id */
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ break;
+ default:
+ return;
+ }
+
+ /* make sure the context is associated with the right VSI */
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ tlan_ctx->tso_ena = ICE_TX_LEGACY;
+ tlan_ctx->tso_qnum = pf_q;
+
+ /* Legacy or Advanced Host Interface:
+ * 0: Advanced Host Interface
+ * 1: Legacy Host Interface
+ */
+ tlan_ctx->legacy_int = ICE_TX_LEGACY;
+}
+
+/**
+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @ena: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries; otherwise returns 0 on success.
+ */
+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
+{
+ int i;
+
+ for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+ u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
+
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ break;
+
+ usleep_range(10, 20);
+ }
+ if (i >= ICE_Q_WAIT_RETRY_LIMIT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * @vsi: the VSI being configured
+ * @ena: start or stop the Rx rings
+ */
+static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int i, j, ret = 0;
+
+ for (i = 0; i < vsi->num_rxq; i++) {
+ int pf_q = vsi->rxq_map[i];
+ u32 rx_reg;
+
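+		/* wait for any pending enable/disable to settle: QENA_REQ and
+		 * QENA_STAT must agree before the request bit is toggled
+		 */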
+ for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
+ if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
+ ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Skip if the queue is already in the requested state */
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ continue;
+
+ /* turn on/off the queue */
+ if (ena)
+ rx_reg |= QRX_CTRL_QENA_REQ_M;
+ else
+ rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+ wr32(hw, QRX_CTRL(pf_q), rx_reg);
+
+ /* wait for the change to finish */
+ ret = ice_pf_rxq_wait(pf, pf_q, ena);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "VSI idx %d Rx ring %d %sable timeout\n",
+ vsi->idx, pf_q, (ena ? "en" : "dis"));
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
+ * @vsi: VSI pointer
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ */
+static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
+{
+ struct ice_pf *pf = vsi->back;
+
+ /* allocate memory for both Tx and Rx ring pointers */
+ vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ sizeof(struct ice_ring *), GFP_KERNEL);
+ if (!vsi->tx_rings)
+ goto err_txrings;
+
+ vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ sizeof(struct ice_ring *), GFP_KERNEL);
+ if (!vsi->rx_rings)
+ goto err_rxrings;
+
+ if (alloc_qvectors) {
+ /* allocate memory for q_vector pointers */
+ vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
+ vsi->num_q_vectors,
+ sizeof(struct ice_q_vector *),
+ GFP_KERNEL);
+ if (!vsi->q_vectors)
+ goto err_vectors;
+ }
+
+ return 0;
+
+err_vectors:
+ devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+err_rxrings:
+ devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+err_txrings:
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ vsi->alloc_txq = pf->num_lan_tx;
+ vsi->alloc_rxq = pf->num_lan_rx;
+ vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
+ vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
+ break;
+ case ICE_VSI_VF:
+ vsi->alloc_txq = pf->num_vf_qps;
+ vsi->alloc_rxq = pf->num_vf_qps;
+ /* pf->num_vf_msix includes (VF miscellaneous vector +
+ * data queue interrupts). Since vsi->num_q_vectors is the
+ * number of queue vectors, subtract 1 from the original vector
+ * count
+ */
+ vsi->num_q_vectors = pf->num_vf_msix - 1;
+ break;
+ default:
+ dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+ vsi->type);
+ break;
+ }
+}
+
+/**
+ * ice_get_free_slot - get the next non-NULL location index in array
+ * @array: array to search
+ * @size: size of the array
+ * @curr: last known occupied index to be used as a search hint
+ *
+ * void * is being used to keep the functionality generic. This lets us use this
+ * function on any array of pointers.
+ */
+static int ice_get_free_slot(void *array, int size, int curr)
+{
+ int **tmp_array = (int **)array;
+ int next;
+
+ if (curr < (size - 1) && !tmp_array[curr + 1]) {
+ next = curr + 1;
+ } else {
+ int i = 0;
+
+ while ((i < size) && (tmp_array[i]))
+ i++;
+ if (i == size)
+ next = ICE_NO_VSI;
+ else
+ next = i;
+ }
+ return next;
+}
+
+/**
+ * ice_vsi_delete - delete a VSI from the switch
+ * @vsi: pointer to VSI being removed
+ */
+void ice_vsi_delete(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_vsi_ctx ctxt;
+ enum ice_status status;
+
+ if (vsi->type == ICE_VSI_VF)
+ ctxt.vf_num = vsi->vf_id;
+ ctxt.vsi_num = vsi->vsi_num;
+
+ memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
+
+ status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL);
+ if (status)
+ dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
+ vsi->vsi_num);
+}
+
+/**
+ * ice_vsi_free_arrays - clean up VSI resources
+ * @vsi: pointer to VSI being cleared
+ * @free_qvectors: bool to specify if q_vectors should be deallocated
+ */
+static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
+{
+ struct ice_pf *pf = vsi->back;
+
+ /* free the ring and vector containers */
+ if (free_qvectors && vsi->q_vectors) {
+ devm_kfree(&pf->pdev->dev, vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ }
+ if (vsi->tx_rings) {
+ devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+ vsi->tx_rings = NULL;
+ }
+ if (vsi->rx_rings) {
+ devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+ vsi->rx_rings = NULL;
+ }
+}
+
+/**
+ * ice_vsi_clear - clean up and deallocate the provided VSI
+ * @vsi: pointer to VSI being cleared
+ *
+ * This deallocates the VSI's queue resources, removes it from the PF's
+ * VSI array if necessary, and deallocates the VSI
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_vsi_clear(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = NULL;
+
+ if (!vsi)
+ return 0;
+
+ if (!vsi->back)
+ return -EINVAL;
+
+ pf = vsi->back;
+
+ if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
+ dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
+ vsi->idx);
+ return -EINVAL;
+ }
+
+ mutex_lock(&pf->sw_mutex);
+ /* updates the PF for this cleared VSI */
+
+ pf->vsi[vsi->idx] = NULL;
+ if (vsi->idx < pf->next_vsi)
+ pf->next_vsi = vsi->idx;
+
+ ice_vsi_free_arrays(vsi, true);
+ mutex_unlock(&pf->sw_mutex);
+ devm_kfree(&pf->pdev->dev, vsi);
+
+ return 0;
+}
+
+/**
+ * ice_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
+ return IRQ_HANDLED;
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ice_vsi_alloc - Allocates the next available struct VSI in the PF
+ * @pf: board private structure
+ * @type: type of VSI
+ *
+ * returns a pointer to a VSI on success, NULL on failure.
+ */
+static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
+{
+ struct ice_vsi *vsi = NULL;
+
+ /* Need to protect the allocation of the VSIs at the PF level */
+ mutex_lock(&pf->sw_mutex);
+
+ /* If we have already allocated our maximum number of VSIs,
+ * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
+ * is available to be populated
+ */
+ if (pf->next_vsi == ICE_NO_VSI) {
+ dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
+ goto unlock_pf;
+ }
+
+ vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
+ if (!vsi)
+ goto unlock_pf;
+
+ vsi->type = type;
+ vsi->back = pf;
+ set_bit(__ICE_DOWN, vsi->state);
+ vsi->idx = pf->next_vsi;
+ vsi->work_lmt = ICE_DFLT_IRQ_WORK;
+
+ ice_vsi_set_num_qs(vsi);
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ if (ice_vsi_alloc_arrays(vsi, true))
+ goto err_rings;
+
+ /* Setup default MSIX irq handler for VSI */
+ vsi->irq_handler = ice_msix_clean_rings;
+ break;
+ case ICE_VSI_VF:
+ if (ice_vsi_alloc_arrays(vsi, true))
+ goto err_rings;
+ break;
+ default:
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+ goto unlock_pf;
+ }
+
+ /* fill VSI slot in the PF struct */
+ pf->vsi[pf->next_vsi] = vsi;
+
+ /* prepare pf->next_vsi for next use */
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
+ goto unlock_pf;
+
+err_rings:
+ devm_kfree(&pf->pdev->dev, vsi);
+ vsi = NULL;
+unlock_pf:
+ mutex_unlock(&pf->sw_mutex);
+ return vsi;
+}
+
+/**
+ * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int offset, ret = 0;
+
+ mutex_lock(&pf->avail_q_mutex);
+ /* look for contiguous block of queues for Tx */
+ offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
+ 0, vsi->alloc_txq, 0);
+ if (offset < ICE_MAX_TXQS) {
+ int i;
+
+ bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
+ for (i = 0; i < vsi->alloc_txq; i++)
+ vsi->txq_map[i] = i + offset;
+ } else {
+ ret = -ENOMEM;
+ vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
+ }
+
+ /* look for contiguous block of queues for Rx */
+ offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
+ 0, vsi->alloc_rxq, 0);
+ if (offset < ICE_MAX_RXQS) {
+ int i;
+
+ bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
+ for (i = 0; i < vsi->alloc_rxq; i++)
+ vsi->rxq_map[i] = i + offset;
+ } else {
+ ret = -ENOMEM;
+ vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
+ }
+ mutex_unlock(&pf->avail_q_mutex);
+
+ return ret;
+}
+
+/**
+ * ice_vsi_get_qs_scatter - Assign scattered queues to a VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int i, index = 0;
+
+ mutex_lock(&pf->avail_q_mutex);
+
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ index = find_next_zero_bit(pf->avail_txqs,
+ ICE_MAX_TXQS, index);
+ if (index < ICE_MAX_TXQS) {
+ set_bit(index, pf->avail_txqs);
+ vsi->txq_map[i] = index;
+ } else {
+ goto err_scatter_tx;
+ }
+ }
+ }
+
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ index = find_next_zero_bit(pf->avail_rxqs,
+ ICE_MAX_RXQS, index);
+ if (index < ICE_MAX_RXQS) {
+ set_bit(index, pf->avail_rxqs);
+ vsi->rxq_map[i] = index;
+ } else {
+ goto err_scatter_rx;
+ }
+ }
+ }
+
+ mutex_unlock(&pf->avail_q_mutex);
+ return 0;
+
+err_scatter_rx:
+ /* unflag any queues we have grabbed (i is the failed position) */
+ for (index = 0; index < i; index++) {
+ clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
+ vsi->rxq_map[index] = 0;
+ }
+ i = vsi->alloc_txq;
+err_scatter_tx:
+ /* i is either position of failed attempt or vsi->alloc_txq */
+ for (index = 0; index < i; index++) {
+ clear_bit(vsi->txq_map[index], pf->avail_txqs);
+ vsi->txq_map[index] = 0;
+ }
+
+ mutex_unlock(&pf->avail_q_mutex);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_get_qs - Assign queues from PF to VSI
+ * @vsi: the VSI to assign queues to
+ *
+ * Returns 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs(struct ice_vsi *vsi)
+{
+ int ret = 0;
+
+ vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
+ vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
+
+ /* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping
+ * modes individually to scatter if assigning contiguous queues
+ * to Rx or Tx fails
+ */
+ ret = ice_vsi_get_qs_contig(vsi);
+ if (ret < 0) {
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
+ vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
+ ICE_MAX_SCATTER_TXQS);
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
+ vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
+ ICE_MAX_SCATTER_RXQS);
+ ret = ice_vsi_get_qs_scatter(vsi);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_vsi_put_qs - Release queues from VSI to PF
+ * @vsi: the VSI that is going to release queues
+ */
+void ice_vsi_put_qs(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ mutex_lock(&pf->avail_q_mutex);
+
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ clear_bit(vsi->txq_map[i], pf->avail_txqs);
+ vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
+ }
+
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
+ vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
+ }
+
+ mutex_unlock(&pf->avail_q_mutex);
+}
+
+/**
+ * ice_rss_clean - Delete RSS-related VSI structures that hold user inputs
+ * @vsi: the VSI being removed
+ */
+static void ice_rss_clean(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf;
+
+ pf = vsi->back;
+
+ if (vsi->rss_hkey_user)
+ devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
+ if (vsi->rss_lut_user)
+ devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
+}
+
+/**
+ * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
+{
+ struct ice_hw_common_caps *cap;
+ struct ice_pf *pf = vsi->back;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ vsi->rss_size = 1;
+ return;
+ }
+
+ cap = &pf->hw.func_caps.common_cap;
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ /* PF VSI will inherit RSS instance of PF */
+ vsi->rss_table_size = cap->rss_table_size;
+ vsi->rss_size = min_t(int, num_online_cpus(),
+ BIT(cap->rss_table_entry_width));
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ break;
+ case ICE_VSI_VF:
+		/* VF VSI gets a small RSS table.
+		 * For VSI_LUT, the LUT size should be set to 64 bytes.
+ */
+ vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vsi->rss_size = min_t(int, num_online_cpus(),
+ BIT(cap->rss_table_entry_width));
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+ break;
+ default:
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
+ vsi->type);
+ break;
+ }
+}
+
+/**
+ * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
+ * @ctxt: the VSI context being set
+ *
+ * This initializes a default VSI context for all sections except the Queues.
+ */
+static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
+{
+ u32 table = 0;
+
+ memset(&ctxt->info, 0, sizeof(ctxt->info));
+	/* VSIs should be allocated from the shared pool */
+ ctxt->alloc_from_pool = true;
+ /* Src pruning enabled by default */
+ ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
+ /* Traffic from VSI can be sent to LAN */
+ ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
+	/* By default, bits 3 and 4 in vlan_flags are 0s, which results in
+	 * legacy behavior (show VLAN, DEI, and UP in the descriptor). Also,
+	 * allow all packets, untagged or tagged.
+ */
+ ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+ ICE_AQ_VSI_VLAN_MODE_M) >>
+ ICE_AQ_VSI_VLAN_MODE_S);
+ /* Have 1:1 UP mapping for both ingress/egress tables */
+ table |= ICE_UP_TABLE_TRANSLATE(0, 0);
+ table |= ICE_UP_TABLE_TRANSLATE(1, 1);
+ table |= ICE_UP_TABLE_TRANSLATE(2, 2);
+ table |= ICE_UP_TABLE_TRANSLATE(3, 3);
+ table |= ICE_UP_TABLE_TRANSLATE(4, 4);
+ table |= ICE_UP_TABLE_TRANSLATE(5, 5);
+ table |= ICE_UP_TABLE_TRANSLATE(6, 6);
+ table |= ICE_UP_TABLE_TRANSLATE(7, 7);
+ ctxt->info.ingress_table = cpu_to_le32(table);
+ ctxt->info.egress_table = cpu_to_le32(table);
+ /* Have 1:1 UP mapping for outer to inner UP table */
+ ctxt->info.outer_up_table = cpu_to_le32(table);
+	/* No outer tag support; outer_tag_flags remains zero */
+}
+
+/**
+ * ice_vsi_setup_q_map - Setup a VSI queue map
+ * @vsi: the VSI being configured
+ * @ctxt: VSI context structure
+ */
+static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+{
+ u16 offset = 0, qmap = 0, numq_tc;
+ u16 pow = 0, max_rss = 0, qcount;
+ u16 qcount_tx = vsi->alloc_txq;
+ u16 qcount_rx = vsi->alloc_rxq;
+ bool ena_tc0 = false;
+ int i;
+
+ /* at least TC0 should be enabled by default */
+ if (vsi->tc_cfg.numtc) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(0)))
+ ena_tc0 = true;
+ } else {
+ ena_tc0 = true;
+ }
+
+ if (ena_tc0) {
+ vsi->tc_cfg.numtc++;
+ vsi->tc_cfg.ena_tc |= 1;
+ }
+
+ numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+
+ /* TC mapping is a function of the number of Rx queues assigned to the
+ * VSI for each traffic class and the offset of these queues.
+	 * The first 10 bits hold the queue offset for TC0, and the next 4 bits
+	 * hold the number of queues allocated to TC0; the number of queues is
+	 * a power of 2.
+	 *
+	 * If a TC is not enabled, its queue offset is set to 0 and one queue
+	 * is allocated; this way, traffic for that TC is sent to the default
+	 * queue.
+ *
+ * Setup number and offset of Rx queues for all TCs for the VSI
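+	 *
+	 * For example (illustrative values): with 16 Rx queues, 2 enabled TCs
+	 * and RSS disabled, numq_tc = 8 and pow = order_base_2(8) = 3, so TC0
+	 * maps queues 0-7 (offset 0) and TC1 maps queues 8-15 (offset 8).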
+ */
+
+ qcount = numq_tc;
+ /* qcount will change if RSS is enabled */
+ if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
+ if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
+ if (vsi->type == ICE_VSI_PF)
+ max_rss = ICE_MAX_LG_RSS_QS;
+ else
+ max_rss = ICE_MAX_SMALL_RSS_QS;
+ qcount = min_t(int, numq_tc, max_rss);
+ qcount = min_t(int, qcount, vsi->rss_size);
+ }
+ }
+
+ /* find the (rounded up) power-of-2 of qcount */
+ pow = order_base_2(qcount);
+
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
+ /* TC is not enabled */
+ vsi->tc_cfg.tc_info[i].qoffset = 0;
+ vsi->tc_cfg.tc_info[i].qcount = 1;
+ ctxt->info.tc_mapping[i] = 0;
+ continue;
+ }
+
+ /* TC is enabled */
+ vsi->tc_cfg.tc_info[i].qoffset = offset;
+ vsi->tc_cfg.tc_info[i].qcount = qcount;
+
+ qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+ ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
+ ICE_AQ_VSI_TC_Q_NUM_M);
+ offset += qcount;
+ ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
+ }
+
+ vsi->num_txq = qcount_tx;
+ vsi->num_rxq = offset;
+
+ if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
+ dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+ /* since there is a chance that num_rxq could have been changed
+ * in the above for loop, make num_txq equal to num_rxq.
+ */
+ vsi->num_txq = vsi->num_rxq;
+ }
+
+ /* Rx queue mapping */
+ ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
+ /* q_mapping buffer holds the info for the first queue allocated for
+ * this VSI in the PF space and also the number of queues associated
+ * with this VSI.
+ */
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
+ ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
+}
+
+/**
+ * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
+ * @ctxt: the VSI context being set
+ * @vsi: the VSI being configured
+ */
+static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
+{
+ u8 lut_type, hash_type;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ /* PF VSI will inherit RSS instance of PF */
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ break;
+ case ICE_VSI_VF:
+		/* VF VSI gets a small RSS table which is a VSI LUT type */
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ break;
+ default:
+ dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+ vsi->type);
+ return;
+ }
+
+ ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+ ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+ ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+}
+
+/**
+ * ice_vsi_init - Create and initialize a VSI
+ * @vsi: the VSI being configured
+ *
+ * This initializes a VSI context depending on the VSI type to be added and
+ * passes it down to the add_vsi AQ command to create a new VSI.
+ */
+static int ice_vsi_init(struct ice_vsi *vsi)
+{
+ struct ice_vsi_ctx ctxt = { 0 };
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int ret = 0;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ ctxt.flags = ICE_AQ_VSI_TYPE_PF;
+ break;
+ case ICE_VSI_VF:
+ ctxt.flags = ICE_AQ_VSI_TYPE_VF;
+ /* VF number here is the absolute VF number (0-255) */
+ ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ice_set_dflt_vsi_ctx(&ctxt);
+ /* if the switch is in VEB mode, allow VSI loopback */
+ if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
+ ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+
+ /* Set LUT type and HASH type if RSS is enabled */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_set_rss_vsi_ctx(&ctxt, vsi);
+
+ ctxt.info.sw_id = vsi->port_info->sw_id;
+ ice_vsi_setup_q_map(vsi, &ctxt);
+
+ ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Add VSI failed, err %d\n", ret);
+ return -EIO;
+ }
+
+ /* keep context for update VSI operations */
+ vsi->info = ctxt.info;
+
+ /* record VSI number returned */
+ vsi->vsi_num = ctxt.vsi_num;
+
+ return ret;
+}
+
+/**
+ * ice_free_q_vector - Free memory allocated for a specific interrupt vector
+ * @vsi: VSI having the memory freed
+ * @v_idx: index of the vector to be freed
+ */
+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_ring *ring;
+
+ if (!vsi->q_vectors[v_idx]) {
+ dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
+ v_idx);
+ return;
+ }
+ q_vector = vsi->q_vectors[v_idx];
+
+ ice_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+ ice_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+	/* only VSIs with an associated netdev are set up with NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ devm_kfree(&vsi->back->pdev->dev, q_vector);
+ vsi->q_vectors[v_idx] = NULL;
+}
+
+/**
+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI having memory freed
+ */
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the VSI struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ if (vsi->type == ICE_VSI_VF)
+ goto out;
+ /* only set affinity_mask if the CPU is online */
+ if (cpu_online(v_idx))
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+
+	/* This will not be called in the driver load path because the netdev
+	 * will not have been created yet. All other cases will register the
+	 * NAPI handler here (i.e. resume, reset/rebuild, etc.)
+ */
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
+ NAPI_POLL_WEIGHT);
+
+out:
+ /* tie q_vector and VSI together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int v_idx = 0, num_q_vectors;
+ int err;
+
+ if (vsi->q_vectors[0]) {
+ dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+ vsi->vsi_num);
+ return -EEXIST;
+ }
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ num_q_vectors = vsi->num_q_vectors;
+ } else {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ err = ice_vsi_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ while (v_idx--)
+ ice_free_q_vector(vsi, v_idx);
+
+ dev_err(&pf->pdev->dev,
+ "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
+ vsi->num_q_vectors, vsi->vsi_num, err);
+ vsi->num_q_vectors = 0;
+ return err;
+}
+
+/**
+ * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after ice_vsi_alloc(), which allocates the
+ * corresponding SW VSI structure and initializes num_queue_pairs for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
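+ *
+ * A PF VSI reserves vector slots in both the SW and HW interrupt trackers,
+ * while a VF VSI reserves pf->num_vf_msix slots from the HW tracker only.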
+ */
+static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int num_q_vectors = 0;
+
+ if (vsi->sw_base_vector || vsi->hw_base_vector) {
+ dev_dbg(&pf->pdev->dev, "VSI %d has non-zero HW base vector %d or SW base vector %d\n",
+ vsi->vsi_num, vsi->hw_base_vector, vsi->sw_base_vector);
+ return -EEXIST;
+ }
+
+ if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ return -ENOENT;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ num_q_vectors = vsi->num_q_vectors;
+ /* reserve slots from OS requested IRQs */
+ vsi->sw_base_vector = ice_get_res(pf, pf->sw_irq_tracker,
+ num_q_vectors, vsi->idx);
+ if (vsi->sw_base_vector < 0) {
+ dev_err(&pf->pdev->dev,
+ "Failed to get tracking for %d SW vectors for VSI %d, err=%d\n",
+ num_q_vectors, vsi->vsi_num,
+ vsi->sw_base_vector);
+ return -ENOENT;
+ }
+ pf->num_avail_sw_msix -= num_q_vectors;
+
+ /* reserve slots from HW interrupts */
+ vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
+ num_q_vectors, vsi->idx);
+ break;
+ case ICE_VSI_VF:
+ /* take VF misc vector and data vectors into account */
+ num_q_vectors = pf->num_vf_msix;
+ /* For VF VSI, reserve slots only from HW interrupts */
+ vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker,
+ num_q_vectors, vsi->idx);
+ break;
+ default:
+ dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+ vsi->type);
+ break;
+ }
+
+ if (vsi->hw_base_vector < 0) {
+ dev_err(&pf->pdev->dev,
+ "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
+ num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
+ if (vsi->type != ICE_VSI_VF) {
+ ice_free_res(vsi->back->sw_irq_tracker,
+ vsi->sw_base_vector, vsi->idx);
+ pf->num_avail_sw_msix += num_q_vectors;
+ }
+ return -ENOENT;
+ }
+
+ pf->num_avail_hw_msix -= num_q_vectors;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
+ * @vsi: the VSI having rings deallocated
+ */
+static void ice_vsi_clear_rings(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings) {
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ if (vsi->tx_rings[i]) {
+ kfree_rcu(vsi->tx_rings[i], rcu);
+ vsi->tx_rings[i] = NULL;
+ }
+ }
+ }
+ if (vsi->rx_rings) {
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ if (vsi->rx_rings[i]) {
+ kfree_rcu(vsi->rx_rings[i], rcu);
+ vsi->rx_rings[i] = NULL;
+ }
+ }
+ }
+}
+
+/**
+ * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
+ * @vsi: VSI which is having rings allocated
+ */
+static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ /* Allocate tx_rings */
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ struct ice_ring *ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+
+ if (!ring)
+ goto err_out;
+
+ ring->q_index = i;
+ ring->reg_idx = vsi->txq_map[i];
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ vsi->tx_rings[i] = ring;
+ }
+
+ /* Allocate rx_rings */
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ struct ice_ring *ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_out;
+
+ ring->q_index = i;
+ ring->reg_idx = vsi->rxq_map[i];
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ vsi->rx_rings[i] = ring;
+ }
+
+ return 0;
+
+err_out:
+ ice_vsi_clear_rings(vsi);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors allotted
+ * through the MSI-X enabling code. On a constrained vector budget, we map Tx
+ * and Rx rings to the vectors as "efficiently" as possible.
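+ *
+ * For example (illustrative counts): with num_txq = 5 and 2 q_vectors,
+ * DIV_ROUND_UP gives vector 0 three Tx rings and vector 1 two.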
+ */
+static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+{
+ int q_vectors = vsi->num_q_vectors;
+ int tx_rings_rem, rx_rings_rem;
+ int v_id;
+
+	/* initially assign the remaining ring counts to the VSI's queue counts */
+ tx_rings_rem = vsi->num_txq;
+ rx_rings_rem = vsi->num_rxq;
+
+ for (v_id = 0; v_id < q_vectors; v_id++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
+ int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+
+ /* Tx rings mapping to vector */
+ tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_tx = tx_rings_per_v;
+ q_vector->tx.ring = NULL;
+ q_vector->tx.itr_idx = ICE_TX_ITR;
+ q_base = vsi->num_txq - tx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
+ struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
+ }
+ tx_rings_rem -= tx_rings_per_v;
+
+ /* Rx rings mapping to vector */
+ rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_rx = rx_rings_per_v;
+ q_vector->rx.ring = NULL;
+ q_vector->rx.itr_idx = ICE_RX_ITR;
+ q_base = vsi->num_rxq - rx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
+ struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ }
+ rx_rings_rem -= rx_rings_per_v;
+ }
+}
+
+/**
+ * ice_vsi_manage_rss_lut - disable/enable RSS
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ *
+ * On a disable request, this function zeroes out the RSS LUT; on an enable
+ * request, it reconfigures the RSS LUT.
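+ *
+ * This would typically be driven by toggling a receive-hashing feature such
+ * as RXHASH from user space; the exact trigger is up to the caller.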
+ */
+int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
+{
+ int err = 0;
+ u8 *lut;
+
+ lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
+ GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ if (ena) {
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ ice_fill_rss_lut(lut, vsi->rss_table_size,
+ vsi->rss_size);
+ }
+
+ err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
+ devm_kfree(&vsi->back->pdev->dev, lut);
+ return err;
+}
+
+/**
+ * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
+ * @vsi: VSI to be configured
+ */
+static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
+{
+ u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
+ struct ice_aqc_get_set_rss_keys *key;
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ int err = 0;
+ u8 *lut;
+
+ vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
+
+ lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
+
+ status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
+ vsi->rss_table_size);
+
+ if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "set_rss_lut failed, error %d\n", status);
+ err = -EIO;
+ goto ice_vsi_cfg_rss_exit;
+ }
+
+ key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
+ if (!key) {
+ err = -ENOMEM;
+ goto ice_vsi_cfg_rss_exit;
+ }
+
+ if (vsi->rss_hkey_user)
+ memcpy(seed, vsi->rss_hkey_user,
+ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ else
+ netdev_rss_key_fill((void *)seed,
+ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ memcpy(&key->standard_rss_key, seed,
+ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+
+ status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
+
+ if (status) {
+ dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
+ status);
+ err = -EIO;
+ }
+
+ devm_kfree(&pf->pdev->dev, key);
+ice_vsi_cfg_rss_exit:
+ devm_kfree(&pf->pdev->dev, lut);
+ return err;
+}
+
+/**
+ * ice_add_mac_to_list - Add a MAC address filter entry to the list
+ * @vsi: the VSI to be forwarded to
+ * @add_list: pointer to the list which contains MAC filter entries
+ * @macaddr: the MAC address to be added
+ *
+ * Adds a MAC address filter entry to the temp list
+ *
+ * Returns 0 on success or -ENOMEM on failure.
+ */
+int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+ const u8 *macaddr)
+{
+ struct ice_fltr_list_entry *tmp;
+ struct ice_pf *pf = vsi->back;
+
+ tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->fltr_info.flag = ICE_FLTR_TX;
+ tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
+ tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
+ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp->fltr_info.vsi_handle = vsi->idx;
+ ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
+
+ INIT_LIST_HEAD(&tmp->list_entry);
+ list_add(&tmp->list_entry, add_list);
+
+ return 0;
+}
+
+/**
+ * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
+ * @vsi: the VSI to be updated
+ */
+void ice_update_eth_stats(struct ice_vsi *vsi)
+{
+ struct ice_eth_stats *prev_es, *cur_es;
+ struct ice_hw *hw = &vsi->back->hw;
+ u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
+
+ prev_es = &vsi->eth_stats_prev;
+ cur_es = &vsi->eth_stats;
+
+ ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_bytes,
+ &cur_es->rx_bytes);
+
+ ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_unicast,
+ &cur_es->rx_unicast);
+
+ ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_multicast,
+ &cur_es->rx_multicast);
+
+ ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
+ &cur_es->rx_broadcast);
+
+ ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_discards, &cur_es->rx_discards);
+
+ ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_bytes,
+ &cur_es->tx_bytes);
+
+ ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_unicast,
+ &cur_es->tx_unicast);
+
+ ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_multicast,
+ &cur_es->tx_multicast);
+
+ ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
+ &cur_es->tx_broadcast);
+
+ ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_errors, &cur_es->tx_errors);
+
+ vsi->stat_offsets_loaded = true;
+}
+
+/**
+ * ice_free_fltr_list - free filter lists helper
+ * @dev: pointer to the device struct
+ * @h: pointer to the list head to be freed
+ *
+ * Helper function to free filter lists previously created using
+ * ice_add_mac_to_list
+ */
+void ice_free_fltr_list(struct device *dev, struct list_head *h)
+{
+ struct ice_fltr_list_entry *e, *tmp;
+
+ list_for_each_entry_safe(e, tmp, h, list_entry) {
+ list_del(&e->list_entry);
+ devm_kfree(dev, e);
+ }
+}
+
+/**
+ * ice_vsi_add_vlan - Add VSI membership for given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN ID to be added
+ */
+int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
+{
+ struct ice_fltr_list_entry *tmp;
+ struct ice_pf *pf = vsi->back;
+ LIST_HEAD(tmp_add_list);
+ enum ice_status status;
+ int err = 0;
+
+ tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp->fltr_info.flag = ICE_FLTR_TX;
+ tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
+ tmp->fltr_info.vsi_handle = vsi->idx;
+ tmp->fltr_info.l_data.vlan.vlan_id = vid;
+
+ INIT_LIST_HEAD(&tmp->list_entry);
+ list_add(&tmp->list_entry, &tmp_add_list);
+
+ status = ice_add_vlan(&pf->hw, &tmp_add_list);
+ if (status) {
+ err = -ENODEV;
+ dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
+ vid, vsi->vsi_num);
+ }
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ return err;
+}
+
+/**
+ * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN ID to be removed
+ *
+ * Returns 0 on success and negative on failure
+ */
+int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
+{
+ struct ice_fltr_list_entry *list;
+ struct ice_pf *pf = vsi->back;
+ LIST_HEAD(tmp_add_list);
+ int status = 0;
+
+ list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+ list->fltr_info.vsi_handle = vsi->idx;
+ list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ list->fltr_info.l_data.vlan.vlan_id = vid;
+ list->fltr_info.flag = ICE_FLTR_TX;
+ list->fltr_info.src_id = ICE_SRC_ID_VSI;
+
+ INIT_LIST_HEAD(&list->list_entry);
+ list_add(&list->list_entry, &tmp_add_list);
+
+ if (ice_remove_vlan(&pf->hw, &tmp_add_list)) {
+ dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
+ vid, vsi->vsi_num);
+ status = -EIO;
+ }
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ return status;
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Rx VSI for operation.
+ *
+ * Returns 0 on success and a negative value on error.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+ int err = 0;
+ u16 i;
+
+ if (vsi->type == ICE_VSI_VF)
+ goto setup_rings;
+
+ if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
+ vsi->max_frame = vsi->netdev->mtu +
+ ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ else
+ vsi->max_frame = ICE_RXBUF_2048;
+
+ vsi->rx_buf_len = ICE_RXBUF_2048;
+setup_rings:
+ /* set up individual rings */
+ for (i = 0; i < vsi->num_rxq && !err; i++)
+ err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+
+ if (err) {
+ dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
+ return -EIO;
+ }
+ return err;
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Tx VSI for operation.
+ *
+ * Returns 0 on success and a negative value on error.
+ */
+int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
+{
+ struct ice_aqc_add_tx_qgrp *qg_buf;
+ struct ice_aqc_add_txqs_perq *txq;
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ u16 buf_len, i, pf_q;
+ int err = 0, tc = 0;
+ u8 num_q_grps;
+
+ buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
+ qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
+ if (!qg_buf)
+ return -ENOMEM;
+
+ if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
+ err = -EINVAL;
+ goto err_cfg_txqs;
+ }
+ qg_buf->num_txqs = 1;
+ num_q_grps = 1;
+
+ /* set up and configure the Tx queues */
+ ice_for_each_txq(vsi, i) {
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+
+ pf_q = vsi->txq_map[i];
+ ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+		/* init the queue-specific tail register. It is referred to as
+		 * the transmit comm scheduler queue doorbell.
+ */
+ vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+ num_q_grps, qg_buf, buf_len, NULL);
+ if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ err = -ENODEV;
+ goto err_cfg_txqs;
+ }
+
+ /* Add Tx Queue TEID into the VSI Tx ring from the response
+ * This will complete configuring and enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ vsi->tx_rings[i]->txq_teid =
+ le32_to_cpu(txq->q_teid);
+ }
+err_cfg_txqs:
+ devm_kfree(&pf->pdev->dev, qg_buf);
+ return err;
+}
+
+/**
+ * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
+ * @intrl: interrupt rate limit in usecs
+ * @gran: interrupt rate limit granularity in usecs
+ *
+ * This function converts a decimal interrupt rate limit in usecs to the format
+ * expected by firmware.
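+ *
+ * For example, with gran = 4 usecs an intrl of 20 usecs yields a register
+ * value of 5 with GLINT_RATE_INTRL_ENA_M set; an intrl below gran gives 0,
+ * which leaves rate limiting disabled.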
+ */
+static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
+{
+ u32 val = intrl / gran;
+
+ if (val)
+ return val | GLINT_RATE_INTRL_ENA_M;
+ return 0;
+}
+
+/**
+ * ice_cfg_itr - configure the initial interrupt throttle values
+ * @hw: pointer to the HW structure
+ * @q_vector: interrupt vector that's being configured
+ * @vector: HW vector index to apply the interrupt throttling to
+ *
+ * Configure interrupt throttling values for the ring containers that are
+ * associated with the interrupt vector passed in.
+ */
+static void
+ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
+{
+ u8 itr_gran = hw->itr_gran;
+
+ if (q_vector->num_ring_rx) {
+ struct ice_ring_container *rc = &q_vector->rx;
+
+ rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran);
+ rc->latency_range = ICE_LOW_LATENCY;
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ }
+
+ if (q_vector->num_ring_tx) {
+ struct ice_ring_container *rc = &q_vector->tx;
+
+ rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran);
+ rc->latency_range = ICE_LOW_LATENCY;
+ wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr);
+ }
+}
+
+/**
+ * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ */
+void ice_vsi_cfg_msix(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 vector = vsi->hw_base_vector;
+ struct ice_hw *hw = &pf->hw;
+ u32 txq = 0, rxq = 0;
+ int i, q;
+
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ ice_cfg_itr(hw, q_vector, vector);
+
+ wr32(hw, GLINT_RATE(vector),
+ ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
+
+		/* Both the Transmit Queue Interrupt Cause Control register
+		 * and the Receive Queue Interrupt Cause Control register
+		 * expect the MSIX_INDX field to be the vector index
+		 * within the function space and not the absolute
+		 * vector index across the PF or across the device.
+		 * For SR-IOV VF VSIs, the queue vector index always starts
+		 * at 1, since the first vector index (0) is used for the
+		 * OICR in VF space. Since VMDq and other PF VSIs are within
+		 * the PF function space, use the vector index that is
+		 * tracked for this PF.
+ */
+ for (q = 0; q < q_vector->num_ring_tx; q++) {
+ int itr_idx = q_vector->tx.itr_idx;
+ u32 val;
+
+ if (vsi->type == ICE_VSI_VF)
+ val = QINT_TQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+ ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
+ else
+ val = QINT_TQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_TQCTL_ITR_INDX_S) |
+ (vector << QINT_TQCTL_MSIX_INDX_S);
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ txq++;
+ }
+
+ for (q = 0; q < q_vector->num_ring_rx; q++) {
+ int itr_idx = q_vector->rx.itr_idx;
+ u32 val;
+
+ if (vsi->type == ICE_VSI_VF)
+ val = QINT_RQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+ ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
+ else
+ val = QINT_RQCTL_CAUSE_ENA_M |
+ (itr_idx << QINT_RQCTL_ITR_INDX_S) |
+ (vector << QINT_RQCTL_MSIX_INDX_S);
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+ rxq++;
+ }
+ }
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
+ * @vsi: the VSI being changed
+ */
+int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ /* Here we are configuring the VSI to let the driver add VLAN tags by
+ * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+ * insertion happens in the Tx hot path, in ice_tx_map.
+ */
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
+
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ /* Here we are configuring what the VSI should do with the VLAN tag in
+ * the Rx packet. We can either leave the tag in the packet or put it in
+ * the Rx descriptor.
+ */
+ if (ena) {
+ /* Strip VLAN tag from Rx packet and put it in the desc */
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
+ } else {
+ /* Disable stripping. Leave tag in packet */
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ }
+
+ /* Allow all packets untagged/tagged */
+ ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+ ena, status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_vsi_start_rx_rings - start VSI's Rx rings
+ * @vsi: the VSI whose rings are to be started
+ *
+ * Returns 0 on success and a negative value on error
+ */
+int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
+{
+ return ice_vsi_ctrl_rx_rings(vsi, true);
+}
+
+/**
+ * ice_vsi_stop_rx_rings - stop VSI's Rx rings
+ * @vsi: the VSI
+ *
+ * Returns 0 on success and a negative value on error
+ */
+int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
+{
+ return ice_vsi_ctrl_rx_rings(vsi, false);
+}
+
+/**
+ * ice_vsi_stop_tx_rings - Disable Tx rings
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative id of VF/VM
+ */
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u32 *q_teids, val;
+ u16 *q_ids, i;
+ int err = 0;
+
+ if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
+ return -EINVAL;
+
+ q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
+ GFP_KERNEL);
+ if (!q_teids)
+ return -ENOMEM;
+
+ q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
+ GFP_KERNEL);
+ if (!q_ids) {
+ err = -ENOMEM;
+ goto err_alloc_q_ids;
+ }
+
+ /* set up the Tx queue list to be disabled */
+ ice_for_each_txq(vsi, i) {
+ u16 v_idx;
+
+ if (!vsi->tx_rings || !vsi->tx_rings[i]) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ q_ids[i] = vsi->txq_map[i];
+ q_teids[i] = vsi->tx_rings[i]->txq_teid;
+
+ /* clear cause_ena bit for disabled queues */
+ val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
+
+ /* trigger a software interrupt for the vector associated to
+ * the queue to schedule NAPI handler
+ */
+ v_idx = vsi->tx_rings[i]->q_vector->v_idx;
+ wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
+ GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
+ }
+ status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
+ rst_src, rel_vmvf_num, NULL);
+ /* if the disable queue command was exercised during an active reset
+ * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
+ * the reset operation disables queues at the hardware level anyway.
+ */
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_info(&pf->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n",
+ status);
+ err = -ENODEV;
+ }
+
+err_out:
+ devm_kfree(&pf->pdev->dev, q_ids);
+
+err_alloc_q_ids:
+ devm_kfree(&pf->pdev->dev, q_teids);
+
+ return err;
+}
+
+/**
+ * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
+ * @vsi: VSI to enable or disable VLAN pruning on
+ * @ena: set to true to enable VLAN pruning and false to disable it
+ *
+ * returns 0 if VSI is updated, negative otherwise
+ */
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_vsi_ctx *ctxt;
+ struct device *dev;
+ int status;
+
+ if (!vsi)
+ return -EINVAL;
+
+ dev = &vsi->back->pdev->dev;
+ ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ if (ena) {
+ ctxt->info.sec_flags |=
+ ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ } else {
+ ctxt->info.sec_flags &=
+ ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ }
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
+ if (status) {
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
+ ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
+ vsi->back->hw.adminq.sq_last_status);
+ goto err_out;
+ }
+
+ vsi->info.sec_flags = ctxt->info.sec_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+
+ devm_kfree(dev, ctxt);
+ return 0;
+
+err_out:
+ devm_kfree(dev, ctxt);
+ return -EIO;
+}
+
+/**
+ * ice_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ * @type: VSI type
+ * @vf_id: the ID of the VF this VSI connects to. This field is only
+ *         meaningful for the ICE_VSI_VF VSI type; for other VSI types,
+ *         pass ICE_INVAL_VFID.
+ *
+ * This allocates the sw VSI structure and its queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct on
+ * success, NULL on failure.
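+ *
+ * For example, a PF VSI would typically be created with a call like
+ * ice_vsi_setup(pf, pf->hw.port_info, ICE_VSI_PF, ICE_INVAL_VFID).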
+ */
+struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+ enum ice_vsi_type type, u16 vf_id)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct device *dev = &pf->pdev->dev;
+ struct ice_vsi *vsi;
+ int ret, i;
+
+ vsi = ice_vsi_alloc(pf, type);
+ if (!vsi) {
+ dev_err(dev, "could not allocate VSI\n");
+ return NULL;
+ }
+
+ vsi->port_info = pi;
+ vsi->vsw = pf->first_sw;
+ if (vsi->type == ICE_VSI_VF)
+ vsi->vf_id = vf_id;
+
+ if (ice_vsi_get_qs(vsi)) {
+ dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+ vsi->idx);
+ goto unroll_get_qs;
+ }
+
+ /* set RSS capabilities */
+ ice_vsi_set_rss_params(vsi);
+
+ /* create the VSI */
+ ret = ice_vsi_init(vsi);
+ if (ret)
+ goto unroll_get_qs;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto unroll_vsi_init;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto unroll_alloc_q_vector;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
+ ice_vsi_map_rings_to_vectors(vsi);
+
+		/* Do not exit if configuring RSS had an issue; at least
+		 * traffic can still be received on the first queue. Hence
+		 * there is no need to capture the return value.
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_vsi_cfg_rss_lut_key(vsi);
+ break;
+ case ICE_VSI_VF:
+		/* The VF driver takes care of creating the netdev for this VSI
+		 * type and maps queues to vectors through Virtchnl; the PF
+		 * driver only creates a VSI and the corresponding structures
+		 * for bookkeeping purposes.
+ */
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto unroll_vsi_init;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto unroll_alloc_q_vector;
+
+		/* Set up the vector base only during the VF init phase or when
+		 * the VF asks for more vectors than the assigned number. In
+		 * all other cases, assign hw_base_vector the value given
+		 * earlier.
+ */
+ if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) {
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto unroll_vector_base;
+ } else {
+ vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
+ }
+ pf->q_left_tx -= vsi->alloc_txq;
+ pf->q_left_rx -= vsi->alloc_rxq;
+ break;
+ default:
+ /* if VSI type is not recognized, clean up the resources and
+ * exit
+ */
+ goto unroll_vsi_init;
+ }
+
+ ice_vsi_set_tc_cfg(vsi);
+
+ /* configure VSI nodes based on number of queues and TC's */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq;
+
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
+ goto unroll_vector_base;
+ }
+
+ return vsi;
+
+unroll_vector_base:
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ /* reclaim HW interrupt back to the common pool */
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
+unroll_alloc_q_vector:
+ ice_vsi_free_q_vectors(vsi);
+unroll_vsi_init:
+ ice_vsi_delete(vsi);
+unroll_get_qs:
+ ice_vsi_put_qs(vsi);
+ pf->q_left_tx += vsi->alloc_txq;
+ pf->q_left_rx += vsi->alloc_rxq;
+ ice_vsi_clear(vsi);
+
+ return NULL;
+}
+
+/**
+ * ice_vsi_release_msix - Clear the queue-to-interrupt mapping in HW
+ * @vsi: the VSI being cleaned up
+ */
+static void ice_vsi_release_msix(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 vector = vsi->hw_base_vector;
+ struct ice_hw *hw = &pf->hw;
+ u32 txq = 0;
+ u32 rxq = 0;
+ int i, q;
+
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0);
+ wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0);
+ for (q = 0; q < q_vector->num_ring_tx; q++) {
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
+ txq++;
+ }
+
+ for (q = 0; q < q_vector->num_ring_rx; q++) {
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
+ rxq++;
+ }
+ }
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_vsi_free_irq - Free the IRQ association with the OS
+ * @vsi: the VSI being configured
+ */
+void ice_vsi_free_irq(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int base = vsi->sw_base_vector;
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ int i;
+
+ if (!vsi->q_vectors || !vsi->irqs_ready)
+ return;
+
+ ice_vsi_release_msix(vsi);
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
+ vsi->irqs_ready = false;
+ for (i = 0; i < vsi->num_q_vectors; i++) {
+ u16 vector = i + base;
+ int irq_num;
+
+ irq_num = pf->msix_entries[vector].vector;
+
+ /* free only the irqs that were actually requested */
+ if (!vsi->q_vectors[i] ||
+ !(vsi->q_vectors[i]->num_ring_tx ||
+ vsi->q_vectors[i]->num_ring_rx))
+ continue;
+
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
+
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(irq_num, NULL);
+ synchronize_irq(irq_num);
+ devm_free_irq(&pf->pdev->dev, irq_num,
+ vsi->q_vectors[i]);
+ }
+ }
+}
+
+/**
+ * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->tx_rings)
+ return;
+
+ ice_for_each_txq(vsi, i)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ ice_free_tx_ring(vsi->tx_rings[i]);
+}
+
+/**
+ * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->rx_rings)
+ return;
+
+ ice_for_each_rxq(vsi, i)
+ if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
+ ice_free_rx_ring(vsi->rx_rings[i]);
+}
+
+/**
+ * ice_vsi_close - Shut down a VSI
+ * @vsi: the VSI being shut down
+ */
+void ice_vsi_close(struct ice_vsi *vsi)
+{
+ if (!test_and_set_bit(__ICE_DOWN, vsi->state))
+ ice_down(vsi);
+
+ ice_vsi_free_irq(vsi);
+ ice_vsi_free_tx_rings(vsi);
+ ice_vsi_free_rx_rings(vsi);
+}
+
+/**
+ * ice_free_res - free a block of resources
+ * @res: pointer to the resource
+ * @index: starting index previously returned by ice_get_res
+ * @id: identifier to track owner
+ *
+ * Returns number of resources freed
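+ *
+ * For example, if four consecutive entries starting at @index are all
+ * tagged with @id, all four are cleared and 4 is returned.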
+ */
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
+{
+ int count = 0;
+ int i;
+
+ if (!res || index >= res->num_entries)
+ return -EINVAL;
+
+ id |= ICE_RES_VALID_BIT;
+ for (i = index; i < res->num_entries && res->list[i] == id; i++) {
+ res->list[i] = 0;
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * ice_search_res - Search the tracker for a block of resources
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
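+ *
+ * For example (illustrative state): with needed = 3, a hint of 0 and only
+ * entry 0 already allocated, the window restarts at 1, entries 1-3 are
+ * tagged with @id and 1 is returned, with search_hint advanced to 4.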
+ */
+static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int start = res->search_hint;
+ int end = start;
+
+ if ((start + needed) > res->num_entries)
+ return -ENOMEM;
+
+ id |= ICE_RES_VALID_BIT;
+
+ do {
+ /* skip already allocated entries */
+ if (res->list[end++] & ICE_RES_VALID_BIT) {
+ start = end;
+ if ((start + needed) > res->num_entries)
+ break;
+ }
+
+ if (end == (start + needed)) {
+ int i = start;
+
+ /* there was enough, so assign it to the requestor */
+ while (i != end)
+ res->list[i++] = id;
+
+ if (end == res->num_entries)
+ end = 0;
+
+ res->search_hint = end;
+ return start;
+ }
+ } while (1);
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_get_res - get a block of resources
+ * @pf: board private structure
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ * The search_hint trick and lack of advanced fit-finding only work
+ * because requests are highly likely to all be the same size.
+ * Linear search time and any fragmentation should be minimal.
+ */
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int ret;
+
+ if (!res || !pf)
+ return -EINVAL;
+
+ if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "param err: needed=%d, num_entries = %d id=0x%04x\n",
+ needed, res->num_entries, id);
+ return -EINVAL;
+ }
+
+ /* search based on search_hint */
+ ret = ice_search_res(res, needed, id);
+
+ if (ret < 0) {
+ /* previous search failed. Reset search hint and try again */
+ res->search_hint = 0;
+ ret = ice_search_res(res, needed, id);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+ int base = vsi->sw_base_vector;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int i;
+
+ /* disable interrupt causation from each queue */
+ if (vsi->tx_rings) {
+ ice_for_each_txq(vsi, i) {
+ if (vsi->tx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->tx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_TQCTL(reg));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(reg), val);
+ }
+ }
+ }
+
+ if (vsi->rx_rings) {
+ ice_for_each_rxq(vsi, i) {
+ if (vsi->rx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->rx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+ }
+ }
+ }
+
+ /* disable each interrupt */
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ for (i = vsi->hw_base_vector;
+ i < (vsi->num_q_vectors + vsi->hw_base_vector); i++)
+ wr32(hw, GLINT_DYN_CTL(i), 0);
+
+ ice_flush(hw);
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ synchronize_irq(pf->msix_entries[i + base].vector);
+ }
+}
+
+/**
+ * ice_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ */
+int ice_vsi_release(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf;
+ struct ice_vf *vf;
+
+ if (!vsi->back)
+ return -ENODEV;
+ pf = vsi->back;
+ vf = &pf->vf[vsi->vf_id];
+	/* do not unregister or free netdevs while the driver is in the reset
+	 * recovery pending state. Since reset/rebuild happens through the PF
+	 * service task workqueue, it's not a good idea to unregister a netdev
+	 * that is associated with the PF currently running the work queue
+	 * items. This avoids a check_flush_dependency() warning on this wq.
+ */
+ if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_rss_clean(vsi);
+
+ /* Disable VSI and free resources */
+ ice_vsi_dis_irq(vsi);
+ ice_vsi_close(vsi);
+
+ /* reclaim interrupt vectors back to PF */
+ if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
+ vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ /* reclaim HW interrupts back to the common pool */
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector,
+ vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
+ } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
+ /* Reclaim VF resources back only while freeing all VFs or
+ * vector reassignment is requested
+ */
+ ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx,
+ vsi->idx);
+ pf->num_avail_hw_msix += pf->num_vf_msix;
+ }
+
+ ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ ice_vsi_delete(vsi);
+ ice_vsi_free_q_vectors(vsi);
+ ice_vsi_clear_rings(vsi);
+
+ ice_vsi_put_qs(vsi);
+ pf->q_left_tx += vsi->alloc_txq;
+ pf->q_left_rx += vsi->alloc_rxq;
+
+	/* retain the SW VSI data structure since it is needed to unregister
+	 * and free the VSI netdev when the PF is not in the reset recovery
+	 * pending state, for example during rmmod.
+ */
+ if (!ice_is_reset_in_progress(pf->state))
+ ice_vsi_clear(vsi);
+
+ return 0;
+}
+
+/**
+ * ice_vsi_rebuild - Rebuild VSI after reset
+ * @vsi: VSI to be rebuilt
+ *
+ * Returns 0 on success and negative value on failure
+ */
+int ice_vsi_rebuild(struct ice_vsi *vsi)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ int ret, i;
+
+ if (!vsi)
+ return -EINVAL;
+
+ ice_vsi_free_q_vectors(vsi);
+ ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+ vsi->sw_base_vector = 0;
+ vsi->hw_base_vector = 0;
+ ice_vsi_clear_rings(vsi);
+ ice_vsi_free_arrays(vsi, false);
+ ice_vsi_set_num_qs(vsi);
+
+ /* Initialize VSI struct elements and create VSI in FW */
+ ret = ice_vsi_init(vsi);
+ if (ret < 0)
+ goto err_vsi;
+
+ ret = ice_vsi_alloc_arrays(vsi, false);
+ if (ret < 0)
+ goto err_vsi;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ice_vsi_map_rings_to_vectors(vsi);
+ break;
+ case ICE_VSI_VF:
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto err_vectors;
+
+ vsi->back->q_left_tx -= vsi->alloc_txq;
+ vsi->back->q_left_rx -= vsi->alloc_rxq;
+ break;
+ default:
+ break;
+ }
+
+ ice_vsi_set_tc_cfg(vsi);
+
+ /* configure VSI nodes based on number of queues and TC's */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq;
+
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed VSI lan queue config\n");
+ goto err_vectors;
+ }
+ return 0;
+
+err_vectors:
+ ice_vsi_free_q_vectors(vsi);
+err_rings:
+ if (vsi->netdev) {
+ vsi->current_netdev_flags = 0;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+err_vsi:
+ ice_vsi_clear(vsi);
+ set_bit(__ICE_RESET_FAILED, vsi->back->state);
+ return ret;
+}
+
+/**
+ * ice_is_reset_in_progress - check for a reset in progress
+ * @state: PF state field
+ */
+bool ice_is_reset_in_progress(unsigned long *state)
+{
+ return test_bit(__ICE_RESET_OICR_RECV, state) ||
+ test_bit(__ICE_PFR_REQ, state) ||
+ test_bit(__ICE_CORER_REQ, state) ||
+ test_bit(__ICE_GLOBR_REQ, state);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
new file mode 100644
index 000000000000..677db40338f5
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_LIB_H_
+#define _ICE_LIB_H_
+
+#include "ice.h"
+
+int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+ const u8 *macaddr);
+
+void ice_free_fltr_list(struct device *dev, struct list_head *h);
+
+void ice_update_eth_stats(struct ice_vsi *vsi);
+
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
+
+int ice_vsi_cfg_txqs(struct ice_vsi *vsi);
+
+void ice_vsi_cfg_msix(struct ice_vsi *vsi);
+
+int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
+
+int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
+
+int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
+
+int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
+
+int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
+
+int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
+
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num);
+
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
+
+void ice_vsi_delete(struct ice_vsi *vsi);
+
+int ice_vsi_clear(struct ice_vsi *vsi);
+
+struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+ enum ice_vsi_type type, u16 vf_id);
+
+int ice_vsi_release(struct ice_vsi *vsi);
+
+void ice_vsi_close(struct ice_vsi *vsi);
+
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
+
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
+
+int ice_vsi_rebuild(struct ice_vsi *vsi);
+
+bool ice_is_reset_in_progress(unsigned long *state);
+
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
+
+void ice_vsi_put_qs(struct ice_vsi *vsi);
+
+void ice_vsi_dis_irq(struct ice_vsi *vsi);
+
+void ice_vsi_free_irq(struct ice_vsi *vsi);
+
+void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
+
+void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
+
+int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
+
+int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+
+irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data);
+#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f1e80eed2fd6..8f61b375e768 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6,8 +6,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ice.h"
+#include "ice_lib.h"
-#define DRV_VERSION "ice-0.7.0-k"
+#define DRV_VERSION "0.7.2-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -15,7 +16,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
static int debug = -1;
@@ -31,173 +32,84 @@ static const struct net_device_ops ice_netdev_ops;
static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
-static int ice_vsi_release(struct ice_vsi *vsi);
+
+static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);
/**
- * ice_get_free_slot - get the next non-NULL location index in array
- * @array: array to search
- * @size: size of the array
- * @curr: last known occupied index to be used as a search hint
- *
- * void * is being used to keep the functionality generic. This lets us use this
- * function on any array of pointers.
+ * ice_get_tx_pending - returns number of Tx descriptors not processed
+ * @ring: the ring of descriptors
*/
-static int ice_get_free_slot(void *array, int size, int curr)
+static u32 ice_get_tx_pending(struct ice_ring *ring)
{
- int **tmp_array = (int **)array;
- int next;
+ u32 head, tail;
- if (curr < (size - 1) && !tmp_array[curr + 1]) {
- next = curr + 1;
- } else {
- int i = 0;
+ head = ring->next_to_clean;
+ tail = readl(ring->tail);
- while ((i < size) && (tmp_array[i]))
- i++;
- if (i == size)
- next = ICE_NO_VSI;
- else
- next = i;
- }
- return next;
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+ return 0;
}
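ice_get_tx_pending() handles the wraparound case: once next_to_clean has passed the tail, the pending count is tail + count - head. A standalone model of the arithmetic, with invented values:

    #include <stdint.h>
    #include <stdio.h>

    /* same math as the helper above, for a ring of `count` descriptors */
    static uint32_t tx_pending(uint32_t head, uint32_t tail, uint32_t count)
    {
            if (head == tail)
                    return 0;
            return head < tail ? tail - head : tail + count - head;
    }

    int main(void)
    {
            /* head wrapped past tail: 10 + 256 - 250 = 16 pending */
            printf("%u\n", tx_pending(250, 10, 256)); /* prints 16 */
            return 0;
    }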
/**
- * ice_search_res - Search the tracker for a block of resources
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- * Returns the base item index of the block, or -ENOMEM for error
+ * ice_check_for_hang_subtask - check for and recover hung queues
+ * @pf: pointer to PF struct
*/
-static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
- int start = res->search_hint;
- int end = start;
-
- id |= ICE_RES_VALID_BIT;
+ struct ice_vsi *vsi = NULL;
+ unsigned int i;
+ u32 v, v_idx;
+ int packets;
- do {
- /* skip already allocated entries */
- if (res->list[end++] & ICE_RES_VALID_BIT) {
- start = end;
- if ((start + needed) > res->num_entries)
- break;
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
+ vsi = pf->vsi[v];
+ break;
}
- if (end == (start + needed)) {
- int i = start;
+ if (!vsi || test_bit(__ICE_DOWN, vsi->state))
+ return;
- /* there was enough, so assign it to the requestor */
- while (i != end)
- res->list[i++] = id;
+ if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
+ return;
- if (end == res->num_entries)
- end = 0;
+ for (i = 0; i < vsi->num_txq; i++) {
+ struct ice_ring *tx_ring = vsi->tx_rings[i];
+
+ if (tx_ring && tx_ring->desc) {
+ int itr = ICE_ITR_NONE;
+
+ /* If the packet counter has not changed, the queue is
+ * likely stalled, so force an interrupt for this
+ * queue.
+ *
+ * prev_pkt would be negative if there was no
+ * pending work.
+ */
+ packets = tx_ring->stats.pkts & INT_MAX;
+ if (tx_ring->tx_stats.prev_pkt == packets) {
+ /* Trigger sw interrupt to revive the queue */
+ v_idx = tx_ring->q_vector->v_idx;
+ wr32(&vsi->back->hw,
+ GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
+ (itr << GLINT_DYN_CTL_ITR_INDX_S) |
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_MSK_M);
+ continue;
+ }
- res->search_hint = end;
- return start;
+ /* Memory barrier between read of packet count and call
+ * to ice_get_tx_pending()
+ */
+ smp_rmb();
+ tx_ring->tx_stats.prev_pkt =
+ ice_get_tx_pending(tx_ring) ? packets : -1;
}
- } while (1);
-
- return -ENOMEM;
-}
-
-/**
- * ice_get_res - get a block of resources
- * @pf: board private structure
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or -ENOMEM for error
- * The search_hint trick and lack of advanced fit-finding only works
- * because we're highly likely to have all the same sized requests.
- * Linear search time and any fragmentation should be minimal.
- */
-static int
-ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
-{
- int ret;
-
- if (!res || !pf)
- return -EINVAL;
-
- if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
- dev_err(&pf->pdev->dev,
- "param err: needed=%d, num_entries = %d id=0x%04x\n",
- needed, res->num_entries, id);
- return -EINVAL;
- }
-
- /* search based on search_hint */
- ret = ice_search_res(res, needed, id);
-
- if (ret < 0) {
- /* previous search failed. Reset search hint and try again */
- res->search_hint = 0;
- ret = ice_search_res(res, needed, id);
}
-
- return ret;
-}
-
-/**
- * ice_free_res - free a block of resources
- * @res: pointer to the resource
- * @index: starting index previously returned by ice_get_res
- * @id: identifier to track owner
- * Returns number of resources freed
- */
-static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
-{
- int count = 0;
- int i;
-
- if (!res || index >= res->num_entries)
- return -EINVAL;
-
- id |= ICE_RES_VALID_BIT;
- for (i = index; i < res->num_entries && res->list[i] == id; i++) {
- res->list[i] = 0;
- count++;
- }
-
- return count;
-}
-
-/**
- * ice_add_mac_to_list - Add a mac address filter entry to the list
- * @vsi: the VSI to be forwarded to
- * @add_list: pointer to the list which contains MAC filter entries
- * @macaddr: the MAC address to be added.
- *
- * Adds mac address filter entry to the temp list
- *
- * Returns 0 on success or ENOMEM on failure.
- */
-static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr)
-{
- struct ice_fltr_list_entry *tmp;
- struct ice_pf *pf = vsi->back;
-
- tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
- if (!tmp)
- return -ENOMEM;
-
- tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src = vsi->vsi_num;
- tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
- ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
-
- INIT_LIST_HEAD(&tmp->list_entry);
- list_add(&tmp->list_entry, add_list);
-
- return 0;
}
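The hang check above is a two-pass detector: prev_pkt caches the packet count seen on the previous service-task pass (or -1 when the queue had no pending work), and an unchanged count on the next pass triggers a software interrupt. A compact user-space model of that protocol (struct, fields and values are invented; the memory barrier in the real code is omitted here):

    #include <stdbool.h>
    #include <stdio.h>

    struct q { int prev_pkt; int pkts; int pending; };

    static bool check_pass(struct q *q)
    {
            int packets = q->pkts;

            if (q->prev_pkt == packets)
                    return true;    /* stalled: would kick a SW interrupt */
            /* arm only if work is pending, as in the driver */
            q->prev_pkt = q->pending ? packets : -1;
            return false;
    }

    int main(void)
    {
            struct q q = { .prev_pkt = -1, .pkts = 5, .pending = 1 };

            check_pass(&q);         /* first pass arms prev_pkt = 5 */
            printf("%s\n", check_pass(&q) ? "stalled" : "ok"); /* stalled */
            return 0;
    }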
/**
@@ -243,24 +155,6 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
}
/**
- * ice_free_fltr_list - free filter lists helper
- * @dev: pointer to the device struct
- * @h: pointer to the list head to be freed
- *
- * Helper function to free filter lists previously created using
- * ice_add_mac_to_list
- */
-static void ice_free_fltr_list(struct device *dev, struct list_head *h)
-{
- struct ice_fltr_list_entry *e, *tmp;
-
- list_for_each_entry_safe(e, tmp, h, list_entry) {
- list_del(&e->list_entry);
- devm_kfree(dev, e);
- }
-}
-
-/**
* ice_vsi_fltr_changed - check if filter state changed
* @vsi: VSI to be checked
*
@@ -359,7 +253,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply TX filter rule to get traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_TX);
if (status) {
netdev_err(netdev, "Error setting default VSI %i tx rule\n",
@@ -369,7 +263,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
goto out_promisc;
}
/* Apply RX filter rule to get traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_RX);
if (status) {
netdev_err(netdev, "Error setting default VSI %i rx rule\n",
@@ -380,7 +274,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
} else {
/* Clear TX filter rule to stop traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_TX);
if (status) {
netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
@@ -389,8 +283,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
err = -EIO;
goto out_promisc;
}
- /* Clear filter RX to remove traffic from wire */
- status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
+ /* Clear RX filter to remove traffic from wire */
+ status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
@@ -438,15 +332,6 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
}
/**
- * ice_is_reset_recovery_pending - schedule a reset
- * @state: pf state field
- */
-static bool ice_is_reset_recovery_pending(unsigned long int *state)
-{
- return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
-}
-
-/**
* ice_prepare_for_reset - prep for the core to reset
* @pf: board private structure
*
@@ -456,23 +341,17 @@ static void
ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- u32 v;
-
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num);
- dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+ /* Notify VFs of impending reset */
+ if (ice_check_sq_alive(hw, &hw->mailboxq))
+ ice_vc_notify_reset(pf);
/* disable the VSIs and their queues that are not already DOWN */
- /* pf_dis_all_vsi modifies netdev structures -rtnl_lock needed */
ice_pf_dis_all_vsi(pf);
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- pf->vsi[v]->vsi_num = 0;
-
ice_shutdown_all_ctrlq(hw);
+
+ set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}
/**
@@ -489,27 +368,29 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
WARN_ON(in_interrupt());
- /* PFR is a bit of a special case because it doesn't result in an OICR
- * interrupt. So for PFR, we prepare for reset, issue the reset and
- * rebuild sequentially.
- */
- if (reset_type == ICE_RESET_PFR) {
- set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
- ice_prepare_for_reset(pf);
- }
+ ice_prepare_for_reset(pf);
/* trigger the reset */
if (ice_reset(hw, reset_type)) {
dev_err(dev, "reset %d failed\n", reset_type);
set_bit(__ICE_RESET_FAILED, pf->state);
- clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__ICE_RESET_OICR_RECV, pf->state);
+ clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+ clear_bit(__ICE_PFR_REQ, pf->state);
+ clear_bit(__ICE_CORER_REQ, pf->state);
+ clear_bit(__ICE_GLOBR_REQ, pf->state);
return;
}
+ /* PFR is a bit of a special case because it doesn't result in an OICR
+ * interrupt. So for PFR, rebuild after the reset and clear the reset-
+ * associated state bits.
+ */
if (reset_type == ICE_RESET_PFR) {
pf->pfr_count++;
ice_rebuild(pf);
- clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+ clear_bit(__ICE_PFR_REQ, pf->state);
}
}
@@ -519,48 +400,60 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
*/
static void ice_reset_subtask(struct ice_pf *pf)
{
- enum ice_reset_req reset_type;
-
- rtnl_lock();
+ enum ice_reset_req reset_type = ICE_RESET_INVAL;
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
- * OICR interrupt. The OICR handler (ice_misc_intr) determines what
- * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in
- * pf->state. So if reset/recovery is pending (as indicated by this bit)
- * we do a rebuild and return.
+ * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
+ * of reset is pending and sets bits in pf->state indicating the reset
+ * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set, prepare
+ * for the pending reset if not already prepared (for PF software-initiated
+ * global resets, __ICE_PREPARED_FOR_RESET is already set; for global
+ * resets initiated by firmware or by software on other PFs, it is not,
+ * so prepare now), then poll for reset completion, rebuild and return.
*/
- if (ice_is_reset_recovery_pending(pf->state)) {
+ if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
clear_bit(__ICE_GLOBR_RECV, pf->state);
clear_bit(__ICE_CORER_RECV, pf->state);
- ice_prepare_for_reset(pf);
+ if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
+ ice_prepare_for_reset(pf);
/* make sure we are ready to rebuild */
- if (ice_check_reset(&pf->hw))
+ if (ice_check_reset(&pf->hw)) {
set_bit(__ICE_RESET_FAILED, pf->state);
- else
+ } else {
+ /* done with reset. start rebuild */
+ pf->hw.reset_ongoing = false;
ice_rebuild(pf);
- clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
- goto unlock;
+ /* clear bits to resume normal operations, but the
+ * __ICE_NEEDS_RESTART bit is set in case the rebuild failed
+ */
+ clear_bit(__ICE_RESET_OICR_RECV, pf->state);
+ clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
+ clear_bit(__ICE_PFR_REQ, pf->state);
+ clear_bit(__ICE_CORER_REQ, pf->state);
+ clear_bit(__ICE_GLOBR_REQ, pf->state);
+ }
+
+ return;
}
/* No pending resets to finish processing. Check for new resets */
- if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state))
- reset_type = ICE_RESET_GLOBR;
- else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))
- reset_type = ICE_RESET_CORER;
- else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))
+ if (test_bit(__ICE_PFR_REQ, pf->state))
reset_type = ICE_RESET_PFR;
- else
- goto unlock;
+ if (test_bit(__ICE_CORER_REQ, pf->state))
+ reset_type = ICE_RESET_CORER;
+ if (test_bit(__ICE_GLOBR_REQ, pf->state))
+ reset_type = ICE_RESET_GLOBR;
+ /* If no valid reset type requested just return */
+ if (reset_type == ICE_RESET_INVAL)
+ return;
- /* reset if not already down or resetting */
+ /* reset if not already down or busy */
if (!test_bit(__ICE_DOWN, pf->state) &&
!test_bit(__ICE_CFG_BUSY, pf->state)) {
ice_do_reset(pf, reset_type);
}
-
-unlock:
- rtnl_unlock();
}
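Note that the request checks now run PFR, then CORER, then GLOBR without an else chain, so a later assignment overrides an earlier one and the most severe pending reset wins. A standalone model of that selection:

    #include <stdio.h>

    enum reset { RESET_INVAL, RESET_PFR, RESET_CORER, RESET_GLOBR };

    /* lowest-to-highest checks: the last matching one wins */
    static enum reset pick_reset(int pfr, int corer, int globr)
    {
            enum reset r = RESET_INVAL;

            if (pfr)
                    r = RESET_PFR;
            if (corer)
                    r = RESET_CORER;
            if (globr)
                    r = RESET_GLOBR;
            return r;
    }

    int main(void)
    {
            printf("%d\n", pick_reset(1, 0, 1)); /* prints 3: GLOBR wins */
            return 0;
    }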
/**
@@ -772,6 +665,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
}
}
+ ice_vc_notify_link_state(pf);
+
return 0;
}
@@ -822,6 +717,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
cq = &hw->adminq;
qtype = "Admin";
break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ qtype = "Mailbox";
+ break;
default:
dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
q_type);
@@ -903,6 +802,12 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
dev_err(&pf->pdev->dev,
"Could not handle link event\n");
break;
+ case ice_mbx_opc_send_msg_to_pf:
+ ice_vc_process_vf_msg(pf, &event);
+ break;
+ case ice_aqc_opc_fw_logging:
+ ice_output_fw_log(hw, &event.desc, event.msg_buf);
+ break;
default:
dev_dbg(&pf->pdev->dev,
"%s Receive Queue unknown event 0x%04x ignored\n",
@@ -959,6 +864,28 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
}
/**
+ * ice_clean_mailboxq_subtask - clean the MailboxQ rings
+ * @pf: board private structure
+ */
+static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
+ return;
+
+ if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
+ return;
+
+ clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
+
+ if (ice_ctrlq_pending(hw, &hw->mailboxq))
+ __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
+
+ ice_flush(hw);
+}
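The mailbox subtask follows the usual clear-then-recheck ordering: the pending bit is cleared before the final ice_ctrlq_pending() check, so an event that lands between the first drain and the clear still gets drained. A single-threaded sketch of that ordering (the globals and drain() are stand-ins, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    static int queued;       /* events in the modeled control queue */
    static bool pending_bit;

    static void drain(void) { queued = 0; }

    static void clean_subtask(void)
    {
            if (!pending_bit)
                    return;
            drain();
            pending_bit = false;    /* clear BEFORE the recheck */
            if (queued)             /* raced-in event? drain again */
                    drain();
    }

    int main(void)
    {
            pending_bit = true;
            queued = 3;
            clean_subtask();
            printf("queued=%d\n", queued); /* prints 0 */
            return 0;
    }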
+
+/**
* ice_service_task_schedule - schedule the service task to wake up
* @pf: board private structure
*
@@ -966,8 +893,9 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
*/
static void ice_service_task_schedule(struct ice_pf *pf)
{
- if (!test_bit(__ICE_DOWN, pf->state) &&
- !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state))
+ if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
+ !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
+ !test_bit(__ICE_NEEDS_RESTART, pf->state))
queue_work(ice_wq, &pf->serv_task);
}
@@ -985,6 +913,22 @@ static void ice_service_task_complete(struct ice_pf *pf)
}
/**
+ * ice_service_task_stop - stop service task and cancel works
+ * @pf: board private structure
+ */
+static void ice_service_task_stop(struct ice_pf *pf)
+{
+ set_bit(__ICE_SERVICE_DIS, pf->state);
+
+ if (pf->serv_tmr.function)
+ del_timer_sync(&pf->serv_tmr);
+ if (pf->serv_task.func)
+ cancel_work_sync(&pf->serv_task);
+
+ clear_bit(__ICE_SERVICE_SCHED, pf->state);
+}
+
+/**
* ice_service_timer - timer callback to schedule service task
* @t: pointer to timer_list
*/
@@ -997,6 +941,160 @@ static void ice_service_timer(struct timer_list *t)
}
/**
+ * ice_handle_mdd_event - handle malicious driver detect event
+ * @pf: pointer to the PF structure
+ *
+ * Called from service task. OICR interrupt handler indicates MDD event
+ */
+static void ice_handle_mdd_event(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ bool mdd_detected = false;
+ u32 reg;
+ int i;
+
+ if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+ return;
+
+ /* find what triggered the MDD event */
+ reg = rd32(hw, GL_MDET_TX_PQM);
+ if (reg & GL_MDET_TX_PQM_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
+ GL_MDET_TX_PQM_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
+ GL_MDET_TX_PQM_VF_NUM_S;
+ u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
+ GL_MDET_TX_PQM_MAL_TYPE_S;
+ u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
+ GL_MDET_TX_PQM_QNUM_S);
+
+ if (netif_msg_tx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
+ event, queue, pf_num, vf_num);
+ wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ reg = rd32(hw, GL_MDET_TX_TCLAN);
+ if (reg & GL_MDET_TX_TCLAN_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
+ GL_MDET_TX_TCLAN_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
+ GL_MDET_TX_TCLAN_VF_NUM_S;
+ u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
+ GL_MDET_TX_TCLAN_MAL_TYPE_S;
+ u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
+ GL_MDET_TX_TCLAN_QNUM_S);
+
+ if (netif_msg_tx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
+ event, queue, pf_num, vf_num);
+ wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ reg = rd32(hw, GL_MDET_RX);
+ if (reg & GL_MDET_RX_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
+ GL_MDET_RX_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
+ GL_MDET_RX_VF_NUM_S;
+ u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
+ GL_MDET_RX_MAL_TYPE_S;
+ u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
+ GL_MDET_RX_QNUM_S);
+
+ if (netif_msg_rx_err(pf))
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
+ event, queue, pf_num, vf_num);
+ wr32(hw, GL_MDET_RX, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ if (mdd_detected) {
+ bool pf_mdd_detected = false;
+
+ reg = rd32(hw, PF_MDET_TX_PQM);
+ if (reg & PF_MDET_TX_PQM_VALID_M) {
+ wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
+ dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+ pf_mdd_detected = true;
+ }
+
+ reg = rd32(hw, PF_MDET_TX_TCLAN);
+ if (reg & PF_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
+ dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
+ pf_mdd_detected = true;
+ }
+
+ reg = rd32(hw, PF_MDET_RX);
+ if (reg & PF_MDET_RX_VALID_M) {
+ wr32(hw, PF_MDET_RX, 0xFFFF);
+ dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
+ pf_mdd_detected = true;
+ }
+ /* Queue belongs to the PF, initiate a reset */
+ if (pf_mdd_detected) {
+ set_bit(__ICE_NEEDS_RESTART, pf->state);
+ ice_service_task_schedule(pf);
+ }
+ }
+
+ /* see if one of the VFs needs to be reset */
+ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ reg = rd32(hw, VP_MDET_TX_PQM(i));
+ if (reg & VP_MDET_TX_PQM_VALID_M) {
+ wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+ if (reg & VP_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_TX_TDPU(i));
+ if (reg & VP_MDET_TX_TDPU_VALID_M) {
+ wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+
+ reg = rd32(hw, VP_MDET_RX(i));
+ if (reg & VP_MDET_RX_VALID_M) {
+ wr32(hw, VP_MDET_RX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+ i);
+ }
+
+ if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
+ dev_info(&pf->pdev->dev,
+ "Too many MDD events on VF %d, disabled\n", i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ }
+ }
+
+ /* re-enable MDD interrupt cause */
+ clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
+ reg = rd32(hw, PFINT_OICR_ENA);
+ reg |= PFINT_OICR_MAL_DETECT_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ ice_flush(hw);
+}
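Each MDET register above is decoded with repeated mask-and-shift pairs; the kernel's FIELD_GET() from <linux/bitfield.h> expresses the same operation with the shift derived from the mask. A user-space model with invented mask layouts (not the hardware's actual register fields):

    #include <stdint.h>
    #include <stdio.h>

    /* example field layouts only, for illustration */
    #define QNUM_M  0x3FFFu
    #define QNUM_S  0
    #define EVENT_M (0x1Fu << 26)
    #define EVENT_S 26

    /* the open-coded (reg & M) >> S pattern used throughout the handler */
    static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned int shift)
    {
            return (reg & mask) >> shift;
    }

    int main(void)
    {
            uint32_t reg = (7u << EVENT_S) | 42u;

            printf("event=%u queue=%u\n",
                   get_field(reg, EVENT_M, EVENT_S),
                   get_field(reg, QNUM_M, QNUM_S)); /* event=7 queue=42 */
            return 0;
    }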
+
+/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
*/
@@ -1010,16 +1108,21 @@ static void ice_service_task(struct work_struct *work)
/* process reset requests first */
ice_reset_subtask(pf);
- /* bail if a reset/recovery cycle is pending */
- if (ice_is_reset_recovery_pending(pf->state) ||
- test_bit(__ICE_SUSPENDED, pf->state)) {
+ /* bail if a reset/recovery cycle is pending or rebuild failed */
+ if (ice_is_reset_in_progress(pf->state) ||
+ test_bit(__ICE_SUSPENDED, pf->state) ||
+ test_bit(__ICE_NEEDS_RESTART, pf->state)) {
ice_service_task_complete(pf);
return;
}
+ ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf);
+ ice_handle_mdd_event(pf);
+ ice_process_vflr_event(pf);
ice_watchdog_subtask(pf);
ice_clean_adminq_subtask(pf);
+ ice_clean_mailboxq_subtask(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
@@ -1029,6 +1132,9 @@ static void ice_service_task(struct work_struct *work)
* schedule the service task now.
*/
if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
+ test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
+ test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
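The service task re-arms its timer immediately when it ran past one timer period or when any event-pending bit is still set. time_after() is the kernel's wrap-safe time comparison; a standalone model showing why the signed subtraction survives jiffies wraparound:

    #include <stdio.h>

    /* modeled on the kernel's time_after(): true if a is after b, correct
     * across counter wraparound thanks to the signed subtraction */
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    int main(void)
    {
            unsigned long start = (unsigned long)-5; /* just before wrap */
            unsigned long now = 10;                  /* just after wrap */

            printf("%d\n", time_after(now, start));  /* prints 1 */
            return 0;
    }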
@@ -1043,6 +1149,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
+ hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+ hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}
/**
@@ -1073,57 +1183,6 @@ static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
/**
- * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
- * @vsi: the VSI being un-configured
- */
-static void ice_vsi_dis_irq(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- int base = vsi->base_vector;
- u32 val;
- int i;
-
- /* disable interrupt causation from each queue */
- if (vsi->tx_rings) {
- ice_for_each_txq(vsi, i) {
- if (vsi->tx_rings[i]) {
- u16 reg;
-
- reg = vsi->tx_rings[i]->reg_idx;
- val = rd32(hw, QINT_TQCTL(reg));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(reg), val);
- }
- }
- }
-
- if (vsi->rx_rings) {
- ice_for_each_rxq(vsi, i) {
- if (vsi->rx_rings[i]) {
- u16 reg;
-
- reg = vsi->rx_rings[i]->reg_idx;
- val = rd32(hw, QINT_RQCTL(reg));
- val &= ~QINT_RQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_RQCTL(reg), val);
- }
- }
- }
-
- /* disable each interrupt */
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- for (i = vsi->base_vector;
- i < (vsi->num_q_vectors + vsi->base_vector); i++)
- wr32(hw, GLINT_DYN_CTL(i), 0);
-
- ice_flush(hw);
- for (i = 0; i < vsi->num_q_vectors; i++)
- synchronize_irq(pf->msix_entries[i + base].vector);
- }
-}
-
-/**
* ice_vsi_ena_irq - Enable IRQ for the given VSI
* @vsi: the VSI being configured
*/
@@ -1144,26 +1203,6 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi)
}
/**
- * ice_vsi_delete - delete a VSI from the switch
- * @vsi: pointer to VSI being removed
- */
-static void ice_vsi_delete(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_vsi_ctx ctxt;
- enum ice_status status;
-
- ctxt.vsi_num = vsi->vsi_num;
-
- memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
-
- status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
- if (status)
- dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
- vsi->vsi_num);
-}
-
-/**
* ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
* @vsi: the VSI being configured
* @basename: name for the vector
@@ -1172,7 +1211,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
int q_vectors = vsi->num_q_vectors;
struct ice_pf *pf = vsi->back;
- int base = vsi->base_vector;
+ int base = vsi->sw_base_vector;
int rx_int_idx = 0;
int tx_int_idx = 0;
int vector, err;
@@ -1231,467 +1270,6 @@ free_q_irqs:
}
/**
- * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
- * @vsi: the VSI being configured
- */
-static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
-{
- struct ice_hw_common_caps *cap;
- struct ice_pf *pf = vsi->back;
-
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- vsi->rss_size = 1;
- return;
- }
-
- cap = &pf->hw.func_caps.common_cap;
- switch (vsi->type) {
- case ICE_VSI_PF:
- /* PF VSI will inherit RSS instance of PF */
- vsi->rss_table_size = cap->rss_table_size;
- vsi->rss_size = min_t(int, num_online_cpus(),
- BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
- break;
- default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
- break;
- }
-}
-
-/**
- * ice_vsi_setup_q_map - Setup a VSI queue map
- * @vsi: the VSI being configured
- * @ctxt: VSI context structure
- */
-static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
-{
- u16 offset = 0, qmap = 0, numq_tc;
- u16 pow = 0, max_rss = 0, qcount;
- u16 qcount_tx = vsi->alloc_txq;
- u16 qcount_rx = vsi->alloc_rxq;
- bool ena_tc0 = false;
- int i;
-
- /* at least TC0 should be enabled by default */
- if (vsi->tc_cfg.numtc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(0)))
- ena_tc0 = true;
- } else {
- ena_tc0 = true;
- }
-
- if (ena_tc0) {
- vsi->tc_cfg.numtc++;
- vsi->tc_cfg.ena_tc |= 1;
- }
-
- numq_tc = qcount_rx / vsi->tc_cfg.numtc;
-
- /* TC mapping is a function of the number of Rx queues assigned to the
- * VSI for each traffic class and the offset of these queues.
- * The first 10 bits are for queue offset for TC0, next 4 bits for no:of
- * queues allocated to TC0. No:of queues is a power-of-2.
- *
- * If TC is not enabled, the queue offset is set to 0, and allocate one
- * queue, this way, traffic for the given TC will be sent to the default
- * queue.
- *
- * Setup number and offset of Rx queues for all TCs for the VSI
- */
-
- /* qcount will change if RSS is enabled */
- if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
- if (vsi->type == ICE_VSI_PF)
- max_rss = ICE_MAX_LG_RSS_QS;
- else
- max_rss = ICE_MAX_SMALL_RSS_QS;
-
- qcount = min_t(int, numq_tc, max_rss);
- qcount = min_t(int, qcount, vsi->rss_size);
- } else {
- qcount = numq_tc;
- }
-
- /* find the (rounded up) power-of-2 of qcount */
- pow = order_base_2(qcount);
-
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
- if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
- /* TC is not enabled */
- vsi->tc_cfg.tc_info[i].qoffset = 0;
- vsi->tc_cfg.tc_info[i].qcount = 1;
- ctxt->info.tc_mapping[i] = 0;
- continue;
- }
-
- /* TC is enabled */
- vsi->tc_cfg.tc_info[i].qoffset = offset;
- vsi->tc_cfg.tc_info[i].qcount = qcount;
-
- qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
- ICE_AQ_VSI_TC_Q_OFFSET_M) |
- ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
- ICE_AQ_VSI_TC_Q_NUM_M);
- offset += qcount;
- ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
- }
-
- vsi->num_txq = qcount_tx;
- vsi->num_rxq = offset;
-
- /* Rx queue mapping */
- ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
- /* q_mapping buffer holds the info for the first queue allocated for
- * this VSI in the PF space and also the number of queues associated
- * with this VSI.
- */
- ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
- ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
-}
-
-/**
- * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
- * @ctxt: the VSI context being set
- *
- * This initializes a default VSI context for all sections except the Queues.
- */
-static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
-{
- u32 table = 0;
-
- memset(&ctxt->info, 0, sizeof(ctxt->info));
- /* VSI's should be allocated from shared pool */
- ctxt->alloc_from_pool = true;
- /* Src pruning enabled by default */
- ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
- /* Traffic from VSI can be sent to LAN */
- ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
-
- /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
- * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
- * packets untagged/tagged.
- */
- ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
- ICE_AQ_VSI_VLAN_MODE_M) >>
- ICE_AQ_VSI_VLAN_MODE_S);
-
- /* Have 1:1 UP mapping for both ingress/egress tables */
- table |= ICE_UP_TABLE_TRANSLATE(0, 0);
- table |= ICE_UP_TABLE_TRANSLATE(1, 1);
- table |= ICE_UP_TABLE_TRANSLATE(2, 2);
- table |= ICE_UP_TABLE_TRANSLATE(3, 3);
- table |= ICE_UP_TABLE_TRANSLATE(4, 4);
- table |= ICE_UP_TABLE_TRANSLATE(5, 5);
- table |= ICE_UP_TABLE_TRANSLATE(6, 6);
- table |= ICE_UP_TABLE_TRANSLATE(7, 7);
- ctxt->info.ingress_table = cpu_to_le32(table);
- ctxt->info.egress_table = cpu_to_le32(table);
- /* Have 1:1 UP mapping for outer to inner UP table */
- ctxt->info.outer_up_table = cpu_to_le32(table);
- /* No Outer tag support outer_tag_flags remains to zero */
-}
-
-/**
- * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
- * @ctxt: the VSI context being set
- * @vsi: the VSI being configured
- */
-static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
-{
- u8 lut_type, hash_type;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- /* PF VSI will inherit RSS instance of PF */
- lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
- hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
- break;
- default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
- return;
- }
-
- ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
-}
-
-/**
- * ice_vsi_add - Create a new VSI or fetch preallocated VSI
- * @vsi: the VSI being configured
- *
- * This initializes a VSI context depending on the VSI type to be added and
- * passes it down to the add_vsi aq command to create a new VSI.
- */
-static int ice_vsi_add(struct ice_vsi *vsi)
-{
- struct ice_vsi_ctx ctxt = { 0 };
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- int ret = 0;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- ctxt.flags = ICE_AQ_VSI_TYPE_PF;
- break;
- default:
- return -ENODEV;
- }
-
- ice_set_dflt_vsi_ctx(&ctxt);
- /* if the switch is in VEB mode, allow VSI loopback */
- if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
- ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
-
- /* Set LUT type and HASH type if RSS is enabled */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_set_rss_vsi_ctx(&ctxt, vsi);
-
- ctxt.info.sw_id = vsi->port_info->sw_id;
- ice_vsi_setup_q_map(vsi, &ctxt);
-
- ret = ice_aq_add_vsi(hw, &ctxt, NULL);
- if (ret) {
- dev_err(&vsi->back->pdev->dev,
- "Add VSI AQ call failed, err %d\n", ret);
- return -EIO;
- }
- vsi->info = ctxt.info;
- vsi->vsi_num = ctxt.vsi_num;
-
- return ret;
-}
-
-/**
- * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
- * @vsi: the VSI being cleaned up
- */
-static void ice_vsi_release_msix(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- u16 vector = vsi->base_vector;
- struct ice_hw *hw = &pf->hw;
- u32 txq = 0;
- u32 rxq = 0;
- int i, q;
-
- for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
- wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
- wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
- for (q = 0; q < q_vector->num_ring_tx; q++) {
- wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
- txq++;
- }
-
- for (q = 0; q < q_vector->num_ring_rx; q++) {
- wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
- rxq++;
- }
- }
-
- ice_flush(hw);
-}
-
-/**
- * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
- * @vsi: the VSI having rings deallocated
- */
-static void ice_vsi_clear_rings(struct ice_vsi *vsi)
-{
- int i;
-
- if (vsi->tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++) {
- if (vsi->tx_rings[i]) {
- kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
- }
- }
- }
- if (vsi->rx_rings) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
- if (vsi->rx_rings[i]) {
- kfree_rcu(vsi->rx_rings[i], rcu);
- vsi->rx_rings[i] = NULL;
- }
- }
- }
-}
-
-/**
- * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
- * @vsi: VSI which is having rings allocated
- */
-static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int i;
-
- /* Allocate tx_rings */
- for (i = 0; i < vsi->alloc_txq; i++) {
- struct ice_ring *ring;
-
- /* allocate with kzalloc(), free with kfree_rcu() */
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-
- if (!ring)
- goto err_out;
-
- ring->q_index = i;
- ring->reg_idx = vsi->txq_map[i];
- ring->ring_active = false;
- ring->vsi = vsi;
- ring->netdev = vsi->netdev;
- ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
-
- vsi->tx_rings[i] = ring;
- }
-
- /* Allocate rx_rings */
- for (i = 0; i < vsi->alloc_rxq; i++) {
- struct ice_ring *ring;
-
- /* allocate with kzalloc(), free with kfree_rcu() */
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- goto err_out;
-
- ring->q_index = i;
- ring->reg_idx = vsi->rxq_map[i];
- ring->ring_active = false;
- ring->vsi = vsi;
- ring->netdev = vsi->netdev;
- ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
- vsi->rx_rings[i] = ring;
- }
-
- return 0;
-
-err_out:
- ice_vsi_clear_rings(vsi);
- return -ENOMEM;
-}
-
-/**
- * ice_vsi_free_irq - Free the irq association with the OS
- * @vsi: the VSI being configured
- */
-static void ice_vsi_free_irq(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int base = vsi->base_vector;
-
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- int i;
-
- if (!vsi->q_vectors || !vsi->irqs_ready)
- return;
-
- vsi->irqs_ready = false;
- for (i = 0; i < vsi->num_q_vectors; i++) {
- u16 vector = i + base;
- int irq_num;
-
- irq_num = pf->msix_entries[vector].vector;
-
- /* free only the irqs that were actually requested */
- if (!vsi->q_vectors[i] ||
- !(vsi->q_vectors[i]->num_ring_tx ||
- vsi->q_vectors[i]->num_ring_rx))
- continue;
-
- /* clear the affinity notifier in the IRQ descriptor */
- irq_set_affinity_notifier(irq_num, NULL);
-
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
- synchronize_irq(irq_num);
- devm_free_irq(&pf->pdev->dev, irq_num,
- vsi->q_vectors[i]);
- }
- ice_vsi_release_msix(vsi);
- }
-}
-
-/**
- * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
- * @vsi: the VSI being configured
- */
-static void ice_vsi_cfg_msix(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- u16 vector = vsi->base_vector;
- struct ice_hw *hw = &pf->hw;
- u32 txq = 0, rxq = 0;
- int i, q, itr;
- u8 itr_gran;
-
- for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
- itr_gran = hw->itr_gran_200;
-
- if (q_vector->num_ring_rx) {
- q_vector->rx.itr =
- ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
- itr_gran);
- q_vector->rx.latency_range = ICE_LOW_LATENCY;
- }
-
- if (q_vector->num_ring_tx) {
- q_vector->tx.itr =
- ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
- itr_gran);
- q_vector->tx.latency_range = ICE_LOW_LATENCY;
- }
- wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
- wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
-
- /* Both Transmit Queue Interrupt Cause Control register
- * and Receive Queue Interrupt Cause control register
- * expects MSIX_INDX field to be the vector index
- * within the function space and not the absolute
- * vector index across PF or across device.
- * For SR-IOV VF VSIs queue vector index always starts
- * with 1 since first vector index(0) is used for OICR
- * in VF space. Since VMDq and other PF VSIs are withtin
- * the PF function space, use the vector index thats
- * tracked for this PF.
- */
- for (q = 0; q < q_vector->num_ring_tx; q++) {
- u32 val;
-
- itr = ICE_TX_ITR;
- val = QINT_TQCTL_CAUSE_ENA_M |
- (itr << QINT_TQCTL_ITR_INDX_S) |
- (vector << QINT_TQCTL_MSIX_INDX_S);
- wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
- txq++;
- }
-
- for (q = 0; q < q_vector->num_ring_rx; q++) {
- u32 val;
-
- itr = ICE_RX_ITR;
- val = QINT_RQCTL_CAUSE_ENA_M |
- (itr << QINT_RQCTL_ITR_INDX_S) |
- (vector << QINT_RQCTL_MSIX_INDX_S);
- wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
- rxq++;
- }
- }
-
- ice_flush(hw);
-}
-
-/**
* ice_ena_misc_vector - enable the non-queue interrupts
* @pf: board private structure
*/
@@ -1708,13 +1286,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
PFINT_OICR_MAL_DETECT_M |
PFINT_OICR_GRST_M |
PFINT_OICR_PCI_EXCEPTION_M |
+ PFINT_OICR_VFLR_M |
PFINT_OICR_HMC_ERR_M |
PFINT_OICR_PE_CRITERR_M);
wr32(hw, PFINT_OICR_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+ wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}
@@ -1731,12 +1310,23 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
u32 oicr, ena_mask;
set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
+ set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
+ if (oicr & PFINT_OICR_MAL_DETECT_M) {
+ ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
+ set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
+ }
+ if (oicr & PFINT_OICR_VFLR_M) {
+ ena_mask &= ~PFINT_OICR_VFLR_M;
+ set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ }
+
if (oicr & PFINT_OICR_GRST_M) {
u32 reset;
+
/* we have a reset warning */
ena_mask &= ~PFINT_OICR_GRST_M;
reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
@@ -1746,15 +1336,18 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
pf->corer_count++;
else if (reset == ICE_RESET_GLOBR)
pf->globr_count++;
- else
+ else if (reset == ICE_RESET_EMPR)
pf->empr_count++;
+ else
+ dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
+ reset);
/* If a reset cycle isn't already in progress, we set a bit in
* pf->state so that the service task can start a reset/rebuild.
* We also make note of which reset happened so that peer
* devices/drivers can be informed.
*/
- if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) {
+ if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
if (reset == ICE_RESET_CORER)
set_bit(__ICE_CORER_RECV, pf->state);
else if (reset == ICE_RESET_GLOBR)
@@ -1762,7 +1355,20 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
else
set_bit(__ICE_EMPR_RECV, pf->state);
- set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ /* There are a couple of different bits at play here.
+ * hw->reset_ongoing indicates whether the hardware is
+ * in reset. This is set to true when a reset interrupt
+ * is received and set back to false after the driver
+ * has determined that the hardware is out of reset.
+ *
+ * __ICE_RESET_OICR_RECV in pf->state indicates
+ * that a post reset rebuild is required before the
+ * driver is operational again. This is set above.
+ *
+ * As this is the start of the reset/rebuild cycle, set
+ * both to indicate that.
+ */
+ hw->reset_ongoing = true;
}
}
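The comment above distinguishes hw->reset_ongoing (the hardware is in reset) from __ICE_RESET_OICR_RECV (a rebuild is still required); both are set at OICR time and cleared at different points. A toy model of that handshake, with names and flow condensed purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    struct st { bool hw_in_reset; bool rebuild_needed; };

    static void oicr_received(struct st *s)
    {
            s->hw_in_reset = true;     /* like hw->reset_ongoing */
            s->rebuild_needed = true;  /* like __ICE_RESET_OICR_RECV */
    }

    int main(void)
    {
            struct st s = { false, false };

            oicr_received(&s);
            s.hw_in_reset = false;     /* reset-done poll succeeded */
            s.rebuild_needed = false;  /* rebuild finished */
            printf("%d %d\n", s.hw_in_reset, s.rebuild_needed); /* 0 0 */
            return 0;
    }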
@@ -1803,208 +1409,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
/**
- * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
- * @vsi: the VSI being configured
- *
- * This function maps descriptor rings to the queue-specific vectors allotted
- * through the MSI-X enabling code. On a constrained vector budget, we map Tx
- * and Rx rings to the vector as "efficiently" as possible.
- */
-static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
-{
- int q_vectors = vsi->num_q_vectors;
- int tx_rings_rem, rx_rings_rem;
- int v_id;
-
- /* initially assigning remaining rings count to VSIs num queue value */
- tx_rings_rem = vsi->num_txq;
- rx_rings_rem = vsi->num_rxq;
-
- for (v_id = 0; v_id < q_vectors; v_id++) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
- int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
-
- /* Tx rings mapping to vector */
- tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_tx = tx_rings_per_v;
- q_vector->tx.ring = NULL;
- q_base = vsi->num_txq - tx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
- struct ice_ring *tx_ring = vsi->tx_rings[q_id];
-
- tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
- }
- tx_rings_rem -= tx_rings_per_v;
-
- /* Rx rings mapping to vector */
- rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
- q_vector->num_ring_rx = rx_rings_per_v;
- q_vector->rx.ring = NULL;
- q_base = vsi->num_rxq - rx_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
- struct ice_ring *rx_ring = vsi->rx_rings[q_id];
-
- rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
- }
- rx_rings_rem -= rx_rings_per_v;
- }
-}
-
-/**
- * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- */
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- vsi->alloc_txq = pf->num_lan_tx;
- vsi->alloc_rxq = pf->num_lan_rx;
- vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
- vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
- break;
- default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
- break;
- }
-}
-
-/**
- * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
- * @vsi: VSI pointer
- * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
- *
- * On error: returns error code (negative)
- * On success: returns 0
- */
-static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
-{
- struct ice_pf *pf = vsi->back;
-
- /* allocate memory for both Tx and Rx ring pointers */
- vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
- sizeof(struct ice_ring *), GFP_KERNEL);
- if (!vsi->tx_rings)
- goto err_txrings;
-
- vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
- sizeof(struct ice_ring *), GFP_KERNEL);
- if (!vsi->rx_rings)
- goto err_rxrings;
-
- if (alloc_qvectors) {
- /* allocate memory for q_vector pointers */
- vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
- vsi->num_q_vectors,
- sizeof(struct ice_q_vector *),
- GFP_KERNEL);
- if (!vsi->q_vectors)
- goto err_vectors;
- }
-
- return 0;
-
-err_vectors:
- devm_kfree(&pf->pdev->dev, vsi->rx_rings);
-err_rxrings:
- devm_kfree(&pf->pdev->dev, vsi->tx_rings);
-err_txrings:
- return -ENOMEM;
-}
-
-/**
- * ice_msix_clean_rings - MSIX mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a q_vector
- */
-static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
-{
- struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
-
- if (!q_vector->tx.ring && !q_vector->rx.ring)
- return IRQ_HANDLED;
-
- napi_schedule(&q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ice_vsi_alloc - Allocates the next available struct vsi in the PF
- * @pf: board private structure
- * @type: type of VSI
- *
- * returns a pointer to a VSI on success, NULL on failure.
- */
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
-{
- struct ice_vsi *vsi = NULL;
-
- /* Need to protect the allocation of the VSIs at the PF level */
- mutex_lock(&pf->sw_mutex);
-
- /* If we have already allocated our maximum number of VSIs,
- * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
- * is available to be populated
- */
- if (pf->next_vsi == ICE_NO_VSI) {
- dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
- goto unlock_pf;
- }
-
- vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
- if (!vsi)
- goto unlock_pf;
-
- vsi->type = type;
- vsi->back = pf;
- set_bit(__ICE_DOWN, vsi->state);
- vsi->idx = pf->next_vsi;
- vsi->work_lmt = ICE_DFLT_IRQ_WORK;
-
- ice_vsi_set_num_qs(vsi);
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- if (ice_vsi_alloc_arrays(vsi, true))
- goto err_rings;
-
- /* Setup default MSIX irq handler for VSI */
- vsi->irq_handler = ice_msix_clean_rings;
- break;
- default:
- dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
- goto unlock_pf;
- }
-
- /* fill VSI slot in the PF struct */
- pf->vsi[pf->next_vsi] = vsi;
-
- /* prepare pf->next_vsi for next use */
- pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
- pf->next_vsi);
- goto unlock_pf;
-
-err_rings:
- devm_kfree(&pf->pdev->dev, vsi);
- vsi = NULL;
-unlock_pf:
- mutex_unlock(&pf->sw_mutex);
- return vsi;
-}
-
-/**
* ice_free_irq_msix_misc - Unroll misc vector setup
* @pf: board private structure
*/
@@ -2015,12 +1419,15 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
ice_flush(&pf->hw);
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
- synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
+ synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
devm_free_irq(&pf->pdev->dev,
- pf->msix_entries[pf->oicr_idx].vector, pf);
+ pf->msix_entries[pf->sw_oicr_idx].vector, pf);
}
- ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_sw_msix += 1;
+ ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_hw_msix += 1;
+ ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID);
}
/**
@@ -2047,42 +1454,61 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
* lost during reset. Note that this function is called only during
* rebuild path and not while reset is in progress.
*/
- if (ice_is_reset_recovery_pending(pf->state))
+ if (ice_is_reset_in_progress(pf->state))
goto skip_req_irq;
- /* reserve one vector in irq_tracker for misc interrupts */
- oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ /* reserve one vector in sw_irq_tracker for misc interrupts */
+ oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
if (oicr_idx < 0)
return oicr_idx;
- pf->oicr_idx = oicr_idx;
+ pf->num_avail_sw_msix -= 1;
+ pf->sw_oicr_idx = oicr_idx;
+
+ /* reserve one vector in hw_irq_tracker for misc interrupts */
+ oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ if (oicr_idx < 0) {
+ ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_sw_msix += 1;
+ return oicr_idx;
+ }
+ pf->num_avail_hw_msix -= 1;
+ pf->hw_oicr_idx = oicr_idx;
err = devm_request_irq(&pf->pdev->dev,
- pf->msix_entries[pf->oicr_idx].vector,
+ pf->msix_entries[pf->sw_oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
if (err) {
dev_err(&pf->pdev->dev,
"devm_request_irq for %s failed: %d\n",
pf->int_name, err);
- ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_sw_msix += 1;
+ ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ pf->num_avail_hw_msix += 1;
return err;
}
skip_req_irq:
ice_ena_misc_vector(pf);
- val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+ val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
PFINT_OICR_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_OICR_CTL, val);
/* This enables Admin queue Interrupt causes */
- val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+ val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
- itr_gran = hw->itr_gran_200;
+ /* This enables Mailbox queue Interrupt causes */
+ val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
+ PFINT_MBX_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_MBX_CTL, val);
- wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
+ itr_gran = hw->itr_gran;
+
+ wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
ITR_TO_REG(ICE_ITR_8K, itr_gran));
ice_flush(hw);
@@ -2092,209 +1518,43 @@ skip_req_irq:
}
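The misc-vector setup now reserves one entry in each of the sw and hw trackers and rolls the first reservation back whenever a later step fails. The pattern in miniature, with the trackers modeled as simple counters:

    #include <stdio.h>

    static int get_res(int *pool)
    {
            if (*pool <= 0)
                    return -1;
            (*pool)--;
            return 0;
    }

    static void put_res(int *pool)
    {
            (*pool)++;
    }

    /* if the second reservation fails, release the first before returning,
     * mirroring the sw/hw tracker handling above */
    static int reserve_pair(int *sw, int *hw)
    {
            if (get_res(sw))
                    return -1;
            if (get_res(hw)) {
                    put_res(sw);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            int sw = 1, hw = 0;

            printf("ret=%d sw=%d\n", reserve_pair(&sw, &hw), sw);
            return 0; /* prints ret=-1 sw=1: rollback restored sw */
    }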
/**
- * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @vsi: the VSI getting queues
- *
- * Return 0 on success and a negative value on error
+ * ice_napi_del - Remove NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be removed
*/
-static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+static void ice_napi_del(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- int offset, ret = 0;
-
- mutex_lock(&pf->avail_q_mutex);
- /* look for contiguous block of queues for tx */
- offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
- 0, vsi->alloc_txq, 0);
- if (offset < ICE_MAX_TXQS) {
- int i;
-
- bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
- for (i = 0; i < vsi->alloc_txq; i++)
- vsi->txq_map[i] = i + offset;
- } else {
- ret = -ENOMEM;
- vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
- }
-
- /* look for contiguous block of queues for rx */
- offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
- 0, vsi->alloc_rxq, 0);
- if (offset < ICE_MAX_RXQS) {
- int i;
-
- bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
- for (i = 0; i < vsi->alloc_rxq; i++)
- vsi->rxq_map[i] = i + offset;
- } else {
- ret = -ENOMEM;
- vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
- }
- mutex_unlock(&pf->avail_q_mutex);
-
- return ret;
-}
-
-/**
- * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI
- * @vsi: the VSI getting queues
- *
- * Return 0 on success and a negative value on error
- */
-static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int i, index = 0;
-
- mutex_lock(&pf->avail_q_mutex);
-
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
- for (i = 0; i < vsi->alloc_txq; i++) {
- index = find_next_zero_bit(pf->avail_txqs,
- ICE_MAX_TXQS, index);
- if (index < ICE_MAX_TXQS) {
- set_bit(index, pf->avail_txqs);
- vsi->txq_map[i] = index;
- } else {
- goto err_scatter_tx;
- }
- }
- }
-
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
- index = find_next_zero_bit(pf->avail_rxqs,
- ICE_MAX_RXQS, index);
- if (index < ICE_MAX_RXQS) {
- set_bit(index, pf->avail_rxqs);
- vsi->rxq_map[i] = index;
- } else {
- goto err_scatter_rx;
- }
- }
- }
-
- mutex_unlock(&pf->avail_q_mutex);
- return 0;
+ int v_idx;
-err_scatter_rx:
- /* unflag any queues we have grabbed (i is failed position) */
- for (index = 0; index < i; index++) {
- clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
- vsi->rxq_map[index] = 0;
- }
- i = vsi->alloc_txq;
-err_scatter_tx:
- /* i is either position of failed attempt or vsi->alloc_txq */
- for (index = 0; index < i; index++) {
- clear_bit(vsi->txq_map[index], pf->avail_txqs);
- vsi->txq_map[index] = 0;
- }
+ if (!vsi->netdev)
+ return;
- mutex_unlock(&pf->avail_q_mutex);
- return -ENOMEM;
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}
/**
- * ice_vsi_get_qs - Assign queues from PF to VSI
- * @vsi: the VSI to assign queues to
+ * ice_napi_add - register NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be registered
*
- * Returns 0 on success and a negative value on error
- */
-static int ice_vsi_get_qs(struct ice_vsi *vsi)
-{
- int ret = 0;
-
- vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
- vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
-
- /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping
- * modes individually to scatter if assigning contiguous queues
- * to rx or tx fails
- */
- ret = ice_vsi_get_qs_contig(vsi);
- if (ret < 0) {
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
- vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
- ICE_MAX_SCATTER_TXQS);
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
- vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
- ICE_MAX_SCATTER_RXQS);
- ret = ice_vsi_get_qs_scatter(vsi);
- }
-
- return ret;
-}
-
-/**
- * ice_vsi_put_qs - Release queues from VSI to PF
- * @vsi: the VSI thats going to release queues
- */
-static void ice_vsi_put_qs(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int i;
-
- mutex_lock(&pf->avail_q_mutex);
-
- for (i = 0; i < vsi->alloc_txq; i++) {
- clear_bit(vsi->txq_map[i], pf->avail_txqs);
- vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
- }
-
- for (i = 0; i < vsi->alloc_rxq; i++) {
- clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
- vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
- }
-
- mutex_unlock(&pf->avail_q_mutex);
-}
-
-/**
- * ice_free_q_vector - Free memory allocated for a specific interrupt vector
- * @vsi: VSI having the memory freed
- * @v_idx: index of the vector to be freed
+ * This function is only called in the driver's load path. Registering the NAPI
+ * handler is done in ice_vsi_alloc_q_vector() for all other cases (e.g. resume,
+ * reset/rebuild, etc.)
*/
-static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+static void ice_napi_add(struct ice_vsi *vsi)
{
- struct ice_q_vector *q_vector;
- struct ice_ring *ring;
+ int v_idx;
- if (!vsi->q_vectors[v_idx]) {
- dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
- v_idx);
+ if (!vsi->netdev)
return;
- }
- q_vector = vsi->q_vectors[v_idx];
-
- ice_for_each_ring(ring, q_vector->tx)
- ring->q_vector = NULL;
- ice_for_each_ring(ring, q_vector->rx)
- ring->q_vector = NULL;
-
- /* only VSI with an associated netdev is set up with NAPI */
- if (vsi->netdev)
- netif_napi_del(&q_vector->napi);
-
- devm_kfree(&vsi->back->pdev->dev, q_vector);
- vsi->q_vectors[v_idx] = NULL;
-}
-
-/**
- * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
- * @vsi: the VSI having memory freed
- */
-static void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
-{
- int v_idx;
for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
- ice_free_q_vector(vsi, v_idx);
+ netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
+ ice_napi_poll, NAPI_POLL_WEIGHT);
}
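ice_napi_add() registers ice_napi_poll for each q_vector with the standard weight. For orientation, a poll callback of the registered shape looks roughly like this; example_napi_poll is invented and is not the driver's actual handler:

    /* hypothetical NAPI poll callback of the shape registered above */
    static int example_napi_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            /* ... clean Tx, then receive up to `budget` packets,
             * counting them in work_done ... */

            if (work_done < budget)
                    napi_complete_done(napi, work_done);
            return work_done;
    }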
/**
- * ice_cfg_netdev - Setup the netdev flags
- * @vsi: the VSI being configured
+ * ice_cfg_netdev - Allocate, configure and register a netdev
+ * @vsi: the VSI associated with the new netdev
*
* Returns 0 on success, negative value on failure
*/
@@ -2307,6 +1567,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
struct ice_netdev_priv *np;
struct net_device *netdev;
u8 mac_addr[ETH_ALEN];
+ int err;
netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
vsi->alloc_txq, vsi->alloc_rxq);
@@ -2364,195 +1625,14 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = ICE_MAX_MTU;
- return 0;
-}
-
-/**
- * ice_vsi_free_arrays - clean up vsi resources
- * @vsi: pointer to VSI being cleared
- * @free_qvectors: bool to specify if q_vectors should be deallocated
- */
-static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
-{
- struct ice_pf *pf = vsi->back;
-
- /* free the ring and vector containers */
- if (free_qvectors && vsi->q_vectors) {
- devm_kfree(&pf->pdev->dev, vsi->q_vectors);
- vsi->q_vectors = NULL;
- }
- if (vsi->tx_rings) {
- devm_kfree(&pf->pdev->dev, vsi->tx_rings);
- vsi->tx_rings = NULL;
- }
- if (vsi->rx_rings) {
- devm_kfree(&pf->pdev->dev, vsi->rx_rings);
- vsi->rx_rings = NULL;
- }
-}
-
-/**
- * ice_vsi_clear - clean up and deallocate the provided vsi
- * @vsi: pointer to VSI being cleared
- *
- * This deallocates the vsi's queue resources, removes it from the PF's
- * VSI array if necessary, and deallocates the VSI
- *
- * Returns 0 on success, negative on failure
- */
-static int ice_vsi_clear(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = NULL;
-
- if (!vsi)
- return 0;
-
- if (!vsi->back)
- return -EINVAL;
-
- pf = vsi->back;
-
- if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
- dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
- vsi->idx);
- return -EINVAL;
- }
-
- mutex_lock(&pf->sw_mutex);
- /* updates the PF for this cleared vsi */
-
- pf->vsi[vsi->idx] = NULL;
- if (vsi->idx < pf->next_vsi)
- pf->next_vsi = vsi->idx;
-
- ice_vsi_free_arrays(vsi, true);
- mutex_unlock(&pf->sw_mutex);
- devm_kfree(&pf->pdev->dev, vsi);
-
- return 0;
-}
-
-/**
- * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
- * @vsi: the VSI being configured
- * @v_idx: index of the vector in the vsi struct
- *
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_q_vector *q_vector;
-
- /* allocate q_vector */
- q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
- if (!q_vector)
- return -ENOMEM;
-
- q_vector->vsi = vsi;
- q_vector->v_idx = v_idx;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
- if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
- /* tie q_vector and vsi together */
- vsi->q_vectors[v_idx] = q_vector;
-
- return 0;
-}
-
-/**
- * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
- * @vsi: the VSI being configured
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int v_idx = 0, num_q_vectors;
- int err;
-
- if (vsi->q_vectors[0]) {
- dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
- vsi->vsi_num);
- return -EEXIST;
- }
-
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- num_q_vectors = vsi->num_q_vectors;
- } else {
- err = -EINVAL;
- goto err_out;
- }
-
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = ice_vsi_alloc_q_vector(vsi, v_idx);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
-
- dev_err(&pf->pdev->dev,
- "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
-}
-
-/**
- * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
- * @vsi: ptr to the VSI
- *
- * This should only be called after ice_vsi_alloc() which allocates the
- * corresponding SW VSI structure and initializes num_queue_pairs for the
- * newly allocated VSI.
- *
- * Returns 0 on success or negative on failure
- */
-static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- int num_q_vectors = 0;
-
- if (vsi->base_vector) {
- dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
- vsi->vsi_num, vsi->base_vector);
- return -EEXIST;
- }
-
- if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- return -ENOENT;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- num_q_vectors = vsi->num_q_vectors;
- break;
- default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
- break;
- }
+ err = register_netdev(vsi->netdev);
+ if (err)
+ return err;
- if (num_q_vectors)
- vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
- num_q_vectors, vsi->idx);
+ netif_carrier_off(vsi->netdev);
- if (vsi->base_vector < 0) {
- dev_err(&pf->pdev->dev,
- "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
- num_q_vectors, vsi->vsi_num, vsi->base_vector);
- return -ENOENT;
- }
+ /* make sure transmit queues start off as stopped */
+ netif_tx_stop_all_queues(vsi->netdev);
return 0;
}
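
With the register/carrier/stop-queues tail added above, ice_cfg_netdev() now owns the whole netdev life-cycle up to (but not including) link up. A condensed sketch of the post-patch flow, with the elided middle hunks summarized as a comment:

	/* Condensed sketch, not a verbatim copy of the function */
	netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
				    vsi->alloc_txq, vsi->alloc_rxq);
	if (!netdev)
		return -ENOMEM;
	vsi->netdev = netdev;
	/* ... MAC address, netdev ops, feature flags, MTU limits ... */
	err = register_netdev(vsi->netdev);
	if (err)
		return err;
	netif_carrier_off(vsi->netdev);		/* no link reported yet */
	netif_tx_stop_all_queues(vsi->netdev);	/* Tx stays off until open */
	return 0;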
@@ -2572,327 +1652,17 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
}
/**
- * ice_vsi_cfg_rss - Configure RSS params for a VSI
- * @vsi: VSI to be configured
- */
-static int ice_vsi_cfg_rss(struct ice_vsi *vsi)
-{
- u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
- struct ice_aqc_get_set_rss_keys *key;
- struct ice_pf *pf = vsi->back;
- enum ice_status status;
- int err = 0;
- u8 *lut;
-
- vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
-
- lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
-
- if (vsi->rss_lut_user)
- memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
- else
- ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
-
- status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type,
- lut, vsi->rss_table_size);
-
- if (status) {
- dev_err(&vsi->back->pdev->dev,
- "set_rss_lut failed, error %d\n", status);
- err = -EIO;
- goto ice_vsi_cfg_rss_exit;
- }
-
- key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
- if (!key) {
- err = -ENOMEM;
- goto ice_vsi_cfg_rss_exit;
- }
-
- if (vsi->rss_hkey_user)
- memcpy(seed, vsi->rss_hkey_user,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
- else
- netdev_rss_key_fill((void *)seed,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
- memcpy(&key->standard_rss_key, seed,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
-
- status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key);
-
- if (status) {
- dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
- status);
- err = -EIO;
- }
-
- devm_kfree(&pf->pdev->dev, key);
-ice_vsi_cfg_rss_exit:
- devm_kfree(&pf->pdev->dev, lut);
- return err;
-}
-
-/**
- * ice_vsi_reinit_setup - return resource and reallocate resource for a VSI
- * @vsi: pointer to the ice_vsi
- *
- * This reallocates the VSIs queue resources
- *
- * Returns 0 on success and negative value on failure
- */
-static int ice_vsi_reinit_setup(struct ice_vsi *vsi)
-{
- u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- int ret, i;
-
- if (!vsi)
- return -EINVAL;
-
- ice_vsi_free_q_vectors(vsi);
- ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
- vsi->base_vector = 0;
- ice_vsi_clear_rings(vsi);
- ice_vsi_free_arrays(vsi, false);
- ice_vsi_set_num_qs(vsi);
-
- /* Initialize VSI struct elements and create VSI in FW */
- ret = ice_vsi_add(vsi);
- if (ret < 0)
- goto err_vsi;
-
- ret = ice_vsi_alloc_arrays(vsi, false);
- if (ret < 0)
- goto err_vsi;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- if (!vsi->netdev) {
- ret = ice_cfg_netdev(vsi);
- if (ret)
- goto err_rings;
-
- ret = register_netdev(vsi->netdev);
- if (ret)
- goto err_rings;
-
- netif_carrier_off(vsi->netdev);
- netif_tx_stop_all_queues(vsi->netdev);
- }
-
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_vectors;
-
- ice_vsi_map_rings_to_vectors(vsi);
- break;
- default:
- break;
- }
-
- ice_vsi_set_tc_cfg(vsi);
-
- /* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
-
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
- vsi->tc_cfg.ena_tc, max_txqs);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Failed VSI lan queue config\n");
- goto err_vectors;
- }
- return 0;
-
-err_vectors:
- ice_vsi_free_q_vectors(vsi);
-err_rings:
- if (vsi->netdev) {
- vsi->current_netdev_flags = 0;
- unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
-err_vsi:
- ice_vsi_clear(vsi);
- set_bit(__ICE_RESET_FAILED, vsi->back->state);
- return ret;
-}
-
-/**
- * ice_vsi_setup - Set up a VSI by a given type
+ * ice_pf_vsi_setup - Set up a PF VSI
* @pf: board private structure
- * @type: VSI type
* @pi: pointer to the port_info instance
*
- * This allocates the sw VSI structure and its queue resources.
- *
- * Returns pointer to the successfully allocated and configure VSI sw struct on
- * success, otherwise returns NULL on failure.
+ * Returns a pointer to the allocated VSI sw struct on success, otherwise
+ * returns NULL on failure.
*/
static struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type,
- struct ice_port_info *pi)
+ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- struct device *dev = &pf->pdev->dev;
- struct ice_vsi_ctx ctxt = { 0 };
- struct ice_vsi *vsi;
- int ret, i;
-
- vsi = ice_vsi_alloc(pf, type);
- if (!vsi) {
- dev_err(dev, "could not allocate VSI\n");
- return NULL;
- }
-
- vsi->port_info = pi;
- vsi->vsw = pf->first_sw;
-
- if (ice_vsi_get_qs(vsi)) {
- dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
- vsi->idx);
- goto err_get_qs;
- }
-
- /* set RSS capabilities */
- ice_vsi_set_rss_params(vsi);
-
- /* create the VSI */
- ret = ice_vsi_add(vsi);
- if (ret)
- goto err_vsi;
-
- ctxt.vsi_num = vsi->vsi_num;
-
- switch (vsi->type) {
- case ICE_VSI_PF:
- ret = ice_cfg_netdev(vsi);
- if (ret)
- goto err_cfg_netdev;
-
- ret = register_netdev(vsi->netdev);
- if (ret)
- goto err_register_netdev;
-
- netif_carrier_off(vsi->netdev);
-
- /* make sure transmit queues start off as stopped */
- netif_tx_stop_all_queues(vsi->netdev);
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_msix;
-
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_rings;
-
- ice_vsi_map_rings_to_vectors(vsi);
-
- /* Do not exit if configuring RSS had an issue, at least
- * receive traffic on first queue. Hence no need to capture
- * return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_vsi_cfg_rss(vsi);
- break;
- default:
- /* if vsi type is not recognized, clean up the resources and
- * exit
- */
- goto err_rings;
- }
-
- ice_vsi_set_tc_cfg(vsi);
-
- /* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
-
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
- vsi->tc_cfg.ena_tc, max_txqs);
- if (ret) {
- dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
- goto err_rings;
- }
-
- return vsi;
-
-err_rings:
- ice_vsi_free_q_vectors(vsi);
-err_msix:
- if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
- unregister_netdev(vsi->netdev);
-err_register_netdev:
- if (vsi->netdev) {
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
-err_cfg_netdev:
- ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
- if (ret)
- dev_err(&vsi->back->pdev->dev,
- "Free VSI AQ call failed, err %d\n", ret);
-err_vsi:
- ice_vsi_put_qs(vsi);
-err_get_qs:
- pf->q_left_tx += vsi->alloc_txq;
- pf->q_left_rx += vsi->alloc_rxq;
- ice_vsi_clear(vsi);
-
- return NULL;
-}
-
-/**
- * ice_vsi_add_vlan - Add vsi membership for given vlan
- * @vsi: the vsi being configured
- * @vid: vlan id to be added
- */
-static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
-{
- struct ice_fltr_list_entry *tmp;
- struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
- enum ice_status status;
- int err = 0;
-
- tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.flag = ICE_FLTR_TX;
- tmp->fltr_info.src = vsi->vsi_num;
- tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
- tmp->fltr_info.l_data.vlan.vlan_id = vid;
-
- INIT_LIST_HEAD(&tmp->list_entry);
- list_add(&tmp->list_entry, &tmp_add_list);
-
- status = ice_add_vlan(&pf->hw, &tmp_add_list);
- if (status) {
- err = -ENODEV;
- dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
- vid, vsi->vsi_num);
- }
-
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
- return err;
+ return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
}
/**
@@ -2908,7 +1678,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- int ret = 0;
+ int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -2919,6 +1689,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
if (vsi->info.pvid)
return -EINVAL;
+ /* Enable VLAN pruning when VLAN 0 is added */
+ if (unlikely(!vid)) {
+ ret = ice_cfg_vlan_pruning(vsi, true);
+ if (ret)
+ return ret;
+ }
+
/* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
@@ -2932,38 +1709,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
}
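
Together with the kill path further down, this keeps a simple invariant: Rx VLAN pruning is enabled exactly while the VLAN 0 filter exists. A compressed sketch of the pairing (error handling elided):

	/* add_vid: pruning is switched on before the first (vid 0) filter */
	if (!vid)
		ice_cfg_vlan_pruning(vsi, true);
	ice_vsi_add_vlan(vsi, vid);

	/* kill_vid: the filter goes first, pruning is switched off last */
	ice_vsi_kill_vlan(vsi, vid);
	if (!vid)
		ice_cfg_vlan_pruning(vsi, false);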
/**
- * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN id to be removed
- */
-static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
-{
- struct ice_fltr_list_entry *list;
- struct ice_pf *pf = vsi->back;
- LIST_HEAD(tmp_add_list);
-
- list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
- if (!list)
- return;
-
- list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
- list->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
- list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- list->fltr_info.l_data.vlan.vlan_id = vid;
- list->fltr_info.flag = ICE_FLTR_TX;
- list->fltr_info.src = vsi->vsi_num;
-
- INIT_LIST_HEAD(&list->list_entry);
- list_add(&list->list_entry, &tmp_add_list);
-
- if (ice_remove_vlan(&pf->hw, &tmp_add_list))
- dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
- vid, vsi->vsi_num);
-
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
-}
-
-/**
* ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
@@ -2976,19 +1721,25 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ int status;
if (vsi->info.pvid)
return -EINVAL;
- /* return code is ignored as there is nothing a user
- * can do about failure to remove and a log message was
- * already printed from the other function
+ /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
+ * information
*/
- ice_vsi_kill_vlan(vsi, vid);
+ status = ice_vsi_kill_vlan(vsi, vid);
+ if (status)
+ return status;
clear_bit(vid, vsi->active_vlans);
- return 0;
+ /* Disable VLAN pruning when VLAN 0 is removed */
+ if (unlikely(!vid))
+ status = ice_cfg_vlan_pruning(vsi, false);
+
+ return status;
}
/**
@@ -3004,59 +1755,73 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
struct ice_vsi *vsi;
int status = 0;
- if (!ice_is_reset_recovery_pending(pf->state)) {
- vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info);
- if (!vsi) {
- status = -ENOMEM;
- goto error_exit;
- }
- } else {
- vsi = pf->vsi[0];
- status = ice_vsi_reinit_setup(vsi);
- if (status < 0)
- return -EIO;
+ if (ice_is_reset_in_progress(pf->state))
+ return -EBUSY;
+
+ vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
+ if (!vsi) {
+ status = -ENOMEM;
+ goto unroll_vsi_setup;
}
- /* tmp_add_list contains a list of MAC addresses for which MAC
- * filters need to be programmed. Add the VSI's unicast MAC to
- * this list
+ status = ice_cfg_netdev(vsi);
+ if (status) {
+ status = -ENODEV;
+ goto unroll_vsi_setup;
+ }
+
+ /* registering the NAPI handler requires both the queues and
+ * netdev to be created, which are done in ice_pf_vsi_setup()
+ * and ice_cfg_netdev() respectively
+ */
+ ice_napi_add(vsi);
+
+ /* To add a MAC filter, first add the MAC to a list and then
+ * pass the list to ice_add_mac.
*/
+
+ /* Add a unicast MAC filter so the VSI can get its packets */
status = ice_add_mac_to_list(vsi, &tmp_add_list,
vsi->port_info->mac.perm_addr);
if (status)
- goto error_exit;
+ goto unroll_napi_add;
/* VSI needs to receive broadcast traffic, so add the broadcast
- * MAC address to the list.
+ * MAC address to the list as well.
*/
eth_broadcast_addr(broadcast);
status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
if (status)
- goto error_exit;
+ goto free_mac_list;
/* program MAC filters for entries in tmp_add_list */
status = ice_add_mac(&pf->hw, &tmp_add_list);
if (status) {
dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
status = -ENOMEM;
- goto error_exit;
+ goto free_mac_list;
}
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
return status;
-error_exit:
+free_mac_list:
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+unroll_napi_add:
if (vsi) {
- ice_vsi_free_q_vectors(vsi);
- if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
- unregister_netdev(vsi->netdev);
+ ice_napi_del(vsi);
if (vsi->netdev) {
+ if (vsi->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
+ }
+unroll_vsi_setup:
+ if (vsi) {
+ ice_vsi_free_q_vectors(vsi);
ice_vsi_delete(vsi);
ice_vsi_put_qs(vsi);
pf->q_left_tx += vsi->alloc_txq;
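
The rewritten ice_setup_pf_sw() reduces to a fixed ordering plus the staged-filter-list pattern its comments describe. A sketch of the happy path (unwind labels omitted, names as in the hunk above):

	LIST_HEAD(tmp_add_list);
	u8 broadcast[ETH_ALEN];

	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);	/* queues + FW VSI */
	ice_cfg_netdev(vsi);				/* netdev exists now */
	ice_napi_add(vsi);				/* needs both of the above */

	/* filters are staged on a list, then programmed in one pass */
	ice_add_mac_to_list(vsi, &tmp_add_list, vsi->port_info->mac.perm_addr);
	eth_broadcast_addr(broadcast);
	ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	ice_add_mac(&pf->hw, &tmp_add_list);
	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);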
@@ -3097,10 +1862,7 @@ static void ice_determine_q_usage(struct ice_pf *pf)
*/
static void ice_deinit_pf(struct ice_pf *pf)
{
- if (pf->serv_tmr.function)
- del_timer_sync(&pf->serv_tmr);
- if (pf->serv_task.func)
- cancel_work_sync(&pf->serv_task);
+ ice_service_task_stop(pf);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->avail_q_mutex);
}
@@ -3113,6 +1875,15 @@ static void ice_init_pf(struct ice_pf *pf)
{
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+#ifdef CONFIG_PCI_IOV
+ if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
+ struct ice_hw *hw = &pf->hw;
+
+ set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+ pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
+ ICE_MAX_VF_COUNT);
+ }
+#endif /* CONFIG_PCI_IOV */
mutex_init(&pf->sw_mutex);
mutex_init(&pf->avail_q_mutex);
@@ -3155,6 +1926,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
/* reserve vectors for LAN traffic */
pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
v_budget += pf->num_lan_msix;
+ v_left -= pf->num_lan_msix;
pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
@@ -3182,10 +1954,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
"not enough vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
if (v_actual >= (pf->num_lan_msix + 1)) {
- pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
+ pf->num_avail_sw_msix = v_actual -
+ (pf->num_lan_msix + 1);
} else if (v_actual >= 2) {
pf->num_lan_msix = 1;
- pf->num_avail_msix = v_actual - 2;
+ pf->num_avail_sw_msix = v_actual - 2;
} else {
pci_disable_msix(pf->pdev);
err = -ERANGE;
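
The new v_left bookkeeping and the renamed num_avail_sw_msix make the budget math easier to follow; a worked example with hypothetical numbers:

	/* Assume num_online_cpus() == 8 and v_left == 16 going in:
	 *   num_lan_msix = min(8, 16) = 8; v_budget += 8; v_left = 8
	 * pci_enable_msix_range() then grants v_actual vectors:
	 *   v_actual == 9 -> num_avail_sw_msix = 9 - (8 + 1) = 0
	 *   v_actual == 5 -> fall back: num_lan_msix = 1,
	 *                    num_avail_sw_msix = 5 - 2 = 3
	 *   v_actual == 1 -> too few even for the fallback: -ERANGE
	 */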
@@ -3218,12 +1991,32 @@ static void ice_dis_msix(struct ice_pf *pf)
}
/**
+ * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
+ * @pf: board private structure
+ */
+static void ice_clear_interrupt_scheme(struct ice_pf *pf)
+{
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ ice_dis_msix(pf);
+
+ if (pf->sw_irq_tracker) {
+ devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker);
+ pf->sw_irq_tracker = NULL;
+ }
+
+ if (pf->hw_irq_tracker) {
+ devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker);
+ pf->hw_irq_tracker = NULL;
+ }
+}
+
+/**
* ice_init_interrupt_scheme - Determine proper interrupt scheme
* @pf: board private structure to initialize
*/
static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
- int vectors = 0;
+ int vectors = 0, hw_vectors = 0;
ssize_t size;
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
@@ -3237,30 +2030,31 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
/* set up vector assignment tracking */
size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
- pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
- if (!pf->irq_tracker) {
+ pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ if (!pf->sw_irq_tracker) {
ice_dis_msix(pf);
return -ENOMEM;
}
- pf->irq_tracker->num_entries = vectors;
+ /* populate the SW interrupt pool with the number of OS-granted IRQs */
+ pf->num_avail_sw_msix = vectors;
+ pf->sw_irq_tracker->num_entries = vectors;
- return 0;
-}
+ /* set up HW vector assignment tracking */
+ hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+ size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors);
-/**
- * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
- * @pf: board private structure
- */
-static void ice_clear_interrupt_scheme(struct ice_pf *pf)
-{
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_dis_msix(pf);
-
- if (pf->irq_tracker) {
- devm_kfree(&pf->pdev->dev, pf->irq_tracker);
- pf->irq_tracker = NULL;
+ pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ if (!pf->hw_irq_tracker) {
+ ice_clear_interrupt_scheme(pf);
+ return -ENOMEM;
}
+
+ /* populate the HW interrupt pool with the number of HW-supported IRQs */
+ pf->num_avail_hw_msix = hw_vectors;
+ pf->hw_irq_tracker->num_entries = hw_vectors;
+
+ return 0;
}
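
Both trackers share the header-plus-flexible-array shape that the size computation implies. An abbreviated sketch of the layout (the real struct lives in the driver headers and may carry more fields; search_hint is confirmed by a later hunk in this patch):

	struct ice_res_tracker {
		u16 num_entries;	/* pool size (SW or HW vectors) */
		u16 search_hint;	/* where the next allocation search starts */
		u16 list[];		/* one ownership slot per vector */
	};

	/* matches: size = sizeof(struct ice_res_tracker) + sizeof(u16) * vectors */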
/**
@@ -3307,6 +2101,8 @@ static int ice_probe(struct pci_dev *pdev,
pf->pdev = pdev;
pci_set_drvdata(pdev, pf);
set_bit(__ICE_DOWN, pf->state);
+ /* Disable service task until DOWN bit is cleared */
+ set_bit(__ICE_SERVICE_DIS, pf->state);
hw = &pf->hw;
hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
@@ -3364,6 +2160,9 @@ static int ice_probe(struct pci_dev *pdev,
goto err_init_interrupt_unroll;
}
+ /* Driver is mostly up */
+ clear_bit(__ICE_DOWN, pf->state);
+
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
* the misc functionality and queue processing is combined in
@@ -3386,7 +2185,11 @@ static int ice_probe(struct pci_dev *pdev,
goto err_msix_misc_unroll;
}
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+ if (hw->evb_veb)
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
+
pf->first_sw->pf = pf;
/* record the sw_id available for later use */
@@ -3399,8 +2202,7 @@ static int ice_probe(struct pci_dev *pdev,
goto err_alloc_sw_unroll;
}
- /* Driver is mostly up */
- clear_bit(__ICE_DOWN, pf->state);
+ clear_bit(__ICE_SERVICE_DIS, pf->state);
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
@@ -3414,6 +2216,7 @@ static int ice_probe(struct pci_dev *pdev,
return 0;
err_alloc_sw_unroll:
+ set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
devm_kfree(&pf->pdev->dev, pf->first_sw);
err_msix_misc_unroll:
@@ -3436,25 +2239,23 @@ err_exit_unroll:
static void ice_remove(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- int i = 0;
- int err;
+ int i;
if (!pf)
return;
set_bit(__ICE_DOWN, pf->state);
+ ice_service_task_stop(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
+ ice_free_vfs(pf);
+ ice_vsi_release_all(pf);
+ ice_free_irq_msix_misc(pf);
+ ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
-
- err = ice_vsi_release(pf->vsi[i]);
- if (err)
- dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n",
- i, err);
+ ice_vsi_free_q_vectors(pf->vsi[i]);
}
-
- ice_free_irq_msix_misc(pf);
ice_clear_interrupt_scheme(pf);
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
@@ -3473,8 +2274,6 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 },
/* required last entry */
{ 0, }
};
@@ -3485,6 +2284,7 @@ static struct pci_driver ice_driver = {
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
+ .sriov_configure = ice_sriov_configure,
};
/**
@@ -3500,7 +2300,7 @@ static int __init ice_module_init(void)
pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
pr_info("%s\n", ice_copyright);
- ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
@@ -3562,7 +2362,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
}
if (test_bit(__ICE_DOWN, pf->state) ||
- ice_is_reset_recovery_pending(pf->state)) {
+ ice_is_reset_in_progress(pf->state)) {
netdev_err(netdev, "can't set mac %pM. device not ready\n",
mac);
return -EBUSY;
@@ -3722,78 +2522,6 @@ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
}
/**
- * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
- * @vsi: the vsi being changed
- */
-static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
-{
- struct device *dev = &vsi->back->pdev->dev;
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx ctxt = { 0 };
- enum ice_status status;
-
- /* Here we are configuring the VSI to let the driver add VLAN tags by
- * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
- * insertion happens in the Tx hot path, in ice_tx_map.
- */
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
-
- ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- ctxt.vsi_num = vsi->vsi_num;
-
- status = ice_aq_update_vsi(hw, &ctxt, NULL);
- if (status) {
- dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
- status, hw->adminq.sq_last_status);
- return -EIO;
- }
-
- vsi->info.vlan_flags = ctxt.info.vlan_flags;
- return 0;
-}
-
-/**
- * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
- * @vsi: the vsi being changed
- * @ena: boolean value indicating if this is a enable or disable request
- */
-static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
-{
- struct device *dev = &vsi->back->pdev->dev;
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx ctxt = { 0 };
- enum ice_status status;
-
- /* Here we are configuring what the VSI should do with the VLAN tag in
- * the Rx packet. We can either leave the tag in the packet or put it in
- * the Rx descriptor.
- */
- if (ena) {
- /* Strip VLAN tag from Rx packet and put it in the desc */
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
- } else {
- /* Disable stripping. Leave tag in packet */
- ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
- }
-
- /* Allow all packets untagged/tagged */
- ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
-
- ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- ctxt.vsi_num = vsi->vsi_num;
-
- status = ice_aq_update_vsi(hw, &ctxt, NULL);
- if (status) {
- dev_err(dev, "update VSI for VALN strip failed, ena = %d err %d aq_err %d\n",
- ena, status, hw->adminq.sq_last_status);
- return -EIO;
- }
-
- vsi->info.vlan_flags = ctxt.info.vlan_flags;
- return 0;
-}
-
-/**
* ice_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -3805,6 +2533,12 @@ static int ice_set_features(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
int ret = 0;
+ if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
+ ret = ice_vsi_manage_rss_lut(vsi, true);
+ else if (!(features & NETIF_F_RXHASH) &&
+ netdev->features & NETIF_F_RXHASH)
+ ret = ice_vsi_manage_rss_lut(vsi, false);
+
if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
ret = ice_vsi_manage_vlan_stripping(vsi, true);
@@ -3863,248 +2597,6 @@ static int ice_restore_vlan(struct ice_vsi *vsi)
}
/**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
- *
- * Configure the Tx descriptor ring in TLAN context.
- */
-static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
-{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
-
- tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
- tlan_ctx->port_num = vsi->port_info->lport;
-
- /* Transmit Queue Length */
- tlan_ctx->qlen = ring->count;
-
- /* PF number */
- tlan_ctx->pf_num = hw->pf_id;
-
- /* queue belongs to a specific VSI type
- * VF / VM index should be programmed per vmvf_type setting:
- * for vmvf_type = VF, it is VF number between 0-256
- * for vmvf_type = VM, it is VM number between 0-767
- * for PF or EMP this field should be set to zero
- */
- switch (vsi->type) {
- case ICE_VSI_PF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
- break;
- default:
- return;
- }
-
- /* make sure the context is associated with the right VSI */
- tlan_ctx->src_vsi = vsi->vsi_num;
-
- tlan_ctx->tso_ena = ICE_TX_LEGACY;
- tlan_ctx->tso_qnum = pf_q;
-
- /* Legacy or Advanced Host Interface:
- * 0: Advanced Host Interface
- * 1: Legacy Host Interface
- */
- tlan_ctx->legacy_int = ICE_TX_LEGACY;
-}
-
-/**
- * ice_vsi_cfg_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-static int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
-{
- struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_aqc_add_txqs_perq *txq;
- struct ice_pf *pf = vsi->back;
- enum ice_status status;
- u16 buf_len, i, pf_q;
- int err = 0, tc = 0;
- u8 num_q_grps;
-
- buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
- qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
- if (!qg_buf)
- return -ENOMEM;
-
- if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
- err = -EINVAL;
- goto err_cfg_txqs;
- }
- qg_buf->num_txqs = 1;
- num_q_grps = 1;
-
- /* set up and configure the tx queues */
- ice_for_each_txq(vsi, i) {
- struct ice_tlan_ctx tlan_ctx = { 0 };
-
- pf_q = vsi->txq_map[i];
- ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
-
- /* init queue specific tail reg. It is referred as transmit
- * comm scheduler queue doorbell.
- */
- vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,
- num_q_grps, qg_buf, buf_len, NULL);
- if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- err = -ENODEV;
- goto err_cfg_txqs;
- }
-
- /* Add Tx Queue TEID into the VSI tx ring from the response
- * This will complete configuring and enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- vsi->tx_rings[i]->txq_teid =
- le32_to_cpu(txq->q_teid);
- }
-err_cfg_txqs:
- devm_kfree(&pf->pdev->dev, qg_buf);
- return err;
-}
-
-/**
- * ice_setup_rx_ctx - Configure a receive ring context
- * @ring: The Rx ring to configure
- *
- * Configure the Rx descriptor ring in RLAN context.
- */
-static int ice_setup_rx_ctx(struct ice_ring *ring)
-{
- struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
- u32 rxdid = ICE_RXDID_FLEX_NIC;
- struct ice_rlan_ctx rlan_ctx;
- u32 regval;
- u16 pf_q;
- int err;
-
- /* what is RX queue number in global space of 2K rx queues */
- pf_q = vsi->rxq_map[ring->q_index];
-
- /* clear the context structure first */
- memset(&rlan_ctx, 0, sizeof(rlan_ctx));
-
- rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
-
- rlan_ctx.qlen = ring->count;
-
- /* Receive Packet Data Buffer Size.
- * The Packet Data Buffer Size is defined in 128 byte units.
- */
- rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
-
- /* use 32 byte descriptors */
- rlan_ctx.dsize = 1;
-
- /* Strip the Ethernet CRC bytes before the packet is posted to host
- * memory.
- */
- rlan_ctx.crcstrip = 1;
-
- /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
- rlan_ctx.l2tsel = 1;
-
- rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
- rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
- rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
-
- /* This controls whether VLAN is stripped from inner headers
- * The VLAN in the inner L2 header is stripped to the receive
- * descriptor if enabled by this flag.
- */
- rlan_ctx.showiv = 0;
-
- /* Max packet size for this queue - must not be set to a larger value
- * than 5 x DBUF
- */
- rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
- ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
-
- /* Rx queue threshold in units of 64 */
- rlan_ctx.lrxqthresh = 1;
-
- /* Enable Flexible Descriptors in the queue context which
- * allows this driver to select a specific receive descriptor format
- */
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- /* increasing context priority to pick up profile id;
- * default is 0x01; setting to 0x03 to ensure profile
- * is programming if prev context is of same priority
- */
- regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
-
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
-
- /* Absolute queue number out of 2K needs to be passed */
- err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
- if (err) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
- pf_q, err);
- return -EIO;
- }
-
- /* init queue specific tail register */
- ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
- writel(0, ring->tail);
- ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
-
- return 0;
-}
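
Worth noting as this helper moves out: the Rx context fields are in hardware units, not bytes. A worked example with the driver defaults, assuming ICE_RLAN_CTX_DBUF_S is the 128-byte shift (7):

	/* rx_buf_len = ICE_RXBUF_2048, expressed in 128-byte units */
	rlan_ctx.dbuf = 2048 >> 7;				/* = 16 */

	/* rxmax capped at 5 chained buffers (ICE_MAX_CHAINED_RX_BUFS) */
	rlan_ctx.rxmax = min_t(u16, vsi->max_frame, 5 * 2048);	/* <= 10240 */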
-
-/**
- * ice_vsi_cfg_rxqs - Configure the VSI for Rx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Rx VSI for operation.
- */
-static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
-{
- int err = 0;
- u16 i;
-
- if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
- vsi->max_frame = vsi->netdev->mtu +
- ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- else
- vsi->max_frame = ICE_RXBUF_2048;
-
- vsi->rx_buf_len = ICE_RXBUF_2048;
- /* set up individual rings */
- for (i = 0; i < vsi->num_rxq && !err; i++)
- err = ice_setup_rx_ctx(vsi->rx_rings[i]);
-
- if (err) {
- dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
- return -EIO;
- }
- return err;
-}
-
-/**
* ice_vsi_cfg - Setup the VSI
* @vsi: the VSI being configured
*
@@ -4129,200 +2621,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
}
/**
- * ice_vsi_stop_tx_rings - Disable Tx rings
- * @vsi: the VSI being configured
- */
-static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- enum ice_status status;
- u32 *q_teids, val;
- u16 *q_ids, i;
- int err = 0;
-
- if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
- return -EINVAL;
-
- q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
- GFP_KERNEL);
- if (!q_teids)
- return -ENOMEM;
-
- q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
- GFP_KERNEL);
- if (!q_ids) {
- err = -ENOMEM;
- goto err_alloc_q_ids;
- }
-
- /* set up the tx queue list to be disabled */
- ice_for_each_txq(vsi, i) {
- u16 v_idx;
-
- if (!vsi->tx_rings || !vsi->tx_rings[i]) {
- err = -EINVAL;
- goto err_out;
- }
-
- q_ids[i] = vsi->txq_map[i];
- q_teids[i] = vsi->tx_rings[i]->txq_teid;
-
- /* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
-
- /* software is expected to wait for 100 ns */
- ndelay(100);
-
- /* trigger a software interrupt for the vector associated to
- * the queue to schedule napi handler
- */
- v_idx = vsi->tx_rings[i]->q_vector->v_idx;
- wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),
- GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
- }
- status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
- NULL);
- if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n",
- status);
- err = -ENODEV;
- }
-
-err_out:
- devm_kfree(&pf->pdev->dev, q_ids);
-
-err_alloc_q_ids:
- devm_kfree(&pf->pdev->dev, q_teids);
-
- return err;
-}
-
-/**
- * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
- * @pf: the PF being configured
- * @pf_q: the PF queue
- * @ena: enable or disable state of the queue
- *
- * This routine will wait for the given Rx queue of the PF to reach the
- * enabled or disabled state.
- * Returns -ETIMEDOUT in case of failing to reach the requested state after
- * multiple retries; else will return 0 in case of success.
- */
-static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
-{
- int i;
-
- for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
- u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
-
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- break;
-
- usleep_range(10, 20);
- }
- if (i >= ICE_Q_WAIT_RETRY_LIMIT)
- return -ETIMEDOUT;
-
- return 0;
-}
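
ice_pf_rxq_wait() is a bounded register poll; its caller pairs it with a request write, as in this condensed fragment of ice_vsi_ctrl_rx_rings() below:

	/* request the enable, then poll until the status bit agrees */
	rx_reg = rd32(hw, QRX_CTRL(pf_q));
	rx_reg |= QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	if (ice_pf_rxq_wait(pf, pf_q, true))
		dev_err(&pf->pdev->dev, "Rx ring %d enable timeout\n", pf_q);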
-
-/**
- * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings
- * @vsi: the VSI being configured
- * @ena: start or stop the rx rings
- */
-static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- int i, j, ret = 0;
-
- for (i = 0; i < vsi->num_rxq; i++) {
- int pf_q = vsi->rxq_map[i];
- u32 rx_reg;
-
- for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
- rx_reg = rd32(hw, QRX_CTRL(pf_q));
- if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
- ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
- break;
- usleep_range(1000, 2000);
- }
-
- /* Skip if the queue is already in the requested state */
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- continue;
-
- /* turn on/off the queue */
- if (ena)
- rx_reg |= QRX_CTRL_QENA_REQ_M;
- else
- rx_reg &= ~QRX_CTRL_QENA_REQ_M;
- wr32(hw, QRX_CTRL(pf_q), rx_reg);
-
- /* wait for the change to finish */
- ret = ice_pf_rxq_wait(pf, pf_q, ena);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "VSI idx %d Rx ring %d %sable timeout\n",
- vsi->idx, pf_q, (ena ? "en" : "dis"));
- break;
- }
- }
-
- return ret;
-}
-
-/**
- * ice_vsi_start_rx_rings - start VSI's rx rings
- * @vsi: the VSI whose rings are to be started
- *
- * Returns 0 on success and a negative value on error
- */
-static int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
-{
- return ice_vsi_ctrl_rx_rings(vsi, true);
-}
-
-/**
- * ice_vsi_stop_rx_rings - stop VSI's rx rings
- * @vsi: the VSI
- *
- * Returns 0 on success and a negative value on error
- */
-static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
-{
- return ice_vsi_ctrl_rx_rings(vsi, false);
-}
-
-/**
- * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings
- * @vsi: the VSI
- * Returns 0 on success and a negative value on error
- */
-static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)
-{
- int err_tx, err_rx;
-
- err_tx = ice_vsi_stop_tx_rings(vsi);
- if (err_tx)
- dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n");
-
- err_rx = ice_vsi_stop_rx_rings(vsi);
- if (err_rx)
- dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n");
-
- if (err_tx || err_rx)
- return -EIO;
-
- return 0;
-}
-
-/**
* ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
* @vsi: the VSI being configured
*/
@@ -4419,122 +2717,6 @@ static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
}
/**
- * ice_stat_update40 - read 40 bit stat from the chip and update stat values
- * @hw: ptr to the hardware info
- * @hireg: high 32 bit HW register to read from
- * @loreg: low 32 bit HW register to read from
- * @prev_stat_loaded: bool to specify if previous stats are loaded
- * @prev_stat: ptr to previous loaded stat value
- * @cur_stat: ptr to current stat value
- */
-static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat,
- u64 *cur_stat)
-{
- u64 new_data;
-
- new_data = rd32(hw, loreg);
- new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
-
- /* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
- */
- if (!prev_stat_loaded)
- *prev_stat = new_data;
- if (likely(new_data >= *prev_stat))
- *cur_stat = new_data - *prev_stat;
- else
- /* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
- *cur_stat &= 0xFFFFFFFFFFULL;
-}
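
The rollover branch is easy to misread at 40 bits, so a quick check with hypothetical counter values:

	u64 prev_stat = 0xFFFFFFFF00ULL;	/* 2^40 - 0x100, near the limit */
	u64 new_data  = 0x100ULL;		/* counter has wrapped */

	/* new_data < prev_stat, so take the rollover branch: */
	u64 cur_stat = (new_data + BIT_ULL(40)) - prev_stat;	/* = 0x200 */
	cur_stat &= 0xFFFFFFFFFFULL;		/* still 0x200, the true delta */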
-
-/**
- * ice_stat_update32 - read 32 bit stat from the chip and update stat values
- * @hw: ptr to the hardware info
- * @reg: HW register to read from
- * @prev_stat_loaded: bool to specify if previous stats are loaded
- * @prev_stat: ptr to previous loaded stat value
- * @cur_stat: ptr to current stat value
- */
-static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
- u64 *prev_stat, u64 *cur_stat)
-{
- u32 new_data;
-
- new_data = rd32(hw, reg);
-
- /* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
- */
- if (!prev_stat_loaded)
- *prev_stat = new_data;
- if (likely(new_data >= *prev_stat))
- *cur_stat = new_data - *prev_stat;
- else
- /* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
-}
-
-/**
- * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
- * @vsi: the VSI to be updated
- */
-static void ice_update_eth_stats(struct ice_vsi *vsi)
-{
- struct ice_eth_stats *prev_es, *cur_es;
- struct ice_hw *hw = &vsi->back->hw;
- u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
-
- prev_es = &vsi->eth_stats_prev;
- cur_es = &vsi->eth_stats;
-
- ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_bytes,
- &cur_es->rx_bytes);
-
- ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_unicast,
- &cur_es->rx_unicast);
-
- ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_multicast,
- &cur_es->rx_multicast);
-
- ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
- &cur_es->rx_broadcast);
-
- ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
- &prev_es->rx_discards, &cur_es->rx_discards);
-
- ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_bytes,
- &cur_es->tx_bytes);
-
- ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_unicast,
- &cur_es->tx_unicast);
-
- ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_multicast,
- &cur_es->tx_multicast);
-
- ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
- &cur_es->tx_broadcast);
-
- ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
- &prev_es->tx_errors, &cur_es->tx_errors);
-
- vsi->stat_offsets_loaded = true;
-}
-
-/**
* ice_update_vsi_ring_stats - Update VSI stats counters
* @vsi: the VSI to be updated
*/
@@ -4806,30 +2988,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
stats->rx_length_errors = vsi_stats->rx_length_errors;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * ice_netpoll - polling "interrupt" handler
- * @netdev: network interface device structure
- *
- * Used by netconsole to send skbs without having to re-enable interrupts.
- * This is not called in the normal interrupt path.
- */
-static void ice_netpoll(struct net_device *netdev)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
- int i;
-
- if (test_bit(__ICE_DOWN, vsi->state) ||
- !test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- return;
-
- for (i = 0; i < vsi->num_q_vectors; i++)
- ice_msix_clean_rings(0, vsi->q_vectors[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
/**
* ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
* @vsi: VSI having NAPI disabled
@@ -4851,7 +3009,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, err;
+ int i, tx_err, rx_err;
/* Caller of this function is expected to set the
* vsi->state __ICE_DOWN bit
@@ -4862,7 +3020,18 @@ int ice_down(struct ice_vsi *vsi)
}
ice_vsi_dis_irq(vsi);
- err = ice_vsi_stop_tx_rx_rings(vsi);
+ tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
+ if (tx_err)
+ netdev_err(vsi->netdev,
+ "Failed stop Tx rings, VSI %d error %d\n",
+ vsi->vsi_num, tx_err);
+
+ rx_err = ice_vsi_stop_rx_rings(vsi);
+ if (rx_err)
+ netdev_err(vsi->netdev,
+ "Failed stop Rx rings, VSI %d error %d\n",
+ vsi->vsi_num, rx_err);
+
ice_napi_disable_all(vsi);
ice_for_each_txq(vsi, i)
@@ -4871,10 +3040,14 @@ int ice_down(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (err)
- netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
+ if (tx_err || rx_err) {
+ netdev_err(vsi->netdev,
+ "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
- return err;
+ return -EIO;
+ }
+
+ return 0;
}
/**
@@ -4894,6 +3067,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
}
ice_for_each_txq(vsi, i) {
+ vsi->tx_rings[i]->netdev = vsi->netdev;
err = ice_setup_tx_ring(vsi->tx_rings[i]);
if (err)
break;
@@ -4919,6 +3093,7 @@ static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
ice_for_each_rxq(vsi, i) {
+ vsi->rx_rings[i]->netdev = vsi->netdev;
err = ice_setup_rx_ring(vsi->rx_rings[i]);
if (err)
break;
@@ -4946,38 +3121,6 @@ static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
}
/**
- * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
- * @vsi: the VSI having resources freed
- */
-static void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
-{
- int i;
-
- if (!vsi->tx_rings)
- return;
-
- ice_for_each_txq(vsi, i)
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
- ice_free_tx_ring(vsi->tx_rings[i]);
-}
-
-/**
- * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
- * @vsi: the VSI having resources freed
- */
-static void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
-{
- int i;
-
- if (!vsi->rx_rings)
- return;
-
- ice_for_each_rxq(vsi, i)
- if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
- ice_free_rx_ring(vsi->rx_rings[i]);
-}
-
-/**
* ice_vsi_open - Called when a network interface is made active
* @vsi: the VSI to open
*
@@ -5038,78 +3181,26 @@ err_setup_tx:
}
/**
- * ice_vsi_close - Shut down a VSI
- * @vsi: the VSI being shut down
- */
-static void ice_vsi_close(struct ice_vsi *vsi)
-{
- if (!test_and_set_bit(__ICE_DOWN, vsi->state))
- ice_down(vsi);
-
- ice_vsi_free_irq(vsi);
- ice_vsi_free_tx_rings(vsi);
- ice_vsi_free_rx_rings(vsi);
-}
-
-/**
- * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
- * @vsi: the VSI being removed
+ * ice_vsi_release_all - Delete all VSIs
+ * @pf: PF from which all VSIs are being removed
*/
-static void ice_rss_clean(struct ice_vsi *vsi)
+static void ice_vsi_release_all(struct ice_pf *pf)
{
- struct ice_pf *pf;
-
- pf = vsi->back;
-
- if (vsi->rss_hkey_user)
- devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
- if (vsi->rss_lut_user)
- devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
-}
+ int err, i;
-/**
- * ice_vsi_release - Delete a VSI and free its resources
- * @vsi: the VSI being removed
- *
- * Returns 0 on success or < 0 on error
- */
-static int ice_vsi_release(struct ice_vsi *vsi)
-{
- struct ice_pf *pf;
+ if (!pf->vsi)
+ return;
- if (!vsi->back)
- return -ENODEV;
- pf = vsi->back;
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (!pf->vsi[i])
+ continue;
- if (vsi->netdev) {
- unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
+ err = ice_vsi_release(pf->vsi[i]);
+ if (err)
+ dev_dbg(&pf->pdev->dev,
+ "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
+ i, err, pf->vsi[i]->vsi_num);
}
-
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_rss_clean(vsi);
-
- /* Disable VSI and free resources */
- ice_vsi_dis_irq(vsi);
- ice_vsi_close(vsi);
-
- /* reclaim interrupt vectors back to PF */
- ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_msix += vsi->num_q_vectors;
-
- ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
- ice_vsi_delete(vsi);
- ice_vsi_free_q_vectors(vsi);
- ice_vsi_clear_rings(vsi);
-
- ice_vsi_put_qs(vsi);
- pf->q_left_tx += vsi->alloc_txq;
- pf->q_left_rx += vsi->alloc_rxq;
-
- ice_vsi_clear(vsi);
-
- return 0;
}
/**
@@ -5123,28 +3214,37 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
set_bit(__ICE_NEEDS_RESTART, vsi->state);
- if (vsi->netdev && netif_running(vsi->netdev) &&
- vsi->type == ICE_VSI_PF)
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
-
- ice_vsi_close(vsi);
+ if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ rtnl_lock();
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ ice_vsi_close(vsi);
+ }
+ }
}
/**
* ice_ena_vsi - resume a VSI
* @vsi: the VSI being resume
*/
-static void ice_ena_vsi(struct ice_vsi *vsi)
+static int ice_ena_vsi(struct ice_vsi *vsi)
{
- if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
- return;
+ int err = 0;
+
+ if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
+ vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ rtnl_lock();
+ err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ err = ice_vsi_open(vsi);
+ }
+ }
- if (vsi->netdev && netif_running(vsi->netdev))
- vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
- else if (ice_vsi_open(vsi))
- /* this clears the DOWN bit */
- dev_dbg(&vsi->back->pdev->dev, "Failed open VSI 0x%04X on switch 0x%04X\n",
- vsi->vsi_num, vsi->vsw->sw_id);
+ return err;
}
/**
@@ -5164,13 +3264,89 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf)
* ice_pf_ena_all_vsi - Resume all VSIs on a PF
* @pf: the PF
*/
-static void ice_pf_ena_all_vsi(struct ice_pf *pf)
+static int ice_pf_ena_all_vsi(struct ice_pf *pf)
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
- ice_ena_vsi(pf->vsi[v]);
+ if (ice_ena_vsi(pf->vsi[v]))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_rebuild_all - rebuild all VSIs in pf
+ * @pf: the PF
+ */
+static int ice_vsi_rebuild_all(struct ice_pf *pf)
+{
+ int i;
+
+ /* loop through the pf->vsi array and rebuild each VSI found */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ int err;
+
+ if (!pf->vsi[i])
+ continue;
+
+ /* VF VSI rebuild isn't supported yet */
+ if (pf->vsi[i]->type == ICE_VSI_VF)
+ continue;
+
+ err = ice_vsi_rebuild(pf->vsi[i]);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "VSI at index %d rebuild failed\n",
+ pf->vsi[i]->idx);
+ return err;
+ }
+
+ dev_info(&pf->pdev->dev,
+ "VSI at index %d rebuilt. vsi_num = 0x%x\n",
+ pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vsi_replay_all - replay all VSIs configuration in the PF
+ * @pf: the PF
+ */
+static int ice_vsi_replay_all(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status ret;
+ int i;
+
+ /* loop through pf->vsi array and replay the VSI if found */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (!pf->vsi[i])
+ continue;
+
+ ret = ice_replay_vsi(hw, pf->vsi[i]->idx);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "VSI at index %d replay failed %d\n",
+ pf->vsi[i]->idx, ret);
+ return -EIO;
+ }
+
+ /* Re-map the HW VSI number using the VSI handle that was
+ * validated in the ice_replay_vsi() call above
+ */
+ pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx);
+
+ dev_info(&pf->pdev->dev,
+ "VSI at index %d filter replayed successfully - vsi_num %i\n",
+ pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+ }
+
+ /* Clean up replay filter after successful re-configuration */
+ ice_replay_post(hw);
+ return 0;
}
/**
@@ -5192,13 +3368,13 @@ static void ice_rebuild(struct ice_pf *pf)
ret = ice_init_all_ctrlq(hw);
if (ret) {
dev_err(dev, "control queues init failed %d\n", ret);
- goto fail_reset;
+ goto err_init_ctrlq;
}
ret = ice_clear_pf_cfg(hw);
if (ret) {
dev_err(dev, "clear PF configuration failed %d\n", ret);
- goto fail_reset;
+ goto err_init_ctrlq;
}
ice_clear_pxe_mode(hw);
@@ -5206,14 +3382,34 @@ static void ice_rebuild(struct ice_pf *pf)
ret = ice_get_caps(hw);
if (ret) {
dev_err(dev, "ice_get_caps failed %d\n", ret);
- goto fail_reset;
+ goto err_init_ctrlq;
}
- /* basic nic switch setup */
- err = ice_setup_pf_sw(pf);
+ err = ice_sched_init_port(hw->port_info);
+ if (err)
+ goto err_sched_init_port;
+
+ /* reset the irq_trackers' search_hint to 0 since interrupts were
+ * reclaimed and may be allocated from the beginning during VSI rebuild
+ */
+ pf->sw_irq_tracker->search_hint = 0;
+ pf->hw_irq_tracker->search_hint = 0;
+
+ err = ice_vsi_rebuild_all(pf);
if (err) {
- dev_err(dev, "ice_setup_pf_sw failed\n");
- goto fail_reset;
+ dev_err(dev, "ice_vsi_rebuild_all failed\n");
+ goto err_vsi_rebuild;
+ }
+
+ err = ice_update_link_info(hw->port_info);
+ if (err)
+ dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
+
+ /* Replay all VSI configurations, including filters, after the reset */
+ if (ice_vsi_replay_all(pf)) {
+ dev_err(&pf->pdev->dev,
+ "error replaying VSI configurations with switch filter rules\n");
+ goto err_vsi_rebuild;
}
/* start misc vector */
@@ -5221,20 +3417,36 @@ static void ice_rebuild(struct ice_pf *pf)
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "misc vector setup failed: %d\n", err);
- goto fail_reset;
+ goto err_vsi_rebuild;
}
}
/* restart the VSIs that were rebuilt and running before the reset */
- ice_pf_ena_all_vsi(pf);
+ err = ice_pf_ena_all_vsi(pf);
+ if (err) {
+ dev_err(&pf->pdev->dev, "error enabling VSIs\n");
+ /* no need to disable VSIs in the teardown path of ice_rebuild()
+ * since it's already taken care of in ice_vsi_open()
+ */
+ goto err_vsi_rebuild;
+ }
+ ice_reset_all_vfs(pf, true);
+ /* if we get here, reset flow is successful */
+ clear_bit(__ICE_RESET_FAILED, pf->state);
return;
-fail_reset:
+err_vsi_rebuild:
+ ice_vsi_release_all(pf);
+err_sched_init_port:
+ ice_sched_cleanup_all(hw);
+err_init_ctrlq:
ice_shutdown_all_ctrlq(hw);
set_bit(__ICE_RESET_FAILED, pf->state);
clear_recovery:
- set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ /* set this bit in PF state to control service task scheduling */
+ set_bit(__ICE_NEEDS_RESTART, pf->state);
+ dev_err(dev, "Rebuild failed, unload and reload driver\n");
}
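
After this rework the rebuild path is a strict sequence with one unwind label per stage. Condensed from the hunks above (error handling elided):

	ice_init_all_ctrlq(hw);			/* talk to FW again */
	ice_clear_pf_cfg(hw);
	ice_clear_pxe_mode(hw);
	ice_get_caps(hw);
	ice_sched_init_port(hw->port_info);
	pf->sw_irq_tracker->search_hint = 0;	/* vectors were reclaimed */
	pf->hw_irq_tracker->search_hint = 0;
	ice_vsi_rebuild_all(pf);
	ice_update_link_info(hw->port_info);
	ice_vsi_replay_all(pf);			/* switch filters back in place */
	ice_req_irq_msix_misc(pf);		/* if MSI-X is enabled */
	ice_pf_ena_all_vsi(pf);
	ice_reset_all_vfs(pf, true);
	clear_bit(__ICE_RESET_FAILED, pf->state);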
/**
@@ -5267,7 +3479,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
/* if a reset is in progress, wait for some time for it to complete */
do {
- if (ice_is_reset_recovery_pending(pf->state)) {
+ if (ice_is_reset_in_progress(pf->state)) {
count++;
usleep_range(1000, 2000);
} else {
@@ -5323,7 +3535,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
- status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf);
+ status = ice_aq_set_rss_key(hw, vsi->idx, buf);
if (status) {
dev_err(&pf->pdev->dev,
@@ -5334,8 +3546,8 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
}
if (lut) {
- status = ice_aq_set_rss_lut(hw, vsi->vsi_num,
- vsi->rss_lut_type, lut, lut_size);
+ status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
+ lut, lut_size);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot set RSS lut, err %d aq_err %d\n",
@@ -5366,7 +3578,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
struct ice_aqc_get_set_rss_keys *buf =
(struct ice_aqc_get_set_rss_keys *)seed;
- status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf);
+ status = ice_aq_get_rss_key(hw, vsi->idx, buf);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot get RSS key, err %d aq_err %d\n",
@@ -5376,8 +3588,8 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
}
if (lut) {
- status = ice_aq_get_rss_lut(hw, vsi->vsi_num,
- vsi->rss_lut_type, lut, lut_size);
+ status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
+ lut, lut_size);
if (status) {
dev_err(&pf->pdev->dev,
"Cannot get RSS lut, err %d aq_err %d\n",
@@ -5390,6 +3602,232 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
}
/**
+ * ice_bridge_getlink - Get the hardware bridge mode
+ * @skb: skb buff
+ * @pid: process id
+ * @seq: RTNL message seq
+ * @dev: the netdev being configured
+ * @filter_mask: filter mask passed in
+ * @nlflags: netlink flags passed in
+ *
+ * Return the bridge mode (VEB/VEPA)
+ */
+static int
+ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask, int nlflags)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u16 bmode;
+
+ bmode = pf->first_sw->bridge_mode;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
+ filter_mask, NULL);
+}
+
+/**
+ * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
+ * @vsi: Pointer to VSI structure
+ * @bmode: Hardware bridge mode (VEB/VEPA)
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_aqc_vsi_props *vsi_props;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ vsi_props = &vsi->info;
+ ctxt.info = vsi->info;
+
+ if (bmode == BRIDGE_MODE_VEB)
+ /* change from VEPA to VEB mode */
+ ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+ else
+ /* change from VEB to VEPA mode */
+ ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
+ bmode, status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+ /* Update sw flags for bookkeeping */
+ vsi_props->sw_flags = ctxt.info.sw_flags;
+
+ return 0;
+}
+
+/**
+ * ice_bridge_setlink - Set the hardware bridge mode
+ * @dev: the netdev being configured
+ * @nlh: RTNL message
+ * @flags: bridge setlink flags
+ *
+ * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
+ * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
+ * not already set) for all VSIs connected to this switch, and also updates the
+ * unicast switch filter rules for the corresponding switch of the netdev.
+ */
+static int
+ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 __always_unused flags)
+{
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ struct ice_pf *pf = np->vsi->back;
+ struct nlattr *attr, *br_spec;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ struct ice_sw *pf_sw;
+ int rem, v, err = 0;
+
+ pf_sw = pf->first_sw;
+ /* find the attribute in the netlink message */
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+ mode = nla_get_u16(attr);
+ if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
+ return -EINVAL;
+ /* Continue if bridge mode is not being flipped */
+ if (mode == pf_sw->bridge_mode)
+ continue;
+ /* Iterate through the PF VSI list and update the loopback
+ * mode of the VSI
+ */
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+ err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
+ if (err)
+ return err;
+ }
+
+ hw->evb_veb = (mode == BRIDGE_MODE_VEB);
+ /* Update the unicast switch filter rules for the corresponding
+ * switch of the netdev
+ */
+ status = ice_update_sw_rule_bridge_mode(hw);
+ if (status) {
+ netdev_err(dev, "update SW_RULE for bridge mode failed, mode = %d err %d aq_err %d\n",
+ mode, status, hw->adminq.sq_last_status);
+ /* revert hw->evb_veb */
+ hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
+ return -EIO;
+ }
+
+ pf_sw->bridge_mode = mode;
+ }
+
+ return 0;
+}
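A usage sketch, not part of the patch: with iproute2 the mode flip handled here is typically driven by

	bridge link set dev <pf-netdev> hwmode veb

(or hwmode vepa), which delivers the IFLA_BRIDGE_MODE attribute this handler parses; <pf-netdev> is a placeholder for the PF's netdev name.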
+
+/**
+ * ice_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void ice_tx_timeout(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_ring *tx_ring = NULL;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u32 head, val = 0, i;
+ int hung_queue = -1;
+
+ pf->tx_timeout_count++;
+
+ /* find the stopped queue the same way the stack does */
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ struct netdev_queue *q;
+ unsigned long trans_start;
+
+ q = netdev_get_tx_queue(netdev, i);
+ trans_start = q->trans_start;
+ if (netif_xmit_stopped(q) &&
+ time_after(jiffies,
+ (trans_start + netdev->watchdog_timeo))) {
+ hung_queue = i;
+ break;
+ }
+ }
+
+ if (i == netdev->num_tx_queues) {
+ netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
+ } else {
+ /* now that we have an index, find the tx_ring struct */
+ for (i = 0; i < vsi->num_txq; i++) {
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
+ if (hung_queue ==
+ vsi->tx_rings[i]->q_index) {
+ tx_ring = vsi->tx_rings[i];
+ break;
+ }
+ }
+ }
+ }
+
+ /* Reset recovery level if enough time has elapsed since the last
+ * timeout. Also ensure no new reset action happens before the next
+ * timeout period.
+ */
+ if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
+ pf->tx_timeout_recovery_level = 1;
+ else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
+ netdev->watchdog_timeo)))
+ return;
+
+ if (tx_ring) {
+ head = tx_ring->next_to_clean;
+ /* Read interrupt register */
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ val = rd32(&pf->hw,
+ GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
+ tx_ring->vsi->hw_base_vector));
+
+ netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+ vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
+ head, tx_ring->next_to_use,
+ readl(tx_ring->tail), val);
+ }
+
+ pf->tx_timeout_last_recovery = jiffies;
+ netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
+ pf->tx_timeout_recovery_level, hung_queue);
+
+ switch (pf->tx_timeout_recovery_level) {
+ case 1:
+ set_bit(__ICE_PFR_REQ, pf->state);
+ break;
+ case 2:
+ set_bit(__ICE_CORER_REQ, pf->state);
+ break;
+ case 3:
+ set_bit(__ICE_GLOBR_REQ, pf->state);
+ break;
+ default:
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
+ set_bit(__ICE_DOWN, pf->state);
+ set_bit(__ICE_NEEDS_RESTART, vsi->state);
+ set_bit(__ICE_SERVICE_DIS, pf->state);
+ break;
+ }
+
+ ice_service_task_schedule(pf);
+ pf->tx_timeout_recovery_level++;
+}
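A worked example of the two time guards above, assuming a watchdog_timeo of 5 seconds (the actual value is set elsewhere in the driver): a first hang runs level 1 recovery (PFR) and bumps the level; a second hang within 5 s of that recovery returns early with no action; a hang between 5 s and 20 s escalates to level 2 (CORER); and any hang more than 20 s after the last recovery starts over at level 1.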
+
+/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -5407,6 +3845,11 @@ static int ice_open(struct net_device *netdev)
struct ice_vsi *vsi = np->vsi;
int err;
+ if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
+ netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
+ return -EIO;
+ }
+
netif_carrier_off(netdev);
err = ice_vsi_open(vsi);
@@ -5497,12 +3940,18 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ice_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
+ .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
+ .ndo_set_vf_mac = ice_set_vf_mac,
+ .ndo_get_vf_config = ice_get_vf_cfg,
+ .ndo_set_vf_trust = ice_set_vf_trust,
+ .ndo_set_vf_vlan = ice_set_vf_port_vlan,
+ .ndo_set_vf_link_state = ice_set_vf_link_state,
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
.ndo_set_features = ice_set_features,
+ .ndo_bridge_getlink = ice_bridge_getlink,
+ .ndo_bridge_setlink = ice_bridge_setlink,
.ndo_fdb_add = ice_fdb_add,
.ndo_fdb_del = ice_fdb_del,
+ .ndo_tx_timeout = ice_tx_timeout,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 295a8cd87fc1..3274c543283c 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -137,7 +137,7 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
if (hw->nvm.blank_nvm_mode)
return 0;
- return ice_acquire_res(hw, ICE_NVM_RES_ID, access);
+ return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index eeae199469b6..7cc8aa18a22b 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -17,7 +17,6 @@ ice_sched_add_root_node(struct ice_port_info *pi,
{
struct ice_sched_node *root;
struct ice_hw *hw;
- u16 max_children;
if (!pi)
return ICE_ERR_PARAM;
@@ -28,8 +27,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
if (!root)
return ICE_ERR_NO_MEMORY;
- max_children = le16_to_cpu(hw->layer_info[0].max_children);
- root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
+ /* coverity[suspicious_sizeof] */
+ root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
sizeof(*root), GFP_KERNEL);
if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root);
@@ -86,6 +85,62 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
}
/**
+ * ice_aq_query_sched_elems - query scheduler elements
+ * @hw: pointer to the hw struct
+ * @elems_req: number of elements to query
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements returned
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query scheduling elements (0x0404)
+ */
+static enum ice_status
+ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_get_elem *buf, u16 buf_size,
+ u16 *elems_ret, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_cfg_elem *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_update_elem;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems);
+ cmd->num_elem_req = cpu_to_le16(elems_req);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && elems_ret)
+ *elems_ret = le16_to_cpu(cmd->num_elem_resp);
+
+ return status;
+}
+
+/**
+ * ice_sched_query_elem - query element information from hw
+ * @hw: pointer to the hw struct
+ * @node_teid: node teid to be queried
+ * @buf: buffer to element information
+ *
+ * This function queries HW element information
+ */
+static enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf)
+{
+ u16 buf_size, num_elem_ret = 0;
+ enum ice_status status;
+
+ buf_size = sizeof(*buf);
+ memset(buf, 0, buf_size);
+ buf->generic[0].node_teid = cpu_to_le32(node_teid);
+ status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
+ NULL);
+ if (status || num_elem_ret != 1)
+ ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
+ return status;
+}
+
+/**
* ice_sched_add_node - Insert the Tx scheduler node in SW DB
* @pi: port information structure
* @layer: Scheduler layer of the node
@@ -98,9 +153,10 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_sched_node *parent;
+ struct ice_aqc_get_elem elem;
struct ice_sched_node *node;
+ enum ice_status status;
struct ice_hw *hw;
- u16 max_children;
if (!pi)
return ICE_ERR_PARAM;
@@ -117,12 +173,20 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
return ICE_ERR_PARAM;
}
+ /* query the current node information from FW before adding it
+ * to the SW DB
+ */
+ status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
+ if (status)
+ return status;
+
node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
if (!node)
return ICE_ERR_NO_MEMORY;
- max_children = le16_to_cpu(hw->layer_info[layer].max_children);
- if (max_children) {
- node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
+ if (hw->max_children[layer]) {
+ /* coverity[suspicious_sizeof] */
+ node->children = devm_kcalloc(ice_hw_to_dev(hw),
+ hw->max_children[layer],
sizeof(*node), GFP_KERNEL);
if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node);
@@ -134,7 +198,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node->parent = parent;
node->tx_sched_layer = layer;
parent->children[parent->num_children++] = node;
- memcpy(&node->info, info, sizeof(*info));
+ memcpy(&node->info, &elem.generic[0], sizeof(node->info));
return 0;
}
@@ -192,14 +256,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
+
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
for (i = 0; i < num_nodes; i++)
buf->teid[i] = cpu_to_le32(node_teids[i]);
+
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
if (status || num_groups_removed != 1)
ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
+
devm_kfree(ice_hw_to_dev(hw), buf);
return status;
}
@@ -532,9 +599,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
struct ice_sched_agg_info *agg_info;
- struct ice_sched_vsi_info *vsi_elem;
struct ice_sched_agg_info *atmp;
- struct ice_sched_vsi_info *tmp;
struct ice_hw *hw;
if (!pi)
@@ -553,13 +618,6 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
}
}
- /* remove the vsi list */
- list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list,
- list_entry) {
- list_del(&vsi_elem->list_entry);
- devm_kfree(ice_hw_to_dev(hw), vsi_elem);
- }
-
if (pi->root) {
ice_free_sched_node(pi, pi->root);
pi->root = NULL;
@@ -592,13 +650,16 @@ static void ice_sched_clear_port(struct ice_port_info *pi)
*/
void ice_sched_cleanup_all(struct ice_hw *hw)
{
- if (!hw || !hw->port_info)
+ if (!hw)
return;
- if (hw->layer_info)
+ if (hw->layer_info) {
devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
+ hw->layer_info = NULL;
+ }
- ice_sched_clear_port(hw->port_info);
+ if (hw->port_info)
+ ice_sched_clear_port(hw->port_info);
hw->num_tx_sched_layers = 0;
hw->num_tx_sched_phys_layers = 0;
@@ -607,31 +668,6 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
}
/**
- * ice_sched_create_vsi_info_entry - create an empty new VSI entry
- * @pi: port information structure
- * @vsi_id: VSI Id
- *
- * This function creates a new VSI entry and adds it to list
- */
-static struct ice_sched_vsi_info *
-ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
-{
- struct ice_sched_vsi_info *vsi_elem;
-
- if (!pi)
- return NULL;
-
- vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem),
- GFP_KERNEL);
- if (!vsi_elem)
- return NULL;
-
- list_add(&vsi_elem->list_entry, &pi->vsi_info_list);
- vsi_elem->vsi_id = vsi_id;
- return vsi_elem;
-}
-
-/**
* ice_sched_add_elems - add nodes to hw and SW DB
* @pi: port information structure
* @tc_node: pointer to the branch node
@@ -671,9 +707,13 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
ICE_AQC_ELEM_VALID_EIR;
buf->generic[i].data.generic = 0;
buf->generic[i].data.cir_bw.bw_profile_idx =
- ICE_SCHED_DFLT_RL_PROF_ID;
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->generic[i].data.cir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
buf->generic[i].data.eir_bw.bw_profile_idx =
- ICE_SCHED_DFLT_RL_PROF_ID;
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->generic[i].data.eir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
}
status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
@@ -697,7 +737,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
teid = le32_to_cpu(buf->generic[i].node_teid);
new_node = ice_sched_find_node_by_teid(parent, teid);
-
if (!new_node) {
ice_debug(hw, ICE_DBG_SCHED,
"Node is missing for teid =%d\n", teid);
@@ -710,7 +749,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/* add it to previous node sibling pointer */
/* Note: siblings are not linked across branches */
prev = ice_sched_get_first_node(hw, tc_node, layer);
-
if (prev && prev != new_node) {
while (prev->sibling)
prev = prev->sibling;
@@ -760,8 +798,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
return ICE_ERR_PARAM;
/* max children per node per layer */
- max_child_nodes =
- le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
+ max_child_nodes = hw->max_children[parent->tx_sched_layer];
/* current number of children + required nodes exceed max children ? */
if ((parent->num_children + num_nodes) > max_child_nodes) {
@@ -851,78 +888,6 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
}
/**
- * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer
- * @pi: pointer to the port info struct
- * @layer: layer number
- *
- * This function calculates the number of nodes present in the scheduler tree
- * including all the branches for a given layer
- */
-static u16
-ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer)
-{
- struct ice_hw *hw;
- u16 num_nodes = 0;
- u8 i;
-
- if (!pi)
- return num_nodes;
-
- hw = pi->hw;
-
- /* Calculate the number of nodes for all TCs */
- for (i = 0; i < pi->root->num_children; i++) {
- struct ice_sched_node *tc_node, *node;
-
- tc_node = pi->root->children[i];
-
- /* Get the first node */
- node = ice_sched_get_first_node(hw, tc_node, layer);
- if (!node)
- continue;
-
- /* count the siblings */
- while (node) {
- num_nodes++;
- node = node->sibling;
- }
- }
-
- return num_nodes;
-}
-
-/**
- * ice_sched_val_max_nodes - check max number of nodes reached or not
- * @pi: port information structure
- * @new_num_nodes_per_layer: pointer to the new number of nodes array
- *
- * This function checks whether the scheduler tree layers have enough space to
- * add new nodes
- */
-static enum ice_status
-ice_sched_validate_for_max_nodes(struct ice_port_info *pi,
- u16 *new_num_nodes_per_layer)
-{
- struct ice_hw *hw = pi->hw;
- u8 i, qg_layer;
- u16 num_nodes;
-
- qg_layer = ice_sched_get_qgrp_layer(hw);
-
- /* walk through all the layers from SW entry point to qgroup layer */
- for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) {
- num_nodes = ice_sched_get_num_nodes_per_layer(pi, i);
- if (num_nodes + new_num_nodes_per_layer[i] >
- le16_to_cpu(hw->layer_info[i].max_pf_nodes)) {
- ice_debug(hw, ICE_DBG_SCHED,
- "max nodes reached for layer = %d\n", i);
- return ICE_ERR_CFG;
- }
- }
- return 0;
-}
-
-/**
* ice_rm_dflt_leaf_node - remove the default leaf node in the tree
* @pi: port information structure
*
@@ -1003,14 +968,12 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
hw = pi->hw;
/* Query the Default Topology from FW */
- buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES,
- sizeof(*buf), GFP_KERNEL);
+ buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
/* Query default scheduling tree topology */
- status = ice_aq_get_dflt_topo(hw, pi->lport, buf,
- sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES,
+ status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
&num_branches, NULL);
if (status)
goto err_init_port;
@@ -1075,7 +1038,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
pi->port_state = ICE_SCHED_PORT_STATE_READY;
mutex_init(&pi->sched_lock);
INIT_LIST_HEAD(&pi->agg_list);
- INIT_LIST_HEAD(&pi->vsi_info_list);
err_init_port:
if (status && pi->root) {
@@ -1097,6 +1059,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
struct ice_aqc_query_txsched_res_resp *buf;
enum ice_status status = 0;
+ __le16 max_sibl;
+ u8 i;
if (hw->layer_info)
return status;
@@ -1115,7 +1079,20 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
hw->flattened_layers = buf->sched_props.flattening_bitmap;
hw->max_cgds = buf->sched_props.max_pf_cgds;
- hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
+ /* A node's max children is given by the max sibling group size of
+ * the layer below it:
+ * layer 1 node max children will be layer 2 max sibling group size,
+ * layer 2 node max children will be layer 3 max sibling group size,
+ * and so on. This array is populated from the root (index 0) down to
+ * the qgroup layer (7). Leaf nodes have no children.
+ */
+ for (i = 0; i < hw->num_tx_sched_layers; i++) {
+ max_sibl = buf->layer_props[i].max_sibl_grp_sz;
+ hw->max_children[i] = le16_to_cpu(max_sibl);
+ }
+
+ hw->layer_info = (struct ice_aqc_layer_props *)
+ devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
(hw->num_tx_sched_layers *
sizeof(*hw->layer_info)),
GFP_KERNEL);
@@ -1130,27 +1107,6 @@ sched_query_out:
}
/**
- * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id
- * @pi: port information structure
- * @vsi_id: vsi id
- *
- * This function retrieves the vsi list for the given vsi id
- */
-static struct ice_sched_vsi_info *
-ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
-{
- struct ice_sched_vsi_info *list_elem;
-
- if (!pi)
- return NULL;
-
- list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry)
- if (list_elem->vsi_id == vsi_id)
- return list_elem;
- return NULL;
-}
-
-/**
* ice_sched_find_node_in_subtree - Find node in part of base node subtree
* @hw: pointer to the hw struct
* @base: pointer to the base node
@@ -1186,30 +1142,28 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
/**
* ice_sched_get_free_qparent - Get a free lan or rdma q group node
* @pi: port information structure
- * @vsi_id: vsi id
+ * @vsi_handle: software VSI handle
* @tc: branch number
* @owner: lan or rdma
*
* This function retrieves a free lan or rdma q group node
*/
struct ice_sched_node *
-ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner)
{
struct ice_sched_node *vsi_node, *qgrp_node = NULL;
- struct ice_sched_vsi_info *list_elem;
+ struct ice_vsi_ctx *vsi_ctx;
u16 max_children;
u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
- max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children);
-
- list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
- if (!list_elem)
- goto lan_q_exit;
-
- vsi_node = list_elem->vsi_node[tc];
+ max_children = pi->hw->max_children[qgrp_layer];
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return NULL;
+ vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate the VSI id: bail out if no VSI node exists */
if (!vsi_node)
goto lan_q_exit;
@@ -1233,14 +1187,14 @@ lan_q_exit:
* ice_sched_get_vsi_node - Get a VSI node based on VSI id
* @hw: pointer to the hw struct
* @tc_node: pointer to the TC node
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
*
* This function retrieves a VSI node for a given VSI id from a given
* TC branch
*/
static struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
- u16 vsi_id)
+ u16 vsi_handle)
{
struct ice_sched_node *node;
u8 vsi_layer;
@@ -1250,7 +1204,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
/* Check whether it already exists */
while (node) {
- if (node->vsi_id == vsi_id)
+ if (node->vsi_handle == vsi_handle)
return node;
node = node->sibling;
}
@@ -1278,10 +1232,8 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
/* calculate num nodes from q group to VSI layer */
for (i = qgl; i > vsil; i--) {
- u16 max_children = le16_to_cpu(hw->layer_info[i].max_children);
-
/* round to the next integer if there is a remainder */
- num = DIV_ROUND_UP(num, max_children);
+ num = DIV_ROUND_UP(num, hw->max_children[i]);
/* need at least one node */
num_nodes[i] = num ? num : 1;
@@ -1291,7 +1243,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
/**
* ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
* @pi: port information structure
- * @vsi_id: VSI id
+ * @vsi_handle: software VSI handle
* @tc_node: pointer to the TC node
* @num_nodes: pointer to the num nodes that needs to be added per layer
* @owner: node owner (lan or rdma)
@@ -1300,7 +1252,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
* lan and rdma separately.
*/
static enum ice_status
-ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
+ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes,
u8 owner)
{
@@ -1311,16 +1263,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
u16 num_added = 0;
u8 i, qgl, vsil;
- status = ice_sched_validate_for_max_nodes(pi, num_nodes);
- if (status)
- return status;
-
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
- parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
if (!parent)
return ICE_ERR_CFG;
+
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i],
&first_node_teid,
@@ -1398,8 +1347,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *node;
- u16 max_child;
- u8 i, vsil;
+ u8 vsil;
+ int i;
vsil = ice_sched_get_vsi_layer(hw);
for (i = vsil; i >= hw->sw_entry_point_layer; i--)
@@ -1412,12 +1361,10 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
/* If intermediate nodes are reached max children
* then add a new one.
*/
- node = ice_sched_get_first_node(hw, tc_node, i);
- max_child = le16_to_cpu(hw->layer_info[i].max_children);
-
+ node = ice_sched_get_first_node(hw, tc_node, (u8)i);
/* scan all the siblings */
while (node) {
- if (node->num_children < max_child)
+ if (node->num_children < hw->max_children[i])
break;
node = node->sibling;
}
@@ -1431,7 +1378,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
/**
* ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
@@ -1439,7 +1386,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
* VSI, its parent and intermediate nodes in below layers
*/
static enum ice_status
-ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
+ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *parent = tc_node;
@@ -1451,10 +1398,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
if (!pi)
return ICE_ERR_PARAM;
- status = ice_sched_validate_for_max_nodes(pi, num_nodes);
- if (status)
- return status;
-
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
@@ -1477,21 +1420,22 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
return ICE_ERR_CFG;
if (i == vsil)
- parent->vsi_id = vsi_id;
+ parent->vsi_handle = vsi_handle;
}
+
return 0;
}
/**
* ice_sched_add_vsi_to_topo - add a new VSI into tree
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc: TC number
*
* This function adds a new VSI into scheduler tree
*/
static enum ice_status
-ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
+ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *tc_node;
@@ -1505,13 +1449,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
/* add vsi supported nodes to tc subtree */
- return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes);
+ return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+ num_nodes);
}
/**
* ice_sched_update_vsi_child_nodes - update VSI child nodes
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc: TC number
* @new_numqs: new number of max queues
* @owner: owner of this subtree
@@ -1519,14 +1464,14 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
* This function updates the VSI child nodes based on the number of queues
*/
static enum ice_status
-ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
- u16 new_numqs, u8 owner)
+ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
+ u8 tc, u16 new_numqs, u8 owner)
{
u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
- struct ice_sched_vsi_info *vsi;
+ struct ice_vsi_ctx *vsi_ctx;
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
@@ -1536,16 +1481,16 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
if (!tc_node)
return ICE_ERR_CFG;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
if (!vsi_node)
return ICE_ERR_CFG;
- vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
- if (!vsi)
- return ICE_ERR_CFG;
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
if (owner == ICE_SCHED_NODE_OWNER_LAN)
- prev_numqs = vsi->max_lanq[tc];
+ prev_numqs = vsi_ctx->sched.max_lanq[tc];
else
return ICE_ERR_PARAM;
@@ -1570,13 +1515,13 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
new_num_nodes[i] -= prev_num_nodes[i];
- status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node,
+ status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
new_num_nodes, owner);
if (status)
return status;
}
- vsi->max_lanq[tc] = new_numqs;
+ vsi_ctx->sched.max_lanq[tc] = new_numqs;
return status;
}
@@ -1584,7 +1529,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
/**
* ice_sched_cfg_vsi - configure the new/existing VSI
* @pi: port information structure
- * @vsi_id: VSI Id
+ * @vsi_handle: software VSI handle
* @tc: TC number
* @maxqs: max number of queues
* @owner: lan or rdma
@@ -1595,25 +1540,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
* disabled then suspend the VSI if it is not already.
*/
enum ice_status
-ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable)
{
struct ice_sched_node *vsi_node, *tc_node;
- struct ice_sched_vsi_info *vsi;
+ struct ice_vsi_ctx *vsi_ctx;
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_PARAM;
-
- vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
- if (!vsi)
- vsi = ice_sched_create_vsi_info_entry(pi, vsi_id);
- if (!vsi)
- return ICE_ERR_NO_MEMORY;
-
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
/* suspend the VSI if tc is not enabled */
if (!enable) {
@@ -1630,18 +1571,26 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
/* TC is enabled, if it is a new VSI then add it to the tree */
if (!vsi_node) {
- status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);
+ status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
if (status)
return status;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
if (!vsi_node)
return ICE_ERR_CFG;
- vsi->vsi_node[tc] = vsi_node;
+
+ vsi_ctx->sched.vsi_node[tc] = vsi_node;
vsi_node->in_use = true;
+ /* invalidate the max queues whenever VSI gets added first time
+ * into the scheduler tree (boot or after reset). We need to
+ * recreate the child nodes all the time in these cases.
+ */
+ vsi_ctx->sched.max_lanq[tc] = 0;
}
/* update the VSI child nodes */
- status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner);
+ status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
+ owner);
if (status)
return status;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index badadcc120d3..5dc9cfa04c58 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -12,7 +12,6 @@
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
- u16 vsi_id;
};
struct ice_sched_agg_info {
@@ -35,9 +34,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
-ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner);
enum ice_status
-ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
new file mode 100644
index 000000000000..027eba4e13f8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_adminq_cmd.h"
+#include "ice_sriov.h"
+
+/**
+ * ice_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) using the mailbox
+ * queue; the message is sent asynchronously via the
+ * ice_sq_send_cmd() function
+ */
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pf_vf_msg *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+ cmd = &desc.params.virt;
+ cmd->id = cpu_to_le32(vfid);
+
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+
+ if (msglen)
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert link speed supported by HW to link speed supported by virtchnl.
+ * If adv_link_support is true, then return link speed in Mbps. Else return
+ * link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that the caller
+ * needs to cast back to an enum virtchnl_link_speed in the case where
+ * adv_link_support is false, but when adv_link_support is true the caller can
+ * expect the speed in Mbps.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+ u32 speed;
+
+ if (adv_link_support)
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ speed = ICE_LINK_SPEED_10MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = ICE_LINK_SPEED_100MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ speed = ICE_LINK_SPEED_1000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ speed = ICE_LINK_SPEED_2500MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = ICE_LINK_SPEED_5000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = ICE_LINK_SPEED_10000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = ICE_LINK_SPEED_20000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = ICE_LINK_SPEED_25000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = ICE_LINK_SPEED_40000MBPS;
+ break;
+ default:
+ speed = ICE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ else
+ /* Virtchnl speeds are not defined for every speed supported in
+ * the hardware. To maintain compatibility with older AVF
+ * drivers, the new speed values are resolved to the closest
+ * known virtchnl speed when the speed is reported.
+ */
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ case ICE_AQ_LINK_SPEED_2500MB:
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
+ break;
+ default:
+ speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed;
+}
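A usage sketch (assumed caller, not from this patch; example_report() is hypothetical) showing how the two reporting formats differ for the same AQ speed:

	static void example_report(u16 aq_link_speed)
	{
		u32 mbps, legacy;

		/* advanced link support: speed as a raw Mbps value */
		mbps = ice_conv_link_speed_to_virtchnl(true, aq_link_speed);

		/* legacy AVF: value must be cast back to the virtchnl enum */
		legacy = ice_conv_link_speed_to_virtchnl(false, aq_link_speed);

		/* e.g. for aq_link_speed == ICE_AQ_LINK_SPEED_25GB:
		 *   mbps   == ICE_LINK_SPEED_25000MBPS (25000, per the Mbps
		 *             convention the function comment states)
		 *   legacy == (u32)VIRTCHNL_LINK_SPEED_25GB
		 */
	}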
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
new file mode 100644
index 000000000000..3d78a0795138
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_SRIOV_H_
+#define _ICE_SRIOV_H_
+
+#include "ice_common.h"
+
+#ifdef CONFIG_PCI_IOV
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+#else /* CONFIG_PCI_IOV */
+static inline enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
+ u16 __always_unused vfid, u32 __always_unused v_opcode,
+ u32 __always_unused v_retval, u8 __always_unused *msg,
+ u16 __always_unused msglen,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline u32
+ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
+ u16 __always_unused link_speed)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_SRIOV_H_ */
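Design note on the stubs above: with CONFIG_PCI_IOV disabled, common code can still call both helpers unconditionally, with no #ifdef at the call sites, and the inline stubs fold away to constant returns.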
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index 9a95c4ffd7d7..f49f299ddf2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -6,6 +6,9 @@
/* Error Codes */
enum ice_status {
+ ICE_SUCCESS = 0,
+
+ /* Generic codes : Range -1..-49 */
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
@@ -20,6 +23,7 @@ enum ice_status {
ICE_ERR_ALREADY_EXISTS = -14,
ICE_ERR_DOES_NOT_EXIST = -15,
ICE_ERR_MAX_LIMIT = -17,
+ ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
ICE_ERR_AQ_ERROR = -100,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 6b7ec2ae5ad6..33403f39f1b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -86,6 +86,36 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
}
/**
+ * ice_init_def_sw_recp - initialize the recipe bookkeeping tables
+ * @hw: pointer to the hw struct
+ *
+ * Allocate memory for the entire recipe table and initialize the structures/
+ * entries corresponding to basic recipes.
+ */
+enum ice_status
+ice_init_def_sw_recp(struct ice_hw *hw)
+{
+ struct ice_sw_recipe *recps;
+ u8 i;
+
+ recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
+ sizeof(struct ice_sw_recipe), GFP_KERNEL);
+ if (!recps)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ recps[i].root_rid = i;
+ INIT_LIST_HEAD(&recps[i].filt_rules);
+ INIT_LIST_HEAD(&recps[i].filt_replay_rules);
+ mutex_init(&recps[i].filt_rule_lock);
+ }
+
+ hw->switch_info->recp_list = recps;
+
+ return 0;
+}
+
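A minimal sketch (not part of the patch; it mirrors how ice_update_sw_rule_bridge_mode() later in this file walks the new table) of the intended access pattern for a recipe's rule list:

	static void example_walk_mac_rules(struct ice_hw *hw)
	{
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_sw_recipe *recp;

		recp = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
		mutex_lock(&recp->filt_rule_lock);
		list_for_each_entry(entry, &recp->filt_rules, list_entry) {
			/* inspect or update entry->fltr_info here */
		}
		mutex_unlock(&recp->filt_rule_lock);
	}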
+/**
* ice_aq_get_sw_cfg - get switch configuration
* @hw: pointer to the hardware structure
* @buf: pointer to the result buffer
@@ -140,23 +170,24 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
*
* Add a VSI context to the hardware (0x0210)
*/
-enum ice_status
+static enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
- enum ice_status status;
struct ice_aq_desc desc;
+ enum ice_status status;
cmd = &desc.params.vsi_cmd;
- res = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
+ res = &desc.params.add_update_free_vsi_res;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
if (!vsi_ctx->alloc_from_pool)
cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
ICE_AQ_VSI_IS_VALID);
+ cmd->vf_id = vsi_ctx->vf_num;
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
@@ -175,6 +206,42 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
}
/**
+ * ice_aq_free_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a VSI context struct
+ * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
+ * @cd: pointer to command details structure or NULL
+ *
+ * Free VSI context info from hardware (0x0213)
+ */
+static enum ice_status
+ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_update_free_vsi_resp *resp;
+ struct ice_aqc_add_get_update_free_vsi *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.vsi_cmd;
+ resp = &desc.params.add_update_free_vsi_res;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
+
+ cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
+ if (keep_vsi_alloc)
+ cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status) {
+ vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+ }
+
+ return status;
+}
+
+/**
* ice_aq_update_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a VSI context struct
@@ -182,7 +249,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware (0x0211)
*/
-enum ice_status
+static enum ice_status
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
@@ -192,7 +259,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
enum ice_status status;
cmd = &desc.params.vsi_cmd;
- resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
+ resp = &desc.params.add_update_free_vsi_res;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
@@ -212,42 +279,162 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
}
/**
- * ice_aq_free_vsi
+ * ice_is_vsi_valid - check whether the VSI is valid or not
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * check whether the VSI is valid or not
+ */
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
+{
+ return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
+}
+
+/**
+ * ice_get_hw_vsi_num - return the hw VSI number
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * return the hw VSI number
+ * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
+ */
+u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
+{
+ return hw->vsi_ctx[vsi_handle]->vsi_num;
+}
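Given the caution above, a minimal sketch (example_resolve_vsi_num() is hypothetical) of the intended calling pattern, which ice_free_vsi() and ice_update_vsi() below follow:

	static int example_resolve_vsi_num(struct ice_hw *hw, u16 vsi_handle,
					   u16 *vsi_num)
	{
		/* bounds- and NULL-check the handle before dereferencing */
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return -EINVAL;
		*vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
		return 0;
	}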
+
+/**
+ * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * return the VSI context entry for a given VSI handle
+ */
+struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
+}
+
+/**
+ * ice_save_vsi_ctx - save the VSI context for a given VSI handle
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ * @vsi: VSI context pointer
+ *
+ * save the VSI context entry for a given VSI handle
+ */
+static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_vsi_ctx *vsi)
+{
+ hw->vsi_ctx[vsi_handle] = vsi;
+}
+
+/**
+ * ice_clear_vsi_ctx - clear the VSI context entry
+ * @hw: pointer to the hw struct
+ * @vsi_handle: VSI handle
+ *
+ * clear the VSI context entry
+ */
+static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_vsi_ctx *vsi;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (vsi) {
+ devm_kfree(ice_hw_to_dev(hw), vsi);
+ hw->vsi_ctx[vsi_handle] = NULL;
+ }
+}
+
+/**
+ * ice_add_vsi - add VSI context to the hardware and VSI handle list
* @hw: pointer to the hw struct
+ * @vsi_handle: unique VSI handle provided by drivers
* @vsi_ctx: pointer to a VSI context struct
- * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
* @cd: pointer to command details structure or NULL
*
- * Get VSI context info from hardware (0x0213)
+ * Add a VSI context to the hardware and also add it to the VSI handle list.
+ * If this function gets called after reset for existing VSIs, the
+ * corresponding VSI handle list entry is updated with the new HW VSI number.
*/
enum ice_status
-ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- bool keep_vsi_alloc, struct ice_sq_cd *cd)
+ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
{
- struct ice_aqc_add_update_free_vsi_resp *resp;
- struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct ice_vsi_ctx *tmp_vsi_ctx;
enum ice_status status;
- cmd = &desc.params.vsi_cmd;
- resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
+ if (vsi_handle >= ICE_MAX_VSI)
+ return ICE_ERR_PARAM;
+ status = ice_aq_add_vsi(hw, vsi_ctx, cd);
+ if (status)
+ return status;
+ tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!tmp_vsi_ctx) {
+ /* Create a new vsi context */
+ tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*tmp_vsi_ctx), GFP_KERNEL);
+ if (!tmp_vsi_ctx) {
+ ice_aq_free_vsi(hw, vsi_ctx, false, cd);
+ return ICE_ERR_NO_MEMORY;
+ }
+ *tmp_vsi_ctx = *vsi_ctx;
+ ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
+ } else {
+ /* update with new HW VSI num */
+ if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
+ tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
+ }
- cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
- if (keep_vsi_alloc)
- cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
+ return status;
+}
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
- if (!status) {
- vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
- vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
- }
+/**
+ * ice_free_vsi - free VSI context from hardware and VSI handle list
+ * @hw: pointer to the hw struct
+ * @vsi_handle: unique VSI handle
+ * @vsi_ctx: pointer to a VSI context struct
+ * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
+ * @cd: pointer to command details structure or NULL
+ *
+ * Free VSI context info from hardware as well as from VSI handle list
+ */
+enum ice_status
+ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd)
+{
+ enum ice_status status;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
+ status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
+ if (!status)
+ ice_clear_vsi_ctx(hw, vsi_handle);
return status;
}
/**
+ * ice_update_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_handle: unique VSI handle
+ * @vsi_ctx: pointer to a VSI context struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update VSI context in the hardware
+ */
+enum ice_status
+ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
+ return ice_aq_update_vsi(hw, vsi_ctx, cd);
+}
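This wrapper is what the new ice_vsi_update_bridge_mode() in ice_main.c above calls, as ice_update_vsi(hw, vsi->idx, &ctxt, NULL): throughout this patch the software handle (vsi->idx) replaces the raw vsi->vsi_num at call sites, and the handle is resolved to the hardware VSI number here, just before the AQ command is issued.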
+
+/**
* ice_aq_alloc_free_vsi_list
* @hw: pointer to the hw struct
* @vsi_list_id: VSI list id returned or used for lookup
@@ -464,10 +651,12 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
u16 vlan_id = ICE_MAX_VLAN_ID + 1;
- u8 eth_hdr[DUMMY_ETH_HDR_LEN];
void *daddr = NULL;
+ u16 eth_hdr_sz;
+ u8 *eth_hdr;
u32 act = 0;
__be16 *off;
+ u8 q_rgn;
if (opc == ice_aqc_opc_remove_sw_rules) {
s_rule->pdata.lkup_tx_rx.act = 0;
@@ -477,13 +666,16 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
return;
}
+ eth_hdr_sz = sizeof(dummy_eth_header);
+ eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
+
/* initialize the ether header with a dummy header */
- memcpy(eth_hdr, dummy_eth_header, sizeof(dummy_eth_header));
+ memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
ice_fill_sw_info(hw, f_info);
switch (f_info->fltr_act) {
case ICE_FWD_TO_VSI:
- act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
+ act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
ICE_SINGLE_ACT_VSI_ID_M;
if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
act |= ICE_SINGLE_ACT_VSI_FORWARDING |
@@ -503,14 +695,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
ICE_SINGLE_ACT_Q_INDEX_M;
break;
+ case ICE_DROP_PACKET:
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
case ICE_FWD_TO_QGRP:
+ q_rgn = f_info->qgrp_size > 0 ?
+ (u8)ilog2(f_info->qgrp_size) : 0;
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
+ act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
ICE_SINGLE_ACT_Q_REGION_M;
break;
- case ICE_DROP_PACKET:
- act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
- break;
default:
return;
}
@@ -536,7 +733,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
daddr = f_info->l_data.ethertype_mac.mac_addr;
/* fall-through */
case ICE_SW_LKUP_ETHERTYPE:
- off = (__be16 *)&eth_hdr[ICE_ETH_ETHTYPE_OFFSET];
+ off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
break;
case ICE_SW_LKUP_MAC_VLAN:
@@ -563,18 +760,16 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
if (daddr)
- ether_addr_copy(&eth_hdr[ICE_ETH_DA_OFFSET], daddr);
+ ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
if (!(vlan_id > ICE_MAX_VLAN_ID)) {
- off = (__be16 *)&eth_hdr[ICE_ETH_VLAN_TCI_OFFSET];
+ off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
*off = cpu_to_be16(vlan_id);
}
/* Create the switch rule with the final dummy Ethernet header */
if (opc != ice_aqc_opc_update_sw_rules)
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(sizeof(eth_hdr));
-
- memcpy(s_rule->pdata.lkup_tx_rx.hdr, eth_hdr, sizeof(eth_hdr));
+ s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
}
/**
@@ -601,8 +796,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
enum ice_status status;
u16 lg_act_size;
u16 rules_size;
- u16 vsi_info;
u32 act;
+ u16 id;
if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
return ICE_ERR_PARAM;
@@ -628,12 +823,11 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/* First action VSI forwarding or VSI list forwarding depending on how
* many VSIs
*/
- vsi_info = (m_ent->vsi_count > 1) ?
- m_ent->fltr_info.fwd_id.vsi_list_id :
- m_ent->fltr_info.fwd_id.vsi_id;
+ id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
+ m_ent->fltr_info.fwd_id.hw_vsi_id;
act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
- act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) &
+ act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
@@ -686,15 +880,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/**
* ice_create_vsi_list_map
* @hw: pointer to the hardware structure
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: num VSI in the array
+ * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
+ * @num_vsi: number of VSI handles in the array
* @vsi_list_id: VSI list id generated as part of allocate resource
*
* Helper function to create a new entry of VSI list id to VSI mapping
* using the given VSI list id
*/
static struct ice_vsi_list_map_info *
-ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id)
{
struct ice_switch_info *sw = hw->switch_info;
@@ -706,9 +900,9 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
return NULL;
v_map->vsi_list_id = vsi_list_id;
-
+ v_map->ref_cnt = 1;
for (i = 0; i < num_vsi; i++)
- set_bit(vsi_array[i], v_map->vsi_map);
+ set_bit(vsi_handle_arr[i], v_map->vsi_map);
list_add(&v_map->list_entry, &sw->vsi_list_map_head);
return v_map;
@@ -717,8 +911,8 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
/**
* ice_update_vsi_list_rule
* @hw: pointer to the hardware structure
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: num VSI in the array
+ * @vsi_handle_arr: array of VSI handles to form a VSI list
+ * @num_vsi: number of VSI handles in the array
* @vsi_list_id: VSI list id generated as part of allocate resource
* @remove: Boolean value to indicate if this is a remove action
* @opc: switch rules population command type - pass in the command opcode
@@ -728,7 +922,7 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
* using the given VSI list id
*/
static enum ice_status
-ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
@@ -759,9 +953,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
-
- for (i = 0; i < num_vsi; i++)
- s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]);
+ for (i = 0; i < num_vsi; i++) {
+ if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
+ status = ICE_ERR_PARAM;
+ goto exit;
+ }
+ /* AQ call requires hw_vsi_id(s) */
+ s_rule->pdata.vsi_list.vsi[i] =
+ cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
+ }
s_rule->type = cpu_to_le16(type);
s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
@@ -769,6 +969,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
+exit:
devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
@@ -776,21 +977,16 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
/**
* ice_create_vsi_list_rule - Creates and populates a VSI list rule
* @hw: pointer to the hw struct
- * @vsi_array: array of VSIs to form a VSI list
- * @num_vsi: number of VSIs in the array
+ * @vsi_handle_arr: array of VSI handles to form a VSI list
+ * @num_vsi: number of VSI handles in the array
* @vsi_list_id: stores the ID of the VSI list to be created
* @lkup_type: switch rule filter's lookup type
*/
static enum ice_status
-ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
enum ice_status status;
- int i;
-
- for (i = 0; i < num_vsi; i++)
- if (vsi_array[i] >= ICE_MAX_VSI)
- return ICE_ERR_OUT_OF_RANGE;
status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
ice_aqc_opc_alloc_res);
@@ -798,9 +994,9 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
return status;
/* Update the newly created VSI list to include the specified VSIs */
- return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id,
- false, ice_aqc_opc_add_sw_rules,
- lkup_type);
+ return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
+ *vsi_list_id, false,
+ ice_aqc_opc_add_sw_rules, lkup_type);
}
/**
@@ -816,10 +1012,10 @@ static enum ice_status
ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_fltr_list_entry *f_entry)
{
- struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *fm_entry;
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_sw_lkup_type l_type;
+ struct ice_sw_recipe *recp;
enum ice_status status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
@@ -860,31 +1056,9 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
* calls remove filter AQ command
*/
l_type = fm_entry->fltr_info.lkup_type;
- if (l_type == ICE_SW_LKUP_MAC) {
- mutex_lock(&sw->mac_list_lock);
- list_add(&fm_entry->list_entry, &sw->mac_list_head);
- mutex_unlock(&sw->mac_list_lock);
- } else if (l_type == ICE_SW_LKUP_VLAN) {
- mutex_lock(&sw->vlan_list_lock);
- list_add(&fm_entry->list_entry, &sw->vlan_list_head);
- mutex_unlock(&sw->vlan_list_lock);
- } else if (l_type == ICE_SW_LKUP_ETHERTYPE ||
- l_type == ICE_SW_LKUP_ETHERTYPE_MAC) {
- mutex_lock(&sw->eth_m_list_lock);
- list_add(&fm_entry->list_entry, &sw->eth_m_list_head);
- mutex_unlock(&sw->eth_m_list_lock);
- } else if (l_type == ICE_SW_LKUP_PROMISC ||
- l_type == ICE_SW_LKUP_PROMISC_VLAN) {
- mutex_lock(&sw->promisc_list_lock);
- list_add(&fm_entry->list_entry, &sw->promisc_list_head);
- mutex_unlock(&sw->promisc_list_lock);
- } else if (fm_entry->fltr_info.lkup_type == ICE_SW_LKUP_MAC_VLAN) {
- mutex_lock(&sw->mac_vlan_list_lock);
- list_add(&fm_entry->list_entry, &sw->mac_vlan_list_head);
- mutex_unlock(&sw->mac_vlan_list_lock);
- } else {
- status = ICE_ERR_NOT_IMPL;
- }
+ recp = &hw->switch_info->recp_list[l_type];
+ list_add(&fm_entry->list_entry, &recp->filt_rules);
+
ice_create_pkt_fwd_rule_exit:
devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
@@ -893,19 +1067,15 @@ ice_create_pkt_fwd_rule_exit:
/**
* ice_update_pkt_fwd_rule
* @hw: pointer to the hardware structure
- * @rule_id: rule of previously created switch rule to update
- * @vsi_list_id: VSI list id to be updated with
- * @f_info: ice_fltr_info to pull other information for switch rule
+ * @f_info: filter information for switch rule
*
* Call AQ command to update a previously created switch rule with a
* VSI list id
*/
static enum ice_status
-ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
- struct ice_fltr_info f_info)
+ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
struct ice_aqc_sw_rules_elem *s_rule;
- struct ice_fltr_info tmp_fltr;
enum ice_status status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
@@ -913,14 +1083,9 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
if (!s_rule)
return ICE_ERR_NO_MEMORY;
- tmp_fltr = f_info;
- tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
- tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
- ice_fill_sw_rule(hw, &tmp_fltr, s_rule,
- ice_aqc_opc_update_sw_rules);
-
- s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
+ s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
/* Update switch rule with new rule set to forward VSI list */
status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
@@ -931,7 +1096,48 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
}
/**
- * ice_handle_vsi_list_mgmt
+ * ice_update_sw_rule_bridge_mode
+ * @hw: pointer to the hw struct
+ *
+ * Updates unicast switch filter rules based on VEB/VEPA mode
+ */
+enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ enum ice_status status = 0;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
+
+ mutex_lock(rule_lock);
+ list_for_each_entry(fm_entry, rule_head, list_entry) {
+ struct ice_fltr_info *fi = &fm_entry->fltr_info;
+ u8 *addr = fi->l_data.mac.mac_addr;
+
+ /* Update unicast Tx rules to reflect the selected
+ * VEB/VEPA mode
+ */
+ if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
+ (fi->fltr_act == ICE_FWD_TO_VSI ||
+ fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
+ fi->fltr_act == ICE_FWD_TO_Q ||
+ fi->fltr_act == ICE_FWD_TO_QGRP)) {
+ status = ice_update_pkt_fwd_rule(hw, fi);
+ if (status)
+ break;
+ }
+ }
+
+ mutex_unlock(rule_lock);
+
+ return status;
+}
+
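A minimal sketch of a call site, assuming ice_update_sw_rule_bridge_mode() is invoked from the bridge-mode configuration path; the wrapper below is hypothetical and not part of this patch:

	/* Hypothetical caller: after flipping the VEB/VEPA bit in the VSI
	 * context, rewrite the unicast Tx rules to match the new mode.
	 */
	static enum ice_status example_set_bridge_mode(struct ice_hw *hw)
	{
		/* ... update the VSI context for VEB or VEPA first ... */
		return ice_update_sw_rule_bridge_mode(hw);
	}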
+/**
+ * ice_add_update_vsi_list
* @hw: pointer to the hardware structure
* @m_entry: pointer to current filter management list entry
* @cur_fltr: filter information from the book keeping entry
@@ -952,10 +1158,10 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
* using the update switch rule command
*/
static enum ice_status
-ice_handle_vsi_list_mgmt(struct ice_hw *hw,
- struct ice_fltr_mgmt_list_entry *m_entry,
- struct ice_fltr_info *cur_fltr,
- struct ice_fltr_info *new_fltr)
+ice_add_update_vsi_list(struct ice_hw *hw,
+ struct ice_fltr_mgmt_list_entry *m_entry,
+ struct ice_fltr_info *cur_fltr,
+ struct ice_fltr_info *new_fltr)
{
enum ice_status status = 0;
u16 vsi_list_id = 0;
@@ -975,34 +1181,36 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw,
* a part of a VSI list. So, create a VSI list with the old and
* new VSIs.
*/
- u16 vsi_id_arr[2];
- u16 fltr_rule;
+ struct ice_fltr_info tmp_fltr;
+ u16 vsi_handle_arr[2];
/* A rule already exists with the new VSI being added */
- if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id)
+ if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
return ICE_ERR_ALREADY_EXISTS;
- vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id;
- vsi_id_arr[1] = new_fltr->fwd_id.vsi_id;
- status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2,
+ vsi_handle_arr[0] = cur_fltr->vsi_handle;
+ vsi_handle_arr[1] = new_fltr->vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
&vsi_list_id,
new_fltr->lkup_type);
if (status)
return status;
- fltr_rule = cur_fltr->fltr_rule_id;
+ tmp_fltr = *new_fltr;
+ tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
/* Update the previous switch rule of "MAC forward to VSI" to
* "MAC fwd to VSI list"
*/
- status = ice_update_pkt_fwd_rule(hw, fltr_rule, vsi_list_id,
- *new_fltr);
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
if (status)
return status;
cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
m_entry->vsi_list_info =
- ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2,
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
vsi_list_id);
/* If this entry was large action then the large action needs
@@ -1014,11 +1222,11 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw,
m_entry->sw_marker_id,
m_entry->lg_act_idx);
} else {
- u16 vsi_id = new_fltr->fwd_id.vsi_id;
+ u16 vsi_handle = new_fltr->vsi_handle;
enum ice_adminq_opc opcode;
/* A rule already exists with the new VSI being added */
- if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map))
+ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
return 0;
/* Update the previously created VSI list set with
@@ -1027,12 +1235,12 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw,
vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
opcode = ice_aqc_opc_update_sw_rules;
- status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
- false, opcode,
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
+ vsi_list_id, false, opcode,
new_fltr->lkup_type);
/* update VSI list mapping info with new VSI id */
if (!status)
- set_bit(vsi_id, m_entry->vsi_list_info->vsi_map);
+ set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
}
if (!status)
m_entry->vsi_count++;
@@ -1040,54 +1248,313 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw,
}
/**
- * ice_find_mac_entry
+ * ice_find_rule_entry - Search a rule entry
* @hw: pointer to the hardware structure
- * @mac_addr: MAC address to search for
+ * @recp_id: lookup type for which the specified rule needs to be searched
+ * @f_info: rule information
*
- * Helper function to search for a MAC entry using a given MAC address
- * Returns pointer to the entry if found.
+ * Helper function to search for a given rule entry
+ * Returns pointer to entry storing the rule if found
*/
static struct ice_fltr_mgmt_list_entry *
-ice_find_mac_entry(struct ice_hw *hw, u8 *mac_addr)
+ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
{
- struct ice_fltr_mgmt_list_entry *m_list_itr, *mac_ret = NULL;
+ struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
struct ice_switch_info *sw = hw->switch_info;
-
- mutex_lock(&sw->mac_list_lock);
- list_for_each_entry(m_list_itr, &sw->mac_list_head, list_entry) {
- u8 *buf = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
-
- if (ether_addr_equal(buf, mac_addr)) {
- mac_ret = m_list_itr;
+ struct list_head *list_head;
+
+ list_head = &sw->recp_list[recp_id].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
+ sizeof(f_info->l_data)) &&
+ f_info->flag == list_itr->fltr_info.flag) {
+ ret = list_itr;
break;
}
}
- mutex_unlock(&sw->mac_list_lock);
- return mac_ret;
+ return ret;
+}
+
+/**
+ * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
+ * @hw: pointer to the hardware structure
+ * @recp_id: lookup type for which VSI lists need to be searched
+ * @vsi_handle: VSI handle to be found in VSI list
+ * @vsi_list_id: VSI list id found containing vsi_handle
+ *
+ * Helper function to search for a VSI list with a single entry that contains
+ * the given VSI handle. This can be extended further to search VSI lists with
+ * more than one vsi_count. Returns pointer to the VSI list entry if found.
+ */
+static struct ice_vsi_list_map_info *
+ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
+ u16 *vsi_list_id)
+{
+ struct ice_vsi_list_map_info *map_info = NULL;
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *list_itr;
+ struct list_head *list_head;
+
+ list_head = &sw->recp_list[recp_id].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
+ map_info = list_itr->vsi_list_info;
+ if (test_bit(vsi_handle, map_info->vsi_map)) {
+ *vsi_list_id = map_info->vsi_list_id;
+ return map_info;
+ }
+ }
+ }
+ return NULL;
}
/**
- * ice_add_shared_mac - Add one MAC shared filter rule
+ * ice_add_rule_internal - add rule for a given lookup type
* @hw: pointer to the hardware structure
+ * @recp_id: lookup type (recipe id) for which the rule has to be added
* @f_entry: structure containing MAC forwarding information
*
- * Adds or updates the book keeping list for the MAC addresses
+ * Adds or updates the rule lists for a given recipe
*/
static enum ice_status
-ice_add_shared_mac(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
+ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
+ struct ice_fltr_list_entry *f_entry)
{
+ struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_info *new_fltr, *cur_fltr;
struct ice_fltr_mgmt_list_entry *m_entry;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
- new_fltr = &f_entry->fltr_info;
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
+
+ rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
- m_entry = ice_find_mac_entry(hw, &new_fltr->l_data.mac.mac_addr[0]);
- if (!m_entry)
+ mutex_lock(rule_lock);
+ new_fltr = &f_entry->fltr_info;
+ if (new_fltr->flag & ICE_FLTR_RX)
+ new_fltr->src = hw->port_info->lport;
+ else if (new_fltr->flag & ICE_FLTR_TX)
+ new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
+
+ m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
+ if (!m_entry) {
+ mutex_unlock(rule_lock);
return ice_create_pkt_fwd_rule(hw, f_entry);
+ }
cur_fltr = &m_entry->fltr_info;
+ status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
+ mutex_unlock(rule_lock);
- return ice_handle_vsi_list_mgmt(hw, m_entry, cur_fltr, new_fltr);
+ return status;
+}
+
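The lines above replace the old per-lookup-type mutexes with one filt_rule_lock per recipe. A minimal sketch of the resulting access pattern, using only names introduced by this patch:

	/* Sketch: all readers/writers of a recipe's rule list take the same
	 * per-recipe lock instead of a per-type mutex.
	 */
	struct ice_sw_recipe *recp = &hw->switch_info->recp_list[recp_id];

	mutex_lock(&recp->filt_rule_lock);
	/* ... walk or modify recp->filt_rules ... */
	mutex_unlock(&recp->filt_rule_lock);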
+/**
+ * ice_remove_vsi_list_rule
+ * @hw: pointer to the hardware structure
+ * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @lkup_type: switch rule filter lookup type
+ *
+ * The VSI list should be emptied before this function is called to remove the
+ * VSI list.
+ */
+static enum ice_status
+ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
+ enum ice_sw_lkup_type lkup_type)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status status;
+ u16 s_rule_size;
+
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
+ s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+
+ /* Free the vsi_list resource that we allocated. It is assumed that the
+ * list is empty at this point.
+ */
+ status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
+ ice_aqc_opc_free_res);
+
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ return status;
+}
+
+/**
+ * ice_rem_update_vsi_list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle of the VSI to remove
+ * @fm_list: filter management entry for which the VSI list management needs to
+ * be done
+ */
+static enum ice_status
+ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_fltr_mgmt_list_entry *fm_list)
+{
+ enum ice_sw_lkup_type lkup_type;
+ enum ice_status status = 0;
+ u16 vsi_list_id;
+
+ if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
+ fm_list->vsi_count == 0)
+ return ICE_ERR_PARAM;
+
+ /* A rule with the VSI being removed does not exist */
+ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ lkup_type = fm_list->fltr_info.lkup_type;
+ vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ fm_list->vsi_count--;
+ clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
+
+ if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
+ struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
+ struct ice_vsi_list_map_info *vsi_list_info =
+ fm_list->vsi_list_info;
+ u16 rem_vsi_handle;
+
+ rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+ if (!ice_is_vsi_valid(hw, rem_vsi_handle))
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* Make sure VSI list is empty before removing it below */
+ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
+ vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ tmp_fltr_info.vsi_handle = rem_vsi_handle;
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ tmp_fltr_info.fwd_id.hw_vsi_id, status);
+ return status;
+ }
+
+ fm_list->fltr_info = tmp_fltr_info;
+ }
+
+ if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
+ (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
+ struct ice_vsi_list_map_info *vsi_list_info =
+ fm_list->vsi_list_info;
+
+ /* Remove the VSI list since it is no longer used */
+ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Failed to remove VSI list %d, error %d\n",
+ vsi_list_id, status);
+ return status;
+ }
+
+ list_del(&vsi_list_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
+ fm_list->vsi_list_info = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * ice_remove_rule_internal - Remove a filter rule of a given type
+ * @hw: pointer to the hardware structure
+ * @recp_id: recipe id for which the rule needs to be removed
+ * @f_entry: rule entry containing filter information
+ */
+static enum ice_status
+ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
+ struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *list_elem;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
+ bool remove_rule = false;
+ u16 vsi_handle;
+
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
+
+ rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
+ mutex_lock(rule_lock);
+ list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
+ if (!list_elem) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
+ }
+
+ if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
+ remove_rule = true;
+ } else if (!list_elem->vsi_list_info) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
+ } else {
+ if (list_elem->vsi_list_info->ref_cnt > 1)
+ list_elem->vsi_list_info->ref_cnt--;
+ vsi_handle = f_entry->fltr_info.vsi_handle;
+ status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
+ if (status)
+ goto exit;
+ /* if vsi count goes to zero after updating the vsi list */
+ if (list_elem->vsi_count == 0)
+ remove_rule = true;
+ }
+
+ if (remove_rule) {
+ /* Remove the lookup rule */
+ struct ice_aqc_sw_rules_elem *s_rule;
+
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw),
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
+ GFP_KERNEL);
+ if (!s_rule) {
+ status = ICE_ERR_NO_MEMORY;
+ goto exit;
+ }
+
+ ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
+ ice_aqc_opc_remove_sw_rules);
+
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
+ ice_aqc_opc_remove_sw_rules, NULL);
+ if (status)
+ goto exit;
+
+ /* Remove the bookkeeping entry from the list */
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+
+ list_del(&list_elem->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), list_elem);
+ }
+exit:
+ mutex_unlock(rule_lock);
+ return status;
}
/**
@@ -1106,7 +1573,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
+ struct list_head *rule_head;
u16 elem_sent, total_elem_left;
+ struct ice_switch_info *sw;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status = 0;
u16 num_unicast = 0;
u16 s_rule_size;
@@ -1114,48 +1584,73 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
if (!m_list || !hw)
return ICE_ERR_PARAM;
+ s_rule = NULL;
+ sw = hw->switch_info;
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry(m_list_itr, m_list, list_entry) {
u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
+ u16 vsi_handle;
+ u16 hw_vsi_id;
- if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
+ m_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ vsi_handle = m_list_itr->fltr_info.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ /* update the src in case it is vsi num */
+ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM;
- if (is_zero_ether_addr(add))
+ m_list_itr->fltr_info.src = hw_vsi_id;
+ if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
+ is_zero_ether_addr(add))
return ICE_ERR_PARAM;
if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
/* Don't overwrite the unicast address */
- if (ice_find_mac_entry(hw, add))
+ mutex_lock(rule_lock);
+ if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
+ &m_list_itr->fltr_info)) {
+ mutex_unlock(rule_lock);
return ICE_ERR_ALREADY_EXISTS;
+ }
+ mutex_unlock(rule_lock);
num_unicast++;
} else if (is_multicast_ether_addr(add) ||
(is_unicast_ether_addr(add) && hw->ucast_shared)) {
- status = ice_add_shared_mac(hw, m_list_itr);
- if (status) {
- m_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
- return status;
- }
- m_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ m_list_itr->status =
+ ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
+ m_list_itr);
+ if (m_list_itr->status)
+ return m_list_itr->status;
}
}
+ mutex_lock(rule_lock);
/* Exit if no suitable entries were found for adding bulk switch rule */
- if (!num_unicast)
- return 0;
+ if (!num_unicast) {
+ status = 0;
+ goto ice_add_mac_exit;
+ }
+
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
/* Allocate switch rule buffer for the bulk update for unicast */
s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ if (!s_rule) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_add_mac_exit;
+ }
r_iter = s_rule;
list_for_each_entry(m_list_itr, m_list, list_entry) {
struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *addr = &f_info->l_data.mac.mac_addr[0];
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
- if (is_unicast_ether_addr(addr)) {
- ice_fill_sw_rule(hw, &m_list_itr->fltr_info,
- r_iter, ice_aqc_opc_add_sw_rules);
+ if (is_unicast_ether_addr(mac_addr)) {
+ ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
+ ice_aqc_opc_add_sw_rules);
r_iter = (struct ice_aqc_sw_rules_elem *)
((u8 *)r_iter + s_rule_size);
}
@@ -1183,11 +1678,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
r_iter = s_rule;
list_for_each_entry(m_list_itr, m_list, list_entry) {
struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *addr = &f_info->l_data.mac.mac_addr[0];
- struct ice_switch_info *sw = hw->switch_info;
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
struct ice_fltr_mgmt_list_entry *fm_entry;
- if (is_unicast_ether_addr(addr)) {
+ if (is_unicast_ether_addr(mac_addr)) {
f_info->fltr_rule_id =
le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
f_info->fltr_act = ICE_FWD_TO_VSI;
@@ -1203,46 +1697,21 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
/* The book keeping entries will get removed when
* base driver calls remove filter AQ command
*/
- mutex_lock(&sw->mac_list_lock);
- list_add(&fm_entry->list_entry, &sw->mac_list_head);
- mutex_unlock(&sw->mac_list_lock);
+ list_add(&fm_entry->list_entry, rule_head);
r_iter = (struct ice_aqc_sw_rules_elem *)
((u8 *)r_iter + s_rule_size);
}
}
ice_add_mac_exit:
- devm_kfree(ice_hw_to_dev(hw), s_rule);
+ mutex_unlock(rule_lock);
+ if (s_rule)
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
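A hedged usage sketch for the reworked ice_add_mac(): the caller must now supply a valid vsi_handle and a VSI-based src_id, while the flag is forced to ICE_FLTR_TX internally. The wrapper name below is hypothetical:

	static enum ice_status
	example_add_one_mac(struct ice_hw *hw, u16 vsi_handle, const u8 *mac)
	{
		struct ice_fltr_list_entry entry = { 0 };
		LIST_HEAD(m_list);

		entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
		entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		entry.fltr_info.src_id = ICE_SRC_ID_VSI;
		entry.fltr_info.vsi_handle = vsi_handle;
		ether_addr_copy(entry.fltr_info.l_data.mac.mac_addr, mac);

		list_add(&entry.list_entry, &m_list);
		return ice_add_mac(hw, &m_list);
	}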
/**
- * ice_find_vlan_entry
- * @hw: pointer to the hardware structure
- * @vlan_id: VLAN id to search for
- *
- * Helper function to search for a VLAN entry using a given VLAN id
- * Returns pointer to the entry if found.
- */
-static struct ice_fltr_mgmt_list_entry *
-ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id)
-{
- struct ice_fltr_mgmt_list_entry *vlan_list_itr, *vlan_ret = NULL;
- struct ice_switch_info *sw = hw->switch_info;
-
- mutex_lock(&sw->vlan_list_lock);
- list_for_each_entry(vlan_list_itr, &sw->vlan_list_head, list_entry)
- if (vlan_list_itr->fltr_info.l_data.vlan.vlan_id == vlan_id) {
- vlan_ret = vlan_list_itr;
- break;
- }
-
- mutex_unlock(&sw->vlan_list_lock);
- return vlan_ret;
-}
-
-/**
* ice_add_vlan_internal - Add one VLAN based filter rule
* @hw: pointer to the hardware structure
* @f_entry: filter entry containing one VLAN information
@@ -1250,53 +1719,150 @@ ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id)
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
- struct ice_fltr_info *new_fltr, *cur_fltr;
+ struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *v_list_itr;
- u16 vlan_id;
+ struct ice_fltr_info *new_fltr, *cur_fltr;
+ enum ice_sw_lkup_type lkup_type;
+ u16 vsi_list_id = 0, vsi_handle;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
+
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
new_fltr = &f_entry->fltr_info;
+
/* VLAN id should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
return ICE_ERR_PARAM;
- vlan_id = new_fltr->l_data.vlan.vlan_id;
- v_list_itr = ice_find_vlan_entry(hw, vlan_id);
+ if (new_fltr->src_id != ICE_SRC_ID_VSI)
+ return ICE_ERR_PARAM;
+
+ new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
+ lkup_type = new_fltr->lkup_type;
+ vsi_handle = new_fltr->vsi_handle;
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ mutex_lock(rule_lock);
+ v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
if (!v_list_itr) {
- u16 vsi_id = ICE_VSI_INVAL_ID;
- enum ice_status status;
- u16 vsi_list_id = 0;
+ struct ice_vsi_list_map_info *map_info = NULL;
if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
- enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type;
-
- /* All VLAN pruning rules use a VSI list.
- * Convert the action to forwarding to a VSI list.
+ /* All VLAN pruning rules use a VSI list. Check if
+ * there is already a VSI list containing the VSI that
+ * we want to add. If found, use the same vsi_list_id
+ * for this new VLAN rule; otherwise create a new list.
*/
- vsi_id = new_fltr->fwd_id.vsi_id;
- status = ice_create_vsi_list_rule(hw, &vsi_id, 1,
- &vsi_list_id,
- lkup_type);
- if (status)
- return status;
+ map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
+ vsi_handle,
+ &vsi_list_id);
+ if (!map_info) {
+ status = ice_create_vsi_list_rule(hw,
+ &vsi_handle,
+ 1,
+ &vsi_list_id,
+ lkup_type);
+ if (status)
+ goto exit;
+ }
+ /* Convert the action to forwarding to a VSI list. */
new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
new_fltr->fwd_id.vsi_list_id = vsi_list_id;
}
status = ice_create_pkt_fwd_rule(hw, f_entry);
- if (!status && vsi_id != ICE_VSI_INVAL_ID) {
- v_list_itr = ice_find_vlan_entry(hw, vlan_id);
- if (!v_list_itr)
- return ICE_ERR_DOES_NOT_EXIST;
- v_list_itr->vsi_list_info =
- ice_create_vsi_list_map(hw, &vsi_id, 1,
- vsi_list_id);
+ if (!status) {
+ v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
+ new_fltr);
+ if (!v_list_itr) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
+ }
+ /* reuse VSI list for new rule and increment ref_cnt */
+ if (map_info) {
+ v_list_itr->vsi_list_info = map_info;
+ map_info->ref_cnt++;
+ } else {
+ v_list_itr->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle,
+ 1, vsi_list_id);
+ }
}
+ } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
+ /* Update the existing VSI list to add the new VSI ID only if
+ * it is used by exactly one VLAN rule.
+ */
+ cur_fltr = &v_list_itr->fltr_info;
+ status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
+ new_fltr);
+ } else {
+ /* If the VLAN rule exists and the VSI list used by this rule
+ * is referenced by more than one VLAN rule, create a new VSI
+ * list containing the previous VSI plus the new VSI, and update
+ * the existing VLAN rule to point to the new VSI list ID.
+ */
+ struct ice_fltr_info tmp_fltr;
+ u16 vsi_handle_arr[2];
+ u16 cur_handle;
- return status;
+ /* The current implementation only supports reusing a VSI
+ * list with a VSI count of one. We should never hit the
+ * condition below.
+ */
+ if (v_list_itr->vsi_count > 1 &&
+ v_list_itr->vsi_list_info->ref_cnt > 1) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
+ status = ICE_ERR_CFG;
+ goto exit;
+ }
+
+ cur_handle =
+ find_first_bit(v_list_itr->vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+
+ /* A rule already exists with the new VSI being added */
+ if (cur_handle == vsi_handle) {
+ status = ICE_ERR_ALREADY_EXISTS;
+ goto exit;
+ }
+
+ vsi_handle_arr[0] = cur_handle;
+ vsi_handle_arr[1] = vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
+ &vsi_list_id, lkup_type);
+ if (status)
+ goto exit;
+
+ tmp_fltr = v_list_itr->fltr_info;
+ tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ /* Update the previous switch rule to a new VSI list that
+ * includes the currently requested VSI.
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status)
+ goto exit;
+
+ /* Before overriding the VSI list map info, decrement the
+ * ref_cnt of the previous VSI list.
+ */
+ v_list_itr->vsi_list_info->ref_cnt--;
+
+ /* now update to newly created list */
+ v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
+ v_list_itr->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
+ vsi_list_id);
+ v_list_itr->vsi_count++;
}
- cur_fltr = &v_list_itr->fltr_info;
- return ice_handle_vsi_list_mgmt(hw, v_list_itr, cur_fltr, new_fltr);
+exit:
+ mutex_unlock(rule_lock);
+ return status;
}
/**
@@ -1313,335 +1879,58 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
return ICE_ERR_PARAM;
list_for_each_entry(v_list_itr, v_list, list_entry) {
- enum ice_status status;
-
if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
return ICE_ERR_PARAM;
-
- status = ice_add_vlan_internal(hw, v_list_itr);
- if (status) {
- v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
- return status;
- }
- v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ v_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
}
return 0;
}
/**
- * ice_remove_vsi_list_rule
+ * ice_rem_sw_rule_info
* @hw: pointer to the hardware structure
- * @vsi_list_id: VSI list id generated as part of allocate resource
- * @lkup_type: switch rule filter lookup type
+ * @rule_head: pointer to the switch list structure that we want to delete
*/
-static enum ice_status
-ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
- enum ice_sw_lkup_type lkup_type)
-{
- struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
- u16 s_rule_size;
-
- s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
- s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
- s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
- /* FW expects number of VSIs in vsi_list resource to be 0 for clear
- * command. Since memory is zero'ed out during initialization, it's not
- * necessary to explicitly initialize the variable to 0.
- */
-
- status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1,
- ice_aqc_opc_remove_sw_rules, NULL);
- if (!status)
- /* Free the vsi_list resource that we allocated */
- status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
- ice_aqc_opc_free_res);
-
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- return status;
-}
-
-/**
- * ice_handle_rem_vsi_list_mgmt
- * @hw: pointer to the hardware structure
- * @vsi_id: ID of the VSI to remove
- * @fm_list_itr: filter management entry for which the VSI list management
- * needs to be done
- */
-static enum ice_status
-ice_handle_rem_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id,
- struct ice_fltr_mgmt_list_entry *fm_list_itr)
+static void
+ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
{
- struct ice_switch_info *sw = hw->switch_info;
- enum ice_status status = 0;
- enum ice_sw_lkup_type lkup_type;
- bool is_last_elem = true;
- bool conv_list = false;
- bool del_list = false;
- u16 vsi_list_id;
-
- lkup_type = fm_list_itr->fltr_info.lkup_type;
- vsi_list_id = fm_list_itr->fltr_info.fwd_id.vsi_list_id;
-
- if (fm_list_itr->vsi_count > 1) {
- status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
- true,
- ice_aqc_opc_update_sw_rules,
- lkup_type);
- if (status)
- return status;
- fm_list_itr->vsi_count--;
- is_last_elem = false;
- clear_bit(vsi_id, fm_list_itr->vsi_list_info->vsi_map);
- }
-
- /* For non-VLAN rules that forward packets to a VSI list, convert them
- * to forwarding packets to a VSI if there is only one VSI left in the
- * list. Unused lists are then removed.
- * VLAN rules need to use VSI lists even with only one VSI.
- */
- if (fm_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST) {
- if (lkup_type == ICE_SW_LKUP_VLAN) {
- del_list = is_last_elem;
- } else if (fm_list_itr->vsi_count == 1) {
- conv_list = true;
- del_list = true;
- }
- }
-
- if (del_list) {
- /* Remove the VSI list since it is no longer used */
- struct ice_vsi_list_map_info *vsi_list_info =
- fm_list_itr->vsi_list_info;
+ if (!list_empty(rule_head)) {
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct ice_fltr_mgmt_list_entry *tmp;
- status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
- if (status)
- return status;
-
- if (conv_list) {
- u16 rem_vsi_id;
-
- rem_vsi_id = find_first_bit(vsi_list_info->vsi_map,
- ICE_MAX_VSI);
-
- /* Error out when the expected last element is not in
- * the VSI list map
- */
- if (rem_vsi_id == ICE_MAX_VSI)
- return ICE_ERR_OUT_OF_RANGE;
-
- /* Change the list entry action from VSI_LIST to VSI */
- fm_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- fm_list_itr->fltr_info.fwd_id.vsi_id = rem_vsi_id;
+ list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
+ list_del(&entry->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), entry);
}
-
- list_del(&vsi_list_info->list_entry);
- devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
- fm_list_itr->vsi_list_info = NULL;
- }
-
- if (conv_list) {
- /* Convert the rule's forward action to forwarding packets to
- * a VSI
- */
- struct ice_aqc_sw_rules_elem *s_rule;
-
- s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE,
- GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
- ice_aqc_opc_update_sw_rules);
-
- s_rule->pdata.lkup_tx_rx.index =
- cpu_to_le16(fm_list_itr->fltr_info.fltr_rule_id);
-
- status = ice_aq_sw_rules(hw, s_rule,
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
- ice_aqc_opc_update_sw_rules, NULL);
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- if (status)
- return status;
}
-
- if (is_last_elem) {
- /* Remove the lookup rule */
- struct ice_aqc_sw_rules_elem *s_rule;
-
- s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
- GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
- ice_aqc_opc_remove_sw_rules);
-
- status = ice_aq_sw_rules(hw, s_rule,
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
- ice_aqc_opc_remove_sw_rules, NULL);
- if (status)
- return status;
-
- /* Remove a book keeping entry from the MAC address list */
- mutex_lock(&sw->mac_list_lock);
- list_del(&fm_list_itr->list_entry);
- mutex_unlock(&sw->mac_list_lock);
- devm_kfree(ice_hw_to_dev(hw), fm_list_itr);
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- }
- return status;
-}
-
-/**
- * ice_remove_mac_entry
- * @hw: pointer to the hardware structure
- * @f_entry: structure containing MAC forwarding information
- */
-static enum ice_status
-ice_remove_mac_entry(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
-{
- struct ice_fltr_mgmt_list_entry *m_entry;
- u16 vsi_id;
- u8 *add;
-
- add = &f_entry->fltr_info.l_data.mac.mac_addr[0];
-
- m_entry = ice_find_mac_entry(hw, add);
- if (!m_entry)
- return ICE_ERR_PARAM;
-
- vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
- return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, m_entry);
}
/**
- * ice_remove_mac - remove a MAC address based filter rule
+ * ice_cfg_dflt_vsi - change state of VSI to set/clear default
* @hw: pointer to the hardware structure
- * @m_list: list of MAC addresses and forwarding information
- *
- * This function removes either a MAC filter rule or a specific VSI from a
- * VSI list for a multicast MAC address.
- *
- * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
- * ice_add_mac. Caller should be aware that this call will only work if all
- * the entries passed into m_list were added previously. It will not attempt to
- * do a partial remove of entries that were found.
- */
-enum ice_status
-ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
-{
- struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
- u8 s_rule_size = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
- struct ice_switch_info *sw = hw->switch_info;
- struct ice_fltr_mgmt_list_entry *m_entry;
- struct ice_fltr_list_entry *m_list_itr;
- u16 elem_sent, total_elem_left;
- enum ice_status status = 0;
- u16 num_unicast = 0;
-
- if (!m_list)
- return ICE_ERR_PARAM;
-
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
-
- if (is_unicast_ether_addr(addr) && !hw->ucast_shared)
- num_unicast++;
- else if (is_multicast_ether_addr(addr) ||
- (is_unicast_ether_addr(addr) && hw->ucast_shared))
- ice_remove_mac_entry(hw, m_list_itr);
- }
-
- /* Exit if no unicast addresses found. Multicast switch rules
- * were added individually
- */
- if (!num_unicast)
- return 0;
-
- /* Allocate switch rule buffer for the bulk update for unicast */
- s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
- GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
-
- if (is_unicast_ether_addr(addr)) {
- m_entry = ice_find_mac_entry(hw, addr);
- if (!m_entry) {
- status = ICE_ERR_DOES_NOT_EXIST;
- goto ice_remove_mac_exit;
- }
-
- ice_fill_sw_rule(hw, &m_entry->fltr_info,
- r_iter, ice_aqc_opc_remove_sw_rules);
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
- }
- }
-
- /* Call AQ bulk switch rule update for all unicast addresses */
- r_iter = s_rule;
- /* Call AQ switch rule in AQ_MAX chunk */
- for (total_elem_left = num_unicast; total_elem_left > 0;
- total_elem_left -= elem_sent) {
- struct ice_aqc_sw_rules_elem *entry = r_iter;
-
- elem_sent = min(total_elem_left,
- (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
- status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
- elem_sent, ice_aqc_opc_remove_sw_rules,
- NULL);
- if (status)
- break;
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
- }
-
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
-
- if (is_unicast_ether_addr(addr)) {
- m_entry = ice_find_mac_entry(hw, addr);
- if (!m_entry)
- return ICE_ERR_OUT_OF_RANGE;
- mutex_lock(&sw->mac_list_lock);
- list_del(&m_entry->list_entry);
- mutex_unlock(&sw->mac_list_lock);
- devm_kfree(ice_hw_to_dev(hw), m_entry);
- }
- }
-
-ice_remove_mac_exit:
- devm_kfree(ice_hw_to_dev(hw), s_rule);
- return status;
-}
-
-/**
- * ice_cfg_dflt_vsi - add filter rule to set/unset given VSI as default
- * VSI for the switch (represented by swid)
- * @hw: pointer to the hardware structure
- * @vsi_id: number of VSI to set as default
+ * @vsi_handle: VSI handle to set as default
* @set: true to add the above mentioned switch rule, false to remove it
* @direction: ICE_FLTR_RX or ICE_FLTR_TX
+ *
+ * Add a filter rule to set/unset the given VSI as the default VSI for the
+ * switch (represented by swid)
*/
enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
+ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
struct ice_aqc_sw_rules_elem *s_rule;
struct ice_fltr_info f_info;
enum ice_adminq_opc opcode;
enum ice_status status;
u16 s_rule_size;
+ u16 hw_vsi_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
@@ -1654,15 +1943,17 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
f_info.lkup_type = ICE_SW_LKUP_DFLT;
f_info.flag = direction;
f_info.fltr_act = ICE_FWD_TO_VSI;
- f_info.fwd_id.vsi_id = vsi_id;
+ f_info.fwd_id.hw_vsi_id = hw_vsi_id;
if (f_info.flag & ICE_FLTR_RX) {
f_info.src = hw->port_info->lport;
+ f_info.src_id = ICE_SRC_ID_LPORT;
if (!set)
f_info.fltr_rule_id =
hw->port_info->dflt_rx_vsi_rule_id;
} else if (f_info.flag & ICE_FLTR_TX) {
- f_info.src = vsi_id;
+ f_info.src_id = ICE_SRC_ID_VSI;
+ f_info.src = hw_vsi_id;
if (!set)
f_info.fltr_rule_id =
hw->port_info->dflt_tx_vsi_rule_id;
@@ -1682,10 +1973,10 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = vsi_id;
+ hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
hw->port_info->dflt_tx_vsi_rule_id = index;
} else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = vsi_id;
+ hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
hw->port_info->dflt_rx_vsi_rule_id = index;
}
} else {
@@ -1704,26 +1995,38 @@ out:
}
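A hedged call-site sketch for the new handle-based ice_cfg_dflt_vsi(); ICE_MAIN_VSI_HANDLE and ICE_FLTR_RX are defined by this patch and driver, while the surrounding flow is assumed:

	/* Sketch: make the main VSI the default Rx VSI for the port, then
	 * clear it again on teardown.
	 */
	enum ice_status status;

	status = ice_cfg_dflt_vsi(hw, ICE_MAIN_VSI_HANDLE, true, ICE_FLTR_RX);
	if (!status) {
		/* ... default VSI active; later, on teardown: ... */
		status = ice_cfg_dflt_vsi(hw, ICE_MAIN_VSI_HANDLE, false,
					  ICE_FLTR_RX);
	}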
/**
- * ice_remove_vlan_internal - Remove one VLAN based filter rule
+ * ice_remove_mac - remove a MAC address based filter rule
* @hw: pointer to the hardware structure
- * @f_entry: filter entry containing one VLAN information
+ * @m_list: list of MAC addresses and forwarding information
+ *
+ * This function removes either a MAC filter rule or a specific VSI from a
+ * VSI list for a multicast MAC address.
+ *
+ * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
+ * ice_add_mac. Caller should be aware that this call will only work if all
+ * the entries passed into m_list were added previously. It will not attempt to
+ * do a partial remove of entries that were found.
*/
-static enum ice_status
-ice_remove_vlan_internal(struct ice_hw *hw,
- struct ice_fltr_list_entry *f_entry)
+enum ice_status
+ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
- struct ice_fltr_info *new_fltr;
- struct ice_fltr_mgmt_list_entry *v_list_elem;
- u16 vsi_id;
+ struct ice_fltr_list_entry *list_itr, *tmp;
- new_fltr = &f_entry->fltr_info;
-
- v_list_elem = ice_find_vlan_entry(hw, new_fltr->l_data.vlan.vlan_id);
- if (!v_list_elem)
+ if (!m_list)
return ICE_ERR_PARAM;
- vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
- return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, v_list_elem);
+ list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
+ enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_MAC)
+ return ICE_ERR_PARAM;
+ list_itr->status = ice_remove_rule_internal(hw,
+ ICE_SW_LKUP_MAC,
+ list_itr);
+ if (list_itr->status)
+ return list_itr->status;
+ }
+ return 0;
}
/**
@@ -1734,131 +2037,169 @@ ice_remove_vlan_internal(struct ice_hw *hw,
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
- struct ice_fltr_list_entry *v_list_itr;
- enum ice_status status = 0;
+ struct ice_fltr_list_entry *v_list_itr, *tmp;
if (!v_list || !hw)
return ICE_ERR_PARAM;
- list_for_each_entry(v_list_itr, v_list, list_entry) {
- status = ice_remove_vlan_internal(hw, v_list_itr);
- if (status) {
- v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
- return status;
- }
- v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
+ enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_VLAN)
+ return ICE_ERR_PARAM;
+ v_list_itr->status = ice_remove_rule_internal(hw,
+ ICE_SW_LKUP_VLAN,
+ v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
}
- return status;
+ return 0;
+}
+
+/**
+ * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
+ * @fm_entry: filter entry to inspect
+ * @vsi_handle: VSI handle to compare with filter info
+ */
+static bool
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
+{
+ return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
+ fm_entry->fltr_info.vsi_handle == vsi_handle) ||
+ (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
+}
+
+/**
+ * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to remove filters from
+ * @vsi_list_head: pointer to the list to add entry to
+ * @fi: pointer to fltr_info of filter entry to copy & add
+ *
+ * Helper function, used when creating a list of filters to remove from
+ * a specific VSI. The entry added to vsi_list_head is a COPY of the
+ * original filter entry, with the exception of the fltr_info.fltr_act and
+ * fltr_info.fwd_id fields. These are set such that later logic can
+ * extract which VSI to remove the filter from, and pass on that information.
+ */
+static enum ice_status
+ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
+ struct list_head *vsi_list_head,
+ struct ice_fltr_info *fi)
+{
+ struct ice_fltr_list_entry *tmp;
+
+ /* this memory is freed up in the caller function
+ * once filters for this VSI are removed
+ */
+ tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ tmp->fltr_info = *fi;
+
+ /* Overwrite these fields to indicate which VSI to remove the filter
+ * from, so the find-and-remove logic can extract the information from
+ * the list entries. Note that the original entries will still have
+ * proper values.
+ */
+ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp->fltr_info.vsi_handle = vsi_handle;
+ tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ list_add(&tmp->list_entry, vsi_list_head);
+
+ return 0;
}
/**
* ice_add_to_vsi_fltr_list - Add VSI filters to the list
* @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
* @lkup_list_head: pointer to the list that has certain lookup type filters
- * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id
+ * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
+ *
+ * Locates all filters in lkup_list_head that are used by the given VSI,
+ * and adds COPIES of those entries to vsi_list_head (intended to be used
+ * to remove the listed filters).
+ * Note that this means all entries in vsi_list_head must be explicitly
+ * deallocated by the caller when done with the list.
*/
static enum ice_status
-ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
+ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *lkup_list_head,
struct list_head *vsi_list_head)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
+ enum ice_status status = 0;
/* check to make sure VSI id is valid and within boundary */
- if (vsi_id >=
- (sizeof(fm_entry->vsi_list_info->vsi_map) * BITS_PER_BYTE - 1))
+ if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
struct ice_fltr_info *fi;
fi = &fm_entry->fltr_info;
- if ((fi->fltr_act == ICE_FWD_TO_VSI &&
- fi->fwd_id.vsi_id == vsi_id) ||
- (fi->fltr_act == ICE_FWD_TO_VSI_LIST &&
- (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))) {
- struct ice_fltr_list_entry *tmp;
-
- /* this memory is freed up in the caller function
- * ice_remove_vsi_lkup_fltr() once filters for
- * this VSI are removed
- */
- tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp),
- GFP_KERNEL);
- if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
+ continue;
- memcpy(&tmp->fltr_info, fi, sizeof(*fi));
-
- /* Expected below fields to be set to ICE_FWD_TO_VSI and
- * the particular VSI id since we are only removing this
- * one VSI
- */
- if (fi->fltr_act == ICE_FWD_TO_VSI_LIST) {
- tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
- tmp->fltr_info.fwd_id.vsi_id = vsi_id;
- }
-
- list_add(&tmp->list_entry, vsi_list_head);
- }
+ status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
+ vsi_list_head, fi);
+ if (status)
+ return status;
}
- return 0;
+ return status;
}
/**
* ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
* @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
* @lkup: switch rule filter lookup type
*/
static void
-ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
+ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
enum ice_sw_lkup_type lkup)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_list_entry *fm_entry;
struct list_head remove_list_head;
+ struct list_head *rule_head;
struct ice_fltr_list_entry *tmp;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status;
INIT_LIST_HEAD(&remove_list_head);
+ rule_lock = &sw->recp_list[lkup].filt_rule_lock;
+ rule_head = &sw->recp_list[lkup].filt_rules;
+ mutex_lock(rule_lock);
+ status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
+ &remove_list_head);
+ mutex_unlock(rule_lock);
+ if (status)
+ return;
+
switch (lkup) {
case ICE_SW_LKUP_MAC:
- mutex_lock(&sw->mac_list_lock);
- status = ice_add_to_vsi_fltr_list(hw, vsi_id,
- &sw->mac_list_head,
- &remove_list_head);
- mutex_unlock(&sw->mac_list_lock);
- if (!status) {
- ice_remove_mac(hw, &remove_list_head);
- goto free_fltr_list;
- }
+ ice_remove_mac(hw, &remove_list_head);
break;
case ICE_SW_LKUP_VLAN:
- mutex_lock(&sw->vlan_list_lock);
- status = ice_add_to_vsi_fltr_list(hw, vsi_id,
- &sw->vlan_list_head,
- &remove_list_head);
- mutex_unlock(&sw->vlan_list_lock);
- if (!status) {
- ice_remove_vlan(hw, &remove_list_head);
- goto free_fltr_list;
- }
+ ice_remove_vlan(hw, &remove_list_head);
break;
case ICE_SW_LKUP_MAC_VLAN:
case ICE_SW_LKUP_ETHERTYPE:
case ICE_SW_LKUP_ETHERTYPE_MAC:
case ICE_SW_LKUP_PROMISC:
- case ICE_SW_LKUP_PROMISC_VLAN:
case ICE_SW_LKUP_DFLT:
- ice_debug(hw, ICE_DBG_SW,
- "Remove filters for this lookup type hasn't been implemented yet\n");
+ case ICE_SW_LKUP_PROMISC_VLAN:
+ case ICE_SW_LKUP_LAST:
+ default:
+ ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
break;
}
- return;
-free_fltr_list:
list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
list_del(&fm_entry->list_entry);
devm_kfree(ice_hw_to_dev(hw), fm_entry);
@@ -1868,16 +2209,121 @@ free_fltr_list:
/**
* ice_remove_vsi_fltr - Remove all filters for a VSI
* @hw: pointer to the hardware structure
- * @vsi_id: ID of VSI to remove filters from
+ * @vsi_handle: VSI handle to remove filters from
+ */
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
+{
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
+}
+
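The eight explicit calls above could equivalently be written as a loop over the supported lookup types; this is a sketch only, as the patch keeps the calls explicit:

	/* Equivalent loop form (sketch; not part of this patch) */
	static const enum ice_sw_lkup_type lkups[] = {
		ICE_SW_LKUP_MAC, ICE_SW_LKUP_MAC_VLAN, ICE_SW_LKUP_PROMISC,
		ICE_SW_LKUP_VLAN, ICE_SW_LKUP_DFLT, ICE_SW_LKUP_ETHERTYPE,
		ICE_SW_LKUP_ETHERTYPE_MAC, ICE_SW_LKUP_PROMISC_VLAN,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(lkups); i++)
		ice_remove_vsi_lkup_fltr(hw, vsi_handle, lkups[i]);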
+/**
+ * ice_replay_vsi_fltr - Replay filters for requested VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: driver VSI handle
+ * @recp_id: Recipe id for which rules need to be replayed
+ * @list_head: list for which filters need to be replayed
+ *
+ * Replays the filters of recipe recp_id for a VSI represented via vsi_handle.
+ * A valid VSI handle must be passed.
+ */
+static enum ice_status
+ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
+ struct list_head *list_head)
+{
+ struct ice_fltr_mgmt_list_entry *itr;
+ enum ice_status status = 0;
+ u16 hw_vsi_id;
+
+ if (list_empty(list_head))
+ return status;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ list_for_each_entry(itr, list_head, list_entry) {
+ struct ice_fltr_list_entry f_entry;
+
+ f_entry.fltr_info = itr->fltr_info;
+ if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
+ itr->fltr_info.vsi_handle == vsi_handle) {
+ /* update the src in case it is vsi num */
+ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
+ f_entry.fltr_info.src = hw_vsi_id;
+ status = ice_add_rule_internal(hw, recp_id, &f_entry);
+ if (status)
+ goto end;
+ continue;
+ }
+ if (!itr->vsi_list_info ||
+ !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
+ continue;
+ /* Clearing it so that the logic can add it back */
+ clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
+ f_entry.fltr_info.vsi_handle = vsi_handle;
+ f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ /* update the src in case it is vsi num */
+ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
+ f_entry.fltr_info.src = hw_vsi_id;
+ if (recp_id == ICE_SW_LKUP_VLAN)
+ status = ice_add_vlan_internal(hw, &f_entry);
+ else
+ status = ice_add_rule_internal(hw, recp_id, &f_entry);
+ if (status)
+ goto end;
+ }
+end:
+ return status;
+}
+
+/**
+ * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: driver VSI handle
+ *
+ * Replays filters for requested VSI via vsi_handle.
*/
-void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id)
+enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
{
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC);
- ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN);
+ struct ice_switch_info *sw = hw->switch_info;
+ enum ice_status status = 0;
+ u8 i;
+
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ struct list_head *head;
+
+ head = &sw->recp_list[i].filt_replay_rules;
+ status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
+ if (status)
+ return status;
+ }
+ return status;
+}
+
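A hedged sketch of how a reset/rebuild path might drive the replay helper above; the loop and wrapper name are assumptions, while ice_is_vsi_valid() and ice_replay_vsi_all_fltr() come from this patch:

	static enum ice_status example_replay_all_vsi_filters(struct ice_hw *hw)
	{
		u16 handle;

		for (handle = 0; handle < ICE_MAX_VSI; handle++) {
			enum ice_status status;

			if (!ice_is_vsi_valid(hw, handle))
				continue;
			status = ice_replay_vsi_all_fltr(hw, handle);
			if (status)
				return status;
		}
		return 0;
	}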
+/**
+ * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
+ * @hw: pointer to the hw struct
+ *
+ * Deletes the filter replay rules.
+ */
+void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ u8 i;
+
+ if (!sw)
+ return;
+
+ for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
+ struct list_head *l_head;
+
+ l_head = &sw->recp_list[i].filt_replay_rules;
+ ice_rem_sw_rule_info(hw, l_head);
+ }
+ }
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 9b8ec128ee31..b88d96a1ef69 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -17,7 +17,9 @@ struct ice_vsi_ctx {
u16 vsis_unallocated;
u16 flags;
struct ice_aqc_vsi_props info;
+ struct ice_sched_vsi_info sched;
u8 alloc_from_pool;
+ u8 vf_num;
};
enum ice_sw_fwd_act_type {
@@ -39,6 +41,15 @@ enum ice_sw_lkup_type {
ICE_SW_LKUP_DFLT = 5,
ICE_SW_LKUP_ETHERTYPE_MAC = 8,
ICE_SW_LKUP_PROMISC_VLAN = 9,
+ ICE_SW_LKUP_LAST
+};
+
+/* type of filter src id */
+enum ice_src_id {
+ ICE_SRC_ID_UNKNOWN = 0,
+ ICE_SRC_ID_VSI,
+ ICE_SRC_ID_QUEUE,
+ ICE_SRC_ID_LPORT,
};
struct ice_fltr_info {
@@ -55,6 +66,7 @@ struct ice_fltr_info {
/* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
u16 src;
+ enum ice_src_id src_id;
union {
struct {
@@ -76,7 +88,10 @@ struct ice_fltr_info {
u16 ethertype;
u8 mac_addr[ETH_ALEN]; /* optional */
} ethertype_mac;
- } l_data;
+ } l_data; /* Make sure to zero out the memory of l_data before using
+ * it, or set only the data associated with the lookup match;
+ * everything else should remain zero.
+ */
/* Depending on filter action */
union {
@@ -84,12 +99,16 @@ struct ice_fltr_info {
* queue id in case of ICE_FWD_TO_QGRP.
*/
u16 q_id:11;
- u16 vsi_id:10;
+ u16 hw_vsi_id:10;
u16 vsi_list_id:10;
} fwd_id;
+ /* SW VSI handle */
+ u16 vsi_handle;
+
/* Set to num_queues if action is ICE_FWD_TO_QGRP. This field
- * determines the range of queues the packet needs to be forwarded to
+ * determines the range of queues the packet needs to be forwarded to.
+ * Note that qgrp_size must be set to a power of 2.
*/
u8 qgrp_size;
@@ -98,29 +117,52 @@ struct ice_fltr_info {
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
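Given the l_data comment above, a minimal hedged sketch of safe initialization: zero the whole structure, then set only the data for the lookup being matched (values are illustrative):

	struct ice_fltr_info f_info;

	memset(&f_info, 0, sizeof(f_info));
	f_info.lkup_type = ICE_SW_LKUP_VLAN;
	f_info.l_data.vlan.vlan_id = 100;	/* example VLAN ID */
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.src_id = ICE_SRC_ID_VSI;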
+struct ice_sw_recipe {
+ struct list_head l_entry;
+
+ /* To protect modification of filt_rule list
+ * defined below
+ */
+ struct mutex filt_rule_lock;
+
+ /* List of type ice_fltr_mgmt_list_entry */
+ struct list_head filt_rules;
+ struct list_head filt_replay_rules;
+
+ /* linked list of type recipe_list_entry */
+ struct list_head rg_list;
+ /* linked list of type ice_sw_fv_list_entry */
+ struct list_head fv_list;
+ struct ice_aqc_recipe_data_elem *r_buf;
+ u8 recp_count;
+ u8 root_rid;
+ u8 num_profs;
+ u8 *prof_ids;
+
+ /* recipe bitmap: which recipes make up this recipe */
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+};
+
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
struct ice_vsi_list_map_info {
struct list_head list_entry;
DECLARE_BITMAP(vsi_map, ICE_MAX_VSI);
u16 vsi_list_id;
-};
-
-enum ice_sw_fltr_status {
- ICE_FLTR_STATUS_NEW = 0,
- ICE_FLTR_STATUS_FW_SUCCESS,
- ICE_FLTR_STATUS_FW_FAIL,
+ /* counter to track how many rules are reusing this VSI list */
+ u16 ref_cnt;
};
struct ice_fltr_list_entry {
struct list_head list_entry;
- enum ice_sw_fltr_status status;
+ enum ice_status status;
struct ice_fltr_info fltr_info;
};
/* This defines an entry in the list that maintains MAC or VLAN membership
* to HW list mapping, since multiple VSIs can subscribe to the same MAC or
* VLAN. As an optimization the VSI list should be created only when a
- * second VSI becomes a subscriber to the VLAN address.
+ * second VSI becomes a subscriber to the same MAC address. VSI lists are always
+ * used for VLAN membership.
*/
struct ice_fltr_mgmt_list_entry {
/* back pointer to VSI list id to VSI list mapping */
@@ -138,24 +180,33 @@ struct ice_fltr_mgmt_list_entry {
/* VSI related commands */
enum ice_status
-ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- struct ice_sq_cd *cd);
+ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
enum ice_status
-ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- struct ice_sq_cd *cd);
+ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd);
enum ice_status
-ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
- bool keep_vsi_alloc, struct ice_sq_cd *cd);
-
+ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
+struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
/* Switch/bridge related commands */
+enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
-void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id);
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction);
+ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+
+enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
+u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
+
+enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
+void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
#endif /* _ICE_SWITCH_H_ */
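
A minimal sketch (not part of the patch) of how the per-entry status change
above can be consumed: after ice_add_mac() returns, each ice_fltr_list_entry
carries its own enum ice_status rather than the removed ice_sw_fltr_status,
so callers can report individual filter failures. mac_list and dev are
assumed locals here; the member names match the declarations above.

	struct ice_fltr_list_entry *entry;

	/* mac_list is the list_head of ice_fltr_list_entry entries that was
	 * passed to ice_add_mac(); check each entry's own status
	 */
	list_for_each_entry(entry, &mac_list, list_entry)
		if (entry->status)
			dev_warn(dev, "filter add failed, status %d\n",
				 entry->status);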
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 6481e3d86374..5dae968d853e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -251,6 +251,7 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+ tx_ring->tx_stats.prev_pkt = -1;
return 0;
err:
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 31bc998fe200..1d0f58bd389b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -71,6 +71,7 @@ struct ice_txq_stats {
u64 restart_q;
u64 tx_busy;
u64 tx_linearize;
+ int prev_pkt; /* negative if no pending Tx descriptors */
};
struct ice_rxq_stats {
@@ -103,10 +104,17 @@ enum ice_rx_dtype {
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
-#define ICE_ITR_8K 0x003E
+#define ICE_ITR_8K 125
+#define ICE_ITR_20K 50
+#define ICE_DFLT_TX_ITR ICE_ITR_20K
+#define ICE_DFLT_RX_ITR ICE_ITR_20K
+/* Apply the ITR granularity translation to program the register. itr_gran is
+ * either 2 or 4 usecs, so divide itr_gran by 2 first, then shift the value
+ * by the result.
+ */
+#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \
+ ((itr_gran) / 2))
-/* apply ITR HW granularity translation to program the HW registers */
-#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
+#define ICE_DFLT_INTRL 0
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
@@ -128,14 +136,6 @@ struct ice_ring {
u16 q_index; /* Queue number of ring */
u32 txq_teid; /* Added Tx queue TEID */
- /* high bit set means dynamic, use accessor routines to read/write.
- * hardware supports 2us/1us resolution for the ITR registers.
- * these values always store the USER setting, and must be converted
- * before programming to a register.
- */
- u16 rx_itr_setting;
- u16 tx_itr_setting;
-
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -172,6 +172,7 @@ struct ice_ring_container {
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */
enum ice_latency_range latency_range;
+ int itr_idx; /* index in the interrupt vector */
u16 itr;
};
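
To sanity-check the ITR_TO_REG() translation above, here is a standalone
userspace sketch (illustrative only; the macro body is copied from the hunk).
A user setting of 50 usecs programs 25 register units at 2 usec granularity
and 12 units at 4 usec granularity.

	#include <stdio.h>

	#define ICE_ITR_DYNAMIC 0x8000 /* high bit flags a dynamic setting */
	#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \
					   ((itr_gran) / 2))

	int main(void)
	{
		/* itr_gran = 2 usecs -> shift by 1: prints 25 */
		printf("%d\n", ITR_TO_REG(50 | ICE_ITR_DYNAMIC, 2));
		/* itr_gran = 4 usecs -> shift by 2: prints 12 */
		printf("%d\n", ITR_TO_REG(50, 4));
		return 0;
	}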
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 97c366e0ca59..12f9432abf11 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -18,6 +18,9 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
return test_bit(tc, (unsigned long *)&bitmap);
}
+/* Driver always calls main vsi_handle first */
+#define ICE_MAIN_VSI_HANDLE 0
+
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
#define ICE_DBG_LINK BIT_ULL(4)
@@ -34,10 +37,15 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
enum ice_aq_res_ids {
ICE_NVM_RES_ID = 1,
ICE_SPD_RES_ID,
- ICE_GLOBAL_CFG_LOCK_RES_ID,
- ICE_CHANGE_LOCK_RES_ID
+ ICE_CHANGE_LOCK_RES_ID,
+ ICE_GLOBAL_CFG_LOCK_RES_ID
};
+/* FW update timeout definitions are in milliseconds */
+#define ICE_NVM_TIMEOUT 180000
+#define ICE_CHANGE_LOCK_TIMEOUT 1000
+#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+
enum ice_aq_res_access_type {
ICE_RES_READ = 1,
ICE_RES_WRITE
@@ -76,6 +84,7 @@ enum ice_media_type {
enum ice_vsi_type {
ICE_VSI_PF = 0,
+ ICE_VSI_VF,
};
struct ice_link_status {
@@ -95,6 +104,15 @@ struct ice_link_status {
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
};
+/* Different reset sources for which a disable queue AQ call has to be made in
+ * order to clean the Tx scheduler as a part of the reset
+ */
+enum ice_disq_rst_src {
+ ICE_NO_RESET = 0,
+ ICE_VM_RESET,
+ ICE_VF_RESET,
+};
+
/* PHY info such as phy_type, etc... */
struct ice_phy_info {
struct ice_link_status link_info;
@@ -119,6 +137,9 @@ struct ice_hw_common_caps {
/* Max MTU for function or device */
u16 max_mtu;
+ /* Virtualization support */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+
/* RSS related capabilities */
u16 rss_table_size; /* 512 for PFs and 64 for VFs */
u8 rss_table_entry_width; /* RSS Entry width in bits */
@@ -127,12 +148,15 @@ struct ice_hw_common_caps {
/* Function specific capabilities */
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
u32 guaranteed_num_vsi;
};
/* Device wide capabilities */
struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
};
@@ -142,11 +166,18 @@ struct ice_mac_info {
u8 perm_addr[ETH_ALEN];
};
-/* Various RESET request, These are not tied with HW reset types */
+/* Reset types used to determine which kind of reset was requested. These
+ * defines match the RESET_TYPE field of the GLGEN_RSTAT register.
+ * ICE_RESET_PFR does not match any RESET_TYPE field in the GLGEN_RSTAT register
+ * because its reset source is different from the other types listed.
+ */
enum ice_reset_req {
- ICE_RESET_PFR = 0,
+ ICE_RESET_POR = 0,
+ ICE_RESET_INVAL = 0,
ICE_RESET_CORER = 1,
ICE_RESET_GLOBR = 2,
+ ICE_RESET_EMPR = 3,
+ ICE_RESET_PFR = 4,
};
/* Bus parameters */
@@ -180,7 +211,7 @@ struct ice_sched_node {
struct ice_sched_node **children;
struct ice_aqc_txsched_elem_data info;
u32 agg_id; /* aggregator group id */
- u16 vsi_id;
+ u16 vsi_handle;
u8 in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
u8 num_children;
@@ -204,6 +235,7 @@ enum ice_agg_type {
};
#define ICE_SCHED_DFLT_RL_PROF_ID 0
+#define ICE_SCHED_DFLT_BW_WT 1
/* vsi type list entry to locate corresponding vsi/ag nodes */
struct ice_sched_vsi_info {
@@ -238,8 +270,6 @@ struct ice_port_info {
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
- struct ice_sched_tx_policy sched_policy;
- struct list_head vsi_info_list;
struct list_head agg_list; /* lists all aggregators */
u8 lport;
#define ICE_LPORT_MASK 0xff
@@ -247,19 +277,26 @@ struct ice_port_info {
};
struct ice_switch_info {
- /* Switch VSI lists to MAC/VLAN translation */
- struct mutex mac_list_lock; /* protect MAC list */
- struct list_head mac_list_head;
- struct mutex vlan_list_lock; /* protect VLAN list */
- struct list_head vlan_list_head;
- struct mutex eth_m_list_lock; /* protect ethtype list */
- struct list_head eth_m_list_head;
- struct mutex promisc_list_lock; /* protect promisc mode list */
- struct list_head promisc_list_head;
- struct mutex mac_vlan_list_lock; /* protect MAC-VLAN list */
- struct list_head mac_vlan_list_head;
-
struct list_head vsi_list_map_head;
+ struct ice_sw_recipe *recp_list;
+};
+
+/* FW logging configuration */
+struct ice_fw_log_evnt {
+ u8 cfg : 4; /* New event enables to configure */
+ u8 cur : 4; /* Current/active event enables */
+};
+
+struct ice_fw_log_cfg {
+ u8 cq_en : 1; /* FW logging is enabled via the control queue */
+ u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */
+ u8 actv_evnts; /* Cumulative mask of currently enabled log events */
+
+#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S)
+#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S)
+#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S)
+#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S)
+ struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX];
};
/* Port hardware description */
@@ -286,8 +323,11 @@ struct ice_hw {
u8 flattened_layers;
u8 max_cgds;
u8 sw_entry_point_layer;
+ u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */
+ u8 reset_ongoing; /* true if hw is in reset, false otherwise */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
@@ -297,6 +337,7 @@ struct ice_hw {
/* Control Queue info */
struct ice_ctl_q_info adminq;
+ struct ice_ctl_q_info mailboxq;
u8 api_branch; /* API branch version */
u8 api_maj_ver; /* API major version */
@@ -308,16 +349,27 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
- /* minimum allowed value for different speeds */
-#define ICE_ITR_GRAN_MIN_200 1
-#define ICE_ITR_GRAN_MIN_100 1
-#define ICE_ITR_GRAN_MIN_50 2
-#define ICE_ITR_GRAN_MIN_25 4
+ struct ice_fw_log_cfg fw_log;
+
+/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
+ * register. Used for determining the ITR/INTRL granularity during
+ * initialization.
+ */
+#define ICE_MAX_AGG_BW_200G 0x0
+#define ICE_MAX_AGG_BW_100G 0x1
+#define ICE_MAX_AGG_BW_50G 0x2
+#define ICE_MAX_AGG_BW_25G 0x3
+ /* ITR granularity for different speeds */
+#define ICE_ITR_GRAN_ABOVE_25 2
+#define ICE_ITR_GRAN_MAX_25 4
/* ITR granularity in 1 us */
- u8 itr_gran_200;
- u8 itr_gran_100;
- u8 itr_gran_50;
- u8 itr_gran_25;
+ u8 itr_gran;
+ /* INTRL granularity for different speeds */
+#define ICE_INTRL_GRAN_ABOVE_25 4
+#define ICE_INTRL_GRAN_MAX_25 8
+ /* INTRL granularity in 1 us */
+ u8 intrl_gran;
+
u8 ucast_shared; /* true if VSIs can share unicast addr */
};
@@ -391,4 +443,7 @@ struct ice_hw_port_stats {
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512
+/* Hash redirection LUT for VSI - maximum array size */
+#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
+
#endif /* _ICE_TYPE_H_ */
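
The new ICE_VSIQF_HLUT_ARRAY_SIZE above can be checked with a small
standalone sketch. VSIQF_HLUT_MAX_INDEX is assumed here to be 0xF, with each
VSIQF_HLUT register holding four one-byte LUT entries; that yields the
64-entry VF RSS table size noted in ice_hw_common_caps.

	#include <assert.h>

	#define VSIQF_HLUT_MAX_INDEX 0xF /* assumed: 16 LUT registers per VSI */
	#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)

	int main(void)
	{
		/* 16 registers * 4 entries each = 64-entry VF RSS LUT */
		assert(ICE_VSIQF_HLUT_ARRAY_SIZE == 64);
		return 0;
	}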
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
new file mode 100644
index 000000000000..c25e486706f3
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -0,0 +1,2668 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+
+/**
+ * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ */
+static void
+ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
+ enum ice_status v_retval, u8 *msg, u16 msglen)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+ /* Not all VFs are enabled, so skip the ones that are not */
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ continue;
+
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
+ msglen, NULL);
+ }
+}
+
+/**
+ * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+ int ice_link_speed, bool link_up)
+{
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ pfe->event_data.link_event_adv.link_status = link_up;
+ /* Speed in Mbps */
+ pfe->event_data.link_event_adv.link_speed =
+ ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
+ } else {
+ pfe->event_data.link_event.link_status = link_up;
+ /* Legacy method for virtchnl link speeds */
+ pfe->event_data.link_event.link_speed =
+ (enum virtchnl_link_speed)
+ ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
+ }
+}
+
+/**
+ * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+ bool link_up)
+{
+ u16 link_speed;
+
+ if (link_up)
+ link_speed = ICE_AQ_LINK_SPEED_40GB;
+ else
+ link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ ice_set_pfe_link(vf, pfe, link_speed, link_up);
+}
+
+/**
+ * ice_vc_notify_vf_link_state - Inform a VF of link status
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ */
+static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+{
+ struct virtchnl_pf_event pfe = { 0 };
+ struct ice_link_status *ls;
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ ls = &hw->port_info->phy.link_info;
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ if (vf->link_forced)
+ ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
+ else
+ ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
+ ICE_AQ_LINK_UP);
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ sizeof(pfe), NULL);
+}
+
+/**
+ * ice_get_vf_vector - get VF interrupt vector register offset
+ * @vf_msix: number of MSI-X vectors per VF on a PF
+ * @vf_id: VF identifier
+ * @i: index of MSI-X vector
+ */
+static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
+{
+ return ((i == 0) ? VFINT_DYN_CTLN(vf_id) :
+ VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1)));
+}
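+
+/* Worked example (illustrative, not in the original): with 5 MSI-X vectors
+ * per VF, VF 3's vector 0 maps to VFINT_DYN_CTLN(3), while its vector 2 maps
+ * to VFINT_DYN_CTLN((5 - 1) * 3 + (2 - 1)) = VFINT_DYN_CTLN(13).
+ */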
+
+/**
+ * ice_free_vf_res - Free a VF's resources
+ * @vf: pointer to the VF info
+ */
+static void ice_free_vf_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int i, pf_vf_msix;
+
+ /* First, disable VF's configuration API to prevent OS from
+ * accessing the VF's VSI after it's freed or invalidated.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* free VSI and disconnect it from the parent uplink */
+ if (vf->lan_vsi_idx) {
+ ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+ vf->lan_vsi_idx = 0;
+ vf->lan_vsi_num = 0;
+ vf->num_mac = 0;
+ }
+
+ pf_vf_msix = pf->num_vf_msix;
+ /* Disable interrupts so that VF starts in a known state */
+ for (i = 0; i < pf_vf_msix; i++) {
+ u32 reg_idx;
+
+ reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i);
+ wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M);
+ ice_flush(&pf->hw);
+ }
+ /* reset some of the state variables keeping track of the resources */
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+}
+
+/********************** enable_vf routines ****************************/
+
+/**
+ * ice_dis_vf_mappings
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int first, last, v;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
+
+ first = vf->first_vector_idx;
+ last = first + pf->num_vf_msix - 1;
+ for (v = first; v <= last; v++) {
+ u32 reg;
+
+ reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
+ GLINT_VECT2FUNC_IS_PF_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Tx queues is not yet implemented\n");
+
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Rx queues is not yet implemented\n");
+}
+
+/**
+ * ice_free_vfs - Free all VFs
+ * @pf: pointer to the PF structure
+ */
+void ice_free_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int tmp, i;
+
+ if (!pf->vf)
+ return;
+
+ while (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ usleep_range(1000, 2000);
+
+ /* Avoid wait time by stopping all VFs at the same time */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
+ continue;
+
+ /* stop rings without wait time */
+ ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+ ICE_NO_RESET, i);
+ ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
+
+ clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
+ }
+
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
+ tmp = pf->num_alloc_vfs;
+ pf->num_vf_qps = 0;
+ pf->num_alloc_vfs = 0;
+ for (i = 0; i < tmp; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
+ /* disable VF qp mappings */
+ ice_dis_vf_mappings(&pf->vf[i]);
+
+ /* Set this state so that assigned VF vectors can be
+ * reclaimed by PF for reuse in ice_vsi_release(). No
+ * need to clear this bit since the pf->vf array is
+ * freed anyway after this for loop.
+ */
+ set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
+ ice_free_vf_res(&pf->vf[i]);
+ }
+ }
+
+ devm_kfree(&pf->pdev->dev, pf->vf);
+ pf->vf = NULL;
+
+ /* This check is for when the driver is unloaded while VFs are
+ * assigned. Setting the number of VFs to 0 through sysfs is caught
+ * before this function ever gets called.
+ */
+ if (!pci_vfs_assigned(pf->pdev)) {
+ int vf_id;
+
+ /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
+ * work correctly when SR-IOV gets re-enabled.
+ */
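+ /* Illustrative: each 32-bit GLGEN_VFLRSTAT register covers 32
+ * VFs, so an absolute VF id of 70 lands in register index 2,
+ * bit 6.
+ */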
+ for (vf_id = 0; vf_id < tmp; vf_id++) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ }
+ }
+ clear_bit(__ICE_VF_DIS, pf->state);
+ clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+}
+
+/**
+ * ice_trigger_vf_reset - Reset a VF on HW
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ */
+static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ u32 reg, reg_idx, bit_idx;
+ struct ice_hw *hw;
+ int vf_abs_id, i;
+
+ hw = &pf->hw;
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ /* Inform VF that it is no longer active, as a warning */
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
+ * It's normally disabled in ice_free_vf_res(), but it's safer
+ * to do it earlier to give any VF configuration functions that
+ * may still be running at this point time to finish.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* In the case of a VFLR, the HW has already reset the VF and we
+ * just need to clean up, so don't hit the VFRTRIG register.
+ */
+ if (!is_vflr) {
+ /* reset VF using VPGEN_VFRTRIG reg */
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg |= VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ }
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (vf_abs_id) / 32;
+ bit_idx = (vf_abs_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ ice_flush(hw);
+
+ wr32(hw, PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
+ for (i = 0; i < 100; i++) {
+ reg = rd32(hw, PF_PCI_CIAD);
+ if ((reg & VF_TRANS_PENDING_M) != 0)
+ dev_err(&pf->pdev->dev,
+ "VF %d PCI transactions stuck\n", vf->vf_id);
+ udelay(1);
+ }
+}
+
+/**
+ * ice_vsi_set_pvid - Set port VLAN id for the VSI
+ * @vsi: the VSI being changed
+ * @vid: the VLAN id to set as a PVID
+ */
+static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR;
+ ctxt.info.pvid = cpu_to_le16(vid);
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
+ if (status) {
+ dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.pvid = ctxt.info.pvid;
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_vsi_kill_pvid - Remove port VLAN id from the VSI
+ * @vsi: the VSI being changed
+ */
+static int ice_vsi_kill_pvid(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ if (ice_vsi_manage_vlan_stripping(vsi, false)) {
+ dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n",
+ vsi->vsi_num);
+ return -ENODEV;
+ }
+
+ vsi->info.pvid = 0;
+ return 0;
+}
+
+/**
+ * ice_vf_vsi_setup - Set up a VF VSI
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ * @vf_id: defines VF id to which this VSI connects.
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+static struct ice_vsi *
+ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
+}
+
+/**
+ * ice_alloc_vsi_res - Setup VF VSI and its resources
+ * @vf: pointer to the VF structure
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_alloc_vsi_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ LIST_HEAD(tmp_add_list);
+ u8 broadcast[ETH_ALEN];
+ struct ice_vsi *vsi;
+ int status = 0;
+
+ vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
+
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
+ return -ENOMEM;
+ }
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ /* first vector index is the VF's OICR index */
+ vf->first_vector_idx = vsi->hw_base_vector;
+ /* Since hw_base_vector holds the vector where data queue interrupts
+ * start, increment by 1 because the VF's allocated vectors include
+ * the OICR interrupt as well.
+ */
+ vsi->hw_base_vector += 1;
+
+ /* Check if a port VLAN existed before and restore it accordingly */
+ if (vf->port_vlan_id)
+ ice_vsi_set_pvid(vsi, vf->port_vlan_id);
+
+ eth_broadcast_addr(broadcast);
+
+ status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+ if (status)
+ goto ice_alloc_vsi_res_exit;
+
+ if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
+ status = ice_add_mac_to_list(vsi, &tmp_add_list,
+ vf->dflt_lan_addr.addr);
+ if (status)
+ goto ice_alloc_vsi_res_exit;
+ }
+
+ status = ice_add_mac(&pf->hw, &tmp_add_list);
+ if (status)
+ dev_err(&pf->pdev->dev, "could not add mac filters\n");
+
+ /* Clear this bit after VF initialization since we shouldn't reclaim
+ * and reassign interrupts for synchronous or asynchronous VFR events.
+ * We don't want to reconfigure interrupts since AVF driver doesn't
+ * expect vector assignment to be changed unless there is a request for
+ * more vectors.
+ */
+ clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
+ice_alloc_vsi_res_exit:
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ return status;
+}
+
+/**
+ * ice_alloc_vf_res - Allocate VF resources
+ * @vf: pointer to the VF structure
+ */
+static int ice_alloc_vf_res(struct ice_vf *vf)
+{
+ int status;
+
+ /* setup VF VSI and necessary resources */
+ status = ice_alloc_vsi_res(vf);
+ if (status)
+ goto ice_alloc_vf_res_exit;
+
+ if (vf->trusted)
+ set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ /* VF is now completely initialized */
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ return status;
+
+ice_alloc_vf_res_exit:
+ ice_free_vf_res(vf);
+ return status;
+}
+
+/**
+ * ice_ena_vf_mappings
+ * @vf: pointer to the VF structure
+ *
+ * Enable VF vectors and queues allocation by writing the details into
+ * respective registers.
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int first, last, v;
+ struct ice_hw *hw;
+ int abs_vf_id;
+ u32 reg;
+
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ first = vf->first_vector_idx;
+ last = (first + pf->num_vf_msix) - 1;
+ abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ /* VF Vector allocation */
+ reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
+ ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
+ VPINT_ALLOC_VALID_M);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
+
+ /* map the interrupts to its functions */
+ for (v = first; v <= last; v++) {
+ reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+ GLINT_VECT2FUNC_VF_NUM_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ /* VF Tx queues allocation */
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id),
+ VPLAN_TXQ_MAPENA_TX_ENA_M);
+ /* set the VF PF Tx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
+ VPLAN_TX_QBASE_VFFIRSTQ_M) |
+ (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+ VPLAN_TX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Tx queues is not yet implemented\n");
+ }
+
+ /* VF Rx queues allocation */
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id),
+ VPLAN_RXQ_MAPENA_RX_ENA_M);
+ /* set the VF PF Rx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
+ VPLAN_RX_QBASE_VFFIRSTQ_M) |
+ (((vsi->alloc_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+ VPLAN_RX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Scattered mode for VF Rx queues is not yet implemented\n");
+ }
+}
+
+/**
+ * ice_determine_res
+ * @pf: pointer to the PF structure
+ * @avail_res: available resources in the PF structure
+ * @max_res: maximum resources that can be given per VF
+ * @min_res: minimum resources that can be given per VF
+ *
+ * Returns a non-zero value if resources (queues/vectors) are available, or
+ * zero if the PF cannot accommodate all num_alloc_vfs.
+ */
+static int
+ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
+{
+ bool checked_min_res = false;
+ int res;
+
+ /* Start by checking if the PF can assign the max number of resources
+ * to all num_alloc_vfs.
+ * If yes, return that number per VF.
+ * If no, divide by 2 rounding up and check again.
+ * Repeat until even the minimum resources are not available; in that
+ * case return 0.
+ */
+ res = max_res;
+ while ((res >= min_res) && !checked_min_res) {
+ int num_all_res;
+
+ num_all_res = pf->num_alloc_vfs * res;
+ if (num_all_res <= avail_res)
+ return res;
+
+ if (res == min_res)
+ checked_min_res = true;
+
+ res = DIV_ROUND_UP(res, 2);
+ }
+ return 0;
+}
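+
+/* Illustrative walk-through (not part of the original patch): with 8 VFs,
+ * 100 available vectors, max_res = 65 and min_res = 5, res steps through
+ * 65 -> 33 -> 17 -> 9; since 8 * 9 = 72 <= 100, nine vectors per VF are
+ * returned.
+ */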
+
+/**
+ * ice_check_avail_res - check if vectors and queues are available
+ * @pf: pointer to the PF structure
+ *
+ * This function is where the actual number of resources for the VF VSIs is
+ * calculated; nothing is reserved ahead of time during probe. Returns 0 if
+ * vector and queue resources are available, otherwise an error code.
+ */
+static int ice_check_avail_res(struct ice_pf *pf)
+{
+ u16 num_msix, num_txq, num_rxq;
+
+ if (!pf->num_alloc_vfs)
+ return -EINVAL;
+
+ /* Grab from the HW interrupts common pool.
+ * Note: By the time the user decides it needs more vectors in a VF,
+ * it's already too late since one must decide this prior to creating
+ * the VF interface. So the best we can do is take a guess as to what
+ * the user might want.
+ *
+ * We have two policies for vector allocation:
+ * 1. If num_alloc_vfs is from 1 to 16, we treat this as a small number
+ * of VFs used for NFV appliances. Since this is a special case, we try
+ * to assign the maximum number of vectors per VF (65) where possible,
+ * based on the resource-determination algorithm above.
+ * 2. If num_alloc_vfs is from 17 to 256, this is a large number of
+ * regular VFs not used for any special purpose. Hence, try to grab the
+ * default number of interrupt vectors (5, as supported by the AVF
+ * driver).
+ */
+ if (pf->num_alloc_vfs <= 16) {
+ num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ ICE_MAX_INTR_PER_VF,
+ ICE_MIN_INTR_PER_VF);
+ } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
+ num_msix = ice_determine_res(pf, pf->num_avail_hw_msix,
+ ICE_DFLT_INTR_PER_VF,
+ ICE_MIN_INTR_PER_VF);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Number of VFs %d exceeds max VF count %d\n",
+ pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
+ return -EIO;
+ }
+
+ if (!num_msix)
+ return -EIO;
+
+ /* Grab from the common pool.
+ * Start by requesting the default number of queues (4, as supported by
+ * the AVF driver). Note that the main difference between queues and
+ * vectors is that the latter can only be reserved at init time, while
+ * queues can be requested by the VF at runtime through virtchnl. That
+ * is the reason we start by reserving only a few queues.
+ */
+ num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
+ ICE_MIN_QS_PER_VF);
+
+ num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
+ ICE_MIN_QS_PER_VF);
+
+ if (!num_txq || !num_rxq)
+ return -EIO;
+
+ /* The AVF driver works only with queue pairs, which means it expects
+ * an equal number of Rx and Tx queues, so take the minimum of the
+ * available Tx and Rx queues.
+ */
+ pf->num_vf_qps = min_t(int, num_txq, num_rxq);
+ pf->num_vf_msix = num_msix;
+
+ return 0;
+}
+
+/**
+ * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
+ * @vf: pointer to the VF structure
+ *
+ * Clean up a VF after the hardware reset is finished. Expects the caller to
+ * have verified that the reset finished properly and that the minimum amount
+ * of wait time has passed. Reallocates VF resources to make the VF state
+ * active again.
+ */
+static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw;
+ u32 reg;
+
+ hw = &pf->hw;
+
+ /* PF software completes the flow by notifying VF that reset flow is
+ * completed. This is done by enabling hardware by clearing the reset
+ * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
+ * register to VFR completed (done at the end of this function).
+ * By doing this we allow HW to access VF memory at any point. If we
+ * did it any sooner, HW could access memory while it was being freed
+ * in ice_free_vf_res(), causing an IOMMU fault.
+ *
+ * On the other hand, this needs to be done ASAP, because the VF driver
+ * is waiting for this to happen and may report a timeout. It's
+ * harmless, but it gets logged into Guest OS kernel log, so best avoid
+ * it.
+ */
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* reallocate VF resources to finish resetting the VSI state */
+ if (!ice_alloc_vf_res(vf)) {
+ ice_ena_vf_mappings(vf);
+ set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ vf->num_vlan = 0;
+ }
+
+ /* Tell the VF driver the reset is done. This needs to be done only
+ * after VF has been fully initialized, because the VF driver may
+ * request resources immediately after setting this flag.
+ */
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
+
+/**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * First, tell the hardware to reset each VF, then do all the waiting in one
+ * chunk, and finally finish restoring each VF after the wait. This is useful
+ * during PF routines which need to reset all VFs, as otherwise it must perform
+ * these resets in a serialized fashion.
+ *
+ * Returns true if any VFs were reset, and false otherwise.
+ */
+bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
+{
+ struct ice_hw *hw = &pf->hw;
+ int v, i;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!pf->num_alloc_vfs)
+ return false;
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ return false;
+
+ /* Begin reset on all VFs at once */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_trigger_vf_reset(&pf->vf[v], is_vflr);
+
+ /* Call Disable LAN Tx queue AQ call with VFR bit set and 0
+ * queues to inform Firmware about VF reset.
+ */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
+ ICE_VF_RESET, v, NULL);
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
+ * sequence to make sure that it has completed. We'll keep track of
+ * the VFs using a simple iterator that increments once that VF has
+ * finished resetting.
+ */
+ for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ usleep_range(10000, 20000);
+
+ /* Check each VF in sequence */
+ while (v < pf->num_alloc_vfs) {
+ struct ice_vf *vf = &pf->vf[v];
+ u32 reg;
+
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & VPGEN_VFRSTAT_VFRD_M))
+ break;
+
+ /* If the current VF has finished resetting, move on
+ * to the next VF in sequence.
+ */
+ v++;
+ }
+ }
+
+ /* Display a warning if at least one VF didn't manage to reset in
+ * time, but continue on with the operation.
+ */
+ if (v < pf->num_alloc_vfs)
+ dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
+ usleep_range(10000, 20000);
+
+ /* free VF resources to begin resetting the VSI state */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_free_vf_res(&pf->vf[v]);
+
+ if (ice_check_avail_res(pf)) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate VF resources, try with fewer number of VFs\n");
+ return false;
+ }
+
+ /* Finish the reset on each VF */
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ ice_cleanup_and_realloc_vf(&pf->vf[v]);
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
+
+/**
+ * ice_reset_vf - Reset a particular VF
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ *
+ * Returns true if the VF is reset, false otherwise.
+ */
+static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_hw *hw = &pf->hw;
+ bool rsd = false;
+ u32 reg;
+ int i;
+
+ /* If the VFs have been disabled, this means something else is
+ * resetting the VF, so we shouldn't continue.
+ */
+ if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ return false;
+
+ ice_trigger_vf_reset(vf, is_vflr);
+
+ if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET,
+ vf->vf_id);
+ ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]);
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ } else {
+ /* Call Disable LAN Tx queue AQ call even when queues are not
+ * enabled. This is needed for successful completion of VFR.
+ */
+ ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0,
+ NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL);
+ }
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 10; i++) {
+ /* VF reset requires driver to first reset the VF and then
+ * poll the status register to make sure that the reset
+ * completed successfully.
+ */
+ usleep_range(10000, 20000);
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & VPGEN_VFRSTAT_VFRD_M) {
+ rsd = true;
+ break;
+ }
+ }
+
+ /* Display a warning if the VF didn't manage to reset in time, but
+ * continue on with the operation anyway.
+ */
+ if (!rsd)
+ dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ vf->vf_id);
+
+ usleep_range(10000, 20000);
+
+ /* free VF resources to begin resetting the VSI state */
+ ice_free_vf_res(vf);
+
+ ice_cleanup_and_realloc_vf(vf);
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
+
+/**
+ * ice_vc_notify_link_state - Inform all VFs on a PF of link status
+ * @pf: pointer to the PF structure
+ */
+void ice_vc_notify_link_state(struct ice_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * ice_vc_notify_reset - Send pending reset message to all VFs
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ */
+void ice_vc_notify_reset(struct ice_pf *pf)
+{
+ struct virtchnl_pf_event pfe;
+
+ if (!pf->num_alloc_vfs)
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
+ (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+}
+
+/**
+ * ice_vc_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_vc_notify_vf_reset(struct ice_vf *vf)
+{
+ struct virtchnl_pf_event pfe;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return;
+
+ /* verify if the VF is in either init or active before proceeding */
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
+ (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * ice_alloc_vfs - Allocate and set up VFs resources
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of VFs to allocate
+ */
+static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vfs;
+ int i, ret;
+
+ /* Disable global interrupt 0 so we don't try to handle the VFLR. */
+ wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
+ ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+
+ ice_flush(hw);
+
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ pf->num_alloc_vfs = 0;
+ goto err_unroll_intr;
+ }
+ /* allocate memory */
+ vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
+ GFP_KERNEL);
+ if (!vfs) {
+ ret = -ENOMEM;
+ goto err_unroll_sriov;
+ }
+ pf->vf = vfs;
+
+ /* apply default profile */
+ for (i = 0; i < num_alloc_vfs; i++) {
+ vfs[i].pf = pf;
+ vfs[i].vf_sw_id = pf->first_sw;
+ vfs[i].vf_id = i;
+
+ /* assign default capabilities */
+ set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+ vfs[i].spoofchk = true;
+
+ /* Set this state so that PF driver does VF vector assignment */
+ set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
+ }
+ pf->num_alloc_vfs = num_alloc_vfs;
+
+ /* VF resources get allocated during reset */
+ if (!ice_reset_all_vfs(pf, false))
+ goto err_unroll_sriov;
+
+ goto err_unroll_intr;
+
+err_unroll_sriov:
+ pci_disable_sriov(pf->pdev);
+err_unroll_intr:
+ /* rearm interrupts here */
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+ return ret;
+}
+
+/**
+ * ice_pf_state_is_nominal - checks the PF for nominal state
+ * @pf: pointer to PF to check
+ *
+ * Check the PF's state for a collection of bits that would indicate
+ * the PF is in a state that would inhibit normal operation for
+ * driver functionality.
+ *
+ * Returns true if PF is in a nominal state.
+ * Returns false otherwise
+ */
+static bool ice_pf_state_is_nominal(struct ice_pf *pf)
+{
+ DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
+
+ if (!pf)
+ return false;
+
+ bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
+ if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_pci_sriov_ena - Enable or change number of VFs
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to allocate
+ */
+static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
+{
+ int pre_existing_vfs = pci_num_vf(pf->pdev);
+ struct device *dev = &pf->pdev->dev;
+ int err;
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+ return -EBUSY;
+ }
+
+ if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+ dev_err(dev, "This device is not capable of SR-IOV\n");
+ return -ENODEV;
+ }
+
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ ice_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ return num_vfs;
+
+ if (num_vfs > pf->num_vfs_supported) {
+ dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
+ num_vfs, pf->num_vfs_supported);
+ return -ENOTSUPP;
+ }
+
+ dev_info(dev, "Allocating %d VFs\n", num_vfs);
+ err = ice_alloc_vfs(pf, num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+ return err;
+ }
+
+ set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+ return num_vfs;
+}
+
+/**
+ * ice_sriov_configure - Enable or change number of VFs via sysfs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * This function is called when the user updates the number of VFs in sysfs.
+ */
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (num_vfs)
+ return ice_pci_sriov_ena(pf, num_vfs);
+
+ if (!pci_vfs_assigned(pdev)) {
+ ice_free_vfs(pf);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
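+
+/* Usage (illustrative): writing to the standard PCI sysfs attribute invokes
+ * this callback, e.g.
+ *	echo 4 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs
+ * enables four VFs, and writing 0 frees them again; the BDF here is only an
+ * example.
+ */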
+
+/**
+ * ice_process_vflr_event - Free VF resources via IRQ calls
+ * @pf: pointer to the PF structure
+ *
+ * called from the VFLR IRQ handler to
+ * free up VF resources and state variables
+ */
+void ice_process_vflr_event(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int vf_id;
+ u32 reg;
+
+ if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ !pf->num_alloc_vfs)
+ return;
+
+ /* Re-enable the VFLR interrupt cause here, before looking for which
+ * VF got reset. Otherwise, if another VF gets a reset while the
+ * first one is being processed, that interrupt will be lost, and
+ * that VF will be stuck in reset forever.
+ */
+ reg = rd32(hw, PFINT_OICR_ENA);
+ reg |= PFINT_OICR_VFLR_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ ice_flush(hw);
+
+ clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+ struct ice_vf *vf = &pf->vf[vf_id];
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
+ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+ if (reg & BIT(bit_idx))
+ /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ ice_reset_vf(vf, true);
+ }
+}
+
+/**
+ * ice_vc_dis_vf - Disable a given VF via SW reset
+ * @vf: pointer to the VF info
+ *
+ * Disable the VF through a SW reset
+ */
+static void ice_vc_dis_vf(struct ice_vf *vf)
+{
+ ice_vc_notify_vf_reset(vf);
+ ice_reset_vf(vf, false);
+}
+
+/**
+ * ice_vc_send_msg_to_vf - Send message to VF
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ */
+static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum ice_status v_retval, u8 *msg, u16 msglen)
+{
+ enum ice_status aq_ret;
+ struct ice_pf *pf;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return -EINVAL;
+
+ pf = vf->pf;
+
+ /* single place to detect unsuccessful return values */
+ if (v_retval) {
+ vf->num_inval_msgs++;
+ dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+ vf->vf_id, v_opcode, v_retval);
+ if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
+ dev_err(&pf->pdev->dev,
+ "Number of invalid messages exceeded for VF %d\n",
+ vf->vf_id);
+ dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ return -EIO;
+ }
+ } else {
+ vf->num_valid_msgs++;
+ /* reset the invalid counter if a valid message is received */
+ vf->num_inval_msgs = 0;
+ }
+
+ aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.mailboxq.sq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_get_ver_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request the API version used by the PF
+ */
+static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_version_info info = {
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
+ };
+
+ vf->vf_ver = *(struct virtchnl_version_info *)msg;
+ /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+ if (VF_IS_V10(&vf->vf_ver))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS,
+ (u8 *)&info,
+ sizeof(struct virtchnl_version_info));
+}
+
+/**
+ * ice_vc_get_vf_res_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request its resources
+ */
+static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vf_resource *vfres = NULL;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int len = 0;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_vf_resource);
+
+ vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
+ if (!vfres) {
+ aq_ret = ICE_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ if (VF_IS_V11(&vf->vf_ver))
+ vf->driver_caps = *(u32 *)msg;
+ else
+ vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi->info.pvid)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ } else {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ else
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ }
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+
+ vfres->num_vsis = 1;
+ /* Tx and Rx queue counts are equal for a VF */
+ vfres->num_queue_pairs = vsi->num_txq;
+ vfres->max_vectors = pf->num_vf_msix;
+ vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+ vf->dflt_lan_addr.addr);
+
+ set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret,
+ (u8 *)vfres, len);
+
+ devm_kfree(&pf->pdev->dev, vfres);
+ return ret;
+}
+
+/**
+ * ice_vc_reset_vf_msg
+ * @vf: pointer to the VF info
+ *
+ * called from the VF to reset itself;
+ * unlike other virtchnl messages, the PF driver
+ * doesn't send a response back to the VF
+ */
+static void ice_vc_reset_vf_msg(struct ice_vf *vf)
+{
+ if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ ice_reset_vf(vf, false);
+}
+
+/**
+ * ice_find_vsi_from_id
+ * @pf: the PF structure to search for the VSI
+ * @id: id of the VSI it is searching for
+ *
+ * searches for the VSI with the given id
+ */
+static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
+ * ice_vc_isvalid_vsi_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI id
+ *
+ * check for the valid VSI id
+ */
+static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_find_vsi_from_id(pf, vsi_id);
+
+ return (vsi && (vsi->vf_id == vf->vf_id));
+}
+
+/**
+ * ice_vc_isvalid_q_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @qid: VSI relative queue id
+ *
+ * check for the valid queue id
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+{
+ struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+ /* allocated Tx and Rx queues should always be equal for a VF VSI */
+ return (vsi && (qid < vsi->alloc_txq));
+}
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct ice_vsi *vsi = NULL;
+ enum ice_status aq_ret;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ ret = ice_set_rss(vsi, vrk->key, NULL, 0);
+ aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ struct ice_vsi *vsi = NULL;
+ enum ice_status aq_ret;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE);
+ aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_get_stats_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to get VSI stats
+ */
+static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_eth_stats stats;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ memset(&stats, 0, sizeof(struct ice_eth_stats));
+ ice_update_eth_stats(vsi);
+
+ stats = vsi->eth_stats;
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
+ (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * ice_vc_ena_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ */
+static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!vqs->rx_queues && !vqs->tx_queues) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable only Rx rings; Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ if (ice_vsi_start_rx_rings(vsi))
+ aq_ret = ICE_ERR_PARAM;
+
+ /* Set flag to indicate that queues are enabled */
+ if (!aq_ret)
+ set_bit(ICE_VF_STATE_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific
+ * queue(s)
+ */
+static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!vqs->rx_queues && !vqs->tx_queues) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop tx rings on VSI %d\n",
+ vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ }
+
+ if (ice_vsi_stop_rx_rings(vsi)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop rx rings on VSI %d\n",
+ vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ }
+
+ /* Clear enabled queues flag */
+ if (!aq_ret)
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ to queue map
+ */
+static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_irq_map_info *irqmap_info =
+ (struct virtchnl_irq_map_info *)msg;
+ u16 vsi_id, vsi_q_id, vector_id;
+ struct virtchnl_vector_map *map;
+ struct ice_vsi *vsi = NULL;
+ struct ice_pf *pf = vf->pf;
+ enum ice_status aq_ret = 0;
+ unsigned long qmap;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < irqmap_info->num_vectors; i++) {
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* validate msg params */
+ if (!(vector_id < pf->hw.func_caps.common_cap
+ .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* look out for invalid queue indexes: each set bit in
+ * rxq_map selects a VSI Rx queue for this vector
+ */
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
+ struct ice_q_vector *q_vector;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ q_vector = vsi->q_vectors[i];
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
+ struct ice_q_vector *q_vector;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ q_vector = vsi->q_vectors[i];
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ }
+ }
+
+ if (vsi)
+ ice_vsi_cfg_msix(vsi);
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the Rx/Tx queues
+ */
+static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ enum ice_status aq_ret = 0;
+ struct ice_vsi *vsi;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != qpi->txq.queue_id ||
+ !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ /* copy Tx queue info from VF into VSI */
+ vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[i]->count = qpi->txq.ring_len;
+ /* copy Rx queue info from VF into vsi */
+ vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+ vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+ if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ vsi->rx_buf_len = qpi->rxq.databuffer_size;
+ if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
+ qpi->rxq.max_pkt_size < 64) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+ vsi->max_frame = qpi->rxq.max_pkt_size;
+ }
+
+ /* The VF may configure fewer queue pairs than were allocated (or
+ * than the default allocation), so update the VSI with the new count
+ */
+ vsi->num_txq = qci->num_queue_pairs;
+ vsi->num_rxq = qci->num_queue_pairs;
+
+ if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
+ aq_ret = 0;
+ else
+ aq_ret = ICE_ERR_PARAM;
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_is_vf_trusted
+ * @vf: pointer to the VF info
+ */
+static bool ice_is_vf_trusted(struct ice_vf *vf)
+{
+ return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_can_vf_change_mac
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise
+ */
+static bool ice_can_vf_change_mac(struct ice_vf *vf)
+{
+ /* If the VF MAC address has been set administratively (via the
+ * ndo_set_vf_mac command), then deny permission to the VF to
+ * add/delete unicast MAC addresses, unless the VF is trusted
+ */
+ if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_handle_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @set: true if mac filters are being set, false otherwise
+ *
+ * add guest mac address filter
+ */
+static int
+ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_pf *pf = vf->pf;
+ enum virtchnl_ops vc_op;
+ enum ice_status ret;
+ LIST_HEAD(mac_list);
+ struct ice_vsi *vsi;
+ int mac_count = 0;
+ int i;
+
+ if (set)
+ vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
+ else
+ vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ if (set && !ice_is_vf_trusted(vf) &&
+ (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "Can't add more MAC addresses, because VF is not trusted, switch the VF to trusted mode in order to add more functionalities\n");
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *maddr = al->list[i].addr;
+
+ if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
+ is_broadcast_ether_addr(maddr)) {
+ if (set) {
+ /* VF is trying to add filters that the PF
+ * already added. Just continue.
+ */
+ dev_info(&pf->pdev->dev,
+ "mac %pM already set for VF %d\n",
+ maddr, vf->vf_id);
+ continue;
+ } else {
+ /* VF can't remove dflt_lan_addr/bcast mac */
+ dev_err(&pf->pdev->dev,
+ "can't remove mac %pM for VF %d\n",
+ maddr, vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+ }
+
+ /* check for the invalid cases and bail if necessary */
+ if (is_zero_ether_addr(maddr)) {
+ dev_err(&pf->pdev->dev,
+ "invalid mac %pM provided for VF %d\n",
+ maddr, vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ if (is_unicast_ether_addr(maddr) &&
+ !ice_can_vf_change_mac(vf)) {
+ dev_err(&pf->pdev->dev,
+ "can't change unicast mac for untrusted VF %d\n",
+ vf->vf_id);
+ ret = ICE_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ /* get here if maddr is multicast or if VF can change mac */
+ if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
+ ret = ICE_ERR_NO_MEMORY;
+ goto handle_mac_exit;
+ }
+ mac_count++;
+ }
+
+ /* program the updated filter list */
+ if (set)
+ ret = ice_add_mac(&pf->hw, &mac_list);
+ else
+ ret = ice_remove_mac(&pf->hw, &mac_list);
+
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "can't update mac filters for VF %d, error %d\n",
+ vf->vf_id, ret);
+ } else {
+ if (set)
+ vf->num_mac += mac_count;
+ else
+ vf->num_mac -= mac_count;
+ }
+
+handle_mac_exit:
+ ice_free_fltr_list(&pf->pdev->dev, &mac_list);
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0);
+}
+
+/**
+ * ice_vc_add_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * add guest MAC address filter
+ */
+static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_del_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove guest MAC address filter
+ */
+static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_request_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, PF will reset the VF and
+ * return 0. If unsuccessful, PF will send message informing VF of number of
+ * available queue pairs via virtchnl message response to VF.
+ */
+static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ int req_queues = vfres->num_queue_pairs;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ int tx_rx_queue_left;
+ int cur_queues;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ cur_queues = pf->num_vf_qps;
+ tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ if (req_queues <= 0) {
+ dev_err(&pf->pdev->dev,
+ "VF %d tried to request %d queues. Ignoring.\n",
+ vf->vf_id, req_queues);
+ } else if (req_queues > ICE_MAX_QS_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "VF %d tried to request more than %d queues.\n",
+ vf->vf_id, ICE_MAX_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
+ } else if (req_queues - cur_queues > tx_rx_queue_left) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but only %d left.\n",
+ vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+ vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
+ } else {
+ /* request is successful, then reset VF */
+ vf->num_req_qs = req_queues;
+ ice_vc_dis_vf(vf);
+ dev_info(&pf->pdev->dev,
+ "VF %d granted request of %d queues.\n",
+ vf->vf_id, req_queues);
+ return 0;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+ aq_ret, (u8 *)vfres, sizeof(*vfres));
+}
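+
+/* Illustrative outcomes of the request handling above, assuming
+ * cur_queues = 4, tx_rx_queue_left = 2 and ICE_MAX_QS_PER_VF = 16
+ * (values chosen for demonstration only):
+ *   req_queues = 0  -> request ignored, unchanged vfres echoed back
+ *   req_queues = 32 -> capped, num_queue_pairs = 16 returned
+ *   req_queues = 8  -> needs 4 more but only 2 left, 6 returned
+ *   req_queues = 6  -> granted; VF is reset to pick up the new count
+ */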
+
+/**
+ * ice_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN id being set
+ * @qos: priority setting
+ * @vlan_proto: VLAN protocol
+ *
+ * program VF Port VLAN id and/or qos
+ */
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
+{
+ u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ if (vlan_id > ICE_MAX_VLANID || qos > 7) {
+ dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+ return -EINVAL;
+ }
+
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ return -EPROTONOSUPPORT;
+ }
+
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
+ /* duplicate request, so just return success */
+ dev_info(&pf->pdev->dev,
+ "Duplicate pvid %d request\n", vlanprio);
+ return ret;
+ }
+
+ /* If pvid, then remove all filters on the old VLAN */
+ if (vsi->info.pvid)
+ ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
+
+ if (vlan_id || qos) {
+ ret = ice_vsi_set_pvid(vsi, vlanprio);
+ if (ret)
+ goto error_set_pvid;
+ } else {
+ ice_vsi_kill_pvid(vsi);
+ }
+
+ if (vlan_id) {
+ dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan_id, qos, vf_id);
+
+ /* add new VLAN filter for each MAC */
+ ret = ice_vsi_add_vlan(vsi, vlan_id);
+ if (ret)
+ goto error_set_pvid;
+ }
+
+ /* The Port VLAN needs to be saved across resets the same as the
+ * default LAN MAC address.
+ */
+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+
+error_set_pvid:
+ return ret;
+}
+
+/**
+ * ice_vc_process_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @add_v: Add VLAN if true, otherwise delete VLAN
+ *
+ * Process virtchnl op to add or remove programmed guest VLAN id
+ */
+static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
+{
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add_v && !ice_is_vf_trusted(vf) &&
+ vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ dev_info(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n");
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
+ aq_ret = ICE_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+ goto error_param;
+ }
+ }
+
+ vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id);
+ if (!vsi) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vsi->info.pvid) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
+ dev_err(&pf->pdev->dev,
+ "%sable VLAN stripping failed for VSI %i\n",
+ add_v ? "en" : "dis", vsi->vsi_num);
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add_v) {
+ for (i = 0; i < vfl->num_elements; i++) {
+ u16 vid = vfl->vlan_id[i];
+
+ if (!ice_vsi_add_vlan(vsi, vid)) {
+ vf->num_vlan++;
+ set_bit(vid, vsi->active_vlans);
+
+ /* Enable VLAN pruning when VLAN 0 is added */
+ if (unlikely(!vid))
+ if (ice_cfg_vlan_pruning(vsi, true))
+ aq_ret = ICE_ERR_PARAM;
+ } else {
+ aq_ret = ICE_ERR_PARAM;
+ }
+ }
+ } else {
+ for (i = 0; i < vfl->num_elements; i++) {
+ u16 vid = vfl->vlan_id[i];
+
+ /* Make sure ice_vsi_kill_vlan is successful before
+ * updating VLAN information
+ */
+ if (!ice_vsi_kill_vlan(vsi, vid)) {
+ vf->num_vlan--;
+ clear_bit(vid, vsi->active_vlans);
+
+ /* Disable VLAN pruning when removing VLAN 0 */
+ if (unlikely(!vid))
+ ice_cfg_vlan_pruning(vsi, false);
+ }
+ }
+ }
+
+error_param:
+ /* send the response to the VF */
+ if (add_v)
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret,
+ NULL, 0);
+ else
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_add_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Add and program guest VLAN id
+ */
+static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_remove_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove programmed guest VLAN id
+ */
+static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_ena_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Enable VLAN header stripping for a given VF
+ */
+static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
+{
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vsi_manage_vlan_stripping(vsi, true))
+ aq_ret = ICE_ERR_AQ_ERROR;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ aq_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Disable VLAN header stripping for a given VF
+ */
+static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
+{
+ enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vsi_manage_vlan_stripping(vsi, false))
+ aq_ret = ICE_ERR_AQ_ERROR;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ aq_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_process_vf_msg - Process request from VF
+ * @pf: pointer to the PF structure
+ * @event: pointer to the AQ event
+ *
+ * called from the common asq/arq handler to
+ * process request from VF
+ */
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
+ s16 vf_id = le16_to_cpu(event->desc.retval);
+ u16 msglen = event->msg_len;
+ u8 *msg = event->msg_buf;
+ struct ice_vf *vf = NULL;
+ int err = 0;
+
+ if (vf_id >= pf->num_alloc_vfs) {
+ err = -EINVAL;
+ goto error_handler;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ /* Check if VF is disabled. */
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
+ err = -EPERM;
+ goto error_handler;
+ }
+
+ /* Perform basic checks on the msg */
+ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+ if (err) {
+ if (err == VIRTCHNL_ERR_PARAM)
+ err = -EPERM;
+ else
+ err = -EINVAL;
+ goto error_handler;
+ }
+
+ /* Perform additional checks specific to RSS and Virtchnl */
+ if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
+ struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
+ err = -EINVAL;
+ } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
+ err = -EINVAL;
+ }
+
+error_handler:
+ if (err) {
+ ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0);
+ dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
+ vf_id, v_opcode, msglen, err);
+ return;
+ }
+
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ err = ice_vc_get_ver_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ err = ice_vc_get_vf_res_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ ice_vc_reset_vf_msg(vf);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ err = ice_vc_add_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ err = ice_vc_del_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ err = ice_vc_cfg_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ err = ice_vc_ena_qs_msg(vf, msg);
+ ice_vc_notify_vf_link_state(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ err = ice_vc_dis_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ err = ice_vc_request_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ err = ice_vc_cfg_irq_map_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ err = ice_vc_config_rss_key(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ err = ice_vc_config_rss_lut(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ err = ice_vc_get_stats_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ err = ice_vc_add_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ err = ice_vc_remove_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ err = ice_vc_ena_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ err = ice_vc_dis_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
+ v_opcode, vf_id);
+ err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
+ NULL, 0);
+ break;
+ }
+ if (err) {
+ /* Errors from the opcode handlers are only logged here; each
+ * handler has already sent its own response to the VF.
+ */
+ dev_info(&pf->pdev->dev,
+ "PF failed to honor VF %d, opcode %d, error %d\n",
+ vf_id, v_opcode, err);
+ }
+}
+
+/**
+ * ice_get_vf_cfg
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
+ */
+int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
+ struct ifla_vf_info *ivi)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ ivi->vf = vf_id;
+ ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
+
+ /* VF configuration for VLAN and applicable QoS */
+ ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
+ ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
+ ICE_VLAN_PRIORITY_S;
+
+ ivi->trusted = vf->trusted;
+ ivi->spoofchk = vf->spoofchk;
+ if (!vf->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->max_tx_rate = vf->tx_rate;
+ ivi->min_tx_rate = 0;
+ return 0;
+}
+
+/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_ctx ctx = { 0 };
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+ int status;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
+ ena ? "ON" : "OFF");
+ return 0;
+ }
+
+ ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+ if (ena) {
+ ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
+ }
+
+ status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL);
+ if (status) {
+ dev_dbg(&pf->pdev->dev,
+ "Error %d, failed to update VSI* parameters\n", status);
+ return -EIO;
+ }
+
+ vf->spoofchk = ena;
+ vsi->info.sec_flags = ctx.info.sec_flags;
+ vsi->info.sw_flags2 = ctx.info.sw_flags2;
+
+ return status;
+}
+
+/**
+ * ice_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: mac address
+ *
+ * program VF mac address
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ netdev_err(netdev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
+ netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+ return -EINVAL;
+ }
+
+ /* copy mac into dflt_lan_addr and trigger a VF reset. The reset
+ * flow will use the updated dflt_lan_addr and add a MAC filter
+ * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
+ * set the MAC address for this VF.
+ */
+ ether_addr_copy(vf->dflt_lan_addr.addr, mac);
+ vf->pf_set_mac = true;
+ netdev_info(netdev,
+ "mac on VF %d set to %pM\n. VF driver will be reinitialized\n",
+ vf_id, mac);
+
+ ice_vc_dis_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_trust
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @trusted: Boolean value to enable/disable trusted VF
+ *
+ * Enable or disable a given VF as trusted
+ */
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ /* Check if already trusted */
+ if (trusted == vf->trusted)
+ return 0;
+
+ vf->trusted = trusted;
+ ice_vc_dis_vf(vf);
+ dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+ vf_id, trusted ? "" : "un");
+
+ return 0;
+}
+
+/**
+ * ice_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link_state: required link state
+ *
+ * Set VF's link state, irrespective of physical link state status
+ */
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct virtchnl_pf_event pfe = { 0 };
+ struct ice_link_status *ls;
+ struct ice_vf *vf;
+ struct ice_hw *hw;
+
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+ hw = &pf->hw;
+ ls = &pf->hw.port_info->phy.link_info;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "vf %d in reset. Try again.\n", vf_id);
+ return -EBUSY;
+ }
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ switch (link_state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (vf->link_forced)
+ ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
+ else
+ ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
+
+ /* Notify the VF of its new link state */
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ sizeof(pfe), NULL);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
new file mode 100644
index 000000000000..10131e0180f9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_PF_H_
+#define _ICE_VIRTCHNL_PF_H_
+#include "ice.h"
+
+#define ICE_MAX_VLANID 4095
+#define ICE_VLAN_PRIORITY_S 12
+#define ICE_VLAN_M 0xFFF
+#define ICE_PRIORITY_M 0x7000
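+
+/* Worked example of the VLAN TCI packing used with these masks
+ * (illustrative values): vlan_id = 100 (0x064) and qos = 3 give
+ *   vlanprio = 100 | (3 << ICE_VLAN_PRIORITY_S) = 0x3064
+ *   vlan_id  = 0x3064 & ICE_VLAN_M              = 100
+ *   qos      = (0x3064 & ICE_PRIORITY_M) >> ICE_VLAN_PRIORITY_S = 3
+ */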
+
+/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
+#define ICE_MAX_VLAN_PER_VF 8
+#define ICE_MAX_MACADDR_PER_VF 12
+
+/* Malicious Driver Detection */
+#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3
+#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
+
+/* Static VF transaction/status register def */
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_M 0x20
+
+/* Specific VF states */
+enum ice_vf_states {
+ ICE_VF_STATE_INIT = 0,
+ ICE_VF_STATE_ACTIVE,
+ ICE_VF_STATE_ENA,
+ ICE_VF_STATE_DIS,
+ ICE_VF_STATE_MC_PROMISC,
+ ICE_VF_STATE_UC_PROMISC,
+ /* state to indicate if PF needs to do vector assignment for VF.
+ * This needs to be set during first time VF initialization or later
+ * when VF asks for more Vectors through virtchnl OP.
+ */
+ ICE_VF_STATE_CFG_INTR,
+ ICE_VF_STATES_NBITS
+};
+
+/* VF capabilities */
+enum ice_virtchnl_cap {
+ ICE_VIRTCHNL_VF_CAP_L2 = 0,
+ ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
+};
+
+/* VF information structure */
+struct ice_vf {
+ struct ice_pf *pf;
+
+ s16 vf_id; /* VF id in the PF space */
+ u32 driver_caps; /* reported by VF driver */
+ int first_vector_idx; /* first vector index of this VF */
+ struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */
+ struct virtchnl_version_info vf_ver;
+ struct virtchnl_ether_addr dflt_lan_addr;
+ u16 port_vlan_id;
+ u8 pf_set_mac; /* VF MAC address set by VMM admin */
+ u8 trusted;
+ u16 lan_vsi_idx; /* index into PF struct */
+ u16 lan_vsi_num; /* ID as used by firmware */
+ u64 num_mdd_events; /* number of mdd events detected */
+ u64 num_inval_msgs; /* number of continuous invalid msgs */
+ u64 num_valid_msgs; /* number of valid msgs detected */
+ unsigned long vf_caps; /* vf's adv. capabilities */
+ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ u8 link_forced;
+ u8 link_up; /* only valid if VF link is forced */
+ u8 spoofchk;
+ u16 num_mac;
+ u16 num_vlan;
+ u8 num_req_qs; /* num of queue pairs requested by VF */
+};
+
+#ifdef CONFIG_PCI_IOV
+void ice_process_vflr_event(struct ice_pf *pf);
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
+ struct ifla_vf_info *ivi);
+
+void ice_free_vfs(struct ice_pf *pf);
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_vc_notify_link_state(struct ice_pf *pf);
+void ice_vc_notify_reset(struct ice_pf *pf);
+bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
+
+int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto);
+
+int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
+
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+#else /* CONFIG_PCI_IOV */
+#define ice_process_vflr_event(pf) do {} while (0)
+#define ice_free_vfs(pf) do {} while (0)
+#define ice_vc_process_vf_msg(pf, event) do {} while (0)
+#define ice_vc_notify_link_state(pf) do {} while (0)
+#define ice_vc_notify_reset(pf) do {} while (0)
+
+static inline bool
+ice_reset_all_vfs(struct ice_pf __always_unused *pf,
+ bool __always_unused is_vflr)
+{
+ return true;
+}
+
+static inline int
+ice_sriov_configure(struct pci_dev __always_unused *pdev,
+ int __always_unused num_vfs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_mac(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_get_vf_cfg(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_info __always_unused *ivi)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_trust(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused trusted)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u16 __always_unused vid,
+ u8 __always_unused qos, __be16 __always_unused v_proto)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused ena)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_link_state(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a32c576c1e65..5df88ad8ac81 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = {
.priority = 0
};
#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void igb_netpoll(struct net_device *);
-#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
@@ -243,7 +239,7 @@ static struct pci_driver igb_driver = {
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
@@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = igb_ndo_set_vf_trust,
.ndo_get_vf_config = igb_ndo_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = igb_netpoll,
-#endif
.ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features,
.ndo_fdb_add = igb_ndo_fdb_add,
@@ -9053,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void igb_netpoll(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct igb_q_vector *q_vector;
- int i;
-
- for (i = 0; i < adapter->num_q_vectors; i++) {
- q_vector = adapter->q_vector[i];
- if (adapter->flags & IGB_FLAG_HAS_MSIX)
- wr32(E1000_EIMC, q_vector->eims_value);
- else
- igb_irq_disable(adapter);
- napi_schedule(&q_vector->napi);
- }
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
/**
* igb_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -9116,7 +9086,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
pci_ers_result_t result;
- int err;
if (pci_enable_device_mem(pdev)) {
dev_err(&pdev->dev,
@@ -9140,14 +9109,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err);
- /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index e0c989ffb2b3..820d49eb41ab 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -3011,7 +3011,7 @@ module_exit(igbvf_exit_module);
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
/* netdev.c */
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
new file mode 100644
index 000000000000..4387f6ba8e67
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018 Intel Corporation
+
+#
+# Intel(R) I225-LM/I225-V 2.5G Ethernet Controller
+#
+
+obj-$(CONFIG_IGC) += igc.o
+
+igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
new file mode 100644
index 000000000000..cdf18a5d9e08
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_H_
+#define _IGC_H_
+
+#include <linux/kobject.h>
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#include <linux/ethtool.h>
+
+#include <linux/sctp.h>
+
+#define IGC_ERR(args...) pr_err("igc: " args)
+
+#define PFX "igc: "
+
+#include <linux/timecounter.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "igc_hw.h"
+
+/* main */
+extern char igc_driver_name[];
+extern char igc_driver_version[];
+
+/* Interrupt defines */
+#define IGC_START_ITR 648 /* ~6000 ints/sec */
+#define IGC_FLAG_HAS_MSI BIT(0)
+#define IGC_FLAG_QUEUE_PAIRS BIT(4)
+#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
+#define IGC_FLAG_MEDIA_RESET BIT(10)
+#define IGC_FLAG_MAS_ENABLE BIT(12)
+#define IGC_FLAG_HAS_MSIX BIT(13)
+#define IGC_FLAG_VLAN_PROMISC BIT(15)
+
+#define IGC_4K_ITR 980
+#define IGC_20K_ITR 196
+#define IGC_70K_ITR 56
+
+#define IGC_DEFAULT_ITR 3 /* dynamic */
+#define IGC_MAX_ITR_USECS 10000
+#define IGC_MIN_ITR_USECS 10
+#define NON_Q_VECTORS 1
+#define MAX_MSIX_ENTRIES 10
+
+/* TX/RX descriptor defines */
+#define IGC_DEFAULT_TXD 256
+#define IGC_DEFAULT_TX_WORK 128
+#define IGC_MIN_TXD 80
+#define IGC_MAX_TXD 4096
+
+#define IGC_DEFAULT_RXD 256
+#define IGC_MIN_RXD 80
+#define IGC_MAX_RXD 4096
+
+/* Transmit and receive queues */
+#define IGC_MAX_RX_QUEUES 4
+#define IGC_MAX_TX_QUEUES 4
+
+#define MAX_Q_VECTORS 8
+#define MAX_STD_JUMBO_FRAME_SIZE 9216
+
+/* Supported Rx Buffer Sizes */
+#define IGC_RXBUFFER_256 256
+#define IGC_RXBUFFER_2048 2048
+#define IGC_RXBUFFER_3072 3072
+
+#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
+
+/* RX and TX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ * descriptors available in its onboard memory.
+ * Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ * available in host memory.
+ * If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ * descriptors until either it has this many to write back, or the
+ * ITR timer expires.
+ */
+#define IGC_RX_PTHRESH 8
+#define IGC_RX_HTHRESH 8
+#define IGC_TX_PTHRESH 8
+#define IGC_TX_HTHRESH 1
+#define IGC_RX_WTHRESH 4
+#define IGC_TX_WTHRESH 16
+
+#define IGC_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+#define IGC_TS_HDR_LEN 16
+
+#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+
+#if (PAGE_SIZE < 8192)
+#define IGC_MAX_FRAME_BUILD_SKB \
+ (SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
+#else
+#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
+#endif
+
+/* How many Rx buffers do we bundle into one write to the hardware? */
+#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+/* igc_test_staterr - tests bits within Rx descriptor status and error fields */
+static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
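+
+/* Usage sketch (IGC_RXD_STAT_DD, the descriptor-done bit assumed to
+ * come from igc_defines.h, is used here for illustration): a cleanup
+ * loop would stop when the hardware has not yet written a descriptor
+ * back:
+ *
+ *   if (!igc_test_staterr(rx_desc, IGC_RXD_STAT_DD))
+ *           break;
+ */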
+
+enum igc_state_t {
+ __IGC_TESTING,
+ __IGC_RESETTING,
+ __IGC_DOWN,
+ __IGC_PTP_TX_IN_PROGRESS,
+};
+
+enum igc_tx_flags {
+ /* cmd_type flags */
+ IGC_TX_FLAGS_VLAN = 0x01,
+ IGC_TX_FLAGS_TSO = 0x02,
+ IGC_TX_FLAGS_TSTAMP = 0x04,
+
+ /* olinfo flags */
+ IGC_TX_FLAGS_IPV4 = 0x10,
+ IGC_TX_FLAGS_CSUM = 0x20,
+};
+
+enum igc_boards {
+ board_base,
+};
+
+/* The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGC_MAX_TXD_PWR 15
+#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
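+
+/* Worked example (illustrative): a 60000-byte fragment needs
+ * TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 32768) = 2 data
+ * descriptors; DESC_NEEDED additionally reserves headroom for a
+ * worst-case skb of MAX_SKB_FRAGS fragments plus a small fixed
+ * overhead (the "+ 4").
+ */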
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct igc_tx_buffer {
+ union igc_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ u16 gso_segs;
+ __be16 protocol;
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+};
+
+struct igc_rx_buffer {
+ dma_addr_t dma;
+ struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
+};
+
+struct igc_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+ u64 restart_queue2;
+};
+
+struct igc_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 drops;
+ u64 csum_err;
+ u64 alloc_failed;
+};
+
+struct igc_rx_packet_stats {
+ u64 ipv4_packets; /* IPv4 headers processed */
+ u64 ipv4e_packets; /* IPv4E headers with extensions processed */
+ u64 ipv6_packets; /* IPv6 headers processed */
+ u64 ipv6e_packets; /* IPv6E headers with extensions processed */
+ u64 tcp_packets; /* TCP headers processed */
+ u64 udp_packets; /* UDP headers processed */
+ u64 sctp_packets; /* SCTP headers processed */
+ u64 nfs_packets; /* NFS headers processed */
+ u64 other_packets;
+};
+
+struct igc_ring_container {
+ struct igc_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 work_limit; /* total work allowed per interrupt */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+struct igc_ring {
+ struct igc_q_vector *q_vector; /* backlink to q_vector */
+ struct net_device *netdev; /* back pointer to net_device */
+ struct device *dev; /* device for dma mapping */
+ union { /* array of buffer info structs */
+ struct igc_tx_buffer *tx_buffer_info;
+ struct igc_rx_buffer *rx_buffer_info;
+ };
+ void *desc; /* descriptor ring memory */
+ unsigned long flags; /* ring specific flags */
+ void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+ unsigned int size; /* length of desc. ring in bytes */
+
+ u16 count; /* number of desc. in the ring */
+ u8 queue_index; /* logical index of the ring */
+ u8 reg_idx; /* physical index of the ring */
+
+ /* everything past this point are written often */
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+
+ union {
+ /* TX */
+ struct {
+ struct igc_tx_queue_stats tx_stats;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync tx_syncp2;
+ };
+ /* RX */
+ struct {
+ struct igc_rx_queue_stats rx_stats;
+ struct igc_rx_packet_stats pkt_stats;
+ struct u64_stats_sync rx_syncp;
+ struct sk_buff *skb;
+ };
+ };
+} ____cacheline_internodealigned_in_smp;
+
+struct igc_q_vector {
+ struct igc_adapter *adapter; /* backlink */
+ void __iomem *itr_register;
+ u32 eims_value; /* EIMS mask value */
+
+ u16 itr_val;
+ u8 set_itr;
+
+ struct igc_ring_container rx, tx;
+
+ struct napi_struct napi;
+
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[IFNAMSIZ + 9];
+ struct net_device poll_dev;
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+struct igc_mac_addr {
+ u8 addr[ETH_ALEN];
+ u8 queue;
+ u8 state; /* bitmask */
+};
+
+#define IGC_MAC_STATE_DEFAULT 0x1
+#define IGC_MAC_STATE_MODIFIED 0x2
+#define IGC_MAC_STATE_IN_USE 0x4
+
+/* Board specific private data structure */
+struct igc_adapter {
+ struct net_device *netdev;
+
+ unsigned long state;
+ unsigned int flags;
+ unsigned int num_q_vectors;
+
+ struct msix_entry *msix_entries;
+
+ /* TX */
+ u16 tx_work_limit;
+ u32 tx_timeout_count;
+ int num_tx_queues;
+ struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
+
+ /* RX */
+ int num_rx_queues;
+ struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
+
+ struct timer_list watchdog_timer;
+ struct timer_list dma_err_timer;
+ struct timer_list phy_info_timer;
+
+ u16 link_speed;
+ u16 link_duplex;
+
+ u8 port_num;
+
+ u8 __iomem *io_addr;
+ /* Interrupt Throttle Rate */
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
+
+ struct work_struct reset_task;
+ struct work_struct watchdog_task;
+ struct work_struct dma_err_task;
+ bool fc_autoneg;
+
+ u8 tx_timeout_factor;
+
+ int msg_enable;
+ u32 max_frame_size;
+ u32 min_frame_size;
+
+ /* OS defined structs */
+ struct pci_dev *pdev;
+ /* lock for statistics */
+ spinlock_t stats64_lock;
+ struct rtnl_link_stats64 stats64;
+
+ /* structs defined in igc_hw.h */
+ struct igc_hw hw;
+ struct igc_hw_stats stats;
+
+ struct igc_q_vector *q_vector[MAX_Q_VECTORS];
+ u32 eims_enable_mask;
+ u32 eims_other;
+
+ u16 tx_ring_count;
+ u16 rx_ring_count;
+
+ u32 *shadow_vfta;
+
+ u32 rss_queues;
+
+ /* lock for RX network flow classification filter */
+ spinlock_t nfc_lock;
+
+ struct igc_mac_addr *mac_table;
+
+ unsigned long link_check_timeout;
+ struct igc_info ei;
+};
+
+/* igc_desc_unused - calculate if we have unused descriptors */
+static inline u16 igc_desc_unused(const struct igc_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
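+
+/* Example (illustrative): with count = 256, next_to_clean = 10 and
+ * next_to_use = 250, ntc <= ntu so the result is
+ * 256 + 10 - 250 - 1 = 15 unused descriptors; the "- 1" keeps one
+ * slot empty so a full ring can be told apart from an empty one.
+ */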
+
+static inline s32 igc_get_phy_info(struct igc_hw *hw)
+{
+ if (hw->phy.ops.get_phy_info)
+ return hw->phy.ops.get_phy_info(hw);
+
+ return 0;
+}
+
+static inline s32 igc_reset_phy(struct igc_hw *hw)
+{
+ if (hw->phy.ops.reset)
+ return hw->phy.ops.reset(hw);
+
+ return 0;
+}
+
+static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
+{
+ return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+}
+
+enum igc_ring_flags_t {
+ IGC_RING_FLAG_RX_3K_BUFFER,
+ IGC_RING_FLAG_RX_BUILD_SKB_ENABLED,
+ IGC_RING_FLAG_RX_SCTP_CSUM,
+ IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
+ IGC_RING_FLAG_TX_CTX_IDX,
+ IGC_RING_FLAG_TX_DETECT_HANG
+};
+
+#define ring_uses_large_buffer(ring) \
+ test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+
+#define ring_uses_build_skb(ring) \
+ test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
+static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return IGC_RXBUFFER_3072;
+
+ if (ring_uses_build_skb(ring))
+ return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN;
+#endif
+ return IGC_RXBUFFER_2048;
+}
+
+static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return 1;
+#endif
+ return 0;
+}
+
+static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
+{
+ if (hw->phy.ops.read_reg)
+ return hw->phy.ops.read_reg(hw, offset, data);
+
+ return 0;
+}
+
+#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
+
+#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
+
+#define IGC_RX_DESC(R, i) \
+ (&(((union igc_adv_rx_desc *)((R)->desc))[i]))
+#define IGC_TX_DESC(R, i) \
+ (&(((union igc_adv_tx_desc *)((R)->desc))[i]))
+#define IGC_TX_CTXTDESC(R, i) \
+ (&(((struct igc_adv_tx_context_desc *)((R)->desc))[i]))
+
+#endif /* _IGC_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
new file mode 100644
index 000000000000..832da609d9a7
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include <linux/delay.h>
+
+#include "igc_hw.h"
+#include "igc_i225.h"
+#include "igc_mac.h"
+#include "igc_base.h"
+#include "igc.h"
+
+/**
+ * igc_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_set_pcie_completion_timeout(struct igc_hw *hw)
+{
+ u32 gcr = rd32(IGC_GCR);
+ u16 pcie_devctl2;
+ s32 ret_val = 0;
+
+ /* only take action if timeout value is defaulted to 0 */
+ if (gcr & IGC_GCR_CMPL_TMOUT_MASK)
+ goto out;
+
+ /* if capabilities version is type 1 we can write the
+ * timeout of 10ms to 200ms through the GCR register
+ */
+ if (!(gcr & IGC_GCR_CAP_VER2)) {
+ gcr |= IGC_GCR_CMPL_TMOUT_10ms;
+ goto out;
+ }
+
+ /* for version 2 capabilities we need to write the config space
+ * directly in order to set the completion timeout value for
+ * 16ms to 55ms
+ */
+ ret_val = igc_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+ if (ret_val)
+ goto out;
+
+ pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+ ret_val = igc_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
+out:
+ /* disable completion timeout resend */
+ gcr &= ~IGC_GCR_CMPL_TMOUT_RESEND;
+
+ wr32(IGC_GCR, gcr);
+
+ return ret_val;
+}
+
+/**
+ * igc_check_for_link_base - Check for link
+ * @hw: pointer to the HW structure
+ *
+ * If sgmii is enabled, then use the pcs register to determine link, otherwise
+ * use the generic interface for determining link.
+ */
+static s32 igc_check_for_link_base(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = igc_check_for_copper_link(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_reset_hw_base - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state. This is a
+ * function pointer entry point called by the api module.
+ */
+static s32 igc_reset_hw_base(struct igc_hw *hw)
+{
+ s32 ret_val;
+ u32 ctrl;
+
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = igc_disable_pcie_master(hw);
+ if (ret_val)
+ hw_dbg("PCI-E Master disable polling has failed.\n");
+
+ /* set the completion timeout for interface */
+ ret_val = igc_set_pcie_completion_timeout(hw);
+ if (ret_val)
+ hw_dbg("PCI-E Set completion timeout has failed.\n");
+
+ hw_dbg("Masking off all interrupts\n");
+ wr32(IGC_IMC, 0xffffffff);
+
+ wr32(IGC_RCTL, 0);
+ wr32(IGC_TCTL, IGC_TCTL_PSP);
+ wrfl();
+
+ usleep_range(10000, 20000);
+
+ ctrl = rd32(IGC_CTRL);
+
+ hw_dbg("Issuing a global reset to MAC\n");
+ wr32(IGC_CTRL, ctrl | IGC_CTRL_RST);
+
+ ret_val = igc_get_auto_rd_done(hw);
+ if (ret_val) {
+ /* When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ hw_dbg("Auto Read Done did not complete\n");
+ }
+
+ /* Clear any pending interrupt events. */
+ wr32(IGC_IMC, 0xffffffff);
+ rd32(IGC_ICR);
+
+ return ret_val;
+}
+
+/**
+ * igc_get_phy_id_base - Retrieve PHY addr and id
+ * @hw: pointer to the HW structure
+ *
+ * Retrieves the PHY address and ID for both PHYs which do and do not use
+ * the sgmii interface.
+ */
+static s32 igc_get_phy_id_base(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = igc_get_phy_id(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_init_nvm_params_base - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_init_nvm_params_base(struct igc_hw *hw)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 eecd = rd32(IGC_EECD);
+ u16 size;
+
+ size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
+ IGC_EECD_SIZE_EX_SHIFT);
+
+ /* Added to a constant, "size" becomes the left-shift value
+ * for setting word_size.
+ */
+ size += NVM_WORD_SIZE_BASE_SHIFT;
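+
+ /* e.g. (illustrative, assuming NVM_WORD_SIZE_BASE_SHIFT is 6): a
+ * raw size field of 2 yields size = 8 and a word_size of BIT(8) =
+ * 256 words
+ */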
+
+ /* Just in case size is out of range, cap it to the largest
+ * EEPROM size supported
+ */
+ if (size > 15)
+ size = 15;
+
+ nvm->word_size = BIT(size);
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+
+ nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
+ 16 : 8;
+
+ if (nvm->word_size == BIT(15))
+ nvm->page_size = 128;
+
+ return 0;
+}
+
+/**
+ * igc_setup_copper_link_base - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex. Then we check
+ * for link; once link is established, collision distance and flow control
+ * are configured.
+ */
+static s32 igc_setup_copper_link_base(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_CTRL);
+ ctrl |= IGC_CTRL_SLU;
+ ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
+ wr32(IGC_CTRL, ctrl);
+
+ ret_val = igc_setup_copper_link(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_init_mac_params_base - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_init_mac_params_base(struct igc_hw *hw)
+{
+ struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base;
+ struct igc_mac_info *mac = &hw->mac;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ mac->rar_entry_count = IGC_RAR_ENTRIES;
+
+ /* reset */
+ mac->ops.reset_hw = igc_reset_hw_base;
+
+ mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
+ mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;
+
+ /* Allow a single clear of the SW semaphore on I225 */
+ if (mac->type == igc_i225)
+ dev_spec->clear_semaphore_once = true;
+
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface = igc_setup_copper_link_base;
+
+ return 0;
+}
+
+/**
+ * igc_init_phy_params_base - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_init_phy_params_base(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u32 ctrl_ext;
+
+ if (hw->phy.media_type != igc_media_type_copper) {
+ phy->type = igc_phy_none;
+ goto out;
+ }
+
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
+ phy->reset_delay_us = 100;
+
+ ctrl_ext = rd32(IGC_CTRL_EXT);
+
+ /* set lan id */
+ hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
+ IGC_STATUS_FUNC_SHIFT;
+
+ /* Make sure the PHY is in a good state. Several people have reported
+ * firmware leaving the PHY's page select register set to something
+ * other than the default of zero, which causes the PHY ID read to
+ * access something other than the intended register.
+ */
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ hw_dbg("Error resetting the PHY.\n");
+ goto out;
+ }
+
+ ret_val = igc_get_phy_id_base(hw);
+ if (ret_val)
+ return ret_val;
+
+ igc_check_for_link_base(hw);
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case I225_I_PHY_ID:
+ phy->type = igc_phy_i225;
+ break;
+ default:
+ ret_val = -IGC_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+static s32 igc_get_invariants_base(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ u32 link_mode = 0;
+ u32 ctrl_ext = 0;
+ s32 ret_val = 0;
+
+ switch (hw->device_id) {
+ case IGC_DEV_ID_I225_LM:
+ case IGC_DEV_ID_I225_V:
+ mac->type = igc_i225;
+ break;
+ default:
+ return -IGC_ERR_MAC_INIT;
+ }
+
+ hw->phy.media_type = igc_media_type_copper;
+
+ ctrl_ext = rd32(IGC_CTRL_EXT);
+ link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK;
+
+ /* mac initialization and operations */
+ ret_val = igc_init_mac_params_base(hw);
+ if (ret_val)
+ goto out;
+
+ /* NVM initialization */
+ ret_val = igc_init_nvm_params_base(hw);
+ switch (hw->mac.type) {
+ case igc_i225:
+ ret_val = igc_init_nvm_params_i225(hw);
+ break;
+ default:
+ break;
+ }
+
+ /* setup PHY parameters */
+ ret_val = igc_init_phy_params_base(hw);
+ if (ret_val)
+ goto out;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_acquire_phy_base - Acquire rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * Acquire access rights to the correct PHY. This is a
+ * function pointer entry point called by the api module.
+ */
+static s32 igc_acquire_phy_base(struct igc_hw *hw)
+{
+ u16 mask = IGC_SWFW_PHY0_SM;
+
+ return hw->mac.ops.acquire_swfw_sync(hw, mask);
+}
+
+/**
+ * igc_release_phy_base - Release rights to access PHY
+ * @hw: pointer to the HW structure
+ *
+ * A wrapper to release access rights to the correct PHY. This is a
+ * function pointer entry point called by the api module.
+ */
+static void igc_release_phy_base(struct igc_hw *hw)
+{
+ u16 mask = IGC_SWFW_PHY0_SM;
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+/**
+ * igc_get_link_up_info_base - Get link speed/duplex info
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * This is a wrapper function. If using the serial gigabit media independent
+ * interface, PCS is used to retrieve the link speed and duplex information;
+ * otherwise, the generic function is used.
+ */
+static s32 igc_get_link_up_info_base(struct igc_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ ret_val = igc_get_speed_and_duplex_copper(hw, speed, duplex);
+
+ return ret_val;
+}
+
+/**
+ * igc_init_hw_base - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation.
+ */
+static s32 igc_init_hw_base(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ u16 i, rar_count = mac->rar_entry_count;
+ s32 ret_val = 0;
+
+ /* Setup the receive address */
+ igc_init_rx_addrs(hw, rar_count);
+
+ /* Zero out the Multicast HASH table */
+ hw_dbg("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ array_wr32(IGC_MTA, i, 0);
+
+ /* Zero out the Unicast HASH table */
+ hw_dbg("Zeroing the UTA\n");
+ for (i = 0; i < mac->uta_reg_count; i++)
+ array_wr32(IGC_UTA, i, 0);
+
+ /* Setup link and flow control */
+ ret_val = igc_setup_link(hw);
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ igc_clear_hw_cntrs_base(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_read_mac_addr_base - Read device MAC address
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_read_mac_addr_base(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = igc_read_mac_addr(hw);
+
+ return ret_val;
+}
+
+/**
+ * igc_power_down_phy_copper_base - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * Remove the link in the case of a PHY power down intended to save power,
+ * to turn off link during a driver unload, or when wake on LAN is not
+ * enabled.
+ */
+void igc_power_down_phy_copper_base(struct igc_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw)))
+ igc_power_down_phy_copper(hw);
+}
+
+/**
+ * igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
+ * @hw: pointer to the HW structure
+ *
+ * After Rx enable, if manageability is enabled then there is likely some
+ * bad data at the start of the fifo and possibly in the DMA fifo. This
+ * function clears the fifos and flushes any packets that came in as rx was
+ * being enabled.
+ */
+void igc_rx_fifo_flush_base(struct igc_hw *hw)
+{
+ u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+ int i, ms_wait;
+
+ /* disable IPv6 options as per hardware errata */
+ rfctl = rd32(IGC_RFCTL);
+ rfctl |= IGC_RFCTL_IPV6_EX_DIS;
+ wr32(IGC_RFCTL, rfctl);
+
+ if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
+ return;
+
+ /* Disable all Rx queues */
+ for (i = 0; i < 4; i++) {
+ rxdctl[i] = rd32(IGC_RXDCTL(i));
+ wr32(IGC_RXDCTL(i),
+ rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
+ }
+ /* Poll all queues to verify they have shut down */
+ for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+ usleep_range(1000, 2000);
+ rx_enabled = 0;
+ for (i = 0; i < 4; i++)
+ rx_enabled |= rd32(IGC_RXDCTL(i));
+ if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
+ break;
+ }
+
+ if (ms_wait == 10)
+ hw_dbg("Queue disable timed out after 10ms\n");
+
+ /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+ * incoming packets are rejected. Set enable and wait 2ms so that
+ * any packet that was arriving while RCTL.EN was set is flushed.
+ */
+ wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);
+
+ rlpml = rd32(IGC_RLPML);
+ wr32(IGC_RLPML, 0);
+
+ rctl = rd32(IGC_RCTL);
+ temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
+ temp_rctl |= IGC_RCTL_LPE;
+
+ wr32(IGC_RCTL, temp_rctl);
+ wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
+ wrfl();
+ usleep_range(2000, 3000);
+
+ /* Enable Rx queues that were previously enabled and restore our
+ * previous state
+ */
+ for (i = 0; i < 4; i++)
+ wr32(IGC_RXDCTL(i), rxdctl[i]);
+ wr32(IGC_RCTL, rctl);
+ wrfl();
+
+ wr32(IGC_RLPML, rlpml);
+ wr32(IGC_RFCTL, rfctl);
+
+ /* Flush receive errors generated by workaround */
+ rd32(IGC_ROC);
+ rd32(IGC_RNBC);
+ rd32(IGC_MPC);
+}
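+
+/* In outline, the flush above (an illustrative summary, not additional
+ * driver logic): disable all four Rx queues and poll up to 10 ms for
+ * IGC_RXDCTL_QUEUE_ENABLE to clear; briefly re-enable the receiver with
+ * RLPML = 0, RCTL.SBP cleared and RCTL.LPE set, so that in-flight frames
+ * drain and are dropped; then restore RXDCTL, RCTL, RLPML and RFCTL and
+ * read ROC/RNBC/MPC to clear the counters the flush inflated.
+ */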
+
+static struct igc_mac_operations igc_mac_ops_base = {
+ .init_hw = igc_init_hw_base,
+ .check_for_link = igc_check_for_link_base,
+ .rar_set = igc_rar_set,
+ .read_mac_addr = igc_read_mac_addr_base,
+ .get_speed_and_duplex = igc_get_link_up_info_base,
+};
+
+static const struct igc_phy_operations igc_phy_ops_base = {
+ .acquire = igc_acquire_phy_base,
+ .release = igc_release_phy_base,
+ .reset = igc_phy_hw_reset,
+ .read_reg = igc_read_phy_reg_gpy,
+ .write_reg = igc_write_phy_reg_gpy,
+};
+
+const struct igc_info igc_base_info = {
+ .get_invariants = igc_get_invariants_base,
+ .mac_ops = &igc_mac_ops_base,
+ .phy_ops = &igc_phy_ops_base,
+};
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
new file mode 100644
index 000000000000..35588fa7b8c5
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_BASE_H
+#define _IGC_BASE_H
+
+/* forward declaration */
+void igc_rx_fifo_flush_base(struct igc_hw *hw);
+void igc_power_down_phy_copper_base(struct igc_hw *hw);
+
+/* Transmit Descriptor - Advanced */
+union igc_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
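+
+/* Usage sketch (illustrative, following the usual advanced-descriptor
+ * convention in related Intel drivers): software fills the "read" view,
+ * with cmd_type_len carrying IGC_ADVTXD_DTYP_DATA plus IGC_ADVTXD_DCMD_*
+ * flags and olinfo_status carrying the payload length shifted by
+ * IGC_ADVTXD_PAYLEN_SHIFT; on completion the hardware overwrites the same
+ * slot with the "wb" view, whose status field reports descriptor done.
+ */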
+
+/* Adv Transmit Descriptor Config Masks */
+#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
+#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+
+#define IGC_RAR_ENTRIES 16
+
+struct igc_adv_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ u32 data;
+ struct {
+ u32 datalen:16; /* Data buffer length */
+ u32 rsvd:4;
+ u32 dtyp:4; /* Descriptor type */
+ u32 dcmd:8; /* Descriptor command */
+ } config;
+ } lower;
+ union {
+ u32 data;
+ struct {
+ u32 status:4; /* Descriptor status */
+ u32 idx:4;
+ u32 popts:6; /* Packet Options */
+ u32 paylen:18; /* Payload length */
+ } options;
+ } upper;
+};
+
+/* Receive Descriptor - Advanced */
+union igc_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /*RSS type, Pkt type*/
+ /* Split Header, header buffer len */
+ __le16 hdr_info;
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
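+
+/* Usage sketch (illustrative): software posts buffers through the "read"
+ * view (pkt_addr, plus hdr_addr when header split is in use); on
+ * completion the hardware writes back the "wb" view, where
+ * upper.status_error carries the IGC_RXDEXT_STATERR_* bits and
+ * upper.length the byte count of the received packet.
+ */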
+
+/* Additional Transmit Descriptor Control definitions */
+#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+
+/* Additional Receive Descriptor Control definitions */
+#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+
+/* SRRCTL bit definitions */
+#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+
+#endif /* _IGC_BASE_H */
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
new file mode 100644
index 000000000000..8740754ea1fd
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_DEFINES_H_
+#define _IGC_DEFINES_H_
+
+#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
+
+/* PCI Bus Info */
+#define PCIE_DEVICE_CONTROL2 0x28
+#define PCIE_DEVICE_CONTROL2_16ms 0x0005
+
+/* Link mode selection field of the Extended Device Control register */
+#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define COPPER_LINK_UP_LIMIT 10
+#define PHY_AUTO_NEG_LIMIT 45
+#define PHY_FORCE_LIMIT 20
+
+/* Number of times we poll for PCI Express master disable to latch */
+#define MASTER_DISABLE_TIMEOUT 800
+/* Blocks new Master requests */
+#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004
+/* Status of Master requests. */
+#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000
+
+/* PCI Express Control */
+#define IGC_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define IGC_GCR_CMPL_TMOUT_10ms 0x00001000
+#define IGC_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define IGC_GCR_CAP_VER2 0x00040000
+
+/* Receive Address
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots. However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define IGC_RAH_AV 0x80000000 /* Receive address valid */
+#define IGC_RAH_POOL_1 0x00040000
+#define IGC_RAL_MAC_ADDR_LEN 4
+#define IGC_RAH_MAC_ADDR_LEN 2
+
+/* Error Codes */
+#define IGC_SUCCESS 0
+#define IGC_ERR_NVM 1
+#define IGC_ERR_PHY 2
+#define IGC_ERR_CONFIG 3
+#define IGC_ERR_PARAM 4
+#define IGC_ERR_MAC_INIT 5
+#define IGC_ERR_RESET 9
+#define IGC_ERR_MASTER_REQUESTS_PENDING 10
+#define IGC_ERR_BLK_PHY_RESET 12
+#define IGC_ERR_SWFW_SYNC 13
+
+/* Device Control */
+#define IGC_CTRL_RST 0x04000000 /* Global reset */
+
+#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+
+#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+
+#define IGC_CONNSW_AUTOSENSE_EN 0x1
+
+/* PBA constants */
+#define IGC_PBA_34K 0x0022
+
+/* SW Semaphore Register */
+#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+
+/* SWFW_SYNC Definitions */
+#define IGC_SWFW_EEP_SM 0x1
+#define IGC_SWFW_PHY0_SM 0x2
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+
+/* PHY GPY 211 registers */
+#define STANDARD_AN_REG_MASK 0x0007 /* MMD */
+#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */
+#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */
+#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */
+
+/* NVM Control */
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT 10
+#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
+#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */
+#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define IGC_EECD_ADDR_BITS 0x00000400
+#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
+#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
+#define IGC_EECD_SIZE_EX_SHIFT 11
+#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */
+#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/
+#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */
+#define IGC_FLUDONE_ATTEMPTS 20000
+#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
+
+/* Offset to data in NVM read/write registers */
+#define IGC_NVM_RW_REG_DATA 16
+#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define IGC_NVM_RW_REG_START 1 /* Start operation */
+#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */
+
+/* NVM Word Offsets */
+#define NVM_CHECKSUM_REG 0x003F
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM 0xBABA
+
+#define NVM_PBA_OFFSET_0 8
+#define NVM_PBA_OFFSET_1 9
+#define NVM_RESERVED_WORD 0xFFFF
+#define NVM_PBA_PTR_GUARD 0xFAFA
+#define NVM_WORD_SIZE_BASE_SHIFT 6
+
+/* Collision related configuration parameters */
+#define IGC_COLLISION_THRESHOLD 15
+#define IGC_CT_SHIFT 4
+#define IGC_COLLISION_DISTANCE 63
+#define IGC_COLD_SHIFT 12
+
+/* Device Status */
+#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define IGC_STATUS_FUNC_SHIFT 2
+#define IGC_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* 1Gbps and 2.5Gbps half duplex are not supported, nor spec-compliant. */
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */
+#define ADVERTISE_2500_FULL 0x0080
+
+#define IGC_ALL_SPEED_DUPLEX_2500 ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500
+
+/* Interrupt Cause Read */
+#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */
+#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */
+#define IGC_ICR_LSC BIT(2) /* Link Status Change */
+#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */
+#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. threshold (0) */
+#define IGC_ICR_RXO BIT(6) /* Rx overrun */
+#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */
+#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */
+
+/* If this bit is asserted, the driver should claim the interrupt */
+#define IGC_ICR_INT_ASSERTED BIT(31)
+
+#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
+
+#define IMS_ENABLE_MASK ( \
+ IGC_IMS_RXT0 | \
+ IGC_IMS_TXDW | \
+ IGC_IMS_RXDMT0 | \
+ IGC_IMS_RXSEQ | \
+ IGC_IMS_LSC)
+
+/* Interrupt Mask Set */
+#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */
+#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */
+#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */
+#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */
+#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
+#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */
+
+#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */
+#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */
+
+/* Interrupt Cause Set */
+#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */
+#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */
+#define IGC_ICS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */
+
+#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+#define IGC_IVAR_VALID 0x80
+#define IGC_GPIE_NSICR 0x00000001
+#define IGC_GPIE_MSIX_MODE 0x00000010
+#define IGC_GPIE_EIAME 0x40000000
+#define IGC_GPIE_PBA 0x80000000
+
+/* Transmit Descriptor bit definitions */
+#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IGC_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IGC_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IGC_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IGC_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define IGC_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define IGC_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define IGC_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */
+#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+
+/* Transmit Control */
+#define IGC_TCTL_EN 0x00000002 /* enable Tx */
+#define IGC_TCTL_PSP 0x00000008 /* pad short packets */
+#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */
+#define IGC_TCTL_COLD 0x003ff000 /* collision distance */
+#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define IGC_TCTL_MULR 0x10000000 /* Multiple request support */
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
+/* Enable XON frame transmission */
+#define IGC_FCRTL_XONE 0x80000000
+
+/* Management Control */
+#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+
+/* Receive Control */
+#define IGC_RCTL_RST 0x00000001 /* Software reset */
+#define IGC_RCTL_EN 0x00000002 /* enable */
+#define IGC_RCTL_SBP 0x00000004 /* store bad packet */
+#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */
+#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */
+#define IGC_RCTL_LPE 0x00000020 /* long packet enable */
+#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+
+#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
+#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
+
+/* Receive Descriptor bit definitions */
+#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
+
+#define IGC_RXDEXT_STATERR_CE 0x01000000
+#define IGC_RXDEXT_STATERR_SE 0x02000000
+#define IGC_RXDEXT_STATERR_SEQ 0x04000000
+#define IGC_RXDEXT_STATERR_CXE 0x10000000
+#define IGC_RXDEXT_STATERR_TCPE 0x20000000
+#define IGC_RXDEXT_STATERR_IPE 0x40000000
+#define IGC_RXDEXT_STATERR_RXE 0x80000000
+
+/* Same mask, but for extended and packet split descriptors */
+#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ IGC_RXDEXT_STATERR_CE | \
+ IGC_RXDEXT_STATERR_SE | \
+ IGC_RXDEXT_STATERR_SEQ | \
+ IGC_RXDEXT_STATERR_CXE | \
+ IGC_RXDEXT_STATERR_RXE)
+
+/* Header split receive */
+#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
+#define IGC_RFCTL_LEF 0x00040000
+
+#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
+
+#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */
+#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+
+/* GPY211 - I225 defines */
+#define GPY_MMD_MASK 0xFFFF0000
+#define GPY_MMD_SHIFT 16
+#define GPY_REG_MASK 0x0000FFFF
+
+#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
+
+/* MAC definitions */
+#define IGC_FACTPS_MNGCG 0x20000000
+#define IGC_FWSM_MODE_MASK 0xE
+#define IGC_FWSM_MODE_SHIFT 1
+
+/* Management Control */
+#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+
+/* PHY */
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define IGC_GEN_POLL_TIMEOUT 1920
+
+/* PHY Control Register */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+/* PHY Status Register */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+
+/* Bit definitions for valid PHY IDs. I = Integrated E = External */
+#define I225_I_PHY_ID 0x67C9DC00
+
+/* MDI Control */
+#define IGC_MDIC_DATA_MASK 0x0000FFFF
+#define IGC_MDIC_REG_MASK 0x001F0000
+#define IGC_MDIC_REG_SHIFT 16
+#define IGC_MDIC_PHY_MASK 0x03E00000
+#define IGC_MDIC_PHY_SHIFT 21
+#define IGC_MDIC_OP_WRITE 0x04000000
+#define IGC_MDIC_OP_READ 0x08000000
+#define IGC_MDIC_READY 0x10000000
+#define IGC_MDIC_INT_EN 0x20000000
+#define IGC_MDIC_ERROR 0x40000000
+#define IGC_MDIC_DEST 0x80000000
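+
+/* Sketch of an MDIC read built from the fields above (illustrative only,
+ * assuming an IGC_MDIC register with the usual e1000-family layout): to
+ * read PHY_ID1 (0x02) from the PHY at address 1,
+ *
+ *   mdic = (PHY_ID1 << IGC_MDIC_REG_SHIFT) |
+ *          (1 << IGC_MDIC_PHY_SHIFT) | IGC_MDIC_OP_READ;
+ *   wr32(IGC_MDIC, mdic);
+ *
+ * then poll until IGC_MDIC_READY is set and, if IGC_MDIC_ERROR is clear,
+ * take the result from rd32(IGC_MDIC) & IGC_MDIC_DATA_MASK.
+ */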
+
+#define IGC_N0_QUEUE -1
+
+#endif /* _IGC_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
new file mode 100644
index 000000000000..c50414f48f0d
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_HW_H_
+#define _IGC_HW_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include "igc_regs.h"
+#include "igc_defines.h"
+#include "igc_mac.h"
+#include "igc_phy.h"
+#include "igc_nvm.h"
+#include "igc_i225.h"
+#include "igc_base.h"
+
+#define IGC_DEV_ID_I225_LM 0x15F2
+#define IGC_DEV_ID_I225_V 0x15F3
+
+#define IGC_FUNC_0 0
+
+/* Function pointers for the MAC. */
+struct igc_mac_operations {
+ s32 (*check_for_link)(struct igc_hw *hw);
+ s32 (*reset_hw)(struct igc_hw *hw);
+ s32 (*init_hw)(struct igc_hw *hw);
+ s32 (*setup_physical_interface)(struct igc_hw *hw);
+ void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index);
+ s32 (*read_mac_addr)(struct igc_hw *hw);
+ s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed,
+ u16 *duplex);
+ s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask);
+ void (*release_swfw_sync)(struct igc_hw *hw, u16 mask);
+};
+
+enum igc_mac_type {
+ igc_undefined = 0,
+ igc_i225,
+ igc_num_macs /* List is 1-based, so subtract 1 for true count. */
+};
+
+enum igc_phy_type {
+ igc_phy_unknown = 0,
+ igc_phy_none,
+ igc_phy_i225,
+};
+
+enum igc_media_type {
+ igc_media_type_unknown = 0,
+ igc_media_type_copper = 1,
+ igc_num_media_types
+};
+
+enum igc_nvm_type {
+ igc_nvm_unknown = 0,
+ igc_nvm_flash_hw,
+ igc_nvm_invm,
+};
+
+struct igc_info {
+ s32 (*get_invariants)(struct igc_hw *hw);
+ struct igc_mac_operations *mac_ops;
+ const struct igc_phy_operations *phy_ops;
+ struct igc_nvm_operations *nvm_ops;
+};
+
+extern const struct igc_info igc_base_info;
+
+struct igc_mac_info {
+ struct igc_mac_operations ops;
+
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+
+ enum igc_mac_type type;
+
+ u32 collision_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ u32 mc_filter_type;
+ u32 tx_packet_delta;
+ u32 txcw;
+
+ u16 mta_reg_count;
+ u16 uta_reg_count;
+
+ u16 rar_entry_count;
+
+ u8 forced_speed_duplex;
+
+ bool adaptive_ifs;
+ bool has_fwsm;
+ bool asf_firmware_present;
+ bool arc_subsystem_valid;
+
+ bool autoneg;
+ bool autoneg_failed;
+ bool get_link_status;
+};
+
+struct igc_nvm_operations {
+ s32 (*acquire)(struct igc_hw *hw);
+ s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data);
+ void (*release)(struct igc_hw *hw);
+ s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data);
+ s32 (*update)(struct igc_hw *hw);
+ s32 (*validate)(struct igc_hw *hw);
+ s32 (*valid_led_default)(struct igc_hw *hw, u16 *data);
+};
+
+struct igc_phy_operations {
+ s32 (*acquire)(struct igc_hw *hw);
+ s32 (*check_polarity)(struct igc_hw *hw);
+ s32 (*check_reset_block)(struct igc_hw *hw);
+ s32 (*force_speed_duplex)(struct igc_hw *hw);
+ s32 (*get_cfg_done)(struct igc_hw *hw);
+ s32 (*get_cable_length)(struct igc_hw *hw);
+ s32 (*get_phy_info)(struct igc_hw *hw);
+ s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data);
+ void (*release)(struct igc_hw *hw);
+ s32 (*reset)(struct igc_hw *hw);
+ s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data);
+};
+
+struct igc_nvm_info {
+ struct igc_nvm_operations ops;
+ enum igc_nvm_type type;
+
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+
+ u16 word_size;
+ u16 delay_usec;
+ u16 address_bits;
+ u16 opcode_bits;
+ u16 page_size;
+};
+
+struct igc_phy_info {
+ struct igc_phy_operations ops;
+
+ enum igc_phy_type type;
+
+ u32 addr;
+ u32 id;
+ u32 reset_delay_us; /* in usec */
+ u32 revision;
+
+ enum igc_media_type media_type;
+
+ u16 autoneg_advertised;
+ u16 autoneg_mask;
+ u16 cable_length;
+ u16 max_cable_length;
+ u16 min_cable_length;
+ u16 pair_length[4];
+
+ u8 mdix;
+
+ bool disable_polarity_correction;
+ bool is_mdix;
+ bool polarity_correction;
+ bool reset_disable;
+ bool speed_downgraded;
+ bool autoneg_wait_to_complete;
+};
+
+struct igc_bus_info {
+ u16 func;
+ u16 pci_cmd_word;
+};
+
+enum igc_fc_mode {
+ igc_fc_none = 0,
+ igc_fc_rx_pause,
+ igc_fc_tx_pause,
+ igc_fc_full,
+ igc_fc_default = 0xFF
+};
+
+struct igc_fc_info {
+ u32 high_water; /* Flow control high-water mark */
+ u32 low_water; /* Flow control low-water mark */
+ u16 pause_time; /* Flow control pause timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ enum igc_fc_mode current_mode; /* Type of flow control */
+ enum igc_fc_mode requested_mode;
+};
+
+struct igc_dev_spec_base {
+ bool global_device_reset;
+ bool eee_disable;
+ bool clear_semaphore_once;
+ bool module_plugged;
+ u8 media_port;
+ bool mas_capable;
+};
+
+struct igc_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+ unsigned long io_base;
+
+ struct igc_mac_info mac;
+ struct igc_fc_info fc;
+ struct igc_nvm_info nvm;
+ struct igc_phy_info phy;
+
+ struct igc_bus_info bus;
+
+ union {
+ struct igc_dev_spec_base _base;
+ } dev_spec;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+};
+
+/* Statistics counters collected by the MAC */
+struct igc_hw_stats {
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 tor;
+ u64 tot;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
+ u64 cbtmpc;
+ u64 htdpmc;
+ u64 cbrdpc;
+ u64 cbrmpc;
+ u64 rpthc;
+ u64 hgptc;
+ u64 htcbdpc;
+ u64 hgorc;
+ u64 hgotc;
+ u64 lenerrs;
+ u64 scvpc;
+ u64 hrmpc;
+ u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
+};
+
+struct net_device *igc_get_hw_dev(struct igc_hw *hw);
+#define hw_dbg(format, arg...) \
+ netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
+
+s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
+
+#endif /* _IGC_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
new file mode 100644
index 000000000000..c25f555aaf82
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include <linux/delay.h>
+
+#include "igc_hw.h"
+
+/**
+ * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -IGC_ERR_NVM (-1).
+ */
+static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
+{
+ return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
+}
+
+/**
+ * igc_release_nvm_i225 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ */
+static void igc_release_nvm_i225(struct igc_hw *hw)
+{
+ igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
+}
+
+/**
+ * igc_get_hw_semaphore_i225 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ */
+static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
+{
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+ u32 swsm;
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = rd32(IGC_SWSM);
+ if (!(swsm & IGC_SWSM_SMBI))
+ break;
+
+ usleep_range(500, 600);
+ i++;
+ }
+
+ if (i == timeout) {
+ /* In rare circumstances, the SW semaphore may already be held
+ * unintentionally. Clear the semaphore once before giving up.
+ */
+ if (hw->dev_spec._base.clear_semaphore_once) {
+ hw->dev_spec._base.clear_semaphore_once = false;
+ igc_put_hw_semaphore(hw);
+ for (i = 0; i < timeout; i++) {
+ swsm = rd32(IGC_SWSM);
+ if (!(swsm & IGC_SWSM_SMBI))
+ break;
+
+ usleep_range(500, 600);
+ }
+ }
+
+ /* If we do not have the semaphore here, we have to give up. */
+ if (i == timeout) {
+ hw_dbg("Driver can't access device - SMBI bit is set.\n");
+ return -IGC_ERR_NVM;
+ }
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = rd32(IGC_SWSM);
+ wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI)
+ break;
+
+ usleep_range(500, 600);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ igc_put_hw_semaphore(hw);
+ hw_dbg("Driver can't access the NVM\n");
+ return -IGC_ERR_NVM;
+ }
+
+ return 0;
+}
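+
+/* The two-stage handshake above, in sketch form (descriptive comment
+ * only, based on the behavior documented for earlier Intel GbE parts):
+ * reading SWSM returns SMBI and sets it as a side effect, so the reader
+ * that sees SMBI clear owns the software semaphore; setting SWESMBI and
+ * reading it back guards against firmware, which can keep the bit from
+ * latching. igc_put_hw_semaphore is expected to clear both bits again.
+ */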
+
+/**
+ * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ */
+s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
+{
+ s32 i = 0, timeout = 200;
+ u32 fwmask = mask << 16;
+ u32 swmask = mask;
+ s32 ret_val = 0;
+ u32 swfw_sync;
+
+ while (i < timeout) {
+ if (igc_get_hw_semaphore_i225(hw)) {
+ ret_val = -IGC_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = rd32(IGC_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /* Firmware currently using resource (fwmask) */
+ igc_put_hw_semaphore(hw);
+ mdelay(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -IGC_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ wr32(IGC_SW_FW_SYNC, swfw_sync);
+
+ igc_put_hw_semaphore(hw);
+out:
+ return ret_val;
+}
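+
+/* SW_FW_SYNC pairs each software flag in the low 16 bits with a firmware
+ * flag in the high 16 bits. For example, acquiring IGC_SWFW_PHY0_SM
+ * (0x2) gives swmask = 0x00000002 and fwmask = 0x00020000, and the
+ * resource is considered free only when both bits read back clear.
+ */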
+
+/**
+ * igc_release_swfw_sync_i225 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
+ */
+void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+
+ while (igc_get_hw_semaphore_i225(hw))
+ ; /* Empty */
+
+ swfw_sync = rd32(IGC_SW_FW_SYNC);
+ swfw_sync &= ~mask;
+ wr32(IGC_SW_FW_SYNC, swfw_sync);
+
+ igc_put_hw_semaphore(hw);
+}
+
+/**
+ * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the Shadow Ram to read
+ * @words: number of words to read
+ * @data: word read from the Shadow Ram
+ *
+ * Reads a 16 bit word from the Shadow Ram using the EERD register.
+ * Uses necessary synchronization semaphores.
+ */
+static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = 0;
+ u16 i, count;
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of the forceful takeover procedure. However, it is more
+ * efficient to read in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
+ IGC_EERD_EEWR_MAX_COUNT : (words - i);
+
+ status = hw->nvm.ops.acquire(hw);
+ if (status)
+ break;
+
+ status = igc_read_nvm_eerd(hw, offset, count, data + i);
+ hw->nvm.ops.release(hw);
+ if (status)
+ break;
+ }
+
+ return status;
+}
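+
+/* Worked example of the burst split above (illustrative numbers): for
+ * words = 1000 with IGC_EERD_EEWR_MAX_COUNT = 512, the first pass reads
+ * 512 words and the second the remaining 488, acquiring and releasing
+ * the semaphore around each burst.
+ */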
+
+/**
+ * igc_write_nvm_srwr - Write to Shadow Ram using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow Ram to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow Ram
+ *
+ * Writes data to Shadow Ram at offset using EEWR register.
+ *
+ * If igc_update_nvm_checksum is not called after this function, the
+ * Shadow Ram will most likely contain an invalid checksum.
+ */
+static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 attempts = 100000;
+ u32 i, k, eewr = 0;
+ s32 ret_val = 0;
+
+ /* A check for invalid values: offset too large, too many words,
+ * too many words for the offset, and not enough words.
+ */
+ if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
+ words == 0) {
+ hw_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -IGC_ERR_NVM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
+ (data[i] << IGC_NVM_RW_REG_DATA) |
+ IGC_NVM_RW_REG_START;
+
+ wr32(IGC_SRWR, eewr);
+
+ for (k = 0; k < attempts; k++) {
+ if (IGC_NVM_RW_REG_DONE &
+ rd32(IGC_SRWR)) {
+ ret_val = 0;
+ break;
+ }
+ udelay(5);
+ }
+
+ if (ret_val) {
+ hw_dbg("Shadow RAM write EEWR timed out\n");
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
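+
+/* Worked example of the EEWR encoding above (illustrative values):
+ * writing data 0x1234 at offset NVM_CHECKSUM_REG (0x3F) yields
+ *
+ *   eewr = (0x3F << IGC_NVM_RW_ADDR_SHIFT) |
+ *          (0x1234 << IGC_NVM_RW_REG_DATA) | IGC_NVM_RW_REG_START
+ *        = 0xfc | 0x12340000 | 0x1 = 0x123400fd
+ */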
+
+/**
+ * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow RAM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ * Writes data to Shadow RAM at offset using EEWR register.
+ *
+ * If igc_update_nvm_checksum is not called after this function, the
+ * data will not be committed to FLASH and also Shadow RAM will most likely
+ * contain an invalid checksum.
+ *
+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ * partially written.
+ */
+static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = 0;
+ u16 i, count;
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of the forceful takeover procedure. However, it is more
+ * efficient to write in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
+ IGC_EERD_EEWR_MAX_COUNT : (words - i);
+
+ status = hw->nvm.ops.acquire(hw);
+ if (status)
+ break;
+
+ status = igc_write_nvm_srwr(hw, offset, count, data + i);
+ hw->nvm.ops.release(hw);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ */
+static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
+{
+ s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count,
+ u16 *data);
+ s32 status = 0;
+
+ status = hw->nvm.ops.acquire(hw);
+ if (status)
+ goto out;
+
+ /* Replace the read function that grabs the semaphore with one that
+ * skips it, since we already hold the semaphore here.
+ */
+ read_op_ptr = hw->nvm.ops.read;
+ hw->nvm.ops.read = igc_read_nvm_eerd;
+
+ status = igc_validate_nvm_checksum(hw);
+
+ /* Revert original read operation. */
+ hw->nvm.ops.read = read_op_ptr;
+
+ hw->nvm.ops.release(hw);
+
+out:
+ return status;
+}
+
+/**
+ * igc_pool_flash_update_done_i225 - Poll FLUDONE status
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
+{
+ s32 ret_val = -IGC_ERR_NVM;
+ u32 i, reg;
+
+ for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
+ reg = rd32(IGC_EECD);
+ if (reg & IGC_EECD_FLUDONE_I225) {
+ ret_val = 0;
+ break;
+ }
+ udelay(5);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_update_flash_i225 - Commit EEPROM to the flash
+ * @hw: pointer to the HW structure
+ */
+static s32 igc_update_flash_i225(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 flup;
+
+ ret_val = igc_pool_flash_update_done_i225(hw);
+ if (ret_val == -IGC_ERR_NVM) {
+ hw_dbg("Flash update time out\n");
+ goto out;
+ }
+
+ flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225;
+ wr32(IGC_EECD, flup);
+
+ ret_val = igc_pool_flash_update_done_i225(hw);
+ if (ret_val)
+ hw_dbg("Flash update time out\n");
+ else
+ hw_dbg("Flash update complete\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_update_nvm_checksum_i225 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM. Finally, it commits the EEPROM data to the flash.
+ */
+static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
+{
+ u16 checksum = 0;
+ s32 ret_val = 0;
+ u16 i, nvm_data;
+
+ /* Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("EEPROM read failed\n");
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
+ * because we do not want to take the synchronization
+ * semaphores twice here.
+ */
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw->nvm.ops.release(hw);
+ hw_dbg("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16)NVM_SUM - checksum;
+ ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+ &checksum);
+ if (ret_val) {
+ hw->nvm.ops.release(hw);
+ hw_dbg("NVM Write Error while updating checksum.\n");
+ goto out;
+ }
+
+ hw->nvm.ops.release(hw);
+
+ ret_val = igc_update_flash_i225(hw);
+
+out:
+ return ret_val;
+}
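+
+/* Checksum arithmetic, in sketch form: if the words at offsets
+ * 0x00-0x3E sum to S (mod 2^16), the word written at NVM_CHECKSUM_REG
+ * (0x3F) is 0xBABA - S, so the sum over all 0x40 words comes back to
+ * NVM_SUM (0xBABA), which is what the validate path checks against.
+ */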
+
+/**
+ * igc_get_flash_presence_i225 - Check if flash device is detected
+ * @hw: pointer to the HW structure
+ */
+bool igc_get_flash_presence_i225(struct igc_hw *hw)
+{
+ bool ret_val = false;
+ u32 eec = 0;
+
+ eec = rd32(IGC_EECD);
+ if (eec & IGC_EECD_FLASH_DETECTED_I225)
+ ret_val = true;
+
+ return ret_val;
+}
+
+/**
+ * igc_init_nvm_params_i225 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ */
+s32 igc_init_nvm_params_i225(struct igc_hw *hw)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+
+ nvm->ops.acquire = igc_acquire_nvm_i225;
+ nvm->ops.release = igc_release_nvm_i225;
+
+ /* NVM Function Pointers */
+ if (igc_get_flash_presence_i225(hw)) {
+ hw->nvm.type = igc_nvm_flash_hw;
+ nvm->ops.read = igc_read_nvm_srrd_i225;
+ nvm->ops.write = igc_write_nvm_srwr_i225;
+ nvm->ops.validate = igc_validate_nvm_checksum_i225;
+ nvm->ops.update = igc_update_nvm_checksum_i225;
+ } else {
+ hw->nvm.type = igc_nvm_invm;
+ nvm->ops.read = igc_read_nvm_eerd;
+ nvm->ops.write = NULL;
+ nvm->ops.validate = NULL;
+ nvm->ops.update = NULL;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.h b/drivers/net/ethernet/intel/igc/igc_i225.h
new file mode 100644
index 000000000000..7b66e1f9c0e6
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_i225.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_I225_H_
+#define _IGC_I225_H_
+
+s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask);
+void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask);
+
+s32 igc_init_nvm_params_i225(struct igc_hw *hw);
+bool igc_get_flash_presence_i225(struct igc_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
new file mode 100644
index 000000000000..f7683d3ae47c
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "igc_mac.h"
+#include "igc_hw.h"
+
+/* forward declaration */
+static s32 igc_set_default_fc(struct igc_hw *hw);
+static s32 igc_set_fc_watermarks(struct igc_hw *hw);
+
+/**
+ * igc_disable_pcie_master - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 if successful, else returns -10
+ * (-IGC_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
+ * caused the master requests to be disabled.
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests.
+ */
+s32 igc_disable_pcie_master(struct igc_hw *hw)
+{
+ s32 timeout = MASTER_DISABLE_TIMEOUT;
+ s32 ret_val = 0;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_CTRL);
+ ctrl |= IGC_CTRL_GIO_MASTER_DISABLE;
+ wr32(IGC_CTRL, ctrl);
+
+ while (timeout) {
+ if (!(rd32(IGC_STATUS) &
+ IGC_STATUS_GIO_MASTER_ENABLE))
+ break;
+ usleep_range(2000, 3000);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg("Master requests are pending.\n");
+ ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_init_rx_addrs - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: number of receive address registers
+ *
+ * Setup the receive address registers by setting the base receive address
+ * register to the device's MAC address and clearing all the other receive
+ * address registers to 0.
+ */
+void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count)
+{
+ u8 mac_addr[ETH_ALEN] = {0};
+ u32 i;
+
+ /* Setup the receive address */
+ hw_dbg("Programming MAC Address into RAR[0]\n");
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+ /* Zero out the other (rar_entry_count - 1) receive addresses */
+ hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
+ for (i = 1; i < rar_count; i++)
+ hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ * igc_setup_link - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ */
+s32 igc_setup_link(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /* In the case of the phy reset being blocked, we already have a link.
+ * We do not need to set it up again.
+ */
+ if (igc_check_reset_block(hw))
+ goto out;
+
+ /* If requested flow control is set to default, set flow control
+ * based on the EEPROM flow control settings.
+ */
+ if (hw->fc.requested_mode == igc_fc_default) {
+ ret_val = igc_set_default_fc(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ /* We want to save off the original Flow Control configuration just
+ * in case we get disconnected and then reconnected into a different
+ * hub or switch with different Flow Control capabilities.
+ */
+ hw->fc.current_mode = hw->fc.requested_mode;
+
+ hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
+
+ /* Call the necessary media_type subroutine to configure the link. */
+ ret_val = hw->mac.ops.setup_physical_interface(hw);
+ if (ret_val)
+ goto out;
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ hw_dbg("Initializing the Flow Control address, type and timer regs\n");
+ wr32(IGC_FCT, FLOW_CONTROL_TYPE);
+ wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ wr32(IGC_FCTTV, hw->fc.pause_time);
+
+ ret_val = igc_set_fc_watermarks(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_set_default_fc - Set flow control default values
+ * @hw: pointer to the HW structure
+ *
+ * Set the default flow control mode (full flow control) and store the
+ * value.
+ */
+static s32 igc_set_default_fc(struct igc_hw *hw)
+{
+ hw->fc.requested_mode = igc_fc_full;
+ return 0;
+}
+
+/**
+ * igc_force_mac_fc - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings. TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC. Software must
+ * also configure these bits when link is forced on a fiber connection.
+ */
+s32 igc_force_mac_fc(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disables flow control
+ * according to the "hw->fc.current_mode" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No other values should be possible at this point.
+ */
+ hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+ switch (hw->fc.current_mode) {
+ case igc_fc_none:
+ ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE));
+ break;
+ case igc_fc_rx_pause:
+ ctrl &= (~IGC_CTRL_TFCE);
+ ctrl |= IGC_CTRL_RFCE;
+ break;
+ case igc_fc_tx_pause:
+ ctrl &= (~IGC_CTRL_RFCE);
+ ctrl |= IGC_CTRL_TFCE;
+ break;
+ case igc_fc_full:
+ ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE);
+ break;
+ default:
+ hw_dbg("Flow control param set incorrectly\n");
+ ret_val = -IGC_ERR_CONFIG;
+ goto out;
+ }
+
+ wr32(IGC_CTRL, ctrl);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_set_fc_watermarks - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers. If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ */
+static s32 igc_set_fc_watermarks(struct igc_hw *hw)
+{
+ u32 fcrtl = 0, fcrth = 0;
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+ * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (hw->fc.current_mode & igc_fc_tx_pause) {
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
+ */
+ fcrtl = hw->fc.low_water;
+ if (hw->fc.send_xon)
+ fcrtl |= IGC_FCRTL_XONE;
+
+ fcrth = hw->fc.high_water;
+ }
+ wr32(IGC_FCRTL, fcrtl);
+ wr32(IGC_FCRTH, fcrth);
+
+ return 0;
+}
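+
+/* Worked example (illustrative values): with low_water = 0x3000,
+ * high_water = 0x4000 and send_xon true, this writes
+ * FCRTL = 0x3000 | IGC_FCRTL_XONE = 0x80003000 and FCRTH = 0x4000;
+ * when tx pause is not enabled, both registers are simply zeroed.
+ */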
+
+/**
+ * igc_clear_hw_cntrs_base - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ */
+void igc_clear_hw_cntrs_base(struct igc_hw *hw)
+{
+ rd32(IGC_CRCERRS);
+ rd32(IGC_SYMERRS);
+ rd32(IGC_MPC);
+ rd32(IGC_SCC);
+ rd32(IGC_ECOL);
+ rd32(IGC_MCC);
+ rd32(IGC_LATECOL);
+ rd32(IGC_COLC);
+ rd32(IGC_DC);
+ rd32(IGC_SEC);
+ rd32(IGC_RLEC);
+ rd32(IGC_XONRXC);
+ rd32(IGC_XONTXC);
+ rd32(IGC_XOFFRXC);
+ rd32(IGC_XOFFTXC);
+ rd32(IGC_FCRUC);
+ rd32(IGC_GPRC);
+ rd32(IGC_BPRC);
+ rd32(IGC_MPRC);
+ rd32(IGC_GPTC);
+ rd32(IGC_GORCL);
+ rd32(IGC_GORCH);
+ rd32(IGC_GOTCL);
+ rd32(IGC_GOTCH);
+ rd32(IGC_RNBC);
+ rd32(IGC_RUC);
+ rd32(IGC_RFC);
+ rd32(IGC_ROC);
+ rd32(IGC_RJC);
+ rd32(IGC_TORL);
+ rd32(IGC_TORH);
+ rd32(IGC_TOTL);
+ rd32(IGC_TOTH);
+ rd32(IGC_TPR);
+ rd32(IGC_TPT);
+ rd32(IGC_MPTC);
+ rd32(IGC_BPTC);
+
+ rd32(IGC_PRC64);
+ rd32(IGC_PRC127);
+ rd32(IGC_PRC255);
+ rd32(IGC_PRC511);
+ rd32(IGC_PRC1023);
+ rd32(IGC_PRC1522);
+ rd32(IGC_PTC64);
+ rd32(IGC_PTC127);
+ rd32(IGC_PTC255);
+ rd32(IGC_PTC511);
+ rd32(IGC_PTC1023);
+ rd32(IGC_PTC1522);
+
+ rd32(IGC_ALGNERRC);
+ rd32(IGC_RXERRC);
+ rd32(IGC_TNCRS);
+ rd32(IGC_CEXTERR);
+ rd32(IGC_TSCTC);
+ rd32(IGC_TSCTFC);
+
+ rd32(IGC_MGTPRC);
+ rd32(IGC_MGTPDC);
+ rd32(IGC_MGTPTC);
+
+ rd32(IGC_IAC);
+ rd32(IGC_ICRXOC);
+
+ rd32(IGC_ICRXPTC);
+ rd32(IGC_ICRXATC);
+ rd32(IGC_ICTXPTC);
+ rd32(IGC_ICTXATC);
+ rd32(IGC_ICTXQEC);
+ rd32(IGC_ICTXQMTC);
+ rd32(IGC_ICRXDMTC);
+
+ rd32(IGC_CBTMPC);
+ rd32(IGC_HTDPMC);
+ rd32(IGC_CBRMPC);
+ rd32(IGC_RPTHC);
+ rd32(IGC_HGPTC);
+ rd32(IGC_HTCBDPC);
+ rd32(IGC_HGORCL);
+ rd32(IGC_HGORCH);
+ rd32(IGC_HGOTCL);
+ rd32(IGC_HGOTCH);
+ rd32(IGC_LENERRS);
+}
+
+/**
+ * igc_rar_set - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
+ */
+void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index)
+{
+ u32 rar_low, rar_high;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] |
+ ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+
+ rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= IGC_RAH_AV;
+
+ /* Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ wr32(IGC_RAL(index), rar_low);
+ wrfl();
+ wr32(IGC_RAH(index), rar_high);
+ wrfl();
+}
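+
+/* Worked example of the packing above (illustrative address): for the
+ * MAC address 00:1b:21:aa:bb:cc,
+ *
+ *   rar_low  = 0x00 | (0x1b << 8) | (0x21 << 16) | (0xaa << 24)
+ *            = 0xaa211b00
+ *   rar_high = 0xbb | (0xcc << 8) = 0x0000ccbb, OR'd with IGC_RAH_AV
+ *              to give 0x8000ccbb, marking the entry valid.
+ */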
+
+/**
+ * igc_check_for_copper_link - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed. If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ */
+s32 igc_check_for_copper_link(struct igc_hw *hw)
+{
+ struct igc_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ bool link;
+
+ /* We only want to go out to the PHY registers to see if Auto-Neg
+ * has completed and/or if our link status has changed. The
+ * get_link_status flag is set upon receiving a Link Status
+ * Change or Rx Sequence Error interrupt.
+ */
+ if (!mac->get_link_status) {
+ ret_val = 0;
+ goto out;
+ }
+
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ */
+ ret_val = igc_phy_has_link(hw, 1, 0, &link);
+ if (ret_val)
+ goto out;
+
+ if (!link)
+ goto out; /* No link detected */
+
+ mac->get_link_status = false;
+
+ /* Check if there was DownShift; this must be checked
+ * immediately after link-up.
+ */
+ igc_check_downshift(hw);
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!mac->autoneg) {
+ ret_val = -IGC_ERR_CONFIG;
+ goto out;
+ }
+
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
+ * of MAC speed/duplex configuration. So we only need to
+ * configure Collision Distance in the MAC.
+ */
+ igc_config_collision_dist(hw);
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = igc_config_fc_after_link_up(hw);
+ if (ret_val)
+ hw_dbg("Error configuring flow control\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_config_collision_dist - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ */
+void igc_config_collision_dist(struct igc_hw *hw)
+{
+ u32 tctl;
+
+ tctl = rd32(IGC_TCTL);
+
+ tctl &= ~IGC_TCTL_COLD;
+ tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT;
+
+ wr32(IGC_TCTL, tctl);
+ wrfl();
+}
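+
+/* Worked example: IGC_COLLISION_DISTANCE (63, i.e. 0x3f) shifted left by
+ * IGC_COLD_SHIFT (12) is 0x3f000, which lands inside the IGC_TCTL_COLD
+ * field mask (0x003ff000) cleared just above.
+ */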
+
+/**
+ * igc_config_fc_after_link_up - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced. If the link needed to be forced, then
+ * flow control needs to be forced also. If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ */
+s32 igc_config_fc_after_link_up(struct igc_hw *hw)
+{
+ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+ struct igc_mac_info *mac = &hw->mac;
+ u16 speed, duplex;
+ s32 ret_val = 0;
+
+ /* Check for the case where auto-negotiation failed and we had to
+ * force link. In this case, we need to force the configuration of
+ * the MAC to match the "fc" parameter.
+ */
+ if (mac->autoneg_failed) {
+ if (hw->phy.media_type == igc_media_type_copper)
+ ret_val = igc_force_mac_fc(hw);
+ }
+
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ goto out;
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if (hw->phy.media_type == igc_media_type_copper && mac->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+ &mii_status_reg);
+ if (ret_val)
+ goto out;
+
+ if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
+ goto out;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (Address 4) and the Auto_Negotiation Base
+ * Page Ability Register (Address 5) to determine how
+ * flow control was negotiated.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ goto out;
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | igc_fc_none
+ * 0 | 1 | 0 | DC | igc_fc_none
+ * 0 | 1 | 1 | 0 | igc_fc_none
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ * 1 | 0 | 0 | DC | igc_fc_none
+ * 1 | DC | 1 | DC | igc_fc_full
+ * 1 | 1 | 0 | 0 | igc_fc_none
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+		 *   1   |   DC    |   1   |   DC    | igc_fc_full
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+			/* Now we need to check if the user selected reception
+			 * of PAUSE frames only. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == igc_fc_full) {
+ hw->fc.current_mode = igc_fc_full;
+ hw_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+ }
+
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | igc_fc_tx_pause
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_tx_pause;
+ hw_dbg("Flow Control = TX PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | igc_fc_rx_pause
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+ /* Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+		 * predominantly a server NIC, more often than not we will
+		 * be asked to delay transmission of packets rather than to
+		 * ask our link partner to pause transmission of frames.
+ */
+ else if ((hw->fc.requested_mode == igc_fc_none) ||
+ (hw->fc.requested_mode == igc_fc_tx_pause) ||
+ (hw->fc.strict_ieee)) {
+ hw->fc.current_mode = igc_fc_none;
+ hw_dbg("Flow Control = NONE.\n");
+ } else {
+ hw->fc.current_mode = igc_fc_rx_pause;
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ hw_dbg("Error getting link speed and duplex\n");
+ goto out;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc.current_mode = igc_fc_none;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = igc_force_mac_fc(hw);
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ goto out;
+ }
+ }
+
+out:
+	return ret_val;
+}
+
+/**
+ * igc_get_auto_rd_done - Check for auto read completion
+ * @hw: pointer to the HW structure
+ *
+ * Check EEPROM for Auto Read done bit.
+ */
+s32 igc_get_auto_rd_done(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+ s32 i = 0;
+
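+	/* Poll the Auto Read Done bit in EECD; each iteration sleeps for
+	 * 1-2 ms, so the total wait is roughly AUTO_READ_DONE_TIMEOUT ms.
+	 */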
+ while (i < AUTO_READ_DONE_TIMEOUT) {
+ if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD)
+ break;
+ usleep_range(1000, 2000);
+ i++;
+ }
+
+ if (i == AUTO_READ_DONE_TIMEOUT) {
+ hw_dbg("Auto read by HW from NVM has not completed.\n");
+ ret_val = -IGC_ERR_RESET;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Read the status register for the current speed/duplex and store the current
+ * speed and duplex for copper connections.
+ */
+s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ u32 status;
+
+ status = rd32(IGC_STATUS);
+ if (status & IGC_STATUS_SPEED_1000) {
+ /* For I225, STATUS will indicate 1G speed in both 1 Gbps
+ * and 2.5 Gbps link modes. An additional bit is used
+ * to differentiate between 1 Gbps and 2.5 Gbps.
+ */
+ if (hw->mac.type == igc_i225 &&
+ (status & IGC_STATUS_SPEED_2500)) {
+ *speed = SPEED_2500;
+ hw_dbg("2500 Mbs, ");
+ } else {
+ *speed = SPEED_1000;
+ hw_dbg("1000 Mbs, ");
+ }
+ } else if (status & IGC_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ hw_dbg("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ hw_dbg("10 Mbs, ");
+ }
+
+ if (status & IGC_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ hw_dbg("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ hw_dbg("Half Duplex\n");
+ }
+
+ return 0;
+}
+
+/**
+ * igc_put_hw_semaphore - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ */
+void igc_put_hw_semaphore(struct igc_hw *hw)
+{
+ u32 swsm;
+
+ swsm = rd32(IGC_SWSM);
+
+ swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI);
+
+ wr32(IGC_SWSM, swsm);
+}
+
+/**
+ * igc_enable_mng_pass_thru - Enable processing of ARPs
+ * @hw: pointer to the HW structure
+ *
+ * Verifies that the hardware needs to leave the interface enabled so that
+ * frames can be directed to and from the management interface.
+ */
+bool igc_enable_mng_pass_thru(struct igc_hw *hw)
+{
+ bool ret_val = false;
+ u32 fwsm, factps;
+ u32 manc;
+
+ if (!hw->mac.asf_firmware_present)
+ goto out;
+
+ manc = rd32(IGC_MANC);
+
+ if (!(manc & IGC_MANC_RCV_TCO_EN))
+ goto out;
+
+ if (hw->mac.arc_subsystem_valid) {
+ fwsm = rd32(IGC_FWSM);
+ factps = rd32(IGC_FACTPS);
+
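+		/* Pass-through is active when the manageability clock is
+		 * not gated (FACTPS) and the firmware operating mode is
+		 * pass-through (FWSM).
+		 */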
+ if (!(factps & IGC_FACTPS_MNGCG) &&
+ ((fwsm & IGC_FWSM_MODE_MASK) ==
+ (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) {
+ ret_val = true;
+ goto out;
+ }
+ } else {
+ if ((manc & IGC_MANC_SMBUS_EN) &&
+ !(manc & IGC_MANC_ASF_EN)) {
+ ret_val = true;
+ goto out;
+ }
+ }
+
+out:
+ return ret_val;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.h b/drivers/net/ethernet/intel/igc/igc_mac.h
new file mode 100644
index 000000000000..782bc995badc
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_mac.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_MAC_H_
+#define _IGC_MAC_H_
+
+#include "igc_hw.h"
+#include "igc_phy.h"
+#include "igc_defines.h"
+
+#ifndef IGC_REMOVED
+#define IGC_REMOVED(a) (0)
+#endif /* IGC_REMOVED */
+
+/* forward declarations */
+s32 igc_disable_pcie_master(struct igc_hw *hw);
+s32 igc_check_for_copper_link(struct igc_hw *hw);
+s32 igc_config_fc_after_link_up(struct igc_hw *hw);
+s32 igc_force_mac_fc(struct igc_hw *hw);
+void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count);
+s32 igc_setup_link(struct igc_hw *hw);
+void igc_clear_hw_cntrs_base(struct igc_hw *hw);
+s32 igc_get_auto_rd_done(struct igc_hw *hw);
+void igc_put_hw_semaphore(struct igc_hw *hw);
+void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index);
+void igc_config_collision_dist(struct igc_hw *hw);
+
+s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
+ u16 *duplex);
+
+bool igc_enable_mng_pass_thru(struct igc_hw *hw);
+
+enum igc_mng_mode {
+ igc_mng_mode_none = 0,
+ igc_mng_mode_asf,
+ igc_mng_mode_pt,
+ igc_mng_mode_ipmi,
+ igc_mng_mode_host_if_only
+};
+
+#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
new file mode 100644
index 000000000000..9d85707e8a81
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -0,0 +1,3901 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/if_vlan.h>
+#include <linux/aer.h>
+
+#include "igc.h"
+#include "igc_hw.h"
+
+#define DRV_VERSION "0.0.1-k"
+#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
+
+static int debug = -1;
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+char igc_driver_name[] = "igc";
+char igc_driver_version[] = DRV_VERSION;
+static const char igc_driver_string[] = DRV_SUMMARY;
+static const char igc_copyright[] =
+ "Copyright(c) 2018 Intel Corporation.";
+
+static const struct igc_info *igc_info_tbl[] = {
+ [board_base] = &igc_base_info,
+};
+
+static const struct pci_device_id igc_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
+
+/* forward declarations */
+static void igc_clean_tx_ring(struct igc_ring *tx_ring);
+static int igc_sw_init(struct igc_adapter *);
+static void igc_configure(struct igc_adapter *adapter);
+static void igc_power_down_link(struct igc_adapter *adapter);
+static void igc_set_default_mac_filter(struct igc_adapter *adapter);
+static void igc_set_rx_mode(struct net_device *netdev);
+static void igc_write_itr(struct igc_q_vector *q_vector);
+static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);
+static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx);
+static void igc_set_interrupt_capability(struct igc_adapter *adapter,
+ bool msix);
+static void igc_free_q_vectors(struct igc_adapter *adapter);
+static void igc_irq_disable(struct igc_adapter *adapter);
+static void igc_irq_enable(struct igc_adapter *adapter);
+static void igc_configure_msix(struct igc_adapter *adapter);
+static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *bi);
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+static void igc_reset(struct igc_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ hw->mac.ops.reset_hw(hw);
+
+ if (hw->mac.ops.init_hw(hw))
+ dev_err(&pdev->dev, "Hardware Error\n");
+
+ if (!netif_running(adapter->netdev))
+ igc_power_down_link(adapter);
+
+ igc_get_phy_info(hw);
+}
+
+/**
+ * igc_power_up_link - Power up the phy/serdes link
+ * @adapter: address of board private structure
+ */
+static void igc_power_up_link(struct igc_adapter *adapter)
+{
+ igc_reset_phy(&adapter->hw);
+
+ if (adapter->hw.phy.media_type == igc_media_type_copper)
+ igc_power_up_phy_copper(&adapter->hw);
+
+ igc_setup_link(&adapter->hw);
+}
+
+/**
+ * igc_power_down_link - Power down the phy/serdes link
+ * @adapter: address of board private structure
+ */
+static void igc_power_down_link(struct igc_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == igc_media_type_copper)
+ igc_power_down_phy_copper_base(&adapter->hw);
+}
+
+/**
+ * igc_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ */
+static void igc_release_hw_control(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+
+ /* Let firmware take over control of h/w */
+ ctrl_ext = rd32(IGC_CTRL_EXT);
+ wr32(IGC_CTRL_EXT,
+ ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
+}
+
+/**
+ * igc_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded.
+ */
+static void igc_get_hw_control(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = rd32(IGC_CTRL_EXT);
+ wr32(IGC_CTRL_EXT,
+ ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
+}
+
+/**
+ * igc_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ */
+static void igc_free_tx_resources(struct igc_ring *tx_ring)
+{
+ igc_clean_tx_ring(tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * igc_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ */
+static void igc_free_all_tx_resources(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igc_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
+ * igc_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ */
+static void igc_clean_tx_ring(struct igc_ring *tx_ring)
+{
+ u16 i = tx_ring->next_to_clean;
+ struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+
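+	/* Walk every outstanding buffer from next_to_clean to next_to_use,
+	 * freeing the skb at each EOP and unmapping every fragment.
+	 */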
+ while (i != tx_ring->next_to_use) {
+ union igc_adv_tx_desc *eop_desc, *tx_desc;
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = IGC_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGC_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+
+ /* reset BQL for queue */
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ /* reset next_to_use and next_to_clean */
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * igc_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ */
+static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tx_ring[i])
+ igc_clean_tx_ring(adapter->tx_ring[i]);
+}
+
+/**
+ * igc_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ */
+static int igc_setup_tx_resources(struct igc_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int size = 0;
+
+ size = sizeof(struct igc_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vzalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ dev_err(dev,
+ "Unable to allocate memory for the transmit descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = igc_setup_tx_resources(adapter->tx_ring[i]);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Allocation for Tx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ igc_free_tx_resources(adapter->tx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * igc_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ */
+static void igc_clean_rx_ring(struct igc_ring *rx_ring)
+{
+ u16 i = rx_ring->next_to_clean;
+
+ if (rx_ring->skb)
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+
+ /* Free all the Rx ring sk_buffs */
+ while (i != rx_ring->next_to_alloc) {
+ struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ buffer_info->dma,
+ buffer_info->page_offset,
+ igc_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev,
+ buffer_info->dma,
+ igc_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IGC_RX_DMA_ATTR);
+ __page_frag_cache_drain(buffer_info->page,
+ buffer_info->pagecnt_bias);
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * igc_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ */
+static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i])
+ igc_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ * igc_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ */
+static void igc_free_rx_resources(struct igc_ring *rx_ring)
+{
+ igc_clean_rx_ring(rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * igc_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ */
+static void igc_free_all_rx_resources(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igc_free_rx_resources(adapter->rx_ring[i]);
+}
+
+/**
+ * igc_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int igc_setup_rx_resources(struct igc_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int size, desc_len;
+
+ size = sizeof(struct igc_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
+ desc_len = sizeof(union igc_adv_rx_desc);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc)
+ goto err;
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev,
+ "Unable to allocate memory for the receive descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * igc_setup_all_rx_resources - wrapper to allocate Rx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = igc_setup_rx_resources(adapter->rx_ring[i]);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Allocation for Rx Queue %u failed\n", i);
+ for (i--; i >= 0; i--)
+ igc_free_rx_resources(adapter->rx_ring[i]);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * igc_configure_rx_ring - Configure a receive ring after Reset
+ * @adapter: board private structure
+ * @ring: receive ring to be configured
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ */
+static void igc_configure_rx_ring(struct igc_adapter *adapter,
+ struct igc_ring *ring)
+{
+ struct igc_hw *hw = &adapter->hw;
+ union igc_adv_rx_desc *rx_desc;
+ int reg_idx = ring->reg_idx;
+ u32 srrctl = 0, rxdctl = 0;
+ u64 rdba = ring->dma;
+
+ /* disable the queue */
+ wr32(IGC_RXDCTL(reg_idx), 0);
+
+ /* Set DMA base address registers */
+ wr32(IGC_RDBAL(reg_idx),
+ rdba & 0x00000000ffffffffULL);
+ wr32(IGC_RDBAH(reg_idx), rdba >> 32);
+ wr32(IGC_RDLEN(reg_idx),
+ ring->count * sizeof(union igc_adv_rx_desc));
+
+ /* initialize head and tail */
+ ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
+ wr32(IGC_RDH(reg_idx), 0);
+ writel(0, ring->tail);
+
+	/* reset next_to_use/next_to_clean to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ /* set descriptor configuration */
+ srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ if (ring_uses_large_buffer(ring))
+ srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ wr32(IGC_SRRCTL(reg_idx), srrctl);
+
+ rxdctl |= IGC_RX_PTHRESH;
+ rxdctl |= IGC_RX_HTHRESH << 8;
+ rxdctl |= IGC_RX_WTHRESH << 16;
+
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct igc_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = IGC_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
+ /* enable receive descriptor fetching */
+ rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
+
+ wr32(IGC_RXDCTL(reg_idx), rxdctl);
+}
+
+/**
+ * igc_configure_rx - Configure receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ */
+static void igc_configure_rx(struct igc_adapter *adapter)
+{
+ int i;
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
+}
+
+/**
+ * igc_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
+ *
+ * Configure a transmit ring after a reset.
+ */
+static void igc_configure_tx_ring(struct igc_adapter *adapter,
+ struct igc_ring *ring)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int reg_idx = ring->reg_idx;
+ u64 tdba = ring->dma;
+ u32 txdctl = 0;
+
+ /* disable the queue */
+ wr32(IGC_TXDCTL(reg_idx), 0);
+ wrfl();
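+	/* give the disable time to take effect before reprogramming the ring */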
+ mdelay(10);
+
+ wr32(IGC_TDLEN(reg_idx),
+ ring->count * sizeof(union igc_adv_tx_desc));
+ wr32(IGC_TDBAL(reg_idx),
+ tdba & 0x00000000ffffffffULL);
+ wr32(IGC_TDBAH(reg_idx), tdba >> 32);
+
+ ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
+ wr32(IGC_TDH(reg_idx), 0);
+ writel(0, ring->tail);
+
+ txdctl |= IGC_TX_PTHRESH;
+ txdctl |= IGC_TX_HTHRESH << 8;
+ txdctl |= IGC_TX_WTHRESH << 16;
+
+ txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
+ wr32(IGC_TXDCTL(reg_idx), txdctl);
+}
+
+/**
+ * igc_configure_tx - Configure transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ */
+static void igc_configure_tx(struct igc_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+/**
+ * igc_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
+ */
+static void igc_setup_mrqc(struct igc_adapter *adapter)
+{
+}
+
+/**
+ * igc_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ */
+static void igc_setup_rctl(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 rctl;
+
+ rctl = rd32(IGC_RCTL);
+
+ rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
+ rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);
+
+ rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
+ (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
+
+ /* enable stripping of CRC. Newer features require
+ * that the HW strips the CRC.
+ */
+ rctl |= IGC_RCTL_SECRC;
+
+ /* disable store bad packets and clear size bits. */
+ rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);
+
+ /* enable LPE to allow for reception of jumbo frames */
+ rctl |= IGC_RCTL_LPE;
+
+ /* disable queue 0 to prevent tail write w/o re-config */
+ wr32(IGC_RXDCTL(0), 0);
+
+ /* This is useful for sniffing bad packets. */
+ if (adapter->netdev->features & NETIF_F_RXALL) {
+ /* UPE and MPE will be handled by normal PROMISC logic
+ * in set_rx_mode
+ */
+ rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
+ IGC_RCTL_BAM | /* RX All Bcast Pkts */
+ IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+ rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
+ IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
+ }
+
+ wr32(IGC_RCTL, rctl);
+}
+
+/**
+ * igc_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
+ */
+static void igc_setup_tctl(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 tctl;
+
+	/* disable queue 0 which could be enabled by default */
+ wr32(IGC_TXDCTL(0), 0);
+
+ /* Program the Transmit Control Register */
+ tctl = rd32(IGC_TCTL);
+ tctl &= ~IGC_TCTL_CT;
+ tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
+ (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
+
+ /* Enable transmits */
+ tctl |= IGC_TCTL_EN;
+
+ wr32(IGC_TCTL, tctl);
+}
+
+/**
+ * igc_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int igc_set_mac(struct net_device *netdev, void *p)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ /* set the correct pool for the new PF MAC address in entry 0 */
+ igc_set_default_mac_filter(adapter);
+
+ return 0;
+}
+
+static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
+{
+}
+
+static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
+{
+ struct net_device *netdev = tx_ring->netdev;
+
+ netif_stop_subqueue(netdev, tx_ring->queue_index);
+
+	/* A memory barrier is needed to make the stopped-queue state
+	 * visible before we re-check for free descriptors below.
+	 */
+ smp_mb();
+
+	/* We need to check again in case another CPU has just
+	 * made room available.
+	 */
+ if (igc_desc_unused(tx_ring) < size)
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+ u64_stats_update_begin(&tx_ring->tx_syncp2);
+ tx_ring->tx_stats.restart_queue2++;
+ u64_stats_update_end(&tx_ring->tx_syncp2);
+
+ return 0;
+}
+
+static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
+{
+ if (igc_desc_unused(tx_ring) >= size)
+ return 0;
+ return __igc_maybe_stop_tx(tx_ring, size);
+}
+
+static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
+{
+ /* set type for advanced descriptor with frame checksum insertion */
+ u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
+ IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS;
+
+ return cmd_type;
+}
+
+static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
+ union igc_adv_tx_desc *tx_desc,
+ u32 tx_flags, unsigned int paylen)
+{
+ u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
+
+ /* insert L4 checksum */
+ olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
+ ((IGC_TXD_POPTS_TXSM << 8) /
+ IGC_TX_FLAGS_CSUM);
+
+ /* insert IPv4 checksum */
+ olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
+ (((IGC_TXD_POPTS_IXSM << 8)) /
+ IGC_TX_FLAGS_IPV4);
+
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+}
+
+static int igc_tx_map(struct igc_ring *tx_ring,
+ struct igc_tx_buffer *first,
+ const u8 hdr_len)
+{
+ struct sk_buff *skb = first->skb;
+ struct igc_tx_buffer *tx_buffer;
+ union igc_adv_tx_desc *tx_desc;
+ u32 tx_flags = first->tx_flags;
+ struct skb_frag_struct *frag;
+ u16 i = tx_ring->next_to_use;
+ unsigned int data_len, size;
+ dma_addr_t dma;
+ u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);
+
+ tx_desc = IGC_TX_DESC(tx_ring, i);
+
+ igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_buffer = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
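+		/* cmd_type has no length bits set yet, so XOR-ing a length
+		 * into it (as done below) is equivalent to OR and does not
+		 * clobber the command flags.
+		 */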
+ while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IGC_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ dma += IGC_MAX_DATA_PER_TXD;
+ size -= IGC_MAX_DATA_PER_TXD;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IGC_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
+ size, DMA_TO_DEVICE);
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ }
+
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= size | IGC_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* Make sure there is space in the ring for the next send. */
+ igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ writel(i, tx_ring->tail);
+
+		/* We need this if more than one processor can write to our
+		 * tail at a time; it synchronizes IO on IA64/Altix systems.
+		 */
+ mmiowb();
+ }
+
+ return 0;
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ while (tx_buffer != first) {
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ if (i-- == 0)
+ i += tx_ring->count;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ }
+
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+
+ tx_ring->next_to_use = i;
+
+ return -1;
+}
+
+static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
+ struct igc_ring *tx_ring)
+{
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ __be16 protocol = vlan_get_protocol(skb);
+ struct igc_tx_buffer *first;
+ u32 tx_flags = 0;
+ unsigned short f;
+ u8 hdr_len = 0;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+ if (igc_maybe_stop_tx(tx_ring, count + 3)) {
+ /* this is a hard error */
+ return NETDEV_TX_BUSY;
+ }
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
+ skb_tx_timestamp(skb);
+
+ /* record initial flags and protocol */
+ first->tx_flags = tx_flags;
+ first->protocol = protocol;
+
+ igc_tx_csum(tx_ring, first);
+
+ igc_tx_map(tx_ring, first, hdr_len);
+
+ return NETDEV_TX_OK;
+}
+
+static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
+ struct sk_buff *skb)
+{
+ unsigned int r_idx = skb->queue_mapping;
+
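+	/* skb->queue_mapping can exceed the number of Tx queues; fold it
+	 * back into range with a modulo.
+	 */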
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+ if (skb->len < 17) {
+ if (skb_padto(skb, 17))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ }
+
+ return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
+}
+
+static inline void igc_rx_hash(struct igc_ring *ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (ring->netdev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb,
+ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ PKT_HASH_TYPE_L3);
+}
+
+/**
+ * igc_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ */
+static void igc_process_skb_fields(struct igc_ring *rx_ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ igc_rx_hash(rx_ring, rx_desc, skb);
+
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
+ const unsigned int size)
+{
+ struct igc_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
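+	/* pagecnt_bias tracks the page references held by the driver;
+	 * give one up as this buffer is handed to the stack.
+	 */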
+ rx_buffer->pagecnt_bias--;
+
+ return rx_buffer;
+}
+
+/**
+ * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: size of buffer to be added
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ */
+static void igc_add_rx_frag(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *rx_buffer,
+ struct sk_buff *skb,
+ unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
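+	/* flip to the other half of the page: XOR with truesize (half the
+	 * page size here) toggles the offset between the two halves
+	 */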
+ rx_buffer->page_offset ^= truesize;
+#else
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+ rx_buffer->page_offset += truesize;
+#endif
+}
+
+static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *rx_buffer,
+ union igc_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(IGC_SKB_PAD + size);
+#endif
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* build an skb around the page buffer */
+ skb = build_skb(va - IGC_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, IGC_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+
+ return skb;
+}
+
+static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *rx_buffer,
+ union igc_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ unsigned int headlen;
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > IGC_RX_HDR_LEN)
+ headlen = eth_get_headlen(va, IGC_RX_HDR_LEN);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+ /* update all of the pointers */
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ (va + headlen) - page_address(rx_buffer->page),
+ size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+
+ return skb;
+}
+
+/**
+ * igc_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ */
+static void igc_reuse_rx_page(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *old_buff)
+{
+ u16 nta = rx_ring->next_to_alloc;
+ struct igc_rx_buffer *new_buff;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls.
+ */
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+static inline bool igc_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
+
+ /* avoid re-using remote pages */
+ if (unlikely(igc_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ return false;
+#else
+#define IGC_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
+
+ if (rx_buffer->page_offset > IGC_LAST_OFFSET)
+ return false;
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(!pagecnt_bias)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
+
+/**
+ * igc_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will return true
+ * indicating that this is in fact a non-EOP buffer.
+ */
+static bool igc_is_non_eop(struct igc_ring *rx_ring,
+ union igc_adv_rx_desc *rx_desc)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IGC_RX_DESC(rx_ring, ntc));
+
+ if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
+ return false;
+
+ return true;
+}
+
+/**
+ * igc_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition, if the skb is not at least 60 bytes we need to pad it
+ * so that it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ */
+static bool igc_cleanup_headers(struct igc_ring *rx_ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (unlikely((igc_test_staterr(rx_desc,
+ IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ struct net_device *netdev = rx_ring->netdev;
+
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ }
+
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
+
+ return false;
+}
+
+static void igc_put_rx_buffer(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *rx_buffer)
+{
+ if (igc_can_reuse_rx_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ igc_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+ IGC_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+}
+
+/**
+ * igc_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: rx descriptor ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ */
+static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
+{
+ union igc_adv_rx_desc *rx_desc;
+ u16 i = rx_ring->next_to_use;
+ struct igc_rx_buffer *bi;
+ u16 bufsz;
+
+ /* nothing to do */
+ if (!cleaned_count)
+ return;
+
+ rx_desc = IGC_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
+
+ bufsz = igc_rx_bufsz(rx_ring);
+
+ do {
+ if (!igc_alloc_mapped_page(rx_ring, bi))
+ break;
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset, bufsz,
+ DMA_FROM_DEVICE);
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = IGC_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ /* record the next descriptor to use */
+ rx_ring->next_to_use = i;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
+}
+
+static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
+{
+ unsigned int total_bytes = 0, total_packets = 0;
+ struct igc_ring *rx_ring = q_vector->rx.ring;
+ struct sk_buff *skb = rx_ring->skb;
+ u16 cleaned_count = igc_desc_unused(rx_ring);
+
+ while (likely(total_packets < budget)) {
+ union igc_adv_rx_desc *rx_desc;
+ struct igc_rx_buffer *rx_buffer;
+ unsigned int size;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
+ igc_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
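+		/* hardware writes a non-zero length on completion, so a
+		 * length of zero means this descriptor has not been written
+		 * back yet
+		 */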
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ rx_buffer = igc_get_rx_buffer(rx_ring, size);
+
+ /* retrieve a buffer from the ring */
+ if (skb)
+ igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ else
+ skb = igc_construct_skb(rx_ring, rx_buffer,
+ rx_desc, size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
+
+ igc_put_rx_buffer(rx_ring, rx_buffer);
+ cleaned_count++;
+
+ /* fetch next buffer in frame if non-eop */
+ if (igc_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+ /* verify the packet layout is correct */
+ if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igc_process_skb_fields(rx_ring, rx_desc, skb);
+
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* reset skb pointer */
+ skb = NULL;
+
+ /* update budget accounting */
+ total_packets++;
+ }
+
+ /* place incomplete frames back on ring for completion */
+ rx_ring->skb = skb;
+
+ u64_stats_update_begin(&rx_ring->rx_syncp);
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ u64_stats_update_end(&rx_ring->rx_syncp);
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igc_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return total_packets;
+}
+
+static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
+}
+
+static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
+ struct igc_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
+ return true;
+
+ /* alloc new page for storage */
+ page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ /* map page for use */
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ igc_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IGC_RX_DMA_ATTR);
+
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
+
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = igc_rx_offset(rx_ring);
+ bi->pagecnt_bias = 1;
+
+ return true;
+}
+
+/**
+ * igc_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: pointer to q_vector containing needed info
+ * @napi_budget: Used to determine if we are in netpoll
+ *
+ * returns true if ring is completely cleaned
+ */
+static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = q_vector->tx.work_limit;
+ struct igc_ring *tx_ring = q_vector->tx.ring;
+ unsigned int i = tx_ring->next_to_clean;
+ struct igc_tx_buffer *tx_buffer;
+ union igc_adv_tx_desc *tx_desc;
+
+ if (test_bit(__IGC_DOWN, &adapter->state))
+ return true;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IGC_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+
+ /* free the skb */
+ napi_consume_skb(tx_buffer->skb, napi_budget);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* clear last DMA location and unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGC_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IGC_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ netdev_tx_completed_queue(txring_txq(tx_ring),
+ total_packets, total_bytes);
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->tx_syncp);
+ tx_ring->tx_stats.bytes += total_bytes;
+ tx_ring->tx_stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->tx_syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+ if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
+ struct igc_hw *hw = &adapter->hw;
+
+		/* Detect a transmit hang in hardware; this serializes the
+		 * check with the clearing of time_stamp and movement of i.
+		 */
+ clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+ if (tx_buffer->next_to_watch &&
+ time_after(jiffies, tx_buffer->time_stamp +
+ (adapter->tx_timeout_factor * HZ)) &&
+ !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
+ /* detected Tx unit hang */
+ dev_err(tx_ring->dev,
+ "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%p>\n"
+ " jiffies <%lx>\n"
+ " desc.status <%x>\n",
+ tx_ring->queue_index,
+ rd32(IGC_TDH(tx_ring->reg_idx)),
+ readl(tx_ring->tail),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_buffer->time_stamp,
+ tx_buffer->next_to_watch,
+ jiffies,
+ tx_buffer->next_to_watch->wb.status);
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+
+ /* we are about to reset, no point in enabling stuff */
+ return true;
+ }
+ }
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets &&
+ netif_carrier_ok(tx_ring->netdev) &&
+ igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !(test_bit(__IGC_DOWN, &adapter->state))) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+
+ u64_stats_update_begin(&tx_ring->tx_syncp);
+ tx_ring->tx_stats.restart_queue++;
+ u64_stats_update_end(&tx_ring->tx_syncp);
+ }
+ }
+
+ return !!budget;
+}
+
+/**
+ * igc_ioctl - I/O control method
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: ioctl command
+ */
+static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * igc_up - Open the interface and prepare it to handle traffic
+ * @adapter: board private structure
+ */
+static void igc_up(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i = 0;
+
+ /* hardware has been reset, we need to reload some things */
+ igc_configure(adapter);
+
+ clear_bit(__IGC_DOWN, &adapter->state);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ napi_enable(&adapter->q_vector[i]->napi);
+
+ if (adapter->msix_entries)
+ igc_configure_msix(adapter);
+ else
+ igc_assign_vector(adapter->q_vector[0], 0);
+
+ /* Clear any pending interrupts. */
+ rd32(IGC_ICR);
+ igc_irq_enable(adapter);
+
+ netif_tx_start_all_queues(adapter->netdev);
+
+ /* start the watchdog. */
+ hw->mac.get_link_status = 1;
+ schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * igc_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ */
+static void igc_update_stats(struct igc_adapter *adapter)
+{
+}
+
+static void igc_nfc_filter_exit(struct igc_adapter *adapter)
+{
+}
+
+/**
+ * igc_down - Close the interface
+ * @adapter: board private structure
+ */
+static void igc_down(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igc_hw *hw = &adapter->hw;
+ u32 tctl, rctl;
+ int i = 0;
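+	/* The (tx_flags & FLAG) * (BIT / FLAG) expressions below convert a
+	 * single tx_flags bit into the corresponding POPTS bit without a
+	 * branch; this relies on each mask being a single bit.
+	 */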
+
+ set_bit(__IGC_DOWN, &adapter->state);
+
+ /* disable receives in the hardware */
+ rctl = rd32(IGC_RCTL);
+ wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
+ /* flush and sleep below */
+
+ igc_nfc_filter_exit(adapter);
+
+ /* set trans_start so we don't get spurious watchdogs during reset */
+ netif_trans_update(netdev);
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ /* disable transmits in the hardware */
+ tctl = rd32(IGC_TCTL);
+ tctl &= ~IGC_TCTL_EN;
+ wr32(IGC_TCTL, tctl);
+ /* flush both disables and wait for them to finish */
+ wrfl();
+ usleep_range(10000, 20000);
+
+ igc_irq_disable(adapter);
+
+ adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ if (adapter->q_vector[i]) {
+ napi_synchronize(&adapter->q_vector[i]->napi);
+ napi_disable(&adapter->q_vector[i]->napi);
+ }
+ }
+
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+	/* record the stats before reset */
+ spin_lock(&adapter->stats64_lock);
+ igc_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
+
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ if (!pci_channel_offline(adapter->pdev))
+ igc_reset(adapter);
+
+ /* clear VLAN promisc flag so VFTA will be updated if necessary */
+ adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
+
+ igc_clean_all_tx_rings(adapter);
+ igc_clean_all_rx_rings(adapter);
+}
+
+static void igc_reinit_locked(struct igc_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ igc_down(adapter);
+ igc_up(adapter);
+ clear_bit(__IGC_RESETTING, &adapter->state);
+}
+
+static void igc_reset_task(struct work_struct *work)
+{
+ struct igc_adapter *adapter;
+
+ adapter = container_of(work, struct igc_adapter, reset_task);
+
+ netdev_err(adapter->netdev, "Reset adapter\n");
+ igc_reinit_locked(adapter);
+}
+
+/**
+ * igc_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int igc_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
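+ /* Example: the default MTU of 1500 gives max_frame = 1522, i.e.
+ * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN).
+ */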
+
+ while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ /* igc_down has a dependency on max_frame_size */
+ adapter->max_frame_size = max_frame;
+
+ if (netif_running(netdev))
+ igc_down(adapter);
+
+ dev_info(&pdev->dev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ igc_up(adapter);
+ else
+ igc_reset(adapter);
+
+ clear_bit(__IGC_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+/**
+ * igc_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are updated here and also from the timer callback.
+ */
+static struct net_device_stats *igc_get_stats(struct net_device *netdev)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__IGC_RESETTING, &adapter->state))
+ igc_update_stats(adapter);
+
+ /* only return the current stats */
+ return &netdev->stats;
+}
+
+/**
+ * igc_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
+ */
+static void igc_configure(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i = 0;
+
+ igc_get_hw_control(adapter);
+ igc_set_rx_mode(netdev);
+
+ igc_setup_tctl(adapter);
+ igc_setup_mrqc(adapter);
+ igc_setup_rctl(adapter);
+
+ igc_configure_tx(adapter);
+ igc_configure_rx(adapter);
+
+ igc_rx_fifo_flush_base(&adapter->hw);
+
+ /* call igc_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean
+ */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igc_ring *ring = adapter->rx_ring[i];
+
+ igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
+ }
+}
+
+/**
+ * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
+ * @adapter: Pointer to adapter structure
+ * @index: Index of the RAR entry which needs to be synced with MAC table
+ */
+static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
+{
+ u8 *addr = adapter->mac_table[index].addr;
+ struct igc_hw *hw = &adapter->hw;
+ u32 rar_low, rar_high;
+
+ /* HW expects these to be in network order when they are plugged
+ * into the registers which are little endian. In order to guarantee
+ * that ordering we need to do an leXX_to_cpup here in order to be
+ * ready for the byteswap that occurs with writel
+ */
+ rar_low = le32_to_cpup((__le32 *)(addr));
+ rar_high = le16_to_cpup((__le16 *)(addr + 4));
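+ /* Example: for the address 00:1b:21:aa:bb:cc the two reads above
+ * give rar_low = 0xaa211b00 and rar_high = 0x0000ccbb, before the
+ * valid and pool bits are ORed into rar_high below.
+ */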
+
+ /* Indicate to hardware the Address is Valid. */
+ if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
+ if (is_valid_ether_addr(addr))
+ rar_high |= IGC_RAH_AV;
+
+ rar_high |= IGC_RAH_POOL_1 <<
+ adapter->mac_table[index].queue;
+ }
+
+ wr32(IGC_RAL(index), rar_low);
+ wrfl();
+ wr32(IGC_RAH(index), rar_high);
+ wrfl();
+}
+
+/* Set default MAC address for the PF in the first RAR entry */
+static void igc_set_default_mac_filter(struct igc_adapter *adapter)
+{
+ struct igc_mac_addr *mac_table = &adapter->mac_table[0];
+
+ ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
+ mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+
+ igc_rar_set_index(adapter, 0);
+}
+
+/**
+ * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void igc_set_rx_mode(struct net_device *netdev)
+{
+}
+
+/**
+ * igc_msix_other - msix other interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to our adapter structure
+ */
+static irqreturn_t igc_msix_other(int irq, void *data)
+{
+ struct igc_adapter *adapter = data;
+ struct igc_hw *hw = &adapter->hw;
+ u32 icr = rd32(IGC_ICR);
+
+ /* reading ICR causes bit 31 of EICR to be cleared */
+ if (icr & IGC_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & IGC_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
+ if (icr & IGC_ICR_LSC) {
+ hw->mac.get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__IGC_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ wr32(IGC_EIMS, adapter->eims_other);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * igc_write_ivar - configure ivar for given MSI-X vector
+ * @hw: pointer to the HW structure
+ * @msix_vector: vector number we are allocating to a given ring
+ * @index: row index of IVAR register to write within IVAR table
+ * @offset: column offset within IVAR, should be a multiple of 8
+ *
+ * The IVAR table consists of 2 columns,
+ * each containing a cause allocation for an Rx and Tx ring, and a
+ * variable number of rows depending on the number of queues supported.
+ */
+static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
+ int index, int offset)
+{
+ u32 ivar = array_rd32(IGC_IVAR0, index);
+
+ /* clear any bits that are currently set */
+ ivar &= ~((u32)0xFF << offset);
+
+ /* write vector and valid bit */
+ ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
+
+ array_wr32(IGC_IVAR0, index, ivar);
+}
+
+static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+ int rx_queue = IGC_N0_QUEUE;
+ int tx_queue = IGC_N0_QUEUE;
+
+ if (q_vector->rx.ring)
+ rx_queue = q_vector->rx.ring->reg_idx;
+ if (q_vector->tx.ring)
+ tx_queue = q_vector->tx.ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case igc_i225:
+ if (rx_queue > IGC_N0_QUEUE)
+ igc_write_ivar(hw, msix_vector,
+ rx_queue >> 1,
+ (rx_queue & 0x1) << 4);
+ if (tx_queue > IGC_N0_QUEUE)
+ igc_write_ivar(hw, msix_vector,
+ tx_queue >> 1,
+ ((tx_queue & 0x1) << 4) + 8);
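+ /* Example: Rx queue 3 lands in IVAR row 1 (3 >> 1) at bit
+ * offset 16 ((3 & 0x1) << 4); the matching Tx queue uses the
+ * same row at offset 24 (16 + 8).
+ */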
+ q_vector->eims_value = BIT(msix_vector);
+ break;
+ default:
+ WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
+ break;
+ }
+
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= q_vector->eims_value;
+
+ /* configure q_vector to set itr on first interrupt */
+ q_vector->set_itr = 1;
+}
+
+/**
+ * igc_configure_msix - Configure MSI-X hardware
+ * @adapter: Pointer to adapter structure
+ *
+ * igc_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ */
+static void igc_configure_msix(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int i, vector = 0;
+ u32 tmp;
+
+ adapter->eims_enable_mask = 0;
+
+ /* set vector for other causes, i.e. link changes */
+ switch (hw->mac.type) {
+ case igc_i225:
+ /* Turn on MSI-X capability first, or our settings
+ * won't stick. And it will take days to debug.
+ */
+ wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
+ IGC_GPIE_PBA | IGC_GPIE_EIAME |
+ IGC_GPIE_NSICR);
+
+ /* enable msix_other interrupt */
+ adapter->eims_other = BIT(vector);
+ tmp = (vector++ | IGC_IVAR_VALID) << 8;
+
+ wr32(IGC_IVAR_MISC, tmp);
+ break;
+ default:
+ /* do nothing, since nothing else supports MSI-X */
+ break;
+ } /* switch (hw->mac.type) */
+
+ adapter->eims_enable_mask |= adapter->eims_other;
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ igc_assign_vector(adapter->q_vector[i], vector++);
+
+ wrfl();
+}
+
+static irqreturn_t igc_msix_ring(int irq, void *data)
+{
+ struct igc_q_vector *q_vector = data;
+
+ /* Write the ITR value calculated from the previous interrupt. */
+ igc_write_itr(q_vector);
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * igc_request_msix - Initialize MSI-X interrupts
+ * @adapter: Pointer to adapter structure
+ *
+ * igc_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ */
+static int igc_request_msix(struct igc_adapter *adapter)
+{
+ int i = 0, err = 0, vector = 0, free_vector = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ &igc_msix_other, 0, netdev->name, adapter);
+ if (err)
+ goto err_out;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igc_q_vector *q_vector = adapter->q_vector[i];
+
+ vector++;
+
+ q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
+
+ if (q_vector->rx.ring && q_vector->tx.ring)
+ sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+ q_vector->rx.ring->queue_index);
+ else if (q_vector->tx.ring)
+ sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+ q_vector->tx.ring->queue_index);
+ else if (q_vector->rx.ring)
+ sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+ q_vector->rx.ring->queue_index);
+ else
+ sprintf(q_vector->name, "%s-unused", netdev->name);
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igc_msix_ring, 0, q_vector->name,
+ q_vector);
+ if (err)
+ goto err_free;
+ }
+
+ igc_configure_msix(adapter);
+ return 0;
+
+err_free:
+ /* free already assigned IRQs */
+ free_irq(adapter->msix_entries[free_vector++].vector, adapter);
+
+ vector--;
+ for (i = 0; i < vector; i++) {
+ free_irq(adapter->msix_entries[free_vector++].vector,
+ adapter->q_vector[i]);
+ }
+err_out:
+ return err;
+}
+
+/**
+ * igc_reset_q_vector - Reset config for interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be reset
+ *
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igc_free_q_vector.
+ */
+static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ /* if we're coming from igc_set_interrupt_capability, the vectors are
+ * not yet allocated
+ */
+ if (!q_vector)
+ return;
+
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+ netif_napi_del(&q_vector->napi);
+}
+
+static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
+ pci_disable_msi(adapter->pdev);
+ }
+
+ while (v_idx--)
+ igc_reset_q_vector(adapter, v_idx);
+}
+
+/**
+ * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
+ * @adapter: Pointer to adapter structure
+ *
+ * This function resets the device so that it has 0 rx queues, tx queues, and
+ * MSI-X interrupts allocated.
+ */
+static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
+{
+ igc_free_q_vectors(adapter);
+ igc_reset_interrupt_capability(adapter);
+}
+
+/**
+ * igc_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ */
+static void igc_free_q_vectors(struct igc_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--) {
+ igc_reset_q_vector(adapter, v_idx);
+ igc_free_q_vector(adapter, v_idx);
+ }
+}
+
+/**
+ * igc_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.
+ */
+static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ adapter->q_vector[v_idx] = NULL;
+
+ /* igc_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ if (q_vector)
+ kfree_rcu(q_vector, rcu);
+}
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+static void igc_update_phy_info(struct timer_list *t)
+{
+ struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+
+ igc_get_phy_info(&adapter->hw);
+}
+
+/**
+ * igc_has_link - check shared code for link and determine up/down
+ * @adapter: pointer to driver private info
+ */
+static bool igc_has_link(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ bool link_active = false;
+
+ /* get_link_status is set on LSC (link status) interrupt or
+ * rx sequence error interrupt. get_link_status will stay
+ * false until the igc_check_for_link establishes link
+ * for copper adapters ONLY
+ */
+ switch (hw->phy.media_type) {
+ case igc_media_type_copper:
+ if (!hw->mac.get_link_status)
+ return true;
+ hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
+ break;
+ default:
+ case igc_media_type_unknown:
+ break;
+ }
+
+ if (hw->mac.type == igc_i225 &&
+ hw->phy.id == I225_I_PHY_ID) {
+ if (!netif_carrier_ok(adapter->netdev)) {
+ adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
+ } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
+ adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ }
+ }
+
+ return link_active;
+}
+
+/**
+ * igc_watchdog - Timer Call-back
+ * @t: pointer to the watchdog timer_list embedded in the adapter
+ */
+static void igc_watchdog(struct timer_list *t)
+{
+ struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->watchdog_task);
+}
+
+static void igc_watchdog_task(struct work_struct *work)
+{
+ struct igc_adapter *adapter = container_of(work,
+ struct igc_adapter,
+ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct igc_hw *hw = &adapter->hw;
+ struct igc_phy_info *phy = &hw->phy;
+ u16 phy_data, retry_count = 20;
+ u32 connsw;
+ u32 link;
+ int i;
+
+ link = igc_has_link(adapter);
+
+ if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
+ if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+ adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
+ else
+ link = false;
+ }
+
+ /* Force link down if we have fiber to swap to */
+ if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
+ if (hw->phy.media_type == igc_media_type_copper) {
+ connsw = rd32(IGC_CONNSW);
+ if (!(connsw & IGC_CONNSW_AUTOSENSE_EN))
+ link = 0;
+ }
+ }
+ if (link) {
+ if (!netif_carrier_ok(netdev)) {
+ u32 ctrl;
+
+ hw->mac.ops.get_speed_and_duplex(hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+
+ ctrl = rd32(IGC_CTRL);
+ /* Link status message must follow this format */
+ netdev_info(netdev,
+ "igc: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full" : "Half",
+ (ctrl & IGC_CTRL_TFCE) &&
+ (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
+ (ctrl & IGC_CTRL_RFCE) ? "RX" :
+ (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
+
+ /* check if SmartSpeed worked */
+ igc_check_downshift(hw);
+ if (phy->speed_downgraded)
+ netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
+
+ /* adjust timeout factor according to speed/duplex */
+ adapter->tx_timeout_factor = 1;
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adapter->tx_timeout_factor = 14;
+ break;
+ case SPEED_100:
+ /* maybe add some timeout factor ? */
+ break;
+ }
+
+ if (adapter->link_speed != SPEED_1000)
+ goto no_wait;
+
+ /* wait for Remote receiver status OK */
+retry_read_status:
+ if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data)) {
+ if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
+ retry_count) {
+ msleep(100);
+ retry_count--;
+ goto retry_read_status;
+ } else if (!retry_count) {
+ dev_err(&adapter->pdev->dev, "exceed max 2 second\n");
+ }
+ } else {
+ dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n");
+ }
+no_wait:
+ netif_carrier_on(netdev);
+
+ /* link state has changed, schedule phy info update */
+ if (!test_bit(__IGC_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+ } else {
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = 0;
+
+ /* Link status message must follow this format */
+ netdev_info(netdev, "igc: %s NIC Link is Down\n",
+ netdev->name);
+ netif_carrier_off(netdev);
+
+ /* link state has changed, schedule phy info update */
+ if (!test_bit(__IGC_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+
+ /* link is down, time to check for alternate media */
+ if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
+ if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
+
+ /* also check for alternate media here */
+ } else if (!netif_carrier_ok(netdev) &&
+ (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
+ if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
+ }
+
+ spin_lock(&adapter->stats64_lock);
+ igc_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ if (!netif_carrier_ok(netdev)) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
+ }
+ }
+
+ /* Force detection of hung controller every watchdog period */
+ set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+ }
+
+ /* Cause software interrupt to ensure Rx ring is cleaned */
+ if (adapter->flags & IGC_FLAG_HAS_MSIX) {
+ u32 eics = 0;
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ eics |= adapter->q_vector[i]->eims_value;
+ wr32(IGC_EICS, eics);
+ } else {
+ wr32(IGC_ICS, IGC_ICS_RXDMT0);
+ }
+
+ /* Reset the timer */
+ if (!test_bit(__IGC_DOWN, &adapter->state)) {
+ if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + HZ));
+ else
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+}
+
+/**
+ * igc_update_ring_itr - update the dynamic ITR value based on packet size
+ * @q_vector: pointer to q_vector
+ *
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igc_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
+ */
+static void igc_update_ring_itr(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ int new_val = q_vector->itr_val;
+ int avg_wire_size = 0;
+ unsigned int packets;
+
+ /* For non-gigabit speeds, just fix the interrupt rate at 4000
+ * ints/sec - ITR timer value of 120 ticks.
+ */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ new_val = IGC_4K_ITR;
+ goto set_itr_val;
+ default:
+ break;
+ }
+
+ packets = q_vector->rx.total_packets;
+ if (packets)
+ avg_wire_size = q_vector->rx.total_bytes / packets;
+
+ packets = q_vector->tx.total_packets;
+ if (packets)
+ avg_wire_size = max_t(u32, avg_wire_size,
+ q_vector->tx.total_bytes / packets);
+
+ /* if avg_wire_size isn't set no work was done */
+ if (!avg_wire_size)
+ goto clear_counts;
+
+ /* Add 24 bytes to size to account for CRC, preamble, and gap */
+ avg_wire_size += 24;
+
+ /* Don't starve jumbo frames */
+ avg_wire_size = min(avg_wire_size, 3000);
+
+ /* Give a little boost to mid-size frames */
+ if (avg_wire_size > 300 && avg_wire_size < 1200)
+ new_val = avg_wire_size / 3;
+ else
+ new_val = avg_wire_size / 2;
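+ /* Example: an average frame of 600 bytes becomes 624 after the
+ * overhead above, falls in the mid-size band, and so yields
+ * new_val = 624 / 3 = 208.
+ */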
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (new_val < IGC_20K_ITR &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ new_val = IGC_20K_ITR;
+
+set_itr_val:
+ if (new_val != q_vector->itr_val) {
+ q_vector->itr_val = new_val;
+ q_vector->set_itr = 1;
+ }
+clear_counts:
+ q_vector->rx.total_bytes = 0;
+ q_vector->rx.total_packets = 0;
+ q_vector->tx.total_bytes = 0;
+ q_vector->tx.total_packets = 0;
+}
+
+/**
+ * igc_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: pointer to q_vector
+ * @ring_container: ring info to update the itr for
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * NOTE: These calculations are only valid when operating in a single-
+ * queue environment.
+ */
+static void igc_update_itr(struct igc_q_vector *q_vector,
+ struct igc_ring_container *ring_container)
+{
+ unsigned int packets = ring_container->total_packets;
+ unsigned int bytes = ring_container->total_bytes;
+ u8 itrval = ring_container->itr;
+
+ /* no packets, exit with status unchanged */
+ if (packets == 0)
+ return;
+
+ switch (itrval) {
+ case lowest_latency:
+ /* handle TSO and jumbo frames */
+ if (bytes / packets > 8000)
+ itrval = bulk_latency;
+ else if ((packets < 5) && (bytes > 512))
+ itrval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ /* this if handles the TSO accounting */
+ if (bytes / packets > 8000)
+ itrval = bulk_latency;
+ else if ((packets < 10) || ((bytes / packets) > 1200))
+ itrval = bulk_latency;
+ else if ((packets > 35))
+ itrval = lowest_latency;
+ } else if (bytes / packets > 2000) {
+ itrval = bulk_latency;
+ } else if (packets <= 2 && bytes < 512) {
+ itrval = lowest_latency;
+ }
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ itrval = low_latency;
+ } else if (bytes < 1500) {
+ itrval = low_latency;
+ }
+ break;
+ }
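+ /* Example: 8 packets totalling 12000 bytes while in low_latency:
+ * bytes > 10000 and packets < 10, so the state steps down to
+ * bulk_latency.
+ */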
+
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itrval;
+}
+
+/**
+ * igc_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to our adapter structure
+ */
+static irqreturn_t igc_intr_msi(int irq, void *data)
+{
+ struct igc_adapter *adapter = data;
+ struct igc_q_vector *q_vector = adapter->q_vector[0];
+ struct igc_hw *hw = &adapter->hw;
+ /* read ICR disables interrupts using IAM */
+ u32 icr = rd32(IGC_ICR);
+
+ igc_write_itr(q_vector);
+
+ if (icr & IGC_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & IGC_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
+ if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
+ hw->mac.get_link_status = 1;
+ if (!test_bit(__IGC_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * igc_intr - Legacy Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to our adapter structure
+ */
+static irqreturn_t igc_intr(int irq, void *data)
+{
+ struct igc_adapter *adapter = data;
+ struct igc_q_vector *q_vector = adapter->q_vector[0];
+ struct igc_hw *hw = &adapter->hw;
+ /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
+ * need for the IMC write
+ */
+ u32 icr = rd32(IGC_ICR);
+
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt
+ */
+ if (!(icr & IGC_ICR_INT_ASSERTED))
+ return IRQ_NONE;
+
+ igc_write_itr(q_vector);
+
+ if (icr & IGC_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
+ if (icr & IGC_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
+ if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
+ hw->mac.get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__IGC_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void igc_set_itr(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ u32 new_itr = q_vector->itr_val;
+ u8 current_itr = 0;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ current_itr = 0;
+ new_itr = IGC_4K_ITR;
+ goto set_itr_now;
+ default:
+ break;
+ }
+
+ igc_update_itr(q_vector, &q_vector->tx);
+ igc_update_itr(q_vector, &q_vector->rx);
+
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+
+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
+ if (current_itr == lowest_latency &&
+ ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+ (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+ current_itr = low_latency;
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
+ break;
+ case low_latency:
+ new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
+ break;
+ case bulk_latency:
+ new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != q_vector->itr_val) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing
+ */
+ new_itr = new_itr > q_vector->itr_val ?
+ max((new_itr * q_vector->itr_val) /
+ (new_itr + (q_vector->itr_val >> 2)),
+ new_itr) : new_itr;
+ /* Don't write the value here; it resets the adapter's
+ * internal timer, and causes us to delay far longer than
+ * we should between interrupts. Instead, we write the ITR
+ * value at the beginning of the next interrupt so the timing
+ * ends up being correct.
+ */
+ q_vector->itr_val = new_itr;
+ q_vector->set_itr = 1;
+ }
+}
+
+static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
+{
+ struct igc_adapter *adapter = q_vector->adapter;
+ struct igc_hw *hw = &adapter->hw;
+
+ if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
+ (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
+ if (adapter->num_q_vectors == 1)
+ igc_set_itr(q_vector);
+ else
+ igc_update_ring_itr(q_vector);
+ }
+
+ if (!test_bit(__IGC_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ wr32(IGC_EIMS, q_vector->eims_value);
+ else
+ igc_irq_enable(adapter);
+ }
+}
+
+/**
+ * igc_poll - NAPI Rx polling callback
+ * @napi: napi polling structure
+ * @budget: count of how many packets we should handle
+ */
+static int igc_poll(struct napi_struct *napi, int budget)
+{
+ struct igc_q_vector *q_vector = container_of(napi,
+ struct igc_q_vector,
+ napi);
+ bool clean_complete = true;
+ int work_done = 0;
+
+ if (q_vector->tx.ring)
+ clean_complete = igc_clean_tx_irq(q_vector, budget);
+
+ if (q_vector->rx.ring) {
+ int cleaned = igc_clean_rx_irq(q_vector, budget);
+
+ work_done += cleaned;
+ if (cleaned >= budget)
+ clean_complete = false;
+ }
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* If not enough Rx work done, exit the polling mode */
+ napi_complete_done(napi, work_done);
+ igc_ring_irq_enable(q_vector);
+
+ return 0;
+}
+
+/**
+ * igc_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: Pointer to adapter structure
+ * @msix: boolean indicating whether MSI-X should be attempted
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ */
+static void igc_set_interrupt_capability(struct igc_adapter *adapter,
+ bool msix)
+{
+ int numvecs, i;
+ int err;
+
+ if (!msix)
+ goto msi_only;
+ adapter->flags |= IGC_FLAG_HAS_MSIX;
+
+ /* Number of supported queues. */
+ adapter->num_rx_queues = adapter->rss_queues;
+
+ adapter->num_tx_queues = adapter->rss_queues;
+
+ /* start with one vector for every Rx queue */
+ numvecs = adapter->num_rx_queues;
+
+ /* if Tx handler is separate add 1 for every Tx queue */
+ if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
+ numvecs += adapter->num_tx_queues;
+
+ /* store the number of vectors reserved for queues */
+ adapter->num_q_vectors = numvecs;
+
+ /* add 1 vector for link status interrupts */
+ numvecs++;
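+ /* Example: 4 RSS queues with unpaired Tx/Rx handlers request
+ * 4 + 4 + 1 = 9 vectors here; with IGC_FLAG_QUEUE_PAIRS set it
+ * would be 4 + 1 = 5.
+ */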
+
+ adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
+
+ if (!adapter->msix_entries)
+ return;
+
+ /* populate entry values */
+ for (i = 0; i < numvecs; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries,
+ numvecs,
+ numvecs);
+ if (err > 0)
+ return;
+
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ igc_reset_interrupt_capability(adapter);
+
+msi_only:
+ adapter->flags &= ~IGC_FLAG_HAS_MSIX;
+
+ adapter->rss_queues = 1;
+ adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_q_vectors = 1;
+ if (!pci_enable_msi(adapter->pdev))
+ adapter->flags |= IGC_FLAG_HAS_MSI;
+}
+
+static void igc_add_ring(struct igc_ring *ring,
+ struct igc_ring_container *head)
+{
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * igc_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int igc_alloc_q_vector(struct igc_adapter *adapter,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
+{
+ struct igc_q_vector *q_vector;
+ struct igc_ring *ring;
+ int ring_count, size;
+
+ /* igc only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct igc_q_vector) +
+ (sizeof(struct igc_ring) * ring_count);
+
+ /* allocate q_vector and rings */
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
+ else
+ memset(q_vector, 0, size);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igc_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
+ q_vector->itr_val = IGC_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ /* initialize ITR */
+ if (rxr_count) {
+ /* rx or rx/tx vector */
+ if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ } else {
+ /* tx only vector */
+ if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
+ q_vector->itr_val = adapter->tx_itr_setting;
+ }
+
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ igc_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ igc_add_ring(ring, &q_vector->rx);
+
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
+}
+
+/**
+ * igc_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+static int igc_alloc_q_vectors(struct igc_adapter *adapter)
+{
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int q_vectors = adapter->num_q_vectors;
+ int err;
+
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++) {
+ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
+ 0, 0, 1, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining--;
+ rxr_idx++;
+ }
+ }
+
+ for (; v_idx < q_vectors; v_idx++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
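+ /* Example: 4 Tx and 4 Rx rings spread across 4 paired vectors
+ * give rqpv = tqpv = 1 on every pass, i.e. one Rx/Tx pair per
+ * vector.
+ */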
+
+ err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
+ tqpv, txr_idx, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
+ return 0;
+
+err_out:
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igc_free_q_vector(adapter, v_idx);
+
+ return -ENOMEM;
+}
+
+/**
+ * igc_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ */
+static void igc_cache_ring_register(struct igc_adapter *adapter)
+{
+ int i = 0, j = 0;
+
+ switch (adapter->hw.mac.type) {
+ case igc_i225:
+ /* Fall through */
+ default:
+ for (; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (; j < adapter->num_tx_queues; j++)
+ adapter->tx_ring[j]->reg_idx = j;
+ break;
+ }
+}
+
+/**
+ * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ * @adapter: Pointer to adapter structure
+ * @msix: boolean indicating whether MSI-X should be attempted
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ */
+static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ igc_set_interrupt_capability(adapter, msix);
+
+ err = igc_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ igc_cache_ring_register(adapter);
+
+ return 0;
+
+err_alloc_q_vectors:
+ igc_reset_interrupt_capability(adapter);
+ return err;
+}
+
+static void igc_free_irq(struct igc_adapter *adapter)
+{
+ if (adapter->msix_entries) {
+ int vector = 0, i;
+
+ free_irq(adapter->msix_entries[vector++].vector, adapter);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ free_irq(adapter->msix_entries[vector++].vector,
+ adapter->q_vector[i]);
+ } else {
+ free_irq(adapter->pdev->irq, adapter);
+ }
+}
+
+/**
+ * igc_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static void igc_irq_disable(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
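+ /* Drop our vectors from the auto-mask (EIAM) and auto-clear
+ * (EIAC) registers, then mask them via EIMC; register semantics
+ * assumed to match the igb-class parts this driver derives from.
+ */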
+ u32 regval = rd32(IGC_EIAM);
+
+ wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
+ wr32(IGC_EIMC, adapter->eims_enable_mask);
+ regval = rd32(IGC_EIAC);
+ wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
+ }
+
+ wr32(IGC_IAM, 0);
+ wr32(IGC_IMC, ~0);
+ wrfl();
+
+ if (adapter->msix_entries) {
+ int vector = 0, i;
+
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+/**
+ * igc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static void igc_irq_enable(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ if (adapter->msix_entries) {
+ u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
+ u32 regval = rd32(IGC_EIAC);
+
+ wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
+ regval = rd32(IGC_EIAM);
+ wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
+ wr32(IGC_EIMS, adapter->eims_enable_mask);
+ wr32(IGC_IMS, ims);
+ } else {
+ wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
+ wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
+ }
+}
+
+/**
+ * igc_request_irq - initialize interrupts
+ * @adapter: Pointer to adapter structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ */
+static int igc_request_irq(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (adapter->flags & IGC_FLAG_HAS_MSIX) {
+ err = igc_request_msix(adapter);
+ if (!err)
+ goto request_done;
+ /* fall back to MSI */
+ igc_free_all_tx_resources(adapter);
+ igc_free_all_rx_resources(adapter);
+
+ igc_clear_interrupt_scheme(adapter);
+ err = igc_init_interrupt_scheme(adapter, false);
+ if (err)
+ goto request_done;
+ igc_setup_all_tx_resources(adapter);
+ igc_setup_all_rx_resources(adapter);
+ igc_configure(adapter);
+ }
+
+ igc_assign_vector(adapter->q_vector[0], 0);
+
+ if (adapter->flags & IGC_FLAG_HAS_MSI) {
+ err = request_irq(pdev->irq, &igc_intr_msi, 0,
+ netdev->name, adapter);
+ if (!err)
+ goto request_done;
+
+ /* fall back to legacy interrupts */
+ igc_reset_interrupt_capability(adapter);
+ adapter->flags &= ~IGC_FLAG_HAS_MSI;
+ }
+
+ err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
+ netdev->name, adapter);
+
+ if (err)
+ dev_err(&pdev->dev, "Error %d getting interrupt\n",
+ err);
+
+request_done:
+ return err;
+}
+
+static void igc_write_itr(struct igc_q_vector *q_vector)
+{
+ u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
+
+ if (!q_vector->set_itr)
+ return;
+
+ if (!itr_val)
+ itr_val = IGC_ITR_VAL_MASK;
+
+ itr_val |= IGC_EITR_CNT_IGNR;
+
+ writel(itr_val, q_vector->itr_register);
+ q_vector->set_itr = 0;
+}
+
+/**
+ * igc_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ * @resuming: boolean indicating whether the device is resuming
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int __igc_open(struct net_device *netdev, bool resuming)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ int err = 0;
+ int i = 0;
+
+ /* disallow open during test */
+ if (test_bit(__IGC_TESTING, &adapter->state)) {
+ WARN_ON(resuming);
+ return -EBUSY;
+ }
+
+ netif_carrier_off(netdev);
+
+ /* allocate transmit descriptors */
+ err = igc_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = igc_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ igc_power_up_link(adapter);
+
+ igc_configure(adapter);
+
+ err = igc_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
+ clear_bit(__IGC_DOWN, &adapter->state);
+
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ napi_enable(&adapter->q_vector[i]->napi);
+
+ /* Clear any pending interrupts. */
+ rd32(IGC_ICR);
+ igc_irq_enable(adapter);
+
+ netif_tx_start_all_queues(netdev);
+
+ /* start the watchdog. */
+ hw->mac.get_link_status = 1;
+ schedule_work(&adapter->watchdog_task);
+
+ return IGC_SUCCESS;
+
+err_set_queues:
+ igc_free_irq(adapter);
+err_req_irq:
+ igc_release_hw_control(adapter);
+ igc_power_down_link(adapter);
+ igc_free_all_rx_resources(adapter);
+err_setup_rx:
+ igc_free_all_tx_resources(adapter);
+err_setup_tx:
+ igc_reset(adapter);
+
+ return err;
+}
+
+static int igc_open(struct net_device *netdev)
+{
+ return __igc_open(netdev, false);
+}
+
+/**
+ * igc_close - Disables a network interface
+ * @netdev: network interface device structure
+ * @suspending: boolean indicating whether the device is suspending
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int __igc_close(struct net_device *netdev, bool suspending)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
+
+ igc_down(adapter);
+
+ igc_release_hw_control(adapter);
+
+ igc_free_irq(adapter);
+
+ igc_free_all_tx_resources(adapter);
+ igc_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+static int igc_close(struct net_device *netdev)
+{
+ if (netif_device_present(netdev) || netdev->dismantle)
+ return __igc_close(netdev, false);
+ return 0;
+}
+
+static const struct net_device_ops igc_netdev_ops = {
+ .ndo_open = igc_open,
+ .ndo_stop = igc_close,
+ .ndo_start_xmit = igc_xmit_frame,
+ .ndo_set_mac_address = igc_set_mac,
+ .ndo_change_mtu = igc_change_mtu,
+ .ndo_get_stats = igc_get_stats,
+ .ndo_do_ioctl = igc_ioctl,
+};
+
+/* PCIe configuration access */
+void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+ struct igc_adapter *adapter = hw->back;
+
+ pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+ struct igc_adapter *adapter = hw->back;
+
+ pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+ struct igc_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
+ return -IGC_ERR_CONFIG;
+
+ pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+ return IGC_SUCCESS;
+}
+
+s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
+{
+ struct igc_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
+ return -IGC_ERR_CONFIG;
+
+ pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
+
+ return IGC_SUCCESS;
+}
+
+u32 igc_rd32(struct igc_hw *hw, u32 reg)
+{
+ struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
+ u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
+ u32 value = 0;
+
+ if (IGC_REMOVED(hw_addr))
+ return ~value;
+
+ value = readl(&hw_addr[reg]);
+
+ /* reads should not return all F's; a read from a surprise-removed
+ * PCIe device completes with all 1s, so re-check register 0 to tell
+ * true removal apart from a register that legitimately reads
+ * 0xFFFFFFFF
+ */
+ if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+ struct net_device *netdev = igc->netdev;
+
+ hw->hw_addr = NULL;
+ netif_device_detach(netdev);
+ netdev_err(netdev, "PCIe link lost, device now detached\n");
+ }
+
+ return value;
+}
+
+/**
+ * igc_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in igc_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * igc_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int igc_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct igc_adapter *adapter;
+ struct net_device *netdev;
+ struct igc_hw *hw;
+ const struct igc_info *ei = igc_info_tbl[ent->driver_data];
+ int err, pci_using_dac;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ pci_using_dac = 0;
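+ /* prefer 64-bit DMA; fall back to a 32-bit mask if the platform
+ * cannot support it
+ */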
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(64));
+ if (!err)
+ pci_using_dac = 1;
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ IGC_ERR("Wrong DMA configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev,
+ IORESOURCE_MEM),
+ igc_driver_name);
+ if (err)
+ goto err_pci_reg;
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ err = -ENOMEM;
+ netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
+ IGC_MAX_TX_QUEUES);
+
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->port_num = hw->bus.func;
+ adapter->msg_enable = GENMASK(debug - 1, 0);
+
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_ioremap;
+
+ err = -EIO;
+ adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!adapter->io_addr)
+ goto err_ioremap;
+
+ /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
+ hw->hw_addr = adapter->io_addr;
+
+ netdev->netdev_ops = &igc_netdev_ops;
+
+ netdev->watchdog_timeo = 5 * HZ;
+
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
+
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->revision_id = pdev->revision;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ /* Copy the default MAC and PHY function pointers */
+ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+
+ /* Initialize skew-specific constants */
+ err = ei->get_invariants(hw);
+ if (err)
+ goto err_sw_init;
+
+ /* setup the private structure */
+ err = igc_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* MTU range: 68 - 9216 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
+
+ /* before reading the NVM, reset the controller to put the device in a
+ * known good starting state
+ */
+ hw->mac.ops.reset_hw(hw);
+
+ if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
+ /* copy the MAC address out of the NVM */
+ if (hw->mac.ops.read_mac_addr(hw))
+ dev_err(&pdev->dev, "NVM Read Error\n");
+ }
+
+ memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_err(&pdev->dev, "Invalid MAC Address\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+
+ /* configure RXPBSIZE and TXPBSIZE */
+ wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
+ wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+
+ timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
+ timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
+
+ INIT_WORK(&adapter->reset_task, igc_reset_task);
+ INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
+
+ /* Initialize link properties that are user-changeable */
+ adapter->fc_autoneg = true;
+ hw->mac.autoneg = true;
+ hw->phy.autoneg_advertised = 0xaf;
+
+ hw->fc.requested_mode = igc_fc_default;
+ hw->fc.current_mode = igc_fc_default;
+
+ /* reset the hardware with the new settings */
+ igc_reset(adapter);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver.
+ */
+ igc_get_hw_control(adapter);
+
+ strncpy(netdev->name, "eth%d", IFNAMSIZ);
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ /* keep a copy of the board-specific info */
+ adapter->ei = *ei;
+
+ /* print pcie link status and MAC address */
+ pcie_print_link_status(pdev);
+ netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
+
+ return 0;
+
+err_register:
+ igc_release_hw_control(adapter);
+err_eeprom:
+ if (!igc_check_reset_block(hw))
+ igc_reset_phy(hw);
+err_sw_init:
+ igc_clear_interrupt_scheme(adapter);
+ iounmap(adapter->io_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * igc_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * igc_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ */
+static void igc_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ set_bit(__IGC_DOWN, &adapter->state);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+ * would have already happened in close and is redundant.
+ */
+ igc_release_hw_control(adapter);
+ unregister_netdev(netdev);
+
+ igc_clear_interrupt_scheme(adapter);
+ pci_iounmap(pdev, adapter->io_addr);
+ pci_release_mem_regions(pdev);
+
+ kfree(adapter->mac_table);
+ kfree(adapter->shadow_vfta);
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver igc_driver = {
+ .name = igc_driver_name,
+ .id_table = igc_pci_tbl,
+ .probe = igc_probe,
+ .remove = igc_remove,
+};
+
+static void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
+ const u32 max_rss_queues)
+{
+ /* Determine if we need to pair queues: if rss_queues is more than
+ * half of max_rss_queues, pair the queues in order to conserve
+ * interrupts due to limited supply.
+ */
+ if (adapter->rss_queues > (max_rss_queues / 2))
+ adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
+ else
+ adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
+}
+
+static unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
+{
+ unsigned int max_rss_queues;
+
+ /* Determine the maximum number of RSS queues supported. */
+ max_rss_queues = IGC_MAX_RX_QUEUES;
+
+ return max_rss_queues;
+}
+
+static void igc_init_queue_configuration(struct igc_adapter *adapter)
+{
+ u32 max_rss_queues;
+
+ max_rss_queues = igc_get_max_rss_queues(adapter);
+ adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+
+ igc_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+/**
+ * igc_sw_init - Initialize general software structures (struct igc_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igc_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int igc_sw_init(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_hw *hw = &adapter->hw;
+
+ int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IGC_DEFAULT_TXD;
+ adapter->rx_ring_count = IGC_DEFAULT_RXD;
+
+ /* set default ITR values */
+ adapter->rx_itr_setting = IGC_DEFAULT_ITR;
+ adapter->tx_itr_setting = IGC_DEFAULT_ITR;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
+
+ /* adjust max frame to be at least the size of a standard frame */
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ spin_lock_init(&adapter->nfc_lock);
+ spin_lock_init(&adapter->stats64_lock);
+ /* Assume MSI-X interrupts, will be checked during IRQ allocation */
+ adapter->flags |= IGC_FLAG_HAS_MSIX;
+
+ adapter->mac_table = kzalloc(size, GFP_ATOMIC);
+ if (!adapter->mac_table)
+ return -ENOMEM;
+
+ igc_init_queue_configuration(adapter);
+
+ /* This call may decrease the number of queues */
+ if (igc_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ /* Explicitly disable IRQ since the NIC can be in any state. */
+ igc_irq_disable(adapter);
+
+ set_bit(__IGC_DOWN, &adapter->state);
+
+ return 0;
+}
+
+/**
+ * igc_get_hw_dev - return device
+ * @hw: pointer to hardware structure
+ *
+ * used by hardware layer to print debugging information
+ */
+struct net_device *igc_get_hw_dev(struct igc_hw *hw)
+{
+ struct igc_adapter *adapter = hw->back;
+
+ return adapter->netdev;
+}
+
+/**
+ * igc_init_module - Driver Registration Routine
+ *
+ * igc_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init igc_init_module(void)
+{
+ int ret;
+
+ pr_info("%s - version %s\n",
+ igc_driver_string, igc_driver_version);
+
+ pr_info("%s\n", igc_copyright);
+
+ ret = pci_register_driver(&igc_driver);
+ return ret;
+}
+
+module_init(igc_init_module);
+
+/**
+ * igc_exit_module - Driver Exit Cleanup Routine
+ *
+ * igc_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit igc_exit_module(void)
+{
+ pci_unregister_driver(&igc_driver);
+}
+
+module_exit(igc_exit_module);
+/* igc_main.c */
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c
new file mode 100644
index 000000000000..58f81aba0144
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include "igc_mac.h"
+#include "igc_nvm.h"
+
+/**
+ * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * @hw: pointer to the HW structure
+ * @ee_reg: EEPROM flag for polling
+ *
+ * Polls the EEPROM status bit for either read or write completion based
+ * upon the value of 'ee_reg'.
+ */
+static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg)
+{
+ s32 ret_val = -IGC_ERR_NVM;
+ u32 attempts = 100000;
+ u32 i, reg = 0;
+
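+ /* each attempt waits 5 usec, so this polls for roughly half a
+ * second worst case before giving up
+ */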
+ for (i = 0; i < attempts; i++) {
+ if (ee_reg == IGC_NVM_POLL_READ)
+ reg = rd32(IGC_EERD);
+ else
+ reg = rd32(IGC_EEWR);
+
+ if (reg & IGC_NVM_RW_REG_DONE) {
+ ret_val = 0;
+ break;
+ }
+
+ udelay(5);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_acquire_nvm - Generic request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -IGC_ERR_NVM (-1).
+ */
+s32 igc_acquire_nvm(struct igc_hw *hw)
+{
+ s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
+ u32 eecd = rd32(IGC_EECD);
+ s32 ret_val = 0;
+
+ wr32(IGC_EECD, eecd | IGC_EECD_REQ);
+ eecd = rd32(IGC_EECD);
+
+ while (timeout) {
+ if (eecd & IGC_EECD_GNT)
+ break;
+ udelay(5);
+ eecd = rd32(IGC_EECD);
+ timeout--;
+ }
+
+ if (!timeout) {
+ eecd &= ~IGC_EECD_REQ;
+ wr32(IGC_EECD, eecd);
+ hw_dbg("Could not acquire NVM grant\n");
+ ret_val = -IGC_ERR_NVM;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_release_nvm - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ */
+void igc_release_nvm(struct igc_hw *hw)
+{
+ u32 eecd;
+
+ eecd = rd32(IGC_EECD);
+ eecd &= ~IGC_EECD_REQ;
+ wr32(IGC_EECD, eecd);
+}
+
+/**
+ * igc_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ */
+s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct igc_nvm_info *nvm = &hw->nvm;
+ u32 i, eerd = 0;
+ s32 ret_val = 0;
+
+ /* A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
+ words == 0) {
+ hw_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -IGC_ERR_NVM;
+ goto out;
+ }
+
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) +
+ IGC_NVM_RW_REG_START;
+
+ wr32(IGC_EERD, eerd);
+ ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ);
+ if (ret_val)
+ break;
+
+ data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA);
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_read_mac_addr - Read device MAC address
+ * @hw: pointer to the HW structure
+ */
+s32 igc_read_mac_addr(struct igc_hw *hw)
+{
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
+
+ rar_high = rd32(IGC_RAH(0));
+ rar_low = rd32(IGC_RAL(0));
+
+ for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8));
+
+ for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8));
+
+ for (i = 0; i < ETH_ALEN; i++)
+ hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+ return 0;
+}
+
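To illustrate the RAL/RAH byte packing decoded above (hypothetical register values; IGC_RAL_MAC_ADDR_LEN and IGC_RAH_MAC_ADDR_LEN are assumed to be 4 and 2):

	/* Sketch: for MAC 00:11:22:33:44:55 the registers would hold
	 * RAL(0) = 0x33221100 and the low 16 bits of RAH(0) = 0x5544.
	 */
	u32 rar_low = 0x33221100, rar_high = 0x5544;
	u8 addr[6];
	int i;

	for (i = 0; i < 4; i++)
		addr[i] = (u8)(rar_low >> (i * 8));	/* 00 11 22 33 */
	for (i = 0; i < 2; i++)
		addr[i + 4] = (u8)(rar_high >> (i * 8));	/* 44 55 */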
+/**
+ * igc_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ */
+s32 igc_validate_nvm_checksum(struct igc_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, nvm_data;
+ s32 ret_val = 0;
+
+ for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16)NVM_SUM) {
+ hw_dbg("NVM Checksum Invalid\n");
+ ret_val = -IGC_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_update_nvm_checksum - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to, but not including, the checksum word. It then computes the
+ * checksum word that makes the total equal NVM_SUM and writes that value
+ * to the EEPROM.
+ */
+s32 igc_update_nvm_checksum(struct igc_hw *hw)
+{
+ u16 checksum = 0;
+ u16 i, nvm_data;
+ s32 ret_val;
+
+ for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16)NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ if (ret_val)
+ hw_dbg("NVM Write Error while updating checksum.\n");
+
+out:
+ return ret_val;
+}
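To make the validate/update pair concrete, a toy example assuming NVM_SUM is 0xBABA:

	/* Toy arithmetic, not driver code: if the data words before the
	 * checksum word sum to 0x1234, the update path stores
	 *     checksum = 0xBABA - 0x1234 = 0xA886
	 * so the validate path later sees
	 *     0x1234 + 0xA886 = 0xBABA   (mod 2^16)
	 * Any change to the data words breaks the equality.
	 */
	u16 data_sum = 0x1234;
	u16 checksum = (u16)(0xBABA - data_sum);	/* 0xA886 */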
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.h b/drivers/net/ethernet/intel/igc/igc_nvm.h
new file mode 100644
index 000000000000..f9fc2e9cfb03
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_NVM_H_
+#define _IGC_NVM_H_
+
+s32 igc_acquire_nvm(struct igc_hw *hw);
+void igc_release_nvm(struct igc_hw *hw);
+s32 igc_read_mac_addr(struct igc_hw *hw);
+s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igc_validate_nvm_checksum(struct igc_hw *hw);
+s32 igc_update_nvm_checksum(struct igc_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
new file mode 100644
index 000000000000..38e43e6fc1c7
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include "igc_phy.h"
+
+/* forward declaration */
+static s32 igc_phy_setup_autoneg(struct igc_hw *hw);
+static s32 igc_wait_autoneg(struct igc_hw *hw);
+
+/**
+ * igc_check_reset_block - Check if PHY reset is blocked
+ * @hw: pointer to the HW structure
+ *
+ * Read the PHY management control register and check whether a PHY reset
+ * is blocked. If a reset is not blocked return 0, otherwise
+ * return IGC_ERR_BLK_PHY_RESET (12).
+ */
+s32 igc_check_reset_block(struct igc_hw *hw)
+{
+ u32 manc;
+
+ manc = rd32(IGC_MANC);
+
+ return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ?
+ IGC_ERR_BLK_PHY_RESET : 0;
+}
+
+/**
+ * igc_get_phy_id - Retrieve the PHY ID and revision
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY registers and stores the PHY ID and possibly the PHY
+ * revision in the hardware structure.
+ */
+s32 igc_get_phy_id(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val = 0;
+ u16 phy_id;
+
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ if (ret_val)
+ goto out;
+
+ phy->id = (u32)(phy_id << 16);
+ usleep_range(200, 500);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+ if (ret_val)
+ goto out;
+
+ phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+ phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_phy_has_link - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ */
+s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success)
+{
+ u16 i, phy_status;
+ s32 ret_val = 0;
+
+ for (i = 0; i < iterations; i++) {
+ /* Some PHYs require the PHY_STATUS register to be read
+ * twice due to the link bit being sticky. No harm doing
+ * it across the board.
+ */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val && usec_interval > 0) {
+ /* If the first read fails, another entity may have
+ * ownership of the resources, wait and try again to
+ * see if they have relinquished the resources yet.
+ */
+ if (usec_interval >= 1000)
+ mdelay(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_LINK_STATUS)
+ break;
+ if (usec_interval >= 1000)
+ mdelay(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
+
+ *success = (i < iterations) ? true : false;
+
+ return ret_val;
+}
+
+/**
+ * igc_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, restore the link to previous settings.
+ */
+void igc_power_up_phy_copper(struct igc_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * igc_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down PHY to save power when interface is down and wake on lan
+ * is not enabled.
+ */
+void igc_power_down_phy_copper(struct igc_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+
+ /* Temporary workaround - should be removed once the PHY implements
+ * the IEEE registers properly
+ */
+ /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/
+ usleep_range(1000, 2000);
+}
+
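In standard MII terms, the power transitions above reduce to a single bit flip: a sketch assuming MII_CR_POWER_DOWN is the usual BMCR power-down bit (bit 11, 0x0800, per IEEE 802.3 clause 22):

	u16 mii_reg = 0x1140;		/* hypothetical PHY_CONTROL value */
	u16 down = mii_reg | 0x0800;	/* 0x1940: PHY powered down */
	u16 up = down & ~0x0800;	/* 0x1140: powered back up */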
+/**
+ * igc_check_downshift - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ *
+ * A downshift is detected by querying the PHY link health.
+ */
+s32 igc_check_downshift(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u16 phy_data, offset, mask;
+ s32 ret_val;
+
+ switch (phy->type) {
+ case igc_phy_i225:
+ default:
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
+ ret_val = 0;
+ goto out;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->speed_downgraded = (phy_data & mask) ? true : false;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_phy_hw_reset - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ */
+s32 igc_phy_hw_reset(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u32 ctrl;
+
+ ret_val = igc_check_reset_block(hw);
+ if (ret_val) {
+ ret_val = 0;
+ goto out;
+ }
+
+ ret_val = phy->ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ctrl = rd32(IGC_CTRL);
+ wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
+ wrfl();
+
+ udelay(phy->reset_delay_us);
+
+ wr32(IGC_CTRL, ctrl);
+ wrfl();
+
+ usleep_range(1500, 2000);
+
+ phy->ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_copper_link_autoneg - Setup/Enable autoneg for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Performs initial bounds checking on the autoneg advertisement parameter,
+ * then configures it to advertise full capability. Sets up the PHY for
+ * autoneg and restarts negotiation with the link partner. If
+ * autoneg_wait_to_complete is set, waits for autoneg to complete before
+ * exiting.
+ */
+static s32 igc_copper_link_autoneg(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u16 phy_ctrl;
+ s32 ret_val;
+
+ /* Perform some bounds checking on the autoneg advertisement
+ * parameter.
+ */
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (phy->autoneg_advertised == 0)
+ phy->autoneg_advertised = phy->autoneg_mask;
+
+ hw_dbg("Reconfiguring auto-neg advertisement params\n");
+ ret_val = igc_phy_setup_autoneg(hw);
+ if (ret_val) {
+ hw_dbg("Error Setting up Auto-Negotiation\n");
+ goto out;
+ }
+ hw_dbg("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ goto out;
+
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ goto out;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (phy->autoneg_wait_to_complete) {
+ ret_val = igc_wait_autoneg(hw);
+ if (ret_val) {
+ hw_dbg("Error while waiting for autoneg to complete\n");
+ goto out;
+ }
+ }
+
+ hw->mac.get_link_status = true;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ */
+static s32 igc_wait_autoneg(struct igc_hw *hw)
+{
+ u16 i, phy_status;
+ s32 ret_val = 0;
+
+ /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+ if (ret_val)
+ break;
+ if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ break;
+ msleep(100);
+ }
+
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ * has completed.
+ */
+ return ret_val;
+}
+
+/**
+ * igc_phy_setup_autoneg - Configure PHY for auto-negotiation
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MII auto-neg advertisement register and/or the 1000T control
+ * register and, if the PHY is already set up for auto-negotiation,
+ * returns successfully. Otherwise, sets up advertisement and flow control
+ * to the appropriate values for the desired auto-negotiation.
+ */
+static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u16 aneg_multigbt_an_ctrl = 0;
+ u16 mii_1000t_ctrl_reg = 0;
+ u16 mii_autoneg_adv_reg;
+ s32 ret_val;
+
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+ &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
+ hw->phy.id == I225_I_PHY_ID) {
+ /* Read the MULTI GBT AN Control Register - reg 7.32 */
+ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ ANEG_MULTIGBT_AN_CTRL,
+ &aneg_multigbt_an_ctrl);
+
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
+
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+ NWAY_AR_100TX_HD_CAPS |
+ NWAY_AR_10T_FD_CAPS |
+ NWAY_AR_10T_HD_CAPS);
+ mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+ hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+ hw_dbg("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
+
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+ hw_dbg("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+ hw_dbg("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
+
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+ hw_dbg("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+ hw_dbg("Advertise 1000mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+ hw_dbg("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
+
+ /* We do not allow the Phy to advertise 2500 Mb Half Duplex */
+ if (phy->autoneg_advertised & ADVERTISE_2500_HALF)
+ hw_dbg("Advertise 2500mb Half duplex request denied!\n");
+
+ /* Do we want to advertise 2500 Mb Full Duplex? */
+ if (phy->autoneg_advertised & ADVERTISE_2500_FULL) {
+ hw_dbg("Advertise 2500mb Full duplex\n");
+ aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS;
+ } else {
+ aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS;
+ }
+
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+ * negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc.current_mode) {
+ case igc_fc_none:
+ /* Flow control (Rx & Tx) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case igc_fc_rx_pause:
+ /* Rx Flow control is enabled, and Tx Flow control is
+ * disabled, by a software over-ride.
+ *
+ * Since there really isn't a way to advertise that we are
+ * capable of Rx Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric Rx PAUSE. Later
+ * (in igc_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case igc_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case igc_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ hw_dbg("Flow control param set incorrectly\n");
+ return -IGC_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+
+ if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
+ hw->phy.id == I225_I_PHY_ID)
+ ret_val = phy->ops.write_reg(hw,
+ (STANDARD_AN_REG_MASK <<
+ MMD_DEVADDR_SHIFT) |
+ ANEG_MULTIGBT_AN_CTRL,
+ aneg_multigbt_an_ctrl);
+
+ return ret_val;
+}
+
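Summarizing the flow-control switch above, the pause advertisement bits come out as follows (NWAY_AR_PAUSE and NWAY_AR_ASM_DIR being the standard 802.3 symmetric and asymmetric pause advertisement bits):

	/* mode             PAUSE  ASM_DIR  effect
	 * igc_fc_none        0       0     no pause in either direction
	 * igc_fc_rx_pause    1       1     symmetric + asymmetric Rx pause
	 * igc_fc_tx_pause    0       1     asymmetric Tx-only pause
	 * igc_fc_full        1       1     symmetric pause
	 *
	 * Rx-only and full advertise identical bits; the Tx side is
	 * disabled later in igc_config_fc_after_link_up().
	 */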
+/**
+ * igc_setup_copper_link - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or forced
+ * speed and duplex. Then we check for link; once link is established,
+ * collision distance and flow control are configured. If link is
+ * not established, we return -IGC_ERR_PHY (-2).
+ */
+s32 igc_setup_copper_link(struct igc_hw *hw)
+{
+ s32 ret_val = 0;
+ bool link;
+
+ if (hw->mac.autoneg) {
+ /* Setup autoneg and flow control advertisement and perform
+ * autonegotiation.
+ */
+ ret_val = igc_copper_link_autoneg(hw);
+ if (ret_val)
+ goto out;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H or 100F
+ * depending on user settings.
+ */
+ hw_dbg("Forcing Speed and Duplex\n");
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
+ if (ret_val) {
+ hw_dbg("Error Forcing Speed and Duplex\n");
+ goto out;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
+ if (ret_val)
+ goto out;
+
+ if (link) {
+ hw_dbg("Valid link established!!!\n");
+ igc_config_collision_dist(hw);
+ ret_val = igc_config_fc_after_link_up(hw);
+ } else {
+ hw_dbg("Unable to establish link!!!\n");
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_read_phy_reg_mdic - Read MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * value read in data.
+ */
+static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+ s32 ret_val = 0;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ hw_dbg("PHY Address %d is out of range\n", offset);
+ ret_val = -IGC_ERR_PARAM;
+ goto out;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((offset << IGC_MDIC_REG_SHIFT) |
+ (phy->addr << IGC_MDIC_PHY_SHIFT) |
+ (IGC_MDIC_OP_READ));
+
+ wr32(IGC_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed. The
+ * timeout was increased because testing showed failures with
+ * the lower timeout.
+ */
+ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
+ usleep_range(500, 1000);
+ mdic = rd32(IGC_MDIC);
+ if (mdic & IGC_MDIC_READY)
+ break;
+ }
+ if (!(mdic & IGC_MDIC_READY)) {
+ hw_dbg("MDI Read did not complete\n");
+ ret_val = -IGC_ERR_PHY;
+ goto out;
+ }
+ if (mdic & IGC_MDIC_ERROR) {
+ hw_dbg("MDI Error\n");
+ ret_val = -IGC_ERR_PHY;
+ goto out;
+ }
+ *data = (u16)mdic;
+
+out:
+ return ret_val;
+}
+
+/**
+ * igc_write_phy_reg_mdic - Write MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to the MDI control register in the PHY at offset.
+ */
+static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u32 i, mdic = 0;
+ s32 ret_val = 0;
+
+ if (offset > MAX_PHY_REG_ADDRESS) {
+ hw_dbg("PHY Address %d is out of range\n", offset);
+ ret_val = -IGC_ERR_PARAM;
+ goto out;
+ }
+
+ /* Set up Op-code, Phy Address, and register offset in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to write the desired data.
+ */
+ mdic = (((u32)data) |
+ (offset << IGC_MDIC_REG_SHIFT) |
+ (phy->addr << IGC_MDIC_PHY_SHIFT) |
+ (IGC_MDIC_OP_WRITE));
+
+ wr32(IGC_MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI write completed. The
+ * timeout was increased because testing showed failures with
+ * the lower timeout.
+ */
+ for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
+ usleep_range(500, 1000);
+ mdic = rd32(IGC_MDIC);
+ if (mdic & IGC_MDIC_READY)
+ break;
+ }
+ if (!(mdic & IGC_MDIC_READY)) {
+ hw_dbg("MDI Write did not complete\n");
+ ret_val = -IGC_ERR_PHY;
+ goto out;
+ }
+ if (mdic & IGC_MDIC_ERROR) {
+ hw_dbg("MDI Error\n");
+ ret_val = -IGC_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * __igc_access_xmdio_reg - Read/write XMDIO register
+ * @hw: pointer to the HW structure
+ * @address: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: pointer to value to read/write from/to the XMDIO address
+ * @read: boolean flag to indicate read or write
+ */
+static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address,
+ u8 dev_addr, u16 *data, bool read)
+{
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA |
+ dev_addr);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data);
+ else
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data);
+ if (ret_val)
+ return ret_val;
+
+ /* Restore the MMD access control register to its default (0) */
+ ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0);
+ if (ret_val)
+ return ret_val;
+
+ return ret_val;
+}
+
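The helper above is the usual clause-45-over-clause-22 indirection through the MMD access control (register 13, IGC_MMDAC) and address/data (register 14, IGC_MMDAAD) registers. A hypothetical read of register 0x20 in MMD 7 would run through these steps:

	/* Sketch of the access sequence (read case), values hypothetical:
	 *   write_reg(hw, IGC_MMDAC, 7);              select MMD 7, address mode
	 *   write_reg(hw, IGC_MMDAAD, 0x20);          latch register address
	 *   write_reg(hw, IGC_MMDAC, FUNC_DATA | 7);  switch reg 14 to data mode
	 *   read_reg(hw, IGC_MMDAAD, &val);           val = MMD 7, reg 0x20
	 *   write_reg(hw, IGC_MMDAC, 0);              restore default state
	 */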
+/**
+ * igc_read_xmdio_reg - Read XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be read from the XMDIO address
+ */
+static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr,
+ u8 dev_addr, u16 *data)
+{
+ return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true);
+}
+
+/**
+ * igc_write_xmdio_reg - Write XMDIO register
+ * @hw: pointer to the HW structure
+ * @addr: XMDIO address to program
+ * @dev_addr: device address to program
+ * @data: value to be written to the XMDIO address
+ */
+static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr,
+ u8 dev_addr, u16 data)
+{
+ return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+}
+
+/**
+ * igc_write_phy_reg_gpy - Write GPY PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ */
+s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
+{
+ u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+ s32 ret_val;
+
+ offset = offset & GPY_REG_MASK;
+
+ if (!dev_addr) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = igc_write_phy_reg_mdic(hw, offset, data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.release(hw);
+ } else {
+ ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
+ data);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igc_read_phy_reg_gpy - Read GPY PHY register
+ * @hw: pointer to the HW structure
+ * @offset: lower half is the register offset to read from,
+ * upper half is the MMD to use.
+ * @data: data to read at register offset
+ *
+ * Acquires semaphore, if necessary, then reads the data in the PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ */
+s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
+{
+ u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+ s32 ret_val;
+
+ offset = offset & GPY_REG_MASK;
+
+ if (!dev_addr) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ ret_val = igc_read_phy_reg_mdic(hw, offset, data);
+ if (ret_val)
+ return ret_val;
+ hw->phy.ops.release(hw);
+ } else {
+ ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
+ data);
+ }
+
+ return ret_val;
+}
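Both gpy wrappers fold the MMD number into the upper bits of 'offset'. A sketch of the encoding, under the assumption that GPY_MMD_SHIFT is 16 and GPY_REG_MASK covers the low 16 bits (the actual constants live in the igc headers):

	u32 offset = (7u << 16) | 0x0020;	/* MMD 7, register 0x20 */
	u8 dev_addr = (offset >> 16) & 0x1f;	/* 7: routed via XMDIO */
	u16 reg = offset & 0xffff;		/* 0x20 */
	/* dev_addr == 0 would take the direct MDIC path instead */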
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.h b/drivers/net/ethernet/intel/igc/igc_phy.h
new file mode 100644
index 000000000000..25cba33de7e2
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_phy.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_PHY_H_
+#define _IGC_PHY_H_
+
+#include "igc_mac.h"
+
+s32 igc_check_reset_block(struct igc_hw *hw);
+s32 igc_phy_hw_reset(struct igc_hw *hw);
+s32 igc_get_phy_id(struct igc_hw *hw);
+s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+s32 igc_check_downshift(struct igc_hw *hw);
+s32 igc_setup_copper_link(struct igc_hw *hw);
+void igc_power_up_phy_copper(struct igc_hw *hw);
+void igc_power_down_phy_copper(struct igc_hw *hw);
+s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data);
+s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data);
+
+#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
new file mode 100644
index 000000000000..a1bd3216c906
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Intel Corporation */
+
+#ifndef _IGC_REGS_H_
+#define _IGC_REGS_H_
+
+/* General Register Descriptions */
+#define IGC_CTRL 0x00000 /* Device Control - RW */
+#define IGC_STATUS 0x00008 /* Device Status - RO */
+#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define IGC_MDIC 0x00020 /* MDI Control - RW */
+#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
+#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+
+/* Internal Packet Buffer Size Registers */
+#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+
+/* NVM Register Descriptions */
+#define IGC_EERD 0x12014 /* EEprom mode read - RW */
+#define IGC_EEWR 0x12018 /* EEprom mode write - RW */
+
+/* Flow Control Register Descriptions */
+#define IGC_FCAL 0x00028 /* FC Address Low - RW */
+#define IGC_FCAH 0x0002C /* FC Address High - RW */
+#define IGC_FCT 0x00030 /* FC Type - RW */
+#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */
+#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */
+#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */
+#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */
+#define IGC_FCSTS 0x02464 /* FC Status - RO */
+
+/* PCIe Register Description */
+#define IGC_GCR 0x05B00 /* PCIe control- RW */
+
+/* Semaphore registers */
+#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+#define IGC_SWSM 0x05B50 /* SW Semaphore */
+#define IGC_FWSM 0x05B54 /* FW Semaphore */
+
+/* Function Active and Power State to MNG */
+#define IGC_FACTPS 0x05B30
+
+/* Interrupt Register Description */
+#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
+#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define IGC_EIAM 0x01530 /* Ext. Interrupt Auto Mask - RW */
+#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */
+#define IGC_ICS 0x01504 /* Intr Cause Set - WO */
+#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */
+#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */
+#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */
+/* Intr Throttle - RW */
+#define IGC_EITR(_n) (0x01680 + (0x4 * (_n)))
+/* Interrupt Vector Allocation - RW */
+#define IGC_IVAR0 0x01700
+#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */
+
+/* Interrupt Cause */
+#define IGC_ICRXPTC 0x04104 /* Rx Packet Timer Expire Count */
+#define IGC_ICRXATC 0x04108 /* Rx Absolute Timer Expire Count */
+#define IGC_ICTXPTC 0x0410C /* Tx Packet Timer Expire Count */
+#define IGC_ICTXATC 0x04110 /* Tx Absolute Timer Expire Count */
+#define IGC_ICTXQEC 0x04118 /* Tx Queue Empty Count */
+#define IGC_ICTXQMTC 0x0411C /* Tx Queue Min Threshold Count */
+#define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */
+#define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */
+
+#define IGC_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */
+#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
+#define IGC_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */
+#define IGC_RPTHC 0x04104 /* Rx Packets To Host */
+#define IGC_HGPTC 0x04118 /* Host Good Packets TX Count */
+#define IGC_HTCBDPC 0x04124 /* Host TX Circ.Breaker Drop Count */
+
+/* MSI-X Table Register Descriptions */
+#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
+
+/* Receive Register Descriptions */
+#define IGC_RCTL 0x00100 /* Rx Control - RW */
+#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40))
+#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4))
+#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40))
+#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40))
+#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40))
+#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40))
+#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40))
+#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40))
+#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */
+#define IGC_RFCTL 0x05008 /* Receive Filter Control*/
+#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */
+#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08))
+#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08))
+
+/* Transmit Register Descriptions */
+#define IGC_TCTL 0x00400 /* Tx Control - RW */
+#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */
+#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40))
+#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40))
+#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40))
+#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40))
+#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40))
+#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40))
+
+/* MMD Register Descriptions */
+#define IGC_MMDAC 13 /* MMD Access Control */
+#define IGC_MMDAAD 14 /* MMD Access Address/Data */
+
+/* Good transmitted packets counter registers */
+#define IGC_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
+
+/* Statistics Register Descriptions */
+#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define IGC_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */
+#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define IGC_COLC 0x04028 /* Collision Count - R/clr */
+#define IGC_DC 0x04030 /* Defer Count - R/clr */
+#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define IGC_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define IGC_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define IGC_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */
+#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define IGC_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define IGC_IAC 0x04100 /* Interrupt Assertion Count */
+#define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define IGC_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define IGC_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define IGC_RPTHC 0x04104 /* Rx Packets To Host */
+#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */
+#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */
+#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */
+#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */
+#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
+#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
+#define IGC_LENERRS 0x04138 /* Length Errors Count */
+#define IGC_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
+#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
+
+/* Management registers */
+#define IGC_MANC 0x05820 /* Management Control - RW */
+
+/* Shadow Ram Write Register - RW */
+#define IGC_SRWR 0x12018
+
+/* forward declaration */
+struct igc_hw;
+u32 igc_rd32(struct igc_hw *hw, u32 reg);
+
+/* write operations, indexed using DWORDS */
+#define wr32(reg, val) \
+do { \
+ u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
+ if (!IGC_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
+} while (0)
+
+#define rd32(reg) (igc_rd32(hw, reg))
+
+#define wrfl() ((void)rd32(IGC_STATUS))
+
+#define array_wr32(reg, offset, value) \
+ wr32((reg) + ((offset) << 2), (value))
+
+#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
+
+#endif
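Typical usage of these accessors is sketched below; note they capture a local 'hw' by name, and wrfl() flushes posted MMIO writes by reading IGC_STATUS:

	/* Sketch: toggle a control bit with a flush in between */
	u32 ctrl = rd32(IGC_CTRL);

	wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);	/* posted write */
	wrfl();						/* force it out */
	wr32(IGC_CTRL, ctrl);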
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index d3e72d0f66ef..1d4d1686909a 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void ixgb_netpoll(struct net_device *dev);
-#endif
-
static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
@@ -107,7 +102,7 @@ static struct pci_driver ixgb_driver = {
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
@@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
.ndo_tx_timeout = ixgb_tx_timeout,
.ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ixgb_netpoll,
-#endif
.ndo_fix_features = ixgb_fix_features,
.ndo_set_features = ixgb_set_features,
};
@@ -2195,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-
-static void ixgb_netpoll(struct net_device *dev)
-{
- struct ixgb_adapter *adapter = netdev_priv(dev);
-
- disable_irq(adapter->pdev->irq);
- ixgb_intr(adapter->pdev->irq, dev);
- enable_irq(adapter->pdev->irq);
-}
-#endif
-
/**
* ixgb_io_error_detected - called when PCI error is detected
* @pdev: pointer to pci device with error
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 5414685189ce..ca6b0c458e4a 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -8,7 +8,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
+ ixgbe_xsk.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 4fc906c6166b..7a7679e7be84 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -228,13 +228,17 @@ struct ixgbe_tx_buffer {
struct ixgbe_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
-#else
- __u16 page_offset;
-#endif
- __u16 pagecnt_bias;
+ union {
+ struct {
+ struct page *page;
+ __u32 page_offset;
+ __u16 pagecnt_bias;
+ };
+ struct {
+ void *addr;
+ u64 handle;
+ };
+ };
};
struct ixgbe_queue_stats {
@@ -271,6 +275,7 @@ enum ixgbe_ring_state_t {
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_TX_XDP_RING,
+ __IXGBE_TX_DISABLED,
};
#define ring_uses_build_skb(ring) \
@@ -347,6 +352,10 @@ struct ixgbe_ring {
struct ixgbe_rx_queue_stats rx_stats;
};
struct xdp_rxq_info xdp_rxq;
+ struct xdp_umem *xsk_umem;
+ struct zero_copy_allocator zca; /* ZC allocator anchor */
+ u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
+ u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
@@ -605,6 +614,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
#define IXGBE_FLAG2_RX_LEGACY BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
+#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
/* Tx fast path data */
int num_tx_queues;
@@ -763,6 +773,11 @@ struct ixgbe_adapter {
#ifdef CONFIG_XFRM_OFFLOAD
struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_XFRM_OFFLOAD */
+
+ /* AF_XDP zero-copy */
+ struct xdp_umem **xsk_umems;
+ u16 num_xsk_umems_used;
+ u16 num_xsk_umems;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -1003,15 +1018,24 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
struct sk_buff *skb);
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd);
+void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf);
+int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
+int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
#else
-static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { };
-static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { };
-static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { };
+static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
+static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
+static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb) { };
+ struct sk_buff *skb) { }
static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
- struct ixgbe_ipsec_tx_data *itd) { return 0; };
+ struct ixgbe_ipsec_tx_data *itd) { return 0; }
+static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter,
+ u32 vf) { }
+static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
+ u32 *mbuf, u32 vf) { return -EACCES; }
+static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
+ u32 *mbuf, u32 vf) { return -EACCES; }
#endif /* CONFIG_XFRM_OFFLOAD */
#endif /* _IXGBE_H_ */
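The new union in ixgbe_rx_buffer reflects the two receive models: the page/page_offset/pagecnt_bias side backs the ordinary page-based path, while addr/handle point into the user-mapped umem on the AF_XDP zero-copy path. A buffer is only ever one or the other, so overlaying them costs no space. A sketch of how a consumer would branch (assuming the ring's xsk_umem pointer selects the mode, as the fields added above suggest):

	void *va;

	if (rx_ring->xsk_umem)
		va = rx_buffer->addr;	/* zero-copy: umem address */
	else
		va = page_address(rx_buffer->page) + rx_buffer->page_offset;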
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e5a8461fe6a9..732b1e6ecc43 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -136,6 +136,8 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
+#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1)
+ "vf-ipsec",
};
#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
@@ -3409,6 +3411,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
+ if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
+ priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
+
return priv_flags;
}
@@ -3421,6 +3426,10 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
flags2 |= IXGBE_FLAG2_RX_LEGACY;
+ flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED;
+ if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
+ flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
+
if (flags2 != adapter->flags2) {
adapter->flags2 = flags2;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index da4322e4daed..fd1b0546fd67 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -5,6 +5,11 @@
#include <net/xfrm.h>
#include <crypto/aead.h>
+#define IXGBE_IPSEC_KEY_BITS 160
+static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
+
+static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);
+
/**
* ixgbe_ipsec_set_tx_sa - set the Tx SA registers
* @hw: hw specific details
@@ -113,7 +118,6 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
**/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
- struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
u32 buf[4] = {0, 0, 0, 0};
u16 idx;
@@ -132,9 +136,6 @@ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
}
-
- ipsec->num_rx_sa = 0;
- ipsec->num_tx_sa = 0;
}
/**
@@ -290,6 +291,13 @@ static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
/**
* ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
* @adapter: board private structure
+ *
+ * Reload the HW tables from the SW tables after they've been bashed
+ * by a chip reset.
+ *
+ * Any VF entries are removed from the SW and HW tables since either
+ * (a) the VF also gets reset on PF reset and will ask again for the
+ * offloads, or (b) the VF has been removed by a change in the num_vfs.
**/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
@@ -305,6 +313,28 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
ixgbe_ipsec_clear_hw_tables(adapter);
ixgbe_ipsec_start_engine(adapter);
+ /* reload the Rx and Tx keys */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ struct rx_sa *r = &ipsec->rx_tbl[i];
+ struct tx_sa *t = &ipsec->tx_tbl[i];
+
+ if (r->used) {
+ if (r->mode & IXGBE_RXTXMOD_VF)
+ ixgbe_ipsec_del_sa(r->xs);
+ else
+ ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi,
+ r->key, r->salt,
+ r->mode, r->iptbl_ind);
+ }
+
+ if (t->used) {
+ if (t->mode & IXGBE_RXTXMOD_VF)
+ ixgbe_ipsec_del_sa(t->xs);
+ else
+ ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
+ }
+ }
+
/* reload the IP addrs */
for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];
@@ -312,20 +342,6 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (ipsa->used)
ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
}
-
- /* reload the Rx and Tx keys */
- for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
- struct rx_sa *rsa = &ipsec->rx_tbl[i];
- struct tx_sa *tsa = &ipsec->tx_tbl[i];
-
- if (rsa->used)
- ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
- rsa->key, rsa->salt,
- rsa->mode, rsa->iptbl_ind);
-
- if (tsa->used)
- ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
- }
}
/**
@@ -382,6 +398,8 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
rcu_read_lock();
hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
(__force u32)spi) {
+ if (rsa->mode & IXGBE_RXTXMOD_VF)
+ continue;
if (spi == rsa->xs->id.spi &&
((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
(!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
@@ -411,7 +429,6 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
struct net_device *dev = xs->xso.dev;
unsigned char *key_data;
char *alg_name = NULL;
- const char aes_gcm_name[] = "rfc4106(gcm(aes))";
int key_len;
if (!xs->aead) {
@@ -439,9 +456,9 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
* we don't need to do any byteswapping.
* 160 accounts for 16 byte key and 4 byte salt
*/
- if (key_len == 160) {
+ if (key_len == IXGBE_IPSEC_KEY_BITS) {
*mysalt = ((u32 *)key_data)[4];
- } else if (key_len != 128) {
+ } else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) {
netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
return -EINVAL;
} else {
@@ -676,6 +693,9 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
} else {
struct tx_sa tsa;
+ if (adapter->num_vfs)
+ return -EOPNOTSUPP;
+
/* find the first unused index */
ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
if (ret < 0) {
@@ -811,6 +831,226 @@ static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
};
/**
+ * ixgbe_ipsec_vf_clear - clear the tables of data for a VF
+ * @adapter: board private structure
+ * @vf: VF id to be removed
+ **/
+void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ int i;
+
+ /* search rx sa table */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) {
+ if (!ipsec->rx_tbl[i].used)
+ continue;
+ if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
+ ipsec->rx_tbl[i].vf == vf)
+ ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
+ }
+
+ /* search tx sa table */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_tx_sa; i++) {
+ if (!ipsec->tx_tbl[i].used)
+ continue;
+ if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
+ ipsec->tx_tbl[i].vf == vf)
+ ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
+ }
+}
+
+/**
+ * ixgbe_ipsec_vf_add_sa - translate VF request to SA add
+ * @adapter: board private structure
+ * @msgbuf: The message buffer
+ * @vf: the VF index
+ *
+ * Make up a new xs and algorithm info from the data sent by the VF.
+ * We only need to sketch in just enough to set up the HW offload.
+ * Put the resulting offload_handle into the return message to the VF.
+ *
+ * Returns 0 or error value
+ **/
+int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct xfrm_algo_desc *algo;
+ struct sa_mbx_msg *sam;
+ struct xfrm_state *xs;
+ size_t aead_len;
+ u16 sa_idx;
+ u32 pfsa;
+ int err;
+
+ sam = (struct sa_mbx_msg *)(&msgbuf[1]);
+ if (!adapter->vfinfo[vf].trusted ||
+ !(adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)) {
+ e_warn(drv, "VF %d attempted to add an IPsec SA\n", vf);
+ err = -EACCES;
+ goto err_out;
+ }
+
+ /* Tx IPsec offload doesn't seem to work on this
+ * device, so block these requests for now.
+ */
+ if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ xs = kzalloc(sizeof(*xs), GFP_KERNEL);
+ if (unlikely(!xs)) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ xs->xso.flags = sam->flags;
+ xs->id.spi = sam->spi;
+ xs->id.proto = sam->proto;
+ xs->props.family = sam->family;
+ if (xs->props.family == AF_INET6)
+ memcpy(&xs->id.daddr.a6, sam->addr, sizeof(xs->id.daddr.a6));
+ else
+ memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
+ xs->xso.dev = adapter->netdev;
+
+ algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
+ if (unlikely(!algo)) {
+ err = -ENOENT;
+ goto err_xs;
+ }
+
+ aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
+ xs->aead = kzalloc(aead_len, GFP_KERNEL);
+ if (unlikely(!xs->aead)) {
+ err = -ENOMEM;
+ goto err_xs;
+ }
+
+ xs->props.ealgo = algo->desc.sadb_alg_id;
+ xs->geniv = algo->uinfo.aead.geniv;
+ xs->aead->alg_icv_len = IXGBE_IPSEC_AUTH_BITS;
+ xs->aead->alg_key_len = IXGBE_IPSEC_KEY_BITS;
+ memcpy(xs->aead->alg_key, sam->key, sizeof(sam->key));
+ memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
+
+ /* set up the HW offload */
+ err = ixgbe_ipsec_add_sa(xs);
+ if (err)
+ goto err_aead;
+
+ pfsa = xs->xso.offload_handle;
+ if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
+ sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
+ ipsec->rx_tbl[sa_idx].vf = vf;
+ ipsec->rx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
+ } else {
+ sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
+ ipsec->tx_tbl[sa_idx].vf = vf;
+ ipsec->tx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
+ }
+
+ msgbuf[1] = xs->xso.offload_handle;
+
+ return 0;
+
+err_aead:
+ memset(xs->aead, 0, sizeof(*xs->aead));
+ kfree(xs->aead);
+err_xs:
+ memset(xs, 0, sizeof(*xs));
+ kfree(xs);
+err_out:
+ msgbuf[1] = err;
+ return err;
+}
+
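The offload_handle handed back by ixgbe_ipsec_add_sa() encodes which table the SA landed in: values below IXGBE_IPSEC_BASE_TX_INDEX are Rx entries, everything else Tx. A toy decode with hypothetical base values:

	/* Assume IXGBE_IPSEC_BASE_RX_INDEX == 0 and
	 * IXGBE_IPSEC_BASE_TX_INDEX == 1024 (illustrative only):
	 *   handle  512 -> Rx table, sa_idx = 512
	 *   handle 1030 -> Tx table, sa_idx = 1030 - 1024 = 6
	 */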
+/**
+ * ixgbe_ipsec_vf_del_sa - translate VF request to SA delete
+ * @adapter: board private structure
+ * @msgbuf: The message buffer
+ * @vf: the VF index
+ *
+ * Given the offload_handle sent by the VF, look for the related SA table
+ * entry and use its xs field to call for a delete of the SA.
+ *
+ * Note: We silently ignore requests to delete entries that are already
+ * set to unused because when a VF is set to "DOWN", the PF first
+ * gets a reset and clears all the VF's entries; then the VF's
+ * XFRM stack sends individual deletes for each entry, which the
+ * reset already removed. In the future it might be worth optimizing
+ * this so fewer unnecessary delete messages are sent.
+ *
+ * Returns 0 or error value
+ **/
+int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct xfrm_state *xs;
+ u32 pfsa = msgbuf[1];
+ u16 sa_idx;
+
+ if (!adapter->vfinfo[vf].trusted) {
+ e_err(drv, "vf %d attempted to delete an SA\n", vf);
+ return -EPERM;
+ }
+
+ if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
+ struct rx_sa *rsa;
+
+ sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
+ if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
+ e_err(drv, "vf %d SA index %d out of range\n",
+ vf, sa_idx);
+ return -EINVAL;
+ }
+
+ rsa = &ipsec->rx_tbl[sa_idx];
+
+ if (!rsa->used)
+ return 0;
+
+ if (!(rsa->mode & IXGBE_RXTXMOD_VF) ||
+ rsa->vf != vf) {
+ e_err(drv, "vf %d bad Rx SA index %d\n", vf, sa_idx);
+ return -ENOENT;
+ }
+
+ xs = ipsec->rx_tbl[sa_idx].xs;
+ } else {
+ struct tx_sa *tsa;
+
+ sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
+ if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
+ e_err(drv, "vf %d SA index %d out of range\n",
+ vf, sa_idx);
+ return -EINVAL;
+ }
+
+ tsa = &ipsec->tx_tbl[sa_idx];
+
+ if (!tsa->used)
+ return 0;
+
+ if (!(tsa->mode & IXGBE_RXTXMOD_VF) ||
+ tsa->vf != vf) {
+ e_err(drv, "vf %d bad Tx SA index %d\n", vf, sa_idx);
+ return -ENOENT;
+ }
+
+ xs = ipsec->tx_tbl[sa_idx].xs;
+ }
+
+ ixgbe_ipsec_del_sa(xs);
+
+ /* remove the xs that was made-up in the add request */
+ memset(xs, 0, sizeof(*xs));
+ kfree(xs);
+
+ return 0;
+}
+
+/**
* ixgbe_ipsec_tx - setup Tx flags for ipsec offload
* @tx_ring: outgoing context
* @first: current data packet
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
index 9ef7faadda69..d2b64ff8eb4e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -26,6 +26,7 @@ enum ixgbe_ipsec_tbl_sel {
#define IXGBE_RXMOD_PROTO_ESP 0x00000004
#define IXGBE_RXMOD_DECRYPT 0x00000008
#define IXGBE_RXMOD_IPV6 0x00000010
+#define IXGBE_RXTXMOD_VF 0x00000020
struct rx_sa {
struct hlist_node hlist;
@@ -37,6 +38,7 @@ struct rx_sa {
u8 iptbl_ind;
bool used;
bool decrypt;
+ u32 vf;
};
struct rx_ip_sa {
@@ -49,8 +51,10 @@ struct tx_sa {
struct xfrm_state *xs;
u32 key[4];
u32 salt;
+ u32 mode;
bool encrypt;
bool used;
+ u32 vf;
};
struct ixgbe_ipsec_tx_data {
@@ -67,4 +71,13 @@ struct ixgbe_ipsec {
struct tx_sa *tx_tbl;
DECLARE_HASHTABLE(rx_sa_list, 10);
};
+
+struct sa_mbx_msg {
+ __be32 spi;
+ u8 flags;
+ u8 proto;
+ u16 family;
+ __be32 addr[4];
+ u32 key[5];
+};
#endif /* _IXGBE_IPSEC_H_ */
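For context, a VF-side sketch of filling this mailbox message before sending it to the PF; 'xs' and 'msgbuf' are assumed to be in scope, and msgbuf[0] would carry the (assumed) mailbox opcode:

	struct sa_mbx_msg sam = {
		.spi = xs->id.spi,
		.flags = xs->xso.flags,
		.proto = xs->id.proto,
		.family = xs->props.family,
	};

	memcpy(sam.addr, &xs->id.daddr, sizeof(sam.addr));
	memcpy(sam.key, xs->aead->alg_key, sizeof(sam.key));
	memcpy(&msgbuf[1], &sam, sizeof(sam));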
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index d361f570ca37..62e6499e4146 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1055,7 +1055,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
int txr_remaining = adapter->num_tx_queues;
int xdp_remaining = adapter->num_xdp_queues;
int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
- int err;
+ int err, i;
/* only one q_vector if MSI-X is disabled. */
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
@@ -1097,6 +1097,21 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
xdp_idx += xqpv;
}
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (adapter->rx_ring[i])
+ adapter->rx_ring[i]->ring_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->tx_ring[i])
+ adapter->tx_ring[i]->ring_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_xdp_queues; i++) {
+ if (adapter->xdp_ring[i])
+ adapter->xdp_ring[i]->ring_idx = i;
+ }
+
return 0;
err_out:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9a23d33a47ed..0049a2becd7e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -34,12 +34,14 @@
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
+#include <net/xdp_sock.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
+#include "ixgbe_txrx_common.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -159,7 +161,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *ixgbe_wq;
@@ -893,8 +895,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
}
}
-static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
- u64 qmask)
+void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
+ u64 qmask)
{
u32 mask;
@@ -1673,9 +1675,9 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
* order to populate the hash, checksum, VLAN, timestamp, protocol, and
* other fields within the skb.
**/
-static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
u32 flags = rx_ring->q_vector->adapter->flags;
@@ -1708,8 +1710,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
skb->protocol = eth_type_trans(skb, dev);
}
-static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb)
+void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb)
{
napi_gro_receive(&q_vector->napi, skb);
}
@@ -1868,9 +1870,9 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
*
* Returns true if an error was encountered and skb was freed.
**/
-static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *netdev = rx_ring->netdev;
@@ -2186,14 +2188,6 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED BIT(0)
-#define IXGBE_XDP_TX BIT(1)
-#define IXGBE_XDP_REDIR BIT(2)
-
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
- struct xdp_frame *xdpf);
-
static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
struct xdp_buff *xdp)
@@ -3167,7 +3161,11 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
#endif
ixgbe_for_each_ring(ring, q_vector->tx) {
- if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
+ bool wd = ring->xsk_umem ?
+ ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
+ ixgbe_clean_tx_irq(q_vector, ring, budget);
+
+ if (!wd)
clean_complete = false;
}
@@ -3183,7 +3181,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget = budget;
ixgbe_for_each_ring(ring, q_vector->rx) {
- int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
+ int cleaned = ring->xsk_umem ?
+ ixgbe_clean_rx_irq_zc(q_vector, ring,
+ per_ring_budget) :
+ ixgbe_clean_rx_irq(q_vector, ring,
per_ring_budget);
work_done += cleaned;
@@ -3196,11 +3197,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
return budget;
/* all work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr(q_vector);
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+ if (likely(napi_complete_done(napi, work_done))) {
+ if (adapter->rx_itr_setting & 1)
+ ixgbe_set_itr(q_vector);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable_queues(adapter,
+ BIT_ULL(q_vector->v_idx));
+ }
return min(work_done, budget - 1);
}
@@ -3473,6 +3476,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u32 txdctl = IXGBE_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
+ ring->xsk_umem = NULL;
+ if (ring_is_xdp(ring))
+ ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+
/* disable queue to avoid issues while updating state */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
IXGBE_WRITE_FLUSH(hw);
@@ -3577,12 +3584,18 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
else
mtqc |= IXGBE_MTQC_64VF;
} else {
- if (tcs > 4)
+ if (tcs > 4) {
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
- else if (tcs > 1)
+ } else if (tcs > 1) {
mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
- else
- mtqc = IXGBE_MTQC_64Q_1PB;
+ } else {
+ u8 max_txq = adapter->num_tx_queues +
+ adapter->num_xdp_queues;
+ if (max_txq > 63)
+ mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ }
}
IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
@@ -3705,10 +3718,27 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
- if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
+ if (rx_ring->xsk_umem) {
+ u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr -
+ XDP_PACKET_HEADROOM;
+
+ /* If the MAC supports setting RXDCTL.RLPML, then
+ * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
+ * RXDCTL.RLPML is set to the actual UMEM buffer
+ * size. If not, we are stuck with a 1k buffer
+ * size resolution, and frames larger than the
+ * UMEM buffer size (as seen at that 1k
+ * resolution) will be dropped.
+ */
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) {
srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
+ } else {
srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ }
/* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -4031,6 +4061,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+ if (ring->xsk_umem) {
+ ring->zca.free = ixgbe_zca_free;
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_ZERO_COPY,
+ &ring->zca));
+
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL));
+ }
+
/* disable queue to avoid use of these values while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -4080,6 +4123,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
#endif
}
+ if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
+ u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
+ XDP_PACKET_HEADROOM;
+
+ rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+ IXGBE_RXDCTL_RLPML_EN);
+ rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN;
+
+ ring->rx_buf_len = xsk_buf_len;
+ }
+
/* initialize rx_buffer_info */
memset(ring->rx_buffer_info, 0,
sizeof(struct ixgbe_rx_buffer) * ring->count);
@@ -4093,7 +4147,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
+ if (ring->xsk_umem)
+ ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
+ else
+ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -5173,6 +5230,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
+ u64 action;
spin_lock(&adapter->fdir_perfect_lock);
@@ -5181,12 +5239,17 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
+ action = filter->action;
+ if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
+ action =
+ (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
+
ixgbe_fdir_write_perfect_filter_82599(hw,
&filter->filter,
filter->sw_idx,
- (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+ (action == IXGBE_FDIR_DROP_QUEUE) ?
IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[filter->action]->reg_idx);
+ adapter->rx_ring[action]->reg_idx);
}
spin_unlock(&adapter->fdir_perfect_lock);
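
For reference, the action decode above follows ethtool's ring_cookie
convention: the VF index lives in the upper 32 bits, offset by one so
that zero means "deliver to the PF". A hedged sketch of just that
decode; the helper name is hypothetical and the queue bits are elided:

	#include <linux/ethtool.h>

	static u64 example_vf_adjusted_action(u64 ring_cookie)
	{
		/* e.g. ring_cookie = 2ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF
		 * yields 1, i.e. the first VF
		 */
		return (ring_cookie >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
	}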
@@ -5201,6 +5264,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
u16 i = rx_ring->next_to_clean;
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
+ if (rx_ring->xsk_umem) {
+ ixgbe_xsk_clean_rx_ring(rx_ring);
+ goto skip_free;
+ }
+
/* Free all the Rx ring sk_buffs */
while (i != rx_ring->next_to_alloc) {
if (rx_buffer->skb) {
@@ -5239,6 +5307,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
}
}
+skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -5883,6 +5952,11 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
u16 i = tx_ring->next_to_clean;
struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ if (tx_ring->xsk_umem) {
+ ixgbe_xsk_clean_tx_ring(tx_ring);
+ goto out;
+ }
+
while (i != tx_ring->next_to_use) {
union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
@@ -5934,6 +6008,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
if (!ring_is_xdp(tx_ring))
netdev_tx_reset_queue(txring_txq(tx_ring));
+out:
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -6434,7 +6509,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
struct device *dev = rx_ring->dev;
int orig_node = dev_to_node(dev);
int ring_node = -1;
- int size, err;
+ int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
@@ -6471,13 +6546,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->queue_index) < 0)
goto err;
- err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
- if (err) {
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- goto err;
- }
-
rx_ring->xdp_prog = adapter->xdp_prog;
return 0;
@@ -7775,6 +7843,33 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_check_fw_error - Check firmware for errors
+ * @adapter: the adapter private structure
+ *
+ * Check for firmware errors reported in the FWSM register
+ */
+static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 fwsm;
+
+ /* read fwsm.ext_err_ind register and log errors */
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+
+ if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
+ !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
+ e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
+ fwsm);
+
+ if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
+ e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
* ixgbe_service_task - manages and runs subtasks
* @work: pointer to work_struct containing our data
**/
@@ -7792,6 +7887,15 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
+ if (ixgbe_check_fw_error(adapter)) {
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ rtnl_lock();
+ unregister_netdev(adapter->netdev);
+ rtnl_unlock();
+ }
+ ixgbe_service_event_complete(adapter);
+ return;
+ }
if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
rtnl_lock();
adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
@@ -8066,9 +8170,6 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
-#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
- IXGBE_TXD_CMD_RS)
-
static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
const u8 hdr_len)
@@ -8421,8 +8522,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
}
#endif
-static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
- struct xdp_frame *xdpf)
+int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+ struct xdp_frame *xdpf)
{
struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
struct ixgbe_tx_buffer *tx_buffer;
@@ -8644,6 +8745,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
+ if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
+ return NETDEV_TX_BUSY;
return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}
@@ -8768,28 +8871,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
return err;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbe_netpoll(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__IXGBE_DOWN, &adapter->state))
- return;
-
- /* loop through and schedule all active queues */
- for (i = 0; i < adapter->num_q_vectors; i++)
- ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
-}
-
-#endif
-
static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
struct ixgbe_ring *ring)
{
@@ -10177,12 +10258,19 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp->prog_id = adapter->xdp_prog ?
adapter->xdp_prog->aux->id : 0;
return 0;
+ case XDP_QUERY_XSK_UMEM:
+ return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem,
+ xdp->xsk.queue_id);
+ case XDP_SETUP_XSK_UMEM:
+ return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+ xdp->xsk.queue_id);
+
default:
return -EINVAL;
}
}
-static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
+void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
{
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
@@ -10212,6 +10300,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(!ring))
return -ENXIO;
+ if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
+ return -ENXIO;
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
@@ -10251,9 +10342,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
.ndo_setup_tc = __ixgbe_setup_tc,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ixgbe_netpoll,
-#endif
#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
@@ -10276,8 +10364,162 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
+ .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit,
};
+static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 reg_idx = tx_ring->reg_idx;
+ int wait_loop;
+ u32 txdctl;
+
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+
+ /* delay mechanism from ixgbe_disable_tx */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+
+ if (!(txdctl & IXGBE_TXDCTL_ENABLE))
+ return;
+ }
+
+ e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+static void ixgbe_disable_txr(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring)
+{
+ set_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+ ixgbe_disable_txr_hw(adapter, tx_ring);
+}
+
+static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring)
+{
+ unsigned long wait_delay, delay_interval;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 reg_idx = rx_ring->reg_idx;
+ int wait_loop;
+ u32 rxdctl;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ rxdctl |= IXGBE_RXDCTL_SWFLSH;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ /* RXDCTL.EN may not change on 82598 if link is down, so skip it */
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* delay mechanism from ixgbe_disable_rx */
+ delay_interval = ixgbe_get_completion_timeout(adapter) / 100;
+
+ wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ wait_delay = delay_interval;
+
+ while (wait_loop--) {
+ usleep_range(wait_delay, wait_delay + 10);
+ wait_delay += delay_interval * 2;
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+
+ if (!(rxdctl & IXGBE_RXDCTL_ENABLE))
+ return;
+ }
+
+ e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n");
+}
+
+static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring)
+{
+ memset(&tx_ring->stats, 0, sizeof(tx_ring->stats));
+ memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
+}
+
+static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
+{
+ memset(&rx_ring->stats, 0, sizeof(rx_ring->stats));
+ memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+}
+
+/**
+ * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
+ * @adapter: adapter structure
+ * @ring: ring index
+ *
+ * This function disables a certain Rx/Tx/XDP Tx ring. The function
+ * assumes that the netdev is running.
+ **/
+void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
+{
+ struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
+
+ rx_ring = adapter->rx_ring[ring];
+ tx_ring = adapter->tx_ring[ring];
+ xdp_ring = adapter->xdp_ring[ring];
+
+ ixgbe_disable_txr(adapter, tx_ring);
+ if (xdp_ring)
+ ixgbe_disable_txr(adapter, xdp_ring);
+ ixgbe_disable_rxr_hw(adapter, rx_ring);
+
+ if (xdp_ring)
+ synchronize_sched();
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
+ ixgbe_clean_tx_ring(tx_ring);
+ if (xdp_ring)
+ ixgbe_clean_tx_ring(xdp_ring);
+ ixgbe_clean_rx_ring(rx_ring);
+
+ ixgbe_reset_txr_stats(tx_ring);
+ if (xdp_ring)
+ ixgbe_reset_txr_stats(xdp_ring);
+ ixgbe_reset_rxr_stats(rx_ring);
+}
+
+/**
+ * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings
+ * @adapter: adapter structure
+ * @ring: ring index
+ *
+ * This function enables a certain Rx/Tx/XDP Tx ring. The function
+ * assumes that the netdev is running.
+ **/
+void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
+{
+ struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring;
+
+ rx_ring = adapter->rx_ring[ring];
+ tx_ring = adapter->tx_ring[ring];
+ xdp_ring = adapter->xdp_ring[ring];
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+
+ ixgbe_configure_tx_ring(adapter, tx_ring);
+ if (xdp_ring)
+ ixgbe_configure_tx_ring(adapter, xdp_ring);
+ ixgbe_configure_rx_ring(adapter, rx_ring);
+
+ clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
+ if (xdp_ring)
+ clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+}
+
/**
* ixgbe_enumerate_functions - Get the number of ports this device has
* @adapter: adapter structure
@@ -10716,6 +10958,11 @@ skip_sriov:
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
+ if (ixgbe_check_fw_error(adapter)) {
+ err = -EIO;
+ goto err_sw_init;
+ }
+
/* make sure the EEPROM is good */
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
e_dev_err("The EEPROM Checksum Is Not Valid\n");
@@ -11075,8 +11322,6 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
/* Free device reference count */
pci_dev_put(vfdev);
}
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
/*
@@ -11126,7 +11371,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
pci_ers_result_t result;
- int err;
if (pci_enable_device_mem(pdev)) {
e_err(probe, "Cannot re-enable PCI device after reset.\n");
@@ -11146,13 +11390,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- e_dev_err("pci_cleanup_aer_uncorrect_error_status "
- "failed 0x%0x\n", err);
- /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index e085b6520dac..a148534d7256 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -50,6 +50,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
+ ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -80,6 +81,10 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+/* mailbox API, version 1.4 VF requests */
+#define IXGBE_VF_IPSEC_ADD 0x0d
+#define IXGBE_VF_IPSEC_DEL 0x0e
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
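
A VF driver is expected to negotiate mailbox API 1.4 before issuing the
new IPsec opcodes. A minimal sketch of that negotiation following the
existing IXGBE_VF_API_NEGOTIATE convention; reading back the PF's
ACK/NACK is elided and the helper name is illustrative:

	static int example_negotiate_api_14(struct ixgbe_hw *hw)
	{
		u32 msg[3];

		msg[0] = IXGBE_VF_API_NEGOTIATE;
		msg[1] = ixgbe_mbox_api_14;
		msg[2] = 0;

		/* the PF replies with ACK/NACK in msg[0] */
		return hw->mbx.ops.write_posted(hw, msg, 3);
	}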
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 3c6f01c41b78..af25a8fffeb8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -496,6 +496,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
/* Version 1.1 supports jumbo frames on VFs if PF has
* jumbo frames enabled which means legacy VFs are
* disabled
@@ -728,6 +729,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* reset multicast table array for vf */
adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+ /* clear any ipsec table info */
+ ixgbe_ipsec_vf_clear(adapter, vf);
+
/* Flush and reset the mta with the new values */
ixgbe_set_rx_mode(adapter->netdev);
@@ -1000,6 +1004,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
adapter->vfinfo[vf].vf_api = api;
return 0;
default:
@@ -1025,6 +1030,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
break;
default:
return -1;
@@ -1065,6 +1071,7 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* verify the PF is supporting the correct API */
switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
break;
@@ -1097,6 +1104,7 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
/* verify the PF is supporting the correct API */
switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
break;
@@ -1122,8 +1130,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
/* promisc introduced in 1.3 version */
if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
return -EOPNOTSUPP;
- /* Fall threw */
+ /* Fall through */
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
break;
default:
return -EOPNOTSUPP;
@@ -1249,6 +1258,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_UPDATE_XCAST_MODE:
retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_IPSEC_ADD:
+ retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_IPSEC_DEL:
+ retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf);
+ break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
retval = IXGBE_ERR_MBX;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
new file mode 100644
index 000000000000..53d4089f5644
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2018 Intel Corporation. */
+
+#ifndef _IXGBE_TXRX_COMMON_H_
+#define _IXGBE_TXRX_COMMON_H_
+
+#define IXGBE_XDP_PASS 0
+#define IXGBE_XDP_CONSUMED BIT(0)
+#define IXGBE_XDP_TX BIT(1)
+#define IXGBE_XDP_REDIR BIT(2)
+
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
+ IXGBE_TXD_CMD_RS)
+
+int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+ struct xdp_frame *xdpf);
+bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb);
+void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring);
+void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
+
+void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
+void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
+
+struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
+int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
+ u16 qid);
+int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+ u16 qid);
+
+void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
+
+void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
+int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *rx_ring,
+ const int budget);
+void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring);
+bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *tx_ring, int napi_budget);
+int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id);
+void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
+
+#endif /* _IXGBE_TXRX_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 41bcbb337e83..84f2dba39e36 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -924,6 +924,9 @@ struct ixgbe_nvm_version {
/* Firmware Semaphore Register */
#define IXGBE_FWSM_MODE_MASK 0xE
#define IXGBE_FWSM_FW_MODE_PT 0x4
+#define IXGBE_FWSM_FW_NVM_RECOVERY_MODE BIT(5)
+#define IXGBE_FWSM_EXT_ERR_IND_MASK 0x01F80000
+#define IXGBE_FWSM_FW_VAL_BIT BIT(15)
/* ARC Subsystem registers */
#define IXGBE_HICR 0x15F00
@@ -3461,6 +3464,7 @@ struct ixgbe_mac_operations {
const char *);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+ bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index a8148c7126e5..10dbaf4f6e80 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1247,6 +1247,20 @@ static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if in FW NVM recovery mode.
+ */
+static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
+{
+ u32 fwsm;
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+ return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
+}
+
/** ixgbe_disable_rx_x550 - Disable RX unit
*
* Disables the Rx DMA unit for x550
@@ -3816,6 +3830,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
.init_thermal_sensor_thresh = NULL, \
+ .fw_recovery_mode = &ixgbe_fw_recovery_mode_X550, \
.enable_rx = &ixgbe_enable_rx_generic, \
.disable_rx = &ixgbe_disable_rx_x550, \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
new file mode 100644
index 000000000000..65c3e2c979d4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -0,0 +1,801 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
+#include <net/xdp.h>
+
+#include "ixgbe.h"
+#include "ixgbe_txrx_common.h"
+
+struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ bool xdp_on = READ_ONCE(adapter->xdp_prog);
+ int qid = ring->ring_idx;
+
+ if (!xdp_on || !adapter->xsk_umems ||
+ qid >= adapter->num_xsk_umems || !adapter->xsk_umems[qid])
+ return NULL;
+
+ return adapter->xsk_umems[qid];
+}
+
+static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter)
+{
+ if (adapter->xsk_umems)
+ return 0;
+
+ adapter->num_xsk_umems_used = 0;
+ adapter->num_xsk_umems = adapter->num_rx_queues;
+ adapter->xsk_umems = kcalloc(adapter->num_xsk_umems,
+ sizeof(*adapter->xsk_umems),
+ GFP_KERNEL);
+ if (!adapter->xsk_umems) {
+ adapter->num_xsk_umems = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem,
+ u16 qid)
+{
+ int err;
+
+ err = ixgbe_alloc_xsk_umems(adapter);
+ if (err)
+ return err;
+
+ adapter->xsk_umems[qid] = umem;
+ adapter->num_xsk_umems_used++;
+
+ return 0;
+}
+
+static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid)
+{
+ adapter->xsk_umems[qid] = NULL;
+ adapter->num_xsk_umems_used--;
+
+ if (adapter->num_xsk_umems_used == 0) {
+ kfree(adapter->xsk_umems);
+ adapter->xsk_umems = NULL;
+ adapter->num_xsk_umems = 0;
+ }
+}
+
+static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem)
+{
+ struct device *dev = &adapter->pdev->dev;
+ unsigned int i, j;
+ dma_addr_t dma;
+
+ for (i = 0; i < umem->npgs; i++) {
+ dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+ if (dma_mapping_error(dev, dma))
+ goto out_unmap;
+
+ umem->pages[i].dma = dma;
+ }
+
+ return 0;
+
+out_unmap:
+ for (j = 0; j < i; j++) {
+ dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+ umem->pages[j].dma = 0;
+ }
+
+ return -1;
+}
+
+static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem)
+{
+ struct device *dev = &adapter->pdev->dev;
+ unsigned int i;
+
+ for (i = 0; i < umem->npgs; i++) {
+ dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
+
+ umem->pages[i].dma = 0;
+ }
+}
+
+static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
+ struct xdp_umem *umem,
+ u16 qid)
+{
+ struct xdp_umem_fq_reuse *reuseq;
+ bool if_running;
+ int err;
+
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (adapter->xsk_umems) {
+ if (qid >= adapter->num_xsk_umems)
+ return -EINVAL;
+ if (adapter->xsk_umems[qid])
+ return -EBUSY;
+ }
+
+ reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
+ if (!reuseq)
+ return -ENOMEM;
+
+ xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
+
+ err = ixgbe_xsk_umem_dma_map(adapter, umem);
+ if (err)
+ return err;
+
+ if_running = netif_running(adapter->netdev) &&
+ READ_ONCE(adapter->xdp_prog);
+
+ if (if_running)
+ ixgbe_txrx_ring_disable(adapter, qid);
+
+ err = ixgbe_add_xsk_umem(adapter, umem, qid);
+
+ if (if_running)
+ ixgbe_txrx_ring_enable(adapter, qid);
+
+ return err;
+}
+
+static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
+{
+ bool if_running;
+
+ if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
+ !adapter->xsk_umems[qid])
+ return -EINVAL;
+
+ if_running = netif_running(adapter->netdev) &&
+ READ_ONCE(adapter->xdp_prog);
+
+ if (if_running)
+ ixgbe_txrx_ring_disable(adapter, qid);
+
+ ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]);
+ ixgbe_remove_xsk_umem(adapter, qid);
+
+ if (if_running)
+ ixgbe_txrx_ring_enable(adapter, qid);
+
+ return 0;
+}
+
+int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
+ u16 qid)
+{
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (adapter->xsk_umems) {
+ if (qid >= adapter->num_xsk_umems)
+ return -EINVAL;
+ *umem = adapter->xsk_umems[qid];
+ return 0;
+ }
+
+ *umem = NULL;
+ return 0;
+}
+
+int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+ u16 qid)
+{
+ return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
+ ixgbe_xsk_umem_disable(adapter, qid);
+}
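
For context, ixgbe_xsk_umem_setup() is reached through ndo_bpf when user
space binds an AF_XDP socket to a queue. A hedged user-space sketch; the
umem registration and ring setup via setsockopt() are elided:

	#include <linux/if_xdp.h>
	#include <net/if.h>
	#include <sys/socket.h>
	#include <unistd.h>

	static int example_bind_xsk(const char *ifname, int queue_id)
	{
		struct sockaddr_xdp sxdp = { 0 };
		int fd = socket(AF_XDP, SOCK_RAW, 0);

		if (fd < 0)
			return -1;

		/* XDP_UMEM_REG and the ring setsockopt() calls
		 * would go here, before the bind
		 */
		sxdp.sxdp_family = AF_XDP;
		sxdp.sxdp_ifindex = if_nametoindex(ifname);
		sxdp.sxdp_queue_id = queue_id;
		sxdp.sxdp_flags = XDP_ZEROCOPY;

		if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}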
+
+static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ int err, result = IXGBE_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ xdp->handle += xdp->data - xdp->data_hard_start;
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ xdpf = convert_to_xdp_frame(xdp);
+ if (unlikely(!xdpf)) {
+ result = IXGBE_XDP_CONSUMED;
+ break;
+ }
+ result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fallthrough */
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ /* fallthrough -- handle aborts by dropping packet */
+ case XDP_DROP:
+ result = IXGBE_XDP_CONSUMED;
+ break;
+ }
+ rcu_read_unlock();
+ return result;
+}
+
+static struct
+ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
+ unsigned int size)
+{
+ struct ixgbe_rx_buffer *bi;
+
+ bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ bi->dma, 0,
+ size,
+ DMA_BIDIRECTIONAL);
+
+ return bi;
+}
+
+static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *obi)
+{
+ unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
+ u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ u16 nta = rx_ring->next_to_alloc;
+ struct ixgbe_rx_buffer *nbi;
+
+ nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ nbi->dma = obi->dma & mask;
+ nbi->dma += hr;
+
+ nbi->addr = (void *)((unsigned long)obi->addr & mask);
+ nbi->addr += hr;
+
+ nbi->handle = obi->handle & mask;
+ nbi->handle += rx_ring->xsk_umem->headroom;
+
+ obi->addr = NULL;
+ obi->skb = NULL;
+}
+
+void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
+{
+ struct ixgbe_rx_buffer *bi;
+ struct ixgbe_ring *rx_ring;
+ u64 hr, mask;
+ u16 nta;
+
+ rx_ring = container_of(alloc, struct ixgbe_ring, zca);
+ hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
+ mask = rx_ring->xsk_umem->chunk_mask;
+
+ nta = rx_ring->next_to_alloc;
+ bi = rx_ring->rx_buffer_info;
+
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ handle &= mask;
+
+ bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
+ bi->addr += hr;
+
+ bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
+}
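
The recycling math above keys everything off the umem chunk: masking a
handle with chunk_mask yields the chunk base, and the umem headroom plus
XDP_PACKET_HEADROOM are then re-applied on top. A small worked sketch,
assuming 2KB chunks; the helper is illustrative only:

	static unsigned long example_chunk_base(unsigned long handle,
						unsigned long chunk_mask)
	{
		/* with 2KB chunks chunk_mask == ~0x7ffUL, so a handle
		 * of 0x5a40 maps back to its chunk base of 0x5800
		 */
		return handle & chunk_mask;
	}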
+
+static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ void *addr = bi->addr;
+ u64 handle, hr;
+
+ if (addr)
+ return true;
+
+ if (!xsk_umem_peek_addr(umem, &handle)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
+ }
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ bi->dma = xdp_umem_get_dma(umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(umem, handle);
+ bi->addr += hr;
+
+ bi->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr(umem);
+ return true;
+}
+
+static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi)
+{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
+ u64 handle, hr;
+
+ if (!xsk_umem_peek_addr_rq(umem, &handle)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ return false;
+ }
+
+ handle &= rx_ring->xsk_umem->chunk_mask;
+
+ hr = umem->headroom + XDP_PACKET_HEADROOM;
+
+ bi->dma = xdp_umem_get_dma(umem, handle);
+ bi->dma += hr;
+
+ bi->addr = xdp_umem_get_data(umem, handle);
+ bi->addr += hr;
+
+ bi->handle = handle + umem->headroom;
+
+ xsk_umem_discard_addr_rq(umem);
+ return true;
+}
+
+static __always_inline bool
+__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
+ bool alloc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi))
+{
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *bi;
+ u16 i = rx_ring->next_to_use;
+ bool ok = true;
+
+ /* nothing to do */
+ if (!cleaned_count)
+ return true;
+
+ rx_desc = IXGBE_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
+
+ do {
+ if (!alloc(rx_ring, bi)) {
+ ok = false;
+ break;
+ }
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_BIDIRECTIONAL);
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = IXGBE_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
+
+ return ok;
+}
+
+void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
+{
+ __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
+ ixgbe_alloc_buffer_slow_zc);
+}
+
+static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
+ u16 count)
+{
+ return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
+ ixgbe_alloc_buffer_zc);
+}
+
+static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
+ struct ixgbe_rx_buffer *bi,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ struct sk_buff *skb;
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ xdp->data_end - xdp->data_hard_start,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ return skb;
+}
+
+static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(IXGBE_RX_DESC(rx_ring, ntc));
+}
+
+int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *rx_ring,
+ const int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+ unsigned int xdp_res, xdp_xmit = 0;
+ bool failure = false;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+ while (likely(total_rx_packets < budget)) {
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbe_rx_buffer *bi;
+ unsigned int size;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
+ failure = failure ||
+ !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ bi = ixgbe_get_rx_buffer_zc(rx_ring, size);
+
+ if (unlikely(!ixgbe_test_staterr(rx_desc,
+ IXGBE_RXD_STAT_EOP))) {
+ struct ixgbe_rx_buffer *next_bi;
+
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ ixgbe_inc_ntc(rx_ring);
+ next_bi =
+ &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ next_bi->skb = ERR_PTR(-EINVAL);
+ continue;
+ }
+
+ if (unlikely(bi->skb)) {
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ ixgbe_inc_ntc(rx_ring);
+ continue;
+ }
+
+ xdp.data = bi->addr;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp.data_end = xdp.data + size;
+ xdp.handle = bi->handle;
+
+ xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);
+
+ if (xdp_res) {
+ if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ bi->addr = NULL;
+ bi->skb = NULL;
+ } else {
+ ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
+ }
+ total_rx_packets++;
+ total_rx_bytes += size;
+
+ cleaned_count++;
+ ixgbe_inc_ntc(rx_ring);
+ continue;
+ }
+
+ /* XDP_PASS path */
+ skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ break;
+ }
+
+ cleaned_count++;
+ ixgbe_inc_ntc(rx_ring);
+
+ if (eth_skb_pad(skb))
+ continue;
+
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
+ ixgbe_rx_skb(q_vector, skb);
+ }
+
+ if (xdp_xmit & IXGBE_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_xmit & IXGBE_XDP_TX) {
+ struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+ }
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_packets += total_rx_packets;
+ q_vector->rx.total_bytes += total_rx_bytes;
+
+ return failure ? budget : (int)total_rx_packets;
+}
+
+void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+ u16 i = rx_ring->next_to_clean;
+ struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];
+
+ while (i != rx_ring->next_to_alloc) {
+ xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
+ i++;
+ bi++;
+ if (i == rx_ring->count) {
+ i = 0;
+ bi = rx_ring->rx_buffer_info;
+ }
+ }
+}
+
+static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+{
+ union ixgbe_adv_tx_desc *tx_desc = NULL;
+ struct ixgbe_tx_buffer *tx_bi;
+ bool work_done = true;
+ u32 len, cmd_type;
+ dma_addr_t dma;
+
+ while (budget-- > 0) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ work_done = false;
+ break;
+ }
+
+ if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+ break;
+
+ dma_sync_single_for_device(xdp_ring->dev, dma, len,
+ DMA_BIDIRECTIONAL);
+
+ tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
+ tx_bi->bytecount = len;
+ tx_bi->xdpf = NULL;
+
+ tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS;
+ cmd_type |= len | IXGBE_TXD_CMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status =
+ cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
+ }
+
+ if (tx_desc) {
+ ixgbe_xdp_ring_update_tail(xdp_ring);
+ xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ }
+
+ return !!budget && work_done;
+}
+
+static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *tx_bi)
+{
+ xdp_return_frame(tx_bi->xdpf);
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_bi, dma),
+ dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_bi, len, 0);
+}
+
+bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *tx_ring, int napi_budget)
+{
+ unsigned int total_packets = 0, total_bytes = 0;
+ u32 i = tx_ring->next_to_clean, xsk_frames = 0;
+ unsigned int budget = q_vector->tx.work_limit;
+ struct xdp_umem *umem = tx_ring->xsk_umem;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct ixgbe_tx_buffer *tx_bi;
+ bool xmit_done;
+
+ tx_bi = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ break;
+
+ total_bytes += tx_bi->bytecount;
+ total_packets += tx_bi->gso_segs;
+
+ if (tx_bi->xdpf)
+ ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
+ else
+ xsk_frames++;
+
+ tx_bi->xdpf = NULL;
+
+ tx_bi++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_bi = tx_ring->tx_buffer_info;
+ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(umem, xsk_frames);
+
+ xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
+ return budget > 0 && xmit_done;
+}
+
+int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_ring *ring;
+
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!READ_ONCE(adapter->xdp_prog))
+ return -ENXIO;
+
+ if (qid >= adapter->num_xdp_queues)
+ return -ENXIO;
+
+ if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
+ return -ENXIO;
+
+ ring = adapter->xdp_ring[qid];
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
+ u64 eics = BIT_ULL(ring->q_vector->v_idx);
+
+ ixgbe_irq_rearm_queues(adapter, eics);
+ }
+
+ return 0;
+}
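
User space drives this wakeup by issuing an empty send on the AF_XDP
socket after producing Tx descriptors. A hedged user-space sketch:

	#include <sys/socket.h>

	static void example_kick_tx(int xsk_fd)
	{
		/* EAGAIN/EBUSY are expected under load and simply retried */
		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	}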
+
+void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
+{
+ u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+ struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct ixgbe_tx_buffer *tx_bi;
+ u32 xsk_frames = 0;
+
+ while (ntc != ntu) {
+ tx_bi = &tx_ring->tx_buffer_info[ntc];
+
+ if (tx_bi->xdpf)
+ ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
+ else
+ xsk_frames++;
+
+ tx_bi->xdpf = NULL;
+
+ ntc++;
+ if (ntc == tx_ring->count)
+ ntc = 0;
+ }
+
+ if (xsk_frames)
+ xsk_umem_complete_tx(umem, xsk_frames);
+}
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index aba1e6a37a6a..297d0f0858b5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -10,4 +10,5 @@ ixgbevf-objs := vf.o \
mbx.o \
ethtool.o \
ixgbevf_main.o
+ixgbevf-$(CONFIG_XFRM_OFFLOAD) += ipsec.o
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 700d8eb2f6f8..6bace746eaac 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -133,9 +133,14 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */
#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
@@ -229,7 +234,7 @@ union ixgbe_adv_rx_desc {
/* Context descriptors */
struct ixgbe_adv_tx_context_desc {
__le32 vlan_macip_lens;
- __le32 seqnum_seed;
+ __le32 fceof_saidx;
__le32 type_tucmd_mlhl;
__le32 mss_l4len_idx;
};
@@ -250,9 +255,12 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* ESP Encrypt Enable */
#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 631c91046f39..5399787e07af 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -55,6 +55,8 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+ IXGBEVF_STAT("tx_ipsec", tx_ipsec),
+ IXGBEVF_STAT("rx_ipsec", rx_ipsec),
};
#define IXGBEVF_QUEUE_STATS_LEN ( \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
new file mode 100644
index 000000000000..e8a3231be0bf
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */
+
+#include "ixgbevf.h"
+#include <net/xfrm.h>
+#include <crypto/aead.h>
+
+#define IXGBE_IPSEC_KEY_BITS 160
+static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
+
+/**
+ * ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA
+ * @adapter: board private structure
+ * @xs: xfrm info to be sent to the PF
+ *
+ * Returns: positive offload handle from the PF, or negative error code
+ **/
+static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
+ struct xfrm_state *xs)
+{
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct sa_mbx_msg *sam;
+ int ret;
+
+ /* send the important bits to the PF */
+ sam = (struct sa_mbx_msg *)(&msgbuf[1]);
+ sam->flags = xs->xso.flags;
+ sam->spi = xs->id.spi;
+ sam->proto = xs->id.proto;
+ sam->family = xs->props.family;
+
+ if (xs->props.family == AF_INET6)
+ memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6));
+ else
+ memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4));
+ memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));
+
+ msgbuf[0] = IXGBE_VF_IPSEC_ADD;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+ if (ret)
+ goto out;
+
+ ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
+ if (ret)
+ goto out;
+
+ ret = (int)msgbuf[1];
+ if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0)
+ ret = -1;
+
+out:
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ return ret;
+}
+
+/**
+ * ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA
+ * @adapter: board private structure
+ * @pfsa: sa index returned from PF when created, -1 for all
+ *
+ * Returns: 0 on success, or negative error code
+ **/
+static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 msgbuf[2];
+ int err;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ msgbuf[0] = IXGBE_VF_IPSEC_DEL;
+ msgbuf[1] = (u32)pfsa;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
+ if (err)
+ goto out;
+
+ err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
+
+out:
+ spin_unlock_bh(&adapter->mbx_lock);
+ return err;
+}
+
+/**
+ * ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset
+ * @adapter: board private structure
+ *
+ * Reload the HW tables from the SW tables after they've been bashed
+ * by a chip reset. While we're here, make sure any stale VF data is
+ * removed, since we go through reset when num_vfs changes.
+ **/
+void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ if (!(adapter->netdev->features & NETIF_F_HW_ESP))
+ return;
+
+ /* reload the Rx and Tx keys */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ struct rx_sa *r = &ipsec->rx_tbl[i];
+ struct tx_sa *t = &ipsec->tx_tbl[i];
+ int ret;
+
+ if (r->used) {
+ ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs);
+ if (ret < 0)
+ netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n",
+ i, ret);
+ }
+
+ if (t->used) {
+ ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs);
+ if (ret < 0)
+ netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n",
+ i, ret);
+ }
+ }
+}
+
+/**
+ * ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index
+ * @ipsec: pointer to IPsec struct
+ * @rxtable: true if we need to look in the Rx table
+ *
+ * Returns the first unused index in either the Rx or Tx SA table
+ **/
+static
+int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable)
+{
+ u32 i;
+
+ if (rxtable) {
+ if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
+ return -ENOSPC;
+
+ /* search rx sa table */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ if (!ipsec->rx_tbl[i].used)
+ return i;
+ }
+ } else {
+ if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
+ return -ENOSPC;
+
+ /* search tx sa table */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
+ if (!ipsec->tx_tbl[i].used)
+ return i;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/**
+ * ixgbevf_ipsec_find_rx_state - find the state that matches
+ * @ipsec: pointer to IPsec struct
+ * @daddr: inbound address to match
+ * @proto: protocol to match
+ * @spi: SPI to match
+ * @ip4: true if using an IPv4 address
+ *
+ * Returns a pointer to the matching SA state information
+ **/
+static
+struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
+ __be32 *daddr, u8 proto,
+ __be32 spi, bool ip4)
+{
+ struct xfrm_state *ret = NULL;
+ struct rx_sa *rsa;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
+ (__force u32)spi) {
+ if (spi == rsa->xs->id.spi &&
+ ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
+ (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
+ sizeof(rsa->xs->id.daddr.a6)))) &&
+ proto == rsa->xs->id.proto) {
+ ret = rsa->xs;
+ xfrm_state_hold(ret);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @xs: pointer to xfrm_state struct
+ * @mykey: pointer to key array to populate
+ * @mysalt: pointer to salt value to populate
+ *
+ * This copies the protocol keys and salt to our own data tables. The
+ * 82599 family only supports the one algorithm.
+ **/
+static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
+ u32 *mykey, u32 *mysalt)
+{
+ struct net_device *dev = xs->xso.dev;
+ unsigned char *key_data;
+ char *alg_name = NULL;
+ int key_len;
+
+ if (!xs->aead) {
+ netdev_err(dev, "Unsupported IPsec algorithm\n");
+ return -EINVAL;
+ }
+
+ if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
+ netdev_err(dev, "IPsec offload requires %d bit authentication\n",
+ IXGBE_IPSEC_AUTH_BITS);
+ return -EINVAL;
+ }
+
+ key_data = &xs->aead->alg_key[0];
+ key_len = xs->aead->alg_key_len;
+ alg_name = xs->aead->alg_name;
+
+ if (strcmp(alg_name, aes_gcm_name)) {
+ netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
+ aes_gcm_name);
+ return -EINVAL;
+ }
+
+ /* The key bytes come down in a big-endian array of bytes, so
+ * we don't need to do any byte swapping.
+ * A key_len of 160 bits covers the 16-byte key plus the 4-byte salt.
+ */
+ if (key_len > IXGBE_IPSEC_KEY_BITS) {
+ *mysalt = ((u32 *)key_data)[4];
+ } else if (key_len == IXGBE_IPSEC_KEY_BITS) {
+ *mysalt = 0;
+ } else {
+ netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
+ return -EINVAL;
+ }
+ memcpy(mykey, key_data, 16);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_ipsec_add_sa - program device with a security association
+ * @xs: pointer to transformer state struct
+ **/
+static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
+{
+ struct net_device *dev = xs->xso.dev;
+ struct ixgbevf_adapter *adapter = netdev_priv(dev);
+ struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ u16 sa_idx;
+ int ret;
+
+ if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
+ xs->id.proto);
+ return -EINVAL;
+ }
+
+ if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ struct rx_sa rsa;
+
+ if (xs->calg) {
+ netdev_err(dev, "Compression offload not supported\n");
+ return -EINVAL;
+ }
+
+ /* find the first unused index */
+ ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);
+ if (ret < 0) {
+ netdev_err(dev, "No space for SA in Rx table!\n");
+ return ret;
+ }
+ sa_idx = (u16)ret;
+
+ memset(&rsa, 0, sizeof(rsa));
+ rsa.used = true;
+ rsa.xs = xs;
+
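+ /* only an ESP SA carries a cipher to decrypt */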
+ if (rsa.xs->id.proto == IPPROTO_ESP)
+ rsa.decrypt = xs->ealg || xs->aead;
+
+ /* get the key and salt */
+ ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ if (ret) {
+ netdev_err(dev, "Failed to get key data for Rx SA table\n");
+ return ret;
+ }
+
+ /* get ip for rx sa table */
+ if (xs->props.family == AF_INET6)
+ memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
+ else
+ memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);
+
+ rsa.mode = IXGBE_RXMOD_VALID;
+ if (rsa.xs->id.proto == IPPROTO_ESP)
+ rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
+ if (rsa.decrypt)
+ rsa.mode |= IXGBE_RXMOD_DECRYPT;
+ if (rsa.xs->props.family == AF_INET6)
+ rsa.mode |= IXGBE_RXMOD_IPV6;
+
+ ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
+ if (ret < 0)
+ return ret;
+ rsa.pfsa = ret;
+
+ /* the preparations worked, so save the info */
+ memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));
+
+ xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;
+
+ ipsec->num_rx_sa++;
+
+ /* hash the new entry for faster search in Rx path */
+ hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
+ (__force u32)rsa.xs->id.spi);
+ } else {
+ struct tx_sa tsa;
+
+ /* find the first unused index */
+ ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);
+ if (ret < 0) {
+ netdev_err(dev, "No space for SA in Tx table\n");
+ return ret;
+ }
+ sa_idx = (u16)ret;
+
+ memset(&tsa, 0, sizeof(tsa));
+ tsa.used = true;
+ tsa.xs = xs;
+
+ if (xs->id.proto == IPPROTO_ESP)
+ tsa.encrypt = xs->ealg || xs->aead;
+
+ ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ if (ret) {
+ netdev_err(dev, "Failed to get key data for Tx SA table\n");
+ memset(&tsa, 0, sizeof(tsa));
+ return ret;
+ }
+
+ ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
+ if (ret < 0)
+ return ret;
+ tsa.pfsa = ret;
+
+ /* the preparations worked, so save the info */
+ memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));
+
+ xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;
+
+ ipsec->num_tx_sa++;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbevf_ipsec_del_sa - clear out this specific SA
+ * @xs: pointer to transformer state struct
+ **/
+static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
+{
+ struct net_device *dev = xs->xso.dev;
+ struct ixgbevf_adapter *adapter = netdev_priv(dev);
+ struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ u16 sa_idx;
+
+ if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
+
+ if (!ipsec->rx_tbl[sa_idx].used) {
+ netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
+ sa_idx, xs->xso.offload_handle);
+ return;
+ }
+
+ ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa);
+ hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist);
+ memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa));
+ ipsec->num_rx_sa--;
+ } else {
+ sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
+
+ if (!ipsec->tx_tbl[sa_idx].used) {
+ netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
+ sa_idx, xs->xso.offload_handle);
+ return;
+ }
+
+ ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa);
+ memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
+ ipsec->num_tx_sa--;
+ }
+}
+
+/**
+ * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
+ * @skb: current data packet
+ * @xs: pointer to transformer state struct
+ **/
+static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+{
+ if (xs->props.family == AF_INET) {
+ /* Offload with IPv4 options is not supported yet */
+ if (ip_hdr(skb)->ihl != 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+
+ return true;
+}
+
+static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
+ .xdo_dev_state_add = ixgbevf_ipsec_add_sa,
+ .xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
+ .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
+};
+
+/**
+ * ixgbevf_ipsec_tx - setup Tx flags for IPsec offload
+ * @tx_ring: outgoing context
+ * @first: current data packet
+ * @itd: ipsec Tx data for later use in building context descriptor
+ **/
+int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first,
+ struct ixgbevf_ipsec_tx_data *itd)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ struct xfrm_state *xs;
+ struct tx_sa *tsa;
+ u16 sa_idx;
+
+ if (unlikely(!first->skb->sp->len)) {
+ netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
+ __func__, first->skb->sp->len);
+ return 0;
+ }
+
+ xs = xfrm_input_state(first->skb);
+ if (unlikely(!xs)) {
+ netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
+ __func__, xs);
+ return 0;
+ }
+
+ sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
+ if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
+ netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
+ __func__, sa_idx, xs->xso.offload_handle);
+ return 0;
+ }
+
+ tsa = &ipsec->tx_tbl[sa_idx];
+ if (unlikely(!tsa->used)) {
+ netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
+ __func__, sa_idx);
+ return 0;
+ }
+
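+ /* the context descriptor takes the SA index relative to the Tx table */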
+ itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
+
+ first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM;
+
+ if (xs->id.proto == IPPROTO_ESP) {
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ if (first->protocol == htons(ETH_P_IP))
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+ /* The actual trailer length is authlen (16 bytes) plus
+ * 2 bytes for the proto and the padlen values, plus
+ * padlen bytes of padding. This ends up differing from
+ * the static value found in xs->props.trailer_len (21).
+ *
+ * ... but if we're doing GSO, don't bother as the stack
+ * doesn't add a trailer for those.
+ */
+ if (!skb_is_gso(first->skb)) {
+ /* The "correct" way to get the auth length would be
+ * to use
+ * authlen = crypto_aead_authsize(xs->data);
+ * but since we know we only have one size to worry
+ * about, we can let the compiler use the constant
+ * and save us a few CPU cycles.
+ */
+ const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
+ struct sk_buff *skb = first->skb;
+ u8 padlen;
+ int ret;
+
+ ret = skb_copy_bits(skb, skb->len - (authlen + 2),
+ &padlen, 1);
+ if (unlikely(ret))
+ return 0;
+ itd->trailer_len = authlen + 2 + padlen;
+ }
+ }
+ if (tsa->encrypt)
+ itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;
+
+ return 1;
+}
+
+/**
+ * ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor
+ * @rx_ring: receiving ring
+ * @rx_desc: receive data descriptor
+ * @skb: current data packet
+ *
+ * Determine if there was an IPsec encapsulation noticed, and if so set up
+ * the resulting status for later in the receive stack.
+ **/
+void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev);
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+ __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
+ IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
+ struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ struct xfrm_offload *xo = NULL;
+ struct xfrm_state *xs = NULL;
+ struct ipv6hdr *ip6 = NULL;
+ struct iphdr *ip4 = NULL;
+ void *daddr;
+ __be32 spi;
+ u8 *c_hdr;
+ u8 proto;
+
+ /* Find the IP and crypto headers in the data.
+ * We can assume no VLAN header is in the way, because the
+ * hw won't recognize the IPsec packet and, in any case, the
+ * VLAN device currently doesn't support xfrm offload.
+ */
+ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
+ ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
+ daddr = &ip4->daddr;
+ c_hdr = (u8 *)ip4 + ip4->ihl * 4;
+ } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
+ ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ daddr = &ip6->daddr;
+ c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
+ } else {
+ return;
+ }
+
+ switch (pkt_info & ipsec_pkt_types) {
+ case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
+ spi = ((struct ip_auth_hdr *)c_hdr)->spi;
+ proto = IPPROTO_AH;
+ break;
+ case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
+ spi = ((struct ip_esp_hdr *)c_hdr)->spi;
+ proto = IPPROTO_ESP;
+ break;
+ default:
+ return;
+ }
+
+ xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
+ if (unlikely(!xs))
+ return;
+
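+ /* attach the offloaded xfrm state to the skb's security path */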
+ skb->sp = secpath_dup(skb->sp);
+ if (unlikely(!skb->sp))
+ return;
+
+ skb->sp->xvec[skb->sp->len++] = xs;
+ skb->sp->olen++;
+ xo = xfrm_offload(skb);
+ xo->flags = CRYPTO_DONE;
+ xo->status = CRYPTO_SUCCESS;
+
+ adapter->rx_ipsec++;
+}
+
+/**
+ * ixgbevf_init_ipsec_offload - initialize registers for IPsec operation
+ * @adapter: board private structure
+ **/
+void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbevf_ipsec *ipsec;
+ size_t size;
+
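+ /* the IPsec mailbox commands are only available from API v1.4 on */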
+ switch (adapter->hw.api_version) {
+ case ixgbe_mbox_api_14:
+ break;
+ default:
+ return;
+ }
+
+ ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
+ if (!ipsec)
+ goto err1;
+ hash_init(ipsec->rx_sa_list);
+
+ size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
+ ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
+ if (!ipsec->rx_tbl)
+ goto err2;
+
+ size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
+ ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
+ if (!ipsec->tx_tbl)
+ goto err2;
+
+ ipsec->num_rx_sa = 0;
+ ipsec->num_tx_sa = 0;
+
+ adapter->ipsec = ipsec;
+
+ adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops;
+
+#define IXGBEVF_ESP_FEATURES (NETIF_F_HW_ESP | \
+ NETIF_F_HW_ESP_TX_CSUM | \
+ NETIF_F_GSO_ESP)
+
+ adapter->netdev->features |= IXGBEVF_ESP_FEATURES;
+ adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES;
+
+ return;
+
+err2:
+ kfree(ipsec->rx_tbl);
+ kfree(ipsec->tx_tbl);
+ kfree(ipsec);
+err1:
+ netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
+}
+
+/**
+ * ixgbevf_stop_ipsec_offload - tear down the IPsec offload
+ * @adapter: board private structure
+ **/
+void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+
+ adapter->ipsec = NULL;
+ if (ipsec) {
+ kfree(ipsec->rx_tbl);
+ kfree(ipsec->tx_tbl);
+ kfree(ipsec);
+ }
+}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.h b/drivers/net/ethernet/intel/ixgbevf/ipsec.h
new file mode 100644
index 000000000000..3740725041c3
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */
+
+#ifndef _IXGBEVF_IPSEC_H_
+#define _IXGBEVF_IPSEC_H_
+
+#define IXGBE_IPSEC_MAX_SA_COUNT 1024
+#define IXGBE_IPSEC_BASE_RX_INDEX 0
+#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT
+#define IXGBE_IPSEC_AUTH_BITS 128
+
+#define IXGBE_RXMOD_VALID 0x00000001
+#define IXGBE_RXMOD_PROTO_ESP 0x00000004
+#define IXGBE_RXMOD_DECRYPT 0x00000008
+#define IXGBE_RXMOD_IPV6 0x00000010
+
+struct rx_sa {
+ struct hlist_node hlist;
+ struct xfrm_state *xs;
+ __be32 ipaddr[4];
+ u32 key[4];
+ u32 salt;
+ u32 mode;
+ u32 pfsa;
+ bool used;
+ bool decrypt;
+};
+
+struct rx_ip_sa {
+ __be32 ipaddr[4];
+ u32 ref_cnt;
+ bool used;
+};
+
+struct tx_sa {
+ struct xfrm_state *xs;
+ u32 key[4];
+ u32 salt;
+ u32 pfsa;
+ bool encrypt;
+ bool used;
+};
+
+struct ixgbevf_ipsec_tx_data {
+ u32 flags;
+ u16 trailer_len;
+ u16 pfsa;
+};
+
+struct ixgbevf_ipsec {
+ u16 num_rx_sa;
+ u16 num_tx_sa;
+ struct rx_sa *rx_tbl;
+ struct tx_sa *tx_tbl;
+ DECLARE_HASHTABLE(rx_sa_list, 10);
+};
+
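+/* SA parameters sent to the PF in an IXGBE_VF_IPSEC_ADD mailbox message */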
+struct sa_mbx_msg {
+ __be32 spi;
+ u8 flags;
+ u8 proto;
+ u16 family;
+ __be32 addr[4];
+ u32 key[5];
+};
+#endif /* _IXGBEVF_IPSEC_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 56a1031dcc07..e399e1c0c54a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -14,6 +14,7 @@
#include <net/xdp.h>
#include "vf.h"
+#include "ipsec.h"
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
@@ -163,6 +164,7 @@ struct ixgbevf_ring {
#define IXGBE_TX_FLAGS_VLAN BIT(1)
#define IXGBE_TX_FLAGS_TSO BIT(2)
#define IXGBE_TX_FLAGS_IPV4 BIT(3)
+#define IXGBE_TX_FLAGS_IPSEC BIT(4)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -338,6 +340,7 @@ struct ixgbevf_adapter {
struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 restart_queue;
u32 tx_timeout_count;
+ u64 tx_ipsec;
/* RX */
int num_rx_queues;
@@ -348,6 +351,7 @@ struct ixgbevf_adapter {
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
u64 alloc_rx_page;
+ u64 rx_ipsec;
struct msix_entry *msix_entries;
@@ -384,6 +388,10 @@ struct ixgbevf_adapter {
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
u32 flags;
#define IXGBEVF_FLAGS_LEGACY_RX BIT(1)
+
+#ifdef CONFIG_XFRM
+ struct ixgbevf_ipsec *ipsec;
+#endif /* CONFIG_XFRM */
};
enum ixbgevf_state_t {
@@ -451,6 +459,31 @@ int ethtool_ioctl(struct ifreq *ifr);
extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
+#ifdef CONFIG_XFRM_OFFLOAD
+void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter);
+void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter);
+void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter);
+void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
+int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first,
+ struct ixgbevf_ipsec_tx_data *itd);
+#else
+static inline void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
+{ }
+static inline void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
+{ }
+static inline void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) { }
+static inline void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb) { }
+static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first,
+ struct ixgbevf_ipsec_tx_data *itd)
+{ return 0; }
+#endif /* CONFIG_XFRM_OFFLOAD */
+
void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d86446d202d5..98707ee11d72 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -40,7 +40,7 @@ static const char ixgbevf_driver_string[] =
#define DRV_VERSION "4.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2015 Intel Corporation.";
+ "Copyright (c) 2009 - 2018 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -79,7 +79,7 @@ MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
@@ -268,7 +268,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbevf_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
unsigned int budget = tx_ring->count / 2;
unsigned int i = tx_ring->next_to_clean;
@@ -299,6 +299,8 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
/* update the statistics for this packet */
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
+ if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC)
+ total_ipsec++;
/* free the skb */
if (ring_is_xdp(tx_ring))
@@ -361,6 +363,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
u64_stats_update_end(&tx_ring->syncp);
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
struct ixgbe_hw *hw = &adapter->hw;
@@ -516,6 +519,9 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
+ if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP))
+ ixgbevf_ipsec_rx(rx_ring, rx_desc, skb);
+
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
@@ -1012,7 +1018,7 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
context_desc->vlan_macip_lens =
cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT);
- context_desc->seqnum_seed = 0;
+ context_desc->fceof_saidx = 0;
context_desc->type_tucmd_mlhl =
cpu_to_le32(IXGBE_TXD_CMD_DEXT |
IXGBE_ADVTXD_DTYP_CTXT);
@@ -2200,6 +2206,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
ixgbevf_set_rx_mode(adapter->netdev);
ixgbevf_restore_vlan(adapter);
+ ixgbevf_ipsec_restore(adapter);
ixgbevf_configure_tx(adapter);
ixgbevf_configure_rx(adapter);
@@ -2246,7 +2253,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int api[] = { ixgbe_mbox_api_13,
+ int api[] = { ixgbe_mbox_api_14,
+ ixgbe_mbox_api_13,
ixgbe_mbox_api_12,
ixgbe_mbox_api_11,
ixgbe_mbox_api_10,
@@ -2605,6 +2613,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1;
@@ -3700,8 +3709,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
}
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
- u32 vlan_macip_lens, u32 type_tucmd,
- u32 mss_l4len_idx)
+ u32 vlan_macip_lens, u32 fceof_saidx,
+ u32 type_tucmd, u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
@@ -3715,14 +3724,15 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
+ context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
- u8 *hdr_len)
+ u8 *hdr_len,
+ struct ixgbevf_ipsec_tx_data *itd)
{
u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
@@ -3736,6 +3746,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
+ u32 fceof_saidx = 0;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3761,13 +3772,15 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
if (ip.v4->version == 4) {
unsigned char *csum_start = skb_checksum_start(skb);
unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+ int len = csum_start - trans_start;
/* IP header will have to cancel out any data that
- * is not a part of the outer IP header
+ * is not a part of the outer IP header, so set to
+ * a reverse csum if needed, else init check to 0.
*/
- ip.v4->check = csum_fold(csum_partial(trans_start,
- csum_start - trans_start,
- 0));
+ ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
+ csum_fold(csum_partial(trans_start,
+ len, 0)) : 0;
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
ip.v4->tot_len = 0;
@@ -3799,13 +3812,16 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
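+ /* fold any IPsec SA index and flags into the context descriptor */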
+ fceof_saidx |= itd->pfsa;
+ type_tucmd |= itd->flags | itd->trailer_len;
+
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = l4.hdr - ip.hdr;
vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
- type_tucmd, mss_l4len_idx);
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
+ mss_l4len_idx);
return 1;
}
@@ -3820,10 +3836,12 @@ static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
}
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
- struct ixgbevf_tx_buffer *first)
+ struct ixgbevf_tx_buffer *first,
+ struct ixgbevf_ipsec_tx_data *itd)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
+ u32 fceof_saidx = 0;
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3849,6 +3867,10 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
skb_checksum_help(skb);
goto no_csum;
}
+
+ if (first->protocol == htons(ETH_P_IP))
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
/* update TX checksum flag */
first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
vlan_macip_lens = skb_checksum_start_offset(skb) -
@@ -3858,7 +3880,11 @@ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
+ fceof_saidx |= itd->pfsa;
+ type_tucmd |= itd->flags | itd->trailer_len;
+
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+ fceof_saidx, type_tucmd, 0);
}
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
@@ -3892,8 +3918,12 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
if (tx_flags & IXGBE_TX_FLAGS_IPV4)
olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
- /* use index 1 context for TSO/FSO/FCOE */
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ /* enable IPsec */
+ if (tx_flags & IXGBE_TX_FLAGS_IPSEC)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC);
+
+ /* use index 1 context for TSO/FSO/FCOE/IPSEC */
+ if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC))
olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
/* Check Context must be set if Tx switch is enabled, which it
@@ -4075,6 +4105,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
int tso;
u32 tx_flags = 0;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 };
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
unsigned short f;
#endif
@@ -4119,11 +4150,15 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
first->tx_flags = tx_flags;
first->protocol = vlan_get_protocol(skb);
- tso = ixgbevf_tso(tx_ring, first, &hdr_len);
+#ifdef CONFIG_XFRM_OFFLOAD
+ if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+ goto out_drop;
+#endif
+ tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
if (tso < 0)
goto out_drop;
else if (!tso)
- ixgbevf_tx_csum(tx_ring, first);
+ ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
ixgbevf_tx_map(tx_ring, first, hdr_len);
@@ -4233,24 +4268,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbevf_netpoll(struct net_device *netdev)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__IXGBEVF_DOWN, &adapter->state))
- return;
- for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4482,9 +4499,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ixgbevf_netpoll,
-#endif
.ndo_features_check = ixgbevf_features_check,
.ndo_bpf = ixgbevf_xdp,
};
@@ -4634,6 +4648,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
@@ -4669,6 +4684,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, netdev);
netif_carrier_off(netdev);
+ ixgbevf_init_ipsec_offload(adapter);
ixgbevf_init_last_counter_stats(adapter);
@@ -4735,6 +4751,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ ixgbevf_stop_ipsec_offload(adapter);
ixgbevf_clear_interrupt_scheme(adapter);
ixgbevf_reset_interrupt_capability(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index bfd9ae150808..853796c8ef0e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -62,6 +62,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
+ ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -92,6 +93,10 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+/* mailbox API, version 1.4 VF requests */
+#define IXGBE_VF_IPSEC_ADD 0x0d
+#define IXGBE_VF_IPSEC_DEL 0x0e
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index bf0577e819e1..cd3b81300cc7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -309,6 +309,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
if (hw->mac.type < ixgbe_mac_X550_vf)
@@ -376,6 +377,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* or if the operation is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
if (hw->mac.type < ixgbe_mac_X550_vf)
@@ -540,6 +542,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
return -EOPNOTSUPP;
/* Fall through */
+ case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
break;
default:
@@ -890,6 +893,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
break;
default:
return 0;
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index e08301d833e2..32ac9045cdae 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -365,15 +365,8 @@ ltq_etop_mdio_probe(struct net_device *dev)
return PTR_ERR(phydev);
}
- phydev->supported &= (SUPPORTED_10baseT_Half
- | SUPPORTED_10baseT_Full
- | SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full
- | SUPPORTED_Autoneg
- | SUPPORTED_MII
- | SUPPORTED_TP);
-
- phydev->advertising = phydev->supported;
+ phy_set_max_speed(phydev, SPEED_100);
+
phy_attached_info(phydev);
return 0;
@@ -439,6 +432,7 @@ ltq_etop_open(struct net_device *dev)
if (!IS_TX(i) && (!IS_RX(i)))
continue;
ltq_dma_open(&ch->dma);
+ ltq_dma_enable_irq(&ch->dma);
napi_enable(&ch->napi);
}
phy_start(dev->phydev);
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
new file mode 100644
index 000000000000..8c5ba4b81fb7
--- /dev/null
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lantiq / Intel PMAC driver for XRX200 SoCs
+ *
+ * Copyright (C) 2010 Lantiq Deutschland
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+
+#include <xway_dma.h>
+
+/* DMA */
+#define XRX200_DMA_DATA_LEN 0x600
+#define XRX200_DMA_RX 0
+#define XRX200_DMA_TX 1
+
+/* cpu port mac */
+#define PMAC_RX_IPG 0x0024
+#define PMAC_RX_IPG_MASK 0xf
+
+#define PMAC_HD_CTL 0x0000
+/* Add Ethernet header to packets from DMA to PMAC */
+#define PMAC_HD_CTL_ADD BIT(0)
+/* Add VLAN tag to Packets from DMA to PMAC */
+#define PMAC_HD_CTL_TAG BIT(1)
+/* Add CRC to packets from DMA to PMAC */
+#define PMAC_HD_CTL_AC BIT(2)
+/* Add status header to packets from PMAC to DMA */
+#define PMAC_HD_CTL_AS BIT(3)
+/* Remove CRC from packets from PMAC to DMA */
+#define PMAC_HD_CTL_RC BIT(4)
+/* Remove Layer-2 header from packets from PMAC to DMA */
+#define PMAC_HD_CTL_RL2 BIT(5)
+/* Status header is present from DMA to PMAC */
+#define PMAC_HD_CTL_RXSH BIT(6)
+/* Add special tag from PMAC to switch */
+#define PMAC_HD_CTL_AST BIT(7)
+/* Remove special tag from PMAC to DMA */
+#define PMAC_HD_CTL_RST BIT(8)
+/* Check CRC from DMA to PMAC */
+#define PMAC_HD_CTL_CCRC BIT(9)
+/* Enable reaction to Pause frames in the PMAC */
+#define PMAC_HD_CTL_FC BIT(10)
+
+struct xrx200_chan {
+ int tx_free;
+
+ struct napi_struct napi;
+ struct ltq_dma_channel dma;
+ struct sk_buff *skb[LTQ_DESC_NUM];
+
+ struct xrx200_priv *priv;
+};
+
+struct xrx200_priv {
+ struct clk *clk;
+
+ struct xrx200_chan chan_tx;
+ struct xrx200_chan chan_rx;
+
+ struct net_device *net_dev;
+ struct device *dev;
+
+ __iomem void *pmac_reg;
+};
+
+static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
+{
+ return __raw_readl(priv->pmac_reg + offset);
+}
+
+static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
+{
+ __raw_writel(val, priv->pmac_reg + offset);
+}
+
+static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
+ u32 offset)
+{
+ u32 val = xrx200_pmac_r32(priv, offset);
+
+ val &= ~(clear);
+ val |= set;
+ xrx200_pmac_w32(priv, val, offset);
+}
+
+/* drop all the packets from the DMA ring */
+static void xrx200_flush_dma(struct xrx200_chan *ch)
+{
+ int i;
+
+ for (i = 0; i < LTQ_DESC_NUM; i++) {
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
+ break;
+
+ desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
+ XRX200_DMA_DATA_LEN;
+ ch->dma.desc++;
+ ch->dma.desc %= LTQ_DESC_NUM;
+ }
+}
+
+static int xrx200_open(struct net_device *net_dev)
+{
+ struct xrx200_priv *priv = netdev_priv(net_dev);
+
+ napi_enable(&priv->chan_tx.napi);
+ ltq_dma_open(&priv->chan_tx.dma);
+ ltq_dma_enable_irq(&priv->chan_tx.dma);
+
+ napi_enable(&priv->chan_rx.napi);
+ ltq_dma_open(&priv->chan_rx.dma);
+ /* The boot loader does not always deactivate the receiving of frames
+ * on the ports and then some packets queue up in the PPE buffers.
+ * They already passed the PMAC so they do not have the tags
+ * configured here. Read these packets here and drop them.
+ * The HW should have written them into memory after 10us.
+ */
+ usleep_range(20, 40);
+ xrx200_flush_dma(&priv->chan_rx);
+ ltq_dma_enable_irq(&priv->chan_rx.dma);
+
+ netif_wake_queue(net_dev);
+
+ return 0;
+}
+
+static int xrx200_close(struct net_device *net_dev)
+{
+ struct xrx200_priv *priv = netdev_priv(net_dev);
+
+ netif_stop_queue(net_dev);
+
+ napi_disable(&priv->chan_rx.napi);
+ ltq_dma_close(&priv->chan_rx.dma);
+
+ napi_disable(&priv->chan_tx.napi);
+ ltq_dma_close(&priv->chan_tx.dma);
+
+ return 0;
+}
+
+static int xrx200_alloc_skb(struct xrx200_chan *ch)
+{
+ int ret = 0;
+
+ ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
+ XRX200_DMA_DATA_LEN);
+ if (!ch->skb[ch->dma.desc]) {
+ ret = -ENOMEM;
+ goto skip;
+ }
+
+ ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
+ ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ch->priv->dev,
+ ch->dma.desc_base[ch->dma.desc].addr))) {
+ dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+ ret = -ENOMEM;
+ goto skip;
+ }
+
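+ /* hand the descriptor back to the DMA engine even if the allocation
+ * failed, so the old buffer is reused and the ring stays consistent
+ */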
+skip:
+ ch->dma.desc_base[ch->dma.desc].ctl =
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
+ XRX200_DMA_DATA_LEN;
+
+ return ret;
+}
+
+static int xrx200_hw_receive(struct xrx200_chan *ch)
+{
+ struct xrx200_priv *priv = ch->priv;
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+ struct sk_buff *skb = ch->skb[ch->dma.desc];
+ int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
+ struct net_device *net_dev = priv->net_dev;
+ int ret;
+
+ ret = xrx200_alloc_skb(ch);
+
+ ch->dma.desc++;
+ ch->dma.desc %= LTQ_DESC_NUM;
+
+ if (ret) {
+ netdev_err(net_dev, "failed to allocate new rx buffer\n");
+ return ret;
+ }
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, net_dev);
+ netif_receive_skb(skb);
+ net_dev->stats.rx_packets++;
+ net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
+
+ return 0;
+}
+
+static int xrx200_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct xrx200_chan *ch = container_of(napi,
+ struct xrx200_chan, napi);
+ int rx = 0;
+ int ret;
+
+ while (rx < budget) {
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+ ret = xrx200_hw_receive(ch);
+ if (ret)
+ return ret;
+ rx++;
+ } else {
+ break;
+ }
+ }
+
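+ /* ring drained below budget; stop polling and re-arm the interrupt */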
+ if (rx < budget) {
+ napi_complete(&ch->napi);
+ ltq_dma_enable_irq(&ch->dma);
+ }
+
+ return rx;
+}
+
+static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
+{
+ struct xrx200_chan *ch = container_of(napi,
+ struct xrx200_chan, napi);
+ struct net_device *net_dev = ch->priv->net_dev;
+ int pkts = 0;
+ int bytes = 0;
+
+ while (pkts < budget) {
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];
+
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+ struct sk_buff *skb = ch->skb[ch->tx_free];
+
+ pkts++;
+ bytes += skb->len;
+ ch->skb[ch->tx_free] = NULL;
+ consume_skb(skb);
+ memset(&ch->dma.desc_base[ch->tx_free], 0,
+ sizeof(struct ltq_dma_desc));
+ ch->tx_free++;
+ ch->tx_free %= LTQ_DESC_NUM;
+ } else {
+ break;
+ }
+ }
+
+ net_dev->stats.tx_packets += pkts;
+ net_dev->stats.tx_bytes += bytes;
+ netdev_completed_queue(ch->priv->net_dev, pkts, bytes);
+
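+ /* cleaned fewer than budget; stop polling and re-arm the interrupt */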
+ if (pkts < budget) {
+ napi_complete(&ch->napi);
+ ltq_dma_enable_irq(&ch->dma);
+ }
+
+ return pkts;
+}
+
+static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct xrx200_priv *priv = netdev_priv(net_dev);
+ struct xrx200_chan *ch = &priv->chan_tx;
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+ u32 byte_offset;
+ dma_addr_t mapping;
+ int len;
+
+ skb->dev = net_dev;
+ if (skb_put_padto(skb, ETH_ZLEN)) {
+ net_dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ len = skb->len;
+
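+ /* the descriptor is still owned by the hardware or holds an
+ * unreclaimed skb, so the ring is full
+ */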
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
+ netdev_err(net_dev, "tx ring full\n");
+ netif_stop_queue(net_dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ ch->skb[ch->dma.desc] = skb;
+
+ mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, mapping)))
+ goto err_drop;
+
+ /* dma needs to start on a 16 byte aligned address */
+ byte_offset = mapping % 16;
+
+ desc->addr = mapping - byte_offset;
+ /* Make sure the address is written before we give it to HW */
+ wmb();
+ desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
+ LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
+ ch->dma.desc++;
+ ch->dma.desc %= LTQ_DESC_NUM;
+ if (ch->dma.desc == ch->tx_free)
+ netif_stop_queue(net_dev);
+
+ netdev_sent_queue(net_dev, len);
+
+ return NETDEV_TX_OK;
+
+err_drop:
+ dev_kfree_skb(skb);
+ net_dev->stats.tx_dropped++;
+ net_dev->stats.tx_errors++;
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops xrx200_netdev_ops = {
+ .ndo_open = xrx200_open,
+ .ndo_stop = xrx200_close,
+ .ndo_start_xmit = xrx200_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
+{
+ struct xrx200_chan *ch = ptr;
+
+ ltq_dma_disable_irq(&ch->dma);
+ ltq_dma_ack_irq(&ch->dma);
+
+ napi_schedule(&ch->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int xrx200_dma_init(struct xrx200_priv *priv)
+{
+ struct xrx200_chan *ch_rx = &priv->chan_rx;
+ struct xrx200_chan *ch_tx = &priv->chan_tx;
+ int ret = 0;
+ int i;
+
+ ltq_dma_init_port(DMA_PORT_ETOP);
+
+ ch_rx->dma.nr = XRX200_DMA_RX;
+ ch_rx->dma.dev = priv->dev;
+ ch_rx->priv = priv;
+
+ ltq_dma_alloc_rx(&ch_rx->dma);
+ for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
+ ch_rx->dma.desc++) {
+ ret = xrx200_alloc_skb(ch_rx);
+ if (ret)
+ goto rx_free;
+ }
+ ch_rx->dma.desc = 0;
+ ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
+ "xrx200_net_rx", &priv->chan_rx);
+ if (ret) {
+ dev_err(priv->dev, "failed to request RX irq %d\n",
+ ch_rx->dma.irq);
+ goto rx_ring_free;
+ }
+
+ ch_tx->dma.nr = XRX200_DMA_TX;
+ ch_tx->dma.dev = priv->dev;
+ ch_tx->priv = priv;
+
+ ltq_dma_alloc_tx(&ch_tx->dma);
+ ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
+ "xrx200_net_tx", &priv->chan_tx);
+ if (ret) {
+ dev_err(priv->dev, "failed to request TX irq %d\n",
+ ch_tx->dma.irq);
+ goto tx_free;
+ }
+
+ return ret;
+
+tx_free:
+ ltq_dma_free(&ch_tx->dma);
+
+rx_ring_free:
+ /* free the allocated RX ring */
+ for (i = 0; i < LTQ_DESC_NUM; i++) {
+ if (priv->chan_rx.skb[i])
+ dev_kfree_skb_any(priv->chan_rx.skb[i]);
+ }
+
+rx_free:
+ ltq_dma_free(&ch_rx->dma);
+ return ret;
+}
+
+static void xrx200_hw_cleanup(struct xrx200_priv *priv)
+{
+ int i;
+
+ ltq_dma_free(&priv->chan_tx.dma);
+ ltq_dma_free(&priv->chan_rx.dma);
+
+ /* free the allocated RX ring */
+ for (i = 0; i < LTQ_DESC_NUM; i++)
+ dev_kfree_skb_any(priv->chan_rx.skb[i]);
+}
+
+static int xrx200_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ struct xrx200_priv *priv;
+ struct net_device *net_dev;
+ const u8 *mac;
+ int err;
+
+ /* alloc the network device */
+ net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
+ if (!net_dev)
+ return -ENOMEM;
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+ priv->dev = dev;
+
+ net_dev->netdev_ops = &xrx200_netdev_ops;
+ SET_NETDEV_DEV(net_dev, dev);
+ net_dev->min_mtu = ETH_ZLEN;
+ net_dev->max_mtu = XRX200_DMA_DATA_LEN;
+
+ /* load the memory ranges */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get resources\n");
+ return -ENOENT;
+ }
+
+ priv->pmac_reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->pmac_reg)) {
+ dev_err(dev, "failed to request and remap io ranges\n");
+ return PTR_ERR(priv->pmac_reg);
+ }
+
+ priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
+ if (priv->chan_rx.dma.irq < 0) {
+ dev_err(dev, "failed to get RX IRQ, %i\n",
+ priv->chan_rx.dma.irq);
+ return -ENOENT;
+ }
+ priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
+ if (priv->chan_tx.dma.irq < 0) {
+ dev_err(dev, "failed to get TX IRQ, %i\n",
+ priv->chan_tx.dma.irq);
+ return -ENOENT;
+ }
+
+ /* get the clock */
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ mac = of_get_mac_address(np);
+ if (mac && is_valid_ether_addr(mac))
+ ether_addr_copy(net_dev->dev_addr, mac);
+ else
+ eth_hw_addr_random(net_dev);
+
+ /* bring up the dma engine and IP core */
+ err = xrx200_dma_init(priv);
+ if (err)
+ return err;
+
+ /* enable clock gate */
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ goto err_uninit_dma;
+
+ /* set IPG to 12 */
+ xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);
+
+ /* enable status header, enable CRC */
+ xrx200_pmac_mask(priv, 0,
+ PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
+ PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
+ PMAC_HD_CTL);
+
+ /* setup NAPI */
+ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
+ netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
+
+ platform_set_drvdata(pdev, priv);
+
+ err = register_netdev(net_dev);
+ if (err)
+ goto err_unprepare_clk;
+ return err;
+
+err_unprepare_clk:
+ clk_disable_unprepare(priv->clk);
+
+err_uninit_dma:
+ xrx200_hw_cleanup(priv);
+
+ return err;
+}
+
+static int xrx200_remove(struct platform_device *pdev)
+{
+ struct xrx200_priv *priv = platform_get_drvdata(pdev);
+ struct net_device *net_dev = priv->net_dev;
+
+ /* free stack related instances */
+ netif_stop_queue(net_dev);
+ netif_napi_del(&priv->chan_tx.napi);
+ netif_napi_del(&priv->chan_rx.napi);
+
+ /* remove the actual device */
+ unregister_netdev(net_dev);
+
+ /* release the clock */
+ clk_disable_unprepare(priv->clk);
+
+ /* shut down hardware */
+ xrx200_hw_cleanup(priv);
+
+ return 0;
+}
+
+static const struct of_device_id xrx200_match[] = {
+ { .compatible = "lantiq,xrx200-net" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xrx200_match);
+
+static struct platform_driver xrx200_driver = {
+ .probe = xrx200_probe,
+ .remove = xrx200_remove,
+ .driver = {
+ .name = "lantiq,xrx200-net",
+ .of_match_table = xrx200_match,
+ },
+};
+
+module_platform_driver(xrx200_driver);
+
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index f33fd22b351c..3238aa7f5dac 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -167,4 +167,7 @@ config SKY2_DEBUG
If unsure, say N.
+
+source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 55d4d10aa7d3..89dea7284d5b 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_MVPP2) += mvpp2/
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
+obj-y += octeontx2/
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 62f204f32316..1e9bcbdc6a90 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2733,17 +2733,17 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
memset(&res, 0, sizeof(res));
if (of_irq_to_resource(pnp, 0, &res) <= 0) {
- dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
+ dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp);
return -EINVAL;
}
if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
- dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
+ dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp);
return -EINVAL;
}
if (ppd.port_number >= 3) {
- dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
+ dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc80a678abc3..5bfd349bf41a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -221,6 +221,8 @@
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
+#define MVNETA_GMAC_CTRL_4 0x2c90
+#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1)
#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
@@ -1890,8 +1892,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
if (!data || !(rx_desc->buf_phys_addr))
continue;
- dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
+ dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(data);
}
}
@@ -2008,8 +2010,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
skb_add_rx_frag(rxq->skb, frag_num, page,
frag_offset, frag_size,
PAGE_SIZE);
- dma_unmap_single(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(dev->dev.parent, phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
rxq->left_size -= frag_size;
}
} else {
@@ -2039,9 +2041,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
frag_offset, frag_size,
PAGE_SIZE);
- dma_unmap_single(dev->dev.parent, phys_addr,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_page(dev->dev.parent, phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
rxq->left_size -= frag_size;
}
@@ -2065,10 +2066,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* Linux processing */
rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
- if (dev->features & NETIF_F_GRO)
- napi_gro_receive(napi, rxq->skb);
- else
- netif_receive_skb(rxq->skb);
+ napi_gro_receive(napi, rxq->skb);
/* clean uncomplete skb pointer in queue */
rxq->skb = NULL;
@@ -2396,7 +2394,7 @@ error:
}
/* Main tx processing */
-static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
u16 txq_id = skb_get_queue_mapping(skb);
@@ -2510,12 +2508,13 @@ static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
+ int cpu = smp_processor_id();
while (cause_tx_done) {
txq = mvneta_tx_done_policy(pp, cause_tx_done);
nq = netdev_get_tx_queue(pp->dev, txq->id);
- __netif_tx_lock(nq, smp_processor_id());
+ __netif_tx_lock(nq, cpu);
if (txq->count)
mvneta_txq_done(pp, txq);
@@ -3344,6 +3343,7 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != PHY_INTERFACE_MODE_QSGMII &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
+ state->interface != PHY_INTERFACE_MODE_2500BASEX &&
!phy_interface_mode_is_8023z(state->interface) &&
!phy_interface_mode_is_rgmii(state->interface)) {
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -3356,9 +3356,15 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
/* Asymmetric pause is unsupported */
phylink_set(mask, Pause);
- /* Half-duplex at speeds higher than 100Mbit is unsupported */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+
+ /* We cannot use 1Gbps when using the 2.5G interface. */
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ } else {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
if (!phy_interface_mode_is_8023z(state->interface)) {
/* 10M and 100M are only supported in non-802.3z mode */
@@ -3419,12 +3425,14 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
struct mvneta_port *pp = netdev_priv(ndev);
u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+ u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
MVNETA_GMAC2_PORT_RESET);
+ new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
MVNETA_GMAC_INBAND_RESTART_AN |
@@ -3457,7 +3465,7 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
if (state->duplex)
new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
- if (state->speed == SPEED_1000)
+ if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
else if (state->speed == SPEED_100)
new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
@@ -3496,10 +3504,18 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
MVNETA_GMAC_FORCE_LINK_DOWN);
}
+ /* When at 2.5G, the link partner can send frames with shortened
+ * preambles.
+ */
+ if (state->speed == SPEED_2500)
+ new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
+
if (new_ctrl0 != gmac_ctrl0)
mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
if (new_ctrl2 != gmac_ctrl2)
mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
+ if (new_ctrl4 != gmac_ctrl4)
+ mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
if (new_clk != gmac_clk)
mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
if (new_an != gmac_an)
@@ -3793,9 +3809,6 @@ static int mvneta_open(struct net_device *dev)
goto err_free_online_hp;
}
- /* In default link is down */
- netif_carrier_off(pp->dev);
-
ret = mvneta_mdio_probe(pp);
if (ret < 0) {
netdev_err(dev, "cannot probe MDIO bus\n");
@@ -4598,7 +4611,8 @@ static int mvneta_probe(struct platform_device *pdev)
}
}
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO;
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_RXCSUM;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 67b9e81b7c02..176c6b56fdcc 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -253,7 +253,8 @@
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
-#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
+#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(version) \
+ ((version) == MVPP21 ? 0xffff : 0xff)
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
@@ -330,6 +331,7 @@
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
+#define MVPP2_TXP_SCHED_FIXED_PRIO_REG 0x8014
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
@@ -613,6 +615,7 @@
/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)
+#define MVPP2_F_DT_COMPAT BIT(1)
/* Marvell tag types */
enum mvpp2_tag_type {
@@ -662,7 +665,7 @@ enum mvpp2_prs_l3_cast {
#define MVPP21_ADDR_SPACE_SZ 0
#define MVPP22_ADDR_SPACE_SZ SZ_64K
-#define MVPP2_MAX_THREADS 8
+#define MVPP2_MAX_THREADS 9
#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS
/* GMAC MIB Counters register definitions */
@@ -734,6 +737,11 @@ struct mvpp2 {
int port_count;
struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
+ /* Number of Tx threads used */
+ unsigned int nthreads;
+ /* Map of threads needing locking */
+ unsigned long lock_map;
+
/* Aggregated TXQs */
struct mvpp2_tx_queue *aggr_txqs;
@@ -823,6 +831,12 @@ struct mvpp2_port {
/* Per-CPU port control */
struct mvpp2_port_pcpu __percpu *pcpu;
+ /* Protect the BM refills and the Tx paths when a thread is used on more
+ * than a single CPU.
+ */
+ spinlock_t bm_lock[MVPP2_MAX_THREADS];
+ spinlock_t tx_lock[MVPP2_MAX_THREADS];
+
/* Flags */
unsigned long flags;
@@ -969,7 +983,7 @@ struct mvpp2_txq_pcpu_buf {
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
- int cpu;
+ unsigned int thread;
/* Number of Tx DMA descriptors in the descriptor ring */
int size;
@@ -1095,14 +1109,6 @@ struct mvpp2_bm_pool {
void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data);
u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
-u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset);
-
-void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data);
-u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset);
-
-void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset,
- u32 data);
-
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 28500417843e..14f9679c957c 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -58,6 +58,8 @@ static struct {
*/
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
const struct phylink_link_state *state);
+static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phy);
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
@@ -80,13 +82,19 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
return readl(priv->swth_base[0] + offset);
}
-u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
+static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
return readl_relaxed(priv->swth_base[0] + offset);
}
+
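+/* Map a CPU number onto one of the register thread windows */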
+static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
+{
+ return cpu % priv->nthreads;
+}
+
/* These accessors should be used to access:
*
- * - per-CPU registers, where each CPU has its own copy of the
+ * - per-thread registers, where each thread has its own copy of the
* register.
*
* MVPP2_BM_VIRT_ALLOC_REG
@@ -102,8 +110,8 @@ u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
* MVPP2_TXQ_SENT_REG
* MVPP2_RXQ_NUM_REG
*
- * - global registers that must be accessed through a specific CPU
- * window, because they are related to an access to a per-CPU
+ * - global registers that must be accessed through a specific thread
+ * window, because they are related to an access to a per-thread
* register
*
* MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
@@ -120,28 +128,28 @@ u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
* MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
* MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
*/
-void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
+static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
u32 offset, u32 data)
{
- writel(data, priv->swth_base[cpu] + offset);
+ writel(data, priv->swth_base[thread] + offset);
}
-u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
+static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
u32 offset)
{
- return readl(priv->swth_base[cpu] + offset);
+ return readl(priv->swth_base[thread] + offset);
}
-void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
+static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
u32 offset, u32 data)
{
- writel_relaxed(data, priv->swth_base[cpu] + offset);
+ writel_relaxed(data, priv->swth_base[thread] + offset);
}
-static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
+static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
u32 offset)
{
- return readl_relaxed(priv->swth_base[cpu] + offset);
+ return readl_relaxed(priv->swth_base[thread] + offset);
}
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
@@ -383,17 +391,17 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
dma_addr_t *dma_addr,
phys_addr_t *phys_addr)
{
- int cpu = get_cpu();
+ unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
- *dma_addr = mvpp2_percpu_read(priv, cpu,
+ *dma_addr = mvpp2_thread_read(priv, thread,
MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
- *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
+ *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
if (priv->hw_version == MVPP22) {
u32 val;
u32 dma_addr_highbits, phys_addr_highbits;
- val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
+ val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
@@ -624,7 +632,11 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
dma_addr_t buf_dma_addr,
phys_addr_t buf_phys_addr)
{
- int cpu = get_cpu();
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ unsigned long flags = 0;
+
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_lock_irqsave(&port->bm_lock[thread], flags);
if (port->priv->hw_version == MVPP22) {
u32 val = 0;
@@ -638,7 +650,7 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
- mvpp2_percpu_write_relaxed(port->priv, cpu,
+ mvpp2_thread_write_relaxed(port->priv, thread,
MVPP22_BM_ADDR_HIGH_RLS_REG, val);
}
@@ -647,11 +659,14 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
* descriptor. Instead of storing the virtual address, we
* store the physical address
*/
- mvpp2_percpu_write_relaxed(port->priv, cpu,
+ mvpp2_thread_write_relaxed(port->priv, thread,
MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
- mvpp2_percpu_write_relaxed(port->priv, cpu,
+ mvpp2_thread_write_relaxed(port->priv, thread,
MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_unlock_irqrestore(&port->bm_lock[thread], flags);
+
put_cpu();
}
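The put path only takes the per-thread lock when lock_map marks the thread as shared between several CPUs, keeping the common one-CPU-per-thread case lock-free. A minimal userspace sketch of that pattern, with pthread mutexes standing in for spin_lock_irqsave() and a hard-coded, illustrative lock_map:

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_THREADS 9

    static pthread_mutex_t lock[MAX_THREADS];
    static unsigned long lock_map = 0x3;    /* threads 0 and 1 shared by two CPUs */

    static void buffer_release(unsigned int thread)
    {
            int contended = !!(lock_map & (1UL << thread));

            if (contended)                  /* test_bit(thread, &lock_map) */
                    pthread_mutex_lock(&lock[thread]);

            printf("thread %u: release buffer%s\n", thread,
                   contended ? " (locked)" : " (lock-free)");

            if (contended)
                    pthread_mutex_unlock(&lock[thread]);
    }

    int main(void)
    {
            for (int i = 0; i < MAX_THREADS; i++)
                    pthread_mutex_init(&lock[i], NULL);

            buffer_release(0);      /* shared thread: takes the lock */
            buffer_release(4);      /* private thread: stays lock-free */
            return 0;
    }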
@@ -884,7 +899,7 @@ static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}
-/* Mask the current CPU's Rx/Tx interrupts
+/* Mask the current thread's Rx/Tx interrupts
* Called by on_each_cpu(), guaranteed to run with migration disabled,
* using smp_processor_id() is OK.
*/
@@ -892,11 +907,16 @@ static void mvpp2_interrupts_mask(void *arg)
{
struct mvpp2_port *port = arg;
- mvpp2_percpu_write(port->priv, smp_processor_id(),
+ /* If the thread isn't used, don't do anything */
+ if (smp_processor_id() >= port->priv->nthreads)
+ return;
+
+ mvpp2_thread_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}
-/* Unmask the current CPU's Rx/Tx interrupts.
+/* Unmask the current thread's Rx/Tx interrupts.
* Called by on_each_cpu(), guaranteed to run with migration disabled,
* using smp_processor_id() is OK.
*/
@@ -905,12 +925,17 @@ static void mvpp2_interrupts_unmask(void *arg)
struct mvpp2_port *port = arg;
u32 val;
+ /* If the thread isn't used, don't do anything */
+ if (smp_processor_id() >= port->priv->nthreads)
+ return;
+
val = MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
if (port->has_tx_irqs)
val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- mvpp2_percpu_write(port->priv, smp_processor_id(),
+ mvpp2_thread_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}
@@ -926,7 +951,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
if (mask)
val = 0;
else
- val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+ val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *v = port->qvecs + i;
@@ -934,7 +959,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
continue;
- mvpp2_percpu_write(port->priv, v->sw_thread_id,
+ mvpp2_thread_write(port->priv, v->sw_thread_id,
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}
}
@@ -1423,6 +1448,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
tx_port_num);
mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+ /* Set TXQ scheduling to Round-Robin */
+ mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
+
/* Close bandwidth for all queues */
for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
ptxq = mvpp2_txq_phys(port->id, queue);
@@ -1622,7 +1650,8 @@ mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
/* aggregated access - relevant TXQ number is written in TX desc */
- mvpp2_percpu_write(port->priv, smp_processor_id(),
+ mvpp2_thread_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
@@ -1632,14 +1661,15 @@ static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
* Called only from mvpp2_tx(), so migration is disabled, using
* smp_processor_id() is OK.
*/
-static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
+static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
struct mvpp2_tx_queue *aggr_txq, int num)
{
if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
/* Update number of occupied aggregated Tx descriptors */
- int cpu = smp_processor_id();
- u32 val = mvpp2_read_relaxed(priv,
- MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+ unsigned int thread =
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ u32 val = mvpp2_read_relaxed(port->priv,
+ MVPP2_AGGR_TXQ_STATUS_REG(thread));
aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
@@ -1655,16 +1685,17 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
* only by mvpp2_tx(), so migration is disabled, using
* smp_processor_id() is OK.
*/
-static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
+static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq, int num)
{
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ struct mvpp2 *priv = port->priv;
u32 val;
- int cpu = smp_processor_id();
val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
- mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
+ mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
- val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
+ val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
@@ -1672,12 +1703,13 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
/* Check if there are enough reserved descriptors for transmission.
* If not, request chunk of reserved descriptors and check again.
*/
-static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
+static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
struct mvpp2_txq_pcpu *txq_pcpu,
int num)
{
- int req, cpu, desc_count;
+ int req, desc_count;
+ unsigned int thread;
if (txq_pcpu->reserved_num >= num)
return 0;
@@ -1688,10 +1720,10 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
desc_count = 0;
/* Compute total of used descriptors */
- for_each_present_cpu(cpu) {
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
struct mvpp2_txq_pcpu *txq_pcpu_aux;
- txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
+ txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
desc_count += txq_pcpu_aux->count;
desc_count += txq_pcpu_aux->reserved_num;
}
@@ -1700,10 +1732,10 @@ static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
desc_count += req;
if (desc_count >
- (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
+ (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
return -ENOMEM;
- txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
+ txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
/* OK, the descriptor could have been updated: check again. */
if (txq_pcpu->reserved_num < num)
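The reservation check sums the descriptors already used or reserved by every thread, adds the new request, and refuses it if the total would eat into the per-thread emergency chunks. A back-of-the-envelope standalone model; the sizes below, including the chunk value, are illustrative assumptions:

    #include <stdio.h>

    #define MVPP2_MAX_THREADS       9
    #define MVPP2_CPU_DESC_CHUNK    64      /* assumed chunk size */

    int main(void)
    {
            int txq_size = 1024;
            int used[MVPP2_MAX_THREADS] = { 120, 80, 40 }; /* per-thread counts */
            int req = 64, desc_count = 0;

            for (int t = 0; t < MVPP2_MAX_THREADS; t++)
                    desc_count += used[t];
            desc_count += req;

            if (desc_count > txq_size - MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)
                    printf("would return -ENOMEM\n");
            else
                    printf("reservation ok (%d descriptors)\n", desc_count);
            return 0;
    }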
@@ -1723,7 +1755,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
}
/* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
@@ -1757,7 +1789,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
/* Get number of sent descriptors and decrement counter.
* The number of sent descriptors is returned.
- * Per-CPU access
+ * Per-thread access
*
* Called only from mvpp2_txq_done(), called from mvpp2_tx()
* (migration disabled) and from the TX completion tasklet (migration
@@ -1769,7 +1801,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
u32 val;
/* Reading status reg resets transmitted descriptor counter */
- val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
+ val = mvpp2_thread_read_relaxed(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_TXQ_SENT_REG(txq->id));
return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
@@ -1784,10 +1817,15 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
struct mvpp2_port *port = arg;
int queue;
+ /* If the thread isn't used, don't do anything */
+ if (smp_processor_id() >= port->priv->nthreads)
+ return;
+
for (queue = 0; queue < port->ntxqs; queue++) {
int id = port->txqs[queue]->id;
- mvpp2_percpu_read(port->priv, smp_processor_id(),
+ mvpp2_thread_read(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_TXQ_SENT_REG(id));
}
}
@@ -1847,13 +1885,13 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
- int cpu = get_cpu();
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
rxq->pkts_coal);
put_cpu();
@@ -1863,15 +1901,15 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
- int cpu = get_cpu();
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
put_cpu();
}
@@ -1972,7 +2010,7 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
int tx_done;
- if (txq_pcpu->cpu != smp_processor_id())
+ if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
tx_done = mvpp2_txq_sent_desc_proc(port, txq);
@@ -1988,7 +2026,7 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
}
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
- int cpu)
+ unsigned int thread)
{
struct mvpp2_tx_queue *txq;
struct mvpp2_txq_pcpu *txq_pcpu;
@@ -1999,7 +2037,7 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
if (!txq)
break;
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
if (txq_pcpu->count) {
mvpp2_txq_done(port, txq, txq_pcpu);
@@ -2015,8 +2053,8 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
- struct mvpp2_tx_queue *aggr_txq, int cpu,
- struct mvpp2 *priv)
+ struct mvpp2_tx_queue *aggr_txq,
+ unsigned int thread, struct mvpp2 *priv)
{
u32 txq_dma;
@@ -2031,7 +2069,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
/* Aggr TXQ no reset WA */
aggr_txq->next_desc_to_proc = mvpp2_read(priv,
- MVPP2_AGGR_TXQ_INDEX_REG(cpu));
+ MVPP2_AGGR_TXQ_INDEX_REG(thread));
/* Set Tx descriptors queue starting address indirect
* access
@@ -2042,8 +2080,8 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
txq_dma = aggr_txq->descs_dma >>
MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
+ mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
+ mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
MVPP2_AGGR_TXQ_SIZE);
return 0;
@@ -2054,8 +2092,8 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
+ unsigned int thread;
u32 rxq_dma;
- int cpu;
rxq->size = port->rx_ring_size;
@@ -2072,15 +2110,15 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
/* Set Rx descriptors queue starting address - indirect access */
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
if (port->priv->hw_version == MVPP21)
rxq_dma = rxq->descs_dma;
else
rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
put_cpu();
/* Set Offset */
@@ -2125,7 +2163,7 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
- int cpu;
+ unsigned int thread;
mvpp2_rxq_drop_pkts(port, rxq);
@@ -2144,10 +2182,10 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port,
* free descriptor number
*/
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
+ mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
put_cpu();
}
@@ -2156,7 +2194,8 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
u32 val;
- int cpu, desc, desc_per_txq, tx_port_num;
+ unsigned int thread;
+ int desc, desc_per_txq, tx_port_num;
struct mvpp2_txq_pcpu *txq_pcpu;
txq->size = port->tx_ring_size;
@@ -2171,18 +2210,18 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
txq->descs_dma);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
- val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
+ val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
val &= ~MVPP2_TXQ_PENDING_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
/* Calculate base address in prefetch buffer. We reserve 16 descriptors
* for each existing TXQ.
@@ -2193,7 +2232,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
(txq->log_id * desc_per_txq);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
put_cpu();
@@ -2212,8 +2251,8 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
val);
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
txq_pcpu->size = txq->size;
txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
sizeof(*txq_pcpu->buffs),
@@ -2247,10 +2286,10 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
struct mvpp2_txq_pcpu *txq_pcpu;
- int cpu;
+ unsigned int thread;
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
kfree(txq_pcpu->buffs);
if (txq_pcpu->tso_headers)
@@ -2276,10 +2315,10 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
put_cpu();
}
@@ -2287,14 +2326,14 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
struct mvpp2_txq_pcpu *txq_pcpu;
- int delay, pending, cpu;
+ int delay, pending;
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
u32 val;
- cpu = get_cpu();
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
- val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
val |= MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
/* The napi queue has been stopped so wait for all packets
* to be transmitted.
@@ -2310,17 +2349,17 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
mdelay(1);
delay++;
- pending = mvpp2_percpu_read(port->priv, cpu,
+ pending = mvpp2_thread_read(port->priv, thread,
MVPP2_TXQ_PENDING_REG);
pending &= MVPP2_TXQ_PENDING_MASK;
} while (pending);
val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
put_cpu();
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
/* Release all packets */
mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
@@ -2387,13 +2426,17 @@ err_cleanup:
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
struct mvpp2_tx_queue *txq;
- int queue, err;
+ int queue, err, cpu;
for (queue = 0; queue < port->ntxqs; queue++) {
txq = port->txqs[queue];
err = mvpp2_txq_init(port, txq);
if (err)
goto err_cleanup;
+
+ /* Assign this queue to a CPU */
+ cpu = queue % num_present_cpus();
+ netif_set_xps_queue(port->dev, cpumask_of(cpu), queue);
}
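netif_set_xps_queue() is handed a round-robin CPU per queue, so transmit queue selection lines up with the CPU doing the transmit. Illustratively, for a hypothetical 8-queue port on a 4-CPU system the mapping would be:

    #include <stdio.h>

    int main(void)
    {
            unsigned int ncpus = 4, ntxqs = 8;      /* hypothetical system */

            for (unsigned int q = 0; q < ntxqs; q++)
                    printf("txq %u -> cpu %u\n", q, q % ncpus);
            return 0;
    }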
if (port->has_tx_irqs) {
@@ -2501,16 +2544,20 @@ static void mvpp2_tx_proc_cb(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct mvpp2_port *port = netdev_priv(dev);
- struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ struct mvpp2_port_pcpu *port_pcpu;
unsigned int tx_todo, cause;
+ port_pcpu = per_cpu_ptr(port->pcpu,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
+
if (!netif_running(dev))
return;
port_pcpu->timer_scheduled = false;
/* Process all the Tx queues */
cause = (1 << port->ntxqs) - 1;
- tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
+ tx_todo = mvpp2_tx_done(port, cause,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */
if (tx_todo)
@@ -2598,14 +2645,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
u8 l4_proto;
+ __be16 l3_proto = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read l4_protocol from one of IPv6 extra headers */
@@ -2617,7 +2665,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
}
return mvpp2_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
+ l3_proto, ip_hdr_len, l4_proto);
}
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
@@ -2726,7 +2774,8 @@ static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct mvpp2_tx_desc *desc)
{
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
dma_addr_t buf_dma_addr =
mvpp2_txdesc_dma_addr_get(port, desc);
@@ -2743,7 +2792,8 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
struct mvpp2_tx_queue *aggr_txq,
struct mvpp2_tx_queue *txq)
{
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
struct mvpp2_tx_desc *tx_desc;
int i;
dma_addr_t buf_dma_addr;
@@ -2862,9 +2912,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
int i, len, descs = 0;
/* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
- tso_count_descs(skb)) ||
- mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
tso_count_descs(skb)))
return 0;
@@ -2904,21 +2953,28 @@ release:
}
/* Main tx processing */
-static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_tx_queue *txq, *aggr_txq;
struct mvpp2_txq_pcpu *txq_pcpu;
struct mvpp2_tx_desc *tx_desc;
dma_addr_t buf_dma_addr;
+ unsigned long flags = 0;
+ unsigned int thread;
int frags = 0;
u16 txq_id;
u32 tx_cmd;
+ thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+
txq_id = skb_get_queue_mapping(skb);
txq = port->txqs[txq_id];
- txq_pcpu = this_cpu_ptr(txq->pcpu);
- aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+ aggr_txq = &port->priv->aggr_txqs[thread];
+
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_lock_irqsave(&port->tx_lock[thread], flags);
if (skb_is_gso(skb)) {
frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
@@ -2927,9 +2983,8 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
frags = skb_shinfo(skb)->nr_frags + 1;
/* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
- mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
- txq_pcpu, frags)) {
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
+ mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
frags = 0;
goto out;
}
@@ -2971,7 +3026,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
- struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+ struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
txq_pcpu->reserved_num -= frags;
@@ -3001,11 +3056,14 @@ out:
/* Set the timer in case not all frags were processed */
if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
txq_pcpu->count > 0) {
- struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
mvpp2_timer_set(port_pcpu);
}
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_unlock_irqrestore(&port->tx_lock[thread], flags);
+
return NETDEV_TX_OK;
}
@@ -3025,7 +3083,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
int rx_done = 0;
struct mvpp2_port *port = netdev_priv(napi->dev);
struct mvpp2_queue_vector *qv;
- int cpu = smp_processor_id();
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
qv = container_of(napi, struct mvpp2_queue_vector, napi);
@@ -3039,7 +3097,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
*
* Each CPU has its own Rx/Tx cause register
*/
- cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
+ cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
@@ -3048,19 +3106,22 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
/* Clear the cause register */
mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
- mvpp2_percpu_write(port->priv, cpu,
+ mvpp2_thread_write(port->priv, thread,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- if (cause_tx) {
- cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
- mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+ if (port->has_tx_irqs) {
+ cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ if (cause_tx) {
+ cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
+ mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+ }
}
/* Process RX packets */
- cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+ cause_rx = cause_rx_tx &
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
cause_rx <<= qv->first_rxq;
cause_rx |= qv->pending_cause_rx;
while (cause_rx && budget > 0) {
@@ -3135,7 +3196,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
for (i = 0; i < port->nqvecs; i++)
napi_enable(&port->qvecs[i].napi);
- /* Enable interrupts on all CPUs */
+ /* Enable interrupts on all threads */
mvpp2_interrupts_enable(port);
if (port->priv->hw_version == MVPP22)
@@ -3150,9 +3211,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
*/
struct phylink_link_state state = {
.interface = port->phy_interface,
- .link = 1,
};
mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
+ mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
+ NULL);
}
netif_tx_start_all_queues(port->dev);
@@ -3163,7 +3225,7 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
{
int i;
- /* Disable interrupts on all CPUs */
+ /* Disable interrupts on all threads */
mvpp2_interrupts_disable(port);
for (i = 0; i < port->nqvecs; i++)
@@ -3243,9 +3305,18 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
if (err)
goto err;
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
- irq_set_affinity_hint(qv->irq,
- cpumask_of(qv->sw_thread_id));
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
+ unsigned long mask = 0;
+ unsigned int cpu;
+
+ for_each_present_cpu(cpu) {
+ if (mvpp2_cpu_to_thread(port->priv, cpu) ==
+ qv->sw_thread_id)
+ mask |= BIT(cpu);
+ }
+
+ irq_set_affinity_hint(qv->irq, to_cpumask(&mask));
+ }
}
return 0;
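Since several CPUs can now map onto one software thread, the affinity hint is built as the set of all present CPUs whose modulo mapping lands on the vector's thread. A userspace rendering of that construction, with illustrative CPU/thread counts:

    #include <stdio.h>

    int main(void)
    {
            unsigned int ncpus = 12, nthreads = 9;  /* illustrative sizes */

            for (unsigned int thread = 0; thread < nthreads; thread++) {
                    unsigned long mask = 0;

                    for (unsigned int cpu = 0; cpu < ncpus; cpu++)
                            if (cpu % nthreads == thread) /* mvpp2_cpu_to_thread() */
                                    mask |= 1UL << cpu;   /* BIT(cpu) */

                    printf("thread %u -> affinity 0x%03lx\n", thread, mask);
            }
            return 0;
    }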
@@ -3389,11 +3460,11 @@ static int mvpp2_stop(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
struct mvpp2_port_pcpu *port_pcpu;
- int cpu;
+ unsigned int thread;
mvpp2_stop_dev(port);
- /* Mask interrupts on all CPUs */
+ /* Mask interrupts on all threads */
on_each_cpu(mvpp2_interrupts_mask, port, 1);
mvpp2_shared_interrupt_mask_unmask(port, true);
@@ -3404,8 +3475,8 @@ static int mvpp2_stop(struct net_device *dev)
mvpp2_irqs_deinit(port);
if (!port->has_tx_irqs) {
- for_each_present_cpu(cpu) {
- port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
+ port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_cancel(&port_pcpu->tx_done_timer);
port_pcpu->timer_scheduled = false;
@@ -3550,7 +3621,7 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mvpp2_port *port = netdev_priv(dev);
unsigned int start;
- int cpu;
+ unsigned int cpu;
for_each_possible_cpu(cpu) {
struct mvpp2_pcpu_stats *cpu_stats;
@@ -3977,12 +4048,18 @@ static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
struct device_node *port_node)
{
+ struct mvpp2 *priv = port->priv;
struct mvpp2_queue_vector *v;
int i, ret;
- port->nqvecs = num_possible_cpus();
- if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
- port->nqvecs += 1;
+ switch (queue_mode) {
+ case MVPP2_QDIST_SINGLE_MODE:
+ port->nqvecs = priv->nthreads + 1;
+ break;
+ case MVPP2_QDIST_MULTI_MODE:
+ port->nqvecs = priv->nthreads;
+ break;
+ }
for (i = 0; i < port->nqvecs; i++) {
char irqname[16];
@@ -3994,7 +4071,10 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
v->sw_thread_id = i;
v->sw_thread_mask = BIT(i);
- snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
+ if (port->flags & MVPP2_F_DT_COMPAT)
+ snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
+ else
+ snprintf(irqname, sizeof(irqname), "hif%d", i);
if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
v->first_rxq = i * MVPP2_DEFAULT_RXQ;
@@ -4004,7 +4084,9 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
v->first_rxq = 0;
v->nrxqs = port->nrxqs;
v->type = MVPP2_QUEUE_VECTOR_SHARED;
- strncpy(irqname, "rx-shared", sizeof(irqname));
+
+ if (port->flags & MVPP2_F_DT_COMPAT)
+ strncpy(irqname, "rx-shared", sizeof(irqname));
}
if (port_node)
@@ -4081,7 +4163,8 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct device *dev = port->dev->dev.parent;
struct mvpp2 *priv = port->priv;
struct mvpp2_txq_pcpu *txq_pcpu;
- int queue, cpu, err;
+ unsigned int thread;
+ int queue, err;
/* Checks for hardware constraints */
if (port->first_rxq + port->nrxqs >
@@ -4125,9 +4208,9 @@ static int mvpp2_port_init(struct mvpp2_port *port)
txq->id = queue_phy_id;
txq->log_id = queue;
txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- txq_pcpu->cpu = cpu;
+ for (thread = 0; thread < priv->nthreads; thread++) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+ txq_pcpu->thread = thread;
}
port->txqs[queue] = txq;
@@ -4200,24 +4283,51 @@ err_free_percpu:
return err;
}
-/* Checks if the port DT description has the TX interrupts
- * described. On PPv2.1, there are no such interrupts. On PPv2.2,
- * there are available, but we need to keep support for old DTs.
+static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
+ unsigned long *flags)
+{
+ char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
+ "tx-cpu3" };
+ int i;
+
+ for (i = 0; i < 5; i++)
+ if (of_property_match_string(port_node, "interrupt-names",
+ irqs[i]) < 0)
+ return false;
+
+ *flags |= MVPP2_F_DT_COMPAT;
+ return true;
+}
+
+/* Checks if the port DT description has the required Tx interrupts:
+ * - PPv2.1: there are no such interrupts.
+ * - PPv2.2:
+ * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
+ * - The new ones have: "hifX" with X in [0..8]
+ *
+ * All these variants are supported to preserve backward compatibility.
*/
-static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
- struct device_node *port_node)
+static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
+ struct device_node *port_node,
+ unsigned long *flags)
{
- char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
- "tx-cpu2", "tx-cpu3" };
- int ret, i;
+ char name[5];
+ int i;
+
+ /* ACPI */
+ if (!port_node)
+ return true;
if (priv->hw_version == MVPP21)
return false;
- for (i = 0; i < 5; i++) {
- ret = of_property_match_string(port_node, "interrupt-names",
- irqs[i]);
- if (ret < 0)
+ if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
+ return true;
+
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
+ snprintf(name, sizeof(name), "hif%d", i);
+ if (of_property_match_string(port_node, "interrupt-names",
+ name) < 0)
return false;
}
@@ -4495,10 +4605,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
return;
}
- netif_tx_stop_all_queues(port->dev);
- if (!port->has_phy)
- netif_carrier_off(port->dev);
-
/* Make sure the port is disabled when reconfiguring the mode */
mvpp2_port_disable(port);
@@ -4523,16 +4629,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
mvpp2_port_loopback_set(port, state);
- /* If the port already was up, make sure it's still in the same state */
- if (state->link || !port->has_phy) {
- mvpp2_port_enable(port);
-
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
- if (!port->has_phy)
- netif_carrier_on(dev);
- netif_tx_wake_all_queues(dev);
- }
+ mvpp2_port_enable(port);
}
static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
@@ -4607,23 +4704,21 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct resource *res;
struct phylink *phylink;
char *mac_from = "";
- unsigned int ntxqs, nrxqs;
+ unsigned int ntxqs, nrxqs, thread;
+ unsigned long flags = 0;
bool has_tx_irqs;
u32 id;
int features;
int phy_mode;
- int err, i, cpu;
+ int err, i;
- if (port_node) {
- has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
- } else {
- has_tx_irqs = true;
- queue_mode = MVPP2_QDIST_MULTI_MODE;
+ has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
+ if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
+ dev_err(&pdev->dev,
+ "not enough IRQs to support multi queue mode\n");
+ return -EINVAL;
}
- if (!has_tx_irqs)
- queue_mode = MVPP2_QDIST_SINGLE_MODE;
-
ntxqs = MVPP2_MAX_TXQ;
if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
@@ -4671,6 +4766,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->nrxqs = nrxqs;
port->priv = priv;
port->has_tx_irqs = has_tx_irqs;
+ port->flags = flags;
err = mvpp2_queue_vectors_init(port, port_node);
if (err)
@@ -4767,8 +4863,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
if (!port->has_tx_irqs) {
- for_each_present_cpu(cpu) {
- port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+ for (thread = 0; thread < priv->nthreads; thread++) {
+ port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
@@ -5052,13 +5148,13 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
}
/* Allocate and initialize aggregated TXQs */
- priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
+ priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
sizeof(*priv->aggr_txqs),
GFP_KERNEL);
if (!priv->aggr_txqs)
return -ENOMEM;
- for_each_present_cpu(i) {
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
priv->aggr_txqs[i].id = i;
priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
@@ -5105,7 +5201,7 @@ static int mvpp2_probe(struct platform_device *pdev)
struct mvpp2 *priv;
struct resource *res;
void __iomem *base;
- int i;
+ int i, shared;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -5170,6 +5266,15 @@ static int mvpp2_probe(struct platform_device *pdev)
mvpp2_setup_bm_pool();
+
+ priv->nthreads = min_t(unsigned int, num_present_cpus(),
+ MVPP2_MAX_THREADS);
+
+ shared = num_present_cpus() - priv->nthreads;
+ if (shared > 0)
+ bitmap_fill(&priv->lock_map,
+ min_t(int, shared, MVPP2_MAX_THREADS));
+
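With num_present_cpus() = 12, this clamps nthreads to 9 and marks the first three threads (the ones the surplus CPUs wrap onto) in lock_map. A standalone model of the computation, under the assumption nthreads = min(ncpus, MVPP2_MAX_THREADS):

    #include <stdio.h>

    #define MVPP2_MAX_THREADS 9

    int main(void)
    {
            unsigned int ncpus = 12;                /* example CPU count */
            unsigned int nthreads = ncpus < MVPP2_MAX_THREADS ?
                                    ncpus : MVPP2_MAX_THREADS; /* min_t() */
            int shared = (int)(ncpus - nthreads);
            unsigned long lock_map = 0;

            /* bitmap_fill() over the first 'shared' thread ids */
            for (int i = 0; i < shared && i < MVPP2_MAX_THREADS; i++)
                    lock_map |= 1UL << i;

            printf("nthreads=%u lock_map=0x%lx\n", nthreads, lock_map);
            return 0;
    }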
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
u32 addr_space_sz;
@@ -5344,7 +5449,7 @@ static int mvpp2_remove(struct platform_device *pdev)
mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
}
- for_each_present_cpu(i) {
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
dma_free_coherent(&pdev->dev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
new file mode 100644
index 000000000000..35827bdf1878
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -0,0 +1,17 @@
+#
+# Marvell OcteonTX2 drivers configuration
+#
+
+config OCTEONTX2_MBOX
+ tristate
+
+config OCTEONTX2_AF
+ tristate "Marvell OcteonTX2 RVU Admin Function driver"
+ select OCTEONTX2_MBOX
+ depends on (64BIT && COMPILE_TEST) || ARM64
+ depends on PCI
+ help
+ This driver supports Marvell's OcteonTX2 Resource Virtualization
+ Unit's admin function manager, which manages all RVU HW resources
+ and provides an interface for the PFs/VFs to configure the HW.
+ It must be enabled for the other RVU device drivers to work.
diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile
new file mode 100644
index 000000000000..e579dcd54c97
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell OcteonTX2 device drivers.
+#
+
+obj-$(CONFIG_OCTEONTX2_AF) += af/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
new file mode 100644
index 000000000000..06329acf9c2c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 RVU Admin Function driver
+#
+
+obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
+obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
+
+octeontx2_mbox-y := mbox.o
+octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
+ rvu_reg.o rvu_npc.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
new file mode 100644
index 000000000000..12db256c8c9f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "cgx.h"
+
+#define DRV_NAME "octeontx2-cgx"
+#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"
+
+/**
+ * struct lmac
+ * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
+ * @cmd_lock: Lock to serialize the command interface
+ * @resp: command response
+ * @link_info: link related information
+ * @event_cb: callback for linkchange events
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
+ * @cgx: parent cgx port
+ * @lmac_id: lmac port id
+ * @name: lmac port name
+ */
+struct lmac {
+ wait_queue_head_t wq_cmd_cmplt;
+ struct mutex cmd_lock;
+ u64 resp;
+ struct cgx_link_user_info link_info;
+ struct cgx_event_cb event_cb;
+ bool cmd_pend;
+ struct cgx *cgx;
+ u8 lmac_id;
+ char *name;
+};
+
+struct cgx {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ u8 cgx_id;
+ u8 lmac_count;
+ struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct list_head cgx_list;
+};
+
+static LIST_HEAD(cgx_list);
+
+/* Convert firmware speed encoding to user format (Mbps) */
+static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
+
+/* Convert firmware lmac type encoding to string */
+static char *cgx_lmactype_string[LMAC_MODE_MAX];
+
+/* Supported devices */
+static const struct pci_device_id cgx_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+ { 0, } /* end of table */
+};
+
+MODULE_DEVICE_TABLE(pci, cgx_id_table);
+
+static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+{
+ writeq(val, cgx->reg_base + (lmac << 18) + offset);
+}
+
+static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+{
+ return readq(cgx->reg_base + (lmac << 18) + offset);
+}
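cgx_read()/cgx_write() address what appears to be a per-LMAC register window: each LMAC owns a 256 KB (1 << 18) slice of the BAR, so the effective address is reg_base + (lmac << 18) + offset. A small demo of the arithmetic; the CSR offset used is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t offset = 0x040;        /* illustrative CSR offset */

            for (unsigned int lmac = 0; lmac < 4; lmac++)
                    printf("lmac %u -> base + 0x%06llx\n", lmac,
                           (unsigned long long)(((uint64_t)lmac << 18) + offset));
            return 0;
    }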
+
+static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+{
+ if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
+ return NULL;
+
+ return cgx->lmac_idmap[lmac_id];
+}
+
+int cgx_get_cgx_cnt(void)
+{
+ struct cgx *cgx_dev;
+ int count = 0;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
+ count++;
+
+ return count;
+}
+EXPORT_SYMBOL(cgx_get_cgx_cnt);
+
+int cgx_get_lmac_cnt(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ return cgx->lmac_count;
+}
+EXPORT_SYMBOL(cgx_get_lmac_cnt);
+
+void *cgx_get_pdata(int cgx_id)
+{
+ struct cgx *cgx_dev;
+
+ list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
+ if (cgx_dev->cgx_id == cgx_id)
+ return cgx_dev;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(cgx_get_pdata);
+
+/* Ensure the required lock for the event queue (where asynchronous
+ * events are posted) is acquired before calling this API. Otherwise an
+ * asynchronous event (carrying the latest link status) can reach the
+ * destination before this function returns and make the link status
+ * appear wrong.
+ */
+int cgx_get_link_info(void *cgxd, int lmac_id,
+ struct cgx_link_user_info *linfo)
+{
+ struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
+
+ if (!lmac)
+ return -ENODEV;
+
+ *linfo = lmac->link_info;
+ return 0;
+}
+EXPORT_SYMBOL(cgx_get_link_info);
+
+static u64 mac2u64(u8 *mac_addr)
+{
+ u64 mac = 0;
+ int index;
+
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+ return mac;
+}
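A standalone check of the packing done by mac2u64(): the six MAC bytes end up big-endian-style in the low 48 bits of the returned value, which is what the DMAC CAM register consumes. The test values are hypothetical:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t mac2u64(const uint8_t *mac)
    {
            uint64_t v = 0;

            for (int i = 5; i >= 0; i--)    /* i = ETH_ALEN - 1 downwards */
                    v |= (uint64_t)*mac++ << (8 * i);
            return v;
    }

    int main(void)
    {
            uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

            /* prints 0x001122334455 */
            printf("0x%012llx\n", (unsigned long long)mac2u64(mac));
            return 0;
    }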
+
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ /* Pack the six MAC address bytes into the low 48 bits */
+ cfg = mac2u64(mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+ cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_set);
+
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+ return cfg & CGX_RX_DMAC_ADR_MASK;
+}
+EXPORT_SYMBOL(cgx_lmac_addr_get);
+
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
+ return 0;
+}
+EXPORT_SYMBOL(cgx_set_pkind);
+
+static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
+{
+ u64 cfg;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
+}
+
+/* Configure CGX LMAC in internal loopback mode */
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u8 lmac_type;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ lmac_type = cgx_get_lmac_type(cgx, lmac_id);
+ if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
+ if (enable)
+ cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
+ else
+ cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
+ if (enable)
+ cfg |= CGXX_SPUX_CONTROL1_LBK;
+ else
+ cfg &= ~CGXX_SPUX_CONTROL1_LBK;
+ cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_internal_loopback);
+
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgx_get_pdata(cgx_id);
+ u64 cfg = 0;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ /* Enable promiscuous mode on LMAC */
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
+ cfg |= CGX_DMAC_BCAST_MODE;
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ } else {
+ /* Disable promiscuous mode */
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ }
+}
+EXPORT_SYMBOL(cgx_lmac_promisc_config);
+
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+ *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
+ return 0;
+}
+EXPORT_SYMBOL(cgx_get_rx_stats);
+
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+ *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
+ return 0;
+}
+EXPORT_SYMBOL(cgx_get_tx_stats);
+
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+ if (enable)
+ cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+ else
+ cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+ cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
+
+/* CGX Firmware interface low level support */
+static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
+{
+ struct cgx *cgx = lmac->cgx;
+ struct device *dev;
+ int err = 0;
+ u64 cmd;
+
+ /* Ensure no other command is in progress */
+ err = mutex_lock_interruptible(&lmac->cmd_lock);
+ if (err)
+ return err;
+
+ /* Ensure command register is free */
+ cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
+ if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ /* Update ownership in command request */
+ req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
+
+ /* Mark this lmac as pending, before we start */
+ lmac->cmd_pend = true;
+
+ /* Start command in hardware */
+ cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);
+
+ /* Ensure command is completed without errors */
+ if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
+ msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
+ dev = &cgx->pdev->dev;
+ dev_err(dev, "cgx port %d:%d cmd timeout\n",
+ cgx->cgx_id, lmac->lmac_id);
+ err = -EIO;
+ goto unlock;
+ }
+
+ /* we have a valid command response */
+ smp_rmb(); /* Ensure the latest updates are visible */
+ *resp = lmac->resp;
+
+unlock:
+ mutex_unlock(&lmac->cmd_lock);
+
+ return err;
+}
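The command path is an ownership handshake on CGX_COMMAND_REG: software verifies it owns the register, flips ownership to firmware, and sleeps on wq_cmd_cmplt until the FWI interrupt stores the response and clears cmd_pend. A synchronous userspace model of that flow; the field layout and status value are made up, and fake_firmware() stands in for the interrupt handler:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define OWN_BIT (1ULL << 63)    /* illustrative OWN field position */

    static uint64_t cmd_reg;        /* stands in for CGX_COMMAND_REG */
    static bool cmd_pend;

    /* Stand-in firmware: consumes the request, clears ownership, answers */
    static uint64_t fake_firmware(uint64_t req)
    {
            return (req & ~OWN_BIT) | 0x100;        /* 0x100: made-up status */
    }

    static int cmd_send(uint64_t req, uint64_t *resp)
    {
            if (cmd_reg & OWN_BIT)  /* command register still owned by f/w */
                    return -1;

            cmd_pend = true;
            cmd_reg = req | OWN_BIT;        /* hand ownership to firmware */

            /* In the driver the interrupt handler does this and wakes the
             * sleeper on wq_cmd_cmplt; modelled synchronously here.
             */
            *resp = fake_firmware(cmd_reg);
            cmd_pend = false;
            return 0;
    }

    int main(void)
    {
            uint64_t resp;

            if (!cmd_send(0x2a, &resp))
                    printf("resp = 0x%llx\n", (unsigned long long)resp);
            return 0;
    }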
+
+static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
+ struct cgx *cgx, int lmac_id)
+{
+ struct lmac *lmac;
+ int err;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ err = cgx_fwi_cmd_send(req, resp, lmac);
+
+ /* Check for valid response */
+ if (!err) {
+ if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
+ return -EIO;
+ else
+ return 0;
+ }
+
+ return err;
+}
+
+static inline void cgx_link_usertable_init(void)
+{
+ cgx_speed_mbps[CGX_LINK_NONE] = 0;
+ cgx_speed_mbps[CGX_LINK_10M] = 10;
+ cgx_speed_mbps[CGX_LINK_100M] = 100;
+ cgx_speed_mbps[CGX_LINK_1G] = 1000;
+ cgx_speed_mbps[CGX_LINK_2HG] = 2500;
+ cgx_speed_mbps[CGX_LINK_5G] = 5000;
+ cgx_speed_mbps[CGX_LINK_10G] = 10000;
+ cgx_speed_mbps[CGX_LINK_20G] = 20000;
+ cgx_speed_mbps[CGX_LINK_25G] = 25000;
+ cgx_speed_mbps[CGX_LINK_40G] = 40000;
+ cgx_speed_mbps[CGX_LINK_50G] = 50000;
+ cgx_speed_mbps[CGX_LINK_100G] = 100000;
+
+ cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
+ cgx_lmactype_string[LMAC_MODE_XAUI] = "XAUI";
+ cgx_lmactype_string[LMAC_MODE_RXAUI] = "RXAUI";
+ cgx_lmactype_string[LMAC_MODE_10G_R] = "10G_R";
+ cgx_lmactype_string[LMAC_MODE_40G_R] = "40G_R";
+ cgx_lmactype_string[LMAC_MODE_QSGMII] = "QSGMII";
+ cgx_lmactype_string[LMAC_MODE_25G_R] = "25G_R";
+ cgx_lmactype_string[LMAC_MODE_50G_R] = "50G_R";
+ cgx_lmactype_string[LMAC_MODE_100G_R] = "100G_R";
+ cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
+}
+
+static inline void link_status_user_format(u64 lstat,
+ struct cgx_link_user_info *linfo,
+ struct cgx *cgx, u8 lmac_id)
+{
+ char *lmac_string;
+
+ linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
+ linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
+ linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
+ linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
+ lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
+ strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+}
+
+/* Hardware event handlers */
+static inline void cgx_link_change_handler(u64 lstat,
+ struct lmac *lmac)
+{
+ struct cgx_link_user_info *linfo;
+ struct cgx *cgx = lmac->cgx;
+ struct cgx_link_event event;
+ struct device *dev;
+ int err_type;
+
+ dev = &cgx->pdev->dev;
+
+ link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
+ err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
+
+ event.cgx_id = cgx->cgx_id;
+ event.lmac_id = lmac->lmac_id;
+
+ /* update the local copy of link status */
+ lmac->link_info = event.link_uinfo;
+ linfo = &lmac->link_info;
+
+ if (!lmac->event_cb.notify_link_chg) {
+ dev_dbg(dev, "cgx port %d:%d Link change handler null\n",
+ cgx->cgx_id, lmac->lmac_id);
+ if (err_type != CGX_ERR_NONE) {
+ dev_err(dev, "cgx port %d:%d Link error %d\n",
+ cgx->cgx_id, lmac->lmac_id, err_type);
+ }
+ dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
+ cgx->cgx_id, lmac->lmac_id,
+ linfo->link_up ? "UP" : "DOWN", linfo->speed);
+ return;
+ }
+
+ if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
+ dev_err(dev, "event notification failure\n");
+}
+
+static inline bool cgx_cmdresp_is_linkevent(u64 event)
+{
+ u8 id;
+
+ id = FIELD_GET(EVTREG_ID, event);
+
+ return id == CGX_CMD_LINK_BRING_UP ||
+        id == CGX_CMD_LINK_BRING_DOWN;
+}
+
+static inline bool cgx_event_is_linkevent(u64 event)
+{
+ return FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE;
+}
+
+static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
+{
+ struct lmac *lmac = data;
+ struct cgx *cgx;
+ u64 event;
+
+ cgx = lmac->cgx;
+
+ event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
+
+ if (!FIELD_GET(EVTREG_ACK, event))
+ return IRQ_NONE;
+
+ switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
+ case CGX_EVT_CMD_RESP:
+ /* Copy the response. Since only one command is active at a
+ * time, there is no way a response can get overwritten
+ */
+ lmac->resp = event;
+ /* Ensure response is updated before thread context starts */
+ smp_wmb();
+
+ /* There won't be separate events for link changes initiated from
+ * software; hence, report the command responses as events
+ */
+ if (cgx_cmdresp_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+
+ /* Release thread waiting for completion */
+ lmac->cmd_pend = false;
+ wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ break;
+ case CGX_EVT_ASYNC:
+ if (cgx_event_is_linkevent(event))
+ cgx_link_change_handler(event, lmac);
+ break;
+ }
+
+ /* Any new event or command response will be posted by firmware
+ * only after the current status is acked.
+ * Ack the interrupt register as well.
+ */
+ cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
+ cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);
+
+ return IRQ_HANDLED;
+}
+
+/* APIs for PHY management using CGX firmware interface */
+
+/* callback registration for hardware events like link change */
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ lmac->event_cb = *cb;
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_register);
+
+static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
+{
+ u64 req = 0;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
+ return cgx_fwi_cmd_generic(req, resp, cgx, 0);
+}
+
+static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
+{
+ struct device *dev = &cgx->pdev->dev;
+ int major_ver, minor_ver;
+ u64 resp;
+ int err;
+
+ if (!cgx->lmac_count)
+ return 0;
+
+ err = cgx_fwi_read_version(&resp, cgx);
+ if (err)
+ return err;
+
+ major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
+ minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
+ dev_dbg(dev, "Firmware command interface version = %d.%d\n",
+ major_ver, minor_ver);
+ if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
+ minor_ver != CGX_FIRMWARE_MINOR_VER)
+ return -EIO;
+ else
+ return 0;
+}
+
+static int cgx_lmac_init(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ int i, err;
+
+ cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
+ if (cgx->lmac_count > MAX_LMAC_PER_CGX)
+ cgx->lmac_count = MAX_LMAC_PER_CGX;
+
+ for (i = 0; i < cgx->lmac_count; i++) {
+ lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
+ if (!lmac)
+ return -ENOMEM;
+ lmac->name = kzalloc(sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
+ if (!lmac->name) {
+ kfree(lmac);
+ return -ENOMEM;
+ }
+ sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+ lmac->lmac_id = i;
+ lmac->cgx = cgx;
+ init_waitqueue_head(&lmac->wq_cmd_cmplt);
+ mutex_init(&lmac->cmd_lock);
+ err = request_irq(pci_irq_vector(cgx->pdev,
+ CGX_LMAC_FWI + i * 9),
+ cgx_fwi_event_handler, 0, lmac->name, lmac);
+ if (err) {
+ kfree(lmac->name);
+ kfree(lmac);
+ return err;
+ }
+
+ /* Enable interrupt */
+ cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
+ FW_CGX_INT);
+
+ /* Add reference */
+ cgx->lmac_idmap[i] = lmac;
+ }
+
+ return cgx_lmac_verify_fwi_version(cgx);
+}
+
+static int cgx_lmac_exit(struct cgx *cgx)
+{
+ struct lmac *lmac;
+ int i;
+
+ /* Free all lmac related resources */
+ for (i = 0; i < cgx->lmac_count; i++) {
+ lmac = cgx->lmac_idmap[i];
+ if (!lmac)
+ continue;
+ free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
+ kfree(lmac->name);
+ kfree(lmac);
+ }
+
+ return 0;
+}
+
+static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct cgx *cgx;
+ int err, nvec;
+
+ cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
+ if (!cgx)
+ return -ENOMEM;
+ cgx->pdev = pdev;
+
+ pci_set_drvdata(pdev, cgx);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!cgx->reg_base) {
+ dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ nvec = CGX_NVEC;
+ err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (err < 0 || err != nvec) {
+ dev_err(dev, "Request for %d msix vectors failed, err %d\n",
+ nvec, err);
+ goto err_release_regions;
+ }
+
+ list_add(&cgx->cgx_list, &cgx_list);
+ cgx->cgx_id = cgx_get_cgx_cnt() - 1;
+
+ cgx_link_usertable_init();
+
+ err = cgx_lmac_init(cgx);
+ if (err)
+ goto err_release_lmac;
+
+ return 0;
+
+err_release_lmac:
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ pci_free_irq_vectors(pdev);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void cgx_remove(struct pci_dev *pdev)
+{
+ struct cgx *cgx = pci_get_drvdata(pdev);
+
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver cgx_driver = {
+ .name = DRV_NAME,
+ .id_table = cgx_id_table,
+ .probe = cgx_probe,
+ .remove = cgx_remove,
+};
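+
+/* Usage sketch (illustrative only, not code from this patch): a consumer
+ * such as the RVU AF driver is expected to subscribe to link events via
+ * the exported cgx_lmac_evh_register(). The callback and private-data
+ * names below are hypothetical.
+ *
+ *	static int rvu_cgx_link_event(struct cgx_link_event *event, void *data)
+ *	{
+ *		... consume event->link_uinfo for event->cgx_id/lmac_id ...
+ *		return 0;
+ *	}
+ *
+ *	struct cgx_event_cb cb = {
+ *		.notify_link_chg = rvu_cgx_link_event,
+ *		.data = rvu_private_data,
+ *	};
+ *	err = cgx_lmac_evh_register(&cb, cgxd, lmac_id);
+ */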
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
new file mode 100644
index 000000000000..0a66d2717442
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CGX_H
+#define CGX_H
+
+#include "mbox.h"
+#include "cgx_fw_if.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_CGX 0xA059
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+
+#define MAX_CGX 3
+#define MAX_LMAC_PER_CGX 4
+#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
+
+/* Registers */
+#define CGXX_CMRX_CFG 0x00
+#define CMR_EN BIT_ULL(55)
+#define DATA_PKT_TX_EN BIT_ULL(53)
+#define DATA_PKT_RX_EN BIT_ULL(54)
+#define CGX_LMAC_TYPE_SHIFT 40
+#define CGX_LMAC_TYPE_MASK 0xF
+#define CGXX_CMRX_INT 0x040
+#define FW_CGX_INT BIT_ULL(1)
+#define CGXX_CMRX_INT_ENA_W1S 0x058
+#define CGXX_CMRX_RX_ID_MAP 0x060
+#define CGXX_CMRX_RX_STAT0 0x070
+#define CGXX_CMRX_RX_LMACS 0x128
+#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
+#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
+#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
+#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
+#define CGXX_CMRX_RX_DMAC_CAM0 0x200
+#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGXX_CMRX_RX_DMAC_CAM1 0x400
+#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
+#define CGXX_CMRX_TX_STAT0 0x700
+#define CGXX_SCRATCH0_REG 0x1050
+#define CGXX_SCRATCH1_REG 0x1058
+#define CGX_CONST 0x2000
+#define CGXX_SPUX_CONTROL1 0x10000
+#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
+#define CGXX_GMP_PCS_MRX_CTL 0x30000
+#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
+
+#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
+#define CGX_EVENT_REG CGXX_SCRATCH0_REG
+#define CGX_CMD_TIMEOUT 2200 /* msecs */
+
+#define CGX_NVEC 37
+#define CGX_LMAC_FWI 0
+
+enum LMAC_TYPE {
+ LMAC_MODE_SGMII = 0,
+ LMAC_MODE_XAUI = 1,
+ LMAC_MODE_RXAUI = 2,
+ LMAC_MODE_10G_R = 3,
+ LMAC_MODE_40G_R = 4,
+ LMAC_MODE_QSGMII = 6,
+ LMAC_MODE_25G_R = 7,
+ LMAC_MODE_50G_R = 8,
+ LMAC_MODE_100G_R = 9,
+ LMAC_MODE_USXGMII = 10,
+ LMAC_MODE_MAX,
+};
+
+struct cgx_link_event {
+ struct cgx_link_user_info link_uinfo;
+ u8 cgx_id;
+ u8 lmac_id;
+};
+
+/**
+ * struct cgx_event_cb
+ * @notify_link_chg: callback for link change notification
+ * @data: data passed to callback function
+ */
+struct cgx_event_cb {
+ int (*notify_link_chg)(struct cgx_link_event *event, void *data);
+ void *data;
+};
+
+extern struct pci_driver cgx_driver;
+
+int cgx_get_cgx_cnt(void);
+int cgx_get_lmac_cnt(void *cgxd);
+void *cgx_get_pdata(int cgx_id);
+int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
+int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
+int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
+int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
+int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
+int cgx_get_link_info(void *cgxd, int lmac_id,
+ struct cgx_link_user_info *linfo);
+#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
new file mode 100644
index 000000000000..fa17af3f4ba7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 CGX driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CGX_FW_INTF_H__
+#define __CGX_FW_INTF_H__
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#define CGX_FIRMWARE_MAJOR_VER 1
+#define CGX_FIRMWARE_MINOR_VER 0
+
+#define CGX_EVENT_ACK 1UL
+
+/* CGX error types. set for cmd response status as CGX_STAT_FAIL */
+enum cgx_error_type {
+ CGX_ERR_NONE,
+ CGX_ERR_LMAC_NOT_ENABLED,
+ CGX_ERR_LMAC_MODE_INVALID,
+ CGX_ERR_REQUEST_ID_INVALID,
+ CGX_ERR_PREV_ACK_NOT_CLEAR,
+ CGX_ERR_PHY_LINK_DOWN,
+ CGX_ERR_PCS_RESET_FAIL,
+ CGX_ERR_AN_CPT_FAIL,
+ CGX_ERR_TX_NOT_IDLE,
+ CGX_ERR_RX_NOT_IDLE,
+ CGX_ERR_SPUX_BR_BLKLOCK_FAIL,
+ CGX_ERR_SPUX_RX_ALIGN_FAIL,
+ CGX_ERR_SPUX_TX_FAULT,
+ CGX_ERR_SPUX_RX_FAULT,
+ CGX_ERR_SPUX_RESET_FAIL,
+ CGX_ERR_SPUX_AN_RESET_FAIL,
+ CGX_ERR_SPUX_USX_AN_RESET_FAIL,
+ CGX_ERR_SMUX_RX_LINK_NOT_OK,
+ CGX_ERR_PCS_RECV_LINK_FAIL,
+ CGX_ERR_TRAINING_FAIL,
+ CGX_ERR_RX_EQU_FAIL,
+ CGX_ERR_SPUX_BER_FAIL,
+ CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */
+};
+
+/* LINK speed types */
+enum cgx_link_speed {
+ CGX_LINK_NONE,
+ CGX_LINK_10M,
+ CGX_LINK_100M,
+ CGX_LINK_1G,
+ CGX_LINK_2HG, /* 2.5 Gbps */
+ CGX_LINK_5G,
+ CGX_LINK_10G,
+ CGX_LINK_20G,
+ CGX_LINK_25G,
+ CGX_LINK_40G,
+ CGX_LINK_50G,
+ CGX_LINK_100G,
+ CGX_LINK_SPEED_MAX,
+};
+
+/* REQUEST ID types. Input to firmware */
+enum cgx_cmd_id {
+ CGX_CMD_NONE,
+ CGX_CMD_GET_FW_VER,
+ CGX_CMD_GET_MAC_ADDR,
+ CGX_CMD_SET_MTU,
+ CGX_CMD_GET_LINK_STS, /* optional to user */
+ CGX_CMD_LINK_BRING_UP,
+ CGX_CMD_LINK_BRING_DOWN,
+ CGX_CMD_INTERNAL_LBK,
+ CGX_CMD_EXTERNAL_LBK,
+ CGX_CMD_HIGIG,
+ CGX_CMD_LINK_STATE_CHANGE,
+ CGX_CMD_MODE_CHANGE, /* hot plug support */
+ CGX_CMD_INTF_SHUTDOWN,
+ CGX_CMD_IRQ_ENABLE,
+ CGX_CMD_IRQ_DISABLE,
+};
+
+/* async event ids */
+enum cgx_evt_id {
+ CGX_EVT_NONE,
+ CGX_EVT_LINK_CHANGE,
+};
+
+/* event types - cause of interrupt */
+enum cgx_evt_type {
+ CGX_EVT_ASYNC,
+ CGX_EVT_CMD_RESP
+};
+
+enum cgx_stat {
+ CGX_STAT_SUCCESS,
+ CGX_STAT_FAIL
+};
+
+enum cgx_cmd_own {
+ CGX_CMD_OWN_NS,
+ CGX_CMD_OWN_FIRMWARE,
+};
+
+/* m - bit mask
+ * y - value to be written in the bitrange
+ * x - input value whose bitrange is to be modified
+ */
+#define FIELD_SET(m, y, x) \
+ (((x) & ~(m)) | \
+ FIELD_PREP((m), (y)))
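+
+/* Illustrative example (not used by the firmware interface itself):
+ * starting from req = 0, FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req)
+ * returns req with the command ID placed in bits 7:2 and every other
+ * bit left untouched, which is how cgx_fwi_read_version() builds its
+ * command word.
+ */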
+
+/* scratchx(0) CSR used for ATF->non-secure SW communication.
+ * It acts as the status register and provides details on command
+ * ack/status, the command response and error details.
+ */
+#define EVTREG_ACK BIT_ULL(0)
+#define EVTREG_EVT_TYPE BIT_ULL(1)
+#define EVTREG_STAT BIT_ULL(2)
+#define EVTREG_ID GENMASK_ULL(8, 3)
+
+/* Response to command IDs with command status as CGX_STAT_FAIL
+ *
+ * Not applicable for commands:
+ * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE
+ */
+#define EVTREG_ERRTYPE GENMASK_ULL(18, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_FW_VER with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAJOR_VER GENMASK_ULL(12, 9)
+#define RESP_MINOR_VER GENMASK_ULL(16, 13)
+
+/* Response to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MAC_ADDR GENMASK_ULL(56, 9)
+
+/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
+ * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
+ *
+ * In case of CGX_STAT_FAIL, it indicates CGX configuration failed
+ * when processing link up/down/change command.
+ * Both err_type and current link status will be updated
+ *
+ * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current
+ * link status will be updated
+ */
+struct cgx_lnk_sts {
+ uint64_t reserved1:9;
+ uint64_t link_up:1;
+ uint64_t full_duplex:1;
+ uint64_t speed:4; /* cgx_link_speed */
+ uint64_t err_type:10;
+ uint64_t reserved2:39;
+};
+
+#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9)
+#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10)
+#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
+#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15)
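+
+/* Illustrative decode sketch (an assumption about intended usage, not
+ * code from this patch; "lstat" is a hypothetical variable holding the
+ * scratchx(0) word):
+ *
+ *	link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
+ *	full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
+ *	speed_id = FIELD_GET(RESP_LINKSTAT_SPEED, lstat);
+ *	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
+ *
+ * where speed_id indexes enum cgx_link_speed.
+ */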
+
+/* scratchx(1) CSR used for non-secure SW->ATF communication
+ * This CSR acts as a command register
+ */
+#define CMDREG_OWN BIT_ULL(0)
+#define CMDREG_ID GENMASK_ULL(7, 2)
+
+/* Any command using enable/disable as an argument needs
+ * to set this bitfield.
+ * Ex: Loopback, HiGig...
+ */
+#define CMDREG_ENABLE BIT_ULL(8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */
+#define CMDMTU_SIZE GENMASK_ULL(23, 8)
+
+/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */
+#define CMDLINKCHANGE_LINKUP BIT_ULL(8)
+#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9)
+#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10)
+
+#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
new file mode 100644
index 000000000000..d39ada404c8f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef COMMON_H
+#define COMMON_H
+
+#include "rvu_struct.h"
+
+#define OTX2_ALIGN 128 /* Align to cacheline */
+
+#define Q_SIZE_16 0ULL /* 16 entries */
+#define Q_SIZE_64 1ULL /* 64 entries */
+#define Q_SIZE_256 2ULL
+#define Q_SIZE_1K 3ULL
+#define Q_SIZE_4K 4ULL
+#define Q_SIZE_16K 5ULL
+#define Q_SIZE_64K 6ULL
+#define Q_SIZE_256K 7ULL
+#define Q_SIZE_1M 8ULL /* Million entries */
+#define Q_SIZE_MIN Q_SIZE_16
+#define Q_SIZE_MAX Q_SIZE_1M
+
+#define Q_COUNT(x) (16ULL << (2 * (x)))
+#define Q_SIZE(x, n) ((ilog2(x) - (n)) / 2)
+
+/* Admin queue info */
+
+/* Since we intend to add only one instruction at a time,
+ * keep the queue size at its minimum.
+ */
+#define AQ_SIZE Q_SIZE_16
+/* HW head & tail pointer mask */
+#define AQ_PTR_MASK 0xFFFFF
+
+struct qmem {
+ void *base;
+ dma_addr_t iova;
+ int alloc_sz;
+ u8 entry_sz;
+ u8 align;
+ u32 qsize;
+};
+
+static inline int qmem_alloc(struct device *dev, struct qmem **q,
+ int qsize, int entry_sz)
+{
+ struct qmem *qmem;
+ u64 aligned_addr;
+
+ if (!qsize)
+ return -EINVAL;
+
+ *q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
+ if (!*q)
+ return -ENOMEM;
+ qmem = *q;
+
+ qmem->entry_sz = entry_sz;
+ qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
+ qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz,
+ &qmem->iova, GFP_KERNEL);
+ if (!qmem->base)
+ return -ENOMEM;
+
+ qmem->qsize = qsize;
+
+ aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
+ qmem->align = (aligned_addr - qmem->iova);
+ qmem->base += qmem->align;
+ qmem->iova += qmem->align;
+ return 0;
+}
+
+static inline void qmem_free(struct device *dev, struct qmem *qmem)
+{
+ if (!qmem)
+ return;
+
+ if (qmem->base)
+ dma_free_coherent(dev, qmem->alloc_sz,
+ qmem->base - qmem->align,
+ qmem->iova - qmem->align);
+ devm_kfree(dev, qmem);
+}
+
+struct admin_queue {
+ struct qmem *inst;
+ struct qmem *res;
+ spinlock_t lock; /* Serialize inst enqueue from PFs */
+};
+
+/* NPA aura count */
+enum npa_aura_sz {
+ NPA_AURA_SZ_0,
+ NPA_AURA_SZ_128,
+ NPA_AURA_SZ_256,
+ NPA_AURA_SZ_512,
+ NPA_AURA_SZ_1K,
+ NPA_AURA_SZ_2K,
+ NPA_AURA_SZ_4K,
+ NPA_AURA_SZ_8K,
+ NPA_AURA_SZ_16K,
+ NPA_AURA_SZ_32K,
+ NPA_AURA_SZ_64K,
+ NPA_AURA_SZ_128K,
+ NPA_AURA_SZ_256K,
+ NPA_AURA_SZ_512K,
+ NPA_AURA_SZ_1M,
+ NPA_AURA_SZ_MAX,
+};
+
+#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6))
+
+/* NPA AQ result structure for init/read/write of aura HW contexts */
+struct npa_aq_aura_res {
+ struct npa_aq_res_s res;
+ struct npa_aura_s aura_ctx;
+ struct npa_aura_s ctx_mask;
+};
+
+/* NPA AQ result structure for init/read/write of pool HW contexts */
+struct npa_aq_pool_res {
+ struct npa_aq_res_s res;
+ struct npa_pool_s pool_ctx;
+ struct npa_pool_s ctx_mask;
+};
+
+/* NIX Transmit schedulers */
+enum nix_scheduler {
+ NIX_TXSCH_LVL_SMQ = 0x0,
+ NIX_TXSCH_LVL_MDQ = 0x0,
+ NIX_TXSCH_LVL_TL4 = 0x1,
+ NIX_TXSCH_LVL_TL3 = 0x2,
+ NIX_TXSCH_LVL_TL2 = 0x3,
+ NIX_TXSCH_LVL_TL1 = 0x4,
+ NIX_TXSCH_LVL_CNT = 0x5,
+};
+
+/* NIX RX action operation */
+#define NIX_RX_ACTIONOP_DROP (0x0ull)
+#define NIX_RX_ACTIONOP_UCAST (0x1ull)
+#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
+#define NIX_RX_ACTIONOP_MCAST (0x3ull)
+#define NIX_RX_ACTIONOP_RSS (0x4ull)
+
+/* NIX TX action operation */
+#define NIX_TX_ACTIONOP_DROP (0x0ull)
+#define NIX_TX_ACTIONOP_UCAST_DEFAULT (0x1ull)
+#define NIX_TX_ACTIONOP_UCAST_CHAN (0x2ull)
+#define NIX_TX_ACTIONOP_MCAST (0x3ull)
+#define NIX_TX_ACTIONOP_DROP_VIOL (0x5ull)
+
+#define NPC_MCAM_KEY_X1 0
+#define NPC_MCAM_KEY_X2 1
+#define NPC_MCAM_KEY_X4 2
+
+#define NIX_INTF_RX 0
+#define NIX_INTF_TX 1
+
+#define NIX_INTF_TYPE_CGX 0
+#define NIX_INTF_TYPE_LBK 1
+
+#define MAX_LMAC_PKIND 12
+#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
+#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+
+/* NIX LSO format indices.
+ * As of now TSO is the only user, so indices are assigned statically.
+ */
+#define NIX_LSO_FORMAT_IDX_TSOV4 0
+#define NIX_LSO_FORMAT_IDX_TSOV6 1
+
+/* RSS info */
+#define MAX_RSS_GROUPS 8
+/* Group 0 has to be used in the default pkt forwarding MCAM entries
+ * reserved for NIXLFs. Groups 1-7 can be used for RSS with ntuple
+ * filters.
+ */
+#define DEFAULT_RSS_CONTEXT_GROUP 0
+#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
+
+/* NIX flow tag, key type flags */
+#define FLOW_KEY_TYPE_PORT BIT(0)
+#define FLOW_KEY_TYPE_IPV4 BIT(1)
+#define FLOW_KEY_TYPE_IPV6 BIT(2)
+#define FLOW_KEY_TYPE_TCP BIT(3)
+#define FLOW_KEY_TYPE_UDP BIT(4)
+#define FLOW_KEY_TYPE_SCTP BIT(5)
+
+/* NIX flow tag algorithm indices, max is 31 */
+enum {
+ FLOW_KEY_ALG_PORT,
+ FLOW_KEY_ALG_IP,
+ FLOW_KEY_ALG_TCP,
+ FLOW_KEY_ALG_UDP,
+ FLOW_KEY_ALG_SCTP,
+ FLOW_KEY_ALG_TCP_UDP,
+ FLOW_KEY_ALG_TCP_SCTP,
+ FLOW_KEY_ALG_UDP_SCTP,
+ FLOW_KEY_ALG_TCP_UDP_SCTP,
+ FLOW_KEY_ALG_MAX,
+};
+
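+/* Worked example (illustrative): with x = Q_SIZE_256 = 2ULL,
+ * Q_COUNT(x) = 16 << 4 = 256 entries; conversely Q_SIZE(256, 4),
+ * with 4 = ilog2(16) assumed as the 'n' argument, gives back
+ * (8 - 4) / 2 = 2. qmem_alloc() over-allocates by OTX2_ALIGN bytes so
+ * that both 'base' and 'iova' can be rounded up to a 128-byte boundary
+ * while still staying inside the DMA buffer.
+ */
+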
+#endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
new file mode 100644
index 000000000000..85ba24a05774
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#include "rvu_reg.h"
+#include "mbox.h"
+
+static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+
+ tx_hdr = mdev->mbase + mbox->tx_start;
+ rx_hdr = mdev->mbase + mbox->rx_start;
+
+ spin_lock(&mdev->mbox_lock);
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ tx_hdr->num_msgs = 0;
+ rx_hdr->num_msgs = 0;
+ spin_unlock(&mdev->mbox_lock);
+}
+EXPORT_SYMBOL(otx2_mbox_reset);
+
+void otx2_mbox_destroy(struct otx2_mbox *mbox)
+{
+ mbox->reg_base = NULL;
+ mbox->hwbase = NULL;
+
+ kfree(mbox->dev);
+ mbox->dev = NULL;
+}
+EXPORT_SYMBOL(otx2_mbox_destroy);
+
+int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid;
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_PFVF:
+ mbox->tx_start = MBOX_DOWN_TX_START;
+ mbox->rx_start = MBOX_DOWN_RX_START;
+ mbox->tx_size = MBOX_DOWN_TX_SIZE;
+ mbox->rx_size = MBOX_DOWN_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_VFPF:
+ mbox->tx_start = MBOX_DOWN_RX_START;
+ mbox->rx_start = MBOX_DOWN_TX_START;
+ mbox->tx_size = MBOX_DOWN_RX_SIZE;
+ mbox->rx_size = MBOX_DOWN_TX_SIZE;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ case MBOX_DIR_PFVF_UP:
+ mbox->tx_start = MBOX_UP_TX_START;
+ mbox->rx_start = MBOX_UP_RX_START;
+ mbox->tx_size = MBOX_UP_TX_SIZE;
+ mbox->rx_size = MBOX_UP_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ case MBOX_DIR_VFPF_UP:
+ mbox->tx_start = MBOX_UP_RX_START;
+ mbox->rx_start = MBOX_UP_TX_START;
+ mbox->tx_size = MBOX_UP_RX_SIZE;
+ mbox->rx_size = MBOX_UP_TX_SIZE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_AFPF_UP:
+ mbox->trigger = RVU_AF_AFPF_MBOX0;
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_PFAF_UP:
+ mbox->trigger = RVU_PF_PFAF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFVF:
+ case MBOX_DIR_PFVF_UP:
+ mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
+ mbox->tr_shift = 12;
+ break;
+ case MBOX_DIR_VFPF:
+ case MBOX_DIR_VFPF_UP:
+ mbox->trigger = RVU_VF_VFPF_MBOX1;
+ mbox->tr_shift = 0;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ mbox->reg_base = reg_base;
+ mbox->hwbase = hwbase;
+ mbox->pdev = pdev;
+
+ mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
+ if (!mbox->dev) {
+ otx2_mbox_destroy(mbox);
+ return -ENOMEM;
+ }
+
+ mbox->ndevs = ndevs;
+ for (devid = 0; devid < ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_init);
+
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ int timeout = 0, sleep = 1;
+
+ while (mdev->num_msgs != mdev->msgs_acked) {
+ msleep(sleep);
+ timeout += sleep;
+ if (timeout >= MBOX_RSP_TIMEOUT)
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
+
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ unsigned long timeout = jiffies + 1 * HZ;
+
+ while (!time_after(jiffies, timeout)) {
+ if (mdev->num_msgs == mdev->msgs_acked)
+ return 0;
+ cpu_relax();
+ }
+ return -EIO;
+}
+EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
+
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_hdr *tx_hdr, *rx_hdr;
+
+ tx_hdr = mdev->mbase + mbox->tx_start;
+ rx_hdr = mdev->mbase + mbox->rx_start;
+
+ spin_lock(&mdev->mbox_lock);
+ /* Reset header for next messages */
+ mdev->msg_size = 0;
+ mdev->rsp_size = 0;
+ mdev->msgs_acked = 0;
+
+ /* Sync mbox data into memory */
+ smp_wmb();
+
+ /* num_msgs != 0 signals to the peer that the buffer has a number of
+ * messages. So this should be written after writing all the messages
+ * to the shared memory.
+ */
+ tx_hdr->num_msgs = mdev->num_msgs;
+ rx_hdr->num_msgs = 0;
+ spin_unlock(&mdev->mbox_lock);
+
+ /* The interrupt should be fired after num_msgs is written
+ * to the shared memory
+ */
+ writeq(1, (void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+}
+EXPORT_SYMBOL(otx2_mbox_msg_send);
+
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ struct mbox_msghdr *msghdr = NULL;
+
+ spin_lock(&mdev->mbox_lock);
+ size = ALIGN(size, MBOX_MSG_ALIGN);
+ size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
+ /* Check if there is space in mailbox */
+ if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
+ goto exit;
+ if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
+ goto exit;
+
+ if (mdev->msg_size == 0)
+ mdev->num_msgs = 0;
+ mdev->num_msgs++;
+
+ msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
+
+ /* Clear the whole msg region */
+ memset(msghdr, 0, sizeof(*msghdr) + size);
+ /* Init message header with reset values */
+ msghdr->ver = OTX2_MBOX_VERSION;
+ mdev->msg_size += size;
+ mdev->rsp_size += size_rsp;
+ msghdr->next_msgoff = mdev->msg_size + msgs_offset;
+exit:
+ spin_unlock(&mdev->mbox_lock);
+
+ return msghdr;
+}
+EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
+
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg)
+{
+ unsigned long imsg = mbox->tx_start + msgs_offset;
+ unsigned long irsp = mbox->rx_start + msgs_offset;
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ u16 msgs;
+
+ if (mdev->num_msgs != mdev->msgs_acked)
+ return ERR_PTR(-ENODEV);
+
+ for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
+ struct mbox_msghdr *pmsg = mdev->mbase + imsg;
+ struct mbox_msghdr *prsp = mdev->mbase + irsp;
+
+ if (msg == pmsg) {
+ if (pmsg->id != prsp->id)
+ return ERR_PTR(-ENODEV);
+ return prsp;
+ }
+
+ imsg = pmsg->next_msgoff;
+ irsp = prsp->next_msgoff;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(otx2_mbox_get_rsp);
+
+int
+otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
+{
+ struct msg_rsp *rsp;
+
+ rsp = (struct msg_rsp *)
+ otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
+ if (!rsp)
+ return -ENOMEM;
+ rsp->hdr.id = id;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.rc = MBOX_MSG_INVALID;
+ rsp->hdr.pcifunc = pcifunc;
+ return 0;
+}
+EXPORT_SYMBOL(otx2_reply_invalid_msg);
+
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
+{
+ struct otx2_mbox_dev *mdev = &mbox->dev[devid];
+ bool ret;
+
+ spin_lock(&mdev->mbox_lock);
+ ret = mdev->num_msgs != 0;
+ spin_unlock(&mdev->mbox_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(otx2_mbox_nonempty);
+
+const char *otx2_mbox_id2name(u16 id)
+{
+ switch (id) {
+#define M(_name, _id, _1, _2) case _id: return # _name;
+ MBOX_MESSAGES
+#undef M
+ default:
+ return "INVALID ID";
+ }
+}
+EXPORT_SYMBOL(otx2_mbox_id2name);
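+
+/* Usage sketch (an assumption about the intended call flow, not code
+ * from this patch): a PF could send a READY request to the AF and wait
+ * for the response roughly as follows ("mbox" and the surrounding error
+ * handling are hypothetical).
+ *
+ *	struct mbox_msghdr *rsp;
+ *	struct msg_req *req;
+ *
+ *	req = (struct msg_req *)otx2_mbox_alloc_msg(mbox, 0, sizeof(*req));
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->hdr.id = MBOX_MSG_READY;
+ *	req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ *	otx2_mbox_msg_send(mbox, 0);
+ *	err = otx2_mbox_wait_for_rsp(mbox, 0);
+ *	if (!err)
+ *		rsp = otx2_mbox_get_rsp(mbox, 0, &req->hdr);
+ */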
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
new file mode 100644
index 000000000000..a15a59c9a239
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MBOX_H
+#define MBOX_H
+
+#include <linux/etherdevice.h>
+#include <linux/sizes.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+
+#define MBOX_SIZE SZ_64K
+
+/* AF/PF: PF initiated, PF/VF VF initiated */
+#define MBOX_DOWN_RX_START 0
+#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
+#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE)
+#define MBOX_DOWN_TX_SIZE (16 * SZ_1K)
+/* AF/PF: AF initiated, PF/VF PF initiated */
+#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE)
+#define MBOX_UP_RX_SIZE SZ_1K
+#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE)
+#define MBOX_UP_TX_SIZE SZ_1K
+
+#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE
+# error "incorrect mailbox area sizes"
+#endif
+
+#define INTR_MASK(pfvfs) (((pfvfs) < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
+
+#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */
+
+#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16 bytes */
+
+/* Mailbox directions */
+#define MBOX_DIR_AFPF 0 /* AF replies to PF */
+#define MBOX_DIR_PFAF 1 /* PF sends messages to AF */
+#define MBOX_DIR_PFVF 2 /* PF replies to VF */
+#define MBOX_DIR_VFPF 3 /* VF sends messages to PF */
+#define MBOX_DIR_AFPF_UP 4 /* AF sends messages to PF */
+#define MBOX_DIR_PFAF_UP 5 /* PF replies to AF */
+#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
+#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+
+struct otx2_mbox_dev {
+ void *mbase; /* This dev's mbox region */
+ spinlock_t mbox_lock;
+ u16 msg_size; /* Total msg size to be sent */
+ u16 rsp_size; /* Total size reserved for responses */
+ u16 num_msgs; /* No of msgs sent or waiting for response */
+ u16 msgs_acked; /* No of msgs for which response is received */
+};
+
+struct otx2_mbox {
+ struct pci_dev *pdev;
+ void *hwbase; /* Mbox region advertised by HW */
+ void *reg_base;/* CSR base for this dev */
+ u64 trigger; /* Trigger mbox notification */
+ u16 tr_shift; /* Mbox trigger shift */
+ u64 rx_start; /* Offset of Rx region in mbox memory */
+ u64 tx_start; /* Offset of Tx region in mbox memory */
+ u16 rx_size; /* Size of Rx region */
+ u16 tx_size; /* Size of Tx region */
+ u16 ndevs; /* The number of peers */
+ struct otx2_mbox_dev *dev;
+};
+
+/* Header which precedes all mbox messages */
+struct mbox_hdr {
+ u16 num_msgs; /* No of msgs embedded */
+};
+
+/* Header which precedes every msg and is also part of it */
+struct mbox_msghdr {
+ u16 pcifunc; /* Who's sending this msg */
+ u16 id; /* Mbox message ID */
+#define OTX2_MBOX_REQ_SIG (0xdead)
+#define OTX2_MBOX_RSP_SIG (0xbeef)
+ u16 sig; /* Signature, for validating corrupted msgs */
+#define OTX2_MBOX_VERSION (0x0001)
+ u16 ver; /* Version of msg's structure for this ID */
+ u16 next_msgoff; /* Offset of next msg within mailbox region */
+ int rc; /* Msg process'ed response code */
+};
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_destroy(struct otx2_mbox *mbox);
+int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs);
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+ int size, int size_rsp);
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+ struct mbox_msghdr *msg);
+int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
+ u16 pcifunc, u16 id);
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
+const char *otx2_mbox_id2name(u16 id);
+static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
+ int devid, int size)
+{
+ return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+}
+
+/* Mailbox message types */
+#define MBOX_MSG_MASK 0xFFFF
+#define MBOX_MSG_INVALID 0xFFFE
+#define MBOX_MSG_MAX 0xFFFF
+
+#define MBOX_MESSAGES \
+/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
+M(READY, 0x001, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
+M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
+M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
+/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
+M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \
+M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \
+M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \
+M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \
+ cgx_mac_addr_set_or_get) \
+M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \
+ cgx_mac_addr_set_or_get) \
+M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \
+M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \
+M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \
+M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \
+M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \
+M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \
+M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
+/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
+M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
+M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
+M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \
+/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
+/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
+/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
+M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \
+M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \
+M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp) \
+M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
+M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_config, msg_rsp) \
+M(NIX_STATS_RST, 0x8007, msg_req, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_config, msg_rsp) \
+M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, msg_rsp) \
+M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, msg_rsp) \
+M(NIX_SET_RX_MODE, 0x800b, nix_rx_mode, msg_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+#define MBOX_UP_CGX_MESSAGES \
+M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp)
+
+enum {
+#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+MBOX_MESSAGES
+MBOX_UP_CGX_MESSAGES
+#undef M
+};
+
+/* Mailbox message formats */
+
+#define RVU_DEFAULT_PF_FUNC 0xFFFF
+
+/* Generic request msg used for those mbox messages which
+ * don't send any data in the request.
+ */
+struct msg_req {
+ struct mbox_msghdr hdr;
+};
+
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
+ */
+struct msg_rsp {
+ struct mbox_msghdr hdr;
+};
+
+struct ready_msg_rsp {
+ struct mbox_msghdr hdr;
+ u16 sclk_feq; /* SCLK frequency */
+};
+
+/* Structure for requesting resource provisioning.
+ * The 'modify' flag is to be used either when requesting more of,
+ * or when detaching part of, a certain resource type.
+ * The rest of the fields specify how many resources of which type
+ * are to be attached.
+ */
+struct rsrc_attach {
+ struct mbox_msghdr hdr;
+ u8 modify:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u16 sso;
+ u16 ssow;
+ u16 timlfs;
+ u16 cptlfs;
+};
+
+/* Structure for relinquishing resources.
+ * The 'partial' flag is to be used when relinquishing all resources
+ * of only certain types. If not set, all resources of all types
+ * provisioned to the RVU function will be detached.
+ */
+struct rsrc_detach {
+ struct mbox_msghdr hdr;
+ u8 partial:1;
+ u8 npalf:1;
+ u8 nixlf:1;
+ u8 sso:1;
+ u8 ssow:1;
+ u8 timlfs:1;
+ u8 cptlfs:1;
+};
+
+#define MSIX_VECTOR_INVALID 0xFFFF
+#define MAX_RVU_BLKLF_CNT 256
+
+struct msix_offset_rsp {
+ struct mbox_msghdr hdr;
+ u16 npa_msixoff;
+ u16 nix_msixoff;
+ u8 sso;
+ u8 ssow;
+ u8 timlfs;
+ u8 cptlfs;
+ u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+};
+
+/* CGX mbox message formats */
+
+struct cgx_stats_rsp {
+ struct mbox_msghdr hdr;
+#define CGX_RX_STATS_COUNT 13
+#define CGX_TX_STATS_COUNT 18
+ u64 rx_stats[CGX_RX_STATS_COUNT];
+ u64 tx_stats[CGX_TX_STATS_COUNT];
+};
+
+/* Structure for requesting the operation for
+ * setting/getting mac address in the CGX interface
+ */
+struct cgx_mac_addr_set_or_get {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct cgx_link_user_info {
+ uint64_t link_up:1;
+ uint64_t full_duplex:1;
+ uint64_t lmac_type_id:4;
+ uint64_t speed:20; /* speed in Mbps */
+#define LMACTYPE_STR_LEN 16
+ char lmac_type[LMACTYPE_STR_LEN];
+};
+
+struct cgx_link_info_msg {
+ struct mbox_msghdr hdr;
+ struct cgx_link_user_info link_info;
+};
+
+/* NPA mbox message formats */
+
+/* NPA mailbox error codes
+ * Range 301 - 400.
+ */
+enum npa_af_status {
+ NPA_AF_ERR_PARAM = -301,
+ NPA_AF_ERR_AQ_FULL = -302,
+ NPA_AF_ERR_AQ_ENQUEUE = -303,
+ NPA_AF_ERR_AF_LF_INVALID = -304,
+ NPA_AF_ERR_AF_LF_ALLOC = -305,
+ NPA_AF_ERR_LF_RESET = -306,
+};
+
+/* For NPA LF context alloc and init */
+struct npa_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ int aura_sz; /* No of auras */
+ u32 nr_pools; /* No of pools */
+};
+
+struct npa_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u32 stack_pg_ptrs; /* No of ptrs per stack page */
+ u32 stack_pg_bytes; /* Size of stack page */
+ u16 qints; /* NPA_AF_CONST::QINTS */
+};
+
+/* NPA AQ enqueue msg */
+struct npa_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 aura_id;
+ u8 ctype;
+ u8 op;
+ union {
+ /* Valid when op == WRITE/INIT and ctype == AURA.
+ * LF fills the pool_id in aura.pool_addr. AF will translate
+ * the pool_id to pool context pointer.
+ */
+ struct npa_aura_s aura;
+ /* Valid when op == WRITE/INIT and ctype == POOL */
+ struct npa_pool_s pool;
+ };
+ /* Mask data when op == WRITE (1=write, 0=don't write) */
+ union {
+ /* Valid when op == WRITE and ctype == AURA */
+ struct npa_aura_s aura_mask;
+ /* Valid when op == WRITE and ctype == POOL */
+ struct npa_pool_s pool_mask;
+ };
+};
+
+struct npa_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ /* Valid when op == READ and ctype == AURA */
+ struct npa_aura_s aura;
+ /* Valid when op == READ and ctype == POOL */
+ struct npa_pool_s pool;
+ };
+};
+
+/* Disable all contexts of type 'ctype' */
+struct hwctx_disable_req {
+ struct mbox_msghdr hdr;
+ u8 ctype;
+};
+
+/* NIX mailbox error codes
+ * Range 401 - 500.
+ */
+enum nix_af_status {
+ NIX_AF_ERR_PARAM = -401,
+ NIX_AF_ERR_AQ_FULL = -402,
+ NIX_AF_ERR_AQ_ENQUEUE = -403,
+ NIX_AF_ERR_AF_LF_INVALID = -404,
+ NIX_AF_ERR_AF_LF_ALLOC = -405,
+ NIX_AF_ERR_TLX_ALLOC_FAIL = -406,
+ NIX_AF_ERR_TLX_INVALID = -407,
+ NIX_AF_ERR_RSS_SIZE_INVALID = -408,
+ NIX_AF_ERR_RSS_GRPS_INVALID = -409,
+ NIX_AF_ERR_FRS_INVALID = -410,
+ NIX_AF_ERR_RX_LINK_INVALID = -411,
+ NIX_AF_INVAL_TXSCHQ_CFG = -412,
+ NIX_AF_SMQ_FLUSH_FAILED = -413,
+ NIX_AF_ERR_LF_RESET = -414,
+};
+
+/* For NIX LF context alloc and init */
+struct nix_lf_alloc_req {
+ struct mbox_msghdr hdr;
+ int node;
+ u32 rq_cnt; /* No of receive queues */
+ u32 sq_cnt; /* No of send queues */
+ u32 cq_cnt; /* No of completion queues */
+ u8 xqe_sz;
+ u16 rss_sz;
+ u8 rss_grps;
+ u16 npa_func;
+ u16 sso_func;
+ u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
+};
+
+struct nix_lf_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 sqb_size;
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u8 rx_chan_cnt; /* total number of RX channels */
+ u8 tx_chan_cnt; /* total number of TX channels */
+ u8 lso_tsov4_idx;
+ u8 lso_tsov6_idx;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* NIX AQ enqueue msg */
+struct nix_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_rq_ctx_s rq;
+ struct nix_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ };
+ union {
+ struct nix_rq_ctx_s rq_mask;
+ struct nix_sq_ctx_s sq_mask;
+ struct nix_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ };
+};
+
+struct nix_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_rq_ctx_s rq;
+ struct nix_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ };
+};
+
+/* Tx scheduler/shaper mailbox messages */
+
+#define MAX_TXSCHQ_PER_FUNC 128
+
+struct nix_txsch_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count request at each level */
+ u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */
+ u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */
+};
+
+struct nix_txsch_alloc_rsp {
+ struct mbox_msghdr hdr;
+ /* Scheduler queue count allocated at each level */
+ u16 schq_contig[NIX_TXSCH_LVL_CNT];
+ u16 schq[NIX_TXSCH_LVL_CNT];
+ /* Scheduler queue list allocated at each level */
+ u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+};
+
+struct nix_txsch_free_req {
+ struct mbox_msghdr hdr;
+#define TXSCHQ_FREE_ALL BIT_ULL(0)
+ u16 flags;
+ /* Scheduler queue level to be freed */
+ u16 schq_lvl;
+ /* List of scheduler queues to be freed */
+ u16 schq;
+};
+
+struct nix_txschq_config {
+ struct mbox_msghdr hdr;
+ u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+#define TXSCHQ_IDX_SHIFT 16
+#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
+#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
+ u8 num_regs;
+#define MAX_REGS_PER_MBOX_MSG 20
+ u64 reg[MAX_REGS_PER_MBOX_MSG];
+ u64 regval[MAX_REGS_PER_MBOX_MSG];
+};
+
+struct nix_vtag_config {
+ struct mbox_msghdr hdr;
+ u8 vtag_size;
+ /* cfg_type is '0' for tx vlan cfg
+ * cfg_type is '1' for rx vlan cfg
+ */
+ u8 cfg_type;
+ union {
+ /* valid when cfg_type is '0' */
+ struct {
+ /* tx vlan0 tag(C-VLAN) */
+ u64 vlan0;
+ /* tx vlan1 tag(S-VLAN) */
+ u64 vlan1;
+ /* insert tx vlan tag */
+ u8 insert_vlan :1;
+ /* insert tx double vlan tag */
+ u8 double_vlan :1;
+ } tx;
+
+ /* valid when cfg_type is '1' */
+ struct {
+ /* rx vtag type index */
+ u8 vtag_type;
+ /* rx vtag strip */
+ u8 strip_vtag :1;
+ /* rx vtag capture */
+ u8 capture_vtag :1;
+ } rx;
+ };
+};
+
+struct nix_rss_flowkey_cfg {
+ struct mbox_msghdr hdr;
+ int mcam_index; /* MCAM entry index to modify */
+ u32 flowkey_cfg; /* Flowkey types selected */
+ u8 group; /* RSS context or group */
+};
+
+struct nix_set_mac_addr {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
+};
+
+struct nix_rx_mode {
+ struct mbox_msghdr hdr;
+#define NIX_RX_MODE_UCAST BIT(0)
+#define NIX_RX_MODE_PROMISC BIT(1)
+#define NIX_RX_MODE_ALLMULTI BIT(2)
+ u16 mode;
+};
+
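+/* Illustrative note: MBOX_MESSAGES is an X-macro table. Each consumer
+ * defines M() to pick the columns it needs and then expands the table.
+ * The enum above uses
+ *	#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+ * so that M(READY, 0x001, msg_req, ready_msg_rsp) expands to
+ *	MBOX_MSG_READY = 0x001,
+ * while otx2_mbox_id2name() in mbox.c redefines M() to map an ID back
+ * to its name string.
+ */
+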
+#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
new file mode 100644
index 000000000000..f98b0113def3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef NPC_H
+#define NPC_H
+
+enum NPC_LID_E {
+ NPC_LID_LA = 0,
+ NPC_LID_LB,
+ NPC_LID_LC,
+ NPC_LID_LD,
+ NPC_LID_LE,
+ NPC_LID_LF,
+ NPC_LID_LG,
+ NPC_LID_LH,
+};
+
+#define NPC_LT_NA 0
+
+enum npc_kpu_la_ltype {
+ NPC_LT_LA_8023 = 1,
+ NPC_LT_LA_ETHER,
+};
+
+enum npc_kpu_lb_ltype {
+ NPC_LT_LB_ETAG = 1,
+ NPC_LT_LB_CTAG,
+ NPC_LT_LB_STAG,
+ NPC_LT_LB_BTAG,
+ NPC_LT_LB_QINQ,
+ NPC_LT_LB_ITAG,
+};
+
+enum npc_kpu_lc_ltype {
+ NPC_LT_LC_IP = 1,
+ NPC_LT_LC_IP6,
+ NPC_LT_LC_ARP,
+ NPC_LT_LC_RARP,
+ NPC_LT_LC_MPLS,
+ NPC_LT_LC_NSH,
+ NPC_LT_LC_PTP,
+ NPC_LT_LC_FCOE,
+};
+
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
+ */
+enum npc_kpu_ld_ltype {
+ NPC_LT_LD_TCP = 1,
+ NPC_LT_LD_UDP,
+ NPC_LT_LD_ICMP,
+ NPC_LT_LD_SCTP,
+ NPC_LT_LD_IGMP,
+ NPC_LT_LD_ICMP6,
+ NPC_LT_LD_ESP,
+ NPC_LT_LD_AH,
+ NPC_LT_LD_GRE,
+ NPC_LT_LD_GRE_MPLS,
+ NPC_LT_LD_GRE_NSH,
+ NPC_LT_LD_TU_MPLS,
+};
+
+enum npc_kpu_le_ltype {
+ NPC_LT_LE_TU_ETHER = 1,
+ NPC_LT_LE_TU_PPP,
+ NPC_LT_LE_TU_MPLS_IN_NSH,
+ NPC_LT_LE_TU_3RD_NSH,
+};
+
+enum npc_kpu_lf_ltype {
+ NPC_LT_LF_TU_IP = 1,
+ NPC_LT_LF_TU_IP6,
+ NPC_LT_LF_TU_ARP,
+ NPC_LT_LF_TU_MPLS_IP,
+ NPC_LT_LF_TU_MPLS_IP6,
+ NPC_LT_LF_TU_MPLS_ETHER,
+};
+
+enum npc_kpu_lg_ltype {
+ NPC_LT_LG_TU_TCP = 1,
+ NPC_LT_LG_TU_UDP,
+ NPC_LT_LG_TU_SCTP,
+ NPC_LT_LG_TU_ICMP,
+ NPC_LT_LG_TU_IGMP,
+ NPC_LT_LG_TU_ICMP6,
+ NPC_LT_LG_TU_ESP,
+ NPC_LT_LG_TU_AH,
+};
+
+enum npc_kpu_lh_ltype {
+ NPC_LT_LH_TCP_DATA = 1,
+ NPC_LT_LH_HTTP_DATA,
+ NPC_LT_LH_HTTPS_DATA,
+ NPC_LT_LH_PPTP_DATA,
+ NPC_LT_LH_UDP_DATA,
+};
+
+struct npc_kpu_profile_cam {
+ u8 state;
+ u8 state_mask;
+ u16 dp0;
+ u16 dp0_mask;
+ u16 dp1;
+ u16 dp1_mask;
+ u16 dp2;
+ u16 dp2_mask;
+};
+
+struct npc_kpu_profile_action {
+ u8 errlev;
+ u8 errcode;
+ u8 dp0_offset;
+ u8 dp1_offset;
+ u8 dp2_offset;
+ u8 bypass_count;
+ u8 parse_done;
+ u8 next_state;
+ u8 ptr_advance;
+ u8 cap_ena;
+ u8 lid;
+ u8 ltype;
+ u8 flags;
+ u8 offset;
+ u8 mask;
+ u8 right;
+ u8 shift;
+};
+
+struct npc_kpu_profile {
+ int cam_entries;
+ int action_entries;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
+};
+
+/* NPC KPU register formats */
+struct npc_kpu_cam {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_56 : 8;
+ u64 state : 8;
+ u64 dp2_data : 16;
+ u64 dp1_data : 16;
+ u64 dp0_data : 16;
+#else
+ u64 dp0_data : 16;
+ u64 dp1_data : 16;
+ u64 dp2_data : 16;
+ u64 state : 8;
+ u64 rsvd_63_56 : 8;
+#endif
+};
+
+struct npc_kpu_action0 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_57 : 7;
+ u64 byp_count : 3;
+ u64 capture_ena : 1;
+ u64 parse_done : 1;
+ u64 next_state : 8;
+ u64 rsvd_43 : 1;
+ u64 capture_lid : 3;
+ u64 capture_ltype : 4;
+ u64 capture_flags : 8;
+ u64 ptr_advance : 8;
+ u64 var_len_offset : 8;
+ u64 var_len_mask : 8;
+ u64 var_len_right : 1;
+ u64 var_len_shift : 3;
+#else
+ u64 var_len_shift : 3;
+ u64 var_len_right : 1;
+ u64 var_len_mask : 8;
+ u64 var_len_offset : 8;
+ u64 ptr_advance : 8;
+ u64 capture_flags : 8;
+ u64 capture_ltype : 4;
+ u64 capture_lid : 3;
+ u64 rsvd_43 : 1;
+ u64 next_state : 8;
+ u64 parse_done : 1;
+ u64 capture_ena : 1;
+ u64 byp_count : 3;
+ u64 rsvd_63_57 : 7;
+#endif
+};
+
+struct npc_kpu_action1 {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_36 : 28;
+ u64 errlev : 4;
+ u64 errcode : 8;
+ u64 dp2_offset : 8;
+ u64 dp1_offset : 8;
+ u64 dp0_offset : 8;
+#else
+ u64 dp0_offset : 8;
+ u64 dp1_offset : 8;
+ u64 dp2_offset : 8;
+ u64 errcode : 8;
+ u64 errlev : 4;
+ u64 rsvd_63_36 : 28;
+#endif
+};
+
+struct npc_kpu_pkind_cpi_def {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 ena : 1;
+ u64 rsvd_62_59 : 4;
+ u64 lid : 3;
+ u64 ltype_match : 4;
+ u64 ltype_mask : 4;
+ u64 flags_match : 8;
+ u64 flags_mask : 8;
+ u64 add_offset : 8;
+ u64 add_mask : 8;
+ u64 rsvd_15 : 1;
+ u64 add_shift : 3;
+ u64 rsvd_11_10 : 2;
+ u64 cpi_base : 10;
+#else
+ u64 cpi_base : 10;
+ u64 rsvd_11_10 : 2;
+ u64 add_shift : 3;
+ u64 rsvd_15 : 1;
+ u64 add_mask : 8;
+ u64 add_offset : 8;
+ u64 flags_mask : 8;
+ u64 flags_match : 8;
+ u64 ltype_mask : 4;
+ u64 ltype_match : 4;
+ u64 lid : 3;
+ u64 rsvd_62_59 : 4;
+ u64 ena : 1;
+#endif
+};
+
+struct nix_rx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_61 :3;
+ u64 flow_key_alg :5;
+ u64 match_id :16;
+ u64 index :20;
+ u64 pf_func :16;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 pf_func :16;
+ u64 index :20;
+ u64 match_id :16;
+ u64 flow_key_alg :5;
+ u64 rsvd_63_61 :3;
+#endif
+};
+
+#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
new file mode 100644
index 000000000000..b2ce957605bb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -0,0 +1,5709 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef NPC_PROFILE_H
+#define NPC_PROFILE_H
+
+#define NPC_ETYPE_IP 0x0800
+#define NPC_ETYPE_IP6 0x86dd
+#define NPC_ETYPE_ARP 0x0806
+#define NPC_ETYPE_RARP 0x8035
+#define NPC_ETYPE_MPLSU 0x8847
+#define NPC_ETYPE_MPLSM 0x8848
+#define NPC_ETYPE_ETAG 0x893f
+#define NPC_ETYPE_CTAG 0x8100
+#define NPC_ETYPE_SBTAG 0x88a8
+#define NPC_ETYPE_ITAG 0x88e7
+#define NPC_ETYPE_PTP 0x88f7
+#define NPC_ETYPE_FCOE 0x8906
+#define NPC_ETYPE_QINQ 0x9100
+#define NPC_ETYPE_TRANS_ETH_BR 0x6558
+#define NPC_ETYPE_PPP 0x880b
+#define NPC_ETYPE_NSH 0x894f
+
+#define NPC_IPNH_HOP 0
+#define NPC_IPNH_ICMP 1
+#define NPC_IPNH_IGMP 2
+#define NPC_IPNH_IP 4
+#define NPC_IPNH_TCP 6
+#define NPC_IPNH_UDP 17
+#define NPC_IPNH_IP6 41
+#define NPC_IPNH_ROUT 43
+#define NPC_IPNH_FRAG 44
+#define NPC_IPNH_GRE 47
+#define NPC_IPNH_ESP 50
+#define NPC_IPNH_AH 51
+#define NPC_IPNH_ICMP6 58
+#define NPC_IPNH_NONH 59
+#define NPC_IPNH_DEST 60
+#define NPC_IPNH_SCTP 132
+#define NPC_IPNH_MPLS 137
+
+#define NPC_UDP_PORT_GTPC 2123
+#define NPC_UDP_PORT_GTPU 2152
+#define NPC_UDP_PORT_VXLAN 4789
+#define NPC_UDP_PORT_VXLANGPE 4790
+#define NPC_UDP_PORT_GENEVE 6081
+
+#define NPC_VXLANGPE_NP_IP 0x1
+#define NPC_VXLANGPE_NP_IP6 0x2
+#define NPC_VXLANGPE_NP_ETH 0x3
+#define NPC_VXLANGPE_NP_NSH 0x4
+#define NPC_VXLANGPE_NP_MPLS 0x5
+#define NPC_VXLANGPE_NP_GBP 0x6
+#define NPC_VXLANGPE_NP_VBNG 0x7
+
+#define NPC_NSH_NP_IP 0x1
+#define NPC_NSH_NP_IP6 0x2
+#define NPC_NSH_NP_ETH 0x3
+#define NPC_NSH_NP_NSH 0x4
+#define NPC_NSH_NP_MPLS 0x5
+
+#define NPC_TCP_PORT_HTTP 80
+#define NPC_TCP_PORT_HTTPS 443
+#define NPC_TCP_PORT_PPTP 1723
+
+#define NPC_MPLS_S 0x0100
+
+#define NPC_IP_VER_4 0x4000
+#define NPC_IP_VER_6 0x6000
+#define NPC_IP_VER_MASK 0xf000
+#define NPC_IP_HDR_LEN_5 0x0500
+#define NPC_IP_HDR_LEN_MASK 0x0f00
+
+#define NPC_GRE_F_CSUM (0x1 << 15)
+#define NPC_GRE_F_ROUTE (0x1 << 14)
+#define NPC_GRE_F_KEY (0x1 << 13)
+#define NPC_GRE_F_SEQ (0x1 << 12)
+#define NPC_GRE_F_ACK (0x1 << 7)
+#define NPC_GRE_FLAG_MASK (NPC_GRE_F_CSUM | NPC_GRE_F_ROUTE | \
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK)
+#define NPC_GRE_VER_MASK 0x0003
+#define NPC_GRE_VER_1 0x0001
+
+#define NPC_VXLAN_I 0x0800
+
+#define NPC_VXLANGPE_VER (0x3 << 12)
+#define NPC_VXLANGPE_I (0x1 << 11)
+#define NPC_VXLANGPE_P (0x1 << 10)
+#define NPC_VXLANGPE_B (0x1 << 9)
+#define NPC_VXLANGPE_NP_MASK 0x00ff
+
+#define NPC_NSH_NP_MASK 0x00ff
+
+#define NPC_GENEVE_F_OAM (0x1 << 7)
+#define NPC_GENEVE_F_CRI_OPT (0x1 << 6)
+
+#define NPC_GTP_PT_GTP (0x1 << 12)
+#define NPC_GTP_PT_MASK (0x1 << 12)
+#define NPC_GTP_VER1 (0x1 << 13)
+#define NPC_GTP_VER_MASK (0x7 << 13)
+#define NPC_GTP_MT_G_PDU 0xff
+#define NPC_GTP_MT_MASK 0xff
+
+#define NPC_TCP_DATA_OFFSET_5 0x5000
+#define NPC_TCP_DATA_OFFSET_MASK 0xf000
+
+enum npc_kpu_parser_state {
+ NPC_S_NA = 0,
+ NPC_S_KPU1_ETHER,
+ NPC_S_KPU1_PKI,
+ NPC_S_KPU2_CTAG,
+ NPC_S_KPU2_SBTAG,
+ NPC_S_KPU2_QINQ,
+ NPC_S_KPU2_ETAG,
+ NPC_S_KPU2_ITAG,
+ NPC_S_KPU3_CTAG,
+ NPC_S_KPU3_STAG,
+ NPC_S_KPU3_QINQ,
+ NPC_S_KPU3_ITAG,
+ NPC_S_KPU4_MPLS,
+ NPC_S_KPU4_NSH,
+ NPC_S_KPU5_IP,
+ NPC_S_KPU5_IP6,
+ NPC_S_KPU5_ARP,
+ NPC_S_KPU5_RARP,
+ NPC_S_KPU5_PTP,
+ NPC_S_KPU5_FCOE,
+ NPC_S_KPU5_MPLS,
+ NPC_S_KPU5_MPLS_PL,
+ NPC_S_KPU5_NSH,
+ NPC_S_KPU6_IP6_EXT,
+ NPC_S_KPU7_IP6_EXT,
+ NPC_S_KPU8_TCP,
+ NPC_S_KPU8_UDP,
+ NPC_S_KPU8_SCTP,
+ NPC_S_KPU8_ICMP,
+ NPC_S_KPU8_IGMP,
+ NPC_S_KPU8_ICMP6,
+ NPC_S_KPU8_GRE,
+ NPC_S_KPU8_ESP,
+ NPC_S_KPU8_AH,
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN,
+ NPC_S_KPU9_TU_MPLS,
+ NPC_S_KPU9_TU_NSH,
+ NPC_S_KPU10_TU_MPLS_PL,
+ NPC_S_KPU10_TU_MPLS,
+ NPC_S_KPU10_TU_NSH,
+ NPC_S_KPU11_TU_ETHER,
+ NPC_S_KPU11_TU_PPP,
+ NPC_S_KPU11_TU_MPLS_IN_NSH,
+ NPC_S_KPU11_TU_3RD_NSH,
+ NPC_S_KPU12_TU_IP,
+ NPC_S_KPU12_TU_IP6,
+ NPC_S_KPU12_TU_ARP,
+ NPC_S_KPU13_TU_IP6_EXT,
+ NPC_S_KPU14_TU_IP6_EXT,
+ NPC_S_KPU15_TU_TCP,
+ NPC_S_KPU15_TU_UDP,
+ NPC_S_KPU15_TU_SCTP,
+ NPC_S_KPU15_TU_ICMP,
+ NPC_S_KPU15_TU_IGMP,
+ NPC_S_KPU15_TU_ICMP6,
+ NPC_S_KPU15_TU_ESP,
+ NPC_S_KPU15_TU_AH,
+ NPC_S_KPU16_HTTP_DATA,
+ NPC_S_KPU16_HTTPS_DATA,
+ NPC_S_KPU16_PPTP_DATA,
+ NPC_S_KPU16_TCP_DATA,
+ NPC_S_KPU16_UDP_DATA,
+ NPC_S_LAST /* has to be the last item */
+};
+
+enum npc_kpu_parser_flag {
+ NPC_F_NA = 0,
+ NPC_F_PKI,
+ NPC_F_PKI_VLAN,
+ NPC_F_PKI_ETAG,
+ NPC_F_PKI_ITAG,
+ NPC_F_PKI_MPLS,
+ NPC_F_PKI_NSH,
+ NPC_F_ETYPE_UNK,
+ NPC_F_ETHER_VLAN,
+ NPC_F_ETHER_ETAG,
+ NPC_F_ETHER_ITAG,
+ NPC_F_ETHER_MPLS,
+ NPC_F_ETHER_NSH,
+ NPC_F_STAG_CTAG,
+ NPC_F_STAG_CTAG_UNK,
+ NPC_F_STAG_STAG_CTAG,
+ NPC_F_STAG_STAG_STAG,
+ NPC_F_QINQ_CTAG,
+ NPC_F_QINQ_CTAG_UNK,
+ NPC_F_QINQ_QINQ_CTAG,
+ NPC_F_QINQ_QINQ_QINQ,
+ NPC_F_BTAG_ITAG,
+ NPC_F_BTAG_ITAG_STAG,
+ NPC_F_BTAG_ITAG_CTAG,
+ NPC_F_BTAG_ITAG_UNK,
+ NPC_F_ETAG_CTAG,
+ NPC_F_ETAG_BTAG_ITAG,
+ NPC_F_ETAG_STAG,
+ NPC_F_ETAG_QINQ,
+ NPC_F_ETAG_ITAG,
+ NPC_F_ETAG_ITAG_STAG,
+ NPC_F_ETAG_ITAG_CTAG,
+ NPC_F_ETAG_ITAG_UNK,
+ NPC_F_ITAG_STAG_CTAG,
+ NPC_F_ITAG_STAG,
+ NPC_F_ITAG_CTAG,
+ NPC_F_MPLS_4_LABELS,
+ NPC_F_MPLS_3_LABELS,
+ NPC_F_MPLS_2_LABELS,
+ NPC_F_IP_HAS_OPTIONS,
+ NPC_F_IP_IP_IN_IP,
+ NPC_F_IP_6TO4,
+ NPC_F_IP_MPLS_IN_IP,
+ NPC_F_IP_UNK_PROTO,
+ NPC_F_IP_IP_IN_IP_HAS_OPTIONS,
+ NPC_F_IP_6TO4_HAS_OPTIONS,
+ NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
+ NPC_F_IP_UNK_PROTO_HAS_OPTIONS,
+ NPC_F_IP6_HAS_EXT,
+ NPC_F_IP6_TUN_IP6,
+ NPC_F_IP6_MPLS_IN_IP,
+ NPC_F_TCP_HAS_OPTIONS,
+ NPC_F_TCP_HTTP,
+ NPC_F_TCP_HTTPS,
+ NPC_F_TCP_PPTP,
+ NPC_F_TCP_UNK_PORT,
+ NPC_F_TCP_HTTP_HAS_OPTIONS,
+ NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ NPC_F_TCP_PPTP_HAS_OPTIONS,
+ NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_F_UDP_VXLAN,
+ NPC_F_UDP_VXLAN_NOVNI,
+ NPC_F_UDP_VXLAN_NOVNI_NSH,
+ NPC_F_UDP_VXLANGPE,
+ NPC_F_UDP_VXLANGPE_NSH,
+ NPC_F_UDP_VXLANGPE_MPLS,
+ NPC_F_UDP_VXLANGPE_NOVNI,
+ NPC_F_UDP_VXLANGPE_NOVNI_NSH,
+ NPC_F_UDP_VXLANGPE_NOVNI_MPLS,
+ NPC_F_UDP_VXLANGPE_UNK,
+ NPC_F_UDP_VXLANGPE_NONP,
+ NPC_F_UDP_GTP_GTPC,
+ NPC_F_UDP_GTP_GTPU_G_PDU,
+ NPC_F_UDP_GTP_GTPU_UNK,
+ NPC_F_UDP_UNK_PORT,
+ NPC_F_UDP_GENEVE,
+ NPC_F_UDP_GENEVE_OAM,
+ NPC_F_UDP_GENEVE_CRI_OPT,
+ NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ NPC_F_GRE_NVGRE,
+ NPC_F_GRE_HAS_SRE,
+ NPC_F_GRE_HAS_CSUM,
+ NPC_F_GRE_HAS_KEY,
+ NPC_F_GRE_HAS_SEQ,
+ NPC_F_GRE_HAS_CSUM_KEY,
+ NPC_F_GRE_HAS_CSUM_SEQ,
+ NPC_F_GRE_HAS_KEY_SEQ,
+ NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_F_GRE_HAS_ROUTE,
+ NPC_F_GRE_UNK_PROTO,
+ NPC_F_GRE_VER1,
+ NPC_F_GRE_VER1_HAS_SEQ,
+ NPC_F_GRE_VER1_HAS_ACK,
+ NPC_F_GRE_VER1_HAS_SEQ_ACK,
+ NPC_F_GRE_VER1_UNK_PROTO,
+ NPC_F_TU_ETHER_UNK,
+ NPC_F_TU_ETHER_CTAG,
+ NPC_F_TU_ETHER_CTAG_UNK,
+ NPC_F_TU_ETHER_STAG_CTAG,
+ NPC_F_TU_ETHER_STAG_CTAG_UNK,
+ NPC_F_TU_ETHER_STAG,
+ NPC_F_TU_ETHER_STAG_UNK,
+ NPC_F_TU_ETHER_QINQ_CTAG,
+ NPC_F_TU_ETHER_QINQ_CTAG_UNK,
+ NPC_F_TU_ETHER_QINQ,
+ NPC_F_TU_ETHER_QINQ_UNK,
+ NPC_F_LAST /* has to be the last item */
+};
+
+enum npc_kpu_err_code {
+ NPC_EC_NOERR = 0, /* has to be zero */
+ NPC_EC_UNK,
+ NPC_EC_L2_K1,
+ NPC_EC_L2_K2,
+ NPC_EC_L2_K3,
+ NPC_EC_L2_K3_ETYPE_UNK,
+ NPC_EC_L2_MPLS_2MANY,
+ NPC_EC_L2_K4,
+ NPC_EC_IP_VER,
+ NPC_EC_IP6_VER,
+ NPC_EC_VXLAN,
+ NPC_EC_NVGRE,
+ NPC_EC_GRE,
+ NPC_EC_GRE_VER1,
+ NPC_EC_L4,
+ NPC_EC_LAST /* has to be the last item */
+};
+
+enum NPC_ERRLEV_E {
+ NPC_ERRLEV_RE = 0,
+ NPC_ERRLEV_LA = 1,
+ NPC_ERRLEV_LB = 2,
+ NPC_ERRLEV_LC = 3,
+ NPC_ERRLEV_LD = 4,
+ NPC_ERRLEV_LE = 5,
+ NPC_ERRLEV_LF = 6,
+ NPC_ERRLEV_LG = 7,
+ NPC_ERRLEV_LH = 8,
+ NPC_ERRLEV_R9 = 9,
+ NPC_ERRLEV_R10 = 10,
+ NPC_ERRLEV_R11 = 11,
+ NPC_ERRLEV_R12 = 12,
+ NPC_ERRLEV_R13 = 13,
+ NPC_ERRLEV_R14 = 14,
+ NPC_ERRLEV_NIX = 15,
+ NPC_ERRLEV_ENUM_LAST = 16,
+};
+
+static struct npc_kpu_profile_action ikpu_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 14, 16,
+ 0, 0, NPC_S_KPU1_ETHER, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 1, 0xff,
+ 0, 0,
+ },
+};
+
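+/*
+ * KPU CAM match tables. Each row is one struct npc_kpu_profile_cam:
+ * a parser state plus its mask, followed by three 16-bit match/mask
+ * pairs (dp0/dp1/dp2) compared against the packet bytes the previous
+ * stage pointed at. A row with all-zero masks is the stage's
+ * catch-all. For KPU1 below, dp0 lands on the Ethertype, so the rows
+ * fan out to IP/IPv6/ARP/VLAN/MPLS/NSH states; the 0x0000/0xfc00 and
+ * 0x0400/0xfe00 rows match 802.3 length fields rather than Ethertypes.
+ */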
+static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ETAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, 0x0000, 0xfc00,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, 0x0400, 0xfe00,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ETAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0010, 0x0010, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0010, 0x0010, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU1_PKI, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_SBTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_RARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_PTP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_FCOE, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_MPLSU, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_MPLSM, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_NSH, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_SBTAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_QINQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_QINQ, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ITAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_SBTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, NPC_ETYPE_CTAG, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, NPC_ETYPE_ITAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ETAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU2_ITAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_CTAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_STAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_RARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_PTP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_FCOE, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSU, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_MPLSM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_NSH, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_PTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_FCOE, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_QINQ, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_RARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU3_ITAG, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
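+/*
+ * KPU4 resolves MPLS label stacks and NSH. The MPLS rows test the
+ * bottom-of-stack bit (NPC_MPLS_S) across successive labels to learn
+ * how many labels precede the payload; the NSH rows dispatch on the
+ * NSH next-protocol field.
+ */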
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+ {
+ NPC_S_KPU4_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU4_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
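+/*
+ * KPU5 parses the outer L3 header. IPv4 rows match the protocol byte
+ * in dp0 and the version/IHL nibbles in dp1, so the IHL==5 variants
+ * take the options-free fast path and the version-only variants cover
+ * headers carrying IP options. IPv6 rows match the next-header byte
+ * in the high half of dp0 (hence the << 8 with an 0xff00 mask).
+ */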
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_GRE, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_IP6, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, NPC_IPNH_MPLS, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_ARP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_RARP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_PTP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_FCOE, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_GRE << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_IP6 << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, NPC_IPNH_MPLS << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_MPLS_PL, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU5_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
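+/*
+ * KPU6 and KPU7 are reserved for walking IPv6 extension headers; in
+ * this initial profile each holds a single catch-all row.
+ */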
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+ {
+ NPC_S_KPU6_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+ {
+ NPC_S_KPU7_IP6_EXT, 0xff, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000,
+ },
+};
+
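+/*
+ * KPU8 classifies L4 and tunnel headers: TCP rows key on well-known
+ * ports plus the data-offset field, UDP rows on the destination port
+ * (VXLAN, VXLAN-GPE, Geneve, GTP) with tunnel header flags in
+ * dp1/dp2, and GRE rows on the GRE protocol field with each
+ * csum/key/seq flag combination listed separately, since the optional
+ * fields change the GRE header length.
+ */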
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_TCP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+ NPC_VXLAN_I, NPC_VXLAN_I, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLAN, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_IP6, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_ETH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_NSH, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P | NPC_VXLANGPE_I,
+ NPC_VXLANGPE_NP_MPLS, NPC_VXLANGPE_NP_MASK,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ NPC_VXLANGPE_P, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_VXLANGPE, 0xffff,
+ 0x0000, NPC_VXLANGPE_P, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_CRI_OPT, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ 0x0000, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM, NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GENEVE, 0xffff,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT,
+ NPC_GENEVE_F_OAM | NPC_GENEVE_F_CRI_OPT, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPC, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
+ 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, NPC_UDP_PORT_GTPU, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_SCTP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_IGMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_ICMP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_ESP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_AH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_TRANS_ETH_BR, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSU, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_MPLSM, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_NSH, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_IP6, 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ NPC_GRE_F_ROUTE, 0x4fff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x4fff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x0003, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_VER_1, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, NPC_ETYPE_PPP, 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
+ 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x2001, 0xef7f, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff,
+ 0x0001, 0x0003, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0X00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
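+/*
+ * KPU9 onwards repeat the outer-header stages for tunnelled (TU_*)
+ * payloads: MPLS/NSH again in KPU9/KPU10, the inner Ethernet header
+ * in KPU11, inner IP/IPv6 in KPU12 (extension headers in KPU13/14),
+ * the inner L4 header in KPU15, and terminal data states in KPU16.
+ */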
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+ NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ NPC_MPLS_S, NPC_MPLS_S, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, NPC_MPLS_S, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, NPC_MPLS_S, 0x0000, NPC_MPLS_S,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU9_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, NPC_MPLS_S, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS, 0xff, 0x0000, NPC_MPLS_S,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_4, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, NPC_IP_VER_6, NPC_IP_VER_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_MPLS_PL, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_IP6, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_ETH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_NSH, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU10_TU_NSH, 0xff, NPC_NSH_NP_MPLS, NPC_NSH_NP_MASK,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_IP6, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_ARP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_CTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_SBTAG, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_IP6, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_ARP, 0xffff,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_CTAG, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_IP6, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ NPC_ETYPE_ARP, 0xffff, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, NPC_ETYPE_QINQ, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_ETHER, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_PPP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_MPLS_IN_NSH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU11_TU_3RD_NSH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_TCP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_UDP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_SCTP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ICMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_IGMP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_ESP, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, NPC_IPNH_AH, 0x00ff,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_4, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_ARP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_TCP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_UDP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_SCTP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ICMP6 << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_ESP << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, NPC_IPNH_AH << 8, 0xff00,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
+ NPC_IP_VER_6, NPC_IP_VER_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU12_TU_IP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+ {
+ NPC_S_KPU13_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+ {
+ NPC_S_KPU14_TU_IP6_EXT, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
+ NPC_TCP_DATA_OFFSET_5, NPC_TCP_DATA_OFFSET_MASK, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_HTTPS, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, NPC_TCP_PORT_PPTP, 0xffff,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_TCP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_UDP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_SCTP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_IGMP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ICMP6, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_ESP, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU15_TU_AH, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_NA, 0x00, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+ {
+ NPC_S_KPU16_TCP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_HTTPS_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_PPTP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+ {
+ NPC_S_KPU16_UDP_DATA, 0xff, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ },
+};
+
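+/*
+ * KPU action tables. Each table pairs 1:1, row for row, with the CAM
+ * table of the same KPU: when CAM row N matches, action row N supplies
+ * the error level/code, the offsets to load into the next stage's
+ * dp0-dp2, the bypass count and next parser state, the pointer advance
+ * past the current header, and the layer id/type/flags recorded in
+ * the parse result.
+ */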
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_CTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
+ 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_QINQ, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
+ 0, 0, NPC_S_KPU2_ETAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ETAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+ 0, 0, NPC_S_KPU2_ITAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU4_NSH, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETHER_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_8023, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_ARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_RARP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_PTP, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU5_FCOE, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_CTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 20,
+ 0, 0, NPC_S_KPU2_SBTAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU2_QINQ, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_VLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 10, 24,
+ 0, 0, NPC_S_KPU2_ETAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ETAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+ 0, 0, NPC_S_KPU2_ITAG, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_MPLS, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 2, 0, NPC_S_KPU4_NSH, 14, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_PKI_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LA, NPC_EC_L2_K1, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
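+/* KPU2: first tag layer - CTAG, STAG, B-TAG/I-TAG, QinQ and E-TAG (layer B). */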
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_CTAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_STAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_STAG_STAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_STAG, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_BTAG_ITAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_CTAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_QINQ, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_QINQ_QINQ_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 1, 0, NPC_S_KPU4_NSH, 4, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_QINQ, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_PTP, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_FCOE, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 1, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_MPLS, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 1, 0, NPC_S_KPU4_NSH, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, 2, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 16, 20, 24,
+ 0, 0, NPC_S_KPU3_ITAG, 12, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_BTAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_STAG, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 0,
+ 0, 0, NPC_S_KPU3_QINQ, 8, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_STAG, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU3_CTAG, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETAG_ITAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_ETYPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 18, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 26, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 22, 1,
+ NPC_LID_LB, NPC_LT_LB_ITAG, NPC_F_ITAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
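+/* KPU3: remaining stacked tags; forwards to L3/MPLS/NSH without setting a layer type. */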
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_RARP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_PTP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_FCOE, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU4_MPLS, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU4_NSH, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU5_IP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU5_IP6, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_ARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU5_RARP, 18, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 26, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 26, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU5_IP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU5_IP6, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU5_ARP, 22, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
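+/* KPU4: MPLS label stack (up to four labels per pass) and NSH (layer C). */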
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU5_MPLS_PL, 4, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU5_MPLS_PL, 8, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU5_MPLS_PL, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+ 0, 0, NPC_S_KPU5_MPLS, 12, 1,
+ NPC_LID_LC, NPC_LT_LC_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 7, 0, NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 7, 0, NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 6, 0, NPC_S_KPU11_TU_ETHER, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU5_NSH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 4, 0, NPC_S_KPU9_TU_MPLS, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NSH, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_K4, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
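+/*
+ * KPU5: outer L3 (layer C) - IP with and without options, IPv6 with
+ * extension headers, ARP, RARP, PTP and FCoE, plus MPLS/NSH
+ * continuation states.
+ */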
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+ 2, 0, NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+ 2, 0, NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_IP_IN_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_6TO4_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_ARP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_RARP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_PTP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_FCOE, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 8, 10,
+ 2, 0, NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_TUN_IP6, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_MPLS_IN_IP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU6_IP6_EXT, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LB, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 6, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 5, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 5, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 3, 0, NPC_S_KPU9_TU_MPLS, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
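+/* KPU6: IPv6 extension header continuation; a single terminating stub entry. */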
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
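+/* KPU7: second IPv6 extension header stage; same terminating stub as KPU6. */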
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
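+/*
+ * KPU8: outer L4 (layer D) - TCP with well-known ports, UDP tunnels
+ * (VXLAN, VXLAN-GPE, Geneve, GTP), SCTP, ICMP(6), IGMP, ESP, AH and the
+ * GRE csum/key/seq variants.
+ */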
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLAN_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_VXLAN, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_NSH, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NOVNI_MPLS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_VXLANGPE_NONP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM, 8, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ 8, 0x3f, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPC, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_G_PDU, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_GTP_GTPU_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 7, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_SCTP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_IGMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ICMP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_ESP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_AH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 2, 0, NPC_S_KPU11_TU_ETHER, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_NVGRE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_NVGRE, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10,
+ 0, 0, NPC_S_KPU9_TU_MPLS_IN_GRE_VXLAN, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_MPLS, NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU9_TU_NSH, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE_NSH, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 3, 0, NPC_S_KPU12_TU_IP6, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_CSUM_KEY_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_HAS_ROUTE, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_GRE, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_ACK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU11_TU_PPP, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_HAS_SEQ_ACK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE, NPC_F_GRE_VER1_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_GRE_VER1, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LD, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
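+/* KPU9: tunnel MPLS label stack and NSH reached via GRE or VXLAN-GPE (layer D). */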
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 0,
+ NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_2_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 0,
+ NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_3_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS, 12, 0,
+ NPC_LID_LD, NPC_LT_NA, NPC_F_MPLS_4_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 4, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_2_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS_PL, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_3_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 4, 0,
+ 0, 0, NPC_S_KPU10_TU_MPLS, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_TU_MPLS, NPC_F_MPLS_4_LABELS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 2, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 2, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 1, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU10_TU_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 1, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
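+/* KPU10: continuation of the tunnel MPLS label stack and NSH. */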
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP6, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 8, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LB, NPC_EC_L2_MPLS_2MANY, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 4, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 1, 0, NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20,
+ 0, 0, NPC_S_KPU11_TU_ETHER, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU11_TU_3RD_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU11_TU_MPLS_IN_NSH, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 1, 0x3f,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LD, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
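+/* KPU11: tunnel Ethernet with optional CTAG/STAG/QinQ stack, PPP and NSH (layer E). */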
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 14, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 14, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 14, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_CTAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER,
+ NPC_F_TU_ETHER_STAG_CTAG_UNK, 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_STAG_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 22, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_CTAG, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER,
+ NPC_F_TU_ETHER_QINQ_CTAG_UNK, 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_IP6, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU12_TU_ARP, 18, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_QINQ_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_ETHER, NPC_F_TU_ETHER_UNK, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_PPP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_MPLS_IN_NSH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_TU_3RD_NSH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LE, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
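+/* KPU12: inner L3 (layer F) - tunnelled IP, IPv6 and ARP. */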
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU15_TU_TCP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_UDP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_SCTP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_IGMP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ESP, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_AH, 20, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_UNK_PROTO, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU15_TU_TCP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_UDP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_SCTP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_IGMP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ESP, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_AH, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, NPC_F_IP_HAS_OPTIONS, 0, 0xf,
+ 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP,
+ NPC_F_IP_UNK_PROTO_HAS_OPTIONS, 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_IP_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_ARP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 12, 0,
+ 2, 0, NPC_S_KPU15_TU_TCP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_UDP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_SCTP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ICMP6, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_ESP, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 2, 0, NPC_S_KPU15_TU_AH, 40, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0,
+ 0, 0, NPC_S_KPU13_TU_IP6_EXT, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, NPC_F_IP6_HAS_EXT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_IP6_VER, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_TU_IP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LF, NPC_EC_UNK, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LF, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTP_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTPS_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_PPTP_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_TCP_DATA, 20, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTP_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_HTTPS_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_PPTP_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_PPTP_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_TCP_DATA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_TCP, NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ 12, 0xf0, 1, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 0, NPC_S_KPU16_UDP_DATA, 8, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_UDP, NPC_F_UDP_UNK_PORT, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_SCTP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ICMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_IGMP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ICMP6, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_ESP, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LG, NPC_LT_LG_TU_AH, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_LG, NPC_EC_L4, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 0,
+ NPC_LID_LG, NPC_LT_NA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
+static struct npc_kpu_profile_action kpu16_action_entries[] = {
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_TCP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_HTTP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_HTTPS_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_PPTP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0,
+ 0, 1, NPC_S_NA, 0, 1,
+ NPC_LID_LH, NPC_LT_LH_UDP_DATA, 0, 0, 0,
+ 0, 0,
+ },
+};
+
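+/* Each KPU's CAM table and action table are parallel arrays and are
+ * expected to be of equal length: a hit on CAM entry i of a KPU selects
+ * action entry i of the same KPU.
+ */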
+static struct npc_kpu_profile npc_kpu_profiles[] = {
+ {
+ ARRAY_SIZE(kpu1_cam_entries),
+ ARRAY_SIZE(kpu1_action_entries),
+ &kpu1_cam_entries[0],
+ &kpu1_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu2_cam_entries),
+ ARRAY_SIZE(kpu2_action_entries),
+ &kpu2_cam_entries[0],
+ &kpu2_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu3_cam_entries),
+ ARRAY_SIZE(kpu3_action_entries),
+ &kpu3_cam_entries[0],
+ &kpu3_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu4_cam_entries),
+ ARRAY_SIZE(kpu4_action_entries),
+ &kpu4_cam_entries[0],
+ &kpu4_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu5_cam_entries),
+ ARRAY_SIZE(kpu5_action_entries),
+ &kpu5_cam_entries[0],
+ &kpu5_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu6_cam_entries),
+ ARRAY_SIZE(kpu6_action_entries),
+ &kpu6_cam_entries[0],
+ &kpu6_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu7_cam_entries),
+ ARRAY_SIZE(kpu7_action_entries),
+ &kpu7_cam_entries[0],
+ &kpu7_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu8_cam_entries),
+ ARRAY_SIZE(kpu8_action_entries),
+ &kpu8_cam_entries[0],
+ &kpu8_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu9_cam_entries),
+ ARRAY_SIZE(kpu9_action_entries),
+ &kpu9_cam_entries[0],
+ &kpu9_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu10_cam_entries),
+ ARRAY_SIZE(kpu10_action_entries),
+ &kpu10_cam_entries[0],
+ &kpu10_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu11_cam_entries),
+ ARRAY_SIZE(kpu11_action_entries),
+ &kpu11_cam_entries[0],
+ &kpu11_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu12_cam_entries),
+ ARRAY_SIZE(kpu12_action_entries),
+ &kpu12_cam_entries[0],
+ &kpu12_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu13_cam_entries),
+ ARRAY_SIZE(kpu13_action_entries),
+ &kpu13_cam_entries[0],
+ &kpu13_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu14_cam_entries),
+ ARRAY_SIZE(kpu14_action_entries),
+ &kpu14_cam_entries[0],
+ &kpu14_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu15_cam_entries),
+ ARRAY_SIZE(kpu15_action_entries),
+ &kpu15_cam_entries[0],
+ &kpu15_action_entries[0],
+ },
+ {
+ ARRAY_SIZE(kpu16_cam_entries),
+ ARRAY_SIZE(kpu16_action_entries),
+ &kpu16_cam_entries[0],
+ &kpu16_action_entries[0],
+ },
+};
+
+#endif /* NPC_PROFILE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
new file mode 100644
index 000000000000..dc28fa2b9481
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -0,0 +1,1772 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "cgx.h"
+#include "rvu.h"
+#include "rvu_reg.h"
+
+#define DRV_NAME "octeontx2-af"
+#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
+#define DRV_VERSION "1.0"
+
+static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf);
+
+/* Supported devices */
+static const struct pci_device_id rvu_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, rvu_id_table);
+
+/* Poll a RVU block's register 'offset' until the bits specified by
+ * 'mask' read as zero or nonzero, as selected by 'zero'.
+ */
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(100);
+ void __iomem *reg;
+ u64 reg_val;
+
+ reg = rvu->afreg_base + ((block << 28) | offset);
+ while (time_before(jiffies, timeout)) {
+ reg_val = readq(reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ usleep_range(1, 5);
+ }
+ return -EBUSY;
+}
+
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
+{
+ int id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ id = find_first_zero_bit(rsrc->bmap, rsrc->max);
+ if (id >= rsrc->max)
+ return -ENOSPC;
+
+ __set_bit(id, rsrc->bmap);
+
+ return id;
+}
+
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, start, nrsrc);
+ return start;
+}
+
+static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
+{
+ if (!rsrc->bmap)
+ return;
+ if (start >= rsrc->max)
+ return;
+
+ bitmap_clear(rsrc->bmap, start, nrsrc);
+}
+
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
+{
+ int start;
+
+ if (!rsrc->bmap)
+ return false;
+
+ start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
+ if (start >= rsrc->max)
+ return false;
+
+ return true;
+}
+
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return;
+
+ __clear_bit(id, rsrc->bmap);
+}
+
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
+{
+ int used;
+
+ if (!rsrc->bmap)
+ return 0;
+
+ used = bitmap_weight(rsrc->bmap, rsrc->max);
+ return (rsrc->max - used);
+}
+
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
+{
+ rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
+ sizeof(long), GFP_KERNEL);
+ if (!rsrc->bmap)
+ return -ENOMEM;
+ return 0;
+}
+
+/* Get block LF's HW index from a PF_FUNC's block slot number */
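+/* E.g. a func owning three SSO LFs addresses them as slots 0, 1 and 2,
+ * while the backing HW LF indices may be arbitrary; walk fn_map to find
+ * the LF behind a given slot.
+ */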
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
+{
+ u16 match = 0;
+ int lf;
+
+ spin_lock(&rvu->rsrc_lock);
+ for (lf = 0; lf < block->lf.max; lf++) {
+ if (block->fn_map[lf] == pcifunc) {
+ if (slot == match) {
+ spin_unlock(&rvu->rsrc_lock);
+ return lf;
+ }
+ match++;
+ }
+ }
+ spin_unlock(&rvu->rsrc_lock);
+ return -ENODEV;
+}
+
+/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
+ * Some silicon variants of OcteonTX2 support
+ * multiple blocks of the same type.
+ *
+ * @pcifunc has to be zero when no LF is yet attached.
+ */
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
+{
+ int devnum, blkaddr = -ENODEV;
+ u64 cfg, reg;
+ bool is_pf;
+
+ switch (blktype) {
+ case BLKTYPE_NPC:
+ blkaddr = BLKADDR_NPC;
+ goto exit;
+ case BLKTYPE_NPA:
+ blkaddr = BLKADDR_NPA;
+ goto exit;
+ case BLKTYPE_NIX:
+ /* For now assume NIX0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+ break;
+ case BLKTYPE_SSO:
+ blkaddr = BLKADDR_SSO;
+ goto exit;
+ case BLKTYPE_SSOW:
+ blkaddr = BLKADDR_SSOW;
+ goto exit;
+ case BLKTYPE_TIM:
+ blkaddr = BLKADDR_TIM;
+ goto exit;
+ case BLKTYPE_CPT:
+ /* For now assume CPT0 */
+ if (!pcifunc) {
+ blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+ break;
+ }
+
+ /* Check if this is a RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
+ if (blktype == BLKTYPE_NIX) {
+ reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_NIX0;
+ }
+
+ /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
+ if (blktype == BLKTYPE_CPT) {
+ reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_CPT0;
+ }
+
+exit:
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+ return -ENODEV;
+}
+
+static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, u16 pcifunc,
+ u16 lf, bool attach)
+{
+ int devnum, num_lfs = 0;
+ bool is_pf;
+ u64 reg;
+
+ if (lf >= block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "%s: FATAL: LF %d is >= %s's max lfs, i.e. %d\n",
+ __func__, lf, block->name, block->lf.max);
+ return;
+ }
+
+ /* Check if this is for a RVU PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK) {
+ is_pf = false;
+ devnum = rvu_get_hwvf(rvu, pcifunc);
+ } else {
+ is_pf = true;
+ devnum = rvu_get_pf(pcifunc);
+ }
+
+ block->fn_map[lf] = attach ? pcifunc : 0;
+
+ switch (block->type) {
+ case BLKTYPE_NPA:
+ pfvf->npalf = attach ? true : false;
+ num_lfs = pfvf->npalf;
+ break;
+ case BLKTYPE_NIX:
+ pfvf->nixlf = attach ? true : false;
+ num_lfs = pfvf->nixlf;
+ break;
+ case BLKTYPE_SSO:
+ attach ? pfvf->sso++ : pfvf->sso--;
+ num_lfs = pfvf->sso;
+ break;
+ case BLKTYPE_SSOW:
+ attach ? pfvf->ssow++ : pfvf->ssow--;
+ num_lfs = pfvf->ssow;
+ break;
+ case BLKTYPE_TIM:
+ attach ? pfvf->timlfs++ : pfvf->timlfs--;
+ num_lfs = pfvf->timlfs;
+ break;
+ case BLKTYPE_CPT:
+ attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
+ num_lfs = pfvf->cptlfs;
+ break;
+ }
+
+ reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
+ rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
+}
+
+inline int rvu_get_pf(u16 pcifunc)
+{
+ return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+}
+
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
+{
+ u64 cfg;
+
+ /* Get numVFs attached to this PF and first HWVF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ *numvfs = (cfg >> 12) & 0xFF;
+ *hwvf = cfg & 0xFFF;
+}
+
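+/* A pcifunc is (pf << RVU_PFVF_PF_SHIFT) | func, where func 0 is the PF
+ * itself and func N (N >= 1) is its (N - 1)'th VF. E.g. pcifunc 0x405 is
+ * PF1/VF4: pf = (0x405 >> 10) & 0x3F = 1, func = 0x405 & 0x3FF = 5.
+ */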
+static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+{
+ int pf, func;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ func = pcifunc & RVU_PFVF_FUNC_MASK;
+
+ /* Get first HWVF attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+
+ return ((cfg & 0xFFF) + func - 1);
+}
+
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
+{
+ /* Check if it is a PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ return &rvu->pf[rvu_get_pf(pcifunc)];
+}
+
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
+{
+ struct rvu_block *block;
+
+ if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
+ return false;
+
+ block = &hw->block[blkaddr];
+ return block->implemented;
+}
+
+static void rvu_check_block_implemented(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* For each block check if 'implemented' bit is set */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
+ if (cfg & BIT_ULL(11))
+ block->implemented = true;
+ }
+}
+
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
+{
+ int err;
+
+ if (!block->implemented)
+ return 0;
+
+ rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
+ err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
+ true);
+ return err;
+}
+
+static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
+{
+ struct rvu_block *block = &rvu->hw->block[blkaddr];
+
+ if (!block->implemented)
+ return;
+
+ rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
+ rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
+}
+
+static void rvu_reset_all_blocks(struct rvu *rvu)
+{
+ /* Do a HW reset of all RVU blocks */
+ rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
+}
+
+static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
+{
+ struct rvu_pfvf *pfvf;
+ u64 cfg;
+ int lf;
+
+ for (lf = 0; lf < block->lf.max; lf++) {
+ cfg = rvu_read64(rvu, block->addr,
+ block->lfcfg_reg | (lf << block->lfshift));
+ if (!(cfg & BIT_ULL(63)))
+ continue;
+
+ /* Set this resource as being used */
+ __set_bit(lf, block->lf.bmap);
+
+ /* Get, to whom this LF is attached */
+ /* Find to whom this LF is attached */
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ (cfg >> 8) & 0xFFFF, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
+{
+ int min_vecs;
+
+ if (!vf)
+ goto check_pf;
+
+ if (!nvecs) {
+ dev_warn(rvu->dev,
+ "PF%d:VF%d is configured with zero msix vectors\n",
+ pf, vf - 1);
+ }
+ return;
+
+check_pf:
+ if (pf == 0)
+ min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
+ else
+ min_vecs = RVU_PF_INT_VEC_CNT;
+
+ if (!(nvecs < min_vecs))
+ return;
+ dev_warn(rvu->dev,
+ "PF%d is configured with too few vectors, %d, min is %d\n",
+ pf, nvecs, min_vecs);
+}
+
+static int rvu_setup_msix_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf, err;
+ int nvecs, offset, max_msix;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, phy_addr;
+ dma_addr_t iova;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ /* If PF is not enabled, nothing to do */
+ if (!((cfg >> 20) & 0x01))
+ continue;
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+ pfvf = &rvu->pf[pf];
+ /* Get num of MSIX vectors attached to this PF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
+
+ /* Alloc msix bitmap for this PF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ /* Allocate memory for MSIX vector to RVU block LF mapping */
+ pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* For PF0 (AF) firmware will set msix vector offsets for
+ * AF, block AF and PF0_INT vectors, so jump to VFs.
+ */
+ if (!pf)
+ goto setup_vfmsix;
+
+ /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
+ nvecs = (cfg >> 12) & 0xFF;
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
+setup_vfmsix:
+ /* Alloc msix bitmap for VFs */
+ for (vf = 0; vf < numvfs; vf++) {
+ pfvf = &rvu->hwvf[hwvf + vf];
+ /* Get num of MSIX vectors attached to this VF */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_MSIX_CFG(pf));
+ pfvf->msix.max = (cfg & 0xFFF) + 1;
+ rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
+
+ /* Alloc msix bitmap for this VF */
+ err = rvu_alloc_bitmap(&pfvf->msix);
+ if (err)
+ return err;
+
+ pfvf->msix_lfmap =
+ devm_kcalloc(rvu->dev, pfvf->msix.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!pfvf->msix_lfmap)
+ return -ENOMEM;
+
+ /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
+ * These are allocated on driver init and never freed,
+ * so no need to set 'msix_lfmap' for these.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
+ nvecs = (cfg >> 12) & 0xFF;
+ cfg &= ~0x7FFULL;
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
+ cfg | offset);
+ }
+ }
+
+ /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
+ * create an IOMMU mapping for the physical address configured
+ * by firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
+ */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+ iova = dma_map_resource(rvu->dev, phy_addr,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+
+ if (dma_mapping_error(rvu->dev, iova))
+ return -ENOMEM;
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
+ rvu->msix_base_iova = iova;
+
+ return 0;
+}
+
+static void rvu_free_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int id, max_msix;
+ u64 cfg;
+
+ rvu_npa_freemem(rvu);
+ rvu_npc_freemem(rvu);
+ rvu_nix_freemem(rvu);
+
+ /* Free block LF bitmaps */
+ for (id = 0; id < BLK_COUNT; id++) {
+ block = &hw->block[id];
+ kfree(block->lf.bmap);
+ }
+
+ /* Free MSIX bitmaps */
+ for (id = 0; id < hw->total_pfs; id++) {
+ pfvf = &rvu->pf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ for (id = 0; id < hw->total_vfs; id++) {
+ pfvf = &rvu->hwvf[id];
+ kfree(pfvf->msix.bmap);
+ }
+
+ /* Unmap MSIX vector base IOVA mapping */
+ if (!rvu->msix_base_iova)
+ return;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ max_msix = cfg & 0xFFFFF;
+ dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
+ max_msix * PCI_MSIX_ENTRY_SIZE,
+ DMA_BIDIRECTIONAL, 0);
+}
+
+static int rvu_setup_hw_resources(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid, err;
+ u64 cfg;
+
+ /* Get HW supported max RVU PF & VF count */
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
+ hw->total_pfs = (cfg >> 32) & 0xFF;
+ hw->total_vfs = (cfg >> 20) & 0xFFF;
+ hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+
+ /* Init NPA LF's bitmap */
+ block = &hw->block[BLKADDR_NPA];
+ if (!block->implemented)
+ goto nix;
+ cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
+ block->lf.max = (cfg >> 16) & 0xFFF;
+ block->addr = BLKADDR_NPA;
+ block->type = BLKTYPE_NPA;
+ block->lfshift = 8;
+ block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
+ block->lfcfg_reg = NPA_PRIV_LFX_CFG;
+ block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NPA_AF_LF_RST;
+ sprintf(block->name, "NPA");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+nix:
+ /* Init NIX LF's bitmap */
+ block = &hw->block[BLKADDR_NIX0];
+ if (!block->implemented)
+ goto sso;
+ cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
+ block->lf.max = cfg & 0xFFF;
+ block->addr = BLKADDR_NIX0;
+ block->type = BLKTYPE_NIX;
+ block->lfshift = 8;
+ block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
+ block->lfcfg_reg = NIX_PRIV_LFX_CFG;
+ block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NIX_AF_LF_RST;
+ sprintf(block->name, "NIX");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+sso:
+ /* Init SSO group's bitmap */
+ block = &hw->block[BLKADDR_SSO];
+ if (!block->implemented)
+ goto ssow;
+ cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_SSO;
+ block->type = BLKTYPE_SSO;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
+ block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
+ block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
+ block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ sprintf(block->name, "SSO GROUP");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+ssow:
+ /* Init SSO workslot's bitmap; the workslot count is in
+ * SSO_AF_CONST[63:56], read above.
+ */
+ block = &hw->block[BLKADDR_SSOW];
+ if (!block->implemented)
+ goto tim;
+ block->lf.max = (cfg >> 56) & 0xFF;
+ block->addr = BLKADDR_SSOW;
+ block->type = BLKTYPE_SSOW;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
+ block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
+ block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
+ block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ sprintf(block->name, "SSOWS");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+tim:
+ /* Init TIM LF's bitmap */
+ block = &hw->block[BLKADDR_TIM];
+ if (!block->implemented)
+ goto cpt;
+ cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
+ block->lf.max = cfg & 0xFFFF;
+ block->addr = BLKADDR_TIM;
+ block->type = BLKTYPE_TIM;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
+ block->lfcfg_reg = TIM_PRIV_LFX_CFG;
+ block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = TIM_AF_LF_RST;
+ sprintf(block->name, "TIM");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+cpt:
+ /* Init CPT LF's bitmap */
+ block = &hw->block[BLKADDR_CPT0];
+ if (!block->implemented)
+ goto init;
+ cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
+ block->lf.max = cfg & 0xFF;
+ block->addr = BLKADDR_CPT0;
+ block->type = BLKTYPE_CPT;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
+ block->lfcfg_reg = CPT_PRIV_LFX_CFG;
+ block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = CPT_AF_LF_RST;
+ sprintf(block->name, "CPT");
+ err = rvu_alloc_bitmap(&block->lf);
+ if (err)
+ return err;
+
+init:
+ /* Allocate memory for PFVF data */
+ rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->pf)
+ return -ENOMEM;
+
+ rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
+ sizeof(struct rvu_pfvf), GFP_KERNEL);
+ if (!rvu->hwvf)
+ return -ENOMEM;
+
+ spin_lock_init(&rvu->rsrc_lock);
+
+ err = rvu_setup_msix_resources(rvu);
+ if (err)
+ return err;
+
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+
+ /* Allocate memory for block LF/slot to pcifunc mapping info */
+ block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!block->fn_map)
+ return -ENOMEM;
+
+ /* Scan all blocks to check if low level firmware has
+ * already provisioned any of the resources to a PF/VF.
+ */
+ rvu_scan_block(rvu, block);
+ }
+
+ err = rvu_npc_init(rvu);
+ if (err)
+ return err;
+
+ err = rvu_npa_init(rvu);
+ if (err)
+ return err;
+
+ err = rvu_nix_init(rvu);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* NPA and NIX admin queue APIs */
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
+{
+ if (!aq)
+ return;
+
+ qmem_free(rvu->dev, aq->inst);
+ qmem_free(rvu->dev, aq->res);
+ devm_kfree(rvu->dev, aq);
+}
+
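+/* An admin queue is a pair of DMA-able qmem regions: 'inst' holds qsize
+ * instructions of inst_size bytes each, 'res' the corresponding qsize
+ * results of res_size bytes each.
+ */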
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size)
+{
+ struct admin_queue *aq;
+ int err;
+
+ *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
+ if (!*ad_queue)
+ return -ENOMEM;
+ aq = *ad_queue;
+
+ /* Alloc memory for instructions i.e AQ */
+ err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
+ if (err) {
+ devm_kfree(rvu->dev, aq);
+ return err;
+ }
+
+ /* Alloc memory for results */
+ err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
+ if (err) {
+ rvu_aq_free(rvu, aq);
+ return err;
+ }
+
+ spin_lock_init(&aq->lock);
+ return 0;
+}
+
+static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req,
+ struct ready_msg_rsp *rsp)
+{
+ return 0;
+}
+
+/* Get current count of a RVU block's LF/slots
+ * provisioned to a given RVU func.
+ */
+static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
+{
+ switch (blktype) {
+ case BLKTYPE_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKTYPE_NIX:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKTYPE_SSO:
+ return pfvf->sso;
+ case BLKTYPE_SSOW:
+ return pfvf->ssow;
+ case BLKTYPE_TIM:
+ return pfvf->timlfs;
+ case BLKTYPE_CPT:
+ return pfvf->cptlfs;
+ }
+ return 0;
+}
+
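+/* Look up the HW LF backing 'slot' of 'pcifunc' via the block's lookup
+ * register: write pcifunc/slot with the trigger bit (13) set, wait for
+ * HW to clear it, then read back bit 12 (valid) and bits 11:0 (LF index).
+ */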
+static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
+ int pcifunc, int slot)
+{
+ u64 val;
+
+ val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
+ rvu_write64(rvu, block->addr, block->lookup_reg, val);
+ /* Wait for the lookup to finish */
+ /* TODO: put some timeout here */
+ while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
+ ;
+
+ val = rvu_read64(rvu, block->addr, block->lookup_reg);
+
+ /* Check LF valid bit */
+ if (!(val & (1ULL << 12)))
+ return -1;
+
+ return (val & 0xFFF);
+}
+
+static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf, num_lfs;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ if (!num_lfs)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
+ if (lf < 0) /* This should never happen */
+ continue;
+
+ /* Disable the LF */
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), 0x00ULL);
+
+ /* Update SW maintained mapping info as well */
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, false);
+
+ /* Free the resource */
+ rvu_free_rsrc(&block->lf, lf);
+
+ /* Clear MSIX vector offset for this LF */
+ rvu_clear_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
+ u16 pcifunc)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ bool detach_all = true;
+ struct rvu_block *block;
+ int blkid;
+
+ spin_lock(&rvu->rsrc_lock);
+
+ /* Check for partial resource detach */
+ if (detach && detach->partial)
+ detach_all = false;
+
+ /* Check for RVU block's LFs attached to this func,
+ * if so, detach them.
+ */
+ for (blkid = 0; blkid < BLK_COUNT; blkid++) {
+ block = &hw->block[blkid];
+ if (!block->lf.bmap)
+ continue;
+ if (!detach_all && detach) {
+ if (blkid == BLKADDR_NPA && !detach->npalf)
+ continue;
+ else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
+ continue;
+ else if ((blkid == BLKADDR_SSO) && !detach->sso)
+ continue;
+ else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
+ continue;
+ else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
+ continue;
+ else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
+ continue;
+ }
+ rvu_detach_block(rvu, pcifunc, block->type);
+ }
+
+ spin_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu,
+ struct rsrc_detach *detach,
+ struct msg_rsp *rsp)
+{
+ return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
+}
+
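+/* Attach 'num_lfs' LFs of the given block type to 'pcifunc'. Each LF's
+ * cfg register is programmed with bit 63 (attached), the owning pcifunc
+ * in bits 23:8 and the func-local slot number in the low bits; this is
+ * the same layout rvu_scan_block() reads back at probe time.
+ */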
+static void rvu_attach_block(struct rvu *rvu, int pcifunc,
+ int blktype, int num_lfs)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int slot, lf;
+ int blkaddr;
+ u64 cfg;
+
+ if (!num_lfs)
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ /* Allocate the resource */
+ lf = rvu_alloc_rsrc(&block->lf);
+ if (lf < 0)
+ return;
+
+ cfg = (1ULL << 63) | (pcifunc << 8) | slot;
+ rvu_write64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift), cfg);
+ rvu_update_rsrc_map(rvu, pfvf, block,
+ pcifunc, lf, true);
+
+ /* Set start MSIX vector for this LF within this PF/VF */
+ rvu_set_msix_offset(rvu, pfvf, block, lf);
+ }
+}
+
+static int rvu_check_rsrc_availability(struct rvu *rvu,
+ struct rsrc_attach *req, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int free_lfs, mappedlfs;
+
+ /* Only one NPA LF can be attached */
+ if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
+ block = &hw->block[BLKADDR_NPA];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->npalf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NPA\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ /* Only one NIX LF can be attached */
+ if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
+ block = &hw->block[BLKADDR_NIX0];
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (!free_lfs)
+ goto fail;
+ } else if (req->nixlf) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid req, already has NIX\n",
+ pcifunc);
+ return -EINVAL;
+ }
+
+ if (req->sso) {
+ block = &hw->block[BLKADDR_SSO];
+ /* Is the request within limits? */
+ if (req->sso > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSO req, %d > max %d\n",
+ pcifunc, req->sso, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ /* Check if additional resources are available */
+ if (req->sso > mappedlfs &&
+ ((req->sso - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->ssow) {
+ block = &hw->block[BLKADDR_SSOW];
+ if (req->ssow > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid SSOW req, %d > max %d\n",
+ pcifunc, req->ssow, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->ssow > mappedlfs &&
+ ((req->ssow - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->timlfs) {
+ block = &hw->block[BLKADDR_TIM];
+ if (req->timlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
+ pcifunc, req->timlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->timlfs > mappedlfs &&
+ ((req->timlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ if (req->cptlfs) {
+ block = &hw->block[BLKADDR_CPT0];
+ if (req->cptlfs > block->lf.max) {
+ dev_err(&rvu->pdev->dev,
+ "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
+ pcifunc, req->cptlfs, block->lf.max);
+ return -EINVAL;
+ }
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ free_lfs = rvu_rsrc_free_count(&block->lf);
+ if (req->cptlfs > mappedlfs &&
+ ((req->cptlfs - mappedlfs) > free_lfs))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_info(rvu->dev, "Request for %s failed\n", block->name);
+ return -ENOSPC;
+}
+
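+/* Expected flow: a PF/VF first sends ATTACH_RESOURCES with modify = 0 to
+ * get a fresh set of LFs, then MSIX_OFFSET to learn its vector layout;
+ * a later ATTACH_RESOURCES with modify = 1 adjusts counts while leaving
+ * blocks that aren't mentioned in the request untouched.
+ */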
+static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
+ struct rsrc_attach *attach,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = attach->hdr.pcifunc;
+ int err;
+
+ /* If first request, detach all existing attached resources */
+ if (!attach->modify)
+ rvu_detach_rsrcs(rvu, NULL, pcifunc);
+
+ spin_lock(&rvu->rsrc_lock);
+
+ /* Check if the request can be accommodated */
+ err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
+ if (err)
+ goto exit;
+
+ /* Now attach the requested resources */
+ if (attach->npalf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+
+ if (attach->nixlf)
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+
+ if (attach->sso) {
+ /* An RVU func doesn't know which exact LFs or slots are
+ * attached to it, it always sees them as slots 0, 1, 2. So
+ * for a 'modify' request, simply detach all existing
+ * attached LFs/slots and attach them afresh.
+ */
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
+ }
+
+ if (attach->ssow) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
+ }
+
+ if (attach->timlfs) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
+ }
+
+ if (attach->cptlfs) {
+ if (attach->modify)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
+ }
+
+exit:
+ spin_unlock(&rvu->rsrc_lock);
+ return err;
+}
+
+static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int lf)
+{
+ u16 vec;
+
+ if (lf < 0)
+ return MSIX_VECTOR_INVALID;
+
+ for (vec = 0; vec < pfvf->msix.max; vec++) {
+ if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
+ return vec;
+ }
+ return MSIX_VECTOR_INVALID;
+}
+
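+/* Reserve a contiguous range of the func's MSIX vectors for this LF
+ * (nvecs comes from the LF's MSIX cfg register), program the range's
+ * start into the cfg register's offset field (bits 10:0) and record
+ * vector -> (block, LF) ownership in msix_lfmap for later teardown.
+ */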
+static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Check and alloc MSIX vectors, must be contiguous */
+ if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
+ return;
+
+ offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+
+ /* Config MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
+
+ /* Update the bitmap as well */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
+}
+
+static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct rvu_block *block, int lf)
+{
+ u16 nvecs, vec, offset;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift));
+ nvecs = (cfg >> 12) & 0xFF;
+
+ /* Clear MSIX offset in LF */
+ rvu_write64(rvu, block->addr, block->msixcfg_reg |
+ (lf << block->lfshift), cfg & ~0x7FFULL);
+
+ offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
+
+ /* Update the mapping */
+ for (vec = 0; vec < nvecs; vec++)
+ pfvf->msix_lfmap[offset + vec] = 0;
+
+ /* Free the same in MSIX bitmap */
+ rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
+}
+
+static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
+ struct msix_offset_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int lf, slot;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->msix.bmap)
+ return 0;
+
+ /* Set MSIX offsets for each block's LFs attached to this PF/VF */
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
+ rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
+
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
+ rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);
+
+ rsp->sso = pfvf->sso;
+ for (slot = 0; slot < rsp->sso; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
+ rsp->sso_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
+ }
+
+ rsp->ssow = pfvf->ssow;
+ for (slot = 0; slot < rsp->ssow; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
+ rsp->ssow_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
+ }
+
+ rsp->timlfs = pfvf->timlfs;
+ for (slot = 0; slot < rsp->timlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
+ rsp->timlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
+ }
+
+ rsp->cptlfs = pfvf->cptlfs;
+ for (slot = 0; slot < rsp->cptlfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
+ rsp->cptlf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
+ }
+ return 0;
+}
+
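+/* Dispatch one mbox request to its handler. Each M() entry of
+ * MBOX_MESSAGES (see mbox.h) expands to a case roughly like this sketch
+ * for the READY message (the exact ID macro name is illustrative):
+ *
+ *   case MBOX_MSG_READY: {
+ *           struct ready_msg_rsp *rsp;
+ *           int err;
+ *
+ *           rsp = (struct ready_msg_rsp *)otx2_mbox_alloc_msg(
+ *                           &rvu->mbox, devid, sizeof(*rsp));
+ *           ...
+ *           err = rvu_mbox_handler_READY(rvu, (struct msg_req *)req, rsp);
+ *           ...
+ *   }
+ */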
+static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
+ struct mbox_msghdr *req)
+{
+ /* Check if valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG)
+ goto bad_message;
+
+ switch (req->id) {
+#define M(_name, _id, _req_type, _rsp_type) \
+ case _id: { \
+ struct _rsp_type *rsp; \
+ int err; \
+ \
+ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
+ &rvu->mbox, devid, \
+ sizeof(struct _rsp_type)); \
+ if (rsp) { \
+ rsp->hdr.id = _id; \
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
+ rsp->hdr.pcifunc = req->pcifunc; \
+ rsp->hdr.rc = 0; \
+ } \
+ \
+ err = rvu_mbox_handler_ ## _name(rvu, \
+ (struct _req_type *)req, \
+ rsp); \
+ if (rsp && err) \
+ rsp->hdr.rc = err; \
+ \
+ return rsp ? err : -ENOMEM; \
+ }
+MBOX_MESSAGES
+#undef M
+ break;
+bad_message:
+ default:
+ otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
+ req->id);
+ return -ENODEV;
+ }
+}
+
+static void rvu_mbox_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = mwork->rvu;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, id, err;
+ u16 pf;
+
+ mbox = &rvu->mbox;
+ pf = mwork - rvu->mbox_wrk;
+ mdev = &mbox->dev[pf];
+
+ /* Process received mbox messages */
+ req_hdr = mdev->mbase + mbox->rx_start;
+ if (req_hdr->num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < req_hdr->num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ /* Set which PF sent this message based on mbox IRQ */
+ msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
+ err = rvu_process_mbox_msg(rvu, pf, msg);
+ if (!err) {
+ offset = mbox->rx_start + msg->next_msgoff;
+ continue;
+ }
+
+ if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
+ err, otx2_mbox_id2name(msg->id), msg->id, pf,
+ (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ else
+ dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
+ err, otx2_mbox_id2name(msg->id), msg->id, pf);
+ }
+
+ /* Send mbox responses to PF */
+ otx2_mbox_msg_send(mbox, pf);
+}
+
+static void rvu_mbox_up_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = mwork->rvu;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ int offset, id;
+ u16 pf;
+
+ mbox = &rvu->mbox_up;
+ pf = mwork - rvu->mbox_wrk_up;
+ mdev = &mbox->dev[pf];
+
+ rsp_hdr = mdev->mbase + mbox->rx_start;
+ if (rsp_hdr->num_msgs == 0) {
+ dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
+ return;
+ }
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < rsp_hdr->num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(rvu->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(rvu->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ break;
+ default:
+ if (msg->rc)
+ dev_err(rvu->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+end:
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, 0);
+}
+
+static int rvu_mbox_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ void __iomem *hwbase = NULL;
+ struct rvu_work *mwork;
+ u64 bar4_addr;
+ int err, pf;
+
+ rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ hw->total_pfs);
+ if (!rvu->mbox_wq)
+ return -ENOMEM;
+
+ rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->mbox_wrk) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->mbox_wrk_up) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* Map mbox region shared with PFs */
+ bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * RVU devices; it shouldn't be mapped as device memory, so use a
+ * write-combining mapping to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs);
+ if (!hwbase) {
+ dev_err(rvu->dev, "Unable to map mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base,
+ MBOX_DIR_AFPF, hw->total_pfs);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
+ MBOX_DIR_AFPF_UP, hw->total_pfs);
+ if (err)
+ goto exit;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ mwork = &rvu->mbox_wrk[pf];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, rvu_mbox_handler);
+ }
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ mwork = &rvu->mbox_wrk_up[pf];
+ mwork->rvu = rvu;
+ INIT_WORK(&mwork->work, rvu_mbox_up_handler);
+ }
+
+ return 0;
+exit:
+ if (hwbase)
+ iounmap((void __iomem *)hwbase);
+ destroy_workqueue(rvu->mbox_wq);
+ return err;
+}
+
+static void rvu_mbox_destroy(struct rvu *rvu)
+{
+ if (rvu->mbox_wq) {
+ flush_workqueue(rvu->mbox_wq);
+ destroy_workqueue(rvu->mbox_wq);
+ rvu->mbox_wq = NULL;
+ }
+
+ if (rvu->mbox.hwbase)
+ iounmap((void __iomem *)rvu->mbox.hwbase);
+
+ otx2_mbox_destroy(&rvu->mbox);
+ otx2_mbox_destroy(&rvu->mbox_up);
+}
+
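+/* AF<->PF mbox interrupt: RVU_AF_PFAF_MBOX_INT has one bit per PF. For
+ * each PF whose bit is set, queue work for the regular and/or the 'up'
+ * mbox (AF-initiated messages such as CGX link events) if its region
+ * holds pending messages; actual processing runs from the workqueue,
+ * not in IRQ context.
+ */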
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
+ /* Clear interrupts */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
+
+ /* Sync with mbox memory region */
+ smp_wmb();
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+ mbox = &rvu->mbox;
+ mdev = &mbox->dev[pf];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(rvu->mbox_wq,
+ &rvu->mbox_wrk[pf].work);
+ mbox = &rvu->mbox_up;
+ mdev = &mbox->dev[pf];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(rvu->mbox_wq,
+ &rvu->mbox_wrk_up[pf].work);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_enable_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ /* Clear spurious irqs, if any */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
+
+ /* Enable mailbox interrupt for all PFs except PF0 i.e. AF itself */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+}
+
+static void rvu_unregister_interrupts(struct rvu *rvu)
+{
+ int irq;
+
+ /* Disable the Mbox interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ for (irq = 0; irq < rvu->num_vec; irq++) {
+ if (rvu->irq_allocated[irq])
+ free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
+ }
+
+ pci_free_irq_vectors(rvu->pdev);
+ rvu->num_vec = 0;
+}
+
+static int rvu_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ rvu->num_vec = pci_msix_vec_count(rvu->pdev);
+
+ rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
+ NAME_SIZE, GFP_KERNEL);
+ if (!rvu->irq_name)
+ return -ENOMEM;
+
+ rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
+ sizeof(bool), GFP_KERNEL);
+ if (!rvu->irq_allocated)
+ return -ENOMEM;
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
+ rvu->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(rvu->dev,
+ "RVUAF: Request for %d msix vectors failed, ret %d\n",
+ rvu->num_vec, ret);
+ return ret;
+ }
+
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox irq\n");
+ goto fail;
+ }
+
+ rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+
+ /* Enable mailbox interrupts from all PFs */
+ rvu_enable_mbox_intr(rvu);
+
+ return 0;
+
+fail:
+ pci_free_irq_vectors(rvu->pdev);
+ return ret;
+}
+
+static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct rvu *rvu;
+ int err;
+
+ rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
+ if (!rvu)
+ return -ENOMEM;
+
+ rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
+ if (!rvu->hw) {
+ devm_kfree(dev, rvu);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, rvu);
+ rvu->pdev = pdev;
+ rvu->dev = &pdev->dev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_freemem;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set DMA mask\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to set consistent DMA mask\n");
+ goto err_release_regions;
+ }
+
+ /* Map Admin function CSRs */
+ rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
+ rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!rvu->afreg_base || !rvu->pfreg_base) {
+ dev_err(dev, "Unable to map admin function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Check which blocks the HW supports */
+ rvu_check_block_implemented(rvu);
+
+ rvu_reset_all_blocks(rvu);
+
+ err = rvu_setup_hw_resources(rvu);
+ if (err)
+ goto err_release_regions;
+
+ err = rvu_mbox_init(rvu);
+ if (err)
+ goto err_hwsetup;
+
+ err = rvu_cgx_probe(rvu);
+ if (err)
+ goto err_mbox;
+
+ err = rvu_register_interrupts(rvu);
+ if (err)
+ goto err_cgx;
+
+ return 0;
+err_cgx:
+ rvu_cgx_wq_destroy(rvu);
+err_mbox:
+ rvu_mbox_destroy(rvu);
+err_hwsetup:
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_freemem:
+ pci_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(dev, rvu);
+ return err;
+}
+
+static void rvu_remove(struct pci_dev *pdev)
+{
+ struct rvu *rvu = pci_get_drvdata(pdev);
+
+ rvu_unregister_interrupts(rvu);
+ rvu_cgx_wq_destroy(rvu);
+ rvu_mbox_destroy(rvu);
+ rvu_reset_all_blocks(rvu);
+ rvu_free_hw_resources(rvu);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ devm_kfree(&pdev->dev, rvu->hw);
+ devm_kfree(&pdev->dev, rvu);
+}
+
+static struct pci_driver rvu_driver = {
+ .name = DRV_NAME,
+ .id_table = rvu_id_table,
+ .probe = rvu_probe,
+ .remove = rvu_remove,
+};
+
+static int __init rvu_init_module(void)
+{
+ int err;
+
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ err = pci_register_driver(&cgx_driver);
+ if (err < 0)
+ return err;
+
+ err = pci_register_driver(&rvu_driver);
+ if (err < 0)
+ pci_unregister_driver(&cgx_driver);
+
+ return err;
+}
+
+static void __exit rvu_cleanup_module(void)
+{
+ pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&cgx_driver);
+}
+
+module_init(rvu_init_module);
+module_exit(rvu_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
new file mode 100644
index 000000000000..2c0580cd2807
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_H
+#define RVU_H
+
+#include "rvu_struct.h"
+#include "common.h"
+#include "mbox.h"
+
+/* PCI device IDs */
+#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+
+/* PCI BAR nos */
+#define PCI_AF_REG_BAR_NUM 0
+#define PCI_PF_REG_BAR_NUM 2
+#define PCI_MBOX_BAR_NUM 4
+
+#define NAME_SIZE 32
+
+/* PF_FUNC */
+#define RVU_PFVF_PF_SHIFT 10
+#define RVU_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
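+
+/* A 16-bit PF_FUNC is laid out as:
+ *	15:10 - PF number
+ *	 9:0  - function; 0 for the PF itself, N for its VF number N - 1
+ */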
+
+struct rvu_work {
+ struct work_struct work;
+ struct rvu *rvu;
+};
+
+struct rsrc_bmap {
+ unsigned long *bmap; /* Pointer to resource bitmap */
+ u16 max; /* Max resource id or count */
+};
+
+struct rvu_block {
+ struct rsrc_bmap lf;
+ struct admin_queue *aq; /* NIX/NPA AQ */
+ u16 *fn_map; /* LF to pcifunc mapping */
+ bool multislot;
+ bool implemented;
+ u8 addr; /* RVU_BLOCK_ADDR_E */
+ u8 type; /* RVU_BLOCK_TYPE_E */
+ u8 lfshift;
+ u64 lookup_reg;
+ u64 pf_lfcnt_reg;
+ u64 vf_lfcnt_reg;
+ u64 lfcfg_reg;
+ u64 msixcfg_reg;
+ u64 lfreset_reg;
+ unsigned char name[NAME_SIZE];
+};
+
+struct nix_mcast {
+ struct qmem *mce_ctx;
+ struct qmem *mcast_buf;
+ int replay_pkind;
+ int next_free_mce;
+ spinlock_t mce_lock; /* Serialize MCE updates */
+};
+
+struct nix_mce_list {
+ struct hlist_head head;
+ int count;
+ int max;
+};
+
+struct npc_mcam {
+ spinlock_t lock; /* MCAM entries and counters update lock */
+ u8 keysize; /* MCAM keysize 112/224/448 bits */
+ u8 banks; /* Number of MCAM banks */
+ u8 banks_per_entry;/* Number of keywords in key */
+ u16 banksize; /* Number of MCAM entries in each bank */
+ u16 total_entries; /* Total number of MCAM entries */
+ u16 entries; /* Total minus reserved for NIX LFs */
+ u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */
+ u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */
+};
+
+/* Structure for per-RVU-func info, i.e. PF/VF */
+struct rvu_pfvf {
+ bool npalf; /* Only one NPALF per RVU_FUNC */
+ bool nixlf; /* Only one NIXLF per RVU_FUNC */
+ u16 sso;
+ u16 ssow;
+ u16 cptlfs;
+ u16 timlfs;
+ u8 cgx_lmac;
+
+ /* Block LF's MSIX vector info */
+ struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
+#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
+ u16 *msix_lfmap; /* Vector to block LF mapping */
+
+ /* NPA contexts */
+ struct qmem *aura_ctx;
+ struct qmem *pool_ctx;
+ struct qmem *npa_qints_ctx;
+ unsigned long *aura_bmap;
+ unsigned long *pool_bmap;
+
+ /* NIX contexts */
+ struct qmem *rq_ctx;
+ struct qmem *sq_ctx;
+ struct qmem *cq_ctx;
+ struct qmem *rss_ctx;
+ struct qmem *cq_ints_ctx;
+ struct qmem *nix_qints_ctx;
+ unsigned long *sq_bmap;
+ unsigned long *rq_bmap;
+ unsigned long *cq_bmap;
+
+ u16 rx_chan_base;
+ u16 tx_chan_base;
+ u8 rx_chan_cnt; /* total number of RX channels */
+ u8 tx_chan_cnt; /* total number of TX channels */
+
+ u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+
+ /* Broadcast pkt replication info */
+ u16 bcast_mce_idx;
+ struct nix_mce_list bcast_mce_list;
+};
+
+struct nix_txsch {
+ struct rsrc_bmap schq;
+ u8 lvl;
+ u16 *pfvf_map;
+};
+
+struct npc_pkind {
+ struct rsrc_bmap rsrc;
+ u32 *pfchan_map;
+};
+
+struct nix_hw {
+ struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
+ struct nix_mcast mcast;
+};
+
+struct rvu_hwinfo {
+ u8 total_pfs; /* Max RVU PFs HW supports */
+ u16 total_vfs; /* Max RVU VFs HW supports */
+ u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
+ u8 cgx;
+ u8 lmac_per_cgx;
+ u8 cgx_links;
+ u8 lbk_links;
+ u8 sdp_links;
+ u8 npc_kpus; /* No of parser units */
+
+ struct rvu_block block[BLK_COUNT]; /* Block info */
+ struct nix_hw *nix0;
+ struct npc_pkind pkind;
+ struct npc_mcam mcam;
+};
+
+struct rvu {
+ void __iomem *afreg_base;
+ void __iomem *pfreg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct rvu_hwinfo *hw;
+ struct rvu_pfvf *pf;
+ struct rvu_pfvf *hwvf;
+ spinlock_t rsrc_lock; /* Serialize resource alloc/free */
+
+ /* Mbox */
+ struct otx2_mbox mbox;
+ struct rvu_work *mbox_wrk;
+ struct otx2_mbox mbox_up;
+ struct rvu_work *mbox_wrk_up;
+ struct workqueue_struct *mbox_wq;
+
+ /* MSI-X */
+ u16 num_vec;
+ char *irq_name;
+ bool *irq_allocated;
+ dma_addr_t msix_base_iova;
+
+ /* CGX */
+#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
+ u8 cgx_mapped_pfs;
+ u8 cgx_cnt; /* available cgx ports */
+ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
+ u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
+ * every cgx lmac port
+ */
+ unsigned long pf_notify_bmap; /* Flags for PF notification */
+ void **cgx_idmap; /* cgx id to cgx data map table */
+ struct work_struct cgx_evh_work;
+ struct workqueue_struct *cgx_evh_wq;
+ spinlock_t cgx_evq_lock; /* cgx event queue lock */
+ struct list_head cgx_evq_head; /* cgx event queue head */
+};
+
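+/* AF registers are accessed at (block << 28) | offset within the AF
+ * register BAR (BAR0); e.g block 0x4, offset 0x10 -> 0x40000010.
+ */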
+static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ writeq(val, rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset)
+{
+ return readq(rvu->afreg_base + ((block << 28) | offset));
+}
+
+static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val)
+{
+ writeq(val, rvu->pfreg_base + offset);
+}
+
+static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
+{
+ return readq(rvu->pfreg_base + offset);
+}
+
+/* Function Prototypes - RVU */
+int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
+void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
+int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
+bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
+int rvu_get_pf(u16 pcifunc);
+struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
+void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
+bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
+int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
+int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
+int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+
+/* RVU HW reg validation */
+enum regmap_block {
+ TXSCHQ_HWREGMAP = 0,
+ MAX_HWREGMAP,
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);
+
+/* NPA/NIX AQ APIs */
+int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
+ int qsize, int inst_size, int res_size);
+void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
+
+/* CGX APIs */
+static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
+{
+ return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
+}
+
+static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
+{
+ *cgx_id = (map >> 4) & 0xF;
+ *lmac_id = (map & 0xF);
+}
+
+int rvu_cgx_probe(struct rvu *rvu);
+void rvu_cgx_wq_destroy(struct rvu *rvu);
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
+int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp);
+int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+ struct cgx_link_info_msg *rsp);
+int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+
+/* NPA APIs */
+int rvu_npa_init(struct rvu *rvu);
+void rvu_npa_freemem(struct rvu *rvu);
+int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp);
+int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+ struct npa_lf_alloc_req *req,
+ struct npa_lf_alloc_rsp *rsp);
+int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+
+/* NIX APIs */
+int rvu_nix_init(struct rvu *rvu);
+void rvu_nix_freemem(struct rvu *rvu);
+int rvu_get_nixlf_count(struct rvu *rvu);
+int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+ struct nix_lf_alloc_req *req,
+ struct nix_lf_alloc_rsp *rsp);
+int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp);
+int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+ struct nix_txsch_alloc_req *req,
+ struct nix_txsch_alloc_rsp *rsp);
+int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+ struct nix_txsch_free_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+ struct nix_txschq_config *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+ struct nix_vtag_config *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+ struct nix_set_mac_addr *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+ struct msg_rsp *rsp);
+
+/* NPC APIs */
+int rvu_npc_init(struct rvu *rvu);
+void rvu_npc_freemem(struct rvu *rvu);
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 *mac_addr);
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, bool allmulti);
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan);
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ int group, int alg_idx, int mcam_index);
+#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
new file mode 100644
index 000000000000..188185c15b4a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu.h"
+#include "cgx.h"
+
+struct cgx_evq_entry {
+ struct list_head evq_node;
+ struct cgx_link_event link_event;
+};
+
+#define M(_name, _id, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
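+/* Expanding M() for each entry in MBOX_UP_CGX_MESSAGES generates one
+ * message allocator per AF-to-PF notification, e.g
+ * otx2_mbox_alloc_msg_CGX_LINK_EVENT() used by cgx_notify_pfs() below.
+ */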
+MBOX_UP_CGX_MESSAGES
+#undef M
+
+/* Returns bitmap of mapped PFs */
+static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+{
+ return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
+}
+
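+/* Pack cgx and lmac ids into one byte as (cgx << 4) | lmac, e.g
+ * cgx 1, lmac 2 -> 0x12; rvu_get_cgx_lmac_id() in rvu.h performs
+ * the inverse unpacking.
+ */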
+static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
+{
+ return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
+}
+
+void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
+{
+ if (cgx_id >= rvu->cgx_cnt)
+ return NULL;
+
+ return rvu->cgx_idmap[cgx_id];
+}
+
+static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ int cgx_cnt = rvu->cgx_cnt;
+ int cgx, lmac_cnt, lmac;
+ int pf = PF_CGXMAP_BASE;
+ int size, free_pkind;
+
+ if (!cgx_cnt)
+ return 0;
+
+ if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+ return -EINVAL;
+
+ /* Alloc map table
+ * An additional entry is required since PF id starts from 1 and
+ * hence entry at offset 0 is invalid.
+ */
+ size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+ rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+ if (!rvu->pf2cgxlmac_map)
+ return -ENOMEM;
+
+ /* Initialize offset 0 with an invalid cgx and lmac id */
+ rvu->pf2cgxlmac_map[0] = 0xFF;
+
+ /* Reverse map table */
+ rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
+ cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+ GFP_KERNEL);
+ if (!rvu->cgxlmac2pf_map)
+ return -ENOMEM;
+
+ rvu->cgx_mapped_pfs = 0;
+ for (cgx = 0; cgx < cgx_cnt; cgx++) {
+ lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
+ rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+ rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
+ free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
+ pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+ rvu->cgx_mapped_pfs++;
+ }
+ }
+ return 0;
+}
+
+static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
+{
+ struct cgx_evq_entry *qentry;
+ unsigned long flags;
+ int err;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
+ if (!qentry)
+ return -ENOMEM;
+
+ /* Lock the event queue before we read the local link status */
+ spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+ err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &qentry->link_event.link_uinfo);
+ qentry->link_event.cgx_id = cgx_id;
+ qentry->link_event.lmac_id = lmac_id;
+ if (err)
+ goto skip_add;
+ list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+skip_add:
+ spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+ if (err) {
+ /* Entry was never queued, don't leak it */
+ kfree(qentry);
+ return err;
+ }
+
+ /* start worker to process the events */
+ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+ return 0;
+}
+
+/* This is called from interrupt context and is expected to be atomic */
+static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
+{
+ struct cgx_evq_entry *qentry;
+ struct rvu *rvu = data;
+
+ /* post event to the event queue */
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+ qentry->link_event = *event;
+ spin_lock(&rvu->cgx_evq_lock);
+ list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
+ spin_unlock(&rvu->cgx_evq_lock);
+
+ /* start worker to process the events */
+ queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
+
+ return 0;
+}
+
+static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
+{
+ struct cgx_link_user_info *linfo;
+ struct cgx_link_info_msg *msg;
+ unsigned long pfmap;
+ int err, pfid;
+
+ linfo = &event->link_uinfo;
+ pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+
+ do {
+ pfid = find_first_bit(&pfmap, 16);
+ clear_bit(pfid, &pfmap);
+
+ /* check if notification is enabled */
+ if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
+ dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
+ event->cgx_id, event->lmac_id,
+ linfo->link_up ? "UP" : "DOWN");
+ continue;
+ }
+
+ /* Send mbox message to PF */
+ msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
+ if (!msg)
+ continue;
+ msg->link_info = *linfo;
+ otx2_mbox_msg_send(&rvu->mbox_up, pfid);
+ err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
+ if (err)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ pfid);
+ } while (pfmap);
+}
+
+static void cgx_evhandler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
+ struct cgx_evq_entry *qentry;
+ struct cgx_link_event *event;
+ unsigned long flags;
+
+ do {
+ /* Dequeue an event */
+ spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
+ struct cgx_evq_entry,
+ evq_node);
+ if (qentry)
+ list_del(&qentry->evq_node);
+ spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->link_event;
+
+ /* process event */
+ cgx_notify_pfs(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+static void cgx_lmac_event_handler_init(struct rvu *rvu)
+{
+ struct cgx_event_cb cb;
+ int cgx, lmac, err;
+ void *cgxd;
+
+ spin_lock_init(&rvu->cgx_evq_lock);
+ INIT_LIST_HEAD(&rvu->cgx_evq_head);
+ INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
+ rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+ if (!rvu->cgx_evh_wq) {
+ dev_err(rvu->dev, "alloc workqueue failed");
+ return;
+ }
+
+ cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
+ cb.data = rvu;
+
+ for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
+ err = cgx_lmac_evh_register(&cb, cgxd, lmac);
+ if (err)
+ dev_err(rvu->dev,
+ "%d:%d handler register failed\n",
+ cgx, lmac);
+ }
+ }
+}
+
+void rvu_cgx_wq_destroy(struct rvu *rvu)
+{
+ if (rvu->cgx_evh_wq) {
+ flush_workqueue(rvu->cgx_evh_wq);
+ destroy_workqueue(rvu->cgx_evh_wq);
+ rvu->cgx_evh_wq = NULL;
+ }
+}
+
+int rvu_cgx_probe(struct rvu *rvu)
+{
+ int i, err;
+
+ /* find available cgx ports */
+ rvu->cgx_cnt = cgx_get_cgx_cnt();
+ if (!rvu->cgx_cnt) {
+ dev_info(rvu->dev, "No CGX devices found!\n");
+ return -ENODEV;
+ }
+
+ rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
+ GFP_KERNEL);
+ if (!rvu->cgx_idmap)
+ return -ENOMEM;
+
+ /* Initialize the cgxdata table */
+ for (i = 0; i < rvu->cgx_cnt; i++)
+ rvu->cgx_idmap[i] = cgx_get_pdata(i);
+
+ /* Map CGX LMAC interfaces to RVU PFs */
+ err = rvu_map_cgx_lmac_pf(rvu);
+ if (err)
+ return err;
+
+ /* Register for CGX events */
+ cgx_lmac_event_handler_init(rvu);
+ return 0;
+}
+
+int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ int stat = 0, err = 0;
+ u64 tx_stat, rx_stat;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+
+ /* Rx stats */
+ while (stat < CGX_RX_STATS_COUNT) {
+ err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
+ if (err)
+ return err;
+ rsp->rx_stats[stat] = rx_stat;
+ stat++;
+ }
+
+ /* Tx stats */
+ stat = 0;
+ while (stat < CGX_TX_STATS_COUNT) {
+ err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
+ if (err)
+ return err;
+ rsp->tx_stats[stat] = tx_stat;
+ stat++;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
+
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+ struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0, i;
+ u64 cfg;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rsp->hdr.rc = rc;
+ cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
+ /* Copy the 48 bit MAC address into rsp->mac_addr, MSB first */
+ for (i = 0; i < ETH_ALEN; i++)
+ rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_promisc_config(cgx_id, lmac_id, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ cgx_lmac_promisc_config(cgx_id, lmac_id, false);
+ return 0;
+}
+
+static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (en) {
+ set_bit(pf, &rvu->pf_notify_bmap);
+ /* Send the current link status to PF */
+ rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
+ } else {
+ clear_bit(pf, &rvu->pf_notify_bmap);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+ struct cgx_link_info_msg *rsp)
+{
+ u8 cgx_id, lmac_id;
+ int pf, err;
+
+ pf = rvu_get_pf(req->hdr.pcifunc);
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &rsp->link_info);
+ return err;
+}
+
+static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, en);
+}
+
+int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
+ return 0;
+}
+
+int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
new file mode 100644
index 000000000000..8890c95831ca
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -0,0 +1,1959 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+
+static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+
+enum mc_tbl_sz {
+ MC_TBL_SZ_256,
+ MC_TBL_SZ_512,
+ MC_TBL_SZ_1K,
+ MC_TBL_SZ_2K,
+ MC_TBL_SZ_4K,
+ MC_TBL_SZ_8K,
+ MC_TBL_SZ_16K,
+ MC_TBL_SZ_32K,
+ MC_TBL_SZ_64K,
+};
+
+enum mc_buf_cnt {
+ MC_BUF_CNT_8,
+ MC_BUF_CNT_16,
+ MC_BUF_CNT_32,
+ MC_BUF_CNT_64,
+ MC_BUF_CNT_128,
+ MC_BUF_CNT_256,
+ MC_BUF_CNT_512,
+ MC_BUF_CNT_1024,
+ MC_BUF_CNT_2048,
+};
+
+/* For now considering MC resources needed for broadcast
+ * pkt replication only, i.e. 256 HWVFs + 12 PFs.
+ */
+#define MC_TBL_SIZE MC_TBL_SZ_512
+#define MC_BUF_CNT MC_BUF_CNT_128
+
+struct mce {
+ struct hlist_node node;
+ u16 idx;
+ u16 pcifunc;
+};
+
+int rvu_get_nixlf_count(struct rvu *rvu)
+{
+ struct rvu_block *block;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return 0;
+ block = &rvu->hw->block[blkaddr];
+ return block->lf.max;
+}
+
+static void nix_mce_list_init(struct nix_mce_list *list, int max)
+{
+ INIT_HLIST_HEAD(&list->head);
+ list->count = 0;
+ list->max = max;
+}
+
+static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
+{
+ int idx;
+
+ if (!mcast)
+ return 0;
+
+ idx = mcast->next_free_mce;
+ mcast->next_free_mce += count;
+ return idx;
+}
+
+static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
+{
+ if (blkaddr == BLKADDR_NIX0 && hw->nix0)
+ return hw->nix0;
+
+ return NULL;
+}
+
+static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
+ int lvl, u16 pcifunc, u16 schq)
+{
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return false;
+
+ txsch = &nix_hw->txsch[lvl];
+ /* Check out of bounds */
+ if (schq >= txsch->schq.max)
+ return false;
+
+ spin_lock(&rvu->rsrc_lock);
+ if (txsch->pfvf_map[schq] != pcifunc) {
+ spin_unlock(&rvu->rsrc_lock);
+ return false;
+ }
+ spin_unlock(&rvu->rsrc_lock);
+ return true;
+}
+
+static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u8 cgx_id, lmac_id;
+ int pkind, pf;
+ int err;
+
+ pf = rvu_get_pf(pcifunc);
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ return 0;
+
+ switch (type) {
+ case NIX_INTF_TYPE_CGX:
+ pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev,
+ "PF_Func 0x%x: Invalid pkind\n", pcifunc);
+ return -EINVAL;
+ }
+ pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
+ pfvf->tx_chan_base = pfvf->rx_chan_base;
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
+ rvu_npc_set_pkind(rvu, pkind, pfvf);
+ break;
+ case NIX_INTF_TYPE_LBK:
+ break;
+ }
+
+ /* Add a UCAST forwarding rule in MCAM matching the MAC address
+ * of the RVU PF/VF this NIXLF is attached to.
+ */
+ rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, pfvf->mac_addr);
+
+ /* Add this PF_FUNC to bcast pkt replication list */
+ err = nix_update_bcast_mce_list(rvu, pcifunc, true);
+ if (err) {
+ dev_err(rvu->dev,
+ "Bcast list, failed to enable PF_FUNC 0x%x\n",
+ pcifunc);
+ return err;
+ }
+
+ rvu_npc_install_bcast_match_entry(rvu, pcifunc,
+ nixlf, pfvf->rx_chan_base);
+
+ return 0;
+}
+
+static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
+{
+ int err;
+
+ /* Remove this PF_FUNC from bcast pkt replication list */
+ err = nix_update_bcast_mce_list(rvu, pcifunc, false);
+ if (err) {
+ dev_err(rvu->dev,
+ "Bcast list, failed to disable PF_FUNC 0x%x\n",
+ pcifunc);
+ }
+
+ /* Free and disable any MCAM entries used by this NIX LF */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+}
+
+static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
+ u64 format, bool v4, u64 *fidx)
+{
+ struct nix_lso_format field = {0};
+
+ /* IP's Length field */
+ field.layer = NIX_TXLAYER_OL3;
+ /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
+ field.offset = v4 ? 2 : 4;
+ field.sizem1 = 1; /* i.e 2 bytes */
+ field.alg = NIX_LSOALG_ADD_PAYLEN;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+
+ /* No ID field in IPv6 header */
+ if (!v4)
+ return;
+
+ /* IP's ID field */
+ field.layer = NIX_TXLAYER_OL3;
+ field.offset = 4;
+ field.sizem1 = 1; /* i.e 2 bytes */
+ field.alg = NIX_LSOALG_ADD_SEGNUM;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+}
+
+static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
+ u64 format, u64 *fidx)
+{
+ struct nix_lso_format field = {0};
+
+ /* TCP's sequence number field */
+ field.layer = NIX_TXLAYER_OL4;
+ field.offset = 4;
+ field.sizem1 = 3; /* i.e 4 bytes */
+ field.alg = NIX_LSOALG_ADD_OFFSET;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+
+ /* TCP's flags field */
+ field.layer = NIX_TXLAYER_OL4;
+ field.offset = 12;
+ field.sizem1 = 0; /* not needed */
+ field.alg = NIX_LSOALG_TCP_FLAGS;
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
+ *(u64 *)&field);
+}
+
+static void nix_setup_lso(struct rvu *rvu, int blkaddr)
+{
+ u64 cfg, idx, fidx = 0;
+
+ /* Enable LSO */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
+ /* For TSO, set first and middle segment flags to
+ * mask out PSH, RST & FIN flags in TCP packet
+ */
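+ /* 0xFFF2 keeps all flag-mask bits except bit 0 (FIN), bit 2 (RST)
+ * and bit 3 (PSH) of the TCP flags field.
+ */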
+ cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
+ cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
+ rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
+
+ /* Configure format fields for TCPv4 segmentation offload */
+ idx = NIX_LSO_FORMAT_IDX_TSOV4;
+ nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
+ nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+ /* Set rest of the fields to NOP */
+ for (; fidx < 8; fidx++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+ }
+
+ /* Configure format fields for TCPv6 segmentation offload */
+ idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ fidx = 0;
+ nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
+ nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
+
+ /* Set rest of the fields to NOP */
+ for (; fidx < 8; fidx++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
+ }
+}
+
+static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+ kfree(pfvf->rq_bmap);
+ kfree(pfvf->sq_bmap);
+ kfree(pfvf->cq_bmap);
+ if (pfvf->rq_ctx)
+ qmem_free(rvu->dev, pfvf->rq_ctx);
+ if (pfvf->sq_ctx)
+ qmem_free(rvu->dev, pfvf->sq_ctx);
+ if (pfvf->cq_ctx)
+ qmem_free(rvu->dev, pfvf->cq_ctx);
+ if (pfvf->rss_ctx)
+ qmem_free(rvu->dev, pfvf->rss_ctx);
+ if (pfvf->nix_qints_ctx)
+ qmem_free(rvu->dev, pfvf->nix_qints_ctx);
+ if (pfvf->cq_ints_ctx)
+ qmem_free(rvu->dev, pfvf->cq_ints_ctx);
+
+ pfvf->rq_bmap = NULL;
+ pfvf->cq_bmap = NULL;
+ pfvf->sq_bmap = NULL;
+ pfvf->rq_ctx = NULL;
+ pfvf->sq_ctx = NULL;
+ pfvf->cq_ctx = NULL;
+ pfvf->rss_ctx = NULL;
+ pfvf->nix_qints_ctx = NULL;
+ pfvf->cq_ints_ctx = NULL;
+}
+
+static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
+ struct rvu_pfvf *pfvf, int nixlf,
+ int rss_sz, int rss_grps, int hwctx_size)
+{
+ int err, grp, num_indices;
+
+ /* RSS is not requested for this NIXLF */
+ if (!rss_sz)
+ return 0;
+ num_indices = rss_sz * rss_grps;
+
+ /* Alloc NIX RSS HW context memory and config the base */
+ err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
+ (u64)pfvf->rss_ctx->iova);
+
+ /* Config full RSS table size, enable RSS and caching */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
+ BIT_ULL(36) | BIT_ULL(4) |
+ ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
+ /* Config RSS group offset and sizes */
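+ /* e.g with rss_sz of 64 the size field is ilog2(64) - 1 = 5 and
+ * group N starts at index 64 * N.
+ */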
+ for (grp = 0; grp < rss_grps; grp++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
+ ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
+ return 0;
+}
+
+static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ struct nix_aq_inst_s *inst)
+{
+ struct admin_queue *aq = block->aq;
+ struct nix_aq_res_s *result;
+ int timeout = 1000;
+ u64 reg, head;
+
+ result = (struct nix_aq_res_s *)aq->res->base;
+
+ /* Get current head pointer where to append this instruction */
+ reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
+ head = (reg >> 4) & AQ_PTR_MASK;
+
+ memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+ (void *)inst, aq->inst->entry_sz);
+ memset(result, 0, sizeof(*result));
+ /* sync into memory */
+ wmb();
+
+ /* Ring the doorbell and wait for result */
+ rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
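+ /* Poll the result's completion code; each iteration delays 1us,
+ * so give up after roughly 1ms.
+ */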
+ while (result->compcode == NIX_AQ_COMP_NOTDONE) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ }
+
+ if (result->compcode != NIX_AQ_COMP_GOOD)
+ /* TODO: Replace this with some error code */
+ return -EBUSY;
+
+ return 0;
+}
+
+static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, blkaddr, rc = 0;
+ struct nix_aq_inst_s inst;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+ struct rvu_pfvf *pfvf;
+ void *ctx, *mask;
+ bool ena;
+ u64 cfg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ if (!aq) {
+ dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
+ return NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ switch (req->ctype) {
+ case NIX_AQ_CTYPE_RQ:
+ /* Check if index exceeds max no of queues */
+ if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_SQ:
+ if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_CQ:
+ if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_RSS:
+ /* Check if RSS is enabled and qidx is within range */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
+ if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
+ (req->qidx >= (256UL << (cfg & 0xF))))
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ case NIX_AQ_CTYPE_MCE:
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
+ /* Check if index exceeds MCE list length */
+ if (!hw->nix0->mcast.mce_ctx ||
+ (req->qidx >= (256UL << (cfg & 0xF))))
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+
+ /* Adding multicast lists for requests from PF/VFs is not
+ * yet supported, so ignore this.
+ */
+ if (rsp)
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ break;
+ default:
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ if (rc)
+ return rc;
+
+ /* Check if the SMQ pointed to by the SQ context belongs to this PF/VF */
+ if (req->ctype == NIX_AQ_CTYPE_SQ &&
+ req->op != NIX_AQ_INSTOP_WRITE) {
+ if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
+ pcifunc, req->sq.smq))
+ return NIX_AF_ERR_AQ_ENQUEUE;
+ }
+
+ memset(&inst, 0, sizeof(struct nix_aq_inst_s));
+ inst.lf = nixlf;
+ inst.cindex = req->qidx;
+ inst.ctype = req->ctype;
+ inst.op = req->op;
+ /* Enqueuing multiple instructions at once is not supported yet,
+ * so always choose the first entry in result memory.
+ */
+ inst.res_addr = (u64)aq->res->iova;
+
+ /* Clean result + context memory */
+ memset(aq->res->base, 0, aq->res->entry_sz);
+ /* Context needs to be written at RES_ADDR + 128 */
+ ctx = aq->res->base + 128;
+ /* Mask needs to be written at RES_ADDR + 256 */
+ mask = aq->res->base + 256;
+
+ switch (req->op) {
+ case NIX_AQ_INSTOP_WRITE:
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(mask, &req->rq_mask,
+ sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(mask, &req->sq_mask,
+ sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(mask, &req->cq_mask,
+ sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(mask, &req->rss_mask,
+ sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(mask, &req->mce_mask,
+ sizeof(struct nix_rx_mce_s));
+ /* Fall through */
+ case NIX_AQ_INSTOP_INIT:
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ break;
+ case NIX_AQ_INSTOP_NOP:
+ case NIX_AQ_INSTOP_READ:
+ case NIX_AQ_INSTOP_LOCK:
+ case NIX_AQ_INSTOP_UNLOCK:
+ break;
+ default:
+ rc = NIX_AF_ERR_AQ_ENQUEUE;
+ return rc;
+ }
+
+ spin_lock(&aq->lock);
+
+ /* Submit the instruction to AQ */
+ rc = nix_aq_enqueue_wait(rvu, block, &inst);
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
+ if (req->op == NIX_AQ_INSTOP_INIT) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
+ __set_bit(req->qidx, pfvf->rq_bmap);
+ if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
+ __set_bit(req->qidx, pfvf->sq_bmap);
+ if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
+ __set_bit(req->qidx, pfvf->cq_bmap);
+ }
+
+ if (req->op == NIX_AQ_INSTOP_WRITE) {
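+ /* A masked write takes the written bit wherever the mask is
+ * set and the cached state elsewhere:
+ * ena = (req_ena & mask) | (cached_ena & ~mask)
+ */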
+ if (req->ctype == NIX_AQ_CTYPE_RQ) {
+ ena = (req->rq.ena & req->rq_mask.ena) |
+ (test_bit(req->qidx, pfvf->rq_bmap) &
+ ~req->rq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->rq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->rq_bmap);
+ }
+ if (req->ctype == NIX_AQ_CTYPE_SQ) {
+ ena = (req->sq.ena & req->sq_mask.ena) |
+ (test_bit(req->qidx, pfvf->sq_bmap) &
+ ~req->sq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->sq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->sq_bmap);
+ }
+ if (req->ctype == NIX_AQ_CTYPE_CQ) {
+ ena = (req->cq.ena & req->cq_mask.ena) |
+ (test_bit(req->qidx, pfvf->cq_bmap) &
+ ~req->cq_mask.ena);
+ if (ena)
+ __set_bit(req->qidx, pfvf->cq_bmap);
+ else
+ __clear_bit(req->qidx, pfvf->cq_bmap);
+ }
+ }
+
+ if (rsp) {
+ /* Copy read context into mailbox */
+ if (req->op == NIX_AQ_INSTOP_READ) {
+ if (req->ctype == NIX_AQ_CTYPE_RQ)
+ memcpy(&rsp->rq, ctx,
+ sizeof(struct nix_rq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_SQ)
+ memcpy(&rsp->sq, ctx,
+ sizeof(struct nix_sq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_CQ)
+ memcpy(&rsp->cq, ctx,
+ sizeof(struct nix_cq_ctx_s));
+ else if (req->ctype == NIX_AQ_CTYPE_RSS)
+ memcpy(&rsp->rss, ctx,
+ sizeof(struct nix_rsse_s));
+ else if (req->ctype == NIX_AQ_CTYPE_MCE)
+ memcpy(&rsp->mce, ctx,
+ sizeof(struct nix_rx_mce_s));
+ }
+ }
+
+ spin_unlock(&aq->lock);
+ return 0;
+}
+
+static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct nix_aq_enq_req aq_req;
+ unsigned long *bmap;
+ int qidx, q_cnt = 0;
+ int err = 0, rc;
+
+ if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
+ return NIX_AF_ERR_AQ_ENQUEUE;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+ if (req->ctype == NIX_AQ_CTYPE_CQ) {
+ aq_req.cq.ena = 0;
+ aq_req.cq_mask.ena = 1;
+ q_cnt = pfvf->cq_ctx->qsize;
+ bmap = pfvf->cq_bmap;
+ }
+ if (req->ctype == NIX_AQ_CTYPE_SQ) {
+ aq_req.sq.ena = 0;
+ aq_req.sq_mask.ena = 1;
+ q_cnt = pfvf->sq_ctx->qsize;
+ bmap = pfvf->sq_bmap;
+ }
+ if (req->ctype == NIX_AQ_CTYPE_RQ) {
+ aq_req.rq.ena = 0;
+ aq_req.rq_mask.ena = 1;
+ q_cnt = pfvf->rq_ctx->qsize;
+ bmap = pfvf->rq_bmap;
+ }
+
+ aq_req.ctype = req->ctype;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+
+ for (qidx = 0; qidx < q_cnt; qidx++) {
+ if (!test_bit(qidx, bmap))
+ continue;
+ aq_req.qidx = qidx;
+ rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ if (rc) {
+ err = rc;
+ dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+ (req->ctype == NIX_AQ_CTYPE_CQ) ?
+ "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
+ "RQ" : "SQ"), qidx);
+ }
+ }
+
+ return err;
+}
+
+int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, req, rsp);
+}
+
+int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_lf_hwctx_disable(rvu, req);
+}
+
+int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+ struct nix_lf_alloc_req *req,
+ struct nix_lf_alloc_rsp *rsp)
+{
+ int nixlf, qints, hwctx_size, err, rc = 0;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, ctx_cfg;
+ int blkaddr;
+
+ if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
+ return NIX_AF_ERR_PARAM;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* If RSS is being enabled, check if requested config is valid.
+ * RSS table size should be power of two, otherwise
+ * RSS_GRP::OFFSET + adder might go beyond that group or
+ * won't be able to use entire table.
+ */
+ if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
+ !is_power_of_2(req->rss_sz)))
+ return NIX_AF_ERR_RSS_SIZE_INVALID;
+
+ if (req->rss_sz &&
+ (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
+ return NIX_AF_ERR_RSS_GRPS_INVALID;
+
+ /* Reset this NIX LF */
+ err = rvu_lf_reset(rvu, block, nixlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+ block->addr - BLKADDR_NIX0, nixlf);
+ return NIX_AF_ERR_LF_RESET;
+ }
+
+ ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
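+ /* NIX_AF_CONST3 packs the log2 of each HW context size into
+ * 4bit fields; e.g bits 7:4 give log2 of the RQ context size.
+ */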
+
+ /* Alloc NIX RQ HW context memory and config the base */
+ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->rq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
+ (u64)pfvf->rq_ctx->iova);
+
+ /* Set caching and queue count in HW */
+ cfg = BIT_ULL(36) | (req->rq_cnt - 1);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
+
+ /* Alloc NIX SQ HW context memory and config the base */
+ hwctx_size = 1UL << (ctx_cfg & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->sq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
+ (u64)pfvf->sq_ctx->iova);
+ cfg = BIT_ULL(36) | (req->sq_cnt - 1);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
+
+ /* Alloc NIX CQ HW context memory and config the base */
+ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
+ if (!pfvf->cq_bmap)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
+ (u64)pfvf->cq_ctx->iova);
+ cfg = BIT_ULL(36) | (req->cq_cnt - 1);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
+
+ /* Initialize receive side scaling (RSS) */
+ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
+ err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
+ req->rss_sz, req->rss_grps, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ /* Alloc memory for CQINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 24) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
+ (u64)pfvf->cq_ints_ctx->iova);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
+
+ /* Alloc memory for QINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 12) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
+ (u64)pfvf->nix_qints_ctx->iova);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
+
+ /* Enable LMTST for this NIX LF */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
+
+ /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
+ * If the requester has sent 'RVU_DEFAULT_PF_FUNC', use this NIX LF's
+ * own PCIFUNC.
+ */
+ if (req->npa_func == RVU_DEFAULT_PF_FUNC)
+ cfg = pcifunc;
+ else
+ cfg = req->npa_func;
+
+ if (req->sso_func == RVU_DEFAULT_PF_FUNC)
+ cfg |= (u64)pcifunc << 16;
+ else
+ cfg |= (u64)req->sso_func << 16;
+
+ cfg |= (u64)req->xqe_sz << 33;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
+
+ /* Config Rx pkt length, csum checks and apad enable / disable */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
+
+ err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
+ if (err)
+ goto free_mem;
+
+ goto exit;
+
+free_mem:
+ nix_ctx_free(rvu, pfvf);
+ rc = -ENOMEM;
+
+exit:
+ /* Set macaddr of this PF/VF */
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
+
+ /* set SQB size info */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
+ rsp->sqb_size = (cfg >> 34) & 0xFFFF;
+ rsp->rx_chan_base = pfvf->rx_chan_base;
+ rsp->tx_chan_base = pfvf->tx_chan_base;
+ rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
+ rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
+ rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
+ rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ return rc;
+}
+
+int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_interface_deinit(rvu, pcifunc, nixlf);
+
+ /* Reset this NIX LF */
+ err = rvu_lf_reset(rvu, block, nixlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
+ block->addr - BLKADDR_NIX0, nixlf);
+ return NIX_AF_ERR_LF_RESET;
+ }
+
+ nix_ctx_free(rvu, pfvf);
+
+ return 0;
+}
+
+/* Disable shaping of pkts by a scheduler queue
+ * at a given scheduler level.
+ */
+static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ u64 cir_reg = 0, pir_reg = 0;
+ u64 cfg;
+
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ cir_reg = NIX_AF_TL1X_CIR(schq);
+ pir_reg = 0; /* PIR not available at TL1 */
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ cir_reg = NIX_AF_TL2X_CIR(schq);
+ pir_reg = NIX_AF_TL2X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ cir_reg = NIX_AF_TL3X_CIR(schq);
+ pir_reg = NIX_AF_TL3X_PIR(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ cir_reg = NIX_AF_TL4X_CIR(schq);
+ pir_reg = NIX_AF_TL4X_PIR(schq);
+ break;
+ }
+
+ if (!cir_reg)
+ return;
+ cfg = rvu_read64(rvu, blkaddr, cir_reg);
+ rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
+
+ if (!pir_reg)
+ return;
+ cfg = rvu_read64(rvu, blkaddr, pir_reg);
+ rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
+}
+
+static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link;
+
+ /* Reset TL4's SDP link config */
+ if (lvl == NIX_TXSCH_LVL_TL4)
+ rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
+
+ if (lvl != NIX_TXSCH_LVL_TL2)
+ return;
+
+ /* Reset TL2's CGX or LBK link config */
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
+}
+
+int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+ struct nix_txsch_alloc_req *req,
+ struct nix_txsch_alloc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_txsch *txsch;
+ int lvl, idx, req_schq;
+ struct rvu_pfvf *pfvf;
+ struct nix_hw *nix_hw;
+ int blkaddr, rc = 0;
+ u16 schq;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ spin_lock(&rvu->rsrc_lock);
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ req_schq = req->schq_contig[lvl] + req->schq[lvl];
+
+ /* There are only 28 TL1s */
+ if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
+ goto err;
+
+ /* Check if request is valid */
+ if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ goto err;
+
+ /* If contiguous queues are needed, check for availability */
+ if (req->schq_contig[lvl] &&
+ !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
+ goto err;
+
+ /* Check if full request can be accommodated */
+ if (req_schq > rvu_rsrc_free_count(&txsch->schq))
+ goto err;
+ }
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ rsp->schq_contig[lvl] = req->schq_contig[lvl];
+ rsp->schq[lvl] = req->schq[lvl];
+
+ schq = 0;
+ /* Alloc contiguous queues first */
+ if (req->schq_contig[lvl]) {
+ schq = rvu_alloc_rsrc_contig(&txsch->schq,
+ req->schq_contig[lvl]);
+
+ for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
+ txsch->pfvf_map[schq] = pcifunc;
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ rsp->schq_contig_list[lvl][idx] = schq;
+ schq++;
+ }
+ }
+
+ /* Alloc non-contiguous queues */
+ for (idx = 0; idx < req->schq[lvl]; idx++) {
+ schq = rvu_alloc_rsrc(&txsch->schq);
+ txsch->pfvf_map[schq] = pcifunc;
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ rsp->schq_list[lvl][idx] = schq;
+ }
+ }
+ goto exit;
+err:
+ rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
+exit:
+ spin_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, nixlf, lvl, schq, err;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* Disable TL2/3 queue links before SMQ flush */
+ spin_lock(&rvu->rsrc_lock);
+ for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
+ continue;
+
+ txsch = &nix_hw->txsch[lvl];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (txsch->pfvf_map[schq] != pcifunc)
+ continue;
+ nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ }
+ }
+
+ /* Flush SMQs */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (txsch->pfvf_map[schq] != pcifunc)
+ continue;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+
+ /* Wait for flush to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
+ if (err) {
+ dev_err(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
+ }
+ }
+
+ /* Now free scheduler queues to free pool */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (txsch->pfvf_map[schq] != pcifunc)
+ continue;
+ rvu_free_rsrc(&txsch->schq, schq);
+ txsch->pfvf_map[schq] = 0;
+ }
+ }
+ spin_unlock(&rvu->rsrc_lock);
+
+ /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+ if (err)
+ dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
+
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+ struct nix_txsch_free_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_txschq_free(rvu, req->hdr.pcifunc);
+}
+
+static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
+ int lvl, u64 reg, u64 regval)
+{
+ u64 regbase = reg & 0xFFFF;
+ u16 schq, parent;
+
+ if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
+ return false;
+
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ /* Check if this schq belongs to this PF/VF or not */
+ if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
+ return false;
+
+ parent = (regval >> 16) & 0x1FF;
+ /* Validate MDQ's TL4 parent */
+ if (regbase == NIX_AF_MDQX_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
+ return false;
+
+ /* Validate TL4's TL3 parent */
+ if (regbase == NIX_AF_TL4X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
+ return false;
+
+ /* Validate TL3's TL2 parent */
+ if (regbase == NIX_AF_TL3X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
+ return false;
+
+ /* Validate TL2's TL1 parent */
+ if (regbase == NIX_AF_TL2X_PARENT(0) &&
+ !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
+ return false;
+
+ return true;
+}
+
+int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+ struct nix_txschq_config *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ u64 reg, regval, schq_regbase;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, err;
+ int nixlf;
+
+ if (req->lvl >= NIX_TXSCH_LVL_CNT ||
+ req->num_regs > MAX_REGS_PER_MBOX_MSG)
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ txsch = &nix_hw->txsch[req->lvl];
+ for (idx = 0; idx < req->num_regs; idx++) {
+ reg = req->reg[idx];
+ regval = req->regval[idx];
+ schq_regbase = reg & 0xFFFF;
+
+ if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
+ txsch->lvl, reg, regval))
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+
+ /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
+ if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ pcifunc, 0);
+ regval &= ~(0x7FULL << 24);
+ regval |= ((u64)nixlf << 24);
+ }
+
+ rvu_write64(rvu, blkaddr, reg, regval);
+
+ /* Check for SMQ flush, if so, poll for its completion */
+ if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
+ (regval & BIT_ULL(49))) {
+ err = rvu_poll_reg(rvu, blkaddr,
+ reg, BIT_ULL(49), true);
+ if (err)
+ return NIX_AF_SMQ_FLUSH_FAILED;
+ }
+ }
+ return 0;
+}
+
+static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ u64 regval = 0;
+
+#define NIX_VTAGTYPE_MAX 0x8ull
+#define NIX_VTAGSIZE_MASK 0x7ull
+#define NIX_VTAGSTRIP_CAP_MASK 0x30ull
+
+ if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX ||
+ req->vtag_size > VTAGSIZE_T8)
+ return -EINVAL;
+
+ regval = rvu_read64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type));
+
+ if (req->rx.strip_vtag && req->rx.capture_vtag)
+ regval |= BIT_ULL(4) | BIT_ULL(5);
+ else if (req->rx.strip_vtag)
+ regval |= BIT_ULL(4);
+ else
+ regval &= ~(BIT_ULL(4) | BIT_ULL(5));
+
+ regval &= ~NIX_VTAGSIZE_MASK;
+ regval |= req->vtag_size & NIX_VTAGSIZE_MASK;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+ struct nix_vtag_config *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->cfg_type) {
+ err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
+ if (err)
+ return NIX_AF_ERR_PARAM;
+ } else {
+ /* TODO: handle tx vtag configuration */
+ return 0;
+ }
+
+ return 0;
+}
+
+static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
+ u16 pcifunc, int next, bool eol)
+{
+ struct nix_aq_enq_req aq_req;
+ int err;
+
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = NIX_AQ_CTYPE_MCE;
+ aq_req.op = op;
+ aq_req.qidx = mce;
+
+ /* Forward bcast pkts to RQ0, RSS not needed */
+ aq_req.mce.op = 0;
+ aq_req.mce.index = 0;
+ aq_req.mce.eol = eol;
+ aq_req.mce.pf_func = pcifunc;
+ aq_req.mce.next = next;
+
+ /* All fields valid */
+ *(u64 *)(&aq_req.mce_mask) = ~0ULL;
+
+ err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ if (err) {
+ dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
+ rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ return err;
+ }
+ return 0;
+}
+
+static int nix_update_mce_list(struct nix_mce_list *mce_list,
+ u16 pcifunc, int idx, bool add)
+{
+ struct mce *mce, *tail = NULL;
+ bool delete = false;
+
+ /* Scan through the current list */
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ /* If already exists, then delete */
+ if (mce->pcifunc == pcifunc && !add) {
+ delete = true;
+ break;
+ }
+ tail = mce;
+ }
+
+ if (delete) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ mce_list->count--;
+ return 0;
+ }
+
+ if (!add)
+ return 0;
+
+ /* Add a new one to the list, at the tail */
+ mce = kzalloc(sizeof(*mce), GFP_KERNEL);
+ if (!mce)
+ return -ENOMEM;
+ mce->idx = idx;
+ mce->pcifunc = pcifunc;
+ if (!tail)
+ hlist_add_head(&mce->node, &mce_list->head);
+ else
+ hlist_add_behind(&mce->node, &tail->node);
+ mce_list->count++;
+ return 0;
+}
+
+static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
+{
+ int err = 0, idx, next_idx, count;
+ struct nix_mce_list *mce_list;
+ struct mce *mce, *next_mce;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return 0;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return 0;
+
+ mcast = &nix_hw->mcast;
+
+ /* Get this PF/VF func's MCE index */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
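+ /* Each PF owns a contiguous MCE index range sized for itself plus
+ * all its VFs; FUNC 0 maps to the PF's own slot, followed by the
+ * VFs (see nix_setup_bcast_tables()).
+ */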
+
+ mce_list = &pfvf->bcast_mce_list;
+ if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
+ dev_err(rvu->dev,
+ "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
+ __func__, idx, mce_list->max,
+ pcifunc >> RVU_PFVF_PF_SHIFT);
+ return -EINVAL;
+ }
+
+ spin_lock(&mcast->mce_lock);
+
+ err = nix_update_mce_list(mce_list, pcifunc, idx, add);
+ if (err)
+ goto end;
+
+ /* Disable MCAM entry in NPC */
+
+ if (!mce_list->count)
+ goto end;
+ count = mce_list->count;
+
+ /* Dump the updated list to HW */
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ next_idx = 0;
+ count--;
+ if (count) {
+ next_mce = hlist_entry(mce->node.next,
+ struct mce, node);
+ next_idx = next_mce->idx;
+ }
+ /* EOL should be set in last MCE */
+ err = nix_setup_mce(rvu, mce->idx,
+ NIX_AQ_INSTOP_WRITE, mce->pcifunc,
+ next_idx, count ? false : true);
+ if (err)
+ goto end;
+ }
+
+end:
+ spin_unlock(&mcast->mce_lock);
+ return err;
+}
+
+static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_mcast *mcast = &nix_hw->mcast;
+ int err, pf, numvfs, idx;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+ u64 cfg;
+
+ /* Skip PF0 (i.e. the AF) */
+ for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ /* If PF is not enabled, nothing to do */
+ if (!((cfg >> 20) & 0x01))
+ continue;
+ /* Get numVFs attached to this PF */
+ numvfs = (cfg >> 12) & 0xFF;
+
+ pfvf = &rvu->pf[pf];
+ /* Save the start MCE */
+ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+
+ nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+
+ for (idx = 0; idx < (numvfs + 1); idx++) {
+ /* idx-0 is for PF, followed by VFs */
+ pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc |= idx;
+ /* Add dummy entries now, so that we don't have to check
+ * whether the AQ_OP should be INIT or WRITE later on.
+ * These will be updated when a NIXLF is attached to or
+ * detached from these PF/VFs.
+ */
+ err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_mcast *mcast = &nix_hw->mcast;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int err, size;
+
+ size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
+ size = (1ULL << size);
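+ /* Bits 16..19 of NIX_AF_CONST3 are taken here as log2 of the
+ * MCE context size in bytes.
+ */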
+
+ /* Alloc memory for multicast/mirror replication entries */
+ err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
+ (256UL << MC_TBL_SIZE), size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
+ (u64)mcast->mce_ctx->iova);
+
+ /* Set max list length to the max number of VFs per PF, plus the PF itself */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
+ BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
+
+ /* Alloc memory for multicast replication buffers */
+ size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
+ err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
+ (8UL << MC_BUF_CNT), size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
+ (u64)mcast->mcast_buf->iova);
+
+ /* Alloc pkind for NIX internal RX multicast/mirror replay */
+ mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
+ BIT_ULL(63) | (mcast->replay_pkind << 24) |
+ BIT_ULL(20) | MC_BUF_CNT);
+
+ spin_lock_init(&mcast->mce_lock);
+
+ return nix_setup_bcast_tables(rvu, nix_hw);
+}
+
+static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
+{
+ struct nix_txsch *txsch;
+ u64 cfg, reg;
+ int err, lvl;
+
+ /* Get the scheduler queue count of each type and allocate
+ * a bitmap for each, for alloc/free/attach operations.
+ */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ txsch->lvl = lvl;
+ switch (lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ reg = NIX_AF_MDQ_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg = NIX_AF_TL4_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg = NIX_AF_TL3_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg = NIX_AF_TL2_CONST;
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ reg = NIX_AF_TL1_CONST;
+ break;
+ }
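+ /* Low 16 bits of each level's CONST register give the number
+ * of scheduler queues available at that level.
+ */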
+ cfg = rvu_read64(rvu, blkaddr, reg);
+ txsch->schq.max = cfg & 0xFFFF;
+ err = rvu_alloc_bitmap(&txsch->schq);
+ if (err)
+ return err;
+
+ /* Allocate memory for scheduler queues to
+ * PF/VF pcifunc mapping info.
+ */
+ txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!txsch->pfvf_map)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int i, nixlf, blkaddr;
+ u64 stats;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* Get stats count supported by HW */
+ stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+
+ /* Reset tx stats */
+ for (i = 0; i < ((stats >> 24) & 0xFF); i++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
+
+ /* Reset rx stats */
+ for (i = 0; i < ((stats >> 32) & 0xFF); i++)
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
+
+ return 0;
+}
+
+/* Returns the ALG index to be set into NPC_RX_ACTION */
+static int get_flowkey_alg_idx(u32 flow_cfg)
+{
+ u32 ip_cfg;
+
+ flow_cfg &= ~FLOW_KEY_TYPE_PORT;
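+ /* PORT is masked off so the combinations below are matched on
+ * L3/L4 types alone; a PORT-only config falls through to
+ * FLOW_KEY_ALG_PORT at the end.
+ */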
+ ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
+ if (flow_cfg == ip_cfg)
+ return FLOW_KEY_ALG_IP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP))
+ return FLOW_KEY_ALG_TCP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP))
+ return FLOW_KEY_ALG_UDP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_SCTP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP))
+ return FLOW_KEY_ALG_TCP_UDP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_TCP_SCTP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_UDP_SCTP;
+ else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP |
+ FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
+ return FLOW_KEY_ALG_TCP_UDP_SCTP;
+
+ return FLOW_KEY_ALG_PORT;
+}
+
+int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int alg_idx, nixlf, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
+
+ rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
+ alg_idx, req->mcam_index);
+ return 0;
+}
+
+static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+{
+ struct nix_rx_flowkey_alg *field = NULL;
+ int idx, key_type;
+
+ if (!alg)
+ return;
+
+ /* FIELD0: IPv4
+ * FIELD1: IPv6
+ * FIELD2: TCP/UDP/SCTP/ALL
+ * FIELD3: Unused
+ * FIELD4: Unused
+ *
+ * Each of the 32 possible flow key algorithm definitions should
+ * fall into the above incremental config (except ALG0); otherwise
+ * a single NPC MCAM entry is not sufficient to support RSS.
+ *
+ * If a different definition or combination is needed, the NPC MCAM
+ * has to be programmed to filter such packets and its action should
+ * point to this definition to calculate the flowtag or hash.
+ */
+ for (idx = 0; idx < 32; idx++) {
+ key_type = flow_cfg & BIT_ULL(idx);
+ if (!key_type)
+ continue;
+ switch (key_type) {
+ case FLOW_KEY_TYPE_PORT:
+ field = &alg[0];
+ field->sel_chan = true;
+ /* This should be set to 1 when SEL_CHAN is set */
+ field->bytesm1 = 1;
+ break;
+ case FLOW_KEY_TYPE_IPV4:
+ field = &alg[0];
+ field->lid = NPC_LID_LC;
+ field->ltype_match = NPC_LT_LC_IP;
+ field->hdr_offset = 12; /* SIP offset */
+ field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
+ field->ltype_mask = 0xF; /* Match only IPv4 */
+ break;
+ case FLOW_KEY_TYPE_IPV6:
+ field = &alg[1];
+ field->lid = NPC_LID_LC;
+ field->ltype_match = NPC_LT_LC_IP6;
+ field->hdr_offset = 8; /* SIP offset */
+ field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
+ field->ltype_mask = 0xF; /* Match only IPv6 */
+ break;
+ case FLOW_KEY_TYPE_TCP:
+ case FLOW_KEY_TYPE_UDP:
+ case FLOW_KEY_TYPE_SCTP:
+ field = &alg[2];
+ field->lid = NPC_LID_LD;
+ field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
+ if (key_type == FLOW_KEY_TYPE_TCP)
+ field->ltype_match |= NPC_LT_LD_TCP;
+ else if (key_type == FLOW_KEY_TYPE_UDP)
+ field->ltype_match |= NPC_LT_LD_UDP;
+ else if (key_type == FLOW_KEY_TYPE_SCTP)
+ field->ltype_match |= NPC_LT_LD_SCTP;
+ field->key_offset = 32; /* After IPv4/v6 SIP, DIP */
+ field->ltype_mask = ~field->ltype_match;
+ break;
+ }
+ if (field)
+ field->ena = 1;
+ field = NULL;
+ }
+}
+
+static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+{
+#define FIELDS_PER_ALG 5
+ u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
+ u32 flowkey_cfg, minkey_cfg;
+ int alg, fid;
+
+ memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
+
+ /* Only incoming channel number */
+ flowkey_cfg = FLOW_KEY_TYPE_PORT;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg);
+
+ /* For an incoming pkt, if none of the fields match then the
+ * flowkey will be zero and hence the generated tag will also be
+ * zero. The RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET]
+ * will then be used to queue the packet.
+ */
+ */
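+ /* Example: for FLOW_KEY_ALG_TCP below, set_flowkey_fields() fills
+ * alg[0] with IPv4 SIP+DIP, alg[1] with IPv6 SIP+DIP and alg[2]
+ * with TCP source/dest ports, all contributing to one hash.
+ */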
+
+ /* IPv4/IPv6 SIP/DIPs */
+ flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg);
+
+ /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ minkey_cfg = flowkey_cfg;
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg);
+
+ /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg);
+
+ /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], flowkey_cfg);
+
+ /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg);
+
+ /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg);
+
+ /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg);
+
+ /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
+ flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP |
+ FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
+ set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP],
+ flowkey_cfg);
+
+ for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) {
+ for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
+ field[alg][fid]);
+ }
+}
+
+int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+ struct nix_set_mac_addr *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+
+ rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, req->mac_addr);
+ return 0;
+}
+
+int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+ struct msg_rsp *rsp)
+{
+ bool allmulti = false, disable_promisc = false;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->mode & NIX_RX_MODE_PROMISC)
+ allmulti = false;
+ else if (req->mode & NIX_RX_MODE_ALLMULTI)
+ allmulti = true;
+ else
+ disable_promisc = true;
+
+ if (disable_promisc)
+ rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
+ else
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, allmulti);
+ return 0;
+}
+
+static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
+{
+ int idx, err;
+ u64 status;
+
+ /* Start X2P bus calibration */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
+ /* Wait for calibration to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_STATUS, BIT_ULL(10), false);
+ if (err) {
+ dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
+ return err;
+ }
+
+ status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
+ /* Check if CGX devices are ready */
+ for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
+ if (status & (BIT_ULL(16 + idx)))
+ continue;
+ dev_err(rvu->dev,
+ "CGX%d didn't respond to NIX X2P calibration\n", idx);
+ err = -EBUSY;
+ }
+
+ /* Check if LBK is ready */
+ if (!(status & BIT_ULL(19))) {
+ dev_err(rvu->dev,
+ "LBK didn't respond to NIX X2P calibration\n");
+ err = -EBUSY;
+ }
+
+ /* Clear 'calibrate_x2p' bit */
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
+ if (err || (status & 0x3FFULL))
+ dev_err(rvu->dev,
+ "NIX X2P calibration failed, status 0x%llx\n", status);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+ int err;
+
+ /* Set admin queue endianness */
+ cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
+#ifdef __BIG_ENDIAN
+ cfg |= BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#else
+ cfg &= ~BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
+#endif
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
+ cfg &= ~0x3FFEULL;
+ rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
+
+ /* Result structure can be followed by RQ/SQ/CQ context at
+ * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
+ * operation type. Alloc sufficient result memory for all operations.
+ */
+ err = rvu_aq_alloc(rvu, &block->aq,
+ Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
+ ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
+ rvu_write64(rvu, block->addr,
+ NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
+ return 0;
+}
+
+int rvu_nix_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr, err;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return 0;
+ block = &hw->block[blkaddr];
+
+ /* Calibrate X2P bus to check if CGX/LBK links are fine */
+ err = nix_calibrate_x2p(rvu, blkaddr);
+ if (err)
+ return err;
+
+ /* Set num of links of each type */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ hw->cgx = (cfg >> 12) & 0xF;
+ hw->lmac_per_cgx = (cfg >> 8) & 0xF;
+ hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
+ hw->lbk_links = 1;
+ hw->sdp_links = 1;
+
+ /* Initialize admin queue */
+ err = nix_aq_init(rvu, block);
+ if (err)
+ return err;
+
+ /* Restore CINT timer delay to HW reset values */
+ rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+
+ /* Configure segmentation offload formats */
+ nix_setup_lso(rvu, blkaddr);
+
+ if (blkaddr == BLKADDR_NIX0) {
+ hw->nix0 = devm_kzalloc(rvu->dev,
+ sizeof(struct nix_hw), GFP_KERNEL);
+ if (!hw->nix0)
+ return -ENOMEM;
+
+ err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
+ if (err)
+ return err;
+
+ err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
+ if (err)
+ return err;
+
+ /* Config Outer L2, IP, TCP and UDP's NPC layer info.
+ * This helps HW protocol checker to identify headers
+ * and validate length and checksums.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
+ (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
+ (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+
+ nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+ }
+ return 0;
+}
+
+void rvu_nix_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ struct nix_txsch *txsch;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+ int blkaddr, lvl;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ rvu_aq_free(rvu, block->aq);
+
+ if (blkaddr == BLKADDR_NIX0) {
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ kfree(txsch->schq.bmap);
+ }
+
+ mcast = &nix_hw->mcast;
+ qmem_free(rvu->dev, mcast->mce_ctx);
+ qmem_free(rvu->dev, mcast->mcast_buf);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
new file mode 100644
index 000000000000..7531fdc54fa1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+
+static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ struct npa_aq_inst_s *inst)
+{
+ struct admin_queue *aq = block->aq;
+ struct npa_aq_res_s *result;
+ int timeout = 1000;
+ u64 reg, head;
+
+ result = (struct npa_aq_res_s *)aq->res->base;
+
+ /* Get current head pointer where to append this instruction */
+ reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
+ head = (reg >> 4) & AQ_PTR_MASK;
+
+ memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
+ (void *)inst, aq->inst->entry_sz);
+ memset(result, 0, sizeof(*result));
+ /* Ensure the copied instruction and cleared result are visible
+ * to HW before ringing the doorbell.
+ */
+ wmb();
+
+ /* Ring the doorbell and wait for result */
+ rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
+ while (result->compcode == NPA_AQ_COMP_NOTDONE) {
+ cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ }
+
+ if (result->compcode != NPA_AQ_COMP_GOOD)
+ /* TODO: Replace this with some error code */
+ return -EBUSY;
+
+ return 0;
+}
+
+static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, npalf, rc = 0;
+ struct npa_aq_inst_s inst;
+ struct rvu_block *block;
+ struct admin_queue *aq;
+ struct rvu_pfvf *pfvf;
+ void *ctx, *mask;
+ bool ena;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
+ return NPA_AF_ERR_AQ_ENQUEUE;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ aq = block->aq;
+ if (!aq) {
+ dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
+ return NPA_AF_ERR_AQ_ENQUEUE;
+ }
+
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ memset(&inst, 0, sizeof(struct npa_aq_inst_s));
+ inst.cindex = req->aura_id;
+ inst.lf = npalf;
+ inst.ctype = req->ctype;
+ inst.op = req->op;
+ /* Enqueuing multiple instructions is not supported yet,
+ * so always choose the first entry in result memory.
+ */
+ inst.res_addr = (u64)aq->res->iova;
+
+ /* Clean result + context memory */
+ memset(aq->res->base, 0, aq->res->entry_sz);
+ /* Context needs to be written at RES_ADDR + 128 */
+ ctx = aq->res->base + 128;
+ /* Mask needs to be written at RES_ADDR + 256 */
+ mask = aq->res->base + 256;
+
+ switch (req->op) {
+ case NPA_AQ_INSTOP_WRITE:
+ /* Copy context and write mask */
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ memcpy(mask, &req->aura_mask,
+ sizeof(struct npa_aura_s));
+ memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+ } else {
+ memcpy(mask, &req->pool_mask,
+ sizeof(struct npa_pool_s));
+ memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+ }
+ break;
+ case NPA_AQ_INSTOP_INIT:
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
+ rc = NPA_AF_ERR_AQ_FULL;
+ break;
+ }
+ /* Set pool's context address */
+ req->aura.pool_addr = pfvf->pool_ctx->iova +
+ (req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
+ memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
+ } else { /* POOL's context */
+ memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
+ }
+ break;
+ case NPA_AQ_INSTOP_NOP:
+ case NPA_AQ_INSTOP_READ:
+ case NPA_AQ_INSTOP_LOCK:
+ case NPA_AQ_INSTOP_UNLOCK:
+ break;
+ default:
+ rc = NPA_AF_ERR_AQ_FULL;
+ break;
+ }
+
+ if (rc)
+ return rc;
+
+ spin_lock(&aq->lock);
+
+ /* Submit the instruction to AQ */
+ rc = npa_aq_enqueue_wait(rvu, block, &inst);
+ if (rc) {
+ spin_unlock(&aq->lock);
+ return rc;
+ }
+
+ /* Set aura bitmap if aura hw context is enabled */
+ if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
+ __set_bit(req->aura_id, pfvf->aura_bmap);
+ if (req->op == NPA_AQ_INSTOP_WRITE) {
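+ /* Merge semantics: take the new ENA bit where the write
+ * mask selects it, otherwise retain the previously
+ * tracked state.
+ */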
+ ena = (req->aura.ena & req->aura_mask.ena) |
+ (test_bit(req->aura_id, pfvf->aura_bmap) &
+ ~req->aura_mask.ena);
+ if (ena)
+ __set_bit(req->aura_id, pfvf->aura_bmap);
+ else
+ __clear_bit(req->aura_id, pfvf->aura_bmap);
+ }
+ }
+
+ /* Set pool bitmap if pool hw context is enabled */
+ if (req->ctype == NPA_AQ_CTYPE_POOL) {
+ if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
+ __set_bit(req->aura_id, pfvf->pool_bmap);
+ if (req->op == NPA_AQ_INSTOP_WRITE) {
+ ena = (req->pool.ena & req->pool_mask.ena) |
+ (test_bit(req->aura_id, pfvf->pool_bmap) &
+ ~req->pool_mask.ena);
+ if (ena)
+ __set_bit(req->aura_id, pfvf->pool_bmap);
+ else
+ __clear_bit(req->aura_id, pfvf->pool_bmap);
+ }
+ }
+ spin_unlock(&aq->lock);
+
+ if (rsp) {
+ /* Copy read context into mailbox */
+ if (req->op == NPA_AQ_INSTOP_READ) {
+ if (req->ctype == NPA_AQ_CTYPE_AURA)
+ memcpy(&rsp->aura, ctx,
+ sizeof(struct npa_aura_s));
+ else
+ memcpy(&rsp->pool, ctx,
+ sizeof(struct npa_pool_s));
+ }
+ }
+
+ return 0;
+}
+
+static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ struct npa_aq_enq_req aq_req;
+ unsigned long *bmap;
+ int id, cnt = 0;
+ int err = 0, rc;
+
+ if (!pfvf->pool_ctx || !pfvf->aura_ctx)
+ return NPA_AF_ERR_AQ_ENQUEUE;
+
+ memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+ aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+ if (req->ctype == NPA_AQ_CTYPE_POOL) {
+ aq_req.pool.ena = 0;
+ aq_req.pool_mask.ena = 1;
+ cnt = pfvf->pool_ctx->qsize;
+ bmap = pfvf->pool_bmap;
+ } else if (req->ctype == NPA_AQ_CTYPE_AURA) {
+ aq_req.aura.ena = 0;
+ aq_req.aura_mask.ena = 1;
+ cnt = pfvf->aura_ctx->qsize;
+ bmap = pfvf->aura_bmap;
+ }
+
+ aq_req.ctype = req->ctype;
+ aq_req.op = NPA_AQ_INSTOP_WRITE;
+
+ for (id = 0; id < cnt; id++) {
+ if (!test_bit(id, bmap))
+ continue;
+ aq_req.aura_id = id;
+ rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
+ if (rc) {
+ err = rc;
+ dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+ (req->ctype == NPA_AQ_CTYPE_AURA) ?
+ "Aura" : "Pool", id);
+ }
+ }
+
+ return err;
+}
+
+int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ return rvu_npa_aq_enq_inst(rvu, req, rsp);
+}
+
+int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+ struct hwctx_disable_req *req,
+ struct msg_rsp *rsp)
+{
+ return npa_lf_hwctx_disable(rvu, req);
+}
+
+static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
+{
+ kfree(pfvf->aura_bmap);
+ pfvf->aura_bmap = NULL;
+
+ qmem_free(rvu->dev, pfvf->aura_ctx);
+ pfvf->aura_ctx = NULL;
+
+ kfree(pfvf->pool_bmap);
+ pfvf->pool_bmap = NULL;
+
+ qmem_free(rvu->dev, pfvf->pool_ctx);
+ pfvf->pool_ctx = NULL;
+
+ qmem_free(rvu->dev, pfvf->npa_qints_ctx);
+ pfvf->npa_qints_ctx = NULL;
+}
+
+int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+ struct npa_lf_alloc_req *req,
+ struct npa_lf_alloc_rsp *rsp)
+{
+ int npalf, qints, hwctx_size, err, rc = 0;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ u64 cfg, ctx_cfg;
+ int blkaddr;
+
+ if (req->aura_sz > NPA_AURA_SZ_MAX ||
+ req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
+ return NPA_AF_ERR_PARAM;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Reset this NPA LF */
+ err = rvu_lf_reset(rvu, block, npalf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+ return NPA_AF_ERR_LF_RESET;
+ }
+
+ ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);
+
+ /* Alloc memory for aura HW contexts */
+ hwctx_size = 1UL << (ctx_cfg & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
+ NPA_AURA_COUNT(req->aura_sz), hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+ GFP_KERNEL);
+ if (!pfvf->aura_bmap)
+ goto free_mem;
+
+ /* Alloc memory for pool HW contexts */
+ hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+ GFP_KERNEL);
+ if (!pfvf->pool_bmap)
+ goto free_mem;
+
+ /* Get the number of queue interrupts supported */
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+ qints = (cfg >> 28) & 0xFFF;
+
+ /* Alloc memory for Qints HW contexts */
+ hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
+ if (err)
+ goto free_mem;
+
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
+ /* Clear way partition mask and set aura offset to '0' */
+ cfg &= ~(BIT_ULL(34) - 1);
+ /* Set aura size & enable caching of contexts */
+ cfg |= (req->aura_sz << 16) | BIT_ULL(34);
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
+
+ /* Configure aura HW context's base */
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
+ (u64)pfvf->aura_ctx->iova);
+
+ /* Enable caching of qints hw context */
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
+ (u64)pfvf->npa_qints_ctx->iova);
+
+ goto exit;
+
+free_mem:
+ npa_ctx_free(rvu, pfvf);
+ rc = -ENOMEM;
+
+exit:
+ /* set stack page info */
+ cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
+ rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
+ rsp->stack_pg_bytes = cfg & 0xFF;
+ rsp->qints = (cfg >> 28) & 0xFFF;
+ return rc;
+}
+
+int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int npalf, err;
+ int blkaddr;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (!pfvf->npalf || blkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ npalf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (npalf < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Reset this NPA LF */
+ err = rvu_lf_reset(rvu, block, npalf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
+ return NPA_AF_ERR_LF_RESET;
+ }
+
+ npa_ctx_free(rvu, pfvf);
+
+ return 0;
+}
+
+static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+ int err;
+
+ /* Set admin queue endianness */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
+#ifdef __BIG_ENDIAN
+ cfg |= BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#else
+ cfg &= ~BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
+#endif
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
+ cfg &= ~0x03DULL;
+ rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+
+ /* Result structure can be followed by Aura/Pool context at
+ * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
+ * operation type. Alloc sufficient result memory for all operations.
+ */
+ err = rvu_aq_alloc(rvu, &block->aq,
+ Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
+ ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
+ rvu_write64(rvu, block->addr,
+ NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
+ return 0;
+}
+
+int rvu_npa_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ /* Initialize admin queue */
+ err = npa_aq_init(rvu, &hw->block[blkaddr]);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void rvu_npa_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return;
+
+ block = &hw->block[blkaddr];
+ rvu_aq_free(rvu, block->aq);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
new file mode 100644
index 000000000000..23ff47f7efc5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "npc_profile.h"
+
+#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */
+#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
+
+#define NIXLF_UCAST_ENTRY 0
+#define NIXLF_BCAST_ENTRY 1
+#define NIXLF_PROMISC_ENTRY 2
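+
+/* Reserved MCAM layout (set up in npc_mcam_rsrcs_init()): general
+ * entries come first, then one ucast entry per NIX LF starting at
+ * nixlf_offset, then a {bcast, promisc} pair per PF (PF0 excluded)
+ * starting at pf_offset.
+ */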
+
+#define NPC_PARSE_RESULT_DMAC_OFFSET 8
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
+ u64 kw[NPC_MAX_KWS_IN_KEY];
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ u64 action;
+ u64 vtag_action;
+};
+
+void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
+{
+ int blkaddr;
+ u64 val = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Config CPI base for the PKIND */
+ val = pkind | 1ULL << 62;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_CPI_DEFX(pkind, 0), val);
+}
+
+int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ u32 map;
+ int i;
+
+ for (i = 0; i < pkind->rsrc.max; i++) {
+ map = pkind->pfchan_map[i];
+ if (((map >> 16) & 0x3F) == pf)
+ return i;
+ }
+ return -1;
+}
+
+static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
+ u16 pcifunc, int nixlf, int type)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int index;
+
+ /* Check if this is for a PF */
+ if (pf && !(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ /* Reserved entries exclude PF0 */
+ pf--;
+ index = mcam->pf_offset + (pf * RSVD_MCAM_ENTRIES_PER_PF);
+ /* Broadcast address matching entry should be first so
+ * that the packet can be replicated to all VFs.
+ */
+ if (type == NIXLF_BCAST_ENTRY)
+ return index;
+ else if (type == NIXLF_PROMISC_ENTRY)
+ return index + 1;
+ }
+
+ return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF));
+}
+
+static int npc_get_bank(struct npc_mcam *mcam, int index)
+{
+ int bank = index / mcam->banksize;
+
+ /* 0,1 & 2,3 banks are combined for this keysize */
+ if (mcam->keysize == NPC_MCAM_KEY_X2)
+ return bank ? 2 : 0;
+
+ return bank;
+}
+
+static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+ u64 cfg;
+
+ index &= (mcam->banksize - 1);
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(index, bank));
+ return (cfg & 1);
+}
+
+static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable)
+{
+ int bank = npc_get_bank(mcam, index);
+ int actbank = bank;
+
+ index &= (mcam->banksize - 1);
+ for (; bank < (actbank + mcam->banks_per_entry); bank++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(index, bank),
+ enable ? 1 : 0);
+ }
+}
+
+static void npc_get_keyword(struct mcam_entry *entry, int idx,
+ u64 *cam0, u64 *cam1)
+{
+ u64 kw_mask = 0x00;
+
+#define CAM_MASK(n) (BIT_ULL(n) - 1)
+
+ /* 0, 2, 4, 6 indices refer to BANKX_CAMX_W0 and
+ * 1, 3, 5, 7 indices refer to BANKX_CAMX_W1.
+ *
+ * Also, only 48 bits of BANKX_CAMX_W1 are valid.
+ */
+ switch (idx) {
+ case 0:
+ /* BANK(X)_CAM_W0<63:0> = MCAM_KEY[KW0]<63:0> */
+ *cam1 = entry->kw[0];
+ kw_mask = entry->kw_mask[0];
+ break;
+ case 1:
+ /* BANK(X)_CAM_W1<47:0> = MCAM_KEY[KW1]<47:0> */
+ *cam1 = entry->kw[1] & CAM_MASK(48);
+ kw_mask = entry->kw_mask[1] & CAM_MASK(48);
+ break;
+ case 2:
+ /* BANK(X + 1)_CAM_W0<15:0> = MCAM_KEY[KW1]<63:48>
+ * BANK(X + 1)_CAM_W0<63:16> = MCAM_KEY[KW2]<47:0>
+ */
+ *cam1 = (entry->kw[1] >> 48) & CAM_MASK(16);
+ *cam1 |= ((entry->kw[2] & CAM_MASK(48)) << 16);
+ kw_mask = (entry->kw_mask[1] >> 48) & CAM_MASK(16);
+ kw_mask |= ((entry->kw_mask[2] & CAM_MASK(48)) << 16);
+ break;
+ case 3:
+ /* BANK(X + 1)_CAM_W1<15:0> = MCAM_KEY[KW2]<63:48>
+ * BANK(X + 1)_CAM_W1<47:16> = MCAM_KEY[KW3]<31:0>
+ */
+ *cam1 = (entry->kw[2] >> 48) & CAM_MASK(16);
+ *cam1 |= ((entry->kw[3] & CAM_MASK(32)) << 16);
+ kw_mask = (entry->kw_mask[2] >> 48) & CAM_MASK(16);
+ kw_mask |= ((entry->kw_mask[3] & CAM_MASK(32)) << 16);
+ break;
+ case 4:
+ /* BANK(X + 2)_CAM_W0<31:0> = MCAM_KEY[KW3]<63:32>
+ * BANK(X + 2)_CAM_W0<63:32> = MCAM_KEY[KW4]<31:0>
+ */
+ *cam1 = (entry->kw[3] >> 32) & CAM_MASK(32);
+ *cam1 |= ((entry->kw[4] & CAM_MASK(32)) << 32);
+ kw_mask = (entry->kw_mask[3] >> 32) & CAM_MASK(32);
+ kw_mask |= ((entry->kw_mask[4] & CAM_MASK(32)) << 32);
+ break;
+ case 5:
+ /* BANK(X + 2)_CAM_W1<31:0> = MCAM_KEY[KW4]<63:32>
+ * BANK(X + 2)_CAM_W1<47:32> = MCAM_KEY[KW5]<15:0>
+ */
+ *cam1 = (entry->kw[4] >> 32) & CAM_MASK(32);
+ *cam1 |= ((entry->kw[5] & CAM_MASK(16)) << 32);
+ kw_mask = (entry->kw_mask[4] >> 32) & CAM_MASK(32);
+ kw_mask |= ((entry->kw_mask[5] & CAM_MASK(16)) << 32);
+ break;
+ case 6:
+ /* BANK(X + 3)_CAM_W0<47:0> = MCAM_KEY[KW5]<63:16>
+ * BANK(X + 3)_CAM_W0<63:48> = MCAM_KEY[KW6]<15:0>
+ */
+ *cam1 = (entry->kw[5] >> 16) & CAM_MASK(48);
+ *cam1 |= ((entry->kw[6] & CAM_MASK(16)) << 48);
+ kw_mask = (entry->kw_mask[5] >> 16) & CAM_MASK(48);
+ kw_mask |= ((entry->kw_mask[6] & CAM_MASK(16)) << 48);
+ break;
+ case 7:
+ /* BANK(X + 3)_CAM_W1<47:0> = MCAM_KEY[KW6]<63:16> */
+ *cam1 = (entry->kw[6] >> 16) & CAM_MASK(48);
+ kw_mask = (entry->kw_mask[6] >> 16) & CAM_MASK(48);
+ break;
+ }
+
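+ /* cam1 carries the match value restricted to the mask and cam0
+ * its complement, so masked-off bits end up as (0,0) i.e. don't
+ * care, and the (1,1) encoding never occurs.
+ */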
+ *cam1 &= kw_mask;
+ *cam0 = ~*cam1 & kw_mask;
+}
+
+static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, u8 intf,
+ struct mcam_entry *entry, bool enable)
+{
+ int bank = npc_get_bank(mcam, index);
+ int kw = 0, actbank, actindex;
+ u64 cam0, cam1;
+
+ actbank = bank; /* Save bank id, to set action later on */
+ actindex = index;
+ index &= (mcam->banksize - 1);
+
+ /* CAM1 takes the comparison value and CAM0 specifies whether a
+ * bit in the key should match '0', '1' or be a don't care.
+ * CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
+ * CAM1<n> = 1 & CAM0<n> = 0 => match if key<n> = 1
+ * CAM1<n> = 0 & CAM0<n> = 0 => always match, i.e. don't care.
+ */
+ for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ /* Interface should be set in all banks */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1),
+ intf);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0),
+ ~intf & 0x3);
+
+ /* Set the match key */
+ npc_get_keyword(entry, kw, &cam0, &cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), cam0);
+
+ npc_get_keyword(entry, kw + 1, &cam0, &cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), cam1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
+ }
+
+ /* Set 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);
+
+ /* Set TAG 'action' */
+ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_TAG_ACT(index, actbank),
+ entry->vtag_action);
+
+ /* Enable the entry */
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
+ else
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
+}
+
+static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
+{
+ int bank = npc_get_bank(mcam, index);
+
+ index &= (mcam->banksize - 1);
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, u8 *mac_addr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry entry = { {0} };
+ struct nix_rx_action action;
+ int blkaddr, index, kwi;
+ u64 mac = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
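+ /* Pack the 6-byte MAC with byte 0 in the most significant
+ * position, presumably matching how the parser places the DMAC
+ * in the search key.
+ */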
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+ /* Match ingress channel and DMAC */
+ entry.kw[0] = chan;
+ entry.kw_mask[0] = 0xFFFULL;
+
+ kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+ entry.kw[kwi] = mac;
+ entry.kw_mask[kwi] = BIT_ULL(48) - 1;
+
+ /* Don't change the action if entry is already enabled
+ * Otherwise RSS action may get overwritten.
+ */
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, index);
+ } else {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
+
+ entry.action = *(u64 *)&action;
+ npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+ NIX_INTF_RX, &entry, true);
+}
+
+void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan, bool allmulti)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry entry = { {0} };
+ struct nix_rx_action action;
+ int blkaddr, index, kwi;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Only PF or AF VF can add a promiscuous entry */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
+ entry.kw[0] = chan;
+ entry.kw_mask[0] = 0xFFFULL;
+
+ if (allmulti) {
+ kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+ entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
+ entry.kw_mask[kwi] = BIT_ULL(40);
+ }
+
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+
+ entry.action = *(u64 *)&action;
+ npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+ NIX_INTF_RX, &entry, true);
+}
+
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Only PFs have a promiscuous entry */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+}
+
+void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, u64 chan)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry entry = { {0} };
+ struct nix_rx_action action;
+#ifdef MCAST_MCE
+ struct rvu_pfvf *pfvf;
+#endif
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Only PF can add a bcast match entry */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+#ifdef MCAST_MCE
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+#endif
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_BCAST_ENTRY);
+
+ /* Check for L2B bit and LMAC channel */
+ entry.kw[0] = BIT_ULL(25) | chan;
+ entry.kw_mask[0] = BIT_ULL(25) | 0xFFFULL;
+
+ *(u64 *)&action = 0x00;
+#ifdef MCAST_MCE
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ action.pf_func = pcifunc;
+ action.index = pfvf->bcast_mce_idx;
+#else
+ /* Early silicon doesn't support pkt replication, so install
+ * the entry with a UCAST action so that the PF itself receives
+ * all broadcast packets.
+ */
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+#endif
+
+ entry.action = *(u64 *)&action;
+ npc_config_mcam_entry(rvu, mcam, blkaddr, index,
+ NIX_INTF_RX, &entry, true);
+}
+
+void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
+ int group, int alg_idx, int mcam_index)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action action;
+ int blkaddr, index, bank;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Check if this is for reserved default entry */
+ if (mcam_index < 0) {
+ if (group != DEFAULT_RSS_CONTEXT_GROUP)
+ return;
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ } else {
+ /* TODO: validate this mcam index */
+ index = mcam_index;
+ }
+
+ if (index >= mcam->total_entries)
+ return;
+
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ /* Ignore if no action was set earlier */
+ if (!*(u64 *)&action)
+ return;
+
+ action.op = NIX_RX_ACTIONOP_RSS;
+ action.pf_func = pcifunc;
+ action.index = group;
+ action.flow_key_alg = alg_idx;
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+}
+
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action action;
+ int blkaddr, index, bank;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Disable ucast MCAM match entry of this PF/VF */
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ /* For PF, disable promisc and bcast MCAM match entries */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_BCAST_ENTRY);
+ /* For bcast, disable the entry only if its action is not
+ * packet replication; if the action is replication, this
+ * PF's nixlf is instead removed from the bcast replication
+ * list.
+ */
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ if (action.op != NIX_RX_ACTIONOP_MCAST)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
+ }
+}
+
+#define LDATA_EXTRACT_CONFIG(intf, lid, ltype, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+
+#define LDATA_FLAGS_CONFIG(intf, ld, flags, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+
+static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int lid, ltype;
+ int lid_count;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ lid_count = (cfg >> 4) & 0xF;
+
+ /* First clear any existing config, i.e.
+ * disable LDATA and FLAGS extraction.
+ */
+ for (lid = 0; lid < lid_count; lid++) {
+ for (ltype = 0; ltype < 16; ltype++) {
+ LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 0, 0ULL);
+ LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 1, 0ULL);
+ LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 0, 0ULL);
+ LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 1, 0ULL);
+
+ LDATA_FLAGS_CONFIG(NIX_INTF_RX, 0, ltype, 0ULL);
+ LDATA_FLAGS_CONFIG(NIX_INTF_RX, 1, ltype, 0ULL);
+ LDATA_FLAGS_CONFIG(NIX_INTF_TX, 0, ltype, 0ULL);
+ LDATA_FLAGS_CONFIG(NIX_INTF_TX, 1, ltype, 0ULL);
+ }
+ }
+
+ /* If we plan to extract the outer IPv4 tuple for TCP/UDP pkts,
+ * then a 112-bit key is not sufficient.
+ */
+ if (mcam->keysize != NPC_MCAM_KEY_X2)
+ return;
+
+ /* Start placing extracted data/flags from 64bit onwards, for now */
+ /* Extract DMAC from the packet */
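+ /* Field layout assumed from usage: the low byte is the destination
+ * offset in the parse result, BIT(7) the extract enable, and 0x05
+ * at bits 16+ the number of bytes to extract minus one (6-byte DMAC).
+ */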
+ cfg = (0x05 << 16) | BIT_ULL(7) | NPC_PARSE_RESULT_DMAC_OFFSET;
+ LDATA_EXTRACT_CONFIG(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+}
+
+static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
+ struct npc_kpu_profile_action *kpuaction,
+ int kpu, int entry, bool pkind)
+{
+ struct npc_kpu_action0 action0 = {0};
+ struct npc_kpu_action1 action1 = {0};
+ u64 reg;
+
+ action1.errlev = kpuaction->errlev;
+ action1.errcode = kpuaction->errcode;
+ action1.dp0_offset = kpuaction->dp0_offset;
+ action1.dp1_offset = kpuaction->dp1_offset;
+ action1.dp2_offset = kpuaction->dp2_offset;
+
+ if (pkind)
+ reg = NPC_AF_PKINDX_ACTION1(entry);
+ else
+ reg = NPC_AF_KPUX_ENTRYX_ACTION1(kpu, entry);
+
+ rvu_write64(rvu, blkaddr, reg, *(u64 *)&action1);
+
+ action0.byp_count = kpuaction->bypass_count;
+ action0.capture_ena = kpuaction->cap_ena;
+ action0.parse_done = kpuaction->parse_done;
+ action0.next_state = kpuaction->next_state;
+ action0.capture_lid = kpuaction->lid;
+ action0.capture_ltype = kpuaction->ltype;
+ action0.capture_flags = kpuaction->flags;
+ action0.ptr_advance = kpuaction->ptr_advance;
+ action0.var_len_offset = kpuaction->offset;
+ action0.var_len_mask = kpuaction->mask;
+ action0.var_len_right = kpuaction->right;
+ action0.var_len_shift = kpuaction->shift;
+
+ if (pkind)
+ reg = NPC_AF_PKINDX_ACTION0(entry);
+ else
+ reg = NPC_AF_KPUX_ENTRYX_ACTION0(kpu, entry);
+
+ rvu_write64(rvu, blkaddr, reg, *(u64 *)&action0);
+}
+
+static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
+ struct npc_kpu_profile_cam *kpucam,
+ int kpu, int entry)
+{
+ struct npc_kpu_cam cam0 = {0};
+ struct npc_kpu_cam cam1 = {0};
+
+ cam1.state = kpucam->state & kpucam->state_mask;
+ cam1.dp0_data = kpucam->dp0 & kpucam->dp0_mask;
+ cam1.dp1_data = kpucam->dp1 & kpucam->dp1_mask;
+ cam1.dp2_data = kpucam->dp2 & kpucam->dp2_mask;
+
+ cam0.state = ~kpucam->state & kpucam->state_mask;
+ cam0.dp0_data = ~kpucam->dp0 & kpucam->dp0_mask;
+ cam0.dp1_data = ~kpucam->dp1 & kpucam->dp1_mask;
+ cam0.dp2_data = ~kpucam->dp2 & kpucam->dp2_mask;
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 0), *(u64 *)&cam0);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRYX_CAMX(kpu, entry, 1), *(u64 *)&cam1);
+}
+
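+/* A set bit in NPC_AF_KPU(x)_ENTRY_DIS disables that entry; e.g.
+ * enable_mask(3) returns ~0x7ULL, leaving entries 0-2 enabled and
+ * the rest disabled.
+ */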
+static inline u64 enable_mask(int count)
+{
+ return (((count) < 64) ? ~(BIT_ULL(count) - 1) : (0x00ULL));
+}
+
+static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
+ struct npc_kpu_profile *profile)
+{
+ int entry, num_entries, max_entries;
+
+ if (profile->cam_entries != profile->action_entries) {
+ dev_err(rvu->dev,
+ "KPU%d: CAM and action entries [%d != %d] not equal\n",
+ kpu, profile->cam_entries, profile->action_entries);
+ }
+
+ max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF;
+
+ /* Program CAM match entries for previous KPU extracted data */
+ num_entries = min_t(int, profile->cam_entries, max_entries);
+ for (entry = 0; entry < num_entries; entry++)
+ npc_config_kpucam(rvu, blkaddr,
+ &profile->cam[entry], kpu, entry);
+
+ /* Program this KPU's actions */
+ num_entries = min_t(int, profile->action_entries, max_entries);
+ for (entry = 0; entry < num_entries; entry++)
+ npc_config_kpuaction(rvu, blkaddr, &profile->action[entry],
+ kpu, entry, false);
+
+ /* Enable all programmed entries */
+ num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
+ if (num_entries > 64) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
+ enable_mask(num_entries - 64));
+ }
+
+ /* Enable this KPU */
+ rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
+}
+
+static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int num_pkinds, num_kpus, idx;
+ struct npc_pkind *pkind;
+
+ /* Get HW limits */
+ hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F;
+
+ /* Disable all KPUs and their entries */
+ for (idx = 0; idx < hw->npc_kpus; idx++) {
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(idx, 0), ~0ULL);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_KPUX_ENTRY_DISX(idx, 1), ~0ULL);
+ rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
+ }
+
+ /* First program the IKPU profile, i.e. the PKIND configs.
+ * Check HW max count to avoid configuring junk or
+ * writing to unsupported CSR addresses.
+ */
+ pkind = &hw->pkind;
+ num_pkinds = ARRAY_SIZE(ikpu_action_entries);
+ num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
+
+ for (idx = 0; idx < num_pkinds; idx++)
+ npc_config_kpuaction(rvu, blkaddr,
+ &ikpu_action_entries[idx], 0, idx, true);
+
+ /* Program KPU CAM and Action profiles */
+ num_kpus = ARRAY_SIZE(npc_kpu_profiles);
+ num_kpus = min_t(int, hw->npc_kpus, num_kpus);
+
+ for (idx = 0; idx < num_kpus; idx++)
+ npc_program_kpu_profile(rvu, blkaddr,
+ idx, &npc_kpu_profiles[idx]);
+}
+
+static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
+{
+ int nixlf_count = rvu_get_nixlf_count(rvu);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int rsvd;
+ u64 cfg;
+
+ /* Get HW limits */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ mcam->banks = (cfg >> 44) & 0xF;
+ mcam->banksize = (cfg >> 28) & 0xFFFF;
+
+ /* The actual number of MCAM entries varies with the key width */
+ cfg = (rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
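+ /* Wider keys combine banks, so the number of usable entries
+ * scales down by 2^keysize (1/2/4 banks per entry).
+ */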
+ mcam->total_entries = (mcam->banks / BIT_ULL(cfg)) * mcam->banksize;
+ mcam->keysize = cfg;
+
+ /* Number of banks combined per MCAM entry */
+ if (cfg == NPC_MCAM_KEY_X4)
+ mcam->banks_per_entry = 4;
+ else if (cfg == NPC_MCAM_KEY_X2)
+ mcam->banks_per_entry = 2;
+ else
+ mcam->banks_per_entry = 1;
+
+ /* Reserve one MCAM entry for each of the NIX LF to
+ * guarantee space to install default matching DMAC rule.
+ * Also reserve 2 MCAM entries for each PF for default
+ * channel based matching or 'bcast & promisc' matching to
+ * support BCAST and PROMISC modes of operation for PFs.
+ * PF0 is excluded.
+ */
+ rsvd = (nixlf_count * RSVD_MCAM_ENTRIES_PER_NIXLF) +
+ ((rvu->hw->total_pfs - 1) * RSVD_MCAM_ENTRIES_PER_PF);
+ if (mcam->total_entries <= rsvd) {
+ dev_warn(rvu->dev,
+ "Insufficient NPC MCAM size %d for pkt I/O, exiting\n",
+ mcam->total_entries);
+ return -ENOMEM;
+ }
+
+ mcam->entries = mcam->total_entries - rsvd;
+ mcam->nixlf_offset = mcam->entries;
+ mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
+
+ spin_lock_init(&mcam->lock);
+
+ return 0;
+}
+
+int rvu_npc_init(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ u64 keyz = NPC_MCAM_KEY_X2;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Allocate resource bitmap for pkind */
+ pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
+ NPC_AF_CONST1) >> 12) & 0xFF;
+ err = rvu_alloc_bitmap(&pkind->rsrc);
+ if (err)
+ return err;
+
+ /* Allocate mem for pkind to PF and channel mapping info */
+ pkind->pfchan_map = devm_kcalloc(rvu->dev, pkind->rsrc.max,
+ sizeof(u32), GFP_KERNEL);
+ if (!pkind->pfchan_map)
+ return -ENOMEM;
+
+ /* Configure KPU profile */
+ npc_parser_profile_init(rvu, blkaddr);
+
+ /* Config Outer L2, IPv4's NPC layer info */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
+ (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
+ (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+
+ /* Enable below for Rx pkts.
+ * - Outer IPv4 header checksum validation.
+ * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
+ */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
+ rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
+ BIT_ULL(6) | BIT_ULL(2));
+
+ /* Set RX and TX side MCAM search key size.
+ * Also enable parse key extract nibbles such that, except for
+ * layers E to H, the rest of the key is included in the MCAM search.
+ */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
+ ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
+ ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+
+ err = npc_mcam_rsrcs_init(rvu, blkaddr);
+ if (err)
+ return err;
+
+ /* Config packet data and flags extraction into PARSE result */
+ npc_config_ldata_extract(rvu, blkaddr);
+
+ /* Set TX miss action to UCAST_DEFAULT, i.e.
+ * transmit the packet on the NIX LF SQ's default channel.
+ */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
+ NIX_TX_ACTIONOP_UCAST_DEFAULT);
+
+ /* If MCAM lookup doesn't result in a match, drop the received packet */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
+ NIX_RX_ACTIONOP_DROP);
+
+ return 0;
+}
+
+void rvu_npc_freemem(struct rvu *rvu)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+
+ kfree(pkind->rsrc.bmap);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
new file mode 100644
index 000000000000..9d7c135c7965
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+#include "mbox.h"
+#include "rvu.h"
+
+struct reg_range {
+ u64 start;
+ u64 end;
+};
+
+struct hw_reg_map {
+ u8 regblk;
+ u8 num_ranges;
+ u64 mask;
+#define MAX_REG_RANGES 8
+ struct reg_range range[MAX_REG_RANGES];
+};
+
+static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
+ {NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
+ {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
+ {0x1200, 0x12E0} } },
+ {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+ {0x1610, 0x1618} } },
+ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } },
+ {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
+};
+
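+/* Example: rvu_check_valid_reg(TXSCHQ_HWREGMAP, NIX_TXSCH_LVL_SMQ,
+ * 0x0700) is true; range ends are exclusive, so 0x14C8 itself is
+ * rejected while 0x14C0 passes.
+ */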
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg)
+{
+ int idx;
+ struct hw_reg_map *map;
+
+ /* Only 64bit offsets */
+ if (reg & 0x07)
+ return false;
+
+ if (regmap == TXSCHQ_HWREGMAP) {
+ if (regblk >= NIX_TXSCH_LVL_CNT)
+ return false;
+ map = &txsch_reg_map[regblk];
+ } else {
+ return false;
+ }
+
+ /* Should never happen */
+ if (map->regblk != regblk)
+ return false;
+
+ reg &= map->mask;
+
+ for (idx = 0; idx < map->num_ranges; idx++) {
+ if (reg >= map->range[idx].start &&
+ reg < map->range[idx].end)
+ return true;
+ }
+ return false;
+}
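
rvu_check_valid_reg() is a pure predicate, so a sketch of an intended caller (the function name below is hypothetical) is enough to show its role: refuse PF/VF-supplied scheduler register offsets that fall outside the whitelisted per-level ranges before the AF proxies the access.

/* Hypothetical caller: validate a guest-supplied TXSCHQ register
 * offset before writing on its behalf; anything outside the ranges
 * in txsch_reg_map above is rejected.
 */
static int rvu_proxy_txsch_write(struct rvu *rvu, int blkaddr,
				 int lvl, u64 reg, u64 val)
{
	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
		return -EINVAL;

	rvu_write64(rvu, blkaddr, reg, val);
	return 0;
}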
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
new file mode 100644
index 000000000000..09a8d61f3144
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_REG_H
+#define RVU_REG_H
+
+/* Admin function registers */
+#define RVU_AF_MSIXTR_BASE (0x10)
+#define RVU_AF_ECO (0x20)
+#define RVU_AF_BLK_RST (0x30)
+#define RVU_AF_PF_BAR4_ADDR (0x40)
+#define RVU_AF_RAS (0x100)
+#define RVU_AF_RAS_W1S (0x108)
+#define RVU_AF_RAS_ENA_W1S (0x110)
+#define RVU_AF_RAS_ENA_W1C (0x118)
+#define RVU_AF_GEN_INT (0x120)
+#define RVU_AF_GEN_INT_W1S (0x128)
+#define RVU_AF_GEN_INT_ENA_W1S (0x130)
+#define RVU_AF_GEN_INT_ENA_W1C (0x138)
+#define RVU_AF_AFPF_MBOX0 (0x02000)
+#define RVU_AF_AFPF_MBOX1 (0x02008)
+#define RVU_AF_AFPFX_MBOXX(a, b) (0x2000 | (a) << 4 | (b) << 3)
+#define RVU_AF_PFME_STATUS (0x2800)
+#define RVU_AF_PFTRPEND (0x2810)
+#define RVU_AF_PFTRPEND_W1S (0x2820)
+#define RVU_AF_PF_RST (0x2840)
+#define RVU_AF_HWVF_RST (0x2850)
+#define RVU_AF_PFAF_MBOX_INT (0x2880)
+#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890)
+#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898)
+#define RVU_AF_PFFLR_INT (0x28a0)
+#define RVU_AF_PFFLR_INT_W1S (0x28a8)
+#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0)
+#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8)
+#define RVU_AF_PFME_INT (0x28c0)
+#define RVU_AF_PFME_INT_W1S (0x28c8)
+#define RVU_AF_PFME_INT_ENA_W1S (0x28d0)
+#define RVU_AF_PFME_INT_ENA_W1C (0x28d8)
+
+/* Admin function's privileged PF/VF registers */
+#define RVU_PRIV_CONST (0x8000000)
+#define RVU_PRIV_GEN_CFG (0x8000010)
+#define RVU_PRIV_CLK_CFG (0x8000020)
+#define RVU_PRIV_ACTIVE_PC (0x8000030)
+#define RVU_PRIV_PFX_CFG(a) (0x8000100 | (a) << 16)
+#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16)
+#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16)
+#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16)
+#define RVU_PRIV_PFX_NIX0_CFG (0x8000300)
+#define RVU_PRIV_PFX_NPA_CFG (0x8000310)
+#define RVU_PRIV_PFX_SSO_CFG (0x8000320)
+#define RVU_PRIV_PFX_SSOW_CFG (0x8000330)
+#define RVU_PRIV_PFX_TIM_CFG (0x8000340)
+#define RVU_PRIV_PFX_CPT0_CFG (0x8000350)
+#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3)
+#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16)
+#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300)
+#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310)
+#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320)
+#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330)
+#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340)
+#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350)
+
+/* RVU PF registers */
+#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
+#define RVU_PF_VFX_PFVF_MBOX1 (0x00008)
+#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3)
+#define RVU_PF_VF_BAR4_ADDR (0x10)
+#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3)
+#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3)
+#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3)
+#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3)
+#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3)
+#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3)
+#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3)
+#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3)
+#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3)
+#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3)
+#define RVU_PF_PFAF_MBOX0 (0xC00)
+#define RVU_PF_PFAF_MBOX1 (0xC08)
+#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3)
+#define RVU_PF_INT (0xc20)
+#define RVU_PF_INT_W1S (0xc28)
+#define RVU_PF_INT_ENA_W1S (0xc30)
+#define RVU_PF_INT_ENA_W1C (0xc38)
+#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+
+/* RVU VF registers */
+#define RVU_VF_VFPF_MBOX0 (0x00000)
+#define RVU_VF_VFPF_MBOX1 (0x00008)
+
+/* NPA block's admin function registers */
+#define NPA_AF_BLK_RST (0x0000)
+#define NPA_AF_CONST (0x0010)
+#define NPA_AF_CONST1 (0x0018)
+#define NPA_AF_LF_RST (0x0020)
+#define NPA_AF_GEN_CFG (0x0030)
+#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_INP_CTL (0x00D0)
+#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
+#define NPA_AF_AVG_DELAY (0x0100)
+#define NPA_AF_GEN_INT (0x0140)
+#define NPA_AF_GEN_INT_W1S (0x0148)
+#define NPA_AF_GEN_INT_ENA_W1S (0x0150)
+#define NPA_AF_GEN_INT_ENA_W1C (0x0158)
+#define NPA_AF_RVU_INT (0x0160)
+#define NPA_AF_RVU_INT_W1S (0x0168)
+#define NPA_AF_RVU_INT_ENA_W1S (0x0170)
+#define NPA_AF_RVU_INT_ENA_W1C (0x0178)
+#define NPA_AF_ERR_INT (0x0180)
+#define NPA_AF_ERR_INT_W1S (0x0188)
+#define NPA_AF_ERR_INT_ENA_W1S (0x0190)
+#define NPA_AF_ERR_INT_ENA_W1C (0x0198)
+#define NPA_AF_RAS (0x01A0)
+#define NPA_AF_RAS_W1S (0x01A8)
+#define NPA_AF_RAS_ENA_W1S (0x01B0)
+#define NPA_AF_RAS_ENA_W1C (0x01B8)
+#define NPA_AF_BP_TEST (0x0200)
+#define NPA_AF_ECO (0x0300)
+#define NPA_AF_AQ_CFG (0x0600)
+#define NPA_AF_AQ_BASE (0x0610)
+#define NPA_AF_AQ_STATUS (0x0620)
+#define NPA_AF_AQ_DOOR (0x0630)
+#define NPA_AF_AQ_DONE_WAIT (0x0640)
+#define NPA_AF_AQ_DONE (0x0650)
+#define NPA_AF_AQ_DONE_ACK (0x0660)
+#define NPA_AF_AQ_DONE_INT (0x0680)
+#define NPA_AF_AQ_DONE_INT_W1S (0x0688)
+#define NPA_AF_AQ_DONE_ENA_W1S (0x0690)
+#define NPA_AF_AQ_DONE_ENA_W1C (0x0698)
+#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18)
+#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18)
+#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18)
+#define NPA_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 18)
+#define NPA_PRIV_AF_INT_CFG (0x10000)
+#define NPA_PRIV_LFX_CFG (0x10010)
+#define NPA_PRIV_LFX_INT_CFG (0x10020)
+#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030)
+
+/* NIX block's admin function registers */
+#define NIX_AF_CFG (0x0000)
+#define NIX_AF_STATUS (0x0010)
+#define NIX_AF_NDC_CFG (0x0018)
+#define NIX_AF_CONST (0x0020)
+#define NIX_AF_CONST1 (0x0028)
+#define NIX_AF_CONST2 (0x0030)
+#define NIX_AF_CONST3 (0x0038)
+#define NIX_AF_SQ_CONST (0x0040)
+#define NIX_AF_CQ_CONST (0x0048)
+#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PSE_CONST (0x0060)
+#define NIX_AF_TL1_CONST (0x0070)
+#define NIX_AF_TL2_CONST (0x0078)
+#define NIX_AF_TL3_CONST (0x0080)
+#define NIX_AF_TL4_CONST (0x0088)
+#define NIX_AF_MDQ_CONST (0x0090)
+#define NIX_AF_MC_MIRROR_CONST (0x0098)
+#define NIX_AF_LSO_CFG (0x00A8)
+#define NIX_AF_BLK_RST (0x00B0)
+#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_RX_CFG (0x00D0)
+#define NIX_AF_AVG_DELAY (0x00E0)
+#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_RX_MCAST_BASE (0x0100)
+#define NIX_AF_RX_MCAST_CFG (0x0110)
+#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
+#define NIX_AF_RX_MCAST_BUF_CFG (0x0130)
+#define NIX_AF_RX_MIRROR_BUF_BASE (0x0140)
+#define NIX_AF_RX_MIRROR_BUF_CFG (0x0148)
+#define NIX_AF_LF_RST (0x0150)
+#define NIX_AF_GEN_INT (0x0160)
+#define NIX_AF_GEN_INT_W1S (0x0168)
+#define NIX_AF_GEN_INT_ENA_W1S (0x0170)
+#define NIX_AF_GEN_INT_ENA_W1C (0x0178)
+#define NIX_AF_ERR_INT (0x0180)
+#define NIX_AF_ERR_INT_W1S (0x0188)
+#define NIX_AF_ERR_INT_ENA_W1S (0x0190)
+#define NIX_AF_ERR_INT_ENA_W1C (0x0198)
+#define NIX_AF_RAS (0x01A0)
+#define NIX_AF_RAS_W1S (0x01A8)
+#define NIX_AF_RAS_ENA_W1S (0x01B0)
+#define NIX_AF_RAS_ENA_W1C (0x01B8)
+#define NIX_AF_RVU_INT (0x01C0)
+#define NIX_AF_RVU_INT_W1S (0x01C8)
+#define NIX_AF_RVU_INT_ENA_W1S (0x01D0)
+#define NIX_AF_RVU_INT_ENA_W1C (0x01D8)
+#define NIX_AF_TCP_TIMER (0x01E0)
+#define NIX_AF_RX_WQE_TAG_CTL (0x01F0)
+#define NIX_AF_RX_DEF_OL2 (0x0200)
+#define NIX_AF_RX_DEF_OIP4 (0x0210)
+#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_IIP6 (0x0240)
+#define NIX_AF_RX_DEF_OTCP (0x0250)
+#define NIX_AF_RX_DEF_ITCP (0x0260)
+#define NIX_AF_RX_DEF_OUDP (0x0270)
+#define NIX_AF_RX_DEF_IUDP (0x0280)
+#define NIX_AF_RX_DEF_OSCTP (0x0290)
+#define NIX_AF_RX_DEF_ISCTP (0x02A0)
+#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
+#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_NDC_TX_SYNC (0x03F0)
+#define NIX_AF_AQ_CFG (0x0400)
+#define NIX_AF_AQ_BASE (0x0410)
+#define NIX_AF_AQ_STATUS (0x0420)
+#define NIX_AF_AQ_DOOR (0x0430)
+#define NIX_AF_AQ_DONE_WAIT (0x0440)
+#define NIX_AF_AQ_DONE (0x0450)
+#define NIX_AF_AQ_DONE_ACK (0x0460)
+#define NIX_AF_AQ_DONE_TIMER (0x0470)
+#define NIX_AF_AQ_DONE_INT (0x0480)
+#define NIX_AF_AQ_DONE_INT_W1S (0x0488)
+#define NIX_AF_AQ_DONE_ENA_W1S (0x0490)
+#define NIX_AF_AQ_DONE_ENA_W1C (0x0498)
+#define NIX_AF_RX_LINKX_SLX_SPKT_CNT (0x0500)
+#define NIX_AF_RX_LINKX_SLX_SXQE_CNT (0x0510)
+#define NIX_AF_RX_MCAST_JOBSX_SW_CNT (0x0520)
+#define NIX_AF_RX_MIRROR_JOBSX_SW_CNT (0x0530)
+#define NIX_AF_RX_LINKX_CFG(a) (0x0540 | (a) << 16)
+#define NIX_AF_RX_SW_SYNC (0x0550)
+#define NIX_AF_RX_SW_SYNC_DONE (0x0560)
+#define NIX_AF_SEB_ECO (0x0600)
+#define NIX_AF_SEB_TEST_BP (0x0610)
+#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620)
+#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630)
+#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640)
+#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
+#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
+
+#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
+#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
+#define NIX_AF_PSE_SHAPER_CFG (0x810)
+#define NIX_AF_TX_EXPR_CREDIT (0x830)
+#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18)
+#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16)
+#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16)
+#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16)
+#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16)
+#define NIX_AF_SDP_LINK_CREDIT (0xa40)
+#define NIX_AF_SDP_SW_XOFFX(a) (0xA60 | (a) << 3)
+#define NIX_AF_SDP_HW_XOFFX(a) (0xAC0 | (a) << 3)
+#define NIX_AF_TL4X_BP_STATUS(a) (0xB00 | (a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE(a) (0xC10 | (a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
+#define NIX_AF_TL1X_SHAPE_STATE(a) (0xC50 | (a) << 16)
+#define NIX_AF_TL1X_SW_XOFF(a) (0xC70 | (a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN(a) (0xC90 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW(a) (0xCA0 | (a) << 16)
+#define NIX_AF_TL1X_RED(a) (0xCB0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG0(a) (0xCC0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG1(a) (0xCC8 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG2(a) (0xCD0 | (a) << 16)
+#define NIX_AF_TL1X_MD_DEBUG3(a) (0xCD8 | (a) << 16)
+#define NIX_AF_TL1A_DEBUG (0xce0)
+#define NIX_AF_TL1B_DEBUG (0xcf0)
+#define NIX_AF_TL1_DEBUG_GREEN (0xd00)
+#define NIX_AF_TL1_DEBUG_NODE (0xd10)
+#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xD20 | (a) << 16)
+#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xD30 | (a) << 16)
+#define NIX_AF_TL1X_RED_PACKETS(a) (0xD40 | (a) << 16)
+#define NIX_AF_TL1X_RED_BYTES(a) (0xD50 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xD60 | (a) << 16)
+#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xD70 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xD80 | (a) << 16)
+#define NIX_AF_TL1X_GREEN_BYTES(a) (0xD90 | (a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE(a) (0xE10 | (a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
+#define NIX_AF_TL2X_SCHED_STATE(a) (0xE40 | (a) << 16)
+#define NIX_AF_TL2X_SHAPE_STATE(a) (0xE50 | (a) << 16)
+#define NIX_AF_TL2X_POINTERS(a) (0xE60 | (a) << 16)
+#define NIX_AF_TL2X_SW_XOFF(a) (0xE70 | (a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
+#define NIX_AF_TL2X_GREEN(a) (0xE90 | (a) << 16)
+#define NIX_AF_TL2X_YELLOW(a) (0xEA0 | (a) << 16)
+#define NIX_AF_TL2X_RED(a) (0xEB0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG0(a) (0xEC0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG1(a) (0xEC8 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG2(a) (0xED0 | (a) << 16)
+#define NIX_AF_TL2X_MD_DEBUG3(a) (0xED8 | (a) << 16)
+#define NIX_AF_TL2A_DEBUG (0xee0)
+#define NIX_AF_TL2B_DEBUG (0xef0)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
+#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
+#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
+#define NIX_AF_TL3X_SCHED_STATE(a) (0x1040 | (a) << 16)
+#define NIX_AF_TL3X_SHAPE_STATE(a) (0x1050 | (a) << 16)
+#define NIX_AF_TL3X_POINTERS(a) (0x1060 | (a) << 16)
+#define NIX_AF_TL3X_SW_XOFF(a) (0x1070 | (a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
+#define NIX_AF_TL3X_GREEN(a) (0x1090 | (a) << 16)
+#define NIX_AF_TL3X_YELLOW(a) (0x10A0 | (a) << 16)
+#define NIX_AF_TL3X_RED(a) (0x10B0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG0(a) (0x10C0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG1(a) (0x10C8 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG2(a) (0x10D0 | (a) << 16)
+#define NIX_AF_TL3X_MD_DEBUG3(a) (0x10D8 | (a) << 16)
+#define NIX_AF_TL3A_DEBUG (0x10e0)
+#define NIX_AF_TL3B_DEBUG (0x10f0)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
+#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
+#define NIX_AF_TL4X_SCHED_STATE(a) (0x1240 | (a) << 16)
+#define NIX_AF_TL4X_SHAPE_STATE(a) (0x1250 | (a) << 16)
+#define NIX_AF_TL4X_POINTERS(a) (0x1260 | (a) << 16)
+#define NIX_AF_TL4X_SW_XOFF(a) (0x1270 | (a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
+#define NIX_AF_TL4X_GREEN(a) (0x1290 | (a) << 16)
+#define NIX_AF_TL4X_YELLOW(a) (0x12A0 | (a) << 16)
+#define NIX_AF_TL4X_RED(a) (0x12B0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG0(a) (0x12C0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG1(a) (0x12C8 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG2(a) (0x12D0 | (a) << 16)
+#define NIX_AF_TL4X_MD_DEBUG3(a) (0x12D8 | (a) << 16)
+#define NIX_AF_TL4A_DEBUG (0x12e0)
+#define NIX_AF_TL4B_DEBUG (0x12f0)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
+#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
+#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
+#define NIX_AF_MDQX_SCHED_STATE(a) (0x1440 | (a) << 16)
+#define NIX_AF_MDQX_SHAPE_STATE(a) (0x1450 | (a) << 16)
+#define NIX_AF_MDQX_POINTERS(a) (0x1460 | (a) << 16)
+#define NIX_AF_MDQX_SW_XOFF(a) (0x1470 | (a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
+#define NIX_AF_MDQX_MD_DEBUG(a) (0x14C0 | (a) << 16)
+#define NIX_AF_MDQX_PTR_FIFO(a) (0x14D0 | (a) << 16)
+#define NIX_AF_MDQA_DEBUG (0x14e0)
+#define NIX_AF_MDQB_DEBUG (0x14f0)
+#define NIX_AF_TL3_TL2X_CFG(a) (0x1600 | (a) << 18)
+#define NIX_AF_TL3_TL2X_BP_STATUS(a) (0x1610 | (a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) (0x1800 | (a) << 18 | (b) << 3)
+#define NIX_AF_TX_MCASTX(a) (0x1900 | (a) << 15)
+#define NIX_AF_TX_VTAG_DEFX_CTL(a) (0x1A00 | (a) << 16)
+#define NIX_AF_TX_VTAG_DEFX_DATA(a) (0x1A10 | (a) << 16)
+#define NIX_AF_RX_BPIDX_STATUS(a) (0x1A20 | (a) << 17)
+#define NIX_AF_RX_CHANX_CFG(a) (0x1A30 | (a) << 15)
+#define NIX_AF_CINT_TIMERX(a) (0x1A40 | (a) << 18)
+#define NIX_AF_LSO_FORMATX_FIELDX(a, b) (0x1B00 | (a) << 16 | (b) << 3)
+#define NIX_AF_LFX_CFG(a) (0x4000 | (a) << 17)
+#define NIX_AF_LFX_SQS_CFG(a) (0x4020 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG2(a) (0x4028 | (a) << 17)
+#define NIX_AF_LFX_SQS_BASE(a) (0x4030 | (a) << 17)
+#define NIX_AF_LFX_RQS_CFG(a) (0x4040 | (a) << 17)
+#define NIX_AF_LFX_RQS_BASE(a) (0x4050 | (a) << 17)
+#define NIX_AF_LFX_CQS_CFG(a) (0x4060 | (a) << 17)
+#define NIX_AF_LFX_CQS_BASE(a) (0x4070 | (a) << 17)
+#define NIX_AF_LFX_TX_CFG(a) (0x4080 | (a) << 17)
+#define NIX_AF_LFX_TX_PARSE_CFG(a) (0x4090 | (a) << 17)
+#define NIX_AF_LFX_RX_CFG(a) (0x40A0 | (a) << 17)
+#define NIX_AF_LFX_RSS_CFG(a) (0x40C0 | (a) << 17)
+#define NIX_AF_LFX_RSS_BASE(a) (0x40D0 | (a) << 17)
+#define NIX_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 17)
+#define NIX_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 17)
+#define NIX_AF_LFX_CINTS_CFG(a) (0x4120 | (a) << 17)
+#define NIX_AF_LFX_CINTS_BASE(a) (0x4130 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17)
+#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17)
+#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17)
+#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_LOCKX(a, b) (0x4300 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_TX_STATX(a, b) (0x4400 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RX_STATX(a, b) (0x4500 | (a) << 17 | (b) << 3)
+#define NIX_AF_LFX_RSS_GRPX(a, b) (0x4600 | (a) << 17 | (b) << 3)
+#define NIX_AF_RX_NPC_MC_RCV (0x4700)
+#define NIX_AF_RX_NPC_MC_DROP (0x4710)
+#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720)
+#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
+#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
+
+#define NIX_PRIV_AF_INT_CFG (0x8000000)
+#define NIX_PRIV_LFX_CFG (0x8000010)
+#define NIX_PRIV_LFX_INT_CFG (0x8000020)
+#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030)
+
+/* SSO */
+#define SSO_AF_CONST (0x1000)
+#define SSO_AF_CONST1 (0x1008)
+#define SSO_AF_BLK_RST (0x10f8)
+#define SSO_AF_LF_HWGRP_RST (0x10e0)
+#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800)
+#define SSO_PRIV_LFX_HWGRP_CFG (0x10000)
+#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000)
+
+/* SSOW */
+#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010)
+#define SSOW_AF_LF_HWS_RST (0x0030)
+#define SSOW_PRIV_LFX_HWS_CFG (0x1000)
+#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000)
+
+/* TIM */
+#define TIM_AF_CONST (0x90)
+#define TIM_PRIV_LFX_CFG (0x20000)
+#define TIM_PRIV_LFX_INT_CFG (0x24000)
+#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000)
+#define TIM_AF_BLK_RST (0x10)
+#define TIM_AF_LF_RST (0x20)
+
+/* CPT */
+#define CPT_AF_CONSTANTS0 (0x0000)
+#define CPT_PRIV_LFX_CFG (0x41000)
+#define CPT_PRIV_LFX_INT_CFG (0x43000)
+#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000)
+#define CPT_AF_LF_RST (0x44000)
+#define CPT_AF_BLK_RST (0x46000)
+
+#define NDC_AF_BLK_RST (0x002F0)
+#define NPC_AF_BLK_RST (0x00040)
+
+/* NPC */
+#define NPC_AF_CFG (0x00000)
+#define NPC_AF_ACTIVE_PC (0x00010)
+#define NPC_AF_CONST (0x00020)
+#define NPC_AF_CONST1 (0x00030)
+#define NPC_AF_BLK_RST (0x00040)
+#define NPC_AF_MCAM_SCRUB_CTL (0x000a0)
+#define NPC_AF_KCAM_SCRUB_CTL (0x000b0)
+#define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3)
+#define NPC_AF_PCK_CFG (0x00600)
+#define NPC_AF_PCK_DEF_OL2 (0x00610)
+#define NPC_AF_PCK_DEF_OIP4 (0x00620)
+#define NPC_AF_PCK_DEF_OIP6 (0x00630)
+#define NPC_AF_PCK_DEF_IIP4 (0x00640)
+#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3)
+#define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8)
+#define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6)
+#define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6)
+#define NPC_AF_PKINDX_CPI_DEFX(a, b) (0x80020ull | (a) << 6 | (b) << 3)
+#define NPC_AF_KPUX_ENTRYX_CAMX(a, b, c) \
+ (0x100000 | (a) << 14 | (b) << 6 | (c) << 3)
+#define NPC_AF_KPUX_ENTRYX_ACTION0(a, b) \
+ (0x100020 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRYX_ACTION1(a, b) \
+ (0x100028 | (a) << 14 | (b) << 6)
+#define NPC_AF_KPUX_ENTRY_DISX(a, b) (0x180000 | (a) << 6 | (b) << 3)
+#define NPC_AF_CPIX_CFG(a) (0x200000 | (a) << 3)
+#define NPC_AF_INTFX_LIDX_LTX_LDX_CFG(a, b, c, d) \
+ (0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3)
+#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
+ (0x980000 | (a) << 16 | (b) << 12 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \
+ (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \
+ (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \
+ (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3)
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b) (0x1800000ull | (a) << 8 | (b) << 4)
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \
+ (0x1880000 | (a) << 8 | (b) << 4)
+#define NPC_AF_MATCH_STATX(a) (0x1880008 | (a) << 8)
+#define NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8)
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) (0x1900000ull | (a) << 8 | (b) << 4)
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \
+ (0x1900008 | (a) << 8 | (b) << 4)
+#define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4)
+#define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4)
+#define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4)
+#define NPC_AF_LKUP_CTL (0x2000000)
+#define NPC_AF_LKUP_DATAX(a) (0x2000200 | (a) << 4)
+#define NPC_AF_LKUP_RESULTX(a) (0x2000400 | (a) << 4)
+#define NPC_AF_INTFX_STAT(a) (0x2000800 | (a) << 4)
+#define NPC_AF_DBG_CTL (0x3000000)
+#define NPC_AF_DBG_STATUS (0x3000010)
+#define NPC_AF_KPUX_DBG(a) (0x3000020 | (a) << 8)
+#define NPC_AF_IKPU_ERR_CTL (0x3000080)
+#define NPC_AF_KPUX_ERR_CTL(a) (0x30000a0 | (a) << 8)
+#define NPC_AF_MCAM_DBG (0x3001000)
+#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
+#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+
+#endif /* RVU_REG_H */
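
The indexed macros in this header follow one pattern: the per-instance stride is a power of two, so the index is OR'd into high bits of the base offset and cannot collide with the low offset bits. A standalone userspace sketch, for illustration only:

#include <stdio.h>

/* Same expansion as the header above: TL2 queue register blocks are
 * 64 KB apart, so the queue index lands in bits 16 and up.
 */
#define NIX_AF_TL2X_SCHEDULE(a)	(0xE00 | (a) << 16)

int main(void)
{
	for (int q = 0; q < 4; q++)
		printf("TL2[%d] SCHEDULE at 0x%x\n", q,
		       NIX_AF_TL2X_SCHEDULE(q));
	return 0;
}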
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
new file mode 100644
index 000000000000..f920dac74e6c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -0,0 +1,917 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RVU_STRUCT_H
+#define RVU_STRUCT_H
+
+/* RVU Block Address Enumeration */
+enum rvu_block_addr_e {
+ BLKADDR_RVUM = 0x0ULL,
+ BLKADDR_LMT = 0x1ULL,
+ BLKADDR_MSIX = 0x2ULL,
+ BLKADDR_NPA = 0x3ULL,
+ BLKADDR_NIX0 = 0x4ULL,
+ BLKADDR_NIX1 = 0x5ULL,
+ BLKADDR_NPC = 0x6ULL,
+ BLKADDR_SSO = 0x7ULL,
+ BLKADDR_SSOW = 0x8ULL,
+ BLKADDR_TIM = 0x9ULL,
+ BLKADDR_CPT0 = 0xaULL,
+ BLKADDR_CPT1 = 0xbULL,
+ BLKADDR_NDC0 = 0xcULL,
+ BLKADDR_NDC1 = 0xdULL,
+ BLKADDR_NDC2 = 0xeULL,
+ BLK_COUNT = 0xfULL,
+};
+
+/* RVU Block Type Enumeration */
+enum rvu_block_type_e {
+ BLKTYPE_RVUM = 0x0,
+ BLKTYPE_MSIX = 0x1,
+ BLKTYPE_LMT = 0x2,
+ BLKTYPE_NIX = 0x3,
+ BLKTYPE_NPA = 0x4,
+ BLKTYPE_NPC = 0x5,
+ BLKTYPE_SSO = 0x6,
+ BLKTYPE_SSOW = 0x7,
+ BLKTYPE_TIM = 0x8,
+ BLKTYPE_CPT = 0x9,
+ BLKTYPE_NDC = 0xa,
+ BLKTYPE_MAX = 0xa,
+};
+
+/* RVU Admin function Interrupt Vector Enumeration */
+enum rvu_af_int_vec_e {
+ RVU_AF_INT_VEC_POISON = 0x0,
+ RVU_AF_INT_VEC_PFFLR = 0x1,
+ RVU_AF_INT_VEC_PFME = 0x2,
+ RVU_AF_INT_VEC_GEN = 0x3,
+ RVU_AF_INT_VEC_MBOX = 0x4,
+ RVU_AF_INT_VEC_CNT = 0x5,
+};
+
+/* RVU PF Interrupt Vector Enumeration */
+enum rvu_pf_int_vec_e {
+ RVU_PF_INT_VEC_VFFLR0 = 0x0,
+ RVU_PF_INT_VEC_VFFLR1 = 0x1,
+ RVU_PF_INT_VEC_VFME0 = 0x2,
+ RVU_PF_INT_VEC_VFME1 = 0x3,
+ RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4,
+ RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5,
+ RVU_PF_INT_VEC_AFPF_MBOX = 0x6,
+ RVU_PF_INT_VEC_CNT = 0x7,
+};
+
+/* NPA admin queue completion enumeration */
+enum npa_aq_comp {
+ NPA_AQ_COMP_NOTDONE = 0x0,
+ NPA_AQ_COMP_GOOD = 0x1,
+ NPA_AQ_COMP_SWERR = 0x2,
+ NPA_AQ_COMP_CTX_POISON = 0x3,
+ NPA_AQ_COMP_CTX_FAULT = 0x4,
+ NPA_AQ_COMP_LOCKERR = 0x5,
+};
+
+/* NPA admin queue context types */
+enum npa_aq_ctype {
+ NPA_AQ_CTYPE_AURA = 0x0,
+ NPA_AQ_CTYPE_POOL = 0x1,
+};
+
+/* NPA admin queue instruction opcodes */
+enum npa_aq_instop {
+ NPA_AQ_INSTOP_NOP = 0x0,
+ NPA_AQ_INSTOP_INIT = 0x1,
+ NPA_AQ_INSTOP_WRITE = 0x2,
+ NPA_AQ_INSTOP_READ = 0x3,
+ NPA_AQ_INSTOP_LOCK = 0x4,
+ NPA_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* NPA admin queue instruction structure */
+struct npa_aq_inst_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 doneint : 1; /* W0 */
+ u64 reserved_44_62 : 19;
+ u64 cindex : 20;
+ u64 reserved_17_23 : 7;
+ u64 lf : 9;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 lf : 9;
+ u64 reserved_17_23 : 7;
+ u64 cindex : 20;
+ u64 reserved_44_62 : 19;
+ u64 doneint : 1;
+#endif
+ u64 res_addr; /* W1 */
+};
+
+/* NPA admin queue result structure */
+struct npa_aq_res_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_17_63 : 47; /* W0 */
+ u64 doneint : 1;
+ u64 compcode : 8;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 compcode : 8;
+ u64 doneint : 1;
+ u64 reserved_17_63 : 47;
+#endif
+ u64 reserved_64_127; /* W1 */
+};
+
+struct npa_aura_s {
+ u64 pool_addr; /* W0 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 avg_level : 8;
+ u64 reserved_118_119 : 2;
+ u64 shift : 6;
+ u64 aura_drop : 8;
+ u64 reserved_98_103 : 6;
+ u64 bp_ena : 2;
+ u64 aura_drop_ena : 1;
+ u64 pool_drop_ena : 1;
+ u64 reserved_93 : 1;
+ u64 avg_con : 9;
+ u64 pool_way_mask : 16;
+ u64 pool_caching : 1;
+ u64 reserved_65 : 2;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 reserved_65 : 2;
+ u64 pool_caching : 1;
+ u64 pool_way_mask : 16;
+ u64 avg_con : 9;
+ u64 reserved_93 : 1;
+ u64 pool_drop_ena : 1;
+ u64 aura_drop_ena : 1;
+ u64 bp_ena : 2;
+ u64 reserved_98_103 : 6;
+ u64 aura_drop : 8;
+ u64 shift : 6;
+ u64 reserved_118_119 : 2;
+ u64 avg_level : 8;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 reserved_189_191 : 3;
+ u64 nix1_bpid : 9;
+ u64 reserved_177_179 : 3;
+ u64 nix0_bpid : 9;
+ u64 reserved_164_167 : 4;
+ u64 count : 36;
+#else
+ u64 count : 36;
+ u64 reserved_164_167 : 4;
+ u64 nix0_bpid : 9;
+ u64 reserved_177_179 : 3;
+ u64 nix1_bpid : 9;
+ u64 reserved_189_191 : 3;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 reserved_252_255 : 4;
+ u64 fc_hyst_bits : 4;
+ u64 fc_stype : 2;
+ u64 fc_up_crossing : 1;
+ u64 fc_ena : 1;
+ u64 reserved_240_243 : 4;
+ u64 bp : 8;
+ u64 reserved_228_231 : 4;
+ u64 limit : 36;
+#else
+ u64 limit : 36;
+ u64 reserved_228_231 : 4;
+ u64 bp : 8;
+ u64 reserved_240_243 : 4;
+ u64 fc_ena : 1;
+ u64 fc_up_crossing : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 reserved_252_255 : 4;
+#endif
+ u64 fc_addr; /* W4 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
+ u64 reserved_379_383 : 5;
+ u64 err_qint_idx : 7;
+ u64 reserved_371 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_363 : 1;
+ u64 thresh_up : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_int : 1;
+ u64 err_int_ena : 8;
+ u64 err_int : 8;
+ u64 update_time : 16;
+ u64 pool_drop : 8;
+#else
+ u64 pool_drop : 8;
+ u64 update_time : 16;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_363 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_371 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_379_383 : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
+ u64 reserved_420_447 : 28;
+ u64 thresh : 36;
+#else
+ u64 thresh : 36;
+ u64 reserved_420_447 : 28;
+#endif
+ u64 reserved_448_511; /* W7 */
+};
+
+struct npa_pool_s {
+ u64 stack_base; /* W0 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 reserved_115_127 : 13;
+ u64 buf_size : 11;
+ u64 reserved_100_103 : 4;
+ u64 buf_offset : 12;
+ u64 stack_way_mask : 16;
+ u64 reserved_70_71 : 3;
+ u64 stack_caching : 1;
+ u64 reserved_66_67 : 2;
+ u64 nat_align : 1;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 nat_align : 1;
+ u64 reserved_66_67 : 2;
+ u64 stack_caching : 1;
+ u64 reserved_70_71 : 3;
+ u64 stack_way_mask : 16;
+ u64 buf_offset : 12;
+ u64 reserved_100_103 : 4;
+ u64 buf_size : 11;
+ u64 reserved_115_127 : 13;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 stack_pages : 32;
+ u64 stack_max_pages : 32;
+#else
+ u64 stack_max_pages : 32;
+ u64 stack_pages : 32;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 reserved_240_255 : 16;
+ u64 op_pc : 48;
+#else
+ u64 op_pc : 48;
+ u64 reserved_240_255 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
+ u64 reserved_316_319 : 4;
+ u64 update_time : 16;
+ u64 reserved_297_299 : 3;
+ u64 fc_up_crossing : 1;
+ u64 fc_hyst_bits : 4;
+ u64 fc_stype : 2;
+ u64 fc_ena : 1;
+ u64 avg_con : 9;
+ u64 avg_level : 8;
+ u64 reserved_270_271 : 2;
+ u64 shift : 6;
+ u64 reserved_260_263 : 4;
+ u64 stack_offset : 4;
+#else
+ u64 stack_offset : 4;
+ u64 reserved_260_263 : 4;
+ u64 shift : 6;
+ u64 reserved_270_271 : 2;
+ u64 avg_level : 8;
+ u64 avg_con : 9;
+ u64 fc_ena : 1;
+ u64 fc_stype : 2;
+ u64 fc_hyst_bits : 4;
+ u64 fc_up_crossing : 1;
+ u64 reserved_297_299 : 3;
+ u64 update_time : 16;
+ u64 reserved_316_319 : 4;
+#endif
+ u64 fc_addr; /* W5 */
+ u64 ptr_start; /* W6 */
+ u64 ptr_end; /* W7 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
+ u64 reserved_571_575 : 5;
+ u64 err_qint_idx : 7;
+ u64 reserved_563 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_555 : 1;
+ u64 thresh_up : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_int : 1;
+ u64 err_int_ena : 8;
+ u64 err_int : 8;
+ u64 reserved_512_535 : 24;
+#else
+ u64 reserved_512_535 : 24;
+ u64 err_int : 8;
+ u64 err_int_ena : 8;
+ u64 thresh_int : 1;
+ u64 thresh_int_ena : 1;
+ u64 thresh_up : 1;
+ u64 reserved_555 : 1;
+ u64 thresh_qint_idx : 7;
+ u64 reserved_563 : 1;
+ u64 err_qint_idx : 7;
+ u64 reserved_571_575 : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
+ u64 reserved_612_639 : 28;
+ u64 thresh : 36;
+#else
+ u64 thresh : 36;
+ u64 reserved_612_639 : 28;
+#endif
+ u64 reserved_640_703; /* W10 */
+ u64 reserved_704_767; /* W11 */
+ u64 reserved_768_831; /* W12 */
+ u64 reserved_832_895; /* W13 */
+ u64 reserved_896_959; /* W14 */
+ u64 reserved_960_1023; /* W15 */
+};
+
+/* NIX admin queue completion status */
+enum nix_aq_comp {
+ NIX_AQ_COMP_NOTDONE = 0x0,
+ NIX_AQ_COMP_GOOD = 0x1,
+ NIX_AQ_COMP_SWERR = 0x2,
+ NIX_AQ_COMP_CTX_POISON = 0x3,
+ NIX_AQ_COMP_CTX_FAULT = 0x4,
+ NIX_AQ_COMP_LOCKERR = 0x5,
+ NIX_AQ_COMP_SQB_ALLOC_FAIL = 0x6,
+};
+
+/* NIX admin queue context types */
+enum nix_aq_ctype {
+ NIX_AQ_CTYPE_RQ = 0x0,
+ NIX_AQ_CTYPE_SQ = 0x1,
+ NIX_AQ_CTYPE_CQ = 0x2,
+ NIX_AQ_CTYPE_MCE = 0x3,
+ NIX_AQ_CTYPE_RSS = 0x4,
+ NIX_AQ_CTYPE_DYNO = 0x5,
+};
+
+/* NIX admin queue instruction opcodes */
+enum nix_aq_instop {
+ NIX_AQ_INSTOP_NOP = 0x0,
+ NIX_AQ_INSTOP_INIT = 0x1,
+ NIX_AQ_INSTOP_WRITE = 0x2,
+ NIX_AQ_INSTOP_READ = 0x3,
+ NIX_AQ_INSTOP_LOCK = 0x4,
+ NIX_AQ_INSTOP_UNLOCK = 0x5,
+};
+
+/* NIX admin queue instruction structure */
+struct nix_aq_inst_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 doneint : 1; /* W0 */
+ u64 reserved_44_62 : 19;
+ u64 cindex : 20;
+ u64 reserved_15_23 : 9;
+ u64 lf : 7;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 lf : 7;
+ u64 reserved_15_23 : 9;
+ u64 cindex : 20;
+ u64 reserved_44_62 : 19;
+ u64 doneint : 1;
+#endif
+ u64 res_addr; /* W1 */
+};
+
+/* NIX admin queue result structure */
+struct nix_aq_res_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_17_63 : 47; /* W0 */
+ u64 doneint : 1;
+ u64 compcode : 8;
+ u64 ctype : 4;
+ u64 op : 4;
+#else
+ u64 op : 4;
+ u64 ctype : 4;
+ u64 compcode : 8;
+ u64 doneint : 1;
+ u64 reserved_17_63 : 47;
+#endif
+ u64 reserved_64_127; /* W1 */
+};
+
+/* NIX Completion queue context structure */
+struct nix_cq_ctx_s {
+ u64 base;
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 wrptr : 20;
+ u64 avg_con : 9;
+ u64 cint_idx : 7;
+ u64 cq_err : 1;
+ u64 qint_idx : 7;
+ u64 rsvd_81_83 : 3;
+ u64 bpid : 9;
+ u64 rsvd_69_71 : 3;
+ u64 bp_ena : 1;
+ u64 rsvd_64_67 : 4;
+#else
+ u64 rsvd_64_67 : 4;
+ u64 bp_ena : 1;
+ u64 rsvd_69_71 : 3;
+ u64 bpid : 9;
+ u64 rsvd_81_83 : 3;
+ u64 qint_idx : 7;
+ u64 cq_err : 1;
+ u64 cint_idx : 7;
+ u64 avg_con : 9;
+ u64 wrptr : 20;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 update_time : 16;
+ u64 avg_level : 8;
+ u64 head : 20;
+ u64 tail : 20;
+#else
+ u64 tail : 20;
+ u64 head : 20;
+ u64 avg_level : 8;
+ u64 update_time : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 cq_err_int_ena : 8;
+ u64 cq_err_int : 8;
+ u64 qsize : 4;
+ u64 rsvd_233_235 : 3;
+ u64 caching : 1;
+ u64 substream : 20;
+ u64 rsvd_210_211 : 2;
+ u64 ena : 1;
+ u64 drop_ena : 1;
+ u64 drop : 8;
+ u64 dp : 8;
+#else
+ u64 dp : 8;
+ u64 drop : 8;
+ u64 drop_ena : 1;
+ u64 ena : 1;
+ u64 rsvd_210_211 : 2;
+ u64 substream : 20;
+ u64 caching : 1;
+ u64 rsvd_233_235 : 3;
+ u64 qsize : 4;
+ u64 cq_err_int : 8;
+ u64 cq_err_int_ena : 8;
+#endif
+};
+
+/* NIX Receive queue context structure */
+struct nix_rq_ctx_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
+ u64 wqe_aura : 20;
+ u64 substream : 20;
+ u64 cq : 20;
+ u64 ena_wqwd : 1;
+ u64 ipsech_ena : 1;
+ u64 sso_ena : 1;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 substream : 20;
+ u64 wqe_aura : 20;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 rsvd_127_122 : 6;
+ u64 lpb_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 xqe_drop_ena : 1;
+ u64 wqe_caching : 1;
+ u64 pb_caching : 2;
+ u64 sso_tt : 2;
+ u64 sso_grp : 10;
+ u64 lpb_aura : 20;
+ u64 spb_aura : 20;
+#else
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 rsvd_127_122 : 6;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 xqe_hdr_split : 1;
+ u64 xqe_imm_copy : 1;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_size : 6;
+ u64 later_skip : 6;
+ u64 rsvd_171 : 1;
+ u64 first_skip : 7;
+ u64 lpb_sizem1 : 12;
+ u64 spb_ena : 1;
+ u64 rsvd_150_148 : 3;
+ u64 wqe_skip : 2;
+ u64 spb_sizem1 : 6;
+ u64 rsvd_139_128 : 12;
+#else
+ u64 rsvd_139_128 : 12;
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 rsvd_150_148 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 rsvd_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 spb_pool_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 xqe_pass : 8;
+ u64 xqe_drop : 8;
+#else
+ u64 xqe_drop : 8;
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
+ u64 rsvd_319_315 : 5;
+ u64 qint_idx : 7;
+ u64 rq_int_ena : 8;
+ u64 rq_int : 8;
+ u64 rsvd_291_288 : 4;
+ u64 lpb_pool_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_aura_pass : 8;
+ u64 lpb_aura_drop : 8;
+#else
+ u64 lpb_aura_drop : 8;
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 rsvd_291_288 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 rsvd_319_315 : 5;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
+ u64 rsvd_383_366 : 18;
+ u64 flow_tagw : 6;
+ u64 bad_utag : 8;
+ u64 good_utag : 8;
+ u64 ltag : 24;
+#else
+ u64 ltag : 24;
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 rsvd_383_366 : 18;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
+ u64 rsvd_447_432 : 16;
+ u64 octs : 48;
+#else
+ u64 octs : 48;
+ u64 rsvd_447_432 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W7 */
+ u64 rsvd_511_496 : 16;
+ u64 pkts : 48;
+#else
+ u64 pkts : 48;
+ u64 rsvd_511_496 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
+ u64 rsvd_575_560 : 16;
+ u64 drop_octs : 48;
+#else
+ u64 drop_octs : 48;
+ u64 rsvd_575_560 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
+ u64 rsvd_639_624 : 16;
+ u64 drop_pkts : 48;
+#else
+ u64 drop_pkts : 48;
+ u64 rsvd_639_624 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
+ u64 rsvd_703_688 : 16;
+ u64 re_pkts : 48;
+#else
+ u64 re_pkts : 48;
+ u64 rsvd_703_688 : 16;
+#endif
+ u64 rsvd_767_704; /* W11 */
+ u64 rsvd_831_768; /* W12 */
+ u64 rsvd_895_832; /* W13 */
+ u64 rsvd_959_896; /* W14 */
+ u64 rsvd_1023_960; /* W15 */
+};
+
+/* NIX SQE sizes */
+enum nix_maxsqesz {
+ NIX_MAXSQESZ_W16 = 0x0,
+ NIX_MAXSQESZ_W8 = 0x1,
+};
+
+/* NIX SQB caching type */
+enum nix_stype {
+ NIX_STYPE_STF = 0x0,
+ NIX_STYPE_STT = 0x1,
+ NIX_STYPE_STP = 0x2,
+};
+
+/* NIX Send queue context structure */
+struct nix_sq_ctx_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
+ u64 sqe_way_mask : 16;
+ u64 cq : 20;
+ u64 sdp_mcast : 1;
+ u64 substream : 20;
+ u64 qint_idx : 6;
+ u64 ena : 1;
+#else
+ u64 ena : 1;
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
+ u64 sqb_count : 16;
+ u64 default_chan : 12;
+ u64 smq_rr_quantum : 24;
+ u64 sso_ena : 1;
+ u64 xoff : 1;
+ u64 cq_ena : 1;
+ u64 smq : 9;
+#else
+ u64 smq : 9;
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_quantum : 24;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
+ u64 rsvd_191 : 1;
+ u64 sqe_stype : 2;
+ u64 sq_int_ena : 8;
+ u64 sq_int : 8;
+ u64 sqb_aura : 20;
+ u64 smq_rr_count : 25;
+#else
+ u64 smq_rr_count : 25;
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 rsvd_191 : 1;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
+ u64 rsvd_255_253 : 3;
+ u64 smq_next_sq_vld : 1;
+ u64 smq_pend : 1;
+ u64 smenq_next_sqb_vld : 1;
+ u64 head_offset : 6;
+ u64 smenq_offset : 6;
+ u64 tail_offset : 6;
+ u64 smq_lso_segnum : 8;
+ u64 smq_next_sq : 20;
+ u64 mnq_dis : 1;
+ u64 lmt_dis : 1;
+ u64 cq_limit : 8;
+ u64 max_sqe_size : 2;
+#else
+ u64 max_sqe_size : 2;
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 rsvd_255_253 : 3;
+#endif
+ u64 next_sqb : 64; /* W4 */
+ u64 tail_sqb : 64; /* W5 */
+ u64 smenq_sqb : 64; /* W6 */
+ u64 smenq_next_sqb : 64; /* W7 */
+ u64 head_sqb : 64; /* W8 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
+ u64 rsvd_639_630 : 10;
+ u64 vfi_lso_vld : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_total : 18;
+ u64 rsvd_583_576 : 8;
+#else
+ u64 rsvd_583_576 : 8;
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 rsvd_639_630 : 10;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
+ u64 rsvd_703_658 : 46;
+ u64 scm_lso_rem : 18;
+#else
+ u64 scm_lso_rem : 18;
+ u64 rsvd_703_658 : 46;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */
+ u64 rsvd_767_752 : 16;
+ u64 octs : 48;
+#else
+ u64 octs : 48;
+ u64 rsvd_767_752 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */
+ u64 rsvd_831_816 : 16;
+ u64 pkts : 48;
+#else
+ u64 pkts : 48;
+ u64 rsvd_831_816 : 16;
+#endif
+ u64 rsvd_895_832 : 64; /* W13 */
+#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */
+ u64 rsvd_959_944 : 16;
+ u64 dropped_octs : 48;
+#else
+ u64 dropped_octs : 48;
+ u64 rsvd_959_944 : 16;
+#endif
+#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */
+ u64 rsvd_1023_1008 : 16;
+ u64 dropped_pkts : 48;
+#else
+ u64 dropped_pkts : 48;
+ u64 rsvd_1023_1008 : 16;
+#endif
+};
+
+/* NIX Receive side scaling entry structure */
+struct nix_rsse_s {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ uint32_t reserved_20_31 : 12;
+ uint32_t rq : 20;
+#else
+ uint32_t rq : 20;
+ uint32_t reserved_20_31 : 12;
+#endif
+};
+
+/* NIX receive multicast/mirror entry structure */
+struct nix_rx_mce_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
+ uint64_t next : 16;
+ uint64_t pf_func : 16;
+ uint64_t rsvd_31_24 : 8;
+ uint64_t index : 20;
+ uint64_t eol : 1;
+ uint64_t rsvd_2 : 1;
+ uint64_t op : 2;
+#else
+ uint64_t op : 2;
+ uint64_t rsvd_2 : 1;
+ uint64_t eol : 1;
+ uint64_t index : 20;
+ uint64_t rsvd_31_24 : 8;
+ uint64_t pf_func : 16;
+ uint64_t next : 16;
+#endif
+};
+
+enum nix_lsoalg {
+ NIX_LSOALG_NOP,
+ NIX_LSOALG_ADD_SEGNUM,
+ NIX_LSOALG_ADD_PAYLEN,
+ NIX_LSOALG_ADD_OFFSET,
+ NIX_LSOALG_TCP_FLAGS,
+};
+
+enum nix_txlayer {
+ NIX_TXLAYER_OL3,
+ NIX_TXLAYER_OL4,
+ NIX_TXLAYER_IL3,
+ NIX_TXLAYER_IL4,
+};
+
+struct nix_lso_format {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_19_63 : 45;
+ u64 alg : 3;
+ u64 rsvd_14_15 : 2;
+ u64 sizem1 : 2;
+ u64 rsvd_10_11 : 2;
+ u64 layer : 2;
+ u64 offset : 8;
+#else
+ u64 offset : 8;
+ u64 layer : 2;
+ u64 rsvd_10_11 : 2;
+ u64 sizem1 : 2;
+ u64 rsvd_14_15 : 2;
+ u64 alg : 3;
+ u64 rsvd_19_63 : 45;
+#endif
+};
+
+struct nix_rx_flowkey_alg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_35_63 : 29;
+ u64 ltype_match : 4;
+ u64 ltype_mask : 4;
+ u64 sel_chan : 1;
+ u64 ena : 1;
+ u64 reserved_24_24 : 1;
+ u64 lid : 3;
+ u64 bytesm1 : 5;
+ u64 hdr_offset : 8;
+ u64 fn_mask : 1;
+ u64 ln_mask : 1;
+ u64 key_offset : 6;
+#else
+ u64 key_offset : 6;
+ u64 ln_mask : 1;
+ u64 fn_mask : 1;
+ u64 hdr_offset : 8;
+ u64 bytesm1 : 5;
+ u64 lid : 3;
+ u64 reserved_24_24 : 1;
+ u64 ena : 1;
+ u64 sel_chan : 1;
+ u64 ltype_mask : 4;
+ u64 ltype_match : 4;
+ u64 reserved_35_63 : 29;
+#endif
+};
+
+/* NIX VTAG size */
+enum nix_vtag_size {
+ VTAGSIZE_T4 = 0x0,
+ VTAGSIZE_T8 = 0x1,
+};
+#endif /* RVU_STRUCT_H */
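
Every context structure in this header is declared twice, once per bitfield order, so the in-memory layout matches the hardware words on both endiannesses. A compile-time size check is a cheap guard against layout drift; the sketch below is not part of the original file:

#include <linux/build_bug.h>

/* Sketch: each structure must span exactly its hardware word count,
 * whichever #if branch was compiled.
 */
static inline void rvu_struct_layout_checks(void)
{
	BUILD_BUG_ON(sizeof(struct npa_aq_inst_s) != 16);  /* W0-W1 */
	BUILD_BUG_ON(sizeof(struct npa_aq_res_s) != 16);   /* W0-W1 */
	BUILD_BUG_ON(sizeof(struct nix_cq_ctx_s) != 32);   /* W0-W3 */
	BUILD_BUG_ON(sizeof(struct npa_pool_s) != 128);    /* W0-W15 */
}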
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3a9730612a70..0bd4351b2a49 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -988,8 +988,8 @@ static int pxa168_init_phy(struct net_device *dev)
cmd.base.phy_address = pep->phy_addr;
cmd.base.speed = pep->phy_speed;
cmd.base.duplex = pep->phy_duplex;
- ethtool_convert_legacy_u32_to_link_mode(cmd.link_modes.advertising,
- PHY_BASIC_FEATURES);
+ bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
cmd.base.autoneg = AUTONEG_ENABLE;
if (cmd.base.speed != 0)
@@ -1260,7 +1260,8 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
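
The pxa168 hunk above swaps a legacy-u32 conversion for a direct bitmap copy, which fits once the source feature set is itself a link-mode bitmap. Both idioms side by side, as a sketch rather than driver code:

#include <linux/bitmap.h>
#include <linux/ethtool.h>

/* From a linkmode bitmap source: plain copy over the full mask width. */
static void fill_advertising(struct ethtool_link_ksettings *cmd,
			     const unsigned long *features)
{
	bitmap_copy(cmd->link_modes.advertising, features,
		    __ETHTOOL_LINK_MODE_MASK_NBITS);
}

/* From a legacy u32 capability word: convert bit-for-bit. */
static void fill_advertising_legacy(struct ethtool_link_ksettings *cmd,
				    u32 legacy)
{
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						legacy);
}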
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 6e6abdc399de..7dbfdac4067a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -243,11 +243,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (dev->phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- if (dev->phydev->advertising & ADVERTISED_Pause)
- lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
- lcl_adv |= ADVERTISE_PAUSE_ASYM;
-
+ lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
@@ -355,12 +351,8 @@ static int mtk_phy_connect(struct net_device *dev)
dev->phydev->speed = 0;
dev->phydev->duplex = 0;
- if (of_phy_is_fixed_link(mac->of_node))
- dev->phydev->supported |=
- SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-
- dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause;
+ phy_set_max_speed(dev->phydev, SPEED_1000);
+ phy_support_asym_pause(dev->phydev);
dev->phydev->advertising = dev->phydev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(dev->phydev);
@@ -405,7 +397,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
eth->mii_bus->priv = eth;
eth->mii_bus->parent = eth->dev;
- snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
+ snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
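
The mtk_eth_soc hunks replace hand-edited supported/advertising masks with phylib helpers. The resulting connect-time pattern, reduced to a sketch (error handling and the link-adjust callback elided):

#include <linux/phy.h>

/* Cap the PHY at gigabit and let phylib advertise pause/asym-pause,
 * instead of masking phydev->supported by hand.
 */
static int example_phy_setup(struct phy_device *phydev)
{
	phy_set_max_speed(phydev, SPEED_1000);
	phy_support_asym_pause(phydev);
	return phy_start_aneg(phydev);
}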
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 4bdf25059542..deef5a998985 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -614,7 +614,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
int i;
buf->direct.buf = NULL;
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ buf->nbufs = DIV_ROUND_UP(size, PAGE_SIZE);
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
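
The alloc.c change is purely cosmetic: DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), exactly the ceiling-division expression it replaces, but names the intent. For instance:

#include <linux/kernel.h>

/* Equivalent to (size + PAGE_SIZE - 1) / PAGE_SIZE. */
static inline int pages_needed(int size)
{
	return DIV_ROUND_UP(size, PAGE_SIZE);
}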
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index d25e16d2c319..109472d6b61f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -167,8 +167,13 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].rx_ppp = pfcrx;
params->prof[i].tx_pause = !(pfcrx || pfctx);
params->prof[i].tx_ppp = pfctx;
- params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
- params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+ if (mlx4_low_memory_profile()) {
+ params->prof[i].tx_ring_size = MLX4_EN_MIN_TX_SIZE;
+ params->prof[i].rx_ring_size = MLX4_EN_MIN_RX_SIZE;
+ } else {
+ params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+ params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+ }
params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up;
params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up *
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 6785661d1a72..fe49384eba48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1286,20 +1286,6 @@ out:
mutex_unlock(&mdev->state_lock);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mlx4_en_netpoll(struct net_device *dev)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_cq *cq;
- int i;
-
- for (i = 0; i < priv->tx_ring_num[TX]; i++) {
- cq = priv->tx_cq[TX][i];
- napi_schedule(&cq->napi);
- }
-}
-#endif
-
static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
u64 reg_id;
@@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mlx4_en_netpoll,
-#endif
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = __mlx4_en_setup_tc,
@@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
.ndo_get_vf_stats = mlx4_en_get_vf_stats,
.ndo_get_vf_config = mlx4_en_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mlx4_en_netpoll,
-#endif
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = __mlx4_en_setup_tc,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 1f3372c1802e..2df92dbd38e1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
struct mlx4_dev *dev = &priv->dev;
struct mlx4_eq *eq = &priv->eq_table.eq[vec];
- if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+ if (!cpumask_available(eq->affinity_mask) ||
+ cpumask_empty(eq->affinity_mask))
return;
hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 7262c6310650..4b4351141b94 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -406,7 +406,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
if (WARN_ON(!obj_per_chunk))
return -EINVAL;
- num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+ num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
table->icm = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
if (!table->icm)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d2d59444f562..6a046030e873 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -260,47 +260,34 @@ static const struct devlink_param mlx4_devlink_params[] = {
NULL, NULL, NULL),
};
-static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
- union devlink_param_value init_val)
-{
- struct mlx4_priv *priv = devlink_priv(devlink);
- struct mlx4_dev *dev = &priv->dev;
- int err;
-
- err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
- if (err)
- mlx4_warn(dev,
- "devlink set parameter %u value failed (err = %d)",
- param_id, err);
-}
-
static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
{
union devlink_param_value value;
value.vbool = !!mlx4_internal_err_reset;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ value);
value.vu32 = 1UL << log_num_mac;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
value.vbool = enable_64b_cqe_eqe;
- mlx4_devlink_set_init_value(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ value);
value.vbool = enable_4k_uar;
- mlx4_devlink_set_init_value(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ value);
value.vbool = false;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ value);
}
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index c3228b89df46..485d856546c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -72,7 +72,7 @@
#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
#define DEF_RX_RINGS 16
#define MAX_RX_RINGS 128
-#define MIN_RX_RINGS 4
+#define MIN_RX_RINGS 1
#define LOG_TXBB_SIZE 6
#define TXBB_SIZE BIT(LOG_TXBB_SIZE)
#define HEADROOM (2048 / TXBB_SIZE + 1)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3ce14d42ddc8..a5a0823e5ada 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
u8 own;
do {
- own = ent->lay->status_own;
+ own = READ_ONCE(ent->lay->status_own);
if (!(own & CMD_OWNER_HW)) {
ent->ret = 0;
return;
@@ -308,10 +308,11 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
- case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_FPGA_DESTROY_QP:
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_DEALLOC_MEMIC:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -426,7 +427,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
- case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
case MLX5_CMD_OP_FPGA_CREATE_QP:
case MLX5_CMD_OP_FPGA_MODIFY_QP:
@@ -435,6 +436,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_ALLOC_MEMIC:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -599,8 +601,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
- MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
- MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
+ MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
@@ -617,6 +619,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
+ MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
+ MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
default: return "unknown command opcode";
}
}
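
The poll_timeout() hunk wraps the firmware-owned status byte in READ_ONCE() so the compiler must reload it on every iteration; nothing in the loop writes it, so without the annotation the load could legally be hoisted and the loop would spin on a stale value. The pattern in miniature (a sketch, not the mlx5 code):

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>	/* cpu_relax() */

static bool wait_for_hw_ownership_drop(const u8 *status, u8 owner_bit,
				       int max_iters)
{
	while (max_iters--) {
		/* Forced reload each pass; cf. READ_ONCE() above. */
		if (!(READ_ONCE(*status) & owner_bit))
			return true;
		cpu_relax();
	}
	return false;
}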
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index a4179122a279..4b85abb5c9f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -109,6 +109,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cons_index = 0;
cq->arm_sn = 0;
cq->eq = eq;
+ cq->uid = MLX5_GET(create_cq_in, in, uid);
refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
@@ -144,6 +145,7 @@ err_cmd:
memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
+ MLX5_SET(destroy_cq_in, din, uid, cq->uid);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
@@ -165,6 +167,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
+ MLX5_SET(destroy_cq_in, in, uid, cq->uid);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
@@ -196,6 +199,7 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
+ MLX5_SET(modify_cq_in, in, uid, cq->uid);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
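
The cq.c hunks all serve one invariant: the uid supplied in the create command is saved on the CQ object and stamped into every later destroy/modify command for that CQ. The save-and-replay shape, with illustrative names that are not the mlx5 API:

struct example_obj {
	u32 id;
	u32 uid;	/* captured from the create command */
};

static void example_on_create(struct example_obj *obj, u32 id, u32 uid)
{
	obj->id = id;
	obj->uid = uid;		/* mirrors cq->uid = MLX5_GET(...) above */
}

static void example_fill_teardown(const struct example_obj *obj,
				  u32 *cmd_id, u32 *cmd_uid)
{
	*cmd_id = obj->id;
	*cmd_uid = obj->uid;	/* mirrors MLX5_SET(..., uid, cq->uid) */
}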
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 0240aee9189e..d027ce00c8ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -133,7 +133,7 @@ TRACE_EVENT(mlx5_fs_del_fg,
{MLX5_FLOW_CONTEXT_ACTION_DROP, "DROP"},\
{MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, "FWD"},\
{MLX5_FLOW_CONTEXT_ACTION_COUNT, "CNT"},\
- {MLX5_FLOW_CONTEXT_ACTION_ENCAP, "ENCAP"},\
+ {MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT, "REFORMAT"},\
{MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\
{MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\
{MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\
@@ -252,10 +252,10 @@ TRACE_EVENT(mlx5_fs_add_rule,
memcpy(__entry->destination,
&rule->dest_attr,
sizeof(__entry->destination));
- if (rule->dest_attr.type & MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
- rule->dest_attr.counter)
+ if (rule->dest_attr.type &
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
__entry->counter_id =
- rule->dest_attr.counter->id;
+ rule->dest_attr.counter_id;
),
TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n",
__entry->rule, __entry->fte, __entry->index,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index db2cfcd21d43..d7fbd5b6ac95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -54,6 +54,7 @@
#include "en_stats.h"
#include "en/fs.h"
+extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
@@ -172,6 +173,7 @@ static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
}
}
+/* Use this function to get the max num channels (rxqs/txqs) only when creating the netdev */
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
@@ -180,6 +182,13 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
MLX5E_MAX_NUM_CHANNELS);
}
+/* Use this function to get the max num channels after the netdev was created */
+static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev)
+{
+ return min_t(unsigned int, netdev->num_rx_queues,
+ netdev->num_tx_queues);
+}
+
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
@@ -204,18 +213,12 @@ struct mlx5e_umr_wqe {
extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
-static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
- "rx_cqe_moder",
- "tx_cqe_moder",
- "rx_cqe_compress",
- "rx_striding_rq",
-};
-
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
+ MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
@@ -297,6 +300,7 @@ struct mlx5e_dcbx_dp {
enum {
MLX5E_RQ_STATE_ENABLED,
MLX5E_RQ_STATE_AM,
+ MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
};
struct mlx5e_cq {
@@ -677,7 +681,7 @@ struct mlx5e_priv {
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
struct work_struct tx_timeout_work;
- struct delayed_work update_stats_work;
+ struct work_struct update_stats_work;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
@@ -702,7 +706,7 @@ struct mlx5e_priv {
};
struct mlx5e_profile {
- void (*init)(struct mlx5_core_dev *mdev,
+ int (*init)(struct mlx5_core_dev *mdev,
struct net_device *netdev,
const struct mlx5e_profile *profile, void *ppriv);
void (*cleanup)(struct mlx5e_priv *priv);
@@ -714,7 +718,6 @@ struct mlx5e_profile {
void (*disable)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
- int (*max_nch)(struct mlx5_core_dev *mdev);
struct {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
@@ -905,10 +908,16 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);
/* common netdev helpers */
+void mlx5e_create_q_counters(struct mlx5e_priv *priv);
+void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
+int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
+ struct mlx5e_rq *drop_rq);
+void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
+
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
-int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv);
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
+int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
@@ -924,8 +933,8 @@ int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
-void mlx5e_update_stats_work(struct work_struct *work);
+void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);
typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
@@ -952,21 +961,32 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal);
+u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
+u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
/* mlx5e generic netdev management API */
+int mlx5e_netdev_init(struct net_device *netdev,
+ struct mlx5e_priv *priv,
+ struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
+void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- void *ppriv);
+ int nch, void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels, u16 mtu);
+void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+void mlx5e_build_rss_params(struct mlx5e_params *params);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
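mlx5e_create_netdev() below allocates the netdev with nch * max_tc TX queues and nch RX queues, so taking the minimum of the two queue counts recovers the channel bound after creation. A standalone sketch of that helper's logic:

    #include <stdio.h>

    /* illustrative stand-in for struct net_device's queue counts */
    struct netdev_sketch { unsigned int num_rx_queues, num_tx_queues; };

    /* mirrors mlx5e_get_netdev_max_channels(): a channel needs one rxq
     * and one txq, so the bound is the smaller of the two counts */
    static unsigned int max_channels(const struct netdev_sketch *nd)
    {
        return nd->num_rx_queues < nd->num_tx_queues ?
               nd->num_rx_queues : nd->num_tx_queues;
    }

    int main(void)
    {
        struct netdev_sketch nd = { .num_rx_queues = 64, .num_tx_queues = 512 };
        printf("%u\n", max_channels(&nd)); /* 64 */
        return 0;
    }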
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index bbf69e859b78..1431232c9a09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+ struct notifier_block netdevice_nb;
};
struct mlx5e_flow_table {
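The new netdevice_nb field suggests the TC table now registers a netdevice notifier to track devices it offloads against. A minimal sketch of that standard kernel pattern (the callback body is illustrative, not the driver's actual handler):

    #include <linux/netdevice.h>

    static int sketch_netdev_event(struct notifier_block *nb,
                                   unsigned long event, void *ptr)
    {
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_UNREGISTER)
            pr_debug("netdev %s going away\n", ndev->name);
        return NOTIFY_DONE;
    }

    static struct notifier_block sketch_nb = {
        .notifier_call = sketch_netdev_event,
    };

    /* paired registration, e.g. from init/cleanup paths:
     *   register_netdevice_notifier(&sketch_nb);
     *   unregister_netdevice_notifier(&sketch_nb);
     */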
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 24e3b564964f..023dc4bccd28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -235,3 +235,211 @@ out:
kfree(out);
return err;
}
+
+static u32 fec_supported_speeds[] = {
+ 10000,
+ 40000,
+ 25000,
+ 50000,
+ 56000,
+ 100000
+};
+
+#define MLX5E_FEC_SUPPORTED_SPEEDS ARRAY_SIZE(fec_supported_speeds)
+
+/* get/set FEC admin field for a given speed */
+static int mlx5e_fec_admin_field(u32 *pplm,
+ u8 *fec_policy,
+ bool write,
+ u32 speed)
+{
+ switch (speed) {
+ case 10000:
+ case 40000:
+ if (!write)
+ *fec_policy = MLX5_GET(pplm_reg, pplm,
+ fec_override_cap_10g_40g);
+ else
+ MLX5_SET(pplm_reg, pplm,
+ fec_override_admin_10g_40g, *fec_policy);
+ break;
+ case 25000:
+ if (!write)
+ *fec_policy = MLX5_GET(pplm_reg, pplm,
+ fec_override_admin_25g);
+ else
+ MLX5_SET(pplm_reg, pplm,
+ fec_override_admin_25g, *fec_policy);
+ break;
+ case 50000:
+ if (!write)
+ *fec_policy = MLX5_GET(pplm_reg, pplm,
+ fec_override_admin_50g);
+ else
+ MLX5_SET(pplm_reg, pplm,
+ fec_override_admin_50g, *fec_policy);
+ break;
+ case 56000:
+ if (!write)
+ *fec_policy = MLX5_GET(pplm_reg, pplm,
+ fec_override_admin_56g);
+ else
+ MLX5_SET(pplm_reg, pplm,
+ fec_override_admin_56g, *fec_policy);
+ break;
+ case 100000:
+ if (!write)
+ *fec_policy = MLX5_GET(pplm_reg, pplm,
+ fec_override_admin_100g);
+ else
+ MLX5_SET(pplm_reg, pplm,
+ fec_override_admin_100g, *fec_policy);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* returns FEC capabilities for a given speed */
+static int mlx5e_get_fec_cap_field(u32 *pplm,
+ u8 *fec_cap,
+ u32 speed)
+{
+ switch (speed) {
+ case 10000:
+ case 40000:
+ *fec_cap = MLX5_GET(pplm_reg, pplm,
+ fec_override_admin_10g_40g);
+ break;
+ case 25000:
+ *fec_cap = MLX5_GET(pplm_reg, pplm,
+ fec_override_cap_25g);
+ break;
+ case 50000:
+ *fec_cap = MLX5_GET(pplm_reg, pplm,
+ fec_override_cap_50g);
+ break;
+ case 56000:
+ *fec_cap = MLX5_GET(pplm_reg, pplm,
+ fec_override_cap_56g);
+ break;
+ case 100000:
+ *fec_cap = MLX5_GET(pplm_reg, pplm,
+ fec_override_cap_100g);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps)
+{
+ u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(pplm_reg);
+ u32 current_fec_speed;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, pcam_reg))
+ return -EOPNOTSUPP;
+
+ if (!MLX5_CAP_PCAM_REG(dev, pplm))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(pplm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
+ if (err)
+ return err;
+
+ err = mlx5e_port_linkspeed(dev, &current_fec_speed);
+ if (err)
+ return err;
+
+ return mlx5e_get_fec_cap_field(out, fec_caps, current_fec_speed);
+}
+
+int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
+ u8 *fec_configured_mode)
+{
+ u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(pplm_reg);
+ u32 link_speed;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, pcam_reg))
+ return -EOPNOTSUPP;
+
+ if (!MLX5_CAP_PCAM_REG(dev, pplm))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(pplm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
+ if (err)
+ return err;
+
+ *fec_mode_active = MLX5_GET(pplm_reg, out, fec_mode_active);
+
+ if (!fec_configured_mode)
+ return 0;
+
+ err = mlx5e_port_linkspeed(dev, &link_speed);
+ if (err)
+ return err;
+
+ return mlx5e_fec_admin_field(out, fec_configured_mode, 0, link_speed);
+}
+
+int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
+{
+ bool fec_mode_not_supp_in_speed = false;
+ u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
+ u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(pplm_reg);
+ u32 current_fec_speed;
+ u8 fec_caps = 0;
+ int err;
+ int i;
+
+ if (!MLX5_CAP_GEN(dev, pcam_reg))
+ return -EOPNOTSUPP;
+
+ if (!MLX5_CAP_PCAM_REG(dev, pplm))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(pplm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
+ if (err)
+ return err;
+
+ err = mlx5e_port_linkspeed(dev, &current_fec_speed);
+ if (err)
+ return err;
+
+ memset(in, 0, sz);
+ MLX5_SET(pplm_reg, in, local_port, 1);
+ for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
+ mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
+ /* policy supported for link speed */
+ if (!!(fec_caps & fec_policy)) {
+ mlx5e_fec_admin_field(in, &fec_policy, 1,
+ fec_supported_speeds[i]);
+ } else {
+ if (fec_supported_speeds[i] == current_fec_speed)
+ return -EOPNOTSUPP;
+ mlx5e_fec_admin_field(in, &no_fec_policy, 1,
+ fec_supported_speeds[i]);
+ fec_mode_not_supp_in_speed = true;
+ }
+ }
+
+ if (fec_mode_not_supp_in_speed)
+ mlx5_core_dbg(dev,
+ "FEC policy 0x%x is not supported for some speeds",
+ fec_policy);
+
+ return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1);
+}
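mlx5e_set_fec_mode() applies the requested policy per supported speed and returns -EOPNOTSUPP only when the current link speed cannot honor it. A sketch of how a caller might use the new API, assuming the MLX5E_FEC_* bits declared in port.h below:

    /* sketch only: request Reed-Solomon FEC, fall back to no FEC */
    static int sketch_request_rs_fec(struct mlx5_core_dev *dev)
    {
        int err = mlx5e_set_fec_mode(dev, BIT(MLX5E_FEC_RS_528_514));

        if (err == -EOPNOTSUPP) /* RS not usable at the current speed */
            err = mlx5e_set_fec_mode(dev, BIT(MLX5E_FEC_NOFEC));
        return err;
    }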
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index f8cbd8194179..cd2160b8c9bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -45,4 +45,16 @@ int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
+
+int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps);
+int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
+ u8 *fec_configured_mode);
+int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy);
+
+enum {
+ MLX5E_FEC_NOFEC,
+ MLX5E_FEC_FIRECODE,
+ MLX5E_FEC_RS_528_514,
+};
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index eddd7702680b..e88340e196f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = {
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
- u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
struct net_device *netdev = priv->netdev;
+ u32 caps;
if (!mlx5_accel_is_tls_device(priv->mdev))
return;
+ caps = mlx5_accel_tls_device_caps(priv->mdev);
if (caps & MLX5_ACCEL_TLS_TX) {
netdev->features |= NETIF_F_HW_TLS_TX;
netdev->hw_features |= NETIF_F_HW_TLS_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 45cdde694d20..8657e0f26995 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -543,8 +543,11 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
- __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+ priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
+ mlx5e_dbg(HW, priv,
+ "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
+ __func__, arfs_rule->filter_id, arfs_rule->rxq,
+ tuple->ip_proto, err);
}
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index db3278cc052b..3078491cc0d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -153,7 +153,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
if (enable_uc_lb)
MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST);
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 98dd3e0ada72..3e770abfd802 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -135,6 +135,14 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
}
+static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
+ "rx_cqe_moder",
+ "tx_cqe_moder",
+ "rx_cqe_compress",
+ "rx_striding_rq",
+ "rx_no_csum_complete",
+};
+
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
int i, num_stats = 0;
@@ -311,7 +319,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch)
{
- ch->max_combined = priv->profile->max_nch(priv->mdev);
+ ch->max_combined = mlx5e_get_netdev_max_channels(priv->netdev);
ch->combined_count = priv->channels.params.num_channels;
}
@@ -539,6 +547,70 @@ static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static const u32 pplm_fec_2_ethtool[] = {
+ [MLX5E_FEC_NOFEC] = ETHTOOL_FEC_OFF,
+ [MLX5E_FEC_FIRECODE] = ETHTOOL_FEC_BASER,
+ [MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
+};
+
+static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
+{
+ int mode = 0;
+
+ if (!fec_mode)
+ return ETHTOOL_FEC_AUTO;
+
+ mode = find_first_bit(&fec_mode, size);
+
+ if (mode < ARRAY_SIZE(pplm_fec_2_ethtool))
+ return pplm_fec_2_ethtool[mode];
+
+ return 0;
+}
+
+/* we use ETHTOOL_FEC_* offset and apply it to ETHTOOL_LINK_MODE_FEC_*_BIT */
+static u32 ethtool_fec2ethtool_caps(u_long ethtool_fec_code)
+{
+ u32 offset;
+
+ offset = find_first_bit(&ethtool_fec_code, sizeof(u32));
+ offset -= ETHTOOL_FEC_OFF_BIT;
+ offset += ETHTOOL_LINK_MODE_FEC_NONE_BIT;
+
+ return offset;
+}
+
+static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ u_long fec_caps = 0;
+ u32 active_fec = 0;
+ u32 offset;
+ u32 bitn;
+ int err;
+
+ err = mlx5e_get_fec_caps(dev, (u8 *)&fec_caps);
+ if (err)
+ return (err == -EOPNOTSUPP) ? 0 : err;
+
+ err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
+ if (err)
+ return err;
+
+ for_each_set_bit(bitn, &fec_caps, ARRAY_SIZE(pplm_fec_2_ethtool)) {
+ u_long ethtool_bitmask = pplm_fec_2_ethtool[bitn];
+
+ offset = ethtool_fec2ethtool_caps(ethtool_bitmask);
+ __set_bit(offset, link_ksettings->link_modes.supported);
+ }
+
+ active_fec = pplm2ethtool_fec(active_fec, sizeof(u32) * BITS_PER_BYTE);
+ offset = ethtool_fec2ethtool_caps(active_fec);
+ __set_bit(offset, link_ksettings->link_modes.advertising);
+
+ return 0;
+}
+
static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
u32 eth_proto_cap,
u8 connector_type)
@@ -734,7 +806,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
if (err) {
netdev_err(netdev, "%s: query port ptys failed: %d\n",
__func__, err);
- goto err_query_ptys;
+ goto err_query_regs;
}
eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
@@ -770,11 +842,17 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
AUTONEG_ENABLE;
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Autoneg);
+
+ err = get_fec_supported_advertised(mdev, link_ksettings);
+ if (err)
+ netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
+ __func__, err);
+
if (!an_disable_admin)
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, Autoneg);
-err_query_ptys:
+err_query_regs:
return err;
}
@@ -852,18 +930,30 @@ out:
return err;
}
+u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv)
+{
+ return sizeof(priv->channels.params.toeplitz_hash_key);
+}
+
static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return sizeof(priv->channels.params.toeplitz_hash_key);
+ return mlx5e_ethtool_get_rxfh_key_size(priv);
}
-static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv)
{
return MLX5E_INDIR_RQT_SIZE;
}
+static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_rxfh_indir_size(priv);
+}
+
static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
@@ -1257,6 +1347,58 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
+static int mlx5e_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 fec_configured = 0;
+ u32 fec_active = 0;
+ int err;
+
+ err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
+
+ if (err)
+ return err;
+
+ fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active,
+ sizeof(u32) * BITS_PER_BYTE);
+
+ if (!fecparam->active_fec)
+ return -EOPNOTSUPP;
+
+ fecparam->fec = pplm2ethtool_fec((u_long)fec_configured,
+ sizeof(u8) * BITS_PER_BYTE);
+
+ return 0;
+}
+
+static int mlx5e_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 fec_policy = 0;
+ int mode;
+ int err;
+
+ for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
+ if (!(pplm_fec_2_ethtool[mode] & fecparam->fec))
+ continue;
+ fec_policy |= (1 << mode);
+ break;
+ }
+
+ err = mlx5e_set_fec_mode(mdev, fec_policy);
+
+ if (err)
+ return err;
+
+ mlx5_toggle_port_link(mdev);
+
+ return 0;
+}
+
static u32 mlx5e_get_msglevel(struct net_device *dev)
{
return ((struct mlx5e_priv *)netdev_priv(dev))->msglevel;
@@ -1512,6 +1654,27 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return 0;
}
+static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_channels *channels = &priv->channels;
+ struct mlx5e_channel *c;
+ int i;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ for (i = 0; i < channels->num; i++) {
+ c = channels->c[i];
+ if (enable)
+ __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+ else
+ __clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+ }
+
+ return 0;
+}
+
static int mlx5e_handle_pflag(struct net_device *netdev,
u32 wanted_flags,
enum mlx5e_priv_flag flag,
@@ -1563,6 +1726,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
err = mlx5e_handle_pflag(netdev, pflags,
MLX5E_PFLAG_RX_STRIDING_RQ,
set_pflag_rx_striding_rq);
+ if (err)
+ goto out;
+
+ err = mlx5e_handle_pflag(netdev, pflags,
+ MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
+ set_pflag_rx_no_csum_complete);
out:
mutex_unlock(&priv->state_lock);
@@ -1652,4 +1821,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.self_test = mlx5e_self_test,
.get_msglevel = mlx5e_get_msglevel,
.set_msglevel = mlx5e_set_msglevel,
+ .get_fecparam = mlx5e_get_fecparam,
+ .set_fecparam = mlx5e_set_fecparam,
};
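With .get_fecparam/.set_fecparam wired up, userspace can drive FEC through ethtool's FEC commands (e.g. `ethtool --show-fec` and `ethtool --set-fec ... encoding rs`, assuming an ethtool build with FEC support). A standalone sketch of the ethtool-to-PPLM translation the set path performs above (bit values are illustrative stand-ins for the uapi ETHTOOL_FEC_* constants):

    #include <stdio.h>

    #define FEC_OFF   (1u << 2)
    #define FEC_RS    (1u << 3)
    #define FEC_BASER (1u << 4)

    /* mirrors mlx5e_set_fecparam(): the first requested mode found in
     * the map becomes the single PPLM policy bit */
    static unsigned int ethtool_to_pplm(unsigned int ethtool_fec)
    {
        static const unsigned int pplm2ethtool[] = { FEC_OFF, FEC_BASER, FEC_RS };
        unsigned int mode;

        for (mode = 0; mode < 3; mode++)
            if (pplm2ethtool[mode] & ethtool_fec)
                return 1u << mode;
        return 0;
    }

    int main(void)
    {
        /* bit 2 == RS_528_514 in the PPLM policy encoding above */
        printf("policy=0x%x\n", ethtool_to_pplm(FEC_RS));
        return 0;
    }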
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 41cde926cdab..c18dcebe1462 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -131,14 +131,14 @@ set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
if (ip4src_m) {
memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ip4src_v, sizeof(ip4src_v));
- memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
- 0xff, sizeof(ip4src_m));
+ memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &ip4src_m, sizeof(ip4src_m));
}
if (ip4dst_m) {
memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ip4dst_v, sizeof(ip4dst_v));
- memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- 0xff, sizeof(ip4dst_m));
+ memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &ip4dst_m, sizeof(ip4dst_m));
}
MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
@@ -173,11 +173,11 @@ set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
__be16 pdst_m, __be16 pdst_v)
{
if (psrc_m) {
- MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff);
+ MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
}
if (pdst_m) {
- MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff);
+ MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
}
@@ -190,12 +190,12 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
__be16 pdst_m, __be16 pdst_v)
{
if (psrc_m) {
- MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
+ MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
}
if (pdst_m) {
- MLX5E_FTE_SET(headers_c, udp_dport, 0xffff);
+ MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
}
@@ -508,26 +508,14 @@ static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
if (l4_mask->tos)
return -EINVAL;
- if (l4_mask->ip4src) {
- if (!all_ones(l4_mask->ip4src))
- return -EINVAL;
+ if (l4_mask->ip4src)
ntuples++;
- }
- if (l4_mask->ip4dst) {
- if (!all_ones(l4_mask->ip4dst))
- return -EINVAL;
+ if (l4_mask->ip4dst)
ntuples++;
- }
- if (l4_mask->psrc) {
- if (!all_ones(l4_mask->psrc))
- return -EINVAL;
+ if (l4_mask->psrc)
ntuples++;
- }
- if (l4_mask->pdst) {
- if (!all_ones(l4_mask->pdst))
- return -EINVAL;
+ if (l4_mask->pdst)
ntuples++;
- }
/* Flow is TCP/UDP */
return ++ntuples;
}
@@ -540,16 +528,10 @@ static int validate_ip4(struct ethtool_rx_flow_spec *fs)
if (l3_mask->l4_4_bytes || l3_mask->tos ||
fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
return -EINVAL;
- if (l3_mask->ip4src) {
- if (!all_ones(l3_mask->ip4src))
- return -EINVAL;
+ if (l3_mask->ip4src)
ntuples++;
- }
- if (l3_mask->ip4dst) {
- if (!all_ones(l3_mask->ip4dst))
- return -EINVAL;
+ if (l3_mask->ip4dst)
ntuples++;
- }
if (l3_mask->proto)
ntuples++;
/* Flow is IPv4 */
@@ -588,16 +570,10 @@ static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
ntuples++;
- if (l4_mask->psrc) {
- if (!all_ones(l4_mask->psrc))
- return -EINVAL;
+ if (l4_mask->psrc)
ntuples++;
- }
- if (l4_mask->pdst) {
- if (!all_ones(l4_mask->pdst))
- return -EINVAL;
+ if (l4_mask->pdst)
ntuples++;
- }
/* Flow is TCP/UDP */
return ++ntuples;
}
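Dropping the all_ones() checks means ethtool ntuple rules may now carry partial masks, which set_ip4()/set_tcp()/set_udp() above copy into the match criteria (headers_c) verbatim instead of forcing 0xff/0xffff. A standalone sketch of a value/mask pair that was previously rejected and is now accepted (addresses illustrative):

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        /* value/mask pair as ethtool hands it to the driver */
        unsigned int ip4dst_v = htonl(0xc0a80000); /* 192.168.0.0 */
        unsigned int ip4dst_m = htonl(0xffff0000); /* /16 prefix  */

        /* previously any mask != 0xffffffff failed validation; now
         * headers_c receives ip4dst_m as-is, so the rule matches
         * every 192.168.x.y destination */
        printf("match %08x under mask %08x\n",
               ntohl(ip4dst_v), ntohl(ip4dst_m));
        return 0;
    }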
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5a7939e70190..1243edbedc9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -272,10 +272,9 @@ static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
mlx5e_stats_grps[i].update_stats(priv);
}
-void mlx5e_update_stats_work(struct work_struct *work)
+static void mlx5e_update_stats_work(struct work_struct *work)
{
- struct delayed_work *dwork = to_delayed_work(work);
- struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
update_stats_work);
mutex_lock(&priv->state_lock);
@@ -283,6 +282,17 @@ void mlx5e_update_stats_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
+void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
+{
+ if (!priv->profile->update_stats)
+ return;
+
+ if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
+ return;
+
+ queue_work(priv->wq, &priv->update_stats_work);
+}
+
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
enum mlx5_dev_event event, unsigned long param)
{
@@ -929,6 +939,9 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (params->rx_dim_enabled)
__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
+ if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)
+ __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+
return 0;
err_destroy_rq:
@@ -1786,7 +1799,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_channel_param *cparam)
{
struct mlx5e_priv *priv = c->priv;
- int err, tc, max_nch = priv->profile->max_nch(priv->mdev);
+ int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
for (tc = 0; tc < params->num_tc; tc++) {
int txq_ix = c->ix + tc * max_nch;
@@ -2426,7 +2439,7 @@ int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
int err;
int ix;
- for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
rqt = &priv->direct_tir[ix].rqt;
err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
if (err)
@@ -2447,7 +2460,7 @@ void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
int i;
- for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
+ for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++)
mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
}
@@ -2541,7 +2554,7 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
}
- for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
struct mlx5e_redirect_rqt_param direct_rrp = {
.is_rss = false,
{
@@ -2742,7 +2755,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
goto free_in;
}
- for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
in, inlen);
if (err)
@@ -2842,7 +2855,7 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i, tc;
for (i = 0; i < max_nch; i++)
@@ -2954,9 +2967,7 @@ int mlx5e_open_locked(struct net_device *netdev)
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
- if (priv->profile->update_stats)
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
-
+ mlx5e_queue_update_stats(priv);
return 0;
err_clear_state_opened_flag:
@@ -3049,8 +3060,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
return mlx5e_alloc_cq_common(mdev, param, cq);
}
-static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
- struct mlx5e_rq *drop_rq)
+int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
+ struct mlx5e_rq *drop_rq)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_cq_param cq_param = {};
@@ -3094,7 +3105,7 @@ err_free_cq:
return err;
}
-static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
+void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
mlx5e_destroy_rq(drop_rq);
mlx5e_free_rq(drop_rq);
@@ -3175,7 +3186,7 @@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *t
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
-int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
+int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
struct mlx5e_tir *tir;
void *tirc;
@@ -3202,7 +3213,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
}
}
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
goto out;
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
@@ -3236,7 +3247,7 @@ err_destroy_inner_tirs:
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
- int nch = priv->profile->max_nch(priv->mdev);
+ int nch = mlx5e_get_netdev_max_channels(priv->netdev);
struct mlx5e_tir *tir;
void *tirc;
int inlen;
@@ -3273,14 +3284,14 @@ err_destroy_ch_tirs:
return err;
}
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
int i;
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
return;
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
@@ -3289,7 +3300,7 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
- int nch = priv->profile->max_nch(priv->mdev);
+ int nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i;
for (i = 0; i < nch; i++)
@@ -3381,9 +3392,6 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
- return -EOPNOTSUPP;
-
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
@@ -3438,7 +3446,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
/* update HW stats in background for next time */
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+ mlx5e_queue_update_stats(priv);
if (mlx5e_is_uplink_rep(priv)) {
stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
@@ -4315,23 +4323,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
- * reenabling interrupts.
- */
-static void mlx5e_netpoll(struct net_device *dev)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_channels *chs = &priv->channels;
-
- int i;
-
- for (i = 0; i < chs->num; i++)
- napi_schedule(&chs->c[i]->napi);
-}
-#endif
-
-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
@@ -4356,9 +4348,6 @@ static const struct net_device_ops mlx5e_netdev_ops = {
#ifdef CONFIG_MLX5_EN_ARFS
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mlx5e_netpoll,
-#endif
#ifdef CONFIG_MLX5_ESWITCH
/* SRIOV E-Switch NDOs */
.ndo_set_vf_mac = mlx5e_set_vf_mac,
@@ -4499,6 +4488,31 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo
return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
+void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ /* Prefer Striding RQ, unless any of the following holds:
+ * - Striding RQ configuration is not possible/supported.
+ * - Slow PCI heuristic.
+ * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
+ */
+ if (!slow_pci_heuristic(mdev) &&
+ mlx5e_striding_rq_possible(mdev, params) &&
+ (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
+ !mlx5e_rx_is_linear_skb(mdev, params)))
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
+ mlx5e_set_rq_type(mdev, params);
+ mlx5e_init_rq_type_params(mdev, params);
+}
+
+void mlx5e_build_rss_params(struct mlx5e_params *params)
+{
+ params->rss_hfunc = ETH_RSS_HASH_XOR;
+ netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
+ mlx5e_build_default_indir_rqt(params->indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, params->num_channels);
+}
+
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels, u16 mtu)
@@ -4522,20 +4536,10 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
/* RQ */
- /* Prefer Striding RQ, unless any of the following holds:
- * - Striding RQ configuration is not possible/supported.
- * - Slow PCI heuristic.
- * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
- */
- if (!slow_pci_heuristic(mdev) &&
- mlx5e_striding_rq_possible(mdev, params) &&
- (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
- !mlx5e_rx_is_linear_skb(mdev, params)))
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
- mlx5e_set_rq_type(mdev, params);
- mlx5e_init_rq_type_params(mdev, params);
+ mlx5e_build_rq_params(mdev, params);
/* HW LRO */
@@ -4558,37 +4562,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */
- params->rss_hfunc = ETH_RSS_HASH_XOR;
- netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
- mlx5e_build_default_indir_rqt(params->indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, max_channels);
-}
-
-static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->profile = profile;
- priv->ppriv = ppriv;
- priv->msglevel = MLX5E_MSG_LEVEL;
- priv->max_opened_tc = 1;
-
- mlx5e_build_nic_params(mdev, &priv->channels.params,
- profile->max_nch(mdev), netdev->mtu);
-
- mutex_init(&priv->state_lock);
-
- INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
- INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
- INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
- INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
-
- mlx5e_timestamp_init(priv);
+ mlx5e_build_rss_params(params);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@@ -4726,7 +4700,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_tls_build_netdev(priv);
}
-static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
+void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -4744,7 +4718,7 @@ static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
}
}
-static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
+void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
if (priv->q_counter)
mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
@@ -4753,15 +4727,23 @@ static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}
-static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
+ err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
+ if (err)
+ return err;
+
+ mlx5e_build_nic_params(mdev, &priv->channels.params,
+ mlx5e_get_netdev_max_channels(netdev), netdev->mtu);
+
+ mlx5e_timestamp_init(priv);
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@@ -4770,12 +4752,15 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_build_tc2txq_maps(priv);
+
+ return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
+ mlx5e_netdev_cleanup(priv->netdev, priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -4783,15 +4768,23 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
+ mlx5e_create_q_counters(priv);
+
+ err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
+ if (err) {
+ mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+ goto err_destroy_q_counters;
+ }
+
err = mlx5e_create_indirect_rqt(priv);
if (err)
- return err;
+ goto err_close_drop_rq;
err = mlx5e_create_direct_rqts(priv);
if (err)
goto err_destroy_indirect_rqts;
- err = mlx5e_create_indirect_tirs(priv);
+ err = mlx5e_create_indirect_tirs(priv, true);
if (err)
goto err_destroy_direct_rqts;
@@ -4816,11 +4809,15 @@ err_destroy_flow_steering:
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
- mlx5e_destroy_indirect_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv, true);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+err_close_drop_rq:
+ mlx5e_close_drop_rq(&priv->drop_rq);
+err_destroy_q_counters:
+ mlx5e_destroy_q_counters(priv);
return err;
}
@@ -4829,9 +4826,11 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
- mlx5e_destroy_indirect_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv, true);
mlx5e_destroy_direct_rqts(priv);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5e_close_drop_rq(&priv->drop_rq);
+ mlx5e_destroy_q_counters(priv);
}
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
@@ -4924,7 +4923,6 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.enable = mlx5e_nic_enable,
.disable = mlx5e_nic_disable,
.update_stats = mlx5e_update_ndo_stats,
- .max_nch = mlx5e_get_max_num_channels,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
@@ -4933,13 +4931,53 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
/* mlx5e generic netdev management API (move to en_common.c) */
+/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */
+int mlx5e_netdev_init(struct net_device *netdev,
+ struct mlx5e_priv *priv,
+ struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ /* priv init */
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+ priv->msglevel = MLX5E_MSG_LEVEL;
+ priv->max_opened_tc = 1;
+
+ mutex_init(&priv->state_lock);
+ INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+ INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
+ INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+ priv->wq = create_singlethread_workqueue("mlx5e");
+ if (!priv->wq)
+ return -ENOMEM;
+
+ /* netdev init */
+ netif_carrier_off(netdev);
+
+#ifdef CONFIG_MLX5_EN_ARFS
+ netdev->rx_cpu_rmap = mdev->rmap;
+#endif
+
+ return 0;
+}
+
+void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
+{
+ destroy_workqueue(priv->wq);
+}
+
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile,
+ int nch,
void *ppriv)
{
- int nch = profile->max_nch(mdev);
struct net_device *netdev;
- struct mlx5e_priv *priv;
+ int err;
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
nch * profile->max_tc,
@@ -4949,25 +4987,15 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return NULL;
}
-#ifdef CONFIG_MLX5_EN_ARFS
- netdev->rx_cpu_rmap = mdev->rmap;
-#endif
-
- profile->init(mdev, netdev, profile, ppriv);
-
- netif_carrier_off(netdev);
-
- priv = netdev_priv(netdev);
-
- priv->wq = create_singlethread_workqueue("mlx5e");
- if (!priv->wq)
- goto err_cleanup_nic;
+ err = profile->init(mdev, netdev, profile, ppriv);
+ if (err) {
+ mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err);
+ goto err_free_netdev;
+ }
return netdev;
-err_cleanup_nic:
- if (profile->cleanup)
- profile->cleanup(priv);
+err_free_netdev:
free_netdev(netdev);
return NULL;
@@ -4975,7 +5003,6 @@ err_cleanup_nic:
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
- struct mlx5_core_dev *mdev = priv->mdev;
const struct mlx5e_profile *profile;
int err;
@@ -4986,28 +5013,16 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
if (err)
goto out;
- mlx5e_create_q_counters(priv);
-
- err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
- if (err) {
- mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
- goto err_destroy_q_counters;
- }
-
err = profile->init_rx(priv);
if (err)
- goto err_close_drop_rq;
+ goto err_cleanup_tx;
if (profile->enable)
profile->enable(priv);
return 0;
-err_close_drop_rq:
- mlx5e_close_drop_rq(&priv->drop_rq);
-
-err_destroy_q_counters:
- mlx5e_destroy_q_counters(priv);
+err_cleanup_tx:
profile->cleanup_tx(priv);
out:
@@ -5025,10 +5040,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
flush_workqueue(priv->wq);
profile->cleanup_rx(priv);
- mlx5e_close_drop_rq(&priv->drop_rq);
- mlx5e_destroy_q_counters(priv);
profile->cleanup_tx(priv);
- cancel_delayed_work_sync(&priv->update_stats_work);
+ cancel_work_sync(&priv->update_stats_work);
}
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
@@ -5036,7 +5049,6 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
free_netdev(netdev);
@@ -5085,6 +5097,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
void *rpriv = NULL;
void *priv;
int err;
+ int nch;
err = mlx5e_check_required_hca_cap(mdev);
if (err)
@@ -5100,7 +5113,8 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
}
#endif
- netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
+ nch = mlx5e_get_max_num_channels(mdev);
+ netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, rpriv);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
goto err_free_rpriv;
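Taken together, the en_main.c hunks turn profile->init() into a fallible step that owns priv/workqueue setup (via mlx5e_netdev_init()), and move the drop RQ and q counters from attach into the RX init path. A comment-only sketch of the resulting call order, derived from the hunks above (error paths elided):

    /*
     * mlx5e_add()
     *   nch = mlx5e_get_max_num_channels(mdev);
     *   mlx5e_create_netdev(mdev, profile, nch, rpriv)
     *     alloc_etherdev_mqs(..., nch * profile->max_tc, nch);
     *     profile->init()              // may fail; netdev freed on error
     *       mlx5e_netdev_init()        // priv fields, workqueue, rmap
     *   mlx5e_attach_netdev(priv)
     *     profile->init_tx()
     *     profile->init_rx()           // q counters + drop RQ live here now
     *     profile->enable()
     */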
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index c9cc9747d21d..c3c657548824 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -46,8 +46,6 @@
#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
-#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
- max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -182,12 +180,108 @@ static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
}
}
+static void mlx5e_rep_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_ringparam(priv, param);
+}
+
+static int mlx5e_rep_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_ringparam(priv, param);
+}
+
+static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_flow_handle *flow_rule;
+
+ flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
+ rep->vport,
+ dest);
+ if (IS_ERR(flow_rule))
+ return PTR_ERR(flow_rule);
+
+ mlx5_del_flow_rules(rpriv->vport_rx_rule);
+ rpriv->vport_rx_rule = flow_rule;
+ return 0;
+}
+
+static void mlx5e_rep_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mlx5e_ethtool_get_channels(priv, ch);
+}
+
+static int mlx5e_rep_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u16 curr_channels_amount = priv->channels.params.num_channels;
+ u32 new_channels_amount = ch->combined_count;
+ struct mlx5_flow_destination new_dest;
+ int err = 0;
+
+ err = mlx5e_ethtool_set_channels(priv, ch);
+ if (err)
+ return err;
+
+ if (curr_channels_amount == 1 && new_channels_amount > 1) {
+ new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ new_dest.ft = priv->fs.ttc.ft.t;
+ } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
+ new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ new_dest.tir_num = priv->direct_tir[0].tirn;
+ } else {
+ return 0;
+ }
+
+ err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
+ if (err) {
+ netdev_warn(priv->netdev, "Failed to update vport rx rule when going from (%d) to (%d) channels\n",
+ curr_channels_amount, new_channels_amount);
+ return err;
+ }
+
+ return 0;
+}
+
+static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_rxfh_key_size(priv);
+}
+
+static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_rxfh_indir_size(priv);
+}
+
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.get_drvinfo = mlx5e_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
.get_sset_count = mlx5e_rep_get_sset_count,
.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
+ .get_ringparam = mlx5e_rep_get_ringparam,
+ .set_ringparam = mlx5e_rep_set_ringparam,
+ .get_channels = mlx5e_rep_get_channels,
+ .set_channels = mlx5e_rep_set_channels,
+ .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
+ .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
@@ -759,9 +853,6 @@ static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
- return -EOPNOTSUPP;
-
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
@@ -775,9 +866,6 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
{
struct mlx5e_priv *priv = cb_priv;
- if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
- return -EOPNOTSUPP;
-
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
@@ -898,8 +986,7 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_priv *priv = netdev_priv(dev);
/* update HW stats in background for next time */
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
-
+ mlx5e_queue_update_stats(priv);
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
@@ -934,16 +1021,20 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->sw_mtu = mtu;
params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
- params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
- params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
+ /* RQ */
+ mlx5e_build_rq_params(mdev, params);
+
+ /* CQ moderation params */
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->num_tc = 1;
- params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
+
+ /* RSS */
+ mlx5e_build_rss_params(params);
}
static void mlx5e_build_rep_netdev(struct net_device *netdev)
@@ -963,6 +1054,16 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
netdev->hw_features |= NETIF_F_HW_TC;
+ netdev->hw_features |= NETIF_F_SG;
+ netdev->hw_features |= NETIF_F_IP_CSUM;
+ netdev->hw_features |= NETIF_F_IPV6_CSUM;
+ netdev->hw_features |= NETIF_F_GRO;
+ netdev->hw_features |= NETIF_F_TSO;
+ netdev->hw_features |= NETIF_F_TSO6;
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ netdev->features |= netdev->hw_features;
+
eth_hw_addr_random(netdev);
netdev->min_mtu = ETH_MIN_MTU;
@@ -970,63 +1071,127 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
}
-static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->profile = profile;
- priv->ppriv = ppriv;
-
- mutex_init(&priv->state_lock);
+ err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
+ if (err)
+ return err;
- INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
- priv->channels.params.num_channels = profile->max_nch(mdev);
+ priv->channels.params.num_channels =
+ mlx5e_get_netdev_max_channels(netdev);
mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
mlx5e_build_rep_netdev(netdev);
mlx5e_timestamp_init(priv);
+
+ return 0;
}
-static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
+static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
+{
+ mlx5e_netdev_cleanup(priv->netdev, priv);
+}
+
+static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
+{
+ struct ttc_params ttc_params = {};
+ int tt, err;
+
+ priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
+
+ /* The inner_ttc in the ttc params is intentionally not set */
+ ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
+ mlx5e_set_ttc_ft_params(&ttc_params);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+ err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_destination dest;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = priv->direct_tir[0].tirn;
+ flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
+ rep->vport,
+ &dest);
+ if (IS_ERR(flow_rule))
+ return PTR_ERR(flow_rule);
+ rpriv->vport_rx_rule = flow_rule;
+ return 0;
+}
+
+static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
int err;
mlx5e_init_l2_addr(priv);
+ err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
+ if (err) {
+ mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5e_create_indirect_rqt(priv);
+ if (err)
+ goto err_close_drop_rq;
+
err = mlx5e_create_direct_rqts(priv);
if (err)
- return err;
+ goto err_destroy_indirect_rqts;
- err = mlx5e_create_direct_tirs(priv);
+ err = mlx5e_create_indirect_tirs(priv, false);
if (err)
goto err_destroy_direct_rqts;
- flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
- rep->vport,
- priv->direct_tir[0].tirn);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
+ err = mlx5e_create_direct_tirs(priv);
+ if (err)
+ goto err_destroy_indirect_tirs;
+
+ err = mlx5e_create_rep_ttc_table(priv);
+ if (err)
goto err_destroy_direct_tirs;
- }
- rpriv->vport_rx_rule = flow_rule;
+
+ err = mlx5e_create_rep_vport_rx_rule(priv);
+ if (err)
+ goto err_destroy_ttc_table;
return 0;
+err_destroy_ttc_table:
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
+err_destroy_indirect_tirs:
+ mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv);
+err_destroy_indirect_rqts:
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+err_close_drop_rq:
+ mlx5e_close_drop_rq(&priv->drop_rq);
return err;
}
@@ -1035,8 +1200,12 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5_del_flow_rules(rpriv->vport_rx_rule);
+ mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv, false);
mlx5e_destroy_direct_rqts(priv);
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5e_close_drop_rq(&priv->drop_rq);
}
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
@@ -1051,23 +1220,17 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
return 0;
}
-static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
-{
-#define MLX5E_PORT_REPRESENTOR_NCH 1
- return MLX5E_PORT_REPRESENTOR_NCH;
-}
-
static const struct mlx5e_profile mlx5e_rep_profile = {
.init = mlx5e_init_rep,
+ .cleanup = mlx5e_cleanup_rep,
.init_rx = mlx5e_init_rep_rx,
.cleanup_rx = mlx5e_cleanup_rep_rx,
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_nic_tx,
.update_stats = mlx5e_rep_update_hw_counters,
- .max_nch = mlx5e_get_rep_max_num_channels,
.update_carrier = NULL,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
- .rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = 1,
};
@@ -1127,13 +1290,14 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
struct mlx5e_priv *upriv;
- int err;
+ int nch, err;
rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
if (!rpriv)
return -ENOMEM;
- netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
+ nch = mlx5e_get_max_num_channels(dev);
+ netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, nch, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
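The representor profile now builds full RSS/TTC steering (indirect RQT and TIRs, a TTC table, striding RQ), so its vport rx rule must retarget when the channel count crosses one. A sketch of that destination switch, mirroring mlx5e_rep_set_channels() above:

    /* sketch: pick the vport rx rule destination by channel count */
    static void sketch_pick_dest(struct mlx5e_priv *priv, int nch,
                                 struct mlx5_flow_destination *dest)
    {
        if (nch > 1) { /* spread via RSS: point at the TTC flow table */
            dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
            dest->ft = priv->fs.ttc.ft.t;
        } else {       /* single channel: the direct TIR is enough */
            dest->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
            dest->tir_num = priv->direct_tir[0].tirn;
        }
    }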
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 15d8ae28c040..2f7fb8de6967 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -37,6 +37,7 @@
#include <net/busy_poll.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
+#include <net/inet_ecn.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -432,10 +433,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
struct mlx5_wq_cyc *wq,
- u16 pi, u16 frag_pi)
+ u16 pi, u16 nnops)
{
struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
edge_wi = wi + nnops;
@@ -454,15 +454,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
- u16 pi, frag_pi;
+ u16 pi, contig_wqebbs_room;
int err;
int i;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-
- if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
- mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
+ mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
}
@@ -690,12 +689,29 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}
-static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
+static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
+ __be16 *proto)
{
- __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+ *proto = ((struct ethhdr *)skb->data)->h_proto;
+ *proto = __vlan_get_protocol(skb, *proto, network_depth);
+ return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
+}
+
+static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
+{
+ int network_depth = 0;
+ __be16 proto;
+ void *ip;
+ int rc;
+
+ if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
+ return;
- ethertype = __vlan_get_protocol(skb, ethertype, network_depth);
- return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+ ip = skb->data + network_depth;
+ rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
+ IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
+
+ rq->stats->ecn_mark += !!rc;
}
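mlx5e_enable_ecn() above rewrites received packets: if the CQE reports congestion, the two ECN bits in the IP header are set to Congestion Experienced so the receiver's transport echoes the signal back to the sender. A rough userspace model of the IPv4 side of IP_ECN_set_ce(); the real helper also patches the IPv4 header checksum and handles an already-CE packet, both omitted here:

#include <stdio.h>

#define INET_ECN_NOT_ECT 0    /* values as in include/net/inet_ecn.h */
#define INET_ECN_CE      3
#define INET_ECN_MASK    3

static int ecn_set_ce(unsigned char *tos)
{
    if ((*tos & INET_ECN_MASK) == INET_ECN_NOT_ECT)
        return 0;            /* not ECN-capable: leave the packet alone */
    *tos |= INET_ECN_CE;     /* ECT(0)/ECT(1) -> CE */
    return 1;                /* a successful mark bumps rx_ecn_mark above */
}

int main(void)
{
    unsigned char tos = 0x02;    /* ECT(0) */

    printf("marked=%d tos=0x%02x\n", ecn_set_ce(&tos), tos);
    return 0;
}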
static __be32 mlx5e_get_fcs(struct sk_buff *skb)
@@ -737,6 +753,14 @@ static __be32 mlx5e_get_fcs(struct sk_buff *skb)
return fcs_bytes;
}
+static u8 get_ip_proto(struct sk_buff *skb, __be16 proto)
+{
+ void *ip_p = skb->data + sizeof(struct ethhdr);
+
+ return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
+ ((struct ipv6hdr *)ip_p)->nexthdr;
+}
+
static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
@@ -745,6 +769,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
{
struct mlx5e_rq_stats *stats = rq->stats;
int network_depth = 0;
+ __be16 proto;
if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
goto csum_none;
@@ -755,7 +780,13 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
- if (likely(is_last_ethertype_ip(skb, &network_depth))) {
+ if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+ goto csum_unnecessary;
+
+ if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
+ if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP))
+ goto csum_unnecessary;
+
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
if (network_depth > ETH_HLEN)
@@ -773,8 +804,10 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
+csum_unnecessary:
if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
- (cqe->hds_ip_ext & CQE_L4_OK))) {
+ ((cqe->hds_ip_ext & CQE_L4_OK) ||
+ (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (cqe_is_tunneled(cqe)) {
skb->csum_level = 1;
@@ -790,6 +823,8 @@ csum_none:
stats->csum_none++;
}
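The CHECKSUM_COMPLETE path above works because the NIC reports a ones'-complement sum over the received data, which the stack can fold and compare against any L4 checksum without walking the payload again; the network_depth check compensates for link-layer bytes that sit in front of the IP header. The underlying arithmetic, shown as a plain ones'-complement sum and fold, not the driver's exact code:

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
    /* fold the carries back in twice, then complement */
    sum = (sum & 0xffff) + (sum >> 16);
    sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

static uint32_t csum_partial(const uint8_t *buf, int len)
{
    uint32_t sum = 0;

    while (len > 1) {
        sum += (uint32_t)(buf[0] << 8 | buf[1]);
        buf += 2;
        len -= 2;
    }
    if (len)
        sum += (uint32_t)(buf[0] << 8);
    return sum;
}

int main(void)
{
    uint8_t data[] = { 0x45, 0x00, 0x00, 0x1c };

    printf("folded=0x%04x\n", csum_fold(csum_partial(data, sizeof(data))));
    return 0;
}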
+#define MLX5E_CE_BIT_MASK 0x80
+
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct mlx5e_rq *rq,
@@ -834,6 +869,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
+ /* check the CE bit in the CQE - the MSB of the ml_path field */
+ if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
+ mlx5e_enable_ecn(rq, skb);
+
skb->protocol = eth_type_trans(skb, netdev);
}
@@ -1230,8 +1269,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
- struct mlx5e_rq_stats *stats = rq->stats;
struct hwtstamp_config *tstamp;
+ struct mlx5e_rq_stats *stats;
struct net_device *netdev;
struct mlx5e_priv *priv;
char *pseudo_header;
@@ -1254,6 +1293,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
priv = mlx5i_epriv(netdev);
tstamp = &priv->tstamp;
+ stats = &priv->channel_stats[rq->ix].rq;
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 6839481f7697..1e55b9c27ffc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -53,6 +53,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
@@ -92,6 +93,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -131,7 +133,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
+ for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
@@ -144,6 +146,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_bytes += rq_stats->bytes;
s->rx_lro_packets += rq_stats->lro_packets;
s->rx_lro_bytes += rq_stats->lro_bytes;
+ s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
@@ -168,6 +171,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_cache_busy += rq_stats->cache_busy;
s->rx_cache_waive += rq_stats->cache_waive;
s->rx_congst_umr += rq_stats->congst_umr;
+ s->rx_arfs_err += rq_stats->arfs_err;
s->ch_events += ch_stats->events;
s->ch_poll += ch_stats->poll;
s->ch_arm += ch_stats->arm;
@@ -610,46 +614,82 @@ static const struct counter_desc pport_phy_statistical_stats_desc[] = {
{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};
-#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
+static const struct counter_desc
+pport_phy_statistical_err_lanes_stats_desc[] = {
+ { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
+ { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
+ { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
+ { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
+};
+
+#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
+ ARRAY_SIZE(pport_phy_statistical_stats_desc)
+#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
+ ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int num_stats;
+
/* "1" for link_down_events special counter */
- return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
- NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1;
+ num_stats = 1;
+
+ num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
+
+ num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
+
+ return num_stats;
}
static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
int i;
strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
- if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
return idx;
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_phy_statistical_stats_desc[i].format);
+
+ if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_phy_statistical_err_lanes_stats_desc[i].format);
+
return idx;
}
static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
int i;
/* link_down_events_phy has special handling since it is not stored in __be64 format */
data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
counter_set.phys_layer_cntrs.link_down_events);
- if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
return idx;
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
data[idx++] =
MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
pport_phy_statistical_stats_desc, i);
+
+ if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_err_lanes_stats_desc,
+ i);
return idx;
}
@@ -1144,6 +1184,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
@@ -1158,6 +1199,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
};
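The new ecn_mark and arfs_err entries above follow the driver's single-source-of-truth pattern for ethtool statistics: every counter is described once as a (format string, offsetof) pair, and both the string table and the value dump are driven from the same array, so names and values cannot drift apart. A self-contained sketch of the idea; the struct and field names are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rq_stats {
    uint64_t packets;
    uint64_t bytes;
    uint64_t ecn_mark;
};

struct counter_desc {
    const char *format;
    size_t offset;
};

#define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct counter_desc rq_desc[] = {
    DECLARE_STAT(struct rq_stats, packets),
    DECLARE_STAT(struct rq_stats, bytes),
    DECLARE_STAT(struct rq_stats, ecn_mark),
};

int main(void)
{
    struct rq_stats s = { 10, 1500, 2 };

    /* one array drives both the names and the values */
    for (size_t i = 0; i < sizeof(rq_desc) / sizeof(rq_desc[0]); i++)
        printf("%s: %llu\n", rq_desc[i].format,
               (unsigned long long)*(const uint64_t *)
               ((const char *)&s + rq_desc[i].offset));
    return 0;
}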
static const struct counter_desc sq_stats_desc[] = {
@@ -1211,7 +1253,7 @@ static const struct counter_desc ch_stats_desc[] = {
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
@@ -1223,7 +1265,7 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i, j, tc;
for (i = 0; i < max_nch; i++)
@@ -1258,7 +1300,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
{
- int max_nch = priv->profile->max_nch(priv->mdev);
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i, j, tc;
for (i = 0; i < max_nch; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index a4c035aedd46..77f74ce11280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -66,6 +66,7 @@ struct mlx5e_sw_stats {
u64 tx_nop;
u64 rx_lro_packets;
u64 rx_lro_bytes;
+ u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
u64 rx_csum_unnecessary;
u64 rx_csum_none;
@@ -105,6 +106,7 @@ struct mlx5e_sw_stats {
u64 rx_cache_busy;
u64 rx_cache_waive;
u64 rx_congst_umr;
+ u64 rx_arfs_err;
u64 ch_events;
u64 ch_poll;
u64 ch_arm;
@@ -184,6 +186,7 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 ecn_mark;
u64 removed_vlan_packets;
u64 xdp_drop;
u64 xdp_redirect;
@@ -200,6 +203,7 @@ struct mlx5e_rq_stats {
u64 cache_busy;
u64 cache_waive;
u64 congst_umr;
+ u64 arfs_err;
};
struct mlx5e_sq_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9fed54017659..608025ca5c04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -61,6 +61,7 @@ struct mlx5_nic_flow_attr {
u32 hairpin_tirn;
u8 match_level;
struct mlx5_flow_table *hairpin_ft;
+ struct mlx5_fc *counter;
};
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
@@ -73,6 +74,7 @@ enum {
MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3),
MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
+ MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 5),
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -81,7 +83,7 @@ struct mlx5e_tc_flow {
struct rhash_head node;
struct mlx5e_priv *priv;
u64 cookie;
- u8 flags;
+ u16 flags;
struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
struct list_head encap; /* flows sharing the same encap ID */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
@@ -100,11 +102,6 @@ struct mlx5e_tc_flow_parse_attr {
int mirred_ifindex;
};
-enum {
- MLX5_HEADER_TYPE_VXLAN = 0x0,
- MLX5_HEADER_TYPE_NVGRE = 0x1,
-};
-
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
@@ -532,7 +529,8 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
#define UNKNOWN_MATCH_PRIO 8
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec, u8 *match_prio)
+ struct mlx5_flow_spec *spec, u8 *match_prio,
+ struct netlink_ext_ack *extack)
{
void *headers_c, *headers_v;
u8 prio_val, prio_mask = 0;
@@ -540,8 +538,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
#ifdef CONFIG_MLX5_CORE_EN_DCB
if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
- netdev_warn(priv->netdev,
- "only PCP trust state supported for hairpin\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "only PCP trust state supported for hairpin");
return -EOPNOTSUPP;
}
#endif
@@ -557,8 +555,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
if (!vlan_present || !prio_mask) {
prio_val = UNKNOWN_MATCH_PRIO;
} else if (prio_mask != 0x7) {
- netdev_warn(priv->netdev,
- "masked priority match not supported for hairpin\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "masked priority match not supported for hairpin");
return -EOPNOTSUPP;
}
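The conversions in this hunk swap netdev_warn() for NL_SET_ERR_MSG_MOD() so the rejection reason travels back over netlink to the tc command that requested the offload, instead of landing only in the kernel log. A userspace model of the shape of that interface; the struct and macro are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

struct ext_ack {
    const char *msg;
};

#define SET_ERR_MSG(ea, s) do { if (ea) (ea)->msg = (s); } while (0)

/* validation fills the caller-supplied message slot on failure */
static int check_prio_mask(unsigned int mask, struct ext_ack *ea)
{
    if (mask != 0 && mask != 0x7) {
        SET_ERR_MSG(ea, "masked priority match not supported");
        return -1;
    }
    return 0;
}

int main(void)
{
    struct ext_ack ea = { NULL };

    if (check_prio_mask(0x3, &ea))
        fprintf(stderr, "rejected: %s\n", ea.msg);   /* caller sees why */
    return 0;
}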
@@ -568,7 +566,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
int peer_ifindex = parse_attr->mirred_ifindex;
struct mlx5_hairpin_params params;
@@ -583,12 +582,13 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
- netdev_warn(priv->netdev, "hairpin is not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
return -EOPNOTSUPP;
}
peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+ err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
+ extack);
if (err)
return err;
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
@@ -674,29 +674,28 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
}
}
-static struct mlx5_flow_handle *
+static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
- .has_flow_tag = true,
.flow_tag = attr->flow_tag,
- .encap_id = 0,
+ .reformat_id = 0,
+ .flags = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
};
struct mlx5_fc *counter = NULL;
- struct mlx5_flow_handle *rule;
bool table_created = false;
int err, dest_ix = 0;
if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
- err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
if (err) {
- rule = ERR_PTR(err);
goto err_add_hairpin_flow;
}
if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
@@ -716,22 +715,21 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter)) {
- rule = ERR_CAST(counter);
+ err = PTR_ERR(counter);
goto err_fc_create;
}
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter = counter;
+ dest[dest_ix].counter_id = mlx5_fc_id(counter);
dest_ix++;
+ attr->counter = counter;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
flow_act.modify_id = attr->mod_hdr_id;
kfree(parse_attr->mod_hdr_actions);
- if (err) {
- rule = ERR_PTR(err);
+ if (err)
goto err_create_mod_hdr_id;
- }
}
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
@@ -753,9 +751,11 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
MLX5E_TC_TABLE_NUM_GROUPS,
MLX5E_TC_FT_LEVEL, 0);
if (IS_ERR(priv->fs.tc.t)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create tc offload table\n");
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- rule = ERR_CAST(priv->fs.tc.t);
+ err = PTR_ERR(priv->fs.tc.t);
goto err_create_ft;
}
@@ -765,13 +765,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->match_level != MLX5_MATCH_NONE)
parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
- &flow_act, dest, dest_ix);
+ flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
+ &flow_act, dest, dest_ix);
- if (IS_ERR(rule))
+ if (IS_ERR(flow->rule[0])) {
+ err = PTR_ERR(flow->rule[0]);
goto err_add_rule;
+ }
- return rule;
+ return 0;
err_add_rule:
if (table_created) {
@@ -787,7 +789,7 @@ err_fc_create:
if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
- return rule;
+ return err;
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
@@ -796,7 +798,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_fc *counter = NULL;
- counter = mlx5_flow_rule_counter(flow->rule[0]);
+ counter = attr->counter;
mlx5_del_flow_rules(flow->rule[0]);
mlx5_fc_destroy(priv->mdev, counter);
@@ -819,30 +821,119 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
- struct mlx5e_tc_flow *flow);
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack);
+
+static struct mlx5_flow_handle *
+mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_flow_handle *rule;
+
+ rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+ if (IS_ERR(rule))
+ return rule;
+
+ if (attr->mirror_count) {
+ flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
+ if (IS_ERR(flow->rule[1])) {
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+ return flow->rule[1];
+ }
+ }
+
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ return rule;
+}
+
+static void
+mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *attr)
+{
+ flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+
+ if (attr->mirror_count)
+ mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+}
static struct mlx5_flow_handle *
+mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *slow_attr)
+{
+ struct mlx5_flow_handle *rule;
+
+ memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->mirror_count = 0;
+ slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
+ if (!IS_ERR(rule))
+ flow->flags |= MLX5E_TC_FLOW_SLOW;
+
+ return rule;
+}
+
+static void
+mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *slow_attr)
+{
+ memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
+ flow->flags &= ~MLX5E_TC_FLOW_SLOW;
+}
+
+static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ u32 max_chain = mlx5_eswitch_get_chain_range(esw);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ u16 max_prio = mlx5_eswitch_get_prio_range(esw);
struct net_device *out_dev, *encap_dev = NULL;
- struct mlx5_flow_handle *rule = NULL;
+ struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
- int err;
+ int err = 0, encap_err = 0;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+ /* if prios are not supported, keep the old behaviour of using the same prio
+ * for all offloaded rules.
+ */
+ if (!mlx5_eswitch_prios_supported(esw))
+ attr->prio = 1;
+
+ if (attr->chain > max_chain) {
+ NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
+ err = -EOPNOTSUPP;
+ goto err_max_prio_chain;
+ }
+
+ if (attr->prio > max_prio) {
+ NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
+ err = -EOPNOTSUPP;
+ goto err_max_prio_chain;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
out_dev = __dev_get_by_index(dev_net(priv->netdev),
attr->parse_attr->mirred_ifindex);
- err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
- out_dev, &encap_dev, flow);
- if (err) {
- rule = ERR_PTR(err);
- if (err != -EAGAIN)
- goto err_attach_encap;
+ encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
+ out_dev, &encap_dev, flow,
+ extack);
+ if (encap_err && encap_err != -EAGAIN) {
+ err = encap_err;
+ goto err_attach_encap;
}
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
@@ -851,49 +942,58 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
- if (err) {
- rule = ERR_PTR(err);
+ if (err)
goto err_add_vlan;
- }
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
kfree(parse_attr->mod_hdr_actions);
- if (err) {
- rule = ERR_PTR(err);
+ if (err)
goto err_mod_hdr;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ counter = mlx5_fc_create(esw->dev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_create_counter;
}
+
+ attr->counter = counter;
}
- /* we get here if (1) there's no error (rule being null) or when
+ /* we get here either when (1) there's no error, or when
* (2) there's an encap action and we're on -EAGAIN (no valid neigh)
*/
- if (rule != ERR_PTR(-EAGAIN)) {
- rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
- if (IS_ERR(rule))
- goto err_add_rule;
-
- if (attr->mirror_count) {
- flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
- if (IS_ERR(flow->rule[1]))
- goto err_fwd_rule;
- }
+ if (encap_err == -EAGAIN) {
+ /* continue with goto slow path rule instead */
+ struct mlx5_esw_flow_attr slow_attr;
+
+ flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
+ } else {
+ flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
}
- return rule;
-err_fwd_rule:
- mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
- rule = flow->rule[1];
+ if (IS_ERR(flow->rule[0])) {
+ err = PTR_ERR(flow->rule[0]);
+ goto err_add_rule;
+ }
+
+ return 0;
+
err_add_rule:
+ mlx5_fc_destroy(esw->dev, counter);
+err_create_counter:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
mlx5e_detach_encap(priv, flow);
err_attach_encap:
- return rule;
+err_max_prio_chain:
+ return err;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
@@ -901,36 +1001,43 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5_esw_flow_attr slow_attr;
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
- flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- if (attr->mirror_count)
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+ if (flow->flags & MLX5E_TC_FLOW_SLOW)
+ mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+ else
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
mlx5_eswitch_del_vlan_action(esw, attr);
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
mlx5e_detach_encap(priv, flow);
kvfree(attr->parse_attr);
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ mlx5_fc_destroy(esw->dev, attr->counter);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_esw_flow_attr slow_attr, *esw_attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
int err;
- err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
- e->encap_size, e->encap_header,
- &e->encap_id);
+ err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+ e->encap_size, e->encap_header,
+ MLX5_FLOW_NAMESPACE_FDB,
+ &e->encap_id);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
err);
@@ -942,26 +1049,20 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
list_for_each_entry(flow, &e->flows, encap) {
esw_attr = flow->esw_attr;
esw_attr->encap_id = e->encap_id;
- flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
- if (IS_ERR(flow->rule[0])) {
- err = PTR_ERR(flow->rule[0]);
+ spec = &esw_attr->parse_attr->spec;
+
+ /* update from slow path rule to encap rule */
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
continue;
}
- if (esw_attr->mirror_count) {
- flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
- if (IS_ERR(flow->rule[1])) {
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr);
- err = PTR_ERR(flow->rule[1]);
- mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n",
- err);
- continue;
- }
- }
-
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
+ flow->rule[0] = rule;
}
}
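mlx5e_tc_encap_flows_add() above swaps each flow from its slow-path rule to the encap rule make-before-break: the replacement is offloaded first and the old rule is removed only once the new one is in place, so the flow is never left without a rule, and a failed update simply keeps the old rule (the continue above). The same ordering in a tiny self-contained model, with int handles standing in for flow rules:

#include <stdio.h>

static int offload_rule(int id) { return id; }   /* < 0 would mean failure */
static void unoffload_rule(int id) { printf("removed rule %d\n", id); }

int main(void)
{
    int active = 1;                       /* slow-path rule */
    int replacement = offload_rule(2);    /* offload the encap rule first */

    if (replacement < 0) {
        printf("keeping rule %d\n", active);   /* old rule stays valid */
        return 1;
    }
    unoffload_rule(active);    /* only now tear down the old rule */
    active = replacement;
    printf("active rule %d\n", active);
    return 0;
}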
@@ -969,25 +1070,44 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr slow_attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
+ int err;
list_for_each_entry(flow, &e->flows, encap) {
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ spec = &flow->esw_attr->parse_attr->spec;
- flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- if (attr->mirror_count)
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+ /* update from encap rule to slow path rule */
+ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
+ err);
+ continue;
}
+
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
+ flow->rule[0] = rule;
}
if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
- mlx5_encap_dealloc(priv->mdev, e->encap_id);
+ mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}
}
+static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
+{
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ return flow->esw_attr->counter;
+ else
+ return flow->nic_attr->counter;
+}
+
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
@@ -1013,7 +1133,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
continue;
list_for_each_entry(flow, &e->flows, encap) {
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
- counter = mlx5_flow_rule_counter(flow->rule[0]);
+ counter = mlx5e_tc_get_counter(flow);
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
neigh_used = true;
@@ -1053,7 +1173,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
if (e->flags & MLX5_ENCAP_ENTRY_VALID)
- mlx5_encap_dealloc(priv->mdev, e->encap_id);
+ mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
hash_del_rcu(&e->encap_hlist);
kfree(e->encap_header);
@@ -1105,6 +1225,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
+ struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1133,6 +1254,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f);
else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "port isn't an offloaded vxlan udp dport");
netdev_warn(priv->netdev,
"%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
return -EOPNOTSUPP;
@@ -1149,6 +1272,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
udp_sport, ntohs(key->src));
} else { /* udp dst port must be given */
vxlan_match_offload_err:
+ NL_SET_ERR_MSG_MOD(extack,
+ "IP tunnel decap offload supported only for vxlan, must set UDP dport");
netdev_warn(priv->netdev,
"IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
return -EOPNOTSUPP;
@@ -1225,6 +1350,16 @@ vxlan_match_offload_err:
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+
+ if (mask->ttl &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB
+ (priv->mdev,
+ ft_field_support.outer_ipv4_ttl)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL is not supported");
+ return -EOPNOTSUPP;
+ }
+
}
/* Enforce DMAC when offloading incoming tunneled flows.
@@ -1247,6 +1382,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f,
u8 *match_level)
{
+ struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1277,6 +1413,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
f->dissector->used_keys);
return -EOPNOTSUPP;
@@ -1368,6 +1505,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_L2;
}
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1550,8 +1690,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (mask->ttl &&
!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
- ft_field_support.outer_ipv4_ttl))
+ ft_field_support.outer_ipv4_ttl)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL is not supported");
return -EOPNOTSUPP;
+ }
if (mask->tos || mask->ttl)
*match_level = MLX5_MATCH_L3;
@@ -1593,6 +1736,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
udp_dport, ntohs(key->dst));
break;
default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only UDP and TCP transports are supported for L4 matching");
netdev_err(priv->netdev,
"Only UDP and TCP transport are supported\n");
return -EINVAL;
@@ -1629,6 +1774,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1643,6 +1789,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
if (rep->vport != FDB_UPLINK_VPORT &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
esw->offloads.inline_mode < match_level)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Flow is not offloaded due to min inline setting");
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
match_level, esw->offloads.inline_mode);
@@ -1744,7 +1892,8 @@ static struct mlx5_fields fields[] = {
*/
static int offload_pedit_fields(struct pedit_headers *masks,
struct pedit_headers *vals,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
int i, action_size, nactions, max_actions, first, last, next_z;
@@ -1783,11 +1932,15 @@ static int offload_pedit_fields(struct pedit_headers *masks,
continue;
if (s_mask && a_mask) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't set and add to the same HW field");
printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
return -EOPNOTSUPP;
}
if (nactions == max_actions) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "too many pedit actions, can't offload");
printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
return -EOPNOTSUPP;
}
@@ -1820,6 +1973,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
next_z = find_next_zero_bit(&mask, field_bsize, first);
last = find_last_bit(&mask, field_bsize);
if (first < next_z && next_z < last) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rewrite of few sub-fields isn't supported");
printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
mask);
return -EOPNOTSUPP;
@@ -1878,7 +2033,8 @@ static const struct pedit_headers zero_masks = {};
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
const struct tc_action *a, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
{
struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
int nkeys, i, err = -EOPNOTSUPP;
@@ -1896,12 +2052,13 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
err = -EOPNOTSUPP; /* can't be all optimistic */
if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
- netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "legacy pedit isn't offloaded");
goto out_err;
}
if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
- netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
+ NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
goto out_err;
}
@@ -1918,13 +2075,15 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
if (err)
goto out_err;
- err = offload_pedit_fields(masks, vals, parse_attr);
+ err = offload_pedit_fields(masks, vals, parse_attr, extack);
if (err < 0)
goto out_dealloc_parsed_actions;
for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
cmd_masks = &masks[cmd];
if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "attempt to offload an unsupported field");
netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
16, 1, cmd_masks, sizeof(zero_masks), true);
@@ -1941,19 +2100,26 @@ out_err:
return err;
}
-static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
+static bool csum_offload_supported(struct mlx5e_priv *priv,
+ u32 action,
+ u32 update_flags,
+ struct netlink_ext_ack *extack)
{
u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
TCA_CSUM_UPDATE_FLAG_UDP;
/* The HW recalcs checksums only if re-writing headers */
if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TC csum action is only offloaded with pedit");
netdev_warn(priv->netdev,
"TC csum action is only offloaded with pedit\n");
return false;
}
if (update_flags & ~prot_flags) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload TC csum action for some header/s");
netdev_warn(priv->netdev,
"can't offload TC csum action for some header/s - flags %#x\n",
update_flags);
@@ -1964,7 +2130,8 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
}
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
- struct tcf_exts *exts)
+ struct tcf_exts *exts,
+ struct netlink_ext_ack *extack)
{
const struct tc_action *a;
bool modify_ip_header;
@@ -2002,6 +2169,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
if (modify_ip_header && ip_proto != IPPROTO_TCP &&
ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of non TCP/UDP");
pr_info("can't offload re-write of ip proto %d\n", ip_proto);
return false;
}
@@ -2013,7 +2182,8 @@ out_ok:
static bool actions_match_supported(struct mlx5e_priv *priv,
struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
u32 actions;
@@ -2027,7 +2197,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- return modify_header_match_supported(&parse_attr->spec, exts);
+ return modify_header_match_supported(&parse_attr->spec, exts,
+ extack);
return true;
}
@@ -2040,15 +2211,16 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
fmdev = priv->mdev;
pmdev = peer_priv->mdev;
- mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
- mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
+ fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
+ psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
return (fsystem_guid == psystem_guid);
}
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
const struct tc_action *a;
@@ -2072,7 +2244,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_pedit(a)) {
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr);
+ parse_attr, extack);
if (err)
return err;
@@ -2083,7 +2255,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_csum(a)) {
if (csum_offload_supported(priv, action,
- tcf_csum_update_flags(a)))
+ tcf_csum_update_flags(a),
+ extack))
continue;
return -EOPNOTSUPP;
@@ -2099,6 +2272,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "device is not on same HW, can't offload");
netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
peer_dev->name);
return -EINVAL;
@@ -2110,8 +2285,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
u32 mark = tcf_skbedit_mark(a);
if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
- netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
- mark);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bad flow mark - only 16 bit is supported");
return -EINVAL;
}
@@ -2124,7 +2299,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
attr->action = action;
- if (!actions_match_supported(priv, exts, parse_attr, flow))
+ if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
@@ -2328,7 +2503,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
return -ENOMEM;
switch (e->tunnel_type) {
- case MLX5_HEADER_TYPE_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
fl4.flowi4_proto = IPPROTO_UDP;
fl4.fl4_dport = tun_key->tp_dst;
break;
@@ -2372,7 +2547,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
read_unlock_bh(&n->lock);
switch (e->tunnel_type) {
- case MLX5_HEADER_TYPE_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
gen_vxlan_header_ipv4(out_dev, encap_header,
ipv4_encap_size, e->h_dest, tos, ttl,
fl4.daddr,
@@ -2392,8 +2567,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
- ipv4_encap_size, encap_header, &e->encap_id);
+ err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+ ipv4_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB,
+ &e->encap_id);
if (err)
goto destroy_neigh_entry;
@@ -2437,7 +2614,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
return -ENOMEM;
switch (e->tunnel_type) {
- case MLX5_HEADER_TYPE_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
fl6.flowi6_proto = IPPROTO_UDP;
fl6.fl6_dport = tun_key->tp_dst;
break;
@@ -2481,7 +2658,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
read_unlock_bh(&n->lock);
switch (e->tunnel_type) {
- case MLX5_HEADER_TYPE_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
gen_vxlan_header_ipv6(out_dev, encap_header,
ipv6_encap_size, e->h_dest, tos, ttl,
&fl6.daddr,
@@ -2502,8 +2679,10 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
- ipv6_encap_size, encap_header, &e->encap_id);
+ err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+ ipv6_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB,
+ &e->encap_id);
if (err)
goto destroy_neigh_entry;
@@ -2526,7 +2705,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
unsigned short family = ip_tunnel_info_af(tun_info);
@@ -2544,6 +2724,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
/* setting udp src port isn't supported */
if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
+ NL_SET_ERR_MSG_MOD(extack,
+ "must set udp dst port and not set udp src port");
netdev_warn(priv->netdev,
"must set udp dst port and not set udp src port\n");
return -EOPNOTSUPP;
@@ -2551,8 +2733,10 @@ vxlan_encap_offload_err:
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
- tunnel_type = MLX5_HEADER_TYPE_VXLAN;
+ tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
} else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "port isn't an offloaded vxlan udp dport");
netdev_warn(priv->netdev,
"%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
return -EOPNOTSUPP;
@@ -2657,8 +2841,10 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct ip_tunnel_info *info = NULL;
@@ -2683,7 +2869,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_pedit(a)) {
err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr);
+ parse_attr, extack);
if (err)
return err;
@@ -2694,7 +2880,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_csum(a)) {
if (csum_offload_supported(priv, action,
- tcf_csum_update_flags(a)))
+ tcf_csum_update_flags(a),
+ extack))
continue;
return -EOPNOTSUPP;
@@ -2707,6 +2894,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
out_dev = tcf_mirred_dev(a);
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't support more output ports, can't offload forwarding");
pr_err("can't support more than %d output ports, can't offload forwarding\n",
attr->out_count);
return -EOPNOTSUPP;
@@ -2725,11 +2914,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
parse_attr->mirred_ifindex = out_dev->ifindex;
parse_attr->tun_info = *info;
attr->parse_attr = parse_attr;
- action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+ action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
/* attr->out_rep is resolved when we handle encap */
} else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not on same switch HW, can't offload forwarding");
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
return -EINVAL;
@@ -2762,14 +2953,35 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
continue;
}
+ if (is_tcf_gact_goto_chain(a)) {
+ u32 dest_chain = tcf_gact_goto_chain_index(a);
+ u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+
+ if (dest_chain <= attr->chain) {
+ NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->dest_chain = dest_chain;
+
+ continue;
+ }
+
return -EINVAL;
}
attr->action = action;
- if (!actions_match_supported(priv, exts, parse_attr, flow))
+ if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
return -EOPNOTSUPP;
}
@@ -2777,9 +2989,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return 0;
}
-static void get_flags(int flags, u8 *flow_flags)
+static void get_flags(int flags, u16 *flow_flags)
{
- u8 __flow_flags = 0;
+ u16 __flow_flags = 0;
if (flags & MLX5E_TC_INGRESS)
__flow_flags |= MLX5E_TC_FLOW_INGRESS;
@@ -2808,31 +3020,15 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
return &priv->fs.tc.ht;
}
-int mlx5e_configure_flower(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f, int flags)
+static int
+mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
+ struct tc_cls_flower_offload *f, u16 flow_flags,
+ struct mlx5e_tc_flow_parse_attr **__parse_attr,
+ struct mlx5e_tc_flow **__flow)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct rhashtable *tc_ht = get_tc_ht(priv);
struct mlx5e_tc_flow *flow;
- int attr_size, err = 0;
- u8 flow_flags = 0;
-
- get_flags(flags, &flow_flags);
-
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
- if (flow) {
- netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
- return 0;
- }
-
- if (esw && esw->mode == SRIOV_OFFLOADS) {
- flow_flags |= MLX5E_TC_FLOW_ESWITCH;
- attr_size = sizeof(struct mlx5_esw_flow_attr);
- } else {
- flow_flags |= MLX5E_TC_FLOW_NIC;
- attr_size = sizeof(struct mlx5_nic_flow_attr);
- }
+ int err;
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
@@ -2846,45 +3042,161 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
flow->priv = priv;
err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
- if (err < 0)
+ if (err)
goto err_free;
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
- if (err < 0)
- goto err_free;
- flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
- } else {
- err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
- if (err < 0)
- goto err_free;
- flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
- }
+ *__flow = flow;
+ *__parse_attr = parse_attr;
- if (IS_ERR(flow->rule[0])) {
- err = PTR_ERR(flow->rule[0]);
- if (err != -EAGAIN)
- goto err_free;
- }
+ return 0;
- if (err != -EAGAIN)
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+err_free:
+ kfree(flow);
+ kvfree(parse_attr);
+ return err;
+}
+
+static int
+mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f,
+ u16 flow_flags,
+ struct mlx5e_tc_flow **__flow)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_tc_flow *flow;
+ int attr_size, err;
+
+ flow_flags |= MLX5E_TC_FLOW_ESWITCH;
+ attr_size = sizeof(struct mlx5_esw_flow_attr);
+ err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
+ &parse_attr, &flow);
+ if (err)
+ goto out;
+
+ flow->esw_attr->chain = f->common.chain_index;
+ flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
+ err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
+ if (err)
+ goto err_free;
+
+ err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
+ if (err)
+ goto err_free;
- if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
- !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
+ if (!(flow->esw_attr->action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
kvfree(parse_attr);
- err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
- if (err) {
- mlx5e_tc_del_flow(priv, flow);
- kfree(flow);
- }
+ *__flow = flow;
+ return 0;
+
+err_free:
+ kfree(flow);
+ kvfree(parse_attr);
+out:
return err;
+}
+
+static int
+mlx5e_add_nic_flow(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f,
+ u16 flow_flags,
+ struct mlx5e_tc_flow **__flow)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_tc_flow *flow;
+ int attr_size, err;
+
+ /* multi-chain not supported for NIC rules */
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
+ return -EOPNOTSUPP;
+
+ flow_flags |= MLX5E_TC_FLOW_NIC;
+ attr_size = sizeof(struct mlx5_nic_flow_attr);
+ err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
+ &parse_attr, &flow);
+ if (err)
+ goto out;
+
+ err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
+ if (err)
+ goto err_free;
+
+ err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
+ if (err)
+ goto err_free;
+
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ kvfree(parse_attr);
+ *__flow = flow;
+
+ return 0;
err_free:
+ kfree(flow);
kvfree(parse_attr);
+out:
+ return err;
+}
+
+static int
+mlx5e_tc_add_flow(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f,
+ int flags,
+ struct mlx5e_tc_flow **flow)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ u16 flow_flags;
+ int err;
+
+ get_flags(flags, &flow_flags);
+
+ if (!tc_can_offload_extack(priv->netdev, f->common.extack))
+ return -EOPNOTSUPP;
+
+ if (esw && esw->mode == SRIOV_OFFLOADS)
+ err = mlx5e_add_fdb_flow(priv, f, flow_flags, flow);
+ else
+ err = mlx5e_add_nic_flow(priv, f, flow_flags, flow);
+
+ return err;
+}
+
+int mlx5e_configure_flower(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f, int flags)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct rhashtable *tc_ht = get_tc_ht(priv);
+ struct mlx5e_tc_flow *flow;
+ int err = 0;
+
+ flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ if (flow) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "flow cookie already exists, ignoring");
+ netdev_warn_once(priv->netdev,
+ "flow cookie %lx already exists, ignoring\n",
+ f->cookie);
+ goto out;
+ }
+
+ err = mlx5e_tc_add_flow(priv, f, flags, &flow);
+ if (err)
+ goto out;
+
+ err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
+ if (err)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ mlx5e_tc_del_flow(priv, flow);
kfree(flow);
+out:
return err;
}
@@ -2935,7 +3247,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
return 0;
- counter = mlx5_flow_rule_counter(flow->rule[0]);
+ counter = mlx5e_tc_get_counter(flow);
if (!counter)
return 0;
@@ -2946,14 +3258,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
return 0;
}
+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+ struct mlx5e_priv *peer_priv)
+{
+ struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+ struct mlx5e_hairpin_entry *hpe;
+ u16 peer_vhca_id;
+ int bkt;
+
+ if (!same_hw_devs(priv, peer_priv))
+ return;
+
+ peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+ hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+ if (hpe->peer_vhca_id == peer_vhca_id)
+ hpe->hp->pair->peer_gone = true;
+ }
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct mlx5e_flow_steering *fs;
+ struct mlx5e_priv *peer_priv;
+ struct mlx5e_tc_table *tc;
+ struct mlx5e_priv *priv;
+
+ if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+ event != NETDEV_UNREGISTER ||
+ ndev->reg_state == NETREG_REGISTERED)
+ return NOTIFY_DONE;
+
+ tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+ fs = container_of(tc, struct mlx5e_flow_steering, tc);
+ priv = container_of(fs, struct mlx5e_priv, fs);
+ peer_priv = netdev_priv(ndev);
+ if (priv == peer_priv ||
+ !(priv->netdev->features & NETIF_F_HW_TC))
+ return NOTIFY_DONE;
+
+ mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+ return NOTIFY_DONE;
+}
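The notifier added above lets the tc code hear about any netdev being unregistered, so hairpin entries whose peer device disappears can be flagged as dead. The publish/subscribe shape behind register_netdevice_notifier(), modelled in plain C with a fixed-size callback table; the kernel's notifier chains are priority-ordered linked lists, so this is only the idea:

#include <stdio.h>

#define NOTIFY_DONE   0
#define EV_UNREGISTER 1

typedef int (*notifier_fn)(unsigned long event, void *data);

static notifier_fn chain[4];
static int chain_len;

static void register_notifier(notifier_fn fn)
{
    chain[chain_len++] = fn;
}

static void call_chain(unsigned long event, void *data)
{
    for (int i = 0; i < chain_len; i++)
        chain[i](event, data);
}

static int tc_event(unsigned long event, void *data)
{
    if (event != EV_UNREGISTER)
        return NOTIFY_DONE;    /* not interested, as in the check above */
    printf("peer %s gone, mark its hairpins dead\n", (const char *)data);
    return NOTIFY_DONE;
}

int main(void)
{
    register_notifier(tc_event);
    call_chain(EV_UNREGISTER, "eth1");
    return 0;
}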
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ int err;
hash_init(tc->mod_hdr_tbl);
hash_init(tc->hairpin_tbl);
- return rhashtable_init(&tc->ht, &tc_ht_params);
+ err = rhashtable_init(&tc->ht, &tc_ht_params);
+ if (err)
+ return err;
+
+ tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+ if (register_netdevice_notifier(&tc->netdevice_nb)) {
+ tc->netdevice_nb.notifier_call = NULL;
+ mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+ }
+
+ return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2969,6 +3338,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ if (tc->netdevice_nb.notifier_call)
+ unregister_netdevice_notifier(&tc->netdevice_nb);
+
rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
if (!IS_ERR_OR_NULL(tc->t)) {
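After the refactor, mlx5e_configure_flower() has one clear shape: reject a duplicate cookie up front, build and offload the flow, insert it into the hashtable, and unwind the offload if the insert fails. The same lookup/insert/unwind ordering in a self-contained sketch, with a flat array standing in for the rhashtable:

#include <stdio.h>

#define MAX_FLOWS 8

static unsigned long cookies[MAX_FLOWS];
static int nflows;

static int lookup(unsigned long cookie)
{
    for (int i = 0; i < nflows; i++)
        if (cookies[i] == cookie)
            return 1;
    return 0;
}

static int add_flow(unsigned long cookie)
{
    if (lookup(cookie))
        return 0;    /* duplicate cookie: ignore, as above */

    /* ... build and offload the flow here ... */

    if (nflows == MAX_FLOWS) {
        /* insert failed: the offload would be torn down here */
        return -1;
    }
    cookies[nflows++] = cookie;
    return 0;
}

int main(void)
{
    printf("%d %d\n", add_flow(0x1234), add_flow(0x1234));
    return 0;
}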
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ae73ea992845..6dacaeba2fbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -290,10 +290,9 @@ dma_unmap_wqe_err:
static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
struct mlx5_wq_cyc *wq,
- u16 pi, u16 frag_pi)
+ u16 pi, u16 nnops)
{
struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
edge_wi = wi + nnops;
@@ -348,8 +347,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
+ u16 headlen, ihs, contig_wqebbs_room;
u16 ds_cnt, ds_cnt_inl = 0;
- u16 headlen, ihs, frag_pi;
u8 num_wqebbs, opcode;
u32 num_bytes;
int num_dma;
@@ -386,9 +385,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
}
@@ -636,7 +635,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 headlen, ihs, pi, frag_pi;
+ u16 headlen, ihs, pi, contig_wqebbs_room;
u16 ds_cnt, ds_cnt_inl = 0;
u8 num_wqebbs, opcode;
u32 num_bytes;
@@ -672,13 +671,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
}
- mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
+ mlx5i_sq_fetch_wqe(sq, &wqe, pi);
/* fill wqe */
wi = &sq->db.wqe_info[pi];
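The en_tx.c hunks above replace the fragment-based edge math with a direct check of how many contiguous WQEBBs remain before the cyclic work queue wraps, padding with NOPs when the descriptor would not fit. A rough standalone model of that arithmetic, assuming a power-of-two ring size (this is a sketch, not the driver's helpers):

	/* Contiguous slots left before a power-of-two ring of size `sz`
	 * wraps, given a producer index `pi` already masked into [0, sz).
	 */
	static inline unsigned int contig_room(unsigned int sz, unsigned int pi)
	{
		return sz - pi;
	}

	static void post_descriptor(unsigned int sz, unsigned int *pc,
				    unsigned int nslots)
	{
		unsigned int pi = *pc & (sz - 1);

		if (contig_room(sz, pi) < nslots) {
			/* pad the ring tail with NOPs so the descriptor
			 * stays contiguous; the next index wraps to 0
			 */
			*pc += contig_room(sz, pi);
		}
		*pc += nslots; /* descriptor occupies nslots contiguous entries */
	}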
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 48864f4988a4..c1e1a16a9b07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -273,7 +273,7 @@ static void eq_pf_process(struct mlx5_eq *eq)
case MLX5_PFAULT_SUBTYPE_WQE:
/* WQE based event */
pfault->type =
- be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
+ (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
pfault->token =
be32_to_cpu(pf_eqe->wqe.token);
pfault->wqe.wq_num =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2b252cde5cc2..d004957328f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -263,7 +263,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
esw_debug(dev, "Create FDB log_max_size(%d)\n",
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ root_ns = mlx5_get_fdb_sub_ns(dev, 0);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
return -EOPNOTSUPP;
@@ -1198,7 +1198,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter = counter;
+ drop_ctr_dst.counter_id = mlx5_fc_id(counter);
dst = &drop_ctr_dst;
dest_num++;
}
@@ -1285,7 +1285,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter = counter;
+ drop_ctr_dst.counter_id = mlx5_fc_id(counter);
dst = &drop_ctr_dst;
dest_num++;
}
@@ -1746,7 +1746,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->enabled_vports = 0;
esw->mode = SRIOV_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
- if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
u32 max_guarantee = 0;
int i;
- for (i = 0; i <= esw->total_vports; i++) {
+ for (i = 0; i < esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled || evport->info.min_rate < max_guarantee)
continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int err;
int i;
- for (i = 0; i <= esw->total_vports; i++) {
+ for (i = 0; i < esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled)
continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index c17bfcab517c..aaafc9f17115 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -59,6 +59,10 @@
#define mlx5_esw_has_fwd_fdb(dev) \
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
+#define FDB_MAX_CHAIN 3
+#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
+#define FDB_MAX_PRIO 16
+
struct vport_ingress {
struct mlx5_flow_table *acl;
struct mlx5_flow_group *allow_untagged_spoofchk_grp;
@@ -120,6 +124,13 @@ struct mlx5_vport {
u16 enabled_events;
};
+enum offloads_fdb_flags {
+ ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
+};
+
+extern const unsigned int ESW_POOLS[4];
+
+#define PRIO_LEVELS 2
struct mlx5_eswitch_fdb {
union {
struct legacy_fdb {
@@ -130,16 +141,24 @@ struct mlx5_eswitch_fdb {
} legacy;
struct offloads_fdb {
- struct mlx5_flow_table *fast_fdb;
- struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_handle *miss_rule_uni;
struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
+
+ struct {
+ struct mlx5_flow_table *fdb;
+ u32 num_rules;
+ } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS];
+ /* Protects fdb_prio table */
+ struct mutex fdb_prio_lock;
+
+ int fdb_left[ARRAY_SIZE(ESW_POOLS)];
} offloads;
};
+ u32 flags;
};
struct mlx5_esw_offload {
@@ -181,6 +200,7 @@ struct mlx5_eswitch {
struct mlx5_esw_offload offloads;
int mode;
+ int nvports;
};
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
@@ -228,9 +248,23 @@ void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *attr);
+void
+mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *attr);
+
+bool
+mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw);
+
+u16
+mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw);
+
+u32
+mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw);
struct mlx5_flow_handle *
-mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
+ struct mlx5_flow_destination *dest);
enum {
SET_VLAN_STRIP = BIT(0),
@@ -265,15 +299,22 @@ struct mlx5_esw_flow_attr {
u32 encap_id;
u32 mod_hdr_id;
u8 match_level;
+ struct mlx5_fc *counter;
+ u32 chain;
+ u16 prio;
+ u32 dest_chain;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+ struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+ struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
@@ -314,6 +355,11 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {}
static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
+
+#define FDB_MAX_CHAIN 1
+#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
+#define FDB_MAX_PRIO 1
+
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 3028e8d90920..9eac137790f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -37,33 +37,59 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "en.h"
+#include "fs_core.h"
enum {
FDB_FAST_PATH = 0,
FDB_SLOW_PATH
};
+#define fdb_prio_table(esw, chain, prio, level) \
+ (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
+
+static struct mlx5_flow_table *
+esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
+static void
+esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
+
+bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
+{
+ return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
+}
+
+u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
+{
+ if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
+ return FDB_MAX_CHAIN;
+
+ return 0;
+}
+
+u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
+{
+ if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
+ return FDB_MAX_PRIO;
+
+ return 1;
+}
+
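Callers such as the tc offload path are expected to clamp user-requested chains and priorities against these helpers before building a rule. A hedged caller-side sketch of such a validation (the check itself is an illustration, not code from this patch):

	/* Hypothetical caller-side check; mirrors how a requested tc
	 * chain/prio would be validated against the exported ranges.
	 */
	static int validate_chain_prio(struct mlx5_eswitch *esw, u32 chain, u16 prio)
	{
		if (chain > mlx5_eswitch_get_chain_range(esw))
			return -EOPNOTSUPP;	/* chain beyond FW/driver support */
		if (prio == 0 || prio > mlx5_eswitch_get_prio_range(esw))
			return -EOPNOTSUPP;	/* prios are 1-based */
		return 0;
	}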
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
- struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_table *ft = NULL;
- struct mlx5_fc *counter = NULL;
+ struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
+ bool mirror = !!(attr->mirror_count);
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *fdb;
int j, i = 0;
void *misc;
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
- if (attr->mirror_count)
- ft = esw->fdb_table.offloads.fwd_fdb;
- else
- ft = esw->fdb_table.offloads.fast_fdb;
-
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
@@ -81,23 +107,33 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- for (j = attr->mirror_count; j < attr->out_count; j++) {
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->out_rep[j]->vport;
- dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
- dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+ if (attr->dest_chain) {
+ struct mlx5_flow_table *ft;
+
+ ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
+ if (IS_ERR(ft)) {
+ rule = ERR_CAST(ft);
+ goto err_create_goto_table;
+ }
+
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = ft;
i++;
+ } else {
+ for (j = attr->mirror_count; j < attr->out_count; j++) {
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest[i].vport.num = attr->out_rep[j]->vport;
+ dest[i].vport.vhca_id =
+ MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
+ dest[i].vport.vhca_id_valid =
+ !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+ i++;
+ }
}
}
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(esw->dev, true);
- if (IS_ERR(counter)) {
- rule = ERR_CAST(counter);
- goto err_counter_alloc;
- }
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[i].counter = counter;
+ dest[i].counter_id = mlx5_fc_id(attr->counter);
i++;
}
@@ -127,10 +163,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id;
- if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
- flow_act.encap_id = attr->encap_id;
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
+ flow_act.reformat_id = attr->encap_id;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
+ fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ if (IS_ERR(fdb)) {
+ rule = ERR_CAST(fdb);
+ goto err_esw_get;
+ }
+
+ rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
if (IS_ERR(rule))
goto err_add_rule;
else
@@ -139,8 +181,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
err_add_rule:
- mlx5_fc_destroy(esw->dev, counter);
-err_counter_alloc:
+ esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+err_esw_get:
+ if (attr->dest_chain)
+ esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+err_create_goto_table:
return rule;
}
@@ -150,11 +195,25 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
- struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
+ struct mlx5_flow_table *fast_fdb;
+ struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
void *misc;
int i;
+ fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
+ if (IS_ERR(fast_fdb)) {
+ rule = ERR_CAST(fast_fdb);
+ goto err_get_fast;
+ }
+
+ fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
+ if (IS_ERR(fwd_fdb)) {
+ rule = ERR_CAST(fwd_fdb);
+ goto err_get_fwd;
+ }
+
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < attr->mirror_count; i++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
@@ -164,7 +223,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = esw->fdb_table.offloads.fwd_fdb,
+ dest[i].ft = fwd_fdb,
i++;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
@@ -187,25 +246,57 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
MLX5_MATCH_MISC_PARAMETERS;
- rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);
+ rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
- if (!IS_ERR(rule))
- esw->offloads.num_flows++;
+ if (IS_ERR(rule))
+ goto add_err;
+ esw->offloads.num_flows++;
+
+ return rule;
+add_err:
+ esw_put_prio_table(esw, attr->chain, attr->prio, 1);
+err_get_fwd:
+ esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+err_get_fast:
return rule;
}
+static void
+__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *attr,
+ bool fwd_rule)
+{
+ bool mirror = (attr->mirror_count > 0);
+
+ mlx5_del_flow_rules(rule);
+ esw->offloads.num_flows--;
+
+ if (fwd_rule) {
+ esw_put_prio_table(esw, attr->chain, attr->prio, 1);
+ esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+ } else {
+ esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ if (attr->dest_chain)
+ esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+ }
+}
+
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *attr)
{
- struct mlx5_fc *counter = NULL;
+ __mlx5_eswitch_del_rule(esw, rule, attr, false);
+}
- counter = mlx5_flow_rule_counter(rule);
- mlx5_del_flow_rules(rule);
- mlx5_fc_destroy(esw->dev, counter);
- esw->offloads.num_flows--;
+void
+mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *attr)
+{
+ __mlx5_eswitch_del_rule(esw, rule, attr, true);
}
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
@@ -294,7 +385,8 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
+ !attr->dest_chain);
err = esw_add_vlan_action_check(attr, push, pop, fwd);
if (err)
@@ -501,74 +593,170 @@ out:
#define ESW_OFFLOADS_NUM_GROUPS 4
-static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
+/* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS) and a virtual
+ * memory region of 16M (ESW_SIZE); this region is duplicated for each flow
+ * table pool. We can allocate up to 16M from each pool, and we keep track
+ * of how much we have used via put/get_sz_to_pool.
+ * Firmware doesn't report any of this for now.
+ * ESW_POOLS is expected to be sorted from large to small.
+ */
+#define ESW_SIZE (16 * 1024 * 1024)
+const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
+ 64 * 1024, 4 * 1024 };
+
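For reference, the per-pool table budgets that esw_create_offloads_fdb_tables() derives further down work out to 16M/4M = 4, 16M/1M = 16, 16M/64K = 256 and 16M/4K = 4096 tables when the device's log_max_ft_size admits all four sizes. A minimal restatement of that initialization (same math as the later hunk, not new driver code):

	/* Initial per-pool table budgets, assuming fdb_max is the largest
	 * table size the FW allows: ESW_SIZE / ESW_POOLS[i] yields
	 * { 4, 16, 256, 4096 } tables for the four pool sizes.
	 */
	static void init_fdb_left(int fdb_left[4], unsigned int fdb_max)
	{
		int i;

		for (i = 0; i < 4; i++)
			fdb_left[i] = ESW_POOLS[i] <= fdb_max ?
				      ESW_SIZE / ESW_POOLS[i] : 0;
	}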
+static int
+get_sz_from_pool(struct mlx5_eswitch *esw)
+{
+ int sz = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
+ if (esw->fdb_table.offloads.fdb_left[i]) {
+ --esw->fdb_table.offloads.fdb_left[i];
+ sz = ESW_POOLS[i];
+ break;
+ }
+ }
+
+ return sz;
+}
+
+static void
+put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
+ if (sz >= ESW_POOLS[i]) {
+ ++esw->fdb_table.offloads.fdb_left[i];
+ break;
+ }
+ }
+}
+
+static struct mlx5_flow_table *
+create_next_size_table(struct mlx5_eswitch *esw,
+ struct mlx5_flow_namespace *ns,
+ u16 table_prio,
+ int level,
+ u32 flags)
+{
+ struct mlx5_flow_table *fdb;
+ int sz;
+
+ sz = get_sz_from_pool(esw);
+ if (!sz)
+ return ERR_PTR(-ENOSPC);
+
+ fdb = mlx5_create_auto_grouped_flow_table(ns,
+ table_prio,
+ sz,
+ ESW_OFFLOADS_NUM_GROUPS,
+ level,
+ flags);
+ if (IS_ERR(fdb)) {
+ esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
+ (int)PTR_ERR(fdb), table_prio, level, sz);
+ put_sz_to_pool(esw, sz);
+ }
+
+ return fdb;
+}
+
+static struct mlx5_flow_table *
+esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
struct mlx5_core_dev *dev = esw->dev;
- struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
- int esw_size, err = 0;
+ struct mlx5_flow_namespace *ns;
+ int table_prio, l = 0;
u32 flags = 0;
- u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
- if (!root_ns) {
- esw_warn(dev, "Failed to get FDB flow namespace\n");
- err = -EOPNOTSUPP;
- goto out_namespace;
- }
+ if (chain == FDB_SLOW_PATH_CHAIN)
+ return esw->fdb_table.offloads.slow_fdb;
- esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
- max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);
+ mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
- esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
- 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+ fdb = fdb_prio_table(esw, chain, prio, level).fdb;
+ if (fdb) {
+ /* take ref on earlier levels as well */
+ while (level >= 0)
+ fdb_prio_table(esw, chain, prio, level--).num_rules++;
+ mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
+ return fdb;
+ }
- if (mlx5_esw_has_fwd_fdb(dev))
- esw_size >>= 1;
+ ns = mlx5_get_fdb_sub_ns(dev, chain);
+ if (!ns) {
+ esw_warn(dev, "Failed to get FDB sub namespace\n");
+ mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
- flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
+ flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
- esw_size,
- ESW_OFFLOADS_NUM_GROUPS, 0,
- flags);
- if (IS_ERR(fdb)) {
- err = PTR_ERR(fdb);
- esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
- goto out_namespace;
- }
- esw->fdb_table.offloads.fast_fdb = fdb;
+ table_prio = (chain * FDB_MAX_PRIO) + prio - 1;
+
+ /* create earlier levels for correct fs_core lookup when
+ * connecting tables
+ */
+ for (l = 0; l <= level; l++) {
+ if (fdb_prio_table(esw, chain, prio, l).fdb) {
+ fdb_prio_table(esw, chain, prio, l).num_rules++;
+ continue;
+ }
- if (!mlx5_esw_has_fwd_fdb(dev))
- goto out_namespace;
+ fdb = create_next_size_table(esw, ns, table_prio, l, flags);
+ if (IS_ERR(fdb)) {
+ l--;
+ goto err_create_fdb;
+ }
- fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
- esw_size,
- ESW_OFFLOADS_NUM_GROUPS, 1,
- flags);
- if (IS_ERR(fdb)) {
- err = PTR_ERR(fdb);
- esw_warn(dev, "Failed to create fwd table err %d\n", err);
- goto out_ft;
+ fdb_prio_table(esw, chain, prio, l).fdb = fdb;
+ fdb_prio_table(esw, chain, prio, l).num_rules = 1;
}
- esw->fdb_table.offloads.fwd_fdb = fdb;
- return err;
+ mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
+ return fdb;
-out_ft:
- mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
-out_namespace:
- return err;
+err_create_fdb:
+ mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
+ if (l >= 0)
+ esw_put_prio_table(esw, chain, prio, l);
+
+ return fdb;
+}
+
+static void
+esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
+{
+ int l;
+
+ if (chain == FDB_SLOW_PATH_CHAIN)
+ return;
+
+ mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
+
+ for (l = level; l >= 0; l--) {
+ if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
+ continue;
+
+ put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
+ mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
+ fdb_prio_table(esw, chain, prio, l).fdb = NULL;
+ }
+
+ mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}
-static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
+static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
- if (mlx5_esw_has_fwd_fdb(esw->dev))
- mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
- mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
+ /* If lazy creation isn't supported, deref the fast path tables */
+ if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
+ esw_put_prio_table(esw, 0, 1, 1);
+ esw_put_prio_table(esw, 0, 1, 0);
+ }
}
#define MAX_PF_SQ 256
@@ -579,12 +767,13 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
+ u32 *flow_group_in, max_flow_counter;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
- int table_size, ix, err = 0;
+ int table_size, ix, err = 0, i;
struct mlx5_flow_group *g;
+ u32 flags = 0, fdb_max;
void *match_criteria;
- u32 *flow_group_in;
u8 *dmac;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
@@ -599,12 +788,29 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
goto ns_err;
}
- err = esw_create_offloads_fast_fdb_table(esw);
- if (err)
- goto fast_fdb_err;
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
+
+ esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
+ max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
+ fdb_max);
+
+ for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
+ esw->fdb_table.offloads.fdb_left[i] =
+ ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;
+ /* Create the slow path FDB with encap enabled, so that further table
+ * instances can be created at run time while VFs are probed, if the
+ * FW allows that.
+ */
+ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+ flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+
+ ft_attr.flags = flags;
ft_attr.max_fte = table_size;
ft_attr.prio = FDB_SLOW_PATH;
@@ -616,6 +822,18 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
}
esw->fdb_table.offloads.slow_fdb = fdb;
+ /* If lazy creation isn't supported, open the fast path tables now */
+ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
+ esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
+ esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
+ esw_get_prio_table(esw, 0, 1, 0);
+ esw_get_prio_table(esw, 0, 1, 1);
+ } else {
+ esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
+ esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ }
+
/* create send-to-vport group */
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -663,6 +881,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
if (err)
goto miss_rule_err;
+ esw->nvports = nvports;
kvfree(flow_group_in);
return 0;
@@ -671,10 +890,9 @@ miss_rule_err:
miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
+ esw_destroy_offloads_fast_fdb_tables(esw);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
- esw_destroy_offloads_fast_fdb_table(esw);
-fast_fdb_err:
ns_err:
kvfree(flow_group_in);
return err;
@@ -682,7 +900,7 @@ ns_err:
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
- if (!esw->fdb_table.offloads.fast_fdb)
+ if (!esw->fdb_table.offloads.slow_fdb)
return;
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
@@ -692,7 +910,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
- esw_destroy_offloads_fast_fdb_table(esw);
+ esw_destroy_offloads_fast_fdb_tables(esw);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
@@ -775,10 +993,10 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
}
struct mlx5_flow_handle *
-mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
+ struct mlx5_flow_destination *dest)
{
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
@@ -796,12 +1014,10 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest.tir_num = tirn;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
- &flow_act, &dest, 1);
+ &flow_act, dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
goto out;
@@ -812,29 +1028,35 @@ out:
return flow_rule;
}
-static int esw_offloads_start(struct mlx5_eswitch *esw)
+static int esw_offloads_start(struct mlx5_eswitch *esw,
+ struct netlink_ext_ack *extack)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
if (esw->mode != SRIOV_LEGACY) {
- esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't set offloads mode, SRIOV legacy not enabled");
return -EINVAL;
}
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
if (err) {
- esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed setting eswitch to offloads");
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
- if (err1)
- esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
+ if (err1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed setting eswitch back to legacy");
+ }
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
num_vfs,
&esw->offloads.inline_mode)) {
esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
- esw_warn(esw->dev, "Inline mode is different between vports\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Inline mode is different between vports");
}
}
return err;
@@ -945,6 +1167,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
+ mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
+
err = esw_create_offloads_fdb_tables(esw, nvports);
if (err)
return err;
@@ -975,17 +1199,20 @@ create_ft_err:
return err;
}
-static int esw_offloads_stop(struct mlx5_eswitch *esw)
+static int esw_offloads_stop(struct mlx5_eswitch *esw,
+ struct netlink_ext_ack *extack)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err) {
- esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
- if (err1)
- esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+ if (err1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed setting eswitch back to offloads");
+ }
}
/* enable back PF RoCE */
@@ -1094,7 +1321,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
return 0;
}
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
u16 cur_mlx5_mode, mlx5_mode = 0;
@@ -1113,9 +1341,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
return 0;
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
- return esw_offloads_start(dev->priv.eswitch);
+ return esw_offloads_start(dev->priv.eswitch, extack);
else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
- return esw_offloads_stop(dev->priv.eswitch);
+ return esw_offloads_stop(dev->priv.eswitch, extack);
else
return -EINVAL;
}
@@ -1132,7 +1360,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1149,14 +1378,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
return 0;
/* fall through */
case MLX5_CAP_INLINE_MODE_L2:
- esw_warn(dev, "Inline mode can't be set\n");
+ NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
return -EOPNOTSUPP;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
break;
}
if (esw->offloads.num_flows > 0) {
- esw_warn(dev, "Can't set inline mode when flows are configured\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't set inline mode when flows are configured");
return -EOPNOTSUPP;
}
@@ -1167,8 +1397,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
for (vport = 1; vport < esw->enabled_vports; vport++) {
err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
if (err) {
- esw_warn(dev, "Failed to set min inline on vport %d\n",
- vport);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set min inline on vport");
goto revert_inline_mode;
}
}
@@ -1234,7 +1464,8 @@ out:
return 0;
}
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1245,7 +1476,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
return err;
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
- (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
+ (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
return -EOPNOTSUPP;
@@ -1261,19 +1492,24 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
return 0;
if (esw->offloads.num_flows > 0) {
- esw_warn(dev, "Can't set encapsulation when flows are configured\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't set encapsulation when flows are configured");
return -EOPNOTSUPP;
}
- esw_destroy_offloads_fast_fdb_table(esw);
+ esw_destroy_offloads_fdb_tables(esw);
esw->offloads.encap = encap;
- err = esw_create_offloads_fast_fdb_table(esw);
+
+ err = esw_create_offloads_fdb_tables(esw, esw->nvports);
+
if (err) {
- esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed re-creating fast FDB table");
esw->offloads.encap = !encap;
- (void)esw_create_offloads_fast_fdb_table(esw);
+ (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
}
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 5645a4facad2..515e3d6de051 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
return ERR_PTR(res);
}
- /* Context will be freed by wait func after completion */
+ /* Context should be freed by the caller after completion. */
return context;
}
@@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
cmd.flags = htonl(flags);
context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
- if (IS_ERR(context)) {
- err = PTR_ERR(context);
- goto out;
- }
+ if (IS_ERR(context))
+ return PTR_ERR(context);
err = mlx5_fpga_ipsec_cmd_wait(context);
if (err)
@@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
}
out:
+ kfree(context);
return err;
}
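With this change, mlx5_fpga_ipsec_cmd_exec() no longer hands ownership of the context to the wait function; every caller must kfree() it after waiting, as the reworked set_caps path above now does. A hedged restatement of the expected calling convention (condensed, names as in the patch):

	static int run_cmd(struct mlx5_core_dev *mdev, void *cmd, size_t sz)
	{
		void *context = mlx5_fpga_ipsec_cmd_exec(mdev, cmd, sz);
		int err;

		if (IS_ERR(context))
			return PTR_ERR(context);	/* nothing to free on error */

		err = mlx5_fpga_ipsec_cmd_wait(context);
		kfree(context);			/* caller owns the context now */
		return err;
	}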
@@ -650,7 +649,7 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
(match_criteria_enable &
~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
(flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
- flow_act->has_flow_tag)
+ (flow_act->flags & FLOW_ACT_HAS_TAG))
return false;
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 8e01f818021b..08a891f9aade 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -152,7 +152,8 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
struct mlx5_flow_table *next_ft,
unsigned int *table_id, u32 flags)
{
- int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
+ int en_encap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
+ int en_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
int err;
@@ -169,9 +170,9 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
}
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
- en_encap_decap);
- MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en,
- en_encap_decap);
+ en_decap);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
+ en_encap);
switch (op_mod) {
case FS_FT_OP_MOD_NORMAL:
@@ -343,7 +344,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
- MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id);
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ fte->action.reformat_id);
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_id);
@@ -417,7 +419,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
- dst->dest_attr.counter->id);
+ dst->dest_attr.counter_id);
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
list_size++;
}
@@ -594,62 +596,78 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
*bytes = MLX5_GET64(traffic_counter, stats, octets);
}
-int mlx5_encap_alloc(struct mlx5_core_dev *dev,
- int header_type,
- size_t size,
- void *encap_header,
- u32 *encap_id)
+int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ u32 *packet_reformat_id)
{
- int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
- u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
- void *encap_header_in;
- void *header;
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
+ void *packet_reformat_context_in;
+ int max_encap_size;
+ void *reformat;
int inlen;
int err;
u32 *in;
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB)
+ max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
+ else
+ max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
+
if (size > max_encap_size) {
mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
size, max_encap_size);
return -EINVAL;
}
- in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
+ in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
GFP_KERNEL);
if (!in)
return -ENOMEM;
- encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
- header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
- inlen = header - (void *)in + size;
+ packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
+ in, packet_reformat_context);
+ reformat = MLX5_ADDR_OF(packet_reformat_context_in,
+ packet_reformat_context_in,
+ reformat_data);
+ inlen = reformat - (void *)in + size;
memset(in, 0, inlen);
- MLX5_SET(alloc_encap_header_in, in, opcode,
- MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
- MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
- MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
- memcpy(header, encap_header, size);
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_data_size, size);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_type, reformat_type);
+ memcpy(reformat, reformat_data, size);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
+ *packet_reformat_id = MLX5_GET(alloc_packet_reformat_context_out,
+ out, packet_reformat_id);
kfree(in);
return err;
}
+EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
-void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
+void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
+ u32 packet_reformat_id)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
- u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
memset(in, 0, sizeof(in));
- MLX5_SET(dealloc_encap_header_in, in, opcode,
- MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
- MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
+ MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
+ packet_reformat_id);
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
u8 namespace, u8 num_actions,
@@ -667,9 +685,14 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
table_type = FS_FT_FDB;
break;
case MLX5_FLOW_NAMESPACE_KERNEL:
+ case MLX5_FLOW_NAMESPACE_BYPASS:
max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_RX;
break;
+ case MLX5_FLOW_NAMESPACE_EGRESS:
+ max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
+ table_type = FS_FT_NIC_TX;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -702,6 +725,7 @@ int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
kfree(in);
return err;
}
+EXPORT_SYMBOL(mlx5_modify_header_alloc);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
{
@@ -716,6 +740,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+EXPORT_SYMBOL(mlx5_modify_header_dealloc);
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
@@ -760,8 +785,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_FDB:
case FS_FT_SNIFFER_RX:
case FS_FT_SNIFFER_TX:
- return mlx5_fs_cmd_get_fw_cmds();
case FS_FT_NIC_TX:
+ return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 37d114c668b7..9d73eb955f75 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -40,6 +40,7 @@
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
+#include "eswitch.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
@@ -76,6 +77,14 @@
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
+#define FS_CHAINING_CAPS_EGRESS \
+ FS_REQUIRED_CAPS( \
+ FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_transmit.modify_root), \
+ FS_CAP(flow_table_properties_nic_transmit \
+ .identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
+
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
@@ -151,6 +160,17 @@ static struct init_tree_node {
}
};
+static struct init_tree_node egress_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 1,
+ .children = (struct init_tree_node[]) {
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
+ FS_CHAINING_CAPS_EGRESS,
+ ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ }
+};
+
enum fs_i_lock_class {
FS_LOCK_GRANDPARENT,
FS_LOCK_PARENT,
@@ -694,7 +714,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
struct fs_node *iter = list_entry(start, struct fs_node, list);
struct mlx5_flow_table *ft = NULL;
- if (!root)
+ if (!root || root->type == FS_TYPE_PRIO_CHAINS)
return NULL;
list_for_each_advance_continue(iter, &root->children, reverse) {
@@ -1388,7 +1408,7 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
return false;
if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_DECAP |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
@@ -1408,7 +1428,7 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act
return -EEXIST;
}
- if (flow_act->has_flow_tag &&
+ if ((flow_act->flags & FLOW_ACT_HAS_TAG) &&
fte->action.flow_tag != flow_act->flow_tag) {
mlx5_core_warn(get_dev(&fte->node),
"FTE flow tag %u already exists with different flow tag %u\n",
@@ -1455,29 +1475,8 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
return handle;
}
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
+static bool counter_is_valid(u32 action)
{
- struct mlx5_flow_rule *dst;
- struct fs_fte *fte;
-
- fs_get_obj(fte, handle->rule[0]->node.parent);
-
- fs_for_each_dst(dst, fte) {
- if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
- return dst->dest_attr.counter;
- }
-
- return NULL;
-}
-
-static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
-{
- if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
- return !counter;
-
- if (!counter)
- return false;
-
return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}
@@ -1487,7 +1486,7 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
struct mlx5_flow_table *ft)
{
if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
- return counter_is_valid(dest->counter, action);
+ return counter_is_valid(action);
if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return true;
@@ -1629,6 +1628,8 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
search_again_locked:
version = matched_fgs_get_version(match_head);
+ if (flow_act->flags & FLOW_ACT_NO_APPEND)
+ goto skip_search;
/* Try to find a fg that already contains a matching fte */
list_for_each_entry(iter, match_head, list) {
struct fs_fte *fte_tmp;
@@ -1645,6 +1646,11 @@ search_again_locked:
return rule;
}
+skip_search:
+ /* No group with matching fte found, or we skipped the search.
+ * Try to add a new fte to any matching fg.
+ */
+
/* Check the ft version, for case that new flow group
* was added while the fgs weren't locked
*/
@@ -1975,12 +1981,24 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
fg->id);
}
+struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
+ int n)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ if (!steering || !steering->fdb_sub_ns)
+ return NULL;
+
+ return steering->fdb_sub_ns[n];
+}
+EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
+
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
struct mlx5_flow_root_namespace *root_ns;
- int prio;
+ int prio = 0;
struct fs_prio *fs_prio;
struct mlx5_flow_namespace *ns;
@@ -1988,40 +2006,29 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return NULL;
switch (type) {
- case MLX5_FLOW_NAMESPACE_BYPASS:
- case MLX5_FLOW_NAMESPACE_LAG:
- case MLX5_FLOW_NAMESPACE_OFFLOADS:
- case MLX5_FLOW_NAMESPACE_ETHTOOL:
- case MLX5_FLOW_NAMESPACE_KERNEL:
- case MLX5_FLOW_NAMESPACE_LEFTOVERS:
- case MLX5_FLOW_NAMESPACE_ANCHOR:
- prio = type;
- break;
case MLX5_FLOW_NAMESPACE_FDB:
if (steering->fdb_root_ns)
return &steering->fdb_root_ns->ns;
- else
- return NULL;
+ return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
- else
- return NULL;
+ return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns;
- else
- return NULL;
- case MLX5_FLOW_NAMESPACE_EGRESS:
- if (steering->egress_root_ns)
- return &steering->egress_root_ns->ns;
- else
- return NULL;
- default:
return NULL;
+ default:
+ break;
+ }
+
+ if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
+ root_ns = steering->egress_root_ns;
+ } else { /* Must be NIC RX */
+ root_ns = steering->root_ns;
+ prio = type;
}
- root_ns = steering->root_ns;
if (!root_ns)
return NULL;
@@ -2064,8 +2071,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
}
}
-static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
- unsigned int prio, int num_levels)
+static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
+ unsigned int prio,
+ int num_levels,
+ enum fs_node_type type)
{
struct fs_prio *fs_prio;
@@ -2073,7 +2082,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
if (!fs_prio)
return ERR_PTR(-ENOMEM);
- fs_prio->node.type = FS_TYPE_PRIO;
+ fs_prio->node.type = type;
tree_init_node(&fs_prio->node, NULL, del_sw_prio);
tree_add_node(&fs_prio->node, &ns->node);
fs_prio->num_levels = num_levels;
@@ -2083,6 +2092,19 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
return fs_prio;
}
+static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
+ unsigned int prio,
+ int num_levels)
+{
+ return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
+}
+
+static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
+ unsigned int prio, int num_levels)
+{
+ return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
+}
+
static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
*ns)
{
@@ -2387,6 +2409,9 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
cleanup_egress_acls_root_ns(dev);
cleanup_ingress_acls_root_ns(dev);
cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
+ kfree(steering->fdb_sub_ns);
+ steering->fdb_sub_ns = NULL;
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
@@ -2432,27 +2457,64 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
- struct fs_prio *prio;
+ struct mlx5_flow_namespace *ns;
+ struct fs_prio *maj_prio;
+ struct fs_prio *min_prio;
+ int levels;
+ int chain;
+ int prio;
+ int err;
steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
if (!steering->fdb_root_ns)
return -ENOMEM;
- prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);
- if (IS_ERR(prio))
+ steering->fdb_sub_ns = kcalloc(FDB_MAX_CHAIN + 1,
+ sizeof(*steering->fdb_sub_ns), GFP_KERNEL);
+ if (!steering->fdb_sub_ns)
+ return -ENOMEM;
+
+ levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
+ maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, 0,
+ levels);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
goto out_err;
+ }
+
+ for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
+ ns = fs_create_namespace(maj_prio);
+ if (IS_ERR(ns)) {
+ err = PTR_ERR(ns);
+ goto out_err;
+ }
+
+ for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) {
+ min_prio = fs_create_prio(ns, prio, 2);
+ if (IS_ERR(min_prio)) {
+ err = PTR_ERR(min_prio);
+ goto out_err;
+ }
+ }
+
+ steering->fdb_sub_ns[chain] = ns;
+ }
- prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
- if (IS_ERR(prio))
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
goto out_err;
+ }
set_prio_attrs(steering->fdb_root_ns);
return 0;
out_err:
cleanup_root_ns(steering->fdb_root_ns);
+ kfree(steering->fdb_sub_ns);
+ steering->fdb_sub_ns = NULL;
steering->fdb_root_ns = NULL;
- return PTR_ERR(prio);
+ return err;
}
static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
@@ -2537,16 +2599,23 @@ cleanup_root_ns:
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
- struct fs_prio *prio;
+ int err;
steering->egress_root_ns = create_root_ns(steering,
FS_FT_NIC_TX);
if (!steering->egress_root_ns)
return -ENOMEM;
- /* create 1 prio*/
- prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
+ err = init_root_tree(steering, &egress_root_fs,
+ &steering->egress_root_ns->ns.node);
+ if (err)
+ goto cleanup;
+ set_prio_attrs(steering->egress_root_ns);
+ return 0;
+cleanup:
+ cleanup_root_ns(steering->egress_root_ns);
+ steering->egress_root_ns = NULL;
+ return err;
}
int mlx5_init_fs(struct mlx5_core_dev *dev)
@@ -2614,7 +2683,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_IPSEC_DEV(dev)) {
+ if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 32070e5d993d..b51ad217da32 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -36,10 +36,23 @@
#include <linux/refcount.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
+#include <linux/llist.h>
+
+/* FS_TYPE_PRIO_CHAINS is a PRIO that contains only namespaces, and those
+ * namespaces are parallel to one another when walking them to connect a
+ * new flow table: the last flow table in a TYPE_PRIO prio of one parallel
+ * namespace will not automatically connect to the first flow table found
+ * in any prio of any following namespace, but will instead skip the
+ * entire containing TYPE_PRIO_CHAINS prio.
+ *
+ * This is used to implement tc chains: each chain of prios is a different
+ * namespace inside a containing TYPE_PRIO_CHAINS prio.
+ */
enum fs_node_type {
FS_TYPE_NAMESPACE,
FS_TYPE_PRIO,
+ FS_TYPE_PRIO_CHAINS,
FS_TYPE_FLOW_TABLE,
FS_TYPE_FLOW_GROUP,
FS_TYPE_FLOW_ENTRY,
@@ -72,6 +85,7 @@ struct mlx5_flow_steering {
struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
+ struct mlx5_flow_namespace **fdb_sub_ns;
struct mlx5_flow_root_namespace **esw_egress_root_ns;
struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
@@ -138,8 +152,9 @@ struct mlx5_fc_cache {
};
struct mlx5_fc {
- struct rb_node node;
struct list_head list;
+ struct llist_node addlist;
+ struct llist_node dellist;
/* last{packets,bytes} members are used when calculating the delta since
* last reading
@@ -148,7 +163,6 @@ struct mlx5_fc {
u64 lastbytes;
u32 id;
- bool deleted;
bool aging;
struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 58af6be13dfa..32accd6b041b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -52,11 +52,13 @@
* access to counter list:
* - create (user context)
* - mlx5_fc_create() only adds to an addlist to be used by
+ * mlx5_fc_stats_query_work(). addlist is a lockless singly linked list
+ * that doesn't require any additional synchronization when adding a
+ * single node.
+ * node.
* - spawn thread to do the actual destroy
*
* - destroy (user context)
- * - mark a counter as deleted
+ * - add a counter to lockless dellist
* - spawn thread to do the actual del
*
* - dump (user context)
@@ -71,36 +73,55 @@
* elapsed, the thread will actually query the hardware.
*/
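The llist API used here lets any number of producers add counters with a single atomic operation, while the lone workqueue consumer detaches the whole list at once with llist_del_all(). A minimal standalone model of that producer/consumer scheme, with a hypothetical item struct (a sketch, not the driver's code):

	#include <linux/llist.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct item {
		struct llist_node node;
		u32 id;
	};

	static LLIST_HEAD(addlist);

	/* producer: lock-free, one atomic op per added node */
	static int producer_add(u32 id)
	{
		struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

		if (!it)
			return -ENOMEM;
		it->id = id;
		llist_add(&it->node, &addlist);
		return 0;
	}

	/* single consumer (the workqueue): detach everything atomically */
	static void consumer_drain(void)
	{
		struct llist_node *list = llist_del_all(&addlist);
		struct item *it, *tmp;

		llist_for_each_entry_safe(it, tmp, list, node)
			kfree(it);	/* or splice into the sorted counters list */
	}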
-static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
+static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
+ u32 id)
{
- struct rb_node **new = &root->rb_node;
- struct rb_node *parent = NULL;
-
- while (*new) {
- struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
- int result = counter->id - this->id;
-
- parent = *new;
- if (result < 0)
- new = &((*new)->rb_left);
- else
- new = &((*new)->rb_right);
- }
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ unsigned long next_id = (unsigned long)id + 1;
+ struct mlx5_fc *counter;
+
+ rcu_read_lock();
+ /* skip counters that are in idr, but not yet in counters list */
+ while ((counter = idr_get_next_ul(&fc_stats->counters_idr,
+ &next_id)) != NULL &&
+ list_empty(&counter->list))
+ next_id++;
+ rcu_read_unlock();
+
+ return counter ? &counter->list : &fc_stats->counters;
+}
+
+static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
+ struct mlx5_fc *counter)
+{
+ struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);
+
+ list_add_tail(&counter->list, next);
+}
+
+static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
+ struct mlx5_fc *counter)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- /* Add new node and rebalance tree. */
- rb_link_node(&counter->node, parent, new);
- rb_insert_color(&counter->node, root);
+ list_del(&counter->list);
+
+ spin_lock(&fc_stats->counters_idr_lock);
+ WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
+ spin_unlock(&fc_stats->counters_idr_lock);
}
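The insert side keeps the counters list sorted by id without a tree: the IDR is consulted for the next-higher id and the new counter is linked just before it, e.g. inserting id 5 into {3, 7, 9} yields {3, 5, 7, 9}, so the bulk query can walk a contiguous id range. A compact linear-scan restatement of that invariant (the real code uses the IDR lookup above instead of scanning; this is illustrative only):

	#include <linux/list.h>
	#include <linux/types.h>

	struct ctr {
		struct list_head list;
		u32 id;
	};

	/* Insert `new` just before the first entry with a larger id;
	 * if none exists, it lands at the tail.
	 */
	static void sorted_insert(struct list_head *head, struct ctr *new)
	{
		struct list_head *pos = head;
		struct ctr *cur;

		list_for_each_entry(cur, head, list) {
			if (cur->id > new->id) {
				pos = &cur->list;
				break;
			}
		}
		list_add_tail(&new->list, pos);
	}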
-/* The function returns the last node that was queried so the caller
+/* The function returns the last counter that was queried so the caller
* function can continue calling it till all counters are queried.
*/
-static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
+static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
struct mlx5_fc *first,
u32 last_id)
{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc *counter = NULL;
struct mlx5_cmd_fc_bulk *b;
- struct rb_node *node = NULL;
+ bool more = false;
u32 afirst_id;
int num;
int err;
@@ -130,14 +151,16 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
goto out;
}
- for (node = &first->node; node; node = rb_next(node)) {
- struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
+ counter = first;
+ list_for_each_entry_from(counter, &fc_stats->counters, list) {
struct mlx5_fc_cache *c = &counter->cache;
u64 packets;
u64 bytes;
- if (counter->id > last_id)
+ if (counter->id > last_id) {
+ more = true;
break;
+ }
mlx5_cmd_fc_bulk_get(dev, b,
counter->id, &packets, &bytes);
@@ -153,7 +176,14 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
out:
mlx5_cmd_fc_bulk_free(b);
- return node;
+ return more ? counter : NULL;
+}
+
+static void mlx5_free_fc(struct mlx5_core_dev *dev,
+ struct mlx5_fc *counter)
+{
+ mlx5_cmd_fc_free(dev, counter->id);
+ kfree(counter);
}
static void mlx5_fc_stats_work(struct work_struct *work)
@@ -161,52 +191,36 @@ static void mlx5_fc_stats_work(struct work_struct *work)
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
priv.fc_stats.work.work);
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ /* Take dellist first to ensure that counters cannot be deleted before
+ * they are inserted.
+ */
+ struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
+ struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
+ struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
unsigned long now = jiffies;
- struct mlx5_fc *counter = NULL;
- struct mlx5_fc *last = NULL;
- struct rb_node *node;
- LIST_HEAD(tmplist);
- spin_lock(&fc_stats->addlist_lock);
-
- list_splice_tail_init(&fc_stats->addlist, &tmplist);
-
- if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
+ if (addlist || !list_empty(&fc_stats->counters))
queue_delayed_work(fc_stats->wq, &fc_stats->work,
fc_stats->sampling_interval);
- spin_unlock(&fc_stats->addlist_lock);
-
- list_for_each_entry(counter, &tmplist, list)
- mlx5_fc_stats_insert(&fc_stats->counters, counter);
+ llist_for_each_entry(counter, addlist, addlist)
+ mlx5_fc_stats_insert(dev, counter);
- node = rb_first(&fc_stats->counters);
- while (node) {
- counter = rb_entry(node, struct mlx5_fc, node);
+ llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
+ mlx5_fc_stats_remove(dev, counter);
- node = rb_next(node);
-
- if (counter->deleted) {
- rb_erase(&counter->node, &fc_stats->counters);
-
- mlx5_cmd_fc_free(dev, counter->id);
-
- kfree(counter);
- continue;
- }
-
- last = counter;
+ mlx5_free_fc(dev, counter);
}
- if (time_before(now, fc_stats->next_query) || !last)
+ if (time_before(now, fc_stats->next_query) ||
+ list_empty(&fc_stats->counters))
return;
+ last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);
- node = rb_first(&fc_stats->counters);
- while (node) {
- counter = rb_entry(node, struct mlx5_fc, node);
-
- node = mlx5_fc_stats_query(dev, counter, last->id);
- }
+ counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
+ list);
+ while (counter)
+ counter = mlx5_fc_stats_query(dev, counter, last->id);
fc_stats->next_query = now + fc_stats->sampling_interval;
}
@@ -220,24 +234,38 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
counter = kzalloc(sizeof(*counter), GFP_KERNEL);
if (!counter)
return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&counter->list);
err = mlx5_cmd_fc_alloc(dev, &counter->id);
if (err)
goto err_out;
if (aging) {
+ u32 id = counter->id;
+
counter->cache.lastuse = jiffies;
counter->aging = true;
- spin_lock(&fc_stats->addlist_lock);
- list_add(&counter->list, &fc_stats->addlist);
- spin_unlock(&fc_stats->addlist_lock);
+ idr_preload(GFP_KERNEL);
+ spin_lock(&fc_stats->counters_idr_lock);
+
+ err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
+ GFP_NOWAIT);
+
+ spin_unlock(&fc_stats->counters_idr_lock);
+ idr_preload_end();
+ if (err)
+ goto err_out_alloc;
+
+ llist_add(&counter->addlist, &fc_stats->addlist);
mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
}
return counter;
+err_out_alloc:
+ mlx5_cmd_fc_free(dev, counter->id);
err_out:
kfree(counter);
@@ -245,6 +273,12 @@ err_out:
}
EXPORT_SYMBOL(mlx5_fc_create);
+u32 mlx5_fc_id(struct mlx5_fc *counter)
+{
+ return counter->id;
+}
+EXPORT_SYMBOL(mlx5_fc_id);
+
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
@@ -253,13 +287,12 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
return;
if (counter->aging) {
- counter->deleted = true;
+ llist_add(&counter->dellist, &fc_stats->dellist);
mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
return;
}
- mlx5_cmd_fc_free(dev, counter->id);
- kfree(counter);
+ mlx5_free_fc(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);
@@ -267,9 +300,11 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- fc_stats->counters = RB_ROOT;
- INIT_LIST_HEAD(&fc_stats->addlist);
- spin_lock_init(&fc_stats->addlist_lock);
+ spin_lock_init(&fc_stats->counters_idr_lock);
+ idr_init(&fc_stats->counters_idr);
+ INIT_LIST_HEAD(&fc_stats->counters);
+ init_llist_head(&fc_stats->addlist);
+ init_llist_head(&fc_stats->dellist);
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
@@ -284,34 +319,22 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct llist_node *tmplist;
struct mlx5_fc *counter;
struct mlx5_fc *tmp;
- struct rb_node *node;
cancel_delayed_work_sync(&dev->priv.fc_stats.work);
destroy_workqueue(dev->priv.fc_stats.wq);
dev->priv.fc_stats.wq = NULL;
- list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
- list_del(&counter->list);
-
- mlx5_cmd_fc_free(dev, counter->id);
+ idr_destroy(&fc_stats->counters_idr);
- kfree(counter);
- }
-
- node = rb_first(&fc_stats->counters);
- while (node) {
- counter = rb_entry(node, struct mlx5_fc, node);
-
- node = rb_next(node);
+ tmplist = llist_del_all(&fc_stats->addlist);
+ llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
+ mlx5_free_fc(dev, counter);
- rb_erase(&counter->node, &fc_stats->counters);
-
- mlx5_cmd_fc_free(dev, counter->id);
-
- kfree(counter);
- }
+ list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
+ mlx5_free_fc(dev, counter);
}
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
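The fs_counters rework above trades the rbtree and the per-counter deleted flag for an IDR (id-ordered lookup under RCU) plus two lockless llists that only the single worker drains. A minimal sketch of that producer/consumer shape, assuming hypothetical fc_node/pending_add names rather than the driver's types:

#include <linux/llist.h>
#include <linux/slab.h>

/* Hypothetical node type; mirrors how mlx5_fc embeds its llist_node. */
struct fc_node {
        struct llist_node addlist;
        int id;
};

static LLIST_HEAD(pending_add);

/* Producer side: llist_add() is lock-free and safe from any context. */
static void fc_node_queue(struct fc_node *n)
{
        llist_add(&n->addlist, &pending_add);
}

/* Consumer side (single worker): llist_del_all() detaches the whole
 * chain atomically, so no lock is held while walking it.
 */
static void fc_node_drain(void)
{
        struct llist_node *list = llist_del_all(&pending_add);
        struct fc_node *n, *tmp;

        llist_for_each_entry_safe(n, tmp, list, addlist)
                kfree(n);
}

Because the whole chain is detached in one atomic step, the worker never races the producers, which is what lets the old addlist_lock spinlock go away.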
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 41ad24f0de2c..1ab6f7e3bec6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -250,7 +250,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
if (ret)
return ret;
- force_state = MLX5_GET(teardown_hca_out, out, force_state);
+ force_state = MLX5_GET(teardown_hca_out, out, state);
if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
return -EIO;
@@ -259,6 +259,54 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
return 0;
}
+#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
+{
+ unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ int state;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, fast_teardown)) {
+ mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+ MLX5_SET(teardown_hca_in, in, profile,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ state = MLX5_GET(teardown_hca_out, out, state);
+ if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
+ mlx5_core_warn(dev, "teardown with fast mode failed\n");
+ return -EIO;
+ }
+
+ mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+
+ /* Loop until the device state turns to disabled */
+ end = jiffies + msecs_to_jiffies(delay_ms);
+ do {
+ if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ break;
+
+ cond_resched();
+ } while (!time_after(jiffies, end));
+
+ if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
+ mlx5_get_nic_state(dev), delay_ms);
+ return -EIO;
+ }
+
+ return 0;
+}
+
enum mlxsw_reg_mcc_instruction {
MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
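mlx5_cmd_fast_teardown_hca() above ends with a bounded poll for the NIC interface to reach the disabled state. The same jiffies-deadline idiom as a standalone sketch; read_hw_state() and HW_STATE_DISABLED are stand-ins for illustration, not driver API:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

#define HW_STATE_DISABLED       1

extern int read_hw_state(void);         /* hypothetical register read */

static int wait_for_disabled(unsigned long timeout_ms)
{
        unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

        do {
                if (read_hw_state() == HW_STATE_DISABLED)
                        return 0;
                cond_resched();         /* yield while busy-waiting */
        } while (!time_after(jiffies, end));

        /* Final check: the state may have flipped right at the deadline. */
        return read_hw_state() == HW_STATE_DISABLED ? 0 : -ETIMEDOUT;
}

The re-check after the loop matters: the task can be scheduled out past the deadline between a successful state change and the time_after() test.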
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9f39aeca863f..43118de8ee99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -59,22 +59,25 @@ enum {
};
enum {
- MLX5_NIC_IFC_FULL = 0,
- MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2,
- MLX5_NIC_IFC_INVALID = 3
-};
-
-enum {
MLX5_DROP_NEW_HEALTH_WORK,
MLX5_DROP_NEW_RECOVERY_WORK,
};
-static u8 get_nic_state(struct mlx5_core_dev *dev)
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
}
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
+{
+ u32 cur_cmdq_addr_l_sz;
+
+ cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
+ iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
+ state << MLX5_NIC_IFC_OFFSET,
+ &dev->iseg->cmdq_addr_l_sz);
+}
+
static void trigger_cmd_completions(struct mlx5_core_dev *dev)
{
unsigned long flags;
@@ -103,7 +106,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
return 1;
if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -133,7 +136,7 @@ unlock:
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
- u8 nic_interface = get_nic_state(dev);
+ u8 nic_interface = mlx5_get_nic_state(dev);
switch (nic_interface) {
case MLX5_NIC_IFC_FULL:
@@ -168,7 +171,7 @@ static void health_recover(struct work_struct *work)
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
- nic_state = get_nic_state(dev);
+ nic_state = mlx5_get_nic_state(dev);
if (nic_state == MLX5_NIC_IFC_INVALID) {
dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
return;
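mlx5_set_nic_state() above does a read-modify-write of a small field inside the big-endian cmdq_addr_l_sz register (the driver clears the entire low 12 bits around the field). A generic sketch of the mask-and-shift pattern; STATE_SHIFT and STATE_MASK are illustrative values, not the device layout:

#include <linux/io.h>
#include <linux/types.h>

#define STATE_SHIFT     8
#define STATE_MASK      (0x3u << STATE_SHIFT)  /* two-bit field (example) */

static void set_state_field(void __iomem *reg, u32 state)
{
        u32 val = ioread32be(reg);      /* read current register value */

        val &= ~STATE_MASK;                             /* clear the field */
        val |= (state << STATE_SHIFT) & STATE_MASK;     /* insert new value */
        iowrite32be(val, reg);          /* write back, big-endian */
}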
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index e3797a44e074..b59953daf8b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -45,6 +45,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
.ndo_stop = mlx5i_close,
+ .ndo_get_stats64 = mlx5i_get_stats,
.ndo_init = mlx5i_dev_init,
.ndo_uninit = mlx5i_dev_cleanup,
.ndo_change_mtu = mlx5i_change_mtu,
@@ -70,26 +71,25 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
-void mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+int mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
u16 max_mtu;
+ int err;
- /* priv init */
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->profile = profile;
- priv->ppriv = ppriv;
- mutex_init(&priv->state_lock);
+ err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
+ if (err)
+ return err;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
netdev->mtu = max_mtu;
mlx5e_build_nic_params(mdev, &priv->channels.params,
- profile->max_nch(mdev), netdev->mtu);
+ mlx5e_get_netdev_max_channels(netdev),
+ netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -106,12 +106,56 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
netdev->netdev_ops = &mlx5i_netdev_ops;
netdev->ethtool_ops = &mlx5i_ethtool_ops;
+
+ return 0;
}
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
-static void mlx5i_cleanup(struct mlx5e_priv *priv)
+void mlx5i_cleanup(struct mlx5e_priv *priv)
+{
+ mlx5e_netdev_cleanup(priv->netdev, priv);
+}
+
+static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
+{
+ int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
+ struct mlx5e_sw_stats s = { 0 };
+ int i, j;
+
+ for (i = 0; i < max_nch; i++) {
+ struct mlx5e_channel_stats *channel_stats;
+ struct mlx5e_rq_stats *rq_stats;
+
+ channel_stats = &priv->channel_stats[i];
+ rq_stats = &channel_stats->rq;
+
+ s.rx_packets += rq_stats->packets;
+ s.rx_bytes += rq_stats->bytes;
+
+ for (j = 0; j < priv->max_opened_tc; j++) {
+ struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
+
+ s.tx_packets += sq_stats->packets;
+ s.tx_bytes += sq_stats->bytes;
+ s.tx_queue_dropped += sq_stats->dropped;
+ }
+ }
+
+ memcpy(&priv->stats.sw, &s, sizeof(s));
+}
+
+void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- /* Do nothing .. */
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct mlx5e_sw_stats *sstats = &priv->stats.sw;
+
+ mlx5i_grp_sw_update_stats(priv);
+
+ stats->rx_packets = sstats->rx_packets;
+ stats->rx_bytes = sstats->rx_bytes;
+ stats->tx_packets = sstats->tx_packets;
+ stats->tx_bytes = sstats->tx_bytes;
+ stats->tx_dropped = sstats->tx_queue_dropped;
}
int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
@@ -306,17 +350,26 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
int err;
+ mlx5e_create_q_counters(priv);
+
+ err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
+ if (err) {
+ mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+ goto err_destroy_q_counters;
+ }
+
err = mlx5e_create_indirect_rqt(priv);
if (err)
- return err;
+ goto err_close_drop_rq;
err = mlx5e_create_direct_rqts(priv);
if (err)
goto err_destroy_indirect_rqts;
- err = mlx5e_create_indirect_tirs(priv);
+ err = mlx5e_create_indirect_tirs(priv, true);
if (err)
goto err_destroy_direct_rqts;
@@ -333,11 +386,15 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
- mlx5e_destroy_indirect_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv, true);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+err_close_drop_rq:
+ mlx5e_close_drop_rq(&priv->drop_rq);
+err_destroy_q_counters:
+ mlx5e_destroy_q_counters(priv);
return err;
}
@@ -345,9 +402,11 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5i_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
- mlx5e_destroy_indirect_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv, true);
mlx5e_destroy_direct_rqts(priv);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ mlx5e_close_drop_rq(&priv->drop_rq);
+ mlx5e_destroy_q_counters(priv);
}
static const struct mlx5e_profile mlx5i_nic_profile = {
@@ -360,7 +419,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.enable = NULL, /* mlx5i_enable */
.disable = NULL, /* mlx5i_disable */
.update_stats = NULL, /* mlx5i_update_stats */
- .max_nch = mlx5e_get_max_num_channels,
.update_carrier = NULL, /* no HW update in IB link */
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
@@ -592,7 +650,6 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
mlx5e_detach_netdev(priv);
profile->cleanup(priv);
- destroy_workqueue(priv->wq);
if (!ipriv->sub_interface) {
mlx5i_pkey_qpn_ht_cleanup(netdev);
@@ -600,58 +657,37 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
}
}
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *))
+static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
+{
+ return mdev->mlx5e_res.pdn != 0;
+}
+
+static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
+{
+ if (mlx5_is_sub_interface(mdev))
+ return mlx5i_pkey_get_profile();
+ return &mlx5i_nic_profile;
+}
+
+static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
+ struct net_device *netdev, void *param)
{
- const struct mlx5e_profile *profile;
- struct net_device *netdev;
+ struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
+ const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
struct mlx5i_priv *ipriv;
struct mlx5e_priv *epriv;
struct rdma_netdev *rn;
- bool sub_interface;
- int nch;
int err;
- if (mlx5i_check_required_hca_cap(mdev)) {
- mlx5_core_warn(mdev, "Accelerated mode is not supported\n");
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- /* TODO: Need to find a better way to check if child device*/
- sub_interface = (mdev->mlx5e_res.pdn != 0);
-
- if (sub_interface)
- profile = mlx5i_pkey_get_profile();
- else
- profile = &mlx5i_nic_profile;
-
- nch = profile->max_nch(mdev);
-
- netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
- name, NET_NAME_UNKNOWN,
- setup,
- nch * MLX5E_MAX_NUM_TC,
- nch);
- if (!netdev) {
- mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
- return NULL;
- }
-
ipriv = netdev_priv(netdev);
epriv = mlx5i_epriv(netdev);
- epriv->wq = create_singlethread_workqueue("mlx5i");
- if (!epriv->wq)
- goto err_free_netdev;
-
- ipriv->sub_interface = sub_interface;
+ ipriv->sub_interface = mlx5_is_sub_interface(mdev);
if (!ipriv->sub_interface) {
err = mlx5i_pkey_qpn_ht_init(netdev);
if (err) {
mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
- goto destroy_wq;
+ return err;
}
/* This should only be called once per mdev */
@@ -660,7 +696,7 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
goto destroy_ht;
}
- profile->init(mdev, netdev, profile, ipriv);
+ prof->init(mdev, netdev, prof, ipriv);
mlx5e_attach_netdev(epriv);
netif_carrier_off(netdev);
@@ -676,15 +712,35 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
netdev->priv_destructor = mlx5_rdma_netdev_free;
netdev->needs_free_netdev = 1;
- return netdev;
+ return 0;
destroy_ht:
mlx5i_pkey_qpn_ht_cleanup(netdev);
-destroy_wq:
- destroy_workqueue(epriv->wq);
-err_free_netdev:
- free_netdev(netdev);
+ return err;
+}
- return NULL;
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+ struct ib_device *device,
+ struct rdma_netdev_alloc_params *params)
+{
+ int nch;
+ int rc;
+
+ rc = mlx5i_check_required_hca_cap(mdev);
+ if (rc)
+ return rc;
+
+ nch = mlx5e_get_max_num_channels(mdev);
+
+ *params = (struct rdma_netdev_alloc_params){
+ .sizeof_priv = sizeof(struct mlx5i_priv) +
+ sizeof(struct mlx5e_priv),
+ .txqs = nch * MLX5E_MAX_NUM_TC,
+ .rxqs = nch,
+ .param = mdev,
+ .initialize_rdma_netdev = mlx5_rdma_setup_rn,
+ };
+
+ return 0;
}
-EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
+EXPORT_SYMBOL(mlx5_rdma_rn_get_params);
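mlx5i_grp_sw_update_stats() above folds per-channel RQ/SQ counters into one software total, accumulating on the stack and publishing with a single memcpy so a concurrent ndo_get_stats64 reader never sees a half-updated set. The aggregation condensed, assuming simplified stand-in structs:

#include <linux/string.h>
#include <linux/types.h>

/* Simplified stand-ins for the driver's per-channel stats. */
struct ch_stats {
        u64 rx_packets, rx_bytes;
        u64 tx_packets, tx_bytes, tx_dropped;
};

struct sw_totals {
        u64 rx_packets, rx_bytes;
        u64 tx_packets, tx_bytes, tx_dropped;
};

static void fold_channel_stats(const struct ch_stats *ch, int nch,
                               struct sw_totals *out)
{
        struct sw_totals s = {};        /* accumulate locally ... */
        int i;

        for (i = 0; i < nch; i++) {
                s.rx_packets += ch[i].rx_packets;
                s.rx_bytes   += ch[i].rx_bytes;
                s.tx_packets += ch[i].tx_packets;
                s.tx_bytes   += ch[i].tx_bytes;
                s.tx_dropped += ch[i].tx_dropped;
        }

        memcpy(out, &s, sizeof(s));     /* ... then publish once at the end */
}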
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 08eac92fc26c..9165ca567047 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -84,10 +84,11 @@ void mlx5i_dev_cleanup(struct net_device *dev);
int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
-void mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv);
+int mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
+void mlx5i_cleanup(struct mlx5e_priv *priv);
/* Get child interface nic profile */
const struct mlx5e_profile *mlx5i_pkey_get_profile(void);
@@ -109,18 +110,18 @@ struct mlx5i_tx_wqe {
static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
struct mlx5i_tx_wqe **wqe,
- u16 *pi)
+ u16 pi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+ *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memset(*wqe, 0, sizeof(**wqe));
}
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_av *av, u32 dqpn, u32 dqkey);
void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
#endif /* CONFIG_MLX5_CORE_IPOIB */
#endif /* __MLX5E_IPOB_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 54a188f41f90..b491b8f5fd6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -146,6 +146,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_open = mlx5i_pkey_open,
.ndo_stop = mlx5i_pkey_close,
.ndo_init = mlx5i_pkey_dev_init,
+ .ndo_get_stats64 = mlx5i_get_stats,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
.ndo_do_ioctl = mlx5i_pkey_ioctl,
@@ -274,14 +275,17 @@ static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu)
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
-static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+static int mlx5i_pkey_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ int err;
- mlx5i_init(mdev, netdev, profile, ppriv);
+ err = mlx5i_init(mdev, netdev, profile, ppriv);
+ if (err)
+ return err;
/* Override parent ndo */
netdev->netdev_ops = &mlx5i_pkey_netdev_ops;
@@ -291,12 +295,14 @@ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
/* Use dummy rqs */
priv->channels.params.log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+
+ return 0;
}
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
static void mlx5i_pkey_cleanup(struct mlx5e_priv *priv)
{
- /* Do nothing .. */
+ mlx5i_cleanup(priv);
}
static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv)
@@ -345,7 +351,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.enable = NULL,
.disable = NULL,
.update_stats = NULL,
- .max_nch = mlx5e_get_max_num_channels,
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 3f767cde4c1d..0d90b1b4a3d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -111,10 +111,10 @@ static void mlx5_pps_out(struct work_struct *work)
for (i = 0; i < clock->ptp_info.n_pins; i++) {
u64 tstart;
- write_lock_irqsave(&clock->lock, flags);
+ write_seqlock_irqsave(&clock->lock, flags);
tstart = clock->pps_info.start[i];
clock->pps_info.start[i] = 0;
- write_unlock_irqrestore(&clock->lock, flags);
+ write_sequnlock_irqrestore(&clock->lock, flags);
if (!tstart)
continue;
@@ -132,10 +132,10 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
overflow_work);
unsigned long flags;
- write_lock_irqsave(&clock->lock, flags);
+ write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
mlx5_update_clock_info_page(clock->mdev);
- write_unlock_irqrestore(&clock->lock, flags);
+ write_sequnlock_irqrestore(&clock->lock, flags);
schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
@@ -147,10 +147,10 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
u64 ns = timespec64_to_ns(ts);
unsigned long flags;
- write_lock_irqsave(&clock->lock, flags);
+ write_seqlock_irqsave(&clock->lock, flags);
timecounter_init(&clock->tc, &clock->cycles, ns);
mlx5_update_clock_info_page(clock->mdev);
- write_unlock_irqrestore(&clock->lock, flags);
+ write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
}
@@ -162,9 +162,9 @@ static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
u64 ns;
unsigned long flags;
- write_lock_irqsave(&clock->lock, flags);
+ write_seqlock_irqsave(&clock->lock, flags);
ns = timecounter_read(&clock->tc);
- write_unlock_irqrestore(&clock->lock, flags);
+ write_sequnlock_irqrestore(&clock->lock, flags);
*ts = ns_to_timespec64(ns);
@@ -177,10 +177,10 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
ptp_info);
unsigned long flags;
- write_lock_irqsave(&clock->lock, flags);
+ write_seqlock_irqsave(&clock->lock, flags);
timecounter_adjtime(&clock->tc, delta);
mlx5_update_clock_info_page(clock->mdev);
- write_unlock_irqrestore(&clock->lock, flags);
+ write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
}
@@ -203,12 +203,12 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
- write_lock_irqsave(&clock->lock, flags);
+ write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
clock->nominal_c_mult + diff;
mlx5_update_clock_info_page(clock->mdev);
- write_unlock_irqrestore(&clock->lock, flags);
+ write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
}
@@ -307,12 +307,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
ts.tv_nsec = rq->perout.start.nsec;
ns = timespec64_to_ns(&ts);
cycles_now = mlx5_read_internal_timer(mdev);
- write_lock_irqsave(&clock->lock, flags);
+ write_seqlock_irqsave(&clock->lock, flags);
nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
nsec_delta = ns - nsec_now;
cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
clock->cycles.mult);
- write_unlock_irqrestore(&clock->lock, flags);
+ write_sequnlock_irqrestore(&clock->lock, flags);
time_stamp = cycles_now + cycles_delta;
field_select = MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
@@ -471,14 +471,14 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
ts.tv_sec += 1;
ts.tv_nsec = 0;
ns = timespec64_to_ns(&ts);
- write_lock_irqsave(&clock->lock, flags);
+ write_seqlock_irqsave(&clock->lock, flags);
nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
nsec_delta = ns - nsec_now;
cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
clock->cycles.mult);
clock->pps_info.start[pin] = cycles_now + cycles_delta;
schedule_work(&clock->pps_info.out_work);
- write_unlock_irqrestore(&clock->lock, flags);
+ write_sequnlock_irqrestore(&clock->lock, flags);
break;
default:
mlx5_core_err(mdev, " Unhandled event\n");
@@ -498,7 +498,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
return;
}
- rwlock_init(&clock->lock);
+ seqlock_init(&clock->lock);
clock->cycles.read = read_internal_timer;
clock->cycles.shift = MLX5_CYCLES_SHIFT;
clock->cycles.mult = clocksource_khz2mult(dev_freq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index 02e2e4575e4f..263cb6e2aeee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -46,11 +46,13 @@ static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
u64 timestamp)
{
+ unsigned int seq;
u64 nsec;
- read_lock(&clock->lock);
- nsec = timecounter_cyc2time(&clock->tc, timestamp);
- read_unlock(&clock->lock);
+ do {
+ seq = read_seqbegin(&clock->lock);
+ nsec = timecounter_cyc2time(&clock->tc, timestamp);
+ } while (read_seqretry(&clock->lock, seq));
return ns_to_ktime(nsec);
}
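The clock.c/clock.h hunks above convert the PTP clock lock from an rwlock to a seqlock: writers still serialize with interrupts off, but readers retry instead of taking the lock, which keeps the hot timestamp-conversion path cheap. The pattern in isolation, with clk_base standing in for the timecounter state:

#include <linux/seqlock.h>
#include <linux/types.h>

static seqlock_t clk_lock;
static u64 clk_base;                    /* state the writer updates */

static void clk_init(void)
{
        seqlock_init(&clk_lock);
}

/* Writer: exclusive, like a spinlock, and irq-safe. */
static void clk_write(u64 v)
{
        unsigned long flags;

        write_seqlock_irqsave(&clk_lock, flags);
        clk_base = v;
        write_sequnlock_irqrestore(&clk_lock, flags);
}

/* Reader: lockless; retries if a writer raced with the read. */
static u64 clk_read(void)
{
        unsigned int seq;
        u64 v;

        do {
                seq = read_seqbegin(&clk_lock);
                v = clk_base;
        } while (read_seqretry(&clk_lock, seq));

        return v;
}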
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b5e9f664fc66..28132c7dc05f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1594,12 +1594,17 @@ static const struct pci_error_handlers mlx5_err_handler = {
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
- int ret;
+ bool fast_teardown = false, force_teardown = false;
+ int ret = 1;
+
+ fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
+ force_teardown = MLX5_CAP_GEN(dev, force_teardown);
+
+ mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
+ mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);
- if (!MLX5_CAP_GEN(dev, force_teardown)) {
- mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+ if (!fast_teardown && !force_teardown)
return -EOPNOTSUPP;
- }
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
@@ -1612,13 +1617,19 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
mlx5_drain_health_wq(dev);
mlx5_stop_health_poll(dev, false);
+ ret = mlx5_cmd_fast_teardown_hca(dev);
+ if (!ret)
+ goto succeed;
+
ret = mlx5_cmd_force_teardown_hca(dev);
- if (ret) {
- mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
- mlx5_start_health_poll(dev);
- return ret;
- }
+ if (!ret)
+ goto succeed;
+
+ mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+ mlx5_start_health_poll(dev);
+ return ret;
+succeed:
mlx5_enter_error_state(dev, true);
/* Some platforms require freeing the IRQs in the shutdown
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b4134fa0bba3..0594d0961cb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,6 +39,7 @@
#include <linux/if_link.h>
#include <linux/firmware.h>
#include <linux/mlx5/cq.h>
+#include <linux/mlx5/fs.h>
#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0"
@@ -95,6 +96,8 @@ int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
+
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
@@ -169,17 +172,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);
-int mlx5_encap_alloc(struct mlx5_core_dev *dev,
- int header_type,
- size_t size,
- void *encap_header,
- u32 *encap_id);
-void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
-
-int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
- u8 namespace, u8 num_actions,
- void *modify_actions, u32 *modify_header_id);
-void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
@@ -214,4 +206,14 @@ int mlx5_lag_allow(struct mlx5_core_dev *dev);
int mlx5_lag_forbid(struct mlx5_core_dev *dev);
void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
+
+enum {
+ MLX5_NIC_IFC_FULL = 0,
+ MLX5_NIC_IFC_DISABLED = 1,
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2,
+ MLX5_NIC_IFC_INVALID = 3
+};
+
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 4ca07bfb6b14..91b8139a388d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -211,6 +211,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
}
qp->qpn = MLX5_GET(create_dct_out, out, dctn);
+ qp->uid = MLX5_GET(create_dct_in, in, uid);
err = create_resource_common(dev, qp, MLX5_RES_DCT);
if (err)
goto err_cmd;
@@ -219,6 +220,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
err_cmd:
MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
+ MLX5_SET(destroy_dct_in, din, uid, qp->uid);
mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
(void *)&out, sizeof(dout));
return err;
@@ -240,6 +242,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
if (err)
return err;
+ qp->uid = MLX5_GET(create_qp_in, in, uid);
qp->qpn = MLX5_GET(create_qp_out, out, qpn);
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
@@ -261,6 +264,7 @@ err_cmd:
memset(dout, 0, sizeof(dout));
MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, din, uid, qp->uid);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
@@ -275,6 +279,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
+ MLX5_SET(drain_dct_in, in, uid, qp->uid);
return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
(void *)&out, sizeof(out));
}
@@ -301,6 +306,7 @@ destroy:
destroy_resource_common(dev, &dct->mqp);
MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+ MLX5_SET(destroy_dct_in, in, uid, qp->uid);
err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
(void *)&out, sizeof(out));
return err;
@@ -320,6 +326,7 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, in, uid, qp->uid);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
@@ -373,7 +380,7 @@ static void mbox_free(struct mbox_info *mbox)
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
u32 opt_param_mask, void *qpc,
- struct mbox_info *mbox)
+ struct mbox_info *mbox, u16 uid)
{
mbox->out = NULL;
mbox->in = NULL;
@@ -381,26 +388,32 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
#define MBOX_ALLOC(mbox, typ) \
mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
-#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
- MLX5_SET(typ##_in, in, opcode, _opcode); \
- MLX5_SET(typ##_in, in, qpn, _qpn)
-
-#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
- MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
- MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
- memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
+#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \
+ do { \
+ MLX5_SET(typ##_in, in, opcode, _opcode); \
+ MLX5_SET(typ##_in, in, qpn, _qpn); \
+ MLX5_SET(typ##_in, in, uid, _uid); \
+ } while (0)
+
+#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \
+ do { \
+ MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \
+ MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
+ memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
+ MLX5_ST_SZ_BYTES(qpc)); \
+ } while (0)
switch (opcode) {
/* 2RST & 2ERR */
case MLX5_CMD_OP_2RST_QP:
if (MBOX_ALLOC(mbox, qp_2rst))
return -ENOMEM;
- MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
+ MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
break;
case MLX5_CMD_OP_2ERR_QP:
if (MBOX_ALLOC(mbox, qp_2err))
return -ENOMEM;
- MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
+ MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
break;
/* MODIFY with QPC */
@@ -408,37 +421,37 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
if (MBOX_ALLOC(mbox, rst2init_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
case MLX5_CMD_OP_INIT2RTR_QP:
if (MBOX_ALLOC(mbox, init2rtr_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
case MLX5_CMD_OP_RTR2RTS_QP:
if (MBOX_ALLOC(mbox, rtr2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
case MLX5_CMD_OP_RTS2RTS_QP:
if (MBOX_ALLOC(mbox, rts2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
case MLX5_CMD_OP_SQERR2RTS_QP:
if (MBOX_ALLOC(mbox, sqerr2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
case MLX5_CMD_OP_INIT2INIT_QP:
if (MBOX_ALLOC(mbox, init2init_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
- opt_param_mask, qpc);
+ opt_param_mask, qpc, uid);
break;
default:
mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
@@ -456,7 +469,7 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
int err;
err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
- opt_param_mask, qpc, &mbox);
+ opt_param_mask, qpc, &mbox, qp->uid);
if (err)
return err;
@@ -531,6 +544,17 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
+static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+ MLX5_SET(destroy_rq_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *rq)
{
@@ -541,6 +565,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
if (err)
return err;
+ rq->uid = MLX5_GET(create_rq_in, in, uid);
rq->qpn = rqn;
err = create_resource_common(dev, rq, MLX5_RES_RQ);
if (err)
@@ -549,7 +574,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return 0;
err_destroy_rq:
- mlx5_core_destroy_rq(dev, rq->qpn);
+ destroy_rq_tracked(dev, rq->qpn, rq->uid);
return err;
}
@@ -559,10 +584,21 @@ void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *rq)
{
destroy_resource_common(dev, rq);
- mlx5_core_destroy_rq(dev, rq->qpn);
+ destroy_rq_tracked(dev, rq->qpn, rq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
+static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};
+
+ MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, in, sqn, sqn);
+ MLX5_SET(destroy_sq_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *sq)
{
@@ -573,6 +609,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
if (err)
return err;
+ sq->uid = MLX5_GET(create_sq_in, in, uid);
sq->qpn = sqn;
err = create_resource_common(dev, sq, MLX5_RES_SQ);
if (err)
@@ -581,7 +618,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return 0;
err_destroy_sq:
- mlx5_core_destroy_sq(dev, sq->qpn);
+ destroy_sq_tracked(dev, sq->qpn, sq->uid);
return err;
}
@@ -591,7 +628,7 @@ void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *sq)
{
destroy_resource_common(dev, sq);
- mlx5_core_destroy_sq(dev, sq->qpn);
+ destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
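The qp.c hunk wraps MOD_QP_IN_SET()/MOD_QP_IN_SET_QPC() in do { ... } while (0) now that each expands to several statements; without the wrapper, a multi-statement macro misbehaves (or fails to compile) as the body of an if/else. A minimal illustration with a hypothetical SET_PAIR macro:

/* Unsafe: two statements, so only the first is guarded by the if. */
#define SET_PAIR_BAD(a, b)      (a) = 1; (b) = 2

/* Safe: behaves like a single statement and still takes a semicolon. */
#define SET_PAIR(a, b)                  \
        do {                            \
                (a) = 1;                \
                (b) = 2;                \
        } while (0)

static void example(int cond, int *x, int *y)
{
        if (cond)
                SET_PAIR(*x, *y);       /* both assignments stay guarded */
        else
                *x = 0;  /* with SET_PAIR_BAD, this else fails to compile */
}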
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index 23cc337a96c9..6a6fc9be01e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -73,7 +73,7 @@ static int get_pas_size(struct mlx5_srq_attr *in)
u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
u32 page_size = 1 << log_page_size;
u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
- u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
+ u32 rq_num_pas = DIV_ROUND_UP(rq_sz_po, page_size);
return rq_num_pas * sizeof(u64);
}
@@ -166,6 +166,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
if (!create_in)
return -ENOMEM;
+ MLX5_SET(create_srq_in, create_in, uid, in->uid);
srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
@@ -178,8 +179,10 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
sizeof(create_out));
kvfree(create_in);
- if (!err)
+ if (!err) {
srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
+ srq->uid = in->uid;
+ }
return err;
}
@@ -193,6 +196,7 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(destroy_srq_in, srq_in, opcode,
MLX5_CMD_OP_DESTROY_SRQ);
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
+ MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
@@ -208,6 +212,7 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
+ MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
srq_out, sizeof(srq_out));
@@ -260,6 +265,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
if (!create_in)
return -ENOMEM;
+ MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
xrc_srq_context_entry);
pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
@@ -277,6 +283,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
goto out;
srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+ srq->uid = in->uid;
out:
kvfree(create_in);
return err;
@@ -291,6 +298,7 @@ static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
@@ -306,6 +314,7 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);
return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
@@ -365,10 +374,13 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(create_rmp_in, create_in, uid, in->uid);
set_wq(wq, in);
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
+ if (!err)
+ srq->uid = in->uid;
kvfree(create_in);
return err;
@@ -377,7 +389,13 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
- return mlx5_core_destroy_rmp(dev, srq->srqn);
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};
+
+ MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int arm_rmp_cmd(struct mlx5_core_dev *dev,
@@ -400,6 +418,7 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev,
MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(modify_rmp_in, in, uid, srq->uid);
MLX5_SET(wq, wq, lwm, lwm);
MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
@@ -469,11 +488,14 @@ static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(xrqc, xrqc, user_index, in->user_index);
MLX5_SET(xrqc, xrqc, cqn, in->cqn);
MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
+ MLX5_SET(create_xrq_in, create_in, uid, in->uid);
err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
sizeof(create_out));
kvfree(create_in);
- if (!err)
+ if (!err) {
srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
+ srq->uid = in->uid;
+ }
return err;
}
@@ -485,6 +507,7 @@ static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
+ MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -500,6 +523,7 @@ static int arm_xrq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, uid, srq->uid);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
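Every srq.c hunk above follows one pattern: the create command carries a uid, the driver stashes it on the object once creation succeeds, and each later destroy/arm command echoes it back so firmware can attribute the operation to the creating user context. The round-trip reduced to a sketch with hypothetical command structs:

#include <linux/types.h>

struct hw_obj {
        u32 objn;
        u16 uid;                /* stashed at create time */
};

struct create_cmd  { u16 uid; };
struct destroy_cmd { u32 objn; u16 uid; };

static int obj_create(struct hw_obj *obj, const struct create_cmd *in,
                      int (*exec)(const void *cmd))
{
        int err = exec(in);

        if (!err)
                obj->uid = in->uid;     /* remember the creating context */
        return err;
}

static int obj_destroy(struct hw_obj *obj, int (*exec)(const void *cmd))
{
        struct destroy_cmd cmd = { .objn = obj->objn, .uid = obj->uid };

        return exec(&cmd);      /* firmware validates uid against creator */
}

Note that error paths get the same treatment: the destroy issued after a failed create (err_cmd in mlx5_core_create_dct(), for instance) must also carry the uid.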
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index dae1c5c5d27c..a1ee9a8a769e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
for (i = 0; i < hp->num_channels; i++) {
mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
- mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+ if (!hp->peer_gone)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
}
}
@@ -509,7 +510,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
- if (next_state == MLX5_RQC_STATE_RDY) {
+ if (next_state == MLX5_SQC_STATE_RDY) {
MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
}
@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
MLX5_RQC_STATE_RST, 0, 0);
/* unset peer SQs */
+ if (hp->peer_gone)
+ return;
for (i = 0; i < hp->num_channels; i++)
mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_RST, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index b02af317c125..cfbea66b4879 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1201,3 +1201,12 @@ int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
+
+u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
+{
+ if (!mdev->sys_image_guid)
+ mlx5_query_nic_vport_system_image_guid(mdev, &mdev->sys_image_guid);
+
+ return mdev->sys_image_guid;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 68e7f8df2a6d..2dcbf1ebfd6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
return (u32)wq->fbc.sz_m1 + 1;
}
-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
-{
- return wq->fbc.frag_sz_m1 + 1;
-}
-
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
return wq->fbc.sz_m1 + 1;
@@ -54,54 +49,37 @@ u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
return (u32)wq->fbc.sz_m1 + 1;
}
-static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
-{
- return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride;
-}
-
-static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
-{
- return mlx5_wq_cyc_get_byte_size(&wq->rq) +
- mlx5_wq_cyc_get_byte_size(&wq->sq);
-}
-
-static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
- return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride;
-}
-
-static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
-{
- return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride;
+ return ((u32)1 << log_sz) << log_stride;
}
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
int err;
- mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
- MLX5_GET(wq, wqc, log_wq_sz),
- fbc);
- wq->sz = wq->fbc.sz_m1 + 1;
-
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+ wq->db = wq_ctrl->db.db;
+
+ err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- fbc->frag_buf = wq_ctrl->buf;
- wq->db = wq_ctrl->db.db;
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
+ wq->sz = mlx5_wq_cyc_get_size(wq);
wq_ctrl->mdev = mdev;
@@ -113,46 +91,19 @@ err_db_free:
return err;
}
-static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
- struct mlx5_wq_qp *qp)
-{
- struct mlx5_frag_buf_ctrl *sq_fbc;
- struct mlx5_frag_buf *rqb, *sqb;
-
- rqb = &qp->rq.fbc.frag_buf;
- *rqb = *buf;
- rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
- rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
-
- sq_fbc = &qp->sq.fbc;
- sqb = &sq_fbc->frag_buf;
- *sqb = *buf;
- sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
- sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
- sqb->frags += rqb->npages; /* first part is for the rq */
- if (sq_fbc->strides_offset)
- sqb->frags--;
-}
-
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
- u16 sq_strides_offset;
- u32 rq_pg_remainder;
- int err;
+ u8 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
+ u8 log_rq_sz = MLX5_GET(qpc, qpc, log_rq_size);
+ u8 log_sq_stride = ilog2(MLX5_SEND_WQE_BB);
+ u8 log_sq_sz = MLX5_GET(qpc, qpc, log_sq_size);
- mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
- MLX5_GET(qpc, qpc, log_rq_size),
- &wq->rq.fbc);
+ u32 rq_byte_size;
+ int err;
- rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
- sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
- mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
- MLX5_GET(qpc, qpc, log_sq_size),
- sq_strides_offset,
- &wq->sq.fbc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -160,14 +111,32 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
+ err = mlx5_frag_buf_alloc_node(mdev,
+ wq_get_byte_sz(log_rq_sz, log_rq_stride) +
+ wq_get_byte_sz(log_sq_sz, log_sq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc);
+
+ rq_byte_size = wq_get_byte_sz(log_rq_sz, log_rq_stride);
+
+ if (rq_byte_size < PAGE_SIZE) {
+ /* SQ starts within the same page of the RQ */
+ u16 sq_strides_offset = rq_byte_size / MLX5_SEND_WQE_BB;
+
+ mlx5_init_fbc_offset(wq_ctrl->buf.frags,
+ log_sq_stride, log_sq_sz, sq_strides_offset,
+ &wq->sq.fbc);
+ } else {
+ u16 rq_npages = rq_byte_size >> PAGE_SHIFT;
+
+ mlx5_init_fbc(wq_ctrl->buf.frags + rq_npages,
+ log_sq_stride, log_sq_sz, &wq->sq.fbc);
+ }
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -186,17 +155,19 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) + 6;
+ u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);
int err;
- mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);
-
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+ wq->db = wq_ctrl->db.db;
+
+ err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf,
param->buf_numa_node);
if (err) {
@@ -205,8 +176,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->fbc.frag_buf = wq_ctrl->buf;
- wq->db = wq_ctrl->db.db;
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc);
wq_ctrl->mdev = mdev;
@@ -222,30 +192,29 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
struct mlx5_wqe_srq_next_seg *next_seg;
int err;
int i;
- mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
- MLX5_GET(wq, wqc, log_wq_sz),
- fbc);
-
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+ wq->db = wq_ctrl->db.db;
+
+ err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- wq->fbc.frag_buf = wq_ctrl->buf;
- wq->db = wq_ctrl->db.db;
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
for (i = 0; i < fbc->sz_m1; i++) {
next_seg = mlx5_wq_ll_get_wqe(wq, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 3a1a170bb2d7..b1293d153a58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
@@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
return ctr & wq->fbc.sz_m1;
}
-static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
-{
- return ctr & wq->fbc.frag_sz_m1;
-}
-
static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
{
return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
@@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}
+static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix)
+{
+ return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1;
+}
+
static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
{
int equal = (cc1 == cc2);
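The wq.c rework above computes each buffer's byte size straight from the log fields before any frag-buf control structure exists, then initializes the fbc from the allocated frags; when the RQ ends mid-page, the SQ starts a strides-offset into that same page, otherwise on the next page boundary. The size helper in isolation (wq_bytes mirrors the shape of the new wq_get_byte_sz(); the values in the comment are examples):

#include <linux/types.h>

/* Byte size of a queue with 2^log_sz entries of 2^log_stride bytes. */
static u32 wq_bytes(u8 log_sz, u8 log_stride)
{
        return ((u32)1 << log_sz) << log_stride;
}

/* e.g. wq_bytes(10, 6) == 64 KiB: 1024 send WQEBBs of 64 bytes each */

The (u32) cast keeps the shifts in unsigned arithmetic, avoiding signed-overflow undefined behaviour when the combined shift count reaches bit 31.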
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 68fa44a41485..1f77e97e2d7a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -27,7 +27,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_acl_flex_keys.o \
spectrum1_mr_tcam.o spectrum2_mr_tcam.o \
spectrum_mr_tcam.o spectrum_mr.o \
- spectrum_qdisc.o spectrum_span.o
+ spectrum_qdisc.o spectrum_span.o \
+ spectrum_nve.o spectrum_nve_vxlan.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 81533d7f395c..937d0ace699a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1055,6 +1055,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
+ mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
if (!reload)
devlink_unregister(devlink);
@@ -1088,6 +1089,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
+ mlxsw_hwmon_fini(mlxsw_core->hwmon);
if (!reload)
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 655ddd204ab2..c35be477856f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -359,6 +359,10 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
return 0;
}
+static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+}
+
#endif
struct mlxsw_thermal;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index f6cf2896d337..e04e8162aa14 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -303,8 +303,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
struct device *hwmon_dev;
int err;
- mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon),
- GFP_KERNEL);
+ mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
if (!mlxsw_hwmon)
return -ENOMEM;
mlxsw_hwmon->core = mlxsw_core;
@@ -321,10 +320,9 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
- hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev,
- "mlxsw",
- mlxsw_hwmon,
- mlxsw_hwmon->groups);
+ hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
+ "mlxsw", mlxsw_hwmon,
+ mlxsw_hwmon->groups);
if (IS_ERR(hwmon_dev)) {
err = PTR_ERR(hwmon_dev);
goto err_hwmon_register;
@@ -337,5 +335,12 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
err_hwmon_register:
err_fans_init:
err_temp_init:
+ kfree(mlxsw_hwmon);
return err;
}
+
+void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+ hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
+ kfree(mlxsw_hwmon);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 4d271fb3de3d..5890fdfd62c3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -718,14 +718,17 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
memset(&active_cqns, 0, sizeof(active_cqns));
while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
- u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
- switch (event_type) {
- case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+ /* Command interface completion events are always received on
+ * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
+ * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
+ */
+ switch (q->num) {
+ case MLXSW_PCI_EQ_ASYNC_NUM:
mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
q->u.eq.ev_cmd_count++;
break;
- case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+ case MLXSW_PCI_EQ_COMP_NUM:
cqn = mlxsw_pci_eqe_cqn_get(eqe);
set_bit(cqn, active_cqns);
cq_handle = true;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 83f452b7ccbb..bb99f6d41fe0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -221,7 +221,7 @@ MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
/* pci_eqe_cqn
- * Completion Queue that triggeret this EQE.
+ * Completion Queue that triggered this EQE.
*/
MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6e8b619b769b..32cb6718bb17 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -295,6 +295,7 @@ enum mlxsw_reg_sfd_rec_type {
MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1,
MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL = 0xC,
};
/* reg_sfd_rec_type
@@ -525,6 +526,61 @@ mlxsw_reg_sfd_mc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid);
}
+/* reg_sfd_uc_tunnel_uip_msb
+ * When protocol is IPv4, the most significant byte of the underlay IPv4
+ * destination IP.
+ * When protocol is IPv6, reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_msb, MLXSW_REG_SFD_BASE_LEN, 24,
+ 8, MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_tunnel_fid
+ * Filtering ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_fid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+enum mlxsw_reg_sfd_uc_tunnel_protocol {
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6,
+};
+
+/* reg_sfd_uc_tunnel_protocol
+ * IP protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_protocol, MLXSW_REG_SFD_BASE_LEN, 27,
+ 1, MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_tunnel_uip_lsb
+ * When protocol is IPv4, the least significant bytes of the underlay
+ * IPv4 destination IP.
+ * When protocol is IPv6, pointer to the underlay IPv6 destination IP
+ * which is configured by RIPS.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_lsb, MLXSW_REG_SFD_BASE_LEN, 0,
+ 24, MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 fid,
+ enum mlxsw_reg_sfd_rec_action action, u32 uip,
+ enum mlxsw_reg_sfd_uc_tunnel_protocol proto)
+{
+ mlxsw_reg_sfd_rec_pack(payload, rec_index,
+ MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac,
+ action);
+ mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+ mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24);
+ mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip);
+ mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid);
+ mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto);
+}
+
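/* Editor's sketch (illustrative values, not part of the patch): how a
 * u32 underlay IPv4 address is split across the two uip items above.
 */
u32 uip = 0xC0000201;		/* 192.0.2.1 */
u8 uip_msb = uip >> 24;		/* 0xC0 -> reg_sfd_uc_tunnel_uip_msb */
u32 uip_lsb = uip & 0xFFFFFF;	/* 0x000201 -> reg_sfd_uc_tunnel_uip_lsb;
				 * the pack helper passes uip unmasked and
				 * relies on the 24-bit item width.
				 */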
/* SFN - Switch FDB Notification Register
* -------------------------------------------
* The switch provides notifications on newly learned FDB entries and
@@ -1069,6 +1125,8 @@ enum mlxsw_reg_sfdf_flush_type {
MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
MLXSW_REG_SFDF_FLUSH_PER_LAG,
MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
+ MLXSW_REG_SFDF_FLUSH_PER_NVE,
+ MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID,
};
/* reg_sfdf_flush_type
@@ -1079,6 +1137,10 @@ enum mlxsw_reg_sfdf_flush_type {
* 3 - All FID dynamic entries pointing to port are flushed.
* 4 - All dynamic entries pointing to LAG are flushed.
* 5 - All FID dynamic entries pointing to LAG are flushed.
+ * 6 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are
+ * flushed.
+ * 7 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are
+ * flushed, per FID.
* Access: RW
*/
MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
@@ -1315,12 +1377,19 @@ MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20);
-static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
+/* reg_slcr_seed
+ * LAG seed value. The seed is the same for all ports.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, slcr, seed, 0x08, 0, 32);
+
+static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash, u32 seed)
{
MLXSW_REG_ZERO(slcr, payload);
mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC);
mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
+ mlxsw_reg_slcr_seed_set(payload, seed);
}
/* SLCOR - Switch LAG Collector Register
@@ -8279,6 +8348,508 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
mlxsw_reg_mgpc_opcode_set(payload, opcode);
}
+/* MPRS - Monitoring Parsing State Register
+ * ----------------------------------------
+ * The MPRS register is used for setting up the parsing for hash,
+ * policy-engine and routing.
+ */
+#define MLXSW_REG_MPRS_ID 0x9083
+#define MLXSW_REG_MPRS_LEN 0x14
+
+MLXSW_REG_DEFINE(mprs, MLXSW_REG_MPRS_ID, MLXSW_REG_MPRS_LEN);
+
+/* reg_mprs_parsing_depth
+ * Minimum parsing depth.
+ * Need to enlarge parsing depth according to L3, MPLS, tunnels, ACL
+ * rules, traps, hash, etc. Default is 96 bytes. Reserved when SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, parsing_depth, 0x00, 0, 16);
+
+/* reg_mprs_parsing_en
+ * Parsing enable.
+ * Bit 0 - Enable parsing of NVE of types VxLAN, VxLAN-GPE, GENEVE and
+ * NVGRE. Default is enabled. Reserved when SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, parsing_en, 0x04, 0, 16);
+
+/* reg_mprs_vxlan_udp_dport
+ * VxLAN UDP destination port.
+ * Used for identifying VxLAN packets and for dport field in
+ * encapsulation. Default is 4789.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mprs, vxlan_udp_dport, 0x10, 0, 16);
+
+static inline void mlxsw_reg_mprs_pack(char *payload, u16 parsing_depth,
+ u16 vxlan_udp_dport)
+{
+ MLXSW_REG_ZERO(mprs, payload);
+ mlxsw_reg_mprs_parsing_depth_set(payload, parsing_depth);
+ mlxsw_reg_mprs_parsing_en_set(payload, true);
+ mlxsw_reg_mprs_vxlan_udp_dport_set(payload, vxlan_udp_dport);
+}
+
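/* Editor's usage sketch (not part of the patch; assumes a valid
 * mlxsw_core in an init path, mirroring the mlxsw_reg_write() pattern
 * used elsewhere in this patch). Values are the documented defaults.
 */
char mprs_pl[MLXSW_REG_MPRS_LEN];
int err;

mlxsw_reg_mprs_pack(mprs_pl, 96, 4789);	/* default depth, IANA VxLAN port */
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mprs), mprs_pl);
if (err)
	return err;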
+/* TNGCR - Tunneling NVE General Configuration Register
+ * ----------------------------------------------------
+ * The TNGCR register is used for setting up the NVE Tunneling configuration.
+ */
+#define MLXSW_REG_TNGCR_ID 0xA001
+#define MLXSW_REG_TNGCR_LEN 0x44
+
+MLXSW_REG_DEFINE(tngcr, MLXSW_REG_TNGCR_ID, MLXSW_REG_TNGCR_LEN);
+
+enum mlxsw_reg_tngcr_type {
+ MLXSW_REG_TNGCR_TYPE_VXLAN,
+ MLXSW_REG_TNGCR_TYPE_VXLAN_GPE,
+ MLXSW_REG_TNGCR_TYPE_GENEVE,
+ MLXSW_REG_TNGCR_TYPE_NVGRE,
+};
+
+/* reg_tngcr_type
+ * Tunnel type for encapsulation and decapsulation. The types are mutually
+ * exclusive.
+ * Note: For Spectrum the NVE parsing must be enabled in MPRS.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, type, 0x00, 0, 4);
+
+/* reg_tngcr_nve_valid
+ * The VTEP is valid. Allows adding FDB entries for tunnel encapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_valid, 0x04, 31, 1);
+
+/* reg_tngcr_nve_ttl_uc
+ * The TTL for NVE tunnel encapsulation underlay unicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_ttl_uc, 0x04, 0, 8);
+
+/* reg_tngcr_nve_ttl_mc
+ * The TTL for NVE tunnel encapsulation underlay multicast packets.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_ttl_mc, 0x08, 0, 8);
+
+enum {
+ /* Do not copy flow label. Calculate flow label using nve_flh. */
+ MLXSW_REG_TNGCR_FL_NO_COPY,
+ /* Copy flow label from inner packet if packet is IPv6 and
+ * encapsulation is by IPv6. Otherwise, calculate flow label using
+ * nve_flh.
+ */
+ MLXSW_REG_TNGCR_FL_COPY,
+};
+
+/* reg_tngcr_nve_flc
+ * For NVE tunnel encapsulation: Flow label copy from inner packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_flc, 0x0C, 25, 1);
+
+enum {
+ /* Flow label is static. In Spectrum this means '0'. Spectrum-2
+ * uses {nve_fl_prefix, nve_fl_suffix}.
+ */
+ MLXSW_REG_TNGCR_FL_NO_HASH,
+ /* 8 LSBs of the flow label are calculated from ECMP hash of the
+ * inner packet. 12 MSBs are configured by nve_fl_prefix.
+ */
+ MLXSW_REG_TNGCR_FL_HASH,
+};
+
+/* reg_tngcr_nve_flh
+ * NVE flow label hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_flh, 0x0C, 24, 1);
+
+/* reg_tngcr_nve_fl_prefix
+ * NVE flow label prefix. Constant 12 MSBs of the flow label.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_fl_prefix, 0x0C, 8, 12);
+
+/* reg_tngcr_nve_fl_suffix
+ * NVE flow label suffix. Constant 8 LSBs of the flow label.
+ * Reserved when nve_flh=1 and for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_fl_suffix, 0x0C, 0, 8);
+
+enum {
+ /* Source UDP port is fixed (default '0') */
+ MLXSW_REG_TNGCR_UDP_SPORT_NO_HASH,
+ /* Source UDP port is calculated based on hash */
+ MLXSW_REG_TNGCR_UDP_SPORT_HASH,
+};
+
+/* reg_tngcr_nve_udp_sport_type
+ * NVE UDP source port type.
+ * Spectrum uses LAG hash (SLCRv2). Spectrum-2 uses ECMP hash (RECRv2).
+ * When the source UDP port is calculated based on hash, then the 8 LSBs
+ * are calculated from the hash and the 8 MSBs are configured by

+ * nve_udp_sport_prefix.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_udp_sport_type, 0x10, 24, 1);
+
+/* reg_tngcr_nve_udp_sport_prefix
+ * NVE UDP source port prefix. Constant 8 MSBs of the UDP source port.
+ * Reserved when NVE type is NVGRE.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_udp_sport_prefix, 0x10, 8, 8);
+
+/* reg_tngcr_nve_group_size_mc
+ * The number of sequential linked lists of MC entries. The first linked
+ * list is configured by SFD.underlay_mc_ptr.
+ * Valid values: 1, 2, 4, 8, 16, 32, 64
+ * The linked lists are configured by TNUMT.
+ * The hash is set by LAG hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_group_size_mc, 0x18, 0, 8);
+
+/* reg_tngcr_nve_group_size_flood
+ * The number of sequential linked lists of flooding entries. The first
+ * linked list is configured by SFMR.nve_tunnel_flood_ptr.
+ * Valid values: 1, 2, 4, 8, 16, 32, 64
+ * The linked lists are configured by TNUMT.
+ * The hash is set by LAG hash.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, nve_group_size_flood, 0x1C, 0, 8);
+
+/* reg_tngcr_learn_enable
+ * During decapsulation, whether to learn from NVE port.
+ * Reserved when Spectrum-2. See TNPC.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, learn_enable, 0x20, 31, 1);
+
+/* reg_tngcr_underlay_virtual_router
+ * Underlay virtual router.
+ * Reserved when Spectrum-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, underlay_virtual_router, 0x20, 0, 16);
+
+/* reg_tngcr_underlay_rif
+ * Underlay ingress router interface. RIF type should be loopback generic.
+ * Reserved when Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, underlay_rif, 0x24, 0, 16);
+
+/* reg_tngcr_usipv4
+ * Underlay source IPv4 address of the NVE.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tngcr, usipv4, 0x28, 0, 32);
+
+/* reg_tngcr_usipv6
+ * Underlay source IPv6 address of the NVE. For Spectrum, must not be
+ * modified under traffic of NVE tunneling encapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, tngcr, usipv6, 0x30, 16);
+
+static inline void mlxsw_reg_tngcr_pack(char *payload,
+ enum mlxsw_reg_tngcr_type type,
+ bool valid, u8 ttl)
+{
+ MLXSW_REG_ZERO(tngcr, payload);
+ mlxsw_reg_tngcr_type_set(payload, type);
+ mlxsw_reg_tngcr_nve_valid_set(payload, valid);
+ mlxsw_reg_tngcr_nve_ttl_uc_set(payload, ttl);
+ mlxsw_reg_tngcr_nve_ttl_mc_set(payload, ttl);
+ mlxsw_reg_tngcr_nve_flc_set(payload, MLXSW_REG_TNGCR_FL_NO_COPY);
+ mlxsw_reg_tngcr_nve_flh_set(payload, 0);
+ mlxsw_reg_tngcr_nve_udp_sport_type_set(payload,
+ MLXSW_REG_TNGCR_UDP_SPORT_HASH);
+ mlxsw_reg_tngcr_nve_udp_sport_prefix_set(payload, 0);
+ mlxsw_reg_tngcr_nve_group_size_mc_set(payload, 1);
+ mlxsw_reg_tngcr_nve_group_size_flood_set(payload, 1);
+}
+
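/* Editor's usage sketch (not part of the patch): validating a VxLAN
 * VTEP with a hypothetical TTL of 64; ul_sip, err and mlxsw_core are
 * assumed to be in scope. The remaining fields keep the defaults
 * chosen by the pack helper above.
 */
char tngcr_pl[MLXSW_REG_TNGCR_LEN];

mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true, 64);
mlxsw_reg_tngcr_usipv4_set(tngcr_pl, ul_sip);	/* ul_sip: hypothetical */
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(tngcr), tngcr_pl);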
+/* TNUMT - Tunneling NVE Underlay Multicast Table Register
+ * -------------------------------------------------------
+ * The TNUMT register is for building the underlay MC table. It is used
+ * for MC, flooding and BC traffic into the NVE tunnel.
+ */
+#define MLXSW_REG_TNUMT_ID 0xA003
+#define MLXSW_REG_TNUMT_LEN 0x20
+
+MLXSW_REG_DEFINE(tnumt, MLXSW_REG_TNUMT_ID, MLXSW_REG_TNUMT_LEN);
+
+enum mlxsw_reg_tnumt_record_type {
+ MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
+ MLXSW_REG_TNUMT_RECORD_TYPE_IPV6,
+ MLXSW_REG_TNUMT_RECORD_TYPE_LABEL,
+};
+
+/* reg_tnumt_record_type
+ * Record type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4);
+
+enum mlxsw_reg_tnumt_tunnel_port {
+ MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
+ MLXSW_REG_TNUMT_TUNNEL_PORT_VPLS,
+ MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL0,
+ MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL1,
+};
+
+/* reg_tnumt_tunnel_port
+ * Tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, tunnel_port, 0x00, 24, 4);
+
+/* reg_tnumt_underlay_mc_ptr
+ * Index to the underlay multicast table.
+ * For Spectrum the index is to the KVD linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tnumt, underlay_mc_ptr, 0x00, 0, 24);
+
+/* reg_tnumt_vnext
+ * The next_underlay_mc_ptr is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, vnext, 0x04, 31, 1);
+
+/* reg_tnumt_next_underlay_mc_ptr
+ * The next index to the underlay multicast table.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, next_underlay_mc_ptr, 0x04, 0, 24);
+
+/* reg_tnumt_record_size
+ * Number of IP addresses in the record.
+ * Range is 1..cap_max_nve_mc_entries_ipv{4,6}
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnumt, record_size, 0x08, 0, 3);
+
+/* reg_tnumt_udip
+ * The underlay IPv4 addresses. udip[i] is reserved if i >= size.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, tnumt, udip, 0x0C, 0, 32, 0x04, 0x00, false);
+
+/* reg_tnumt_udip_ptr
+ * The pointer to the underlay IPv6 addresses. udip_ptr[i] is reserved if
+ * i >= size. The IPv6 addresses are configured by RIPS.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false);
+
+static inline void mlxsw_reg_tnumt_pack(char *payload,
+ enum mlxsw_reg_tnumt_record_type type,
+ enum mlxsw_reg_tnumt_tunnel_port tport,
+ u32 underlay_mc_ptr, bool vnext,
+ u32 next_underlay_mc_ptr,
+ u8 record_size)
+{
+ MLXSW_REG_ZERO(tnumt, payload);
+ mlxsw_reg_tnumt_record_type_set(payload, type);
+ mlxsw_reg_tnumt_tunnel_port_set(payload, tport);
+ mlxsw_reg_tnumt_underlay_mc_ptr_set(payload, underlay_mc_ptr);
+ mlxsw_reg_tnumt_vnext_set(payload, vnext);
+ mlxsw_reg_tnumt_next_underlay_mc_ptr_set(payload, next_underlay_mc_ptr);
+ mlxsw_reg_tnumt_record_size_set(payload, record_size);
+}
+
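/* Editor's sketch (hypothetical KVD pointers ptr0/ptr1 and address
 * udip0; not part of the patch): the first record of a two-record
 * underlay MC linked list, chained to the second via
 * vnext/next_underlay_mc_ptr. The last record clears vnext.
 */
char tnumt_pl[MLXSW_REG_TNUMT_LEN];
int err;

mlxsw_reg_tnumt_pack(tnumt_pl, MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
		     MLXSW_REG_TNUMT_TUNNEL_PORT_NVE, ptr0, true, ptr1, 1);
mlxsw_reg_tnumt_udip_set(tnumt_pl, 0, udip0);	/* one IPv4 address */
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(tnumt), tnumt_pl);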
+/* TNQCR - Tunneling NVE QoS Configuration Register
+ * ------------------------------------------------
+ * The TNQCR register configures how the QoS fields are set during
+ * encapsulation into the underlay network.
+ */
+#define MLXSW_REG_TNQCR_ID 0xA010
+#define MLXSW_REG_TNQCR_LEN 0x0C
+
+MLXSW_REG_DEFINE(tnqcr, MLXSW_REG_TNQCR_ID, MLXSW_REG_TNQCR_LEN);
+
+/* reg_tnqcr_enc_set_dscp
+ * For encapsulation: How to set DSCP field:
+ * 0 - Copy the DSCP from the overlay (inner) IP header to the underlay
+ * (outer) IP header. If there is no IP header, use TNQDR.dscp
+ * 1 - Set the DSCP field as TNQDR.dscp
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnqcr, enc_set_dscp, 0x04, 28, 1);
+
+static inline void mlxsw_reg_tnqcr_pack(char *payload)
+{
+ MLXSW_REG_ZERO(tnqcr, payload);
+ mlxsw_reg_tnqcr_enc_set_dscp_set(payload, 0);
+}
+
+/* TNQDR - Tunneling NVE QoS Default Register
+ * ------------------------------------------
+ * The TNQDR register configures the default QoS settings for NVE
+ * encapsulation.
+ */
+#define MLXSW_REG_TNQDR_ID 0xA011
+#define MLXSW_REG_TNQDR_LEN 0x08
+
+MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN);
+
+/* reg_tnqdr_local_port
+ * Local port number (receive port). CPU port is supported.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8);
+
+/* reg_tnqdr_dscp
+ * For encapsulation, the default DSCP.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6);
+
+static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(tnqdr, payload);
+ mlxsw_reg_tnqdr_local_port_set(payload, local_port);
+ mlxsw_reg_tnqdr_dscp_set(payload, 0);
+}
+
+/* TNEEM - Tunneling NVE Encapsulation ECN Mapping Register
+ * --------------------------------------------------------
+ * The TNEEM register maps the overlay ECN (the ECN of the IP header at
+ * the ingress to the encapsulation) to the ECN of the underlay network.
+ */
+#define MLXSW_REG_TNEEM_ID 0xA012
+#define MLXSW_REG_TNEEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tneem, MLXSW_REG_TNEEM_ID, MLXSW_REG_TNEEM_LEN);
+
+/* reg_tneem_overlay_ecn
+ * ECN of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tneem, overlay_ecn, 0x04, 24, 2);
+
+/* reg_tneem_underlay_ecn
+ * ECN of the IP header in the underlay network.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tneem, underlay_ecn, 0x04, 16, 2);
+
+static inline void mlxsw_reg_tneem_pack(char *payload, u8 overlay_ecn,
+ u8 underlay_ecn)
+{
+ MLXSW_REG_ZERO(tneem, payload);
+ mlxsw_reg_tneem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tneem_underlay_ecn_set(payload, underlay_ecn);
+}
+
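/* Editor's sketch (illustrative, not part of the patch): programming an
 * identity overlay->underlay ECN mapping by iterating the four ECN
 * codepoints; assumes a valid mlxsw_core in scope.
 */
char tneem_pl[MLXSW_REG_TNEEM_LEN];
int err;
u8 ecn;

for (ecn = 0; ecn < 4; ecn++) {
	mlxsw_reg_tneem_pack(tneem_pl, ecn, ecn);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(tneem), tneem_pl);
	if (err)
		return err;
}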
+/* TNDEM - Tunneling NVE Decapsulation ECN Mapping Register
+ * --------------------------------------------------------
+ * The TNDEM register configures the actions that are performed during
+ * decapsulation.
+ */
+#define MLXSW_REG_TNDEM_ID 0xA013
+#define MLXSW_REG_TNDEM_LEN 0x0C
+
+MLXSW_REG_DEFINE(tndem, MLXSW_REG_TNDEM_ID, MLXSW_REG_TNDEM_LEN);
+
+/* reg_tndem_underlay_ecn
+ * ECN field of the IP header in the underlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tndem, underlay_ecn, 0x04, 24, 2);
+
+/* reg_tndem_overlay_ecn
+ * ECN field of the IP header in the overlay network.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tndem, overlay_ecn, 0x04, 16, 2);
+
+/* reg_tndem_eip_ecn
+ * Egress IP ECN. ECN field of the IP header of the packet which goes out
+ * from the decapsulation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, eip_ecn, 0x04, 8, 2);
+
+/* reg_tndem_trap_en
+ * Trap enable:
+ * 0 - No trap due to decap ECN
+ * 1 - Trap enable with trap_id
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, trap_en, 0x08, 28, 4);
+
+/* reg_tndem_trap_id
+ * Trap ID. Either DECAP_ECN0 or DECAP_ECN1.
+ * Reserved when trap_en is '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tndem, trap_id, 0x08, 0, 9);
+
+static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn,
+ u8 overlay_ecn, u8 ecn, bool trap_en,
+ u16 trap_id)
+{
+ MLXSW_REG_ZERO(tndem, payload);
+ mlxsw_reg_tndem_underlay_ecn_set(payload, underlay_ecn);
+ mlxsw_reg_tndem_overlay_ecn_set(payload, overlay_ecn);
+ mlxsw_reg_tndem_eip_ecn_set(payload, ecn);
+ mlxsw_reg_tndem_trap_en_set(payload, trap_en);
+ mlxsw_reg_tndem_trap_id_set(payload, trap_id);
+}
+
+/* TNPC - Tunnel Port Configuration Register
+ * -----------------------------------------
+ * The TNPC register is used for tunnel port configuration.
+ * Reserved when Spectrum.
+ */
+#define MLXSW_REG_TNPC_ID 0xA020
+#define MLXSW_REG_TNPC_LEN 0x18
+
+MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN);
+
+enum mlxsw_reg_tnpc_tunnel_port {
+ MLXSW_REG_TNPC_TUNNEL_PORT_NVE,
+ MLXSW_REG_TNPC_TUNNEL_PORT_VPLS,
+ MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL0,
+ MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL1,
+};
+
+/* reg_tnpc_tunnel_port
+ * Tunnel port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, tnpc, tunnel_port, 0x00, 0, 4);
+
+/* reg_tnpc_learn_enable_v6
+ * During IPv6 underlay decapsulation, whether to learn from tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1);
+
+/* reg_tnpc_learn_enable_v4
+ * During IPv4 underlay decapsulation, whether to learn from tunnel port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1);
+
+static inline void mlxsw_reg_tnpc_pack(char *payload,
+ enum mlxsw_reg_tnpc_tunnel_port tport,
+ bool learn_enable)
+{
+ MLXSW_REG_ZERO(tnpc, payload);
+ mlxsw_reg_tnpc_tunnel_port_set(payload, tport);
+ mlxsw_reg_tnpc_learn_enable_v4_set(payload, learn_enable);
+ mlxsw_reg_tnpc_learn_enable_v6_set(payload, learn_enable);
+}
+
/* TIGCR - Tunneling IPinIP General Configuration Register
* -------------------------------------------------------
* The TIGCR register is used for setting up the IPinIP Tunnel configuration.
@@ -8336,8 +8907,15 @@ MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2);
*/
MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4);
+/* reg_sbpr_infi_size
+ * Size is infinite.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, infi_size, 0x04, 31, 1);
+
/* reg_sbpr_size
* Pool size in buffer cells.
+ * Reserved when infi_size = 1.
* Access: RW
*/
MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24);
@@ -8355,13 +8933,15 @@ MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
enum mlxsw_reg_sbxx_dir dir,
- enum mlxsw_reg_sbpr_mode mode, u32 size)
+ enum mlxsw_reg_sbpr_mode mode, u32 size,
+ bool infi_size)
{
MLXSW_REG_ZERO(sbpr, payload);
mlxsw_reg_sbpr_pool_set(payload, pool);
mlxsw_reg_sbpr_dir_set(payload, dir);
mlxsw_reg_sbpr_mode_set(payload, mode);
mlxsw_reg_sbpr_size_set(payload, size);
+ mlxsw_reg_sbpr_infi_size_set(payload, infi_size);
}
/* SBCM - Shared Buffer Class Management Register
@@ -8409,6 +8989,12 @@ MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1
#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14
+/* reg_sbcm_infi_max
+ * Max buffer is infinite.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, infi_max, 0x1C, 31, 1);
+
/* reg_sbcm_max_buff
* When the pool associated to the port-pg/tclass is configured to
* static, Maximum buffer size for the limiter configured in cells.
@@ -8418,6 +9004,7 @@ MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
* 0: 0
* i: (1/128)*2^(i-1), for i=1..14
* 0xFF: Infinity
+ * Reserved when infi_max = 1.
* Access: RW
*/
MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
@@ -8430,7 +9017,8 @@ MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
enum mlxsw_reg_sbxx_dir dir,
- u32 min_buff, u32 max_buff, u8 pool)
+ u32 min_buff, u32 max_buff,
+ bool infi_max, u8 pool)
{
MLXSW_REG_ZERO(sbcm, payload);
mlxsw_reg_sbcm_local_port_set(payload, local_port);
@@ -8438,6 +9026,7 @@ static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
mlxsw_reg_sbcm_dir_set(payload, dir);
mlxsw_reg_sbcm_min_buff_set(payload, min_buff);
mlxsw_reg_sbcm_max_buff_set(payload, max_buff);
+ mlxsw_reg_sbcm_infi_max_set(payload, infi_max);
mlxsw_reg_sbcm_pool_set(payload, pool);
}
@@ -8810,6 +9399,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcc),
MLXSW_REG(mcda),
MLXSW_REG(mgpc),
+ MLXSW_REG(mprs),
+ MLXSW_REG(tngcr),
+ MLXSW_REG(tnumt),
+ MLXSW_REG(tnqcr),
+ MLXSW_REG(tnqdr),
+ MLXSW_REG(tneem),
+ MLXSW_REG(tndem),
+ MLXSW_REG(tnpc),
MLXSW_REG(tigcr),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 79a31de7c825..99b341539870 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -46,6 +46,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_RIFS,
MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES,
MLXSW_RES_ID_MAX_LPM_TREES,
+ MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4,
+ MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6,
/* Internal resources.
* Determined by the SW, not queried from the HW.
@@ -96,6 +98,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
[MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10,
[MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
+ [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02,
+ [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03,
};
struct mlxsw_res {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 930700413b1d..8a4983adae94 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -21,6 +21,7 @@
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
+#include <linux/random.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
@@ -44,8 +45,8 @@
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 1702
-#define MLXSW_SP1_FWREV_SUBMINOR 6
+#define MLXSW_SP1_FWREV_MINOR 1703
+#define MLXSW_SP1_FWREV_SUBMINOR 4
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -331,7 +332,10 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
return -EINVAL;
}
if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) ==
- MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor))
+ MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) &&
+ (rev->minor > req_rev->minor ||
+ (rev->minor == req_rev->minor &&
+ rev->subminor >= req_rev->subminor)))
return 0;
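	/* Editor's note (illustrative): with the required version bumped
	 * to 13.1703.4 above, a device running 13.1703.6 passes this
	 * check (same 17xx branch, equal minor, subminor 6 >= 4), whereas
	 * 13.1702.6 fails the minor comparison and falls through to the
	 * firmware upgrade path below.
	 */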
dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n",
@@ -2804,6 +2808,13 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
MLXSW_REG_QEEC_MAS_DIS);
if (err)
return err;
+
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+ i + 8, i,
+ MLXSW_REG_QEEC_MAS_DIS);
+ if (err)
+ return err;
}
/* Map all priorities to traffic class 0. */
@@ -2983,6 +2994,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_qdiscs_init;
}
+ err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_nve_init;
+ }
+
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
@@ -3011,6 +3029,8 @@ err_register_netdev:
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
err_port_vlan_get:
+ mlxsw_sp_port_nve_fini(mlxsw_sp_port);
+err_port_nve_init:
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
@@ -3050,6 +3070,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_port_nve_fini(mlxsw_sp_port);
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
@@ -3459,6 +3480,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false),
/* PKT Sample trap */
@@ -3472,6 +3494,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+ /* NVE traps */
+ MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
@@ -3656,8 +3680,10 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
+ u32 seed;
int err;
+ get_random_bytes(&seed, sizeof(seed));
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
@@ -3666,7 +3692,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
MLXSW_REG_SLCR_LAG_HASH_DIP |
MLXSW_REG_SLCR_LAG_HASH_SPORT |
MLXSW_REG_SLCR_LAG_HASH_DPORT |
- MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
+ MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
if (err)
return err;
@@ -3779,6 +3805,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_afa_init;
}
+ err = mlxsw_sp_nve_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
+ goto err_nve_init;
+ }
+
err = mlxsw_sp_router_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
@@ -3825,6 +3857,8 @@ err_acl_init:
err_netdev_notifier:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_nve_fini(mlxsw_sp);
+err_nve_init:
mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -3857,6 +3891,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -3871,6 +3906,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -3884,6 +3920,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_acl_fini(mlxsw_sp);
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_nve_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
@@ -4550,6 +4587,41 @@ static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
+static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
+{
+ unsigned int num_vxlans = 0;
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev))
+ num_vxlans++;
+ }
+
+ return num_vxlans > 1;
+}
+
+static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
+{
+ if (br_multicast_enabled(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
+ return false;
+ }
+
+ if (br_vlan_enabled(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN filtering can not be enabled on a bridge with a VxLAN device");
+ return false;
+ }
+
+ if (mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
+ return false;
+ }
+
+ return true;
+}
+
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
struct net_device *dev,
unsigned long event, void *ptr)
@@ -4579,6 +4651,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
}
if (!info->linking)
break;
+ if (netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
+ mlxsw_sp_bridge_has_vxlan(upper_dev) &&
+ !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
+ return -EOPNOTSUPP;
if (netdev_has_any_upper_dev(upper_dev) &&
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
@@ -4736,6 +4813,11 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
}
if (!info->linking)
break;
+ if (netif_is_bridge_master(upper_dev) &&
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
+ mlxsw_sp_bridge_has_vxlan(upper_dev) &&
+ !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
+ return -EOPNOTSUPP;
if (netdev_has_any_upper_dev(upper_dev) &&
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
@@ -4845,6 +4927,8 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
upper_dev = info->upper_dev;
if (info->linking)
break;
+ if (is_vlan_dev(upper_dev))
+ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
if (netif_is_macvlan(upper_dev))
mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
break;
@@ -4880,6 +4964,63 @@ static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
return netif_is_l3_master(info->upper_dev);
}
+static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *cu_info;
+ struct netdev_notifier_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ extack = netdev_notifier_info_to_extack(info);
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ cu_info = container_of(info,
+ struct netdev_notifier_changeupper_info,
+ info);
+ upper_dev = cu_info->upper_dev;
+ if (!netif_is_bridge_master(upper_dev))
+ return 0;
+ if (!mlxsw_sp_lower_get(upper_dev))
+ return 0;
+ if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
+ return -EOPNOTSUPP;
+ if (cu_info->linking) {
+ if (!netif_running(dev))
+ return 0;
+ return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
+ dev, extack);
+ } else {
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev);
+ }
+ break;
+ case NETDEV_PRE_UP:
+ upper_dev = netdev_master_upper_dev_get(dev);
+ if (!upper_dev)
+ return 0;
+ if (!netif_is_bridge_master(upper_dev))
+ return 0;
+ if (!mlxsw_sp_lower_get(upper_dev))
+ return 0;
+ return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev,
+ extack);
+ case NETDEV_DOWN:
+ upper_dev = netdev_master_upper_dev_get(dev);
+ if (!upper_dev)
+ return 0;
+ if (!netif_is_bridge_master(upper_dev))
+ return 0;
+ if (!mlxsw_sp_lower_get(upper_dev))
+ return 0;
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev);
+ break;
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -4896,6 +5037,8 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
}
mlxsw_sp_span_respin(mlxsw_sp);
+ if (netif_is_vxlan(dev))
+ err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
event, ptr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3cdb7aca90b7..0875a79cbe7b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -16,6 +16,7 @@
#include <net/psample.h>
#include <net/pkt_cls.h>
#include <net/red.h>
+#include <net/vxlan.h>
#include "port.h"
#include "core.h"
@@ -55,6 +56,8 @@ enum mlxsw_sp_resource_id {
struct mlxsw_sp_port;
struct mlxsw_sp_rif;
struct mlxsw_sp_span_entry;
+enum mlxsw_sp_l3proto;
+union mlxsw_sp_l3addr;
struct mlxsw_sp_upper {
struct net_device *dev;
@@ -113,9 +116,11 @@ struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp_fid_core;
struct mlxsw_sp_kvdl;
+struct mlxsw_sp_nve;
struct mlxsw_sp_kvdl_ops;
struct mlxsw_sp_mr_tcam_ops;
struct mlxsw_sp_acl_tcam_ops;
+struct mlxsw_sp_nve_ops;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
@@ -132,6 +137,7 @@ struct mlxsw_sp {
struct mlxsw_sp_acl *acl;
struct mlxsw_sp_fid_core *fid_core;
struct mlxsw_sp_kvdl *kvdl;
+ struct mlxsw_sp_nve *nve;
struct notifier_block netdevice_nb;
struct mlxsw_sp_counter_pool *counter_pool;
@@ -146,6 +152,7 @@ struct mlxsw_sp {
const struct mlxsw_afk_ops *afk_ops;
const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
+ const struct mlxsw_sp_nve_ops **nve_ops_arr;
};
static inline struct mlxsw_sp_upper *
@@ -235,6 +242,25 @@ struct mlxsw_sp_port {
struct mlxsw_sp_acl_block *eg_acl_block;
};
+static inline struct net_device *
+mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static inline bool mlxsw_sp_bridge_has_vxlan(struct net_device *br_dev)
+{
+ return !!mlxsw_sp_bridge_vxlan_dev_find(br_dev);
+}
+
static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -330,6 +356,13 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *br_dev);
bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct net_device *vxlan_dev,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct net_device *vxlan_dev);
/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -383,6 +416,17 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
#endif
/* spectrum_router.c */
+enum mlxsw_sp_l3proto {
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_L3_PROTO_IPV6,
+#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
+};
+
+union mlxsw_sp_l3addr {
+ __be32 addr4;
+ struct in6_addr addr6;
+};
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
@@ -416,6 +460,19 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
+struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
+struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip,
+ u32 tunnel_index);
+void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip);
+int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ u16 *vr_id);
/* spectrum_kvdl.c */
enum mlxsw_sp_kvdl_entry_type {
@@ -423,6 +480,7 @@ enum mlxsw_sp_kvdl_entry_type {
MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT,
};
static inline unsigned int
@@ -433,6 +491,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */
case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */
+ case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: /* fall through */
default:
return 1;
}
@@ -662,6 +721,16 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
+ __be32 vni);
+int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni);
+int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
+ u32 nve_flood_index);
+void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid);
+bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid);
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni);
+void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid);
+bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid);
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type, u8 local_port,
bool member);
@@ -680,6 +749,8 @@ u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
int br_ifindex);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex);
struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp);
@@ -725,4 +796,39 @@ extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
/* spectrum2_mr_tcam.c */
extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops;
+/* spectrum_nve.c */
+enum mlxsw_sp_nve_type {
+ MLXSW_SP_NVE_TYPE_VXLAN,
+};
+
+struct mlxsw_sp_nve_params {
+ enum mlxsw_sp_nve_type type;
+ __be32 vni;
+ const struct net_device *dev;
+};
+
+extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[];
+extern const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[];
+
+int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr);
+void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr);
+u32 mlxsw_sp_nve_decap_tunnel_index_get(const struct mlxsw_sp *mlxsw_sp);
+bool mlxsw_sp_nve_ipv4_route_is_decap(const struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id, __be32 addr);
+int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_params *params,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid);
+int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 68c8b148bef2..8d14770766b4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
MAX_KVD_ACTION_SETS),
MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE),
};
#define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 3589432d1643..12c61e0cc570 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -25,28 +25,52 @@ struct mlxsw_cp_sb_occ {
struct mlxsw_sp_sb_cm {
u32 min_buff;
u32 max_buff;
- u8 pool;
+ u16 pool_index;
struct mlxsw_cp_sb_occ occ;
};
+#define MLXSW_SP_SB_INFI -1U
+
struct mlxsw_sp_sb_pm {
u32 min_buff;
u32 max_buff;
struct mlxsw_cp_sb_occ occ;
};
-#define MLXSW_SP_SB_POOL_COUNT 4
-#define MLXSW_SP_SB_TC_COUNT 8
+struct mlxsw_sp_sb_pool_des {
+ enum mlxsw_reg_sbxx_dir dir;
+ u8 pool;
+};
+
+/* Order ingress pools before egress pools. */
+static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = {
+ {MLXSW_REG_SBXX_DIR_INGRESS, 0},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 1},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 2},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 3},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 0},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 1},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 2},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 3},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 15},
+};
+
+#define MLXSW_SP_SB_POOL_DESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pool_dess)
+
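/* Editor's illustration (not part of the patch): the flat pool_index
 * now encodes both direction and pool, e.g. index 4 is the first
 * egress descriptor:
 *
 *	const struct mlxsw_sp_sb_pool_des *des = &mlxsw_sp_sb_pool_dess[4];
 *	des->dir == MLXSW_REG_SBXX_DIR_EGRESS && des->pool == 0;
 */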
+#define MLXSW_SP_SB_ING_TC_COUNT 8
+#define MLXSW_SP_SB_EG_TC_COUNT 16
struct mlxsw_sp_sb_port {
- struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
- struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+ struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
+ struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
+ struct mlxsw_sp_sb_pm pms[MLXSW_SP_SB_POOL_DESS_LEN];
};
struct mlxsw_sp_sb {
- struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
+ struct mlxsw_sp_sb_pr prs[MLXSW_SP_SB_POOL_DESS_LEN];
struct mlxsw_sp_sb_port *ports;
u32 cell_size;
+ u64 sb_size;
};
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
@@ -60,95 +84,122 @@ u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
}
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
- u8 pool,
- enum mlxsw_reg_sbxx_dir dir)
+ u16 pool_index)
{
- return &mlxsw_sp->sb->prs[dir][pool];
+ return &mlxsw_sp->sb->prs[pool_index];
+}
+
+static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
+{
+ if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
+ else
+ return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
}
static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 pg_buff,
enum mlxsw_reg_sbxx_dir dir)
{
- return &mlxsw_sp->sb->ports[local_port].cms[dir][pg_buff];
+ struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
+
+ WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
+ if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ return &sb_port->ing_cms[pg_buff];
+ else
+ return &sb_port->eg_cms[pg_buff];
}
static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 pool,
- enum mlxsw_reg_sbxx_dir dir)
+ u8 local_port, u16 pool_index)
{
- return &mlxsw_sp->sb->ports[local_port].pms[dir][pool];
+ return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
}
-static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
- enum mlxsw_reg_sbxx_dir dir,
- enum mlxsw_reg_sbpr_mode mode, u32 size)
+static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
+ enum mlxsw_reg_sbpr_mode mode,
+ u32 size, bool infi_size)
{
+ const struct mlxsw_sp_sb_pool_des *des =
+ &mlxsw_sp_sb_pool_dess[pool_index];
char sbpr_pl[MLXSW_REG_SBPR_LEN];
struct mlxsw_sp_sb_pr *pr;
int err;
- mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
+ mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
+ size, infi_size);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
if (err)
return err;
- pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+ if (infi_size)
+ size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
+ pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
pr->mode = mode;
pr->size = size;
return 0;
}
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
- u32 min_buff, u32 max_buff, u8 pool)
+ u8 pg_buff, u32 min_buff, u32 max_buff,
+ bool infi_max, u16 pool_index)
{
+ const struct mlxsw_sp_sb_pool_des *des =
+ &mlxsw_sp_sb_pool_dess[pool_index];
char sbcm_pl[MLXSW_REG_SBCM_LEN];
+ struct mlxsw_sp_sb_cm *cm;
int err;
- mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
- min_buff, max_buff, pool);
+ mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
+ min_buff, max_buff, infi_max, des->pool);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
if (err)
return err;
- if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
- struct mlxsw_sp_sb_cm *cm;
- cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
+ if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
+ if (infi_max)
+ max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
+ mlxsw_sp->sb->sb_size);
+
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
+ des->dir);
cm->min_buff = min_buff;
cm->max_buff = max_buff;
- cm->pool = pool;
+ cm->pool_index = pool_index;
}
return 0;
}
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u8 pool, enum mlxsw_reg_sbxx_dir dir,
- u32 min_buff, u32 max_buff)
+ u16 pool_index, u32 min_buff, u32 max_buff)
{
+ const struct mlxsw_sp_sb_pool_des *des =
+ &mlxsw_sp_sb_pool_dess[pool_index];
char sbpm_pl[MLXSW_REG_SBPM_LEN];
struct mlxsw_sp_sb_pm *pm;
int err;
- mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
min_buff, max_buff);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
if (err)
return err;
- pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+ pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
pm->min_buff = min_buff;
pm->max_buff = max_buff;
return 0;
}
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u8 pool, enum mlxsw_reg_sbxx_dir dir,
- struct list_head *bulk_list)
+ u16 pool_index, struct list_head *bulk_list)
{
+ const struct mlxsw_sp_sb_pool_des *des =
+ &mlxsw_sp_sb_pool_dess[pool_index];
char sbpm_pl[MLXSW_REG_SBPM_LEN];
- mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
+ true, 0, 0);
return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
bulk_list, NULL, 0);
}
@@ -163,14 +214,16 @@ static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
}
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u8 pool, enum mlxsw_reg_sbxx_dir dir,
- struct list_head *bulk_list)
+ u16 pool_index, struct list_head *bulk_list)
{
+ const struct mlxsw_sp_sb_pool_des *des =
+ &mlxsw_sp_sb_pool_dess[pool_index];
char sbpm_pl[MLXSW_REG_SBPM_LEN];
struct mlxsw_sp_sb_pm *pm;
- pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
- mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
+ pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
+ false, 0, 0);
return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
bulk_list,
mlxsw_sp_sb_pm_occ_query_cb,
@@ -254,63 +307,54 @@ static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
.size = _size, \
}
-static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs[] = {
+ /* Ingress pools. */
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP_SB_PR_INGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
-};
-
-#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
-
-static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
+ /* Egress pools. */
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
};
-#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
+#define MLXSW_SP_SB_PRS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs)
-static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_reg_sbxx_dir dir,
- const struct mlxsw_sp_sb_pr *prs,
- size_t prs_len)
+static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_sb_pr *prs,
+ size_t prs_len)
{
int i;
int err;
for (i = 0; i < prs_len; i++) {
- u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);
-
- err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size);
+ u32 size = prs[i].size;
+ u32 size_cells;
+
+ if (size == MLXSW_SP_SB_INFI) {
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
+ 0, true);
+ } else {
+ size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
+ size_cells, false);
+ }
if (err)
return err;
}
return 0;
}
-static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
-{
- int err;
-
- err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
- mlxsw_sp_sb_prs_ingress,
- MLXSW_SP_SB_PRS_INGRESS_LEN);
- if (err)
- return err;
- return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
- mlxsw_sp_sb_prs_egress,
- MLXSW_SP_SB_PRS_EGRESS_LEN);
-}
-
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
{ \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
- .pool = _pool, \
+ .pool_index = _pool, \
}
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
@@ -329,38 +373,38 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
- MLXSW_SP_SB_CM(1500, 9, 0),
- MLXSW_SP_SB_CM(1500, 9, 0),
- MLXSW_SP_SB_CM(1500, 9, 0),
- MLXSW_SP_SB_CM(1500, 9, 0),
- MLXSW_SP_SB_CM(1500, 9, 0),
- MLXSW_SP_SB_CM(1500, 9, 0),
- MLXSW_SP_SB_CM(1500, 9, 0),
- MLXSW_SP_SB_CM(1500, 9, 0),
- MLXSW_SP_SB_CM(0, 140000, 15),
- MLXSW_SP_SB_CM(0, 140000, 15),
- MLXSW_SP_SB_CM(0, 140000, 15),
- MLXSW_SP_SB_CM(0, 140000, 15),
- MLXSW_SP_SB_CM(0, 140000, 15),
- MLXSW_SP_SB_CM(0, 140000, 15),
- MLXSW_SP_SB_CM(0, 140000, 15),
- MLXSW_SP_SB_CM(0, 140000, 15),
- MLXSW_SP_SB_CM(1, 0xff, 0),
+ MLXSW_SP_SB_CM(1500, 9, 4),
+ MLXSW_SP_SB_CM(1500, 9, 4),
+ MLXSW_SP_SB_CM(1500, 9, 4),
+ MLXSW_SP_SB_CM(1500, 9, 4),
+ MLXSW_SP_SB_CM(1500, 9, 4),
+ MLXSW_SP_SB_CM(1500, 9, 4),
+ MLXSW_SP_SB_CM(1500, 9, 4),
+ MLXSW_SP_SB_CM(1500, 9, 4),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(1, 0xff, 4),
};
#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
-#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)
+#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4)
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
@@ -390,6 +434,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
+static bool
+mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
+{
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
+
+ return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
+}
+
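+/* Whether a pool is static matters when writing quotas below: dynamic
+ * pools express 'max_buff' as an alpha index, while static pools
+ * express it in bytes, which must first be converted to cells.
+ */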
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
enum mlxsw_reg_sbxx_dir dir,
const struct mlxsw_sp_sb_cm *cms,
@@ -401,16 +453,29 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
for (i = 0; i < cms_len; i++) {
const struct mlxsw_sp_sb_cm *cm;
u32 min_buff;
+ u32 max_buff;
if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
continue; /* PG number 8 does not exist, skip it */
cm = &cms[i];
- /* All pools are initialized using dynamic thresholds,
- * therefore 'max_buff' isn't specified in cells.
- */
+ if (WARN_ON(mlxsw_sp_sb_pool_dess[cm->pool_index].dir != dir))
+ continue;
+
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
- err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
- min_buff, cm->max_buff, cm->pool);
+ max_buff = cm->max_buff;
+ if (max_buff == MLXSW_SP_SB_INFI) {
+ err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
+ min_buff, 0,
+ true, cm->pool_index);
+ } else {
+ if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
+ cm->pool_index))
+ max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
+ max_buff);
+ err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
+ min_buff, max_buff,
+ false, cm->pool_index);
+ }
if (err)
return err;
}
@@ -448,91 +513,74 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
.max_buff = _max_buff, \
}
-static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
+ /* Ingress pools. */
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
-};
-
-#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)
-
-static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
+ /* Egress pools. */
MLXSW_SP_SB_PM(0, 7),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(10000, 90000),
};
-#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
+#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
-static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- enum mlxsw_reg_sbxx_dir dir,
- const struct mlxsw_sp_sb_pm *pms,
- size_t pms_len)
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int i;
int err;
- for (i = 0; i < pms_len; i++) {
- const struct mlxsw_sp_sb_pm *pm;
+ for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+ const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp_sb_pms[i];
+ u32 max_buff;
+ u32 min_buff;
- pm = &pms[i];
- err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
- pm->min_buff, pm->max_buff);
+ min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
+ max_buff = pm->max_buff;
+ if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
+ max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
+ err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
+ i, min_buff, max_buff);
if (err)
return err;
}
return 0;
}
-static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- int err;
-
- err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port,
- MLXSW_REG_SBXX_DIR_INGRESS,
- mlxsw_sp_sb_pms_ingress,
- MLXSW_SP_SB_PMS_INGRESS_LEN);
- if (err)
- return err;
- return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port,
- MLXSW_REG_SBXX_DIR_EGRESS,
- mlxsw_sp_sb_pms_egress,
- MLXSW_SP_SB_PMS_EGRESS_LEN);
-}
-
struct mlxsw_sp_sb_mm {
u32 min_buff;
u32 max_buff;
- u8 pool;
+ u16 pool_index;
};
#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
{ \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
- .pool = _pool, \
+ .pool_index = _pool, \
}
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
- MLXSW_SP_SB_MM(20000, 0xff, 0),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6, 4),
};
#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
@@ -544,16 +592,18 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
int err;
for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
+ const struct mlxsw_sp_sb_pool_des *des;
const struct mlxsw_sp_sb_mm *mc;
u32 min_buff;
mc = &mlxsw_sp_sb_mms[i];
- /* All pools are initialized using dynamic thresholds,
- * therefore 'max_buff' isn't specified in cells.
+ des = &mlxsw_sp_sb_pool_dess[mc->pool_index];
+ /* All pools used by sb_mm's are initialized using dynamic
+ * thresholds, therefore 'max_buff' isn't specified in cells.
*/
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
- mc->pool);
+ des->pool);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
if (err)
return err;
@@ -561,9 +611,24 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static void mlxsw_sp_pool_count(u16 *p_ingress_len, u16 *p_egress_len)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; ++i)
+ if (mlxsw_sp_sb_pool_dess[i].dir == MLXSW_REG_SBXX_DIR_EGRESS)
+ goto out;
+ WARN(1, "No egress pools\n");
+
+out:
+ *p_ingress_len = i;
+ *p_egress_len = MLXSW_SP_SB_POOL_DESS_LEN - i;
+}
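+/* Note: this relies on the pool descriptor array listing every ingress
+ * pool before the first egress pool, so the index of the first egress
+ * descriptor doubles as the ingress pool count.
+ */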
+
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
- u64 sb_size;
+ u16 ing_pool_count;
+ u16 eg_pool_count;
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
@@ -571,17 +636,19 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
return -EIO;
- sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);
mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
if (!mlxsw_sp->sb)
return -ENOMEM;
mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
+ mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_BUFFER_SIZE);
err = mlxsw_sp_sb_ports_init(mlxsw_sp);
if (err)
goto err_sb_ports_init;
- err = mlxsw_sp_sb_prs_init(mlxsw_sp);
+ err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp_sb_prs,
+ MLXSW_SP_SB_PRS_LEN);
if (err)
goto err_sb_prs_init;
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
@@ -590,11 +657,13 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
if (err)
goto err_sb_mms_init;
- err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
- MLXSW_SP_SB_POOL_COUNT,
- MLXSW_SP_SB_POOL_COUNT,
- MLXSW_SP_SB_TC_COUNT,
- MLXSW_SP_SB_TC_COUNT);
+ mlxsw_sp_pool_count(&ing_pool_count, &eg_pool_count);
+ err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+ mlxsw_sp->sb->sb_size,
+ ing_pool_count,
+ eg_pool_count,
+ MLXSW_SP_SB_ING_TC_COUNT,
+ MLXSW_SP_SB_EG_TC_COUNT);
if (err)
goto err_devlink_sb_register;
@@ -632,36 +701,15 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
-static u8 pool_get(u16 pool_index)
-{
- return pool_index % MLXSW_SP_SB_POOL_COUNT;
-}
-
-static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
-{
- u16 pool_index;
-
- pool_index = pool;
- if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
- pool_index += MLXSW_SP_SB_POOL_COUNT;
- return pool_index;
-}
-
-static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
-{
- return pool_index < MLXSW_SP_SB_POOL_COUNT ?
- MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
-}
-
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
+ enum mlxsw_reg_sbxx_dir dir = mlxsw_sp_sb_pool_dess[pool_index].dir;
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u8 pool = pool_get(pool_index);
- enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
- struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+ struct mlxsw_sp_sb_pr *pr;
+ pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
pool_info->pool_type = (enum devlink_sb_pool_type) dir;
pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
@@ -674,34 +722,32 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
- u8 pool = pool_get(pool_index);
- enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
enum mlxsw_reg_sbpr_mode mode;
if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
return -EINVAL;
mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
- return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
+ return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
+ pool_size, false);
}
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
-static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
- enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
+static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
+ u32 max_buff)
{
- struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}
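/* With the -2 offset above, a devlink dynamic threshold of 3 is written
 * to the device as 1 and 16 as 14, and the inverse mapping is applied
 * when reporting thresholds back; static thresholds skip the offset and
 * are converted between cells and bytes instead.
 */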
-static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
- enum mlxsw_reg_sbxx_dir dir, u32 threshold,
- u32 *p_max_buff)
+static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
+ u32 threshold, u32 *p_max_buff)
{
- struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
int val;
@@ -725,12 +771,10 @@ int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
- u8 pool = pool_get(pool_index);
- enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
- pool, dir);
+ pool_index);
- *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
+ *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
pm->max_buff);
return 0;
}
@@ -743,17 +787,15 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
- u8 pool = pool_get(pool_index);
- enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
u32 max_buff;
int err;
- err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+ err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
threshold, &max_buff);
if (err)
return err;
- return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
+ return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
0, max_buff);
}
@@ -771,9 +813,9 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
- *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
+ *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
cm->max_buff);
- *p_pool_index = pool_index_get(cm->pool, dir);
+ *p_pool_index = cm->pool_index;
return 0;
}
@@ -788,24 +830,24 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
- u8 pool = pool_get(pool_index);
u32 max_buff;
int err;
- if (dir != dir_get(pool_index))
+ if (dir != mlxsw_sp_sb_pool_dess[pool_index].dir)
return -EINVAL;
- err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+ err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
threshold, &max_buff);
if (err)
return err;
- return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
- 0, max_buff, pool);
+ return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
+ 0, max_buff, false, pool_index);
}
#define MASKED_COUNT_MAX \
- (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
+ (MLXSW_REG_SBSR_REC_MAX_COUNT / \
+ (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))
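+/* Each port contributes one SBSR record per ingress TC and one per
+ * egress TC, so MASKED_COUNT_MAX bounds the number of ports that fit
+ * into a single occupancy query batch.
+ */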
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
u8 masked_count;
@@ -831,7 +873,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
- for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
MLXSW_REG_SBXX_DIR_INGRESS);
mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
@@ -845,7 +887,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
- for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
MLXSW_REG_SBXX_DIR_EGRESS);
mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
@@ -880,23 +922,17 @@ next_batch:
local_port_1 = local_port;
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, false);
- for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+ for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
- }
for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
- for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
- err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
- MLXSW_REG_SBXX_DIR_INGRESS,
- &bulk_list);
- if (err)
- goto out;
+ for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
- MLXSW_REG_SBXX_DIR_EGRESS,
&bulk_list);
if (err)
goto out;
@@ -945,23 +981,17 @@ next_batch:
local_port++;
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, true);
- for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+ for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
- }
for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
- for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
- err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
- MLXSW_REG_SBXX_DIR_INGRESS,
- &bulk_list);
- if (err)
- goto out;
+ for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
- MLXSW_REG_SBXX_DIR_EGRESS,
&bulk_list);
if (err)
goto out;
@@ -994,10 +1024,8 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
- u8 pool = pool_get(pool_index);
- enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
- pool, dir);
+ pool_index);
*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 715d24ff937e..a3db033d7399 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -6,6 +6,7 @@
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include "spectrum.h"
@@ -14,6 +15,7 @@
struct mlxsw_sp_fid_family;
struct mlxsw_sp_fid_core {
+ struct rhashtable vni_ht;
struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX];
unsigned int *port_fid_mappings;
};
@@ -24,6 +26,12 @@ struct mlxsw_sp_fid {
unsigned int ref_count;
u16 fid_index;
struct mlxsw_sp_fid_family *fid_family;
+
+ struct rhash_head vni_ht_node;
+ __be32 vni;
+ u32 nve_flood_index;
+ u8 vni_valid:1,
+ nve_flood_index_valid:1;
};
struct mlxsw_sp_fid_8021q {
@@ -36,6 +44,12 @@ struct mlxsw_sp_fid_8021d {
int br_ifindex;
};
+static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
+ .key_len = sizeof_field(struct mlxsw_sp_fid, vni),
+ .key_offset = offsetof(struct mlxsw_sp_fid, vni),
+ .head_offset = offsetof(struct mlxsw_sp_fid, vni_ht_node),
+};
+
struct mlxsw_sp_flood_table {
enum mlxsw_sp_flood_type packet_type;
enum mlxsw_reg_sfgc_bridge_type bridge_type;
@@ -56,6 +70,11 @@ struct mlxsw_sp_fid_ops {
struct mlxsw_sp_port *port, u16 vid);
void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
+ int (*vni_set)(struct mlxsw_sp_fid *fid, __be32 vni);
+ void (*vni_clear)(struct mlxsw_sp_fid *fid);
+ int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid,
+ u32 nve_flood_index);
+ void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid);
};
struct mlxsw_sp_fid_family {
@@ -94,6 +113,117 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
};
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
+ __be32 vni)
+{
+ struct mlxsw_sp_fid *fid;
+
+ fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->vni_ht, &vni,
+ mlxsw_sp_fid_vni_ht_params);
+ if (fid)
+ fid->ref_count++;
+
+ return fid;
+}
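+/* A successful lookup implicitly takes a reference on the FID; the
+ * caller is expected to release it (e.g. via mlxsw_sp_fid_put()) when
+ * done with it.
+ */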
+
+int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni)
+{
+ if (!fid->vni_valid)
+ return -EINVAL;
+
+ *vni = fid->vni;
+
+ return 0;
+}
+
+int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
+ u32 nve_flood_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ int err;
+
+ if (WARN_ON(!ops->nve_flood_index_set || fid->nve_flood_index_valid))
+ return -EINVAL;
+
+ err = ops->nve_flood_index_set(fid, nve_flood_index);
+ if (err)
+ return err;
+
+ fid->nve_flood_index = nve_flood_index;
+ fid->nve_flood_index_valid = true;
+
+ return 0;
+}
+
+void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+
+ if (WARN_ON(!ops->nve_flood_index_clear || !fid->nve_flood_index_valid))
+ return;
+
+ fid->nve_flood_index_valid = false;
+ ops->nve_flood_index_clear(fid);
+}
+
+bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid)
+{
+ return fid->nve_flood_index_valid;
+}
+
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ int err;
+
+ if (WARN_ON(!ops->vni_set || fid->vni_valid))
+ return -EINVAL;
+
+ fid->vni = vni;
+ err = rhashtable_lookup_insert_fast(&mlxsw_sp->fid_core->vni_ht,
+ &fid->vni_ht_node,
+ mlxsw_sp_fid_vni_ht_params);
+ if (err)
+ return err;
+
+ err = ops->vni_set(fid, vni);
+ if (err)
+ goto err_vni_set;
+
+ fid->vni_valid = true;
+
+ return 0;
+
+err_vni_set:
+ rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node,
+ mlxsw_sp_fid_vni_ht_params);
+ return err;
+}
+
+void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+
+ if (WARN_ON(!ops->vni_clear || !fid->vni_valid))
+ return;
+
+ fid->vni_valid = false;
+ ops->vni_clear(fid);
+ rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node,
+ mlxsw_sp_fid_vni_ht_params);
+}
+
+bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid)
+{
+ return fid->vni_valid;
+}
+
static const struct mlxsw_sp_flood_table *
mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type)
@@ -217,6 +347,21 @@ static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
+static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+ __be32 vni, bool vni_valid, u32 nve_flood_index,
+ bool nve_flood_index_valid)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid_index,
+ 0);
+ mlxsw_reg_sfmr_vv_set(sfmr_pl, vni_valid);
+ mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(vni));
+ mlxsw_reg_sfmr_vtfp_set(sfmr_pl, nve_flood_index_valid);
+ mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, nve_flood_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
static int mlxsw_sp_fid_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
u16 vid, bool valid)
{
@@ -393,6 +538,8 @@ static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid)
{
+ if (fid->vni_valid)
+ mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
}
@@ -531,6 +678,41 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
mlxsw_sp_port->local_port, vid, false);
}
+static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, vni,
+ true, fid->nve_flood_index,
+ fid->nve_flood_index_valid);
+}
+
+static void mlxsw_sp_fid_8021d_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, 0, false,
+ fid->nve_flood_index, fid->nve_flood_index_valid);
+}
+
+static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid,
+ u32 nve_flood_index)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index,
+ fid->vni, fid->vni_valid, nve_flood_index,
+ true);
+}
+
+static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+
+ mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, fid->vni,
+ fid->vni_valid, 0, false);
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
@@ -540,6 +722,10 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.flood_index = mlxsw_sp_fid_8021d_flood_index,
.port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
};
static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
@@ -708,14 +894,12 @@ static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
};
-static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_fid_type type,
- const void *arg)
+static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type,
+ const void *arg)
{
struct mlxsw_sp_fid_family *fid_family;
struct mlxsw_sp_fid *fid;
- u16 fid_index;
- int err;
fid_family = mlxsw_sp->fid_core->fid_family_arr[type];
list_for_each_entry(fid, &fid_family->fids_list, list) {
@@ -725,6 +909,23 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
return fid;
}
+ return NULL;
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_type type,
+ const void *arg)
+{
+ struct mlxsw_sp_fid_family *fid_family;
+ struct mlxsw_sp_fid *fid;
+ u16 fid_index;
+ int err;
+
+ fid = mlxsw_sp_fid_lookup(mlxsw_sp, type, arg);
+ if (fid)
+ return fid;
+
+ fid_family = mlxsw_sp->fid_core->fid_family_arr[type];
fid = kzalloc(fid_family->fid_size, GFP_KERNEL);
if (!fid)
return ERR_PTR(-ENOMEM);
@@ -784,6 +985,13 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D, &br_ifindex);
}
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp,
+ int br_ifindex)
+{
+ return mlxsw_sp_fid_lookup(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D,
+ &br_ifindex);
+}
+
struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
u16 rif_index)
{
@@ -918,6 +1126,10 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
mlxsw_sp->fid_core = fid_core;
+ err = rhashtable_init(&fid_core->vni_ht, &mlxsw_sp_fid_vni_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int),
GFP_KERNEL);
if (!fid_core->port_fid_mappings) {
@@ -944,6 +1156,8 @@ err_fid_ops_register:
}
kfree(fid_core->port_fid_mappings);
err_alloc_port_fid_mappings:
+ rhashtable_destroy(&fid_core->vni_ht);
+err_rhashtable_init:
kfree(fid_core);
return err;
}
@@ -957,5 +1171,6 @@ void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_fid_family_unregister(mlxsw_sp,
fid_core->fid_family_arr[i]);
kfree(fid_core->port_fid_mappings);
+ rhashtable_destroy(&fid_core->vni_ht);
kfree(fid_core);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
new file mode 100644
index 000000000000..ad06d9969bc1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -0,0 +1,982 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <net/inet_ecn.h>
+#include <net/ipv6.h>
+
+#include "reg.h"
+#include "spectrum.h"
+#include "spectrum_nve.h"
+
+const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[] = {
+ [MLXSW_SP_NVE_TYPE_VXLAN] = &mlxsw_sp1_nve_vxlan_ops,
+};
+
+const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[] = {
+ [MLXSW_SP_NVE_TYPE_VXLAN] = &mlxsw_sp2_nve_vxlan_ops,
+};
+
+struct mlxsw_sp_nve_mc_entry;
+struct mlxsw_sp_nve_mc_record;
+struct mlxsw_sp_nve_mc_list;
+
+struct mlxsw_sp_nve_mc_record_ops {
+ enum mlxsw_reg_tnumt_record_type type;
+ int (*entry_add)(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr);
+ void (*entry_del)(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry);
+ void (*entry_set)(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ char *tnumt_pl, unsigned int entry_index);
+ bool (*entry_compare)(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr);
+};
+
+struct mlxsw_sp_nve_mc_list_key {
+ u16 fid_index;
+};
+
+struct mlxsw_sp_nve_mc_ipv6_entry {
+ struct in6_addr addr6;
+ u32 addr6_kvdl_index;
+};
+
+struct mlxsw_sp_nve_mc_entry {
+ union {
+ __be32 addr4;
+ struct mlxsw_sp_nve_mc_ipv6_entry ipv6_entry;
+ };
+ u8 valid:1;
+};
+
+struct mlxsw_sp_nve_mc_record {
+ struct list_head list;
+ enum mlxsw_sp_l3proto proto;
+ unsigned int num_entries;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_nve_mc_list *mc_list;
+ const struct mlxsw_sp_nve_mc_record_ops *ops;
+ u32 kvdl_index;
+ struct mlxsw_sp_nve_mc_entry entries[0];
+};
+
+struct mlxsw_sp_nve_mc_list {
+ struct list_head records_list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_nve_mc_list_key key;
+};
+
+static const struct rhashtable_params mlxsw_sp_nve_mc_list_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_nve_mc_list_key),
+ .key_offset = offsetof(struct mlxsw_sp_nve_mc_list, key),
+ .head_offset = offsetof(struct mlxsw_sp_nve_mc_list, ht_node),
+};
+
+static int
+mlxsw_sp_nve_mc_record_ipv4_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ mc_entry->addr4 = addr->addr4;
+
+ return 0;
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv4_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry)
+{
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv4_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ char *tnumt_pl, unsigned int entry_index)
+{
+ u32 udip = be32_to_cpu(mc_entry->addr4);
+
+ mlxsw_reg_tnumt_udip_set(tnumt_pl, entry_index, udip);
+}
+
+static bool
+mlxsw_sp_nve_mc_record_ipv4_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ return mc_entry->addr4 == addr->addr4;
+}
+
+static const struct mlxsw_sp_nve_mc_record_ops
+mlxsw_sp_nve_mc_record_ipv4_ops = {
+ .type = MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
+ .entry_add = &mlxsw_sp_nve_mc_record_ipv4_entry_add,
+ .entry_del = &mlxsw_sp_nve_mc_record_ipv4_entry_del,
+ .entry_set = &mlxsw_sp_nve_mc_record_ipv4_entry_set,
+ .entry_compare = &mlxsw_sp_nve_mc_record_ipv4_entry_compare,
+};
+
+static int
+mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ WARN_ON(1);
+
+ return -EINVAL;
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry)
+{
+}
+
+static void
+mlxsw_sp_nve_mc_record_ipv6_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ char *tnumt_pl, unsigned int entry_index)
+{
+ u32 udip_ptr = mc_entry->ipv6_entry.addr6_kvdl_index;
+
+ mlxsw_reg_tnumt_udip_ptr_set(tnumt_pl, entry_index, udip_ptr);
+}
+
+static bool
+mlxsw_sp_nve_mc_record_ipv6_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
+ const struct mlxsw_sp_nve_mc_entry *mc_entry,
+ const union mlxsw_sp_l3addr *addr)
+{
+ return ipv6_addr_equal(&mc_entry->ipv6_entry.addr6, &addr->addr6);
+}
+
+static const struct mlxsw_sp_nve_mc_record_ops
+mlxsw_sp_nve_mc_record_ipv6_ops = {
+ .type = MLXSW_REG_TNUMT_RECORD_TYPE_IPV6,
+ .entry_add = &mlxsw_sp_nve_mc_record_ipv6_entry_add,
+ .entry_del = &mlxsw_sp_nve_mc_record_ipv6_entry_del,
+ .entry_set = &mlxsw_sp_nve_mc_record_ipv6_entry_set,
+ .entry_compare = &mlxsw_sp_nve_mc_record_ipv6_entry_compare,
+};
+
+static const struct mlxsw_sp_nve_mc_record_ops *
+mlxsw_sp_nve_mc_record_ops_arr[] = {
+ [MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_nve_mc_record_ipv4_ops,
+ [MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops,
+};
+
+static struct mlxsw_sp_nve_mc_list *
+mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_mc_list_key *key)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ return rhashtable_lookup_fast(&nve->mc_list_ht, key,
+ mlxsw_sp_nve_mc_list_ht_params);
+}
+
+static struct mlxsw_sp_nve_mc_list *
+mlxsw_sp_nve_mc_list_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_mc_list_key *key)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ struct mlxsw_sp_nve_mc_list *mc_list;
+ int err;
+
+ mc_list = kmalloc(sizeof(*mc_list), GFP_KERNEL);
+ if (!mc_list)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&mc_list->records_list);
+ mc_list->key = *key;
+
+ err = rhashtable_insert_fast(&nve->mc_list_ht, &mc_list->ht_node,
+ mlxsw_sp_nve_mc_list_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return mc_list;
+
+err_rhashtable_insert:
+ kfree(mc_list);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_nve_mc_list_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ rhashtable_remove_fast(&nve->mc_list_ht, &mc_list->ht_node,
+ mlxsw_sp_nve_mc_list_ht_params);
+ WARN_ON(!list_empty(&mc_list->records_list));
+ kfree(mc_list);
+}
+
+static struct mlxsw_sp_nve_mc_list *
+mlxsw_sp_nve_mc_list_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_mc_list_key *key)
+{
+ struct mlxsw_sp_nve_mc_list *mc_list;
+
+ mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, key);
+ if (mc_list)
+ return mc_list;
+
+ return mlxsw_sp_nve_mc_list_create(mlxsw_sp, key);
+}
+
+static void
+mlxsw_sp_nve_mc_list_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ if (!list_empty(&mc_list->records_list))
+ return;
+ mlxsw_sp_nve_mc_list_destroy(mlxsw_sp, mc_list);
+}
+
+static struct mlxsw_sp_nve_mc_record *
+mlxsw_sp_nve_mc_record_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto)
+{
+ unsigned int num_max_entries = mlxsw_sp->nve->num_max_mc_entries[proto];
+ struct mlxsw_sp_nve_mc_record *mc_record;
+ int err;
+
+ mc_record = kzalloc(sizeof(*mc_record) + num_max_entries *
+ sizeof(struct mlxsw_sp_nve_mc_entry), GFP_KERNEL);
+ if (!mc_record)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
+ &mc_record->kvdl_index);
+ if (err)
+ goto err_kvdl_alloc;
+
+ mc_record->ops = mlxsw_sp_nve_mc_record_ops_arr[proto];
+ mc_record->mlxsw_sp = mlxsw_sp;
+ mc_record->mc_list = mc_list;
+ mc_record->proto = proto;
+ list_add_tail(&mc_record->list, &mc_list->records_list);
+
+ return mc_record;
+
+err_kvdl_alloc:
+ kfree(mc_record);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nve_mc_record_destroy(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;
+
+ list_del(&mc_record->list);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
+ mc_record->kvdl_index);
+ WARN_ON(mc_record->num_entries);
+ kfree(mc_record);
+}
+
+static struct mlxsw_sp_nve_mc_record *
+mlxsw_sp_nve_mc_record_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
+ list_for_each_entry_reverse(mc_record, &mc_list->records_list, list) {
+ unsigned int num_entries = mc_record->num_entries;
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ if (mc_record->proto == proto &&
+ num_entries < nve->num_max_mc_entries[proto])
+ return mc_record;
+ }
+
+ return mlxsw_sp_nve_mc_record_create(mlxsw_sp, mc_list, proto);
+}
+
+static void
+mlxsw_sp_nve_mc_record_put(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ if (mc_record->num_entries != 0)
+ return;
+
+ mlxsw_sp_nve_mc_record_destroy(mc_record);
+}
+
+static struct mlxsw_sp_nve_mc_entry *
+mlxsw_sp_nve_mc_free_entry_find(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
+ unsigned int num_max_entries;
+ int i;
+
+ num_max_entries = nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ if (mc_record->entries[i].valid)
+ continue;
+ return &mc_record->entries[i];
+ }
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_nve_mc_record_refresh(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ enum mlxsw_reg_tnumt_record_type type = mc_record->ops->type;
+ struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
+ struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;
+ char tnumt_pl[MLXSW_REG_TNUMT_LEN];
+ unsigned int num_max_entries;
+ unsigned int num_entries = 0;
+ u32 next_kvdl_index = 0;
+ bool next_valid = false;
+ int i;
+
+ if (!list_is_last(&mc_record->list, &mc_list->records_list)) {
+ struct mlxsw_sp_nve_mc_record *next_record;
+
+ next_record = list_next_entry(mc_record, list);
+ next_kvdl_index = next_record->kvdl_index;
+ next_valid = true;
+ }
+
+ mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TNUMT_TUNNEL_PORT_NVE,
+ mc_record->kvdl_index, next_valid,
+ next_kvdl_index, mc_record->num_entries);
+
+ num_max_entries = mlxsw_sp->nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ struct mlxsw_sp_nve_mc_entry *mc_entry;
+
+ mc_entry = &mc_record->entries[i];
+ if (!mc_entry->valid)
+ continue;
+ mc_record->ops->entry_set(mc_record, mc_entry, tnumt_pl,
+ num_entries++);
+ }
+
+ WARN_ON(num_entries != mc_record->num_entries);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnumt), tnumt_pl);
+}
+
+static bool
+mlxsw_sp_nve_mc_record_is_first(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
+ struct mlxsw_sp_nve_mc_record *first_record;
+
+ first_record = list_first_entry(&mc_list->records_list,
+ struct mlxsw_sp_nve_mc_record, list);
+
+ return mc_record == first_record;
+}
+
+static struct mlxsw_sp_nve_mc_entry *
+mlxsw_sp_nve_mc_entry_find(struct mlxsw_sp_nve_mc_record *mc_record,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
+ unsigned int num_max_entries;
+ int i;
+
+ num_max_entries = nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ struct mlxsw_sp_nve_mc_entry *mc_entry;
+
+ mc_entry = &mc_record->entries[i];
+ if (!mc_entry->valid)
+ continue;
+ if (mc_record->ops->entry_compare(mc_record, mc_entry, addr))
+ return mc_entry;
+ }
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_nve_mc_record_ip_add(struct mlxsw_sp_nve_mc_record *mc_record,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_entry *mc_entry = NULL;
+ int err;
+
+ mc_entry = mlxsw_sp_nve_mc_free_entry_find(mc_record);
+ if (WARN_ON(!mc_entry))
+ return -EINVAL;
+
+ err = mc_record->ops->entry_add(mc_record, mc_entry, addr);
+ if (err)
+ return err;
+ mc_record->num_entries++;
+ mc_entry->valid = true;
+
+ err = mlxsw_sp_nve_mc_record_refresh(mc_record);
+ if (err)
+ goto err_record_refresh;
+
+	/* If this is a new record and not the first one, then we need to
+	 * update the next pointer of the previous record
+	 */
+ if (mc_record->num_entries != 1 ||
+ mlxsw_sp_nve_mc_record_is_first(mc_record))
+ return 0;
+
+ err = mlxsw_sp_nve_mc_record_refresh(list_prev_entry(mc_record, list));
+ if (err)
+ goto err_prev_record_refresh;
+
+ return 0;
+
+err_prev_record_refresh:
+err_record_refresh:
+ mc_entry->valid = false;
+ mc_record->num_entries--;
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return err;
+}
+
+static void
+mlxsw_sp_nve_mc_record_entry_del(struct mlxsw_sp_nve_mc_record *mc_record,
+ struct mlxsw_sp_nve_mc_entry *mc_entry)
+{
+ struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
+
+ mc_entry->valid = false;
+ mc_record->num_entries--;
+
+ /* When the record continues to exist we only need to invalidate
+ * the requested entry
+ */
+ if (mc_record->num_entries != 0) {
+ mlxsw_sp_nve_mc_record_refresh(mc_record);
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return;
+ }
+
+ /* If the record needs to be deleted, but it is not the first,
+ * then we need to make sure that the previous record no longer
+ * points to it. Remove deleted record from the list to reflect
+ * that and then re-add it at the end, so that it could be
+ * properly removed by the record destruction code
+ */
+ if (!mlxsw_sp_nve_mc_record_is_first(mc_record)) {
+ struct mlxsw_sp_nve_mc_record *prev_record;
+
+ prev_record = list_prev_entry(mc_record, list);
+ list_del(&mc_record->list);
+ mlxsw_sp_nve_mc_record_refresh(prev_record);
+ list_add_tail(&mc_record->list, &mc_list->records_list);
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return;
+ }
+
+ /* If the first record needs to be deleted, but the list is not
+ * singular, then the second record needs to be written in the
+ * first record's address, as this address is stored as a property
+ * of the FID
+ */
+ if (mlxsw_sp_nve_mc_record_is_first(mc_record) &&
+ !list_is_singular(&mc_list->records_list)) {
+ struct mlxsw_sp_nve_mc_record *next_record;
+
+ next_record = list_next_entry(mc_record, list);
+ swap(mc_record->kvdl_index, next_record->kvdl_index);
+ mlxsw_sp_nve_mc_record_refresh(next_record);
+ mc_record->ops->entry_del(mc_record, mc_entry);
+ return;
+ }
+
+	/* In the remaining case the last surviving record needs to be
+	 * deleted. Simply delete the entry
+	 */
+ mc_record->ops->entry_del(mc_record, mc_entry);
+}
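+/* The cases above maintain a singly linked chain of TNUMT records:
+ *
+ *   FID (NVE flood index) -> record 0 -> record 1 -> ... -> record N
+ *
+ * Only the first record's KVDL address is known to the FID, which is
+ * why deleting a middle record refreshes its predecessor, while
+ * deleting the head swaps KVDL indexes with the second record rather
+ * than updating the FID.
+ */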
+
+static struct mlxsw_sp_nve_mc_record *
+mlxsw_sp_nve_mc_record_find(struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr,
+ struct mlxsw_sp_nve_mc_entry **mc_entry)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
+ list_for_each_entry(mc_record, &mc_list->records_list, list) {
+ if (mc_record->proto != proto)
+ continue;
+
+ *mc_entry = mlxsw_sp_nve_mc_entry_find(mc_record, addr);
+ if (*mc_entry)
+ return mc_record;
+ }
+
+ return NULL;
+}
+
+static int mlxsw_sp_nve_mc_list_ip_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+ int err;
+
+ mc_record = mlxsw_sp_nve_mc_record_get(mlxsw_sp, mc_list, proto);
+ if (IS_ERR(mc_record))
+ return PTR_ERR(mc_record);
+
+ err = mlxsw_sp_nve_mc_record_ip_add(mc_record, addr);
+ if (err)
+ goto err_ip_add;
+
+ return 0;
+
+err_ip_add:
+ mlxsw_sp_nve_mc_record_put(mc_record);
+ return err;
+}
+
+static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_mc_list *mc_list,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+ struct mlxsw_sp_nve_mc_entry *mc_entry;
+
+ mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr,
+ &mc_entry);
+ if (WARN_ON(!mc_record))
+ return;
+
+ mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
+ mlxsw_sp_nve_mc_record_put(mc_record);
+}
+
+static int
+mlxsw_sp_nve_fid_flood_index_set(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
+ /* The address of the first record in the list is a property of
+ * the FID and we never change it. It only needs to be set when
+ * a new list is created
+ */
+ if (mlxsw_sp_fid_nve_flood_index_is_set(fid))
+ return 0;
+
+ mc_record = list_first_entry(&mc_list->records_list,
+ struct mlxsw_sp_nve_mc_record, list);
+
+ return mlxsw_sp_fid_nve_flood_index_set(fid, mc_record->kvdl_index);
+}
+
+static void
+mlxsw_sp_nve_fid_flood_index_clear(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_mc_list *mc_list)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record;
+
+ /* The address of the first record needs to be invalidated only when
+ * the last record is about to be removed
+ */
+ if (!list_is_singular(&mc_list->records_list))
+ return;
+
+ mc_record = list_first_entry(&mc_list->records_list,
+ struct mlxsw_sp_nve_mc_record, list);
+ if (mc_record->num_entries != 1)
+ return;
+
+ return mlxsw_sp_fid_nve_flood_index_clear(fid);
+}
+
+int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_list_key key = { 0 };
+ struct mlxsw_sp_nve_mc_list *mc_list;
+ int err;
+
+ key.fid_index = mlxsw_sp_fid_index(fid);
+ mc_list = mlxsw_sp_nve_mc_list_get(mlxsw_sp, &key);
+ if (IS_ERR(mc_list))
+ return PTR_ERR(mc_list);
+
+ err = mlxsw_sp_nve_mc_list_ip_add(mlxsw_sp, mc_list, proto, addr);
+ if (err)
+ goto err_add_ip;
+
+ err = mlxsw_sp_nve_fid_flood_index_set(fid, mc_list);
+ if (err)
+ goto err_fid_flood_index_set;
+
+ return 0;
+
+err_fid_flood_index_set:
+ mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
+err_add_ip:
+ mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
+ return err;
+}
+
+void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ struct mlxsw_sp_nve_mc_list_key key = { 0 };
+ struct mlxsw_sp_nve_mc_list *mc_list;
+
+ key.fid_index = mlxsw_sp_fid_index(fid);
+ mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
+ if (WARN_ON(!mc_list))
+ return;
+
+ mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list);
+ mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
+ mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
+}
+
+static void
+mlxsw_sp_nve_mc_record_delete(struct mlxsw_sp_nve_mc_record *mc_record)
+{
+ struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
+ unsigned int num_max_entries;
+ int i;
+
+ num_max_entries = nve->num_max_mc_entries[mc_record->proto];
+ for (i = 0; i < num_max_entries; i++) {
+ struct mlxsw_sp_nve_mc_entry *mc_entry = &mc_record->entries[i];
+
+ if (!mc_entry->valid)
+ continue;
+ mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
+ }
+
+ WARN_ON(mc_record->num_entries);
+ mlxsw_sp_nve_mc_record_put(mc_record);
+}
+
+static void mlxsw_sp_nve_flood_ip_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_nve_mc_record *mc_record, *tmp;
+ struct mlxsw_sp_nve_mc_list_key key = { 0 };
+ struct mlxsw_sp_nve_mc_list *mc_list;
+
+ if (!mlxsw_sp_fid_nve_flood_index_is_set(fid))
+ return;
+
+ mlxsw_sp_fid_nve_flood_index_clear(fid);
+
+ key.fid_index = mlxsw_sp_fid_index(fid);
+ mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
+ if (WARN_ON(!mc_list))
+ return;
+
+ list_for_each_entry_safe(mc_record, tmp, &mc_list->records_list, list)
+ mlxsw_sp_nve_mc_record_delete(mc_record);
+
+ WARN_ON(!list_empty(&mc_list->records_list));
+ mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
+}
+
+u32 mlxsw_sp_nve_decap_tunnel_index_get(const struct mlxsw_sp *mlxsw_sp)
+{
+ WARN_ON(mlxsw_sp->nve->num_nve_tunnels == 0);
+
+ return mlxsw_sp->nve->tunnel_index;
+}
+
+bool mlxsw_sp_nve_ipv4_route_is_decap(const struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id, __be32 addr)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ struct mlxsw_sp_nve_config *config = &nve->config;
+
+ if (nve->num_nve_tunnels &&
+ config->ul_proto == MLXSW_SP_L3_PROTO_IPV4 &&
+ config->ul_sip.addr4 == addr && config->ul_tb_id == tb_id)
+ return true;
+
+ return false;
+}
+
+static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_config *config)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ const struct mlxsw_sp_nve_ops *ops;
+ int err;
+
+ if (nve->num_nve_tunnels++ != 0)
+ return 0;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ &nve->tunnel_index);
+ if (err)
+ goto err_kvdl_alloc;
+
+ ops = nve->nve_ops_arr[config->type];
+ err = ops->init(nve, config);
+ if (err)
+ goto err_ops_init;
+
+ return 0;
+
+err_ops_init:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ nve->tunnel_index);
+err_kvdl_alloc:
+ nve->num_nve_tunnels--;
+ return err;
+}
+
+static void mlxsw_sp_nve_tunnel_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ const struct mlxsw_sp_nve_ops *ops;
+
+ ops = nve->nve_ops_arr[nve->config.type];
+
+ if (mlxsw_sp->nve->num_nve_tunnels == 1) {
+ ops->fini(nve);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ nve->tunnel_index);
+ }
+ nve->num_nve_tunnels--;
+}
+
+static void mlxsw_sp_nve_fdb_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index)
+{
+ char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+ mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID);
+ mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_nve_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ const struct mlxsw_sp_nve_ops *ops;
+ struct mlxsw_sp_nve_config config;
+ int err;
+
+ ops = nve->nve_ops_arr[params->type];
+
+ if (!ops->can_offload(nve, params->dev, extack))
+ return -EOPNOTSUPP;
+
+ memset(&config, 0, sizeof(config));
+ ops->nve_config(nve, params->dev, &config);
+ if (nve->num_nve_tunnels &&
+ memcmp(&config, &nve->config, sizeof(config))) {
+ NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize NVE tunnel");
+ return err;
+ }
+
+ err = mlxsw_sp_fid_vni_set(fid, params->vni);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID");
+ goto err_fid_vni_set;
+ }
+
+ nve->config = config;
+
+ return 0;
+
+err_fid_vni_set:
+ mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
+ return err;
+}
+
+void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *fid)
+{
+ u16 fid_index = mlxsw_sp_fid_index(fid);
+
+ mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
+ mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
+ mlxsw_sp_fid_vni_clear(fid);
+ mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
+}
+
+int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char tnqdr_pl[MLXSW_REG_TNQDR_LEN];
+
+ mlxsw_reg_tnqdr_pack(tnqdr_pl, mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqdr), tnqdr_pl);
+}
+
+void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+}
+
+static int mlxsw_sp_nve_qos_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char tnqcr_pl[MLXSW_REG_TNQCR_LEN];
+
+ mlxsw_reg_tnqcr_pack(tnqcr_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqcr), tnqcr_pl);
+}
+
+static int mlxsw_sp_nve_ecn_encap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ u8 outer_ecn = INET_ECN_encapsulate(0, i);
+ char tneem_pl[MLXSW_REG_TNEEM_LEN];
+ int err;
+
+ mlxsw_reg_tneem_pack(tneem_pl, i, outer_ecn);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tneem),
+ tneem_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
+ u8 inner_ecn, u8 outer_ecn)
+{
+ char tndem_pl[MLXSW_REG_TNDEM_LEN];
+ bool trap_en, set_ce = false;
+ u8 new_inner_ecn;
+
+ trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+ new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
+
+ mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
+ trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
+}
+
+static int mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ /* Iterate over inner ECN values */
+ for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
+ int j;
+
+ /* Iterate over outer ECN values */
+ for (j = INET_ECN_NOT_ECT; j <= INET_ECN_CE; j++) {
+ int err;
+
+ err = __mlxsw_sp_nve_ecn_decap_init(mlxsw_sp, i, j);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
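+/* For example, for outer CE and inner ECT(0), __INET_ECN_decapsulate()
+ * sets 'set_ce', so the new inner ECN is CE and no trap is configured;
+ * for outer CE and inner Not-ECT it returns non-zero, so the packet is
+ * trapped to DECAP_ECN0 instead, in line with RFC 6040.
+ */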
+
+static int mlxsw_sp_nve_ecn_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp_nve_ecn_encap_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ return mlxsw_sp_nve_ecn_decap_init(mlxsw_sp);
+}
+
+static int mlxsw_sp_nve_resources_query(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned int max;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6))
+ return -EIO;
+ max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4);
+ mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV4] = max;
+ max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6);
+ mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV6] = max;
+
+ return 0;
+}
+
+int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nve *nve;
+ int err;
+
+ nve = kzalloc(sizeof(*mlxsw_sp->nve), GFP_KERNEL);
+ if (!nve)
+ return -ENOMEM;
+ mlxsw_sp->nve = nve;
+ nve->mlxsw_sp = mlxsw_sp;
+ nve->nve_ops_arr = mlxsw_sp->nve_ops_arr;
+
+ err = rhashtable_init(&nve->mc_list_ht,
+ &mlxsw_sp_nve_mc_list_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ err = mlxsw_sp_nve_qos_init(mlxsw_sp);
+ if (err)
+ goto err_nve_qos_init;
+
+ err = mlxsw_sp_nve_ecn_init(mlxsw_sp);
+ if (err)
+ goto err_nve_ecn_init;
+
+ err = mlxsw_sp_nve_resources_query(mlxsw_sp);
+ if (err)
+ goto err_nve_resources_query;
+
+ return 0;
+
+err_nve_resources_query:
+err_nve_ecn_init:
+err_nve_qos_init:
+ rhashtable_destroy(&nve->mc_list_ht);
+err_rhashtable_init:
+ mlxsw_sp->nve = NULL;
+ kfree(nve);
+ return err;
+}
+
+void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
+ rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
+	kfree(mlxsw_sp->nve);
+	mlxsw_sp->nve = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
new file mode 100644
index 000000000000..4cc3297e13d6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_NVE_H
+#define _MLXSW_SPECTRUM_NVE_H
+
+#include <linux/netlink.h>
+#include <linux/rhashtable.h>
+
+#include "spectrum.h"
+
+struct mlxsw_sp_nve_config {
+ enum mlxsw_sp_nve_type type;
+ u8 ttl;
+ u8 learning_en:1;
+ __be16 udp_dport;
+ __be32 flowlabel;
+ u32 ul_tb_id;
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr ul_sip;
+};
+
+struct mlxsw_sp_nve {
+ struct mlxsw_sp_nve_config config;
+ struct rhashtable mc_list_ht;
+ struct mlxsw_sp *mlxsw_sp;
+ const struct mlxsw_sp_nve_ops **nve_ops_arr;
+ unsigned int num_nve_tunnels; /* Protected by RTNL */
+ unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX];
+ u32 tunnel_index;
+};
+
+struct mlxsw_sp_nve_ops {
+ enum mlxsw_sp_nve_type type;
+ bool (*can_offload)(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct netlink_ext_ack *extack);
+ void (*nve_config)(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct mlxsw_sp_nve_config *config);
+ int (*init)(struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_config *config);
+ void (*fini)(struct mlxsw_sp_nve *nve);
+};
+
+extern const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops;
+extern const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops;
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
new file mode 100644
index 000000000000..d21c7be5b1c9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <linux/random.h>
+#include <net/vxlan.h>
+
+#include "reg.h"
+#include "spectrum_nve.h"
+
+/* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B)
+ *
+ * In the worst case - where we have a VLAN tag on the outer Ethernet
+ * header and IPv6 in overlay and underlay - we need to parse 128 bytes
+ */
+#define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128
+#define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96
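+/* Sanity check of the worst case above: 18 + 40 + 8 + 8 + 14 + 40 = 128
+ * bytes, hence MLXSW_SP_NVE_VXLAN_PARSING_DEPTH.
+ */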
+
+#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS VXLAN_F_UDP_ZERO_CSUM_TX
+
+static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_config *cfg = &vxlan->cfg;
+
+ if (cfg->saddr.sa.sa_family != AF_INET) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only IPv4 underlay is supported");
+ return false;
+ }
+
+ if (vxlan_addr_multicast(&cfg->remote_ip)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported");
+ return false;
+ }
+
+ if (vxlan_addr_any(&cfg->saddr)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Source address must be specified");
+ return false;
+ }
+
+ if (cfg->remote_ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Local interface is not supported");
+ return false;
+ }
+
+ if (cfg->port_min || cfg->port_max) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only default UDP source port range is supported");
+ return false;
+ }
+
+ if (cfg->tos != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: TOS must be configured to inherit");
+ return false;
+ }
+
+ if (cfg->flags & VXLAN_F_TTL_INHERIT) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to inherit");
+ return false;
+ }
+
+ if (cfg->flags & VXLAN_F_LEARN) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Learning is not supported");
+ return false;
+ }
+
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
+ return false;
+ }
+
+ if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
+ return false;
+ }
+
+ if (cfg->ttl == 0) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: TTL must not be configured to 0");
+ return false;
+ }
+
+ if (cfg->label != 0) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Flow label must be configured to 0");
+ return false;
+ }
+
+ return true;
+}
+
+static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct mlxsw_sp_nve_config *config)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_config *cfg = &vxlan->cfg;
+
+ config->type = MLXSW_SP_NVE_TYPE_VXLAN;
+ config->ttl = cfg->ttl;
+ config->flowlabel = cfg->label;
+ config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0;
+ config->ul_tb_id = RT_TABLE_MAIN;
+ config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
+ config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
+ config->udp_dport = cfg->dst_port;
+}
+
+static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
+ unsigned int parsing_depth,
+ __be16 udp_dport)
+{
+ char mprs_pl[MLXSW_REG_MPRS_LEN];
+
+ mlxsw_reg_mprs_pack(mprs_pl, parsing_depth, be16_to_cpu(udp_dport));
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
+}
+
+static int
+mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_nve_config *config)
+{
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+ u16 ul_vr_id;
+ u8 udp_sport;
+ int err;
+
+ err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id,
+ &ul_vr_id);
+ if (err)
+ return err;
+
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
+ config->ttl);
+ /* VxLAN driver's default UDP source port range is 32768 (0x8000)
+ * to 60999 (0xee47). Set the upper 8 bits of the UDP source port
+ * to a random number between 0x80 and 0xee
+ */
+ get_random_bytes(&udp_sport, sizeof(udp_sport));
+ udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
+ mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
+ mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en);
+ mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id);
+ mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+}
+
+static void mlxsw_sp1_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp)
+{
+ char tngcr_pl[MLXSW_REG_TNGCR_LEN];
+
+ mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0);
+
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl);
+}
+
+static int mlxsw_sp1_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp,
+ unsigned int tunnel_index)
+{
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
+}
+
+static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_config *config)
+{
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+ int err;
+
+ err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
+ MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
+ config->udp_dport);
+ if (err)
+ return err;
+
+ err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config);
+ if (err)
+ goto err_config_set;
+
+ err = mlxsw_sp1_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index);
+ if (err)
+ goto err_rtdp_set;
+
+ err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto,
+ &config->ul_sip,
+ nve->tunnel_index);
+ if (err)
+ goto err_promote_decap;
+
+ return 0;
+
+err_promote_decap:
+err_rtdp_set:
+ mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
+err_config_set:
+ mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
+ config->udp_dport);
+ return err;
+}
+
+static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
+{
+ struct mlxsw_sp_nve_config *config = &nve->config;
+ struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
+
+ mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
+ config->ul_proto, &config->ul_sip);
+ mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
+ mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
+ config->udp_dport);
+}
+
+const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .can_offload = mlxsw_sp1_nve_vxlan_can_offload,
+ .nve_config = mlxsw_sp_nve_vxlan_config,
+ .init = mlxsw_sp1_nve_vxlan_init,
+ .fini = mlxsw_sp1_nve_vxlan_fini,
+};
+
+static bool mlxsw_sp2_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
+ const struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ return false;
+}
+
+static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
+ const struct mlxsw_sp_nve_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
+{
+}
+
+const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .can_offload = mlxsw_sp2_nve_vxlan_can_offload,
+ .nve_config = mlxsw_sp_nve_vxlan_config,
+ .init = mlxsw_sp2_nve_vxlan_init,
+ .fini = mlxsw_sp2_nve_vxlan_fini,
+};
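
A quick userspace check of the source-port prefix arithmetic in mlxsw_sp1_nve_vxlan_config_set() above: folding a random byte into [0x80, 0xee] yields 16-bit source ports in 0x8000..0xeeff (32768..61183), which brackets the VxLAN driver's default range of 32768..60999.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	uint8_t byte, prefix;

	srand((unsigned int)time(NULL));
	byte = (uint8_t)rand();
	prefix = (byte % (0xee - 0x80 + 1)) + 0x80;

	printf("prefix 0x%02x -> source ports %u..%u\n",
	       prefix, (unsigned int)prefix << 8,
	       ((unsigned int)prefix << 8) | 0xff);
	return 0;
}
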
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 2ab9cf25a08a..9e9bb57134f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -366,6 +366,7 @@ enum mlxsw_sp_fib_entry_type {
* encapsulating entries.)
*/
MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
+ MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};
struct mlxsw_sp_nexthop_group;
@@ -741,6 +742,19 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
return NULL;
}
+int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ u16 *vr_id)
+{
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+ if (!vr)
+ return -ESRCH;
+ *vr_id = vr->id;
+
+ return 0;
+}
+
static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
@@ -1128,6 +1142,52 @@ mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ enum mlxsw_sp_fib_entry_type type)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_fib_node *fib_node;
+ unsigned char addr_prefix_len;
+ struct mlxsw_sp_fib *fib;
+ struct mlxsw_sp_vr *vr;
+ const void *addrp;
+ size_t addr_len;
+ u32 addr4;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
+ if (!vr)
+ return NULL;
+ fib = mlxsw_sp_vr_fib(vr, proto);
+
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ addr4 = be32_to_cpu(addr->addr4);
+ addrp = &addr4;
+ addr_len = 4;
+ addr_prefix_len = 32;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
+ addr_prefix_len);
+ if (!fib_node || list_empty(&fib_node->entry_list))
+ return NULL;
+
+ fib_entry = list_first_entry(&fib_node->entry_list,
+ struct mlxsw_sp_fib_entry, list);
+ if (fib_entry->type != type)
+ return NULL;
+
+ return fib_entry;
+}
+
/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
@@ -1765,6 +1825,56 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip,
+ u32 tunnel_index)
+{
+ enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
+
+ /* It is valid to create a tunnel with a local IP and only later
+ * assign this IP address to a local interface
+ */
+ fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
+ ul_proto, ul_sip,
+ type);
+ if (!fib_entry)
+ return 0;
+
+ fib_entry->decap.tunnel_index = tunnel_index;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ if (err)
+ goto err_fib_entry_update;
+
+ return 0;
+
+err_fib_entry_update:
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ return err;
+}
+
+void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip)
+{
+ enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ struct mlxsw_sp_fib_entry *fib_entry;
+
+ fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
+ ul_proto, ul_sip,
+ type);
+ if (!fib_entry)
+ return;
+
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+}
+
struct mlxsw_sp_neigh_key {
struct neighbour *n;
};
@@ -3815,6 +3925,7 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
return !!nh_group->nh_rif;
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
+ case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
return true;
default:
return false;
@@ -3848,7 +3959,8 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
int i;
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
- fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
+ fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
+ fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
return;
}
@@ -4072,6 +4184,18 @@ mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
fib_entry->decap.tunnel_index);
}
+static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
@@ -4086,6 +4210,8 @@ static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
+ return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
}
return -EINVAL;
}
@@ -4121,6 +4247,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
+ u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
struct net_device *dev = fen_info->fi->fib_dev;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct fib_info *fi = fen_info->fi;
@@ -4135,6 +4262,15 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry,
ipip_entry);
}
+ if (mlxsw_sp_nve_ipv4_route_is_decap(mlxsw_sp, tb_id,
+ dip.addr4)) {
+ u32 t_index;
+
+ t_index = mlxsw_sp_nve_decap_tunnel_index_get(mlxsw_sp);
+ fib_entry->decap.tunnel_index = t_index;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ return 0;
+ }
/* fall through */
case RTN_BROADCAST:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
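
mlxsw_sp_router_nve_promote_decap() above follows a flip-commit-rollback pattern: the FIB entry type is changed first, the hardware update is attempted, and the old type is restored (and re-committed) if the write fails. A minimal sketch of that shape; demo_entry and demo_commit are hypothetical stand-ins for the fib_entry and the RALUE write, not driver API.

enum demo_entry_type { DEMO_ENTRY_TRAP, DEMO_ENTRY_NVE_DECAP };

struct demo_entry {
	enum demo_entry_type type;
	unsigned int tunnel_index;
};

/* stand-in for the device register write */
static int demo_commit(struct demo_entry *e)
{
	(void)e;
	return 0;
}

int demo_promote_decap(struct demo_entry *e, unsigned int tunnel_index)
{
	int err;

	e->tunnel_index = tunnel_index;
	e->type = DEMO_ENTRY_NVE_DECAP;

	err = demo_commit(e);
	if (err) {
		/* roll back to the trap type and re-commit it */
		e->type = DEMO_ENTRY_TRAP;
		demo_commit(e);
	}
	return err;
}

int main(void)
{
	struct demo_entry e = { DEMO_ENTRY_TRAP, 0 };

	return demo_promote_decap(&e, 1);
}
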
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 1a60391daafa..3dbafdeaab2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -7,17 +7,6 @@
#include "spectrum.h"
#include "reg.h"
-enum mlxsw_sp_l3proto {
- MLXSW_SP_L3_PROTO_IPV4,
- MLXSW_SP_L3_PROTO_IPV6,
-#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
-};
-
-union mlxsw_sp_l3addr {
- __be32 addr4;
- struct in6_addr addr6;
-};
-
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
@@ -35,8 +24,6 @@ struct mlxsw_sp_neigh_entry;
struct mlxsw_sp_nexthop;
struct mlxsw_sp_ipip_entry;
-struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
@@ -44,9 +31,7 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
-u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index db715da7bab7..bc60d7a8b49d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -15,9 +15,9 @@
#include <linux/rtnetlink.h>
#include <linux/netlink.h>
#include <net/switchdev.h>
+#include <net/vxlan.h>
#include "spectrum_span.h"
-#include "spectrum_router.h"
#include "spectrum_switchdev.h"
#include "spectrum.h"
#include "core.h"
@@ -84,9 +84,19 @@ struct mlxsw_sp_bridge_ops {
void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port);
+ int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev,
+ struct netlink_ext_ack *extack);
+ void (*vxlan_leave)(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev);
struct mlxsw_sp_fid *
(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid);
+ struct mlxsw_sp_fid *
+ (*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid);
+ u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct mlxsw_sp_fid *fid);
};
static int
@@ -1237,6 +1247,51 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
+static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ bool adding, bool dynamic)
+{
+ enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
+ char *sfd_pl;
+ u8 num_rec;
+ u32 uip;
+ int err;
+
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ uip = be32_to_cpu(addr->addr4);
+ sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
+ mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
+ MLXSW_REG_SFD_REC_ACTION_NOP, uip,
+ sfd_proto);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
+ kfree(sfd_pl);
+ return err;
+}
+
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
const char *mac, u16 fid, bool adding,
enum mlxsw_reg_sfd_rec_action action,
@@ -1950,6 +2005,21 @@ mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
+static int
+mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev,
+ struct netlink_ext_ack *extack)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static void
+mlxsw_sp_bridge_8021q_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev)
+{
+}
+
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid)
@@ -1959,10 +2029,29 @@ mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
+static u16
+mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_8021q_vid(fid);
+}
+
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
.port_join = mlxsw_sp_bridge_8021q_port_join,
.port_leave = mlxsw_sp_bridge_8021q_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join,
+ .vxlan_leave = mlxsw_sp_bridge_8021q_vxlan_leave,
.fid_get = mlxsw_sp_bridge_8021q_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
};
static bool
@@ -2026,19 +2115,126 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}
+static int
+mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ struct mlxsw_sp_nve_params params = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .vni = vxlan->cfg.vni,
+ .dev = vxlan_dev,
+ };
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
+ if (!fid)
+ return -EINVAL;
+
+ if (mlxsw_sp_fid_vni_is_set(fid))
+ return -EINVAL;
+
+ err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
+ if (err)
+ goto err_nve_fid_enable;
+
+ /* The tunnel port does not hold a reference on the FID. Only
+ * local ports and the router port do
+ */
+ mlxsw_sp_fid_put(fid);
+
+ return 0;
+
+err_nve_fid_enable:
+ mlxsw_sp_fid_put(fid);
+ return err;
+}
+
+static void
+mlxsw_sp_bridge_8021d_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct mlxsw_sp_fid *fid;
+
+ fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
+ if (WARN_ON(!fid))
+ return;
+
+ /* If the VxLAN device is down, then the FID does not have a VNI */
+ if (!mlxsw_sp_fid_vni_is_set(fid))
+ goto out;
+
+ mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
+out:
+ mlxsw_sp_fid_put(fid);
+}
+
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct net_device *vxlan_dev;
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+ if (IS_ERR(fid))
+ return fid;
+
+ if (mlxsw_sp_fid_vni_is_set(fid))
+ return fid;
+
+ vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
+ if (!vxlan_dev)
+ return fid;
+
+ if (!netif_running(vxlan_dev))
+ return fid;
+
+ err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, NULL);
+ if (err)
+ goto err_vxlan_join;
+
+ return fid;
+
+err_vxlan_join:
+ mlxsw_sp_fid_put(fid);
+ return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ /* The only valid VLAN for a VLAN-unaware bridge is 0 */
+ if (vid)
+ return NULL;
- return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+ return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
+}
+
+static u16
+mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct mlxsw_sp_fid *fid)
+{
+ return 0;
}
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
.port_join = mlxsw_sp_bridge_8021d_port_join,
.port_leave = mlxsw_sp_bridge_8021d_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join,
+ .vxlan_leave = mlxsw_sp_bridge_8021d_vxlan_leave,
.fid_get = mlxsw_sp_bridge_8021d_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021d_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
};
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -2088,15 +2284,43 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct net_device *vxlan_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, extack);
+}
+
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ const struct net_device *vxlan_dev)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (WARN_ON(!bridge_device))
+ return;
+
+ bridge_device->ops->vxlan_leave(bridge_device, vxlan_dev);
+}
+
static void
mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
const char *mac, u16 vid,
- struct net_device *dev)
+ struct net_device *dev, bool offloaded)
{
struct switchdev_notifier_fdb_info info;
info.addr = mac;
info.vid = vid;
+ info.offloaded = offloaded;
call_switchdev_notifiers(type, dev, &info.info);
}
@@ -2148,7 +2372,7 @@ do_fdb_op:
if (!do_notification)
return;
type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
- mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
return;
@@ -2208,7 +2432,7 @@ do_fdb_op:
if (!do_notification)
return;
type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
- mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
return;
@@ -2284,12 +2508,127 @@ out:
struct mlxsw_sp_switchdev_event_work {
struct work_struct work;
- struct switchdev_notifier_fdb_info fdb_info;
+ union {
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
+ };
struct net_device *dev;
unsigned long event;
};
-static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
+static void
+mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
+ enum mlxsw_sp_l3proto *proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ if (vxlan_addr->sa.sa_family == AF_INET) {
+ addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV4;
+ } else {
+ addr->addr6 = vxlan_addr->sin6.sin6_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV6;
+ }
+}
+
+static void
+mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_switchdev_event_work *
+ switchdev_work,
+ struct mlxsw_sp_fid *fid, __be32 vni)
+{
+ struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct net_device *dev = switchdev_work->dev;
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr addr;
+ int err;
+
+ fdb_info = &switchdev_work->fdb_info;
+ err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
+ if (err)
+ return;
+
+ mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
+ &proto, &addr);
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
+ vxlan_fdb_info.eth_addr,
+ mlxsw_sp_fid_index(fid),
+ proto, &addr, true, false);
+ if (err)
+ return;
+ vxlan_fdb_info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info.info);
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ vxlan_fdb_info.eth_addr,
+ fdb_info->vid, dev, true);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
+ vxlan_fdb_info.eth_addr,
+ mlxsw_sp_fid_index(fid),
+ proto, &addr, false,
+ false);
+ vxlan_fdb_info.offloaded = false;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info.info);
+ break;
+ }
+}
+
+static void
+mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
+ switchdev_work)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *dev = switchdev_work->dev;
+ struct net_device *br_dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_fid *fid;
+ __be32 vni;
+ int err;
+
+ if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
+ switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
+ return;
+
+ if (!switchdev_work->fdb_info.added_by_user)
+ return;
+
+ if (!netif_running(dev))
+ return;
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev)
+ return;
+ if (!netif_is_bridge_master(br_dev))
+ return;
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return;
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ fid = bridge_device->ops->fid_lookup(bridge_device,
+ switchdev_work->fdb_info.vid);
+ if (!fid)
+ return;
+
+ err = mlxsw_sp_fid_vni(fid, &vni);
+ if (err)
+ goto out;
+
+ mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
+ vni);
+
+out:
+ mlxsw_sp_fid_put(fid);
+}
+
+static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
struct mlxsw_sp_switchdev_event_work *switchdev_work =
container_of(work, struct mlxsw_sp_switchdev_event_work, work);
@@ -2299,6 +2638,11 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
int err;
rtnl_lock();
+ if (netif_is_vxlan(dev)) {
+ mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
+ goto out;
+ }
+
mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
if (!mlxsw_sp_port)
goto out;
@@ -2313,7 +2657,7 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
break;
mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
fdb_info->addr,
- fdb_info->vid, dev);
+ fdb_info->vid, dev, true);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = &switchdev_work->fdb_info;
@@ -2338,22 +2682,213 @@ out:
dev_put(dev);
}
+static void
+mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_switchdev_event_work *
+ switchdev_work)
+{
+ struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *dev = switchdev_work->dev;
+ u8 all_zeros_mac[ETH_ALEN] = { 0 };
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr addr;
+ struct net_device *br_dev;
+ struct mlxsw_sp_fid *fid;
+ u16 vid;
+ int err;
+
+ vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
+ br_dev = netdev_master_upper_dev_get(dev);
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
+ if (!fid)
+ return;
+
+ mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
+ &proto, &addr);
+
+ if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
+ err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
+ if (err) {
+ mlxsw_sp_fid_put(fid);
+ return;
+ }
+ vxlan_fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info->info);
+ mlxsw_sp_fid_put(fid);
+ return;
+ }
+
+ /* The device has a single FDB table, whereas Linux has two - one
+ * in the bridge driver and another in the VxLAN driver. We only
+ * program an entry to the device if the MAC points to the VxLAN
+ * device in the bridge's FDB table
+ */
+ vid = bridge_device->ops->fid_vid(bridge_device, fid);
+ if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
+ goto err_br_fdb_find;
+
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
+ mlxsw_sp_fid_index(fid), proto,
+ &addr, true, false);
+ if (err)
+ goto err_fdb_tunnel_uc_op;
+ vxlan_fdb_info->offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
+ &vxlan_fdb_info->info);
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ vxlan_fdb_info->eth_addr, vid, dev, true);
+
+ mlxsw_sp_fid_put(fid);
+
+ return;
+
+err_fdb_tunnel_uc_op:
+err_br_fdb_find:
+ mlxsw_sp_fid_put(fid);
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_switchdev_event_work *
+ switchdev_work)
+{
+ struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *dev = switchdev_work->dev;
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ u8 all_zeros_mac[ETH_ALEN] = { 0 };
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr addr;
+ struct mlxsw_sp_fid *fid;
+ u16 vid;
+
+ vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
+ if (!fid)
+ return;
+
+ mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
+ &proto, &addr);
+
+ if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
+ mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
+ mlxsw_sp_fid_put(fid);
+ return;
+ }
+
+ mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
+ mlxsw_sp_fid_index(fid), proto, &addr,
+ false, false);
+ vid = bridge_device->ops->fid_vid(bridge_device, fid);
+ mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ vxlan_fdb_info->eth_addr, vid, dev, false);
+
+ mlxsw_sp_fid_put(fid);
+}
+
+static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_switchdev_event_work *switchdev_work =
+ container_of(work, struct mlxsw_sp_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct net_device *br_dev;
+
+ rtnl_lock();
+
+ if (!netif_running(dev))
+ goto out;
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev)
+ goto out;
+ if (!netif_is_bridge_master(br_dev))
+ goto out;
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
+ mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
+ break;
+ case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
+ mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
+ break;
+ }
+
+out:
+ rtnl_unlock();
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+static int
+mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
+ switchdev_work,
+ struct switchdev_notifier_info *info)
+{
+ struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
+ struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
+ struct vxlan_config *cfg = &vxlan->cfg;
+
+ vxlan_fdb_info = container_of(info,
+ struct switchdev_notifier_vxlan_fdb_info,
+ info);
+
+ if (vxlan_fdb_info->remote_port != cfg->dst_port)
+ return -EOPNOTSUPP;
+ if (vxlan_fdb_info->remote_vni != cfg->vni)
+ return -EOPNOTSUPP;
+ if (vxlan_fdb_info->vni != cfg->vni)
+ return -EOPNOTSUPP;
+ if (vxlan_fdb_info->remote_ifindex)
+ return -EOPNOTSUPP;
+ if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr))
+ return -EOPNOTSUPP;
+ if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip))
+ return -EOPNOTSUPP;
+
+ switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
+
+ return 0;
+}
+
/* Called under rcu_read_lock() */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct mlxsw_sp_switchdev_event_work *switchdev_work;
- struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct net_device *br_dev;
+ int err;
- if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
+ /* Tunnel devices are not our uppers, so check their master instead */
+ br_dev = netdev_master_upper_dev_get_rcu(dev);
+ if (!br_dev)
+ return NOTIFY_DONE;
+ if (!netif_is_bridge_master(br_dev))
+ return NOTIFY_DONE;
+ if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
return NOTIFY_DONE;
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (!switchdev_work)
return NOTIFY_BAD;
- INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
switchdev_work->dev = dev;
switchdev_work->event = event;
@@ -2362,6 +2897,11 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
case SWITCHDEV_FDB_DEL_TO_BRIDGE:
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ INIT_WORK(&switchdev_work->work,
+ mlxsw_sp_switchdev_bridge_fdb_event_work);
memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info));
switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
@@ -2375,6 +2915,16 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
*/
dev_hold(dev);
break;
+ case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
+ INIT_WORK(&switchdev_work->work,
+ mlxsw_sp_switchdev_vxlan_fdb_event_work);
+ err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
+ info);
+ if (err)
+ goto err_vxlan_work_prepare;
+ dev_hold(dev);
+ break;
default:
kfree(switchdev_work);
return NOTIFY_DONE;
@@ -2384,6 +2934,7 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
+err_vxlan_work_prepare:
err_addr_alloc:
kfree(switchdev_work);
return NOTIFY_BAD;
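
The VxLAN FDB handlers above branch on an all-zeros MAC: such entries carry the flood (BUM) destination IP, while any other MAC is a unicast tunnel record. A small sketch of that dispatch; is_zero_mac and the two handlers are illustrative stand-ins.

#include <stdbool.h>
#include <string.h>

#define DEMO_ETH_ALEN 6

static bool is_zero_mac(const unsigned char *mac)
{
	static const unsigned char zero[DEMO_ETH_ALEN];

	return memcmp(mac, zero, DEMO_ETH_ALEN) == 0;
}

static void handle_flood_ip(void) { /* add remote IP to the FID flood list */ }
static void handle_unicast(void)  { /* program a unicast tunnel FDB record */ }

void demo_vxlan_fdb_dispatch(const unsigned char *mac)
{
	if (is_zero_mac(mac))
		handle_flood_ip();
	else
		handle_unicast();
}

int main(void)
{
	unsigned char mac[DEMO_ETH_ALEN] = { 0 };

	demo_vxlan_fdb_dispatch(mac);
	return 0;
}
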
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 53020724c2f6..6f18f4d3322a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -24,6 +24,7 @@ enum {
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
+ MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
@@ -59,6 +60,7 @@ enum {
MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
+ MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index bd51e057e915..b881f5d4a7f9 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1164,7 +1164,7 @@ ks8695_timeout(struct net_device *ndev)
* sk_buff and adds it to the TX ring. It then kicks the TX DMA
* engine to ensure transmission begins.
*/
-static int
+static netdev_tx_t
ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 0e9719fbc624..35f8c9ef204d 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1021,9 +1021,9 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
* spin_lock_irqsave is required because tx and rx should be mutually exclusive.
* So while tx is in progress, prevent the IRQ from happening.
*/
-static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
- int retv = NETDEV_TX_OK;
+ netdev_tx_t retv = NETDEV_TX_OK;
struct ks_net *ks = netdev_priv(netdev);
disable_irq(netdev->irq);
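
The two hunks above are part of a tree-wide conversion: ndo_start_xmit implementations return the dedicated netdev_tx_t type so NETDEV_TX_OK/NETDEV_TX_BUSY cannot be confused with errno-style ints. The resulting shape, sketched stand-alone with stand-in types (the kernel's real definitions live in linux/netdevice.h):

/* stand-in definitions; the kernel's are in <linux/netdevice.h> */
typedef enum { NETDEV_TX_OK = 0x00, NETDEV_TX_BUSY = 0x10 } netdev_tx_t;
struct sk_buff;
struct net_device;

static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	(void)skb;
	(void)dev;
	/* queue the frame, kick the DMA engine ... */
	return NETDEV_TX_OK;
}

int main(void)
{
	return demo_start_xmit(0, 0) == NETDEV_TX_OK ? 0 : 1;
}
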
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e7dce79ff2c9..867cddba840f 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -999,7 +999,6 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
struct phy_device *phydev;
struct net_device *netdev;
int ret = -EIO;
- u32 mii_adv;
netdev = adapter->netdev;
phydev = phy_find_first(adapter->mdiobus);
@@ -1013,13 +1012,11 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
goto return_error;
/* MAC doesn't support 1000T Half */
- phydev->supported &= ~SUPPORTED_1000baseT_Half;
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
/* support both flow controls */
+ phy_support_asym_pause(phydev);
phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
- phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
- mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control);
- phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
phy->fc_autoneg = phydev->autoneg;
phy_start(phydev);
@@ -2850,7 +2847,7 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
lan743x_hardware_cleanup(adapter);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
{
return bitrev16(crc16(0xFFFF, buf, len));
@@ -3016,7 +3013,7 @@ static int lan743x_pm_resume(struct device *dev)
static const struct dev_pm_ops lan743x_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
};
-#endif /*CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static const struct pci_device_id lan743x_pcidev_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
@@ -3028,7 +3025,7 @@ static struct pci_driver lan743x_pcidev_driver = {
.id_table = lan743x_pcidev_tbl,
.probe = lan743x_pcidev_probe,
.remove = lan743x_pcidev_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.driver.pm = &lan743x_pm_ops,
#endif
.shutdown = lan743x_pcidev_shutdown,
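
The guard change from CONFIG_PM to CONFIG_PM_SLEEP above matters because SET_SYSTEM_SLEEP_PM_OPS() only wires up the callbacks when system sleep is enabled; with CONFIG_PM set but CONFIG_PM_SLEEP unset, handlers guarded by CONFIG_PM are compiled yet never referenced and trip -Wunused-function. A toy preprocessor demonstration (toggle the hypothetical DEMO_ define to see both cases):

#include <stdio.h>

#define DEMO_CONFIG_PM 1
/* #define DEMO_CONFIG_PM_SLEEP 1 */

#ifdef DEMO_CONFIG_PM_SLEEP
static int demo_suspend(void) { return 0; }
#endif

int main(void)
{
#ifdef DEMO_CONFIG_PM_SLEEP
	printf("suspend -> %d\n", demo_suspend());
#else
	puts("system sleep disabled; suspend hook not built");
#endif
	return 0;
}
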
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index ccdf9123f26f..b2109eca81fd 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -977,8 +977,8 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter)
lan743x_ptp_disable(adapter);
}
-void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter,
- bool ts_insert_enable)
+static void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter,
+ bool ts_insert_enable)
{
u32 ptp_tx_mod = lan743x_csr_read(adapter, PTP_TX_MOD);
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index 36c84625d54e..bcec0587cf61 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -23,6 +23,8 @@ config MSCC_OCELOT_SWITCH
config MSCC_OCELOT_SWITCH_OCELOT
tristate "Ocelot switch driver on Ocelot"
depends on MSCC_OCELOT_SWITCH
+ depends on GENERIC_PHY
+ depends on OF_NET
help
This driver supports the Ocelot network switch device as present on
the Ocelot SoCs.
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1a4f2bb48ead..3238b9ee42f3 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -133,9 +133,9 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
{
unsigned int val, timeout = 10;
- /* Wait for the issued mac table command to be completed, or timeout.
- * When the command read from ANA_TABLES_MACACCESS is
- * MACACCESS_CMD_IDLE, the issued command completed successfully.
+ /* Wait for the issued vlan table command to be completed, or timeout.
+ * When the command read from ANA_TABLES_VLANACCESS is
+ * VLANACCESS_CMD_IDLE, the issued command completed successfully.
*/
do {
val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
@@ -472,6 +472,7 @@ static int ocelot_port_open(struct net_device *dev)
{
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
+ enum phy_mode phy_mode;
int err;
/* Enable receiving frames on the port, and activate auto-learning of
@@ -482,8 +483,21 @@ static int ocelot_port_open(struct net_device *dev)
ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port),
ANA_PORT_PORT_CFG, port->chip_port);
+ if (port->serdes) {
+ if (port->phy_mode == PHY_INTERFACE_MODE_SGMII)
+ phy_mode = PHY_MODE_SGMII;
+ else
+ phy_mode = PHY_MODE_QSGMII;
+
+ err = phy_set_mode(port->serdes, phy_mode);
+ if (err) {
+ netdev_err(dev, "Could not set mode of SerDes\n");
+ return err;
+ }
+ }
+
err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link,
- PHY_INTERFACE_MODE_NA);
+ port->phy_mode);
if (err) {
netdev_err(dev, "Could not attach to PHY\n");
return err;
@@ -1606,7 +1620,7 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
dev->ethtool_ops = &ocelot_ethtool_ops;
dev->switchdev_ops = &ocelot_port_switchdev_ops;
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
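
ocelot_vlant_wait_for_completion() above is a bounded polling loop: re-read the status word until the command field returns to idle or the retry budget is exhausted. Its generic form, with demo_read_status/DEMO_CMD_IDLE as hypothetical stand-ins for the VLANACCESS read and mask:

#include <errno.h>

#define DEMO_CMD_IDLE 0

/* stand-in for reading and masking ANA_TABLES_VLANACCESS */
static unsigned int demo_read_status(void)
{
	return DEMO_CMD_IDLE;
}

int demo_wait_for_idle(void)
{
	unsigned int timeout = 10;

	while (demo_read_status() != DEMO_CMD_IDLE) {
		if (!timeout--)
			return -ETIMEDOUT;
	}
	return 0;
}

int main(void)
{
	return demo_wait_for_idle();
}
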
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 616bec30dfa3..62c7c8eb00d9 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -11,12 +11,13 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "ocelot_ana.h"
#include "ocelot_dev.h"
-#include "ocelot_hsio.h"
#include "ocelot_qsys.h"
#include "ocelot_rew.h"
#include "ocelot_sys.h"
@@ -333,79 +334,6 @@ enum ocelot_reg {
SYS_CM_DATA_RD,
SYS_CM_OP,
SYS_CM_DATA,
- HSIO_PLL5G_CFG0 = HSIO << TARGET_OFFSET,
- HSIO_PLL5G_CFG1,
- HSIO_PLL5G_CFG2,
- HSIO_PLL5G_CFG3,
- HSIO_PLL5G_CFG4,
- HSIO_PLL5G_CFG5,
- HSIO_PLL5G_CFG6,
- HSIO_PLL5G_STATUS0,
- HSIO_PLL5G_STATUS1,
- HSIO_PLL5G_BIST_CFG0,
- HSIO_PLL5G_BIST_CFG1,
- HSIO_PLL5G_BIST_CFG2,
- HSIO_PLL5G_BIST_STAT0,
- HSIO_PLL5G_BIST_STAT1,
- HSIO_RCOMP_CFG0,
- HSIO_RCOMP_STATUS,
- HSIO_SYNC_ETH_CFG,
- HSIO_SYNC_ETH_PLL_CFG,
- HSIO_S1G_DES_CFG,
- HSIO_S1G_IB_CFG,
- HSIO_S1G_OB_CFG,
- HSIO_S1G_SER_CFG,
- HSIO_S1G_COMMON_CFG,
- HSIO_S1G_PLL_CFG,
- HSIO_S1G_PLL_STATUS,
- HSIO_S1G_DFT_CFG0,
- HSIO_S1G_DFT_CFG1,
- HSIO_S1G_DFT_CFG2,
- HSIO_S1G_TP_CFG,
- HSIO_S1G_RC_PLL_BIST_CFG,
- HSIO_S1G_MISC_CFG,
- HSIO_S1G_DFT_STATUS,
- HSIO_S1G_MISC_STATUS,
- HSIO_MCB_S1G_ADDR_CFG,
- HSIO_S6G_DIG_CFG,
- HSIO_S6G_DFT_CFG0,
- HSIO_S6G_DFT_CFG1,
- HSIO_S6G_DFT_CFG2,
- HSIO_S6G_TP_CFG0,
- HSIO_S6G_TP_CFG1,
- HSIO_S6G_RC_PLL_BIST_CFG,
- HSIO_S6G_MISC_CFG,
- HSIO_S6G_OB_ANEG_CFG,
- HSIO_S6G_DFT_STATUS,
- HSIO_S6G_ERR_CNT,
- HSIO_S6G_MISC_STATUS,
- HSIO_S6G_DES_CFG,
- HSIO_S6G_IB_CFG,
- HSIO_S6G_IB_CFG1,
- HSIO_S6G_IB_CFG2,
- HSIO_S6G_IB_CFG3,
- HSIO_S6G_IB_CFG4,
- HSIO_S6G_IB_CFG5,
- HSIO_S6G_OB_CFG,
- HSIO_S6G_OB_CFG1,
- HSIO_S6G_SER_CFG,
- HSIO_S6G_COMMON_CFG,
- HSIO_S6G_PLL_CFG,
- HSIO_S6G_ACJTAG_CFG,
- HSIO_S6G_GP_CFG,
- HSIO_S6G_IB_STATUS0,
- HSIO_S6G_IB_STATUS1,
- HSIO_S6G_ACJTAG_STATUS,
- HSIO_S6G_PLL_STATUS,
- HSIO_S6G_REVID,
- HSIO_MCB_S6G_ADDR_CFG,
- HSIO_HW_CFG,
- HSIO_HW_QSGMII_CFG,
- HSIO_HW_QSGMII_STAT,
- HSIO_CLK_CFG,
- HSIO_TEMP_SENSOR_CTRL,
- HSIO_TEMP_SENSOR_CFG,
- HSIO_TEMP_SENSOR_STAT,
};
enum ocelot_regfield {
@@ -527,6 +455,9 @@ struct ocelot_port {
u8 vlan_aware;
u64 *stats;
+
+ phy_interface_t phy_mode;
+ struct phy *serdes;
};
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 26bb3b18f3be..4c23d18bbf44 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -6,9 +6,11 @@
*/
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
#include <linux/skbuff.h>
#include "ocelot.h"
@@ -91,7 +93,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
struct sk_buff *skb;
struct net_device *dev;
u32 *buf;
- int sz, len;
+ int sz, len, buf_len;
u32 ifh[4];
u32 val;
struct frame_info info;
@@ -116,14 +118,25 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
err = -ENOMEM;
break;
}
- buf = (u32 *)skb_put(skb, info.len);
+ buf_len = info.len - ETH_FCS_LEN;
+ buf = (u32 *)skb_put(skb, buf_len);
len = 0;
do {
sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
*buf++ = val;
len += sz;
- } while ((sz == 4) && (len < info.len));
+ } while (len < buf_len);
+
+ /* Read the FCS */
+ sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+ /* Update the statistics if part of the FCS was read before */
+ len -= ETH_FCS_LEN - sz;
+
+ if (unlikely(dev->features & NETIF_F_RXFCS)) {
+ buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+ *buf = val;
+ }
if (sz < 0) {
err = sz;
@@ -162,6 +175,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
struct ocelot *ocelot;
+ struct regmap *hsio;
u32 val;
struct {
@@ -173,7 +187,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ QSYS, "qsys" },
{ ANA, "ana" },
{ QS, "qs" },
- { HSIO, "hsio" },
};
if (!np && !pdev->dev.platform_data)
@@ -196,6 +209,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[res[i].id] = target;
}
+ hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
+ if (IS_ERR(hsio)) {
+ dev_err(&pdev->dev, "missing hsio syscon\n");
+ return PTR_ERR(hsio);
+ }
+
+ ocelot->targets[HSIO] = hsio;
+
err = ocelot_chip_init(ocelot);
if (err)
return err;
@@ -238,18 +259,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ocelot->multicast);
ocelot_init(ocelot);
- ocelot_rmw(ocelot, HSIO_HW_CFG_DEV1G_4_MODE |
- HSIO_HW_CFG_DEV1G_6_MODE |
- HSIO_HW_CFG_DEV1G_9_MODE,
- HSIO_HW_CFG_DEV1G_4_MODE |
- HSIO_HW_CFG_DEV1G_6_MODE |
- HSIO_HW_CFG_DEV1G_9_MODE,
- HSIO_HW_CFG);
-
for_each_available_child_of_node(ports, portnp) {
struct device_node *phy_node;
struct phy_device *phy;
struct resource *res;
+ struct phy *serdes;
void __iomem *regs;
char res_name[8];
u32 port;
@@ -274,10 +288,43 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
continue;
err = ocelot_probe_port(ocelot, port, regs, phy);
- if (err) {
- dev_err(&pdev->dev, "failed to probe ports\n");
+ if (err)
+ return err;
+
+ err = of_get_phy_mode(portnp);
+ if (err < 0)
+ ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
+ else
+ ocelot->ports[port]->phy_mode = err;
+
+ switch (ocelot->ports[port]->phy_mode) {
+ case PHY_INTERFACE_MODE_NA:
+ continue;
+ case PHY_INTERFACE_MODE_SGMII:
+ break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ break;
+ default:
+ dev_err(ocelot->dev,
+ "invalid phy mode for port%d, (Q)SGMII only\n",
+ port);
+ return -EINVAL;
+ }
+
+ serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ if (err == -EPROBE_DEFER)
+ dev_dbg(ocelot->dev, "deferring probe\n");
+ else
+ dev_err(ocelot->dev,
+ "missing SerDes phys for port%d\n",
+ port);
+
goto err_probe_ports;
}
+
+ ocelot->ports[port]->serdes = serdes;
}
register_netdevice_notifier(&ocelot_netdevice_nb);
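
The extraction-IRQ change above sizes the skb to the frame length minus the 4-byte FCS and only appends the trailing word when the netdev has NETIF_F_RXFCS set. A quick arithmetic check of that accounting, with a made-up frame length:

#include <stdio.h>

#define DEMO_ETH_FCS_LEN 4

int main(void)
{
	unsigned int frame_len = 68;	/* length reported by the queue, FCS included */
	unsigned int buf_len = frame_len - DEMO_ETH_FCS_LEN;
	int rxfcs = 0;			/* NETIF_F_RXFCS disabled */

	printf("copy %u payload bytes%s\n", buf_len,
	       rxfcs ? ", then append the 4-byte FCS" : "; FCS dropped");
	return 0;
}
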
diff --git a/drivers/net/ethernet/mscc/ocelot_dev_gmii.h b/drivers/net/ethernet/mscc/ocelot_dev_gmii.h
deleted file mode 100644
index 6aa40ea223a2..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_dev_gmii.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_DEV_GMII_H_
-#define _MSCC_OCELOT_DEV_GMII_H_
-
-#define DEV_GMII_PORT_MODE_CLOCK_CFG 0x0
-
-#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_TX_RST BIT(5)
-#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_RX_RST BIT(4)
-#define DEV_GMII_PORT_MODE_CLOCK_CFG_PORT_RST BIT(3)
-#define DEV_GMII_PORT_MODE_CLOCK_CFG_PHY_RST BIT(2)
-#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
-#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
-
-#define DEV_GMII_PORT_MODE_PORT_MISC 0x4
-
-#define DEV_GMII_PORT_MODE_PORT_MISC_MPLS_RX_ENA BIT(5)
-#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_ERROR_ENA BIT(4)
-#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_PAUSE_ENA BIT(3)
-#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_CTRL_ENA BIT(2)
-#define DEV_GMII_PORT_MODE_PORT_MISC_GMII_LOOP_ENA BIT(1)
-#define DEV_GMII_PORT_MODE_PORT_MISC_DEV_LOOP_ENA BIT(0)
-
-#define DEV_GMII_PORT_MODE_EVENTS 0x8
-
-#define DEV_GMII_PORT_MODE_EEE_CFG 0xc
-
-#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_ENA BIT(22)
-#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
-#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
-#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15)
-#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8))
-#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8)
-#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8)
-#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1))
-#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1)
-#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
-#define DEV_GMII_PORT_MODE_EEE_CFG_PORT_LPI BIT(0)
-
-#define DEV_GMII_PORT_MODE_RX_PATH_DELAY 0x10
-
-#define DEV_GMII_PORT_MODE_TX_PATH_DELAY 0x14
-
-#define DEV_GMII_PORT_MODE_PTP_PREDICT_CFG 0x18
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG 0x1c
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_RX_ENA BIT(4)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_TX_ENA BIT(0)
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG 0x20
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FDX_ENA BIT(0)
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_MAXLEN_CFG 0x24
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG 0x28
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
-#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_PB_ENA BIT(1)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2)
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG 0x2c
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG 0x30
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
-#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4))
-#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
-#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG 0x34
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_OB_ENA BIT(25)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_WEXC_DIS BIT(24)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16))
-#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_M GENMASK(23, 16)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_LOAD BIT(12)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
-#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG 0x38
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_TBI_MODE BIT(4)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_LOW_CFG 0x3c
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_HIGH_CFG 0x40
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY 0x44
-
-#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_JUNK_STICKY BIT(5)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_JAM_STICKY BIT(3)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
-#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_ABORT_STICKY BIT(0)
-
-#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG 0x48
-
-#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA BIT(0)
-#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA BIT(4)
-#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_KEEP_S_AFTER_D BIT(8)
-
-#define DEV_GMII_MM_CONFIG_VERIF_CONFIG 0x4c
-
-#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS BIT(0)
-#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(x) (((x) << 4) & GENMASK(11, 4))
-#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M GENMASK(11, 4)
-#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(x) (((x) & GENMASK(11, 4)) >> 4)
-#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS(x) (((x) << 12) & GENMASK(13, 12))
-#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_M GENMASK(13, 12)
-#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_X(x) (((x) & GENMASK(13, 12)) >> 12)
-
-#define DEV_GMII_MM_STATISTICS_MM_STATUS 0x50
-
-#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STATUS BIT(0)
-#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STICKY BIT(4)
-#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE(x) (((x) << 8) & GENMASK(10, 8))
-#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_M GENMASK(10, 8)
-#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_X(x) (((x) & GENMASK(10, 8)) >> 8)
-#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_RX_PFRM_STICKY BIT(12)
-#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_TX_PFRM_STICKY BIT(16)
-#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_RX_FRAME_STATUS BIT(20)
-#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_FRAME_STATUS BIT(24)
-#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_PRMPT_STATUS BIT(28)
-
-#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index e334b406c40c..9271af18b93b 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -5,6 +5,7 @@
* Copyright (c) 2017 Microsemi Corporation
*/
#include "ocelot.h"
+#include <soc/mscc/ocelot_hsio.h>
static const u32 ocelot_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
@@ -102,82 +103,6 @@ static const u32 ocelot_qs_regmap[] = {
REG(QS_INH_DBG, 0x000048),
};
-static const u32 ocelot_hsio_regmap[] = {
- REG(HSIO_PLL5G_CFG0, 0x000000),
- REG(HSIO_PLL5G_CFG1, 0x000004),
- REG(HSIO_PLL5G_CFG2, 0x000008),
- REG(HSIO_PLL5G_CFG3, 0x00000c),
- REG(HSIO_PLL5G_CFG4, 0x000010),
- REG(HSIO_PLL5G_CFG5, 0x000014),
- REG(HSIO_PLL5G_CFG6, 0x000018),
- REG(HSIO_PLL5G_STATUS0, 0x00001c),
- REG(HSIO_PLL5G_STATUS1, 0x000020),
- REG(HSIO_PLL5G_BIST_CFG0, 0x000024),
- REG(HSIO_PLL5G_BIST_CFG1, 0x000028),
- REG(HSIO_PLL5G_BIST_CFG2, 0x00002c),
- REG(HSIO_PLL5G_BIST_STAT0, 0x000030),
- REG(HSIO_PLL5G_BIST_STAT1, 0x000034),
- REG(HSIO_RCOMP_CFG0, 0x000038),
- REG(HSIO_RCOMP_STATUS, 0x00003c),
- REG(HSIO_SYNC_ETH_CFG, 0x000040),
- REG(HSIO_SYNC_ETH_PLL_CFG, 0x000048),
- REG(HSIO_S1G_DES_CFG, 0x00004c),
- REG(HSIO_S1G_IB_CFG, 0x000050),
- REG(HSIO_S1G_OB_CFG, 0x000054),
- REG(HSIO_S1G_SER_CFG, 0x000058),
- REG(HSIO_S1G_COMMON_CFG, 0x00005c),
- REG(HSIO_S1G_PLL_CFG, 0x000060),
- REG(HSIO_S1G_PLL_STATUS, 0x000064),
- REG(HSIO_S1G_DFT_CFG0, 0x000068),
- REG(HSIO_S1G_DFT_CFG1, 0x00006c),
- REG(HSIO_S1G_DFT_CFG2, 0x000070),
- REG(HSIO_S1G_TP_CFG, 0x000074),
- REG(HSIO_S1G_RC_PLL_BIST_CFG, 0x000078),
- REG(HSIO_S1G_MISC_CFG, 0x00007c),
- REG(HSIO_S1G_DFT_STATUS, 0x000080),
- REG(HSIO_S1G_MISC_STATUS, 0x000084),
- REG(HSIO_MCB_S1G_ADDR_CFG, 0x000088),
- REG(HSIO_S6G_DIG_CFG, 0x00008c),
- REG(HSIO_S6G_DFT_CFG0, 0x000090),
- REG(HSIO_S6G_DFT_CFG1, 0x000094),
- REG(HSIO_S6G_DFT_CFG2, 0x000098),
- REG(HSIO_S6G_TP_CFG0, 0x00009c),
- REG(HSIO_S6G_TP_CFG1, 0x0000a0),
- REG(HSIO_S6G_RC_PLL_BIST_CFG, 0x0000a4),
- REG(HSIO_S6G_MISC_CFG, 0x0000a8),
- REG(HSIO_S6G_OB_ANEG_CFG, 0x0000ac),
- REG(HSIO_S6G_DFT_STATUS, 0x0000b0),
- REG(HSIO_S6G_ERR_CNT, 0x0000b4),
- REG(HSIO_S6G_MISC_STATUS, 0x0000b8),
- REG(HSIO_S6G_DES_CFG, 0x0000bc),
- REG(HSIO_S6G_IB_CFG, 0x0000c0),
- REG(HSIO_S6G_IB_CFG1, 0x0000c4),
- REG(HSIO_S6G_IB_CFG2, 0x0000c8),
- REG(HSIO_S6G_IB_CFG3, 0x0000cc),
- REG(HSIO_S6G_IB_CFG4, 0x0000d0),
- REG(HSIO_S6G_IB_CFG5, 0x0000d4),
- REG(HSIO_S6G_OB_CFG, 0x0000d8),
- REG(HSIO_S6G_OB_CFG1, 0x0000dc),
- REG(HSIO_S6G_SER_CFG, 0x0000e0),
- REG(HSIO_S6G_COMMON_CFG, 0x0000e4),
- REG(HSIO_S6G_PLL_CFG, 0x0000e8),
- REG(HSIO_S6G_ACJTAG_CFG, 0x0000ec),
- REG(HSIO_S6G_GP_CFG, 0x0000f0),
- REG(HSIO_S6G_IB_STATUS0, 0x0000f4),
- REG(HSIO_S6G_IB_STATUS1, 0x0000f8),
- REG(HSIO_S6G_ACJTAG_STATUS, 0x0000fc),
- REG(HSIO_S6G_PLL_STATUS, 0x000100),
- REG(HSIO_S6G_REVID, 0x000104),
- REG(HSIO_MCB_S6G_ADDR_CFG, 0x000108),
- REG(HSIO_HW_CFG, 0x00010c),
- REG(HSIO_HW_QSGMII_CFG, 0x000110),
- REG(HSIO_HW_QSGMII_STAT, 0x000114),
- REG(HSIO_CLK_CFG, 0x000118),
- REG(HSIO_TEMP_SENSOR_CTRL, 0x00011c),
- REG(HSIO_TEMP_SENSOR_CFG, 0x000120),
- REG(HSIO_TEMP_SENSOR_STAT, 0x000124),
-};
-
static const u32 ocelot_qsys_regmap[] = {
REG(QSYS_PORT_MODE, 0x011200),
REG(QSYS_SWITCH_PORT_MODE, 0x011234),
@@ -302,7 +227,6 @@ static const u32 ocelot_sys_regmap[] = {
static const u32 *ocelot_regmap[] = {
[ANA] = ocelot_ana_regmap,
[QS] = ocelot_qs_regmap,
- [HSIO] = ocelot_hsio_regmap,
[QSYS] = ocelot_qsys_regmap,
[REW] = ocelot_rew_regmap,
[SYS] = ocelot_sys_regmap,
@@ -453,9 +377,11 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
/* Configure PLL5. This will need a proper CCF driver.
* The values come from the VTSS API for Ocelot.
*/
- ocelot_write(ocelot, HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
- HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8), HSIO_PLL5G_CFG4);
- ocelot_write(ocelot, HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4,
+ HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
+ HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8));
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0,
+ HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
HSIO_PLL5G_CFG0_ENA_BIAS |
HSIO_PLL5G_CFG0_ENA_VCO_BUF |
@@ -465,13 +391,14 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
HSIO_PLL5G_CFG0_SELBGV820(4) |
HSIO_PLL5G_CFG0_DIV4 |
HSIO_PLL5G_CFG0_ENA_CLKTREE |
- HSIO_PLL5G_CFG0_ENA_LANE, HSIO_PLL5G_CFG0);
- ocelot_write(ocelot, HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
+ HSIO_PLL5G_CFG0_ENA_LANE);
+ regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2,
+ HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
HSIO_PLL5G_CFG2_ENA_AMPCTRL |
HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
- HSIO_PLL5G_CFG2_AMPC_SEL(0x10), HSIO_PLL5G_CFG2);
+ HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
}
int ocelot_chip_init(struct ocelot *ocelot)
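[Editor's note] With the private regmap table gone, the HSIO block is reached through the syscon-style regmap stored in ocelot->targets[HSIO] rather than through ocelot_write(). A minimal sketch of the same access pattern, assuming only the standard regmap API (the helper name is illustrative, not part of the patch):

	#include <linux/regmap.h>

	/* Illustrative helper: update selected bits of an HSIO register via
	 * the shared regmap; regmap_update_bits() does the read-modify-write.
	 */
	static int hsio_rmw(struct regmap *hsio, unsigned int reg,
			    u32 mask, u32 val)
	{
		return regmap_update_bits(hsio, reg, mask, val);
	}

	/* Full-register writes, as in ocelot_pll5_init() above, stay plain:
	 * regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4, cfg4_val);
	 */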
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index b8983e73265a..82be90075695 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -75,6 +75,7 @@
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/tcp.h>
@@ -491,7 +492,7 @@ static struct pci_driver s2io_driver = {
};
/* A simplifier macro used both by init and free shared_mem Fns(). */
-#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
+#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
/* netqueue manipulation helper functions */
static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
@@ -3679,11 +3680,9 @@ static void restore_xmsi_data(struct s2io_nic *nic)
writeq(nic->msix_info[i].data, &bar0->xmsi_data);
val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
writeq(val64, &bar0->xmsi_access);
- if (wait_for_msix_trans(nic, msix_index)) {
+ if (wait_for_msix_trans(nic, msix_index))
DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
__func__, msix_index);
- continue;
- }
}
}
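[Editor's note] For reference, DIV_ROUND_UP() is the standard kernel ceiling-division helper from <linux/kernel.h>; unlike the old open-coded TXD_MEM_PAGE_CNT() body, it also fully parenthesizes its arguments. A simplified rendering:

	/* Simplified form of the <linux/kernel.h> helper: divide n by d,
	 * rounding up to the next whole unit.
	 */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* e.g. DIV_ROUND_UP(10, 4) == 3, matching the old open-coded math */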
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index 1a24a7218794..0a921f30f98f 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -10,6 +10,7 @@
* system is licensed under the GPL.
* See the file COPYING in this distribution for more information.
************************************************************************/
+#include <linux/io-64-nonatomic-lo-hi.h>
#ifndef _S2IO_H
#define _S2IO_H
@@ -970,27 +971,6 @@ struct s2io_nic {
#define RESET_ERROR 1
#define CMD_ERROR 2
-/* OS related system calls */
-#ifndef readq
-static inline u64 readq(void __iomem *addr)
-{
- u64 ret = 0;
- ret = readl(addr + 4);
- ret <<= 32;
- ret |= readl(addr);
-
- return ret;
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(u64 val, void __iomem *addr)
-{
- writel((u32) (val), addr);
- writel((u32) (val >> 32), (addr + 4));
-}
-#endif
-
/*
* Some registers have to be written in a particular order to
* expect correct hardware operation. The macro SPECIAL_REG_WRITE
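[Editor's note] Both neterion drivers drop their private readq()/writeq() fallbacks in favor of <linux/io-64-nonatomic-lo-hi.h>, which supplies equivalent helpers on platforms without native 64-bit MMIO by splitting each access into two 32-bit ones, low word first. A sketch of the lo-hi semantics (not the kernel's literal code):

	#include <linux/io.h>

	/* Sketch only: the lo-hi variants access the low 32 bits first,
	 * then the high 32 bits, and combine them into one 64-bit value.
	 */
	static inline u64 sketch_lo_hi_readq(const volatile void __iomem *addr)
	{
		u32 low = readl(addr);
		u32 high = readl(addr + 4);

		return low + ((u64)high << 32);
	}

	static inline void sketch_lo_hi_writeq(u64 val,
					       volatile void __iomem *addr)
	{
		writel(val, addr);		/* low word first... */
		writel(val >> 32, addr + 4);	/* ...then high word */
	}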
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 398011c87643..4c1fb7e57888 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -13,6 +13,7 @@
******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index d743a37a3cee..e678ba379598 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -2011,26 +2011,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
-#ifndef readq
-static inline u64 readq(void __iomem *addr)
-{
- u64 ret = 0;
- ret = readl(addr + 4);
- ret <<= 32;
- ret |= readl(addr);
-
- return ret;
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(u64 val, void __iomem *addr)
-{
- writel((u32) (val), addr);
- writel((u32) (val >> 32), (addr + 4));
-}
-#endif
-
static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
writel(val, addr + 4);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 0c3b5dea2858..f7a0d1d5885e 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -12,6 +12,7 @@
* Copyright(c) 2002-2010 Exar Corp.
******************************************************************************/
#include <linux/etherdevice.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/prefetch.h>
#include "vxge-traffic.h"
@@ -2261,7 +2262,7 @@ void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
- if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
+ if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
__vxge_hw_pio_mem_write32_upper(
(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index b157ccd8c80f..3c661f422688 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -1,36 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/kernel.h>
@@ -55,30 +24,21 @@
#define NFP_QMSTAT_DROP 16
#define NFP_QMSTAT_ECN 24
-static unsigned long long
-nfp_abm_q_lvl_thrs(struct nfp_abm_link *alink, unsigned int queue)
-{
- return alink->abm->q_lvls->addr +
- (alink->queue_base + queue) * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
-}
-
static int
nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
unsigned int stride, unsigned int offset, unsigned int i,
bool is_u64, u64 *res)
{
struct nfp_cpp *cpp = alink->abm->app->cpp;
- u32 val32, mur;
- u64 val, addr;
+ u64 val, sym_offset;
+ u32 val32;
int err;
- mur = NFP_CPP_ATOMIC_RD(sym->target, sym->domain);
-
- addr = sym->addr + (alink->queue_base + i) * stride + offset;
+ sym_offset = (alink->queue_base + i) * stride + offset;
if (is_u64)
- err = nfp_cpp_readq(cpp, mur, addr, &val);
+ err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val);
else
- err = nfp_cpp_readl(cpp, mur, addr, &val32);
+ err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32);
if (err) {
nfp_err(cpp,
"RED offload reading stat failed on vNIC %d queue %d\n",
@@ -114,13 +74,12 @@ nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val)
{
struct nfp_cpp *cpp = alink->abm->app->cpp;
- u32 muw;
+ u64 sym_offset;
int err;
- muw = NFP_CPP_ATOMIC_WR(alink->abm->q_lvls->target,
- alink->abm->q_lvls->domain);
-
- err = nfp_cpp_writel(cpp, muw, nfp_abm_q_lvl_thrs(alink, i), val);
+ sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
+ err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0,
+ sym_offset, val);
if (err) {
nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n",
alink->id, i);
@@ -290,10 +249,10 @@ nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size)
nfp_err(pf->cpp, "Symbol '%s' not found\n", name);
return ERR_PTR(-ENOENT);
}
- if (sym->size != size) {
+ if (nfp_rtsym_size(sym) != size) {
nfp_err(pf->cpp,
"Symbol '%s' wrong size: expected %u got %llu\n",
- name, size, sym->size);
+ name, size, nfp_rtsym_size(sym));
return ERR_PTR(-EINVAL);
}
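[Editor's note] The hand-rolled address math and NFP_CPP_ATOMIC_RD() encoding give way to the __nfp_rtsym_readl()/__nfp_rtsym_readq() accessors, which take a symbol-relative offset and resolve the CPP target themselves; nfp_rtsym_size() likewise replaces direct sym->size reads now that the size can depend on the symbol type. A hedged sketch of the resulting call shape (the wrapper name is illustrative):

	/* Illustrative wrapper: read one 64-bit per-queue counter relative
	 * to a run-time symbol. Action 3 / token 0 mirror the atomic-read
	 * encoding used in nfp_abm_ctrl_stat() above.
	 */
	static int read_q_counter(struct nfp_cpp *cpp,
				  const struct nfp_rtsym *sym,
				  unsigned int queue, unsigned int stride,
				  unsigned int offset, u64 *res)
	{
		u64 sym_offset = (u64)queue * stride + offset;

		return __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, res);
	}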
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index b84a6c2d387b..c0830c0c2c3f 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -1,36 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/etherdevice.h>
@@ -540,8 +509,9 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
{
struct nfp_eth_table_port *eth_port = &pf->eth_tbl->ports[id];
u8 mac_addr[ETH_ALEN];
- const char *mac_str;
- char name[32];
+ struct nfp_nsp *nsp;
+ char hwinfo[32];
+ int err;
if (id > pf->eth_tbl->count) {
nfp_warn(pf->cpp, "No entry for persistent MAC address\n");
@@ -549,22 +519,37 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
return;
}
- snprintf(name, sizeof(name), "eth%u.mac.pf%u",
+ snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac.pf%u",
eth_port->eth_index, abm->pf_id);
- mac_str = nfp_hwinfo_lookup(pf->hwinfo, name);
- if (!mac_str) {
- nfp_warn(pf->cpp, "Can't lookup persistent MAC address (%s)\n",
- name);
+ nsp = nfp_nsp_open(pf->cpp);
+ if (IS_ERR(nsp)) {
+ nfp_warn(pf->cpp, "Failed to access the NSP for persistent MAC address: %ld\n",
+ PTR_ERR(nsp));
+ eth_hw_addr_random(nn->dp.netdev);
+ return;
+ }
+
+ if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
+ nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
+ eth_hw_addr_random(nn->dp.netdev);
+ return;
+ }
+
+ err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo));
+ nfp_nsp_close(nsp);
+ if (err) {
+ nfp_warn(pf->cpp, "Reading persistent MAC address failed: %d\n",
+ err);
eth_hw_addr_random(nn->dp.netdev);
return;
}
- if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
&mac_addr[0], &mac_addr[1], &mac_addr[2],
&mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
nfp_warn(pf->cpp, "Can't parse persistent MAC address (%s)\n",
- mac_str);
+ hwinfo);
eth_hw_addr_random(nn->dp.netdev);
return;
}
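[Editor's note] The lookup now has three distinct failure points -- NSP open, capability check, and the hwinfo command itself -- and each one falls back to a random MAC so the vNIC still probes. A condensed sketch of the parse-or-randomize tail (hypothetical helper, standard kernel APIs):

	#include <linux/etherdevice.h>

	/* Hypothetical helper: apply a "xx:xx:xx:xx:xx:xx" string to the
	 * netdev, or fall back to a random locally administered address.
	 */
	static void apply_mac_or_random(struct net_device *netdev,
					const char *str)
	{
		u8 mac[ETH_ALEN];

		if (!str || !mac_pton(str, mac)) {
			eth_hw_addr_random(netdev);
			return;
		}
		ether_addr_copy(netdev->dev_addr, mac);
	}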
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index 934a70835473..f907b7d98917 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -1,36 +1,5 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __NFP_ABM_H__
#define __NFP_ABM_H__ 1
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 2572a4b91c7c..9b6cfa697879 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bpf.h>
#include <linux/bitops.h>
@@ -89,15 +59,32 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
return skb;
}
+static unsigned int
+nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
+{
+ unsigned int size;
+
+ size = sizeof(struct cmsg_req_map_op);
+ size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
+
+ return size;
+}
+
static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
+ return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
+}
+
+static unsigned int
+nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
+{
unsigned int size;
- size = sizeof(struct cmsg_req_map_op);
- size += sizeof(struct cmsg_key_value_pair) * n;
+ size = sizeof(struct cmsg_reply_map_op);
+ size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
- return nfp_bpf_cmsg_alloc(bpf, size);
+ return size;
}
static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
@@ -338,6 +325,34 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
dev_consume_skb_any(skb);
}
+static void *
+nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+ unsigned int n)
+{
+ return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+ unsigned int n)
+{
+ return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+ unsigned int n)
+{
+ return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+ unsigned int n)
+{
+ return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
enum nfp_bpf_cmsg_type op,
@@ -366,12 +381,13 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
/* Copy inputs */
if (key)
- memcpy(&req->elem[0].key, key, map->key_size);
+ memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
if (value)
- memcpy(&req->elem[0].value, value, map->value_size);
+ memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
+ map->value_size);
skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
- sizeof(*reply) + sizeof(*reply->elem));
+ nfp_bpf_cmsg_map_reply_size(bpf, 1));
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -382,9 +398,11 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
/* Copy outputs */
if (out_key)
- memcpy(out_key, &reply->elem[0].key, map->key_size);
+ memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
+ map->key_size);
if (out_value)
- memcpy(out_value, &reply->elem[0].value, map->value_size);
+ memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
+ map->value_size);
dev_consume_skb_any(skb);
@@ -428,6 +446,13 @@ int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
key, NULL, 0, next_key, NULL);
}
+unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
+{
+ return max3((unsigned int)NFP_NET_DEFAULT_MTU,
+ nfp_bpf_cmsg_map_req_size(bpf, 1),
+ nfp_bpf_cmsg_map_reply_size(bpf, 1));
+}
+
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_app_bpf *bpf = app->priv;
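[Editor's note] Since ABI version 3 negotiates the key/value sizes at runtime (see the main.c hunks further down), the fixed cmsg_key_value_pair array becomes a flat data[] blob: element n starts after n complete (key, value) pairs, and each value immediately follows its key -- exactly the arithmetic in the four accessors above. A worked sketch under assumed sizes:

	/* Offset math for element n in the flat data[] array (sketch),
	 * assuming cmsg_key_sz = 8 and cmsg_val_sz = 16 bytes.
	 */
	static inline unsigned int key_off(unsigned int key_sz,
					   unsigned int val_sz, unsigned int n)
	{
		return (key_sz + val_sz) * n;	/* key 0 at 0, key 1 at 24 */
	}

	static inline unsigned int val_off(unsigned int key_sz,
					   unsigned int val_sz, unsigned int n)
	{
		return key_off(key_sz, val_sz, n) + key_sz; /* val 1 at 32 */
	}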
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index e4f9b7ec8528..721921bcf120 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef NFP_BPF_FW_H
#define NFP_BPF_FW_H 1
@@ -52,6 +22,7 @@ enum bpf_cap_tlv_type {
NFP_BPF_CAP_TYPE_RANDOM = 4,
NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5,
NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6,
+ NFP_BPF_CAP_TYPE_ABI_VERSION = 7,
};
struct nfp_bpf_cap_tlv_func {
@@ -98,6 +69,7 @@ enum nfp_bpf_cmsg_type {
#define CMSG_TYPE_MAP_REPLY_BIT 7
#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
+/* BPF ABIv2 fixed-length control message fields */
#define CMSG_MAP_KEY_LW 16
#define CMSG_MAP_VALUE_LW 16
@@ -147,24 +119,19 @@ struct cmsg_reply_map_free_tbl {
__be32 count;
};
-struct cmsg_key_value_pair {
- __be32 key[CMSG_MAP_KEY_LW];
- __be32 value[CMSG_MAP_VALUE_LW];
-};
-
struct cmsg_req_map_op {
struct cmsg_hdr hdr;
__be32 tid;
__be32 count;
__be32 flags;
- struct cmsg_key_value_pair elem[0];
+ u8 data[0];
};
struct cmsg_reply_map_op {
struct cmsg_reply_map_simple reply_hdr;
__be32 count;
__be32 resv;
- struct cmsg_key_value_pair elem[0];
+ u8 data[0];
};
struct cmsg_bpf_event {
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index eff57f7d056a..97d33bb4d84d 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#define pr_fmt(fmt) "NFP net bpf: " fmt
@@ -267,6 +237,38 @@ emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
}
static void
+__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
+ u8 defer, bool dst_lmextn, bool src_lmextn)
+{
+ u64 insn;
+
+ insn = OP_BR_ALU_BASE |
+ FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
+ FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
+ FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
+ FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
+ FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
+ reg.src_lmextn);
+}
+
+static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
enum immed_width width, bool invert,
enum immed_shift shift, bool wr_both,
@@ -1137,7 +1139,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
bool clr_gpr, lmem_step step)
{
- s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
+ s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
bool first = true, last;
bool needs_inc = false;
swreg stack_off_reg;
@@ -1146,7 +1148,8 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
bool lm3 = true;
int ret;
- if (meta->ptr_not_const) {
+ if (meta->ptr_not_const ||
+ meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
/* Use of the last encountered ptr_off is OK, they all have
* the same alignment. Depend on low bits of value being
* discarded when written to LMaddr register.
@@ -1695,7 +1698,7 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
s64 lm_off;
/* We only have to reload LM0 if the key is not at start of stack */
- lm_off = nfp_prog->stack_depth;
+ lm_off = nfp_prog->stack_frame_depth;
lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
load_lm_ptr = meta->arg2.var_off || lm_off;
@@ -1808,10 +1811,10 @@ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
swreg stack_depth_reg;
stack_depth_reg = ur_load_imm_any(nfp_prog,
- nfp_prog->stack_depth,
+ nfp_prog->stack_frame_depth,
stack_imm(nfp_prog));
- emit_alu(nfp_prog, reg_both(dst),
- stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
+ emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
+ ALU_OP_ADD, stack_depth_reg);
wrp_immed(nfp_prog, reg_both(dst + 1), 0);
} else {
wrp_reg_mov(nfp_prog, dst, src);
@@ -3081,7 +3084,93 @@ static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
}
-static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int
+bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u32 ret_tgt, stack_depth, offset_br;
+ swreg tmp_reg;
+
+ stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN);
+ /* Space for saving the return address is accounted for by the callee,
+ * so stack_depth can be zero for the main function.
+ */
+ if (stack_depth) {
+ tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
+ stack_imm(nfp_prog));
+ emit_alu(nfp_prog, stack_reg(nfp_prog),
+ stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg);
+ emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
+ NFP_CSR_ACT_LM_ADDR0);
+ }
+
+ /* Two cases for jumping to the callee:
+ *
+ * - If callee uses and needs to save R6~R9 then:
+ * 1. Put the start offset of the callee into imm_b(). This will
+ * require a fixup step, as we do not necessarily know this
+ * address yet.
+ * 2. Put the return address from the callee to the caller into
+ * register ret_reg().
+ * 3. (After defer slots are consumed) Jump to the subroutine that
+ * pushes the registers to the stack.
+ * The subroutine acts as a trampoline, and returns to the address in
+ * imm_b(), i.e. jumps to the callee.
+ *
+ * - If callee does not need to save R6~R9 then just load return
+ * address to the caller in ret_reg(), and jump to the callee
+ * directly.
+ *
+ * Using ret_reg() to pass the return address to the callee is the
+ * convention adopted here. The callee can then push this address onto
+ * its stack frame in its prologue. The advantages of passing the return
+ * address through ret_reg(), instead of pushing it to the stack right
+ * here, are the following:
+ * - It looks cleaner.
+ * - If the called function is called multiple times, we get a lower
+ * program size.
+ * - We save the two no-op instructions that would otherwise have to be
+ * added just before the emit_br() when stack depth is non-zero.
+ * - If we ever find a register to hold the return address during the
+ * whole execution of the callee, we will not have to push the return
+ * address to the stack for leaf functions.
+ */
+ if (!meta->jmp_dst) {
+ pr_err("BUG: BPF-to-BPF call has no destination recorded\n");
+ return -ELOOP;
+ }
+ if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) {
+ ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2,
+ RELO_BR_GO_CALL_PUSH_REGS);
+ offset_br = nfp_prog_current_offset(nfp_prog);
+ wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
+ } else {
+ ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
+ emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1);
+ offset_br = nfp_prog_current_offset(nfp_prog);
+ }
+ wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
+
+ if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
+ return -EINVAL;
+
+ if (stack_depth) {
+ tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
+ stack_imm(nfp_prog));
+ emit_alu(nfp_prog, stack_reg(nfp_prog),
+ stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
+ emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
+ NFP_CSR_ACT_LM_ADDR0);
+ wrp_nops(nfp_prog, 3);
+ }
+
+ meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog);
+ meta->num_insns_after_br -= offset_br;
+
+ return 0;
+}
+
+static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
switch (meta->insn.imm) {
case BPF_FUNC_xdp_adjust_head:
@@ -3102,6 +3191,19 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
}
}
+static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (is_mbpf_pseudo_call(meta))
+ return bpf_to_bpf_call(nfp_prog, meta);
+ else
+ return helper_call(nfp_prog, meta);
+}
+
+static bool nfp_is_main_function(struct nfp_insn_meta *meta)
+{
+ return meta->subprog_idx == 0;
+}
+
static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
@@ -3109,6 +3211,39 @@ static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int
+nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) {
+ /* Pop R6~R9 to the stack via related subroutine.
+ * We loaded the return address to the caller into ret_reg().
+ * This means that the subroutine does not come back here, we
+ * make it jump back to the subprogram caller directly!
+ */
+ emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1,
+ RELO_BR_GO_CALL_POP_REGS);
+ /* Pop return address from the stack. */
+ wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
+ } else {
+ /* Pop return address from the stack. */
+ wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
+ /* Jump back to caller if no callee-saved registers were used
+ * by the subprogram.
+ */
+ emit_rtn(nfp_prog, ret_reg(nfp_prog), 0);
+ }
+
+ return 0;
+}
+
+static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (nfp_is_main_function(meta))
+ return goto_out(nfp_prog, meta);
+ else
+ return nfp_subprog_epilogue(nfp_prog, meta);
+}
+
static const instr_cb_t instr_cb[256] = {
[BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
[BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
@@ -3197,36 +3332,66 @@ static const instr_cb_t instr_cb[256] = {
[BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
[BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
[BPF_JMP | BPF_CALL] = call,
- [BPF_JMP | BPF_EXIT] = goto_out,
+ [BPF_JMP | BPF_EXIT] = jmp_exit,
};
/* --- Assembler logic --- */
+static int
+nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct nfp_insn_meta *jmp_dst, u32 br_idx)
+{
+ if (immed_get_value(nfp_prog->prog[br_idx + 1])) {
+ pr_err("BUG: failed to fix up callee register saving\n");
+ return -EINVAL;
+ }
+
+ immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off);
+
+ return 0;
+}
+
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta, *jmp_dst;
u32 idx, br_idx;
+ int err;
list_for_each_entry(meta, &nfp_prog->insns, l) {
if (meta->skip)
continue;
- if (meta->insn.code == (BPF_JMP | BPF_CALL))
- continue;
if (BPF_CLASS(meta->insn.code) != BPF_JMP)
continue;
+ if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
+ !nfp_is_main_function(meta))
+ continue;
+ if (is_mbpf_helper_call(meta))
+ continue;
if (list_is_last(&meta->l, &nfp_prog->insns))
br_idx = nfp_prog->last_bpf_off;
else
br_idx = list_next_entry(meta, l)->off - 1;
+ /* For a BPF-to-BPF function call, a stack adjustment sequence is
+ * generated after the return instruction. Therefore, we must
+ * subtract the length of this sequence so that br_idx points
+ * to where the "branch" NFP instruction is expected to be.
+ */
+ if (is_mbpf_pseudo_call(meta))
+ br_idx -= meta->num_insns_after_br;
+
if (!nfp_is_br(nfp_prog->prog[br_idx])) {
pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
return -ELOOP;
}
+
+ if (meta->insn.code == (BPF_JMP | BPF_EXIT))
+ continue;
+
/* Leave special branches for later */
if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
- RELO_BR_REL)
+ RELO_BR_REL && !is_mbpf_pseudo_call(meta))
continue;
if (!meta->jmp_dst) {
@@ -3241,6 +3406,18 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
return -ELOOP;
}
+ if (is_mbpf_pseudo_call(meta) &&
+ nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) {
+ err = nfp_fixup_immed_relo(nfp_prog, meta,
+ jmp_dst, br_idx);
+ if (err)
+ return err;
+ }
+
+ if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
+ RELO_BR_REL)
+ continue;
+
for (idx = meta->off; idx <= br_idx; idx++) {
if (!nfp_is_br(nfp_prog->prog[idx]))
continue;
@@ -3258,6 +3435,27 @@ static void nfp_intro(struct nfp_prog *nfp_prog)
plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}
+static void
+nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ /* Save return address into the stack. */
+ wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog));
+}
+
+static void
+nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth;
+
+ nfp_prog->stack_frame_depth = round_up(depth, 4);
+ nfp_subprog_prologue(nfp_prog, meta);
+}
+
+bool nfp_is_subprog_start(struct nfp_insn_meta *meta)
+{
+ return meta->flags & FLAG_INSN_IS_SUBPROG_START;
+}
+
static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{
/* TC direct-action mode:
@@ -3348,6 +3546,67 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
+static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog)
+{
+ unsigned int idx;
+
+ for (idx = 1; idx < nfp_prog->subprog_cnt; idx++)
+ if (nfp_prog->subprog[idx].needs_reg_push)
+ return true;
+
+ return false;
+}
+
+static void nfp_push_callee_registers(struct nfp_prog *nfp_prog)
+{
+ u8 reg;
+
+ /* Subroutine: Save all callee saved registers (R6 ~ R9).
+ * imm_b() holds the return address.
+ */
+ nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog);
+ for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
+ u8 adj = (reg - BPF_REG_0) * 2;
+ u8 idx = (reg - BPF_REG_6) * 2;
+
+ /* The first slot in the stack frame is used to push the return
+ * address in bpf_to_bpf_call(), start just after.
+ */
+ wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj));
+
+ if (reg == BPF_REG_8)
+ /* Prepare to jump back, last 3 insns use defer slots */
+ emit_rtn(nfp_prog, imm_b(nfp_prog), 3);
+
+ wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1));
+ }
+}
+
+static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog)
+{
+ u8 reg;
+
+ /* Subroutine: Restore all callee saved registers (R6 ~ R9).
+ * ret_reg() holds the return address.
+ */
+ nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog);
+ for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
+ u8 adj = (reg - BPF_REG_0) * 2;
+ u8 idx = (reg - BPF_REG_6) * 2;
+
+ /* The first slot in the stack frame holds the return address,
+ * start popping just after that.
+ */
+ wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx));
+
+ if (reg == BPF_REG_8)
+ /* Prepare to jump back, last 3 insns use defer slots */
+ emit_rtn(nfp_prog, ret_reg(nfp_prog), 3);
+
+ wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1));
+ }
+}
+
static void nfp_outro(struct nfp_prog *nfp_prog)
{
switch (nfp_prog->type) {
@@ -3360,13 +3619,23 @@ static void nfp_outro(struct nfp_prog *nfp_prog)
default:
WARN_ON(1);
}
+
+ if (!nfp_prog_needs_callee_reg_save(nfp_prog))
+ return;
+
+ nfp_push_callee_registers(nfp_prog);
+ nfp_pop_callee_registers(nfp_prog);
}
static int nfp_translate(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta;
+ unsigned int depth;
int err;
+ depth = nfp_prog->subprog[0].stack_depth;
+ nfp_prog->stack_frame_depth = round_up(depth, 4);
+
nfp_intro(nfp_prog);
if (nfp_prog->error)
return nfp_prog->error;
@@ -3376,6 +3645,12 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
meta->off = nfp_prog_current_offset(nfp_prog);
+ if (nfp_is_subprog_start(meta)) {
+ nfp_start_subprog(nfp_prog, meta);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+ }
+
if (meta->skip) {
nfp_prog->n_translated++;
continue;
@@ -4018,20 +4293,35 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
/* Another pass to record jump information. */
list_for_each_entry(meta, &nfp_prog->insns, l) {
+ struct nfp_insn_meta *dst_meta;
u64 code = meta->insn.code;
+ unsigned int dst_idx;
+ bool pseudo_call;
+
+ if (BPF_CLASS(code) != BPF_JMP)
+ continue;
+ if (BPF_OP(code) == BPF_EXIT)
+ continue;
+ if (is_mbpf_helper_call(meta))
+ continue;
- if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
- BPF_OP(code) != BPF_CALL) {
- struct nfp_insn_meta *dst_meta;
- unsigned short dst_indx;
+ /* If opcode is BPF_CALL at this point, this can only be a
+ * BPF-to-BPF call (a.k.a. pseudo call).
+ */
+ pseudo_call = BPF_OP(code) == BPF_CALL;
- dst_indx = meta->n + 1 + meta->insn.off;
- dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
- cnt);
+ if (pseudo_call)
+ dst_idx = meta->n + 1 + meta->insn.imm;
+ else
+ dst_idx = meta->n + 1 + meta->insn.off;
- meta->jmp_dst = dst_meta;
- dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
- }
+ dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt);
+
+ if (pseudo_call)
+ dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;
+
+ dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
+ meta->jmp_dst = dst_meta;
}
}
@@ -4054,6 +4344,7 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
for (i = 0; i < nfp_prog->prog_len; i++) {
enum nfp_relo_type special;
u32 val;
+ u16 off;
special = FIELD_GET(OP_RELO_TYPE, prog[i]);
switch (special) {
@@ -4070,6 +4361,24 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
br_set_offset(&prog[i],
nfp_prog->tgt_abort + bv->start_off);
break;
+ case RELO_BR_GO_CALL_PUSH_REGS:
+ if (!nfp_prog->tgt_call_push_regs) {
+ pr_err("BUG: failed to detect subprogram registers needs\n");
+ err = -EINVAL;
+ goto err_free_prog;
+ }
+ off = nfp_prog->tgt_call_push_regs + bv->start_off;
+ br_set_offset(&prog[i], off);
+ break;
+ case RELO_BR_GO_CALL_POP_REGS:
+ if (!nfp_prog->tgt_call_pop_regs) {
+ pr_err("BUG: failed to detect subprogram registers needs\n");
+ err = -EINVAL;
+ goto err_free_prog;
+ }
+ off = nfp_prog->tgt_call_pop_regs + bv->start_off;
+ br_set_offset(&prog[i], off);
+ break;
case RELO_BR_NEXT_PKT:
br_set_offset(&prog[i], bv->tgt_done);
break;
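[Editor's note] Taken together, the two new relocation types wire every pseudo call to the push/pop subroutines emitted once in the outro. A compressed, assumed-order summary of the resulting instruction flow (commentary only, not literal NFP assembly):

	/*
	 * Assumed flow for one BPF-to-BPF pseudo call:
	 *
	 * caller:	LM_ADDR0 += frame size	// enter callee stack frame
	 *		imm_b = callee offset	// fixed up via the immed relo
	 *		ret_reg = return address
	 *		branch push-regs subroutine // only if callee clobbers
	 *					    // R6~R9, else branch callee
	 * prologue:	LM[0] = ret_reg		// spill return address
	 * epilogue:	ret_reg = LM[0]		// reload return address
	 *		branch pop-regs subroutine, or rtn via ret_reg
	 * caller:	LM_ADDR0 -= frame size	// back to caller stack frame
	 */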
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 970af07f4656..6243af0ab025 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <net/pkt_cls.h>
@@ -54,11 +24,14 @@ const struct rhashtable_params nfp_bpf_maps_neutral_params = {
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
#ifdef __LITTLE_ENDIAN
- if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
- nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
- return true;
-#endif
+ struct nfp_app_bpf *bpf = nn->app->priv;
+
+ return nn->cap & NFP_NET_CFG_CTRL_BPF &&
+ bpf->abi_version &&
+ nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version;
+#else
return false;
+#endif
}
static int
@@ -342,6 +315,26 @@ nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
return 0;
}
+static int
+nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
+ u32 length)
+{
+ if (length < 4) {
+ nfp_err(bpf->app->cpp, "truncated ABI version TLV: %d\n",
+ length);
+ return -EINVAL;
+ }
+
+ bpf->abi_version = readl(value);
+ if (bpf->abi_version < 2 || bpf->abi_version > 3) {
+ nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
+ bpf->abi_version);
+ bpf->abi_version = 0;
+ }
+
+ return 0;
+}
+
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
struct nfp_cpp *cpp = app->pf->cpp;
@@ -393,6 +386,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
length))
goto err_release_free;
break;
+ case NFP_BPF_CAP_TYPE_ABI_VERSION:
+ if (nfp_bpf_parse_cap_abi_version(app->priv, value,
+ length))
+ goto err_release_free;
+ break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
@@ -414,6 +412,11 @@ err_release_free:
return -EINVAL;
}
+static void nfp_bpf_init_capabilities(struct nfp_app_bpf *bpf)
+{
+ bpf->abi_version = 2; /* Original BPF ABI version */
+}
+
static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
{
struct nfp_app_bpf *bpf = app->priv;
@@ -447,10 +450,21 @@ static int nfp_bpf_init(struct nfp_app *app)
if (err)
goto err_free_bpf;
+ nfp_bpf_init_capabilities(bpf);
+
err = nfp_bpf_parse_capabilities(app);
if (err)
goto err_free_neutral_maps;
+ if (bpf->abi_version < 3) {
+ bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4;
+ bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4;
+ } else {
+ bpf->cmsg_key_sz = bpf->maps.max_key_sz;
+ bpf->cmsg_val_sz = bpf->maps.max_val_sz;
+ app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
+ }
+
bpf->bpf_dev = bpf_offload_dev_create();
err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
if (err)
@@ -465,11 +479,6 @@ err_free_bpf:
return err;
}
-static void nfp_check_rhashtable_empty(void *ptr, void *arg)
-{
- WARN_ON_ONCE(1);
-}
-
static void nfp_bpf_clean(struct nfp_app *app)
{
struct nfp_app_bpf *bpf = app->priv;
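[Editor's note] The device advertises its BPF ABI version through a capability TLV; the driver defaults to version 2 and only enables the variable-size map messages for version 3. A simplified sketch of the version gate, folding the parse and init logic above into one function (error paths collapsed into a 0 return, meaning "BPF offload unavailable"):

	/* Sketch: default to ABI 2 when the TLV is absent; otherwise accept
	 * only versions 2 or 3. The value area is device memory, hence
	 * readl() rather than a plain dereference.
	 */
	static u32 pick_abi_version(void __iomem *tlv_value, u32 tlv_length)
	{
		u32 ver;

		if (!tlv_value)
			return 2;		/* no TLV: original ABI */
		if (tlv_length < sizeof(u32))
			return 0;		/* truncated TLV */

		ver = readl(tlv_value);
		return (ver >= 2 && ver <= 3) ? ver : 0;
	}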
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index dbd00982fd2b..7f591d71ab28 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1
@@ -61,6 +31,8 @@ enum nfp_relo_type {
/* internal jumps to parts of the outro */
RELO_BR_GO_OUT,
RELO_BR_GO_ABORT,
+ RELO_BR_GO_CALL_PUSH_REGS,
+ RELO_BR_GO_CALL_POP_REGS,
/* external jumps to fixed addresses */
RELO_BR_NEXT_PKT,
RELO_BR_HELPER,
@@ -104,6 +76,7 @@ enum pkt_vec {
#define imma_a(np) reg_a(STATIC_REG_IMMA)
#define imma_b(np) reg_b(STATIC_REG_IMMA)
#define imm_both(np) reg_both(STATIC_REG_IMM)
+#define ret_reg(np) imm_a(np)
#define NFP_BPF_ABI_FLAGS reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK 1
@@ -121,12 +94,17 @@ enum pkt_vec {
* @cmsg_replies: received cmsg replies waiting to be consumed
* @cmsg_wq: work queue for waiting for cmsg replies
*
+ * @cmsg_key_sz: size of key in cmsg element array
+ * @cmsg_val_sz: size of value in cmsg element array
+ *
* @map_list: list of offloaded maps
* @maps_in_use: number of currently offloaded maps
* @map_elems_in_use: number of elements allocated to offloaded maps
*
* @maps_neutral: hash table of offload-neutral maps (on pointer)
*
+ * @abi_version: global BPF ABI version
+ *
* @adjust_head: adjust head capability
* @adjust_head.flags: extra flags for adjust head
* @adjust_head.off_min: minimal packet offset within buffer required
@@ -164,12 +142,17 @@ struct nfp_app_bpf {
struct sk_buff_head cmsg_replies;
struct wait_queue_head cmsg_wq;
+ unsigned int cmsg_key_sz;
+ unsigned int cmsg_val_sz;
+
struct list_head map_list;
unsigned int maps_in_use;
unsigned int map_elems_in_use;
struct rhashtable maps_neutral;
+ u32 abi_version;
+
struct nfp_bpf_cap_adjust_head {
u32 flags;
int off_min;
@@ -206,6 +189,11 @@ enum nfp_bpf_map_use {
NFP_MAP_USE_ATOMIC_CNT,
};
+struct nfp_bpf_map_word {
+ unsigned char type :4;
+ unsigned char non_zero_update :1;
+};
+
/**
* struct nfp_bpf_map - private per-map data attached to BPF maps for offload
* @offmap: pointer to the offloaded BPF map
@@ -219,7 +207,7 @@ struct nfp_bpf_map {
struct nfp_app_bpf *bpf;
u32 tid;
struct list_head l;
- enum nfp_bpf_map_use use_map[];
+ struct nfp_bpf_map_word use_map[];
};
struct nfp_bpf_neutral_map {
@@ -252,7 +240,9 @@ struct nfp_bpf_reg_state {
bool var_off;
};
-#define FLAG_INSN_IS_JUMP_DST BIT(0)
+#define FLAG_INSN_IS_JUMP_DST BIT(0)
+#define FLAG_INSN_IS_SUBPROG_START BIT(1)
+#define FLAG_INSN_PTR_CALLER_STACK_FRAME BIT(2)
/**
* struct nfp_insn_meta - BPF instruction wrapper
@@ -269,6 +259,7 @@ struct nfp_bpf_reg_state {
* @xadd_maybe_16bit: 16bit immediate is possible
* @jmp_dst: destination info for jump instructions
* @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
+ * @num_insns_after_br: number of insns following a branch jump, used for fixup
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
@@ -279,6 +270,7 @@ struct nfp_bpf_reg_state {
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
+ * @subprog_idx: index of subprogram to which the instruction belongs
* @skip: skip this instruction (optimized out)
* @double_cb: callback for second part of the instruction
* @l: link on nfp_prog->insns list
@@ -304,6 +296,7 @@ struct nfp_insn_meta {
struct {
struct nfp_insn_meta *jmp_dst;
bool jump_neg_op;
+ u32 num_insns_after_br; /* only for BPF-to-BPF calls */
};
/* function calls */
struct {
@@ -325,6 +318,7 @@ struct nfp_insn_meta {
unsigned int off;
unsigned short n;
unsigned short flags;
+ unsigned short subprog_idx;
bool skip;
instr_cb_t double_cb;
@@ -413,23 +407,56 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}
+static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
+{
+ struct bpf_insn insn = meta->insn;
+
+ return insn.code == (BPF_JMP | BPF_CALL) &&
+ insn.src_reg != BPF_PSEUDO_CALL;
+}
+
+static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta)
+{
+ struct bpf_insn insn = meta->insn;
+
+ return insn.code == (BPF_JMP | BPF_CALL) &&
+ insn.src_reg == BPF_PSEUDO_CALL;
+}
+
+#define STACK_FRAME_ALIGN 64
+
+/**
+ * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info
+ * @stack_depth: maximum stack depth used by this sub-program
+ * @needs_reg_push: whether sub-program uses callee-saved registers
+ */
+struct nfp_bpf_subprog_info {
+ u16 stack_depth;
+ u8 needs_reg_push : 1;
+};
+
/**
* struct nfp_prog - nfp BPF program
* @bpf: backpointer to the bpf app priv structure
* @prog: machine code
* @prog_len: number of valid instructions in @prog array
* @__prog_alloc_len: alloc size of @prog array
+ * @stack_size: total amount of stack used
* @verifier_meta: temporary storage for verifier's insn meta
* @type: BPF program type
* @last_bpf_off: address of the last instruction translated from BPF
* @tgt_out: jump target for normal exit
* @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
+ * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack
+ * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9
* @n_translated: number of successfully translated instructions (for errors)
* @error: error code if something went wrong
- * @stack_depth: max stack depth from the verifier
+ * @stack_frame_depth: max stack depth for current frame
* @adjust_head_location: if program has single adjust head call - the insn no.
* @map_records_cnt: the number of map pointers recorded for this prog
+ * @subprog_cnt: number of sub-programs, including main function
* @map_records: the map record pointers from bpf->maps_neutral
+ * @subprog: pointer to an array of objects holding info about sub-programs
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
@@ -439,6 +466,8 @@ struct nfp_prog {
unsigned int prog_len;
unsigned int __prog_alloc_len;
+ unsigned int stack_size;
+
struct nfp_insn_meta *verifier_meta;
enum bpf_prog_type type;
@@ -446,15 +475,19 @@ struct nfp_prog {
unsigned int last_bpf_off;
unsigned int tgt_out;
unsigned int tgt_abort;
+ unsigned int tgt_call_push_regs;
+ unsigned int tgt_call_pop_regs;
unsigned int n_translated;
int error;
- unsigned int stack_depth;
+ unsigned int stack_frame_depth;
unsigned int adjust_head_location;
unsigned int map_records_cnt;
+ unsigned int subprog_cnt;
struct nfp_bpf_neutral_map **map_records;
+ struct nfp_bpf_subprog_info *subprog;
struct list_head insns;
};
@@ -471,6 +504,7 @@ struct nfp_bpf_vnic {
unsigned int tgt_done;
};
+bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
@@ -492,6 +526,7 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
+unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 1ccd6371a15b..ba8ceedcf6a2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
/*
* nfp_net_offload.c
@@ -208,6 +178,8 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta, *tmp;
+ kfree(nfp_prog->subprog);
+
list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
list_del(&meta->l);
kfree(meta);
@@ -250,18 +222,9 @@ err_free:
static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- unsigned int stack_size;
unsigned int max_instr;
int err;
- stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
- if (prog->aux->stack_depth > stack_size) {
- nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
- prog->aux->stack_depth, stack_size);
- return -EOPNOTSUPP;
- }
- nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
-
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
@@ -299,10 +262,25 @@ static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
unsigned int i;
for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
- if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT)
+ if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
word[i] = (__force u32)cpu_to_be32(word[i]);
}
+/* Record that the value was written with words that are not byte-swap
+ * neutral, in case one of them later becomes an atomic counter and
+ * would have needed swapping.
+ */
+static void
+nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
+{
+ u32 *word = value;
+ unsigned int i;
+
+ for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
+ if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
+ word[i] != (__force u32)cpu_to_be32(word[i]))
+ nfp_map->use_map[i].non_zero_update = 1;
+}
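
The recording step above only fires for words whose value changes under a 32-bit byte swap: zero and other byte-palindromes are byte-swap neutral and remain safe for later atomic use. A small sketch of that test, assuming a little-endian host so cpu_to_be32() behaves like __builtin_bswap32():

	#include <stdint.h>
	#include <stdio.h>

	/* Byte-swap neutral: the word reads the same in CPU and big endian. */
	static int swap_neutral(uint32_t w)
	{
		return w == __builtin_bswap32(w);
	}

	int main(void)
	{
		printf("0x00000000: %d\n", swap_neutral(0x00000000)); /* 1 */
		printf("0x12abab12: %d\n", swap_neutral(0x12abab12)); /* 1 */
		printf("0x00000001: %d\n", swap_neutral(0x00000001)); /* 0 -> non_zero_update */
		return 0;
	}
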
+
static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
void *key, void *value)
@@ -322,6 +300,7 @@ nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags)
{
nfp_map_bpf_byte_swap(offmap->dev_priv, value);
+ nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}
@@ -510,7 +489,7 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- unsigned int max_mtu;
+ unsigned int max_mtu, max_stack, max_prog_len;
dma_addr_t dma_addr;
void *img;
int err;
@@ -521,6 +500,18 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
+ max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
+ if (nfp_prog->stack_size > max_stack) {
+ NL_SET_ERR_MSG_MOD(extack, "stack too large");
+ return -EOPNOTSUPP;
+ }
+
+ max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+ if (nfp_prog->prog_len > max_prog_len) {
+ NL_SET_ERR_MSG_MOD(extack, "program too long");
+ return -EOPNOTSUPP;
+ }
+
img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
if (IS_ERR(img))
return PTR_ERR(img);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index a6e9248669e1..99f977bfd8cc 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -1,43 +1,15 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <linux/pkt_cls.h>
#include "../nfp_app.h"
#include "../nfp_main.h"
+#include "../nfp_net.h"
#include "fw.h"
#include "main.h"
@@ -108,6 +80,46 @@ exit_set_location:
nfp_prog->adjust_head_location = location;
}
+static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
+ const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
+ struct bpf_offloaded_map *offmap;
+ struct bpf_func_state *state;
+ struct nfp_bpf_map *nfp_map;
+ int off, i;
+
+ state = env->cur_state->frame[reg3->frameno];
+
+ /* We need to record each time an update happens with non-zero words,
+ * in case such a word is later used in atomic operations.
+ * This implicitly depends on nfp_bpf_stack_arg_ok(reg3) having run
+ * before us.
+ */
+
+ offmap = map_to_offmap(reg1->map_ptr);
+ nfp_map = offmap->dev_priv;
+ off = reg3->off + reg3->var_off.value;
+
+ for (i = 0; i < offmap->map.value_size; i++) {
+ struct bpf_stack_state *stack_entry;
+ unsigned int soff;
+
+ soff = -(off + i) - 1;
+ stack_entry = &state->stack[soff / BPF_REG_SIZE];
+ if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO)
+ continue;
+
+ if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) {
+ pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n",
+ i, soff);
+ return false;
+ }
+ nfp_map->use_map[i / 4].non_zero_update = 1;
+ }
+
+ return true;
+}
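
The slot arithmetic above maps a byte at negative frame-pointer offset off + i into the verifier's downward-growing stack array: soff selects the 8-byte slot and the byte within it. A sketch of just that index math, with a hypothetical helper argument at fp - 16:

	#include <stdio.h>

	#define BPF_REG_SIZE 8	/* verifier tracks the stack in 8-byte slots */

	int main(void)
	{
		int off = -16;	/* pointer passed to the helper: fp - 16 */

		for (int i = 0; i < 8; i++) {
			unsigned int soff = -(off + i) - 1;

			printf("value byte %d -> stack[%u].slot_type[%u]\n",
			       i, soff / BPF_REG_SIZE, soff % BPF_REG_SIZE);
		}
		return 0;
	}
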
+
static int
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
@@ -155,8 +167,9 @@ nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
}
static int
-nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
- struct nfp_insn_meta *meta)
+nfp_bpf_check_helper_call(struct nfp_prog *nfp_prog,
+ struct bpf_verifier_env *env,
+ struct nfp_insn_meta *meta)
{
const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
@@ -198,7 +211,8 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
bpf->helpers.map_update, reg1) ||
!nfp_bpf_stack_arg_ok("map_update", env, reg2,
meta->func_id ? &meta->arg2 : NULL) ||
- !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
+ !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) ||
+ !nfp_bpf_map_update_value_ok(env))
return -EOPNOTSUPP;
break;
@@ -333,6 +347,9 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
{
s32 old_off, new_off;
+ if (reg->frameno != env->cur_state->curframe)
+ meta->flags |= FLAG_INSN_PTR_CALLER_STACK_FRAME;
+
if (!tnum_is_const(reg->var_off)) {
pr_vlog(env, "variable ptr stack access\n");
return -EINVAL;
@@ -376,15 +393,22 @@ nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
struct nfp_bpf_map *nfp_map,
unsigned int off, enum nfp_bpf_map_use use)
{
- if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
- nfp_map->use_map[off / 4] != use) {
+ if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED &&
+ nfp_map->use_map[off / 4].type != use) {
pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
- nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
+ nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type),
nfp_bpf_map_use_name(use), off);
return -EOPNOTSUPP;
}
- nfp_map->use_map[off / 4] = use;
+ if (nfp_map->use_map[off / 4].non_zero_update &&
+ use == NFP_MAP_USE_ATOMIC_CNT) {
+ pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n",
+ off);
+ return -EOPNOTSUPP;
+ }
+
+ nfp_map->use_map[off / 4].type = use;
return 0;
}
@@ -620,8 +644,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
return -EINVAL;
}
- if (meta->insn.code == (BPF_JMP | BPF_CALL))
- return nfp_bpf_check_call(nfp_prog, env, meta);
+ if (is_mbpf_helper_call(meta))
+ return nfp_bpf_check_helper_call(nfp_prog, env, meta);
if (meta->insn.code == (BPF_JMP | BPF_EXIT))
return nfp_bpf_check_exit(nfp_prog, env);
@@ -640,6 +664,132 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
return 0;
}
+static int
+nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env,
+ struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+ int index = 0;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ if (nfp_is_subprog_start(meta))
+ index++;
+ meta->subprog_idx = index;
+
+ if (meta->insn.dst_reg >= BPF_REG_6 &&
+ meta->insn.dst_reg <= BPF_REG_9)
+ nfp_prog->subprog[index].needs_reg_push = 1;
+ }
+
+ if (index + 1 != nfp_prog->subprog_cnt) {
+ pr_vlog(env, "BUG: number of processed BPF functions is not consistent (processed %d, expected %d)\n",
+ index + 1, nfp_prog->subprog_cnt);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static unsigned int
+nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt)
+{
+ struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog);
+ unsigned int max_depth = 0, depth = 0, frame = 0;
+ struct nfp_insn_meta *ret_insn[MAX_CALL_FRAMES];
+ unsigned short frame_depths[MAX_CALL_FRAMES];
+ unsigned short ret_prog[MAX_CALL_FRAMES];
+ unsigned short idx = meta->subprog_idx;
+
+ /* Inspired from check_max_stack_depth() from kernel verifier.
+ * Starting from main subprogram, walk all instructions and recursively
+ * walk all callees that given subprogram can call. Since recursion is
+ * prevented by the kernel verifier, this algorithm only needs a local
+ * stack of MAX_CALL_FRAMES to remember callsites.
+ */
+process_subprog:
+ frame_depths[frame] = nfp_prog->subprog[idx].stack_depth;
+ frame_depths[frame] = round_up(frame_depths[frame], STACK_FRAME_ALIGN);
+ depth += frame_depths[frame];
+ max_depth = max(max_depth, depth);
+
+continue_subprog:
+ for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx;
+ meta = nfp_meta_next(meta)) {
+ if (!is_mbpf_pseudo_call(meta))
+ continue;
+
+ /* We found a call to a subprogram. Remember the instruction to
+ * return to and the subprog id.
+ */
+ ret_insn[frame] = nfp_meta_next(meta);
+ ret_prog[frame] = idx;
+
+ /* Find the callee and start processing it. */
+ meta = nfp_bpf_goto_meta(nfp_prog, meta,
+ meta->n + 1 + meta->insn.imm, cnt);
+ idx = meta->subprog_idx;
+ frame++;
+ goto process_subprog;
+ }
+ /* End of for() loop means the last instruction of the subprog was
+ * reached. If we popped all stack frames, return; otherwise, go on
+ * processing remaining instructions from the caller.
+ */
+ if (frame == 0)
+ return max_depth;
+
+ depth -= frame_depths[frame];
+ frame--;
+ meta = ret_insn[frame];
+ idx = ret_prog[frame];
+ goto continue_subprog;
+}
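
The iterative walk above sidesteps C recursion on the kernel stack, but the quantity it computes is simply a depth-first maximum over the (acyclic, verifier-checked) call tree, with every frame rounded up to STACK_FRAME_ALIGN. A simplified recursive sketch over a hypothetical three-subprogram chain:

	#include <stdio.h>

	#define STACK_FRAME_ALIGN 64

	static unsigned int round_up64(unsigned int x)
	{
		return (x + STACK_FRAME_ALIGN - 1) & ~(STACK_FRAME_ALIGN - 1u);
	}

	/* Per-subprogram stack depth and a tiny call graph: 0 -> 1 -> 2. */
	static const unsigned int depth[] = { 40, 8, 24 };
	static const int callee[] = { 1, 2, -1 };

	static unsigned int walk(int idx, unsigned int so_far)
	{
		unsigned int total = so_far + round_up64(depth[idx]);

		if (callee[idx] >= 0) {
			unsigned int sub = walk(callee[idx], total);

			if (sub > total)
				return sub;
		}
		return total;
	}

	int main(void)
	{
		printf("max depth: %u\n", walk(0, 0));	/* 64 * 3 = 192 */
		return 0;
	}
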
+
+static int nfp_bpf_finalize(struct bpf_verifier_env *env)
+{
+ struct bpf_subprog_info *info;
+ struct nfp_prog *nfp_prog;
+ unsigned int max_stack;
+ struct nfp_net *nn;
+ int i;
+
+ nfp_prog = env->prog->aux->offload->dev_priv;
+ nfp_prog->subprog_cnt = env->subprog_cnt;
+ nfp_prog->subprog = kcalloc(nfp_prog->subprog_cnt,
+ sizeof(nfp_prog->subprog[0]), GFP_KERNEL);
+ if (!nfp_prog->subprog)
+ return -ENOMEM;
+
+ nfp_assign_subprog_idx_and_regs(env, nfp_prog);
+
+ info = env->subprog_info;
+ for (i = 0; i < nfp_prog->subprog_cnt; i++) {
+ nfp_prog->subprog[i].stack_depth = info[i].stack_depth;
+
+ if (i == 0)
+ continue;
+
+ /* Account for size of return address. */
+ nfp_prog->subprog[i].stack_depth += REG_WIDTH;
+ /* Account for size of saved registers, if necessary. */
+ if (nfp_prog->subprog[i].needs_reg_push)
+ nfp_prog->subprog[i].stack_depth += BPF_REG_SIZE * 4;
+ }
+
+ nn = netdev_priv(env->prog->aux->offload->netdev);
+ max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
+ nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog,
+ env->prog->len);
+ if (nfp_prog->stack_size > max_stack) {
+ pr_vlog(env, "stack too large: program %dB > FW stack %dB\n",
+ nfp_prog->stack_size, max_stack);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
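
Taken together, the accounting is: every non-main frame pays REG_WIDTH bytes for its return address plus, if it touches R6~R9, four saved 8-byte registers; each frame is then rounded to the 64-byte STACK_FRAME_ALIGN during the walk. A worked sketch (treating REG_WIDTH as the 4-byte NFP register width is an assumption here):

	#include <stdio.h>

	#define REG_WIDTH	  4	/* assumed NFP register width */
	#define BPF_REG_SIZE	  8
	#define STACK_FRAME_ALIGN 64

	static unsigned int frame_size(unsigned int verifier_depth, int is_main,
				       int needs_reg_push)
	{
		unsigned int d = verifier_depth;

		if (!is_main) {
			d += REG_WIDTH;			/* return address */
			if (needs_reg_push)
				d += BPF_REG_SIZE * 4;	/* saved R6~R9 */
		}
		return (d + STACK_FRAME_ALIGN - 1) & ~(STACK_FRAME_ALIGN - 1u);
	}

	int main(void)
	{
		/* main: 40B -> 64; callee: 28 + 4 + 32 = 64 -> 64; total 128 */
		printf("total: %u\n", frame_size(40, 1, 0) + frame_size(28, 0, 1));
		return 0;
	}
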
+
const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
- .insn_hook = nfp_verify_insn,
+ .insn_hook = nfp_verify_insn,
+ .finalize = nfp_bpf_finalize,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 46ba0cf257c6..244dc261006e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <net/geneve.h>
@@ -429,12 +399,14 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
switch (off) {
case offsetof(struct iphdr, daddr):
- set_ip_addr->ipv4_dst_mask = mask;
- set_ip_addr->ipv4_dst = exact;
+ set_ip_addr->ipv4_dst_mask |= mask;
+ set_ip_addr->ipv4_dst &= ~mask;
+ set_ip_addr->ipv4_dst |= exact & mask;
break;
case offsetof(struct iphdr, saddr):
- set_ip_addr->ipv4_src_mask = mask;
- set_ip_addr->ipv4_src = exact;
+ set_ip_addr->ipv4_src_mask |= mask;
+ set_ip_addr->ipv4_src &= ~mask;
+ set_ip_addr->ipv4_src |= exact & mask;
break;
default:
return -EOPNOTSUPP;
@@ -448,11 +420,12 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
}
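
The change above turns a destructive overwrite into a read-modify-write merge, so two pedit keys that each set part of the same address accumulate rather than clobber one another. The idiom in isolation (values are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	/* Fold a partially-masked write into an accumulated value/mask pair. */
	static void merge(uint32_t *val, uint32_t *val_mask,
			  uint32_t exact, uint32_t mask)
	{
		*val_mask |= mask;	/* widen the set of rewritten bits */
		*val &= ~mask;		/* drop stale bits under the new mask */
		*val |= exact & mask;	/* splice in the new bits */
	}

	int main(void)
	{
		uint32_t dst = 0, dst_mask = 0;

		merge(&dst, &dst_mask, 0x0a000000, 0xff000000); /* first octet */
		merge(&dst, &dst_mask, 0x00000001, 0x000000ff); /* last octet */
		printf("val=0x%08x mask=0x%08x\n", dst, dst_mask);
		/* val=0x0a000001 mask=0xff0000ff */
		return 0;
	}
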
static void
-nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
struct nfp_fl_set_ipv6_addr *ip6)
{
- ip6->ipv6[idx % 4].mask = mask;
- ip6->ipv6[idx % 4].exact = exact;
+ ip6->ipv6[word].mask |= mask;
+ ip6->ipv6[word].exact &= ~mask;
+ ip6->ipv6[word].exact |= exact & mask;
ip6->reserved = cpu_to_be16(0);
ip6->head.jump_id = opcode_tag;
@@ -465,6 +438,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
struct nfp_fl_set_ipv6_addr *ip_src)
{
__be32 exact, mask;
+ u8 word;
/* We are expecting tcf_pedit to return a big endian value */
mask = (__force __be32)~tcf_pedit_mask(action, idx);
@@ -473,17 +447,20 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
if (exact & ~mask)
return -EOPNOTSUPP;
- if (off < offsetof(struct ipv6hdr, saddr))
+ if (off < offsetof(struct ipv6hdr, saddr)) {
return -EOPNOTSUPP;
- else if (off < offsetof(struct ipv6hdr, daddr))
- nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
+ } else if (off < offsetof(struct ipv6hdr, daddr)) {
+ word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
exact, mask, ip_src);
- else if (off < offsetof(struct ipv6hdr, daddr) +
- sizeof(struct in6_addr))
- nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
+ } else if (off < offsetof(struct ipv6hdr, daddr) +
+ sizeof(struct in6_addr)) {
+ word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
exact, mask, ip_dst);
- else
+ } else {
return -EOPNOTSUPP;
+ }
return 0;
}
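
The word index is now derived from the pedit byte offset instead of the key index, so keys arriving in any order still land in the correct 32-bit word of the 128-bit address. A sketch of the offset math on a mocked-up header (field offsets mirror struct ipv6hdr: saddr at byte 8, daddr at byte 24):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct ipv6hdr_model {
		uint8_t  vtc_flow[4];
		uint16_t payload_len;
		uint8_t  nexthdr, hop_limit;
		uint32_t saddr[4];	/* offset 8 */
		uint32_t daddr[4];	/* offset 24 */
	};

	int main(void)
	{
		/* pedit writes the third 32-bit word of daddr */
		size_t off = offsetof(struct ipv6hdr_model, daddr) + 8;
		size_t word = (off - offsetof(struct ipv6hdr_model, daddr)) /
			      sizeof(uint32_t);

		printf("offset %zu -> daddr word %zu\n", off, word); /* 32 -> 2 */
		return 0;
	}
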
@@ -541,7 +518,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
struct nfp_fl_set_eth set_eth;
enum pedit_header_type htype;
int idx, nkeys, err;
- size_t act_size;
+ size_t act_size = 0;
u32 offset, cmd;
u8 ip_proto = 0;
@@ -599,7 +576,9 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
act_size = sizeof(set_eth);
memcpy(nfp_action, &set_eth, act_size);
*a_len += act_size;
- } else if (set_ip_addr.head.len_lw) {
+ }
+ if (set_ip_addr.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip_addr);
memcpy(nfp_action, &set_ip_addr, act_size);
*a_len += act_size;
@@ -607,10 +586,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+ }
+ if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
*/
+ nfp_action += act_size;
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
@@ -623,6 +604,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip6_dst);
memcpy(nfp_action, &set_ip6_dst, act_size);
*a_len += act_size;
@@ -630,13 +612,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_src.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_tport.head.len_lw) {
+ }
+ if (set_tport.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_tport);
memcpy(nfp_action, &set_tport, act_size);
*a_len += act_size;
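
The emitters also change from mutually exclusive else-if arms to independent ifs, each advancing the output cursor past the action written before it, so a single pedit can rewrite Ethernet, IP and transport headers in one pass. A sketch of the cursor pattern with two toy actions:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[64], *cursor = buf;
		size_t a_len = 0, act_size = 0;
		const char set_eth[] = "ETH", set_ip[] = "IPV4";

		if (1) {			/* eth action present */
			act_size = sizeof(set_eth);
			memcpy(cursor, set_eth, act_size);
			a_len += act_size;
		}
		if (1) {			/* ip action present too */
			cursor += act_size;	/* skip what was just written */
			act_size = sizeof(set_ip);
			memcpy(cursor, set_ip, act_size);
			a_len += act_size;
		}
		printf("emitted %zu bytes: %s + %s\n", a_len,
		       buf, buf + sizeof(set_eth));	/* 9 bytes: ETH + IPV4 */
		return 0;
	}
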
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index cb8565222621..4c5eaf36d5bb 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 325954b829c8..29d673aa5277 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef NFP_FLOWER_CMSG_H
#define NFP_FLOWER_CMSG_H
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index bf10598f66ae..81dcf5b318ba 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include "main.h"
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index e57d23746585..3a54728d2ea6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
@@ -518,8 +488,8 @@ err_clear_nn:
static int nfp_flower_init(struct nfp_app *app)
{
const struct nfp_pf *pf = app->pf;
+ u64 version, features, ctx_count;
struct nfp_flower_priv *app_priv;
- u64 version, features;
int err;
if (!pf->eth_tbl) {
@@ -543,6 +513,16 @@ static int nfp_flower_init(struct nfp_app *app)
return err;
}
+ ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
+ &err);
+ if (err) {
+ nfp_warn(app->cpp,
+ "FlowerNIC: unsupported host context count: %d\n",
+ err);
+ err = 0;
+ ctx_count = BIT(17);
+ }
+
/* We need to ensure hardware has enough flower capabilities. */
if (version != NFP_FLOWER_ALLOWED_VER) {
nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
@@ -553,6 +533,7 @@ static int nfp_flower_init(struct nfp_app *app)
if (!app_priv)
return -ENOMEM;
+ app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
app->priv = app_priv;
app_priv->app = app;
skb_queue_head_init(&app_priv->cmsg_skbs_high);
@@ -563,7 +544,7 @@ static int nfp_flower_init(struct nfp_app *app)
init_waitqueue_head(&app_priv->mtu_conf.wait_q);
spin_lock_init(&app_priv->mtu_conf.lock);
- err = nfp_flower_metadata_init(app);
+ err = nfp_flower_metadata_init(app, ctx_count);
if (err)
goto err_free_app_priv;
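
The host context count read here later sizes the stats machinery: the ring is rounded up to a power of two, with BIT(17) (the old fixed maximum) as the fallback when the firmware symbol is absent. A sketch of that sizing with a hypothetical non-power-of-two firmware value:

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n) (1ull << (n))

	static uint64_t roundup_pow_of_two(uint64_t n)
	{
		uint64_t p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		uint64_t ctx_count = 100000;	/* hypothetical rtsym value */

		if (!ctx_count)			/* symbol missing: fall back */
			ctx_count = BIT(17);
		printf("stats ring size: %llu\n",	/* 131072 */
		       (unsigned long long)roundup_pow_of_two(ctx_count));
		return 0;
	}
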
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 81d941ab895c..90045bab95bf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef __NFP_FLOWER_H__
#define __NFP_FLOWER_H__ 1
@@ -38,6 +8,7 @@
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
+#include <linux/rhashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <net/pkt_cls.h>
@@ -50,10 +21,8 @@ struct net_device;
struct nfp_app;
#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
-#define NFP_FL_STATS_ENTRY_RS BIT(20)
-#define NFP_FL_STATS_ELEM_RS 4
-#define NFP_FL_REPEATED_HASH_MAX BIT(17)
-#define NFP_FLOWER_HASH_BITS 19
+#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \
+ init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS 256
#define NFP_FLOWER_MASK_ELEMENT_RS 1
#define NFP_FLOWER_MASK_HASH_BITS 10
@@ -138,7 +107,10 @@ struct nfp_fl_lag {
* @stats_ids: List of free stats ids
* @mask_ids: List of free mask ids
* @mask_table: Hash table used to store masks
+ * @stats_ring_size: Maximum number of allowed stats ids
* @flow_table: Hash table used to store flower rules
+ * @stats: Stored stats updates for flower rules
+ * @stats_lock: Lock for flower rule stats updates
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs_high: List of higher priority skbs for control message
* processing
@@ -171,7 +143,10 @@ struct nfp_flower_priv {
struct nfp_fl_stats_id stats_ids;
struct nfp_fl_mask_id mask_ids;
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
- DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
+ u32 stats_ring_size;
+ struct rhashtable flow_table;
+ struct nfp_fl_stats *stats;
+ spinlock_t stats_lock; /* lock stats */
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low;
@@ -227,10 +202,8 @@ struct nfp_fl_stats {
struct nfp_fl_payload {
struct nfp_fl_rule_metadata meta;
unsigned long tc_flower_cookie;
- struct hlist_node link;
+ struct rhash_head fl_node;
struct rcu_head rcu;
- spinlock_t lock; /* lock stats */
- struct nfp_fl_stats stats;
__be32 nfp_tun_ipv4_addr;
struct net_device *ingress_dev;
char *unmasked_data;
@@ -239,6 +212,8 @@ struct nfp_fl_payload {
bool ingress_offload;
};
+extern const struct rhashtable_params nfp_flower_table_params;
+
struct nfp_fl_stats_frame {
__be32 stats_con_id;
__be32 pkt_count;
@@ -246,7 +221,7 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
-int nfp_flower_metadata_init(struct nfp_app *app);
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 17acb8cc6044..e54fb6034326 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <net/pkt_cls.h>
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index c098730544b7..48729bf171e0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/hash.h>
#include <linux/hashtable.h>
@@ -48,6 +18,12 @@ struct nfp_mask_id_table {
u8 mask_id;
};
+struct nfp_fl_flow_table_cmp_arg {
+ struct net_device *netdev;
+ unsigned long cookie;
+ __be32 host_ctx;
+};
+
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
struct nfp_flower_priv *priv = app->priv;
@@ -55,14 +31,14 @@ static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
ring = &priv->stats_ids.free_list;
/* Check if buffer is full. */
- if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS *
- NFP_FL_STATS_ELEM_RS -
+ if (!CIRC_SPACE(ring->head, ring->tail,
+ priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
NFP_FL_STATS_ELEM_RS + 1))
return -ENOBUFS;
memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
- (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+ (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
return 0;
}
@@ -74,7 +50,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
struct circ_buf *ring;
ring = &priv->stats_ids.free_list;
- freed_stats_id = NFP_FL_STATS_ENTRY_RS;
+ freed_stats_id = priv->stats_ring_size;
/* Check for unallocated entries first. */
if (priv->stats_ids.init_unalloc > 0) {
*stats_context_id = priv->stats_ids.init_unalloc - 1;
@@ -92,7 +68,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
*stats_context_id = temp_stats_id;
memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
- (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS);
+ (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
return 0;
}
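
Both functions above treat the free list as a circular byte buffer of 4-byte stats ids whose capacity is now priv->stats_ring_size entries rather than a fixed constant. A simplified userspace model of the wrap arithmetic (the driver checks fullness with CIRC_SPACE(); this model keeps one slot free in the classic way):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define ELEM 4			/* bytes per stats id */
	#define RING 8			/* entries, i.e. stats_ring_size */

	static uint8_t buf[RING * ELEM];
	static unsigned int head, tail;	/* byte offsets, as in struct circ_buf */

	static int push_id(uint32_t id)	/* cf. nfp_release_stats_entry() */
	{
		if ((head + ELEM) % (RING * ELEM) == tail)
			return -1;	/* full */
		memcpy(&buf[head], &id, ELEM);
		head = (head + ELEM) % (RING * ELEM);
		return 0;
	}

	static int pop_id(uint32_t *id)	/* cf. nfp_get_stats_entry() */
	{
		if (head == tail)
			return -1;	/* empty */
		memcpy(id, &buf[tail], ELEM);
		tail = (tail + ELEM) % (RING * ELEM);
		return 0;
	}

	int main(void)
	{
		uint32_t id;

		push_id(7);
		push_id(9);
		while (!pop_id(&id))
			printf("freed stats id %u\n", id);
		return 0;
	}
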
@@ -102,56 +78,37 @@ struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev, __be32 host_ctx)
{
+ struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
struct nfp_flower_priv *priv = app->priv;
- struct nfp_fl_payload *flower_entry;
- hash_for_each_possible_rcu(priv->flow_table, flower_entry, link,
- tc_flower_cookie)
- if (flower_entry->tc_flower_cookie == tc_flower_cookie &&
- (!netdev || flower_entry->ingress_dev == netdev) &&
- (host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
- flower_entry->meta.host_ctx_id == host_ctx))
- return flower_entry;
+ flower_cmp_arg.netdev = netdev;
+ flower_cmp_arg.cookie = tc_flower_cookie;
+ flower_cmp_arg.host_ctx = host_ctx;
- return NULL;
-}
-
-static void
-nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats)
-{
- struct nfp_fl_payload *nfp_flow;
- unsigned long flower_cookie;
-
- flower_cookie = be64_to_cpu(stats->stats_cookie);
-
- rcu_read_lock();
- nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL,
- stats->stats_con_id);
- if (!nfp_flow)
- goto exit_rcu_unlock;
-
- spin_lock(&nfp_flow->lock);
- nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count);
- nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count);
- nfp_flow->stats.used = jiffies;
- spin_unlock(&nfp_flow->lock);
-
-exit_rcu_unlock:
- rcu_read_unlock();
+ return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
+ nfp_flower_table_params);
}
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
- struct nfp_fl_stats_frame *stats_frame;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_stats_frame *stats;
unsigned char *msg;
+ u32 ctx_id;
int i;
msg = nfp_flower_cmsg_get_data(skb);
- stats_frame = (struct nfp_fl_stats_frame *)msg;
- for (i = 0; i < msg_len / sizeof(*stats_frame); i++)
- nfp_flower_update_stats(app, stats_frame + i);
+ spin_lock(&priv->stats_lock);
+ for (i = 0; i < msg_len / sizeof(*stats); i++) {
+ stats = (struct nfp_fl_stats_frame *)msg + i;
+ ctx_id = be32_to_cpu(stats->stats_con_id);
+ priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
+ priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
+ priv->stats[ctx_id].used = jiffies;
+ }
+ spin_unlock(&priv->stats_lock);
}
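
Stats frames are now applied straight into a flat per-context array under a single spinlock, replacing the per-frame hash lookup and per-flow lock. A sketch of the frame parsing and accumulation (the struct mirrors nfp_fl_stats_frame; endian conversion via <endian.h>):

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	struct stats_frame {
		uint32_t stats_con_id;	/* big endian on the wire */
		uint32_t pkt_count;
		uint64_t byte_count;
		uint64_t stats_cookie;
	};

	struct stats { uint64_t pkts, bytes; };

	int main(void)
	{
		struct stats_frame msg[2] = {
			{ htobe32(3), htobe32(10), htobe64(1500), 0 },
			{ htobe32(3), htobe32(5),  htobe64(600),  0 },
		};
		struct stats stats[8] = { { 0 } };

		for (unsigned int i = 0; i < 2; i++) {
			uint32_t ctx = be32toh(msg[i].stats_con_id);

			stats[ctx].pkts  += be32toh(msg[i].pkt_count);
			stats[ctx].bytes += be64toh(msg[i].byte_count);
		}
		printf("ctx 3: %llu pkts, %llu bytes\n",	/* 15 / 2100 */
		       (unsigned long long)stats[3].pkts,
		       (unsigned long long)stats[3].bytes);
		return 0;
	}
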
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
@@ -345,9 +302,9 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
/* Update flow payload with mask ids. */
nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
- nfp_flow->stats.pkts = 0;
- nfp_flow->stats.bytes = 0;
- nfp_flow->stats.used = jiffies;
+ priv->stats[stats_cxt].pkts = 0;
+ priv->stats[stats_cxt].bytes = 0;
+ priv->stats[stats_cxt].used = jiffies;
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
NFP_FL_STATS_CTX_DONT_CARE);
@@ -389,12 +346,56 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
return nfp_release_stats_entry(app, temp_ctx_id);
}
-int nfp_flower_metadata_init(struct nfp_app *app)
+static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
+ const struct nfp_fl_payload *flow_entry = obj;
+
+ if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
+ (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
+ flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
+ return flow_entry->tc_flower_cookie != cmp_arg->cookie;
+
+ return 1;
+}
+
+static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct nfp_fl_payload *flower_entry = data;
+
+ return jhash2((u32 *)&flower_entry->tc_flower_cookie,
+ sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
+ seed);
+}
+
+static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;
+
+ return jhash2((u32 *)&cmp_arg->cookie,
+ sizeof(cmp_arg->cookie) / sizeof(u32), seed);
+}
+
+const struct rhashtable_params nfp_flower_table_params = {
+ .head_offset = offsetof(struct nfp_fl_payload, fl_node),
+ .hashfn = nfp_fl_key_hashfn,
+ .obj_cmpfn = nfp_fl_obj_cmpfn,
+ .obj_hashfn = nfp_fl_obj_hashfn,
+ .automatic_shrinking = true,
+};
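
rhashtable treats a zero return from obj_cmpfn as a match, and the NULL netdev / DONT_CARE host-context wildcards let one lookup routine serve both exact and relaxed searches. A userspace model of that predicate:

	#include <stdint.h>
	#include <stdio.h>

	#define CTX_DONT_CARE 0xffffffffu

	struct flow { unsigned long cookie; const void *netdev; uint32_t ctx; };
	struct key  { unsigned long cookie; const void *netdev; uint32_t ctx; };

	/* Return 0 on match, nonzero otherwise: the obj_cmpfn contract. */
	static int flow_cmp(const struct key *k, const struct flow *f)
	{
		if ((!k->netdev || f->netdev == k->netdev) &&
		    (k->ctx == CTX_DONT_CARE || f->ctx == k->ctx))
			return f->cookie != k->cookie;
		return 1;
	}

	int main(void)
	{
		int dev;
		struct flow f = { 0xbeef, &dev, 5 };
		struct key exact = { 0xbeef, &dev, 5 };
		struct key any = { 0xbeef, NULL, CTX_DONT_CARE };

		printf("exact: %d, wildcard: %d\n",	/* 0, 0 -> both match */
		       flow_cmp(&exact, &f), flow_cmp(&any, &f));
		return 0;
	}
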
+
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
{
struct nfp_flower_priv *priv = app->priv;
+ int err;
hash_init(priv->mask_table);
- hash_init(priv->flow_table);
+
+ err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
+ if (err)
+ return err;
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
@@ -402,7 +403,7 @@ int nfp_flower_metadata_init(struct nfp_app *app)
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- return -ENOMEM;
+ goto err_free_flow_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -416,18 +417,29 @@ int nfp_flower_metadata_init(struct nfp_app *app)
/* Init ring buffer and unallocated stats_ids. */
priv->stats_ids.free_list.buf =
vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
- NFP_FL_STATS_ENTRY_RS));
+ priv->stats_ring_size));
if (!priv->stats_ids.free_list.buf)
goto err_free_last_used;
- priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX;
+ priv->stats_ids.init_unalloc = host_ctx_count;
+
+ priv->stats = kvmalloc_array(priv->stats_ring_size,
+ sizeof(struct nfp_fl_stats), GFP_KERNEL);
+ if (!priv->stats)
+ goto err_free_ring_buf;
+
+ spin_lock_init(&priv->stats_lock);
return 0;
+err_free_ring_buf:
+ vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_flow_table:
+ rhashtable_destroy(&priv->flow_table);
return -ENOMEM;
}
@@ -438,6 +450,9 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
if (!priv)
return;
+ rhashtable_free_and_destroy(&priv->flow_table,
+ nfp_check_rhashtable_empty, NULL);
+ kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
vfree(priv->stats_ids.free_list.buf);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index bd19624f10cf..29c95423ab64 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/skbuff.h>
#include <net/devlink.h>
@@ -428,8 +398,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
- spin_lock_init(&flow_pay->lock);
-
flow_pay->ingress_offload = !egress;
return flow_pay;
@@ -513,9 +481,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
- INIT_HLIST_NODE(&flow_pay->link);
flow_pay->tc_flower_cookie = flow->cookie;
- hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
+ err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
+ nfp_flower_table_params);
+ if (err)
+ goto err_destroy_flow;
+
port->tc_offload_cnt++;
/* Deallocate flow payload when flower rule has been destroyed. */
@@ -550,6 +521,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
+ struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
struct net_device *ingr_dev;
int err;
@@ -573,11 +545,13 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_flow;
err_free_flow:
- hash_del_rcu(&nfp_flow->link);
port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
kfree(nfp_flow->mask_data);
kfree(nfp_flow->unmasked_data);
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &nfp_flow->fl_node,
+ nfp_flower_table_params));
kfree_rcu(nfp_flow, rcu);
return err;
}
@@ -598,8 +572,10 @@ static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
+ struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
struct net_device *ingr_dev;
+ u32 ctx_id;
ingr_dev = egress ? NULL : netdev;
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
@@ -610,13 +586,16 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->ingress_offload && egress)
return 0;
- spin_lock_bh(&nfp_flow->lock);
- tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
- nfp_flow->stats.pkts, nfp_flow->stats.used);
+ ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+
+ spin_lock_bh(&priv->stats_lock);
+ tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes,
+ priv->stats[ctx_id].pkts,
+ priv->stats[ctx_id].used);
- nfp_flow->stats.pkts = 0;
- nfp_flow->stats.bytes = 0;
- spin_unlock_bh(&nfp_flow->lock);
+ priv->stats[ctx_id].pkts = 0;
+ priv->stats[ctx_id].bytes = 0;
+ spin_unlock_bh(&priv->stats_lock);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 382bb93cb090..8e5bec04d1f9 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -1,39 +1,10 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
+#include <net/vxlan.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>
@@ -217,7 +188,7 @@ static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
return false;
if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
return true;
- if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
+ if (netif_is_vxlan(netdev))
return true;
return false;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h
index 8b56c27931bf..dd359a44adfb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_abi.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h
@@ -1,36 +1,5 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __NFP_ABI__
#define __NFP_ABI__ 1
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 8607d09ab732..68a0991aac22 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bug.h>
#include <linux/lockdep.h>
@@ -60,6 +30,11 @@ static const struct nfp_app_type *apps[] = {
#endif
};
+void nfp_check_rhashtable_empty(void *ptr, void *arg)
+{
+ WARN_ON_ONCE(1);
+}
+
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev)
{
if (nfp_netdev_is_nfp_net(netdev)) {
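
The new nfp_check_rhashtable_empty() matches the free_fn signature of rhashtable_free_and_destroy(): it is invoked once per entry still present at destroy time, so a properly drained table never fires the WARN_ON_ONCE(). A hedged usage sketch (the table name is illustrative, not driver code):

/* Hypothetical teardown: leftover entries become a one-shot warning. */
rhashtable_free_and_destroy(&priv->example_table,
			    nfp_check_rhashtable_empty, NULL);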
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 4e1eb3395648..4d6ecf99b1cc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef _NFP_APP_H
#define _NFP_APP_H 1
@@ -40,6 +10,8 @@
#include "nfp_net_repr.h"
+#define NFP_APP_CTRL_MTU_MAX U32_MAX
+
struct bpf_prog;
struct net_device;
struct netdev_bpf;
@@ -178,6 +150,7 @@ struct nfp_app_type {
* @ctrl: pointer to ctrl vNIC struct
* @reprs: array of pointers to representors
* @type: pointer to const application ops and info
+ * @ctrl_mtu: MTU to set on the control vNIC (set in .init())
* @priv: app-specific priv data
*/
struct nfp_app {
@@ -189,9 +162,11 @@ struct nfp_app {
struct nfp_reprs __rcu *reprs[NFP_REPR_TYPE_MAX + 1];
const struct nfp_app_type *type;
+ unsigned int ctrl_mtu;
void *priv;
};
+void nfp_check_rhashtable_empty(void *ptr, void *arg);
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
index e2dfe4f168bb..f119277fd66c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index cc6ace2be8a9..b04b83687fe2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#include <linux/bitops.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index fad0e62a910c..648c2810e5ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
#ifndef __NFP_ASM_H__
#define __NFP_ASM_H__ 1
@@ -82,6 +52,15 @@
#define OP_BR_BIT_ADDR_LO OP_BR_ADDR_LO
#define OP_BR_BIT_ADDR_HI OP_BR_ADDR_HI
+#define OP_BR_ALU_BASE 0x0e800000000ULL
+#define OP_BR_ALU_BASE_MASK 0x0ff80000000ULL
+#define OP_BR_ALU_A_SRC 0x000000003ffULL
+#define OP_BR_ALU_B_SRC 0x000000ffc00ULL
+#define OP_BR_ALU_DEFBR 0x00000300000ULL
+#define OP_BR_ALU_IMM_HI 0x0007fc00000ULL
+#define OP_BR_ALU_SRC_LMEXTN 0x40000000000ULL
+#define OP_BR_ALU_DST_LMEXTN 0x80000000000ULL
+
static inline bool nfp_is_br(u64 insn)
{
return (insn & OP_BR_BASE_MASK) == OP_BR_BASE ||
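
The new OP_BR_ALU_* constants follow the file's established base/mask/field pattern: a fixed opcode base plus operand bitfield masks. A sketch of how an emitter might pack such an instruction with FIELD_PREP() from <linux/bitfield.h>; the operand names and immediate split are assumptions for illustration, not the driver's actual emitter:

/* Illustrative packing only; a real emitter also validates operands. */
u64 insn = OP_BR_ALU_BASE |
	   FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
	   FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
	   FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
	   FIELD_PREP(OP_BR_ALU_IMM_HI, imm >> 10);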
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index db463e20a876..808647ec3573 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/rtnetlink.h>
#include <net/devlink.h>
@@ -96,6 +66,7 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
{
struct nfp_pf *pf = devlink_priv(devlink);
struct nfp_eth_table_port eth_port;
+ unsigned int lanes;
int ret;
if (count < 2)
@@ -114,8 +85,12 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
goto out;
}
- ret = nfp_devlink_set_lanes(pf, eth_port.index,
- eth_port.port_lanes / count);
+ /* Special case the 100G CXP -> 2x40G split */
+ lanes = eth_port.port_lanes / count;
+ if (eth_port.lanes == 10 && count == 2)
+ lanes = 8 / count;
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
out:
mutex_unlock(&pf->lock);
@@ -128,6 +103,7 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
{
struct nfp_pf *pf = devlink_priv(devlink);
struct nfp_eth_table_port eth_port;
+ unsigned int lanes;
int ret;
mutex_lock(&pf->lock);
@@ -143,7 +119,12 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
goto out;
}
- ret = nfp_devlink_set_lanes(pf, eth_port.index, eth_port.port_lanes);
+ /* Special case the 100G CXP -> 2x40G unsplit */
+ lanes = eth_port.port_lanes;
+ if (eth_port.port_lanes == 8)
+ lanes = 10;
+
+ ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
out:
mutex_unlock(&pf->lock);
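
Worked example for the special case above: a 100G CXP port reports eth_port.lanes == 10, so a two-way split would naively get 10 / 2 = 5 lanes per port, which is not a valid configuration. The hardware instead runs the split as 2x40G over 8 lanes, hence 8 / 2 = 4 lanes each; unsplit mirrors this by restoring a port currently holding 8 lanes to the full 10-lane 100G mode.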
@@ -177,7 +158,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return nfp_app_eswitch_mode_get(pf->app, mode);
}
-static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
int ret;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
index f0dcf45aeec1..5cabb1aa9c0c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 4a540c5e27fe..6c10e8d119e4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_main.c
@@ -68,6 +38,10 @@ static const struct pci_device_id nfp_pci_device_ids[] = {
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
PCI_ANY_ID, 0,
},
+ { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000,
+ PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+ PCI_ANY_ID, 0,
+ },
{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000,
PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
PCI_ANY_ID, 0,
@@ -112,23 +86,18 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
void *out_data, u64 out_length)
{
- unsigned long long addr;
unsigned long err_at;
u64 max_data_sz;
u32 val = 0;
- u32 cpp_id;
int n, err;
if (!pf->mbox)
return -EOPNOTSUPP;
- cpp_id = NFP_CPP_ISLAND_ID(pf->mbox->target, NFP_CPP_ACTION_RW, 0,
- pf->mbox->domain);
- addr = pf->mbox->addr;
- max_data_sz = pf->mbox->size - NFP_MBOX_SYM_MIN_SIZE;
+ max_data_sz = nfp_rtsym_size(pf->mbox) - NFP_MBOX_SYM_MIN_SIZE;
/* Check if cmd field is clear */
- err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val);
+ err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val);
if (err || val) {
nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n",
cmd, val, err);
@@ -136,30 +105,29 @@ int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
}
in_length = min(in_length, max_data_sz);
- n = nfp_cpp_write(pf->cpp, cpp_id, addr + NFP_MBOX_DATA,
- in_data, in_length);
+ n = nfp_rtsym_write(pf->cpp, pf->mbox, NFP_MBOX_DATA, in_data,
+ in_length);
if (n != in_length)
return -EIO;
/* Write data_len and wipe reserved */
- err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN,
- in_length);
+ err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, in_length);
if (err)
return err;
/* Read back for ordering */
- err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val);
+ err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val);
if (err)
return err;
/* Write cmd and wipe return value */
- err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, cmd);
+ err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_CMD, cmd);
if (err)
return err;
err_at = jiffies + 5 * HZ;
while (true) {
/* Wait for command to go to 0 (NFP_MBOX_NO_CMD) */
- err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val);
+ err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val);
if (err)
return err;
if (!val)
@@ -172,18 +140,18 @@ int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
}
/* Copy output if any (could be error info, do it before reading ret) */
- err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val);
+ err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val);
if (err)
return err;
out_length = min_t(u32, val, min(out_length, max_data_sz));
- n = nfp_cpp_read(pf->cpp, cpp_id, addr + NFP_MBOX_DATA,
- out_data, out_length);
+ n = nfp_rtsym_read(pf->cpp, pf->mbox, NFP_MBOX_DATA,
+ out_data, out_length);
if (n != out_length)
return -EIO;
/* Check if there is an error */
- err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_RET, &val);
+ err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_RET, &val);
if (err)
return err;
if (val)
@@ -441,8 +409,11 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
}
fw = nfp_net_fw_find(pdev, pf);
- if (!fw)
+ if (!fw) {
+ if (nfp_nsp_has_stored_fw_load(nsp))
+ nfp_nsp_load_stored_fw(nsp);
return 0;
+ }
dev_info(&pdev->dev, "Soft-reset, loading FW image\n");
err = nfp_nsp_device_soft_reset(nsp);
@@ -453,7 +424,6 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
}
err = nfp_nsp_load_fw(nsp, fw);
-
if (err < 0) {
dev_err(&pdev->dev, "FW loading failed: %d\n", err);
goto exit_release_fw;
@@ -566,9 +536,9 @@ static int nfp_pf_find_rtsyms(struct nfp_pf *pf)
/* Optional per-PCI PF mailbox */
snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id);
pf->mbox = nfp_rtsym_lookup(pf->rtbl, pf_symbol);
- if (pf->mbox && pf->mbox->size < NFP_MBOX_SYM_MIN_SIZE) {
+ if (pf->mbox && nfp_rtsym_size(pf->mbox) < NFP_MBOX_SYM_MIN_SIZE) {
nfp_err(pf->cpp, "PF mailbox symbol too small: %llu < %d\n",
- pf->mbox->size, NFP_MBOX_SYM_MIN_SIZE);
+ nfp_rtsym_size(pf->mbox), NFP_MBOX_SYM_MIN_SIZE);
return -EINVAL;
}
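
The mailbox rewrite above drops the open-coded NFP_CPP_ISLAND_ID()/address arithmetic in favour of symbol-relative rtsym accessors. The prototypes below are inferred from the call sites in this hunk, not quoted from the header:

/* Inferred shapes: offsets are relative to the symbol, so callers no
 * longer compute CPP IDs or absolute addresses themselves.
 */
u64 nfp_rtsym_size(const struct nfp_rtsym *sym);
int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
		    u32 off, u32 *value);
int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
		     u32 off, u64 value);
int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
		   u32 off, void *buf, size_t len);
int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
		    u32 off, void *buf, size_t len);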
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index 595b3dc280e3..a3613a2e0aa5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_main.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 439e6ffe2f05..6f0c37d09256 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 253bdaef1505..6bddfcfdec34 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_common.c
@@ -2077,28 +2047,35 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
return true;
}
-static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
{
struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
struct nfp_net *nn = r_vec->nfp_net;
struct nfp_net_dp *dp = &nn->dp;
+ unsigned int budget = 512;
- while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
continue;
+
+ return budget;
}
static void nfp_ctrl_poll(unsigned long arg)
{
struct nfp_net_r_vector *r_vec = (void *)arg;
- spin_lock_bh(&r_vec->lock);
+ spin_lock(&r_vec->lock);
nfp_net_tx_complete(r_vec->tx_ring, 0);
__nfp_ctrl_tx_queued(r_vec);
- spin_unlock_bh(&r_vec->lock);
-
- nfp_ctrl_rx(r_vec);
+ spin_unlock(&r_vec->lock);
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ if (nfp_ctrl_rx(r_vec)) {
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ } else {
+ tasklet_schedule(&r_vec->tasklet);
+ nn_dp_warn(&r_vec->nfp_net->dp,
+ "control message budget exceeded!\n");
+ }
}
/* Setup and Configuration
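
Two things change in the control poller above: the lock drops its _bh variants, since the tasklet already runs in softirq context, and RX processing is now bounded at 512 messages per run, rescheduling the tasklet instead of re-unmasking the interrupt when the budget runs out, so a control-message flood cannot monopolize the CPU. A generic sketch of that pattern (names are placeholders, not driver API):

/* Illustrative only: cap per-run work, requeue if more remains. */
static void example_poll(unsigned long data)
{
	unsigned int budget = 512;

	while (have_work() && budget--)
		process_one();

	if (budget)
		reenable_irq();				/* drained */
	else
		tasklet_schedule(&example_tasklet);	/* keep going */
}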
@@ -2180,9 +2157,13 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
- if (!tx_ring->txds)
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!tx_ring->txds) {
+ netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+ tx_ring->cnt);
goto err_alloc;
+ }
tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
GFP_KERNEL);
@@ -2334,9 +2315,13 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
- if (!rx_ring->rxds)
+ &rx_ring->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!rx_ring->rxds) {
+ netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
+ rx_ring->cnt);
goto err_alloc;
+ }
rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
GFP_KERNEL);
@@ -3146,27 +3131,13 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void nfp_net_netpoll(struct net_device *netdev)
-{
- struct nfp_net *nn = netdev_priv(netdev);
- int i;
-
- /* nfp_net's NAPIs are statically allocated so even if there is a race
- * with reconfig path this will simply try to schedule some disabled
- * NAPI instances.
- */
- for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
- napi_schedule_irqoff(&nn->r_vecs[i].napi);
-}
-#endif
-
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct nfp_net *nn = netdev_priv(netdev);
int r;
+ /* Collect software stats */
for (r = 0; r < nn->max_r_vecs; r++) {
struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
u64 data[3];
@@ -3192,6 +3163,14 @@ static void nfp_net_stat64(struct net_device *netdev,
stats->tx_bytes += data[1];
stats->tx_errors += data[2];
}
+
+ /* Add in device stats */
+ stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES);
+ stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS);
+ stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS);
+
+ stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS);
+ stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS);
}
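
The added device counters come from nn_readq(), the driver's 64-bit control-BAR read helper, at fixed NFP_NET_CFG_STATS_* offsets; hardware accumulates them, and they are summed on top of the per-ring software totals gathered above. Assumed shape of the helper (paraphrased, not quoted from nfp_net.h):

/* Assumption: 64-bit read of the vNIC control BAR at offset 'off'. */
static inline u64 nn_readq(struct nfp_net *nn, int off)
{
	return readq(nn->dp.ctrl_bar + off);
}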
static int nfp_net_set_features(struct net_device *netdev,
@@ -3519,9 +3498,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_get_stats64 = nfp_net_stat64,
.ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = nfp_net_netpoll,
-#endif
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
@@ -3762,15 +3738,18 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
}
if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
netdev->hw_features |= NETIF_F_RXHASH;
- if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
- nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
+ if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
if (nn->cap & NFP_NET_CFG_CTRL_LSO)
- netdev->hw_features |= NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL;
- nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
-
- netdev->hw_enc_features = netdev->hw_features;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
}
+ if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
+ if (nn->cap & NFP_NET_CFG_CTRL_LSO)
+ netdev->hw_features |= NETIF_F_GSO_GRE;
+ nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE;
+ }
+ if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
+ netdev->hw_enc_features = netdev->hw_features;
netdev->vlan_features = netdev->hw_features;
@@ -3875,10 +3854,20 @@ int nfp_net_init(struct nfp_net *nn)
return err;
/* Set default MTU and Freelist buffer size */
- if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
+ if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
+ if (nn->app->ctrl_mtu <= nn->max_mtu) {
+ nn->dp.mtu = nn->app->ctrl_mtu;
+ } else {
+ if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX)
+ nn_warn(nn, "app requested MTU above max supported %u > %u\n",
+ nn->app->ctrl_mtu, nn->max_mtu);
+ nn->dp.mtu = nn->max_mtu;
+ }
+ } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
nn->dp.mtu = nn->max_mtu;
- else
+ } else {
nn->dp.mtu = NFP_NET_DEFAULT_MTU;
+ }
nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
if (nfp_app_ctrl_uses_data_vnics(nn->app))
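
The new MTU selection lets an app dictate the control vNIC MTU via ctrl_mtu, set in its .init() callback (see the @ctrl_mtu kdoc added in nfp_app.h above). NFP_APP_CTRL_MTU_MAX (U32_MAX) is a sentinel meaning "as large as the device supports": it clamps to max_mtu silently, while any other oversized request clamps with a warning. A hypothetical .init() illustrating the sentinel, not taken from the patch:

/* Hypothetical app init: request the largest usable control MTU. */
static int example_app_init(struct nfp_app *app)
{
	app->ctrl_mtu = NFP_APP_CTRL_MTU_MAX;	/* or an exact size */
	return 0;
}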
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index 1f9149bb2ae6..f2aaef976c7d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/device.h>
@@ -113,6 +83,13 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
caps->mbox_len = length;
}
break;
+ case NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0:
+ case NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1:
+ dev_warn(dev,
+ "experimental TLV type:%u offset:%u len:%u\n",
+ FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr),
+ offset, length);
+ break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
break;
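
Note the asymmetry in the TLV parser: the experimental types only log a warning, while the default case below still honours the REQUIRED bit in the TLV header, so firmware can ship experimental IDs without breaking older drivers, but an unknown TLV still fails parsing when marked required.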
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 44d3ea75d043..d7c8518ac952 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_ctrl.h
@@ -264,7 +234,6 @@
* %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
#define NFP_NET_CFG_BPF_ABI 0x0080
-#define NFP_NET_BPF_ABI 2
#define NFP_NET_CFG_BPF_CAP 0x0081
#define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */
#define NFP_NET_CFG_BPF_MAX_LEN 0x0082
@@ -489,12 +458,20 @@
* %NFP_NET_CFG_TLV_TYPE_MBOX:
* Variable, mailbox area. Overwrites the default location which is
* %NFP_NET_CFG_MBOX_BASE and length %NFP_NET_CFG_MBOX_VAL_MAX_SZ.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0:
+ * %NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1:
+ * Variable, experimental IDs. IDs designated for internal development and
+ * experiments before a stable TLV ID has been allocated to a feature. Should
+ * never be present in production firmware.
*/
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
#define NFP_NET_CFG_TLV_TYPE_END 2
#define NFP_NET_CFG_TLV_TYPE_ME_FREQ 3
#define NFP_NET_CFG_TLV_TYPE_MBOX 4
+#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5
+#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6
struct device;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index bb8ed460086e..769ceef09756 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
@@ -188,25 +158,21 @@ nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl)
const struct nfp_rtsym *specsym;
struct nfp_dumpspec *dumpspec;
int bytes_read;
- u32 cpp_id;
+ u64 sym_size;
specsym = nfp_rtsym_lookup(rtbl, NFP_DUMP_SPEC_RTSYM);
if (!specsym)
return NULL;
+ sym_size = nfp_rtsym_size(specsym);
/* expected size of this buffer is in the order of tens of kilobytes */
- dumpspec = vmalloc(sizeof(*dumpspec) + specsym->size);
+ dumpspec = vmalloc(sizeof(*dumpspec) + sym_size);
if (!dumpspec)
return NULL;
+ dumpspec->size = sym_size;
- dumpspec->size = specsym->size;
-
- cpp_id = NFP_CPP_ISLAND_ID(specsym->target, NFP_CPP_ACTION_RW, 0,
- specsym->domain);
-
- bytes_read = nfp_cpp_read(cpp, cpp_id, specsym->addr, dumpspec->data,
- specsym->size);
- if (bytes_read != specsym->size) {
+ bytes_read = nfp_rtsym_read(cpp, specsym, 0, dumpspec->data, sym_size);
+ if (bytes_read != sym_size) {
vfree(dumpspec);
nfp_warn(cpp, "Debug dump specification read failed.\n");
return NULL;
@@ -266,7 +232,6 @@ nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
struct nfp_dumpspec_rtsym *spec_rtsym;
const struct nfp_rtsym *sym;
u32 tl_len, key_len;
- u32 size;
spec_rtsym = (struct nfp_dumpspec_rtsym *)spec;
tl_len = be32_to_cpu(spec->length);
@@ -278,13 +243,8 @@ nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
if (!sym)
return nfp_dump_error_tlv_size(spec);
- if (sym->type == NFP_RTSYM_TYPE_ABS)
- size = sizeof(sym->addr);
- else
- size = sym->size;
-
return ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1) +
- ALIGN8(size);
+ ALIGN8(nfp_rtsym_size(sym));
}
static int
@@ -644,7 +604,6 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
const struct nfp_rtsym *sym;
u32 tl_len, key_len;
int bytes_read;
- u32 cpp_id;
void *dest;
int err;
@@ -657,11 +616,7 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
if (!sym)
return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump);
- if (sym->type == NFP_RTSYM_TYPE_ABS)
- sym_size = sizeof(sym->addr);
- else
- sym_size = sym->size;
-
+ sym_size = nfp_rtsym_size(sym);
header_size =
ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1);
total_size = header_size + ALIGN8(sym_size);
@@ -676,23 +631,20 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
memcpy(dump_header->rtsym, spec->rtsym, key_len + 1);
dump_header->cpp.dump_length = cpu_to_be32(sym_size);
- if (sym->type == NFP_RTSYM_TYPE_ABS) {
- *(u64 *)dest = sym->addr;
- } else {
+ if (sym->type != NFP_RTSYM_TYPE_ABS) {
cpp_params.target = sym->target;
cpp_params.action = NFP_CPP_ACTION_RW;
cpp_params.token = 0;
cpp_params.island = sym->domain;
- cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
dump_header->cpp.cpp_id = cpp_params;
dump_header->cpp.offset = cpu_to_be32(sym->addr);
- bytes_read = nfp_cpp_read(pf->cpp, cpp_id, sym->addr, dest,
- sym_size);
- if (bytes_read != sym_size) {
- if (bytes_read >= 0)
- bytes_read = -EIO;
- dump_header->error = cpu_to_be32(bytes_read);
- }
+ }
+
+ bytes_read = nfp_rtsym_read(pf->cpp, sym, 0, dest, sym_size);
+ if (bytes_read != sym_size) {
+ if (bytes_read >= 0)
+ bytes_read = -EIO;
+ dump_header->error = cpu_to_be32(bytes_read);
}
return 0;
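
Both the size calculation and the single-symbol dump above previously special-cased NFP_RTSYM_TYPE_ABS symbols, whose dumped contents are the 64-bit address itself; that knowledge now lives behind nfp_rtsym_size(), with nfp_rtsym_read() presumably returning the address bytes for ABS symbols so the explicit *(u64 *)dest store could go. A sketch of the size helper, consistent with the removed branches:

/* Sketch: ABS symbols dump as their 8-byte address, everything else
 * as the symbol's stated size.
 */
u64 nfp_rtsym_size(const struct nfp_rtsym *sym)
{
	if (sym->type == NFP_RTSYM_TYPE_ABS)
		return sizeof(u64);
	return sym->size;
}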
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 099b63d67451..69b1c9b62e3d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 6a79c8e4a7a4..cb9c512abc76 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_ethtool.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 28516eecccc8..1e7d20468a34 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_net_main.c
@@ -470,8 +440,8 @@ static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
+ u32 min_size, cpp_id;
u8 __iomem *mem;
- u32 min_size;
int err;
min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
@@ -519,9 +489,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
pf->vfcfg_tbl2 = NULL;
}
- mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0,
- NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
- &pf->qc_area);
+ cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
+ mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id, NFP_PCIE_QUEUE(0),
+ NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area);
if (IS_ERR(mem)) {
nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
err = PTR_ERR(mem);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 18a09cdcd9c6..c09b893c30dd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/etherdevice.h>
#include <linux/io-64-nonatomic-hi-lo.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 1bf2b18109ab..c412b94bfb97 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef NFP_NET_REPR_H
#define NFP_NET_REPR_H
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index 8b1b962cf1d1..b6ec46ed0540 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index e9df9d1eab8e..c9f09c5bb5ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#ifndef _NFP_NET_SRIOV_H_
#define _NFP_NET_SRIOV_H_
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 68928c86b698..d2c1e9ea5668 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_netvf_main.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 9c1298114c70..86bc149ca231 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/lockdep.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 51f10ae2d53e..b2479a2a49e5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef _NFP_PORT_H_
#define _NFP_PORT_H_
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
index 0ecd83705368..814360ed3a20 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c
@@ -1,36 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/kernel.h>
#include <net/devlink.h>
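
nfp_shared_buf.c above already carried an SPDX tag; its hunk drops the boilerplate and also normalizes the deprecated GPL-2.0 short identifier to GPL-2.0-only, the unambiguous spelling introduced with version 3.0 of the SPDX license list:

    // SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)       (deprecated form)
    // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)  (preferred form)
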
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
index 6cee6382deb4..afab6f0fc564 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
#ifndef NFP_CRC32_H
#define NFP_CRC32_H
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index f44d0a857314..db94b0bddc92 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
index 0e497a6154db..4a12133850f5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
#ifndef NFP6000_NFP6000_H
#define NFP6000_NFP6000_H
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
index 40fb19939505..9a86ec11c5ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp_xpb.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index c8d0b1016a64..85d46f206b3c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp6000_pcie.c
@@ -138,6 +108,7 @@
/* The number of explicit BARs to reserve.
* Minimum is 0, maximum is 4 on the NFP6000.
+ * The NFP3800 can have only one per PF.
*/
#define NFP_PCIE_EXPLICIT_BARS 2
@@ -589,8 +560,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3),
};
char status_msg[196] = {};
+ int i, err, bars_free;
struct nfp_bar *bar;
- int i, bars_free;
int expl_groups;
char *msg, *end;
@@ -643,6 +614,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
nfp_bar_resource_len(bar));
if (bar->iomem) {
+ int pf;
+
msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
atomic_inc(&bar->refcnt);
bars_free--;
@@ -651,22 +624,40 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;
- if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 ||
- nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) {
- nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
- } else {
- int pf = nfp->pdev->devfn & 7;
-
+ switch (nfp->pdev->device) {
+ case PCI_DEVICE_ID_NETRONOME_NFP3800:
+ pf = nfp->pdev->devfn & 7;
nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
+ break;
+ case PCI_DEVICE_ID_NETRONOME_NFP4000:
+ case PCI_DEVICE_ID_NETRONOME_NFP5000:
+ case PCI_DEVICE_ID_NETRONOME_NFP6000:
+ nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
+ break;
+ default:
+ dev_err(nfp->dev, "Unsupported device ID: %04hx!\n",
+ nfp->pdev->device);
+ err = -EINVAL;
+ goto err_unmap_bar0;
}
nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
}
- if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 ||
- nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000)
- expl_groups = 4;
- else
+ switch (nfp->pdev->device) {
+ case PCI_DEVICE_ID_NETRONOME_NFP3800:
expl_groups = 1;
+ break;
+ case PCI_DEVICE_ID_NETRONOME_NFP4000:
+ case PCI_DEVICE_ID_NETRONOME_NFP5000:
+ case PCI_DEVICE_ID_NETRONOME_NFP6000:
+ expl_groups = 4;
+ break;
+ default:
+ dev_err(nfp->dev, "Unsupported device ID: %04hx!\n",
+ nfp->pdev->device);
+ err = -EINVAL;
+ goto err_unmap_bar0;
+ }
/* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
bar = &nfp->bar[1];
@@ -711,6 +702,11 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
dev_info(nfp->dev, "%sfree: %d/%d\n", status_msg, bars_free, nfp->bars);
return 0;
+
+err_unmap_bar0:
+ if (nfp->bar[0].iomem)
+ iounmap(nfp->bar[0].iomem);
+ return err;
}
static void disable_bars(struct nfp6000_pcie *nfp)
@@ -1327,7 +1323,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
/* Finished with card initialization. */
dev_info(&pdev->dev,
- "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n");
+ "Netronome Flow Processor NFP4000/NFP5000/NFP6000 PCIe Card Probe\n");
pcie_print_link_status(pdev);
nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
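
The enable_bars() rework above replaces an if/else over two device IDs with switch statements that enumerate every supported chip, give the NFP3800 its per-PF CSR BAR, and fail the probe on unknown silicon, unwinding the BAR0 mapping through the new err_unmap_bar0 label. A compressed sketch of that dispatch-plus-unwind shape; configure_csr() is a hypothetical helper, while the device IDs and NFP_PCIE_BAR() are the driver's own:

    static int configure_csr(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
    {
        int pf;

        switch (nfp->pdev->device) {
        case PCI_DEVICE_ID_NETRONOME_NFP3800:
            /* one explicit BAR group per PF: index by function number */
            pf = nfp->pdev->devfn & 7;
            nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
            return 0;
        case PCI_DEVICE_ID_NETRONOME_NFP4000:
        case PCI_DEVICE_ID_NETRONOME_NFP5000:
        case PCI_DEVICE_ID_NETRONOME_NFP6000:
            nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
            return 0;
        default:
            dev_err(nfp->dev, "Unsupported device ID: %04hx!\n",
                    nfp->pdev->device);
            return -EINVAL; /* caller unmaps BAR0 and bails out */
        }
    }
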
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
index 245d8aaaa97d..6d1bffa6eac6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp6000_pcie.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
index 31fe92247f51..3d172e255693 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp_arm.h
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index c338d539fa96..2dd0f5842873 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_cpp.h
@@ -56,9 +26,16 @@
dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
#define nfp_dbg(cpp, fmt, args...) \
dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+#define nfp_printk(level, cpp, fmt, args...) \
+ dev_printk(level, nfp_cpp_device(cpp)->parent, \
+ NFP_SUBSYS ": " fmt, ## args)
#define PCI_64BIT_BAR_COUNT 3
+/* NFP hardware vendor/device ids.
+ */
+#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800
+
#define NFP_CPP_NUM_TARGETS 16
/* Max size of area it should be safe to request */
#define NFP_CPP_SAFE_AREA_SIZE SZ_2M
@@ -226,6 +203,7 @@ void nfp_cpp_free(struct nfp_cpp *cpp);
u32 nfp_cpp_model(struct nfp_cpp *cpp);
u16 nfp_cpp_interface(struct nfp_cpp *cpp);
int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial);
+unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp);
struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
u32 cpp_id,
@@ -286,8 +264,8 @@ int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
unsigned long long address, u64 value);
u8 __iomem *
-nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target,
- u64 addr, unsigned long size, struct nfp_cpp_area **area);
+nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, u32 cpp_id, u64 addr,
+ unsigned long size, struct nfp_cpp_area **area);
struct nfp_cpp_mutex;
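
Two small additions in nfp_cpp.h set up later parts of the patch: nfp_printk() forwards a runtime-selected log level to dev_printk(), and the NFP3800 PCI device ID joins the existing ones. The macro matters because the firmware-load reporting added further below picks KERN_DEBUG, KERN_INFO or KERN_ERR only after decoding the NSP's status word. A self-contained sketch of the level-forwarding pattern, with hypothetical names:

    #include <linux/device.h>
    #include <linux/printk.h>

    /* my_log() is a hypothetical stand-in for nfp_printk() */
    #define my_log(level, dev, fmt, args...) \
        dev_printk(level, dev, "nfp: " fmt, ## args)

    static void report_status(struct device *dev, bool failed)
    {
        /* pick the severity at run time, not at the call site */
        my_log(failed ? KERN_ERR : KERN_INFO, dev, "failed=%d\n", failed);
    }
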
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 73de57a09800..94994a939277 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_cppcore.c
@@ -75,6 +45,7 @@ struct nfp_cpp_resource {
* @interface: chip interface id we are using to reach it
* @serial: chip serial number
* @imb_cat_table: CPP Mapping Table
+ * @mu_locality_lsb: MU access type bit offset
*
* Following fields use explicit locking:
* @resource_list: NFP CPP resource list
@@ -100,6 +71,7 @@ struct nfp_cpp {
wait_queue_head_t waitq;
u32 imb_cat_table[16];
+ unsigned int mu_locality_lsb;
struct mutex area_cache_mutex;
struct list_head area_cache_list;
@@ -266,6 +238,34 @@ int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial)
return sizeof(cpp->serial);
}
+#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12)
+
+static int nfp_cpp_set_mu_locality_lsb(struct nfp_cpp *cpp)
+{
+ unsigned int mode, addr40;
+ u32 imbcppat;
+ int res;
+
+ imbcppat = cpp->imb_cat_table[NFP_CPP_TARGET_MU];
+ mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
+ addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
+
+ res = nfp_cppat_mu_locality_lsb(mode, addr40);
+ if (res < 0)
+ return res;
+ cpp->mu_locality_lsb = res;
+
+ return 0;
+}
+
+unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp)
+{
+ return cpp->mu_locality_lsb;
+}
+
/**
* nfp_cpp_area_alloc_with_name() - allocate a new CPP area
* @cpp: CPP device handle
@@ -1241,6 +1241,12 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3,
&mask[1]);
+ err = nfp_cpp_set_mu_locality_lsb(cpp);
+ if (err < 0) {
+ dev_err(parent, "Can't calculate MU locality bit offset\n");
+ goto err_out;
+ }
+
dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n",
nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp));
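
nfp_cppcore.c now derives the MU locality LSB once at probe time, straight from the cached imb_cat_table entry, stores it in struct nfp_cpp, and serves it through the trivial nfp_cpp_mu_locality_lsb() accessor; a failure to derive it aborts the probe early instead of surfacing later during MIP parsing. A small sketch of just the decode step (the field macros mirror the ones added above; 0x7 masks the 3-bit mode field):

    #include <linux/bits.h>
    #include <linux/types.h>

    #define MODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) /* 3-bit mode  */
    #define MODECFG_ADDRMODE    BIT(12)              /* 40-bit flag */

    /* Sketch: split a raw IMB config word into the two inputs that
     * nfp_cppat_mu_locality_lsb() consumes.
     */
    static void decode_imbcppat(u32 imbcppat, unsigned int *mode, int *addr40)
    {
        *mode = MODECFG_MODE_of(imbcppat);
        *addr40 = !!(imbcppat & MODECFG_ADDRMODE);
    }
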
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
index 20bad05e2e92..3cfecf105bde 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_cpplib.c
@@ -294,8 +264,7 @@ exit_release:
* nfp_cpp_map_area() - Helper function to map an area
* @cpp: NFP CPP handler
* @name: Name for the area
- * @domain: CPP domain
- * @target: CPP target
+ * @cpp_id: CPP ID for operation
* @addr: CPP address
* @size: Size of the area
* @area: Area handle (output)
@@ -306,15 +275,12 @@ exit_release:
* Return: Pointer to memory mapped area or ERR_PTR
*/
u8 __iomem *
-nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target,
- u64 addr, unsigned long size, struct nfp_cpp_area **area)
+nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, u32 cpp_id, u64 addr,
+ unsigned long size, struct nfp_cpp_area **area)
{
u8 __iomem *res;
- u32 dest;
-
- dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain);
- *area = nfp_cpp_area_alloc_acquire(cpp, name, dest, addr, size);
+ *area = nfp_cpp_area_alloc_acquire(cpp, name, cpp_id, addr, size);
if (!*area)
goto err_eio;
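
The nfp_cpp_map_area() signature change above pushes CPP ID construction out to the callers: instead of passing domain and target and always getting the read/write action, a caller now encodes target, action, token and island into one u32 itself (the removed lines show the old internal NFP_CPP_ISLAND_ID() call). A sketch of an updated caller under that assumption; NFP_CPP_ISLAND_ID() and NFP_CPP_ACTION_RW are the driver's own, while target 7, island 0, and the address/size (SZ_4K needs linux/sizes.h) are illustrative only:

    static u8 __iomem *
    map_example(struct nfp_cpp *cpp, struct nfp_cpp_area **area)
    {
        /* target, action, token, island packed into one cpp_id */
        u32 cpp_id = NFP_CPP_ISLAND_ID(7, NFP_CPP_ACTION_RW, 0, 0);

        return nfp_cpp_map_area(cpp, "example", cpp_id, 0x1000,
                                SZ_4K, area);
    }
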
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index 063a9a6243d6..f05dd34ab89f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM
* after chip reset.
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
index 5f193fe2d69e..79e17943519e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/*
* nfp_mip.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
index c88bf673cb76..7bc17b94ac60 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index 40510860341b..d4e02542e2e9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_nffw.c
@@ -156,29 +126,6 @@ static u64 nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi)
return (mip_off_hi & 0xFF) << 32 | le32_to_cpu(fi->mip_offset_lo);
}
-#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7)
-#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12)
-#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0
-#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12)
-
-static int nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp)
-{
- unsigned int mode, addr40;
- u32 xpbaddr, imbcppat;
- int err;
-
- /* Hardcoded XPB IMB Base, island 0 */
- xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4;
- err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat);
- if (err < 0)
- return err;
-
- mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
- addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
-
- return nfp_cppat_mu_locality_lsb(mode, addr40);
-}
-
static unsigned int
nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
{
@@ -304,14 +251,7 @@ int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off)
*off = nffw_fwinfo_mip_offset_get(fwinfo);
if (nffw_fwinfo_mip_mu_da_get(fwinfo)) {
- int locality_off;
-
- if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU)
- return 0;
-
- locality_off = nfp_mip_mu_locality_lsb(state->cpp);
- if (locality_off < 0)
- return locality_off;
+ int locality_off = nfp_cpp_mu_locality_lsb(state->cpp);
*off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off);
*off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off;
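
With the locality LSB precomputed, the MIP lookup above shrinks to a two-line field rewrite on the MU address: clear the access-type bits at the runtime offset, then OR in the direct-access encoding. The clear-then-set idiom in isolation; mask and direct stand in for NFP_MU_ADDR_ACCESS_TYPE_MASK and NFP_MU_ADDR_ACCESS_TYPE_DIRECT, and their values here are illustrative:

    #include <linux/types.h>

    /* Sketch: rewrite a field that sits at a runtime bit offset */
    static u64 set_direct_access(u64 off, unsigned int lsb)
    {
        const u64 mask = 0x3, direct = 0x2;

        off &= ~(mask << lsb); /* drop the previous access type */
        off |= direct << lsb;  /* select direct access */
        return off;
    }
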
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
index df599d5b6bb3..49a4d3f56b56 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_nffw.h
@@ -61,10 +31,12 @@ void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
/* Implemented in nfp_rtsym.c */
-#define NFP_RTSYM_TYPE_NONE 0
-#define NFP_RTSYM_TYPE_OBJECT 1
-#define NFP_RTSYM_TYPE_FUNCTION 2
-#define NFP_RTSYM_TYPE_ABS 3
+enum nfp_rtsym_type {
+ NFP_RTSYM_TYPE_NONE = 0,
+ NFP_RTSYM_TYPE_OBJECT = 1,
+ NFP_RTSYM_TYPE_FUNCTION = 2,
+ NFP_RTSYM_TYPE_ABS = 3,
+};
#define NFP_RTSYM_TARGET_NONE 0
#define NFP_RTSYM_TARGET_LMEM -1
@@ -83,7 +55,7 @@ struct nfp_rtsym {
const char *name;
u64 addr;
u64 size;
- int type;
+ enum nfp_rtsym_type type;
int target;
int domain;
};
@@ -98,6 +70,32 @@ const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx);
const struct nfp_rtsym *
nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);
+u64 nfp_rtsym_size(const struct nfp_rtsym *rtsym);
+int __nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, void *buf, size_t len);
+int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ void *buf, size_t len);
+int __nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, u32 *value);
+int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ u32 *value);
+int __nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, u64 *value);
+int nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ u64 *value);
+int __nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, void *buf, size_t len);
+int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ void *buf, size_t len);
+int __nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, u32 value);
+int nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ u32 value);
+int __nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, u64 value);
+int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ u64 value);
+
u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
int *error);
int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name,
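
nfp_nffw.h turns the symbol-type defines into a proper enum, types the struct field accordingly, and declares a family of rtsym accessors in two flavours: plain helpers that use the default read/write action, and __-prefixed variants that take an explicit action and token, following the usual kernel convention for entry points where the caller supplies extra context. A sketch of the plain flavour in use; "_counter_0" is a made-up symbol name:

    /* Sketch: read one u32 from a firmware-exported symbol through
     * the typed helpers declared above.
     */
    static int read_counter(struct nfp_cpp *cpp,
                            struct nfp_rtsym_table *rtbl, u32 *val)
    {
        const struct nfp_rtsym *sym;

        sym = nfp_rtsym_lookup(rtbl, "_counter_0");
        if (!sym)
            return -ENOENT;

        return nfp_rtsym_readl(cpp, sym, 0, val);
    }
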
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 2abee0fe3a7c..ce1577bbbd2a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_nsp.c
@@ -87,6 +57,11 @@
#define NSP_CODE_MAJOR GENMASK(15, 12)
#define NSP_CODE_MINOR GENMASK(11, 0)
+#define NFP_FW_LOAD_RET_MAJOR GENMASK(15, 8)
+#define NFP_FW_LOAD_RET_MINOR GENMASK(23, 16)
+
+#define NFP_HWINFO_LOOKUP_SIZE GENMASK(11, 0)
+
enum nfp_nsp_cmd {
SPCODE_NOOP = 0, /* No operation */
SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */
@@ -100,6 +75,8 @@ enum nfp_nsp_cmd {
SPCODE_NSP_WRITE_FLASH = 11, /* Load and flash image from buffer */
SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */
SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
+ SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */
+ SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. */
};
static const struct {
@@ -127,6 +104,40 @@ struct nfp_nsp {
void *entries;
};
+/**
+ * struct nfp_nsp_command_arg - NFP command argument structure
+ * @code: NFP SP Command Code
+ * @timeout_sec: Timeout value to wait for completion in seconds
+ * @option: NFP SP Command Argument
+ * @buff_cpp: NFP SP Buffer CPP Address info
+ * @buff_addr: NFP SP Buffer Host address
+ * @error_cb: Callback for interpreting option if error occurred
+ */
+struct nfp_nsp_command_arg {
+ u16 code;
+ unsigned int timeout_sec;
+ u32 option;
+ u32 buff_cpp;
+ u64 buff_addr;
+ void (*error_cb)(struct nfp_nsp *state, u32 ret_val);
+};
+
+/**
+ * struct nfp_nsp_command_buf_arg - NFP command with buffer argument structure
+ * @arg: NFP command argument structure
+ * @in_buf: Buffer with data for input
+ * @in_size: Size of @in_buf
+ * @out_buf: Buffer for output data
+ * @out_size: Size of @out_buf
+ */
+struct nfp_nsp_command_buf_arg {
+ struct nfp_nsp_command_arg arg;
+ const void *in_buf;
+ unsigned int in_size;
+ void *out_buf;
+ unsigned int out_size;
+};
+
struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state)
{
return state->cpp;
@@ -291,11 +302,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
/**
* __nfp_nsp_command() - Execute a command on the NFP Service Processor
* @state: NFP SP state
- * @code: NFP SP Command Code
- * @option: NFP SP Command Argument
- * @buff_cpp: NFP SP Buffer CPP Address info
- * @buff_addr: NFP SP Buffer Host address
- * @timeout_sec:Timeout value to wait for completion in seconds
+ * @arg: NFP command argument structure
*
* Return: 0 for success with no result
*
@@ -308,8 +315,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
* -ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete
*/
static int
-__nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
- u64 buff_addr, u32 timeout_sec)
+__nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg)
{
u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
struct nfp_cpp *cpp = state->cpp;
@@ -326,22 +332,22 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
if (err)
return err;
- if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) ||
- !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) {
+ if (!FIELD_FIT(NSP_BUFFER_CPP, arg->buff_cpp >> 8) ||
+ !FIELD_FIT(NSP_BUFFER_ADDRESS, arg->buff_addr)) {
nfp_err(cpp, "Host buffer out of reach %08x %016llx\n",
- buff_cpp, buff_addr);
+ arg->buff_cpp, arg->buff_addr);
return -EINVAL;
}
err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer,
- FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) |
- FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr));
+ FIELD_PREP(NSP_BUFFER_CPP, arg->buff_cpp >> 8) |
+ FIELD_PREP(NSP_BUFFER_ADDRESS, arg->buff_addr));
if (err < 0)
return err;
err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
- FIELD_PREP(NSP_COMMAND_OPTION, option) |
- FIELD_PREP(NSP_COMMAND_CODE, code) |
+ FIELD_PREP(NSP_COMMAND_OPTION, arg->option) |
+ FIELD_PREP(NSP_COMMAND_CODE, arg->code) |
FIELD_PREP(NSP_COMMAND_START, 1));
if (err < 0)
return err;
@@ -351,16 +357,16 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
NSP_COMMAND_START, 0, NFP_NSP_TIMEOUT_DEFAULT);
if (err) {
nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n",
- err, code);
+ err, arg->code);
return err;
}
/* Wait for NSP_STATUS_BUSY to go to 0 */
err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY,
- 0, timeout_sec);
+ 0, arg->timeout_sec ?: NFP_NSP_TIMEOUT_DEFAULT);
if (err) {
nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n",
- err, code);
+ err, arg->code);
return err;
}
@@ -372,26 +378,28 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
err = FIELD_GET(NSP_STATUS_RESULT, reg);
if (err) {
nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n",
- -err, (int)ret_val, code);
- nfp_nsp_print_extended_error(state, ret_val);
+ -err, (int)ret_val, arg->code);
+ if (arg->error_cb)
+ arg->error_cb(state, ret_val);
+ else
+ nfp_nsp_print_extended_error(state, ret_val);
return -err;
}
return ret_val;
}
-static int
-nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
- u64 buff_addr)
+static int nfp_nsp_command(struct nfp_nsp *state, u16 code)
{
- return __nfp_nsp_command(state, code, option, buff_cpp, buff_addr,
- NFP_NSP_TIMEOUT_DEFAULT);
+ const struct nfp_nsp_command_arg arg = {
+ .code = code,
+ };
+
+ return __nfp_nsp_command(state, &arg);
}
static int
-__nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
- const void *in_buf, unsigned int in_size, void *out_buf,
- unsigned int out_size, u32 timeout_sec)
+nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg)
{
struct nfp_cpp *cpp = nsp->cpp;
unsigned int max_size;
@@ -401,7 +409,7 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
if (nsp->ver.minor < 13) {
nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n",
- code, nsp->ver.major, nsp->ver.minor);
+ arg->arg.code, nsp->ver.major, nsp->ver.minor);
return -EOPNOTSUPP;
}
@@ -412,10 +420,11 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
if (err < 0)
return err;
- max_size = max(in_size, out_size);
+ max_size = max(arg->in_size, arg->out_size);
if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) {
nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%llu < %u)\n",
- code, FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M,
+ arg->arg.code,
+ FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M,
max_size);
return -EINVAL;
}
@@ -430,27 +439,30 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8;
cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg);
- if (in_buf && in_size) {
- err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
+ if (arg->in_buf && arg->in_size) {
+ err = nfp_cpp_write(cpp, cpp_id, cpp_buf,
+ arg->in_buf, arg->in_size);
if (err < 0)
return err;
}
/* Zero out remaining part of the buffer */
- if (out_buf && out_size && out_size > in_size) {
- memset(out_buf, 0, out_size - in_size);
- err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size,
- out_buf, out_size - in_size);
+ if (arg->out_buf && arg->out_size && arg->out_size > arg->in_size) {
+ memset(arg->out_buf, 0, arg->out_size - arg->in_size);
+ err = nfp_cpp_write(cpp, cpp_id, cpp_buf + arg->in_size,
+ arg->out_buf, arg->out_size - arg->in_size);
if (err < 0)
return err;
}
- ret = __nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf,
- timeout_sec);
+ arg->arg.buff_cpp = cpp_id;
+ arg->arg.buff_addr = cpp_buf;
+ ret = __nfp_nsp_command(nsp, &arg->arg);
if (ret < 0)
return ret;
- if (out_buf && out_size) {
- err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size);
+ if (arg->out_buf && arg->out_size) {
+ err = nfp_cpp_read(cpp, cpp_id, cpp_buf,
+ arg->out_buf, arg->out_size);
if (err < 0)
return err;
}
@@ -458,16 +470,6 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
return ret;
}
-static int
-nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
- const void *in_buf, unsigned int in_size, void *out_buf,
- unsigned int out_size)
-{
- return __nfp_nsp_command_buf(nsp, code, option, in_buf, in_size,
- out_buf, out_size,
- NFP_NSP_TIMEOUT_DEFAULT);
-}
-
int nfp_nsp_wait(struct nfp_nsp *state)
{
const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ;
@@ -479,7 +481,7 @@ int nfp_nsp_wait(struct nfp_nsp *state)
for (;;) {
const unsigned long start_time = jiffies;
- err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0);
+ err = nfp_nsp_command(state, SPCODE_NOOP);
if (err != -EAGAIN)
break;
@@ -501,53 +503,211 @@ int nfp_nsp_wait(struct nfp_nsp *state)
int nfp_nsp_device_soft_reset(struct nfp_nsp *state)
{
- return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
+ return nfp_nsp_command(state, SPCODE_SOFT_RESET);
}
int nfp_nsp_mac_reinit(struct nfp_nsp *state)
{
- return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0);
+ return nfp_nsp_command(state, SPCODE_MAC_INIT);
+}
+
+static void nfp_nsp_load_fw_extended_msg(struct nfp_nsp *state, u32 ret_val)
+{
+ static const char * const major_msg[] = {
+ /* 0 */ "Firmware from driver loaded",
+ /* 1 */ "Firmware from flash loaded",
+ /* 2 */ "Firmware loading failure",
+ };
+ static const char * const minor_msg[] = {
+ /* 0 */ "",
+ /* 1 */ "no named partition on flash",
+ /* 2 */ "error reading from flash",
+ /* 3 */ "can not deflate",
+ /* 4 */ "not a trusted file",
+ /* 5 */ "can not parse FW file",
+ /* 6 */ "MIP not found in FW file",
+ /* 7 */ "null firmware name in MIP",
+ /* 8 */ "FW version none",
+ /* 9 */ "FW build number none",
+ /* 10 */ "no FW selection policy HWInfo key found",
+ /* 11 */ "static FW selection policy",
+ /* 12 */ "FW version has precedence",
+ /* 13 */ "different FW application load requested",
+ /* 14 */ "development build",
+ };
+ unsigned int major, minor;
+ const char *level;
+
+ major = FIELD_GET(NFP_FW_LOAD_RET_MAJOR, ret_val);
+ minor = FIELD_GET(NFP_FW_LOAD_RET_MINOR, ret_val);
+
+ if (!nfp_nsp_has_stored_fw_load(state))
+ return;
+
+ /* Lower the message level in legacy case */
+ if (major == 0 && (minor == 0 || minor == 10))
+ level = KERN_DEBUG;
+ else if (major == 2)
+ level = KERN_ERR;
+ else
+ level = KERN_INFO;
+
+ if (major >= ARRAY_SIZE(major_msg))
+ nfp_printk(level, state->cpp, "FW loading status: %x\n",
+ ret_val);
+ else if (minor >= ARRAY_SIZE(minor_msg))
+ nfp_printk(level, state->cpp, "%s, reason code: %d\n",
+ major_msg[major], minor);
+ else
+ nfp_printk(level, state->cpp, "%s%c %s\n",
+ major_msg[major], minor ? ',' : '.',
+ minor_msg[minor]);
}
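
nfp_nsp_load_fw_extended_msg() decodes a packed status word: FIELD_GET() pulls the major and minor codes out of ret_val, the major selects the message text and severity, the minor the reason string. A stand-alone illustration of that kind of mask-based field extraction (the mask values below are stand-ins, not the real NFP_FW_LOAD_RET_* layout):

#include <stdint.h>
#include <stdio.h>

#define RET_MAJOR_MASK 0x0000f000u	/* assumed layout */
#define RET_MINOR_MASK 0x00000fffu	/* assumed layout */

static unsigned int field_get(uint32_t mask, uint32_t val)
{
	/* divide by the mask's lowest set bit, i.e. shift right */
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	uint32_t ret_val = 0x1002;	/* example: major 1, minor 2 */

	printf("major %u minor %u\n",
	       field_get(RET_MAJOR_MASK, ret_val),
	       field_get(RET_MINOR_MASK, ret_val));
	return 0;
}
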
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
{
- return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data,
- fw->size, NULL, 0);
+ struct nfp_nsp_command_buf_arg load_fw = {
+ {
+ .code = SPCODE_FW_LOAD,
+ .option = fw->size,
+ .error_cb = nfp_nsp_load_fw_extended_msg,
+ },
+ .in_buf = fw->data,
+ .in_size = fw->size,
+ };
+ int ret;
+
+ ret = nfp_nsp_command_buf(state, &load_fw);
+ if (ret < 0)
+ return ret;
+
+ nfp_nsp_load_fw_extended_msg(state, ret);
+ return 0;
}
int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw)
{
- /* The flash time is specified to take a maximum of 70s so we add an
- * additional factor to this spec time.
- */
- u32 timeout_sec = 2.5 * 70;
-
- return __nfp_nsp_command_buf(state, SPCODE_NSP_WRITE_FLASH, fw->size,
- fw->data, fw->size, NULL, 0, timeout_sec);
+ struct nfp_nsp_command_buf_arg write_flash = {
+ {
+ .code = SPCODE_NSP_WRITE_FLASH,
+ .option = fw->size,
+ /* The flash time is specified to take a maximum of 70s
+ * so we add an additional factor to this spec time.
+ */
+ .timeout_sec = 2.5 * 70,
+ },
+ .in_buf = fw->data,
+ .in_size = fw->size,
+ };
+
+ return nfp_nsp_command_buf(state, &write_flash);
}
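
Worth noting about .timeout_sec = 2.5 * 70 above: the expression is a double constant, but initializing a u32 field converts it at compile time to exactly 175, so no floating-point arithmetic reaches the kernel. Demonstrated stand-alone:

#include <stdio.h>

int main(void)
{
	unsigned int timeout_sec = 2.5 * 70;	/* converts to 175 */

	printf("%u\n", timeout_sec);
	return 0;
}
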
int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
{
- return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
- buf, size);
+ struct nfp_nsp_command_buf_arg eth_rescan = {
+ {
+ .code = SPCODE_ETH_RESCAN,
+ .option = size,
+ },
+ .out_buf = buf,
+ .out_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &eth_rescan);
}
int nfp_nsp_write_eth_table(struct nfp_nsp *state,
const void *buf, unsigned int size)
{
- return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size,
- NULL, 0);
+ struct nfp_nsp_command_buf_arg eth_ctrl = {
+ {
+ .code = SPCODE_ETH_CONTROL,
+ .option = size,
+ },
+ .in_buf = buf,
+ .in_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &eth_ctrl);
}
int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size)
{
- return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0,
- buf, size);
+ struct nfp_nsp_command_buf_arg identify = {
+ {
+ .code = SPCODE_NSP_IDENTIFY,
+ .option = size,
+ },
+ .out_buf = buf,
+ .out_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &identify);
}
int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
void *buf, unsigned int size)
{
- return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask,
- NULL, 0, buf, size);
+ struct nfp_nsp_command_buf_arg sensors = {
+ {
+ .code = SPCODE_NSP_SENSORS,
+ .option = sensor_mask,
+ },
+ .out_buf = buf,
+ .out_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &sensors);
+}
+
+int nfp_nsp_load_stored_fw(struct nfp_nsp *state)
+{
+ const struct nfp_nsp_command_arg arg = {
+ .code = SPCODE_FW_STORED,
+ .error_cb = nfp_nsp_load_fw_extended_msg,
+ };
+ int ret;
+
+ ret = __nfp_nsp_command(state, &arg);
+ if (ret < 0)
+ return ret;
+
+ nfp_nsp_load_fw_extended_msg(state, ret);
+ return 0;
+}
+
+static int
+__nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ struct nfp_nsp_command_buf_arg hwinfo_lookup = {
+ {
+ .code = SPCODE_HWINFO_LOOKUP,
+ .option = size,
+ },
+ .in_buf = buf,
+ .in_size = size,
+ .out_buf = buf,
+ .out_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &hwinfo_lookup);
+}
+
+int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ int err;
+
+ size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE);
+
+ err = __nfp_nsp_hwinfo_lookup(state, buf, size);
+ if (err)
+ return err;
+
+ if (strnlen(buf, size) == size) {
+ nfp_err(state->cpp, "NSP HWinfo value not NULL-terminated\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
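
The termination check above relies on strnlen() returning the full buffer size exactly when no NUL byte occurs inside it. In miniature:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char ok[8] = "abc";	/* NUL-terminated */
	char bad[8] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' };

	printf("ok:  %zu of %zu\n", strnlen(ok, sizeof(ok)), sizeof(ok));
	printf("bad: %zu of %zu\n", strnlen(bad, sizeof(bad)), sizeof(bad));
	return 0;
}
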
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index f23d9e06f097..ff33ac54097a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
#ifndef NSP_NSP_H
#define NSP_NSP_H 1
@@ -50,12 +20,24 @@ int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_mac_reinit(struct nfp_nsp *state);
+int nfp_nsp_load_stored_fw(struct nfp_nsp *state);
+int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size);
static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
{
return nfp_nsp_get_abi_ver_minor(state) > 20;
}
+static inline bool nfp_nsp_has_stored_fw_load(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 23;
+}
+
+static inline bool nfp_nsp_has_hwinfo_lookup(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 24;
+}
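
These inlines gate features on the NSP ABI minor version, mirroring nfp_nsp_has_mac_reinit() above; the cut-offs (>23, >24) come from the hunk, the rest of this stand-alone sketch is illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool has_stored_fw_load(unsigned int abi_minor)
{
	return abi_minor > 23;
}

static bool has_hwinfo_lookup(unsigned int abi_minor)
{
	return abi_minor > 24;
}

int main(void)
{
	unsigned int minor = 24;

	printf("stored FW load: %d, hwinfo lookup: %d\n",
	       has_stored_fw_load(minor), has_hwinfo_lookup(minor));
	return 0;
}
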
+
enum nfp_eth_interface {
NFP_INTERFACE_NONE = 0,
NFP_INTERFACE_SFP = 1,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
index 5d362f87af08..0997d127144f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 7ca589660e4d..802c9224bb32 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2017 Netronome Systems, Inc. */
/* Authors: David Brunecz <david.brunecz@netronome.com>
* Jakub Kicinski <jakub.kicinski@netronome.com>
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index d32af598da90..ce7492a6a98f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_resource.c
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
index 9e34216578da..75f012444796 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_rtsym.c
@@ -39,6 +9,8 @@
* Espen Skoglund <espen.skoglund@netronome.com>
* Francois H. Theron <francois.theron@netronome.com>
*/
+
+#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -233,6 +205,229 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name)
return NULL;
}
+u64 nfp_rtsym_size(const struct nfp_rtsym *sym)
+{
+ switch (sym->type) {
+ case NFP_RTSYM_TYPE_NONE:
+ pr_err("rtsym '%s': type NONE\n", sym->name);
+ return 0;
+ default:
+ pr_warn("rtsym '%s': unknown type: %d\n", sym->name, sym->type);
+ /* fall through */
+ case NFP_RTSYM_TYPE_OBJECT:
+ case NFP_RTSYM_TYPE_FUNCTION:
+ return sym->size;
+ case NFP_RTSYM_TYPE_ABS:
+ return sizeof(u64);
+ }
+}
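
nfp_rtsym_size() deliberately places the default: label before the cases it falls into, so unknown types warn once and are then sized like objects. The same control flow in stand-alone form:

#include <stdint.h>
#include <stdio.h>

enum sym_type { T_NONE, T_OBJECT, T_FUNCTION, T_ABS, T_BOGUS = 99 };

static uint64_t sym_size(enum sym_type type, uint64_t size)
{
	switch (type) {
	case T_NONE:
		return 0;
	default:
		fprintf(stderr, "unknown type %d\n", type);
		/* fall through */
	case T_OBJECT:
	case T_FUNCTION:
		return size;
	case T_ABS:
		return sizeof(uint64_t);
	}
}

int main(void)
{
	printf("%llu\n", (unsigned long long)sym_size(T_BOGUS, 16));
	return 0;
}
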
+
+static int
+nfp_rtsym_to_dest(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, u32 *cpp_id, u64 *addr)
+{
+ if (sym->type != NFP_RTSYM_TYPE_OBJECT) {
+ nfp_err(cpp, "rtsym '%s': direct access to non-object rtsym\n",
+ sym->name);
+ return -EINVAL;
+ }
+
+ *addr = sym->addr + off;
+
+ if (sym->target == NFP_RTSYM_TARGET_EMU_CACHE) {
+ int locality_off = nfp_cpp_mu_locality_lsb(cpp);
+
+ *addr &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off);
+ *addr |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off;
+
+ *cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, action, token,
+ sym->domain);
+ } else if (sym->target < 0) {
+ nfp_err(cpp, "rtsym '%s': unhandled target encoding: %d\n",
+ sym->name, sym->target);
+ return -EINVAL;
+ } else {
+ *cpp_id = NFP_CPP_ISLAND_ID(sym->target, action, token,
+ sym->domain);
+ }
+
+ return 0;
+}
+
+int __nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, void *buf, size_t len)
+{
+ u64 sym_size = nfp_rtsym_size(sym);
+ u32 cpp_id;
+ u64 addr;
+ int err;
+
+ if (off > sym_size) {
+ nfp_err(cpp, "rtsym '%s': read out of bounds: off: %lld + len: %zd > size: %lld\n",
+ sym->name, off, len, sym_size);
+ return -ENXIO;
+ }
+ len = min_t(size_t, len, sym_size - off);
+
+ if (sym->type == NFP_RTSYM_TYPE_ABS) {
+ u8 tmp[8];
+
+ put_unaligned_le64(sym->addr, tmp);
+ memcpy(buf, &tmp[off], len);
+
+ return len;
+ }
+
+ err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr);
+ if (err)
+ return err;
+
+ return nfp_cpp_read(cpp, cpp_id, addr, buf, len);
+}
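
ABS symbols carry their value in the address field rather than in backing memory, so __nfp_rtsym_read() serializes the 64-bit address little-endian into a scratch buffer and copies out the requested slice. A stand-alone equivalent, with put_unaligned_le64() open-coded:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t addr = 0x1122334455667788ull;
	uint8_t tmp[8], out[4];
	size_t off = 2, len = sizeof(out);
	int i;

	for (i = 0; i < 8; i++)		/* put_unaligned_le64() */
		tmp[i] = addr >> (8 * i);
	memcpy(out, &tmp[off], len);

	printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
	return 0;
}
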
+
+int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ void *buf, size_t len)
+{
+ return __nfp_rtsym_read(cpp, sym, NFP_CPP_ACTION_RW, 0, off, buf, len);
+}
+
+int __nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, u32 *value)
+{
+ u32 cpp_id;
+ u64 addr;
+ int err;
+
+ if (off + 4 > nfp_rtsym_size(sym)) {
+ nfp_err(cpp, "rtsym '%s': readl out of bounds: off: %lld + 4 > size: %lld\n",
+ sym->name, off, nfp_rtsym_size(sym));
+ return -ENXIO;
+ }
+
+ err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr);
+ if (err)
+ return err;
+
+ return nfp_cpp_readl(cpp, cpp_id, addr, value);
+}
+
+int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ u32 *value)
+{
+ return __nfp_rtsym_readl(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value);
+}
+
+int __nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, u64 *value)
+{
+ u32 cpp_id;
+ u64 addr;
+ int err;
+
+ if (off + 8 > nfp_rtsym_size(sym)) {
+ nfp_err(cpp, "rtsym '%s': readq out of bounds: off: %lld + 8 > size: %lld\n",
+ sym->name, off, nfp_rtsym_size(sym));
+ return -ENXIO;
+ }
+
+ if (sym->type == NFP_RTSYM_TYPE_ABS) {
+ *value = sym->addr;
+ return 0;
+ }
+
+ err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr);
+ if (err)
+ return err;
+
+ return nfp_cpp_readq(cpp, cpp_id, addr, value);
+}
+
+int nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ u64 *value)
+{
+ return __nfp_rtsym_readq(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value);
+}
+
+int __nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, void *buf, size_t len)
+{
+ u64 sym_size = nfp_rtsym_size(sym);
+ u32 cpp_id;
+ u64 addr;
+ int err;
+
+ if (off > sym_size) {
+ nfp_err(cpp, "rtsym '%s': write out of bounds: off: %lld + len: %zd > size: %lld\n",
+ sym->name, off, len, sym_size);
+ return -ENXIO;
+ }
+ len = min_t(size_t, len, sym_size - off);
+
+ err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr);
+ if (err)
+ return err;
+
+ return nfp_cpp_write(cpp, cpp_id, addr, buf, len);
+}
+
+int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ void *buf, size_t len)
+{
+ return __nfp_rtsym_write(cpp, sym, NFP_CPP_ACTION_RW, 0, off, buf, len);
+}
+
+int __nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, u32 value)
+{
+ u32 cpp_id;
+ u64 addr;
+ int err;
+
+ if (off + 4 > nfp_rtsym_size(sym)) {
+ nfp_err(cpp, "rtsym '%s': writel out of bounds: off: %lld + 4 > size: %lld\n",
+ sym->name, off, nfp_rtsym_size(sym));
+ return -ENXIO;
+ }
+
+ err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr);
+ if (err)
+ return err;
+
+ return nfp_cpp_writel(cpp, cpp_id, addr, value);
+}
+
+int nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ u32 value)
+{
+ return __nfp_rtsym_writel(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value);
+}
+
+int __nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym,
+ u8 action, u8 token, u64 off, u64 value)
+{
+ u32 cpp_id;
+ u64 addr;
+ int err;
+
+ if (off + 8 > nfp_rtsym_size(sym)) {
+ nfp_err(cpp, "rtsym '%s': writeq out of bounds: off: %lld + 8 > size: %lld\n",
+ sym->name, off, nfp_rtsym_size(sym));
+ return -ENXIO;
+ }
+
+ err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr);
+ if (err)
+ return err;
+
+ return nfp_cpp_writeq(cpp, cpp_id, addr, value);
+}
+
+int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off,
+ u64 value)
+{
+ return __nfp_rtsym_writeq(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value);
+}
+
/**
* nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
* @rtbl: NFP RTsym table
@@ -249,7 +444,7 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
int *error)
{
const struct nfp_rtsym *sym;
- u32 val32, id;
+ u32 val32;
u64 val;
int err;
@@ -259,20 +454,18 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
goto exit;
}
- id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
-
- switch (sym->size) {
+ switch (nfp_rtsym_size(sym)) {
case 4:
- err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32);
+ err = nfp_rtsym_readl(rtbl->cpp, sym, 0, &val32);
val = val32;
break;
case 8:
- err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val);
+ err = nfp_rtsym_readq(rtbl->cpp, sym, 0, &val);
break;
default:
nfp_err(rtbl->cpp,
- "rtsym '%s' unsupported or non-scalar size: %lld\n",
- name, sym->size);
+ "rtsym '%s': unsupported or non-scalar size: %lld\n",
+ name, nfp_rtsym_size(sym));
err = -EINVAL;
break;
}
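
Both nfp_rtsym_read_le() and nfp_rtsym_write_le() now dispatch on nfp_rtsym_size() instead of the raw sym->size: 4-byte symbols widen through a u32, 8-byte ones go straight to u64, anything else is rejected. The dispatch in miniature:

#include <stdint.h>
#include <stdio.h>

static int read_scalar(uint64_t sym_size, const void *src, uint64_t *val)
{
	switch (sym_size) {
	case 4:
		*val = *(const uint32_t *)src;
		return 0;
	case 8:
		*val = *(const uint64_t *)src;
		return 0;
	default:
		return -22;	/* -EINVAL */
	}
}

int main(void)
{
	uint32_t raw32 = 0xabcd;
	uint64_t val;

	if (!read_scalar(4, &raw32, &val))
		printf("0x%llx\n", (unsigned long long)val);
	return 0;
}
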
@@ -303,25 +496,22 @@ int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name,
{
const struct nfp_rtsym *sym;
int err;
- u32 id;
sym = nfp_rtsym_lookup(rtbl, name);
if (!sym)
return -ENOENT;
- id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
-
- switch (sym->size) {
+ switch (nfp_rtsym_size(sym)) {
case 4:
- err = nfp_cpp_writel(rtbl->cpp, id, sym->addr, value);
+ err = nfp_rtsym_writel(rtbl->cpp, sym, 0, value);
break;
case 8:
- err = nfp_cpp_writeq(rtbl->cpp, id, sym->addr, value);
+ err = nfp_rtsym_writeq(rtbl->cpp, sym, 0, value);
break;
default:
nfp_err(rtbl->cpp,
- "rtsym '%s' unsupported or non-scalar size: %lld\n",
- name, sym->size);
+ "rtsym '%s': unsupported or non-scalar size: %lld\n",
+ name, nfp_rtsym_size(sym));
err = -EINVAL;
break;
}
@@ -335,20 +525,29 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id,
{
const struct nfp_rtsym *sym;
u8 __iomem *mem;
+ u32 cpp_id;
+ u64 addr;
+ int err;
sym = nfp_rtsym_lookup(rtbl, name);
if (!sym)
return (u8 __iomem *)ERR_PTR(-ENOENT);
+ err = nfp_rtsym_to_dest(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0,
+ &cpp_id, &addr);
+ if (err) {
+ nfp_err(rtbl->cpp, "rtsym '%s': mapping failed\n", name);
+ return (u8 __iomem *)ERR_PTR(err);
+ }
+
if (sym->size < min_size) {
- nfp_err(rtbl->cpp, "Symbol %s too small\n", name);
+ nfp_err(rtbl->cpp, "rtsym '%s': too small\n", name);
return (u8 __iomem *)ERR_PTR(-EINVAL);
}
- mem = nfp_cpp_map_area(rtbl->cpp, id, sym->domain, sym->target,
- sym->addr, sym->size, area);
+ mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr, sym->size, area);
if (IS_ERR(mem)) {
- nfp_err(rtbl->cpp, "Failed to map symbol %s: %ld\n",
+ nfp_err(rtbl->cpp, "rtsym '%s': failed to map: %ld\n",
name, PTR_ERR(mem));
return mem;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
index 4ea1e585d945..79470f198a62 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
* nfp_target.c
@@ -39,7 +9,11 @@
* Francois H. Theron <francois.theron@netronome.com>
*/
+#define pr_fmt(fmt) "NFP target: " fmt
+
#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
#include "nfp_cpp.h"
@@ -733,8 +707,10 @@ int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address,
u32 imb;
int err;
- if (target < 0 || target >= 16)
+ if (target < 0 || target >= 16) {
+ pr_err("Invalid CPP target: %d\n", target);
return -EINVAL;
+ }
if (island == 0) {
/* Already translated */
@@ -753,8 +729,10 @@ int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address,
err = nfp_cppat_addr_encode(cpp_target_address, island, target,
((imb >> 13) & 7), ((imb >> 12) & 1),
((imb >> 6) & 0x3f), ((imb >> 0) & 0x3f));
- if (err)
+ if (err) {
+ pr_err("Can't encode CPP address: %d\n", err);
return err;
+ }
*cpp_target_id = NFP_CPP_ID(target,
NFP_CPP_ID_ACTION_of(cpp_island_id),
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c
index d5b587fccaa3..aea8579206ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nic/main.c
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017 Netronome Systems, Inc. */
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig
index aa41e5f6e437..c73978474c4b 100644
--- a/drivers/net/ethernet/ni/Kconfig
+++ b/drivers/net/ethernet/ni/Kconfig
@@ -18,8 +18,9 @@ if NET_VENDOR_NI
config NI_XGE_MANAGEMENT_ENET
tristate "National Instruments XGE management enet support"
- depends on ARCH_ZYNQ
+ depends on HAS_IOMEM && HAS_DMA
select PHYLIB
+ select OF_MDIO if OF
help
Simple LAN device for debug or management purposes. Can
support either 10G or 1G PHYs via SFP+ ports.
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 76efed058f33..0611f2335b4a 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -106,10 +106,10 @@
(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
struct nixge_hw_dma_bd {
- u32 next;
- u32 reserved1;
- u32 phys;
- u32 reserved2;
+ u32 next_lo;
+ u32 next_hi;
+ u32 phys_lo;
+ u32 phys_hi;
u32 reserved3;
u32 reserved4;
u32 cntrl;
@@ -119,11 +119,39 @@ struct nixge_hw_dma_bd {
u32 app2;
u32 app3;
u32 app4;
- u32 sw_id_offset;
- u32 reserved5;
+ u32 sw_id_offset_lo;
+ u32 sw_id_offset_hi;
u32 reserved6;
};
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
+ do { \
+ (bd)->field##_lo = lower_32_bits((addr)); \
+ (bd)->field##_hi = upper_32_bits((addr)); \
+ } while (0)
+#else
+#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
+ ((bd)->field##_lo = lower_32_bits((addr)))
+#endif
+
+#define nixge_hw_dma_bd_set_phys(bd, addr) \
+ nixge_hw_dma_bd_set_addr((bd), phys, (addr))
+
+#define nixge_hw_dma_bd_set_next(bd, addr) \
+ nixge_hw_dma_bd_set_addr((bd), next, (addr))
+
+#define nixge_hw_dma_bd_set_offset(bd, addr) \
+ nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr))
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define nixge_hw_dma_bd_get_addr(bd, field) \
+ (dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo))
+#else
+#define nixge_hw_dma_bd_get_addr(bd, field) \
+ (dma_addr_t)((bd)->field##_lo)
+#endif
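
The buffer descriptor now splits each pointer into explicit _lo/_hi words so 64-bit DMA addresses survive; on 32-bit configurations only the low word is written. A stand-alone sketch of the split and reassembly, with lower_32_bits()/upper_32_bits() open-coded:

#include <stdint.h>
#include <stdio.h>

struct bd { uint32_t phys_lo, phys_hi; };

static void bd_set_phys(struct bd *bd, uint64_t addr)
{
	bd->phys_lo = (uint32_t)addr;		/* lower_32_bits() */
	bd->phys_hi = (uint32_t)(addr >> 32);	/* upper_32_bits() */
}

static uint64_t bd_get_phys(const struct bd *bd)
{
	return ((uint64_t)bd->phys_hi << 32) | bd->phys_lo;
}

int main(void)
{
	struct bd bd;

	bd_set_phys(&bd, 0x123456789abcull);
	printf("0x%llx\n", (unsigned long long)bd_get_phys(&bd));
	return 0;
}
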
+
struct nixge_tx_skb {
struct sk_buff *skb;
dma_addr_t mapping;
@@ -176,6 +204,15 @@ static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
writel(val, priv->dma_regs + offset);
}
+static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset,
+ dma_addr_t addr)
+{
+ writel(lower_32_bits(addr), priv->dma_regs + offset);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ writel(upper_32_bits(addr), priv->dma_regs + offset + 4);
+#endif
+}
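
Descriptor-pointer registers are likewise 64 bits wide on CONFIG_PHYS_ADDR_T_64BIT builds, written as two 32-bit MMIO stores: the low word at the register offset, the high word four bytes above it. Emulated here against a fake register file standing in for writel():

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];

static void write_desc_reg(unsigned int off, uint64_t addr)
{
	regs[off / 4] = (uint32_t)addr;
	regs[off / 4 + 1] = (uint32_t)(addr >> 32);
}

int main(void)
{
	write_desc_reg(0x8, 0xdeadbeefcafef00dull);
	printf("lo 0x%08x hi 0x%08x\n",
	       (unsigned int)regs[2], (unsigned int)regs[3]);
	return 0;
}
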
+
static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
return readl(priv->dma_regs + offset);
@@ -202,13 +239,22 @@ static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
+ dma_addr_t phys_addr;
+ struct sk_buff *skb;
int i;
for (i = 0; i < RX_BD_NUM; i++) {
- dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys,
- NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
- dev_kfree_skb((struct sk_buff *)
- (priv->rx_bd_v[i].sw_id_offset));
+ phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
+ phys);
+
+ dma_unmap_single(ndev->dev.parent, phys_addr,
+ NIXGE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb = (struct sk_buff *)(uintptr_t)
+ nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
+ sw_id_offset);
+ dev_kfree_skb(skb);
}
if (priv->rx_bd_v)
@@ -231,6 +277,7 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
struct nixge_priv *priv = netdev_priv(ndev);
struct sk_buff *skb;
+ dma_addr_t phys;
u32 cr;
int i;
@@ -259,27 +306,30 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
goto out;
for (i = 0; i < TX_BD_NUM; i++) {
- priv->tx_bd_v[i].next = priv->tx_bd_p +
- sizeof(*priv->tx_bd_v) *
- ((i + 1) % TX_BD_NUM);
+ nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i],
+ priv->tx_bd_p +
+ sizeof(*priv->tx_bd_v) *
+ ((i + 1) % TX_BD_NUM));
}
for (i = 0; i < RX_BD_NUM; i++) {
- priv->rx_bd_v[i].next = priv->rx_bd_p +
- sizeof(*priv->rx_bd_v) *
- ((i + 1) % RX_BD_NUM);
+ nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i],
+ priv->rx_bd_p
+ + sizeof(*priv->rx_bd_v) *
+ ((i + 1) % RX_BD_NUM));
skb = netdev_alloc_skb_ip_align(ndev,
NIXGE_MAX_JUMBO_FRAME_SIZE);
if (!skb)
goto out;
- priv->rx_bd_v[i].sw_id_offset = (u32)skb;
- priv->rx_bd_v[i].phys =
- dma_map_single(ndev->dev.parent,
- skb->data,
- NIXGE_MAX_JUMBO_FRAME_SIZE,
- DMA_FROM_DEVICE);
+ nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb);
+ phys = dma_map_single(ndev->dev.parent, skb->data,
+ NIXGE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys);
+
priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
}
@@ -312,18 +362,18 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
+ nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
+ nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
(sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting.
*/
- nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
+ nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
@@ -451,7 +501,7 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct nixge_priv *priv = netdev_priv(ndev);
struct nixge_hw_dma_bd *cur_p;
struct nixge_tx_skb *tx_skb;
- dma_addr_t tail_p;
+ dma_addr_t tail_p, cur_phys;
skb_frag_t *frag;
u32 num_frag;
u32 ii;
@@ -466,15 +516,16 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
+ cur_phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, cur_phys))
goto drop;
+ nixge_hw_dma_bd_set_phys(cur_p, cur_phys);
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
tx_skb->skb = NULL;
- tx_skb->mapping = cur_p->phys;
+ tx_skb->mapping = cur_phys;
tx_skb->size = skb_headlen(skb);
tx_skb->mapped_as_page = false;
@@ -485,16 +536,17 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_skb = &priv->tx_skb[priv->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
- cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
+ cur_phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, cur_phys))
goto frag_err;
+ nixge_hw_dma_bd_set_phys(cur_p, cur_phys);
cur_p->cntrl = skb_frag_size(frag);
tx_skb->skb = NULL;
- tx_skb->mapping = cur_p->phys;
+ tx_skb->mapping = cur_phys;
tx_skb->size = skb_frag_size(frag);
tx_skb->mapped_as_page = true;
}
@@ -506,7 +558,7 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
/* Start the transfer */
- nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+ nixge_dma_write_desc_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
++priv->tx_bd_tail;
priv->tx_bd_tail %= TX_BD_NUM;
@@ -537,7 +589,7 @@ static int nixge_recv(struct net_device *ndev, int budget)
struct nixge_priv *priv = netdev_priv(ndev);
struct sk_buff *skb, *new_skb;
struct nixge_hw_dma_bd *cur_p;
- dma_addr_t tail_p = 0;
+ dma_addr_t tail_p = 0, cur_phys = 0;
u32 packets = 0;
u32 length = 0;
u32 size = 0;
@@ -549,13 +601,15 @@ static int nixge_recv(struct net_device *ndev, int budget)
tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
priv->rx_bd_ci;
- skb = (struct sk_buff *)(cur_p->sw_id_offset);
+ skb = (struct sk_buff *)(uintptr_t)
+ nixge_hw_dma_bd_get_addr(cur_p, sw_id_offset);
length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
length = NIXGE_MAX_JUMBO_FRAME_SIZE;
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ dma_unmap_single(ndev->dev.parent,
+ nixge_hw_dma_bd_get_addr(cur_p, phys),
NIXGE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
@@ -579,16 +633,17 @@ static int nixge_recv(struct net_device *ndev, int budget)
if (!new_skb)
return packets;
- cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
- NIXGE_MAX_JUMBO_FRAME_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
+ cur_phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ NIXGE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, cur_phys)) {
/* FIXME: bail out and clean up */
netdev_err(ndev, "Failed to map ...\n");
}
+ nixge_hw_dma_bd_set_phys(cur_p, cur_phys);
cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
cur_p->status = 0;
- cur_p->sw_id_offset = (u32)new_skb;
+ nixge_hw_dma_bd_set_offset(cur_p, (uintptr_t)new_skb);
++priv->rx_bd_ci;
priv->rx_bd_ci %= RX_BD_NUM;
@@ -599,7 +654,7 @@ static int nixge_recv(struct net_device *ndev, int budget)
ndev->stats.rx_bytes += size;
if (tail_p)
- nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);
return packets;
}
@@ -637,6 +692,7 @@ static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
struct nixge_priv *priv = netdev_priv(_ndev);
struct net_device *ndev = _ndev;
unsigned int status;
+ dma_addr_t phys;
u32 cr;
status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
@@ -650,9 +706,11 @@ static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
return IRQ_NONE;
}
if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci],
+ phys);
+
netdev_err(ndev, "DMA Tx error 0x%x\n", status);
- netdev_err(ndev, "Current BD is at: 0x%x\n",
- (priv->tx_bd_v[priv->tx_bd_ci]).phys);
+ netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);
cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */
@@ -678,6 +736,7 @@ static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
struct nixge_priv *priv = netdev_priv(_ndev);
struct net_device *ndev = _ndev;
unsigned int status;
+ dma_addr_t phys;
u32 cr;
status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
@@ -697,9 +756,10 @@ static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
return IRQ_NONE;
}
if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci],
+ phys);
netdev_err(ndev, "DMA Rx error 0x%x\n", status);
- netdev_err(ndev, "Current BD is at: 0x%x\n",
- (priv->rx_bd_v[priv->rx_bd_ci]).phys);
+ netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);
cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */
@@ -735,10 +795,10 @@ static void nixge_dma_err_handler(unsigned long data)
tx_skb = &lp->tx_skb[i];
nixge_tx_skb_unmap(lp, tx_skb);
- cur_p->phys = 0;
+ nixge_hw_dma_bd_set_phys(cur_p, 0);
cur_p->cntrl = 0;
cur_p->status = 0;
- cur_p->sw_id_offset = 0;
+ nixge_hw_dma_bd_set_offset(cur_p, 0);
}
for (i = 0; i < RX_BD_NUM; i++) {
@@ -779,18 +839,18 @@ static void nixge_dma_err_handler(unsigned long data)
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ nixge_dma_write_desc_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ nixge_dma_write_desc_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting
*/
- nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ nixge_dma_write_desc_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 08381ef8bdb4..25382f8fbb70 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -19,34 +19,18 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/crc32.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
#include <linux/clk.h>
-#include <linux/workqueue.h>
-#include <linux/netdevice.h>
+#include <linux/crc32.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/phy.h>
-#include <linux/dma-mapping.h>
-#include <linux/of.h>
+#include <linux/module.h>
#include <linux/of_net.h>
-#include <linux/types.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
-#include <linux/io.h>
#include <mach/board.h>
-#include <mach/platform.h>
#include <mach/hardware.h>
+#include <mach/platform.h>
#define MODNAME "lpc-eth"
#define DRV_VERSION "1.00"
@@ -797,8 +781,7 @@ static int lpc_mii_probe(struct net_device *ndev)
return PTR_ERR(phydev);
}
- /* mask with MAC supported features */
- phydev->supported &= PHY_BASIC_FEATURES;
+ phy_set_max_speed(phydev, SPEED_100);
phydev->advertising = phydev->supported;
@@ -1258,18 +1241,19 @@ static const struct net_device_ops lpc_netdev_ops = {
static int lpc_eth_drv_probe(struct platform_device *pdev)
{
- struct resource *res;
- struct net_device *ndev;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct netdata_local *pldat;
- struct phy_device *phydev;
+ struct net_device *ndev;
dma_addr_t dma_handle;
+ struct resource *res;
int irq, ret;
u32 tmp;
/* Setup network interface for RMII or MII mode */
tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL);
tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK;
- if (lpc_phy_interface_mode(&pdev->dev) == PHY_INTERFACE_MODE_MII)
+ if (lpc_phy_interface_mode(dev) == PHY_INTERFACE_MODE_MII)
tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS;
else
tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS;
@@ -1279,7 +1263,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!res || irq < 0) {
- dev_err(&pdev->dev, "error getting resources.\n");
+ dev_err(dev, "error getting resources.\n");
ret = -ENXIO;
goto err_exit;
}
@@ -1287,12 +1271,12 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
/* Allocate net driver data structure */
ndev = alloc_etherdev(sizeof(struct netdata_local));
if (!ndev) {
- dev_err(&pdev->dev, "could not allocate device.\n");
+ dev_err(dev, "could not allocate device.\n");
ret = -ENOMEM;
goto err_exit;
}
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
pldat = netdev_priv(ndev);
pldat->pdev = pdev;
@@ -1304,9 +1288,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
ndev->irq = irq;
/* Get clock for the device */
- pldat->clk = clk_get(&pdev->dev, NULL);
+ pldat->clk = clk_get(dev, NULL);
if (IS_ERR(pldat->clk)) {
- dev_err(&pdev->dev, "error getting clock.\n");
+ dev_err(dev, "error getting clock.\n");
ret = PTR_ERR(pldat->clk);
goto err_out_free_dev;
}
@@ -1319,14 +1303,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
/* Map IO space */
pldat->net_base = ioremap(res->start, resource_size(res));
if (!pldat->net_base) {
- dev_err(&pdev->dev, "failed to map registers\n");
+ dev_err(dev, "failed to map registers\n");
ret = -ENOMEM;
goto err_out_disable_clocks;
}
ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
ndev->name, ndev);
if (ret) {
- dev_err(&pdev->dev, "error requesting interrupt.\n");
+ dev_err(dev, "error requesting interrupt.\n");
goto err_out_iounmap;
}
@@ -1340,7 +1324,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
pldat->dma_buff_base_v = 0;
- if (use_iram_for_net(&pldat->pdev->dev)) {
+ if (use_iram_for_net(dev)) {
dma_handle = LPC32XX_IRAM_BASE;
if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
pldat->dma_buff_base_v =
@@ -1351,7 +1335,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
if (pldat->dma_buff_base_v == 0) {
- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
goto err_out_free_irq;
@@ -1360,7 +1344,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
/* Allocate a chunk of memory for the DMA ethernet buffers
and descriptors */
pldat->dma_buff_base_v =
- dma_alloc_coherent(&pldat->pdev->dev,
+ dma_alloc_coherent(dev,
pldat->dma_buff_size, &dma_handle,
GFP_KERNEL);
if (pldat->dma_buff_base_v == NULL) {
@@ -1385,7 +1369,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
__lpc_get_mac(pldat, ndev->dev_addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
- const char *macaddr = of_get_mac_address(pdev->dev.of_node);
+ const char *macaddr = of_get_mac_address(np);
if (macaddr)
memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
}
@@ -1415,7 +1399,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
ret = register_netdev(ndev);
if (ret) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ dev_err(dev, "Cannot register net device, aborting.\n");
goto err_out_dma_unmap;
}
platform_set_drvdata(pdev, ndev);
@@ -1427,19 +1411,17 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
res->start, ndev->irq);
- phydev = ndev->phydev;
-
- device_init_wakeup(&pdev->dev, 1);
- device_set_wakeup_enable(&pdev->dev, 0);
+ device_init_wakeup(dev, 1);
+ device_set_wakeup_enable(dev, 0);
return 0;
err_out_unregister_netdev:
unregister_netdev(ndev);
err_out_dma_unmap:
- if (!use_iram_for_net(&pldat->pdev->dev) ||
+ if (!use_iram_for_net(dev) ||
pldat->dma_buff_size > lpc32xx_return_iram_size())
- dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
+ dma_free_coherent(dev, pldat->dma_buff_size,
pldat->dma_buff_base_v,
pldat->dma_buff_base_p);
err_out_free_irq:
@@ -1534,13 +1516,11 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
}
#endif
-#ifdef CONFIG_OF
static const struct of_device_id lpc_eth_match[] = {
{ .compatible = "nxp,lpc-eth" },
{ }
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
-#endif
static struct platform_driver lpc_eth_driver = {
.probe = lpc_eth_drv_probe,
@@ -1551,7 +1531,7 @@ static struct platform_driver lpc_eth_driver = {
#endif
.driver = {
.name = MODNAME,
- .of_match_table = of_match_ptr(lpc_eth_match),
+ .of_match_table = lpc_eth_match,
},
};
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 69aa7fc392c5..7d9819d80e44 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter,
work_func_t func, int delay);
static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif
static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
@@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_tx_timeout = netxen_tx_timeout,
.ndo_fix_features = netxen_fix_features,
.ndo_set_features = netxen_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = netxen_nic_poll_controller,
-#endif
};
static inline bool netxen_function_zero(struct pci_dev *pdev)
@@ -1790,11 +1784,6 @@ static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
-static void netxen_io_resume(struct pci_dev *pdev)
-{
- pci_cleanup_aer_uncorrect_error_status(pdev);
-}
-
static void netxen_nic_shutdown(struct pci_dev *pdev)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
@@ -2402,23 +2391,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
return work_done;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev)
-{
- int ring;
- struct nx_host_sds_ring *sds_ring;
- struct netxen_adapter *adapter = netdev_priv(netdev);
- struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-
- disable_irq(adapter->irq);
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- netxen_intr(adapter->irq, sds_ring);
- }
- enable_irq(adapter->irq);
-}
-#endif
-
static int
nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
{
@@ -3488,7 +3460,6 @@ netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
static const struct pci_error_handlers netxen_err_handler = {
.error_detected = netxen_io_error_detected,
.slot_reset = netxen_io_slot_reset,
- .resume = netxen_io_resume,
};
static struct pci_driver netxen_driver = {
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index a60e1c8d470a..d9a03aba0e02 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -623,6 +623,7 @@ struct qed_hwfn {
void *unzip_buf;
struct dbg_tools_data dbg_info;
+ void *dbg_user_info;
/* PWM region specific data */
u16 wid_count;
@@ -914,7 +915,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info);
-void qed_link_update(struct qed_hwfn *hwfn);
+void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index f1977aa440e5..dc1c1b616084 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -40,7 +40,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/bitops.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 6bb76e6d3c14..8e8fa823d611 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
static void
qed_dcbx_set_params(struct qed_dcbx_results *p_data,
- struct qed_hw_info *p_info,
- bool enable,
- u8 prio,
- u8 tc,
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum qed_pci_personality personality)
{
@@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
else
p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
+ /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */
+ if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) ||
+ test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)))
+ p_data->arr[type].dont_add_vlan0 = true;
+
/* QM reconf data */
- if (p_info->personality == personality)
- qed_hw_info_set_offload_tc(p_info, tc);
+ if (p_hwfn->hw_info.personality == personality)
+ qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
+
+ /* Configure dcbx vlan priority in doorbell block for roce EDPM */
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+ type == DCBX_PROTOCOL_ROCE) {
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
+ }
}
/* Update app protocol data and hw_info fields with the TLV info */
static void
qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
- struct qed_hwfn *p_hwfn,
- bool enable,
- u8 prio, u8 tc, enum dcbx_protocol_type type)
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool enable, u8 prio, u8 tc,
+ enum dcbx_protocol_type type)
{
- struct qed_hw_info *p_info = &p_hwfn->hw_info;
enum qed_pci_personality personality;
enum dcbx_protocol_type id;
int i;
@@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
personality = qed_dcbx_app_update[i].personality;
- qed_dcbx_set_params(p_data, p_info, enable,
+ qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
prio, tc, type, personality);
}
}
@@ -253,8 +262,9 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
*type = DCBX_PROTOCOL_ROCE_V2;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
- DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n",
- app_prio_bitmap);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "No action required, App TLV entry = 0x%x\n",
+ app_prio_bitmap);
return false;
}
@@ -265,7 +275,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
static int
-qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
struct qed_dcbx_results *p_data,
struct dcbx_app_priority_entry *p_tbl,
u32 pri_tc_tbl, int count, u8 dcbx_version)
@@ -309,7 +319,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
enable = true;
}
- qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+ qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
priority, tc, type);
}
}
@@ -331,7 +341,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
continue;
enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
- qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+ qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
priority, tc, type);
}
@@ -341,7 +351,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
/* Parse app TLV's to update TC information in hw_info structure for
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
-static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+static int
+qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct dcbx_app_priority_feature *p_app;
struct dcbx_app_priority_entry *p_tbl;
@@ -365,7 +376,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
p_info = &p_hwfn->hw_info;
num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
- rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
num_entries, dcbx_version);
if (rc)
return rc;
@@ -891,7 +902,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
return rc;
if (type == QED_DCBX_OPERATIONAL_MIB) {
- rc = qed_dcbx_process_mib_info(p_hwfn);
+ rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt);
if (!rc) {
/* reconfigure tcs of QM queues according
* to negotiation results
@@ -954,6 +965,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
p_data->dcb_enable_flag = p_src->arr[type].enable;
p_data->dcb_priority = p_src->arr[type].priority;
p_data->dcb_tc = p_src->arr[type].tc;
+ p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
}
/* Set pf update ramrod command params */
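
A sketch of how the new flag travels end to end under this patch's structures: qed_dcbx_set_params() latches dont_add_vlan0 per protocol, the copy above mirrors it into the ramrod view, and the PF update ramrod carries it to firmware (call arguments abridged to what the hunks show):

struct protocol_dcb_data dcb = {0};

qed_dcbx_update_protocol_data(&dcb, p_results, DCBX_PROTOCOL_ROCE);
/* dcb.dcb_dont_add_vlan0 now follows
 * p_results->arr[DCBX_PROTOCOL_ROCE].dont_add_vlan0 and is consumed by
 * firmware when it builds Tx vlan headers for this PF.
 */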
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index a4d688c04e18..01f253ea4b22 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -55,6 +55,7 @@ struct qed_dcbx_app_data {
u8 update; /* Update indication */
u8 priority; /* Priority */
u8 tc; /* Traffic Class */
+ bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */
};
#define QED_DCBX_VERSION_DISABLED 0
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 1aa9fc1c5890..78a638ec7c0a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -3454,7 +3454,8 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
SEM_FAST_REG_STORM_REG_FILE) +
IOR_SET_OFFSET(set_id);
- buf[strlen(buf) - 1] = '0' + set_id;
+ if (strlen(buf) > 0)
+ buf[strlen(buf) - 1] = '0' + set_id;
offset += qed_grc_dump_mem(p_hwfn,
p_ptt,
dump_buf + offset,
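
The added strlen() guard closes an out-of-bounds write: for an empty name buffer the old code evaluated buf[strlen(buf) - 1], i.e. buf[(size_t)-1]. A minimal illustration with a hypothetical buffer:

char buf[16] = "";                      /* nothing formatted into it yet */

if (strlen(buf) > 0)                    /* guard added by this patch */
        buf[strlen(buf) - 1] = '0' + set_id;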
@@ -5563,35 +5564,6 @@ struct block_info {
enum block_id id;
};
-struct mcp_trace_format {
- u32 data;
-#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
-#define MCP_TRACE_FORMAT_MODULE_SHIFT 0
-#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
-#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
-#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
-#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
-#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
-#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
-#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
-#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
-#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
-#define MCP_TRACE_FORMAT_LEN_SHIFT 24
-
- char *format_str;
-};
-
-/* Meta data structure, generated by a perl script during MFW build. therefore,
- * the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl
- * script.
- */
-struct mcp_trace_meta {
- u32 modules_num;
- char **modules;
- u32 formats_num;
- struct mcp_trace_format *formats;
-};
-
/* REG fifo element */
struct reg_fifo_element {
u64 data;
@@ -5714,6 +5686,20 @@ struct igu_fifo_addr_data {
enum igu_fifo_addr_types type;
};
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+ bool is_allocated;
+};
+
+/* Debug Tools user data */
+struct dbg_tools_user_data {
+ struct mcp_trace_meta mcp_trace_meta;
+ const u32 *mcp_trace_user_meta_buf;
+};
+
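
A lifecycle sketch for the per-hwfn debug state that replaces the old file-scope s_mcp_trace_* globals (which were shared across all adapters and therefore wrong for multi-device setups). Names are from this patch; error handling is trimmed:

static int qed_dbg_user_data_example(struct qed_hwfn *p_hwfn,
                                     const u32 *meta_buf)
{
        if (qed_dbg_alloc_user_data(p_hwfn) != DBG_STATUS_OK)
                return -ENOMEM;

        /* Optionally point the per-device state at externally supplied
         * meta data, for dumps captured without NVRAM access.
         */
        qed_dbg_mcp_trace_set_meta_data(p_hwfn, meta_buf);

        /* ... run the MCP trace parsing APIs against this hwfn ... */

        kfree(p_hwfn->dbg_user_info);   /* mirrors qed_dbg_user_data_free() */
        p_hwfn->dbg_user_info = NULL;
        return 0;
}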
/******************************** Constants **********************************/
#define MAX_MSG_LEN 1024
@@ -6137,15 +6123,6 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
/******************************** Variables **********************************/
-/* MCP Trace meta data array - used in case the dump doesn't contain the
- * meta data (e.g. due to no NVRAM access).
- */
-static struct user_dbg_array s_mcp_trace_meta_arr = { NULL, 0 };
-
-/* Parsed MCP Trace meta data info, based on MCP trace meta array */
-static struct mcp_trace_meta s_mcp_trace_meta;
-static bool s_mcp_trace_meta_valid;
-
/* Temporary buffer, used for print size calculations */
static char s_temp_buf[MAX_MSG_LEN];
@@ -6311,6 +6288,12 @@ static u32 qed_print_section_params(u32 *dump_buf,
return dump_offset;
}
+static struct dbg_tools_user_data *
+qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
+{
+ return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
+}
+
/* Parses the idle check rules and returns the number of characters printed.
* In case of parsing error, returns 0.
*/
@@ -6570,43 +6553,26 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
return DBG_STATUS_OK;
}
-/* Frees the specified MCP Trace meta data */
-static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
- struct mcp_trace_meta *meta)
-{
- u32 i;
-
- s_mcp_trace_meta_valid = false;
-
- /* Release modules */
- if (meta->modules) {
- for (i = 0; i < meta->modules_num; i++)
- kfree(meta->modules[i]);
- kfree(meta->modules);
- }
-
- /* Release formats */
- if (meta->formats) {
- for (i = 0; i < meta->formats_num; i++)
- kfree(meta->formats[i].format_str);
- kfree(meta->formats);
- }
-}
-
/* Allocates and fills MCP Trace meta data based on the specified meta data
* dump buffer.
* Returns debug status code.
*/
-static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
- const u32 *meta_buf,
- struct mcp_trace_meta *meta)
+static enum dbg_status
+qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf)
{
- u8 *meta_buf_bytes = (u8 *)meta_buf;
+ struct dbg_tools_user_data *dev_user_data;
u32 offset = 0, signature, i;
+ struct mcp_trace_meta *meta;
+ u8 *meta_buf_bytes;
+
+ dev_user_data = qed_dbg_get_user_data(p_hwfn);
+ meta = &dev_user_data->mcp_trace_meta;
+ meta_buf_bytes = (u8 *)meta_buf;
/* Free the previous meta before loading a new one. */
- if (s_mcp_trace_meta_valid)
- qed_mcp_trace_free_meta(p_hwfn, meta);
+ if (meta->is_allocated)
+ qed_mcp_trace_free_meta_data(p_hwfn);
memset(meta, 0, sizeof(*meta));
@@ -6674,7 +6640,7 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
format_len, format_ptr->format_str);
}
- s_mcp_trace_meta_valid = true;
+ meta->is_allocated = true;
return DBG_STATUS_OK;
}
@@ -6687,21 +6653,26 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
* buffer.
* data_size - size in bytes of data to parse.
* parsed_buf - destination buffer for parsed data.
- * parsed_bytes - size of parsed data in bytes.
+ * parsed_results_bytes - size of parsed data in bytes.
*/
-static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
+static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
+ u8 *trace_buf,
u32 trace_buf_size,
u32 data_offset,
u32 data_size,
char *parsed_buf,
- u32 *parsed_bytes)
+ u32 *parsed_results_bytes)
{
+ struct dbg_tools_user_data *dev_user_data;
+ struct mcp_trace_meta *meta;
u32 param_mask, param_shift;
enum dbg_status status;
- *parsed_bytes = 0;
+ dev_user_data = qed_dbg_get_user_data(p_hwfn);
+ meta = &dev_user_data->mcp_trace_meta;
+ *parsed_results_bytes = 0;
- if (!s_mcp_trace_meta_valid)
+ if (!meta->is_allocated)
return DBG_STATUS_MCP_TRACE_BAD_DATA;
status = DBG_STATUS_OK;
@@ -6723,7 +6694,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
format_idx = header & MFW_TRACE_EVENTID_MASK;
/* Skip message if its index doesn't exist in the meta data */
- if (format_idx >= s_mcp_trace_meta.formats_num) {
+ if (format_idx >= meta->formats_num) {
u8 format_size =
(u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
MFW_TRACE_PRM_SIZE_SHIFT);
@@ -6738,7 +6709,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
continue;
}
- format_ptr = &s_mcp_trace_meta.formats[format_idx];
+ format_ptr = &meta->formats[format_idx];
for (i = 0,
param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
@@ -6783,19 +6754,20 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
return DBG_STATUS_MCP_TRACE_BAD_DATA;
/* Print current message to results buffer */
- *parsed_bytes +=
- sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
+ *parsed_results_bytes +=
+ sprintf(qed_get_buf_ptr(parsed_buf,
+ *parsed_results_bytes),
"%s %-8s: ",
s_mcp_trace_level_str[format_level],
- s_mcp_trace_meta.modules[format_module]);
- *parsed_bytes +=
- sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
+ meta->modules[format_module]);
+ *parsed_results_bytes +=
+ sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
format_ptr->format_str,
params[0], params[1], params[2]);
}
/* Add string NULL terminator */
- (*parsed_bytes)++;
+ (*parsed_results_bytes)++;
return status;
}
@@ -6803,24 +6775,25 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
/* Parses an MCP Trace dump buffer.
* If result_buf is not NULL, the MCP Trace results are printed to it.
* In any case, the required results buffer size is assigned to
- * parsed_bytes.
+ * parsed_results_bytes.
* The parsing status is returned.
*/
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
- char *parsed_buf,
- u32 *parsed_bytes)
+ char *results_buf,
+ u32 *parsed_results_bytes,
+ bool free_meta_data)
{
const char *section_name, *param_name, *param_str_val;
u32 data_size, trace_data_dwords, trace_meta_dwords;
- u32 offset, results_offset, parsed_buf_bytes;
+ u32 offset, results_offset, results_buf_bytes;
u32 param_num_val, num_section_params;
struct mcp_trace *trace;
enum dbg_status status;
const u32 *meta_buf;
u8 *trace_buf;
- *parsed_bytes = 0;
+ *parsed_results_bytes = 0;
/* Read global_params section */
dump_buf += qed_read_section_hdr(dump_buf,
@@ -6831,7 +6804,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Print global params */
dump_buf += qed_print_section_params(dump_buf,
num_section_params,
- parsed_buf, &results_offset);
+ results_buf, &results_offset);
/* Read trace_data section */
dump_buf += qed_read_section_hdr(dump_buf,
@@ -6846,6 +6819,9 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Prepare trace info */
trace = (struct mcp_trace *)dump_buf;
+ if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
trace_buf = (u8 *)dump_buf + sizeof(*trace);
offset = trace->trace_oldest;
data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
@@ -6865,31 +6841,39 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Choose meta data buffer */
if (!trace_meta_dwords) {
/* Dump doesn't include meta data */
- if (!s_mcp_trace_meta_arr.ptr)
+ struct dbg_tools_user_data *dev_user_data =
+ qed_dbg_get_user_data(p_hwfn);
+
+ if (!dev_user_data->mcp_trace_user_meta_buf)
return DBG_STATUS_MCP_TRACE_NO_META;
- meta_buf = s_mcp_trace_meta_arr.ptr;
+
+ meta_buf = dev_user_data->mcp_trace_user_meta_buf;
} else {
/* Dump includes meta data */
meta_buf = dump_buf;
}
/* Allocate meta data memory */
- status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &s_mcp_trace_meta);
+ status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
if (status != DBG_STATUS_OK)
return status;
- status = qed_parse_mcp_trace_buf(trace_buf,
+ status = qed_parse_mcp_trace_buf(p_hwfn,
+ trace_buf,
trace->size,
offset,
data_size,
- parsed_buf ?
- parsed_buf + results_offset :
+ results_buf ?
+ results_buf + results_offset :
NULL,
- &parsed_buf_bytes);
+ &results_buf_bytes);
if (status != DBG_STATUS_OK)
return status;
- *parsed_bytes = results_offset + parsed_buf_bytes;
+ if (free_meta_data)
+ qed_mcp_trace_free_meta_data(p_hwfn);
+
+ *parsed_results_bytes = results_offset + results_buf_bytes;
return DBG_STATUS_OK;
}
@@ -7361,6 +7345,16 @@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
return DBG_STATUS_OK;
}
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn)
+{
+ p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data),
+ GFP_KERNEL);
+ if (!p_hwfn->dbg_user_info)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ return DBG_STATUS_OK;
+}
+
const char *qed_dbg_get_status_str(enum dbg_status status)
{
return (status <
@@ -7397,10 +7391,13 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
num_errors, num_warnings);
}
-void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
+void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf)
{
- s_mcp_trace_meta_arr.ptr = data;
- s_mcp_trace_meta_arr.size_in_dwords = size;
+ struct dbg_tools_user_data *dev_user_data =
+ qed_dbg_get_user_data(p_hwfn);
+
+ dev_user_data->mcp_trace_user_meta_buf = meta_buf;
}
enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
@@ -7409,7 +7406,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *results_buf_size)
{
return qed_parse_mcp_trace_dump(p_hwfn,
- dump_buf, NULL, results_buf_size);
+ dump_buf, NULL, results_buf_size, true);
}
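
The NULL results buffer above turns the parse into a pure sizing pass. Typical two-pass usage (a sketch; the num_dumped_dwords argument is assumed from the full prototypes, which these hunks truncate):

u32 size = 0;
char *text;

if (qed_get_mcp_trace_results_buf_size(p_hwfn, dump_buf,
                                       num_dumped_dwords, &size))
        return;

text = vzalloc(size);
if (text && qed_print_mcp_trace_results(p_hwfn, dump_buf,
                                        num_dumped_dwords,
                                        text) == DBG_STATUS_OK)
        pr_info("%s", text);
vfree(text);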
enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
@@ -7421,20 +7418,61 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
return qed_parse_mcp_trace_dump(p_hwfn,
dump_buf,
- results_buf, &parsed_buf_size);
+ results_buf, &parsed_buf_size, true);
+}
+
+enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
+ &parsed_buf_size, false);
}
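
Usage sketch for continuous parsing: the _cont variant keeps the parsed meta data cached between calls, so repeated polling avoids re-parsing it, and the meta data is freed once at the end. The polling loop and chunk collection below are hypothetical scaffolding:

while (trace_session_active) {
        collect_next_mcp_chunk(dump_buf);       /* hypothetical helper */
        qed_print_mcp_trace_results_cont(p_hwfn, dump_buf, results_buf);
}
qed_mcp_trace_free_meta_data(p_hwfn);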
-enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf,
+enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
+ u8 *dump_buf,
u32 num_dumped_bytes,
char *results_buf)
{
- u32 parsed_bytes;
+ u32 parsed_results_bytes;
- return qed_parse_mcp_trace_buf(dump_buf,
+ return qed_parse_mcp_trace_buf(p_hwfn,
+ dump_buf,
num_dumped_bytes,
0,
num_dumped_bytes,
- results_buf, &parsed_bytes);
+ results_buf, &parsed_results_bytes);
+}
+
+/* Frees the specified MCP Trace meta data */
+void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_user_data *dev_user_data;
+ struct mcp_trace_meta *meta;
+ u32 i;
+
+ dev_user_data = qed_dbg_get_user_data(p_hwfn);
+ meta = &dev_user_data->mcp_trace_meta;
+ if (!meta->is_allocated)
+ return;
+
+ /* Release modules */
+ if (meta->modules) {
+ for (i = 0; i < meta->modules_num; i++)
+ kfree(meta->modules[i]);
+ kfree(meta->modules);
+ }
+
+ /* Release formats */
+ if (meta->formats) {
+ for (i = 0; i < meta->formats_num; i++)
+ kfree(meta->formats[i].format_str);
+ kfree(meta->formats);
+ }
+
+ meta->is_allocated = false;
}
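
Because of the is_allocated guard, the free is idempotent, which is what lets qed_mcp_trace_alloc_meta_data() call it unconditionally before loading a new meta image:

qed_mcp_trace_free_meta_data(p_hwfn);   /* frees modules and formats */
qed_mcp_trace_free_meta_data(p_hwfn);   /* safe no-op: is_allocated is false */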
enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 016ca8a7ec8a..7ceb2b97538d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -144,6 +144,12 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
qm_info->wfq_data = NULL;
}
+static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->dbg_user_info);
+ p_hwfn->dbg_user_info = NULL;
+}
+
void qed_resc_free(struct qed_dev *cdev)
{
int i;
@@ -183,6 +189,7 @@ void qed_resc_free(struct qed_dev *cdev)
qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
+ qed_dbg_user_data_free(p_hwfn);
}
}
@@ -1083,6 +1090,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
rc = qed_dcbx_info_alloc(p_hwfn);
if (rc)
goto alloc_err;
+
+ rc = qed_dbg_alloc_user_data(p_hwfn);
+ if (rc)
+ goto alloc_err;
}
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
@@ -1706,7 +1717,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn,
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
{
struct qed_load_req_params load_req_params;
- u32 load_code, param, drv_mb_param;
+ u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i;
@@ -1852,6 +1863,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
if (IS_PF(cdev)) {
p_hwfn = QED_LEADING_HWFN(cdev);
+
+ /* Get pre-negotiated values for stag, bandwidth etc. */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+
drv_mb_param = STORM_FW_VERSION;
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
@@ -2655,6 +2679,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
link->speed.forced_speed = 10000;
break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
+ link->speed.forced_speed = 20000;
+ break;
case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
link->speed.forced_speed = 25000;
break;
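
For reference, the GET_OEM_UPDATES request sent earlier in this file's qed_hw_init() hunk encodes a single dummy flag in the mailbox param word, matching the mask/offset pair this patch adds to qed_hsi.h (sketch):

u32 drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;     /* 0x1 */
bool dummy_set = drv_mb_param & DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK;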
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 8faceb691657..5c221ebaa7b3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -274,7 +274,8 @@ struct core_rx_start_ramrod_data {
u8 mf_si_mcast_accept_all;
struct core_rx_action_on_error action_on_error;
u8 gsi_offload_flag;
- u8 reserved[6];
+ u8 wipe_inner_vlan_pri_en;
+ u8 reserved[5];
};
/* Ramrod data for rx queue stop ramrod */
@@ -351,7 +352,8 @@ struct core_tx_start_ramrod_data {
__le16 pbl_size;
__le16 qm_pq_id;
u8 gsi_offload_flag;
- u8 resrved[3];
+ u8 vport_id;
+ u8 resrved[2];
};
/* Ramrod data for tx queue stop ramrod */
@@ -914,6 +916,16 @@ struct eth_rx_rate_limit {
__le16 reserved1;
};
+/* Update RSS indirection table entry command */
+struct eth_tstorm_rss_update_data {
+ u8 valid;
+ u8 vport_id;
+ u8 ind_table_index;
+ u8 reserved;
+ __le16 ind_table_value;
+ __le16 reserved1;
+};
+
struct eth_ustorm_per_pf_stat {
struct regpair rcv_lb_ucast_bytes;
struct regpair rcv_lb_mcast_bytes;
@@ -1241,6 +1253,10 @@ struct rl_update_ramrod_data {
u8 rl_id_first;
u8 rl_id_last;
u8 rl_dc_qcn_flg;
+ u8 dcqcn_reset_alpha_on_idle;
+ u8 rl_bc_stage_th;
+ u8 rl_timer_stage_th;
+ u8 reserved1;
__le32 rl_bc_rate;
__le16 rl_max_rate;
__le16 rl_r_ai;
@@ -1249,7 +1265,7 @@ struct rl_update_ramrod_data {
__le32 dcqcn_k_us;
__le32 dcqcn_timeuot_us;
__le32 qcn_timeuot_us;
- __le32 reserved[2];
+ __le32 reserved2;
};
/* Slowpath Element (SPQE) */
@@ -3322,6 +3338,25 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results);
+/******************************* Data Types **********************************/
+
+struct mcp_trace_format {
+ u32 data;
+#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
+#define MCP_TRACE_FORMAT_MODULE_SHIFT 0
+#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
+#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
+#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
+#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
+#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
+#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
+#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
+#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
+#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
+#define MCP_TRACE_FORMAT_LEN_SHIFT 24
+ char *format_str;
+};
+
/******************************** Constants **********************************/
#define MAX_NAME_LEN 16
@@ -3337,6 +3372,13 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
/**
+ * @brief qed_dbg_alloc_user_data - Allocates user debug data.
+ *
+ * @param p_hwfn - HW device data
+ */
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn);
+
+/**
* @brief qed_dbg_get_status_str - Returns a string for the specified status.
*
* @param status - a debug status code.
@@ -3381,8 +3423,7 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
u32 *num_warnings);
/**
- * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace
- * meta data.
+ * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data.
*
* Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
* no NVRAM access).
@@ -3390,7 +3431,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
* @param p_hwfn - HW device data
* @param meta_buf - pointer to MCP Trace meta data
*/
-void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size);
+void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf);
/**
* @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
@@ -3425,19 +3467,45 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
char *results_buf);
/**
+ * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and
+ * keeps the MCP trace meta data allocated, to support continuous MCP Trace
+ * parsing. After the continuous parsing ends, qed_mcp_trace_free_meta_data
+ * be called to free the meta data.
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - mcp trace dump buffer, starting from the header.
+ * @param results_buf - buffer for printing the mcp trace results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ char *results_buf);
+
+/**
* @brief qed_print_mcp_trace_line - Prints MCP Trace results for a single line
*
+ * @param p_hwfn - HW device data
* @param dump_buf - mcp trace dump buffer, starting from the header.
* @param num_dumped_bytes - number of bytes that were dumped.
* @param results_buf - buffer for printing the mcp trace results.
*
* @return error if the parsing fails, ok otherwise.
*/
-enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf,
+enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
+ u8 *dump_buf,
u32 num_dumped_bytes,
char *results_buf);
/**
+ * @brief qed_mcp_trace_free_meta_data - Frees the MCP Trace meta data.
+ * Should be called after continuous MCP Trace parsing.
+ *
+ * @param p_hwfn - HW device data
+ */
+void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
+
+/**
* @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
* for reg_fifo results (in bytes).
*
@@ -4303,154 +4371,161 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
(IRO[29].base + ((pf_id) * IRO[29].m1))
#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
+ * Use eth_tstorm_rss_update_data for update.
+ */
+#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \
+ (IRO[30].base + ((pf_id) * IRO[30].m1))
+#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size)
+
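
A sketch of how a single RSS indirection entry could be pushed through the new TSTORM window, using the eth_tstorm_rss_update_data layout added above; qed_memcpy_to() stands in for the driver's BAR copy helper, and the surrounding plumbing is omitted:

struct eth_tstorm_rss_update_data upd = {
        .valid           = 1,
        .vport_id        = vport_id,
        .ind_table_index = entry_idx,
        .ind_table_value = cpu_to_le16(new_queue_id),
};

qed_memcpy_to(p_hwfn, p_ptt,
              TSTORM_ETH_RSS_UPDATE_OFFSET(p_hwfn->rel_pf_id),
              &upd, sizeof(upd));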
/* Xstorm queue zone */
#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[30].base + ((queue_id) * IRO[30].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+ (IRO[31].base + ((queue_id) * IRO[31].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size)
/* Ystorm cqe producer */
#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[31].base + ((rss_id) * IRO[31].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size)
+ (IRO[32].base + ((rss_id) * IRO[32].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
/* Ustorm cqe producer */
#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[32].base + ((rss_id) * IRO[32].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+ (IRO[33].base + ((rss_id) * IRO[33].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size)
/* Ustorm grq producer */
#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[33].base + ((pf_id) * IRO[33].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size)
+ (IRO[34].base + ((pf_id) * IRO[34].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size)
/* Tstorm cmdq-cons of given command queue-id */
#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+ (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
* BDqueue-id.
*/
#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+ (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+ (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size)
/* Tstorm iSCSI RX stats */
#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+ (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
/* Mstorm iSCSI RX stats */
#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+ (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
/* Ustorm iSCSI RX stats */
#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+ (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size)
/* Xstorm iSCSI TX stats */
#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
/* Ystorm iSCSI TX stats */
#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+ (IRO[42].base + ((pf_id) * IRO[42].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
/* Pstorm iSCSI TX stats */
#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[42].base + ((pf_id) * IRO[42].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+ (IRO[43].base + ((pf_id) * IRO[43].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size)
/* Tstorm FCoE RX stats */
#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[43].base + ((pf_id) * IRO[43].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size)
+ (IRO[44].base + ((pf_id) * IRO[44].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size)
/* Pstorm FCoE TX stats */
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[44].base + ((pf_id) * IRO[44].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size)
+ (IRO[45].base + ((pf_id) * IRO[45].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size)
/* Pstorm RDMA queue statistics */
#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
+ (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
/* Tstorm RDMA queue statistics */
#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+ (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size)
/* Xstorm error level for assert */
#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[47].base + ((pf_id) * IRO[47].m1))
-#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size)
+ (IRO[48].base + ((pf_id) * IRO[48].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
/* Ystorm error level for assert */
#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[48].base + ((pf_id) * IRO[48].m1))
-#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
+ (IRO[49].base + ((pf_id) * IRO[49].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
/* Pstorm error level for assert */
#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[49].base + ((pf_id) * IRO[49].m1))
-#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
+ (IRO[50].base + ((pf_id) * IRO[50].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
/* Tstorm error level for assert */
#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[50].base + ((pf_id) * IRO[50].m1))
-#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
+ (IRO[51].base + ((pf_id) * IRO[51].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
/* Mstorm error level for assert */
#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[51].base + ((pf_id) * IRO[51].m1))
-#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
+ (IRO[52].base + ((pf_id) * IRO[52].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
/* Ustorm error level for assert */
#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[52].base + ((pf_id) * IRO[52].m1))
-#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
+ (IRO[53].base + ((pf_id) * IRO[53].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size)
/* Xstorm iWARP rxmit stats */
#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
- (IRO[53].base + ((pf_id) * IRO[53].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size)
+ (IRO[54].base + ((pf_id) * IRO[54].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size)
/* Tstorm RoCE Event Statistics */
#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
- (IRO[54].base + ((roce_pf_id) * IRO[54].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size)
+ (IRO[55].base + ((roce_pf_id) * IRO[55].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size)
/* DCQCN Received Statistics */
#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
- (IRO[55].base + ((roce_pf_id) * IRO[55].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size)
+ (IRO[56].base + ((roce_pf_id) * IRO[56].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size)
/* RoCE Error Statistics */
#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
- (IRO[56].base + ((roce_pf_id) * IRO[56].m1))
-#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size)
+ (IRO[57].base + ((roce_pf_id) * IRO[57].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size)
/* DCQCN Sent Statistics */
#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
- (IRO[57].base + ((roce_pf_id) * IRO[57].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size)
+ (IRO[58].base + ((roce_pf_id) * IRO[58].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size)
/* RoCE CQEs Statistics */
#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
- (IRO[58].base + ((roce_pf_id) * IRO[58].m1))
-#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size)
+ (IRO[59].base + ((roce_pf_id) * IRO[59].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size)
-static const struct iro iro_arr[59] = {
+static const struct iro iro_arr[60] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{0x4cb8, 0x88, 0x0, 0x0, 0x88},
{0x6530, 0x20, 0x0, 0x0, 0x20},
@@ -4461,14 +4536,14 @@ static const struct iro iro_arr[59] = {
{0x84, 0x8, 0x0, 0x0, 0x2},
{0x4c48, 0x0, 0x0, 0x0, 0x78},
{0x3e38, 0x0, 0x0, 0x0, 0x78},
- {0x2b78, 0x0, 0x0, 0x0, 0x78},
+ {0x3ef8, 0x0, 0x0, 0x0, 0x78},
{0x4c40, 0x0, 0x0, 0x0, 0x78},
{0x4998, 0x0, 0x0, 0x0, 0x78},
{0x7f50, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
{0x6210, 0x10, 0x0, 0x0, 0x10},
{0xb820, 0x30, 0x0, 0x0, 0x30},
- {0x96c0, 0x30, 0x0, 0x0, 0x30},
+ {0xa990, 0x30, 0x0, 0x0, 0x30},
{0x4b68, 0x80, 0x0, 0x0, 0x40},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
{0x53a8, 0x80, 0x4, 0x0, 0x4},
@@ -4476,11 +4551,12 @@ static const struct iro iro_arr[59] = {
{0x4ba8, 0x80, 0x0, 0x0, 0x20},
{0x8158, 0x40, 0x0, 0x0, 0x30},
{0xe770, 0x60, 0x0, 0x0, 0x60},
- {0x2d10, 0x80, 0x0, 0x0, 0x38},
- {0xf2b8, 0x78, 0x0, 0x0, 0x78},
+ {0x4090, 0x80, 0x0, 0x0, 0x38},
+ {0xfea8, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
{0xaf20, 0x0, 0x0, 0x0, 0xf0},
{0xb010, 0x8, 0x0, 0x0, 0x8},
+ {0xc00, 0x8, 0x0, 0x0, 0x8},
{0x1f8, 0x8, 0x0, 0x0, 0x8},
{0xac0, 0x8, 0x0, 0x0, 0x8},
{0x2578, 0x8, 0x0, 0x0, 0x8},
@@ -4492,23 +4568,23 @@ static const struct iro iro_arr[59] = {
{0x12908, 0x18, 0x0, 0x0, 0x10},
{0x11aa8, 0x40, 0x0, 0x0, 0x18},
{0xa588, 0x50, 0x0, 0x0, 0x20},
- {0x8700, 0x40, 0x0, 0x0, 0x28},
- {0x10300, 0x18, 0x0, 0x0, 0x10},
+ {0x8f00, 0x40, 0x0, 0x0, 0x28},
+ {0x10e30, 0x18, 0x0, 0x0, 0x10},
{0xde48, 0x48, 0x0, 0x0, 0x38},
- {0x10768, 0x20, 0x0, 0x0, 0x20},
- {0x2d48, 0x80, 0x0, 0x0, 0x10},
+ {0x11298, 0x20, 0x0, 0x0, 0x20},
+ {0x40c8, 0x80, 0x0, 0x0, 0x10},
{0x5048, 0x10, 0x0, 0x0, 0x10},
{0xc748, 0x8, 0x0, 0x0, 0x1},
- {0xa128, 0x8, 0x0, 0x0, 0x1},
- {0x10f00, 0x8, 0x0, 0x0, 0x1},
+ {0xa928, 0x8, 0x0, 0x0, 0x1},
+ {0x11a30, 0x8, 0x0, 0x0, 0x1},
{0xf030, 0x8, 0x0, 0x0, 0x1},
{0x13028, 0x8, 0x0, 0x0, 0x1},
{0x12c58, 0x8, 0x0, 0x0, 0x1},
{0xc9b8, 0x30, 0x0, 0x0, 0x10},
{0xed90, 0x28, 0x0, 0x0, 0x28},
- {0xa520, 0x18, 0x0, 0x0, 0x18},
- {0xa6a0, 0x8, 0x0, 0x0, 0x8},
- {0x13108, 0x8, 0x0, 0x0, 0x8},
+ {0xad20, 0x18, 0x0, 0x0, 0x18},
+ {0xaea0, 0x8, 0x0, 0x0, 0x8},
+ {0x13c38, 0x8, 0x0, 0x0, 0x8},
{0x13c50, 0x18, 0x0, 0x0, 0x18},
};
@@ -5661,6 +5737,14 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE
};
+/* inner to inner vlan priority translation configurations */
+struct eth_in_to_in_pri_map_cfg {
+ u8 inner_vlan_pri_remap_en;
+ u8 reserved[7];
+ u8 non_rdma_in_to_in_pri_map[8];
+ u8 rdma_in_to_in_pri_map[8];
+};
+
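
A sketch of filling the new map for the vport start ramrod: an identity inner-vlan priority translation with remapping enabled (field names from this patch; the enclosing ramrod plumbing is omitted):

struct eth_in_to_in_pri_map_cfg cfg = {
        .inner_vlan_pri_remap_en = 1,
};
u8 pri;

for (pri = 0; pri < 8; pri++) {
        cfg.non_rdma_in_to_in_pri_map[pri] = pri;   /* identity map */
        cfg.rdma_in_to_in_pri_map[pri] = pri;
}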
/* Eth IPv4 Fragment Type */
enum eth_ipv4_frag_type {
ETH_IPV4_NOT_FRAG,
@@ -6018,6 +6102,14 @@ struct tx_queue_update_ramrod_data {
struct regpair reserved1[5];
};
+/* Inner to Inner VLAN priority map update mode */
+enum update_in_to_in_pri_map_mode_enum {
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED,
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL,
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL,
+ MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM
+};
+
/* Ramrod data for vport update ramrod */
struct vport_filter_update_ramrod_data {
struct eth_filter_cmd_header filter_cmd_hdr;
@@ -6048,7 +6140,8 @@ struct vport_start_ramrod_data {
u8 zero_placement_offset;
u8 ctl_frame_mac_check_en;
u8 ctl_frame_ethtype_check_en;
- u8 reserved[1];
+ u8 wipe_inner_vlan_pri_en;
+ struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg;
};
/* Ramrod data for vport stop ramrod */
@@ -6100,7 +6193,9 @@ struct vport_update_ramrod_data_cmn {
u8 update_ctl_frame_checks_en_flg;
u8 ctl_frame_mac_check_en;
u8 ctl_frame_ethtype_check_en;
- u8 reserved[15];
+ u8 update_in_to_in_pri_map_mode;
+ u8 in_to_in_pri_map[8];
+ u8 reserved[6];
};
struct vport_update_ramrod_mcast {
@@ -6929,11 +7024,6 @@ struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
-/* The roce task context of Ustorm */
-struct ustorm_rdma_task_st_ctx {
- struct regpair temp[2];
-};
-
struct e4_ustorm_rdma_task_ag_ctx {
u8 reserved;
u8 state;
@@ -7007,8 +7097,6 @@ struct e4_rdma_task_context {
struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
struct mstorm_rdma_task_st_ctx mstorm_st_context;
struct rdif_task_context rdif_context;
- struct ustorm_rdma_task_st_ctx ustorm_st_context;
- struct regpair ustorm_st_padding[2];
struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
};
@@ -7388,7 +7476,7 @@ struct e4_ustorm_rdma_conn_ag_ctx {
#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
- u8 byte3;
+ u8 nvmf_only;
__le16 conn_dpi;
__le16 word1;
__le32 cq_cons;
@@ -7831,7 +7919,12 @@ struct roce_create_qp_req_ramrod_data {
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
u8 stats_counter_id;
- u8 reserved3[7];
+ u8 reserved3[6];
+ u8 flags2;
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1
__le16 regular_latency_phy_queue;
__le16 dpi;
};
@@ -7954,6 +8047,7 @@ enum roce_event_opcode {
ROCE_EVENT_DESTROY_QP,
ROCE_EVENT_CREATE_UD_QP,
ROCE_EVENT_DESTROY_UD_QP,
+ ROCE_EVENT_FUNC_UPDATE,
MAX_ROCE_EVENT_OPCODE
};
@@ -7962,7 +8056,13 @@ struct roce_init_func_params {
u8 ll2_queue_id;
u8 cnp_vlan_priority;
u8 cnp_dscp;
- u8 reserved;
+ u8 flags;
+#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1
+#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0
+#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1
+#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1
+#define ROCE_INIT_FUNC_PARAMS_RESERVED0_MASK 0x3F
+#define ROCE_INIT_FUNC_PARAMS_RESERVED0_SHIFT 2
__le32 cnp_send_timeout;
__le16 rl_offset;
u8 rl_count_log;
@@ -8109,9 +8209,24 @@ enum roce_ramrod_cmd_id {
ROCE_RAMROD_DESTROY_QP,
ROCE_RAMROD_CREATE_UD_QP,
ROCE_RAMROD_DESTROY_UD_QP,
+ ROCE_RAMROD_FUNC_UPDATE,
MAX_ROCE_RAMROD_CMD_ID
};
+/* RoCE func init ramrod data */
+struct roce_update_func_params {
+ u8 cnp_vlan_priority;
+ u8 cnp_dscp;
+ __le16 flags;
+#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1
+#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0
+#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1
+#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1
+#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_MASK 0x3FFF
+#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_SHIFT 2
+ __le32 cnp_send_timeout;
+};
+
struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
u8 reserved0;
u8 state;
@@ -11987,6 +12102,7 @@ struct public_global {
u32 running_bundle_id;
s32 external_temperature;
u32 mdump_reason;
+ u64 reserved;
u32 data_ptr;
u32 data_size;
};
@@ -12091,11 +12207,56 @@ struct public_port {
u32 transceiver_data;
#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
+#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xFF
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
+#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
+#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
+#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
+#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
+#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
+#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
u32 wol_info;
u32 wol_pkt_len;
u32 wol_pkt_details;
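
Decoding sketch for the new transceiver fields, using the mask/offset pairs above; tcvr_data would come from the port shmem read, which is elided here:

u32 state = (tcvr_data & ETH_TRANSCEIVER_STATE_MASK) >>
            ETH_TRANSCEIVER_STATE_OFFSET;
u32 type = (tcvr_data & ETH_TRANSCEIVER_TYPE_MASK) >>
           ETH_TRANSCEIVER_TYPE_OFFSET;

if (state != ETH_TRANSCEIVER_STATE_VALID)
        type = ETH_TRANSCEIVER_TYPE_UNKNOWN;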
@@ -12160,7 +12321,7 @@ struct public_func {
#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
u32 status;
-#define FUNC_STATUS_VLINK_DOWN 0x00000001
+#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
u32 mac_upper;
#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
@@ -12414,6 +12575,7 @@ struct public_drv_mb {
#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
+#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
@@ -12541,6 +12703,9 @@ struct public_drv_mb {
#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
+
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
@@ -12578,6 +12743,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
@@ -12630,6 +12796,7 @@ struct public_drv_mb {
/* get MFW feature support response */
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
@@ -13035,6 +13202,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -13045,6 +13213,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
@@ -13075,6 +13244,13 @@ struct nvm_cfg1_port {
u32 transceiver_00;
u32 device_ids;
u32 board_cfg;
+#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF
+#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
+#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
+#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
u32 mnm_10g_cap;
u32 mnm_10g_ctrl;
u32 mnm_10g_misc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index af3a28ec04eb..0f0aba793352 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -228,7 +228,7 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
- QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)",
+ QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
out:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 17f3dfa2cc94..beb8e5d6401a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -935,9 +935,8 @@ qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
}
spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
- list_del(&ep->list_entry);
- list_add_tail(&ep->list_entry,
- &p_hwfn->p_rdma_info->iwarp.ep_free_list);
+ list_move_tail(&ep->list_entry,
+ &p_hwfn->p_rdma_info->iwarp.ep_free_list);
spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}
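
list_move_tail() collapses the deleted list_del()/list_add_tail() pair into one call and never leaves the entry unlinked with poisoned pointers in between; its definition in <linux/list.h> is effectively:

static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
        __list_del_entry(list);         /* unlink without poisoning */
        list_add_tail(list, head);      /* relink at the tail of head */
}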
@@ -1710,7 +1709,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
cm_info->local_ip[0] = ntohl(iph->daddr);
cm_info->remote_ip[0] = ntohl(iph->saddr);
- cm_info->ip_version = TCP_IPV4;
+ cm_info->ip_version = QED_TCP_IPV4;
ip_hlen = (iph->ihl) * sizeof(u32);
*payload_len = ntohs(iph->tot_len) - ip_hlen;
@@ -1730,7 +1729,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
cm_info->remote_ip[i] =
ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
}
- cm_info->ip_version = TCP_IPV6;
+ cm_info->ip_version = QED_TCP_IPV6;
ip_hlen = sizeof(*ip6h);
*payload_len = ntohs(ip6h->payload_len);
@@ -2270,8 +2269,8 @@ static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
if (rc == -EBUSY)
break;
- list_del(&mpa_buf->list_entry);
- list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list);
+ list_move_tail(&mpa_buf->list_entry,
+ &iwarp_info->mpa_buf_list);
if (rc) { /* different error, don't continue */
DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 14ac9cab2653..aa633381aa47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -63,8 +63,8 @@
#include "qed_sp.h"
#include "qed_rdma.h"
-#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
-#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
+#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered)
+#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registered)
#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
@@ -796,7 +796,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
tx_pkt.vlan = p_buffer->vlan;
tx_pkt.bd_flags = bd_flags;
tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
- tx_pkt.tx_dest = p_ll2_conn->tx_dest;
+ switch (p_ll2_conn->tx_dest) {
+ case CORE_TX_DEST_NW:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
+ break;
+ case CORE_TX_DEST_LB:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ break;
+ case CORE_TX_DEST_DROP:
+ default:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
+ break;
+ }
tx_pkt.first_frag = first_frag;
tx_pkt.first_frag_len = p_buffer->packet_length;
tx_pkt.cookie = p_buffer;
@@ -1404,7 +1415,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
&p_hwfn->p_ll2_info[i],
&p_ll2_info->rx_queue.rx_sb_index,
&p_ll2_info->rx_queue.p_fw_cons);
- p_ll2_info->rx_queue.b_cb_registred = true;
+ p_ll2_info->rx_queue.b_cb_registered = true;
}
if (data->input.tx_num_desc) {
@@ -1413,7 +1424,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
&p_hwfn->p_ll2_info[i],
&p_ll2_info->tx_queue.tx_sb_index,
&p_ll2_info->tx_queue.p_fw_cons);
- p_ll2_info->tx_queue.b_cb_registred = true;
+ p_ll2_info->tx_queue.b_cb_registered = true;
}
*data->p_connection_handle = i;
@@ -1929,7 +1940,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
/* Stop Tx & Rx of connection, if needed */
if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
- p_ll2_conn->tx_queue.b_cb_registred = false;
+ p_ll2_conn->tx_queue.b_cb_registered = false;
smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
@@ -1940,7 +1951,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
}
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
- p_ll2_conn->rx_queue.b_cb_registred = false;
+ p_ll2_conn->rx_queue.b_cb_registered = false;
smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index f65817012e97..1a5c1ae01474 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -79,7 +79,7 @@ struct qed_ll2_rx_queue {
struct qed_chain rxq_chain;
struct qed_chain rcq_chain;
u8 rx_sb_index;
- bool b_cb_registred;
+ bool b_cb_registered;
__le16 *p_fw_cons;
struct list_head active_descq;
struct list_head free_descq;
@@ -93,7 +93,7 @@ struct qed_ll2_tx_queue {
spinlock_t lock;
struct qed_chain txq_chain;
u8 tx_sb_index;
- bool b_cb_registred;
+ bool b_cb_registered;
__le16 *p_fw_cons;
struct list_head active_descq;
struct list_head free_descq;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 2094d86a7a08..35fd0db6a677 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -58,6 +58,7 @@
#include "qed_iscsi.h"
#include "qed_mcp.h"
+#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
@@ -1304,6 +1305,7 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
struct qed_hwfn *hwfn;
struct qed_mcp_link_params *link_params;
struct qed_ptt *ptt;
+ u32 sup_caps;
int rc;
if (!cdev)
@@ -1330,23 +1332,50 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
link_params->speed.autoneg = params->autoneg;
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
link_params->speed.advertised_speeds = 0;
- if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
- (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
+ sup_caps = QED_LM_1000baseT_Full_BIT |
+ QED_LM_1000baseKX_Full_BIT |
+ QED_LM_1000baseX_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
- if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
+ sup_caps = QED_LM_10000baseT_Full_BIT |
+ QED_LM_10000baseKR_Full_BIT |
+ QED_LM_10000baseKX4_Full_BIT |
+ QED_LM_10000baseR_FEC_BIT |
+ QED_LM_10000baseCR_Full_BIT |
+ QED_LM_10000baseSR_Full_BIT |
+ QED_LM_10000baseLR_Full_BIT |
+ QED_LM_10000baseLRM_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
- if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
+ if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
+ link_params->speed.advertised_speeds |=
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
+ sup_caps = QED_LM_25000baseKR_Full_BIT |
+ QED_LM_25000baseCR_Full_BIT |
+ QED_LM_25000baseSR_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
- if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
+ sup_caps = QED_LM_40000baseLR4_Full_BIT |
+ QED_LM_40000baseKR4_Full_BIT |
+ QED_LM_40000baseCR4_Full_BIT |
+ QED_LM_40000baseSR4_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
- if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ sup_caps = QED_LM_50000baseKR2_Full_BIT |
+ QED_LM_50000baseCR2_Full_BIT |
+ QED_LM_50000baseSR2_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
- if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
+ sup_caps = QED_LM_100000baseKR4_Full_BIT |
+ QED_LM_100000baseSR4_Full_BIT |
+ QED_LM_100000baseCR4_Full_BIT |
+ QED_LM_100000baseLR4_ER4_Full_BIT;
+ if (params->adv_speeds & sup_caps)
link_params->speed.advertised_speeds |=
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
@@ -1459,12 +1488,149 @@ static int qed_get_link_data(struct qed_hwfn *hwfn,
return 0;
}
+static void qed_fill_link_capability(struct qed_hwfn *hwfn,
+ struct qed_ptt *ptt, u32 capability,
+ u32 *if_capability)
+{
+ u32 media_type, tcvr_state, tcvr_type;
+ u32 speed_mask, board_cfg;
+
+ if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
+ media_type = MEDIA_UNSPECIFIED;
+
+ if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
+ tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;
+
+ if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
+ speed_mask = 0xFFFFFFFF;
+
+ if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
+ board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+
+ DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
+ "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
+ media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
+
+ switch (media_type) {
+ case MEDIA_DA_TWINAX:
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+ *if_capability |= QED_LM_20000baseKR2_Full_BIT;
+ /* For DAC media multiple speed capabilities are supported*/
+ capability = capability & speed_mask;
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ *if_capability |= QED_LM_1000baseKX_Full_BIT;
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ *if_capability |= QED_LM_10000baseCR_Full_BIT;
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ *if_capability |= QED_LM_40000baseCR4_Full_BIT;
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ *if_capability |= QED_LM_25000baseCR_Full_BIT;
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ *if_capability |= QED_LM_50000baseCR2_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ *if_capability |= QED_LM_100000baseCR4_Full_BIT;
+ break;
+ case MEDIA_BASE_T:
+ if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
+ *if_capability |= QED_LM_1000baseT_Full_BIT;
+ }
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
+ *if_capability |= QED_LM_10000baseT_Full_BIT;
+ }
+ }
+ if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
+ *if_capability |= QED_LM_1000baseT_Full_BIT;
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
+ *if_capability |= QED_LM_10000baseT_Full_BIT;
+ }
+ break;
+ case MEDIA_SFP_1G_FIBER:
+ case MEDIA_SFPP_10G_FIBER:
+ case MEDIA_XFP_FIBER:
+ case MEDIA_MODULE_FIBER:
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
+ if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
+ (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
+ *if_capability |= QED_LM_1000baseKX_Full_BIT;
+ }
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
+ *if_capability |= QED_LM_10000baseSR_Full_BIT;
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
+ *if_capability |= QED_LM_10000baseLR_Full_BIT;
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
+ *if_capability |= QED_LM_10000baseLRM_Full_BIT;
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
+ *if_capability |= QED_LM_10000baseR_FEC_BIT;
+ }
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+ *if_capability |= QED_LM_20000baseKR2_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
+ *if_capability |= QED_LM_25000baseSR_Full_BIT;
+ }
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
+ *if_capability |= QED_LM_40000baseLR4_Full_BIT;
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
+ *if_capability |= QED_LM_40000baseSR4_Full_BIT;
+ }
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ *if_capability |= QED_LM_50000baseKR2_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
+ if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
+ *if_capability |= QED_LM_100000baseSR4_Full_BIT;
+ }
+
+ break;
+ case MEDIA_KR:
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+ *if_capability |= QED_LM_20000baseKR2_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ *if_capability |= QED_LM_1000baseKX_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ *if_capability |= QED_LM_10000baseKR_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ *if_capability |= QED_LM_25000baseKR_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ *if_capability |= QED_LM_40000baseKR4_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ *if_capability |= QED_LM_50000baseKR2_Full_BIT;
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ *if_capability |= QED_LM_100000baseKR4_Full_BIT;
+ break;
+ case MEDIA_UNSPECIFIED:
+ case MEDIA_NOT_PRESENT:
+ DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
+ "Unknown media and transceiver type;\n");
+ break;
+ }
+}
+
static void qed_fill_link(struct qed_hwfn *hwfn,
+ struct qed_ptt *ptt,
struct qed_link_output *if_link)
{
+ struct qed_mcp_link_capabilities link_caps;
struct qed_mcp_link_params params;
struct qed_mcp_link_state link;
- struct qed_mcp_link_capabilities link_caps;
u32 media_type;
memset(if_link, 0, sizeof(*if_link));
@@ -1495,52 +1661,20 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->advertised_caps |= QED_LM_Autoneg_BIT;
else
if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
- QED_LM_1000baseT_Full_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
- if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
- if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;
-
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
- if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
- QED_LM_1000baseT_Full_BIT;
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
- if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
- if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
- if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
- if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
- if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
- if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;
+
+	/* Fill link advertised capabilities */
+ qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
+ &if_link->advertised_caps);
+	/* Fill link supported capabilities */
+ qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
+ &if_link->supported_caps);
if (link.link_up)
if_link->speed = link.speed;
/* TODO - fill duplex properly */
if_link->duplex = DUPLEX_FULL;
- qed_mcp_get_media_type(hwfn->cdev, &media_type);
+ qed_mcp_get_media_type(hwfn, ptt, &media_type);
if_link->port = qed_get_port_type(media_type);
if_link->autoneg = params.speed.autoneg;
@@ -1553,12 +1687,13 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
/* Link partner capabilities */
- if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
- if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
- if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
+ if (link.partner_adv_speed &
+ QED_LINK_PARTNER_SPEED_1G_FD)
if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
+ if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
+ if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
@@ -1596,21 +1731,34 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
static void qed_get_current_link(struct qed_dev *cdev,
struct qed_link_output *if_link)
{
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
int i;
- qed_fill_link(&cdev->hwfns[0], if_link);
+ hwfn = &cdev->hwfns[0];
+ if (IS_PF(cdev)) {
+ ptt = qed_ptt_acquire(hwfn);
+ if (ptt) {
+ qed_fill_link(hwfn, ptt, if_link);
+ qed_ptt_release(hwfn, ptt);
+ } else {
+ DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
+ }
+ } else {
+ qed_fill_link(hwfn, NULL, if_link);
+ }
for_each_hwfn(cdev, i)
qed_inform_vf_link_state(&cdev->hwfns[i]);
}
-void qed_link_update(struct qed_hwfn *hwfn)
+void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
void *cookie = hwfn->cdev->ops_cookie;
struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
struct qed_link_output if_link;
- qed_fill_link(hwfn, &if_link);
+ qed_fill_link(hwfn, ptt, &if_link);
qed_inform_vf_link_state(hwfn);
if (IS_LEAD_HWFN(hwfn) && cookie)
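The PF branch above follows the standard qed discipline for touching management-firmware registers: acquire a PTT (per-hwfn translation table) window, do the work, release the window. A minimal sketch of that pattern using the driver's names (illustrative only, error handling simplified):

    struct qed_ptt *ptt = qed_ptt_acquire(hwfn);   /* may fail: windows are a scarce resource */
    if (!ptt)
        return;                                    /* caller decides whether to retry */
    qed_fill_link(hwfn, ptt, if_link);             /* safe to read MFW/port registers here */
    qed_ptt_release(hwfn, ptt);                    /* never hold a window longer than needed */

VFs have no PTT at all, which is why the VF path passes NULL and the callee must tolerate it.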
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 5d37ec7e9b0b..f40f654398a0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1247,6 +1247,52 @@ static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data, int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr;
+ u32 i, size;
+
+ func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ memset(p_data, 0, sizeof(*p_data));
+
+ size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+ return size;
+}
+
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
+ FUNC_MF_CFG_MIN_BW);
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
+ FUNC_MF_CFG_MAX_BW);
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
+
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool b_reset)
{
@@ -1274,10 +1320,29 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
goto out;
}
- if (p_hwfn->b_drv_link_init)
- p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
- else
+ if (p_hwfn->b_drv_link_init) {
+ /* Link indication with modern MFW arrives as per-PF
+ * indication.
+ */
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+ struct public_func shmem_info;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ p_link->link_up = !!(shmem_info.status &
+ FUNC_STATUS_VIRTUAL_LINK_UP);
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Virtual link_up = %d\n", p_link->link_up);
+ } else {
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Physical link_up = %d\n", p_link->link_up);
+ }
+ } else {
p_link->link_up = false;
+ }
p_link->full_duplex = true;
switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -1382,7 +1447,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
- qed_link_update(p_hwfn);
+ qed_link_update(p_hwfn, p_ptt);
out:
spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}
@@ -1504,53 +1569,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
-static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
- struct public_func *p_shmem_info)
-{
- struct qed_mcp_function_info *p_info;
-
- p_info = &p_hwfn->mcp_info->func_info;
-
- p_info->bandwidth_min = (p_shmem_info->config &
- FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
- if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
- DP_INFO(p_hwfn,
- "bandwidth minimum out of bounds [%02x]. Set to 1\n",
- p_info->bandwidth_min);
- p_info->bandwidth_min = 1;
- }
-
- p_info->bandwidth_max = (p_shmem_info->config &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
- DP_INFO(p_hwfn,
- "bandwidth maximum out of bounds [%02x]. Set to 100\n",
- p_info->bandwidth_max);
- p_info->bandwidth_max = 100;
- }
-}
-
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct public_func *p_data, int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- memset(p_data, 0, sizeof(*p_data));
-
- size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
- return size;
-}
-
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_function_info *p_info;
@@ -1581,13 +1599,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
FUNC_MF_CFG_OV_STAG_MASK;
p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
- if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
- (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
+ if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
+ if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+ p_hwfn->hw_info.ovlan);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+ p_hwfn->hw_info.ovlan);
+ } else {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+ }
+
qed_sp_pf_update_stag(p_hwfn);
}
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+
/* Acknowledge the MFW */
qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
&resp, &param);
@@ -1833,12 +1867,12 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
+int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_media_type)
{
- struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
- struct qed_ptt *p_ptt;
+ *p_media_type = MEDIA_UNSPECIFIED;
- if (IS_VF(cdev))
+ if (IS_VF(p_hwfn->cdev))
return -EINVAL;
if (!qed_mcp_is_init(p_hwfn)) {
@@ -1846,16 +1880,195 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
return -EBUSY;
}
- *p_media_type = MEDIA_UNSPECIFIED;
+ if (!p_ptt) {
+ *p_media_type = MEDIA_UNSPECIFIED;
+ return -EINVAL;
+ }
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
+ *p_media_type = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ media_type));
+
+ return 0;
+}
+
+int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_transceiver_state,
+ u32 *p_transceiver_type)
+{
+ u32 transceiver_info;
+
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+ *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
return -EBUSY;
+ }
- *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port, media_type));
+ transceiver_info = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
- qed_ptt_release(p_hwfn, p_ptt);
+ *p_transceiver_state = (transceiver_info &
+ ETH_TRANSCEIVER_STATE_MASK) >>
+ ETH_TRANSCEIVER_STATE_OFFSET;
+
+ if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
+ *p_transceiver_type = (transceiver_info &
+ ETH_TRANSCEIVER_TYPE_MASK) >>
+ ETH_TRANSCEIVER_TYPE_OFFSET;
+ else
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
+
+ return 0;
+}
+
+static bool qed_is_transceiver_ready(u32 transceiver_state,
+ u32 transceiver_type)
+{
+ if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
+ ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
+ (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
+ return true;
+
+ return false;
+}
+
+int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_speed_mask)
+{
+ u32 transceiver_type, transceiver_state;
+
+ qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+ &transceiver_type);
+
+	if (!qed_is_transceiver_ready(transceiver_state, transceiver_type))
+ return -EINVAL;
+
+ switch (transceiver_type) {
+ case ETH_TRANSCEIVER_TYPE_1G_LX:
+ case ETH_TRANSCEIVER_TYPE_1G_SX:
+ case ETH_TRANSCEIVER_TYPE_1G_PCC:
+ case ETH_TRANSCEIVER_TYPE_1G_ACC:
+ case ETH_TRANSCEIVER_TYPE_1000BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_10G_LR:
+ case ETH_TRANSCEIVER_TYPE_10G_LRM:
+ case ETH_TRANSCEIVER_TYPE_10G_ER:
+ case ETH_TRANSCEIVER_TYPE_10G_PCC:
+ case ETH_TRANSCEIVER_TYPE_10G_ACC:
+ case ETH_TRANSCEIVER_TYPE_4x10G:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_40G_LR4:
+ case ETH_TRANSCEIVER_TYPE_40G_SR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_100G_AOC:
+ case ETH_TRANSCEIVER_TYPE_100G_SR4:
+ case ETH_TRANSCEIVER_TYPE_100G_LR4:
+ case ETH_TRANSCEIVER_TYPE_100G_ER4:
+ case ETH_TRANSCEIVER_TYPE_100G_ACC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_25G_LR:
+ case ETH_TRANSCEIVER_TYPE_25G_AOC:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_25G_CA_N:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_S:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_L:
+ case ETH_TRANSCEIVER_TYPE_4x25G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_40G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_100G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_XLPPI:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ default:
+ DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
+ transceiver_type);
+ *p_speed_mask = 0xff;
+ break;
+ }
+
+ return 0;
+}
+
+int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_board_config)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+ return -EBUSY;
+ }
+ if (!p_ptt) {
+ *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+ return -EINVAL;
+ }
+
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ *p_board_config = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port,
+ board_cfg));
return 0;
}
@@ -3335,7 +3548,8 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 mcp_resp, mcp_param, features;
- features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
+ features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+ DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
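qed_mcp_get_transceiver_data() above unpacks a single 32-bit shared-memory word into a state field and a type field with mask/offset pairs. A self-contained sketch of the same decode, with placeholder mask and offset values (the real ETH_TRANSCEIVER_* constants live in the HSI headers and are not shown here):

    #include <stdint.h>
    #include <stdio.h>

    /* placeholder layout; the driver's actual masks and offsets differ */
    #define XCVR_STATE_MASK   0x000000ffu
    #define XCVR_STATE_OFFSET 0
    #define XCVR_TYPE_MASK    0x0000ff00u
    #define XCVR_TYPE_OFFSET  8

    int main(void)
    {
        uint32_t word  = 0x00001101u;                               /* sample register value */
        uint32_t state = (word & XCVR_STATE_MASK) >> XCVR_STATE_OFFSET;
        uint32_t type  = (word & XCVR_TYPE_MASK) >> XCVR_TYPE_OFFSET;
        printf("state=0x%x type=0x%x\n", state, type);              /* state=0x1 type=0x11 */
        return 0;
    }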
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 85e6b3989e7a..1adfe52b3905 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -322,14 +322,61 @@ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
* @brief Get media type value of the port.
*
- * @param cdev - qed dev pointer
- * @param mfw_ver - media type value
+ * @param p_hwfn
+ * @param p_ptt
+ * @param media_type - media type value
*
* @return int -
* 0 - Operation was successful.
* -EBUSY - Operation failed
*/
-int qed_mcp_get_media_type(struct qed_dev *cdev,
- u32 *media_type);
+int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *media_type);
+
+/**
+ * @brief Get transceiver data of the port.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_transceiver_state - transceiver state.
+ * @param p_transceiver_type - transceiver type value
+ *
+ * @return int -
+ * 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_transceiver_state,
+				 u32 *p_transceiver_type);
+
+/**
+ * @brief Get transceiver supported speed mask.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_speed_mask - Bit mask of all supported speeds.
+ *
+ * @return int -
+ * 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_speed_mask);
+
+/**
+ * @brief Get board configuration.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_board_config - Board config.
+ *
+ * @return int -
+ * 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_board_config);
/**
* @brief General function for sending commands to the MCP
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 6172354b451c..ffac4ac87394 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -211,9 +211,8 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
if (!p_buffer)
break;
- list_del(&p_buffer->list_entry);
- list_add_tail(&p_buffer->list_entry,
- &p_ooo_info->free_buffers_list);
+ list_move_tail(&p_buffer->list_entry,
+ &p_ooo_info->free_buffers_list);
}
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
@@ -247,9 +246,8 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
if (!p_buffer)
break;
- list_del(&p_buffer->list_entry);
- list_add_tail(&p_buffer->list_entry,
- &p_ooo_info->free_buffers_list);
+ list_move_tail(&p_buffer->list_entry,
+ &p_ooo_info->free_buffers_list);
}
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
@@ -353,11 +351,9 @@ void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
u32 cid, u8 drop_isle, u8 drop_size)
{
- struct qed_ooo_archipelago *p_archipelago = NULL;
struct qed_ooo_isle *p_isle = NULL;
u8 isle_idx;
- p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
for (isle_idx = 0; isle_idx < drop_size; isle_idx++) {
p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle);
if (!p_isle) {
@@ -462,7 +458,6 @@ void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle)
{
- struct qed_ooo_archipelago *p_archipelago = NULL;
struct qed_ooo_isle *p_right_isle = NULL;
struct qed_ooo_isle *p_left_isle = NULL;
@@ -475,7 +470,6 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
return;
}
- p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
list_del(&p_right_isle->list_entry);
p_ooo_info->cur_isles_number--;
if (left_isle) {
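The qed_ooo.c hunks above (and the qed_spq.c hunk later in this series) replace a list_del()/list_add_tail() pair with list_move_tail(). That is a pure simplification: in this era's include/linux/list.h the helper is defined as exactly that pair, roughly:

    static inline void list_move_tail(struct list_head *list,
                                      struct list_head *head)
    {
        __list_del_entry(list);      /* unlink from the current list */
        list_add_tail(list, head);   /* append to the tail of the new one */
    }

so behaviour is unchanged; the single call simply states the intent (move, not delete-then-insert) directly.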
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index be941cfaa2d4..c71391b9c757 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
num_cons, "Toggle");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate toogle bits, rc = %d\n", rc);
+ "Failed to allocate toggle bits, rc = %d\n", rc);
goto free_cq_map;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index f736f70956fd..2440970882c4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -216,6 +216,12 @@
0x00c000UL
#define DORQ_REG_IFEN \
0x100040UL
+#define DORQ_REG_TAG1_OVRD_MODE \
+ 0x1008b4UL
+#define DORQ_REG_PF_PCP_BB_K2 \
+ 0x1008c4UL
+#define DORQ_REG_PF_EXT_VID_BB_K2 \
+ 0x1008c8UL
#define DORQ_REG_DB_DROP_REASON \
0x100a2cUL
#define DORQ_REG_DB_DROP_DETAILS \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 7d7a64c55ff1..f9167d1354bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
- enum roce_flavor flavor;
-
switch (roce_mode) {
case ROCE_V1:
- flavor = PLAIN_ROCE;
- break;
+ return PLAIN_ROCE;
case ROCE_V2_IPV4:
- flavor = RROCE_IPV4;
- break;
+ return RROCE_IPV4;
case ROCE_V2_IPV6:
- flavor = ROCE_V2_IPV6;
- break;
+ return RROCE_IPV6;
default:
- flavor = MAX_ROCE_MODE;
- break;
+ return MAX_ROCE_FLAVOR;
}
- return flavor;
}
static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
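The switch rewrite above is more than style: the old code assigned ROCE_V2_IPV6 (a roce_mode constant) where the roce_flavor value RROCE_IPV6 was intended, and the default case likewise returned MAX_ROCE_MODE from the wrong enum. The direct-return form fixes both mix-ups; the corrected shape, consolidated for illustration:

    static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
    {
        switch (roce_mode) {
        case ROCE_V1:      return PLAIN_ROCE;
        case ROCE_V2_IPV4: return RROCE_IPV4;
        case ROCE_V2_IPV6: return RROCE_IPV6;   /* flavor, not the mode constant */
        default:           return MAX_ROCE_FLAVOR;
        }
    }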
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 8de644b4721e..77b6248ad3b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
struct qed_tunnel_info *p_src)
{
- enum tunnel_clss type;
+ int type;
p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 1673fc90027f..c4a6274dd625 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -730,8 +730,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
!list_empty(head)) {
struct qed_spq_entry *p_ent =
list_first_entry(head, struct qed_spq_entry, list);
- list_del(&p_ent->list);
- list_add_tail(&p_ent->list, &p_spq->completion_pending);
+ list_move_tail(&p_ent->list, &p_spq->completion_pending);
p_spq->comp_sent_count++;
rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 3d4269659820..b6cccf44bf40 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
if (!p_iov->b_pre_fp_hsi &&
- ETH_HSI_VER_MINOR &&
(resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
DP_INFO(p_hwfn,
"PF is using older fastpath HSI; %02x.%02x is configured\n",
@@ -572,7 +571,7 @@ free_p_iov:
static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_src,
- enum qed_tunn_clss mask, u8 *p_cls)
+ enum qed_tunn_mode mask, u8 *p_cls)
{
if (p_src->b_update_mode) {
p_req->tun_mode_update_mask |= BIT(mask);
@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_src,
- enum qed_tunn_clss mask,
+ enum qed_tunn_mode mask,
u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
u8 *p_update_port, u16 *p_udp_port)
{
@@ -1689,7 +1688,7 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
ops->ports_update(cookie, vxlan_port, geneve_port);
/* Always update link configuration according to bulletin */
- qed_link_update(hwfn);
+ qed_link_update(hwfn, NULL);
}
void qed_iov_vf_task(struct work_struct *work)
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 6a4d266fb8e2..de98a974673b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -440,7 +440,7 @@ struct qede_fastpath {
struct qede_tx_queue *txq;
struct qede_tx_queue *xdp_tx;
-#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
+#define VEC_NAME_SIZE (FIELD_SIZEOF(struct net_device, name) + 8)
char name[VEC_NAME_SIZE];
};
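FIELD_SIZEOF(type, member) is the kernel.h spelling of the sizeof-through-a-NULL-pointer idiom the old macro open-coded; sizeof is evaluated at compile time, so nothing is actually dereferenced. A stand-alone demo with a hypothetical stand-in struct (struct net_device itself is far too large to reproduce here):

    #include <stdio.h>

    #define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

    struct net_device_like {      /* hypothetical stand-in for struct net_device */
        char name[16];            /* IFNAMSIZ is 16 in the real kernel */
        int  ifindex;
    };

    int main(void)
    {
        printf("%zu\n", FIELD_SIZEOF(struct net_device_like, name) + 8);  /* prints 24 */
        return 0;
    }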
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 19652cd27ca7..8cbbd628fd73 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -413,18 +413,42 @@ struct qede_link_mode_mapping {
};
static const struct qede_link_mode_mapping qed_lm_map[] = {
- {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
{QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
{QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
{QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
- {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT},
{QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
+ {QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
+ {QED_LM_2500baseX_Full_BIT, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
+ {QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT},
+ {QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
+ {QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT},
{QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
- {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
+ {QED_LM_10000baseR_FEC_BIT, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT},
+ {QED_LM_20000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT},
+ {QED_LM_40000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
+ {QED_LM_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
+ {QED_LM_40000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
{QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
+ {QED_LM_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
+ {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
+ {QED_LM_25000baseSR_Full_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
+ {QED_LM_50000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
{QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
{QED_LM_100000baseKR4_Full_BIT,
- ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
+ {QED_LM_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
+ {QED_LM_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
+ {QED_LM_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
+ {QED_LM_50000baseSR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
+ {QED_LM_1000baseX_Full_BIT, ETHTOOL_LINK_MODE_1000baseX_Full_BIT},
+ {QED_LM_10000baseCR_Full_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
+ {QED_LM_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
+ {QED_LM_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
+ {QED_LM_10000baseLRM_Full_BIT, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT},
};
#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \
@@ -494,6 +518,7 @@ static int qede_set_link_ksettings(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
struct qed_link_params params;
+ u32 sup_caps;
if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev, "Link settings are not allowed to be changed\n");
@@ -520,52 +545,85 @@ static int qede_set_link_ksettings(struct net_device *dev,
params.forced_speed = base->speed;
switch (base->speed) {
case SPEED_1000:
- if (!(current_link.supported_caps &
- QED_LM_1000baseT_Full_BIT)) {
+ sup_caps = QED_LM_1000baseT_Full_BIT |
+ QED_LM_1000baseKX_Full_BIT |
+ QED_LM_1000baseX_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "1G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_1000baseT_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
case SPEED_10000:
- if (!(current_link.supported_caps &
- QED_LM_10000baseKR_Full_BIT)) {
+ sup_caps = QED_LM_10000baseT_Full_BIT |
+ QED_LM_10000baseKR_Full_BIT |
+ QED_LM_10000baseKX4_Full_BIT |
+ QED_LM_10000baseR_FEC_BIT |
+ QED_LM_10000baseCR_Full_BIT |
+ QED_LM_10000baseSR_Full_BIT |
+ QED_LM_10000baseLR_Full_BIT |
+ QED_LM_10000baseLRM_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "10G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_10000baseKR_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
- case SPEED_25000:
+ case SPEED_20000:
if (!(current_link.supported_caps &
- QED_LM_25000baseKR_Full_BIT)) {
+ QED_LM_20000baseKR2_Full_BIT)) {
+ DP_INFO(edev, "20G speed not supported\n");
+ return -EINVAL;
+ }
+ params.adv_speeds = QED_LM_20000baseKR2_Full_BIT;
+ break;
+ case SPEED_25000:
+ sup_caps = QED_LM_25000baseKR_Full_BIT |
+ QED_LM_25000baseCR_Full_BIT |
+ QED_LM_25000baseSR_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "25G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_25000baseKR_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
case SPEED_40000:
- if (!(current_link.supported_caps &
- QED_LM_40000baseLR4_Full_BIT)) {
+ sup_caps = QED_LM_40000baseLR4_Full_BIT |
+ QED_LM_40000baseKR4_Full_BIT |
+ QED_LM_40000baseCR4_Full_BIT |
+ QED_LM_40000baseSR4_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "40G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_40000baseLR4_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
case SPEED_50000:
- if (!(current_link.supported_caps &
- QED_LM_50000baseKR2_Full_BIT)) {
+ sup_caps = QED_LM_50000baseKR2_Full_BIT |
+ QED_LM_50000baseCR2_Full_BIT |
+ QED_LM_50000baseSR2_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "50G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_50000baseKR2_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
case SPEED_100000:
- if (!(current_link.supported_caps &
- QED_LM_100000baseKR4_Full_BIT)) {
+ sup_caps = QED_LM_100000baseKR4_Full_BIT |
+ QED_LM_100000baseSR4_Full_BIT |
+ QED_LM_100000baseCR4_Full_BIT |
+ QED_LM_100000baseLR4_ER4_Full_BIT;
+ if (!(current_link.supported_caps & sup_caps)) {
DP_INFO(edev, "100G speed not supported\n");
return -EINVAL;
}
- params.adv_speeds = QED_LM_100000baseKR4_Full_BIT;
+ params.adv_speeds = current_link.supported_caps &
+ sup_caps;
break;
default:
DP_INFO(edev, "Unsupported speed %u\n", base->speed);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b48f76182049..10b075bc5959 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
- ql_write_nvram_reg(qdev, spir,
- ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 81312924df14..0c443ea98479 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
int (*config_loopback) (struct qlcnic_adapter *, u8);
int (*clear_loopback) (struct qlcnic_adapter *, u8);
int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
- void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+ void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
}
static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 *addr, u16 id)
+ u64 *addr, u16 vlan,
+ struct qlcnic_host_tx_ring *tx_ring)
{
- adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
}
static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 569d54ededec..2a533280b124 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2135,7 +2135,8 @@ out:
}
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
- u16 vlan_id)
+ u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 mac[ETH_ALEN];
memcpy(&mac, addr, ETH_ALEN);
@@ -4232,7 +4233,6 @@ static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
qlcnic_83xx_aer_start_poll_work(adapter);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index b75a81246856..73fe2f64491d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *ring);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 4b76c69fe86d..834208e55f7b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -883,7 +883,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
struct qlcnic_adapter *adapter = netdev_priv(netdev);
if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
- return 0;
+ return 1;
switch (capid) {
case DCB_CAP_ATTR_PG:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4bb33af8e2b3..56a3bd9e37dc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev);
void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
- u64 *uaddr, u16 vlan_id);
+ u64 *uaddr, u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring);
int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
struct ethtool_coalesce *);
int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 84dd83031a1b..9647578cbe6a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
}
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
- u16 vlan_id)
+ u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
@@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
- vlan_id);
+ vlan_id, tx_ring);
tmp_fil->ftime = jiffies;
return;
}
@@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (!fil)
return;
- qlcnic_change_filter(adapter, &src_addr, vlan_id);
+ qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
if (adapter->drv_mac_learn)
- qlcnic_send_filter(adapter, first_desc, skb);
+ qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2d38d1ac2aae..d42ba2293d8c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev);
-#endif
static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
.ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
.ndo_features_check = qlcnic_features_check,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = qlcnic_poll_controller,
-#endif
#ifdef CONFIG_QLCNIC_SRIOV
.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
.ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
@@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx;
- struct qlcnic_host_tx_ring *tx_ring;
- int ring;
-
- if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
- return;
-
- recv_ctx = adapter->recv_ctx;
-
- for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_disable_sds_intr(adapter, sds_ring);
- napi_schedule(&sds_ring->napi);
- }
-
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
- /* Only Multi-Tx queue capable devices need to
- * schedule NAPI for TX rings
- */
- if ((qlcnic_83xx_check(adapter) &&
- (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
- (qlcnic_82xx_check(adapter) &&
- !qlcnic_check_multi_tx(adapter)))
- return;
-
- for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
- tx_ring = &adapter->tx_ring[ring];
- qlcnic_disable_tx_intr(adapter, tx_ring);
- napi_schedule(&tx_ring->napi);
- }
- }
-}
-#endif
-
static void
qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
{
@@ -3975,7 +3930,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
&adapter->state))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 77e386ebff09..f7c2f32237cb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -904,13 +904,11 @@ static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
u32 *hdr, u32 *pay, u32 size)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 fw_mbx;
u8 i, max = 2, hdr_size, j;
hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
max = (size / sizeof(u32)) + hdr_size;
- fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
for (i = 2, j = 0; j < hdr_size; i++, j++)
*(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
for (; j < max; i++, j++)
@@ -936,7 +934,7 @@ static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
{
struct qlcnic_vf_info *vf = trans->vf;
- u32 pay_size, hdr_size;
+ u32 pay_size;
u32 *hdr, *pay;
int ret;
u8 pci_func = trans->func_id;
@@ -947,14 +945,12 @@ static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
if (type == QLC_BC_COMMAND) {
hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
- hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
trans->curr_req_frag);
pay_size = (pay_size / sizeof(u32));
} else {
hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
- hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
trans->curr_rsp_frag);
pay_size = (pay_size / sizeof(u32));
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c
index 6c8543fb90c0..4292c89bd35c 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -81,8 +81,8 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result)
return ret;
}
-int
-qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
+static int
+__qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
{
__be16 tx_data[2];
struct spi_transfer transfer[2];
@@ -117,3 +117,33 @@ qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
return ret;
}
+
+int
+qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value, int retry)
+{
+ int ret, i = 0;
+ u16 confirmed;
+
+ do {
+ ret = __qcaspi_write_register(qca, reg, value);
+ if (ret)
+ return ret;
+
+ if (!retry)
+ return 0;
+
+ ret = qcaspi_read_register(qca, reg, &confirmed);
+ if (ret)
+ return ret;
+
+ ret = confirmed != value;
+ if (!ret)
+ return 0;
+
+ i++;
+ qca->stats.write_verify_failed++;
+
+ } while (i <= retry);
+
+ return ret;
+}
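The new retry argument keeps the old single-write behaviour when 0 and otherwise reads the register back, rewriting up to retry times until the value sticks (each miss bumps stats.write_verify_failed). A hypothetical caller, mirroring how qca_spi.c passes the wr_verify module parameter below:

    ret = qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr, wr_verify);
    if (ret)                       /* non-zero: SPI error, or value never matched */
        netdev_warn(qca->net_dev, "INTR_ENABLE write did not stick\n");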
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h
index 27124c2bb77a..356de8ec5d48 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k.h
@@ -66,6 +66,6 @@
void qcaspi_spi_error(struct qcaspi *qca);
int qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result);
-int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value);
+int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value, int retry);
#endif /* _QCA_7K_H */
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 51d89c86e60f..a9f1bc013364 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -60,6 +60,7 @@ static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = {
"Write buffer misses",
"Transmit ring full",
"SPI errors",
+ "Write verify errors",
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 66b775d462fd..d5310504f436 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -69,6 +69,12 @@ static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN;
module_param(qcaspi_pluggable, int, 0);
MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
+#define QCASPI_WRITE_VERIFY_MIN 0
+#define QCASPI_WRITE_VERIFY_MAX 3
+static int wr_verify = QCASPI_WRITE_VERIFY_MIN;
+module_param(wr_verify, int, 0);
+MODULE_PARM_DESC(wr_verify, "SPI register write verify retries. Use 0-3.");
+
#define QCASPI_TX_TIMEOUT (1 * HZ)
#define QCASPI_QCA7K_REBOOT_TIME_MS 1000
@@ -77,7 +83,7 @@ start_spi_intr_handling(struct qcaspi *qca, u16 *intr_cause)
{
*intr_cause = 0;
- qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0);
+ qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
qcaspi_read_register(qca, SPI_REG_INTR_CAUSE, intr_cause);
netdev_dbg(qca->net_dev, "interrupts: 0x%04x\n", *intr_cause);
}
@@ -90,8 +96,8 @@ end_spi_intr_handling(struct qcaspi *qca, u16 intr_cause)
SPI_INT_RDBUF_ERR |
SPI_INT_WRBUF_ERR);
- qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause);
- qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable);
+ qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause, 0);
+ qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable, wr_verify);
netdev_dbg(qca->net_dev, "acking int: 0x%04x\n", intr_cause);
}
@@ -239,7 +245,7 @@ qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb)
len = skb->len;
- qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len);
+ qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len, wr_verify);
if (qca->legacy_mode)
qcaspi_tx_cmd(qca, QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
@@ -345,6 +351,7 @@ qcaspi_receive(struct qcaspi *qca)
/* Read the packet size. */
qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available);
+
netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
available);
@@ -353,7 +360,7 @@ qcaspi_receive(struct qcaspi *qca)
return -1;
}
- qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available);
+ qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available, wr_verify);
if (qca->legacy_mode)
qcaspi_tx_cmd(qca, QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
@@ -524,7 +531,7 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
netdev_dbg(qca->net_dev, "sync: resetting device.\n");
qcaspi_read_register(qca, SPI_REG_SPI_CONFIG, &spi_config);
spi_config |= QCASPI_SLAVE_RESET_BIT;
- qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config);
+ qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config, 0);
qca->sync = QCASPI_SYNC_RESET;
qca->stats.trig_reset++;
@@ -684,7 +691,7 @@ qcaspi_netdev_close(struct net_device *dev)
netif_stop_queue(dev);
- qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0);
+ qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
free_irq(qca->spi_dev->irq, qca);
kthread_stop(qca->spi_thread);
@@ -904,6 +911,13 @@ qca_spi_probe(struct spi_device *spi)
return -EINVAL;
}
+ if (wr_verify < QCASPI_WRITE_VERIFY_MIN ||
+ wr_verify > QCASPI_WRITE_VERIFY_MAX) {
+ dev_err(&spi->dev, "Invalid write verify: %d\n",
+ wr_verify);
+ return -EINVAL;
+ }
+
dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
QCASPI_DRV_VERSION,
qcaspi_clkspeed,
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index fc0e98726b36..2d2c49726492 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -73,6 +73,7 @@ struct qcaspi_stats {
u64 write_buf_miss;
u64 ring_full;
u64 spi_err;
+ u64 write_verify_failed;
};
struct qcaspi {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 7fd86d40a337..11167abe5934 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
struct sk_buff *skbn;
if (skb->dev->type == ARPHRD_ETHER) {
- if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+ if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
kfree_skb(skb);
return;
}
@@ -147,7 +147,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
}
if (skb_headroom(skb) < required_headroom) {
- if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
+ if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
return -ENOMEM;
}
@@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
if (!skb)
goto done;
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ return RX_HANDLER_PASS;
+
dev = skb->dev;
port = rmnet_get_port(dev);
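Both GFP_KERNEL to GFP_ATOMIC changes above follow from calling context: rmnet's rx handler runs from netif_receive_skb() in softirq context, where a GFP_KERNEL allocation may sleep and is therefore illegal. The rule, sketched on the egress hunk (illustrative):

    if (skb_headroom(skb) < required_headroom) {
        /* atomic context: must not sleep, so GFP_ATOMIC, and the
         * allocation may fail -- propagate the error instead of blocking */
        if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
            return -ENOMEM;
    }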
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index aa11b70b9ca4..04aa592f35c3 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1024,16 +1024,8 @@ static int r6040_mii_probe(struct net_device *dev)
return PTR_ERR(phydev);
}
- /* mask with MAC supported features */
- phydev->supported &= (SUPPORTED_10baseT_Half
- | SUPPORTED_10baseT_Full
- | SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full
- | SUPPORTED_Autoneg
- | SUPPORTED_MII
- | SUPPORTED_TP);
-
- phydev->advertising = phydev->supported;
+ phy_set_max_speed(phydev, SPEED_100);
+
lp->old_link = 0;
lp->old_duplex = -1;
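phy_set_max_speed() is the phylib replacement for hand-masking SUPPORTED_* bits: one call strips every mode faster than the given speed from phydev->supported and, in this era of phylib, copies the result to phydev->advertising as well. The conversion above in miniature (illustrative):

    /* before: manually AND phydev->supported with the 10/100 modes,
     *         then copy the result into phydev->advertising */
    /* after: */
    phy_set_max_speed(phydev, SPEED_100);   /* keeps 10/100, drops gigabit modes */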
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 1d8631303b53..006b0aa8cec3 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
@@ -76,8 +77,6 @@ static const int multicast_filter_limit = 32;
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
-#define RTL8169_TX_TIMEOUT (6*HZ)
-
/* write/read MMIO register */
#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg))
#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg))
@@ -632,7 +631,6 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_ENABLED = 0,
- RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
RTL_FLAG_MAX
};
@@ -665,6 +663,7 @@ struct rtl8169_private {
u16 event_slow;
const struct rtl_coalesce_info *coalesce_info;
+ struct clk *clk;
struct mdio_ops {
void (*write)(struct rtl8169_private *, int, int);
@@ -1352,7 +1351,8 @@ static void rtl_irq_enable_all(struct rtl8169_private *tp)
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
rtl_irq_disable(tp);
- rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
+ rtl_ack_events(tp, 0xffff);
+ /* PCI commit */
RTL_R8(tp, ChipCmd);
}
@@ -4046,29 +4046,25 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
rtl_hw_phy_config(dev);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
- netif_dbg(tp, drv, dev,
- "Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
- RTL_W8(tp, 0x82, 0x01);
- }
-
- pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
-
- if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
netif_dbg(tp, drv, dev,
"Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(tp, 0x82, 0x01);
- netif_dbg(tp, drv, dev,
- "Set PHY Reg 0x0bh = 0x00h\n");
- rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
}
/* We may have called phy_speed_down before */
phy_speed_up(dev->phydev);
genphy_soft_reset(dev->phydev);
+
+ /* It was reported that several chips end up with 10MBit/Half on a
+ * 1GBit link after resuming from S3. For whatever reason the PHY on
+ * these chips doesn't properly start a renegotiation when soft-reset.
+ * Explicitly requesting a renegotiation fixes this.
+ */
+ if (dev->phydev->autoneg == AUTONEG_ENABLE)
+ phy_restart_aneg(dev->phydev);
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4272,8 +4268,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
@@ -4525,9 +4521,14 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
{
- /* Set DMA burst size and Interframe Gap Time */
- RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
- (InterFrameGap << TxInterFrameGapShift));
+ u32 val = TX_DMA_BURST << TxDMAShift |
+ InterFrameGap << TxInterFrameGapShift;
+
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_39)
+ val |= TXCFG_AUTO_FIFO;
+
+ RTL_W32(tp, TxConfig, val);
}
static void rtl_set_rx_max_size(struct rtl8169_private *tp)
@@ -4551,27 +4552,19 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
{
- static const struct rtl_cfg2_info {
- u32 mac_version;
- u32 clk;
- u32 val;
- } cfg2_info [] = {
- { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
- { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
- { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
- { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
- };
- const struct rtl_cfg2_info *p = cfg2_info;
- unsigned int i;
- u32 clk;
+ u32 val;
- clk = RTL_R8(tp, Config2) & PCI_Clock_66MHz;
- for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
- if ((p->mac_version == mac_version) && (p->clk == clk)) {
- RTL_W32(tp, 0x7c, p->val);
- break;
- }
- }
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ val = 0x000fff00;
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_06)
+ val = 0x00ffff00;
+ else
+ return;
+
+ if (RTL_R8(tp, Config2) & PCI_Clock_66MHz)
+ val |= 0xff;
+
+ RTL_W32(tp, 0x7c, val);
}
static void rtl_set_rx_mode(struct net_device *dev)
@@ -4775,12 +4768,14 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
if (enable) {
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
} else {
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
}
+
+ udelay(10);
}
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
@@ -5020,7 +5015,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_disable_clock_request(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
/* Adjust EEE LED frequency */
@@ -5054,7 +5048,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_disable_clock_request(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
@@ -5099,8 +5092,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5198,8 +5189,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5282,8 +5271,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
{
rtl8168ep_stop_cmac(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
@@ -5605,7 +5592,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -5625,6 +5611,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
+ rtl_hw_aspm_clkreq_enable(tp, false);
+
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
@@ -5633,6 +5621,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8101(struct rtl8169_private *tp)
@@ -5862,6 +5851,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
tp->cur_tx = tp->dirty_tx = 0;
+ netdev_reset_queue(tp->dev);
}
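/* A hedged aside on the byte queue limit (BQL) accounting used here:
 * the three calls must stay balanced for the stack's queue limiter to
 * work, roughly:
 *
 *	netdev_sent_queue(dev, skb->len);	   // xmit path
 *	netdev_completed_queue(dev, pkts, bytes);  // TX completion
 *	netdev_reset_queue(dev);		   // ring teardown
 *
 * Skipping the reset on teardown would leave stale in-flight bytes
 * accounted and could stall the queue after the next ring init.
 */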
static void rtl_reset_work(struct rtl8169_private *tp)
@@ -6164,6 +6154,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
+ netdev_sent_queue(dev, skb->len);
+
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
@@ -6262,7 +6254,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
- unsigned int dirty_tx, tx_left;
+ unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;
dirty_tx = tp->dirty_tx;
smp_rmb();
@@ -6286,10 +6278,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
- u64_stats_update_begin(&tp->tx_stats.syncp);
- tp->tx_stats.packets++;
- tp->tx_stats.bytes += tx_skb->skb->len;
- u64_stats_update_end(&tp->tx_stats.syncp);
+ pkts_compl++;
+ bytes_compl += tx_skb->skb->len;
dev_consume_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
}
@@ -6298,6 +6288,13 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
}
if (tp->dirty_tx != dirty_tx) {
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ u64_stats_update_begin(&tp->tx_stats.syncp);
+ tp->tx_stats.packets += pkts_compl;
+ tp->tx_stats.bytes += bytes_compl;
+ u64_stats_update_end(&tp->tx_stats.syncp);
+
tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
* - publish dirty_tx ring index (write barrier)
@@ -6462,42 +6459,29 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow)))
return IRQ_NONE;
- rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Workqueue context.
- */
-static void rtl_slow_event_work(struct rtl8169_private *tp)
-{
- struct net_device *dev = tp->dev;
- u16 status;
+ if (unlikely(status & SYSErr)) {
+ rtl8169_pcierr_interrupt(tp->dev);
+ goto out;
+ }
- status = rtl_get_events(tp) & tp->event_slow;
- rtl_ack_events(tp, status);
+ if (status & LinkChg)
+ phy_mac_interrupt(tp->dev->phydev);
- if (unlikely(status & RxFIFOOver)) {
- switch (tp->mac_version) {
- /* Work around for rx fifo overflow */
- case RTL_GIGA_MAC_VER_11:
- netif_stop_queue(dev);
- /* XXX - Hack alert. See rtl_task(). */
- set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
- default:
- break;
- }
+ if (unlikely(status & RxFIFOOver &&
+ tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+ netif_stop_queue(tp->dev);
+ /* XXX - Hack alert. See rtl_task(). */
+ set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
}
- if (unlikely(status & SYSErr))
- rtl8169_pcierr_interrupt(dev);
-
- if (status & LinkChg)
- phy_mac_interrupt(dev->phydev);
+ if (status & RTL_EVENT_NAPI) {
+ rtl_irq_disable(tp);
+ napi_schedule_irqoff(&tp->napi);
+ }
+out:
+ rtl_ack_events(tp, status);
- rtl_irq_enable_all(tp);
+ return IRQ_HANDLED;
}
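/* How the handler above hangs together: slow events (SYSErr, LinkChg,
 * RxFIFOOver) are handled directly in hard-irq context, only the NAPI
 * events disable the interrupt and defer to rtl8169_poll(), and every
 * path funnels through `out:` so the events are acked exactly once.
 */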
static void rtl_task(struct work_struct *work)
@@ -6506,8 +6490,6 @@ static void rtl_task(struct work_struct *work)
int bitnr;
void (*action)(struct rtl8169_private *);
} rtl_work[] = {
- /* XXX - keep rtl_slow_event_work() as first element. */
- { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
{ RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
};
struct rtl8169_private *tp =
@@ -6537,29 +6519,16 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
{
struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
struct net_device *dev = tp->dev;
- u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
- int work_done= 0;
- u16 status;
-
- status = rtl_get_events(tp);
- rtl_ack_events(tp, status & ~tp->event_slow);
+ int work_done;
- if (status & RTL_EVENT_NAPI_RX)
- work_done = rtl_rx(dev, tp, (u32) budget);
+ work_done = rtl_rx(dev, tp, (u32) budget);
- if (status & RTL_EVENT_NAPI_TX)
- rtl_tx(dev, tp);
-
- if (status & tp->event_slow) {
- enable_mask &= ~tp->event_slow;
-
- rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
- }
+ rtl_tx(dev, tp);
if (work_done < budget) {
napi_complete_done(napi, work_done);
- rtl_irq_enable(tp, enable_mask);
+ rtl_irq_enable_all(tp);
mmiowb();
}
@@ -6835,7 +6804,6 @@ static void rtl8169_net_suspend(struct net_device *dev)
phy_stop(dev->phydev);
netif_device_detach(dev);
- netif_stop_queue(dev);
rtl_lock_work(tp);
napi_disable(&tp->napi);
@@ -6853,8 +6821,10 @@ static int rtl8169_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
rtl8169_net_suspend(dev);
+ clk_disable_unprepare(tp->clk);
return 0;
}
@@ -6882,6 +6852,9 @@ static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ clk_prepare_enable(tp->clk);
if (netif_running(dev))
__rtl8169_resume(dev);
@@ -7077,20 +7050,12 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
{
unsigned int flags;
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
flags = PCI_IRQ_LEGACY;
- break;
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_40:
- /* This version was reported to have issues with resume
- * from suspend when using MSI-X
- */
- flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
- break;
- default:
+ } else {
flags = PCI_IRQ_ALL_TYPES;
}
@@ -7257,6 +7222,11 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
}
}
+static void rtl_disable_clk(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
@@ -7277,6 +7247,32 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
tp->supports_gmii = cfg->has_gmii;
+ /* Get the *optional* external "ether_clk" used on some boards */
+ tp->clk = devm_clk_get(&pdev->dev, "ether_clk");
+ if (IS_ERR(tp->clk)) {
+ rc = PTR_ERR(tp->clk);
+ if (rc == -ENOENT) {
+ /* clk-core allows NULL (for suspend / resume) */
+ tp->clk = NULL;
+ } else if (rc == -EPROBE_DEFER) {
+ return rc;
+ } else {
+ dev_err(&pdev->dev, "failed to get clk: %d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = clk_prepare_enable(tp->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to enable clk: %d\n", rc);
+ return rc;
+ }
+
+ rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk,
+ tp->clk);
+ if (rc)
+ return rc;
+ }
+
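+ /* Sketch of the optional-clock idiom above (hedged, based on the clk
+ * API contract): devm_clk_get() returning -ENOENT means the board has
+ * no "ether_clk" at all, and a NULL struct clk * is a valid no-op
+ * handle for clk_prepare_enable()/clk_disable_unprepare(), so the
+ * suspend/resume paths may call them unconditionally. -EPROBE_DEFER
+ * must be propagated so probing retries once the clock provider shows
+ * up.
+ */
+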
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
@@ -7321,11 +7317,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
- if ((sizeof(dma_addr_t) > 4) &&
- (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
- tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (sizeof(dma_addr_t) > 4 && (use_dac == 1 || (use_dac == -1 &&
+ tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
+ !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
/* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
if (!pci_is_pcie(pdev))
@@ -7341,14 +7335,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_init_rxcfg(tp);
- rtl_irq_disable(tp);
+ rtl8169_irq_mask_and_ack(tp);
rtl_hw_initialize(tp);
rtl_hw_reset(tp);
- rtl_ack_events(tp, 0xffff);
-
pci_set_master(pdev);
rtl_init_mdio_ops(tp);
@@ -7388,7 +7380,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
dev->ethtool_ops = &rtl8169_ethtool_ops;
- dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 1470fc12282b..1c6e4df94f01 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -428,6 +428,7 @@ enum EIS_BIT {
EIS_CULF1 = 0x00000080,
EIS_TFFF = 0x00000100,
EIS_QFS = 0x00010000,
+ EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)),
};
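/* For reference (standard kernel definition, not part of this header):
 * GENMASK(h, l) sets bits h..l inclusive, so the reserved mask above is
 *	GENMASK(31, 17) | GENMASK(15, 11) == 0xfffe0000 | 0x0000f800
 * i.e. EIS_RESERVED == 0xfffef800.
 */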
/* RIC0 */
@@ -472,6 +473,7 @@ enum RIS0_BIT {
RIS0_FRF15 = 0x00008000,
RIS0_FRF16 = 0x00010000,
RIS0_FRF17 = 0x00020000,
+ RIS0_RESERVED = GENMASK(31, 18),
};
/* RIC1 */
@@ -528,6 +530,7 @@ enum RIS2_BIT {
RIS2_QFF16 = 0x00010000,
RIS2_QFF17 = 0x00020000,
RIS2_RFFF = 0x80000000,
+ RIS2_RESERVED = GENMASK(30, 18),
};
/* TIC */
@@ -544,6 +547,7 @@ enum TIS_BIT {
TIS_FTF1 = 0x00000002, /* Undocumented? */
TIS_TFUF = 0x00000100,
TIS_TFWF = 0x00000200,
+ TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
};
/* ISS */
@@ -617,6 +621,7 @@ enum GIC_BIT {
enum GIS_BIT {
GIS_PTCF = 0x00000001, /* Undocumented? */
GIS_PTMF = 0x00000004,
+ GIS_RESERVED = GENMASK(15, 10),
};
/* GIE (R-Car Gen3 only) */
@@ -954,7 +959,10 @@ enum RAVB_QUEUE {
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
-#define NUM_TX_DESC 2 /* TX descriptors per packet */
+
+/* TX descriptors per packet */
+#define NUM_TX_DESC_GEN2 2
+#define NUM_TX_DESC_GEN3 1
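+
+/* Assumed rationale for the split (see ravb_start_xmit below): Gen2
+ * DMA wants DPTR_ALIGN-aligned buffers, so a packet is sent as an
+ * aligned bounce-buffer head plus the unaligned remainder -- two
+ * descriptors (DT_FSTART/DT_FEND). Gen3 can DMA from unaligned
+ * addresses and sends a single DT_FSINGLE descriptor.
+ */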
struct ravb_tstamp_skb {
struct list_head list;
@@ -1033,6 +1041,7 @@ struct ravb_private {
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
unsigned wol_enabled:1;
+ int num_tx_desc; /* TX descriptors per packet */
};
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index aff5516b781e..defed0d0c51d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -182,6 +182,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
struct ravb_private *priv = netdev_priv(ndev);
struct net_device_stats *stats = &priv->stats[q];
+ int num_tx_desc = priv->num_tx_desc;
struct ravb_tx_desc *desc;
int free_num = 0;
int entry;
@@ -191,7 +192,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
bool txed;
entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
- NUM_TX_DESC);
+ num_tx_desc);
desc = &priv->tx_ring[q][entry];
txed = desc->die_dt == DT_FEMPTY;
if (free_txed_only && !txed)
@@ -200,12 +201,12 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
dma_rmb();
size = le16_to_cpu(desc->ds_tagl) & TX_DS;
/* Free the original skb. */
- if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+ if (priv->tx_skb[q][entry / num_tx_desc]) {
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
size, DMA_TO_DEVICE);
/* Last packet descriptor? */
- if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
- entry /= NUM_TX_DESC;
+ if (entry % num_tx_desc == num_tx_desc - 1) {
+ entry /= num_tx_desc;
dev_kfree_skb_any(priv->tx_skb[q][entry]);
priv->tx_skb[q][entry] = NULL;
if (txed)
@@ -224,6 +225,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
static void ravb_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ int num_tx_desc = priv->num_tx_desc;
int ring_size;
int i;
@@ -249,7 +251,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
ravb_tx_free(ndev, q, false);
ring_size = sizeof(struct ravb_tx_desc) *
- (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+ (priv->num_tx_ring[q] * num_tx_desc + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
@@ -278,12 +280,13 @@ static void ravb_ring_free(struct net_device *ndev, int q)
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ int num_tx_desc = priv->num_tx_desc;
struct ravb_ex_rx_desc *rx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
- NUM_TX_DESC;
+ num_tx_desc;
dma_addr_t dma_addr;
int i;
@@ -318,8 +321,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
i++, tx_desc++) {
tx_desc->die_dt = DT_EEMPTY;
- tx_desc++;
- tx_desc->die_dt = DT_EEMPTY;
+ if (num_tx_desc > 1) {
+ tx_desc++;
+ tx_desc->die_dt = DT_EEMPTY;
+ }
}
tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
tx_desc->die_dt = DT_LINKFIX; /* type */
@@ -339,6 +344,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
+ int num_tx_desc = priv->num_tx_desc;
struct sk_buff *skb;
int ring_size;
int i;
@@ -362,11 +368,13 @@ static int ravb_ring_init(struct net_device *ndev, int q)
priv->rx_skb[q][i] = skb;
}
- /* Allocate rings for the aligned buffers */
- priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
- DPTR_ALIGN - 1, GFP_KERNEL);
- if (!priv->tx_align[q])
- goto error;
+ if (num_tx_desc > 1) {
+ /* Allocate rings for the aligned buffers */
+ priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+ DPTR_ALIGN - 1, GFP_KERNEL);
+ if (!priv->tx_align[q])
+ goto error;
+ }
/* Allocate all RX descriptors. */
ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
@@ -380,7 +388,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
/* Allocate all TX descriptors. */
ring_size = sizeof(struct ravb_tx_desc) *
- (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+ (priv->num_tx_ring[q] * num_tx_desc + 1);
priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
&priv->tx_desc_dma[q],
GFP_KERNEL);
@@ -739,10 +747,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
u32 eis, ris2;
eis = ravb_read(ndev, EIS);
- ravb_write(ndev, ~EIS_QFS, EIS);
+ ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
if (eis & EIS_QFS) {
ris2 = ravb_read(ndev, RIS2);
- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+ RIS2);
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF0)
@@ -795,7 +804,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
u32 tis = ravb_read(ndev, TIS);
if (tis & TIS_TFUF) {
- ravb_write(ndev, ~TIS_TFUF, TIS);
+ ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
ravb_get_tx_tstamp(ndev);
return true;
}
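/* A hedged note on the ~(... | *_RESERVED) pattern used throughout:
 * these status registers are cleared by writing 0 to the relevant bit,
 * so the driver writes the complement of the bits it wants to ack.
 * Folding the reserved mask into that complement keeps the write from
 * putting 1s into reserved bits, which the hardware manual presumably
 * requires to be written as 0.
 */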
@@ -930,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Processing RX Descriptor Ring */
if (ris0 & mask) {
/* Clear RX interrupt */
- ravb_write(ndev, ~mask, RIS0);
+ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
if (ravb_rx(ndev, &quota, q))
goto out;
}
@@ -938,7 +947,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
if (tis & mask) {
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
- ravb_write(ndev, ~mask, TIS);
+ ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();
@@ -1073,8 +1082,11 @@ static int ravb_phy_init(struct net_device *ndev)
netdev_info(ndev, "limited PHY to 100Mbit/s\n");
}
- /* 10BASE is not supported */
- phydev->supported &= ~PHY_10BT_FEATURES;
+ /* 10BASE, Pause and Asym Pause are not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
phy_attached_info(phydev);
@@ -1484,6 +1496,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ int num_tx_desc = priv->num_tx_desc;
u16 q = skb_get_queue_mapping(skb);
struct ravb_tstamp_skb *ts_skb;
struct ravb_tx_desc *desc;
@@ -1495,7 +1508,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_lock_irqsave(&priv->lock, flags);
if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
- NUM_TX_DESC) {
+ num_tx_desc) {
netif_err(priv, tx_queued, ndev,
"still transmitting with the full ring!\n");
netif_stop_subqueue(ndev, q);
@@ -1506,41 +1519,55 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_put_padto(skb, ETH_ZLEN))
goto exit;
- entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
- priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
-
- buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
- entry / NUM_TX_DESC * DPTR_ALIGN;
- len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
- /* Zero length DMA descriptors are problematic as they seem to
- * terminate DMA transfers. Avoid them by simply using a length of
- * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
- *
- * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
- * data by the call to skb_put_padto() above this is safe with
- * respect to both the length of the first DMA descriptor (len)
- * overflowing the available data and the length of the second DMA
- * descriptor (skb->len - len) being negative.
- */
- if (len == 0)
- len = DPTR_ALIGN;
+ entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
+ priv->tx_skb[q][entry / num_tx_desc] = skb;
+
+ if (num_tx_desc > 1) {
+ buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+ entry / num_tx_desc * DPTR_ALIGN;
+ len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+
+ /* Zero length DMA descriptors are problematic as they seem
+ * to terminate DMA transfers. Avoid them by simply using a
+ * length of DPTR_ALIGN (4) when skb data is aligned to
+ * DPTR_ALIGN.
+ *
+ * As skb is guaranteed to have at least ETH_ZLEN (60)
+ * bytes of data by the call to skb_put_padto() above this
+ * is safe with respect to both the length of the first DMA
+ * descriptor (len) overflowing the available data and the
+ * length of the second DMA descriptor (skb->len - len)
+ * being negative.
+ */
+ if (len == 0)
+ len = DPTR_ALIGN;
- memcpy(buffer, skb->data, len);
- dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- goto drop;
+ memcpy(buffer, skb->data, len);
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto drop;
- desc = &priv->tx_ring[q][entry];
- desc->ds_tagl = cpu_to_le16(len);
- desc->dptr = cpu_to_le32(dma_addr);
+ desc = &priv->tx_ring[q][entry];
+ desc->ds_tagl = cpu_to_le16(len);
+ desc->dptr = cpu_to_le32(dma_addr);
- buffer = skb->data + len;
- len = skb->len - len;
- dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- goto unmap;
+ buffer = skb->data + len;
+ len = skb->len - len;
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto unmap;
- desc++;
+ desc++;
+ } else {
+ desc = &priv->tx_ring[q][entry];
+ len = skb->len;
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto drop;
+ }
desc->ds_tagl = cpu_to_le16(len);
desc->dptr = cpu_to_le32(dma_addr);
@@ -1548,9 +1575,11 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (q == RAVB_NC) {
ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
if (!ts_skb) {
- desc--;
- dma_unmap_single(ndev->dev.parent, dma_addr, len,
- DMA_TO_DEVICE);
+ if (num_tx_desc > 1) {
+ desc--;
+ dma_unmap_single(ndev->dev.parent, dma_addr,
+ len, DMA_TO_DEVICE);
+ }
goto unmap;
}
ts_skb->skb = skb;
@@ -1567,15 +1596,18 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
/* Descriptor type must be set after all the above writes */
dma_wmb();
- desc->die_dt = DT_FEND;
- desc--;
- desc->die_dt = DT_FSTART;
-
+ if (num_tx_desc > 1) {
+ desc->die_dt = DT_FEND;
+ desc--;
+ desc->die_dt = DT_FSTART;
+ } else {
+ desc->die_dt = DT_FSINGLE;
+ }
ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
- priv->cur_tx[q] += NUM_TX_DESC;
+ priv->cur_tx[q] += num_tx_desc;
if (priv->cur_tx[q] - priv->dirty_tx[q] >
- (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+ (priv->num_tx_ring[q] - 1) * num_tx_desc &&
!ravb_tx_free(ndev, q, true))
netif_stop_subqueue(ndev, q);
@@ -1589,7 +1621,7 @@ unmap:
le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
dev_kfree_skb_any(skb);
- priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
+ priv->tx_skb[q][entry / num_tx_desc] = NULL;
goto exit;
}
@@ -2075,6 +2107,9 @@ static int ravb_probe(struct platform_device *pdev)
ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
+ priv->num_tx_desc = chip_id == RCAR_GEN2 ?
+ NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3;
+
/* Set function */
ndev->netdev_ops = &ravb_netdev_ops;
ndev->ethtool_ops = &ravb_ethtool_ops;
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 0721b5c35d91..dce2a40a31e3 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -315,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
}
}
- ravb_write(ndev, ~gis, GIS);
+ ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
}
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index aeafdb9ac015..beb06628f22d 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -371,7 +371,7 @@ static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
- static struct rocker_desc_info *desc_info;
+ struct rocker_desc_info *desc_info;
u32 head = __pos_inc(info->head, info->size);
desc_info = &info->desc_info[info->head];
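/* Note on dropping `static` above: a function-local static would make
 * the returned descriptor pointer shared state across all rings and
 * callers -- needless, since it is reassigned before every use, and
 * unsafe if two rings are ever serviced concurrently. An ordinary
 * automatic variable is what was intended.
 */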
@@ -402,7 +402,7 @@ static void rocker_desc_head_set(const struct rocker *rocker,
static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
- static struct rocker_desc_info *desc_info;
+ struct rocker_desc_info *desc_info;
if (info->tail == info->head)
return NULL; /* nothing to be done between head and tail */
@@ -2728,6 +2728,7 @@ rocker_fdb_offload_notify(struct rocker_port *rocker_port,
info.addr = recv_info->addr;
info.vid = recv_info->vid;
+ info.offloaded = true;
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
rocker_port->dev, &info.info);
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index a9da1ad4b4f2..690aee88f0eb 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -298,8 +298,8 @@ static int sxgbe_init_phy(struct net_device *ndev)
/* Stop Advertising 1000BASE Capability if interface is not GMII */
if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
(phy_iface == PHY_INTERFACE_MODE_RMII))
- phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full);
+ phy_set_max_speed(phydev, SPEED_100);
+
if (phydev->phy_id == 0) {
phy_disconnect(phydev);
return -ENODEV;
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index c5bc124b41a9..d1bb73bf9914 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -77,7 +77,8 @@ static void ether3_setmulticastlist(struct net_device *dev);
static int ether3_rx(struct net_device *dev, unsigned int maxcnt);
static void ether3_tx(struct net_device *dev);
static int ether3_open (struct net_device *dev);
-static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether3_sendpacket(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t ether3_interrupt (int irq, void *dev_id);
static int ether3_close (struct net_device *dev);
static void ether3_setmulticastlist (struct net_device *dev);
@@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev)
/*
* Transmit a packet
*/
-static int
+static netdev_tx_t
ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 573691bc3b71..70cce63a6081 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev)
return 0;
}
-static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
struct hpc3_ethregs *hregs = sp->hregs;
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 330233286e78..98fe7e762e17 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2208,29 +2208,6 @@ static void efx_fini_napi(struct efx_nic *efx)
/**************************************************************************
*
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void efx_netpoll(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
.ndo_get_phys_port_id = efx_get_phys_port_id,
.ndo_get_phys_port_name = efx_get_phys_port_name,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = efx_netpoll,
-#endif
.ndo_setup_tc = efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
@@ -3847,7 +3821,6 @@ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
struct efx_nic *efx = pci_get_drvdata(pdev);
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int rc;
if (pci_enable_device(pdev)) {
netif_err(efx, hw, efx->net_dev,
@@ -3855,13 +3828,6 @@ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
status = PCI_ERS_RESULT_DISCONNECT;
}
- rc = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
- /* Non-fatal error. Continue. */
- }
-
return status;
}
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index dd5530a4f8c8..8b1f94d7a6c5 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2054,29 +2054,6 @@ static void ef4_fini_napi(struct ef4_nic *efx)
/**************************************************************************
*
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void ef4_netpoll(struct net_device *net_dev)
-{
- struct ef4_nic *efx = netdev_priv(net_dev);
- struct ef4_channel *channel;
-
- ef4_for_each_channel(channel, efx)
- ef4_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = {
.ndo_set_mac_address = ef4_set_mac_address,
.ndo_set_rx_mode = ef4_set_rx_mode,
.ndo_set_features = ef4_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ef4_netpoll,
-#endif
.ndo_setup_tc = ef4_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = ef4_filter_rfs,
@@ -3186,7 +3160,6 @@ static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
{
struct ef4_nic *efx = pci_get_drvdata(pdev);
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int rc;
if (pci_enable_device(pdev)) {
netif_err(efx, hw, efx->net_dev,
@@ -3194,13 +3167,6 @@ static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
status = PCI_ERS_RESULT_DISCONNECT;
}
- rc = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
- /* Non-fatal error. Continue. */
- }
-
return status;
}
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 18d533fdf14c..3140999642ba 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -99,7 +99,7 @@ struct ioc3_private {
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static inline void ioc3_stop(struct ioc3_private *ip);
@@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = {
.remove = ioc3_remove_one,
};
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long data;
struct ioc3_private *ip = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index ea55abd62ec7..703fbbefea44 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
/*
* Transmit a packet (called by the kernel)
*/
-static int meth_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
{
struct meth_private *priv = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index b1b53f6c452f..8355dfbb8ec3 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -513,7 +513,8 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
* now, or set the card to generate an interrupt when ready
* for the packet.
*/
-static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned int free;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index b944828f9ea3..4823b6a51134 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -638,7 +638,8 @@ done: if (!THROTTLE_TX_PKTS)
* now, or set the card to generate an interrupt when ready
* for the packet.
*/
-static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
@@ -2446,8 +2447,7 @@ static int smc_drv_remove(struct platform_device *pdev)
static int smc_drv_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
if (ndev) {
if (netif_running(ndev)) {
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index f0afb88d7bc2..99a5a8a7c777 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1048,10 +1048,10 @@ static int smsc911x_mii_probe(struct net_device *dev)
phy_attached_info(phydev);
+ phy_set_max_speed(phydev, SPEED_100);
+
/* mask with MAC supported features */
- phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- phydev->advertising = phydev->supported;
+ phy_support_asym_pause(phydev);
pdata->last_duplex = -1;
pdata->last_carrier = -1;
@@ -1786,7 +1786,8 @@ static int smsc911x_stop(struct net_device *dev)
}
/* Entry point for transmitting a packet */
-static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int freespace;
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 2fa3c1d03abc..9b6366b20110 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1135,10 +1135,10 @@ static int smsc9420_mii_probe(struct net_device *dev)
return PTR_ERR(phydev);
}
+ phy_set_max_speed(phydev, SPEED_100);
+
/* mask with MAC supported features */
- phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- phydev->advertising = phydev->supported;
+ phy_support_asym_pause(phydev);
phy_attached_info(phydev);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 7aa5ebb6766c..d9d0d03e4ce7 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -274,6 +274,7 @@ struct netsec_priv {
struct clk *clk;
u32 msg_enable;
u32 freq;
+ u32 phy_addr;
bool rx_cksum_offload_flag;
};
@@ -431,9 +432,12 @@ static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
return 0;
}
+static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);
+
static int netsec_phy_write(struct mii_bus *bus,
int phy_addr, int reg, u16 val)
{
+ int status;
struct netsec_priv *priv = bus->priv;
if (netsec_mac_write(priv, GMAC_REG_GDR, val))
@@ -446,8 +450,19 @@ static int netsec_phy_write(struct mii_bus *bus,
GMAC_REG_SHIFT_CR_GAR)))
return -ETIMEDOUT;
- return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
- NETSEC_GMAC_GAR_REG_GB);
+ status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+ NETSEC_GMAC_GAR_REG_GB);
+
+ /* The Developerbox implements an RTL8211E PHY, which has a
+ * compatibility problem with the F_GMAC4: the RTL8211E expects
+ * the MDC clock to keep toggling for several clock cycles with
+ * MDIO high before entering the IDLE state. To meet this
+ * requirement, the driver issues a dummy read (e.g. of the
+ * PHYID1 register at offset 0x2) right after every write.
+ */
+ netsec_phy_read(bus, phy_addr, MII_PHYSID1);
+
+ return status;
}
static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
@@ -735,8 +750,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
u16 idx = dring->tail;
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
- if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
+ if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
+ /* reading the register clears the irq */
+ netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
break;
+ }
/* This barrier is needed to keep us from reading
* any other fields out of the netsec_de until we have
@@ -937,6 +955,9 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
dring->head = 0;
dring->tail = 0;
dring->pkt_cnt = 0;
+
+ if (id == NETSEC_RING_TX)
+ netdev_reset_queue(priv->ndev);
}
static void netsec_free_dring(struct netsec_priv *priv, int id)
@@ -1340,11 +1361,11 @@ static int netsec_netdev_stop(struct net_device *ndev)
netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
- ret = netsec_reset_hardware(priv, false);
-
phy_stop(ndev->phydev);
phy_disconnect(ndev->phydev);
+ ret = netsec_reset_hardware(priv, false);
+
pm_runtime_put_sync(priv->dev);
return ret;
@@ -1354,6 +1375,7 @@ static int netsec_netdev_init(struct net_device *ndev)
{
struct netsec_priv *priv = netdev_priv(ndev);
int ret;
+ u16 data;
ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
if (ret)
@@ -1363,6 +1385,11 @@ static int netsec_netdev_init(struct net_device *ndev)
if (ret)
goto err1;
+ /* set phy power down */
+ data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
+ BMCR_PDOWN;
+ netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
+
ret = netsec_reset_hardware(priv, true);
if (ret)
goto err2;
@@ -1412,7 +1439,7 @@ static const struct net_device_ops netsec_netdev_ops = {
};
static int netsec_of_probe(struct platform_device *pdev,
- struct netsec_priv *priv)
+ struct netsec_priv *priv, u32 *phy_addr)
{
priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!priv->phy_np) {
@@ -1420,6 +1447,8 @@ static int netsec_of_probe(struct platform_device *pdev,
return -EINVAL;
}
+ *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);
+
priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "phy_ref_clk not found\n");
@@ -1620,12 +1649,14 @@ static int netsec_probe(struct platform_device *pdev)
}
if (dev_of_node(&pdev->dev))
- ret = netsec_of_probe(pdev, priv);
+ ret = netsec_of_probe(pdev, priv, &phy_addr);
else
ret = netsec_acpi_probe(pdev, priv, &phy_addr);
if (ret)
goto free_ndev;
+ priv->phy_addr = phy_addr;
+
if (!priv->freq) {
dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
ret = -ENODEV;
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index f7ecceeb1e28..6732f5cbde08 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -461,16 +461,7 @@ static int ave_ethtool_set_pauseparam(struct net_device *ndev,
priv->pause_rx = pause->rx_pause;
priv->pause_tx = pause->tx_pause;
- phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
- if (pause->rx_pause)
- phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
- if (pause->tx_pause)
- phydev->advertising ^= ADVERTISED_Asym_Pause;
-
- if (pause->autoneg) {
- if (netif_running(ndev))
- phy_start_aneg(phydev);
- }
+ phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
return 0;
}
@@ -904,11 +895,11 @@ static void ave_rxfifo_reset(struct net_device *ndev)
/* assert reset */
writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
- usleep_range(40, 50);
+ udelay(50);
/* negate reset */
writel(0, priv->base + AVE_GRR);
- usleep_range(10, 20);
+ udelay(20);
/* negate interrupt status */
writel(AVE_GI_RXOVF, priv->base + AVE_GISR);
@@ -1125,11 +1116,8 @@ static void ave_phy_adjust_link(struct net_device *ndev)
rmt_adv |= LPA_PAUSE_CAP;
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- if (phydev->advertising & ADVERTISED_Pause)
- lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (phydev->advertising & ADVERTISED_Asym_Pause)
- lcl_adv |= ADVERTISE_PAUSE_ASYM;
+ lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (cap & FLOW_CTRL_TX)
txcr |= AVE_TXCR_FLOCTR;
@@ -1223,11 +1211,10 @@ static int ave_init(struct net_device *ndev)
phy_ethtool_get_wol(phydev, &wol);
device_set_wakeup_capable(&ndev->dev, !!wol.supported);
- if (!phy_interface_is_rgmii(phydev)) {
- phydev->supported &= ~PHY_GBIT_FEATURES;
- phydev->supported |= PHY_BASIC_FEATURES;
- }
- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ if (!phy_interface_is_rgmii(phydev))
+ phy_set_max_speed(phydev, SPEED_100);
+
+ phy_support_asym_pause(phydev);
phy_attached_info(phydev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1854f270ad66..b1b305f8f414 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -258,10 +258,10 @@ struct stmmac_safety_stats {
#define MAX_DMA_RIWT 0xff
#define MIN_DMA_RIWT 0x20
/* Tx coalesce parameters */
-#define STMMAC_COAL_TX_TIMER 40000
+#define STMMAC_COAL_TX_TIMER 1000
#define STMMAC_MAX_COAL_TX_TICK 100000
#define STMMAC_TX_MAX_FRAMES 256
-#define STMMAC_TX_FRAMES 64
+#define STMMAC_TX_FRAMES 25
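+
+/* Interpretation (assuming STMMAC_COAL_TIMER() takes microseconds):
+ * the defaults are a 1ms software timer and at most 25 coalesced
+ * frames per TX queue, favouring completion latency over interrupt
+ * rate; STMMAC_MAX_COAL_TX_TICK caps user tuning at 100ms.
+ */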
/* Packets types */
enum packets_types {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 0a80fa25afe3..d6bb953685fa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -119,11 +119,23 @@
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
+#define XGMAC_Q2TCMAP GENMASK(10, 8)
+#define XGMAC_Q2TCMAP_SHIFT 8
#define XGMAC_TTC GENMASK(6, 4)
#define XGMAC_TTC_SHIFT 4
#define XGMAC_TXQEN GENMASK(3, 2)
#define XGMAC_TXQEN_SHIFT 2
#define XGMAC_TSF BIT(1)
+#define XGMAC_MTL_TCx_ETS_CONTROL(x) (0x00001110 + (0x80 * (x)))
+#define XGMAC_MTL_TCx_QUANTUM_WEIGHT(x) (0x00001118 + (0x80 * (x)))
+#define XGMAC_MTL_TCx_SENDSLOPE(x) (0x0000111c + (0x80 * (x)))
+#define XGMAC_MTL_TCx_HICREDIT(x) (0x00001120 + (0x80 * (x)))
+#define XGMAC_MTL_TCx_LOCREDIT(x) (0x00001124 + (0x80 * (x)))
+#define XGMAC_CC BIT(3)
+#define XGMAC_TSA GENMASK(1, 0)
+#define XGMAC_SP (0x0 << 0)
+#define XGMAC_CBS (0x1 << 0)
+#define XGMAC_ETS (0x2 << 0)
#define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x)))
#define XGMAC_RQS GENMASK(25, 16)
#define XGMAC_RQS_SHIFT 16
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index d182f82f7b58..64b8cb88ea45 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -177,6 +177,23 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
writel(value, ioaddr + reg);
}
+static void dwxgmac2_config_cbs(struct mac_device_info *hw,
+ u32 send_slope, u32 idle_slope,
+ u32 high_credit, u32 low_credit, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
+ writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
+ writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
+ writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
+
+ value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
+ value |= XGMAC_CC | XGMAC_CBS;
+ writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
+}
+
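+/* Hedged background: the block above maps to the 802.1Qav credit-based
+ * shaper parameters -- idleSlope (written to the quantum/weight
+ * register), sendSlope, hiCredit and loCredit -- while XGMAC_CC turns
+ * on credit control and XGMAC_CBS selects the shaper as the
+ * transmission selection algorithm for the traffic class.
+ */
+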
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
@@ -316,7 +333,7 @@ const struct stmmac_ops dwxgmac210_ops = {
.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
.set_mtl_tx_queue_weight = NULL,
.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
- .config_cbs = NULL,
+ .config_cbs = dwxgmac2_config_cbs,
.dump_regs = NULL,
.host_irq_status = dwxgmac2_host_irq_status,
.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 20909036e002..6c5092e7771c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -182,6 +182,9 @@ static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
value |= 0x7 << XGMAC_TTC_SHIFT;
}
+ /* Use static TC to Queue mapping */
+ value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;
+
value &= ~XGMAC_TXQEN;
if (qmode != MTL_QUEUE_AVB)
value |= 0x2 << XGMAC_TXQEN_SHIFT;
@@ -374,6 +377,21 @@ static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
}
+static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
+{
+ u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+
+ value &= ~XGMAC_TXQEN;
+ if (qmode != MTL_QUEUE_AVB) {
+ value |= 0x2 << XGMAC_TXQEN_SHIFT;
+ writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
+ } else {
+ value |= 0x1 << XGMAC_TXQEN_SHIFT;
+ }
+
+ writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+}
+
static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
{
u32 value;
@@ -407,5 +425,6 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
.set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
.enable_tso = dwxgmac2_enable_tso,
+ .qmode = dwxgmac2_qmode,
.set_bfsize = dwxgmac2_set_bfsize,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 357309a6d6a5..81b966a8261b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -133,7 +133,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwmac4_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = NULL,
+ .tc = &dwmac510_tc_ops,
.setup = dwmac4_setup,
.quirks = stmmac_dwmac4_quirks,
}, {
@@ -150,7 +150,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwmac410_ops,
.hwtimestamp = &stmmac_ptp,
.mode = &dwmac4_ring_mode_ops,
- .tc = NULL,
+ .tc = &dwmac510_tc_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -167,7 +167,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwmac410_ops,
.hwtimestamp = &stmmac_ptp,
.mode = &dwmac4_ring_mode_ops,
- .tc = NULL,
+ .tc = &dwmac510_tc_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -201,7 +201,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwxgmac210_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = NULL,
+ .tc = &dwmac510_tc_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
},
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index a7ffc73fffe8..abc3f85270cd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
- STMMAC_RING_MODE, 0, false, skb->len);
+ STMMAC_RING_MODE, 1, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
@@ -91,7 +91,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
tx_q->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
- STMMAC_RING_MODE, 0, true, skb->len);
+ STMMAC_RING_MODE, 1, true, skb->len);
}
tx_q->cur_tx = entry;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c0a855b7ab3b..63e1064b27a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -48,6 +48,8 @@ struct stmmac_tx_info {
/* Frequently used values are kept adjacent for cache effect */
struct stmmac_tx_queue {
+ u32 tx_count_frames;
+ struct timer_list txtimer;
u32 queue_index;
struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -73,7 +75,14 @@ struct stmmac_rx_queue {
u32 rx_zeroc_thresh;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
+};
+
+struct stmmac_channel {
struct napi_struct napi ____cacheline_aligned_in_smp;
+ struct stmmac_priv *priv_data;
+ u32 index;
+ int has_rx;
+ int has_tx;
};
struct stmmac_tc_entry {
@@ -109,14 +118,12 @@ struct stmmac_pps_cfg {
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
- u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
int tx_coalesce;
int hwts_tx_en;
bool tx_path_in_lpi_mode;
- struct timer_list txtimer;
bool tso;
unsigned int dma_buf_sz;
@@ -137,6 +144,9 @@ struct stmmac_priv {
/* TX Queue */
struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+ /* Generic channel for NAPI */
+ struct stmmac_channel channel[STMMAC_CH_MAX];
+
bool oldlink;
int speed;
int oldduplex;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9f458bb16f2a..076a8be18d67 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -148,12 +148,14 @@ static void stmmac_verify_args(void)
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
- napi_disable(&rx_q->napi);
+ napi_disable(&ch->napi);
}
}
@@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
- napi_enable(&rx_q->napi);
+ napi_enable(&ch->napi);
}
}
@@ -987,17 +991,20 @@ static int stmmac_init_phy(struct net_device *dev)
if ((interface == PHY_INTERFACE_MODE_MII) ||
(interface == PHY_INTERFACE_MODE_RMII) ||
(max_speed < 1000 && max_speed > 0))
- phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full);
+ phy_set_max_speed(phydev, SPEED_100);
/*
* Half-duplex mode is not supported with multiqueue;
* half-duplex can only work with a single queue.
*/
- if (tx_cnt > 1)
- phydev->supported &= ~(SUPPORTED_1000baseT_Half |
- SUPPORTED_100baseT_Half |
- SUPPORTED_10baseT_Half);
+ if (tx_cnt > 1) {
+ phy_remove_link_mode(phydev,
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ }
/*
* Broken HW is sometimes missing the pull-up resistor on the
@@ -1843,18 +1850,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
* @queue: TX queue index
* Description: it reclaims the transmit resources after transmission completes.
*/
-static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
- unsigned int entry;
+ unsigned int entry, count = 0;
- netif_tx_lock(priv->dev);
+ __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
priv->xstats.tx_clean++;
entry = tx_q->dirty_tx;
- while (entry != tx_q->cur_tx) {
+ while ((entry != tx_q->cur_tx) && (count < budget)) {
struct sk_buff *skb = tx_q->tx_skbuff[entry];
struct dma_desc *p;
int status;
@@ -1870,6 +1877,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
if (unlikely(status & tx_dma_own))
break;
+ count++;
+
/* Make sure descriptor fields are read after reading
* the own bit.
*/
@@ -1937,7 +1946,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
stmmac_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
- netif_tx_unlock(priv->dev);
+
+ __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+ return count;
}
/**
@@ -2020,6 +2032,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
return false;
}
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
+{
+ int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+ &priv->xstats, chan);
+ struct stmmac_channel *ch = &priv->channel[chan];
+ bool needs_work = false;
+
+ if ((status & handle_rx) && ch->has_rx) {
+ needs_work = true;
+ } else {
+ status &= ~handle_rx;
+ }
+
+ if ((status & handle_tx) && ch->has_tx) {
+ needs_work = true;
+ } else {
+ status &= ~handle_tx;
+ }
+
+ if (needs_work && napi_schedule_prep(&ch->napi)) {
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ __napi_schedule(&ch->napi);
+ }
+
+ return status;
+}
+
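+/* Contract assumed above: each DMA channel owns one stmmac_channel
+ * with a single NAPI instance serving both directions; status bits for
+ * a direction the channel does not carry (ch->has_rx / ch->has_tx
+ * unset) are masked out so they are not reported back to
+ * stmmac_dma_interrupt() as pending work.
+ */
+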
/**
* stmmac_dma_interrupt - DMA ISR
* @priv: driver private structure
@@ -2034,57 +2073,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
u32 channels_to_check = tx_channel_count > rx_channel_count ?
tx_channel_count : rx_channel_count;
u32 chan;
- bool poll_scheduled = false;
int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
/* Make sure we never check beyond our status buffer. */
if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
channels_to_check = ARRAY_SIZE(status);
- /* Each DMA channel can be used for rx and tx simultaneously, yet
- * napi_struct is embedded in struct stmmac_rx_queue rather than in a
- * stmmac_channel struct.
- * Because of this, stmmac_poll currently checks (and possibly wakes)
- * all tx queues rather than just a single tx queue.
- */
for (chan = 0; chan < channels_to_check; chan++)
- status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
- &priv->xstats, chan);
-
- for (chan = 0; chan < rx_channel_count; chan++) {
- if (likely(status[chan] & handle_rx)) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
-
- if (likely(napi_schedule_prep(&rx_q->napi))) {
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
- __napi_schedule(&rx_q->napi);
- poll_scheduled = true;
- }
- }
- }
-
- /* If we scheduled poll, we already know that tx queues will be checked.
- * If we didn't schedule poll, see if any DMA channel (used by tx) has a
- * completed transmission, if so, call stmmac_poll (once).
- */
- if (!poll_scheduled) {
- for (chan = 0; chan < tx_channel_count; chan++) {
- if (status[chan] & handle_tx) {
- /* It doesn't matter what rx queue we choose
- * here. We use 0 since it always exists.
- */
- struct stmmac_rx_queue *rx_q =
- &priv->rx_queue[0];
-
- if (likely(napi_schedule_prep(&rx_q->napi))) {
- stmmac_disable_dma_irq(priv,
- priv->ioaddr, chan);
- __napi_schedule(&rx_q->napi);
- }
- break;
- }
- }
- }
+ status[chan] = stmmac_napi_check(priv, chan);
for (chan = 0; chan < tx_channel_count; chan++) {
if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
@@ -2220,8 +2216,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy +
- (DMA_TX_SIZE * sizeof(struct dma_desc));
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
}
@@ -2233,6 +2228,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
return ret;
}
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
+}
+
/**
* stmmac_tx_timer - mitigation sw timer for tx.
* @data: data pointer
@@ -2241,13 +2243,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
*/
static void stmmac_tx_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = from_timer(priv, t, txtimer);
- u32 tx_queues_count = priv->plat->tx_queues_to_use;
- u32 queue;
+ struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ struct stmmac_channel *ch;
+
+ ch = &priv->channel[tx_q->queue_index];
- /* let's scan all the tx queues */
- for (queue = 0; queue < tx_queues_count; queue++)
- stmmac_tx_clean(priv, queue);
+ if (likely(napi_schedule_prep(&ch->napi)))
+ __napi_schedule(&ch->napi);
}
/**
@@ -2260,11 +2263,17 @@ static void stmmac_tx_timer(struct timer_list *t)
*/
static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
+ u32 tx_channel_count = priv->plat->tx_queues_to_use;
+ u32 chan;
+
priv->tx_coal_frames = STMMAC_TX_FRAMES;
priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
- timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
- priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
- add_timer(&priv->txtimer);
+
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+ timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
+ }
}
static void stmmac_set_rings_length(struct stmmac_priv *priv)
@@ -2592,6 +2601,7 @@ static void stmmac_hw_teardown(struct net_device *dev)
static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
int ret;
stmmac_check_ether_addr(priv);
@@ -2688,7 +2698,9 @@ irq_error:
if (dev->phydev)
phy_stop(dev->phydev);
- del_timer_sync(&priv->txtimer);
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ del_timer_sync(&priv->tx_queue[chan].txtimer);
+
stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv);
@@ -2708,6 +2720,7 @@ dma_desc_error:
static int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
if (priv->eee_enabled)
del_timer_sync(&priv->eee_ctrl_timer);
@@ -2722,7 +2735,8 @@ static int stmmac_release(struct net_device *dev)
stmmac_disable_all_queues(priv);
- del_timer_sync(&priv->txtimer);
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ del_timer_sync(&priv->tx_queue[chan].txtimer);
/* Free the IRQ lines */
free_irq(dev->irq, dev);
@@ -2936,14 +2950,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_tso_nfrags += nfrags;
/* Manage tx mitigation */
- priv->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
- mod_timer(&priv->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer));
- } else {
- priv->tx_count_frames = 0;
+ tx_q->tx_count_frames += nfrags + 1;
+ if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ tx_q->tx_count_frames = 0;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
@@ -2992,6 +3005,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK;
@@ -3146,14 +3160,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* This approach takes care about the fragments: desc is the first
* element in case of no SG.
*/
- priv->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
- mod_timer(&priv->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer));
- } else {
- priv->tx_count_frames = 0;
+ tx_q->tx_count_frames += nfrags + 1;
+ if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ tx_q->tx_count_frames = 0;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
@@ -3199,6 +3212,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK;
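
Note on the two tx_tail_addr assignments added above: together with the stmmac_init_dma_engine() change earlier (tail initialized to the ring base rather than one full ring past it), the tail pointer written to hardware now always equals dma_tx_phy + cur_tx * sizeof(struct dma_desc), i.e. it points just past the last descriptor handed to the DMA engine instead of at a fixed address outside the ring.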
@@ -3319,6 +3334,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
unsigned int entry = rx_q->cur_rx;
int coe = priv->hw->rx_csum;
unsigned int next_entry;
@@ -3491,7 +3507,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&rx_q->napi, skb);
+ napi_gro_receive(&ch->napi, skb);
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
@@ -3514,27 +3530,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* Description :
* To look at the incoming frames and clear the tx resources.
*/
-static int stmmac_poll(struct napi_struct *napi, int budget)
+static int stmmac_napi_poll(struct napi_struct *napi, int budget)
{
- struct stmmac_rx_queue *rx_q =
- container_of(napi, struct stmmac_rx_queue, napi);
- struct stmmac_priv *priv = rx_q->priv_data;
- u32 tx_count = priv->plat->tx_queues_to_use;
- u32 chan = rx_q->queue_index;
- int work_done = 0;
- u32 queue;
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ int work_done = 0, work_rem = budget;
+ u32 chan = ch->index;
priv->xstats.napi_poll++;
- /* check all the queues */
- for (queue = 0; queue < tx_count; queue++)
- stmmac_tx_clean(priv, queue);
+ if (ch->has_tx) {
+ int done = stmmac_tx_clean(priv, work_rem, chan);
- work_done = stmmac_rx(priv, budget, rx_q->queue_index);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ work_done += done;
+ work_rem -= done;
+ }
+
+ if (ch->has_rx) {
+ int done = stmmac_rx(priv, work_rem, chan);
+
+ work_done += done;
+ work_rem -= done;
}
+
+ if (work_done < budget && napi_complete_done(napi, work_done))
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+
return work_done;
}
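
A detail worth keeping in mind about the completion test above: napi_complete_done() returns false when the poll must keep running (for instance when a busy-polling user owns the NAPI instance), so interrupts are re-enabled only when the budget was not exhausted and the completion actually took effect. The canonical idiom, as a kernel-context sketch with a hypothetical helper:

	if (work_done < budget && napi_complete_done(napi, work_done))
		example_enable_device_irq(priv);	/* hypothetical helper */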
@@ -4198,8 +4220,8 @@ int stmmac_dvr_probe(struct device *device,
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
+ u32 queue, maxq;
int ret = 0;
- u32 queue;
ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
MTL_MAX_TX_QUEUES,
@@ -4322,11 +4344,22 @@ int stmmac_dvr_probe(struct device *device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
- for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ /* Setup channels NAPI */
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
- netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
- (8 * priv->plat->rx_queues_to_use));
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ ch->priv_data = priv;
+ ch->index = queue;
+
+ if (queue < priv->plat->rx_queues_to_use)
+ ch->has_rx = true;
+ if (queue < priv->plat->tx_queues_to_use)
+ ch->has_tx = true;
+
+ netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
+ NAPI_POLL_WEIGHT);
}
mutex_init(&priv->lock);
@@ -4372,10 +4405,10 @@ error_netdev_register:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
- for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
- netif_napi_del(&rx_q->napi);
+ netif_napi_del(&ch->napi);
}
error_hw_init:
destroy_workqueue(priv->wq);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3609c7b696c7..2b800ce1d5bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
* Description:
* This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
* logic. This function validates a valid, supported configuration is
* selected, and defaults to 1 Unicast address if an unsupported
* configuration is selected.
@@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
int x = ucast_entries;
switch (x) {
- case 1:
- case 32:
+ case 1 ... 32:
case 64:
case 128:
break;
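
The 1 ... 32 label uses the GNU C case-range extension (supported by both gcc and clang, and used throughout the kernel); it is shorthand for writing the thirty-two consecutive case labels by hand. A standalone illustration:

#include <stdio.h>

/* Builds with gcc or clang; case ranges are a GNU C extension. */
static int validate_ucast_entries(int n)
{
	switch (n) {
	case 1 ... 32:	/* any count from 1 to 32 inclusive */
	case 64:
	case 128:
		return n;
	default:
		return 1;	/* unsupported count: default to 1 entry */
	}
}

int main(void)
{
	/* prints "20 64 1" */
	printf("%d %d %d\n", validate_ucast_entries(20),
	       validate_ucast_entries(64), validate_ucast_entries(100));
	return 0;
}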
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index d42f47f6c632..644e42c181ee 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -113,7 +113,7 @@ static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* Wrappers to common functions */
-static int vsw_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t vsw_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
return sunvnet_start_xmit_common(skb, dev, vsw_tx_port_find);
}
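
This and the similar conversions below align the drivers with the ->ndo_start_xmit() prototype, which is declared to return netdev_tx_t rather than a plain int. A kernel-context sketch of the expected shape, with hypothetical example_* helpers:

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (!example_ring_has_room(dev)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* core requeues the skb */
	}

	example_queue_skb(dev, skb);
	return NETDEV_TX_OK;	/* skb consumed */
}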
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index f047b2797156..720b7ac77f3b 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -950,7 +950,8 @@ static void bigmac_tx_timeout(struct net_device *dev)
}
/* Put a packet on the wire. */
-static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bigmac *bp = netdev_priv(dev);
int len, entry;
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 06da2f59fcbf..863fd602fd33 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2999,7 +2999,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
/* Now make sure pci_dev cookie is there. */
#ifdef CONFIG_SPARC
dp = pci_device_to_OF_node(pdev);
- strcpy(prom_name, dp->name);
+ snprintf(prom_name, sizeof(prom_name), "%pOFn", dp);
#else
if (is_quattro_p(pdev))
strcpy(prom_name, "SUNW,qfe");
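
The %pOFn conversions in this and the following hunks use the printk extension that prints a struct device_node's name, the sanctioned replacement for dereferencing node->name directly (the field is being phased out of struct device_node). It works in any printf-style kernel routine, as the snprintf() above shows; for example:

	/* Prints e.g. "probing ethernet-phy" for node /soc/ethernet-phy@1. */
	pr_info("probing %pOFn\n", dp);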
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 7fe0d5e33922..1468fa0a54e9 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -570,7 +570,7 @@ out:
}
/* Get a packet queued to go onto the wire. */
-static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
struct sunqe_buffers *qbufs = qep->buffers;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 12539b357a78..590172818b92 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -247,7 +247,7 @@ static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* Wrappers to common functions */
-static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find);
}
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index d8f4c3f28150..baa3088b475c 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1216,9 +1216,10 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
return skb;
}
-static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
- struct vnet_port *(*vnet_tx_port)
- (struct sk_buff *, struct net_device *))
+static netdev_tx_t
+vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
+ struct vnet_port *(*vnet_tx_port)
+ (struct sk_buff *, struct net_device *))
{
struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
@@ -1321,9 +1322,10 @@ out_dropped:
return NETDEV_TX_OK;
}
-int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
- struct vnet_port *(*vnet_tx_port)
- (struct sk_buff *, struct net_device *))
+netdev_tx_t
+sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
+ struct vnet_port *(*vnet_tx_port)
+ (struct sk_buff *, struct net_device *))
{
struct vnet_port *port = NULL;
struct vio_dring_state *dr;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h
index 1ea0b016580a..2b808d2482d6 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.h
+++ b/drivers/net/ethernet/sun/sunvnet_common.h
@@ -136,9 +136,10 @@ int sunvnet_close_common(struct net_device *dev);
void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp);
int sunvnet_set_mac_addr_common(struct net_device *dev, void *p);
void sunvnet_tx_timeout_common(struct net_device *dev);
-int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
- struct vnet_port *(*vnet_tx_port)
- (struct sk_buff *, struct net_device *));
+netdev_tx_t
+sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
+ struct vnet_port *(*vnet_tx_port)
+ (struct sk_buff *, struct net_device *));
#ifdef CONFIG_NET_POLL_CONTROLLER
void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp);
#endif
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 9263d638bd6d..f932923f7d56 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -41,6 +41,7 @@ config TI_DAVINCI_MDIO
config TI_DAVINCI_CPDMA
tristate "TI DaVinci CPDMA Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+ select GENERIC_ALLOCATOR
---help---
This driver supports TI's DaVinci CPDMA dma engine.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 832bce07c385..500f7ed8c58c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -484,13 +484,13 @@ enum {
};
#define CPSW_STAT(m) CPSW_STATS, \
- sizeof(((struct cpsw_hw_stats *)0)->m), \
+ FIELD_SIZEOF(struct cpsw_hw_stats, m), \
offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
- sizeof(((struct cpdma_chan_stats *)0)->m), \
+ FIELD_SIZEOF(struct cpdma_chan_stats, m), \
offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
- sizeof(((struct cpdma_chan_stats *)0)->m), \
+ FIELD_SIZEOF(struct cpdma_chan_stats, m), \
offsetof(struct cpdma_chan_stats, m)
static const struct cpsw_stats cpsw_gstrings_stats[] = {
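
FIELD_SIZEOF() (later renamed sizeof_field() in newer kernels) packages the null-pointer-cast idiom that the old macros spelled out by hand. A standalone sketch using the same definition as the kernel's:

#include <stdio.h>

#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

struct chan_stats {
	unsigned int good_dequeue;
	unsigned short teardown;
};

int main(void)
{
	/* prints "4 2" on typical ABIs */
	printf("%zu %zu\n",
	       FIELD_SIZEOF(struct chan_stats, good_dequeue),
	       FIELD_SIZEOF(struct chan_stats, teardown));
	return 0;
}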
@@ -570,16 +570,14 @@ static inline int cpsw_get_slave_port(u32 slave_num)
return slave_num + 1;
}
-static void cpsw_add_mcast(struct cpsw_priv *priv, u8 *addr)
+static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr)
{
struct cpsw_common *cpsw = priv->cpsw;
if (cpsw->data.dual_emac) {
struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
- int slave_port = cpsw_get_slave_port(slave->slave_num);
- cpsw_ale_add_mcast(cpsw->ale, addr,
- 1 << slave_port | ALE_PORT_HOST,
+ cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST,
ALE_VLAN, slave->port_vlan, 0);
return;
}
@@ -642,6 +640,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
+ __dev_mc_unsync(ndev, NULL);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -662,16 +661,35 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
}
}
-static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ cpsw_add_mcast(priv, addr);
+ return 0;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
- int vid;
+ int vid, flags;
- if (cpsw->data.dual_emac)
+ if (cpsw->data.dual_emac) {
vid = cpsw->slaves[priv->emac_port].port_vlan;
- else
- vid = cpsw->data.default_vlan;
+ flags = ALE_VLAN;
+ } else {
+ vid = 0;
+ flags = 0;
+ }
+
+ cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+ return 0;
+}
+
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
@@ -684,19 +702,9 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
}
/* Restore allmulti on vlans if necessary */
- cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI);
-
- /* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid);
+ cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
- if (!netdev_mc_empty(ndev)) {
- struct netdev_hw_addr *ha;
-
- /* program multicast address list into ALE register */
- netdev_for_each_mc_addr(ha, ndev) {
- cpsw_add_mcast(priv, ha->addr);
- }
- }
+ __dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr);
}
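
The switch to __dev_mc_sync()/__dev_mc_unsync() makes multicast programming incremental: the core diffs the device's multicast list against what was previously synced and invokes the callbacks once per added or removed address, so the driver no longer flushes the ALE and replays the whole list on every ->ndo_set_rx_mode(). A kernel-context sketch of the callback pair:

static int example_add_mc(struct net_device *ndev, const u8 *addr)
{
	/* program a single address into the hardware filter */
	return 0;
}

static int example_del_mc(struct net_device *ndev, const u8 *addr)
{
	/* remove a single address from the hardware filter */
	return 0;
}

static void example_set_rx_mode(struct net_device *ndev)
{
	__dev_mc_sync(ndev, example_add_mc, example_del_mc);
}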
static void cpsw_intr_enable(struct cpsw_common *cpsw)
@@ -1410,7 +1418,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
port_mask, port_mask, 0);
cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
- port_mask, ALE_VLAN, slave->port_vlan, 0);
+ ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN |
ALE_SECURE, slave->port_vlan);
@@ -1956,6 +1964,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
struct cpsw_common *cpsw = priv->cpsw;
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
+ __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr);
netif_tx_stop_all_queues(priv->ndev);
netif_carrier_off(priv->ndev);
@@ -2293,16 +2302,19 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
{
int ret;
int unreg_mcast_mask = 0;
+ int mcast_mask;
u32 port_mask;
struct cpsw_common *cpsw = priv->cpsw;
if (cpsw->data.dual_emac) {
port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
+ mcast_mask = ALE_PORT_HOST;
if (priv->ndev->flags & IFF_ALLMULTI)
- unreg_mcast_mask = port_mask;
+ unreg_mcast_mask = mcast_mask;
} else {
port_mask = ALE_ALL_PORTS;
+ mcast_mask = port_mask;
if (priv->ndev->flags & IFF_ALLMULTI)
unreg_mcast_mask = ALE_ALL_PORTS;
@@ -2321,7 +2333,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
goto clean_vid;
ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
- port_mask, ALE_VLAN, vid, 0);
+ mcast_mask, ALE_VLAN, vid, 0);
if (ret != 0)
goto clean_vlan_ucast;
return 0;
@@ -3658,8 +3670,7 @@ static int cpsw_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
if (cpsw->data.dual_emac) {
@@ -3682,8 +3693,7 @@ static int cpsw_suspend(struct device *dev)
static int cpsw_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
/* Select default pin state */
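
The suspend/resume cleanups here and in the drivers below rely on platform_get_drvdata() being a thin wrapper: platform_set_drvdata() stores the pointer in the embedded struct device, so the two lookups are equivalent and the to_platform_device() detour was redundant.

	/* Equivalent for any platform device: */
	ndev = platform_get_drvdata(to_platform_device(dev));
	ndev = dev_get_drvdata(dev);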
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 5766225a4ce1..798c989d5d93 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -136,7 +136,7 @@ static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}
-static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
+static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
{
int i;
@@ -175,7 +175,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
return idx;
}
-static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
+static int cpsw_ale_match_addr(struct cpsw_ale *ale, const u8 *addr, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int type, idx;
@@ -309,7 +309,7 @@ static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
}
}
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -336,7 +336,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
}
EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast);
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -352,7 +352,7 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
}
EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast);
-int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid, int mcast_state)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
@@ -386,7 +386,7 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
}
EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast);
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index d4fe9016429b..cd07a3e96d57 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -105,13 +105,13 @@ void cpsw_ale_start(struct cpsw_ale *ale);
void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid);
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid);
-int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid, int mcast_state);
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid);
int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index f270beebb428..9153db120352 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2002,8 +2002,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
static int davinci_emac_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
if (netif_running(ndev))
emac_dev_stop(ndev);
@@ -2013,8 +2012,7 @@ static int davinci_emac_suspend(struct device *dev)
static int davinci_emac_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
if (netif_running(ndev))
emac_dev_open(ndev);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index a1d335a3c5e4..1f612268c998 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -225,17 +225,6 @@ static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
return 0;
}
-static const char *netcp_node_name(struct device_node *node)
-{
- const char *name;
-
- if (of_property_read_string(node, "label", &name) < 0)
- name = node->name;
- if (!name)
- name = "unknown";
- return name;
-}
-
/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
@@ -267,8 +256,13 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
}
for_each_available_child_of_node(devices, child) {
- const char *name = netcp_node_name(child);
+ const char *name;
+ char node_name[32];
+ if (of_property_read_string(child, "label", &name) < 0) {
+ snprintf(node_name, sizeof(node_name), "%pOFn", child);
+ name = node_name;
+ }
if (!strcasecmp(module->name, name))
break;
}
@@ -2209,8 +2203,8 @@ static int netcp_probe(struct platform_device *pdev)
for_each_available_child_of_node(interfaces, child) {
ret = netcp_create_interface(netcp_device, child);
if (ret) {
- dev_err(dev, "could not create interface(%s)\n",
- child->name);
+ dev_err(dev, "could not create interface(%pOFn)\n",
+ child);
goto probe_quit_interface;
}
}
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 72b98e27c992..0397ccb6597e 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3137,15 +3137,15 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
for_each_child_of_node(node, port) {
slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
if (!slave) {
- dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n",
- port->name);
+ dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n",
+ port);
continue;
}
if (init_slave(gbe_dev, slave, port)) {
dev_err(dev,
- "Failed to initialize secondary port(%s), skipping...\n",
- port->name);
+ "Failed to initialize secondary port(%pOFn), skipping...\n",
+ port);
devm_kfree(dev, slave);
continue;
}
@@ -3239,8 +3239,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
if (ret) {
dev_err(gbe_dev->dev,
- "Can't xlate xgbe of node(%s) ss address at %d\n",
- node->name, XGBE_SS_REG_INDEX);
+ "Can't xlate xgbe of node(%pOFn) ss address at %d\n",
+ node, XGBE_SS_REG_INDEX);
return ret;
}
@@ -3254,8 +3254,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
if (ret) {
dev_err(gbe_dev->dev,
- "Can't xlate xgbe of node(%s) sm address at %d\n",
- node->name, XGBE_SM_REG_INDEX);
+ "Can't xlate xgbe of node(%pOFn) sm address at %d\n",
+ node, XGBE_SM_REG_INDEX);
return ret;
}
@@ -3269,8 +3269,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
if (ret) {
dev_err(gbe_dev->dev,
- "Can't xlate xgbe serdes of node(%s) address at %d\n",
- node->name, XGBE_SERDES_REG_INDEX);
+ "Can't xlate xgbe serdes of node(%pOFn) address at %d\n",
+ node, XGBE_SERDES_REG_INDEX);
return ret;
}
@@ -3347,8 +3347,8 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
if (ret) {
dev_err(gbe_dev->dev,
- "Can't translate of node(%s) of gbe ss address at %d\n",
- node->name, GBE_SS_REG_INDEX);
+ "Can't translate of node(%pOFn) of gbe ss address at %d\n",
+ node, GBE_SS_REG_INDEX);
return ret;
}
@@ -3372,8 +3372,8 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
if (ret) {
dev_err(gbe_dev->dev,
- "Can't translate of gbe node(%s) address at index %d\n",
- node->name, GBE_SGMII34_REG_INDEX);
+ "Can't translate of gbe node(%pOFn) address at index %d\n",
+ node, GBE_SGMII34_REG_INDEX);
return ret;
}
@@ -3388,8 +3388,8 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
if (ret) {
dev_err(gbe_dev->dev,
- "Can't translate of gbe node(%s) address at index %d\n",
- node->name, GBE_SM_REG_INDEX);
+ "Can't translate of gbe node(%pOFn) address at index %d\n",
+ node, GBE_SM_REG_INDEX);
return ret;
}
@@ -3498,8 +3498,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
if (ret) {
dev_err(gbe_dev->dev,
- "Can't translate of gbenu node(%s) addr at index %d\n",
- node->name, GBENU_SM_REG_INDEX);
+ "Can't translate of gbenu node(%pOFn) addr at index %d\n",
+ node, GBENU_SM_REG_INDEX);
return ret;
}
@@ -3642,7 +3642,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
gbe_dev->ss_regs);
} else {
- dev_err(dev, "unknown GBE node(%s)\n", node->name);
+ dev_err(dev, "unknown GBE node(%pOFn)\n", node);
ret = -ENODEV;
}
@@ -3667,8 +3667,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
for_each_child_of_node(interfaces, interface) {
ret = of_property_read_u32(interface, "slave-port", &slave_num);
if (ret) {
- dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
- interface->name);
+ dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n",
+ interface);
continue;
}
gbe_dev->num_slaves++;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 88d74aef218a..75237c81c63d 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -845,9 +845,9 @@ static int gelic_card_kick_txdma(struct gelic_card *card,
* @skb: packet to send out
* @netdev: interface device structure
*
- * returns 0 on success, <0 on failure
+ * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
*/
-int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct gelic_card *card = netdev_card(netdev);
struct gelic_descr *descr;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 003d0452d9cb..fbbf9b54b173 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -370,7 +370,7 @@ void gelic_card_up(struct gelic_card *card);
void gelic_card_down(struct gelic_card *card);
int gelic_net_open(struct net_device *netdev);
int gelic_net_stop(struct net_device *netdev);
-int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
void gelic_net_set_multi(struct net_device *netdev);
void gelic_net_tx_timeout(struct net_device *netdev);
int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 302079e22b06..00ab417694ad 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1094,7 +1094,7 @@ static int gelic_wl_get_encode(struct net_device *netdev,
struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
struct iw_point *enc = &data->encoding;
unsigned long irqflag;
- unsigned int key_index, index_specified;
+ unsigned int key_index;
int ret = 0;
pr_debug("%s: <-\n", __func__);
@@ -1105,13 +1105,10 @@ static int gelic_wl_get_encode(struct net_device *netdev,
return -EINVAL;
spin_lock_irqsave(&wl->lock, irqflag);
- if (key_index) {
- index_specified = 1;
+ if (key_index)
key_index--;
- } else {
- index_specified = 0;
+ else
key_index = wl->current_key;
- }
if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
switch (wl->auth_method) {
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index d925b8203996..23417266b7ec 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -880,9 +880,9 @@ out:
* @skb: packet to send out
* @netdev: interface device structure
*
- * returns 0 on success, !0 on failure
+ * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
*/
-static int
+static netdev_tx_t
spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
int cnt;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index cce9c9ed46aa..6a71c2c0f17d 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -474,7 +474,8 @@ static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_
/* Index to functions, as function prototypes. */
static int tc35815_open(struct net_device *dev);
-static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t tc35815_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t tc35815_interrupt(int irq, void *dev_id);
static int tc35815_rx(struct net_device *dev, int limit);
static int tc35815_poll(struct napi_struct *napi, int budget);
@@ -628,7 +629,7 @@ static int tc_mii_probe(struct net_device *dev)
phy_attached_info(phydev);
/* mask with MAC supported features */
- phydev->supported &= PHY_BASIC_FEATURES;
+ phy_set_max_speed(phydev, SPEED_100);
dropmask = 0;
if (options.speed == 10)
dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
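
phy_set_max_speed() is the phylib helper that removes every supported/advertised link mode above the given speed; it replaces open-coded masking of phydev->supported, a driver-writable bitmap that phylib is moving away from exposing. For instance:

	/* Keep 10/100 modes, drop gigabit and above. */
	phy_set_max_speed(phydev, SPEED_100);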
@@ -1248,7 +1249,8 @@ tc35815_open(struct net_device *dev)
* invariant will hold if you make sure that the netif_*_queue()
* calls are done at the proper times.
*/
-static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
struct TxFD *txfd;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 2bdfb39215e9..d8ba512f166a 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work)
w5100_tx_skb(priv->ndev, skb);
}
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 56ae573001e8..f9da5d6172e3 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
netif_wake_queue(ndev);
}
-static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
struct w5300_priv *priv = netdev_priv(ndev);
@@ -661,8 +661,7 @@ static int w5300_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int w5300_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5300_priv *priv = netdev_priv(ndev);
if (netif_running(ndev)) {
@@ -676,8 +675,7 @@ static int w5300_suspend(struct device *dev)
static int w5300_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5300_priv *priv = netdev_priv(ndev);
if (!netif_running(ndev)) {
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 60abc9250f56..2241f9897092 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -674,7 +674,8 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
return 0;
}
-static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t
+temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f24f48f33802..12a14609ec47 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -653,7 +653,8 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
* start the transmission. Additionally if checksum offloading is supported,
* it populates AXI Stream Control fields with appropriate values.
*/
-static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t
+axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
u32 ii;
u32 num_frag;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 42f1f518dad6..639e3e99af46 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -941,8 +941,7 @@ static int xemaclite_open(struct net_device *dev)
}
/* EmacLite doesn't support giga-bit speeds */
- lp->phy_dev->supported &= (PHY_BASIC_FEATURES);
- lp->phy_dev->advertising = lp->phy_dev->supported;
+ phy_set_max_speed(lp->phy_dev, SPEED_100);
/* Don't advertise 1000BASE-T Full/Half duplex speeds */
phy_write(lp->phy_dev, MII_CTRL1000, 0);
@@ -1020,9 +1019,10 @@ static int xemaclite_close(struct net_device *dev)
* deferred and the Tx queue is stopped so that the deferred socket buffer can
* be transmitted when the Emaclite device is free to transmit data.
*
- * Return: 0, always.
+ * Return: NETDEV_TX_OK, always.
*/
-static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
+static netdev_tx_t
+xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
struct sk_buff *new_skb;
@@ -1044,7 +1044,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
/* Take the time stamp now, since we can't do this in an ISR. */
skb_tx_timestamp(new_skb);
spin_unlock_irqrestore(&lp->reset_lock, flags);
- return 0;
+ return NETDEV_TX_OK;
}
spin_unlock_irqrestore(&lp->reset_lock, flags);
@@ -1053,7 +1053,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
dev->stats.tx_bytes += len;
dev_consume_skb_any(new_skb);
- return 0;
+ return NETDEV_TX_OK;
}
/**
diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
index 3a424c864f4d..d62e8c6205f7 100644
--- a/drivers/net/fddi/Kconfig
+++ b/drivers/net/fddi/Kconfig
@@ -15,6 +15,17 @@ config FDDI
if FDDI
+config DEFZA
+ tristate "DEC FDDIcontroller 700/700-C (DEFZA-xx) support"
+ depends on FDDI && TC
+ help
+ This is support for the DEC FDDIcontroller 700 (DEFZA-AA, fiber)
+ and 700-C (DEFZA-CA, copper) TURBOchannel network cards which
+ can connect you to a local FDDI network.
+
+ To compile this driver as a module, choose M here: the module
+ will be called defza. If unsure, say N.
+
config DEFXX
tristate "Digital DEFTA/DEFEA/DEFPA adapter support"
depends on FDDI && (PCI || EISA || TC)
diff --git a/drivers/net/fddi/Makefile b/drivers/net/fddi/Makefile
index 36da19c9a8aa..194b52cc20b0 100644
--- a/drivers/net/fddi/Makefile
+++ b/drivers/net/fddi/Makefile
@@ -3,4 +3,5 @@
#
obj-$(CONFIG_DEFXX) += defxx.o
+obj-$(CONFIG_DEFZA) += defza.o
obj-$(CONFIG_SKFP) += skfp/
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
new file mode 100644
index 000000000000..3b7f10a5f06a
--- /dev/null
+++ b/drivers/net/fddi/defza.c
@@ -0,0 +1,1564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
+ *
+ * Copyright (c) 2018 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * References:
+ *
+ * Dave Sawyer & Phil Weeks & Frank Itkowsky,
+ * "DEC FDDIcontroller 700 Port Specification",
+ * Revision 1.1, Digital Equipment Corporation
+ */
+
+/* ------------------------------------------------------------------------- */
+/* FZA configurable parameters. */
+
+/* The number of transmit ring descriptors; either 0 for 512 or 1 for 1024. */
+#define FZA_RING_TX_MODE 0
+
+/* The number of receive ring descriptors; from 2 up to 256. */
+#define FZA_RING_RX_SIZE 256
+
+/* End of FZA configurable parameters. No need to change anything below. */
+/* ------------------------------------------------------------------------- */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/tc.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <asm/barrier.h>
+
+#include "defza.h"
+
+#define DRV_NAME "defza"
+#define DRV_VERSION "v.1.1.4"
+#define DRV_RELDATE "Oct 6 2018"
+
+static char version[] =
+ DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Maciej W. Rozycki\n";
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver");
+MODULE_LICENSE("GPL");
+
+static int loopback;
+module_param(loopback, int, 0644);
+
+/* Ring Purger Multicast */
+static u8 hw_addr_purger[8] = { 0x09, 0x00, 0x2b, 0x02, 0x01, 0x05 };
+/* Directed Beacon Multicast */
+static u8 hw_addr_beacon[8] = { 0x01, 0x80, 0xc2, 0x00, 0x01, 0x00 };
+
+/* Shorthands for MMIO accesses that we require to be strongly ordered
+ * WRT preceding MMIO accesses.
+ */
+#define readw_o readw_relaxed
+#define readl_o readl_relaxed
+
+#define writew_o writew_relaxed
+#define writel_o writel_relaxed
+
+/* Shorthands for MMIO accesses that we are happy with being weakly ordered
+ * WRT preceding MMIO accesses.
+ */
+#define readw_u readw_relaxed
+#define readl_u readl_relaxed
+#define readq_u readq_relaxed
+
+#define writew_u writew_relaxed
+#define writel_u writel_relaxed
+#define writeq_u writeq_relaxed
+
+static inline struct sk_buff *fza_alloc_skb_irq(struct net_device *dev,
+ unsigned int length)
+{
+ return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+}
+
+static inline struct sk_buff *fza_alloc_skb(struct net_device *dev,
+ unsigned int length)
+{
+ return __netdev_alloc_skb(dev, length, GFP_KERNEL);
+}
+
+static inline void fza_skb_align(struct sk_buff *skb, unsigned int v)
+{
+ unsigned long x, y;
+
+ x = (unsigned long)skb->data;
+ y = ALIGN(x, v);
+
+ skb_reserve(skb, y - x);
+}
+
+static inline void fza_reads(const void __iomem *from, void *to,
+ unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ const u64 __iomem *src = from;
+ const u32 __iomem *src_trail;
+ u64 *dst = to;
+ u32 *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ *dst++ = readq_u(src++);
+ if (size) {
+ src_trail = (u32 __iomem *)src;
+ dst_trail = (u32 *)dst;
+ *dst_trail = readl_u(src_trail);
+ }
+ } else {
+ const u32 __iomem *src = from;
+ u32 *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ *dst++ = readl_u(src++);
+ }
+}
+
+static inline void fza_writes(const void *from, void __iomem *to,
+ unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ const u64 *src = from;
+ const u32 *src_trail;
+ u64 __iomem *dst = to;
+ u32 __iomem *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ writeq_u(*src++, dst++);
+ if (size) {
+ src_trail = (u32 *)src;
+ dst_trail = (u32 __iomem *)dst;
+ writel_u(*src_trail, dst_trail);
+ }
+ } else {
+ const u32 *src = from;
+ u32 __iomem *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ writel_u(*src++, dst++);
+ }
+}
+
+static inline void fza_moves(const void __iomem *from, void __iomem *to,
+ unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ const u64 __iomem *src = from;
+ const u32 __iomem *src_trail;
+ u64 __iomem *dst = to;
+ u32 __iomem *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ writeq_u(readq_u(src++), dst++);
+ if (size) {
+ src_trail = (u32 __iomem *)src;
+ dst_trail = (u32 __iomem *)dst;
+ writel_u(readl_u(src_trail), dst_trail);
+ }
+ } else {
+ const u32 __iomem *src = from;
+ u32 __iomem *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ writel_u(readl_u(src++), dst++);
+ }
+}
+
+static inline void fza_zeros(void __iomem *to, unsigned long size)
+{
+ if (sizeof(unsigned long) == 8) {
+ u64 __iomem *dst = to;
+ u32 __iomem *dst_trail;
+
+ for (size = (size + 3) / 4; size > 1; size -= 2)
+ writeq_u(0, dst++);
+ if (size) {
+ dst_trail = (u32 __iomem *)dst;
+ writel_u(0, dst_trail);
+ }
+ } else {
+ u32 __iomem *dst = to;
+
+ for (size = (size + 3) / 4; size; size--)
+ writel_u(0, dst++);
+ }
+}
+
+static inline void fza_regs_dump(struct fza_private *fp)
+{
+ pr_debug("%s: iomem registers:\n", fp->name);
+ pr_debug(" reset: 0x%04x\n", readw_o(&fp->regs->reset));
+ pr_debug(" interrupt event: 0x%04x\n", readw_u(&fp->regs->int_event));
+ pr_debug(" status: 0x%04x\n", readw_u(&fp->regs->status));
+ pr_debug(" interrupt mask: 0x%04x\n", readw_u(&fp->regs->int_mask));
+ pr_debug(" control A: 0x%04x\n", readw_u(&fp->regs->control_a));
+ pr_debug(" control B: 0x%04x\n", readw_u(&fp->regs->control_b));
+}
+
+static inline void fza_do_reset(struct fza_private *fp)
+{
+ /* Reset the board. */
+ writew_o(FZA_RESET_INIT, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+ readw_o(&fp->regs->reset); /* Read it back for a small delay. */
+ writew_o(FZA_RESET_CLR, &fp->regs->reset);
+
+ /* Enable all interrupt events we handle. */
+ writew_o(fp->int_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+}
+
+static inline void fza_do_shutdown(struct fza_private *fp)
+{
+ /* Disable the driver mode. */
+ writew_o(FZA_CONTROL_B_IDLE, &fp->regs->control_b);
+
+ /* And reset the board. */
+ writew_o(FZA_RESET_INIT, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+ writew_o(FZA_RESET_CLR, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+}
+
+static int fza_reset(struct fza_private *fp)
+{
+ unsigned long flags;
+ uint status, state;
+ long t;
+
+ pr_info("%s: resetting the board...\n", fp->name);
+
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->state_chg_flag = 0;
+ fza_do_reset(fp);
+ spin_unlock_irqrestore(&fp->lock, flags);
+
+ /* DEC says RESET needs up to 30 seconds to complete. My DEFZA-AA
+ * rev. C03 happily finishes in 9.7 seconds. :-) But we need to
+ * be on the safe side...
+ */
+ t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
+ 45 * HZ);
+ status = readw_u(&fp->regs->status);
+ state = FZA_STATUS_GET_STATE(status);
+ if (fp->state_chg_flag == 0) {
+ pr_err("%s: RESET timed out!, state %x\n", fp->name, state);
+ return -EIO;
+ }
+ if (state != FZA_STATE_UNINITIALIZED) {
+ pr_err("%s: RESET failed!, state %x, failure ID %x\n",
+ fp->name, state, FZA_STATUS_GET_TEST(status));
+ return -EIO;
+ }
+ pr_info("%s: OK\n", fp->name);
+ pr_debug("%s: RESET: %lums elapsed\n", fp->name,
+ (45 * HZ - t) * 1000 / HZ);
+
+ return 0;
+}
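
The elapsed-time computation above relies on wait_event_timeout() semantics: it returns 0 if the timeout expired, and otherwise the number of jiffies remaining (at least 1). Hence:

	/* elapsed = timeout - remaining, converted from jiffies to ms */
	elapsed_ms = (45 * HZ - t) * 1000 / HZ;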
+
+static struct fza_ring_cmd __iomem *fza_cmd_send(struct net_device *dev,
+ int command)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_ring_cmd __iomem *ring = fp->ring_cmd + fp->ring_cmd_index;
+ unsigned int old_mask, new_mask;
+ union fza_cmd_buf __iomem *buf;
+ struct netdev_hw_addr *ha;
+ int i;
+
+ old_mask = fp->int_mask;
+ new_mask = old_mask & ~FZA_MASK_STATE_CHG;
+ writew_u(new_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+ fp->int_mask = new_mask;
+
+ buf = fp->mmio + readl_u(&ring->buffer);
+
+ if ((readl_u(&ring->cmd_own) & FZA_RING_OWN_MASK) !=
+ FZA_RING_OWN_HOST) {
+ pr_warn("%s: command buffer full, command: %u!\n", fp->name,
+ command);
+ return NULL;
+ }
+
+ switch (command) {
+ case FZA_RING_CMD_INIT:
+ writel_u(FZA_RING_TX_MODE, &buf->init.tx_mode);
+ writel_u(FZA_RING_RX_SIZE, &buf->init.hst_rx_size);
+ fza_zeros(&buf->init.counters, sizeof(buf->init.counters));
+ break;
+
+ case FZA_RING_CMD_MODCAM:
+ i = 0;
+ fza_writes(&hw_addr_purger, &buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ fza_writes(&hw_addr_beacon, &buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i >= FZA_CMD_CAM_SIZE)
+ break;
+ fza_writes(ha->addr, &buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ }
+ while (i < FZA_CMD_CAM_SIZE)
+ fza_zeros(&buf->cam.hw_addr[i++],
+ sizeof(*buf->cam.hw_addr));
+ break;
+
+ case FZA_RING_CMD_PARAM:
+ writel_u(loopback, &buf->param.loop_mode);
+ writel_u(fp->t_max, &buf->param.t_max);
+ writel_u(fp->t_req, &buf->param.t_req);
+ writel_u(fp->tvx, &buf->param.tvx);
+ writel_u(fp->lem_threshold, &buf->param.lem_threshold);
+ fza_writes(&fp->station_id, &buf->param.station_id,
+ sizeof(buf->param.station_id));
+ /* Convert to milliseconds due to buggy firmware. */
+ writel_u(fp->rtoken_timeout / 12500,
+ &buf->param.rtoken_timeout);
+ writel_u(fp->ring_purger, &buf->param.ring_purger);
+ break;
+
+ case FZA_RING_CMD_MODPROM:
+ if (dev->flags & IFF_PROMISC) {
+ writel_u(1, &buf->modprom.llc_prom);
+ writel_u(1, &buf->modprom.smt_prom);
+ } else {
+ writel_u(0, &buf->modprom.llc_prom);
+ writel_u(0, &buf->modprom.smt_prom);
+ }
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > FZA_CMD_CAM_SIZE - 2)
+ writel_u(1, &buf->modprom.llc_multi);
+ else
+ writel_u(0, &buf->modprom.llc_multi);
+ writel_u(1, &buf->modprom.llc_bcast);
+ break;
+ }
+
+ /* Trigger the command. */
+ writel_u(FZA_RING_OWN_FZA | command, &ring->cmd_own);
+ writew_o(FZA_CONTROL_A_CMD_POLL, &fp->regs->control_a);
+
+ fp->ring_cmd_index = (fp->ring_cmd_index + 1) % FZA_RING_CMD_SIZE;
+
+ fp->int_mask = old_mask;
+ writew_u(fp->int_mask, &fp->regs->int_mask);
+
+ return ring;
+}
+
+static int fza_init_send(struct net_device *dev,
+ struct fza_cmd_init *__iomem *init)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_ring_cmd __iomem *ring;
+ unsigned long flags;
+ u32 stat;
+ long t;
+
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->cmd_done_flag = 0;
+ ring = fza_cmd_send(dev, FZA_RING_CMD_INIT);
+ spin_unlock_irqrestore(&fp->lock, flags);
+ if (!ring)
+ /* This should never happen in the uninitialized state,
+ * so do not try to recover and just consider it fatal.
+ */
+ return -ENOBUFS;
+
+ /* INIT may take quite a long time (160ms for my C03). */
+ t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
+ if (fp->cmd_done_flag == 0) {
+ pr_err("%s: INIT command timed out!, state %x\n", fp->name,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ stat = readl_u(&ring->stat);
+ if (stat != FZA_RING_STAT_SUCCESS) {
+ pr_err("%s: INIT command failed!, status %02x, state %x\n",
+ fp->name, stat,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ pr_debug("%s: INIT: %lums elapsed\n", fp->name,
+ (3 * HZ - t) * 1000 / HZ);
+
+ if (init)
+ *init = fp->mmio + readl_u(&ring->buffer);
+ return 0;
+}
+
+static void fza_rx_init(struct fza_private *fp)
+{
+ int i;
+
+ /* Fill the host receive descriptor ring. */
+ for (i = 0; i < FZA_RING_RX_SIZE; i++) {
+ writel_o(0, &fp->ring_hst_rx[i].rmc);
+ writel_o((fp->rx_dma[i] + 0x1000) >> 9,
+ &fp->ring_hst_rx[i].buffer1);
+ writel_o(fp->rx_dma[i] >> 9 | FZA_RING_OWN_FZA,
+ &fp->ring_hst_rx[i].buf0_own);
+ }
+}
+
+static void fza_set_rx_mode(struct net_device *dev)
+{
+ fza_cmd_send(dev, FZA_RING_CMD_MODCAM);
+ fza_cmd_send(dev, FZA_RING_CMD_MODPROM);
+}
+
+union fza_buffer_txp {
+ struct fza_buffer_tx *data_ptr;
+ struct fza_buffer_tx __iomem *mmio_ptr;
+};
+
+static int fza_do_xmit(union fza_buffer_txp ub, int len,
+ struct net_device *dev, int smt)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_buffer_tx __iomem *rmc_tx_ptr;
+ int i, first, frag_len, left_len;
+ u32 own, rmc;
+
+ if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
+ fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
+ FZA_TX_BUFFER_SIZE) < len)
+ return 1;
+
+ first = fp->ring_rmc_tx_index;
+
+ left_len = len;
+ frag_len = FZA_TX_BUFFER_SIZE;
+ /* First descriptor is relinquished last. */
+ own = FZA_RING_TX_OWN_HOST;
+ /* First descriptor carries frame length; we don't use cut-through. */
+ rmc = FZA_RING_TX_SOP | FZA_RING_TX_VBC | len;
+ do {
+ i = fp->ring_rmc_tx_index;
+ rmc_tx_ptr = &fp->buffer_tx[i];
+
+ if (left_len < FZA_TX_BUFFER_SIZE)
+ frag_len = left_len;
+ left_len -= frag_len;
+
+ /* Length must be a multiple of 4 as only word writes are
+ * permitted!
+ */
+ frag_len = (frag_len + 3) & ~3;
+ if (smt)
+ fza_moves(ub.mmio_ptr, rmc_tx_ptr, frag_len);
+ else
+ fza_writes(ub.data_ptr, rmc_tx_ptr, frag_len);
+
+ if (left_len == 0)
+ rmc |= FZA_RING_TX_EOP; /* Mark last frag. */
+
+ writel_o(rmc, &fp->ring_rmc_tx[i].rmc);
+ writel_o(own, &fp->ring_rmc_tx[i].own);
+
+ ub.data_ptr++;
+ fp->ring_rmc_tx_index = (fp->ring_rmc_tx_index + 1) %
+ fp->ring_rmc_tx_size;
+
+ /* Settings for intermediate frags. */
+ own = FZA_RING_TX_OWN_RMC;
+ rmc = 0;
+ } while (left_len > 0);
+
+ if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
+ fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
+ FZA_TX_BUFFER_SIZE) < dev->mtu + dev->hard_header_len) {
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+ }
+
+ writel_o(FZA_RING_TX_OWN_RMC, &fp->ring_rmc_tx[first].own);
+
+ /* Go, go, go! */
+ writew_o(FZA_CONTROL_A_TX_POLL, &fp->regs->control_a);
+
+ return 0;
+}
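
The (frag_len + 3) & ~3 expression used above rounds a byte count up to the next multiple of 4, satisfying the word-write-only restriction on the packet memory. A standalone check of the idiom:

#include <assert.h>

static unsigned int round_up_to_4(unsigned int n)
{
	return (n + 3) & ~3u;
}

int main(void)
{
	assert(round_up_to_4(1) == 4);
	assert(round_up_to_4(4) == 4);
	assert(round_up_to_4(5) == 8);
	assert(round_up_to_4(0) == 0);
	return 0;
}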
+
+static int fza_do_recv_smt(struct fza_buffer_tx *data_ptr, int len,
+ u32 rmc, struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_buffer_tx __iomem *smt_rx_ptr;
+ u32 own;
+ int i;
+
+ i = fp->ring_smt_rx_index;
+ own = readl_o(&fp->ring_smt_rx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ return 1;
+
+ smt_rx_ptr = fp->mmio + readl_u(&fp->ring_smt_rx[i].buffer);
+
+ /* Length must be a multiple of 4 as only word writes are permitted! */
+ fza_writes(data_ptr, smt_rx_ptr, (len + 3) & ~3);
+
+ writel_o(rmc, &fp->ring_smt_rx[i].rmc);
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_rx[i].own);
+
+ fp->ring_smt_rx_index =
+ (fp->ring_smt_rx_index + 1) % fp->ring_smt_rx_size;
+
+ /* Grab it! */
+ writew_o(FZA_CONTROL_A_SMT_RX_POLL, &fp->regs->control_a);
+
+ return 0;
+}
+
+static void fza_tx(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ u32 own, rmc;
+ int i;
+
+ while (1) {
+ i = fp->ring_rmc_txd_index;
+ if (i == fp->ring_rmc_tx_index)
+ break;
+ own = readl_o(&fp->ring_rmc_tx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC)
+ break;
+
+ rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
+ /* Only process the first descriptor. */
+ if ((rmc & FZA_RING_TX_SOP) != 0) {
+ if ((rmc & FZA_RING_TX_DCC_MASK) ==
+ FZA_RING_TX_DCC_SUCCESS) {
+ int pkt_len = (rmc & FZA_RING_PBC_MASK) - 3;
+ /* Omit PRH. */
+
+ fp->stats.tx_packets++;
+ fp->stats.tx_bytes += pkt_len;
+ } else {
+ fp->stats.tx_errors++;
+ switch (rmc & FZA_RING_TX_DCC_MASK) {
+ case FZA_RING_TX_DCC_DTP_SOP:
+ case FZA_RING_TX_DCC_DTP:
+ case FZA_RING_TX_DCC_ABORT:
+ fp->stats.tx_aborted_errors++;
+ break;
+ case FZA_RING_TX_DCC_UNDRRUN:
+ fp->stats.tx_fifo_errors++;
+ break;
+ case FZA_RING_TX_DCC_PARITY:
+ default:
+ break;
+ }
+ }
+ }
+
+ fp->ring_rmc_txd_index = (fp->ring_rmc_txd_index + 1) %
+ fp->ring_rmc_tx_size;
+ }
+
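+ /* Mirror the space check made in fza_do_xmit: wake the queue once
+ * a maximum-size frame would fit in the ring again.
+ */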
+ if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
+ fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
+ FZA_TX_BUFFER_SIZE) >= dev->mtu + dev->hard_header_len) {
+ if (fp->queue_active) {
+ netif_wake_queue(dev);
+ pr_debug("%s: queue woken\n", fp->name);
+ }
+ }
+}
+
+static inline int fza_rx_err(struct fza_private *fp,
+ const u32 rmc, const u8 fc)
+{
+ int len, min_len, max_len;
+
+ len = rmc & FZA_RING_PBC_MASK;
+
+ if (unlikely((rmc & FZA_RING_RX_BAD) != 0)) {
+ fp->stats.rx_errors++;
+
+ /* Check special status codes. */
+ if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
+ FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
+ (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
+ FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_ALIAS)) {
+ if (len >= 8190)
+ fp->stats.rx_length_errors++;
+ return 1;
+ }
+ if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
+ FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
+ (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
+ FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_CAM)) {
+ /* Halt the interface to trigger a reset. */
+ writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
+ readw_o(&fp->regs->control_a); /* Synchronize. */
+ return 1;
+ }
+
+ /* Check the MAC status. */
+ switch (rmc & FZA_RING_RX_RRR_MASK) {
+ case FZA_RING_RX_RRR_OK:
+ if ((rmc & FZA_RING_RX_CRC) != 0)
+ fp->stats.rx_crc_errors++;
+ else if ((rmc & FZA_RING_RX_FSC_MASK) == 0 ||
+ (rmc & FZA_RING_RX_FSB_ERR) != 0)
+ fp->stats.rx_frame_errors++;
+ return 1;
+ case FZA_RING_RX_RRR_SADDR:
+ case FZA_RING_RX_RRR_DADDR:
+ case FZA_RING_RX_RRR_ABORT:
+ /* Halt the interface to trigger a reset. */
+ writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
+ readw_o(&fp->regs->control_a); /* Synchronize. */
+ return 1;
+ case FZA_RING_RX_RRR_LENGTH:
+ fp->stats.rx_frame_errors++;
+ return 1;
+ default:
+ return 1;
+ }
+ }
+
+ /* Packet received successfully; validate the length. */
+ switch (fc & FDDI_FC_K_FORMAT_MASK) {
+ case FDDI_FC_K_FORMAT_MANAGEMENT:
+ if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_ASYNC)
+ min_len = 37;
+ else
+ min_len = 17;
+ break;
+ case FDDI_FC_K_FORMAT_LLC:
+ min_len = 20;
+ break;
+ default:
+ min_len = 17;
+ break;
+ }
+ max_len = 4495;
+ if (len < min_len || len > max_len) {
+ fp->stats.rx_errors++;
+ fp->stats.rx_length_errors++;
+ return 1;
+ }
+
+ return 0;
+}
+
+static void fza_rx(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct sk_buff *skb, *newskb;
+ struct fza_fddihdr *frame;
+ dma_addr_t dma, newdma;
+ u32 own, rmc, buf;
+ int i, len;
+ u8 fc;
+
+ while (1) {
+ i = fp->ring_hst_rx_index;
+ own = readl_o(&fp->ring_hst_rx[i].buf0_own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ break;
+
+ rmc = readl_u(&fp->ring_hst_rx[i].rmc);
+ skb = fp->rx_skbuff[i];
+ dma = fp->rx_dma[i];
+
+ /* The RMC doesn't count the preamble and the starting
+ * delimiter. We fix it up here for a total of 3 octets.
+ */
+ dma_rmb();
+ len = (rmc & FZA_RING_PBC_MASK) + 3;
+ frame = (struct fza_fddihdr *)skb->data;
+
+ /* We need to get at real FC. */
+ dma_sync_single_for_cpu(fp->bdev,
+ dma +
+ ((u8 *)&frame->hdr.fc - (u8 *)frame),
+ sizeof(frame->hdr.fc),
+ DMA_FROM_DEVICE);
+ fc = frame->hdr.fc;
+
+ if (fza_rx_err(fp, rmc, fc))
+ goto err_rx;
+
+ /* We have to 512-byte-align RX buffers... */
+ newskb = fza_alloc_skb_irq(dev, FZA_RX_BUFFER_SIZE + 511);
+ if (newskb) {
+ fza_skb_align(newskb, 512);
+ newdma = dma_map_single(fp->bdev, newskb->data,
+ FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(fp->bdev, newdma)) {
+ dev_kfree_skb_irq(newskb);
+ newskb = NULL;
+ }
+ }
+ if (newskb) {
+ int pkt_len = len - 7; /* Omit P, SD and FCS. */
+ int is_multi;
+ int rx_stat;
+
+ dma_unmap_single(fp->bdev, dma, FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* Queue SMT frames to the SMT receive ring. */
+ if ((fc & (FDDI_FC_K_CLASS_MASK |
+ FDDI_FC_K_FORMAT_MASK)) ==
+ (FDDI_FC_K_CLASS_ASYNC |
+ FDDI_FC_K_FORMAT_MANAGEMENT) &&
+ (rmc & FZA_RING_RX_DA_MASK) !=
+ FZA_RING_RX_DA_PROM) {
+ if (fza_do_recv_smt((struct fza_buffer_tx *)
+ skb->data, len, rmc,
+ dev)) {
+ writel_o(FZA_CONTROL_A_SMT_RX_OVFL,
+ &fp->regs->control_a);
+ }
+ }
+
+ is_multi = ((frame->hdr.daddr[0] & 0x01) != 0);
+
+ skb_reserve(skb, 3); /* Skip over P and SD. */
+ skb_put(skb, pkt_len); /* And cut off FCS. */
+ skb->protocol = fddi_type_trans(skb, dev);
+
+ rx_stat = netif_rx(skb);
+ if (rx_stat != NET_RX_DROP) {
+ fp->stats.rx_packets++;
+ fp->stats.rx_bytes += pkt_len;
+ if (is_multi)
+ fp->stats.multicast++;
+ } else {
+ fp->stats.rx_dropped++;
+ }
+
+ skb = newskb;
+ dma = newdma;
+ fp->rx_skbuff[i] = skb;
+ fp->rx_dma[i] = dma;
+ } else {
+ fp->stats.rx_dropped++;
+ pr_notice("%s: memory squeeze, dropping packet\n",
+ fp->name);
+ }
+
+err_rx:
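+ /* Return the buffer to the adapter; both halves are passed as
+ * 512-byte-aligned addresses right-shifted by 9, with the high
+ * buffer starting 4KB above the low one.
+ */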
+ writel_o(0, &fp->ring_hst_rx[i].rmc);
+ buf = (dma + 0x1000) >> 9;
+ writel_o(buf, &fp->ring_hst_rx[i].buffer1);
+ buf = dma >> 9 | FZA_RING_OWN_FZA;
+ writel_o(buf, &fp->ring_hst_rx[i].buf0_own);
+ fp->ring_hst_rx_index =
+ (fp->ring_hst_rx_index + 1) % fp->ring_hst_rx_size;
+ }
+}
+
+static void fza_tx_smt(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr;
+ int i, len;
+ u32 own;
+
+ while (1) {
+ i = fp->ring_smt_tx_index;
+ own = readl_o(&fp->ring_smt_tx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ break;
+
+ smt_tx_ptr = fp->mmio + readl_u(&fp->ring_smt_tx[i].buffer);
+ len = readl_u(&fp->ring_smt_tx[i].rmc) & FZA_RING_PBC_MASK;
+
+ if (!netif_queue_stopped(dev)) {
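+ /* Pass a copy of the frame to any packet taps first. */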
+ if (dev_nit_active(dev)) {
+ struct sk_buff *skb;
+
+ /* Length must be a multiple of 4 as only word
+ * reads are permitted!
+ */
+ skb = fza_alloc_skb_irq(dev, (len + 3) & ~3);
+ if (!skb)
+ goto err_no_skb; /* Drop. */
+
+ skb_data_ptr = (struct fza_buffer_tx *)
+ skb->data;
+
+ fza_reads(smt_tx_ptr, skb_data_ptr,
+ (len + 3) & ~3);
+ skb->dev = dev;
+ skb_reserve(skb, 3); /* Skip over PRH. */
+ skb_put(skb, len - 3);
+ skb_reset_network_header(skb);
+
+ dev_queue_xmit_nit(skb, dev);
+
+ dev_kfree_skb_irq(skb);
+
+err_no_skb:
+ ;
+ }
+
+ /* Queue the frame to the RMC transmit ring. */
+ fza_do_xmit((union fza_buffer_txp)
+ { .mmio_ptr = smt_tx_ptr },
+ len, dev, 1);
+ }
+
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
+ fp->ring_smt_tx_index =
+ (fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
+ }
+}
+
+static void fza_uns(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ u32 own;
+ int i;
+
+ while (1) {
+ i = fp->ring_uns_index;
+ own = readl_o(&fp->ring_uns[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
+ break;
+
+ if (readl_u(&fp->ring_uns[i].id) == FZA_RING_UNS_RX_OVER) {
+ fp->stats.rx_errors++;
+ fp->stats.rx_over_errors++;
+ }
+
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_uns[i].own);
+ fp->ring_uns_index =
+ (fp->ring_uns_index + 1) % FZA_RING_UNS_SIZE;
+ }
+}
+
+static void fza_tx_flush(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ u32 own;
+ int i;
+
+ /* Clean up the SMT TX ring. */
+ i = fp->ring_smt_tx_index;
+ do {
+ writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
+ fp->ring_smt_tx_index =
+ (fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
+
+ } while (i != fp->ring_smt_tx_index);
+
+ /* Clean up the RMC TX ring. */
+ i = fp->ring_rmc_tx_index;
+ do {
+ own = readl_o(&fp->ring_rmc_tx[i].own);
+ if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC) {
+ u32 rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
+
+ writel_u(rmc | FZA_RING_TX_DTP,
+ &fp->ring_rmc_tx[i].rmc);
+ }
+ fp->ring_rmc_tx_index =
+ (fp->ring_rmc_tx_index + 1) % fp->ring_rmc_tx_size;
+
+ } while (i != fp->ring_rmc_tx_index);
+
+ /* Done. */
+ writew_o(FZA_CONTROL_A_FLUSH_DONE, &fp->regs->control_a);
+}
+
+static irqreturn_t fza_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct fza_private *fp = netdev_priv(dev);
+ uint int_event;
+
+ /* Get interrupt events. */
+ int_event = readw_o(&fp->regs->int_event) & fp->int_mask;
+ if (int_event == 0)
+ return IRQ_NONE;
+
+ /* Clear the events. */
+ writew_u(int_event, &fp->regs->int_event);
+
+ /* Now handle the events. The order matters. */
+
+ /* Command finished interrupt. */
+ if ((int_event & FZA_EVENT_CMD_DONE) != 0) {
+ fp->irq_count_cmd_done++;
+
+ spin_lock(&fp->lock);
+ fp->cmd_done_flag = 1;
+ wake_up(&fp->cmd_done_wait);
+ spin_unlock(&fp->lock);
+ }
+
+ /* Transmit finished interrupt. */
+ if ((int_event & FZA_EVENT_TX_DONE) != 0) {
+ fp->irq_count_tx_done++;
+ fza_tx(dev);
+ }
+
+ /* Host receive interrupt. */
+ if ((int_event & FZA_EVENT_RX_POLL) != 0) {
+ fp->irq_count_rx_poll++;
+ fza_rx(dev);
+ }
+
+ /* SMT transmit interrupt. */
+ if ((int_event & FZA_EVENT_SMT_TX_POLL) != 0) {
+ fp->irq_count_smt_tx_poll++;
+ fza_tx_smt(dev);
+ }
+
+ /* Transmit ring flush request. */
+ if ((int_event & FZA_EVENT_FLUSH_TX) != 0) {
+ fp->irq_count_flush_tx++;
+ fza_tx_flush(dev);
+ }
+
+ /* Link status change interrupt. */
+ if ((int_event & FZA_EVENT_LINK_ST_CHG) != 0) {
+ uint status;
+
+ fp->irq_count_link_st_chg++;
+ status = readw_u(&fp->regs->status);
+ if (FZA_STATUS_GET_LINK(status) == FZA_LINK_ON) {
+ netif_carrier_on(dev);
+ pr_info("%s: link available\n", fp->name);
+ } else {
+ netif_carrier_off(dev);
+ pr_info("%s: link unavailable\n", fp->name);
+ }
+ }
+
+ /* Unsolicited event interrupt. */
+ if ((int_event & FZA_EVENT_UNS_POLL) != 0) {
+ fp->irq_count_uns_poll++;
+ fza_uns(dev);
+ }
+
+ /* State change interrupt. */
+ if ((int_event & FZA_EVENT_STATE_CHG) != 0) {
+ uint status, state;
+
+ fp->irq_count_state_chg++;
+
+ status = readw_u(&fp->regs->status);
+ state = FZA_STATUS_GET_STATE(status);
+ pr_debug("%s: state change: %x\n", fp->name, state);
+ switch (state) {
+ case FZA_STATE_RESET:
+ break;
+
+ case FZA_STATE_UNINITIALIZED:
+ netif_carrier_off(dev);
+ del_timer_sync(&fp->reset_timer);
+ fp->ring_cmd_index = 0;
+ fp->ring_uns_index = 0;
+ fp->ring_rmc_tx_index = 0;
+ fp->ring_rmc_txd_index = 0;
+ fp->ring_hst_rx_index = 0;
+ fp->ring_smt_tx_index = 0;
+ fp->ring_smt_rx_index = 0;
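+ /* If the state recorded was higher, the board has just
+ * come out of a reset, so bring it up again.
+ */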
+ if (fp->state > state) {
+ pr_info("%s: OK\n", fp->name);
+ fza_cmd_send(dev, FZA_RING_CMD_INIT);
+ }
+ break;
+
+ case FZA_STATE_INITIALIZED:
+ if (fp->state > state) {
+ fza_set_rx_mode(dev);
+ fza_cmd_send(dev, FZA_RING_CMD_PARAM);
+ }
+ break;
+
+ case FZA_STATE_RUNNING:
+ case FZA_STATE_MAINTENANCE:
+ fp->state = state;
+ fza_rx_init(fp);
+ fp->queue_active = 1;
+ netif_wake_queue(dev);
+ pr_debug("%s: queue woken\n", fp->name);
+ break;
+
+ case FZA_STATE_HALTED:
+ fp->queue_active = 0;
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+ del_timer_sync(&fp->reset_timer);
+ pr_warn("%s: halted, reason: %x\n", fp->name,
+ FZA_STATUS_GET_HALT(status));
+ fza_regs_dump(fp);
+ pr_info("%s: resetting the board...\n", fp->name);
+ fza_do_reset(fp);
+ fp->timer_state = 0;
+ fp->reset_timer.expires = jiffies + 45 * HZ;
+ add_timer(&fp->reset_timer);
+ break;
+
+ default:
+ pr_warn("%s: undefined state: %x\n", fp->name, state);
+ break;
+ }
+
+ spin_lock(&fp->lock);
+ fp->state_chg_flag = 1;
+ wake_up(&fp->state_chg_wait);
+ spin_unlock(&fp->lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void fza_reset_timer(struct timer_list *t)
+{
+ struct fza_private *fp = from_timer(fp, t, reset_timer);
+
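+ /* Alternate between asserting the board reset on a RESET timeout
+ * and releasing it a second later, giving the board another 45s
+ * to come up.
+ */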
+ if (!fp->timer_state) {
+ pr_err("%s: RESET timed out!\n", fp->name);
+ pr_info("%s: trying harder...\n", fp->name);
+
+ /* Assert the board reset. */
+ writew_o(FZA_RESET_INIT, &fp->regs->reset);
+ readw_o(&fp->regs->reset); /* Synchronize. */
+
+ fp->timer_state = 1;
+ fp->reset_timer.expires = jiffies + HZ;
+ } else {
+ /* Clear the board reset. */
+ writew_u(FZA_RESET_CLR, &fp->regs->reset);
+
+ /* Enable all interrupt events we handle. */
+ writew_o(fp->int_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+
+ fp->timer_state = 0;
+ fp->reset_timer.expires = jiffies + 45 * HZ;
+ }
+ add_timer(&fp->reset_timer);
+}
+
+static int fza_set_mac_address(struct net_device *dev, void *addr)
+{
+ return -EOPNOTSUPP;
+}
+
+static netdev_tx_t fza_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ unsigned int old_mask, new_mask;
+ int ret;
+ u8 fc;
+
+ skb_push(skb, 3); /* Make room for PRH. */
+
+ /* Decode FC to set PRH. */
+ fc = skb->data[3];
+ skb->data[0] = 0;
+ skb->data[1] = 0;
+ skb->data[2] = FZA_PRH2_NORMAL;
+ if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_SYNC)
+ skb->data[0] |= FZA_PRH0_FRAME_SYNC;
+ switch (fc & FDDI_FC_K_FORMAT_MASK) {
+ case FDDI_FC_K_FORMAT_MANAGEMENT:
+ if ((fc & FDDI_FC_K_CONTROL_MASK) == 0) {
+ /* Token. */
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_IMM;
+ skb->data[1] |= FZA_PRH1_TKN_SEND_NONE;
+ } else {
+ /* SMT or MAC. */
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
+ skb->data[1] |= FZA_PRH1_TKN_SEND_UNR;
+ }
+ skb->data[1] |= FZA_PRH1_CRC_NORMAL;
+ break;
+ case FDDI_FC_K_FORMAT_LLC:
+ case FDDI_FC_K_FORMAT_FUTURE:
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
+ skb->data[1] |= FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR;
+ break;
+ case FDDI_FC_K_FORMAT_IMPLEMENTOR:
+ skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
+ skb->data[1] |= FZA_PRH1_TKN_SEND_ORIG;
+ break;
+ }
+
+ /* SMT transmit interrupts may sneak frames into the RMC
+ * transmit ring. We disable them while queueing a frame
+ * to maintain consistency.
+ */
+ old_mask = fp->int_mask;
+ new_mask = old_mask & ~FZA_MASK_SMT_TX_POLL;
+ writew_u(new_mask, &fp->regs->int_mask);
+ readw_o(&fp->regs->int_mask); /* Synchronize. */
+ fp->int_mask = new_mask;
+ ret = fza_do_xmit((union fza_buffer_txp)
+ { .data_ptr = (struct fza_buffer_tx *)skb->data },
+ skb->len, dev, 0);
+ fp->int_mask = old_mask;
+ writew_u(fp->int_mask, &fp->regs->int_mask);
+
+ if (ret) {
+ /* Probably an SMT packet filled the remaining space,
+ * so just stop the queue, but don't report it as an error.
+ */
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+ fp->stats.tx_dropped++;
+ }
+
+ dev_kfree_skb(skb);
+
+ /* The skb has been consumed in all cases, so report success. */
+ return NETDEV_TX_OK;
+}
+
+static int fza_open(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ struct fza_ring_cmd __iomem *ring;
+ struct sk_buff *skb;
+ unsigned long flags;
+ dma_addr_t dma;
+ int ret, i;
+ u32 stat;
+ long t;
+
+ for (i = 0; i < FZA_RING_RX_SIZE; i++) {
+ /* We have to 512-byte-align RX buffers... */
+ skb = fza_alloc_skb(dev, FZA_RX_BUFFER_SIZE + 511);
+ if (skb) {
+ fza_skb_align(skb, 512);
+ dma = dma_map_single(fp->bdev, skb->data,
+ FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(fp->bdev, dma)) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+ }
+ if (!skb) {
+ for (--i; i >= 0; i--) {
+ dma_unmap_single(fp->bdev, fp->rx_dma[i],
+ FZA_RX_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(fp->rx_skbuff[i]);
+ fp->rx_dma[i] = 0;
+ fp->rx_skbuff[i] = NULL;
+ }
+ return -ENOMEM;
+ }
+ fp->rx_skbuff[i] = skb;
+ fp->rx_dma[i] = dma;
+ }
+
+ ret = fza_init_send(dev, NULL);
+ if (ret != 0)
+ return ret;
+
+ /* Purger and Beacon multicasts need to be supplied before PARAM. */
+ fza_set_rx_mode(dev);
+
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->cmd_done_flag = 0;
+ ring = fza_cmd_send(dev, FZA_RING_CMD_PARAM);
+ spin_unlock_irqrestore(&fp->lock, flags);
+ if (!ring)
+ return -ENOBUFS;
+
+ t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
+ if (fp->cmd_done_flag == 0) {
+ pr_err("%s: PARAM command timed out!, state %x\n", fp->name,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ stat = readl_u(&ring->stat);
+ if (stat != FZA_RING_STAT_SUCCESS) {
+ pr_err("%s: PARAM command failed!, status %02x, state %x\n",
+ fp->name, stat,
+ FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
+ return -EIO;
+ }
+ pr_debug("%s: PARAM: %lums elapsed\n", fp->name,
+ (3 * HZ - t) * 1000 / HZ);
+
+ return 0;
+}
+
+static int fza_close(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+ unsigned long flags;
+ uint state;
+ long t;
+ int i;
+
+ netif_stop_queue(dev);
+ pr_debug("%s: queue stopped\n", fp->name);
+
+ del_timer_sync(&fp->reset_timer);
+ spin_lock_irqsave(&fp->lock, flags);
+ fp->state = FZA_STATE_UNINITIALIZED;
+ fp->state_chg_flag = 0;
+ /* Shut the interface down. */
+ writew_o(FZA_CONTROL_A_SHUT, &fp->regs->control_a);
+ readw_o(&fp->regs->control_a); /* Synchronize. */
+ spin_unlock_irqrestore(&fp->lock, flags);
+
+ /* DEC says SHUT needs up to 10 seconds to complete. */
+ t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
+ 15 * HZ);
+ state = FZA_STATUS_GET_STATE(readw_o(&fp->regs->status));
+ if (fp->state_chg_flag == 0) {
+ pr_err("%s: SHUT timed out!, state %x\n", fp->name, state);
+ return -EIO;
+ }
+ if (state != FZA_STATE_UNINITIALIZED) {
+ pr_err("%s: SHUT failed!, state %x\n", fp->name, state);
+ return -EIO;
+ }
+ pr_debug("%s: SHUT: %lums elapsed\n", fp->name,
+ (15 * HZ - t) * 1000 / HZ);
+
+ for (i = 0; i < FZA_RING_RX_SIZE; i++)
+ if (fp->rx_skbuff[i]) {
+ dma_unmap_single(fp->bdev, fp->rx_dma[i],
+ FZA_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb(fp->rx_skbuff[i]);
+ fp->rx_dma[i] = 0;
+ fp->rx_skbuff[i] = NULL;
+ }
+
+ return 0;
+}
+
+static struct net_device_stats *fza_get_stats(struct net_device *dev)
+{
+ struct fza_private *fp = netdev_priv(dev);
+
+ return &fp->stats;
+}
+
+static int fza_probe(struct device *bdev)
+{
+ static const struct net_device_ops netdev_ops = {
+ .ndo_open = fza_open,
+ .ndo_stop = fza_close,
+ .ndo_start_xmit = fza_start_xmit,
+ .ndo_set_rx_mode = fza_set_rx_mode,
+ .ndo_set_mac_address = fza_set_mac_address,
+ .ndo_get_stats = fza_get_stats,
+ };
+ static int version_printed;
+ char rom_rev[4], fw_rev[4], rmc_rev[4];
+ struct tc_dev *tdev = to_tc_dev(bdev);
+ struct fza_cmd_init __iomem *init;
+ resource_size_t start, len;
+ struct net_device *dev;
+ struct fza_private *fp;
+ uint smt_ver, pmd_type;
+ void __iomem *mmio;
+ uint hw_addr[2];
+ int ret, i;
+
+ if (!version_printed) {
+ pr_info("%s", version);
+ version_printed = 1;
+ }
+
+ dev = alloc_fddidev(sizeof(*fp));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, bdev);
+
+ fp = netdev_priv(dev);
+ dev_set_drvdata(bdev, dev);
+
+ fp->bdev = bdev;
+ fp->name = dev_name(bdev);
+
+ /* Request the I/O MEM resource. */
+ start = tdev->resource.start;
+ len = tdev->resource.end - start + 1;
+ if (!request_mem_region(start, len, dev_name(bdev))) {
+ pr_err("%s: cannot reserve MMIO region\n", fp->name);
+ ret = -EBUSY;
+ goto err_out_kfree;
+ }
+
+ /* MMIO mapping setup. */
+ mmio = ioremap_nocache(start, len);
+ if (!mmio) {
+ pr_err("%s: cannot map MMIO\n", fp->name);
+ ret = -ENOMEM;
+ goto err_out_resource;
+ }
+
+ /* Initialize the new device structure. */
+ switch (loopback) {
+ case FZA_LOOP_NORMAL:
+ case FZA_LOOP_INTERN:
+ case FZA_LOOP_EXTERN:
+ break;
+ default:
+ loopback = FZA_LOOP_NORMAL;
+ }
+
+ fp->mmio = mmio;
+ dev->irq = tdev->interrupt;
+
+ pr_info("%s: DEC FDDIcontroller 700 or 700-C at 0x%08llx, irq %d\n",
+ fp->name, (long long)tdev->resource.start, dev->irq);
+ pr_debug("%s: mapped at: 0x%p\n", fp->name, mmio);
+
+ fp->regs = mmio + FZA_REG_BASE;
+ fp->ring_cmd = mmio + FZA_RING_CMD;
+ fp->ring_uns = mmio + FZA_RING_UNS;
+
+ init_waitqueue_head(&fp->state_chg_wait);
+ init_waitqueue_head(&fp->cmd_done_wait);
+ spin_lock_init(&fp->lock);
+ fp->int_mask = FZA_MASK_NORMAL;
+
+ timer_setup(&fp->reset_timer, fza_reset_timer, 0);
+
+ /* Sanitize the board. */
+ fza_regs_dump(fp);
+ fza_do_shutdown(fp);
+
+ ret = request_irq(dev->irq, fza_interrupt, IRQF_SHARED, fp->name, dev);
+ if (ret != 0) {
+ pr_err("%s: unable to get IRQ %d!\n", fp->name, dev->irq);
+ goto err_out_map;
+ }
+
+ /* Enable the driver mode. */
+ writew_o(FZA_CONTROL_B_DRIVER, &fp->regs->control_b);
+
+ /* For some reason transmit done interrupts can trigger during
+ * reset. This avoids a division error in the handler.
+ */
+ fp->ring_rmc_tx_size = FZA_RING_TX_SIZE;
+
+ ret = fza_reset(fp);
+ if (ret != 0)
+ goto err_out_irq;
+
+ ret = fza_init_send(dev, &init);
+ if (ret != 0)
+ goto err_out_irq;
+
+ fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr));
+ memcpy(dev->dev_addr, &hw_addr, FDDI_K_ALEN);
+
+ fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev));
+ fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev));
+ fza_reads(&init->rmc_rev, &rmc_rev, sizeof(rmc_rev));
+ for (i = 3; i >= 0 && rom_rev[i] == ' '; i--)
+ rom_rev[i] = 0;
+ for (i = 3; i >= 0 && fw_rev[i] == ' '; i--)
+ fw_rev[i] = 0;
+ for (i = 3; i >= 0 && rmc_rev[i] == ' '; i--)
+ rmc_rev[i] = 0;
+
+ fp->ring_rmc_tx = mmio + readl_u(&init->rmc_tx);
+ fp->ring_rmc_tx_size = readl_u(&init->rmc_tx_size);
+ fp->ring_hst_rx = mmio + readl_u(&init->hst_rx);
+ fp->ring_hst_rx_size = readl_u(&init->hst_rx_size);
+ fp->ring_smt_tx = mmio + readl_u(&init->smt_tx);
+ fp->ring_smt_tx_size = readl_u(&init->smt_tx_size);
+ fp->ring_smt_rx = mmio + readl_u(&init->smt_rx);
+ fp->ring_smt_rx_size = readl_u(&init->smt_rx_size);
+
+ fp->buffer_tx = mmio + FZA_TX_BUFFER_ADDR(readl_u(&init->rmc_tx));
+
+ fp->t_max = readl_u(&init->def_t_max);
+ fp->t_req = readl_u(&init->def_t_req);
+ fp->tvx = readl_u(&init->def_tvx);
+ fp->lem_threshold = readl_u(&init->lem_threshold);
+ fza_reads(&init->def_station_id, &fp->station_id,
+ sizeof(fp->station_id));
+ fp->rtoken_timeout = readl_u(&init->rtoken_timeout);
+ fp->ring_purger = readl_u(&init->ring_purger);
+
+ smt_ver = readl_u(&init->smt_ver);
+ pmd_type = readl_u(&init->pmd_type);
+
+ pr_debug("%s: INIT parameters:\n", fp->name);
+ pr_debug(" tx_mode: %u\n", readl_u(&init->tx_mode));
+ pr_debug(" hst_rx_size: %u\n", readl_u(&init->hst_rx_size));
+ pr_debug(" rmc_rev: %.4s\n", rmc_rev);
+ pr_debug(" rom_rev: %.4s\n", rom_rev);
+ pr_debug(" fw_rev: %.4s\n", fw_rev);
+ pr_debug(" mop_type: %u\n", readl_u(&init->mop_type));
+ pr_debug(" hst_rx: 0x%08x\n", readl_u(&init->hst_rx));
+ pr_debug(" rmc_tx: 0x%08x\n", readl_u(&init->rmc_tx));
+ pr_debug(" rmc_tx_size: %u\n", readl_u(&init->rmc_tx_size));
+ pr_debug(" smt_tx: 0x%08x\n", readl_u(&init->smt_tx));
+ pr_debug(" smt_tx_size: %u\n", readl_u(&init->smt_tx_size));
+ pr_debug(" smt_rx: 0x%08x\n", readl_u(&init->smt_rx));
+ pr_debug(" smt_rx_size: %u\n", readl_u(&init->smt_rx_size));
+ /* TC systems are always LE, so don't bother swapping. */
+ pr_debug(" hw_addr: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ (readl_u(&init->hw_addr[0]) >> 0) & 0xff,
+ (readl_u(&init->hw_addr[0]) >> 8) & 0xff,
+ (readl_u(&init->hw_addr[0]) >> 16) & 0xff,
+ (readl_u(&init->hw_addr[0]) >> 24) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 0) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 8) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 16) & 0xff,
+ (readl_u(&init->hw_addr[1]) >> 24) & 0xff);
+ pr_debug(" def_t_req: %u\n", readl_u(&init->def_t_req));
+ pr_debug(" def_tvx: %u\n", readl_u(&init->def_tvx));
+ pr_debug(" def_t_max: %u\n", readl_u(&init->def_t_max));
+ pr_debug(" lem_threshold: %u\n", readl_u(&init->lem_threshold));
+ /* Don't bother swapping, see above. */
+ pr_debug(" def_station_id: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ (readl_u(&init->def_station_id[0]) >> 0) & 0xff,
+ (readl_u(&init->def_station_id[0]) >> 8) & 0xff,
+ (readl_u(&init->def_station_id[0]) >> 16) & 0xff,
+ (readl_u(&init->def_station_id[0]) >> 24) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 0) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 8) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 16) & 0xff,
+ (readl_u(&init->def_station_id[1]) >> 24) & 0xff);
+ pr_debug(" pmd_type_alt: %u\n", readl_u(&init->pmd_type_alt));
+ pr_debug(" smt_ver: %u\n", readl_u(&init->smt_ver));
+ pr_debug(" rtoken_timeout: %u\n", readl_u(&init->rtoken_timeout));
+ pr_debug(" ring_purger: %u\n", readl_u(&init->ring_purger));
+ pr_debug(" smt_ver_max: %u\n", readl_u(&init->smt_ver_max));
+ pr_debug(" smt_ver_min: %u\n", readl_u(&init->smt_ver_min));
+ pr_debug(" pmd_type: %u\n", readl_u(&init->pmd_type));
+
+ pr_info("%s: model %s, address %pMF\n",
+ fp->name,
+ pmd_type == FZA_PMD_TYPE_TW ?
+ "700-C (DEFZA-CA), ThinWire PMD selected" :
+ pmd_type == FZA_PMD_TYPE_STP ?
+ "700-C (DEFZA-CA), STP PMD selected" :
+ "700 (DEFZA-AA), MMF PMD",
+ dev->dev_addr);
+ pr_info("%s: ROM rev. %.4s, firmware rev. %.4s, RMC rev. %.4s, "
+ "SMT ver. %u\n", fp->name, rom_rev, fw_rev, rmc_rev, smt_ver);
+
+ /* Now that we have fetched the initial parameters, just shut the
+ * interface down until it is opened.
+ */
+ ret = fza_close(dev);
+ if (ret != 0)
+ goto err_out_irq;
+
+ /* The FZA-specific entries in the device structure. */
+ dev->netdev_ops = &netdev_ops;
+
+ ret = register_netdev(dev);
+ if (ret != 0)
+ goto err_out_irq;
+
+ pr_info("%s: registered as %s\n", fp->name, dev->name);
+ fp->name = (const char *)dev->name;
+
+ get_device(bdev);
+ return 0;
+
+err_out_irq:
+ del_timer_sync(&fp->reset_timer);
+ fza_do_shutdown(fp);
+ free_irq(dev->irq, dev);
+
+err_out_map:
+ iounmap(mmio);
+
+err_out_resource:
+ release_mem_region(start, len);
+
+err_out_kfree:
+ /* Print the message before free_netdev() releases the private
+ * data that fp points into.
+ */
+ pr_err("%s: initialization failure, aborting!\n", fp->name);
+ free_netdev(dev);
+
+ return ret;
+}
+
+static int fza_remove(struct device *bdev)
+{
+ struct net_device *dev = dev_get_drvdata(bdev);
+ struct fza_private *fp = netdev_priv(dev);
+ struct tc_dev *tdev = to_tc_dev(bdev);
+ resource_size_t start, len;
+
+ put_device(bdev);
+
+ unregister_netdev(dev);
+
+ del_timer_sync(&fp->reset_timer);
+ fza_do_shutdown(fp);
+ free_irq(dev->irq, dev);
+
+ iounmap(fp->mmio);
+
+ start = tdev->resource.start;
+ len = tdev->resource.end - start + 1;
+ release_mem_region(start, len);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct tc_device_id const fza_tc_table[] = {
+ { "DEC ", "PMAF-AA " },
+ { }
+};
+MODULE_DEVICE_TABLE(tc, fza_tc_table);
+
+static struct tc_driver fza_driver = {
+ .id_table = fza_tc_table,
+ .driver = {
+ .name = "defza",
+ .bus = &tc_bus_type,
+ .probe = fza_probe,
+ .remove = fza_remove,
+ },
+};
+
+static int fza_init(void)
+{
+ return tc_register_driver(&fza_driver);
+}
+
+static void fza_exit(void)
+{
+ tc_unregister_driver(&fza_driver);
+}
+
+module_init(fza_init);
+module_exit(fza_exit);
diff --git a/drivers/net/fddi/defza.h b/drivers/net/fddi/defza.h
new file mode 100644
index 000000000000..b06acf32738e
--- /dev/null
+++ b/drivers/net/fddi/defza.h
@@ -0,0 +1,791 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
+ *
+ * Copyright (c) 2018 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * References:
+ *
+ * Dave Sawyer & Phil Weeks & Frank Itkowsky,
+ * "DEC FDDIcontroller 700 Port Specification",
+ * Revision 1.1, Digital Equipment Corporation
+ */
+
+#include <linux/compiler.h>
+#include <linux/if_fddi.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+/* IOmem register offsets. */
+#define FZA_REG_BASE 0x100000 /* register base address */
+#define FZA_REG_RESET 0x100200 /* reset, r/w */
+#define FZA_REG_INT_EVENT 0x100400 /* interrupt event, r/w1c */
+#define FZA_REG_STATUS 0x100402 /* status, r/o */
+#define FZA_REG_INT_MASK 0x100404 /* interrupt mask, r/w */
+#define FZA_REG_CONTROL_A 0x100500 /* control A, r/w1s */
+#define FZA_REG_CONTROL_B 0x100502 /* control B, r/w */
+
+/* Reset register constants. Bits 1:0 are r/w, others are fixed at 0. */
+#define FZA_RESET_DLU 0x0002 /* OR with INIT to blast flash memory */
+#define FZA_RESET_INIT 0x0001 /* switch into the reset state */
+#define FZA_RESET_CLR 0x0000 /* run self-test and return to work */
+
+/* Interrupt event register constants. All bits are r/w1c. */
+#define FZA_EVENT_DLU_DONE 0x0800 /* flash memory write complete */
+#define FZA_EVENT_FLUSH_TX 0x0400 /* transmit ring flush request */
+#define FZA_EVENT_PM_PARITY_ERR 0x0200 /* onboard packet memory parity err */
+#define FZA_EVENT_HB_PARITY_ERR 0x0100 /* host bus parity error */
+#define FZA_EVENT_NXM_ERR 0x0080 /* non-existent memory access error;
+ * also raised for unaligned and
+ * unsupported partial-word accesses
+ */
+#define FZA_EVENT_LINK_ST_CHG 0x0040 /* link status change */
+#define FZA_EVENT_STATE_CHG 0x0020 /* adapter state change */
+#define FZA_EVENT_UNS_POLL 0x0010 /* unsolicited event service request */
+#define FZA_EVENT_CMD_DONE 0x0008 /* command done ack */
+#define FZA_EVENT_SMT_TX_POLL 0x0004 /* SMT frame transmit request */
+#define FZA_EVENT_RX_POLL 0x0002 /* receive request (packet avail.) */
+#define FZA_EVENT_TX_DONE 0x0001 /* RMC transmit done ack */
+
+/* Status register constants. All bits are r/o. */
+#define FZA_STATUS_DLU_SHIFT 0xc /* down line upgrade status bits */
+#define FZA_STATUS_DLU_MASK 0x03
+#define FZA_STATUS_LINK_SHIFT 0xb /* link status bits */
+#define FZA_STATUS_LINK_MASK 0x01
+#define FZA_STATUS_STATE_SHIFT 0x8 /* adapter state bits */
+#define FZA_STATUS_STATE_MASK 0x07
+#define FZA_STATUS_HALT_SHIFT 0x0 /* halt reason bits */
+#define FZA_STATUS_HALT_MASK 0xff
+#define FZA_STATUS_TEST_SHIFT 0x0 /* test failure bits */
+#define FZA_STATUS_TEST_MASK 0xff
+
+#define FZA_STATUS_GET_DLU(x) (((x) >> FZA_STATUS_DLU_SHIFT) & \
+ FZA_STATUS_DLU_MASK)
+#define FZA_STATUS_GET_LINK(x) (((x) >> FZA_STATUS_LINK_SHIFT) & \
+ FZA_STATUS_LINK_MASK)
+#define FZA_STATUS_GET_STATE(x) (((x) >> FZA_STATUS_STATE_SHIFT) & \
+ FZA_STATUS_STATE_MASK)
+#define FZA_STATUS_GET_HALT(x) (((x) >> FZA_STATUS_HALT_SHIFT) & \
+ FZA_STATUS_HALT_MASK)
+#define FZA_STATUS_GET_TEST(x) (((x) >> FZA_STATUS_TEST_SHIFT) & \
+ FZA_STATUS_TEST_MASK)
+
+#define FZA_DLU_FAILURE 0x0 /* DLU catastrophic error; brain dead */
+#define FZA_DLU_ERROR 0x1 /* DLU error; old firmware intact */
+#define FZA_DLU_SUCCESS 0x2 /* DLU OK; new firmware loaded */
+
+#define FZA_LINK_OFF 0x0 /* link unavailable */
+#define FZA_LINK_ON 0x1 /* link available */
+
+#define FZA_STATE_RESET 0x0 /* resetting */
+#define FZA_STATE_UNINITIALIZED 0x1 /* after a reset */
+#define FZA_STATE_INITIALIZED 0x2 /* initialized */
+#define FZA_STATE_RUNNING 0x3 /* running (link active) */
+#define FZA_STATE_MAINTENANCE 0x4 /* running (link looped back) */
+#define FZA_STATE_HALTED 0x5 /* halted (error condition) */
+
+#define FZA_HALT_UNKNOWN 0x00 /* unknown reason */
+#define FZA_HALT_HOST 0x01 /* host-directed HALT */
+#define FZA_HALT_HB_PARITY 0x02 /* host bus parity error */
+#define FZA_HALT_NXM 0x03 /* adapter non-existent memory ref. */
+#define FZA_HALT_SW 0x04 /* adapter software fault */
+#define FZA_HALT_HW 0x05 /* adapter hardware fault */
+#define FZA_HALT_PC_TRACE 0x06 /* PC Trace path test */
+#define FZA_HALT_DLSW 0x07 /* data link software fault */
+#define FZA_HALT_DLHW 0x08 /* data link hardware fault */
+
+#define FZA_TEST_FATAL 0x00 /* self-test catastrophic failure */
+#define FZA_TEST_68K 0x01 /* 68000 CPU */
+#define FZA_TEST_SRAM_BWADDR 0x02 /* SRAM byte/word address */
+#define FZA_TEST_SRAM_DBUS 0x03 /* SRAM data bus */
+#define FZA_TEST_SRAM_STUCK1 0x04 /* SRAM stuck-at range 1 */
+#define FZA_TEST_SRAM_STUCK2 0x05 /* SRAM stuck-at range 2 */
+#define FZA_TEST_SRAM_COUPL1 0x06 /* SRAM coupling range 1 */
+#define FZA_TEST_SRAM_COUPL2 0x07 /* SRAM coupling range 2 */
+#define FZA_TEST_FLASH_CRC 0x08 /* Flash CRC */
+#define FZA_TEST_ROM 0x09 /* option ROM */
+#define FZA_TEST_PHY_CSR 0x0a /* PHY CSR */
+#define FZA_TEST_MAC_BIST 0x0b /* MAC BiST */
+#define FZA_TEST_MAC_CSR 0x0c /* MAC CSR */
+#define FZA_TEST_MAC_ADDR_UNIQ 0x0d /* MAC unique address */
+#define FZA_TEST_ELM_BIST 0x0e /* ELM BiST */
+#define FZA_TEST_ELM_CSR 0x0f /* ELM CSR */
+#define FZA_TEST_ELM_ADDR_UNIQ 0x10 /* ELM unique address */
+#define FZA_TEST_CAM 0x11 /* CAM */
+#define FZA_TEST_NIROM 0x12 /* NI ROM checksum */
+#define FZA_TEST_SC_LOOP 0x13 /* SC loopback packet */
+#define FZA_TEST_LM_LOOP 0x14 /* LM loopback packet */
+#define FZA_TEST_EB_LOOP 0x15 /* EB loopback packet */
+#define FZA_TEST_SC_LOOP_BYPS 0x16 /* SC bypass loopback packet */
+#define FZA_TEST_LM_LOOP_LOCAL 0x17 /* LM local loopback packet */
+#define FZA_TEST_EB_LOOP_LOCAL 0x18 /* EB local loopback packet */
+#define FZA_TEST_CDC_LOOP 0x19 /* CDC loopback packet */
+#define FZA_TEST_FIBER_LOOP 0x1A /* FIBER loopback packet */
+#define FZA_TEST_CAM_MATCH_LOOP 0x1B /* CAM match packet loopback */
+#define FZA_TEST_68K_IRQ_STUCK 0x1C /* 68000 interrupt line stuck-at */
+#define FZA_TEST_IRQ_PRESENT 0x1D /* interrupt present register */
+#define FZA_TEST_RMC_BIST 0x1E /* RMC BiST */
+#define FZA_TEST_RMC_CSR 0x1F /* RMC CSR */
+#define FZA_TEST_RMC_ADDR_UNIQ 0x20 /* RMC unique address */
+#define FZA_TEST_PM_DPATH 0x21 /* packet memory data path */
+#define FZA_TEST_PM_ADDR 0x22 /* packet memory address */
+#define FZA_TEST_RES_23 0x23 /* reserved */
+#define FZA_TEST_PM_DESC 0x24 /* packet memory descriptor */
+#define FZA_TEST_PM_OWN 0x25 /* packet memory own bit */
+#define FZA_TEST_PM_PARITY 0x26 /* packet memory parity */
+#define FZA_TEST_PM_BSWAP 0x27 /* packet memory byte swap */
+#define FZA_TEST_PM_WSWAP 0x28 /* packet memory word swap */
+#define FZA_TEST_PM_REF 0x29 /* packet memory refresh */
+#define FZA_TEST_PM_CSR 0x2A /* PM CSR */
+#define FZA_TEST_PORT_STATUS 0x2B /* port status register */
+#define FZA_TEST_HOST_IRQMASK 0x2C /* host interrupt mask */
+#define FZA_TEST_TIMER_IRQ1 0x2D /* RTOS timer */
+#define FZA_TEST_FORCE_IRQ1 0x2E /* force RTOS IRQ1 */
+#define FZA_TEST_TIMER_IRQ5 0x2F /* IRQ5 backoff timer */
+#define FZA_TEST_FORCE_IRQ5 0x30 /* force IRQ5 */
+#define FZA_TEST_RES_31 0x31 /* reserved */
+#define FZA_TEST_IC_PRIO 0x32 /* interrupt controller priority */
+#define FZA_TEST_PM_FULL 0x33 /* full packet memory */
+#define FZA_TEST_PMI_DMA 0x34 /* PMI DMA */
+
+/* Interrupt mask register constants. All bits are r/w. */
+#define FZA_MASK_RESERVED 0xf000 /* unused */
+#define FZA_MASK_DLU_DONE 0x0800 /* flash memory write complete */
+#define FZA_MASK_FLUSH_TX 0x0400 /* transmit ring flush request */
+#define FZA_MASK_PM_PARITY_ERR 0x0200 /* onboard packet memory parity error
+ */
+#define FZA_MASK_HB_PARITY_ERR 0x0100 /* host bus parity error */
+#define FZA_MASK_NXM_ERR 0x0080 /* adapter non-existent memory
+ * reference
+ */
+#define FZA_MASK_LINK_ST_CHG 0x0040 /* link status change */
+#define FZA_MASK_STATE_CHG 0x0020 /* adapter state change */
+#define FZA_MASK_UNS_POLL 0x0010 /* unsolicited event service request */
+#define FZA_MASK_CMD_DONE 0x0008 /* command ring entry processed */
+#define FZA_MASK_SMT_TX_POLL 0x0004 /* SMT frame transmit request */
+#define FZA_MASK_RCV_POLL 0x0002 /* receive request (packet available)
+ */
+#define FZA_MASK_TX_DONE 0x0001 /* RMC transmit done acknowledge */
+
+/* Which interrupts to receive: 0/1 is mask/unmask. */
+#define FZA_MASK_NONE 0x0000
+#define FZA_MASK_NORMAL \
+ ((~(FZA_MASK_RESERVED | FZA_MASK_DLU_DONE | \
+ FZA_MASK_PM_PARITY_ERR | FZA_MASK_HB_PARITY_ERR | \
+ FZA_MASK_NXM_ERR)) & 0xffff)
+
+/* Control A register constants. */
+#define FZA_CONTROL_A_HB_PARITY_ERR 0x8000 /* host bus parity error */
+#define FZA_CONTROL_A_NXM_ERR 0x4000 /* adapter non-existent memory
+ * reference
+ */
+#define FZA_CONTROL_A_SMT_RX_OVFL 0x0040 /* SMT receive overflow */
+#define FZA_CONTROL_A_FLUSH_DONE 0x0020 /* flush tx request complete */
+#define FZA_CONTROL_A_SHUT 0x0010 /* turn the interface off */
+#define FZA_CONTROL_A_HALT 0x0008 /* halt the controller */
+#define FZA_CONTROL_A_CMD_POLL 0x0004 /* command ring poll */
+#define FZA_CONTROL_A_SMT_RX_POLL 0x0002 /* SMT receive ring poll */
+#define FZA_CONTROL_A_TX_POLL 0x0001 /* transmit poll */
+
+/* Control B register constants. All bits are r/w.
+ *
+ * Possible values:
+ * 0x0000 after booting into REX,
+ * 0x0003 after issuing `boot #/mop'.
+ */
+#define FZA_CONTROL_B_CONSOLE 0x0002 /* OR with DRIVER for console
+ * (TC firmware) mode
+ */
+#define FZA_CONTROL_B_DRIVER 0x0001 /* driver mode */
+#define FZA_CONTROL_B_IDLE 0x0000 /* no driver installed */
+
+#define FZA_RESET_PAD \
+ (FZA_REG_RESET - FZA_REG_BASE)
+#define FZA_INT_EVENT_PAD \
+ (FZA_REG_INT_EVENT - FZA_REG_RESET - sizeof(u16))
+#define FZA_CONTROL_A_PAD \
+ (FZA_REG_CONTROL_A - FZA_REG_INT_MASK - sizeof(u16))
+
+/* Layout of registers. */
+struct fza_regs {
+ u8 pad0[FZA_RESET_PAD];
+ u16 reset; /* reset register */
+ u8 pad1[FZA_INT_EVENT_PAD];
+ u16 int_event; /* interrupt event register */
+ u16 status; /* status register */
+ u16 int_mask; /* interrupt mask register */
+ u8 pad2[FZA_CONTROL_A_PAD];
+ u16 control_a; /* control A register */
+ u16 control_b; /* control B register */
+};
+
+/* Command descriptor ring entry. */
+struct fza_ring_cmd {
+ u32 cmd_own; /* bit 31: ownership, bits [30:0]: command */
+ u32 stat; /* command status */
+ u32 buffer; /* address of the buffer in the FZA space */
+ u32 pad0;
+};
+
+#define FZA_RING_CMD 0x200400 /* command ring address */
+#define FZA_RING_CMD_SIZE 0x40 /* command descriptor ring
+ * size
+ */
+/* Command constants. */
+#define FZA_RING_CMD_MASK 0x7fffffff
+#define FZA_RING_CMD_NOP 0x00000000 /* nop */
+#define FZA_RING_CMD_INIT 0x00000001 /* initialize */
+#define FZA_RING_CMD_MODCAM 0x00000002 /* modify CAM */
+#define FZA_RING_CMD_PARAM 0x00000003 /* set system parameters */
+#define FZA_RING_CMD_MODPROM 0x00000004 /* modify promiscuous mode */
+#define FZA_RING_CMD_SETCHAR 0x00000005 /* set link characteristics */
+#define FZA_RING_CMD_RDCNTR 0x00000006 /* read counters */
+#define FZA_RING_CMD_STATUS 0x00000007 /* get link status */
+#define FZA_RING_CMD_RDCAM 0x00000008 /* read CAM */
+
+/* Command status constants. */
+#define FZA_RING_STAT_SUCCESS 0x00000000
+
+/* Unsolicited event descriptor ring entry. */
+struct fza_ring_uns {
+ u32 own; /* bit 31: ownership, bits [30:0]: reserved */
+ u32 id; /* event ID */
+ u32 buffer; /* address of the buffer in the FZA space */
+ u32 pad0; /* reserved */
+};
+
+#define FZA_RING_UNS 0x200800 /* unsolicited ring address */
+#define FZA_RING_UNS_SIZE 0x40 /* unsolicited descriptor ring
+ * size
+ */
+/* Unsolicited event constants. */
+#define FZA_RING_UNS_UND 0x00000000 /* undefined event ID */
+#define FZA_RING_UNS_INIT_IN 0x00000001 /* ring init initiated */
+#define FZA_RING_UNS_INIT_RX 0x00000002 /* ring init received */
+#define FZA_RING_UNS_BEAC_IN 0x00000003 /* ring beaconing initiated */
+#define FZA_RING_UNS_DUP_ADDR 0x00000004 /* duplicate address detected */
+#define FZA_RING_UNS_DUP_TOK 0x00000005 /* duplicate token detected */
+#define FZA_RING_UNS_PURG_ERR 0x00000006 /* ring purger error */
+#define FZA_RING_UNS_STRIP_ERR 0x00000007 /* bridge strip error */
+#define FZA_RING_UNS_OP_OSC 0x00000008 /* ring op oscillation */
+#define FZA_RING_UNS_BEAC_RX 0x00000009 /* directed beacon received */
+#define FZA_RING_UNS_PCT_IN 0x0000000a /* PC trace initiated */
+#define FZA_RING_UNS_PCT_RX 0x0000000b /* PC trace received */
+#define FZA_RING_UNS_TX_UNDER 0x0000000c /* transmit underrun */
+#define FZA_RING_UNS_TX_FAIL 0x0000000d /* transmit failure */
+#define FZA_RING_UNS_RX_OVER 0x0000000e /* receive overrun */
+
+/* RMC (Ring Memory Control) transmit descriptor ring entry. */
+struct fza_ring_rmc_tx {
+ u32 rmc; /* RMC information */
+ u32 avl; /* available for host (unused by RMC) */
+ u32 own; /* bit 31: ownership, bits [30:0]: reserved */
+ u32 pad0; /* reserved */
+};
+
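+/* Transmit buffers are accessed through the MMIO window at 0x200000;
+ * the INIT command supplies their location in 32-byte units, hence
+ * the shift by 5.
+ */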
+#define FZA_TX_BUFFER_ADDR(x) (0x200000 | (((x) & 0xffff) << 5))
+#define FZA_TX_BUFFER_SIZE 512
+struct fza_buffer_tx {
+ u32 data[FZA_TX_BUFFER_SIZE / sizeof(u32)];
+};
+
+/* Transmit ring RMC constants. */
+#define FZA_RING_TX_SOP 0x80000000 /* start of packet */
+#define FZA_RING_TX_EOP 0x40000000 /* end of packet */
+#define FZA_RING_TX_DTP 0x20000000 /* discard this packet */
+#define FZA_RING_TX_VBC 0x10000000 /* valid buffer byte count */
+#define FZA_RING_TX_DCC_MASK 0x0f000000 /* DMA completion code */
+#define FZA_RING_TX_DCC_SUCCESS 0x01000000 /* transmit succeeded */
+#define FZA_RING_TX_DCC_DTP_SOP 0x02000000 /* DTP set at SOP */
+#define FZA_RING_TX_DCC_DTP 0x04000000 /* DTP set within packet */
+#define FZA_RING_TX_DCC_ABORT 0x05000000 /* MAC-requested abort */
+#define FZA_RING_TX_DCC_PARITY 0x06000000 /* xmit data parity error */
+#define FZA_RING_TX_DCC_UNDRRUN 0x07000000 /* transmit underrun */
+#define FZA_RING_TX_XPO_MASK 0x003fe000 /* transmit packet offset */
+
+/* Host receive descriptor ring entry. */
+struct fza_ring_hst_rx {
+ u32 buf0_own; /* bit 31: ownership, bits [30:23]: unused,
+ * bits [22:0]: right-shifted address of the
+ * buffer in system memory (low buffer)
+ */
+ u32 buffer1; /* bits [31:23]: unused,
+ * bits [22:0]: right-shifted address of the
+ * buffer in system memory (high buffer)
+ */
+ u32 rmc; /* RMC information */
+ u32 pad0;
+};
+
+#define FZA_RX_BUFFER_SIZE (4096 + 512) /* buffer length */
+
+/* Receive ring RMC constants. */
+#define FZA_RING_RX_SOP 0x80000000 /* start of packet */
+#define FZA_RING_RX_EOP 0x40000000 /* end of packet */
+#define FZA_RING_RX_FSC_MASK 0x38000000 /* # of frame status bits */
+#define FZA_RING_RX_FSB_MASK 0x07c00000 /* frame status bits */
+#define FZA_RING_RX_FSB_ERR 0x04000000 /* error detected */
+#define FZA_RING_RX_FSB_ADDR 0x02000000 /* address recognized */
+#define FZA_RING_RX_FSB_COP 0x01000000 /* frame copied */
+#define FZA_RING_RX_FSB_F0 0x00800000 /* first additional flag */
+#define FZA_RING_RX_FSB_F1 0x00400000 /* second additional flag */
+#define FZA_RING_RX_BAD 0x00200000 /* bad packet */
+#define FZA_RING_RX_CRC 0x00100000 /* CRC error */
+#define FZA_RING_RX_RRR_MASK 0x000e0000 /* MAC receive status bits */
+#define FZA_RING_RX_RRR_OK 0x00000000 /* receive OK */
+#define FZA_RING_RX_RRR_SADDR 0x00020000 /* source address matched */
+#define FZA_RING_RX_RRR_DADDR 0x00040000 /* dest address not matched */
+#define FZA_RING_RX_RRR_ABORT 0x00060000 /* RMC abort */
+#define FZA_RING_RX_RRR_LENGTH 0x00080000 /* invalid length */
+#define FZA_RING_RX_RRR_FRAG 0x000a0000 /* fragment */
+#define FZA_RING_RX_RRR_FORMAT 0x000c0000 /* format error */
+#define FZA_RING_RX_RRR_RESET 0x000e0000 /* MAC reset */
+#define FZA_RING_RX_DA_MASK 0x00018000 /* daddr match status bits */
+#define FZA_RING_RX_DA_NONE 0x00000000 /* no match */
+#define FZA_RING_RX_DA_PROM 0x00008000 /* promiscuous match */
+#define FZA_RING_RX_DA_CAM 0x00010000 /* CAM entry match */
+#define FZA_RING_RX_DA_LOCAL 0x00018000 /* link addr or LLC bcast */
+#define FZA_RING_RX_SA_MASK 0x00006000 /* saddr match status bits */
+#define FZA_RING_RX_SA_NONE 0x00000000 /* no match */
+#define FZA_RING_RX_SA_ALIAS 0x00002000 /* alias address match */
+#define FZA_RING_RX_SA_CAM 0x00004000 /* CAM entry match */
+#define FZA_RING_RX_SA_LOCAL 0x00006000 /* link address match */
+
+/* SMT (Station Management) transmit/receive descriptor ring entry. */
+struct fza_ring_smt {
+ u32 own; /* bit 31: ownership, bits [30:0]: unused */
+ u32 rmc; /* RMC information */
+ u32 buffer; /* address of the buffer */
+ u32 pad0; /* reserved */
+};
+
+/* Ownership constants.
+ *
+ * Only an owner is permitted to process a given ring entry.
+ * RMC transmit ring meanings are reversed.
+ */
+#define FZA_RING_OWN_MASK 0x80000000
+#define FZA_RING_OWN_FZA 0x00000000 /* permit FZA, forbid host */
+#define FZA_RING_OWN_HOST 0x80000000 /* permit host, forbid FZA */
+#define FZA_RING_TX_OWN_RMC 0x80000000 /* permit RMC, forbid host */
+#define FZA_RING_TX_OWN_HOST 0x00000000 /* permit host, forbid RMC */
+
+/* RMC constants. */
+#define FZA_RING_PBC_MASK 0x00001fff /* frame length */
+
+/* Layout of counter buffers. */
+
+struct fza_counter {
+ u32 msw;
+ u32 lsw;
+};
+
+struct fza_counters {
+ struct fza_counter sys_buf; /* system buffer unavailable */
+ struct fza_counter tx_under; /* transmit underruns */
+ struct fza_counter tx_fail; /* transmit failures */
+ struct fza_counter rx_over; /* receive data overruns */
+ struct fza_counter frame_cnt; /* frame count */
+ struct fza_counter error_cnt; /* error count */
+ struct fza_counter lost_cnt; /* lost count */
+ struct fza_counter rinit_in; /* ring initialization initiated */
+ struct fza_counter rinit_rx; /* ring initialization received */
+ struct fza_counter beac_in; /* ring beacon initiated */
+ struct fza_counter dup_addr; /* duplicate address test failures */
+ struct fza_counter dup_tok; /* duplicate token detected */
+ struct fza_counter purg_err; /* ring purge errors */
+ struct fza_counter strip_err; /* bridge strip errors */
+ struct fza_counter pct_in; /* traces initiated */
+ struct fza_counter pct_rx; /* traces received */
+ struct fza_counter lem_rej; /* LEM rejects */
+ struct fza_counter tne_rej; /* TNE expiry rejects */
+ struct fza_counter lem_event; /* LEM events */
+ struct fza_counter lct_rej; /* LCT rejects */
+ struct fza_counter conn_cmpl; /* connections completed */
+ struct fza_counter el_buf; /* elasticity buffer errors */
+};
+
+/* Layout of command buffers. */
+
+/* INIT command buffer.
+ *
+ * Values of default link parameters given are as obtained from a
+ * DEFZA-AA rev. C03 board. The board counts time in units of 80ns.
+ */
+struct fza_cmd_init {
+ u32 tx_mode; /* transmit mode */
+ u32 hst_rx_size; /* host receive ring entries */
+
+ struct fza_counters counters; /* counters */
+
+ u8 rmc_rev[4]; /* RMC revision */
+ u8 rom_rev[4]; /* ROM revision */
+ u8 fw_rev[4]; /* firmware revision */
+
+ u32 mop_type; /* MOP device type */
+
+ u32 hst_rx; /* base of host rx descriptor ring */
+ u32 rmc_tx; /* base of RMC tx descriptor ring */
+ u32 rmc_tx_size; /* size of RMC tx descriptor ring */
+ u32 smt_tx; /* base of SMT tx descriptor ring */
+ u32 smt_tx_size; /* size of SMT tx descriptor ring */
+ u32 smt_rx; /* base of SMT rx descriptor ring */
+ u32 smt_rx_size; /* size of SMT rx descriptor ring */
+
+ u32 hw_addr[2]; /* link address */
+
+ u32 def_t_req; /* default Requested TTRT (T_REQ) --
+ * C03: 100000 [80ns]
+ */
+ u32 def_tvx; /* default Valid Transmission Time
+ * (TVX) -- C03: 32768 [80ns]
+ */
+ u32 def_t_max; /* default Maximum TTRT (T_MAX) --
+ * C03: 2162688 [80ns]
+ */
+ u32 lem_threshold; /* default LEM threshold -- C03: 8 */
+ u32 def_station_id[2]; /* default station ID */
+
+ u32 pmd_type_alt; /* alternative PMD type code */
+
+ u32 smt_ver; /* SMT version */
+
+ u32 rtoken_timeout; /* default restricted token timeout
+ * -- C03: 12500000 [80ns]
+ */
+ u32 ring_purger; /* default ring purger enable --
+ * C03: 1
+ */
+
+ u32 smt_ver_max; /* max SMT version ID */
+ u32 smt_ver_min; /* min SMT version ID */
+ u32 pmd_type; /* PMD type code */
+};
+
+/* INIT command PMD type codes. */
+#define FZA_PMD_TYPE_MMF 0 /* Multimode fiber */
+#define FZA_PMD_TYPE_TW 101 /* ThinWire */
+#define FZA_PMD_TYPE_STP 102 /* STP */
+
+/* MODCAM/RDCAM command buffer. */
+#define FZA_CMD_CAM_SIZE 64 /* CAM address entry count */
+struct fza_cmd_cam {
+ u32 hw_addr[FZA_CMD_CAM_SIZE][2]; /* CAM address entries */
+};
+
+/* PARAM command buffer.
+ *
+ * Permitted ranges given are as defined by the spec and obtained from a
+ * DEFZA-AA rev. C03 board, respectively. The rtoken_timeout field is
+ * erroneously interpreted by the C03 firmware in units of ms.
+ */
+struct fza_cmd_param {
+ u32 loop_mode; /* loopback mode */
+ u32 t_max; /* Maximum TTRT (T_MAX)
+ * def: ??? [80ns]
+ * C03: [t_req+1,4294967295] [80ns]
+ */
+ u32 t_req; /* Requested TTRT (T_REQ)
+ * def: [50000,2097151] [80ns]
+ * C03: [50001,t_max-1] [80ns]
+ */
+ u32 tvx; /* Valid Transmission Time (TVX)
+ * def: [29375,65280] [80ns]
+ * C03: [29376,65279] [80ns]
+ */
+ u32 lem_threshold; /* LEM threshold */
+ u32 station_id[2]; /* station ID */
+ u32 rtoken_timeout; /* restricted token timeout
+ * def: [0,125000000] [80ns]
+ * C03: [0,9999] [ms]
+ */
+ u32 ring_purger; /* ring purger enable: 0|1 */
+};
+
+/* Loopback modes for the PARAM command. */
+#define FZA_LOOP_NORMAL 0
+#define FZA_LOOP_INTERN 1
+#define FZA_LOOP_EXTERN 2
+
+/* MODPROM command buffer. */
+struct fza_cmd_modprom {
+ u32 llc_prom; /* LLC promiscuous enable */
+ u32 smt_prom; /* SMT promiscuous enable */
+ u32 llc_multi; /* LLC multicast promiscuous enable */
+ u32 llc_bcast; /* LLC broadcast promiscuous enable */
+};
+
+/* SETCHAR command buffer.
+ *
+ * Permitted ranges are as for the PARAM command.
+ */
+struct fza_cmd_setchar {
+ u32 t_max; /* Maximum TTRT (T_MAX) */
+ u32 t_req; /* Requested TTRT (T_REQ) */
+ u32 tvx; /* Valid Transmission Time (TVX) */
+ u32 lem_threshold; /* LEM threshold */
+ u32 rtoken_timeout; /* restricted token timeout */
+ u32 ring_purger; /* ring purger enable */
+};
+
+/* RDCNTR command buffer. */
+struct fza_cmd_rdcntr {
+ struct fza_counters counters; /* counters */
+};
+
+/* STATUS command buffer. */
+struct fza_cmd_status {
+ u32 led_state; /* LED state */
+ u32 rmt_state; /* ring management state */
+ u32 link_state; /* link state */
+ u32 dup_addr; /* duplicate address flag */
+ u32 ring_purger; /* ring purger state */
+ u32 t_neg; /* negotiated TTRT [80ns] */
+ u32 una[2]; /* upstream neighbour address */
+ u32 una_timeout; /* UNA timed out */
+ u32 strip_mode; /* frame strip mode */
+ u32 yield_mode; /* claim token yield mode */
+ u32 phy_state; /* PHY state */
+ u32 neigh_phy; /* neighbour PHY type */
+ u32 reject; /* reject reason */
+ u32 phy_lee; /* PHY link error estimate [-log10] */
+ u32 una_old[2]; /* old upstream neighbour address */
+ u32 rmt_mac; /* remote MAC indicated */
+ u32 ring_err; /* ring error reason */
+ u32 beac_rx[2]; /* sender of last directed beacon */
+ u32 un_dup_addr; /* upstream neighbour dup address flag */
+ u32 dna[2]; /* downstream neighbour address */
+ u32 dna_old[2]; /* old downstream neighbour address */
+};
+
+/* Common command buffer. */
+union fza_cmd_buf {
+ struct fza_cmd_init init;
+ struct fza_cmd_cam cam;
+ struct fza_cmd_param param;
+ struct fza_cmd_modprom modprom;
+ struct fza_cmd_setchar setchar;
+ struct fza_cmd_rdcntr rdcntr;
+ struct fza_cmd_status status;
+};
+
+/* MAC (Media Access Controller) chip packet request header constants. */
+
+/* Packet request header byte #0. */
+#define FZA_PRH0_FMT_TYPE_MASK 0xc0 /* type of packet, always zero */
+#define FZA_PRH0_TOK_TYPE_MASK 0x30 /* type of token required
+ * to send this frame
+ */
+#define FZA_PRH0_TKN_TYPE_ANY 0x30 /* use either token type */
+#define FZA_PRH0_TKN_TYPE_UNR 0x20 /* use an unrestricted token */
+#define FZA_PRH0_TKN_TYPE_RST 0x10 /* use a restricted token */
+#define FZA_PRH0_TKN_TYPE_IMM 0x00 /* send immediately, no token required
+ */
+#define FZA_PRH0_FRAME_MASK 0x08 /* type of frame to send */
+#define FZA_PRH0_FRAME_SYNC 0x08 /* send a synchronous frame */
+#define FZA_PRH0_FRAME_ASYNC 0x00 /* send an asynchronous frame */
+#define FZA_PRH0_MODE_MASK 0x04 /* send mode */
+#define FZA_PRH0_MODE_IMMED 0x04 /* an immediate mode, send regardless
+ * of the ring operational state
+ */
+#define FZA_PRH0_MODE_NORMAL 0x00 /* a normal mode, send only if ring
+ * operational
+ */
+#define FZA_PRH0_SF_MASK 0x02 /* send frame first */
+#define FZA_PRH0_SF_FIRST 0x02 /* send this frame first
+ * with this token capture
+ */
+#define FZA_PRH0_SF_NORMAL 0x00 /* treat this frame normally */
+#define FZA_PRH0_BCN_MASK 0x01 /* beacon frame */
+#define FZA_PRH0_BCN_BEACON 0x01 /* send the frame only
+ * if in the beacon state
+ */
+#define FZA_PRH0_BCN_DATA 0x00 /* send the frame only
+ * if in the data state
+ */
+/* Packet request header byte #1. */
+ /* bit 7 always zero */
+#define FZA_PRH1_SL_MASK 0x40 /* send frame last */
+#define FZA_PRH1_SL_LAST 0x40 /* send this frame last, releasing
+ * the token afterwards
+ */
+#define FZA_PRH1_SL_NORMAL 0x00 /* treat this frame normally */
+#define FZA_PRH1_CRC_MASK 0x20 /* CRC append */
+#define FZA_PRH1_CRC_NORMAL 0x20 /* calculate the CRC and append it
+ * as the FCS field to the frame
+ */
+#define FZA_PRH1_CRC_SKIP 0x00 /* leave the frame as is */
+#define FZA_PRH1_TKN_SEND_MASK 0x18 /* type of token to send after the
+ * frame if this is the last frame
+ */
+#define FZA_PRH1_TKN_SEND_ORIG 0x18 /* send a token of the same type as the
+ * originally captured one
+ */
+#define FZA_PRH1_TKN_SEND_RST 0x10 /* send a restricted token */
+#define FZA_PRH1_TKN_SEND_UNR 0x08 /* send an unrestricted token */
+#define FZA_PRH1_TKN_SEND_NONE 0x00 /* send no token */
+#define FZA_PRH1_EXTRA_FS_MASK 0x07 /* send extra frame status indicators
+ */
+#define FZA_PRH1_EXTRA_FS_ST 0x07 /* TR RR ST II */
+#define FZA_PRH1_EXTRA_FS_SS 0x06 /* TR RR SS II */
+#define FZA_PRH1_EXTRA_FS_SR 0x05 /* TR RR SR II */
+#define FZA_PRH1_EXTRA_FS_NONE1 0x04 /* TR RR II II */
+#define FZA_PRH1_EXTRA_FS_RT 0x03 /* TR RR RT II */
+#define FZA_PRH1_EXTRA_FS_RS 0x02 /* TR RR RS II */
+#define FZA_PRH1_EXTRA_FS_RR 0x01 /* TR RR RR II */
+#define FZA_PRH1_EXTRA_FS_NONE 0x00 /* TR RR II II */
+/* Packet request header byte #2. */
+#define FZA_PRH2_NORMAL 0x00 /* always zero */
+
+/* PRH used for LLC frames. */
+#define FZA_PRH0_LLC (FZA_PRH0_TKN_TYPE_UNR)
+#define FZA_PRH1_LLC (FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR)
+#define FZA_PRH2_LLC (FZA_PRH2_NORMAL)
+
+/* PRH used for SMT frames. */
+#define FZA_PRH0_SMT (FZA_PRH0_TKN_TYPE_UNR)
+#define FZA_PRH1_SMT (FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR)
+#define FZA_PRH2_SMT (FZA_PRH2_NORMAL)
+
+#if ((FZA_RING_RX_SIZE) < 2) || ((FZA_RING_RX_SIZE) > 256)
+# error FZA_RING_RX_SIZE has to be from 2 up to 256
+#endif
+#if ((FZA_RING_TX_MODE) != 0) && ((FZA_RING_TX_MODE) != 1)
+# error FZA_RING_TX_MODE has to be either 0 or 1
+#endif
+
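+/* The RMC transmit ring has 512 entries in mode 0 and 1024 in mode 1. */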
+#define FZA_RING_TX_SIZE (512 << (FZA_RING_TX_MODE))
+
+struct fza_private {
+ struct device *bdev; /* pointer to the bus device */
+ const char *name; /* printable device name */
+ void __iomem *mmio; /* MMIO ioremap cookie */
+ struct fza_regs __iomem *regs; /* pointer to FZA registers */
+
+ struct sk_buff *rx_skbuff[FZA_RING_RX_SIZE];
+ /* all skbs assigned to the host
+ * receive descriptors
+ */
+ dma_addr_t rx_dma[FZA_RING_RX_SIZE];
+ /* their corresponding DMA addresses */
+
+ struct fza_ring_cmd __iomem *ring_cmd;
+ /* pointer to the command descriptor
+ * ring
+ */
+ int ring_cmd_index; /* index to the command descriptor ring
+ * for the next command
+ */
+ struct fza_ring_uns __iomem *ring_uns;
+ /* pointer to the unsolicited
+ * descriptor ring
+ */
+ int ring_uns_index; /* index to the unsolicited descriptor
+ * ring for the next event
+ */
+
+ struct fza_ring_rmc_tx __iomem *ring_rmc_tx;
+ /* pointer to the RMC transmit
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_rmc_tx_size; /* number of entries in the RMC
+ * transmit descriptor ring (obtained
+ * from the INIT command)
+ */
+ int ring_rmc_tx_index; /* index to the RMC transmit descriptor
+ * ring for the next transmission
+ */
+ int ring_rmc_txd_index; /* index to the RMC transmit descriptor
+ * ring for the next transmit done
+ * acknowledge
+ */
+
+ struct fza_ring_hst_rx __iomem *ring_hst_rx;
+ /* pointer to the host receive
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_hst_rx_size; /* number of entries in the host
+ * receive descriptor ring (set by the
+ * INIT command)
+ */
+ int ring_hst_rx_index; /* index to the host receive descriptor
+ * ring for the next reception
+ */
+
+ struct fza_ring_smt __iomem *ring_smt_tx;
+ /* pointer to the SMT transmit
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_smt_tx_size; /* number of entries in the SMT
+ * transmit descriptor ring (obtained
+ * from the INIT command)
+ */
+ int ring_smt_tx_index; /* index to the SMT transmit descriptor
+ * ring for the next transmission
+ */
+
+ struct fza_ring_smt __iomem *ring_smt_rx;
+ /* pointer to the SMT receive
+ * descriptor ring (obtained from the
+ * INIT command)
+ */
+ int ring_smt_rx_size; /* number of entries in the SMT
+ * receive descriptor ring (obtained
+ * from the INIT command)
+ */
+ int ring_smt_rx_index; /* index to the SMT receive descriptor
+ * ring for the next reception
+ */
+
+ struct fza_buffer_tx __iomem *buffer_tx;
+ /* pointer to the RMC transmit buffers
+ */
+
+ uint state; /* adapter expected state */
+
+ spinlock_t lock; /* for device & private data access */
+ uint int_mask; /* interrupt source selector */
+
+ int cmd_done_flag; /* command completion trigger */
+ wait_queue_head_t cmd_done_wait;
+
+ int state_chg_flag; /* state change trigger */
+ wait_queue_head_t state_chg_wait;
+
+ struct timer_list reset_timer; /* RESET time-out trigger */
+ int timer_state; /* RESET trigger state */
+
+ int queue_active; /* whether to enable queueing */
+
+ struct net_device_stats stats;
+
+ uint irq_count_flush_tx; /* transmit flush irqs */
+ uint irq_count_uns_poll; /* unsolicited event irqs */
+ uint irq_count_smt_tx_poll; /* SMT transmit irqs */
+ uint irq_count_rx_poll; /* host receive irqs */
+ uint irq_count_tx_done; /* transmit done irqs */
+ uint irq_count_cmd_done; /* command done irqs */
+ uint irq_count_state_chg; /* state change irqs */
+ uint irq_count_link_st_chg; /* link status change irqs */
+
+ uint t_max; /* T_MAX */
+ uint t_req; /* T_REQ */
+ uint tvx; /* TVX */
+ uint lem_threshold; /* LEM threshold */
+ uint station_id[2]; /* station ID */
+ uint rtoken_timeout; /* restricted token timeout */
+ uint ring_purger; /* ring purger enable flag */
+};
+
+struct fza_fddihdr {
+ u8 pa[2]; /* preamble */
+ u8 sd; /* starting delimiter */
+ struct fddihdr hdr;
+} __packed;
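
FZA_RING_TX_SIZE above works out to 512 entries for mode 0 and 1024 for mode 1 (512 << mode). A standalone sketch of the same build-time validation pattern, using hypothetical names rather than the driver's:

/* Standalone sketch of the build-time validation used above: an
 * out-of-range configuration fails the compile via #error instead
 * of misbehaving at run time. Names and values are hypothetical.
 */
#define RING_RX_SIZE	256	/* must be 2..256 */
#define RING_TX_MODE	1	/* 0: 512 entries, 1: 1024 entries */

#if (RING_RX_SIZE) < 2 || (RING_RX_SIZE) > 256
# error RING_RX_SIZE has to be from 2 up to 256
#endif
#if (RING_TX_MODE) != 0 && (RING_TX_MODE) != 1
# error RING_TX_MODE has to be either 0 or 1
#endif

#define RING_TX_SIZE	(512 << (RING_TX_MODE))	/* 1024 with mode 1 */
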
diff --git a/drivers/net/fddi/skfp/ecm.c b/drivers/net/fddi/skfp/ecm.c
index eee9ba91346a..c813e462183d 100644
--- a/drivers/net/fddi/skfp/ecm.c
+++ b/drivers/net/fddi/skfp/ecm.c
@@ -30,7 +30,6 @@
*
* The following external HW dependent functions are referenced :
* sm_pm_bypass_req()
- * sm_pm_ls_latch()
* sm_pm_get_ls()
*
* The following HW dependent events are required :
@@ -356,8 +355,6 @@ static void ecm_fsm(struct s_smc *smc, int cmd)
*/
start_ecm_timer(smc,smc->s.ecm_check_poll,0) ;
smc->e.ecm_line_state = TRUE ; /* flag to pcm: report Q/HLS */
- (void) sm_pm_ls_latch(smc,PA,1) ; /* enable line state latch */
- (void) sm_pm_ls_latch(smc,PB,1) ; /* enable line state latch */
ACTIONS_DONE() ;
break ;
case EC6_CHECK :
diff --git a/drivers/net/fddi/skfp/h/cmtdef.h b/drivers/net/fddi/skfp/h/cmtdef.h
index 5d6891154367..448d66c2e372 100644
--- a/drivers/net/fddi/skfp/h/cmtdef.h
+++ b/drivers/net/fddi/skfp/h/cmtdef.h
@@ -513,7 +513,6 @@ void pcm_status_state(struct s_smc *smc, int np, int *type, int *state,
void plc_config_mux(struct s_smc *smc, int mux);
void sm_lem_evaluate(struct s_smc *smc);
void mac_update_counter(struct s_smc *smc);
-void sm_pm_ls_latch(struct s_smc *smc, int phy, int on_off);
void sm_ma_control(struct s_smc *smc, int mode);
void sm_mac_check_beacon_claim(struct s_smc *smc);
void config_mux(struct s_smc *smc, int mux);
@@ -656,14 +655,6 @@ void dump_hex(char *p, int len);
#ifndef PNMI_INIT
#define PNMI_INIT(smc) /* Nothing */
#endif
-#ifndef PNMI_GET_ID
-#define PNMI_GET_ID( smc, ndis_oid, buf, len, BytesWritten, BytesNeeded ) \
- ( 1 ? (-1) : (-1) )
-#endif
-#ifndef PNMI_SET_ID
-#define PNMI_SET_ID( smc, ndis_oid, buf, len, BytesRead, BytesNeeded, \
- set_type) ( 1 ? (-1) : (-1) )
-#endif
/*
* SMT_PANIC defines
diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c
index a9ecf923f63d..6ef44c480bd5 100644
--- a/drivers/net/fddi/skfp/pcmplc.c
+++ b/drivers/net/fddi/skfp/pcmplc.c
@@ -30,7 +30,6 @@
* The following external HW dependent functions are referenced :
* sm_pm_control()
* sm_ph_linestate()
- * sm_pm_ls_latch()
*
* The following HW dependent events are required :
* PC_QLS
@@ -1248,16 +1247,6 @@ static void sm_ph_lem_stop(struct s_smc *smc, int np)
CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ;
}
-/* ARGSUSED */
-void sm_pm_ls_latch(struct s_smc *smc, int phy, int on_off)
-/* int on_off; en- or disable ident. ls */
-{
- SK_UNUSED(smc) ;
-
- phy = phy ; on_off = on_off ;
-}
-
-
/*
* PCM pseudo code
* receive actions are called AFTER the bit n is received,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 6acb6b5718b9..a0cd1c41cf5f 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -69,6 +69,7 @@ struct geneve_dev {
struct gro_cells gro_cells;
bool collect_md;
bool use_udp6_rx_checksums;
+ bool ttl_inherit;
};
struct geneve_sock {
@@ -830,12 +831,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(rt))
return PTR_ERR(rt);
- if (skb_dst(skb)) {
- int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN -
- info->options_len;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, &rt->dst,
+ GENEVE_IPV4_HLEN + info->options_len);
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
@@ -843,7 +840,11 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = key->ttl;
} else {
tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
- ttl = key->ttl ? : ip4_dst_hoplimit(&rt->dst);
+ if (geneve->ttl_inherit)
+ ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
+ else
+ ttl = key->ttl;
+ ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
}
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
@@ -876,11 +877,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
return PTR_ERR(dst);
- if (skb_dst(skb)) {
- int mtu = dst_mtu(dst) - GENEVE_IPV6_HLEN - info->options_len;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
@@ -889,7 +886,11 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
} else {
prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
ip_hdr(skb), skb);
- ttl = key->ttl ? : ip6_dst_hoplimit(dst);
+ if (geneve->ttl_inherit)
+ ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
+ else
+ ttl = key->ttl;
+ ttl = ttl ? : ip6_dst_hoplimit(dst);
}
err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr));
if (unlikely(err))
@@ -1091,6 +1092,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_UDP_CSUM] = { .type = NLA_U8 },
[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
+ [IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 },
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1170,7 +1172,8 @@ static bool geneve_dst_addr_equal(struct ip_tunnel_info *a,
static int geneve_configure(struct net *net, struct net_device *dev,
struct netlink_ext_ack *extack,
const struct ip_tunnel_info *info,
- bool metadata, bool ipv6_rx_csum)
+ bool metadata, bool ipv6_rx_csum,
+ bool ttl_inherit)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -1219,6 +1222,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
geneve->info = *info;
geneve->collect_md = metadata;
geneve->use_udp6_rx_checksums = ipv6_rx_csum;
+ geneve->ttl_inherit = ttl_inherit;
err = register_netdevice(dev);
if (err)
@@ -1237,7 +1241,8 @@ static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port)
static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack,
struct ip_tunnel_info *info, bool *metadata,
- bool *use_udp6_rx_checksums, bool changelink)
+ bool *use_udp6_rx_checksums, bool *ttl_inherit,
+ bool changelink)
{
int attrtype;
@@ -1312,8 +1317,15 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
info->key.tun_id = tunid;
}
- if (data[IFLA_GENEVE_TTL])
+ if (data[IFLA_GENEVE_TTL_INHERIT]) {
+ if (nla_get_u8(data[IFLA_GENEVE_TTL_INHERIT]))
+ *ttl_inherit = true;
+ else
+ *ttl_inherit = false;
+ } else if (data[IFLA_GENEVE_TTL]) {
info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
+ *ttl_inherit = false;
+ }
if (data[IFLA_GENEVE_TOS])
info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
@@ -1438,17 +1450,18 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
{
bool use_udp6_rx_checksums = false;
struct ip_tunnel_info info;
+ bool ttl_inherit = false;
bool metadata = false;
int err;
init_tnl_info(&info, GENEVE_UDP_PORT);
err = geneve_nl2info(tb, data, extack, &info, &metadata,
- &use_udp6_rx_checksums, false);
+ &use_udp6_rx_checksums, &ttl_inherit, false);
if (err)
return err;
err = geneve_configure(net, dev, extack, &info, metadata,
- use_udp6_rx_checksums);
+ use_udp6_rx_checksums, ttl_inherit);
if (err)
return err;
@@ -1511,6 +1524,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_info info;
bool metadata;
bool use_udp6_rx_checksums;
+ bool ttl_inherit;
int err;
/* If the geneve device is configured for metadata (or externally
@@ -1523,8 +1537,9 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
memcpy(&info, &geneve->info, sizeof(info));
metadata = geneve->collect_md;
use_udp6_rx_checksums = geneve->use_udp6_rx_checksums;
+ ttl_inherit = geneve->ttl_inherit;
err = geneve_nl2info(tb, data, extack, &info, &metadata,
- &use_udp6_rx_checksums, true);
+ &use_udp6_rx_checksums, &ttl_inherit, true);
if (err)
return err;
@@ -1537,6 +1552,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
geneve->info = info;
geneve->collect_md = metadata;
geneve->use_udp6_rx_checksums = use_udp6_rx_checksums;
+ geneve->ttl_inherit = ttl_inherit;
geneve_unquiesce(geneve, gs4, gs6);
return 0;
@@ -1562,6 +1578,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
+ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */
0;
}
@@ -1569,6 +1586,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
struct ip_tunnel_info *info = &geneve->info;
+ bool ttl_inherit = geneve->ttl_inherit;
bool metadata = geneve->collect_md;
__u8 tmp_vni[3];
__u32 vni;
@@ -1614,6 +1632,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
#endif
+ if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -1650,7 +1671,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
return dev;
init_tnl_info(&info, dst_port);
- err = geneve_configure(net, dev, NULL, &info, true, true);
+ err = geneve_configure(net, dev, NULL, &info, true, true, false);
if (err) {
free_netdev(dev);
return ERR_PTR(err);
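
The TTL pick above leans on GCC's conditional-with-omitted-operand extension ("x ? : y"): an inherited or configured TTL of zero falls back to the route's hop limit. A minimal user-space sketch of that precedence, with made-up values:

#include <stdio.h>

/* Sketch of the geneve TTL precedence above: inherit the inner
 * header's TTL when requested, otherwise use the configured value,
 * and fall back to the route's hop limit when the result is 0.
 * All values here are illustrative.
 */
static unsigned char pick_ttl(int inherit, unsigned char inner_ttl,
			      unsigned char cfg_ttl,
			      unsigned char dst_hoplimit)
{
	unsigned char ttl = inherit ? inner_ttl : cfg_ttl;

	return ttl ? : dst_hoplimit;	/* GCC's "elvis" fallback */
}

int main(void)
{
	printf("%u\n", pick_ttl(0, 0, 0, 64));	/* 64: fallback */
	printf("%u\n", pick_ttl(0, 0, 10, 64));	/* 10: configured */
	printf("%u\n", pick_ttl(1, 32, 0, 64));	/* 32: inherited */
	return 0;
}

With a matching iproute2 the new attribute would be driven by something like "ip link add ... type geneve ... ttl inherit" (iproute2 support assumed).
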
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index d79a69dd2146..17e6dcd2eb42 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -34,7 +34,6 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/semaphore.h>
-#include <linux/compat.h>
#include <linux/refcount.h>
#define SIXPACK_VERSION "Revision: 0.3.0"
@@ -752,23 +751,6 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
return err;
}
-#ifdef CONFIG_COMPAT
-static long sixpack_compat_ioctl(struct tty_struct * tty, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case SIOCGIFNAME:
- case SIOCGIFENCAP:
- case SIOCSIFENCAP:
- case SIOCSIFHWADDR:
- return sixpack_ioctl(tty, file, cmd,
- (unsigned long)compat_ptr(arg));
- }
-
- return -ENOIOCTLCMD;
-}
-#endif
-
static struct tty_ldisc_ops sp_ldisc = {
.owner = THIS_MODULE,
.magic = TTY_LDISC_MAGIC,
@@ -776,9 +758,6 @@ static struct tty_ldisc_ops sp_ldisc = {
.open = sixpack_open,
.close = sixpack_close,
.ioctl = sixpack_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = sixpack_compat_ioctl,
-#endif
.receive_buf = sixpack_receive_buf,
.write_wakeup = sixpack_write_wakeup,
};
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 13e4c1eff353..802233d41b25 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -35,7 +35,6 @@
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/jiffies.h>
-#include <linux/compat.h>
#include <net/ax25.h>
@@ -875,23 +874,6 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
return err;
}
-#ifdef CONFIG_COMPAT
-static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case SIOCGIFNAME:
- case SIOCGIFENCAP:
- case SIOCSIFENCAP:
- case SIOCSIFHWADDR:
- return mkiss_ioctl(tty, file, cmd,
- (unsigned long)compat_ptr(arg));
- }
-
- return -ENOIOCTLCMD;
-}
-#endif
-
/*
* Handle the 'receiver data ready' interrupt.
* This function is called by the 'tty_io' module in the kernel when
@@ -966,9 +948,6 @@ static struct tty_ldisc_ops ax_ldisc = {
.open = mkiss_open,
.close = mkiss_close,
.ioctl = mkiss_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mkiss_compat_ioctl,
-#endif
.receive_buf = mkiss_receive_buf,
.write_wakeup = mkiss_write_wakeup
};
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 16ec7af6ab7b..ba9df430fca6 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -966,6 +966,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
sizeof(struct yamdrv_ioctl_mcs));
if (IS_ERR(ym))
return PTR_ERR(ym);
+ if (ym->cmd != SIOCYAMSMCS)
+ return -EINVAL;
if (ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
@@ -981,6 +983,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
+ if (yi.cmd != SIOCYAMSCFG)
+ return -EINVAL;
if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
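
The two checks added to yam_ioctl() above make the driver verify that the cmd field embedded in the copied-in payload matches the ioctl actually being serviced before trusting the rest of the structure. A minimal sketch of that validate-after-copy pattern; the type and constants are illustrative stand-ins, not the driver's:

/* Minimal sketch of the validate-after-copy pattern above: a payload
 * copied from user space embeds a cmd field that must match the
 * ioctl being serviced before any other field is trusted.
 */
struct ioctl_mcs {
	int cmd;
	unsigned int bitrate;
};

#define CMD_SET_MCS	0x5001	/* hypothetical SIOCYAMSMCS analogue */
#define MAX_BITRATE	57600

static int handle_set_mcs(const struct ioctl_mcs *ym)
{
	if (ym->cmd != CMD_SET_MCS)
		return -1;	/* payload does not match the ioctl */
	if (ym->bitrate > MAX_BITRATE)
		return -1;	/* then range-check the contents */
	return 0;
}
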
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index a32ded5b4f41..ef6f766f6389 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -185,7 +185,9 @@ struct rndis_device {
/* Interface */
struct rndis_message;
+struct ndis_offload_params;
struct netvsc_device;
+struct netvsc_channel;
struct net_device_context;
extern u32 netvsc_ring_bytes;
@@ -203,10 +205,7 @@ void netvsc_linkstatus_callback(struct net_device *net,
struct rndis_message *resp);
int netvsc_recv_callback(struct net_device *net,
struct netvsc_device *nvdev,
- struct vmbus_channel *channel,
- void *data, u32 len,
- const struct ndis_tcp_ip_checksum_info *csum_info,
- const struct ndis_pkt_8021q_info *vlan);
+ struct netvsc_channel *nvchan);
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
@@ -220,9 +219,12 @@ void rndis_filter_device_remove(struct hv_device *dev,
struct netvsc_device *nvdev);
int rndis_filter_set_rss_param(struct rndis_device *rdev,
const u8 *key);
+int rndis_filter_set_offload_params(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct ndis_offload_params *req_offloads);
int rndis_filter_receive(struct net_device *ndev,
struct netvsc_device *net_dev,
- struct vmbus_channel *channel,
+ struct netvsc_channel *nvchan,
void *data, u32 buflen);
int rndis_filter_set_device_mac(struct netvsc_device *ndev,
@@ -524,6 +526,8 @@ struct nvsp_2_vsc_capability {
u64 ieee8021q:1;
u64 correlation_id:1;
u64 teaming:1;
+ u64 vsubnetid:1;
+ u64 rsc:1;
};
};
} __packed;
@@ -826,7 +830,7 @@ struct nvsp_message {
#define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \
NETIF_F_TSO | NETIF_F_IPV6_CSUM | \
- NETIF_F_TSO6)
+ NETIF_F_TSO6 | NETIF_F_LRO)
#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64
@@ -852,6 +856,18 @@ struct multi_recv_comp {
u32 next; /* next entry for writing */
};
+#define NVSP_RSC_MAX 562 /* Max #RSC frags in a vmbus xfer page pkt */
+
+struct nvsc_rsc {
+ const struct ndis_pkt_8021q_info *vlan;
+ const struct ndis_tcp_ip_checksum_info *csum_info;
+ u8 is_last; /* last RNDIS msg in a vmtransfer_page */
+ u32 cnt; /* #fragments in an RSC packet */
+ u32 pktlen; /* Full packet length */
+ void *data[NVSP_RSC_MAX];
+ u32 len[NVSP_RSC_MAX];
+};
+
struct netvsc_stats {
u64 packets;
u64 bytes;
@@ -955,6 +971,7 @@ struct netvsc_channel {
struct multi_send_data msd;
struct multi_recv_comp mrc;
atomic_t queue_sends;
+ struct nvsc_rsc rsc;
struct netvsc_stats tx_stats;
struct netvsc_stats rx_stats;
@@ -1136,7 +1153,8 @@ struct rndis_oobd {
/* Packet extension field contents associated with a Data message. */
struct rndis_per_packet_info {
u32 size;
- u32 type;
+ u32 type:31;
+ u32 internal:1;
u32 ppi_offset;
};
@@ -1157,6 +1175,25 @@ enum ndis_per_pkt_info_type {
MAX_PER_PKT_INFO
};
+enum rndis_per_pkt_info_interal_type {
+ RNDIS_PKTINFO_ID = 1,
+ /* Add more members here */
+
+ RNDIS_PKTINFO_MAX
+};
+
+#define RNDIS_PKTINFO_SUBALLOC BIT(0)
+#define RNDIS_PKTINFO_1ST_FRAG BIT(1)
+#define RNDIS_PKTINFO_LAST_FRAG BIT(2)
+
+#define RNDIS_PKTINFO_ID_V1 1
+
+struct rndis_pktinfo_id {
+ u8 ver;
+ u8 flag;
+ u16 pkt_id;
+};
+
struct ndis_pkt_8021q_info {
union {
struct {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 31c3d77b4733..922054c1d544 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -542,6 +542,9 @@ static int negotiate_nvsp_ver(struct hv_device *device,
init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
}
+ if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
+ init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
+
trace_nvsp_send(ndev, init_packet);
ret = vmbus_sendpacket(device->channel, init_packet,
@@ -1111,11 +1114,12 @@ static void enq_receive_complete(struct net_device *ndev,
static int netvsc_receive(struct net_device *ndev,
struct netvsc_device *net_device,
- struct vmbus_channel *channel,
+ struct netvsc_channel *nvchan,
const struct vmpacket_descriptor *desc,
const struct nvsp_message *nvsp)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct vmbus_channel *channel = nvchan->channel;
const struct vmtransfer_page_packet_header *vmxferpage_packet
= container_of(desc, const struct vmtransfer_page_packet_header, d);
u16 q_idx = channel->offermsg.offer.sub_channel_index;
@@ -1150,6 +1154,7 @@ static int netvsc_receive(struct net_device *ndev,
int ret;
if (unlikely(offset + buflen > net_device->recv_buf_size)) {
+ nvchan->rsc.cnt = 0;
status = NVSP_STAT_FAIL;
netif_err(net_device_ctx, rx_err, ndev,
"Packet offset:%u + len:%u too big\n",
@@ -1160,11 +1165,13 @@ static int netvsc_receive(struct net_device *ndev,
data = recv_buf + offset;
+ nvchan->rsc.is_last = (i == count - 1);
+
trace_rndis_recv(ndev, q_idx, data);
/* Pass it to the upper layer */
ret = rndis_filter_receive(ndev, net_device,
- channel, data, buflen);
+ nvchan, data, buflen);
if (unlikely(ret != NVSP_STAT_SUCCESS))
status = NVSP_STAT_FAIL;
@@ -1203,6 +1210,9 @@ static void netvsc_send_vf(struct net_device *ndev,
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+ netdev_info(ndev, "VF slot %u %s\n",
+ net_device_ctx->vf_serial,
+ net_device_ctx->vf_alloc ? "added" : "removed");
}
static void netvsc_receive_inband(struct net_device *ndev,
@@ -1220,12 +1230,13 @@ static void netvsc_receive_inband(struct net_device *ndev,
}
static int netvsc_process_raw_pkt(struct hv_device *device,
- struct vmbus_channel *channel,
+ struct netvsc_channel *nvchan,
struct netvsc_device *net_device,
struct net_device *ndev,
const struct vmpacket_descriptor *desc,
int budget)
{
+ struct vmbus_channel *channel = nvchan->channel;
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
trace_nvsp_recv(ndev, channel, nvmsg);
@@ -1237,7 +1248,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- return netvsc_receive(ndev, net_device, channel,
+ return netvsc_receive(ndev, net_device, nvchan,
desc, nvmsg);
break;
@@ -1281,7 +1292,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
nvchan->desc = hv_pkt_iter_first(channel);
while (nvchan->desc && work_done < budget) {
- work_done += netvsc_process_raw_pkt(device, channel, net_device,
+ work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
ndev, nvchan->desc, budget);
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 70921bbe0e28..cf36e7ff3191 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -226,6 +226,7 @@ static inline void *init_ppi_data(struct rndis_message *msg,
ppi->size = ppi_size;
ppi->type = pkt_type;
+ ppi->internal = 0;
ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
rndis_pkt->per_pkt_info_len += ppi_size;
@@ -744,14 +745,16 @@ void netvsc_linkstatus_callback(struct net_device *net,
}
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
- struct napi_struct *napi,
- const struct ndis_tcp_ip_checksum_info *csum_info,
- const struct ndis_pkt_8021q_info *vlan,
- void *data, u32 buflen)
+ struct netvsc_channel *nvchan)
{
+ struct napi_struct *napi = &nvchan->napi;
+ const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
+ const struct ndis_tcp_ip_checksum_info *csum_info =
+ nvchan->rsc.csum_info;
struct sk_buff *skb;
+ int i;
- skb = napi_alloc_skb(napi, buflen);
+ skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
if (!skb)
return skb;
@@ -759,7 +762,8 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
- skb_put_data(skb, data, buflen);
+ for (i = 0; i < nvchan->rsc.cnt; i++)
+ skb_put_data(skb, nvchan->rsc.data[i], nvchan->rsc.len[i]);
skb->protocol = eth_type_trans(skb, net);
@@ -792,14 +796,11 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
*/
int netvsc_recv_callback(struct net_device *net,
struct netvsc_device *net_device,
- struct vmbus_channel *channel,
- void *data, u32 len,
- const struct ndis_tcp_ip_checksum_info *csum_info,
- const struct ndis_pkt_8021q_info *vlan)
+ struct netvsc_channel *nvchan)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct vmbus_channel *channel = nvchan->channel;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
- struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
struct sk_buff *skb;
struct netvsc_stats *rx_stats;
@@ -807,8 +808,8 @@ int netvsc_recv_callback(struct net_device *net,
return NVSP_STAT_FAIL;
/* Allocate a skb - TODO direct I/O to pages? */
- skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
- csum_info, vlan, data, len);
+ skb = netvsc_alloc_recv_skb(net, nvchan);
+
if (unlikely(!skb)) {
++net_device_ctx->eth_stats.rx_no_memory;
rcu_read_unlock();
@@ -825,7 +826,7 @@ int netvsc_recv_callback(struct net_device *net,
rx_stats = &nvchan->rx_stats;
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
- rx_stats->bytes += len;
+ rx_stats->bytes += nvchan->rsc.pktlen;
if (skb->pkt_type == PACKET_BROADCAST)
++rx_stats->broadcast;
@@ -1006,6 +1007,8 @@ static void netvsc_init_settings(struct net_device *dev)
ndc->speed = SPEED_UNKNOWN;
ndc->duplex = DUPLEX_FULL;
+
+ dev->features = NETIF_F_LRO;
}
static int netvsc_get_link_ksettings(struct net_device *dev,
@@ -1562,26 +1565,6 @@ netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
return -EOPNOTSUPP;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netvsc_poll_controller(struct net_device *dev)
-{
- struct net_device_context *ndc = netdev_priv(dev);
- struct netvsc_device *ndev;
- int i;
-
- rcu_read_lock();
- ndev = rcu_dereference(ndc->nvdev);
- if (ndev) {
- for (i = 0; i < ndev->num_chn; i++) {
- struct netvsc_channel *nvchan = &ndev->chan_table[i];
-
- napi_schedule(&nvchan->napi);
- }
- }
- rcu_read_unlock();
-}
-#endif
-
static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
return NETVSC_HASH_KEYLEN;
@@ -1733,6 +1716,33 @@ static int netvsc_set_ringparam(struct net_device *ndev,
return ret;
}
+static int netvsc_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t change = features ^ ndev->features;
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
+ struct ndis_offload_params offloads;
+
+ if (!nvdev || nvdev->destroy)
+ return -ENODEV;
+
+ if (!(change & NETIF_F_LRO))
+ return 0;
+
+ memset(&offloads, 0, sizeof(struct ndis_offload_params));
+
+ if (features & NETIF_F_LRO) {
+ offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
+ offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
+ } else {
+ offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
+ offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
+ }
+
+ return rndis_filter_set_offload_params(ndev, nvdev, &offloads);
+}
+
static u32 netvsc_get_msglevel(struct net_device *ndev)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
@@ -1776,14 +1786,12 @@ static const struct net_device_ops device_ops = {
.ndo_start_xmit = netvsc_start_xmit,
.ndo_change_rx_flags = netvsc_change_rx_flags,
.ndo_set_rx_mode = netvsc_set_rx_mode,
+ .ndo_set_features = netvsc_set_features,
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
.ndo_select_queue = netvsc_select_queue,
.ndo_get_stats64 = netvsc_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = netvsc_poll_controller,
-#endif
};
/*
@@ -1894,20 +1902,6 @@ out_unlock:
rtnl_unlock();
}
-static struct net_device *get_netvsc_bymac(const u8 *mac)
-{
- struct net_device_context *ndev_ctx;
-
- list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
- struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
-
- if (ether_addr_equal(mac, dev->perm_addr))
- return dev;
- }
-
- return NULL;
-}
-
static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
struct net_device_context *net_device_ctx;
@@ -2036,26 +2030,55 @@ static void netvsc_vf_setup(struct work_struct *w)
rtnl_unlock();
}
+/* Find netvsc by VF serial number.
+ * The PCI hyperv controller records the serial number as the slot kobj name.
+ */
+static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+{
+ struct device *parent = vf_netdev->dev.parent;
+ struct net_device_context *ndev_ctx;
+ struct pci_dev *pdev;
+ u32 serial;
+
+ if (!parent || !dev_is_pci(parent))
+ return NULL; /* not a PCI device */
+
+ pdev = to_pci_dev(parent);
+ if (!pdev->slot) {
+ netdev_notice(vf_netdev, "no PCI slot information\n");
+ return NULL;
+ }
+
+ if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
+ netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
+ pci_slot_name(pdev->slot));
+ return NULL;
+ }
+
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ if (!ndev_ctx->vf_alloc)
+ continue;
+
+ if (ndev_ctx->vf_serial == serial)
+ return hv_get_drvdata(ndev_ctx->device_ctx);
+ }
+
+ netdev_notice(vf_netdev,
+ "no netdev found for vf serial:%u\n", serial);
+ return NULL;
+}
+
static int netvsc_register_vf(struct net_device *vf_netdev)
{
- struct net_device *ndev;
struct net_device_context *net_device_ctx;
- struct device *pdev = vf_netdev->dev.parent;
struct netvsc_device *netvsc_dev;
+ struct net_device *ndev;
int ret;
if (vf_netdev->addr_len != ETH_ALEN)
return NOTIFY_DONE;
- if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
- return NOTIFY_DONE;
-
- /*
- * We will use the MAC address to locate the synthetic interface to
- * associate with the VF interface. If we don't find a matching
- * synthetic interface, move on.
- */
- ndev = get_netvsc_bymac(vf_netdev->perm_addr);
+ ndev = get_netvsc_byslot(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
@@ -2272,17 +2295,15 @@ static int netvsc_remove(struct hv_device *dev)
cancel_delayed_work_sync(&ndev_ctx->dwork);
- rcu_read_lock();
- nvdev = rcu_dereference(ndev_ctx->nvdev);
-
- if (nvdev)
+ rtnl_lock();
+ nvdev = rtnl_dereference(ndev_ctx->nvdev);
+ if (nvdev)
cancel_work_sync(&nvdev->subchan_work);
/*
* Call to the vsc driver to let it know that the device is being
* removed. Also blocks mtu and channel changes.
*/
- rtnl_lock();
vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
if (vf_netdev)
netvsc_unregister_vf(vf_netdev);
@@ -2294,7 +2315,6 @@ static int netvsc_remove(struct hv_device *dev)
list_del(&ndev_ctx->list);
rtnl_unlock();
- rcu_read_unlock();
hv_set_drvdata(dev, NULL);
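
netvsc_set_features() above XORs the old and new feature sets so that toggles not touching NETIF_F_LRO return early. A small user-space sketch of that delta test, with hypothetical flag values; at run time the path would typically be exercised via ethtool -K (e.g. "ethtool -K eth0 lro off", interface name assumed):

#include <stdio.h>

/* Sketch of the feature-delta test in netvsc_set_features() above:
 * XOR yields exactly the bits that changed, so unrelated toggles are
 * ignored cheaply. The flag values are hypothetical stand-ins for
 * the kernel's NETIF_F_* bits.
 */
#define F_LRO		(1u << 0)
#define F_RXCSUM	(1u << 1)

static int set_features(unsigned int old_f, unsigned int new_f)
{
	unsigned int change = old_f ^ new_f;

	if (!(change & F_LRO))
		return 0;			/* LRO state unchanged */

	return (new_f & F_LRO) ? 1 : -1;	/* enable or disable RSC */
}

int main(void)
{
	printf("%d\n", set_features(F_LRO, F_LRO | F_RXCSUM));	/* 0 */
	printf("%d\n", set_features(0, F_LRO));			/* 1 */
	return 0;
}
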
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2a5209f23f29..8b537a049c1e 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -342,7 +342,8 @@ static void rndis_filter_receive_response(struct net_device *ndev,
* Get the Per-Packet-Info with the specified type
* return NULL if not found.
*/
-static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
+static inline void *rndis_get_ppi(struct rndis_packet *rpkt,
+ u32 type, u8 internal)
{
struct rndis_per_packet_info *ppi;
int len;
@@ -355,7 +356,7 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
len = rpkt->per_pkt_info_len;
while (len > 0) {
- if (ppi->type == type)
+ if (ppi->type == type && ppi->internal == internal)
return (void *)((ulong)ppi + ppi->ppi_offset);
len -= ppi->size;
ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
@@ -364,17 +365,41 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
return NULL;
}
+static inline
+void rsc_add_data(struct netvsc_channel *nvchan,
+ const struct ndis_pkt_8021q_info *vlan,
+ const struct ndis_tcp_ip_checksum_info *csum_info,
+ void *data, u32 len)
+{
+ u32 cnt = nvchan->rsc.cnt;
+
+ if (cnt) {
+ nvchan->rsc.pktlen += len;
+ } else {
+ nvchan->rsc.vlan = vlan;
+ nvchan->rsc.csum_info = csum_info;
+ nvchan->rsc.pktlen = len;
+ }
+
+ nvchan->rsc.data[cnt] = data;
+ nvchan->rsc.len[cnt] = len;
+ nvchan->rsc.cnt++;
+}
+
static int rndis_filter_receive_data(struct net_device *ndev,
struct netvsc_device *nvdev,
- struct vmbus_channel *channel,
+ struct netvsc_channel *nvchan,
struct rndis_message *msg,
u32 data_buflen)
{
struct rndis_packet *rndis_pkt = &msg->msg.pkt;
const struct ndis_tcp_ip_checksum_info *csum_info;
const struct ndis_pkt_8021q_info *vlan;
+ const struct rndis_pktinfo_id *pktinfo_id;
u32 data_offset;
void *data;
+ bool rsc_more = false;
+ int ret;
/* Remove the rndis header and pass it back up the stack */
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
@@ -393,25 +418,59 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return NVSP_STAT_FAIL;
}
- vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
+ vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO, 0);
+
+ csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO, 0);
- csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
+ pktinfo_id = rndis_get_ppi(rndis_pkt, RNDIS_PKTINFO_ID, 1);
data = (void *)msg + data_offset;
- /*
- * Remove the rndis trailer padding from rndis packet message
+ /* Identify RSC frags, drop erroneous packets */
+ if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
+ if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
+ nvchan->rsc.cnt = 0;
+ else if (nvchan->rsc.cnt == 0)
+ goto drop;
+
+ rsc_more = true;
+
+ if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
+ rsc_more = false;
+
+ if (rsc_more && nvchan->rsc.is_last)
+ goto drop;
+ } else {
+ nvchan->rsc.cnt = 0;
+ }
+
+ if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
+ goto drop;
+
+ /* Put data into per channel structure.
+ * Also, remove the rndis trailer padding from the rndis packet message;
+ * rndis_pkt->data_len tells us the real data length, so we only copy
+ * the data packet to the stack, without the rndis trailer padding
*/
- return netvsc_recv_callback(ndev, nvdev, channel,
- data, rndis_pkt->data_len,
- csum_info, vlan);
+ rsc_add_data(nvchan, vlan, csum_info, data, rndis_pkt->data_len);
+
+ if (rsc_more)
+ return NVSP_STAT_SUCCESS;
+
+ ret = netvsc_recv_callback(ndev, nvdev, nvchan);
+ nvchan->rsc.cnt = 0;
+
+ return ret;
+
+drop:
+ /* Drop incomplete packet */
+ nvchan->rsc.cnt = 0;
+ return NVSP_STAT_FAIL;
}
int rndis_filter_receive(struct net_device *ndev,
struct netvsc_device *net_dev,
- struct vmbus_channel *channel,
+ struct netvsc_channel *nvchan,
void *data, u32 buflen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -422,7 +481,7 @@ int rndis_filter_receive(struct net_device *ndev,
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
- return rndis_filter_receive_data(ndev, net_dev, channel,
+ return rndis_filter_receive_data(ndev, net_dev, nvchan,
rndis_msg, buflen);
case RNDIS_MSG_INIT_C:
case RNDIS_MSG_QUERY_C:
@@ -657,7 +716,7 @@ cleanup:
return ret;
}
-static int
+int
rndis_filter_set_offload_params(struct net_device *ndev,
struct netvsc_device *nvdev,
struct ndis_offload_params *req_offloads)
@@ -1184,6 +1243,18 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
}
}
+ if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
+ net->hw_features |= NETIF_F_LRO;
+
+ if (net->features & NETIF_F_LRO) {
+ offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
+ offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
+ } else {
+ offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
+ offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
+ }
+ }
+
/* In case some hw_features disappeared we need to remove them from
* net->features list as they're no longer supported.
*/
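
Taken together, rsc_add_data() and the fragment-flag handling in rndis_filter_receive_data() above form an accumulate-then-deliver scheme: fragments are queued per channel and the packet is passed up only when the last one arrives. A simplified sketch of that flow; the types and the deliver() hook are stand-ins for the driver's own:

/* Simplified sketch of the RSC accumulation above: fragments are
 * appended to a per-channel array and the packet is delivered only
 * once the last fragment arrives.
 */
#define RSC_MAX 562

struct rsc_state {
	unsigned int cnt;
	unsigned int pktlen;
	void *data[RSC_MAX];
	unsigned int len[RSC_MAX];
};

static int rsc_add(struct rsc_state *rsc, void *data, unsigned int len,
		   int first, int last,
		   int (*deliver)(struct rsc_state *))
{
	if (first)
		rsc->cnt = 0;		/* start a new packet */
	else if (!rsc->cnt)
		return -1;		/* continuation without a start */

	if (rsc->cnt >= RSC_MAX)
		return -1;		/* too many fragments: drop */

	rsc->pktlen = rsc->cnt ? rsc->pktlen + len : len;
	rsc->data[rsc->cnt] = data;
	rsc->len[rsc->cnt] = len;
	rsc->cnt++;

	if (!last)
		return 0;		/* wait for the final fragment */

	return deliver(rsc);		/* full packet: hand it up */
}
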
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 23a52b9293f3..cd1d8faccca5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi)
{
struct adf7242_local *lp = spi_get_drvdata(spi);
- if (!IS_ERR_OR_NULL(lp->debugfs_root))
- debugfs_remove_recursive(lp->debugfs_root);
+ debugfs_remove_recursive(lp->debugfs_root);
cancel_delayed_work_sync(&lp->work);
destroy_workqueue(lp->wqueue);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 58299fb666ed..0ff5a403a8dc 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write(
for (i = 0; i < len; i++)
dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]);
- fifo_buffer = kmalloc(len, GFP_KERNEL);
+ fifo_buffer = kmemdup(buf, len, GFP_KERNEL);
if (!fifo_buffer)
return -ENOMEM;
- memcpy(fifo_buffer, buf, len);
kfifo_in(&test->up_fifo, &fifo_buffer, 4);
wake_up_interruptible(&priv->test.readq);
@@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
{
struct ca8210_test *test = &priv->test;
- if (!IS_ERR(test->ca8210_dfs_spi_int))
- debugfs_remove(test->ca8210_dfs_spi_int);
+ debugfs_remove(test->ca8210_dfs_spi_int);
kfifo_free(&test->up_fifo);
dev_info(&priv->spi->dev, "Test interface removed\n");
}
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index bf70ab892e69..51b5198d5943 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -37,8 +37,6 @@ MODULE_LICENSE("GPL");
static LIST_HEAD(hwsim_phys);
static DEFINE_MUTEX(hwsim_phys_lock);
-static LIST_HEAD(hwsim_ifup_phys);
-
static struct platform_device *mac802154hwsim_dev;
/* MAC802154_HWSIM netlink family */
@@ -85,7 +83,6 @@ struct hwsim_phy {
struct list_head edges;
struct list_head list;
- struct list_head list_ifup;
};
static int hwsim_add_one(struct genl_info *info, struct device *dev,
@@ -159,9 +156,6 @@ static int hwsim_hw_start(struct ieee802154_hw *hw)
struct hwsim_phy *phy = hw->priv;
phy->suspended = false;
- list_add_rcu(&phy->list_ifup, &hwsim_ifup_phys);
- synchronize_rcu();
-
return 0;
}
@@ -170,8 +164,6 @@ static void hwsim_hw_stop(struct ieee802154_hw *hw)
struct hwsim_phy *phy = hw->priv;
phy->suspended = true;
- list_del_rcu(&phy->list_ifup);
- synchronize_rcu();
}
static int
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index e428277781ac..44de81e5f140 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -132,11 +132,6 @@ static const struct reg_sequence mar20a_iar_overwrites[] = {
};
#define MCR20A_VALID_CHANNELS (0x07FFF800)
-
-struct mcr20a_platform_data {
- int rst_gpio;
-};
-
#define MCR20A_MAX_BUF (127)
#define printdev(X) (&X->spi->dev)
@@ -412,7 +407,6 @@ struct mcr20a_local {
struct spi_device *spi;
struct ieee802154_hw *hw;
- struct mcr20a_platform_data *pdata;
struct regmap *regmap_dar;
struct regmap *regmap_iar;
@@ -903,19 +897,19 @@ mcr20a_irq_clean_complete(void *context)
switch (seq_state) {
/* TX IRQ, RX IRQ and SEQ IRQ */
- case (0x03):
+ case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
if (lp->is_tx) {
lp->is_tx = 0;
dev_dbg(printdev(lp), "TX is done. No ACK\n");
mcr20a_handle_tx_complete(lp);
}
break;
- case (0x05):
+ case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
/* rx is starting */
dev_dbg(printdev(lp), "RX is starting\n");
mcr20a_handle_rx(lp);
break;
- case (0x07):
+ case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
if (lp->is_tx) {
/* tx is done */
lp->is_tx = 0;
@@ -927,7 +921,7 @@ mcr20a_irq_clean_complete(void *context)
mcr20a_handle_rx(lp);
}
break;
- case (0x01):
+ case (DAR_IRQSTS1_SEQIRQ):
if (lp->is_tx) {
dev_dbg(printdev(lp), "TX is starting\n");
mcr20a_handle_tx(lp);
@@ -976,20 +970,6 @@ static irqreturn_t mcr20a_irq_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int mcr20a_get_platform_data(struct spi_device *spi,
- struct mcr20a_platform_data *pdata)
-{
- int ret = 0;
-
- if (!spi->dev.of_node)
- return -EINVAL;
-
- pdata->rst_gpio = of_get_named_gpio(spi->dev.of_node, "rst_b-gpio", 0);
- dev_dbg(&spi->dev, "rst_b-gpio: %d\n", pdata->rst_gpio);
-
- return ret;
-}
-
static void mcr20a_hw_setup(struct mcr20a_local *lp)
{
u8 i;
@@ -1249,7 +1229,7 @@ mcr20a_probe(struct spi_device *spi)
{
struct ieee802154_hw *hw;
struct mcr20a_local *lp;
- struct mcr20a_platform_data *pdata;
+ struct gpio_desc *rst_b;
int irq_type;
int ret = -ENOMEM;
@@ -1260,48 +1240,32 @@ mcr20a_probe(struct spi_device *spi)
return -EINVAL;
}
- pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- /* set mcr20a platform data */
- ret = mcr20a_get_platform_data(spi, pdata);
- if (ret < 0) {
- dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n");
- goto free_pdata;
- }
-
- /* init reset gpio */
- if (gpio_is_valid(pdata->rst_gpio)) {
- ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio,
- GPIOF_OUT_INIT_HIGH, "reset");
- if (ret)
- goto free_pdata;
+ rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH);
+ if (IS_ERR(rst_b)) {
+ ret = PTR_ERR(rst_b);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&spi->dev, "Failed to get 'rst_b' gpio: %d\n", ret);
+ return ret;
}
/* reset mcr20a */
- if (gpio_is_valid(pdata->rst_gpio)) {
- usleep_range(10, 20);
- gpio_set_value_cansleep(pdata->rst_gpio, 0);
- usleep_range(10, 20);
- gpio_set_value_cansleep(pdata->rst_gpio, 1);
- usleep_range(120, 240);
- }
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(rst_b, 1);
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(rst_b, 0);
+ usleep_range(120, 240);
/* allocate ieee802154_hw and private data */
hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
if (!hw) {
dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
- ret = -ENOMEM;
- goto free_pdata;
+ return ret;
}
/* init mcr20a local data */
lp = hw->priv;
lp->hw = hw;
lp->spi = spi;
- lp->spi->dev.platform_data = pdata;
- lp->pdata = pdata;
/* init ieee802154_hw */
hw->parent = &spi->dev;
@@ -1370,8 +1334,6 @@ mcr20a_probe(struct spi_device *spi)
free_dev:
ieee802154_free_hw(lp->hw);
-free_pdata:
- kfree(pdata);
return ret;
}
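
One detail worth noting in the mcr20a conversion above: the reset pulse appears inverted relative to the old gpio_set_value() code because gpiod works in logical levels, so with a DT-flagged active-low rst_b line writing 1 asserts reset. A condensed sketch of the pattern (kernel context assumed, only the names from the patch reused):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>

/* Condensed sketch of the descriptor-based reset above. gpiod takes
 * logical levels: if the DT flags rst_b as GPIO_ACTIVE_LOW the core
 * drives the wire low when we write 1, so "1" means "assert reset"
 * regardless of board wiring.
 */
static int example_reset(struct device *dev)
{
	struct gpio_desc *rst_b;

	rst_b = devm_gpiod_get(dev, "rst_b", GPIOD_OUT_HIGH);
	if (IS_ERR(rst_b))
		return PTR_ERR(rst_b);

	usleep_range(10, 20);
	gpiod_set_value_cansleep(rst_b, 1);	/* assert reset */
	usleep_range(10, 20);
	gpiod_set_value_cansleep(rst_b, 0);	/* release reset */
	usleep_range(120, 240);

	return 0;
}
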
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 30612497643c..2df7f60fe052 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -59,12 +59,6 @@
#include <net/net_namespace.h>
#include <linux/u64_stats_sync.h>
-struct pcpu_lstats {
- u64 packets;
- u64 bytes;
- struct u64_stats_sync syncp;
-};
-
/* The higher levels take care of making this non-reentrant (it's
* called with bh's disabled).
*/
@@ -75,6 +69,10 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
int len;
skb_tx_timestamp(skb);
+
+ /* do not fool net_timestamp_check() with various clock bases */
+ skb->tstamp = 0;
+
skb_orphan(skb);
/* Before queueing this packet to netif_rx(),
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 7de88b33d5b9..4bb90b6867a2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -64,9 +64,9 @@ struct macsec_eth_header {
#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
-#define for_each_rxsc(secy, sc) \
+#define for_each_rxsc(secy, sc) \
for (sc = rcu_dereference_bh(secy->rx_sc); \
- sc; \
+ sc; \
sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc) \
for (sc = rtnl_dereference(secy->rx_sc); \
@@ -1142,6 +1142,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
+
sc = sc ? macsec_rxsc_get(sc) : NULL;
if (sc) {
@@ -1584,7 +1585,6 @@ static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
return rx_sa;
}
-
static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
@@ -2156,7 +2156,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
}
static int copy_tx_sa_stats(struct sk_buff *skb,
- struct macsec_tx_sa_stats __percpu *pstats)
+ struct macsec_tx_sa_stats __percpu *pstats)
{
struct macsec_tx_sa_stats sum = {0, };
int cpu;
@@ -2176,7 +2176,7 @@ static int copy_tx_sa_stats(struct sk_buff *skb,
}
static int copy_rx_sa_stats(struct sk_buff *skb,
- struct macsec_rx_sa_stats __percpu *pstats)
+ struct macsec_rx_sa_stats __percpu *pstats)
{
struct macsec_rx_sa_stats sum = {0, };
int cpu;
@@ -2202,7 +2202,7 @@ static int copy_rx_sa_stats(struct sk_buff *skb,
}
static int copy_rx_sc_stats(struct sk_buff *skb,
- struct pcpu_rx_sc_stats __percpu *pstats)
+ struct pcpu_rx_sc_stats __percpu *pstats)
{
struct macsec_rx_sc_stats sum = {0, };
int cpu;
@@ -2266,7 +2266,7 @@ static int copy_rx_sc_stats(struct sk_buff *skb,
}
static int copy_tx_sc_stats(struct sk_buff *skb,
- struct pcpu_tx_sc_stats __percpu *pstats)
+ struct pcpu_tx_sc_stats __percpu *pstats)
{
struct macsec_tx_sc_stats sum = {0, };
int cpu;
@@ -2306,7 +2306,7 @@ static int copy_tx_sc_stats(struct sk_buff *skb,
}
static int copy_secy_stats(struct sk_buff *skb,
- struct pcpu_secy_stats __percpu *pstats)
+ struct pcpu_secy_stats __percpu *pstats)
{
struct macsec_dev_stats sum = {0, };
int cpu;
@@ -2962,13 +2962,11 @@ static int macsec_get_iflink(const struct net_device *dev)
return macsec_priv(dev)->real_dev->ifindex;
}
-
static int macsec_get_nest_level(struct net_device *dev)
{
return macsec_priv(dev)->nest_level;
}
-
static const struct net_device_ops macsec_netdev_ops = {
.ndo_init = macsec_dev_init,
.ndo_uninit = macsec_dev_uninit,
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index cfda146f3b3b..fc8d5f1ee1ad 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1077,7 +1077,7 @@ static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
vlan->netpoll = NULL;
- __netpoll_free_async(netpoll);
+ __netpoll_free(netpoll);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 7ae1856d1f18..e964d312f4ca 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -19,7 +19,6 @@
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
@@ -603,6 +602,9 @@ static int net_failover_slave_unregister(struct net_device *slave_dev,
primary_dev = rtnl_dereference(nfo_info->primary_dev);
standby_dev = rtnl_dereference(nfo_info->standby_dev);
+ if (WARN_ON_ONCE(slave_dev != primary_dev && slave_dev != standby_dev))
+ return -ENODEV;
+
vlan_vids_del_by_dev(slave_dev, failover_dev);
dev_uc_unsync(slave_dev, failover_dev);
dev_mc_unsync(slave_dev, failover_dev);
@@ -762,8 +764,10 @@ struct failover *net_failover_create(struct net_device *standby_dev)
netif_carrier_off(failover_dev);
failover = failover_register(failover_dev, &net_failover_ops);
- if (IS_ERR(failover))
+ if (IS_ERR(failover)) {
+ err = PTR_ERR(failover);
goto err_failover_register;
+ }
return failover;
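
The net_failover change above is the classic missing err = PTR_ERR() assignment before a goto: without it the unwind path returns whatever err happened to hold (zero here, masking the failure). A compact sketch of the idiom with illustrative names:

#include <linux/err.h>

/* Compact sketch of the error-path fix above: an ERR_PTR-encoded
 * failure must be decoded with PTR_ERR() before jumping to the
 * unwind label, or the function returns a stale error code.
 * do_register() and the label are illustrative.
 */
extern void *do_register(void);

static int create_thing(void)
{
	void *obj;
	int err = 0;

	obj = do_register();
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);	/* the line the fix adds */
		goto err_unwind;
	}

	return 0;

err_unwind:
	/* undo earlier setup here */
	return err;
}
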
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 81444208b216..cb3518474f0e 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -86,8 +86,14 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
return 0;
}
+static int nsim_bpf_finalize(struct bpf_verifier_env *env)
+{
+ return 0;
+}
+
static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
- .insn_hook = nsim_bpf_verify_insn,
+ .insn_hook = nsim_bpf_verify_insn,
+ .finalize = nsim_bpf_finalize,
};
static bool nsim_xdp_offload_active(struct netdevsim *ns)
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 4b22955de191..dd0db7534cb3 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -6,12 +6,6 @@
#include <linux/if_arp.h>
#include <net/rtnetlink.h>
-struct pcpu_lstats {
- u64 packets;
- u64 bytes;
- struct u64_stats_sync syncp;
-};
-
static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
{
int len = skb->len;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 82070792edbb..3d187cd50eb0 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -240,7 +240,7 @@ config AT803X_PHY
config BCM63XX_PHY
tristate "Broadcom 63xx SOCs internal PHY"
- depends on BCM63XX
+ depends on BCM63XX || COMPILE_TEST
select BCM_NET_PHYLIB
---help---
Currently supports the 6348 and 6358 PHYs.
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
index 319edc9c8ec7..632472cab3bb 100644
--- a/drivers/net/phy/aquantia.c
+++ b/drivers/net/phy/aquantia.c
@@ -115,7 +115,7 @@ static struct phy_driver aquantia_driver[] = {
.phy_id = PHY_ID_AQ1202,
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQ1202",
- .features = PHY_AQUANTIA_FEATURES,
+ .features = PHY_10GBIT_FULL_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
@@ -127,7 +127,7 @@ static struct phy_driver aquantia_driver[] = {
.phy_id = PHY_ID_AQ2104,
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQ2104",
- .features = PHY_AQUANTIA_FEATURES,
+ .features = PHY_10GBIT_FULL_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
@@ -139,7 +139,7 @@ static struct phy_driver aquantia_driver[] = {
.phy_id = PHY_ID_AQR105,
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR105",
- .features = PHY_AQUANTIA_FEATURES,
+ .features = PHY_10GBIT_FULL_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
@@ -151,7 +151,7 @@ static struct phy_driver aquantia_driver[] = {
.phy_id = PHY_ID_AQR106,
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR106",
- .features = PHY_AQUANTIA_FEATURES,
+ .features = PHY_10GBIT_FULL_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
@@ -163,7 +163,7 @@ static struct phy_driver aquantia_driver[] = {
.phy_id = PHY_ID_AQR107,
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR107",
- .features = PHY_AQUANTIA_FEATURES,
+ .features = PHY_10GBIT_FULL_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
@@ -175,7 +175,7 @@ static struct phy_driver aquantia_driver[] = {
.phy_id = PHY_ID_AQR405,
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR405",
- .features = PHY_AQUANTIA_FEATURES,
+ .features = PHY_10GBIT_FULL_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 411cf1072bae..e74a047a846e 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -357,7 +357,7 @@ static int at803x_aneg_done(struct phy_device *phydev)
/* check if the SGMII link is OK. */
if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) {
- pr_warn("803x_aneg_done: SGMII link is not ok\n");
+ phydev_warn(phydev, "803x_aneg_done: SGMII link is not ok\n");
aneg_done = 0;
}
/* switch back to copper page */
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index cf14613745c9..d95bffdec4c1 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -42,6 +42,9 @@ static int bcm63xx_config_init(struct phy_device *phydev)
{
int reg, err;
+ /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */
+ phydev->supported |= SUPPORTED_Pause;
+
reg = phy_read(phydev, MII_BCM63XX_IR);
if (reg < 0)
return reg;
@@ -65,8 +68,7 @@ static struct phy_driver bcm63xx_driver[] = {
.phy_id = 0x00406000,
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (1)",
- /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
@@ -75,8 +77,7 @@ static struct phy_driver bcm63xx_driver[] = {
/* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
.phy_id_mask = 0xfffffc00,
- .name = "Broadcom BCM63XX (2)",
- .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 29aa8d772b0c..edd4d44a386d 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -553,16 +553,17 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
mutex_unlock(&clock->extreg_lock);
if (!phydev->attached_dev) {
- pr_warn("expected to find an attached netdevice\n");
+ phydev_warn(phydev,
+ "expected to find an attached netdevice\n");
return;
}
if (on) {
if (dev_mc_add(phydev->attached_dev, status_frame_dst))
- pr_warn("failed to add mc address\n");
+ phydev_warn(phydev, "failed to add mc address\n");
} else {
if (dev_mc_del(phydev->attached_dev, status_frame_dst))
- pr_warn("failed to delete mc address\n");
+ phydev_warn(phydev, "failed to delete mc address\n");
}
}
@@ -686,9 +687,9 @@ static void recalibrate(struct dp83640_clock *clock)
* read out and correct offsets
*/
val = ext_read(master, PAGE4, PTP_STS);
- pr_info("master PTP_STS 0x%04hx\n", val);
+ phydev_info(master, "master PTP_STS 0x%04hx\n", val);
val = ext_read(master, PAGE4, PTP_ESTS);
- pr_info("master PTP_ESTS 0x%04hx\n", val);
+ phydev_info(master, "master PTP_ESTS 0x%04hx\n", val);
event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA);
event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA);
event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
@@ -698,15 +699,16 @@ static void recalibrate(struct dp83640_clock *clock)
list_for_each(this, &clock->phylist) {
tmp = list_entry(this, struct dp83640_private, list);
val = ext_read(tmp->phydev, PAGE4, PTP_STS);
- pr_info("slave PTP_STS 0x%04hx\n", val);
+ phydev_info(tmp->phydev, "slave PTP_STS 0x%04hx\n", val);
val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
- pr_info("slave PTP_ESTS 0x%04hx\n", val);
+ phydev_info(tmp->phydev, "slave PTP_ESTS 0x%04hx\n", val);
event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
diff = now - (s64) phy2txts(&event_ts);
- pr_info("slave offset %lld nanoseconds\n", diff);
+ phydev_info(tmp->phydev, "slave offset %lld nanoseconds\n",
+ diff);
diff += ADJTIME_FIX;
ts = ns_to_timespec64(diff);
tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index a9a4edfa23c8..565e49e7f76f 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -91,8 +91,7 @@ static struct phy_driver et1011c_driver[] = { {
.phy_id = 0x0282f014,
.name = "ET1011C",
.phy_id_mask = 0xfffffff0,
- .features = (PHY_BASIC_FEATURES | SUPPORTED_1000baseT_Full),
- .flags = PHY_POLL,
+ .features = PHY_GBIT_FEATURES,
.config_aneg = et1011c_config_aneg,
.read_status = et1011c_read_status,
} };
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f7c69ca34056..cbec296107bd 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -265,7 +265,7 @@ static int marvell_set_polarity(struct phy_device *phydev, int polarity)
return err;
}
- return 0;
+ return val != reg;
}
static int marvell_set_downshift(struct phy_device *phydev, bool enable,
@@ -287,12 +287,15 @@ static int marvell_set_downshift(struct phy_device *phydev, bool enable,
static int marvell_config_aneg(struct phy_device *phydev)
{
+ int changed = 0;
int err;
err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
+ changed = err;
+
err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL,
MII_M1111_PHY_LED_DIRECT);
if (err < 0)
@@ -302,7 +305,7 @@ static int marvell_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- if (phydev->autoneg != AUTONEG_ENABLE) {
+ if (phydev->autoneg != AUTONEG_ENABLE || changed) {
/* A write to speed/duplex bits (that is performed by
* genphy_config_aneg() call above) must be followed by
* a software reset. Otherwise, the write has no effect.
@@ -350,42 +353,6 @@ static int m88e1101_config_aneg(struct phy_device *phydev)
return marvell_config_aneg(phydev);
}
-static int m88e1111_config_aneg(struct phy_device *phydev)
-{
- int err;
-
- /* The Marvell PHY has an errata which requires
- * that certain registers get written in order
- * to restart autonegotiation
- */
- err = genphy_soft_reset(phydev);
-
- err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL,
- MII_M1111_PHY_LED_DIRECT);
- if (err < 0)
- return err;
-
- err = genphy_config_aneg(phydev);
- if (err < 0)
- return err;
-
- if (phydev->autoneg != AUTONEG_ENABLE) {
- /* A write to speed/duplex bits (that is performed by
- * genphy_config_aneg() call above) must be followed by
- * a software reset. Otherwise, the write has no effect.
- */
- err = genphy_soft_reset(phydev);
- if (err < 0)
- return err;
- }
-
- return 0;
-}
-
#ifdef CONFIG_OF_MDIO
/* Set and/or override some configuration registers based on the
* marvell,reg-init property stored in the of_node for the phydev.
@@ -479,6 +446,7 @@ static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
static int m88e1121_config_aneg(struct phy_device *phydev)
{
+ int changed = 0;
int err = 0;
if (phy_interface_is_rgmii(phydev)) {
@@ -487,15 +455,26 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
- err = genphy_soft_reset(phydev);
+ err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
- err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
+ changed = err;
+
+ err = genphy_config_aneg(phydev);
if (err < 0)
return err;
- return genphy_config_aneg(phydev);
+ if (phydev->autoneg != AUTONEG_ENABLE || changed) {
+ /* A software reset is used to ensure a "commit" of the
+ * changes is done.
+ */
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
}
static int m88e1318_config_aneg(struct phy_device *phydev)
@@ -659,7 +638,7 @@ static void marvell_config_led(struct phy_device *phydev)
err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL,
def_config);
if (err < 0)
- pr_warn("Fail to config marvell phy LED.\n");
+ phydev_warn(phydev, "Fail to config marvell phy LED.\n");
}
static int marvell_config_init(struct phy_device *phydev)
@@ -2067,7 +2046,7 @@ static struct phy_driver marvell_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
- .config_aneg = &m88e1111_config_aneg,
+ .config_aneg = &marvell_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
@@ -2222,7 +2201,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1510,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1510",
- .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
+ .features = PHY_GBIT_FIBRE_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1510_probe,
.config_init = &m88e1510_config_init,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index f77a2d9e7f9d..1c9d039eec63 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -337,9 +337,9 @@ static int mv3310_config_init(struct phy_device *phydev)
}
if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
- dev_warn(&phydev->mdio.dev,
- "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
+ phydev_warn(phydev,
+ "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
phydev->supported &= mask;
phydev->advertising &= phydev->supported;
@@ -535,16 +535,7 @@ static struct phy_driver mv3310_drivers[] = {
.phy_id = 0x002b09aa,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88x3310",
- .features = SUPPORTED_10baseT_Full |
- SUPPORTED_10baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_TP |
- SUPPORTED_FIBRE |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_Backplane,
+ .features = PHY_10GBIT_FEATURES,
.soft_reset = gen10g_no_soft_reset,
.config_init = mv3310_config_init,
.probe = mv3310_probe,
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 8d370667fa1b..df75efa96a7d 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -45,6 +46,8 @@ struct unimac_mdio_priv {
void __iomem *base;
int (*wait_func) (void *wait_func_data);
void *wait_func_data;
+ struct clk *clk;
+ u32 clk_freq;
};
static inline u32 unimac_mdio_readl(struct unimac_mdio_priv *priv, u32 offset)
@@ -189,6 +192,35 @@ static int unimac_mdio_reset(struct mii_bus *bus)
return 0;
}
+static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
+{
+ unsigned long rate;
+ u32 reg, div;
+
+ /* Keep the hardware default values */
+ if (!priv->clk_freq)
+ return;
+
+ if (!priv->clk)
+ rate = 250000000;
+ else
+ rate = clk_get_rate(priv->clk);
+
+ div = (rate / (2 * priv->clk_freq)) - 1;
+ if (div & ~MDIO_CLK_DIV_MASK) {
+ pr_warn("Incorrect MDIO clock frequency, ignoring\n");
+ return;
+ }
+
+ /* The MDIO clock is the reference clock (typically 250 MHz) divided by
+ * 2 x (MDIO_CLK_DIV + 1)
+ */
+ reg = unimac_mdio_readl(priv, MDIO_CFG);
+ reg &= ~(MDIO_CLK_DIV_MASK << MDIO_CLK_DIV_SHIFT);
+ reg |= div << MDIO_CLK_DIV_SHIFT;
+ unimac_mdio_writel(priv, reg, MDIO_CFG);
+}
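
As a cross-check of the divider arithmetic above: the MDC frequency follows rate / (2 * (div + 1)), so the driver solves for div and rejects values that overflow the register field. A standalone sketch of the same computation (the ex_ names and the 8-bit mask width are illustrative assumptions, not the driver's definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed 8-bit divider field; the real width comes from MDIO_CLK_DIV_MASK */
    #define EX_CLK_DIV_MASK 0xff

    /* Mirror of the divider computation in unimac_mdio_clk_set() */
    static int ex_mdio_div(unsigned long rate, uint32_t clk_freq)
    {
            uint32_t div = (rate / (2 * clk_freq)) - 1;

            if (div & ~EX_CLK_DIV_MASK)
                    return -1;      /* does not fit: keep hardware default */
            return div;
    }

    int main(void)
    {
            /* 250 MHz reference and a 2.5 MHz MDC give div = 49 */
            printf("div = %d\n", ex_mdio_div(250000000UL, 2500000));
            return 0;
    }
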
+
static int unimac_mdio_probe(struct platform_device *pdev)
{
struct unimac_mdio_pdata *pdata = pdev->dev.platform_data;
@@ -217,9 +249,26 @@ static int unimac_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
+ return PTR_ERR(priv->clk);
+ else if (IS_ERR(priv->clk))
+ priv->clk = NULL;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
+ priv->clk_freq = 0;
+
+ unimac_mdio_clk_set(priv);
+
priv->mii_bus = mdiobus_alloc();
- if (!priv->mii_bus)
- return -ENOMEM;
+ if (!priv->mii_bus) {
+ ret = -ENOMEM;
+ goto out_clk_disable;
+ }
bus = priv->mii_bus;
bus->priv = priv;
@@ -253,6 +302,8 @@ static int unimac_mdio_probe(struct platform_device *pdev)
out_mdio_free:
mdiobus_free(bus);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -262,10 +313,37 @@ static int unimac_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused unimac_mdio_suspend(struct device *d)
+{
+ struct unimac_mdio_priv *priv = dev_get_drvdata(d);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int __maybe_unused unimac_mdio_resume(struct device *d)
+{
+ struct unimac_mdio_priv *priv = dev_get_drvdata(d);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ unimac_mdio_clk_set(priv);
return 0;
}
+static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
+ unimac_mdio_suspend, unimac_mdio_resume);
+
static const struct of_device_id unimac_mdio_ids[] = {
{ .compatible = "brcm,genet-mdio-v5", },
{ .compatible = "brcm,genet-mdio-v4", },
@@ -281,6 +359,7 @@ static struct platform_driver unimac_mdio_driver = {
.driver = {
.name = UNIMAC_MDIO_DRV_NAME,
.of_match_table = unimac_mdio_ids,
+ .pm = &unimac_mdio_pm_ops,
},
.probe = unimac_mdio_probe,
.remove = unimac_mdio_remove,
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index c017486e9b86..696bdf1e4576 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -289,8 +289,7 @@ static int mdio_mux_iproc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int mdio_mux_iproc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
+ struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
clk_disable_unprepare(md->core_clk);
@@ -299,8 +298,7 @@ static int mdio_mux_iproc_suspend(struct device *dev)
static int mdio_mux_iproc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
+ struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
clk_prepare_enable(md->core_clk);
mdio_mux_iproc_config(md);
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index bc90764a8b8d..fe34576262bd 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -20,23 +20,21 @@
struct mdio_mux_gpio_state {
struct gpio_descs *gpios;
void *mux_handle;
- int values[];
};
static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
void *data)
{
struct mdio_mux_gpio_state *s = data;
- unsigned int n;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(desired_child));
if (current_child == desired_child)
return 0;
- for (n = 0; n < s->gpios->ndescs; n++)
- s->values[n] = (desired_child >> n) & 1;
+ values[0] = desired_child;
gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc,
- s->values);
+ s->gpios->info, values);
return 0;
}
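
The rewrite above drops the per-line shift loop: gpiod_set_array_value_cansleep() consumes the bitmap one bit per descriptor, so storing the child number into the first word yields the same per-GPIO levels the old values[] array carried. A standalone demonstration of that equivalence (plain C, not driver code):

    #include <stdio.h>

    int main(void)
    {
            unsigned long values;
            unsigned int desired_child = 5, ndescs = 3, n;

            values = desired_child;         /* new style: whole word at once */

            for (n = 0; n < ndescs; n++)    /* old style: bit-by-bit */
                    printf("gpio%u = %lu\n", n, (values >> n) & 1);
            return 0;
    }
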
@@ -51,8 +49,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpios))
return PTR_ERR(gpios);
- s = devm_kzalloc(&pdev->dev, struct_size(s, values, gpios->ndescs),
- GFP_KERNEL);
+ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
if (!s) {
gpiod_put_array(gpios);
return -ENOMEM;
diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c
index 564616968cad..1546f6398831 100644
--- a/drivers/net/phy/mdio-thunder.c
+++ b/drivers/net/phy/mdio-thunder.c
@@ -73,8 +73,8 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
err = of_address_to_resource(node, 0, &r);
if (err) {
dev_err(&pdev->dev,
- "Couldn't translate address for \"%s\"\n",
- node->name);
+ "Couldn't translate address for \"%pOFn\"\n",
+ node);
break;
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 98f4b1f706df..2e59a8419b17 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -38,7 +38,6 @@
#include <linux/phy.h>
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <linux/gpio/consumer.h>
#include <asm/irq.h>
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3db06b40580d..9265dea79412 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -14,7 +14,7 @@
* option) any later version.
*
* Support : Micrel Phys:
- * Giga phys: ksz9021, ksz9031
+ * Giga phys: ksz9021, ksz9031, ksz9131
* 100/10 Phys : ksz8001, ksz8721, ksz8737, ksz8041
* ksz8021, ksz8031, ksz8051,
* ksz8081, ksz8091,
@@ -609,6 +609,116 @@ err_force_master:
return result;
}
+#define KSZ9131_SKEW_5BIT_MAX 2400
+#define KSZ9131_SKEW_4BIT_MAX 800
+#define KSZ9131_OFFSET 700
+#define KSZ9131_STEP 100
+
+static int ksz9131_of_load_skew_values(struct phy_device *phydev,
+ struct device_node *of_node,
+ u16 reg, size_t field_sz,
+ char *field[], u8 numfields)
+{
+ int val[4] = {-(1 + KSZ9131_OFFSET), -(2 + KSZ9131_OFFSET),
+ -(3 + KSZ9131_OFFSET), -(4 + KSZ9131_OFFSET)};
+ int skewval, skewmax = 0;
+ int matches = 0;
+ u16 maxval;
+ u16 newval;
+ u16 mask;
+ int i;
+
+ /* psec properties in the DT specify the skew in picoseconds */
+ if (field_sz == 5)
+ skewmax = KSZ9131_SKEW_5BIT_MAX;
+ else
+ skewmax = KSZ9131_SKEW_4BIT_MAX;
+
+ for (i = 0; i < numfields; i++)
+ if (!of_property_read_s32(of_node, field[i], &skewval)) {
+ if (skewval < -KSZ9131_OFFSET)
+ skewval = -KSZ9131_OFFSET;
+ else if (skewval > skewmax)
+ skewval = skewmax;
+
+ val[i] = skewval + KSZ9131_OFFSET;
+ matches++;
+ }
+
+ if (!matches)
+ return 0;
+
+ if (matches < numfields)
+ newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
+ else
+ newval = 0;
+
+ maxval = (field_sz == 4) ? 0xf : 0x1f;
+ for (i = 0; i < numfields; i++)
+ if (val[i] != -(i + 1 + KSZ9131_OFFSET)) {
+ mask = 0xffff;
+ mask ^= maxval << (field_sz * i);
+ newval = (newval & mask) |
+ (((val[i] / KSZ9131_STEP) & maxval)
+ << (field_sz * i));
+ }
+
+ return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
+}
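
For reference, the encoding implemented above maps a DT skew in [-700, skewmax] ps onto a 4- or 5-bit field in 100 ps steps, biased so that -700 ps encodes as 0. A minimal standalone sketch of the clamp-and-encode step (ex_ names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define EX_OFFSET 700   /* KSZ9131_OFFSET */
    #define EX_STEP   100   /* KSZ9131_STEP */

    static uint16_t ex_encode_skew(int ps, int field_sz)
    {
            int max = (field_sz == 5) ? 2400 : 800; /* KSZ9131_SKEW_*BIT_MAX */
            uint16_t maxval = (field_sz == 4) ? 0xf : 0x1f;

            if (ps < -EX_OFFSET)
                    ps = -EX_OFFSET;
            else if (ps > max)
                    ps = max;

            return ((ps + EX_OFFSET) / EX_STEP) & maxval;
    }

    int main(void)
    {
            /* 0 ps sits mid-scale: 0x7 in a 4-bit field */
            printf("0x%x\n", ex_encode_skew(0, 4));
            return 0;
    }
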
+
+static int ksz9131_config_init(struct phy_device *phydev)
+{
+ const struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ char *clk_skews[2] = {"rxc-skew-psec", "txc-skew-psec"};
+ char *rx_data_skews[4] = {
+ "rxd0-skew-psec", "rxd1-skew-psec",
+ "rxd2-skew-psec", "rxd3-skew-psec"
+ };
+ char *tx_data_skews[4] = {
+ "txd0-skew-psec", "txd1-skew-psec",
+ "txd2-skew-psec", "txd3-skew-psec"
+ };
+ char *control_skews[2] = {"txen-skew-psec", "rxdv-skew-psec"};
+ const struct device *dev_walker;
+ int ret;
+
+ dev_walker = &phydev->mdio.dev;
+ do {
+ of_node = dev_walker->of_node;
+ dev_walker = dev_walker->parent;
+ } while (!of_node && dev_walker);
+
+ if (!of_node)
+ return 0;
+
+ ret = ksz9131_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_CLK_PAD_SKEW, 5,
+ clk_skews, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz9131_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_CONTROL_PAD_SKEW, 4,
+ control_skews, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz9131_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_RX_DATA_PAD_SKEW, 4,
+ rx_data_skews, 4);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz9131_of_load_skew_values(phydev, of_node,
+ MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
+ tx_data_skews, 4);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
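
The do/while loop at the top of ksz9131_config_init() walks up the device hierarchy until it finds a node carrying DT data, and skips the skew configuration entirely if none exists. A simplified standalone model of that walk (the struct is a stand-in, not a kernel type):

    #include <stdio.h>

    struct ex_dev {
            const char *of_node;    /* NULL when this level has no DT node */
            struct ex_dev *parent;
    };

    static const char *ex_find_of_node(struct ex_dev *dev)
    {
            const char *node;

            do {
                    node = dev->of_node;
                    dev = dev->parent;
            } while (!node && dev);

            return node;    /* may still be NULL: caller then bails out */
    }

    int main(void)
    {
            struct ex_dev mac = { "ethernet-controller", NULL };
            struct ex_dev phy = { NULL, &mac };

            printf("%s\n", ex_find_of_node(&phy));
            return 0;
    }
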
+
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX BIT(6)
#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED BIT(4)
@@ -975,6 +1085,23 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = kszphy_resume,
}, {
+ .phy_id = PHY_ID_KSZ9131,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip KSZ9131 Gigabit PHY",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
+ .config_init = ksz9131_config_init,
+ .read_status = ksz9031_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = kszphy_resume,
+}, {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
@@ -1022,6 +1149,7 @@ MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
{ PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ9131, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8001, 0x00fffffc },
{ PHY_ID_KS8737, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8021, 0x00ffffff },
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 2d67937866a3..04b12e34da58 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -88,7 +88,7 @@ static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
/* Save current page */
save_page = phy_save_page(phydev);
if (save_page < 0) {
- pr_warn("Failed to get current page\n");
+ phydev_warn(phydev, "Failed to get current page\n");
goto err;
}
@@ -98,14 +98,14 @@ static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA,
(data & 0xFFFF));
if (ret < 0) {
- pr_warn("Failed to write TR low data\n");
+ phydev_warn(phydev, "Failed to write TR low data\n");
goto err;
}
ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA,
(data & 0x00FF0000) >> 16);
if (ret < 0) {
- pr_warn("Failed to write TR high data\n");
+ phydev_warn(phydev, "Failed to write TR high data\n");
goto err;
}
@@ -115,14 +115,15 @@ static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf);
if (ret < 0) {
- pr_warn("Failed to write data in reg\n");
+ phydev_warn(phydev, "Failed to write data in reg\n");
goto err;
}
usleep_range(1000, 2000);/* Wait for Data to be written */
val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR);
if (!(val & 0x8000))
- pr_warn("TR Register[0x%X] configuration failed\n", regaddr);
+ phydev_warn(phydev, "TR Register[0x%X] configuration failed\n",
+ regaddr);
err:
return phy_restore_page(phydev, save_page, ret);
}
@@ -137,7 +138,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
*/
err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A);
if (err < 0)
- pr_warn("Failed to Set Register[0x0F82]\n");
+ phydev_warn(phydev, "Failed to Set Register[0x0F82]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x06.
* Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv,
@@ -145,7 +146,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
*/
err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F);
if (err < 0)
- pr_warn("Failed to Set Register[0x168C]\n");
+ phydev_warn(phydev, "Failed to Set Register[0x168C]\n");
/* Get access to Channel b'10, Node b'1111, Register 0x11.
* Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh
@@ -153,7 +154,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
*/
err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620);
if (err < 0)
- pr_warn("Failed to Set Register[0x17A2]\n");
+ phydev_warn(phydev, "Failed to Set Register[0x17A2]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x10.
* Write 24-bit value 0xEEFFDD to register. Setting
@@ -162,7 +163,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
*/
err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD);
if (err < 0)
- pr_warn("Failed to Set Register[0x16A0]\n");
+ phydev_warn(phydev, "Failed to Set Register[0x16A0]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x13.
* Write 24-bit value 0x071448 to register. Setting
@@ -170,7 +171,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
*/
err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448);
if (err < 0)
- pr_warn("Failed to Set Register[0x16A6]\n");
+ phydev_warn(phydev, "Failed to Set Register[0x16A6]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x12.
* Write 24-bit value 0x13132F to register. Setting
@@ -178,7 +179,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
*/
err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F);
if (err < 0)
- pr_warn("Failed to Set Register[0x16A4]\n");
+ phydev_warn(phydev, "Failed to Set Register[0x16A4]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x14.
* Write 24-bit value 0x0 to register. Setting eee_3level_delay,
@@ -186,7 +187,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
*/
err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0);
if (err < 0)
- pr_warn("Failed to Set Register[0x16A8]\n");
+ phydev_warn(phydev, "Failed to Set Register[0x16A8]\n");
/* Get access to Channel b'01, Node b'1111, Register 0x34.
* Write 24-bit value 0x91B06C to register. Setting
@@ -195,7 +196,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
*/
err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C);
if (err < 0)
- pr_warn("Failed to Set Register[0x0FE8]\n");
+ phydev_warn(phydev, "Failed to Set Register[0x0FE8]\n");
/* Get access to Channel b'01, Node b'1111, Register 0x3E.
* Write 24-bit value 0xC0A028 to register. Setting
@@ -204,7 +205,7 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
*/
err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028);
if (err < 0)
- pr_warn("Failed to Set Register[0x0FFC]\n");
+ phydev_warn(phydev, "Failed to Set Register[0x0FFC]\n");
/* Get access to Channel b'01, Node b'1111, Register 0x35.
* Write 24-bit value 0x041600 to register. Setting
@@ -213,14 +214,14 @@ static void lan88xx_config_TR_regs(struct phy_device *phydev)
*/
err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600);
if (err < 0)
- pr_warn("Failed to Set Register[0x0FEA]\n");
+ phydev_warn(phydev, "Failed to Set Register[0x0FEA]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x03.
* Write 24-bit value 0x000004 to register. Setting TrFreeze bits.
*/
err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004);
if (err < 0)
- pr_warn("Failed to Set Register[0x1686]\n");
+ phydev_warn(phydev, "Failed to Set Register[0x1686]\n");
}
static int lan88xx_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index b1917dd1978a..c600a8509d60 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -46,7 +46,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Microchip LAN87xx T1",
- .features = SUPPORTED_100baseT_Full,
+ .features = PHY_BASIC_T1_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = genphy_config_init,
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 84ca9ff40ae0..a2e59f4f6f01 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -6,6 +6,8 @@
* Copyright (c) 2016 Microsemi Corporation
*/
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mdio.h>
@@ -32,6 +34,15 @@ enum rgmii_rx_clock_delay {
#define DISABLE_HP_AUTO_MDIX_MASK 0x0080
#define DISABLE_PAIR_SWAP_CORR_MASK 0x0020
#define DISABLE_POLARITY_CORR_MASK 0x0010
+#define PARALLEL_DET_IGNORE_ADVERTISED 0x0008
+
+#define MSCC_PHY_EXT_CNTL_STATUS 22
+#define SMI_BROADCAST_WR_EN 0x0001
+
+#define MSCC_PHY_ERR_RX_CNT 19
+#define MSCC_PHY_ERR_FALSE_CARRIER_CNT 20
+#define MSCC_PHY_ERR_LINK_DISCONNECT_CNT 21
+#define ERR_CNT_MASK GENMASK(7, 0)
#define MSCC_PHY_EXT_PHY_CNTL_1 23
#define MAC_IF_SELECTION_MASK 0x1800
@@ -39,7 +50,22 @@ enum rgmii_rx_clock_delay {
#define MAC_IF_SELECTION_RMII 1
#define MAC_IF_SELECTION_RGMII 2
#define MAC_IF_SELECTION_POS 11
+#define VSC8584_MAC_IF_SELECTION_MASK 0x1000
+#define VSC8584_MAC_IF_SELECTION_SGMII 0
+#define VSC8584_MAC_IF_SELECTION_1000BASEX 1
+#define VSC8584_MAC_IF_SELECTION_POS 12
#define FAR_END_LOOPBACK_MODE_MASK 0x0008
+#define MEDIA_OP_MODE_MASK 0x0700
+#define MEDIA_OP_MODE_COPPER 0
+#define MEDIA_OP_MODE_SERDES 1
+#define MEDIA_OP_MODE_1000BASEX 2
+#define MEDIA_OP_MODE_100BASEFX 3
+#define MEDIA_OP_MODE_AMS_COPPER_SERDES 5
+#define MEDIA_OP_MODE_AMS_COPPER_1000BASEX 6
+#define MEDIA_OP_MODE_AMS_COPPER_100BASEFX 7
+#define MEDIA_OP_MODE_POS 8
+
+#define MSCC_PHY_EXT_PHY_CNTL_2 24
#define MII_VSC85XX_INT_MASK 25
#define MII_VSC85XX_INT_MASK_MASK 0xa000
@@ -54,27 +80,48 @@ enum rgmii_rx_clock_delay {
#define HP_AUTO_MDIX_X_OVER_IND_MASK 0x2000
#define MSCC_PHY_LED_MODE_SEL 29
-#define LED_1_MODE_SEL_MASK 0x00F0
-#define LED_0_MODE_SEL_MASK 0x000F
-#define LED_1_MODE_SEL_POS 4
+#define LED_MODE_SEL_POS(x) ((x) * 4)
+#define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x))
+#define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x))
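
The parameterized macros above generalize the old LED_0/LED_1 masks to one 4-bit field per LED index. A quick standalone demonstration of the nibble packing (GENMASK(3, 0) written out as 0xf so the snippet builds outside the kernel):

    #include <stdio.h>

    #define LED_MODE_SEL_POS(x)     ((x) * 4)
    #define LED_MODE_SEL_MASK(x)    (0xf << LED_MODE_SEL_POS(x))
    #define LED_MODE_SEL(x, mode)   (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x))

    int main(void)
    {
            unsigned int reg = 0xffff;

            /* program LED 2 to mode 0x5: clear its nibble, then set it */
            reg &= ~LED_MODE_SEL_MASK(2);
            reg |= LED_MODE_SEL(2, 0x5);
            printf("0x%04x\n", reg);        /* prints 0xf5ff */
            return 0;
    }
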
#define MSCC_EXT_PAGE_ACCESS 31
#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
+#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */
+#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */
+/* Extended reg - GPIO; this is a bank of registers that is shared by all PHYs
+ * in the same package.
+ */
+#define MSCC_PHY_PAGE_EXTENDED_GPIO 0x0010 /* Extended reg - GPIO */
+#define MSCC_PHY_PAGE_TEST 0x2a30 /* Test reg */
+#define MSCC_PHY_PAGE_TR 0x52b5 /* Token ring registers */
/* Extended Page 1 Registers */
+#define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT 18
+#define VALID_CRC_CNT_CRC_MASK GENMASK(13, 0)
+
#define MSCC_PHY_EXT_MODE_CNTL 19
#define FORCE_MDI_CROSSOVER_MASK 0x000C
#define FORCE_MDI_CROSSOVER_MDIX 0x000C
#define FORCE_MDI_CROSSOVER_MDI 0x0008
#define MSCC_PHY_ACTIPHY_CNTL 20
+#define PHY_ADDR_REVERSED 0x0200
#define DOWNSHIFT_CNTL_MASK 0x001C
#define DOWNSHIFT_EN 0x0010
#define DOWNSHIFT_CNTL_POS 2
+#define MSCC_PHY_EXT_PHY_CNTL_4 23
+#define PHY_CNTL_4_ADDR_POS 11
+
+#define MSCC_PHY_VERIPHY_CNTL_2 25
+
+#define MSCC_PHY_VERIPHY_CNTL_3 26
+
/* Extended Page 2 Registers */
+#define MSCC_PHY_CU_PMD_TX_CNTL 16
+
#define MSCC_PHY_RGMII_CNTL 20
#define RGMII_RX_CLK_DELAY_MASK 0x0070
#define RGMII_RX_CLK_DELAY_POS 4
@@ -90,11 +137,90 @@ enum rgmii_rx_clock_delay {
#define SECURE_ON_ENABLE 0x8000
#define SECURE_ON_PASSWD_LEN_4 0x4000
+/* Extended Page 3 Registers */
+#define MSCC_PHY_SERDES_TX_VALID_CNT 21
+#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
+#define MSCC_PHY_SERDES_RX_VALID_CNT 28
+#define MSCC_PHY_SERDES_RX_CRC_ERR_CNT 29
+
+/* Extended page GPIO Registers */
+#define MSCC_DW8051_CNTL_STATUS 0
+#define MICRO_NSOFT_RESET 0x8000
+#define RUN_FROM_INT_ROM 0x4000
+#define AUTOINC_ADDR 0x2000
+#define PATCH_RAM_CLK 0x1000
+#define MICRO_PATCH_EN 0x0080
+#define DW8051_CLK_EN 0x0010
+#define MICRO_CLK_EN 0x0008
+#define MICRO_CLK_DIVIDE(x) ((x) >> 1)
+#define MSCC_DW8051_VLD_MASK 0xf1ff
+
+/* x Address in range 1-4 */
+#define MSCC_TRAP_ROM_ADDR(x) ((x) * 2 + 1)
+#define MSCC_PATCH_RAM_ADDR(x) (((x) + 1) * 2)
+#define MSCC_INT_MEM_ADDR 11
+
+#define MSCC_INT_MEM_CNTL 12
+#define READ_SFR 0x6000
+#define READ_PRAM 0x4000
+#define READ_ROM 0x2000
+#define READ_RAM 0x0000
+#define INT_MEM_WRITE_EN 0x1000
+#define EN_PATCH_RAM_TRAP_ADDR(x) (0x0100 << ((x) - 1))
+#define INT_MEM_DATA_M 0x00ff
+#define INT_MEM_DATA(x) (INT_MEM_DATA_M & (x))
+
+#define MSCC_PHY_PROC_CMD 18
+#define PROC_CMD_NCOMPLETED 0x8000
+#define PROC_CMD_FAILED 0x4000
+#define PROC_CMD_SGMII_PORT(x) ((x) << 8)
+#define PROC_CMD_FIBER_PORT(x) (0x0100 << ((x) % 4))
+#define PROC_CMD_QSGMII_PORT 0x0c00
+#define PROC_CMD_RST_CONF_PORT 0x0080
+#define PROC_CMD_RECONF_PORT 0x0000
+#define PROC_CMD_READ_MOD_WRITE_PORT 0x0040
+#define PROC_CMD_WRITE 0x0040
+#define PROC_CMD_READ 0x0000
+#define PROC_CMD_FIBER_DISABLE 0x0020
+#define PROC_CMD_FIBER_100BASE_FX 0x0010
+#define PROC_CMD_FIBER_1000BASE_X 0x0000
+#define PROC_CMD_SGMII_MAC 0x0030
+#define PROC_CMD_QSGMII_MAC 0x0020
+#define PROC_CMD_NO_MAC_CONF 0x0000
+#define PROC_CMD_1588_DEFAULT_INIT 0x0010
+#define PROC_CMD_NOP 0x000f
+#define PROC_CMD_PHY_INIT 0x000a
+#define PROC_CMD_CRC16 0x0008
+#define PROC_CMD_FIBER_MEDIA_CONF 0x0001
+#define PROC_CMD_MCB_ACCESS_MAC_CONF 0x0000
+#define PROC_CMD_NCOMPLETED_TIMEOUT_MS 500
+
+#define MSCC_PHY_MAC_CFG_FASTLINK 19
+#define MAC_CFG_MASK 0xc000
+#define MAC_CFG_SGMII 0x0000
+#define MAC_CFG_QSGMII 0x4000
+
+/* Test page Registers */
+#define MSCC_PHY_TEST_PAGE_5 5
+#define MSCC_PHY_TEST_PAGE_8 8
+#define MSCC_PHY_TEST_PAGE_9 9
+#define MSCC_PHY_TEST_PAGE_20 20
+#define MSCC_PHY_TEST_PAGE_24 24
+
+/* Token ring page Registers */
+#define MSCC_PHY_TR_CNTL 16
+#define TR_WRITE 0x8000
+#define TR_ADDR(x) (0x7fff & (x))
+#define MSCC_PHY_TR_LSB 17
+#define MSCC_PHY_TR_MSB 18
+
/* Microsemi PHY ID's */
#define PHY_ID_VSC8530 0x00070560
#define PHY_ID_VSC8531 0x00070570
#define PHY_ID_VSC8540 0x00070760
#define PHY_ID_VSC8541 0x00070770
+#define PHY_ID_VSC8574 0x000704a0
+#define PHY_ID_VSC8584 0x000707c0
#define MSCC_VDDMAC_1500 1500
#define MSCC_VDDMAC_1800 1800
@@ -103,16 +229,160 @@ enum rgmii_rx_clock_delay {
#define DOWNSHIFT_COUNT_MAX 5
+#define MAX_LEDS 4
+
+#define VSC8584_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
+ BIT(VSC8531_LINK_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_100_ACTIVITY) | \
+ BIT(VSC8584_LINK_100FX_1000X_ACTIVITY) | \
+ BIT(VSC8531_DUPLEX_COLLISION) | \
+ BIT(VSC8531_COLLISION) | \
+ BIT(VSC8531_ACTIVITY) | \
+ BIT(VSC8584_100FX_1000X_ACTIVITY) | \
+ BIT(VSC8531_AUTONEG_FAULT) | \
+ BIT(VSC8531_SERIAL_MODE) | \
+ BIT(VSC8531_FORCE_LED_OFF) | \
+ BIT(VSC8531_FORCE_LED_ON))
+
+#define VSC85XX_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
+ BIT(VSC8531_LINK_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_100_ACTIVITY) | \
+ BIT(VSC8531_DUPLEX_COLLISION) | \
+ BIT(VSC8531_COLLISION) | \
+ BIT(VSC8531_ACTIVITY) | \
+ BIT(VSC8531_AUTONEG_FAULT) | \
+ BIT(VSC8531_SERIAL_MODE) | \
+ BIT(VSC8531_FORCE_LED_OFF) | \
+ BIT(VSC8531_FORCE_LED_ON))
+
+#define MSCC_VSC8584_REVB_INT8051_FW "mscc_vsc8584_revb_int8051_fb48.bin"
+#define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800
+#define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48
+
+#define MSCC_VSC8574_REVB_INT8051_FW "mscc_vsc8574_revb_int8051_29e8.bin"
+#define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000
+#define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8
+
+#define VSC8584_REVB 0x0001
+#define MSCC_DEV_REV_MASK GENMASK(3, 0)
+
+struct reg_val {
+ u16 reg;
+ u32 val;
+};
+
+struct vsc85xx_hw_stat {
+ const char *string;
+ u8 reg;
+ u16 page;
+ u16 mask;
+};
+
+static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = {
+ {
+ .string = "phy_receive_errors",
+ .reg = MSCC_PHY_ERR_RX_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_false_carrier",
+ .reg = MSCC_PHY_ERR_FALSE_CARRIER_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_cu_media_link_disconnect",
+ .reg = MSCC_PHY_ERR_LINK_DISCONNECT_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_cu_media_crc_good_count",
+ .reg = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED,
+ .mask = VALID_CRC_CNT_CRC_MASK,
+ }, {
+ .string = "phy_cu_media_crc_error_count",
+ .reg = MSCC_PHY_EXT_PHY_CNTL_4,
+ .page = MSCC_PHY_PAGE_EXTENDED,
+ .mask = ERR_CNT_MASK,
+ },
+};
+
+static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
+ {
+ .string = "phy_receive_errors",
+ .reg = MSCC_PHY_ERR_RX_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_false_carrier",
+ .reg = MSCC_PHY_ERR_FALSE_CARRIER_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_cu_media_link_disconnect",
+ .reg = MSCC_PHY_ERR_LINK_DISCONNECT_CNT,
+ .page = MSCC_PHY_PAGE_STANDARD,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_cu_media_crc_good_count",
+ .reg = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED,
+ .mask = VALID_CRC_CNT_CRC_MASK,
+ }, {
+ .string = "phy_cu_media_crc_error_count",
+ .reg = MSCC_PHY_EXT_PHY_CNTL_4,
+ .page = MSCC_PHY_PAGE_EXTENDED,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_serdes_tx_good_pkt_count",
+ .reg = MSCC_PHY_SERDES_TX_VALID_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED_3,
+ .mask = VALID_CRC_CNT_CRC_MASK,
+ }, {
+ .string = "phy_serdes_tx_bad_crc_count",
+ .reg = MSCC_PHY_SERDES_TX_CRC_ERR_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED_3,
+ .mask = ERR_CNT_MASK,
+ }, {
+ .string = "phy_serdes_rx_good_pkt_count",
+ .reg = MSCC_PHY_SERDES_RX_VALID_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED_3,
+ .mask = VALID_CRC_CNT_CRC_MASK,
+ }, {
+ .string = "phy_serdes_rx_bad_crc_count",
+ .reg = MSCC_PHY_SERDES_RX_CRC_ERR_CNT,
+ .page = MSCC_PHY_PAGE_EXTENDED_3,
+ .mask = ERR_CNT_MASK,
+ },
+};
+
struct vsc8531_private {
int rate_magic;
- u8 led_0_mode;
- u8 led_1_mode;
+ u16 supp_led_modes;
+ u32 leds_mode[MAX_LEDS];
+ u8 nleds;
+ const struct vsc85xx_hw_stat *hw_stats;
+ u64 *stats;
+ int nstats;
+ bool pkg_init;
+ /* For multi-port PHYs: the MDIO address of the base PHY in the
+ * package.
+ */
+ unsigned int base_addr;
};
#ifdef CONFIG_OF_MDIO
struct vsc8531_edge_rate_table {
- u16 vddmac;
- u8 slowdown[8];
+ u32 vddmac;
+ u32 slowdown[8];
};
static const struct vsc8531_edge_rate_table edge_table[] = {
@@ -123,12 +393,66 @@ static const struct vsc8531_edge_rate_table edge_table[] = {
};
#endif /* CONFIG_OF_MDIO */
-static int vsc85xx_phy_page_set(struct phy_device *phydev, u16 page)
+static int vsc85xx_phy_read_page(struct phy_device *phydev)
{
- int rc;
+ return __phy_read(phydev, MSCC_EXT_PAGE_ACCESS);
+}
- rc = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page);
- return rc;
+static int vsc85xx_phy_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page);
+}
+
+static int vsc85xx_get_sset_count(struct phy_device *phydev)
+{
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!priv)
+ return 0;
+
+ return priv->nstats;
+}
+
+static void vsc85xx_get_strings(struct phy_device *phydev, u8 *data)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ int i;
+
+ if (!priv)
+ return;
+
+ for (i = 0; i < priv->nstats; i++)
+ strlcpy(data + i * ETH_GSTRING_LEN, priv->hw_stats[i].string,
+ ETH_GSTRING_LEN);
+}
+
+static u64 vsc85xx_get_stat(struct phy_device *phydev, int i)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ int val;
+
+ val = phy_read_paged(phydev, priv->hw_stats[i].page,
+ priv->hw_stats[i].reg);
+ if (val < 0)
+ return U64_MAX;
+
+ val = val & priv->hw_stats[i].mask;
+ priv->stats[i] += val;
+
+ return priv->stats[i];
+}
+
+static void vsc85xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ int i;
+
+ if (!priv)
+ return;
+
+ for (i = 0; i < priv->nstats; i++)
+ data[i] = vsc85xx_get_stat(phydev, i);
}
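
The statistics path above folds a narrow hardware counter (masked to its valid bits, and presumably cleared by the read itself) into a 64-bit software total on every query. A minimal model of that accumulation (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t ex_total;

    /* Each hardware reading is masked (cf. ERR_CNT_MASK) and added to a
     * 64-bit running total, as in vsc85xx_get_stat().
     */
    static uint64_t ex_accumulate(uint16_t hw_reading, uint16_t mask)
    {
            ex_total += hw_reading & mask;
            return ex_total;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)ex_accumulate(0x12ff, 0xff)); /* 255 */
            printf("%llu\n", (unsigned long long)ex_accumulate(0x0003, 0xff)); /* 258 */
            return 0;
    }
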
static int vsc85xx_led_cntl_set(struct phy_device *phydev,
@@ -140,14 +464,8 @@ static int vsc85xx_led_cntl_set(struct phy_device *phydev,
mutex_lock(&phydev->lock);
reg_val = phy_read(phydev, MSCC_PHY_LED_MODE_SEL);
- if (led_num) {
- reg_val &= ~LED_1_MODE_SEL_MASK;
- reg_val |= (((u16)mode << LED_1_MODE_SEL_POS) &
- LED_1_MODE_SEL_MASK);
- } else {
- reg_val &= ~LED_0_MODE_SEL_MASK;
- reg_val |= ((u16)mode & LED_0_MODE_SEL_MASK);
- }
+ reg_val &= ~LED_MODE_SEL_MASK(led_num);
+ reg_val |= LED_MODE_SEL(led_num, (u16)mode);
rc = phy_write(phydev, MSCC_PHY_LED_MODE_SEL, reg_val);
mutex_unlock(&phydev->lock);
@@ -173,7 +491,7 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix)
u16 reg_val;
reg_val = phy_read(phydev, MSCC_PHY_BYPASS_CONTROL);
- if ((mdix == ETH_TP_MDI) || (mdix == ETH_TP_MDI_X)) {
+ if (mdix == ETH_TP_MDI || mdix == ETH_TP_MDI_X) {
reg_val |= (DISABLE_PAIR_SWAP_CORR_MASK |
DISABLE_POLARITY_CORR_MASK |
DISABLE_HP_AUTO_MDIX_MASK);
@@ -183,25 +501,20 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix)
DISABLE_HP_AUTO_MDIX_MASK);
}
rc = phy_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg_val);
- if (rc != 0)
+ if (rc)
return rc;
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
- if (rc != 0)
- return rc;
+ reg_val = 0;
- reg_val = phy_read(phydev, MSCC_PHY_EXT_MODE_CNTL);
- reg_val &= ~(FORCE_MDI_CROSSOVER_MASK);
if (mdix == ETH_TP_MDI)
- reg_val |= FORCE_MDI_CROSSOVER_MDI;
+ reg_val = FORCE_MDI_CROSSOVER_MDI;
else if (mdix == ETH_TP_MDI_X)
- reg_val |= FORCE_MDI_CROSSOVER_MDIX;
- rc = phy_write(phydev, MSCC_PHY_EXT_MODE_CNTL, reg_val);
- if (rc != 0)
- return rc;
+ reg_val = FORCE_MDI_CROSSOVER_MDIX;
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
- if (rc != 0)
+ rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
+ MSCC_PHY_EXT_MODE_CNTL, FORCE_MDI_CROSSOVER_MASK,
+ reg_val);
+ if (rc < 0)
return rc;
return genphy_restart_aneg(phydev);
@@ -209,30 +522,24 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix)
static int vsc85xx_downshift_get(struct phy_device *phydev, u8 *count)
{
- int rc;
- u16 reg_val;
+ int reg_val;
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
- if (rc != 0)
- goto out;
+ reg_val = phy_read_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
+ MSCC_PHY_ACTIPHY_CNTL);
+ if (reg_val < 0)
+ return reg_val;
- reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
reg_val &= DOWNSHIFT_CNTL_MASK;
if (!(reg_val & DOWNSHIFT_EN))
*count = DOWNSHIFT_DEV_DISABLE;
else
*count = ((reg_val & ~DOWNSHIFT_EN) >> DOWNSHIFT_CNTL_POS) + 2;
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-out:
- return rc;
+ return 0;
}
static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count)
{
- int rc;
- u16 reg_val;
-
if (count == DOWNSHIFT_DEV_DEFAULT_COUNT) {
/* Default downshift count 3 (i.e. Bit3:2 = 0b01) */
count = ((1 << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN);
@@ -244,21 +551,9 @@ static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count)
count = (((count - 2) << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN);
}
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED);
- if (rc != 0)
- goto out;
-
- reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
- reg_val &= ~(DOWNSHIFT_CNTL_MASK);
- reg_val |= count;
- rc = phy_write(phydev, MSCC_PHY_ACTIPHY_CNTL, reg_val);
- if (rc != 0)
- goto out;
-
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-
-out:
- return rc;
+ return phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED,
+ MSCC_PHY_ACTIPHY_CNTL, DOWNSHIFT_CNTL_MASK,
+ count);
}
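
The downshift count is stored biased: valid counts 2..5 occupy bits 3:2, with DOWNSHIFT_EN in bit 4, and vsc85xx_downshift_get() reverses the transform. A standalone round-trip of both directions (constants copied from this file):

    #include <stdio.h>

    #define EX_DOWNSHIFT_EN         0x0010  /* DOWNSHIFT_EN */
    #define EX_DOWNSHIFT_POS        2       /* DOWNSHIFT_CNTL_POS */

    int main(void)
    {
            unsigned int count, reg;

            for (count = 2; count <= 5; count++) {
                    reg = ((count - 2) << EX_DOWNSHIFT_POS) | EX_DOWNSHIFT_EN;
                    printf("count %u -> reg 0x%02x -> count %u\n", count, reg,
                           ((reg & ~EX_DOWNSHIFT_EN) >> EX_DOWNSHIFT_POS) + 2);
            }
            return 0;
    }
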
static int vsc85xx_wol_set(struct phy_device *phydev,
@@ -272,46 +567,48 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
u8 *mac_addr = phydev->attached_dev->dev_addr;
mutex_lock(&phydev->lock);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc != 0)
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc < 0) {
+ rc = phy_restore_page(phydev, rc, rc);
goto out_unlock;
+ }
if (wol->wolopts & WAKE_MAGIC) {
/* Store the device address for the magic packet */
for (i = 0; i < ARRAY_SIZE(pwd); i++)
pwd[i] = mac_addr[5 - (i * 2 + 1)] << 8 |
mac_addr[5 - i * 2];
- phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]);
- phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]);
- phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]);
+ __phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]);
+ __phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]);
+ __phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]);
} else {
- phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0);
- phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0);
- phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0);
}
if (wol_conf->wolopts & WAKE_MAGICSECURE) {
for (i = 0; i < ARRAY_SIZE(pwd); i++)
pwd[i] = wol_conf->sopass[5 - (i * 2 + 1)] << 8 |
wol_conf->sopass[5 - i * 2];
- phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]);
- phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]);
- phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]);
+ __phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]);
+ __phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]);
+ __phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]);
} else {
- phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0);
- phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0);
- phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0);
+ __phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0);
}
- reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+ reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
if (wol_conf->wolopts & WAKE_MAGICSECURE)
reg_val |= SECURE_ON_ENABLE;
else
reg_val &= ~SECURE_ON_ENABLE;
- phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
+ __phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
- if (rc != 0)
+ rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
+ if (rc < 0)
goto out_unlock;
if (wol->wolopts & WAKE_MAGIC) {
@@ -319,14 +616,14 @@ static int vsc85xx_wol_set(struct phy_device *phydev,
reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
reg_val |= MII_VSC85XX_INT_MASK_WOL;
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
- if (rc != 0)
+ if (rc)
goto out_unlock;
} else {
/* Disable the WOL interrupt */
reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
reg_val &= (~MII_VSC85XX_INT_MASK_WOL);
rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
- if (rc != 0)
+ if (rc)
goto out_unlock;
}
/* Clear WOL interrupt status */
@@ -348,17 +645,17 @@ static void vsc85xx_wol_get(struct phy_device *phydev,
struct ethtool_wolinfo *wol_conf = wol;
mutex_lock(&phydev->lock);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc != 0)
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc < 0)
goto out_unlock;
- reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+ reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
if (reg_val & SECURE_ON_ENABLE)
wol_conf->wolopts |= WAKE_MAGICSECURE;
if (wol_conf->wolopts & WAKE_MAGICSECURE) {
- pwd[0] = phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD);
- pwd[1] = phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD);
- pwd[2] = phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD);
+ pwd[0] = __phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD);
+ pwd[1] = __phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD);
+ pwd[2] = __phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD);
for (i = 0; i < ARRAY_SIZE(pwd); i++) {
wol_conf->sopass[5 - i * 2] = pwd[i] & 0x00ff;
wol_conf->sopass[5 - (i * 2 + 1)] = (pwd[i] & 0xff00)
@@ -366,18 +663,16 @@ static void vsc85xx_wol_get(struct phy_device *phydev,
}
}
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-
out_unlock:
+ phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
mutex_unlock(&phydev->lock);
}
#ifdef CONFIG_OF_MDIO
static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
{
- u8 sd;
- u16 vdd;
- int rc, i, j;
+ u32 vdd, sd;
+ int i, j;
struct device *dev = &phydev->mdio.dev;
struct device_node *of_node = dev->of_node;
u8 sd_array_size = ARRAY_SIZE(edge_table[0].slowdown);
@@ -385,12 +680,10 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
if (!of_node)
return -ENODEV;
- rc = of_property_read_u16(of_node, "vsc8531,vddmac", &vdd);
- if (rc != 0)
+ if (of_property_read_u32(of_node, "vsc8531,vddmac", &vdd))
vdd = MSCC_VDDMAC_3300;
- rc = of_property_read_u8(of_node, "vsc8531,edge-slowdown", &sd);
- if (rc != 0)
+ if (of_property_read_u32(of_node, "vsc8531,edge-slowdown", &sd))
sd = 0;
for (i = 0; i < ARRAY_SIZE(edge_table); i++)
@@ -404,19 +697,20 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
static int vsc85xx_dt_led_mode_get(struct phy_device *phydev,
char *led,
- u8 default_mode)
+ u32 default_mode)
{
+ struct vsc8531_private *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
struct device_node *of_node = dev->of_node;
- u8 led_mode;
+ u32 led_mode;
int err;
if (!of_node)
return -ENODEV;
led_mode = default_mode;
- err = of_property_read_u8(of_node, led, &led_mode);
- if (!err && (led_mode > 15 || led_mode == 7 || led_mode == 11)) {
+ err = of_property_read_u32(of_node, led, &led_mode);
+ if (!err && !(BIT(led_mode) & priv->supp_led_modes)) {
phydev_err(phydev, "DT %s invalid\n", led);
return -EINVAL;
}
@@ -438,24 +732,36 @@ static int vsc85xx_dt_led_mode_get(struct phy_device *phydev,
}
#endif /* CONFIG_OF_MDIO */
+static int vsc85xx_dt_led_modes_get(struct phy_device *phydev,
+ u32 *default_mode)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ char led_dt_prop[28];
+ int i, ret;
+
+ for (i = 0; i < priv->nleds; i++) {
+ ret = sprintf(led_dt_prop, "vsc8531,led-%d-mode", i);
+ if (ret < 0)
+ return ret;
+
+ ret = vsc85xx_dt_led_mode_get(phydev, led_dt_prop,
+ default_mode[i]);
+ if (ret < 0)
+ return ret;
+ priv->leds_mode[i] = ret;
+ }
+
+ return 0;
+}
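
vsc85xx_dt_led_modes_get() above derives each DT property name at runtime from the LED index; for the four LEDs (MAX_LEDS) the generated names are easy to enumerate standalone:

    #include <stdio.h>

    int main(void)
    {
            char prop[28];  /* sized like led_dt_prop in the driver */
            int i;

            for (i = 0; i < 4; i++) {       /* MAX_LEDS */
                    snprintf(prop, sizeof(prop), "vsc8531,led-%d-mode", i);
                    printf("%s\n", prop);
            }
            return 0;
    }
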
+
static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev, u8 edge_rate)
{
int rc;
- u16 reg_val;
mutex_lock(&phydev->lock);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc != 0)
- goto out_unlock;
- reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
- reg_val &= ~(EDGE_RATE_CNTL_MASK);
- reg_val |= (edge_rate << EDGE_RATE_CNTL_POS);
- rc = phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
- if (rc != 0)
- goto out_unlock;
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
-
-out_unlock:
+ rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
+ MSCC_PHY_WOL_MAC_CONTROL, EDGE_RATE_CNTL_MASK,
+ edge_rate << EDGE_RATE_CNTL_POS);
mutex_unlock(&phydev->lock);
return rc;
@@ -486,7 +792,7 @@ static int vsc85xx_mac_if_set(struct phy_device *phydev,
goto out_unlock;
}
rc = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, reg_val);
- if (rc != 0)
+ if (rc)
goto out_unlock;
rc = genphy_soft_reset(phydev);
@@ -504,17 +810,17 @@ static int vsc85xx_default_config(struct phy_device *phydev)
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->lock);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc != 0)
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc < 0)
goto out_unlock;
reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL);
reg_val &= ~(RGMII_RX_CLK_DELAY_MASK);
reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS);
phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
out_unlock:
+ rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
mutex_unlock(&phydev->lock);
return rc;
@@ -543,9 +849,812 @@ static int vsc85xx_set_tunable(struct phy_device *phydev,
}
}
+/* mdiobus lock should be locked when using this function */
+static void vsc85xx_tr_write(struct phy_device *phydev, u16 addr, u32 val)
+{
+ __phy_write(phydev, MSCC_PHY_TR_MSB, val >> 16);
+ __phy_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0));
+ __phy_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr));
+}
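
The token-ring write above carries a 32-bit CSR value over two 16-bit MDIO registers (MSB first, then LSB) before strobing the target address with TR_WRITE. A trivial standalone check of the split and reassembly:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t val = 0x0012b00a;      /* a value from the EEE init table */
            uint16_t msb = val >> 16;
            uint16_t lsb = val & 0xffff;

            printf("msb=0x%04x lsb=0x%04x reassembled=0x%08x\n",
                   msb, lsb, ((uint32_t)msb << 16) | lsb);
            return 0;
    }
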
+
+static int vsc85xx_eee_init_seq_set(struct phy_device *phydev)
+{
+ const struct reg_val init_eee[] = {
+ {0x0f82, 0x0012b00a},
+ {0x1686, 0x00000004},
+ {0x168c, 0x00d2c46f},
+ {0x17a2, 0x00000620},
+ {0x16a0, 0x00eeffdd},
+ {0x16a6, 0x00071448},
+ {0x16a4, 0x0013132f},
+ {0x16a8, 0x00000000},
+ {0x0ffc, 0x00c0a028},
+ {0x0fe8, 0x0091b06c},
+ {0x0fea, 0x00041600},
+ {0x0f80, 0x00000af4},
+ {0x0fec, 0x00901809},
+ {0x0fee, 0x0000a6a1},
+ {0x0ffe, 0x00b01007},
+ {0x16b0, 0x00eeff00},
+ {0x16b2, 0x00007000},
+ {0x16b4, 0x00000814},
+ };
+ unsigned int i;
+ int oldpage;
+
+ mutex_lock(&phydev->lock);
+ oldpage = phy_select_page(phydev, MSCC_PHY_PAGE_TR);
+ if (oldpage < 0)
+ goto out_unlock;
+
+ for (i = 0; i < ARRAY_SIZE(init_eee); i++)
+ vsc85xx_tr_write(phydev, init_eee[i].reg, init_eee[i].val);
+
+out_unlock:
+ oldpage = phy_restore_page(phydev, oldpage, oldpage);
+ mutex_unlock(&phydev->lock);
+
+ return oldpage;
+}
+
+/* phydev->bus->mdio_lock should be locked when using this function */
+static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
+ dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
+ dump_stack();
+ }
+
+ return __mdiobus_write(phydev->mdio.bus, priv->base_addr, regnum, val);
+}
+
+/* phydev->bus->mdio_lock should be locked when using this function */
+static int phy_base_read(struct phy_device *phydev, u32 regnum)
+{
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
+ dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
+ dump_stack();
+ }
+
+ return __mdiobus_read(phydev->mdio.bus, priv->base_addr, regnum);
+}
+
+/* bus->mdio_lock should be locked when using this function */
+static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val)
+{
+ phy_base_write(phydev, MSCC_PHY_TR_MSB, val >> 16);
+ phy_base_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0));
+ phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr));
+}
+
+/* bus->mdio_lock should be locked when using this function */
+static int vsc8584_cmd(struct phy_device *phydev, u16 val)
+{
+ unsigned long deadline;
+ u16 reg_val;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NCOMPLETED | val);
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ reg_val = phy_base_read(phydev, MSCC_PHY_PROC_CMD);
+ } while (time_before(jiffies, deadline) &&
+ (reg_val & PROC_CMD_NCOMPLETED) &&
+ !(reg_val & PROC_CMD_FAILED));
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ if (reg_val & PROC_CMD_FAILED)
+ return -EIO;
+
+ if (reg_val & PROC_CMD_NCOMPLETED)
+ return -ETIMEDOUT;
+
+ return 0;
+}
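
vsc8584_cmd() above is a bounded-poll handshake: write the command with PROC_CMD_NCOMPLETED set, then spin on the register until the micro clears it, flags PROC_CMD_FAILED, or the jiffies deadline passes. For readers unfamiliar with the time_before() idiom, a user-space analogue of the same pattern (hypothetical ex_ helpers, CLOCK_MONOTONIC standing in for jiffies):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static long long ex_now_ms(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
    }

    /* Poll check() until it reports completion or timeout_ms elapses */
    static int ex_poll_until(bool (*check)(void), int timeout_ms)
    {
            long long deadline = ex_now_ms() + timeout_ms;

            while (ex_now_ms() < deadline)
                    if (check())
                            return 0;       /* completed */
            return -1;                      /* cf. -ETIMEDOUT */
    }

    static bool ex_done(void) { return true; }

    int main(void)
    {
            printf("%d\n", ex_poll_until(ex_done, 500));
            return 0;
    }
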
+
+/* bus->mdio_lock should be locked when using this function */
+static int vsc8584_micro_deassert_reset(struct phy_device *phydev,
+ bool patch_en)
+{
+ u32 enable, release;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ enable = RUN_FROM_INT_ROM | MICRO_CLK_EN | DW8051_CLK_EN;
+ release = MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN |
+ MICRO_CLK_EN;
+
+ if (patch_en) {
+ enable |= MICRO_PATCH_EN;
+ release |= MICRO_PATCH_EN;
+
+ /* Clear all patches */
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM);
+ }
+
+ /* Enable 8051 Micro clock; CLEAR/SET patch present; disable PRAM clock
+ * override and addr. auto-incr; operate at 125 MHz
+ */
+ phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, enable);
+ /* Release 8051 Micro SW reset */
+ phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, release);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ return 0;
+}
+
+/* bus->mdio_lock should be locked when using this function */
+static int vsc8584_micro_assert_reset(struct phy_device *phydev)
+{
+ int ret;
+ u16 reg;
+
+ ret = vsc8584_cmd(phydev, PROC_CMD_NOP);
+ if (ret)
+ return ret;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+ reg &= ~EN_PATCH_RAM_TRAP_ADDR(4);
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
+
+ phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(4), 0x005b);
+ phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(4), 0x005b);
+
+ reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+ reg |= EN_PATCH_RAM_TRAP_ADDR(4);
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
+
+ phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NOP);
+
+ reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS);
+ reg &= ~MICRO_NSOFT_RESET;
+ phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, reg);
+
+ phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_MCB_ACCESS_MAC_CONF |
+ PROC_CMD_SGMII_PORT(0) | PROC_CMD_NO_MAC_CONF |
+ PROC_CMD_READ);
+
+ reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+ reg &= ~EN_PATCH_RAM_TRAP_ADDR(4);
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ return 0;
+}
+
+/* bus->mdio_lock should be locked when using this function */
+static int vsc8584_get_fw_crc(struct phy_device *phydev, u16 start, u16 size,
+ u16 *crc)
+{
+ int ret;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
+
+ phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_2, start);
+ phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_3, size);
+
+ /* Start Micro command */
+ ret = vsc8584_cmd(phydev, PROC_CMD_CRC16);
+ if (ret)
+ goto out;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
+
+ *crc = phy_base_read(phydev, MSCC_PHY_VERIPHY_CNTL_2);
+
+out:
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ return ret;
+}
+
+/* bus->mdio_lock should be locked when using this function */
+static int vsc8584_patch_fw(struct phy_device *phydev,
+ const struct firmware *fw)
+{
+ int i, ret;
+
+ ret = vsc8584_micro_assert_reset(phydev);
+ if (ret) {
+ dev_err(&phydev->mdio.dev,
+ "%s: failed to assert reset of micro\n", __func__);
+ return ret;
+ }
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ /* Hold 8051 Micro in SW Reset, Enable auto incr address and patch clock
+ * Disable the 8051 Micro clock
+ */
+ phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, RUN_FROM_INT_ROM |
+ AUTOINC_ADDR | PATCH_RAM_CLK | MICRO_CLK_EN |
+ MICRO_CLK_DIVIDE(2));
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_PRAM | INT_MEM_WRITE_EN |
+ INT_MEM_DATA(2));
+ phy_base_write(phydev, MSCC_INT_MEM_ADDR, 0x0000);
+
+ for (i = 0; i < fw->size; i++)
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_PRAM |
+ INT_MEM_WRITE_EN | fw->data[i]);
+
+ /* Clear internal memory access */
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ return 0;
+}
+
+/* bus->mdio_lock should be locked when using this function */
+static bool vsc8574_is_serdes_init(struct phy_device *phydev)
+{
+ u16 reg;
+ bool ret;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ reg = phy_base_read(phydev, MSCC_TRAP_ROM_ADDR(1));
+ if (reg != 0x3eb7) {
+ ret = false;
+ goto out;
+ }
+
+ reg = phy_base_read(phydev, MSCC_PATCH_RAM_ADDR(1));
+ if (reg != 0x4012) {
+ ret = false;
+ goto out;
+ }
+
+ reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
+ if (reg != EN_PATCH_RAM_TRAP_ADDR(1)) {
+ ret = false;
+ goto out;
+ }
+
+ reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS);
+ if ((MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN |
+ MICRO_CLK_EN) != (reg & MSCC_DW8051_VLD_MASK)) {
+ ret = false;
+ goto out;
+ }
+
+ ret = true;
+out:
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ return ret;
+}
+
+/* bus->mdio_lock should be locked when using this function */
+static int vsc8574_config_pre_init(struct phy_device *phydev)
+{
+ const struct reg_val pre_init1[] = {
+ {0x0fae, 0x000401bd},
+ {0x0fac, 0x000f000f},
+ {0x17a0, 0x00a0f147},
+ {0x0fe4, 0x00052f54},
+ {0x1792, 0x0027303d},
+ {0x07fe, 0x00000704},
+ {0x0fe0, 0x00060150},
+ {0x0f82, 0x0012b00a},
+ {0x0f80, 0x00000d74},
+ {0x02e0, 0x00000012},
+ {0x03a2, 0x00050208},
+ {0x03b2, 0x00009186},
+ {0x0fb0, 0x000e3700},
+ {0x1688, 0x00049f81},
+ {0x0fd2, 0x0000ffff},
+ {0x168a, 0x00039fa2},
+ {0x1690, 0x0020640b},
+ {0x0258, 0x00002220},
+ {0x025a, 0x00002a20},
+ {0x025c, 0x00003060},
+ {0x025e, 0x00003fa0},
+ {0x03a6, 0x0000e0f0},
+ {0x0f92, 0x00001489},
+ {0x16a2, 0x00007000},
+ {0x16a6, 0x00071448},
+ {0x16a0, 0x00eeffdd},
+ {0x0fe8, 0x0091b06c},
+ {0x0fea, 0x00041600},
+ {0x16b0, 0x00eeff00},
+ {0x16b2, 0x00007000},
+ {0x16b4, 0x00000814},
+ {0x0f90, 0x00688980},
+ {0x03a4, 0x0000d8f0},
+ {0x0fc0, 0x00000400},
+ {0x07fa, 0x0050100f},
+ {0x0796, 0x00000003},
+ {0x07f8, 0x00c3ff98},
+ {0x0fa4, 0x0018292a},
+ {0x168c, 0x00d2c46f},
+ {0x17a2, 0x00000620},
+ {0x16a4, 0x0013132f},
+ {0x16a8, 0x00000000},
+ {0x0ffc, 0x00c0a028},
+ {0x0fec, 0x00901c09},
+ {0x0fee, 0x0004a6a1},
+ {0x0ffe, 0x00b01807},
+ };
+ const struct reg_val pre_init2[] = {
+ {0x0486, 0x0008a518},
+ {0x0488, 0x006dc696},
+ {0x048a, 0x00000912},
+ {0x048e, 0x00000db6},
+ {0x049c, 0x00596596},
+ {0x049e, 0x00000514},
+ {0x04a2, 0x00410280},
+ {0x04a4, 0x00000000},
+ {0x04a6, 0x00000000},
+ {0x04a8, 0x00000000},
+ {0x04aa, 0x00000000},
+ {0x04ae, 0x007df7dd},
+ {0x04b0, 0x006d95d4},
+ {0x04b2, 0x00492410},
+ };
+ struct device *dev = &phydev->mdio.dev;
+ const struct firmware *fw;
+ unsigned int i;
+ u16 crc, reg;
+ bool serdes_init;
+ int ret;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ /* all writes below are broadcasted to all PHYs in the same package */
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg |= SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0);
+
+ /* The register writes below tweak analog and electrical configuration
+ * values that were determined through characterization by PHY
+ * engineers. They don't mean anything more than "these are the best
+ * values".
+ */
+ phy_base_write(phydev, MSCC_PHY_EXT_PHY_CNTL_2, 0x0040);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_20, 0x4320);
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_24, 0x0c00);
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_9, 0x18ca);
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1b20);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg |= 0x8000;
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+ for (i = 0; i < ARRAY_SIZE(pre_init1); i++)
+ vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2);
+
+ phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+ for (i = 0; i < ARRAY_SIZE(pre_init2); i++)
+ vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg &= ~0x8000;
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ /* end of write broadcasting */
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg &= ~SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ ret = request_firmware(&fw, MSCC_VSC8574_REVB_INT8051_FW, dev);
+ if (ret) {
+ dev_err(dev, "failed to load firmware %s, ret: %d\n",
+ MSCC_VSC8574_REVB_INT8051_FW, ret);
+ return ret;
+ }
+
+ /* Add one byte to size for the one added by the patch_fw function */
+ ret = vsc8584_get_fw_crc(phydev,
+ MSCC_VSC8574_REVB_INT8051_FW_START_ADDR,
+ fw->size + 1, &crc);
+ if (ret)
+ goto out;
+
+ if (crc == MSCC_VSC8574_REVB_INT8051_FW_CRC) {
+ serdes_init = vsc8574_is_serdes_init(phydev);
+
+ if (!serdes_init) {
+ ret = vsc8584_micro_assert_reset(phydev);
+ if (ret) {
+ dev_err(dev,
+ "%s: failed to assert reset of micro\n",
+ __func__);
+ goto out;
+ }
+ }
+ } else {
+ dev_dbg(dev, "FW CRC is not the expected one, patching FW\n");
+
+ serdes_init = false;
+
+ if (vsc8584_patch_fw(phydev, fw))
+ dev_warn(dev,
+ "failed to patch FW, expect non-optimal device\n");
+ }
+
+ if (!serdes_init) {
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), 0x3eb7);
+ phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), 0x4012);
+ phy_base_write(phydev, MSCC_INT_MEM_CNTL,
+ EN_PATCH_RAM_TRAP_ADDR(1));
+
+ vsc8584_micro_deassert_reset(phydev, false);
+
+ /* Add one byte to size for the one added by the patch_fw
+ * function
+ */
+ ret = vsc8584_get_fw_crc(phydev,
+ MSCC_VSC8574_REVB_INT8051_FW_START_ADDR,
+ fw->size + 1, &crc);
+ if (ret)
+ goto out;
+
+ if (crc != MSCC_VSC8574_REVB_INT8051_FW_CRC)
+ dev_warn(dev,
+ "FW CRC after patching is not the expected one, expect non-optimal device\n");
+ }
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ ret = vsc8584_cmd(phydev, PROC_CMD_1588_DEFAULT_INIT |
+ PROC_CMD_PHY_INIT);
+
+out:
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+/* bus->mdio_lock should be locked when using this function */
+static int vsc8584_config_pre_init(struct phy_device *phydev)
+{
+ const struct reg_val pre_init1[] = {
+ {0x07fa, 0x0050100f},
+ {0x1688, 0x00049f81},
+ {0x0f90, 0x00688980},
+ {0x03a4, 0x0000d8f0},
+ {0x0fc0, 0x00000400},
+ {0x0f82, 0x0012b002},
+ {0x1686, 0x00000004},
+ {0x168c, 0x00d2c46f},
+ {0x17a2, 0x00000620},
+ {0x16a0, 0x00eeffdd},
+ {0x16a6, 0x00071448},
+ {0x16a4, 0x0013132f},
+ {0x16a8, 0x00000000},
+ {0x0ffc, 0x00c0a028},
+ {0x0fe8, 0x0091b06c},
+ {0x0fea, 0x00041600},
+ {0x0f80, 0x00fffaff},
+ {0x0fec, 0x00901809},
+ {0x0ffe, 0x00b01007},
+ {0x16b0, 0x00eeff00},
+ {0x16b2, 0x00007000},
+ {0x16b4, 0x00000814},
+ };
+ const struct reg_val pre_init2[] = {
+ {0x0486, 0x0008a518},
+ {0x0488, 0x006dc696},
+ {0x048a, 0x00000912},
+ };
+ const struct firmware *fw;
+ struct device *dev = &phydev->mdio.dev;
+ unsigned int i;
+ u16 crc, reg;
+ int ret;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ /* all writes below are broadcasted to all PHYs in the same package */
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg |= SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0);
+
+ reg = phy_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);
+ reg |= PARALLEL_DET_IGNORE_ADVERTISED;
+ phy_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg);
+
+ /* The register writes below tweak analog and electrical configuration
+ * values that were determined through characterization by PHY
+ * engineers. They don't mean anything more than "these are the best
+ * values".
+ */
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_3);
+
+ phy_base_write(phydev, MSCC_PHY_SERDES_TX_CRC_ERR_CNT, 0x2000);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1f20);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg |= 0x8000;
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+ phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x2fa4));
+
+ reg = phy_base_read(phydev, MSCC_PHY_TR_MSB);
+ reg &= ~0x007f;
+ reg |= 0x0019;
+ phy_base_write(phydev, MSCC_PHY_TR_MSB, reg);
+
+ phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x0fa4));
+
+ for (i = 0; i < ARRAY_SIZE(pre_init1); i++)
+ vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2);
+
+ phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+ for (i = 0; i < ARRAY_SIZE(pre_init2); i++)
+ vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg &= ~0x8000;
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ /* end of write broadcasting */
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg &= ~SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ ret = request_firmware(&fw, MSCC_VSC8584_REVB_INT8051_FW, dev);
+ if (ret) {
+ dev_err(dev, "failed to load firmware %s, ret: %d\n",
+ MSCC_VSC8584_REVB_INT8051_FW, ret);
+ return ret;
+ }
+
+ /* Add one byte to size for the one added by the patch_fw function */
+ ret = vsc8584_get_fw_crc(phydev,
+ MSCC_VSC8584_REVB_INT8051_FW_START_ADDR,
+ fw->size + 1, &crc);
+ if (ret)
+ goto out;
+
+ if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC) {
+ dev_dbg(dev, "FW CRC is not the expected one, patching FW\n");
+ if (vsc8584_patch_fw(phydev, fw))
+ dev_warn(dev,
+ "failed to patch FW, expect non-optimal device\n");
+ }
+
+ vsc8584_micro_deassert_reset(phydev, false);
+
+ /* Add one byte to size for the one added by the patch_fw function */
+ ret = vsc8584_get_fw_crc(phydev,
+ MSCC_VSC8584_REVB_INT8051_FW_START_ADDR,
+ fw->size + 1, &crc);
+ if (ret)
+ goto out;
+
+ if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC)
+ dev_warn(dev,
+ "FW CRC after patching is not the expected one, expect non-optimal device\n");
+
+ ret = vsc8584_micro_assert_reset(phydev);
+ if (ret)
+ goto out;
+
+ vsc8584_micro_deassert_reset(phydev, true);
+
+out:
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+/* Check if one PHY has already done the init of the parts common to all PHYs
+ * in the Quad PHY package.
+ */
+static bool vsc8584_is_pkg_init(struct phy_device *phydev, bool reversed)
+{
+ struct mdio_device **map = phydev->mdio.bus->mdio_map;
+ struct vsc8531_private *vsc8531;
+ struct phy_device *phy;
+ int i, addr;
+
+ /* VSC8584 is a Quad PHY */
+ for (i = 0; i < 4; i++) {
+ vsc8531 = phydev->priv;
+
+ if (reversed)
+ addr = vsc8531->base_addr - i;
+ else
+ addr = vsc8531->base_addr + i;
+
+ phy = container_of(map[addr], struct phy_device, mdio);
+
+ if ((phy->phy_id & phydev->drv->phy_id_mask) !=
+ (phydev->drv->phy_id & phydev->drv->phy_id_mask))
+ continue;
+
+ vsc8531 = phy->priv;
+
+ if (vsc8531 && vsc8531->pkg_init)
+ return true;
+ }
+
+ return false;
+}
+
+static int vsc8584_config_init(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ u16 addr, val;
+ int ret, i;
+
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+
+ __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr,
+ MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
+ addr = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
+ MSCC_PHY_EXT_PHY_CNTL_4);
+ addr >>= PHY_CNTL_4_ADDR_POS;
+
+ val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
+ MSCC_PHY_ACTIPHY_CNTL);
+ if (val & PHY_ADDR_REVERSED)
+ vsc8531->base_addr = phydev->mdio.addr + addr;
+ else
+ vsc8531->base_addr = phydev->mdio.addr - addr;
+
+ /* Some parts of the init sequence are identical for every PHY in the
+ * package. Some parts modify the GPIO register bank, which is a set of
+ * registers affecting all PHYs, and a few reset the microprocessor
+ * common to all PHYs. The CRC check responsible for checking the
+ * firmware within the 8051 microprocessor can only be accessed via the
+ * PHY whose internal address in the package is 0.
+ * All PHYs' interrupt mask registers have to be zeroed before enabling
+ * any PHY's interrupt in this register.
+ * For all these reasons, the init sequence has to be done once and only
+ * once, by whichever PHY in the package happens to be initialized
+ * first, and this pre-init function performs the package-critical parts
+ * of that sequence for all PHYs.
+ */
+ if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0)) {
+ if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
+ (PHY_ID_VSC8574 & phydev->drv->phy_id_mask))
+ ret = vsc8574_config_pre_init(phydev);
+ else if ((phydev->phy_id & phydev->drv->phy_id_mask) ==
+ (PHY_ID_VSC8584 & phydev->drv->phy_id_mask))
+ ret = vsc8584_config_pre_init(phydev);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ goto err;
+ }
+
+ vsc8531->pkg_init = true;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
+ val &= ~MAC_CFG_MASK;
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+ val |= MAC_CFG_QSGMII;
+ else
+ val |= MAC_CFG_SGMII;
+
+ ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
+ if (ret)
+ goto err;
+
+ val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
+ PROC_CMD_READ_MOD_WRITE_PORT;
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+ val |= PROC_CMD_QSGMII_MAC;
+ else
+ val |= PROC_CMD_SGMII_MAC;
+
+ ret = vsc8584_cmd(phydev, val);
+ if (ret)
+ goto err;
+
+ usleep_range(10000, 20000);
+
+ /* Disable SerDes for 100Base-FX */
+ ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
+ PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE |
+ PROC_CMD_READ_MOD_WRITE_PORT |
+ PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX);
+ if (ret)
+ goto err;
+
+ /* Disable SerDes for 1000Base-X */
+ ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
+ PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE |
+ PROC_CMD_READ_MOD_WRITE_PORT |
+ PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);
+ if (ret)
+ goto err;
+
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
+ val &= ~(MEDIA_OP_MODE_MASK | VSC8584_MAC_IF_SELECTION_MASK);
+ val |= MEDIA_OP_MODE_COPPER | (VSC8584_MAC_IF_SELECTION_SGMII <<
+ VSC8584_MAC_IF_SELECTION_POS);
+ ret = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, val);
+ if (ret)
+ return ret;
+
+ ret = genphy_soft_reset(phydev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < vsc8531->nleds; i++) {
+ ret = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
+ if (ret)
+ return ret;
+ }
+
+ return genphy_config_init(phydev);
+
+err:
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return ret;
+}
+
static int vsc85xx_config_init(struct phy_device *phydev)
{
- int rc;
+ int rc, i;
struct vsc8531_private *vsc8531 = phydev->priv;
rc = vsc85xx_default_config(phydev);
@@ -560,17 +1669,27 @@ static int vsc85xx_config_init(struct phy_device *phydev)
if (rc)
return rc;
- rc = vsc85xx_led_cntl_set(phydev, 1, vsc8531->led_1_mode);
+ rc = vsc85xx_eee_init_seq_set(phydev);
if (rc)
return rc;
- rc = vsc85xx_led_cntl_set(phydev, 0, vsc8531->led_0_mode);
- if (rc)
- return rc;
+ for (i = 0; i < vsc8531->nleds; i++) {
+ rc = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
+ if (rc)
+ return rc;
+ }
- rc = genphy_config_init(phydev);
+ return genphy_config_init(phydev);
+}
- return rc;
+static int vsc8584_did_interrupt(struct phy_device *phydev)
+{
+ int rc = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+
+ return (rc < 0) ? 0 : rc & MII_VSC85XX_INT_MASK_MASK;
}
static int vsc85xx_ack_interrupt(struct phy_device *phydev)
@@ -622,11 +1741,67 @@ static int vsc85xx_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
+static int vsc8574_probe(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531;
+ u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
+ VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
+ VSC8531_DUPLEX_COLLISION};
+
+ vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
+ if (!vsc8531)
+ return -ENOMEM;
+
+ phydev->priv = vsc8531;
+
+ vsc8531->nleds = 4;
+ vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc8584_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
+ vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
+
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
+}
+
+static int vsc8584_probe(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531;
+ u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
+ VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
+ VSC8531_DUPLEX_COLLISION};
+
+ if ((phydev->phy_id & MSCC_DEV_REV_MASK) != VSC8584_REVB) {
+ dev_err(&phydev->mdio.dev, "Only VSC8584 revB is supported.\n");
+ return -ENOTSUPP;
+ }
+
+ vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
+ if (!vsc8531)
+ return -ENOMEM;
+
+ phydev->priv = vsc8531;
+
+ vsc8531->nleds = 4;
+ vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc8584_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
+ vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
+
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
+}
+
static int vsc85xx_probe(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531;
int rate_magic;
- int led_mode;
+ u32 default_mode[2] = {VSC8531_LINK_1000_ACTIVITY,
+ VSC8531_LINK_100_ACTIVITY};
rate_magic = vsc85xx_edge_rate_magic_get(phydev);
if (rate_magic < 0)
@@ -639,21 +1814,16 @@ static int vsc85xx_probe(struct phy_device *phydev)
phydev->priv = vsc8531;
vsc8531->rate_magic = rate_magic;
+ vsc8531->nleds = 2;
+ vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc85xx_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
+ vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
- /* LED[0] and LED[1] mode */
- led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-0-mode",
- VSC8531_LINK_1000_ACTIVITY);
- if (led_mode < 0)
- return led_mode;
- vsc8531->led_0_mode = led_mode;
-
- led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-1-mode",
- VSC8531_LINK_100_ACTIVITY);
- if (led_mode < 0)
- return led_mode;
- vsc8531->led_1_mode = led_mode;
-
- return 0;
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
}
/* Microsemi VSC85xx PHYs */
@@ -678,6 +1848,11 @@ static struct phy_driver vsc85xx_driver[] = {
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8531,
@@ -699,6 +1874,11 @@ static struct phy_driver vsc85xx_driver[] = {
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8540,
@@ -720,6 +1900,11 @@ static struct phy_driver vsc85xx_driver[] = {
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
},
{
.phy_id = PHY_ID_VSC8541,
@@ -741,6 +1926,63 @@ static struct phy_driver vsc85xx_driver[] = {
.get_wol = &vsc85xx_wol_get,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC8574,
+ .name = "Microsemi GE VSC8574 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
+ .phy_id = PHY_ID_VSC8584,
+ .name = "Microsemi GE VSC8584 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8584_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .did_interrupt = &vsc8584_did_interrupt,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
}
};
@@ -752,6 +1994,8 @@ static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
{ PHY_ID_VSC8531, 0xfffffff0, },
{ PHY_ID_VSC8540, 0xfffffff0, },
{ PHY_ID_VSC8541, 0xfffffff0, },
+ { PHY_ID_VSC8574, 0xfffffff0, },
+ { PHY_ID_VSC8584, 0xfffffff0, },
{ }
};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1ee25877c4d1..1d73ac3309ce 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -482,16 +482,15 @@ static int phy_config_aneg(struct phy_device *phydev)
}
/**
- * phy_start_aneg_priv - start auto-negotiation for this PHY device
+ * phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
- * @sync: indicate whether we should wait for the workqueue cancelation
*
* Description: Sanitizes the settings (if we're not autonegotiating
* them), and then calls the driver's config_aneg function.
* If the PHYCONTROL Layer is operating, we change the state to
* reflect the beginning of Auto-negotiation or forcing.
*/
-static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
+int phy_start_aneg(struct phy_device *phydev)
{
bool trigger = 0;
int err;
@@ -537,24 +536,10 @@ out_unlock:
mutex_unlock(&phydev->lock);
if (trigger)
- phy_trigger_machine(phydev, sync);
+ phy_trigger_machine(phydev);
return err;
}
-
-/**
- * phy_start_aneg - start auto-negotiation for this PHY device
- * @phydev: the phy_device struct
- *
- * Description: Sanitizes the settings (if we're not autonegotiating
- * them), and then calls the driver's config_aneg function.
- * If the PHYCONTROL Layer is operating, we change the state to
- * reflect the beginning of Auto-negotiation or forcing.
- */
-int phy_start_aneg(struct phy_device *phydev)
-{
- return phy_start_aneg_priv(phydev, true);
-}
EXPORT_SYMBOL(phy_start_aneg);
static int phy_poll_aneg_done(struct phy_device *phydev)
@@ -635,6 +620,13 @@ int phy_speed_up(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(phy_speed_up);
+static void phy_queue_state_machine(struct phy_device *phydev,
+ unsigned int secs)
+{
+ mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+ secs * HZ);
+}
+
/**
* phy_start_machine - start PHY state machine tracking
* @phydev: the phy_device struct
@@ -647,7 +639,7 @@ EXPORT_SYMBOL_GPL(phy_speed_up);
*/
void phy_start_machine(struct phy_device *phydev)
{
- queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
+ phy_trigger_machine(phydev);
}
EXPORT_SYMBOL_GPL(phy_start_machine);
@@ -655,19 +647,14 @@ EXPORT_SYMBOL_GPL(phy_start_machine);
* phy_trigger_machine - trigger the state machine to run
*
* @phydev: the phy_device struct
- * @sync: indicate whether we should wait for the workqueue cancelation
*
* Description: There has been a change in state which requires that the
* state machine runs.
*/
-void phy_trigger_machine(struct phy_device *phydev, bool sync)
+void phy_trigger_machine(struct phy_device *phydev)
{
- if (sync)
- cancel_delayed_work_sync(&phydev->state_queue);
- else
- cancel_delayed_work(&phydev->state_queue);
- queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+ phy_queue_state_machine(phydev, 0);
}
/**
@@ -703,7 +690,7 @@ static void phy_error(struct phy_device *phydev)
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
- phy_trigger_machine(phydev, false);
+ phy_trigger_machine(phydev);
}
/**
@@ -745,7 +732,7 @@ static irqreturn_t phy_change(struct phy_device *phydev)
mutex_unlock(&phydev->lock);
/* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev, true);
+ phy_trigger_machine(phydev);
if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
goto phy_err;
@@ -861,6 +848,8 @@ void phy_stop(struct phy_device *phydev)
out_unlock:
mutex_unlock(&phydev->lock);
+ phy_state_machine(&phydev->state_queue.work);
+
/* Cannot call flush_scheduled_work() here as desired because
* of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
* will not reenable interrupts.
@@ -909,7 +898,7 @@ void phy_start(struct phy_device *phydev)
}
mutex_unlock(&phydev->lock);
- phy_trigger_machine(phydev, true);
+ phy_trigger_machine(phydev);
}
EXPORT_SYMBOL(phy_start);
@@ -937,7 +926,6 @@ void phy_state_machine(struct work_struct *work)
bool needs_aneg = false, do_suspend = false;
enum phy_state old_state;
int err = 0;
- int old_link;
mutex_lock(&phydev->lock);
@@ -1021,26 +1009,16 @@ void phy_state_machine(struct work_struct *work)
}
break;
case PHY_RUNNING:
- /* Only register a CHANGE if we are polling and link changed
- * since latest checking.
- */
- if (phy_polling_mode(phydev)) {
- old_link = phydev->link;
- err = phy_read_status(phydev);
- if (err)
- break;
+ if (!phy_polling_mode(phydev))
+ break;
- if (old_link != phydev->link)
- phydev->state = PHY_CHANGELINK;
- }
- /*
- * Failsafe: check that nobody set phydev->link=0 between two
- * poll cycles, otherwise we won't leave RUNNING state as long
- * as link remains down.
- */
- if (!phydev->link && phydev->state == PHY_RUNNING) {
- phydev->state = PHY_CHANGELINK;
- phydev_err(phydev, "no link in PHY_RUNNING\n");
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (!phydev->link) {
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev, true);
}
break;
case PHY_CHANGELINK:
@@ -1066,48 +1044,33 @@ void phy_state_machine(struct work_struct *work)
case PHY_RESUMING:
if (AUTONEG_ENABLE == phydev->autoneg) {
err = phy_aneg_done(phydev);
- if (err < 0)
+ if (err < 0) {
break;
-
- /* err > 0 if AN is done.
- * Otherwise, it's 0, and we're still waiting for AN
- */
- if (err > 0) {
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, false);
- }
- } else {
+ } else if (!err) {
phydev->state = PHY_AN;
phydev->link_timeout = PHY_AN_TIMEOUT;
- }
- } else {
- err = phy_read_status(phydev);
- if (err)
break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, false);
}
}
+
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ phy_link_up(phydev);
+ } else {
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev, false);
+ }
break;
}
mutex_unlock(&phydev->lock);
if (needs_aneg)
- err = phy_start_aneg_priv(phydev, false);
+ err = phy_start_aneg(phydev);
else if (do_suspend)
phy_suspend(phydev);
@@ -1121,11 +1084,14 @@ void phy_state_machine(struct work_struct *work)
/* Only re-schedule a PHY state machine change if we are polling the
* PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
- * between states from phy_mac_interrupt()
+ * between states from phy_mac_interrupt().
+ *
+ * In state PHY_HALTED the PHY gets suspended, so rescheduling the
+ * state machine would be pointless and possibly error prone when
+ * called from phy_disconnect() synchronously.
*/
- if (phy_polling_mode(phydev))
- queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
- PHY_STATE_TIME * HZ);
+ if (phy_polling_mode(phydev) && old_state != PHY_HALTED)
+ phy_queue_state_machine(phydev, PHY_STATE_TIME);
}
/**
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index db1172db1e7c..ab33d1777132 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/bitmap.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
#include <linux/mdio.h>
@@ -42,6 +43,149 @@ MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_basic_features);
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_basic_t1_features);
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_gbit_features);
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_gbit_fibre_features);
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_10gbit_features);
+
+static const int phy_basic_ports_array[] = {
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_TP_BIT,
+ ETHTOOL_LINK_MODE_MII_BIT,
+};
+
+static const int phy_fibre_port_array[] = {
+ ETHTOOL_LINK_MODE_FIBRE_BIT,
+};
+
+static const int phy_all_ports_features_array[] = {
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_TP_BIT,
+ ETHTOOL_LINK_MODE_MII_BIT,
+ ETHTOOL_LINK_MODE_FIBRE_BIT,
+ ETHTOOL_LINK_MODE_AUI_BIT,
+ ETHTOOL_LINK_MODE_BNC_BIT,
+ ETHTOOL_LINK_MODE_Backplane_BIT,
+};
+
+static const int phy_10_100_features_array[] = {
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+};
+
+static const int phy_basic_t1_features_array[] = {
+ ETHTOOL_LINK_MODE_TP_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+};
+
+static const int phy_gbit_features_array[] = {
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+};
+
+static const int phy_10gbit_features_array[] = {
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+};
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
+
+static const int phy_10gbit_full_features_array[] = {
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+};
+
+static void features_init(void)
+{
+ /* 10/100 half/full */
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ phy_basic_features);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ phy_basic_features);
+
+ /* 100 full, TP */
+ linkmode_set_bit_array(phy_basic_t1_features_array,
+ ARRAY_SIZE(phy_basic_t1_features_array),
+ phy_basic_t1_features);
+
+ /* 10/100 half/full + 1000 half/full */
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ phy_gbit_features);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ phy_gbit_features);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ phy_gbit_features);
+
+ /* 10/100 half/full + 1000 half/full + fibre */
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ phy_gbit_fibre_features);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ phy_gbit_fibre_features);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ phy_gbit_fibre_features);
+ linkmode_set_bit_array(phy_fibre_port_array,
+ ARRAY_SIZE(phy_fibre_port_array),
+ phy_gbit_fibre_features);
+
+ /* 10/100 half/full + 1000 half/full + TP/MII/FIBRE/AUI/BNC/Backplane */
+ linkmode_set_bit_array(phy_all_ports_features_array,
+ ARRAY_SIZE(phy_all_ports_features_array),
+ phy_gbit_all_ports_features);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ phy_gbit_all_ports_features);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ phy_gbit_all_ports_features);
+
+ /* 10/100 half/full + 1000 half/full + 10G full */
+ linkmode_set_bit_array(phy_all_ports_features_array,
+ ARRAY_SIZE(phy_all_ports_features_array),
+ phy_10gbit_features);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ phy_10gbit_features);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ phy_10gbit_features);
+ linkmode_set_bit_array(phy_10gbit_features_array,
+ ARRAY_SIZE(phy_10gbit_features_array),
+ phy_10gbit_features);
+
+ /* 10/100/1000/10G full */
+ linkmode_set_bit_array(phy_all_ports_features_array,
+ ARRAY_SIZE(phy_all_ports_features_array),
+ phy_10gbit_full_features);
+ linkmode_set_bit_array(phy_10gbit_full_features_array,
+ ARRAY_SIZE(phy_10gbit_full_features_array),
+ phy_10gbit_full_features);
+}
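
A minimal usage sketch, not part of the patch (driver name and PHY ID are hypothetical): with the bitmaps initialized above, a PHY driver's .features field points at one of the exported link mode masks through the PHY_*_FEATURES macros, as the converted drivers in this series do, instead of OR-ing SUPPORTED_* flags:

	static struct phy_driver example_driver = {
		.phy_id		= 0x00112233,	/* made-up ID, for illustration only */
		.phy_id_mask	= 0xfffffff0,
		.name		= "Example PHY",
		.features	= PHY_GBIT_FEATURES, /* resolves to phy_gbit_features */
	};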
+
void phy_device_free(struct phy_device *phydev)
{
put_device(&phydev->mdio.dev);
@@ -93,7 +237,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!netdev)
return !phydev->suspended;
- /* Don't suspend PHY if the attached netdev parent may wakeup.
+ if (netdev->wol_enabled)
+ return false;
+
+ /* As long as not all affected network drivers support the
+ * wol_enabled flag, let's check for hints that WoL is enabled.
+ * Don't suspend PHY if the attached netdev parent may wake up.
* The parent may point to a PCI device, as in tg3 driver.
*/
if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
@@ -880,8 +1029,6 @@ int phy_init_hw(struct phy_device *phydev)
if (phydev->drv->soft_reset)
ret = phydev->drv->soft_reset(phydev);
- else
- ret = genphy_soft_reset(phydev);
if (ret < 0)
return ret;
@@ -922,13 +1069,13 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
if (!fmt) {
- dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n",
+ phydev_info(phydev, ATTACHED_FMT "\n",
drv_name, phydev_name(phydev),
irq_str);
} else {
va_list ap;
- dev_info(&phydev->mdio.dev, ATTACHED_FMT,
+ phydev_info(phydev, ATTACHED_FMT,
drv_name, phydev_name(phydev),
irq_str);
@@ -1132,9 +1279,9 @@ void phy_detach(struct phy_device *phydev)
sysfs_remove_link(&dev->dev.kobj, "phydev");
sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
}
+ phy_suspend(phydev);
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
- phy_suspend(phydev);
phydev->phylink = NULL;
phy_led_triggers_unregister(phydev);
@@ -1168,12 +1315,13 @@ EXPORT_SYMBOL(phy_detach);
int phy_suspend(struct phy_device *phydev)
{
struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
+ struct net_device *netdev = phydev->attached_dev;
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
int ret = 0;
/* If the device has WOL enabled, we cannot suspend the PHY */
phy_ethtool_get_wol(phydev, &wol);
- if (wol.wolopts)
+ if (wol.wolopts || (netdev && netdev->wol_enabled))
return -EBUSY;
if (phydev->drv && phydrv->suspend)
@@ -1765,6 +1913,125 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
}
EXPORT_SYMBOL(phy_set_max_speed);
+/**
+ * phy_remove_link_mode - Remove a supported link mode
+ * @phydev: phy_device structure to remove link mode from
+ * @link_mode: Link mode to be removed
+ *
+ * Description: Some MACs don't support all link modes which the PHY
+ * does, e.g. a 1G MAC often does not support 1000Half. This helper
+ * removes such a link mode.
+ */
+void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode)
+{
+ WARN_ON(link_mode > 31);
+
+ phydev->supported &= ~BIT(link_mode);
+ phydev->advertising = phydev->supported;
+}
+EXPORT_SYMBOL(phy_remove_link_mode);
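
A minimal usage sketch (hypothetical MAC driver code, not part of the patch): a gigabit MAC that cannot handle half duplex at 1000Mbps strips that mode after connecting its PHY:

	static void example_mac_trim_phy_modes(struct phy_device *phydev)
	{
		/* assumed constraint of the example: no 1000Half support */
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	}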
+
+/**
+ * phy_support_sym_pause - Enable support of symmetrical pause
+ * @phydev: target phy_device struct
+ *
+ * Description: Called by the MAC to indicate it supports symmetrical
+ * Pause, but not asym pause.
+ */
+void phy_support_sym_pause(struct phy_device *phydev)
+{
+ phydev->supported &= ~SUPPORTED_Asym_Pause;
+ phydev->supported |= SUPPORTED_Pause;
+ phydev->advertising = phydev->supported;
+}
+EXPORT_SYMBOL(phy_support_sym_pause);
+
+/**
+ * phy_support_asym_pause - Enable support of asym pause
+ * @phydev: target phy_device struct
+ *
+ * Description: Called by the MAC to indicate it supports Asym Pause.
+ */
+void phy_support_asym_pause(struct phy_device *phydev)
+{
+ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ phydev->advertising = phydev->supported;
+}
+EXPORT_SYMBOL(phy_support_asym_pause);
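
A short sketch of how these two helpers are meant to be picked between (hypothetical probe-path function; the capability flag is an assumption of the example):

	static void example_mac_declare_pause(struct phy_device *phydev,
					      bool mac_has_asym_pause)
	{
		/* declare the MAC's pause capabilities once, after attach */
		if (mac_has_asym_pause)
			phy_support_asym_pause(phydev);
		else
			phy_support_sym_pause(phydev);
	}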
+
+/**
+ * phy_set_sym_pause - Configure symmetric Pause
+ * @phydev: target phy_device struct
+ * @rx: Receiver Pause is supported
+ * @tx: Transmit Pause is supported
+ * @autoneg: Auto neg should be used
+ *
+ * Description: Configure advertised Pause support depending on whether
+ * receiver pause and pause autoneg are supported. Generally called
+ * from the set_pauseparam .ndo.
+ */
+void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
+ bool autoneg)
+{
+ phydev->supported &= ~SUPPORTED_Pause;
+
+ if (rx && tx && autoneg)
+ phydev->supported |= SUPPORTED_Pause;
+
+ phydev->advertising = phydev->supported;
+}
+EXPORT_SYMBOL(phy_set_sym_pause);
+
+/**
+ * phy_set_asym_pause - Configure Pause and Asym Pause
+ * @phydev: target phy_device struct
+ * @rx: Receiver Pause is supported
+ * @tx: Transmit Pause is supported
+ *
+ * Description: Configure advertised Pause support depending on whether
+ * transmit and receiver pause are supported. If there has been a
+ * change in advertising, trigger a new autoneg. Generally called from
+ * the set_pauseparam .ndo.
+ */
+void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx)
+{
+ u16 oldadv = phydev->advertising;
+ u16 newadv = oldadv & ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+
+ if (rx)
+ newadv |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ if (tx)
+ newadv ^= SUPPORTED_Asym_Pause;
+
+ if (oldadv != newadv) {
+ phydev->advertising = newadv;
+
+ if (phydev->autoneg)
+ phy_start_aneg(phydev);
+ }
+}
+EXPORT_SYMBOL(phy_set_asym_pause);
+
+/**
+ * phy_validate_pause - Test if the PHY/MAC support the pause configuration
+ * @phydev: phy_device struct
+ * @pp: requested pause configuration
+ *
+ * Description: Test if the PHY/MAC combination supports the Pause
+ * configuration the user is requesting. Returns true if it is
+ * supported, false otherwise.
+ */
+bool phy_validate_pause(struct phy_device *phydev,
+ struct ethtool_pauseparam *pp)
+{
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ pp->rx_pause != pp->tx_pause))
+ return false;
+ return true;
+}
+EXPORT_SYMBOL(phy_validate_pause);
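
Putting the pause helpers together, a set_pauseparam handler first validates the requested configuration and then applies it (a sketch with hypothetical names, assuming ndev->phydev is attached):

	static int example_set_pauseparam(struct net_device *ndev,
					  struct ethtool_pauseparam *pp)
	{
		struct phy_device *phydev = ndev->phydev;

		if (!phy_validate_pause(phydev, pp))
			return -EINVAL;

		/* may trigger a new autoneg if the advertisement changed */
		phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause);

		return 0;
	}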
+
static void of_set_phy_supported(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -1820,6 +2087,7 @@ static int phy_probe(struct device *dev)
struct phy_device *phydev = to_phy_device(dev);
struct device_driver *drv = phydev->mdio.dev.driver;
struct phy_driver *phydrv = to_phy_driver(drv);
+ u32 features;
int err = 0;
phydev->drv = phydrv;
@@ -1840,7 +2108,8 @@ static int phy_probe(struct device *dev)
* a controller will attach, and may modify one
* or both of these values
*/
- phydev->supported = phydrv->features;
+ ethtool_convert_link_mode_to_legacy_u32(&features, phydrv->features);
+ phydev->supported = features;
of_set_phy_supported(phydev);
phydev->advertising = phydev->supported;
@@ -1860,10 +2129,14 @@ static int phy_probe(struct device *dev)
* (e.g. hardware erratum) where the driver wants to set only one
* of these bits.
*/
- if (phydrv->features & (SUPPORTED_Pause | SUPPORTED_Asym_Pause)) {
+ if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) ||
+ test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) {
phydev->supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
- phydev->supported |= phydrv->features &
- (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features))
+ phydev->supported |= SUPPORTED_Pause;
+ if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydrv->features))
+ phydev->supported |= SUPPORTED_Asym_Pause;
} else {
phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
}
@@ -1976,9 +2249,7 @@ static struct phy_driver genphy_driver = {
.name = "Generic PHY",
.soft_reset = genphy_no_soft_reset,
.config_init = genphy_config_init,
- .features = PHY_GBIT_FEATURES | SUPPORTED_MII |
- SUPPORTED_AUI | SUPPORTED_FIBRE |
- SUPPORTED_BNC,
+ .features = PHY_GBIT_ALL_PORTS_FEATURES,
.aneg_done = genphy_aneg_done,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -1993,6 +2264,8 @@ static int __init phy_init(void)
if (rc)
return rc;
+ features_init();
+
rc = phy_driver_register(&genphy_10g_driver, THIS_MODULE);
if (rc)
goto err_10g;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 3ba5cf2a8a5f..9b8dd0d0ee42 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -68,33 +68,6 @@ struct phylink {
struct sfp_bus *sfp_bus;
};
-static inline void linkmode_zero(unsigned long *dst)
-{
- bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
-{
- bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
- const unsigned long *b)
-{
- bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static inline void linkmode_or(unsigned long *dst, const unsigned long *a,
- const unsigned long *b)
-{
- bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static inline bool linkmode_empty(const unsigned long *src)
-{
- return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
/**
* phylink_set_port_modes() - set the port type modes in the ethtool mask
* @mask: ethtool link mode mask
@@ -717,6 +690,30 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
return 0;
}
+static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
+ phy_interface_t interface)
+{
+ int ret;
+
+ if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+ (pl->link_an_mode == MLO_AN_INBAND &&
+ phy_interface_mode_is_8023z(interface))))
+ return -EINVAL;
+
+ if (pl->phydev)
+ return -EBUSY;
+
+ ret = phy_attach_direct(pl->netdev, phy, 0, interface);
+ if (ret)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy);
+ if (ret)
+ phy_detach(phy);
+
+ return ret;
+}
+
/**
* phylink_connect_phy() - connect a PHY to the phylink instance
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -734,31 +731,13 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
*/
int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
{
- int ret;
-
- if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
- (pl->link_an_mode == MLO_AN_INBAND &&
- phy_interface_mode_is_8023z(pl->link_interface))))
- return -EINVAL;
-
- if (pl->phydev)
- return -EBUSY;
-
/* Use PHY device/driver interface */
if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
pl->link_interface = phy->interface;
pl->link_config.interface = pl->link_interface;
}
- ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
- if (ret)
- return ret;
-
- ret = phylink_bringup_phy(pl, phy);
- if (ret)
- phy_detach(phy);
-
- return ret;
+ return __phylink_connect_phy(pl, phy, pl->link_interface);
}
EXPORT_SYMBOL_GPL(phylink_connect_phy);
@@ -901,6 +880,9 @@ void phylink_start(struct phylink *pl)
phylink_an_mode_str(pl->link_an_mode),
phy_modes(pl->link_config.interface));
+ /* Always set the carrier off */
+ netif_carrier_off(pl->netdev);
+
/* Apply the link configuration to the MAC when starting. This allows
* a fixed-link to start with the correct parameters, and also
* ensures that we set the appropriate advertisement for Serdes links.
@@ -1672,7 +1654,9 @@ static void phylink_sfp_link_up(void *upstream)
static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
{
- return phylink_connect_phy(upstream, phy);
+ struct phylink *pl = upstream;
+
+ return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
}
static void phylink_sfp_disconnect_phy(void *upstream)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 740655261e5b..83060fb349f4 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
}
if (bus->started)
bus->socket_ops->start(bus->sfp);
+ bus->netdev->sfp_bus = bus;
bus->registered = true;
return 0;
}
@@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
{
const struct sfp_upstream_ops *ops = bus->upstream_ops;
+ bus->netdev->sfp_bus = NULL;
if (bus->registered) {
if (bus->started)
bus->socket_ops->stop(bus->sfp);
@@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
{
bus->upstream_ops = NULL;
bus->upstream = NULL;
- bus->netdev->sfp_bus = NULL;
bus->netdev = NULL;
}
@@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
bus->upstream_ops = ops;
bus->upstream = upstream;
bus->netdev = ndev;
- ndev->sfp_bus = bus;
if (bus->sfp) {
ret = sfp_register_bus(bus);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 52fffb98fde9..fd8bb998ae52 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -163,8 +163,6 @@ static const enum gpiod_flags gpio_flags[] = {
/* Give this long for the PHY to reset. */
#define T_PHY_RESET_MS 50
-static DEFINE_MUTEX(sfp_mutex);
-
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -1098,8 +1096,11 @@ static int sfp_hwmon_insert(struct sfp *sfp)
static void sfp_hwmon_remove(struct sfp *sfp)
{
- hwmon_device_unregister(sfp->hwmon_dev);
- kfree(sfp->hwmon_name);
+ if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+ hwmon_device_unregister(sfp->hwmon_dev);
+ sfp->hwmon_dev = NULL;
+ kfree(sfp->hwmon_name);
+ }
}
#else
static int sfp_hwmon_insert(struct sfp *sfp)
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index fbd548a1ad84..2fe9a87b55b5 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -86,7 +86,7 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id = STE101P_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "STe101p",
- .features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
@@ -97,7 +97,7 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id = STE100P_PHY_ID,
.phy_id_mask = 0xffffffff,
.name = "STe100p",
- .features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 02ad03a2fab7..500bc0027c1b 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2400,7 +2400,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
if (ppp->mrru == 0) /* do nothing until mrru is set */
return NULL;
- head = list->next;
+ head = __skb_peek(list);
tail = NULL;
skb_queue_walk_safe(list, p, tmp) {
again:
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index a205750b431b..7ccdc62c6052 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -95,7 +95,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
- struct crypto_skcipher *arc4;
+ struct crypto_sync_skcipher *arc4;
struct shash_desc *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
@@ -155,15 +155,15 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
struct scatterlist sg_in[1], sg_out[1];
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
get_new_key_from_sha(state);
if (!initial_key) {
- crypto_skcipher_setkey(state->arc4, state->sha1_digest,
- state->keylen);
+ crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest,
+ state->keylen);
sg_init_table(sg_in, 1);
sg_init_table(sg_out, 1);
setup_sg(sg_in, state->sha1_digest, state->keylen);
@@ -181,7 +181,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
state->session_key[1] = 0x26;
state->session_key[2] = 0x9e;
}
- crypto_skcipher_setkey(state->arc4, state->session_key, state->keylen);
+ crypto_sync_skcipher_setkey(state->arc4, state->session_key,
+ state->keylen);
skcipher_request_zero(req);
}
@@ -203,7 +204,7 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out;
- state->arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(state->arc4)) {
state->arc4 = NULL;
goto out_free;
@@ -250,7 +251,7 @@ out_free:
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
}
- crypto_free_skcipher(state->arc4);
+ crypto_free_sync_skcipher(state->arc4);
kfree(state);
out:
return NULL;
@@ -266,7 +267,7 @@ static void mppe_free(void *arg)
kfree(state->sha1_digest);
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
- crypto_free_skcipher(state->arc4);
+ crypto_free_sync_skcipher(state->arc4);
kfree(state);
}
}
@@ -366,7 +367,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
int isize, int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
int proto;
int err;
struct scatterlist sg_in[1], sg_out[1];
@@ -426,7 +427,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
setup_sg(sg_in, ibuf, isize);
setup_sg(sg_out, obuf, osize);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
err = crypto_skcipher_encrypt(req);
@@ -480,7 +481,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
struct scatterlist sg_in[1], sg_out[1];
@@ -615,7 +616,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
setup_sg(sg_in, ibuf, 1);
setup_sg(sg_out, obuf, 1);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
if (crypto_skcipher_decrypt(req)) {
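
The conversion pattern running through this file is mechanical: purely synchronous users switch from crypto_skcipher to the sync variants. A condensed, self-contained sketch of the resulting API usage (helper name hypothetical; the calls are the same ones used above):

	#include <crypto/skcipher.h>

	static int example_ecb_arc4(struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int len,
				    const u8 *key, unsigned int keylen)
	{
		struct crypto_sync_skcipher *tfm;
		int err;

		tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_sync_skcipher_setkey(tfm, key, keylen);
		if (!err) {
			SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

			skcipher_request_set_sync_tfm(req, tfm);
			skcipher_request_set_callback(req, 0, NULL, NULL);
			skcipher_request_set_crypt(req, src, dst, len, NULL);
			err = crypto_skcipher_encrypt(req);
			skcipher_request_zero(req);
		}

		crypto_free_sync_skcipher(tfm);
		return err;
	}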
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index ce61231e96ea..62dc564b251d 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb)
goto out;
+ if (skb_mac_header_len(skb) < ETH_HLEN)
+ goto drop;
+
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
goto drop;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index b008266e91ea..9757f1fc104f 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -79,7 +79,6 @@
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/if_slip.h>
-#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -1167,27 +1166,6 @@ static int slip_ioctl(struct tty_struct *tty, struct file *file,
}
}
-#ifdef CONFIG_COMPAT
-static long slip_compat_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case SIOCGIFNAME:
- case SIOCGIFENCAP:
- case SIOCSIFENCAP:
- case SIOCSIFHWADDR:
- case SIOCSKEEPALIVE:
- case SIOCGKEEPALIVE:
- case SIOCSOUTFILL:
- case SIOCGOUTFILL:
- return slip_ioctl(tty, file, cmd,
- (unsigned long)compat_ptr(arg));
- }
-
- return -ENOIOCTLCMD;
-}
-#endif
-
/* VSV changes start here */
#ifdef CONFIG_SLIP_SMART
/* function do_ioctl called from net/core/dev.c
@@ -1280,9 +1258,6 @@ static struct tty_ldisc_ops sl_ldisc = {
.close = slip_close,
.hangup = slip_hangup,
.ioctl = slip_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = slip_compat_ioctl,
-#endif
.receive_buf = slip_receive_buf,
.write_wakeup = slip_write_wakeup,
};
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index f0f7cd977667..f03004f37eca 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -619,7 +619,7 @@ static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)
/* Get packet from user space buffer */
-static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
+static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
struct iov_iter *from, int noblock)
{
int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
@@ -663,7 +663,7 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
if (unlikely(len < ETH_HLEN))
goto err;
- if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+ if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
struct iov_iter i;
copylen = vnet_hdr.hdr_len ?
@@ -724,11 +724,11 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
tap = rcu_dereference(q->tap);
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
- skb_shinfo(skb)->destructor_arg = m->msg_control;
+ skb_shinfo(skb)->destructor_arg = msg_control;
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
- } else if (m && m->msg_control) {
- struct ubuf_info *uarg = m->msg_control;
+ } else if (msg_control) {
+ struct ubuf_info *uarg = msg_control;
uarg->callback(uarg, false);
}
@@ -830,8 +830,7 @@ static ssize_t tap_do_read(struct tap_queue *q,
ssize_t ret = 0;
if (!iov_iter_count(to)) {
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
return 0;
}
@@ -1146,11 +1145,87 @@ static const struct file_operations tap_fops = {
#endif
};
+static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
+{
+ struct tun_xdp_hdr *hdr = xdp->data_hard_start;
+ struct virtio_net_hdr *gso = &hdr->gso;
+ int buflen = hdr->buflen;
+ int vnet_hdr_len = 0;
+ struct tap_dev *tap;
+ struct sk_buff *skb;
+ int err, depth;
+
+ if (q->flags & IFF_VNET_HDR)
+ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+
+ skb = build_skb(xdp->data_hard_start, buflen);
+ if (!skb) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ skb_put(skb, xdp->data_end - xdp->data);
+
+ skb_set_network_header(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ if (vnet_hdr_len) {
+ err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
+ if (err)
+ goto err_kfree;
+ }
+
+ skb_probe_transport_header(skb, ETH_HLEN);
+
+ /* Move network header to the right position for VLAN tagged packets */
+ if ((skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD)) &&
+ __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+ skb_set_network_header(skb, depth);
+
+ rcu_read_lock();
+ tap = rcu_dereference(q->tap);
+ if (tap) {
+ skb->dev = tap->dev;
+ dev_queue_xmit(skb);
+ } else {
+ kfree_skb(skb);
+ }
+ rcu_read_unlock();
+
+ return 0;
+
+err_kfree:
+ kfree_skb(skb);
+err:
+ rcu_read_lock();
+ tap = rcu_dereference(q->tap);
+ if (tap && tap->count_tx_dropped)
+ tap->count_tx_dropped(tap);
+ rcu_read_unlock();
+ return err;
+}
+
static int tap_sendmsg(struct socket *sock, struct msghdr *m,
size_t total_len)
{
struct tap_queue *q = container_of(sock, struct tap_queue, sock);
- return tap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
+ struct tun_msg_ctl *ctl = m->msg_control;
+ struct xdp_buff *xdp;
+ int i;
+
+ if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ for (i = 0; i < ctl->num; i++) {
+ xdp = &((struct xdp_buff *)ctl->ptr)[i];
+ tap_get_user_xdp(q, xdp);
+ }
+ return 0;
+ }
+
+ return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
+ m->msg_flags & MSG_DONTWAIT);
}
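
For reference, a sketch of the sender side this enables (hypothetical caller in the style of vhost-net; assumes struct tun_msg_ctl carries the type/num/ptr fields used above):

	static int example_send_xdp_batch(struct socket *sock,
					  struct xdp_buff *xdp_array, int n)
	{
		struct tun_msg_ctl ctl = {
			.type = TUN_MSG_PTR,
			.num = n,
			.ptr = xdp_array,
		};
		struct msghdr msg = { .msg_control = &ctl };

		/* tap_sendmsg() walks ctl->ptr as an array of xdp_buff */
		return sock->ops->sendmsg(sock, &msg, 0);
	}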
static int tap_recvmsg(struct socket *sock, struct msghdr *m,
@@ -1160,8 +1235,7 @@ static int tap_recvmsg(struct socket *sock, struct msghdr *m,
struct sk_buff *skb = m->msg_control;
int ret;
if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
return -EINVAL;
}
ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6a047d30e8c6..db633ae9f784 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1104,10 +1104,7 @@ static void team_port_disable_netpoll(struct team_port *port)
return;
port->np = NULL;
- /* Wait for transmitting packets to finish before freeing. */
- synchronize_rcu_bh();
- __netpoll_cleanup(np);
- kfree(np);
+ __netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
@@ -1167,6 +1164,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EBUSY;
}
+ if (dev == port_dev) {
+ NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
+ netdev_err(dev, "Cannot enslave team device to itself\n");
+ return -EINVAL;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index e0d6760f3219..c48c3a1eb1f8 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Networking over Thunderbolt cable using Apple ThunderboltIP protocol
*
@@ -5,10 +6,6 @@
* Authors: Amir Levy <amir.jer.levy@intel.com>
* Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/atomic.h>
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ebd07ad82431..060135ceaf0e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -113,7 +113,6 @@ do { \
} while (0)
#endif
-#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
/* TUN device flags */
@@ -181,6 +180,7 @@ struct tun_file {
};
struct napi_struct napi;
bool napi_enabled;
+ bool napi_frags_enabled;
struct mutex napi_mutex; /* Protects access to the above napi */
struct list_head next;
struct tun_struct *detached;
@@ -313,32 +313,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget)
}
static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
- bool napi_en)
+ bool napi_en, bool napi_frags)
{
tfile->napi_enabled = napi_en;
+ tfile->napi_frags_enabled = napi_en && napi_frags;
if (napi_en) {
netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
NAPI_POLL_WEIGHT);
napi_enable(&tfile->napi);
- mutex_init(&tfile->napi_mutex);
}
}
-static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_disable(struct tun_file *tfile)
{
if (tfile->napi_enabled)
napi_disable(&tfile->napi);
}
-static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_del(struct tun_file *tfile)
{
if (tfile->napi_enabled)
netif_napi_del(&tfile->napi);
}
-static bool tun_napi_frags_enabled(const struct tun_struct *tun)
+static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
- return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
+ return tfile->napi_frags_enabled;
}
#ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -562,12 +562,11 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
e->rps_rxhash = hash;
}
-/* We try to identify a flow through its rxhash first. The reason that
+/* We try to identify a flow through its rxhash. The reason that
* we do not check rxq no. is because some cards(e.g 82599), chooses
* the rxq based on the txq where the last packet of the flow comes. As
* the userspace application move between processors, we may get a
- * different rxq no. here. If we could not get rxhash, then we would
- * hope the rxq no. may help here.
+ * different rxq no. here.
*/
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
@@ -578,18 +577,13 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
numqueues = READ_ONCE(tun->numqueues);
txq = __skb_get_hash_symmetric(skb);
- if (txq) {
- e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
- if (e) {
- tun_flow_save_rps_rxhash(e, txq);
- txq = e->queue_index;
- } else
- /* use multiply and shift instead of expensive divide */
- txq = ((u64)txq * numqueues) >> 32;
- } else if (likely(skb_rx_queue_recorded(skb))) {
- txq = skb_get_rx_queue(skb);
- while (unlikely(txq >= numqueues))
- txq -= numqueues;
+ e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
+ if (e) {
+ tun_flow_save_rps_rxhash(e, txq);
+ txq = e->queue_index;
+ } else {
+ /* use multiply and shift instead of expensive divide */
+ txq = ((u64)txq * numqueues) >> 32;
}
return txq;
@@ -690,8 +684,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun = rtnl_dereference(tfile->tun);
if (tun && clean) {
- tun_napi_disable(tun, tfile);
- tun_napi_del(tun, tfile);
+ tun_napi_disable(tfile);
+ tun_napi_del(tfile);
}
if (tun && !tfile->detached) {
@@ -758,7 +752,7 @@ static void tun_detach_all(struct net_device *dev)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
- tun_napi_disable(tun, tfile);
+ tun_napi_disable(tfile);
tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
@@ -774,7 +768,7 @@ static void tun_detach_all(struct net_device *dev)
synchronize_net();
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- tun_napi_del(tun, tfile);
+ tun_napi_del(tfile);
/* Drop read queue */
tun_queue_purge(tfile);
xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -793,7 +787,7 @@ static void tun_detach_all(struct net_device *dev)
}
static int tun_attach(struct tun_struct *tun, struct file *file,
- bool skip_filter, bool napi)
+ bool skip_filter, bool napi, bool napi_frags)
{
struct tun_file *tfile = file->private_data;
struct net_device *dev = tun->dev;
@@ -866,9 +860,12 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
tun_enable_queue(tfile);
} else {
sock_hold(&tfile->sk);
- tun_napi_init(tun, tfile, napi);
+ tun_napi_init(tun, tfile, napi, napi_frags);
}
+ if (rtnl_dereference(tun->xdp_prog))
+ sock_set_flag(&tfile->sk, SOCK_XDP);
+
tun_set_real_num_queues(tun);
/* device is allowed to go away first, so no need to hold extra
@@ -1044,16 +1041,13 @@ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
/* Select queue was not called for the skbuff, so we extract the
* RPS hash and save it into the flow_table here.
*/
+ struct tun_flow_entry *e;
__u32 rxhash;
rxhash = __skb_get_hash_symmetric(skb);
- if (rxhash) {
- struct tun_flow_entry *e;
- e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
- rxhash);
- if (e)
- tun_flow_save_rps_rxhash(e, rxhash);
- }
+ e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
+ if (e)
+ tun_flow_save_rps_rxhash(e, rxhash);
}
#endif
}
@@ -1153,43 +1147,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev,
return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void tun_poll_controller(struct net_device *dev)
-{
- /*
- * Tun only receives frames when:
- * 1) the char device endpoint gets data from user space
- * 2) the tun socket gets a sendmsg call from user space
- * If NAPI is not enabled, since both of those are synchronous
- * operations, we are guaranteed never to have pending data when we poll
- * for it so there is nothing to do here but return.
- * We need this though so netpoll recognizes us as an interface that
- * supports polling, which enables bridge devices in virt setups to
- * still use netconsole
- * If NAPI is enabled, however, we need to schedule polling for all
- * queues unless we are using napi_gro_frags(), which we call in
- * process context and not in NAPI context.
- */
- struct tun_struct *tun = netdev_priv(dev);
-
- if (tun->flags & IFF_NAPI) {
- struct tun_file *tfile;
- int i;
-
- if (tun_napi_frags_enabled(tun))
- return;
-
- rcu_read_lock();
- for (i = 0; i < tun->numqueues; i++) {
- tfile = rcu_dereference(tun->tfiles[i]);
- if (tfile->napi_enabled)
- napi_schedule(&tfile->napi);
- }
- rcu_read_unlock();
- }
- return;
-}
-#endif
static void tun_set_headroom(struct net_device *dev, int new_hr)
{
@@ -1241,13 +1198,29 @@ static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct tun_struct *tun = netdev_priv(dev);
+ struct tun_file *tfile;
struct bpf_prog *old_prog;
+ int i;
old_prog = rtnl_dereference(tun->xdp_prog);
rcu_assign_pointer(tun->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
+ for (i = 0; i < tun->numqueues; i++) {
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ if (prog)
+ sock_set_flag(&tfile->sk, SOCK_XDP);
+ else
+ sock_reset_flag(&tfile->sk, SOCK_XDP);
+ }
+ list_for_each_entry(tfile, &tun->disabled, next) {
+ if (prog)
+ sock_set_flag(&tfile->sk, SOCK_XDP);
+ else
+ sock_reset_flag(&tfile->sk, SOCK_XDP);
+ }
+
return 0;
}
@@ -1283,9 +1256,6 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_start_xmit = tun_net_xmit,
.ndo_fix_features = tun_net_fix_features,
.ndo_select_queue = tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = tun_poll_controller,
-#endif
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
};
@@ -1365,9 +1335,6 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_select_queue = tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = tun_poll_controller,
-#endif
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
@@ -1617,6 +1584,55 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
return true;
}
+static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf,
+ int buflen, int len, int pad)
+{
+ struct sk_buff *skb = build_skb(buf, buflen);
+
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ skb_reserve(skb, pad);
+ skb_put(skb, len);
+
+ get_page(alloc_frag->page);
+ alloc_frag->offset += buflen;
+
+ return skb;
+}
+
+static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
+ struct xdp_buff *xdp, u32 act)
+{
+ int err;
+
+ switch (act) {
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
+ if (err)
+ return err;
+ break;
+ case XDP_TX:
+ err = tun_xdp_tx(tun->dev, xdp);
+ if (err < 0)
+ return err;
+ break;
+ case XDP_PASS:
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(tun->dev, xdp_prog, act);
+ /* fall through */
+ case XDP_DROP:
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ break;
+ }
+
+ return act;
+}
+
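tun_xdp_act() folds the XDP verdict handling shared by both receive paths into one helper: a negative return means a redirect or transmit failed outright, otherwise the action is echoed back so the caller can finish the per-path bookkeeping. The calling convention, as tun_build_skb() below uses it:

	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	err = tun_xdp_act(tun, xdp_prog, &xdp, act);
	if (err < 0)
		goto err_xdp;		/* redirect/TX failed hard */
	if (err == XDP_REDIRECT)
		xdp_do_flush_map();	/* kick the redirect maps once */
	if (err != XDP_PASS)
		goto out;		/* consumed: TX'd, redirected or dropped */
	/* XDP_PASS: fall through and build an skb from the adjusted buffer */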
static struct sk_buff *tun_build_skb(struct tun_struct *tun,
struct tun_file *tfile,
struct iov_iter *from,
@@ -1624,18 +1640,17 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
int len, int *skb_xdp)
{
struct page_frag *alloc_frag = &current->task_frag;
- struct sk_buff *skb;
struct bpf_prog *xdp_prog;
int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- unsigned int delta = 0;
char *buf;
size_t copied;
- int err, pad = TUN_RX_PAD;
+ int pad = TUN_RX_PAD;
+ int err = 0;
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog)
- pad += TUN_HEADROOM;
+ pad += XDP_PACKET_HEADROOM;
buflen += SKB_DATA_ALIGN(len + pad);
rcu_read_unlock();
@@ -1654,17 +1669,18 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
* of xdp_prog above, this should be rare and for simplicity
* we do XDP on skb in case the headroom is not enough.
*/
- if (hdr->gso_type || !xdp_prog)
+ if (hdr->gso_type || !xdp_prog) {
*skb_xdp = 1;
- else
- *skb_xdp = 0;
+ return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
+ }
+
+ *skb_xdp = 0;
local_bh_disable();
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
- if (xdp_prog && !*skb_xdp) {
+ if (xdp_prog) {
struct xdp_buff xdp;
- void *orig_data;
u32 act;
xdp.data_hard_start = buf;
@@ -1672,66 +1688,33 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
xdp.rxq = &tfile->xdp_rxq;
- orig_data = xdp.data;
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
- switch (act) {
- case XDP_REDIRECT:
- get_page(alloc_frag->page);
- alloc_frag->offset += buflen;
- err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
- xdp_do_flush_map();
- if (err)
- goto err_redirect;
- rcu_read_unlock();
- local_bh_enable();
- return NULL;
- case XDP_TX:
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ if (act == XDP_REDIRECT || act == XDP_TX) {
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
- if (tun_xdp_tx(tun->dev, &xdp) < 0)
- goto err_redirect;
- rcu_read_unlock();
- local_bh_enable();
- return NULL;
- case XDP_PASS:
- delta = orig_data - xdp.data;
- len = xdp.data_end - xdp.data;
- break;
- default:
- bpf_warn_invalid_xdp_action(act);
- /* fall through */
- case XDP_ABORTED:
- trace_xdp_exception(tun->dev, xdp_prog, act);
- /* fall through */
- case XDP_DROP:
- goto err_xdp;
}
- }
+ err = tun_xdp_act(tun, xdp_prog, &xdp, act);
+ if (err < 0)
+ goto err_xdp;
+ if (err == XDP_REDIRECT)
+ xdp_do_flush_map();
+ if (err != XDP_PASS)
+ goto out;
- skb = build_skb(buf, buflen);
- if (!skb) {
- rcu_read_unlock();
- local_bh_enable();
- return ERR_PTR(-ENOMEM);
+ pad = xdp.data - xdp.data_hard_start;
+ len = xdp.data_end - xdp.data;
}
-
- skb_reserve(skb, pad - delta);
- skb_put(skb, len);
- get_page(alloc_frag->page);
- alloc_frag->offset += buflen;
-
rcu_read_unlock();
local_bh_enable();
- return skb;
+ return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
-err_redirect:
- put_page(alloc_frag->page);
err_xdp:
+ put_page(alloc_frag->page);
+out:
rcu_read_unlock();
local_bh_enable();
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
return NULL;
}
@@ -1752,7 +1735,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
int err;
u32 rxhash = 0;
int skb_xdp = 1;
- bool frags = tun_napi_frags_enabled(tun);
+ bool frags = tun_napi_frags_enabled(tfile);
if (!(tun->dev->flags & IFF_UP))
return -EIO;
@@ -2306,6 +2289,8 @@ static void tun_setup(struct net_device *dev)
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ if (!data)
+ return 0;
return -EINVAL;
}
@@ -2392,18 +2377,133 @@ static void tun_sock_write_space(struct sock *sk)
kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
+static int tun_xdp_one(struct tun_struct *tun,
+ struct tun_file *tfile,
+ struct xdp_buff *xdp, int *flush)
+{
+ struct tun_xdp_hdr *hdr = xdp->data_hard_start;
+ struct virtio_net_hdr *gso = &hdr->gso;
+ struct tun_pcpu_stats *stats;
+ struct bpf_prog *xdp_prog;
+ struct sk_buff *skb = NULL;
+ u32 rxhash = 0, act;
+ int buflen = hdr->buflen;
+ int err = 0;
+ bool skb_xdp = false;
+
+ xdp_prog = rcu_dereference(tun->xdp_prog);
+ if (xdp_prog) {
+ if (gso->gso_type) {
+ skb_xdp = true;
+ goto build;
+ }
+ xdp_set_data_meta_invalid(xdp);
+ xdp->rxq = &tfile->xdp_rxq;
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ err = tun_xdp_act(tun, xdp_prog, xdp, act);
+ if (err < 0) {
+ put_page(virt_to_head_page(xdp->data));
+ return err;
+ }
+
+ switch (err) {
+ case XDP_REDIRECT:
+ *flush = true;
+ /* fall through */
+ case XDP_TX:
+ return 0;
+ case XDP_PASS:
+ break;
+ default:
+ put_page(virt_to_head_page(xdp->data));
+ return 0;
+ }
+ }
+
+build:
+ skb = build_skb(xdp->data_hard_start, buflen);
+ if (!skb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ skb_put(skb, xdp->data_end - xdp->data);
+
+ if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
+ this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
+ kfree_skb(skb);
+ err = -EINVAL;
+ goto out;
+ }
+
+ skb->protocol = eth_type_trans(skb, tun->dev);
+ skb_reset_network_header(skb);
+ skb_probe_transport_header(skb, 0);
+
+ if (skb_xdp) {
+ err = do_xdp_generic(xdp_prog, skb);
+ if (err != XDP_PASS)
+ goto out;
+ }
+
+ if (!rcu_dereference(tun->steering_prog))
+ rxhash = __skb_get_hash_symmetric(skb);
+
+ netif_receive_skb(skb);
+
+ stats = get_cpu_ptr(tun->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+ put_cpu_ptr(stats);
+
+ if (rxhash)
+ tun_flow_update(tun, rxhash, tfile);
+
+out:
+ return err;
+}
+
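tun_xdp_one() trusts its producer to have laid the buffer out in advance: a struct tun_xdp_hdr (buflen plus the virtio-net header, per the fields dereferenced above) sits at data_hard_start, followed by headroom and then the packet between xdp->data and xdp->data_end. Roughly, assuming the producer is vhost_net, the intended user of TUN_MSG_PTR:

	/*
	 * data_hard_start -> +------------------------------+
	 *                    | struct tun_xdp_hdr           |
	 *                    |   int buflen;                |
	 *                    |   struct virtio_net_hdr gso; |
	 *                    +------------------------------+
	 *                    | headroom                     |
	 * xdp->data -------> | packet payload               |
	 * xdp->data_end ---> +------------------------------+
	 */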
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
- int ret;
+ int ret, i;
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
struct tun_struct *tun = tun_get(tfile);
+ struct tun_msg_ctl *ctl = m->msg_control;
+ struct xdp_buff *xdp;
if (!tun)
return -EBADFD;
- ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
+ if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ int n = ctl->num;
+ int flush = 0;
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ for (i = 0; i < n; i++) {
+ xdp = &((struct xdp_buff *)ctl->ptr)[i];
+ tun_xdp_one(tun, tfile, xdp, &flush);
+ }
+
+ if (flush)
+ xdp_do_flush_map();
+
+ rcu_read_unlock();
+ local_bh_enable();
+
+ ret = total_len;
+ goto out;
+ }
+
+ ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
m->msg_flags & MSG_DONTWAIT,
m->msg_flags & MSG_MORE);
+out:
tun_put(tun);
return ret;
}
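With TUN_MSG_PTR, a single sendmsg() hands the driver an array of pre-built XDP buffers rather than one iovec payload, which is what makes the batching pay off. A producer-side sketch, using only the tun_msg_ctl fields visible in this patch:

	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num  = n,		/* entries in bufs[] */
		.ptr  = bufs,		/* struct xdp_buff bufs[n] */
	};
	struct msghdr msg = { .msg_control = &ctl };

	/* one call processes all n buffers: XDP verdicts are applied per
	 * buffer and redirect maps are flushed once at the end */
	err = sock->ops->sendmsg(sock, &msg, total_len);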
@@ -2577,7 +2677,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
return err;
err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
- ifr->ifr_flags & IFF_NAPI);
+ ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS);
if (err < 0)
return err;
@@ -2675,7 +2776,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
(ifr->ifr_flags & TUN_FEATURES);
INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
+ err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS);
if (err < 0)
goto err_free_flow;
@@ -2824,7 +2926,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
ret = security_tun_dev_attach_queue(tun->security);
if (ret < 0)
goto unlock;
- ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
+ ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
+ tun->flags & IFF_NAPI_FRAGS);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
@@ -3242,6 +3345,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
return -ENOMEM;
}
+ mutex_init(&tfile->napi_mutex);
RCU_INIT_POINTER(tfile->tun, NULL);
tfile->flags = 0;
tfile->ifindex = 0;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index e95dd12edec4..023b8d0bf175 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= AX_MONITOR_LINK;
if (wolinfo->wolopts & WAKE_MAGIC)
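This asix fix and the matching ones below (ax88179_178a, lan78xx, r8152, smsc75xx, smsc95xx, sr9800) all enforce the same rule: reject a wake-on-LAN request up front if it names any mode the hardware cannot honour, rather than silently programming only the supported subset. The generic shape, with a placeholder mask:

	static int xyz_set_wol(struct net_device *net, struct ethtool_wolinfo *w)
	{
		/* XYZ_SUPPORTED_WAKE = the modes this device implements */
		if (w->wolopts & ~XYZ_SUPPORTED_WAKE)
			return -EINVAL;	/* don't silently drop modes */

		/* ...program the requested, now known-supported, modes... */
		return 0;
	}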
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 9e8ad372f419..2207f7a7d1ff 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= AX_MONITOR_MODE_RWLC;
if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1eaec648bd1f..50c05d0f44cb 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -779,8 +779,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
- ctx->bh.data = (unsigned long)dev;
- ctx->bh.func = cdc_ncm_txpath_bh;
+ tasklet_init(&ctx->bh, cdc_ncm_txpath_bh, (unsigned long)dev);
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
@@ -1601,11 +1600,8 @@ cdc_ncm_speed_change(struct usbnet *dev,
static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
{
- struct cdc_ncm_ctx *ctx;
struct usb_cdc_notification *event;
- ctx = (struct cdc_ncm_ctx *)dev->data[0];
-
if (urb->actual_length < sizeof(*event))
return;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a9991c5f4736..be1917be28f2 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Microchip Technology
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/version.h>
#include <linux/module.h>
@@ -948,11 +936,9 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
if (ret == 0) {
- if (sig == OTP_INDICATOR_1)
- offset = offset;
- else if (sig == OTP_INDICATOR_2)
+ if (sig == OTP_INDICATOR_2)
offset += 0x100;
- else
+ else if (sig != OTP_INDICATOR_1)
ret = -EINVAL;
if (!ret)
ret = lan78xx_read_raw_otp(dev, offset, length, data);
@@ -1027,7 +1013,7 @@ done:
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
int index, u8 addr[ETH_ALEN])
{
- u32 temp;
+ u32 temp;
if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
temp = addr[3];
@@ -1401,19 +1387,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
if (ret < 0)
return ret;
- pdata->wol = 0;
- if (wol->wolopts & WAKE_UCAST)
- pdata->wol |= WAKE_UCAST;
- if (wol->wolopts & WAKE_MCAST)
- pdata->wol |= WAKE_MCAST;
- if (wol->wolopts & WAKE_BCAST)
- pdata->wol |= WAKE_BCAST;
- if (wol->wolopts & WAKE_MAGIC)
- pdata->wol |= WAKE_MAGIC;
- if (wol->wolopts & WAKE_PHY)
- pdata->wol |= WAKE_PHY;
- if (wol->wolopts & WAKE_ARP)
- pdata->wol |= WAKE_ARP;
+ if (wol->wolopts & ~WAKE_ALL)
+ return -EINVAL;
+
+ pdata->wol = wol->wolopts;
device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
@@ -1847,8 +1824,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
ret = of_mdiobus_register(dev->mdiobus, node);
- if (node)
- of_node_put(node);
+ of_node_put(node);
if (ret) {
netdev_err(dev->net, "can't register MDIO bus\n");
goto exit1;
@@ -2178,7 +2154,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
}
/* MAC doesn't support 1000T Half */
- phydev->supported &= ~SUPPORTED_1000baseT_Half;
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
/* support both flow controls */
dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
@@ -2702,7 +2678,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
static int lan78xx_stop(struct net_device *net)
{
- struct lan78xx_net *dev = netdev_priv(net);
+ struct lan78xx_net *dev = netdev_priv(net);
if (timer_pending(&dev->stat_monitor))
del_timer_sync(&dev->stat_monitor);
@@ -2952,6 +2928,11 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
int i;
ret = lan78xx_get_endpoints(dev, intf);
+ if (ret) {
+ netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
+ ret);
+ return ret;
+ }
dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
@@ -3080,7 +3061,7 @@ static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
- int status;
+ int status;
if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
skb_queue_tail(&dev->rxq_pause, skb);
@@ -3347,9 +3328,9 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
count = 0;
length = 0;
spin_lock_irqsave(&tqp->lock, flags);
- for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
+ skb_queue_walk(tqp, skb) {
if (skb_is_gso(skb)) {
- if (pkt_cnt) {
+ if (!skb_queue_is_first(tqp, skb)) {
/* handle previous packets first */
break;
}
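skb_queue_walk() is the idiomatic form of the open-coded cursor it replaces: the loop terminates when the cursor wraps back around to the queue head rather than when a counter runs out, which is why the gso check switches from pkt_cnt to skb_queue_is_first(). Approximately:

	/* skb_queue_walk(q, skb) expands to roughly:
	 *
	 *	for (skb = (q)->next;
	 *	     skb != (struct sk_buff *)(q);
	 *	     skb = skb->next)
	 *
	 * so "is this the first packet?" becomes skb_queue_is_first(q, skb).
	 */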
@@ -3640,10 +3621,10 @@ static void intr_complete(struct urb *urb)
static void lan78xx_disconnect(struct usb_interface *intf)
{
- struct lan78xx_net *dev;
- struct usb_device *udev;
- struct net_device *net;
- struct phy_device *phydev;
+ struct lan78xx_net *dev;
+ struct usb_device *udev;
+ struct net_device *net;
+ struct phy_device *phydev;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
@@ -3761,7 +3742,6 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = lan78xx_bind(dev, intf);
if (ret < 0)
goto out2;
- strcpy(netdev->name, "eth%d");
if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h
index 25aa54611774..968e5e5faee0 100644
--- a/drivers/net/usb/lan78xx.h
+++ b/drivers/net/usb/lan78xx.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2015 Microchip Technology
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _LAN78XX_H
#define _LAN78XX_H
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e3270deecec2..72a55b6b4211 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1213,13 +1213,13 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */
{QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */
- {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
- {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
- {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -1241,6 +1241,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
+ {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2cd71bdb6484..f1b5201cc320 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (!rtl_can_wakeup(tp))
return -EOPNOTSUPP;
+ if (wol->wolopts & ~WAKE_ANY)
+ return -EINVAL;
+
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out_set_wol;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 05553d252446..ec287c9741e8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
int ret;
+ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+ return -EINVAL;
+
pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
@@ -1517,6 +1520,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
if (pdata) {
+ cancel_work_sync(&pdata->set_multicast);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 06b4d290784d..262e7a3c23cb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
int ret;
+ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+ return -EINVAL;
+
pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 9277a0f228df..35f39f23d881 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= SR_MONITOR_LINK;
if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 770aa624147f..504282af27e5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -802,7 +802,7 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
int usbnet_stop (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
- struct driver_info *info = dev->driver_info;
+ const struct driver_info *info = dev->driver_info;
int retval, pm, mpn;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
@@ -865,7 +865,7 @@ int usbnet_open (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
int retval;
- struct driver_info *info = dev->driver_info;
+ const struct driver_info *info = dev->driver_info;
if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
netif_info(dev, ifup, dev->net,
@@ -1205,7 +1205,7 @@ fail_lowmem:
}
if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
- struct driver_info *info = dev->driver_info;
+ const struct driver_info *info = dev->driver_info;
int retval = 0;
clear_bit (EVENT_LINK_RESET, &dev->flags);
@@ -1353,7 +1353,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
unsigned int length;
struct urb *urb = NULL;
struct skb_data *entry;
- struct driver_info *info = dev->driver_info;
+ const struct driver_info *info = dev->driver_info;
unsigned long flags;
int retval;
@@ -1527,6 +1527,7 @@ static void usbnet_bh (struct timer_list *t)
continue;
case tx_done:
kfree(entry->urb->sg);
+ /* fall through */
case rx_cleanup:
usb_free_urb (entry->urb);
dev_kfree_skb (skb);
@@ -1646,7 +1647,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
struct usbnet *dev;
struct net_device *net;
struct usb_host_interface *interface;
- struct driver_info *info;
+ const struct driver_info *info;
struct usb_device *xdev;
int status;
const char *name;
@@ -1662,7 +1663,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
}
name = udev->dev.driver->name;
- info = (struct driver_info *) prod->driver_info;
+ info = (const struct driver_info *) prod->driver_info;
if (!info) {
dev_dbg (&udev->dev, "blacklisted by %s\n", name);
return -ENODEV;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8d679c8b7f25..890fa5b905e2 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -24,6 +24,7 @@
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
+#include <linux/net_tstamp.h>
#define DRV_NAME "veth"
#define DRV_VERSION "1.0"
@@ -36,9 +37,10 @@
#define VETH_XDP_TX BIT(0)
#define VETH_XDP_REDIR BIT(1)
-struct pcpu_vstats {
- u64 packets;
- u64 bytes;
+struct veth_rq_stats {
+ u64 xdp_packets;
+ u64 xdp_bytes;
+ u64 xdp_drops;
struct u64_stats_sync syncp;
};
@@ -47,6 +49,7 @@ struct veth_rq {
struct net_device *dev;
struct bpf_prog __rcu *xdp_prog;
struct xdp_mem_info xdp_mem;
+ struct veth_rq_stats stats;
bool rx_notify_masked;
struct ptr_ring xdp_ring;
struct xdp_rxq_info xdp_rxq;
@@ -64,6 +67,21 @@ struct veth_priv {
* ethtool interface
*/
+struct veth_q_stat_desc {
+ char desc[ETH_GSTRING_LEN];
+ size_t offset;
+};
+
+#define VETH_RQ_STAT(m) offsetof(struct veth_rq_stats, m)
+
+static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
+ { "xdp_packets", VETH_RQ_STAT(xdp_packets) },
+ { "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
+ { "xdp_drops", VETH_RQ_STAT(xdp_drops) },
+};
+
+#define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
+
static struct {
const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
@@ -88,9 +106,20 @@ static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *inf
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
+ char *p = (char *)buf;
+ int i, j;
+
switch(stringset) {
case ETH_SS_STATS:
- memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+ p += sizeof(ethtool_stats_keys);
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
+ i, veth_rq_stats_desc[j].desc);
+ p += ETH_GSTRING_LEN;
+ }
+ }
break;
}
}
@@ -99,7 +128,8 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(ethtool_stats_keys);
+ return ARRAY_SIZE(ethtool_stats_keys) +
+ VETH_RQ_STATS_LEN * dev->real_num_rx_queues;
default:
return -EOPNOTSUPP;
}
@@ -110,8 +140,37 @@ static void veth_get_ethtool_stats(struct net_device *dev,
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer = rtnl_dereference(priv->peer);
+ int i, j, idx;
data[0] = peer ? peer->ifindex : 0;
+ idx = 1;
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
+ const void *stats_base = (void *)rq_stats;
+ unsigned int start;
+ size_t offset;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
+ for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
+ offset = veth_rq_stats_desc[j].offset;
+ data[idx + j] = *(u64 *)(stats_base + offset);
+ }
+ } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
+ idx += VETH_RQ_STATS_LEN;
+ }
+}
+
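The do/while around u64_stats_fetch_begin_irq() above is the usual seqcount read loop: on 32-bit hosts a writer can tear a 64-bit counter mid-update, so the reader snapshots the values and retries if the sequence moved underneath it (on 64-bit it reduces to plain loads). In isolation:

	u64 packets;
	unsigned int start;

	do {
		start   = u64_stats_fetch_begin_irq(&stats->syncp);
		packets = stats->xdp_packets;	/* could tear without the loop */
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
	/* 'packets' is now a consistent snapshot */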
+static int veth_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+
+ return 0;
}
static const struct ethtool_ops veth_ethtool_ops = {
@@ -121,6 +180,7 @@ static const struct ethtool_ops veth_ethtool_ops = {
.get_sset_count = veth_get_sset_count,
.get_ethtool_stats = veth_get_ethtool_stats,
.get_link_ksettings = veth_get_link_ksettings,
+ .get_ts_info = veth_get_ts_info,
};
/* general routines */
@@ -201,13 +261,16 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_record_rx_queue(skb, rxq);
}
+ skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
- struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
+ if (!rcv_xdp) {
+ struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
- u64_stats_update_begin(&stats->syncp);
- stats->bytes += length;
- stats->packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ stats->bytes += length;
+ stats->packets++;
+ u64_stats_update_end(&stats->syncp);
+ }
} else {
drop:
atomic64_inc(&priv->dropped);
@@ -221,7 +284,7 @@ drop:
return NETDEV_TX_OK;
}
-static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
+static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
int cpu;
@@ -229,7 +292,7 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
result->packets = 0;
result->bytes = 0;
for_each_possible_cpu(cpu) {
- struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
+ struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
u64 packets, bytes;
unsigned int start;
@@ -244,23 +307,58 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
return atomic64_read(&priv->dropped);
}
+static void veth_stats_rx(struct veth_rq_stats *result, struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+ result->xdp_packets = 0;
+ result->xdp_bytes = 0;
+ result->xdp_drops = 0;
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ struct veth_rq_stats *stats = &priv->rq[i].stats;
+ u64 packets, bytes, drops;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->xdp_packets;
+ bytes = stats->xdp_bytes;
+ drops = stats->xdp_drops;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ result->xdp_packets += packets;
+ result->xdp_bytes += bytes;
+ result->xdp_drops += drops;
+ }
+}
+
static void veth_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *tot)
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct pcpu_vstats one;
+ struct veth_rq_stats rx;
+ struct pcpu_lstats tx;
- tot->tx_dropped = veth_stats_one(&one, dev);
- tot->tx_bytes = one.bytes;
- tot->tx_packets = one.packets;
+ tot->tx_dropped = veth_stats_tx(&tx, dev);
+ tot->tx_bytes = tx.bytes;
+ tot->tx_packets = tx.packets;
+
+ veth_stats_rx(&rx, dev);
+ tot->rx_dropped = rx.xdp_drops;
+ tot->rx_bytes = rx.xdp_bytes;
+ tot->rx_packets = rx.xdp_packets;
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (peer) {
- tot->rx_dropped = veth_stats_one(&one, peer);
- tot->rx_bytes = one.bytes;
- tot->rx_packets = one.packets;
+ tot->rx_dropped += veth_stats_tx(&tx, peer);
+ tot->rx_bytes += tx.bytes;
+ tot->rx_packets += tx.packets;
+
+ veth_stats_rx(&rx, peer);
+ tot->tx_bytes += rx.xdp_bytes;
+ tot->tx_packets += rx.xdp_packets;
}
rcu_read_unlock();
}
@@ -299,16 +397,20 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct net_device *rcv;
+ int i, ret, drops = n;
unsigned int max_len;
struct veth_rq *rq;
- int i, drops = 0;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
- return -EINVAL;
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+ ret = -EINVAL;
+ goto drop;
+ }
rcv = rcu_dereference(priv->peer);
- if (unlikely(!rcv))
- return -ENXIO;
+ if (unlikely(!rcv)) {
+ ret = -ENXIO;
+ goto drop;
+ }
rcv_priv = netdev_priv(rcv);
rq = &rcv_priv->rq[veth_select_rxq(rcv)];
@@ -316,9 +418,12 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
* side. This means an XDP program is loaded on the peer and the peer
* device is up.
*/
- if (!rcu_access_pointer(rq->xdp_prog))
- return -ENXIO;
+ if (!rcu_access_pointer(rq->xdp_prog)) {
+ ret = -ENXIO;
+ goto drop;
+ }
+ drops = 0;
max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
spin_lock(&rq->xdp_ring.producer_lock);
@@ -337,7 +442,14 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);
- return n - drops;
+ if (likely(!drops))
+ return n;
+
+ ret = n - drops;
+drop:
+ atomic64_add(drops, &priv->dropped);
+
+ return ret;
}
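The reworked error paths keep the ndo_xdp_xmit contract while still charging every lost frame to the dropped counter: a negative return means nothing was queued, otherwise the return value is the count of frames the driver accepted. A caller's view, sketched:

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, flags);
	if (sent < 0)
		sent = 0;		/* hard failure: nothing was queued */
	drops = n - sent;		/* frames the driver could not take */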
static void veth_xdp_flush(struct net_device *dev)
@@ -463,6 +575,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
int mac_len, delta, off;
struct xdp_buff xdp;
+ skb_orphan(skb);
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (unlikely(!xdp_prog)) {
@@ -508,8 +622,6 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
skb_copy_header(nskb, skb);
head_off = skb_headroom(nskb) - skb_headroom(skb);
skb_headers_offset_update(nskb, head_off);
- if (skb->sk)
- skb_set_owner_w(nskb, skb->sk);
consume_skb(skb);
skb = nskb;
}
@@ -586,28 +698,42 @@ xdp_xmit:
static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
{
- int i, done = 0;
+ int i, done = 0, drops = 0, bytes = 0;
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
+ unsigned int xdp_xmit_one = 0;
struct sk_buff *skb;
if (!ptr)
break;
if (veth_is_xdp_frame(ptr)) {
- skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr),
- xdp_xmit);
+ struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
+
+ bytes += frame->len;
+ skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one);
} else {
- skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit);
+ skb = ptr;
+ bytes += skb->len;
+ skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one);
}
+ *xdp_xmit |= xdp_xmit_one;
if (skb)
napi_gro_receive(&rq->xdp_napi, skb);
+ else if (!xdp_xmit_one)
+ drops++;
done++;
}
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.xdp_packets += done;
+ rq->stats.xdp_bytes += bytes;
+ rq->stats.xdp_drops += drops;
+ u64_stats_update_end(&rq->stats.syncp);
+
return done;
}
@@ -798,8 +924,10 @@ static int veth_alloc_queues(struct net_device *dev)
if (!priv->rq)
return -ENOMEM;
- for (i = 0; i < dev->num_rx_queues; i++)
+ for (i = 0; i < dev->num_rx_queues; i++) {
priv->rq[i].dev = dev;
+ u64_stats_init(&priv->rq[i].stats.syncp);
+ }
return 0;
}
@@ -815,13 +943,13 @@ static int veth_dev_init(struct net_device *dev)
{
int err;
- dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
- if (!dev->vstats)
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
+ if (!dev->lstats)
return -ENOMEM;
err = veth_alloc_queues(dev);
if (err) {
- free_percpu(dev->vstats);
+ free_percpu(dev->lstats);
return err;
}
@@ -831,7 +959,7 @@ static int veth_dev_init(struct net_device *dev)
static void veth_dev_free(struct net_device *dev)
{
veth_free_queues(dev);
- free_percpu(dev->vstats);
+ free_percpu(dev->lstats);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 765920905226..3e2c041d76ac 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev,
tot->rx_frame_errors = dev->stats.rx_frame_errors;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void virtnet_netpoll(struct net_device *dev)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- int i;
-
- for (i = 0; i < vi->curr_queue_pairs; i++)
- napi_schedule(&vi->rq[i].napi);
-}
-#endif
-
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();
@@ -2181,6 +2170,53 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
+static int virtnet_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct ethtool_coalesce ec_default = {
+ .cmd = ETHTOOL_SCOALESCE,
+ .rx_max_coalesced_frames = 1,
+ };
+ struct virtnet_info *vi = netdev_priv(dev);
+ int i, napi_weight;
+
+ if (ec->tx_max_coalesced_frames > 1)
+ return -EINVAL;
+
+ ec_default.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+ napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
+
+ /* disallow changes to fields not explicitly tested above */
+ if (memcmp(ec, &ec_default, sizeof(ec_default)))
+ return -EINVAL;
+
+ if (napi_weight ^ vi->sq[0].napi.weight) {
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ vi->sq[i].napi.weight = napi_weight;
+ }
+
+ return 0;
+}
+
+static int virtnet_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct ethtool_coalesce ec_default = {
+ .cmd = ETHTOOL_GCOALESCE,
+ .rx_max_coalesced_frames = 1,
+ };
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ memcpy(ec, &ec_default, sizeof(ec_default));
+
+ if (vi->sq[0].napi.weight)
+ ec->tx_max_coalesced_frames = 1;
+
+ return 0;
+}
+
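Both coalesce handlers validate by template: an ethtool_coalesce is zero-initialised except for the knobs this device actually supports, the caller's value for the single writable knob is copied in, and one memcmp() rejects any request that touches anything else. The pattern, isolated:

	struct ethtool_coalesce tmpl = {
		.cmd = ETHTOOL_SCOALESCE,
		.rx_max_coalesced_frames = 1,	/* fixed for this device */
	};

	/* accept the one tunable field... */
	tmpl.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;

	/* ...and refuse a request that differs anywhere else */
	if (memcmp(ec, &tmpl, sizeof(tmpl)))
		return -EINVAL;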
static void virtnet_init_settings(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2219,6 +2255,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = virtnet_get_link_ksettings,
.set_link_ksettings = virtnet_set_link_ksettings,
+ .set_coalesce = virtnet_set_coalesce,
+ .get_coalesce = virtnet_get_coalesce,
};
static void virtnet_freeze_down(struct virtio_device *vdev)
@@ -2229,8 +2267,9 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
+ netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
- netif_tx_disable(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
cancel_delayed_work_sync(&vi->refill);
if (netif_running(vi->dev)) {
@@ -2266,7 +2305,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
}
}
+ netif_tx_lock_bh(vi->dev);
netif_device_attach(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
return err;
}
@@ -2447,9 +2488,6 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_get_stats64 = virtnet_stats,
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = virtnet_netpoll,
-#endif
.ndo_bpf = virtnet_xdp,
.ndo_xdp_xmit = virtnet_xdp_xmit,
.ndo_features_check = passthru_features_check,
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index f93547f257fb..69b7227c637e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1215,8 +1215,19 @@ static int vrf_add_fib_rules(const struct net_device *dev)
goto ipmr_err;
#endif
+#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
+ err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true);
+ if (err < 0)
+ goto ip6mr_err;
+#endif
+
return 0;
+#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
+ip6mr_err:
+ vrf_fib_rule(dev, RTNL_FAMILY_IPMR, false);
+#endif
+
#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
vrf_fib_rule(dev, AF_INET6, false);
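The new ip6mr_err label slots into the usual kernel unwind ladder: each setup step that can fail jumps to a label that tears down only the steps that already succeeded, in reverse order. Schematically, with placeholder names:

	err = setup_a();
	if (err)
		goto err_a;
	err = setup_b();
	if (err)
		goto err_b;	/* b failed: undo a only */
	return 0;

	err_b:
		teardown_a();
	err_a:
		return err;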
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index c28bdce14fd5..7bad5c95551f 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -11,12 +11,6 @@
#define DEFAULT_MTU (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + \
sizeof(struct af_vsockmon_hdr))
-struct pcpu_lstats {
- u64 rx_packets;
- u64 rx_bytes;
- struct u64_stats_sync syncp;
-};
-
static int vsockmon_dev_init(struct net_device *dev)
{
dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
@@ -56,8 +50,8 @@ static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
u64_stats_update_begin(&stats->syncp);
- stats->rx_bytes += len;
- stats->rx_packets++;
+ stats->bytes += len;
+ stats->packets++;
u64_stats_update_end(&stats->syncp);
dev_kfree_skb(skb);
@@ -80,8 +74,8 @@ vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
do {
start = u64_stats_fetch_begin_irq(&vstats->syncp);
- tbytes = vstats->rx_bytes;
- tpackets = vstats->rx_packets;
+ tbytes = vstats->bytes;
+ tpackets = vstats->packets;
} while (u64_stats_fetch_retry_irq(&vstats->syncp, start));
packets += tpackets;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ababba37d735..297cdeaef479 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -103,22 +103,6 @@ bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}
-static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
-{
- if (ipa->sa.sa_family == AF_INET6)
- return ipv6_addr_any(&ipa->sin6.sin6_addr);
- else
- return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
-}
-
-static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
-{
- if (ipa->sa.sa_family == AF_INET6)
- return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
- else
- return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
-}
-
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
if (nla_len(nla) >= sizeof(struct in6_addr)) {
@@ -151,16 +135,6 @@ bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}
-static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
-{
- return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
-}
-
-static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
-{
- return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
-}
-
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
if (nla_len(nla) >= sizeof(struct in6_addr)) {
@@ -298,6 +272,8 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
ndm->ndm_state = fdb->state;
ndm->ndm_ifindex = vxlan->dev->ifindex;
ndm->ndm_flags = fdb->flags;
+ if (rdst->offloaded)
+ ndm->ndm_flags |= NTF_OFFLOADED;
ndm->ndm_type = RTN_UNICAST;
if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
@@ -353,8 +329,8 @@ static inline size_t vxlan_nlmsg_size(void)
+ nla_total_size(sizeof(struct nda_cacheinfo));
}
-static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
- struct vxlan_rdst *rd, int type)
+static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+ struct vxlan_rdst *rd, int type)
{
struct net *net = dev_net(vxlan->dev);
struct sk_buff *skb;
@@ -379,6 +355,49 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
+static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
+ struct vxlan_fdb *fdb,
+ struct vxlan_rdst *rd,
+ bool adding)
+{
+ struct switchdev_notifier_vxlan_fdb_info info;
+ enum switchdev_notifier_type notifier_type;
+
+ if (WARN_ON(!rd))
+ return;
+
+ notifier_type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE
+ : SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE;
+
+ info = (struct switchdev_notifier_vxlan_fdb_info){
+ .remote_ip = rd->remote_ip,
+ .remote_port = rd->remote_port,
+ .remote_vni = rd->remote_vni,
+ .remote_ifindex = rd->remote_ifindex,
+ .vni = fdb->vni,
+ .offloaded = rd->offloaded,
+ };
+ memcpy(info.eth_addr, fdb->eth_addr, ETH_ALEN);
+
+ call_switchdev_notifiers(notifier_type, vxlan->dev,
+ &info.info);
+}
+
+static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+ struct vxlan_rdst *rd, int type)
+{
+ switch (type) {
+ case RTM_NEWNEIGH:
+ vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, true);
+ break;
+ case RTM_DELNEIGH:
+ vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, false);
+ break;
+ }
+
+ __vxlan_fdb_notify(vxlan, fdb, rd, type);
+}
+
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -464,7 +483,7 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
struct vxlan_fdb *f;
f = __vxlan_find_mac(vxlan, mac, vni);
- if (f)
+ if (f && f->used != jiffies)
f->used = jiffies;
return f;
@@ -488,6 +507,47 @@ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
return NULL;
}
+int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ u8 eth_addr[ETH_ALEN + 2] = { 0 };
+ struct vxlan_rdst *rdst;
+ struct vxlan_fdb *f;
+ int rc = 0;
+
+ if (is_multicast_ether_addr(mac) ||
+ is_zero_ether_addr(mac))
+ return -EINVAL;
+
+ ether_addr_copy(eth_addr, mac);
+
+ rcu_read_lock();
+
+ f = __vxlan_find_mac(vxlan, eth_addr, vni);
+ if (!f) {
+ rc = -ENOENT;
+ goto out;
+ }
+
+ rdst = first_remote_rcu(f);
+
+ memset(fdb_info, 0, sizeof(*fdb_info));
+ fdb_info->info.dev = dev;
+ fdb_info->remote_ip = rdst->remote_ip;
+ fdb_info->remote_port = rdst->remote_port;
+ fdb_info->remote_vni = rdst->remote_vni;
+ fdb_info->remote_ifindex = rdst->remote_ifindex;
+ fdb_info->vni = vni;
+ fdb_info->offloaded = rdst->offloaded;
+ ether_addr_copy(fdb_info->eth_addr, mac);
+
+out:
+ rcu_read_unlock();
+ return rc;
+}
+EXPORT_SYMBOL_GPL(vxlan_fdb_find_uc);
+
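A note on eth_addr[ETH_ALEN + 2] above: the two spare bytes are most plausibly there because the fdb compare path can use an ether_addr_equal_64bits()-style helper that loads a full 8 bytes per MAC, so lookups must be handed a padded buffer (an editorial inference; the compare helper itself is outside this hunk):

	u8 eth_addr[ETH_ALEN + 2] = { 0 };	/* 6 MAC bytes + 2 pad bytes */

	ether_addr_copy(eth_addr, mac);
	/* an 8-byte-wide compare now stays inside the buffer */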
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port, __be32 vni,
@@ -533,6 +593,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
rd->remote_ip = *ip;
rd->remote_port = port;
+ rd->offloaded = false;
rd->remote_vni = vni;
rd->remote_ifindex = ifindex;
@@ -697,6 +758,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
__be16 port, __be32 src_vni, __be32 vni,
__u32 ifindex, __u8 ndm_flags)
{
+ __u8 fdb_flags = (ndm_flags & ~NTF_USE);
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
int notify = 0;
@@ -714,8 +776,8 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
f->updated = jiffies;
notify = 1;
}
- if (f->flags != ndm_flags) {
- f->flags = ndm_flags;
+ if (f->flags != fdb_flags) {
+ f->flags = fdb_flags;
f->updated = jiffies;
notify = 1;
}
@@ -737,6 +799,9 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
return rc;
notify |= rc;
}
+
+ if (ndm_flags & NTF_USE)
+ f->used = jiffies;
} else {
if (!(flags & NLM_F_CREATE))
return -ENOENT;
@@ -748,7 +813,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
- vni, ifindex, ndm_flags, &f);
+ vni, ifindex, fdb_flags, &f);
if (rc < 0)
return rc;
notify = 1;
@@ -778,12 +843,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
bool do_notify)
{
+ struct vxlan_rdst *rd;
+
netdev_dbg(vxlan->dev,
"delete %pM\n", f->eth_addr);
--vxlan->addrcnt;
if (do_notify)
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
+ list_for_each_entry(rd, &f->remotes, list)
+ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
hlist_del_rcu(&f->hlist);
call_rcu(&f->rcu, vxlan_fdb_free);
@@ -2194,11 +2262,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ndst = &rt->dst;
- if (skb_dst(skb)) {
- int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
@@ -2235,11 +2299,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto out_unlock;
}
- if (skb_dst(skb)) {
- int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
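skb_tunnel_check_pmtu() centralises what both call sites open-coded: subtract the tunnel headroom from the route MTU and update the inner path MTU only when the packet is actually oversized. Its definition in this kernel generation is approximately:

	static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
						 struct dst_entry *encap_dst,
						 int headroom)
	{
		u32 encap_mtu = dst_mtu(encap_dst);

		if (skb->len > encap_mtu - headroom)
			skb_dst_update_pmtu(skb, encap_mtu - headroom);
	}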
@@ -3539,6 +3599,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
@@ -3603,6 +3664,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
}
if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+ nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
+ !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
@@ -3754,6 +3817,51 @@ static struct notifier_block vxlan_notifier_block __read_mostly = {
.notifier_call = vxlan_netdevice_event,
};
+static void
+vxlan_fdb_offloaded_set(struct net_device *dev,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *rdst;
+ struct vxlan_fdb *f;
+
+ spin_lock_bh(&vxlan->hash_lock);
+
+ f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ if (!f)
+ goto out;
+
+ rdst = vxlan_fdb_find_rdst(f, &fdb_info->remote_ip,
+ fdb_info->remote_port,
+ fdb_info->remote_vni,
+ fdb_info->remote_ifindex);
+ if (!rdst)
+ goto out;
+
+ rdst->offloaded = fdb_info->offloaded;
+
+out:
+ spin_unlock_bh(&vxlan->hash_lock);
+}
+
+static int vxlan_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case SWITCHDEV_VXLAN_FDB_OFFLOADED:
+ vxlan_fdb_offloaded_set(dev, ptr);
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = {
+ .notifier_call = vxlan_switchdev_event,
+};
+
static __net_init int vxlan_init_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -3827,11 +3935,17 @@ static int __init vxlan_init_module(void)
if (rc)
goto out2;
- rc = rtnl_link_register(&vxlan_link_ops);
+ rc = register_switchdev_notifier(&vxlan_switchdev_notifier_block);
if (rc)
goto out3;
+ rc = rtnl_link_register(&vxlan_link_ops);
+ if (rc)
+ goto out4;
+
return 0;
+out4:
+ unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
out3:
unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
@@ -3844,6 +3958,7 @@ late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
rtnl_link_unregister(&vxlan_link_ops);
+ unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
unregister_netdevice_notifier(&vxlan_notifier_block);
unregister_pernet_subsys(&vxlan_net_ops);
/* rcu_barrier() is called by netns */
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 2371e078afbb..91dbbde3c35b 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -24,7 +24,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <linux/delay.h>
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 5f0366a125e2..4d6409605207 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -36,6 +36,7 @@
#define DRV_NAME "ucc_hdlc"
#define TDM_PPPOHT_SLIC_MAXIN
+#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
@@ -97,6 +98,12 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
if (priv->tsa) {
uf_info->tsa = 1;
uf_info->ctsp = 1;
+ uf_info->cds = 1;
+ uf_info->ctss = 1;
+ } else {
+ uf_info->cds = 0;
+ uf_info->ctsp = 0;
+ uf_info->ctss = 0;
}
/* This sets HPM register in CMXUCR register which configures a
@@ -265,7 +272,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
- iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
+ iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
@@ -375,6 +382,10 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
break;
+ case ARPHRD_ETHER:
+ dev->stats.tx_bytes += skb->len;
+ break;
+
default:
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
@@ -420,12 +431,25 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
+{
+ u32 cecr_subblock;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);
+
+ qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ return 0;
+}
+
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
/* Start from the next BD that should be filled */
struct net_device *dev = priv->ndev;
struct qe_bd *bd; /* BD pointer */
u16 bd_status;
+ int tx_restart = 0;
bd = priv->dirty_tx;
bd_status = ioread16be(&bd->status);
@@ -434,6 +458,15 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
while ((bd_status & T_R_S) == 0) {
struct sk_buff *skb;
+ if (bd_status & T_UN_S) { /* Underrun */
+ dev->stats.tx_fifo_errors++;
+ tx_restart = 1;
+ }
+ if (bd_status & T_CT_S) { /* Carrier lost */
+ dev->stats.tx_carrier_errors++;
+ tx_restart = 1;
+ }
+
/* BD contains already transmitted buffer. */
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
@@ -465,6 +498,9 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
}
priv->dirty_tx = bd;
+ if (tx_restart)
+ hdlc_tx_restart(priv);
+
return 0;
}
@@ -483,11 +519,22 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
/* while there are received buffers and BD is full (~R_E) */
while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
- if (bd_status & R_OV_S)
- dev->stats.rx_over_errors++;
- if (bd_status & R_CR_S) {
- dev->stats.rx_crc_errors++;
- dev->stats.rx_dropped++;
+ if (bd_status & (RX_BD_ERRORS)) {
+ dev->stats.rx_errors++;
+
+ if (bd_status & R_CD_S)
+ dev->stats.collisions++;
+ if (bd_status & R_OV_S)
+ dev->stats.rx_fifo_errors++;
+ if (bd_status & R_CR_S)
+ dev->stats.rx_crc_errors++;
+ if (bd_status & R_AB_S)
+ dev->stats.rx_over_errors++;
+ if (bd_status & R_NO_S)
+ dev->stats.rx_frame_errors++;
+ if (bd_status & R_LG_S)
+ dev->stats.rx_length_errors++;
+
goto recycle;
}
bdbuffer = priv->rx_buffer +
@@ -512,6 +559,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
break;
case ARPHRD_PPP:
+ case ARPHRD_ETHER:
length -= HDLC_CRC_SIZE;
skb = dev_alloc_skb(length);
@@ -535,7 +583,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
netif_receive_skb(skb);
recycle:
- iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
+ iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);
/* update to point at the next bd */
if (bd_status & R_W_S) {
@@ -611,7 +659,7 @@ static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
/* Errors and other events */
if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
- dev->stats.rx_errors++;
+ dev->stats.rx_missed_errors++;
if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
dev->stats.tx_errors++;
@@ -780,6 +828,7 @@ static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
if (parity != PARITY_NONE &&
parity != PARITY_CRC32_PR1_CCITT &&
+ parity != PARITY_CRC16_PR0_CCITT &&
parity != PARITY_CRC16_PR1_CCITT)
return -EINVAL;
@@ -987,11 +1036,17 @@ static const struct dev_pm_ops uhdlc_pm_ops = {
#define HDLC_PM_OPS NULL
#endif
+static void uhdlc_tx_timeout(struct net_device *ndev)
+{
+ netdev_err(ndev, "%s\n", __func__);
+}
+
static const struct net_device_ops uhdlc_ops = {
.ndo_open = uhdlc_open,
.ndo_stop = uhdlc_close,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = uhdlc_ioctl,
+ .ndo_tx_timeout = uhdlc_tx_timeout,
};
static int ucc_hdlc_probe(struct platform_device *pdev)
@@ -1015,7 +1070,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
}
ucc_num = val - 1;
- if ((ucc_num > 3) || (ucc_num < 0)) {
+ if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
dev_err(&pdev->dev, ": Invalid UCC num\n");
return -EINVAL;
}
@@ -1090,6 +1145,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
goto free_utdm;
}
+ if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
+ uhdlc_priv->hmask = DEFAULT_ADDR_MASK;
+
ret = uhdlc_init(uhdlc_priv);
if (ret) {
dev_err(&pdev->dev, "Failed to init uhdlc\n");
@@ -1107,6 +1165,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
hdlc = dev_to_hdlc(dev);
dev->tx_queue_len = 16;
dev->netdev_ops = &uhdlc_ops;
+ dev->watchdog_timeo = 2 * HZ;
hdlc->attach = ucc_hdlc_attach;
hdlc->xmit = ucc_hdlc_tx;
netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
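For context on the watchdog wiring above: the networking core arms a per-device timer from dev->watchdog_timeo and invokes .ndo_tx_timeout once a TX queue has been stopped for longer than that interval (2 * HZ here). A minimal sketch, not part of the patch, of a slightly fuller handler; dev_to_hdlc() is the real accessor, but the accounting shown is illustrative only:

#include <linux/netdevice.h>
#include <linux/hdlc.h>

/* Sketch only: the stub in the patch just logs. A fuller handler might
 * also account the event; dev_to_hdlc(ndev)->priv would reach the
 * driver state if a recovery path were added, which is elided here. */
static void uhdlc_tx_timeout_sketch(struct net_device *ndev)
{
	ndev->stats.tx_errors++;
	netdev_err(ndev, "tx watchdog fired after %d jiffies\n",
		   ndev->watchdog_timeo);
}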
diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h
index c21134c1f180..b99fa2f1cd99 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.h
+++ b/drivers/net/wan/fsl_ucc_hdlc.h
@@ -106,6 +106,7 @@ struct ucc_hdlc_private {
unsigned short encoding;
unsigned short parity;
+ unsigned short hmask;
u32 clocking;
spinlock_t lock; /* lock for Tx BD and Tx buffer */
#ifdef CONFIG_PM
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 4e437c599e9a..1f8a3f77a7b9 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -27,7 +27,6 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <linux/pci.h>
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 74c06a5f586f..1098263ab862 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -33,7 +33,6 @@
#include <linux/lapb.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
-#include <linux/compat.h>
#include <linux/slab.h>
#include <net/x25device.h>
#include "x25_asy.h"
@@ -703,21 +702,6 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
}
}
-#ifdef CONFIG_COMPAT
-static long x25_asy_compat_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case SIOCGIFNAME:
- case SIOCSIFHWADDR:
- return x25_asy_ioctl(tty, file, cmd,
- (unsigned long)compat_ptr(arg));
- }
-
- return -ENOIOCTLCMD;
-}
-#endif
-
static int x25_asy_open_dev(struct net_device *dev)
{
struct x25_asy *sl = netdev_priv(dev);
@@ -769,9 +753,6 @@ static struct tty_ldisc_ops x25_ldisc = {
.open = x25_asy_open_tty,
.close = x25_asy_close_tty,
.ioctl = x25_asy_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = x25_asy_compat_ioctl,
-#endif
.receive_buf = x25_asy_receive_buf,
.write_wakeup = x25_asy_write_wakeup,
};
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 094cea775d0c..ef298d8525c5 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -257,7 +257,7 @@ static const struct
[I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO },
[I2400M_MS_BUSY] = { "busy", -EBUSY },
[I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
- [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ },
+ [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
[I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
[I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
[I2400M_MS_NO_RF] = { "no RF", -EIO },
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 54ff5930126c..e1ad6b9166a6 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -42,7 +42,9 @@ config ATH10K_USB
config ATH10K_SNOC
tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
- depends on ATH10K && ARCH_QCOM
+ depends on ATH10K
+ depends on ARCH_QCOM || COMPILE_TEST
+ select QCOM_QMI_HELPERS
---help---
This module adds support for integrated WCN3990 chip connected
to system NOC(SNOC). Currently work in progress and will not
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 44d60a61b242..66326b949ab1 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -36,7 +36,9 @@ obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
ath10k_usb-y += usb.o
obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o
-ath10k_snoc-y += snoc.o
+ath10k_snoc-y += qmi.o \
+ qmi_wlfw_v01.o \
+ snoc.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index c9bd0e2b5db7..4cd69aca75e2 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -655,10 +655,10 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar)
ath10k_ahb_irq_disable(ar);
synchronize_irq(ar_ahb->irq);
- ath10k_pci_flush(ar);
-
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
+
+ ath10k_pci_flush(ar);
}
static int ath10k_ahb_hif_power_up(struct ath10k *ar)
@@ -750,7 +750,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
enum ath10k_hw_rev hw_rev;
size_t size;
int ret;
- u32 chip_id;
+ struct ath10k_bus_params bus_params;
of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev);
if (!of_id) {
@@ -806,14 +806,15 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
ath10k_pci_ce_deinit(ar);
- chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
- if (chip_id == 0xffffffff) {
+ bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+ bus_params.chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (bus_params.chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
ret = -ENODEV;
goto err_halt_device;
}
- ret = ath10k_core_register(ar, chip_id);
+ ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_halt_device;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index af4978d6a14b..1750b182209b 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -459,3 +459,26 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
return ret;
}
+
+int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
+ int ret;
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "bmi set start command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_SET_APP_START);
+ cmd.set_app_start.addr = __cpu_to_le32(address);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to set start to the device:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 9a396817aa55..725c9afc63f2 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -86,6 +86,10 @@ enum bmi_cmd_id {
#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000
#define BMI_PARAM_FLASH_SECTION_ALL 0x10000
+/* Dual-band Extended Board ID */
+#define BMI_PARAM_GET_EXT_BOARD_ID 0x40000
+#define ATH10K_BMI_EXT_BOARD_ID_SUPPORT 0x40000
+
#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00
#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10
@@ -93,6 +97,7 @@ enum bmi_cmd_id {
#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB 15
#define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff
+#define ATH10K_BMI_EBOARD_ID_STATUS_MASK 0xff
struct bmi_cmd {
__le32 id; /* enum bmi_cmd_id */
@@ -190,6 +195,35 @@ struct bmi_target_info {
u32 type;
};
+struct bmi_segmented_file_header {
+ __le32 magic_num;
+ __le32 file_flags;
+ u8 data[];
+};
+
+struct bmi_segmented_metadata {
+ __le32 addr;
+ __le32 length;
+ u8 data[];
+};
+
+#define BMI_SGMTFILE_MAGIC_NUM 0x544d4753 /* "SGMT" */
+#define BMI_SGMTFILE_FLAG_COMPRESS 1
+
+/* Special values for bmi_segmented_metadata.length (all have high bit set) */
+
+/* end of segmented data */
+#define BMI_SGMTFILE_DONE 0xffffffff
+
+/* Board Data segment */
+#define BMI_SGMTFILE_BDDATA 0xfffffffe
+
+/* set beginning address */
+#define BMI_SGMTFILE_BEGINADDR 0xfffffffd
+
+/* immediate function execution */
+#define BMI_SGMTFILE_EXEC 0xfffffffc
+
/* in jiffies */
#define BMI_COMMUNICATION_TIMEOUT_HZ (3 * HZ)
@@ -239,4 +273,6 @@ int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val);
+int ath10k_bmi_set_start(struct ath10k *ar, u32 address);
+
#endif /* _BMI_H_ */
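The segmented-image definitions above pair a magic/flags header with a stream of addr/length metadata records, where a length with the high bit set is one of the special markers (DONE, BDDATA, BEGINADDR, EXEC) rather than a payload size. A hedged sketch of a header sanity check; the record-by-record loader is not part of this hunk and the function name is illustrative:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include "bmi.h"

/* Sketch under stated assumptions: only validates the header before a
 * caller walks the bmi_segmented_metadata records at hdr->data. */
static int bmi_check_segmented_image(const void *buf, size_t len)
{
	const struct bmi_segmented_file_header *hdr = buf;

	if (len < sizeof(*hdr))
		return -EINVAL;
	if (le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM)
		return -EINVAL;

	/* Records follow at hdr->data; a length of BMI_SGMTFILE_DONE
	 * terminates the stream, and the other high-bit values select
	 * special handling in the loader. */
	return 0;
}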
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 18c709c484e7..f6d3ecbdd3a3 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1280,10 +1280,17 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state;
+ u32 ctrl_addr;
int ce_id;
for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
- u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ continue;
+
+ ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
ath10k_ce_error_intr_disable(ar, ctrl_addr);
@@ -1300,11 +1307,14 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar)
int ce_id;
struct ath10k_ce_pipe *ce_state;
- /* Skip the last copy engine, CE7 the diagnostic window, as that
- * uses polling and isn't initialized for interrupts.
+ /* Enable interrupts for copy engines that
+ * are not using polling mode.
*/
- for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) {
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ continue;
+
ath10k_ce_per_engine_handler_adjust(ce_state);
}
}
@@ -1416,10 +1426,8 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
nentries = roundup_pow_of_two(nentries);
- src_ring = kzalloc(sizeof(*src_ring) +
- (nentries *
- sizeof(*src_ring->per_transfer_context)),
- GFP_KERNEL);
+ src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
if (src_ring == NULL)
return ERR_PTR(-ENOMEM);
@@ -1476,10 +1484,8 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
nentries = roundup_pow_of_two(nentries);
- src_ring = kzalloc(sizeof(*src_ring) +
- (nentries *
- sizeof(*src_ring->per_transfer_context)),
- GFP_KERNEL);
+ src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
if (!src_ring)
return ERR_PTR(-ENOMEM);
@@ -1534,10 +1540,8 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
nentries = roundup_pow_of_two(attr->dest_nentries);
- dest_ring = kzalloc(sizeof(*dest_ring) +
- (nentries *
- sizeof(*dest_ring->per_transfer_context)),
- GFP_KERNEL);
+ dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
if (dest_ring == NULL)
return ERR_PTR(-ENOMEM);
@@ -1580,10 +1584,8 @@ ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
nentries = roundup_pow_of_two(attr->dest_nentries);
- dest_ring = kzalloc(sizeof(*dest_ring) +
- (nentries *
- sizeof(*dest_ring->per_transfer_context)),
- GFP_KERNEL);
+ dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
if (!dest_ring)
return ERR_PTR(-ENOMEM);
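The four allocations above replace open-coded sizeof(*ring) + nentries * sizeof(...) arithmetic with struct_size(), which computes the same total for a trailing flexible array member but evaluates to SIZE_MAX on overflow, so the allocation fails instead of being undersized. A minimal self-contained sketch of the idiom; demo_ring is illustrative, not a driver type:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_ring {
	unsigned int nentries;
	void *per_transfer_context[];	/* flexible array member */
};

static struct demo_ring *demo_ring_alloc(unsigned int nentries)
{
	struct demo_ring *ring;

	/* struct_size(ring, per_transfer_context, nentries) equals
	 * sizeof(*ring) + nentries * sizeof(ring->per_transfer_context[0]),
	 * with overflow checking. */
	ring = kzalloc(struct_size(ring, per_transfer_context, nentries),
		       GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->nentries = nentries;
	return ring;
}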
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index b8fb5382dede..ead9987c3259 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -275,16 +275,19 @@ void ath10k_ce_free_rri(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
-#define CE_ATTR_NO_SNOOP 1
+#define CE_ATTR_NO_SNOOP BIT(0)
/* Byte swap data words */
-#define CE_ATTR_BYTE_SWAP_DATA 2
+#define CE_ATTR_BYTE_SWAP_DATA BIT(1)
/* Swizzle descriptors? */
-#define CE_ATTR_SWIZZLE_DESCRIPTORS 4
+#define CE_ATTR_SWIZZLE_DESCRIPTORS BIT(2)
/* no interrupt on copy completion */
-#define CE_ATTR_DIS_INTR 8
+#define CE_ATTR_DIS_INTR BIT(3)
+
+/* no interrupt, only polling */
+#define CE_ATTR_POLL BIT(4)
/* Attributes of an instance of a Copy Engine */
struct ce_attr {
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index c40cd129afe7..da607febfd82 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/of.h>
+#include <linux/property.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <asm/byteorder.h>
@@ -63,6 +64,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
.id = QCA988X_HW_2_0_VERSION,
.dev_id = QCA988X_2_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
@@ -84,13 +86,14 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
},
{
.id = QCA988X_HW_2_0_VERSION,
@@ -116,7 +119,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -124,10 +126,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
},
{
.id = QCA9887_HW_1_0_VERSION,
.dev_id = QCA9887_1_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
.name = "qca9887 hw1.0",
.patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
@@ -149,7 +154,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -157,10 +161,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
.dev_id = QCA6164_2_1_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
.name = "qca6164 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
@@ -181,7 +188,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -189,10 +195,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
.dev_id = QCA6174_2_1_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
.name = "qca6174 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
@@ -213,7 +222,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -221,10 +229,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
},
{
.id = QCA6174_HW_3_0_VERSION,
.dev_id = QCA6174_2_1_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
.name = "qca6174 hw3.0",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
@@ -245,7 +256,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -253,10 +263,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
.dev_id = QCA6174_2_1_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
.name = "qca6174 hw3.2",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
@@ -280,7 +293,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -288,10 +300,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = true,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
.dev_id = QCA99X0_2_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
.name = "qca99x0 hw2.0",
.patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
@@ -318,7 +333,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -326,10 +340,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
.dev_id = QCA9984_1_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
.name = "qca9984/qca9994 hw1.0",
.patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
@@ -346,8 +363,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.fw = {
.dir = QCA9984_HW_1_0_FW_DIR,
.board = QCA9984_HW_1_0_BOARD_DATA_FILE,
+ .eboard = QCA9984_HW_1_0_EBOARD_DATA_FILE,
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ .ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
.hw_ops = &qca99x0_ops,
@@ -361,7 +380,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 1560,
.vht160_mcs_tx_highest = 1560,
.n_cipher_suites = 11,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -369,10 +387,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
.dev_id = QCA9888_2_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
.name = "qca9888 hw2.0",
.patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
@@ -403,7 +424,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 780,
.vht160_mcs_tx_highest = 780,
.n_cipher_suites = 11,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -411,10 +431,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
.dev_id = QCA9377_1_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
.name = "qca9377 hw1.0",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
@@ -435,7 +458,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -443,10 +465,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
.dev_id = QCA9377_1_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
.name = "qca9377 hw1.1",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
@@ -469,7 +494,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 8,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -477,10 +501,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = true,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
.dev_id = 0,
+ .bus = ATH10K_BUS_AHB,
.name = "qca4019 hw1.0",
.patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
@@ -508,7 +535,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
.n_cipher_suites = 11,
- .num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
@@ -516,10 +542,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
.dev_id = 0,
+ .bus = ATH10K_BUS_PCI,
.name = "wcn3990 hw1.0",
.continuous_frag_desc = true,
.tx_chain_mask = 0x7,
@@ -539,6 +568,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.per_ce_irq = true,
.shadow_reg_support = true,
.rri_on_ddr = true,
+ .hw_filter_reset_required = false,
+ .fw_diag_ce_download = false,
},
};
@@ -762,153 +793,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
return 0;
}
-static int ath10k_download_board_data(struct ath10k *ar, const void *data,
- size_t data_len)
-{
- u32 board_data_size = ar->hw_params.fw.board_size;
- u32 address;
- int ret;
-
- ret = ath10k_push_board_ext_data(ar, data, data_len);
- if (ret) {
- ath10k_err(ar, "could not push board ext data (%d)\n", ret);
- goto exit;
- }
-
- ret = ath10k_bmi_read32(ar, hi_board_data, &address);
- if (ret) {
- ath10k_err(ar, "could not read board data addr (%d)\n", ret);
- goto exit;
- }
-
- ret = ath10k_bmi_write_memory(ar, address, data,
- min_t(u32, board_data_size,
- data_len));
- if (ret) {
- ath10k_err(ar, "could not write board data (%d)\n", ret);
- goto exit;
- }
-
- ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
- if (ret) {
- ath10k_err(ar, "could not write board data bit (%d)\n", ret);
- goto exit;
- }
-
-exit:
- return ret;
-}
-
-static int ath10k_download_cal_file(struct ath10k *ar,
- const struct firmware *file)
-{
- int ret;
-
- if (!file)
- return -ENOENT;
-
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- ret = ath10k_download_board_data(ar, file->data, file->size);
- if (ret) {
- ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
- return ret;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
-
- return 0;
-}
-
-static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
-{
- struct device_node *node;
- int data_len;
- void *data;
- int ret;
-
- node = ar->dev->of_node;
- if (!node)
- /* Device Tree is optional, don't print any warnings if
- * there's no node for ath10k.
- */
- return -ENOENT;
-
- if (!of_get_property(node, dt_name, &data_len)) {
- /* The calibration data node is optional */
- return -ENOENT;
- }
-
- if (data_len != ar->hw_params.cal_data_len) {
- ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
- data_len);
- ret = -EMSGSIZE;
- goto out;
- }
-
- data = kmalloc(data_len, GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = of_property_read_u8_array(node, dt_name, data, data_len);
- if (ret) {
- ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
- ret);
- goto out_free;
- }
-
- ret = ath10k_download_board_data(ar, data, data_len);
- if (ret) {
- ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
- ret);
- goto out_free;
- }
-
- ret = 0;
-
-out_free:
- kfree(data);
-
-out:
- return ret;
-}
-
-static int ath10k_download_cal_eeprom(struct ath10k *ar)
-{
- size_t data_len;
- void *data = NULL;
- int ret;
-
- ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
- if (ret) {
- if (ret != -EOPNOTSUPP)
- ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
- ret);
- goto out_free;
- }
-
- ret = ath10k_download_board_data(ar, data, data_len);
- if (ret) {
- ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
- ret);
- goto out_free;
- }
-
- ret = 0;
-
-out_free:
- kfree(data);
-
- return ret;
-}
-
static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
{
u32 result, address;
u8 board_id, chip_id;
+ bool ext_bid_support;
int ret, bmi_board_id_param;
address = ar->hw_params.patch_load_addr;
@@ -948,10 +837,13 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP);
chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP);
+ ext_bid_support = (result & ATH10K_BMI_EXT_BOARD_ID_SUPPORT);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
- result, board_id, chip_id);
+ "boot get otp board id result 0x%08x board_id %d chip_id %d ext_bid_support %d\n",
+ result, board_id, chip_id, ext_bid_support);
+
+ ar->id.ext_bid_supported = ext_bid_support;
if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
(board_id == 0)) {
@@ -1055,64 +947,6 @@ static int ath10k_core_check_dt(struct ath10k *ar)
return 0;
}
-static int ath10k_download_and_run_otp(struct ath10k *ar)
-{
- u32 result, address = ar->hw_params.patch_load_addr;
- u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
- int ret;
-
- ret = ath10k_download_board_data(ar,
- ar->running_fw->board_data,
- ar->running_fw->board_len);
- if (ret) {
- ath10k_err(ar, "failed to download board data: %d\n", ret);
- return ret;
- }
-
- /* OTP is optional */
-
- if (!ar->running_fw->fw_file.otp_data ||
- !ar->running_fw->fw_file.otp_len) {
- ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
- ar->running_fw->fw_file.otp_data,
- ar->running_fw->fw_file.otp_len);
- return 0;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
- address, ar->running_fw->fw_file.otp_len);
-
- ret = ath10k_bmi_fast_download(ar, address,
- ar->running_fw->fw_file.otp_data,
- ar->running_fw->fw_file.otp_len);
- if (ret) {
- ath10k_err(ar, "could not write otp (%d)\n", ret);
- return ret;
- }
-
- /* As of now pre-cal is valid for 10_4 variants */
- if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
- ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
- bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
-
- ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
- if (ret) {
- ath10k_err(ar, "could not execute otp (%d)\n", ret);
- return ret;
- }
-
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
-
- if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
- ar->running_fw->fw_file.fw_features)) &&
- result != 0) {
- ath10k_err(ar, "otp calibration failed: %d", result);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int ath10k_download_fw(struct ath10k *ar)
{
u32 address, data_len;
@@ -1135,25 +969,42 @@ static int ath10k_download_fw(struct ath10k *ar)
"boot uploading firmware image %pK len %d\n",
data, data_len);
- ret = ath10k_bmi_fast_download(ar, address, data, data_len);
- if (ret) {
- ath10k_err(ar, "failed to download firmware: %d\n",
- ret);
- return ret;
+ /* Check if the device supports downloading firmware via the
+ * diag copy engine, which greatly reduces the time needed to
+ * download firmware.
+ */
+ if (ar->hw_params.fw_diag_ce_download) {
+ ret = ath10k_hw_diag_fast_download(ar, address,
+ data, data_len);
+ if (ret == 0)
+ /* firmware upload via diag ce was successful */
+ return 0;
+
+ ath10k_warn(ar,
+ "failed to upload firmware via diag ce, trying BMI: %d",
+ ret);
}
- return ret;
+ return ath10k_bmi_fast_download(ar, address,
+ data, data_len);
}
-static void ath10k_core_free_board_files(struct ath10k *ar)
+void ath10k_core_free_board_files(struct ath10k *ar)
{
if (!IS_ERR(ar->normal_mode_fw.board))
release_firmware(ar->normal_mode_fw.board);
+ if (!IS_ERR(ar->normal_mode_fw.ext_board))
+ release_firmware(ar->normal_mode_fw.ext_board);
+
ar->normal_mode_fw.board = NULL;
ar->normal_mode_fw.board_data = NULL;
ar->normal_mode_fw.board_len = 0;
+ ar->normal_mode_fw.ext_board = NULL;
+ ar->normal_mode_fw.ext_board_data = NULL;
+ ar->normal_mode_fw.ext_board_len = 0;
}
+EXPORT_SYMBOL(ath10k_core_free_board_files);
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
@@ -1206,28 +1057,47 @@ success:
return 0;
}
-static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar)
+static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
{
- if (!ar->hw_params.fw.board) {
- ath10k_err(ar, "failed to find board file fw entry\n");
- return -EINVAL;
- }
+ const struct firmware *fw;
- ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(ar->normal_mode_fw.board))
- return PTR_ERR(ar->normal_mode_fw.board);
+ if (bd_ie_type == ATH10K_BD_IE_BOARD) {
+ if (!ar->hw_params.fw.board) {
+ ath10k_err(ar, "failed to find board file fw entry\n");
+ return -EINVAL;
+ }
+
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->normal_mode_fw.board))
+ return PTR_ERR(ar->normal_mode_fw.board);
+
+ ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
+ ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
+ } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
+ if (!ar->hw_params.fw.eboard) {
+ ath10k_err(ar, "failed to find eboard file fw entry\n");
+ return -EINVAL;
+ }
- ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
- ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
+ fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+ ar->hw_params.fw.eboard);
+ ar->normal_mode_fw.ext_board = fw;
+ if (IS_ERR(ar->normal_mode_fw.ext_board))
+ return PTR_ERR(ar->normal_mode_fw.ext_board);
+
+ ar->normal_mode_fw.ext_board_data = ar->normal_mode_fw.ext_board->data;
+ ar->normal_mode_fw.ext_board_len = ar->normal_mode_fw.ext_board->size;
+ }
return 0;
}
static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
const void *buf, size_t buf_len,
- const char *boardname)
+ const char *boardname,
+ int bd_ie_type)
{
const struct ath10k_fw_ie *hdr;
bool name_match_found;
@@ -1276,12 +1146,21 @@ static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
/* no match found */
break;
- ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot found board data for '%s'",
- boardname);
+ if (bd_ie_type == ATH10K_BD_IE_BOARD) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot found board data for '%s'",
+ boardname);
- ar->normal_mode_fw.board_data = board_ie_data;
- ar->normal_mode_fw.board_len = board_ie_len;
+ ar->normal_mode_fw.board_data = board_ie_data;
+ ar->normal_mode_fw.board_len = board_ie_len;
+ } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot found eboard data for '%s'",
+ boardname);
+
+ ar->normal_mode_fw.ext_board_data = board_ie_data;
+ ar->normal_mode_fw.ext_board_len = board_ie_len;
+ }
ret = 0;
goto out;
@@ -1331,7 +1210,18 @@ static int ath10k_core_search_bd(struct ath10k *ar,
switch (ie_id) {
case ATH10K_BD_IE_BOARD:
ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
- boardname);
+ boardname,
+ ATH10K_BD_IE_BOARD);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ break;
+
+ /* either found or error, so stop searching */
+ goto out;
+ case ATH10K_BD_IE_BOARD_EXT:
+ ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
+ boardname,
+ ATH10K_BD_IE_BOARD_EXT);
if (ret == -ENOENT)
/* no match found, continue */
break;
@@ -1361,9 +1251,11 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
const u8 *data;
int ret;
- ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- filename);
+ /* Skip if already fetched during board data download */
+ if (!ar->normal_mode_fw.board)
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ filename);
if (IS_ERR(ar->normal_mode_fw.board))
return PTR_ERR(ar->normal_mode_fw.board);
@@ -1440,6 +1332,14 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
goto out;
}
+ if (ar->id.qmi_ids_valid) {
+ scnprintf(name, name_len,
+ "bus=%s,qmi-board-id=%x",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.qmi_board_id);
+ goto out;
+ }
+
scnprintf(name, name_len,
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
ath10k_bus_str(ar->hif.bus),
@@ -1451,23 +1351,49 @@ out:
return 0;
}
-static int ath10k_core_fetch_board_file(struct ath10k *ar)
+static int ath10k_core_create_eboard_name(struct ath10k *ar, char *name,
+ size_t name_len)
+{
+ if (ar->id.bmi_ids_valid) {
+ scnprintf(name, name_len,
+ "bus=%s,bmi-chip-id=%d,bmi-eboard-id=%d",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.bmi_chip_id,
+ ar->id.bmi_eboard_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using eboard name '%s'\n", name);
+ return 0;
+ }
+ /* Fall back if the returned board id is zero */
+ return -1;
+}
+
+int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
{
char boardname[100], fallback_boardname[100];
int ret;
- ret = ath10k_core_create_board_name(ar, boardname,
- sizeof(boardname), true);
- if (ret) {
- ath10k_err(ar, "failed to create board name: %d", ret);
- return ret;
- }
+ if (bd_ie_type == ATH10K_BD_IE_BOARD) {
+ ret = ath10k_core_create_board_name(ar, boardname,
+ sizeof(boardname), true);
+ if (ret) {
+ ath10k_err(ar, "failed to create board name: %d", ret);
+ return ret;
+ }
- ret = ath10k_core_create_board_name(ar, fallback_boardname,
- sizeof(boardname), false);
- if (ret) {
- ath10k_err(ar, "failed to create fallback board name: %d", ret);
- return ret;
+ ret = ath10k_core_create_board_name(ar, fallback_boardname,
+ sizeof(boardname), false);
+ if (ret) {
+ ath10k_err(ar, "failed to create fallback board name: %d", ret);
+ return ret;
+ }
+ } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
+ ret = ath10k_core_create_eboard_name(ar, boardname,
+ sizeof(boardname));
+ if (ret) {
+ ath10k_err(ar, "fallback to eboard.bin since board id 0");
+ goto fallback;
+ }
}
ar->bd_api = 2;
@@ -1477,8 +1403,9 @@ static int ath10k_core_fetch_board_file(struct ath10k *ar)
if (!ret)
goto success;
+fallback:
ar->bd_api = 1;
- ret = ath10k_core_fetch_board_data_api_1(ar);
+ ret = ath10k_core_fetch_board_data_api_1(ar, bd_ie_type);
if (ret) {
ath10k_err(ar, "failed to fetch board-2.bin or board.bin from %s\n",
ar->hw_params.fw.dir);
@@ -1489,6 +1416,292 @@ success:
ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api);
return 0;
}
+EXPORT_SYMBOL(ath10k_core_fetch_board_file);
+
+static int ath10k_core_get_ext_board_id_from_otp(struct ath10k *ar)
+{
+ u32 result, address;
+ u8 ext_board_id;
+ int ret;
+
+ address = ar->hw_params.patch_load_addr;
+
+ if (!ar->normal_mode_fw.fw_file.otp_data ||
+ !ar->normal_mode_fw.fw_file.otp_len) {
+ ath10k_warn(ar,
+ "failed to retrieve extended board id due to otp binary missing\n");
+ return -ENODATA;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot upload otp to 0x%x len %zd for ext board id\n",
+ address, ar->normal_mode_fw.fw_file.otp_len);
+
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len);
+ if (ret) {
+ ath10k_err(ar, "could not write otp for ext board id check: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EXT_BOARD_ID, &result);
+ if (ret) {
+ ath10k_err(ar, "could not execute otp for ext board id check: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (!result) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "ext board id does not exist in otp, ignore it\n");
+ return -EOPNOTSUPP;
+ }
+
+ ext_board_id = result & ATH10K_BMI_EBOARD_ID_STATUS_MASK;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot get otp ext board id result 0x%08x ext_board_id %d\n",
+ result, ext_board_id);
+
+ ar->id.bmi_eboard_id = ext_board_id;
+
+ return 0;
+}
+
+static int ath10k_download_board_data(struct ath10k *ar, const void *data,
+ size_t data_len)
+{
+ u32 board_data_size = ar->hw_params.fw.board_size;
+ u32 eboard_data_size = ar->hw_params.fw.ext_board_size;
+ u32 board_address;
+ u32 ext_board_address;
+ int ret;
+
+ ret = ath10k_push_board_ext_data(ar, data, data_len);
+ if (ret) {
+ ath10k_err(ar, "could not push board ext data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_read32(ar, hi_board_data, &board_address);
+ if (ret) {
+ ath10k_err(ar, "could not read board data addr (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, board_address, data,
+ min_t(u32, board_data_size,
+ data_len));
+ if (ret) {
+ ath10k_err(ar, "could not write board data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
+ if (ret) {
+ ath10k_err(ar, "could not write board data bit (%d)\n", ret);
+ goto exit;
+ }
+
+ if (!ar->id.ext_bid_supported)
+ goto exit;
+
+ /* Extended board data download */
+ ret = ath10k_core_get_ext_board_id_from_otp(ar);
+ if (ret == -EOPNOTSUPP) {
+ /* Don't fetch ext_board_data if the ext board id is 0 */
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "otp returned ext board id 0\n");
+ return 0;
+ } else if (ret) {
+ ath10k_err(ar, "failed to get extended board id: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD_EXT);
+ if (ret)
+ goto exit;
+
+ if (ar->normal_mode_fw.ext_board_data) {
+ ext_board_address = board_address + EXT_BOARD_ADDRESS_OFFSET;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot writing ext board data to addr 0x%x",
+ ext_board_address);
+ ret = ath10k_bmi_write_memory(ar, ext_board_address,
+ ar->normal_mode_fw.ext_board_data,
+ min_t(u32, eboard_data_size, data_len));
+ if (ret)
+ ath10k_err(ar, "failed to write ext board data: %d\n", ret);
+ }
+
+exit:
+ return ret;
+}
+
+static int ath10k_download_and_run_otp(struct ath10k *ar)
+{
+ u32 result, address = ar->hw_params.patch_load_addr;
+ u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
+ int ret;
+
+ ret = ath10k_download_board_data(ar,
+ ar->running_fw->board_data,
+ ar->running_fw->board_len);
+ if (ret) {
+ ath10k_err(ar, "failed to download board data: %d\n", ret);
+ return ret;
+ }
+
+ /* OTP is optional */
+
+ if (!ar->running_fw->fw_file.otp_data ||
+ !ar->running_fw->fw_file.otp_len) {
+ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
+ return 0;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+ address, ar->running_fw->fw_file.otp_len);
+
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
+ if (ret) {
+ ath10k_err(ar, "could not write otp (%d)\n", ret);
+ return ret;
+ }
+
+ /* As of now pre-cal is valid for 10_4 variants */
+ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+ bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
+
+ ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
+ if (ret) {
+ ath10k_err(ar, "could not execute otp (%d)\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+ if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+ ar->running_fw->fw_file.fw_features)) &&
+ result != 0) {
+ ath10k_err(ar, "otp calibration failed: %d", result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ath10k_download_cal_file(struct ath10k *ar,
+ const struct firmware *file)
+{
+ int ret;
+
+ if (!file)
+ return -ENOENT;
+
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ret = ath10k_download_board_data(ar, file->data, file->size);
+ if (ret) {
+ ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
+
+ return 0;
+}
+
+static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
+{
+ struct device_node *node;
+ int data_len;
+ void *data;
+ int ret;
+
+ node = ar->dev->of_node;
+ if (!node)
+ /* Device Tree is optional, don't print any warnings if
+ * there's no node for ath10k.
+ */
+ return -ENOENT;
+
+ if (!of_get_property(node, dt_name, &data_len)) {
+ /* The calibration data node is optional */
+ return -ENOENT;
+ }
+
+ if (data_len != ar->hw_params.cal_data_len) {
+ ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
+ data_len);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
+ data = kmalloc(data_len, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u8_array(node, dt_name, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = ath10k_download_board_data(ar, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+
+out:
+ return ret;
+}
+
+static int ath10k_download_cal_eeprom(struct ath10k *ar)
+{
+ size_t data_len;
+ void *data = NULL;
+ int ret;
+
+ ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = ath10k_download_board_data(ar, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+
+ return ret;
+}
int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
struct ath10k_fw_file *fw_file)
@@ -1882,7 +2095,8 @@ static int ath10k_init_hw_params(struct ath10k *ar)
for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
hw_params = &ath10k_hw_params_list[i];
- if (hw_params->id == ar->target_version &&
+ if (hw_params->bus == ar->hif.bus &&
+ hw_params->id == ar->target_version &&
hw_params->dev_id == ar->dev_id)
break;
}
@@ -1983,6 +2197,7 @@ static void ath10k_core_set_coverage_class_work(struct work_struct *work)
static int ath10k_core_init_firmware_features(struct ath10k *ar)
{
struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+ int max_num_peers;
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) &&
!test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
@@ -2062,7 +2277,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
switch (fw_file->wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
- ar->max_num_peers = TARGET_NUM_PEERS;
+ max_num_peers = TARGET_NUM_PEERS;
ar->max_num_stations = TARGET_NUM_STATIONS;
ar->max_num_vdevs = TARGET_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
@@ -2074,10 +2289,10 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
if (ath10k_peer_stats_enabled(ar)) {
- ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
+ max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
} else {
- ar->max_num_peers = TARGET_10X_NUM_PEERS;
+ max_num_peers = TARGET_10X_NUM_PEERS;
ar->max_num_stations = TARGET_10X_NUM_STATIONS;
}
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
@@ -2086,7 +2301,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
- ar->max_num_peers = TARGET_TLV_NUM_PEERS;
+ max_num_peers = TARGET_TLV_NUM_PEERS;
ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
@@ -2098,7 +2313,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
- ar->max_num_peers = TARGET_10_4_NUM_PEERS;
+ max_num_peers = TARGET_10_4_NUM_PEERS;
ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
@@ -2117,10 +2332,16 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
+ default:
WARN_ON(1);
return -EINVAL;
}
+ if (ar->hw_params.num_peers)
+ ar->max_num_peers = ar->hw_params.num_peers;
+ else
+ ar->max_num_peers = max_num_peers;
+
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_HTT_OP_VERSION.
*/
@@ -2370,6 +2591,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ar->wmi.svc_map))
val |= WMI_10_4_TDLS_UAPSD_BUFFER_STA;
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI,
+ ar->wmi.svc_map))
+ val |= WMI_10_4_TX_DATA_ACK_RSSI;
+
status = ath10k_mac_ext_resource_config(ar, val);
if (status) {
ath10k_err(ar,
@@ -2405,7 +2630,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
* possible to implicitly make it correct by creating a dummy vdev and
* then deleting it.
*/
- if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ if (ar->hw_params.hw_filter_reset_required &&
+ mode == ATH10K_FIRMWARE_MODE_NORMAL) {
status = ath10k_core_reset_rx_filter(ar);
if (status) {
ath10k_err(ar,
@@ -2593,7 +2819,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
if (ret)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
- ret = ath10k_core_fetch_board_file(ar);
+ ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD);
if (ret) {
ath10k_err(ar, "failed to fetch board file: %d\n", ret);
goto err_free_firmware_files;
@@ -2602,6 +2828,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ath10k_debug_print_board_info(ar);
}
+ device_get_mac_address(ar->dev, ar->mac_addr, sizeof(ar->mac_addr));
+
ret = ath10k_core_init_firmware_features(ar);
if (ret) {
ath10k_err(ar, "fatal problem with firmware features: %d\n",
@@ -2714,9 +2942,11 @@ err:
return;
}
-int ath10k_core_register(struct ath10k *ar, u32 chip_id)
+int ath10k_core_register(struct ath10k *ar,
+ const struct ath10k_bus_params *bus_params)
{
- ar->chip_id = chip_id;
+ ar->chip_id = bus_params->chip_id;
+ ar->dev_type = bus_params->dev_type;
queue_work(ar->workqueue, &ar->register_work);
return 0;
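ath10k_core_register() now takes a bus-parameters struct rather than a bare chip id, so every bus backend follows the pattern the ahb probe above already shows. A compact sketch of the same call from any other backend; the function name is illustrative:

static int demo_bus_register(struct ath10k *ar, u32 chip_id)
{
	struct ath10k_bus_params bus_params;

	bus_params.dev_type = ATH10K_DEV_TYPE_LL; /* vs. ATH10K_DEV_TYPE_HL */
	bus_params.chip_id = chip_id;

	return ath10k_core_register(ar, &bus_params);
}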
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 9feea02e7d37..042418097cf9 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -92,14 +92,6 @@
struct ath10k;
-enum ath10k_bus {
- ATH10K_BUS_PCI,
- ATH10K_BUS_AHB,
- ATH10K_BUS_SDIO,
- ATH10K_BUS_USB,
- ATH10K_BUS_SNOC,
-};
-
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
{
switch (bus) {
@@ -461,6 +453,36 @@ struct ath10k_sta_tid_stats {
unsigned long int rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX];
};
+enum ath10k_counter_type {
+ ATH10K_COUNTER_TYPE_BYTES,
+ ATH10K_COUNTER_TYPE_PKTS,
+ ATH10K_COUNTER_TYPE_MAX,
+};
+
+enum ath10k_stats_type {
+ ATH10K_STATS_TYPE_SUCC,
+ ATH10K_STATS_TYPE_FAIL,
+ ATH10K_STATS_TYPE_RETRY,
+ ATH10K_STATS_TYPE_AMPDU,
+ ATH10K_STATS_TYPE_MAX,
+};
+
+struct ath10k_htt_data_stats {
+ u64 legacy[ATH10K_COUNTER_TYPE_MAX][ATH10K_LEGACY_NUM];
+ u64 ht[ATH10K_COUNTER_TYPE_MAX][ATH10K_HT_MCS_NUM];
+ u64 vht[ATH10K_COUNTER_TYPE_MAX][ATH10K_VHT_MCS_NUM];
+ u64 bw[ATH10K_COUNTER_TYPE_MAX][ATH10K_BW_NUM];
+ u64 nss[ATH10K_COUNTER_TYPE_MAX][ATH10K_NSS_NUM];
+ u64 gi[ATH10K_COUNTER_TYPE_MAX][ATH10K_GI_NUM];
+};
+
+struct ath10k_htt_tx_stats {
+ struct ath10k_htt_data_stats stats[ATH10K_STATS_TYPE_MAX];
+ u64 tx_duration;
+ u64 ba_fails;
+ u64 ack_fails;
+};
+
struct ath10k_sta {
struct ath10k_vif *arvif;
@@ -474,6 +496,7 @@ struct ath10k_sta {
struct work_struct update_wk;
u64 rx_duration;
+ struct ath10k_htt_tx_stats *tx_stats;
#ifdef CONFIG_MAC80211_DEBUGFS
/* protected by conf_mutex */
@@ -482,6 +505,8 @@ struct ath10k_sta {
/* Protected with ar->data_lock */
struct ath10k_sta_tid_stats tid_stats[IEEE80211_NUM_TIDS + 1];
#endif
+ /* Protected with ar->data_lock */
+ u32 peer_ps_state;
};
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
@@ -607,6 +632,7 @@ struct ath10k_debug {
u32 reg_addr;
u32 nf_cal_period;
void *cal_data;
+ u32 enable_extd_tx_stats;
};
enum ath10k_state {
@@ -861,6 +887,9 @@ struct ath10k_fw_components {
const struct firmware *board;
const void *board_data;
size_t board_len;
+ const struct firmware *ext_board;
+ const void *ext_board_data;
+ size_t ext_board_len;
struct ath10k_fw_file fw_file;
};
@@ -880,6 +909,16 @@ struct ath10k_per_peer_tx_stats {
u32 reserved2;
};
+enum ath10k_dev_type {
+ ATH10K_DEV_TYPE_LL,
+ ATH10K_DEV_TYPE_HL,
+};
+
+struct ath10k_bus_params {
+ u32 chip_id;
+ enum ath10k_dev_type dev_type;
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@@ -890,6 +929,7 @@ struct ath10k {
enum ath10k_hw_rev hw_rev;
u16 dev_id;
u32 chip_id;
+ enum ath10k_dev_type dev_type;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
@@ -908,7 +948,10 @@ struct ath10k {
u32 low_5ghz_chan;
u32 high_5ghz_chan;
bool ani_enabled;
+ /* protected by conf_mutex */
+ u8 ps_state_enable;
+ bool nlo_enabled;
bool p2p;
struct {
@@ -946,8 +989,12 @@ struct ath10k {
u32 subsystem_device;
bool bmi_ids_valid;
+ bool qmi_ids_valid;
+ u32 qmi_board_id;
u8 bmi_board_id;
+ u8 bmi_eboard_id;
u8 bmi_chip_id;
+ bool ext_bid_supported;
char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH];
} id;
@@ -1003,6 +1050,7 @@ struct ath10k {
struct completion install_key_done;
+ int last_wmi_vdev_start_status;
struct completion vdev_setup_done;
struct workqueue_struct *workqueue;
@@ -1167,7 +1215,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
const struct ath10k_fw_components *fw_components);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
void ath10k_core_stop(struct ath10k *ar);
-int ath10k_core_register(struct ath10k *ar, u32 chip_id);
+int ath10k_core_register(struct ath10k *ar,
+ const struct ath10k_bus_params *bus_params);
void ath10k_core_unregister(struct ath10k *ar);
+int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type);
+void ath10k_core_free_board_files(struct ath10k *ar);
#endif /* _CORE_H_ */
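The new ath10k_htt_tx_stats layout above is indexed first by outcome (succ/fail/retry/ampdu), then by counter type (bytes/packets), then by rate bucket. A hedged sketch of how a consumer might bump the VHT counters for one reported PPDU; the real update path lives in the HTT stats handler, which is not part of this hunk, and the function and parameter names are illustrative:

/* Caller is assumed to hold ar->data_lock, matching the locking note on
 * ath10k_sta above; mcs must be below ATH10K_VHT_MCS_NUM. */
static void demo_count_vht_tx(struct ath10k_htt_tx_stats *s,
			      int mcs, u64 pkts, u64 bytes)
{
	struct ath10k_htt_data_stats *d = &s->stats[ATH10K_STATS_TYPE_SUCC];

	d->vht[ATH10K_COUNTER_TYPE_PKTS][mcs] += pkts;
	d->vht[ATH10K_COUNTER_TYPE_BYTES][mcs] += bytes;
}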
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 0baaad90b8d1..15964b374f68 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -2042,6 +2042,61 @@ static const struct file_operations fops_btcoex = {
.open = simple_open
};
+static ssize_t ath10k_write_enable_extd_tx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ar->debug.enable_extd_tx_stats = filter;
+ ret = count;
+ goto out;
+ }
+
+ if (filter == ar->debug.enable_extd_tx_stats) {
+ ret = count;
+ goto out;
+ }
+
+ ar->debug.enable_extd_tx_stats = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_enable_extd_tx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+ ar->debug.enable_extd_tx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_enable_extd_tx_stats = {
+ .read = ath10k_read_enable_extd_tx_stats,
+ .write = ath10k_write_enable_extd_tx_stats,
+ .open = simple_open
+};
+
static ssize_t ath10k_write_peer_stats(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
@@ -2343,6 +2398,85 @@ static const struct file_operations fops_warm_hw_reset = {
.llseek = default_llseek,
};
+static void ath10k_peer_ps_state_disable(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath10k_write_ps_state_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ u32 param;
+ u8 ps_state_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
+ return -EINVAL;
+
+ if (ps_state_enable > 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->ps_state_enable == ps_state_enable) {
+ ret = count;
+ goto exit;
+ }
+
+ param = ar->wmi.pdev_param->peer_sta_ps_statechg_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, param, ps_state_enable);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable ps_state_enable: %d\n",
+ ret);
+ goto exit;
+ }
+ ar->ps_state_enable = ps_state_enable;
+
+ if (!ar->ps_state_enable)
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath10k_peer_ps_state_disable,
+ ar);
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_read_ps_state_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+ char buf[32];
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->ps_state_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_state_enable = {
+ .read = ath10k_read_ps_state_enable,
+ .write = ath10k_write_ps_state_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
@@ -2454,10 +2588,15 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar,
&fops_btcoex);
- if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar,
&fops_peer_stats);
+ debugfs_create_file("enable_extd_tx_stats", 0644,
+ ar->debug.debugfs_phy, ar,
+ &fops_enable_extd_tx_stats);
+ }
+
debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar,
&fops_fw_checksums);
@@ -2474,6 +2613,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar,
&fops_warm_hw_reset);
+ debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar,
+ &fops_ps_state_enable);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 0afca5c106b6..5cf16d690724 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -44,6 +44,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_USB = 0x00040000,
ATH10K_DBG_USB_BULK = 0x00080000,
ATH10K_DBG_SNOC = 0x00100000,
+ ATH10K_DBG_QMI = 0x00200000,
ATH10K_DBG_ANY = 0xffffffff,
};
@@ -128,6 +129,10 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
return ar->debug.fw_dbglog_level;
}
+static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
+{
+ return ar->debug.enable_extd_tx_stats;
+}
#else
static inline int ath10k_debug_start(struct ath10k *ar)
@@ -190,6 +195,11 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
return 0;
}
+static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
+{
+ return 0;
+}
+
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index a63c97e2c50c..b09cdc699c69 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -460,6 +460,33 @@ static const struct file_operations fops_peer_debug_trigger = {
.llseek = default_llseek,
};
+static ssize_t ath10k_dbg_sta_read_peer_ps_state(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ char buf[20];
+ int len = 0;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ arsta->peer_ps_state);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_ps_state = {
+ .open = simple_open,
+ .read = ath10k_dbg_sta_read_peer_ps_state,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static char *get_err_str(enum ath10k_pkt_rx_err i)
{
switch (i) {
@@ -626,9 +653,105 @@ static const struct file_operations fops_tid_stats_dump = {
.llseek = default_llseek,
};
+static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ struct ath10k_htt_data_stats *stats;
+ const char *str_name[ATH10K_STATS_TYPE_MAX] = {"succ", "fail",
+ "retry", "ampdu"};
+ const char *str[ATH10K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
+ int len = 0, i, j, k, retval = 0;
+ const int size = 2 * 4096;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ for (k = 0; k < ATH10K_STATS_TYPE_MAX; k++) {
+ for (j = 0; j < ATH10K_COUNTER_TYPE_MAX; j++) {
+ stats = &arsta->tx_stats->stats[k];
+ len += scnprintf(buf + len, size - len, "%s_%s\n",
+ str_name[k],
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " VHT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH10K_VHT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->vht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, " HT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH10K_HT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ", stats->ht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " BW %s (20,40,80,160 MHz)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->bw[j][0], stats->bw[j][1],
+ stats->bw[j][2], stats->bw[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->nss[j][0], stats->nss[j][1],
+ stats->nss[j][2], stats->nss[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " GI %s (LGI,SGI)\n",
+ str[j]);
+ len += scnprintf(buf + len, size - len, " %llu %llu\n",
+ stats->gi[j][0], stats->gi[j][1]);
+ len += scnprintf(buf + len, size - len,
+ " legacy rate %s (1,2 ... Mbps)\n ",
+ str[j]);
+ for (i = 0; i < ATH10K_LEGACY_NUM; i++)
+ len += scnprintf(buf + len, size - len, "%llu ",
+ stats->legacy[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ }
+ }
+
+ len += scnprintf(buf + len, size - len,
+ "\nTX duration\n %llu usecs\n",
+ arsta->tx_stats->tx_duration);
+ len += scnprintf(buf + len, size - len,
+ "BA fails\n %llu\n", arsta->tx_stats->ba_fails);
+ len += scnprintf(buf + len, size - len,
+ "ack fails\n %llu\n", arsta->tx_stats->ack_fails);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_tx_stats = {
+ .read = ath10k_dbg_sta_dump_tx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
+ struct ath10k *ar = hw->priv;
+
debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
@@ -637,4 +760,11 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
&fops_peer_debug_trigger);
debugfs_create_file("dump_tid_stats", 0400, dir, sta,
&fops_tid_stats_dump);
+
+ if (ath10k_peer_stats_enabled(ar) &&
+ ath10k_debug_is_extd_tx_stats_enabled(ar))
+ debugfs_create_file("tx_stats", 0400, dir, sta,
+ &fops_tx_stats);
+ debugfs_create_file("peer_ps_state", 0400, dir, sta,
+ &fops_peer_ps_state);
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 331b8d558791..28daed5981a1 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -53,7 +53,8 @@ static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+ if (htc->ar->dev_type != ATH10K_DEV_TYPE_HL)
+ dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
@@ -137,11 +138,14 @@ int ath10k_htc_send(struct ath10k_htc *htc,
ath10k_htc_prepare_tx_skb(ep, skb);
skb_cb->eid = eid;
- skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
- ret = dma_mapping_error(dev, skb_cb->paddr);
- if (ret) {
- ret = -EIO;
- goto err_credits;
+ if (ar->dev_type != ATH10K_DEV_TYPE_HL) {
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_credits;
+ }
}
sg_item.transfer_id = ep->eid;
@@ -157,7 +161,8 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return 0;
err_unmap:
- dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+ if (ar->dev_type != ATH10K_DEV_TYPE_HL)
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
@@ -803,8 +808,11 @@ setup:
ep->service_id,
&ep->ul_pipe_id,
&ep->dl_pipe_id);
- if (status)
+ if (status) {
+ ath10k_warn(ar, "unsupported HTC service id: %d\n",
+ ep->service_id);
return status;
+ }
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
@@ -838,6 +846,56 @@ struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
return skb;
}
+static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+}
+
+static int ath10k_htc_pktlog_connect(struct ath10k *ar)
+{
+ struct ath10k_htc_svc_conn_resp conn_resp;
+ struct ath10k_htc_svc_conn_req conn_req;
+ int status;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = NULL;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
+ conn_req.ep_ops.ep_tx_credits = NULL;
+
+ /* connect to control service */
+ conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
+ status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
+{
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+ int status;
+
+ status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
+ &ul_pipe_id,
+ &dl_pipe_id);
+ if (status) {
+ ath10k_warn(ar, "unsupported HTC service id: %d\n",
+ ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
+
+ return false;
+ }
+
+ return true;
+}
+
int ath10k_htc_start(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
@@ -871,6 +929,14 @@ int ath10k_htc_start(struct ath10k_htc *htc)
return status;
}
+ if (ath10k_htc_pktlog_svc_supported(ar)) {
+ status = ath10k_htc_pktlog_connect(ar);
+ if (status) {
+ ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
+ return status;
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 5d3ff80f3a1f..a76f7c9e2199 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -29,7 +29,6 @@
#include "htc.h"
#include "hw.h"
#include "rx_desc.h"
-#include "hw.h"
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
@@ -577,6 +576,8 @@ struct htt_mgmt_tx_completion {
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24
+#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0)
+
struct htt_rx_indication_hdr {
u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
__le16 peer_id;
@@ -719,6 +720,15 @@ struct htt_rx_indication {
struct htt_rx_indication_mpdu_range mpdu_ranges[0];
} __packed;
+/* High latency version of the RX indication */
+struct htt_rx_indication_hl {
+ struct htt_rx_indication_hdr hdr;
+ struct htt_rx_indication_ppdu ppdu;
+ struct htt_rx_indication_prefix prefix;
+ struct fw_rx_desc_hl fw_desc;
+ struct htt_rx_indication_mpdu_range mpdu_ranges[0];
+} __packed;
+
static inline struct htt_rx_indication_mpdu_range *
htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
{
@@ -731,6 +741,18 @@ static inline struct htt_rx_indication_mpdu_range *
return ptr;
}
+static inline struct htt_rx_indication_mpdu_range *
+ htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
+{
+ void *ptr = rx_ind;
+
+ ptr += sizeof(rx_ind->hdr)
+ + sizeof(rx_ind->ppdu)
+ + sizeof(rx_ind->prefix)
+ + sizeof(rx_ind->fw_desc);
+ return ptr;
+}
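Because struct htt_rx_indication_hl is declared __packed, with hdr, ppdu, prefix and fw_desc laid out back to back ahead of the flexible mpdu_ranges[] member, the pointer arithmetic above is equivalent to an offsetof() on the flexible member. A sketch of the same computation, for illustration only:

static inline struct htt_rx_indication_mpdu_range *
htt_rx_ind_get_mpdu_ranges_hl_alt(struct htt_rx_indication_hl *rx_ind)
{
	/* identical result, assuming no implicit padding (__packed) */
	return (void *)rx_ind +
	       offsetof(struct htt_rx_indication_hl, mpdu_ranges);
}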
+
enum htt_rx_flush_mpdu_status {
HTT_RX_FLUSH_MPDU_DISCARD = 0,
HTT_RX_FLUSH_MPDU_REORDER = 1,
@@ -840,7 +862,7 @@ struct htt_data_tx_completion {
} __packed;
} __packed;
u8 num_msdus;
- u8 rsvd0;
+ u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
__le16 msdus[0]; /* variable length based on %num_msdus */
} __packed;
@@ -1641,6 +1663,7 @@ struct htt_resp {
struct htt_mgmt_tx_completion mgmt_tx_completion;
struct htt_data_tx_completion data_tx_completion;
struct htt_rx_indication rx_ind;
+ struct htt_rx_indication_hl rx_ind_hl;
struct htt_rx_fragment_indication rx_frag_ind;
struct htt_rx_peer_map peer_map;
struct htt_rx_peer_unmap peer_unmap;
@@ -1994,6 +2017,31 @@ struct htt_rx_desc {
u8 msdu_payload[0];
};
+#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
+#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0
+#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000
+#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12
+#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
+#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13
+#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00008000
+#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 15
+#define HTT_RX_DESC_HL_INFO_FRAGMENT_MASK 0x00010000
+#define HTT_RX_DESC_HL_INFO_FRAGMENT_LSB 16
+#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000
+#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17
+
+struct htt_rx_desc_base_hl {
+ __le32 info; /* HTT_RX_DESC_HL_INFO_ */
+};
+
+struct htt_rx_chan_info {
+ __le16 primary_chan_center_freq_mhz;
+ __le16 contig_chan1_center_freq_mhz;
+ __le16 contig_chan2_center_freq_mhz;
+ u8 phy_mode;
+ u8 reserved;
+} __packed;
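The HTT_RX_DESC_HL_INFO_* mask/LSB pairs are meant for the driver's MS() mask-and-shift helper; a sketch of decoding the descriptor word (the local variable names are illustrative):

u32 info = __le32_to_cpu(rx_desc_hl->info);	/* struct htt_rx_desc_base_hl */
u16 seq_num = MS(info, HTT_RX_DESC_HL_INFO_SEQ_NUM);
bool encrypted = !!MS(info, HTT_RX_DESC_HL_INFO_ENCRYPTED);
bool is_frag = !!MS(info, HTT_RX_DESC_HL_INFO_FRAGMENT);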
+
#define HTT_RX_DESC_ALIGN 8
#define HTT_MAC_ADDR_LEN 6
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 4d1cd90d6d27..ffec98f7be50 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -265,6 +265,9 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
struct ath10k_htt *htt = &ar->htt;
int ret;
+ if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ return 0;
+
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
@@ -279,6 +282,9 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
+ if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL)
+ return;
+
del_timer_sync(&htt->rx_ring.refill_retry_timer);
skb_queue_purge(&htt->rx_msdus_q);
@@ -570,6 +576,9 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
+ if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ return 0;
+
htt->rx_confused = false;
/* XXX: The fill level could be changed during runtime in response to
@@ -1176,11 +1185,11 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
*/
/* This probably shouldn't happen but warn just in case */
- if (unlikely(WARN_ON_ONCE(!is_first)))
+ if (WARN_ON_ONCE(!is_first))
return;
/* This probably shouldn't happen but warn just in case */
- if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
+ if (WARN_ON_ONCE(!(is_first && is_last)))
return;
skb_trim(msdu, msdu->len - FCS_LEN);
@@ -1846,8 +1855,116 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return 0;
}
-static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
- struct htt_rx_indication *rx)
+static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
+ struct htt_rx_indication_hl *rx,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_peer *peer;
+ struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ struct fw_rx_desc_hl *fw_desc;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_rx_status *rx_status;
+ u16 peer_id;
+ u8 rx_desc_len;
+ int num_mpdu_ranges;
+ size_t tot_hdr_len;
+ struct ieee80211_channel *ch;
+
+ peer_id = __le16_to_cpu(rx->hdr.peer_id);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ spin_unlock_bh(&ar->data_lock);
+ if (!peer)
+ ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
+
+ num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+ mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
+ fw_desc = &rx->fw_desc;
+ rx_desc_len = fw_desc->len;
+
+ /* I have not yet seen any case where num_mpdu_ranges > 1.
+ * qcacld does not seem to handle that case either, so we introduce the
+ * same limitation here as well.
+ */
+ if (num_mpdu_ranges > 1)
+ ath10k_warn(ar,
+ "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
+ num_mpdu_ranges);
+
+ if (mpdu_ranges->mpdu_range_status !=
+ HTT_RX_IND_MPDU_STATUS_OK) {
+ ath10k_warn(ar, "MPDU range status: %d\n",
+ mpdu_ranges->mpdu_range_status);
+ goto err;
+ }
+
+ /* Strip off everything preceding the MAC header before delivery to
+ * mac80211
+ */
+ tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
+ sizeof(rx->ppdu) + sizeof(rx->prefix) +
+ sizeof(rx->fw_desc) +
+ sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
+ skb_pull(skb, tot_hdr_len);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ rx_status = IEEE80211_SKB_RXCB(skb);
+ rx_status->chains |= BIT(0);
+ rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ rx->ppdu.combined_rssi;
+ rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
+
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->scan_channel;
+ if (!ch)
+ ch = ar->rx_channel;
+ if (!ch)
+ ch = ath10k_htt_rx_h_any_channel(ar);
+ if (!ch)
+ ch = ar->tgt_oper_chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ch) {
+ rx_status->band = ch->band;
+ rx_status->freq = ch->center_freq;
+ }
+ if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
+ rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rx_status->flag |= RX_FLAG_AMSDU_MORE;
+
+ /* Not entirely sure about this, but all frames from the chipset have
+ * the protected flag set even though they have already been decrypted.
+ * Clearing this flag is necessary in order for mac80211 not to drop
+ * the frame.
+ * TODO: Verify this is always the case or find out a way to check
+ * if there has been hw decryption.
+ */
+ if (ieee80211_has_protected(hdr->frame_control)) {
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ rx_status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ }
+
+ ieee80211_rx_ni(ar->hw, skb);
+
+ /* We have delivered the skb to the upper layers (mac80211) so we
+ * must not free it.
+ */
+ return false;
+err:
+ /* Tell the caller that it must free the skb since we have not
+ * consumed it
+ */
+ return true;
+}
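The bool return value is an skb-ownership contract: the HL branch of the T2H dispatcher below propagates it, so the transport frees the buffer only when it was not handed to mac80211. Caller-side sketch:

/* true means the skb was not consumed and must be freed by the caller */
if (ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb))
	dev_kfree_skb_any(skb);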
+
+static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
+ struct htt_rx_indication *rx)
{
struct ath10k *ar = htt->ar;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
@@ -1884,7 +2001,9 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
struct htt_resp *resp = (struct htt_resp *)skb->data;
struct htt_tx_done tx_done = {};
int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
- __le16 msdu_id;
+ __le16 msdu_id, *msdus;
+ bool rssi_enabled = false;
+ u8 msdu_count = 0;
int i;
switch (status) {
@@ -1908,10 +2027,30 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
resp->data_tx_completion.num_msdus);
- for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
- msdu_id = resp->data_tx_completion.msdus[i];
+ msdu_count = resp->data_tx_completion.num_msdus;
+
+ if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI)
+ rssi_enabled = true;
+
+ for (i = 0; i < msdu_count; i++) {
+ msdus = resp->data_tx_completion.msdus;
+ msdu_id = msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
+ if (rssi_enabled) {
+ /* The total number of MSDU ids should be even;
+ * if an odd number of MSDUs is sent, the firmware
+ * fills the last msdu id with 0xffff
+ */
+ if (msdu_count & 0x01) {
+ msdu_id = msdus[msdu_count + i + 1];
+ tx_done.ack_rssi = __le16_to_cpu(msdu_id);
+ } else {
+ msdu_id = msdus[msdu_count + i];
+ tx_done.ack_rssi = __le16_to_cpu(msdu_id);
+ }
+ }
+
/* kfifo_put: In practice firmware shouldn't fire off per-CE
* interrupt and main interrupt (MSI/-X range case) for the same
* HTC service so it should be safe to use kfifo_put w/o lock.
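The index arithmetic above assumes the firmware packs the completion as msdu_count ids, padded to an even count, followed by one ack RSSI word per MSDU. A worked layout for both parities (values hypothetical):

/* msdu_count = 3 (odd):  msdus[0..2] = ids, msdus[3] = 0xffff pad,
 *                        msdus[4..6] = RSSI -> id i pairs with
 *                        msdus[msdu_count + i + 1]
 * msdu_count = 2 (even): msdus[0..1] = ids, msdus[2..3] = RSSI
 *                        -> id i pairs with msdus[msdu_count + i]
 */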
@@ -2488,7 +2627,7 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb_any(skb);
}
-static inline bool is_valid_legacy_rate(u8 rate)
+static inline int ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
{
static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
18, 24, 36, 48, 54};
@@ -2496,10 +2635,114 @@ static inline bool is_valid_legacy_rate(u8 rate)
for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
if (rate == legacy_rates[i])
- return true;
+ return i;
}
- return false;
+ ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
+ return -EINVAL;
+}
+
+static void
+ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
+ struct ath10k_sta *arsta,
+ struct ath10k_per_peer_tx_stats *pstats,
+ u8 legacy_rate_idx)
+{
+ struct rate_info *txrate = &arsta->txrate;
+ struct ath10k_htt_tx_stats *tx_stats;
+ int ht_idx, gi, mcs, bw, nss;
+
+ if (!arsta->tx_stats)
+ return;
+
+ tx_stats = arsta->tx_stats;
+ gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI);
+ ht_idx = txrate->mcs + txrate->nss * 8;
+ mcs = txrate->mcs;
+ bw = txrate->bw;
+ nss = txrate->nss;
+
+#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
+
+ if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) {
+ STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
+ STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
+ STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
+ } else if (txrate->flags == RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
+ STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
+ STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
+ } else {
+ mcs = legacy_rate_idx;
+
+ STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
+ STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
+ STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
+ }
+
+ if (ATH10K_HW_AMPDU(pstats->flags)) {
+ tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
+
+ if (txrate->flags == RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
+ pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
+ pstats->succ_pkts + pstats->retry_pkts;
+ } else {
+ STATS_OP_FMT(AMPDU).vht[0][mcs] +=
+ pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).vht[1][mcs] +=
+ pstats->succ_pkts + pstats->retry_pkts;
+ }
+ STATS_OP_FMT(AMPDU).bw[0][bw] +=
+ pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).nss[0][nss] +=
+ pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).gi[0][gi] +=
+ pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).bw[1][bw] +=
+ pstats->succ_pkts + pstats->retry_pkts;
+ STATS_OP_FMT(AMPDU).nss[1][nss] +=
+ pstats->succ_pkts + pstats->retry_pkts;
+ STATS_OP_FMT(AMPDU).gi[1][gi] +=
+ pstats->succ_pkts + pstats->retry_pkts;
+ } else {
+ tx_stats->ack_fails +=
+ ATH10K_HW_BA_FAIL(pstats->flags);
+ }
+
+ STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
+
+ STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
+ STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts;
+ STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
+
+ STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
+
+ STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
+ STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts;
+ STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
+
+ STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
+
+ STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
+ STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts;
+ STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
}
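Each STATS_OP_FMT() target is a small two-row table: the first index selects the counter type (bytes vs. packets, matching the {"bytes", "packets"} order the debugfs dump iterates), the second the rate bucket. Expanded without the macro, one of the updates reads (enum spellings assumed from the same series):

/* STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes; expands to: */
tx_stats->stats[ATH10K_STATS_TYPE_SUCC]
	.vht[ATH10K_COUNTER_TYPE_BYTES][mcs] += pstats->succ_bytes;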
static void
@@ -2509,6 +2752,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
{
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
u8 rate = 0, sgi;
+ s8 rate_idx = 0;
struct rate_info txrate;
lockdep_assert_held(&ar->data_lock);
@@ -2536,17 +2780,12 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
-
- if (!is_valid_legacy_rate(rate)) {
- ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
- rate);
- return;
- }
-
/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
- rate *= 10;
- if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
- rate = rate - 5;
+ if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
+ rate = 5;
+ rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
+ if (rate_idx < 0)
+ return;
arsta->txrate.legacy = rate;
} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
@@ -2561,6 +2800,10 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
arsta->txrate.nss = txrate.nss;
arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
+
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar))
+ ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
+ rate_idx);
}
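A worked pass through the CCK quirk above:

/* firmware reports CCK 5.5 Mbps with a legacy rate field of 6, which
 * would collide with 6 Mbps OFDM:
 *   rate = 6, txrate.flags == WMI_RATE_PREAMBLE_CCK  ->  rate = 5
 *   legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, 18, 24, 36, 48, 54}
 *   ath10k_get_legacy_rate_idx(ar, 5) == 2, the 5.5 Mbps bucket
 */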
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
@@ -2702,7 +2945,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
- ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
+ if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ return ath10k_htt_rx_proc_rx_ind_hl(htt,
+ &resp->rx_ind_hl,
+ skb);
+ else
+ ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
@@ -2986,11 +3234,16 @@ static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};
+static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
+};
+
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
- if (ar->hw_params.target_64bit)
+ if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ htt->rx_ops = &htt_rx_ops_hl;
+ else if (ar->hw_params.target_64bit)
htt->rx_ops = &htt_rx_ops_64;
else
htt->rx_ops = &htt_rx_ops_32;
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 7cff0d52338f..ad05ab714c9b 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -495,6 +495,9 @@ int ath10k_htt_tx_start(struct ath10k_htt *htt)
if (htt->tx_mem_allocated)
return 0;
+ if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ return 0;
+
ret = ath10k_htt_tx_alloc_buf(htt);
if (ret)
goto free_idr_pending_tx;
@@ -934,6 +937,57 @@ static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
return 0;
}
+static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring32 *ring;
+ const int num_rx_ring = 1;
+ u16 flags;
+ int len;
+ int ret;
+
+ /*
+ * the HW expects the buffer to be an integral number of 4-byte
+ * "words"
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+ BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
+ + (sizeof(*ring) * num_rx_ring);
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_cmd *)skb->data;
+ ring = &cmd->rx_setup_32.rings[0];
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+ cmd->rx_setup_32.hdr.num_rings = 1;
+
+ flags = 0;
+ flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+ flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+
+ memset(ring, 0, sizeof(*ring));
+ ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
+ ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+ ring->flags = __cpu_to_le16(flags);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu)
@@ -1123,7 +1177,8 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
return 0;
err_unmap_msdu:
- dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ if (ar->dev_type != ATH10K_DEV_TYPE_HL)
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
@@ -1134,6 +1189,94 @@ err:
return res;
}
+#define HTT_TX_HL_NEEDED_HEADROOM \
+ (unsigned int)(sizeof(struct htt_cmd_hdr) + \
+ sizeof(struct htt_data_tx_desc) + \
+ sizeof(struct ath10k_htc_hdr))
+
+static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ int res, data_len;
+ struct htt_cmd_hdr *cmd_hdr;
+ struct htt_data_tx_desc *tx_desc;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ struct sk_buff *tmp_skb;
+ bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+ u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+ u8 flags0 = 0;
+ u16 flags1 = 0;
+
+ data_len = msdu->len;
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ /* fall through */
+ case ATH10K_HW_TXRX_ETHERNET:
+ flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ break;
+ }
+
+ if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ }
+
+ /* Prepend the HTT header and TX desc struct to the data message
+ * and realloc the skb if it does not have enough headroom.
+ */
+ if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
+ tmp_skb = msdu;
+
+ ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
+ "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
+ skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
+ msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
+ kfree_skb(tmp_skb);
+ if (!msdu) {
+ ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
+ res = -ENOMEM;
+ goto out;
+ }
+ }
+
+ skb_push(msdu, sizeof(*cmd_hdr));
+ skb_push(msdu, sizeof(*tx_desc));
+ cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
+ tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));
+
+ cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ tx_desc->flags0 = flags0;
+ tx_desc->flags1 = __cpu_to_le16(flags1);
+ tx_desc->len = __cpu_to_le16(data_len);
+ tx_desc->id = 0;
+ tx_desc->frags_paddr = 0; /* always zero */
+ /* Initialize peer_id to INVALID_PEER because this is NOT
+ * the reinjection path
+ */
+ tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+
+ res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);
+
+out:
+ return res;
+}
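HTT_TX_HL_NEEDED_HEADROOM is simply the sum of the three headers that end up in front of the payload: the HTT command header and TX descriptor pushed here, plus the HTC header that ath10k_htc_send() prepends later. Layout sketch:

/* frame layout after ath10k_htc_send(), front to back:
 *   struct ath10k_htc_hdr | struct htt_cmd_hdr |
 *   struct htt_data_tx_desc | 802.11/ethernet payload
 */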
+
static int ath10k_htt_tx_32(struct ath10k_htt *htt,
enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu)
@@ -1561,11 +1704,19 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
};
+static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
+ .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
+ .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
+ .htt_tx = ath10k_htt_tx_hl,
+};
+
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
- if (ar->hw_params.target_64bit)
+ if (ar->dev_type == ATH10K_DEV_TYPE_HL)
+ htt->tx_ops = &htt_tx_ops_hl;
+ else if (ar->hw_params.target_64bit)
htt->tx_ops = &htt_tx_ops_64;
else
htt->tx_ops = &htt_tx_ops_32;
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 677535b3d207..af8ae8117c62 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include "core.h"
#include "hw.h"
#include "hif.h"
@@ -918,6 +919,196 @@ static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
return 0;
}
+/* Program CPU_ADDR_MSB to allow access to a different
+ * memory region.
+ */
+static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
+{
+ u32 address = SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS;
+
+ ath10k_hif_write32(ar, address, msb);
+}
+
+/* 1. Write to a memory region of the target, such as IRAM and DRAM.
+ * 2. Target addresses (0 ~ 0x00100000 and 0x00400000 ~ 0x00500000)
+ * can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
+ * 3. In order to access regions other than the above,
+ * we need to set the value of the CPU_ADDR_MSB register.
+ * 4. Target memory access is limited to 1M at a time. If the size is larger
+ * than 1M, it needs to be split and CPU_ADDR_MSB programmed accordingly.
+ */
+static int ath10k_hw_diag_segment_msb_download(struct ath10k *ar,
+ const void *buffer,
+ u32 address,
+ u32 length)
+{
+ u32 addr = address & REGION_ACCESS_SIZE_MASK;
+ int ret, remain_size, size;
+ const u8 *buf;
+
+ ath10k_hw_map_target_mem(ar, CPU_ADDR_MSB_REGION_VAL(address));
+
+ if (addr + length > REGION_ACCESS_SIZE_LIMIT) {
+ size = REGION_ACCESS_SIZE_LIMIT - addr;
+ remain_size = length - size;
+
+ ret = ath10k_hif_diag_write(ar, address, buffer, size);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to download the first %d bytes segment to address:0x%x: %d\n",
+ size, address, ret);
+ goto done;
+ }
+
+ /* Change msb to the next memory region */
+ ath10k_hw_map_target_mem(ar,
+ CPU_ADDR_MSB_REGION_VAL(address) + 1);
+ buf = buffer + size;
+ ret = ath10k_hif_diag_write(ar,
+ address & ~REGION_ACCESS_SIZE_MASK,
+ buf, remain_size);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to download the second %d bytes segment to address:0x%x: %d\n",
+ remain_size,
+ address & ~REGION_ACCESS_SIZE_MASK,
+ ret);
+ goto done;
+ }
+ } else {
+ ret = ath10k_hif_diag_write(ar, address, buffer, length);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to download the only %d bytes segment to address:0x%x: %d\n",
+ length, address, ret);
+ goto done;
+ }
+ }
+
+done:
+ /* Change msb to DRAM */
+ ath10k_hw_map_target_mem(ar,
+ CPU_ADDR_MSB_REGION_VAL(DRAM_BASE_ADDRESS));
+ return ret;
+}
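A worked split with illustrative numbers: writing length 0x30000 at target address 0x4f0000 lands 0xf0000 into window 0x4, so only 0x10000 bytes fit before the 1M boundary; the MSB register is then bumped and the remainder is written at the base of the next window:

/* address = 0x4f0000, length = 0x30000 (illustrative):
 *   addr        = address & REGION_ACCESS_SIZE_MASK    = 0xf0000
 *   size        = REGION_ACCESS_SIZE_LIMIT - addr      = 0x10000 (MSB 0x4)
 *   remain_size = length - size                        = 0x20000 (MSB 0x5)
 *   second write at address & ~REGION_ACCESS_SIZE_MASK = 0x400000
 */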
+
+static int ath10k_hw_diag_segment_download(struct ath10k *ar,
+ const void *buffer,
+ u32 address,
+ u32 length)
+{
+ if (address >= DRAM_BASE_ADDRESS + REGION_ACCESS_SIZE_LIMIT)
+ /* Needs to change MSB for memory write */
+ return ath10k_hw_diag_segment_msb_download(ar, buffer,
+ address, length);
+ else
+ return ath10k_hif_diag_write(ar, address, buffer, length);
+}
+
+int ath10k_hw_diag_fast_download(struct ath10k *ar,
+ u32 address,
+ const void *buffer,
+ u32 length)
+{
+ const u8 *buf = buffer;
+ bool sgmt_end = false;
+ u32 base_addr = 0;
+ u32 base_len = 0;
+ u32 left = 0;
+ struct bmi_segmented_file_header *hdr;
+ struct bmi_segmented_metadata *metadata;
+ int ret = 0;
+
+ if (length < sizeof(*hdr))
+ return -EINVAL;
+
+ /* Check the firmware header. If it does not have the correct
+ * magic number or is compressed, return an error.
+ */
+ hdr = (struct bmi_segmented_file_header *)buf;
+ if (__le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "Not a supported firmware, magic_num:0x%x\n",
+ hdr->magic_num);
+ return -EINVAL;
+ }
+
+ if (hdr->file_flags != 0) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "Not a supported firmware, file_flags:0x%x\n",
+ hdr->file_flags);
+ return -EINVAL;
+ }
+
+ metadata = (struct bmi_segmented_metadata *)hdr->data;
+ left = length - sizeof(*hdr);
+
+ while (left > 0) {
+ if (left < sizeof(*metadata)) {
+ ath10k_warn(ar, "firmware segment is truncated: %d\n",
+ left);
+ ret = -EINVAL;
+ break;
+ }
+ base_addr = __le32_to_cpu(metadata->addr);
+ base_len = __le32_to_cpu(metadata->length);
+ buf = metadata->data;
+ left -= sizeof(*metadata);
+
+ switch (base_len) {
+ case BMI_SGMTFILE_BEGINADDR:
+ /* base_addr is the start address to run */
+ ret = ath10k_bmi_set_start(ar, base_addr);
+ base_len = 0;
+ break;
+ case BMI_SGMTFILE_DONE:
+ /* no more segment */
+ base_len = 0;
+ sgmt_end = true;
+ ret = 0;
+ break;
+ case BMI_SGMTFILE_BDDATA:
+ case BMI_SGMTFILE_EXEC:
+ ath10k_warn(ar,
+ "firmware has unsupported segment:%d\n",
+ base_len);
+ ret = -EINVAL;
+ break;
+ default:
+ if (base_len > left) {
+ /* sanity check */
+ ath10k_warn(ar,
+ "firmware has invalid segment length, %d > %d\n",
+ base_len, left);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = ath10k_hw_diag_segment_download(ar,
+ buf,
+ base_addr,
+ base_len);
+
+ if (ret)
+ ath10k_warn(ar,
+ "failed to download firmware via diag interface:%d\n",
+ ret);
+ break;
+ }
+
+ if (ret || sgmt_end)
+ break;
+
+ metadata = (struct bmi_segmented_metadata *)(buf + base_len);
+ left -= base_len;
+ }
+
+ if (ret == 0)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot firmware fast diag download successfully.\n");
+ return ret;
+}
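The walker above consumes a simple segmented container: one file header, then metadata records whose length field is either a real payload size or a BMI_SGMTFILE_* sentinel. Image layout sketch:

/* segmented firmware image (sketch):
 *   struct bmi_segmented_file_header { magic_num, file_flags, data[] }
 *   repeated struct bmi_segmented_metadata { addr, length, data[] }:
 *     length == BMI_SGMTFILE_BEGINADDR -> addr is the entry point, no data
 *     length == BMI_SGMTFILE_DONE      -> end of image, no data
 *     otherwise                        -> write data[length] to target addr
 */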
+
const struct ath10k_hw_ops qca988x_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 977f79ebb4fd..1b5da272d18c 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -21,6 +21,14 @@
#include "targaddrs.h"
+enum ath10k_bus {
+ ATH10K_BUS_PCI,
+ ATH10K_BUS_AHB,
+ ATH10K_BUS_SDIO,
+ ATH10K_BUS_USB,
+ ATH10K_BUS_SNOC,
+};
+
#define ATH10K_FW_DIR "ath10k"
#define QCA988X_2_0_DEVICE_ID_UBNT (0x11ac)
@@ -109,6 +117,7 @@ enum qca9377_chip_id_rev {
#define QCA9984_HW_1_0_CHIP_ID_REV 0x0
#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0"
#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9984_HW_1_0_EBOARD_DATA_FILE "eboard.bin"
#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA9888 2.0 defines */
@@ -221,6 +230,7 @@ enum ath10k_fw_htt_op_version {
enum ath10k_bd_ie_type {
/* contains sub IEs of enum ath10k_bd_ie_board_type */
ATH10K_BD_IE_BOARD = 0,
+ ATH10K_BD_IE_BOARD_EXT = 1,
};
enum ath10k_bd_ie_board_type {
@@ -389,6 +399,11 @@ extern const struct ath10k_hw_ce_regs qcax_ce_regs;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
+int ath10k_hw_diag_fast_download(struct ath10k *ar,
+ u32 address,
+ const void *buffer,
+ u32 length);
+
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
@@ -501,6 +516,7 @@ struct ath10k_hw_clk_params {
struct ath10k_hw_params {
u32 id;
u16 dev_id;
+ enum ath10k_bus bus;
const char *name;
u32 patch_load_addr;
int uart_pin;
@@ -539,6 +555,8 @@ struct ath10k_hw_params {
const char *dir;
const char *board;
size_t board_size;
+ const char *eboard;
+ size_t ext_board_size;
size_t board_ext_size;
} fw;
@@ -589,6 +607,14 @@ struct ath10k_hw_params {
/* Number of bytes to be the offset for each FFT sample */
int spectral_bin_offset;
+
+ /* targets which require hw filter reset during boot up,
+ * to avoid it sending spurious acks.
+ */
+ bool hw_filter_reset_required;
+
+ /* target supporting fw download via diag ce */
+ bool fw_diag_ce_download;
};
struct htt_rx_desc;
@@ -1124,4 +1150,15 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020
/* qca6174 PLL offset/mask end */
+/* CPU_ADDR_MSB is a register; bits [3:0] specify which 1M memory
+ * region is accessed.
+ * If the host wants to access 0xX12345 on the target, then CPU_ADDR_MSB[3:0]
+ * is 0xX.
+ * The following macros extract the 0xX and define the size limit.
+ */
+#define CPU_ADDR_MSB_REGION_MASK GENMASK(23, 20)
+#define CPU_ADDR_MSB_REGION_VAL(X) FIELD_GET(CPU_ADDR_MSB_REGION_MASK, X)
+#define REGION_ACCESS_SIZE_LIMIT 0x100000
+#define REGION_ACCESS_SIZE_MASK (REGION_ACCESS_SIZE_LIMIT - 1)
+
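For example, for the target address 0x00512345, CPU_ADDR_MSB_REGION_VAL() extracts bits [23:20] and the mask gives the offset within the 1M window:

u32 msb = CPU_ADDR_MSB_REGION_VAL(0x00512345);	/* FIELD_GET -> 0x5 */
u32 off = 0x00512345 & REGION_ACCESS_SIZE_MASK;	/* 0x12345 */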
#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 90f9372dec25..a1c2801ded10 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -18,6 +18,7 @@
#include "mac.h"
+#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/acpi.h>
@@ -29,7 +30,6 @@
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
-#include "wmi.h"
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"
@@ -156,6 +156,22 @@ u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
return 0;
}
+static int ath10k_mac_get_rate_hw_value(int bitrate)
+{
+ int i;
+ u8 hw_value_prefix = 0;
+
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
+
+ for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) {
+ if (ath10k_rates[i].bitrate == bitrate)
+ return hw_value_prefix | ath10k_rates[i].hw_value;
+ }
+
+ return -EINVAL;
+}
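The return value follows the driver's hardware ratecode convention: the preamble type in bits [7:6] ORed over the rate table's hw_value in the low bits. For a CCK bitrate (bitrate fields are in 100 kbps units):

/* 5.5 Mbps CCK: hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6 = 0x40 */
int hw = ath10k_mac_get_rate_hw_value(55);
/* hw < 0 for unknown bitrates, otherwise
 * hw == (WMI_RATE_PREAMBLE_CCK << 6) | ath10k_rates[i].hw_value
 */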
+
static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
{
switch ((mcs_map >> (2 * nss)) & 0x3) {
@@ -967,7 +983,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
if (time_left == 0)
return -ETIMEDOUT;
- return 0;
+ return ar->last_wmi_vdev_start_status;
}
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
@@ -4681,6 +4697,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_core_stop;
}
+ if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
+ ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
+ if (ret) {
+ ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
+ goto err_core_stop;
+ }
+ }
+
if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
ret = ath10k_wmi_adaptive_qcs(ar, true);
if (ret) {
@@ -5451,9 +5475,10 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
u16 bitrate, hw_value;
- u8 rate;
- int rateidx, ret = 0;
+ u8 rate, basic_rate_idx;
+ int rateidx, ret = 0, hw_rate_code;
enum nl80211_band band;
+ const struct ieee80211_supported_band *sband;
mutex_lock(&ar->conf_mutex);
@@ -5659,6 +5684,30 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) {
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ sband = ar->hw->wiphy->bands[def.chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ vdev_param = ar->wmi.vdev_param->mgmt_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -6215,6 +6264,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
@@ -6243,6 +6293,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ar->num_stations + 1, ar->max_num_stations,
ar->num_peers + 1, ar->max_num_peers);
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
+ GFP_KERNEL);
+ if (!arsta->tx_stats)
+ goto exit;
+ }
+
num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
@@ -6328,6 +6385,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar))
+ kfree(arsta->tx_stats);
+
if (sta->tdls) {
ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
sta,
@@ -6768,23 +6828,17 @@ static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
-static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
+void ath10k_mac_wait_tx_complete(struct ath10k *ar)
{
- struct ath10k *ar = hw->priv;
bool skip;
long time_left;
/* mac80211 doesn't care if we really xmit queued frames or not
* we'll collect those frames either way if we stop/delete vdevs
*/
- if (drop)
- return;
-
- mutex_lock(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_WEDGED)
- goto skip;
+ return;
time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
bool empty;
@@ -6803,8 +6857,29 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (time_left == 0 || skip)
ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
skip, ar->state, time_left);
+}
+
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif;
+ u32 bitmap;
+
+ if (drop) {
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bitmap = ~(1 << WMI_MGMT_TID);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath10k_wmi_peer_flush(ar, arvif->vdev_id,
+ arvif->bssid, bitmap);
+ }
+ }
+ return;
+ }
-skip:
+ mutex_lock(&ar->conf_mutex);
+ ath10k_mac_wait_tx_complete(ar);
mutex_unlock(&ar->conf_mutex);
}
@@ -8148,6 +8223,24 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
},
};
+static const struct
+ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
+ {
+ .limits = ath10k_10_4_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+ .max_interfaces = 16,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .beacon_int_min_gcd = 100,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
+ },
+};
+
static void ath10k_get_arvif_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -8310,6 +8403,10 @@ int ath10k_mac_register(struct ath10k *ar)
void *channels;
int ret;
+ if (!is_valid_ether_addr(ar->mac_addr)) {
+ ath10k_warn(ar, "invalid MAC address; choosing random\n");
+ eth_random_addr(ar->mac_addr);
+ }
SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
SET_IEEE80211_DEV(ar->hw, ar->dev);
@@ -8359,6 +8456,7 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
}
+ wiphy_read_of_freq_limits(ar->hw->wiphy);
ath10k_mac_setup_ht_vht_cap(ar);
ar->hw->wiphy->interface_modes =
@@ -8414,6 +8512,18 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+ if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
+ ar->hw->wiphy->max_sched_scan_reqs = 1;
+ ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+ ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+ ar->hw->wiphy->max_sched_scan_plan_interval =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+ ar->hw->wiphy->max_sched_scan_plan_iterations =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ }
+
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
ar->hw->sta_data_size = sizeof(struct ath10k_sta);
ar->hw->txq_data_size = sizeof(struct ath10k_txq);
@@ -8463,6 +8573,11 @@ int ath10k_mac_register(struct ath10k *ar)
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
+ test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
+
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
@@ -8506,6 +8621,13 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_10_4_if_comb);
+ if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+ ar->wmi.svc_map)) {
+ ar->hw->wiphy->iface_combinations =
+ ath10k_10_4_bcn_int_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb);
+ }
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -8545,12 +8667,6 @@ int ath10k_mac_register(struct ath10k *ar)
}
if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
- ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
- if (ret) {
- ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
- goto err_dfs_detector_exit;
- }
-
ar->hw->wiphy->features |=
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 81f8d6c0af35..570493d2d648 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -82,6 +82,7 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
u16 peer_id,
u8 tid);
int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
+void ath10k_mac_wait_tx_complete(struct ath10k *ar);
static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
struct sk_buff *skb)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index af2cf55c4c1e..01b4edb00e9e 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -192,7 +192,7 @@ static struct ce_attr host_ce_config_wlan[] = {
/* CE7: ce_diag, the Diagnostic Window */
{
- .flags = CE_ATTR_FLAGS,
+ .flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
.src_nentries = 2,
.src_sz_max = DIAG_TRANSFER_LIMIT,
.dest_nentries = 2,
@@ -870,6 +870,21 @@ static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
return val;
}
+/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
+ * Supports access to target space below 1M for qca6174 and qca9377.
+ * If the target address is below 1M, bit [20] of the converted CE
+ * address is 0; otherwise it is 1.
+ */
+static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+ & 0x7ff) << 21;
+ val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
+ return val;
+}
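Worked conversions with illustrative addresses:

/* addr = 0x00012345 (< 1M):
 *   val = ((CORE_CTRL & 0x7ff) << 21) | 0x12345           (bit 20 clear)
 * addr = 0x00412345 (>= 1M):
 *   val = ((CORE_CTRL & 0x7ff) << 21) | 0x100000 | 0x12345 (bit 20 set)
 */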
+
static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0, region = addr & 0xfffff;
@@ -931,6 +946,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
}
+ /* The address supplied by the caller is in the
+ * Target CPU virtual address space.
+ *
+ * In order to use this address with the diagnostic CE,
+ * convert it from Target CPU virtual address space
+ * to CE address space
+ */
+ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
+
remaining_bytes = nbytes;
ce_data = ce_data_base;
while (remaining_bytes) {
@@ -942,16 +966,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
/* Request CE to send from Target(!) address to Host buffer */
- /*
- * The address supplied by the caller is in the
- * Target CPU virtual address space.
- *
- * In order to use this address with the diagnostic CE,
- * convert it from Target CPU virtual address space
- * to CE address space
- */
- address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
-
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
0);
if (ret)
@@ -960,8 +974,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
i = 0;
while (ath10k_ce_completed_send_next_nolock(ce_diag,
NULL) != 0) {
- mdelay(1);
- if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
+
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@@ -972,9 +988,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
(void **)&buf,
&completed_nbytes)
!= 0) {
- mdelay(1);
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
- if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@@ -1054,10 +1071,9 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret = 0;
u32 *buf;
- unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
- u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
int i;
@@ -1071,9 +1087,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
- orig_nbytes = nbytes;
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
- orig_nbytes,
+ alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
if (!data_buf) {
@@ -1081,9 +1098,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
}
- /* Copy caller's data to allocated DMA buf */
- memcpy(data_buf, data, orig_nbytes);
-
/*
* The address supplied by the caller is in the
* Target CPU virtual address space.
@@ -1096,12 +1110,14 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
- remaining_bytes = orig_nbytes;
- ce_data = ce_data_base;
+ remaining_bytes = nbytes;
while (remaining_bytes) {
/* FIXME: check cast */
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+ /* Copy caller's data to allocated DMA buf */
+ memcpy(data_buf, data, nbytes);
+
/* Set up to receive directly into Target(!) address */
ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
@@ -1111,7 +1127,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
- ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
nbytes, 0, 0);
if (ret != 0)
goto done;
@@ -1119,9 +1135,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
i = 0;
while (ath10k_ce_completed_send_next_nolock(ce_diag,
NULL) != 0) {
- mdelay(1);
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
- if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@@ -1132,9 +1149,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
(void **)&buf,
&completed_nbytes)
!= 0) {
- mdelay(1);
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
- if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@@ -1152,12 +1170,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
remaining_bytes -= nbytes;
address += nbytes;
- ce_data += nbytes;
+ data += nbytes;
}
done:
if (data_buf) {
- dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
}
@@ -1839,7 +1857,7 @@ int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
}
}
- if (WARN_ON(!ul_set || !dl_set))
+ if (!ul_set || !dl_set)
return -ENOENT;
return 0;
@@ -2068,9 +2086,9 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
- ath10k_pci_flush(ar);
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
+ ath10k_pci_flush(ar);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
@@ -3482,7 +3500,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k *ar;
struct ath10k_pci *ar_pci;
enum ath10k_hw_rev hw_rev;
- u32 chip_id;
+ struct ath10k_bus_params bus_params;
bool pci_ps;
int (*pci_soft_reset)(struct ath10k *ar);
int (*pci_hard_reset)(struct ath10k *ar);
@@ -3510,7 +3528,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = true;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
- targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
@@ -3538,7 +3556,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = true;
pci_soft_reset = NULL;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
- targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
break;
default:
WARN_ON(1);
@@ -3618,19 +3636,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_free_irq;
}
- chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
- if (chip_id == 0xffffffff) {
+ bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+ bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (bus_params.chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
goto err_free_irq;
}
- if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
+ if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
- pdev->device, chip_id);
+ pdev->device, bus_params.chip_id);
goto err_free_irq;
}
- ret = ath10k_core_register(ar, chip_id);
+ ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_free_irq;
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 0ed436657108..e8d86331c539 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -207,7 +207,8 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
#define CDC_WAR_DATA_CE 4
-/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
+/* Wait up to this long (in microseconds) for a Diagnostic Access CE operation to complete */
-#define DIAG_ACCESS_CE_TIMEOUT_MS 10
+#define DIAG_ACCESS_CE_TIMEOUT_US 10000 /* 10 ms */
+#define DIAG_ACCESS_CE_WAIT_US 50
void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
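
The new constants replace the old 1 ms mdelay() polling step with a 50 us udelay() while keeping the same 10 ms budget, so the wait loops now accumulate elapsed microseconds instead of counting iterations. The accumulating-timeout pattern in isolation, as a plain C sketch (usleep() stands in for udelay(), and the 'done' flag for the completion check; both are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define WAIT_US    50     /* polling step, like DIAG_ACCESS_CE_WAIT_US */
#define TIMEOUT_US 10000  /* total budget, like DIAG_ACCESS_CE_TIMEOUT_US */

static bool done; /* would be flipped by the completion path */

static int poll_for_completion(void)
{
	unsigned int waited = 0;

	while (!done) {
		usleep(WAIT_US);   /* udelay() in the driver */
		waited += WAIT_US; /* accumulate time, not iterations */
		if (waited > TIMEOUT_US)
			return -1; /* the driver returns -EBUSY here */
	}
	return 0;
}

int main(void)
{
	printf("result: %d\n", poll_for_completion());
	return 0;
}
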
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
new file mode 100644
index 000000000000..56cb1831dcdf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/string.h>
+#include <net/sock.h>
+
+#include "debug.h"
+#include "snoc.h"
+
+#define ATH10K_QMI_CLIENT_ID 0x4b4e454c
+#define ATH10K_QMI_TIMEOUT 30
+
+static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
+ struct ath10k_msa_mem_info *mem_info)
+{
+ struct qcom_scm_vmperm dst_perms[3];
+ struct ath10k *ar = qmi->ar;
+ unsigned int src_perms;
+ u32 perm_count;
+ int ret;
+
+ src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
+ dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA;
+ dst_perms[0].perm = QCOM_SCM_PERM_RW;
+ dst_perms[1].vmid = QCOM_SCM_VMID_WLAN;
+ dst_perms[1].perm = QCOM_SCM_PERM_RW;
+
+ if (mem_info->secure) {
+ perm_count = 2;
+ } else {
+ dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE;
+ dst_perms[2].perm = QCOM_SCM_PERM_RW;
+ perm_count = 3;
+ }
+
+ ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
+ &src_perms, dst_perms, perm_count);
+ if (ret < 0)
+ ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret);
+
+ return ret;
+}
+
+static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
+ struct ath10k_msa_mem_info *mem_info)
+{
+ struct qcom_scm_vmperm dst_perms;
+ struct ath10k *ar = qmi->ar;
+ unsigned int src_perms;
+ int ret;
+
+ src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);
+
+ if (!mem_info->secure)
+ src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE);
+
+ dst_perms.vmid = QCOM_SCM_VMID_HLOS;
+ dst_perms.perm = QCOM_SCM_PERM_RW;
+
+ ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
+ &src_perms, &dst_perms, 1);
+ if (ret < 0)
+ ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret);
+
+ return ret;
+}
+
+static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < qmi->nr_mem_region; i++) {
+ ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
+ if (ret)
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ for (i--; i >= 0; i--)
+ ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
+ return ret;
+}
+
+static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
+{
+ int i;
+
+ for (i = 0; i < qmi->nr_mem_region; i++)
+ ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
+}
+
+static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_msa_info_resp_msg_v01 resp = {};
+ struct wlfw_msa_info_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int ret;
+ int i;
+
+ req.msa_addr = qmi->msa_pa;
+ req.size = qmi->msa_mem_size;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_msa_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_MSA_INFO_REQ_V01,
+ WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_msa_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send msa mem info req: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) {
+ ath10k_err(ar, "invalid memory region length received: %d\n",
+ resp.mem_region_info_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ qmi->nr_mem_region = resp.mem_region_info_len;
+ for (i = 0; i < resp.mem_region_info_len; i++) {
+ qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
+ qmi->mem_region[i].size = resp.mem_region_info[i].size;
+ qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
+ ath10k_dbg(ar, ATH10K_DBG_QMI,
+ "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
+ i, &qmi->mem_region[i].addr,
+ qmi->mem_region[i].size,
+ qmi->mem_region[i].secure);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_msa_ready_resp_msg_v01 resp = {};
+ struct wlfw_msa_ready_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_msa_ready_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_MSA_READY_REQ_V01,
+ WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_msa_ready_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
+{
+ struct wlfw_bdf_download_resp_msg_v01 resp = {};
+ struct wlfw_bdf_download_req_msg_v01 *req;
+ struct ath10k *ar = qmi->ar;
+ unsigned int remaining;
+ struct qmi_txn txn;
+ const u8 *temp;
+ int ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ temp = ar->normal_mode_fw.board_data;
+ remaining = ar->normal_mode_fw.board_len;
+
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = 0;
+ req->total_size_valid = 1;
+ req->total_size = ar->normal_mode_fw.board_len;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->end_valid = 1;
+
+ if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ memcpy(req->data, temp, req->data_len);
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
+ WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "failed to download board data file: %d\n",
+ resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n");
+
+ kfree(req);
+ return 0;
+
+out:
+ kfree(req);
+ return ret;
+}
+
+static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
+{
+ struct wlfw_cal_report_resp_msg_v01 resp = {};
+ struct wlfw_cal_report_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int i, j = 0;
+ int ret;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) {
+ if (qmi->cal_data[i].total_size &&
+ qmi->cal_data[i].data) {
+ req.meta_data[j] = qmi->cal_data[i].cal_id;
+ j++;
+ }
+ }
+ req.meta_data_len = j;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_CAL_REPORT_REQ_V01,
+ WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_cal_report_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send calibration request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_qmi *qmi = ar_snoc->qmi;
+ struct wlfw_wlan_mode_resp_msg_v01 resp = {};
+ struct wlfw_wlan_mode_req_msg_v01 req = {};
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_wlan_mode_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_WLAN_MODE_REQ_V01,
+ WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_wlan_mode_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "more request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode);
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar,
+ struct ath10k_qmi_wlan_enable_cfg *config,
+ const char *version)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_qmi *qmi = ar_snoc->qmi;
+ struct wlfw_wlan_cfg_resp_msg_v01 resp = {};
+ struct wlfw_wlan_cfg_req_msg_v01 *req;
+ struct qmi_txn txn;
+ int ret;
+ u32 i;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_wlan_cfg_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ req->host_version_valid = 0;
+
+ req->tgt_cfg_valid = 1;
+ if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
+ req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+ else
+ req->tgt_cfg_len = config->num_ce_tgt_cfg;
+ for (i = 0; i < req->tgt_cfg_len; i++) {
+ req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
+ req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
+ req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
+ req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
+ req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
+ }
+
+ req->svc_cfg_valid = 1;
+ if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
+ req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
+ else
+ req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
+ for (i = 0; i < req->svc_cfg_len; i++) {
+ req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
+ req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
+ req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
+ }
+
+ req->shadow_reg_valid = 1;
+ if (config->num_shadow_reg_cfg >
+ QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
+ req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
+ else
+ req->shadow_reg_len = config->num_shadow_reg_cfg;
+
+ memcpy(req->shadow_reg, config->shadow_reg_cfg,
+ sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_WLAN_CFG_REQ_V01,
+ WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_wlan_cfg_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send config request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "config request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n");
+ kfree(req);
+ return 0;
+
+out:
+ kfree(req);
+ return ret;
+}
+
+int ath10k_qmi_wlan_enable(struct ath10k *ar,
+ struct ath10k_qmi_wlan_enable_cfg *config,
+ enum wlfw_driver_mode_enum_v01 mode,
+ const char *version)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n",
+ mode, config);
+
+ ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version);
+ if (ret) {
+ ath10k_err(ar, "failed to send qmi config: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_qmi_mode_send_sync_msg(ar, mode);
+ if (ret) {
+ ath10k_err(ar, "failed to send qmi mode: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_qmi_wlan_disable(struct ath10k *ar)
+{
+ return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
+}
+
+static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_cap_resp_msg_v01 *resp;
+ struct wlfw_cap_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int ret;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_CAP_REQ_V01,
+ WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "capablity req rejected: %d\n", resp->resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp->chip_info_valid) {
+ qmi->chip_info.chip_id = resp->chip_info.chip_id;
+ qmi->chip_info.chip_family = resp->chip_info.chip_family;
+ }
+
+ if (resp->board_info_valid)
+ qmi->board_info.board_id = resp->board_info.board_id;
+ else
+ qmi->board_info.board_id = 0xFF;
+
+ if (resp->soc_info_valid)
+ qmi->soc_info.soc_id = resp->soc_info.soc_id;
+
+ if (resp->fw_version_info_valid) {
+ qmi->fw_version = resp->fw_version_info.fw_version;
+ strlcpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
+ sizeof(qmi->fw_build_timestamp));
+ }
+
+ if (resp->fw_build_id_valid)
+ strlcpy(qmi->fw_build_id, resp->fw_build_id,
+ MAX_BUILD_ID_LEN + 1);
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI,
+ "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
+ qmi->chip_info.chip_id, qmi->chip_info.chip_family,
+ qmi->board_info.board_id, qmi->soc_info.soc_id);
+ ath10k_dbg(ar, ATH10K_DBG_QMI,
+ "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s\n",
+ qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
+
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(resp);
+ return ret;
+}
+
+static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
+{
+ struct wlfw_host_cap_resp_msg_v01 resp = {};
+ struct wlfw_host_cap_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int ret;
+
+ req.daemon_support_valid = 1;
+ req.daemon_support = 0;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_host_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_HOST_CAP_REQ_V01,
+ WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_host_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send host capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capablity request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_ind_register_resp_msg_v01 resp = {};
+ struct wlfw_ind_register_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int ret;
+
+ req.client_id_valid = 1;
+ req.client_id = ATH10K_QMI_CLIENT_ID;
+ req.fw_ready_enable_valid = 1;
+ req.fw_ready_enable = 1;
+ req.msa_ready_enable_valid = 1;
+ req.msa_ready_enable = 1;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_ind_register_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_IND_REGISTER_REQ_V01,
+ WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_ind_register_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send indication registed request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.fw_status_valid) {
+ if (resp.fw_status & QMI_WLFW_FW_READY_V01)
+ qmi->fw_ready = true;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+ int ret;
+
+ ret = ath10k_qmi_ind_register_send_sync_msg(qmi);
+ if (ret)
+ return;
+
+ if (qmi->fw_ready) {
+ ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
+ return;
+ }
+
+ ret = ath10k_qmi_host_cap_send_sync(qmi);
+ if (ret)
+ return;
+
+ ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi);
+ if (ret)
+ return;
+
+ ret = ath10k_qmi_setup_msa_permissions(qmi);
+ if (ret)
+ return;
+
+ ret = ath10k_qmi_msa_ready_send_sync_msg(qmi);
+ if (ret)
+ goto err_setup_msa;
+
+ ret = ath10k_qmi_cap_send_sync_msg(qmi);
+ if (ret)
+ goto err_setup_msa;
+
+ return;
+
+err_setup_msa:
+ ath10k_qmi_remove_msa_permission(qmi);
+}
+
+static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+
+ ar->hif.bus = ATH10K_BUS_SNOC;
+ ar->id.qmi_ids_valid = true;
+ ar->id.qmi_board_id = qmi->board_info.board_id;
+ ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;
+
+ return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
+}
+
+static int
+ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
+ enum ath10k_qmi_driver_event_type type,
+ void *data)
+{
+ struct ath10k_qmi_driver_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ event->type = type;
+ event->data = data;
+
+ spin_lock(&qmi->event_lock);
+ list_add_tail(&event->list, &qmi->event_list);
+ spin_unlock(&qmi->event_lock);
+
+ queue_work(qmi->event_wq, &qmi->event_work);
+
+ return 0;
+}
+
+static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+
+ ath10k_qmi_remove_msa_permission(qmi);
+ ath10k_core_free_board_files(ar);
+ ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
+}
+
+static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
+{
+ int ret;
+
+ ret = ath10k_qmi_fetch_board_file(qmi);
+ if (ret)
+ goto out;
+
+ ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
+ if (ret)
+ goto out;
+
+ ret = ath10k_qmi_send_cal_report_req(qmi);
+
+out:
+ return;
+}
+
+static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n");
+ ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
+
+ return 0;
+}
+
+static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL);
+}
+
+static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
+}
+
+static struct qmi_msg_handler qmi_msg_handler[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_READY_IND_V01,
+ .ei = wlfw_fw_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
+ .fn = ath10k_qmi_fw_ready_ind,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_MSA_READY_IND_V01,
+ .ei = wlfw_msa_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01),
+ .fn = ath10k_qmi_msa_ready_ind,
+ },
+ {}
+};
+
+static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+ struct sockaddr_qrtr *sq = &qmi->sq;
+ struct ath10k *ar = qmi->ar;
+ int ret;
+
+ sq->sq_family = AF_QIPCRTR;
+ sq->sq_node = service->node;
+ sq->sq_port = service->port;
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");
+
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
+ sizeof(qmi->sq), 0);
+ if (ret) {
+ ath10k_err(ar, "failed to connect to a remote QMI service port\n");
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n");
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL);
+
+ return ret;
+}
+
+static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath10k_qmi *qmi =
+ container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+
+ qmi->fw_ready = false;
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT, NULL);
+}
+
+static struct qmi_ops ath10k_qmi_ops = {
+ .new_server = ath10k_qmi_new_server,
+ .del_server = ath10k_qmi_del_server,
+};
+
+static void ath10k_qmi_driver_event_work(struct work_struct *work)
+{
+ struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
+ event_work);
+ struct ath10k_qmi_driver_event *event;
+ struct ath10k *ar = qmi->ar;
+
+ spin_lock(&qmi->event_lock);
+ while (!list_empty(&qmi->event_list)) {
+ event = list_first_entry(&qmi->event_list,
+ struct ath10k_qmi_driver_event, list);
+ list_del(&event->list);
+ spin_unlock(&qmi->event_lock);
+
+ switch (event->type) {
+ case ATH10K_QMI_EVENT_SERVER_ARRIVE:
+ ath10k_qmi_event_server_arrive(qmi);
+ break;
+ case ATH10K_QMI_EVENT_SERVER_EXIT:
+ ath10k_qmi_event_server_exit(qmi);
+ break;
+ case ATH10K_QMI_EVENT_FW_READY_IND:
+ ath10k_qmi_event_fw_ready_ind(qmi);
+ break;
+ case ATH10K_QMI_EVENT_MSA_READY_IND:
+ ath10k_qmi_event_msa_ready(qmi);
+ break;
+ default:
+ ath10k_warn(ar, "invalid event type: %d\n", event->type);
+ break;
+ }
+ kfree(event);
+ spin_lock(&qmi->event_lock);
+ }
+ spin_unlock(&qmi->event_lock);
+}
+
+static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
+{
+ struct ath10k *ar = qmi->ar;
+ struct device *dev = ar->dev;
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node) {
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret) {
+ dev_err(dev, "failed to resolve msa fixed region\n");
+ return ret;
+ }
+
+ qmi->msa_pa = r.start;
+ qmi->msa_mem_size = resource_size(&r);
+ qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
+ MEMREMAP_WT);
+ if (IS_ERR(qmi->msa_va)) {
+ dev_err(dev, "failed to map memory region: %pa\n", &r.start);
+ return PTR_ERR(qmi->msa_va);
+ }
+ } else {
+ qmi->msa_va = dmam_alloc_coherent(dev, msa_size,
+ &qmi->msa_pa, GFP_KERNEL);
+ if (!qmi->msa_va) {
+ ath10k_err(ar, "failed to allocate dma memory for msa region\n");
+ return -ENOMEM;
+ }
+ qmi->msa_mem_size = msa_size;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
+ &qmi->msa_pa,
+ qmi->msa_va);
+
+ return 0;
+}
+
+int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_qmi *qmi;
+ int ret;
+
+ qmi = kzalloc(sizeof(*qmi), GFP_KERNEL);
+ if (!qmi)
+ return -ENOMEM;
+
+ qmi->ar = ar;
+ ar_snoc->qmi = qmi;
+
+ ret = ath10k_qmi_setup_msa_resources(qmi, msa_size);
+ if (ret)
+ goto err;
+
+ ret = qmi_handle_init(&qmi->qmi_hdl,
+ WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ &ath10k_qmi_ops, qmi_msg_handler);
+ if (ret)
+ goto err;
+
+ qmi->event_wq = alloc_workqueue("ath10k_qmi_driver_event",
+ WQ_UNBOUND, 1);
+ if (!qmi->event_wq) {
+ ath10k_err(ar, "failed to allocate workqueue\n");
+ ret = -ENOMEM;
+ goto err_release_qmi_handle;
+ }
+
+ INIT_LIST_HEAD(&qmi->event_list);
+ spin_lock_init(&qmi->event_lock);
+ INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work);
+
+ ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01, 0);
+ if (ret)
+ goto err_qmi_lookup;
+
+ return 0;
+
+err_qmi_lookup:
+ destroy_workqueue(qmi->event_wq);
+
+err_release_qmi_handle:
+ qmi_handle_release(&qmi->qmi_hdl);
+
+err:
+ kfree(qmi);
+ return ret;
+}
+
+int ath10k_qmi_deinit(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_qmi *qmi = ar_snoc->qmi;
+
+ qmi_handle_release(&qmi->qmi_hdl);
+ cancel_work_sync(&qmi->event_work);
+ destroy_workqueue(qmi->event_wq);
+ kfree(qmi);
+ ar_snoc->qmi = NULL;
+
+ return 0;
+}
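
A note on the event machinery in qmi.c above: every QMI callback is funneled into one worker through a spinlock-protected list. ath10k_qmi_driver_event_post() appends under the lock and queues the work, and ath10k_qmi_driver_event_work() pops entries one at a time, dropping the lock while each handler runs so a handler may sleep or post further events. A minimal user-space sketch of that post/drain pattern (a pthread mutex stands in for the spinlock and a direct call for queue_work(); all names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	struct event *next;
	int type;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct event *head, *tail;

static void post_event(int type)
{
	struct event *ev = calloc(1, sizeof(*ev));

	if (!ev)
		return;
	ev->type = type;
	pthread_mutex_lock(&lock);
	if (tail)
		tail->next = ev;
	else
		head = ev;
	tail = ev;
	pthread_mutex_unlock(&lock);
	/* the driver calls queue_work() here */
}

static void drain_events(void)
{
	pthread_mutex_lock(&lock);
	while (head) {
		struct event *ev = head;

		head = ev->next;
		if (!head)
			tail = NULL;
		pthread_mutex_unlock(&lock); /* handlers run unlocked */
		printf("handling event %d\n", ev->type);
		free(ev);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	post_event(1);
	post_event(2);
	drain_events();
	return 0;
}
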
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
new file mode 100644
index 000000000000..1efe1d22fc2f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _ATH10K_QMI_H_
+#define _ATH10K_QMI_H_
+
+#include <linux/soc/qcom/qmi.h>
+#include <linux/qrtr.h>
+#include "qmi_wlfw_v01.h"
+
+#define MAX_NUM_MEMORY_REGIONS 2
+#define MAX_TIMESTAMP_LEN 32
+#define MAX_BUILD_ID_LEN 128
+#define MAX_NUM_CAL_V01 5
+
+enum ath10k_qmi_driver_event_type {
+ ATH10K_QMI_EVENT_SERVER_ARRIVE,
+ ATH10K_QMI_EVENT_SERVER_EXIT,
+ ATH10K_QMI_EVENT_FW_READY_IND,
+ ATH10K_QMI_EVENT_FW_DOWN_IND,
+ ATH10K_QMI_EVENT_MSA_READY_IND,
+ ATH10K_QMI_EVENT_MAX,
+};
+
+struct ath10k_msa_mem_info {
+ phys_addr_t addr;
+ u32 size;
+ bool secure;
+};
+
+struct ath10k_qmi_chip_info {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct ath10k_qmi_board_info {
+ u32 board_id;
+};
+
+struct ath10k_qmi_soc_info {
+ u32 soc_id;
+};
+
+struct ath10k_qmi_cal_data {
+ u32 cal_id;
+ u32 total_size;
+ u8 *data;
+};
+
+struct ath10k_tgt_pipe_cfg {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+struct ath10k_svc_pipe_cfg {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct ath10k_shadow_reg_cfg {
+ __le16 ce_id;
+ __le16 reg_offset;
+};
+
+struct ath10k_qmi_wlan_enable_cfg {
+ u32 num_ce_tgt_cfg;
+ struct ath10k_tgt_pipe_cfg *ce_tgt_cfg;
+ u32 num_ce_svc_pipe_cfg;
+ struct ath10k_svc_pipe_cfg *ce_svc_cfg;
+ u32 num_shadow_reg_cfg;
+ struct ath10k_shadow_reg_cfg *shadow_reg_cfg;
+};
+
+struct ath10k_qmi_driver_event {
+ struct list_head list;
+ enum ath10k_qmi_driver_event_type type;
+ void *data;
+};
+
+struct ath10k_qmi {
+ struct ath10k *ar;
+ struct qmi_handle qmi_hdl;
+ struct sockaddr_qrtr sq;
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for qmi event list */
+ u32 nr_mem_region;
+ struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS];
+ dma_addr_t msa_pa;
+ u32 msa_mem_size;
+ void *msa_va;
+ struct ath10k_qmi_chip_info chip_info;
+ struct ath10k_qmi_board_info board_info;
+ struct ath10k_qmi_soc_info soc_info;
+ char fw_build_id[MAX_BUILD_ID_LEN + 1];
+ u32 fw_version;
+ bool fw_ready;
+ char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
+ struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
+};
+
+int ath10k_qmi_wlan_enable(struct ath10k *ar,
+ struct ath10k_qmi_wlan_enable_cfg *config,
+ enum wlfw_driver_mode_enum_v01 mode,
+ const char *version);
+int ath10k_qmi_wlan_disable(struct ath10k *ar);
+int ath10k_qmi_register_service_notifier(struct notifier_block *nb);
+int ath10k_qmi_init(struct ath10k *ar, u32 msa_size);
+int ath10k_qmi_deinit(struct ath10k *ar);
+
+#endif /* _ATH10K_QMI_H_ */
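
The qmi_wlfw_v01.c file that follows is almost entirely declarative: each qmi_elem_info table tells the generic QMI marshalling code which struct field (located via offsetof), of which wire type and element size, travels under which TLV tag. A toy illustration of what such a table expresses, in plain C outside the kernel's QMI API (struct and field names are invented for the demo):

#include <stddef.h>
#include <stdio.h>

struct msg {
	unsigned int mode;
	unsigned char hw_debug;
};

struct elem_info {
	const char *name;    /* for the demo printout only */
	size_t size;         /* wire size of one element */
	unsigned int tlv;    /* TLV tag on the wire */
	size_t offset;       /* where the field lives in the struct */
};

static const struct elem_info msg_ei[] = {
	{ "mode",     sizeof(unsigned int),  0x01, offsetof(struct msg, mode) },
	{ "hw_debug", sizeof(unsigned char), 0x10, offsetof(struct msg, hw_debug) },
	{ NULL, 0, 0, 0 } /* terminator, like the {} entries in the tables below */
};

int main(void)
{
	const struct elem_info *ei;

	for (ei = msg_ei; ei->name; ei++)
		printf("tlv 0x%02x: %s, %zu byte(s) at offset %zu\n",
		       ei->tlv, ei->name, ei->size, ei->offset);
	return 0;
}
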
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
new file mode 100644
index 000000000000..ba79c2e4aed6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
@@ -0,0 +1,2072 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/soc/qcom/qmi.h>
+#include <linux/types.h>
+#include "qmi_wlfw_v01.h"
+
+static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ region_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ secure_flag),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ secure_flag),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct wlfw_mem_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ mem_cfg),
+ .ei_array = wlfw_mem_cfg_s_v01_ei,
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ type),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_soc_info_s_v01,
+ soc_id),
+ },
+ {}
+};
+
+static struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_init_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_init_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_driver_mode_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = wlfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_SHADOW_REG_V2,
+ .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = wlfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_board_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = wlfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_soc_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = wlfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_fw_version_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = wlfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data_len),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = QMI_WLFW_MAX_NUM_CAL_V01,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ xo_cal_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ xo_cal_data),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_report_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_initiate_cal_download_ind_msg_v01,
+ cal_id),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
+ total_size),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ seg_id),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ msa_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ size),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_MEM_REG_V01,
+ .elem_size = sizeof(struct wlfw_memory_region_info_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info),
+ .ei_array = wlfw_memory_region_info_s_v01_ei,
+ },
+ {}
+};
+
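+/* Messages that carry no TLV payload still need an ei array; a lone
+ * empty terminator entry describes them to the QMI encoder/decoder.
+ */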
+struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_ini_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ data_len),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ data),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ data),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_write_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_vbatt_req_msg_v01,
+ voltage_uv),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_vbatt_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_host_cap_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_s_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_resp_s_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+ {}
+};
+
+struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_rejuvenate_ack_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
+ size),
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_m3_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_xo_cal_ind_msg_v01,
+ xo_cal_data),
+ },
+ {}
+};
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
new file mode 100644
index 000000000000..c5e3870b8871
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
@@ -0,0 +1,677 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WCN3990_QMI_SVC_V01_H
+#define WCN3990_QMI_SVC_V01_H
+
+#define WLFW_SERVICE_ID_V01 0x45
+#define WLFW_SERVICE_VERS_V01 0x01
+
+#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
+#define QMI_WLFW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
+#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLFW_M3_INFO_REQ_V01 0x003C
+#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
+#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
+#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
+#define QMI_WLFW_XO_CAL_IND_V01 0x003D
+#define QMI_WLFW_INI_RESP_V01 0x002F
+#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
+#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
+#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MSA_READY_IND_V01 0x002B
+#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
+#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
+#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
+#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
+#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
+#define QMI_WLFW_CAP_RESP_V01 0x0024
+#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
+#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
+#define QMI_WLFW_VBATT_REQ_V01 0x0032
+#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
+#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLFW_VBATT_RESP_V01 0x0032
+#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
+#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
+#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
+#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
+
+#define QMI_WLFW_MAX_MEM_REG_V01 2
+#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 16
+#define QMI_WLFW_MAX_NUM_CAL_V01 5
+#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
+#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_CE_V01 12
+#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
+#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
+#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2
+#define QMI_WLFW_MAX_STR_LEN_V01 16
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
+#define QMI_WLFW_MAX_SHADOW_REG_V2 36
+#define QMI_WLFW_MAX_NUM_SVC_V01 24
+
+enum wlfw_driver_mode_enum_v01 {
+ QMI_WLFW_MISSION_V01 = 0,
+ QMI_WLFW_FTM_V01 = 1,
+ QMI_WLFW_EPPING_V01 = 2,
+ QMI_WLFW_WALTEST_V01 = 3,
+ QMI_WLFW_OFF_V01 = 4,
+ QMI_WLFW_CCPM_V01 = 5,
+ QMI_WLFW_QVIT_V01 = 6,
+ QMI_WLFW_CALIBRATION_V01 = 7,
+};
+
+enum wlfw_cal_temp_id_enum_v01 {
+ QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
+};
+
+enum wlfw_pipedir_enum_v01 {
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+};
+
+enum wlfw_mem_type_enum_v01 {
+ QMI_WLFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLFW_MEM_TYPE_DDR_V01 = 1,
+};
+
+#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
+#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
+#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
+#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04)
+#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08)
+#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10)
+
+#define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL)
+#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL)
+#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL)
+#define QMI_WLFW_MEM_READY_V01 ((u64)0x08ULL)
+#define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL)
+
+#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL)
+
+struct wlfw_ce_tgt_pipe_cfg_s_v01 {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+};
+
+struct wlfw_ce_svc_pipe_cfg_s_v01 {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct wlfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct wlfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct wlfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_mem_seg_s_v01 {
+ u32 size;
+ enum wlfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct wlfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum wlfw_mem_type_enum_v01 type;
+};
+
+struct wlfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct wlfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct wlfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct wlfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+struct wlfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 mem_ready_enable_valid;
+ u8 mem_ready_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+};
+
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 50
+extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
+
+struct wlfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[];
+
+struct wlfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[];
+
+struct wlfw_msa_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[];
+
+struct wlfw_pin_connect_result_ind_msg_v01 {
+ u8 pwr_pin_result_valid;
+ u32 pwr_pin_result;
+ u8 phy_io_pin_result_valid;
+ u32 phy_io_pin_result;
+ u8 rf_pin_result_valid;
+ u32 rf_pin_result;
+};
+
+#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
+extern struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
+
+struct wlfw_wlan_mode_req_msg_v01 {
+ enum wlfw_driver_mode_enum_v01 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[];
+
+struct wlfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct wlfw_shadow_reg_cfg_s_v01 shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct wlfw_shadow_reg_v2_cfg_s_v01 shadow_reg_v2[QMI_WLFW_MAX_SHADOW_REG_V2];
+};
+
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
+extern struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
+
+struct wlfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_cap_req_msg_v01_ei[];
+
+struct wlfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct wlfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct wlfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct wlfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct wlfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+};
+
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
+extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
+
+struct wlfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+};
+
+#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
+extern struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[];
+
+struct wlfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[];
+
+struct wlfw_cal_report_req_msg_v01 {
+ u32 meta_data_len;
+ enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
+ u8 xo_cal_data_valid;
+ u8 xo_cal_data;
+};
+
+#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28
+extern struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[];
+
+struct wlfw_cal_report_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_download_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+};
+
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
+
+struct wlfw_cal_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[];
+
+struct wlfw_cal_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_update_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 total_size;
+};
+
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
+extern struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
+
+struct wlfw_cal_update_req_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 seg_id;
+};
+
+#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
+extern struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[];
+
+struct wlfw_cal_update_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
+extern struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[];
+
+struct wlfw_msa_info_req_msg_v01 {
+ u64 msa_addr;
+ u32 size;
+};
+
+#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[];
+
+struct wlfw_msa_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u32 mem_region_info_len;
+ struct wlfw_memory_region_info_s_v01 mem_region_info[QMI_WLFW_MAX_MEM_REG_V01];
+};
+
+#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
+extern struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[];
+
+struct wlfw_msa_ready_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[];
+
+struct wlfw_msa_ready_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[];
+
+struct wlfw_ini_req_msg_v01 {
+ u8 enablefwlog_valid;
+ u8 enablefwlog;
+};
+
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct qmi_elem_info wlfw_ini_req_msg_v01_ei[];
+
+struct wlfw_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_read_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+};
+
+#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
+extern struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[];
+
+struct wlfw_athdiag_read_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
+extern struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_write_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
+extern struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+
+struct wlfw_athdiag_write_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+
+struct wlfw_vbatt_req_msg_v01 {
+ u64 voltage_uv;
+};
+
+#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
+
+struct wlfw_vbatt_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
+
+struct wlfw_mac_addr_req_msg_v01 {
+ u8 mac_addr_valid;
+ u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+
+#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
+extern struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
+
+struct wlfw_mac_addr_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+
+struct wlfw_host_cap_req_msg_v01 {
+ u8 daemon_support_valid;
+ u8 daemon_support;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 564
+extern struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 260
+extern struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_fw_init_done_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+ u8 cause_for_rejuvenation_valid;
+ u8 cause_for_rejuvenation;
+ u8 requesting_sub_system_valid;
+ u8 requesting_sub_system;
+ u8 line_number_valid;
+ u16 line_number;
+ u8 function_name_valid;
+ char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+};
+
+#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
+extern struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_req_msg_v01 {
+ u8 mask_valid;
+ u64 mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 prev_mask_valid;
+ u64 prev_mask;
+ u8 curr_mask_valid;
+ u64 curr_mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
+extern struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+
+struct wlfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[];
+
+struct wlfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[];
+
+struct wlfw_xo_cal_ind_msg_v01 {
+ u8 xo_cal_data;
+};
+
+#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
+extern struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[];
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index ea4075d456fa..310674de3cb8 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -1277,4 +1277,19 @@ struct fw_rx_desc_base {
u8 info0;
} __packed;
+#define FW_RX_DESC_FLAGS_FIRST_MSDU (1 << 0)
+#define FW_RX_DESC_FLAGS_LAST_MSDU (1 << 1)
+#define FW_RX_DESC_C3_FAILED (1 << 2)
+#define FW_RX_DESC_C4_FAILED (1 << 3)
+#define FW_RX_DESC_IPV6 (1 << 4)
+#define FW_RX_DESC_TCP (1 << 5)
+#define FW_RX_DESC_UDP (1 << 6)
+
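+/* Compact rx descriptor variant used by high-latency (HL) interfaces
+ * such as SDIO/USB.
+ */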
+struct fw_rx_desc_hl {
+ u8 info0;
+ u8 version;
+ u8 len;
+ u8 flags;
+} __packed;
+
#endif /* _RX_DESC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 7f61591ce0de..983ecfef1d28 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1941,7 +1941,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
struct ath10k_sdio *ar_sdio;
struct ath10k *ar;
enum ath10k_hw_rev hw_rev;
- u32 chip_id, dev_id_base;
+ u32 dev_id_base;
+ struct ath10k_bus_params bus_params;
int ret, i;
/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
@@ -2035,9 +2036,10 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_free_wq;
}
+ bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with SDIO */
- chip_id = 0;
- ret = ath10k_core_register(ar, chip_id);
+ bus_params.chip_id = 0;
+ ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_free_wq;
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index fa1843a7e0fd..8d3d9bca410f 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -62,10 +62,77 @@ static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
static const struct ath10k_snoc_drv_priv drv_priv = {
.hw_rev = ATH10K_HW_WCN3990,
.dma_mask = DMA_BIT_MASK(37),
+ .msa_size = 0x100000,
+};
+
+#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
+#define WCN3990_DST_WR_IDX_OFFSET 0x40
+
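+/* Per-CE write index register offsets, relayed to firmware through the
+ * QMI wlan_enable config (see ath10k_snoc_wlan_enable()).
+ */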
+static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
+ {
+ .ce_id = __cpu_to_le16(0),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(3),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(4),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(5),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(7),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(1),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(2),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(7),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(8),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(9),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(10),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(11),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
};
static struct ce_attr host_ce_config_wlan[] = {
@@ -171,7 +238,129 @@ static struct ce_attr host_ce_config_wlan[] = {
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
- .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ .recv_cb = ath10k_snoc_pktlog_rx_cb,
+ },
+};
+
+static struct ce_pipe_config target_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(64),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host HTT (HIF->HTT) */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(1024),
+ .nbytes_max = __cpu_to_le32(64),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(4),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 Target to uMC */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE10 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE11 target autonomous qcache memcpy */
+ {
+ .pipenum = __cpu_to_le32(11),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
},
};
@@ -436,6 +625,14 @@ static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
+/* Called by lower (CE) layer when data is received from the Target.
+ * WCN3990 firmware uses a separate CE (CE11) to transfer pktlog data.
+ */
+static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
@@ -616,7 +813,7 @@ static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
}
}
- if (WARN_ON(!ul_set || !dl_set))
+ if (!ul_set || !dl_set)
return -ENOENT;
return 0;
@@ -722,14 +919,15 @@ static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
ath10k_snoc_irq_disable(ar);
- ath10k_snoc_buffer_cleanup(ar);
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
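+ /* Free rx/tx buffers only after NAPI is disabled so the poll
+ * handler cannot touch buffers that are being reclaimed.
+ */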
+ ath10k_snoc_buffer_cleanup(ar);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
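+ /* Enable NAPI before unmasking CE interrupts so completions that
+ * arrive immediately can be polled.
+ */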
+ napi_enable(&ar->napi);
ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
@@ -756,11 +954,47 @@ static int ath10k_snoc_init_pipes(struct ath10k *ar)
static int ath10k_snoc_wlan_enable(struct ath10k *ar)
{
- return 0;
+ struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
+ struct ath10k_qmi_wlan_enable_cfg cfg;
+ enum wlfw_driver_mode_enum_v01 mode;
+ int pipe_num;
+
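+ /* Mirror the static CE pipe configuration into the layout the
+ * QMI wlan_enable request expects.
+ */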
+ for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
+ tgt_cfg[pipe_num].pipe_num =
+ target_ce_config_wlan[pipe_num].pipenum;
+ tgt_cfg[pipe_num].pipe_dir =
+ target_ce_config_wlan[pipe_num].pipedir;
+ tgt_cfg[pipe_num].nentries =
+ target_ce_config_wlan[pipe_num].nentries;
+ tgt_cfg[pipe_num].nbytes_max =
+ target_ce_config_wlan[pipe_num].nbytes_max;
+ tgt_cfg[pipe_num].flags =
+ target_ce_config_wlan[pipe_num].flags;
+ tgt_cfg[pipe_num].reserved = 0;
+ }
+
+ cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
+ sizeof(struct ath10k_tgt_pipe_cfg);
+ cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
+ &tgt_cfg;
+ cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
+ sizeof(struct ath10k_svc_pipe_cfg);
+ cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
+ &target_service_to_ce_map_wlan;
+ cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
+ sizeof(struct ath10k_shadow_reg_cfg);
+ cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
+ &target_shadow_reg_cfg_map;
+
+ mode = QMI_WLFW_MISSION_V01;
+
+ return ath10k_qmi_wlan_enable(ar, &cfg, mode,
+ NULL);
}
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
+ ath10k_qmi_wlan_disable(ar);
}
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
@@ -792,7 +1026,6 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar)
goto err_wlan_enable;
}
- napi_enable(&ar->napi);
return 0;
err_wlan_enable:
@@ -948,6 +1181,32 @@ out:
return ret;
}
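+/* Handle firmware state events reported over QMI; driver core
+ * registration is deferred until firmware signals it is ready.
+ */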
+int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_bus_params bus_params;
+ int ret;
+
+ switch (type) {
+ case ATH10K_QMI_EVENT_FW_READY_IND:
+ bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+ bus_params.chip_id = ar_snoc->target_info.soc_version;
+ ret = ath10k_core_register(ar, &bus_params);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n",
+ ret);
+ }
+ break;
+ case ATH10K_QMI_EVENT_FW_DOWN_IND:
+ break;
+ default:
+ ath10k_err(ar, "invalid fw indication: %llx\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ath10k_snoc_setup_resource(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
@@ -1272,6 +1531,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
struct ath10k_snoc *ar_snoc;
struct device *dev;
struct ath10k *ar;
+ u32 msa_size;
int ret;
u32 i;
@@ -1303,6 +1563,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ar_snoc->ar = ar;
ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
ar->ce_priv = &ar_snoc->ce;
+ msa_size = drv_data->msa_size;
ret = ath10k_snoc_resource_init(ar);
if (ret) {
@@ -1341,10 +1602,10 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_free_irq;
}
- ret = ath10k_core_register(ar, drv_data->hw_rev);
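+ /* Driver core registration now happens from
+ * ath10k_snoc_fw_indication() once firmware reports ready; probe
+ * only brings up the QMI client.
+ */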
+ ret = ath10k_qmi_init(ar, msa_size);
if (ret) {
- ath10k_err(ar, "failed to register driver core: %d\n", ret);
- goto err_hw_power_off;
+ ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
+ goto err_core_destroy;
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
@@ -1352,9 +1613,6 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
return 0;
-err_hw_power_off:
- ath10k_hw_power_off(ar);
-
err_free_irq:
ath10k_snoc_free_irq(ar);
@@ -1376,6 +1634,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_hw_power_off(ar);
ath10k_snoc_free_irq(ar);
ath10k_snoc_release_resource(ar);
+ ath10k_qmi_deinit(ar);
ath10k_core_destroy(ar);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index f9e530189d48..e1d2d6675556 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -19,10 +19,12 @@
#include "hw.h"
#include "ce.h"
+#include "qmi.h"
struct ath10k_snoc_drv_priv {
enum ath10k_hw_rev hw_rev;
u64 dma_mask;
+ u32 msa_size;
};
struct snoc_state {
@@ -81,6 +83,7 @@ struct ath10k_snoc {
struct timer_list rx_post_retry;
struct ath10k_wcn3990_vreg_info *vreg;
struct ath10k_wcn3990_clk_info *clk;
+ struct ath10k_qmi *qmi;
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
@@ -90,5 +93,6 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value);
u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset);
+int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
#endif /* _SNOC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index c2b5bad0459b..b11a1c3d87b4 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -484,6 +484,10 @@ struct host_interest {
#define QCA99X0_BOARD_DATA_SZ 12288
#define QCA99X0_BOARD_EXT_DATA_SZ 0
+/* Dual band extended board data */
+#define QCA99X0_EXT_BOARD_DATA_SZ 2048
+#define EXT_BOARD_ADDRESS_OFFSET 0x3000
+
#define QCA4019_BOARD_DATA_SZ 12064
#define QCA4019_BOARD_EXT_DATA_SZ 0
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index cda164f6e9f6..23606b6972d0 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -95,7 +95,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
- dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ if (ar->dev_type != ATH10K_DEV_TYPE_HL)
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
ath10k_report_offchan_tx(htt->ar, msdu);
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index d4803ff5a78a..f731d35ee76d 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -983,7 +983,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
struct usb_device *dev = interface_to_usbdev(interface);
int ret, vendor_id, product_id;
enum ath10k_hw_rev hw_rev;
- u32 chip_id;
+ struct ath10k_bus_params bus_params;
/* Assumption: All USB based chipsets (so far) are QCA9377 based.
* If there are newer chipsets that do not use the hw reg
@@ -1016,9 +1016,10 @@ static int ath10k_usb_probe(struct usb_interface *interface,
ar->id.vendor = vendor_id;
ar->id.device = product_id;
+ bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with USB */
- chip_id = 0;
- ret = ath10k_core_register(ar, chip_id);
+ bus_params.chip_id = 0;
+ ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_warn(ar, "failed to register driver core: %d\n", ret);
goto err;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 7fd63bbf8e24..7978a7783f90 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -210,6 +210,9 @@ struct wmi_ops {
u32 fw_feature_bitmap);
int (*get_vdev_subtype)(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
+ struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
+ u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan);
struct sk_buff *(*gen_pdev_bss_chan_info_req)
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
@@ -1361,6 +1364,24 @@ ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
}
static inline int
+ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_config_pno)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
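+ /* PNO is carried over the network list offload (NLO) command */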
+ cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
enum wmi_tdls_state state)
{
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index cdc1e64d52ad..bab8b2527fb8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -19,7 +19,6 @@
#include "debug.h"
#include "mac.h"
#include "hw.h"
-#include "mac.h"
#include "wmi.h"
#include "wmi-ops.h"
#include "wmi-tlv.h"
@@ -1569,7 +1568,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
- cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
+ if (ar->hw_params.num_peers)
+ cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
+ else
+ cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
@@ -1582,7 +1584,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
}
cfg->num_peer_keys = __cpu_to_le32(2);
- cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
+ if (ar->hw_params.num_peers)
+ cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
+ else
+ cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
cfg->tx_chain_mask = __cpu_to_le32(0x7);
cfg->rx_chain_mask = __cpu_to_le32(0x7);
cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
@@ -3436,6 +3441,192 @@ ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
return skb;
}
+/* Request FW to start PNO operation */
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
+ u32 vdev_id,
+ struct wmi_pno_scan_req *pno)
+{
+ struct nlo_configured_parameters *nlo_list;
+ struct wmi_tlv_wow_nlo_config_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ __le32 *channel_list;
+ u16 tlv_len;
+ size_t len;
+ void *ptr;
+ u32 i;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV placeholder for the array of structures
+ * nlo_configured_parameters (nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV placeholder for the array of u32 channel_list */
+
+ len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
+ WMI_NLO_MAX_CHAN);
+ len += sizeof(struct nlo_configured_parameters) *
+ min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);
+
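+ /* Resulting buffer layout (sketch):
+ *   TLV(NLO_CONFIG_CMD)  struct wmi_tlv_wow_nlo_config_cmd
+ *   TLV(ARRAY_STRUCT)    nlo_configured_parameters nlo_list[]
+ *   TLV(ARRAY_UINT32)    __le32 channel_list[]
+ */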
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ /* wmi_tlv_wow_nlo_config_cmd parameters */
+ cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
+ cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
+ cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);
+
+ if (pno->do_passive_scan)
+ cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
+
+ /* copy scan interval */
+ cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
+ cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
+ cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
+ cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_parameters(nlo_list) */
+ cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
+ WMI_NLO_MAX_SSIDS));
+ tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
+ sizeof(struct nlo_configured_parameters);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(tlv_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = ptr;
+ for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
+ sizeof(*tlv));
+
+ /* copy the SSID and its length */
+ nlo_list[i].ssid.valid = __cpu_to_le32(true);
+ nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
+
+ /* copy rssi threshold */
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
+ nlo_list[i].rssi_cond.rssi =
+ __cpu_to_le32(pno->a_networks[i].rssi_threshold);
+ }
+
+ nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ __cpu_to_le32(pno->a_networks[i].bcast_nw_type);
+ }
+
+ ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);
+
+ /* copy channel info */
+ cmd->num_of_channels = __cpu_to_le32(min_t(u8,
+ pno->a_networks[0].channel_count,
+ WMI_NLO_MAX_CHAN));
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
+ sizeof(u32));
+ ptr += sizeof(*tlv);
+
+ channel_list = (__le32 *)ptr;
+ for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
+ channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+/* Request FW to stop ongoing PNO operation */
+static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
+ u32 vdev_id)
+{
+ struct wmi_tlv_wow_nlo_config_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV placeholder for the array of structures
+ * nlo_configured_parameters (nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV placeholder for the array of u32 channel_list */
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_parameters(nlo_list) */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* channel list */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(0);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
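+/* pno_scan->enable selects between the start and stop variants above */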
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan)
+{
+ if (pno_scan->enable)
+ return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
{
@@ -3968,6 +4159,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+ .gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 4f0c20c90642..92c25f51bf86 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -2146,6 +2146,260 @@ struct wmi_tlv_tdls_peer_event {
void ath10k_wmi_tlv_attach(struct ath10k *ar);
+enum wmi_nlo_auth_algorithm {
+ WMI_NLO_AUTH_ALGO_80211_OPEN = 1,
+ WMI_NLO_AUTH_ALGO_80211_SHARED_KEY = 2,
+ WMI_NLO_AUTH_ALGO_WPA = 3,
+ WMI_NLO_AUTH_ALGO_WPA_PSK = 4,
+ WMI_NLO_AUTH_ALGO_WPA_NONE = 5,
+ WMI_NLO_AUTH_ALGO_RSNA = 6,
+ WMI_NLO_AUTH_ALGO_RSNA_PSK = 7,
+};
+
+enum wmi_nlo_cipher_algorithm {
+ WMI_NLO_CIPHER_ALGO_NONE = 0x00,
+ WMI_NLO_CIPHER_ALGO_WEP40 = 0x01,
+ WMI_NLO_CIPHER_ALGO_TKIP = 0x02,
+ WMI_NLO_CIPHER_ALGO_CCMP = 0x04,
+ WMI_NLO_CIPHER_ALGO_WEP104 = 0x05,
+ WMI_NLO_CIPHER_ALGO_BIP = 0x06,
+ WMI_NLO_CIPHER_ALGO_RSN_USE_GROUP = 0x100,
+ WMI_NLO_CIPHER_ALGO_WEP = 0x101,
+};
+
+/* SSID broadcast type passed in NLO params */
+enum wmi_nlo_ssid_bcastnwtype {
+ WMI_NLO_BCAST_UNKNOWN = 0,
+ WMI_NLO_BCAST_NORMAL = 1,
+ WMI_NLO_BCAST_HIDDEN = 2,
+};
+
+#define WMI_NLO_MAX_SSIDS 16
+#define WMI_NLO_MAX_CHAN 48
+
+#define WMI_NLO_CONFIG_STOP (0x1 << 0)
+#define WMI_NLO_CONFIG_START (0x1 << 1)
+#define WMI_NLO_CONFIG_RESET (0x1 << 2)
+#define WMI_NLO_CONFIG_SLOW_SCAN (0x1 << 4)
+#define WMI_NLO_CONFIG_FAST_SCAN (0x1 << 5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN (0x1 << 6)
+
+/* This bit indicates whether EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time.
+ */
+#define WMI_NLO_CONFIG_ENLO (0x1 << 7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE (0x1 << 8)
+#define WMI_NLO_CONFIG_ENLO_RESET (0x1 << 9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ (0x1 << 10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ (0x1 << 11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ (0x1 << 12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG (0x1 << 13)
+
+/* Whether directed scan needs to be performed (for hidden SSIDs) */
+#define WMI_ENLO_FLAG_DIRECTED_SCAN 1
+
+/* Whether PNO event shall be triggered if the network is found on A band */
+#define WMI_ENLO_FLAG_A_BAND 2
+
+/* Whether PNO event shall be triggered if the network is found on G band */
+#define WMI_ENLO_FLAG_G_BAND 4
+
+/* Whether strict matching is required (i.e. the firmware shall not
+ * match on a partial SSID)
+ */
+#define WMI_ENLO_FLAG_STRICT_MATCH 8
+
+/* Code for matching the beacon AUTH IE - additional codes TBD */
+/* open */
+#define WMI_ENLO_AUTH_CODE_OPEN 1
+
+/* WPA_PSK or WPA2PSK */
+#define WMI_ENLO_AUTH_CODE_PSK 2
+
+/* any EAPOL */
+#define WMI_ENLO_AUTH_CODE_EAPOL 4
+
+struct wmi_nlo_ssid_param {
+ __le32 valid;
+ struct wmi_ssid ssid;
+} __packed;
+
+struct wmi_nlo_enc_param {
+ __le32 valid;
+ __le32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_param {
+ __le32 valid;
+ __le32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_param {
+ __le32 valid;
+
+ /* If WMI_NLO_CONFIG_ENLO is not set, supplicant PNO is enabled and
+ * the value should be true/false. Otherwise EPNO is enabled and
+ * bcast_nw_type is used as a bit flag containing WMI_ENLO_FLAG_XXX
+ * values.
+ */
+ __le32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_param {
+ __le32 valid;
+ __le32 rssi;
+} __packed;
+
+struct nlo_configured_parameters {
+ /* TLV tag and len */
+ __le32 tlv_header;
+ struct wmi_nlo_ssid_param ssid;
+ struct wmi_nlo_enc_param enc_type;
+ struct wmi_nlo_auth_param auth_type;
+ struct wmi_nlo_rssi_param rssi_cond;
+
+ /* indicates if the SSID is hidden or not */
+ struct wmi_nlo_bcast_nw_param bcast_nw_type;
+} __packed;
+
+/* Support channel prediction for PNO scan after scanning top_k_num channels
+ * if stationary_threshold is met.
+ */
+struct nlo_channel_prediction_cfg {
+ __le32 tlv_header;
+
+ /* Enable or disable this feature. */
+ __le32 enable;
+
+ /* Top K channels will be scanned before deciding whether to further scan
+ * or stop. Minimum value is 3 and maximum is 5.
+ */
+ __le32 top_k_num;
+
+ /* Preconfigured stationary threshold. A lower value is more
+ * conservative, a higher value more aggressive. Maximum is 100,
+ * minimum is 0.
+ */
+ __le32 stationary_threshold;
+
+ /* Period of the full channel scan, in milliseconds.
+ * Once full_scan_period_ms have elapsed since the last full scan,
+ * the channel prediction scan is suppressed and a full scan is
+ * performed. This helps detect a sudden AP power-on or power-off.
+ * A value of 0 means no full scan at all (not recommended).
+ */
+ __le32 full_scan_period_ms;
+} __packed;
+
+struct enlo_candidate_score_params_t {
+ __le32 tlv_header; /* TLV tag and len */
+
+ /* minimum 5GHz RSSI for a BSSID to be considered (units = dBm) */
+ __le32 min_5ghz_rssi;
+
+ /* minimum 2.4GHz RSSI for a BSSID to be considered (units = dBm) */
+ __le32 min_24ghz_rssi;
+
+ /* the maximum score that a network can have before bonuses */
+ __le32 initial_score_max;
+
+ /* current_connection_bonus:
+ * only report a network whose score is at least this much higher
+ * than the current connection's score
+ */
+ __le32 current_connection_bonus;
+
+ /* score bonus for all networks with the same network flag */
+ __le32 same_network_bonus;
+
+ /* score bonus for networks that are not open */
+ __le32 secure_bonus;
+
+ /* 5GHz RSSI score bonus (applied to all 5GHz networks) */
+ __le32 band_5ghz_bonus;
+} __packed;
+
+struct connected_nlo_bss_band_rssi_pref_t {
+ __le32 tlv_header; /* TLV tag and len */
+
+ /* band which needs to get preference over other band
+ * - see wmi_set_vdev_ie_band enum
+ */
+ __le32 band;
+
+ /* Amount of RSSI preference (in dB) that can be given to a band */
+ __le32 rssi_pref;
+} __packed;
+
+struct connected_nlo_rssi_params_t {
+ __le32 tlv_header; /* TLV tag and len */
+
+ /* Relative RSSI threshold (in dB) by which a new BSS must have a
+ * better RSSI than the currently connected BSS.
+ */
+ __le32 relative_rssi;
+
+ /* The amount of rssi preference (in dB) that can be given
+ * to a 5G BSS over 2.4G BSS.
+ */
+ __le32 relative_rssi_5g_pref;
+} __packed;
+
+struct wmi_tlv_wow_nlo_config_cmd {
+ __le32 flags;
+ __le32 vdev_id;
+ __le32 fast_scan_max_cycles;
+ __le32 active_dwell_time;
+ __le32 passive_dwell_time; /* PDT in msecs */
+ __le32 probe_bundle_size;
+
+ /* ART = IRT */
+ __le32 rest_time;
+
+ /* Max value that can be reached after SBM */
+ __le32 max_rest_time;
+
+ /* SBM */
+ __le32 scan_backoff_multiplier;
+
+ /* SCBM */
+ __le32 fast_scan_period;
+
+ /* specific to Windows */
+ __le32 slow_scan_period;
+
+ __le32 no_of_ssids;
+
+ __le32 num_of_channels;
+
+ /* NLO scan start delay time in milliseconds */
+ __le32 delay_start_time;
+
+ /* MAC address to use as SA in the probe request */
+ struct wmi_mac_addr mac_addr;
+
+ /* Mask of the MAC address bits to be randomized */
+ struct wmi_mac_addr mac_mask;
+
+ /* IE bitmap to use in the probe request */
+ __le32 ie_bitmap[8];
+
+ /* Number of vendor OUIs in the TLV vendor_oui[] */
+ __le32 num_vendor_oui;
+
+ /* Number of connected NLO band preferences */
+ __le32 num_cnlo_band_pref;
+
+ /* The TLVs will follow.
+ * nlo_configured_parameters nlo_list[];
+ * A_UINT32 channel_list[num_of_channels];
+ * nlo_channel_prediction_cfg ch_prediction_cfg;
+ * enlo_candidate_score_params candidate_score_params;
+ * wmi_vendor_oui vendor_oui[num_vendor_oui];
+ * connected_nlo_rssi_params cnlo_rssi_params;
+ * connected_nlo_bss_band_rssi_pref cnlo_bss_band_rssi_pref[num_cnlo_band_pref];
+ */
+} __packed;
+
struct wmi_tlv_mgmt_tx_cmd {
__le32 vdev_id;
__le32 desc_id;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index fd612d2905b0..25e8fa789e8d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1307,7 +1307,8 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
- .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable =
+ WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
@@ -1869,6 +1870,12 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
if (ret)
dev_kfree_skb_any(skb);
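+ /* A timeout (-EAGAIN) suggests the firmware has become
+ * unresponsive; schedule a hardware restart.
+ */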
+ if (ret == -EAGAIN) {
+ ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
+ cmd_id);
+ queue_work(ar->workqueue, &ar->restart_work);
+ }
+
return ret;
}
@@ -2336,7 +2343,12 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
dma_unmap_single(ar->dev, pkt_addr->paddr,
msdu->len, DMA_FROM_DEVICE);
info = IEEE80211_SKB_CB(msdu);
- info->flags |= status;
+
+ if (status)
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+ else
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
ieee80211_tx_status_irqsafe(ar->hw, msdu);
ret = 0;
@@ -2476,7 +2488,8 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->freq, status->band, status->signal,
status->rate_idx);
- ieee80211_rx(ar->hw, skb);
+ ieee80211_rx_ni(ar->hw, skb);
+
return 0;
}
@@ -3236,18 +3249,31 @@ void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_vdev_start_ev_arg arg = {};
int ret;
+ u32 status;
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
+ ar->last_wmi_vdev_start_status = 0;
+
ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
- return;
+ ar->last_wmi_vdev_start_status = ret;
+ goto out;
}
- if (WARN_ON(__le32_to_cpu(arg.status)))
- return;
+ status = __le32_to_cpu(arg.status);
+ if (WARN_ON_ONCE(status)) {
+ ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
+ status, (status == WMI_VDEV_START_CHAN_INVALID) ?
+ "chan-invalid" : "unknown");
+ /* Setup is done one way or another though, so we should still
+ * do the completion, so don't return here.
+ */
+ ar->last_wmi_vdev_start_status = -EINVAL;
+ }
+out:
complete(&ar->vdev_setup_done);
}
@@ -4774,6 +4800,13 @@ ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
}
}
+ if (pream == -1) {
+ ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
+ pream_idx, __le32_to_cpu(ev->chan_freq));
+ tpc = 0;
+ goto out;
+ }
+
if (pream == 4)
tpc = min_t(u8, ev->rates_array[rate_idx],
ev->max_reg_allow_pow[ch]);
@@ -5016,6 +5049,36 @@ ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
}
}
+static void
+ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_peer_sta_ps_state_chg_event *ev;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+ u8 peer_addr[ETH_ALEN];
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
+ ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
+
+ if (!sta) {
+ ath10k_warn(ar, "failed to find station entry %pM\n",
+ peer_addr);
+ goto exit;
+ }
+
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
+
+exit:
+ rcu_read_unlock();
+}
+
void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
@@ -5449,7 +5512,8 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
arg.mac_addr,
__le32_to_cpu(arg.status));
- ether_addr_copy(ar->mac_addr, arg.mac_addr);
+ if (is_zero_ether_addr(ar->mac_addr))
+ ether_addr_copy(ar->mac_addr, arg.mac_addr);
complete(&ar->wmi.unified_ready);
return 0;
}
@@ -5945,6 +6009,9 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
+ case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID:
+ ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -6062,6 +6129,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
ath10k_wmi_event_dfs_status_check(ar, skb);
break;
+ case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID:
+ ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 36220258e3c7..f7badd079051 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -203,6 +203,8 @@ enum wmi_service {
WMI_SERVICE_TPC_STATS_FINAL,
WMI_SERVICE_RESET_CHIP,
WMI_SERVICE_SPOOF_MAC_SUPPORT,
+ WMI_SERVICE_TX_DATA_ACK_RSSI,
+ WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
/* keep last */
WMI_SERVICE_MAX,
@@ -350,6 +352,13 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT,
WMI_10_4_SERVICE_TPC_STATS_FINAL,
+ WMI_10_4_SERVICE_CFR_CAPTURE_SUPPORT,
+ WMI_10_4_SERVICE_TX_DATA_ACK_RSSI,
+ WMI_10_4_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_LEGACY,
+ WMI_10_4_SERVICE_PER_PACKET_SW_ENCRYPT,
+ WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT,
+ WMI_10_4_SERVICE_VDEV_BCN_RATE_CONTROL,
+ WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
};
static inline char *wmi_service_name(int service_id)
@@ -463,6 +472,8 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT);
SVCSTR(WMI_SERVICE_TPC_STATS_FINAL);
SVCSTR(WMI_SERVICE_RESET_CHIP);
+ SVCSTR(WMI_SERVICE_TX_DATA_ACK_RSSI);
+ SVCSTR(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT);
default:
return NULL;
}
@@ -771,6 +782,10 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_TPC_STATS_FINAL,
WMI_SERVICE_TPC_STATS_FINAL, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_DATA_ACK_RSSI,
+ WMI_SERVICE_TX_DATA_ACK_RSSI, len);
+ SVCMAP(WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+ WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, len);
}
#undef SVCMAP
@@ -2924,6 +2939,7 @@ enum wmi_coex_version {
* @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host
* enable/disable
* @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY: Explicit TDLS mode enable/disable
+ * @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable
*/
enum wmi_10_4_feature_mask {
WMI_10_4_LTEU_SUPPORT = BIT(0),
@@ -2939,6 +2955,7 @@ enum wmi_10_4_feature_mask {
WMI_10_4_TDLS_UAPSD_SLEEP_STA = BIT(10),
WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11),
WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12),
+ WMI_10_4_TX_DATA_ACK_RSSI = BIT(16),
};
@@ -4153,6 +4170,13 @@ enum wmi_tpc_pream_5ghz {
WMI_TPC_PREAM_5GHZ_HTCUP,
};
+#define WMI_PEER_PS_STATE_DISABLED 2
+
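+/* Payload of the WMI PEER_STA_PS_STATECHG event */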
+struct wmi_peer_sta_ps_state_chg_event {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_ps_state;
+} __packed;
+
struct wmi_pdev_chanlist_update_event {
/* number of channels */
__le32 num_chan;
@@ -4958,10 +4982,15 @@ enum wmi_rate_preamble {
#define ATH10K_HW_GI(flags) (((flags) >> 5) & 0x1)
#define ATH10K_HW_RATECODE(rate, nss, preamble) \
(((preamble) << 6) | ((nss) << 4) | (rate))
+#define ATH10K_HW_AMPDU(flags) ((flags) & 0x1)
+#define ATH10K_HW_BA_FAIL(flags) (((flags) >> 1) & 0x3)
-#define VHT_MCS_NUM 10
-#define VHT_BW_NUM 4
-#define VHT_NSS_NUM 4
+#define ATH10K_VHT_MCS_NUM 10
+#define ATH10K_BW_NUM 4
+#define ATH10K_NSS_NUM 4
+#define ATH10K_LEGACY_NUM 12
+#define ATH10K_GI_NUM 2
+#define ATH10K_HT_MCS_NUM 32
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
@@ -6642,11 +6671,17 @@ struct wmi_ch_info_ev_arg {
__le32 rx_frame_count;
};
+/* From 10.4 firmware, not sure all have the same values. */
+enum wmi_vdev_start_status {
+ WMI_VDEV_START_OK = 0,
+ WMI_VDEV_START_CHAN_INVALID,
+};
+
struct wmi_vdev_start_ev_arg {
__le32 vdev_id;
__le32 req_id;
__le32 resp_type; /* %WMI_VDEV_RESP_ */
- __le32 status;
+ __le32 status; /* See wmi_vdev_start_status enum above */
};
struct wmi_peer_kick_ev_arg {
@@ -7033,6 +7068,63 @@ struct wmi_pdev_set_adaptive_cca_params {
__le32 cca_detect_margin;
} __packed;
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS 26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* Size based on the dot11 declaration, without extra IEs, as we will not carry those for PNO */
+#define WMI_PNO_MAX_PB_REQ_SIZE 450
+
+#define WMI_PNO_24G_DEFAULT_CH 1
+#define WMI_PNO_5G_DEFAULT_CH 36
+
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
+
+/* SSID broadcast type */
+enum wmi_SSID_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+};
+
+struct wmi_network_type {
+ struct wmi_ssid ssid;
+ u32 authentication;
+ u32 encryption;
+ u32 bcast_nw_type;
+ u8 channel_count;
+ u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+ s32 rssi_threshold;
+} __packed;
+
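+/* Host-side PNO request, filled from the cfg80211 sched scan request in
+ * wow.c and serialized into struct wmi_tlv_wow_nlo_config_cmd by wmi-tlv.c.
+ */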
+struct wmi_pno_scan_req {
+ u8 enable;
+ u8 vdev_id;
+ u8 uc_networks_count;
+ struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+ u32 fast_scan_period;
+ u32 slow_scan_period;
+ u8 fast_scan_max_cycles;
+
+ bool do_passive_scan;
+
+ u32 delay_start_time;
+ u32 active_min_time;
+ u32 active_max_time;
+ u32 passive_min_time;
+ u32 passive_max_time;
+
+ /* mac address randomization attributes */
+ u32 enable_pno_scan_randomization;
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+} __packed;
+
enum wmi_host_platform_type {
WMI_HOST_PLATFORM_HIGH_PERF,
WMI_HOST_PLATFORM_LOW_PERF,
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index a6b179f88d36..51b26b305885 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -180,6 +180,100 @@ static void ath10k_wow_convert_8023_to_80211
}
}
+static int ath10k_wmi_pno_check(struct ath10k *ar, u32 vdev_id,
+ struct cfg80211_sched_scan_request *nd_config,
+ struct wmi_pno_scan_req *pno)
+{
+ int i, j, ret = 0;
+ u8 ssid_len;
+
+ pno->enable = 1;
+ pno->vdev_id = vdev_id;
+ pno->uc_networks_count = nd_config->n_match_sets;
+
+ if (!pno->uc_networks_count ||
+ pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
+ return -EINVAL;
+
+ if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
+ return -EINVAL;
+
+ /* Filling per profile params */
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ ssid_len = nd_config->match_sets[i].ssid.ssid_len;
+
+ if (ssid_len == 0 || ssid_len > 32)
+ return -EINVAL;
+
+ pno->a_networks[i].ssid.ssid_len = __cpu_to_le32(ssid_len);
+
+ memcpy(pno->a_networks[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid_len);
+ pno->a_networks[i].authentication = 0;
+ pno->a_networks[i].encryption = 0;
+ pno->a_networks[i].bcast_nw_type = 0;
+
+ /* Copy the list of valid channels into the request */
+ pno->a_networks[i].channel_count = nd_config->n_channels;
+ pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
+
+ for (j = 0; j < nd_config->n_channels; j++) {
+ pno->a_networks[i].channels[j] =
+ nd_config->channels[j]->center_freq;
+ }
+ }
+
+ /* set scan to passive if no SSIDs are specified in the request */
+ if (nd_config->n_ssids == 0)
+ pno->do_passive_scan = true;
+ else
+ pno->do_passive_scan = false;
+
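+ /* Profiles that also appear in nd_config->ssids require directed
+ * probing, so mark them as hidden networks for the firmware.
+ */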
+ for (i = 0; i < nd_config->n_ssids; i++) {
+ j = 0;
+ while (j < pno->uc_networks_count) {
+ if (__le32_to_cpu(pno->a_networks[j].ssid.ssid_len) ==
+ nd_config->ssids[i].ssid_len &&
+ (memcmp(pno->a_networks[j].ssid.ssid,
+ nd_config->ssids[i].ssid,
+ __le32_to_cpu(pno->a_networks[j].ssid.ssid_len)) == 0)) {
+ pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
+ break;
+ }
+ j++;
+ }
+ }
+
+ if (nd_config->n_scan_plans == 2) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
+ pno->slow_scan_period =
+ nd_config->scan_plans[1].interval * MSEC_PER_SEC;
+ } else if (nd_config->n_scan_plans == 1) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = 1;
+ pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ } else {
+ ath10k_warn(ar, "Invalid number of scan plans %d !!",
+ nd_config->n_scan_plans);
+ }
+
+ if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ /* enable mac randomization */
+ pno->enable_pno_scan_randomization = 1;
+ memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
+ memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
+ }
+
+ pno->delay_start_time = nd_config->delay;
+
+ /* Current FW does not support min-max range for dwell time */
+ pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
+ pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
+ return ret;
+}
+
static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
struct cfg80211_wowlan *wowlan)
{
@@ -213,6 +307,26 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
if (wowlan->magic_pkt)
__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
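+ /* net-detect: translate the sched scan request into a firmware
+ * PNO config and arm the NLO wakeup event
+ */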
+ if (wowlan->nd_config) {
+ struct wmi_pno_scan_req *pno;
+ int ret;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ ar->nlo_enabled = true;
+
+ ret = ath10k_wmi_pno_check(ar, arvif->vdev_id,
+ wowlan->nd_config, pno);
+ if (!ret) {
+ ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+ }
+
+ kfree(pno);
+ }
break;
default:
break;
@@ -299,6 +413,51 @@ static int ath10k_wow_set_wakeups(struct ath10k *ar,
return 0;
}
+static int ath10k_vif_wow_clean_nlo(struct ath10k_vif *arvif)
+{
+ int ret = 0;
+ struct ath10k *ar = arvif->ar;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ if (ar->nlo_enabled) {
+ struct wmi_pno_scan_req *pno;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ pno->enable = 0;
+ ar->nlo_enabled = false;
+ ret = ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int ath10k_wow_nlo_cleanup(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_vif_wow_clean_nlo(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to clean nlo settings on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int ath10k_wow_enable(struct ath10k *ar)
{
int ret;
@@ -374,6 +533,8 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
goto cleanup;
}
+ ath10k_mac_wait_tx_complete(ar);
+
ret = ath10k_wow_enable(ar);
if (ret) {
ath10k_warn(ar, "failed to start wow: %d\n", ret);
@@ -434,6 +595,10 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
if (ret)
ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
+ ret = ath10k_wow_nlo_cleanup(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to cleanup nlo: %d\n", ret);
+
exit:
if (ret) {
switch (ar->state) {
@@ -473,6 +638,11 @@ int ath10k_wow_init(struct ath10k *ar)
ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
}
+ if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
+ ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ }
+
ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index e01faf641288..94f70047d3fc 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -1028,8 +1028,6 @@ ath5k_debug_dump_bands(struct ath5k_hw *ah)
if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS)))
return;
- BUG_ON(!ah->sbands);
-
for (b = 0; b < NUM_NL80211_BANDS; b++) {
struct ieee80211_supported_band *band = &ah->sbands[b];
char bname[6];
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 58fb227a849f..54132af70094 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -710,8 +710,8 @@ static bool check_device_tree(struct ath6kl *ar)
for_each_compatible_node(node, NULL, "atheros,ath6kl") {
board_id = of_get_property(node, board_id_prop, NULL);
if (board_id == NULL) {
- ath6kl_warn("No \"%s\" property on %s node.\n",
- board_id_prop, node->name);
+ ath6kl_warn("No \"%s\" property on %pOFn node.\n",
+ board_id_prop, node);
continue;
}
snprintf(board_filename, sizeof(board_filename),
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 0c61dbaa62a4..cb59016c723b 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -638,7 +638,7 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
memcpy(vif->bssid, bssid, sizeof(vif->bssid));
vif->bss_ch = channel;
- if ((vif->nw_type == INFRA_NETWORK)) {
+ if (vif->nw_type == INFRA_NETWORK) {
ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
vif->listen_intvl_t, 0);
ath6kl_check_ch_switch(ar, channel);
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index a3668433dc02..988222cea9df 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -755,11 +755,11 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
}
if (main_ant_conf == rx_ant_conf) {
- ANT_STAT_INC(ANT_MAIN, recv_cnt);
- ANT_LNA_INC(ANT_MAIN, rx_ant_conf);
+ ANT_STAT_INC(sc, ANT_MAIN, recv_cnt);
+ ANT_LNA_INC(sc, ANT_MAIN, rx_ant_conf);
} else {
- ANT_STAT_INC(ANT_ALT, recv_cnt);
- ANT_LNA_INC(ANT_ALT, rx_ant_conf);
+ ANT_STAT_INC(sc, ANT_ALT, recv_cnt);
+ ANT_LNA_INC(sc, ANT_ALT, rx_ant_conf);
}
/* Short scan check */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index ef2dd68d3f77..11d6f975c87d 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -37,10 +37,6 @@
#define AR5008_11NG_HT_SS_SHIFT 12
#define AR5008_11NG_HT_DS_SHIFT 20
-static const int firstep_table[] =
-/* level: 0 1 2 3 4 5 6 7 8 */
- { -4, -2, 0, 2, 4, 6, 8, 10, 12 }; /* lvl 0-8, default 2 */
-
/*
* register values to turn OFDM weak signal detection OFF
*/
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c
index 239429f10378..53ca4b063eb9 100644
--- a/drivers/net/wireless/ath/ath9k/common-debug.c
+++ b/drivers/net/wireless/ath/ath9k/common-debug.c
@@ -144,6 +144,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
RXS_ERR("BEACONS", rx_beacons);
RXS_ERR("FRAGS", rx_frags);
RXS_ERR("SPECTRAL", rx_spectral);
+ RXS_ERR("SPECTRAL SMPL GOOD", rx_spectral_sample_good);
+ RXS_ERR("SPECTRAL SMPL ERR", rx_spectral_sample_err);
RXS_ERR("CRC ERR", crc_err);
RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.h b/drivers/net/wireless/ath/ath9k/common-debug.h
index 3376990d3a24..2938b5b96b07 100644
--- a/drivers/net/wireless/ath/ath9k/common-debug.h
+++ b/drivers/net/wireless/ath/ath9k/common-debug.h
@@ -39,6 +39,8 @@
* @rx_beacons: No. of beacons received.
* @rx_frags: No. of rx-fragments received.
* @rx_spectral: No. of spectral packets received.
+ * @rx_spectral_sample_good: No. of good spectral samples
+ * @rx_spectral_sample_err: No. of erroneous spectral samples
*/
struct ath_rx_stats {
u32 rx_pkts_all;
@@ -58,6 +60,8 @@ struct ath_rx_stats {
u32 rx_beacons;
u32 rx_frags;
u32 rx_spectral;
+ u32 rx_spectral_sample_good;
+ u32 rx_spectral_sample_err;
};
#ifdef CONFIG_ATH9K_COMMON_DEBUG
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 440e16e641e4..6aa3ec024ffa 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -59,8 +59,7 @@ ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1;
- max_index = spectral_max_index(mag_info->all_bins,
- SPECTRAL_HT20_NUM_BINS);
+ max_index = spectral_max_index_ht20(mag_info->all_bins);
max_magnitude = spectral_max_magnitude(mag_info->all_bins);
max_exp = mag_info->max_exp & 0xf;
@@ -72,7 +71,7 @@ ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN && max_index < 1)
return -1;
- if (sample[max_index] != (max_magnitude >> max_exp))
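+ /* Compare only the upper bits: mask off the low 3 bits so small
+ * differences in the reported magnitude do not fail the check.
+ */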
+ if ((sample[max_index] & 0xf8) != ((max_magnitude >> max_exp) & 0xf8))
return -1;
else
return 0;
@@ -100,12 +99,10 @@ ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1;
lower_mag = spectral_max_magnitude(mag_info->lower_bins);
- lower_max_index = spectral_max_index(mag_info->lower_bins,
- SPECTRAL_HT20_40_NUM_BINS);
+ lower_max_index = spectral_max_index_ht40(mag_info->lower_bins);
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
- upper_max_index = spectral_max_index(mag_info->upper_bins,
- SPECTRAL_HT20_40_NUM_BINS);
+ upper_max_index = spectral_max_index_ht40(mag_info->upper_bins);
max_exp = mag_info->max_exp & 0xf;
@@ -117,19 +114,10 @@ ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
((upper_max_index < 1) || (lower_max_index < 1)))
return -1;
- /* Some time hardware messes up the index and adds
- * the index of the middle point (dc_pos). Try to fix it.
- */
- if ((upper_max_index - dc_pos > 0) &&
- (sample[upper_max_index] == (upper_mag >> max_exp)))
- upper_max_index -= dc_pos;
-
- if ((lower_max_index - dc_pos > 0) &&
- (sample[lower_max_index - dc_pos] == (lower_mag >> max_exp)))
- lower_max_index -= dc_pos;
-
- if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) ||
- (sample[lower_max_index] != (lower_mag >> max_exp)))
+ if (((sample[upper_max_index + dc_pos] & 0xf8) !=
+ ((upper_mag >> max_exp) & 0xf8)) ||
+ ((sample[lower_max_index] & 0xf8) !=
+ ((lower_mag >> max_exp) & 0xf8)))
return -1;
else
return 0;
@@ -169,8 +157,7 @@ ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
magnitude = spectral_max_magnitude(mag_info->all_bins);
fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
- max_index = spectral_max_index(mag_info->all_bins,
- SPECTRAL_HT20_NUM_BINS);
+ max_index = spectral_max_index_ht20(mag_info->all_bins);
fft_sample_20.max_index = max_index;
bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
@@ -188,7 +175,8 @@ ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
magnitude >> max_exp,
max_index);
- if (fft_sample_20.data[max_index] != (magnitude >> max_exp)) {
+ if ((fft_sample_20.data[max_index] & 0xf8) !=
+ ((magnitude >> max_exp) & 0xf8)) {
ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
ret = -1;
}
@@ -302,12 +290,10 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
- lower_max_index = spectral_max_index(mag_info->lower_bins,
- SPECTRAL_HT20_40_NUM_BINS);
+ lower_max_index = spectral_max_index_ht40(mag_info->lower_bins);
fft_sample_40.lower_max_index = lower_max_index;
- upper_max_index = spectral_max_index(mag_info->upper_bins,
- SPECTRAL_HT20_40_NUM_BINS);
+ upper_max_index = spectral_max_index_ht40(mag_info->upper_bins);
fft_sample_40.upper_max_index = upper_max_index;
lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
@@ -331,29 +317,13 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
upper_mag >> max_exp,
upper_max_index);
- /* Some time hardware messes up the index and adds
- * the index of the middle point (dc_pos). Try to fix it.
- */
- if ((upper_max_index - dc_pos > 0) &&
- (fft_sample_40.data[upper_max_index] == (upper_mag >> max_exp))) {
- upper_max_index -= dc_pos;
- fft_sample_40.upper_max_index = upper_max_index;
- }
-
- if ((lower_max_index - dc_pos > 0) &&
- (fft_sample_40.data[lower_max_index - dc_pos] ==
- (lower_mag >> max_exp))) {
- lower_max_index -= dc_pos;
- fft_sample_40.lower_max_index = lower_max_index;
- }
-
/* Check if we got the expected magnitude values at
* the expected bins
*/
- if ((fft_sample_40.data[upper_max_index + dc_pos]
- != (upper_mag >> max_exp)) ||
- (fft_sample_40.data[lower_max_index]
- != (lower_mag >> max_exp))) {
+ if (((fft_sample_40.data[upper_max_index + dc_pos] & 0xf8)
+ != ((upper_mag >> max_exp) & 0xf8)) ||
+ ((fft_sample_40.data[lower_max_index] & 0xf8)
+ != ((lower_mag >> max_exp) & 0xf8))) {
ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
ret = -1;
}
@@ -411,7 +381,7 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
ath_dbg(common, SPECTRAL_SCAN,
"Calculated new upper max 0x%X at %i\n",
- tmp_mag, i);
+ tmp_mag, fft_sample_40.upper_max_index);
} else
for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
if (fft_sample_40.data[i] == (upper_mag >> max_exp))
@@ -501,6 +471,7 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0};
struct ath_hw *ah = spec_priv->ah;
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+ struct ath_softc *sc = (struct ath_softc *)common->priv;
u8 num_bins, *vdata = (u8 *)hdr;
struct ath_radar_info *radar_info;
int len = rs->rs_datalen;
@@ -649,8 +620,13 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
sample_buf, sample_len,
sample_bytes);
- fft_handler(rs, spec_priv, sample_buf,
- tsf, freq, chan_type);
+ ret = fft_handler(rs, spec_priv, sample_buf,
+ tsf, freq, chan_type);
+
+ if (ret == 0)
+ RX_STAT_INC(sc, rx_spectral_sample_good);
+ else
+ RX_STAT_INC(sc, rx_spectral_sample_err);
memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
@@ -665,6 +641,11 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
ret = fft_handler(rs, spec_priv, sample_start,
tsf, freq, chan_type);
+ if (ret == 0)
+ RX_STAT_INC(sc, rx_spectral_sample_good);
+ else
+ RX_STAT_INC(sc, rx_spectral_sample_err);
+
/* Mix the received bins to the /dev/random
* pool
*/
@@ -675,7 +656,7 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
* loop.
*/
if (len <= fft_len + 2)
- break;
+ return 1;
sample_start = &vdata[i + 1];
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
index 303ab470ce34..011d8ab8b974 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.h
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -145,6 +145,23 @@ static inline u8 spectral_max_index(u8 *bins, int num_bins)
return m;
}
+static inline u8 spectral_max_index_ht40(u8 *bins)
+{
+ u8 idx;
+
+ idx = spectral_max_index(bins, SPECTRAL_HT20_40_NUM_BINS);
+
+ /* Positive values and zero start at the beginning
+ * of the data field.
+ */
+ return idx % (SPECTRAL_HT20_40_NUM_BINS / 2);
+}
+
+static inline u8 spectral_max_index_ht20(u8 *bins)
+{
+ return spectral_max_index(bins, SPECTRAL_HT20_NUM_BINS);
+}
+
/* return the bitmap weight from the all/upper/lower bins */
static inline u8 spectral_bitmap_weight(u8 *bins)
{
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 0a6eb8a8c1ed..4399e9ad058f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -785,35 +785,35 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
{
int qnum = txq->axq_qnum;
- TX_STAT_INC(qnum, tx_pkts_all);
+ TX_STAT_INC(sc, qnum, tx_pkts_all);
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
if (flags & ATH_TX_ERROR)
- TX_STAT_INC(qnum, a_xretries);
+ TX_STAT_INC(sc, qnum, a_xretries);
else
- TX_STAT_INC(qnum, a_completed);
+ TX_STAT_INC(sc, qnum, a_completed);
} else {
if (ts->ts_status & ATH9K_TXERR_XRETRY)
- TX_STAT_INC(qnum, xretries);
+ TX_STAT_INC(sc, qnum, xretries);
else
- TX_STAT_INC(qnum, completed);
+ TX_STAT_INC(sc, qnum, completed);
}
if (ts->ts_status & ATH9K_TXERR_FILT)
- TX_STAT_INC(qnum, txerr_filtered);
+ TX_STAT_INC(sc, qnum, txerr_filtered);
if (ts->ts_status & ATH9K_TXERR_FIFO)
- TX_STAT_INC(qnum, fifo_underrun);
+ TX_STAT_INC(sc, qnum, fifo_underrun);
if (ts->ts_status & ATH9K_TXERR_XTXOP)
- TX_STAT_INC(qnum, xtxop);
+ TX_STAT_INC(sc, qnum, xtxop);
if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
- TX_STAT_INC(qnum, timer_exp);
+ TX_STAT_INC(sc, qnum, timer_exp);
if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
- TX_STAT_INC(qnum, desc_cfg_err);
+ TX_STAT_INC(sc, qnum, desc_cfg_err);
if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
- TX_STAT_INC(qnum, data_underrun);
+ TX_STAT_INC(sc, qnum, data_underrun);
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
- TX_STAT_INC(qnum, delim_underrun);
+ TX_STAT_INC(sc, qnum, delim_underrun);
}
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
@@ -990,19 +990,6 @@ static int read_file_dump_nfcal(struct seq_file *file, void *data)
return 0;
}
-static int open_file_dump_nfcal(struct inode *inode, struct file *f)
-{
- return single_open(f, read_file_dump_nfcal, inode->i_private);
-}
-
-static const struct file_operations fops_dump_nfcal = {
- .read = seq_read,
- .open = open_file_dump_nfcal,
- .owner = THIS_MODULE,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 249f8141cd00..79607db14387 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -25,17 +25,17 @@ struct ath_buf;
struct fft_sample_tlv;
#ifdef CONFIG_ATH9K_DEBUGFS
-#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
-#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
-#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
-#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
-#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++;
+#define TX_STAT_INC(sc, q, c) do { (sc)->debug.stats.txstats[q].c++; } while (0)
+#define RX_STAT_INC(sc, c) do { (sc)->debug.stats.rxstats.c++; } while (0)
+#define RESET_STAT_INC(sc, type) do { (sc)->debug.stats.reset[type]++; } while (0)
+#define ANT_STAT_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].c++; } while (0)
+#define ANT_LNA_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].lna_recv_cnt[c]++; } while (0)
#else
-#define TX_STAT_INC(q, c) do { } while (0)
-#define RX_STAT_INC(c)
-#define RESET_STAT_INC(sc, type) do { } while (0)
-#define ANT_STAT_INC(i, c) do { } while (0)
-#define ANT_LNA_INC(i, c) do { } while (0)
+#define TX_STAT_INC(sc, q, c) do { (void)(sc); } while (0)
+#define RX_STAT_INC(sc, c) do { (void)(sc); } while (0)
+#define RESET_STAT_INC(sc, type) do { (void)(sc); } while (0)
+#define ANT_STAT_INC(sc, i, c) do { (void)(sc); } while (0)
+#define ANT_LNA_INC(sc, i, c) do { (void)(sc); } while (0)
#endif
enum ath_reset_type {
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index a6f45f1bb5bb..e8fcd3e1c470 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -116,7 +116,7 @@ void ath_debug_rate_stats(struct ath_softc *sc,
if (rxs->rate_idx >= ARRAY_SIZE(rstats->ht_stats))
goto exit;
- if ((rxs->bw == RATE_INFO_BW_40))
+ if (rxs->bw == RATE_INFO_BW_40)
rstats->ht_stats[rxs->rate_idx].ht40_cnt++;
else
rstats->ht_stats[rxs->rate_idx].ht20_cnt++;
@@ -286,9 +286,25 @@ static ssize_t read_airtime(struct file *file, char __user *user_buf,
return retval;
}
+static ssize_t
+write_airtime_reset_stub(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_node *an = file->private_data;
+ struct ath_airtime_stats *astats;
+ int i;
+
+ astats = &an->airtime_stats;
+ astats->rx_airtime = 0;
+ astats->tx_airtime = 0;
+ for (i = 0; i < 4; i++)
+ an->airtime_deficit[i] = ATH_AIRTIME_QUANTUM;
+ return count;
+}
+
static const struct file_operations fops_airtime = {
.read = read_airtime,
+ .write = write_airtime_reset_stub,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
@@ -304,5 +320,5 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
debugfs_create_file("node_aggr", 0444, dir, an, &fops_node_aggr);
debugfs_create_file("node_recv", 0444, dir, an, &fops_node_recv);
- debugfs_create_file("airtime", 0444, dir, an, &fops_airtime);
+ debugfs_create_file("airtime", 0644, dir, an, &fops_airtime);
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 1049773378f2..1e3b5f4a4cf9 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -809,7 +809,7 @@ static void ath9k_tx(struct ieee80211_hw *hw,
if (ath_tx_start(hw, skb, &txctl) != 0) {
ath_dbg(common, XMIT, "TX failed\n");
- TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
+ TX_STAT_INC(sc, txctl.txq->axq_qnum, txfailed);
goto exit;
}
@@ -1251,8 +1251,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_node *an = &avp->mcast_node;
- mutex_lock(&sc->mutex);
-
if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
if (sc->cur_chan->nvifs >= 1) {
- mutex_unlock(&sc->mutex);
@@ -1261,6 +1259,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
sc->tx99_vif = vif;
}
+ mutex_lock(&sc->mutex);
+
ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->cur_chan->nvifs++;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a8ac42c96d71..30d1bd832d90 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -829,7 +829,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* Discard zero-length packets and packets smaller than an ACK
*/
if (rx_stats->rs_datalen < 10) {
- RX_STAT_INC(rx_len_err);
+ RX_STAT_INC(sc, rx_len_err);
goto corrupt;
}
@@ -839,7 +839,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* those frames.
*/
if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
- RX_STAT_INC(rx_len_err);
+ RX_STAT_INC(sc, rx_len_err);
goto corrupt;
}
@@ -880,7 +880,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
} else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED &&
ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats,
rx_status->mactime)) {
- RX_STAT_INC(rx_spectral);
+ RX_STAT_INC(sc, rx_spectral);
}
return -EINVAL;
}
@@ -898,7 +898,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
spin_unlock_bh(&sc->chan_lock);
if (ath_is_mybeacon(common, hdr)) {
- RX_STAT_INC(rx_beacons);
+ RX_STAT_INC(sc, rx_beacons);
rx_stats->is_mybeacon = true;
}
@@ -915,7 +915,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
*/
ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
rx_stats->rs_rate);
- RX_STAT_INC(rx_rate_err);
+ RX_STAT_INC(sc, rx_rate_err);
return -EINVAL;
}
@@ -1136,7 +1136,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* skb and put it at the tail of the sc->rx.rxbuf list for
* processing. */
if (!requeue_skb) {
- RX_STAT_INC(rx_oom_err);
+ RX_STAT_INC(sc, rx_oom_err);
goto requeue_drop_frag;
}
@@ -1164,7 +1164,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
rxs, decrypt_error);
if (rs.rs_more) {
- RX_STAT_INC(rx_frags);
+ RX_STAT_INC(sc, rx_frags);
/*
* rs_more indicates chained descriptors which can be
* used to link buffers together for a sort of
@@ -1174,7 +1174,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
/* too many fragments - cannot handle frame */
dev_kfree_skb_any(sc->rx.frag);
dev_kfree_skb_any(skb);
- RX_STAT_INC(rx_too_many_frags_err);
+ RX_STAT_INC(sc, rx_too_many_frags_err);
skb = NULL;
}
sc->rx.frag = skb;
@@ -1186,7 +1186,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
dev_kfree_skb(skb);
- RX_STAT_INC(rx_oom_err);
+ RX_STAT_INC(sc, rx_oom_err);
goto requeue_drop_frag;
}
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index ce50d8f5835e..95544ce05acf 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -56,11 +56,6 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
struct sk_buff *skb;
struct ath_vif *avp;
- if (!sc->tx99_vif)
- return NULL;
-
- avp = (struct ath_vif *)sc->tx99_vif->drv_priv;
-
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return NULL;
@@ -77,7 +72,10 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
- hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
+ if (sc->tx99_vif) {
+ avp = (struct ath_vif *) sc->tx99_vif->drv_priv;
+ hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
+ }
tx_info = IEEE80211_SKB_CB(skb);
memset(tx_info, 0, sizeof(*tx_info));
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 43b6c8508e49..25b3fc82d4ac 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -391,7 +391,7 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
struct ieee80211_hdr *hdr;
int prev = fi->retries;
- TX_STAT_INC(txq->axq_qnum, a_retries);
+ TX_STAT_INC(sc, txq->axq_qnum, a_retries);
fi->retries += count;
if (prev > 0)
@@ -1105,7 +1105,7 @@ finish:
al = get_frame_info(bf->bf_mpdu)->framelen;
bf->bf_state.bf_type = BUF_AMPDU;
} else {
- TX_STAT_INC(txq->axq_qnum, a_aggr);
+ TX_STAT_INC(sc, txq->axq_qnum, a_aggr);
}
return al;
@@ -1727,7 +1727,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
bf_tail = bf;
nframes--;
sent++;
- TX_STAT_INC(txq->axq_qnum, a_queued_hw);
+ TX_STAT_INC(sc, txq->axq_qnum, a_queued_hw);
if (an->sta && skb_queue_empty(&tid->retry_q))
ieee80211_sta_set_buffered(an->sta, i, false);
@@ -2110,14 +2110,14 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
}
if (puttxbuf) {
- TX_STAT_INC(txq->axq_qnum, puttxbuf);
+ TX_STAT_INC(sc, txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
}
if (!edma || sc->tx99_state) {
- TX_STAT_INC(txq->axq_qnum, txstart);
+ TX_STAT_INC(sc, txq->axq_qnum, txstart);
ath9k_hw_txstart(ah, txq->axq_qnum);
}
@@ -2154,7 +2154,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_lastbf = bf;
ath_tx_fill_desc(sc, bf, txq, fi->framelen);
ath_tx_txqaddbuf(sc, txq, &bf_head, false);
- TX_STAT_INC(txq->axq_qnum, queued);
+ TX_STAT_INC(sc, txq->axq_qnum, queued);
}
static void setup_frame_info(struct ieee80211_hw *hw,
@@ -2486,7 +2486,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ath_txq_lock(sc, txctl.txq);
ath_tx_fill_desc(sc, bf, txctl.txq, 0);
ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
- TX_STAT_INC(txctl.txq->axq_qnum, queued);
+ TX_STAT_INC(sc, txctl.txq->axq_qnum, queued);
ath_txq_unlock(sc, txctl.txq);
}
@@ -2699,7 +2699,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
if (status == -EINPROGRESS)
break;
- TX_STAT_INC(txq->axq_qnum, txprocdesc);
+ TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
/*
* Remove ath_buf's of the same transmit unit from txq,
@@ -2778,7 +2778,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_txq_lock(sc, txq);
- TX_STAT_INC(txq->axq_qnum, txprocdesc);
+ TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
fifo_list = &txq->txq_fifo[txq->txq_tailidx];
if (list_empty(fifo_list)) {
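
Every ath9k statistics hunk in this patch threads the softc through TX_STAT_INC()/RX_STAT_INC() rather than letting the macro capture a variable named sc from the caller's scope. A hedged before/after reduction with invented types:

struct demo_stats { unsigned long rx_frags; };
struct demo_softc { struct demo_stats debug_stats; };

/* before (sketch):  #define RX_STAT_INC(c)  (sc->debug_stats.c++)
 * That form compiles only where a variable literally named `sc` is in
 * scope; taking the context as an argument makes the dependency
 * explicit and the call sites self-documenting. */
#define RX_STAT_INC(sc, c)	((sc)->debug_stats.c++)

static void on_fragment(struct demo_softc *sc)
{
	RX_STAT_INC(sc, rx_frags);
}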
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 0cb5b58925dc..8c75651ede6c 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -246,8 +246,8 @@ static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
* of available memory blocks, so the number can
* never exceed the mem_blocks count.
*/
- if (unlikely(WARN_ON_ONCE(cookie == 0) ||
- WARN_ON_ONCE(cookie > ar->fw.mem_blocks)))
+ if (WARN_ON_ONCE(cookie == 0) ||
+ WARN_ON_ONCE(cookie > ar->fw.mem_blocks))
return;
atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 06cfe8d311f3..5ab3e31c9ffa 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -174,13 +174,12 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn
int i;
size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
- wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
- GFP_KERNEL);
+ wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size,
+ &wcn_ch->dma_addr,
+ GFP_KERNEL);
if (!wcn_ch->cpu_addr)
return -ENOMEM;
- memset(wcn_ch->cpu_addr, 0, size);
-
cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
cur_ctl = wcn_ch->head_blk_ctl;
@@ -628,13 +627,13 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
- cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
- GFP_KERNEL);
+ cpu_addr = dma_zalloc_coherent(wcn->dev, s,
+ &wcn->mgmt_mem_pool.phy_addr,
+ GFP_KERNEL);
if (!cpu_addr)
goto out_err;
wcn->mgmt_mem_pool.virt_addr = cpu_addr;
- memset(cpu_addr, 0, s);
/* Allocate BD headers for DATA frames */
@@ -643,13 +642,13 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
- cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
- GFP_KERNEL);
+ cpu_addr = dma_zalloc_coherent(wcn->dev, s,
+ &wcn->data_mem_pool.phy_addr,
+ GFP_KERNEL);
if (!cpu_addr)
goto out_err;
wcn->data_mem_pool.virt_addr = cpu_addr;
- memset(cpu_addr, 0, s);
return 0;
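
Both wcn36xx conversions above replace a dma_alloc_coherent()+memset() pair with dma_zalloc_coherent(), the zeroing variant available at the time of this series. A kernel-style sketch of the same conversion:

#include <linux/dma-mapping.h>

/* One zeroing allocator call instead of allocate-then-memset; the
 * error path is unchanged because a NULL return needs no cleanup. */
static void *demo_alloc_ring(struct device *dev, size_t size,
			     dma_addr_t *pa)
{
	return dma_zalloc_coherent(dev, size, pa, GFP_KERNEL);
	/* previously (sketch):
	 *	void *va = dma_alloc_coherent(dev, size, pa, GFP_KERNEL);
	 *	if (va)
	 *		memset(va, 0, size);
	 *	return va;
	 */
}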
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 00098f24116d..1d2d698fb779 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -792,10 +792,10 @@ static int wcn36xx_smd_process_ptt_msg_rsp(void *buf, size_t len,
rsp->header.len - sizeof(rsp->ptt_msg_resp_status));
if (rsp->header.len > 0) {
- *p_ptt_rsp_msg = kmalloc(rsp->header.len, GFP_ATOMIC);
+ *p_ptt_rsp_msg = kmemdup(rsp->ptt_msg, rsp->header.len,
+ GFP_ATOMIC);
if (!*p_ptt_rsp_msg)
return -ENOMEM;
- memcpy(*p_ptt_rsp_msg, rsp->ptt_msg, rsp->header.len);
}
return ret;
}
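
The smd.c hunk is the analogous conversion for plain allocations: kmemdup() collapses kmalloc()+memcpy() into one call, so the copy can never be left half-initialised by a reordered error path. A minimal kernel-style sketch:

#include <linux/slab.h>

/* kmemdup(src, len, gfp) allocates len bytes and copies src into them,
 * returning NULL on allocation failure -- exactly the replaced pattern. */
static void *demo_dup_rsp(const void *msg, size_t len)
{
	return kmemdup(msg, len, GFP_ATOMIC);
}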
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index f79c337105cb..d18e81fae5f1 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -48,9 +48,29 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
CHAN60G(1, 0),
CHAN60G(2, 0),
CHAN60G(3, 0),
-/* channel 4 not supported yet */
+ CHAN60G(4, 0),
};
+static int wil_num_supported_channels(struct wil6210_priv *wil)
+{
+ int num_channels = ARRAY_SIZE(wil_60ghz_channels);
+
+ if (!test_bit(WMI_FW_CAPABILITY_CHANNEL_4, wil->fw_capabilities))
+ num_channels--;
+
+ return num_channels;
+}
+
+void update_supported_bands(struct wil6210_priv *wil)
+{
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+
+ wil_dbg_misc(wil, "update supported bands");
+
+ wiphy->bands[NL80211_BAND_60GHZ]->n_channels =
+ wil_num_supported_channels(wil);
+}
+
/* Vendor id to be used in vendor specific command and events
* to user space.
* NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID,
@@ -199,7 +219,9 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_RESP >> 4) |
BIT(IEEE80211_STYPE_ASSOC_RESP >> 4) |
- BIT(IEEE80211_STYPE_DISASSOC >> 4),
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_RESP >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
@@ -871,6 +893,26 @@ static void wil_print_crypto(struct wil6210_priv *wil,
c->control_port_no_encrypt);
}
+static const char *
+wil_get_auth_type_name(enum nl80211_auth_type auth_type)
+{
+ switch (auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ return "OPEN_SYSTEM";
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ return "SHARED_KEY";
+ case NL80211_AUTHTYPE_FT:
+ return "FT";
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ return "NETWORK_EAP";
+ case NL80211_AUTHTYPE_SAE:
+ return "SAE";
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ return "AUTOMATIC";
+ default:
+ return "unknown";
+ }
+}
static void wil_print_connect_params(struct wil6210_priv *wil,
struct cfg80211_connect_params *sme)
{
@@ -884,11 +926,73 @@ static void wil_print_connect_params(struct wil6210_priv *wil,
if (sme->ssid)
print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET,
16, 1, sme->ssid, sme->ssid_len, true);
+ if (sme->prev_bssid)
+ wil_info(wil, " Previous BSSID=%pM\n", sme->prev_bssid);
+ wil_info(wil, " Auth Type: %s\n",
+ wil_get_auth_type_name(sme->auth_type));
wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open");
wil_info(wil, " PBSS: %d\n", sme->pbss);
wil_print_crypto(wil, &sme->crypto);
}
+static int wil_ft_connect(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(ndev);
+ struct wmi_ft_auth_cmd auth_cmd;
+ int rc;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) {
+ wil_err(wil, "FT: FW does not support FT roaming\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!sme->prev_bssid) {
+ wil_err(wil, "FT: prev_bssid was not set\n");
+ return -EINVAL;
+ }
+
+ if (ether_addr_equal(sme->prev_bssid, sme->bssid)) {
+ wil_err(wil, "FT: can not roam to same AP\n");
+ return -EINVAL;
+ }
+
+ if (!test_bit(wil_vif_fwconnected, vif->status)) {
+ wil_err(wil, "FT: roam while not connected\n");
+ return -EINVAL;
+ }
+
+ if (vif->privacy != sme->privacy) {
+ wil_err(wil, "FT: privacy mismatch, current (%d) roam (%d)\n",
+ vif->privacy, sme->privacy);
+ return -EINVAL;
+ }
+
+ if (sme->pbss) {
+ wil_err(wil, "FT: roam is not valid for PBSS\n");
+ return -EINVAL;
+ }
+
+ memset(&auth_cmd, 0, sizeof(auth_cmd));
+ auth_cmd.channel = sme->channel->hw_value - 1;
+ ether_addr_copy(auth_cmd.bssid, sme->bssid);
+
+ wil_info(wil, "FT: roaming\n");
+
+ set_bit(wil_vif_ft_roam, vif->status);
+ rc = wmi_send(wil, WMI_FT_AUTH_CMDID, vif->mid,
+ &auth_cmd, sizeof(auth_cmd));
+ if (rc == 0)
+ mod_timer(&vif->connect_timer,
+ jiffies + msecs_to_jiffies(5000));
+ else
+ clear_bit(wil_vif_ft_roam, vif->status);
+
+ return rc;
+}
+
static int wil_cfg80211_connect(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_connect_params *sme)
@@ -901,14 +1005,23 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
const u8 *rsn_eid;
int ch;
int rc = 0;
+ bool is_ft_roam = false;
+ u8 network_type;
enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
wil_dbg_misc(wil, "connect, mid=%d\n", vif->mid);
wil_print_connect_params(wil, sme);
- if (test_bit(wil_vif_fwconnecting, vif->status) ||
+ if (sme->auth_type == NL80211_AUTHTYPE_FT)
+ is_ft_roam = true;
+ if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC &&
test_bit(wil_vif_fwconnected, vif->status))
- return -EALREADY;
+ is_ft_roam = true;
+
+ if (!is_ft_roam)
+ if (test_bit(wil_vif_fwconnecting, vif->status) ||
+ test_bit(wil_vif_fwconnected, vif->status))
+ return -EALREADY;
if (sme->ie_len > WMI_MAX_IE_LEN) {
wil_err(wil, "IE too large (%td bytes)\n", sme->ie_len);
@@ -918,8 +1031,13 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
rsn_eid = sme->ie ?
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
NULL;
- if (sme->privacy && !rsn_eid)
+ if (sme->privacy && !rsn_eid) {
wil_info(wil, "WSC connection\n");
+ if (is_ft_roam) {
+ wil_err(wil, "No WSC with FT roam\n");
+ return -EINVAL;
+ }
+ }
if (sme->pbss)
bss_type = IEEE80211_BSS_TYPE_PBSS;
@@ -941,6 +1059,45 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
vif->privacy = sme->privacy;
vif->pbss = sme->pbss;
+ rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
+ if (rc)
+ goto out;
+
+ switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
+ case WLAN_CAPABILITY_DMG_TYPE_AP:
+ network_type = WMI_NETTYPE_INFRA;
+ break;
+ case WLAN_CAPABILITY_DMG_TYPE_PBSS:
+ network_type = WMI_NETTYPE_P2P;
+ break;
+ default:
+ wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
+ bss->capability);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ ch = bss->channel->hw_value;
+ if (ch == 0) {
+ wil_err(wil, "BSS at unknown frequency %dMhz\n",
+ bss->channel->center_freq);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (is_ft_roam) {
+ if (network_type != WMI_NETTYPE_INFRA) {
+ wil_err(wil, "FT: Unsupported BSS type, capability= 0x%04x\n",
+ bss->capability);
+ rc = -EINVAL;
+ goto out;
+ }
+ rc = wil_ft_connect(wiphy, ndev, sme);
+ if (rc == 0)
+ vif->bss = bss;
+ goto out;
+ }
+
if (vif->privacy) {
/* For secure assoc, remove old keys */
rc = wmi_del_cipher_key(vif, 0, bss->bssid,
@@ -957,28 +1114,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
}
}
- /* WMI_SET_APPIE_CMD. ie may contain rsn info as well as other info
- * elements. Send it also in case it's empty, to erase previously set
- * ies in FW.
- */
- rc = wmi_set_ie(vif, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
- if (rc)
- goto out;
-
/* WMI_CONNECT_CMD */
memset(&conn, 0, sizeof(conn));
- switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
- case WLAN_CAPABILITY_DMG_TYPE_AP:
- conn.network_type = WMI_NETTYPE_INFRA;
- break;
- case WLAN_CAPABILITY_DMG_TYPE_PBSS:
- conn.network_type = WMI_NETTYPE_P2P;
- break;
- default:
- wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
- bss->capability);
- goto out;
- }
+ conn.network_type = network_type;
if (vif->privacy) {
if (rsn_eid) { /* regular secure connection */
conn.dot11_auth_mode = WMI_AUTH11_SHARED;
@@ -998,14 +1136,6 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
conn.ssid_len = min_t(u8, ssid_eid[1], 32);
memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
-
- ch = bss->channel->hw_value;
- if (ch == 0) {
- wil_err(wil, "BSS at unknown frequency %dMhz\n",
- bss->channel->center_freq);
- rc = -EOPNOTSUPP;
- goto out;
- }
conn.channel = ch - 1;
ether_addr_copy(conn.bssid, bss->bssid);
@@ -1201,9 +1331,9 @@ wil_find_sta_by_key_usage(struct wil6210_priv *wil, u8 mid,
return &wil->sta[cid];
}
-static void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
- struct wil_sta_info *cs,
- struct key_params *params)
+void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
+ struct wil_sta_info *cs,
+ struct key_params *params)
{
struct wil_tid_crypto_rx_single *cc;
int tid;
@@ -1286,13 +1416,19 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
params->seq_len, params->seq);
if (IS_ERR(cs)) {
- wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n",
- mac_addr, key_usage_str[key_usage], key_index,
- params->seq_len, params->seq);
- return -EINVAL;
+ /* in FT, sta info may not be available as add_key may be
+ * sent by host before FW sends WMI_CONNECT_EVENT
+ */
+ if (!test_bit(wil_vif_ft_roam, vif->status)) {
+ wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n",
+ mac_addr, key_usage_str[key_usage], key_index,
+ params->seq_len, params->seq);
+ return -EINVAL;
+ }
}
- wil_del_rx_key(key_index, key_usage, cs);
+ if (!IS_ERR(cs))
+ wil_del_rx_key(key_index, key_usage, cs);
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
@@ -1305,7 +1441,10 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
rc = wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len,
params->key, key_usage);
- if (!rc)
+ if (!rc && !IS_ERR(cs))
+ /* in FT set crypto will take place upon receiving
+ * WMI_RING_EN_EVENTID event
+ */
wil_set_crypto_rx(key_index, key_usage, cs, params);
return rc;
@@ -1468,21 +1607,36 @@ static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
}
/* internal functions for device reset and starting AP */
-static int _wil_cfg80211_set_ies(struct wil6210_vif *vif,
- struct cfg80211_beacon_data *bcon)
+static u8 *
+_wil_cfg80211_get_proberesp_ies(const u8 *proberesp, u16 proberesp_len,
+ u16 *ies_len)
{
- int rc;
- u16 len = 0, proberesp_len = 0;
- u8 *ies = NULL, *proberesp = NULL;
+ u8 *ies = NULL;
- if (bcon->probe_resp) {
+ if (proberesp) {
struct ieee80211_mgmt *f =
- (struct ieee80211_mgmt *)bcon->probe_resp;
+ (struct ieee80211_mgmt *)proberesp;
size_t hlen = offsetof(struct ieee80211_mgmt,
u.probe_resp.variable);
- proberesp = f->u.probe_resp.variable;
- proberesp_len = bcon->probe_resp_len - hlen;
+
+ ies = f->u.probe_resp.variable;
+ if (ies_len)
+ *ies_len = proberesp_len - hlen;
}
+
+ return ies;
+}
+
+static int _wil_cfg80211_set_ies(struct wil6210_vif *vif,
+ struct cfg80211_beacon_data *bcon)
+{
+ int rc;
+ u16 len = 0, proberesp_len = 0;
+ u8 *ies = NULL, *proberesp;
+
+ proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp,
+ bcon->probe_resp_len,
+ &proberesp_len);
rc = _wil_cfg80211_merge_extra_ies(proberesp,
proberesp_len,
bcon->proberesp_ies,
@@ -1526,6 +1680,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
struct wireless_dev *wdev = ndev->ieee80211_ptr;
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO);
+ u16 proberesp_len = 0;
+ u8 *proberesp;
+ bool ft = false;
if (pbss)
wmi_nettype = WMI_NETTYPE_P2P;
@@ -1538,6 +1695,25 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
wil_set_recovery_state(wil, fw_recovery_idle);
+ proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp,
+ bcon->probe_resp_len,
+ &proberesp_len);
+ /* check whether the probe response IEs include a Mobility Domain element */
+ if ((proberesp && proberesp_len > 0 &&
+ cfg80211_find_ie(WLAN_EID_MOBILITY_DOMAIN,
+ proberesp,
+ proberesp_len)))
+ ft = true;
+
+ if (ft) {
+ if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING,
+ wil->fw_capabilities)) {
+ wil_err(wil, "FW does not support FT roaming\n");
+ return -ENOTSUPP;
+ }
+ set_bit(wil_vif_ft_roam, vif->status);
+ }
+
mutex_lock(&wil->mutex);
if (!wil_has_other_active_ifaces(wil, ndev, true, false)) {
@@ -1699,6 +1875,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
mutex_lock(&wil->mutex);
wmi_pcp_stop(vif);
+ clear_bit(wil_vif_ft_roam, vif->status);
if (last)
__wil_down(wil);
@@ -1718,8 +1895,9 @@ static int wil_cfg80211_add_station(struct wiphy *wiphy,
struct wil6210_vif *vif = ndev_to_vif(dev);
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "add station %pM aid %d mid %d\n",
- mac, params->aid, vif->mid);
+ wil_dbg_misc(wil, "add station %pM aid %d mid %d mask 0x%x set 0x%x\n",
+ mac, params->aid, vif->mid,
+ params->sta_flags_mask, params->sta_flags_set);
if (!disable_ap_sme) {
wil_err(wil, "not supported with AP SME enabled\n");
@@ -2040,6 +2218,54 @@ wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static int
+wil_cfg80211_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_update_ft_ies_params *ftie)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil6210_vif *vif = ndev_to_vif(dev);
+ struct cfg80211_bss *bss;
+ struct wmi_ft_reassoc_cmd reassoc;
+ int rc = 0;
+
+ wil_dbg_misc(wil, "update ft ies, mid=%d\n", vif->mid);
+ wil_hex_dump_misc("FT IE ", DUMP_PREFIX_OFFSET, 16, 1,
+ ftie->ie, ftie->ie_len, true);
+
+ if (!test_bit(WMI_FW_CAPABILITY_FT_ROAMING, wil->fw_capabilities)) {
+ wil_err(wil, "FW does not support FT roaming\n");
+ return -EOPNOTSUPP;
+ }
+
+ rc = wmi_update_ft_ies(vif, ftie->ie_len, ftie->ie);
+ if (rc)
+ return rc;
+
+ if (!test_bit(wil_vif_ft_roam, vif->status))
+ /* vif is not roaming */
+ return 0;
+
+ /* wil_vif_ft_roam is set. wil_cfg80211_update_ft_ies is used as
+ * a trigger for reassoc
+ */
+
+ bss = vif->bss;
+ if (!bss) {
+ wil_err(wil, "FT: bss is NULL\n");
+ return -EINVAL;
+ }
+
+ memset(&reassoc, 0, sizeof(reassoc));
+ ether_addr_copy(reassoc.bssid, bss->bssid);
+
+ rc = wmi_send(wil, WMI_FT_REASSOC_CMDID, vif->mid,
+ &reassoc, sizeof(reassoc));
+ if (rc)
+ wil_err(wil, "FT: reassoc failed (%d)\n", rc);
+
+ return rc;
+}
+
static const struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@@ -2075,6 +2301,7 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
.resume = wil_cfg80211_resume,
.sched_scan_start = wil_cfg80211_sched_scan_start,
.sched_scan_stop = wil_cfg80211_sched_scan_stop,
+ .update_ft_ies = wil_cfg80211_update_ft_ies,
};
static void wil_wiphy_init(struct wiphy *wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 51c3330bc316..aa50813a0595 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -416,8 +416,8 @@ static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
- wil_debugfs_iomem_x32_set, "0x%08llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
+ wil_debugfs_iomem_x32_set, "0x%08llx\n");
static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
umode_t mode,
@@ -432,7 +432,8 @@ static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
data->wil = wil;
data->offset = value;
- file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32);
+ file = debugfs_create_file_unsafe(name, mode, parent, data,
+ &fops_iomem_x32);
if (!IS_ERR_OR_NULL(file))
wil->dbg_data.iomem_data_count++;
@@ -451,14 +452,15 @@ static int wil_debugfs_ulong_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
- wil_debugfs_ulong_set, "0x%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
+ wil_debugfs_ulong_set, "0x%llx\n");
static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
struct dentry *parent,
ulong *value)
{
- return debugfs_create_file(name, mode, parent, value, &wil_fops_ulong);
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ &wil_fops_ulong);
}
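
The two debugfs hunks above swap DEFINE_SIMPLE_ATTRIBUTE for DEFINE_DEBUGFS_ATTRIBUTE and pair it with debugfs_create_file_unsafe(). Despite the name, the _unsafe variant is the removal-safe one here: an attribute defined this way is a proxying fops, and the debugfs core brackets each access with its own file-reference protection. A sketch with an invented attribute:

#include <linux/debugfs.h>

static unsigned long demo_value;

static int demo_get(void *data, u64 *val)
{
	*val = *(unsigned long *)data;
	return 0;
}

static int demo_set(void *data, u64 val)
{
	*(unsigned long *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "0x%llx\n");

static void demo_register(struct dentry *parent)
{
	debugfs_create_file_unsafe("demo", 0644, parent, &demo_value,
				   &demo_fops);
}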
/**
@@ -725,32 +727,6 @@ struct dentry *wil_debugfs_create_ioblob(const char *name,
return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob);
}
-/*---reset---*/
-static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
-{
- struct wil6210_priv *wil = file->private_data;
- struct net_device *ndev = wil->main_ndev;
-
- /**
- * BUG:
- * this code does NOT sync device state with the rest of system
- * use with care, debug only!!!
- */
- rtnl_lock();
- dev_close(ndev);
- ndev->flags &= ~IFF_UP;
- rtnl_unlock();
- wil_reset(wil, true);
-
- return len;
-}
-
-static const struct file_operations fops_reset = {
- .write = wil_write_file_reset,
- .open = simple_open,
-};
-
/*---write channel 1..4 to rxon for it, 0 to rxoff---*/
static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
@@ -1263,6 +1239,9 @@ static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data)
int num_active;
int num_free;
+ if (!rbm->buff_arr)
+ return -EINVAL;
+
seq_printf(s, " size = %zu\n", rbm->size);
seq_printf(s, " free_list_empty_cnt = %lu\n",
rbm->free_list_empty_cnt);
@@ -1436,7 +1415,7 @@ static int wil_freq_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
- u16 freq = wdev->chandef.chan ? wdev->chandef.chan->center_freq : 0;
+ u32 freq = wdev->chandef.chan ? wdev->chandef.chan->center_freq : 0;
seq_printf(s, "Freq = %d\n", freq);
@@ -1695,6 +1674,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
char *status = "unknown";
u8 aid = 0;
u8 mid;
+ bool sta_connected = false;
switch (p->status) {
case wil_sta_unused:
@@ -1709,8 +1689,20 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
break;
}
mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
- seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i, p->addr, status,
- mid, aid);
+ if (mid < wil->max_vifs) {
+ struct wil6210_vif *vif = wil->vifs[mid];
+
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
+ p->status == wil_sta_connected)
+ sta_connected = true;
+ }
+ /* print roam counter only for connected stations */
+ if (sta_connected)
+ seq_printf(s, "[%d] %pM connected (roam counter %d) MID %d AID %d\n",
+ i, p->addr, p->stats.ft_roams, mid, aid);
+ else
+ seq_printf(s, "[%d] %pM %s MID %d AID %d\n", i,
+ p->addr, status, mid, aid);
if (p->status == wil_sta_connected) {
spin_lock_bh(&p->tid_rx_lock);
@@ -2451,7 +2443,6 @@ static const struct {
{"desc", 0444, &fops_txdesc},
{"bf", 0444, &fops_bf},
{"mem_val", 0644, &fops_memread},
- {"reset", 0244, &fops_reset},
{"rxon", 0244, &fops_rxon},
{"tx_mgmt", 0244, &fops_txmgmt},
{"wmi_send", 0244, &fops_wmi},
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 7debed6bec06..398900a1c29e 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -223,6 +223,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
struct net_device *ndev = vif_to_ndev(vif);
struct wireless_dev *wdev = vif_to_wdev(vif);
struct wil_sta_info *sta = &wil->sta[cid];
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
might_sleep();
wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n",
@@ -273,7 +274,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
/* release vrings */
- for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
+ for (i = min_ring_id; i < ARRAY_SIZE(wil->ring_tx); i++) {
if (wil->ring2cid_tid[i][0] == cid)
wil_ring_fini_tx(wil, i);
}
@@ -360,6 +361,8 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
vif->bss = NULL;
}
clear_bit(wil_vif_fwconnecting, vif->status);
+ clear_bit(wil_vif_ft_roam, vif->status);
+
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
@@ -604,8 +607,10 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->sta[i].mid = U8_MAX;
}
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++)
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
spin_lock_init(&wil->ring_tx_data[i].lock);
+ wil->ring2cid_tid[i][0] = WIL6210_MAX_CID;
+ }
mutex_init(&wil->mutex);
mutex_init(&wil->vif_mutex);
@@ -653,8 +658,6 @@ int wil_priv_init(struct wil6210_priv *wil)
/* edma configuration can be updated via debugfs before allocation */
wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
- wil->use_compressed_rx_status = true;
- wil->use_rx_hw_reordering = true;
wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
/* Rx status ring size should be bigger than the number of RX buffers
@@ -1154,6 +1157,8 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
wil->max_agg_wsize = WIL_MAX_AGG_WSIZE;
wil->max_ampdu_size = WIL_MAX_AMPDU_SIZE;
}
+
+ update_supported_bands(wil);
}
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 89119e7facd0..c8c6613371d1 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -108,6 +108,7 @@ int wil_set_capabilities(struct wil6210_priv *wil)
set_bit(hw_capa_no_flash, wil->hw_capa);
wil->use_enhanced_dma_hw = true;
wil->use_rx_hw_reordering = true;
+ wil->use_compressed_rx_status = true;
wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN :
WIL_FW_NAME_TALYN;
if (wil_fw_verify_file_exists(wil, wil_fw_name))
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 3a4194779ddf..75fe9323547c 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -190,7 +190,7 @@ out:
static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
{
int rc = 0;
- unsigned long start, data_comp_to;
+ unsigned long data_comp_to;
wil_dbg_pm(wil, "suspend keep radio on\n");
@@ -232,7 +232,6 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
}
/* Wait for completion of the pending RX packets */
- start = jiffies;
data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
while (!wil->txrx_ops.is_rx_idle(wil)) {
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index b608aa16b4f1..983bd001b53b 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -382,11 +382,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
}
/* apply */
- r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
- spin_lock_bh(&sta->tid_rx_lock);
- wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
- sta->tid_rx[tid] = r;
- spin_unlock_bh(&sta->tid_rx_lock);
+ if (!wil->use_rx_hw_reordering) {
+ r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
+ spin_lock_bh(&sta->tid_rx_lock);
+ wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
+ sta->tid_rx[tid] = r;
+ spin_unlock_bh(&sta->tid_rx_lock);
+ }
out:
return rc;
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 6a7943e487fb..cc5f263cc965 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -77,8 +77,9 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
{
int i;
unsigned long data_comp_to;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
struct wil_ring *vring = &wil->ring_tx[i];
int vring_index = vring - wil->ring_tx;
struct wil_ring_tx_data *txdata =
@@ -765,7 +766,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
return;
}
- if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
+ if (wdev->iftype == NL80211_IFTYPE_STATION) {
+ if (mcast && ether_addr_equal(eth->h_source, ndev->dev_addr)) {
+ /* mcast packet looped back to us */
+ rc = GRO_DROP;
+ dev_kfree_skb(skb);
+ goto stats;
+ }
+ } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
if (mcast) {
/* send multicast frames both to higher layers in
* local net stack and back to the wireless medium
@@ -1051,6 +1059,88 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
return rc;
}
+static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
+ int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ int rc;
+ struct wmi_vring_cfg_cmd cmd = {
+ .action = cpu_to_le32(WMI_VRING_CMD_MODIFY),
+ .vring_cfg = {
+ .tx_sw_ring = {
+ .max_mpdu_size =
+ cpu_to_le16(wil_mtu2macbuf(mtu_max)),
+ .ring_size = 0,
+ },
+ .ringid = ring_id,
+ .cidxtid = mk_cidxtid(cid, tid),
+ .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+ .mac_ctrl = 0,
+ .to_resolution = 0,
+ .agg_max_wsize = 0,
+ .schd_params = {
+ .priority = cpu_to_le16(0),
+ .timeslot_us = cpu_to_le16(0xfff),
+ },
+ },
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_vring_cfg_done_event cmd;
+ } __packed reply = {
+ .cmd = {.status = WMI_FW_STATUS_FAILURE},
+ };
+ struct wil_ring *vring = &wil->ring_tx[ring_id];
+ struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
+
+ wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id,
+ cid, tid);
+ lockdep_assert_held(&wil->mutex);
+
+ if (!vring->va) {
+ wil_err(wil, "Tx ring [%d] not allocated\n", ring_id);
+ return -EINVAL;
+ }
+
+ if (wil->ring2cid_tid[ring_id][0] != cid ||
+ wil->ring2cid_tid[ring_id][1] != tid) {
+ wil_err(wil, "ring info does not match cid=%u tid=%u\n",
+ wil->ring2cid_tid[ring_id][0],
+ wil->ring2cid_tid[ring_id][1]);
+ }
+
+ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+
+ rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
+ WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ goto fail;
+
+ if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "Tx modify failed, status 0x%02x\n",
+ reply.cmd.status);
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ /* set BA aggregation window size to 0 to force a new BA with the
+ * new AP
+ */
+ txdata->agg_wsize = 0;
+ if (txdata->dot1x_open && agg_wsize >= 0)
+ wil_addba_tx_request(wil, ring_id, agg_wsize);
+
+ return 0;
+fail:
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
+ txdata->enabled = 0;
+ spin_unlock_bh(&txdata->lock);
+ wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
+ wil->ring2cid_tid[ring_id][1] = 0;
+ return rc;
+}
+
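
wil_tx_vring_modify() above pre-loads reply.cmd.status with WMI_FW_STATUS_FAILURE before issuing wmi_call(), so a reply that never arrives, or arrives short, takes the same error path as an explicit firmware refusal. A hypothetical reduction of that seeding pattern:

enum { FW_SUCCESS = 0, FW_FAILURE = 1 };	/* mirrors the WMI status values */

struct demo_reply { unsigned char status; };

static int demo_cmd(int (*xfer)(void *reply, unsigned int len))
{
	struct demo_reply r = { .status = FW_FAILURE };

	if (xfer(&r, sizeof(r)))	/* transport error or timeout */
		return -1;
	return r.status == FW_SUCCESS ? 0 : -1;
}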
int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
{
struct wil6210_priv *wil = vif_to_wil(vif);
@@ -1935,6 +2025,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
bool check_stop)
{
int i;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
if (unlikely(!vif))
return;
@@ -1967,7 +2058,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
return;
/* check wake */
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
struct wil_ring *cur_ring = &wil->ring_tx[i];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
@@ -2272,6 +2363,7 @@ void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
wil->txrx_ops.tx_init = wil_tx_init;
wil->txrx_ops.tx_fini = wil_tx_fini;
+ wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify;
/* RX ops */
wil->txrx_ops.rx_init = wil_rx_init;
wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index bca61cb44c37..2bbae75b9a84 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -279,9 +279,6 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
u16 buff_id;
*d = *_d;
- pa = wil_rx_desc_get_addr_edma(&d->dma);
- dmalen = le16_to_cpu(d->dma.length);
- dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
/* Extract the SKB from the rx_buff management array */
buff_id = __le16_to_cpu(d->mac.buff_id);
@@ -291,10 +288,15 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
}
skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
- else
+ } else {
+ pa = wil_rx_desc_get_addr_edma(&d->dma);
+ dmalen = le16_to_cpu(d->dma.length);
+ dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
+
kfree_skb(skb);
+ }
/* Move the buffer from the active to the free list */
list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
@@ -745,6 +747,16 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
return rc;
}
+static int wil_tx_ring_modify_edma(struct wil6210_vif *vif, int ring_id,
+ int cid, int tid)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil_err(wil, "ring modify is not supported for EDMA\n");
+
+ return -EOPNOTSUPP;
+}
+
/* This function is used only for RX SW reorder */
static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
struct sk_buff *skb, struct wil_net_stats *stats)
@@ -906,6 +918,9 @@ again:
wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
if (!skb) {
wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+ /* Move the buffer from the active list to the free list */
+ list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
goto again;
}
@@ -1595,6 +1610,7 @@ void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
+ wil->txrx_ops.tx_ring_modify = wil_tx_ring_modify_edma;
/* RX ops */
wil->txrx_ops.rx_init = wil_rx_init_edma;
wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 17c294b1ead1..abb82018d3b4 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -449,6 +449,15 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
*tid = (cidxtid >> 4) & 0xf;
}
+/**
+ * wil_cid_valid - check cid is valid
+ * @cid: CID value
+ */
+static inline bool wil_cid_valid(u8 cid)
+{
+ return cid < WIL6210_MAX_CID;
+}
+
struct wil6210_mbox_ring {
u32 base;
u16 entry_size; /* max. size of mbox entry, incl. all headers */
@@ -577,6 +586,7 @@ struct wil_net_stats {
unsigned long rx_csum_err;
u16 last_mcs_rx;
u64 rx_per_mcs[WIL_MCS_MAX + 1];
+ u32 ft_roams; /* relevant in STA mode */
};
/**
@@ -599,6 +609,8 @@ struct wil_txrx_ops {
struct wil_ctx *ctx);
int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif,
struct wil_ring *ring, struct sk_buff *skb);
+ int (*tx_ring_modify)(struct wil6210_vif *vif, int ring_id,
+ int cid, int tid);
irqreturn_t (*irq_tx)(int irq, void *cookie);
/* RX ops */
int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
@@ -821,6 +833,7 @@ extern u8 led_polarity;
enum wil6210_vif_status {
wil_vif_fwconnecting,
wil_vif_fwconnected,
+ wil_vif_ft_roam,
wil_vif_status_last /* keep last */
};
@@ -1204,6 +1217,7 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
+int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
@@ -1319,6 +1333,9 @@ void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil);
void wil_rx_handle(struct wil6210_priv *wil, int *quota);
void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil);
+void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
+ struct wil_sta_info *cs,
+ struct key_params *params);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
@@ -1370,4 +1387,6 @@ int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
u8 tid, u8 token, u16 status, bool amsdu,
u16 agg_wsize, u16 timeout);
+void update_supported_bands(struct wil6210_priv *wil);
+
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 42c02a20ec97..4859f0e43658 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -227,6 +227,14 @@ struct blink_on_off_time led_blink_time[] = {
{WIL_LED_BLINK_ON_FAST_MS, WIL_LED_BLINK_OFF_FAST_MS},
};
+struct auth_no_hdr {
+ __le16 auth_alg;
+ __le16 auth_transaction;
+ __le16 status_code;
+ /* possibly followed by Challenge text */
+ u8 variable[0];
+} __packed;
+
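
struct auth_no_hdr describes an 802.11 authentication frame body with the MAC header stripped; the FT handlers added later in this file must validate the firmware-supplied length before touching its fields. A kernel-style sketch of that check, using only definitions from linux/ieee80211.h:

#include <linux/errno.h>
#include <linux/ieee80211.h>

struct demo_auth_hdr {
	__le16 auth_alg;
	__le16 auth_transaction;
	__le16 status_code;
	u8 variable[];
} __packed;

/* Reject anything shorter than the fixed header before reading fields,
 * then verify the algorithm really is Fast Transition. */
static int demo_check_auth(const void *buf, size_t len)
{
	const struct demo_auth_hdr *a = buf;

	if (len < sizeof(*a))
		return -EINVAL;
	if (le16_to_cpu(a->auth_alg) != WLAN_AUTH_FT)
		return -EINVAL;
	return 0;
}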
u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
/**
@@ -468,6 +476,12 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_LINK_STATS_CMD";
case WMI_SW_TX_REQ_EXT_CMDID:
return "WMI_SW_TX_REQ_EXT_CMDID";
+ case WMI_FT_AUTH_CMDID:
+ return "WMI_FT_AUTH_CMD";
+ case WMI_FT_REASSOC_CMDID:
+ return "WMI_FT_REASSOC_CMD";
+ case WMI_UPDATE_FT_IES_CMDID:
+ return "WMI_UPDATE_FT_IES_CMD";
default:
return "Untracked CMD";
}
@@ -606,6 +620,12 @@ static const char *eventid2name(u16 eventid)
return "WMI_LINK_STATS_CONFIG_DONE_EVENT";
case WMI_LINK_STATS_EVENTID:
return "WMI_LINK_STATS_EVENT";
+ case WMI_COMMAND_NOT_SUPPORTED_EVENTID:
+ return "WMI_COMMAND_NOT_SUPPORTED_EVENT";
+ case WMI_FT_AUTH_STATUS_EVENTID:
+ return "WMI_FT_AUTH_STATUS_EVENT";
+ case WMI_FT_REASSOC_STATUS_EVENTID:
+ return "WMI_FT_REASSOC_STATUS_EVENT";
default:
return "Untracked EVENT";
}
@@ -1156,6 +1176,9 @@ static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
struct wmi_ring_en_event *evt = d;
u8 vri = evt->ring_index;
struct wireless_dev *wdev = vif_to_wdev(vif);
+ struct wil_sta_info *sta;
+ u8 cid;
+ struct key_params params;
wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);
@@ -1164,13 +1187,33 @@ static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
return;
}
- if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme)
- /* in AP mode with disable_ap_sme, this is done by
- * wil_cfg80211_change_station()
+ if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme ||
+ test_bit(wil_vif_ft_roam, vif->status))
+ /* in AP mode with disable_ap_sme, outside of FT roaming,
+ * this is done by wil_cfg80211_change_station()
*/
wil->ring_tx_data[vri].dot1x_open = true;
if (vri == vif->bcast_ring) /* no BA for bcast */
return;
+
+ cid = wil->ring2cid_tid[vri][0];
+ if (!wil_cid_valid(cid)) {
+ wil_err(wil, "invalid cid %d for vring %d\n", cid, vri);
+ return;
+ }
+
+ /* In FT mode we receive the key but do not store it, since it arrives
+ * before the WMI_CONNECT_EVENT from FW.
+ * wil_set_crypto_rx is called here to reset the security PN
+ */
+ sta = &wil->sta[cid];
+ if (test_bit(wil_vif_ft_roam, vif->status)) {
+ memset(&params, 0, sizeof(params));
+ wil_set_crypto_rx(0, WMI_KEY_USE_PAIRWISE, sta, &params);
+ if (wdev->iftype != NL80211_IFTYPE_AP)
+ clear_bit(wil_vif_ft_roam, vif->status);
+ }
+
if (agg_wsize >= 0)
wil_addba_tx_request(wil, vri, agg_wsize);
}
@@ -1462,6 +1505,271 @@ wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len)
}
/**
+ * find cid and ringid for the station vif
+ *
+ * Return an error if other interface types are used or the ring was not found.
+ */
+static int wil_find_cid_ringid_sta(struct wil6210_priv *wil,
+ struct wil6210_vif *vif,
+ int *cid,
+ int *ringid)
+{
+ struct wil_ring *ring;
+ struct wil_ring_tx_data *txdata;
+ int min_ring_id = wil_get_min_tx_ring_id(wil);
+ int i;
+ u8 lcid;
+
+ if (!(vif->wdev.iftype == NL80211_IFTYPE_STATION ||
+ vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+ wil_err(wil, "invalid interface type %d\n", vif->wdev.iftype);
+ return -EINVAL;
+ }
+
+ /* In STA mode we expect a single Tx ring, used for the AP we are
+ * connected to; find it and return the cid associated with it.
+ */
+ for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
+ ring = &wil->ring_tx[i];
+ txdata = &wil->ring_tx_data[i];
+ if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
+ continue;
+
+ lcid = wil->ring2cid_tid[i][0];
+ if (lcid >= WIL6210_MAX_CID) /* skip BCAST */
+ continue;
+
+ wil_dbg_wmi(wil, "find sta -> ringid %d cid %d\n", i, lcid);
+ *cid = lcid;
+ *ringid = i;
+ return 0;
+ }
+
+ wil_dbg_wmi(wil, "find sta cid while no rings active?\n");
+
+ return -ENOENT;
+}
+
+static void
+wmi_evt_auth_status(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wmi_ft_auth_status_event *data = d;
+ int ie_len = len - offsetof(struct wmi_ft_auth_status_event, ie_info);
+ int rc, cid = 0, ringid = 0;
+ struct cfg80211_ft_event_params ft;
+ u16 d_len;
+ /* auth_alg(u16) + auth_transaction(u16) + status_code(u16) */
+ const size_t auth_ie_offset = sizeof(u16) * 3;
+ struct auth_no_hdr *auth = (struct auth_no_hdr *)data->ie_info;
+
+ /* check the status */
+ if (ie_len >= 0 && data->status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "FT: auth failed. status %d\n", data->status);
+ goto fail;
+ }
+
+ if (ie_len < auth_ie_offset) {
+ wil_err(wil, "FT: auth event too short, len %d\n", len);
+ goto fail;
+ }
+
+ d_len = le16_to_cpu(data->ie_len);
+ if (d_len != ie_len) {
+ wil_err(wil,
+ "FT: auth ie length mismatch, d_len %d should be %d\n",
+ d_len, ie_len);
+ goto fail;
+ }
+
+ if (!test_bit(wil_vif_ft_roam, wil->status)) {
+ wil_err(wil, "FT: Not in roaming state\n");
+ goto fail;
+ }
+
+ if (le16_to_cpu(auth->auth_transaction) != 2) {
+ wil_err(wil, "FT: auth error. auth_transaction %d\n",
+ le16_to_cpu(auth->auth_transaction));
+ goto fail;
+ }
+
+ if (le16_to_cpu(auth->auth_alg) != WLAN_AUTH_FT) {
+ wil_err(wil, "FT: auth error. auth_alg %d\n",
+ le16_to_cpu(auth->auth_alg));
+ goto fail;
+ }
+
+ wil_dbg_wmi(wil, "FT: Auth to %pM successfully\n", data->mac_addr);
+ wil_hex_dump_wmi("FT Auth ies : ", DUMP_PREFIX_OFFSET, 16, 1,
+ data->ie_info, d_len, true);
+
+ /* find cid and ringid */
+ rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid);
+ if (rc) {
+ wil_err(wil, "No valid cid found\n");
+ goto fail;
+ }
+
+ if (vif->privacy) {
+ /* For secure assoc, remove old keys */
+ rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr,
+ WMI_KEY_USE_PAIRWISE);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
+ goto fail;
+ }
+ rc = wmi_del_cipher_key(vif, 0, wil->sta[cid].addr,
+ WMI_KEY_USE_RX_GROUP);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
+ goto fail;
+ }
+ }
+
+ memset(&ft, 0, sizeof(ft));
+ ft.ies = data->ie_info + auth_ie_offset;
+ ft.ies_len = d_len - auth_ie_offset;
+ ft.target_ap = data->mac_addr;
+ cfg80211_ft_event(ndev, &ft);
+
+ return;
+
+fail:
+ wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false);
+}
+
+static void
+wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+ struct wmi_ft_reassoc_status_event *data = d;
+ int ies_len = len - offsetof(struct wmi_ft_reassoc_status_event,
+ ie_info);
+ int rc = -ENOENT, cid = 0, ringid = 0;
+ int ch; /* channel number (primary) */
+ size_t assoc_req_ie_len = 0, assoc_resp_ie_len = 0;
+ u8 *assoc_req_ie = NULL, *assoc_resp_ie = NULL;
+ /* capinfo(u16) + listen_interval(u16) + current_ap mac addr + IEs */
+ const size_t assoc_req_ie_offset = sizeof(u16) * 2 + ETH_ALEN;
+ /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+ const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+ u16 d_len;
+ int freq;
+ struct cfg80211_roam_info info;
+
+ if (ies_len < 0) {
+ wil_err(wil, "ft reassoc event too short, len %d\n", len);
+ goto fail;
+ }
+
+ wil_dbg_wmi(wil, "Reasoc Status event: status=%d, aid=%d",
+ data->status, data->aid);
+ wil_dbg_wmi(wil, " mac_addr=%pM, beacon_ie_len=%d",
+ data->mac_addr, data->beacon_ie_len);
+ wil_dbg_wmi(wil, " reassoc_req_ie_len=%d, reassoc_resp_ie_len=%d",
+ le16_to_cpu(data->reassoc_req_ie_len),
+ le16_to_cpu(data->reassoc_resp_ie_len));
+
+ d_len = le16_to_cpu(data->beacon_ie_len) +
+ le16_to_cpu(data->reassoc_req_ie_len) +
+ le16_to_cpu(data->reassoc_resp_ie_len);
+ if (d_len != ies_len) {
+ wil_err(wil,
+ "ft reassoc ie length mismatch, d_len %d should be %d\n",
+ d_len, ies_len);
+ goto fail;
+ }
+
+ /* check the status */
+ if (data->status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "ft reassoc failed. status %d\n", data->status);
+ goto fail;
+ }
+
+ /* find cid and ringid */
+ rc = wil_find_cid_ringid_sta(wil, vif, &cid, &ringid);
+ if (rc) {
+ wil_err(wil, "No valid cid found\n");
+ goto fail;
+ }
+
+ ch = data->channel + 1;
+ wil_info(wil, "FT: Roam %pM channel [%d] cid %d aid %d\n",
+ data->mac_addr, ch, cid, data->aid);
+
+ wil_hex_dump_wmi("reassoc AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+ data->ie_info, len - sizeof(*data), true);
+
+ /* figure out IE's */
+ if (le16_to_cpu(data->reassoc_req_ie_len) > assoc_req_ie_offset) {
+ assoc_req_ie = &data->ie_info[assoc_req_ie_offset];
+ assoc_req_ie_len = le16_to_cpu(data->reassoc_req_ie_len) -
+ assoc_req_ie_offset;
+ }
+ if (le16_to_cpu(data->reassoc_resp_ie_len) <= assoc_resp_ie_offset) {
+ wil_err(wil, "FT: reassoc resp ie len is too short, len %d\n",
+ le16_to_cpu(data->reassoc_resp_ie_len));
+ goto fail;
+ }
+
+ assoc_resp_ie = &data->ie_info[le16_to_cpu(data->reassoc_req_ie_len) +
+ assoc_resp_ie_offset];
+ assoc_resp_ie_len = le16_to_cpu(data->reassoc_resp_ie_len) -
+ assoc_resp_ie_offset;
+
+ if (test_bit(wil_status_resetting, wil->status) ||
+ !test_bit(wil_status_fwready, wil->status)) {
+ wil_err(wil, "FT: status_resetting, cancel reassoc event\n");
+ /* no need for cleanup, wil_reset will do that */
+ return;
+ }
+
+ mutex_lock(&wil->mutex);
+
+ /* ring modify to set the ring for the roamed AP settings */
+ wil_dbg_wmi(wil,
+ "ft modify tx config for connection CID %d ring %d\n",
+ cid, ringid);
+
+ rc = wil->txrx_ops.tx_ring_modify(vif, ringid, cid, 0);
+ if (rc) {
+ wil_err(wil, "modify TX for CID %d MID %d ring %d failed (%d)\n",
+ cid, vif->mid, ringid, rc);
+ mutex_unlock(&wil->mutex);
+ goto fail;
+ }
+
+ /* Update the driver STA members with the new bss */
+ wil->sta[cid].aid = data->aid;
+ wil->sta[cid].stats.ft_roams++;
+ ether_addr_copy(wil->sta[cid].addr, vif->bss->bssid);
+ mutex_unlock(&wil->mutex);
+ del_timer_sync(&vif->connect_timer);
+
+ cfg80211_ref_bss(wiphy, vif->bss);
+ freq = ieee80211_channel_to_frequency(ch, NL80211_BAND_60GHZ);
+
+ memset(&info, 0, sizeof(info));
+ info.channel = ieee80211_get_channel(wiphy, freq);
+ info.bss = vif->bss;
+ info.req_ie = assoc_req_ie;
+ info.req_ie_len = assoc_req_ie_len;
+ info.resp_ie = assoc_resp_ie;
+ info.resp_ie_len = assoc_resp_ie_len;
+ cfg80211_roamed(ndev, &info, GFP_KERNEL);
+ vif->bss = NULL;
+
+ return;
+
+fail:
+ wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false);
+}
+
+/**
* Some events are ignored on purpose and need not be interpreted as
* "unhandled events"
*/
@@ -1492,6 +1800,8 @@ static const struct {
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
{WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
{WMI_LINK_STATS_EVENTID, wmi_evt_link_stats},
+ {WMI_FT_AUTH_STATUS_EVENTID, wmi_evt_auth_status},
+ {WMI_FT_REASSOC_STATUS_EVENTID, wmi_evt_reassoc_status},
};
/*
@@ -2086,6 +2396,40 @@ out:
return rc;
}
+int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ u16 len;
+ struct wmi_update_ft_ies_cmd *cmd;
+ int rc;
+
+ if (!ie)
+ ie_len = 0;
+
+ len = sizeof(struct wmi_update_ft_ies_cmd) + ie_len;
+ if (len < ie_len) {
+ wil_err(wil, "wraparound. ie len %d\n", ie_len);
+ return -EINVAL;
+ }
+
+ cmd = kzalloc(len, GFP_KERNEL);
+ if (!cmd) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ cmd->ie_len = cpu_to_le16(ie_len);
+ memcpy(cmd->ie_info, ie, ie_len);
+ rc = wmi_send(wil, WMI_UPDATE_FT_IES_CMDID, vif->mid, cmd, len);
+ kfree(cmd);
+
+out:
+ if (rc)
+ wil_err(wil, "update ft ies failed : %d\n", rc);
+
+ return rc;
+}
+
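
wmi_update_ft_ies() guards the u16 addition of header size and caller-supplied IE length with `len < ie_len`: for unsigned arithmetic, a sum wrapped around exactly when it is smaller than either addend. A standalone sketch of the check:

#include <stdbool.h>
#include <stdint.h>

/* Returns false (and writes nothing) when hdr + payload wraps u16. */
static bool u16_add_ok(uint16_t hdr, uint16_t payload, uint16_t *total)
{
	uint16_t sum = (uint16_t)(hdr + payload);

	if (sum < payload)
		return false;
	*total = sum;
	return true;
}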
/**
* wmi_rxon - turn radio on/off
* @on: turn on if true, off otherwise
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 139acb2caf92..b668758da994 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -103,6 +103,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_AMSDU = 23,
WMI_FW_CAPABILITY_RAW_MODE = 24,
WMI_FW_CAPABILITY_TX_REQ_EXT = 25,
+ WMI_FW_CAPABILITY_CHANNEL_4 = 26,
WMI_FW_CAPABILITY_MAX,
};
@@ -2369,6 +2370,7 @@ struct wmi_ft_reassoc_status_event {
__le16 beacon_ie_len;
__le16 reassoc_req_ie_len;
__le16 reassoc_resp_ie_len;
+ u8 reserved[4];
u8 ie_info[0];
} __packed;
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index b77d1a904f7e..9fc7c088a539 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -909,7 +909,7 @@ struct b43_wl {
/* Set this if we call ieee80211_register_hw() and check if we call
* ieee80211_unregister_hw(). */
- bool hw_registred;
+ bool hw_registered;
/* We can only have one operating interface (802.11 core)
* at a time. General information about this interface follows.
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 6b0e1ec346cb..dfc4c34298d4 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1432,7 +1432,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
goto out;
}
- if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
+ if (WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
/* If we get here, we have a real error with the queue
* full, but queues not stopped. */
b43err(dev->wl, "DMA queue overflow\n");
@@ -1518,13 +1518,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
} else {
/* More than a single header/data pair were missed.
- * Report this error, and reset the controller to
+ * Report this error. If running with open-source
+ * firmware, then reset the controller to
* revive operation.
*/
b43dbg(dev->wl,
"Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
ring->index, firstused, slot);
- b43_controller_restart(dev, "Out of order TX");
+ if (dev->fw.opensource)
+ b43_controller_restart(dev, "Out of order TX");
return;
}
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index b37e7391f55d..74be3c809225 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -2611,7 +2611,7 @@ start_ieee80211:
err = ieee80211_register_hw(wl->hw);
if (err)
goto err_one_core_detach;
- wl->hw_registred = true;
+ wl->hw_registered = true;
b43_leds_register(wl->current_dev);
/* Register HW RNG driver */
@@ -5493,13 +5493,11 @@ err_powerdown:
static void b43_one_core_detach(struct b43_bus_dev *dev)
{
struct b43_wldev *wldev;
- struct b43_wl *wl;
/* Do not cancel ieee80211-workqueue based work here.
* See comment in b43_remove(). */
wldev = b43_bus_get_wldev(dev);
- wl = wldev->wl;
b43_debugfs_remove_device(wldev);
b43_wireless_core_detach(wldev);
list_del(&wldev->list);
@@ -5610,7 +5608,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- wl->hw_registred = false;
+ wl->hw_registered = false;
hw->max_rates = 2;
SET_IEEE80211_DEV(hw, dev->dev);
if (is_valid_ether_addr(sprom->et1mac))
@@ -5693,7 +5691,7 @@ static void b43_bcma_remove(struct bcma_device *core)
B43_WARN_ON(!wl);
if (!wldev->fw.ucode.data)
return; /* NULL if firmware never loaded */
- if (wl->current_dev == wldev && wl->hw_registred) {
+ if (wl->current_dev == wldev && wl->hw_registered) {
b43_leds_stop(wldev);
ieee80211_unregister_hw(wl->hw);
}
@@ -5776,7 +5774,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
B43_WARN_ON(!wl);
if (!wldev->fw.ucode.data)
return; /* NULL if firmware never loaded */
- if (wl->current_dev == wldev && wl->hw_registred) {
+ if (wl->current_dev == wldev && wl->hw_registered) {
b43_leds_stop(wldev);
ieee80211_unregister_hw(wl->hw);
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index 2f0c64cef65f..1b1da7d83652 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -1149,7 +1149,7 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
return -ENOSPC;
}
- if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
+ if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
/* If we get here, we have a real error with the queue
* full, but queues not stopped. */
b43legacyerr(dev->wl, "DMA queue overflow\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index d2f788d88668..3e37c8cf82c6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -576,7 +576,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
if (pktq->qlen == 1)
err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
- pktq->next);
+ __skb_peek(pktq));
else if (!sdiodev->sg_support) {
glom_skb = brcmu_pkt_buf_get_skb(totlen);
if (!glom_skb)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 5444e6213d45..230a378c26fc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1649,6 +1649,14 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
case WLAN_AKM_SUITE_PSK:
val = WPA2_AUTH_PSK;
break;
+ case WLAN_AKM_SUITE_FT_8021X:
+ val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT;
+ if (sme->want_1x)
+ profile->use_fwsup = BRCMF_PROFILE_FWSUP_1X;
+ break;
+ case WLAN_AKM_SUITE_FT_PSK:
+ val = WPA2_AUTH_PSK | WPA2_AUTH_FT;
+ break;
default:
brcmf_err("invalid cipher group (%d)\n",
sme->crypto.cipher_group);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index cd3651069d0c..94044a7a6021 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -296,9 +296,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
/* Replace all newline/linefeed characters with space
* character
*/
- ptr = clmver;
- while ((ptr = strnchr(ptr, '\n', sizeof(buf))) != NULL)
- *ptr = ' ';
+ strreplace(clmver, '\n', ' ');
brcmf_dbg(INFO, "CLM version = %s\n", clmver);
}
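
strreplace() from the kernel's string library replaces every occurrence of one character in place and returns a pointer to the terminating NUL, which also retires the fixed sizeof(buf) bound the old strnchr() loop carried around. A userspace equivalent for illustration:

/* Same contract as the kernel helper: replace in place, return the
 * terminating NUL so callers can keep appending from there. */
static char *strreplace_sketch(char *s, char old, char new)
{
	for (; *s; s++)
		if (*s == old)
			*s = new;
	return s;
}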
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 8347da632a5b..4c5a3995dc35 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -178,7 +178,7 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
ifp->fwil_fwerr = false;
}
-#define MAX_CAPS_BUFFER_SIZE 512
+#define MAX_CAPS_BUFFER_SIZE 768
static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp)
{
char caps[MAX_CAPS_BUFFER_SIZE];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 3e9c4f2f5dd1..456a1bf008b3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -74,7 +74,7 @@
#define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000)
#define P2P_INVALID_CHANNEL -1
#define P2P_CHANNEL_SYNC_RETRY 5
-#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(1500)
+#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(450)
#define P2P_DEFAULT_SLEEP_TIME_VSDB 200
/* WiFi P2P Public Action Frame OUI Subtypes */
@@ -1134,7 +1134,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
{
struct afx_hdl *afx_hdl = &p2p->afx_hdl;
struct brcmf_cfg80211_vif *pri_vif;
- unsigned long duration;
s32 retry;
brcmf_dbg(TRACE, "Enter\n");
@@ -1150,7 +1149,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
* pending action frame tx is cancelled.
*/
retry = 0;
- duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT);
while ((retry < P2P_CHANNEL_SYNC_RETRY) &&
(afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) {
afx_hdl->is_listen = false;
@@ -1158,7 +1156,8 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
retry);
/* search peer on peer's listen channel */
schedule_work(&afx_hdl->afx_work);
- wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration);
+ wait_for_completion_timeout(&afx_hdl->act_frm_scan,
+ P2P_AF_FRM_SCAN_MAX_WAIT);
if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
(!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
&p2p->status)))
@@ -1171,7 +1170,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
afx_hdl->is_listen = true;
schedule_work(&afx_hdl->afx_work);
wait_for_completion_timeout(&afx_hdl->act_frm_scan,
- duration);
+ P2P_AF_FRM_SCAN_MAX_WAIT);
}
if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
(!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
@@ -1458,10 +1457,12 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
return 0;
if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
- if (e->status == BRCMF_E_STATUS_SUCCESS)
+ if (e->status == BRCMF_E_STATUS_SUCCESS) {
set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
&p2p->status);
- else {
+ if (!p2p->wait_for_offchan_complete)
+ complete(&p2p->send_af_done);
+ } else {
set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
/* If there is no ack, we don't need to wait for
* WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event
@@ -1512,6 +1513,17 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
p2p->af_sent_channel = le32_to_cpu(af_params->channel);
p2p->af_tx_sent_jiffies = jiffies;
+ if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) &&
+ p2p->af_sent_channel ==
+ ieee80211_frequency_to_channel(p2p->remain_on_channel.center_freq))
+ p2p->wait_for_offchan_complete = false;
+ else
+ p2p->wait_for_offchan_complete = true;
+
+ brcmf_dbg(TRACE, "Waiting for %s tx completion event\n",
+ (p2p->wait_for_offchan_complete) ?
+ "off-channel" : "on-channel");
+
timeout = wait_for_completion_timeout(&p2p->send_af_done,
P2P_AF_MAX_WAIT_TIME);
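
The new wait_for_offchan_complete flag closes a gap: when the action frame goes out on the very channel the device already occupies via remain-on-channel, firmware never raises the off-channel-complete event, so the ACTION_FRAME_COMPLETE handler (changed above) must complete send_af_done itself. A condensed restatement of the decision, with an illustrative helper name not taken from the patch:

static bool brcmf_p2p_tx_needs_offchan_wait(struct brcmf_p2p_info *p2p,
					    u32 af_channel)
{
	int listen_ch = ieee80211_frequency_to_channel(
			p2p->remain_on_channel.center_freq);

	return !(test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) &&
		 af_channel == (u32)listen_ch);
}
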
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index 0e8b34d2d85c..39f0d0218088 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -124,6 +124,7 @@ struct afx_hdl {
* @gon_req_action: about to send go negotiation request frame.
* @block_gon_req_tx: drop tx go negotiation request frame.
* @p2pdev_dynamically: is p2p device if created by module param or supplicant.
+ * @wait_for_offchan_complete: wait for off-channel tx completion event.
*/
struct brcmf_p2p_info {
struct brcmf_cfg80211_info *cfg;
@@ -144,6 +145,7 @@ struct brcmf_p2p_info {
bool gon_req_action;
bool block_gon_req_tx;
bool p2pdev_dynamically;
+ bool wait_for_offchan_complete;
};
s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 4fffa6988087..5dea569d63ed 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2017,6 +2017,7 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = {
static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
+ BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index a907d7b065fa..b2e1ab5adb64 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1463,7 +1463,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
struct sk_buff *pfirst, *pnext;
int errcode;
- u8 doff, sfdoff;
+ u8 doff;
struct brcmf_sdio_hdrinfo rd_new;
@@ -1597,7 +1597,6 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
/* Remove superframe header, remember offset */
skb_pull(pfirst, rd_new.dat_offset);
- sfdoff = rd_new.dat_offset;
num = 0;
/* Validate all the subframe headers */
@@ -2189,7 +2188,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
* length of the chain (including padding)
*/
if (bus->txglom)
- brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
+ brcmf_sdio_update_hwhdr(__skb_peek(pktq)->data, total_len);
return 0;
}
@@ -3405,7 +3404,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
struct brcmf_core *core = bus->sdio_core;
- uint pad_size;
u32 value;
int err;
@@ -3448,7 +3446,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
if (sdiodev->sg_support) {
bus->txglom = false;
value = 1;
- pad_size = bus->sdiodev->func2->cur_blksize << 1;
err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
&value, sizeof(u32));
if (err < 0) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c
index 2fe1f6863278..3bd54f125776 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c
@@ -62,8 +62,7 @@ int brcms_debugfs_attach(struct brcms_pub *drvr)
void brcms_debugfs_detach(struct brcms_pub *drvr)
{
- if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
- debugfs_remove_recursive(drvr->dbgfs_dir);
+ debugfs_remove_recursive(drvr->dbgfs_dir);
}
struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr)
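
The IS_ERR_OR_NULL() guard could be dropped because debugfs_remove_recursive() performs the same check on entry; a sketch of the prologue in fs/debugfs/inode.c:

void debugfs_remove_recursive(struct dentry *dentry)
{
	if (IS_ERR_OR_NULL(dentry))
		return;
	/* ... removes the dentry and its whole subtree ... */
}
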
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index ecc89e718b9c..81ff558046a8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
spin_lock_bh(&wl->lock);
+ wl->wlc->vif = vif;
wl->mute_tx = false;
brcms_c_mute(wl->wlc, false);
if (vif->type == NL80211_IFTYPE_STATION)
@@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static void
brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
+ struct brcms_info *wl = hw->priv;
+
+ spin_lock_bh(&wl->lock);
+ wl->wlc->vif = NULL;
+ spin_unlock_bh(&wl->lock);
}
static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
@@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw,
spin_unlock_bh(&wl->lock);
}
+static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, bool set)
+{
+ struct brcms_info *wl = hw->priv;
+ struct sk_buff *beacon = NULL;
+ u16 tim_offset = 0;
+
+ spin_lock_bh(&wl->lock);
+ if (wl->wlc->vif)
+ beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif,
+ &tim_offset, NULL);
+ if (beacon)
+ brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
+ wl->wlc->vif->bss_conf.dtim_period);
+ spin_unlock_bh(&wl->lock);
+
+ return 0;
+}
+
static const struct ieee80211_ops brcms_ops = {
.tx = brcms_ops_tx,
.start = brcms_ops_start,
@@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = {
.flush = brcms_ops_flush,
.get_tsf = brcms_ops_get_tsf,
.set_tsf = brcms_ops_set_tsf,
+ .set_tim = brcms_ops_beacon_set_tim,
};
void brcms_dpc(unsigned long data)
@@ -1578,10 +1604,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
if (le32_to_cpu(hdr->idx) == idx) {
pdata = wl->fw.fw_bin[i]->data +
le32_to_cpu(hdr->offset);
- *pbuf = kmemdup(pdata, len, GFP_KERNEL);
+ *pbuf = kvmalloc(len, GFP_KERNEL);
if (*pbuf == NULL)
goto fail;
-
+ memcpy(*pbuf, pdata, len);
return 0;
}
}
@@ -1629,7 +1655,7 @@ int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
*/
void brcms_ucode_free_buf(void *p)
{
- kfree(p);
+ kvfree(p);
}
/*
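
Two independent changes land in this file: a set_tim hook so the beacon's TIM element is refreshed when a station's buffered-frame state changes, and a switch from kmemdup() to kvmalloc() plus memcpy() for ucode buffers. The allocator change means large firmware images no longer depend on finding physically contiguous pages, since kvmalloc() falls back to vmalloc(); kvfree() in brcms_ucode_free_buf() releases either kind. A minimal sketch of the new pattern (helper name is illustrative):

static void *dup_ucode_buf(const void *src, size_t len)
{
	void *buf = kvmalloc(len, GFP_KERNEL); /* kmalloc first, else vmalloc */

	if (buf)
		memcpy(buf, src, len);
	return buf; /* must be released with kvfree() */
}
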
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
index c4d135cff04a..9f76b880814e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
@@ -563,6 +563,7 @@ struct brcms_c_info {
struct wiphy *wiphy;
struct scb pri_scb;
+ struct ieee80211_vif *vif;
struct sk_buff *beacon;
u16 beacon_tim_offset;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index bedec1606caa..a57f2711f3c0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -25453,12 +25453,12 @@ void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
(pi->cal_type_override ==
PHY_PERICAL_FULL) ? true : false;
- if ((pi->mphase_cal_phase_id > MPHASE_CAL_STATE_INIT)) {
+ if (pi->mphase_cal_phase_id > MPHASE_CAL_STATE_INIT) {
if (pi->nphy_txiqlocal_chanspec != pi->radio_chanspec)
wlc_phy_cal_perical_mphase_restart(pi);
}
- if ((pi->mphase_cal_phase_id == MPHASE_CAL_STATE_RXCAL))
+ if (pi->mphase_cal_phase_id == MPHASE_CAL_STATE_RXCAL)
wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
wlapi_suspend_mac_and_wait(pi->sh->physhim);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
index d8b79cb72b58..e7584b842dce 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
@@ -77,6 +77,8 @@ static u16 d11ac_bw(enum brcmu_chan_bw bw)
return BRCMU_CHSPEC_D11AC_BW_40;
case BRCMU_CHAN_BW_80:
return BRCMU_CHSPEC_D11AC_BW_80;
+ case BRCMU_CHAN_BW_160:
+ return BRCMU_CHSPEC_D11AC_BW_160;
default:
WARN_ON(1);
}
@@ -190,8 +192,38 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
break;
}
break;
- case BRCMU_CHSPEC_D11AC_BW_8080:
case BRCMU_CHSPEC_D11AC_BW_160:
+ switch (ch->sb) {
+ case BRCMU_CHAN_SB_LLL:
+ ch->control_ch_num -= CH_70MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_LLU:
+ ch->control_ch_num -= CH_50MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_LUL:
+ ch->control_ch_num -= CH_30MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_LUU:
+ ch->control_ch_num -= CH_10MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_ULL:
+ ch->control_ch_num += CH_10MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_ULU:
+ ch->control_ch_num += CH_30MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_UUL:
+ ch->control_ch_num += CH_50MHZ_APART;
+ break;
+ case BRCMU_CHAN_SB_UUU:
+ ch->control_ch_num += CH_70MHZ_APART;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+ break;
+ case BRCMU_CHSPEC_D11AC_BW_8080:
default:
WARN_ON_ONCE(1);
break;
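
The eight new sideband cases follow from channel numbering: channel numbers advance one per 5 MHz, and the eight 20 MHz subbands of a 160 MHz channel are centered at +/-10, +/-30, +/-50 and +/-70 MHz from the chanspec center, hence the new CH_50MHZ_APART and CH_70MHZ_APART constants. A standalone check of the progression, assuming the natural LLL=0 ... UUU=7 ordering:

#include <stdio.h>

int main(void)
{
	for (int sb = 0; sb < 8; sb++) {
		int mhz = (2 * sb - 7) * 10; /* -70, -50, ..., +50, +70 */

		printf("sb %d: control = center %+d MHz (%+d channel numbers)\n",
		       sb, mhz, mhz / 5);
	}
	return 0;
}
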
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
index 7b9a77981df1..dddebaa60352 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
@@ -29,6 +29,8 @@
#define CH_UPPER_SB 0x01
#define CH_LOWER_SB 0x02
#define CH_EWA_VALID 0x04
+#define CH_70MHZ_APART 14
+#define CH_50MHZ_APART 10
#define CH_30MHZ_APART 6
#define CH_20MHZ_APART 4
#define CH_10MHZ_APART 2
@@ -237,6 +239,7 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
#define WPA2_AUTH_RESERVED4 0x0400
#define WPA2_AUTH_RESERVED5 0x0800
#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */
+#define WPA2_AUTH_FT 0x4000 /* Fast BSS Transition */
#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */
#define DOT11_DEFAULT_RTS_LEN 2347
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 9644e7b93645..bbdca13c5a9f 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -5652,7 +5652,7 @@ static void ipw_merge_adhoc_network(struct work_struct *work)
}
mutex_lock(&priv->mutex);
- if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
+ if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
IPW_DEBUG_MERGE("remove network %*pE\n",
priv->essid_len, priv->essid);
ipw_remove_current_network(priv);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index c3c638ed0ed7..ce4144a89217 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -1297,6 +1297,8 @@ il4965_send_rxon_assoc(struct il_priv *il)
const struct il_rxon_cmd *rxon1 = &il->staging;
const struct il_rxon_cmd *rxon2 = &il->active;
+ lockdep_assert_held(&il->mutex);
+
if (rxon1->flags == rxon2->flags &&
rxon1->filter_flags == rxon2->filter_flags &&
rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index 591687984962..76b5ddb20248 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -12,10 +12,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
@@ -51,6 +47,7 @@
static const struct iwl_base_params iwl1000_base_params = {
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.pll_cfg = true,
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index fedb108db68f..e7e45846dd07 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -12,10 +12,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 91ca77c7571c..da5d5f9b2573 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -56,7 +56,7 @@
#include "iwl-config.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 38
+#define IWL_22000_UCODE_API_MAX 41
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -77,10 +77,13 @@
#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
#define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_HR_B_F0_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
+#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@@ -88,7 +91,11 @@
IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
+#define IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(api) \
+ IWL_22000_HR_B_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \
+ IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
@@ -96,6 +103,8 @@
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
+#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
+ IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_22000 10
@@ -134,7 +143,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
- .non_shared_ant = ANT_A, \
+ .non_shared_ant = ANT_B, \
.dccm_offset = IWL_22000_DCCM_OFFSET, \
.dccm_len = IWL_22000_DCCM_LEN, \
.dccm2_offset = IWL_22000_DCCM2_OFFSET, \
@@ -155,7 +164,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.gen2 = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
- .min_umac_error_event_table = 0x400000
+ .min_umac_error_event_table = 0x400000, \
+ .d3_debug_data_base_addr = 0x401000, \
+ .d3_debug_data_length = 60 * 1024
#define IWL_DEVICE_22500 \
IWL_DEVICE_22000_COMMON, \
@@ -190,7 +201,54 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = {
const struct iwl_cfg iwl22000_2ax_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_FW_PRE,
+ .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+/*
+ * All JF radio modules are part of the 9000 series, but the MAC part
+ * looks more like 22000. That's why this device is here, but called
+ * 9560 nevertheless.
+ */
+const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = {
+ .name = "Intel(R) Wireless-AC 9461",
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = {
+ .name = "Intel(R) Wireless-AC 9462",
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
+ .name = "Intel(R) Wireless-AC 9560",
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
+ .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
+ .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_jf = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
@@ -264,7 +322,10 @@ const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
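
The _FW_PRE prefixes above are pasted together with the API number via __stringify() to form the firmware file name the driver requests. A runnable userspace illustration of the same pattern:

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
	IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"

int main(void)
{
	/* With IWL_22000_UCODE_API_MAX raised to 41 this prints
	 * "iwlwifi-Qu-b0-jf-b0-41.ucode".
	 */
	puts(IWL_QU_B_JF_B_MODULE_FIRMWARE(41));
	return 0;
}
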
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index 36151e61a26f..575a7022d045 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -12,10 +12,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index b5d8274761d8..30e62a7c9d52 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -12,10 +12,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index a62c8346f13a..c973bfaa3414 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index c46fa712985b..348c40fcddcb 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 24b2f7cbb308..d55fd23cafe6 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -57,7 +57,7 @@
#include "fw/file.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 38
+#define IWL9000_UCODE_API_MAX 41
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 30
@@ -155,7 +155,9 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
.min_umac_error_event_table = 0x800000, \
- .csr = &iwl_csr_v1
+ .csr = &iwl_csr_v1, \
+ .d3_debug_data_base_addr = 0x401000, \
+ .d3_debug_data_length = 92 * 1024
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index b79e38734f2f..431e13c6ee35 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
index c96f9b1d948a..588b15697710 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.h b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h
index 099e3ce80ffc..c43ba94bfa8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h
@@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
index f89736d60a3d..0f4be4be181c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
@@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index 096a07c5a33f..3d2e44a642de 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -13,11 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index cceb4cd8e501..c5b8376d827f 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index f21732ec3b25..3dd7d8c45dab 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
index 1bbd17ada974..04c236e9399b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.h b/drivers/net/wireless/intel/iwlwifi/dvm/led.h
index 75f74edd018f..8f93a3246dee 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 2b6ffbc46fa5..b2f172d4f78a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -13,11 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 82caae02dd09..49b71dbf8490 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -14,10 +14,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 030482b357a3..1088ff036e13 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -2,6 +2,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -15,10 +16,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
@@ -1651,7 +1648,6 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
priv->status, table.valid);
}
- trace_iwlwifi_dev_ucode_error(trans->dev, &table, 0, table.brd_ver);
IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
desc_lookup(table.error_id));
IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
index 0ad557c89514..8c25e3aefb2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
@@ -14,10 +14,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.h b/drivers/net/wireless/intel/iwlwifi/dvm/power.h
index 2fd9b43adafd..a04fd4d375c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.h
@@ -14,10 +14,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index 98050d7be411..ef4b9de256f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
index 50c1e951dd2d..b2df3a8cc464 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index c942830af2b5..6f17a5e24e82 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -15,10 +15,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 8f3e5586eda9..eee1d48d453a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -12,10 +12,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index 17e6a32384d3..8d7aafb4d9e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -13,11 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index de6ec9b7ace4..b1792de09594 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -14,10 +14,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
index 6524533d723c..4de2727ac63e 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -14,10 +14,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
index d324e9be9cbf..6388c09603c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
@@ -14,10 +14,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index fb40ddfced99..4ff323a3a4e5 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -13,11 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
index d6013bfe991c..3bf57085b976 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -14,11 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 75cae54ea7de..32d000cffe9f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -16,9 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index cb5f32c1d705..2439e98431ee 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -16,9 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index 87c1ddea75ae..68060085010f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -8,6 +8,7 @@
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -203,6 +205,7 @@ enum iwl_bt_activity_grading {
BT_ON_NO_CONNECTION = 1,
BT_LOW_TRAFFIC = 2,
BT_HIGH_TRAFFIC = 3,
+ BT_VERY_HIGH_TRAFFIC = 4,
BT_MAX_AG,
}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 6dad748e5cdc..8b4922bbe139 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -436,7 +436,8 @@ enum iwl_legacy_cmds {
/**
* @REDUCE_TX_POWER_CMD:
- * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd
+ * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd_v4
+ * or &struct iwl_dev_tx_power_cmd
*/
REDUCE_TX_POWER_CMD = 0x9f,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 57f4bc242023..6fae02fa4cad 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -374,7 +376,7 @@ enum iwl_wowlan_wakeup_reason {
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
-struct iwl_wowlan_gtk_status {
+struct iwl_wowlan_gtk_status_v1 {
u8 key_index;
u8 reserved[3];
u8 decrypt_key[16];
@@ -382,9 +384,84 @@ struct iwl_wowlan_gtk_status {
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
+#define WOWLAN_KEY_MAX_SIZE 32
+#define WOWLAN_GTK_KEYS_NUM 2
+#define WOWLAN_IGTK_KEYS_NUM 2
+
+/**
+ * struct iwl_wowlan_gtk_status - GTK status
+ * @key: GTK material
+ * @key_len: GTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bits[0:1]: key index assigned by the AP
+ * bits[2:6]: GTK index of the key in the internal DB
+ * bit[7]: Set iff this is the currently used GTK
+ * @reserved: padding
+ * @tkip_mic_key: TKIP RX MIC key
+ * @rsc: TSC RSC counters
+ */
+struct iwl_wowlan_gtk_status {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 key_len;
+ u8 key_flags;
+ u8 reserved[2];
+ u8 tkip_mic_key[8];
+ struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */
+
+#define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1))
+
+/**
+ * struct iwl_wowlan_igtk_status - IGTK status
+ * @key: IGTK material
+ * @ipn: the IGTK packet number (replay counter)
+ * @key_len: IGTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bit[0]: key index assigned by the AP (0: index 4, 1: index 5)
+ * bits[1:5]: IGTK index of the key in the internal DB
+ * bit[6]: Set iff this is the currently used IGTK
+ */
+struct iwl_wowlan_igtk_status {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 ipn[6];
+ u8 key_len;
+ u8 key_flags;
+} __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */
+
+/**
+ * struct iwl_wowlan_status_v6 - WoWLAN status
+ * @gtk: GTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status_v6 {
+ struct iwl_wowlan_gtk_status_v1 gtk;
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
+
/**
* struct iwl_wowlan_status - WoWLAN status
* @gtk: GTK data
+ * @igtk: IGTK data
* @replay_ctr: GTK rekey replay counter
* @pattern_number: number of the matched pattern
* @non_qos_seq_ctr: non-QoS sequence counter to use next
@@ -398,7 +475,8 @@ struct iwl_wowlan_gtk_status {
* @wake_packet: wakeup packet
*/
struct iwl_wowlan_status {
- struct iwl_wowlan_gtk_status gtk;
+ struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 non_qos_seq_ctr;
@@ -410,7 +488,12 @@ struct iwl_wowlan_status {
__le32 wake_packet_length;
__le32 wake_packet_bufsize;
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
+
+static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
+{
+ return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
+}
#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
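
Alongside iwlmvm_wowlan_gtk_idx() above, the documented key_flags layout implies a companion predicate for the in-use bit. A hypothetical decoder derived purely from the kdoc, not taken from the patch:

static inline bool iwl_wowlan_gtk_is_current(const struct iwl_wowlan_gtk_status *gtk)
{
	return gtk->key_flags & BIT(7); /* bit[7]: currently used GTK */
}
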
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 59b3c6e8f37b..eff3249af48a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -100,6 +100,11 @@ enum iwl_data_path_subcmd_ids {
TLC_MNG_CONFIG_CMD = 0xF,
/**
+ * @HE_AIR_SNIFFER_CONFIG_CMD: &struct iwl_he_monitor_cmd
+ */
+ HE_AIR_SNIFFER_CONFIG_CMD = 0x13,
+
+ /**
* @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
*/
TLC_MNG_UPDATE_NOTIF = 0xF7,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 106782341544..dc1fa377087a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -8,6 +8,7 @@
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -336,6 +338,9 @@ struct iwl_dbg_mem_access_rsp {
#define CONT_REC_COMMAND_SIZE 80
#define ENABLE_CONT_RECORDING 0x15
#define DISABLE_CONT_RECORDING 0x16
+#define BUFFER_ALLOCATION 0x27
+#define START_DEBUG_RECORDING 0x29
+#define STOP_DEBUG_RECORDING 0x2A
/*
* struct iwl_continuous_record_mode - recording mode
@@ -353,4 +358,31 @@ struct iwl_continuous_record_cmd {
sizeof(struct iwl_continuous_record_mode)];
} __packed;
+/* maximum fragments to be allocated per target of allocationId */
+#define IWL_BUFFER_LOCATION_MAX_FRAGS 2
+
+/**
+ * struct iwl_fragment_data - single fragment structure
+ * @address: 64-bit start address
+ * @size: size in bytes
+ */
+struct iwl_fragment_data {
+ __le64 address;
+ __le32 size;
+} __packed; /* FRAGMENT_STRUCTURE_API_S_VER_1 */
+
+/**
+ * struct iwl_buffer_allocation_cmd - buffer allocation command structure
+ * @allocation_id: id of the allocation
+ * @buffer_location: location of the buffer
+ * @num_frags: number of fragments
+ * @fragments: memory fragments
+ */
+struct iwl_buffer_allocation_cmd {
+ __le32 allocation_id;
+ __le32 buffer_location;
+ __le32 num_frags;
+ struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS];
+} __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */
+
#endif /* __iwl_fw_api_debug_h__ */
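
For orientation, populating the new buffer allocation command might look as follows; the allocation_id and buffer_location values and the dma_addr/buf_len variables are placeholders, not values taken from this patch:

struct iwl_buffer_allocation_cmd cmd = {
	.allocation_id   = cpu_to_le32(1),	/* placeholder id */
	.buffer_location = cpu_to_le32(1),	/* placeholder location */
	.num_frags       = cpu_to_le32(1),
};

cmd.fragments[0].address = cpu_to_le64(dma_addr);
cmd.fragments[0].size    = cpu_to_le32(buf_len);
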
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index 17c7ef1662a9..ca49db786ed6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -72,11 +74,58 @@ enum iwl_mac_conf_subcmd_ids {
*/
LOW_LATENCY_CMD = 0x3,
/**
+ * @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif
+ */
+ PROBE_RESPONSE_DATA_NOTIF = 0xFC,
+
+ /**
* @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
*/
CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
};
+#define IWL_P2P_NOA_DESC_COUNT (2)
+
+/**
+ * struct iwl_p2p_noa_attr - NOA attr contained in probe resp FW notification
+ *
+ * @id: attribute id
+ * @len_low: length low half
+ * @len_high: length high half
+ * @idx: instance of NoA timing
+ * @ctwin: GO's ct window and pwer save capability
+ * @desc: NoA descriptor
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_p2p_noa_attr {
+ u8 id;
+ u8 len_low;
+ u8 len_high;
+ u8 idx;
+ u8 ctwin;
+ struct ieee80211_p2p_noa_desc desc[IWL_P2P_NOA_DESC_COUNT];
+ u8 reserved;
+} __packed;
+
+#define IWL_PROBE_RESP_DATA_NO_CSA (0xff)
+
+/**
+ * struct iwl_probe_resp_data_notif - notification with NOA and CSA counter
+ *
+ * @mac_id: the mac which should send the probe response
+ * @noa_active: notifies if the noa attribute should be handled
+ * @noa_attr: P2P NOA attribute
+ * @csa_counter: current csa counter
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_probe_resp_data_notif {
+ __le32 mac_id;
+ __le32 noa_active;
+ struct iwl_p2p_noa_attr noa_attr;
+ u8 csa_counter;
+ u8 reserved[3];
+} __packed; /* PROBE_RESPONSE_DATA_NTFY_API_S_VER_1 */
+
/**
* struct iwl_channel_switch_noa_notif - Channel switch NOA notification
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 55594c93b014..1dd23f846fb9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -578,4 +578,18 @@ struct iwl_he_sta_context_cmd {
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
} __packed; /* STA_CONTEXT_DOT11AX_API_S */
+/**
+ * struct iwl_he_monitor_cmd - configure air sniffer for HE
+ * @bssid: the BSSID to sniff for
+ * @reserved1: reserved for dword alignment
+ * @aid: the AID to track on for HE MU
+ * @reserved2: reserved for future use
+ */
+struct iwl_he_monitor_cmd {
+ u8 bssid[6];
+ __le16 reserved1;
+ __le16 aid;
+ u8 reserved2[6];
+} __packed; /* HE_AIR_SNIFFER_CONFIG_CMD_API_S_VER_1 */
+
#endif /* __iwl_fw_api_mac_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 6c5338364794..93b392f0c6a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -165,7 +165,7 @@ struct iwl_nvm_access_resp {
*/
struct iwl_nvm_get_info {
__le32 reserved;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */
/**
* enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp
@@ -180,14 +180,14 @@ enum iwl_nvm_info_general_flags {
* @flags: bit 0: 1 - empty, 0 - non-empty
* @nvm_version: nvm version
* @board_type: board type
- * @reserved: reserved
+ * @n_hw_addrs: number of reserved MAC addresses
*/
struct iwl_nvm_get_info_general {
__le32 flags;
__le16 nvm_version;
u8 board_type;
- u8 reserved;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
+ u8 n_hw_addrs;
+} __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */
/**
* enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku
@@ -231,7 +231,7 @@ struct iwl_nvm_get_info_sku {
struct iwl_nvm_get_info_phy {
__le32 tx_chains;
__le32 rx_chains;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
#define IWL_NUM_CHANNELS (51)
@@ -245,7 +245,7 @@ struct iwl_nvm_get_info_regulatory {
__le32 lar_enabled;
__le16 channel_profile[IWL_NUM_CHANNELS];
__le16 reserved;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
/**
* struct iwl_nvm_get_info_rsp - response to get NVM data
@@ -259,7 +259,7 @@ struct iwl_nvm_get_info_rsp {
struct iwl_nvm_get_info_sku mac_sku;
struct iwl_nvm_get_info_phy phy_sku;
struct iwl_nvm_get_info_regulatory regulatory;
-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
/**
* struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
@@ -270,22 +270,6 @@ struct iwl_nvm_access_complete_cmd {
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
/**
- * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic
- * regulatory profile according to the given MCC (Mobile Country Code).
- * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
- * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
- * MCC in the cmd response will be the relevant MCC in the NVM.
- * @mcc: given mobile country code
- * @source_id: the source from where we got the MCC, see iwl_mcc_source
- * @reserved: reserved for alignment
- */
-struct iwl_mcc_update_cmd_v1 {
- __le16 mcc;
- u8 source_id;
- u8 reserved;
-} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */
-
-/**
* struct iwl_mcc_update_cmd - Request the device to update geographic
* regulatory profile according to the given MCC (Mobile Country Code).
 * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the world domain.
@@ -306,7 +290,18 @@ struct iwl_mcc_update_cmd {
} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
/**
- * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD.
+ * enum iwl_geo_information - geographic information.
+ * @GEO_NO_INFO: no special info for this geo profile.
+ * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
+ * for the 5 GHz band.
+ */
+enum iwl_geo_information {
+ GEO_NO_INFO = 0,
+ GEO_WMM_ETSI_5GHZ_INFO = BIT(0),
+};
+
+/**
+ * struct iwl_mcc_update_resp_v3 - response to MCC_UPDATE_CMD.
* Contains the new channel control profile map, if changed, and the new MCC
* (mobile country code).
* The new MCC may be different than what was requested in MCC_UPDATE_CMD.
@@ -314,30 +309,23 @@ struct iwl_mcc_update_cmd {
* @mcc: the new applied MCC
* @cap: capabilities for all channels which matches the MCC
* @source_id: the MCC source, see iwl_mcc_source
- * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
- * channels, depending on platform)
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
+ * @geo_info: geographic specific profile information
+ * see &enum iwl_geo_information.
+ * @n_channels: number of channels in @channels_data.
* @channels: channel control data map, DWORD for each channel. Only the first
* 16bits are used.
*/
-struct iwl_mcc_update_resp_v1 {
+struct iwl_mcc_update_resp_v3 {
__le32 status;
__le16 mcc;
u8 cap;
u8 source_id;
+ __le16 time;
+ __le16 geo_info;
__le32 n_channels;
__le32 channels[0];
-} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */
-
-/**
- * enum iwl_geo_information - geographic information.
- * @GEO_NO_INFO: no special info for this geo profile.
- * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
- * for the 5 GHz band.
- */
-enum iwl_geo_information {
- GEO_NO_INFO = 0,
- GEO_WMM_ETSI_5GHZ_INFO = BIT(0),
-};
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
/**
* struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
@@ -347,25 +335,26 @@ enum iwl_geo_information {
* @status: see &enum iwl_mcc_update_status
* @mcc: the new applied MCC
* @cap: capabilities for all channels which matches the MCC
- * @source_id: the MCC source, see iwl_mcc_source
- * @time: time elapsed from the MCC test start (in 30 seconds TU)
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
* @geo_info: geographic specific profile information
* see &enum iwl_geo_information.
- * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
- * channels, depending on platform)
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @reserved: reserved for 4-byte alignment.
+ * @n_channels: number of channels in @channels_data.
* @channels: channel control data map, DWORD for each channel. Only the first
* 16bits are used.
*/
struct iwl_mcc_update_resp {
__le32 status;
__le16 mcc;
- u8 cap;
- u8 source_id;
+ __le16 cap;
__le16 time;
__le16 geo_info;
+ u8 source_id;
+ u8 reserved[3];
__le32 n_channels;
__le32 channels[0];
-} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */
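To make the v3/v4 layout difference concrete, the following stand-alone sketch (host-side types; little-endian and the GCC packed attribute assumed) pins the new offsets: @cap grows to 16 bits, and @source_id moves behind @geo_info, padded out to a DWORD by @reserved:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct mcc_update_resp_v4_sketch {
	uint32_t status;
	uint16_t mcc;
	uint16_t cap;		/* was u8 in v3 */
	uint16_t time;
	uint16_t geo_info;
	uint8_t  source_id;	/* moved after geo_info in v4 */
	uint8_t  reserved[3];
	uint32_t n_channels;
	uint32_t channels[];	/* n_channels entries follow */
} __attribute__((packed));

static_assert(offsetof(struct mcc_update_resp_v4_sketch, cap) == 6,
	      "cap now occupies a full 16-bit slot");
static_assert(offsetof(struct mcc_update_resp_v4_sketch, n_channels) == 16,
	      "fixed header is 16 bytes before the channel map");
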
/**
* struct iwl_mcc_chub_notif - chub notifies of mcc change
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index a3c77e01863b..286a22da232d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -316,7 +318,9 @@ enum iwl_dev_tx_power_cmd_mode {
IWL_TX_POWER_MODE_SET_DEVICE = 1,
IWL_TX_POWER_MODE_SET_CHAINS = 2,
IWL_TX_POWER_MODE_SET_ACK = 3,
-}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */;
+ IWL_TX_POWER_MODE_SET_SAR_TIMER = 4,
+ IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5,
+}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_5 */
#define IWL_NUM_CHAIN_LIMITS 2
#define IWL_NUM_SUB_BANDS 5
@@ -350,13 +354,35 @@ struct iwl_dev_tx_power_cmd_v3 {
* reduction.
* @reserved: reserved (padding)
*/
-struct iwl_dev_tx_power_cmd {
+struct iwl_dev_tx_power_cmd_v4 {
/* v4 is just an extension of v3 - keep this here */
struct iwl_dev_tx_power_cmd_v3 v3;
u8 enable_ack_reduction;
u8 reserved[3];
} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * @v3: version 3 of the command, embedded here for easier software handling
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ * since the last command. Used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * Note: if not changed, the command is used for keep-alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, the FW will revert to
+ * the default BIOS values. Relevant if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ */
+struct iwl_dev_tx_power_cmd {
+ /* v5 is just an extension of v3 - keep this here */
+ struct iwl_dev_tx_power_cmd_v3 v3;
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
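A rough sketch of the keep-alive use of the new SAR timer fields; the v3 body size is a placeholder and the helper name is invented. The point is only that an unchanged restriction set plus a fresh @timer_period re-arms the firmware timer:

#include <stdint.h>

struct tx_power_cmd_v3_sketch { uint8_t opaque[32]; }; /* v3 body, size is a placeholder */

struct tx_power_cmd_v5_sketch {
	struct tx_power_cmd_v3_sketch v3;
	uint8_t  enable_ack_reduction;
	uint8_t  per_chain_restriction_changed;
	uint8_t  reserved[2];
	uint32_t timer_period;	/* ms until FW reverts to BIOS defaults */
} __attribute__((packed));

/* Keep-alive: same restrictions as before, just re-arm the SAR timer. */
static void arm_sar_keepalive(struct tx_power_cmd_v5_sketch *cmd,
			      uint32_t period_ms)
{
	cmd->per_chain_restriction_changed = 0;	/* unchanged -> keep alive */
	cmd->timer_period = period_ms;		/* cpu_to_le32() in-kernel */
}
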
+
#define IWL_NUM_GEO_PROFILES 3
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 087fae91baef..9eddc4dc2ae6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -66,12 +66,24 @@
/**
* enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
- * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC
+ * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for
+ * bandwidths <= 80MHz
* @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
+ * @IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK: enable STBC in HE at 160MHz
+ * bandwidth
+ * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK: enable HE Dual Carrier Modulation
+ * for BPSK (MCS 0) with 1 spatial
+ * stream
+ * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK: enable HE Dual Carrier Modulation
+ * for BPSK (MCS 0) with 2 spatial
+ * streams
*/
enum iwl_tlc_mng_cfg_flags {
- IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0),
- IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1),
+ IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0),
+ IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1),
+ IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK = BIT(2),
+ IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3),
+ IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4),
};
/**
@@ -217,66 +229,6 @@ struct iwl_tlc_update_notif {
__le32 amsdu_enabled;
} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
-/**
- * enum iwl_tlc_debug_flags - debug options
- * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling
- * @IWL_TLC_DEBUG_STATS_TH: threshold for sending statistics to the driver, in
- * frames
- * @IWL_TLC_DEBUG_STATS_TIME_TH: threshold for sending statistics to the
- * driver, in msec
- * @IWL_TLC_DEBUG_AGG_TIME_LIM: time limit for a BA session
- * @IWL_TLC_DEBUG_AGG_DIS_START_TH: frame with try-count greater than this
- * threshold should not start an aggregation session
- * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames in an aggregation
- * @IWL_TLC_DEBUG_RENEW_ADDBA_DELAY: delay between retries of ADD BA
- * @IWL_TLC_DEBUG_START_AC_RATE_IDX: frames per second to start a BA session
- * @IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK: disable BW scaling
- */
-enum iwl_tlc_debug_flags {
- IWL_TLC_DEBUG_FIXED_RATE,
- IWL_TLC_DEBUG_STATS_TH,
- IWL_TLC_DEBUG_STATS_TIME_TH,
- IWL_TLC_DEBUG_AGG_TIME_LIM,
- IWL_TLC_DEBUG_AGG_DIS_START_TH,
- IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
- IWL_TLC_DEBUG_RENEW_ADDBA_DELAY,
- IWL_TLC_DEBUG_START_AC_RATE_IDX,
- IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK,
-}; /* TLC_MNG_DEBUG_FLAGS_API_E_VER_1 */
-
-/**
- * struct iwl_dhc_tlc_dbg - fixed debug config
- * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id
- * @reserved1: reserved
- * @flags: bitmap of %IWL_TLC_DEBUG_\*
- * @fixed_rate: rate value
- * @stats_threshold: if number of tx-ed frames is greater, send statistics
- * @time_threshold: statistics threshold in usec
- * @agg_time_lim: max agg time
- * @agg_dis_start_threshold: frames with try-cont greater than this count will
- * not be aggregated
- * @agg_frame_count_lim: agg size
- * @addba_retry_delay: delay between retries of ADD BA
- * @start_ac_rate_idx: frames per second to start a BA session
- * @no_far_range_tweak: disable BW scaling
- * @reserved2: reserved
- */
-struct iwl_dhc_tlc_cmd {
- u8 sta_id;
- u8 reserved1[3];
- __le32 flags;
- __le32 fixed_rate;
- __le16 stats_threshold;
- __le16 time_threshold;
- __le16 agg_time_lim;
- __le16 agg_dis_start_threshold;
- __le16 agg_frame_count_lim;
- __le16 addba_retry_delay;
- u8 start_ac_rate_idx[IEEE80211_NUM_ACS];
- u8 no_far_range_tweak;
- u8 reserved2[3];
-} __packed;
-
/*
* These serve as indexes into
* struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 2f599353c885..0537496b6eb1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -362,18 +362,49 @@ enum iwl_rx_he_phy {
/* 6 bits reserved */
IWL_RX_HE_PHY_DELIM_EOF = BIT(31),
- /* second dword - MU data */
- IWL_RX_HE_PHY_SIGB_COMPRESSION = BIT_ULL(32 + 0),
- IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL,
+ /* second dword - common data */
IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL,
IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8),
/* trigger encoded */
IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL,
- IWL_RX_HE_PHY_SIGB_MCS_MASK = 0xf000000000000ULL,
- /* 1 bit reserved */
- IWL_RX_HE_PHY_SIGB_DCM = BIT_ULL(32 + 21),
- IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL,
- /* 8 bits reserved */
+ IWL_RX_HE_PHY_INFO_TYPE_MASK = 0xf000000000000000ULL,
+ IWL_RX_HE_PHY_INFO_TYPE_SU = 0x0, /* TSF low valid (first DW) */
+ IWL_RX_HE_PHY_INFO_TYPE_MU = 0x1, /* TSF low/high valid (both DWs) */
+ IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO = 0x2, /* same + SIGB-common0/1/2 valid */
+ IWL_RX_HE_PHY_INFO_TYPE_TB = 0x3, /* TSF low/high valid (both DWs) */
+
+ /* second dword - MU data */
+ IWL_RX_HE_PHY_MU_SIGB_COMPRESSION = BIT_ULL(32 + 0),
+ IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL,
+ IWL_RX_HE_PHY_MU_SIGB_MCS_MASK = 0xf000000000000ULL,
+ IWL_RX_HE_PHY_MU_SIGB_DCM = BIT_ULL(32 + 21),
+ IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL,
+
+ /* second dword - TB data */
+ IWL_RX_HE_PHY_TB_PILOT_TYPE = BIT_ULL(32 + 0),
+ IWL_RX_HE_PHY_TB_LOW_SS_MASK = 0xe00000000ULL
+};
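A minimal sketch of classifying the 64-bit PHY data word by the new info type nibble (bits 60-63, i.e. the top nibble of the second dword); the mask and helper names here are invented for the example:

#include <stdint.h>

#define HE_PHY_INFO_TYPE_MASK 0xf000000000000000ULL

/* Values as in the enum above. */
enum he_info_type { INFO_SU = 0, INFO_MU = 1, INFO_MU_EXT = 2, INFO_TB = 3 };

static inline enum he_info_type he_phy_info_type(uint64_t he_phy_data)
{
	return (enum he_info_type)((he_phy_data & HE_PHY_INFO_TYPE_MASK) >> 60);
}

/* Example: SIGB-common0/1/2 are only meaningful for MU with extra info. */
static inline int he_has_sigb_common(uint64_t he_phy_data)
{
	return he_phy_info_type(he_phy_data) == INFO_MU_EXT;
}
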
+
+enum iwl_rx_he_sigb_common0 {
+ /* the a1/a2/... is what the PHY/firmware calls the values */
+ IWL_RX_HE_SIGB_COMMON0_CH1_RU0 = 0x000000ff, /* a1 */
+ IWL_RX_HE_SIGB_COMMON0_CH1_RU2 = 0x0000ff00, /* a2 */
+ IWL_RX_HE_SIGB_COMMON0_CH2_RU0 = 0x00ff0000, /* b1 */
+ IWL_RX_HE_SIGB_COMMON0_CH2_RU2 = 0xff000000, /* b2 */
+};
+
+enum iwl_rx_he_sigb_common1 {
+ IWL_RX_HE_SIGB_COMMON1_CH1_RU1 = 0x000000ff, /* c1 */
+ IWL_RX_HE_SIGB_COMMON1_CH1_RU3 = 0x0000ff00, /* c2 */
+ IWL_RX_HE_SIGB_COMMON1_CH2_RU1 = 0x00ff0000, /* d1 */
+ IWL_RX_HE_SIGB_COMMON1_CH2_RU3 = 0xff000000, /* d2 */
+};
+
+enum iwl_rx_he_sigb_common2 {
+ IWL_RX_HE_SIGB_COMMON2_CH1_CTR_RU = 0x0001,
+ IWL_RX_HE_SIGB_COMMON2_CH2_CTR_RU = 0x0002,
+ IWL_RX_HE_SIGB_COMMON2_CH1_CRC_OK = 0x0004,
+ IWL_RX_HE_SIGB_COMMON2_CH2_CRC_OK = 0x0008,
};
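Each RU allocation occupies one byte of the 32-bit word, so extraction is just a shift and mask; a small sketch with invented helper names:

#include <stdint.h>

/* HE-SIG-B common0: a1/a2/b1/b2 are packed one byte each, LSB first. */
static inline uint8_t sigb0_ch1_ru0(uint32_t w) { return w & 0xff; }	      /* a1 */
static inline uint8_t sigb0_ch1_ru2(uint32_t w) { return (w >> 8) & 0xff; }  /* a2 */
static inline uint8_t sigb0_ch2_ru0(uint32_t w) { return (w >> 16) & 0xff; } /* b1 */
static inline uint8_t sigb0_ch2_ru2(uint32_t w) { return (w >> 24) & 0xff; } /* b2 */
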
/**
@@ -381,15 +412,31 @@ enum iwl_rx_he_phy {
*/
struct iwl_rx_mpdu_desc_v1 {
/* DW7 - carries rss_hash only when rpa_en == 1 */
- /**
- * @rss_hash: RSS hash value
- */
- __le32 rss_hash;
+ union {
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+
+ /**
+ * @sigb_common0: for HE sniffer, HE-SIG-B common part 0
+ */
+ __le32 sigb_common0;
+ };
+
/* DW8 - carries filter_match only when rpa_en == 1 */
- /**
- * @filter_match: filter match value
- */
- __le32 filter_match;
+ union {
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+
+ /**
+ * @sigb_common1: for HE sniffer, HE-SIG-B common part 1
+ */
+ __le32 sigb_common1;
+ };
+
/* DW9 */
/**
* @rate_n_flags: RX rate/flags encoding
@@ -439,15 +486,30 @@ struct iwl_rx_mpdu_desc_v1 {
*/
struct iwl_rx_mpdu_desc_v3 {
/* DW7 - carries filter_match only when rpa_en == 1 */
- /**
- * @filter_match: filter match value
- */
- __le32 filter_match;
+ union {
+ /**
+ * @filter_match: filter match value
+ */
+ __le32 filter_match;
+
+ /**
+ * @sigb_common0: for HE sniffer, HE-SIG-B common part 0
+ */
+ __le32 sigb_common0;
+ };
+
/* DW8 - carries rss_hash only when rpa_en == 1 */
- /**
- * @rss_hash: RSS hash value
- */
- __le32 rss_hash;
+ union {
+ /**
+ * @rss_hash: RSS hash value
+ */
+ __le32 rss_hash;
+
+ /**
+ * @sigb_common1: for HE sniffer, HE-SIG-B common part 1
+ */
+ __le32 sigb_common1;
+ };
/* DW9 */
/**
* @partial_hash: 31:0 ip/tcp header hash
@@ -543,10 +605,18 @@ struct iwl_rx_mpdu_desc {
 * @raw_csum: raw checksum (allegedly unreliable)
*/
__le16 raw_csum;
- /**
- * @l3l4_flags: &enum iwl_rx_l3l4_flags
- */
- __le16 l3l4_flags;
+
+ union {
+ /**
+ * @l3l4_flags: &enum iwl_rx_l3l4_flags
+ */
+ __le16 l3l4_flags;
+
+ /**
+ * @sigb_common2: for HE sniffer, HE-SIG-B common part 2
+ */
+ __le16 sigb_common2;
+ };
/* DW5 */
/**
* @status: &enum iwl_rx_mpdu_status
@@ -574,6 +644,69 @@ struct iwl_rx_mpdu_desc {
#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
+#define IWL_CD_STTS_OPTIMIZED_POS 0
+#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
+#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
+#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
+#define IWL_CD_STTS_WIFI_STATUS_POS 4
+#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
+
+/**
+ * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
+ * @IWL_CD_STTS_UNUSED: unused
+ * @IWL_CD_STTS_UNUSED_2: unused
+ * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
+ * In sniffer mode, when split is used, set in last CD completion. (RX)
+ * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
+ * all CD completions. (RX)
+ * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
+ * @IWL_CD_STTS_ERROR: general error (RX)
+ */
+enum iwl_completion_desc_transfer_status {
+ IWL_CD_STTS_UNUSED,
+ IWL_CD_STTS_UNUSED_2,
+ IWL_CD_STTS_END_TRANSFER,
+ IWL_CD_STTS_OVERFLOW,
+ IWL_CD_STTS_ABORTED,
+ IWL_CD_STTS_ERROR,
+};
+
+/**
+ * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
+ * @IWL_CD_STTS_VALID: the packet is valid (RX)
+ * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
+ * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
+ * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
+ * @IWL_CD_STTS_DUP: duplicate packet (RX)
+ * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
+ * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
+ * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
+ * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
+ * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
+ * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
+ * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
+ * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
+ * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
+ * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
+ */
+enum iwl_completion_desc_wifi_status {
+ IWL_CD_STTS_VALID,
+ IWL_CD_STTS_FCS_ERR,
+ IWL_CD_STTS_SEC_KEY_ERR,
+ IWL_CD_STTS_DECRYPTION_ERR,
+ IWL_CD_STTS_DUP,
+ IWL_CD_STTS_ICV_MIC_ERR,
+ IWL_CD_STTS_INTERNAL_SNAP_ERR,
+ IWL_CD_STTS_SEC_PORT_FAIL,
+ IWL_CD_STTS_BA_OLD_SN,
+ IWL_CD_STTS_QOS_NULL,
+ IWL_CD_STTS_MAC_HDR_ERR,
+ IWL_CD_STTS_MAX_RETRANS,
+ IWL_CD_STTS_EX_LIFETIME,
+ IWL_CD_STTS_NOT_USED,
+ IWL_CD_STTS_REPLAY_ERR,
+};
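The three fields share one status byte; a self-contained sketch of splitting it with the POS/MSK values above (macro names shortened for the example):

#include <stdint.h>
#include <stdio.h>

#define CD_STTS_OPTIMIZED_MSK		0x01
#define CD_STTS_TRANSFER_STATUS_POS	1
#define CD_STTS_TRANSFER_STATUS_MSK	0x0E
#define CD_STTS_WIFI_STATUS_POS		4
#define CD_STTS_WIFI_STATUS_MSK		0xF0

/* Split a completion-descriptor status byte into its three fields. */
static void decode_cd_status(uint8_t stts)
{
	unsigned int optimized = stts & CD_STTS_OPTIMIZED_MSK;
	unsigned int xfer = (stts & CD_STTS_TRANSFER_STATUS_MSK) >>
			    CD_STTS_TRANSFER_STATUS_POS;
	unsigned int wifi = (stts & CD_STTS_WIFI_STATUS_MSK) >>
			    CD_STTS_WIFI_STATUS_POS;

	printf("optimized=%u transfer=%u wifi=%u\n", optimized, xfer, wifi);
}
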
+
struct iwl_frame_release {
u8 baid;
u8 reserved;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index a17c4a79b8d4..18741889ec30 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -262,6 +262,7 @@ enum iwl_scan_channel_flags {
IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0),
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1),
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2),
+ IWL_SCAN_CHANNEL_FLAG_EBS_FRAG = BIT(3),
};
/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
@@ -595,9 +596,12 @@ enum iwl_umac_scan_general_flags {
* enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2
* @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete
* notification per channel or not.
+ * @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
+ * reorder optimization or not.
*/
enum iwl_umac_scan_general_flags2 {
- IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
+ IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index dc40cbd52f92..450227f81706 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -391,7 +393,7 @@ enum iwl_sta_type {
* @tfd_queue_msk: tfd queues used by this station.
 * Obsolete for new TX API (9 and above).
* @rx_ba_window: aggregation window size
- * @sp_length: the size of the SP as it appears in the WME IE
+ * @sp_length: the size of the SP in actual number of frames
* @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
* enabled ACs.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 514b86123d3d..358bdf051e83 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -186,7 +186,7 @@ enum iwl_tx_cmd_sec_ctrl {
/*
* TID for non QoS frames - to be written in tid_tspec
*/
-#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
+#define IWL_TID_NON_QOS 0
/*
* Limits on the retransmissions - to be written in {data,rts}_retry_limit
@@ -747,9 +747,9 @@ enum iwl_mvm_ba_resp_flags {
* @tfd_cnt: number of TFD-Q elements
* @ra_tid_cnt: number of RATID-Q elements
* @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
- * for details.
+ * for details. Length in @tfd_cnt.
* @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
- * &iwl_mvm_compressed_ba_ratid for more details.
+ * &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt.
*/
struct iwl_mvm_compressed_ba_notif {
__le32 flags;
@@ -766,7 +766,7 @@ struct iwl_mvm_compressed_ba_notif {
__le32 tx_rate;
__le16 tfd_cnt;
__le16 ra_tid_cnt;
- struct iwl_mvm_compressed_ba_tfd tfd[1];
+ struct iwl_mvm_compressed_ba_tfd tfd[0];
struct iwl_mvm_compressed_ba_ratid ra_tid[0];
} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
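With @tfd now a true variable-length array, the @ra_tid entries begin only after @tfd_cnt elements; a host-side sketch of the walk (element sizes are placeholders and the fixed header is elided):

#include <stdint.h>

struct ba_tfd_sketch { uint8_t opaque[12]; };	/* iwl_mvm_compressed_ba_tfd, size is a placeholder */
struct ba_ratid_sketch { uint8_t opaque[4]; };	/* iwl_mvm_compressed_ba_ratid, size is a placeholder */

struct ba_notif_sketch {
	/* fixed header fields elided for brevity */
	uint16_t tfd_cnt;
	uint16_t ra_tid_cnt;
	struct ba_tfd_sketch tfd[];
} __attribute__((packed));

/* The RA-TID array begins where the TFD array ends. */
static const struct ba_ratid_sketch *
ba_first_ratid(const struct ba_notif_sketch *n)
{
	return (const void *)&n->tfd[n->tfd_cnt];
}
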
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index a31a42e673c4..c16757051f16 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -19,9 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -243,7 +240,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
/* Pull RXF1 */
iwl_fwrt_dump_rxf(fwrt, dump_data,
cfg->lmac[0].rxfifo1_size, 0, 0);
@@ -257,7 +254,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
LMAC2_PRPH_OFFSET, 2);
}
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
/* Pull TXF data from LMAC1 */
for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
@@ -282,7 +279,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
}
}
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
@@ -458,8 +455,8 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
{ .start = 0x00a02400, .end = 0x00a02758 },
};
-static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start,
- u32 len_bytes, __le32 *data)
+static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
+ u32 len_bytes, __le32 *data)
{
u32 i;
@@ -467,21 +464,6 @@ static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start,
*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
}
-static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start,
- u32 len_bytes, __le32 *data)
-{
- unsigned long flags;
- bool success = false;
-
- if (iwl_trans_grab_nic_access(trans, &flags)) {
- success = true;
- _iwl_read_prph_block(trans, start, len_bytes, data);
- iwl_trans_release_nic_access(trans, &flags);
- }
-
- return success;
-}
-
static void iwl_dump_prph(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data,
const struct iwl_prph_range *iwl_prph_dump_addr,
@@ -507,11 +489,11 @@ static void iwl_dump_prph(struct iwl_trans *trans,
prph = (void *)(*data)->data;
prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
- _iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
- /* our range is inclusive, hence + 4 */
- iwl_prph_dump_addr[i].end -
- iwl_prph_dump_addr[i].start + 4,
- (void *)prph->data);
+ iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
+ /* our range is inclusive, hence + 4 */
+ iwl_prph_dump_addr[i].end -
+ iwl_prph_dump_addr[i].start + 4,
+ (void *)prph->data);
*data = iwl_fw_error_next_data(*data);
}
@@ -556,42 +538,130 @@ static struct scatterlist *alloc_sgtable(int size)
return table;
}
-void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
+static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt)
+{
+ u32 prph_len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
+ i++) {
+ /* The range includes both boundaries */
+ int num_bytes_in_chunk =
+ iwl_prph_dump_addr_comm[i].end -
+ iwl_prph_dump_addr_comm[i].start + 4;
+
+ prph_len += sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_prph) +
+ num_bytes_in_chunk;
+ }
+
+ if (fwrt->trans->cfg->mq_rx_supported) {
+ for (i = 0; i <
+ ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
+ /* The range includes both boundaries */
+ int num_bytes_in_chunk =
+ iwl_prph_dump_addr_9000[i].end -
+ iwl_prph_dump_addr_9000[i].start + 4;
+
+ prph_len += sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_prph) +
+ num_bytes_in_chunk;
+ }
+ }
+ return prph_len;
+}
+
+static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **dump_data,
+ u32 len, u32 ofs, u32 type)
+{
+ struct iwl_fw_error_dump_mem *dump_mem;
+
+ if (!len)
+ return;
+
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
+ dump_mem = (void *)(*dump_data)->data;
+ dump_mem->type = cpu_to_le32(type);
+ dump_mem->offset = cpu_to_le32(ofs);
+ iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+
+ IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
+}
+
+#define ADD_LEN(len, item_len, const_len) \
+ do { size_t item = item_len; len += (!!item) * const_len + item; } \
+ while (0)
+
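ADD_LEN charges the per-chunk header only when the chunk is non-empty; the same logic as a plain function, for clarity:

#include <stddef.h>

/* Equivalent of ADD_LEN(len, item_len, const_len), spelled out. */
static size_t add_len(size_t len, size_t item_len, size_t const_len)
{
	if (item_len)			/* empty chunks contribute nothing, */
		len += const_len;	/* non-empty ones also pay the header */
	return len + item_len;
}
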
+static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_shared_mem_cfg *mem_cfg)
+{
+ size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ u32 fifo_len = 0;
+ int i;
+
+ if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)))
+ goto dump_txf;
+
+ /* Count RXF2 size */
+ ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
+
+ /* Count RXF1 sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++)
+ ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
+
+dump_txf:
+ if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)))
+ goto dump_internal_txf;
+
+ /* Count TXF sizes */
+ for (i = 0; i < mem_cfg->num_lmacs; i++) {
+ int j;
+
+ for (j = 0; j < mem_cfg->num_txfifo_entries; j++)
+ ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j],
+ hdr_len);
+ }
+
+dump_internal_txf:
+ if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+ fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++)
+ ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
+
+out:
+ return fifo_len;
+}
+
+static struct iwl_fw_error_dump_file *
+_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dump_ptrs *fw_error_dump)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_info *dump_info;
- struct iwl_fw_error_dump_mem *dump_mem;
struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
struct iwl_fw_error_dump_trigger_desc *dump_trig;
- struct iwl_fw_dump_ptrs *fw_error_dump;
- struct scatterlist *sg_dump_data;
u32 sram_len, sram_ofs;
- const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
+ const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv;
struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
- u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
- u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
- u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ?
+ u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
+ u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
+ u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
0 : fwrt->trans->cfg->dccm2_len;
bool monitor_dump_only = false;
int i;
- IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
-
- /* there's no point in fw dump if the bus is dead */
- if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
- IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
- goto out;
- }
-
if (fwrt->dump.trig &&
fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
monitor_dump_only = true;
- fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
- if (!fw_error_dump)
- goto out;
-
/* SRAM - include stack CCM if driver knows the values for it */
if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
const struct fw_img *img;
@@ -606,138 +676,43 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
/* reading RXF/TXF sizes */
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
- fifo_data_len = 0;
-
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
-
- /* Count RXF2 size */
- if (mem_cfg->rxfifo2_size) {
- /* Add header info */
- fifo_data_len +=
- mem_cfg->rxfifo2_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
-
- /* Count RXF1 sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- if (!mem_cfg->lmac[i].rxfifo1_size)
- continue;
-
- /* Add header info */
- fifo_data_len +=
- mem_cfg->lmac[i].rxfifo1_size +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
- }
-
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
- size_t fifo_const_len = sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
-
- /* Count TXF sizes */
- for (i = 0; i < mem_cfg->num_lmacs; i++) {
- int j;
-
- for (j = 0; j < mem_cfg->num_txfifo_entries;
- j++) {
- if (!mem_cfg->lmac[i].txfifo_size[j])
- continue;
-
- /* Add header info */
- fifo_data_len +=
- fifo_const_len +
- mem_cfg->lmac[i].txfifo_size[j];
- }
- }
- }
-
- if ((fwrt->fw->dbg_dump_mask &
- BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
- fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
- for (i = 0;
- i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
- i++) {
- if (!mem_cfg->internal_txfifo_size[i])
- continue;
-
- /* Add header info */
- fifo_data_len +=
- mem_cfg->internal_txfifo_size[i] +
- sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_fifo);
- }
- }
+ fifo_len = iwl_fw_fifo_len(fwrt, mem_cfg);
/* Make room for PRPH registers */
if (!fwrt->trans->cfg->gen2 &&
- fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
- for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
- i++) {
- /* The range includes both boundaries */
- int num_bytes_in_chunk =
- iwl_prph_dump_addr_comm[i].end -
- iwl_prph_dump_addr_comm[i].start + 4;
-
- prph_len += sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_prph) +
- num_bytes_in_chunk;
- }
- }
-
- if (!fwrt->trans->cfg->gen2 &&
- fwrt->trans->cfg->mq_rx_supported &&
- fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
- for (i = 0; i <
- ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
- /* The range includes both boundaries */
- int num_bytes_in_chunk =
- iwl_prph_dump_addr_9000[i].end -
- iwl_prph_dump_addr_9000[i].start + 4;
-
- prph_len += sizeof(*dump_data) +
- sizeof(struct iwl_fw_error_dump_prph) +
- num_bytes_in_chunk;
- }
- }
+ fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
+ prph_len += iwl_fw_get_prph_len(fwrt);
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
- fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
+ fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
- file_len = sizeof(*dump_file) +
- fifo_data_len +
- prph_len +
- radio_len;
+ file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
file_len += sizeof(*dump_data) + sizeof(*dump_info);
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
- /* Make room for the SMEM, if it exists */
- if (smem_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
- smem_len;
-
- /* Make room for the secondary SRAM, if it exists */
- if (sram2_len)
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
- sram2_len;
-
- /* Make room for MEM segments */
- for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
- file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
- le32_to_cpu(fw_dbg_mem[i].len);
- }
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+ size_t hdr_len = sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_mem);
+
+ /* Dump SRAM only if no mem_tlvs */
+ if (!fwrt->fw->dbg.n_mem_tlv)
+ ADD_LEN(file_len, sram_len, hdr_len);
+
+ /* Make room for all mem types that exist */
+ ADD_LEN(file_len, smem_len, hdr_len);
+ ADD_LEN(file_len, sram2_len, hdr_len);
+
+ for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++)
+ ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len);
}
/* Make room for fw's virtual image pages, if it exists */
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
!fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block)
@@ -746,33 +721,32 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
sizeof(struct iwl_fw_error_dump_paging) +
PAGING_BLOCK_SIZE);
+ if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
+ file_len += sizeof(*dump_data) +
+ fwrt->trans->cfg->d3_debug_data_length * 2;
+ }
+
/* If we only want a monitor dump, reset the file length */
if (monitor_dump_only) {
file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
fwrt->dump.desc->len;
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
- !fwrt->fw->n_dbg_mem_tlv)
- file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
-
dump_file = vzalloc(file_len);
- if (!dump_file) {
- kfree(fw_error_dump);
- goto out;
- }
+ if (!dump_file)
+ return NULL;
fw_error_dump->fwrt_ptr = dump_file;
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_info));
dump_info = (void *)dump_data->data;
@@ -793,7 +767,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
/* Dump shared memory configuration */
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
@@ -824,13 +798,13 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* We only dump the FIFOs if the FW is in error state */
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
+ if (fifo_len) {
iwl_fw_dump_fifos(fwrt, &dump_data);
if (radio_len)
iwl_read_radio_regs(fwrt, &dump_data);
}
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
@@ -844,89 +818,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 /* In case we only want monitor dump, skip to dump transport data */
if (monitor_dump_only)
- goto dump_trans_data;
-
- if (!fwrt->fw->n_dbg_mem_tlv &&
- fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
- dump_mem = (void *)dump_data->data;
- dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
- dump_mem->offset = cpu_to_le32(sram_ofs);
- iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data,
- sram_len);
- dump_data = iwl_fw_error_next_data(dump_data);
- }
+ goto out;
+
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+ const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
+ fwrt->fw->dbg.mem_tlv;
- for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
- u32 len = le32_to_cpu(fw_dbg_mem[i].len);
- u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
- bool success;
-
- if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
- break;
-
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
- dump_mem = (void *)dump_data->data;
- dump_mem->type = fw_dbg_mem[i].data_type;
- dump_mem->offset = cpu_to_le32(ofs);
-
- IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n",
- dump_mem->type);
-
- switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) {
- case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR):
- iwl_trans_read_mem_bytes(fwrt->trans, ofs,
- dump_mem->data,
- len);
- success = true;
- break;
- case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH):
- success = iwl_read_prph_block(fwrt->trans, ofs, len,
- (void *)dump_mem->data);
- break;
- default:
- /*
- * shouldn't get here, we ignored this kind
- * of TLV earlier during the TLV parsing?!
- */
- WARN_ON(1);
- success = false;
+ if (!fwrt->fw->dbg.n_mem_tlv)
+ iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs,
+ IWL_FW_ERROR_DUMP_MEM_SRAM);
+
+ for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
+ u32 len = le32_to_cpu(fw_dbg_mem[i].len);
+ u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
+
+ iwl_fw_dump_mem(fwrt, &dump_data, len, ofs,
+ le32_to_cpu(fw_dbg_mem[i].data_type));
}
- if (success)
- dump_data = iwl_fw_error_next_data(dump_data);
- }
+ iwl_fw_dump_mem(fwrt, &dump_data, smem_len,
+ fwrt->trans->cfg->smem_offset,
+ IWL_FW_ERROR_DUMP_MEM_SMEM);
- if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
- IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
- dump_mem = (void *)dump_data->data;
- dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
- dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset);
- iwl_trans_read_mem_bytes(fwrt->trans,
- fwrt->trans->cfg->smem_offset,
- dump_mem->data, smem_len);
- dump_data = iwl_fw_error_next_data(dump_data);
+ iwl_fw_dump_mem(fwrt, &dump_data, sram2_len,
+ fwrt->trans->cfg->dccm2_offset,
+ IWL_FW_ERROR_DUMP_MEM_SRAM);
}
- if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
- IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
- dump_mem = (void *)dump_data->data;
- dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
- dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset);
- iwl_trans_read_mem_bytes(fwrt->trans,
- fwrt->trans->cfg->dccm2_offset,
- dump_mem->data, sram2_len);
+ if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
+ u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
+ size_t data_size = fwrt->trans->cfg->d3_debug_data_length;
+
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+ dump_data->len = cpu_to_le32(data_size * 2);
+
+ memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size);
+
+ kfree(fwrt->dump.d3_debug_data);
+ fwrt->dump.d3_debug_data = NULL;
+
+ iwl_trans_read_mem_bytes(fwrt->trans, addr,
+ dump_data->data + data_size,
+ data_size);
+
dump_data = iwl_fw_error_next_data(dump_data);
}
/* Dump fw's virtual image */
- if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+ if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
!fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block) {
@@ -962,13 +901,44 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
ARRAY_SIZE(iwl_prph_dump_addr_9000));
}
-dump_trans_data:
+out:
+ dump_file->file_len = cpu_to_le32(file_len);
+ return dump_file;
+}
+
+void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_fw_dump_ptrs *fw_error_dump;
+ struct iwl_fw_error_dump_file *dump_file;
+ struct scatterlist *sg_dump_data;
+ u32 file_len;
+
+ IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
+
+ /* there's no point in fw dump if the bus is dead */
+ if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
+ IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
+ goto out;
+ }
+
+ fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
+ if (!fw_error_dump)
+ goto out;
+
+ dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
+ if (!dump_file) {
+ kfree(fw_error_dump);
+ goto out;
+ }
+
fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
fwrt->dump.trig);
+ file_len = le32_to_cpu(dump_file->file_len);
fw_error_dump->fwrt_len = file_len;
- if (fw_error_dump->trans_ptr)
+ if (fw_error_dump->trans_ptr) {
file_len += fw_error_dump->trans_ptr->len;
- dump_file->file_len = cpu_to_le32(file_len);
+ dump_file->file_len = cpu_to_le32(file_len);
+ }
sg_dump_data = alloc_sgtable(file_len);
if (sg_dump_data) {
@@ -1003,20 +973,39 @@ const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
};
IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
-int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
- const struct iwl_fw_dump_desc *desc,
- const struct iwl_fw_dbg_trigger_tlv *trigger)
+void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
{
- unsigned int delay = 0;
+ struct iwl_fw_dump_desc *iwl_dump_desc_no_alive =
+ kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL);
+
+ if (!iwl_dump_desc_no_alive)
+ return;
- if (trigger)
- delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+ iwl_dump_desc_no_alive->trig_desc.type =
+ cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE);
+ iwl_dump_desc_no_alive->len = 0;
+
+ if (WARN_ON(fwrt->dump.desc))
+ iwl_fw_free_dump_desc(fwrt);
+ IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
+ FW_DBG_TRIGGER_NO_ALIVE);
+
+ fwrt->dump.desc = iwl_dump_desc_no_alive;
+ iwl_fw_error_dump(fwrt);
+ clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
+
+int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
+ const struct iwl_fw_dump_desc *desc, void *trigger,
+ unsigned int delay)
+{
/*
* If the loading of the FW completed successfully, the next step is to
* get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non
 * zero, the FW was already loaded successfully. If the state is "NO_FW"
- * in such a case - WARN and exit, since FW may be dead. Otherwise, we
+ * in such a case - exit, since FW may be dead. Otherwise, we
* can try to collect the data, since FW might just not be fully
* loaded (no "ALIVE" yet), and the debug data is accessible.
*
@@ -1024,12 +1013,12 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
* config. In such a case, due to HW access problems, we might
* collect garbage.
*/
- if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) &&
- fwrt->smem_cfg.num_lmacs,
- "Can't collect dbg data when FW isn't alive\n"))
+ if (fwrt->trans->state == IWL_TRANS_NO_FW &&
+ fwrt->smem_cfg.num_lmacs)
return -EIO;
- if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
+ if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) ||
+ test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status))
return -EBUSY;
if (WARN_ON(fwrt->dump.desc))
@@ -1050,25 +1039,38 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
- const struct iwl_fw_dbg_trigger_tlv *trigger)
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_fw_dump_desc *desc;
+ unsigned int delay = 0;
- if (trigger && trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
- IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig);
- iwl_force_nmi(fwrt->trans);
- return 0;
+ if (trigger) {
+ u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
+
+ if (!le16_to_cpu(trigger->occurrences))
+ return 0;
+
+ if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
+ IWL_WARN(fwrt, "Force restart: trigger %d fired.\n",
+ trig);
+ iwl_force_nmi(fwrt->trans);
+ return 0;
+ }
+
+ trigger->occurrences = cpu_to_le16(occurrences);
+ delay = le16_to_cpu(trigger->trig_dis_ms);
}
desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
if (!desc)
return -ENOMEM;
+
desc->len = len;
desc->trig_desc.type = cpu_to_le32(trig);
memcpy(desc->trig_desc.data, str, len);
- return iwl_fw_dbg_collect_desc(fwrt, desc, trigger);
+ return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
@@ -1076,13 +1078,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...)
{
- u16 occurrences = le16_to_cpu(trigger->occurrences);
int ret, len = 0;
char buf[64];
- if (!occurrences)
- return 0;
-
if (fmt) {
va_list ap;
@@ -1105,7 +1103,6 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
if (ret)
return ret;
- trigger->occurrences = cpu_to_le16(occurrences - 1);
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
@@ -1116,29 +1113,26 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
int ret;
int i;
- if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv),
+ if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv),
"Invalid configuration %d\n", conf_id))
return -EINVAL;
/* EARLY START - firmware's configuration is hard coded */
- if ((!fwrt->fw->dbg_conf_tlv[conf_id] ||
- !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
+ if ((!fwrt->fw->dbg.conf_tlv[conf_id] ||
+ !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
conf_id == FW_DBG_START_FROM_ALIVE)
return 0;
- if (!fwrt->fw->dbg_conf_tlv[conf_id])
+ if (!fwrt->fw->dbg.conf_tlv[conf_id])
return -EINVAL;
if (fwrt->dump.conf != FW_DBG_INVALID)
IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
fwrt->dump.conf);
- /* start default config marker cmd for syncing logs */
- iwl_fw_trigger_timestamp(fwrt, 1);
-
/* Send all HCMDs for configuring the FW debug */
- ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd;
- for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
+ ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
+ for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
struct iwl_host_cmd hcmd = {
.id = cmd->id,
@@ -1160,13 +1154,14 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
-void iwl_fw_error_dump_wk(struct work_struct *work)
+/* this function assumes dump_start was called beforehand and dump_end will be
+ * called afterwards
+ */
+void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
{
- struct iwl_fw_runtime *fwrt =
- container_of(work, struct iwl_fw_runtime, dump.wk.work);
+ struct iwl_fw_dbg_params params = {0};
- if (fwrt->ops && fwrt->ops->dump_start &&
- fwrt->ops->dump_start(fwrt->ops_ctx))
+ if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
return;
if (fwrt->ops && fwrt->ops->fw_running &&
@@ -1174,44 +1169,58 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
iwl_fw_free_dump_desc(fwrt);
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
- goto out;
+ return;
}
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- /* stop recording */
- iwl_fw_dbg_stop_recording(fwrt);
-
- iwl_fw_error_dump(fwrt);
-
- /* start recording again if the firmware is not crashed */
- if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
- fwrt->fw->dbg_dest_tlv) {
- iwl_clear_bits_prph(fwrt->trans,
- MON_BUFF_SAMPLE_CTL, 0x100);
- iwl_clear_bits_prph(fwrt->trans,
- MON_BUFF_SAMPLE_CTL, 0x1);
- iwl_set_bits_prph(fwrt->trans,
- MON_BUFF_SAMPLE_CTL, 0x1);
- }
- } else {
- u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
- u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
+ iwl_fw_dbg_stop_recording(fwrt, &params);
+
+ iwl_fw_error_dump(fwrt);
- iwl_fw_dbg_stop_recording(fwrt);
+ /* start recording again if the firmware is not crashed */
+ if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
+ fwrt->fw->dbg.dest_tlv) {
/* wait before we collect the data till the DBGC stop */
udelay(500);
+ iwl_fw_dbg_restart_recording(fwrt, &params);
+ }
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
- iwl_fw_error_dump(fwrt);
+void iwl_fw_error_dump_wk(struct work_struct *work)
+{
+ struct iwl_fw_runtime *fwrt =
+ container_of(work, struct iwl_fw_runtime, dump.wk.work);
+
+ if (fwrt->ops && fwrt->ops->dump_start &&
+ fwrt->ops->dump_start(fwrt->ops_ctx))
+ return;
+
+ iwl_fw_dbg_collect_sync(fwrt);
- /* start recording again if the firmware is not crashed */
- if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
- fwrt->fw->dbg_dest_tlv) {
- iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample);
- iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
- }
- }
-out:
if (fwrt->ops && fwrt->ops->dump_end)
fwrt->ops->dump_end(fwrt->ops_ctx);
}
+void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
+{
+ const struct iwl_cfg *cfg = fwrt->trans->cfg;
+
+ if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt))
+ return;
+
+ if (!fwrt->dump.d3_debug_data) {
+ fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length,
+ GFP_KERNEL);
+ if (!fwrt->dump.d3_debug_data) {
+ IWL_ERR(fwrt,
+ "failed to allocate memory for D3 debug data\n");
+ return;
+ }
+ }
+
+ /* if the buffer holds previous debug data it is overwritten */
+ iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr,
+ fwrt->dump.d3_debug_data,
+ cfg->d3_debug_data_length);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 507d9a49fa97..6f8d3256f7b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -19,9 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -74,6 +71,7 @@
#include "iwl-io.h"
#include "file.h"
#include "error-dump.h"
+#include "api/commands.h"
/**
* struct iwl_fw_dump_desc - describes the dump
@@ -86,6 +84,16 @@ struct iwl_fw_dump_desc {
struct iwl_fw_error_dump_trigger_desc trig_desc;
};
+/**
+ * struct iwl_fw_dbg_params - register values to restore
+ * @in_sample: DBGC_IN_SAMPLE value
+ * @out_ctrl: DBGC_OUT_CTRL value
+ */
+struct iwl_fw_dbg_params {
+ u32 in_sample;
+ u32 out_ctrl;
+};
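The struct exists so that a stop/restart pair can round-trip the live register values; the intended usage, mirroring iwl_fw_dbg_collect_sync() in dbg.c above, is:

	struct iwl_fw_dbg_params params = {0};

	iwl_fw_dbg_stop_recording(fwrt, &params);	/* saves IN_SAMPLE/OUT_CTRL */
	iwl_fw_error_dump(fwrt);
	udelay(500);					/* let the DBGC settle */
	iwl_fw_dbg_restart_recording(fwrt, &params);	/* writes the values back */
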
+
extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
@@ -99,25 +107,25 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
- const struct iwl_fw_dbg_trigger_tlv *trigger);
+ void *trigger, unsigned int delay);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
- const struct iwl_fw_dbg_trigger_tlv *trigger);
+ struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...) __printf(3, 4);
int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id);
#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \
- void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \
+ void *__dbg_trigger = (fw)->dbg.trigger_tlv[(id)]; \
unlikely(__dbg_trigger); \
})
static inline struct iwl_fw_dbg_trigger_tlv*
_iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id)
{
- return fw->dbg_trigger_tlv[id];
+ return fw->dbg.trigger_tlv[id];
}
#define iwl_fw_dbg_get_trigger(fw, id) ({ \
@@ -146,12 +154,9 @@ iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt,
}
static inline bool
-iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_dbg_trigger_tlv *trig)
+iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_ms)
{
- unsigned long wind_jiff =
- msecs_to_jiffies(le16_to_cpu(trig->trig_dis_ms));
- u32 id = le32_to_cpu(trig->id);
+ unsigned long wind_jiff = msecs_to_jiffies(dis_ms);
/* If this is the first event checked, jump to update start ts */
if (fwrt->dump.non_collect_ts_start[id] &&
@@ -171,7 +176,8 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev))
return false;
- if (iwl_fw_dbg_no_trig_window(fwrt, trig)) {
+ if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id),
+ le16_to_cpu(trig->trig_dis_ms))) {
IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n",
trig->id);
return false;
@@ -180,6 +186,30 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig);
}
+static inline struct iwl_fw_dbg_trigger_tlv*
+_iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
+ struct wireless_dev *wdev,
+ const enum iwl_fw_dbg_trigger id)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id))
+ return NULL;
+
+ trig = _iwl_fw_dbg_get_trigger(fwrt->fw, id);
+
+ if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trig))
+ return NULL;
+
+ return trig;
+}
+
+#define iwl_fw_dbg_trigger_on(fwrt, wdev, id) ({ \
+ BUILD_BUG_ON(!__builtin_constant_p(id)); \
+ BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \
+ _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \
+})
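A hypothetical call site for the new helper; the BUILD_BUG_ONs force @id to be a compile-time constant, so in practice it reads like this (fwrt/wdev come from the surrounding driver code):

	struct iwl_fw_dbg_trigger_tlv *trig;

	trig = iwl_fw_dbg_trigger_on(fwrt, wdev, FW_DBG_TRIGGER_MISSED_BEACONS);
	if (trig)
		iwl_fw_dbg_collect_trig(fwrt, trig, "missed beacons");
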
+
static inline void
_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
@@ -199,17 +229,80 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
iwl_fw_dbg_get_trigger((fwrt)->fw,\
(trig)))
-static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
+static int iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start)
+{
+ struct iwl_continuous_record_cmd cont_rec = {};
+ struct iwl_host_cmd hcmd = {
+ .id = LDBG_CONFIG_CMD,
+ .flags = CMD_ASYNC,
+ .data[0] = &cont_rec,
+ .len[0] = sizeof(cont_rec),
+ };
+
+ cont_rec.record_mode.enable_recording = start ?
+ cpu_to_le16(START_DEBUG_RECORDING) :
+ cpu_to_le16(STOP_DEBUG_RECORDING);
+
+ return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+
+static inline void
+_iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
+ struct iwl_fw_dbg_params *params)
+{
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ return;
+ }
+
+ if (params) {
+ params->in_sample = iwl_read_prph(trans, DBGC_IN_SAMPLE);
+ params->out_ctrl = iwl_read_prph(trans, DBGC_OUT_CTRL);
+ }
+
+ iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
+ udelay(100);
+ iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+}
+
+static inline void
+iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dbg_params *params)
+{
+ if (fwrt->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ _iwl_fw_dbg_stop_recording(fwrt->trans, params);
+ else
+ iwl_fw_dbg_start_stop_hcmd(fwrt, false);
+}
+
+static inline void
+_iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
+ struct iwl_fw_dbg_params *params)
{
- if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ if (WARN_ON(!params))
+ return;
+
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
+ iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
} else {
- iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
+ iwl_write_prph(trans, DBGC_IN_SAMPLE, params->in_sample);
udelay(100);
- iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+ iwl_write_prph(trans, DBGC_OUT_CTRL, params->out_ctrl);
}
}
+static inline void
+iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dbg_params *params)
+{
+ if (fwrt->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ _iwl_fw_dbg_restart_recording(fwrt->trans, params);
+ else
+ iwl_fw_dbg_start_stop_hcmd(fwrt, true);
+}
+
static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
{
fwrt->dump.conf = FW_DBG_INVALID;
@@ -217,6 +310,16 @@ static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
void iwl_fw_error_dump_wk(struct work_struct *work);
+static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
+{
+ return fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
+ fwrt->trans->cfg->d3_debug_data_length &&
+ fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+}
+
+void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
+
static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt)
{
flush_delayed_work(&fwrt->dump.wk);
@@ -263,4 +366,6 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
+void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
+void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index 8ba5a60ec9ed..3e120dd47305 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program.
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -33,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,55 +65,117 @@
#include "debugfs.h"
#include "dbg.h"
-#define FWRT_DEBUGFS_READ_FILE_OPS(name) \
-static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt, \
- char *buf, size_t count, \
- loff_t *ppos); \
+#define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+struct dbgfs_##name##_data { \
+ argtype *arg; \
+ bool read_done; \
+ ssize_t rlen; \
+ char rbuf[buflen]; \
+}; \
+static int _iwl_dbgfs_##name##_open(struct inode *inode, \
+ struct file *file) \
+{ \
+ struct dbgfs_##name##_data *data; \
+ \
+ data = kzalloc(sizeof(*data), GFP_KERNEL); \
+ if (!data) \
+ return -ENOMEM; \
+ \
+ data->read_done = false; \
+ data->arg = inode->i_private; \
+ file->private_data = data; \
+ \
+ return 0; \
+}
+
+#define FWRT_DEBUGFS_READ_WRAPPER(name) \
+static ssize_t _iwl_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct dbgfs_##name##_data *data = file->private_data; \
+ \
+ if (!data->read_done) { \
+ data->read_done = true; \
+ data->rlen = iwl_dbgfs_##name##_read(data->arg, \
+ sizeof(data->rbuf),\
+ data->rbuf); \
+ } \
+ \
+ if (data->rlen < 0) \
+ return data->rlen; \
+ return simple_read_from_buffer(user_buf, count, ppos, \
+ data->rbuf, data->rlen); \
+}
+
+static int _iwl_dbgfs_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+#define _FWRT_DEBUGFS_READ_FILE_OPS(name, buflen, argtype) \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_READ_WRAPPER(name) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .read = iwl_dbgfs_##name##_read, \
- .open = simple_open, \
+ .read = _iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
.llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
}
-#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
-static ssize_t iwl_dbgfs_##name##_write(struct iwl_fw_runtime *fwrt, \
- char *buf, size_t count, \
- loff_t *ppos); \
+#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
const char __user *user_buf, \
size_t count, loff_t *ppos) \
{ \
- struct iwl_fw_runtime *fwrt = file->private_data; \
+ argtype *arg = \
+ ((struct dbgfs_##name##_data *)file->private_data)->arg;\
char buf[buflen] = {}; \
size_t buf_size = min(count, sizeof(buf) - 1); \
\
if (copy_from_user(buf, user_buf, buf_size)) \
return -EFAULT; \
\
- return iwl_dbgfs_##name##_write(fwrt, buf, buf_size, ppos); \
+ return iwl_dbgfs_##name##_write(arg, buf, buf_size); \
}
-#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen) \
-FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
+#define _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_READ_WRAPPER(name) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = _iwl_dbgfs_##name##_write, \
- .read = iwl_dbgfs_##name##_read, \
- .open = simple_open, \
+ .read = _iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
.llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
}
-#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen) \
-FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
+#define _FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = _iwl_dbgfs_##name##_write, \
- .open = simple_open, \
+ .open = _iwl_dbgfs_##name##_open, \
.llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
}
+#define FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz) \
+ _FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
+#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ _FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
+#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
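
These wrappers rely on a naming convention: FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) expects iwl_dbgfs_<name>_read/_write to already be defined with the signatures below, shown here for a hypothetical file "foo", not one added by this patch:

	static ssize_t iwl_dbgfs_foo_read(struct iwl_fw_runtime *fwrt,
					  size_t size, char *buf);
	static ssize_t iwl_dbgfs_foo_write(struct iwl_fw_runtime *fwrt,
					   char *buf, size_t count);
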
#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
- if (!debugfs_create_file(alias, mode, parent, fwrt, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ if (!debugfs_create_file(alias, mode, parent, fwrt, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
} while (0)
#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
@@ -173,8 +234,7 @@ void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay)
}
static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
- char *buf, size_t count,
- loff_t *ppos)
+ char *buf, size_t count)
{
int ret;
u32 delay;
@@ -188,13 +248,85 @@ static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
return count;
}
-FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10);
+static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt,
+ size_t size, char *buf)
+{
+ u32 delay_secs = jiffies_to_msecs(fwrt->timestamp.delay) / 1000;
+
+ return scnprintf(buf, size, "%u\n", delay_secs);
+}
+
+FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16);
+
+struct hcmd_write_data {
+ __be32 cmd_id;
+ __be32 flags;
+ __be16 length;
+ u8 data[0];
+} __packed;
+
+static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
+ size_t count)
+{
+ size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2;
+ size_t data_size = (count - 1) / 2;
+ int ret;
+ struct hcmd_write_data *data;
+ struct iwl_host_cmd hcmd = {
+ .len = { 0, },
+ .data = { NULL, },
+ };
+
+ if (fwrt->ops && fwrt->ops->fw_running &&
+ !fwrt->ops->fw_running(fwrt->ops_ctx))
+ return -EIO;
+
+ if (count < header_size + 1 || count > 1024 * 4)
+ return -EINVAL;
+
+ data = kmalloc(data_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = hex2bin((u8 *)data, buf, data_size);
+ if (ret)
+ goto out;
+
+ hcmd.id = be32_to_cpu(data->cmd_id);
+ hcmd.flags = be32_to_cpu(data->flags);
+ hcmd.len[0] = be16_to_cpu(data->length);
+ hcmd.data[0] = data->data;
+
+ if (count != header_size + hcmd.len[0] * 2 + 1) {
+ IWL_ERR(fwrt,
+ "host command data size does not match header length\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (fwrt->ops && fwrt->ops->send_hcmd)
+ ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
+ else
+ ret = -EPERM;
+
+ if (ret < 0)
+ goto out;
+
+ if (hcmd.flags & CMD_WANT_SKB)
+ iwl_free_resp(&hcmd);
+out:
+ kfree(data);
+ return ret ?: count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
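
The send_hcmd input is one hex string with no separators: 8 digits of big-endian command id, 8 of flags, 4 of payload length, then the payload bytes — header_size is (4 + 4 + 2) * 2 = 20 characters, and the + 1 in the size check accounts for the trailing newline. An illustrative input, with hypothetical id and payload values:

	/* Hypothetical: decodes to cmd_id=0x00000077, flags=0, length=4,
	 * payload de:ad:be:ef. With the trailing newline, count is
	 * 20 + 2 * 4 + 1 = 29, satisfying the size check above. */
	static const char example[] = "00000077000000000004deadbeef\n";
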
int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
+ FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
return 0;
err:
IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
index cbbfa8e9e66d..88255035e8ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -18,9 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program.
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index ed7beca8817e..6fede174c664 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -8,6 +8,7 @@
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -35,6 +31,7 @@
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -116,6 +113,7 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */
IWL_FW_ERROR_DUMP_MEM_CFG = 16,
+ IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17,
IWL_FW_ERROR_DUMP_MAX,
};
@@ -330,6 +328,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* @FW_DBG_TDLS: trigger log collection upon TDLS related events.
* @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
* the firmware sends a tx reply.
+ * @FW_DBG_TRIGGER_NO_ALIVE: trigger log collection if alive flow fails
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
@@ -347,6 +346,7 @@ enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_TX_LATENCY,
FW_DBG_TRIGGER_TDLS,
FW_DBG_TRIGGER_TX_STATUS,
+ FW_DBG_TRIGGER_NO_ALIVE,
/* must be last */
FW_DBG_TRIGGER_MAX,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index bbf2b265a06a..6005a41c53d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -258,6 +253,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* deprecated.
* @IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2: This ucode supports version 8
* of scan request: SCAN_REQUEST_CMD_UMAC_API_S_VER_8
+ * @IWL_UCODE_TLV_API_FRAG_EBS: This ucode supports fragmented EBS
+ * @IWL_UCODE_TLV_API_REDUCE_TX_POWER: This ucode supports v5 of
+ * the REDUCE_TX_POWER_CMD.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -276,9 +274,12 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_OCE = (__force iwl_ucode_tlv_api_t)33,
IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
+ IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL = (__force iwl_ucode_tlv_api_t)36,
IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38,
IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41,
IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = (__force iwl_ucode_tlv_api_t)42,
+ IWL_UCODE_TLV_API_FRAG_EBS = (__force iwl_ucode_tlv_api_t)44,
+ IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -325,6 +326,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
* @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
* @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
+ * @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -335,7 +337,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* antenna the beacon should be transmitted
* @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
* from AP and will send it upon d0i3 exit.
- * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3: support LAR API V3
* @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
* @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
* thresholds reporting
@@ -349,6 +351,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* command size (command version 4) that supports toggling ACK TX
* power reduction.
* @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
+ * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3
+ * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax
+ * capability.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -381,6 +386,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41,
IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43,
IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44,
+ IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
@@ -388,7 +394,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70,
IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71,
IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = (__force iwl_ucode_tlv_capa_t)73,
IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74,
IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75,
IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76,
@@ -396,7 +402,9 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
- IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)86,
+ IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87,
+ IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88,
+ IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89,
IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
NUM_IWL_UCODE_TLV_CAPA
@@ -528,22 +536,9 @@ enum iwl_fw_dbg_monitor_mode {
};
/**
- * enum iwl_fw_mem_seg_type - memory segment type
- * @FW_DBG_MEM_TYPE_MASK: mask for the type indication
- * @FW_DBG_MEM_TYPE_REGULAR: regular memory
- * @FW_DBG_MEM_TYPE_PRPH: periphery memory (requires special reading)
- */
-enum iwl_fw_mem_seg_type {
- FW_DBG_MEM_TYPE_MASK = 0xff000000,
- FW_DBG_MEM_TYPE_REGULAR = 0x00000000,
- FW_DBG_MEM_TYPE_PRPH = 0x01000000,
-};
-
-/**
* struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
*
- * @data_type: the memory segment type to record, see &enum iwl_fw_mem_seg_type
- * for what we care about
+ * @data_type: the memory segment type to record
* @ofs: the memory segment offset
* @len: the memory segment length, in bytes
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 0861b97c4233..54dbbd998abf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -203,6 +198,29 @@ enum iwl_fw_type {
};
/**
+ * struct iwl_fw_dbg - debug data
+ *
+ * @dest_tlv: points to debug destination TLV (typically SRAM or DRAM)
+ * @n_dest_reg: num of reg_ops in dest_tlv
+ * @conf_tlv: array of pointers to configuration HCMDs
+ * @trigger_tlv: array of pointers to triggers TLVs
+ * @trigger_tlv_len: lengths of the @trigger_tlv entries
+ * @mem_tlv: Runtime addresses to dump
+ * @n_mem_tlv: number of runtime addresses
+ * @dump_mask: bitmask of dump regions
+ */
+struct iwl_fw_dbg {
+ struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
+ u8 n_dest_reg;
+ struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
+ struct iwl_fw_dbg_trigger_tlv *trigger_tlv[FW_DBG_TRIGGER_MAX];
+ size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+ struct iwl_fw_dbg_mem_seg_tlv *mem_tlv;
+ size_t n_mem_tlv;
+ u32 dump_mask;
+};
+
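
The old flat fields in struct iwl_fw map one-for-one onto this struct; in summary, the renames applied throughout the rest of this patch are:

	/*
	 * fw->dbg_dump_mask      -> fw->dbg.dump_mask
	 * fw->dbg_dest_tlv       -> fw->dbg.dest_tlv
	 * fw->dbg_dest_reg_num   -> fw->dbg.n_dest_reg
	 * fw->dbg_trigger_tlv[i] -> fw->dbg.trigger_tlv[i]
	 * fw->n_dbg_mem_tlv      -> fw->dbg.n_mem_tlv
	 */
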
+/**
* struct iwl_fw - variables associated with the firmware
*
* @ucode_ver: ucode version from the ucode file
@@ -222,12 +240,6 @@ enum iwl_fw_type {
* @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* we get the ALIVE from the uCode
- * @dbg_dest_tlv: points to the destination TLV for debug
- * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
- * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
- * @dbg_trigger_tlv: array of pointers to triggers TLVs
- * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
- * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/
struct iwl_fw {
u32 ucode_ver;
@@ -255,15 +267,7 @@ struct iwl_fw {
struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
- struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
- struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
- size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
- struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
- struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
- size_t n_dbg_mem_tlv;
- size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
- u8 dbg_dest_reg_num;
- u32 dbg_dump_mask;
+ struct iwl_fw_dbg dbg;
};
static inline const char *get_fw_dbg_mode_string(int mode)
@@ -285,7 +289,7 @@ static inline const char *get_fw_dbg_mode_string(int mode)
static inline bool
iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
{
- const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+ const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg.conf_tlv[id];
if (!conf_tlv)
return false;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
index 1096c945a68b..379735e086dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h
index 368884be4e7c..61b067eeeac9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index ed23367f7088..6b95d0e75889 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -71,6 +71,7 @@ struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
bool (*fw_running)(void *ctx);
+ int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
};
#define MAX_NUM_LMAC 2
@@ -88,6 +89,7 @@ struct iwl_fwrt_shared_mem_cfg {
enum iwl_fw_runtime_status {
IWL_FWRT_STATUS_DUMPING = 0,
+ IWL_FWRT_STATUS_WAIT_ALIVE,
};
/**
@@ -136,6 +138,7 @@ struct iwl_fw_runtime {
/* ts of the beginning of a non-collect fw dbg data period */
unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1];
+ u32 *d3_debug_data;
} dump;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h
index ee9347a54cdc..359537620c93 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h
@@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 12fddcf15bab..5eb906a0d0d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -388,6 +383,8 @@ struct iwl_csr_params {
* @gen2: 22000 and on transport operation
* @cdb: CDB support
* @nvm_type: see &enum iwl_nvm_type
+ * @d3_debug_data_base_addr: base address where D3 debug data is stored
+ * @d3_debug_data_length: length of the D3 debug data
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -452,6 +449,8 @@ struct iwl_cfg {
u8 ucode_api_min;
u32 min_umac_error_event_table;
u32 extra_phy_cfg_flags;
+ u32 d3_debug_data_base_addr;
+ u32 d3_debug_data_length;
};
static const struct iwl_csr_params iwl_csr_v1 = {
@@ -574,11 +573,18 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
-#endif /* CONFIG_IWLMVM */
+#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index 4b6fdf3b15fb..5ed07e37e3ee 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -64,20 +64,41 @@
* the init done for driver command that configures several system modes
* @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
* @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
- * @IWL_CTXT_INFO_RB_SIZE_4K: Use 4K RB size (the default is 2K)
* @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
* exponent, the actual size is 2**value, valid sizes are 8-2048.
* The value is four bits long. Maximum valid exponent is 12
* @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
* default is short format - not supported by the driver)
+ * @IWL_CTXT_INFO_RB_SIZE_POS: RB size position
+ * (values are IWL_CTXT_INFO_RB_SIZE_*K)
+ * @IWL_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_4K: Value for 4K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_8K: Value for 8K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_12K: Value for 12K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_16K: Value for 16K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_20K: Value for 20K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_24K: Value for 24K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_28K: Value for 28K RB size
+ * @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size
*/
enum iwl_context_info_flags {
IWL_CTXT_INFO_AUTO_FUNC_INIT = BIT(0),
IWL_CTXT_INFO_EARLY_DEBUG = BIT(1),
IWL_CTXT_INFO_ENABLE_CDMP = BIT(2),
- IWL_CTXT_INFO_RB_SIZE_4K = BIT(3),
IWL_CTXT_INFO_RB_CB_SIZE_POS = 4,
IWL_CTXT_INFO_TFD_FORMAT_LONG = BIT(8),
+ IWL_CTXT_INFO_RB_SIZE_POS = 9,
+ IWL_CTXT_INFO_RB_SIZE_1K = 0x1,
+ IWL_CTXT_INFO_RB_SIZE_2K = 0x2,
+ IWL_CTXT_INFO_RB_SIZE_4K = 0x4,
+ IWL_CTXT_INFO_RB_SIZE_8K = 0x8,
+ IWL_CTXT_INFO_RB_SIZE_12K = 0x9,
+ IWL_CTXT_INFO_RB_SIZE_16K = 0xa,
+ IWL_CTXT_INFO_RB_SIZE_20K = 0xb,
+ IWL_CTXT_INFO_RB_SIZE_24K = 0xc,
+ IWL_CTXT_INFO_RB_SIZE_28K = 0xd,
+ IWL_CTXT_INFO_RB_SIZE_32K = 0xe,
};
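
Where the old layout spent a single bit (BIT(3)) on 4K-vs-2K, the RB size is now a multi-bit value shifted into position; a minimal encoding sketch, assuming a caller composing the context-info control flags:

	/* Illustrative: request a 4K receive buffer in the new encoding. */
	u32 control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
			    (IWL_CTXT_INFO_RB_SIZE_4K <<
			     IWL_CTXT_INFO_RB_SIZE_POS);
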
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 9019de99f077..caa5806acd81 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -188,6 +183,7 @@
#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
+#define CSR_HW_IF_CONFIG_REG_D3_DEBUG (0x00000200)
#define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00)
#define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000)
#define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
index b1c3b0d0fcc6..e1a41fd503a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
@@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index c023fcf5d452..a2af68a0d34b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -13,10 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
index a80e4202cd03..420e6d745f77 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -2,6 +2,7 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -12,10 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
@@ -33,38 +30,20 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_data
-TRACE_EVENT(iwlwifi_dev_tx_data,
- TP_PROTO(const struct device *dev,
- struct sk_buff *skb, u8 hdr_len),
- TP_ARGS(dev, skb, hdr_len),
+TRACE_EVENT(iwlwifi_dev_tx_tb,
+ TP_PROTO(const struct device *dev, struct sk_buff *skb,
+ u8 *data_src, size_t data_len),
+ TP_ARGS(dev, skb, data_src, data_len),
TP_STRUCT__entry(
DEV_ENTRY
__dynamic_array(u8, data,
- iwl_trace_data(skb) ? skb->len - hdr_len : 0)
+ iwl_trace_data(skb) ? data_len : 0)
),
TP_fast_assign(
DEV_ASSIGN;
if (iwl_trace_data(skb))
- skb_copy_bits(skb, hdr_len,
- __get_dynamic_array(data),
- skb->len - hdr_len);
- ),
- TP_printk("[%s] TX frame data", __get_str(dev))
-);
-
-TRACE_EVENT(iwlwifi_dev_tx_tso_chunk,
- TP_PROTO(const struct device *dev,
- u8 *data_src, size_t data_len),
- TP_ARGS(dev, data_src, data_len),
- TP_STRUCT__entry(
- DEV_ENTRY
-
- __dynamic_array(u8, data, data_len)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- memcpy(__get_dynamic_array(data), data_src, data_len);
+ memcpy(__get_dynamic_array(data), data_src, data_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
);
@@ -76,12 +55,11 @@ TRACE_EVENT(iwlwifi_dev_rx_data,
TP_ARGS(dev, trans, rxbuf, len),
TP_STRUCT__entry(
DEV_ENTRY
-
__dynamic_array(u8, data,
- len - iwl_rx_trace_len(trans, rxbuf, len))
+ len - iwl_rx_trace_len(trans, rxbuf, len, NULL))
),
TP_fast_assign(
- size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
+ size_t offs = iwl_rx_trace_len(trans, rxbuf, len, NULL);
DEV_ASSIGN;
if (offs < len)
memcpy(__get_dynamic_array(data),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
index 4164dc1745ed..7bb4e0e9bb69 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
@@ -12,10 +12,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
index 27e3e4e96aa2..8e87186682e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -3,6 +3,7 @@
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -13,10 +14,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
@@ -75,13 +72,18 @@ TRACE_EVENT(iwlwifi_dev_rx,
TP_STRUCT__entry(
DEV_ENTRY
__field(u16, cmd)
- __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
+ __field(u8, hdr_offset)
+ __dynamic_array(u8, rxbuf,
+ iwl_rx_trace_len(trans, pkt, len, NULL))
),
TP_fast_assign(
+ size_t hdr_offset = 0;
+
DEV_ASSIGN;
__entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
memcpy(__get_dynamic_array(rxbuf), pkt,
- iwl_rx_trace_len(trans, pkt, len));
+ iwl_rx_trace_len(trans, pkt, len, &hdr_offset));
+ __entry->hdr_offset = hdr_offset;
),
TP_printk("[%s] RX cmd %#.2x",
__get_str(dev), __entry->cmd)
@@ -126,61 +128,6 @@ TRACE_EVENT(iwlwifi_dev_tx,
__entry->framelen, __entry->skbaddr)
);
-struct iwl_error_event_table;
-TRACE_EVENT(iwlwifi_dev_ucode_error,
- TP_PROTO(const struct device *dev, const struct iwl_error_event_table *table,
- u32 hw_ver, u32 brd_ver),
- TP_ARGS(dev, table, hw_ver, brd_ver),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, desc)
- __field(u32, tsf_low)
- __field(u32, data1)
- __field(u32, data2)
- __field(u32, line)
- __field(u32, blink2)
- __field(u32, ilink1)
- __field(u32, ilink2)
- __field(u32, bcon_time)
- __field(u32, gp1)
- __field(u32, gp2)
- __field(u32, rev_type)
- __field(u32, major)
- __field(u32, minor)
- __field(u32, hw_ver)
- __field(u32, brd_ver)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->desc = table->error_id;
- __entry->tsf_low = table->tsf_low;
- __entry->data1 = table->data1;
- __entry->data2 = table->data2;
- __entry->line = table->line;
- __entry->blink2 = table->blink2;
- __entry->ilink1 = table->ilink1;
- __entry->ilink2 = table->ilink2;
- __entry->bcon_time = table->bcon_time;
- __entry->gp1 = table->gp1;
- __entry->gp2 = table->gp2;
- __entry->rev_type = table->gp3;
- __entry->major = table->ucode_ver;
- __entry->minor = table->hw_ver;
- __entry->hw_ver = hw_ver;
- __entry->brd_ver = brd_ver;
- ),
- TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
- "blink2 0x%05X ilink 0x%05X 0x%05X "
- "bcon_tm %010u gp 0x%08X 0x%08X rev_type 0x%08X major 0x%08X "
- "minor 0x%08X hw 0x%08X brd 0x%08X",
- __get_str(dev), __entry->desc, __entry->tsf_low,
- __entry->data1, __entry->data2, __entry->line,
- __entry->blink2, __entry->ilink1, __entry->ilink2,
- __entry->bcon_time, __entry->gp1, __entry->gp2,
- __entry->rev_type, __entry->major, __entry->minor,
- __entry->hw_ver, __entry->brd_ver)
-);
-
TRACE_EVENT(iwlwifi_dev_ucode_event,
TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
TP_ARGS(dev, time, data, ev),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
index 5dfc9295a7e0..32984c1f39a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h
index e9b8673dd245..53842226ef1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
index 6aa719865a58..9805432f124f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -11,10 +12,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
@@ -30,12 +27,10 @@
#ifndef __CHECKER__
#include "iwl-trans.h"
-#include "dvm/commands.h"
#define CREATE_TRACE_POINTS
#include "iwl-devtrace.h"
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
index f5c1127253cb..fc649b2bc017 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -1,7 +1,8 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
- * Copyright(C) 2016 Intel Deutschland GmbH
+ * Copyright(C) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -12,10 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
@@ -60,16 +57,23 @@ static inline bool iwl_trace_data(struct sk_buff *skb)
}
static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
- void *rxbuf, size_t len)
+ void *rxbuf, size_t len,
+ size_t *out_hdr_offset)
{
struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32));
- struct ieee80211_hdr *hdr;
+ struct ieee80211_hdr *hdr = NULL;
+ size_t hdr_offset;
if (cmd->cmd != trans->rx_mpdu_cmd)
return len;
- hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) +
- trans->rx_mpdu_cmd_hdr_size);
+ hdr_offset = sizeof(struct iwl_cmd_header) +
+ trans->rx_mpdu_cmd_hdr_size;
+
+ if (out_hdr_offset)
+ *out_hdr_offset = hdr_offset;
+
+ hdr = (void *)((u8 *)cmd + hdr_offset);
if (!ieee80211_is_data(hdr->frame_control))
return len;
/* maybe try to identify EAPOL frames? */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index c0631255aee7..ba41d23b4211 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -173,12 +168,12 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
{
int i;
- kfree(drv->fw.dbg_dest_tlv);
- for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
- kfree(drv->fw.dbg_conf_tlv[i]);
- for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
- kfree(drv->fw.dbg_trigger_tlv[i]);
- kfree(drv->fw.dbg_mem_tlv);
+ kfree(drv->fw.dbg.dest_tlv);
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++)
+ kfree(drv->fw.dbg.conf_tlv[i]);
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++)
+ kfree(drv->fw.dbg.trigger_tlv[i]);
+ kfree(drv->fw.dbg.mem_tlv);
kfree(drv->fw.iml);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
@@ -308,7 +303,7 @@ struct iwl_firmware_pieces {
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
- size_t n_dbg_mem_tlv;
+ size_t n_mem_tlv;
};
/*
@@ -941,7 +936,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
IWL_INFO(drv, "Found debug destination: %s\n",
get_fw_dbg_mode_string(mon_mode));
- drv->fw.dbg_dest_reg_num = (dest_v1) ?
+ drv->fw.dbg.n_dest_reg = (dest_v1) ?
tlv_len -
offsetof(struct iwl_fw_dbg_dest_tlv_v1,
reg_ops) :
@@ -949,8 +944,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
offsetof(struct iwl_fw_dbg_dest_tlv,
reg_ops);
- drv->fw.dbg_dest_reg_num /=
- sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
+ drv->fw.dbg.n_dest_reg /=
+ sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]);
break;
}
@@ -964,7 +959,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
- if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) {
+ if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) {
IWL_ERR(drv,
"Skip unknown configuration: %d\n",
conf->id);
@@ -993,7 +988,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
(void *)tlv_data;
u32 trigger_id = le32_to_cpu(trigger->id);
- if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) {
+ if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
IWL_ERR(drv,
"Skip unknown trigger: %u\n",
trigger->id);
@@ -1020,7 +1015,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
- drv->fw.dbg_dump_mask =
+ drv->fw.dbg.dump_mask =
le32_to_cpup((__le32 *)tlv_data);
break;
}
@@ -1065,38 +1060,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_FW_MEM_SEG: {
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
(void *)tlv_data;
- u32 type;
size_t size;
struct iwl_fw_dbg_mem_seg_tlv *n;
if (tlv_len != (sizeof(*dbg_mem)))
goto invalid_tlv_len;
- type = le32_to_cpu(dbg_mem->data_type);
-
IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
dbg_mem->data_type);
- switch (type & FW_DBG_MEM_TYPE_MASK) {
- case FW_DBG_MEM_TYPE_REGULAR:
- case FW_DBG_MEM_TYPE_PRPH:
- /* we know how to handle these */
- break;
- default:
- IWL_ERR(drv,
- "Found debug memory segment with invalid type: 0x%x\n",
- type);
- return -EINVAL;
- }
-
size = sizeof(*pieces->dbg_mem_tlv) *
- (pieces->n_dbg_mem_tlv + 1);
+ (pieces->n_mem_tlv + 1);
n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
if (!n)
return -ENOMEM;
pieces->dbg_mem_tlv = n;
- pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem;
- pieces->n_dbg_mem_tlv++;
+ pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem;
+ pieces->n_mem_tlv++;
break;
}
case IWL_UCODE_TLV_IML: {
@@ -1275,8 +1255,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
- /* dump all fw memory areas by default */
- fw->dbg_dump_mask = 0xffffffff;
+ /* dump all fw memory areas by default except d3 debug data */
+ fw->dbg.dump_mask = 0xfffdffff;
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
@@ -1343,21 +1323,21 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
goto out_free_fw;
if (pieces->dbg_dest_tlv_init) {
- size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) +
- sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
- drv->fw.dbg_dest_reg_num;
+ size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) +
+ sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
+ drv->fw.dbg.n_dest_reg;
- drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
+ drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
- if (!drv->fw.dbg_dest_tlv)
+ if (!drv->fw.dbg.dest_tlv)
goto out_free_fw;
if (*pieces->dbg_dest_ver == 0) {
- memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1,
+ memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1,
dbg_dest_size);
} else {
struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
- drv->fw.dbg_dest_tlv;
+ drv->fw.dbg.dest_tlv;
dest_tlv->version = pieces->dbg_dest_tlv->version;
dest_tlv->monitor_mode =
@@ -1372,8 +1352,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
pieces->dbg_dest_tlv->base_shift;
memcpy(dest_tlv->reg_ops,
pieces->dbg_dest_tlv->reg_ops,
- sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
- drv->fw.dbg_dest_reg_num);
+ sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
+ drv->fw.dbg.n_dest_reg);
/* In version 1 of the destination tlv, which is
* relevant for internal buffer exclusively,
@@ -1389,15 +1369,13 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
}
}
- for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) {
if (pieces->dbg_conf_tlv[i]) {
- drv->fw.dbg_conf_tlv_len[i] =
- pieces->dbg_conf_tlv_len[i];
- drv->fw.dbg_conf_tlv[i] =
+ drv->fw.dbg.conf_tlv[i] =
kmemdup(pieces->dbg_conf_tlv[i],
- drv->fw.dbg_conf_tlv_len[i],
+ pieces->dbg_conf_tlv_len[i],
GFP_KERNEL);
- if (!drv->fw.dbg_conf_tlv[i])
+ if (!drv->fw.dbg.conf_tlv[i])
goto out_free_fw;
}
}
@@ -1424,7 +1402,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] =
sizeof(struct iwl_fw_dbg_trigger_tdls);
- for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) {
if (pieces->dbg_trigger_tlv[i]) {
/*
* If the trigger isn't long enough, WARN and exit.
@@ -1437,22 +1415,22 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
(trigger_tlv_sz[i] +
sizeof(struct iwl_fw_dbg_trigger_tlv))))
goto out_free_fw;
- drv->fw.dbg_trigger_tlv_len[i] =
+ drv->fw.dbg.trigger_tlv_len[i] =
pieces->dbg_trigger_tlv_len[i];
- drv->fw.dbg_trigger_tlv[i] =
+ drv->fw.dbg.trigger_tlv[i] =
kmemdup(pieces->dbg_trigger_tlv[i],
- drv->fw.dbg_trigger_tlv_len[i],
+ drv->fw.dbg.trigger_tlv_len[i],
GFP_KERNEL);
- if (!drv->fw.dbg_trigger_tlv[i])
+ if (!drv->fw.dbg.trigger_tlv[i])
goto out_free_fw;
}
}
/* Now that we can no longer fail, copy information */
- drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv;
+ drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv;
pieces->dbg_mem_tlv = NULL;
- drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv;
+ drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv;
/*
* The (size - 16) / 12 formula is based on the information recorded
@@ -1493,6 +1471,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
break;
default:
WARN(1, "Invalid fw type %d\n", fw->type);
+ /* fall through */
case IWL_FW_MVM:
op = &iwlwifi_opmode_table[MVM_OP_MODE];
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 1f8a2eeb7dff..2be30af7bdc3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index a4c96215933b..4e3422a1c7bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -18,9 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -745,7 +742,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
else
rx_chains = hweight8(rx_chains);
- if (!(data->sku_cap_11n_enable) || !cfg->ht_params) {
+ if (!(data->sku_cap_11n_enable) ||
+ (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
+ !cfg->ht_params) {
ht_info->ht_supported = false;
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
index 8be50ed12300..d910bda087f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
index ac965c34a2f8..a6db6a814257 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
index 1ed78be06c23..47fced159800 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
@@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index df0e9ffff706..c6a534303936 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -18,9 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program.
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -68,6 +65,8 @@
#include <linux/types.h>
#include <linux/bitfield.h>
+#include "iwl-trans.h"
+
/****************************/
/* Flow Handler Definitions */
/****************************/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index efb1998dcabd..4f10914f6048 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -14,10 +14,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 5c8c0e130194..38085850a2d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -13,10 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 97072cf75bca..6fc8dac4aab7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -17,9 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program;
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 73969dbeb5c5..96e101d79662 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -476,30 +471,40 @@ static struct ieee80211_sband_iftype_data iwl_he_capa = {
.has_he = true,
.he_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_HE_MAC_CAP0_HTC_HE,
+ IEEE80211_HE_MAC_CAP0_HTC_HE |
+ IEEE80211_HE_MAC_CAP0_TWT_REQ,
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
- IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
IEEE80211_HE_MAC_CAP2_ACK_EN,
.mac_cap_info[3] =
- IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
- IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
- .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] =
+ IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU |
+ IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39,
+ .mac_cap_info[5] =
+ IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 |
+ IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 |
+ IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
.phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_DUAL_BAND |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
.phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
- IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
.phy_cap_info[2] =
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
.phy_cap_info[3] =
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
@@ -511,18 +516,31 @@ static struct ieee80211_sband_iftype_data iwl_he_capa = {
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
.phy_cap_info[5] =
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 |
+ IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK,
.phy_cap_info[6] =
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
.phy_cap_info[7] =
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP7_MAX_NC_7,
+ IEEE80211_HE_PHY_CAP7_MAX_NC_1,
.phy_cap_info[8] =
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU,
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ,
+ .phy_cap_info[9] =
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB,
},
/*
* Set default Tx/Rx HE MCS NSS Support field. Indicate support
@@ -559,9 +577,11 @@ static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
if ((tx_chains & rx_chains) != ANT_AB) {
iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
- ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS;
+ ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
- ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS;
+ ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
+ iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[7] &=
+ ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
}
}
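
/*
 * Editorial sketch (not part of the patch): the hunk above follows a common
 * pattern -- declare the full HE capability superset in a static template,
 * then clear the bits a given antenna configuration cannot honor.  A minimal
 * stand-alone illustration, with hypothetical CAP_* values:
 */
#include <stdint.h>
#include <stdio.h>

#define CAP_MIDAMBLE_2X2   0x04	/* hypothetical bit, for illustration only */
#define CAP_MAX_NC_MASK    0xe0	/* hypothetical mask */

static void trim_caps(uint8_t *cap, int tx_chains, int rx_chains)
{
	/* if not 2x2, drop the capabilities that need two chains */
	if ((tx_chains & rx_chains) != 0x3) {
		*cap &= ~CAP_MIDAMBLE_2X2;
		*cap &= (uint8_t)~CAP_MAX_NC_MASK;
	}
}

int main(void)
{
	uint8_t cap = 0xff;

	trim_caps(&cap, 0x1, 0x3);	/* 1x1 device */
	printf("caps after trim: 0x%02x\n", cap);
	return 0;
}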
@@ -1315,6 +1335,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
fw_has_capa(&fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+ bool empty_otp;
u32 mac_flags;
u32 sbands_flags = 0;
@@ -1330,7 +1351,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
}
rsp = (void *)hcmd.resp_pkt->data;
- if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
+ empty_otp = !!(le32_to_cpu(rsp->general.flags) &
+ NVM_GENERAL_FLAGS_EMPTY_OTP);
+ if (empty_otp)
IWL_INFO(trans, "OTP is empty\n");
nvm = kzalloc(sizeof(*nvm) +
@@ -1354,6 +1377,11 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
/* Initialize general data */
nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
+ nvm->n_hw_addrs = rsp->general.n_hw_addrs;
+ if (nvm->n_hw_addrs == 0)
+ IWL_WARN(trans,
+ "Firmware declares no reserved mac addresses. OTP is empty: %d\n",
+ empty_otp);
/* Initialize MAC sku data */
mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
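
/*
 * Editorial note: the change above hoists the flag test into a named bool
 * (empty_otp) so the same result can feed both the info message and the
 * later "no reserved mac addresses" warning.  Sketch of the idiom, with
 * a made-up flag value:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVM_FLAG_EMPTY_OTP 0x2	/* hypothetical flag value */

static void report(uint32_t flags, unsigned int n_hw_addrs)
{
	bool empty_otp = !!(flags & NVM_FLAG_EMPTY_OTP);

	if (empty_otp)
		printf("OTP is empty\n");
	if (n_hw_addrs == 0)
		printf("no reserved mac addresses, OTP empty: %d\n", empty_otp);
}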
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 234d1009a9de..b7e1ddf8f177 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index b49eda8150bb..cbd1a8eed620 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -8,6 +8,7 @@
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -35,6 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
index b7cd813ba70f..ae83cfdb750e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
index d34de3f71db6..7020dca05221 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
@@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 421a869633a3..0f51c7bea8d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-scd.h b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h
index 99b43da32adf..9f11f3912816 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-scd.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h
@@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 7e9c924e1220..727f73e0b3f1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 279dd7b7a3fb..26b3c73051ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -269,6 +264,7 @@ struct iwl_rx_cmd_buffer {
bool _page_stolen;
u32 _rx_page_order;
unsigned int truesize;
+ u8 status;
};
static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
@@ -538,9 +534,6 @@ struct iwl_trans_rxq_dma_data {
* @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
* TX'ed commands and similar. The buffer will be vfree'd by the caller.
* Note that the transport must fill in the proper file headers.
- * @dump_regs: dump using IWL_ERR configuration space and memory mapped
- * registers of the device to diagnose failure, e.g., when HW becomes
- * inaccessible.
*/
struct iwl_trans_ops {
@@ -569,7 +562,7 @@ struct iwl_trans_ops {
bool configure_scd);
/* 22000 functions */
int (*txq_alloc)(struct iwl_trans *trans,
- struct iwl_tx_queue_cfg_cmd *cmd,
+ __le16 flags, u8 sta_id, u8 tid,
int cmd_id, int size,
unsigned int queue_wdg_timeout);
void (*txq_free)(struct iwl_trans *trans, int queue);
@@ -611,8 +604,6 @@ struct iwl_trans_ops {
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
const struct iwl_fw_dbg_trigger_tlv
*trigger);
-
- void (*dump_regs)(struct iwl_trans *trans);
};
/**
@@ -688,6 +679,19 @@ enum iwl_plat_pm_mode {
* enter/exit (in msecs).
*/
#define IWL_TRANS_IDLE_TIMEOUT 2000
+#define IWL_MAX_DEBUG_ALLOCATIONS 1
+
+/**
+ * struct iwl_dram_data
+ * @physical: page phy pointer
+ * @block: pointer to the allocated block/page
+ * @size: size of the block/page
+ */
+struct iwl_dram_data {
+ dma_addr_t physical;
+ void *block;
+ int size;
+};
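
/*
 * Editorial sketch: iwl_dram_data pairs a kernel virtual pointer with the
 * matching bus address, the usual shape for buffers obtained from
 * dma_alloc_coherent().  A hedged illustration of how such a block might be
 * filled in (helper name invented, error handling trimmed; not taken from
 * this patch):
 */
static int alloc_fw_mon_block(struct device *dev, struct iwl_dram_data *d,
			      int size)
{
	d->block = dma_alloc_coherent(dev, size, &d->physical, GFP_KERNEL);
	if (!d->block)
		return -ENOMEM;
	d->size = size;
	return 0;
}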
/**
* struct iwl_trans - transport common data
@@ -721,7 +725,9 @@ enum iwl_plat_pm_mode {
* @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
- * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @dbg_n_dest_reg: num of reg_ops in %dbg_dest_tlv
+ * @num_blocks: number of blocks in fw_mon
+ * @fw_mon: address of the buffers for firmware monitor
* @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
@@ -772,7 +778,9 @@ struct iwl_trans {
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u32 dbg_dump_mask;
- u8 dbg_dest_reg_num;
+ u8 dbg_n_dest_reg;
+ int num_blocks;
+ struct iwl_dram_data fw_mon[IWL_MAX_DEBUG_ALLOCATIONS];
enum iwl_plat_pm_mode system_pm_mode;
enum iwl_plat_pm_mode runtime_pm_mode;
@@ -897,12 +905,6 @@ iwl_trans_dump_data(struct iwl_trans *trans,
return trans->ops->dump_data(trans, trigger);
}
-static inline void iwl_trans_dump_regs(struct iwl_trans *trans)
-{
- if (trans->ops->dump_regs)
- trans->ops->dump_regs(trans);
-}
-
static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
@@ -985,7 +987,7 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue)
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
- struct iwl_tx_queue_cfg_cmd *cmd,
+ __le16 flags, u8 sta_id, u8 tid,
int cmd_id, int size,
unsigned int wdg_timeout)
{
@@ -999,7 +1001,8 @@ iwl_trans_txq_alloc(struct iwl_trans *trans,
return -EIO;
}
- return trans->ops->txq_alloc(trans, cmd, cmd_id, size, wdg_timeout);
+ return trans->ops->txq_alloc(trans, flags, sta_id, tid,
+ cmd_id, size, wdg_timeout);
}
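
/*
 * Editorial note: iwl_trans_txq_alloc() is the usual thin inline over an
 * ops table -- validate that the backend implements the hook, then forward
 * every scalar argument.  Minimal stand-alone sketch of the indirection
 * (all names invented):
 */
#include <errno.h>
#include <stddef.h>

struct txq_ops {
	int (*txq_alloc)(void *priv, int flags, int sta_id, int tid);
};

static inline int txq_alloc(const struct txq_ops *ops, void *priv,
			    int flags, int sta_id, int tid)
{
	if (!ops->txq_alloc)		/* backend may not implement it */
		return -EOPNOTSUPP;
	return ops->txq_alloc(priv, flags, sta_id, tid);
}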
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
index 75d35f6b041e..4094a4158032 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 016e03a5034f..730e37744dc0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -331,7 +326,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct ieee80211_chanctx_conf *chanctx_conf;
/* default smps_mode is AUTOMATIC - only used for client modes */
enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
- u32 bt_activity_grading;
+ u32 bt_activity_grading, min_ag_for_static_smps;
int ave_rssi;
lockdep_assert_held(&mvm->mutex);
@@ -363,8 +358,13 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
return;
}
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
+ min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
+ else
+ min_ag_for_static_smps = BT_HIGH_TRAFFIC;
+
bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
- if (bt_activity_grading >= BT_HIGH_TRAFFIC)
+ if (bt_activity_grading >= min_ag_for_static_smps)
smps_mode = IEEE80211_SMPS_STATIC;
else if (bt_activity_grading >= BT_LOW_TRAFFIC)
smps_mode = IEEE80211_SMPS_DYNAMIC;
@@ -691,6 +691,15 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
return bt_activity >= BT_LOW_TRAFFIC;
}
+u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants)
+{
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) &&
+ (mvm->cfg->non_shared_ant & enabled_ants))
+ return mvm->cfg->non_shared_ant;
+
+ return first_antenna(enabled_ants);
+}
+
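/*
 * Editorial sketch: the helper added above prefers the non-BT-shared
 * antenna when the firmware supports the newer coex schema, otherwise it
 * falls back to the lowest enabled antenna.  Stand-alone rendering of that
 * decision (names and bit layout are illustrative):
 */
#include <stdbool.h>
#include <stdint.h>

static uint8_t first_antenna_bit(uint8_t mask)
{
	return mask & -mask;		/* lowest set bit */
}

static uint8_t single_ant_msk(bool coex_schema_2, uint8_t non_shared_ant,
			      uint8_t enabled_ants)
{
	if (coex_schema_2 && (non_shared_ant & enabled_ants))
		return non_shared_ant;
	return first_antenna_bit(enabled_ants);
}
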
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index d61ff66ce07b..d96ada3c06fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 79bdae994822..843f3b41b72e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -434,23 +434,13 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u8 chains_static, chains_dynamic;
struct cfg80211_chan_def chandef;
int ret, i;
- struct iwl_binding_cmd binding_cmd = {};
+ struct iwl_binding_cmd_v1 binding_cmd = {};
struct iwl_time_quota_cmd quota_cmd = {};
struct iwl_time_quota_data *quota;
u32 status;
- int size;
-
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
- size = sizeof(binding_cmd);
- if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
- !iwl_mvm_is_cdb_supported(mvm))
- binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
- else
- binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
- } else {
- size = IWL_BINDING_CMD_SIZE_V1;
- }
+
+ if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
+ return -EINVAL;
/* add back the PHY */
if (WARN_ON(!mvmvif->phy_ctxt))
@@ -497,7 +487,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
status = 0;
ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
- size, &binding_cmd, &status);
+ IWL_BINDING_CMD_SIZE_V1, &binding_cmd,
+ &status);
if (ret) {
IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
return ret;
@@ -731,8 +722,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
{
struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+ bool unified = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
struct wowlan_key_data key_data = {
- .configure_keys = !d0i3,
+ .configure_keys = !d0i3 && !unified,
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
@@ -1042,7 +1035,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* the recording automatically before entering D3. This can
* be removed once the FW starts doing that.
*/
- iwl_fw_dbg_stop_recording(&mvm->fwrt);
+ _iwl_fw_dbg_stop_recording(mvm->fwrt.trans, NULL);
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
@@ -1362,7 +1355,7 @@ static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
struct ieee80211_key_conf *key,
struct iwl_wowlan_status *status)
{
- union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
+ union iwl_all_tsc_rsc *rsc = &status->gtk[0].rsc.all_tsc_rsc;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
@@ -1419,7 +1412,8 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
*/
if (sta) {
struct ieee80211_key_seq seq = {};
- union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
+ union iwl_all_tsc_rsc *sc =
+ &data->status->gtk[0].rsc.all_tsc_rsc;
if (data->find_phase)
return;
@@ -1501,22 +1495,24 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
u8 key[32];
} conf = {
.conf.cipher = gtkdata.cipher,
- .conf.keyidx = status->gtk.key_index,
+ .conf.keyidx =
+ iwlmvm_wowlan_gtk_idx(&status->gtk[0]),
};
+ __be64 replay_ctr;
switch (gtkdata.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
conf.conf.keylen = WLAN_KEY_LEN_CCMP;
- memcpy(conf.conf.key, status->gtk.decrypt_key,
+ memcpy(conf.conf.key, status->gtk[0].key,
WLAN_KEY_LEN_CCMP);
break;
case WLAN_CIPHER_SUITE_TKIP:
conf.conf.keylen = WLAN_KEY_LEN_TKIP;
- memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
+ memcpy(conf.conf.key, status->gtk[0].key, 16);
/* leave TX MIC key zeroed, we don't use it anyway */
memcpy(conf.conf.key +
NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
- status->gtk.tkip_mic_key, 8);
+ status->gtk[0].tkip_mic_key, 8);
break;
}
@@ -1524,11 +1520,10 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
if (IS_ERR(key))
return false;
iwl_mvm_set_key_rx_seq(mvm, key, status);
- }
- if (status->num_of_gtk_rekeys) {
- __be64 replay_ctr =
+ replay_ctr =
cpu_to_be64(le64_to_cpu(status->replay_ctr));
+
ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
(void *)&replay_ctr, GFP_KERNEL);
}
@@ -1541,71 +1536,118 @@ out:
return true;
}
-static struct iwl_wowlan_status *
-iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
{
- u32 base = mvm->error_event_table[0];
- struct error_table_start {
- /* cf. struct iwl_error_event_table */
- u32 valid;
- u32 error_id;
- } err_info;
+ struct iwl_wowlan_status *v7, *status;
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_WANT_SKB,
};
- struct iwl_wowlan_status *status, *fw_status;
int ret, len, status_size;
- iwl_trans_read_mem_bytes(mvm->trans, base,
- &err_info, sizeof(err_info));
-
- if (err_info.valid) {
- IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
- err_info.valid, err_info.error_id);
- if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
- struct cfg80211_wowlan_wakeup wakeup = {
- .rfkill_release = true,
- };
- ieee80211_report_wowlan_wakeup(vif, &wakeup,
- GFP_KERNEL);
- }
- return ERR_PTR(-EIO);
- }
-
- /* only for tracing for now */
- ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
- if (ret)
- IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+ lockdep_assert_held(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
- IWL_ERR(mvm, "failed to query status (%d)\n", ret);
+ IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret);
return ERR_PTR(ret);
}
- status_size = sizeof(*fw_status);
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
+ struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
+ int data_size;
+
+ status_size = sizeof(*v6);
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+
+ if (len < status_size) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ status = ERR_PTR(-EIO);
+ goto out_free_resp;
+ }
+
+ data_size = ALIGN(le32_to_cpu(v6->wake_packet_bufsize), 4);
+
+ if (len != (status_size + data_size)) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ status = ERR_PTR(-EIO);
+ goto out_free_resp;
+ }
+
+ status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
+ if (!status)
+ goto out_free_resp;
+
+ BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) >
+ sizeof(status->gtk[0].key));
+ BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) >
+ sizeof(status->gtk[0].tkip_mic_key));
+
+ /* copy GTK info to the right place */
+ memcpy(status->gtk[0].key, v6->gtk.decrypt_key,
+ sizeof(v6->gtk.decrypt_key));
+ memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key,
+ sizeof(v6->gtk.tkip_mic_key));
+ memcpy(&status->gtk[0].rsc, &v6->gtk.rsc,
+ sizeof(status->gtk[0].rsc));
+
+ /* hardcode the key length to 16 since v6 only supports 16 */
+ status->gtk[0].key_len = 16;
+
+ /*
+ * The key index only uses 2 bits (values 0 to 3) and
+ * we always set bit 7 which means this is the
+ * currently used key.
+ */
+ status->gtk[0].key_flags = v6->gtk.key_index | BIT(7);
+
+ status->replay_ctr = v6->replay_ctr;
+
+ /* everything starting from pattern_number is identical */
+ memcpy(&status->pattern_number, &v6->pattern_number,
+ offsetof(struct iwl_wowlan_status, wake_packet) -
+ offsetof(struct iwl_wowlan_status, pattern_number) +
+ data_size);
+
+ goto out_free_resp;
+ }
+ v7 = (void *)cmd.resp_pkt->data;
+ status_size = sizeof(*v7);
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+
if (len < status_size) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- fw_status = ERR_PTR(-EIO);
+ status = ERR_PTR(-EIO);
goto out_free_resp;
}
- status = (void *)cmd.resp_pkt->data;
if (len != (status_size +
- ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
+ ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4))) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- fw_status = ERR_PTR(-EIO);
+ status = ERR_PTR(-EIO);
goto out_free_resp;
}
- fw_status = kmemdup(status, len, GFP_KERNEL);
+ status = kmemdup(v7, len, GFP_KERNEL);
out_free_resp:
iwl_free_resp(&cmd);
- return fw_status;
+ return status;
+}
+
+static struct iwl_wowlan_status *
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ /* only for tracing for now */
+ ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
+ if (ret)
+ IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+
+ return iwl_mvm_send_wowlan_get_status(mvm);
}
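
/*
 * Editorial sketch: the rewrite above up-converts an old firmware response
 * (v6) into the current layout by copying the divergent head field by field
 * and then block-copying the identical tail with offsetof() arithmetic.  A
 * reduced stand-alone model of the same trick (struct layouts invented):
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct status_v6 { uint32_t gtk_small; uint32_t pattern; uint32_t wake[4]; };
struct status_v7 { uint64_t gtk_big;   uint32_t pattern; uint32_t wake[4]; };

static struct status_v7 *upconvert(const struct status_v6 *v6)
{
	struct status_v7 *v7 = calloc(1, sizeof(*v7));

	if (!v7)
		return NULL;
	v7->gtk_big = v6->gtk_small;		/* divergent head: convert */
	/* identical tail: one copy, sized from the offsets */
	memcpy(&v7->pattern, &v6->pattern,
	       sizeof(*v6) - offsetof(struct status_v6, pattern));
	return v7;
}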
/* releases the MVM mutex */
@@ -1618,7 +1660,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
bool keep;
struct iwl_mvm_sta *mvm_ap_sta;
- fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ fw_status = iwl_mvm_get_wakeup_status(mvm);
if (IS_ERR_OR_NULL(fw_status))
goto out_unlock;
@@ -1743,7 +1785,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
u32 reasons = 0;
int i, j, n_matches, ret;
- fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+ fw_status = iwl_mvm_get_wakeup_status(mvm);
if (!IS_ERR_OR_NULL(fw_status)) {
reasons = le32_to_cpu(fw_status->wakeup_reasons);
kfree(fw_status);
@@ -1856,6 +1898,29 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
+static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 base = mvm->error_event_table[0];
+ struct error_table_start {
+ /* cf. struct iwl_error_event_table */
+ u32 valid;
+ u32 error_id;
+ } err_info;
+
+ iwl_trans_read_mem_bytes(mvm->trans, base,
+ &err_info, sizeof(err_info));
+
+ if (err_info.valid &&
+ err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+ ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+ }
+ return err_info.valid;
+}
+
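/*
 * Editorial note: iwl_mvm_check_rt_status() peeks only at the first two
 * words of the firmware error table -- enough to decide "did the firmware
 * assert, and was it the WoWLAN rfkill indicator".  Sketch of that check
 * (indicator value invented):
 */
#include <stdbool.h>
#include <stdint.h>

struct error_table_head { uint32_t valid; uint32_t error_id; };

#define RFKILL_FOR_WOWLAN 0x3d	/* hypothetical indicator value */

static bool fw_asserted(const struct error_table_head *e, bool *rfkill)
{
	*rfkill = e->valid && e->error_id == RFKILL_FOR_WOWLAN;
	return e->valid != 0;
}
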
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct ieee80211_vif *vif = NULL;
@@ -1883,9 +1948,19 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
goto err;
}
+ iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
+ if (iwl_mvm_check_rt_status(mvm, vif)) {
+ set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+ NULL, 0);
+ ret = 1;
+ goto err;
+ }
+
if (d0i3_first) {
ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
if (ret < 0) {
@@ -2117,6 +2192,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
mvm->d3_test_active = false;
+ iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
+
rtnl_lock();
__iwl_mvm_resume(mvm, true);
rtnl_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 798605c4f122..1aa6c7e93088 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 05b77419953c..3b6b3d8fb961 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -671,16 +666,11 @@ iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
};
int ret, bt_force_ant_mode;
- for (bt_force_ant_mode = 0;
- bt_force_ant_mode < ARRAY_SIZE(modes_str);
- bt_force_ant_mode++) {
- if (!strcmp(buf, modes_str[bt_force_ant_mode]))
- break;
- }
-
- if (bt_force_ant_mode >= ARRAY_SIZE(modes_str))
- return -EINVAL;
+ ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
+ if (ret < 0)
+ return ret;
+ bt_force_ant_mode = ret;
ret = 0;
mutex_lock(&mvm->mutex);
if (mvm->bt_force_ant_mode == bt_force_ant_mode)
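
/*
 * Editorial note: the hunk above swaps an open-coded name-comparison loop
 * for match_string(), which returns the matching index or -EINVAL.  Minimal
 * usage sketch (kernel context; wrapper name invented):
 */
static int set_mode(const char * const *names, size_t n, const char *buf,
		    int *mode)
{
	int idx = match_string(names, n, buf);	/* index or -EINVAL */

	if (idx < 0)
		return idx;
	*mode = idx;
	return 0;
}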
@@ -1733,6 +1723,35 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
}
static ssize_t
+iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_he_monitor_cmd he_mon_cmd = {};
+ u32 aid;
+ int ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid,
+ &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1],
+ &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3],
+ &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]);
+ if (ret != 7)
+ return -EINVAL;
+
+ he_mon_cmd.aid = cpu_to_le16(aid);
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD,
+ DATA_PATH_GROUP, 0), 0,
+ sizeof(he_mon_cmd), &he_mon_cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
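/*
 * Editorial sketch: the debugfs write handler above parses "<aid> <bssid>"
 * with a single sscanf() and rejects the input unless all seven conversions
 * succeed.  Stand-alone version of the same parse:
 */
#include <stdint.h>
#include <stdio.h>

static int parse_aid_bssid(const char *buf, unsigned int *aid,
			   uint8_t bssid[6])
{
	int n = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", aid,
		       &bssid[0], &bssid[1], &bssid[2],
		       &bssid[3], &bssid[4], &bssid[5]);

	return n == 7 ? 0 : -1;	/* all fields or nothing */
}
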
+static ssize_t
iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1801,6 +1820,8 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
#endif
+MVM_DEBUGFS_WRITE_FILE_OPS(he_sniffer_params, 32);
+
static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -1989,6 +2010,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
#ifdef CONFIG_ACPI
MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
#endif
+ MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0200);
if (!debugfs_create_bool("enable_scan_iteration_notif",
0600,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h
index ede6ef8d390e..a83d252c0602 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index e8e74dd558f7..143c7fcaea41 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 6bb1a99a197a..dade206d5511 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -304,6 +299,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { MVM_ALIVE };
+ set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
if (ucode_type == IWL_UCODE_REGULAR &&
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
!(fw_has_capa(&mvm->fw->ucode_capa,
@@ -368,12 +364,20 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
- mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+ /*
+ * Set a 'fake' TID for the command queue, since we use the
+ * hweight() of the tid_bitmap as a refcount now. Not that
+ * we ever even consider the command queue as one we might
+ * want to reuse, but be safe nevertheless.
+ */
+ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
+ BIT(IWL_MAX_TID_COUNT + 2);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+ clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
return 0;
}
@@ -704,8 +708,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
enabled = !!(wifi_pkg->package.elements[1].integer.value);
n_profiles = wifi_pkg->package.elements[2].integer.value;
- /* in case of BIOS bug */
- if (n_profiles <= 0) {
+ /*
+ * Check the validity of n_profiles. The EWRD profiles start
+ * from index 1, so the maximum value allowed here is
+ * ACPI_SAR_PROFILE_NUM - 1.
+ */
+ if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
ret = -EINVAL;
goto out_free;
}
@@ -773,19 +781,28 @@ out_free:
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
- struct iwl_dev_tx_power_cmd cmd = {
- .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
- };
+ union {
+ struct iwl_dev_tx_power_cmd v5;
+ struct iwl_dev_tx_power_cmd_v4 v4;
+ } cmd;
int i, j, idx;
int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
- int len = sizeof(cmd);
+ int len;
BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
ACPI_SAR_TABLE_SIZE);
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
- len = sizeof(cmd.v3);
+ cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_REDUCE_TX_POWER))
+ len = sizeof(cmd.v5);
+ else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
+ len = sizeof(cmd.v4);
+ else
+ len = sizeof(cmd.v4.v3);
for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
struct iwl_mvm_sar_profile *prof;
@@ -812,7 +829,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
- cmd.v3.per_chain_restriction[i][j] =
+ cmd.v5.v3.per_chain_restriction[i][j] =
cpu_to_le16(prof->table[idx]);
IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
j, prof->table[idx]);
@@ -1018,7 +1035,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
mvm->fwrt.dump.conf = FW_DBG_INVALID;
/* if we have a destination, assume EARLY START */
- if (mvm->fw->dbg_dest_tlv)
+ if (mvm->fw->dbg.dest_tlv)
mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
index b27269504a62..9bb1de1cad64 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index b3fd20502abb..6486cfb33f40 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -35,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -85,6 +82,10 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
IWL_GEN2_EDCA_TX_FIFO_VI,
IWL_GEN2_EDCA_TX_FIFO_BE,
IWL_GEN2_EDCA_TX_FIFO_BK,
+ IWL_GEN2_TRIG_TX_FIFO_VO,
+ IWL_GEN2_TRIG_TX_FIFO_VI,
+ IWL_GEN2_TRIG_TX_FIFO_BE,
+ IWL_GEN2_TRIG_TX_FIFO_BK,
};
struct iwl_mvm_mac_iface_iterator_data {
@@ -1486,12 +1487,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
IWL_MVM_MISSED_BEACONS_THRESHOLD)
ieee80211_beacon_loss(vif);
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
- FW_DBG_TRIGGER_MISSED_BEACONS))
+ trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_MISSED_BEACONS);
+ if (!trigger)
return;
- trigger = iwl_fw_dbg_get_trigger(mvm->fw,
- FW_DBG_TRIGGER_MISSED_BEACONS);
bcon_trig = (void *)trigger->data;
stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
stop_trig_missed_bcon_since_rx =
@@ -1499,11 +1499,6 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
/* TODO: implement start trigger */
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif),
- trigger))
- return;
-
if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
rx_missed_bcon >= stop_trig_missed_bcon)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
@@ -1568,6 +1563,65 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
}
+static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_probe_resp_data_notif *notif = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_probe_resp_data *old_data, *new_data;
+
+ if (mvmvif->id != (u16)le32_to_cpu(notif->mac_id))
+ return;
+
+ new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
+ if (!new_data)
+ return;
+
+ memcpy(&new_data->notif, notif, sizeof(new_data->notif));
+
+ /* noa_attr contains 1 reserved byte, need to subtract it */
+ new_data->noa_len = sizeof(struct ieee80211_vendor_ie) +
+ sizeof(new_data->notif.noa_attr) - 1;
+
+ /*
+ * If it's a one time NoA, only one descriptor is needed,
+ * adjust the length according to len_low.
+ */
+ if (new_data->notif.noa_attr.len_low ==
+ sizeof(struct ieee80211_p2p_noa_desc) + 2)
+ new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc);
+
+ old_data = rcu_dereference_protected(mvmvif->probe_resp_data,
+ lockdep_is_held(&mvmvif->mvm->mutex));
+ rcu_assign_pointer(mvmvif->probe_resp_data, new_data);
+
+ if (old_data)
+ kfree_rcu(old_data, rcu_head);
+
+ if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
+ notif->csa_counter >= 1)
+ ieee80211_csa_set_counter(vif, notif->csa_counter);
+}
+
+void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
+ int len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ON_ONCE(len < sizeof(*notif)))
+ return;
+
+ IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
+ notif->noa_active, notif->csa_counter);
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_ACTIVE,
+ iwl_mvm_probe_resp_data_iter,
+ notif);
+}
+
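/*
 * Editorial note: the iterator above follows the canonical RCU update
 * sequence -- fetch the old pointer under the writer lock, publish the new
 * one with rcu_assign_pointer(), then retire the old object with
 * kfree_rcu() so concurrent readers keep a valid view until a grace period
 * passes.  Generic shape of the pattern (kernel context; 'cfg' invented):
 */
struct cfg {
	struct rcu_head rcu_head;
	int value;
};

static void publish_cfg(struct cfg __rcu **slot, struct cfg *new_cfg,
			struct mutex *writer_lock)
{
	struct cfg *old_cfg;

	old_cfg = rcu_dereference_protected(*slot,
					    lockdep_is_held(writer_lock));
	rcu_assign_pointer(*slot, new_cfg);
	if (old_cfg)
		kfree_rcu(old_cfg, rcu_head);	/* freed after a grace period */
}
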
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index b15b0d84bb7e..505b0385d800 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -559,8 +554,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_remain_on_channel_duration = 10000;
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
- /* we can compensate an offset of up to 3 channels = 15 MHz */
- hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
/* Extract MAC address */
memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -864,16 +857,13 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_BA);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- return;
-
switch (action) {
case IEEE80211_AMPDU_TX_OPERATIONAL: {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -1035,6 +1025,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
mvmvif->phy_ctxt = NULL;
memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
+ memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data));
}
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
@@ -1124,7 +1115,9 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
* would do.
*/
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+#ifdef CONFIG_PM
iwl_mvm_d0i3_enable_tx(mvm, NULL);
+#endif
}
return ret;
@@ -1162,7 +1155,9 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
mutex_lock(&mvm->mutex);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+#ifdef CONFIG_PM
iwl_mvm_d0i3_enable_tx(mvm, NULL);
+#endif
ret = iwl_mvm_update_quotas(mvm, true, NULL);
if (ret)
IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
@@ -1233,12 +1228,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
iwl_mvm_del_aux_sta(mvm);
/*
- * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
- * won't be called in this case).
+ * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
+ * hw (as restart_complete() won't be called in this case) and mac80211
+ * won't execute the restart.
* But make sure to cleanup interfaces that have gone down before/during
* HW restart was requested.
*/
- if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
ieee80211_iterate_interfaces(mvm->hw, 0,
iwl_mvm_cleanup_iterator, mvm);
@@ -1308,19 +1306,28 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
- struct iwl_dev_tx_power_cmd cmd = {
- .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
- .v3.mac_context_id =
+ int len;
+ union {
+ struct iwl_dev_tx_power_cmd v5;
+ struct iwl_dev_tx_power_cmd_v4 v4;
+ } cmd = {
+ .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+ .v5.v3.mac_context_id =
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
- .v3.pwr_restriction = cpu_to_le16(8 * tx_power),
+ .v5.v3.pwr_restriction = cpu_to_le16(8 * tx_power),
};
- int len = sizeof(cmd);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
- cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+ cmd.v5.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
- if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
- len = sizeof(cmd.v3);
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_REDUCE_TX_POWER))
+ len = sizeof(cmd.v5);
+ else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
+ len = sizeof(cmd.v4);
+ else
+ len = sizeof(cmd.v4.v3);
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
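
/*
 * Editorial note: both REDUCE_TX_POWER_CMD call sites in this patch use the
 * same versioning trick -- build the newest command layout inside a union
 * of all supported layouts, then send only sizeof() the variant the
 * firmware advertises.  Reduced stand-alone model (structs invented):
 */
#include <stddef.h>
#include <stdint.h>

struct cmd_v3 { uint32_t set_mode; uint16_t pwr; };
struct cmd_v4 { struct cmd_v3 v3; uint32_t extra_v4; };
struct cmd_v5 { struct cmd_v3 v3; uint32_t extra_v4; uint32_t extra_v5; };

static size_t cmd_len(int fw_api_level)
{
	union { struct cmd_v5 v5; struct cmd_v4 v4; } cmd = {
		.v5.v3.set_mode = 1,	/* newest layout fills the union */
	};

	if (fw_api_level >= 5)
		return sizeof(cmd.v5);
	if (fw_api_level >= 4)
		return sizeof(cmd.v4);
	return sizeof(cmd.v4.v3);	/* oldest firmware: v3 prefix only */
}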
@@ -1333,6 +1340,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
int ret;
mvmvif->mvm = mvm;
+ RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);
/*
* make sure D0i3 exit is completed, otherwise a target access
@@ -1497,6 +1505,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_probe_resp_data *probe_data;
iwl_mvm_prepare_mac_removal(mvm, vif);
@@ -1506,6 +1515,12 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ probe_data = rcu_dereference_protected(mvmvif->probe_resp_data,
+ lockdep_is_held(&mvm->mutex));
+ RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);
+ if (probe_data)
+ kfree_rcu(probe_data, rcu_head);
+
if (mvm->bf_allowed_vif == mvmvif) {
mvm->bf_allowed_vif = NULL;
vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
@@ -1978,10 +1993,6 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
sta_ctxt_cmd.htc_flags |=
cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
}
- if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
- IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
- sta_ctxt_cmd.htc_flags |=
- cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
@@ -2459,6 +2470,9 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_mac_ctxt_remove(mvm, vif);
+ kfree(mvmvif->ap_wep_key);
+ mvmvif->ap_wep_key = NULL;
+
mutex_unlock(&mvm->mutex);
}
@@ -2788,14 +2802,12 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_tdls *tdls_trig;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_TDLS);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
tdls_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- return;
if (!(tdls_trig->action_bitmap & BIT(action)))
return;
@@ -2931,7 +2943,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
- iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
+ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+ false);
ret = iwl_mvm_update_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
@@ -2947,9 +2960,16 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
/* enable beacon filtering */
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
- iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
+ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+ true);
- ret = 0;
+ /* if wep is used, need to set the key for the station now */
+ if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key)
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta,
+ mvmvif->ap_wep_key,
+ STA_KEY_IDX_INVALID);
+ else
+ ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
/* disable beacon filtering */
@@ -3132,8 +3152,15 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ if (!mvm->trans->cfg->gen2) {
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
+ } else {
+ IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n");
+ return -EOPNOTSUPP;
+ }
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
@@ -3148,13 +3175,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- /* For non-client mode, only use WEP keys for TX as we probably
- * don't have a station yet anyway and would then have to keep
- * track of the keys, linking them to each of the clients/peers
- * as they appear. For now, don't do that, for performance WEP
- * offload doesn't really matter much, but we need it for some
- * other offload features in client mode.
- */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->ap_wep_key = kmemdup(key,
+ sizeof(*key) + key->keylen,
+ GFP_KERNEL);
+ if (!mvmvif->ap_wep_key)
+ return -ENOMEM;
+ }
+
if (vif->type != NL80211_IFTYPE_STATION)
return 0;
break;
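
/*
 * Editorial note: for AP-mode WEP the hunk above keeps a private copy of
 * the key with kmemdup(), sizing the copy as header plus variable-length
 * key material so it can be replayed when stations authorize later.  Shape
 * of the idiom (kernel context; struct reduced and renamed):
 */
struct wep_key {
	u8 keyidx;
	u8 keylen;
	u8 key[];		/* flexible array member */
};

static struct wep_key *dup_wep_key(const struct wep_key *key)
{
	/* one allocation covers the header and the trailing key bytes */
	return kmemdup(key, sizeof(*key) + key->keylen, GFP_KERNEL);
}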
@@ -4458,14 +4489,12 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_mlme *trig_mlme;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_MLME);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
trig_mlme = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- return;
if (event->u.mlme.data == ASSOC_EVENT) {
if (event->u.mlme.status == MLME_DENIED)
@@ -4500,14 +4529,12 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_BA);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- return;
if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index b3987a0a7018..7ba5bc2ed1c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -336,6 +331,18 @@ struct iwl_mvm_vif_bf_data {
};
/**
+ * struct iwl_probe_resp_data - data for NoA/CSA updates
+ * @rcu_head: used for freeing the data on update
+ * @notif: notification data
+ * @noa_len: length of NoA attribute, calculated from the notification
+ */
+struct iwl_probe_resp_data {
+ struct rcu_head rcu_head;
+ struct iwl_probe_resp_data_notif notif;
+ int noa_len;
+};
+
+/**
* struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
* @id: between 0 and 3
* @color: to solve races upon MAC addition and removal
@@ -365,6 +372,8 @@ struct iwl_mvm_vif_bf_data {
* average signal of beacons retrieved from the firmware
* @csa_failed: CSA failed to schedule time event, report an error later
* @features: hw features active for this vif
+ * @probe_resp_data: data from FW notification to store NOA and CSA related
+ * data to be inserted into probe response.
*/
struct iwl_mvm_vif {
struct iwl_mvm *mvm;
@@ -460,6 +469,9 @@ struct iwl_mvm_vif {
/* TCP Checksum Offload */
netdev_features_t features;
+
+ struct iwl_probe_resp_data __rcu *probe_resp_data;
+ struct ieee80211_key_conf *ap_wep_key;
};
static inline struct iwl_mvm_vif *
@@ -500,6 +512,7 @@ enum iwl_mvm_scan_type {
IWL_SCAN_TYPE_WILD,
IWL_SCAN_TYPE_MILD,
IWL_SCAN_TYPE_FRAGMENTED,
+ IWL_SCAN_TYPE_FAST_BALANCE,
};
enum iwl_mvm_sched_scan_pass_all_states {
@@ -741,24 +754,12 @@ iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
* This is a state in which a single queue serves more than one TID, all of
* which are not aggregated. Note that the queue is only associated to one
* RA.
- * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
- * This is a state of a queue that has had traffic on it, but during the
- * last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
- * it. In this state, when a new queue is needed to be allocated but no
- * such free queue exists, an inactive queue might be freed and given to
- * the new RA/TID.
- * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
- * This is the state of a queue that has had traffic pass through it, but
- * needs to be reconfigured for some reason, e.g. the queue needs to
- * become unshared and aggregations re-enabled on.
*/
enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE,
IWL_MVM_QUEUE_RESERVED,
IWL_MVM_QUEUE_READY,
IWL_MVM_QUEUE_SHARED,
- IWL_MVM_QUEUE_INACTIVE,
- IWL_MVM_QUEUE_RECONFIGURING,
};
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
@@ -775,6 +776,17 @@ struct iwl_mvm_geo_profile {
u8 values[ACPI_GEO_TABLE_SIZE];
};
+struct iwl_mvm_dqa_txq_info {
+ u8 ra_sta_id; /* The RA this queue is mapped to, if any */
+ bool reserved; /* Is this the TXQ reserved for a STA */
+ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+ u8 txq_tid; /* The TID "owner" of this queue */
+ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+ /* Timestamp for inactivation per TID of this queue */
+ unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
+ enum iwl_mvm_queue_status status;
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -831,17 +843,7 @@ struct iwl_mvm {
u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
- struct {
- u8 hw_queue_refcount;
- u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
- bool reserved; /* Is this the TXQ reserved for a STA */
- u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
- u8 txq_tid; /* The TID "owner" of this queue*/
- u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
- /* Timestamp for inactivation per TID of this queue */
- unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
- enum iwl_mvm_queue_status status;
- } queue_info[IWL_MAX_HW_QUEUES];
+ struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */
@@ -1229,6 +1231,11 @@ static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm)
return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE);
}
+static inline bool iwl_mvm_is_frag_ebs_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS);
+}
+
static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
{
/* For now we only use this mode to differentiate between
@@ -1602,6 +1609,8 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *exclude_vif);
+void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
@@ -1685,7 +1694,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#endif /* CONFIG_IWLWIFI_DEBUGFS */
/* rate scaling */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
void rs_update_last_rssi(struct iwl_mvm *mvm,
@@ -1733,6 +1742,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
extern const struct file_operations iwl_dbgfs_d3_test_ops;
+struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm);
#ifdef CONFIG_PM
int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -1776,10 +1786,13 @@ void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
bool iwl_mvm_ref_taken(struct iwl_mvm *mvm);
+
+#ifdef CONFIG_PM
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode);
int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode);
int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
+#endif
/* BT Coex */
int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
@@ -1796,6 +1809,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
enum nl80211_band band);
+u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
@@ -1859,17 +1873,6 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
mvmvif->low_latency &= ~cause;
}
-/* hw scheduler queue config */
-bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int wdg_timeout);
-int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
- u8 sta_id, u8 tid, unsigned int timeout);
-
-int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u8 tid, u8 flags);
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
-
/* Return a bitmask with all the hw supported queues, except for the
* command queue, which can't be flushed.
*/
@@ -1881,6 +1884,11 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
+ lockdep_assert_held(&mvm->mutex);
+ /* call this without dump_start/dump_end since at this point we
+ * already hold the op mode mutex
+ */
+ iwl_fw_dbg_collect_sync(&mvm->fwrt);
iwl_fw_cancel_timestamp(&mvm->fwrt);
iwl_free_fw_paging(&mvm->fwrt);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
@@ -1966,8 +1974,6 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
-void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
-
#define MVM_TCM_PERIOD_MSEC 500
#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
#define MVM_LL_PERIOD (10 * HZ)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index cf48517944ec..3633f27d048a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -482,15 +477,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
u32 status;
int resp_len, n_channels;
u16 mcc;
- bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
return ERR_PTR(-EOPNOTSUPP);
cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
- if (!resp_v2)
- cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1);
IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
alpha2[0], alpha2[1], src_id);
@@ -502,7 +493,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
pkt = cmd.resp_pkt;
/* Extract MCC response */
- if (resp_v2) {
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) {
struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
n_channels = __le32_to_cpu(mcc_resp->n_channels);
@@ -514,9 +506,9 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
goto exit;
}
} else {
- struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
+ struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
- n_channels = __le32_to_cpu(mcc_resp_v1->n_channels);
+ n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
resp_len = sizeof(struct iwl_mcc_update_resp) +
n_channels * sizeof(__le32);
resp_cp = kzalloc(resp_len, GFP_KERNEL);
@@ -525,12 +517,14 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
goto exit;
}
- resp_cp->status = mcc_resp_v1->status;
- resp_cp->mcc = mcc_resp_v1->mcc;
- resp_cp->cap = mcc_resp_v1->cap;
- resp_cp->source_id = mcc_resp_v1->source_id;
- resp_cp->n_channels = mcc_resp_v1->n_channels;
- memcpy(resp_cp->channels, mcc_resp_v1->channels,
+ resp_cp->status = mcc_resp_v3->status;
+ resp_cp->mcc = mcc_resp_v3->mcc;
+ resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap);
+ resp_cp->source_id = mcc_resp_v3->source_id;
+ resp_cp->time = mcc_resp_v3->time;
+ resp_cp->geo_info = mcc_resp_v3->geo_info;
+ resp_cp->n_channels = mcc_resp_v3->n_channels;
+ memcpy(resp_cp->channels, mcc_resp_v3->channels,
n_channels * sizeof(__le32));
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
index 6338d9cf7070..6d71e05626ad 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 0e26619fb330..0e2092526fae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -182,6 +177,9 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
+ if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
+ reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;
+
iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
@@ -189,7 +187,8 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
+ CSR_HW_IF_CONFIG_REG_D3_DEBUG,
reg_val);
IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
@@ -491,7 +490,9 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+#ifdef CONFIG_PM
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
+#endif
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
@@ -564,10 +565,23 @@ static bool iwl_mvm_fwrt_fw_running(void *ctx)
return iwl_mvm_firmware_running(ctx);
}
+static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, host_cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.dump_start = iwl_mvm_fwrt_dump_start,
.dump_end = iwl_mvm_fwrt_dump_end,
.fw_running = iwl_mvm_fwrt_fw_running,
+ .send_hcmd = iwl_mvm_fwrt_send_hcmd,
};
static struct iwl_op_mode *
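
iwl_mvm_fwrt_send_hcmd() above lets the firmware-runtime layer send host commands back through the op mode, with the op mode's own mutex serializing them against everything else done under that lock. A userspace analogue of the callback-plus-lock shape (pthread names and the ctx type are illustrative):

#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;
};

static int do_send(struct ctx *c, const char *cmd)
{
	printf("sending %s\n", cmd);
	return 0;
}

/* The ops callback takes the owner's lock around the real send, so
 * commands issued via the callback serialize with the owner's other
 * critical sections. */
static int send_hcmd(void *opaque, const char *cmd)
{
	struct ctx *c = opaque;
	int ret;

	pthread_mutex_lock(&c->lock);
	ret = do_send(c, cmd);
	pthread_mutex_unlock(&c->lock);
	return ret;
}

int main(void)
{
	struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	return send_hcmd(&c, "GET_STATUS");
}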
@@ -583,6 +597,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
};
int err, scan_size;
u32 min_backoff;
+ enum iwl_amsdu_size rb_size_default;
/*
* We use IWL_MVM_STATION_COUNT to check the validity of the station
@@ -602,9 +617,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (cfg->max_rx_agg_size)
hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
+ else
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
+ else
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
op_mode = hw->priv;
@@ -661,7 +680,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
+#ifdef CONFIG_PM
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
+#endif
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
@@ -691,8 +712,16 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.op_mode = op_mode;
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ rb_size_default = IWL_AMSDU_2K;
+ else
+ rb_size_default = IWL_AMSDU_4K;
+
switch (iwlwifi_mod_params.amsdu_size) {
case IWL_AMSDU_DEF:
+ trans_cfg.rx_buf_size = rb_size_default;
+ break;
case IWL_AMSDU_4K:
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
break;
@@ -705,16 +734,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
default:
pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
iwlwifi_mod_params.amsdu_size);
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- }
-
- /* the hardware splits the A-MSDU */
- if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
- trans_cfg.rx_buf_size = IWL_AMSDU_2K;
- /* TODO: remove when balanced power mode is fw supported */
- iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
- } else if (mvm->cfg->mq_rx_supported) {
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ trans_cfg.rx_buf_size = rb_size_default;
}
trans->wide_cmd_header = true;
@@ -745,12 +765,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_trans_configure(mvm->trans, &trans_cfg);
trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
- trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
- trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
- memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
+ trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
+ trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
+ memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
sizeof(trans->dbg_conf_tlv));
- trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
- trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
+ trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
+ trans->dbg_dump_mask = mvm->fw->dbg.dump_mask;
trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;
@@ -781,6 +801,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_run_init_mvm_ucode(mvm, true);
+ if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status))
+ iwl_fw_alive_error_dump(&mvm->fwrt);
if (!iwlmvm_mod_params.init_dbg || !err)
iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@@ -950,15 +972,13 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_cmd *cmds_trig;
int i;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+ FW_DBG_TRIGGER_FW_NOTIF);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
cmds_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
- return;
-
for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
/* don't collect on CMD 0 */
if (!cmds_trig->cmds[i].cmd_id)
@@ -1220,7 +1240,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
*/
if (!mvm->fw_restart && fw_error) {
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- NULL);
+ NULL, 0);
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
@@ -1246,7 +1266,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
- mvm->hw_registered) {
+ mvm->hw_registered &&
+ !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
/* don't let the transport/FW power down */
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -1261,7 +1282,8 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- iwl_mvm_dump_nic_error_log(mvm);
+ if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status))
+ iwl_mvm_dump_nic_error_log(mvm);
iwl_mvm_nic_restart(mvm, true);
}
@@ -1274,6 +1296,7 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
iwl_mvm_nic_restart(mvm, true);
}
+#ifdef CONFIG_PM
struct iwl_d0i3_iter_data {
struct iwl_mvm *mvm;
struct ieee80211_vif *connected_vif;
@@ -1596,25 +1619,23 @@ out:
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
- struct iwl_host_cmd get_status_cmd = {
- .id = WOWLAN_GET_STATUSES,
- .flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
- };
struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
.mvm = mvm,
};
struct iwl_wowlan_status *status;
- int ret;
u32 wakeup_reasons = 0;
__le16 *qos_seq = NULL;
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
- if (ret)
+
+ status = iwl_mvm_send_wowlan_get_status(mvm);
+ if (IS_ERR_OR_NULL(status)) {
+ /* set to NULL so we don't need to check before kfree'ing */
+ status = NULL;
goto out;
+ }
- status = (void *)get_status_cmd.resp_pkt->data;
wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
qos_seq = status->qos_seq_ctr;
@@ -1633,8 +1654,7 @@ out:
wakeup_reasons);
/* qos_seq might point inside resp_pkt, so free it only now */
- if (get_status_cmd.resp_pkt)
- iwl_free_resp(&get_status_cmd);
+ kfree(status);
/* the FW might have updated the regdomain */
iwl_mvm_update_changed_regdom(mvm);
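
The d0i3 exit rework changes ownership: iwl_mvm_send_wowlan_get_status() now returns a kmalloc'd copy of the status (or an ERR_PTR), so the caller normalizes errors to NULL and frees with kfree() on the common exit path instead of holding the response packet alive. A condensed sketch of the resulting flow (IS_ERR_OR_NULL and kfree are real kernel APIs; the path is abbreviated from the hunk above):

	struct iwl_wowlan_status *status;

	status = iwl_mvm_send_wowlan_get_status(mvm);
	if (IS_ERR_OR_NULL(status)) {
		status = NULL;	/* so the out: path can kfree() unconditionally */
		goto out;
	}
	/* ... read status->wakeup_reasons and status->qos_seq_ctr ... */
out:
	kfree(status);		/* kfree(NULL) is a harmless no-op */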
@@ -1685,6 +1705,13 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
return _iwl_mvm_exit_d0i3(mvm);
}
+#define IWL_MVM_D0I3_OPS \
+ .enter_d0i3 = iwl_mvm_enter_d0i3, \
+ .exit_d0i3 = iwl_mvm_exit_d0i3,
+#else /* CONFIG_PM */
+#define IWL_MVM_D0I3_OPS
+#endif /* CONFIG_PM */
+
#define IWL_MVM_COMMON_OPS \
/* these could be differentiated */ \
.async_cb = iwl_mvm_async_cb, \
@@ -1695,8 +1722,7 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
.nic_error = iwl_mvm_nic_error, \
.cmd_queue_full = iwl_mvm_cmd_queue_full, \
.nic_config = iwl_mvm_nic_config, \
- .enter_d0i3 = iwl_mvm_enter_d0i3, \
- .exit_d0i3 = iwl_mvm_exit_d0i3, \
+ IWL_MVM_D0I3_OPS \
/* as we only register one, these MUST be common! */ \
.start = iwl_op_mode_mvm_start, \
.stop = iwl_op_mode_mvm_stop
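
The IWL_MVM_D0I3_OPS macro above expands to the two d0i3 initializers when CONFIG_PM is set and to nothing otherwise, so one common initializer list serves both builds. A self-contained illustration of the same trick (names invented for the demo):

#include <stdio.h>

struct ops {
	void (*start)(void);
	void (*suspend)(void);
};

static void start(void) { puts("start"); }

#ifdef HAVE_PM
static void suspend(void) { puts("suspend"); }
#define PM_OPS .suspend = suspend,
#else
#define PM_OPS
#endif

/* With HAVE_PM the macro contributes .suspend; without it the member
 * stays zero-initialized (NULL), and no #ifdef clutters the table. */
static const struct ops my_ops = {
	PM_OPS
	.start = start,
};

int main(void)
{
	my_ops.start();
	if (my_ops.suspend)
		my_ops.suspend();
	return 0;
}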
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index c11fe2621d51..5a0a28fd762d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index 690559bdf421..5e62b97af48b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 8169d1450b3b..7a98e1a1dc40 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -117,20 +117,42 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
{
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
bool vht_ena = vht_cap && vht_cap->vht_supported;
u16 flags = 0;
if (mvm->cfg->ht_params->stbc &&
- (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
- ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
- (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))))
- flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
+ if (he_cap && he_cap->has_he) {
+ if (he_cap->he_cap_elem.phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+
+ if (he_cap->he_cap_elem.phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
+ } else if ((ht_cap &&
+ (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+ (vht_ena &&
+ (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ }
if (mvm->cfg->ht_params->ldpc &&
((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+ if (he_cap && he_cap->has_he &&
+ (he_cap->he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) {
+ flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
+
+ if (he_cap->he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK;
+ }
+
return flags;
}
@@ -311,7 +333,7 @@ out:
}
void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum nl80211_band band)
+ enum nl80211_band band, bool update)
{
struct ieee80211_hw *hw = mvm->hw;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -320,7 +342,8 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband;
struct iwl_tlc_config_cmd cfg_cmd = {
.sta_id = mvmsta->sta_id,
- .max_ch_width = rs_fw_bw_from_sta_bw(sta),
+ .max_ch_width = update ?
+ rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
.flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
.max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 30cfd7d50bc9..089972280daa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1239,7 +1239,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
+ if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
+ &tx_resp_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
#ifdef CONFIG_MAC80211_DEBUGFS
/* Disable last tx check if we are debugging with fixed rate but
@@ -1276,7 +1280,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
(unsigned long)(lq_sta->last_tx +
(IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
- iwl_mvm_rs_rate_init(mvm, sta, info->band);
+ iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
return;
}
lq_sta->last_tx = jiffies;
@@ -1290,7 +1294,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
table = &lq_sta->lq;
lq_hwrate = le32_to_cpu(table->rs_table[0]);
- rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
/* Here we actually compare this rate to the latest LQ command */
if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
@@ -1392,8 +1399,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* Collect data for each rate used during failed TX attempts */
for (i = 0; i <= retries; ++i) {
lq_hwrate = le32_to_cpu(table->rs_table[i]);
- rs_rate_from_ucode_rate(lq_hwrate, info->band,
- &lq_rate);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
+ &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
/*
* Only collect stats if retried rate is in the same RS
* table as active/search.
@@ -2859,9 +2870,8 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
static void rs_initialize_lq(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- enum nl80211_band band)
+ enum nl80211_band band, bool update)
{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_scale_tbl_info *tbl;
struct rs_rate *rate;
u8 active_tbl = 0;
@@ -2890,8 +2900,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
rs_set_expected_tpt_table(lq_sta, tbl);
rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
/* TODO restore station should remember the lq cmd */
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq,
- mvmsta->sta_state < IEEE80211_STA_AUTHORIZED);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
}
static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
@@ -3144,7 +3153,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
* Called after adding a new station to initialize rate scaling
*/
static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum nl80211_band band)
+ enum nl80211_band band, bool update)
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
@@ -3215,7 +3224,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
- first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
+ iwl_mvm_bt_coex_get_single_ant_msk(mvm, iwl_mvm_get_valid_tx_ant(mvm));
lq_sta->lq.dual_stream_ant_msk = ANT_AB;
/* as default allow aggregation for all tids */
@@ -3224,7 +3233,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_reset_frame_stats(mvm);
#endif
- rs_initialize_lq(mvm, sta, lq_sta, band);
+ rs_initialize_lq(mvm, sta, lq_sta, band, update);
}
static void rs_drv_rate_update(void *mvm_r,
@@ -3244,7 +3253,7 @@ static void rs_drv_rate_update(void *mvm_r,
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
ieee80211_stop_tx_ba_session(sta, tid);
- iwl_mvm_rs_rate_init(mvm, sta, sband->band);
+ iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
}
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -3262,7 +3271,10 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
for (i = 0; i < num_rates; i++)
lq_cmd->rs_table[i] = ucode_rate_le32;
- rs_rate_from_ucode_rate(ucode_rate, band, &rate);
+ if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
if (is_mimo(&rate))
lq_cmd->mimo_delim = num_rates - 1;
@@ -3578,7 +3590,8 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
- if (num_of_ant(initial_rate->ant) == 1)
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) &&
+ num_of_ant(initial_rate->ant) == 1)
lq_cmd->single_stream_ant_msk = initial_rate->ant;
lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
@@ -4098,12 +4111,12 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
};
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum nl80211_band band)
+ enum nl80211_band band, bool update)
{
if (iwl_mvm_has_tlc_offload(mvm))
- rs_fw_rate_init(mvm, sta, band);
+ rs_fw_rate_init(mvm, sta, band, update);
else
- rs_drv_rate_init(mvm, sta, band);
+ rs_drv_rate_init(mvm, sta, band, update);
}
int iwl_mvm_rate_control_register(void)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index d2cf484e2b73..d0f47899f284 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -420,7 +420,7 @@ struct iwl_lq_sta {
/* Initialize station's rate scaling information after adding station */
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum nl80211_band band);
+ enum nl80211_band band, bool init);
/* Notify RS about Tx status */
void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
@@ -461,7 +461,7 @@ void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum nl80211_band band);
+ enum nl80211_band band, bool update);
int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable);
void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index bfb163419c67..ef624833cf1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -438,13 +433,14 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *tx_blocked_vif =
rcu_dereference(mvm->csa_tx_blocked_vif);
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct ieee80211_vif *vif = mvmsta->vif;
/* We have tx blocked stations (with CS bit). If we heard
* frames from a blocked station on a new channel we can
* TX to it again.
*/
- if (unlikely(tx_blocked_vif) &&
- mvmsta->vif == tx_blocked_vif) {
+ if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(tx_blocked_vif);
@@ -455,23 +451,18 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rs_update_last_rssi(mvm, mvmsta, rx_status);
- if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
- ieee80211_is_beacon(hdr->frame_control)) {
- struct iwl_fw_dbg_trigger_tlv *trig;
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_RSSI);
+
+ if (trig && ieee80211_is_beacon(hdr->frame_control)) {
struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
- bool trig_check;
s32 rssi;
- trig = iwl_fw_dbg_get_trigger(mvm->fw,
- FW_DBG_TRIGGER_RSSI);
rssi_trig = (void *)trig->data;
rssi = le32_to_cpu(rssi_trig->rssi);
- trig_check =
- iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(mvmsta->vif),
- trig);
- if (trig_check && rx_status->signal < rssi)
+ if (rx_status->signal < rssi)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
NULL);
}
@@ -698,15 +689,12 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
struct iwl_fw_dbg_trigger_stats *trig_stats;
u32 trig_offset, trig_thold;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
trig_stats = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
- return;
-
trig_offset = le32_to_cpu(trig_stats->stop_offset);
trig_thold = le32_to_cpu(trig_stats->stop_threshold);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index b53148f972a4..26ac9402568d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -283,6 +283,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
+ if (mvm->trans->cfg->gen2 &&
+ !(status & RX_MPDU_RES_STATUS_MIC_OK))
+ stats->flag |= RX_FLAG_MMIC_ERROR;
+
*crypt_len = IEEE80211_TKIP_IV_LEN;
/* fall through if TTAK OK */
case IWL_RX_MPDU_STATUS_SEC_WEP:
@@ -294,8 +298,11 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
IWL_RX_MPDU_STATUS_SEC_WEP)
*crypt_len = IEEE80211_WEP_IV_LEN;
- if (pkt_flags & FH_RSCSR_RADA_EN)
+ if (pkt_flags & FH_RSCSR_RADA_EN) {
stats->flag |= RX_FLAG_ICV_STRIPPED;
+ if (mvm->trans->cfg->gen2)
+ stats->flag |= RX_FLAG_MMIC_STRIPPED;
+ }
return 0;
case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
@@ -856,6 +863,444 @@ static void iwl_mvm_flip_address(u8 *addr)
ether_addr_copy(addr, mac_addr);
}
+static void iwl_mvm_decode_he_sigb(struct iwl_mvm *mvm,
+ struct iwl_rx_mpdu_desc *desc,
+ u32 rate_n_flags,
+ struct ieee80211_radiotap_he_mu *he_mu)
+{
+ u32 sigb0, sigb1;
+ u16 sigb2;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ sigb0 = le32_to_cpu(desc->v3.sigb_common0);
+ sigb1 = le32_to_cpu(desc->v3.sigb_common1);
+ } else {
+ sigb0 = le32_to_cpu(desc->v1.sigb_common0);
+ sigb1 = le32_to_cpu(desc->v1.sigb_common1);
+ }
+
+ sigb2 = le16_to_cpu(desc->sigb_common2);
+
+ if (FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH1_CRC_OK, sigb2)) {
+ he_mu->flags1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);
+
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH1_CTR_RU,
+ sigb2),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);
+
+ he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH1_RU0,
+ sigb0);
+ he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH1_RU1,
+ sigb1);
+ he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH1_RU2,
+ sigb0);
+ he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH1_RU3,
+ sigb1);
+ }
+
+ if (FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH2_CRC_OK, sigb2) &&
+ (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) {
+ he_mu->flags1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);
+
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH2_CTR_RU,
+ sigb2),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);
+
+ he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH2_RU0,
+ sigb0);
+ he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH2_RU1,
+ sigb1);
+ he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH2_RU2,
+ sigb0);
+ he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH2_RU3,
+ sigb1);
+ }
+}
+
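
The SIGB decoding above leans on FIELD_GET() to pull bitfields out of the firmware words. For a contiguous mask, FIELD_GET() is equivalent to masking and then shifting down by the mask's lowest set bit; a standalone demo of that equivalence (the macro name is ours, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* For a contiguous mask, dividing by the mask's lowest set bit
 * (mask & -mask) is the same as shifting right by the mask's
 * trailing-zero count. */
#define FIELD_GET_DEMO(mask, reg) \
	(((reg) & (mask)) / ((mask) & -(mask)))

int main(void)
{
	uint32_t word = 0x00a30000;

	/* extract bits 16..23 -> prints 0xa3 */
	printf("0x%x\n", (unsigned)FIELD_GET_DEMO(0x00ff0000u, word));
	return 0;
}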
+static void
+iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags,
+ struct ieee80211_radiotap_he *he,
+ struct ieee80211_radiotap_he_mu *he_mu,
+ struct ieee80211_rx_status *rx_status)
+{
+ /*
+ * Unfortunately, we have to leave the mac80211 data
+ * incorrect for the case that we receive an HE-MU
+ * transmission and *don't* have the HE phy data (due
+ * to the bits being used for TSF). This shouldn't
+ * happen, though, as management frames, where we need
+ * the TSF/timers, are not transmitted in HE-MU.
+ */
+ u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+ u8 offs = 0;
+
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+ switch (ru) {
+ case 0 ... 36:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+ he->data2 |= le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
+ if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+
+ if (he_mu) {
+#define CHECK_BW(bw) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
+ CHECK_BW(20);
+ CHECK_BW(40);
+ CHECK_BW(80);
+ CHECK_BW(160);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
+ rate_n_flags),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
+ }
+}
+
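
The RU-allocation switch in iwl_mvm_decode_he_phy_ru_alloc() above maps the firmware's linear RU index onto an allocation size plus an offset within that size class. The same mapping restated as a small userspace function, purely for illustration:

#include <stdio.h>

struct ru_map { int tones; int offs; };

/* Indices 0..36 are 26-tone RUs, 37..52 are 52-tone, 53..60 are
 * 106-tone, 61..64 are 242-tone, 65..66 are 484-tone, 67 is 996-tone
 * and 68 is 2x996; the offset restarts within each size class. */
static struct ru_map map_ru(int ru)
{
	struct ru_map m = { 0, 0 };

	if (ru <= 36)      { m.tones = 26;   m.offs = ru; }
	else if (ru <= 52) { m.tones = 52;   m.offs = ru - 37; }
	else if (ru <= 60) { m.tones = 106;  m.offs = ru - 53; }
	else if (ru <= 64) { m.tones = 242;  m.offs = ru - 61; }
	else if (ru <= 66) { m.tones = 484;  m.offs = ru - 65; }
	else if (ru == 67) { m.tones = 996; }
	else if (ru == 68) { m.tones = 1992; /* 2x996 */ }
	return m;
}

int main(void)
{
	struct ru_map m = map_ru(40);

	printf("RU 40 -> %d-tone allocation, offset %d\n", m.tones, m.offs);
	return 0;
}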
+static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
+ struct iwl_rx_mpdu_desc *desc,
+ struct ieee80211_radiotap_he *he,
+ struct ieee80211_radiotap_he_mu *he_mu,
+ struct ieee80211_rx_status *rx_status,
+ u64 he_phy_data, u32 rate_n_flags,
+ int queue)
+{
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ bool sigb_data;
+ u16 d1known = IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN;
+ u16 d2known = IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN;
+
+ he->data1 |= cpu_to_le16(d1known);
+ he->data2 |= cpu_to_le16(d2known);
+ he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
+ he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_UPLINK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+ he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_LDPC_EXT_SYM,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
+ he->data4 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SPATIAL_REUSE_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
+ he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PRE_FEC_PAD_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
+ he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PE_DISAMBIG,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
+ he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_TXOP_DUR_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA6_TXOP);
+ he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_DOPPLER,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
+
+ switch (he_type) {
+ case RATE_MCS_HE_TYPE_MU:
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
+
+ sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK,
+ he_phy_data) ==
+ IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO;
+ if (sigb_data)
+ iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu);
+ /* fall through */
+ case RATE_MCS_HE_TYPE_TRIG:
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ he->data5 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ break;
+ case RATE_MCS_HE_TYPE_SU:
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
+ he->data3 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BEAM_CHNG,
+ he_phy_data),
+ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
+ break;
+ }
+
+ switch (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data)) {
+ case IWL_RX_HE_PHY_INFO_TYPE_MU:
+ case IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO:
+ case IWL_RX_HE_PHY_INFO_TYPE_TB:
+ iwl_mvm_decode_he_phy_ru_alloc(he_phy_data, rate_n_flags,
+ he, he_mu, rx_status);
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+}
+
+static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_rx_mpdu_desc *desc,
+ u32 rate_n_flags, u16 phy_info, int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ /* this is invalid e.g. because puncture type doesn't allow 0b11 */
+#define HE_PHY_DATA_INVAL ((u64)-1)
+ u64 he_phy_data = HE_PHY_DATA_INVAL;
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ u8 stbc, ltf;
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+ };
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+ .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
+ };
+ unsigned int radiotap_len = 0;
+
+ he = skb_put_data(skb, &known, sizeof(known));
+ radiotap_len += sizeof(known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+ else
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ if (he_type == RATE_MCS_HE_TYPE_MU) {
+ he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
+ radiotap_len += sizeof(mu_known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ }
+ }
+
+ /* temporarily hide the radiotap data */
+ __skb_pull(skb, radiotap_len);
+
+ if (he_phy_data != HE_PHY_DATA_INVAL &&
+ he_type == RATE_MCS_HE_TYPE_SU) {
+ /* report the AMPDU-EOF bit on single frames */
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, he_phy_data))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+ }
+
+ if (he_phy_data != HE_PHY_DATA_INVAL)
+ iwl_mvm_decode_he_phy_data(mvm, desc, he, he_mu, rx_status,
+ he_phy_data, rate_n_flags, queue);
+
+ /* update aggregation data for the monitor's sake on the default queue */
+ if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+
+ /* toggle is switched whenever new aggregation starts */
+ if (toggle_bit != mvm->ampdu_toggle &&
+ he_phy_data != HE_PHY_DATA_INVAL &&
+ (he_type == RATE_MCS_HE_TYPE_MU ||
+ he_type == RATE_MCS_HE_TYPE_SU)) {
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+ he_phy_data))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+ }
+
+ if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+ rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ /* the data is actually filled in by mac80211 */
+ if (he_type == RATE_MCS_HE_TYPE_SU ||
+ he_type == RATE_MCS_HE_TYPE_EXT_SU)
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+ stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
+ rx_status->nss =
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
+
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
+ he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+ switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+ RATE_MCS_HE_GI_LTF_POS) {
+ case 0:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ if (he_type == RATE_MCS_HE_TYPE_MU)
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ else
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
+ break;
+ case 1:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ break;
+ case 2:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG) {
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ } else {
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ }
+ break;
+ case 3:
+ if ((he_type == RATE_MCS_HE_TYPE_SU ||
+ he_type == RATE_MCS_HE_TYPE_EXT_SU) &&
+ rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ break;
+ }
+
+ he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+
+ if (he_type == RATE_MCS_HE_TYPE_SU ||
+ he_type == RATE_MCS_HE_TYPE_EXT_SU) {
+ u16 val;
+
+ /* LTF syms correspond to streams */
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ switch (rx_status->nss) {
+ case 1:
+ val = 0;
+ break;
+ case 2:
+ val = 1;
+ break;
+ case 3:
+ case 4:
+ val = 2;
+ break;
+ case 5:
+ case 6:
+ val = 3;
+ break;
+ case 7:
+ case 8:
+ val = 4;
+ break;
+ default:
+ WARN_ONCE(1, "invalid nss: %d\n",
+ rx_status->nss);
+ val = 0;
+ }
+
+ he->data5 |=
+ le16_encode_bits(val,
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ }
+}
+
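
The CHECK_TYPE() macro used above (like CHECK_BW() earlier) is a compile-time assertion that the radiotap format values and the firmware rate encoding stay in lock-step, which is what allows he_type to be shifted straight into data1. A standalone C11 analogue using static_assert, with made-up stand-in constants:

#include <assert.h>

/* Illustrative stand-ins for the radiotap and firmware encodings;
 * the real values live in ieee80211_radiotap.h and the iwlwifi
 * rate definitions. */
#define DEMO_RADIOTAP_HE_DATA1_FORMAT_SU 1
#define DEMO_RATE_MCS_HE_TYPE_POS 12
#define DEMO_RATE_MCS_HE_TYPE_SU (1 << 12)

/* Fails the build, not the runtime, if the encodings ever diverge. */
#define CHECK_TYPE(F) \
	static_assert(DEMO_RADIOTAP_HE_DATA1_FORMAT_ ## F == \
		      (DEMO_RATE_MCS_HE_TYPE_ ## F >> DEMO_RATE_MCS_HE_TYPE_POS), \
		      "radiotap and firmware encodings must stay in sync")

CHECK_TYPE(SU);

int main(void) { return 0; }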
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -869,12 +1314,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
u8 crypt_len = 0, channel, energy_a, energy_b;
- struct ieee80211_radiotap_he *he = NULL;
- struct ieee80211_radiotap_he_mu *he_mu = NULL;
- u32 he_type = 0xffffffff;
- /* this is invalid e.g. because puncture type doesn't allow 0b11 */
-#define HE_PHY_DATA_INVAL ((u64)-1)
- u64 he_phy_data = HE_PHY_DATA_INVAL;
size_t desc_size;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
@@ -918,49 +1357,24 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
- if (rate_n_flags & RATE_MCS_HE_MSK) {
- static const struct ieee80211_radiotap_he known = {
- .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
- .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
- };
- static const struct ieee80211_radiotap_he_mu mu_known = {
- .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
- .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
- };
- unsigned int radiotap_len = 0;
-
- he = skb_put_data(skb, &known, sizeof(known));
- radiotap_len += sizeof(known);
- rx_status->flag |= RX_FLAG_RADIOTAP_HE;
-
- he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
-
- if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
- if (mvm->trans->cfg->device_family >=
- IWL_DEVICE_FAMILY_22560)
- he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
- else
- he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
-
- if (he_type == RATE_MCS_HE_TYPE_MU) {
- he_mu = skb_put_data(skb, &mu_known,
- sizeof(mu_known));
- radiotap_len += sizeof(mu_known);
- rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
- }
- }
-
- /* temporarily hide the radiotap data */
- __skb_pull(skb, radiotap_len);
+ /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status->bw = RATE_INFO_BW_160;
+ break;
}
+ if (rate_n_flags & RATE_MCS_HE_MSK)
+ iwl_mvm_rx_he(mvm, skb, desc, rate_n_flags, phy_info, queue);
+
rx_status = IEEE80211_SKB_RXCB(skb);
if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
@@ -995,53 +1409,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->mactime = tsf_on_air_rise;
/* TSF as indicated by the firmware is at INA time */
rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
- } else if (he_type == RATE_MCS_HE_TYPE_SU) {
- u64 he_phy_data;
-
- if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
- else
- he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
-
- he->data1 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
- if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
- he_phy_data))
- he->data3 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
-
- if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
- rx_status->ampdu_reference = mvm->ampdu_ref;
- mvm->ampdu_ref++;
-
- rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
- rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
- if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
- he_phy_data))
- rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
- }
- } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
- he_mu->flags1 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
- he_mu->flags1 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
- he_mu->flags1 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
- he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
- he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
}
+
rx_status->device_timestamp = gp2_on_air_rise;
rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
@@ -1066,15 +1435,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (toggle_bit != mvm->ampdu_toggle) {
mvm->ampdu_ref++;
mvm->ampdu_toggle = toggle_bit;
-
- if (he_phy_data != HE_PHY_DATA_INVAL &&
- he_type == RATE_MCS_HE_TYPE_MU) {
- rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
- if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
- he_phy_data))
- rx_status->flag |=
- RX_FLAG_AMPDU_EOF_BIT;
- }
}
}
@@ -1103,6 +1463,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT);
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct ieee80211_vif *vif = mvmsta->vif;
if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
!is_multicast_ether_addr(hdr->addr1) &&
@@ -1115,8 +1477,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
* frames from a blocked station on a new channel we can
* TX to it again.
*/
- if (unlikely(tx_blocked_vif) &&
- tx_blocked_vif == mvmsta->vif) {
+ if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(tx_blocked_vif);
@@ -1127,23 +1488,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rs_update_last_rssi(mvm, mvmsta, rx_status);
- if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
- ieee80211_is_beacon(hdr->frame_control)) {
- struct iwl_fw_dbg_trigger_tlv *trig;
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_RSSI);
+
+ if (trig && ieee80211_is_beacon(hdr->frame_control)) {
struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
- bool trig_check;
s32 rssi;
- trig = iwl_fw_dbg_get_trigger(mvm->fw,
- FW_DBG_TRIGGER_RSSI);
rssi_trig = (void *)trig->data;
rssi = le32_to_cpu(rssi_trig->rssi);
- trig_check =
- iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(mvmsta->vif),
- trig);
- if (trig_check && rx_status->signal < rssi)
+ if (rx_status->signal < rssi)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
NULL);
}
@@ -1183,84 +1539,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
- switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
- case RATE_MCS_CHAN_WIDTH_20:
- break;
- case RATE_MCS_CHAN_WIDTH_40:
- rx_status->bw = RATE_INFO_BW_40;
- break;
- case RATE_MCS_CHAN_WIDTH_80:
- rx_status->bw = RATE_INFO_BW_80;
- break;
- case RATE_MCS_CHAN_WIDTH_160:
- rx_status->bw = RATE_INFO_BW_160;
- break;
- }
-
- if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
- rate_n_flags & RATE_MCS_HE_106T_MSK) {
- rx_status->bw = RATE_INFO_BW_HE_RU;
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
- }
-
- if (rate_n_flags & RATE_MCS_HE_MSK &&
- phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
- he_type == RATE_MCS_HE_TYPE_MU) {
- /*
- * Unfortunately, we have to leave the mac80211 data
- * incorrect for the case that we receive an HE-MU
- * transmission and *don't* have the he_mu pointer,
- * i.e. we don't have the phy data (due to the bits
- * being used for TSF). This shouldn't happen though
- * as management frames where we need the TSF/timers
- * are not be transmitted in HE-MU, I think.
- */
- u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
- u8 offs = 0;
-
- rx_status->bw = RATE_INFO_BW_HE_RU;
-
- switch (ru) {
- case 0 ... 36:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
- offs = ru;
- break;
- case 37 ... 52:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
- offs = ru - 37;
- break;
- case 53 ... 60:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
- offs = ru - 53;
- break;
- case 61 ... 64:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
- offs = ru - 61;
- break;
- case 65 ... 66:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
- offs = ru - 65;
- break;
- case 67:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
- break;
- case 68:
- rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
- break;
- }
- he->data2 |=
- le16_encode_bits(offs,
- IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
- he->data2 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
- if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
- he->data2 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
- } else if (he) {
- he->data1 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
- }
-
if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
rate_n_flags & RATE_MCS_SGI_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
@@ -1285,120 +1563,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_BF;
- } else if (he) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->nss =
- ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
- RATE_VHT_MCS_NSS_POS) + 1;
- rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
- rx_status->encoding = RX_ENC_HE;
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_BF;
-
- rx_status->he_dcm =
- !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
-
-#define CHECK_TYPE(F) \
- BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
- (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
-
- CHECK_TYPE(SU);
- CHECK_TYPE(EXT_SU);
- CHECK_TYPE(MU);
- CHECK_TYPE(TRIG);
-
- he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
-
- if (rate_n_flags & RATE_MCS_BF_POS)
- he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
-
- switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
- RATE_MCS_HE_GI_LTF_POS) {
- case 0:
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
- break;
- case 1:
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
- break;
- case 2:
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
- break;
- case 3:
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
- else
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
- break;
- }
-
- switch (he_type) {
- case RATE_MCS_HE_TYPE_SU: {
- u16 val;
-
- /* LTF syms correspond to streams */
- he->data2 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
- switch (rx_status->nss) {
- case 1:
- val = 0;
- break;
- case 2:
- val = 1;
- break;
- case 3:
- case 4:
- val = 2;
- break;
- case 5:
- case 6:
- val = 3;
- break;
- case 7:
- case 8:
- val = 4;
- break;
- default:
- WARN_ONCE(1, "invalid nss: %d\n",
- rx_status->nss);
- val = 0;
- }
- he->data5 |=
- le16_encode_bits(val,
- IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
- }
- break;
- case RATE_MCS_HE_TYPE_MU: {
- u16 val;
- u64 he_phy_data;
-
- if (mvm->trans->cfg->device_family >=
- IWL_DEVICE_FAMILY_22560)
- he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
- else
- he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
-
- if (he_phy_data == HE_PHY_DATA_INVAL)
- break;
-
- val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
- he_phy_data);
-
- he->data2 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
- he->data5 |=
- cpu_to_le16(FIELD_PREP(
- IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
- val));
- }
- break;
- case RATE_MCS_HE_TYPE_EXT_SU:
- case RATE_MCS_HE_TYPE_TRIG:
- /* not supported yet */
- break;
- }
- } else {
+ } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) {
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
@@ -1409,7 +1574,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
goto out;
}
rx_status->rate_idx = rate;
-
}
/* management stuff on default queue */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 11ecdf63b732..cfb784fea77b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -19,9 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -113,6 +110,10 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = {
.suspend_time = 95,
.max_out_time = 44,
},
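+	/* shorter out-of-channel times, for scanning next to an active P2P GO */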
+ [IWL_SCAN_TYPE_FAST_BALANCE] = {
+ .suspend_time = 30,
+ .max_out_time = 37,
+ },
};
struct iwl_mvm_scan_params {
@@ -238,8 +239,32 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
return mvm->tcm.result.band_load[band];
}
+struct iwl_is_dcm_with_go_iterator_data {
+ struct ieee80211_vif *current_vif;
+ bool is_dcm_with_p2p_go;
+};
+
+static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_is_dcm_with_go_iterator_data *data = _data;
+ struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif *curr_mvmvif =
+ iwl_mvm_vif_from_mac80211(data->current_vif);
+
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
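+ /*
+ * an AP interface with p2p set is a P2P GO; one on a different
+ * phy context than the current vif means we are in DCM with a GO
+ */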
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
+ other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
+ data->is_dcm_with_p2p_go = true;
+}
+
static enum
-iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
+iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
enum iwl_mvm_traffic_load load,
bool low_latency)
{
@@ -252,9 +277,30 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
if (!global_cnt)
return IWL_SCAN_TYPE_UNASSOC;
- if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
- fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
- return IWL_SCAN_TYPE_FRAGMENTED;
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+ if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+ (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ return IWL_SCAN_TYPE_FRAGMENTED;
+
+ /* in case of DCM with a P2P GO where the BSS DTIM interval is
+ * shorter than 220 msec, set all scan requests as fast-balance
+ * scans
+ */
+ if (vif && vif->type == NL80211_IFTYPE_STATION &&
+ vif->bss_conf.dtim_period < 220) {
+ struct iwl_is_dcm_with_go_iterator_data data = {
+ .current_vif = vif,
+ .is_dcm_with_p2p_go = false,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_is_dcm_with_go_iterator,
+ &data);
+ if (data.is_dcm_with_p2p_go)
+ return IWL_SCAN_TYPE_FAST_BALANCE;
+ }
+ }
if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
return IWL_SCAN_TYPE_MILD;
@@ -263,7 +309,8 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
}
static enum
-iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
enum iwl_mvm_traffic_load load;
bool low_latency;
@@ -271,12 +318,12 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
load = iwl_mvm_get_traffic_load(mvm);
low_latency = iwl_mvm_low_latency(mvm);
- return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+ return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
- bool p2p_device,
+ struct ieee80211_vif *vif,
enum nl80211_band band)
{
enum iwl_mvm_traffic_load load;
@@ -285,7 +332,7 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
load = iwl_mvm_get_traffic_load_band(mvm, band);
low_latency = iwl_mvm_low_latency_band(mvm, band);
- return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+ return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
static int
@@ -836,16 +883,25 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
+ bool low_latency;
+
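+ /* for CDB check low latency on the 5GHz band only, otherwise globally */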
+ if (iwl_mvm_is_cdb_supported(mvm))
+ low_latency = iwl_mvm_low_latency_band(mvm, NL80211_BAND_5GHZ);
+ else
+ low_latency = iwl_mvm_low_latency(mvm);
/* We can only use EBS if:
* 1. the feature is supported;
* 2. the last EBS was successful;
* 3. if only single scan, the single scan EBS API is supported;
* 4. it's not a p2p find operation.
+ * 5. we are not in low latency mode, or fragmented EBS is
+ * supported by the FW
*/
return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
- vif->type != NL80211_IFTYPE_P2P_DEVICE);
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)));
}
static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
@@ -854,6 +910,12 @@ static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
params->scan_plans[0].iterations == 1;
}
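+/* fast-balance scans are fragmented as well, so treat both types alike */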
+static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
+{
+ return (type == IWL_SCAN_TYPE_FRAGMENTED ||
+ type == IWL_SCAN_TYPE_FAST_BALANCE);
+}
+
static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@@ -866,7 +928,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
- if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+ if (iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -889,7 +951,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
- params->type != IWL_SCAN_TYPE_FRAGMENTED)
+ !iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
return flags;
@@ -1038,7 +1100,7 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
- enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+ enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
struct iwl_scan_config_v1 *cfg = config;
cfg->flags = cpu_to_le32(flags);
@@ -1071,9 +1133,9 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
if (iwl_mvm_is_cdb_supported(mvm)) {
enum iwl_mvm_scan_type lb_type, hb_type;
- lb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_2GHZ);
- hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_5GHZ);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
@@ -1087,7 +1149,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
cpu_to_le32(scan_timing[hb_type].suspend_time);
} else {
enum iwl_mvm_scan_type type =
- iwl_mvm_get_scan_type(mvm, false);
+ iwl_mvm_get_scan_type(mvm, NULL);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(scan_timing[type].max_out_time);
@@ -1124,14 +1186,14 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
return -ENOBUFS;
if (iwl_mvm_is_cdb_supported(mvm)) {
- type = iwl_mvm_get_scan_type_band(mvm, false,
+ type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_2GHZ);
- hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+ hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_5GHZ);
if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
return 0;
} else {
- type = iwl_mvm_get_scan_type(mvm, false);
+ type = iwl_mvm_get_scan_type(mvm, NULL);
if (type == mvm->scan_type)
return 0;
}
@@ -1156,7 +1218,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
SCAN_CONFIG_FLAG_SET_MAC_ADDR |
SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
SCAN_CONFIG_N_CHANNELS(num_channels) |
- (type == IWL_SCAN_TYPE_FRAGMENTED ?
+ (iwl_mvm_is_scan_fragmented(type) ?
SCAN_CONFIG_FLAG_SET_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
@@ -1171,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
*/
if (iwl_mvm_cdb_scan_api(mvm)) {
if (iwl_mvm_is_cdb_supported(mvm))
- flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ?
+ flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
@@ -1332,11 +1394,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
- if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+ if (iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_is_cdb_supported(mvm) &&
- params->hb_type == IWL_SCAN_TYPE_FRAGMENTED)
+ iwl_mvm_is_scan_fragmented(params->hb_type))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
@@ -1374,7 +1436,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
*/
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
- params->type != IWL_SCAN_TYPE_FRAGMENTED &&
+ !iwl_mvm_is_scan_fragmented(params->type) &&
!iwl_mvm_is_adaptive_dwell_supported(mvm) &&
!iwl_mvm_is_oce_supported(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
@@ -1442,6 +1504,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
IWL_SCAN_NUM_OF_FRAGS;
+
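+ /* allow the FW to reorder the scan channels (for efficiency, presumably) */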
+ cmd->v8.general_flags2 =
+ IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
}
cmd->scan_start_mac_id = scan_vif->id;
@@ -1449,11 +1514,21 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
- if (iwl_mvm_scan_use_ebs(mvm, vif))
+ if (iwl_mvm_scan_use_ebs(mvm, vif)) {
channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+ /* set fragmented EBS for fragmented scan on HB channels */
+ if (iwl_mvm_is_frag_ebs_supported(mvm)) {
+ if (gen_flags &
+ IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED ||
+ (!iwl_mvm_is_cdb_supported(mvm) &&
+ gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED))
+ channel_flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+ }
+ }
+
chan_param->flags = channel_flags;
chan_param->count = params->n_channels;
@@ -1570,19 +1645,20 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work)
static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
- bool p2p)
+ struct ieee80211_vif *vif)
{
if (iwl_mvm_is_cdb_supported(mvm)) {
params->type =
- iwl_mvm_get_scan_type_band(mvm, p2p,
+ iwl_mvm_get_scan_type_band(mvm, vif,
NL80211_BAND_2GHZ);
params->hb_type =
- iwl_mvm_get_scan_type_band(mvm, p2p,
+ iwl_mvm_get_scan_type_band(mvm, vif,
NL80211_BAND_5GHZ);
} else {
- params->type = iwl_mvm_get_scan_type(mvm, p2p);
+ params->type = iwl_mvm_get_scan_type(mvm, vif);
}
}
+
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -1630,8 +1706,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_plans = &scan_plan;
params.n_scan_plans = 1;
- iwl_mvm_fill_scan_type(mvm, &params,
- vif->type == NL80211_IFTYPE_P2P_DEVICE);
+ iwl_mvm_fill_scan_type(mvm, &params, vif);
ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
if (ret < 0)
@@ -1726,8 +1801,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.n_scan_plans = req->n_scan_plans;
params.scan_plans = req->scan_plans;
- iwl_mvm_fill_scan_type(mvm, &params,
- vif->type == NL80211_IFTYPE_P2P_DEVICE);
+ iwl_mvm_fill_scan_type(mvm, &params, vif);
/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 539b06bf0803..d1d76bb9a750 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 18db1ed92d9b..1887d2b9f185 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -72,6 +67,14 @@
#include "sta.h"
#include "rs.h"
+static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);
+
+static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
+ u32 sta_id,
+ struct ieee80211_key_conf *key, bool mcast,
+ u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
+ u8 key_offset, bool mfp);
+
/*
* New version of ADD_STA_sta command added new fields at the end of the
* structure, so sending the size of the relevant API's structure is enough to
@@ -355,6 +358,108 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
return ret;
}
+static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u8 tid, u8 flags)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_DISABLE_QUEUE,
+ };
+ bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
+ int ret;
+
+ if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
+ return -EINVAL;
+
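+ /*
+ * on the new TX path each queue belongs to a single TID, so
+ * just drop the mac80211 mapping and free the queue
+ */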
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ if (remove_mac_queue)
+ mvm->hw_queue_to_mac80211[queue] &=
+ ~BIT(mac80211_queue);
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_trans_txq_free(mvm->trans, queue);
+
+ return 0;
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return 0;
+ }
+
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+ /*
+ * If there is another TID with the same AC - don't remove the MAC queue
+ * from the mapping
+ */
+ if (tid < IWL_MAX_TID_COUNT) {
+ unsigned long tid_bitmap =
+ mvm->queue_info[queue].tid_bitmap;
+ int ac = tid_to_mac80211_ac[tid];
+ int i;
+
+ for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+ if (tid_to_mac80211_ac[i] == ac)
+ remove_mac_queue = false;
+ }
+ }
+
+ if (remove_mac_queue)
+ mvm->hw_queue_to_mac80211[queue] &=
+ ~BIT(mac80211_queue);
+
+ cmd.action = mvm->queue_info[queue].tid_bitmap ?
+ SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+ if (cmd.action == SCD_CFG_DISABLE_QUEUE)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+ queue,
+ mvm->queue_info[queue].tid_bitmap,
+ mvm->hw_queue_to_mac80211[queue]);
+
+ /* If the queue is still enabled - nothing left to do in this func */
+ if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return 0;
+ }
+
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+ cmd.tid = mvm->queue_info[queue].txq_tid;
+
+ /* Make sure queue info is correct even though we overwrite it */
+ WARN(mvm->queue_info[queue].tid_bitmap ||
+ mvm->hw_queue_to_mac80211[queue],
+ "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
+ queue, mvm->hw_queue_to_mac80211[queue],
+ mvm->queue_info[queue].tid_bitmap);
+
+ /* If we are here - the queue is freed and we can zero out these vals */
+ mvm->queue_info[queue].tid_bitmap = 0;
+ mvm->hw_queue_to_mac80211[queue] = 0;
+
+ /* Regardless of whether the TXQ was reserved for a STA - mark it unreserved */
+ mvm->queue_info[queue].reserved = false;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+ sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
+
+ if (ret)
+ IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+ queue, ret);
+ return ret;
+}
+
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
struct ieee80211_sta *sta;
@@ -444,11 +549,12 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
}
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
- bool same_sta)
+ u8 new_sta_id)
{
struct iwl_mvm_sta *mvmsta;
u8 txq_curr_ac, sta_id, tid;
unsigned long disable_agg_tids = 0;
+ bool same_sta;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -462,6 +568,8 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock);
+ same_sta = sta_id == new_sta_id;
+
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (WARN_ON(!mvmsta))
return -EINVAL;
@@ -476,10 +584,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
mvmsta->vif->hw_queue[txq_curr_ac],
tid, 0);
if (ret) {
- /* Re-mark the inactive queue as inactive */
- spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
- spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
queue, ret);
@@ -501,7 +605,13 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
u8 ac_to_queue[IEEE80211_NUM_ACS];
int i;
+ /*
+ * This protects us against grabbing a queue that's being reconfigured
+ * by the inactivity checker.
+ */
+ lockdep_assert_held(&mvm->mutex);
lockdep_assert_held(&mvm->queue_info_lock);
+
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
@@ -514,11 +624,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
continue;
- /* Don't try and take queues being reconfigured */
- if (mvm->queue_info[queue].status ==
- IWL_MVM_QUEUE_RECONFIGURING)
- continue;
-
ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
}
@@ -559,14 +664,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
return -ENOSPC;
}
- /* Make sure the queue isn't in the middle of being reconfigured */
- if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
- IWL_ERR(mvm,
- "TXQ %d is in the middle of re-config - try again\n",
- queue);
- return -EBUSY;
- }
-
return queue;
}
@@ -576,9 +673,9 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
* in such a case, otherwise - if no redirection required - it does nothing,
* unless the %force param is true.
*/
-int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
- int ac, int ssn, unsigned int wdg_timeout,
- bool force)
+static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
@@ -613,7 +710,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
cmd.tid = mvm->queue_info[queue].txq_tid;
mq = mvm->hw_queue_to_mac80211[queue];
- shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
+ shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
@@ -671,6 +768,57 @@ out:
return ret;
}
+static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
+ u8 minq, u8 maxq)
+{
+ int i;
+
+ lockdep_assert_held(&mvm->queue_info_lock);
+
+ /* This should not be hit with new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -ENOSPC;
+
+ /* Start by looking for a free queue */
+ for (i = minq; i <= maxq; i++)
+ if (mvm->queue_info[i].tid_bitmap == 0 &&
+ mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
+ return i;
+
+ return -ENOSPC;
+}
+
+static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+ u8 sta_id, u8 tid, unsigned int timeout)
+{
+ int queue, size = IWL_DEFAULT_QUEUE_SIZE;
+
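+ /*
+ * mac80211's non-QoS "TID" (IWL_MAX_TID_COUNT) maps to the
+ * firmware's management TID, which has its own queue size
+ */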
+ if (tid == IWL_MAX_TID_COUNT) {
+ tid = IWL_MGMT_TID;
+ size = IWL_MGMT_QUEUE_SIZE;
+ }
+ queue = iwl_trans_txq_alloc(mvm->trans,
+ cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+ sta_id, tid, SCD_QUEUE_CFG, size, timeout);
+
+ if (queue < 0) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
+ sta_id, tid, queue);
+ return queue;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
+ queue, sta_id, tid);
+
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+ queue, mvm->hw_queue_to_mac80211[queue]);
+
+ return queue;
+}
+
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac,
int tid)
@@ -695,12 +843,428 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
spin_lock_bh(&mvmsta->lock);
mvmsta->tid_data[tid].txq_id = queue;
- mvmsta->tid_data[tid].is_tid_active = true;
spin_unlock_bh(&mvmsta->lock);
return 0;
}
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u8 sta_id, u8 tid)
+{
+ bool enable_queue = true;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ /* Make sure this TID isn't already enabled */
+ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
+ queue, tid);
+ return false;
+ }
+
+ /* Update mappings and refcounts */
+ if (mvm->queue_info[queue].tid_bitmap)
+ enable_queue = false;
+
+ if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
+ WARN(mac80211_queue >=
+ BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
+ "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
+ mac80211_queue, queue, sta_id, tid);
+ mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+ }
+
+ mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+ mvm->queue_info[queue].ra_sta_id = sta_id;
+
+ if (enable_queue) {
+ if (tid != IWL_MAX_TID_COUNT)
+ mvm->queue_info[queue].mac80211_ac =
+ tid_to_mac80211_ac[tid];
+ else
+ mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+
+ mvm->queue_info[queue].txq_tid = tid;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+ queue, mvm->queue_info[queue].tid_bitmap,
+ mvm->hw_queue_to_mac80211[queue]);
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ return enable_queue;
+}
+
+static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
+ int mac80211_queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = cfg->frame_limit,
+ .sta_id = cfg->sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = cfg->fifo,
+ .aggregate = cfg->aggregate,
+ .tid = cfg->tid,
+ };
+ bool inc_ssn;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ /* Send the enabling command if we need to */
+ if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+ cfg->sta_id, cfg->tid))
+ return false;
+
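+ /*
+ * the transport may have to adjust the start SSN; if it did,
+ * reflect that in the SCD config command as well
+ */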
+ inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+ NULL, wdg_timeout);
+ if (inc_ssn)
+ le16_add_cpu(&cmd.ssn, 1);
+
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+ "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+
+ return inc_ssn;
+}
+
+static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_UPDATE_QUEUE_TID,
+ };
+ int tid;
+ unsigned long tid_bitmap;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
+ return;
+
+ /* Find any TID for queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ cmd.tid = tid;
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
+ queue, ret);
+ return;
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].txq_tid = tid;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
+ queue, tid);
+}
+
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+ int tid = -1;
+ unsigned long tid_bitmap;
+ unsigned int wdg_timeout;
+ int ssn;
+ int ret = true;
+
+ /* queue sharing is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Find TID for queue, and make sure it is the only one on the queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ if (tid_bitmap != BIT(tid)) {
+ IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+ queue, tid_bitmap);
+ return;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+ tid);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+ ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+ ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+ tid_to_mac80211_ac[tid], ssn,
+ wdg_timeout, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+ return;
+ }
+
+ /* If aggs should be turned back on - do it */
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+ struct iwl_mvm_add_sta_cmd cmd = {0};
+
+ mvmsta->tid_disable_agg &= ~BIT(tid);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (!ret) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d is now aggregated again\n",
+ queue);
+
+ /* Mark queue internally as aggregating again */
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+ }
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+ spin_unlock_bh(&mvm->queue_info_lock);
+}
+
+/*
+ * Remove inactive TIDs of a given queue.
+ * If all queue TIDs are inactive - mark the queue as inactive
+ * If only some of the queue TIDs are inactive - unmap them from the queue
+ *
+ * Returns %true if all TIDs were removed and the queue could be reused.
+ */
+static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int queue,
+ unsigned long tid_bitmap,
+ unsigned long *unshare_queues,
+ unsigned long *changetid_queues)
+{
+ int tid;
+
+ lockdep_assert_held(&mvmsta->lock);
+ lockdep_assert_held(&mvm->queue_info_lock);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ /* If some TFDs are still queued - don't mark TID as inactive */
+ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
+ tid_bitmap &= ~BIT(tid);
+
+ /* Don't mark as inactive any TID that has an active BA */
+ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
+ tid_bitmap &= ~BIT(tid);
+ }
+
+ /* If all TIDs in the queue are inactive - report that it can be reused */
+ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
+ return true;
+ }
+
+ /*
+ * If we are here, this is a shared queue and not all TIDs timed-out.
+ * Remove the ones that did.
+ */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
+ u16 tid_bitmap;
+
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+ mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+
+ /*
+ * We need to take into account a situation in which a TXQ was
+ * allocated to TID x, and then turned shared by adding TIDs y
+ * and z. If TID x becomes inactive and is removed from the TXQ,
+ * ownership must be given to one of the remaining TIDs.
+ * This is mainly because if TID x continues - a new queue can't
+ * be allocated for it as long as it is an owner of another TXQ.
+ *
+ * Mark this queue in the right bitmap, we'll send the command
+ * to the firmware later.
+ */
+ if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
+ set_bit(queue, changetid_queues);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Removing inactive TID %d from shared Q:%d\n",
+ tid, queue);
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d left with tid bitmap 0x%x\n", queue,
+ mvm->queue_info[queue].tid_bitmap);
+
+ /*
+ * There may be different TIDs with the same mac queues, so make
+ * sure all TIDs have existing corresponding mac queues enabled
+ */
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ mvm->hw_queue_to_mac80211[queue] |=
+ BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
+ }
+
+ /* If the queue is marked as shared - "unshare" it */
+ if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
+ mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+ queue);
+ set_bit(queue, unshare_queues);
+ }
+
+ return false;
+}
+
+/*
+ * Check for inactivity - this includes checking if any queue
+ * can be unshared and finding one (and only one) that can be
+ * reused.
+ * This function is also invoked as a sort of clean-up task,
+ * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
+ *
+ * Returns the queue number, or -ENOSPC.
+ */
+static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
+{
+ unsigned long now = jiffies;
+ unsigned long unshare_queues = 0;
+ unsigned long changetid_queues = 0;
+ int i, ret, free_queue = -ENOSPC;
+
+ lockdep_assert_held(&mvm->mutex);
+
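+ /* queues are allocated per-STA/TID on the new TX path - nothing to reclaim */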
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return -ENOSPC;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ rcu_read_lock();
+
+ /* we skip the CMD queue below by starting at 1 */
+ BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
+
+ for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+ int tid;
+ unsigned long inactive_tid_bitmap = 0;
+ unsigned long queue_tid_bitmap;
+
+ queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
+ if (!queue_tid_bitmap)
+ continue;
+
+ /* If TXQ isn't in active use anyway - nothing to do here... */
+ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
+ mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
+ continue;
+
+ /* Check to see if there are inactive TIDs on this queue */
+ for_each_set_bit(tid, &queue_tid_bitmap,
+ IWL_MAX_TID_COUNT + 1) {
+ if (time_after(mvm->queue_info[i].last_frame_time[tid] +
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+ continue;
+
+ inactive_tid_bitmap |= BIT(tid);
+ }
+
+ /* If all TIDs are active - finish check on this queue */
+ if (!inactive_tid_bitmap)
+ continue;
+
+ /*
+ * If we are here - the queue hasn't been served recently and is
+ * in use
+ */
+
+ sta_id = mvm->queue_info[i].ra_sta_id;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /*
+ * If the STA doesn't exist anymore, it isn't an error. It could
+ * be that it was removed since getting the queues, and in this
+ * case it should've inactivated its queues anyway.
+ */
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ /* this isn't so nice, but works OK due to the way we loop */
+ spin_unlock(&mvm->queue_info_lock);
+
+ /* and we need this locking order */
+ spin_lock(&mvmsta->lock);
+ spin_lock(&mvm->queue_info_lock);
+ ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
+ inactive_tid_bitmap,
+ &unshare_queues,
+ &changetid_queues);
+ if (ret >= 0 && free_queue < 0)
+ free_queue = ret;
+ /* only unlock sta lock - we still need the queue info lock */
+ spin_unlock(&mvmsta->lock);
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Reconfigure queues requiring reconfiguration */
+ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
+ iwl_mvm_unshare_queue(mvm, i);
+ for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
+ iwl_mvm_change_queue_tid(mvm, i);
+
+ if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
+ ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
+ alloc_for_sta);
+ if (ret)
+ return ret;
+ }
+
+ return free_queue;
+}
+
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac, int tid,
struct ieee80211_hdr *hdr)
@@ -716,7 +1280,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
- bool using_inactive_queue = false, same_sta = false;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
bool shared_queue = false, inc_ssn;
@@ -753,9 +1316,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
(mvm->queue_info[mvmsta->reserved_queue].status ==
- IWL_MVM_QUEUE_RESERVED ||
- mvm->queue_info[mvmsta->reserved_queue].status ==
- IWL_MVM_QUEUE_INACTIVE)) {
+ IWL_MVM_QUEUE_RESERVED)) {
queue = mvmsta->reserved_queue;
mvm->queue_info[queue].reserved = true;
IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
@@ -765,21 +1326,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (queue < 0) {
+ spin_unlock_bh(&mvm->queue_info_lock);
- /*
- * Check if this queue is already allocated but inactive.
- * In such a case, we'll need to first free this queue before enabling
- * it again, so we'll mark it as reserved to make sure no new traffic
- * arrives on it
- */
- if (queue > 0 &&
- mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
- using_inactive_queue = true;
- same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
- IWL_DEBUG_TX_QUEUES(mvm,
- "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
- queue, mvmsta->sta_id, tid);
+ /* try harder - perhaps kill an inactive queue */
+ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
+
+ spin_lock_bh(&mvm->queue_info_lock);
}
/* No free queue - we'll have to share */
@@ -797,7 +1350,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
* This will allow avoiding re-acquiring the lock at the end of the
* configuration. On error we'll mark it back as free.
*/
- if ((queue > 0) && !shared_queue)
+ if (queue > 0 && !shared_queue)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
@@ -818,16 +1371,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
- /*
- * If this queue was previously inactive (idle) - we need to free it
- * first
- */
- if (using_inactive_queue) {
- ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
- if (ret)
- return ret;
- }
-
IWL_DEBUG_TX_QUEUES(mvm,
"Allocating %squeue #%d to sta %d on tid %d\n",
shared_queue ? "shared " : "", queue,
@@ -871,7 +1414,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (inc_ssn)
mvmsta->tid_data[tid].seq_number += 0x10;
mvmsta->tid_data[tid].txq_id = queue;
- mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
queue_state = mvmsta->tid_data[tid].state;
@@ -906,129 +1448,6 @@ out_err:
return ret;
}
-static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_UPDATE_QUEUE_TID,
- };
- int tid;
- unsigned long tid_bitmap;
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
-
- spin_lock_bh(&mvm->queue_info_lock);
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- spin_unlock_bh(&mvm->queue_info_lock);
-
- if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
- return;
-
- /* Find any TID for queue */
- tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
- cmd.tid = tid;
- cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
-
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
- if (ret) {
- IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
- queue, ret);
- return;
- }
-
- spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].txq_tid = tid;
- spin_unlock_bh(&mvm->queue_info_lock);
- IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
- queue, tid);
-}
-
-static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
-{
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
- u8 sta_id;
- int tid = -1;
- unsigned long tid_bitmap;
- unsigned int wdg_timeout;
- int ssn;
- int ret = true;
-
- /* queue sharing is disabled on new TX path */
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
-
- lockdep_assert_held(&mvm->mutex);
-
- spin_lock_bh(&mvm->queue_info_lock);
- sta_id = mvm->queue_info[queue].ra_sta_id;
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- spin_unlock_bh(&mvm->queue_info_lock);
-
- /* Find TID for queue, and make sure it is the only one on the queue */
- tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
- if (tid_bitmap != BIT(tid)) {
- IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
- queue, tid_bitmap);
- return;
- }
-
- IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
- tid);
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
-
- if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
- wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
-
- ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
-
- ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
- tid_to_mac80211_ac[tid], ssn,
- wdg_timeout, true);
- if (ret) {
- IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
- return;
- }
-
- /* If aggs should be turned back on - do it */
- if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
- struct iwl_mvm_add_sta_cmd cmd = {0};
-
- mvmsta->tid_disable_agg &= ~BIT(tid);
-
- cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
- cmd.sta_id = mvmsta->sta_id;
- cmd.add_modify = STA_MODE_MODIFY;
- cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
- cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
- cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
- iwl_mvm_add_sta_cmd_size(mvm), &cmd);
- if (!ret) {
- IWL_DEBUG_TX_QUEUES(mvm,
- "TXQ #%d is now aggregated again\n",
- queue);
-
- /* Mark queue internally as aggregating again */
- iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
- }
- }
-
- spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
- spin_unlock_bh(&mvm->queue_info_lock);
-}
-
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
if (tid == IWL_MAX_TID_COUNT)
@@ -1097,47 +1516,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
unsigned long deferred_tid_traffic;
- int queue, sta_id, tid;
-
- /* Check inactivity of queues */
- iwl_mvm_inactivity_check(mvm);
+ int sta_id, tid;
mutex_lock(&mvm->mutex);
- /* No queue reconfiguration in TVQM mode */
- if (iwl_mvm_has_new_tx_api(mvm))
- goto alloc_queues;
-
- /* Reconfigure queues requiring reconfiguration */
- for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
- bool reconfig;
- bool change_owner;
-
- spin_lock_bh(&mvm->queue_info_lock);
- reconfig = (mvm->queue_info[queue].status ==
- IWL_MVM_QUEUE_RECONFIGURING);
+ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
- /*
- * We need to take into account a situation in which a TXQ was
- * allocated to TID x, and then turned shared by adding TIDs y
- * and z. If TID x becomes inactive and is removed from the TXQ,
- * ownership must be given to one of the remaining TIDs.
- * This is mainly because if TID x continues - a new queue can't
- * be allocated for it as long as it is an owner of another TXQ.
- */
- change_owner = !(mvm->queue_info[queue].tid_bitmap &
- BIT(mvm->queue_info[queue].txq_tid)) &&
- (mvm->queue_info[queue].status ==
- IWL_MVM_QUEUE_SHARED);
- spin_unlock_bh(&mvm->queue_info_lock);
-
- if (reconfig)
- iwl_mvm_unshare_queue(mvm, queue);
- else if (change_owner)
- iwl_mvm_change_queue_owner(mvm, queue);
- }
-
-alloc_queues:
/* Go over all stations with deferred traffic */
for_each_set_bit(sta_id, mvm->sta_deferred_frames,
IWL_MVM_STATION_COUNT) {
@@ -1164,23 +1548,19 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int queue;
- bool using_inactive_queue = false, same_sta = false;
/* queue reserving is disabled on new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return 0;
- /*
- * Check for inactive queues, so we don't reach a situation where we
- * can't add a STA due to a shortage in queues that doesn't really exist
- */
- iwl_mvm_inactivity_check(mvm);
+ /* run the general cleanup/unsharing of queues */
+ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure we have free resources for this STA */
if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
- !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+ !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
(mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
IWL_MVM_QUEUE_FREE))
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
@@ -1190,16 +1570,13 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) {
spin_unlock_bh(&mvm->queue_info_lock);
- IWL_ERR(mvm, "No available queues for new station\n");
- return -ENOSPC;
- } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
- /*
- * If this queue is already allocated but inactive we'll need to
- * first free this queue before enabling it again, we'll mark
- * it as reserved to make sure no new traffic arrives on it
- */
- using_inactive_queue = true;
- same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
+ /* try again - this time kick out a queue if needed */
+ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
+ if (queue < 0) {
+ IWL_ERR(mvm, "No available queues for new station\n");
+ return -ENOSPC;
+ }
+ spin_lock_bh(&mvm->queue_info_lock);
}
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
@@ -1207,9 +1584,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
mvmsta->reserved_queue = queue;
- if (using_inactive_queue)
- iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
-
IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
queue, mvmsta->sta_id);
@@ -2101,6 +2475,19 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
&cfg, timeout);
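+ /*
+ * if the AP uses a static WEP key, install it on the multicast
+ * station too so group-addressed frames can be encrypted
+ */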
+ if (mvmvif->ap_wep_key) {
+ u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
+
+ if (key_offset == STA_KEY_IDX_INVALID)
+ return -ENOSPC;
+
+ ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
+ mvmvif->ap_wep_key, 1, 0, NULL, 0,
+ key_offset, 0);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -3133,10 +3520,6 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
- if (vif->type == NL80211_IFTYPE_AP) {
- ret = -EINVAL;
- break;
- }
addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
/* get phase 1 key from mac80211 */
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 0fc211108149..de1a0a2d8723 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -312,9 +312,6 @@ enum iwl_mvm_agg_state {
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow.
* @tx_time: medium time consumed by this A-MPDU
- * @is_tid_active: has this TID sent traffic in the last
- * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
- * field should be ignored.
* @tpt_meas_start: time of the throughput measurements start, is reset every HZ
* @tx_count_last: number of frames transmitted during the last second
* @tx_count: counts the number of frames transmitted since the last reset of
@@ -332,7 +329,6 @@ struct iwl_mvm_tid_data {
u16 txq_id;
u16 ssn;
u16 tx_time;
- bool is_tid_active;
unsigned long tpt_meas_start;
u32 tx_count_last;
u32 tx_count;
@@ -572,8 +568,4 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
-int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
- int ac, int ssn, unsigned int wdg_timeout,
- bool force);
-
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 67f360c0d17e..e02f4eb20359 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -18,9 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program.
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h
index cbbc16fd006a..ff82af11de8d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index cd91bc44259c..e1a6f4e22253 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -254,17 +254,14 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_time_event *te_trig;
int i;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+ ieee80211_vif_to_wdev(te_data->vif),
+ FW_DBG_TRIGGER_TIME_EVENT);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
te_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(te_data->vif),
- trig))
- return;
-
for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
u32 trig_action_bitmap =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index 3d2e8b6159bb..1dd3d01245ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -17,11 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
index 2d0b8a391308..01e0a999063b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
@@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
index 2ff560aa1a82..8138d0606c52 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
@@ -16,11 +16,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 1232f63278eb..0b3e5c99d316 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -18,11 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index ff193dca2020..ec57682efe54 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -35,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -82,15 +79,12 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
- return;
-
if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
return;
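
The hunk above collapses the old three-step pattern (check the trigger is enabled, fetch it, check the stop condition) into a single iwl_fw_dbg_trigger_on() call that returns the trigger or NULL. A minimal user-space sketch of the same consolidation, with illustrative names rather than the driver's real API:

#include <stddef.h>
#include <stdbool.h>

struct trigger { int id; bool enabled; bool stopped; };

/* Return the trigger only if every gating check passes, so callers
 * test one pointer instead of repeating three calls. */
static struct trigger *trigger_on(struct trigger *table, size_t n, int id)
{
	for (size_t i = 0; i < n; i++) {
		struct trigger *t = &table[i];

		if (t->id != id)
			continue;
		return (t->enabled && !t->stopped) ? t : NULL;
	}
	return NULL;
}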
@@ -245,14 +239,18 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
ssn);
} else {
- tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+ if (ieee80211_is_data(fc))
+ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+ else
+ tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;
+
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
tx_flags |= TX_CMD_FLG_SEQ_CTL;
else
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
}
- /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
+ /* Default to 0 (BE) when tid_tspec is set to IWL_MAX_TID_COUNT */
if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
else
@@ -620,6 +618,66 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
}
}
+static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(info->control.vif);
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
+ struct iwl_probe_resp_data *resp_data;
+ u8 *ie, *pos;
+ u8 match[] = {
+ (WLAN_OUI_WFA >> 16) & 0xff,
+ (WLAN_OUI_WFA >> 8) & 0xff,
+ WLAN_OUI_WFA & 0xff,
+ WLAN_OUI_TYPE_WFA_P2P,
+ };
+
+ rcu_read_lock();
+
+ resp_data = rcu_dereference(mvmvif->probe_resp_data);
+ if (!resp_data)
+ goto out;
+
+ if (!resp_data->notif.noa_active)
+ goto out;
+
+ ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
+ mgmt->u.probe_resp.variable,
+ skb->len - base_len,
+ match, 4, 2);
+ if (!ie) {
+ IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
+ goto out;
+ }
+
+ if (skb_tailroom(skb) < resp_data->noa_len) {
+ if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
+ IWL_ERR(mvm,
+ "Failed to reallocate probe resp\n");
+ goto out;
+ }
+ }
+
+ pos = skb_put(skb, resp_data->noa_len);
+
+ *pos++ = WLAN_EID_VENDOR_SPECIFIC;
+ /* Set length of IE body (not including ID and length itself) */
+ *pos++ = resp_data->noa_len - 2;
+ *pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
+ *pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
+ *pos++ = WLAN_OUI_WFA & 0xff;
+ *pos++ = WLAN_OUI_TYPE_WFA_P2P;
+
+ memcpy(pos, &resp_data->notif.noa_attr,
+ resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
+
+out:
+ rcu_read_unlock();
+}
+
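
iwl_mvm_probe_resp_set_noa() above appends a WFA vendor-specific element carrying the NoA attribute to the probe response. A self-contained sketch of the framing it emits -- element ID 221, body length, 3-byte OUI, OUI type, then the payload; put_vendor_ie() is a hypothetical helper, not driver code:

#include <stdint.h>
#include <string.h>

#define EID_VENDOR_SPECIFIC 221	/* 0xdd, per IEEE 802.11 */

/* Writes the element into buf and returns its total length.
 * Assumes body_len <= 251 so the length octet cannot overflow. */
static size_t put_vendor_ie(uint8_t *buf, uint32_t oui, uint8_t oui_type,
			    const uint8_t *body, uint8_t body_len)
{
	uint8_t *pos = buf;

	*pos++ = EID_VENDOR_SPECIFIC;
	*pos++ = 4 + body_len;		/* OUI + OUI type + payload */
	*pos++ = (oui >> 16) & 0xff;
	*pos++ = (oui >> 8) & 0xff;
	*pos++ = oui & 0xff;
	*pos++ = oui_type;
	memcpy(pos, body, body_len);
	return (pos - buf) + body_len;	/* 6 + body_len */
}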
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -628,6 +686,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
struct iwl_device_cmd *dev_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ __le16 fc = hdr->frame_control;
int queue;
/* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
@@ -668,7 +727,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
info.control.vif->type == NL80211_IFTYPE_AP ||
info.control.vif->type == NL80211_IFTYPE_ADHOC) {
- if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ if (!ieee80211_is_data(hdr->frame_control))
sta_id = mvmvif->bcast_sta.sta_id;
else
sta_id = mvmvif->mcast_sta.sta_id;
@@ -689,6 +748,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
}
}
+ if (unlikely(ieee80211_is_probe_resp(fc)))
+ iwl_mvm_probe_resp_set_noa(mvm, skb);
+
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
@@ -775,6 +837,36 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
return 0;
}
+static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ unsigned int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
+ u8 ac = tid_to_mac80211_ac[tid];
+ unsigned int txf;
+ int lmac = IWL_LMAC_24G_INDEX;
+
+ if (iwl_mvm_is_cdb_supported(mvm) &&
+ band == NL80211_BAND_5GHZ)
+ lmac = IWL_LMAC_5G_INDEX;
+
+ /* For HE, redirect to the trigger-based FIFOs */
+ if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
+ ac += 4;
+
+ txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
+
+ /*
+ * Don't send an AMSDU that will be longer than the TXF.
+ * Add a safety margin of 256 bytes for the TX command + headers.
+ * We also want to have the start of the next packet inside the
+ * fifo to be able to send bursts.
+ */
+ return min_t(unsigned int, mvmsta->max_amsdu_len,
+ mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+}
+
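
iwl_mvm_max_amsdu_size() above clamps the A-MSDU at the TX FIFO size minus a 256-byte margin for the TX command and headers, so the start of the next packet still fits in the FIFO. The clamp on its own, as a sketch (assumes fifo_size is larger than the margin, as it is for real FIFOs):

/* Smaller of the peer's advertised A-MSDU limit and the usable FIFO
 * space after the 256-byte margin. */
static unsigned int amsdu_cap(unsigned int peer_max, unsigned int fifo_size)
{
	unsigned int usable = fifo_size - 256;

	return peer_max < usable ? peer_max : usable;
}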
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
@@ -787,7 +879,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 snap_ip_tcp, pad;
unsigned int dbg_max_amsdu_len;
netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
- u8 tid, txf;
+ u8 tid;
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
tcp_hdrlen(skb);
@@ -826,20 +918,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
!(mvmsta->amsdu_enabled & BIT(tid)))
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
- max_amsdu_len = mvmsta->max_amsdu_len;
-
- /* the Tx FIFO to which this A-MSDU will be routed */
- txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]);
-
- /*
- * Don't send an AMSDU that will be longer than the TXF.
- * Add a security margin of 256 for the TX command + headers.
- * We also want to have the start of the next packet inside the
- * fifo to be able to send bursts.
- */
- max_amsdu_len = min_t(unsigned int, max_amsdu_len,
- mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] -
- 256);
+ max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
if (unlikely(dbg_max_amsdu_len))
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
@@ -1010,6 +1089,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
return -1;
+ if (unlikely(ieee80211_is_probe_resp(fc)))
+ iwl_mvm_probe_resp_set_noa(mvm, skb);
+
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
sta, mvmsta->sta_id);
if (!dev_cmd)
@@ -1049,6 +1131,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
/* update the tx_cmd hdr as it was already copied */
tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
}
+ } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) {
+ tid = IWL_TID_NON_QOS;
}
txq_id = mvmsta->tid_data[tid].txq_id;
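
The added else-if above makes TID selection a three-way split: QoS data keeps the TID from its QoS control field, non-QoS data gets IWL_TID_NON_QOS, and everything else stays on the management TID. A compact sketch with stand-in predicates for the ieee80211_is_data()/ieee80211_is_data_qos() tests:

#include <stdbool.h>

enum tid_class { TID_FROM_QOS_FIELD, TID_NON_QOS, TID_MGMT };

static enum tid_class classify_tid(bool is_data, bool is_qos_data)
{
	if (is_data && is_qos_data)
		return TID_FROM_QOS_FIELD;	/* read TID from QoS control */
	if (is_data)
		return TID_NON_QOS;		/* non-QoS data frame */
	return TID_MGMT;			/* management and the rest */
}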
@@ -1056,32 +1140,16 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
- if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
- !mvmsta->tid_data[tid].is_tid_active)) {
- /* If TXQ needs to be allocated... */
- if (txq_id == IWL_MVM_INVALID_QUEUE) {
- iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
-
- /*
- * The frame is now deferred, and the worker scheduled
- * will re-allocate it, so we can free it for now.
- */
- iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
- spin_unlock(&mvmsta->lock);
- return 0;
- }
-
- /* queue should always be active in new TX path */
- WARN_ON(iwl_mvm_has_new_tx_api(mvm));
-
- /* If we are here - TXQ exists and needs to be re-activated */
- spin_lock(&mvm->queue_info_lock);
- mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
- mvmsta->tid_data[tid].is_tid_active = true;
- spin_unlock(&mvm->queue_info_lock);
+ if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
+ iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
- IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
- txq_id);
+ /*
+ * The frame is now deferred, and the worker scheduled
+ * will re-allocate it, so we can free it for now.
+ */
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+ return 0;
}
if (!iwl_mvm_has_new_tx_api(mvm)) {
@@ -1327,15 +1395,13 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tx_status *status_trig;
int i;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+ FW_DBG_TRIGGER_TX_STATUS);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
status_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
- return;
-
for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
/* don't collect on status 0 */
if (!status_trig->statuses[i].status)
@@ -1405,6 +1471,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
while (!skb_queue_empty(&skbs)) {
struct sk_buff *skb = __skb_dequeue(&skbs);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
bool flushed = false;
skb_freed++;
@@ -1434,6 +1501,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
break;
}
+ /*
+ * If we are freeing multiple frames, mark all the frames
+ * but the first one as acked, since they were acknowledged
+ * before
+ */
+ if (skb_freed > 1)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
iwl_mvm_tx_status_check_trigger(mvm, status);
info->status.rates[0].count = tx_resp->failure_frame + 1;
@@ -1449,11 +1524,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- /* W/A FW bug: seq_ctl is wrong when the status isn't success */
- if (status != TX_STATUS_SUCCESS) {
- struct ieee80211_hdr *hdr = (void *)skb->data;
+ /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
+ if (ieee80211_is_back_req(hdr->frame_control))
+ seq_ctl = 0;
+ else if (status != TX_STATUS_SUCCESS)
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
- }
if (unlikely(!seq_ctl)) {
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -1525,7 +1600,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_tx_airtime(mvm, mvmsta,
le16_to_cpu(tx_resp->wireless_media_time));
- if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
+ if (sta->wme && tid != IWL_MGMT_TID) {
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
bool send_eosp_ndp = false;
@@ -1645,20 +1720,24 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
struct iwl_mvm_sta *mvmsta;
int queue = SEQ_TO_QUEUE(sequence);
+ struct ieee80211_sta *sta;
if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
(queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
return;
- if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
- return;
-
iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
rcu_read_lock();
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (WARN_ON_ONCE(!sta || !sta->wme)) {
+ rcu_read_unlock();
+ return;
+ }
+
if (!WARN_ON_ONCE(!mvmsta)) {
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
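
The aggregation-status path now resolves the station and requires it to be a WME (QoS) station, instead of testing the TID against IWL_TID_NON_QOS. A plain-C sketch of that lookup-and-validate step -- in the driver it runs under rcu_read_lock(), which plain C cannot show:

#include <stdbool.h>
#include <stddef.h>

struct sta { bool wme; };

/* Return the station only if it still exists and supports QoS. */
static struct sta *get_qos_sta(struct sta *const *table, size_t n, size_t id)
{
	struct sta *sta;

	if (id >= n)
		return NULL;
	sta = table[id];
	if (!sta || !sta->wme)
		return NULL;
	return sta;
}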
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index b002a7afb5f5..818e1180bbdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -551,7 +546,6 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
- trace_iwlwifi_dev_ucode_error(trans->dev, &table, table.hw_ver, table.brd_ver);
IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
desc_lookup(table.error_id));
IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
@@ -605,36 +599,6 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
-{
- int i;
-
- lockdep_assert_held(&mvm->queue_info_lock);
-
- /* This should not be hit with new TX path */
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return -ENOSPC;
-
- /* Start by looking for a free queue */
- for (i = minq; i <= maxq; i++)
- if (mvm->queue_info[i].hw_queue_refcount == 0 &&
- mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
- return i;
-
- /*
- * If no free queue found - settle for an inactive one to reconfigure
- * Make sure that the inactive queue either already belongs to this STA,
- * or that if it belongs to another one - it isn't the reserved queue
- */
- for (i = minq; i <= maxq; i++)
- if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
- (sta_id == mvm->queue_info[i].ra_sta_id ||
- !mvm->queue_info[i].reserved))
- return i;
-
- return -ENOSPC;
-}
-
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn)
{
@@ -655,7 +619,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
- if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
+ if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
spin_unlock_bh(&mvm->queue_info_lock);
return -ENXIO;
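
The WARN above now keys off tid_bitmap instead of the removed hw_queue_refcount: an empty TID bitmap means the queue is unallocated, and the bitmap's population count carries the old refcount. A sketch, assuming a 32-bit bitmap and the GCC/Clang popcount builtin:

#include <stdbool.h>
#include <stdint.h>

static bool queue_allocated(uint32_t tid_bitmap)
{
	return tid_bitmap != 0;
}

static unsigned int queue_users(uint32_t tid_bitmap)
{
	return (unsigned int)__builtin_popcount(tid_bitmap);	/* old refcount */
}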
@@ -671,249 +635,21 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return ret;
}
-static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
- int mac80211_queue, u8 sta_id, u8 tid)
-{
- bool enable_queue = true;
-
- spin_lock_bh(&mvm->queue_info_lock);
-
- /* Make sure this TID isn't already enabled */
- if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
- spin_unlock_bh(&mvm->queue_info_lock);
- IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
- queue, tid);
- return false;
- }
-
- /* Update mappings and refcounts */
- if (mvm->queue_info[queue].hw_queue_refcount > 0)
- enable_queue = false;
-
- if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
- WARN(mac80211_queue >=
- BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
- "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
- mac80211_queue, queue, sta_id, tid);
- mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
- }
-
- mvm->queue_info[queue].hw_queue_refcount++;
- mvm->queue_info[queue].tid_bitmap |= BIT(tid);
- mvm->queue_info[queue].ra_sta_id = sta_id;
-
- if (enable_queue) {
- if (tid != IWL_MAX_TID_COUNT)
- mvm->queue_info[queue].mac80211_ac =
- tid_to_mac80211_ac[tid];
- else
- mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
-
- mvm->queue_info[queue].txq_tid = tid;
- }
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
- queue, mvm->queue_info[queue].hw_queue_refcount,
- mvm->hw_queue_to_mac80211[queue]);
-
- spin_unlock_bh(&mvm->queue_info_lock);
-
- return enable_queue;
-}
-
-int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
- u8 sta_id, u8 tid, unsigned int timeout)
-{
- struct iwl_tx_queue_cfg_cmd cmd = {
- .flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
- .sta_id = sta_id,
- .tid = tid,
- };
- int queue, size = IWL_DEFAULT_QUEUE_SIZE;
-
- if (cmd.tid == IWL_MAX_TID_COUNT) {
- cmd.tid = IWL_MGMT_TID;
- size = IWL_MGMT_QUEUE_SIZE;
- }
- queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
- SCD_QUEUE_CFG, size, timeout);
-
- if (queue < 0) {
- IWL_DEBUG_TX_QUEUES(mvm,
- "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
- sta_id, tid, queue);
- return queue;
- }
-
- IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
- queue, sta_id, tid);
-
- mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
- IWL_DEBUG_TX_QUEUES(mvm,
- "Enabling TXQ #%d (mac80211 map:0x%x)\n",
- queue, mvm->hw_queue_to_mac80211[queue]);
-
- return queue;
-}
-
-bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int wdg_timeout)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_ENABLE_QUEUE,
- .window = cfg->frame_limit,
- .sta_id = cfg->sta_id,
- .ssn = cpu_to_le16(ssn),
- .tx_fifo = cfg->fifo,
- .aggregate = cfg->aggregate,
- .tid = cfg->tid,
- };
- bool inc_ssn;
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return false;
-
- /* Send the enabling command if we need to */
- if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
- cfg->sta_id, cfg->tid))
- return false;
-
- inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
- NULL, wdg_timeout);
- if (inc_ssn)
- le16_add_cpu(&cmd.ssn, 1);
-
- WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
- "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
-
- return inc_ssn;
-}
-
-int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
- u8 tid, u8 flags)
-{
- struct iwl_scd_txq_cfg_cmd cmd = {
- .scd_queue = queue,
- .action = SCD_CFG_DISABLE_QUEUE,
- };
- bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
- int ret;
-
- if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
- return -EINVAL;
-
- if (iwl_mvm_has_new_tx_api(mvm)) {
- spin_lock_bh(&mvm->queue_info_lock);
-
- if (remove_mac_queue)
- mvm->hw_queue_to_mac80211[queue] &=
- ~BIT(mac80211_queue);
-
- spin_unlock_bh(&mvm->queue_info_lock);
-
- iwl_trans_txq_free(mvm->trans, queue);
-
- return 0;
- }
-
- spin_lock_bh(&mvm->queue_info_lock);
-
- if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
- spin_unlock_bh(&mvm->queue_info_lock);
- return 0;
- }
-
- mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
-
- /*
- * If there is another TID with the same AC - don't remove the MAC queue
- * from the mapping
- */
- if (tid < IWL_MAX_TID_COUNT) {
- unsigned long tid_bitmap =
- mvm->queue_info[queue].tid_bitmap;
- int ac = tid_to_mac80211_ac[tid];
- int i;
-
- for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
- if (tid_to_mac80211_ac[i] == ac)
- remove_mac_queue = false;
- }
- }
-
- if (remove_mac_queue)
- mvm->hw_queue_to_mac80211[queue] &=
- ~BIT(mac80211_queue);
- mvm->queue_info[queue].hw_queue_refcount--;
-
- cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
- SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
- if (cmd.action == SCD_CFG_DISABLE_QUEUE)
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
- queue,
- mvm->queue_info[queue].hw_queue_refcount,
- mvm->hw_queue_to_mac80211[queue]);
-
- /* If the queue is still enabled - nothing left to do in this func */
- if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
- spin_unlock_bh(&mvm->queue_info_lock);
- return 0;
- }
-
- cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
- cmd.tid = mvm->queue_info[queue].txq_tid;
-
- /* Make sure queue info is correct even though we overwrite it */
- WARN(mvm->queue_info[queue].hw_queue_refcount ||
- mvm->queue_info[queue].tid_bitmap ||
- mvm->hw_queue_to_mac80211[queue],
- "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
- queue, mvm->queue_info[queue].hw_queue_refcount,
- mvm->hw_queue_to_mac80211[queue],
- mvm->queue_info[queue].tid_bitmap);
-
- /* If we are here - the queue is freed and we can zero out these vals */
- mvm->queue_info[queue].hw_queue_refcount = 0;
- mvm->queue_info[queue].tid_bitmap = 0;
- mvm->hw_queue_to_mac80211[queue] = 0;
-
- /* Regardless if this is a reserved TXQ for a STA - mark it as false */
- mvm->queue_info[queue].reserved = false;
-
- spin_unlock_bh(&mvm->queue_info_lock);
-
- iwl_trans_txq_disable(mvm->trans, queue, false);
- ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
- sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
-
- if (ret)
- IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
- queue, ret);
- return ret;
-}
-
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
- * @init: This command is sent as part of station initialization right
- * after station has been added.
+ * @sync: This command can be sent synchronously.
*
* The link quality command is sent as the last step of station creation.
* In the special case in which sync is set, we call a callback to clear
* the state indicating that station creation is in progress.
*/
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
{
struct iwl_host_cmd cmd = {
.id = LQ_CMD,
.len = { sizeof(struct iwl_lq_cmd), },
- .flags = init ? 0 : CMD_ASYNC,
+ .flags = sync ? 0 : CMD_ASYNC,
.data = { lq, },
};
@@ -1249,14 +985,12 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_mlme *trig_mlme;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_MLME);
+ if (!trig)
goto out;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
trig_mlme = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- goto out;
if (trig_mlme->stop_connection_loss &&
--trig_mlme->stop_connection_loss)
@@ -1268,171 +1002,6 @@ out:
ieee80211_connection_loss(vif);
}
-/*
- * Remove inactive TIDs of a given queue.
- * If all queue TIDs are inactive - mark the queue as inactive
- * If only some the queue TIDs are inactive - unmap them from the queue
- */
-static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvmsta, int queue,
- unsigned long tid_bitmap)
-{
- int tid;
-
- lockdep_assert_held(&mvmsta->lock);
- lockdep_assert_held(&mvm->queue_info_lock);
-
- if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
- return;
-
- /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- /* If some TFDs are still queued - don't mark TID as inactive */
- if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
- tid_bitmap &= ~BIT(tid);
-
- /* Don't mark as inactive any TID that has an active BA */
- if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
- tid_bitmap &= ~BIT(tid);
- }
-
- /* If all TIDs in the queue are inactive - mark queue as inactive. */
- if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
-
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
- mvmsta->tid_data[tid].is_tid_active = false;
-
- IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
- queue);
- return;
- }
-
- /*
- * If we are here, this is a shared queue and not all TIDs timed-out.
- * Remove the ones that did.
- */
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
-
- mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
- mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
- mvm->queue_info[queue].hw_queue_refcount--;
- mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
- mvmsta->tid_data[tid].is_tid_active = false;
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "Removing inactive TID %d from shared Q:%d\n",
- tid, queue);
- }
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "TXQ #%d left with tid bitmap 0x%x\n", queue,
- mvm->queue_info[queue].tid_bitmap);
-
- /*
- * There may be different TIDs with the same mac queues, so make
- * sure all TIDs have existing corresponding mac queues enabled
- */
- tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
- mvm->hw_queue_to_mac80211[queue] |=
- BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
- }
-
- /* If the queue is marked as shared - "unshare" it */
- if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
- mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
- IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
- queue);
- }
-}
-
-void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
-{
- unsigned long timeout_queues_map = 0;
- unsigned long now = jiffies;
- int i;
-
- if (iwl_mvm_has_new_tx_api(mvm))
- return;
-
- spin_lock_bh(&mvm->queue_info_lock);
- for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
- if (mvm->queue_info[i].hw_queue_refcount > 0)
- timeout_queues_map |= BIT(i);
- spin_unlock_bh(&mvm->queue_info_lock);
-
- rcu_read_lock();
-
- /*
- * If a queue time outs - mark it as INACTIVE (don't remove right away
- * if we don't have to.) This is an optimization in case traffic comes
- * later, and we don't HAVE to use a currently-inactive queue
- */
- for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
- u8 sta_id;
- int tid;
- unsigned long inactive_tid_bitmap = 0;
- unsigned long queue_tid_bitmap;
-
- spin_lock_bh(&mvm->queue_info_lock);
- queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
-
- /* If TXQ isn't in active use anyway - nothing to do here... */
- if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
- mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
- spin_unlock_bh(&mvm->queue_info_lock);
- continue;
- }
-
- /* Check to see if there are inactive TIDs on this queue */
- for_each_set_bit(tid, &queue_tid_bitmap,
- IWL_MAX_TID_COUNT + 1) {
- if (time_after(mvm->queue_info[i].last_frame_time[tid] +
- IWL_MVM_DQA_QUEUE_TIMEOUT, now))
- continue;
-
- inactive_tid_bitmap |= BIT(tid);
- }
- spin_unlock_bh(&mvm->queue_info_lock);
-
- /* If all TIDs are active - finish check on this queue */
- if (!inactive_tid_bitmap)
- continue;
-
- /*
- * If we are here - the queue hadn't been served recently and is
- * in use
- */
-
- sta_id = mvm->queue_info[i].ra_sta_id;
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-
- /*
- * If the STA doesn't exist anymore, it isn't an error. It could
- * be that it was removed since getting the queues, and in this
- * case it should've inactivated its queues anyway.
- */
- if (IS_ERR_OR_NULL(sta))
- continue;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- spin_lock_bh(&mvmsta->lock);
- spin_lock(&mvm->queue_info_lock);
- iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
- inactive_tid_bitmap);
- spin_unlock(&mvm->queue_info_lock);
- spin_unlock_bh(&mvmsta->lock);
- }
-
- rcu_read_unlock();
-}
-
void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
const struct ieee80211_sta *sta,
@@ -1441,14 +1010,12 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_BA);
+ if (!trig)
return;
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- return;
if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 2146fda8da2f..05ed4fb88e0c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -96,9 +96,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
/* Configure debug, for integration */
iwl_pcie_alloc_fw_monitor(trans, 0);
prph_sc_ctrl->hwm_cfg.hwm_base_addr =
- cpu_to_le64(trans_pcie->fw_mon_phys);
+ cpu_to_le64(trans->fw_mon[0].physical);
prph_sc_ctrl->hwm_cfg.hwm_size =
- cpu_to_le32(trans_pcie->fw_mon_size);
+ cpu_to_le32(trans->fw_mon[0].size);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index b2cd7ef5fc3a..6f45a0303ddd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -162,7 +162,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_context_info *ctxt_info;
struct iwl_context_info_rbd_cfg *rx_cfg;
- u32 control_flags = 0;
+ u32 control_flags = 0, rb_size;
int ret;
ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
@@ -177,11 +177,29 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* size is in DWs */
ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
+ switch (trans_pcie->rx_buf_size) {
+ case IWL_AMSDU_2K:
+ rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
+ break;
+ case IWL_AMSDU_4K:
+ rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
+ break;
+ case IWL_AMSDU_8K:
+ rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
+ break;
+ case IWL_AMSDU_12K:
+ rb_size = IWL_CTXT_INFO_RB_SIZE_12K;
+ break;
+ default:
+ WARN_ON(1);
+ rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
+ }
+
BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
- control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
- IWL_CTXT_INFO_TFD_FORMAT_LONG |
- RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
- IWL_CTXT_INFO_RB_CB_SIZE_POS;
+ control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
+ (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
+ IWL_CTXT_INFO_RB_CB_SIZE_POS) |
+ (rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
ctxt_info->control.control_flags = cpu_to_le32(control_flags);
/* initialize RX default queue */
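
control_flags is now composed from three independent fields: the long-TFD format flag, the RX circular-buffer size, and the newly variable RB size selected by the switch above. A sketch of the composition -- the bit positions here are illustrative, the real IWL_CTXT_INFO_* values live in the context-info header:

#include <stdint.h>

#define TFD_FORMAT_LONG	(1u << 0)	/* assumed position */
#define RB_CB_SIZE_POS	4		/* assumed position */
#define RB_SIZE_POS	8		/* assumed position */

static uint32_t mk_control_flags(uint32_t cb_size, uint32_t rb_size)
{
	return TFD_FORMAT_LONG |
	       (cb_size << RB_CB_SIZE_POS) |
	       (rb_size << RB_SIZE_POS);
}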
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index b150da4c6721..9e015212c2c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -646,34 +641,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+ {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index b63d44b7cd7c..f9c4c64dee66 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -17,9 +17,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program.
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
@@ -102,66 +99,6 @@ struct isr_statistics {
u32 unhandled;
};
-#define IWL_CD_STTS_OPTIMIZED_POS 0
-#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
-#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
-#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
-#define IWL_CD_STTS_WIFI_STATUS_POS 4
-#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
-
-/**
- * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
- * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
- * In sniffer mode, when split is used, set in last CD completion. (RX)
- * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
- * all CD completion. (RX)
- * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
- */
-enum iwl_completion_desc_transfer_status {
- IWL_CD_STTS_UNUSED,
- IWL_CD_STTS_UNUSED_2,
- IWL_CD_STTS_END_TRANSFER,
- IWL_CD_STTS_OVERFLOW,
- IWL_CD_STTS_ABORTED,
- IWL_CD_STTS_ERROR,
-};
-
-/**
- * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
- * @IWL_CD_STTS_VALID: the packet is valid (RX)
- * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
- * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
- * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
- * @IWL_CD_STTS_DUP: duplicate packet (RX)
- * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
- * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
- * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
- * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
- * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
- * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
- * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
- * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
- * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
- * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
- */
-enum iwl_completion_desc_wifi_status {
- IWL_CD_STTS_VALID,
- IWL_CD_STTS_FCS_ERR,
- IWL_CD_STTS_SEC_KEY_ERR,
- IWL_CD_STTS_DECRYPTION_ERR,
- IWL_CD_STTS_DUP,
- IWL_CD_STTS_ICV_MIC_ERR,
- IWL_CD_STTS_INTERNAL_SNAP_ERR,
- IWL_CD_STTS_SEC_PORT_FAIL,
- IWL_CD_STTS_BA_OLD_SN,
- IWL_CD_STTS_QOS_NULL,
- IWL_CD_STTS_MAC_HDR_ERR,
- IWL_CD_STTS_MAX_RETRANS,
- IWL_CD_STTS_EX_LIFETIME,
- IWL_CD_STTS_NOT_USED,
- IWL_CD_STTS_REPLAY_ERR,
-};
-
#define IWL_RX_TD_TYPE_MSK 0xff000000
#define IWL_RX_TD_SIZE_MSK 0x00ffffff
#define IWL_RX_TD_SIZE_2K BIT(11)
@@ -464,18 +401,6 @@ enum iwl_image_response_code {
};
/**
- * struct iwl_dram_data
- * @physical: page phy pointer
- * @block: pointer to the allocated block/page
- * @size: size of the block/page
- */
-struct iwl_dram_data {
- dma_addr_t physical;
- void *block;
- int size;
-};
-
-/**
* struct iwl_self_init_dram - dram data used by self init process
* @fw: lmac and umac dram data
* @fw_cnt: total number of items in array
@@ -516,6 +441,7 @@ struct iwl_self_init_dram {
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_write_waitq: wait queue for uCode load
* @cmd_queue - command queue number
+ * @def_rx_queue - default rx queue number
* @rx_buf_size: Rx buffer size
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
@@ -525,9 +451,6 @@ struct iwl_self_init_dram {
* @reg_lock: protect hw register access
* @mutex: to protect stop_device / start_fw / start_hw
* @cmd_in_flight: true when we have a host command in flight
- * @fw_mon_phys: physical address of the buffer for the firmware monitor
- * @fw_mon_page: points to the first page of the buffer for the firmware monitor
- * @fw_mon_size: size of the buffer for the firmware monitor
* @msix_entries: array of MSI-X entries
* @msix_enabled: true if managed to enable MSI-X
* @shared_vec_mask: the type of causes the shared vector handles
@@ -539,7 +462,6 @@ struct iwl_self_init_dram {
* @fh_mask: current unmasked fh causes
* @hw_mask: current unmasked hw causes
* @in_rescan: true if we have triggered a device rescan
- * @scheduled_for_removal: true if we have scheduled a device removal
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -596,6 +518,7 @@ struct iwl_trans_pcie {
u8 page_offs, dev_cmd_offs;
u8 cmd_queue;
+ u8 def_rx_queue;
u8 cmd_fifo;
unsigned int cmd_q_wdg_timeout;
u8 n_no_reclaim_cmds;
@@ -615,10 +538,6 @@ struct iwl_trans_pcie {
bool cmd_hold_nic_awake;
bool ref_cmd_in_flight;
- dma_addr_t fw_mon_phys;
- struct page *fw_mon_page;
- u32 fw_mon_size;
-
struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
bool msix_enabled;
u8 shared_vec_mask;
@@ -631,7 +550,6 @@ struct iwl_trans_pcie {
cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
u16 tx_cmd_queue_size;
bool in_rescan;
- bool scheduled_for_removal;
};
static inline struct iwl_trans_pcie *
@@ -673,6 +591,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
/*****************************************************
* RX
******************************************************/
+int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
@@ -686,6 +605,7 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
struct iwl_rxq *rxq);
+int iwl_pcie_rx_alloc(struct iwl_trans *trans);
/*****************************************************
* ICT - interrupt handling
@@ -700,7 +620,8 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
-int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
+ int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
@@ -717,11 +638,17 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
+void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs);
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
u8 idx)
@@ -1039,6 +966,7 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
@@ -1057,6 +985,7 @@ void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common functions that are used by gen2 transport */
+int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
@@ -1088,8 +1017,16 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq);
+int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
+ struct iwl_txq **intxq, int size,
+ unsigned int timeout);
+int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
- struct iwl_tx_queue_cfg_cmd *cmd,
+ __le16 flags, u8 sta_id, u8 tid,
int cmd_id, int size,
unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index d017aa2a0a8b..e965cc588850 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -17,9 +17,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program.
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
@@ -776,7 +773,7 @@ err:
return -ENOMEM;
}
-static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
@@ -1002,7 +999,7 @@ int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
-static int _iwl_pcie_rx_init(struct iwl_trans *trans)
+int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *def_rxq;
@@ -1107,6 +1104,9 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
/*
* We don't configure the RFH.
* Restock will be done at alive, after firmware configured the RFH.
@@ -1144,6 +1144,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
kfree(trans_pcie->rxq);
}
+static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
+ struct iwl_rb_allocator *rba)
+{
+ spin_lock(&rba->lock);
+ list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+}
+
/*
* iwl_pcie_rx_reuse_rbd - Recycle used RBDs
*
@@ -1175,9 +1183,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
/* Move the 2 RBDs to the allocator ownership.
Allocator has another 6 from pool for the request completion*/
- spin_lock(&rba->lock);
- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
- spin_unlock(&rba->lock);
+ iwl_pcie_rx_move_to_allocator(rxq, rba);
atomic_inc(&rba->req_pending);
queue_work(rba->alloc_wq, &rba->rx_alloc);
@@ -1187,7 +1193,8 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_rxq *rxq,
struct iwl_rx_mem_buffer *rxb,
- bool emergency)
+ bool emergency,
+ int i)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
@@ -1213,6 +1220,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
.truesize = max_len,
};
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ rxcb.status = rxq->cd[i].status;
+
pkt = rxb_addr(&rxcb);
if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
@@ -1267,7 +1277,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
index = SEQ_TO_INDEX(sequence);
cmd_index = iwl_pcie_get_cmd_index(txq, index);
- if (rxq->id == 0)
+ if (rxq->id == trans_pcie->def_rx_queue)
iwl_op_mode_rx(trans->op_mode, &rxq->napi,
&rxcb);
else
@@ -1396,17 +1406,25 @@ restart:
IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
while (i != r) {
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct iwl_rx_mem_buffer *rxb;
-
- if (unlikely(rxq->used_count == rxq->queue_size / 2))
+ /* number of RBDs still waiting for page allocation */
+ u32 rb_pending_alloc =
+ atomic_read(&trans_pcie->rba.req_pending) *
+ RX_CLAIM_REQ_ALLOC;
+
+ if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
+ !emergency)) {
+ iwl_pcie_rx_move_to_allocator(rxq, rba);
emergency = true;
+ }
rxb = iwl_pcie_get_rxb(trans, rxq, i);
if (!rxb)
goto out;
IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
- iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
+ iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
i = (i + 1) & (rxq->queue_size - 1);
@@ -1421,17 +1439,13 @@ restart:
iwl_pcie_rx_allocator_get(trans, rxq);
if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
/* Add the remaining empty RBDs for allocator use */
- spin_lock(&rba->lock);
- list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
- spin_unlock(&rba->lock);
+ iwl_pcie_rx_move_to_allocator(rxq, rba);
} else if (emergency) {
count++;
if (count == 8) {
count = 0;
- if (rxq->used_count < rxq->queue_size / 3)
+ if (rb_pending_alloc < rxq->queue_size / 3)
emergency = false;
rxq->read = i;
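
Emergency mode is now driven by the number of RBDs still waiting for page allocation rather than the queue's used count, with hysteresis: enter at one half of the queue, leave only once pending drops below one third (checked every eight frames), so the path does not flap between modes. The state update as a sketch:

#include <stdbool.h>

static bool update_emergency(bool emergency, unsigned int pending,
			     unsigned int queue_size)
{
	if (!emergency && pending >= queue_size / 2)
		return true;		/* enter emergency */
	if (emergency && pending < queue_size / 3)
		return false;		/* enough buffers again */
	return emergency;
}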
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 2bc67219ed3e..77f3610e5ca9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -55,13 +55,14 @@
#include "iwl-context-info.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
+#include "fw/dbg.h"
/*
* Start up NIC's basic functionality after it has been reset
* (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
* NOTE: This does not load uCode nor start the embedded processor
*/
-static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
+int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
{
int ret = 0;
@@ -164,9 +165,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
trans_pcie->is_down = true;
/* Stop dbgc before stopping device */
- iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
- udelay(100);
- iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+ _iwl_fw_dbg_stop_recording(trans, NULL);
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
@@ -265,7 +264,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_pcie_gen2_tx_init(trans))
+ if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, TFD_CMD_SLOTS))
return -ENOMEM;
/* enable shadow regs in HW */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 7d319b6863fe..5bafb3f46eb8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -19,11 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
@@ -92,7 +87,7 @@
#define IWL_FW_MEM_EXTENDED_START 0x40000
#define IWL_FW_MEM_EXTENDED_END 0x57FFF
-static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
+void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE 64
#define PREFIX_LEN 32
@@ -190,72 +185,42 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (!trans_pcie->fw_mon_page)
- return;
+ int i;
- dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
- trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
- __free_pages(trans_pcie->fw_mon_page,
- get_order(trans_pcie->fw_mon_size));
- trans_pcie->fw_mon_page = NULL;
- trans_pcie->fw_mon_phys = 0;
- trans_pcie->fw_mon_size = 0;
+ for (i = 0; i < trans->num_blocks; i++) {
+ dma_free_coherent(trans->dev, trans->fw_mon[i].size,
+ trans->fw_mon[i].block,
+ trans->fw_mon[i].physical);
+ trans->fw_mon[i].block = NULL;
+ trans->fw_mon[i].physical = 0;
+ trans->fw_mon[i].size = 0;
+ trans->num_blocks--;
+ }
}
-void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
+ u8 max_power, u8 min_power)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct page *page = NULL;
- dma_addr_t phys;
+ void *cpu_addr = NULL;
+ dma_addr_t phys = 0;
u32 size = 0;
u8 power;
- if (!max_power) {
- /* default max_power is maximum */
- max_power = 26;
- } else {
- max_power += 11;
- }
-
- if (WARN(max_power > 26,
- "External buffer size for monitor is too big %d, check the FW TLV\n",
- max_power))
- return;
-
- if (trans_pcie->fw_mon_page) {
- dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
- trans_pcie->fw_mon_size,
- DMA_FROM_DEVICE);
- return;
- }
-
- phys = 0;
- for (power = max_power; power >= 11; power--) {
- int order;
-
+ for (power = max_power; power >= min_power; power--) {
size = BIT(power);
- order = get_order(size);
- page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
- order);
- if (!page)
+ cpu_addr = dma_alloc_coherent(trans->dev, size, &phys,
+ GFP_KERNEL | __GFP_NOWARN |
+ __GFP_ZERO | __GFP_COMP);
+ if (!cpu_addr)
continue;
- phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(trans->dev, phys)) {
- __free_pages(page, order);
- page = NULL;
- continue;
- }
IWL_INFO(trans,
- "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
- size, order);
+ "Allocated 0x%08x bytes for firmware monitor.\n",
+ size);
break;
}
- if (WARN_ON_ONCE(!page))
+ if (WARN_ON_ONCE(!cpu_addr))
return;
if (power != max_power)
@@ -264,9 +229,34 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
(unsigned long)BIT(power - 10),
(unsigned long)BIT(max_power - 10));
- trans_pcie->fw_mon_page = page;
- trans_pcie->fw_mon_phys = phys;
- trans_pcie->fw_mon_size = size;
+ trans->fw_mon[trans->num_blocks].block = cpu_addr;
+ trans->fw_mon[trans->num_blocks].physical = phys;
+ trans->fw_mon[trans->num_blocks].size = size;
+ trans->num_blocks++;
+}
+
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+{
+ if (!max_power) {
+ /* default max_power is maximum */
+ max_power = 26;
+ } else {
+ max_power += 11;
+ }
+
+ if (WARN(max_power > 26,
+ "External buffer size for monitor is too big %d, check the FW TLV\n",
+ max_power))
+ return;
+
+ /*
+ * This function allocates the default fw monitor.
+ * The optional additional ones will be allocated at runtime.
+ */
+ if (trans->num_blocks)
+ return;
+
+ iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}
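
The allocation loop above walks power-of-two sizes downward until dma_alloc_coherent() succeeds. The same fallback strategy in a user-space sketch, with malloc standing in for the DMA allocator (assumes min_pow >= 1 so the unsigned counter cannot wrap):

#include <stdlib.h>

static void *alloc_largest(unsigned int max_pow, unsigned int min_pow,
			   size_t *out_size)
{
	for (unsigned int p = max_pow; p >= min_pow; p--) {
		void *buf = malloc((size_t)1 << p);	/* try 2^p bytes */

		if (buf) {
			*out_size = (size_t)1 << p;
			return buf;
		}
	}
	*out_size = 0;
	return NULL;
}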
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
@@ -930,7 +920,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
int i;
@@ -942,7 +931,7 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
else
IWL_WARN(trans, "PCI should have external buffer debug\n");
- for (i = 0; i < trans->dbg_dest_reg_num; i++) {
+ for (i = 0; i < trans->dbg_n_dest_reg; i++) {
u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
u32 val = le32_to_cpu(dest->reg_ops[i].val);
@@ -981,18 +970,18 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
}
monitor:
- if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+ if (dest->monitor_mode == EXTERNAL_MODE && trans->fw_mon[0].size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
- trans_pcie->fw_mon_phys >> dest->base_shift);
+ trans->fw_mon[0].physical >> dest->base_shift);
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans_pcie->fw_mon_phys +
- trans_pcie->fw_mon_size - 256) >>
+ (trans->fw_mon[0].physical +
+ trans->fw_mon[0].size - 256) >>
dest->end_shift);
else
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans_pcie->fw_mon_phys +
- trans_pcie->fw_mon_size) >>
+ (trans->fw_mon[0].physical +
+ trans->fw_mon[0].size) >>
dest->end_shift);
}
}
@@ -1000,7 +989,6 @@ monitor:
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret = 0;
int first_ucode_section;
@@ -1030,12 +1018,12 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
iwl_pcie_alloc_fw_monitor(trans, 0);
- if (trans_pcie->fw_mon_size) {
+ if (trans->fw_mon[0].size) {
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
- trans_pcie->fw_mon_phys >> 4);
+ trans->fw_mon[0].physical >> 4);
iwl_write_prph(trans, MON_BUFF_END_ADDR,
- (trans_pcie->fw_mon_phys +
- trans_pcie->fw_mon_size) >> 4);
+ (trans->fw_mon[0].physical +
+ trans->fw_mon[0].size) >> 4);
}
} else if (trans->dbg_dest_tlv) {
iwl_pcie_apply_destination(trans);
@@ -1262,13 +1250,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
trans_pcie->is_down = true;
/* Stop dbgc before stopping device */
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
- } else {
- iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
- udelay(100);
- iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
- }
+ _iwl_fw_dbg_stop_recording(trans, NULL);
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
@@ -1830,18 +1812,30 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
+static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
+{
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ return 0x00FFFFFF;
+ else
+ return 0x000FFFFF;
+}
+
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
+ u32 mask = iwl_trans_pcie_prph_msk(trans);
+
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
- ((reg & 0x000FFFFF) | (3 << 24)));
+ ((reg & mask) | (3 << 24)));
return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}
static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
u32 val)
{
+ u32 mask = iwl_trans_pcie_prph_msk(trans);
+
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
- ((addr & 0x000FFFFF) | (3 << 24)));
+ ((addr & mask) | (3 << 24)));
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
@@ -2013,7 +2007,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
struct iwl_trans_pcie_removal *removal;
- if (trans_pcie->scheduled_for_removal)
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
goto err;
IWL_ERR(trans, "Device gone - scheduling removal!\n");
@@ -2039,7 +2033,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
* we don't need to clear this flag, because
* the trans will be freed and reallocated.
*/
- trans_pcie->scheduled_for_removal = true;
+ set_bit(STATUS_TRANS_DEAD, &trans->status);
removal->pdev = to_pci_dev(trans->dev);
INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
@@ -2266,6 +2260,10 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
unsigned long now = jiffies;
u8 wr_ptr;
+ /* Make sure the NIC is still alive in the bus */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return -ENODEV;
+
if (!test_bit(txq_idx, trans_pcie->queue_used))
return -EINVAL;
@@ -2861,10 +2859,9 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data,
u32 monitor_len)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 len = 0;
- if ((trans_pcie->fw_mon_page &&
+ if ((trans->num_blocks &&
trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
trans->dbg_dest_tlv) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
@@ -2892,22 +2889,12 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
cpu_to_le32(iwl_read_prph(trans, base));
len += sizeof(**data) + sizeof(*fw_mon_data);
- if (trans_pcie->fw_mon_page) {
- /*
- * The firmware is now asserted, it won't write anything
- * to the buffer. CPU can take ownership to fetch the
- * data. The buffer will be handed back to the device
- * before the firmware will be restarted.
- */
- dma_sync_single_for_cpu(trans->dev,
- trans_pcie->fw_mon_phys,
- trans_pcie->fw_mon_size,
- DMA_FROM_DEVICE);
+ if (trans->num_blocks) {
memcpy(fw_mon_data->data,
- page_address(trans_pcie->fw_mon_page),
- trans_pcie->fw_mon_size);
+ trans->fw_mon[0].block,
+ trans->fw_mon[0].size);
- monitor_len = trans_pcie->fw_mon_size;
+ monitor_len = trans->fw_mon[0].size;
} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
/*
* Update pointers to reflect actual values after
@@ -2943,36 +2930,15 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
return len;
}
-static struct iwl_trans_dump_data
-*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
- const struct iwl_fw_dbg_trigger_tlv *trigger)
+static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
- struct iwl_fw_error_dump_txcmd *txcmd;
- struct iwl_trans_dump_data *dump_data;
- u32 len, num_rbs = 0;
- u32 monitor_len;
- int i, ptr;
- bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
- !trans->cfg->mq_rx_supported &&
- trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
-
- /* transport dump header */
- len = sizeof(*dump_data);
-
- /* host commands */
- len += sizeof(*data) +
- cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
-
- /* FW monitor */
- if (trans_pcie->fw_mon_page) {
- len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
- trans_pcie->fw_mon_size;
- monitor_len = trans_pcie->fw_mon_size;
+ if (trans->num_blocks) {
+ *len += sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_fw_mon) +
+ trans->fw_mon[0].size;
+ return trans->fw_mon[0].size;
} else if (trans->dbg_dest_tlv) {
- u32 base, end, cfg_reg;
+ u32 base, end, cfg_reg, monitor_len;
if (trans->dbg_dest_tlv->version == 1) {
cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
@@ -3002,11 +2968,39 @@ static struct iwl_trans_dump_data
end += (1 << trans->dbg_dest_tlv->end_shift);
monitor_len = end - base;
}
- len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
- monitor_len;
- } else {
- monitor_len = 0;
+ *len += sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_fw_mon) +
+ monitor_len;
+ return monitor_len;
}
+ return 0;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+ const struct iwl_fw_dbg_trigger_tlv *trigger)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_fw_error_dump_data *data;
+ struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_fw_error_dump_txcmd *txcmd;
+ struct iwl_trans_dump_data *dump_data;
+ u32 len, num_rbs = 0;
+ u32 monitor_len;
+ int i, ptr;
+ bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
+ !trans->cfg->mq_rx_supported &&
+ trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
+
+ /* transport dump header */
+ len = sizeof(*dump_data);
+
+ /* host commands */
+ len += sizeof(*data) +
+ cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+
+ /* FW monitor */
+ monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
if (!(trans->dbg_dump_mask &
@@ -3175,7 +3169,6 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.ref = iwl_trans_pcie_ref, \
.unref = iwl_trans_pcie_unref, \
.dump_data = iwl_trans_pcie_dump_data, \
- .dump_regs = iwl_trans_pcie_dump_regs, \
.d3_suspend = iwl_trans_pcie_d3_suspend, \
.d3_resume = iwl_trans_pcie_d3_resume
@@ -3277,6 +3270,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
PCIE_LINK_STATE_CLKPM);
}
+ trans_pcie->def_rx_queue = 0;
+
if (cfg->use_tfh) {
addr_size = 64;
trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
@@ -3327,6 +3322,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
iwl_disable_interrupts(trans);
trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+ if (trans->hw_rev == 0xffffffff) {
+ dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
+ ret = -EIO;
+ goto out_no_pci;
+ }
+
/*
* In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
* changed, and now the revision step also includes bit 0-1 (no more
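
The periphery-register hunks earlier in this file widen the address mask from 20 to 24 bits for the 22560 family while keeping the opcode in bits 25:24. A minimal userspace sketch of the address-word construction, assuming only what those hunks show:

#include <assert.h>
#include <stdint.h>

#define PRPH_MASK_GEN22560 0x00FFFFFFu /* 24 address bits */
#define PRPH_MASK_LEGACY   0x000FFFFFu /* 20 address bits */

/* Build the word written to HBUS_TARG_PRPH_{R,W}ADDR: the masked
 * address in the low bits, opcode 3 in bits 25:24 as in the hunk. */
static uint32_t prph_addr_word(uint32_t reg, uint32_t mask)
{
	return (reg & mask) | (3u << 24);
}

int main(void)
{
	/* A 24-bit address survives the new mask but would have been
	 * truncated by the legacy one. */
	assert(prph_addr_word(0x00A12345, PRPH_MASK_GEN22560) ==
	       (0x00A12345u | (3u << 24)));
	assert(prph_addr_word(0x00A12345, PRPH_MASK_LEGACY) ==
	       (0x00012345u | (3u << 24)));
	return 0;
}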
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index b99f33ff9123..e880f69eac26 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -87,9 +87,9 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
*/
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
+void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
@@ -127,8 +127,8 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
/*
* iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
*/
-static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
- struct iwl_txq *txq)
+void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_txq *txq)
{
lockdep_assert_held(&txq->lock);
@@ -330,7 +330,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -347,8 +347,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
- tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
+ tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
@@ -416,6 +416,40 @@ out_err:
return NULL;
}
+static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ struct iwl_cmd_meta *out_meta)
+{
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t tb_phys;
+ int tb_idx;
+
+ if (!skb_frag_size(frag))
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ return -ENOMEM;
+ tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
+ skb_frag_size(frag));
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb_frag_address(frag),
+ skb_frag_size(frag));
+ if (tb_idx < 0)
+ return tb_idx;
+
+ out_meta->tbs |= BIT(tb_idx);
+ }
+
+ return 0;
+}
+
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct iwl_txq *txq,
@@ -423,12 +457,13 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
- int tx_cmd_len)
+ int tx_cmd_len,
+ bool pad)
{
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
dma_addr_t tb_phys;
- int i, len, tb1_len, tb2_len;
+ int len, tb1_len, tb2_len;
void *tb1_addr;
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
@@ -447,7 +482,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
IWL_FIRST_TB_SIZE;
- tb1_len = ALIGN(len, 4);
+ if (pad)
+ tb1_len = ALIGN(len, 4);
+ else
+ tb1_len = len;
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -455,6 +493,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
/* set up TFD's third entry to point to remainder of skb's head */
tb2_len = skb_headlen(skb) - hdr_len;
@@ -465,30 +505,13 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb->data + hdr_len,
+ tb2_len);
}
- /* set up the remaining entries to point to the data */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- int tb_idx;
-
- if (!skb_frag_size(frag))
- continue;
-
- tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
- skb_frag_size(frag));
-
- out_meta->tbs |= BIT(tb_idx);
- }
-
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
- trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
+ if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
+ goto out_err;
return tfd;
@@ -526,12 +549,17 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
hdr_len = ieee80211_hdrlen(hdr->frame_control);
- if (amsdu)
+ /*
+ * Only build A-MSDUs here if we are doing so because of GSO;
+ * otherwise the frame may be an A-MSDU for other reasons, e.g.
+ * NAN, or an A-MSDU already built in the higher layers.
+ */
+ if (amsdu && skb_shinfo(skb)->gso_size)
return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
out_meta, hdr_len, len);
return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
- hdr_len, len);
+ hdr_len, len, !amsdu);
}
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -1065,8 +1093,8 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_wake_queue(trans, txq);
}
-static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
- struct iwl_txq *txq)
+void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct device *dev = trans->dev;
@@ -1120,23 +1148,13 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
clear_bit(txq_id, trans_pcie->queue_used);
}
-int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
- struct iwl_tx_queue_cfg_cmd *cmd,
- int cmd_id, int size,
- unsigned int timeout)
+int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
+ struct iwl_txq **intxq, int size,
+ unsigned int timeout)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue_cfg_rsp *rsp;
- struct iwl_txq *txq;
- struct iwl_host_cmd hcmd = {
- .id = cmd_id,
- .len = { sizeof(*cmd) },
- .data = { cmd, },
- .flags = CMD_WANT_SKB,
- };
- int ret, qid;
- u32 wr_ptr;
+ int ret;
+ struct iwl_txq *txq;
txq = kzalloc(sizeof(*txq), GFP_KERNEL);
if (!txq)
return -ENOMEM;
@@ -1164,20 +1182,30 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
txq->wd_timeout = msecs_to_jiffies(timeout);
- cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
- cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ *intxq = txq;
+ return 0;
- ret = iwl_trans_send_cmd(trans, &hcmd);
- if (ret)
- goto error;
+error:
+ iwl_pcie_gen2_txq_free_memory(trans, txq);
+ return ret;
+}
+
+int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_host_cmd *hcmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue_cfg_rsp *rsp;
+ int ret, qid;
+ u32 wr_ptr;
- if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
+ if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
+ sizeof(*rsp))) {
ret = -EINVAL;
goto error_free_resp;
}
- rsp = (void *)hcmd.resp_pkt->data;
+ rsp = (void *)hcmd->resp_pkt->data;
qid = le16_to_cpu(rsp->queue_number);
wr_ptr = le16_to_cpu(rsp->write_pointer);
@@ -1204,11 +1232,48 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
(txq->write_ptr) | (qid << 16));
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
- iwl_free_resp(&hcmd);
+ iwl_free_resp(hcmd);
return qid;
error_free_resp:
- iwl_free_resp(&hcmd);
+ iwl_free_resp(hcmd);
+ iwl_pcie_gen2_txq_free_memory(trans, txq);
+ return ret;
+}
+
+int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
+ __le16 flags, u8 sta_id, u8 tid,
+ int cmd_id, int size,
+ unsigned int timeout)
+{
+ struct iwl_txq *txq = NULL;
+ struct iwl_tx_queue_cfg_cmd cmd = {
+ .flags = flags,
+ .sta_id = sta_id,
+ .tid = tid,
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = cmd_id,
+ .len = { sizeof(cmd) },
+ .data = { &cmd, },
+ .flags = CMD_WANT_SKB,
+ };
+ int ret;
+
+ ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout);
+ if (ret)
+ return ret;
+
+ cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret)
+ goto error;
+
+ return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd);
+
error:
iwl_pcie_gen2_txq_free_memory(trans, txq);
return ret;
@@ -1251,30 +1316,31 @@ void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
}
}
-int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *cmd_queue;
- int txq_id = trans_pcie->cmd_queue, ret;
+ struct iwl_txq *queue;
+ int ret;
- /* alloc and init the command queue */
+ /* alloc and init the tx queue */
if (!trans_pcie->txq[txq_id]) {
- cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
- if (!cmd_queue) {
- IWL_ERR(trans, "Not enough memory for command queue\n");
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) {
+ IWL_ERR(trans, "Not enough memory for tx queue\n");
return -ENOMEM;
}
- trans_pcie->txq[txq_id] = cmd_queue;
- ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
+ trans_pcie->txq[txq_id] = queue;
+ ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
}
} else {
- cmd_queue = trans_pcie->txq[txq_id];
+ queue = trans_pcie->txq[txq_id];
}
- ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
+ ret = iwl_pcie_txq_init(trans, queue, queue_size,
+ (txq_id == trans_pcie->cmd_queue));
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
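
The new pad parameter in iwl_pcie_gen2_build_tx above decides whether the TB1 length is rounded up to a 4-byte boundary. For reference, a hedged sketch of that rounding using the usual power-of-two alignment idiom (the kernel's ALIGN() behaves the same way for power-of-two alignments):

#include <assert.h>
#include <stddef.h>

/* Round x up to the next multiple of the power-of-two a. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	/* pad == true: TB1 lengths are rounded to 4 bytes */
	assert(ALIGN_UP(29, 4) == 32);
	assert(ALIGN_UP(32, 4) == 32);
	/* pad == false (the A-MSDU path): the raw length is kept */
	return 0;
}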
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 93f0d387688a..87b7225fe289 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -17,10 +17,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
@@ -1101,7 +1097,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
- "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+ "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free,
trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
@@ -1188,6 +1184,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
lockdep_assert_held(&trans_pcie->reg_lock);
+ /* Make sure the NIC is still alive in the bus */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return -ENODEV;
+
if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
!trans_pcie->ref_cmd_in_flight) {
trans_pcie->ref_cmd_in_flight = true;
@@ -1230,7 +1230,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
* need to be reclaimed. As a result, some free space forms. If there is
* enough free space (> low mark), wake the stack that feeds us.
*/
-static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
@@ -1912,7 +1912,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
if (test_bit(STATUS_FW_ERROR, &trans->status)) {
- iwl_trans_dump_regs(trans);
+ iwl_trans_pcie_dump_regs(trans);
IWL_ERR(trans, "FW error in SYNC CMD %s\n",
iwl_get_cmd_string(trans, cmd->id));
dump_stack();
@@ -1957,6 +1957,10 @@ cancel:
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
+ /* Make sure the NIC is still alive in the bus */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return -ENODEV;
+
if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
@@ -1973,29 +1977,27 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
- struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_cmd_meta *out_meta)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 tb2_len;
+ u16 head_tb_len;
int i;
/*
* Set up TFD's third entry to point directly to remainder
* of skb's head, if any
*/
- tb2_len = skb_headlen(skb) - hdr_len;
+ head_tb_len = skb_headlen(skb) - hdr_len;
- if (tb2_len > 0) {
- dma_addr_t tb2_phys = dma_map_single(trans->dev,
- skb->data + hdr_len,
- tb2_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta, txq,
- txq->write_ptr);
+ if (head_tb_len > 0) {
+ dma_addr_t tb_phys = dma_map_single(trans->dev,
+ skb->data + hdr_len,
+ head_tb_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
- }
- iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb->data + hdr_len,
+ head_tb_len);
+ iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
}
/* set up the remaining entries to point to the data */
@@ -2010,23 +2012,19 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
- iwl_pcie_tfd_unmap(trans, out_meta, txq,
- txq->write_ptr);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
- }
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb,
+ skb_frag_address(frag),
+ skb_frag_size(frag));
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
+ if (tb_idx < 0)
+ return tb_idx;
out_meta->tbs |= BIT(tb_idx);
}
- trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
- trans_pcie->tfd_size,
- &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
- hdr_len);
- trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
return 0;
}
@@ -2087,7 +2085,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
struct page **page_ptr;
- int ret;
struct tso_t tso;
/* if the packet is protected, then it must be CCMP or GCMP */
@@ -2173,10 +2170,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
if (trans_pcie->sw_csum_tx) {
csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
GFP_ATOMIC);
- if (!csum_skb) {
- ret = -ENOMEM;
- goto out_unmap;
- }
+ if (!csum_skb)
+ return -ENOMEM;
iwl_compute_pseudo_hdr_csum(iph, tcph,
skb->protocol ==
@@ -2197,13 +2192,12 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
hdr_tb_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
dev_kfree_skb(csum_skb);
- ret = -EINVAL;
- goto out_unmap;
+ return -EINVAL;
}
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
- hdr_tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -2223,14 +2217,13 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
size, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
dev_kfree_skb(csum_skb);
- ret = -EINVAL;
- goto out_unmap;
+ return -EINVAL;
}
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
size, false);
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
- size);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
+ size);
data_left -= size;
tso_build_data(skb, &tso, size);
@@ -2258,10 +2251,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
skb_push(skb, hdr_len + iv_len);
return 0;
-
-out_unmap:
- iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
- return ret;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
@@ -2415,6 +2404,13 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
+ trace_iwlwifi_dev_tx(trans->dev, skb,
+ iwl_pcie_get_tfd(trans, txq,
+ txq->write_ptr),
+ trans_pcie->tfd_size,
+ &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
+ hdr_len);
+
/*
* If gso_size wasn't set, don't give the frame "amsdu treatment"
* (adding subframes, etc.).
@@ -2426,9 +2422,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
out_meta, dev_cmd,
tb1_len)))
goto out_err;
- } else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
- out_meta, dev_cmd, tb1_len))) {
- goto out_err;
+ } else {
+ struct sk_buff *frag;
+
+ if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
+ out_meta)))
+ goto out_err;
+
+ skb_walk_frags(skb, frag) {
+ if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
+ out_meta)))
+ goto out_err;
+ }
}
/* building the A-MSDU might have changed this data, so memcpy it now */
@@ -2473,6 +2478,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_unlock(&txq->lock);
return 0;
out_err:
+ iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
spin_unlock(&txq->lock);
return -1;
}
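
The tx.c hunks above drop the per-callsite iwl_pcie_tfd_unmap() calls and unmap once at the shared out_err label instead. A small userspace analogue of that centralized-cleanup shape, with hypothetical names standing in for the mapping steps:

#include <stdlib.h>

/* Hypothetical stand-ins for the DMA map/unmap steps. */
static void *map_buf(size_t n) { return malloc(n); }
static void unmap_all(void *a, void *b) { free(a); free(b); }

static int build_frame(void)
{
	void *head = NULL, *frag = NULL;

	head = map_buf(64);
	if (!head)
		goto out_err;        /* nothing mapped yet: still safe */
	frag = map_buf(128);
	if (!frag)
		goto out_err;        /* head is released centrally */
	/* in the driver, success hands the mappings to the hardware;
	 * the toy frees them here just to stay leak-free */
	unmap_all(head, frag);
	return 0;
out_err:
	unmap_all(head, frag);       /* one unwind point, as in the patch */
	return -1;
}

int main(void) { return build_frame() ? EXIT_FAILURE : EXIT_SUCCESS; }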
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 94ad6fe29e69..21bb68457cfe 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -73,10 +73,6 @@
#define URB_ASYNC_UNLINK 0
#endif
-/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
-static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
-#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
-
struct header_struct {
/* 802.3 */
u8 dest[ETH_ALEN];
@@ -915,7 +911,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
default:
err("%s: Unexpected context state %d", __func__,
state);
- /* fall though */
+ /* fall through */
case EZUSB_CTX_REQ_TIMEOUT:
case EZUSB_CTX_REQ_FAILED:
case EZUSB_CTX_RESP_TIMEOUT:
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 3a4214d362ff..790784568ad2 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -121,8 +121,8 @@ static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb)
}
if (unlikely(!target_skb)) {
if (priv->rx_end - last_addr >= len) {
- target_skb = priv->tx_queue.prev;
- if (!skb_queue_empty(&priv->tx_queue)) {
+ target_skb = skb_peek_tail(&priv->tx_queue);
+ if (target_skb) {
info = IEEE80211_SKB_CB(target_skb);
range = (void *)info->rate_driver_data;
target_addr = range->end_addr;
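
The p54 fix above matters because on an empty sk_buff_head, prev points back at the list head itself, so the old code could treat the head as a packet before the emptiness check; skb_peek_tail() returns NULL for an empty queue. A minimal circular-list sketch of the difference:

#include <assert.h>
#include <stddef.h>

struct node { struct node *next, *prev; };

static void list_init(struct node *head)
{
	head->next = head->prev = head; /* empty: head points at itself */
}

/* Safe tail peek in the style of skb_peek_tail(): NULL when empty. */
static struct node *peek_tail(struct node *head)
{
	return head->prev == head ? NULL : head->prev;
}

int main(void)
{
	struct node head, n;

	list_init(&head);
	/* head.prev == &head here, so dereferencing it as an element,
	 * as the old p54 code effectively did, reads the list head */
	assert(peek_tail(&head) == NULL);

	n.next = &head; n.prev = &head;
	head.next = &n; head.prev = &n;  /* enqueue one element */
	assert(peek_tail(&head) == &n);
	return 0;
}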
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 1068757ec42e..aa8058264d5b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3,6 +3,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -494,7 +495,6 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
static spinlock_t hwsim_radio_lock;
static LIST_HEAD(hwsim_radios);
-static struct workqueue_struct *hwsim_wq;
static struct rhashtable hwsim_radios_rht;
static int hwsim_radio_idx;
static int hwsim_radios_generation = 1;
@@ -520,7 +520,6 @@ struct mac80211_hwsim_data {
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
- struct work_struct destroy_work;
u32 portid;
char alpha2[2];
const struct ieee80211_regdomain *regd;
@@ -2529,23 +2528,20 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz = {
IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
- IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_BSR |
IEEE80211_HE_MAC_CAP2_MU_CASCADING |
IEEE80211_HE_MAC_CAP2_ACK_EN,
.mac_cap_info[3] =
- IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
.mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
- .phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_DUAL_BAND,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
- IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
.phy_cap_info[2] =
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
@@ -2579,18 +2575,16 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz = {
IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
- IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_BSR |
IEEE80211_HE_MAC_CAP2_MU_CASCADING |
IEEE80211_HE_MAC_CAP2_ACK_EN,
.mac_cap_info[3] =
- IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
.mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
.phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_DUAL_BAND |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
@@ -2598,7 +2592,7 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz = {
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
- IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
.phy_cap_info[2] =
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
@@ -2935,8 +2929,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hwsim_radios_generation++;
spin_unlock_bh(&hwsim_radio_lock);
- if (idx > 0)
- hwsim_mcast_new_radio(idx, info, param);
+ hwsim_mcast_new_radio(idx, info, param);
return idx;
@@ -3565,30 +3558,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
-static void destroy_radio(struct work_struct *work)
-{
- struct mac80211_hwsim_data *data =
- container_of(work, struct mac80211_hwsim_data, destroy_work);
-
- hwsim_radios_generation++;
- mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
-}
-
static void remove_user_radios(u32 portid)
{
struct mac80211_hwsim_data *entry, *tmp;
+ LIST_HEAD(list);
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
if (entry->destroy_on_close && entry->portid == portid) {
- list_del(&entry->list);
+ list_move(&entry->list, &list);
rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
hwsim_rht_params);
- INIT_WORK(&entry->destroy_work, destroy_radio);
- queue_work(hwsim_wq, &entry->destroy_work);
+ hwsim_radios_generation++;
}
}
spin_unlock_bh(&hwsim_radio_lock);
+
+ list_for_each_entry_safe(entry, tmp, &list, list) {
+ list_del(&entry->list);
+ mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
+ NULL);
+ }
}
static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -3646,6 +3636,7 @@ static __net_init int hwsim_init_net(struct net *net)
static void __net_exit hwsim_exit_net(struct net *net)
{
struct mac80211_hwsim_data *data, *tmp;
+ LIST_HEAD(list);
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
@@ -3656,17 +3647,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
if (data->netgroup == hwsim_net_get_netgroup(&init_net))
continue;
- list_del(&data->list);
+ list_move(&data->list, &list);
rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
hwsim_rht_params);
hwsim_radios_generation++;
- spin_unlock_bh(&hwsim_radio_lock);
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+
+ list_for_each_entry_safe(data, tmp, &list, list) {
+ list_del(&data->list);
mac80211_hwsim_del_radio(data,
wiphy_name(data->hw->wiphy),
NULL);
- spin_lock_bh(&hwsim_radio_lock);
}
- spin_unlock_bh(&hwsim_radio_lock);
ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
}
@@ -3698,13 +3691,9 @@ static int __init init_mac80211_hwsim(void)
spin_lock_init(&hwsim_radio_lock);
- hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
- if (!hwsim_wq)
- return -ENOMEM;
-
err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
if (err)
- goto out_free_wq;
+ return err;
err = register_pernet_device(&hwsim_net_ops);
if (err)
@@ -3835,8 +3824,6 @@ out_unregister_pernet:
unregister_pernet_device(&hwsim_net_ops);
out_free_rht:
rhashtable_destroy(&hwsim_radios_rht);
-out_free_wq:
- destroy_workqueue(hwsim_wq);
return err;
}
module_init(init_mac80211_hwsim);
@@ -3848,12 +3835,10 @@ static void __exit exit_mac80211_hwsim(void)
hwsim_exit_netlink();
mac80211_hwsim_free();
- flush_workqueue(hwsim_wq);
rhashtable_destroy(&hwsim_radios_rht);
unregister_netdev(hwsim_mon);
platform_driver_unregister(&mac80211_hwsim_driver);
unregister_pernet_device(&hwsim_net_ops);
- destroy_workqueue(hwsim_wq);
}
module_exit(exit_mac80211_hwsim);
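
Both hwsim hunks above use the same shape: under the spinlock, matching radios are moved onto a private list, and the actual teardown (which may sleep) runs only after the lock is dropped. A hedged pthread sketch of the pattern:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct radio {
	struct radio *next;
	int portid;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct radio *radios; /* singly linked for brevity */

/* May sleep; must not run under 'lock'. */
static void destroy_radio(struct radio *r)
{
	printf("destroying radio for port %d\n", r->portid);
	free(r);
}

static void remove_user_radios(int portid)
{
	struct radio **pp, *r, *doomed = NULL;

	pthread_mutex_lock(&lock);
	for (pp = &radios; (r = *pp) != NULL; ) {
		if (r->portid == portid) {
			*pp = r->next;     /* unlink under the lock */
			r->next = doomed;  /* park on a private list */
			doomed = r;
		} else {
			pp = &r->next;
		}
	}
	pthread_mutex_unlock(&lock);

	while (doomed) {                   /* teardown, lock dropped */
		r = doomed;
		doomed = r->next;
		destroy_radio(r);
	}
}

int main(void)
{
	struct radio *r = calloc(1, sizeof(*r));

	r->portid = 42;
	r->next = radios;
	radios = r;
	remove_user_radios(42);
	return 0;
}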
diff --git a/drivers/net/wireless/marvell/libertas/if_cs.c b/drivers/net/wireless/marvell/libertas/if_cs.c
index 7d88223f890b..cebf03c6a622 100644
--- a/drivers/net/wireless/marvell/libertas/if_cs.c
+++ b/drivers/net/wireless/marvell/libertas/if_cs.c
@@ -900,8 +900,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
/* Make this card known to the libertas driver */
priv = lbs_add_card(card, &p_dev->dev);
- if (!priv) {
- ret = -ENOMEM;
+ if (IS_ERR(priv)) {
+ ret = PTR_ERR(priv);
goto out2;
}
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 43743c26c071..8d98e7fdd27c 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1206,8 +1206,8 @@ static int if_sdio_probe(struct sdio_func *func,
priv = lbs_add_card(card, &func->dev);
- if (!priv) {
- ret = -ENOMEM;
+ if (IS_ERR(priv)) {
+ ret = PTR_ERR(priv);
goto free;
}
@@ -1317,6 +1317,10 @@ static int if_sdio_suspend(struct device *dev)
if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
dev_info(dev, "Suspend without wake params -- powering down card\n");
if (priv->fw_ready) {
+ ret = lbs_suspend(priv);
+ if (ret)
+ return ret;
+
priv->power_up_on_resume = true;
if_sdio_power_off(card);
}
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index e9aec6cb1105..504d6e096476 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1146,8 +1146,8 @@ static int if_spi_probe(struct spi_device *spi)
* This will call alloc_etherdev.
*/
priv = lbs_add_card(card, &spi->dev);
- if (!priv) {
- err = -ENOMEM;
+ if (IS_ERR(priv)) {
+ err = PTR_ERR(priv);
goto free_card;
}
card->priv = priv;
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index c67a8e7be310..220dcdee8d2b 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -254,8 +254,11 @@ static int if_usb_probe(struct usb_interface *intf,
goto dealloc;
}
- if (!(priv = lbs_add_card(cardp, &intf->dev)))
+ priv = lbs_add_card(cardp, &intf->dev);
+ if (IS_ERR(priv)) {
+ r = PTR_ERR(priv);
goto err_add_card;
+ }
cardp->priv = priv;
@@ -456,8 +459,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn,
cardp);
- cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
-
lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) {
lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index f22e1c220cba..f7db60bc7c7f 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -907,25 +907,29 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
struct net_device *dev;
struct wireless_dev *wdev;
struct lbs_private *priv = NULL;
+ int err;
/* Allocate an Ethernet device and register it */
wdev = lbs_cfg_alloc(dmdev);
if (IS_ERR(wdev)) {
+ err = PTR_ERR(wdev);
pr_err("cfg80211 init failed\n");
- goto done;
+ goto err_cfg;
}
wdev->iftype = NL80211_IFTYPE_STATION;
priv = wdev_priv(wdev);
priv->wdev = wdev;
- if (lbs_init_adapter(priv)) {
+ err = lbs_init_adapter(priv);
+ if (err) {
pr_err("failed to initialize adapter structure\n");
goto err_wdev;
}
dev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup);
if (!dev) {
+ err = -ENOMEM;
dev_err(dmdev, "no memory for network device instance\n");
goto err_adapter;
}
@@ -949,6 +953,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
init_waitqueue_head(&priv->waitq);
priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main");
if (IS_ERR(priv->main_thread)) {
+ err = PTR_ERR(priv->main_thread);
lbs_deb_thread("Error creating main thread.\n");
goto err_ndev;
}
@@ -961,7 +966,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
priv->wol_gap = 20;
priv->ehs_remove_supported = true;
- goto done;
+ return priv;
err_ndev:
free_netdev(dev);
@@ -972,10 +977,8 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
err_wdev:
lbs_cfg_free(priv);
- priv = NULL;
-
-done:
- return priv;
+ err_cfg:
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(lbs_add_card);
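
The libertas conversion above switches lbs_add_card() from returning NULL to returning ERR_PTR(err), so the if_cs/if_sdio/if_spi/if_usb callers recover the precise errno with PTR_ERR() instead of assuming -ENOMEM. A userspace sketch of the encoding, relying on the usual convention that the top 4095 values of the address space are never valid pointers:

#include <assert.h>
#include <errno.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *add_card(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM); /* error encoded in the pointer */
	return malloc(16);               /* ordinary pointer on success */
}

int main(void)
{
	void *p = add_card(1);

	assert(IS_ERR(p) && PTR_ERR(p) == -ENOMEM);
	p = add_card(0);
	assert(!IS_ERR(p));
	free(p);
	return 0;
}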
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index e92fc5001171..789337ea676a 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -605,9 +605,10 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
{
unsigned long flags;
- if (recvlength > LBS_CMD_BUFFER_SIZE) {
+ if (recvlength < MESSAGE_HEADER_LEN ||
+ recvlength > LBS_CMD_BUFFER_SIZE) {
lbtf_deb_usbd(&cardp->udev->dev,
- "The receive buffer is too large\n");
+ "The receive buffer is invalid: %d\n", recvlength);
kfree_skb(skb);
return;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 433c6a16870b..d445acc4786b 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -298,6 +298,19 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
struct mwifiex_adapter *adapter = ctx->adapter;
struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+ if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
+ if (card->rx_cmd_ep == ctx->ep) {
+ mwifiex_dbg(adapter, INFO, "%s: free rx_cmd skb\n",
+ __func__);
+ dev_kfree_skb_any(ctx->skb);
+ ctx->skb = NULL;
+ }
+ mwifiex_dbg(adapter, ERROR,
+ "%s: card removed/suspended, EP %d rx_cmd URB submit skipped\n",
+ __func__, ctx->ep);
+ return -1;
+ }
+
if (card->rx_cmd_ep != ctx->ep) {
ctx->skb = dev_alloc_skb(size);
if (!ctx->skb) {
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index b6c5f17dca30..0ccbcd7e887d 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -5,33 +5,13 @@ config MT76_USB
tristate
depends on MT76_CORE
-config MT76x2_COMMON
+config MT76x02_LIB
tristate
- depends on MT76_CORE
-
-config MT76x0U
- tristate "MediaTek MT76x0U (USB) support"
- select MT76_CORE
- depends on MAC80211
- depends on USB
- help
- This adds support for MT7610U-based wireless USB dongles.
-
-config MT76x2E
- tristate "MediaTek MT76x2E (PCIe) support"
select MT76_CORE
- select MT76x2_COMMON
- depends on MAC80211
- depends on PCI
- ---help---
- This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
-config MT76x2U
- tristate "MediaTek MT76x2U (USB) support"
- select MT76_CORE
+config MT76x02_USB
+ tristate
select MT76_USB
- select MT76x2_COMMON
- depends on MAC80211
- depends on USB
- help
- This adds support for MT7612U-based wireless USB dongles.
+
+source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 158d10d2716c..9b8d7488c545 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -1,9 +1,7 @@
obj-$(CONFIG_MT76_CORE) += mt76.o
obj-$(CONFIG_MT76_USB) += mt76-usb.o
-obj-$(CONFIG_MT76x0U) += mt76x0/
-obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
-obj-$(CONFIG_MT76x2E) += mt76x2e.o
-obj-$(CONFIG_MT76x2U) += mt76x2u.o
+obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
+obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
@@ -12,20 +10,13 @@ mt76-usb-y := usb.o usb_trace.o usb_mcu.o
CFLAGS_trace.o := -I$(src)
CFLAGS_usb_trace.o := -I$(src)
+CFLAGS_mt76x02_trace.o := -I$(src)
-mt76x2-common-y := \
- mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \
- mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \
- mt76x2_debugfs.o
+mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
+ mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
+ mt76x02_txrx.o mt76x02_trace.o
-mt76x2e-y := \
- mt76x2_pci.o mt76x2_dma.o \
- mt76x2_main.o mt76x2_init.o mt76x2_tx.o \
- mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \
- mt76x2_dfs.o mt76x2_trace.o
+mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
-mt76x2u-y := \
- mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \
- mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o
-
-CFLAGS_mt76x2_trace.o := -I$(src)
+obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index a38d05dea599..a5adf22c3ffa 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -56,6 +56,35 @@ mt76_queues_read(struct seq_file *s, void *data)
return 0;
}
+void mt76_seq_puts_array(struct seq_file *file, const char *str,
+ s8 *val, int len)
+{
+ int i;
+
+ seq_printf(file, "%10s:", str);
+ for (i = 0; i < len; i++)
+ seq_printf(file, " %2d", val[i]);
+ seq_puts(file, "\n");
+}
+EXPORT_SYMBOL_GPL(mt76_seq_puts_array);
+
+static int mt76_read_rate_txpower(struct seq_file *s, void *data)
+{
+ struct mt76_dev *dev = dev_get_drvdata(s->private);
+
+ mt76_seq_puts_array(s, "CCK", dev->rate_power.cck,
+ ARRAY_SIZE(dev->rate_power.cck));
+ mt76_seq_puts_array(s, "OFDM", dev->rate_power.ofdm,
+ ARRAY_SIZE(dev->rate_power.ofdm));
+ mt76_seq_puts_array(s, "STBC", dev->rate_power.stbc,
+ ARRAY_SIZE(dev->rate_power.stbc));
+ mt76_seq_puts_array(s, "HT", dev->rate_power.ht,
+ ARRAY_SIZE(dev->rate_power.ht));
+ mt76_seq_puts_array(s, "VHT", dev->rate_power.vht,
+ ARRAY_SIZE(dev->rate_power.vht));
+ return 0;
+}
+
struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{
struct dentry *dir;
@@ -72,6 +101,8 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
if (dev->otp.data)
debugfs_create_blob("otp", 0400, dir, &dev->otp);
debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read);
+ debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
+ mt76_read_rate_txpower);
return dir;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index c51da2205b93..f7fbd7016403 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -322,19 +322,13 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
int len = SKB_WITH_OVERHEAD(q->buf_size);
int offset = q->buf_offset;
int idx;
- void *(*alloc)(unsigned int fragsz);
-
- if (napi)
- alloc = napi_alloc_frag;
- else
- alloc = netdev_alloc_frag;
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
struct mt76_queue_buf qbuf;
- buf = alloc(q->buf_size);
+ buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
if (!buf)
break;
@@ -361,6 +355,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
+ struct page *page;
void *buf;
bool more;
@@ -373,6 +368,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
skb_free_frag(buf);
} while (1);
spin_unlock_bh(&q->lock);
+
+ if (!q->rx_page.va)
+ return;
+
+ page = virt_to_page(q->rx_page.va);
+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+ memset(&q->rx_page, 0, sizeof(q->rx_page));
}
static void
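
The dma.c change above replaces per-buffer napi/netdev frag allocation with a per-queue page_frag_cache, which __page_frag_cache_drain() later releases in one go using the accumulated refcount bias. A rough userspace analogue — a bump allocator over one page with a batched release, hypothetical names throughout:

#include <assert.h>
#include <stdlib.h>

struct frag_cache {
	char *page;       /* one backing allocation */
	size_t off, size; /* bump pointer */
	int bias;         /* outstanding fragments */
};

static void *frag_alloc(struct frag_cache *c, size_t n)
{
	if (!c->page) {
		c->size = 4096;
		c->page = malloc(c->size);
		c->off = 0;
		c->bias = 0;
	}
	if (!c->page || c->off + n > c->size)
		return NULL;   /* a real cache would grab a fresh page */
	c->bias++;             /* mirrors the pagecnt_bias bookkeeping */
	c->off += n;
	return c->page + c->off - n;
}

static void frag_cache_drain(struct frag_cache *c)
{
	/* all fragments are released together, as in rx_cleanup */
	free(c->page);
	c->page = NULL;
}

int main(void)
{
	struct frag_cache c = { 0 };

	assert(frag_alloc(&c, 2048));
	assert(frag_alloc(&c, 2048));
	frag_cache_drain(&c);
	return 0;
}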
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 27248e24a19b..357cc356342d 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -25,34 +25,6 @@
#define MT_DMA_CTL_LAST_SEC0 BIT(30)
#define MT_DMA_CTL_DMA_DONE BIT(31)
-#define MT_TXD_INFO_LEN GENMASK(15, 0)
-#define MT_TXD_INFO_NEXT_VLD BIT(16)
-#define MT_TXD_INFO_TX_BURST BIT(17)
-#define MT_TXD_INFO_80211 BIT(19)
-#define MT_TXD_INFO_TSO BIT(20)
-#define MT_TXD_INFO_CSO BIT(21)
-#define MT_TXD_INFO_WIV BIT(24)
-#define MT_TXD_INFO_QSEL GENMASK(26, 25)
-#define MT_TXD_INFO_DPORT GENMASK(29, 27)
-#define MT_TXD_INFO_TYPE GENMASK(31, 30)
-
-#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
-#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
-#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
-#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
-#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
-#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
-#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
-#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
-
-/* MCU request message header */
-#define MT_MCU_MSG_LEN GENMASK(15, 0)
-#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
-#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
-#define MT_MCU_MSG_PORT GENMASK(29, 27)
-#define MT_MCU_MSG_TYPE GENMASK(31, 30)
-#define MT_MCU_MSG_TYPE_CMD BIT(30)
-
#define MT_DMA_HDR_LEN 4
#define MT_RX_INFO_LEN 4
#define MT_FCE_INFO_LEN 4
@@ -65,14 +37,21 @@ struct mt76_desc {
__le32 info;
} __packed __aligned(4);
-enum dma_msg_port {
- WLAN_PORT,
- CPU_RX_PORT,
- CPU_TX_PORT,
- HOST_PORT,
- VIRTUAL_CPU_RX_PORT,
- VIRTUAL_CPU_TX_PORT,
- DISCARD,
+enum mt76_qsel {
+ MT_QSEL_MGMT,
+ MT_QSEL_HCCA,
+ MT_QSEL_EDCA,
+ MT_QSEL_EDCA_2,
+};
+
+enum mt76_mcu_evt_type {
+ EVT_CMD_DONE,
+ EVT_CMD_ERROR,
+ EVT_CMD_RETRY,
+ EVT_EVENT_PWR_RSP,
+ EVT_EVENT_WOW_RSP,
+ EVT_EVENT_CARRIER_DETECT_RSP,
+ EVT_EVENT_DFS_DETECT_RSP,
};
int mt76_dma_attach(struct mt76_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 029d54bce9e8..2a699e8b79bf 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -283,6 +283,7 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->cc_lock);
+ mutex_init(&dev->mutex);
init_waitqueue_head(&dev->tx_wait);
return dev;
@@ -305,6 +306,8 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
wiphy->available_antennas_tx = dev->antenna_mask;
wiphy->available_antennas_rx = dev->antenna_mask;
@@ -472,7 +475,7 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
-static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
+struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct mt76_rx_status mstat;
@@ -497,6 +500,7 @@ static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
return wcid_to_sta(mstat.wcid);
}
+EXPORT_SYMBOL(mt76_rx_convert);
static int
mt76_check_ccmp_pn(struct sk_buff *skb)
@@ -546,6 +550,12 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
struct mt76_wcid *wcid = status->wcid;
bool ps;
+ if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
+ sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
+ if (sta)
+ wcid = status->wcid = (struct mt76_wcid *) sta->drv_priv;
+ }
+
if (!wcid || !wcid->sta)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 09a14dead6e3..1d6bbce76041 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -21,7 +21,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
{
u32 val;
- val = ioread32(dev->regs + offset);
+ val = ioread32(dev->mmio.regs + offset);
trace_reg_rr(dev, offset, val);
return val;
@@ -30,7 +30,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
trace_reg_wr(dev, offset, val);
- iowrite32(val, dev->regs + offset);
+ iowrite32(val, dev->mmio.regs + offset);
}
static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
@@ -43,7 +43,31 @@ static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data,
int len)
{
- __iowrite32_copy(dev->regs + offset, data, len >> 2);
+ __iowrite32_copy(dev->mmio.regs + offset, data, len >> 2);
+}
+
+static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len)
+{
+ while (len > 0) {
+ mt76_mmio_wr(dev, data->reg, data->value);
+ data++;
+ len--;
+ }
+
+ return 0;
+}
+
+static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int len)
+{
+ while (len > 0) {
+ data->value = mt76_mmio_rr(dev, data->reg);
+ data++;
+ len--;
+ }
+
+ return 0;
}
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
@@ -53,9 +77,17 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
.rmw = mt76_mmio_rmw,
.wr = mt76_mmio_wr,
.copy = mt76_mmio_copy,
+ .wr_rp = mt76_mmio_wr_rp,
+ .rd_rp = mt76_mmio_rd_rp,
+ .type = MT76_BUS_MMIO,
};
dev->bus = &mt76_mmio_ops;
- dev->regs = regs;
+ dev->mmio.regs = regs;
+
+ skb_queue_head_init(&dev->mmio.mcu.res_q);
+ init_waitqueue_head(&dev->mmio.mcu.wait);
+ spin_lock_init(&dev->mmio.irq_lock);
+ mutex_init(&dev->mmio.mcu.mutex);
}
EXPORT_SYMBOL_GPL(mt76_mmio_init);
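
mt76_mmio_wr_rp()/mt76_mmio_rd_rp() above give the MMIO bus the same register-pair batch interface as the USB MCU path, so callers can program tables without caring about the bus. A sketch of the call shape, with a hypothetical table:

#include <stdint.h>
#include <stdio.h>

struct reg_pair { uint32_t reg, value; };

/* Hypothetical device op mirroring ->wr_rp(): write pairs in order. */
static int wr_rp(const struct reg_pair *rp, int len)
{
	while (len-- > 0) {
		printf("reg %#06x <- %#010x\n", rp->reg, rp->value);
		rp++;
	}
	return 0;
}

int main(void)
{
	/* hypothetical calibration table, programmed in one batch */
	static const struct reg_pair tbl[] = {
		{ 0x1004, 0x00000001 },
		{ 0x1008, 0x000000ff },
	};

	return wr_rp(tbl, 2);
}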
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 2eab35879163..3bfa7f5e3513 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -33,14 +33,32 @@
struct mt76_dev;
struct mt76_wcid;
+struct mt76_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+enum mt76_bus_type {
+ MT76_BUS_MMIO,
+ MT76_BUS_USB,
+};
+
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
int len);
+ int (*wr_rp)(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *rp, int len);
+ int (*rd_rp)(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *rp, int len);
+ enum mt76_bus_type type;
};
+#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
+#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)
+
enum mt76_txq_id {
MT_TXQ_VO = IEEE80211_AC_VO,
MT_TXQ_VI = IEEE80211_AC_VI,
@@ -112,6 +130,18 @@ struct mt76_queue {
dma_addr_t desc_dma;
struct sk_buff *rx_head;
+ struct page_frag_cache rx_page;
+ spinlock_t rx_page_lock;
+};
+
+struct mt76_mcu_ops {
+ struct sk_buff *(*mcu_msg_alloc)(const void *data, int len);
+ int (*mcu_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp);
+ int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *rp, int len);
+ int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *rp, int len);
};
struct mt76_queue_ops {
@@ -143,6 +173,8 @@ enum mt76_wcid_flags {
MT_WCID_FLAG_PS,
};
+#define MT76_N_WCIDS 128
+
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
@@ -210,7 +242,6 @@ enum {
MT76_OFFCHANNEL,
MT76_REMOVED,
MT76_READING_STATS,
- MT76_MORE_STATS,
};
struct mt76_hw_cap {
@@ -252,6 +283,19 @@ struct mt76_sband {
struct mt76_channel_state *chan;
};
+struct mt76_rate_power {
+ union {
+ struct {
+ s8 cck[4];
+ s8 ofdm[8];
+ s8 stbc[10];
+ s8 ht[16];
+ s8 vht[10];
+ };
+ s8 all[48];
+ };
+};
+
/* addr req mask */
#define MT_VEND_TYPE_EEPROM BIT(31)
#define MT_VEND_TYPE_CFG BIT(30)
@@ -307,7 +351,27 @@ struct mt76_usb {
struct completion cmpl;
struct mt76u_buf res;
u32 msg_seq;
+
+ /* multiple reads */
+ struct mt76_reg_pair *rp;
+ int rp_len;
+ u32 base;
+ bool burst;
+ } mcu;
+};
+
+struct mt76_mmio {
+ struct mt76e_mcu {
+ struct mutex mutex;
+
+ wait_queue_head_t wait;
+ struct sk_buff_head res_q;
+
+ u32 msg_seq;
} mcu;
+ void __iomem *regs;
+ spinlock_t irq_lock;
+ u32 irqmask;
};
struct mt76_dev {
@@ -317,9 +381,12 @@ struct mt76_dev {
spinlock_t lock;
spinlock_t cc_lock;
+
+ struct mutex mutex;
+
const struct mt76_bus_ops *bus;
const struct mt76_driver_ops *drv;
- void __iomem *regs;
+ const struct mt76_mcu_ops *mcu_ops;
struct device *dev;
struct net_device napi_dev;
@@ -334,11 +401,17 @@ struct mt76_dev {
wait_queue_head_t tx_wait;
+ unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
+
+ struct mt76_wcid global_wcid;
+ struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
+
u8 macaddr[ETH_ALEN];
u32 rev;
unsigned long state;
u8 antenna_mask;
+ u16 chainmask;
struct mt76_sband sband_2g;
struct mt76_sband sband_5g;
@@ -346,6 +419,10 @@ struct mt76_dev {
struct debugfs_blob_wrapper otp;
struct mt76_hw_cap cap;
+ struct mt76_rate_power rate_power;
+ int txpower_conf;
+ int txpower_cur;
+
u32 debugfs_reg;
struct led_classdev led_cdev;
@@ -353,7 +430,12 @@ struct mt76_dev {
bool led_al;
u8 led_pin;
- struct mt76_usb usb;
+ u32 rxfilter;
+
+ union {
+ struct mt76_mmio mmio;
+ struct mt76_usb usb;
+ };
};
enum mt76_phy_type {
@@ -364,18 +446,6 @@ enum mt76_phy_type {
MT_PHY_TYPE_VHT,
};
-struct mt76_rate_power {
- union {
- struct {
- s8 cck[4];
- s8 ofdm[8];
- s8 ht[16];
- s8 vht[10];
- };
- s8 all[38];
- };
-};
-
struct mt76_rx_status {
struct mt76_wcid *wcid;
@@ -399,10 +469,23 @@ struct mt76_rx_status {
s8 chain_signal[IEEE80211_MAX_CHAINS];
};
+#define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__)
+#define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__)
+#define __mt76_rmw(dev, ...) (dev)->bus->rmw((dev), __VA_ARGS__)
+#define __mt76_wr_copy(dev, ...) (dev)->bus->copy((dev), __VA_ARGS__)
+
+#define __mt76_set(dev, offset, val) __mt76_rmw(dev, offset, 0, val)
+#define __mt76_clear(dev, offset, val) __mt76_rmw(dev, offset, val, 0)
+
#define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr_rp(dev, ...) (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
+
+#define mt76_mcu_msg_alloc(dev, ...) (dev)->mt76.mcu_ops->mcu_msg_alloc(__VA_ARGS__)
+#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
@@ -413,6 +496,9 @@ struct mt76_rx_status {
#define mt76_rmw_field(_dev, _reg, _field, _val) \
mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+#define __mt76_rmw_field(_dev, _reg, _field, _val) \
+ __mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
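The __mt76_* accessors take a bare struct mt76_dev *, while the unprefixed macros expect a driver-private structure embedding it as .mt76; set/clear are just rmw with one mask left empty, and the new *_rp macros route register-pair bursts through the per-bus ops. Typical usage (the MT_EXAMPLE_* names are hypothetical):

	mt76_set(dev, MT_EXAMPLE_CFG, BIT(0));	/* rmw(reg, 0, BIT(0)): OR in a bit */
	mt76_clear(dev, MT_EXAMPLE_CFG, BIT(1));	/* rmw(reg, BIT(1), 0): mask out a bit */
	/* rewrite one bitfield, leaving the rest of the register intact */
	mt76_rmw_field(dev, MT_EXAMPLE_CFG, MT_EXAMPLE_CFG_QLEN, 7);
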
#define mt76_hw(dev) (dev)->mt76.hw
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
@@ -469,6 +555,8 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
void mt76_unregister_device(struct mt76_dev *dev);
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+void mt76_seq_puts_array(struct seq_file *file, const char *str,
+ s8 *val, int len);
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
@@ -485,13 +573,7 @@ static inline int mt76_decr(int val, int size)
return (val - 1) & (size - 1);
}
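mt76_incr() and mt76_decr() assume a power-of-two ring size, letting the bitwise AND stand in for a modulo; for example:

	mt76_decr(0, 8);	/* (0 - 1) & 7 == 7: wraps to the last slot */
	mt76_decr(5, 8);	/* (5 - 1) & 7 == 4 */
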
-/* Hardware uses mirrored order of queues with Q3
- * having the highest priority
- */
-static inline u8 q2hwq(u8 q)
-{
- return q ^ 0x3;
-}
+u8 mt76_ac_to_hwq(u8 ac);
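The XOR mapping documented above moves into the shared mt76_ac_to_hwq(); one plausible implementation consistent with the removed helper (an assumption, the core may use a lookup table instead):

	u8 mt76_ac_to_hwq(u8 ac)
	{
		/* mac80211 AC 0 (VO) lands on hw queue 3, the highest priority */
		return ac ^ 0x3;
	}
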
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
@@ -543,6 +625,8 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key);
+struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
+
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
@@ -599,15 +683,9 @@ int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_queues(struct mt76_dev *dev);
void mt76u_stop_stat_wk(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
-int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
-int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
- int data_len, u32 max_payload, u32 offset);
void mt76u_mcu_complete_urb(struct urb *urb);
-struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len);
-int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
- int cmd, bool wait_resp);
-void mt76u_mcu_fw_reset(struct mt76_dev *dev);
int mt76u_mcu_init_rx(struct mt76_dev *dev);
+void mt76u_mcu_deinit(struct mt76_dev *dev);
#endif
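The mt76_mcu_msg_alloc/mt76_mcu_send_msg macros above imply an ops table shaped roughly as follows, letting common code talk to the MCU without knowing whether the transport is USB bulk endpoints or MMIO DMA (field names are inferred from the macro call sites, not quoted from the header):

	struct mt76_mcu_ops {
		struct sk_buff *(*mcu_msg_alloc)(const void *data, int len);
		int (*mcu_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				    int cmd, bool wait_resp);
	};
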
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
new file mode 100644
index 000000000000..9a6157db3893
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig
@@ -0,0 +1,20 @@
+config MT76x0_COMMON
+ tristate
+ select MT76x02_LIB
+
+config MT76x0U
+ tristate "MediaTek MT76x0U (USB) support"
+ select MT76x0_COMMON
+ select MT76x02_USB
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7610U-based wireless USB dongles.
+
+config MT76x0E
+ tristate "MediaTek MT76x0E (PCIe) support"
+ select MT76x0_COMMON
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7610/MT7630-based wireless PCIe devices.
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
index 7843908261ba..20672978dceb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
@@ -1,7 +1,12 @@
-obj-$(CONFIG_MT76x0U) += mt76x0.o
+obj-$(CONFIG_MT76x0U) += mt76x0u.o
+obj-$(CONFIG_MT76x0E) += mt76x0e.o
+obj-$(CONFIG_MT76x0_COMMON) += mt76x0-common.o
+
+mt76x0-common-y := \
+ init.o main.o trace.o eeprom.o phy.o \
+ mac.o debugfs.o
+mt76x0u-y := usb.o usb_mcu.o
+mt76x0e-y := pci.o pci_mcu.o
-mt76x0-objs = \
- usb.o init.o main.o mcu.o trace.o dma.o eeprom.o phy.o \
- mac.o util.o debugfs.o tx.o core.o
# ccflags-y := -DDEBUG
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
deleted file mode 100644
index 892803fce842..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "mt76x0.h"
-
-int mt76x0_wait_asic_ready(struct mt76x0_dev *dev)
-{
- int i = 100;
- u32 val;
-
- do {
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
- return -EIO;
-
- val = mt76_rr(dev, MT_MAC_CSR0);
- if (val && ~val)
- return 0;
-
- udelay(10);
- } while (i--);
-
- return -EIO;
-}
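The deleted poll treats the ASIC as ready once MT_MAC_CSR0 reads neither all-zeros nor all-ones (val && ~val), i.e. the chip is out of reset and still answering on the bus; the shared mt76x02_wait_for_mac() used later in init.c takes over this role. The test, as a sketch:

	/* ready when CSR0 is neither 0x00000000 nor 0xffffffff */
	static bool example_asic_ready(u32 val)
	{
		return val != 0 && val != 0xffffffff;
	}
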
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
index e7a77a886068..3224e5b1a1e5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
@@ -19,29 +19,9 @@
#include "eeprom.h"
static int
-mt76_reg_set(void *data, u64 val)
-{
- struct mt76x0_dev *dev = data;
-
- mt76_wr(dev, dev->debugfs_reg, val);
- return 0;
-}
-
-static int
-mt76_reg_get(void *data, u64 *val)
-{
- struct mt76x0_dev *dev = data;
-
- *val = mt76_rr(dev, dev->debugfs_reg);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
-
-static int
mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
{
- struct mt76x0_dev *dev = file->private;
+ struct mt76x02_dev *dev = file->private;
int i, j;
#define stat_printf(grp, off, name) \
@@ -95,72 +75,13 @@ static const struct file_operations fops_ampdu_stat = {
.release = single_release,
};
-static int
-mt76x0_eeprom_param_read(struct seq_file *file, void *data)
-{
- struct mt76x0_dev *dev = file->private;
- int i;
-
- seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
- seq_printf(file, "RSSI offset 2GHz: %hhx %hhx\n",
- dev->ee->rssi_offset_2ghz[0], dev->ee->rssi_offset_2ghz[1]);
- seq_printf(file, "RSSI offset 5GHz: %hhx %hhx %hhx\n",
- dev->ee->rssi_offset_5ghz[0], dev->ee->rssi_offset_5ghz[1],
- dev->ee->rssi_offset_5ghz[2]);
- seq_printf(file, "Temperature offset: %hhx\n", dev->ee->temp_off);
- seq_printf(file, "LNA gain 2Ghz: %hhx\n", dev->ee->lna_gain_2ghz);
- seq_printf(file, "LNA gain 5Ghz: %hhx %hhx %hhx\n",
- dev->ee->lna_gain_5ghz[0], dev->ee->lna_gain_5ghz[1],
- dev->ee->lna_gain_5ghz[2]);
- seq_printf(file, "Power Amplifier type %hhx\n", dev->ee->pa_type);
- seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
- dev->ee->reg.start + dev->ee->reg.num - 1);
-
- seq_puts(file, "Per channel power:\n");
- for (i = 0; i < 58; i++)
- seq_printf(file, "\t%d chan:%d pwr:%d\n", i, i,
- dev->ee->tx_pwr_per_chan[i]);
-
- seq_puts(file, "Per rate power 2GHz:\n");
- for (i = 0; i < 5; i++)
- seq_printf(file, "\t %d bw20:%d bw40:%d\n",
- i, dev->ee->tx_pwr_cfg_2g[i][0],
- dev->ee->tx_pwr_cfg_5g[i][1]);
-
- seq_puts(file, "Per rate power 5GHz:\n");
- for (i = 0; i < 5; i++)
- seq_printf(file, "\t %d bw20:%d bw40:%d\n",
- i, dev->ee->tx_pwr_cfg_5g[i][0],
- dev->ee->tx_pwr_cfg_5g[i][1]);
-
- return 0;
-}
-
-static int
-mt76x0_eeprom_param_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt76x0_eeprom_param_read, inode->i_private);
-}
-
-static const struct file_operations fops_eeprom_param = {
- .open = mt76x0_eeprom_param_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-void mt76x0_init_debugfs(struct mt76x0_dev *dev)
+void mt76x0_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
- dir = debugfs_create_dir("mt76x0", dev->mt76.hw->wiphy->debugfsdir);
+ dir = mt76_register_debugfs(&dev->mt76);
if (!dir)
return;
- debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
- debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
- &fops_regval);
debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
- debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
- &fops_eeprom_param);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
deleted file mode 100644
index e2efb430419b..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
+++ /dev/null
@@ -1,522 +0,0 @@
-/*
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "mt76x0.h"
-#include "dma.h"
-#include "usb.h"
-#include "trace.h"
-
-static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
- struct mt76x0_dma_buf_rx *e, gfp_t gfp);
-
-static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
-{
- const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
- unsigned int hdrlen;
-
- if (unlikely(len < 10))
- return 0;
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- if (unlikely(hdrlen > len))
- return 0;
- return hdrlen;
-}
-
-static struct sk_buff *
-mt76x0_rx_skb_from_seg(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
- void *data, u32 seg_len, u32 truesize, struct page *p)
-{
- struct sk_buff *skb;
- u32 true_len, hdr_len = 0, copy, frag;
-
- skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
- if (!skb)
- return NULL;
-
- true_len = mt76x0_mac_process_rx(dev, skb, data, rxwi);
- if (!true_len || true_len > seg_len)
- goto bad_frame;
-
- hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
- if (!hdr_len)
- goto bad_frame;
-
- if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
- memcpy(skb_put(skb, hdr_len), data, hdr_len);
-
- data += hdr_len + 2;
- true_len -= hdr_len;
- hdr_len = 0;
- }
-
- /* If not doing paged RX allocated skb will always have enough space */
- copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
- frag = true_len - copy;
-
- memcpy(skb_put(skb, copy), data, copy);
- data += copy;
-
- if (frag) {
- skb_add_rx_frag(skb, 0, p, data - page_address(p),
- frag, truesize);
- get_page(p);
- }
-
- return skb;
-
-bad_frame:
- dev_err_ratelimited(dev->mt76.dev, "Error: incorrect frame len:%u hdr:%u\n",
- true_len, hdr_len);
- dev_kfree_skb(skb);
- return NULL;
-}
-
-static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data,
- u32 seg_len, struct page *p)
-{
- struct sk_buff *skb;
- struct mt76x0_rxwi *rxwi;
- u32 fce_info, truesize = seg_len;
-
- /* DMA_INFO field at the beginning of the segment contains only some of
- * the information, we need to read the FCE descriptor from the end.
- */
- fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
- seg_len -= MT_FCE_INFO_LEN;
-
- data += MT_DMA_HDR_LEN;
- seg_len -= MT_DMA_HDR_LEN;
-
- rxwi = (struct mt76x0_rxwi *) data;
- data += sizeof(struct mt76x0_rxwi);
- seg_len -= sizeof(struct mt76x0_rxwi);
-
- if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
- dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n");
-
- trace_mt76x0_rx(&dev->mt76, rxwi, fce_info);
-
- skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
- if (!skb)
- return;
-
- spin_lock(&dev->mac_lock);
- ieee80211_rx(dev->mt76.hw, skb);
- spin_unlock(&dev->mac_lock);
-}
-
-static u16 mt76x0_rx_next_seg_len(u8 *data, u32 data_len)
-{
- u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
- sizeof(struct mt76x0_rxwi) + MT_FCE_INFO_LEN;
- u16 dma_len = get_unaligned_le16(data);
-
- if (data_len < min_seg_len ||
- WARN_ON(!dma_len) ||
- WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
- WARN_ON(dma_len & 0x3))
- return 0;
-
- return MT_DMA_HDRS + dma_len;
-}
-
-static void
-mt76x0_rx_process_entry(struct mt76x0_dev *dev, struct mt76x0_dma_buf_rx *e)
-{
- u32 seg_len, data_len = e->urb->actual_length;
- u8 *data = page_address(e->p);
- struct page *new_p = NULL;
- int cnt = 0;
-
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
- return;
-
- /* Copy if there is very little data in the buffer. */
- if (data_len > 512)
- new_p = dev_alloc_pages(MT_RX_ORDER);
-
- while ((seg_len = mt76x0_rx_next_seg_len(data, data_len))) {
- mt76x0_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
-
- data_len -= seg_len;
- data += seg_len;
- cnt++;
- }
-
- if (cnt > 1)
- trace_mt76x0_rx_dma_aggr(&dev->mt76, cnt, !!new_p);
-
- if (new_p) {
- /* we have one extra ref from the allocator */
- __free_pages(e->p, MT_RX_ORDER);
-
- e->p = new_p;
- }
-}
-
-static struct mt76x0_dma_buf_rx *
-mt76x0_rx_get_pending_entry(struct mt76x0_dev *dev)
-{
- struct mt76x0_rx_queue *q = &dev->rx_q;
- struct mt76x0_dma_buf_rx *buf = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->rx_lock, flags);
-
- if (!q->pending)
- goto out;
-
- buf = &q->e[q->start];
- q->pending--;
- q->start = (q->start + 1) % q->entries;
-out:
- spin_unlock_irqrestore(&dev->rx_lock, flags);
-
- return buf;
-}
-
-static void mt76x0_complete_rx(struct urb *urb)
-{
- struct mt76x0_dev *dev = urb->context;
- struct mt76x0_rx_queue *q = &dev->rx_q;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->rx_lock, flags);
-
- if (mt76x0_urb_has_error(urb))
- dev_err(dev->mt76.dev, "Error: RX urb failed:%d\n", urb->status);
- if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
- goto out;
-
- q->end = (q->end + 1) % q->entries;
- q->pending++;
- tasklet_schedule(&dev->rx_tasklet);
-out:
- spin_unlock_irqrestore(&dev->rx_lock, flags);
-}
-
-static void mt76x0_rx_tasklet(unsigned long data)
-{
- struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
- struct mt76x0_dma_buf_rx *e;
-
- while ((e = mt76x0_rx_get_pending_entry(dev))) {
- if (e->urb->status)
- continue;
-
- mt76x0_rx_process_entry(dev, e);
- mt76x0_submit_rx_buf(dev, e, GFP_ATOMIC);
- }
-}
-
-static void mt76x0_complete_tx(struct urb *urb)
-{
- struct mt76x0_tx_queue *q = urb->context;
- struct mt76x0_dev *dev = q->dev;
- struct sk_buff *skb;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->tx_lock, flags);
-
- if (mt76x0_urb_has_error(urb))
- dev_err(dev->mt76.dev, "Error: TX urb failed:%d\n", urb->status);
- if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
- goto out;
-
- skb = q->e[q->start].skb;
- trace_mt76x0_tx_dma_done(&dev->mt76, skb);
-
- __skb_queue_tail(&dev->tx_skb_done, skb);
- tasklet_schedule(&dev->tx_tasklet);
-
- if (q->used == q->entries - q->entries / 8)
- ieee80211_wake_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
-
- q->start = (q->start + 1) % q->entries;
- q->used--;
-out:
- spin_unlock_irqrestore(&dev->tx_lock, flags);
-}
-
-static void mt76x0_tx_tasklet(unsigned long data)
-{
- struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
- struct sk_buff_head skbs;
- unsigned long flags;
-
- __skb_queue_head_init(&skbs);
-
- spin_lock_irqsave(&dev->tx_lock, flags);
-
- set_bit(MT76_MORE_STATS, &dev->mt76.state);
- if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state))
- queue_delayed_work(dev->stat_wq, &dev->stat_work,
- msecs_to_jiffies(10));
-
- skb_queue_splice_init(&dev->tx_skb_done, &skbs);
-
- spin_unlock_irqrestore(&dev->tx_lock, flags);
-
- while (!skb_queue_empty(&skbs)) {
- struct sk_buff *skb = __skb_dequeue(&skbs);
-
- mt76x0_tx_status(dev, skb);
- }
-}
-
-static int mt76x0_dma_submit_tx(struct mt76x0_dev *dev,
- struct sk_buff *skb, u8 ep)
-{
- struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
- unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep]);
- struct mt76x0_dma_buf_tx *e;
- struct mt76x0_tx_queue *q = &dev->tx_q[ep];
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dev->tx_lock, flags);
-
- if (WARN_ON_ONCE(q->entries <= q->used)) {
- ret = -ENOSPC;
- goto out;
- }
-
- e = &q->e[q->end];
- e->skb = skb;
- usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
- mt76x0_complete_tx, q);
- ret = usb_submit_urb(e->urb, GFP_ATOMIC);
- if (ret) {
- /* Special-handle ENODEV from TX urb submission because it will
- * often be the first ENODEV we see after device is removed.
- */
- if (ret == -ENODEV)
- set_bit(MT76_REMOVED, &dev->mt76.state);
- else
- dev_err(dev->mt76.dev, "Error: TX urb submit failed:%d\n",
- ret);
- goto out;
- }
-
- q->end = (q->end + 1) % q->entries;
- q->used++;
-
- if (q->used >= q->entries)
- ieee80211_stop_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
-out:
- spin_unlock_irqrestore(&dev->tx_lock, flags);
-
- return ret;
-}
-
-/* Map USB endpoint number to Q id in the DMA engine */
-static enum mt76_qsel ep2dmaq(u8 ep)
-{
- if (ep == 5)
- return MT_QSEL_MGMT;
- return MT_QSEL_EDCA;
-}
-
-int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
- struct mt76_wcid *wcid, int hw_q)
-{
- u8 ep = q2ep(hw_q);
- u32 dma_flags;
- int ret;
-
- dma_flags = MT_TXD_PKT_INFO_80211;
- if (wcid->hw_key_idx == 0xff)
- dma_flags |= MT_TXD_PKT_INFO_WIV;
-
- ret = mt76x0_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
- if (ret)
- return ret;
-
- ret = mt76x0_dma_submit_tx(dev, skb, ep);
-
- if (ret) {
- ieee80211_free_txskb(dev->mt76.hw, skb);
- return ret;
- }
-
- return 0;
-}
-
-static void mt76x0_kill_rx(struct mt76x0_dev *dev)
-{
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->rx_lock, flags);
-
- for (i = 0; i < dev->rx_q.entries; i++) {
- int next = dev->rx_q.end;
-
- spin_unlock_irqrestore(&dev->rx_lock, flags);
- usb_poison_urb(dev->rx_q.e[next].urb);
- spin_lock_irqsave(&dev->rx_lock, flags);
- }
-
- spin_unlock_irqrestore(&dev->rx_lock, flags);
-}
-
-static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
- struct mt76x0_dma_buf_rx *e, gfp_t gfp)
-{
- struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
- u8 *buf = page_address(e->p);
- unsigned pipe;
- int ret;
-
- pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[MT_EP_IN_PKT_RX]);
-
- usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
- mt76x0_complete_rx, dev);
-
- trace_mt76x0_submit_urb(&dev->mt76, e->urb);
- ret = usb_submit_urb(e->urb, gfp);
- if (ret)
- dev_err(dev->mt76.dev, "Error: submit RX URB failed:%d\n", ret);
-
- return ret;
-}
-
-static int mt76x0_submit_rx(struct mt76x0_dev *dev)
-{
- int i, ret;
-
- for (i = 0; i < dev->rx_q.entries; i++) {
- ret = mt76x0_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void mt76x0_free_rx(struct mt76x0_dev *dev)
-{
- int i;
-
- for (i = 0; i < dev->rx_q.entries; i++) {
- __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
- usb_free_urb(dev->rx_q.e[i].urb);
- }
-}
-
-static int mt76x0_alloc_rx(struct mt76x0_dev *dev)
-{
- int i;
-
- memset(&dev->rx_q, 0, sizeof(dev->rx_q));
- dev->rx_q.dev = dev;
- dev->rx_q.entries = N_RX_ENTRIES;
-
- for (i = 0; i < N_RX_ENTRIES; i++) {
- dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
- dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
-
- if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void mt76x0_free_tx_queue(struct mt76x0_tx_queue *q)
-{
- int i;
-
- WARN_ON(q->used);
-
- for (i = 0; i < q->entries; i++) {
- usb_poison_urb(q->e[i].urb);
- usb_free_urb(q->e[i].urb);
- }
-}
-
-static void mt76x0_free_tx(struct mt76x0_dev *dev)
-{
- int i;
-
- for (i = 0; i < __MT_EP_OUT_MAX; i++)
- mt76x0_free_tx_queue(&dev->tx_q[i]);
-}
-
-static int mt76x0_alloc_tx_queue(struct mt76x0_dev *dev,
- struct mt76x0_tx_queue *q)
-{
- int i;
-
- q->dev = dev;
- q->entries = N_TX_ENTRIES;
-
- for (i = 0; i < N_TX_ENTRIES; i++) {
- q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!q->e[i].urb)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int mt76x0_alloc_tx(struct mt76x0_dev *dev)
-{
- int i;
-
- dev->tx_q = devm_kcalloc(dev->mt76.dev, __MT_EP_OUT_MAX,
- sizeof(*dev->tx_q), GFP_KERNEL);
-
- for (i = 0; i < __MT_EP_OUT_MAX; i++)
- if (mt76x0_alloc_tx_queue(dev, &dev->tx_q[i]))
- return -ENOMEM;
-
- return 0;
-}
-
-int mt76x0_dma_init(struct mt76x0_dev *dev)
-{
- int ret = -ENOMEM;
-
- tasklet_init(&dev->tx_tasklet, mt76x0_tx_tasklet, (unsigned long) dev);
- tasklet_init(&dev->rx_tasklet, mt76x0_rx_tasklet, (unsigned long) dev);
-
- ret = mt76x0_alloc_tx(dev);
- if (ret)
- goto err;
- ret = mt76x0_alloc_rx(dev);
- if (ret)
- goto err;
-
- ret = mt76x0_submit_rx(dev);
- if (ret)
- goto err;
-
- return 0;
-err:
- mt76x0_dma_cleanup(dev);
- return ret;
-}
-
-void mt76x0_dma_cleanup(struct mt76x0_dev *dev)
-{
- mt76x0_kill_rx(dev);
-
- tasklet_kill(&dev->rx_tasklet);
-
- mt76x0_free_rx(dev);
- mt76x0_free_tx(dev);
-
- tasklet_kill(&dev->tx_tasklet);
-}
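All of the queues removed above share one single-producer ring discipline: start/end advance modulo q->entries while pending/used track occupancy under the queue spinlock. Distilled into a sketch (illustrative struct, not the deleted ones verbatim):

	struct example_ring { unsigned int start, end, used, entries; };

	static bool example_ring_push(struct example_ring *q)	/* submit side */
	{
		if (q->used >= q->entries)
			return false;
		q->end = (q->end + 1) % q->entries;
		q->used++;
		return true;
	}

	static bool example_ring_pop(struct example_ring *q)	/* completion side */
	{
		if (!q->used)
			return false;
		q->start = (q->start + 1) % q->entries;
		q->used--;
		return true;
	}
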
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
deleted file mode 100644
index 891ce1c3461f..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76X0U_DMA_H
-#define __MT76X0U_DMA_H
-
-#include <asm/unaligned.h>
-#include <linux/skbuff.h>
-
-#define MT_DMA_HDR_LEN 4
-#define MT_RX_INFO_LEN 4
-#define MT_FCE_INFO_LEN 4
-#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
-
-/* Common Tx DMA descriptor fields */
-#define MT_TXD_INFO_LEN GENMASK(15, 0)
-#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
-#define MT_TXD_INFO_TYPE GENMASK(31, 30)
-
-/* Tx DMA MCU command specific flags */
-#define MT_TXD_CMD_SEQ GENMASK(19, 16)
-#define MT_TXD_CMD_TYPE GENMASK(26, 20)
-
-enum mt76_msg_port {
- WLAN_PORT,
- CPU_RX_PORT,
- CPU_TX_PORT,
- HOST_PORT,
- VIRTUAL_CPU_RX_PORT,
- VIRTUAL_CPU_TX_PORT,
- DISCARD,
-};
-
-enum mt76_info_type {
- DMA_PACKET,
- DMA_COMMAND,
-};
-
-/* Tx DMA packet specific flags */
-#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
-#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
-#define MT_TXD_PKT_INFO_80211 BIT(19)
-#define MT_TXD_PKT_INFO_TSO BIT(20)
-#define MT_TXD_PKT_INFO_CSO BIT(21)
-#define MT_TXD_PKT_INFO_WIV BIT(24)
-#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
-
-enum mt76_qsel {
- MT_QSEL_MGMT,
- MT_QSEL_HCCA,
- MT_QSEL_EDCA,
- MT_QSEL_EDCA_2,
-};
-
-
-static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb,
- enum mt76_msg_port d_port,
- enum mt76_info_type type, u32 flags)
-{
- u32 info;
-
- /* Buffer layout:
- * | 4B | xfer len | pad | 4B |
- * | TXINFO | pkt/cmd | zero pad to 4B | zero |
- *
- * length field of TXINFO should be set to 'xfer len'.
- */
-
- info = flags |
- FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
- FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
- FIELD_PREP(MT_TXD_INFO_TYPE, type);
-
- put_unaligned_le32(info, skb_push(skb, sizeof(info)));
- return skb_put_padto(skb, round_up(skb->len, 4) + 4);
-}
-
-static inline int
-mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
-{
- flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
- return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
-}
-
-/* Common Rx DMA descriptor fields */
-#define MT_RXD_INFO_LEN GENMASK(13, 0)
-#define MT_RXD_INFO_PCIE_INTR BIT(24)
-#define MT_RXD_INFO_QSEL GENMASK(26, 25)
-#define MT_RXD_INFO_PORT GENMASK(29, 27)
-#define MT_RXD_INFO_TYPE GENMASK(31, 30)
-
-/* Rx DMA packet specific flags */
-#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
-#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
-#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
-#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
-#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
-#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
-
-/* Rx DMA MCU command specific flags */
-#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
-#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
-#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
-
-enum mt76_evt_type {
- CMD_DONE,
- CMD_ERROR,
- CMD_RETRY,
- EVENT_PWR_RSP,
- EVENT_WOW_RSP,
- EVENT_CARRIER_DETECT_RSP,
- EVENT_DFS_DETECT_RSP,
-};
-
-#endif
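The removed header packs length, destination port and message type into one little-endian TXINFO word that mt76x0_dma_skb_wrap() prepends to the frame before padding to 4 bytes and appending a 4-byte zero tail. For a 60-byte data frame the word works out as follows (WLAN_PORT and DMA_PACKET are both 0 in the enums above):

	u32 info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(60, 4)) |	/* 60 */
		   FIELD_PREP(MT_TXD_INFO_D_PORT, WLAN_PORT) |		/* 0 */
		   FIELD_PREP(MT_TXD_INFO_TYPE, DMA_PACKET);		/* 0 */
	/* USB transfer: 4 (TXINFO) + 60 (frame) + 0 (pad) + 4 (zero tail) */
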
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 36da1e6bc21a..ab4fd6e0f23a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -13,6 +13,7 @@
* GNU General Public License for more details.
*/
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -20,81 +21,20 @@
#include <asm/unaligned.h>
#include "mt76x0.h"
#include "eeprom.h"
-
-static bool
-field_valid(u8 val)
-{
- return val != 0xff;
-}
-
-static s8
-field_validate(u8 val)
-{
- if (!field_valid(val))
- return 0;
-
- return val;
-}
-
-static inline int
-sign_extend(u32 val, unsigned int size)
-{
- bool sign = val & BIT(size - 1);
-
- val &= BIT(size - 1) - 1;
-
- return sign ? val : -val;
-}
-
-static int
-mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data,
- enum mt76x0_eeprom_access_modes mode)
-{
- u32 val;
- int i;
-
- val = mt76_rr(dev, MT_EFUSE_CTRL);
- val &= ~(MT_EFUSE_CTRL_AIN |
- MT_EFUSE_CTRL_MODE);
- val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
- FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
- MT_EFUSE_CTRL_KICK;
- mt76_wr(dev, MT_EFUSE_CTRL, val);
-
- if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
- return -ETIMEDOUT;
-
- val = mt76_rr(dev, MT_EFUSE_CTRL);
- if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
- /* Parts of eeprom not in the usage map (0x80-0xc0,0xf0)
- * will not return valid data but it's ok.
- */
- memset(data, 0xff, 16);
- return 0;
- }
-
- for (i = 0; i < 4; i++) {
- val = mt76_rr(dev, MT_EFUSE_DATA(i));
- put_unaligned_le32(val, data + 4 * i);
- }
-
- return 0;
-}
+#include "../mt76x02_phy.h"
#define MT_MAP_READS DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16)
static int
-mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
+mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
{
u8 data[MT_MAP_READS * 16];
int ret, i;
u32 start = 0, end = 0, cnt_free;
- for (i = 0; i < MT_MAP_READS; i++) {
- ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
- data + i * 16, MT_EE_PHYSICAL_READ);
- if (ret)
- return ret;
- }
+ ret = mt76x02_get_efuse_data(dev, MT_EE_USAGE_MAP_START, data,
+ sizeof(data), MT_EE_PHYSICAL_READ);
+ if (ret)
+ return ret;
for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
if (!data[i]) {
@@ -105,341 +45,304 @@ mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
cnt_free = end - start + 1;
if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
- dev_err(dev->mt76.dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
+ dev_err(dev->mt76.dev,
+ "driver does not support default EEPROM\n");
return -EINVAL;
}
return 0;
}
-static void
-mt76x0_set_chip_cap(struct mt76x0_dev *dev, u8 *eeprom)
+static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
{
- enum mt76x2_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2 };
- u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
- u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
-
- dev_dbg(dev->mt76.dev, "NIC_CONF0: %04x NIC_CONF1: %04x\n", nic_conf0, nic_conf1);
-
- switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, nic_conf0)) {
- case BOARD_TYPE_5GHZ:
- dev->ee->has_5ghz = true;
- break;
- case BOARD_TYPE_2GHZ:
- dev->ee->has_2ghz = true;
- break;
- default:
- dev->ee->has_2ghz = true;
- dev->ee->has_5ghz = true;
- break;
- }
+ u16 nic_conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+ u16 nic_conf1 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+
+ mt76x02_eeprom_parse_hw_cap(dev);
+ dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n",
+ dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz);
- dev_dbg(dev->mt76.dev, "Has 2GHZ %d 5GHZ %d\n", dev->ee->has_2ghz, dev->ee->has_5ghz);
+ if (dev->no_2ghz) {
+ dev->mt76.cap.has_2ghz = false;
+ dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
+ }
- if (!field_valid(nic_conf1 & 0xff))
+ if (!mt76x02_field_valid(nic_conf1 & 0xff))
nic_conf1 &= 0xff00;
if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
dev_err(dev->mt76.dev,
- "Error: this driver does not support HW RF ctrl\n");
+ "driver does not support HW RF ctrl\n");
- if (!field_valid(nic_conf0 >> 8))
+ if (!mt76x02_field_valid(nic_conf0 >> 8))
return;
if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
- dev_err(dev->mt76.dev,
- "Error: device has more than 1 RX/TX stream!\n");
-
- dev->ee->pa_type = FIELD_GET(MT_EE_NIC_CONF_0_PA_TYPE, nic_conf0);
- dev_dbg(dev->mt76.dev, "PA Type %d\n", dev->ee->pa_type);
+ dev_err(dev->mt76.dev, "invalid tx-rx stream\n");
}
-static int
-mt76x0_set_macaddr(struct mt76x0_dev *dev, const u8 *eeprom)
+static void mt76x0_set_temp_offset(struct mt76x02_dev *dev)
{
- const void *src = eeprom + MT_EE_MAC_ADDR;
-
- ether_addr_copy(dev->macaddr, src);
-
- if (!is_valid_ether_addr(dev->macaddr)) {
- eth_random_addr(dev->macaddr);
- dev_info(dev->mt76.dev,
- "Invalid MAC address, using random address %pM\n",
- dev->macaddr);
- }
+ u8 val;
- mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
- mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
- FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
-
- return 0;
-}
-
-static void
-mt76x0_set_temp_offset(struct mt76x0_dev *dev, u8 *eeprom)
-{
- u8 temp = eeprom[MT_EE_TEMP_OFFSET];
-
- if (field_valid(temp))
- dev->ee->temp_off = sign_extend(temp, 8);
+ val = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER) >> 8;
+ if (mt76x02_field_valid(val))
+ dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8);
else
- dev->ee->temp_off = -10;
+ dev->cal.rx.temp_offset = -10;
}
-static void
-mt76x0_set_country_reg(struct mt76x0_dev *dev, u8 *eeprom)
+static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
{
- /* Note: - region 31 is not valid for mt76x0 (see rtmp_init.c)
- * - comments in rtmp_def.h are incorrect (see rt_channel.c)
- */
- static const struct reg_channel_bounds chan_bounds[] = {
- /* EEPROM country regions 0 - 7 */
- { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 },
- { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 },
- /* EEPROM country regions 32 - 33 */
- { 1, 11 }, { 1, 14 }
- };
- u8 val = eeprom[MT_EE_COUNTRY_REGION_2GHZ];
- int idx = -1;
-
- dev_dbg(dev->mt76.dev, "REG 2GHZ %u REG 5GHZ %u\n", val, eeprom[MT_EE_COUNTRY_REGION_5GHZ]);
- if (val < 8)
- idx = val;
- if (val > 31 && val < 33)
- idx = val - 32 + 8;
-
- if (idx != -1)
- dev_info(dev->mt76.dev,
- "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
- val, chan_bounds[idx].start,
- chan_bounds[idx].start + chan_bounds[idx].num - 1);
- else
- idx = 5; /* channels 1 - 14 */
-
- dev->ee->reg = chan_bounds[idx];
+ struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
+ u8 val;
- /* TODO: country region 33 is special - phy should be set to B-mode
- * before entering channel 14 (see sta/connect.c)
- */
-}
-
-static void
-mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom)
-{
- u8 comp;
+ val = mt76x02_eeprom_get(dev, MT_EE_FREQ_OFFSET);
+ if (!mt76x02_field_valid(val))
+ val = 0;
+ caldata->freq_offset = val;
- dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
- comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+ val = mt76x02_eeprom_get(dev, MT_EE_TSSI_BOUND4) >> 8;
+ if (!mt76x02_field_valid(val))
+ val = 0;
- if (comp & BIT(7))
- dev->ee->rf_freq_off -= comp & 0x7f;
- else
- dev->ee->rf_freq_off += comp;
+ caldata->freq_offset -= mt76x02_sign_extend(val, 8);
}
-static void
-mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom)
+void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
{
- u8 gain;
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
+ s8 val, lna_5g[3], lna_2g;
+ u16 rssi_offset;
+ int i;
- dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ];
- dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0];
+ mt76x02_get_rx_gain(dev, chan->band, &rssi_offset, &lna_2g, lna_5g);
+ caldata->lna_gain = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
- gain = eeprom[MT_EE_LNA_GAIN_5GHZ_1];
- if (gain == 0xff || gain == 0)
- dev->ee->lna_gain_5ghz[1] = dev->ee->lna_gain_5ghz[0];
- else
- dev->ee->lna_gain_5ghz[1] = gain;
+ for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) {
+ val = rssi_offset >> (8 * i);
+ if (val < -10 || val > 10)
+ val = 0;
- gain = eeprom[MT_EE_LNA_GAIN_5GHZ_2];
- if (gain == 0xff || gain == 0)
- dev->ee->lna_gain_5ghz[2] = dev->ee->lna_gain_5ghz[0];
- else
- dev->ee->lna_gain_5ghz[2] = gain;
+ caldata->rssi_offset[i] = val;
+ }
}
-static void
-mt76x0_set_rssi_offset(struct mt76x0_dev *dev, u8 *eeprom)
+static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
{
- int i;
- s8 *rssi_offset = dev->ee->rssi_offset_2ghz;
-
- for (i = 0; i < 2; i++) {
- rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+ struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ u8 val;
- if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
- dev_warn(dev->mt76.dev,
- "Warning: EEPROM RSSI is invalid %02hhx\n",
- rssi_offset[i]);
- rssi_offset[i] = 0;
- }
- }
-
- rssi_offset = dev->ee->rssi_offset_5ghz;
+ if (mt76x0_tssi_enabled(dev))
+ return 0;
- for (i = 0; i < 3; i++) {
- rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET_5GHZ + i];
+ if (chandef->width == NL80211_CHAN_WIDTH_80) {
+ val = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER) >> 8;
+ } else if (chandef->width == NL80211_CHAN_WIDTH_40) {
+ u16 data;
- if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
- dev_warn(dev->mt76.dev,
- "Warning: EEPROM RSSI is invalid %02hhx\n",
- rssi_offset[i]);
- rssi_offset[i] = 0;
- }
+ data = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+ if (chandef->chan->band == NL80211_BAND_5GHZ)
+ val = data >> 8;
+ else
+ val = data;
+ } else {
+ return 0;
}
+
+ return mt76x02_rate_power_val(val);
}
-static u32
-calc_bw40_power_rate(u32 value, int delta)
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
{
- u32 ret = 0;
- int i, tmp;
-
- for (i = 0; i < 4; i++) {
- tmp = s6_to_int((value >> i*8) & 0xff) + delta;
- ret |= (u32)(int_to_s6(tmp)) << i*8;
- }
-
- return ret;
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
+ struct mt76_rate_power *t = &dev->mt76.rate_power;
+ s8 delta = mt76x0_get_delta(dev);
+ u16 val, addr;
+
+ memset(t, 0, sizeof(*t));
+
+ /* cck 1M, 2M, 5.5M, 11M */
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_BYRATE_BASE);
+ t->cck[0] = t->cck[1] = s6_to_s8(val);
+ t->cck[2] = t->cck[3] = s6_to_s8(val >> 8);
+
+ /* ofdm 6M, 9M, 12M, 18M */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->ofdm[0] = t->ofdm[1] = s6_to_s8(val);
+ t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8);
+
+ /* ofdm 24M, 36M, 48M, 54M */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->ofdm[4] = t->ofdm[5] = s6_to_s8(val);
+ t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8);
+
+ /* ht-vht mcs 1ss 0, 1, 2, 3 */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
+ t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
+
+ /* ht-vht mcs 1ss 4, 5, 6 */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
+ t->ht[6] = t->vht[6] = s6_to_s8(val >> 8);
+
+ /* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->stbc[0] = t->stbc[1] = s6_to_s8(val);
+ t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
+
+ /* ht-vht mcs 1ss 4, 5, 6 stbc */
+ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee;
+ val = mt76x02_eeprom_get(dev, addr);
+ t->stbc[4] = t->stbc[5] = s6_to_s8(val);
+ t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
+
+ /* vht mcs 8, 9 5GHz */
+ val = mt76x02_eeprom_get(dev, 0x132);
+ t->vht[7] = s6_to_s8(val);
+ t->vht[8] = s6_to_s8(val >> 8);
+
+ mt76x02_add_rate_power_offset(t, delta);
}
-static s8
-get_delta(u8 val)
+void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
{
- s8 ret;
+ struct mt76x0_chan_map {
+ u8 chan;
+ u8 offset;
+ } chan_map[] = {
+ { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 },
+ { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 },
+ { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 },
+ { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 },
+ { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 },
+ { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 },
+ { 167, 17 }, { 171, 18 }, { 173, 19 },
+ };
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ u8 offset, addr;
+ u16 data;
+ int i;
- if (!field_valid(val) || !(val & BIT(7)))
- return 0;
+ for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
+ if (chan_map[i].chan <= chan->hw_value) {
+ offset = chan_map[i].offset;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(chan_map))
+ offset = chan_map[0].offset;
+
+ if (chan->band == NL80211_BAND_2GHZ) {
+ addr = MT_EE_TX_POWER_DELTA_BW80 + offset;
+ } else {
+ switch (chan->hw_value) {
+ case 58:
+ offset = 8;
+ break;
+ case 106:
+ offset = 14;
+ break;
+ case 112:
+ offset = 20;
+ break;
+ case 155:
+ offset = 30;
+ break;
+ default:
+ break;
+ }
+ addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset;
+ }
- ret = val & 0x1f;
- if (ret > 8)
- ret = 8;
- if (val & BIT(6))
- ret = -ret;
+ data = mt76x02_eeprom_get(dev, addr);
- return ret;
+ info[0] = data;
+ if (!info[0] || info[0] > 0x3f)
+ info[0] = 5;
+
+ info[1] = data >> 8;
+ if (!info[1] || info[1] > 0x3f)
+ info[1] = 5;
}
-static void
-mt76x0_set_tx_power_per_rate(struct mt76x0_dev *dev, u8 *eeprom)
+static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
{
- s8 bw40_delta_2g, bw40_delta_5g;
- u32 val;
- int i;
-
- bw40_delta_2g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
- bw40_delta_5g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40 + 1]);
-
- for (i = 0; i < 5; i++) {
- val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+ u16 val;
- /* Skip last 16 bits. */
- if (i == 4)
- val &= 0x0000ffff;
+ val = get_unaligned_le16(dev->mt76.eeprom.data);
+ if (!val)
+ val = get_unaligned_le16(dev->mt76.eeprom.data +
+ MT_EE_PCI_ID);
- dev->ee->tx_pwr_cfg_2g[i][0] = val;
- dev->ee->tx_pwr_cfg_2g[i][1] = calc_bw40_power_rate(val, bw40_delta_2g);
+ switch (val) {
+ case 0x7650:
+ case 0x7610:
+ return 0;
+ default:
+ dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n",
+ val);
+ return -EINVAL;
}
-
- /* Reading per rate tx power for 5 GHz band is a bit more complex. Note
- * we mix 16 bit and 32 bit reads and sometimes do shifts.
- */
- val = get_unaligned_le16(eeprom + 0x120);
- val <<= 16;
- dev->ee->tx_pwr_cfg_5g[0][0] = val;
- dev->ee->tx_pwr_cfg_5g[0][1] = calc_bw40_power_rate(val, bw40_delta_5g);
-
- val = get_unaligned_le32(eeprom + 0x122);
- dev->ee->tx_pwr_cfg_5g[1][0] = val;
- dev->ee->tx_pwr_cfg_5g[1][1] = calc_bw40_power_rate(val, bw40_delta_5g);
-
- val = get_unaligned_le16(eeprom + 0x126);
- dev->ee->tx_pwr_cfg_5g[2][0] = val;
- dev->ee->tx_pwr_cfg_5g[2][1] = calc_bw40_power_rate(val, bw40_delta_5g);
-
- val = get_unaligned_le16(eeprom + 0xec);
- val <<= 16;
- dev->ee->tx_pwr_cfg_5g[3][0] = val;
- dev->ee->tx_pwr_cfg_5g[3][1] = calc_bw40_power_rate(val, bw40_delta_5g);
-
- val = get_unaligned_le16(eeprom + 0xee);
- dev->ee->tx_pwr_cfg_5g[4][0] = val;
- dev->ee->tx_pwr_cfg_5g[4][1] = calc_bw40_power_rate(val, bw40_delta_5g);
}
-static void
-mt76x0_set_tx_power_per_chan(struct mt76x0_dev *dev, u8 *eeprom)
+static int mt76x0_load_eeprom(struct mt76x02_dev *dev)
{
- int i;
- u8 tx_pwr;
+ int found;
- for (i = 0; i < 14; i++) {
- tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_2GHZ + i];
- if (tx_pwr <= 0x3f && tx_pwr > 0)
- dev->ee->tx_pwr_per_chan[i] = tx_pwr;
- else
- dev->ee->tx_pwr_per_chan[i] = 5;
- }
+ found = mt76_eeprom_init(&dev->mt76, MT76X0_EEPROM_SIZE);
+ if (found < 0)
+ return found;
- for (i = 0; i < 40; i++) {
- tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_5GHZ + i];
- if (tx_pwr <= 0x3f && tx_pwr > 0)
- dev->ee->tx_pwr_per_chan[14 + i] = tx_pwr;
- else
- dev->ee->tx_pwr_per_chan[14 + i] = 5;
- }
+ if (found && !mt76x0_check_eeprom(dev))
+ return 0;
+
+ found = mt76x0_efuse_physical_size_check(dev);
+ if (found < 0)
+ return found;
- dev->ee->tx_pwr_per_chan[54] = dev->ee->tx_pwr_per_chan[22];
- dev->ee->tx_pwr_per_chan[55] = dev->ee->tx_pwr_per_chan[28];
- dev->ee->tx_pwr_per_chan[56] = dev->ee->tx_pwr_per_chan[34];
- dev->ee->tx_pwr_per_chan[57] = dev->ee->tx_pwr_per_chan[44];
+ return mt76x02_get_efuse_data(dev, 0, dev->mt76.eeprom.data,
+ MT76X0_EEPROM_SIZE, MT_EE_READ);
}
-int
-mt76x0_eeprom_init(struct mt76x0_dev *dev)
+int mt76x0_eeprom_init(struct mt76x02_dev *dev)
{
- u8 *eeprom;
- int i, ret;
-
- ret = mt76x0_efuse_physical_size_check(dev);
- if (ret)
- return ret;
-
- dev->ee = devm_kzalloc(dev->mt76.dev, sizeof(*dev->ee), GFP_KERNEL);
- if (!dev->ee)
- return -ENOMEM;
+ u8 version, fae;
+ u16 data;
+ int err;
- eeprom = kmalloc(MT76X0_EEPROM_SIZE, GFP_KERNEL);
- if (!eeprom)
- return -ENOMEM;
+ err = mt76x0_load_eeprom(dev);
+ if (err < 0)
+ return err;
- for (i = 0; i + 16 <= MT76X0_EEPROM_SIZE; i += 16) {
- ret = mt76x0_efuse_read(dev, i, eeprom + i, MT_EE_READ);
- if (ret)
- goto out;
- }
+ data = mt76x02_eeprom_get(dev, MT_EE_VERSION);
+ version = data >> 8;
+ fae = data;
- if (eeprom[MT_EE_VERSION_EE] > MT76X0U_EE_MAX_VER)
+ if (version > MT76X0U_EE_MAX_VER)
dev_warn(dev->mt76.dev,
"Warning: unsupported EEPROM version %02hhx\n",
- eeprom[MT_EE_VERSION_EE]);
+ version);
dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
- eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
-
- mt76x0_set_macaddr(dev, eeprom);
- mt76x0_set_chip_cap(dev, eeprom);
- mt76x0_set_country_reg(dev, eeprom);
- mt76x0_set_rf_freq_off(dev, eeprom);
- mt76x0_set_temp_offset(dev, eeprom);
- mt76x0_set_lna_gain(dev, eeprom);
- mt76x0_set_rssi_offset(dev, eeprom);
- dev->chainmask = 0x0101;
-
- mt76x0_set_tx_power_per_rate(dev, eeprom);
- mt76x0_set_tx_power_per_chan(dev, eeprom);
-
-out:
- kfree(eeprom);
- return ret;
+ version, fae);
+
+ mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ mt76x0_set_chip_cap(dev);
+ mt76x0_set_freq_offset(dev);
+ mt76x0_set_temp_offset(dev);
+
+ dev->mt76.chainmask = 0x0101;
+
+ return 0;
}
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
index e37b573aed7b..ee9ade9f3c8b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -16,134 +16,31 @@
#ifndef __MT76X0U_EEPROM_H
#define __MT76X0U_EEPROM_H
-struct mt76x0_dev;
+#include "../mt76x02_eeprom.h"
-#define MT76X0U_EE_MAX_VER 0x0c
-#define MT76X0_EEPROM_SIZE 512
+struct mt76x02_dev;
-#define MT76X0U_DEFAULT_TX_POWER 6
+#define MT76X0U_EE_MAX_VER 0x0c
+#define MT76X0_EEPROM_SIZE 512
-enum mt76_eeprom_field {
- MT_EE_CHIP_ID = 0x00,
- MT_EE_VERSION_FAE = 0x02,
- MT_EE_VERSION_EE = 0x03,
- MT_EE_MAC_ADDR = 0x04,
- MT_EE_NIC_CONF_0 = 0x34,
- MT_EE_NIC_CONF_1 = 0x36,
- MT_EE_COUNTRY_REGION_5GHZ = 0x38,
- MT_EE_COUNTRY_REGION_2GHZ = 0x39,
- MT_EE_FREQ_OFFSET = 0x3a,
- MT_EE_NIC_CONF_2 = 0x42,
+int mt76x0_eeprom_init(struct mt76x02_dev *dev);
+void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
+void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev);
+void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info);
- MT_EE_LNA_GAIN_2GHZ = 0x44,
- MT_EE_LNA_GAIN_5GHZ_0 = 0x45,
- MT_EE_RSSI_OFFSET = 0x46,
- MT_EE_RSSI_OFFSET_5GHZ = 0x4a,
- MT_EE_LNA_GAIN_5GHZ_1 = 0x49,
- MT_EE_LNA_GAIN_5GHZ_2 = 0x4d,
-
- MT_EE_TX_POWER_DELTA_BW40 = 0x50,
-
- MT_EE_TX_POWER_OFFSET_2GHZ = 0x52,
-
- MT_EE_TX_TSSI_SLOPE = 0x6e,
- MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f,
- MT_EE_TX_TSSI_OFFSET = 0x76,
-
- MT_EE_TX_POWER_OFFSET_5GHZ = 0x78,
-
- MT_EE_TEMP_OFFSET = 0xd1,
- MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb,
- MT_EE_TX_POWER_BYRATE_BASE = 0xde,
-
- MT_EE_TX_POWER_BYRATE_BASE_5GHZ = 0x120,
-
- MT_EE_USAGE_MAP_START = 0x1e0,
- MT_EE_USAGE_MAP_END = 0x1fc,
-};
-
-#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
-#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
-#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
-#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
-
-#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
-#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
-#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
-#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
-#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
-
-#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
-#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
-#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
-#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
-#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
-#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
-
-#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \
- (i) * 4)
-
-#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
- MT_EE_USAGE_MAP_START + 1)
-
-enum mt76x0_eeprom_access_modes {
- MT_EE_READ = 0,
- MT_EE_PHYSICAL_READ = 1,
-};
-
-struct reg_channel_bounds {
- u8 start;
- u8 num;
-};
-
-struct mt76x0_eeprom_params {
- u8 rf_freq_off;
- s16 temp_off;
- s8 rssi_offset_2ghz[2];
- s8 rssi_offset_5ghz[3];
- s8 lna_gain_2ghz;
- s8 lna_gain_5ghz[3];
- u8 pa_type;
-
- /* TX_PWR_CFG_* values from EEPROM for 20 and 40 Mhz bandwidths. */
- u32 tx_pwr_cfg_2g[5][2];
- u32 tx_pwr_cfg_5g[5][2];
-
- u8 tx_pwr_per_chan[58];
-
- struct reg_channel_bounds reg;
-
- bool has_2ghz;
- bool has_5ghz;
-};
-
-int mt76x0_eeprom_init(struct mt76x0_dev *dev);
-
-static inline u32 s6_validate(u32 reg)
-{
- WARN_ON(reg & ~GENMASK(5, 0));
- return reg & GENMASK(5, 0);
-}
-
-static inline int s6_to_int(u32 reg)
+static inline s8 s6_to_s8(u32 val)
{
- int s6;
-
- s6 = s6_validate(reg);
- if (s6 & BIT(5))
- s6 -= BIT(6);
+ s8 ret = val & GENMASK(5, 0);
- return s6;
+ if (ret & BIT(5))
+ ret -= BIT(6);
+ return ret;
}
-static inline u32 int_to_s6(int val)
+static inline bool mt76x0_tssi_enabled(struct mt76x02_dev *dev)
{
- if (val < -0x20)
- return 0x20;
- if (val > 0x1f)
- return 0x1f;
-
- return val & 0x3f;
+ return (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TX_ALC_EN);
}
#endif
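For reference, the new s6_to_s8() above sign-extends a 6-bit two's-complement field, replacing the saturating int_to_s6()/s6_to_int() pair: 0x00-0x1f decode to 0..31 and 0x20-0x3f to -32..-1. Worked examples:

	s6_to_s8(0x1f);	/* 31: bit 5 clear, value returned as-is */
	s6_to_s8(0x3f);	/* -1: bit 5 set, 0x3f - 0x40 */
	s6_to_s8(0x20);	/* -32: the most negative encodable value */
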
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 7cdb3e740522..4a9408801260 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -18,15 +18,31 @@
#include "eeprom.h"
#include "trace.h"
#include "mcu.h"
-#include "usb.h"
-
#include "initvals.h"
-static void
-mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
+static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
{
+ struct ieee80211_sta_vht_cap *vht_cap = &sband->vht_cap;
+ u16 mcs_map = 0;
int i;
+ vht_cap->cap &= ~IEEE80211_VHT_CAP_RXLDPC;
+ for (i = 0; i < 8; i++) {
+ if (!i)
+ mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_7 << (i * 2));
+ else
+ mcs_map |=
+ (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
+ }
+ vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+}
+
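Since IEEE80211_VHT_MCS_SUPPORT_0_7 is 0 and IEEE80211_VHT_MCS_NOT_SUPPORTED is 3, mt76x0_vht_cap_mask() advertises MCS 0-7 on a single spatial stream and marks streams 2-8 unsupported, so mcs_map ends up as 0xfffc. The loop is equivalent to:

	u16 mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_7;	/* stream 1: bits 1:0 = 0 */
	int i;

	for (i = 1; i < 8; i++)				/* streams 2-8 */
		mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
	/* mcs_map == 0xfffc */
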
+static void
+mt76x0_set_wlan_state(struct mt76x02_dev *dev, u32 val, bool enable)
+{
+ u32 mask = MT_CMB_CTRL_XTAL_RDY | MT_CMB_CTRL_PLL_LD;
+
/* Note: we don't turn off WLAN_CLK because that makes the device
* not respond properly on the probe path.
* In case anyone (PSM?) wants to use this function we can
@@ -42,32 +58,18 @@ mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
udelay(20);
- if (!enable)
- return;
-
- for (i = 200; i; i--) {
- val = mt76_rr(dev, MT_CMB_CTRL);
-
- if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
- break;
-
- udelay(20);
- }
-
/* Note: vendor driver tries to disable/enable wlan here and retry
* but the code which does it is so buggy it must have never
* triggered, so don't bother.
*/
- if (!i)
- dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n");
+ if (enable && !mt76_poll(dev, MT_CMB_CTRL, mask, mask, 2000))
+ dev_err(dev->mt76.dev, "PLL and XTAL check failed\n");
}
-void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
+void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset)
{
u32 val;
- mutex_lock(&dev->hw_atomic_mutex);
-
val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
if (reset) {
@@ -89,54 +91,25 @@ void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
udelay(20);
mt76x0_set_wlan_state(dev, val, enable);
-
- mutex_unlock(&dev->hw_atomic_mutex);
}
+EXPORT_SYMBOL_GPL(mt76x0_chip_onoff);
-static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev)
+static void mt76x0_reset_csr_bbp(struct mt76x02_dev *dev)
{
- u32 val;
-
- val = mt76_rr(dev, MT_PBF_SYS_CTRL);
- val &= ~0x2000;
- mt76_wr(dev, MT_PBF_SYS_CTRL, val);
-
- mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR |
- MT_MAC_SYS_CTRL_RESET_BBP);
-
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
msleep(200);
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP);
}
-static void mt76x0_init_usb_dma(struct mt76x0_dev *dev)
-{
- u32 val;
-
- val = mt76_rr(dev, MT_USB_DMA_CFG);
-
- val |= FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
- FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
- MT_USB_DMA_CFG_RX_BULK_EN |
- MT_USB_DMA_CFG_TX_BULK_EN;
- if (dev->in_max_packet == 512)
- val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
- mt76_wr(dev, MT_USB_DMA_CFG, val);
-
- val = mt76_rr(dev, MT_COM_REG0);
- if (val & 1)
- dev_dbg(dev->mt76.dev, "MCU not ready\n");
-
- val = mt76_rr(dev, MT_USB_DMA_CFG);
-
- val |= MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
- mt76_wr(dev, MT_USB_DMA_CFG, val);
- val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
- mt76_wr(dev, MT_USB_DMA_CFG, val);
-}
-
-#define RANDOM_WRITE(dev, tab) \
- mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, tab, ARRAY_SIZE(tab));
+#define RANDOM_WRITE(dev, tab) \
+ mt76_wr_rp(dev, MT_MCU_MEMMAP_WLAN, \
+ tab, ARRAY_SIZE(tab))
-static int mt76x0_init_bbp(struct mt76x0_dev *dev)
+static int mt76x0_init_bbp(struct mt76x02_dev *dev)
{
int ret, i;
@@ -159,30 +132,13 @@ static int mt76x0_init_bbp(struct mt76x0_dev *dev)
return 0;
}
-static void
-mt76_init_beacon_offsets(struct mt76x0_dev *dev)
-{
- u16 base = MT_BEACON_BASE;
- u32 regs[4] = {};
- int i;
-
- for (i = 0; i < 16; i++) {
- u16 addr = dev->beacon_offsets[i];
-
- regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
- }
-
- for (i = 0; i < 4; i++)
- mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
-}
-
-static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
+static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
{
u32 reg;
RANDOM_WRITE(dev, common_mac_reg_table);
- mt76_init_beacon_offsets(dev);
+ mt76x02_set_beacon_offsets(dev);
/* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
RANDOM_WRITE(dev, mt76x0_mac_reg_table);
@@ -192,13 +148,6 @@ static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
reg &= ~0x3;
mt76_wr(dev, MT_MAC_SYS_CTRL, reg);
- if (is_mt7610e(dev)) {
- /* Disable COEX_EN */
- reg = mt76_rr(dev, MT_COEXCFG0);
- reg &= 0xFFFFFFFE;
- mt76_wr(dev, MT_COEXCFG0, reg);
- }
-
/* Set 0x141C[15:12]=0xF */
reg = mt76_rr(dev, MT_EXT_CCA_CFG);
reg |= 0x0000F000;
@@ -216,115 +165,81 @@ static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
reg &= ~0x000003FF;
reg |= 0x00000201;
mt76_wr(dev, MT_WMM_CTRL, reg);
-
- /* TODO: Probably not needed */
- mt76_wr(dev, 0x7028, 0);
- mt76_wr(dev, 0x7010, 0);
- mt76_wr(dev, 0x7024, 0);
- msleep(10);
}
-static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
+static int mt76x0_init_wcid_mem(struct mt76x02_dev *dev)
{
u32 *vals;
- int i, ret;
+ int i;
- vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL);
if (!vals)
return -ENOMEM;
- for (i = 0; i < N_WCIDS; i++) {
+ for (i = 0; i < MT76_N_WCIDS; i++) {
vals[i * 2] = 0xffffffff;
vals[i * 2 + 1] = 0x00ffffff;
}
- ret = mt76x0_burst_write_regs(dev, MT_WCID_ADDR_BASE,
- vals, N_WCIDS * 2);
+ mt76_wr_copy(dev, MT_WCID_ADDR_BASE, vals, MT76_N_WCIDS * 2);
kfree(vals);
-
- return ret;
+ return 0;
}
-static int mt76x0_init_key_mem(struct mt76x0_dev *dev)
+static void mt76x0_init_key_mem(struct mt76x02_dev *dev)
{
u32 vals[4] = {};
- return mt76x0_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
- vals, ARRAY_SIZE(vals));
+ mt76_wr_copy(dev, MT_SKEY_MODE_BASE_0, vals, ARRAY_SIZE(vals));
}
-static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
+static int mt76x0_init_wcid_attr_mem(struct mt76x02_dev *dev)
{
u32 *vals;
- int i, ret;
+ int i;
- vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL);
if (!vals)
return -ENOMEM;
- for (i = 0; i < N_WCIDS * 2; i++)
+ for (i = 0; i < MT76_N_WCIDS * 2; i++)
vals[i] = 1;
- ret = mt76x0_burst_write_regs(dev, MT_WCID_ATTR_BASE,
- vals, N_WCIDS * 2);
+ mt76_wr_copy(dev, MT_WCID_ATTR_BASE, vals, MT76_N_WCIDS * 2);
kfree(vals);
-
- return ret;
+ return 0;
}
-static void mt76x0_reset_counters(struct mt76x0_dev *dev)
+static void mt76x0_reset_counters(struct mt76x02_dev *dev)
{
- mt76_rr(dev, MT_RX_STA_CNT0);
- mt76_rr(dev, MT_RX_STA_CNT1);
- mt76_rr(dev, MT_RX_STA_CNT2);
- mt76_rr(dev, MT_TX_STA_CNT0);
- mt76_rr(dev, MT_TX_STA_CNT1);
- mt76_rr(dev, MT_TX_STA_CNT2);
+ mt76_rr(dev, MT_RX_STAT_0);
+ mt76_rr(dev, MT_RX_STAT_1);
+ mt76_rr(dev, MT_RX_STAT_2);
+ mt76_rr(dev, MT_TX_STA_0);
+ mt76_rr(dev, MT_TX_STA_1);
+ mt76_rr(dev, MT_TX_STA_2);
}
-int mt76x0_mac_start(struct mt76x0_dev *dev)
+int mt76x0_mac_start(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
- if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
- MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+ if (!mt76x02_wait_for_wpdma(&dev->mt76, 200000))
return -ETIMEDOUT;
- dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
- MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
- MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
- MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
- MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
- MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
- MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
mt76_wr(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
-
- if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
- MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
- return -ETIMEDOUT;
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
- return 0;
+ return !mt76x02_wait_for_wpdma(&dev->mt76, 50) ? -ETIMEDOUT : 0;
}
+EXPORT_SYMBOL_GPL(mt76x0_mac_start);
-static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
+void mt76x0_mac_stop(struct mt76x02_dev *dev)
{
- int i, ok;
-
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
- return;
-
- mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
- MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
- MT_BEACON_TIME_CFG_BEACON_TX);
-
- if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
- dev_warn(dev->mt76.dev, "Warning: TX DMA did not stop!\n");
+ int i = 200, ok = 0;
/* Page count on TxQ */
- i = 200;
while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
(mt76_rr(dev, 0x0a30) & 0x000000ff) ||
(mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
@@ -337,9 +252,7 @@ static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
MT_MAC_SYS_CTRL_ENABLE_TX);
/* Page count on RxQ */
- ok = 0;
- i = 200;
- while (i--) {
+ for (i = 0; i < 200; i++) {
if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
!mt76_rr(dev, 0x0a30) &&
!mt76_rr(dev, 0x0a34)) {
@@ -352,91 +265,45 @@ static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n");
-
- if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
- dev_warn(dev->mt76.dev, "Warning: RX DMA did not stop!\n");
-}
-
-void mt76x0_mac_stop(struct mt76x0_dev *dev)
-{
- mt76x0_mac_stop_hw(dev);
- flush_delayed_work(&dev->stat_work);
- cancel_delayed_work_sync(&dev->stat_work);
-}
-
-static void mt76x0_stop_hardware(struct mt76x0_dev *dev)
-{
- mt76x0_chip_onoff(dev, false, false);
}
+EXPORT_SYMBOL_GPL(mt76x0_mac_stop);
-int mt76x0_init_hardware(struct mt76x0_dev *dev)
+int mt76x0_init_hardware(struct mt76x02_dev *dev)
{
- static const u16 beacon_offsets[16] = {
- /* 512 byte per beacon */
- 0xc000, 0xc200, 0xc400, 0xc600,
- 0xc800, 0xca00, 0xcc00, 0xce00,
- 0xd000, 0xd200, 0xd400, 0xd600,
- 0xd800, 0xda00, 0xdc00, 0xde00
- };
int ret;
- dev->beacon_offsets = beacon_offsets;
-
- mt76x0_chip_onoff(dev, true, true);
-
- ret = mt76x0_wait_asic_ready(dev);
- if (ret)
- goto err;
- ret = mt76x0_mcu_init(dev);
- if (ret)
- goto err;
-
- if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
- MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
- ret = -EIO;
- goto err;
- }
+ if (!mt76x02_wait_for_wpdma(&dev->mt76, 1000))
+ return -EIO;
/* Wait for ASIC ready after FW load. */
- ret = mt76x0_wait_asic_ready(dev);
- if (ret)
- goto err;
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
mt76x0_reset_csr_bbp(dev);
- mt76x0_init_usb_dma(dev);
-
- mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0x0);
- mt76_wr(dev, MT_TSO_CTRL, 0x0);
-
- ret = mt76x0_mcu_cmd_init(dev);
+ ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
if (ret)
- goto err;
- ret = mt76x0_dma_init(dev);
- if (ret)
- goto err_mcu;
+ return ret;
mt76x0_init_mac_registers(dev);
- if (!mt76_poll_msec(dev, MT_MAC_STATUS,
- MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 1000)) {
- ret = -EIO;
- goto err_rx;
- }
+ if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
+ return -EIO;
ret = mt76x0_init_bbp(dev);
if (ret)
- goto err_rx;
+ return ret;
+
+ dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
ret = mt76x0_init_wcid_mem(dev);
if (ret)
- goto err_rx;
- ret = mt76x0_init_key_mem(dev);
- if (ret)
- goto err_rx;
+ return ret;
+
+ mt76x0_init_key_mem(dev);
+
ret = mt76x0_init_wcid_attr_mem(dev);
if (ret)
- goto err_rx;
+ return ret;
mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
@@ -445,276 +312,85 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev)
mt76x0_reset_counters(dev);
- mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
-
- mt76_wr(dev, MT_TXOP_CTRL_CFG,
- FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
- FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
-
ret = mt76x0_eeprom_init(dev);
if (ret)
- goto err_rx;
+ return ret;
mt76x0_phy_init(dev);
- return 0;
-
-err_rx:
- mt76x0_dma_cleanup(dev);
-err_mcu:
- mt76x0_mcu_cmd_deinit(dev);
-err:
- mt76x0_chip_onoff(dev, false, false);
- return ret;
-}
-
-void mt76x0_cleanup(struct mt76x0_dev *dev)
-{
- if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
- return;
- mt76x0_stop_hardware(dev);
- mt76x0_dma_cleanup(dev);
- mt76x0_mcu_cmd_deinit(dev);
+ return 0;
}
+EXPORT_SYMBOL_GPL(mt76x0_init_hardware);
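
A side effect of the init rework above is that the error handling collapses:
the old err/err_mcu/err_rx goto chain disappears and each failing step simply
returns, with any remaining teardown apparently left to the callers. The two
shapes, sketched with hypothetical step_a()/step_b()/undo_a() helpers:

extern int step_a(void);
extern int step_b(void);
extern void undo_a(void);

/* old shape: unwind partially-initialized state through a goto chain */
int init_old(void)
{
        int ret = step_a();

        if (ret)
                return ret;
        ret = step_b();
        if (ret)
                goto err_undo_a;
        return 0;

err_undo_a:
        undo_a();
        return ret;
}

/* new shape: nothing owned locally, so failures return directly */
int init_new(void)
{
        int ret = step_a();

        if (ret)
                return ret;
        return step_b();        /* caller tears down on failure */
}
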
-struct mt76x0_dev *mt76x0_alloc_device(struct device *pdev)
+struct mt76x02_dev *
+mt76x0_alloc_device(struct device *pdev,
+ const struct mt76_driver_ops *drv_ops,
+ const struct ieee80211_ops *ops)
{
- struct ieee80211_hw *hw;
- struct mt76x0_dev *dev;
+ struct mt76x02_dev *dev;
+ struct mt76_dev *mdev;
- hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x0_ops);
- if (!hw)
+ mdev = mt76_alloc_device(sizeof(*dev), ops);
+ if (!mdev)
return NULL;
- dev = hw->priv;
- dev->mt76.dev = pdev;
- dev->mt76.hw = hw;
- mutex_init(&dev->usb_ctrl_mtx);
- mutex_init(&dev->reg_atomic_mutex);
- mutex_init(&dev->hw_atomic_mutex);
- mutex_init(&dev->mutex);
- spin_lock_init(&dev->tx_lock);
- spin_lock_init(&dev->rx_lock);
- spin_lock_init(&dev->mt76.lock);
- spin_lock_init(&dev->mac_lock);
- spin_lock_init(&dev->con_mon_lock);
- atomic_set(&dev->avg_ampdu_len, 1);
- skb_queue_head_init(&dev->tx_skb_done);
+ mdev->dev = pdev;
+ mdev->drv = drv_ops;
- dev->stat_wq = alloc_workqueue("mt76x0", WQ_UNBOUND, 0);
- if (!dev->stat_wq) {
- ieee80211_free_hw(hw);
- return NULL;
- }
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+ mutex_init(&dev->phy_mutex);
+ atomic_set(&dev->avg_ampdu_len, 1);
return dev;
}
+EXPORT_SYMBOL_GPL(mt76x0_alloc_device);
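
The new allocator relies on struct embedding: mt76_alloc_device() hands back
a pointer to the embedded struct mt76_dev, and container_of() recovers the
wrapping struct mt76x02_dev. A self-contained demonstration of the pattern,
with both structs reduced to the minimum and a plain calloc() standing in
for the real allocator:

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mt76_dev { int dummy; };
struct mt76x02_dev { struct mt76_dev mt76; int avg_ampdu_len; };

int main(void)
{
        struct mt76x02_dev *dev = calloc(1, sizeof(*dev));
        struct mt76_dev *mdev;

        if (!dev)
                return 1;
        mdev = &dev->mt76;

        /* given only mdev, recover the wrapping mt76x02_dev */
        return container_of(mdev, struct mt76x02_dev, mt76) == dev ? 0 : 1;
}

This is why the allocation size is sizeof(*dev) even though the core only
sees a struct mt76_dev: the outer structure must exist for container_of()
to be valid.
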
-#define CHAN2G(_idx, _freq) { \
- .band = NL80211_BAND_2GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 30, \
-}
-
-static const struct ieee80211_channel mt76_channels_2ghz[] = {
- CHAN2G(1, 2412),
- CHAN2G(2, 2417),
- CHAN2G(3, 2422),
- CHAN2G(4, 2427),
- CHAN2G(5, 2432),
- CHAN2G(6, 2437),
- CHAN2G(7, 2442),
- CHAN2G(8, 2447),
- CHAN2G(9, 2452),
- CHAN2G(10, 2457),
- CHAN2G(11, 2462),
- CHAN2G(12, 2467),
- CHAN2G(13, 2472),
- CHAN2G(14, 2484),
-};
-
-#define CHAN5G(_idx, _freq) { \
- .band = NL80211_BAND_5GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 30, \
-}
-
-static const struct ieee80211_channel mt76_channels_5ghz[] = {
- CHAN5G(36, 5180),
- CHAN5G(40, 5200),
- CHAN5G(44, 5220),
- CHAN5G(46, 5230),
- CHAN5G(48, 5240),
- CHAN5G(52, 5260),
- CHAN5G(56, 5280),
- CHAN5G(60, 5300),
- CHAN5G(64, 5320),
-
- CHAN5G(100, 5500),
- CHAN5G(104, 5520),
- CHAN5G(108, 5540),
- CHAN5G(112, 5560),
- CHAN5G(116, 5580),
- CHAN5G(120, 5600),
- CHAN5G(124, 5620),
- CHAN5G(128, 5640),
- CHAN5G(132, 5660),
- CHAN5G(136, 5680),
- CHAN5G(140, 5700),
-};
-
-#define CCK_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
- .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
-}
-
-#define OFDM_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
-}
-
-static struct ieee80211_rate mt76_rates[] = {
- CCK_RATE(0, 10),
- CCK_RATE(1, 20),
- CCK_RATE(2, 55),
- CCK_RATE(3, 110),
- OFDM_RATE(0, 60),
- OFDM_RATE(1, 90),
- OFDM_RATE(2, 120),
- OFDM_RATE(3, 180),
- OFDM_RATE(4, 240),
- OFDM_RATE(5, 360),
- OFDM_RATE(6, 480),
- OFDM_RATE(7, 540),
-};
-
-static int
-mt76_init_sband(struct mt76x0_dev *dev, struct ieee80211_supported_band *sband,
- const struct ieee80211_channel *chan, int n_chan,
- struct ieee80211_rate *rates, int n_rates)
-{
- struct ieee80211_sta_ht_cap *ht_cap;
- void *chanlist;
- int size;
-
- size = n_chan * sizeof(*chan);
- chanlist = devm_kmemdup(dev->mt76.dev, chan, size, GFP_KERNEL);
- if (!chanlist)
- return -ENOMEM;
-
- sband->channels = chanlist;
- sband->n_channels = n_chan;
- sband->bitrates = rates;
- sband->n_bitrates = n_rates;
-
- ht_cap = &sband->ht_cap;
- ht_cap->ht_supported = true;
- ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 |
- (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
-
- ht_cap->mcs.rx_mask[0] = 0xff;
- ht_cap->mcs.rx_mask[4] = 0x1;
- ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
-
- return 0;
-}
-
-static int
-mt76_init_sband_2g(struct mt76x0_dev *dev)
-{
- dev->mt76.hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->mt76.sband_2g.sband;
-
- WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
- ARRAY_SIZE(mt76_channels_2ghz));
-
-
- return mt76_init_sband(dev, &dev->mt76.sband_2g.sband,
- mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz),
- mt76_rates, ARRAY_SIZE(mt76_rates));
-}
-
-static int
-mt76_init_sband_5g(struct mt76x0_dev *dev)
-{
- dev->mt76.hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->mt76.sband_5g.sband;
-
- return mt76_init_sband(dev, &dev->mt76.sband_5g.sband,
- mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz),
- mt76_rates + 4, ARRAY_SIZE(mt76_rates) - 4);
-}
-
-
-int mt76x0_register_device(struct mt76x0_dev *dev)
+int mt76x0_register_device(struct mt76x02_dev *dev)
{
- struct ieee80211_hw *hw = dev->mt76.hw;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct ieee80211_hw *hw = mdev->hw;
struct wiphy *wiphy = hw->wiphy;
int ret;
/* Reserve WCID 0 for mcast - thanks to this APs WCID will go to
* entry no. 1 like it does in the vendor driver.
*/
- dev->wcid_mask[0] |= 1;
+ mdev->wcid_mask[0] |= 1;
/* init fake wcid for monitor interfaces */
- dev->mon_wcid = devm_kmalloc(dev->mt76.dev, sizeof(*dev->mon_wcid),
- GFP_KERNEL);
- if (!dev->mon_wcid)
- return -ENOMEM;
- dev->mon_wcid->idx = 0xff;
- dev->mon_wcid->hw_key_idx = -1;
+ mdev->global_wcid.idx = 0xff;
+ mdev->global_wcid.hw_key_idx = -1;
- SET_IEEE80211_DEV(hw, dev->mt76.dev);
+ /* init antenna configuration */
+ mdev->antenna_mask = 1;
hw->queues = 4;
- ieee80211_hw_set(hw, SIGNAL_DBM);
- ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
- ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
- ieee80211_hw_set(hw, AMPDU_AGGREGATION);
- ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
+ hw->extra_tx_headroom = 2;
+ if (mt76_is_usb(dev))
+ hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
+ MT_DMA_HDR_LEN;
- hw->sta_data_size = sizeof(struct mt76_sta);
- hw->vif_data_size = sizeof(struct mt76_vif);
-
- SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+ hw->sta_data_size = sizeof(struct mt76x02_sta);
+ hw->vif_data_size = sizeof(struct mt76x02_vif);
- wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
- if (dev->ee->has_2ghz) {
- ret = mt76_init_sband_2g(dev);
- if (ret)
- return ret;
- }
-
- if (dev->ee->has_5ghz) {
- ret = mt76_init_sband_5g(dev);
- if (ret)
- return ret;
- }
-
- dev->mt76.chandef.chan = &dev->mt76.sband_2g.sband.channels[0];
-
INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work);
- INIT_DELAYED_WORK(&dev->stat_work, mt76x0_tx_stat);
- ret = ieee80211_register_hw(hw);
+ ret = mt76_register_device(mdev, true, mt76x02_rates,
+ ARRAY_SIZE(mt76x02_rates));
if (ret)
return ret;
+ /* overwrite unsupported features */
+ if (mdev->cap.has_5ghz)
+ mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband);
+
mt76x0_init_debugfs(dev);
return 0;
}
+EXPORT_SYMBOL_GPL(mt76x0_register_device);
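
One subtle addition above: TX headroom is now reserved at registration time,
2 bytes for everyone plus, on USB devices, room for the TXWI descriptor and
the DMA header. A sketch of the computation; the sizes here are illustrative
stand-ins for sizeof(struct mt76x02_txwi) and MT_DMA_HDR_LEN.

#include <stdbool.h>
#include <stddef.h>

#define TXWI_LEN        20      /* illustrative */
#define DMA_HDR_LEN     4       /* illustrative */

static size_t tx_headroom(bool is_usb)
{
        size_t headroom = 2;

        if (is_usb)
                headroom += TXWI_LEN + DMA_HDR_LEN;
        return headroom;
}
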
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
index 24afcfd94b4e..236dce6860b4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -2,6 +2,7 @@
* (c) Copyright 2002-2010, Ralink Technology, Inc.
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
* Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -19,264 +20,215 @@
#include "phy.h"
static const struct mt76_reg_pair common_mac_reg_table[] = {
-#if 1
- {MT_BCN_OFFSET(0), 0xf8f0e8e0}, /* 0x3800(e0), 0x3A00(e8), 0x3C00(f0), 0x3E00(f8), 512B for each beacon */
- {MT_BCN_OFFSET(1), 0x6f77d0c8}, /* 0x3200(c8), 0x3400(d0), 0x1DC0(77), 0x1BC0(6f), 512B for each beacon */
-#endif
-
- {MT_LEGACY_BASIC_RATE, 0x0000013f}, /* Basic rate set bitmap*/
- {MT_HT_BASIC_RATE, 0x00008003}, /* Basic HT rate set , 20M, MCS=3, MM. Format is the same as in TXWI.*/
- {MT_MAC_SYS_CTRL, 0x00}, /* 0x1004, , default Disable RX*/
- {MT_RX_FILTR_CFG, 0x17f97}, /*0x1400 , RX filter control, */
- {MT_BKOFF_SLOT_CFG, 0x209}, /* default set short slot time, CC_DELAY_TIME should be 2 */
- /*{TX_SW_CFG0, 0x40a06}, Gary,2006-08-23 */
- {MT_TX_SW_CFG0, 0x0}, /* Gary,2008-05-21 for CWC test */
- {MT_TX_SW_CFG1, 0x80606}, /* Gary,2006-08-23 */
- {MT_TX_LINK_CFG, 0x1020}, /* Gary,2006-08-23 */
- /*{TX_TIMEOUT_CFG, 0x00182090}, CCK has some problem. So increase timieout value. 2006-10-09 MArvek RT*/
- {MT_TX_TIMEOUT_CFG, 0x000a2090}, /* CCK has some problem. So increase timieout value. 2006-10-09 MArvek RT , Modify for 2860E ,2007-08-01*/
- {MT_MAX_LEN_CFG, 0xa0fff | 0x00001000}, /* 0x3018, MAX frame length. Max PSDU = 16kbytes.*/
- {MT_LED_CFG, 0x7f031e46}, /* Gary, 2006-08-23*/
-
- {MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f /*0xbfbf3f1f*/},
- {MT_PBF_RX_MAX_PCNT, 0x9f},
-
- /*{TX_RTY_CFG, 0x6bb80408}, Jan, 2006/11/16*/
-/* WMM_ACM_SUPPORT */
-/* {TX_RTY_CFG, 0x6bb80101}, sample*/
- {MT_TX_RETRY_CFG, 0x47d01f0f}, /* Jan, 2006/11/16, Set TxWI->ACK =0 in Probe Rsp Modify for 2860E ,2007-08-03*/
-
- {MT_AUTO_RSP_CFG, 0x00000013}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
- {MT_CCK_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
- {MT_OFDM_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
- {MT_PBF_CFG, 0xf40006}, /* Only enable Queue 2*/
- {MT_MM40_PROT_CFG, 0x3F44084}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
- {MT_WPDMA_GLO_CFG, 0x00000030},
- {MT_GF20_PROT_CFG, 0x01744004}, /* set 19:18 --> Short NAV for MIMO PS*/
- {MT_GF40_PROT_CFG, 0x03F44084},
- {MT_MM20_PROT_CFG, 0x01744004},
- {MT_TXOP_CTRL_CFG, 0x0000583f, /*0x0000243f*/ /*0x000024bf*/}, /*Extension channel backoff.*/
- {MT_TX_RTS_CFG, 0x00092b20},
-
- {MT_EXP_ACK_TIME, 0x002400ca}, /* default value */
- {MT_TXOP_HLDR_ET, 0x00000002},
-
- /* Jerry comments 2008/01/16: we use SIFS = 10us in CCK defaultly, but it seems that 10us
- is too small for INTEL 2200bg card, so in MBSS mode, the delta time between beacon0
- and beacon1 is SIFS (10us), so if INTEL 2200bg card connects to BSS0, the ping
- will always lost. So we change the SIFS of CCK from 10us to 16us. */
- {MT_XIFS_TIME_CFG, 0x33a41010},
- {MT_PWR_PIN_CFG, 0x00000000},
+ { MT_BCN_OFFSET(0), 0xf8f0e8e0 },
+ { MT_BCN_OFFSET(1), 0x6f77d0c8 },
+ { MT_LEGACY_BASIC_RATE, 0x0000013f },
+ { MT_HT_BASIC_RATE, 0x00008003 },
+ { MT_MAC_SYS_CTRL, 0x00000000 },
+ { MT_RX_FILTR_CFG, 0x00017f97 },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TX_SW_CFG0, 0x00000000 },
+ { MT_TX_SW_CFG1, 0x00080606 },
+ { MT_TX_LINK_CFG, 0x00001020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2090 },
+ { MT_MAX_LEN_CFG, 0xa0fff | 0x00001000 },
+ { MT_LED_CFG, 0x7f031e46 },
+ { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f },
+ { MT_PBF_RX_MAX_PCNT, 0x0000fe9f },
+ { MT_TX_RETRY_CFG, 0x47d01f0f },
+ { MT_AUTO_RSP_CFG, 0x00000013 },
+ { MT_CCK_PROT_CFG, 0x05740003 },
+ { MT_OFDM_PROT_CFG, 0x05740003 },
+ { MT_PBF_CFG, 0x00f40006 },
+ { MT_WPDMA_GLO_CFG, 0x00000030 },
+ { MT_GF20_PROT_CFG, 0x01744004 },
+ { MT_GF40_PROT_CFG, 0x03f44084 },
+ { MT_MM20_PROT_CFG, 0x01744004 },
+ { MT_MM40_PROT_CFG, 0x03f54084 },
+ { MT_TXOP_CTRL_CFG, 0x0000583f },
+ { MT_TX_RTS_CFG, 0x00092b20 },
+ { MT_EXP_ACK_TIME, 0x002400ca },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { MT_XIFS_TIME_CFG, 0x33a41010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
};
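
The rewritten table above keeps the same register layout while dropping the
vendor-comment leftovers; a few values are also retuned (compare
MT_PBF_RX_MAX_PCNT and MT_MM40_PROT_CFG against the removed lines). Tables
of this shape are consumed by a plain walk-and-write loop; a self-contained
sketch, with write_reg() standing in for the driver's bus accessor:

#include <stddef.h>
#include <stdint.h>

struct reg_pair {
        uint32_t reg;
        uint32_t value;
};

extern void write_reg(uint32_t reg, uint32_t value);    /* stand-in */

static void apply_reg_pairs(const struct reg_pair *rp, size_t n)
{
        for (size_t i = 0; i < n; i++)
                write_reg(rp[i].reg, rp[i].value);
}
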
static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
- /* {MT_IOCFG_6, 0xA0040080 }, */
- {MT_PBF_SYS_CTRL, 0x00080c00 },
- {MT_PBF_CFG, 0x77723c1f },
- {MT_FCE_PSE_CTRL, 0x00000001 },
-
- {MT_AMPDU_MAX_LEN_20M1S, 0xBAA99887 },
-
- /* Delay bb_tx_pe for proper tx_mcs_pwr update */
- {MT_TX_SW_CFG0, 0x00000601 },
-
- /* Set rf_tx_pe deassert time to 1us by Chee's comment @MT7650_CR_setting_1018.xlsx */
- {MT_TX_SW_CFG1, 0x00040000 },
- {MT_TX_SW_CFG2, 0x00000000 },
-
- /* disable Tx info report */
- {0xa44, 0x0000000 },
-
- {MT_HEADER_TRANS_CTRL_REG, 0x0},
- {MT_TSO_CTRL, 0x0},
-
- /* BB_PA_MODE_CFG0(0x1214) Keep default value @20120903 */
- {MT_BB_PA_MODE_CFG1, 0x00500055},
-
- /* RF_PA_MODE_CFG0(0x121C) Keep default value @20120903 */
- {MT_RF_PA_MODE_CFG1, 0x00500055},
-
- {MT_TX_ALC_CFG_0, 0x2F2F000C},
- {MT_TX0_BB_GAIN_ATTEN, 0x00000000}, /* set BBP atten gain = 0 */
-
- {MT_TX_PWR_CFG_0, 0x3A3A3A3A},
- {MT_TX_PWR_CFG_1, 0x3A3A3A3A},
- {MT_TX_PWR_CFG_2, 0x3A3A3A3A},
- {MT_TX_PWR_CFG_3, 0x3A3A3A3A},
- {MT_TX_PWR_CFG_4, 0x3A3A3A3A},
- {MT_TX_PWR_CFG_7, 0x3A3A3A3A},
- {MT_TX_PWR_CFG_8, 0x3A},
- {MT_TX_PWR_CFG_9, 0x3A},
- /* Enable Tx length > 4095 byte */
- {0x150C, 0x00000002},
-
- /* Disable bt_abort_tx_en(0x1238[21] = 0) which is not used at MT7650 */
- {0x1238, 0x001700C8},
- /* PMU_OCLEVEL<5:1> from default <5'b10010> to <5'b11011> for normal driver */
- /* {MT_LDO_CTRL_0, 0x00A647B6}, */
-
- /* Default LDO_DIG supply 1.26V, change to 1.2V */
- {MT_LDO_CTRL_1, 0x6B006464 },
-/*
- {MT_HT_BASIC_RATE, 0x00004003 },
- {MT_HT_CTRL_CFG, 0x000001FF },
-*/
+ { MT_IOCFG_6, 0xa0040080 },
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x77723c1f },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_AMPDU_MAX_LEN_20M1S, 0xAAA99887 },
+ { MT_TX_SW_CFG0, 0x00000601 },
+ { MT_TX_SW_CFG1, 0x00040000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { 0xa44, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_TSO_CTRL, 0x00000000 },
+ { MT_BB_PA_MODE_CFG1, 0x00500055 },
+ { MT_RF_PA_MODE_CFG1, 0x00500055 },
+ { MT_TX_ALC_CFG_0, 0x2F2F000C },
+ { MT_TX0_BB_GAIN_ATTEN, 0x00000000 },
+ { MT_TX_PWR_CFG_0, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_1, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_2, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_3, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_4, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_7, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_8, 0x0000003A },
+ { MT_TX_PWR_CFG_9, 0x0000003A },
+ { 0x150C, 0x00000002 },
+ { 0x1238, 0x001700C8 },
+ { MT_LDO_CTRL_0, 0x00A647B6 },
+ { MT_LDO_CTRL_1, 0x6B006464 },
+ { MT_HT_BASIC_RATE, 0x00004003 },
+ { MT_HT_CTRL_CFG, 0x000001FF },
+ { MT_TXOP_HLDR_ET, 0x00000000 },
+ { MT_PN_PAD_MODE, 0x00000003 },
};
-
static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
- {MT_BBP(CORE, 1), 0x00000002},
- {MT_BBP(CORE, 4), 0x00000000},
- {MT_BBP(CORE, 24), 0x00000000},
- {MT_BBP(CORE, 32), 0x4003000a},
- {MT_BBP(CORE, 42), 0x00000000},
- {MT_BBP(CORE, 44), 0x00000000},
-
- {MT_BBP(IBI, 11), 0x00000080},
-
- /*
- 0x2300[5] Default Antenna:
- 0 for WIFI main antenna
- 1 for WIFI aux antenna
-
- */
- {MT_BBP(AGC, 0), 0x00021400},
- {MT_BBP(AGC, 1), 0x00000003},
- {MT_BBP(AGC, 2), 0x003A6464},
- {MT_BBP(AGC, 15), 0x88A28CB8},
- {MT_BBP(AGC, 22), 0x00001E21},
- {MT_BBP(AGC, 23), 0x0000272C},
- {MT_BBP(AGC, 24), 0x00002F3A},
- {MT_BBP(AGC, 25), 0x8000005A},
- {MT_BBP(AGC, 26), 0x007C2005},
- {MT_BBP(AGC, 34), 0x000A0C0C},
- {MT_BBP(AGC, 37), 0x2121262C},
- {MT_BBP(AGC, 41), 0x38383E45},
- {MT_BBP(AGC, 57), 0x00001010},
- {MT_BBP(AGC, 59), 0xBAA20E96},
- {MT_BBP(AGC, 63), 0x00000001},
-
- {MT_BBP(TXC, 0), 0x00280403},
- {MT_BBP(TXC, 1), 0x00000000},
-
- {MT_BBP(RXC, 1), 0x00000012},
- {MT_BBP(RXC, 2), 0x00000011},
- {MT_BBP(RXC, 3), 0x00000005},
- {MT_BBP(RXC, 4), 0x00000000},
- {MT_BBP(RXC, 5), 0xF977C4EC},
- {MT_BBP(RXC, 7), 0x00000090},
-
- {MT_BBP(TXO, 8), 0x00000000},
-
- {MT_BBP(TXBE, 0), 0x00000000},
- {MT_BBP(TXBE, 4), 0x00000004},
- {MT_BBP(TXBE, 6), 0x00000000},
- {MT_BBP(TXBE, 8), 0x00000014},
- {MT_BBP(TXBE, 9), 0x20000000},
- {MT_BBP(TXBE, 10), 0x00000000},
- {MT_BBP(TXBE, 12), 0x00000000},
- {MT_BBP(TXBE, 13), 0x00000000},
- {MT_BBP(TXBE, 14), 0x00000000},
- {MT_BBP(TXBE, 15), 0x00000000},
- {MT_BBP(TXBE, 16), 0x00000000},
- {MT_BBP(TXBE, 17), 0x00000000},
-
- {MT_BBP(RXFE, 1), 0x00008800}, /* Add for E3 */
- {MT_BBP(RXFE, 3), 0x00000000},
- {MT_BBP(RXFE, 4), 0x00000000},
-
- {MT_BBP(RXO, 13), 0x00000092},
- {MT_BBP(RXO, 14), 0x00060612},
- {MT_BBP(RXO, 15), 0xC8321B18},
- {MT_BBP(RXO, 16), 0x0000001E},
- {MT_BBP(RXO, 17), 0x00000000},
- {MT_BBP(RXO, 18), 0xCC00A993},
- {MT_BBP(RXO, 19), 0xB9CB9CB9},
- {MT_BBP(RXO, 20), 0x26c00057},
- {MT_BBP(RXO, 21), 0x00000001},
- {MT_BBP(RXO, 24), 0x00000006},
+ { MT_BBP(CORE, 1), 0x00000002 },
+ { MT_BBP(CORE, 4), 0x00000000 },
+ { MT_BBP(CORE, 24), 0x00000000 },
+ { MT_BBP(CORE, 32), 0x4003000a },
+ { MT_BBP(CORE, 42), 0x00000000 },
+ { MT_BBP(CORE, 44), 0x00000000 },
+ { MT_BBP(IBI, 11), 0x0FDE8081 },
+ { MT_BBP(AGC, 0), 0x00021400 },
+ { MT_BBP(AGC, 1), 0x00000003 },
+ { MT_BBP(AGC, 2), 0x003A6464 },
+ { MT_BBP(AGC, 15), 0x88A28CB8 },
+ { MT_BBP(AGC, 22), 0x00001E21 },
+ { MT_BBP(AGC, 23), 0x0000272C },
+ { MT_BBP(AGC, 24), 0x00002F3A },
+ { MT_BBP(AGC, 25), 0x8000005A },
+ { MT_BBP(AGC, 26), 0x007C2005 },
+ { MT_BBP(AGC, 33), 0x00003238 },
+ { MT_BBP(AGC, 34), 0x000A0C0C },
+ { MT_BBP(AGC, 37), 0x2121262C },
+ { MT_BBP(AGC, 41), 0x38383E45 },
+ { MT_BBP(AGC, 57), 0x00001010 },
+ { MT_BBP(AGC, 59), 0xBAA20E96 },
+ { MT_BBP(AGC, 63), 0x00000001 },
+ { MT_BBP(TXC, 0), 0x00280403 },
+ { MT_BBP(TXC, 1), 0x00000000 },
+ { MT_BBP(RXC, 1), 0x00000012 },
+ { MT_BBP(RXC, 2), 0x00000011 },
+ { MT_BBP(RXC, 3), 0x00000005 },
+ { MT_BBP(RXC, 4), 0x00000000 },
+ { MT_BBP(RXC, 5), 0xF977C4EC },
+ { MT_BBP(RXC, 7), 0x00000090 },
+ { MT_BBP(TXO, 8), 0x00000000 },
+ { MT_BBP(TXBE, 0), 0x00000000 },
+ { MT_BBP(TXBE, 4), 0x00000004 },
+ { MT_BBP(TXBE, 6), 0x00000000 },
+ { MT_BBP(TXBE, 8), 0x00000014 },
+ { MT_BBP(TXBE, 9), 0x20000000 },
+ { MT_BBP(TXBE, 10), 0x00000000 },
+ { MT_BBP(TXBE, 12), 0x00000000 },
+ { MT_BBP(TXBE, 13), 0x00000000 },
+ { MT_BBP(TXBE, 14), 0x00000000 },
+ { MT_BBP(TXBE, 15), 0x00000000 },
+ { MT_BBP(TXBE, 16), 0x00000000 },
+ { MT_BBP(TXBE, 17), 0x00000000 },
+ { MT_BBP(RXFE, 1), 0x00008800 },
+ { MT_BBP(RXFE, 3), 0x00000000 },
+ { MT_BBP(RXFE, 4), 0x00000000 },
+ { MT_BBP(RXO, 13), 0x00000192 },
+ { MT_BBP(RXO, 14), 0x00060612 },
+ { MT_BBP(RXO, 15), 0xC8321B18 },
+ { MT_BBP(RXO, 16), 0x0000001E },
+ { MT_BBP(RXO, 17), 0x00000000 },
+ { MT_BBP(RXO, 18), 0xCC00A993 },
+ { MT_BBP(RXO, 19), 0xB9CB9CB9 },
+ { MT_BBP(RXO, 20), 0x26c00057 },
+ { MT_BBP(RXO, 21), 0x00000001 },
+ { MT_BBP(RXO, 24), 0x00000006 },
+ { MT_BBP(RXO, 28), 0x0000003F },
};
static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 8), 0x0E344EF0}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 8), 0x122C54F2}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 4), 0x1FEDA049 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 4), 0x1FECA054 } },
+
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 6), 0x00000045 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 6), 0x0000000A } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 14), 0x310F2E39}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 14), 0x310F2A3F}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 8), 0x16344EF0 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 8), 0x122C54F2 } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 32), 0x00003230}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 32), 0x0000181C}},
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 12), 0x05052879 } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 12), 0x050528F9 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 12), 0x050528F9 } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 33), 0x00003240}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 33), 0x00003218}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 13), 0x35050004 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 13), 0x2C3A0406 } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 35), 0x11112016}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 35), 0x11112016}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 14), 0x310F2E3C } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 14), 0x310F2A3F } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXO, 28), 0x0000008A}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXO, 28), 0x0000008A}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 26), 0x007C2005 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 26), 0x007C2005 } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 4), 0x1FEDA049}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 4), 0x1FECA054}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 27), 0x000000E1 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 27), 0x000000EC } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 6), 0x00000045}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 6), 0x0000000A}},
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 28), 0x00060806 } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 28), 0x00050806 } },
+ { RF_A_BAND | RF_BW_40, { MT_BBP(AGC, 28), 0x00060801 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_80, { MT_BBP(AGC, 28), 0x00060806 } },
- {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 12), 0x05052879}},
- {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 12), 0x050528F9}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 12), 0x050528F9}},
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(RXO, 28), 0x0000008A } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 13), 0x35050004}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 13), 0x2C3A0406}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 31), 0x00000E23 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 31), 0x00000E13 } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 27), 0x000000E1}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 27), 0x000000EC}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 32), 0x00003218 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 32), 0x0000181C } },
- {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 28), 0x00060806}},
- {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00050806}},
- {RF_A_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00060801}},
- {RF_A_BAND | RF_BW_20 | RF_BW_80, {MT_BBP(AGC, 28), 0x00060806}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 33), 0x00003240 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 33), 0x00003218 } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 31), 0x00000F23}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 31), 0x00000F13}},
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 35), 0x11111616 } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 35), 0x11111516 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 35), 0x11111111 } },
- {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 39), 0x2A2A3036}},
- {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A2C36}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A3036}},
- {RF_A_BAND | RF_BW_80, {MT_BBP(AGC, 39), 0x2A2A2A36}},
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 39), 0x2A2A3036 } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 39), 0x2A2A2C36 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 39), 0x2A2A2A2A } },
- {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 43), 0x27273438}},
- {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 43), 0x27272D38}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 43), 0x27272B30}},
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 43), 0x27273438 } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 43), 0x27272D38 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 43), 0x27271A1A } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 51), 0x17171C1C}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 51), 0xFFFFFFFF}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 51), 0x17171C1C } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 51), 0xFFFFFFFF } },
- {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 53), 0x26262A2F}},
- {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 53), 0x2626322F}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 53), 0xFFFFFFFF}},
+ { RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 53), 0x26262A2F } },
+ { RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 53), 0x2626322F } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 53), 0xFFFFFFFF } },
- {RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 55), 0x40404E58}},
- {RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 55), 0x40405858}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 55), 0xFFFFFFFF}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 55), 0x40404040 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 55), 0xFFFFFFFF } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 58), 0x00001010}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 58), 0x00000000}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 58), 0x00001010 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 58), 0x00000000 } },
- {RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXFE, 0), 0x3D5000E0}},
- {RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXFE, 0), 0x895000E0}},
+ { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(RXFE, 0), 0x3D5000E0 } },
+ { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(RXFE, 0), 0x895000E0 } },
};
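
Each mt76x0_bbp_switch_item above couples a register write with band and
bandwidth flags, so that on a channel change only the rows matching the
current band and width need to be applied. A sketch of that gating; the flag
values and the accessor are illustrative, not the driver's:

#include <stddef.h>
#include <stdint.h>

#define RF_G_BAND       0x0100  /* illustrative flag values */
#define RF_A_BAND       0x0200
#define RF_BW_20        0x0001
#define RF_BW_40        0x0002
#define RF_BW_80        0x0004

struct reg_pair { uint32_t reg; uint32_t value; };
struct bbp_switch_item { uint16_t bw_band; struct reg_pair rp; };

extern void write_reg(uint32_t reg, uint32_t value);    /* stand-in */

static void apply_bbp_switch(const struct bbp_switch_item *tab, size_t n,
                             uint16_t band, uint16_t bw)
{
        for (size_t i = 0; i < n; i++)
                if ((tab[i].bw_band & band) && (tab[i].bw_band & bw))
                        write_reg(tab[i].rp.reg, tab[i].rp.value);
}
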
static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
- {MT_BBP(CAL, 47), 0x000010F0 },
- {MT_BBP(CAL, 48), 0x00008080 },
- {MT_BBP(CAL, 49), 0x00000F07 },
- {MT_BBP(CAL, 50), 0x00000040 },
- {MT_BBP(CAL, 51), 0x00000404 },
- {MT_BBP(CAL, 52), 0x00080803 },
- {MT_BBP(CAL, 53), 0x00000704 },
- {MT_BBP(CAL, 54), 0x00002828 },
- {MT_BBP(CAL, 55), 0x00005050 },
+ { MT_BBP(CAL, 47), 0x000010F0 },
+ { MT_BBP(CAL, 48), 0x00008080 },
+ { MT_BBP(CAL, 49), 0x00000F07 },
+ { MT_BBP(CAL, 50), 0x00000040 },
+ { MT_BBP(CAL, 51), 0x00000404 },
+ { MT_BBP(CAL, 52), 0x00080803 },
+ { MT_BBP(CAL, 53), 0x00000704 },
+ { MT_BBP(CAL, 54), 0x00002828 },
+ { MT_BBP(CAL, 55), 0x00005050 },
};
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
index 91a84be36d3b..7a422c590211 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
@@ -13,241 +13,13 @@
* GNU General Public License for more details.
*/
-#include "mt76x0.h"
-#include "trace.h"
#include <linux/etherdevice.h>
-static void
-mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
- enum nl80211_band band)
-{
- u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
- txrate->idx = 0;
- txrate->flags = 0;
- txrate->count = 1;
-
- switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
- case MT_PHY_TYPE_OFDM:
- if (band == NL80211_BAND_2GHZ)
- idx += 4;
-
- txrate->idx = idx;
- return;
- case MT_PHY_TYPE_CCK:
- if (idx >= 8)
- idx -= 8;
-
- txrate->idx = idx;
- return;
- case MT_PHY_TYPE_HT_GF:
- txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
- case MT_PHY_TYPE_HT:
- txrate->flags |= IEEE80211_TX_RC_MCS;
- txrate->idx = idx;
- break;
- case MT_PHY_TYPE_VHT:
- txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
- txrate->idx = idx;
- break;
- default:
- WARN_ON(1);
- return;
- }
-
- switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
- case MT_PHY_BW_20:
- break;
- case MT_PHY_BW_40:
- txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- break;
- case MT_PHY_BW_80:
- txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
- break;
- default:
- WARN_ON(1);
- return;
- }
-
- if (rate & MT_RXWI_RATE_SGI)
- txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
-}
-
-static void
-mt76_mac_fill_tx_status(struct mt76x0_dev *dev, struct ieee80211_tx_info *info,
- struct mt76_tx_status *st, int n_frames)
-{
- struct ieee80211_tx_rate *rate = info->status.rates;
- int cur_idx, last_rate;
- int i;
-
- if (!n_frames)
- return;
-
- last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
- mt76_mac_process_tx_rate(&rate[last_rate], st->rate,
- dev->mt76.chandef.chan->band);
- if (last_rate < IEEE80211_TX_MAX_RATES - 1)
- rate[last_rate + 1].idx = -1;
-
- cur_idx = rate[last_rate].idx + last_rate;
- for (i = 0; i <= last_rate; i++) {
- rate[i].flags = rate[last_rate].flags;
- rate[i].idx = max_t(int, 0, cur_idx - i);
- rate[i].count = 1;
- }
-
- rate[last_rate - 1].count = st->retry + 1 - last_rate;
-
- info->status.ampdu_len = n_frames;
- info->status.ampdu_ack_len = st->success ? n_frames : 0;
-
- if (st->pktid & MT_TXWI_PKTID_PROBE)
- info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-
- if (st->aggr)
- info->flags |= IEEE80211_TX_CTL_AMPDU |
- IEEE80211_TX_STAT_AMPDU;
-
- if (!st->ack_req)
- info->flags |= IEEE80211_TX_CTL_NO_ACK;
- else if (st->success)
- info->flags |= IEEE80211_TX_STAT_ACK;
-}
-
-u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
- const struct ieee80211_tx_rate *rate, u8 *nss_val)
-{
- u16 rateval;
- u8 phy, rate_idx;
- u8 nss = 1;
- u8 bw = 0;
-
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- rate_idx = rate->idx;
- nss = 1 + (rate->idx >> 4);
- phy = MT_PHY_TYPE_VHT;
- if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- bw = 2;
- else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- bw = 1;
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- rate_idx = rate->idx;
- nss = 1 + (rate->idx >> 3);
- phy = MT_PHY_TYPE_HT;
- if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
- phy = MT_PHY_TYPE_HT_GF;
- if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- bw = 1;
- } else {
- const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
- u16 val;
-
- r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
- if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- val = r->hw_value_short;
- else
- val = r->hw_value;
-
- phy = val >> 8;
- rate_idx = val & 0xff;
- bw = 0;
- }
-
- rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
- rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
- rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- rateval |= MT_RXWI_RATE_SGI;
-
- *nss_val = nss;
- return cpu_to_le16(rateval);
-}
-
-void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->mt76.lock, flags);
- wcid->tx_rate = mt76x0_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
- wcid->tx_rate_set = true;
- spin_unlock_irqrestore(&dev->mt76.lock, flags);
-}
-
-struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev)
-{
- struct mt76_tx_status stat = {};
- u32 stat2, stat1;
-
- stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
- stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
-
- stat.valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
- stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
- stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
- stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
- stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
- stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
-
- stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
- stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
-
- return stat;
-}
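
The removed status parser above is a dense example of the GENMASK() /
FIELD_GET() bitfield idiom used throughout this driver: a mask constant
names a field inside a hardware word, and the macro shifts the value out of
it. Below is a self-contained re-implementation for illustration only; the
kernel's versions come from <linux/bits.h> and <linux/bitfield.h>, and
TX_STAT_WCID is a hypothetical field, not the real register layout.

#include <stdint.h>

#define GENMASK(h, l)   ((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_GET(mask, val)    (((val) & (mask)) / ((mask) & -(mask)))
#define FIELD_PREP(mask, val)   (((val) * ((mask) & -(mask))) & (mask))

#define TX_STAT_WCID    GENMASK(15, 8)  /* hypothetical field */

static inline uint32_t stat_wcid(uint32_t stat1)
{
        /* (mask & -mask) isolates the lowest set bit of the mask, so
         * the divide acts as a right shift by the field offset */
        return FIELD_GET(TX_STAT_WCID, stat1);
}
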
-
-void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update)
-{
- struct ieee80211_tx_info info = {};
- struct ieee80211_sta *sta = NULL;
- struct mt76_wcid *wcid = NULL;
- struct mt76_sta *msta = NULL;
-
- rcu_read_lock();
- if (stat->wcid < ARRAY_SIZE(dev->wcid))
- wcid = rcu_dereference(dev->wcid[stat->wcid]);
-
- if (wcid) {
- void *priv;
- priv = msta = container_of(wcid, struct mt76_sta, wcid);
- sta = container_of(priv, struct ieee80211_sta, drv_priv);
- }
-
- if (msta && stat->aggr) {
- u32 stat_val, stat_cache;
-
- stat_val = stat->rate;
- stat_val |= ((u32) stat->retry) << 16;
- stat_cache = msta->status.rate;
- stat_cache |= ((u32) msta->status.retry) << 16;
-
- if (*update == 0 && stat_val == stat_cache &&
- stat->wcid == msta->status.wcid && msta->n_frames < 32) {
- msta->n_frames++;
- goto out;
- }
-
- mt76_mac_fill_tx_status(dev, &info, &msta->status,
- msta->n_frames);
- msta->status = *stat;
- msta->n_frames = 1;
- *update = 0;
- } else {
- mt76_mac_fill_tx_status(dev, &info, stat, 1);
- *update = 1;
- }
-
- spin_lock_bh(&dev->mac_lock);
- ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
- spin_unlock_bh(&dev->mac_lock);
-out:
- rcu_read_unlock();
-}
+#include "mt76x0.h"
+#include "trace.h"
-void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
- int ht_mode)
+void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
+ int ht_mode)
{
int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
@@ -305,7 +77,7 @@ void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
}
-void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
+void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb)
{
if (short_preamb)
mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
@@ -313,7 +85,7 @@ void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}
-void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
+void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval)
{
u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
@@ -333,7 +105,7 @@ void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
MT_BEACON_TIME_CFG_TBTT_EN;
}
-static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
+static void mt76x0_check_mac_err(struct mt76x02_dev *dev)
{
u32 val = mt76_rr(dev, 0x10f4);
@@ -348,15 +120,15 @@ static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
}
void mt76x0_mac_work(struct work_struct *work)
{
- struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
mac_work.work);
struct {
u32 addr_base;
u32 span;
u64 *stat_base;
} spans[] = {
- { MT_RX_STA_CNT0, 3, dev->stats.rx_stat },
- { MT_TX_STA_CNT0, 3, dev->stats.tx_stat },
+ { MT_RX_STAT_0, 3, dev->stats.rx_stat },
+ { MT_TX_STA_0, 3, dev->stats.tx_stat },
{ MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
{ MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
{ MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
@@ -399,24 +171,7 @@ void mt76x0_mac_work(struct work_struct *work)
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
}
-void
-mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
-{
- u8 zmac[ETH_ALEN] = {};
- u32 attr;
-
- attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
- FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
-
- mt76_wr(dev, MT_WCID_ATTR(idx), attr);
-
- if (mac)
- memcpy(zmac, mac, sizeof(zmac));
-
- mt76x0_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
-}
-
-void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
+void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev)
{
struct ieee80211_sta *sta;
struct mt76_wcid *wcid;
@@ -425,12 +180,12 @@ void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
int i;
rcu_read_lock();
- for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
- wcid = rcu_dereference(dev->wcid[i]);
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
+ wcid = rcu_dereference(dev->mt76.wcid[i]);
if (!wcid)
continue;
- msta = container_of(wcid, struct mt76_sta, wcid);
+ msta = container_of(wcid, struct mt76x02_sta, wcid);
sta = container_of(msta, struct ieee80211_sta, drv_priv);
min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
@@ -440,219 +195,3 @@ void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
}
-
-static void
-mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
-{
- u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
- switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
- case MT_PHY_TYPE_OFDM:
- if (idx >= 8)
- idx = 0;
-
- if (status->band == NL80211_BAND_2GHZ)
- idx += 4;
-
- status->rate_idx = idx;
- return;
- case MT_PHY_TYPE_CCK:
- if (idx >= 8) {
- idx -= 8;
- status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
- }
-
- if (idx >= 4)
- idx = 0;
-
- status->rate_idx = idx;
- return;
- case MT_PHY_TYPE_HT_GF:
- status->enc_flags |= RX_ENC_FLAG_HT_GF;
- /* fall through */
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- status->rate_idx = idx;
- break;
- case MT_PHY_TYPE_VHT:
- status->encoding = RX_ENC_VHT;
- status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
- status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
- break;
- default:
- WARN_ON(1);
- return;
- }
-
- if (rate & MT_RXWI_RATE_LDPC)
- status->enc_flags |= RX_ENC_FLAG_LDPC;
-
- if (rate & MT_RXWI_RATE_SGI)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
- if (rate & MT_RXWI_RATE_STBC)
- status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
-
- switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
- case MT_PHY_BW_20:
- break;
- case MT_PHY_BW_40:
- status->bw = RATE_INFO_BW_40;
- break;
- case MT_PHY_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- default:
- WARN_ON(1);
- break;
- }
-}
-
-static void
-mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
- u16 rate, int rssi)
-{
- dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
- dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256;
-}
-
-static int
-mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
-
- return ieee80211_is_beacon(hdr->frame_control) &&
- ether_addr_equal(hdr->addr2, dev->ap_bssid);
-}
-
-u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
- u8 *data, void *rxi)
-{
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- struct mt76x0_rxwi *rxwi = rxi;
- u32 len, ctl = le32_to_cpu(rxwi->ctl);
- u16 rate = le16_to_cpu(rxwi->rate);
- int rssi;
-
- len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
- if (WARN_ON(len < 10))
- return 0;
-
- if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
- status->flag |= RX_FLAG_DECRYPTED;
- status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
- }
-
- status->chains = BIT(0);
- rssi = mt76x0_phy_get_rssi(dev, rxwi);
- status->chain_signal[0] = status->signal = rssi;
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
-
- mt76_mac_process_rate(status, rate);
-
- spin_lock_bh(&dev->con_mon_lock);
- if (mt76x0_rx_is_our_beacon(dev, data)) {
- mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi);
- } else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) {
- if (dev->avg_rssi == 0)
- dev->avg_rssi = rssi;
- else
- dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16;
-
- }
- spin_unlock_bh(&dev->con_mon_lock);
-
- return len;
-}
-
-static enum mt76_cipher_type
-mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
-{
- memset(key_data, 0, 32);
- if (!key)
- return MT_CIPHER_NONE;
-
- if (key->keylen > 32)
- return MT_CIPHER_NONE;
-
- memcpy(key_data, key->key, key->keylen);
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MT_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MT_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MT_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_CCMP:
- return MT_CIPHER_AES_CCMP;
- default:
- return MT_CIPHER_NONE;
- }
-}
-
-int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
- struct ieee80211_key_conf *key)
-{
- enum mt76_cipher_type cipher;
- u8 key_data[32];
- u8 iv_data[8];
- u32 val;
-
- cipher = mt76_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
- return -EINVAL;
-
- trace_mt76x0_set_key(&dev->mt76, idx);
-
- mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
-
- memset(iv_data, 0, sizeof(iv_data));
- if (key) {
- iv_data[3] = key->keyidx << 6;
- if (cipher >= MT_CIPHER_TKIP) {
- /* Note: start with 1 to comply with spec,
- * (see comment on common/cmm_wpa.c:4291).
- */
- iv_data[0] |= 1;
- iv_data[3] |= 0x20;
- }
- }
- mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
-
- val = mt76_rr(dev, MT_WCID_ATTR(idx));
- val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
- val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
- FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
- val &= ~MT_WCID_ATTR_PAIRWISE;
- val |= MT_WCID_ATTR_PAIRWISE *
- !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
- mt76_wr(dev, MT_WCID_ATTR(idx), val);
-
- return 0;
-}
-
-int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key)
-{
- enum mt76_cipher_type cipher;
- u8 key_data[32];
- u32 val;
-
- cipher = mt76_mac_get_key_info(key, key_data);
- if (cipher == MT_CIPHER_NONE && key)
- return -EINVAL;
-
- trace_mt76x0_set_shared_key(&dev->mt76, vif_idx, key_idx);
-
- mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
- key_data, sizeof(key_data));
-
- val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
- val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
- val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
- mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
-
- return 0;
-}
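
The deleted key-programming helpers above (their replacements live in the
shared mt76x02 code) also illustrate the driver's standard read-modify-write
pattern for multi-field registers: read, clear the target fields, OR in the
new values, write back; this is the same sequence mt76_rmw() wraps. A
generic sketch with stand-in accessors:

#include <stdint.h>

extern uint32_t read_reg(uint32_t addr);                /* stand-ins */
extern void write_reg(uint32_t addr, uint32_t value);

static void rmw_field(uint32_t addr, uint32_t mask, uint32_t bits)
{
        uint32_t val = read_reg(addr);

        val &= ~mask;           /* clear the field */
        val |= bits & mask;     /* install the new value */
        write_reg(addr, val);
}
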
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
deleted file mode 100644
index bea067b71c13..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76_MAC_H
-#define __MT76_MAC_H
-
-/* Note: values in original "RSSI" and "SNR" fields are not actually what they
- * are called for MT76X0U, names used by this driver are educated guesses
- * (see vendor mac/ral_omac.c).
- */
-struct mt76x0_rxwi {
- __le32 rxinfo;
-
- __le32 ctl;
-
- __le16 tid_sn;
- __le16 rate;
-
- s8 rssi[4];
-
- __le32 bbp_rxinfo[4];
-} __packed __aligned(4);
-
-#define MT_RXINFO_BA BIT(0)
-#define MT_RXINFO_DATA BIT(1)
-#define MT_RXINFO_NULL BIT(2)
-#define MT_RXINFO_FRAG BIT(3)
-#define MT_RXINFO_U2M BIT(4)
-#define MT_RXINFO_MULTICAST BIT(5)
-#define MT_RXINFO_BROADCAST BIT(6)
-#define MT_RXINFO_MYBSS BIT(7)
-#define MT_RXINFO_CRCERR BIT(8)
-#define MT_RXINFO_ICVERR BIT(9)
-#define MT_RXINFO_MICERR BIT(10)
-#define MT_RXINFO_AMSDU BIT(11)
-#define MT_RXINFO_HTC BIT(12)
-#define MT_RXINFO_RSSI BIT(13)
-#define MT_RXINFO_L2PAD BIT(14)
-#define MT_RXINFO_AMPDU BIT(15)
-#define MT_RXINFO_DECRYPT BIT(16)
-#define MT_RXINFO_BSSIDX3 BIT(17)
-#define MT_RXINFO_WAPI_KEY BIT(18)
-#define MT_RXINFO_PN_LEN GENMASK(21, 19)
-#define MT_RXINFO_SW_PKT_80211 BIT(22)
-#define MT_RXINFO_TCP_SUM_BYPASS BIT(28)
-#define MT_RXINFO_IP_SUM_BYPASS BIT(29)
-#define MT_RXINFO_TCP_SUM_ERR BIT(30)
-#define MT_RXINFO_IP_SUM_ERR BIT(31)
-
-#define MT_RXWI_CTL_WCID GENMASK(7, 0)
-#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
-#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
-#define MT_RXWI_CTL_UDF GENMASK(15, 13)
-#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16)
-#define MT_RXWI_CTL_TID GENMASK(31, 28)
-
-#define MT_RXWI_FRAG GENMASK(3, 0)
-#define MT_RXWI_SN GENMASK(15, 4)
-
-#define MT_RXWI_RATE_INDEX GENMASK(5, 0)
-#define MT_RXWI_RATE_LDPC BIT(6)
-#define MT_RXWI_RATE_BW GENMASK(8, 7)
-#define MT_RXWI_RATE_SGI BIT(9)
-#define MT_RXWI_RATE_STBC BIT(10)
-#define MT_RXWI_RATE_LDPC_ETXBF BIT(11)
-#define MT_RXWI_RATE_SND BIT(12)
-#define MT_RXWI_RATE_PHY GENMASK(15, 13)
-
-#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0)
-#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4)
-
-#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0)
-#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6)
-#define MT_RXWI_ANT_AUX_LNA BIT(7)
-
-#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0)
-
-enum mt76_phy_bandwidth {
- MT_PHY_BW_20,
- MT_PHY_BW_40,
- MT_PHY_BW_80,
-};
-
-struct mt76_txwi {
- __le16 flags;
- __le16 rate_ctl;
- u8 ack_ctl;
- u8 wcid;
- __le16 len_ctl;
- __le32 iv;
- __le32 eiv;
- u8 aid;
- u8 txstream;
- u8 ctl2;
- u8 pktid;
-} __packed __aligned(4);
-
-#define MT_TXWI_FLAGS_FRAG BIT(0)
-#define MT_TXWI_FLAGS_MMPS BIT(1)
-#define MT_TXWI_FLAGS_CFACK BIT(2)
-#define MT_TXWI_FLAGS_TS BIT(3)
-#define MT_TXWI_FLAGS_AMPDU BIT(4)
-#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
-#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
-#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10)
-#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
-#define MT_TXWI_FLAGS_TX_RPT BIT(14)
-#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
-
-#define MT_TXWI_RATE_MCS GENMASK(6, 0)
-#define MT_TXWI_RATE_BW BIT(7)
-#define MT_TXWI_RATE_SGI BIT(8)
-#define MT_TXWI_RATE_STBC GENMASK(10, 9)
-#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14)
-
-#define MT_TXWI_ACK_CTL_REQ BIT(0)
-#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
-#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
-
-#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0)
-
-#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0)
-#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4)
-#define MT_TXWI_CTL_PIFS_REV BIT(6)
-
-#define MT_TXWI_PKTID_PROBE BIT(7)
-
-u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
- u8 *data, void *rxi);
-int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
- struct ieee80211_key_conf *key);
-void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate);
-
-int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key);
-u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
- const struct ieee80211_tx_rate *rate, u8 *nss_val);
-struct mt76_tx_status
-mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev);
-void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update);
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index cf6ffb1ba4a2..9273d2d2764a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -13,145 +13,72 @@
* GNU General Public License for more details.
*/
-#include "mt76x0.h"
-#include "mac.h"
#include <linux/etherdevice.h>
+#include "mt76x0.h"
-static int mt76x0_start(struct ieee80211_hw *hw)
+static int
+mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
{
- struct mt76x0_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mutex);
+ cancel_delayed_work_sync(&dev->cal_work);
- ret = mt76x0_mac_start(dev);
- if (ret)
- goto out;
+ mt76_set_channel(&dev->mt76);
+ ret = mt76x0_phy_set_channel(dev, chandef);
+ mt76_txq_schedule_all(&dev->mt76);
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
- MT_CALIBRATE_INTERVAL);
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
- MT_CALIBRATE_INTERVAL);
-out:
- mutex_unlock(&dev->mutex);
return ret;
}
-static void mt76x0_stop(struct ieee80211_hw *hw)
-{
- struct mt76x0_dev *dev = hw->priv;
-
- mutex_lock(&dev->mutex);
-
- cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mac_work);
- mt76x0_mac_stop(dev);
-
- mutex_unlock(&dev->mutex);
-}
-
-
-static int mt76x0_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
- struct mt76x0_dev *dev = hw->priv;
- struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
- unsigned int idx;
-
- idx = ffs(~dev->vif_mask);
- if (!idx || idx > 8)
- return -ENOSPC;
-
- idx--;
- dev->vif_mask |= BIT(idx);
-
- mvif->idx = idx;
- mvif->group_wcid.idx = GROUP_WCID(idx);
- mvif->group_wcid.hw_key_idx = -1;
-
- return 0;
-}
+ struct mt76x02_dev *dev = hw->priv;
+ int ret = 0;
-static void mt76x0_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct mt76x0_dev *dev = hw->priv;
- struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
- unsigned int wcid = mvif->group_wcid.idx;
+ mutex_lock(&dev->mt76.mutex);
- dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
-}
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt76x0_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
-static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct mt76x0_dev *dev = hw->priv;
- int ret = 0;
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ dev->mt76.txpower_conf = hw->conf.power_level * 2;
- mutex_lock(&dev->mutex);
+ if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ mt76x0_phy_set_txpower(dev);
+ }
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
- dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
else
- dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
-
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
- }
+ dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
}
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(mt76x0_config);
static void
-mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast)
+mt76x0_addr_wr(struct mt76x02_dev *dev, const u32 offset, const u8 *addr)
{
- struct mt76x0_dev *dev = hw->priv;
- u32 flags = 0;
-
-#define MT76_FILTER(_flag, _hw) do { \
- flags |= *total_flags & FIF_##_flag; \
- dev->rxfilter &= ~(_hw); \
- dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
- } while (0)
-
- mutex_lock(&dev->mutex);
-
- dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
-
- MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
- MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
- MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
- MT_RX_FILTR_CFG_CTS |
- MT_RX_FILTR_CFG_CFEND |
- MT_RX_FILTR_CFG_CFACK |
- MT_RX_FILTR_CFG_BA |
- MT_RX_FILTR_CFG_CTRL_RSV);
- MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
-
- *total_flags = flags;
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
- mutex_unlock(&dev->mutex);
+ mt76_wr(dev, offset, get_unaligned_le32(addr));
+ mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
}
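
mt76x0_addr_wr() above packs a six-byte MAC address into two 32-bit register
writes: bytes 0-3 as one little-endian word, bytes 4-5 in the low half of
the second. A standalone illustration of the same packing, with
get_unaligned_le32() open-coded:

#include <stdint.h>

static inline uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void pack_addr(const uint8_t addr[6], uint32_t out[2])
{
        out[0] = get_le32(addr);                        /* bytes 0..3 */
        out[1] = (uint32_t)addr[4] | (uint32_t)addr[5] << 8; /* bytes 4..5 */
}
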
-static void
-mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
+void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
{
- struct mt76x0_dev *dev = hw->priv;
-
- mutex_lock(&dev->mutex);
+ struct mt76x02_dev *dev = hw->priv;
- if (changed & BSS_CHANGED_ASSOC)
- mt76x0_phy_con_cal_onoff(dev, info);
+ mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_BSSID) {
mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
@@ -166,8 +93,8 @@ mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & BSS_CHANGED_BASIC_RATES) {
mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
- mt76_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
- mt76_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+ mt76_wr(dev, MT_VHT_HT_FBK_CFG0, 0x65432100);
+ mt76_wr(dev, MT_VHT_HT_FBK_CFG1, 0xedcba980);
mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
}
@@ -192,212 +119,34 @@ mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & BSS_CHANGED_ASSOC)
mt76x0_phy_recalibrate_after_assoc(dev);
- mutex_unlock(&dev->mutex);
-}
-
-static int
-mt76x0_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x0_dev *dev = hw->priv;
- struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
- struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
- int ret = 0;
- int idx = 0;
-
- mutex_lock(&dev->mutex);
-
- idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
- if (idx < 0) {
- ret = -ENOSPC;
- goto out;
- }
-
- msta->wcid.idx = idx;
- msta->wcid.hw_key_idx = -1;
- mt76x0_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
- mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
- rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
- mt76x0_mac_set_ampdu_factor(dev);
-
-out:
- mutex_unlock(&dev->mutex);
-
- return ret;
-}
-
-static int
-mt76x0_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x0_dev *dev = hw->priv;
- struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
- int idx = msta->wcid.idx;
-
- mutex_lock(&dev->mutex);
- rcu_assign_pointer(dev->wcid[idx], NULL);
- mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
- dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
- mt76x0_mac_wcid_setup(dev, idx, 0, NULL);
- mt76x0_mac_set_ampdu_factor(dev);
- mutex_unlock(&dev->mutex);
-
- return 0;
+ mutex_unlock(&dev->mt76.mutex);
}
+EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed);
-static void
-mt76x0_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
-{
-}
-
-static void
-mt76x0_sw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *mac_addr)
+void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac_addr)
{
- struct mt76x0_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- cancel_delayed_work_sync(&dev->cal_work);
- mt76x0_agc_save(dev);
set_bit(MT76_SCANNING, &dev->mt76.state);
}
+EXPORT_SYMBOL_GPL(mt76x0_sw_scan);
-static void
-mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
- struct mt76x0_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- mt76x0_agc_restore(dev);
clear_bit(MT76_SCANNING, &dev->mt76.state);
-
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
- MT_CALIBRATE_INTERVAL);
-}
-
-static int
-mt76x0_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct mt76x0_dev *dev = hw->priv;
- struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
- struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
- struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
- int idx = key->keyidx;
- int ret;
-
- if (cmd == SET_KEY) {
- key->hw_key_idx = wcid->idx;
- wcid->hw_key_idx = idx;
- } else {
- if (idx == wcid->hw_key_idx)
- wcid->hw_key_idx = -1;
-
- key = NULL;
- }
-
- if (!msta) {
- if (key || wcid->hw_key_idx == idx) {
- ret = mt76x0_mac_wcid_set_key(dev, wcid->idx, key);
- if (ret)
- return ret;
- }
-
- return mt76x0_mac_shared_key_setup(dev, mvif->idx, idx, key);
- }
-
- return mt76x0_mac_wcid_set_key(dev, msta->wcid.idx, key);
}
+EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);
-static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct mt76x0_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
return 0;
}
-
-static int
-mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
-{
- struct mt76x0_dev *dev = hw->priv;
- struct ieee80211_sta *sta = params->sta;
- enum ieee80211_ampdu_mlme_action action = params->action;
- u16 tid = params->tid;
- u16 *ssn = &params->ssn;
- struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
-
- WARN_ON(msta->wcid.idx > N_WCIDS);
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_RX_STOP:
- mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
- break;
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- break;
- case IEEE80211_AMPDU_TX_START:
- msta->agg_ssn[tid] = *ssn << 4;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- }
-
- return 0;
-}
-
-static void
-mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x0_dev *dev = hw->priv;
- struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
- struct ieee80211_sta_rates *rates;
- struct ieee80211_tx_rate rate = {};
-
- rcu_read_lock();
- rates = rcu_dereference(sta->rates);
-
- if (!rates)
- goto out;
-
- rate.idx = rates->rate[0].idx;
- rate.flags = rates->rate[0].flags;
- mt76x0_mac_wcid_set_rate(dev, &msta->wcid, &rate);
-
-out:
- rcu_read_unlock();
-}
-
-const struct ieee80211_ops mt76x0_ops = {
- .tx = mt76x0_tx,
- .start = mt76x0_start,
- .stop = mt76x0_stop,
- .add_interface = mt76x0_add_interface,
- .remove_interface = mt76x0_remove_interface,
- .config = mt76x0_config,
- .configure_filter = mt76_configure_filter,
- .bss_info_changed = mt76x0_bss_info_changed,
- .sta_add = mt76x0_sta_add,
- .sta_remove = mt76x0_sta_remove,
- .sta_notify = mt76x0_sta_notify,
- .set_key = mt76x0_set_key,
- .conf_tx = mt76x0_conf_tx,
- .sw_scan_start = mt76x0_sw_scan,
- .sw_scan_complete = mt76x0_sw_scan_complete,
- .ampdu_action = mt76_ampdu_action,
- .sta_rate_tbl_update = mt76_sta_rate_tbl_update,
- .set_rts_threshold = mt76x0_set_rts_threshold,
-};
+EXPORT_SYMBOL_GPL(mt76x0_set_rts_threshold);
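The hunks above convert the mac80211 callbacks to take the shared mt76x02_dev and export them with EXPORT_SYMBOL_GPL, so one implementation can be referenced from both the USB and PCIe ops tables. A minimal standalone C sketch of that sharing pattern (all names here are illustrative, not taken from the driver):

#include <stdio.h>

struct demo_ops {
	int (*set_rts_threshold)(void *priv, unsigned int value);
};

/* single shared implementation, analogous to the exported helpers above */
static int shared_set_rts_threshold(void *priv, unsigned int value)
{
	(void)priv;
	printf("RTS threshold -> %u\n", value);
	return 0;
}

/* each "bus" module points its ops table at the shared function */
static const struct demo_ops usb_ops = {
	.set_rts_threshold = shared_set_rts_threshold,
};

static const struct demo_ops pci_ops = {
	.set_rts_threshold = shared_set_rts_threshold,
};

int main(void)
{
	usb_ops.set_rts_threshold(NULL, 2347);
	pci_ops.set_rts_threshold(NULL, 2347);
	return 0;
}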
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
deleted file mode 100644
index 8affacbab90a..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
+++ /dev/null
@@ -1,656 +0,0 @@
-/*
- * (c) Copyright 2002-2010, Ralink Technology, Inc.
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/firmware.h>
-#include <linux/delay.h>
-#include <linux/usb.h>
-#include <linux/skbuff.h>
-
-#include "mt76x0.h"
-#include "dma.h"
-#include "mcu.h"
-#include "usb.h"
-#include "trace.h"
-
-#define MCU_FW_URB_MAX_PAYLOAD 0x38f8
-#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
-#define MCU_RESP_URB_SIZE 1024
-
-static inline int firmware_running(struct mt76x0_dev *dev)
-{
- return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
-}
-
-static inline void skb_put_le32(struct sk_buff *skb, u32 val)
-{
- put_unaligned_le32(val, skb_put(skb, 4));
-}
-
-static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb,
- u8 seq, enum mcu_cmd cmd)
-{
- WARN_ON(mt76x0_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
- FIELD_PREP(MT_TXD_CMD_SEQ, seq) |
- FIELD_PREP(MT_TXD_CMD_TYPE, cmd)));
-}
-
-static inline void trace_mt76x0_mcu_msg_send_cs(struct mt76_dev *dev,
- struct sk_buff *skb, bool need_resp)
-{
- u32 i, csum = 0;
-
- for (i = 0; i < skb->len / 4; i++)
- csum ^= get_unaligned_le32(skb->data + i * 4);
-
- trace_mt76x0_mcu_msg_send(dev, skb, csum, need_resp);
-}
-
-static struct sk_buff *
-mt76x0_mcu_msg_alloc(struct mt76x0_dev *dev, const void *data, int len)
-{
- struct sk_buff *skb;
-
- WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
-
- skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
- if (skb) {
- skb_reserve(skb, MT_DMA_HDR_LEN);
- memcpy(skb_put(skb, len), data, len);
- }
- return skb;
-}
-
-static void mt76x0_read_resp_regs(struct mt76x0_dev *dev, int len)
-{
- int i;
- int n = dev->mcu.reg_pairs_len;
- u8 *buf = dev->mcu.resp.buf;
-
- buf += 4;
- len -= 8;
-
- if (dev->mcu.burst_read) {
- u32 reg = dev->mcu.reg_pairs[0].reg - dev->mcu.reg_base;
-
- WARN_ON_ONCE(len/4 != n);
- for (i = 0; i < n; i++) {
- u32 val = get_unaligned_le32(buf + 4*i);
-
- dev->mcu.reg_pairs[i].reg = reg++;
- dev->mcu.reg_pairs[i].value = val;
- }
- } else {
- WARN_ON_ONCE(len/8 != n);
- for (i = 0; i < n; i++) {
- u32 reg = get_unaligned_le32(buf + 8*i) - dev->mcu.reg_base;
- u32 val = get_unaligned_le32(buf + 8*i + 4);
-
- WARN_ON_ONCE(dev->mcu.reg_pairs[i].reg != reg);
- dev->mcu.reg_pairs[i].value = val;
- }
- }
-}
-
-static int mt76x0_mcu_wait_resp(struct mt76x0_dev *dev, u8 seq)
-{
- struct urb *urb = dev->mcu.resp.urb;
- u32 rxfce;
- int urb_status, ret, try = 5;
-
- while (try--) {
- if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
- msecs_to_jiffies(300))) {
- dev_warn(dev->mt76.dev, "Warning: %s retrying\n", __func__);
- continue;
- }
-
- /* Make copies of important data before reusing the urb */
- rxfce = get_unaligned_le32(dev->mcu.resp.buf);
- urb_status = urb->status * mt76x0_urb_has_error(urb);
-
- if (urb_status == 0 && dev->mcu.reg_pairs)
- mt76x0_read_resp_regs(dev, urb->actual_length);
-
- ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
- &dev->mcu.resp, GFP_KERNEL,
- mt76x0_complete_urb,
- &dev->mcu.resp_cmpl);
- if (ret)
- return ret;
-
- if (urb_status)
- dev_err(dev->mt76.dev, "Error: MCU resp urb failed:%d\n",
- urb_status);
-
- if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
- FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
- return 0;
-
- dev_err(dev->mt76.dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
- FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
- seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
- }
-
- dev_err(dev->mt76.dev, "Error: %s timed out\n", __func__);
- return -ETIMEDOUT;
-}
-
-static int
-__mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
- enum mcu_cmd cmd, bool wait_resp)
-{
- struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
- unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
- dev->out_ep[MT_EP_OUT_INBAND_CMD]);
- int sent, ret;
- u8 seq = 0;
-
- if (wait_resp)
- while (!seq)
- seq = ++dev->mcu.msg_seq & 0xf;
-
- mt76x0_dma_skb_wrap_cmd(skb, seq, cmd);
-
- if (dev->mcu.resp_cmpl.done)
- dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n");
-
- trace_mt76x0_mcu_msg_send_cs(&dev->mt76, skb, wait_resp);
- trace_mt76x0_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len);
-
- ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
- if (ret) {
- dev_err(dev->mt76.dev, "Error: send MCU cmd failed:%d\n", ret);
- goto out;
- }
- if (sent != skb->len)
- dev_err(dev->mt76.dev, "Error: %s sent != skb->len\n", __func__);
-
- if (wait_resp)
- ret = mt76x0_mcu_wait_resp(dev, seq);
-
-out:
- return ret;
-}
-
-static int
-mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
- enum mcu_cmd cmd, bool wait_resp)
-{
- int ret;
-
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
- return 0;
-
- mutex_lock(&dev->mcu.mutex);
- ret = __mt76x0_mcu_msg_send(dev, skb, cmd, wait_resp);
- mutex_unlock(&dev->mcu.mutex);
-
- consume_skb(skb);
-
- return ret;
-}
-
-int mt76x0_mcu_function_select(struct mt76x0_dev *dev,
- enum mcu_function func, u32 val)
-{
- struct sk_buff *skb;
- struct {
- __le32 id;
- __le32 value;
- } __packed __aligned(4) msg = {
- .id = cpu_to_le32(func),
- .value = cpu_to_le32(val),
- };
-
- skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
- if (!skb)
- return -ENOMEM;
- return mt76x0_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
-}
-
-int
-mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val)
-{
- struct sk_buff *skb;
- struct {
- __le32 id;
- __le32 value;
- } __packed __aligned(4) msg = {
- .id = cpu_to_le32(cal),
- .value = cpu_to_le32(val),
- };
-
- skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
- if (!skb)
- return -ENOMEM;
- return mt76x0_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
-}
-
-int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
- const struct mt76_reg_pair *data, int n)
-{
- const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
- struct sk_buff *skb;
- int cnt, i, ret;
-
- if (!n)
- return 0;
-
- cnt = min(max_vals_per_cmd, n);
-
- skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- skb_reserve(skb, MT_DMA_HDR_LEN);
-
- for (i = 0; i < cnt; i++) {
- skb_put_le32(skb, base + data[i].reg);
- skb_put_le32(skb, data[i].value);
- }
-
- ret = mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
- if (ret)
- return ret;
-
- return mt76x0_write_reg_pairs(dev, base, data + cnt, n - cnt);
-}
-
-int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
- struct mt76_reg_pair *data, int n)
-{
- const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
- struct sk_buff *skb;
- int cnt, i, ret;
-
- if (!n)
- return 0;
-
- cnt = min(max_vals_per_cmd, n);
- if (cnt != n)
- return -EINVAL;
-
- skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- skb_reserve(skb, MT_DMA_HDR_LEN);
-
- for (i = 0; i < cnt; i++) {
- skb_put_le32(skb, base + data[i].reg);
- skb_put_le32(skb, data[i].value);
- }
-
- mutex_lock(&dev->mcu.mutex);
-
- dev->mcu.reg_pairs = data;
- dev->mcu.reg_pairs_len = n;
- dev->mcu.reg_base = base;
- dev->mcu.burst_read = false;
-
- ret = __mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_READ, true);
-
- dev->mcu.reg_pairs = NULL;
-
- mutex_unlock(&dev->mcu.mutex);
-
- consume_skb(skb);
-
- return ret;
-
-}
-
-int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
- const u32 *data, int n)
-{
- const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
- struct sk_buff *skb;
- int cnt, i, ret;
-
- if (!n)
- return 0;
-
- cnt = min(max_regs_per_cmd, n);
-
- skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- skb_reserve(skb, MT_DMA_HDR_LEN);
-
- skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
- for (i = 0; i < cnt; i++)
- skb_put_le32(skb, data[i]);
-
- ret = mt76x0_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
- if (ret)
- return ret;
-
- return mt76x0_burst_write_regs(dev, offset + cnt * 4,
- data + cnt, n - cnt);
-}
-
-#if 0
-static int mt76x0_burst_read_regs(struct mt76x0_dev *dev, u32 base,
- struct mt76_reg_pair *data, int n)
-{
- const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
- struct sk_buff *skb;
- int cnt, ret;
-
- if (!n)
- return 0;
-
- cnt = min(max_vals_per_cmd, n);
- if (cnt != n)
- return -EINVAL;
-
- skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- skb_reserve(skb, MT_DMA_HDR_LEN);
-
- skb_put_le32(skb, base + data[0].reg);
- skb_put_le32(skb, n);
-
- mutex_lock(&dev->mcu.mutex);
-
- dev->mcu.reg_pairs = data;
- dev->mcu.reg_pairs_len = n;
- dev->mcu.reg_base = base;
- dev->mcu.burst_read = true;
-
- ret = __mt76x0_mcu_msg_send(dev, skb, CMD_BURST_READ, true);
-
- dev->mcu.reg_pairs = NULL;
-
- mutex_unlock(&dev->mcu.mutex);
-
- consume_skb(skb);
-
- return ret;
-}
-#endif
-
-struct mt76_fw_header {
- __le32 ilm_len;
- __le32 dlm_len;
- __le16 build_ver;
- __le16 fw_ver;
- u8 pad[4];
- char build_time[16];
-};
-
-struct mt76_fw {
- struct mt76_fw_header hdr;
- u8 ivb[MT_MCU_IVB_SIZE];
- u8 ilm[];
-};
-
-static int __mt76x0_dma_fw(struct mt76x0_dev *dev,
- const struct mt76x0_dma_buf *dma_buf,
- const void *data, u32 len, u32 dst_addr)
-{
- DECLARE_COMPLETION_ONSTACK(cmpl);
- struct mt76x0_dma_buf buf = *dma_buf; /* we need to fake length */
- __le32 reg;
- u32 val;
- int ret;
-
- reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_COMMAND) |
- FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
- FIELD_PREP(MT_TXD_INFO_LEN, len));
- memcpy(buf.buf, &reg, sizeof(reg));
- memcpy(buf.buf + sizeof(reg), data, len);
- memset(buf.buf + sizeof(reg) + len, 0, 8);
-
- ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
- MT_FCE_DMA_ADDR, dst_addr);
- if (ret)
- return ret;
- len = roundup(len, 4);
- ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
- MT_FCE_DMA_LEN, len << 16);
- if (ret)
- return ret;
-
- buf.len = MT_DMA_HDR_LEN + len + 4;
- ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
- &buf, GFP_KERNEL,
- mt76x0_complete_urb, &cmpl);
- if (ret)
- return ret;
-
- if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
- dev_err(dev->mt76.dev, "Error: firmware upload timed out\n");
- usb_kill_urb(buf.urb);
- return -ETIMEDOUT;
- }
- if (mt76x0_urb_has_error(buf.urb)) {
- dev_err(dev->mt76.dev, "Error: firmware upload urb failed:%d\n",
- buf.urb->status);
- return buf.urb->status;
- }
-
- val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
- val++;
- mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
-
- msleep(5);
-
- return 0;
-}
-
-static int
-mt76x0_dma_fw(struct mt76x0_dev *dev, struct mt76x0_dma_buf *dma_buf,
- const void *data, int len, u32 dst_addr)
-{
- int n, ret;
-
- if (len == 0)
- return 0;
-
- n = min(MCU_FW_URB_MAX_PAYLOAD, len);
- ret = __mt76x0_dma_fw(dev, dma_buf, data, n, dst_addr);
- if (ret)
- return ret;
-
-#if 0
- if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
- return -ETIMEDOUT;
-#endif
-
- return mt76x0_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
-}
-
-static int
-mt76x0_upload_firmware(struct mt76x0_dev *dev, const struct mt76_fw *fw)
-{
- struct mt76x0_dma_buf dma_buf;
- void *ivb;
- u32 ilm_len, dlm_len;
- int i, ret;
-
- ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
- if (!ivb)
- return -ENOMEM;
- if (mt76x0_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
- ret = -ENOMEM;
- goto error;
- }
-
- ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
- dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %zu\n",
- ilm_len, sizeof(fw->ivb));
- ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
- if (ret)
- goto error;
-
- dlm_len = le32_to_cpu(fw->hdr.dlm_len);
- dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
- ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
- dlm_len, MT_MCU_DLM_OFFSET);
- if (ret)
- goto error;
-
- ret = mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
- 0x12, 0, ivb, sizeof(fw->ivb));
- if (ret < 0)
- goto error;
- ret = 0;
-
- for (i = 100; i && !firmware_running(dev); i--)
- msleep(10);
- if (!i) {
- ret = -ETIMEDOUT;
- goto error;
- }
-
- dev_dbg(dev->mt76.dev, "Firmware running!\n");
-error:
- kfree(ivb);
- mt76x0_usb_free_buf(dev, &dma_buf);
-
- return ret;
-}
-
-static int mt76x0_load_firmware(struct mt76x0_dev *dev)
-{
- const struct firmware *fw;
- const struct mt76_fw_header *hdr;
- int len, ret;
- u32 val;
-
- mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
- MT_USB_DMA_CFG_TX_BULK_EN));
-
- if (firmware_running(dev))
- return 0;
-
- ret = request_firmware(&fw, MT7610_FIRMWARE, dev->mt76.dev);
- if (ret)
- return ret;
-
- if (!fw || !fw->data || fw->size < sizeof(*hdr))
- goto err_inv_fw;
-
- hdr = (const struct mt76_fw_header *) fw->data;
-
- if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
- goto err_inv_fw;
-
- len = sizeof(*hdr);
- len += le32_to_cpu(hdr->ilm_len);
- len += le32_to_cpu(hdr->dlm_len);
-
- if (fw->size != len)
- goto err_inv_fw;
-
- val = le16_to_cpu(hdr->fw_ver);
- dev_dbg(dev->mt76.dev,
- "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
- (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
- le16_to_cpu(hdr->build_ver), hdr->build_time);
-
- len = le32_to_cpu(hdr->ilm_len);
-
- mt76_wr(dev, 0x1004, 0x2c);
-
- mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
- MT_USB_DMA_CFG_TX_BULK_EN) |
- FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
- mt76x0_vendor_reset(dev);
- msleep(5);
-/*
- mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
- MT_PBF_CFG_TX1Q_EN |
- MT_PBF_CFG_TX2Q_EN |
- MT_PBF_CFG_TX3Q_EN));
-*/
-
- mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
-
- /* FCE tx_fs_base_ptr */
- mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
- /* FCE tx_fs_max_cnt */
- mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
- /* FCE pdma enable */
- mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
- /* FCE skip_fs_en */
- mt76_wr(dev, MT_FCE_SKIP_FS, 3);
-
- val = mt76_rr(dev, MT_USB_DMA_CFG);
- val |= MT_USB_DMA_CFG_TX_WL_DROP;
- mt76_wr(dev, MT_USB_DMA_CFG, val);
- val &= ~MT_USB_DMA_CFG_TX_WL_DROP;
- mt76_wr(dev, MT_USB_DMA_CFG, val);
-
- ret = mt76x0_upload_firmware(dev, (const struct mt76_fw *)fw->data);
- release_firmware(fw);
-
- mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
-
- return ret;
-
-err_inv_fw:
- dev_err(dev->mt76.dev, "Invalid firmware image\n");
- release_firmware(fw);
- return -ENOENT;
-}
-
-int mt76x0_mcu_init(struct mt76x0_dev *dev)
-{
- int ret;
-
- mutex_init(&dev->mcu.mutex);
-
- ret = mt76x0_load_firmware(dev);
- if (ret)
- return ret;
-
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
-
- return 0;
-}
-
-int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev)
-{
- int ret;
-
- ret = mt76x0_mcu_function_select(dev, Q_SELECT, 1);
- if (ret)
- return ret;
-
- init_completion(&dev->mcu.resp_cmpl);
- if (mt76x0_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
- mt76x0_usb_free_buf(dev, &dev->mcu.resp);
- return -ENOMEM;
- }
-
- ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
- &dev->mcu.resp, GFP_KERNEL,
- mt76x0_complete_urb, &dev->mcu.resp_cmpl);
- if (ret) {
- mt76x0_usb_free_buf(dev, &dev->mcu.resp);
- return ret;
- }
-
- return 0;
-}
-
-void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev)
-{
- usb_kill_urb(dev->mcu.resp.urb);
- mt76x0_usb_free_buf(dev, &dev->mcu.resp);
-}
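The deleted mt76x0_write_reg_pairs() above capped each in-band command at INBAND_PACKET_MAX_LEN / 8 register/value pairs (8 bytes per pair) and recursed on the remainder, waiting for an MCU response only on the final chunk (the cnt == n argument). A standalone sketch of that chunking, assuming the same 192-byte limit:

#include <stdio.h>

#define INBAND_PACKET_MAX_LEN 192

int main(void)
{
	int n = 50;					/* pairs to write */
	const int per_cmd = INBAND_PACKET_MAX_LEN / 8;	/* 24 pairs/cmd */
	int chunk = 0;

	while (n > 0) {
		int cnt = n < per_cmd ? n : per_cmd;

		/* wait_resp is true only for the last command, as in
		 * the deleted driver code */
		printf("cmd %d: %d pairs, wait_resp=%d\n",
		       chunk++, cnt, cnt == n);
		n -= cnt;
	}
	return 0;
}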
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
index 8c2f77f4c3f5..3b34e1d2769f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -15,65 +15,18 @@
#ifndef __MT76X0U_MCU_H
#define __MT76X0U_MCU_H
-struct mt76x0_dev;
+#include "../mt76x02_mcu.h"
-/* Register definitions */
-#define MT_MCU_RESET_CTL 0x070C
-#define MT_MCU_INT_LEVEL 0x0718
-#define MT_MCU_COM_REG0 0x0730
-#define MT_MCU_COM_REG1 0x0734
-#define MT_MCU_COM_REG2 0x0738
-#define MT_MCU_COM_REG3 0x073C
+struct mt76x02_dev;
#define MT_MCU_IVB_SIZE 0x40
#define MT_MCU_DLM_OFFSET 0x80000
-#define MT_MCU_MEMMAP_WLAN 0x00410000
/* We use the same space for BBP as for MAC regs
* #define MT_MCU_MEMMAP_BBP 0x40000000
*/
#define MT_MCU_MEMMAP_RF 0x80000000
-#define INBAND_PACKET_MAX_LEN 192
-
-enum mcu_cmd {
- CMD_FUN_SET_OP = 1,
- CMD_LOAD_CR = 2,
- CMD_INIT_GAIN_OP = 3,
- CMD_DYNC_VGA_OP = 6,
- CMD_TDLS_CH_SW = 7,
- CMD_BURST_WRITE = 8,
- CMD_READ_MODIFY_WRITE = 9,
- CMD_RANDOM_READ = 10,
- CMD_BURST_READ = 11,
- CMD_RANDOM_WRITE = 12,
- CMD_LED_MODE_OP = 16,
- CMD_POWER_SAVING_OP = 20,
- CMD_WOW_CONFIG = 21,
- CMD_WOW_QUERY = 22,
- CMD_WOW_FEATURE = 24,
- CMD_CARRIER_DETECT_OP = 28,
- CMD_RADOR_DETECT_OP = 29,
- CMD_SWITCH_CHANNEL_OP = 30,
- CMD_CALIBRATION_OP = 31,
- CMD_BEACON_OP = 32,
- CMD_ANTENNA_OP = 33,
-};
-
-enum mcu_function {
- Q_SELECT = 1,
- BW_SETTING = 2,
- ATOMIC_TSSI_SETTING = 5,
-};
-
-enum mcu_power_mode {
- RADIO_OFF = 0x30,
- RADIO_ON = 0x31,
- RADIO_OFF_AUTO_WAKEUP = 0x32,
- RADIO_OFF_ADVANCE = 0x33,
- RADIO_ON_ADVANCE = 0x34,
-};
-
enum mcu_calibrate {
MCU_CAL_R = 1,
MCU_CAL_RXDCOC,
@@ -86,16 +39,16 @@ enum mcu_calibrate {
MCU_CAL_TXDCOC,
MCU_CAL_RX_GROUP_DELAY,
MCU_CAL_TX_GROUP_DELAY,
+ MCU_CAL_VCO,
+ MCU_CAL_NO_SIGNAL = 0xfe,
+ MCU_CAL_FULL = 0xff,
};
-int mt76x0_mcu_init(struct mt76x0_dev *dev);
-int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev);
-void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev);
-
-int
-mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val);
-
-int
-mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val);
+int mt76x0e_mcu_init(struct mt76x02_dev *dev);
+int mt76x0u_mcu_init(struct mt76x02_dev *dev);
+static inline int mt76x0_firmware_running(struct mt76x02_dev *dev)
+{
+ return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
+}
#endif
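The new mt76x0_firmware_running() helper reads the MCU mailbox register (MT_MCU_COM_REG0) and treats the value 1 as "firmware is executing"; the deleted USB loader polled it up to 100 times with a 10 ms sleep. A small sketch of that bounded handshake, with reg_read() standing in for mt76_rr():

#include <stdbool.h>
#include <stdio.h>

static unsigned int mailbox;	/* stands in for MT_MCU_COM_REG0 */

static unsigned int reg_read(void)
{
	return mailbox;
}

static bool wait_firmware_running(int tries)
{
	while (tries-- > 0) {
		if (reg_read() == 1)
			return true;
		if (tries == 50)
			mailbox = 1;	/* simulate the MCU coming up */
	}
	return false;
}

int main(void)
{
	printf("running: %s\n", wait_firmware_running(100) ? "yes" : "no");
	return 0;
}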
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index fc9857f61771..2187bafaf2e9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -25,306 +25,59 @@
#include <net/mac80211.h>
#include <linux/debugfs.h>
-#include "../mt76.h"
-#include "regs.h"
+#include "../mt76x02.h"
+#include "eeprom.h"
#define MT_CALIBRATE_INTERVAL (4 * HZ)
-#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
-#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
-#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
-
-#define MT_BBP_REG_VERSION 0x00
-
#define MT_USB_AGGR_SIZE_LIMIT 21 /* in units of 1024B */
#define MT_USB_AGGR_TIMEOUT 0x80 /* in units of 33ns */
-#define MT_RX_ORDER 3
-#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER)
-
-struct mt76x0_dma_buf {
- struct urb *urb;
- void *buf;
- dma_addr_t dma;
- size_t len;
-};
-
-struct mt76x0_mcu {
- struct mutex mutex;
-
- u8 msg_seq;
-
- struct mt76x0_dma_buf resp;
- struct completion resp_cmpl;
-
- struct mt76_reg_pair *reg_pairs;
- unsigned int reg_pairs_len;
- u32 reg_base;
- bool burst_read;
-};
-
-struct mac_stats {
- u64 rx_stat[6];
- u64 tx_stat[6];
- u64 aggr_stat[2];
- u64 aggr_n[32];
- u64 zero_len_del[2];
-};
-
-#define N_RX_ENTRIES 16
-struct mt76x0_rx_queue {
- struct mt76x0_dev *dev;
-
- struct mt76x0_dma_buf_rx {
- struct urb *urb;
- struct page *p;
- } e[N_RX_ENTRIES];
-
- unsigned int start;
- unsigned int end;
- unsigned int entries;
- unsigned int pending;
-};
-
-#define N_TX_ENTRIES 64
-
-struct mt76x0_tx_queue {
- struct mt76x0_dev *dev;
-
- struct mt76x0_dma_buf_tx {
- struct urb *urb;
- struct sk_buff *skb;
- } e[N_TX_ENTRIES];
-
- unsigned int start;
- unsigned int end;
- unsigned int entries;
- unsigned int used;
- unsigned int fifo_seq;
-};
-
-/* WCID allocation:
- * 0: mcast wcid
- * 1: bssid wcid
- * 1...: STAs
- * ...7e: group wcids
- * 7f: reserved
- */
-#define N_WCIDS 128
-#define GROUP_WCID(idx) (254 - idx)
-
-struct mt76x0_eeprom_params;
-
-#define MT_EE_TEMPERATURE_SLOPE 39
-#define MT_FREQ_OFFSET_INVALID -128
-
-/* addr req mask */
-#define MT_VEND_TYPE_EEPROM BIT(31)
-#define MT_VEND_TYPE_CFG BIT(30)
-#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
-
-#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
-
-enum mt_bw {
- MT_BW_20,
- MT_BW_40,
-};
-
-/**
- * struct mt76x0_dev - adapter structure
- * @lock: protects @wcid->tx_rate.
- * @mac_lock: locks out mac80211's tx status and rx paths.
- * @tx_lock: protects @tx_q and changes of MT76_STATE_*_STATS
- * flags in @state.
- * @rx_lock: protects @rx_q.
- * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
- * @mutex: ensures exclusive access from mac80211 callbacks.
- * @reg_atomic_mutex: ensures atomicity of indirect register accesses
- * (accesses to RF and BBP).
- * @hw_atomic_mutex: ensures exclusive access to HW during critical
- * operations (power management, channel switch).
- */
-struct mt76x0_dev {
- struct mt76_dev mt76; /* must be first */
-
- struct mutex mutex;
-
- struct mutex usb_ctrl_mtx;
- u8 data[32];
-
- struct tasklet_struct rx_tasklet;
- struct tasklet_struct tx_tasklet;
-
- u8 out_ep[__MT_EP_OUT_MAX];
- u16 out_max_packet;
- u8 in_ep[__MT_EP_IN_MAX];
- u16 in_max_packet;
-
- unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)];
- unsigned long vif_mask;
-
- struct mt76x0_mcu mcu;
-
- struct delayed_work cal_work;
- struct delayed_work mac_work;
-
- struct workqueue_struct *stat_wq;
- struct delayed_work stat_work;
- struct mt76_wcid *mon_wcid;
- struct mt76_wcid __rcu *wcid[N_WCIDS];
-
- spinlock_t mac_lock;
-
- const u16 *beacon_offsets;
-
- u8 macaddr[ETH_ALEN];
- struct mt76x0_eeprom_params *ee;
-
- struct mutex reg_atomic_mutex;
- struct mutex hw_atomic_mutex;
-
- u32 rxfilter;
- u32 debugfs_reg;
-
- /* TX */
- spinlock_t tx_lock;
- struct mt76x0_tx_queue *tx_q;
- struct sk_buff_head tx_skb_done;
-
- atomic_t avg_ampdu_len;
-
- /* RX */
- spinlock_t rx_lock;
- struct mt76x0_rx_queue rx_q;
-
- /* Connection monitoring things */
- spinlock_t con_mon_lock;
- u8 ap_bssid[ETH_ALEN];
-
- s8 bcn_freq_off;
- u8 bcn_phy_mode;
-
- int avg_rssi; /* starts at 0 and converges */
-
- u8 agc_save;
- u16 chainmask;
-
- struct mac_stats stats;
-};
-
-struct mt76x0_wcid {
- u8 idx;
- u8 hw_key_idx;
-
- u16 tx_rate;
- bool tx_rate_set;
- u8 tx_rate_nss;
-};
-
-struct mt76_vif {
- u8 idx;
-
- struct mt76_wcid group_wcid;
-};
-
-struct mt76_tx_status {
- u8 valid:1;
- u8 success:1;
- u8 aggr:1;
- u8 ack_req:1;
- u8 is_probe:1;
- u8 wcid;
- u8 pktid;
- u8 retry;
- u16 rate;
-} __packed __aligned(2);
-
-struct mt76_sta {
- struct mt76_wcid wcid;
- struct mt76_tx_status status;
- int n_frames;
- u16 agg_ssn[IEEE80211_NUM_TIDS];
-};
-
-struct mt76_reg_pair {
- u32 reg;
- u32 value;
-};
-
-struct mt76x0_rxwi;
-
-extern const struct ieee80211_ops mt76x0_ops;
-
-static inline bool is_mt7610e(struct mt76x0_dev *dev)
+static inline bool is_mt7610e(struct mt76x02_dev *dev)
{
/* TODO */
return false;
}
-void mt76x0_init_debugfs(struct mt76x0_dev *dev);
-
-int mt76x0_wait_asic_ready(struct mt76x0_dev *dev);
-
-/* Compatibility with mt76 */
-#define mt76_rmw_field(_dev, _reg, _field, _val) \
- mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
-
-int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
- const struct mt76_reg_pair *data, int len);
-int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
- struct mt76_reg_pair *data, int len);
-int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
- const u32 *data, int n);
-void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);
+void mt76x0_init_debugfs(struct mt76x02_dev *dev);
/* Init */
-struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
-int mt76x0_init_hardware(struct mt76x0_dev *dev);
-int mt76x0_register_device(struct mt76x0_dev *dev);
-void mt76x0_cleanup(struct mt76x0_dev *dev);
-void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
-
-int mt76x0_mac_start(struct mt76x0_dev *dev);
-void mt76x0_mac_stop(struct mt76x0_dev *dev);
+struct mt76x02_dev *
+mt76x0_alloc_device(struct device *pdev,
+ const struct mt76_driver_ops *drv_ops,
+ const struct ieee80211_ops *ops);
+int mt76x0_init_hardware(struct mt76x02_dev *dev);
+int mt76x0_register_device(struct mt76x02_dev *dev);
+void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
+
+int mt76x0_mac_start(struct mt76x02_dev *dev);
+void mt76x0_mac_stop(struct mt76x02_dev *dev);
+
+int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
+void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed);
+void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac_addr);
+void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
/* PHY */
-void mt76x0_phy_init(struct mt76x0_dev *dev);
-int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
-void mt76x0_agc_save(struct mt76x0_dev *dev);
-void mt76x0_agc_restore(struct mt76x0_dev *dev);
-int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
+void mt76x0_phy_init(struct mt76x02_dev *dev);
+int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev);
+int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef);
-void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
-int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi);
-void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
- struct ieee80211_bss_conf *info);
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev);
+void mt76x0_phy_set_txpower(struct mt76x02_dev *dev);
+void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on);
/* MAC */
void mt76x0_mac_work(struct work_struct *work);
-void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
+void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
int ht_mode);
-void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
-void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
-void
-mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
-void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);
-
-/* TX */
-void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb);
-int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
-void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb);
-void mt76x0_tx_stat(struct work_struct *work);
-
-/* util */
-void mt76x0_remove_hdr_pad(struct sk_buff *skb);
-int mt76x0_insert_hdr_pad(struct sk_buff *skb);
-
-int mt76x0_dma_init(struct mt76x0_dev *dev);
-void mt76x0_dma_cleanup(struct mt76x0_dev *dev);
-
-int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
- struct mt76_wcid *wcid, int hw_q);
+void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb);
+void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval);
+void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev);
#endif
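Throughout this header, the driver-private state now lives in the shared mt76x02_dev, which embeds struct mt76_dev as its first member ("must be first" in the deleted definition). That layout is what lets bus glue such as mt76x0e_remove() below turn a struct mt76_dev pointer back into the containing device with container_of(). A freestanding illustration of the idiom:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mt76_dev_demo {
	int state;
};

struct mt76x02_dev_demo {
	struct mt76_dev_demo mt76;	/* must be first */
	int cal_state;
};

int main(void)
{
	struct mt76x02_dev_demo dev = {
		.mt76 = { .state = 1 },
		.cal_state = 42,
	};
	struct mt76_dev_demo *mdev = &dev.mt76;
	struct mt76x02_dev_demo *back =
		container_of(mdev, struct mt76x02_dev_demo, mt76);

	printf("%d\n", back->cal_state);	/* prints 42 */
	return 0;
}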
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
new file mode 100644
index 000000000000..522c86059bcb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+
+static int mt76x0e_start(struct ieee80211_hw *hw)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76x02_mac_start(dev);
+ mt76x0_phy_calibrate(dev, true);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
+ 0, 1000))
+ dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
+
+ mt76x0_mac_stop(dev);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, 1000))
+ dev_warn(dev->mt76.dev, "RX DMA did not stop\n");
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
+}
+
+static void mt76x0e_stop(struct ieee80211_hw *hw)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ mt76x0e_stop_hw(dev);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static const struct ieee80211_ops mt76x0e_ops = {
+ .tx = mt76x02_tx,
+ .start = mt76x0e_start,
+ .stop = mt76x0e_stop,
+ .add_interface = mt76x02_add_interface,
+ .remove_interface = mt76x02_remove_interface,
+ .config = mt76x0_config,
+ .configure_filter = mt76x02_configure_filter,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ .set_key = mt76x02_set_key,
+ .conf_tx = mt76x02_conf_tx,
+ .sw_scan_start = mt76x0_sw_scan,
+ .sw_scan_complete = mt76x0_sw_scan_complete,
+ .ampdu_action = mt76x02_ampdu_action,
+ .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+ .wake_tx_queue = mt76_wake_tx_queue,
+};
+
+static int mt76x0e_register_device(struct mt76x02_dev *dev)
+{
+ int err;
+
+ mt76x0_chip_onoff(dev, true, false);
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
+
+ mt76x02_dma_disable(dev);
+ err = mt76x0e_mcu_init(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76x02_dma_init(dev);
+ if (err < 0)
+ return err;
+
+ err = mt76x0_init_hardware(dev);
+ if (err < 0)
+ return err;
+
+ if (mt76_chip(&dev->mt76) == 0x7610) {
+ u16 val;
+
+ mt76_clear(dev, MT_COEXCFG0, BIT(0));
+
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+ if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT))
+ mt76_set(dev, MT_XO_CTRL7, 0xc03);
+ }
+
+ mt76_clear(dev, 0x110, BIT(9));
+ mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+
+ err = mt76x0_register_device(dev);
+ if (err < 0)
+ return err;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ return 0;
+}
+
+static int
+mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = sizeof(struct mt76x02_txwi),
+ .tx_prepare_skb = mt76x02_tx_prepare_skb,
+ .tx_complete_skb = mt76x02_tx_complete_skb,
+ .rx_skb = mt76x02_queue_rx_skb,
+ .rx_poll_complete = mt76x02_rx_poll_complete,
+ };
+ struct mt76x02_dev *dev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ dev = mt76x0_alloc_device(&pdev->dev, &drv_ops, &mt76x0e_ops);
+ if (!dev)
+ return -ENOMEM;
+
+ mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+
+ dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+ dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+ ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt76x0e_register_device(dev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
+
+static void mt76x0e_cleanup(struct mt76x02_dev *dev)
+{
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ mt76x0_chip_onoff(dev, false, false);
+ mt76x0e_stop_hw(dev);
+ mt76x02_dma_cleanup(dev);
+ mt76x02_mcu_cleanup(dev);
+}
+
+static void
+mt76x0e_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+ mt76_unregister_device(mdev);
+ mt76x0e_cleanup(dev);
+ ieee80211_free_hw(mdev->hw);
+}
+
+static const struct pci_device_id mt76x0e_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7630) },
+ { PCI_DEVICE(0x14c3, 0x7650) },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76x0e_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt76x0e_device_table,
+ .probe = mt76x0e_probe,
+ .remove = mt76x0e_remove,
+};
+
+module_pci_driver(mt76x0e_driver);
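mt76x0e_probe() follows the usual PCI probe shape: managed enable/iomap (pcim_*), a 32-bit DMA mask, device allocation, MMIO setup, IRQ request, then registration, with a single error label that frees the ieee80211_hw allocated along the way. A stubbed sketch of that goto-based unwind (step names are placeholders):

#include <stdio.h>

static int step(const char *name, int ok)
{
	printf("%s: %s\n", name, ok ? "ok" : "failed");
	return ok ? 0 : -1;
}

static int probe(void)
{
	if (step("enable + iomap", 1))
		return -1;		/* managed resources, nothing to undo */
	if (step("alloc hw", 1))
		return -1;
	if (step("request irq", 1))
		goto free_hw;
	if (step("register device", 0))	/* simulate a late failure */
		goto free_hw;
	return 0;

free_hw:
	step("free hw", 1);		/* undo only what was acquired */
	return -1;
}

int main(void)
{
	return probe() ? 1 : 0;
}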
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
new file mode 100644
index 000000000000..569861289aa5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+
+#define MT7610E_FIRMWARE "mediatek/mt7610e.bin"
+#define MT7650E_FIRMWARE "mediatek/mt7650e.bin"
+
+#define MT_MCU_IVB_ADDR (MT_MCU_ILM_ADDR + 0x54000 - MT_MCU_IVB_SIZE)
+
+static int mt76x0e_load_firmware(struct mt76x02_dev *dev)
+{
+ bool is_combo_chip = mt76_chip(&dev->mt76) != 0x7610;
+ u32 val, ilm_len, dlm_len, offset = 0;
+ const struct mt76x02_fw_header *hdr;
+ const struct firmware *fw;
+ const char *firmware;
+ const u8 *fw_payload;
+ int len, err;
+
+ if (is_combo_chip)
+ firmware = MT7650E_FIRMWARE;
+ else
+ firmware = MT7610E_FIRMWARE;
+
+ err = request_firmware(&fw, firmware, dev->mt76.dev);
+ if (err)
+ return err;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ err = -EIO;
+ goto out;
+ }
+
+ hdr = (const struct mt76x02_fw_header *)fw->data;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len) {
+ err = -EIO;
+ goto out;
+ }
+
+ fw_payload = fw->data + sizeof(*hdr);
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->mt76.dev,
+ "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+ le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+ if (is_combo_chip && !mt76_poll(dev, MT_MCU_SEMAPHORE_00, 1, 1, 600)) {
+ dev_err(dev->mt76.dev,
+ "Could not get hardware semaphore for loading fw\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* upload ILM. */
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+ ilm_len = le32_to_cpu(hdr->ilm_len);
+ if (is_combo_chip) {
+ ilm_len -= MT_MCU_IVB_SIZE;
+ offset = MT_MCU_IVB_SIZE;
+ }
+ dev_dbg(dev->mt76.dev, "loading FW - ILM %u\n", ilm_len);
+ mt76_wr_copy(dev, MT_MCU_ILM_ADDR + offset, fw_payload + offset,
+ ilm_len);
+
+ /* upload IVB. */
+ if (is_combo_chip) {
+ dev_dbg(dev->mt76.dev, "loading FW - IVB %u\n",
+ MT_MCU_IVB_SIZE);
+ mt76_wr_copy(dev, MT_MCU_IVB_ADDR, fw_payload, MT_MCU_IVB_SIZE);
+ }
+
+ /* upload DLM. */
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+ dlm_len = le32_to_cpu(hdr->dlm_len);
+ dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+ mt76_wr_copy(dev, MT_MCU_ILM_ADDR,
+ fw_payload + le32_to_cpu(hdr->ilm_len), dlm_len);
+
+ /* trigger firmware */
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+ if (is_combo_chip)
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 0x3);
+ else
+ mt76_wr(dev, MT_MCU_RESET_CTL, 0x300);
+
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 1000)) {
+ dev_err(dev->mt76.dev, "Firmware failed to start\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt76x02_set_ethtool_fwver(dev, hdr);
+ dev_dbg(dev->mt76.dev, "Firmware running!\n");
+
+out:
+ if (is_combo_chip)
+ mt76_wr(dev, MT_MCU_SEMAPHORE_00, 0x1);
+ release_firmware(fw);
+
+ return err;
+}
+
+int mt76x0e_mcu_init(struct mt76x02_dev *dev)
+{
+ static const struct mt76_mcu_ops mt76x0e_mcu_ops = {
+ .mcu_msg_alloc = mt76x02_mcu_msg_alloc,
+ .mcu_send_msg = mt76x02_mcu_msg_send,
+ };
+ int err;
+
+ dev->mt76.mcu_ops = &mt76x0e_mcu_ops;
+
+ err = mt76x0e_load_firmware(dev);
+ if (err < 0)
+ return err;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+ return 0;
+}
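mt76x0e_load_firmware() above validates the image as header | ILM | DLM and, on combo chips, carves the first MT_MCU_IVB_SIZE bytes off the front of the ILM for a separate IVB upload. A sketch of those offset computations, with made-up section sizes:

#include <stdio.h>

#define MT_MCU_IVB_SIZE 0x40

int main(void)
{
	/* illustrative sizes; real values come from the firmware header */
	unsigned int hdr_len = 32, ilm_len = 0x1000, dlm_len = 0x800;
	unsigned int fw_size = hdr_len + ilm_len + dlm_len;
	int is_combo = 1;
	unsigned int ivb = is_combo ? MT_MCU_IVB_SIZE : 0;

	printf("expected image size: %u\n", fw_size);
	if (ivb)
		printf("IVB at payload+0 (%u bytes)\n", ivb);
	printf("ILM upload at payload+%u (%u bytes)\n", ivb, ilm_len - ivb);
	printf("DLM upload at payload+%u (%u bytes)\n", ilm_len, dlm_len);
	return 0;
}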
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 5da7bfbe907f..cf024950e0ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -14,6 +14,9 @@
* GNU General Public License for more details.
*/
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
#include "mt76x0.h"
#include "mcu.h"
#include "eeprom.h"
@@ -21,11 +24,10 @@
#include "phy.h"
#include "initvals.h"
#include "initvals_phy.h"
-
-#include <linux/etherdevice.h>
+#include "../mt76x02_phy.h"
static int
-mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
+mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
{
int ret = 0;
u8 bank, reg;
@@ -36,10 +38,10 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
bank = MT_RF_BANK(offset);
reg = MT_RF_REG(offset);
- if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8)
+ if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
return -EINVAL;
- mutex_lock(&dev->reg_atomic_mutex);
+ mutex_lock(&dev->phy_mutex);
if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
ret = -ETIMEDOUT;
@@ -54,7 +56,7 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
MT_RF_CSR_CFG_KICK);
trace_mt76x0_rf_write(&dev->mt76, bank, offset, value);
out:
- mutex_unlock(&dev->reg_atomic_mutex);
+ mutex_unlock(&dev->phy_mutex);
if (ret < 0)
dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
@@ -63,8 +65,7 @@ out:
return ret;
}
-static int
-mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
+static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
{
int ret = -ETIMEDOUT;
u32 val;
@@ -76,10 +77,10 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
bank = MT_RF_BANK(offset);
reg = MT_RF_REG(offset);
- if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8)
+ if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
return -EINVAL;
- mutex_lock(&dev->reg_atomic_mutex);
+ mutex_lock(&dev->phy_mutex);
if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
goto out;
@@ -99,7 +100,7 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret);
}
out:
- mutex_unlock(&dev->reg_atomic_mutex);
+ mutex_unlock(&dev->phy_mutex);
if (ret < 0)
dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
@@ -109,36 +110,38 @@ out:
}
static int
-rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
+rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
{
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ if (mt76_is_usb(dev)) {
struct mt76_reg_pair pair = {
.reg = offset,
.value = val,
};
- return mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
+ &dev->mt76.state));
+ return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
} else {
- WARN_ON_ONCE(1);
return mt76x0_rf_csr_wr(dev, offset, val);
}
}
static int
-rf_rr(struct mt76x0_dev *dev, u32 offset)
+rf_rr(struct mt76x02_dev *dev, u32 offset)
{
int ret;
u32 val;
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
+ if (mt76_is_usb(dev)) {
struct mt76_reg_pair pair = {
.reg = offset,
};
- ret = mt76x0_read_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
+ WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
+ &dev->mt76.state));
+ ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
val = pair.value;
} else {
- WARN_ON_ONCE(1);
ret = val = mt76x0_rf_csr_rr(dev, offset);
}
@@ -146,7 +149,7 @@ rf_rr(struct mt76x0_dev *dev, u32 offset)
}
static int
-rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
+rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val)
{
int ret;
@@ -162,30 +165,43 @@ rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
}
static int
-rf_set(struct mt76x0_dev *dev, u32 offset, u8 val)
+rf_set(struct mt76x02_dev *dev, u32 offset, u8 val)
{
return rf_rmw(dev, offset, 0, val);
}
#if 0
static int
-rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask)
+rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask)
{
return rf_rmw(dev, offset, mask, 0);
}
#endif
-#define RF_RANDOM_WRITE(dev, tab) \
- mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));
+static void
+mt76x0_rf_csr_wr_rp(struct mt76x02_dev *dev, const struct mt76_reg_pair *data,
+ int n)
+{
+ while (n-- > 0) {
+ mt76x0_rf_csr_wr(dev, data->reg, data->value);
+ data++;
+ }
+}
+
+#define RF_RANDOM_WRITE(dev, tab) do { \
+ if (mt76_is_mmio(dev)) \
+ mt76x0_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \
+ else \
+ mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\
+} while (0)
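The rewritten RF_RANDOM_WRITE() is wrapped in do { ... } while (0) so the two-way bus dispatch expands to a single statement and stays safe under an unbraced if/else. A minimal demonstration of why that wrapper matters:

#include <stdio.h>

#define TWO_STEPS_BAD()		step(1); step(2)
#define TWO_STEPS_GOOD()	do { step(1); step(2); } while (0)

static void step(int n)
{
	printf("step %d\n", n);
}

int main(void)
{
	if (0)
		TWO_STEPS_GOOD();	/* both steps correctly skipped */
	else
		printf("else branch\n");

	/* With TWO_STEPS_BAD() above instead, the stray second statement
	 * would detach the else and the file would not compile. */
	return 0;
}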
-int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
+int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
{
int i = 20;
u32 val;
do {
val = mt76_rr(dev, MT_BBP(CORE, 0));
- printk("BBP version %08x\n", val);
if (val && ~val)
break;
} while (--i);
@@ -195,55 +211,11 @@ int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
return -EIO;
}
+ dev_dbg(dev->mt76.dev, "BBP version %08x\n", val);
return 0;
}
-static void
-mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
- u8 ctrl)
-{
- int core_val, agc_val;
-
- switch (width) {
- case NL80211_CHAN_WIDTH_80:
- core_val = 3;
- agc_val = 7;
- break;
- case NL80211_CHAN_WIDTH_40:
- core_val = 2;
- agc_val = 3;
- break;
- default:
- core_val = 0;
- agc_val = 1;
- break;
- }
-
- mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
- mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-
-int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi)
-{
- s8 lna_gain, rssi_offset;
- int val;
-
- if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) {
- lna_gain = dev->ee->lna_gain_2ghz;
- rssi_offset = dev->ee->rssi_offset_2ghz[0];
- } else {
- lna_gain = dev->ee->lna_gain_5ghz[0];
- rssi_offset = dev->ee->rssi_offset_5ghz[0];
- }
-
- val = rxwi->rssi[0] + rssi_offset - lna_gain;
-
- return val;
-}
-
-static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
+static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
{
u8 val;
@@ -300,14 +272,7 @@ static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
}
static void
-mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper)
-{
- mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
- primary_upper);
-}
-
-static void
-mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
+mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
{
switch (band) {
case NL80211_BAND_2GHZ:
@@ -316,9 +281,6 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
rf_wr(dev, MT_RF(5, 0), 0x45);
rf_wr(dev, MT_RF(6, 0), 0x44);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-
mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
break;
@@ -328,9 +290,6 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
rf_wr(dev, MT_RF(5, 0), 0x44);
rf_wr(dev, MT_RF(6, 0), 0x45);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-
mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
break;
@@ -339,16 +298,12 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
}
}
-#define EXT_PA_2G_5G 0x0
-#define EXT_PA_5G_ONLY 0x1
-#define EXT_PA_2G_ONLY 0x2
-#define INT_PA_2G_5G 0x3
-
static void
-mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band)
{
u16 rf_band = rf_bw_band & 0xff00;
u16 rf_bw = rf_bw_band & 0x00ff;
+ enum nl80211_band band;
u32 mac_reg;
u8 rf_val;
int i;
@@ -495,11 +450,8 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band
mac_reg &= ~0xC; /* Clear 0x518[3:2] */
mt76_wr(dev, MT_RF_MISC, mac_reg);
- if (dev->ee->pa_type == INT_PA_2G_5G ||
- (dev->ee->pa_type == EXT_PA_5G_ONLY && (rf_band & RF_G_BAND)) ||
- (dev->ee->pa_type == EXT_PA_2G_ONLY && (rf_band & RF_A_BAND))) {
- ; /* Internal PA - nothing to do. */
- } else {
+ band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ if (mt76x02_ext_pa_enabled(dev, band)) {
/*
MT_RF_MISC (offset: 0x0518)
[2]1'b1: enable external A band PA, 1'b0: disable external A band PA
@@ -538,7 +490,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band
}
static void
-mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
+mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
{
int i;
@@ -551,20 +503,10 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban
if (pair->reg == MT_BBP(AGC, 8)) {
u32 val = pair->value;
- u8 gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
-
- if (channel > 14) {
- if (channel < 100)
- gain -= dev->ee->lna_gain_5ghz[0]*2;
- else if (channel < 137)
- gain -= dev->ee->lna_gain_5ghz[1]*2;
- else
- gain -= dev->ee->lna_gain_5ghz[2]*2;
-
- } else {
- gain -= dev->ee->lna_gain_2ghz*2;
- }
+ u8 gain;
+ gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
+ gain -= dev->cal.rx.lna_gain * 2;
val &= ~MT_BBP_AGC_GAIN;
val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
mt76_wr(dev, pair->reg, val);
@@ -574,46 +516,27 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban
}
}
-#if 0
-static void
-mt76x0_extra_power_over_mac(struct mt76x0_dev *dev)
+static void mt76x0_ant_select(struct mt76x02_dev *dev)
{
- u32 val;
-
- val = ((mt76_rr(dev, MT_TX_PWR_CFG_1) & 0x00003f00) >> 8);
- val |= ((mt76_rr(dev, MT_TX_PWR_CFG_2) & 0x00003f00) << 8);
- mt76_wr(dev, MT_TX_PWR_CFG_7, val);
-
- /* TODO: fix VHT */
- val = ((mt76_rr(dev, MT_TX_PWR_CFG_3) & 0x0000ff00) >> 8);
- mt76_wr(dev, MT_TX_PWR_CFG_8, val);
-
- val = ((mt76_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
- mt76_wr(dev, MT_TX_PWR_CFG_9, val);
-}
-
-static void
-mt76x0_phy_set_tx_power(struct mt76x0_dev *dev, u8 channel, u8 rf_bw_band)
-{
- u32 val;
- int i;
- int bw = (rf_bw_band & RF_BW_20) ? 0 : 1;
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
- for (i = 0; i < 4; i++) {
- if (channel <= 14)
- val = dev->ee->tx_pwr_cfg_2g[i][bw];
- else
- val = dev->ee->tx_pwr_cfg_5g[i][bw];
-
- mt76_wr(dev, MT_TX_PWR_CFG_0 + 4*i, val);
+ /* single antenna mode */
+ if (chan->band == NL80211_BAND_2GHZ) {
+ mt76_rmw(dev, MT_COEXCFG3,
+ BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
+ mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
+ } else {
+ mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(2),
+ BIT(4) | BIT(3));
+ mt76_clear(dev, MT_WLAN_FUN_CTRL,
+ BIT(6) | BIT(5));
}
-
- mt76x0_extra_power_over_mac(dev);
+ mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
+ mt76_clear(dev, MT_COEXCFG0, BIT(2));
}
-#endif
static void
-mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
+mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
{
enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4 };
int bw;
@@ -640,39 +563,69 @@ mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
return;
}
- mt76x0_mcu_function_select(dev, BW_SETTING, bw);
+ mt76x02_mcu_function_select(dev, BW_SETTING, bw, false);
}
-static void
-mt76x0_phy_set_chan_pwr(struct mt76x0_dev *dev, u8 channel)
+void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
{
- static const int mt76x0_tx_pwr_ch_list[] = {
- 1,2,3,4,5,6,7,8,9,10,11,12,13,14,
- 36,38,40,44,46,48,52,54,56,60,62,64,
- 100,102,104,108,110,112,116,118,120,124,126,128,132,134,136,140,
- 149,151,153,157,159,161,165,167,169,171,173,
- 42,58,106,122,155
- };
- int i;
- u32 val;
+ struct mt76_rate_power *t = &dev->mt76.rate_power;
+ u8 info[2];
- for (i = 0; i < ARRAY_SIZE(mt76x0_tx_pwr_ch_list); i++)
- if (mt76x0_tx_pwr_ch_list[i] == channel)
- break;
+ mt76x0_get_power_info(dev, info);
+ mt76x0_get_tx_power_per_rate(dev);
- if (WARN_ON(i == ARRAY_SIZE(mt76x0_tx_pwr_ch_list)))
- return;
+ mt76x02_add_rate_power_offset(t, info[0]);
+ mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
+ dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
+ mt76x02_add_rate_power_offset(t, -info[0]);
- val = mt76_rr(dev, MT_TX_ALC_CFG_0);
- val &= ~0x3f3f;
- val |= dev->ee->tx_pwr_per_chan[i];
- val |= 0x2f2f << 16;
- mt76_wr(dev, MT_TX_ALC_CFG_0, val);
+ mt76x02_phy_set_txpower(dev, info[0], info[1]);
}
-static int
-__mt76x0_phy_set_channel(struct mt76x0_dev *dev,
- struct cfg80211_chan_def *chandef)
+void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ u32 val, tx_alc, reg_val;
+
+ if (power_on) {
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value,
+ false);
+ usleep_range(10, 20);
+ /* XXX: tssi */
+ }
+
+ tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
+ usleep_range(500, 700);
+
+ reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
+ mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
+
+ if (chan->band == NL80211_BAND_5GHZ) {
+ if (chan->hw_value < 100)
+ val = 0x701;
+ else if (chan->hw_value < 140)
+ val = 0x801;
+ else
+ val = 0x901;
+ } else {
+ val = 0x600;
+ }
+
+ mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false);
+ msleep(350);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false);
+ usleep_range(15000, 20000);
+
+ mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
+ mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
+}
+EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate);
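mt76x0_phy_calibrate() above picks the MCU_CAL_FULL argument from the band and channel number: 0x600 on 2 GHz, and 0x701 / 0x801 / 0x901 across the 5 GHz sub-bands. The same mapping, extracted into a standalone function with the thresholds copied from the hunk:

#include <stdio.h>

static unsigned int cal_val(int band_5ghz, int hw_value)
{
	if (!band_5ghz)
		return 0x600;
	if (hw_value < 100)
		return 0x701;
	if (hw_value < 140)
		return 0x801;
	return 0x901;
}

int main(void)
{
	/* channels 6, 36, 120, 149 -> 600 701 801 901 */
	printf("%x %x %x %x\n", cal_val(0, 6), cal_val(1, 36),
	       cal_val(1, 120), cal_val(1, 149));
	return 0;
}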
+
+int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef)
{
u32 ext_cca_chan[4] = {
[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
@@ -706,6 +659,7 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
freq1 = chandef->center_freq1;
channel = chandef->chan->hw_value;
rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND;
+ dev->mt76.chandef = *chandef;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
@@ -729,9 +683,20 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
break;
}
- mt76x0_bbp_set_bw(dev, chandef->width);
- mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
- mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);
+ if (mt76_is_usb(dev)) {
+ mt76x0_bbp_set_bw(dev, chandef->width);
+ } else {
+ if (chandef->width == NL80211_CHAN_WIDTH_80 ||
+ chandef->width == NL80211_CHAN_WIDTH_40)
+ val = 0x201;
+ else
+ val = 0x601;
+ mt76_wr(dev, MT_TX_SW_CFG0, val);
+ }
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x02_phy_set_band(dev, chandef->chan->band,
+ ch_group_index & 1);
+ mt76x0_ant_select(dev);
mt76_rmw(dev, MT_EXT_CCA_CFG,
(MT_EXT_CCA_CFG_CCA0 |
@@ -752,40 +717,37 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
val &= ~0x20;
mt76_wr(dev, MT_BBP(CORE, 1), val);
- mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band);
-
- /* Vendor driver don't do it */
- /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */
+ mt76x0_read_rx_gain(dev);
+ mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
+ mt76x02_init_agc_gain(dev);
- if (scan)
+ if (mt76_is_usb(dev)) {
mt76x0_vco_cal(dev, channel);
+ } else {
+ /* enable vco */
+ rf_set(dev, MT_RF(0, 4), BIT(7));
+ }
- mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
- mt76x0_phy_set_chan_pwr(dev, channel);
+ if (scan)
+ return 0;
- dev->mt76.chandef = *chandef;
- return 0;
-}
+ if (mt76_is_mmio(dev))
+ mt76x0_phy_calibrate(dev, false);
+ mt76x0_phy_set_txpower(dev);
-int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
- struct cfg80211_chan_def *chandef)
-{
- int ret;
-
- mutex_lock(&dev->hw_atomic_mutex);
- ret = __mt76x0_phy_set_channel(dev, chandef);
- mutex_unlock(&dev->hw_atomic_mutex);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
- return ret;
+ return 0;
}
-void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
+void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
{
u32 tx_alc, reg_val;
u8 channel = dev->mt76.chandef.chan->hw_value;
int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
- mt76x0_mcu_calibrate(dev, MCU_CAL_R, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
mt76x0_vco_cal(dev, channel);
@@ -793,148 +755,119 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
usleep_range(500, 700);
- reg_val = mt76_rr(dev, 0x2124);
- reg_val &= 0xffffff7e;
- mt76_wr(dev, 0x2124, reg_val);
+ reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
+ mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
- mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false);
- mt76x0_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
- mt76x0_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz);
- mt76x0_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
- mt76x0_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz);
- mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz);
- mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false);
- mt76_wr(dev, 0x2124, reg_val);
+ mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
msleep(100);
- mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
}
-void mt76x0_agc_save(struct mt76x0_dev *dev)
-{
- /* Only one RX path */
- dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
-}
-
-void mt76x0_agc_restore(struct mt76x0_dev *dev)
-{
- mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
-}
-
-static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
+static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
{
u8 rf_b7_73, rf_b0_66, rf_b0_67;
- int cycle, temp;
- u32 val;
- s32 sval;
+ s8 val;
rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
- rf_b0_67 = rf_rr(dev, MT_RF(0, 73));
+ rf_b0_67 = rf_rr(dev, MT_RF(0, 67));
rf_wr(dev, MT_RF(7, 73), 0x02);
rf_wr(dev, MT_RF(0, 66), 0x23);
- rf_wr(dev, MT_RF(0, 73), 0x01);
+ rf_wr(dev, MT_RF(0, 67), 0x01);
mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
- for (cycle = 0; cycle < 2000; cycle++) {
- val = mt76_rr(dev, MT_BBP(CORE, 34));
- if (!(val & 0x10))
- break;
- udelay(3);
- }
-
- if (cycle >= 2000) {
- val &= 0x10;
- mt76_wr(dev, MT_BBP(CORE, 34), val);
+ if (!mt76_poll(dev, MT_BBP(CORE, 34), BIT(4), 0, 2000)) {
+ mt76_clear(dev, MT_BBP(CORE, 34), BIT(4));
goto done;
}
- sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
- if (!(sval & 0x80))
- sval &= 0x7f; /* Positive */
- else
- sval |= 0xffffff00; /* Negative */
+ val = mt76_rr(dev, MT_BBP(CORE, 35));
+ val = (35 * (val - dev->cal.rx.temp_offset)) / 10 + 25;
- temp = (35 * (sval - dev->ee->temp_off))/ 10 + 25;
+ if (abs(val - dev->cal.temp_vco) > 20) {
+ mt76x02_mcu_calibrate(dev, MCU_CAL_VCO,
+ dev->mt76.chandef.chan->hw_value,
+ false);
+ dev->cal.temp_vco = val;
+ }
+ if (abs(val - dev->cal.temp) > 30) {
+ mt76x0_phy_calibrate(dev, false);
+ dev->cal.temp = val;
+ }
done:
rf_wr(dev, MT_RF(7, 73), rf_b7_73);
rf_wr(dev, MT_RF(0, 66), rf_b0_66);
- rf_wr(dev, MT_RF(0, 73), rf_b0_67);
+ rf_wr(dev, MT_RF(0, 67), rf_b0_67);
}
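
For a concrete sense of the conversion introduced above (assuming, as the drift thresholds suggest, the value is in degrees Celsius): a raw BBP(CORE, 35) reading 10 counts above the calibrated rx.temp_offset works out to

	val = (35 * 10) / 10 + 25 = 60

so a drift of more than 20 degrees from cal.temp_vco re-runs the VCO calibration, and a drift of more than 30 degrees from cal.temp triggers a full mt76x0_phy_calibrate() pass.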
-static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
+static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
{
- u32 val, init_vga;
-
- init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E;
- if (dev->avg_rssi > -60)
- init_vga -= 0x20;
- else if (dev->avg_rssi > -70)
- init_vga -= 0x10;
-
- val = mt76_rr(dev, MT_BBP(AGC, 8));
- val &= 0xFFFF80FF;
- val |= init_vga << 8;
- mt76_wr(dev, MT_BBP(AGC,8), val);
+ u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+ u32 val = 0x122c << 16 | 0xf2;
+
+ mt76_wr(dev, MT_BBP(AGC, 8),
+ val | FIELD_PREP(MT_BBP_AGC_GAIN, gain));
}
-static void mt76x0_phy_calibrate(struct work_struct *work)
+static void
+mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
{
- struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
- cal_work.work);
+ bool gain_change;
+ u8 gain_delta;
+ int low_gain;
- mt76x0_dynamic_vga_tuning(dev);
- mt76x0_temp_sensor(dev);
+ dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
- MT_CALIBRATE_INTERVAL);
-}
+ low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+ (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
-void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
- struct ieee80211_bss_conf *info)
-{
- /* Start/stop collecting beacon data */
- spin_lock_bh(&dev->con_mon_lock);
- ether_addr_copy(dev->ap_bssid, info->bssid);
- dev->avg_rssi = 0;
- dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
- spin_unlock_bh(&dev->con_mon_lock);
-}
+ gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
+ dev->cal.low_gain = low_gain;
-static void
-mt76x0_set_rx_chains(struct mt76x0_dev *dev)
-{
- u32 val;
-
- val = mt76_rr(dev, MT_BBP(AGC, 0));
- val &= ~(BIT(3) | BIT(4));
+ if (!gain_change) {
+ if (mt76x02_phy_adjust_vga_gain(dev))
+ mt76x0_phy_set_gain_val(dev);
+ return;
+ }
- if (dev->chainmask & BIT(1))
- val |= BIT(3);
+ dev->cal.agc_gain_adjust = (low_gain == 2) ? 0 : 10;
+ gain_delta = (low_gain == 2) ? 10 : 0;
- mt76_wr(dev, MT_BBP(AGC, 0), val);
+ dev->cal.agc_gain_cur[0] = dev->cal.agc_gain_init[0] - gain_delta;
+ mt76x0_phy_set_gain_val(dev);
- mb();
- val = mt76_rr(dev, MT_BBP(AGC, 0));
+ /* clear false CCA counters */
+ mt76_rr(dev, MT_RX_STAT_1);
}
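
To make the branch logic above easier to follow: low_gain sums two boolean comparisons, so it is 0, 1 or 2, and only a flip of bit 1 counts as gain_change; otherwise the driver merely nudges the current VGA gain. An illustrative sketch with hypothetical threshold values (the real ones come from mt76x02_get_rssi_gain_thresh() and its low-RSSI counterpart):

	/* Assuming thresholds of -70 and -76 dBm (illustrative only):
	 *   avg_rssi_all = -80  ->  low_gain = 0 + 0 = 0
	 *   avg_rssi_all = -73  ->  low_gain = 0 + 1 = 1
	 *   avg_rssi_all = -60  ->  low_gain = 1 + 1 = 2
	 */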
-static void
-mt76x0_set_tx_dac(struct mt76x0_dev *dev)
+static void mt76x0_phy_calibration_work(struct work_struct *work)
{
- if (dev->chainmask & BIT(1))
- mt76_set(dev, MT_BBP(TXBE, 5), 3);
- else
- mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+ cal_work.work);
+
+ mt76x0_phy_update_channel_gain(dev);
+ if (!mt76x0_tssi_enabled(dev))
+ mt76x0_temp_sensor(dev);
+
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
}
-static void
-mt76x0_rf_init(struct mt76x0_dev *dev)
+static void mt76x0_rf_init(struct mt76x02_dev *dev)
{
int i;
u8 val;
@@ -966,7 +899,8 @@ mt76x0_rf_init(struct mt76x0_dev *dev)
E1: B0.R22<6:0>: xo_cxo<6:0>
E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
*/
- rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->ee->rf_freq_off, 0xBF));
+ rf_wr(dev, MT_RF(0, 22),
+ min_t(u8, dev->cal.rx.freq_offset, 0xbf));
val = rf_rr(dev, MT_RF(0, 22));
/*
@@ -986,23 +920,11 @@ mt76x0_rf_init(struct mt76x0_dev *dev)
rf_set(dev, MT_RF(0, 4), 0x80);
}
-static void mt76x0_ant_select(struct mt76x0_dev *dev)
+void mt76x0_phy_init(struct mt76x02_dev *dev)
{
- /* Single antenna mode. */
- mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
- mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
- mt76_clear(dev, MT_COEXCFG0, BIT(2));
- mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
-}
-
-void mt76x0_phy_init(struct mt76x0_dev *dev)
-{
- INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);
-
- mt76x0_ant_select(dev);
+ INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work);
mt76x0_rf_init(dev);
-
- mt76x0_set_rx_chains(dev);
- mt76x0_set_tx_dac(dev);
+ mt76x02_phy_set_rxpath(dev);
+ mt76x02_phy_set_txdac(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
deleted file mode 100644
index 16bed4aaa242..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
+++ /dev/null
@@ -1,651 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76_REGS_H
-#define __MT76_REGS_H
-
-#include <linux/bitops.h>
-
-#define MT_ASIC_VERSION 0x0000
-
-#define MT76XX_REV_E3 0x22
-#define MT76XX_REV_E4 0x33
-
-#define MT_CMB_CTRL 0x0020
-#define MT_CMB_CTRL_XTAL_RDY BIT(22)
-#define MT_CMB_CTRL_PLL_LD BIT(23)
-
-#define MT_EFUSE_CTRL 0x0024
-#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
-#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
-#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
-#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
-#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
-#define MT_EFUSE_CTRL_KICK BIT(30)
-#define MT_EFUSE_CTRL_SEL BIT(31)
-
-#define MT_EFUSE_DATA_BASE 0x0028
-#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
-
-#define MT_COEXCFG0 0x0040
-#define MT_COEXCFG0_COEX_EN BIT(0)
-
-#define MT_COEXCFG3 0x004c
-
-#define MT_LDO_CTRL_0 0x006c
-#define MT_LDO_CTRL_1 0x0070
-
-#define MT_WLAN_FUN_CTRL 0x0080
-#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
-#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
-#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
-
-#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
-#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
-
-#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
-#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
-#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
-#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
-
-#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
-#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
-
-#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
-#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
-#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
-
-#define MT_XO_CTRL0 0x0100
-#define MT_XO_CTRL1 0x0104
-#define MT_XO_CTRL2 0x0108
-#define MT_XO_CTRL3 0x010c
-#define MT_XO_CTRL4 0x0110
-
-#define MT_XO_CTRL5 0x0114
-#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
-
-#define MT_XO_CTRL6 0x0118
-#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
-
-#define MT_XO_CTRL7 0x011c
-
-#define MT_IOCFG_6 0x0124
-#define MT_WLAN_MTC_CTRL 0x10148
-#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
-#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
-#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
-#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
-#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
-#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
-#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
-#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
-#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
-#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
-#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
-#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
-
-#define MT_INT_SOURCE_CSR 0x0200
-#define MT_INT_MASK_CSR 0x0204
-
-#define MT_INT_RX_DONE(_n) BIT(_n)
-#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
-#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
-#define MT_INT_TX_DONE(_n) BIT(_n + 4)
-#define MT_INT_RX_COHERENT BIT(16)
-#define MT_INT_TX_COHERENT BIT(17)
-#define MT_INT_ANY_COHERENT BIT(18)
-#define MT_INT_MCU_CMD BIT(19)
-#define MT_INT_TBTT BIT(20)
-#define MT_INT_PRE_TBTT BIT(21)
-#define MT_INT_TX_STAT BIT(22)
-#define MT_INT_AUTO_WAKEUP BIT(23)
-#define MT_INT_GPTIMER BIT(24)
-#define MT_INT_RXDELAYINT BIT(26)
-#define MT_INT_TXDELAYINT BIT(27)
-
-#define MT_WPDMA_GLO_CFG 0x0208
-#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
-#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
-#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
-#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
-#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
-#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
-#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
-#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
-#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
-#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
-
-#define MT_WPDMA_RST_IDX 0x020c
-
-#define MT_WPDMA_DELAY_INT_CFG 0x0210
-
-#define MT_WMM_AIFSN 0x0214
-#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
-#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
-
-#define MT_WMM_CWMIN 0x0218
-#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
-#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
-
-#define MT_WMM_CWMAX 0x021c
-#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
-#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
-
-#define MT_WMM_TXOP_BASE 0x0220
-#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
-#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16)
-#define MT_WMM_TXOP_MASK GENMASK(15, 0)
-
-#define MT_WMM_CTRL 0x0230 /* MT76x0 */
-
-#define MT_FCE_DMA_ADDR 0x0230
-#define MT_FCE_DMA_LEN 0x0234
-
-#define MT_USB_DMA_CFG 0x238
-#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
-#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
-#define MT_USB_DMA_CFG_TX_WL_DROP BIT(16)
-#define MT_USB_DMA_CFG_WAKEUP_EN BIT(17)
-#define MT_USB_DMA_CFG_RX_DROP_OR_PADDING BIT(18)
-#define MT_USB_DMA_CFG_TX_CLR BIT(19)
-#define MT_USB_DMA_CFG_WL_LPK_EN BIT(20)
-#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
-#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
-#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
-#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
-#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
-#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
-#if 0
-#define MT_USB_DMA_CFG_TX_CLR BIT(19)
-#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
-#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
-#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
-#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
-#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
-#endif
-
-#define MT_TSO_CTRL 0x0250
-#define MT_HEADER_TRANS_CTRL_REG 0x0260
-
-#define MT_US_CYC_CFG 0x02a4
-#define MT_US_CYC_CNT GENMASK(7, 0)
-
-#define MT_TX_RING_BASE 0x0300
-#define MT_RX_RING_BASE 0x03c0
-#define MT_RING_SIZE 0x10
-
-#define MT_TX_HW_QUEUE_MCU 8
-#define MT_TX_HW_QUEUE_MGMT 9
-
-#define MT_PBF_SYS_CTRL 0x0400
-#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
-#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
-#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
-#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
-#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
-
-#define MT_PBF_CFG 0x0404
-#define MT_PBF_CFG_TX0Q_EN BIT(0)
-#define MT_PBF_CFG_TX1Q_EN BIT(1)
-#define MT_PBF_CFG_TX2Q_EN BIT(2)
-#define MT_PBF_CFG_TX3Q_EN BIT(3)
-#define MT_PBF_CFG_RX0Q_EN BIT(4)
-#define MT_PBF_CFG_RX_DROP_EN BIT(8)
-
-#define MT_PBF_TX_MAX_PCNT 0x0408
-#define MT_PBF_RX_MAX_PCNT 0x040c
-
-#define MT_BCN_OFFSET_BASE 0x041c
-#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
-
-#define MT_RXQ_STA 0x0430
-#define MT_TXQ_STA 0x0434
-#define MT_RF_CSR_CFG 0x0500
-#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
-#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
-#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
-#define MT_RF_CSR_CFG_WR BIT(30)
-#define MT_RF_CSR_CFG_KICK BIT(31)
-
-#define MT_RF_BYPASS_0 0x0504
-#define MT_RF_BYPASS_1 0x0508
-#define MT_RF_SETTING_0 0x050c
-
-#define MT_RF_MISC 0x0518
-#define MT_RF_DATA_WRITE 0x0524
-
-#define MT_RF_CTRL 0x0528
-#define MT_RF_CTRL_ADDR GENMASK(11, 0)
-#define MT_RF_CTRL_WRITE BIT(12)
-#define MT_RF_CTRL_BUSY BIT(13)
-#define MT_RF_CTRL_IDX BIT(16)
-
-#define MT_RF_DATA_READ 0x052c
-
-#define MT_COM_REG0 0x0730
-#define MT_COM_REG1 0x0734
-#define MT_COM_REG2 0x0738
-#define MT_COM_REG3 0x073C
-
-#define MT_FCE_PSE_CTRL 0x0800
-#define MT_FCE_PARAMETERS 0x0804
-#define MT_FCE_CSO 0x0808
-
-#define MT_FCE_L2_STUFF 0x080c
-#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
-#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
-#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
-#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
-#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
-#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
-#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
-#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
-#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
-
-#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
-
-#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
-#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
-#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
-
-#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
-
-#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
-
-#define MT_FCE_SKIP_FS 0x0a6c
-
-#define MT_MAC_CSR0 0x1000
-#define MT_MAC_SYS_CTRL 0x1004
-#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
-#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
-#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
-#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
-
-#define MT_MAC_ADDR_DW0 0x1008
-#define MT_MAC_ADDR_DW1 0x100c
-#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
-
-#define MT_MAC_BSSID_DW0 0x1010
-#define MT_MAC_BSSID_DW1 0x1014
-#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
-#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
-#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
-#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
-#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
-#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
-#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
-
-#define MT_MAX_LEN_CFG 0x1018
-#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
-
-#define MT_LED_CFG 0x102c
-
-#define MT_AMPDU_MAX_LEN_20M1S 0x1030
-#define MT_AMPDU_MAX_LEN_20M2S 0x1034
-#define MT_AMPDU_MAX_LEN_40M1S 0x1038
-#define MT_AMPDU_MAX_LEN_40M2S 0x103c
-#define MT_AMPDU_MAX_LEN 0x1040
-
-#define MT_WCID_DROP_BASE 0x106c
-#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
-#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
-
-#define MT_BCN_BYPASS_MASK 0x108c
-
-#define MT_MAC_APC_BSSID_BASE 0x1090
-#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
-#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
-#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
-#define MT_MAC_APC_BSSID0_H_EN BIT(16)
-
-#define MT_XIFS_TIME_CFG 0x1100
-#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
-#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
-#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
-#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
-#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
-
-#define MT_BKOFF_SLOT_CFG 0x1104
-#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
-#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
-
-#define MT_BEACON_TIME_CFG 0x1114
-#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
-#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
-#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
-#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
-#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
-#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
-
-#define MT_TBTT_SYNC_CFG 0x1118
-#define MT_TBTT_TIMER_CFG 0x1124
-
-#define MT_INT_TIMER_CFG 0x1128
-#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
-#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
-
-#define MT_INT_TIMER_EN 0x112c
-#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
-#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
-
-#define MT_MAC_STATUS 0x1200
-#define MT_MAC_STATUS_TX BIT(0)
-#define MT_MAC_STATUS_RX BIT(1)
-
-#define MT_PWR_PIN_CFG 0x1204
-#define MT_AUX_CLK_CFG 0x120c
-
-#define MT_BB_PA_MODE_CFG0 0x1214
-#define MT_BB_PA_MODE_CFG1 0x1218
-#define MT_RF_PA_MODE_CFG0 0x121c
-#define MT_RF_PA_MODE_CFG1 0x1220
-
-#define MT_RF_PA_MODE_ADJ0 0x1228
-#define MT_RF_PA_MODE_ADJ1 0x122c
-
-#define MT_DACCLK_EN_DLY_CFG 0x1264
-
-#define MT_EDCA_CFG_BASE 0x1300
-#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
-#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
-#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
-#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
-#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
-
-#define MT_TX_PWR_CFG_0 0x1314
-#define MT_TX_PWR_CFG_1 0x1318
-#define MT_TX_PWR_CFG_2 0x131c
-#define MT_TX_PWR_CFG_3 0x1320
-#define MT_TX_PWR_CFG_4 0x1324
-
-#define MT_TX_BAND_CFG 0x132c
-#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
-#define MT_TX_BAND_CFG_5G BIT(1)
-#define MT_TX_BAND_CFG_2G BIT(2)
-
-#define MT_HT_FBK_TO_LEGACY 0x1384
-#define MT_TX_MPDU_ADJ_INT 0x1388
-
-#define MT_TX_PWR_CFG_7 0x13d4
-#define MT_TX_PWR_CFG_8 0x13d8
-#define MT_TX_PWR_CFG_9 0x13dc
-
-#define MT_TX_SW_CFG0 0x1330
-#define MT_TX_SW_CFG1 0x1334
-#define MT_TX_SW_CFG2 0x1338
-
-#define MT_TXOP_CTRL_CFG 0x1340
-#define MT_TXOP_TRUN_EN GENMASK(5, 0)
-#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
-#define MT_TXOP_CTRL
-
-#define MT_TX_RTS_CFG 0x1344
-#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
-#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
-#define MT_TX_RTS_FALLBACK BIT(24)
-
-#define MT_TX_TIMEOUT_CFG 0x1348
-#define MT_TX_RETRY_CFG 0x134c
-#define MT_TX_LINK_CFG 0x1350
-#define MT_HT_FBK_CFG0 0x1354
-#define MT_HT_FBK_CFG1 0x1358
-#define MT_LG_FBK_CFG0 0x135c
-#define MT_LG_FBK_CFG1 0x1360
-
-#define MT_CCK_PROT_CFG 0x1364
-#define MT_OFDM_PROT_CFG 0x1368
-#define MT_MM20_PROT_CFG 0x136c
-#define MT_MM40_PROT_CFG 0x1370
-#define MT_GF20_PROT_CFG 0x1374
-#define MT_GF40_PROT_CFG 0x1378
-
-#define MT_PROT_RATE GENMASK(15, 0)
-#define MT_PROT_CTRL_RTS_CTS BIT(16)
-#define MT_PROT_CTRL_CTS2SELF BIT(17)
-#define MT_PROT_NAV_SHORT BIT(18)
-#define MT_PROT_NAV_LONG BIT(19)
-#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
-#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
-#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
-#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
-#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
-#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
-#define MT_PROT_RTS_THR_EN BIT(26)
-#define MT_PROT_RATE_CCK_11 0x0003
-#define MT_PROT_RATE_OFDM_6 0x4000
-#define MT_PROT_RATE_OFDM_24 0x4004
-#define MT_PROT_RATE_DUP_OFDM_24 0x4084
-#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
-#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
- ~MT_PROT_TXOP_ALLOW_MM40 & \
- ~MT_PROT_TXOP_ALLOW_GF40)
-
-#define MT_EXP_ACK_TIME 0x1380
-
-#define MT_TX_PWR_CFG_0_EXT 0x1390
-#define MT_TX_PWR_CFG_1_EXT 0x1394
-
-#define MT_TX_FBK_LIMIT 0x1398
-#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
-#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
-#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
-#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
-#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
-
-#define MT_TX0_RF_GAIN_CORR 0x13a0
-#define MT_TX1_RF_GAIN_CORR 0x13a4
-#define MT_TX0_RF_GAIN_ATTEN 0x13a8
-
-#define MT_TX_ALC_CFG_0 0x13b0
-#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
-#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
-#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
-#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
-
-#define MT_TX_ALC_CFG_1 0x13b4
-#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
-
-#define MT_TX_ALC_CFG_2 0x13a8
-#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
-
-#define MT_TX0_BB_GAIN_ATTEN 0x13c0
-
-#define MT_TX_ALC_VGA3 0x13c8
-
-#define MT_TX_PROT_CFG6 0x13e0
-#define MT_TX_PROT_CFG7 0x13e4
-#define MT_TX_PROT_CFG8 0x13e8
-
-#define MT_PIFS_TX_CFG 0x13ec
-
-#define MT_RX_FILTR_CFG 0x1400
-
-#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
-#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
-#define MT_RX_FILTR_CFG_PROMISC BIT(2)
-#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
-#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
-#define MT_RX_FILTR_CFG_MCAST BIT(5)
-#define MT_RX_FILTR_CFG_BCAST BIT(6)
-#define MT_RX_FILTR_CFG_DUP BIT(7)
-#define MT_RX_FILTR_CFG_CFACK BIT(8)
-#define MT_RX_FILTR_CFG_CFEND BIT(9)
-#define MT_RX_FILTR_CFG_ACK BIT(10)
-#define MT_RX_FILTR_CFG_CTS BIT(11)
-#define MT_RX_FILTR_CFG_RTS BIT(12)
-#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
-#define MT_RX_FILTR_CFG_BA BIT(14)
-#define MT_RX_FILTR_CFG_BAR BIT(15)
-#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
-
-#define MT_AUTO_RSP_CFG 0x1404
-
-#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
-
-#define MT_LEGACY_BASIC_RATE 0x1408
-#define MT_HT_BASIC_RATE 0x140c
-#define MT_HT_CTRL_CFG 0x1410
-#define MT_RX_PARSER_CFG 0x1418
-#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
-
-#define MT_EXT_CCA_CFG 0x141c
-#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
-#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
-#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
-#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
-#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
-#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
-
-#define MT_TX_SW_CFG3 0x1478
-
-#define MT_PN_PAD_MODE 0x150c
-
-#define MT_TXOP_HLDR_ET 0x1608
-
-#define MT_PROT_AUTO_TX_CFG 0x1648
-
-#define MT_RX_STA_CNT0 0x1700
-#define MT_RX_STA_CNT1 0x1704
-#define MT_RX_STA_CNT2 0x1708
-#define MT_TX_STA_CNT0 0x170c
-#define MT_TX_STA_CNT1 0x1710
-#define MT_TX_STA_CNT2 0x1714
-
-/* Vendor driver defines content of the second word of STAT_FIFO as follows:
- * MT_TX_STAT_FIFO_RATE GENMASK(26, 16)
- * MT_TX_STAT_FIFO_ETXBF BIT(27)
- * MT_TX_STAT_FIFO_SND BIT(28)
- * MT_TX_STAT_FIFO_ITXBF BIT(29)
- * However, tests show that b16-31 have the same layout as TXWI rate_ctl
- * with rate set to rate at which frame was acked.
- */
-#define MT_TX_STAT_FIFO 0x1718
-#define MT_TX_STAT_FIFO_VALID BIT(0)
-#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
-#define MT_TX_STAT_FIFO_AGGR BIT(6)
-#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
-#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
-#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
-
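
Although this header is being removed, the same bit layout is what the shared mt76x02 status path consumes; a minimal decode sketch using these masks (FIELD_GET() comes from <linux/bitfield.h>):

	u32 stat = mt76_rr(dev, MT_TX_STAT_FIFO);

	if (stat & MT_TX_STAT_FIFO_VALID) {
		u8 wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat);
		u16 rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat); /* TXWI-style rate */
		bool acked = stat & MT_TX_STAT_FIFO_SUCCESS;
		/* ... hand the result to mac80211 ... */
	}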
-#define MT_TX_AGG_STAT 0x171c
-
-#define MT_TX_AGG_CNT_BASE0 0x1720
-
-#define MT_MPDU_DENSITY_CNT 0x1740
-
-#define MT_TX_AGG_CNT_BASE1 0x174c
-
-#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
- MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
- MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2))
-
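
The split-base accessor above reads more easily with a worked expansion:

	/* MT_TX_AGG_CNT(3)  = 0x1720 + (3 << 2)        = 0x172c
	 * MT_TX_AGG_CNT(10) = 0x174c + ((10 - 8) << 2) = 0x1754
	 */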
-#define MT_TX_STAT_FIFO_EXT 0x1798
-#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
-#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8)
-
-#define MT_BBP_CORE_BASE 0x2000
-#define MT_BBP_IBI_BASE 0x2100
-#define MT_BBP_AGC_BASE 0x2300
-#define MT_BBP_TXC_BASE 0x2400
-#define MT_BBP_RXC_BASE 0x2500
-#define MT_BBP_TXO_BASE 0x2600
-#define MT_BBP_TXBE_BASE 0x2700
-#define MT_BBP_RXFE_BASE 0x2800
-#define MT_BBP_RXO_BASE 0x2900
-#define MT_BBP_DFS_BASE 0x2a00
-#define MT_BBP_TR_BASE 0x2b00
-#define MT_BBP_CAL_BASE 0x2c00
-#define MT_BBP_DSC_BASE 0x2e00
-#define MT_BBP_PFMU_BASE 0x2f00
-
-#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
-
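
Every BBP register is one 32-bit word, hence the << 2 in the accessor; for example:

	/* MT_BBP(AGC, 8) = MT_BBP_AGC_BASE + (8 << 2)
	 *                = 0x2300 + 0x20 = 0x2320,
	 * the register carrying the MT_BBP_AGC_GAIN field defined below.
	 */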
-#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
-
-#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
-#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
-
-/* AGC, R4/R5 */
-#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16)
-
-/* AGC, R8/R9 */
-#define MT_BBP_AGC_GAIN GENMASK(14, 8)
-
-#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
-#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
-
-#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
-
-#define MT_WCID_ADDR_BASE 0x1800
-#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
-
-#define MT_SRAM_BASE 0x4000
-
-#define MT_WCID_KEY_BASE 0x8000
-#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
-
-#define MT_WCID_IV_BASE 0xa000
-#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
-
-#define MT_WCID_ATTR_BASE 0xa800
-#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
-
-#define MT_WCID_ATTR_PAIRWISE BIT(0)
-#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
-#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
-#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
-#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
-#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
-#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
-#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
-
-#define MT_SKEY_BASE_0 0xac00
-#define MT_SKEY_BASE_1 0xb400
-#define MT_SKEY_0(_bss, _idx) \
- (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
-#define MT_SKEY_1(_bss, _idx) \
- (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
-#define MT_SKEY(_bss, _idx) \
- ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
-
-#define MT_SKEY_MODE_BASE_0 0xb000
-#define MT_SKEY_MODE_BASE_1 0xb3f0
-#define MT_SKEY_MODE_0(_bss) \
- (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
-#define MT_SKEY_MODE_1(_bss) \
- (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
-#define MT_SKEY_MODE(_bss) \
- ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
-#define MT_SKEY_MODE_MASK GENMASK(3, 0)
-#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
-
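
The shared-key storage is split across two banks, selected by bit 3 of the BSS index; one worked expansion:

	/* MT_SKEY(10, 1): bss 10 has bit 3 set, so bank 1 applies:
	 *   MT_SKEY_BASE_1 + (4 * (10 & 7) + 1) * 32
	 *   = 0xb400 + 9 * 32 = 0xb520
	 */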
-#define MT_BEACON_BASE 0xc000
-
-#define MT_TEMP_SENSOR 0x1d000
-#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
-
-enum mt76_cipher_type {
- MT_CIPHER_NONE,
- MT_CIPHER_WEP40,
- MT_CIPHER_WEP104,
- MT_CIPHER_TKIP,
- MT_CIPHER_AES_CCMP,
- MT_CIPHER_CKIP40,
- MT_CIPHER_CKIP104,
- MT_CIPHER_CKIP128,
- MT_CIPHER_WAPI,
-};
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
index 8a752a09f2dc..75d1d6738c34 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
@@ -17,7 +17,6 @@
#include <linux/tracepoint.h>
#include "mt76x0.h"
-#include "mac.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mt76x0
@@ -178,11 +177,11 @@ DECLARE_EVENT_CLASS(dev_simple_evt,
);
TRACE_EVENT(mt76x0_rx,
- TP_PROTO(struct mt76_dev *dev, struct mt76x0_rxwi *rxwi, u32 f),
+ TP_PROTO(struct mt76_dev *dev, struct mt76x02_rxwi *rxwi, u32 f),
TP_ARGS(dev, rxwi, f),
TP_STRUCT__entry(
DEV_ENTRY
- __field_struct(struct mt76x0_rxwi, rxwi)
+ __field_struct(struct mt76x02_rxwi, rxwi)
__field(u32, fce_info)
),
TP_fast_assign(
@@ -197,13 +196,13 @@ TRACE_EVENT(mt76x0_rx,
TRACE_EVENT(mt76x0_tx,
TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb,
- struct mt76_sta *sta, struct mt76_txwi *h),
+ struct mt76x02_sta *sta, struct mt76x02_txwi *h),
TP_ARGS(dev, skb, sta, h),
TP_STRUCT__entry(
DEV_ENTRY
- __field_struct(struct mt76_txwi, h)
+ __field_struct(struct mt76x02_txwi, h)
__field(struct sk_buff *, skb)
- __field(struct mt76_sta *, sta)
+ __field(struct mt76x02_sta *, sta)
),
TP_fast_assign(
DEV_ASSIGN;
@@ -211,11 +210,11 @@ TRACE_EVENT(mt76x0_tx,
__entry->skb = skb;
__entry->sta = sta;
),
- TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx "
+ TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate:%04hx "
"ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
__entry->skb, __entry->sta,
le16_to_cpu(__entry->h.flags),
- le16_to_cpu(__entry->h.rate_ctl),
+ le16_to_cpu(__entry->h.rate),
__entry->h.ack_ctl, __entry->h.wcid,
le16_to_cpu(__entry->h.len_ctl))
);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
deleted file mode 100644
index 751b49c28ae5..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "mt76x0.h"
-#include "trace.h"
-
-/* Take mac80211 Q id from the skb and translate it to hardware Q id */
-static u8 skb2q(struct sk_buff *skb)
-{
- int qid = skb_get_queue_mapping(skb);
-
- if (WARN_ON(qid >= MT_TXQ_PSD)) {
- qid = MT_TXQ_BE;
- skb_set_queue_mapping(skb, qid);
- }
-
- return q2hwq(qid);
-}
-
-static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb,
- struct ieee80211_tx_info *info)
-{
- int pkt_len = (unsigned long)info->status.status_driver_data[0];
-
- skb_pull(skb, sizeof(struct mt76_txwi) + 4);
- if (ieee80211_get_hdrlen_from_skb(skb) % 4)
- mt76x0_remove_hdr_pad(skb);
-
- skb_trim(skb, pkt_len);
-}
-
-void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- mt76x0_tx_skb_remove_dma_overhead(skb, info);
-
- ieee80211_tx_info_clear_status(info);
- info->status.rates[0].idx = -1;
- info->flags |= IEEE80211_TX_STAT_ACK;
-
- spin_lock(&dev->mac_lock);
- ieee80211_tx_status(dev->mt76.hw, skb);
- spin_unlock(&dev->mac_lock);
-}
-
-static int mt76x0_skb_rooms(struct mt76x0_dev *dev, struct sk_buff *skb)
-{
- int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
- u32 need_head;
-
- need_head = sizeof(struct mt76_txwi) + 4;
- if (hdr_len % 4)
- need_head += 2;
-
- return skb_cow(skb, need_head);
-}
-
-static struct mt76_txwi *
-mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb,
- struct ieee80211_sta *sta, struct mt76_wcid *wcid,
- int pkt_len)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rate = &info->control.rates[0];
- struct mt76_txwi *txwi;
- unsigned long flags;
- u16 txwi_flags = 0;
- u32 pkt_id;
- u16 rate_ctl;
- u8 nss;
-
- txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
- memset(txwi, 0, sizeof(*txwi));
-
- if (!wcid->tx_rate_set)
- ieee80211_get_tx_rates(info->control.vif, sta, skb,
- info->control.rates, 1);
-
- spin_lock_irqsave(&dev->mt76.lock, flags);
- if (rate->idx < 0 || !rate->count) {
- rate_ctl = wcid->tx_rate;
- nss = wcid->tx_rate_nss;
- } else {
- rate_ctl = mt76x0_mac_tx_rate_val(dev, rate, &nss);
- }
- spin_unlock_irqrestore(&dev->mt76.lock, flags);
-
- txwi->rate_ctl = cpu_to_le16(rate_ctl);
-
- if (info->flags & IEEE80211_TX_CTL_LDPC)
- txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_LDPC);
- if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
- txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_STBC);
- if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- txwi_flags |= MT_TXWI_FLAGS_MMPS;
-
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
- pkt_id = 1;
- } else {
- pkt_id = 0;
- }
-
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- pkt_id |= MT_TXWI_PKTID_PROBE;
-
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
- txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
-
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
- u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
-
- ba_size <<= sta->ht_cap.ampdu_factor;
- ba_size = min_t(int, 7, ba_size - 1);
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
- ba_size = 0;
- } else {
- txwi_flags |= MT_TXWI_FLAGS_AMPDU;
- txwi_flags |= FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density);
- }
- txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
- }
-
- txwi->wcid = wcid->idx;
- txwi->flags |= cpu_to_le16(txwi_flags);
- txwi->len_ctl = cpu_to_le16(pkt_len);
- txwi->pktid = pkt_id;
-
- return txwi;
-}
-
-void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76x0_dev *dev = hw->priv;
- struct ieee80211_vif *vif = info->control.vif;
- struct ieee80211_sta *sta = control->sta;
- struct mt76_sta *msta = NULL;
- struct mt76_wcid *wcid = dev->mon_wcid;
- struct mt76_txwi *txwi;
- int pkt_len = skb->len;
- int hw_q = skb2q(skb);
-
- BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
- info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
-
- if (mt76x0_skb_rooms(dev, skb) || mt76x0_insert_hdr_pad(skb)) {
- ieee80211_free_txskb(dev->mt76.hw, skb);
- return;
- }
-
- if (sta) {
- msta = (struct mt76_sta *) sta->drv_priv;
- wcid = &msta->wcid;
- } else if (vif && (!info->control.hw_key && wcid->hw_key_idx != -1)) {
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
-
- wcid = &mvif->group_wcid;
- }
-
- txwi = mt76x0_push_txwi(dev, skb, sta, wcid, pkt_len);
-
- if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q))
- return;
-
- trace_mt76x0_tx(&dev->mt76, skb, msta, txwi);
-}
-
-void mt76x0_tx_stat(struct work_struct *work)
-{
- struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
- stat_work.work);
- struct mt76_tx_status stat;
- unsigned long flags;
- int cleaned = 0;
- u8 update = 1;
-
- while (!test_bit(MT76_REMOVED, &dev->mt76.state)) {
- stat = mt76x0_mac_fetch_tx_status(dev);
- if (!stat.valid)
- break;
-
- mt76x0_send_tx_status(dev, &stat, &update);
-
- cleaned++;
- }
- trace_mt76x0_tx_status_cleaned(&dev->mt76, cleaned);
-
- spin_lock_irqsave(&dev->tx_lock, flags);
- if (cleaned)
- queue_delayed_work(dev->stat_wq, &dev->stat_work,
- msecs_to_jiffies(10));
- else if (test_and_clear_bit(MT76_MORE_STATS, &dev->mt76.state))
- queue_delayed_work(dev->stat_wq, &dev->stat_work,
- msecs_to_jiffies(20));
- else
- clear_bit(MT76_READING_STATS, &dev->mt76.state);
- spin_unlock_irqrestore(&dev->tx_lock, flags);
-}
-
-int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
-{
- struct mt76x0_dev *dev = hw->priv;
- u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
- u32 val;
-
- /* TODO: should we do funny things with the parameters?
- * See what mt76x0_set_default_edca() used to do in init.c.
- */
-
- if (params->cw_min)
- cw_min = fls(params->cw_min);
- if (params->cw_max)
- cw_max = fls(params->cw_max);
-
- WARN_ON(params->txop > 0xff);
- WARN_ON(params->aifs > 0xf);
- WARN_ON(cw_min > 0xf);
- WARN_ON(cw_max > 0xf);
-
- val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
- FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
- FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
- /* TODO: based on user-controlled EnableTxBurst var vendor drv sets
- * a really long txop on AC0 (see connect.c:2009) but only on
- * connect? When not connected should be 0.
- */
- if (!hw_q)
- val |= 0x60;
- else
- val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
- mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
-
- val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
- val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
- val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
- mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
-
- val = mt76_rr(dev, MT_WMM_AIFSN);
- val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
- val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
- mt76_wr(dev, MT_WMM_AIFSN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMIN);
- val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
- val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
- mt76_wr(dev, MT_WMM_CWMIN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMAX);
- val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
- val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
- mt76_wr(dev, MT_WMM_CWMAX, val);
-
- return 0;
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 54ae1f113be2..a7fd36c2f633 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -16,8 +16,9 @@
#include <linux/usb.h>
#include "mt76x0.h"
-#include "usb.h"
+#include "mcu.h"
#include "trace.h"
+#include "../mt76x02_usb.h"
static struct usb_device_id mt76x0_device_table[] = {
{ USB_DEVICE(0x148F, 0x7610) }, /* MT7610U */
@@ -40,256 +41,215 @@ static struct usb_device_id mt76x0_device_table[] = {
{ USB_DEVICE(0x20f4, 0x806b) }, /* TRENDnet TEW-806UBH */
{ USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */
{ USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac Stick */
- { USB_DEVICE(0x2357, 0x0105) }, /* TP-LINK Archer T1U */
+ { USB_DEVICE(0x2357, 0x0105),
+ .driver_info = 1, }, /* TP-LINK Archer T1U */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7630U */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, /* MT7650U */
{ 0, }
};
-bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
- struct mt76x0_dma_buf *buf)
+static void mt76x0_init_usb_dma(struct mt76x02_dev *dev)
{
- struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
+ u32 val;
- buf->len = len;
- buf->urb = usb_alloc_urb(0, GFP_KERNEL);
- buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
- return !buf->urb || !buf->buf;
-}
-
-void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf)
-{
- struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
-
- usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
- usb_free_urb(buf->urb);
-}
-
-int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
- struct mt76x0_dma_buf *buf, gfp_t gfp,
- usb_complete_t complete_fn, void *context)
-{
- struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
- unsigned pipe;
- int ret;
-
- if (dir == USB_DIR_IN)
- pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[ep_idx]);
- else
- pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep_idx]);
-
- usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
- complete_fn, context);
- buf->urb->transfer_dma = buf->dma;
- buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
- trace_mt76x0_submit_urb(&dev->mt76, buf->urb);
- ret = usb_submit_urb(buf->urb, gfp);
- if (ret)
- dev_err(dev->mt76.dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
- dir, ep_idx, ret);
- return ret;
-}
-
-void mt76x0_complete_urb(struct urb *urb)
-{
- struct completion *cmpl = urb->context;
+ val |= MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
- complete(cmpl);
-}
+ /* disable AGGR_BULK_RX in order to receive one
+ * frame in each rx urb and avoid copies
+ */
+ val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
-int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
- const u8 direction, const u16 val, const u16 offset,
- void *buf, const size_t buflen)
-{
- int i, ret;
- struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
- const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- const unsigned int pipe = (direction == USB_DIR_IN) ?
- usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
-
- for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
- ret = usb_control_msg(usb_dev, pipe, req, req_type,
- val, offset, buf, buflen,
- MT_VEND_REQ_TOUT_MS);
- trace_mt76x0_vend_req(&dev->mt76, pipe, req, req_type, val, offset,
- buf, buflen, ret);
-
- if (ret == -ENODEV)
- set_bit(MT76_REMOVED, &dev->mt76.state);
- if (ret >= 0 || ret == -ENODEV)
- return ret;
-
- msleep(5);
- }
+ val = mt76_rr(dev, MT_COM_REG0);
+ if (val & 1)
+ dev_dbg(dev->mt76.dev, "MCU not ready\n");
- dev_err(dev->mt76.dev, "Vendor request req:%02x off:%04x failed:%d\n",
- req, offset, ret);
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
- return ret;
+ val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PAD;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
}
-void mt76x0_vendor_reset(struct mt76x0_dev *dev)
+static void mt76x0u_cleanup(struct mt76x02_dev *dev)
{
- mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
- MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ mt76x0_chip_onoff(dev, false, false);
+ mt76u_queues_deinit(&dev->mt76);
+ mt76u_mcu_deinit(&dev->mt76);
}
-static u32 mt76x0_rr(struct mt76_dev *dev, u32 offset)
+static void mt76x0u_mac_stop(struct mt76x02_dev *dev)
{
- struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
- int ret;
- u32 val = ~0;
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
+ mt76u_stop_stat_wk(&dev->mt76);
- WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+ if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ return;
- mutex_lock(&mdev->usb_ctrl_mtx);
+ mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
- ret = mt76x0_vendor_request((struct mt76x0_dev *)dev, MT_VEND_MULTI_READ, USB_DIR_IN,
- 0, offset, mdev->data, MT_VEND_BUF);
- if (ret == MT_VEND_BUF)
- val = get_unaligned_le32(mdev->data);
- else if (ret > 0)
- dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
- ret, offset);
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
- mutex_unlock(&mdev->usb_ctrl_mtx);
+ mt76x0_mac_stop(dev);
- trace_mt76x0_reg_read(dev, offset, val);
- return val;
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+ dev_warn(dev->mt76.dev, "RX DMA did not stop\n");
}
-int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
- const u16 offset, const u32 val)
+static int mt76x0u_start(struct ieee80211_hw *hw)
{
- struct mt76x0_dev *mdev = dev;
+ struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&mdev->usb_ctrl_mtx);
+ mutex_lock(&dev->mt76.mutex);
- ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
- val & 0xffff, offset, NULL, 0);
- if (!ret)
- ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
- val >> 16, offset + 2, NULL, 0);
+ ret = mt76x0_mac_start(dev);
+ if (ret)
+ goto out;
- mutex_unlock(&mdev->usb_ctrl_mtx);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+out:
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
-static void mt76x0_wr(struct mt76_dev *dev, u32 offset, u32 val)
+static void mt76x0u_stop(struct ieee80211_hw *hw)
{
- struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
- int ret;
+ struct mt76x02_dev *dev = hw->priv;
- WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
-
- mutex_lock(&mdev->usb_ctrl_mtx);
-
- put_unaligned_le32(val, mdev->data);
- ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_WRITE, USB_DIR_OUT,
- 0, offset, mdev->data, MT_VEND_BUF);
- trace_mt76x0_reg_write(dev, offset, val);
-
- mutex_unlock(&mdev->usb_ctrl_mtx);
+ mutex_lock(&dev->mt76.mutex);
+ mt76x0u_mac_stop(dev);
+ mutex_unlock(&dev->mt76.mutex);
}
-static u32 mt76x0_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
-{
- val |= mt76x0_rr(dev, offset) & ~mask;
- mt76x0_wr(dev, offset, val);
- return val;
-}
+static const struct ieee80211_ops mt76x0u_ops = {
+ .tx = mt76x02_tx,
+ .start = mt76x0u_start,
+ .stop = mt76x0u_stop,
+ .add_interface = mt76x02_add_interface,
+ .remove_interface = mt76x02_remove_interface,
+ .config = mt76x0_config,
+ .configure_filter = mt76x02_configure_filter,
+ .bss_info_changed = mt76x0_bss_info_changed,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ .set_key = mt76x02_set_key,
+ .conf_tx = mt76x02_conf_tx,
+ .sw_scan_start = mt76x0_sw_scan,
+ .sw_scan_complete = mt76x0_sw_scan_complete,
+ .ampdu_action = mt76x02_ampdu_action,
+ .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+ .set_rts_threshold = mt76x0_set_rts_threshold,
+ .wake_tx_queue = mt76_wake_tx_queue,
+};
-static void mt76x0_wr_copy(struct mt76_dev *dev, u32 offset,
- const void *data, int len)
+static int mt76x0u_register_device(struct mt76x02_dev *dev)
{
- WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
- WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+ struct ieee80211_hw *hw = dev->mt76.hw;
+ int err;
- mt76x0_burst_write_regs((struct mt76x0_dev *) dev, offset, data, len / 4);
-}
+ err = mt76u_alloc_queues(&dev->mt76);
+ if (err < 0)
+ goto out_err;
-void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr)
-{
- mt76_wr(dev, offset, get_unaligned_le32(addr));
- mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
-}
+ err = mt76u_mcu_init_rx(&dev->mt76);
+ if (err < 0)
+ goto out_err;
-static int mt76x0_assign_pipes(struct usb_interface *usb_intf,
- struct mt76x0_dev *dev)
-{
- struct usb_endpoint_descriptor *ep_desc;
- struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
- unsigned i, ep_i = 0, ep_o = 0;
-
- BUILD_BUG_ON(sizeof(dev->in_ep) < __MT_EP_IN_MAX);
- BUILD_BUG_ON(sizeof(dev->out_ep) < __MT_EP_OUT_MAX);
-
- for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
- ep_desc = &intf_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_bulk_in(ep_desc) &&
- ep_i++ < __MT_EP_IN_MAX) {
- dev->in_ep[ep_i - 1] = usb_endpoint_num(ep_desc);
- dev->in_max_packet = usb_endpoint_maxp(ep_desc);
- /* Note: this is ignored by usb sub-system but vendor
- * code does it. We can drop this at some point.
- */
- dev->in_ep[ep_i - 1] |= USB_DIR_IN;
- } else if (usb_endpoint_is_bulk_out(ep_desc) &&
- ep_o++ < __MT_EP_OUT_MAX) {
- dev->out_ep[ep_o - 1] = usb_endpoint_num(ep_desc);
- dev->out_max_packet = usb_endpoint_maxp(ep_desc);
- }
+ mt76x0_chip_onoff(dev, true, true);
+ if (!mt76x02_wait_for_mac(&dev->mt76)) {
+ err = -ETIMEDOUT;
+ goto out_err;
}
- if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
- dev_err(dev->mt76.dev, "Error: wrong pipe number in:%d out:%d\n",
- ep_i, ep_o);
- return -EINVAL;
- }
+ err = mt76x0u_mcu_init(dev);
+ if (err < 0)
+ goto out_err;
+
+ mt76x0_init_usb_dma(dev);
+ err = mt76x0_init_hardware(dev);
+ if (err < 0)
+ goto out_err;
+
+ mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+ mt76_wr(dev, MT_TXOP_CTRL_CFG,
+ FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+ FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+ err = mt76x0_register_device(dev);
+ if (err < 0)
+ goto out_err;
+
+ /* check hw sg support in order to enable AMSDU */
+ if (mt76u_check_sg(&dev->mt76))
+ hw->max_tx_fragments = MT_SG_MAX_SIZE;
+ else
+ hw->max_tx_fragments = 1;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
return 0;
+
+out_err:
+ mt76x0u_cleanup(dev);
+ return err;
}
-static int mt76x0_probe(struct usb_interface *usb_intf,
+static int mt76x0u_probe(struct usb_interface *usb_intf,
const struct usb_device_id *id)
{
+ static const struct mt76_driver_ops drv_ops = {
+ .tx_prepare_skb = mt76x02u_tx_prepare_skb,
+ .tx_complete_skb = mt76x02u_tx_complete_skb,
+ .tx_status_data = mt76x02_tx_status_data,
+ .rx_skb = mt76x02_queue_rx_skb,
+ };
struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
- struct mt76x0_dev *dev;
+ struct mt76x02_dev *dev;
u32 asic_rev, mac_rev;
int ret;
- static const struct mt76_bus_ops usb_ops = {
- .rr = mt76x0_rr,
- .wr = mt76x0_wr,
- .rmw = mt76x0_rmw,
- .copy = mt76x0_wr_copy,
- };
- dev = mt76x0_alloc_device(&usb_intf->dev);
+ dev = mt76x0_alloc_device(&usb_intf->dev, &drv_ops,
+ &mt76x0u_ops);
if (!dev)
return -ENOMEM;
+ /* Quirk for Archer T1U */
+ if (id->driver_info)
+ dev->no_2ghz = true;
+
usb_dev = usb_get_dev(usb_dev);
usb_reset_device(usb_dev);
usb_set_intfdata(usb_intf, dev);
- dev->mt76.bus = &usb_ops;
-
- ret = mt76x0_assign_pipes(usb_intf, dev);
+ mt76x02u_init_mcu(&dev->mt76);
+ ret = mt76u_init(&dev->mt76, usb_intf);
if (ret)
goto err;
	/* Disable the HW, otherwise MCU fails to initialize on hot reboot */
mt76x0_chip_onoff(dev, false, false);
- ret = mt76x0_wait_asic_ready(dev);
- if (ret)
+ if (!mt76x02_wait_for_mac(&dev->mt76)) {
+ ret = -ETIMEDOUT;
goto err;
+ }
asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
mac_rev = mt76_rr(dev, MT_MAC_CSR0);
@@ -300,77 +260,89 @@ static int mt76x0_probe(struct usb_interface *usb_intf,
if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");
- ret = mt76x0_init_hardware(dev);
- if (ret)
+ ret = mt76x0u_register_device(dev);
+ if (ret < 0)
goto err;
- ret = mt76x0_register_device(dev);
- if (ret)
- goto err_hw;
-
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
-
return 0;
-err_hw:
- mt76x0_cleanup(dev);
+
err:
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
- destroy_workqueue(dev->stat_wq);
ieee80211_free_hw(dev->mt76.hw);
return ret;
}
static void mt76x0_disconnect(struct usb_interface *usb_intf)
{
- struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
	bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
	if (!initialized)
return;
ieee80211_unregister_hw(dev->mt76.hw);
- mt76x0_cleanup(dev);
+ mt76x0u_cleanup(dev);
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
- destroy_workqueue(dev->stat_wq);
ieee80211_free_hw(dev->mt76.hw);
}
-static int mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state)
+static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
+ pm_message_t state)
{
- struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
+ struct mt76_usb *usb = &dev->mt76.usb;
- mt76x0_cleanup(dev);
+ mt76u_stop_queues(&dev->mt76);
+ mt76x0u_mac_stop(dev);
+ usb_kill_urb(usb->mcu.res.urb);
return 0;
}
-static int mt76x0_resume(struct usb_interface *usb_intf)
+static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
{
- struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
+ struct mt76_usb *usb = &dev->mt76.usb;
int ret;
+ reinit_completion(&usb->mcu.cmpl);
+ ret = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
+ MT_EP_IN_CMD_RESP,
+ &usb->mcu.res, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (ret < 0)
+ goto err;
+
+ ret = mt76u_submit_rx_buffers(&dev->mt76);
+ if (ret < 0)
+ goto err;
+
+ tasklet_enable(&usb->rx_tasklet);
+ tasklet_enable(&usb->tx_tasklet);
+
ret = mt76x0_init_hardware(dev);
if (ret)
- return ret;
-
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ goto err;
return 0;
+err:
+ mt76x0u_cleanup(dev);
+ return ret;
}
MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
-MODULE_FIRMWARE(MT7610_FIRMWARE);
MODULE_LICENSE("GPL");
static struct usb_driver mt76x0_driver = {
.name = KBUILD_MODNAME,
.id_table = mt76x0_device_table,
- .probe = mt76x0_probe,
+ .probe = mt76x0u_probe,
.disconnect = mt76x0_disconnect,
.suspend = mt76x0_suspend,
.resume = mt76x0_resume,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
deleted file mode 100644
index 492e431390a8..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76X0U_USB_H
-#define __MT76X0U_USB_H
-
-#include "mt76x0.h"
-
-#define MT7610_FIRMWARE "mediatek/mt7610u.bin"
-
-#define MT_VEND_REQ_MAX_RETRY 10
-#define MT_VEND_REQ_TOUT_MS 300
-
-#define MT_VEND_DEV_MODE_RESET 1
-
-#define MT_VEND_BUF sizeof(__le32)
-
-static inline struct usb_device *mt76x0_to_usb_dev(struct mt76x0_dev *mt76x0)
-{
- return interface_to_usbdev(to_usb_interface(mt76x0->mt76.dev));
-}
-
-static inline struct usb_device *mt76_to_usb_dev(struct mt76_dev *mt76)
-{
- return interface_to_usbdev(to_usb_interface(mt76->dev));
-}
-
-static inline bool mt76x0_urb_has_error(struct urb *urb)
-{
- return urb->status &&
- urb->status != -ENOENT &&
- urb->status != -ECONNRESET &&
- urb->status != -ESHUTDOWN;
-}
-
-bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
- struct mt76x0_dma_buf *buf);
-void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf);
-int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
- struct mt76x0_dma_buf *buf, gfp_t gfp,
- usb_complete_t complete_fn, void *context);
-void mt76x0_complete_urb(struct urb *urb);
-
-int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
- const u8 direction, const u16 val, const u16 offset,
- void *buf, const size_t buflen);
-void mt76x0_vendor_reset(struct mt76x0_dev *dev);
-int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
- const u16 offset, const u32 val);
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
new file mode 100644
index 000000000000..a9f14d5149d1
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+
+#include "mt76x0.h"
+#include "mcu.h"
+#include "../mt76x02_usb.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x38f8
+#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MT7610U_FIRMWARE "mediatek/mt7610u.bin"
+
+static int
+mt76x0u_upload_firmware(struct mt76x02_dev *dev,
+ const struct mt76x02_fw_header *hdr)
+{
+ u8 *fw_payload = (u8 *)(hdr + 1);
+ u32 ilm_len, dlm_len;
+ void *ivb;
+ int err;
+
+ ivb = kmemdup(fw_payload, MT_MCU_IVB_SIZE, GFP_KERNEL);
+ if (!ivb)
+ return -ENOMEM;
+
+ ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE;
+ dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n",
+ ilm_len, MT_MCU_IVB_SIZE);
+ err = mt76x02u_mcu_fw_send_data(dev, fw_payload + MT_MCU_IVB_SIZE,
+ ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+ MT_MCU_IVB_SIZE);
+ if (err)
+ goto out;
+
+ dlm_len = le32_to_cpu(hdr->dlm_len);
+ dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
+ err = mt76x02u_mcu_fw_send_data(dev,
+ fw_payload + le32_to_cpu(hdr->ilm_len),
+ dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+ MT_MCU_DLM_OFFSET);
+ if (err)
+ goto out;
+
+ err = mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x12, 0, ivb, MT_MCU_IVB_SIZE);
+ if (err < 0)
+ goto out;
+
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 1000)) {
+ dev_err(dev->mt76.dev, "Firmware failed to start\n");
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ dev_dbg(dev->mt76.dev, "Firmware running!\n");
+
+out:
+ kfree(ivb);
+
+ return err;
+}
+
+static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76x02_fw_header *hdr;
+ int len, ret;
+ u32 val;
+
+ mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+
+ if (mt76x0_firmware_running(dev))
+ return 0;
+
+ ret = request_firmware(&fw, MT7610U_FIRMWARE, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto err_inv_fw;
+
+ hdr = (const struct mt76x02_fw_header *)fw->data;
+
+ if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+ goto err_inv_fw;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto err_inv_fw;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_dbg(dev->mt76.dev,
+ "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+ le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+ len = le32_to_cpu(hdr->ilm_len);
+
+ mt76_wr(dev, 0x1004, 0x2c);
+
+ mt76_set(dev, MT_USB_DMA_CFG,
+ (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
+ mt76x02u_mcu_fw_reset(dev);
+ usleep_range(5000, 6000);
+/*
+ mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+ MT_PBF_CFG_TX1Q_EN |
+ MT_PBF_CFG_TX2Q_EN |
+ MT_PBF_CFG_TX3Q_EN));
+*/
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ /* FCE tx_fs_base_ptr */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+ /* FCE pdma enable */
+ mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt76_wr(dev, MT_FCE_SKIP_FS, 3);
+
+ val = mt76_rr(dev, MT_USB_DMA_CFG);
+ val |= MT_USB_DMA_CFG_UDMA_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_UDMA_TX_WL_DROP;
+ mt76_wr(dev, MT_USB_DMA_CFG, val);
+
+ ret = mt76x0u_upload_firmware(dev, hdr);
+ release_firmware(fw);
+
+ mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ return ret;
+
+err_inv_fw:
+ dev_err(dev->mt76.dev, "Invalid firmware image\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+int mt76x0u_mcu_init(struct mt76x02_dev *dev)
+{
+ int ret;
+
+ ret = mt76x0u_load_firmware(dev);
+ if (ret < 0)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+ return 0;
+}
+
+MODULE_FIRMWARE(MT7610U_FIRMWARE);
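
The validation in mt76x0u_load_firmware above boils down to one whole-image invariant: the blob is exactly one header followed by the ILM (whose first MT_MCU_IVB_SIZE bytes are the IVB uploaded separately via the vendor request) and the DLM. A sketch of that check against the names used above; mt76x0u_fw_layout_ok is hypothetical, not part of the driver:

	/* Hypothetical helper collecting the layout checks shown above. */
	static bool mt76x0u_fw_layout_ok(const struct firmware *fw)
	{
		const struct mt76x02_fw_header *hdr;
		size_t len;

		if (!fw || !fw->data || fw->size < sizeof(*hdr))
			return false;

		hdr = (const struct mt76x02_fw_header *)fw->data;

		/* the ILM must at least cover the IVB that is sent separately */
		if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
			return false;

		/* header + ILM + DLM must account for every byte of the blob */
		len = sizeof(*hdr);
		len += le32_to_cpu(hdr->ilm_len);
		len += le32_to_cpu(hdr->dlm_len);

		return fw->size == len;
	}
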
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
deleted file mode 100644
index 7856dd760419..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "mt76x0.h"
-
-void mt76x0_remove_hdr_pad(struct sk_buff *skb)
-{
- int len = ieee80211_get_hdrlen_from_skb(skb);
-
- memmove(skb->data + 2, skb->data, len);
- skb_pull(skb, 2);
-}
-
-int mt76x0_insert_hdr_pad(struct sk_buff *skb)
-{
- int len = ieee80211_get_hdrlen_from_skb(skb);
- int ret;
-
- if (len % 4 == 0)
- return 0;
-
- ret = skb_cow(skb, 2);
- if (ret)
- return ret;
-
- skb_push(skb, 2);
- memmove(skb->data, skb->data + 2, len);
-
- skb->data[len] = 0;
- skb->data[len + 1] = 0;
- return 0;
-}
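
The two deleted helpers implement a simple alignment trick: when the 802.11 header length is not a multiple of four, two zero bytes are inserted between header and payload so the payload starts 32-bit aligned, and stripped again on receive. A self-contained userspace model (the kernel version shifts the header with skb_push()/memmove() rather than moving the payload, but the resulting layout is the same):

	#include <stdio.h>
	#include <string.h>

	/* If hdrlen is not 4-byte aligned, open a 2-byte zeroed gap between
	 * header and payload; returns the new total length.
	 */
	static size_t insert_hdr_pad(unsigned char *buf, size_t hdrlen, size_t len)
	{
		if (hdrlen % 4 == 0)
			return len;	/* already aligned, nothing to do */

		memmove(buf + hdrlen + 2, buf + hdrlen, len - hdrlen);
		buf[hdrlen] = 0;
		buf[hdrlen + 1] = 0;
		return len + 2;
	}

	int main(void)
	{
		unsigned char frame[64];
		size_t newlen;

		memset(frame, 'H', 26);		/* 26-byte QoS data header */
		memcpy(frame + 26, "payload", 7);

		newlen = insert_hdr_pad(frame, 26, 33);
		printf("len=%zu first payload byte=%c\n", newlen, frame[28]);
		return 0;
	}
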
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
new file mode 100644
index 000000000000..47c42c607964
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76X02_UTIL_H
+#define __MT76X02_UTIL_H
+
+#include <linux/kfifo.h>
+
+#include "mt76.h"
+#include "mt76x02_regs.h"
+#include "mt76x02_mac.h"
+#include "mt76x02_dfs.h"
+#include "mt76x02_dma.h"
+
+struct mt76x02_mac_stats {
+ u64 rx_stat[6];
+ u64 tx_stat[6];
+ u64 aggr_stat[2];
+ u64 aggr_n[32];
+ u64 zero_len_del[2];
+};
+
+#define MT_MAX_CHAINS 2
+struct mt76x02_rx_freq_cal {
+ s8 high_gain[MT_MAX_CHAINS];
+ s8 rssi_offset[MT_MAX_CHAINS];
+ s8 lna_gain;
+ u32 mcu_gain;
+ s16 temp_offset;
+ u8 freq_offset;
+};
+
+struct mt76x02_calibration {
+ struct mt76x02_rx_freq_cal rx;
+
+ u8 agc_gain_init[MT_MAX_CHAINS];
+ u8 agc_gain_cur[MT_MAX_CHAINS];
+
+ u16 false_cca;
+ s8 avg_rssi_all;
+ s8 agc_gain_adjust;
+ s8 low_gain;
+
+ s8 temp_vco;
+ s8 temp;
+
+ bool init_cal_done;
+ bool tssi_cal_done;
+ bool tssi_comp_pending;
+ bool dpd_cal_done;
+ bool channel_cal_done;
+};
+
+struct mt76x02_dev {
+ struct mt76_dev mt76; /* must be first */
+
+ struct mac_address macaddr_list[8];
+
+ struct mutex phy_mutex;
+ struct mutex mutex;
+
+ u8 txdone_seq;
+ DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
+
+ struct sk_buff *rx_head;
+
+ struct tasklet_struct tx_tasklet;
+ struct tasklet_struct pre_tbtt_tasklet;
+ struct delayed_work cal_work;
+ struct delayed_work mac_work;
+
+ struct mt76x02_mac_stats stats;
+ atomic_t avg_ampdu_len;
+ u32 aggr_stats[32];
+
+ struct sk_buff *beacons[8];
+ u8 beacon_mask;
+ u8 beacon_data_mask;
+
+ u8 tbtt_count;
+ u16 beacon_int;
+
+ struct mt76x02_calibration cal;
+
+ s8 target_power;
+ s8 target_power_delta[2];
+ bool enable_tpc;
+
+ bool no_2ghz;
+
+ u8 coverage_class;
+ u8 slottime;
+
+ struct mt76x02_dfs_pattern_detector dfs_pd;
+};
+
+extern struct ieee80211_rate mt76x02_rates[12];
+
+void mt76x02_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
+ unsigned int idx);
+int mt76x02_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void mt76x02_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
+ const struct ieee80211_tx_rate *rate);
+s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
+ s8 max_txpwr_adj);
+void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
+int mt76x02_insert_hdr_pad(struct sk_buff *skb);
+void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
+void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb);
+bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
+void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
+void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info);
+
+extern const u16 mt76x02_beacon_offsets[16];
+void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev);
+void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
+void mt76x02_mac_start(struct mt76x02_dev *dev);
+
+static inline bool is_mt76x2(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7612 ||
+ mt76_chip(&dev->mt76) == 0x7662 ||
+ mt76_chip(&dev->mt76) == 0x7602;
+}
+
+static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
+{
+ mt76x02_set_irq_mask(dev, 0, mask);
+}
+
+static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask)
+{
+ mt76x02_set_irq_mask(dev, mask, 0);
+}
+
+static inline bool
+mt76x02_wait_for_txrx_idle(struct mt76_dev *dev)
+{
+ return __mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+ 0, 100);
+}
+
+static inline struct mt76x02_sta *
+mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx)
+{
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->wcid[idx]);
+ if (!wcid)
+ return NULL;
+
+ return container_of(wcid, struct mt76x02_sta, wcid);
+}
+
+static inline struct mt76_wcid *
+mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast)
+{
+ if (!sta)
+ return NULL;
+
+ if (unicast)
+ return &sta->wcid;
+ else
+ return &sta->vif->group_wcid;
+}
+
+#endif
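
mt76x02_rx_get_sta above relies on container_of() to step from the struct mt76_wcid pointer stored in the RCU-protected table back to the struct mt76x02_sta that embeds it (the "must be first" comments elsewhere in these structs cover the direct casts the driver also performs). A userspace miniature of that pointer arithmetic:

	#include <stddef.h>
	#include <stdio.h>

	/* container_of in miniature: recover the embedding structure from
	 * a pointer to one of its members.
	 */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct wcid { int idx; };
	struct sta { int n_frames; struct wcid wcid; };

	int main(void)
	{
		struct sta s = { .n_frames = 3, .wcid = { .idx = 7 } };
		struct wcid *w = &s.wcid;
		struct sta *back = container_of(w, struct sta, wcid);

		printf("n_frames=%d idx=%d\n", back->n_frames, back->wcid.idx);
		return 0;
	}
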
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
index 693f421bf096..7e177c934592 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
@@ -14,8 +14,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef __MT76x2_DFS_H
-#define __MT76x2_DFS_H
+#ifndef __MT76x02_DFS_H
+#define __MT76x02_DFS_H
#include <linux/types.h>
#include <linux/nl80211.h>
@@ -49,7 +49,7 @@
#define MT_DFS_ETSI_MAX_PRI (133333 + 125000 + 117647 + 1000)
#define MT_DFS_ETSI_MIN_PRI (4500 - 20)
-struct mt76x2_radar_specs {
+struct mt76x02_radar_specs {
u8 mode;
u16 avg_len;
u16 e_low;
@@ -70,7 +70,7 @@ struct mt76x2_radar_specs {
#define MT_DFS_EVENT_ENGINE(x) (((x) & BIT(31)) ? 2 : 0)
#define MT_DFS_EVENT_TIMESTAMP(x) ((x) & GENMASK(21, 0))
#define MT_DFS_EVENT_WIDTH(x) ((x) & GENMASK(11, 0))
-struct mt76x2_dfs_event {
+struct mt76x02_dfs_event {
unsigned long fetch_ts;
u32 ts;
u16 width;
@@ -78,12 +78,12 @@ struct mt76x2_dfs_event {
};
#define MT_DFS_EVENT_BUFLEN 256
-struct mt76x2_dfs_event_rb {
- struct mt76x2_dfs_event data[MT_DFS_EVENT_BUFLEN];
+struct mt76x02_dfs_event_rb {
+ struct mt76x02_dfs_event data[MT_DFS_EVENT_BUFLEN];
int h_rb, t_rb;
};
-struct mt76x2_dfs_sequence {
+struct mt76x02_dfs_sequence {
struct list_head head;
u32 first_ts;
u32 last_ts;
@@ -92,7 +92,7 @@ struct mt76x2_dfs_sequence {
u8 engine;
};
-struct mt76x2_dfs_hw_pulse {
+struct mt76x02_dfs_hw_pulse {
u8 engine;
u32 period;
u32 w1;
@@ -100,47 +100,41 @@ struct mt76x2_dfs_hw_pulse {
u32 burst;
};
-struct mt76x2_dfs_sw_detector_params {
+struct mt76x02_dfs_sw_detector_params {
u32 min_pri;
u32 max_pri;
u32 pri_margin;
};
-struct mt76x2_dfs_engine_stats {
+struct mt76x02_dfs_engine_stats {
u32 hw_pattern;
u32 hw_pulse_discarded;
u32 sw_pattern;
};
-struct mt76x2_dfs_seq_stats {
+struct mt76x02_dfs_seq_stats {
u32 seq_pool_len;
u32 seq_len;
};
-struct mt76x2_dfs_pattern_detector {
+struct mt76x02_dfs_pattern_detector {
enum nl80211_dfs_regions region;
u8 chirp_pulse_cnt;
u32 chirp_pulse_ts;
- struct mt76x2_dfs_sw_detector_params sw_dpd_params;
- struct mt76x2_dfs_event_rb event_rb[2];
+ struct mt76x02_dfs_sw_detector_params sw_dpd_params;
+ struct mt76x02_dfs_event_rb event_rb[2];
struct list_head sequences;
struct list_head seq_pool;
- struct mt76x2_dfs_seq_stats seq_stats;
+ struct mt76x02_dfs_seq_stats seq_stats;
unsigned long last_sw_check;
u32 last_event_ts;
- struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
+ struct mt76x02_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
struct tasklet_struct dfs_tasklet;
};
-void mt76x2_dfs_init_params(struct mt76x2_dev *dev);
-void mt76x2_dfs_init_detector(struct mt76x2_dev *dev);
-void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev);
-void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
- enum nl80211_dfs_regions region);
-
-#endif /* __MT76x2_DFS_H */
+#endif /* __MT76x02_DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
new file mode 100644
index 000000000000..6394010a565f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x02_DMA_H
+#define __MT76x02_DMA_H
+
+#include "mt76x02.h"
+#include "dma.h"
+
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_INFO_TX_BURST BIT(17)
+#define MT_TXD_INFO_80211 BIT(19)
+#define MT_TXD_INFO_TSO BIT(20)
+#define MT_TXD_INFO_CSO BIT(21)
+#define MT_TXD_INFO_WIV BIT(24)
+#define MT_TXD_INFO_QSEL GENMASK(26, 25)
+#define MT_TXD_INFO_DPORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
+#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
+
+/* MCU request message header */
+#define MT_MCU_MSG_LEN GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
+#define MT_MCU_MSG_PORT GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD BIT(30)
+
+#define MT_RX_HEADROOM 32
+#define MT76X02_RX_RING_SIZE 256
+
+enum dma_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
+static inline bool
+mt76x02_wait_for_wpdma(struct mt76_dev *dev, int timeout)
+{
+ return __mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+ 0, timeout);
+}
+
+int mt76x02_dma_init(struct mt76x02_dev *dev);
+void mt76x02_dma_disable(struct mt76x02_dev *dev);
+void mt76x02_dma_cleanup(struct mt76x02_dev *dev);
+
+#endif /* __MT76x02_DMA_H */
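
The descriptor words defined above are assembled with the GENMASK()/FIELD_PREP() idiom: a mask names a bit range, and FIELD_PREP() shifts a value into that range. A userspace miniature with simplified stand-ins for the kernel macros (the real ones live in linux/bits.h and linux/bitfield.h):

	#include <stdint.h>
	#include <stdio.h>

	/* simplified 32-bit stand-ins for the kernel macros */
	#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(mask, val) (((val) << __builtin_ctz(mask)) & (mask))

	#define MSG_LEN		GENMASK(15, 0)
	#define MSG_CMD_SEQ	GENMASK(19, 16)
	#define MSG_TYPE_CMD	(1u << 30)

	int main(void)
	{
		/* pack a 0x1c8-byte command with sequence number 5 */
		uint32_t info = MSG_TYPE_CMD |
				FIELD_PREP(MSG_CMD_SEQ, 5) |
				FIELD_PREP(MSG_LEN, 0x1c8);

		printf("tx_info = 0x%08x\n", (unsigned)info); /* 0x400501c8 */
		return 0;
	}
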
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
new file mode 100644
index 000000000000..9390de2a323e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+
+#include "mt76x02_eeprom.h"
+
+static int
+mt76x02_efuse_read(struct mt76x02_dev *dev, u16 addr, u8 *data,
+ enum mt76x02_eeprom_modes mode)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode);
+ val |= MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ udelay(2);
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
+ int len, enum mt76x02_eeprom_modes mode)
+{
+ int ret, i;
+
+ for (i = 0; i + 16 <= len; i += 16) {
+ ret = mt76x02_efuse_read(dev, base + i, buf + i, mode);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data);
+
+void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev)
+{
+ u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+ switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
+ case BOARD_TYPE_5GHZ:
+ dev->mt76.cap.has_5ghz = true;
+ break;
+ case BOARD_TYPE_2GHZ:
+ dev->mt76.cap.has_2ghz = true;
+ break;
+ default:
+ dev->mt76.cap.has_2ghz = true;
+ dev->mt76.cap.has_5ghz = true;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap);
+
+bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band)
+{
+ u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+ if (band == NL80211_BAND_5GHZ)
+ return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G);
+ else
+ return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
+}
+EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled);
+
+void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
+ u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g)
+{
+ u16 val;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_LNA_GAIN);
+ *lna_2g = val & 0xff;
+ lna_5g[0] = val >> 8;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
+ lna_5g[1] = val >> 8;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
+ lna_5g[2] = val >> 8;
+
+ if (!mt76x02_field_valid(lna_5g[1]))
+ lna_5g[1] = lna_5g[0];
+
+ if (!mt76x02_field_valid(lna_5g[2]))
+ lna_5g[2] = lna_5g[0];
+
+ if (band == NL80211_BAND_2GHZ)
+ *rssi_offset = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
+ else
+ *rssi_offset = mt76x02_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain);
+
+u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
+ s8 *lna_2g, s8 *lna_5g,
+ struct ieee80211_channel *chan)
+{
+ u16 val;
+ u8 lna;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
+ *lna_2g = 0;
+ if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
+ memset(lna_5g, 0, sizeof(s8) * 3);
+
+ if (chan->band == NL80211_BAND_2GHZ)
+ lna = *lna_2g;
+ else if (chan->hw_value <= 64)
+ lna = lna_5g[0];
+ else if (chan->hw_value <= 128)
+ lna = lna_5g[1];
+ else
+ lna = lna_5g[2];
+
+ return lna != 0xff ? lna : 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_lna_gain);
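
Note that mt76x02_get_efuse_data only issues reads for complete 16-byte efuse rows (the loop condition is i + 16 <= len), so any trailing partial row is simply left untouched in the destination buffer; callers are expected to pass multiples of 16. A hypothetical caller, sketched with an invented dump_efuse() helper:

	/* Illustrative only: snapshot the first 256 bytes of efuse. */
	static int dump_efuse(struct mt76x02_dev *dev)
	{
		u8 buf[256];
		int ret;

		ret = mt76x02_get_efuse_data(dev, 0, buf, sizeof(buf),
					     MT_EE_READ);
		if (ret)
			dev_err(dev->mt76.dev, "efuse read failed (%d)\n", ret);
		return ret;
	}
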
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
index 0f3e4d2f4fee..b3ec74835d10 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -14,18 +15,21 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef __MT76x2_EEPROM_H
-#define __MT76x2_EEPROM_H
+#ifndef __MT76x02_EEPROM_H
+#define __MT76x02_EEPROM_H
-#include "mt76x2.h"
+#include "mt76x02.h"
-enum mt76x2_eeprom_field {
+enum mt76x02_eeprom_field {
MT_EE_CHIP_ID = 0x000,
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
MT_EE_PCI_ID = 0x00A,
MT_EE_NIC_CONF_0 = 0x034,
MT_EE_NIC_CONF_1 = 0x036,
+ MT_EE_COUNTRY_REGION_5GHZ = 0x038,
+ MT_EE_COUNTRY_REGION_2GHZ = 0x039,
+ MT_EE_FREQ_OFFSET = 0x03a,
MT_EE_NIC_CONF_2 = 0x042,
MT_EE_XTAL_TRIM_1 = 0x03a,
@@ -34,8 +38,10 @@ enum mt76x2_eeprom_field {
MT_EE_LNA_GAIN = 0x044,
MT_EE_RSSI_OFFSET_2G_0 = 0x046,
MT_EE_RSSI_OFFSET_2G_1 = 0x048,
+ MT_EE_LNA_GAIN_5GHZ_1 = 0x049,
MT_EE_RSSI_OFFSET_5G_0 = 0x04a,
MT_EE_RSSI_OFFSET_5G_1 = 0x04c,
+ MT_EE_LNA_GAIN_5GHZ_2 = 0x04d,
MT_EE_TX_POWER_DELTA_BW40 = 0x050,
MT_EE_TX_POWER_DELTA_BW80 = 0x052,
@@ -68,6 +74,17 @@ enum mt76x2_eeprom_field {
MT_EE_TX_POWER_VHT_MCS4 = 0x0bc,
MT_EE_TX_POWER_VHT_MCS8 = 0x0be,
+ MT_EE_2G_TARGET_POWER = 0x0d0,
+ MT_EE_TEMP_OFFSET = 0x0d1,
+ MT_EE_5G_TARGET_POWER = 0x0d2,
+ MT_EE_TSSI_BOUND1 = 0x0d4,
+ MT_EE_TSSI_BOUND2 = 0x0d6,
+ MT_EE_TSSI_BOUND3 = 0x0d8,
+ MT_EE_TSSI_BOUND4 = 0x0da,
+ MT_EE_FREQ_OFFSET_COMPENSATION = 0x0db,
+ MT_EE_TSSI_BOUND5 = 0x0dc,
+ MT_EE_TX_POWER_BYRATE_BASE = 0x0de,
+
MT_EE_RF_TEMP_COMP_SLOPE_5G = 0x0f2,
MT_EE_RF_TEMP_COMP_SLOPE_2G = 0x0f4,
@@ -81,13 +98,21 @@ enum mt76x2_eeprom_field {
MT_EE_BT_VCDL_CALIBRATION = 0x13c,
MT_EE_BT_PMUCFG = 0x13e,
+ MT_EE_USAGE_MAP_START = 0x1e0,
+ MT_EE_USAGE_MAP_END = 0x1fc,
+
__MT_EE_MAX
};
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
#define MT_EE_NIC_CONF_0_PA_INT_2G BIT(8)
#define MT_EE_NIC_CONF_0_PA_INT_5G BIT(9)
+#define MT_EE_NIC_CONF_0_PA_IO_CURRENT BIT(10)
#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
@@ -100,93 +125,68 @@ enum mt76x2_eeprom_field {
#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
-enum mt76x2_board_type {
- BOARD_TYPE_2GHZ = 1,
- BOARD_TYPE_5GHZ = 2,
-};
+#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
+ MT_EE_USAGE_MAP_START + 1)
-enum mt76x2_cal_channel_group {
- MT_CH_5G_JAPAN,
- MT_CH_5G_UNII_1,
- MT_CH_5G_UNII_2,
- MT_CH_5G_UNII_2E_1,
- MT_CH_5G_UNII_2E_2,
- MT_CH_5G_UNII_3,
- __MT_CH_MAX
+enum mt76x02_eeprom_modes {
+ MT_EE_READ,
+ MT_EE_PHYSICAL_READ,
};
-struct mt76x2_tx_power_info {
- u8 target_power;
-
- s8 delta_bw40;
- s8 delta_bw80;
-
- struct {
- s8 tssi_slope;
- s8 tssi_offset;
- s8 target_power;
- s8 delta;
- } chain[MT_MAX_CHAINS];
+enum mt76x02_board_type {
+ BOARD_TYPE_2GHZ = 1,
+ BOARD_TYPE_5GHZ = 2,
};
-struct mt76x2_temp_comp {
- u8 temp_25_ref;
- int lower_bound; /* J */
- int upper_bound; /* J */
- unsigned int high_slope; /* J / dB */
- unsigned int low_slope; /* J / dB */
-};
+static inline bool mt76x02_field_valid(u8 val)
+{
+ return val != 0 && val != 0xff;
+}
static inline int
-mt76x2_eeprom_get(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field)
+mt76x02_sign_extend(u32 val, unsigned int size)
{
- if ((field & 1) || field >= __MT_EE_MAX)
- return -1;
+ bool sign = val & BIT(size - 1);
- return get_unaligned_le16(dev->mt76.eeprom.data + field);
+ val &= BIT(size - 1) - 1;
+
+ return sign ? val : -val;
}
-void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
- struct ieee80211_channel *chan);
-int mt76x2_get_max_rate_power(struct mt76_rate_power *r);
-void mt76x2_get_power_info(struct mt76x2_dev *dev,
- struct mt76x2_tx_power_info *t,
- struct ieee80211_channel *chan);
-int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
-bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
-void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
-void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev);
-
-static inline bool
-mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
+static inline int
+mt76x02_sign_extend_optional(u32 val, unsigned int size)
{
- u16 val;
-
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
- if (!(val & BIT(15)))
- return false;
+ bool enable = val & BIT(size);
- return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
- MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+ return enable ? mt76x02_sign_extend(val, size) : 0;
}
-static inline bool
-mt76x2_tssi_enabled(struct mt76x2_dev *dev)
+static inline s8 mt76x02_rate_power_val(u8 val)
{
- return !mt76x2_temp_tx_alc_enabled(dev) &&
- (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
- MT_EE_NIC_CONF_1_TX_ALC_EN);
+ if (!mt76x02_field_valid(val))
+ return 0;
+
+ return mt76x02_sign_extend_optional(val, 7);
}
-static inline bool
-mt76x2_has_ext_lna(struct mt76x2_dev *dev)
+static inline int
+mt76x02_eeprom_get(struct mt76x02_dev *dev,
+ enum mt76x02_eeprom_field field)
{
- u32 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if ((field & 1) || field >= __MT_EE_MAX)
+ return -1;
- if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
- return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
- else
- return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
+ return get_unaligned_le16(dev->mt76.eeprom.data + field);
}
-#endif
+bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band);
+int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
+ int len, enum mt76x02_eeprom_modes mode);
+void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
+ u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g);
+u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
+ s8 *lna_2g, s8 *lna_5g,
+ struct ieee80211_channel *chan);
+void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev);
+
+#endif /* __MT76x02_EEPROM_H */
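
The sign convention in mt76x02_sign_extend() is easy to misread: in the _optional variant bit(size) acts as an enable flag, bit(size - 1) is the sign flag with set meaning positive, and the remaining low bits carry the magnitude. A runnable userspace copy of the helpers with worked values:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n) (1u << (n))

	/* userspace copies of the helpers above, for a worked example */
	static int sign_extend(uint32_t val, unsigned int size)
	{
		bool sign = val & BIT(size - 1);

		val &= BIT(size - 1) - 1;
		return sign ? val : -val;
	}

	static int sign_extend_optional(uint32_t val, unsigned int size)
	{
		return (val & BIT(size)) ? sign_extend(val, size) : 0;
	}

	int main(void)
	{
		/* bit 7 enables the value, bit 6 is the sign flag
		 * (set = positive), bits 5:0 hold the magnitude */
		printf("%d\n", sign_extend_optional(0xc5, 7)); /* +5 */
		printf("%d\n", sign_extend_optional(0x85, 7)); /* -5 */
		printf("%d\n", sign_extend_optional(0x45, 7)); /*  0: disabled */
		return 0;
	}
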
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 6542644bc325..10578e4cb269 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -15,222 +15,11 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include "mt76x2.h"
+#include "mt76x02.h"
+#include "mt76x02_trace.h"
-void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
-{
- bool stopped = false;
- u32 rts_cfg;
- int i;
-
- mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
-
- rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
-
- /* Wait for MAC to become idle */
- for (i = 0; i < 300; i++) {
- if ((mt76_rr(dev, MT_MAC_STATUS) &
- (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
- mt76_rr(dev, MT_BBP(IBI, 12))) {
- udelay(1);
- continue;
- }
-
- stopped = true;
- break;
- }
-
- if (force && !stopped) {
- mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
-
- mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
- mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
- }
-
- mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
-
-bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
- struct mt76x2_tx_status *stat)
-{
- u32 stat1, stat2;
-
- stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
- stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
-
- stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
- if (!stat->valid)
- return false;
-
- stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
- stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
- stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
- stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
- stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
-
- stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
- stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
-
- return true;
-}
-EXPORT_SYMBOL_GPL(mt76x2_mac_load_tx_status);
-
-static int
-mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
- enum nl80211_band band)
-{
- u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
-
- txrate->idx = 0;
- txrate->flags = 0;
- txrate->count = 1;
-
- switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
- case MT_PHY_TYPE_OFDM:
- if (band == NL80211_BAND_2GHZ)
- idx += 4;
-
- txrate->idx = idx;
- return 0;
- case MT_PHY_TYPE_CCK:
- if (idx >= 8)
- idx -= 8;
-
- txrate->idx = idx;
- return 0;
- case MT_PHY_TYPE_HT_GF:
- txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
- case MT_PHY_TYPE_HT:
- txrate->flags |= IEEE80211_TX_RC_MCS;
- txrate->idx = idx;
- break;
- case MT_PHY_TYPE_VHT:
- txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
- txrate->idx = idx;
- break;
- default:
- return -EINVAL;
- }
-
- switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
- case MT_PHY_BW_20:
- break;
- case MT_PHY_BW_40:
- txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- break;
- case MT_PHY_BW_80:
- txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
- break;
- default:
- return -EINVAL;
- }
-
- if (rate & MT_RXWI_RATE_SGI)
- txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
-
- return 0;
-}
-
-static void
-mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
- struct ieee80211_tx_info *info,
- struct mt76x2_tx_status *st, int n_frames)
-{
- struct ieee80211_tx_rate *rate = info->status.rates;
- int cur_idx, last_rate;
- int i;
-
- if (!n_frames)
- return;
-
- last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
- mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
- dev->mt76.chandef.chan->band);
- if (last_rate < IEEE80211_TX_MAX_RATES - 1)
- rate[last_rate + 1].idx = -1;
-
- cur_idx = rate[last_rate].idx + last_rate;
- for (i = 0; i <= last_rate; i++) {
- rate[i].flags = rate[last_rate].flags;
- rate[i].idx = max_t(int, 0, cur_idx - i);
- rate[i].count = 1;
- }
- rate[last_rate].count = st->retry + 1 - last_rate;
-
- info->status.ampdu_len = n_frames;
- info->status.ampdu_ack_len = st->success ? n_frames : 0;
-
- if (st->pktid & MT_TXWI_PKTID_PROBE)
- info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-
- if (st->aggr)
- info->flags |= IEEE80211_TX_CTL_AMPDU |
- IEEE80211_TX_STAT_AMPDU;
-
- if (!st->ack_req)
- info->flags |= IEEE80211_TX_CTL_NO_ACK;
- else if (st->success)
- info->flags |= IEEE80211_TX_STAT_ACK;
-}
-
-void mt76x2_send_tx_status(struct mt76x2_dev *dev,
- struct mt76x2_tx_status *stat, u8 *update)
-{
- struct ieee80211_tx_info info = {};
- struct ieee80211_sta *sta = NULL;
- struct mt76_wcid *wcid = NULL;
- struct mt76x2_sta *msta = NULL;
-
- rcu_read_lock();
- if (stat->wcid < ARRAY_SIZE(dev->wcid))
- wcid = rcu_dereference(dev->wcid[stat->wcid]);
-
- if (wcid) {
- void *priv;
-
- priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
- sta = container_of(priv, struct ieee80211_sta,
- drv_priv);
- }
-
- if (msta && stat->aggr) {
- u32 stat_val, stat_cache;
-
- stat_val = stat->rate;
- stat_val |= ((u32) stat->retry) << 16;
- stat_cache = msta->status.rate;
- stat_cache |= ((u32) msta->status.retry) << 16;
-
- if (*update == 0 && stat_val == stat_cache &&
- stat->wcid == msta->status.wcid && msta->n_frames < 32) {
- msta->n_frames++;
- goto out;
- }
-
- mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
- msta->n_frames);
-
- msta->status = *stat;
- msta->n_frames = 1;
- *update = 0;
- } else {
- mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
- *update = 1;
- }
-
- ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
-
-out:
- rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(mt76x2_send_tx_status);
-
-static enum mt76x2_cipher_type
-mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+enum mt76x02_cipher_type
+mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
memset(key_data, 0, 32);
if (!key)
@@ -254,15 +43,16 @@ mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
return MT_CIPHER_NONE;
}
}
+EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info);
-int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key)
+int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
+ u8 key_idx, struct ieee80211_key_conf *key)
{
- enum mt76x2_cipher_type cipher;
+ enum mt76x02_cipher_type cipher;
u8 key_data[32];
u32 val;
- cipher = mt76x2_mac_get_key_info(key, key_data);
+ cipher = mt76x02_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
return -EOPNOTSUPP;
@@ -276,21 +66,21 @@ int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
return 0;
}
-EXPORT_SYMBOL_GPL(mt76x2_mac_shared_key_setup);
+EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
-int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
- struct ieee80211_key_conf *key)
+int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
{
- enum mt76x2_cipher_type cipher;
+ enum mt76x02_cipher_type cipher;
u8 key_data[32];
u8 iv_data[8];
- cipher = mt76x2_mac_get_key_info(key, key_data);
+ cipher = mt76x02_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
return -EOPNOTSUPP;
- mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+ mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
memset(iv_data, 0, sizeof(iv_data));
if (key) {
@@ -305,11 +95,70 @@ int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
return 0;
}
-EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_key);
+EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key);
+
+void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
+ u8 vif_idx, u8 *mac)
+{
+ struct mt76_wcid_addr addr = {};
+ u32 attr;
+
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
+ if (idx >= 128)
+ return;
+
+ if (mac)
+ memcpy(addr.macaddr, mac, ETH_ALEN);
+
+ mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
+
+void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
+{
+ u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+ u32 bit = MT_WCID_DROP_MASK(idx);
+
+ /* prevent unnecessary writes */
+ if ((val & bit) != (bit * drop))
+ mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop);
+
+void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq)
+{
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return;
+
+ mtxq = (struct mt76_txq *) txq->drv_priv;
+ if (txq->sta) {
+ struct mt76x02_sta *sta;
+
+ sta = (struct mt76x02_sta *) txq->sta->drv_priv;
+ mtxq->wcid = &sta->wcid;
+ } else {
+ struct mt76x02_vif *mvif;
+
+ mvif = (struct mt76x02_vif *) txq->vif->drv_priv;
+ mtxq->wcid = &mvif->group_wcid;
+ }
+
+ mt76_txq_init(&dev->mt76, txq);
+}
+EXPORT_SYMBOL_GPL(mt76x02_txq_init);
static __le16
-mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
- const struct ieee80211_tx_rate *rate, u8 *nss_val)
+mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
u16 rateval;
u8 phy, rate_idx;
@@ -337,7 +186,7 @@ mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
int band = dev->mt76.chandef.chan->band;
u16 val;
- r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
val = r->hw_value_short;
else
@@ -358,29 +207,110 @@ mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
return cpu_to_le16(rateval);
}
-void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate)
+void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
{
spin_lock_bh(&dev->mt76.lock);
- wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
wcid->tx_rate_set = true;
spin_unlock_bh(&dev->mt76.lock);
}
-EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_rate);
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta, int len)
+bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat)
{
+ u32 stat1, stat2;
+
+ stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+ stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+
+ stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
+ if (!stat->valid)
+ return false;
+
+ stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+ stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+ stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+ stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+ stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+
+ stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+ stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_load_tx_status);
+
+static int
+mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+ enum nl80211_band band)
+{
+ u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (band == NL80211_BAND_2GHZ)
+ idx += 4;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return 0;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ case MT_PHY_TYPE_VHT:
+ txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case MT_PHY_BW_80:
+ txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ return 0;
+}
+
+void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct ieee80211_key_conf *key = info->control.hw_key;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
u16 txwi_flags = 0;
u8 nss;
s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8];
+ u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;
memset(txwi, 0, sizeof(*txwi));
@@ -411,22 +341,22 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
max_txpwr_adj = wcid->max_txpwr_adj;
nss = wcid->tx_rate_nss;
} else {
- txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
- max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
+ txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
+ max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
}
spin_unlock_bh(&dev->mt76.lock);
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
- max_txpwr_adj);
+ txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
+ max_txpwr_adj);
txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
- if (mt76xx_rev(dev) >= MT76XX_REV_E4)
+ if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
txwi->txstream = 0x13;
- else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
+ else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
!(txwi->rate & cpu_to_le16(rate_ht_mask)))
txwi->txstream = 0x93;
- if (info->flags & IEEE80211_TX_CTL_LDPC)
+ if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
@@ -459,44 +389,104 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
txwi->flags |= cpu_to_le16(txwi_flags);
txwi->len_ctl = cpu_to_le16(len);
}
-EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi);
+EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
-void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
+static void
+mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev,
+ struct ieee80211_tx_info *info,
+ struct mt76x02_tx_status *st, int n_frames)
{
- u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
- u32 bit = MT_WCID_DROP_MASK(idx);
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
- /* prevent unnecessary writes */
- if ((val & bit) != (bit * drop))
- mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+ if (!n_frames)
+ return;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate,
+ dev->mt76.chandef.chan->band);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
+ cur_idx = rate[last_rate].idx + last_rate;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+ rate[last_rate].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = n_frames;
+ info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+ if (st->pktid & MT_TXWI_PKTID_PROBE)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
}
-EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_drop);
-void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat, u8 *update)
{
- struct mt76_wcid_addr addr = {};
- u32 attr;
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ struct mt76x02_sta *msta = NULL;
- attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
- FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
+ wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
- mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+ if (wcid) {
+ void *priv;
- mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
- mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+ priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
+ sta = container_of(priv, struct ieee80211_sta,
+ drv_priv);
+ }
- if (idx >= 128)
- return;
+ if (msta && stat->aggr) {
+ u32 stat_val, stat_cache;
- if (mac)
- memcpy(addr.macaddr, mac, ETH_ALEN);
+ stat_val = stat->rate;
+ stat_val |= ((u32) stat->retry) << 16;
+ stat_cache = msta->status.rate;
+ stat_cache |= ((u32) msta->status.retry) << 16;
- mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+ if (*update == 0 && stat_val == stat_cache &&
+ stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+ msta->n_frames++;
+ goto out;
+ }
+
+ mt76x02_mac_fill_tx_status(dev, &info, &msta->status,
+ msta->n_frames);
+
+ msta->status = *stat;
+ msta->n_frames = 1;
+ *update = 0;
+ } else {
+ mt76x02_mac_fill_tx_status(dev, &info, stat, 1);
+ *update = 1;
+ }
+
+ ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
+
+out:
+ rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_setup);
+EXPORT_SYMBOL_GPL(mt76x02_send_tx_status);
-static int
-mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
+int
+mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
{
u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
@@ -561,22 +551,30 @@ mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate);
-static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr)
{
- int hdrlen;
+ ether_addr_copy(dev->mt76.macaddr, addr);
- if (!len)
- return;
+ if (!is_valid_ether_addr(dev->mt76.macaddr)) {
+ eth_random_addr(dev->mt76.macaddr);
+ dev_info(dev->mt76.dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->mt76.macaddr);
+ }
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- memmove(skb->data + len, skb->data, hdrlen);
- skb_pull(skb, len);
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1,
+ get_unaligned_le16(dev->mt76.macaddr + 4) |
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
}
+EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
-int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
+static int
+mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
{
- struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
+ struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;
rssi += cal->rssi_offset[chain];
rssi -= cal->lna_gain;
@@ -584,46 +582,19 @@ int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
return rssi;
}
-static struct mt76x2_sta *
-mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
-{
- struct mt76_wcid *wcid;
-
- if (idx >= ARRAY_SIZE(dev->wcid))
- return NULL;
-
- wcid = rcu_dereference(dev->wcid[idx]);
- if (!wcid)
- return NULL;
-
- return container_of(wcid, struct mt76x2_sta, wcid);
-}
-
-static struct mt76_wcid *
-mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta,
- bool unicast)
-{
- if (!sta)
- return NULL;
-
- if (unicast)
- return &sta->wcid;
- else
- return &sta->vif->group_wcid;
-}
-
-int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
- void *rxi)
+int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
+ void *rxi)
{
struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
- struct mt76x2_rxwi *rxwi = rxi;
- struct mt76x2_sta *sta;
+ struct mt76x02_rxwi *rxwi = rxi;
+ struct mt76x02_sta *sta;
u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
u32 ctl = le32_to_cpu(rxwi->ctl);
u16 rate = le16_to_cpu(rxwi->rate);
u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
- int pad_len = 0;
+ int i, pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
+ s8 signal;
u8 pn_len;
u8 wcid;
int len;
@@ -642,8 +613,8 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
}
wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
- sta = mt76x2_rx_get_sta(dev, wcid);
- status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);
+ sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
+ status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast);
len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
@@ -670,7 +641,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
}
}
- mt76x2_remove_hdr_pad(skb, pad_len);
+ mt76x02_remove_hdr_pad(skb, pad_len);
if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
status->aggr = true;
@@ -679,10 +650,17 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
return -EINVAL;
pskb_trim(skb, len);
- status->chains = BIT(0) | BIT(1);
- status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0);
- status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1);
- status->signal = max(status->chain_signal[0], status->chain_signal[1]);
+
+ status->chains = BIT(0);
+ signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
+ for (i = 1; i < nstreams; i++) {
+ status->chains |= BIT(i);
+ status->chain_signal[i] = mt76x02_mac_get_rssi(dev,
+ rxwi->rssi[i],
+ i);
+ signal = max_t(s8, signal, status->chain_signal[i]);
+ }
+ status->signal = signal;
status->freq = dev->mt76.chandef.chan->center_freq;
status->band = dev->mt76.chandef.chan->band;
@@ -694,6 +672,66 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
sta->inactive_count = 0;
}
- return mt76x2_mac_process_rate(status, rate);
+ return mt76x02_mac_process_rate(status, rate);
+}
+
+void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
+{
+ struct mt76x02_tx_status stat = {};
+ unsigned long flags;
+ u8 update = 1;
+ bool ret;
+
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ return;
+
+ trace_mac_txstat_poll(dev);
+
+ while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
+ spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
+ ret = mt76x02_mac_load_tx_status(dev, &stat);
+ spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
+
+ if (!ret)
+ break;
+
+ trace_mac_txstat_fetch(dev, &stat);
+
+ if (!irq) {
+ mt76x02_send_tx_status(dev, &stat, &update);
+ continue;
+ }
+
+ kfifo_put(&dev->txstatus_fifo, stat);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_poll_tx_status);
+
+static void
+mt76x02_mac_queue_txdone(struct mt76x02_dev *dev, struct sk_buff *skb,
+ void *txwi_ptr)
+{
+ struct mt76x02_tx_info *txi = mt76x02_skb_tx_info(skb);
+ struct mt76x02_txwi *txwi = txwi_ptr;
+
+ mt76x02_mac_poll_tx_status(dev, false);
+
+ txi->tries = 0;
+ txi->jiffies = jiffies;
+ txi->wcid = txwi->wcid;
+ txi->pktid = txwi->pktid;
+ trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+ mt76x02_tx_complete(&dev->mt76, skb);
+}
+
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+ if (e->txwi)
+ mt76x02_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
+ else
+ dev_kfree_skb_any(e->skb);
}
-EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx);
+EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
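
mt76x02_mac_poll_tx_status() above splits its work by context: outside hard IRQ it reports each status immediately, while in IRQ context it only stuffs entries into txstatus_fifo for later. A sketch of a matching consumer; tx_status_drain() is illustrative, presumably run from the driver's tasklet or status path rather than part of this patch:

	/* Drain the statuses queued from hard-IRQ context and report them
	 * outside the IRQ path.
	 */
	static void tx_status_drain(struct mt76x02_dev *dev)
	{
		struct mt76x02_tx_status stat;
		u8 update = 1;

		while (kfifo_get(&dev->txstatus_fifo, &stat))
			mt76x02_send_tx_status(dev, &stat, &update);
	}
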
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 5af0107ba748..d99c18743969 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -14,17 +15,14 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef __MT76x2_MAC_H
-#define __MT76x2_MAC_H
+#ifndef __MT76X02_MAC_H
+#define __MT76X02_MAC_H
-#include "mt76.h"
+#include <linux/average.h>
-struct mt76x2_dev;
-struct mt76x2_sta;
-struct mt76x2_vif;
-struct mt76x2_txwi;
+struct mt76x02_dev;
-struct mt76x2_tx_status {
+struct mt76x02_tx_status {
u8 valid:1;
u8 success:1;
u8 aggr:1;
@@ -35,7 +33,16 @@ struct mt76x2_tx_status {
u16 rate;
} __packed __aligned(2);
-struct mt76x2_tx_info {
+#define MT_VIF_WCID(_n) (254 - ((_n) & 7))
+#define MT_MAX_VIFS 8
+
+struct mt76x02_vif {
+ u8 idx;
+
+ struct mt76_wcid group_wcid;
+};
+
+struct mt76x02_tx_info {
unsigned long jiffies;
u8 tries;
@@ -44,17 +51,17 @@ struct mt76x2_tx_info {
u8 retry;
};
-struct mt76x2_rxwi {
- __le32 rxinfo;
-
- __le32 ctl;
+DECLARE_EWMA(signal, 10, 8);
- __le16 tid_sn;
- __le16 rate;
+struct mt76x02_sta {
+ struct mt76_wcid wcid; /* must be first */
- u8 rssi[4];
+ struct mt76x02_vif *vif;
+ struct mt76x02_tx_status status;
+ int n_frames;
- __le32 bbp_rxinfo[4];
+ struct ewma_signal rssi;
+ int inactive_count;
};
#define MT_RXINFO_BA BIT(0)
@@ -108,6 +115,19 @@ struct mt76x2_rxwi {
#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0)
#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4)
+struct mt76x02_rxwi {
+ __le32 rxinfo;
+
+ __le32 ctl;
+
+ __le16 tid_sn;
+ __le16 rate;
+
+ u8 rssi[4];
+
+ __le32 bbp_rxinfo[4];
+};
+
#define MT_TX_PWR_ADJ GENMASK(3, 0)
enum mt76x2_phy_bandwidth {
@@ -135,7 +155,7 @@ enum mt76x2_phy_bandwidth {
#define MT_TXWI_PKTID_PROBE BIT(7)
-struct mt76x2_txwi {
+struct mt76x02_txwi {
__le16 flags;
__le16 rate;
u8 ack_ctl;
@@ -149,41 +169,61 @@ struct mt76x2_txwi {
u8 pktid;
} __packed __aligned(4);
-static inline struct mt76x2_tx_info *
-mt76x2_skb_tx_info(struct sk_buff *skb)
+static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- return (void *) info->status.status_driver_data;
+ const u32 MAC_CSR0 = 0x1000;
+ int i;
+
+ for (i = 0; i < 500; i++) {
+ if (test_bit(MT76_REMOVED, &dev->state))
+ return false;
+
+ switch (dev->bus->rr(dev, MAC_CSR0)) {
+ case 0:
+ case ~0:
+ break;
+ default:
+ return true;
+ }
+ usleep_range(5000, 10000);
+ }
+ return false;
}
-int mt76x2_mac_start(struct mt76x2_dev *dev);
-void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force);
-void mt76x2_mac_resume(struct mt76x2_dev *dev);
-void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr);
-
-int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
- void *rxi);
-void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta, int len);
-void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
-int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
- struct ieee80211_key_conf *key);
-void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
- const struct ieee80211_tx_rate *rate);
-void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop);
-
-int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
- struct ieee80211_key_conf *key);
-
-int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
- struct sk_buff *skb);
-void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val);
-
-void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq);
-void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev);
+static inline struct mt76x02_tx_info *
+mt76x02_skb_tx_info(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-void mt76x2_mac_work(struct work_struct *work);
+ return (void *)info->status.status_driver_data;
+}
+void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq);
+enum mt76x02_cipher_type
+mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data);
+
+int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
+ u8 key_idx, struct ieee80211_key_conf *key);
+int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
+ u8 *mac);
+void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
+void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat);
+void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat, u8 *update);
+int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
+ void *rxi);
+int mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate);
+void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr);
+void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int len);
+void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
new file mode 100644
index 000000000000..1b853bb723fb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x02_mcu.h"
+
+struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+ memcpy(skb_put(skb, len), data, len);
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);
+
+static struct sk_buff *
+mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires)
+{
+ unsigned long timeout;
+
+ if (!time_is_after_jiffies(expires))
+ return NULL;
+
+ timeout = expires - jiffies;
+ wait_event_timeout(dev->mt76.mmio.mcu.wait,
+ !skb_queue_empty(&dev->mt76.mmio.mcu.res_q),
+ timeout);
+ return skb_dequeue(&dev->mt76.mmio.mcu.res_q);
+}
+
+static int
+mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, int cmd, int seq)
+{
+ struct mt76_queue *q = &dev->mt76.q_tx[qid];
+ struct mt76_queue_buf buf;
+ dma_addr_t addr;
+ u32 tx_info;
+
+ tx_info = MT_MCU_MSG_TYPE_CMD |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
+
+ addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev->mt76.dev, addr))
+ return -ENOMEM;
+
+ buf.addr = addr;
+ buf.len = skb->len;
+
+ spin_lock_bh(&q->lock);
+ mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+ mt76_queue_kick(dev, q);
+ spin_unlock_bh(&q->lock);
+
+ return 0;
+}
+
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ unsigned long expires = jiffies + HZ;
+ int ret;
+ u8 seq;
+
+ if (!skb)
+ return -EINVAL;
+
+ mutex_lock(&mdev->mmio.mcu.mutex);
+
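+ /* 4-bit sequence number; 0 is skipped so a response can always be matched */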
+ seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+
+ ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
+ if (ret)
+ goto out;
+
+ while (wait_resp) {
+ u32 *rxfce;
+ bool check_seq = false;
+
+ skb = mt76x02_mcu_get_response(dev, expires);
+ if (!skb) {
+ dev_err(mdev->dev,
+ "MCU message %d (seq %d) timed out\n", cmd,
+ seq);
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ rxfce = (u32 *) skb->cb;
+
+ if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
+ check_seq = true;
+
+ dev_kfree_skb(skb);
+ if (check_seq)
+ break;
+ }
+
+out:
+ mutex_unlock(&mdev->mmio.mcu.mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
+
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
+ enum mcu_function func,
+ u32 val, bool wait_resp)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, wait_resp);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
+
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
+ bool wait_resp)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 mode;
+ __le32 level;
+ } __packed __aligned(4) msg = {
+ .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
+ .level = cpu_to_le32(0),
+ };
+
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, wait_resp);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
+
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
+ u32 param, bool wait)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(type),
+ .value = cpu_to_le32(param),
+ };
+ int ret;
+
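+ /* clear the calibration-done flag; the firmware sets BIT(31) when finished */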
+ if (wait)
+ mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
+
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ ret = mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
+ if (ret)
+ return ret;
+
+ if (wait &&
+ WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
+ BIT(31), BIT(31), 100)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate);
+
+int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
+{
+ struct sk_buff *skb;
+
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
+ usleep_range(20000, 30000);
+
+ while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);
+
+void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
+ const struct mt76x02_fw_header *h)
+{
+ u16 bld = le16_to_cpu(h->build_ver);
+ u16 ver = le16_to_cpu(h->fw_ver);
+
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%d.%d.%02d-b%x",
+ (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_ethtool_fwver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
new file mode 100644
index 000000000000..2d8fd2514570
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x02_MCU_H
+#define __MT76x02_MCU_H
+
+#include "mt76x02.h"
+
+#define MT_MCU_RESET_CTL 0x070C
+#define MT_MCU_INT_LEVEL 0x0718
+#define MT_MCU_COM_REG0 0x0730
+#define MT_MCU_COM_REG1 0x0734
+#define MT_MCU_COM_REG2 0x0738
+#define MT_MCU_COM_REG3 0x073C
+
+#define MT_INBAND_PACKET_MAX_LEN 192
+#define MT_MCU_MEMMAP_WLAN 0x410000
+
+#define MT_MCU_PCIE_REMAP_BASE4 0x074C
+
+#define MT_MCU_SEMAPHORE_00 0x07B0
+#define MT_MCU_SEMAPHORE_01 0x07B4
+#define MT_MCU_SEMAPHORE_02 0x07B8
+#define MT_MCU_SEMAPHORE_03 0x07BC
+
+#define MT_MCU_ILM_ADDR 0x80000
+
+enum mcu_cmd {
+ CMD_FUN_SET_OP = 1,
+ CMD_LOAD_CR = 2,
+ CMD_INIT_GAIN_OP = 3,
+ CMD_DYNC_VGA_OP = 6,
+ CMD_TDLS_CH_SW = 7,
+ CMD_BURST_WRITE = 8,
+ CMD_READ_MODIFY_WRITE = 9,
+ CMD_RANDOM_READ = 10,
+ CMD_BURST_READ = 11,
+ CMD_RANDOM_WRITE = 12,
+ CMD_LED_MODE_OP = 16,
+ CMD_POWER_SAVING_OP = 20,
+ CMD_WOW_CONFIG = 21,
+ CMD_WOW_QUERY = 22,
+ CMD_WOW_FEATURE = 24,
+ CMD_CARRIER_DETECT_OP = 28,
+ CMD_RADOR_DETECT_OP = 29,
+ CMD_SWITCH_CHANNEL_OP = 30,
+ CMD_CALIBRATION_OP = 31,
+ CMD_BEACON_OP = 32,
+ CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_power_mode {
+ RADIO_OFF = 0x30,
+ RADIO_ON = 0x31,
+ RADIO_OFF_AUTO_WAKEUP = 0x32,
+ RADIO_OFF_ADVANCE = 0x33,
+ RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_function {
+ Q_SELECT = 1,
+ BW_SETTING = 2,
+ USB2_SW_DISCONNECT = 2,
+ USB3_SW_DISCONNECT = 3,
+ LOG_FW_DEBUG_MSG = 4,
+ GET_FW_VERSION = 5,
+};
+
+struct mt76x02_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
+struct mt76x02_patch_header {
+ char build_time[16];
+ char platform[4];
+ char hw_version[4];
+ char patch_version[4];
+ u8 pad[2];
+};
+
+int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
+ u32 param, bool wait);
+struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len);
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, bool wait_resp);
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
+ enum mcu_function func,
+ u32 val, bool wait_resp);
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
+ bool wait_resp);
+void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
+ const struct mt76x02_fw_header *h);
+
+#endif /* __MT76x02_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
new file mode 100644
index 000000000000..39f092034240
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+
+#include "mt76x02.h"
+#include "mt76x02_trace.h"
+
+static int
+mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc)
+{
+ int ret;
+
+ q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->hw_idx = idx;
+
+ ret = mt76_queue_alloc(dev, q);
+ if (ret)
+ return ret;
+
+ mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
+
+ return 0;
+}
+
+static int
+mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize)
+{
+ int ret;
+
+ q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+
+ ret = mt76_queue_alloc(dev, q);
+ if (ret)
+ return ret;
+
+ mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));
+
+ return 0;
+}
+
+static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
+{
+ struct mt76x02_tx_status stat;
+ u8 update = 1;
+
+ while (kfifo_get(&dev->txstatus_fifo, &stat))
+ mt76x02_send_tx_status(dev, &stat, &update);
+}
+
+static void mt76x02_tx_tasklet(unsigned long data)
+{
+ struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
+ int i;
+
+ mt76x02_process_tx_status_fifo(dev);
+
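+ /* reclaim the MCU queue first, then the data queues */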
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ mt76x02_mac_poll_tx_status(dev, false);
+ mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
+}
+
+int mt76x02_dma_init(struct mt76x02_dev *dev)
+{
+ struct mt76_txwi_cache __maybe_unused *t;
+ int i, ret, fifo_size;
+ struct mt76_queue *q;
+ void *status_fifo;
+
+ BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
+ BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);
+
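+ /* kfifo_init() requires a power-of-two buffer size */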
+ fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
+ status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
+ if (!status_fifo)
+ return -ENOMEM;
+
+ tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev);
+ kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+
+ mt76_dma_attach(&dev->mt76);
+
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
+ mt76_ac_to_hwq(i),
+ MT_TX_RING_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+ MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+ q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
+ ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
+ MT_RX_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ return mt76_init_queues(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_init);
+
+void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt76x02_dev *dev;
+
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
+ mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);
+
+irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
+{
+ struct mt76x02_dev *dev = dev_instance;
+ u32 intr;
+
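+ /* read the pending sources and write them back to ack the interrupt */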
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return IRQ_NONE;
+
+ trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);
+
+ intr &= dev->mt76.mmio.irqmask;
+
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ tasklet_schedule(&dev->tx_tasklet);
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
+ mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
+ mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ if (intr & MT_INT_PRE_TBTT)
+ tasklet_schedule(&dev->pre_tbtt_tasklet);
+
+ /* send buffered multicast frames now */
+ if (intr & MT_INT_TBTT)
+ mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+
+ if (intr & MT_INT_TX_STAT) {
+ mt76x02_mac_poll_tx_status(dev, true);
+ tasklet_schedule(&dev->tx_tasklet);
+ }
+
+ if (intr & MT_INT_GPTIMER) {
+ mt76x02_irq_disable(dev, MT_INT_GPTIMER);
+ tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(mt76x02_irq_handler);
+
+void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
+ dev->mt76.mmio.irqmask &= ~clear;
+ dev->mt76.mmio.irqmask |= set;
+ mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
+ spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask);
+
+static void mt76x02_dma_enable(struct mt76x02_dev *dev)
+{
+ u32 val;
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+ mt76x02_wait_for_wpdma(&dev->mt76, 1000);
+ usleep_range(50, 100);
+
+ val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN;
+ mt76_set(dev, MT_WPDMA_GLO_CFG, val);
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+}
+
+void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
+{
+ tasklet_kill(&dev->tx_tasklet);
+ mt76_dma_cleanup(&dev->mt76);
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
+
+void mt76x02_dma_disable(struct mt76x02_dev *dev)
+{
+ u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+ val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
+ MT_WPDMA_GLO_CFG_BIG_ENDIAN |
+ MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
+ val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
+ mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+}
+EXPORT_SYMBOL_GPL(mt76x02_dma_disable);
+
+void mt76x02_mac_start(struct mt76x02_dev *dev)
+{
+ mt76x02_dma_enable(dev);
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76x02_irq_enable(dev,
+ MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_TX_STAT);
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_start);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
new file mode 100644
index 000000000000..0f1d7b5c9f68
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include "mt76x02.h"
+#include "mt76x02_phy.h"
+
+void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
+{
+ u32 val;
+
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+ val &= ~BIT(4);
+
+ switch (dev->mt76.chainmask & 0xf) {
+ case 2:
+ val |= BIT(3);
+ break;
+ default:
+ val &= ~BIT(3);
+ break;
+ }
+
+ mt76_wr(dev, MT_BBP(AGC, 0), val);
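+ /* barrier plus readback to make sure the AGC write has reached the hw */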
+ mb();
+ val = mt76_rr(dev, MT_BBP(AGC, 0));
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath);
+
+void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
+{
+ int txpath;
+
+ txpath = (dev->mt76.chainmask >> 8) & 0xf;
+ switch (txpath) {
+ case 2:
+ mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ default:
+ mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_txdac);
+
+static u32
+mt76x02_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+ u32 val = 0;
+
+ val |= (v1 & (BIT(6) - 1)) << 0;
+ val |= (v2 & (BIT(6) - 1)) << 8;
+ val |= (v3 & (BIT(6) - 1)) << 16;
+ val |= (v4 & (BIT(6) - 1)) << 24;
+ return val;
+}
+
+int mt76x02_get_max_rate_power(struct mt76_rate_power *r)
+{
+ s8 ret = 0;
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ ret = max(ret, r->all[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_max_rate_power);
+
+void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ if (r->all[i] > limit)
+ r->all[i] = limit;
+}
+EXPORT_SYMBOL_GPL(mt76x02_limit_rate_power);
+
+void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+ int i;
+
+ for (i = 0; i < sizeof(r->all); i++)
+ r->all[i] += offset;
+}
+EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
+
+void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
+{
+ struct mt76_rate_power *t = &dev->mt76.rate_power;
+
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+ mt76_wr(dev, MT_TX_PWR_CFG_0,
+ mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
+ t->ofdm[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_1,
+ mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
+ t->ht[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_2,
+ mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
+ t->ht[10]));
+ mt76_wr(dev, MT_TX_PWR_CFG_3,
+ mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
+ t->stbc[2]));
+ mt76_wr(dev, MT_TX_PWR_CFG_4,
+ mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
+ mt76_wr(dev, MT_TX_PWR_CFG_7,
+ mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
+ t->vht[9]));
+ mt76_wr(dev, MT_TX_PWR_CFG_8,
+ mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
+ mt76_wr(dev, MT_TX_PWR_CFG_9,
+ mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
+
+int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev)
+{
+ struct mt76x02_sta *sta;
+ struct mt76_wcid *wcid;
+ int i, j, min_rssi = 0;
+ s8 cur_rssi;
+
+ local_bh_disable();
+ rcu_read_lock();
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) {
+ unsigned long mask = dev->mt76.wcid_mask[i];
+
+ if (!mask)
+ continue;
+
+ for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+ if (!(mask & 1))
+ continue;
+
+ wcid = rcu_dereference(dev->mt76.wcid[j]);
+ if (!wcid)
+ continue;
+
+ sta = container_of(wcid, struct mt76x02_sta, wcid);
+ spin_lock(&dev->mt76.rx_lock);
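+ /* only trust the rssi average of recently active stations */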
+ if (sta->inactive_count++ < 5)
+ cur_rssi = ewma_signal_read(&sta->rssi);
+ else
+ cur_rssi = 0;
+ spin_unlock(&dev->mt76.rx_lock);
+
+ if (cur_rssi < min_rssi)
+ min_rssi = cur_rssi;
+ }
+ }
+
+ rcu_read_unlock();
+ local_bh_enable();
+
+ if (!min_rssi)
+ return -75;
+
+ return min_rssi;
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi);
+
+void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
+{
+ int core_val, agc_val;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_80:
+ core_val = 3;
+ agc_val = 7;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ core_val = 2;
+ agc_val = 3;
+ break;
+ default:
+ core_val = 0;
+ agc_val = 1;
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+ mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+ mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_bw);
+
+void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
+ bool primary_upper)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ case NL80211_BAND_5GHZ:
+ mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+ mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+ break;
+ }
+
+ mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+ primary_upper);
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_set_band);
+
+bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
+{
+ u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
+ bool ret = false;
+ u32 false_cca;
+
+ false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
+ dev->cal.false_cca = false_cca;
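+ /* many false CCA events: raise the VGA adjustment; few events: lower it */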
+ if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) {
+ dev->cal.agc_gain_adjust += 2;
+ ret = true;
+ } else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
+ (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) {
+ dev->cal.agc_gain_adjust -= 2;
+ ret = true;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
+
+void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
+{
+ dev->cal.agc_gain_init[0] = mt76_get_field(dev, MT_BBP(AGC, 8),
+ MT_BBP_AGC_GAIN);
+ dev->cal.agc_gain_init[1] = mt76_get_field(dev, MT_BBP(AGC, 9),
+ MT_BBP_AGC_GAIN);
+ memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
+ sizeof(dev->cal.agc_gain_cur));
+ dev->cal.low_gain = -1;
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
new file mode 100644
index 000000000000..2b316cf7c70c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x02_PHY_H
+#define __MT76x02_PHY_H
+
+#include "mt76x02_regs.h"
+
+static inline int
+mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -62;
+ case NL80211_CHAN_WIDTH_40:
+ return -65;
+ default:
+ return -68;
+ }
+}
+
+static inline int
+mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
+{
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ return -76;
+ case NL80211_CHAN_WIDTH_40:
+ return -79;
+ default:
+ return -82;
+ }
+}
+
+void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset);
+void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_2);
+void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
+int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
+void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev);
+void mt76x02_phy_set_txdac(struct mt76x02_dev *dev);
+int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev);
+void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
+void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
+ bool primary_upper);
+bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev);
+void mt76x02_init_agc_gain(struct mt76x02_dev *dev);
+
+#endif /* __MT76x02_PHY_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index 1551ea453180..f7de77d09d28 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -14,8 +14,8 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef __MT76x2_REGS_H
-#define __MT76x2_REGS_H
+#ifndef __MT76X02_REGS_H
+#define __MT76X02_REGS_H
#define MT_ASIC_VERSION 0x0000
@@ -46,6 +46,11 @@
#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+#define MT_COEXCFG3 0x004c
+
+#define MT_LDO_CTRL_0 0x006c
+#define MT_LDO_CTRL_1 0x0070
+
#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
@@ -75,6 +80,8 @@
#define MT_XO_CTRL7 0x011c
+#define MT_IOCFG_6 0x0124
+
#define MT_USB_U3DMA_CFG 0x9018
#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
@@ -156,18 +163,23 @@
#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16)
#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+#define MT_WMM_CTRL 0x0230 /* MT76x0 */
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+#define MT_USB_DMA_CFG 0x0238
+
#define MT_TSO_CTRL 0x0250
#define MT_HEADER_TRANS_CTRL_REG 0x0260
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
#define MT_TX_RING_BASE 0x0300
#define MT_RX_RING_BASE 0x03c0
#define MT_TX_HW_QUEUE_MCU 8
#define MT_TX_HW_QUEUE_MGMT 9
-#define MT_US_CYC_CFG 0x02a4
-#define MT_US_CYC_CNT GENMASK(7, 0)
-
#define MT_PBF_SYS_CTRL 0x0400
#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
@@ -189,10 +201,20 @@
#define MT_BCN_OFFSET_BASE 0x041c
#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+#define MT_RXQ_STA 0x0430
+#define MT_TXQ_STA 0x0434
+#define MT_RF_CSR_CFG 0x0500
+#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID GENMASK(14, 8)
+#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 15)
+#define MT_RF_CSR_CFG_WR BIT(30)
+#define MT_RF_CSR_CFG_KICK BIT(31)
+
#define MT_RF_BYPASS_0 0x0504
#define MT_RF_BYPASS_1 0x0508
#define MT_RF_SETTING_0 0x050c
+#define MT_RF_MISC 0x0518
#define MT_RF_DATA_WRITE 0x0524
#define MT_RF_CTRL 0x0528
@@ -203,6 +225,11 @@
#define MT_RF_DATA_READ 0x052c
+#define MT_COM_REG0 0x0730
+#define MT_COM_REG1 0x0734
+#define MT_COM_REG2 0x0738
+#define MT_COM_REG3 0x073C
+
#define MT_FCE_PSE_CTRL 0x0800
#define MT_FCE_PARAMETERS 0x0804
#define MT_FCE_CSO 0x0808
@@ -222,6 +249,7 @@
#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
#define MT_FCE_SKIP_FS 0x0a6c
@@ -250,6 +278,9 @@
#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
#define MT_MAX_LEN_CFG 0x1018
+#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
+
+#define MT_LED_CFG 0x102c
#define MT_AMPDU_MAX_LEN_20M1S 0x1030
#define MT_AMPDU_MAX_LEN_20M2S 0x1034
@@ -365,6 +396,8 @@
#define MT_TX_SW_CFG2 0x1338
#define MT_TXOP_CTRL_CFG 0x1340
+#define MT_TXOP_TRUN_EN GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
#define MT_TX_RTS_CFG 0x1344
#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
@@ -376,7 +409,10 @@
#define MT_TX_RETRY_CFG 0x134c
#define MT_TX_LINK_CFG 0x1350
+#define MT_VHT_HT_FBK_CFG0 0x1354
#define MT_VHT_HT_FBK_CFG1 0x1358
+#define MT_LG_FBK_CFG0 0x135c
+#define MT_LG_FBK_CFG1 0x1360
#define MT_PROT_CFG_RATE GENMASK(15, 0)
#define MT_PROT_CFG_CTRL GENMASK(17, 16)
@@ -391,6 +427,27 @@
#define MT_GF20_PROT_CFG 0x1374
#define MT_GF40_PROT_CFG 0x1378
+#define MT_PROT_RATE GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS BIT(16)
+#define MT_PROT_CTRL_CTS2SELF BIT(17)
+#define MT_PROT_NAV_SHORT BIT(18)
+#define MT_PROT_NAV_LONG BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
+#define MT_PROT_RTS_THR_EN BIT(26)
+#define MT_PROT_RATE_CCK_11 0x0003
+#define MT_PROT_RATE_OFDM_6 0x4000
+#define MT_PROT_RATE_OFDM_24 0x4004
+#define MT_PROT_RATE_DUP_OFDM_24 0x4084
+#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
+ ~MT_PROT_TXOP_ALLOW_MM40 & \
+ ~MT_PROT_TXOP_ALLOW_GF40)
+
#define MT_EXP_ACK_TIME 0x1380
#define MT_TX_PWR_CFG_0_EXT 0x1390
@@ -405,6 +462,8 @@
#define MT_TX0_RF_GAIN_CORR 0x13a0
#define MT_TX1_RF_GAIN_CORR 0x13a4
+#define MT_TX0_RF_GAIN_ATTEN 0x13a8 /* MT76x0 */
#define MT_TX_ALC_CFG_0 0x13b0
#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
@@ -421,6 +480,7 @@
#define MT_TX_ALC_CFG_3 0x13ac
#define MT_TX_ALC_CFG_4 0x13c0
#define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN BIT(31)
+#define MT_TX0_BB_GAIN_ATTEN 0x13c0 /* MT76x0 */
#define MT_TX_ALC_VGA3 0x13c8
@@ -451,10 +511,13 @@
#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
#define MT_AUTO_RSP_CFG 0x1404
+#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
#define MT_LEGACY_BASIC_RATE 0x1408
#define MT_HT_BASIC_RATE 0x140c
#define MT_HT_CTRL_CFG 0x1410
+#define MT_RX_PARSER_CFG 0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
#define MT_EXT_CCA_CFG 0x141c
#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
@@ -498,7 +561,10 @@
#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
+#define MT_TX_AGG_STAT 0x171c
+
#define MT_TX_AGG_CNT_BASE0 0x1720
+#define MT_MPDU_DENSITY_CNT 0x1740
#define MT_TX_AGG_CNT_BASE1 0x174c
#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
@@ -604,7 +670,7 @@ struct mt76_wcid_key {
u8 rx_mic[8];
} __packed __aligned(4);
-enum mt76x2_cipher_type {
+enum mt76x02_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
MT_CIPHER_WEP104,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c
index a09f117848d6..5b42d2c87937 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c
@@ -18,6 +18,6 @@
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
-#include "mt76x2_trace.h"
+#include "mt76x02_trace.h"
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
index 4cd424148d4b..713f12d3c8de 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
@@ -14,14 +14,14 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#if !defined(__MT76x2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define __MT76x2_TRACE_H
+#if !defined(__MT76x02_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76x02_TRACE_H
#include <linux/tracepoint.h>
-#include "mt76x2.h"
+#include "mt76x02.h"
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM mt76x2
+#define TRACE_SYSTEM mt76x02
#define MAXNAME 32
#define DEV_ENTRY __array(char, wiphy_name, 32)
@@ -35,7 +35,7 @@
#define TXID_PR_ARG __entry->wcid, __entry->pktid
DECLARE_EVENT_CLASS(dev_evt,
- TP_PROTO(struct mt76x2_dev *dev),
+ TP_PROTO(struct mt76x02_dev *dev),
TP_ARGS(dev),
TP_STRUCT__entry(
DEV_ENTRY
@@ -47,7 +47,7 @@ DECLARE_EVENT_CLASS(dev_evt,
);
DECLARE_EVENT_CLASS(dev_txid_evt,
- TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+ TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
TP_ARGS(dev, wcid, pktid),
TP_STRUCT__entry(
DEV_ENTRY
@@ -63,19 +63,19 @@ DECLARE_EVENT_CLASS(dev_txid_evt,
)
);
-DEFINE_EVENT(dev_evt, mac_txstat_poll,
- TP_PROTO(struct mt76x2_dev *dev),
- TP_ARGS(dev)
-);
-
DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
- TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+ TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
TP_ARGS(dev, wcid, pktid)
);
+DEFINE_EVENT(dev_evt, mac_txstat_poll,
+ TP_PROTO(struct mt76x02_dev *dev),
+ TP_ARGS(dev)
+);
+
TRACE_EVENT(mac_txstat_fetch,
- TP_PROTO(struct mt76x2_dev *dev,
- struct mt76x2_tx_status *stat),
+ TP_PROTO(struct mt76x02_dev *dev,
+ struct mt76x02_tx_status *stat),
TP_ARGS(dev, stat),
@@ -110,9 +110,8 @@ TRACE_EVENT(mac_txstat_fetch,
)
);
-
TRACE_EVENT(dev_irq,
- TP_PROTO(struct mt76x2_dev *dev, u32 val, u32 mask),
+ TP_PROTO(struct mt76x02_dev *dev, u32 val, u32 mask),
TP_ARGS(dev, val, mask),
@@ -139,6 +138,6 @@ TRACE_EVENT(dev_irq,
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE mt76x2_trace
+#define TRACE_INCLUDE_FILE mt76x02_trace
#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
new file mode 100644
index 000000000000..d3de08872d6e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include "mt76x02.h"
+
+void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x02_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
+ if (control->sta) {
+ struct mt76x02_sta *msta;
+
+ msta = (struct mt76x02_sta *)control->sta->drv_priv;
+ wcid = &msta->wcid;
+ /* sw encrypted frames */
+ if (!info->control.hw_key && wcid->hw_key_idx != 0xff)
+ control->sta = NULL;
+ }
+
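+ /* frames without a station (e.g. multicast) use the per-vif group wcid */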
+ if (vif && !control->sta) {
+ struct mt76x02_vif *mvif;
+
+ mvif = (struct mt76x02_vif *)vif->drv_priv;
+ wcid = &mvif->group_wcid;
+ }
+
+ mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx);
+
+void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ void *rxwi = skb->data;
+
+ if (q == MT_RXQ_MCU) {
+ /* the MCU response queue is used only by the mmio code */
+ skb_queue_tail(&mdev->mmio.mcu.res_q, skb);
+ wake_up(&mdev->mmio.mcu.wait);
+ return;
+ }
+
+ skb_pull(skb, sizeof(struct mt76x02_rxwi));
+ if (mt76x02_mac_process_rx(dev, skb, rxwi)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ mt76_rx(mdev, q, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb);
+
+s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
+ const struct ieee80211_tx_rate *rate)
+{
+ s8 max_txpwr;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+ if (mcs == 8 || mcs == 9) {
+ max_txpwr = dev->mt76.rate_power.vht[8];
+ } else {
+ u8 nss, idx;
+
+ nss = ieee80211_rate_get_vht_nss(rate);
+ idx = ((nss - 1) << 3) + mcs;
+ max_txpwr = dev->mt76.rate_power.ht[idx & 0xf];
+ }
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
+ } else {
+ enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+ if (band == NL80211_BAND_2GHZ) {
+ const struct ieee80211_rate *r;
+ struct wiphy *wiphy = dev->mt76.hw->wiphy;
+ struct mt76_rate_power *rp = &dev->mt76.rate_power;
+
+ r = &wiphy->bands[band]->bitrates[rate->idx];
+ if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+ max_txpwr = rp->cck[r->hw_value & 0x3];
+ else
+ max_txpwr = rp->ofdm[r->hw_value & 0x7];
+ } else {
+ max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7];
+ }
+ }
+
+ return max_txpwr;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj);
+
+s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+ txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
+ txpwr -= (dev->target_power + dev->target_power_delta[0]);
+ txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
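+ /* without TPC no adjustment is applied; otherwise map to the hw encoding */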
+ if (!dev->enable_tpc)
+ return 0;
+ else if (txpwr >= 0)
+ return min_t(s8, txpwr, 7);
+ else
+ return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_get_txpwr_adj);
+
+void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
+{
+ s8 txpwr_adj;
+
+ txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr,
+ dev->mt76.rate_power.ofdm[4]);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+ mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+ MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto);
+
+void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ieee80211_free_txskb(dev->hw, skb);
+ } else {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status(dev->hw, skb);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_complete);
+
+bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76x02_tx_status stat;
+
+ if (!mt76x02_mac_load_tx_status(dev, &stat))
+ return false;
+
+ mt76x02_send_tx_status(dev, &stat, update);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
+
+int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int qsel = MT_QSEL_EDCA;
+ int ret;
+
+ if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
+ mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
+
+ mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
+
+ ret = mt76x02_insert_hdr_pad(skb);
+ if (ret < 0)
+ return ret;
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ qsel = MT_QSEL_MGMT;
+
+ *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ *tx_info |= MT_TXD_INFO_WIV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
new file mode 100644
index 000000000000..0126e51d77ed
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x02_USB_H
+#define __MT76x02_USB_H
+
+#include "mt76x02.h"
+
+void mt76x02u_init_mcu(struct mt76_dev *dev);
+void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev);
+int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset);
+
+int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
+int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info);
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush);
+#endif /* __MT76x02_USB_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
new file mode 100644
index 000000000000..dc2226c722dd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x02.h"
+
+static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
+{
+ int hdr_len;
+
+ skb_pull(skb, sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN);
+ hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ if (hdr_len % 4)
+ mt76x02_remove_hdr_pad(skb, 2);
+}
+
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
+{
+ mt76x02u_remove_dma_hdr(e->skb);
+ mt76x02_tx_complete(mdev, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
+
+int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
+{
+ struct sk_buff *iter, *last = skb;
+ u32 info, pad;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
+ */
+ info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ skb_walk_frags(skb, iter) {
+ last = iter;
+ if (!iter->next) {
+ skb->data_len += pad;
+ skb->len += pad;
+ break;
+ }
+ }
+
+ if (unlikely(pad)) {
+ if (skb_pad(last, pad))
+ return -ENOMEM;
+ __skb_put(last, pad);
+ }
+ return 0;
+}
+
+static int
+mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mt76_qsel qsel;
+ u32 flags;
+
+ if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+ ep == MT_EP_OUT_HCCA)
+ qsel = MT_QSEL_MGMT;
+ else
+ qsel = MT_QSEL_EDCA;
+
+ flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ flags |= MT_TXD_INFO_WIV;
+
+ return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
+}
+
+int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+ struct sk_buff *skb, struct mt76_queue *q,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ u32 *tx_info)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76x02_txwi *txwi;
+ int len = skb->len;
+
+ mt76x02_insert_hdr_pad(skb);
+
+ txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
+ mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+
+ return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
+}
+EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
new file mode 100644
index 000000000000..da299b8a1334
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include "mt76x02.h"
+#include "mt76x02_mcu.h"
+#include "mt76x02_usb.h"
+
+#define MT_CMD_HDR_LEN 4
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+static struct sk_buff *
+mt76x02u_mcu_msg_alloc(const void *data, int len)
+{
+ struct sk_buff *skb;
+
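+ /* headroom for the command header; the extra bytes cover info word and pad */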
+ skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MT_CMD_HDR_LEN);
+ skb_put_data(skb, data, len);
+
+ return skb;
+}
+
+static void
+mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ u32 reg, val;
+ int i;
+
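+ /* burst replies carry bare values; random reads carry (reg, value) pairs */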
+ if (usb->mcu.burst) {
+ WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);
+
+ reg = usb->mcu.rp[0].reg - usb->mcu.base;
+ for (i = 0; i < usb->mcu.rp_len; i++) {
+ val = get_unaligned_le32(data + 4 * i);
+ usb->mcu.rp[i].reg = reg++;
+ usb->mcu.rp[i].value = val;
+ }
+ } else {
+ WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
+
+ for (i = 0; i < usb->mcu.rp_len; i++) {
+ reg = get_unaligned_le32(data + 8 * i) -
+ usb->mcu.base;
+ val = get_unaligned_le32(data + 8 * i + 4);
+
+ WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
+ usb->mcu.rp[i].value = val;
+ }
+ }
+}
+
+static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
+{
+ struct mt76_usb *usb = &dev->usb;
+ struct mt76u_buf *buf = &usb->mcu.res;
+ struct urb *urb = buf->urb;
+ int i, ret;
+ u32 rxfce;
+ u8 *data;
+
+ for (i = 0; i < 5; i++) {
+ if (!wait_for_completion_timeout(&usb->mcu.cmpl,
+ msecs_to_jiffies(300)))
+ continue;
+
+ if (urb->status)
+ return -EIO;
+
+ data = sg_virt(&urb->sg[0]);
+ if (usb->mcu.rp)
+ mt76x02u_multiple_mcu_reads(dev, data + 4,
+ urb->actual_length - 8);
+
+ rxfce = get_unaligned_le32(data);
+ ret = mt76u_submit_buf(dev, USB_DIR_IN,
+ MT_EP_IN_CMD_RESP,
+ buf, GFP_KERNEL,
+ mt76u_mcu_complete_urb,
+ &usb->mcu.cmpl);
+ if (ret)
+ return ret;
+
+ if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
+ FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
+ return 0;
+
+ dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
+ FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->dev, "error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+static int
+__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct usb_interface *intf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct mt76_usb *usb = &dev->usb;
+ unsigned int pipe;
+ int ret, sent;
+ u8 seq = 0;
+ u32 info;
+
+ if (!skb)
+ return -EINVAL;
+
+ if (test_bit(MT76_REMOVED, &dev->state))
+ return 0;
+
+ pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
+ if (wait_resp) {
+ seq = ++usb->mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++usb->mcu.msg_seq & 0xf;
+ }
+
+ info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+ FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+ MT_MCU_MSG_TYPE_CMD;
+ ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
+ if (ret)
+ return ret;
+
+ ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
+ if (ret)
+ return ret;
+
+ if (wait_resp)
+ ret = mt76x02u_mcu_wait_resp(dev, seq);
+
+ consume_skb(skb);
+
+ return ret;
+}
+
+static int
+mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int err;
+
+ mutex_lock(&usb->mcu.mutex);
+ err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
+ mutex_unlock(&usb->mcu.mutex);
+
+ return err;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+ put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static int
+mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
+ const int CMD_RANDOM_WRITE = 12;
+ const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
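+ /* only the final chunk waits for an MCU response */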
+ ret = mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
+}
+
+static int
+mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ const int CMD_RANDOM_READ = 10;
+ const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
+ struct mt76_usb *usb = &dev->usb;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+ if (cnt != n)
+ return -EINVAL;
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ mutex_lock(&usb->mcu.mutex);
+
+ usb->mcu.rp = data;
+ usb->mcu.rp_len = n;
+ usb->mcu.base = base;
+ usb->mcu.burst = false;
+
+ ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);
+
+ usb->mcu.rp = NULL;
+
+ mutex_unlock(&usb->mcu.mutex);
+
+ return ret;
+}
+
+void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
+{
+ mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ 0x1, 0, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
+
+static int
+__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
+ const void *fw_data, int len, u32 dst_addr)
+{
+ u8 *data = sg_virt(&buf->urb->sg[0]);
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ __le32 info;
+ u32 val;
+ int err;
+
+ info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_MCU_MSG_LEN, len) |
+ MT_MCU_MSG_TYPE_CMD);
+
+ memcpy(data, &info, sizeof(info));
+ memcpy(data + sizeof(info), fw_data, len);
+ memset(data + sizeof(info) + len, 0, 4);
+
+ mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ len = roundup(len, 4);
+ mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+
+ buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
+ err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
+ MT_EP_OUT_INBAND_CMD,
+ buf, GFP_KERNEL,
+ mt76u_mcu_complete_urb, &cmpl);
+ if (err < 0)
+ return err;
+
+ if (!wait_for_completion_timeout(&cmpl,
+ msecs_to_jiffies(1000))) {
+ dev_err(dev->mt76.dev, "firmware upload timed out\n");
+ usb_kill_urb(buf->urb);
+ return -ETIMEDOUT;
+ }
+
+ if (mt76u_urb_error(buf->urb)) {
+ dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
+ buf->urb->status);
+ return buf->urb->status;
+ }
+
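+ /* advance the FCE descriptor index so the MCU picks up the fragment */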
+ val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ return 0;
+}
+
+int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
+ int data_len, u32 max_payload, u32 offset)
+{
+ int err, len, pos = 0, max_len = max_payload - 8;
+ struct mt76u_buf buf;
+
+ err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
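+ /* upload the image in max_len chunks, advancing the destination offset */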
+ while (data_len > 0) {
+ len = min_t(int, data_len, max_len);
+ err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos,
+ len, offset + pos);
+ if (err < 0)
+ break;
+
+ data_len -= len;
+ pos += len;
+ usleep_range(5000, 10000);
+ }
+ mt76u_buf_free(&buf);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);
+
+void mt76x02u_init_mcu(struct mt76_dev *dev)
+{
+ static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
+ .mcu_msg_alloc = mt76x02u_mcu_msg_alloc,
+ .mcu_send_msg = mt76x02u_mcu_send_msg,
+ .mcu_wr_rp = mt76x02u_mcu_wr_rp,
+ .mcu_rd_rp = mt76x02u_mcu_rd_rp,
+ };
+
+ dev->mcu_ops = &mt76x02u_mcu_ops;
+}
+EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
new file mode 100644
index 000000000000..ca05332f81fc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include "mt76x02.h"
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+struct ieee80211_rate mt76x02_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+EXPORT_SYMBOL_GPL(mt76x02_rates);
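
The rate macros above encode the PHY mode in the high byte of hw_value and the rate index in the low byte; CCK short-preamble entries offset the index by 8. A quick decode sketch (the PHY type values are assumed to match the driver's MT_PHY_TYPE_* enum):

    #include <stdint.h>
    #include <stdio.h>

    #define PHY_TYPE_CCK  0 /* assumed value of MT_PHY_TYPE_CCK */
    #define PHY_TYPE_OFDM 1 /* assumed value of MT_PHY_TYPE_OFDM */

    int main(void)
    {
        uint16_t hw_value = (PHY_TYPE_OFDM << 8) | 5;       /* OFDM idx 5 */
        uint16_t cck_short = (PHY_TYPE_CCK << 8) | (8 + 1); /* short preamble */

        printf("phy=%u idx=%u\n", hw_value >> 8, hw_value & 0xff);
        printf("cck short-preamble idx=%u\n", (cck_short & 0xff) - 8);
        return 0;
    }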
+
+void mt76x02_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->mt76.rxfilter &= ~(_hw); \
+ dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mt76.mutex);
+
+ dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x02_configure_filter);
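
Note the inverted sense in MT76_FILTER(): the rxfilter bits tell the hardware which frames to drop, so a bit is set only when mac80211 did not request the corresponding FIF_* flag. A sketch of the same update for a single flag, with illustrative flag and bit values:

    #include <stdint.h>
    #include <stdio.h>

    #define FIF_FCSFAIL        (1u << 0) /* illustrative flag value */
    #define RX_FILTER_CRC_ERR  (1u << 4) /* illustrative hw drop bit */

    int main(void)
    {
        uint32_t total_flags = FIF_FCSFAIL; /* caller wants bad-FCS frames */
        uint32_t rxfilter = 0, flags = 0;

        flags |= total_flags & FIF_FCSFAIL;
        rxfilter &= ~RX_FILTER_CRC_ERR;
        /* drop bit set only when the flag was NOT requested */
        rxfilter |= !(flags & FIF_FCSFAIL) * RX_FILTER_CRC_ERR;

        printf("rxfilter=0x%08x\n", rxfilter); /* 0: nothing dropped */
        return 0;
    }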
+
+int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+ int i;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt76x02_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76x02_mac_wcid_set_drop(dev, idx, false);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76x02_txq_init(dev, sta->txq[i]);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
+ ewma_signal_init(&msta->rssi);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], &msta->wcid);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_add);
+
+int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+ int idx = msta->wcid.idx;
+ int i;
+
+ mutex_lock(&dev->mt76.mutex);
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76_txq_remove(&dev->mt76, sta->txq[i]);
+ mt76x02_mac_wcid_set_drop(dev, idx, true);
+ mt76_wcid_free(dev->mt76.wcid_mask, idx);
+ mt76x02_mac_wcid_setup(dev, idx, 0, NULL);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
+
+void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
+ unsigned int idx)
+{
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+
+ mvif->idx = idx;
+ mvif->group_wcid.idx = MT_VIF_WCID(idx);
+ mvif->group_wcid.hw_key_idx = -1;
+ mt76x02_txq_init(dev, vif->txq);
+}
+EXPORT_SYMBOL_GPL(mt76x02_vif_init);
+
+int
+mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ unsigned int idx = 0;
+
+ if (vif->addr[0] & BIT(1))
+ idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
+
+ /*
+ * Client mode typically only has one configurable BSSID register,
+ * which is used for bssidx=0. This is linked to the MAC address.
+ * Since mac80211 allows changing interface types, and we cannot
+ * force the use of the primary MAC address for a station mode
+ * interface, we need some other way of configuring a per-interface
+ * remote BSSID.
+ * The hardware provides an AP-Client feature, where bssidx 0-7 are
+ * used for AP mode and bssidx 8-15 for client mode.
+ * We shift the station interface bss index by 8 to force the
+ * hardware to recognize the BSSID.
+ * The resulting bssidx mismatch for unicast frames is ignored by hw.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ idx += 8;
+
+ mt76x02_vif_init(dev, vif, idx);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_add_interface);
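
To make the index derivation concrete: with a device MAC starting 00:... and a locally administered interface address starting 02:..., bit 1 of byte 0 is set, so idx = 1 + (((0x00 ^ 0x02) >> 2) & 7) = 1; a station interface then lands on bssidx 9 after the +8 shift. A sketch of just that arithmetic:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    static unsigned int bss_idx(uint8_t dev_b0, uint8_t vif_b0, bool station)
    {
        unsigned int idx = 0;

        if (vif_b0 & 0x2) /* locally administered address */
            idx = 1 + (((dev_b0 ^ vif_b0) >> 2) & 7);
        if (station) /* AP-client: client bssidx range is 8-15 */
            idx += 8;
        return idx;
    }

    int main(void)
    {
        printf("idx=%u\n", bss_idx(0x00, 0x02, true)); /* -> 9 */
        return 0;
    }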
+
+void mt76x02_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ mt76_txq_remove(&dev->mt76, vif->txq);
+}
+EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
+
+int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct ieee80211_sta *sta = params->sta;
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid,
+ *ssn, params->buf_size);
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_ampdu_action);
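
The *ssn << 4 in the TX_START case mirrors the 802.11 Sequence Control layout, where the sequence number occupies bits 4-15 and the fragment number the low 4 bits; the stored agg_ssn can therefore be compared directly against sequence-control fields. For example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t ssn = 100;           /* starting sequence number */
        uint16_t seq_ctrl = ssn << 4; /* frag = 0, seq = 100 */

        printf("seq_ctrl=0x%04x seq=%u\n", seq_ctrl, seq_ctrl >> 4);
        return 0;
    }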
+
+int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct mt76x02_sta *msta;
+ struct mt76_wcid *wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * The hardware does not support per-STA RX GTK, fall back
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ msta = sta ? (struct mt76x02_sta *) sta->drv_priv : NULL;
+ wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ wcid->sw_iv = true;
+ }
+ } else {
+ if (idx == wcid->hw_key_idx) {
+ wcid->hw_key_idx = -1;
+ wcid->sw_iv = true;
+ }
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76x02_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76x02_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76x02_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_key);
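
The key bookkeeping boils down to: pairwise keys land in the per-station WCID slot, group keys in the vif's group WCID, and a removed key only clears hw_key_idx when it is the one currently programmed. A compressed model of that selection, not the driver's actual structures:

    #include <stdio.h>

    struct wcid { int idx, hw_key_idx; };

    /* model: set_key targets the per-sta slot when a station is
     * given, otherwise the vif's group slot */
    static struct wcid *pick_wcid(struct wcid *sta, struct wcid *grp)
    {
        return sta ? sta : grp;
    }

    int main(void)
    {
        struct wcid grp = { .idx = 250, .hw_key_idx = -1 };
        struct wcid *w = pick_wcid(NULL, &grp); /* group-keyed vif */

        w->hw_key_idx = 2; /* SET_KEY with keyidx 2 */
        printf("wcid %d uses key %d\n", w->idx, w->hw_key_idx);
        return 0;
    }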
+
+int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, qid;
+ u32 val;
+
+ qid = dev->mt76.q_tx[queue].hw_idx;
+
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
+
+ val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+ FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+ mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(qid));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_TXOP(qid), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_conf_tx);
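
The hardware stores contention windows as exponents, so fls() converts mac80211's window sizes (of the form 2^n - 1) back into n: cw_min = 15 gives fls(15) = 4, i.e. CWmin = 2^4 - 1. A quick check in plain C, using a portable stand-in for the kernel's fls():

    #include <stdio.h>

    /* portable substitute for fls(): 1-based index of the highest
     * set bit, 0 for val == 0 */
    static int fls_sub(unsigned int val)
    {
        int n = 0;

        while (val) {
            n++;
            val >>= 1;
        }
        return n;
    }

    int main(void)
    {
        printf("cw_min=15   -> exp %d\n", fls_sub(15));   /* 4 */
        printf("cw_max=1023 -> exp %d\n", fls_sub(1023)); /* 10 */
        return 0;
    }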
+
+void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
+ struct ieee80211_tx_rate rate = {};
+
+ if (!rates)
+ return;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76x02_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+ msta->wcid.max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, &rate);
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_rate_tbl_update);
+
+int mt76x02_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ if (len % 4 == 0)
+ return 0;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 2;
+}
+EXPORT_SYMBOL_GPL(mt76x02_insert_hdr_pad);
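
The hardware wants the frame body to start on a 4-byte boundary; when the 802.11 header length is not a multiple of four, the header is shifted 2 bytes towards the front of the buffer, the resulting gap is zeroed, and 2 is returned so the TX path can account for it. A user-space model of the buffer manipulation:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* 26-byte QoS header (not 4-aligned) followed by payload */
        unsigned char buf[64] = { 0 };
        unsigned char *data = buf + 2; /* skb->data with 2B headroom */
        int hdrlen = 26;

        memset(data, 0xaa, hdrlen);

        if (hdrlen % 4) {
            data -= 2;                       /* skb_push(skb, 2) */
            memmove(data, data + 2, hdrlen); /* slide header forward */
            data[hdrlen] = 0;                /* zero the 2-byte pad */
            data[hdrlen + 1] = 0;
        }
        printf("pad byte after header: %u\n", data[hdrlen]);
        return 0;
    }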
+
+void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len)
+{
+ int hdrlen;
+
+ if (!len)
+ return;
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + len, skb->data, hdrlen);
+ skb_pull(skb, len);
+}
+EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad);
+
+const u16 mt76x02_beacon_offsets[16] = {
+	/* 1024 bytes per beacon */

+ 0xc000,
+ 0xc400,
+ 0xc800,
+ 0xcc00,
+ 0xd000,
+ 0xd400,
+ 0xd800,
+ 0xdc00,
+ /* BSS idx 8-15 not used for beacons */
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+ 0xc000,
+};
+EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets);
+
+void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
+{
+ u16 val, base = MT_BEACON_BASE;
+ u32 regs[4] = {};
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ val = mt76x02_beacon_offsets[i] - base;
+ regs[i / 4] |= (val / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets);
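
Each beacon offset is stored as a count of 64-byte units relative to MT_BEACON_BASE, one byte per BSS, four per 32-bit register. Working through the second slot: 0xc400 - 0xc000 = 0x400, divided by 64 gives 0x10, which lands in the second byte of register 0. A sketch of the full packing (0xc000 as the base is taken from the table above):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        static const uint16_t offs[16] = {
            0xc000, 0xc400, 0xc800, 0xcc00,
            0xd000, 0xd400, 0xd800, 0xdc00,
            /* BSS 8-15 reuse slot 0 */
            0xc000, 0xc000, 0xc000, 0xc000,
            0xc000, 0xc000, 0xc000, 0xc000,
        };
        uint16_t base = 0xc000; /* MT_BEACON_BASE */
        uint32_t regs[4] = { 0 };
        int i;

        for (i = 0; i < 16; i++)
            regs[i / 4] |= ((offs[i] - base) / 64) << (8 * (i % 4));

        for (i = 0; i < 4; i++)
            printf("MT_BCN_OFFSET(%d) = 0x%08x\n", i, regs[i]);
        return 0; /* reg 0 prints 0x30201000 */
    }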
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
deleted file mode 100644
index dca3209bf5f1..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2.h
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2_H
-#define __MT76x2_H
-
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/bitops.h>
-#include <linux/kfifo.h>
-#include <linux/average.h>
-
-#define MT7662_FIRMWARE "mt7662.bin"
-#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
-#define MT7662_EEPROM_SIZE 512
-
-#define MT7662U_FIRMWARE "mediatek/mt7662u.bin"
-#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin"
-
-#define MT76x2_RX_RING_SIZE 256
-#define MT_RX_HEADROOM 32
-
-#define MT_MAX_CHAINS 2
-
-#define MT_CALIBRATE_INTERVAL HZ
-
-#define MT_MAX_VIFS 8
-#define MT_VIF_WCID(_n) (254 - ((_n) & 7))
-
-#include "mt76.h"
-#include "mt76x2_regs.h"
-#include "mt76x2_mac.h"
-#include "mt76x2_dfs.h"
-
-DECLARE_EWMA(signal, 10, 8)
-
-struct mt76x2_mcu {
- struct mutex mutex;
-
- wait_queue_head_t wait;
- struct sk_buff_head res_q;
- struct mt76u_buf res_u;
-
- u32 msg_seq;
-};
-
-struct mt76x2_rx_freq_cal {
- s8 high_gain[MT_MAX_CHAINS];
- s8 rssi_offset[MT_MAX_CHAINS];
- s8 lna_gain;
- u32 mcu_gain;
-};
-
-struct mt76x2_calibration {
- struct mt76x2_rx_freq_cal rx;
-
- u8 agc_gain_init[MT_MAX_CHAINS];
- u8 agc_gain_cur[MT_MAX_CHAINS];
-
- u16 false_cca;
- s8 avg_rssi_all;
- s8 agc_gain_adjust;
- s8 low_gain;
-
- u8 temp;
-
- bool init_cal_done;
- bool tssi_cal_done;
- bool tssi_comp_pending;
- bool dpd_cal_done;
- bool channel_cal_done;
-};
-
-struct mt76x2_dev {
- struct mt76_dev mt76; /* must be first */
-
- struct mac_address macaddr_list[8];
-
- struct mutex mutex;
-
- const u16 *beacon_offsets;
- unsigned long wcid_mask[128 / BITS_PER_LONG];
-
- int txpower_conf;
- int txpower_cur;
-
- u8 txdone_seq;
- DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x2_tx_status);
-
- struct mt76x2_mcu mcu;
- struct sk_buff *rx_head;
-
- struct tasklet_struct tx_tasklet;
- struct tasklet_struct pre_tbtt_tasklet;
- struct delayed_work cal_work;
- struct delayed_work mac_work;
-
- u32 aggr_stats[32];
-
- struct mt76_wcid global_wcid;
- struct mt76_wcid __rcu *wcid[128];
-
- spinlock_t irq_lock;
- u32 irqmask;
-
- struct sk_buff *beacons[8];
- u8 beacon_mask;
- u8 beacon_data_mask;
-
- u8 tbtt_count;
- u16 beacon_int;
-
- u16 chainmask;
-
- u32 rxfilter;
-
- struct mt76x2_calibration cal;
-
- s8 target_power;
- s8 target_power_delta[2];
- struct mt76_rate_power rate_power;
- bool enable_tpc;
-
- u8 coverage_class;
- u8 slottime;
-
- struct mt76x2_dfs_pattern_detector dfs_pd;
-};
-
-struct mt76x2_vif {
- u8 idx;
-
- struct mt76_wcid group_wcid;
-};
-
-struct mt76x2_sta {
- struct mt76_wcid wcid; /* must be first */
-
- struct mt76x2_vif *vif;
- struct mt76x2_tx_status status;
- int n_frames;
-
- struct ewma_signal rssi;
- int inactive_count;
-};
-
-static inline bool mt76x2_wait_for_mac(struct mt76x2_dev *dev)
-{
- int i;
-
- for (i = 0; i < 500; i++) {
- switch (mt76_rr(dev, MT_MAC_CSR0)) {
- case 0:
- case ~0:
- break;
- default:
- return true;
- }
- usleep_range(5000, 10000);
- }
- return false;
-}
-
-static inline bool is_mt7612(struct mt76x2_dev *dev)
-{
- return mt76_chip(&dev->mt76) == 0x7612;
-}
-
-void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
-
-static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev)
-{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-
- return ((chan->flags & IEEE80211_CHAN_RADAR) &&
- chan->dfs_state != NL80211_DFS_AVAILABLE);
-}
-
-static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
-{
- mt76x2_set_irq_mask(dev, 0, mask);
-}
-
-static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
-{
- mt76x2_set_irq_mask(dev, mask, 0);
-}
-
-static inline bool mt76x2_wait_for_bbp(struct mt76x2_dev *dev)
-{
- return mt76_poll_msec(dev, MT_MAC_STATUS,
- MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
- 0, 100);
-}
-
-static inline bool wait_for_wpdma(struct mt76x2_dev *dev)
-{
- return mt76_poll(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
- MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
- 0, 1000);
-}
-
-extern const struct ieee80211_ops mt76x2_ops;
-
-extern struct ieee80211_rate mt76x2_rates[12];
-
-struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
-int mt76x2_register_device(struct mt76x2_dev *dev);
-void mt76x2_init_debugfs(struct mt76x2_dev *dev);
-void mt76x2_init_device(struct mt76x2_dev *dev);
-
-irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
-void mt76x2_phy_power_on(struct mt76x2_dev *dev);
-int mt76x2_init_hardware(struct mt76x2_dev *dev);
-void mt76x2_stop_hardware(struct mt76x2_dev *dev);
-int mt76x2_eeprom_init(struct mt76x2_dev *dev);
-int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel);
-void mt76x2_set_tx_ackto(struct mt76x2_dev *dev);
-
-void mt76x2_phy_set_antenna(struct mt76x2_dev *dev);
-int mt76x2_phy_start(struct mt76x2_dev *dev);
-int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
- struct cfg80211_chan_def *chandef);
-int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
-void mt76x2_phy_calibrate(struct work_struct *work);
-void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
-
-int mt76x2_mcu_init(struct mt76x2_dev *dev);
-int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
- u8 bw_index, bool scan);
-int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on);
-int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
- u8 channel);
-int mt76x2_mcu_cleanup(struct mt76x2_dev *dev);
-
-int mt76x2_dma_init(struct mt76x2_dev *dev);
-void mt76x2_dma_cleanup(struct mt76x2_dev *dev);
-
-void mt76x2_cleanup(struct mt76x2_dev *dev);
-
-int mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
- struct sk_buff *skb, int cmd, int seq);
-void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb);
-void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb);
-int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info);
-void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
-void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val);
-
-void mt76x2_pre_tbtt_tasklet(unsigned long arg);
-
-void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
-void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb);
-
-void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
-
-void mt76x2_update_channel(struct mt76_dev *mdev);
-
-s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
- const struct ieee80211_tx_rate *rate);
-s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
-void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
-
-int mt76x2_insert_hdr_pad(struct sk_buff *skb);
-
-bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
- struct mt76x2_tx_status *stat);
-void mt76x2_send_tx_status(struct mt76x2_dev *dev,
- struct mt76x2_tx_status *stat, u8 *update);
-void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable);
-void mt76x2_init_txpower(struct mt76x2_dev *dev,
- struct ieee80211_supported_band *sband);
-void mt76_write_mac_initvals(struct mt76x2_dev *dev);
-
-int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params);
-int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void mt76x2_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key);
-int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params);
-void mt76x2_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast);
-void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq);
-void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-
-void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
- enum nl80211_band band);
-void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
- enum nl80211_band band, u8 bw);
-void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl);
-void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper);
-int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev);
-void mt76x2_apply_gain_adj(struct mt76x2_dev *dev);
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
new file mode 100644
index 000000000000..2b414a0e9088
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig
@@ -0,0 +1,20 @@
+config MT76x2_COMMON
+ tristate
+ select MT76x02_LIB
+
+config MT76x2E
+ tristate "MediaTek MT76x2E (PCIe) support"
+ select MT76x2_COMMON
+ depends on MAC80211
+ depends on PCI
+ ---help---
+ This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
+
+config MT76x2U
+ tristate "MediaTek MT76x2U (USB) support"
+ select MT76x2_COMMON
+ select MT76x02_USB
+ depends on MAC80211
+ depends on USB
+ help
+ This adds support for MT7612U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
new file mode 100644
index 000000000000..b71bb1049170
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
@@ -0,0 +1,16 @@
+obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
+obj-$(CONFIG_MT76x2E) += mt76x2e.o
+obj-$(CONFIG_MT76x2U) += mt76x2u.o
+
+mt76x2-common-y := \
+ eeprom.o mac.o init.o phy.o debugfs.o mcu.o
+
+mt76x2e-y := \
+ pci.o pci_main.o pci_init.o pci_tx.o \
+ pci_mac.o pci_mcu.o pci_phy.o pci_dfs.o
+
+mt76x2u-y := \
+ usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \
+ usb_phy.o
+
+CFLAGS_pci_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c
index 77b5ff1be05f..e8f8ccc0a5ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c
@@ -20,7 +20,7 @@
static int
mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
{
- struct mt76x2_dev *dev = file->private;
+ struct mt76x02_dev *dev = file->private;
int i, j;
for (i = 0; i < 4; i++) {
@@ -47,33 +47,14 @@ mt76x2_ampdu_stat_open(struct inode *inode, struct file *f)
return single_open(f, mt76x2_ampdu_stat_read, inode->i_private);
}
-static void
-seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len)
-{
- int i;
-
- seq_printf(file, "%10s:", str);
- for (i = 0; i < len; i++)
- seq_printf(file, " %2d", val[i]);
- seq_puts(file, "\n");
-}
-
static int read_txpower(struct seq_file *file, void *data)
{
- struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+ struct mt76x02_dev *dev = dev_get_drvdata(file->private);
seq_printf(file, "Target power: %d\n", dev->target_power);
- seq_puts_array(file, "Delta", dev->target_power_delta,
- ARRAY_SIZE(dev->target_power_delta));
- seq_puts_array(file, "CCK", dev->rate_power.cck,
- ARRAY_SIZE(dev->rate_power.cck));
- seq_puts_array(file, "OFDM", dev->rate_power.ofdm,
- ARRAY_SIZE(dev->rate_power.ofdm));
- seq_puts_array(file, "HT", dev->rate_power.ht,
- ARRAY_SIZE(dev->rate_power.ht));
- seq_puts_array(file, "VHT", dev->rate_power.vht,
- ARRAY_SIZE(dev->rate_power.vht));
+ mt76_seq_puts_array(file, "Delta", dev->target_power_delta,
+ ARRAY_SIZE(dev->target_power_delta));
return 0;
}
@@ -87,9 +68,9 @@ static const struct file_operations fops_ampdu_stat = {
static int
mt76x2_dfs_stat_read(struct seq_file *file, void *data)
{
+ struct mt76x02_dev *dev = file->private;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
int i;
- struct mt76x2_dev *dev = file->private;
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
seq_printf(file, "allocated sequences:\t%d\n",
dfs_pd->seq_stats.seq_pool_len);
@@ -125,7 +106,7 @@ static const struct file_operations fops_dfs_stat = {
static int read_agc(struct seq_file *file, void *data)
{
- struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+ struct mt76x02_dev *dev = dev_get_drvdata(file->private);
seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all);
seq_printf(file, "low_gain: %d\n", dev->cal.low_gain);
@@ -135,7 +116,7 @@ static int read_agc(struct seq_file *file, void *data)
return 0;
}
-void mt76x2_init_debugfs(struct mt76x2_dev *dev)
+void mt76x2_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h
index da294558c268..3cb9d1864286 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -14,16 +14,13 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef __MT76x2_DMA_H
-#define __MT76x2_DMA_H
+#ifndef __DFS_H
+#define __DFS_H
-#include "dma.h"
+void mt76x2_dfs_init_params(struct mt76x02_dev *dev);
+void mt76x2_dfs_init_detector(struct mt76x02_dev *dev);
+void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev);
+void mt76x2_dfs_set_domain(struct mt76x02_dev *dev,
+ enum nl80211_dfs_regions region);
-enum mt76x2_qsel {
- MT_QSEL_MGMT,
- MT_QSEL_HCCA,
- MT_QSEL_EDCA,
- MT_QSEL_EDCA_2,
-};
-
-#endif
+#endif /* __DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 1753bcb36356..f39b622d03f4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -14,14 +14,15 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/module.h>
#include <asm/unaligned.h>
#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
+#include "eeprom.h"
#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
static int
-mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field,
+mt76x2_eeprom_copy(struct mt76x02_dev *dev, enum mt76x02_eeprom_field field,
void *dest, int len)
{
if (field + len > dev->mt76.eeprom.size)
@@ -32,7 +33,7 @@ mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field,
}
static int
-mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
+mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev)
{
void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
@@ -40,73 +41,8 @@ mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
return 0;
}
-void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
-{
- u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
-
- switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
- case BOARD_TYPE_5GHZ:
- dev->mt76.cap.has_5ghz = true;
- break;
- case BOARD_TYPE_2GHZ:
- dev->mt76.cap.has_2ghz = true;
- break;
- default:
- dev->mt76.cap.has_2ghz = true;
- dev->mt76.cap.has_5ghz = true;
- break;
- }
-}
-EXPORT_SYMBOL_GPL(mt76x2_eeprom_parse_hw_cap);
-
-static int
-mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
-{
- u32 val;
- int i;
-
- val = mt76_rr(dev, MT_EFUSE_CTRL);
- val &= ~(MT_EFUSE_CTRL_AIN |
- MT_EFUSE_CTRL_MODE);
- val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
- val |= MT_EFUSE_CTRL_KICK;
- mt76_wr(dev, MT_EFUSE_CTRL, val);
-
- if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
- return -ETIMEDOUT;
-
- udelay(2);
-
- val = mt76_rr(dev, MT_EFUSE_CTRL);
- if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
- memset(data, 0xff, 16);
- return 0;
- }
-
- for (i = 0; i < 4; i++) {
- val = mt76_rr(dev, MT_EFUSE_DATA(i));
- put_unaligned_le32(val, data + 4 * i);
- }
-
- return 0;
-}
-
-static int
-mt76x2_get_efuse_data(struct mt76x2_dev *dev, void *buf, int len)
-{
- int ret, i;
-
- for (i = 0; i + 16 <= len; i += 16) {
- ret = mt76x2_efuse_read(dev, i, buf + i);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static bool
-mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+mt76x2_has_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
{
u16 *efuse_w = (u16 *) efuse;
@@ -132,7 +68,7 @@ mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
}
static void
-mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+mt76x2_apply_cal_free_data(struct mt76x02_dev *dev, u8 *efuse)
{
#define GROUP_5G(_id) \
MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \
@@ -201,7 +137,7 @@ mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
}
-static int mt76x2_check_eeprom(struct mt76x2_dev *dev)
+static int mt76x2_check_eeprom(struct mt76x02_dev *dev)
{
u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
@@ -219,7 +155,7 @@ static int mt76x2_check_eeprom(struct mt76x2_dev *dev)
}
static int
-mt76x2_eeprom_load(struct mt76x2_dev *dev)
+mt76x2_eeprom_load(struct mt76x02_dev *dev)
{
void *efuse;
bool found;
@@ -241,7 +177,8 @@ mt76x2_eeprom_load(struct mt76x2_dev *dev)
efuse = dev->mt76.otp.data;
- if (mt76x2_get_efuse_data(dev, efuse, MT7662_EEPROM_SIZE))
+ if (mt76x02_get_efuse_data(dev, 0, efuse, MT7662_EEPROM_SIZE,
+ MT_EE_READ))
goto out;
if (found) {
@@ -259,56 +196,32 @@ out:
return 0;
}
-static inline int
-mt76x2_sign_extend(u32 val, unsigned int size)
-{
- bool sign = val & BIT(size - 1);
-
- val &= BIT(size - 1) - 1;
-
- return sign ? val : -val;
-}
-
-static inline int
-mt76x2_sign_extend_optional(u32 val, unsigned int size)
-{
- bool enable = val & BIT(size);
-
- return enable ? mt76x2_sign_extend(val, size) : 0;
-}
-
-static bool
-field_valid(u8 val)
-{
- return val != 0 && val != 0xff;
-}
-
static void
-mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val)
+mt76x2_set_rx_gain_group(struct mt76x02_dev *dev, u8 val)
{
s8 *dest = dev->cal.rx.high_gain;
- if (!field_valid(val)) {
+ if (!mt76x02_field_valid(val)) {
dest[0] = 0;
dest[1] = 0;
return;
}
- dest[0] = mt76x2_sign_extend(val, 4);
- dest[1] = mt76x2_sign_extend(val >> 4, 4);
+ dest[0] = mt76x02_sign_extend(val, 4);
+ dest[1] = mt76x02_sign_extend(val >> 4, 4);
}
static void
-mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val)
+mt76x2_set_rssi_offset(struct mt76x02_dev *dev, int chain, u8 val)
{
s8 *dest = dev->cal.rx.rssi_offset;
- if (!field_valid(val)) {
+ if (!mt76x02_field_valid(val)) {
dest[chain] = 0;
return;
}
- dest[chain] = mt76x2_sign_extend_optional(val, 7);
+ dest[chain] = mt76x02_sign_extend_optional(val, 7);
}
static enum mt76x2_cal_channel_group
@@ -328,28 +241,34 @@ mt76x2_get_cal_channel_group(int channel)
}
static u8
-mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel)
+mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel)
{
enum mt76x2_cal_channel_group group;
group = mt76x2_get_cal_channel_group(channel);
switch (group) {
case MT_CH_5G_JAPAN:
- return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
case MT_CH_5G_UNII_1:
- return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
case MT_CH_5G_UNII_2:
- return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
case MT_CH_5G_UNII_2E_1:
- return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
case MT_CH_5G_UNII_2E_2:
- return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
default:
- return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
+ return mt76x02_eeprom_get(dev,
+ MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
}
}
-void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
+void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
int channel = chan->hw_value;
@@ -358,75 +277,27 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
- val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
else
val = mt76x2_get_5g_rx_gain(dev, channel);
mt76x2_set_rx_gain_group(dev, val);
- if (chan->band == NL80211_BAND_2GHZ) {
- val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
- mt76x2_set_rssi_offset(dev, 0, val);
- mt76x2_set_rssi_offset(dev, 1, val >> 8);
- } else {
- val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
- mt76x2_set_rssi_offset(dev, 0, val);
- mt76x2_set_rssi_offset(dev, 1, val >> 8);
- }
-
- val = mt76x2_eeprom_get(dev, MT_EE_LNA_GAIN);
- lna_2g = val & 0xff;
- lna_5g[0] = val >> 8;
-
- val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
- lna_5g[1] = val >> 8;
-
- val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
- lna_5g[2] = val >> 8;
-
- if (!field_valid(lna_5g[1]))
- lna_5g[1] = lna_5g[0];
-
- if (!field_valid(lna_5g[2]))
- lna_5g[2] = lna_5g[0];
+ mt76x02_get_rx_gain(dev, chan->band, &val, &lna_2g, lna_5g);
+ mt76x2_set_rssi_offset(dev, 0, val);
+ mt76x2_set_rssi_offset(dev, 1, val >> 8);
dev->cal.rx.mcu_gain = (lna_2g & 0xff);
dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8;
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
- val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
- lna_2g = 0;
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
- memset(lna_5g, 0, sizeof(lna_5g));
-
- if (chan->band == NL80211_BAND_2GHZ)
- lna = lna_2g;
- else if (channel <= 64)
- lna = lna_5g[0];
- else if (channel <= 128)
- lna = lna_5g[1];
- else
- lna = lna_5g[2];
-
- if (lna == 0xff)
- lna = 0;
-
- dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
+ lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+ dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
-static s8
-mt76x2_rate_power_val(u8 val)
-{
- if (!field_valid(val))
- return 0;
-
- return mt76x2_sign_extend_optional(val, 7);
-}
-
-void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
struct ieee80211_channel *chan)
{
bool is_5ghz;
@@ -436,70 +307,64 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
memset(t, 0, sizeof(*t));
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_CCK);
- t->cck[0] = t->cck[1] = mt76x2_rate_power_val(val);
- t->cck[2] = t->cck[3] = mt76x2_rate_power_val(val >> 8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_CCK);
+ t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val);
+ t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8);
if (is_5ghz)
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
else
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
- t->ofdm[0] = t->ofdm[1] = mt76x2_rate_power_val(val);
- t->ofdm[2] = t->ofdm[3] = mt76x2_rate_power_val(val >> 8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
+ t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val);
+ t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8);
if (is_5ghz)
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
else
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
- t->ofdm[4] = t->ofdm[5] = mt76x2_rate_power_val(val);
- t->ofdm[6] = t->ofdm[7] = mt76x2_rate_power_val(val >> 8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
+ t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val);
+ t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8);
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
- t->ht[0] = t->ht[1] = mt76x2_rate_power_val(val);
- t->ht[2] = t->ht[3] = mt76x2_rate_power_val(val >> 8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
+ t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val);
+ t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8);
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
- t->ht[4] = t->ht[5] = mt76x2_rate_power_val(val);
- t->ht[6] = t->ht[7] = mt76x2_rate_power_val(val >> 8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
+ t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val);
+ t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8);
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
- t->ht[8] = t->ht[9] = mt76x2_rate_power_val(val);
- t->ht[10] = t->ht[11] = mt76x2_rate_power_val(val >> 8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
+ t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val);
+ t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8);
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
- t->ht[12] = t->ht[13] = mt76x2_rate_power_val(val);
- t->ht[14] = t->ht[15] = mt76x2_rate_power_val(val >> 8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
+ t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val);
+ t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8);
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
- t->vht[0] = t->vht[1] = mt76x2_rate_power_val(val);
- t->vht[2] = t->vht[3] = mt76x2_rate_power_val(val >> 8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
+ t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val);
+ t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8);
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
- t->vht[4] = t->vht[5] = mt76x2_rate_power_val(val);
- t->vht[6] = t->vht[7] = mt76x2_rate_power_val(val >> 8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
+ t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val);
+ t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8);
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
if (!is_5ghz)
val >>= 8;
- t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
-}
-EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
+ t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8);
-int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
-{
- int i;
- s8 ret = 0;
-
- for (i = 0; i < sizeof(r->all); i++)
- ret = max(ret, r->all[i]);
-
- return ret;
+ memcpy(t->stbc, t->ht, sizeof(t->stbc[0]) * 8);
+ t->stbc[8] = t->vht[8];
+ t->stbc[9] = t->vht[9];
}
-EXPORT_SYMBOL_GPL(mt76x2_get_max_rate_power);
+EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
static void
-mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
- struct ieee80211_channel *chan, int chain, int offset)
+mt76x2_get_power_info_2g(struct mt76x02_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan,
+ int chain, int offset)
{
int channel = chan->hw_value;
int delta_idx;
@@ -518,15 +383,17 @@ mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
t->chain[chain].tssi_slope = data[0];
t->chain[chain].tssi_offset = data[1];
t->chain[chain].target_power = data[2];
- t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+ t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
- val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
t->target_power = val >> 8;
}
static void
-mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
- struct ieee80211_channel *chan, int chain, int offset)
+mt76x2_get_power_info_5g(struct mt76x02_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan,
+ int chain, int offset)
{
int channel = chan->hw_value;
enum mt76x2_cal_channel_group group;
@@ -567,13 +434,13 @@ mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
t->chain[chain].tssi_slope = data[0];
t->chain[chain].tssi_offset = data[1];
t->chain[chain].target_power = data[2];
- t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+ t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
- val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
+ val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
t->target_power = val & 0xff;
}
-void mt76x2_get_power_info(struct mt76x2_dev *dev,
+void mt76x2_get_power_info(struct mt76x02_dev *dev,
struct mt76x2_tx_power_info *t,
struct ieee80211_channel *chan)
{
@@ -581,8 +448,8 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
memset(t, 0, sizeof(*t));
- bw40 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
- bw80 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
+ bw40 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+ bw80 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
if (chan->band == NL80211_BAND_5GHZ) {
bw40 >>= 8;
@@ -597,15 +464,16 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
MT_EE_TX_POWER_1_START_2G);
}
- if (mt76x2_tssi_enabled(dev) || !field_valid(t->target_power))
+ if (mt76x2_tssi_enabled(dev) ||
+ !mt76x02_field_valid(t->target_power))
t->target_power = t->chain[0].target_power;
- t->delta_bw40 = mt76x2_rate_power_val(bw40);
- t->delta_bw80 = mt76x2_rate_power_val(bw80);
+ t->delta_bw40 = mt76x02_rate_power_val(bw40);
+ t->delta_bw80 = mt76x02_rate_power_val(bw80);
}
EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
-int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
+int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
{
enum nl80211_band band = dev->mt76.chandef.chan->band;
u16 val, slope;
@@ -616,17 +484,18 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
if (!mt76x2_temp_tx_alc_enabled(dev))
return -EINVAL;
- if (!mt76x2_ext_pa_enabled(dev, band))
+ if (!mt76x02_ext_pa_enabled(dev, band))
return -EINVAL;
- val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
t->temp_25_ref = val & 0x7f;
if (band == NL80211_BAND_5GHZ) {
- slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
- bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+ slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
+ bounds = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
} else {
- slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
- bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80) >> 8;
+ slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
+ bounds = mt76x02_eeprom_get(dev,
+ MT_EE_TX_POWER_DELTA_BW80) >> 8;
}
t->high_slope = slope & 0xff;
@@ -638,18 +507,7 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
}
EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
-bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
-{
- u16 conf0 = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
-
- if (band == NL80211_BAND_5GHZ)
- return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G);
- else
- return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
-}
-EXPORT_SYMBOL_GPL(mt76x2_ext_pa_enabled);
-
-int mt76x2_eeprom_init(struct mt76x2_dev *dev)
+int mt76x2_eeprom_init(struct mt76x02_dev *dev)
{
int ret;
@@ -657,7 +515,7 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev)
if (ret)
return ret;
- mt76x2_eeprom_parse_hw_cap(dev);
+ mt76x02_eeprom_parse_hw_cap(dev);
mt76x2_eeprom_get_macaddr(dev);
mt76_eeprom_override(&dev->mt76);
dev->mt76.macaddr[0] &= ~BIT(1);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
new file mode 100644
index 000000000000..9e735524d367
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_EEPROM_H
+#define __MT76x2_EEPROM_H
+
+#include "../mt76x02_eeprom.h"
+
+enum mt76x2_cal_channel_group {
+ MT_CH_5G_JAPAN,
+ MT_CH_5G_UNII_1,
+ MT_CH_5G_UNII_2,
+ MT_CH_5G_UNII_2E_1,
+ MT_CH_5G_UNII_2E_2,
+ MT_CH_5G_UNII_3,
+ __MT_CH_MAX
+};
+
+struct mt76x2_tx_power_info {
+ u8 target_power;
+
+ s8 delta_bw40;
+ s8 delta_bw80;
+
+ struct {
+ s8 tssi_slope;
+ s8 tssi_offset;
+ s8 target_power;
+ s8 delta;
+ } chain[MT_MAX_CHAINS];
+};
+
+struct mt76x2_temp_comp {
+ u8 temp_25_ref;
+ int lower_bound; /* J */
+ int upper_bound; /* J */
+ unsigned int high_slope; /* J / dB */
+ unsigned int low_slope; /* J / dB */
+};
+
+void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
+ struct ieee80211_channel *chan);
+void mt76x2_get_power_info(struct mt76x02_dev *dev,
+ struct mt76x2_tx_power_info *t,
+ struct ieee80211_channel *chan);
+int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t);
+void mt76x2_read_rx_gain(struct mt76x02_dev *dev);
+
+static inline bool
+mt76x2_has_ext_lna(struct mt76x02_dev *dev)
+{
+ u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+ return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
+ else
+ return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
+}
+
+static inline bool
+mt76x2_temp_tx_alc_enabled(struct mt76x02_dev *dev)
+{
+ u16 val;
+
+ val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+ if (!(val & BIT(15)))
+ return false;
+
+ return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+}
+
+static inline bool
+mt76x2_tssi_enabled(struct mt76x02_dev *dev)
+{
+ return !mt76x2_temp_tx_alc_enabled(dev) &&
+ (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+ MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index 324b2a4b8b67..3c73fdeaf30f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -16,44 +16,11 @@
*/
#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
-
-#define CCK_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
- .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
-}
-
-#define OFDM_RATE(_idx, _rate) { \
- .bitrate = _rate, \
- .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
- .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
-}
-
-struct ieee80211_rate mt76x2_rates[] = {
- CCK_RATE(0, 10),
- CCK_RATE(1, 20),
- CCK_RATE(2, 55),
- CCK_RATE(3, 110),
- OFDM_RATE(0, 60),
- OFDM_RATE(1, 90),
- OFDM_RATE(2, 120),
- OFDM_RATE(3, 180),
- OFDM_RATE(4, 240),
- OFDM_RATE(5, 360),
- OFDM_RATE(6, 480),
- OFDM_RATE(7, 540),
-};
-EXPORT_SYMBOL_GPL(mt76x2_rates);
-
-struct mt76x2_reg_pair {
- u32 reg;
- u32 value;
-};
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
static void
-mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
+mt76x2_set_wlan_state(struct mt76x02_dev *dev, bool enable)
{
u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
@@ -68,10 +35,13 @@ mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
udelay(20);
}
-void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable)
{
u32 val;
+ if (!enable)
+ goto out;
+
val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
@@ -87,22 +57,12 @@ void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
udelay(20);
+out:
mt76x2_set_wlan_state(dev, enable);
}
EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
-static void
-mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
- const struct mt76x2_reg_pair *data, int len)
-{
- while (len > 0) {
- mt76_wr(dev, data->reg, data->value);
- len--;
- data++;
- }
-}
-
-void mt76_write_mac_initvals(struct mt76x2_dev *dev)
+void mt76_write_mac_initvals(struct mt76x02_dev *dev)
{
#define DEFAULT_PROT_CFG_CCK \
(FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \
@@ -128,7 +88,7 @@ void mt76_write_mac_initvals(struct mt76x2_dev *dev)
FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
- static const struct mt76x2_reg_pair vals[] = {
+ static const struct mt76_reg_pair vals[] = {
/* Copied from MediaTek reference source */
{ MT_PBF_SYS_CTRL, 0x00080c00 },
{ MT_PBF_CFG, 0x1efebcff },
@@ -184,7 +144,7 @@ void mt76_write_mac_initvals(struct mt76x2_dev *dev)
{ MT_PROT_AUTO_TX_CFG, 0x00830083 },
{ MT_HT_CTRL_CFG, 0x000001ff },
};
- struct mt76x2_reg_pair prot_vals[] = {
+ struct mt76_reg_pair prot_vals[] = {
{ MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK },
{ MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG_OFDM },
{ MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
@@ -193,12 +153,12 @@ void mt76_write_mac_initvals(struct mt76x2_dev *dev)
{ MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
};
- mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
- mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
+ mt76_wr_rp(dev, 0, vals, ARRAY_SIZE(vals));
+ mt76_wr_rp(dev, 0, prot_vals, ARRAY_SIZE(prot_vals));
}
EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
-void mt76x2_init_device(struct mt76x2_dev *dev)
+void mt76x2_init_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@@ -207,9 +167,12 @@ void mt76x2_init_device(struct mt76x2_dev *dev)
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
hw->extra_tx_headroom = 2;
+ if (mt76_is_usb(dev))
+ hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
+ MT_DMA_HDR_LEN;
- hw->sta_data_size = sizeof(struct mt76x2_sta);
- hw->vif_data_size = sizeof(struct mt76x2_vif);
+ hw->sta_data_size = sizeof(struct mt76x02_sta);
+ hw->vif_data_size = sizeof(struct mt76x02_vif);
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
@@ -217,9 +180,9 @@ void mt76x2_init_device(struct mt76x2_dev *dev)
dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->chainmask = 0x202;
- dev->global_wcid.idx = 255;
- dev->global_wcid.hw_key_idx = -1;
+ dev->mt76.chainmask = 0x202;
+ dev->mt76.global_wcid.idx = 255;
+ dev->mt76.global_wcid.hw_key_idx = -1;
dev->slottime = 9;
/* init antenna configuration */
@@ -227,7 +190,7 @@ void mt76x2_init_device(struct mt76x2_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x2_init_device);
-void mt76x2_init_txpower(struct mt76x2_dev *dev,
+void mt76x2_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband)
{
struct ieee80211_channel *chan;
@@ -248,7 +211,7 @@ void mt76x2_init_txpower(struct mt76x2_dev *dev,
mt76x2_get_rate_power(dev, &t, chan);
- chan->max_power = mt76x2_get_max_rate_power(&t) +
+ chan->max_power = mt76x02_get_max_rate_power(&t) +
target_power;
chan->max_power /= 2;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
new file mode 100644
index 000000000000..e25905c91ee2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force)
+{
+ bool stopped = false;
+ u32 rts_cfg;
+ int i;
+
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+ /* Wait for MAC to become idle */
+ for (i = 0; i < 300; i++) {
+ if ((mt76_rr(dev, MT_MAC_STATUS) &
+ (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
+ mt76_rr(dev, MT_BBP(IBI, 12))) {
+ udelay(1);
+ continue;
+ }
+
+ stopped = true;
+ break;
+ }
+
+ if (force && !stopped) {
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+ }
+
+ mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
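
mt76x2_mac_stop() above is a bounded busy-wait: up to 300 one-microsecond polls for the TX/RX busy bits to clear, with a forced BBP reset only when force is set and the MAC never went idle. The same shape as a generic helper, assuming a caller-supplied readiness predicate:

    #include <stdbool.h>
    #include <stdio.h>

    /* bounded poll: call is_idle() up to 'tries' times, backing off
     * one tick between attempts; returns true once it reports idle */
    static bool poll_idle(bool (*is_idle)(void), int tries)
    {
        while (tries--) {
            if (is_idle())
                return true;
            /* udelay(1) in the driver; a no-op here */
        }
        return false;
    }

    static bool always_busy(void) { return false; }

    int main(void)
    {
        printf("stopped=%d\n", poll_idle(always_busy, 300)); /* 0 */
        return 0;
    }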
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
new file mode 100644
index 000000000000..a31bd49ae6cb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MAC_H
+#define __MT76x2_MAC_H
+
+#include "mt76x2.h"
+
+struct mt76x02_dev;
+struct mt76x2_sta;
+struct mt76x02_vif;
+
+int mt76x2_mac_start(struct mt76x02_dev *dev);
+void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
+void mt76x2_mac_resume(struct mt76x02_dev *dev);
+void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
+
+int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
+ struct sk_buff *skb);
+void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, bool val);
+
+void mt76x2_mac_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
new file mode 100644
index 000000000000..88bd62cfbdf9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan)
+{
+ struct sk_buff *skb;
+ struct {
+ u8 idx;
+ u8 scan;
+ u8 bw;
+ u8 _pad0;
+
+ __le16 chainmask;
+ u8 ext_chan;
+ u8 _pad1;
+
+ } __packed __aligned(4) msg = {
+ .idx = channel,
+ .scan = scan,
+ .bw = bw,
+ .chainmask = cpu_to_le16(dev->mt76.chainmask),
+ };
+
+ /* first set the channel without the extension channel info */
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
+
+ usleep_range(5000, 10000);
+
+ msg.ext_chan = 0xe0 + bw_index;
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
+
+int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
+ u8 channel)
+{
+ struct sk_buff *skb;
+ struct {
+ u8 cr_mode;
+ u8 temp;
+ u8 ch;
+ u8 _pad0;
+
+ __le32 cfg;
+ } __packed __aligned(4) msg = {
+ .cr_mode = type,
+ .temp = temp_level,
+ .ch = channel,
+ };
+ u32 val;
+
+ val = BIT(31);
+ val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+ val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+ msg.cfg = cpu_to_le32(val);
+
+ /* send the load-CR request assembled from the NIC_CONF_0/1 EEPROM words */
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ return mt76_mcu_send_msg(dev, skb, CMD_LOAD_CR, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr);
+
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
+ bool force)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 channel;
+ __le32 gain_val;
+ } __packed __aligned(4) msg = {
+ .channel = cpu_to_le32(channel),
+ .gain_val = cpu_to_le32(gain),
+ };
+
+ if (force)
+ msg.channel |= cpu_to_le32(BIT(31));
+
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ return mt76_mcu_send_msg(dev, skb, CMD_INIT_GAIN_OP, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain);
+
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
+ struct mt76x2_tssi_comp *tssi_data)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ struct mt76x2_tssi_comp data;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+ .data = *tssi_data,
+ };
+
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
+}
+EXPORT_SYMBOL_GPL(mt76x2_mcu_tssi_comp);
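
All four MCU helpers above share one layout discipline: the request is a __packed __aligned(4) struct whose multi-byte fields go through cpu_to_le16()/cpu_to_le32(), so the wire format the firmware sees is independent of host endianness. A portable sketch of the same idea for the init-gain payload, assuming glibc-style htole32() from <endian.h>:

#include <stdint.h>
#include <string.h>
#include <endian.h>   /* htole32(); assumption: glibc-style endian helpers */

/* Mirrors the CMD_INIT_GAIN_OP payload: two little-endian 32-bit words. */
struct init_gain_msg {
	uint32_t channel;    /* bit 31 set = force, as in mt76x2_mcu_init_gain */
	uint32_t gain_val;
} __attribute__((packed, aligned(4)));

static void build_init_gain(uint8_t *buf, uint8_t channel,
			    uint32_t gain, int force)
{
	struct init_gain_msg msg = {
		.channel  = htole32(channel | (force ? 1u << 31 : 0)),
		.gain_val = htole32(gain),
	};

	memcpy(buf, &msg, sizeof(msg));  /* wire format is now host-independent */
}
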
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
index e40293f21417..acfa2b570c7c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
@@ -17,19 +17,14 @@
#ifndef __MT76x2_MCU_H
#define __MT76x2_MCU_H
+#include "../mt76x02_mcu.h"
+
/* Register definitions */
#define MT_MCU_CPU_CTL 0x0704
#define MT_MCU_CLOCK_CTL 0x0708
-#define MT_MCU_RESET_CTL 0x070C
-#define MT_MCU_INT_LEVEL 0x0718
-#define MT_MCU_COM_REG0 0x0730
-#define MT_MCU_COM_REG1 0x0734
-#define MT_MCU_COM_REG2 0x0738
-#define MT_MCU_COM_REG3 0x073C
#define MT_MCU_PCIE_REMAP_BASE1 0x0740
#define MT_MCU_PCIE_REMAP_BASE2 0x0744
#define MT_MCU_PCIE_REMAP_BASE3 0x0748
-#define MT_MCU_PCIE_REMAP_BASE4 0x074C
#define MT_LED_CTRL 0x0770
#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
@@ -54,62 +49,15 @@
#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
MT_LED_STATUS_DURATION_MASK)
-#define MT_MCU_SEMAPHORE_00 0x07B0
-#define MT_MCU_SEMAPHORE_01 0x07B4
-#define MT_MCU_SEMAPHORE_02 0x07B8
-#define MT_MCU_SEMAPHORE_03 0x07BC
-
#define MT_MCU_ROM_PATCH_OFFSET 0x80000
#define MT_MCU_ROM_PATCH_ADDR 0x90000
#define MT_MCU_ILM_OFFSET 0x80000
-#define MT_MCU_ILM_ADDR 0x80000
#define MT_MCU_DLM_OFFSET 0x100000
#define MT_MCU_DLM_ADDR 0x90000
#define MT_MCU_DLM_ADDR_E3 0x90800
-enum mcu_cmd {
- CMD_FUN_SET_OP = 1,
- CMD_LOAD_CR = 2,
- CMD_INIT_GAIN_OP = 3,
- CMD_DYNC_VGA_OP = 6,
- CMD_TDLS_CH_SW = 7,
- CMD_BURST_WRITE = 8,
- CMD_READ_MODIFY_WRITE = 9,
- CMD_RANDOM_READ = 10,
- CMD_BURST_READ = 11,
- CMD_RANDOM_WRITE = 12,
- CMD_LED_MODE_OP = 16,
- CMD_POWER_SAVING_OP = 20,
- CMD_WOW_CONFIG = 21,
- CMD_WOW_QUERY = 22,
- CMD_WOW_FEATURE = 24,
- CMD_CARRIER_DETECT_OP = 28,
- CMD_RADOR_DETECT_OP = 29,
- CMD_SWITCH_CHANNEL_OP = 30,
- CMD_CALIBRATION_OP = 31,
- CMD_BEACON_OP = 32,
- CMD_ANTENNA_OP = 33,
-};
-
-enum mcu_function {
- Q_SELECT = 1,
- BW_SETTING = 2,
- USB2_SW_DISCONNECT = 2,
- USB3_SW_DISCONNECT = 3,
- LOG_FW_DEBUG_MSG = 4,
- GET_FW_VERSION = 5,
-};
-
-enum mcu_power_mode {
- RADIO_OFF = 0x30,
- RADIO_ON = 0x31,
- RADIO_OFF_AUTO_WAKEUP = 0x32,
- RADIO_OFF_ADVANCE = 0x33,
- RADIO_ON_ADVANCE = 0x34,
-};
-
enum mcu_calibration {
MCU_CAL_R = 1,
MCU_CAL_TEMP_SENSOR,
@@ -146,27 +94,8 @@ struct mt76x2_tssi_comp {
u8 offset1;
} __packed __aligned(4);
-struct mt76x2_fw_header {
- __le32 ilm_len;
- __le32 dlm_len;
- __le16 build_ver;
- __le16 fw_ver;
- u8 pad[4];
- char build_time[16];
-};
-
-struct mt76x2_patch_header {
- char build_time[16];
- char platform[4];
- char hw_version[4];
- char patch_version[4];
- u8 pad[2];
-};
-
-int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
- u32 param);
-int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
-int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev, struct mt76x2_tssi_comp *tssi_data);
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
bool force);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
new file mode 100644
index 000000000000..ab93125f46de
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_H
+#define __MT76x2_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+
+#define MT7662_FIRMWARE "mt7662.bin"
+#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
+#define MT7662_EEPROM_SIZE 512
+
+#define MT7662U_FIRMWARE "mediatek/mt7662u.bin"
+#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin"
+
+#define MT_CALIBRATE_INTERVAL HZ
+
+#include "../mt76x02.h"
+#include "mac.h"
+#include "dfs.h"
+
+static inline bool is_mt7612(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7612;
+}
+
+static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+ return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+ chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
+extern const struct ieee80211_ops mt76x2_ops;
+
+struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev);
+int mt76x2_register_device(struct mt76x02_dev *dev);
+void mt76x2_init_debugfs(struct mt76x02_dev *dev);
+void mt76x2_init_device(struct mt76x02_dev *dev);
+
+void mt76x2_phy_power_on(struct mt76x02_dev *dev);
+int mt76x2_init_hardware(struct mt76x02_dev *dev);
+void mt76x2_stop_hardware(struct mt76x02_dev *dev);
+int mt76x2_eeprom_init(struct mt76x02_dev *dev);
+int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
+void mt76x2_set_tx_ackto(struct mt76x02_dev *dev);
+
+void mt76x2_phy_set_antenna(struct mt76x02_dev *dev);
+int mt76x2_phy_start(struct mt76x02_dev *dev);
+int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x2_phy_calibrate(struct work_struct *work);
+void mt76x2_phy_set_txpower(struct mt76x02_dev *dev);
+
+int mt76x2_mcu_init(struct mt76x02_dev *dev);
+int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
+ u8 bw_index, bool scan);
+int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
+ u8 channel);
+
+void mt76x2_cleanup(struct mt76x02_dev *dev);
+
+void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val);
+
+void mt76x2_pre_tbtt_tasklet(unsigned long arg);
+
+void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
+
+void mt76x2_update_channel(struct mt76_dev *mdev);
+
+void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
+void mt76x2_init_txpower(struct mt76x02_dev *dev,
+ struct ieee80211_supported_band *sband);
+void mt76_write_mac_initvals(struct mt76x02_dev *dev);
+
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait);
+void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
+ enum nl80211_band band);
+void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
+ enum nl80211_band band, u8 bw);
+void mt76x2_apply_gain_adj(struct mt76x02_dev *dev);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
new file mode 100644
index 000000000000..6e932b5010ef
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2U_H
+#define __MT76x2U_H
+
+#include <linux/device.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+
+#define MT7612U_EEPROM_SIZE 512
+
+#define MT_USB_AGGR_SIZE_LIMIT 21 /* 1024B unit */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* 33ns unit */
+
+extern const struct ieee80211_ops mt76x2u_ops;
+
+struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev);
+int mt76x2u_register_device(struct mt76x02_dev *dev);
+int mt76x2u_init_hardware(struct mt76x02_dev *dev);
+void mt76x2u_cleanup(struct mt76x02_dev *dev);
+void mt76x2u_stop_hw(struct mt76x02_dev *dev);
+
+int mt76x2u_mac_reset(struct mt76x02_dev *dev);
+void mt76x2u_mac_resume(struct mt76x02_dev *dev);
+int mt76x2u_mac_start(struct mt76x02_dev *dev);
+int mt76x2u_mac_stop(struct mt76x02_dev *dev);
+
+int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt76x2u_phy_calibrate(struct work_struct *work);
+void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev);
+
+void mt76x2u_mcu_complete_urb(struct urb *urb);
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap,
+ bool ext, int rssi, u32 false_cca);
+int mt76x2u_mcu_init(struct mt76x02_dev *dev);
+int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev);
+
+int mt76x2u_alloc_queues(struct mt76x02_dev *dev);
+void mt76x2u_queues_deinit(struct mt76x02_dev *dev);
+void mt76x2u_stop_queues(struct mt76x02_dev *dev);
+int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
+ u32 flags);
+
+#endif /* __MT76x2U_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index e66f047ea448..92432fe97312 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -19,7 +19,6 @@
#include <linux/pci.h>
#include "mt76x2.h"
-#include "mt76x2_trace.h"
static const struct pci_device_id mt76pci_device_table[] = {
{ PCI_DEVICE(0x14c3, 0x7662) },
@@ -31,7 +30,7 @@ static const struct pci_device_id mt76pci_device_table[] = {
static int
mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
int ret;
ret = pcim_enable_device(pdev);
@@ -53,11 +52,12 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+ mt76x2_reset_wlan(dev, false);
dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
- ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler,
+ ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
@@ -88,7 +88,7 @@ static void
mt76pci_remove(struct pci_dev *pdev)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mt76_unregister_device(mdev);
mt76x2_cleanup(dev);
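
The probe and remove paths recover the driver-private structure from the embedded struct mt76_dev with container_of(); this refactor only changes the outer type from mt76x2_dev to mt76x02_dev. A self-contained sketch of that pattern with hypothetical stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mt76_core { int rev; };           /* stands in for struct mt76_dev */
struct mt76x02_like {                    /* stands in for mt76x02_dev */
	int chainmask;
	struct mt76_core mt76;           /* embedded member, as in the driver */
};

int main(void)
{
	struct mt76x02_like dev = { .chainmask = 0x202 };
	struct mt76_core *mdev = &dev.mt76;

	/* Recover the outer structure from a pointer to the embedded member. */
	struct mt76x02_like *outer =
		container_of(mdev, struct mt76x02_like, mt76);
	printf("chainmask=%#x\n", (unsigned)outer->chainmask);
	return 0;
}
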
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c
index 374cc655c11d..b56febae8945 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c
@@ -36,7 +36,7 @@
.pwr_jmp = power_jmp \
}
-static const struct mt76x2_radar_specs etsi_radar_specs[] = {
+static const struct mt76x02_radar_specs etsi_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
0x7fffffff, 0x155cc0, 0x19cc),
@@ -66,7 +66,7 @@ static const struct mt76x2_radar_specs etsi_radar_specs[] = {
0x7fffffff, 0x2191c0, 0x15cc)
};
-static const struct mt76x2_radar_specs fcc_radar_specs[] = {
+static const struct mt76x02_radar_specs fcc_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
0x7fffffff, 0xfe808, 0x13dc),
@@ -96,7 +96,7 @@ static const struct mt76x2_radar_specs fcc_radar_specs[] = {
0x3938700, 0x57bcf00, 0x1289)
};
-static const struct mt76x2_radar_specs jp_w56_radar_specs[] = {
+static const struct mt76x02_radar_specs jp_w56_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
0x7fffffff, 0x14c080, 0x13dc),
@@ -126,7 +126,7 @@ static const struct mt76x2_radar_specs jp_w56_radar_specs[] = {
0x3938700, 0x57bcf00, 0x1289)
};
-static const struct mt76x2_radar_specs jp_w53_radar_specs[] = {
+static const struct mt76x02_radar_specs jp_w53_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
0x7fffffff, 0x14c080, 0x16cc),
@@ -150,8 +150,9 @@ static const struct mt76x2_radar_specs jp_w53_radar_specs[] = {
{ 0 }
};
-static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
- u8 enable)
+static void
+mt76x2_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev,
+ u8 enable)
{
u32 data;
@@ -159,10 +160,10 @@ static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
mt76_wr(dev, MT_BBP(DFS, 36), data);
}
-static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev,
- struct mt76x2_dfs_sequence *seq)
+static void mt76x2_dfs_seq_pool_put(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_sequence *seq)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
list_add(&seq->head, &dfs_pd->seq_pool);
@@ -170,17 +171,17 @@ static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev,
dfs_pd->seq_stats.seq_len--;
}
-static
-struct mt76x2_dfs_sequence *mt76x2_dfs_seq_pool_get(struct mt76x2_dev *dev)
+static struct mt76x02_dfs_sequence *
+mt76x2_dfs_seq_pool_get(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_sequence *seq;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sequence *seq;
if (list_empty(&dfs_pd->seq_pool)) {
seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC);
} else {
seq = list_first_entry(&dfs_pd->seq_pool,
- struct mt76x2_dfs_sequence,
+ struct mt76x02_dfs_sequence,
head);
list_del(&seq->head);
dfs_pd->seq_stats.seq_pool_len--;
@@ -213,10 +214,10 @@ static int mt76x2_dfs_get_multiple(int val, int frac, int margin)
return factor;
}
-static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev)
+static void mt76x2_dfs_detector_reset(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_sequence *seq, *tmp_seq;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sequence *seq, *tmp_seq;
int i;
/* reset hw detector */
@@ -234,11 +235,11 @@ static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev)
}
}
-static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
+static bool mt76x2_dfs_check_chirp(struct mt76x02_dev *dev)
{
bool ret = false;
u32 current_ts, delta_ts;
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
@@ -255,8 +256,8 @@ static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
return ret;
}
-static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev,
- struct mt76x2_dfs_hw_pulse *pulse)
+static void mt76x2_dfs_get_hw_pulse(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_hw_pulse *pulse)
{
u32 data;
@@ -275,8 +276,8 @@ static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev,
pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
}
-static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
- struct mt76x2_dfs_hw_pulse *pulse)
+static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_hw_pulse *pulse)
{
bool ret = false;
@@ -370,8 +371,8 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
return ret;
}
-static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev,
- struct mt76x2_dfs_event *event)
+static bool mt76x2_dfs_fetch_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
u32 data;
@@ -397,12 +398,12 @@ static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev,
return true;
}
-static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev,
- struct mt76x2_dfs_event *event)
+static bool mt76x2_dfs_check_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
if (event->engine == 2) {
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event_rb *event_buff = &dfs_pd->event_rb[1];
u16 last_event_idx;
u32 delta_ts;
@@ -416,11 +417,11 @@ static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev,
return true;
}
-static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev,
- struct mt76x2_dfs_event *event)
+static void mt76x2_dfs_queue_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_event_rb *event_buff;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event_rb *event_buff;
/* add radar event to ring buffer */
event_buff = event->engine == 2 ? &dfs_pd->event_rb[1]
@@ -434,16 +435,16 @@ static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev,
MT_DFS_EVENT_BUFLEN);
}
-static int mt76x2_dfs_create_sequence(struct mt76x2_dev *dev,
- struct mt76x2_dfs_event *event,
+static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event,
u16 cur_len)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_sw_detector_params *sw_params;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sw_detector_params *sw_params;
u32 width_delta, with_sum, factor, cur_pri;
- struct mt76x2_dfs_sequence seq, *seq_p;
- struct mt76x2_dfs_event_rb *event_rb;
- struct mt76x2_dfs_event *cur_event;
+ struct mt76x02_dfs_sequence seq, *seq_p;
+ struct mt76x02_dfs_event_rb *event_rb;
+ struct mt76x02_dfs_event *cur_event;
int i, j, end, pri;
event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
@@ -521,12 +522,12 @@ next:
return 0;
}
-static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev,
- struct mt76x2_dfs_event *event)
+static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_sw_detector_params *sw_params;
- struct mt76x2_dfs_sequence *seq, *tmp_seq;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sw_detector_params *sw_params;
+ struct mt76x02_dfs_sequence *seq, *tmp_seq;
u16 max_seq_len = 0;
u32 factor, pri;
@@ -553,10 +554,10 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev,
return max_seq_len;
}
-static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev)
+static bool mt76x2_dfs_check_detection(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_sequence *seq;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_sequence *seq;
if (list_empty(&dfs_pd->sequences))
return false;
@@ -570,10 +571,10 @@ static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev)
return false;
}
-static void mt76x2_dfs_add_events(struct mt76x2_dev *dev)
+static void mt76x2_dfs_add_events(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_event event;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event event;
int i, seq_len;
/* disable debug mode */
@@ -597,11 +598,11 @@ static void mt76x2_dfs_add_events(struct mt76x2_dev *dev)
mt76x2_dfs_set_capture_mode_ctrl(dev, true);
}
-static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev)
+static void mt76x2_dfs_check_event_window(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
- struct mt76x2_dfs_event_rb *event_buff;
- struct mt76x2_dfs_event *event;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_event_rb *event_buff;
+ struct mt76x02_dfs_event *event;
int i;
for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) {
@@ -622,8 +623,8 @@ static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev)
static void mt76x2_dfs_tasklet(unsigned long arg)
{
- struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
u32 engine_mask;
int i;
@@ -653,7 +654,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
goto out;
for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
- struct mt76x2_dfs_hw_pulse pulse;
+ struct mt76x02_dfs_hw_pulse pulse;
if (!(engine_mask & (1 << i)))
continue;
@@ -678,12 +679,12 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
out:
- mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+ mt76x02_irq_enable(dev, MT_INT_GPTIMER);
}
-static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev)
+static void mt76x2_dfs_init_sw_detector(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
switch (dev->dfs_pd.region) {
case NL80211_DFS_FCC:
@@ -707,11 +708,11 @@ static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev)
}
}
-static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
+static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev)
{
- u32 data;
+ const struct mt76x02_radar_specs *radar_specs;
u8 i, shift;
- const struct mt76x2_radar_specs *radar_specs;
+ u32 data;
switch (dev->mt76.chandef.width) {
case NL80211_CHAN_WIDTH_40:
@@ -802,7 +803,7 @@ static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
mt76_wr(dev, 0x212c, 0x0c350001);
}
-void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
+void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev)
{
u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
@@ -823,7 +824,7 @@ void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
}
-void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
+void mt76x2_dfs_init_params(struct mt76x02_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
@@ -834,7 +835,7 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
/* enable debug mode */
mt76x2_dfs_set_capture_mode_ctrl(dev, true);
- mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+ mt76x02_irq_enable(dev, MT_INT_GPTIMER);
mt76_rmw_field(dev, MT_INT_TIMER_EN,
MT_INT_TIMER_EN_GP_TIMER_EN, 1);
} else {
@@ -844,15 +845,15 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
mt76_wr(dev, 0x212c, 0);
- mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+ mt76x02_irq_disable(dev, MT_INT_GPTIMER);
mt76_rmw_field(dev, MT_INT_TIMER_EN,
MT_INT_TIMER_EN_GP_TIMER_EN, 0);
}
}
-void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
+void mt76x2_dfs_init_detector(struct mt76x02_dev *dev)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
INIT_LIST_HEAD(&dfs_pd->sequences);
INIT_LIST_HEAD(&dfs_pd->seq_pool);
@@ -862,10 +863,10 @@ void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
(unsigned long)dev);
}
-void mt76x2_dfs_set_domain(struct mt76x2_dev *dev,
+void mt76x2_dfs_set_domain(struct mt76x02_dev *dev,
enum nl80211_dfs_regions region)
{
- struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+ struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
if (dfs_pd->region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet);
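
mt76x2_dfs_seq_pool_get()/mt76x2_dfs_seq_pool_put() above recycle mt76x02_dfs_sequence entries through a free list, so the DFS tasklet can allocate in atomic context without going back to the allocator for every radar event. The same idea as a user-space sketch, using a singly linked list in place of the kernel's list_head:

#include <stdlib.h>

struct seq_entry {
	struct seq_entry *next;
	/* ... radar sequence fields would live here ... */
};

static struct seq_entry *seq_pool;       /* free list of recycled entries */

static struct seq_entry *seq_get(void)
{
	struct seq_entry *e = seq_pool;

	if (e)
		seq_pool = e->next;              /* reuse a pooled entry */
	else
		e = calloc(1, sizeof(*e));       /* pool empty: fall back to allocator */
	return e;
}

static void seq_put(struct seq_entry *e)
{
	e->next = seq_pool;                      /* push back onto the pool */
	seq_pool = e;
}
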
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index b814391f79ac..3824290b219d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -16,11 +16,11 @@
#include <linux/delay.h>
#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x2_mcu.h"
+#include "eeprom.h"
+#include "mcu.h"
static void
-mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
+mt76x2_mac_pbf_init(struct mt76x02_dev *dev)
{
u32 val;
@@ -38,12 +38,12 @@ mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
}
static void
-mt76x2_fixup_xtal(struct mt76x2_dev *dev)
+mt76x2_fixup_xtal(struct mt76x02_dev *dev)
{
u16 eep_val;
s8 offset = 0;
- eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
offset = eep_val & 0x7f;
if ((eep_val & 0xff) == 0xff)
@@ -53,7 +53,7 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev)
eep_val >>= 8;
if (eep_val == 0x00 || eep_val == 0xff) {
- eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
eep_val &= 0xff;
if (eep_val == 0x00 || eep_val == 0xff)
@@ -64,7 +64,7 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev)
mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
- eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
case 0:
mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
@@ -77,31 +77,14 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev)
}
}
-static void
-mt76x2_init_beacon_offsets(struct mt76x2_dev *dev)
-{
- u16 base = MT_BEACON_BASE;
- u32 regs[4] = {};
- int i;
-
- for (i = 0; i < 16; i++) {
- u16 addr = dev->beacon_offsets[i];
-
- regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
- }
-
- for (i = 0; i < 4; i++)
- mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
-}
-
-static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
+static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
{
static const u8 null_addr[ETH_ALEN] = {};
const u8 *macaddr = dev->mt76.macaddr;
u32 val;
int i, k;
- if (!mt76x2_wait_for_mac(dev))
+ if (!mt76x02_wait_for_mac(&dev->mt76))
return -ETIMEDOUT;
val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
@@ -160,14 +143,14 @@ static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
for (i = 0; i < 256; i++)
- mt76x2_mac_wcid_setup(dev, i, 0, NULL);
+ mt76x02_mac_wcid_setup(dev, i, 0, NULL);
for (i = 0; i < MT_MAX_VIFS; i++)
- mt76x2_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);
+ mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);
for (i = 0; i < 16; i++)
for (k = 0; k < 4; k++)
- mt76x2_mac_shared_key_setup(dev, i, k, NULL);
+ mt76x02_mac_shared_key_setup(dev, i, k, NULL);
for (i = 0; i < 8; i++) {
mt76x2_mac_set_bssid(dev, i, null_addr);
@@ -185,14 +168,14 @@ static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
MT_CH_TIME_CFG_EIFS_AS_BUSY |
FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
- mt76x2_init_beacon_offsets(dev);
+ mt76x02_set_beacon_offsets(dev);
mt76x2_set_tx_ackto(dev);
return 0;
}
-int mt76x2_mac_start(struct mt76x2_dev *dev)
+int mt76x2_mac_start(struct mt76x02_dev *dev)
{
int i;
@@ -203,30 +186,12 @@ int mt76x2_mac_start(struct mt76x2_dev *dev)
mt76_rr(dev, MT_TX_STAT_FIFO);
memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
-
- mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
- wait_for_wpdma(dev);
- usleep_range(50, 100);
-
- mt76_set(dev, MT_WPDMA_GLO_CFG,
- MT_WPDMA_GLO_CFG_TX_DMA_EN |
- MT_WPDMA_GLO_CFG_RX_DMA_EN);
-
- mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
-
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
- mt76_wr(dev, MT_MAC_SYS_CTRL,
- MT_MAC_SYS_CTRL_ENABLE_TX |
- MT_MAC_SYS_CTRL_ENABLE_RX);
-
- mt76x2_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
- MT_INT_TX_STAT);
+ mt76x02_mac_start(dev);
return 0;
}
-void mt76x2_mac_resume(struct mt76x2_dev *dev)
+void mt76x2_mac_resume(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX |
@@ -234,7 +199,7 @@ void mt76x2_mac_resume(struct mt76x2_dev *dev)
}
static void
-mt76x2_power_on_rf_patch(struct mt76x2_dev *dev)
+mt76x2_power_on_rf_patch(struct mt76x02_dev *dev)
{
mt76_set(dev, 0x10130, BIT(0) | BIT(16));
udelay(1);
@@ -255,7 +220,7 @@ mt76x2_power_on_rf_patch(struct mt76x2_dev *dev)
}
static void
-mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit)
+mt76x2_power_on_rf(struct mt76x02_dev *dev, int unit)
{
int shift = unit ? 8 : 0;
@@ -277,7 +242,7 @@ mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit)
}
static void
-mt76x2_power_on(struct mt76x2_dev *dev)
+mt76x2_power_on(struct mt76x02_dev *dev)
{
u32 val;
@@ -312,7 +277,7 @@ mt76x2_power_on(struct mt76x2_dev *dev)
mt76x2_power_on_rf(dev, 1);
}
-void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
+void mt76x2_set_tx_ackto(struct mt76x02_dev *dev)
{
u8 ackto, sifs, slottime = dev->slottime;
@@ -329,43 +294,14 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
MT_TX_TIMEOUT_CFG_ACKTO, ackto);
}
-int mt76x2_init_hardware(struct mt76x2_dev *dev)
+int mt76x2_init_hardware(struct mt76x02_dev *dev)
{
- static const u16 beacon_offsets[16] = {
- /* 1024 byte per beacon */
- 0xc000,
- 0xc400,
- 0xc800,
- 0xcc00,
- 0xd000,
- 0xd400,
- 0xd800,
- 0xdc00,
-
- /* BSS idx 8-15 not used for beacons */
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- };
- u32 val;
int ret;
- dev->beacon_offsets = beacon_offsets;
tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
(unsigned long) dev);
- val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
- val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
- MT_WPDMA_GLO_CFG_BIG_ENDIAN |
- MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
- val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
- mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
-
+ mt76x02_dma_disable(dev);
mt76x2_reset_wlan(dev, true);
mt76x2_power_on(dev);
@@ -377,9 +313,9 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
if (ret)
return ret;
- dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+ dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
- ret = mt76x2_dma_init(dev);
+ ret = mt76x02_dma_init(dev);
if (ret)
return ret;
@@ -397,46 +333,44 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
return 0;
}
-void mt76x2_stop_hardware(struct mt76x2_dev *dev)
+void mt76x2_stop_hardware(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
- mt76x2_mcu_set_radio_state(dev, false);
+ mt76x02_mcu_set_radio_state(dev, false, true);
mt76x2_mac_stop(dev, false);
}
-void mt76x2_cleanup(struct mt76x2_dev *dev)
+void mt76x2_cleanup(struct mt76x02_dev *dev)
{
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
tasklet_disable(&dev->pre_tbtt_tasklet);
mt76x2_stop_hardware(dev);
- mt76x2_dma_cleanup(dev);
- mt76x2_mcu_cleanup(dev);
+ mt76x02_dma_cleanup(dev);
+ mt76x02_mcu_cleanup(dev);
}
-struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
+struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
{
static const struct mt76_driver_ops drv_ops = {
- .txwi_size = sizeof(struct mt76x2_txwi),
+ .txwi_size = sizeof(struct mt76x02_txwi),
.update_survey = mt76x2_update_channel,
- .tx_prepare_skb = mt76x2_tx_prepare_skb,
- .tx_complete_skb = mt76x2_tx_complete_skb,
- .rx_skb = mt76x2_queue_rx_skb,
- .rx_poll_complete = mt76x2_rx_poll_complete,
+ .tx_prepare_skb = mt76x02_tx_prepare_skb,
+ .tx_complete_skb = mt76x02_tx_complete_skb,
+ .rx_skb = mt76x02_queue_rx_skb,
+ .rx_poll_complete = mt76x02_rx_poll_complete,
.sta_ps = mt76x2_sta_ps,
};
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
struct mt76_dev *mdev;
mdev = mt76_alloc_device(sizeof(*dev), &mt76x2_ops);
if (!mdev)
return NULL;
- dev = container_of(mdev, struct mt76x2_dev, mt76);
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev->dev = pdev;
mdev->drv = &drv_ops;
- mutex_init(&dev->mutex);
- spin_lock_init(&dev->irq_lock);
return dev;
}
@@ -445,7 +379,7 @@ static void mt76x2_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
mt76x2_dfs_set_domain(dev, request->dfs_region);
}
@@ -481,8 +415,8 @@ static const struct ieee80211_iface_combination if_comb[] = {
static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
u8 delay_off)
{
- struct mt76x2_dev *dev = container_of(mt76, struct mt76x2_dev,
- mt76);
+ struct mt76x02_dev *dev = container_of(mt76, struct mt76x02_dev,
+ mt76);
u32 val;
val = MT_LED_STATUS_DURATION(0xff) |
@@ -526,20 +460,12 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
mt76x2_led_set_config(mt76, 0xff, 0);
}
-int mt76x2_register_device(struct mt76x2_dev *dev)
+int mt76x2_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
- void *status_fifo;
- int fifo_size;
int i, ret;
- fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x2_tx_status));
- status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
- if (!status_fifo)
- return -ENOMEM;
-
- kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
@@ -584,8 +510,8 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
- ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
- ARRAY_SIZE(mt76x2_rates));
+ ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+ ARRAY_SIZE(mt76x02_rates));
if (ret)
goto fail;
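
The removed mt76x2_init_beacon_offsets() (replaced by the shared mt76x02_set_beacon_offsets()) packed sixteen beacon SRAM offsets into four 32-bit registers, one byte per BSS, each encoded as (addr - base) / 64. A sketch of that packing:

#include <stdint.h>

/* Pack 16 per-BSS beacon offsets into four 32-bit registers, one byte
 * each, encoded as (addr - base) / 64 -- the scheme the removed
 * mt76x2_init_beacon_offsets() implemented. */
static void pack_beacon_offsets(const uint16_t offsets[16], uint16_t base,
				uint32_t regs[4])
{
	for (int i = 0; i < 4; i++)
		regs[i] = 0;

	for (int i = 0; i < 16; i++)
		regs[i / 4] |= (uint32_t)((offsets[i] - base) / 64)
				<< (8 * (i % 4));
}
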
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
index 23cf437d14f9..4b331ed14bb2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
@@ -16,11 +16,10 @@
#include <linux/delay.h>
#include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x2_trace.h"
+#include "mcu.h"
+#include "eeprom.h"
-void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
+void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
{
idx &= 7;
mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
@@ -28,84 +27,16 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
get_unaligned_le16(addr + 4));
}
-void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
-{
- struct mt76x2_tx_status stat = {};
- unsigned long flags;
- u8 update = 1;
- bool ret;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return;
-
- trace_mac_txstat_poll(dev);
-
- while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
- spin_lock_irqsave(&dev->irq_lock, flags);
- ret = mt76x2_mac_load_tx_status(dev, &stat);
- spin_unlock_irqrestore(&dev->irq_lock, flags);
-
- if (!ret)
- break;
-
- trace_mac_txstat_fetch(dev, &stat);
-
- if (!irq) {
- mt76x2_send_tx_status(dev, &stat, &update);
- continue;
- }
-
- kfifo_put(&dev->txstatus_fifo, stat);
- }
-}
-
-static void
-mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
- void *txwi_ptr)
-{
- struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
- struct mt76x2_txwi *txwi = txwi_ptr;
-
- mt76x2_mac_poll_tx_status(dev, false);
-
- txi->tries = 0;
- txi->jiffies = jiffies;
- txi->wcid = txwi->wcid;
- txi->pktid = txwi->pktid;
- trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
- mt76x2_tx_complete(dev, skb);
-}
-
-void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
-{
- struct mt76x2_tx_status stat;
- u8 update = 1;
-
- while (kfifo_get(&dev->txstatus_fifo, &stat))
- mt76x2_send_tx_status(dev, &stat, &update);
-}
-
-void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-
- if (e->txwi)
- mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
- else
- dev_kfree_skb_any(e->skb);
-}
-
static int
-mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
+mt76_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
{
- int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
- struct mt76x2_txwi txwi;
+ int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
+ struct mt76x02_txwi txwi;
- if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
+ if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
return -ENOSPC;
- mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
+ mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);
@@ -115,10 +46,10 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
}
static int
-__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
+__mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx, struct sk_buff *skb)
{
- int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
- int beacon_addr = dev->beacon_offsets[bcn_idx];
+ int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
+ int beacon_addr = mt76x02_beacon_offsets[bcn_idx];
int ret = 0;
int i;
@@ -128,8 +59,7 @@ __mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
if (skb) {
ret = mt76_write_beacon(dev, beacon_addr, skb);
if (!ret)
- dev->beacon_data_mask |= BIT(bcn_idx) &
- dev->beacon_mask;
+ dev->beacon_data_mask |= BIT(bcn_idx);
} else {
dev->beacon_data_mask &= ~BIT(bcn_idx);
for (i = 0; i < beacon_len; i += 4)
@@ -141,7 +71,7 @@ __mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
return ret;
}
-int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
struct sk_buff *skb)
{
bool force_update = false;
@@ -176,7 +106,8 @@ int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
return 0;
}
-void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
+void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev,
+ u8 vif_idx, bool val)
{
u8 old_mask = dev->beacon_mask;
bool en;
@@ -201,14 +132,14 @@ void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
if (en)
- mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+ mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
else
- mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+ mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}
void mt76x2_update_channel(struct mt76_dev *mdev)
{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76_channel_state *state;
u32 active, busy;
@@ -225,8 +156,8 @@ void mt76x2_update_channel(struct mt76_dev *mdev)
void mt76x2_mac_work(struct work_struct *work)
{
- struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
- mac_work.work);
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+ mac_work.work);
int i, idx;
mt76x2_update_channel(&dev->mt76);
@@ -241,7 +172,7 @@ void mt76x2_mac_work(struct work_struct *work)
MT_CALIBRATE_INTERVAL);
}
-void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
+void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val)
{
u32 data = 0;
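
mt76_write_beacon() above copies a TXWI followed by the frame into a fixed beacon SRAM slot and fails with -ENOSPC when the pair cannot fit. A user-space sketch of that bounds check; SLOT_LEN and TXWI_LEN are illustrative stand-ins for the 1024-byte slot and sizeof(struct mt76x02_txwi):

#include <stdint.h>
#include <string.h>

#define SLOT_LEN 1024u                   /* one beacon slot, per the offsets */
#define TXWI_LEN 16u                     /* stand-in for the TXWI size */

/* Copy descriptor + frame into a fixed slot, as mt76_write_beacon() does
 * via mt76_wr_copy(); reject frames that cannot fit with the TXWI. */
static int write_beacon_slot(uint8_t slot[SLOT_LEN], const uint8_t *txwi,
			     const uint8_t *frame, size_t frame_len)
{
	if (frame_len + TXWI_LEN > SLOT_LEN)
		return -1;               /* mirrors the -ENOSPC path */

	memcpy(slot, txwi, TXWI_LEN);
	memcpy(slot + TXWI_LEN, frame, frame_len);
	return 0;
}
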
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 680a89f8aa87..034a06295668 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -19,10 +19,10 @@
static int
mt76x2_start(struct ieee80211_hw *hw)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
ret = mt76x2_mac_start(dev);
if (ret)
@@ -38,57 +38,23 @@ mt76x2_start(struct ieee80211_hw *hw)
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
out:
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
static void
mt76x2_stop(struct ieee80211_hw *hw)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76x2_stop_hardware(dev);
- mutex_unlock(&dev->mutex);
-}
-
-static int
-mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- unsigned int idx = 0;
-
- if (vif->addr[0] & BIT(1))
- idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
-
- /*
- * Client mode typically only has one configurable BSSID register,
- * which is used for bssidx=0. This is linked to the MAC address.
- * Since mac80211 allows changing interface types, and we cannot
- * force the use of the primary MAC address for a station mode
- * interface, we need some other way of configuring a per-interface
- * remote BSSID.
- * The hardware provides an AP-Client feature, where bssidx 0-7 are
- * used for AP mode and bssidx 8-15 for client mode.
- * We shift the station interface bss index by 8 to force the
- * hardware to recognize the BSSID.
- * The resulting bssidx mismatch for unicast frames is ignored by hw.
- */
- if (vif->type == NL80211_IFTYPE_STATION)
- idx += 8;
-
- mvif->idx = idx;
- mvif->group_wcid.idx = MT_VIF_WCID(idx);
- mvif->group_wcid.hw_key_idx = -1;
- mt76x2_txq_init(dev, vif->txq);
-
- return 0;
+ mutex_unlock(&dev->mt76.mutex);
}
static int
-mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
+mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
{
int ret;
@@ -124,29 +90,29 @@ mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
static int
mt76x2_config(struct ieee80211_hw *hw, u32 changed)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
int ret = 0;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
- dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
else
- dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+ dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->txpower_conf = hw->conf.power_level * 2;
+ dev->mt76.txpower_conf = hw->conf.power_level * 2;
/* convert to per-chain power for 2x2 devices */
- dev->txpower_conf -= 6;
+ dev->mt76.txpower_conf -= 6;
if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
mt76x2_phy_set_txpower(dev);
- mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf);
+ mt76x02_tx_set_txpwr_auto(dev, dev->mt76.txpower_conf);
}
}
@@ -156,7 +122,7 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
ieee80211_wake_queues(hw);
}
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
@@ -165,10 +131,10 @@ static void
mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_BSSID)
mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
@@ -195,25 +161,25 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76x2_set_tx_ackto(dev);
}
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
}
void
mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+ struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
int idx = msta->wcid.idx;
mt76_stop_tx_queues(&dev->mt76, sta, true);
- mt76x2_mac_wcid_set_drop(dev, idx, ps);
+ mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
static void
mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
tasklet_disable(&dev->pre_tbtt_tasklet);
set_bit(MT76_SCANNING, &dev->mt76.state);
@@ -222,7 +188,7 @@ mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static void
mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
tasklet_enable(&dev->pre_tbtt_tasklet);
@@ -237,9 +203,9 @@ mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static int
mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- *dbm = dev->txpower_cur / 2;
+ *dbm = dev->mt76.txpower_cur / 2;
/* convert from per-chain power to combined output on 2x2 devices */
*dbm += 3;
@@ -250,12 +216,12 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
s16 coverage_class)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
dev->coverage_class = coverage_class;
mt76x2_set_tx_ackto(dev);
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
}
static int
@@ -267,20 +233,20 @@ mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
u32 rx_ant)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
if (!tx_ant || tx_ant > 3 || tx_ant != rx_ant)
return -EINVAL;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
- dev->chainmask = (tx_ant == 3) ? 0x202 : 0x101;
+ dev->mt76.chainmask = (tx_ant == 3) ? 0x202 : 0x101;
dev->mt76.antenna_mask = tx_ant;
mt76_set_stream_caps(&dev->mt76, true);
mt76x2_phy_set_antenna(dev);
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return 0;
}
@@ -288,12 +254,12 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
u32 *rx_ant)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
*tx_ant = dev->mt76.antenna_mask;
*rx_ant = dev->mt76.antenna_mask;
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return 0;
}
@@ -301,7 +267,7 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
static int
mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
if (val != ~0 && val > 0xffff)
return -EINVAL;
@@ -314,25 +280,25 @@ mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
}
const struct ieee80211_ops mt76x2_ops = {
- .tx = mt76x2_tx,
+ .tx = mt76x02_tx,
.start = mt76x2_start,
.stop = mt76x2_stop,
- .add_interface = mt76x2_add_interface,
- .remove_interface = mt76x2_remove_interface,
+ .add_interface = mt76x02_add_interface,
+ .remove_interface = mt76x02_remove_interface,
.config = mt76x2_config,
- .configure_filter = mt76x2_configure_filter,
+ .configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x2_bss_info_changed,
- .sta_add = mt76x2_sta_add,
- .sta_remove = mt76x2_sta_remove,
- .set_key = mt76x2_set_key,
- .conf_tx = mt76x2_conf_tx,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ .set_key = mt76x02_set_key,
+ .conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76x2_sw_scan,
.sw_scan_complete = mt76x2_sw_scan_complete,
.flush = mt76x2_flush,
- .ampdu_action = mt76x2_ampdu_action,
+ .ampdu_action = mt76x02_ampdu_action,
.get_txpower = mt76x2_get_txpower,
.wake_tx_queue = mt76_wake_tx_queue,
- .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+ .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.release_buffered_frames = mt76_release_buffered_frames,
.set_coverage_class = mt76x2_set_coverage_class,
.get_survey = mt76_get_survey,
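
The add_interface logic deleted above (now shared as mt76x02_add_interface()) picks the BSS index from the interface address: locally administered addresses hash into slots 1-8, and station interfaces are shifted by 8 into the hardware's AP-Client bssidx 8-15 range so a per-interface remote BSSID can be programmed. A sketch of that selection:

#include <stdint.h>
#include <stdbool.h>

/* Reproduces the bssidx selection the removed mt76x2_add_interface()
 * performed: locally administered addresses hash into slots 1-8, and
 * station interfaces move up by 8 into the AP-Client index range. */
static unsigned int pick_bss_idx(const uint8_t dev_addr[6],
				 const uint8_t vif_addr[6], bool is_station)
{
	unsigned int idx = 0;

	if (vif_addr[0] & 0x2)   /* BIT(1): locally administered address */
		idx = 1 + (((dev_addr[0] ^ vif_addr[0]) >> 2) & 7);

	if (is_station)          /* bssidx 8-15 are the client-mode slots */
		idx += 8;

	return idx;
}
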
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
new file mode 100644
index 000000000000..d8fa9ba56437
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mcu.h"
+#include "eeprom.h"
+
+static int
+mt76pci_load_rom_patch(struct mt76x02_dev *dev)
+{
+ const struct firmware *fw = NULL;
+ struct mt76x02_patch_header *hdr;
+ bool rom_protect = !is_mt7612(dev);
+ int len, ret = 0;
+ __le32 *cur;
+ u32 patch_mask, patch_reg;
+
+ if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+ dev_err(dev->mt76.dev,
+ "Could not get hardware semaphore for ROM PATCH\n");
+ return -ETIMEDOUT;
+ }
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+ patch_mask = BIT(0);
+ patch_reg = MT_MCU_CLOCK_CTL;
+ } else {
+ patch_mask = BIT(1);
+ patch_reg = MT_MCU_COM_REG0;
+ }
+
+ if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+ dev_info(dev->mt76.dev, "ROM patch already applied\n");
+ goto out;
+ }
+
+ ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
+ if (ret)
+ goto out;
+
+ if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+ ret = -EIO;
+ dev_err(dev->mt76.dev, "Failed to load firmware\n");
+ goto out;
+ }
+
+ hdr = (struct mt76x02_patch_header *)fw->data;
+ dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
+
+ cur = (__le32 *) (fw->data + sizeof(*hdr));
+ len = fw->size - sizeof(*hdr);
+ mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+ /* Trigger ROM */
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
+
+ if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
+ dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ /* release semaphore */
+ if (rom_protect)
+ mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+ release_firmware(fw);
+ return ret;
+}
+
+static int
+mt76pci_load_firmware(struct mt76x02_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76x02_fw_header *hdr;
+ int len, ret;
+ __le32 *cur;
+ u32 offset, val;
+
+ ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto error;
+
+ hdr = (const struct mt76x02_fw_header *)fw->data;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto error;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+ val = le16_to_cpu(hdr->build_ver);
+ dev_info(dev->mt76.dev, "Build: %x\n", val);
+ dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+ cur = (__le32 *) (fw->data + sizeof(*hdr));
+ len = le32_to_cpu(hdr->ilm_len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
+ mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
+
+ cur += len / sizeof(*cur);
+ len = le32_to_cpu(hdr->dlm_len);
+
+ if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+ offset = MT_MCU_DLM_ADDR_E3;
+ else
+ offset = MT_MCU_DLM_ADDR;
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+ mt76_wr_copy(dev, offset, cur, len);
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
+ mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
+
+ /* trigger firmware */
+ mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) {
+ dev_err(dev->mt76.dev, "Firmware failed to start\n");
+ release_firmware(fw);
+ return -ETIMEDOUT;
+ }
+
+ mt76x02_set_ethtool_fwver(dev, hdr);
+ dev_info(dev->mt76.dev, "Firmware running!\n");
+
+ release_firmware(fw);
+
+ return ret;
+
+error:
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+int mt76x2_mcu_init(struct mt76x02_dev *dev)
+{
+ static const struct mt76_mcu_ops mt76x2_mcu_ops = {
+ .mcu_msg_alloc = mt76x02_mcu_msg_alloc,
+ .mcu_send_msg = mt76x02_mcu_msg_send,
+ };
+ int ret;
+
+ dev->mt76.mcu_ops = &mt76x2_mcu_ops;
+
+ ret = mt76pci_load_rom_patch(dev);
+ if (ret)
+ return ret;
+
+ ret = mt76pci_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ mt76x02_mcu_function_select(dev, Q_SELECT, 1, true);
+ return 0;
+}
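
Note: the loader above only accepts an image whose size is exactly header plus ILM plus DLM. A minimal sketch of that layout and size check, assuming a header shaped like the fields the diff actually reads (ilm_len, dlm_len, fw_ver, build_ver, build_time); the real struct mt76x02_fw_header may differ in padding:

	struct fw_header_sketch {
		__le32 ilm_len;			/* instruction memory image length */
		__le32 dlm_len;			/* data memory image length */
		__le16 build_ver;
		__le16 fw_ver;
		u8 pad[4];
		char build_time[16];
	};

	static bool fw_image_size_ok(const struct firmware *fw)
	{
		const struct fw_header_sketch *hdr;
		size_t expect = sizeof(*hdr);

		if (!fw || !fw->data || fw->size < sizeof(*hdr))
			return false;

		hdr = (const struct fw_header_sketch *)fw->data;
		expect += le32_to_cpu(hdr->ilm_len);
		expect += le32_to_cpu(hdr->dlm_len);
		/* anything but an exact match is rejected */
		return fw->size == expect;
	}
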
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
index 84c96c0415b6..5bda44540225 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -16,11 +16,12 @@
#include <linux/delay.h>
#include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x2_eeprom.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
static bool
-mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
+mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u32 flag = 0;
@@ -34,16 +35,16 @@ mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
if (chan->band == NL80211_BAND_5GHZ)
flag |= BIT(0);
- if (mt76x2_ext_pa_enabled(dev, chan->band))
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
flag |= BIT(8);
- mt76x2_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, true);
dev->cal.tssi_cal_done = true;
return true;
}
static void
-mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
+mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
@@ -61,13 +62,13 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
mt76x2_mac_stop(dev, false);
if (is_5ghz)
- mt76x2_mcu_calibrate(dev, MCU_CAL_LC, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, true);
- mt76x2_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
- mt76x2_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
- mt76x2_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
- mt76x2_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
- mt76x2_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0, true);
if (!mac_stopped)
mt76x2_mac_resume(dev);
@@ -77,7 +78,7 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
dev->cal.channel_cal_done = true;
}
-void mt76x2_phy_set_antenna(struct mt76x2_dev *dev)
+void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
{
u32 val;
@@ -124,40 +125,7 @@ void mt76x2_phy_set_antenna(struct mt76x2_dev *dev)
}
static void
-mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest)
-{
- dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
- dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
-}
-
-static int
-mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev)
-{
- switch (dev->mt76.chandef.width) {
- case NL80211_CHAN_WIDTH_80:
- return -62;
- case NL80211_CHAN_WIDTH_40:
- return -65;
- default:
- return -68;
- }
-}
-
-static int
-mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev)
-{
- switch (dev->mt76.chandef.width) {
- case NL80211_CHAN_WIDTH_80:
- return -76;
- case NL80211_CHAN_WIDTH_40:
- return -79;
- default:
- return -82;
- }
-}
-
-static void
-mt76x2_phy_set_gain_val(struct mt76x2_dev *dev)
+mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
{
u32 val;
u8 gain_val[2];
@@ -182,26 +150,7 @@ mt76x2_phy_set_gain_val(struct mt76x2_dev *dev)
}
static void
-mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
-{
- u32 false_cca;
- u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
-
- false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
- dev->cal.false_cca = false_cca;
- if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
- dev->cal.agc_gain_adjust += 2;
- else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
- (dev->cal.agc_gain_adjust >= limit && false_cca < 500))
- dev->cal.agc_gain_adjust -= 2;
- else
- return;
-
- mt76x2_phy_set_gain_val(dev);
-}
-
-static void
-mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
+mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 *gain = dev->cal.agc_gain_init;
u8 low_gain_delta, gain_delta;
@@ -209,16 +158,17 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
int low_gain;
u32 val;
- dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
+ dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
- low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
- (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
+ low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+ (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
dev->cal.low_gain = low_gain;
if (!gain_change) {
- mt76x2_phy_adjust_vga_gain(dev);
+ if (mt76x02_phy_adjust_vga_gain(dev))
+ mt76x2_phy_set_gain_val(dev);
return;
}
@@ -264,7 +214,7 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
mt76_rr(dev, MT_RX_STAT_1);
}
-int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_channel *chan = chandef->chan;
@@ -336,8 +286,8 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
mt76x2_configure_tx_delay(dev, band, bw);
mt76x2_phy_set_txpower(dev);
- mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
- mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
mt76_rmw(dev, MT_EXT_CCA_CFG,
(MT_EXT_CCA_CFG_CCA0 |
@@ -360,17 +310,17 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
if (!dev->cal.init_cal_done) {
- u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+ u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
if (val != 0xff)
- mt76x2_mcu_calibrate(dev, MCU_CAL_R, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true);
}
- mt76x2_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, true);
/* Rx LPF calibration */
if (!dev->cal.init_cal_done)
- mt76x2_mcu_calibrate(dev, MCU_CAL_RC, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, true);
dev->cal.init_cal_done = true;
@@ -383,11 +333,8 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
if (scan)
return 0;
- dev->cal.low_gain = -1;
mt76x2_phy_channel_calibrate(dev, true);
- mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
- memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
- sizeof(dev->cal.agc_gain_cur));
+ mt76x02_init_agc_gain(dev);
/* init default values for temp compensation */
if (mt76x2_tssi_enabled(dev)) {
@@ -404,48 +351,7 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
}
static void
-mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev)
-{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
- struct mt76x2_tx_power_info txp;
- struct mt76x2_tssi_comp t = {};
-
- if (!dev->cal.tssi_cal_done)
- return;
-
- if (!dev->cal.tssi_comp_pending) {
- /* TSSI trigger */
- t.cal_mode = BIT(0);
- mt76x2_mcu_tssi_comp(dev, &t);
- dev->cal.tssi_comp_pending = true;
- } else {
- if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
- return;
-
- dev->cal.tssi_comp_pending = false;
- mt76x2_get_power_info(dev, &txp, chan);
-
- if (mt76x2_ext_pa_enabled(dev, chan->band))
- t.pa_mode = 1;
-
- t.cal_mode = BIT(1);
- t.slope0 = txp.chain[0].tssi_slope;
- t.offset0 = txp.chain[0].tssi_offset;
- t.slope1 = txp.chain[1].tssi_slope;
- t.offset1 = txp.chain[1].tssi_offset;
- mt76x2_mcu_tssi_comp(dev, &t);
-
- if (t.pa_mode || dev->cal.dpd_cal_done)
- return;
-
- usleep_range(10000, 20000);
- mt76x2_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
- dev->cal.dpd_cal_done = true;
- }
-}
-
-static void
-mt76x2_phy_temp_compensate(struct mt76x2_dev *dev)
+mt76x2_phy_temp_compensate(struct mt76x02_dev *dev)
{
struct mt76x2_temp_comp t;
int temp, db_diff;
@@ -474,22 +380,22 @@ mt76x2_phy_temp_compensate(struct mt76x2_dev *dev)
void mt76x2_phy_calibrate(struct work_struct *work)
{
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
- dev = container_of(work, struct mt76x2_dev, cal_work.work);
+ dev = container_of(work, struct mt76x02_dev, cal_work.work);
mt76x2_phy_channel_calibrate(dev, false);
- mt76x2_phy_tssi_compensate(dev);
+ mt76x2_phy_tssi_compensate(dev, true);
mt76x2_phy_temp_compensate(dev);
mt76x2_phy_update_channel_gain(dev);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);
}
-int mt76x2_phy_start(struct mt76x2_dev *dev)
+int mt76x2_phy_start(struct mt76x02_dev *dev)
{
int ret;
- ret = mt76x2_mcu_set_radio_state(dev, true);
+ ret = mt76x02_mcu_set_radio_state(dev, true, true);
if (ret)
return ret;
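
Note: in the rewritten mt76x2_phy_update_channel_gain() above, low_gain packs two RSSI comparisons into a two-bit value, and a full gain re-init runs only when bit 1 flips; everything else goes through the cheaper VGA adjustment. A small sketch of that decision, using the 80 MHz thresholds from the removed helpers (-62 and -76 dBm) as example values:

	/* 0 = below both thresholds, 1 = above only the low (-76) one,
	 * 2 = above both. Only a change in bit 1 ("strong enough for
	 * reduced gain") forces a full gain re-init.
	 */
	static int classify_low_gain(int avg_rssi)
	{
		return (avg_rssi > -62) + (avg_rssi > -76);
	}

	static bool gain_reinit_needed(int prev, int cur)
	{
		return ((prev & 2) ^ (cur & 2)) != 0;
	}
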
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c
index 4c907882e8b0..3a2ec86d3e88 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c
@@ -15,50 +15,18 @@
*/
#include "mt76x2.h"
-#include "mt76x2_dma.h"
struct beacon_bc_data {
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
struct sk_buff_head q;
struct sk_buff *tail[8];
};
-int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int qsel = MT_QSEL_EDCA;
- int ret;
-
- if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
- mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
-
- mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
-
- ret = mt76x2_insert_hdr_pad(skb);
- if (ret < 0)
- return ret;
-
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- qsel = MT_QSEL_MGMT;
-
- *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
- MT_TXD_INFO_80211;
-
- if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
- *tx_info |= MT_TXD_INFO_WIV;
-
- return 0;
-}
-
static void
mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- struct mt76x2_dev *dev = (struct mt76x2_dev *) priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct mt76x02_dev *dev = (struct mt76x02_dev *) priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
struct sk_buff *skb = NULL;
if (!(dev->beacon_mask & BIT(mvif->idx)))
@@ -75,8 +43,8 @@ static void
mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
struct beacon_bc_data *data = priv;
- struct mt76x2_dev *dev = data->dev;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct mt76x02_dev *dev = data->dev;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
@@ -96,7 +64,7 @@ mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
}
static void
-mt76x2_resync_beacon_timer(struct mt76x2_dev *dev)
+mt76x2_resync_beacon_timer(struct mt76x02_dev *dev)
{
u32 timer_val = dev->beacon_int << 4;
@@ -128,7 +96,7 @@ mt76x2_resync_beacon_timer(struct mt76x2_dev *dev)
void mt76x2_pre_tbtt_tasklet(unsigned long arg)
{
- struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
+ struct mt76x02_dev *dev = (struct mt76x02_dev *) arg;
struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
struct beacon_bc_data data = {};
struct sk_buff *skb;
@@ -164,7 +132,7 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg)
while ((skb = __skb_dequeue(&data.q)) != NULL) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
NULL);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index 9fd6ab4cbb94..e9fff5b7f125 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -16,10 +16,12 @@
*/
#include "mt76x2.h"
-#include "mt76x2_eeprom.h"
+#include "eeprom.h"
+#include "mcu.h"
+#include "../mt76x02_phy.h"
static void
-mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+mt76x2_adjust_high_lna_gain(struct mt76x02_dev *dev, int reg, s8 offset)
{
s8 gain;
@@ -29,7 +31,7 @@ mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
}
static void
-mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+mt76x2_adjust_agc_gain(struct mt76x02_dev *dev, int reg, s8 offset)
{
s8 gain;
@@ -38,7 +40,7 @@ mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
}
-void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+void mt76x2_apply_gain_adj(struct mt76x02_dev *dev)
{
s8 *gain_adj = dev->cal.rx.high_gain;
@@ -50,7 +52,7 @@ void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
-void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
+void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
enum nl80211_band band)
{
u32 pa_mode[2];
@@ -63,7 +65,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
- if (mt76x2_ext_pa_enabled(dev, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
} else {
@@ -74,7 +76,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
pa_mode[0] = 0x0000ffff;
pa_mode[1] = 0x00ff00ff;
- if (mt76x2_ext_pa_enabled(dev, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
} else {
@@ -82,7 +84,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
}
- if (mt76x2_ext_pa_enabled(dev, band))
+ if (mt76x02_ext_pa_enabled(dev, band))
pa_mode_adj = 0x04000000;
else
pa_mode_adj = 0;
@@ -96,7 +98,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
- if (mt76x2_ext_pa_enabled(dev, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
u32 val;
if (band == NL80211_BAND_2GHZ)
@@ -123,37 +125,6 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
-static void
-mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
-{
- int i;
-
- for (i = 0; i < sizeof(r->all); i++)
- if (r->all[i] > limit)
- r->all[i] = limit;
-}
-
-static u32
-mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
-{
- u32 val = 0;
-
- val |= (v1 & (BIT(6) - 1)) << 0;
- val |= (v2 & (BIT(6) - 1)) << 8;
- val |= (v3 & (BIT(6) - 1)) << 16;
- val |= (v4 & (BIT(6) - 1)) << 24;
- return val;
-}
-
-static void
-mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
-{
- int i;
-
- for (i = 0; i < sizeof(r->all); i++)
- r->all[i] += offset;
-}
-
static int
mt76x2_get_min_rate_power(struct mt76_rate_power *r)
{
@@ -173,7 +144,7 @@ mt76x2_get_min_rate_power(struct mt76_rate_power *r)
return ret;
}
-void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
{
enum nl80211_chan_width width = dev->mt76.chandef.width;
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
@@ -190,9 +161,9 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
delta = txp.delta_bw80;
mt76x2_get_rate_power(dev, &t, chan);
- mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
- mt76x2_limit_rate_power(&t, dev->txpower_conf);
- dev->txpower_cur = mt76x2_get_max_rate_power(&t);
+ mt76x02_add_rate_power_offset(&t, txp.chain[0].target_power);
+ mt76x02_limit_rate_power(&t, dev->mt76.txpower_conf);
+ dev->mt76.txpower_cur = mt76x02_get_max_rate_power(&t);
base_power = mt76x2_get_min_rate_power(&t);
delta += base_power - txp.chain[0].target_power;
@@ -210,40 +181,22 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
txp_1 = 0x2f;
}
- mt76x2_add_rate_power_offset(&t, -base_power);
+ mt76x02_add_rate_power_offset(&t, -base_power);
dev->target_power = txp.chain[0].target_power;
dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
- dev->rate_power = t;
-
- mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
- mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
-
- mt76_wr(dev, MT_TX_PWR_CFG_0,
- mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_1,
- mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_2,
- mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
- mt76_wr(dev, MT_TX_PWR_CFG_3,
- mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
- mt76_wr(dev, MT_TX_PWR_CFG_4,
- mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
- mt76_wr(dev, MT_TX_PWR_CFG_7,
- mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
- mt76_wr(dev, MT_TX_PWR_CFG_8,
- mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
- mt76_wr(dev, MT_TX_PWR_CFG_9,
- mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
+ dev->mt76.rate_power = t;
+
+ mt76x02_phy_set_txpower(dev, txp_0, txp_1);
}
EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
-void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
+void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
enum nl80211_band band, u8 bw)
{
u32 cfg0, cfg1;
- if (mt76x2_ext_pa_enabled(dev, band)) {
+ if (mt76x02_ext_pa_enabled(dev, band)) {
cfg0 = bw ? 0x000b0c01 : 0x00101101;
cfg1 = 0x00011414;
} else {
@@ -257,93 +210,43 @@ void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
-void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
{
- int core_val, agc_val;
-
- switch (width) {
- case NL80211_CHAN_WIDTH_80:
- core_val = 3;
- agc_val = 7;
- break;
- case NL80211_CHAN_WIDTH_40:
- core_val = 2;
- agc_val = 3;
- break;
- default:
- core_val = 0;
- agc_val = 1;
- break;
- }
-
- mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
- mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
- mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
-
-void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
-{
- switch (band) {
- case NL80211_BAND_2GHZ:
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- case NL80211_BAND_5GHZ:
- mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
- mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
- break;
- }
-
- mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
- primary_upper);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
-
-int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
-{
- struct mt76x2_sta *sta;
- struct mt76_wcid *wcid;
- int i, j, min_rssi = 0;
- s8 cur_rssi;
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct mt76x2_tx_power_info txp;
+ struct mt76x2_tssi_comp t = {};
- local_bh_disable();
- rcu_read_lock();
+ if (!dev->cal.tssi_cal_done)
+ return;
- for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
- unsigned long mask = dev->wcid_mask[i];
+ if (!dev->cal.tssi_comp_pending) {
+ /* TSSI trigger */
+ t.cal_mode = BIT(0);
+ mt76x2_mcu_tssi_comp(dev, &t);
+ dev->cal.tssi_comp_pending = true;
+ } else {
+ if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+ return;
- if (!mask)
- continue;
+ dev->cal.tssi_comp_pending = false;
+ mt76x2_get_power_info(dev, &txp, chan);
- for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
- if (!(mask & 1))
- continue;
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
+ t.pa_mode = 1;
- wcid = rcu_dereference(dev->wcid[j]);
- if (!wcid)
- continue;
+ t.cal_mode = BIT(1);
+ t.slope0 = txp.chain[0].tssi_slope;
+ t.offset0 = txp.chain[0].tssi_offset;
+ t.slope1 = txp.chain[1].tssi_slope;
+ t.offset1 = txp.chain[1].tssi_offset;
+ mt76x2_mcu_tssi_comp(dev, &t);
- sta = container_of(wcid, struct mt76x2_sta, wcid);
- spin_lock(&dev->mt76.rx_lock);
- if (sta->inactive_count++ < 5)
- cur_rssi = ewma_signal_read(&sta->rssi);
- else
- cur_rssi = 0;
- spin_unlock(&dev->mt76.rx_lock);
+ if (t.pa_mode || dev->cal.dpd_cal_done)
+ return;
- if (cur_rssi < min_rssi)
- min_rssi = cur_rssi;
- }
+ usleep_range(10000, 20000);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value, wait);
+ dev->cal.dpd_cal_done = true;
}
-
- rcu_read_unlock();
- local_bh_enable();
-
- if (!min_rssi)
- return -75;
-
- return min_rssi;
}
-EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi);
+EXPORT_SYMBOL_GPL(mt76x2_phy_tssi_compensate);
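
Note: mt76x2_phy_tssi_compensate() above is a two-phase operation driven from the periodic calibration work: one pass triggers the measurement, and a later pass applies the result once the MCU busy bit (MT_BBP(CORE, 34) bit 4) clears. A compressed sketch of the control flow; tssi_send() stands in for mt76x2_mcu_tssi_comp() and is illustrative only:

	static void tssi_send(u32 cal_mode);	/* illustrative stand-in */

	struct tssi_sketch_state {
		bool pending;
	};

	static void tssi_compensate_step(struct tssi_sketch_state *st,
					 bool mcu_busy)
	{
		if (!st->pending) {
			tssi_send(BIT(0));	/* phase 1: trigger measurement */
			st->pending = true;
			return;
		}
		if (mcu_busy)			/* retry on the next work round */
			return;
		st->pending = false;
		tssi_send(BIT(1));		/* phase 2: apply slope/offset comp */
	}
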
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 1428cfdee579..57baf8d1c830 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -17,9 +17,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include "../mt76x02_usb.h"
#include "mt76x2u.h"
static const struct usb_device_id mt76x2u_device_table[] = {
+ { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */
{ USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
{ USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
{ USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
@@ -35,7 +37,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
int err;
dev = mt76x2u_alloc_device(&intf->dev);
@@ -45,6 +47,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
udev = usb_get_dev(udev);
usb_reset_device(udev);
+ mt76x02u_init_mcu(&dev->mt76);
err = mt76u_init(&dev->mt76, intf);
if (err < 0)
goto err;
@@ -69,7 +72,7 @@ err:
static void mt76x2u_disconnect(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
- struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(intf);
struct ieee80211_hw *hw = mt76_hw(dev);
set_bit(MT76_REMOVED, &dev->mt76.state);
@@ -84,7 +87,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf)
static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
pm_message_t state)
{
- struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(intf);
struct mt76_usb *usb = &dev->mt76.usb;
mt76u_stop_queues(&dev->mt76);
@@ -96,7 +99,7 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
{
- struct mt76x2_dev *dev = usb_get_intfdata(intf);
+ struct mt76x02_dev *dev = usb_get_intfdata(intf);
struct mt76_usb *usb = &dev->mt76.usb;
int err;
@@ -107,16 +110,24 @@ static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
mt76u_mcu_complete_urb,
&usb->mcu.cmpl);
if (err < 0)
- return err;
+ goto err;
err = mt76u_submit_rx_buffers(&dev->mt76);
if (err < 0)
- return err;
+ goto err;
tasklet_enable(&usb->rx_tasklet);
tasklet_enable(&usb->tx_tasklet);
- return mt76x2u_init_hardware(dev);
+ err = mt76x2u_init_hardware(dev);
+ if (err < 0)
+ goto err;
+
+ return 0;
+
+err:
+ mt76x2u_cleanup(dev);
+ return err;
}
MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 9b81e7641c06..13cce2937573 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -17,9 +17,11 @@
#include <linux/delay.h>
#include "mt76x2u.h"
-#include "mt76x2_eeprom.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
+#include "../mt76x02_usb.h"
-static void mt76x2u_init_dma(struct mt76x2_dev *dev)
+static void mt76x2u_init_dma(struct mt76x02_dev *dev)
{
u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
@@ -34,7 +36,7 @@ static void mt76x2u_init_dma(struct mt76x2_dev *dev)
mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
}
-static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
+static void mt76x2u_power_on_rf_patch(struct mt76x02_dev *dev)
{
mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
udelay(1);
@@ -54,7 +56,7 @@ static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
}
-static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
+static void mt76x2u_power_on_rf(struct mt76x02_dev *dev, int unit)
{
int shift = unit ? 8 : 0;
u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;
@@ -76,7 +78,7 @@ static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
mt76_set(dev, 0x530, 0xf);
}
-static void mt76x2u_power_on(struct mt76x2_dev *dev)
+static void mt76x2u_power_on(struct mt76x02_dev *dev)
{
u32 val;
@@ -112,7 +114,7 @@ static void mt76x2u_power_on(struct mt76x2_dev *dev)
mt76x2u_power_on_rf(dev, 1);
}
-static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
+static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
{
u32 val, i;
@@ -128,35 +130,33 @@ static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
put_unaligned_le32(val, dev->mt76.eeprom.data + i);
}
- mt76x2_eeprom_parse_hw_cap(dev);
+ mt76x02_eeprom_parse_hw_cap(dev);
return 0;
}
-struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev)
+struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev)
{
static const struct mt76_driver_ops drv_ops = {
- .tx_prepare_skb = mt76x2u_tx_prepare_skb,
- .tx_complete_skb = mt76x2u_tx_complete_skb,
- .tx_status_data = mt76x2u_tx_status_data,
- .rx_skb = mt76x2_queue_rx_skb,
+ .tx_prepare_skb = mt76x02u_tx_prepare_skb,
+ .tx_complete_skb = mt76x02u_tx_complete_skb,
+ .tx_status_data = mt76x02_tx_status_data,
+ .rx_skb = mt76x02_queue_rx_skb,
};
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
struct mt76_dev *mdev;
mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
if (!mdev)
return NULL;
- dev = container_of(mdev, struct mt76x2_dev, mt76);
+ dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev->dev = pdev;
mdev->drv = &drv_ops;
- mutex_init(&dev->mutex);
-
return dev;
}
-static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
+static void mt76x2u_init_beacon_offsets(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
@@ -164,27 +164,18 @@ static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
}
-int mt76x2u_init_hardware(struct mt76x2_dev *dev)
+int mt76x2u_init_hardware(struct mt76x02_dev *dev)
{
- static const u16 beacon_offsets[] = {
- /* 512 byte per beacon */
- 0xc000, 0xc200, 0xc400, 0xc600,
- 0xc800, 0xca00, 0xcc00, 0xce00,
- 0xd000, 0xd200, 0xd400, 0xd600,
- 0xd800, 0xda00, 0xdc00, 0xde00
- };
const struct mt76_wcid_addr addr = {
.macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
.ba_mask = 0,
};
int i, err;
- dev->beacon_offsets = beacon_offsets;
-
mt76x2_reset_wlan(dev, true);
mt76x2u_power_on(dev);
- if (!mt76x2_wait_for_mac(dev))
+ if (!mt76x02_wait_for_mac(&dev->mt76))
return -ETIMEDOUT;
err = mt76x2u_mcu_fw_init(dev);
@@ -197,7 +188,7 @@ int mt76x2u_init_hardware(struct mt76x2_dev *dev)
return -EIO;
/* wait for asic ready after fw load. */
- if (!mt76x2_wait_for_mac(dev))
+ if (!mt76x02_wait_for_mac(&dev->mt76))
return -ETIMEDOUT;
mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
@@ -213,12 +204,12 @@ int mt76x2u_init_hardware(struct mt76x2_dev *dev)
if (err < 0)
return err;
- mt76x2u_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
- dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+ mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+ dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
mt76x2u_init_beacon_offsets(dev);
- if (!mt76x2_wait_for_bbp(dev))
+ if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
return -ETIMEDOUT;
/* reset wcid table */
@@ -241,17 +232,17 @@ int mt76x2u_init_hardware(struct mt76x2_dev *dev)
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
- err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+ err = mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
if (err < 0)
return err;
- mt76x2u_phy_set_rxpath(dev);
- mt76x2u_phy_set_txdac(dev);
+ mt76x02_phy_set_rxpath(dev);
+ mt76x02_phy_set_txdac(dev);
return mt76x2u_mac_stop(dev);
}
-int mt76x2u_register_device(struct mt76x2_dev *dev)
+int mt76x2u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
@@ -264,11 +255,11 @@ int mt76x2u_register_device(struct mt76x2_dev *dev)
if (err < 0)
return err;
- err = mt76u_mcu_init_rx(&dev->mt76);
+ err = mt76u_alloc_queues(&dev->mt76);
if (err < 0)
- return err;
+ goto fail;
- err = mt76u_alloc_queues(&dev->mt76);
+ err = mt76u_mcu_init_rx(&dev->mt76);
if (err < 0)
goto fail;
@@ -278,8 +269,8 @@ int mt76x2u_register_device(struct mt76x2_dev *dev)
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
- err = mt76_register_device(&dev->mt76, true, mt76x2_rates,
- ARRAY_SIZE(mt76x2_rates));
+ err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+ ARRAY_SIZE(mt76x02_rates));
if (err)
goto fail;
@@ -302,17 +293,17 @@ fail:
return err;
}
-void mt76x2u_stop_hw(struct mt76x2_dev *dev)
+void mt76x2u_stop_hw(struct mt76x02_dev *dev)
{
mt76u_stop_stat_wk(&dev->mt76);
cancel_delayed_work_sync(&dev->cal_work);
mt76x2u_mac_stop(dev);
}
-void mt76x2u_cleanup(struct mt76x2_dev *dev)
+void mt76x2u_cleanup(struct mt76x02_dev *dev)
{
- mt76x2u_mcu_set_radio_state(dev, false);
+ mt76x02_mcu_set_radio_state(dev, false, false);
mt76x2u_stop_hw(dev);
mt76u_queues_deinit(&dev->mt76);
- mt76x2u_mcu_deinit(dev);
+ mt76u_mcu_deinit(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index eab7ab297aa6..db2194a92e67 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -15,9 +15,9 @@
*/
#include "mt76x2u.h"
-#include "mt76x2_eeprom.h"
+#include "eeprom.h"
-static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev)
+static void mt76x2u_mac_reset_counters(struct mt76x02_dev *dev)
{
mt76_rr(dev, MT_RX_STAT_0);
mt76_rr(dev, MT_RX_STAT_1);
@@ -27,12 +27,12 @@ static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev)
mt76_rr(dev, MT_TX_STA_2);
}
-static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
+static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
{
s8 offset = 0;
u16 eep_val;
- eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
offset = eep_val & 0x7f;
if ((eep_val & 0xff) == 0xff)
@@ -42,7 +42,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
eep_val >>= 8;
if (eep_val == 0x00 || eep_val == 0xff) {
- eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
eep_val &= 0xff;
if (eep_val == 0x00 || eep_val == 0xff)
@@ -67,7 +67,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
/* init fce */
mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
- eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
case 0:
mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
@@ -80,7 +80,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
}
}
-int mt76x2u_mac_reset(struct mt76x2_dev *dev)
+int mt76x2u_mac_reset(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));
@@ -114,15 +114,15 @@ int mt76x2u_mac_reset(struct mt76x2_dev *dev)
return 0;
}
-int mt76x2u_mac_start(struct mt76x2_dev *dev)
+int mt76x2u_mac_start(struct mt76x02_dev *dev)
{
mt76x2u_mac_reset_counters(dev);
mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
- wait_for_wpdma(dev);
+ mt76x02_wait_for_wpdma(&dev->mt76, 1000);
usleep_range(50, 100);
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX |
@@ -131,7 +131,7 @@ int mt76x2u_mac_start(struct mt76x2_dev *dev)
return 0;
}
-int mt76x2u_mac_stop(struct mt76x2_dev *dev)
+int mt76x2u_mac_stop(struct mt76x02_dev *dev)
{
int i, count = 0, val;
bool stopped = false;
@@ -212,7 +212,7 @@ int mt76x2u_mac_stop(struct mt76x2_dev *dev)
return 0;
}
-void mt76x2u_mac_resume(struct mt76x2_dev *dev)
+void mt76x2u_mac_resume(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX |
@@ -220,21 +220,3 @@ void mt76x2u_mac_resume(struct mt76x2_dev *dev)
mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20));
mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1));
}
-
-void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr)
-{
- ether_addr_copy(dev->mt76.macaddr, addr);
-
- if (!is_valid_ether_addr(dev->mt76.macaddr)) {
- eth_random_addr(dev->mt76.macaddr);
- dev_info(dev->mt76.dev,
- "Invalid MAC address, using random address %pM\n",
- dev->mt76.macaddr);
- }
-
- mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
- mt76_wr(dev, MT_MAC_ADDR_DW1,
- get_unaligned_le16(dev->mt76.macaddr + 4) |
- FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
-}
-
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 7367ba111119..1971a1b00038 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -18,10 +18,10 @@
static int mt76x2u_start(struct ieee80211_hw *hw)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
ret = mt76x2u_mac_start(dev);
if (ret)
@@ -30,40 +30,34 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
out:
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
static void mt76x2u_stop(struct ieee80211_hw *hw)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76x2u_stop_hw(dev);
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
}
static int mt76x2u_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *)vif->drv_priv;
- unsigned int idx = 0;
+ struct mt76x02_dev *dev = hw->priv;
if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
- mt76x2u_mac_setaddr(dev, vif->addr);
-
- mvif->idx = idx;
- mvif->group_wcid.idx = MT_VIF_WCID(idx);
- mvif->group_wcid.hw_key_idx = -1;
- mt76x2_txq_init(dev, vif->txq);
+ mt76x02_mac_setaddr(dev, vif->addr);
+ mt76x02_vif_init(dev, vif, 0);
return 0;
}
static int
-mt76x2u_set_channel(struct mt76x2_dev *dev,
+mt76x2u_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
int err;
@@ -91,9 +85,9 @@ static void
mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_ASSOC) {
mt76x2u_phy_channel_calibrate(dev);
@@ -107,23 +101,23 @@ mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
get_unaligned_le16(info->bssid + 4));
}
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
}
static int
mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
int err = 0;
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
- dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+ dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
else
- dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+ dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -133,16 +127,16 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->txpower_conf = hw->conf.power_level * 2;
+ dev->mt76.txpower_conf = hw->conf.power_level * 2;
/* convert to per-chain power for 2x2 devices */
- dev->txpower_conf -= 6;
+ dev->mt76.txpower_conf -= 6;
if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
mt76x2_phy_set_txpower(dev);
}
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mt76.mutex);
return err;
}
@@ -151,7 +145,7 @@ static void
mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
set_bit(MT76_SCANNING, &dev->mt76.state);
}
@@ -159,27 +153,27 @@ mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static void
mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
}
const struct ieee80211_ops mt76x2u_ops = {
- .tx = mt76x2_tx,
+ .tx = mt76x02_tx,
.start = mt76x2u_start,
.stop = mt76x2u_stop,
.add_interface = mt76x2u_add_interface,
- .remove_interface = mt76x2_remove_interface,
- .sta_add = mt76x2_sta_add,
- .sta_remove = mt76x2_sta_remove,
- .set_key = mt76x2_set_key,
- .ampdu_action = mt76x2_ampdu_action,
+ .remove_interface = mt76x02_remove_interface,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
+ .set_key = mt76x02_set_key,
+ .ampdu_action = mt76x02_ampdu_action,
.config = mt76x2u_config,
.wake_tx_queue = mt76_wake_tx_queue,
.bss_info_changed = mt76x2u_bss_info_changed,
- .configure_filter = mt76x2_configure_filter,
- .conf_tx = mt76x2_conf_tx,
+ .configure_filter = mt76x02_configure_filter,
+ .conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76x2u_sw_scan,
.sw_scan_complete = mt76x2u_sw_scan_complete,
- .sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+ .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
index 22c16d638baa..3f1e558e5e6d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
@@ -17,11 +17,10 @@
#include <linux/firmware.h>
#include "mt76x2u.h"
-#include "mt76x2_eeprom.h"
+#include "eeprom.h"
+#include "../mt76x02_usb.h"
#define MT_CMD_HDR_LEN 4
-#define MT_INBAND_PACKET_MAX_LEN 192
-#define MT_MCU_MEMMAP_WLAN 0x410000
#define MCU_FW_URB_MAX_PAYLOAD 0x3900
#define MCU_ROM_PATCH_MAX_PAYLOAD 2048
@@ -30,151 +29,7 @@
#define MT76U_MCU_DLM_OFFSET 0x110000
#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
-static int
-mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
- u32 val)
-{
- struct {
- __le32 id;
- __le32 value;
- } __packed __aligned(4) msg = {
- .id = cpu_to_le32(func),
- .value = cpu_to_le32(val),
- };
- struct sk_buff *skb;
-
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
- if (!skb)
- return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP,
- func != Q_SELECT);
-}
-
-int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
-{
- struct {
- __le32 mode;
- __le32 level;
- } __packed __aligned(4) msg = {
- .mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
- .level = cpu_to_le32(0),
- };
- struct sk_buff *skb;
-
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
- if (!skb)
- return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP,
- false);
-}
-
-int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
- u8 channel)
-{
- struct {
- u8 cr_mode;
- u8 temp;
- u8 ch;
- u8 _pad0;
- __le32 cfg;
- } __packed __aligned(4) msg = {
- .cr_mode = type,
- .temp = temp_level,
- .ch = channel,
- };
- struct sk_buff *skb;
- u32 val;
-
- val = BIT(31);
- val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
- val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
- msg.cfg = cpu_to_le32(val);
-
- /* first set the channel without the extension channel info */
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
- if (!skb)
- return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true);
-}
-
-int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
- u8 bw_index, bool scan)
-{
- struct {
- u8 idx;
- u8 scan;
- u8 bw;
- u8 _pad0;
-
- __le16 chainmask;
- u8 ext_chan;
- u8 _pad1;
-
- } __packed __aligned(4) msg = {
- .idx = channel,
- .scan = scan,
- .bw = bw,
- .chainmask = cpu_to_le16(dev->chainmask),
- };
- struct sk_buff *skb;
-
- /* first set the channel without the extension channel info */
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
- if (!skb)
- return -ENOMEM;
-
- mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
-
- usleep_range(5000, 10000);
-
- msg.ext_chan = 0xe0 + bw_index;
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
- if (!skb)
- return -ENOMEM;
-
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
-}
-
-int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
- u32 val)
-{
- struct {
- __le32 id;
- __le32 value;
- } __packed __aligned(4) msg = {
- .id = cpu_to_le32(type),
- .value = cpu_to_le32(val),
- };
- struct sk_buff *skb;
-
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
- if (!skb)
- return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
-}
-
-int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
- bool force)
-{
- struct {
- __le32 channel;
- __le32 gain_val;
- } __packed __aligned(4) msg = {
- .channel = cpu_to_le32(channel),
- .gain_val = cpu_to_le32(gain),
- };
- struct sk_buff *skb;
-
- if (force)
- msg.channel |= cpu_to_le32(BIT(31));
-
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
- if (!skb)
- return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true);
-}
-
-int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
+int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap,
bool ext, int rssi, u32 false_cca)
{
struct {
@@ -194,38 +49,18 @@ int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
val |= BIT(30);
msg.channel = cpu_to_le32(val);
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
- if (!skb)
- return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true);
+ skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ return mt76_mcu_send_msg(dev, skb, CMD_DYNC_VGA_OP, true);
}
-int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
- struct mt76x2_tssi_comp *tssi_data)
-{
- struct {
- __le32 id;
- struct mt76x2_tssi_comp data;
- } __packed __aligned(4) msg = {
- .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
- .data = *tssi_data,
- };
- struct sk_buff *skb;
-
- skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
- if (!skb)
- return -ENOMEM;
- return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
-}
-
-static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
+static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev)
{
mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
USB_DIR_OUT | USB_TYPE_VENDOR,
0x12, 0, NULL, 0);
}
-static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
+static void mt76x2u_mcu_enable_patch(struct mt76x02_dev *dev)
{
struct mt76_usb *usb = &dev->mt76.usb;
const u8 data[] = {
@@ -240,7 +75,7 @@ static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
0x12, 0, usb->data, sizeof(data));
}
-static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
+static void mt76x2u_mcu_reset_wmt(struct mt76x02_dev *dev)
{
struct mt76_usb *usb = &dev->mt76.usb;
u8 data[] = {
@@ -254,10 +89,10 @@ static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
0x12, 0, usb->data, sizeof(data));
}
-static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
+static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
{
bool rom_protect = !is_mt7612(dev);
- struct mt76x2_patch_header *hdr;
+ struct mt76x02_patch_header *hdr;
u32 val, patch_mask, patch_reg;
const struct firmware *fw;
int err;
@@ -292,7 +127,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
goto out;
}
- hdr = (struct mt76x2_patch_header *)fw->data;
+ hdr = (struct mt76x02_patch_header *)fw->data;
dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
/* enable USB_DMA_CFG */
@@ -302,7 +137,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
/* vendor reset */
- mt76u_mcu_fw_reset(&dev->mt76);
+ mt76x02u_mcu_fw_reset(dev);
usleep_range(5000, 10000);
/* enable FCE to send in-band cmd */
@@ -316,10 +151,10 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
/* FCE skip_fs_en */
mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
- err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
- fw->size - sizeof(*hdr),
- MCU_ROM_PATCH_MAX_PAYLOAD,
- MT76U_MCU_ROM_PATCH_OFFSET);
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
+ fw->size - sizeof(*hdr),
+ MCU_ROM_PATCH_MAX_PAYLOAD,
+ MT76U_MCU_ROM_PATCH_OFFSET);
if (err < 0) {
err = -EIO;
goto out;
@@ -341,10 +176,10 @@ out:
return err;
}
-static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
+static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
{
u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
- const struct mt76x2_fw_header *hdr;
+ const struct mt76x02_fw_header *hdr;
int err, len, ilm_len, dlm_len;
const struct firmware *fw;
@@ -357,7 +192,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
goto out;
}
- hdr = (const struct mt76x2_fw_header *)fw->data;
+ hdr = (const struct mt76x02_fw_header *)fw->data;
ilm_len = le32_to_cpu(hdr->ilm_len);
dlm_len = le32_to_cpu(hdr->dlm_len);
len = sizeof(*hdr) + ilm_len + dlm_len;
@@ -375,7 +210,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
/* vendor reset */
- mt76u_mcu_fw_reset(&dev->mt76);
+ mt76x02u_mcu_fw_reset(dev);
usleep_range(5000, 10000);
/* enable USB_DMA_CFG */
@@ -395,9 +230,9 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
/* load ILM */
- err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
- ilm_len, MCU_FW_URB_MAX_PAYLOAD,
- MT76U_MCU_ILM_OFFSET);
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
+ ilm_len, MCU_FW_URB_MAX_PAYLOAD,
+ MT76U_MCU_ILM_OFFSET);
if (err < 0) {
err = -EIO;
goto out;
@@ -406,10 +241,9 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
/* load DLM */
if (mt76xx_rev(dev) >= MT76XX_REV_E3)
dlm_offset += 0x800;
- err = mt76u_mcu_fw_send_data(&dev->mt76,
- fw->data + sizeof(*hdr) + ilm_len,
- dlm_len, MCU_FW_URB_MAX_PAYLOAD,
- dlm_offset);
+ err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr) + ilm_len,
+ dlm_len, MCU_FW_URB_MAX_PAYLOAD,
+ dlm_offset);
if (err < 0) {
err = -EIO;
goto out;
@@ -425,6 +259,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
/* enable FCE to send in-band cmd */
mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+ mt76x02_set_ethtool_fwver(dev, hdr);
dev_dbg(dev->mt76.dev, "firmware running\n");
out:
@@ -432,7 +267,7 @@ out:
return err;
}
-int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
+int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev)
{
int err;
@@ -443,21 +278,13 @@ int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
return mt76x2u_mcu_load_firmware(dev);
}
-int mt76x2u_mcu_init(struct mt76x2_dev *dev)
+int mt76x2u_mcu_init(struct mt76x02_dev *dev)
{
int err;
- err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1);
+ err = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
if (err < 0)
return err;
- return mt76x2u_mcu_set_radio_state(dev, true);
-}
-
-void mt76x2u_mcu_deinit(struct mt76x2_dev *dev)
-{
- struct mt76_usb *usb = &dev->mt76.usb;
-
- usb_kill_urb(usb->mcu.res.urb);
- mt76u_buf_free(&usb->mcu.res);
+ return mt76x02_mcu_set_radio_state(dev, true, false);
}
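
Note: each helper deleted above followed the same recipe, which is what made them collapsible into the shared mt76x02 code: a small packed, 4-byte-aligned little-endian message, wrapped in an skb and sent with a command opcode. A minimal sketch of that pattern, modeled on the function-select helper removed in this hunk and on the mt76_mcu_msg_alloc()/mt76_mcu_send_msg() wrappers the new code calls:

	static int mcu_cmd_sketch(struct mt76x02_dev *dev, u32 func, u32 val)
	{
		struct {
			__le32 id;
			__le32 value;
		} __packed __aligned(4) msg = {
			.id = cpu_to_le32(func),
			.value = cpu_to_le32(val),
		};
		struct sk_buff *skb;

		skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
		/* wait for a response for everything except queue selection */
		return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP,
					 func != Q_SELECT);
	}
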
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
index 5158063d0c2e..ca96ba60510e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -15,42 +15,10 @@
*/
#include "mt76x2u.h"
-#include "mt76x2_eeprom.h"
+#include "eeprom.h"
+#include "../mt76x02_phy.h"
-void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev)
-{
- u32 val;
-
- val = mt76_rr(dev, MT_BBP(AGC, 0));
- val &= ~BIT(4);
-
- switch (dev->chainmask & 0xf) {
- case 2:
- val |= BIT(3);
- break;
- default:
- val &= ~BIT(3);
- break;
- }
- mt76_wr(dev, MT_BBP(AGC, 0), val);
-}
-
-void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev)
-{
- int txpath;
-
- txpath = (dev->chainmask >> 8) & 0xf;
- switch (txpath) {
- case 2:
- mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
- break;
- default:
- mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
- break;
- }
-}
-
-void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev)
+void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
@@ -61,59 +29,18 @@ void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev)
mt76x2u_mac_stop(dev);
if (is_5ghz)
- mt76x2u_mcu_calibrate(dev, MCU_CAL_LC, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, false);
- mt76x2u_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
- mt76x2u_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
- mt76x2u_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
- mt76x2u_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, false);
mt76x2u_mac_resume(dev);
}
static void
-mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev)
-{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
- struct mt76x2_tx_power_info txp;
- struct mt76x2_tssi_comp t = {};
-
- if (!dev->cal.tssi_cal_done)
- return;
-
- if (!dev->cal.tssi_comp_pending) {
- /* TSSI trigger */
- t.cal_mode = BIT(0);
- mt76x2u_mcu_tssi_comp(dev, &t);
- dev->cal.tssi_comp_pending = true;
- } else {
- if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
- return;
-
- dev->cal.tssi_comp_pending = false;
- mt76x2_get_power_info(dev, &txp, chan);
-
- if (mt76x2_ext_pa_enabled(dev, chan->band))
- t.pa_mode = 1;
-
- t.cal_mode = BIT(1);
- t.slope0 = txp.chain[0].tssi_slope;
- t.offset0 = txp.chain[0].tssi_offset;
- t.slope1 = txp.chain[1].tssi_slope;
- t.offset1 = txp.chain[1].tssi_offset;
- mt76x2u_mcu_tssi_comp(dev, &t);
-
- if (t.pa_mode || dev->cal.dpd_cal_done)
- return;
-
- usleep_range(10000, 20000);
- mt76x2u_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
- dev->cal.dpd_cal_done = true;
- }
-}
-
-static void
-mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
+mt76x2u_phy_update_channel_gain(struct mt76x02_dev *dev)
{
u8 channel = dev->mt76.chandef.chan->hw_value;
int freq, freq1;
@@ -142,7 +69,7 @@ mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
break;
}
- dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
+ dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
mt76_rr(dev, MT_RX_STAT_1));
@@ -152,17 +79,17 @@ mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
void mt76x2u_phy_calibrate(struct work_struct *work)
{
- struct mt76x2_dev *dev;
+ struct mt76x02_dev *dev;
- dev = container_of(work, struct mt76x2_dev, cal_work.work);
- mt76x2u_phy_tssi_compensate(dev);
+ dev = container_of(work, struct mt76x02_dev, cal_work.work);
+ mt76x2_phy_tssi_compensate(dev, false);
mt76x2u_phy_update_channel_gain(dev);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);
}
-int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
+int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
u32 ext_cca_chan[4] = {
@@ -228,8 +155,8 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
mt76x2_configure_tx_delay(dev, chan->band, bw);
mt76x2_phy_set_txpower(dev);
- mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
- mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+ mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+ mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
mt76_rmw(dev, MT_EXT_CCA_CFG,
(MT_EXT_CCA_CFG_CCA0 |
@@ -239,28 +166,28 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
MT_EXT_CCA_CFG_CCA_MASK),
ext_cca_chan[ch_group_index]);
- ret = mt76x2u_mcu_set_channel(dev, channel, bw, bw_index, scan);
+ ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
if (ret)
return ret;
- mt76x2u_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+ mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
/* Enable LDPC Rx */
if (mt76xx_rev(dev) >= MT76XX_REV_E3)
mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
if (!dev->cal.init_cal_done) {
- u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+ u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
if (val != 0xff)
- mt76x2u_mcu_calibrate(dev, MCU_CAL_R, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
}
- mt76x2u_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, false);
/* Rx LPF calibration */
if (!dev->cal.init_cal_done)
- mt76x2u_mcu_calibrate(dev, MCU_CAL_RC, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, false);
dev->cal.init_cal_done = true;
mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
@@ -290,9 +217,9 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
chan = dev->mt76.chandef.chan;
if (chan->band == NL80211_BAND_5GHZ)
flag |= BIT(0);
- if (mt76x2_ext_pa_enabled(dev, chan->band))
+ if (mt76x02_ext_pa_enabled(dev, chan->band))
flag |= BIT(8);
- mt76x2u_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, false);
dev->cal.tssi_cal_done = true;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
deleted file mode 100644
index a2338ba139b4..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-
-void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
-{
- struct mt76_txq *mtxq;
-
- if (!txq)
- return;
-
- mtxq = (struct mt76_txq *) txq->drv_priv;
- if (txq->sta) {
- struct mt76x2_sta *sta;
-
- sta = (struct mt76x2_sta *) txq->sta->drv_priv;
- mtxq->wcid = &sta->wcid;
- } else {
- struct mt76x2_vif *mvif;
-
- mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
- mtxq->wcid = &mvif->group_wcid;
- }
-
- mt76_txq_init(&dev->mt76, txq);
-}
-EXPORT_SYMBOL_GPL(mt76x2_txq_init);
-
-int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
-{
- enum ieee80211_ampdu_mlme_action action = params->action;
- struct ieee80211_sta *sta = params->sta;
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct ieee80211_txq *txq = sta->txq[params->tid];
- u16 tid = params->tid;
- u16 *ssn = &params->ssn;
- struct mt76_txq *mtxq;
-
- if (!txq)
- return -EINVAL;
-
- mtxq = (struct mt76_txq *)txq->drv_priv;
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
- mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
- mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
- BIT(16 + tid));
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- mtxq->aggr = true;
- mtxq->send_bar = false;
- ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mtxq->aggr = false;
- ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- break;
- case IEEE80211_AMPDU_TX_START:
- mtxq->agg_ssn = *ssn << 4;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
- mtxq->aggr = false;
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x2_ampdu_action);
-
-int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- int ret = 0;
- int idx = 0;
- int i;
-
- mutex_lock(&dev->mutex);
-
- idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
- if (idx < 0) {
- ret = -ENOSPC;
- goto out;
- }
-
- msta->vif = mvif;
- msta->wcid.sta = 1;
- msta->wcid.idx = idx;
- msta->wcid.hw_key_idx = -1;
- mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
- mt76x2_mac_wcid_set_drop(dev, idx, false);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76x2_txq_init(dev, sta->txq[i]);
-
- if (vif->type == NL80211_IFTYPE_AP)
- set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
-
- ewma_signal_init(&msta->rssi);
-
- rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
-
-out:
- mutex_unlock(&dev->mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(mt76x2_sta_add);
-
-int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- int idx = msta->wcid.idx;
- int i;
-
- mutex_lock(&dev->mutex);
- rcu_assign_pointer(dev->wcid[idx], NULL);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76_txq_remove(&dev->mt76, sta->txq[i]);
- mt76x2_mac_wcid_set_drop(dev, idx, true);
- mt76_wcid_free(dev->wcid_mask, idx);
- mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
- mutex_unlock(&dev->mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x2_sta_remove);
-
-void mt76x2_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct mt76x2_dev *dev = hw->priv;
-
- mt76_txq_remove(&dev->mt76, vif->txq);
-}
-EXPORT_SYMBOL_GPL(mt76x2_remove_interface);
-
-int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
- struct mt76x2_sta *msta;
- struct mt76_wcid *wcid;
- int idx = key->keyidx;
- int ret;
-
- /* fall back to sw encryption for unsupported ciphers */
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- case WLAN_CIPHER_SUITE_TKIP:
- case WLAN_CIPHER_SUITE_CCMP:
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- /*
- * The hardware does not support per-STA RX GTK, fall back
- * to software mode for these.
- */
- if ((vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT) &&
- (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
- key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
-
- msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
- wcid = msta ? &msta->wcid : &mvif->group_wcid;
-
- if (cmd == SET_KEY) {
- key->hw_key_idx = wcid->idx;
- wcid->hw_key_idx = idx;
- if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
- wcid->sw_iv = true;
- }
- } else {
- if (idx == wcid->hw_key_idx) {
- wcid->hw_key_idx = -1;
- wcid->sw_iv = true;
- }
-
- key = NULL;
- }
- mt76_wcid_key_setup(&dev->mt76, wcid, key);
-
- if (!msta) {
- if (key || wcid->hw_key_idx == idx) {
- ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
- if (ret)
- return ret;
- }
-
- return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
- }
-
- return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
-}
-EXPORT_SYMBOL_GPL(mt76x2_set_key);
-
-int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u16 queue, const struct ieee80211_tx_queue_params *params)
-{
- struct mt76x2_dev *dev = hw->priv;
- u8 cw_min = 5, cw_max = 10, qid;
- u32 val;
-
- qid = dev->mt76.q_tx[queue].hw_idx;
-
- if (params->cw_min)
- cw_min = fls(params->cw_min);
- if (params->cw_max)
- cw_max = fls(params->cw_max);
-
- val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
- FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
- FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
- FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
- mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
-
- val = mt76_rr(dev, MT_WMM_TXOP(qid));
- val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
- val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
- mt76_wr(dev, MT_WMM_TXOP(qid), val);
-
- val = mt76_rr(dev, MT_WMM_AIFSN);
- val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
- val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
- mt76_wr(dev, MT_WMM_AIFSN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMIN);
- val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
- val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
- mt76_wr(dev, MT_WMM_CWMIN, val);
-
- val = mt76_rr(dev, MT_WMM_CWMAX);
- val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
- val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
- mt76_wr(dev, MT_WMM_CWMAX, val);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x2_conf_tx);
-
-void mt76x2_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags, u64 multicast)
-{
- struct mt76x2_dev *dev = hw->priv;
- u32 flags = 0;
-
-#define MT76_FILTER(_flag, _hw) do { \
- flags |= *total_flags & FIF_##_flag; \
- dev->rxfilter &= ~(_hw); \
- dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
- } while (0)
-
- mutex_lock(&dev->mutex);
-
- dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
-
- MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
- MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
- MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
- MT_RX_FILTR_CFG_CTS |
- MT_RX_FILTR_CFG_CFEND |
- MT_RX_FILTR_CFG_CFACK |
- MT_RX_FILTR_CFG_BA |
- MT_RX_FILTR_CFG_CTRL_RSV);
- MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
-
- *total_flags = flags;
- mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
-
- mutex_unlock(&dev->mutex);
-}
-EXPORT_SYMBOL_GPL(mt76x2_configure_filter);
-
-void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt76x2_dev *dev = hw->priv;
- struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
- struct ieee80211_tx_rate rate = {};
-
- if (!rates)
- return;
-
- rate.idx = rates->rate[0].idx;
- rate.flags = rates->rate[0].flags;
- mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
- msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
-}
-EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update);
-
-void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
- void *rxwi = skb->data;
-
- if (q == MT_RXQ_MCU) {
- skb_queue_tail(&dev->mcu.res_q, skb);
- wake_up(&dev->mcu.wait);
- return;
- }
-
- skb_pull(skb, sizeof(struct mt76x2_rxwi));
- if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
- dev_kfree_skb(skb);
- return;
- }
-
- mt76_rx(&dev->mt76, q, skb);
-}
-EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);
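
An aside on the deleted mt76x2_conf_tx() above: it stores contention windows as exponents, using fls() to recover n from the (2^n - 1) values mac80211 passes in (defaulting to 5 and 10 when a parameter is zero). A standalone illustration of that arithmetic, with a compiler builtin standing in for the kernel's fls():

#include <stdio.h>

/* Kernel-style fls(x): 1-based index of the highest set bit, 0 for
 * x == 0. For nonzero 32-bit x this equals 32 - __builtin_clz(x). */
static int fls_demo(unsigned int x)
{
        return x ? 32 - (int)__builtin_clz(x) : 0;
}

int main(void)
{
        /* mac80211 hands conf_tx contention windows of the form 2^n - 1 */
        printf("cw 15 -> exp %d, cw 1023 -> exp %d\n",
               fls_demo(15), fls_demo(1023));   /* prints 4 and 10 */
        return 0;
}
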
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
deleted file mode 100644
index 2629779e8d3e..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include "mt76x2.h"
-#include "mt76x2_trace.h"
-
-void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->irq_lock, flags);
- dev->irqmask &= ~clear;
- dev->irqmask |= set;
- mt76_wr(dev, MT_INT_MASK_CSR, dev->irqmask);
- spin_unlock_irqrestore(&dev->irq_lock, flags);
-}
-
-void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-
- mt76x2_irq_enable(dev, MT_INT_RX_DONE(q));
-}
-
-irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance)
-{
- struct mt76x2_dev *dev = dev_instance;
- u32 intr;
-
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
- return IRQ_NONE;
-
- trace_dev_irq(dev, intr, dev->irqmask);
-
- intr &= dev->irqmask;
-
- if (intr & MT_INT_TX_DONE_ALL) {
- mt76x2_irq_disable(dev, MT_INT_TX_DONE_ALL);
- tasklet_schedule(&dev->tx_tasklet);
- }
-
- if (intr & MT_INT_RX_DONE(0)) {
- mt76x2_irq_disable(dev, MT_INT_RX_DONE(0));
- napi_schedule(&dev->mt76.napi[0]);
- }
-
- if (intr & MT_INT_RX_DONE(1)) {
- mt76x2_irq_disable(dev, MT_INT_RX_DONE(1));
- napi_schedule(&dev->mt76.napi[1]);
- }
-
- if (intr & MT_INT_PRE_TBTT)
- tasklet_schedule(&dev->pre_tbtt_tasklet);
-
- /* send buffered multicast frames now */
- if (intr & MT_INT_TBTT)
- mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
-
- if (intr & MT_INT_TX_STAT) {
- mt76x2_mac_poll_tx_status(dev, true);
- tasklet_schedule(&dev->tx_tasklet);
- }
-
- if (intr & MT_INT_GPTIMER) {
- mt76x2_irq_disable(dev, MT_INT_GPTIMER);
- tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
- }
-
- return IRQ_HANDLED;
-}
-
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
deleted file mode 100644
index 6720a6a1313f..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-#include "mt76x2_dma.h"
-
-int
-mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
- struct sk_buff *skb, int cmd, int seq)
-{
- struct mt76_queue *q = &dev->mt76.q_tx[qid];
- struct mt76_queue_buf buf;
- dma_addr_t addr;
- u32 tx_info;
-
- tx_info = MT_MCU_MSG_TYPE_CMD |
- FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
- FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
- FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
- FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
-
- addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev->mt76.dev, addr))
- return -ENOMEM;
-
- buf.addr = addr;
- buf.len = skb->len;
- spin_lock_bh(&q->lock);
- mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
- mt76_queue_kick(dev, q);
- spin_unlock_bh(&q->lock);
-
- return 0;
-}
-
-static int
-mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
- int idx, int n_desc)
-{
- int ret;
-
- q->regs = dev->mt76.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
- q->hw_idx = idx;
-
- ret = mt76_queue_alloc(dev, q);
- if (ret)
- return ret;
-
- mt76x2_irq_enable(dev, MT_INT_TX_DONE(idx));
-
- return 0;
-}
-
-static int
-mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
- int idx, int n_desc, int bufsize)
-{
- int ret;
-
- q->regs = dev->mt76.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
- q->buf_size = bufsize;
-
- ret = mt76_queue_alloc(dev, q);
- if (ret)
- return ret;
-
- mt76x2_irq_enable(dev, MT_INT_RX_DONE(idx));
-
- return 0;
-}
-
-static void
-mt76x2_tx_tasklet(unsigned long data)
-{
- struct mt76x2_dev *dev = (struct mt76x2_dev *) data;
- int i;
-
- mt76x2_mac_process_tx_status_fifo(dev);
-
- for (i = MT_TXQ_MCU; i >= 0; i--)
- mt76_queue_tx_cleanup(dev, i, false);
-
- mt76x2_mac_poll_tx_status(dev, false);
- mt76x2_irq_enable(dev, MT_INT_TX_DONE_ALL);
-}
-
-int mt76x2_dma_init(struct mt76x2_dev *dev)
-{
- static const u8 wmm_queue_map[] = {
- [IEEE80211_AC_BE] = 0,
- [IEEE80211_AC_BK] = 1,
- [IEEE80211_AC_VI] = 2,
- [IEEE80211_AC_VO] = 3,
- };
- int ret;
- int i;
- struct mt76_txwi_cache __maybe_unused *t;
- struct mt76_queue *q;
-
- BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x2_txwi));
- BUILD_BUG_ON(sizeof(struct mt76x2_rxwi) > MT_RX_HEADROOM);
-
- mt76_dma_attach(&dev->mt76);
-
- init_waitqueue_head(&dev->mcu.wait);
- skb_queue_head_init(&dev->mcu.res_q);
-
- tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long) dev);
-
- mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
-
- for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
- ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[i],
- wmm_queue_map[i], MT_TX_RING_SIZE);
- if (ret)
- return ret;
- }
-
- ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
- MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
- if (ret)
- return ret;
-
- ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
- MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
- if (ret)
- return ret;
-
- ret = mt76x2_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
- MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
- if (ret)
- return ret;
-
- q = &dev->mt76.q_rx[MT_RXQ_MAIN];
- q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x2_rxwi);
- ret = mt76x2_init_rx_queue(dev, q, 0, MT76x2_RX_RING_SIZE, MT_RX_BUF_SIZE);
- if (ret)
- return ret;
-
- return mt76_init_queues(dev);
-}
-
-void mt76x2_dma_cleanup(struct mt76x2_dev *dev)
-{
- tasklet_kill(&dev->tx_tasklet);
- mt76_dma_cleanup(&dev->mt76);
-}
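
Note that the wmm_queue_map table open-coded in the deleted mt76x2_dma_init() above is not lost: the tx.c hunk near the end of this patch re-adds it as the exported mt76_ac_to_hwq(), so the PCIe and USB paths share one AC-to-hardware-queue mapping. A userspace sketch of the same table, assuming mac80211's IEEE80211_AC_* ordering (VO=0, VI=1, BE=2, BK=3):

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };   /* mac80211 ordering */

static const unsigned char wmm_queue_map[NUM_ACS] = {
        [AC_BE] = 0, [AC_BK] = 1, [AC_VI] = 2, [AC_VO] = 3,
};

int main(void)
{
        for (int ac = 0; ac < NUM_ACS; ac++)
                printf("AC %d -> hwq %u\n", ac, wmm_queue_map[ac]);
        return 0;       /* VO->3, VI->2, BE->0, BK->1 */
}
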
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
deleted file mode 100644
index 743da57760dc..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/firmware.h>
-#include <linux/delay.h>
-
-#include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x2_dma.h"
-#include "mt76x2_eeprom.h"
-
-static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
-{
- struct sk_buff *skb;
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- return NULL;
- memcpy(skb_put(skb, len), data, len);
-
- return skb;
-}
-
-static struct sk_buff *
-mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires)
-{
- unsigned long timeout;
-
- if (!time_is_after_jiffies(expires))
- return NULL;
-
- timeout = expires - jiffies;
- wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q),
- timeout);
- return skb_dequeue(&dev->mcu.res_q);
-}
-
-static int
-mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb,
- enum mcu_cmd cmd)
-{
- unsigned long expires = jiffies + HZ;
- int ret;
- u8 seq;
-
- if (!skb)
- return -EINVAL;
-
- mutex_lock(&dev->mcu.mutex);
-
- seq = ++dev->mcu.msg_seq & 0xf;
- if (!seq)
- seq = ++dev->mcu.msg_seq & 0xf;
-
- ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
- if (ret)
- goto out;
-
- while (1) {
- u32 *rxfce;
- bool check_seq = false;
-
- skb = mt76x2_mcu_get_response(dev, expires);
- if (!skb) {
- dev_err(dev->mt76.dev,
- "MCU message %d (seq %d) timed out\n", cmd,
- seq);
- ret = -ETIMEDOUT;
- break;
- }
-
- rxfce = (u32 *) skb->cb;
-
- if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
- check_seq = true;
-
- dev_kfree_skb(skb);
- if (check_seq)
- break;
- }
-
-out:
- mutex_unlock(&dev->mcu.mutex);
-
- return ret;
-}
-
-static int
-mt76pci_load_rom_patch(struct mt76x2_dev *dev)
-{
- const struct firmware *fw = NULL;
- struct mt76x2_patch_header *hdr;
- bool rom_protect = !is_mt7612(dev);
- int len, ret = 0;
- __le32 *cur;
- u32 patch_mask, patch_reg;
-
- if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
- dev_err(dev->mt76.dev,
- "Could not get hardware semaphore for ROM PATCH\n");
- return -ETIMEDOUT;
- }
-
- if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
- patch_mask = BIT(0);
- patch_reg = MT_MCU_CLOCK_CTL;
- } else {
- patch_mask = BIT(1);
- patch_reg = MT_MCU_COM_REG0;
- }
-
- if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
- dev_info(dev->mt76.dev, "ROM patch already applied\n");
- goto out;
- }
-
- ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
- if (ret)
- goto out;
-
- if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
- ret = -EIO;
- dev_err(dev->mt76.dev, "Failed to load firmware\n");
- goto out;
- }
-
- hdr = (struct mt76x2_patch_header *) fw->data;
- dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
-
- mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
-
- cur = (__le32 *) (fw->data + sizeof(*hdr));
- len = fw->size - sizeof(*hdr);
- mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
-
- mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
-
- /* Trigger ROM */
- mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
-
- if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
- dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
- ret = -ETIMEDOUT;
- }
-
-out:
- /* release semaphore */
- if (rom_protect)
- mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
- release_firmware(fw);
- return ret;
-}
-
-static int
-mt76pci_load_firmware(struct mt76x2_dev *dev)
-{
- const struct firmware *fw;
- const struct mt76x2_fw_header *hdr;
- int len, ret;
- __le32 *cur;
- u32 offset, val;
-
- ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
- if (ret)
- return ret;
-
- if (!fw || !fw->data || fw->size < sizeof(*hdr))
- goto error;
-
- hdr = (const struct mt76x2_fw_header *) fw->data;
-
- len = sizeof(*hdr);
- len += le32_to_cpu(hdr->ilm_len);
- len += le32_to_cpu(hdr->dlm_len);
-
- if (fw->size != len)
- goto error;
-
- val = le16_to_cpu(hdr->fw_ver);
- dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
- (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
-
- val = le16_to_cpu(hdr->build_ver);
- dev_info(dev->mt76.dev, "Build: %x\n", val);
- dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
-
- cur = (__le32 *) (fw->data + sizeof(*hdr));
- len = le32_to_cpu(hdr->ilm_len);
-
- mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
- mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
-
- cur += len / sizeof(*cur);
- len = le32_to_cpu(hdr->dlm_len);
-
- if (mt76xx_rev(dev) >= MT76XX_REV_E3)
- offset = MT_MCU_DLM_ADDR_E3;
- else
- offset = MT_MCU_DLM_ADDR;
-
- mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
- mt76_wr_copy(dev, offset, cur, len);
-
- mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
-
- val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
- if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
- mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
-
- /* trigger firmware */
- mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
- if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) {
- dev_err(dev->mt76.dev, "Firmware failed to start\n");
- release_firmware(fw);
- return -ETIMEDOUT;
- }
-
- dev_info(dev->mt76.dev, "Firmware running!\n");
-
- release_firmware(fw);
-
- return ret;
-
-error:
- dev_err(dev->mt76.dev, "Invalid firmware\n");
- release_firmware(fw);
- return -ENOENT;
-}
-
-static int
-mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
- u32 val)
-{
- struct sk_buff *skb;
- struct {
- __le32 id;
- __le32 value;
- } __packed __aligned(4) msg = {
- .id = cpu_to_le32(func),
- .value = cpu_to_le32(val),
- };
-
- skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
- return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
-}
-
-int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
- u8 channel)
-{
- struct sk_buff *skb;
- struct {
- u8 cr_mode;
- u8 temp;
- u8 ch;
- u8 _pad0;
-
- __le32 cfg;
- } __packed __aligned(4) msg = {
- .cr_mode = type,
- .temp = temp_level,
- .ch = channel,
- };
- u32 val;
-
- val = BIT(31);
- val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
- val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
- msg.cfg = cpu_to_le32(val);
-
- /* first set the channel without the extension channel info */
- skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
- return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR);
-}
-
-int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
- u8 bw_index, bool scan)
-{
- struct sk_buff *skb;
- struct {
- u8 idx;
- u8 scan;
- u8 bw;
- u8 _pad0;
-
- __le16 chainmask;
- u8 ext_chan;
- u8 _pad1;
-
- } __packed __aligned(4) msg = {
- .idx = channel,
- .scan = scan,
- .bw = bw,
- .chainmask = cpu_to_le16(dev->chainmask),
- };
-
- /* first set the channel without the extension channel info */
- skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
- mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
-
- usleep_range(5000, 10000);
-
- msg.ext_chan = 0xe0 + bw_index;
- skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
- return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
-}
-
-int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
-{
- struct sk_buff *skb;
- struct {
- __le32 mode;
- __le32 level;
- } __packed __aligned(4) msg = {
- .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
- .level = cpu_to_le32(0),
- };
-
- skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
- return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
-}
-
-int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
- u32 param)
-{
- struct sk_buff *skb;
- struct {
- __le32 id;
- __le32 value;
- } __packed __aligned(4) msg = {
- .id = cpu_to_le32(type),
- .value = cpu_to_le32(param),
- };
- int ret;
-
- mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));
-
- skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
- ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
- if (ret)
- return ret;
-
- if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
- BIT(31), BIT(31), 100)))
- return -ETIMEDOUT;
-
- return 0;
-}
-
-int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
- struct mt76x2_tssi_comp *tssi_data)
-{
- struct sk_buff *skb;
- struct {
- __le32 id;
- struct mt76x2_tssi_comp data;
- } __packed __aligned(4) msg = {
- .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
- .data = *tssi_data,
- };
-
- skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
- return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
-}
-
-int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
- bool force)
-{
- struct sk_buff *skb;
- struct {
- __le32 channel;
- __le32 gain_val;
- } __packed __aligned(4) msg = {
- .channel = cpu_to_le32(channel),
- .gain_val = cpu_to_le32(gain),
- };
-
- if (force)
- msg.channel |= cpu_to_le32(BIT(31));
-
- skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
- return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
-}
-
-int mt76x2_mcu_init(struct mt76x2_dev *dev)
-{
- int ret;
-
- mutex_init(&dev->mcu.mutex);
-
- ret = mt76pci_load_rom_patch(dev);
- if (ret)
- return ret;
-
- ret = mt76pci_load_firmware(dev);
- if (ret)
- return ret;
-
- mt76x2_mcu_function_select(dev, Q_SELECT, 1);
- return 0;
-}
-
-int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
-{
- struct sk_buff *skb;
-
- mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
- usleep_range(20000, 30000);
-
- while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL)
- dev_kfree_skb(skb);
-
- return 0;
-}
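
One detail worth calling out in the deleted mt76x2_mcu_msg_send() above: request/response matching keys on a 4-bit sequence number that deliberately skips zero, since zero marks commands that expect no response. A standalone model of that counter:

#include <stdio.h>

static unsigned int msg_seq;

static unsigned int next_seq(void)
{
        unsigned int seq = ++msg_seq & 0xf;

        if (!seq)               /* 0 is reserved: "no response expected" */
                seq = ++msg_seq & 0xf;
        return seq;
}

int main(void)
{
        for (int i = 0; i < 20; i++)
                printf("%u ", next_seq()); /* 1..15, wraps to 1, never 0 */
        printf("\n");
        return 0;
}
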
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
deleted file mode 100644
index 36afb166fa3f..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-#include "mt76x2_dma.h"
-
-void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76x2_dev *dev = hw->priv;
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76_wcid *wcid = &dev->global_wcid;
-
- if (control->sta) {
- struct mt76x2_sta *msta;
-
- msta = (struct mt76x2_sta *)control->sta->drv_priv;
- wcid = &msta->wcid;
- /* sw encrypted frames */
- if (!info->control.hw_key && wcid->hw_key_idx != -1)
- control->sta = NULL;
- }
-
- if (vif && !control->sta) {
- struct mt76x2_vif *mvif;
-
- mvif = (struct mt76x2_vif *)vif->drv_priv;
- wcid = &mvif->group_wcid;
- }
-
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx);
-
-int mt76x2_insert_hdr_pad(struct sk_buff *skb)
-{
- int len = ieee80211_get_hdrlen_from_skb(skb);
-
- if (len % 4 == 0)
- return 0;
-
- skb_push(skb, 2);
- memmove(skb->data, skb->data + 2, len);
-
- skb->data[len] = 0;
- skb->data[len + 1] = 0;
- return 2;
-}
-EXPORT_SYMBOL_GPL(mt76x2_insert_hdr_pad);
-
-s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
- const struct ieee80211_tx_rate *rate)
-{
- s8 max_txpwr;
-
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- u8 mcs = ieee80211_rate_get_vht_mcs(rate);
-
- if (mcs == 8 || mcs == 9) {
- max_txpwr = dev->rate_power.vht[8];
- } else {
- u8 nss, idx;
-
- nss = ieee80211_rate_get_vht_nss(rate);
- idx = ((nss - 1) << 3) + mcs;
- max_txpwr = dev->rate_power.ht[idx & 0xf];
- }
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
- } else {
- enum nl80211_band band = dev->mt76.chandef.chan->band;
-
- if (band == NL80211_BAND_2GHZ) {
- const struct ieee80211_rate *r;
- struct wiphy *wiphy = mt76_hw(dev)->wiphy;
- struct mt76_rate_power *rp = &dev->rate_power;
-
- r = &wiphy->bands[band]->bitrates[rate->idx];
- if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
- max_txpwr = rp->cck[r->hw_value & 0x3];
- else
- max_txpwr = rp->ofdm[r->hw_value & 0x7];
- } else {
- max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
- }
- }
-
- return max_txpwr;
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj);
-
-s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
-{
- txpwr = min_t(s8, txpwr, dev->txpower_conf);
- txpwr -= (dev->target_power + dev->target_power_delta[0]);
- txpwr = min_t(s8, txpwr, max_txpwr_adj);
-
- if (!dev->enable_tpc)
- return 0;
- else if (txpwr >= 0)
- return min_t(s8, txpwr, 7);
- else
- return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj);
-
-void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
-{
- s8 txpwr_adj;
-
- txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
- dev->rate_power.ofdm[4]);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
- mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
- MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto);
-
-void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- ieee80211_free_txskb(mt76_hw(dev), skb);
- } else {
- ieee80211_tx_info_clear_status(info);
- info->status.rates[0].idx = -1;
- info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status(mt76_hw(dev), skb);
- }
-}
-EXPORT_SYMBOL_GPL(mt76x2_tx_complete);
-
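
The deleted mt76x2_insert_hdr_pad() above, together with mt76x2u_remove_dma_hdr() in the mt76x2u_core.c diff below, keeps the 802.11 payload 4-byte aligned by slipping two zero bytes in after any header whose length is not a multiple of 4. The arithmetic in isolation:

#include <stdio.h>

/* Pad inserted after the 802.11 header so the payload stays 4-byte
 * aligned for DMA: 0 when the header length is already a multiple of 4,
 * otherwise 2 (802.11 header lengths are always even). */
static int pad_for_hdrlen(int hdr_len)
{
        return (hdr_len % 4) ? 2 : 0;
}

int main(void)
{
        /* 24-byte 3-address header: no pad; 26-byte QoS header: 2 bytes */
        printf("hdr 24 -> pad %d, hdr 26 -> pad %d\n",
               pad_for_hdrlen(24), pad_for_hdrlen(26));
        return 0;
}
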
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
deleted file mode 100644
index 008092f0cd8a..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __MT76x2U_H
-#define __MT76x2U_H
-
-#include <linux/device.h>
-
-#include "mt76x2.h"
-#include "mt76x2_dma.h"
-#include "mt76x2_mcu.h"
-
-#define MT7612U_EEPROM_SIZE 512
-
-#define MT_USB_AGGR_SIZE_LIMIT 21 /* 1024B unit */
-#define MT_USB_AGGR_TIMEOUT 0x80 /* 33ns unit */
-
-extern const struct ieee80211_ops mt76x2u_ops;
-
-struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev);
-int mt76x2u_register_device(struct mt76x2_dev *dev);
-int mt76x2u_init_hardware(struct mt76x2_dev *dev);
-void mt76x2u_cleanup(struct mt76x2_dev *dev);
-void mt76x2u_stop_hw(struct mt76x2_dev *dev);
-
-void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr);
-int mt76x2u_mac_reset(struct mt76x2_dev *dev);
-void mt76x2u_mac_resume(struct mt76x2_dev *dev);
-int mt76x2u_mac_start(struct mt76x2_dev *dev);
-int mt76x2u_mac_stop(struct mt76x2_dev *dev);
-
-int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
- struct cfg80211_chan_def *chandef);
-void mt76x2u_phy_calibrate(struct work_struct *work);
-void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev);
-void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev);
-void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev);
-
-void mt76x2u_mcu_complete_urb(struct urb *urb);
-int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
- u8 bw_index, bool scan);
-int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
- u32 val);
-int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
- struct mt76x2_tssi_comp *tssi_data);
-int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
- bool force);
-int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
- bool ext, int rssi, u32 false_cca);
-int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val);
-int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type,
- u8 temp_level, u8 channel);
-int mt76x2u_mcu_init(struct mt76x2_dev *dev);
-int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev);
-void mt76x2u_mcu_deinit(struct mt76x2_dev *dev);
-
-int mt76x2u_alloc_queues(struct mt76x2_dev *dev);
-void mt76x2u_queues_deinit(struct mt76x2_dev *dev);
-void mt76x2u_stop_queues(struct mt76x2_dev *dev);
-bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update);
-int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info);
-void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
-int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
- u32 flags);
-
-#endif /* __MT76x2U_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
deleted file mode 100644
index 1ca5dd05b265..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2u.h"
-#include "dma.h"
-
-static void mt76x2u_remove_dma_hdr(struct sk_buff *skb)
-{
- int hdr_len;
-
- skb_pull(skb, sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN);
- hdr_len = ieee80211_get_hdrlen_from_skb(skb);
- if (hdr_len % 4) {
- memmove(skb->data + 2, skb->data, hdr_len);
- skb_pull(skb, 2);
- }
-}
-
-static int
-mt76x2u_check_skb_rooms(struct sk_buff *skb)
-{
- int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
- u32 need_head;
-
- need_head = sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN;
- if (hdr_len % 4)
- need_head += 2;
- return skb_cow(skb, need_head);
-}
-
-static int
-mt76x2u_set_txinfo(struct sk_buff *skb,
- struct mt76_wcid *wcid, u8 ep)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- enum mt76x2_qsel qsel;
- u32 flags;
-
- if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
- ep == MT_EP_OUT_HCCA)
- qsel = MT_QSEL_MGMT;
- else
- qsel = MT_QSEL_EDCA;
-
- flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
- MT_TXD_INFO_80211;
- if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
- flags |= MT_TXD_INFO_WIV;
-
- return mt76u_skb_dma_info(skb, WLAN_PORT, flags);
-}
-
-bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
- struct mt76x2_tx_status stat;
-
- if (!mt76x2_mac_load_tx_status(dev, &stat))
- return false;
-
- mt76x2_send_tx_status(dev, &stat, update);
-
- return true;
-}
-
-int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
- struct mt76x2_txwi *txwi;
- int err, len = skb->len;
-
- err = mt76x2u_check_skb_rooms(skb);
- if (err < 0)
- return -ENOMEM;
-
- mt76x2_insert_hdr_pad(skb);
-
- txwi = skb_push(skb, sizeof(struct mt76x2_txwi));
- mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
-
- return mt76x2u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
-}
-
-void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
-{
- struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-
- mt76x2u_remove_dma_hdr(e->skb);
- mt76x2_tx_complete(dev, e->skb);
-}
-
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index af48d43bb7dc..7cbce03aa65b 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -91,11 +91,24 @@ mt76_txq_get_qid(struct ieee80211_txq *txq)
return txq->ac;
}
+static void
+mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ !ieee80211_is_data_present(hdr->frame_control))
+ return;
+
+ mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
+}
+
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct mt76_queue *q;
int qid = skb_get_queue_mapping(skb);
@@ -108,6 +121,19 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
+ if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
+ struct ieee80211_txq *txq;
+ struct mt76_txq *mtxq;
+ u8 tid;
+
+ tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ txq = sta->txq[tid];
+ mtxq = (struct mt76_txq *) txq->drv_priv;
+
+ if (mtxq->aggr)
+ mt76_check_agg_ssn(mtxq, skb);
+ }
+
q = &dev->q_tx[qid];
spin_lock_bh(&q->lock);
@@ -144,17 +170,6 @@ mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
}
static void
-mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-
- if (!ieee80211_is_data_qos(hdr->frame_control))
- return;
-
- mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
-}
-
-static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
struct sk_buff *skb, bool last)
{
@@ -442,3 +457,19 @@ void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);
+
+u8 mt76_ac_to_hwq(u8 ac)
+{
+ static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+
+ if (WARN_ON(ac >= IEEE80211_NUM_ACS))
+ return 0;
+
+ return wmm_queue_map[ac];
+}
+EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
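
The tx.c changes above move mt76_check_agg_ssn() ahead of mt76_tx() so directly transmitted QoS data frames also update the aggregation state; the added ieee80211_is_data_present() test skips QoS null frames, which carry no payload. The "+ 0x10" works because seq_ctrl packs the fragment number in bits 0-3 and the 12-bit sequence number in bits 4-15, so adding 0x10 advances the expected starting sequence number by one MPDU. A quick check of that bit layout:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t seq_ctrl = 0x01c0;     /* sequence 28, fragment 0 */
        uint16_t next = seq_ctrl + 0x10;

        printf("seq %u -> next agg ssn starts at seq %u\n",
               seq_ctrl >> 4, next >> 4);       /* 28 -> 29 */
        return 0;
}
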
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 7780b07543bb..5f0faf07c346 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"
@@ -109,6 +110,7 @@ u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
return ret;
}
+EXPORT_SYMBOL_GPL(mt76u_rr);
/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
@@ -140,6 +142,7 @@ void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
__mt76u_wr(dev, addr, val);
mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
+EXPORT_SYMBOL_GPL(mt76u_wr);
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
u32 mask, u32 val)
@@ -187,6 +190,60 @@ void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
EXPORT_SYMBOL_GPL(mt76u_single_wr);
static int
+mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (len > 0) {
+ __mt76u_wr(dev, base + data->reg, data->value);
+ len--;
+ data++;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+
+ return 0;
+}
+
+static int
+mt76u_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+ return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
+ else
+ return mt76u_req_wr_rp(dev, base, data, n);
+}
+
+static int
+mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
+ int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (len > 0) {
+ data->value = __mt76u_rr(dev, base + data->reg);
+ len--;
+ data++;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+
+ return 0;
+}
+
+static int
+mt76u_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+ return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
+ else
+ return mt76u_req_rd_rp(dev, base, data, n);
+}
+
+static int
mt76u_set_endpoints(struct usb_interface *intf,
struct mt76_usb *usb)
{
@@ -219,15 +276,17 @@ static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
int nsgs, int len, int sglen)
{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct urb *urb = buf->urb;
int i;
+ spin_lock_bh(&q->rx_page_lock);
for (i = 0; i < nsgs; i++) {
struct page *page;
void *data;
int offset;
- data = netdev_alloc_frag(len);
+ data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
if (!data)
break;
@@ -235,6 +294,7 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
offset = data - page_address(page);
sg_set_page(&urb->sg[i], page, sglen, offset);
}
+ spin_unlock_bh(&q->rx_page_lock);
if (i < nsgs) {
int j;
@@ -258,7 +318,7 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
if (!buf->urb)
return -ENOMEM;
- buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
+ buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
gfp);
if (!buf->urb->sg)
return -ENOMEM;
@@ -326,9 +386,9 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
MT_FCE_INFO_LEN;
- if (data_len < min_len || WARN_ON(!dma_len) ||
- WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
- WARN_ON(dma_len & 0x3))
+ if (data_len < min_len || !dma_len ||
+ dma_len + MT_DMA_HDR_LEN > data_len ||
+ (dma_len & 0x3))
return -EINVAL;
return dma_len;
}
@@ -463,9 +523,10 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int i, err, nsgs;
+ spin_lock_init(&q->rx_page_lock);
spin_lock_init(&q->lock);
- q->entry = devm_kzalloc(dev->dev,
- MT_NUM_RX_ENTRIES * sizeof(*q->entry),
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_RX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
@@ -494,10 +555,21 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
static void mt76u_free_rx(struct mt76_dev *dev)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct page *page;
int i;
for (i = 0; i < q->ndesc; i++)
mt76u_buf_free(&q->entry[i].ubuf);
+
+ spin_lock_bh(&q->rx_page_lock);
+ if (!q->rx_page.va)
+ goto out;
+
+ page = virt_to_page(q->rx_page.va);
+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+ memset(&q->rx_page, 0, sizeof(q->rx_page));
+out:
+ spin_unlock_bh(&q->rx_page_lock);
}
static void mt76u_stop_rx(struct mt76_dev *dev)
@@ -509,40 +581,6 @@ static void mt76u_stop_rx(struct mt76_dev *dev)
usb_kill_urb(q->entry[i].ubuf.urb);
}
-int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
-{
- struct sk_buff *iter, *last = skb;
- u32 info, pad;
-
- /* Buffer layout:
- * | 4B | xfer len | pad | 4B |
- * | TXINFO | pkt/cmd | zero pad to 4B | zero |
- *
- * length field of TXINFO should be set to 'xfer len'.
- */
- info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
- FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
- put_unaligned_le32(info, skb_push(skb, sizeof(info)));
-
- pad = round_up(skb->len, 4) + 4 - skb->len;
- skb_walk_frags(skb, iter) {
- last = iter;
- if (!iter->next) {
- skb->data_len += pad;
- skb->len += pad;
- break;
- }
- }
-
- if (unlikely(pad)) {
- if (__skb_pad(last, pad, true))
- return -ENOMEM;
- __skb_put(last, pad);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
-
static void mt76u_tx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
@@ -715,10 +753,10 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
q = &dev->q_tx[i];
spin_lock_init(&q->lock);
INIT_LIST_HEAD(&q->swq);
- q->hw_idx = q2hwq(i);
+ q->hw_idx = mt76_ac_to_hwq(i);
- q->entry = devm_kzalloc(dev->dev,
- MT_NUM_TX_ENTRIES * sizeof(*q->entry),
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_TX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
@@ -822,6 +860,9 @@ int mt76u_init(struct mt76_dev *dev,
.wr = mt76u_wr,
.rmw = mt76u_rmw,
.copy = mt76u_copy,
+ .wr_rp = mt76u_wr_rp,
+ .rd_rp = mt76u_rd_rp,
+ .type = MT76_BUS_USB,
};
struct mt76_usb *usb = &dev->usb;
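
Two things change shape in usb.c above: register-pair reads and writes now dispatch through mcu_wr_rp()/mcu_rd_rp() once MT76_STATE_MCU_RUNNING is set, and the rx path swaps netdev_alloc_frag() for a queue-local page_frag_cache guarded by the new rx_page_lock. The latter means teardown owes the allocator a drain of the page references it accumulated, which mt76u_free_rx() now performs. A sketch of that drain pattern under the same assumptions (kernel context, field names as in the hunks above):

/* Sketch only: release the queue's page-fragment cache. The cache holds
 * one page with an inflated refcount (pagecnt_bias), which must be handed
 * back via __page_frag_cache_drain() before the queue goes away. */
static void rx_page_cache_drain(struct mt76_queue *q)
{
        struct page *page;

        spin_lock_bh(&q->rx_page_lock);
        if (q->rx_page.va) {
                page = virt_to_page(q->rx_page.va);
                __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
                memset(&q->rx_page, 0, sizeof(q->rx_page));
        }
        spin_unlock_bh(&q->rx_page_lock);
}
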
diff --git a/drivers/net/wireless/mediatek/mt76/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
index 070be803d463..036be4163e69 100644
--- a/drivers/net/wireless/mediatek/mt76/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
@@ -14,32 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/firmware.h>
-
#include "mt76.h"
-#include "dma.h"
-
-#define MT_CMD_HDR_LEN 4
-
-#define MT_FCE_DMA_ADDR 0x0230
-#define MT_FCE_DMA_LEN 0x0234
-
-#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
-
-struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len)
-{
- struct sk_buff *skb;
-
- skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, MT_CMD_HDR_LEN);
- skb_put_data(skb, data, len);
-
- return skb;
-}
-EXPORT_SYMBOL_GPL(mt76u_mcu_msg_alloc);
void mt76u_mcu_complete_urb(struct urb *urb)
{
@@ -49,176 +24,6 @@ void mt76u_mcu_complete_urb(struct urb *urb)
}
EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb);
-static int mt76u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
-{
- struct mt76_usb *usb = &dev->usb;
- struct mt76u_buf *buf = &usb->mcu.res;
- int i, ret;
- u32 rxfce;
-
- for (i = 0; i < 5; i++) {
- if (!wait_for_completion_timeout(&usb->mcu.cmpl,
- msecs_to_jiffies(300)))
- continue;
-
- if (buf->urb->status)
- return -EIO;
-
- rxfce = get_unaligned_le32(sg_virt(&buf->urb->sg[0]));
- ret = mt76u_submit_buf(dev, USB_DIR_IN,
- MT_EP_IN_CMD_RESP,
- buf, GFP_KERNEL,
- mt76u_mcu_complete_urb,
- &usb->mcu.cmpl);
- if (ret)
- return ret;
-
- if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce))
- return 0;
-
- dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
- FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
- seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
- }
-
- dev_err(dev->dev, "error: %s timed out\n", __func__);
- return -ETIMEDOUT;
-}
-
-int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
- int cmd, bool wait_resp)
-{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
- struct mt76_usb *usb = &dev->usb;
- unsigned int pipe;
- int ret, sent;
- u8 seq = 0;
- u32 info;
-
- if (test_bit(MT76_REMOVED, &dev->state))
- return 0;
-
- mutex_lock(&usb->mcu.mutex);
-
- pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
- if (wait_resp) {
- seq = ++usb->mcu.msg_seq & 0xf;
- if (!seq)
- seq = ++usb->mcu.msg_seq & 0xf;
- }
-
- info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
- FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
- MT_MCU_MSG_TYPE_CMD;
- ret = mt76u_skb_dma_info(skb, CPU_TX_PORT, info);
- if (ret)
- goto out;
-
- ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
- if (ret)
- goto out;
-
- if (wait_resp)
- ret = mt76u_mcu_wait_resp(dev, seq);
-
-out:
- mutex_unlock(&usb->mcu.mutex);
-
- consume_skb(skb);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(mt76u_mcu_send_msg);
-
-void mt76u_mcu_fw_reset(struct mt76_dev *dev)
-{
- mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR,
- 0x1, 0, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(mt76u_mcu_fw_reset);
-
-static int
-__mt76u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
- const void *fw_data, int len, u32 dst_addr)
-{
- u8 *data = sg_virt(&buf->urb->sg[0]);
- DECLARE_COMPLETION_ONSTACK(cmpl);
- __le32 info;
- u32 val;
- int err;
-
- info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
- FIELD_PREP(MT_MCU_MSG_LEN, len) |
- MT_MCU_MSG_TYPE_CMD);
-
- memcpy(data, &info, sizeof(info));
- memcpy(data + sizeof(info), fw_data, len);
- memset(data + sizeof(info) + len, 0, 4);
-
- mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
- MT_FCE_DMA_ADDR, dst_addr);
- len = roundup(len, 4);
- mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
- MT_FCE_DMA_LEN, len << 16);
-
- buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
- err = mt76u_submit_buf(dev, USB_DIR_OUT,
- MT_EP_OUT_INBAND_CMD,
- buf, GFP_KERNEL,
- mt76u_mcu_complete_urb, &cmpl);
- if (err < 0)
- return err;
-
- if (!wait_for_completion_timeout(&cmpl,
- msecs_to_jiffies(1000))) {
- dev_err(dev->dev, "firmware upload timed out\n");
- usb_kill_urb(buf->urb);
- return -ETIMEDOUT;
- }
-
- if (mt76u_urb_error(buf->urb)) {
- dev_err(dev->dev, "firmware upload failed: %d\n",
- buf->urb->status);
- return buf->urb->status;
- }
-
- val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
- val++;
- mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
-
- return 0;
-}
-
-int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
- int data_len, u32 max_payload, u32 offset)
-{
- int err, len, pos = 0, max_len = max_payload - 8;
- struct mt76u_buf buf;
-
- err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
- GFP_KERNEL);
- if (err < 0)
- return err;
-
- while (data_len > 0) {
- len = min_t(int, data_len, max_len);
- err = __mt76u_mcu_fw_send_data(dev, &buf, data + pos,
- len, offset + pos);
- if (err < 0)
- break;
-
- data_len -= len;
- pos += len;
- usleep_range(5000, 10000);
- }
- mt76u_buf_free(&buf);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(mt76u_mcu_fw_send_data);
-
int mt76u_mcu_init_rx(struct mt76_dev *dev)
{
struct mt76_usb *usb = &dev->usb;
@@ -240,3 +45,12 @@ int mt76u_mcu_init_rx(struct mt76_dev *dev)
return err;
}
EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx);
+
+void mt76u_mcu_deinit(struct mt76_dev *dev)
+{
+ struct mt76_usb *usb = &dev->usb;
+
+ usb_kill_urb(usb->mcu.res.urb);
+ mt76u_buf_free(&usb->mcu.res);
+}
+EXPORT_SYMBOL_GPL(mt76u_mcu_deinit);
diff --git a/drivers/net/wireless/quantenna/Kconfig b/drivers/net/wireless/quantenna/Kconfig
index de84ce125c26..7628d9c1ea6a 100644
--- a/drivers/net/wireless/quantenna/Kconfig
+++ b/drivers/net/wireless/quantenna/Kconfig
@@ -1,7 +1,7 @@
config WLAN_VENDOR_QUANTENNA
bool "Quantenna wireless cards support"
default y
- ---help---
+ help
If you have a wireless card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
index 8d1492a90bd1..b8c12a5f16b4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Kconfig
+++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
@@ -11,7 +11,7 @@ config QTNFMAC_PEARL_PCIE
select QTNFMAC
select FW_LOADER
select CRC32
- ---help---
+ help
This option adds support for wireless adapters based on Quantenna
802.11ac QSR10g (aka Pearl) FullMAC chipset running over PCIe.
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Makefile b/drivers/net/wireless/quantenna/qtnfmac/Makefile
index 97f760a3d599..17cd7adb4109 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Makefile
+++ b/drivers/net/wireless/quantenna/qtnfmac/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_QTNFMAC_PEARL_PCIE) += qtnfmac_pearl_pcie.o
qtnfmac_pearl_pcie-objs += \
shm_ipc.o \
- pearl/pcie.o
+ pcie/pcie.o \
+ pcie/pearl_pcie.o
qtnfmac_pearl_pcie-$(CONFIG_DEBUG_FS) += debug.o
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
index 323e47cea1e2..528ca7f5e070 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/bus.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -20,6 +20,9 @@
#include <linux/netdevice.h>
#include <linux/workqueue.h>
+#include "trans.h"
+#include "core.h"
+
#define QTNF_MAX_MAC 3
enum qtnf_fw_state {
@@ -57,10 +60,8 @@ struct qtnf_bus {
struct qtnf_wmac *mac[QTNF_MAX_MAC];
struct qtnf_qlink_transport trans;
struct qtnf_hw_info hw_info;
- char fwname[32];
struct napi_struct mux_napi;
struct net_device mux_dev;
- struct completion firmware_init_complete;
struct workqueue_struct *workqueue;
struct work_struct fw_work;
struct work_struct event_work;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 4aa332f4646b..51b33ec78fac 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -141,8 +141,8 @@ qtnf_change_virtual_intf(struct wiphy *wiphy,
ret = qtnf_cmd_send_change_intf_type(vif, type, mac_addr);
if (ret) {
- pr_err("VIF%u.%u: failed to change VIF type: %d\n",
- vif->mac->macid, vif->vifid, ret);
+ pr_err("VIF%u.%u: failed to change type to %d\n",
+ vif->mac->macid, vif->vifid, type);
return ret;
}
@@ -216,7 +216,6 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
eth_zero_addr(vif->mac_addr);
eth_zero_addr(vif->bssid);
vif->bss_priority = QTNF_DEF_BSS_PRIORITY;
- vif->sta_state = QTNF_STA_DISCONNECTED;
memset(&vif->wdev, 0, sizeof(vif->wdev));
vif->wdev.wiphy = wiphy;
vif->wdev.iftype = type;
@@ -229,18 +228,22 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
if (params)
mac_addr = params->macaddr;
- if (qtnf_cmd_send_add_intf(vif, type, mac_addr)) {
- pr_err("VIF%u.%u: failed to add VIF\n", mac->macid, vif->vifid);
+ ret = qtnf_cmd_send_add_intf(vif, type, mac_addr);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to add VIF %pM\n",
+ mac->macid, vif->vifid, mac_addr);
goto err_cmd;
}
if (!is_valid_ether_addr(vif->mac_addr)) {
pr_err("VIF%u.%u: FW reported bad MAC: %pM\n",
mac->macid, vif->vifid, vif->mac_addr);
+ ret = -EINVAL;
goto err_mac;
}
- if (qtnf_core_net_attach(mac, vif, name, name_assign_t)) {
+ ret = qtnf_core_net_attach(mac, vif, name, name_assign_t);
+ if (ret) {
pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid,
vif->vifid);
goto err_net;
@@ -256,7 +259,7 @@ err_mac:
err_cmd:
vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
- return ERR_PTR(-EFAULT);
+ return ERR_PTR(ret);
}
static int qtnf_mgmt_set_appie(struct qtnf_vif *vif,
@@ -335,12 +338,11 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
qtnf_scan_done(vif->mac, true);
ret = qtnf_cmd_send_stop_ap(vif);
- if (ret) {
+ if (ret)
pr_err("VIF%u.%u: failed to stop AP operation in FW\n",
vif->mac->macid, vif->vifid);
- netif_carrier_off(vif->netdev);
- }
+ netif_carrier_off(vif->netdev);
return ret;
}
@@ -478,19 +480,31 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
const struct qtnf_sta_node *sta_node;
int ret;
- sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx);
+ switch (vif->wdev.iftype) {
+ case NL80211_IFTYPE_STATION:
+ if (idx != 0 || !vif->wdev.current_bss)
+ return -ENOENT;
- if (unlikely(!sta_node))
- return -ENOENT;
+ ether_addr_copy(mac, vif->bssid);
+ break;
+ case NL80211_IFTYPE_AP:
+ sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx);
+ if (unlikely(!sta_node))
+ return -ENOENT;
- ether_addr_copy(mac, sta_node->mac_addr);
+ ether_addr_copy(mac, sta_node->mac_addr);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
- ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo);
+ ret = qtnf_cmd_get_sta_info(vif, mac, sinfo);
- if (unlikely(ret == -ENOENT)) {
- qtnf_sta_list_del(vif, mac);
- cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
- sinfo->filled = 0;
+ if (vif->wdev.iftype == NL80211_IFTYPE_AP) {
+ if (ret == -ENOENT) {
+ cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
+ sinfo->filled = 0;
+ }
}
sinfo->generation = vif->generation;
@@ -521,9 +535,16 @@ static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
int ret;
ret = qtnf_cmd_send_del_key(vif, key_index, pairwise, mac_addr);
- if (ret)
- pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n",
- vif->mac->macid, vif->vifid, key_index, pairwise);
+ if (ret) {
+ if (ret == -ENOENT) {
+ pr_debug("VIF%u.%u: key index %d out of bounds\n",
+ vif->mac->macid, vif->vifid, key_index);
+ } else {
+ pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n",
+ vif->mac->macid, vif->vifid,
+ key_index, pairwise);
+ }
+ }
return ret;
}
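The reworked logging above picks severity by errno: the expected -ENOENT (key already gone) is demoted to pr_debug(), while anything else stays at pr_err(). A small illustrative helper, hypothetical and not from the driver:

#include <linux/printk.h>
#include <linux/errno.h>

static void demo_log_del_result(int ret)
{
	if (ret == -ENOENT)
		pr_debug("nothing to delete, treated as harmless\n");
	else if (ret)
		pr_err("delete failed: %d\n", ret);
}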
@@ -590,6 +611,7 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
if (ret)
pr_err("VIF%u.%u: failed to delete STA %pM\n",
vif->mac->macid, vif->vifid, params->mac);
+
return ret;
}
@@ -597,21 +619,25 @@ static int
qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ int ret;
cancel_delayed_work_sync(&mac->scan_timeout);
mac->scan_req = request;
- if (qtnf_cmd_send_scan(mac)) {
+ ret = qtnf_cmd_send_scan(mac);
+ if (ret) {
pr_err("MAC%u: failed to start scan\n", mac->macid);
mac->scan_req = NULL;
- return -EFAULT;
+ goto out;
}
+ pr_debug("MAC%u: scan started\n", mac->macid);
queue_delayed_work(mac->bus->workqueue, &mac->scan_timeout,
QTNF_SCAN_TIMEOUT_SEC * HZ);
- return 0;
+out:
+ return ret;
}
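qtnf_scan() now keeps the firmware's errno instead of rewriting it to -EFAULT, and funnels all exits through a single label. The shape of that pattern, reduced to a sketch (demo_start() is a hypothetical stand-in for the command call):

#include <linux/errno.h>

static int demo_start(void)
{
	return -EBUSY; /* pretend the firmware refused */
}

static int demo_scan(void)
{
	int ret;

	ret = demo_start();
	if (ret)
		goto out; /* propagate the real errno */

	/* arm the scan-timeout watchdog only on success */
out:
	return ret;
}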
static int
@@ -624,9 +650,6 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
- if (vif->sta_state != QTNF_STA_DISCONNECTED)
- return -EBUSY;
-
if (sme->bssid)
ether_addr_copy(vif->bssid, sme->bssid);
else
@@ -634,13 +657,13 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
ret = qtnf_cmd_send_connect(vif, sme);
if (ret) {
- pr_err("VIF%u.%u: failed to connect\n", vif->mac->macid,
- vif->vifid);
- return ret;
+ pr_err("VIF%u.%u: failed to connect\n",
+ vif->mac->macid, vif->vifid);
+ goto out;
}
- vif->sta_state = QTNF_STA_CONNECTING;
- return 0;
+out:
+ return ret;
}
static int
@@ -662,22 +685,18 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
goto out;
}
- qtnf_scan_done(mac, true);
-
- if (vif->sta_state == QTNF_STA_DISCONNECTED)
- goto out;
-
ret = qtnf_cmd_send_disconnect(vif, reason_code);
- if (ret) {
- pr_err("VIF%u.%u: failed to disconnect\n", mac->macid,
- vif->vifid);
- goto out;
+ if (ret)
+ pr_err("VIF%u.%u: failed to disconnect\n",
+ mac->macid, vif->vifid);
+
+ if (vif->wdev.current_bss) {
+ netif_carrier_off(vif->netdev);
+ cfg80211_disconnected(vif->netdev, reason_code,
+ NULL, 0, true, GFP_KERNEL);
}
out:
- if (vif->sta_state == QTNF_STA_CONNECTING)
- vif->sta_state = QTNF_STA_DISCONNECTED;
-
return ret;
}
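With the driver-private sta_state enum removed, the disconnect path above treats cfg80211's wdev->current_bss as the single source of truth for association state. A one-line sketch of that check, assuming a kernel of this era where current_bss still lives directly in struct wireless_dev:

#include <net/cfg80211.h>

static bool demo_sta_connected(const struct wireless_dev *wdev)
{
	return wdev->current_bss != NULL;
}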
@@ -691,11 +710,8 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
const struct cfg80211_chan_def *chandef = &wdev->chandef;
struct ieee80211_channel *chan;
struct qtnf_chan_stats stats;
- struct qtnf_vif *vif;
int ret;
- vif = qtnf_netdev_get_priv(dev);
-
sband = wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
@@ -750,7 +766,6 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
default:
pr_debug("failed to get chan(%d) stats from card\n",
chan->hw_value);
- ret = -EINVAL;
break;
}
@@ -773,6 +788,7 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
ret = qtnf_cmd_get_channel(vif, chandef);
if (ret) {
pr_err("%s: failed to get channel: %d\n", ndev->name, ret);
+ ret = -ENODATA;
goto out;
}
@@ -782,6 +798,7 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
chandef->center_freq1, chandef->center_freq2,
chandef->width);
ret = -ENODATA;
+ goto out;
}
out:
@@ -851,10 +868,8 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY :
QLINK_PM_OFF, timeout);
- if (ret) {
+ if (ret)
pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret);
- return ret;
- }
return ret;
}
@@ -974,9 +989,16 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
ret = qtnf_cmd_reg_notify(bus, req);
if (ret) {
- if (ret != -EOPNOTSUPP && ret != -EALREADY)
+ if (ret == -EOPNOTSUPP) {
+ pr_warn("reg update not supported\n");
+ } else if (ret == -EALREADY) {
+ pr_info("regulatory domain is already set to %c%c",
+ req->alpha2[0], req->alpha2[1]);
+ } else {
pr_err("failed to update reg domain to %c%c\n",
req->alpha2[0], req->alpha2[1]);
+ }
+
return;
}
@@ -1091,6 +1113,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
+ if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_DWELL)
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+
wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
@@ -1109,6 +1135,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN))
+ wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN;
+
#ifdef CONFIG_PM
if (macinfo->wowlan)
wiphy->wowlan = macinfo->wowlan;
@@ -1123,6 +1152,15 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
}
+ if (mac->macinfo.extended_capabilities_len) {
+ wiphy->extended_capabilities =
+ mac->macinfo.extended_capabilities;
+ wiphy->extended_capabilities_mask =
+ mac->macinfo.extended_capabilities_mask;
+ wiphy->extended_capabilities_len =
+ mac->macinfo.extended_capabilities_len;
+ }
+
strlcpy(wiphy->fw_version, hw_info->fw_version,
sizeof(wiphy->fw_version));
wiphy->hw_version = hw_info->hw_version;
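The wiphy registration hunks above follow one pattern: advertise a cfg80211 feature only when the corresponding firmware capability bit is set, and pass firmware-provided extended capabilities through verbatim. A sketch of the first half, reusing the names from the hunk inside a hypothetical wrapper (QLINK_HW_CAPAB_SCAN_DWELL comes from the driver's qlink.h):

#include <net/cfg80211.h>

static void demo_apply_hw_capab(struct wiphy *wiphy, u32 hw_capab)
{
	/* only claim the feature if the firmware can actually honour it */
	if (hw_capab & QLINK_HW_CAPAB_SCAN_DWELL)
		wiphy_ext_feature_set(wiphy,
				      NL80211_EXT_FEATURE_SET_SCAN_DWELL);
}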
@@ -1146,7 +1184,8 @@ void qtnf_netdev_updown(struct net_device *ndev, bool up)
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
if (qtnf_cmd_send_updown_intf(vif, up))
- pr_err("failed to send up/down command to FW\n");
+ pr_err("failed to send %s command to VIF%u.%u\n",
+ up ? "UP" : "DOWN", vif->mac->macid, vif->vifid);
}
void qtnf_virtual_intf_cleanup(struct net_device *ndev)
@@ -1154,57 +1193,20 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev)
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
struct qtnf_wmac *mac = wiphy_priv(vif->wdev.wiphy);
- if (vif->wdev.iftype == NL80211_IFTYPE_STATION) {
- switch (vif->sta_state) {
- case QTNF_STA_DISCONNECTED:
- break;
- case QTNF_STA_CONNECTING:
- cfg80211_connect_result(vif->netdev,
- vif->bssid, NULL, 0,
- NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- qtnf_disconnect(vif->wdev.wiphy, ndev,
- WLAN_REASON_DEAUTH_LEAVING);
- break;
- case QTNF_STA_CONNECTED:
- cfg80211_disconnected(vif->netdev,
- WLAN_REASON_DEAUTH_LEAVING,
- NULL, 0, 1, GFP_KERNEL);
- qtnf_disconnect(vif->wdev.wiphy, ndev,
- WLAN_REASON_DEAUTH_LEAVING);
- break;
- }
-
- vif->sta_state = QTNF_STA_DISCONNECTED;
- }
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION)
+ qtnf_disconnect(vif->wdev.wiphy, ndev,
+ WLAN_REASON_DEAUTH_LEAVING);
qtnf_scan_done(mac, true);
}
void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
{
- if (vif->wdev.iftype == NL80211_IFTYPE_STATION) {
- switch (vif->sta_state) {
- case QTNF_STA_CONNECTING:
- cfg80211_connect_result(vif->netdev,
- vif->bssid, NULL, 0,
- NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- break;
- case QTNF_STA_CONNECTED:
- cfg80211_disconnected(vif->netdev,
- WLAN_REASON_DEAUTH_LEAVING,
- NULL, 0, 1, GFP_KERNEL);
- break;
- case QTNF_STA_DISCONNECTED:
- break;
- }
- }
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION)
+ cfg80211_disconnected(vif->netdev, WLAN_REASON_DEAUTH_LEAVING,
+ NULL, 0, 1, GFP_KERNEL);
cfg80211_shutdown_all_interfaces(vif->wdev.wiphy);
- vif->sta_state = QTNF_STA_DISCONNECTED;
}
void qtnf_band_init_rates(struct ieee80211_supported_band *band)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index ae9e77300533..bfdc1ad30c13 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -80,7 +80,6 @@ static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode)
static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
struct sk_buff *cmd_skb,
struct sk_buff **response_skb,
- u16 *result_code,
size_t const_resp_size,
size_t *var_resp_size)
{
@@ -88,7 +87,8 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
const struct qlink_resp *resp;
struct sk_buff *resp_skb = NULL;
u16 cmd_id;
- u8 mac_id, vif_id;
+ u8 mac_id;
+ u8 vif_id;
int ret;
cmd = (struct qlink_cmd *)cmd_skb->data;
@@ -97,8 +97,11 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
vif_id = cmd->vifid;
cmd->mhdr.len = cpu_to_le16(cmd_skb->len);
- if (unlikely(bus->fw_state != QTNF_FW_STATE_ACTIVE &&
- le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT)) {
+ pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id,
+ le16_to_cpu(cmd->cmd_id));
+
+ if (bus->fw_state != QTNF_FW_STATE_ACTIVE &&
+ le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT) {
pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n",
mac_id, vif_id, le16_to_cpu(cmd->cmd_id),
bus->fw_state);
@@ -106,24 +109,16 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
return -ENODEV;
}
- pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id,
- le16_to_cpu(cmd->cmd_id));
-
ret = qtnf_trans_send_cmd_with_resp(bus, cmd_skb, &resp_skb);
-
- if (unlikely(ret))
+ if (ret)
goto out;
resp = (const struct qlink_resp *)resp_skb->data;
ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id,
const_resp_size);
-
- if (unlikely(ret))
+ if (ret)
goto out;
- if (likely(result_code))
- *result_code = le16_to_cpu(resp->result);
-
/* Return length of variable part of response */
if (response_skb && var_resp_size)
*var_resp_size = le16_to_cpu(resp->mhdr.len) - const_resp_size;
@@ -134,14 +129,18 @@ out:
else
consume_skb(resp_skb);
+ if (!ret && resp)
+ return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result));
+
+ pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n",
+ mac_id, vif_id, le16_to_cpu(cmd->cmd_id), ret);
+
return ret;
}
-static inline int qtnf_cmd_send(struct qtnf_bus *bus,
- struct sk_buff *cmd_skb,
- u16 *result_code)
+static inline int qtnf_cmd_send(struct qtnf_bus *bus, struct sk_buff *cmd_skb)
{
- return qtnf_cmd_send_with_reply(bus, cmd_skb, NULL, result_code,
+ return qtnf_cmd_send_with_reply(bus, cmd_skb, NULL,
sizeof(struct qlink_resp), NULL);
}
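This is the core of the refactoring: qtnf_cmd_send_with_reply() now decodes the firmware result code itself (via qtnf_cmd_resp_result_decode()) and hands callers a plain errno, which is why every per-command "res_code != QLINK_CMD_RESULT_OK" block below can be deleted. A reduced sketch of the decode-once idea, with invented demo_* names and example code values:

#include <linux/errno.h>
#include <linux/types.h>

static int demo_decode(u16 qcode)
{
	switch (qcode) {
	case 0: /* result OK */
		return 0;
	case 1: /* result "invalid", for illustration */
		return -EINVAL;
	default:
		return -EFAULT;
	}
}

static int demo_cmd_send(u16 fw_result)
{
	/* callers check a single int; no duplicated decode blocks */
	return demo_decode(fw_result);
}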
@@ -228,7 +227,6 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
struct sk_buff *cmd_skb;
struct qlink_cmd_start_ap *cmd;
struct qlink_auth_encr *aen;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
int i;
@@ -329,30 +327,21 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
}
qtnf_bus_lock(vif->mac->bus);
-
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
netif_carrier_on(vif->netdev);
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
{
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -362,23 +351,13 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
-
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
-
- netif_carrier_off(vif->netdev);
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -386,7 +365,6 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg)
{
struct sk_buff *cmd_skb;
struct qlink_cmd_mgmt_frame_register *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -401,20 +379,13 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg)
cmd->frame_type = cpu_to_le16(frame_type);
cmd->do_register = reg;
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -423,7 +394,6 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_mgmt_frame_tx *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) {
@@ -448,20 +418,13 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
if (len && buf)
qtnf_cmd_skb_put_buffer(cmd_skb, buf, len);
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
- goto out;
- }
-
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -469,7 +432,6 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
const u8 *buf, size_t len)
{
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
if (len > QTNF_MAX_CMD_BUF_SIZE) {
@@ -487,21 +449,13 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
qtnf_cmd_tlv_ie_set_add(cmd_skb, frame_type, buf, len);
qtnf_bus_lock(vif->mac->bus);
-
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u frame %u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, frame_type, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -544,6 +498,9 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
rate_dst->flags |= RATE_INFO_FLAGS_MCS;
else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS)
rate_dst->flags |= RATE_INFO_FLAGS_VHT_MCS;
+
+ if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI)
+ rate_dst->flags |= RATE_INFO_FLAGS_SHORT_GI;
}
static void
@@ -730,7 +687,6 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
struct qlink_cmd_get_sta_info *cmd;
const struct qlink_resp_get_sta_info *resp;
size_t var_resp_len;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -745,31 +701,13 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
ether_addr_copy(cmd->sta_addr, sta_mac);
ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb,
- &res_code, sizeof(*resp),
- &var_resp_len);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- switch (res_code) {
- case QLINK_CMD_RESULT_ENOTFOUND:
- pr_warn("VIF%u.%u: %pM STA not found\n",
- vif->mac->macid, vif->vifid, sta_mac);
- ret = -ENOENT;
- break;
- default:
- pr_err("VIF%u.%u: can't get info for %pM: %u\n",
- vif->mac->macid, vif->vifid, sta_mac, res_code);
- ret = -EFAULT;
- break;
- }
+ sizeof(*resp), &var_resp_len);
+ if (ret)
goto out;
- }
resp = (const struct qlink_resp_get_sta_info *)resp_skb->data;
- if (unlikely(!ether_addr_equal(sta_mac, resp->sta_addr))) {
+ if (!ether_addr_equal(sta_mac, resp->sta_addr)) {
pr_err("VIF%u.%u: wrong mac in reply: %pM != %pM\n",
vif->mac->macid, vif->vifid, resp->sta_addr, sta_mac);
ret = -EINVAL;
@@ -795,7 +733,6 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
struct sk_buff *cmd_skb, *resp_skb = NULL;
struct qlink_cmd_manage_intf *cmd;
const struct qlink_resp_manage_intf *resp;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -828,17 +765,9 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif,
eth_zero_addr(cmd->intf_info.mac_addr);
ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb,
- &res_code, sizeof(*resp), NULL);
-
- if (unlikely(ret))
- goto out;
-
- ret = qtnf_cmd_resp_result_decode(res_code);
- if (ret) {
- pr_err("VIF%u.%u: CMD %d failed: %u\n", vif->mac->macid,
- vif->vifid, cmd_type, res_code);
+ sizeof(*resp), NULL);
+ if (ret)
goto out;
- }
resp = (const struct qlink_resp_manage_intf *)resp_skb->data;
ether_addr_copy(vif->mac_addr, resp->intf_info.mac_addr);
@@ -868,7 +797,6 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
{
struct sk_buff *cmd_skb;
struct qlink_cmd_manage_intf *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -897,17 +825,9 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
eth_zero_addr(cmd->intf_info.mac_addr);
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
@@ -1353,8 +1273,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
ext_capa_mask = NULL;
}
- kfree(mac->macinfo.extended_capabilities);
- kfree(mac->macinfo.extended_capabilities_mask);
+ qtnf_mac_ext_caps_free(mac);
mac->macinfo.extended_capabilities = ext_capa;
mac->macinfo.extended_capabilities_mask = ext_capa_mask;
mac->macinfo.extended_capabilities_len = ext_capa_len;
@@ -1732,7 +1651,6 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
struct sk_buff *cmd_skb, *resp_skb = NULL;
const struct qlink_resp_get_mac_info *resp;
size_t var_data_len;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
@@ -1742,18 +1660,11 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
return -ENOMEM;
qtnf_bus_lock(mac->bus);
-
- ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
sizeof(*resp), &var_data_len);
- if (unlikely(ret))
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
- ret = -EFAULT;
- goto out;
- }
-
resp = (const struct qlink_resp_get_mac_info *)resp_skb->data;
qtnf_cmd_resp_proc_mac_info(mac, resp);
ret = qtnf_parse_variable_mac_info(mac, resp->var_info, var_data_len);
@@ -1769,7 +1680,6 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
const struct qlink_resp_get_hw_info *resp;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
size_t info_len;
@@ -1780,18 +1690,10 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
return -ENOMEM;
qtnf_bus_lock(bus);
-
- ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
sizeof(*resp), &info_len);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("cmd exec failed: 0x%.4X\n", res_code);
- ret = -EFAULT;
+ if (ret)
goto out;
- }
resp = (const struct qlink_resp_get_hw_info *)resp_skb->data;
ret = qtnf_cmd_resp_proc_hw_info(bus, resp, info_len);
@@ -1810,7 +1712,6 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
size_t info_len;
struct qlink_cmd_band_info_get *cmd;
struct qlink_resp_band_info_get *resp;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
u8 qband;
@@ -1838,18 +1739,10 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
cmd->band = qband;
qtnf_bus_lock(mac->bus);
-
- ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
sizeof(*resp), &info_len);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
- ret = -EFAULT;
+ if (ret)
goto out;
- }
resp = (struct qlink_resp_band_info_get *)resp_skb->data;
if (resp->band != qband) {
@@ -1873,7 +1766,6 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
struct sk_buff *cmd_skb, *resp_skb = NULL;
size_t response_size;
struct qlink_resp_phy_params *resp;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
@@ -1883,19 +1775,11 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
return -ENOMEM;
qtnf_bus_lock(mac->bus);
-
- ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
sizeof(*resp), &response_size);
-
- if (unlikely(ret))
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
- ret = -EFAULT;
- goto out;
- }
-
resp = (struct qlink_resp_phy_params *)resp_skb->data;
ret = qtnf_cmd_resp_proc_phy_params(mac, resp->info, response_size);
@@ -1910,7 +1794,6 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
@@ -1931,26 +1814,19 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
wiphy->coverage_class);
- ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(mac->bus);
+
return ret;
}
int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
{
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
@@ -1960,20 +1836,13 @@ int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
return -ENOMEM;
qtnf_bus_lock(bus);
-
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("cmd exec failed: 0x%.4X\n", res_code);
- ret = -EFAULT;
- goto out;
- }
-
out:
qtnf_bus_unlock(bus);
+
return ret;
}
@@ -1988,9 +1857,7 @@ void qtnf_cmd_send_deinit_fw(struct qtnf_bus *bus)
return;
qtnf_bus_lock(bus);
-
- qtnf_cmd_send(bus, cmd_skb, NULL);
-
+ qtnf_cmd_send(bus, cmd_skb);
qtnf_bus_unlock(bus);
}
@@ -1999,7 +1866,6 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_add_key *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2031,19 +1897,13 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
params->seq,
params->seq_len);
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n",
- vif->mac->macid, vif->vifid, res_code);
- ret = -EFAULT;
- goto out;
- }
-
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2052,7 +1912,6 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_del_key *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2072,19 +1931,14 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise,
cmd->key_index = key_index;
cmd->pairwise = pairwise;
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
- goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n",
- vif->mac->macid, vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2093,7 +1947,6 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_set_def_key *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2108,19 +1961,14 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index,
cmd->key_index = key_index;
cmd->unicast = unicast;
cmd->multicast = multicast;
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
- goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2128,7 +1976,6 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index)
{
struct sk_buff *cmd_skb;
struct qlink_cmd_set_def_mgmt_key *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2141,19 +1988,14 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index)
cmd = (struct qlink_cmd_set_def_mgmt_key *)cmd_skb->data;
cmd->key_index = key_index;
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
- goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2183,7 +2025,6 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_change_sta *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2214,19 +2055,13 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
goto out;
}
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
- goto out;
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2235,7 +2070,6 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_del_sta *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2256,19 +2090,13 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif,
cmd->subtype = params->subtype;
cmd->reason_code = cpu_to_le16(params->reason_code);
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
- if (unlikely(ret))
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
- goto out;
- }
-
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2312,7 +2140,6 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
struct ieee80211_channel *sc;
struct cfg80211_scan_request *scan_req = mac->scan_req;
int n_channels;
@@ -2370,20 +2197,28 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
scan_req->mac_addr_mask);
}
- ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
+ if (scan_req->flags & NL80211_SCAN_FLAG_FLUSH) {
+ pr_debug("MAC%u: flush cache before scan\n", mac->macid);
- if (unlikely(ret))
- goto out;
+ qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH);
+ }
- pr_debug("MAC%u: scan started\n", mac->macid);
+ if (scan_req->duration) {
+ pr_debug("MAC%u: %s scan duration %u\n", mac->macid,
+ scan_req->duration_mandatory ? "mandatory" : "max",
+ scan_req->duration);
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code);
- ret = -EFAULT;
- goto out;
+ qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_SCAN_DWELL,
+ scan_req->duration);
}
+
+ ret = qtnf_cmd_send(mac->bus, cmd_skb);
+ if (ret)
+ goto out;
+
out:
qtnf_bus_unlock(mac->bus);
+
return ret;
}
@@ -2393,7 +2228,6 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
struct sk_buff *cmd_skb;
struct qlink_cmd_connect *cmd;
struct qlink_auth_encr *aen;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
int i;
u32 connect_flags = 0;
@@ -2474,20 +2308,13 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
qtnf_cmd_channel_tlv_add(cmd_skb, sme->channel);
qtnf_bus_lock(vif->mac->bus);
-
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
- goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2495,7 +2322,6 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
{
struct sk_buff *cmd_skb;
struct qlink_cmd_disconnect *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2509,19 +2335,13 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code)
cmd = (struct qlink_cmd_disconnect *)cmd_skb->data;
cmd->reason = cpu_to_le16(reason_code);
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
- goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2529,7 +2349,6 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up)
{
struct sk_buff *cmd_skb;
struct qlink_cmd_updown *cmd;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2542,20 +2361,13 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up)
cmd->if_up = !!up;
qtnf_bus_lock(vif->mac->bus);
-
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
- goto out;
- }
out:
qtnf_bus_unlock(vif->mac->bus);
+
return ret;
}
@@ -2563,7 +2375,6 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
{
struct sk_buff *cmd_skb;
int ret;
- u16 res_code;
struct qlink_cmd_reg_notify *cmd;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
@@ -2604,29 +2415,10 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
}
qtnf_bus_lock(bus);
-
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+ ret = qtnf_cmd_send(bus, cmd_skb);
if (ret)
goto out;
- switch (res_code) {
- case QLINK_CMD_RESULT_ENOTSUPP:
- pr_warn("reg update not supported\n");
- ret = -EOPNOTSUPP;
- break;
- case QLINK_CMD_RESULT_EALREADY:
- pr_info("regulatory domain is already set to %c%c",
- req->alpha2[0], req->alpha2[1]);
- ret = -EALREADY;
- break;
- case QLINK_CMD_RESULT_OK:
- ret = 0;
- break;
- default:
- ret = -EFAULT;
- break;
- }
-
out:
qtnf_bus_unlock(bus);
@@ -2640,7 +2432,6 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
struct qlink_cmd_get_chan_stats *cmd;
struct qlink_resp_get_chan_stats *resp;
size_t var_data_len;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
@@ -2654,25 +2445,10 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
cmd = (struct qlink_cmd_get_chan_stats *)cmd_skb->data;
cmd->channel = cpu_to_le16(channel);
- ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
sizeof(*resp), &var_data_len);
- if (unlikely(ret)) {
- qtnf_bus_unlock(mac->bus);
- return ret;
- }
-
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- switch (res_code) {
- case QLINK_CMD_RESULT_ENOTFOUND:
- ret = -ENOENT;
- break;
- default:
- pr_err("cmd exec failed: 0x%.4X\n", res_code);
- ret = -EFAULT;
- break;
- }
+ if (ret)
goto out;
- }
resp = (struct qlink_resp_get_chan_stats *)resp_skb->data;
ret = qtnf_cmd_resp_proc_chan_stat_info(stats, resp->info,
@@ -2681,6 +2457,7 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
out:
qtnf_bus_unlock(mac->bus);
consume_skb(resp_skb);
+
return ret;
}
@@ -2690,7 +2467,6 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct qtnf_wmac *mac = vif->mac;
struct qlink_cmd_chan_switch *cmd;
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid,
@@ -2707,32 +2483,13 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
cmd->block_tx = params->block_tx;
cmd->beacon_count = params->count;
- ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(mac->bus, cmd_skb);
+ if (ret)
goto out;
- switch (res_code) {
- case QLINK_CMD_RESULT_OK:
- ret = 0;
- break;
- case QLINK_CMD_RESULT_ENOTFOUND:
- ret = -ENOENT;
- break;
- case QLINK_CMD_RESULT_ENOTSUPP:
- ret = -EOPNOTSUPP;
- break;
- case QLINK_CMD_RESULT_EALREADY:
- ret = -EALREADY;
- break;
- case QLINK_CMD_RESULT_INVALID:
- default:
- ret = -EFAULT;
- break;
- }
-
out:
qtnf_bus_unlock(mac->bus);
+
return ret;
}
@@ -2742,7 +2499,6 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef)
const struct qlink_resp_channel_get *resp;
struct sk_buff *cmd_skb;
struct sk_buff *resp_skb = NULL;
- u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2752,25 +2508,18 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef)
return -ENOMEM;
qtnf_bus_lock(bus);
-
- ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code,
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
sizeof(*resp), NULL);
-
- qtnf_bus_unlock(bus);
-
- if (unlikely(ret))
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- ret = -ENODATA;
- goto out;
- }
-
resp = (const struct qlink_resp_channel_get *)resp_skb->data;
qlink_chandef_q2cfg(priv_to_wiphy(vif->mac), &resp->chan, chdef);
out:
+ qtnf_bus_unlock(bus);
consume_skb(resp_skb);
+
return ret;
}
@@ -2782,7 +2531,6 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
struct sk_buff *cmd_skb;
struct qlink_cmd_start_cac *cmd;
int ret;
- u16 res_code;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_START_CAC,
@@ -2795,19 +2543,12 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
qlink_chandef_cfg2q(chdef, &cmd->chan);
qtnf_bus_lock(bus);
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
- qtnf_bus_unlock(bus);
-
+ ret = qtnf_cmd_send(bus, cmd_skb);
if (ret)
- return ret;
+ goto out;
- switch (res_code) {
- case QLINK_CMD_RESULT_OK:
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
+out:
+ qtnf_bus_unlock(bus);
return ret;
}
@@ -2819,7 +2560,6 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
struct sk_buff *cmd_skb;
struct qlink_tlv_hdr *tlv;
size_t acl_size = qtnf_cmd_acl_data_size(params);
- u16 res_code;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -2834,22 +2574,12 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val);
qtnf_bus_lock(bus);
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
- qtnf_bus_unlock(bus);
-
- if (unlikely(ret))
- return ret;
+ ret = qtnf_cmd_send(bus, cmd_skb);
+ if (ret)
+ goto out;
- switch (res_code) {
- case QLINK_CMD_RESULT_OK:
- break;
- case QLINK_CMD_RESULT_INVALID:
- ret = -EINVAL;
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
+out:
+ qtnf_bus_unlock(bus);
return ret;
}
@@ -2858,7 +2588,6 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
{
struct qtnf_bus *bus = vif->mac->bus;
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
struct qlink_cmd_pm_set *cmd;
int ret = 0;
@@ -2873,18 +2602,13 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
qtnf_bus_lock(bus);
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("cmd exec failed: 0x%.4X\n", res_code);
- ret = -EFAULT;
- }
-
out:
qtnf_bus_unlock(bus);
+
return ret;
}
@@ -2893,7 +2617,6 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
{
struct qtnf_bus *bus = vif->mac->bus;
struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
struct qlink_cmd_wowlan_set *cmd;
u32 triggers = 0;
int count = 0;
@@ -2929,16 +2652,10 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
cmd->triggers = cpu_to_le32(triggers);
- ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
-
- if (unlikely(ret))
+ ret = qtnf_cmd_send(bus, cmd_skb);
+ if (ret)
goto out;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("cmd exec failed: 0x%.4X\n", res_code);
- ret = -EFAULT;
- }
-
out:
qtnf_bus_unlock(bus);
return ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 19abbc4e23e0..5d18a4a917c9 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -304,6 +304,19 @@ void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac)
}
}
+void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac)
+{
+ if (mac->macinfo.extended_capabilities_len) {
+ kfree(mac->macinfo.extended_capabilities);
+ mac->macinfo.extended_capabilities = NULL;
+
+ kfree(mac->macinfo.extended_capabilities_mask);
+ mac->macinfo.extended_capabilities_mask = NULL;
+
+ mac->macinfo.extended_capabilities_len = 0;
+ }
+}
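qtnf_mac_ext_caps_free() is written to be idempotent: free, NULL the pointers, zero the length, so calling it twice (once on re-parse, once on detach) is harmless. The generic idiom, as a sketch:

#include <linux/slab.h>

static void demo_buf_free(u8 **buf, size_t *len)
{
	kfree(*buf);
	*buf = NULL; /* a second call now does nothing */
	*len = 0;
}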
+
static void qtnf_vif_reset_handler(struct work_struct *work)
{
struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work);
@@ -370,6 +383,7 @@ static void qtnf_mac_scan_timeout(struct work_struct *work)
static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
unsigned int macid)
{
+ struct qtnf_vif *vif;
struct wiphy *wiphy;
struct qtnf_wmac *mac;
unsigned int i;
@@ -382,18 +396,20 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
mac->macid = macid;
mac->bus = bus;
+ mutex_init(&mac->mac_lock);
+ INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);
for (i = 0; i < QTNF_MAX_INTF; i++) {
- memset(&mac->iflist[i], 0, sizeof(struct qtnf_vif));
- mac->iflist[i].wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
- mac->iflist[i].mac = mac;
- mac->iflist[i].vifid = i;
- qtnf_sta_list_init(&mac->iflist[i].sta_list);
- mutex_init(&mac->mac_lock);
- INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);
- mac->iflist[i].stats64 =
- netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!mac->iflist[i].stats64)
+ vif = &mac->iflist[i];
+
+ memset(vif, 0, sizeof(*vif));
+ vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ vif->mac = mac;
+ vif->vifid = i;
+ qtnf_sta_list_init(&vif->sta_list);
+
+ vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!vif->stats64)
pr_warn("VIF%u.%u: per cpu stats allocation failed\n",
macid, i);
}
@@ -493,8 +509,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
}
qtnf_mac_iface_comb_free(mac);
- kfree(mac->macinfo.extended_capabilities);
- kfree(mac->macinfo.extended_capabilities_mask);
+ qtnf_mac_ext_caps_free(mac);
kfree(mac->macinfo.wowlan);
wiphy_free(wiphy);
bus->mac[macid] = NULL;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index a1e338a1f055..293055049caa 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -64,12 +64,6 @@ struct qtnf_sta_list {
atomic_t size;
};
-enum qtnf_sta_state {
- QTNF_STA_DISCONNECTED,
- QTNF_STA_CONNECTING,
- QTNF_STA_CONNECTED
-};
-
struct qtnf_vif {
struct wireless_dev wdev;
u8 bssid[ETH_ALEN];
@@ -77,7 +71,6 @@ struct qtnf_vif {
u8 vifid;
u8 bss_priority;
u8 bss_status;
- enum qtnf_sta_state sta_state;
u16 mgmt_frames_bitmask;
struct net_device *netdev;
struct qtnf_wmac *mac;
@@ -151,6 +144,7 @@ struct qtnf_hw_info {
struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
+void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac);
struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
const char *name, unsigned char name_assign_type);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 68da81bec4e9..8b542b431b75 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -171,24 +171,14 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
return -EPROTO;
}
- if (vif->sta_state != QTNF_STA_CONNECTING) {
- pr_err("VIF%u.%u: BSS_JOIN event when STA is not connecting\n",
- vif->mac->macid, vif->vifid);
- return -EPROTO;
- }
-
pr_debug("VIF%u.%u: BSSID:%pM\n", vif->mac->macid, vif->vifid,
join_info->bssid);
cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, NULL,
0, le16_to_cpu(join_info->status), GFP_KERNEL);
- if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS) {
- vif->sta_state = QTNF_STA_CONNECTED;
+ if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS)
netif_carrier_on(vif->netdev);
- } else {
- vif->sta_state = QTNF_STA_DISCONNECTED;
- }
return 0;
}
@@ -211,16 +201,10 @@ qtnf_event_handle_bss_leave(struct qtnf_vif *vif,
return -EPROTO;
}
- if (vif->sta_state != QTNF_STA_CONNECTED)
- pr_warn("VIF%u.%u: BSS_LEAVE event when STA is not connected\n",
- vif->mac->macid, vif->vifid);
-
pr_debug("VIF%u.%u: disconnected\n", vif->mac->macid, vif->vifid);
cfg80211_disconnected(vif->netdev, le16_to_cpu(leave_info->reason),
NULL, 0, 0, GFP_KERNEL);
-
- vif->sta_state = QTNF_STA_DISCONNECTED;
netif_carrier_off(vif->netdev);
return 0;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
new file mode 100644
index 000000000000..16795dbe475b
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Quantenna Communications, Inc. All rights reserved. */
+
+#include <linux/printk.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/seq_file.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+#include "pcie_priv.h"
+#include "bus.h"
+#include "shm_ipc.h"
+#include "core.h"
+#include "debug.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "qtnf_pcie: %s: " fmt, __func__
+
+#define QTN_SYSCTL_BAR 0
+#define QTN_SHMEM_BAR 2
+#define QTN_DMA_BAR 3
+
+int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+ int ret;
+
+ ret = qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("EP firmware is dead\n");
+ bus->fw_state = QTNF_FW_STATE_EP_DEAD;
+ }
+
+ return ret;
+}
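A transmit timeout here is treated as a dead endpoint: the bus state is latched to QTNF_FW_STATE_EP_DEAD so subsequent commands fail fast (see the fw_state check in qtnf_cmd_send_with_reply() earlier in this patch). The state-latching idea in miniature, with hypothetical names:

#include <linux/errno.h>

enum demo_fw_state { DEMO_FW_ACTIVE, DEMO_FW_EP_DEAD };

static int demo_control_tx(enum demo_fw_state *state, int send_ret)
{
	if (send_ret == -ETIMEDOUT)
		*state = DEMO_FW_EP_DEAD; /* later commands drop out early */

	return send_ret;
}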
+
+int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv)
+{
+ struct sk_buff **vaddr;
+ int len;
+
+ len = priv->tx_bd_num * sizeof(*priv->tx_skb) +
+ priv->rx_bd_num * sizeof(*priv->rx_skb);
+ vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL);
+
+ if (!vaddr)
+ return -ENOMEM;
+
+ priv->tx_skb = vaddr;
+
+ vaddr += priv->tx_bd_num;
+ priv->rx_skb = vaddr;
+
+ return 0;
+}
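qtnf_pcie_alloc_skb_array() carves two pointer arrays out of one devm allocation: a single devm_kzalloc() covers both tx and rx, and the rx array simply starts tx_bd_num entries in, so both are released together on driver detach. A sketch of the same trick (hypothetical signature):

#include <linux/device.h>
#include <linux/slab.h>

static int demo_alloc_two_arrays(struct device *dev, void ***txp,
				 void ***rxp, size_t ntx, size_t nrx)
{
	void **v = devm_kzalloc(dev, (ntx + nrx) * sizeof(*v), GFP_KERNEL);

	if (!v)
		return -ENOMEM;

	*txp = v;
	*rxp = v + ntx; /* second array begins right after the first */

	return 0;
}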
+
+void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+ struct pci_dev *pdev = priv->pdev;
+
+ get_device(&pdev->dev);
+ schedule_work(&bus->fw_work);
+}
+
+static int qtnf_dbg_mps_show(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+
+ seq_printf(s, "%d\n", priv->mps);
+
+ return 0;
+}
+
+static int qtnf_dbg_msi_show(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+
+ seq_printf(s, "%u\n", priv->msi_enabled);
+
+ return 0;
+}
+
+static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+
+ seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n",
+ priv->shm_ipc_ep_in.tx_packet_count);
+ seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n",
+ priv->shm_ipc_ep_in.rx_packet_count);
+ seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n",
+ priv->shm_ipc_ep_out.tx_timeout_count);
+ seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n",
+ priv->shm_ipc_ep_out.rx_packet_count);
+
+ return 0;
+}
+
+void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
+ const char *drv_name)
+{
+ struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
+ struct pci_dev *pdev = priv->pdev;
+ int ret;
+
+ if (boot_success) {
+ bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;
+
+ ret = qtnf_core_attach(bus);
+ if (ret) {
+ pr_err("failed to attach core\n");
+ boot_success = false;
+ }
+ }
+
+ if (boot_success) {
+ qtnf_debugfs_init(bus, drv_name);
+ qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
+ qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
+ qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
+ } else {
+ bus->fw_state = QTNF_FW_STATE_DETACHED;
+ }
+
+ put_device(&pdev->dev);
+}
+
+static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+ struct pci_dev *parent;
+ int mps_p, mps_o, mps_m, mps;
+ int ret;
+
+ /* current mps */
+ mps_o = pcie_get_mps(pdev);
+
+ /* maximum supported mps */
+ mps_m = 128 << pdev->pcie_mpss;
+
+ /* suggested new mps value */
+ mps = mps_m;
+
+ if (pdev->bus && pdev->bus->self) {
+ /* parent (bus) mps */
+ parent = pdev->bus->self;
+
+ if (pci_is_pcie(parent)) {
+ mps_p = pcie_get_mps(parent);
+ mps = min(mps_m, mps_p);
+ }
+ }
+
+ ret = pcie_set_mps(pdev, mps);
+ if (ret) {
+ pr_err("failed to set mps to %d, keep using current %d\n",
+ mps, mps_o);
+ priv->mps = mps_o;
+ return;
+ }
+
+ pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m);
+ priv->mps = mps;
+}
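The MPS tuning above implements a simple clamp: a device must not use a larger PCIe payload than its parent bridge currently does, so the chosen value is the minimum of the device's own maximum (128 << pcie_mpss, per the PCIe encoding) and the parent's current setting. Reduced to arithmetic:

#include <linux/kernel.h>

static int demo_pick_mps(int own_mpss, int parent_mps)
{
	int own_max = 128 << own_mpss; /* MPS is encoded as 128 * 2^n */

	return min(own_max, parent_mps);
}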
+
+static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi)
+{
+ struct pci_dev *pdev = priv->pdev;
+
+ /* fall back to legacy INTx interrupts by default */
+ priv->msi_enabled = 0;
+
+ /* check if MSI capability is available */
+ if (use_msi) {
+ if (!pci_enable_msi(pdev)) {
+ pr_debug("enabled MSI interrupt\n");
+ priv->msi_enabled = 1;
+ } else {
+ pr_warn("failed to enable MSI interrupts");
+ }
+ }
+
+ if (!priv->msi_enabled) {
+ pr_warn("legacy PCIE interrupts enabled\n");
+ pci_intx(pdev, 1);
+ }
+}
+
+static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
+{
+ void __iomem *vaddr;
+ dma_addr_t busaddr;
+ size_t len;
+ int ret;
+
+ ret = pcim_iomap_regions(priv->pdev, 1 << index, "qtnfmac_pcie");
+ if (ret)
+ return IOMEM_ERR_PTR(ret);
+
+ busaddr = pci_resource_start(priv->pdev, index);
+ len = pci_resource_len(priv->pdev, index);
+ vaddr = pcim_iomap_table(priv->pdev)[index];
+ if (!vaddr)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n",
+ index, vaddr, &busaddr, (int)len);
+
+ return vaddr;
+}
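Note the error convention: qtnf_map_bar() reports failure through IOMEM_ERR_PTR(), never NULL, so callers must test with IS_ERR() as qtnf_pcie_init_memory() does below. A caller-side sketch (hypothetical wrapper around the same call):

#include <linux/err.h>

static int demo_map_one(struct qtnf_pcie_bus_priv *priv)
{
	void __iomem *bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);

	if (IS_ERR(bar))
		return PTR_ERR(bar); /* the errno travels inside the pointer */

	return 0;
}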
+
+static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
+{
+ int ret = -ENOMEM;
+
+ priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
+ if (IS_ERR(priv->sysctl_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
+ return ret;
+ }
+
+ priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR);
+ if (IS_ERR(priv->dmareg_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
+ return ret;
+ }
+
+ priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR);
+ if (IS_ERR(priv->epmem_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qtnf_pcie_control_rx_callback(void *arg, const u8 __iomem *buf,
+ size_t len)
+{
+ struct qtnf_pcie_bus_priv *priv = arg;
+ struct qtnf_bus *bus = pci_get_drvdata(priv->pdev);
+ struct sk_buff *skb;
+
+ if (unlikely(len == 0)) {
+ pr_warn("zero length packet received\n");
+ return;
+ }
+
+ skb = __dev_alloc_skb(len, GFP_KERNEL);
+
+ if (unlikely(!skb)) {
+ pr_err("failed to allocate skb\n");
+ return;
+ }
+
+ memcpy_fromio(skb_put(skb, len), buf, len);
+
+ qtnf_trans_handle_rx_ctl_packet(bus, skb);
+}
+
+void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv,
+ struct qtnf_shm_ipc_region __iomem *ipc_tx_reg,
+ struct qtnf_shm_ipc_region __iomem *ipc_rx_reg,
+ const struct qtnf_shm_ipc_int *ipc_int)
+{
+ const struct qtnf_shm_ipc_rx_callback rx_callback = {
+ qtnf_pcie_control_rx_callback, priv };
+
+ qtnf_shm_ipc_init(&priv->shm_ipc_ep_in, QTNF_SHM_IPC_OUTBOUND,
+ ipc_tx_reg, priv->workqueue,
+ ipc_int, &rx_callback);
+ qtnf_shm_ipc_init(&priv->shm_ipc_ep_out, QTNF_SHM_IPC_INBOUND,
+ ipc_rx_reg, priv->workqueue,
+ ipc_int, &rx_callback);
+}
+
+int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size,
+ const struct qtnf_bus_ops *bus_ops, u64 dma_mask,
+ bool use_msi)
+{
+ struct qtnf_pcie_bus_priv *pcie_priv;
+ struct qtnf_bus *bus;
+ int ret;
+
+ bus = devm_kzalloc(&pdev->dev,
+ sizeof(*bus) + priv_size, GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ pcie_priv = get_bus_priv(bus);
+
+ pci_set_drvdata(pdev, bus);
+ bus->bus_ops = bus_ops;
+ bus->dev = &pdev->dev;
+ bus->fw_state = QTNF_FW_STATE_RESET;
+ pcie_priv->pdev = pdev;
+ pcie_priv->tx_stopped = 0;
+
+ mutex_init(&bus->bus_lock);
+ spin_lock_init(&pcie_priv->tx_lock);
+ spin_lock_init(&pcie_priv->tx_reclaim_lock);
+
+ pcie_priv->tx_full_count = 0;
+ pcie_priv->tx_done_count = 0;
+ pcie_priv->pcie_irq_count = 0;
+ pcie_priv->tx_reclaim_done = 0;
+ pcie_priv->tx_reclaim_req = 0;
+
+ pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
+ if (!pcie_priv->workqueue) {
+ pr_err("failed to alloc bus workqueue\n");
+ ret = -ENODEV;
+ goto err_init;
+ }
+
+ init_dummy_netdev(&bus->mux_dev);
+
+ if (!pci_is_pcie(pdev)) {
+ pr_err("device %s is not PCI Express\n", pci_name(pdev));
+ ret = -EIO;
+ goto err_base;
+ }
+
+ qtnf_tune_pcie_mps(pcie_priv);
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ pr_err("failed to init PCI device %x\n", pdev->device);
+ goto err_base;
+ } else {
+ pr_debug("successful init of PCI device %x\n", pdev->device);
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
+ if (ret) {
+ pr_err("PCIE DMA coherent mask init failed\n");
+ goto err_base;
+ }
+
+ pci_set_master(pdev);
+ qtnf_pcie_init_irq(pcie_priv, use_msi);
+
+ ret = qtnf_pcie_init_memory(pcie_priv);
+ if (ret < 0) {
+ pr_err("PCIE memory init failed\n");
+ goto err_base;
+ }
+
+ pci_save_state(pdev);
+
+ return 0;
+
+err_base:
+ flush_workqueue(pcie_priv->workqueue);
+ destroy_workqueue(pcie_priv->workqueue);
+err_init:
+ pci_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv)
+{
+ qtnf_shm_ipc_free(&priv->shm_ipc_ep_in);
+ qtnf_shm_ipc_free(&priv->shm_ipc_ep_out);
+}
+
+void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv)
+{
+ cancel_work_sync(&bus->fw_work);
+
+ if (bus->fw_state == QTNF_FW_STATE_ACTIVE ||
+ bus->fw_state == QTNF_FW_STATE_EP_DEAD)
+ qtnf_core_detach(bus);
+
+ netif_napi_del(&bus->mux_napi);
+ flush_workqueue(priv->workqueue);
+ destroy_workqueue(priv->workqueue);
+ tasklet_kill(&priv->reclaim_tq);
+
+ qtnf_pcie_free_shm_ipc(priv);
+ qtnf_debugfs_remove(bus);
+ pci_set_drvdata(priv->pdev, NULL);
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
new file mode 100644
index 000000000000..5c70fb4c0f92
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018 Quantenna Communications, Inc. All rights reserved. */
+
+#ifndef _QTN_FMAC_PCIE_H_
+#define _QTN_FMAC_PCIE_H_
+
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+
+#include "shm_ipc.h"
+#include "bus.h"
+
+#define SKB_BUF_SIZE 2048
+
+#define QTN_FW_DL_TIMEOUT_MS 3000
+#define QTN_FW_QLINK_TIMEOUT_MS 30000
+#define QTN_EP_RESET_WAIT_MS 1000
+
+struct qtnf_pcie_bus_priv {
+ struct pci_dev *pdev;
+
+ spinlock_t tx_reclaim_lock;
+ spinlock_t tx_lock;
+ int mps;
+
+ struct workqueue_struct *workqueue;
+ struct tasklet_struct reclaim_tq;
+
+ void __iomem *sysctl_bar;
+ void __iomem *epmem_bar;
+ void __iomem *dmareg_bar;
+
+ struct qtnf_shm_ipc shm_ipc_ep_in;
+ struct qtnf_shm_ipc shm_ipc_ep_out;
+
+ u16 tx_bd_num;
+ u16 rx_bd_num;
+
+ struct sk_buff **tx_skb;
+ struct sk_buff **rx_skb;
+
+ u32 rx_bd_w_index;
+ u32 rx_bd_r_index;
+
+ u32 tx_bd_w_index;
+ u32 tx_bd_r_index;
+
+ /* diagnostics stats */
+ u32 pcie_irq_count;
+ u32 tx_full_count;
+ u32 tx_done_count;
+ u32 tx_reclaim_done;
+ u32 tx_reclaim_req;
+
+ u8 msi_enabled;
+ u8 tx_stopped;
+};
+
+int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb);
+int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv);
+void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus);
+void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
+ const char *drv_name);
+void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv,
+ struct qtnf_shm_ipc_region __iomem *ipc_tx_reg,
+ struct qtnf_shm_ipc_region __iomem *ipc_rx_reg,
+ const struct qtnf_shm_ipc_int *ipc_int);
+int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size,
+ const struct qtnf_bus_ops *bus_ops, u64 dma_mask,
+ bool use_msi);
+void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv);
+
+static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg)
+{
+ writel(val, basereg);
+
+ /* flush posted write */
+ readl(basereg);
+}
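qtnf_non_posted_write() pairs each register write with a read-back: PCIe writes are posted, so the readl() forces the write to actually reach the device before the CPU continues. A usage sketch against a hypothetical doorbell register:

static inline void demo_ring_doorbell(void __iomem *doorbell_reg)
{
	/* after this returns, the device has definitely seen the write */
	qtnf_non_posted_write(1, doorbell_reg);
}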
+
+#endif /* _QTN_FMAC_PCIE_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
new file mode 100644
index 000000000000..95c7b95c6f8a
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -0,0 +1,1249 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Quantenna Communications */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/crc32.h>
+#include <linux/spinlock.h>
+#include <linux/circ_buf.h>
+#include <linux/log2.h>
+
+#include "pcie_priv.h"
+#include "pearl_pcie_regs.h"
+#include "pearl_pcie_ipc.h"
+#include "qtn_hw_ids.h"
+#include "core.h"
+#include "bus.h"
+#include "shm_ipc.h"
+#include "debug.h"
+
+static bool use_msi = true;
+module_param(use_msi, bool, 0644);
+MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt");
+
+static unsigned int tx_bd_size_param = 32;
+module_param(tx_bd_size_param, uint, 0644);
+MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two");
+
+static unsigned int rx_bd_size_param = 256;
+module_param(rx_bd_size_param, uint, 0644);
+MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two");
+
+static u8 flashboot = 1;
+module_param(flashboot, byte, 0644);
+MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");
+
+#define DRV_NAME "qtnfmac_pearl_pcie"
+
+struct qtnf_pearl_bda {
+ __le16 bda_len;
+ __le16 bda_version;
+ __le32 bda_pci_endian;
+ __le32 bda_ep_state;
+ __le32 bda_rc_state;
+ __le32 bda_dma_mask;
+ __le32 bda_msi_addr;
+ __le32 bda_flashsz;
+ u8 bda_boardname[PCIE_BDA_NAMELEN];
+ __le32 bda_rc_msi_enabled;
+ u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE];
+ __le32 bda_dsbw_start_index;
+ __le32 bda_dsbw_end_index;
+ __le32 bda_dsbw_total_bytes;
+ __le32 bda_rc_tx_bd_base;
+ __le32 bda_rc_tx_bd_num;
+ u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH];
+ struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
+ struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
+} __packed;
+
+struct qtnf_pearl_tx_bd {
+ __le32 addr;
+ __le32 addr_h;
+ __le32 info;
+ __le32 info_h;
+} __packed;
+
+struct qtnf_pearl_rx_bd {
+ __le32 addr;
+ __le32 addr_h;
+ __le32 info;
+ __le32 info_h;
+ __le32 next_ptr;
+ __le32 next_ptr_h;
+} __packed;
+
+struct qtnf_pearl_fw_hdr {
+ u8 boardflg[8];
+ __le32 fwsize;
+ __le32 seqnum;
+ __le32 type;
+ __le32 pktlen;
+ __le32 crc;
+} __packed;
+
+struct qtnf_pcie_pearl_state {
+ struct qtnf_pcie_bus_priv base;
+
+ /* lock for irq configuration changes */
+ spinlock_t irq_lock;
+
+ struct qtnf_pearl_bda __iomem *bda;
+ void __iomem *pcie_reg_base;
+
+ struct qtnf_pearl_tx_bd *tx_bd_vbase;
+ dma_addr_t tx_bd_pbase;
+
+ struct qtnf_pearl_rx_bd *rx_bd_vbase;
+ dma_addr_t rx_bd_pbase;
+
+ dma_addr_t bd_table_paddr;
+ void *bd_table_vaddr;
+ u32 bd_table_len;
+ u32 pcie_irq_mask;
+ u32 pcie_irq_rx_count;
+ u32 pcie_irq_tx_count;
+ u32 pcie_irq_uf_count;
+};
+
+static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ps->irq_lock, flags);
+ ps->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
+ spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ps->irq_lock, flags);
+ writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+ spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_pearl_state *ps)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ps->irq_lock, flags);
+ writel(0x0, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+ spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ps->irq_lock, flags);
+ ps->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
+ writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+ spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_pearl_state *ps)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ps->irq_lock, flags);
+ ps->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
+ writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+ spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_en_txdone_irq(struct qtnf_pcie_pearl_state *ps)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ps->irq_lock, flags);
+ ps->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
+ writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+ spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_pearl_state *ps)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ps->irq_lock, flags);
+ ps->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
+ writel(ps->pcie_irq_mask, PCIE_HDP_INT_EN(ps->pcie_reg_base));
+ spin_unlock_irqrestore(&ps->irq_lock, flags);
+}
+
+static void qtnf_deassert_intx(struct qtnf_pcie_pearl_state *ps)
+{
+ void __iomem *reg = ps->base.sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
+ u32 cfg;
+
+ cfg = readl(reg);
+ cfg &= ~PEARL_ASSERT_INTX;
+ qtnf_non_posted_write(cfg, reg);
+}
+
+static void qtnf_pearl_reset_ep(struct qtnf_pcie_pearl_state *ps)
+{
+ const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET);
+ void __iomem *reg = ps->base.sysctl_bar +
+ QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
+
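+ /* EP reset clobbers PCI config space; restore the saved state once it settles */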
+ qtnf_non_posted_write(data, reg);
+ msleep(QTN_EP_RESET_WAIT_MS);
+ pci_restore_state(ps->base.pdev);
+}
+
+static void qtnf_pcie_pearl_ipc_gen_ep_int(void *arg)
+{
+ const struct qtnf_pcie_pearl_state *ps = arg;
+ const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
+ void __iomem *reg = ps->base.sysctl_bar +
+ QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
+
+ qtnf_non_posted_write(data, reg);
+}
+
+static int qtnf_is_state(__le32 __iomem *reg, u32 state)
+{
+ u32 s = readl(reg);
+
+ return s & state;
+}
+
+static void qtnf_set_state(__le32 __iomem *reg, u32 state)
+{
+ u32 s = readl(reg);
+
+ qtnf_non_posted_write(state | s, reg);
+}
+
+static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
+{
+ u32 s = readl(reg);
+
+ qtnf_non_posted_write(s & ~state, reg);
+}
+
+static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
+{
+ u32 timeout = 0;
+
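+ /* poll in ~1 ms steps; delay_in_ms caps the number of iterations */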
+ while ((qtnf_is_state(reg, state) == 0)) {
+ usleep_range(1000, 1200);
+ if (++timeout > delay_in_ms)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int pearl_alloc_bd_table(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ dma_addr_t paddr;
+ void *vaddr;
+ int len;
+
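+ /* tx and rx descriptor rings share a single coherent DMA allocation */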
+ len = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd) +
+ priv->rx_bd_num * sizeof(struct qtnf_pearl_rx_bd);
+
+ vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+
+ /* tx bd */
+
+ memset(vaddr, 0, len);
+
+ ps->bd_table_vaddr = vaddr;
+ ps->bd_table_paddr = paddr;
+ ps->bd_table_len = len;
+
+ ps->tx_bd_vbase = vaddr;
+ ps->tx_bd_pbase = paddr;
+
+ pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ priv->tx_bd_r_index = 0;
+ priv->tx_bd_w_index = 0;
+
+ /* rx bd */
+
+ vaddr = ((struct qtnf_pearl_tx_bd *)vaddr) + priv->tx_bd_num;
+ paddr += priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
+
+ ps->rx_bd_vbase = vaddr;
+ ps->rx_bd_pbase = paddr;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(paddr),
+ PCIE_HDP_TX_HOST_Q_BASE_H(ps->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(paddr),
+ PCIE_HDP_TX_HOST_Q_BASE_L(ps->pcie_reg_base));
+ writel(priv->rx_bd_num | (sizeof(struct qtnf_pearl_rx_bd)) << 16,
+ PCIE_HDP_TX_HOST_Q_SZ_CTRL(ps->pcie_reg_base));
+
+ pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ return 0;
+}
+
+static int pearl_skb2rbd_attach(struct qtnf_pcie_pearl_state *ps, u16 index)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ struct qtnf_pearl_rx_bd *rxbd;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ priv->rx_skb[index] = NULL;
+ return -ENOMEM;
+ }
+
+ priv->rx_skb[index] = skb;
+ rxbd = &ps->rx_bd_vbase[index];
+
+ paddr = pci_map_single(priv->pdev, skb->data,
+ SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(priv->pdev, paddr)) {
+ pr_err("skb DMA mapping error: %pad\n", &paddr);
+ return -ENOMEM;
+ }
+
+ /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
+ rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
+ rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
+ rxbd->info = 0x0;
+
+ priv->rx_bd_w_index = index;
+
+ /* sync up all descriptor updates */
+ wmb();
+
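+ /* hand the new buffer address to the EP via the HHBM buffer pointer regs */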
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(paddr),
+ PCIE_HDP_HHBM_BUF_PTR_H(ps->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(paddr),
+ PCIE_HDP_HHBM_BUF_PTR(ps->pcie_reg_base));
+
+ writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(ps->pcie_reg_base));
+ return 0;
+}
+
+static int pearl_alloc_rx_buffers(struct qtnf_pcie_pearl_state *ps)
+{
+ u16 i;
+ int ret = 0;
+
+ memset(ps->rx_bd_vbase, 0x0,
+ ps->base.rx_bd_num * sizeof(struct qtnf_pearl_rx_bd));
+
+ for (i = 0; i < ps->base.rx_bd_num; i++) {
+ ret = pearl_skb2rbd_attach(ps, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* all rx/tx activity should have ceased before calling this function */
+static void qtnf_pearl_free_xfer_buffers(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ struct qtnf_pearl_tx_bd *txbd;
+ struct qtnf_pearl_rx_bd *rxbd;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int i;
+
+ /* free rx buffers */
+ for (i = 0; i < priv->rx_bd_num; i++) {
+ if (priv->rx_skb && priv->rx_skb[i]) {
+ rxbd = &ps->rx_bd_vbase[i];
+ skb = priv->rx_skb[i];
+ paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
+ le32_to_cpu(rxbd->addr));
+ pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ priv->rx_skb[i] = NULL;
+ }
+ }
+
+ /* free tx buffers */
+ for (i = 0; i < priv->tx_bd_num; i++) {
+ if (priv->tx_skb && priv->tx_skb[i]) {
+ txbd = &ps->tx_bd_vbase[i];
+ skb = priv->tx_skb[i];
+ paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
+ le32_to_cpu(txbd->addr));
+ pci_unmap_single(priv->pdev, paddr, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[i] = NULL;
+ }
+ }
+}
+
+static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
+{
+ u32 val;
+
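+ /* pulse HHBM soft reset before programming the pool configuration */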
+ val = readl(PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ val |= HHBM_CONFIG_SOFT_RESET;
+ writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ usleep_range(50, 100);
+ val &= ~HHBM_CONFIG_SOFT_RESET;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ val |= HHBM_64BIT;
+#endif
+ writel(val, PCIE_HHBM_CONFIG(ps->pcie_reg_base));
+ writel(ps->base.rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(ps->pcie_reg_base));
+
+ return 0;
+}
+
+static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ int ret;
+ u32 val;
+
+ priv->tx_bd_num = tx_bd_size_param;
+ priv->rx_bd_num = rx_bd_size_param;
+ priv->rx_bd_w_index = 0;
+ priv->rx_bd_r_index = 0;
+
+ if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) {
+ pr_err("tx_bd_size_param %u is not power of two\n",
+ priv->tx_bd_num);
+ return -EINVAL;
+ }
+
+ val = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
+ if (val > PCIE_HHBM_MAX_SIZE) {
+ pr_err("tx_bd_size_param %u is too large\n",
+ priv->tx_bd_num);
+ return -EINVAL;
+ }
+
+ if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
+ pr_err("rx_bd_size_param %u is not power of two\n",
+ priv->rx_bd_num);
+ return -EINVAL;
+ }
+
+ val = priv->rx_bd_num * sizeof(dma_addr_t);
+ if (val > PCIE_HHBM_MAX_SIZE) {
+ pr_err("rx_bd_size_param %u is too large\n",
+ priv->rx_bd_num);
+ return -EINVAL;
+ }
+
+ ret = pearl_hhbm_init(ps);
+ if (ret) {
+ pr_err("failed to init h/w queues\n");
+ return ret;
+ }
+
+ ret = qtnf_pcie_alloc_skb_array(priv);
+ if (ret) {
+ pr_err("failed to allocate skb array\n");
+ return ret;
+ }
+
+ ret = pearl_alloc_bd_table(ps);
+ if (ret) {
+ pr_err("failed to allocate bd table\n");
+ return ret;
+ }
+
+ ret = pearl_alloc_rx_buffers(ps);
+ if (ret) {
+ pr_err("failed to allocate rx buffers\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ struct qtnf_pearl_tx_bd *txbd;
+ struct sk_buff *skb;
+ unsigned long flags;
+ dma_addr_t paddr;
+ u32 tx_done_index;
+ int count = 0;
+ int i;
+
+ spin_lock_irqsave(&priv->tx_reclaim_lock, flags);
+
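+ /* tx_bd_num is a power of two, so masking yields the h/w ring index */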
+ tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
+ & (priv->tx_bd_num - 1);
+
+ i = priv->tx_bd_r_index;
+
+ while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
+ skb = priv->tx_skb[i];
+ if (likely(skb)) {
+ txbd = &ps->tx_bd_vbase[i];
+ paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
+ le32_to_cpu(txbd->addr));
+ pci_unmap_single(priv->pdev, paddr, skb->len,
+ PCI_DMA_TODEVICE);
+
+ if (skb->dev) {
+ qtnf_update_tx_stats(skb->dev, skb);
+ if (unlikely(priv->tx_stopped)) {
+ qtnf_wake_all_queues(skb->dev);
+ priv->tx_stopped = 0;
+ }
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ priv->tx_skb[i] = NULL;
+ count++;
+
+ if (++i >= priv->tx_bd_num)
+ i = 0;
+ }
+
+ priv->tx_reclaim_done += count;
+ priv->tx_reclaim_req++;
+ priv->tx_bd_r_index = i;
+
+ spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
+}
+
+static int qtnf_tx_queue_ready(struct qtnf_pcie_pearl_state *ps)
+{
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+
+ if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num)) {
+ qtnf_pearl_data_tx_reclaim(ps);
+
+ if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num)) {
+ pr_warn_ratelimited("reclaim full Tx queue\n");
+ priv->tx_full_count++;
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ dma_addr_t txbd_paddr, skb_paddr;
+ struct qtnf_pearl_tx_bd *txbd;
+ unsigned long flags;
+ int len, i;
+ u32 info;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ if (!qtnf_tx_queue_ready(ps)) {
+ if (skb->dev) {
+ netif_tx_stop_all_queues(skb->dev);
+ priv->tx_stopped = 1;
+ }
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ i = priv->tx_bd_w_index;
+ priv->tx_skb[i] = skb;
+ len = skb->len;
+
+ skb_paddr = pci_map_single(priv->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
+ pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
+ ret = -ENOMEM;
+ goto tx_done;
+ }
+
+ txbd = &ps->tx_bd_vbase[i];
+ txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
+ txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));
+
+ info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
+ txbd->info = cpu_to_le32(info);
+
+ /* sync up all descriptor updates before passing them to EP */
+ dma_wmb();
+
+ /* write new TX descriptor to PCIE_RX_FIFO on EP */
+ txbd_paddr = ps->tx_bd_pbase + i * sizeof(struct qtnf_pearl_tx_bd);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ writel(QTN_HOST_HI32(txbd_paddr),
+ PCIE_HDP_HOST_WR_DESC0_H(ps->pcie_reg_base));
+#endif
+ writel(QTN_HOST_LO32(txbd_paddr),
+ PCIE_HDP_HOST_WR_DESC0(ps->pcie_reg_base));
+
+ if (++i >= priv->tx_bd_num)
+ i = 0;
+
+ priv->tx_bd_w_index = i;
+
+tx_done:
+ if (ret && skb) {
+ pr_err_ratelimited("drop skb\n");
+ if (skb->dev)
+ skb->dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ priv->tx_done_count++;
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ qtnf_pearl_data_tx_reclaim(ps);
+
+ return NETDEV_TX_OK;
+}
+
+static irqreturn_t qtnf_pcie_pearl_interrupt(int irq, void *data)
+{
+ struct qtnf_bus *bus = (struct qtnf_bus *)data;
+ struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ u32 status;
+
+ priv->pcie_irq_count++;
+ status = readl(PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
+
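+ /* SHM IPC handlers track their own doorbell state; kick them on every IRQ */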
+ qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
+ qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);
+
+ if (!(status & ps->pcie_irq_mask))
+ goto irq_done;
+
+ if (status & PCIE_HDP_INT_RX_BITS)
+ ps->pcie_irq_rx_count++;
+
+ if (status & PCIE_HDP_INT_TX_BITS)
+ ps->pcie_irq_tx_count++;
+
+ if (status & PCIE_HDP_INT_HHBM_UF)
+ ps->pcie_irq_uf_count++;
+
+ if (status & PCIE_HDP_INT_RX_BITS) {
+ qtnf_dis_rxdone_irq(ps);
+ napi_schedule(&bus->mux_napi);
+ }
+
+ if (status & PCIE_HDP_INT_TX_BITS) {
+ qtnf_dis_txdone_irq(ps);
+ tasklet_hi_schedule(&priv->reclaim_tq);
+ }
+
+irq_done:
+ /* H/W workaround: clean all bits, not only enabled */
+ qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(ps->pcie_reg_base));
+
+ if (!priv->msi_enabled)
+ qtnf_deassert_intx(ps);
+
+ return IRQ_HANDLED;
+}
+
+static int qtnf_rx_data_ready(struct qtnf_pcie_pearl_state *ps)
+{
+ u16 index = ps->base.rx_bd_r_index;
+ struct qtnf_pearl_rx_bd *rxbd;
+ u32 descw;
+
+ rxbd = &ps->rx_bd_vbase[index];
+ descw = le32_to_cpu(rxbd->info);
+
+ if (descw & QTN_TXDONE_MASK)
+ return 1;
+
+ return 0;
+}
+
+static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
+ struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+ struct net_device *ndev = NULL;
+ struct sk_buff *skb = NULL;
+ int processed = 0;
+ struct qtnf_pearl_rx_bd *rxbd;
+ dma_addr_t skb_paddr;
+ int consume;
+ u32 descw;
+ u32 psize;
+ u16 r_idx;
+ u16 w_idx;
+ int ret;
+
+ while (processed < budget) {
+ if (!qtnf_rx_data_ready(ps))
+ goto rx_out;
+
+ r_idx = priv->rx_bd_r_index;
+ rxbd = &ps->rx_bd_vbase[r_idx];
+ descw = le32_to_cpu(rxbd->info);
+
+ skb = priv->rx_skb[r_idx];
+ psize = QTN_GET_LEN(descw);
+ consume = 1;
+
+ if (!(descw & QTN_TXDONE_MASK)) {
+ pr_warn("skip invalid rxbd[%d]\n", r_idx);
+ consume = 0;
+ }
+
+ if (!skb) {
+ pr_warn("skip missing rx_skb[%d]\n", r_idx);
+ consume = 0;
+ }
+
+ if (skb && (skb_tailroom(skb) < psize)) {
+ pr_err("skip packet with invalid length: %u > %u\n",
+ psize, skb_tailroom(skb));
+ consume = 0;
+ }
+
+ if (skb) {
+ skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
+ le32_to_cpu(rxbd->addr));
+ pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ if (consume) {
+ skb_put(skb, psize);
+ ndev = qtnf_classify_skb(bus, skb);
+ if (likely(ndev)) {
+ qtnf_update_rx_stats(ndev, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(napi, skb);
+ } else {
+ pr_debug("drop untagged skb\n");
+ bus->mux_dev.stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ if (skb) {
+ bus->mux_dev.stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+ }
+
+ priv->rx_skb[r_idx] = NULL;
+ if (++r_idx >= priv->rx_bd_num)
+ r_idx = 0;
+
+ priv->rx_bd_r_index = r_idx;
+
+ /* replace the processed buffer with a fresh one */
+ w_idx = priv->rx_bd_w_index;
+ while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
+ priv->rx_bd_num) > 0) {
+ if (++w_idx >= priv->rx_bd_num)
+ w_idx = 0;
+
+ ret = pearl_skb2rbd_attach(ps, w_idx);
+ if (ret) {
+ pr_err("failed to allocate new rx_skb[%d]\n",
+ w_idx);
+ break;
+ }
+ }
+
+ processed++;
+ }
+
+rx_out:
+ if (processed < budget) {
+ napi_complete(napi);
+ qtnf_en_rxdone_irq(ps);
+ }
+
+ return processed;
+}
+
+static void
+qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
+{
+ struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
+
+ tasklet_hi_schedule(&ps->base.reclaim_tq);
+}
+
+static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
+
+ qtnf_enable_hdp_irqs(ps);
+ napi_enable(&bus->mux_napi);
+}
+
+static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
+
+ napi_disable(&bus->mux_napi);
+ qtnf_disable_hdp_irqs(ps);
+}
+
+static const struct qtnf_bus_ops qtnf_pcie_pearl_bus_ops = {
+ /* control path methods */
+ .control_tx = qtnf_pcie_control_tx,
+
+ /* data path methods */
+ .data_tx = qtnf_pcie_data_tx,
+ .data_tx_timeout = qtnf_pcie_data_tx_timeout,
+ .data_rx_start = qtnf_pcie_data_rx_start,
+ .data_rx_stop = qtnf_pcie_data_rx_stop,
+};
+
+static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+ u32 reg = readl(PCIE_HDP_INT_EN(ps->pcie_reg_base));
+ u32 status;
+
+ seq_printf(s, "pcie_irq_count(%u)\n", ps->base.pcie_irq_count);
+ seq_printf(s, "pcie_irq_tx_count(%u)\n", ps->pcie_irq_tx_count);
+ status = reg & PCIE_HDP_INT_TX_BITS;
+ seq_printf(s, "pcie_irq_tx_status(%s)\n",
+ (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS");
+ seq_printf(s, "pcie_irq_rx_count(%u)\n", ps->pcie_irq_rx_count);
+ status = reg & PCIE_HDP_INT_RX_BITS;
+ seq_printf(s, "pcie_irq_rx_status(%s)\n",
+ (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS");
+ seq_printf(s, "pcie_irq_uf_count(%u)\n", ps->pcie_irq_uf_count);
+ status = reg & PCIE_HDP_INT_HHBM_UF;
+ seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n",
+ (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS");
+
+ return 0;
+}
+
+static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ps->base;
+
+ seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
+ seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
+ seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
+ seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
+
+ seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
+ seq_printf(s, "tx_bd_p_index(%u)\n",
+ readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
+ & (priv->tx_bd_num - 1));
+ seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
+ seq_printf(s, "tx queue len(%u)\n",
+ CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num));
+
+ seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
+ seq_printf(s, "rx_bd_p_index(%u)\n",
+ readl(PCIE_HDP_TX0DMA_CNT(ps->pcie_reg_base))
+ & (priv->rx_bd_num - 1));
+ seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
+ seq_printf(s, "rx alloc queue len(%u)\n",
+ CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
+ priv->rx_bd_num));
+
+ return 0;
+}
+
+static int qtnf_ep_fw_send(struct pci_dev *pdev, uint32_t size,
+ int blk, const u8 *pblk, const u8 *fw)
+{
+ struct qtnf_bus *bus = pci_get_drvdata(pdev);
+ struct qtnf_pearl_fw_hdr *hdr;
+ struct sk_buff *skb = NULL;
+ u8 *pdata;
+ int hds = sizeof(*hdr);
+ int len = 0;
+ int ret;
+
+ skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
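+ /* fw download chunks are fixed-size frames; set the length directly */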
+ skb->len = QTN_PCIE_FW_BUFSZ;
+ skb->dev = NULL;
+
+ hdr = (struct qtnf_pearl_fw_hdr *)skb->data;
+ memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
+ hdr->fwsize = cpu_to_le32(size);
+ hdr->seqnum = cpu_to_le32(blk);
+
+ if (blk)
+ hdr->type = cpu_to_le32(QTN_FW_DSUB);
+ else
+ hdr->type = cpu_to_le32(QTN_FW_DBEGIN);
+
+ pdata = skb->data + hds;
+
+ len = QTN_PCIE_FW_BUFSZ - hds;
+ if (pblk >= (fw + size - len)) {
+ len = fw + size - pblk;
+ hdr->type = cpu_to_le32(QTN_FW_DEND);
+ }
+
+ hdr->pktlen = cpu_to_le32(len);
+ memcpy(pdata, pblk, len);
+ hdr->crc = cpu_to_le32(~crc32(0, pdata, len));
+
+ ret = qtnf_pcie_data_tx(bus, skb);
+
+ return (ret == NETDEV_TX_OK) ? len : 0;
+}
+
+static int
+qtnf_ep_fw_load(struct qtnf_pcie_pearl_state *ps, const u8 *fw, u32 fw_size)
+{
+ int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pearl_fw_hdr);
+ int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
+ const u8 *pblk = fw;
+ int threshold = 0;
+ int blk = 0;
+ int len;
+
+ pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);
+
+ while (blk < blk_count) {
+ if (++threshold > 10000) {
+ pr_err("FW upload failed: too many retries\n");
+ return -ETIMEDOUT;
+ }
+
+ len = qtnf_ep_fw_send(ps->base.pdev, fw_size, blk, pblk, fw);
+ if (len <= 0)
+ continue;
+
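+ /* sync with the EP after each QTN_PCIE_FW_DLMASK-sized burst and after the final block */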
+ if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
+ (blk == (blk_count - 1))) {
+ qtnf_set_state(&ps->bda->bda_rc_state,
+ QTN_RC_FW_SYNC);
+ if (qtnf_poll_state(&ps->bda->bda_ep_state,
+ QTN_EP_FW_SYNC,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("FW upload failed: SYNC timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ qtnf_clear_state(&ps->bda->bda_ep_state,
+ QTN_EP_FW_SYNC);
+
+ if (qtnf_is_state(&ps->bda->bda_ep_state,
+ QTN_EP_FW_RETRY)) {
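+ /* EP requested a retry: rewind blk/pblk to the start of the last burst */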
+ if (blk == (blk_count - 1)) {
+ int last_round =
+ blk_count & QTN_PCIE_FW_DLMASK;
+ blk -= last_round;
+ pblk -= ((last_round - 1) *
+ blk_size + len);
+ } else {
+ blk -= QTN_PCIE_FW_DLMASK;
+ pblk -= QTN_PCIE_FW_DLMASK * blk_size;
+ }
+
+ qtnf_clear_state(&ps->bda->bda_ep_state,
+ QTN_EP_FW_RETRY);
+
+ pr_warn("FW upload retry: block #%d\n", blk);
+ continue;
+ }
+
+ qtnf_pearl_data_tx_reclaim(ps);
+ }
+
+ pblk += len;
+ blk++;
+ }
+
+ pr_debug("FW upload completed: totally sent %d blocks\n", blk);
+ return 0;
+}
+
+static void qtnf_pearl_fw_work_handler(struct work_struct *work)
+{
+ struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
+ struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
+ struct pci_dev *pdev = ps->base.pdev;
+ const struct firmware *fw;
+ int ret;
+ u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
+ const char *fwname = QTN_PCI_PEARL_FW_NAME;
+ bool fw_boot_success = false;
+
+ if (flashboot) {
+ state |= QTN_RC_FW_FLASHBOOT;
+ } else {
+ ret = request_firmware(&fw, fwname, &pdev->dev);
+ if (ret < 0) {
+ pr_err("failed to get firmware %s\n", fwname);
+ goto fw_load_exit;
+ }
+ }
+
+ qtnf_set_state(&ps->bda->bda_rc_state, state);
+
+ if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("card is not ready\n");
+
+ if (!flashboot)
+ release_firmware(fw);
+
+ goto fw_load_exit;
+ }
+
+ qtnf_clear_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY);
+
+ if (flashboot) {
+ pr_info("booting firmware from flash\n");
+ } else {
+ pr_info("starting firmware upload: %s\n", fwname);
+
+ ret = qtnf_ep_fw_load(ps, fw->data, fw->size);
+ release_firmware(fw);
+ if (ret) {
+ pr_err("firmware upload error\n");
+ goto fw_load_exit;
+ }
+ }
+
+ if (qtnf_poll_state(&ps->bda->bda_ep_state, QTN_EP_FW_DONE,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("firmware bringup timed out\n");
+ goto fw_load_exit;
+ }
+
+ pr_info("firmware is up and running\n");
+
+ if (qtnf_poll_state(&ps->bda->bda_ep_state,
+ QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
+ pr_err("firmware runtime failure\n");
+ goto fw_load_exit;
+ }
+
+ fw_boot_success = true;
+
+fw_load_exit:
+ qtnf_pcie_fw_boot_done(bus, fw_boot_success, DRV_NAME);
+
+ if (fw_boot_success) {
+ qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
+ qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
+ }
+}
+
+static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
+{
+ struct qtnf_pcie_pearl_state *ps = (void *)data;
+
+ qtnf_pearl_data_tx_reclaim(ps);
+ qtnf_en_txdone_irq(ps);
+}
+
+static int qtnf_pearl_check_chip_id(struct qtnf_pcie_pearl_state *ps)
+{
+ unsigned int chipid;
+
+ chipid = qtnf_chip_id_get(ps->base.sysctl_bar);
+
+ switch (chipid) {
+ case QTN_CHIP_ID_PEARL:
+ case QTN_CHIP_ID_PEARL_B:
+ case QTN_CHIP_ID_PEARL_C:
+ pr_info("chip ID is 0x%x\n", chipid);
+ break;
+ default:
+ pr_err("incorrect chip ID 0x%x\n", chipid);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int qtnf_pcie_pearl_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct qtnf_shm_ipc_int ipc_int;
+ struct qtnf_pcie_pearl_state *ps;
+ struct qtnf_bus *bus;
+ int ret;
+ u64 dma_mask;
+
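+ /* use 64-bit DMA whenever dma_addr_t is 64 bits wide, else 32-bit */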
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ dma_mask = DMA_BIT_MASK(64);
+#else
+ dma_mask = DMA_BIT_MASK(32);
+#endif
+
+ ret = qtnf_pcie_probe(pdev, sizeof(*ps), &qtnf_pcie_pearl_bus_ops,
+ dma_mask, use_msi);
+ if (ret)
+ return ret;
+
+ bus = pci_get_drvdata(pdev);
+ ps = get_bus_priv(bus);
+
+ spin_lock_init(&ps->irq_lock);
+
+ tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn,
+ (unsigned long)ps);
+ netif_napi_add(&bus->mux_dev, &bus->mux_napi,
+ qtnf_pcie_pearl_rx_poll, 10);
+ INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler);
+
+ ps->pcie_reg_base = ps->base.dmareg_bar;
+ ps->bda = ps->base.epmem_bar;
+ writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);
+
+ ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
+ ipc_int.arg = ps;
+ qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1,
+ &ps->bda->bda_shm_reg2, &ipc_int);
+
+ ret = qtnf_pearl_check_chip_id(ps);
+ if (ret)
+ goto error;
+
+ ret = qtnf_pcie_pearl_init_xfer(ps);
+ if (ret) {
+ pr_err("PCIE xfer init failed\n");
+ goto error;
+ }
+
+ /* init default irq settings */
+ qtnf_init_hdp_irqs(ps);
+
+ /* start with disabled irqs */
+ qtnf_disable_hdp_irqs(ps);
+
+ ret = devm_request_irq(&pdev->dev, pdev->irq,
+ &qtnf_pcie_pearl_interrupt, 0,
+ "qtnf_pcie_irq", (void *)bus);
+ if (ret) {
+ pr_err("failed to request pcie irq %d\n", pdev->irq);
+ goto err_xfer;
+ }
+
+ qtnf_pcie_bringup_fw_async(bus);
+
+ return 0;
+
+err_xfer:
+ qtnf_pearl_free_xfer_buffers(ps);
+error:
+ qtnf_pcie_remove(bus, &ps->base);
+
+ return ret;
+}
+
+static void qtnf_pcie_pearl_remove(struct pci_dev *pdev)
+{
+ struct qtnf_pcie_pearl_state *ps;
+ struct qtnf_bus *bus;
+
+ bus = pci_get_drvdata(pdev);
+ if (!bus)
+ return;
+
+ ps = get_bus_priv(bus);
+
+ qtnf_pcie_remove(bus, &ps->base);
+ qtnf_pearl_reset_ep(ps);
+ qtnf_pearl_free_xfer_buffers(ps);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int qtnf_pcie_pearl_suspend(struct device *dev)
+{
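+ /* system suspend is not supported; returning an error aborts the transition */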
+ return -EOPNOTSUPP;
+}
+
+static int qtnf_pcie_pearl_resume(struct device *dev)
+{
+ return 0;
+}
+
+/* Power Management Hooks */
+static SIMPLE_DEV_PM_OPS(qtnf_pcie_pearl_pm_ops, qtnf_pcie_pearl_suspend,
+ qtnf_pcie_pearl_resume);
+#endif
+
+static const struct pci_device_id qtnf_pcie_devid_table[] = {
+ {
+ PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
+
+static struct pci_driver qtnf_pcie_pearl_drv_data = {
+ .name = DRV_NAME,
+ .id_table = qtnf_pcie_devid_table,
+ .probe = qtnf_pcie_pearl_probe,
+ .remove = qtnf_pcie_pearl_remove,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &qtnf_pcie_pearl_pm_ops,
+ },
+#endif
+};
+
+static int __init qtnf_pcie_pearl_register(void)
+{
+ pr_info("register Quantenna QSR10g FullMAC PCIE driver\n");
+ return pci_register_driver(&qtnf_pcie_pearl_drv_data);
+}
+
+static void __exit qtnf_pcie_pearl_exit(void)
+{
+ pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
+ pci_unregister_driver(&qtnf_pcie_pearl_drv_data);
+}
+
+module_init(qtnf_pcie_pearl_register);
+module_exit(qtnf_pcie_pearl_exit);
+
+MODULE_AUTHOR("Quantenna Communications");
+MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
index 00bb21a1c47a..634480fe6a64 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015-2016 Quantenna Communications */
#ifndef _QTN_FMAC_PCIE_IPC_H_
#define _QTN_FMAC_PCIE_IPC_H_
@@ -43,11 +30,6 @@
#define QTN_RC_FW_LOADRDY BIT(8)
#define QTN_RC_FW_SYNC BIT(9)
-/* state transition timeouts */
-#define QTN_FW_DL_TIMEOUT_MS 3000
-#define QTN_FW_QLINK_TIMEOUT_MS 30000
-#define QTN_EP_RESET_WAIT_MS 1000
-
#define PCIE_HDP_INT_RX_BITS (0 \
| PCIE_HDP_INT_EP_TXDMA \
| PCIE_HDP_INT_EP_TXEMPTY \
@@ -68,17 +50,11 @@
#define QTN_HOST_ADDR(h, l) ((u32)l)
#endif
-#define QTN_SYSCTL_BAR 0
-#define QTN_SHMEM_BAR 2
-#define QTN_DMA_BAR 3
-
#define QTN_PCIE_BDA_VERSION 0x1002
#define PCIE_BDA_NAMELEN 32
#define PCIE_HHBM_MAX_SIZE 2048
-#define SKB_BUF_SIZE 2048
-
#define QTN_PCIE_BOARDFLG "PCIEQTN"
#define QTN_PCIE_FW_DLMASK 0xF
#define QTN_PCIE_FW_BUFSZ 2048
@@ -96,49 +72,6 @@
#define QTN_EP_LHOST_TQE_PORT 4
-enum qtnf_pcie_bda_ipc_flags {
- QTN_PCIE_IPC_FLAG_HBM_MAGIC = BIT(0),
- QTN_PCIE_IPC_FLAG_SHM_PIO = BIT(1),
-};
-
-struct qtnf_pcie_bda {
- __le16 bda_len;
- __le16 bda_version;
- __le32 bda_pci_endian;
- __le32 bda_ep_state;
- __le32 bda_rc_state;
- __le32 bda_dma_mask;
- __le32 bda_msi_addr;
- __le32 bda_flashsz;
- u8 bda_boardname[PCIE_BDA_NAMELEN];
- __le32 bda_rc_msi_enabled;
- u8 bda_hhbm_list[PCIE_HHBM_MAX_SIZE];
- __le32 bda_dsbw_start_index;
- __le32 bda_dsbw_end_index;
- __le32 bda_dsbw_total_bytes;
- __le32 bda_rc_tx_bd_base;
- __le32 bda_rc_tx_bd_num;
- u8 bda_pcie_mac[QTN_ENET_ADDR_LENGTH];
- struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); /* host TX */
- struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); /* host RX */
-} __packed;
-
-struct qtnf_tx_bd {
- __le32 addr;
- __le32 addr_h;
- __le32 info;
- __le32 info_h;
-} __packed;
-
-struct qtnf_rx_bd {
- __le32 addr;
- __le32 addr_h;
- __le32 info;
- __le32 info_h;
- __le32 next_ptr;
- __le32 next_ptr_h;
-} __packed;
-
enum qtnf_fw_loadtype {
QTN_FW_DBEGIN,
QTN_FW_DSUB,
@@ -146,13 +79,4 @@ enum qtnf_fw_loadtype {
QTN_FW_CTRL
};
-struct qtnf_pcie_fw_hdr {
- u8 boardflg[8];
- __le32 fwsize;
- __le32 seqnum;
- __le32 type;
- __le32 pktlen;
- __le32 crc;
-} __packed;
-
#endif /* _QTN_FMAC_PCIE_IPC_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
new file mode 100644
index 000000000000..6e9a5c61d46f
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2015 Quantenna Communications */
+
+#ifndef __PEARL_PCIE_H
+#define __PEARL_PCIE_H
+
+/* Pearl PCIe HDP registers */
+#define PCIE_HDP_CTRL(base) ((base) + 0x2c00)
+#define PCIE_HDP_AXI_CTRL(base) ((base) + 0x2c04)
+#define PCIE_HDP_HOST_WR_DESC0(base) ((base) + 0x2c10)
+#define PCIE_HDP_HOST_WR_DESC0_H(base) ((base) + 0x2c14)
+#define PCIE_HDP_HOST_WR_DESC1(base) ((base) + 0x2c18)
+#define PCIE_HDP_HOST_WR_DESC1_H(base) ((base) + 0x2c1c)
+#define PCIE_HDP_HOST_WR_DESC2(base) ((base) + 0x2c20)
+#define PCIE_HDP_HOST_WR_DESC2_H(base) ((base) + 0x2c24)
+#define PCIE_HDP_HOST_WR_DESC3(base) ((base) + 0x2c28)
+#define PCIE_HDP_HOST_WR_DESC4_H(base) ((base) + 0x2c2c)
+#define PCIE_HDP_RX_INT_CTRL(base) ((base) + 0x2c30)
+#define PCIE_HDP_TX_INT_CTRL(base) ((base) + 0x2c34)
+#define PCIE_HDP_INT_STATUS(base) ((base) + 0x2c38)
+#define PCIE_HDP_INT_EN(base) ((base) + 0x2c3c)
+#define PCIE_HDP_RX_DESC0_PTR(base) ((base) + 0x2c40)
+#define PCIE_HDP_RX_DESC0_NOE(base) ((base) + 0x2c44)
+#define PCIE_HDP_RX_DESC1_PTR(base) ((base) + 0x2c48)
+#define PCIE_HDP_RX_DESC1_NOE(base) ((base) + 0x2c4c)
+#define PCIE_HDP_RX_DESC2_PTR(base) ((base) + 0x2c50)
+#define PCIE_HDP_RX_DESC2_NOE(base) ((base) + 0x2c54)
+#define PCIE_HDP_RX_DESC3_PTR(base) ((base) + 0x2c58)
+#define PCIE_HDP_RX_DESC3_NOE(base) ((base) + 0x2c5c)
+
+#define PCIE_HDP_TX0_BASE_ADDR(base) ((base) + 0x2c60)
+#define PCIE_HDP_TX1_BASE_ADDR(base) ((base) + 0x2c64)
+#define PCIE_HDP_TX0_Q_CTRL(base) ((base) + 0x2c70)
+#define PCIE_HDP_TX1_Q_CTRL(base) ((base) + 0x2c74)
+#define PCIE_HDP_CFG0(base) ((base) + 0x2c80)
+#define PCIE_HDP_CFG1(base) ((base) + 0x2c84)
+#define PCIE_HDP_CFG2(base) ((base) + 0x2c88)
+#define PCIE_HDP_CFG3(base) ((base) + 0x2c8c)
+#define PCIE_HDP_CFG4(base) ((base) + 0x2c90)
+#define PCIE_HDP_CFG5(base) ((base) + 0x2c94)
+#define PCIE_HDP_CFG6(base) ((base) + 0x2c98)
+#define PCIE_HDP_CFG7(base) ((base) + 0x2c9c)
+#define PCIE_HDP_CFG8(base) ((base) + 0x2ca0)
+#define PCIE_HDP_CFG9(base) ((base) + 0x2ca4)
+#define PCIE_HDP_CFG10(base) ((base) + 0x2ca8)
+#define PCIE_HDP_CFG11(base) ((base) + 0x2cac)
+#define PCIE_INT(base) ((base) + 0x2cb0)
+#define PCIE_INT_MASK(base) ((base) + 0x2cb4)
+#define PCIE_MSI_MASK(base) ((base) + 0x2cb8)
+#define PCIE_MSI_PNDG(base) ((base) + 0x2cbc)
+#define PCIE_PRI_CFG(base) ((base) + 0x2cc0)
+#define PCIE_PHY_CR(base) ((base) + 0x2cc4)
+#define PCIE_HDP_CTAG_CTRL(base) ((base) + 0x2cf4)
+#define PCIE_HDP_HHBM_BUF_PTR(base) ((base) + 0x2d00)
+#define PCIE_HDP_HHBM_BUF_PTR_H(base) ((base) + 0x2d04)
+#define PCIE_HDP_HHBM_BUF_FIFO_NOE(base) ((base) + 0x2d04)
+#define PCIE_HDP_RX0DMA_CNT(base) ((base) + 0x2d10)
+#define PCIE_HDP_RX1DMA_CNT(base) ((base) + 0x2d14)
+#define PCIE_HDP_RX2DMA_CNT(base) ((base) + 0x2d18)
+#define PCIE_HDP_RX3DMA_CNT(base) ((base) + 0x2d1c)
+#define PCIE_HDP_TX0DMA_CNT(base) ((base) + 0x2d20)
+#define PCIE_HDP_TX1DMA_CNT(base) ((base) + 0x2d24)
+#define PCIE_HDP_RXDMA_CTRL(base) ((base) + 0x2d28)
+#define PCIE_HDP_TX_HOST_Q_SZ_CTRL(base) ((base) + 0x2d2c)
+#define PCIE_HDP_TX_HOST_Q_BASE_L(base) ((base) + 0x2d30)
+#define PCIE_HDP_TX_HOST_Q_BASE_H(base) ((base) + 0x2d34)
+#define PCIE_HDP_TX_HOST_Q_WR_PTR(base) ((base) + 0x2d38)
+#define PCIE_HDP_TX_HOST_Q_RD_PTR(base) ((base) + 0x2d3c)
+#define PCIE_HDP_TX_HOST_Q_STS(base) ((base) + 0x2d40)
+
+/* Pearl PCIe HBM pool registers */
+#define PCIE_HHBM_CSR_REG(base) ((base) + 0x2e00)
+#define PCIE_HHBM_Q_BASE_REG(base) ((base) + 0x2e04)
+#define PCIE_HHBM_Q_LIMIT_REG(base) ((base) + 0x2e08)
+#define PCIE_HHBM_Q_WR_REG(base) ((base) + 0x2e0c)
+#define PCIE_HHBM_Q_RD_REG(base) ((base) + 0x2e10)
+#define PCIE_HHBM_POOL_DATA_0_H(base) ((base) + 0x2e90)
+#define PCIE_HHBM_CONFIG(base) ((base) + 0x2f9c)
+#define PCIE_HHBM_POOL_REQ_0(base) ((base) + 0x2f10)
+#define PCIE_HHBM_POOL_DATA_0(base) ((base) + 0x2f40)
+#define PCIE_HHBM_WATERMARK_MASKED_INT(base) ((base) + 0x2f68)
+#define PCIE_HHBM_WATERMARK_INT(base) ((base) + 0x2f6c)
+#define PCIE_HHBM_POOL_WATERMARK(base) ((base) + 0x2f70)
+#define PCIE_HHBM_POOL_OVERFLOW_CNT(base) ((base) + 0x2f90)
+#define PCIE_HHBM_POOL_UNDERFLOW_CNT(base) ((base) + 0x2f94)
+#define HBM_INT_STATUS(base) ((base) + 0x2f9c)
+#define PCIE_HHBM_POOL_CNFIG(base) ((base) + 0x2f9c)
+
+/* Pearl PCIe HBM bit field definitions */
+#define HHBM_CONFIG_SOFT_RESET (BIT(8))
+#define HHBM_WR_REQ (BIT(0))
+#define HHBM_RD_REQ (BIT(1))
+#define HHBM_DONE (BIT(31))
+#define HHBM_64BIT (BIT(10))
+
+/* PCIe HDP interrupt status definition */
+#define PCIE_HDP_INT_EP_RXDMA (BIT(0))
+#define PCIE_HDP_INT_HBM_UF (BIT(1))
+#define PCIE_HDP_INT_RX_LEN_ERR (BIT(2))
+#define PCIE_HDP_INT_RX_HDR_LEN_ERR (BIT(3))
+#define PCIE_HDP_INT_EP_TXDMA (BIT(12))
+#define PCIE_HDP_INT_HHBM_UF (BIT(13))
+#define PCIE_HDP_INT_EP_TXEMPTY (BIT(15))
+#define PCIE_HDP_INT_IPC (BIT(29))
+
+/* PCIe interrupt status definition */
+#define PCIE_INT_MSI (BIT(24))
+#define PCIE_INT_INTX (BIT(23))
+
+/* PCIe legacy INTx */
+#define PEARL_PCIE_CFG0_OFFSET (0x6C)
+#define PEARL_ASSERT_INTX (BIT(9))
+
+/* SYS CTL regs */
+#define QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET (0x001C)
+
+#define QTN_PEARL_IPC_IRQ_WORD(irq) (BIT(irq) | BIT(irq + 16))
+#define QTN_PEARL_LHOST_IPC_IRQ (6)
+#define QTN_PEARL_LHOST_EP_RESET (7)
+
+#endif /* __PEARL_PCIE_H */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
deleted file mode 100644
index 3120d49df565..000000000000
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ /dev/null
@@ -1,1494 +0,0 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/firmware.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/crc32.h>
-#include <linux/spinlock.h>
-#include <linux/circ_buf.h>
-#include <linux/log2.h>
-
-#include "qtn_hw_ids.h"
-#include "pcie_bus_priv.h"
-#include "core.h"
-#include "bus.h"
-#include "debug.h"
-
-static bool use_msi = true;
-module_param(use_msi, bool, 0644);
-MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt");
-
-static unsigned int tx_bd_size_param = 32;
-module_param(tx_bd_size_param, uint, 0644);
-MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two");
-
-static unsigned int rx_bd_size_param = 256;
-module_param(rx_bd_size_param, uint, 0644);
-MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two");
-
-static u8 flashboot = 1;
-module_param(flashboot, byte, 0644);
-MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");
-
-#define DRV_NAME "qtnfmac_pearl_pcie"
-
-static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg)
-{
- writel(val, basereg);
-
- /* flush posted write */
- readl(basereg);
-}
-
-static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->irq_lock, flags);
- priv->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS);
- spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->irq_lock, flags);
- writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
- spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_bus_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->irq_lock, flags);
- writel(0x0, PCIE_HDP_INT_EN(priv->pcie_reg_base));
- spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_bus_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->irq_lock, flags);
- priv->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS;
- writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
- spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_bus_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->irq_lock, flags);
- priv->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS;
- writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
- spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_en_txdone_irq(struct qtnf_pcie_bus_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->irq_lock, flags);
- priv->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS;
- writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
- spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_bus_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->irq_lock, flags);
- priv->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS;
- writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base));
- spin_unlock_irqrestore(&priv->irq_lock, flags);
-}
-
-static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv)
-{
- struct pci_dev *pdev = priv->pdev;
-
- /* fall back to legacy INTx interrupts by default */
- priv->msi_enabled = 0;
-
- /* check if MSI capability is available */
- if (use_msi) {
- if (!pci_enable_msi(pdev)) {
- pr_debug("MSI interrupt enabled\n");
- priv->msi_enabled = 1;
- } else {
- pr_warn("failed to enable MSI interrupts");
- }
- }
-
- if (!priv->msi_enabled) {
- pr_warn("legacy PCIE interrupts enabled\n");
- pci_intx(pdev, 1);
- }
-}
-
-static void qtnf_deassert_intx(struct qtnf_pcie_bus_priv *priv)
-{
- void __iomem *reg = priv->sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
- u32 cfg;
-
- cfg = readl(reg);
- cfg &= ~PEARL_ASSERT_INTX;
- qtnf_non_posted_write(cfg, reg);
-}
-
-static void qtnf_reset_card(struct qtnf_pcie_bus_priv *priv)
-{
- const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET);
- void __iomem *reg = priv->sysctl_bar +
- QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
-
- qtnf_non_posted_write(data, reg);
- msleep(QTN_EP_RESET_WAIT_MS);
- pci_restore_state(priv->pdev);
-}
-
-static void qtnf_ipc_gen_ep_int(void *arg)
-{
- const struct qtnf_pcie_bus_priv *priv = arg;
- const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
- void __iomem *reg = priv->sysctl_bar +
- QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;
-
- qtnf_non_posted_write(data, reg);
-}
-
-static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
-{
- void __iomem *vaddr;
- dma_addr_t busaddr;
- size_t len;
- int ret;
-
- ret = pcim_iomap_regions(priv->pdev, 1 << index, DRV_NAME);
- if (ret)
- return IOMEM_ERR_PTR(ret);
-
- busaddr = pci_resource_start(priv->pdev, index);
- len = pci_resource_len(priv->pdev, index);
- vaddr = pcim_iomap_table(priv->pdev)[index];
- if (!vaddr)
- return IOMEM_ERR_PTR(-ENOMEM);
-
- pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n",
- index, vaddr, &busaddr, (int)len);
-
- return vaddr;
-}
-
-static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len)
-{
- struct qtnf_pcie_bus_priv *priv = arg;
- struct qtnf_bus *bus = pci_get_drvdata(priv->pdev);
- struct sk_buff *skb;
-
- if (unlikely(len == 0)) {
- pr_warn("zero length packet received\n");
- return;
- }
-
- skb = __dev_alloc_skb(len, GFP_KERNEL);
-
- if (unlikely(!skb)) {
- pr_err("failed to allocate skb\n");
- return;
- }
-
- skb_put_data(skb, buf, len);
-
- qtnf_trans_handle_rx_ctl_packet(bus, skb);
-}
-
-static int qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv)
-{
- struct qtnf_shm_ipc_region __iomem *ipc_tx_reg;
- struct qtnf_shm_ipc_region __iomem *ipc_rx_reg;
- const struct qtnf_shm_ipc_int ipc_int = { qtnf_ipc_gen_ep_int, priv };
- const struct qtnf_shm_ipc_rx_callback rx_callback = {
- qtnf_pcie_control_rx_callback, priv };
-
- ipc_tx_reg = &priv->bda->bda_shm_reg1;
- ipc_rx_reg = &priv->bda->bda_shm_reg2;
-
- qtnf_shm_ipc_init(&priv->shm_ipc_ep_in, QTNF_SHM_IPC_OUTBOUND,
- ipc_tx_reg, priv->workqueue,
- &ipc_int, &rx_callback);
- qtnf_shm_ipc_init(&priv->shm_ipc_ep_out, QTNF_SHM_IPC_INBOUND,
- ipc_rx_reg, priv->workqueue,
- &ipc_int, &rx_callback);
-
- return 0;
-}
-
-static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv)
-{
- qtnf_shm_ipc_free(&priv->shm_ipc_ep_in);
- qtnf_shm_ipc_free(&priv->shm_ipc_ep_out);
-}
-
-static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
-{
- int ret = -ENOMEM;
-
- priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
- if (IS_ERR(priv->sysctl_bar)) {
- pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
- return ret;
- }
-
- priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR);
- if (IS_ERR(priv->dmareg_bar)) {
- pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
- return ret;
- }
-
- priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR);
- if (IS_ERR(priv->epmem_bar)) {
- pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
- return ret;
- }
-
- priv->pcie_reg_base = priv->dmareg_bar;
- priv->bda = priv->epmem_bar;
- writel(priv->msi_enabled, &priv->bda->bda_rc_msi_enabled);
-
- return 0;
-}
-
-static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
-{
- struct pci_dev *pdev = priv->pdev;
- struct pci_dev *parent;
- int mps_p, mps_o, mps_m, mps;
- int ret;
-
- /* current mps */
- mps_o = pcie_get_mps(pdev);
-
- /* maximum supported mps */
- mps_m = 128 << pdev->pcie_mpss;
-
- /* suggested new mps value */
- mps = mps_m;
-
- if (pdev->bus && pdev->bus->self) {
- /* parent (bus) mps */
- parent = pdev->bus->self;
-
- if (pci_is_pcie(parent)) {
- mps_p = pcie_get_mps(parent);
- mps = min(mps_m, mps_p);
- }
- }
-
- ret = pcie_set_mps(pdev, mps);
- if (ret) {
- pr_err("failed to set mps to %d, keep using current %d\n",
- mps, mps_o);
- priv->mps = mps_o;
- return;
- }
-
- pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m);
- priv->mps = mps;
-}
-
-static int qtnf_is_state(__le32 __iomem *reg, u32 state)
-{
- u32 s = readl(reg);
-
- return s & state;
-}
-
-static void qtnf_set_state(__le32 __iomem *reg, u32 state)
-{
- u32 s = readl(reg);
-
- qtnf_non_posted_write(state | s, reg);
-}
-
-static void qtnf_clear_state(__le32 __iomem *reg, u32 state)
-{
- u32 s = readl(reg);
-
- qtnf_non_posted_write(s & ~state, reg);
-}
-
-static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
-{
- u32 timeout = 0;
-
- while ((qtnf_is_state(reg, state) == 0)) {
- usleep_range(1000, 1200);
- if (++timeout > delay_in_ms)
- return -1;
- }
-
- return 0;
-}
-
-static int alloc_skb_array(struct qtnf_pcie_bus_priv *priv)
-{
- struct sk_buff **vaddr;
- int len;
-
- len = priv->tx_bd_num * sizeof(*priv->tx_skb) +
- priv->rx_bd_num * sizeof(*priv->rx_skb);
- vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL);
-
- if (!vaddr)
- return -ENOMEM;
-
- priv->tx_skb = vaddr;
-
- vaddr += priv->tx_bd_num;
- priv->rx_skb = vaddr;
-
- return 0;
-}
-
-static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv)
-{
- dma_addr_t paddr;
- void *vaddr;
- int len;
-
- len = priv->tx_bd_num * sizeof(struct qtnf_tx_bd) +
- priv->rx_bd_num * sizeof(struct qtnf_rx_bd);
-
- vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
- if (!vaddr)
- return -ENOMEM;
-
- /* tx bd */
-
- memset(vaddr, 0, len);
-
- priv->bd_table_vaddr = vaddr;
- priv->bd_table_paddr = paddr;
- priv->bd_table_len = len;
-
- priv->tx_bd_vbase = vaddr;
- priv->tx_bd_pbase = paddr;
-
- pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
-
- priv->tx_bd_r_index = 0;
- priv->tx_bd_w_index = 0;
-
- /* rx bd */
-
- vaddr = ((struct qtnf_tx_bd *)vaddr) + priv->tx_bd_num;
- paddr += priv->tx_bd_num * sizeof(struct qtnf_tx_bd);
-
- priv->rx_bd_vbase = vaddr;
- priv->rx_bd_pbase = paddr;
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(QTN_HOST_HI32(paddr),
- PCIE_HDP_TX_HOST_Q_BASE_H(priv->pcie_reg_base));
-#endif
- writel(QTN_HOST_LO32(paddr),
- PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base));
- writel(priv->rx_bd_num | (sizeof(struct qtnf_rx_bd)) << 16,
- PCIE_HDP_TX_HOST_Q_SZ_CTRL(priv->pcie_reg_base));
-
- pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
-
- return 0;
-}
-
-static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 index)
-{
- struct qtnf_rx_bd *rxbd;
- struct sk_buff *skb;
- dma_addr_t paddr;
-
- skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
- if (!skb) {
- priv->rx_skb[index] = NULL;
- return -ENOMEM;
- }
-
- priv->rx_skb[index] = skb;
- rxbd = &priv->rx_bd_vbase[index];
-
- paddr = pci_map_single(priv->pdev, skb->data,
- SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(priv->pdev, paddr)) {
- pr_err("skb DMA mapping error: %pad\n", &paddr);
- return -ENOMEM;
- }
-
- /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */
- rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
- rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
- rxbd->info = 0x0;
-
- priv->rx_bd_w_index = index;
-
- /* sync up all descriptor updates */
- wmb();
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(QTN_HOST_HI32(paddr),
- PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base));
-#endif
- writel(QTN_HOST_LO32(paddr),
- PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base));
-
- writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base));
- return 0;
-}
-
-static int alloc_rx_buffers(struct qtnf_pcie_bus_priv *priv)
-{
- u16 i;
- int ret = 0;
-
- memset(priv->rx_bd_vbase, 0x0,
- priv->rx_bd_num * sizeof(struct qtnf_rx_bd));
-
- for (i = 0; i < priv->rx_bd_num; i++) {
- ret = skb2rbd_attach(priv, i);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-/* all rx/tx activity should have ceased before calling this function */
-static void qtnf_free_xfer_buffers(struct qtnf_pcie_bus_priv *priv)
-{
- struct qtnf_tx_bd *txbd;
- struct qtnf_rx_bd *rxbd;
- struct sk_buff *skb;
- dma_addr_t paddr;
- int i;
-
- /* free rx buffers */
- for (i = 0; i < priv->rx_bd_num; i++) {
- if (priv->rx_skb && priv->rx_skb[i]) {
- rxbd = &priv->rx_bd_vbase[i];
- skb = priv->rx_skb[i];
- paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
- le32_to_cpu(rxbd->addr));
- pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any(skb);
- priv->rx_skb[i] = NULL;
- }
- }
-
- /* free tx buffers */
- for (i = 0; i < priv->tx_bd_num; i++) {
- if (priv->tx_skb && priv->tx_skb[i]) {
- txbd = &priv->tx_bd_vbase[i];
- skb = priv->tx_skb[i];
- paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
- le32_to_cpu(txbd->addr));
- pci_unmap_single(priv->pdev, paddr, skb->len,
- PCI_DMA_TODEVICE);
- dev_kfree_skb_any(skb);
- priv->tx_skb[i] = NULL;
- }
- }
-}
-
-static int qtnf_hhbm_init(struct qtnf_pcie_bus_priv *priv)
-{
- u32 val;
-
- val = readl(PCIE_HHBM_CONFIG(priv->pcie_reg_base));
- val |= HHBM_CONFIG_SOFT_RESET;
- writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base));
- usleep_range(50, 100);
- val &= ~HHBM_CONFIG_SOFT_RESET;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- val |= HHBM_64BIT;
-#endif
- writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base));
- writel(priv->rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(priv->pcie_reg_base));
-
- return 0;
-}
-
-static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv)
-{
- int ret;
- u32 val;
-
- priv->tx_bd_num = tx_bd_size_param;
- priv->rx_bd_num = rx_bd_size_param;
- priv->rx_bd_w_index = 0;
- priv->rx_bd_r_index = 0;
-
- if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) {
- pr_err("tx_bd_size_param %u is not power of two\n",
- priv->tx_bd_num);
- return -EINVAL;
- }
-
- val = priv->tx_bd_num * sizeof(struct qtnf_tx_bd);
- if (val > PCIE_HHBM_MAX_SIZE) {
- pr_err("tx_bd_size_param %u is too large\n",
- priv->tx_bd_num);
- return -EINVAL;
- }
-
- if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
- pr_err("rx_bd_size_param %u is not power of two\n",
- priv->rx_bd_num);
- return -EINVAL;
- }
-
- val = priv->rx_bd_num * sizeof(dma_addr_t);
- if (val > PCIE_HHBM_MAX_SIZE) {
- pr_err("rx_bd_size_param %u is too large\n",
- priv->rx_bd_num);
- return -EINVAL;
- }
-
- ret = qtnf_hhbm_init(priv);
- if (ret) {
- pr_err("failed to init h/w queues\n");
- return ret;
- }
-
- ret = alloc_skb_array(priv);
- if (ret) {
- pr_err("failed to allocate skb array\n");
- return ret;
- }
-
- ret = alloc_bd_table(priv);
- if (ret) {
- pr_err("failed to allocate bd table\n");
- return ret;
- }
-
- ret = alloc_rx_buffers(priv);
- if (ret) {
- pr_err("failed to allocate rx buffers\n");
- return ret;
- }
-
- return ret;
-}
-
-static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
-{
- struct qtnf_tx_bd *txbd;
- struct sk_buff *skb;
- unsigned long flags;
- dma_addr_t paddr;
- u32 tx_done_index;
- int count = 0;
- int i;
-
- spin_lock_irqsave(&priv->tx_reclaim_lock, flags);
-
- tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
- & (priv->tx_bd_num - 1);
-
- i = priv->tx_bd_r_index;
-
- while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
- skb = priv->tx_skb[i];
- if (likely(skb)) {
- txbd = &priv->tx_bd_vbase[i];
- paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
- le32_to_cpu(txbd->addr));
- pci_unmap_single(priv->pdev, paddr, skb->len,
- PCI_DMA_TODEVICE);
-
- if (skb->dev) {
- qtnf_update_tx_stats(skb->dev, skb);
- if (unlikely(priv->tx_stopped)) {
- qtnf_wake_all_queues(skb->dev);
- priv->tx_stopped = 0;
- }
- }
-
- dev_kfree_skb_any(skb);
- }
-
- priv->tx_skb[i] = NULL;
- count++;
-
- if (++i >= priv->tx_bd_num)
- i = 0;
- }
-
- priv->tx_reclaim_done += count;
- priv->tx_reclaim_req++;
- priv->tx_bd_r_index = i;
-
- spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
-}
-
-static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv)
-{
- if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
- priv->tx_bd_num)) {
- qtnf_pcie_data_tx_reclaim(priv);
-
- if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
- priv->tx_bd_num)) {
- pr_warn_ratelimited("reclaim full Tx queue\n");
- priv->tx_full_count++;
- return 0;
- }
- }
-
- return 1;
-}
-
-static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
-{
- struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
- dma_addr_t txbd_paddr, skb_paddr;
- struct qtnf_tx_bd *txbd;
- unsigned long flags;
- int len, i;
- u32 info;
- int ret = 0;
-
- spin_lock_irqsave(&priv->tx0_lock, flags);
-
- if (!qtnf_tx_queue_ready(priv)) {
- if (skb->dev) {
- netif_tx_stop_all_queues(skb->dev);
- priv->tx_stopped = 1;
- }
-
- spin_unlock_irqrestore(&priv->tx0_lock, flags);
- return NETDEV_TX_BUSY;
- }
-
- i = priv->tx_bd_w_index;
- priv->tx_skb[i] = skb;
- len = skb->len;
-
- skb_paddr = pci_map_single(priv->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
- pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
- ret = -ENOMEM;
- goto tx_done;
- }
-
- txbd = &priv->tx_bd_vbase[i];
- txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
- txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));
-
- info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
- txbd->info = cpu_to_le32(info);
-
- /* sync up all descriptor updates before passing them to EP */
- dma_wmb();
-
- /* write new TX descriptor to PCIE_RX_FIFO on EP */
- txbd_paddr = priv->tx_bd_pbase + i * sizeof(struct qtnf_tx_bd);
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(QTN_HOST_HI32(txbd_paddr),
- PCIE_HDP_HOST_WR_DESC0_H(priv->pcie_reg_base));
-#endif
- writel(QTN_HOST_LO32(txbd_paddr),
- PCIE_HDP_HOST_WR_DESC0(priv->pcie_reg_base));
-
- if (++i >= priv->tx_bd_num)
- i = 0;
-
- priv->tx_bd_w_index = i;
-
-tx_done:
- if (ret && skb) {
- pr_err_ratelimited("drop skb\n");
- if (skb->dev)
- skb->dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- }
-
- priv->tx_done_count++;
- spin_unlock_irqrestore(&priv->tx0_lock, flags);
-
- qtnf_pcie_data_tx_reclaim(priv);
-
- return NETDEV_TX_OK;
-}
-
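A note on the barrier in qtnf_pcie_data_tx(): dma_wmb() orders the descriptor field stores against the doorbell writel(), so the endpoint can never DMA-read a half-written descriptor. A hedged userspace model of that publish pattern — a C11 release store plays the role of dma_wmb() plus the doorbell write, and a second thread stands in for the device; every name here is illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct desc { uint32_t addr, len; };

static struct desc ring[1];
static atomic_uint doorbell;	/* stands in for the HDP write-pointer register */

static void *device(void *arg)
{
	/* Consumer side: poll the "register", then read the descriptor. */
	while (atomic_load_explicit(&doorbell, memory_order_acquire) == 0)
		;
	printf("device sees addr=0x%x len=%u\n",
	       (unsigned)ring[0].addr, (unsigned)ring[0].len);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, device, NULL);

	ring[0].addr = 0x1000;	/* fill the descriptor fields... */
	ring[0].len  = 64;

	/* ...then publish. This release ordering is the role dma_wmb()
	 * plays against the device before the doorbell writel(). */
	atomic_store_explicit(&doorbell, 1, memory_order_release);

	pthread_join(t, NULL);
	return 0;
}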
-static int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
-{
- struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
- int ret;
-
- ret = qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len);
-
- if (ret == -ETIMEDOUT) {
- pr_err("EP firmware is dead\n");
- bus->fw_state = QTNF_FW_STATE_EP_DEAD;
- }
-
- return ret;
-}
-
-static irqreturn_t qtnf_interrupt(int irq, void *data)
-{
- struct qtnf_bus *bus = (struct qtnf_bus *)data;
- struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
- u32 status;
-
- priv->pcie_irq_count++;
- status = readl(PCIE_HDP_INT_STATUS(priv->pcie_reg_base));
-
- qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
- qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);
-
- if (!(status & priv->pcie_irq_mask))
- goto irq_done;
-
- if (status & PCIE_HDP_INT_RX_BITS)
- priv->pcie_irq_rx_count++;
-
- if (status & PCIE_HDP_INT_TX_BITS)
- priv->pcie_irq_tx_count++;
-
- if (status & PCIE_HDP_INT_HHBM_UF)
- priv->pcie_irq_uf_count++;
-
- if (status & PCIE_HDP_INT_RX_BITS) {
- qtnf_dis_rxdone_irq(priv);
- napi_schedule(&bus->mux_napi);
- }
-
- if (status & PCIE_HDP_INT_TX_BITS) {
- qtnf_dis_txdone_irq(priv);
- tasklet_hi_schedule(&priv->reclaim_tq);
- }
-
-irq_done:
- /* H/W workaround: clean all bits, not only enabled */
- qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(priv->pcie_reg_base));
-
- if (!priv->msi_enabled)
- qtnf_deassert_intx(priv);
-
- return IRQ_HANDLED;
-}
-
-static int qtnf_rx_data_ready(struct qtnf_pcie_bus_priv *priv)
-{
- u16 index = priv->rx_bd_r_index;
- struct qtnf_rx_bd *rxbd;
- u32 descw;
-
- rxbd = &priv->rx_bd_vbase[index];
- descw = le32_to_cpu(rxbd->info);
-
- if (descw & QTN_TXDONE_MASK)
- return 1;
-
- return 0;
-}
-
-static int qtnf_rx_poll(struct napi_struct *napi, int budget)
-{
- struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
- struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
- struct net_device *ndev = NULL;
- struct sk_buff *skb = NULL;
- int processed = 0;
- struct qtnf_rx_bd *rxbd;
- dma_addr_t skb_paddr;
- int consume;
- u32 descw;
- u32 psize;
- u16 r_idx;
- u16 w_idx;
- int ret;
-
- while (processed < budget) {
-
-
- if (!qtnf_rx_data_ready(priv))
- goto rx_out;
-
- r_idx = priv->rx_bd_r_index;
- rxbd = &priv->rx_bd_vbase[r_idx];
- descw = le32_to_cpu(rxbd->info);
-
- skb = priv->rx_skb[r_idx];
- psize = QTN_GET_LEN(descw);
- consume = 1;
-
- if (!(descw & QTN_TXDONE_MASK)) {
- pr_warn("skip invalid rxbd[%d]\n", r_idx);
- consume = 0;
- }
-
- if (!skb) {
- pr_warn("skip missing rx_skb[%d]\n", r_idx);
- consume = 0;
- }
-
- if (skb && (skb_tailroom(skb) < psize)) {
- pr_err("skip packet with invalid length: %u > %u\n",
- psize, skb_tailroom(skb));
- consume = 0;
- }
-
- if (skb) {
- skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
- le32_to_cpu(rxbd->addr));
- pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- }
-
- if (consume) {
- skb_put(skb, psize);
- ndev = qtnf_classify_skb(bus, skb);
- if (likely(ndev)) {
- qtnf_update_rx_stats(ndev, skb);
- skb->protocol = eth_type_trans(skb, ndev);
- napi_gro_receive(napi, skb);
- } else {
- pr_debug("drop untagged skb\n");
- bus->mux_dev.stats.rx_dropped++;
- dev_kfree_skb_any(skb);
- }
- } else {
- if (skb) {
- bus->mux_dev.stats.rx_dropped++;
- dev_kfree_skb_any(skb);
- }
- }
-
- priv->rx_skb[r_idx] = NULL;
- if (++r_idx >= priv->rx_bd_num)
- r_idx = 0;
-
- priv->rx_bd_r_index = r_idx;
-
- /* replace the processed buffer with a new one */
- w_idx = priv->rx_bd_w_index;
- while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
- priv->rx_bd_num) > 0) {
- if (++w_idx >= priv->rx_bd_num)
- w_idx = 0;
-
- ret = skb2rbd_attach(priv, w_idx);
- if (ret) {
- pr_err("failed to allocate new rx_skb[%d]\n",
- w_idx);
- break;
- }
- }
-
- processed++;
- }
-
-rx_out:
- if (processed < budget) {
- napi_complete(napi);
- qtnf_en_rxdone_irq(priv);
- }
-
- return processed;
-}
-
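qtnf_rx_poll() follows the standard NAPI contract: consume at most budget packets per invocation; only when the ring drains early (processed < budget) call napi_complete() and re-enable the RX interrupt, while returning the full budget keeps the poller scheduled. A toy model of that contract (the driver registers with a weight of 10; the counts below are arbitrary):

#include <stdio.h>

static int ring_pending = 70;	/* packets waiting in the RX ring */

static int napi_poll(int budget)
{
	int done = 0;

	while (done < budget && ring_pending > 0) {
		ring_pending--;	/* "deliver" one packet */
		done++;
	}
	if (done < budget)	/* drained: napi_complete() + re-enable IRQ */
		printf("ring drained, re-enabling RX interrupt\n");
	return done;
}

int main(void)
{
	int n;

	do {
		n = napi_poll(64);
		printf("polled %d\n", n);
	} while (n == 64);	/* full budget => stay scheduled */
	return 0;
}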
-static void
-qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
-{
- struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-
- tasklet_hi_schedule(&priv->reclaim_tq);
-}
-
-static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
-{
- struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-
- qtnf_enable_hdp_irqs(priv);
- napi_enable(&bus->mux_napi);
-}
-
-static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
-{
- struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
-
- napi_disable(&bus->mux_napi);
- qtnf_disable_hdp_irqs(priv);
-}
-
-static const struct qtnf_bus_ops qtnf_pcie_bus_ops = {
- /* control path methods */
- .control_tx = qtnf_pcie_control_tx,
-
- /* data path methods */
- .data_tx = qtnf_pcie_data_tx,
- .data_tx_timeout = qtnf_pcie_data_tx_timeout,
- .data_rx_start = qtnf_pcie_data_rx_start,
- .data_rx_stop = qtnf_pcie_data_rx_stop,
-};
-
-static int qtnf_dbg_mps_show(struct seq_file *s, void *data)
-{
- struct qtnf_bus *bus = dev_get_drvdata(s->private);
- struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
-
- seq_printf(s, "%d\n", priv->mps);
-
- return 0;
-}
-
-static int qtnf_dbg_msi_show(struct seq_file *s, void *data)
-{
- struct qtnf_bus *bus = dev_get_drvdata(s->private);
- struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
-
- seq_printf(s, "%u\n", priv->msi_enabled);
-
- return 0;
-}
-
-static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
-{
- struct qtnf_bus *bus = dev_get_drvdata(s->private);
- struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
- u32 reg = readl(PCIE_HDP_INT_EN(priv->pcie_reg_base));
- u32 status;
-
- seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count);
- seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count);
- status = reg & PCIE_HDP_INT_TX_BITS;
- seq_printf(s, "pcie_irq_tx_status(%s)\n",
- (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS");
- seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count);
- status = reg & PCIE_HDP_INT_RX_BITS;
- seq_printf(s, "pcie_irq_rx_status(%s)\n",
- (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS");
- seq_printf(s, "pcie_irq_uf_count(%u)\n", priv->pcie_irq_uf_count);
- status = reg & PCIE_HDP_INT_HHBM_UF;
- seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n",
- (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS");
-
- return 0;
-}
-
-static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
-{
- struct qtnf_bus *bus = dev_get_drvdata(s->private);
- struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
-
- seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
- seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
- seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
- seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
-
- seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
- seq_printf(s, "tx_bd_p_index(%u)\n",
- readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
- & (priv->tx_bd_num - 1));
- seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
- seq_printf(s, "tx queue len(%u)\n",
- CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
- priv->tx_bd_num));
-
- seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
- seq_printf(s, "rx_bd_p_index(%u)\n",
- readl(PCIE_HDP_TX0DMA_CNT(priv->pcie_reg_base))
- & (priv->rx_bd_num - 1));
- seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
- seq_printf(s, "rx alloc queue len(%u)\n",
- CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
- priv->rx_bd_num));
-
- return 0;
-}
-
-static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
-{
- struct qtnf_bus *bus = dev_get_drvdata(s->private);
- struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
-
- seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n",
- priv->shm_ipc_ep_in.tx_packet_count);
- seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n",
- priv->shm_ipc_ep_in.rx_packet_count);
- seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n",
- priv->shm_ipc_ep_out.tx_timeout_count);
- seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n",
- priv->shm_ipc_ep_out.rx_packet_count);
-
- return 0;
-}
-
-static int qtnf_ep_fw_send(struct qtnf_pcie_bus_priv *priv, uint32_t size,
- int blk, const u8 *pblk, const u8 *fw)
-{
- struct pci_dev *pdev = priv->pdev;
- struct qtnf_bus *bus = pci_get_drvdata(pdev);
-
- struct qtnf_pcie_fw_hdr *hdr;
- u8 *pdata;
-
- int hds = sizeof(*hdr);
- struct sk_buff *skb = NULL;
- int len = 0;
- int ret;
-
- skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- skb->len = QTN_PCIE_FW_BUFSZ;
- skb->dev = NULL;
-
- hdr = (struct qtnf_pcie_fw_hdr *)skb->data;
- memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
- hdr->fwsize = cpu_to_le32(size);
- hdr->seqnum = cpu_to_le32(blk);
-
- if (blk)
- hdr->type = cpu_to_le32(QTN_FW_DSUB);
- else
- hdr->type = cpu_to_le32(QTN_FW_DBEGIN);
-
- pdata = skb->data + hds;
-
- len = QTN_PCIE_FW_BUFSZ - hds;
- if (pblk >= (fw + size - len)) {
- len = fw + size - pblk;
- hdr->type = cpu_to_le32(QTN_FW_DEND);
- }
-
- hdr->pktlen = cpu_to_le32(len);
- memcpy(pdata, pblk, len);
- hdr->crc = cpu_to_le32(~crc32(0, pdata, len));
-
- ret = qtnf_pcie_data_tx(bus, skb);
-
- return (ret == NETDEV_TX_OK) ? len : 0;
-}
-
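qtnf_ep_fw_send() wraps each chunk of the image in a qtnf_pcie_fw_hdr carrying the total size, a sequence number, a DBEGIN/DSUB/DEND type tag and an inverted CRC32 of the payload. A sketch of the resulting block schedule; the buffer and header sizes are placeholders, since the real QTN_PCIE_FW_BUFSZ and header layout live in pcie_ipc.h:

#include <stdio.h>

#define BUFSZ	2048	/* illustrative stand-in for QTN_PCIE_FW_BUFSZ */
#define HDRSZ	32	/* illustrative stand-in for sizeof(struct qtnf_pcie_fw_hdr) */

int main(void)
{
	unsigned int fw_size = 9000;	/* hypothetical image size */
	unsigned int blk_size = BUFSZ - HDRSZ;
	unsigned int blk_count = fw_size / blk_size +
				 (fw_size % blk_size ? 1 : 0);

	for (unsigned int blk = 0; blk < blk_count; blk++) {
		unsigned int off = blk * blk_size;
		unsigned int len = fw_size - off < blk_size ? fw_size - off
							    : blk_size;

		/* DEND takes priority, matching the driver: the final-chunk
		 * check overrides DBEGIN for a single-block image too. */
		printf("blk %u: %-6s pktlen=%u\n", blk,
		       blk == blk_count - 1 ? "DEND" :
		       blk == 0 ? "DBEGIN" : "DSUB",
		       len);
	}
	return 0;
}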
-static int
-qtnf_ep_fw_load(struct qtnf_pcie_bus_priv *priv, const u8 *fw, u32 fw_size)
-{
- int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pcie_fw_hdr);
- int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
- const u8 *pblk = fw;
- int threshold = 0;
- int blk = 0;
- int len;
-
- pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);
-
- while (blk < blk_count) {
- if (++threshold > 10000) {
- pr_err("FW upload failed: too many retries\n");
- return -ETIMEDOUT;
- }
-
- len = qtnf_ep_fw_send(priv, fw_size, blk, pblk, fw);
- if (len <= 0)
- continue;
-
- if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
- (blk == (blk_count - 1))) {
- qtnf_set_state(&priv->bda->bda_rc_state,
- QTN_RC_FW_SYNC);
- if (qtnf_poll_state(&priv->bda->bda_ep_state,
- QTN_EP_FW_SYNC,
- QTN_FW_DL_TIMEOUT_MS)) {
- pr_err("FW upload failed: SYNC timed out\n");
- return -ETIMEDOUT;
- }
-
- qtnf_clear_state(&priv->bda->bda_ep_state,
- QTN_EP_FW_SYNC);
-
- if (qtnf_is_state(&priv->bda->bda_ep_state,
- QTN_EP_FW_RETRY)) {
- if (blk == (blk_count - 1)) {
- int last_round =
- blk_count & QTN_PCIE_FW_DLMASK;
- blk -= last_round;
- pblk -= ((last_round - 1) *
- blk_size + len);
- } else {
- blk -= QTN_PCIE_FW_DLMASK;
- pblk -= QTN_PCIE_FW_DLMASK * blk_size;
- }
-
- qtnf_clear_state(&priv->bda->bda_ep_state,
- QTN_EP_FW_RETRY);
-
- pr_warn("FW upload retry: block #%d\n", blk);
- continue;
- }
-
- qtnf_pcie_data_tx_reclaim(priv);
- }
-
- pblk += len;
- blk++;
- }
-
- pr_debug("FW upload completed: totally sent %d blocks\n", blk);
- return 0;
-}
-
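Note how qtnf_ep_fw_load() syncs with the endpoint after every window of QTN_PCIE_FW_DLMASK + 1 blocks and, on QTN_EP_FW_RETRY, rewinds both blk and pblk to the start of that window. A sketch of the rewind arithmetic under an assumed 16-block window and 2016-byte payload (both values are illustrative, and the last-block shortening is omitted):

#include <stdio.h>

#define DLMASK	0xf	/* assumed: sync window of DLMASK + 1 = 16 blocks */

int main(void)
{
	unsigned int blk_size = 2016, blk_count = 40;
	unsigned int blk = 0, off = 0;	/* off models pblk - fw */
	int injected = 0;

	while (blk < blk_count) {
		/* sync point: end of a 16-block window or the final block */
		if (!((blk + 1) & DLMASK) || blk == blk_count - 1) {
			if (!injected && blk == 15) {	/* pretend the EP asked for a retry */
				injected = 1;
				blk -= DLMASK;		/* back to block 0 of this window */
				off -= DLMASK * blk_size;
				printf("retry: rewound to blk %u, off %u\n", blk, off);
				continue;
			}
		}
		off += blk_size;
		blk++;
	}
	printf("done: sent %u blocks\n", blk);
	return 0;
}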
-static void qtnf_fw_work_handler(struct work_struct *work)
-{
- struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
- struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
- struct pci_dev *pdev = priv->pdev;
- const struct firmware *fw;
- int ret;
- u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
-
- if (flashboot) {
- state |= QTN_RC_FW_FLASHBOOT;
- } else {
- ret = request_firmware(&fw, bus->fwname, &pdev->dev);
- if (ret < 0) {
- pr_err("failed to get firmware %s\n", bus->fwname);
- goto fw_load_fail;
- }
- }
-
- qtnf_set_state(&priv->bda->bda_rc_state, state);
-
- if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY,
- QTN_FW_DL_TIMEOUT_MS)) {
- pr_err("card is not ready\n");
-
- if (!flashboot)
- release_firmware(fw);
-
- goto fw_load_fail;
- }
-
- qtnf_clear_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY);
-
- if (flashboot) {
- pr_info("booting firmware from flash\n");
- } else {
- pr_info("starting firmware upload: %s\n", bus->fwname);
-
- ret = qtnf_ep_fw_load(priv, fw->data, fw->size);
- release_firmware(fw);
- if (ret) {
- pr_err("firmware upload error\n");
- goto fw_load_fail;
- }
- }
-
- if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE,
- QTN_FW_DL_TIMEOUT_MS)) {
- pr_err("firmware bringup timed out\n");
- goto fw_load_fail;
- }
-
- bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;
- pr_info("firmware is up and running\n");
-
- if (qtnf_poll_state(&priv->bda->bda_ep_state,
- QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
- pr_err("firmware runtime failure\n");
- goto fw_load_fail;
- }
-
- ret = qtnf_core_attach(bus);
- if (ret) {
- pr_err("failed to attach core\n");
- goto fw_load_fail;
- }
-
- qtnf_debugfs_init(bus, DRV_NAME);
- qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
- qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
- qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
- qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
- qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
-
- goto fw_load_exit;
-
-fw_load_fail:
- bus->fw_state = QTNF_FW_STATE_DETACHED;
-
-fw_load_exit:
- complete(&bus->firmware_init_complete);
- put_device(&pdev->dev);
-}
-
-static void qtnf_bringup_fw_async(struct qtnf_bus *bus)
-{
- struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
- struct pci_dev *pdev = priv->pdev;
-
- get_device(&pdev->dev);
- INIT_WORK(&bus->fw_work, qtnf_fw_work_handler);
- schedule_work(&bus->fw_work);
-}
-
-static void qtnf_reclaim_tasklet_fn(unsigned long data)
-{
- struct qtnf_pcie_bus_priv *priv = (void *)data;
-
- qtnf_pcie_data_tx_reclaim(priv);
- qtnf_en_txdone_irq(priv);
-}
-
-static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct qtnf_pcie_bus_priv *pcie_priv;
- struct qtnf_bus *bus;
- int ret;
-
- bus = devm_kzalloc(&pdev->dev,
- sizeof(*bus) + sizeof(*pcie_priv), GFP_KERNEL);
- if (!bus)
- return -ENOMEM;
-
- pcie_priv = get_bus_priv(bus);
-
- pci_set_drvdata(pdev, bus);
- bus->bus_ops = &qtnf_pcie_bus_ops;
- bus->dev = &pdev->dev;
- bus->fw_state = QTNF_FW_STATE_RESET;
- pcie_priv->pdev = pdev;
-
- strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
- init_completion(&bus->firmware_init_complete);
- mutex_init(&bus->bus_lock);
- spin_lock_init(&pcie_priv->tx0_lock);
- spin_lock_init(&pcie_priv->irq_lock);
- spin_lock_init(&pcie_priv->tx_reclaim_lock);
-
- /* init stats */
- pcie_priv->tx_full_count = 0;
- pcie_priv->tx_done_count = 0;
- pcie_priv->pcie_irq_count = 0;
- pcie_priv->pcie_irq_rx_count = 0;
- pcie_priv->pcie_irq_tx_count = 0;
- pcie_priv->pcie_irq_uf_count = 0;
- pcie_priv->tx_reclaim_done = 0;
- pcie_priv->tx_reclaim_req = 0;
-
- tasklet_init(&pcie_priv->reclaim_tq, qtnf_reclaim_tasklet_fn,
- (unsigned long)pcie_priv);
-
- init_dummy_netdev(&bus->mux_dev);
- netif_napi_add(&bus->mux_dev, &bus->mux_napi,
- qtnf_rx_poll, 10);
-
- pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PEARL_PCIE");
- if (!pcie_priv->workqueue) {
- pr_err("failed to alloc bus workqueue\n");
- ret = -ENODEV;
- goto err_init;
- }
-
- if (!pci_is_pcie(pdev)) {
- pr_err("device %s is not PCI Express\n", pci_name(pdev));
- ret = -EIO;
- goto err_base;
- }
-
- qtnf_tune_pcie_mps(pcie_priv);
-
- ret = pcim_enable_device(pdev);
- if (ret) {
- pr_err("failed to init PCI device %x\n", pdev->device);
- goto err_base;
- } else {
- pr_debug("successful init of PCI device %x\n", pdev->device);
- }
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-#else
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-#endif
- if (ret) {
- pr_err("PCIE DMA coherent mask init failed\n");
- goto err_base;
- }
-
- pci_set_master(pdev);
- qtnf_pcie_init_irq(pcie_priv);
-
- ret = qtnf_pcie_init_memory(pcie_priv);
- if (ret < 0) {
- pr_err("PCIE memory init failed\n");
- goto err_base;
- }
-
- pci_save_state(pdev);
-
- ret = qtnf_pcie_init_shm_ipc(pcie_priv);
- if (ret < 0) {
- pr_err("PCIE SHM IPC init failed\n");
- goto err_base;
- }
-
- ret = qtnf_pcie_init_xfer(pcie_priv);
- if (ret) {
- pr_err("PCIE xfer init failed\n");
- goto err_ipc;
- }
-
- /* init default irq settings */
- qtnf_init_hdp_irqs(pcie_priv);
-
- /* start with disabled irqs */
- qtnf_disable_hdp_irqs(pcie_priv);
-
- ret = devm_request_irq(&pdev->dev, pdev->irq, &qtnf_interrupt, 0,
- "qtnf_pcie_irq", (void *)bus);
- if (ret) {
- pr_err("failed to request pcie irq %d\n", pdev->irq);
- goto err_xfer;
- }
-
- qtnf_bringup_fw_async(bus);
-
- return 0;
-
-err_xfer:
- qtnf_free_xfer_buffers(pcie_priv);
-
-err_ipc:
- qtnf_pcie_free_shm_ipc(pcie_priv);
-
-err_base:
- flush_workqueue(pcie_priv->workqueue);
- destroy_workqueue(pcie_priv->workqueue);
- netif_napi_del(&bus->mux_napi);
-
-err_init:
- tasklet_kill(&pcie_priv->reclaim_tq);
- pci_set_drvdata(pdev, NULL);
-
- return ret;
-}
-
-static void qtnf_pcie_remove(struct pci_dev *pdev)
-{
- struct qtnf_pcie_bus_priv *priv;
- struct qtnf_bus *bus;
-
- bus = pci_get_drvdata(pdev);
- if (!bus)
- return;
-
- wait_for_completion(&bus->firmware_init_complete);
-
- if (bus->fw_state == QTNF_FW_STATE_ACTIVE ||
- bus->fw_state == QTNF_FW_STATE_EP_DEAD)
- qtnf_core_detach(bus);
-
- priv = get_bus_priv(bus);
-
- netif_napi_del(&bus->mux_napi);
- flush_workqueue(priv->workqueue);
- destroy_workqueue(priv->workqueue);
- tasklet_kill(&priv->reclaim_tq);
-
- qtnf_free_xfer_buffers(priv);
- qtnf_debugfs_remove(bus);
-
- qtnf_pcie_free_shm_ipc(priv);
- qtnf_reset_card(priv);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int qtnf_pcie_suspend(struct device *dev)
-{
- return -EOPNOTSUPP;
-}
-
-static int qtnf_pcie_resume(struct device *dev)
-{
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM_SLEEP
-/* Power Management Hooks */
-static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend,
- qtnf_pcie_resume);
-#endif
-
-static const struct pci_device_id qtnf_pcie_devid_table[] = {
- {
- PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- },
- { },
-};
-
-MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
-
-static struct pci_driver qtnf_pcie_drv_data = {
- .name = DRV_NAME,
- .id_table = qtnf_pcie_devid_table,
- .probe = qtnf_pcie_probe,
- .remove = qtnf_pcie_remove,
-#ifdef CONFIG_PM_SLEEP
- .driver = {
- .pm = &qtnf_pcie_pm_ops,
- },
-#endif
-};
-
-static int __init qtnf_pcie_register(void)
-{
- pr_info("register Quantenna QSR10g FullMAC PCIE driver\n");
- return pci_register_driver(&qtnf_pcie_drv_data);
-}
-
-static void __exit qtnf_pcie_exit(void)
-{
- pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
- pci_unregister_driver(&qtnf_pcie_drv_data);
-}
-
-module_init(qtnf_pcie_register);
-module_exit(qtnf_pcie_exit);
-
-MODULE_AUTHOR("Quantenna Communications");
-MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
deleted file mode 100644
index 397875a50fc2..000000000000
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2015-2016 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _QTN_FMAC_PCIE_H_
-#define _QTN_FMAC_PCIE_H_
-
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-
-#include "pcie_regs_pearl.h"
-#include "pcie_ipc.h"
-#include "shm_ipc.h"
-
-struct bus;
-
-struct qtnf_pcie_bus_priv {
- struct pci_dev *pdev;
-
- /* lock for irq configuration changes */
- spinlock_t irq_lock;
-
- /* lock for tx reclaim operations */
- spinlock_t tx_reclaim_lock;
- /* lock for tx0 operations */
- spinlock_t tx0_lock;
- u8 msi_enabled;
- u8 tx_stopped;
- int mps;
-
- struct workqueue_struct *workqueue;
- struct tasklet_struct reclaim_tq;
-
- void __iomem *sysctl_bar;
- void __iomem *epmem_bar;
- void __iomem *dmareg_bar;
-
- struct qtnf_shm_ipc shm_ipc_ep_in;
- struct qtnf_shm_ipc shm_ipc_ep_out;
-
- struct qtnf_pcie_bda __iomem *bda;
- void __iomem *pcie_reg_base;
-
- u16 tx_bd_num;
- u16 rx_bd_num;
-
- struct sk_buff **tx_skb;
- struct sk_buff **rx_skb;
-
- struct qtnf_tx_bd *tx_bd_vbase;
- dma_addr_t tx_bd_pbase;
-
- struct qtnf_rx_bd *rx_bd_vbase;
- dma_addr_t rx_bd_pbase;
-
- dma_addr_t bd_table_paddr;
- void *bd_table_vaddr;
- u32 bd_table_len;
-
- u32 rx_bd_w_index;
- u32 rx_bd_r_index;
-
- u32 tx_bd_w_index;
- u32 tx_bd_r_index;
-
- u32 pcie_irq_mask;
-
- /* diagnostics stats */
- u32 pcie_irq_count;
- u32 pcie_irq_rx_count;
- u32 pcie_irq_tx_count;
- u32 pcie_irq_uf_count;
- u32 tx_full_count;
- u32 tx_done_count;
- u32 tx_reclaim_done;
- u32 tx_reclaim_req;
-};
-
-#endif /* _QTN_FMAC_PCIE_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h
deleted file mode 100644
index 0bfe285b6b48..000000000000
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright (c) 2015 Quantenna Communications, Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __PEARL_PCIE_H
-#define __PEARL_PCIE_H
-
-#define PCIE_GEN2_BASE (0xe9000000)
-#define PCIE_GEN3_BASE (0xe7000000)
-
-#define PEARL_CUR_PCIE_BASE (PCIE_GEN2_BASE)
-#define PCIE_HDP_OFFSET (0x2000)
-
-#define PCIE_HDP_CTRL(base) ((base) + 0x2c00)
-#define PCIE_HDP_AXI_CTRL(base) ((base) + 0x2c04)
-#define PCIE_HDP_HOST_WR_DESC0(base) ((base) + 0x2c10)
-#define PCIE_HDP_HOST_WR_DESC0_H(base) ((base) + 0x2c14)
-#define PCIE_HDP_HOST_WR_DESC1(base) ((base) + 0x2c18)
-#define PCIE_HDP_HOST_WR_DESC1_H(base) ((base) + 0x2c1c)
-#define PCIE_HDP_HOST_WR_DESC2(base) ((base) + 0x2c20)
-#define PCIE_HDP_HOST_WR_DESC2_H(base) ((base) + 0x2c24)
-#define PCIE_HDP_HOST_WR_DESC3(base) ((base) + 0x2c28)
-#define PCIE_HDP_HOST_WR_DESC4_H(base) ((base) + 0x2c2c)
-#define PCIE_HDP_RX_INT_CTRL(base) ((base) + 0x2c30)
-#define PCIE_HDP_TX_INT_CTRL(base) ((base) + 0x2c34)
-#define PCIE_HDP_INT_STATUS(base) ((base) + 0x2c38)
-#define PCIE_HDP_INT_EN(base) ((base) + 0x2c3c)
-#define PCIE_HDP_RX_DESC0_PTR(base) ((base) + 0x2c40)
-#define PCIE_HDP_RX_DESC0_NOE(base) ((base) + 0x2c44)
-#define PCIE_HDP_RX_DESC1_PTR(base) ((base) + 0x2c48)
-#define PCIE_HDP_RX_DESC1_NOE(base) ((base) + 0x2c4c)
-#define PCIE_HDP_RX_DESC2_PTR(base) ((base) + 0x2c50)
-#define PCIE_HDP_RX_DESC2_NOE(base) ((base) + 0x2c54)
-#define PCIE_HDP_RX_DESC3_PTR(base) ((base) + 0x2c58)
-#define PCIE_HDP_RX_DESC3_NOE(base) ((base) + 0x2c5c)
-
-#define PCIE_HDP_TX0_BASE_ADDR(base) ((base) + 0x2c60)
-#define PCIE_HDP_TX1_BASE_ADDR(base) ((base) + 0x2c64)
-#define PCIE_HDP_TX0_Q_CTRL(base) ((base) + 0x2c70)
-#define PCIE_HDP_TX1_Q_CTRL(base) ((base) + 0x2c74)
-#define PCIE_HDP_CFG0(base) ((base) + 0x2c80)
-#define PCIE_HDP_CFG1(base) ((base) + 0x2c84)
-#define PCIE_HDP_CFG2(base) ((base) + 0x2c88)
-#define PCIE_HDP_CFG3(base) ((base) + 0x2c8c)
-#define PCIE_HDP_CFG4(base) ((base) + 0x2c90)
-#define PCIE_HDP_CFG5(base) ((base) + 0x2c94)
-#define PCIE_HDP_CFG6(base) ((base) + 0x2c98)
-#define PCIE_HDP_CFG7(base) ((base) + 0x2c9c)
-#define PCIE_HDP_CFG8(base) ((base) + 0x2ca0)
-#define PCIE_HDP_CFG9(base) ((base) + 0x2ca4)
-#define PCIE_HDP_CFG10(base) ((base) + 0x2ca8)
-#define PCIE_HDP_CFG11(base) ((base) + 0x2cac)
-#define PCIE_INT(base) ((base) + 0x2cb0)
-#define PCIE_INT_MASK(base) ((base) + 0x2cb4)
-#define PCIE_MSI_MASK(base) ((base) + 0x2cb8)
-#define PCIE_MSI_PNDG(base) ((base) + 0x2cbc)
-#define PCIE_PRI_CFG(base) ((base) + 0x2cc0)
-#define PCIE_PHY_CR(base) ((base) + 0x2cc4)
-#define PCIE_HDP_CTAG_CTRL(base) ((base) + 0x2cf4)
-#define PCIE_HDP_HHBM_BUF_PTR(base) ((base) + 0x2d00)
-#define PCIE_HDP_HHBM_BUF_PTR_H(base) ((base) + 0x2d04)
-#define PCIE_HDP_HHBM_BUF_FIFO_NOE(base) ((base) + 0x2d04)
-#define PCIE_HDP_RX0DMA_CNT(base) ((base) + 0x2d10)
-#define PCIE_HDP_RX1DMA_CNT(base) ((base) + 0x2d14)
-#define PCIE_HDP_RX2DMA_CNT(base) ((base) + 0x2d18)
-#define PCIE_HDP_RX3DMA_CNT(base) ((base) + 0x2d1c)
-#define PCIE_HDP_TX0DMA_CNT(base) ((base) + 0x2d20)
-#define PCIE_HDP_TX1DMA_CNT(base) ((base) + 0x2d24)
-#define PCIE_HDP_RXDMA_CTRL(base) ((base) + 0x2d28)
-#define PCIE_HDP_TX_HOST_Q_SZ_CTRL(base) ((base) + 0x2d2c)
-#define PCIE_HDP_TX_HOST_Q_BASE_L(base) ((base) + 0x2d30)
-#define PCIE_HDP_TX_HOST_Q_BASE_H(base) ((base) + 0x2d34)
-#define PCIE_HDP_TX_HOST_Q_WR_PTR(base) ((base) + 0x2d38)
-#define PCIE_HDP_TX_HOST_Q_RD_PTR(base) ((base) + 0x2d3c)
-#define PCIE_HDP_TX_HOST_Q_STS(base) ((base) + 0x2d40)
-
-/* Host HBM pool registers */
-#define PCIE_HHBM_CSR_REG(base) ((base) + 0x2e00)
-#define PCIE_HHBM_Q_BASE_REG(base) ((base) + 0x2e04)
-#define PCIE_HHBM_Q_LIMIT_REG(base) ((base) + 0x2e08)
-#define PCIE_HHBM_Q_WR_REG(base) ((base) + 0x2e0c)
-#define PCIE_HHBM_Q_RD_REG(base) ((base) + 0x2e10)
-#define PCIE_HHBM_POOL_DATA_0_H(base) ((base) + 0x2e90)
-#define PCIE_HHBM_CONFIG(base) ((base) + 0x2f9c)
-#define PCIE_HHBM_POOL_REQ_0(base) ((base) + 0x2f10)
-#define PCIE_HHBM_POOL_DATA_0(base) ((base) + 0x2f40)
-#define PCIE_HHBM_WATERMARK_MASKED_INT(base) ((base) + 0x2f68)
-#define PCIE_HHBM_WATERMARK_INT(base) ((base) + 0x2f6c)
-#define PCIE_HHBM_POOL_WATERMARK(base) ((base) + 0x2f70)
-#define PCIE_HHBM_POOL_OVERFLOW_CNT(base) ((base) + 0x2f90)
-#define PCIE_HHBM_POOL_UNDERFLOW_CNT(base) ((base) + 0x2f94)
-#define HBM_INT_STATUS(base) ((base) + 0x2f9c)
-#define PCIE_HHBM_POOL_CNFIG(base) ((base) + 0x2f9c)
-
-/* host HBM bit field definition */
-#define HHBM_CONFIG_SOFT_RESET (BIT(8))
-#define HHBM_WR_REQ (BIT(0))
-#define HHBM_RD_REQ (BIT(1))
-#define HHBM_DONE (BIT(31))
-#define HHBM_64BIT (BIT(10))
-
-/* offsets for dual PCIE */
-#define PCIE_PORT_LINK_CTL(base) ((base) + 0x0710)
-#define PCIE_GEN2_CTL(base) ((base) + 0x080C)
-#define PCIE_GEN3_OFF(base) ((base) + 0x0890)
-#define PCIE_ATU_CTRL1(base) ((base) + 0x0904)
-#define PCIE_ATU_CTRL2(base) ((base) + 0x0908)
-#define PCIE_ATU_BASE_LOW(base) ((base) + 0x090C)
-#define PCIE_ATU_BASE_HIGH(base) ((base) + 0x0910)
-#define PCIE_ATU_BASE_LIMIT(base) ((base) + 0x0914)
-#define PCIE_ATU_TGT_LOW(base) ((base) + 0x0918)
-#define PCIE_ATU_TGT_HIGH(base) ((base) + 0x091C)
-#define PCIE_DMA_WR_ENABLE(base) ((base) + 0x097C)
-#define PCIE_DMA_WR_CHWTLOW(base) ((base) + 0x0988)
-#define PCIE_DMA_WR_CHWTHIG(base) ((base) + 0x098C)
-#define PCIE_DMA_WR_INTSTS(base) ((base) + 0x09BC)
-#define PCIE_DMA_WR_INTMASK(base) ((base) + 0x09C4)
-#define PCIE_DMA_WR_INTCLER(base) ((base) + 0x09C8)
-#define PCIE_DMA_WR_DONE_IMWR_ADDR_L(base) ((base) + 0x09D0)
-#define PCIE_DMA_WR_DONE_IMWR_ADDR_H(base) ((base) + 0x09D4)
-#define PCIE_DMA_WR_ABORT_IMWR_ADDR_L(base) ((base) + 0x09D8)
-#define PCIE_DMA_WR_ABORT_IMWR_ADDR_H(base) ((base) + 0x09DC)
-#define PCIE_DMA_WR_IMWR_DATA(base) ((base) + 0x09E0)
-#define PCIE_DMA_WR_LL_ERR_EN(base) ((base) + 0x0A00)
-#define PCIE_DMA_WR_DOORBELL(base) ((base) + 0x0980)
-#define PCIE_DMA_RD_ENABLE(base) ((base) + 0x099C)
-#define PCIE_DMA_RD_DOORBELL(base) ((base) + 0x09A0)
-#define PCIE_DMA_RD_CHWTLOW(base) ((base) + 0x09A8)
-#define PCIE_DMA_RD_CHWTHIG(base) ((base) + 0x09AC)
-#define PCIE_DMA_RD_INTSTS(base) ((base) + 0x0A10)
-#define PCIE_DMA_RD_INTMASK(base) ((base) + 0x0A18)
-#define PCIE_DMA_RD_INTCLER(base) ((base) + 0x0A1C)
-#define PCIE_DMA_RD_ERR_STS_L(base) ((base) + 0x0A24)
-#define PCIE_DMA_RD_ERR_STS_H(base) ((base) + 0x0A28)
-#define PCIE_DMA_RD_LL_ERR_EN(base) ((base) + 0x0A34)
-#define PCIE_DMA_RD_DONE_IMWR_ADDR_L(base) ((base) + 0x0A3C)
-#define PCIE_DMA_RD_DONE_IMWR_ADDR_H(base) ((base) + 0x0A40)
-#define PCIE_DMA_RD_ABORT_IMWR_ADDR_L(base) ((base) + 0x0A44)
-#define PCIE_DMA_RD_ABORT_IMWR_ADDR_H(base) ((base) + 0x0A48)
-#define PCIE_DMA_RD_IMWR_DATA(base) ((base) + 0x0A4C)
-#define PCIE_DMA_CHNL_CONTEXT(base) ((base) + 0x0A6C)
-#define PCIE_DMA_CHNL_CNTRL(base) ((base) + 0x0A70)
-#define PCIE_DMA_XFR_SIZE(base) ((base) + 0x0A78)
-#define PCIE_DMA_SAR_LOW(base) ((base) + 0x0A7C)
-#define PCIE_DMA_SAR_HIGH(base) ((base) + 0x0A80)
-#define PCIE_DMA_DAR_LOW(base) ((base) + 0x0A84)
-#define PCIE_DMA_DAR_HIGH(base) ((base) + 0x0A88)
-#define PCIE_DMA_LLPTR_LOW(base) ((base) + 0x0A8C)
-#define PCIE_DMA_LLPTR_HIGH(base) ((base) + 0x0A90)
-#define PCIE_DMA_WRLL_ERR_ENB(base) ((base) + 0x0A00)
-#define PCIE_DMA_RDLL_ERR_ENB(base) ((base) + 0x0A34)
-#define PCIE_DMABD_CHNL_CNTRL(base) ((base) + 0x8000)
-#define PCIE_DMABD_XFR_SIZE(base) ((base) + 0x8004)
-#define PCIE_DMABD_SAR_LOW(base) ((base) + 0x8008)
-#define PCIE_DMABD_SAR_HIGH(base) ((base) + 0x800c)
-#define PCIE_DMABD_DAR_LOW(base) ((base) + 0x8010)
-#define PCIE_DMABD_DAR_HIGH(base) ((base) + 0x8014)
-#define PCIE_DMABD_LLPTR_LOW(base) ((base) + 0x8018)
-#define PCIE_DMABD_LLPTR_HIGH(base) ((base) + 0x801c)
-#define PCIE_WRDMA0_CHNL_CNTRL(base) ((base) + 0x8000)
-#define PCIE_WRDMA0_XFR_SIZE(base) ((base) + 0x8004)
-#define PCIE_WRDMA0_SAR_LOW(base) ((base) + 0x8008)
-#define PCIE_WRDMA0_SAR_HIGH(base) ((base) + 0x800c)
-#define PCIE_WRDMA0_DAR_LOW(base) ((base) + 0x8010)
-#define PCIE_WRDMA0_DAR_HIGH(base) ((base) + 0x8014)
-#define PCIE_WRDMA0_LLPTR_LOW(base) ((base) + 0x8018)
-#define PCIE_WRDMA0_LLPTR_HIGH(base) ((base) + 0x801c)
-#define PCIE_WRDMA1_CHNL_CNTRL(base) ((base) + 0x8020)
-#define PCIE_WRDMA1_XFR_SIZE(base) ((base) + 0x8024)
-#define PCIE_WRDMA1_SAR_LOW(base) ((base) + 0x8028)
-#define PCIE_WRDMA1_SAR_HIGH(base) ((base) + 0x802c)
-#define PCIE_WRDMA1_DAR_LOW(base) ((base) + 0x8030)
-#define PCIE_WRDMA1_DAR_HIGH(base) ((base) + 0x8034)
-#define PCIE_WRDMA1_LLPTR_LOW(base) ((base) + 0x8038)
-#define PCIE_WRDMA1_LLPTR_HIGH(base) ((base) + 0x803c)
-#define PCIE_RDDMA0_CHNL_CNTRL(base) ((base) + 0x8040)
-#define PCIE_RDDMA0_XFR_SIZE(base) ((base) + 0x8044)
-#define PCIE_RDDMA0_SAR_LOW(base) ((base) + 0x8048)
-#define PCIE_RDDMA0_SAR_HIGH(base) ((base) + 0x804c)
-#define PCIE_RDDMA0_DAR_LOW(base) ((base) + 0x8050)
-#define PCIE_RDDMA0_DAR_HIGH(base) ((base) + 0x8054)
-#define PCIE_RDDMA0_LLPTR_LOW(base) ((base) + 0x8058)
-#define PCIE_RDDMA0_LLPTR_HIGH(base) ((base) + 0x805c)
-#define PCIE_RDDMA1_CHNL_CNTRL(base) ((base) + 0x8060)
-#define PCIE_RDDMA1_XFR_SIZE(base) ((base) + 0x8064)
-#define PCIE_RDDMA1_SAR_LOW(base) ((base) + 0x8068)
-#define PCIE_RDDMA1_SAR_HIGH(base) ((base) + 0x806c)
-#define PCIE_RDDMA1_DAR_LOW(base) ((base) + 0x8070)
-#define PCIE_RDDMA1_DAR_HIGH(base) ((base) + 0x8074)
-#define PCIE_RDDMA1_LLPTR_LOW(base) ((base) + 0x8078)
-#define PCIE_RDDMA1_LLPTR_HIGH(base) ((base) + 0x807c)
-
-#define PCIE_ID(base) ((base) + 0x0000)
-#define PCIE_CMD(base) ((base) + 0x0004)
-#define PCIE_BAR(base, n) ((base) + 0x0010 + ((n) << 2))
-#define PCIE_CAP_PTR(base) ((base) + 0x0034)
-#define PCIE_MSI_LBAR(base) ((base) + 0x0054)
-#define PCIE_MSI_CTRL(base) ((base) + 0x0050)
-#define PCIE_MSI_ADDR_L(base) ((base) + 0x0054)
-#define PCIE_MSI_ADDR_H(base) ((base) + 0x0058)
-#define PCIE_MSI_DATA(base) ((base) + 0x005C)
-#define PCIE_MSI_MASK_BIT(base) ((base) + 0x0060)
-#define PCIE_MSI_PEND_BIT(base) ((base) + 0x0064)
-#define PCIE_DEVCAP(base) ((base) + 0x0074)
-#define PCIE_DEVCTLSTS(base) ((base) + 0x0078)
-
-#define PCIE_CMDSTS(base) ((base) + 0x0004)
-#define PCIE_LINK_STAT(base) ((base) + 0x80)
-#define PCIE_LINK_CTL2(base) ((base) + 0xa0)
-#define PCIE_ASPM_L1_CTRL(base) ((base) + 0x70c)
-#define PCIE_ASPM_LINK_CTRL(base) (PCIE_LINK_STAT)
-#define PCIE_ASPM_L1_SUBSTATE_TIMING(base) ((base) + 0xB44)
-#define PCIE_L1SUB_CTRL1(base) ((base) + 0x150)
-#define PCIE_PMCSR(base) ((base) + 0x44)
-#define PCIE_CFG_SPACE_LIMIT(base) ((base) + 0x100)
-
-/* PCIe link defines */
-#define PEARL_PCIE_LINKUP (0x7)
-#define PEARL_PCIE_DATA_LINK (BIT(0))
-#define PEARL_PCIE_PHY_LINK (BIT(1))
-#define PEARL_PCIE_LINK_RST (BIT(3))
-#define PEARL_PCIE_FATAL_ERR (BIT(5))
-#define PEARL_PCIE_NONFATAL_ERR (BIT(6))
-
-/* PCIe Lane defines */
-#define PCIE_G2_LANE_X1 ((BIT(0)) << 16)
-#define PCIE_G2_LANE_X2 ((BIT(0) | BIT(1)) << 16)
-
-/* PCIe DLL link enable */
-#define PCIE_DLL_LINK_EN ((BIT(0)) << 5)
-
-#define PCIE_LINK_GEN1 (BIT(0))
-#define PCIE_LINK_GEN2 (BIT(1))
-#define PCIE_LINK_GEN3 (BIT(2))
-#define PCIE_LINK_MODE(x) (((x) >> 16) & 0x7)
-
-#define MSI_EN (BIT(0))
-#define MSI_64_EN (BIT(7))
-#define PCIE_MSI_ADDR_OFFSET(a) ((a) & 0xFFFF)
-#define PCIE_MSI_ADDR_ALIGN(a) ((a) & (~0xFFFF))
-
-#define PCIE_BAR_MASK(base, n) ((base) + 0x1010 + ((n) << 2))
-#define PCIE_MAX_BAR (6)
-
-#define PCIE_ATU_VIEW(base) ((base) + 0x0900)
-#define PCIE_ATU_CTL1(base) ((base) + 0x0904)
-#define PCIE_ATU_CTL2(base) ((base) + 0x0908)
-#define PCIE_ATU_LBAR(base) ((base) + 0x090c)
-#define PCIE_ATU_UBAR(base) ((base) + 0x0910)
-#define PCIE_ATU_LAR(base) ((base) + 0x0914)
-#define PCIE_ATU_LTAR(base) ((base) + 0x0918)
-#define PCIE_ATU_UTAR(base) ((base) + 0x091c)
-
-#define PCIE_MSI_ADDR_LOWER(base) ((base) + 0x0820)
-#define PCIE_MSI_ADDR_UPPER(base) ((base) + 0x0824)
-#define PCIE_MSI_ENABLE(base) ((base) + 0x0828)
-#define PCIE_MSI_MASK_RC(base) ((base) + 0x082c)
-#define PCIE_MSI_STATUS(base) ((base) + 0x0830)
-#define PEARL_PCIE_MSI_REGION (0xce000000)
-#define PEARL_PCIE_MSI_DATA (0)
-#define PCIE_MSI_GPIO(base) ((base) + 0x0888)
-
-#define PCIE_HDP_HOST_QUEUE_FULL (BIT(17))
-#define USE_BAR_MATCH_MODE
-#define PCIE_ATU_OB_REGION (BIT(0))
-#define PCIE_ATU_EN_REGION (BIT(31))
-#define PCIE_ATU_EN_MATCH (BIT(30))
-#define PCIE_BASE_REGION (0xb0000000)
-#define PCIE_MEM_MAP_SIZE (512 * 1024)
-
-#define PCIE_OB_REG_REGION (0xcf000000)
-#define PCIE_CONFIG_REGION (0xcf000000)
-#define PCIE_CONFIG_SIZE (4096)
-#define PCIE_CONFIG_CH (1)
-
-/* inbound mapping */
-#define PCIE_IB_BAR0 (0x00000000) /* ddr */
-#define PCIE_IB_BAR0_CH (0)
-#define PCIE_IB_BAR3 (0xe0000000) /* sys_reg */
-#define PCIE_IB_BAR3_CH (1)
-
-/* outbound mapping */
-#define PCIE_MEM_CH (0)
-#define PCIE_REG_CH (1)
-#define PCIE_MEM_REGION (0xc0000000)
-#define PCIE_MEM_SIZE (0x000fffff)
-#define PCIE_MEM_TAR (0x80000000)
-
-#define PCIE_MSI_REGION (0xce000000)
-#define PCIE_MSI_SIZE (KBYTE(4) - 1)
-#define PCIE_MSI_CH (1)
-
-/* size of config region */
-#define PCIE_CFG_SIZE (0x0000ffff)
-
-#define PCIE_ATU_DIR_IB (BIT(31))
-#define PCIE_ATU_DIR_OB (0)
-#define PCIE_ATU_DIR_CFG (2)
-#define PCIE_ATU_DIR_MATCH_IB (BIT(31) | BIT(30))
-
-#define PCIE_DMA_WR_0 (0)
-#define PCIE_DMA_WR_1 (1)
-#define PCIE_DMA_RD_0 (2)
-#define PCIE_DMA_RD_1 (3)
-
-#define PCIE_DMA_CHNL_CNTRL_CB (BIT(0))
-#define PCIE_DMA_CHNL_CNTRL_TCB (BIT(1))
-#define PCIE_DMA_CHNL_CNTRL_LLP (BIT(2))
-#define PCIE_DMA_CHNL_CNTRL_LIE (BIT(3))
-#define PCIE_DMA_CHNL_CNTRL_RIE (BIT(4))
-#define PCIE_DMA_CHNL_CNTRL_CSS (BIT(8))
-#define PCIE_DMA_CHNL_CNTRL_LLE (BIT(9))
-#define PCIE_DMA_CHNL_CNTRL_TLP (BIT(26))
-
-#define PCIE_DMA_CHNL_CONTEXT_RD (BIT(31))
-#define PCIE_DMA_CHNL_CONTEXT_WR (0)
-#define PCIE_MAX_BAR (6)
-
-/* PCIe HDP interrupt status definition */
-#define PCIE_HDP_INT_EP_RXDMA (BIT(0))
-#define PCIE_HDP_INT_HBM_UF (BIT(1))
-#define PCIE_HDP_INT_RX_LEN_ERR (BIT(2))
-#define PCIE_HDP_INT_RX_HDR_LEN_ERR (BIT(3))
-#define PCIE_HDP_INT_EP_TXDMA (BIT(12))
-#define PCIE_HDP_INT_HHBM_UF (BIT(13))
-#define PCIE_HDP_INT_EP_TXEMPTY (BIT(15))
-#define PCIE_HDP_INT_IPC (BIT(29))
-
-/* PCIe interrupt status definition */
-#define PCIE_INT_MSI (BIT(24))
-#define PCIE_INT_INTX (BIT(23))
-
-/* PCIe legacy INTx */
-#define PEARL_PCIE_CFG0_OFFSET (0x6C)
-#define PEARL_ASSERT_INTX (BIT(9))
-
-/* SYS CTL regs */
-#define QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET (0x001C)
-
-#define QTN_PEARL_IPC_IRQ_WORD(irq) (BIT(irq) | BIT(irq + 16))
-#define QTN_PEARL_LHOST_IPC_IRQ (6)
-#define QTN_PEARL_LHOST_EP_RESET (7)
-
-#endif /* __PEARL_PCIE_H */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 99d37e3efba6..8d62addea895 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -71,6 +71,7 @@ struct qlink_msg_header {
* @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality
* @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address
* Randomization in probe requests.
+ * @QLINK_HW_CAPAB_OBSS_SCAN: device can perform OBSS scanning.
*/
enum qlink_hw_capab {
QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
@@ -78,6 +79,8 @@ enum qlink_hw_capab {
QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2),
QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3),
QLINK_HW_CAPAB_PWR_MGMT = BIT(4),
+ QLINK_HW_CAPAB_OBSS_SCAN = BIT(5),
+ QLINK_HW_CAPAB_SCAN_DWELL = BIT(6),
};
enum qlink_iface_type {
@@ -1149,6 +1152,8 @@ enum qlink_tlv_id {
QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409,
QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
+ QTN_TLV_ID_SCAN_FLUSH = 0x0412,
+ QTN_TLV_ID_SCAN_DWELL = 0x0413,
};
struct qlink_tlv_hdr {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index 54caeb38917c..960d5d97492f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -40,6 +40,14 @@ static inline void qtnf_cmd_skb_put_tlv_arr(struct sk_buff *skb,
memcpy(hdr->val, arr, arr_len);
}
+static inline void qtnf_cmd_skb_put_tlv_tag(struct sk_buff *skb, u16 tlv_id)
+{
+ struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr));
+
+ hdr->type = cpu_to_le16(tlv_id);
+ hdr->len = cpu_to_le16(0);
+}
+
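qtnf_cmd_skb_put_tlv_tag() above emits a zero-length TLV — a bare type/length header used as a boolean flag, for which the new QTN_TLV_ID_SCAN_FLUSH is a natural user. A userspace model of the qlink TLV stream (little-endian u16 type and length, then the value); the one-byte dwell payload below is purely illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append one TLV: LE16 type, LE16 len, then len bytes of value.
 * Bounds checking and skb handling are intentionally omitted. */
static size_t put_tlv(uint8_t *buf, uint16_t type, const void *val, uint16_t len)
{
	buf[0] = type & 0xff;  buf[1] = type >> 8;	/* cpu_to_le16(type) */
	buf[2] = len & 0xff;   buf[3] = len >> 8;	/* cpu_to_le16(len)  */
	if (len)
		memcpy(buf + 4, val, len);
	return 4 + len;
}

int main(void)
{
	uint8_t buf[64], dwell = 100;	/* dwell payload size is an assumption */
	size_t off = 0;

	off += put_tlv(buf + off, 0x0412, NULL, 0);	/* SCAN_FLUSH: tag only */
	off += put_tlv(buf + off, 0x0413, &dwell, 1);	/* SCAN_DWELL */

	printf("stream is %zu bytes\n", off);		/* 4 + 5 = 9 */
	return 0;
}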
static inline void qtnf_cmd_skb_put_tlv_u8(struct sk_buff *skb, u16 tlv_id,
u8 value)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
index c4ad40d59085..1fe798a9a667 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
@@ -25,8 +25,22 @@
#define PCIE_DEVICE_ID_QTN_PEARL (0x0008)
+#define QTN_REG_SYS_CTRL_CSR 0x14
+#define QTN_CHIP_ID_MASK 0xF0
+#define QTN_CHIP_ID_TOPAZ 0x40
+#define QTN_CHIP_ID_PEARL 0x50
+#define QTN_CHIP_ID_PEARL_B 0x60
+#define QTN_CHIP_ID_PEARL_C 0x70
+
/* FW names */
#define QTN_PCI_PEARL_FW_NAME "qtn/fmac_qsr10g.img"
+static inline unsigned int qtnf_chip_id_get(const void __iomem *regs_base)
+{
+ u32 board_rev = readl(regs_base + QTN_REG_SYS_CTRL_CSR);
+
+ return board_rev & QTN_CHIP_ID_MASK;
+}
+
#endif /* _QTN_HW_IDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
index aa106dd0a14b..2ec334199c2b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c
@@ -42,19 +42,18 @@ static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc)
if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) {
pr_err("wrong rx packet size: %zu\n", size);
rx_buff_ok = false;
- } else {
- memcpy_fromio(ipc->rx_data, ipc->shm_region->data, size);
+ }
+
+ if (likely(rx_buff_ok)) {
+ ipc->rx_packet_count++;
+ ipc->rx_callback.fn(ipc->rx_callback.arg,
+ ipc->shm_region->data, size);
}
writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags);
readl(&shm_reg_hdr->flags); /* flush PCIe write */
ipc->interrupt.fn(ipc->interrupt.arg);
-
- if (likely(rx_buff_ok)) {
- ipc->rx_packet_count++;
- ipc->rx_callback.fn(ipc->rx_callback.arg, ipc->rx_data, size);
- }
}
static void qtnf_shm_ipc_irq_work(struct work_struct *work)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
index 453dd6477b12..c2a3702a9ee7 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h
@@ -32,7 +32,7 @@ struct qtnf_shm_ipc_int {
};
struct qtnf_shm_ipc_rx_callback {
- void (*fn)(void *arg, const u8 *buf, size_t len);
+ void (*fn)(void *arg, const u8 __iomem *buf, size_t len);
void *arg;
};
@@ -51,8 +51,6 @@ struct qtnf_shm_ipc {
u8 waiting_for_ack;
- u8 rx_data[QTN_IPC_MAX_DATA_SZ] __aligned(sizeof(u32));
-
struct qtnf_shm_ipc_int interrupt;
struct qtnf_shm_ipc_rx_callback rx_callback;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index a567bc273ffc..9e7b8933d30c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -957,6 +957,47 @@ static void rt2800_rate_from_status(struct skb_frame_desc *skbdesc,
skbdesc->tx_rate_flags = flags;
}
+static bool rt2800_txdone_entry_check(struct queue_entry *entry, u32 reg)
+{
+ __le32 *txwi;
+ u32 word;
+ int wcid, ack, pid;
+ int tx_wcid, tx_ack, tx_pid, is_agg;
+
+ /*
+ * This frame has returned with an IO error,
+ * so the status report is not intended for this
+ * frame.
+ */
+ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
+ return false;
+
+ wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
+ ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
+ pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
+ is_agg = rt2x00_get_field32(reg, TX_STA_FIFO_TX_AGGRE);
+
+ /*
+ * Validate if this TX status report is intended for
+ * this entry by comparing the WCID/ACK/PID fields.
+ */
+ txwi = rt2800_drv_get_txwi(entry);
+
+ word = rt2x00_desc_read(txwi, 1);
+ tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+ tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
+ tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
+
+ if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) {
+ rt2x00_dbg(entry->queue->rt2x00dev,
+ "TX status report missed for queue %d entry %d\n",
+ entry->queue->qid, entry->entry_idx);
+ return false;
+ }
+
+ return true;
+}
+
void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
bool match)
{
@@ -1059,6 +1100,119 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
}
EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
+void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ struct queue_entry *entry;
+ u32 reg;
+ u8 qid;
+ bool match;
+
+ while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
+ /*
+ * TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus qid is
+ * guaranteed to be one of the TX QIDs.
+ */
+ qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
+
+ if (unlikely(rt2x00queue_empty(queue))) {
+ rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+ qid);
+ break;
+ }
+
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+
+ if (unlikely(test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+ !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))) {
+ rt2x00_warn(rt2x00dev, "Data pending for entry %u in queue %u\n",
+ entry->entry_idx, qid);
+ break;
+ }
+
+ match = rt2800_txdone_entry_check(entry, reg);
+ rt2800_txdone_entry(entry, reg, rt2800_drv_get_txwi(entry), match);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800_txdone);
+
+static inline bool rt2800_entry_txstatus_timeout(struct rt2x00_dev *rt2x00dev,
+ struct queue_entry *entry)
+{
+ bool ret;
+ unsigned long tout;
+
+ if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+ return false;
+
+ if (test_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags))
+ tout = msecs_to_jiffies(50);
+ else
+ tout = msecs_to_jiffies(2000);
+
+ ret = time_after(jiffies, entry->last_action + tout);
+ if (unlikely(ret))
+ rt2x00_dbg(entry->queue->rt2x00dev,
+ "TX status timeout for entry %d in queue %d\n",
+ entry->entry_idx, entry->queue->qid);
+ return ret;
+}
+
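The time_after() used in rt2800_entry_txstatus_timeout() stays correct across jiffies wraparound because it subtracts first and tests the sign rather than comparing magnitudes. A minimal demonstration of the idiom, with tick values chosen to straddle the wrap:

#include <limits.h>
#include <stdio.h>

/* The kernel's idiom: a is "after" b iff (long)(b - a) is negative.
 * Valid as long as the two stamps are within LONG_MAX ticks of each other. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long last_action = ULONG_MAX - 5;	/* stamped just before wrap */
	unsigned long deadline = last_action + 50;	/* wraps around to 44 */
	unsigned long now = ULONG_MAX - 2;		/* only 3 ticks after the stamp */

	printf("naive now > deadline: %d\n", now > deadline);	/* 1 - wrong  */
	printf("time_after(now, deadline): %d\n",
	       time_after(now, deadline));			/* 0 - correct */
	return 0;
}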
+bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ struct queue_entry *entry;
+
+ if (!test_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags)) {
+ unsigned long tout = msecs_to_jiffies(1000);
+
+ if (time_before(jiffies, rt2x00dev->last_nostatus_check + tout))
+ return false;
+ }
+
+ rt2x00dev->last_nostatus_check = jiffies;
+
+ tx_queue_for_each(rt2x00dev, queue) {
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+ if (rt2800_entry_txstatus_timeout(rt2x00dev, entry))
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(rt2800_txstatus_timeout);
+
+void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ struct queue_entry *entry;
+
+ /*
+ * Process any trailing TX status reports for IO failures,
+ * we loop until we find the first non-IO error entry. This
+ * can either be a frame which is free, is being uploaded,
+ * or has completed the upload but didn't have an entry
+ * in the TX_STAT_FIFO register yet.
+ */
+ tx_queue_for_each(rt2x00dev, queue) {
+ while (!rt2x00queue_empty(queue)) {
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+
+ if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+ !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+ break;
+
+ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) ||
+ rt2800_entry_txstatus_timeout(rt2x00dev, entry))
+ rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
+ else
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800_txdone_nostatus);
+
static unsigned int rt2800_hw_beacon_base(struct rt2x00_dev *rt2x00dev,
unsigned int index)
{
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 51d9c2a932cc..0dff2c7b3010 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -195,6 +195,9 @@ void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *tx
void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
bool match);
+void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
+void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev);
+bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev);
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
void rt2800_clear_beacon(struct queue_entry *entry);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index e1a7ed7e4892..ddb88cfeace2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -175,161 +175,6 @@ static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}
-static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
-{
- __le32 *txwi;
- u32 word;
- int wcid, tx_wcid;
-
- wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
-
- txwi = rt2800_drv_get_txwi(entry);
- word = rt2x00_desc_read(txwi, 1);
- tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
-
- return (tx_wcid == wcid);
-}
-
-static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
-{
- u32 status = *(u32 *)data;
-
- /*
- * rt2800pci hardware might reorder frames when exchanging traffic
- * with multiple BA enabled STAs.
- *
- * For example, a tx queue
- * [ STA1 | STA2 | STA1 | STA2 ]
- * can result in tx status reports
- * [ STA1 | STA1 | STA2 | STA2 ]
- * when the hw decides to aggregate the frames for STA1 into one AMPDU.
- *
- * To mitigate this effect, associate the tx status to the first frame
- * in the tx queue with a matching wcid.
- */
- if (rt2800mmio_txdone_entry_check(entry, status) &&
- !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- /*
- * Got a matching frame, associate the tx status with
- * the frame
- */
- entry->status = status;
- set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
- return true;
- }
-
- /* Check the next frame */
- return false;
-}
-
-static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
-{
- u32 status = *(u32 *)data;
-
- /*
- * Find the first frame without tx status and assign this status to it
- * regardless if it matches or not.
- */
- if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- /*
- * Got a matching frame, associate the tx status with
- * the frame
- */
- entry->status = status;
- set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
- return true;
- }
-
- /* Check the next frame */
- return false;
-}
-static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
- void *data)
-{
- if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
- rt2800_txdone_entry(entry, entry->status,
- rt2800mmio_get_txwi(entry), true);
- return false;
- }
-
- /* No more frames to release */
- return true;
-}
-
-static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
- u32 status;
- u8 qid;
- int max_tx_done = 16;
-
- while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
- qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
- if (unlikely(qid >= QID_RX)) {
- /*
- * Unknown queue, this shouldn't happen. Just drop
- * this tx status.
- */
- rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
- qid);
- break;
- }
-
- queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
- if (unlikely(queue == NULL)) {
- /*
- * The queue is NULL, this shouldn't happen. Stop
- * processing here and drop the tx status
- */
- rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
- qid);
- break;
- }
-
- if (unlikely(rt2x00queue_empty(queue))) {
- /*
- * The queue is empty. Stop processing here
- * and drop the tx status.
- */
- rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
- qid);
- break;
- }
-
- /*
- * Let's associate this tx status with the first
- * matching frame.
- */
- if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, &status,
- rt2800mmio_txdone_find_entry)) {
- /*
- * We cannot match the tx status to any frame, so just
- * use the first one.
- */
- if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, &status,
- rt2800mmio_txdone_match_first)) {
- rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
- qid);
- break;
- }
- }
-
- /*
- * Release all frames with a valid tx status.
- */
- rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
- Q_INDEX, NULL,
- rt2800mmio_txdone_release_entries);
-
- if (--max_tx_done == 0)
- break;
- }
-
- return !max_tx_done;
-}
-
static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
struct rt2x00_field32 irq_field)
{
@@ -346,20 +191,6 @@ static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
-void rt2800mmio_txstatus_tasklet(unsigned long data)
-{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- if (rt2800mmio_txdone(rt2x00dev))
- tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-
- /*
- * No need to enable the tx status interrupt here as we always
- * leave it enabled to minimize the possibility of a tx status
- * register overflow. See comment in interrupt handler.
- */
-}
-EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
-
void rt2800mmio_pretbtt_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
@@ -424,12 +255,26 @@ void rt2800mmio_autowake_tasklet(unsigned long data)
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
-static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
+static void rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
+{
+ bool timeout = false;
+
+ while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
+ (timeout = rt2800_txstatus_timeout(rt2x00dev))) {
+
+ rt2800_txdone(rt2x00dev);
+
+ if (timeout)
+ rt2800_txdone_nostatus(rt2x00dev);
+ }
+}
+
+static bool rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
{
u32 status;
- int i;
+ bool more = false;
- /*
+ /* FIXME: rewrite this comment
* The TX_FIFO_STATUS interrupt needs special care. We should
* read TX_STA_FIFO but we should do it immediately as otherwise
* the register can overflow and we would lose status reports.
@@ -440,28 +285,36 @@ static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
* because we can schedule the tasklet multiple times (when the
* interrupt fires again during tx status processing).
*
- * Furthermore we don't disable the TX_FIFO_STATUS
- * interrupt here but leave it enabled so that the TX_STA_FIFO
- * can also be read while the tx status tasklet gets executed.
- *
- * Since we have only one producer and one consumer we don't
+ * The txstatus tasklet is called with INT_SOURCE_CSR_TX_FIFO_STATUS
+ * disabled, so we have only one producer and one consumer - we don't
* need to lock the kfifo.
*/
- for (i = 0; i < rt2x00dev->tx->limit; i++) {
+ while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
-
if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
break;
- if (!kfifo_put(&rt2x00dev->txstatus_fifo, status)) {
- rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
- break;
- }
+ kfifo_put(&rt2x00dev->txstatus_fifo, status);
+ more = true;
}
- /* Schedule the tasklet for processing the tx status. */
- tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+ return more;
+}
+
+void rt2800mmio_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+
+ do {
+ rt2800mmio_txdone(rt2x00dev);
+
+ } while (rt2800mmio_fetch_txstatus(rt2x00dev));
+
+ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2800mmio_enable_interrupt(rt2x00dev,
+ INT_SOURCE_CSR_TX_FIFO_STATUS);
}
+EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
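As the reworked comment in rt2800mmio_fetch_txstatus() notes, status words are now produced only from the IRQ path (with INT_SOURCE_CSR_TX_FIFO_STATUS masked while the tasklet runs) and consumed only by the tasklet — a single-producer/single-consumer pairing, which is exactly the case where kfifo needs no lock. A minimal SPSC ring in C11 atomics illustrating why that pairing is safe (this models the property, not kfifo's actual implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define SZ 8	/* power of two, like a kfifo */
static unsigned int buf[SZ];
static atomic_uint head, tail;	/* free-running indices */

static int push(unsigned int v)	/* called by the one producer only */
{
	unsigned int h = atomic_load_explicit(&head, memory_order_relaxed);

	if (h - atomic_load_explicit(&tail, memory_order_acquire) == SZ)
		return 0;	/* full */
	buf[h & (SZ - 1)] = v;
	atomic_store_explicit(&head, h + 1, memory_order_release);
	return 1;
}

static int pop(unsigned int *v)	/* called by the one consumer only */
{
	unsigned int t = atomic_load_explicit(&tail, memory_order_relaxed);

	if (atomic_load_explicit(&head, memory_order_acquire) == t)
		return 0;	/* empty */
	*v = buf[t & (SZ - 1)];
	atomic_store_explicit(&tail, t + 1, memory_order_release);
	return 1;
}

static void *consumer(void *arg)
{
	unsigned int v, got = 0;

	while (got < 100)
		if (pop(&v))
			got++;
	printf("consumed %u entries without a lock\n", got);
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned int i = 0;

	pthread_create(&t, NULL, consumer, NULL);
	while (i < 100)
		if (push(i))
			i++;
	pthread_join(t, NULL);
	return 0;
}

Each index has exactly one writer, so acquire/release ordering on the indices alone suffices; a second producer or consumer would reintroduce the need for a lock.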
irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
{
@@ -486,11 +339,8 @@ irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
mask = ~reg;
if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
- rt2800mmio_txstatus_interrupt(rt2x00dev);
- /*
- * Never disable the TX_FIFO_STATUS interrupt.
- */
- rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ rt2800mmio_fetch_txstatus(rt2x00dev);
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}
if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
@@ -616,6 +466,53 @@ void rt2800mmio_kick_queue(struct data_queue *queue)
}
EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
+void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ bool tx_queue = false;
+ unsigned int i;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ tx_queue = true;
+ break;
+ case QID_RX:
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < 5; i++) {
+ /*
+ * Check if the driver is already done, otherwise we
+ * have to sleep a little while to give the driver/hw
+ * the opportunity to complete the interrupt processing itself.
+ */
+ if (rt2x00queue_empty(queue))
+ break;
+
+ /*
+ * For TX queues, schedule the completion tasklet to catch
+ * tx status timeouts; otherwise just wait.
+ */
+ if (tx_queue) {
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ rt2800mmio_txdone(rt2x00dev);
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ }
+
+ /*
+ * Wait for a little while to give the driver
+ * the opportunity to recover itself.
+ */
+ msleep(50);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_flush_queue);
+
void rt2800mmio_stop_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
index b63312ce3f27..3a513273f414 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
@@ -148,6 +148,7 @@ void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
/* Queue handlers */
void rt2800mmio_start_queue(struct data_queue *queue);
void rt2800mmio_kick_queue(struct data_queue *queue);
+void rt2800mmio_flush_queue(struct data_queue *queue, bool drop);
void rt2800mmio_stop_queue(struct data_queue *queue);
void rt2800mmio_queue_init(struct data_queue *queue);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index 71b1affc3885..0291441ac548 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -364,7 +364,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.start_queue = rt2800mmio_start_queue,
.kick_queue = rt2800mmio_kick_queue,
.stop_queue = rt2800mmio_stop_queue,
- .flush_queue = rt2x00mmio_flush_queue,
+ .flush_queue = rt2800mmio_flush_queue,
.write_tx_desc = rt2800mmio_write_tx_desc,
.write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 98a7313fea4a..19eabf16147b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -116,35 +116,6 @@ static bool rt2800usb_txstatus_pending(struct rt2x00_dev *rt2x00dev)
return false;
}
-static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
-{
- bool tout;
-
- if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
- return false;
-
- tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(500));
- if (unlikely(tout))
- rt2x00_dbg(entry->queue->rt2x00dev,
- "TX status timeout for entry %d in queue %d\n",
- entry->entry_idx, entry->queue->qid);
- return tout;
-
-}
-
-static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
- struct queue_entry *entry;
-
- tx_queue_for_each(rt2x00dev, queue) {
- entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- if (rt2800usb_entry_txstatus_timeout(entry))
- return true;
- }
- return false;
-}
-
#define TXSTATUS_READ_INTERVAL 1000000
static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
@@ -171,7 +142,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
}
/* Check if there is any entry that timed out waiting on TX status */
- if (rt2800usb_txstatus_timeout(rt2x00dev))
+ if (rt2800_txstatus_timeout(rt2x00dev))
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
if (rt2800usb_txstatus_pending(rt2x00dev)) {
@@ -501,123 +472,17 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
/*
* TX control handlers
*/
-static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
-{
- __le32 *txwi;
- u32 word;
- int wcid, ack, pid;
- int tx_wcid, tx_ack, tx_pid, is_agg;
-
- /*
- * This frames has returned with an IO error,
- * so the status report is not intended for this
- * frame.
- */
- if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
- return false;
-
- wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
- ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
- pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
- is_agg = rt2x00_get_field32(reg, TX_STA_FIFO_TX_AGGRE);
-
- /*
- * Validate if this TX status report is intended for
- * this entry by comparing the WCID/ACK/PID fields.
- */
- txwi = rt2800usb_get_txwi(entry);
-
- word = rt2x00_desc_read(txwi, 1);
- tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
- tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
- tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
-
- if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) {
- rt2x00_dbg(entry->queue->rt2x00dev,
- "TX status report missed for queue %d entry %d\n",
- entry->queue->qid, entry->entry_idx);
- return false;
- }
-
- return true;
-}
-
-static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
- struct queue_entry *entry;
- u32 reg;
- u8 qid;
- bool match;
-
- while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
- /*
- * TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus qid is
- * guaranteed to be one of the TX QIDs .
- */
- qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
- queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
-
- if (unlikely(rt2x00queue_empty(queue))) {
- rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
- qid);
- break;
- }
-
- entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
-
- if (unlikely(test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
- !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))) {
- rt2x00_warn(rt2x00dev, "Data pending for entry %u in queue %u\n",
- entry->entry_idx, qid);
- break;
- }
-
- match = rt2800usb_txdone_entry_check(entry, reg);
- rt2800_txdone_entry(entry, reg, rt2800usb_get_txwi(entry), match);
- }
-}
-
-static void rt2800usb_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
- struct queue_entry *entry;
-
- /*
- * Process any trailing TX status reports for IO failures,
- * we loop until we find the first non-IO error entry. This
- * can either be a frame which is free, is being uploaded,
- * or has completed the upload but didn't have an entry
- * in the TX_STAT_FIFO register yet.
- */
- tx_queue_for_each(rt2x00dev, queue) {
- while (!rt2x00queue_empty(queue)) {
- entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
-
- if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
- !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
- break;
-
- if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) ||
- rt2800usb_entry_txstatus_timeout(entry))
- rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
- else
- break;
- }
- }
-}
-
static void rt2800usb_work_txdone(struct work_struct *work)
{
struct rt2x00_dev *rt2x00dev =
container_of(work, struct rt2x00_dev, txdone_work);
while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
- rt2800usb_txstatus_timeout(rt2x00dev)) {
+ rt2800_txstatus_timeout(rt2x00dev)) {
- rt2800usb_txdone(rt2x00dev);
+ rt2800_txdone(rt2x00dev);
- rt2800usb_txdone_nostatus(rt2x00dev);
+ rt2800_txdone_nostatus(rt2x00dev);
/*
* The hw may delay sending the packet after DMA complete
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index a279a4363bc1..4b1744e9fb78 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -665,6 +665,7 @@ enum rt2x00_state_flags {
DEVICE_STATE_STARTED,
DEVICE_STATE_ENABLED_RADIO,
DEVICE_STATE_SCANNING,
+ DEVICE_STATE_FLUSHING,
/*
* Driver configuration
@@ -980,6 +981,8 @@ struct rt2x00_dev {
*/
DECLARE_KFIFO_PTR(txstatus_fifo, u32);
+ unsigned long last_nostatus_check;
+
/*
* Timer to ensure tx status reports are read (rt2800usb).
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index acc399b5574e..61ba573e8bf1 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -464,11 +464,7 @@ static ssize_t rt2x00debug_read_##__name(struct file *file, \
\
size = sprintf(line, __format, value); \
\
- if (copy_to_user(buf, line, size)) \
- return -EFAULT; \
- \
- *offset += size; \
- return size; \
+ return simple_read_from_buffer(buf, length, offset, line, size); \
}
#define RT2X00DEBUGFS_OPS_WRITE(__name, __type) \
@@ -545,11 +541,7 @@ static ssize_t rt2x00debug_read_dev_flags(struct file *file,
size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->flags);
- if (copy_to_user(buf, line, size))
- return -EFAULT;
-
- *offset += size;
- return size;
+ return simple_read_from_buffer(buf, length, offset, line, size);
}
static const struct file_operations rt2x00debug_fop_dev_flags = {
@@ -574,11 +566,7 @@ static ssize_t rt2x00debug_read_cap_flags(struct file *file,
size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->cap_flags);
- if (copy_to_user(buf, line, size))
- return -EFAULT;
-
- *offset += size;
- return size;
+ return simple_read_from_buffer(buf, length, offset, line, size);
}
static const struct file_operations rt2x00debug_fop_cap_flags = {
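
The three debugfs hunks above replace open-coded copy_to_user()
sequences that ignored both the caller's buffer length and the file
position: every read copied the full line again and returned its size,
so short buffers could be overrun and EOF was never reported.
simple_read_from_buffer() clamps the copy to the requested length,
honours *offset and advances it. A minimal read handler in the same
shape:

        static ssize_t sketch_read(struct file *file, char __user *buf,
                                   size_t length, loff_t *offset)
        {
                char line[16];
                int size = scnprintf(line, sizeof(line), "0x%.8x\n", 0u);

                return simple_read_from_buffer(buf, length, offset,
                                               line, size);
        }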
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index fa2fd64084ac..2825560e2424 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -720,8 +720,12 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return;
+ set_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags);
+
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_flush_queue(queue, drop);
+
+ clear_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags);
}
EXPORT_SYMBOL_GPL(rt2x00mac_flush);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 710e9641552e..92ddc19e7bf7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -113,6 +113,7 @@ int rt2x00queue_map_txskb(struct queue_entry *entry)
return -ENOMEM;
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
+ rt2x00lib_dmadone(entry);
return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
@@ -1038,6 +1039,7 @@ void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
*/
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_start_queue(queue);
+ rt2x00dev->last_nostatus_check = jiffies;
rt2x00queue_start_queue(rt2x00dev->rx);
}
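
The new last_nostatus_check timestamp, armed here when the TX queues
start, presumably lets the completion path rate-limit its scan for
entries stuck waiting on TX status instead of walking every queue on
every pass. The usual jiffies idiom for that kind of limit, as a
sketch:

        static bool sketch_should_scan(struct sketch_dev *dev)
        {
                /* allow at most one expensive scan per 500 ms */
                if (time_before(jiffies,
                                dev->last_check + msecs_to_jiffies(500)))
                        return false;
                dev->last_check = jiffies;
                return true;
        }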
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 9a1d15b3ce45..cec37787ecf8 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -499,7 +499,7 @@ static void rtl8187b_status_cb(struct urb *urb)
if (cmd_type == 1) {
unsigned int pkt_rc, seq_no;
bool tok;
- struct sk_buff *skb;
+ struct sk_buff *skb, *iter;
struct ieee80211_hdr *ieee80211hdr;
unsigned long flags;
@@ -508,8 +508,9 @@ static void rtl8187b_status_cb(struct urb *urb)
seq_no = (val >> 16) & 0xFFF;
spin_lock_irqsave(&priv->b_tx_status.queue.lock, flags);
- skb_queue_reverse_walk(&priv->b_tx_status.queue, skb) {
- ieee80211hdr = (struct ieee80211_hdr *)skb->data;
+ skb = NULL;
+ skb_queue_reverse_walk(&priv->b_tx_status.queue, iter) {
+ ieee80211hdr = (struct ieee80211_hdr *)iter->data;
/*
* While testing, it was discovered that the seq_no
@@ -522,10 +523,12 @@ static void rtl8187b_status_cb(struct urb *urb)
* it's unlikely we wrongly ack some sent data
*/
if ((le16_to_cpu(ieee80211hdr->seq_ctrl)
- & 0xFFF) == seq_no)
+ & 0xFFF) == seq_no) {
+ skb = iter;
break;
+ }
}
- if (skb != (struct sk_buff *) &priv->b_tx_status.queue) {
+ if (skb) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
__skb_unlink(skb, &priv->b_tx_status.queue);
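
The rtl8187 change is a classic loop-cursor fix: the old code detected
"not found" by comparing the cursor against the queue head after the
walk, relying on where skb_queue_reverse_walk() leaves its cursor.
Walking with a scratch iterator and publishing the hit through a
separate pointer makes the test an honest NULL check (seq_matches()
below is a hypothetical predicate):

        struct sk_buff *found = NULL, *iter;

        skb_queue_reverse_walk(queue, iter) {
                if (seq_matches(iter, seq_no)) {
                        found = iter;
                        break;
                }
        }
        if (found)
                __skb_unlink(found, queue);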
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
index c2d5b495c179..c089540116fa 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
@@ -146,7 +146,7 @@ static int rtl8187_register_led(struct ieee80211_hw *dev,
led->dev = dev;
led->ledpin = ledpin;
led->is_radio = is_radio;
- strncpy(led->name, name, sizeof(led->name));
+ strlcpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
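
strncpy() does not NUL-terminate the destination when the source fills
the buffer, so led->name could be left unterminated; strlcpy() always
writes the terminator, truncating the copy if needed. In miniature:

        char name[8];

        strncpy(name, "0123456789", sizeof(name)); /* no trailing NUL */
        strlcpy(name, "0123456789", sizeof(name)); /* "0123456" + NUL */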
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 505ab1b055ff..56040b181cf5 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4918,11 +4918,10 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct device *dev = &priv->udev->dev;
u32 queue, rts_rate;
u16 pktlen = skb->len;
- u16 seq_number;
u16 rate_flag = tx_info->control.rates[0].flags;
int tx_desc_size = priv->fops->tx_desc_size;
int ret;
- bool usedesc40, ampdu_enable, sgi = false, short_preamble = false;
+ bool ampdu_enable, sgi = false, short_preamble = false;
if (skb_headroom(skb) < tx_desc_size) {
dev_warn(dev,
@@ -4946,7 +4945,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (ieee80211_is_action(hdr->frame_control))
rtl8xxxu_dump_action(dev, hdr);
- usedesc40 = (tx_desc_size == 40);
tx_info->rate_driver_data[0] = hw;
if (control && control->sta)
@@ -5013,7 +5011,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
else
rts_rate = 0;
- seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble,
ampdu_enable, rts_rate);
@@ -6231,6 +6228,8 @@ static const struct usb_device_id dev_table[] = {
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
/* Currently untested 8188 series devices */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x018a, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8170, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index b026e80940a4..6fbf8845a2ab 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -1324,13 +1324,13 @@ bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv)
switch (rtlpriv->rtlhal.interface) {
case INTF_PCI:
- wifionly_cfg->chip_interface = BTC_INTF_PCI;
+ wifionly_cfg->chip_interface = WIFIONLY_INTF_PCI;
break;
case INTF_USB:
- wifionly_cfg->chip_interface = BTC_INTF_USB;
+ wifionly_cfg->chip_interface = WIFIONLY_INTF_USB;
break;
default:
- wifionly_cfg->chip_interface = BTC_INTF_UNKNOWN;
+ wifionly_cfg->chip_interface = WIFIONLY_INTF_UNKNOWN;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 988d5ac57d02..cfc8762c55f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -951,12 +951,8 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
static void _rtl88ee_hw_configure(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 reg_bw_opmode;
- u32 reg_ratr, reg_prsr;
+ u32 reg_prsr;
- reg_bw_opmode = BW_OPMODE_20MHZ;
- reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
- RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 545115db507e..f783e4a8083d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -799,11 +799,9 @@ static void _rtl8723e_hw_configure(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 reg_bw_opmode;
- u32 reg_ratr, reg_prsr;
+ u32 reg_prsr;
reg_bw_opmode = BW_OPMODE_20MHZ;
- reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
- RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 317c1b3101da..ba258318ee9f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -3404,75 +3404,6 @@ static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
"%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
-static u8 _rtl8821ae_mrate_idx_to_arfr_id(
- struct ieee80211_hw *hw, u8 rate_index,
- enum wireless_mode wirelessmode)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &rtlpriv->phy;
- u8 ret = 0;
- switch (rate_index) {
- case RATR_INX_WIRELESS_NGB:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 1;
- else
- ret = 0;
- ; break;
- case RATR_INX_WIRELESS_N:
- case RATR_INX_WIRELESS_NG:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 5;
- else
- ret = 4;
- ; break;
- case RATR_INX_WIRELESS_NB:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 3;
- else
- ret = 2;
- ; break;
- case RATR_INX_WIRELESS_GB:
- ret = 6;
- break;
- case RATR_INX_WIRELESS_G:
- ret = 7;
- break;
- case RATR_INX_WIRELESS_B:
- ret = 8;
- break;
- case RATR_INX_WIRELESS_MC:
- if ((wirelessmode == WIRELESS_MODE_B)
- || (wirelessmode == WIRELESS_MODE_G)
- || (wirelessmode == WIRELESS_MODE_N_24G)
- || (wirelessmode == WIRELESS_MODE_AC_24G))
- ret = 6;
- else
- ret = 7;
- case RATR_INX_WIRELESS_AC_5N:
- if (rtlphy->rf_type == RF_1T1R)
- ret = 10;
- else
- ret = 9;
- break;
- case RATR_INX_WIRELESS_AC_24N:
- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
- if (rtlphy->rf_type == RF_1T1R)
- ret = 10;
- else
- ret = 9;
- } else {
- if (rtlphy->rf_type == RF_1T1R)
- ret = 11;
- else
- ret = 12;
- }
- break;
- default:
- ret = 0; break;
- }
- return ret;
-}
-
static u32 _rtl8821ae_rate_to_bitmap_2ssvht(__le16 vht_rate)
{
u8 i, j, tmp_rate;
@@ -3761,7 +3692,7 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
break;
}
- ratr_index = _rtl8821ae_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode);
+ ratr_index = rtl_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode);
sta_entry->ratr_index = ratr_index;
ratr_bitmap = _rtl8821ae_set_ra_vht_ratr_bitmap(hw, wirelessmode,
ratr_bitmap);
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 0f3b98c5227f..87bc21bb5e8b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1905,10 +1905,6 @@ struct rtl_efuse {
u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
u16 efuse_usedbytes;
u8 efuse_usedpercentage;
-#ifdef EFUSE_REPG_WORKAROUND
- bool efuse_re_pg_sec1flag;
- u8 efuse_re_pg_data[8];
-#endif
u8 autoload_failflag;
u8 autoload_status;
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 01edf960ff3c..182b06629371 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -282,10 +282,8 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
struct rsi_hw *adapter = common->priv;
struct ieee80211_vif *vif;
struct ieee80211_tx_info *info;
- struct skb_info *tx_params;
struct ieee80211_bss_conf *bss;
int status = -EINVAL;
- u8 header_size;
if (!skb)
return 0;
@@ -297,8 +295,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
goto err;
vif = info->control.vif;
bss = &vif->bss_conf;
- tx_params = (struct skb_info *)info->driver_data;
- header_size = tx_params->internal_hdr_size;
if (((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 4e510cbe0a89..e56fc83faf0e 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -924,7 +924,7 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw,
if (status)
return status;
- if (vif->type == NL80211_IFTYPE_STATION && key->key &&
+ if (vif->type == NL80211_IFTYPE_STATION &&
(key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
key->cipher == WLAN_CIPHER_SUITE_WEP40)) {
if (!rsi_send_block_unblock_frame(adapter->priv, false))
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index c0a163e40402..f360690396dd 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -266,15 +266,17 @@ static void rsi_rx_done_handler(struct urb *urb)
if (urb->status)
goto out;
- if (urb->actual_length <= 0) {
- rsi_dbg(INFO_ZONE, "%s: Zero length packet\n", __func__);
+ if (urb->actual_length <= 0 ||
+ urb->actual_length > rx_cb->rx_skb->len) {
+ rsi_dbg(INFO_ZONE, "%s: Invalid packet length = %d\n",
+ __func__, urb->actual_length);
goto out;
}
if (skb_queue_len(&dev->rx_q) >= RSI_MAX_RX_PKTS) {
rsi_dbg(INFO_ZONE, "Max RX packets reached\n");
goto out;
}
- skb_put(rx_cb->rx_skb, urb->actual_length);
+ skb_trim(rx_cb->rx_skb, urb->actual_length);
skb_queue_tail(&dev->rx_q, rx_cb->rx_skb);
rsi_set_event(&dev->rx_thread.event);
@@ -308,6 +310,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
if (!skb)
return -ENOMEM;
skb_reserve(skb, MAX_DWORD_ALIGN_BYTES);
+ skb_put(skb, RSI_MAX_RX_USB_PKT_SIZE - MAX_DWORD_ALIGN_BYTES);
dword_align_bytes = (unsigned long)skb->data & 0x3f;
if (dword_align_bytes > 0)
skb_push(skb, dword_align_bytes);
@@ -319,7 +322,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
usb_rcvbulkpipe(dev->usbdev,
dev->bulkin_endpoint_addr[ep_num - 1]),
urb->transfer_buffer,
- RSI_MAX_RX_USB_PKT_SIZE,
+ skb->len,
rsi_rx_done_handler,
rx_cb);
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
index d9ff3b8be86e..60f1f286b030 100644
--- a/drivers/net/wireless/rsi/rsi_common.h
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -75,7 +75,6 @@ static inline int rsi_kill_thread(struct rsi_thread *handle)
atomic_inc(&handle->thread_done);
rsi_set_event(&handle->event);
- wait_for_completion(&handle->completion);
return kthread_stop(handle->task);
}
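
Dropping the wait_for_completion() is sound because kthread_stop()
itself blocks until the thread function returns; waiting first on a
completion the thread may never signal only adds a hang risk. The
canonical shape:

        static int sketch_thread_fn(void *data)
        {
                while (!kthread_should_stop())
                        do_work(data);          /* hypothetical work loop */
                return 0;
        }

        /* teardown: wakes the thread and waits for sketch_thread_fn() */
        ret = kthread_stop(task);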
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index f7b1b0062db3..8c800ef23159 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -624,9 +624,9 @@ cw1200_tx_h_bt(struct cw1200_common *priv,
priority = WSM_EPTA_PRIORITY_ACTION;
else if (ieee80211_is_mgmt(t->hdr->frame_control))
priority = WSM_EPTA_PRIORITY_MGT;
- else if ((wsm->queue_id == WSM_QUEUE_VOICE))
+ else if (wsm->queue_id == WSM_QUEUE_VOICE)
priority = WSM_EPTA_PRIORITY_VOICE;
- else if ((wsm->queue_id == WSM_QUEUE_VIDEO))
+ else if (wsm->queue_id == WSM_QUEUE_VIDEO)
priority = WSM_EPTA_PRIORITY_VIDEO;
else
priority = WSM_EPTA_PRIORITY_DATA;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 89b0d0fade9f..26b187336875 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include "wlcore.h"
#include "debug.h"
@@ -957,6 +958,8 @@ static void wl1271_recovery_work(struct work_struct *work)
BUG_ON(wl->conf.recovery.bug_on_recovery &&
!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
+ clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
+
if (wl->conf.recovery.no_recovery) {
wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
goto out_unlock;
@@ -6625,13 +6628,25 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
}
#ifdef CONFIG_PM
+ device_init_wakeup(wl->dev, true);
+
ret = enable_irq_wake(wl->irq);
if (!ret) {
wl->irq_wake_enabled = true;
- device_init_wakeup(wl->dev, 1);
if (pdev_data->pwr_in_suspend)
wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
}
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (res) {
+ wl->wakeirq = res->start;
+ wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
+ ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
+ if (ret)
+ wl->wakeirq = -ENODEV;
+ } else {
+ wl->wakeirq = -ENODEV;
+ }
#endif
disable_irq(wl->irq);
wl1271_power_off(wl);
@@ -6659,6 +6674,9 @@ out_unreg:
wl1271_unregister_hw(wl);
out_irq:
+ if (wl->wakeirq >= 0)
+ dev_pm_clear_wake_irq(wl->dev);
+ device_init_wakeup(wl->dev, false);
free_irq(wl->irq, wl);
out_free_nvs:
@@ -6710,6 +6728,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
int ret;
unsigned long start_time = jiffies;
bool pending = false;
+ bool recovery = false;
/* Nothing to do if no ELP mode requested */
if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
@@ -6726,7 +6745,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
if (ret < 0) {
- wl12xx_queue_recovery_work(wl);
+ recovery = true;
goto err;
}
@@ -6734,11 +6753,12 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
ret = wait_for_completion_timeout(&compl,
msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
if (ret == 0) {
- wl1271_error("ELP wakeup timeout!");
- wl12xx_queue_recovery_work(wl);
+ wl1271_warning("ELP wakeup timeout!");
/* Return no error for runtime PM for recovery */
- return 0;
+ ret = 0;
+ recovery = true;
+ goto err;
}
}
@@ -6753,6 +6773,12 @@ err:
spin_lock_irqsave(&wl->wl_lock, flags);
wl->elp_compl = NULL;
spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ if (recovery) {
+ set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
+ wl12xx_queue_recovery_work(wl);
+ }
+
return ret;
}
@@ -6815,10 +6841,16 @@ int wlcore_remove(struct platform_device *pdev)
if (!wl->initialized)
return 0;
- if (wl->irq_wake_enabled) {
- device_init_wakeup(wl->dev, 0);
- disable_irq_wake(wl->irq);
+ if (wl->wakeirq >= 0) {
+ dev_pm_clear_wake_irq(wl->dev);
+ wl->wakeirq = -ENODEV;
}
+
+ device_init_wakeup(wl->dev, false);
+
+ if (wl->irq_wake_enabled)
+ disable_irq_wake(wl->irq);
+
wl1271_unregister_hw(wl);
pm_runtime_put_sync(wl->dev);
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 750bea3574ee..4c2154b9e6a3 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -241,7 +241,7 @@ static const struct of_device_id wlcore_sdio_of_match_table[] = {
{ }
};
-static int wlcore_probe_of(struct device *dev, int *irq,
+static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq,
struct wlcore_platdev_data *pdev_data)
{
struct device_node *np = dev->of_node;
@@ -259,6 +259,8 @@ static int wlcore_probe_of(struct device *dev, int *irq,
return -EINVAL;
}
+ *wakeirq = irq_of_parse_and_map(np, 1);
+
/* optional clock frequency params */
of_property_read_u32(np, "ref-clock-frequency",
&pdev_data->ref_clock_freq);
@@ -268,7 +270,7 @@ static int wlcore_probe_of(struct device *dev, int *irq,
return 0;
}
#else
-static int wlcore_probe_of(struct device *dev, int *irq,
+static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq,
struct wlcore_platdev_data *pdev_data)
{
return -ENODATA;
@@ -280,10 +282,10 @@ static int wl1271_probe(struct sdio_func *func,
{
struct wlcore_platdev_data *pdev_data;
struct wl12xx_sdio_glue *glue;
- struct resource res[1];
+ struct resource res[2];
mmc_pm_flag_t mmcflags;
int ret = -ENOMEM;
- int irq;
+ int irq, wakeirq;
const char *chip_family;
/* We are only able to handle the wlan function */
@@ -308,7 +310,7 @@ static int wl1271_probe(struct sdio_func *func,
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
- ret = wlcore_probe_of(&func->dev, &irq, pdev_data);
+ ret = wlcore_probe_of(&func->dev, &irq, &wakeirq, pdev_data);
if (ret)
goto out;
@@ -351,6 +353,11 @@ static int wl1271_probe(struct sdio_func *func,
irqd_get_trigger_type(irq_get_irq_data(irq));
res[0].name = "irq";
+ res[1].start = wakeirq;
+ res[1].flags = IORESOURCE_IRQ |
+ irqd_get_trigger_type(irq_get_irq_data(wakeirq));
+ res[1].name = "wakeirq";
+
ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
if (ret) {
dev_err(glue->dev, "can't add resources\n");
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index d4b1f66ef457..dd14850b0603 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -199,8 +199,10 @@ struct wl1271 {
struct wl1271_if_operations *if_ops;
int irq;
+ int wakeirq;
int irq_flags;
+ int wakeirq_flags;
spinlock_t wl_lock;
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 1f6d9f357e57..9ccd780695f0 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -235,7 +235,7 @@ void zd_mac_clear(struct zd_mac *mac)
{
flush_workqueue(zd_workqueue);
zd_chip_clear(&mac->chip);
- ZD_ASSERT(!spin_is_locked(&mac->lock));
+ lockdep_assert_held(&mac->lock);
ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
}
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a46a1e94505d..936c0b3e0ba2 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -241,8 +241,9 @@ struct xenvif_hash_cache {
struct xenvif_hash {
unsigned int alg;
u32 flags;
+ bool mapping_sel;
u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
- u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+ u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
unsigned int size;
struct xenvif_hash_cache cache;
};
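
Doubling the mapping table and adding mapping_sel turns the hash.c
update below into a ping-pong scheme: the control path copies the live
table into the inactive half, grant-copies the new entries into it,
validates them, and only then flips the selector, so
xenvif_select_queue() never observes a half-written table.
Schematically (apply_update() and validate() are placeholders;
memory-ordering details elided):

        u32 *shadow = hash->mapping[!hash->mapping_sel];

        /* start from the live contents, then patch in the update */
        memcpy(shadow, hash->mapping[hash->mapping_sel],
               hash->size * sizeof(*shadow));
        apply_update(shadow, off, len);
        if (!validate(shadow, off, len))
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
        hash->mapping_sel = !hash->mapping_sel; /* publish the new table */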
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 3c4c58b9fe76..0ccb021f1e78 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
vif->hash.size = size;
- memset(vif->hash.mapping, 0, sizeof(u32) * size);
+ memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
+ sizeof(u32) * size);
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
@@ -332,31 +333,49 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
u32 off)
{
- u32 *mapping = &vif->hash.mapping[off];
- struct gnttab_copy copy_op = {
+ u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
+ unsigned int nr = 1;
+ struct gnttab_copy copy_op[2] = {{
.source.u.ref = gref,
.source.domid = vif->domid,
- .dest.u.gmfn = virt_to_gfn(mapping),
.dest.domid = DOMID_SELF,
- .dest.offset = xen_offset_in_page(mapping),
- .len = len * sizeof(u32),
+ .len = len * sizeof(*mapping),
.flags = GNTCOPY_source_gref
- };
+ }};
- if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+ if ((off + len < off) || (off + len > vif->hash.size) ||
+ len > XEN_PAGE_SIZE / sizeof(*mapping))
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
- while (len-- != 0)
- if (mapping[off++] >= vif->num_queues)
- return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+ copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
+ copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
+ if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
+ copy_op[1] = copy_op[0];
+ copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
+ copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
+ copy_op[1].dest.offset = 0;
+ copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
+ copy_op[0].len = copy_op[1].source.offset;
+ nr = 2;
+ }
- if (copy_op.len != 0) {
- gnttab_batch_copy(&copy_op, 1);
+ memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
+ vif->hash.size * sizeof(*mapping));
- if (copy_op.status != GNTST_okay)
+ if (copy_op[0].len != 0) {
+ gnttab_batch_copy(copy_op, nr);
+
+ if (copy_op[0].status != GNTST_okay ||
+ copy_op[nr - 1].status != GNTST_okay)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
}
+ while (len-- != 0)
+ if (mapping[off++] >= vif->num_queues)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ vif->hash.mapping_sel = !vif->hash.mapping_sel;
+
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
@@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
}
if (vif->hash.size != 0) {
+ const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
+
seq_puts(m, "\nHash Mapping:\n");
for (i = 0; i < vif->hash.size; ) {
@@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
for (j = 0; j < n; j++, i++)
- seq_printf(m, "%4u ", vif->hash.mapping[i]);
+ seq_printf(m, "%4u ", mapping[i]);
seq_puts(m, "\n");
}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 92274c237200..182d6770f102 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -162,10 +162,12 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
if (size == 0)
return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
- return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+ return vif->hash.mapping[vif->hash.mapping_sel]
+ [skb_get_hash_raw(skb) % size];
}
-static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3621e05a7494..80aae3a32c2a 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1660,8 +1660,7 @@ module_init(netback_init);
static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
- if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
- debugfs_remove_recursive(xen_netback_dbg_root);
+ debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
xenvif_xenbus_fini();
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index cd51492ae6c2..fe1d52247bbe 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -254,8 +254,7 @@ static void xenvif_debugfs_delif(struct xenvif *vif)
if (IS_ERR_OR_NULL(xen_netback_dbg_root))
return;
- if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root))
- debugfs_remove_recursive(vif->xenvif_dbg_root);
+ debugfs_remove_recursive(vif->xenvif_dbg_root);
vif->xenvif_dbg_root = NULL;
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 9407acbd19a9..f17f602e6171 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -908,7 +908,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
BUG_ON(pull_to <= skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
- BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+ queue->rx.rsp_cons = ++cons;
+ kfree_skb(nskb);
+ return ~0U;
+ }
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(nfrag),
@@ -1045,6 +1049,8 @@ err:
skb->len += rx->status;
i = xennet_fill_frags(queue, skb, &tmpq);
+ if (unlikely(i == ~0U))
+ goto err;
if (rx->flags & XEN_NETRXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
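
The netfront hunks replace a BUG_ON() on a frag count the backend can
influence with a graceful failure: xennet_fill_frags() consumes the
response, frees the spare skb and returns ~0U as an in-band error
sentinel, which the caller maps onto its existing err path. The shape
of the idiom:

        static u32 sketch_fill(struct sk_buff *skb)
        {
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
                        return ~0U;     /* sentinel, checked by the caller */
                /* ... append the fragment ... */
                return 0;
        }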
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 62e9cb167aad..db45c6bbb7bb 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -290,7 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
}
set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
- device_add_disk(dev, disk);
+ device_add_disk(dev, disk, NULL);
revalidate_disk(disk);
return 0;
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 0360c015f658..b123b0dcf274 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1556,7 +1556,7 @@ static int btt_blk_init(struct btt *btt)
}
}
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
- device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
+ device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
revalidate_disk(btt->btt_disk);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 8aae6dcc839f..f1fb39921236 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -54,12 +54,6 @@ static int to_nd_device_type(struct device *dev)
static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- /*
- * Ensure that region devices always have their numa node set as
- * early as possible.
- */
- if (is_nd_region(dev))
- set_dev_node(dev, to_nd_region(dev)->numa_node);
return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
to_nd_device_type(dev));
}
@@ -488,6 +482,8 @@ static void nd_async_device_register(void *d, async_cookie_t cookie)
put_device(dev);
}
put_device(dev);
+ if (dev->parent)
+ put_device(dev->parent);
}
static void nd_async_device_unregister(void *d, async_cookie_t cookie)
@@ -506,7 +502,19 @@ void __nd_device_register(struct device *dev)
{
if (!dev)
return;
+
+ /*
+ * Ensure that region devices always have their NUMA node set as
+ * early as possible. This way we are able to make certain that
+ * any memory associated with the creation and the creation
+ * itself of the region is associated with the correct node.
+ */
+ if (is_nd_region(dev))
+ set_dev_node(dev, to_nd_region(dev)->numa_node);
+
dev->bus = &nvdimm_bus_type;
+ if (dev->parent)
+ get_device(dev->parent);
get_device(dev);
async_schedule_domain(nd_async_device_register, dev,
&nd_async_domain);
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 6c8fb7590838..9899c97138a3 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -75,7 +75,7 @@ static int nvdimm_probe(struct device *dev)
* DIMM capacity. We fail the dimm probe to prevent regions from
* attempting to parse the label area.
*/
- rc = nvdimm_init_config_data(ndd);
+ rc = nd_label_data_init(ndd);
if (rc == -EACCES)
nvdimm_set_locked(dev);
if (rc)
@@ -84,10 +84,6 @@ static int nvdimm_probe(struct device *dev)
dev_dbg(dev, "config data size: %d\n", ndd->nsarea.config_size);
nvdimm_bus_lock(dev);
- ndd->ns_current = nd_label_validate(ndd);
- ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
- nd_label_copy(ndd, to_next_namespace_index(ndd),
- to_current_namespace_index(ndd));
if (ndd->ns_current >= 0) {
rc = nd_label_reserve_dpa(ndd);
if (rc == 0)
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 863cabc35215..6c3de2317390 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -85,56 +85,48 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
return cmd_rc;
}
-int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
+int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
+ size_t offset, size_t len)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
int rc = validate_dimm(ndd), cmd_rc = 0;
struct nd_cmd_get_config_data_hdr *cmd;
- struct nvdimm_bus_descriptor *nd_desc;
- u32 max_cmd_size, config_size;
- size_t offset;
+ size_t max_cmd_size, buf_offset;
if (rc)
return rc;
- if (ndd->data)
- return 0;
-
- if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
- || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
- dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
- ndd->nsarea.max_xfer, ndd->nsarea.config_size);
+ if (offset + len > ndd->nsarea.config_size)
return -ENXIO;
- }
- ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
- if (!ndd->data)
- return -ENOMEM;
-
- max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
- cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
+ max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
+ cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
- nd_desc = nvdimm_bus->nd_desc;
- for (config_size = ndd->nsarea.config_size, offset = 0;
- config_size; config_size -= cmd->in_length,
- offset += cmd->in_length) {
- cmd->in_length = min(config_size, max_cmd_size);
- cmd->in_offset = offset;
+ for (buf_offset = 0; len;
+ len -= cmd->in_length, buf_offset += cmd->in_length) {
+ size_t cmd_size;
+
+ cmd->in_offset = offset + buf_offset;
+ cmd->in_length = min(max_cmd_size, len);
+
+ cmd_size = sizeof(*cmd) + cmd->in_length;
+
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_GET_CONFIG_DATA, cmd,
- cmd->in_length + sizeof(*cmd), &cmd_rc);
+ ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
if (rc < 0)
break;
if (cmd_rc < 0) {
rc = cmd_rc;
break;
}
- memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
+
+ /* out_buf should be valid, copy it into our output buffer */
+ memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
}
- dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
- kfree(cmd);
+ kvfree(cmd);
return rc;
}
@@ -151,15 +143,11 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
if (rc)
return rc;
- if (!ndd->data)
- return -ENXIO;
-
if (offset + len > ndd->nsarea.config_size)
return -ENXIO;
- max_cmd_size = min_t(u32, PAGE_SIZE, len);
- max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
- cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
+ max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
+ cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -183,7 +171,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
break;
}
}
- kfree(cmd);
+ kvfree(cmd);
return rc;
}
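
Both config-data paths now share one chunking discipline: each vendor
command moves at most min(len, max_xfer) bytes, and the loop advances
the device offset and the buffer offset in lockstep. Stripped to its
skeleton (xfer_chunk() is a stand-in for the
ND_CMD_{GET,SET}_CONFIG_DATA call):

        size_t buf_off = 0, chunk;

        while (len) {
                chunk = min_t(size_t, max_cmd_size, len);
                rc = xfer_chunk(offset + buf_off, buf + buf_off, chunk);
                if (rc)
                        break;          /* partial transfer: report rc */
                len -= chunk;
                buf_off += chunk;
        }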
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 1d28cd656536..750dbaa6ce82 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -75,7 +75,8 @@ size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
/*
* Per UEFI 2.7, the minimum size of the Label Storage Area is large
* enough to hold 2 index blocks and 2 labels. The minimum index
- * block size is 256 bytes, and the minimum label size is 256 bytes.
+ * block size is 256 bytes. The label size is 128 for namespaces
+ * prior to version 1.2 and at minimum 256 for version 1.2 and later.
*/
nslot = nvdimm_num_label_slots(ndd);
space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
@@ -183,6 +184,13 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
__le64_to_cpu(nsindex[i]->otheroff));
continue;
}
+ if (__le64_to_cpu(nsindex[i]->labeloff)
+ != 2 * sizeof_namespace_index(ndd)) {
+ dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
+ i, (unsigned long long)
+ __le64_to_cpu(nsindex[i]->labeloff));
+ continue;
+ }
size = __le64_to_cpu(nsindex[i]->mysize);
if (size > sizeof_namespace_index(ndd)
@@ -227,7 +235,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
return -1;
}
-int nd_label_validate(struct nvdimm_drvdata *ndd)
+static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
/*
* In order to probe for and validate namespace index blocks we
@@ -250,12 +258,12 @@ int nd_label_validate(struct nvdimm_drvdata *ndd)
return -1;
}
-void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
- struct nd_namespace_index *src)
+static void nd_label_copy(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_index *dst,
+ struct nd_namespace_index *src)
{
- if (dst && src)
- /* pass */;
- else
+ /* just exit if either destination or source is NULL */
+ if (!dst || !src)
return;
memcpy(dst, src, sizeof_namespace_index(ndd));
@@ -410,6 +418,128 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
return 0;
}
+int nd_label_data_init(struct nvdimm_drvdata *ndd)
+{
+ size_t config_size, read_size, max_xfer, offset;
+ struct nd_namespace_index *nsindex;
+ unsigned int i;
+ int rc = 0;
+ u32 nslot;
+
+ if (ndd->data)
+ return 0;
+
+ if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
+ dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
+ ndd->nsarea.max_xfer, ndd->nsarea.config_size);
+ return -ENXIO;
+ }
+
+ /*
+ * We need to determine the maximum index area as this is the section
+ * we must read and validate before we can start processing labels.
+ *
+ * If the area is too small to contain the two index blocks and two
+ * labels then we abort.
+ *
+ * Start at a label size of 128 as this should result in the largest
+ * possible namespace index size.
+ */
+ ndd->nslabel_size = 128;
+ read_size = sizeof_namespace_index(ndd) * 2;
+ if (!read_size)
+ return -ENXIO;
+
+ /* Allocate config data */
+ config_size = ndd->nsarea.config_size;
+ ndd->data = kvzalloc(config_size, GFP_KERNEL);
+ if (!ndd->data)
+ return -ENOMEM;
+
+ /*
+ * We want to guarantee as few reads as possible while conserving
+ * memory. To do that we figure out how much unused space will be left
+ * in the last read, divide that by the total number of reads it is
+ * going to take given our maximum transfer size, and then reduce our
+ * maximum transfer size based on that result.
+ */
+ max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
+ if (read_size < max_xfer) {
+ /* trim waste */
+ max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
+ DIV_ROUND_UP(config_size, max_xfer);
+ /* make certain we read indexes in exactly 1 read */
+ if (max_xfer < read_size)
+ max_xfer = read_size;
+ }
+
+ /* Make our initial read size a multiple of max_xfer size */
+ read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
+ config_size);
+
+ /* Read the index data */
+ rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
+ if (rc)
+ goto out_err;
+
+ /* Validate index data, if not valid assume all labels are invalid */
+ ndd->ns_current = nd_label_validate(ndd);
+ if (ndd->ns_current < 0)
+ return 0;
+
+ /* Record our index values */
+ ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
+
+ /* Copy "current" index on top of the "next" index */
+ nsindex = to_current_namespace_index(ndd);
+ nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);
+
+ /* Determine starting offset for label data */
+ offset = __le64_to_cpu(nsindex->labeloff);
+ nslot = __le32_to_cpu(nsindex->nslot);
+
+ /* Loop through the free list pulling in any active labels */
+ for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
+ size_t label_read_size;
+
+ /* zero out the unused labels */
+ if (test_bit_le(i, nsindex->free)) {
+ memset(ndd->data + offset, 0, ndd->nslabel_size);
+ continue;
+ }
+
+ /* if we already read past here then just continue */
+ if (offset + ndd->nslabel_size <= read_size)
+ continue;
+
+ /* if our reads fell behind this offset, resume from here */
+ if (read_size < offset)
+ read_size = offset;
+
+ /* determine how much more will be read after this next call. */
+ label_read_size = offset + ndd->nslabel_size - read_size;
+ label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
+ max_xfer;
+
+ /* truncate last read if needed */
+ if (read_size + label_read_size > config_size)
+ label_read_size = config_size - read_size;
+
+ /* Read the label data */
+ rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
+ read_size, label_read_size);
+ if (rc)
+ goto out_err;
+
+ /* push read_size to next read offset */
+ read_size += label_read_size;
+ }
+
+ dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
+out_err:
+ return rc;
+}
+
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
struct nd_namespace_index *nsindex;
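
The max_xfer trim in nd_label_data_init() is worth a worked example:
it shrinks the transfer size just enough to spread the last read's
slack across all reads without increasing their count. With assumed
numbers config_size = 10000 and max_xfer = 4096:

        reads           = DIV_ROUND_UP(10000, 4096)     = 3
        last-read waste = 4095 - (10000 - 1) % 4096     = 2288 bytes
        trim            = 2288 / 3                      = 762
        max_xfer        = 4096 - 762                    = 3334

        /* still DIV_ROUND_UP(10000, 3334) = 3 reads, but the final
         * read now overshoots the label area by 2 bytes instead of
         * 2288 */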
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index 18bbe183b3a9..e9a2ad3c2150 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -138,9 +138,7 @@ static inline int nd_label_next_nsindex(int index)
}
struct nvdimm_drvdata;
-int nd_label_validate(struct nvdimm_drvdata *ndd);
-void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
- struct nd_namespace_index *src);
+int nd_label_data_init(struct nvdimm_drvdata *ndd);
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd);
int nd_label_active_count(struct nvdimm_drvdata *ndd);
struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 4a4266250c28..681af3a8fd62 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -2099,7 +2099,6 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
return NULL;
}
dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
- dev->parent = &nd_region->dev;
dev->groups = nd_namespace_attribute_groups;
nd_namespace_pmem_set_resource(nd_region, nspm, 0);
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index ac68072fb8cd..182258f64417 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -14,7 +14,6 @@
#define __ND_CORE_H__
#include <linux/libnvdimm.h>
#include <linux/device.h>
-#include <linux/libnvdimm.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/nd.h>
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 98317e7ce5b5..e79cc8e5c114 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -241,6 +241,8 @@ struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
+int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
+ size_t offset, size_t len);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 3f7ad5bc443e..24c64090169e 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -361,6 +361,65 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
return dev;
}
+/*
+ * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
+ * space associated with the namespace. If the memmap is set to DRAM, then
+ * this is a no-op. Since the memmap area is freshly initialized during
+ * probe, we have an opportunity to clear any badblocks in this area.
+ */
+static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
+{
+ struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ void *zero_page = page_address(ZERO_PAGE(0));
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+ int num_bad, meta_num, rc, bb_present;
+ sector_t first_bad, meta_start;
+ struct nd_namespace_io *nsio;
+
+ if (nd_pfn->mode != PFN_MODE_PMEM)
+ return 0;
+
+ nsio = to_nd_namespace_io(&ndns->dev);
+ meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
+ meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;
+
+ do {
+ unsigned long zero_len;
+ u64 nsoff;
+
+ bb_present = badblocks_check(&nd_region->bb, meta_start,
+ meta_num, &first_bad, &num_bad);
+ if (bb_present) {
+ dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %lx\n",
+ num_bad, first_bad);
+ nsoff = ALIGN_DOWN((nd_region->ndr_start
+ + (first_bad << 9)) - nsio->res.start,
+ PAGE_SIZE);
+ zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
+ while (zero_len) {
+ unsigned long chunk = min(zero_len, PAGE_SIZE);
+
+ rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
+ chunk, 0);
+ if (rc)
+ break;
+
+ zero_len -= chunk;
+ nsoff += chunk;
+ }
+ if (rc) {
+ dev_err(&nd_pfn->dev,
+ "error clearing %x badblocks at %lx\n",
+ num_bad, first_bad);
+ return rc;
+ }
+ }
+ } while (bb_present);
+
+ return 0;
+}
+
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
@@ -477,7 +536,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -ENXIO;
}
- return 0;
+ return nd_pfn_clear_memmap_errors(nd_pfn);
}
EXPORT_SYMBOL(nd_pfn_validate);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 6071e2942053..0e39e3d1846f 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -421,9 +421,11 @@ static int pmem_attach_disk(struct device *dev,
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
- } else
+ } else {
addr = devm_memremap(dev, pmem->phys_addr,
pmem->size, ARCH_MEMREMAP_PMEM);
+ memcpy(&bb_res, &nsio->res, sizeof(bb_res));
+ }
/*
* At release time the queue must be frozen before
@@ -474,7 +476,7 @@ static int pmem_attach_disk(struct device *dev,
gendev = disk_to_dev(disk);
gendev->groups = pmem_attribute_groups;
- device_add_disk(dev, disk);
+ device_add_disk(dev, disk, NULL);
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index fa37afcd43ff..174a418cb171 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -560,10 +560,17 @@ static ssize_t region_badblocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
+ ssize_t rc;
- return badblocks_show(&nd_region->bb, buf, 0);
-}
+ device_lock(dev);
+ if (dev->driver)
+ rc = badblocks_show(&nd_region->bb, buf, 0);
+ else
+ rc = -ENXIO;
+ device_unlock(dev);
+ return rc;
+}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
static ssize_t resource_show(struct device *dev,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index dd8ec1dd9219..2e65be8b1387 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -971,7 +971,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
break;
default:
- /* Skip unnkown types */
+ /* Skip unknown types */
len = cur->nidl;
break;
}
@@ -1132,7 +1132,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return nvme_submit_user_cmd(ns->queue, &c,
(void __user *)(uintptr_t)io.addr, length,
- metadata, meta_len, io.slba, NULL, 0);
+ metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
static u32 nvme_known_admin_effects(u8 opcode)
@@ -2076,7 +2076,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
- strncpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
+ strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
return;
}
@@ -2729,11 +2729,19 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-const struct attribute_group nvme_ns_id_attr_group = {
+static const struct attribute_group nvme_ns_id_attr_group = {
.attrs = nvme_ns_id_attrs,
.is_visible = nvme_ns_id_attrs_are_visible,
};
+const struct attribute_group *nvme_ns_id_attr_groups[] = {
+ &nvme_ns_id_attr_group,
+#ifdef CONFIG_NVM
+ &nvme_nvm_attr_group,
+#endif
+ NULL,
+};
+
#define nvme_show_str_function(field) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -2900,9 +2908,14 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
unsigned nsid, struct nvme_id_ns *id)
{
struct nvme_ns_head *head;
+ size_t size = sizeof(*head);
int ret = -ENOMEM;
- head = kzalloc(sizeof(*head), GFP_KERNEL);
+#ifdef CONFIG_NVME_MULTIPATH
+ size += num_possible_nodes() * sizeof(struct nvme_ns *);
+#endif
+
+ head = kzalloc(size, GFP_KERNEL);
if (!head)
goto out;
ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
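
nvme_alloc_ns_head() now over-allocates by one nvme_ns pointer per
possible NUMA node; in the multipath build this trailing array backs
the per-node current-path cache. The idiomatic sizing for such a
layout uses a flexible array member plus struct_size() from
<linux/overflow.h>, sketched here with a placeholder struct:

        struct head_sketch {
                int instance;
                struct nvme_ns *current_path[]; /* one slot per node */
        };

        struct head_sketch *h;

        h = kzalloc(struct_size(h, current_path, num_possible_nodes()),
                    GFP_KERNEL);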
@@ -3051,7 +3064,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns->queue = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ns->queue))
goto out_free_ns;
+
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
+ if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
+ blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
+
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
@@ -3099,14 +3116,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_get_ctrl(ctrl);
- device_add_disk(ctrl->device, ns->disk);
- if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvme_ns_id_attr_group))
- pr_warn("%s: failed to create sysfs group for identification\n",
- ns->disk->disk_name);
- if (ns->ndev && nvme_nvm_register_sysfs(ns))
- pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
- ns->disk->disk_name);
+ device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
nvme_mpath_add_disk(ns, id);
nvme_fault_inject_init(ns);
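
Handing the attribute groups to device_add_disk() instead of calling
sysfs_create_group() afterwards closes a userspace race: the groups
now exist before the KOBJ_ADD uevent fires, so udev rules can count on
the id attributes being present. Usage is a NULL-terminated array, as
with nvme_ns_id_attr_groups above:

        static const struct attribute_group *sketch_groups[] = {
                &sketch_id_attr_group,  /* hypothetical group */
                NULL,
        };

        device_add_disk(parent, disk, sketch_groups);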
@@ -3132,10 +3142,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_fault_inject_fini(ns);
if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
- sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvme_ns_id_attr_group);
- if (ns->ndev)
- nvme_nvm_unregister_sysfs(ns);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
if (blk_get_integrity(ns->disk))
@@ -3143,8 +3149,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
}
mutex_lock(&ns->ctrl->subsys->lock);
- nvme_mpath_clear_current_path(ns);
list_del_rcu(&ns->siblings);
+ nvme_mpath_clear_current_path(ns);
mutex_unlock(&ns->ctrl->subsys->lock);
down_write(&ns->ctrl->namespaces_rwsem);
@@ -3411,16 +3417,21 @@ static void nvme_fw_act_work(struct work_struct *work)
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
- switch ((result & 0xff00) >> 8) {
+ u32 aer_notice_type = (result & 0xff00) >> 8;
+
+ switch (aer_notice_type) {
case NVME_AER_NOTICE_NS_CHANGED:
+ trace_nvme_async_event(ctrl, aer_notice_type);
set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
nvme_queue_scan(ctrl);
break;
case NVME_AER_NOTICE_FW_ACT_STARTING:
+ trace_nvme_async_event(ctrl, aer_notice_type);
queue_work(nvme_wq, &ctrl->fw_act_work);
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
+ trace_nvme_async_event(ctrl, aer_notice_type);
if (!ctrl->ana_log_buf)
break;
queue_work(nvme_wq, &ctrl->ana_work);
@@ -3435,11 +3446,12 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
volatile union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
+ u32 aer_type = result & 0x07;
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
- switch (result & 0x7) {
+ switch (aer_type) {
case NVME_AER_NOTICE:
nvme_handle_aen_notice(ctrl, result);
break;
@@ -3447,6 +3459,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
+ trace_nvme_async_event(ctrl, aer_type);
ctrl->aen_result = result;
break;
default:
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 206d63cb1afc..bd0969db6225 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -552,8 +552,11 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
ctrl->state != NVME_CTRL_DEAD &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
- nvme_req(rq)->status = NVME_SC_ABORT_REQ;
- return BLK_STS_IOERR;
+
+ nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
+ blk_mq_start_request(rq);
+ nvme_complete_rq(rq);
+ return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
@@ -865,6 +868,36 @@ static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
return 0;
}
+bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts)
+{
+ if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
+ strcmp(opts->traddr, ctrl->opts->traddr) ||
+ strcmp(opts->trsvcid, ctrl->opts->trsvcid))
+ return false;
+
+ /*
+ * Checking the local address is rough. In most cases, none is specified
+ * and the host port is selected by the stack.
+ *
+ * Assume no match if:
+ * - local address is specified and address is not the same
+ * - local address is not specified but remote is, or vice versa
+ * (admin using specific host_traddr when it matters).
+ */
+ if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
+ (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
+ if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
+ return false;
+ } else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
+ (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(nvmf_ip_options_match);
+
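Editor's sketch: the host_traddr check added above follows a simple rule: if both
sides specify a local address, compare them; if exactly one side does, treat it as a
mismatch; if neither does, it is a match. That predicate in isolation (NULL standing
in for the NVMF_OPT_HOST_TRADDR bit being clear; illustrative, not the fabrics API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool host_traddr_matches(const char *a, const char *b)
{
        if (a && b)
                return strcmp(a, b) == 0;   /* both set: compare */
        return !a && !b;                    /* exactly one set: no match */
}

int main(void)
{
        printf("%d %d %d %d\n",
               host_traddr_matches("10.0.0.1", "10.0.0.1"),  /* 1 */
               host_traddr_matches("10.0.0.1", "10.0.0.2"),  /* 0 */
               host_traddr_matches("10.0.0.1", NULL),        /* 0 */
               host_traddr_matches(NULL, NULL));             /* 1 */
        return 0;
}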
static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
unsigned int allowed_opts)
{
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index aa2fdb2a2e8f..6ea6275f332a 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -166,6 +166,8 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq);
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live);
+bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts);
static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 611e70cae754..e52b9d3c0bd6 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -20,6 +20,7 @@
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
+#include <linux/overflow.h>
#include "nvme.h"
#include "fabrics.h"
@@ -104,6 +105,12 @@ struct nvme_fc_fcp_op {
struct nvme_fc_ersp_iu rsp_iu;
};
+struct nvme_fcp_op_w_sgl {
+ struct nvme_fc_fcp_op op;
+ struct scatterlist sgl[SG_CHUNK_SIZE];
+ uint8_t priv[0];
+};
+
struct nvme_fc_lport {
struct nvme_fc_local_port localport;
@@ -122,6 +129,7 @@ struct nvme_fc_rport {
struct list_head endp_list; /* for lport->endp_list */
struct list_head ctrl_list;
struct list_head ls_req_list;
+ struct list_head disc_list;
struct device *dev; /* physical device for dma */
struct nvme_fc_lport *lport;
spinlock_t lock;
@@ -210,7 +218,6 @@ static DEFINE_IDA(nvme_fc_ctrl_cnt);
* These items are short-term. They will eventually be moved into
* a generic FC class. See comments in module init.
*/
-static struct class *fc_class;
static struct device *fc_udev_device;
@@ -317,7 +324,7 @@ out_done:
* @template: LLDD entrypoints and operational parameters for the port
* @dev: physical hardware device node port corresponds to. Will be
* used for DMA mappings
- * @lport_p: pointer to a local port pointer. Upon success, the routine
+ * @portptr: pointer to a local port pointer. Upon success, the routine
* will allocate a nvme_fc_local_port structure and place its
* address in the local port pointer. Upon failure, local port
* pointer will be set to 0.
@@ -425,8 +432,7 @@ EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
* nvme_fc_unregister_localport - transport entry point called by an
* LLDD to deregister/remove a previously
* registered NVME host FC port.
- * @localport: pointer to the (registered) local port that is to be
- * deregistered.
+ * @portptr: pointer to the (registered) local port that is to be deregistered.
*
* Returns:
* a completion status. Must be 0 upon success; a negative errno
@@ -507,6 +513,7 @@ nvme_fc_free_rport(struct kref *ref)
list_del(&rport->endp_list);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ WARN_ON(!list_empty(&rport->disc_list));
ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
kfree(rport);
@@ -631,7 +638,7 @@ __nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
* @localport: pointer to the (registered) local port that the remote
* subsystem port is connected to.
* @pinfo: pointer to information about the port to be registered
- * @rport_p: pointer to a remote port pointer. Upon success, the routine
+ * @portptr: pointer to a remote port pointer. Upon success, the routine
* will allocate a nvme_fc_remote_port structure and place its
* address in the remote port pointer. Upon failure, remote port
* pointer will be set to 0.
@@ -694,6 +701,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
INIT_LIST_HEAD(&newrec->endp_list);
INIT_LIST_HEAD(&newrec->ctrl_list);
INIT_LIST_HEAD(&newrec->ls_req_list);
+ INIT_LIST_HEAD(&newrec->disc_list);
kref_init(&newrec->ref);
atomic_set(&newrec->act_ctrl_cnt, 0);
spin_lock_init(&newrec->lock);
@@ -807,8 +815,8 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
* nvme_fc_unregister_remoteport - transport entry point called by an
* LLDD to deregister/remove a previously
* registered NVME subsystem FC port.
- * @remoteport: pointer to the (registered) remote port that is to be
- * deregistered.
+ * @portptr: pointer to the (registered) remote port that is to be
+ * deregistered.
*
* Returns:
* a completion status. Must be 0 upon success; a negative errno
@@ -1385,7 +1393,7 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
__nvme_fc_finish_ls_req(lsop);
- /* fc-nvme iniator doesn't care about success or failure of cmd */
+ /* fc-nvme initiator doesn't care about success or failure of cmd */
kfree(lsop);
}
@@ -1685,6 +1693,8 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
struct request *rq, u32 rqno)
{
+ struct nvme_fcp_op_w_sgl *op_w_sgl =
+ container_of(op, typeof(*op_w_sgl), op);
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
int ret = 0;
@@ -1694,7 +1704,6 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
op->fcp_req.rspaddr = &op->rsp_iu;
op->fcp_req.rsplen = sizeof(op->rsp_iu);
op->fcp_req.done = nvme_fc_fcpio_done;
- op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
op->ctrl = ctrl;
op->queue = queue;
@@ -1733,12 +1742,17 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct nvme_fc_ctrl *ctrl = set->driver_data;
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
+ int res;
nvme_req(rq)->ctrl = &ctrl->ctrl;
- return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
+ res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
+ if (res)
+ return res;
+ op->op.fcp_req.first_sgl = &op->sgl[0];
+ return res;
}
static int
@@ -1768,7 +1782,6 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
}
aen_op->flags = FCOP_FLAGS_AEN;
- aen_op->fcp_req.first_sgl = NULL; /* no sg list */
aen_op->fcp_req.private = private;
memset(sqe, 0, sizeof(*sqe));
@@ -2422,10 +2435,9 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
ctrl->tag_set.reserved_tags = 1; /* fabric connect */
ctrl->tag_set.numa_node = NUMA_NO_NODE;
ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
- (SG_CHUNK_SIZE *
- sizeof(struct scatterlist)) +
- ctrl->lport->ops->fcprqst_priv_sz;
+ ctrl->tag_set.cmd_size =
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz);
ctrl->tag_set.driver_data = ctrl;
ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
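Editor's sketch: struct_size() replaces the hand-rolled sizeof arithmetic above. For
a struct ending in a flexible (or, as with priv[0] here, zero-length) array it
computes sizeof(struct) + count * sizeof(element) and saturates on overflow so the
allocation fails instead of being undersized. A rough userspace approximation with
hypothetical names (struct op_w_sgl, sized):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct op_w_sgl {
        long op_state;          /* stand-in for the embedded fcp op */
        char sgl[64];           /* stand-in for the inline SG list */
        uint8_t priv[];         /* LLDD-private area, sized at runtime */
};

/* rough analogue of struct_size(p, priv, n) for a u8 flexible array */
static size_t sized(size_t priv_sz)
{
        if (priv_sz > SIZE_MAX - sizeof(struct op_w_sgl))
                return SIZE_MAX;        /* saturate so the allocator fails */
        return sizeof(struct op_w_sgl) + priv_sz;
}

int main(void)
{
        printf("cmd_size for 32 private bytes: %zu\n", sized(32));
        return 0;
}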
@@ -3027,10 +3039,9 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
- ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
- (SG_CHUNK_SIZE *
- sizeof(struct scatterlist)) +
- ctrl->lport->ops->fcprqst_priv_sz;
+ ctrl->admin_tag_set.cmd_size =
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz);
ctrl->admin_tag_set.driver_data = ctrl;
ctrl->admin_tag_set.nr_hw_queues = 1;
ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
@@ -3159,7 +3170,7 @@ nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
substring_t wwn = { name, &name[sizeof(name)-1] };
int nnoffset, pnoffset;
- /* validate it string one of the 2 allowed formats */
+ /* validate if string is one of the 2 allowed formats */
if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
@@ -3254,6 +3265,90 @@ static struct nvmf_transport_ops nvme_fc_transport = {
.create_ctrl = nvme_fc_create_ctrl,
};
+/* Arbitrary successive failures max. With lots of subsystems, this could be high */
+#define DISCOVERY_MAX_FAIL 20
+
+static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ LIST_HEAD(local_disc_list);
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+ int failcnt = 0;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+restart:
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+ list_for_each_entry(rport, &lport->endp_list, endp_list) {
+ if (!nvme_fc_lport_get(lport))
+ continue;
+ if (!nvme_fc_rport_get(rport)) {
+ /*
+ * This is a temporary condition. Upon restart
+ * this rport will be gone from the list.
+ *
+ * Revert the lport put and retry. Anything
+ * added to the list already will be skipped (as
+ * they are no longer list_empty). Loops should
+ * resume at rports that were not yet seen.
+ */
+ nvme_fc_lport_put(lport);
+
+ if (failcnt++ < DISCOVERY_MAX_FAIL)
+ goto restart;
+
+ pr_err("nvme_discovery: too many reference "
+ "failures\n");
+ goto process_local_list;
+ }
+ if (list_empty(&rport->disc_list))
+ list_add_tail(&rport->disc_list,
+ &local_disc_list);
+ }
+ }
+
+process_local_list:
+ while (!list_empty(&local_disc_list)) {
+ rport = list_first_entry(&local_disc_list,
+ struct nvme_fc_rport, disc_list);
+ list_del_init(&rport->disc_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ lport = rport->lport;
+ /* signal discovery. Won't hurt if it repeats */
+ nvme_fc_signal_discovery_scan(lport, rport);
+ nvme_fc_rport_put(rport);
+ nvme_fc_lport_put(lport);
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ }
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return count;
+}
+static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
+
+static struct attribute *nvme_fc_attrs[] = {
+ &dev_attr_nvme_discovery.attr,
+ NULL
+};
+
+static struct attribute_group nvme_fc_attr_group = {
+ .attrs = nvme_fc_attrs,
+};
+
+static const struct attribute_group *nvme_fc_attr_groups[] = {
+ &nvme_fc_attr_group,
+ NULL
+};
+
+static struct class fc_class = {
+ .name = "fc",
+ .dev_groups = nvme_fc_attr_groups,
+ .owner = THIS_MODULE,
+};
+
static int __init nvme_fc_init_module(void)
{
int ret;
@@ -3272,16 +3367,16 @@ static int __init nvme_fc_init_module(void)
* put in place, this code will move to a more generic
* location for the class.
*/
- fc_class = class_create(THIS_MODULE, "fc");
- if (IS_ERR(fc_class)) {
+ ret = class_register(&fc_class);
+ if (ret) {
pr_err("couldn't register class fc\n");
- return PTR_ERR(fc_class);
+ return ret;
}
/*
* Create a device for the FC-centric udev events
*/
- fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
+ fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
"fc_udev_device");
if (IS_ERR(fc_udev_device)) {
pr_err("couldn't create fc_udev device!\n");
@@ -3296,9 +3391,9 @@ static int __init nvme_fc_init_module(void)
return 0;
out_destroy_device:
- device_destroy(fc_class, MKDEV(0, 0));
+ device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
- class_destroy(fc_class);
+ class_unregister(&fc_class);
return ret;
}
@@ -3313,8 +3408,8 @@ static void __exit nvme_fc_exit_module(void)
ida_destroy(&nvme_fc_local_port_cnt);
ida_destroy(&nvme_fc_ctrl_cnt);
- device_destroy(fc_class, MKDEV(0, 0));
- class_destroy(fc_class);
+ device_destroy(&fc_class, MKDEV(0, 0));
+ class_unregister(&fc_class);
}
module_init(nvme_fc_init_module);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 6fe5923c95d4..a4f3b263cd6c 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -567,13 +567,13 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
* Expect the lba in device format
*/
static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
- struct nvm_chk_meta *meta,
- sector_t slba, int nchks)
+ sector_t slba, int nchks,
+ struct nvm_chk_meta *meta)
{
struct nvm_geo *geo = &ndev->geo;
struct nvme_ns *ns = ndev->q->queuedata;
struct nvme_ctrl *ctrl = ns->ctrl;
- struct nvme_nvm_chk_meta *dev_meta = (struct nvme_nvm_chk_meta *)meta;
+ struct nvme_nvm_chk_meta *dev_meta, *dev_meta_off;
struct ppa_addr ppa;
size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
size_t log_pos, offset, len;
@@ -585,6 +585,10 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
*/
max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);
+ dev_meta = kmalloc(max_len, GFP_KERNEL);
+ if (!dev_meta)
+ return -ENOMEM;
+
/* Normalize lba address space to obtain log offset */
ppa.ppa = slba;
ppa = dev_to_generic_addr(ndev, ppa);
@@ -598,6 +602,9 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
while (left) {
len = min_t(unsigned int, left, max_len);
+ memset(dev_meta, 0, max_len);
+ dev_meta_off = dev_meta;
+
ret = nvme_get_log(ctrl, ns->head->ns_id,
NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len,
offset);
@@ -607,21 +614,23 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
}
for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
- meta->state = dev_meta->state;
- meta->type = dev_meta->type;
- meta->wi = dev_meta->wi;
- meta->slba = le64_to_cpu(dev_meta->slba);
- meta->cnlb = le64_to_cpu(dev_meta->cnlb);
- meta->wp = le64_to_cpu(dev_meta->wp);
+ meta->state = dev_meta_off->state;
+ meta->type = dev_meta_off->type;
+ meta->wi = dev_meta_off->wi;
+ meta->slba = le64_to_cpu(dev_meta_off->slba);
+ meta->cnlb = le64_to_cpu(dev_meta_off->cnlb);
+ meta->wp = le64_to_cpu(dev_meta_off->wp);
meta++;
- dev_meta++;
+ dev_meta_off++;
}
offset += len;
left -= len;
}
+ kfree(dev_meta);
+
return ret;
}
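Editor's sketch: the rework above stops casting the caller's meta array into the
device layout and instead kmallocs a bounce buffer in device format, then converts
records one by one into host format. The shape of that loop as a standalone C
sketch, with illustrative record layouts (dev_rec, host_rec):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_rec { uint64_t slba_le; };   /* wire/device format (little endian) */
struct host_rec { uint64_t slba; };     /* host format */

/* decode an LE-encoded u64 regardless of host endianness */
static uint64_t le64_to_host(uint64_t v)
{
        const uint8_t *b = (const uint8_t *)&v;
        uint64_t r = 0;

        for (int i = 7; i >= 0; i--)
                r = (r << 8) | b[i];
        return r;
}

int main(void)
{
        size_t n = 4;
        struct dev_rec *buf = calloc(n, sizeof(*buf));   /* bounce buffer */
        struct host_rec *out = calloc(n, sizeof(*out));

        if (!buf || !out)
                return 1;
        /* ... fill buf from the device, e.g. a log-page read ... */
        for (size_t i = 0; i < n; i++)
                out[i].slba = le64_to_host(buf[i].slba_le);
        free(buf);
        free(out);
        return 0;
}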
@@ -968,6 +977,9 @@ void nvme_nvm_update_nvm_info(struct nvme_ns *ns)
struct nvm_dev *ndev = ns->ndev;
struct nvm_geo *geo = &ndev->geo;
+ if (geo->version == NVM_OCSSD_SPEC_12)
+ return;
+
geo->csecs = 1 << ns->lba_shift;
geo->sos = ns->ms;
}
@@ -1190,10 +1202,29 @@ static NVM_DEV_ATTR_12_RO(multiplane_modes);
static NVM_DEV_ATTR_12_RO(media_capabilities);
static NVM_DEV_ATTR_12_RO(max_phys_secs);
-static struct attribute *nvm_dev_attrs_12[] = {
+/* 2.0 values */
+static NVM_DEV_ATTR_20_RO(groups);
+static NVM_DEV_ATTR_20_RO(punits);
+static NVM_DEV_ATTR_20_RO(chunks);
+static NVM_DEV_ATTR_20_RO(clba);
+static NVM_DEV_ATTR_20_RO(ws_min);
+static NVM_DEV_ATTR_20_RO(ws_opt);
+static NVM_DEV_ATTR_20_RO(maxoc);
+static NVM_DEV_ATTR_20_RO(maxocpu);
+static NVM_DEV_ATTR_20_RO(mw_cunits);
+static NVM_DEV_ATTR_20_RO(write_typ);
+static NVM_DEV_ATTR_20_RO(write_max);
+static NVM_DEV_ATTR_20_RO(reset_typ);
+static NVM_DEV_ATTR_20_RO(reset_max);
+
+static struct attribute *nvm_dev_attrs[] = {
+ /* version agnostic attrs */
&dev_attr_version.attr,
&dev_attr_capabilities.attr,
+ &dev_attr_read_typ.attr,
+ &dev_attr_read_max.attr,
+ /* 1.2 attrs */
&dev_attr_vendor_opcode.attr,
&dev_attr_device_mode.attr,
&dev_attr_media_manager.attr,
@@ -1208,8 +1239,6 @@ static struct attribute *nvm_dev_attrs_12[] = {
&dev_attr_page_size.attr,
&dev_attr_hw_sector_size.attr,
&dev_attr_oob_sector_size.attr,
- &dev_attr_read_typ.attr,
- &dev_attr_read_max.attr,
&dev_attr_prog_typ.attr,
&dev_attr_prog_max.attr,
&dev_attr_erase_typ.attr,
@@ -1218,33 +1247,7 @@ static struct attribute *nvm_dev_attrs_12[] = {
&dev_attr_media_capabilities.attr,
&dev_attr_max_phys_secs.attr,
- NULL,
-};
-
-static const struct attribute_group nvm_dev_attr_group_12 = {
- .name = "lightnvm",
- .attrs = nvm_dev_attrs_12,
-};
-
-/* 2.0 values */
-static NVM_DEV_ATTR_20_RO(groups);
-static NVM_DEV_ATTR_20_RO(punits);
-static NVM_DEV_ATTR_20_RO(chunks);
-static NVM_DEV_ATTR_20_RO(clba);
-static NVM_DEV_ATTR_20_RO(ws_min);
-static NVM_DEV_ATTR_20_RO(ws_opt);
-static NVM_DEV_ATTR_20_RO(maxoc);
-static NVM_DEV_ATTR_20_RO(maxocpu);
-static NVM_DEV_ATTR_20_RO(mw_cunits);
-static NVM_DEV_ATTR_20_RO(write_typ);
-static NVM_DEV_ATTR_20_RO(write_max);
-static NVM_DEV_ATTR_20_RO(reset_typ);
-static NVM_DEV_ATTR_20_RO(reset_max);
-
-static struct attribute *nvm_dev_attrs_20[] = {
- &dev_attr_version.attr,
- &dev_attr_capabilities.attr,
-
+ /* 2.0 attrs */
&dev_attr_groups.attr,
&dev_attr_punits.attr,
&dev_attr_chunks.attr,
@@ -1255,8 +1258,6 @@ static struct attribute *nvm_dev_attrs_20[] = {
&dev_attr_maxocpu.attr,
&dev_attr_mw_cunits.attr,
- &dev_attr_read_typ.attr,
- &dev_attr_read_max.attr,
&dev_attr_write_typ.attr,
&dev_attr_write_max.attr,
&dev_attr_reset_typ.attr,
@@ -1265,44 +1266,38 @@ static struct attribute *nvm_dev_attrs_20[] = {
NULL,
};
-static const struct attribute_group nvm_dev_attr_group_20 = {
- .name = "lightnvm",
- .attrs = nvm_dev_attrs_20,
-};
-
-int nvme_nvm_register_sysfs(struct nvme_ns *ns)
+static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gendisk *disk = dev_to_disk(dev);
+ struct nvme_ns *ns = disk->private_data;
struct nvm_dev *ndev = ns->ndev;
- struct nvm_geo *geo = &ndev->geo;
+ struct device_attribute *dev_attr =
+ container_of(attr, typeof(*dev_attr), attr);
if (!ndev)
- return -EINVAL;
-
- switch (geo->major_ver_id) {
- case 1:
- return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_12);
- case 2:
- return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_20);
- }
+ return 0;
- return -EINVAL;
-}
+ if (dev_attr->show == nvm_dev_attr_show)
+ return attr->mode;
-void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
-{
- struct nvm_dev *ndev = ns->ndev;
- struct nvm_geo *geo = &ndev->geo;
-
- switch (geo->major_ver_id) {
+ switch (ndev->geo.major_ver_id) {
case 1:
- sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_12);
+ if (dev_attr->show == nvm_dev_attr_show_12)
+ return attr->mode;
break;
case 2:
- sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_20);
+ if (dev_attr->show == nvm_dev_attr_show_20)
+ return attr->mode;
break;
}
+
+ return 0;
}
+
+const struct attribute_group nvme_nvm_attr_group = {
+ .name = "lightnvm",
+ .attrs = nvm_dev_attrs,
+ .is_visible = nvm_dev_attrs_visible,
+};
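Editor's sketch: the two version-specific sysfs groups collapse above into one
superset table filtered at registration time by an .is_visible callback. The
technique, generically, is a static table plus a per-entry predicate returning the
mode (0 hides the entry). A runnable userspace analogue with made-up names:

#include <stdio.h>

enum ver { ANY = 0, V12 = 1, V20 = 2 };

struct attr { const char *name; enum ver only; };

static const struct attr attrs[] = {
        { "version",   ANY },
        { "read_typ",  ANY },
        { "page_size", V12 },
        { "chunks",    V20 },
};

/* analogue of nvm_dev_attrs_visible(): nonzero means "expose" */
static int visible(const struct attr *a, enum ver dev_ver)
{
        return a->only == ANY || a->only == dev_ver;
}

int main(void)
{
        enum ver dev_ver = V20;         /* geo.major_ver_id == 2 */

        for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
                if (visible(&attrs[i], dev_ver))
                        printf("exposing lightnvm/%s\n", attrs[i].name);
        return 0;
}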
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5a9562881d4e..5e3cc8c59a39 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -77,6 +77,13 @@ void nvme_failover_req(struct request *req)
queue_work(nvme_wq, &ns->ctrl->ana_work);
}
break;
+ case NVME_SC_HOST_PATH_ERROR:
+ /*
+ * Temporary transport disruption in talking to the controller.
+ * Try to send on a new path.
+ */
+ nvme_mpath_clear_current_path(ns);
+ break;
default:
/*
* Reset the controller for any non-ANA error as we don't know
@@ -110,29 +117,55 @@ static const char *nvme_ana_state_names[] = {
[NVME_ANA_CHANGE] = "change",
};
-static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
+void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+ int node;
+
+ if (!head)
+ return;
+
+ for_each_node(node) {
+ if (ns == rcu_access_pointer(head->current_path[node]))
+ rcu_assign_pointer(head->current_path[node], NULL);
+ }
+}
+
+static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
- struct nvme_ns *ns, *fallback = NULL;
+ int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
+ struct nvme_ns *found = NULL, *fallback = NULL, *ns;
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (ns->ctrl->state != NVME_CTRL_LIVE ||
test_bit(NVME_NS_ANA_PENDING, &ns->flags))
continue;
+
+ distance = node_distance(node, dev_to_node(ns->ctrl->dev));
+
switch (ns->ana_state) {
case NVME_ANA_OPTIMIZED:
- rcu_assign_pointer(head->current_path, ns);
- return ns;
+ if (distance < found_distance) {
+ found_distance = distance;
+ found = ns;
+ }
+ break;
case NVME_ANA_NONOPTIMIZED:
- fallback = ns;
+ if (distance < fallback_distance) {
+ fallback_distance = distance;
+ fallback = ns;
+ }
break;
default:
break;
}
}
- if (fallback)
- rcu_assign_pointer(head->current_path, fallback);
- return fallback;
+ if (!found)
+ found = fallback;
+ if (found)
+ rcu_assign_pointer(head->current_path[node], found);
+ return found;
}
static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
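Editor's sketch: __nvme_find_path() now picks, per NUMA node, the closest optimized
path and keeps the closest non-optimized path as a fallback. Stripped of RCU and the
ANA state machine, the selection is a two-tier minimum scan; a sketch with made-up
distances and names:

#include <limits.h>
#include <stdio.h>

struct path { const char *name; int distance; int optimized; };

static const struct path *pick(const struct path *p, int n)
{
        const struct path *found = NULL, *fallback = NULL;
        int found_distance = INT_MAX, fallback_distance = INT_MAX;

        for (int i = 0; i < n; i++) {
                if (p[i].optimized && p[i].distance < found_distance) {
                        found_distance = p[i].distance;
                        found = &p[i];
                } else if (!p[i].optimized &&
                           p[i].distance < fallback_distance) {
                        fallback_distance = p[i].distance;
                        fallback = &p[i];
                }
        }
        return found ? found : fallback;        /* optimized wins if present */
}

int main(void)
{
        struct path paths[] = {
                { "pathA", 20, 0 },
                { "pathB", 10, 1 },
                { "pathC", 30, 1 },
        };

        printf("chose %s\n", pick(paths, 3)->name);     /* pathB */
        return 0;
}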
@@ -143,10 +176,12 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
- struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu);
+ int node = numa_node_id();
+ struct nvme_ns *ns;
+ ns = srcu_dereference(head->current_path[node], &head->srcu);
if (unlikely(!ns || !nvme_path_is_optimized(ns)))
- ns = __nvme_find_path(head);
+ ns = __nvme_find_path(head, node);
return ns;
}
@@ -193,7 +228,7 @@ static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
int srcu_idx;
srcu_idx = srcu_read_lock(&head->srcu);
- ns = srcu_dereference(head->current_path, &head->srcu);
+ ns = srcu_dereference(head->current_path[numa_node_id()], &head->srcu);
if (likely(ns && nvme_path_is_optimized(ns)))
found = ns->queue->poll_fn(q, qc);
srcu_read_unlock(&head->srcu, srcu_idx);
@@ -282,12 +317,17 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
if (!head->disk)
return;
- if (!(head->disk->flags & GENHD_FL_UP)) {
- device_add_disk(&head->subsys->dev, head->disk);
- if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
- &nvme_ns_id_attr_group))
- dev_warn(&head->subsys->dev,
- "failed to create id group.\n");
+ if (!(head->disk->flags & GENHD_FL_UP))
+ device_add_disk(&head->subsys->dev, head->disk,
+ nvme_ns_id_attr_groups);
+
+ if (nvme_path_is_optimized(ns)) {
+ int node, srcu_idx;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ for_each_node(node)
+ __nvme_find_path(head, node);
+ srcu_read_unlock(&head->srcu, srcu_idx);
}
kblockd_schedule_work(&ns->head->requeue_work);
@@ -494,11 +534,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- if (head->disk->flags & GENHD_FL_UP) {
- sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
- &nvme_ns_id_attr_group);
+ if (head->disk->flags & GENHD_FL_UP)
del_gendisk(head->disk);
- }
blk_set_queue_dying(head->disk->queue);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
@@ -537,8 +574,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
INIT_WORK(&ctrl->ana_work, nvme_ana_work);
ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
- if (!ctrl->ana_log_buf)
+ if (!ctrl->ana_log_buf) {
+ error = -ENOMEM;
goto out;
+ }
error = nvme_read_ana_log(ctrl, true);
if (error)
@@ -547,7 +586,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
out_free_ana_log_buf:
kfree(ctrl->ana_log_buf);
out:
- return -ENOMEM;
+ return error;
}
void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bb4a2003c097..cee79cb388af 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -277,14 +277,6 @@ struct nvme_ns_ids {
* only ever has a single entry for private namespaces.
*/
struct nvme_ns_head {
-#ifdef CONFIG_NVME_MULTIPATH
- struct gendisk *disk;
- struct nvme_ns __rcu *current_path;
- struct bio_list requeue_list;
- spinlock_t requeue_lock;
- struct work_struct requeue_work;
- struct mutex lock;
-#endif
struct list_head list;
struct srcu_struct srcu;
struct nvme_subsystem *subsys;
@@ -293,6 +285,14 @@ struct nvme_ns_head {
struct list_head entry;
struct kref ref;
int instance;
+#ifdef CONFIG_NVME_MULTIPATH
+ struct gendisk *disk;
+ struct bio_list requeue_list;
+ spinlock_t requeue_lock;
+ struct work_struct requeue_work;
+ struct mutex lock;
+ struct nvme_ns __rcu *current_path[];
+#endif
};
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
@@ -343,6 +343,7 @@ struct nvme_ctrl_ops {
unsigned int flags;
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
+#define NVME_F_PCI_P2PDMA (1 << 2)
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -459,7 +460,7 @@ int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
void *log, size_t size, u64 offset);
-extern const struct attribute_group nvme_ns_id_attr_group;
+extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
@@ -474,14 +475,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
-
-static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
-{
- struct nvme_ns_head *head = ns->head;
-
- if (head && ns == rcu_access_pointer(head->current_path))
- rcu_assign_pointer(head->current_path, NULL);
-}
+void nvme_mpath_clear_current_path(struct nvme_ns *ns);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -551,8 +545,7 @@ static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
-int nvme_nvm_register_sysfs(struct nvme_ns *ns);
-void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
+extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
@@ -563,11 +556,6 @@ static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
}
static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
-{
- return 0;
-}
-static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
unsigned long arg)
{
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d668682f91df..f30031945ee4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sed-opal.h>
+#include <linux/pci-p2pdma.h>
#include "nvme.h"
@@ -99,9 +100,8 @@ struct nvme_dev {
struct work_struct remove_work;
struct mutex shutdown_lock;
bool subsystem;
- void __iomem *cmb;
- pci_bus_addr_t cmb_bus_addr;
u64 cmb_size;
+ bool cmb_use_sqes;
u32 cmbsz;
u32 cmbloc;
struct nvme_ctrl ctrl;
@@ -158,7 +158,7 @@ struct nvme_queue {
struct nvme_dev *dev;
spinlock_t sq_lock;
struct nvme_command *sq_cmds;
- struct nvme_command __iomem *sq_cmds_io;
+ bool sq_cmds_is_io;
spinlock_t cq_lock ____cacheline_aligned_in_smp;
volatile struct nvme_completion *cqes;
struct blk_mq_tags **tags;
@@ -447,11 +447,8 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
spin_lock(&nvmeq->sq_lock);
- if (nvmeq->sq_cmds_io)
- memcpy_toio(&nvmeq->sq_cmds_io[nvmeq->sq_tail], cmd,
- sizeof(*cmd));
- else
- memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
+
+ memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
@@ -748,8 +745,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out;
ret = BLK_STS_RESOURCE;
- nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
- DMA_ATTR_NO_WARN);
+
+ if (is_pci_p2pdma_page(sg_page(iod->sg)))
+ nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents,
+ dma_dir);
+ else
+ nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
+ dma_dir, DMA_ATTR_NO_WARN);
if (!nr_mapped)
goto out;
@@ -772,10 +774,10 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
goto out_unmap;
- }
- if (blk_integrity_rq(req))
cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
+ }
+
return BLK_STS_OK;
out_unmap:
@@ -791,7 +793,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
DMA_TO_DEVICE : DMA_FROM_DEVICE;
if (iod->nents) {
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+ /* P2PDMA requests do not need to be unmapped */
+ if (!is_pci_p2pdma_page(sg_page(iod->sg)))
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+
if (blk_integrity_rq(req))
dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
}
@@ -1232,9 +1237,18 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
- if (nvmeq->sq_cmds)
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
- nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+
+ if (nvmeq->sq_cmds) {
+ if (nvmeq->sq_cmds_is_io)
+ pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev),
+ nvmeq->sq_cmds,
+ SQ_SIZE(nvmeq->q_depth));
+ else
+ dma_free_coherent(nvmeq->q_dmadev,
+ SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds,
+ nvmeq->sq_dma_addr);
+ }
}
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1249,7 +1263,7 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
/**
* nvme_suspend_queue - put queue into suspended state
- * @nvmeq - queue to suspend
+ * @nvmeq: queue to suspend
*/
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
@@ -1323,12 +1337,21 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
int qid, int depth)
{
- /* CMB SQEs will be mapped before creation */
- if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
- return 0;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+ nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+ nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
+ nvmeq->sq_cmds);
+ nvmeq->sq_cmds_is_io = true;
+ }
+
+ if (!nvmeq->sq_cmds) {
+ nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ nvmeq->sq_cmds_is_io = false;
+ }
- nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
- &nvmeq->sq_dma_addr, GFP_KERNEL);
if (!nvmeq->sq_cmds)
return -ENOMEM;
return 0;
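Editor's sketch: nvme_alloc_sq_cmds() now tries CMB-backed p2pmem first and falls
back transparently to ordinary coherent DMA memory, recording which kind it got so
the matching free path runs later. The pattern in isolation, with hypothetical
allocator names standing in for pci_alloc_p2pmem()/dma_alloc_coherent():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *special_alloc(size_t sz) { (void)sz; return NULL; /* e.g. no CMB */ }
static void *generic_alloc(size_t sz) { return calloc(1, sz); }

struct queue { void *sq_cmds; bool sq_cmds_is_special; };

static int alloc_sq(struct queue *q, size_t sz, bool want_special)
{
        if (want_special) {
                q->sq_cmds = special_alloc(sz);
                q->sq_cmds_is_special = true;
        }
        if (!q->sq_cmds) {                      /* fall back transparently */
                q->sq_cmds = generic_alloc(sz);
                q->sq_cmds_is_special = false;
        }
        return q->sq_cmds ? 0 : -1;             /* -ENOMEM in the driver */
}

int main(void)
{
        struct queue q = { 0 };

        if (alloc_sq(&q, 4096, true))
                return 1;
        printf("special backing: %d\n", q.sq_cmds_is_special);  /* 0 here */
        if (!q.sq_cmds_is_special)              /* special memory has its own free */
                free(q.sq_cmds);
        return 0;
}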
@@ -1405,13 +1428,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
int result;
s16 vector;
- if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
- unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
- dev->ctrl.page_size);
- nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
- nvmeq->sq_cmds_io = dev->cmb + offset;
- }
-
/*
* A queue's vector matches the queue identifier unless the controller
* has only one vector available.
@@ -1652,9 +1668,6 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
- if (!use_cmb_sqes)
- return;
-
size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
bar = NVME_CMB_BIR(dev->cmbloc);
@@ -1671,11 +1684,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
- if (!dev->cmb)
+ if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
+ dev_warn(dev->ctrl.device,
+ "failed to register the CMB\n");
return;
- dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
+ }
+
dev->cmb_size = size;
+ dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);
+
+ if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
+ (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
+ pci_p2pmem_publish(pdev, true);
if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
&dev_attr_cmb.attr, NULL))
@@ -1685,12 +1705,10 @@ static void nvme_map_cmb(struct nvme_dev *dev)
static inline void nvme_release_cmb(struct nvme_dev *dev)
{
- if (dev->cmb) {
- iounmap(dev->cmb);
- dev->cmb = NULL;
+ if (dev->cmb_size) {
sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
&dev_attr_cmb.attr, NULL);
- dev->cmbsz = 0;
+ dev->cmb_size = 0;
}
}
@@ -1889,13 +1907,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
- if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+ if (dev->cmb_use_sqes) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
sizeof(struct nvme_command));
if (result > 0)
dev->q_depth = result;
else
- nvme_release_cmb(dev);
+ dev->cmb_use_sqes = false;
}
do {
@@ -2390,7 +2408,8 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
- .flags = NVME_F_METADATA_SUPPORTED,
+ .flags = NVME_F_METADATA_SUPPORTED |
+ NVME_F_PCI_P2PDMA,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
@@ -2564,13 +2583,12 @@ static void nvme_remove(struct pci_dev *pdev)
struct nvme_dev *dev = pci_get_drvdata(pdev);
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
-
- cancel_work_sync(&dev->ctrl.reset_work);
pci_set_drvdata(pdev, NULL);
if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
nvme_dev_disable(dev, true);
+ nvme_dev_remove_admin(dev);
}
flush_work(&dev->ctrl.reset_work);
@@ -2649,7 +2667,6 @@ static void nvme_error_resume(struct pci_dev *pdev)
struct nvme_dev *dev = pci_get_drvdata(pdev);
flush_work(&dev->ctrl.reset_work);
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
static const struct pci_error_handlers nvme_err_handler = {
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index dc042017c293..d181cafedc58 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -233,8 +233,15 @@ static void nvme_rdma_qp_event(struct ib_event *event, void *context)
static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
- wait_for_completion_interruptible_timeout(&queue->cm_done,
+ int ret;
+
+ ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
+ WARN_ON_ONCE(queue->cm_error > 0);
return queue->cm_error;
}
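Editor's sketch: wait_for_completion_interruptible_timeout() has a three-way return:
negative means interrupted by a signal, zero means the timeout elapsed, positive
means the completion fired. The fix above stops discarding that value; the
normalization it performs looks like this (self-contained sketch, errno values
illustrative):

#include <errno.h>
#include <stdio.h>

/* map the 3-way wait result onto an errno-style status */
static int normalize_wait(long ret, int completion_status)
{
        if (ret < 0)
                return (int)ret;        /* interrupted, e.g. -ERESTARTSYS */
        if (ret == 0)
                return -ETIMEDOUT;      /* previously silently ignored */
        return completion_status;       /* completed: report cm_error */
}

int main(void)
{
        printf("%d %d %d\n",
               normalize_wait(-512, 0),         /* interrupted */
               normalize_wait(0, 0),            /* timed out */
               normalize_wait(1, 0));           /* success */
        return 0;
}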
@@ -1849,54 +1856,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.stop_ctrl = nvme_rdma_stop_ctrl,
};
-static inline bool
-__nvme_rdma_options_match(struct nvme_rdma_ctrl *ctrl,
- struct nvmf_ctrl_options *opts)
-{
- char *stdport = __stringify(NVME_RDMA_IP_PORT);
-
-
- if (!nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts) ||
- strcmp(opts->traddr, ctrl->ctrl.opts->traddr))
- return false;
-
- if (opts->mask & NVMF_OPT_TRSVCID &&
- ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
- if (strcmp(opts->trsvcid, ctrl->ctrl.opts->trsvcid))
- return false;
- } else if (opts->mask & NVMF_OPT_TRSVCID) {
- if (strcmp(opts->trsvcid, stdport))
- return false;
- } else if (ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
- if (strcmp(stdport, ctrl->ctrl.opts->trsvcid))
- return false;
- }
- /* else, it's a match as both have stdport. Fall to next checks */
-
- /*
- * checking the local address is rough. In most cases, one
- * is not specified and the host port is selected by the stack.
- *
- * Assume no match if:
- * local address is specified and address is not the same
- * local address is not specified but remote is, or vice versa
- * (admin using specific host_traddr when it matters).
- */
- if (opts->mask & NVMF_OPT_HOST_TRADDR &&
- ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
- if (strcmp(opts->host_traddr, ctrl->ctrl.opts->host_traddr))
- return false;
- } else if (opts->mask & NVMF_OPT_HOST_TRADDR ||
- ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
- return false;
- /*
- * if neither controller had an host port specified, assume it's
- * a match as everything else matched.
- */
-
- return true;
-}
-
/*
* Fails a connection request if it matches an existing controller
* (association) with the same tuple:
@@ -1917,7 +1876,7 @@ nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
mutex_lock(&nvme_rdma_ctrl_mutex);
list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
- found = __nvme_rdma_options_match(ctrl, opts);
+ found = nvmf_ip_options_match(&ctrl->ctrl, opts);
if (found)
break;
}
@@ -1932,7 +1891,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
struct nvme_rdma_ctrl *ctrl;
int ret;
bool changed;
- char *port;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -1940,15 +1898,21 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
ctrl->ctrl.opts = opts;
INIT_LIST_HEAD(&ctrl->list);
- if (opts->mask & NVMF_OPT_TRSVCID)
- port = opts->trsvcid;
- else
- port = __stringify(NVME_RDMA_IP_PORT);
+ if (!(opts->mask & NVMF_OPT_TRSVCID)) {
+ opts->trsvcid =
+ kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL);
+ if (!opts->trsvcid) {
+ ret = -ENOMEM;
+ goto out_free_ctrl;
+ }
+ opts->mask |= NVMF_OPT_TRSVCID;
+ }
ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
- opts->traddr, port, &ctrl->addr);
+ opts->traddr, opts->trsvcid, &ctrl->addr);
if (ret) {
- pr_err("malformed address passed: %s:%s\n", opts->traddr, port);
+ pr_err("malformed address passed: %s:%s\n",
+ opts->traddr, opts->trsvcid);
goto out_free_ctrl;
}
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index a490790d6691..196d5bd56718 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -156,6 +156,34 @@ TRACE_EVENT(nvme_complete_rq,
);
+#define aer_name(aer) { aer, #aer }
+
+TRACE_EVENT(nvme_async_event,
+ TP_PROTO(struct nvme_ctrl *ctrl, u32 result),
+ TP_ARGS(ctrl, result),
+ TP_STRUCT__entry(
+ __field(int, ctrl_id)
+ __field(u32, result)
+ ),
+ TP_fast_assign(
+ __entry->ctrl_id = ctrl->instance;
+ __entry->result = result;
+ ),
+ TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
+ __entry->ctrl_id, __entry->result,
+ __print_symbolic(__entry->result,
+ aer_name(NVME_AER_NOTICE_NS_CHANGED),
+ aer_name(NVME_AER_NOTICE_ANA),
+ aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+ aer_name(NVME_AER_ERROR),
+ aer_name(NVME_AER_SMART),
+ aer_name(NVME_AER_CSS),
+ aer_name(NVME_AER_VS))
+ )
+);
+
+#undef aer_name
+
#endif /* _TRACE_NVME_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index a21caea1e080..1179f6314323 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -58,7 +58,7 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
if (!ns) {
- pr_err("nvmet : Could not find namespace id : %d\n",
+ pr_err("Could not find namespace id : %d\n",
le32_to_cpu(req->cmd->get_log_page.nsid));
return NVME_SC_INVALID_NS;
}
@@ -245,6 +245,10 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
offset += len;
ngrps++;
}
+ for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+ if (nvmet_ana_group_enabled[grpid])
+ ngrps++;
+ }
hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
hdr.ngrps = cpu_to_le16(ngrps);
@@ -349,7 +353,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
- strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+ strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
/* Max command capsule size is sqe + single page of in-capsule data */
id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index b37a8e3e3f80..d895579b6c5d 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -17,6 +17,8 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
+#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
#include "nvmet.h"
@@ -340,6 +342,48 @@ out_unlock:
CONFIGFS_ATTR(nvmet_ns_, device_path);
+#ifdef CONFIG_PCI_P2PDMA
+static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+
+ return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
+}
+
+static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct pci_dev *p2p_dev = NULL;
+ bool use_p2pmem;
+ int ret = count;
+ int error;
+
+ mutex_lock(&ns->subsys->lock);
+ if (ns->enabled) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
+ if (error) {
+ ret = error;
+ goto out_unlock;
+ }
+
+ ns->use_p2pmem = use_p2pmem;
+ pci_dev_put(ns->p2p_dev);
+ ns->p2p_dev = p2p_dev;
+
+out_unlock:
+ mutex_unlock(&ns->subsys->lock);
+
+ return ret;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, p2pmem);
+#endif /* CONFIG_PCI_P2PDMA */
+
static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
@@ -509,6 +553,9 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_ana_grpid,
&nvmet_ns_attr_enable,
&nvmet_ns_attr_buffered_io,
+#ifdef CONFIG_PCI_P2PDMA
+ &nvmet_ns_attr_p2pmem,
+#endif
NULL,
};
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b5ec96abd048..f4efe289dc7b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
+#include <linux/pci-p2pdma.h>
#include "nvmet.h"
@@ -365,9 +366,93 @@ static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
nvmet_file_ns_disable(ns);
}
+static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
+{
+ int ret;
+ struct pci_dev *p2p_dev;
+
+ if (!ns->use_p2pmem)
+ return 0;
+
+ if (!ns->bdev) {
+ pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
+ return -EINVAL;
+ }
+
+ if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
+ pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
+ if (ret < 0)
+ return -EINVAL;
+ } else {
+ /*
+ * Right now we just check that there is p2pmem available so
+ * we can report an error to the user right away if there
+ * is not. We'll find the actual device to use once we
+ * set up the controller when the port's device is available.
+ */
+
+ p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available for %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ pci_dev_put(p2p_dev);
+ }
+
+ return 0;
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
+ struct nvmet_ns *ns)
+{
+ struct device *clients[2];
+ struct pci_dev *p2p_dev;
+ int ret;
+
+ if (!ctrl->p2p_client)
+ return;
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
+ if (ret < 0)
+ return;
+
+ p2p_dev = pci_dev_get(ns->p2p_dev);
+ } else {
+ clients[0] = ctrl->p2p_client;
+ clients[1] = nvmet_ns_dev(ns);
+
+ p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
+ dev_name(ctrl->p2p_client), ns->device_path);
+ return;
+ }
+ }
+
+ ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
+ if (ret < 0)
+ pci_dev_put(p2p_dev);
+
+ pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
+ ns->nsid);
+}
+
int nvmet_ns_enable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
int ret;
mutex_lock(&subsys->lock);
@@ -384,6 +469,13 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
if (ret)
goto out_unlock;
+ ret = nvmet_p2pmem_ns_enable(ns);
+ if (ret)
+ goto out_unlock;
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+
ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
0, GFP_KERNEL);
if (ret)
@@ -418,6 +510,9 @@ out_unlock:
mutex_unlock(&subsys->lock);
return ret;
out_dev_put:
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
nvmet_ns_dev_disable(ns);
goto out_unlock;
}
@@ -425,6 +520,7 @@ out_dev_put:
void nvmet_ns_disable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
mutex_lock(&subsys->lock);
if (!ns->enabled)
@@ -434,6 +530,10 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
list_del_rcu(&ns->dev_link);
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
mutex_unlock(&subsys->lock);
/*
@@ -450,6 +550,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
percpu_ref_exit(&ns->ref);
mutex_lock(&subsys->lock);
+
subsys->nr_namespaces--;
nvmet_ns_changed(subsys, ns->nsid);
nvmet_ns_dev_disable(ns);
@@ -725,6 +826,51 @@ void nvmet_req_execute(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_execute);
+int nvmet_req_alloc_sgl(struct nvmet_req *req)
+{
+ struct pci_dev *p2p_dev = NULL;
+
+ if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
+ if (req->sq->ctrl && req->ns)
+ p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
+ req->ns->nsid);
+
+ req->p2p_dev = NULL;
+ if (req->sq->qid && p2p_dev) {
+ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+ req->transfer_len);
+ if (req->sg) {
+ req->p2p_dev = p2p_dev;
+ return 0;
+ }
+ }
+
+ /*
+ * If no P2P memory was available we fall back to using
+ * regular memory.
+ */
+ }
+
+ req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
+ if (!req->sg)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);
+
+void nvmet_req_free_sgl(struct nvmet_req *req)
+{
+ if (req->p2p_dev)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+ else
+ sgl_free(req->sg);
+
+ req->sg = NULL;
+ req->sg_cnt = 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
+
static inline bool nvmet_cc_en(u32 cc)
{
return (cc >> NVME_CC_EN_SHIFT) & 0x1;
@@ -921,6 +1067,37 @@ bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
return __nvmet_host_allowed(subsys, hostnqn);
}
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
+ struct nvmet_req *req)
+{
+ struct nvmet_ns *ns;
+
+ if (!req->p2p_client)
+ return;
+
+ ctrl->p2p_client = get_device(req->p2p_client);
+
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
+ pci_dev_put(radix_tree_deref_slot(slot));
+
+ put_device(ctrl->p2p_client);
+}
+
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
@@ -962,6 +1139,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
+ INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -1026,6 +1204,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
mutex_lock(&subsys->lock);
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+ nvmet_setup_p2p_ns_map(ctrl, req);
mutex_unlock(&subsys->lock);
*ctrlp = ctrl;
@@ -1053,6 +1232,7 @@ static void nvmet_ctrl_free(struct kref *ref)
struct nvmet_subsys *subsys = ctrl->subsys;
mutex_lock(&subsys->lock);
+ nvmet_release_p2p_ns_map(ctrl);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
@@ -1105,8 +1285,7 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
if (!port)
return NULL;
- if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
- NVMF_NQN_SIZE)) {
+ if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
return NULL;
return nvmet_disc_subsys;
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index eae29f493a07..bc0aa0bf1543 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -174,7 +174,7 @@ static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
- strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+ strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -219,12 +219,10 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
default:
- pr_err("unsupported cmd %d\n", cmd->common.opcode);
+ pr_err("unhandled cmd %d\n", cmd->common.opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
- pr_err("unhandled cmd %d\n", cmd->common.opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
int __init nvmet_init_discovery(void)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 29b4b236afd8..409081a03b24 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -110,11 +110,19 @@ struct nvmet_fc_tgtport {
struct list_head ls_busylist;
struct list_head assoc_list;
struct ida assoc_cnt;
- struct nvmet_port *port;
+ struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
};
+struct nvmet_fc_port_entry {
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_port *port;
+ u64 node_name;
+ u64 port_name;
+ struct list_head pe_list;
+};
+
struct nvmet_fc_defer_fcp_req {
struct list_head req_list;
struct nvmefc_tgt_fcp_req *fcp_req;
@@ -132,7 +140,6 @@ struct nvmet_fc_tgt_queue {
atomic_t zrspcnt;
atomic_t rsn;
spinlock_t qlock;
- struct nvmet_port *port;
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
struct nvmet_fc_tgt_assoc *assoc;
@@ -221,6 +228,7 @@ static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
+static LIST_HEAD(nvmet_fc_portentry_list);
static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
@@ -645,7 +653,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
queue->qid = qid;
queue->sqsize = sqsize;
queue->assoc = assoc;
- queue->port = assoc->tgtport->port;
queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
INIT_LIST_HEAD(&queue->fod_list);
INIT_LIST_HEAD(&queue->avail_defer_list);
@@ -957,6 +964,83 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
return ret;
}
+static void
+nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_port_entry *pe,
+ struct nvmet_port *port)
+{
+ lockdep_assert_held(&nvmet_fc_tgtlock);
+
+ pe->tgtport = tgtport;
+ tgtport->pe = pe;
+
+ pe->port = port;
+ port->priv = pe;
+
+ pe->node_name = tgtport->fc_target_port.node_name;
+ pe->port_name = tgtport->fc_target_port.port_name;
+ INIT_LIST_HEAD(&pe->pe_list);
+
+ list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
+}
+
+static void
+nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ if (pe->tgtport)
+ pe->tgtport->pe = NULL;
+ list_del(&pe->pe_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/*
+ * called when a targetport deregisters. Breaks the relationship
+ * with the nvmet port, but leaves the port_entry in place so that
+ * re-registration can resume operation.
+ */
+static void
+nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_port_entry *pe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ pe = tgtport->pe;
+ if (pe)
+ pe->tgtport = NULL;
+ tgtport->pe = NULL;
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/*
+ * called when a new targetport is registered. Looks in the
+ * existing nvmet port_entries to see if the nvmet layer is
+ * configured for the targetport's wwn's. (the targetport existed,
+ * nvmet configured, the lldd unregistered the tgtport, and is now
+ * reregistering the same targetport). If so, set the nvmet
+ * port entry on the targetport.
+ */
+static void
+nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_port_entry *pe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
+ if (tgtport->fc_target_port.node_name == pe->node_name &&
+ tgtport->fc_target_port.port_name == pe->port_name) {
+ WARN_ON(pe->tgtport);
+ tgtport->pe = pe;
+ pe->tgtport = tgtport;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
/**
* nvme_fc_register_targetport - transport entry point called by an
@@ -1034,6 +1118,8 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
goto out_free_newrec;
}
+ nvmet_fc_portentry_rebind_tgt(newrec);
+
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
@@ -1159,8 +1245,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
* nvme_fc_unregister_targetport - transport entry point called by an
* LLDD to deregister/remove a previously
* registered local NVME subsystem FC port.
- * @tgtport: pointer to the (registered) target port that is to be
- * deregistered.
+ * @target_port: pointer to the (registered) target port that is to be
+ * deregistered.
*
* Returns:
* a completion status. Must be 0 upon success; a negative errno
@@ -1171,6 +1257,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ nvmet_fc_portentry_unbind_tgt(tgtport);
+
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(tgtport);
@@ -1661,7 +1749,7 @@ nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
*
* If this routine returns error, the LLDD should abort the exchange.
*
- * @tgtport: pointer to the (registered) target port the LS was
+ * @target_port: pointer to the (registered) target port the LS was
* received on.
* @lsreq: pointer to a lsreq request structure to be used to reference
* the exchange corresponding to the LS.
@@ -2147,7 +2235,7 @@ nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
/*
- * Actual processing routine for received FC-NVME LS Requests from the LLD
+ * Actual processing routine for received FC-NVME I/O Requests from the LLD
*/
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
@@ -2158,6 +2246,13 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
int ret;
/*
+ * if there is no nvmet mapping to the targetport there
+ * shouldn't be requests. just terminate them.
+ */
+ if (!tgtport->pe)
+ goto transport_error;
+
+ /*
* Fused commands are currently not supported in the linux
* implementation.
*
@@ -2184,7 +2279,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.cmd = &fod->cmdiubuf.sqe;
fod->req.rsp = &fod->rspiubuf.cqe;
- fod->req.port = fod->queue->port;
+ fod->req.port = tgtport->pe->port;
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
@@ -2468,7 +2563,7 @@ nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
substring_t wwn = { name, &name[sizeof(name)-1] };
int nnoffset, pnoffset;
- /* validate it string one of the 2 allowed formats */
+ /* validate if string is one of the 2 allowed formats */
if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
@@ -2508,6 +2603,7 @@ static int
nvmet_fc_add_port(struct nvmet_port *port)
{
struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_port_entry *pe;
struct nvmet_fc_traddr traddr = { 0L, 0L };
unsigned long flags;
int ret;
@@ -2524,24 +2620,40 @@ nvmet_fc_add_port(struct nvmet_port *port)
if (ret)
return ret;
+ pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return -ENOMEM;
+
ret = -ENXIO;
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
if ((tgtport->fc_target_port.node_name == traddr.nn) &&
(tgtport->fc_target_port.port_name == traddr.pn)) {
- tgtport->port = port;
- ret = 0;
+			/* an FC port can map to only one nvmet port id */
+ if (!tgtport->pe) {
+ nvmet_fc_portentry_bind(tgtport, pe, port);
+ ret = 0;
+ } else
+ ret = -EALREADY;
break;
}
}
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ if (ret)
+ kfree(pe);
+
return ret;
}
static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
- /* nothing to do */
+ struct nvmet_fc_port_entry *pe = port->priv;
+
+ nvmet_fc_portentry_unbind(pe);
+
+ kfree(pe);
}
static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 5251689a1d9a..291f4121f516 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -648,6 +648,7 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
break;
/* Fall-Thru to RSP handling */
+ /* FALLTHRU */
case NVMET_FCOP_RSP:
if (fcpreq) {
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 7bc9f6240432..c1ec3475a140 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -58,7 +58,7 @@ static void nvmet_bio_done(struct bio *bio)
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
int sg_cnt = req->sg_cnt;
- struct bio *bio = &req->b.inline_bio;
+ struct bio *bio;
struct scatterlist *sg;
sector_t sector;
blk_qc_t cookie;
@@ -78,10 +78,18 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
op = REQ_OP_READ;
}
+ if (is_pci_p2pdma_page(sg_page(req->sg)))
+ op_flags |= REQ_NOMERGE;
+
sector = le64_to_cpu(req->cmd->rw.slba);
sector <<= (req->ns->blksize_shift - 9);
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ bio = &req->b.inline_bio;
+ bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ } else {
+ bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+ }
bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
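For scale: with NVMET_MAX_INLINE_BIOVEC == 8 (see the nvmet.h hunk below) and, as an assumption here, 4 KiB pages, the inline threshold works out to

	NVMET_MAX_INLINE_DATA_LEN = NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE
	                          = 8 * 4096 = 32 KiB

so reads and writes up to 32 KiB keep using the bio embedded in the request, and only larger transfers pay for a bio_alloc().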
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 81a9dc5290a8..39d972e2595f 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -246,7 +246,8 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
break;
offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
- len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
+ len = le32_to_cpu(range.nlb);
+ len <<= req->ns->blksize_shift;
if (offset + len > req->ns->size) {
ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
break;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index ec9af4ee03b6..c2b4d9ee6391 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -26,6 +26,7 @@
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
+#include <linux/radix-tree.h>
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
@@ -77,6 +78,9 @@ struct nvmet_ns {
struct completion disable_done;
mempool_t *bvec_pool;
struct kmem_cache *bvec_cache;
+
+ int use_p2pmem;
+ struct pci_dev *p2p_dev;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -84,6 +88,11 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
return container_of(to_config_group(item), struct nvmet_ns, group);
}
+static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
+{
+ return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
+}
+
struct nvmet_cq {
u16 qid;
u16 size;
@@ -184,6 +193,9 @@ struct nvmet_ctrl {
char subsysnqn[NVMF_NQN_FIELD_LEN];
char hostnqn[NVMF_NQN_FIELD_LEN];
+
+ struct device *p2p_client;
+ struct radix_tree_root p2p_ns_map;
};
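The new p2p_ns_map is a per-controller radix tree keyed by namespace ID. A hedged sketch of the kind of fast-path lookup it enables (the surrounding names are illustrative):

	struct pci_dev *p2p_dev;

	p2p_dev = radix_tree_lookup(&ctrl->p2p_ns_map,
				    le32_to_cpu(req->cmd->common.nsid));
	if (p2p_dev)
		req->p2p_dev = p2p_dev;	/* data buffer comes from P2P memory */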
struct nvmet_subsys {
@@ -264,6 +276,7 @@ struct nvmet_fabrics_ops {
};
#define NVMET_MAX_INLINE_BIOVEC 8
+#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
struct nvmet_req {
struct nvme_command *cmd;
@@ -294,6 +307,9 @@ struct nvmet_req {
void (*execute)(struct nvmet_req *req);
const struct nvmet_fabrics_ops *ops;
+
+ struct pci_dev *p2p_dev;
+ struct device *p2p_client;
};
extern struct workqueue_struct *buffered_io_wq;
@@ -336,6 +352,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
void nvmet_req_uninit(struct nvmet_req *req);
void nvmet_req_execute(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
+int nvmet_req_alloc_sgl(struct nvmet_req *req);
+void nvmet_req_free_sgl(struct nvmet_req *req);
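nvmet_req_alloc_sgl()/nvmet_req_free_sgl() are implemented in core.c, outside this excerpt. A minimal sketch of the pairing they centralize, assuming the pci_p2pmem_alloc_sgl()/pci_p2pmem_free_sgl() helpers from the P2PDMA series; illustrative only, not the actual bodies:

	int nvmet_req_alloc_sgl(struct nvmet_req *req)
	{
		if (req->p2p_dev) {
			req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
						       &req->sg_cnt,
						       req->transfer_len);
			if (req->sg)
				return 0;
		}
		req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
		return req->sg ? 0 : -ENOMEM;
	}

	void nvmet_req_free_sgl(struct nvmet_req *req)
	{
		if (req->p2p_dev)
			pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
		else
			sgl_free(req->sg);
		req->sg = NULL;
		req->sg_cnt = 0;
	}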
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
u16 size);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index bfc4da660bb4..ddce100be57a 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,6 +122,7 @@ struct nvmet_rdma_device {
int inline_page_count;
};
+static struct workqueue_struct *nvmet_rdma_delete_wq;
static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -503,7 +504,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
}
if (rsp->req.sg != rsp->cmd->inline_sg)
- sgl_free(rsp->req.sg);
+ nvmet_req_free_sgl(&rsp->req);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
nvmet_rdma_process_wr_wait_list(queue);
@@ -652,24 +653,24 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
{
struct rdma_cm_id *cm_id = rsp->queue->cm_id;
u64 addr = le64_to_cpu(sgl->addr);
- u32 len = get_unaligned_le24(sgl->length);
u32 key = get_unaligned_le32(sgl->key);
int ret;
+ rsp->req.transfer_len = get_unaligned_le24(sgl->length);
+
/* no data command? */
- if (!len)
+ if (!rsp->req.transfer_len)
return 0;
- rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
- if (!rsp->req.sg)
- return NVME_SC_INTERNAL;
+ ret = nvmet_req_alloc_sgl(&rsp->req);
+ if (ret < 0)
+ goto error_out;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
nvmet_data_dir(&rsp->req));
if (ret < 0)
- return NVME_SC_INTERNAL;
- rsp->req.transfer_len += len;
+ goto error_out;
rsp->n_rdma += ret;
if (invalidate) {
@@ -678,6 +679,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
}
return 0;
+
+error_out:
+ rsp->req.transfer_len = 0;
+ return NVME_SC_INTERNAL;
}
static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
@@ -745,6 +750,8 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
cmd->send_sge.addr, cmd->send_sge.length,
DMA_TO_DEVICE);
+ cmd->req.p2p_client = &queue->dev->device->dev;
+
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_rdma_ops))
return;
@@ -1267,12 +1274,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_rdma_delete_wq);
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (ret) {
- schedule_work(&queue->release_work);
+ queue_work(nvmet_rdma_delete_wq, &queue->release_work);
/* Destroying rdma_cm id is not needed here */
return 0;
}
@@ -1337,7 +1344,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_rdma_delete_wq, &queue->release_work);
}
}
@@ -1367,7 +1374,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_rdma_delete_wq, &queue->release_work);
}
/**
@@ -1649,8 +1656,17 @@ static int __init nvmet_rdma_init(void)
if (ret)
goto err_ib_client;
+ nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvmet_rdma_delete_wq) {
+ ret = -ENOMEM;
+ goto err_unreg_transport;
+ }
+
return 0;
+err_unreg_transport:
+ nvmet_unregister_transport(&nvmet_rdma_ops);
err_ib_client:
ib_unregister_client(&nvmet_rdma_ib_client);
return ret;
@@ -1658,6 +1674,7 @@ err_ib_client:
static void __exit nvmet_rdma_exit(void)
{
+ destroy_workqueue(nvmet_rdma_delete_wq);
nvmet_unregister_transport(&nvmet_rdma_ops);
ib_unregister_client(&nvmet_rdma_ib_client);
WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 5957cd4fa262..c7fa5a9697c9 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -170,18 +170,6 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
}
EXPORT_SYMBOL_GPL(of_dma_configure);
-/**
- * of_dma_deconfigure - Clean up DMA configuration
- * @dev: Device for which to clean up DMA configuration
- *
- * Clean up all configuration performed by of_dma_configure_ops() and free all
- * resources that have been allocated.
- */
-void of_dma_deconfigure(struct device *dev)
-{
- arch_teardown_dma_ops(dev);
-}
-
int of_device_register(struct platform_device *pdev)
{
device_initialize(&pdev->dev);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 722537e14848..41b49716ac75 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -771,6 +771,9 @@ static void __init of_unittest_parse_interrupts(void)
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
if (!np) {
pr_err("missing testcase data\n");
@@ -845,6 +848,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
if (!np) {
pr_err("missing testcase data\n");
@@ -1001,15 +1007,19 @@ static void __init of_unittest_platform_populate(void)
pdev = of_find_device_by_node(np);
unittest(pdev, "device 1 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
+ if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq == -EPROBE_DEFER,
+ "device deferred probe failed - %d\n", irq);
- /* Test that a parsing failure does not return -EPROBE_DEFER */
- np = of_find_node_by_path("/testcase-data/testcase-device2");
- pdev = of_find_device_by_node(np);
- unittest(pdev, "device 2 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+ /* Test that a parsing failure does not return -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device2");
+ pdev = of_find_device_by_node(np);
+ unittest(pdev, "device 2 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq < 0 && irq != -EPROBE_DEFER,
+ "device parsing error failed - %d\n", irq);
+ }
np = of_find_node_by_path("/testcase-data/platform-tests");
unittest(np, "No testcase data in device tree\n");
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 31ff03dbeb83..2c2df4e4fc14 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -48,9 +48,14 @@ static struct opp_device *_find_opp_dev(const struct device *dev,
static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
struct opp_table *opp_table;
+ bool found;
list_for_each_entry(opp_table, &opp_tables, node) {
- if (_find_opp_dev(dev, opp_table)) {
+ mutex_lock(&opp_table->lock);
+ found = !!_find_opp_dev(dev, opp_table);
+ mutex_unlock(&opp_table->lock);
+
+ if (found) {
_get_opp_table_kref(opp_table);
return opp_table;
@@ -313,7 +318,7 @@ int dev_pm_opp_get_opp_count(struct device *dev)
count = PTR_ERR(opp_table);
dev_dbg(dev, "%s: OPP table not found (%d)\n",
__func__, count);
- return 0;
+ return count;
}
count = _get_opp_count(opp_table);
@@ -754,8 +759,8 @@ static void _remove_opp_dev(struct opp_device *opp_dev,
kfree(opp_dev);
}
-struct opp_device *_add_opp_dev(const struct device *dev,
- struct opp_table *opp_table)
+static struct opp_device *_add_opp_dev_unlocked(const struct device *dev,
+ struct opp_table *opp_table)
{
struct opp_device *opp_dev;
int ret;
@@ -766,6 +771,7 @@ struct opp_device *_add_opp_dev(const struct device *dev,
/* Initialize opp-dev */
opp_dev->dev = dev;
+
list_add(&opp_dev->node, &opp_table->dev_list);
/* Create debugfs entries for the opp_table */
@@ -777,7 +783,19 @@ struct opp_device *_add_opp_dev(const struct device *dev,
return opp_dev;
}
-static struct opp_table *_allocate_opp_table(struct device *dev)
+struct opp_device *_add_opp_dev(const struct device *dev,
+ struct opp_table *opp_table)
+{
+ struct opp_device *opp_dev;
+
+ mutex_lock(&opp_table->lock);
+ opp_dev = _add_opp_dev_unlocked(dev, opp_table);
+ mutex_unlock(&opp_table->lock);
+
+ return opp_dev;
+}
+
+static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
struct opp_table *opp_table;
struct opp_device *opp_dev;
@@ -791,6 +809,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev)
if (!opp_table)
return NULL;
+ mutex_init(&opp_table->lock);
INIT_LIST_HEAD(&opp_table->dev_list);
opp_dev = _add_opp_dev(dev, opp_table);
@@ -799,7 +818,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev)
return NULL;
}
- _of_init_opp_table(opp_table, dev);
+ _of_init_opp_table(opp_table, dev, index);
/* Find clk for the device */
opp_table->clk = clk_get(dev, NULL);
@@ -812,7 +831,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev)
BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
INIT_LIST_HEAD(&opp_table->opp_list);
- mutex_init(&opp_table->lock);
kref_init(&opp_table->kref);
/* Secure the device table modification */
@@ -825,7 +843,7 @@ void _get_opp_table_kref(struct opp_table *opp_table)
kref_get(&opp_table->kref);
}
-struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
+static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
{
struct opp_table *opp_table;
@@ -836,31 +854,56 @@ struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
if (!IS_ERR(opp_table))
goto unlock;
- opp_table = _allocate_opp_table(dev);
+ opp_table = _managed_opp(dev, index);
+ if (opp_table) {
+ if (!_add_opp_dev_unlocked(dev, opp_table)) {
+ dev_pm_opp_put_opp_table(opp_table);
+ opp_table = NULL;
+ }
+ goto unlock;
+ }
+
+ opp_table = _allocate_opp_table(dev, index);
unlock:
mutex_unlock(&opp_table_lock);
return opp_table;
}
+
+struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
+{
+ return _opp_get_opp_table(dev, 0);
+}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table);
+struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev,
+ int index)
+{
+ return _opp_get_opp_table(dev, index);
+}
+
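A hedged usage sketch of the new indexed getter; the device and the second-phandle index here are hypothetical, e.g. a power-domain provider whose node carries operating-points-v2 = <&opp0, &opp1>:

	struct opp_table *opp_table;

	opp_table = dev_pm_opp_get_opp_table_indexed(dev, 1);
	if (!opp_table)
		return -ENOMEM;

	/* ... use the table ... */

	dev_pm_opp_put_opp_table(opp_table);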
static void _opp_table_kref_release(struct kref *kref)
{
struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
- struct opp_device *opp_dev;
+ struct opp_device *opp_dev, *temp;
/* Release clk */
if (!IS_ERR(opp_table->clk))
clk_put(opp_table->clk);
- opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
- node);
+ WARN_ON(!list_empty(&opp_table->opp_list));
- _remove_opp_dev(opp_dev, opp_table);
+ list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
+ /*
+ * The OPP table is getting removed, drop the performance state
+ * constraints.
+ */
+ if (opp_table->genpd_performance_state)
+ dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0);
- /* dev_list must be empty now */
- WARN_ON(!list_empty(&opp_table->dev_list));
+ _remove_opp_dev(opp_dev, opp_table);
+ }
mutex_destroy(&opp_table->lock);
list_del(&opp_table->node);
@@ -869,6 +912,33 @@ static void _opp_table_kref_release(struct kref *kref)
mutex_unlock(&opp_table_lock);
}
+void _opp_remove_all_static(struct opp_table *opp_table)
+{
+ struct dev_pm_opp *opp, *tmp;
+
+ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
+ if (!opp->dynamic)
+ dev_pm_opp_put(opp);
+ }
+
+ opp_table->parsed_static_opps = false;
+}
+
+static void _opp_table_list_kref_release(struct kref *kref)
+{
+ struct opp_table *opp_table = container_of(kref, struct opp_table,
+ list_kref);
+
+ _opp_remove_all_static(opp_table);
+ mutex_unlock(&opp_table_lock);
+}
+
+void _put_opp_list_kref(struct opp_table *opp_table)
+{
+ kref_put_mutex(&opp_table->list_kref, _opp_table_list_kref_release,
+ &opp_table_lock);
+}
+
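_opp_table_list_kref_release() ending in mutex_unlock(&opp_table_lock) is the kref_put_mutex() contract rather than an omission: the mutex is taken only when the count actually drops to zero, and the release callback runs with it held and owns the unlock. A generic sketch of the pattern, with illustrative names:

	static void my_release(struct kref *kref)
	{
		struct my_obj *obj = container_of(kref, struct my_obj, kref);

		list_del(&obj->node);		/* still under my_lock here */
		mutex_unlock(&my_lock);		/* release() must unlock */
		kfree(obj);
	}

	/* my_lock is only contended when the count hits zero */
	kref_put_mutex(&obj->kref, my_release, &my_lock);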
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
@@ -896,7 +966,6 @@ static void _opp_kref_release(struct kref *kref)
kfree(opp);
mutex_unlock(&opp_table->lock);
- dev_pm_opp_put_opp_table(opp_table);
}
void dev_pm_opp_get(struct dev_pm_opp *opp)
@@ -940,11 +1009,15 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
if (found) {
dev_pm_opp_put(opp);
+
+ /* Drop the reference taken by dev_pm_opp_add() */
+ dev_pm_opp_put_opp_table(opp_table);
} else {
dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
__func__, freq);
}
+ /* Drop the reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
@@ -1062,9 +1135,6 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
new_opp->opp_table = opp_table;
kref_init(&new_opp->kref);
- /* Get a reference to the OPP table */
- _get_opp_table_kref(opp_table);
-
ret = opp_debug_create_one(new_opp, opp_table);
if (ret)
dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
@@ -1543,8 +1613,9 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
return -ENOMEM;
ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
+ if (ret)
+ dev_pm_opp_put_opp_table(opp_table);
- dev_pm_opp_put_opp_table(opp_table);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
@@ -1707,35 +1778,7 @@ int dev_pm_opp_unregister_notifier(struct device *dev,
}
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
-/*
- * Free OPPs either created using static entries present in DT or even the
- * dynamically added entries based on remove_all param.
- */
-void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
- bool remove_all)
-{
- struct dev_pm_opp *opp, *tmp;
-
- /* Find if opp_table manages a single device */
- if (list_is_singular(&opp_table->dev_list)) {
- /* Free static OPPs */
- list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
- if (remove_all || !opp->dynamic)
- dev_pm_opp_put(opp);
- }
-
- /*
- * The OPP table is getting removed, drop the performance state
- * constraints.
- */
- if (opp_table->genpd_performance_state)
- dev_pm_genpd_set_performance_state(dev, 0);
- } else {
- _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
- }
-}
-
-void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
+void _dev_pm_opp_find_and_remove_table(struct device *dev)
{
struct opp_table *opp_table;
@@ -1752,8 +1795,12 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
return;
}
- _dev_pm_opp_remove_table(opp_table, dev, remove_all);
+ _put_opp_list_kref(opp_table);
+
+ /* Drop reference taken by _find_opp_table() */
+ dev_pm_opp_put_opp_table(opp_table);
+ /* Drop reference taken while the OPP table was added */
dev_pm_opp_put_opp_table(opp_table);
}
@@ -1766,6 +1813,6 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
*/
void dev_pm_opp_remove_table(struct device *dev)
{
- _dev_pm_opp_find_and_remove_table(dev, true);
+ _dev_pm_opp_find_and_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index 0c0910709435..ab6d07e78945 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -108,7 +108,8 @@ void dev_pm_opp_free_cpufreq_table(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
-void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask,
+ int last_cpu)
{
struct device *cpu_dev;
int cpu;
@@ -116,6 +117,9 @@ void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
WARN_ON(cpumask_empty(cpumask));
for_each_cpu(cpu, cpumask) {
+ if (cpu == last_cpu)
+ break;
+
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
@@ -123,10 +127,7 @@ void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
continue;
}
- if (of)
- dev_pm_opp_of_remove_table(cpu_dev);
- else
- dev_pm_opp_remove_table(cpu_dev);
+ _dev_pm_opp_find_and_remove_table(cpu_dev);
}
}
@@ -140,7 +141,7 @@ void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
*/
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
{
- _dev_pm_opp_cpumask_remove_table(cpumask, false);
+ _dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
@@ -222,8 +223,10 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
cpumask_clear(cpumask);
if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
+ mutex_lock(&opp_table->lock);
list_for_each_entry(opp_dev, &opp_table->dev_list, node)
cpumask_set_cpu(opp_dev->dev->id, cpumask);
+ mutex_unlock(&opp_table->lock);
} else {
cpumask_set_cpu(cpu_dev->id, cpumask);
}
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 7af0ddec936b..5a4b47958073 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -23,11 +23,32 @@
#include "opp.h"
-static struct opp_table *_managed_opp(const struct device_node *np)
+/*
+ * Returns opp descriptor node for a device node, caller must
+ * do of_node_put().
+ */
+static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
+ int index)
+{
+ /* "operating-points-v2" can be an array for power domain providers */
+ return of_parse_phandle(np, "operating-points-v2", index);
+}
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+{
+ return _opp_of_get_opp_desc_node(dev->of_node, 0);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
+
+struct opp_table *_managed_opp(struct device *dev, int index)
{
struct opp_table *opp_table, *managed_table = NULL;
+ struct device_node *np;
- mutex_lock(&opp_table_lock);
+ np = _opp_of_get_opp_desc_node(dev->of_node, index);
+ if (!np)
+ return NULL;
list_for_each_entry(opp_table, &opp_tables, node) {
if (opp_table->np == np) {
@@ -47,29 +68,45 @@ static struct opp_table *_managed_opp(const struct device_node *np)
}
}
- mutex_unlock(&opp_table_lock);
+ of_node_put(np);
return managed_table;
}
-void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
+ int index)
{
- struct device_node *np;
+ struct device_node *np, *opp_np;
+ u32 val;
/*
* Only required for backward compatibility with v1 bindings, but isn't
* harmful for other cases. And so we do it unconditionally.
*/
np = of_node_get(dev->of_node);
- if (np) {
- u32 val;
-
- if (!of_property_read_u32(np, "clock-latency", &val))
- opp_table->clock_latency_ns_max = val;
- of_property_read_u32(np, "voltage-tolerance",
- &opp_table->voltage_tolerance_v1);
- of_node_put(np);
- }
+ if (!np)
+ return;
+
+ if (!of_property_read_u32(np, "clock-latency", &val))
+ opp_table->clock_latency_ns_max = val;
+ of_property_read_u32(np, "voltage-tolerance",
+ &opp_table->voltage_tolerance_v1);
+
+ /* Get OPP table node */
+ opp_np = _opp_of_get_opp_desc_node(np, index);
+ of_node_put(np);
+
+ if (!opp_np)
+ return;
+
+ if (of_property_read_bool(opp_np, "opp-shared"))
+ opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
+ else
+ opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
+
+ opp_table->np = opp_np;
+
+ of_node_put(opp_np);
}
static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
@@ -245,26 +282,10 @@ free_microvolt:
*/
void dev_pm_opp_of_remove_table(struct device *dev)
{
- _dev_pm_opp_find_and_remove_table(dev, false);
+ _dev_pm_opp_find_and_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-/* Returns opp descriptor node for a device node, caller must
- * do of_node_put() */
-static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
- int index)
-{
- /* "operating-points-v2" can be an array for power domain providers */
- return of_parse_phandle(np, "operating-points-v2", index);
-}
-
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
-{
- return _opp_of_get_opp_desc_node(dev->of_node, 0);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
-
/**
* _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
* @opp_table: OPP table
@@ -276,15 +297,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
* removed by dev_pm_opp_remove.
*
* Return:
- * 0 On success OR
+ * Valid OPP pointer:
+ * On success
+ * NULL:
* Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
+ * OR if the OPP is not supported by hardware.
+ * ERR_PTR(-EEXIST):
+ *		Freq is the same but volt is different, OR
* Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -EINVAL Failed parsing the OPP node
+ * ERR_PTR(-ENOMEM):
+ * Memory allocation failure
+ * ERR_PTR(-EINVAL):
+ * Failed parsing the OPP node
*/
-static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
- struct device_node *np)
+static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
+ struct device *dev, struct device_node *np)
{
struct dev_pm_opp *new_opp;
u64 rate = 0;
@@ -294,7 +321,7 @@ static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
new_opp = _opp_allocate(opp_table);
if (!new_opp)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
ret = of_property_read_u64(np, "opp-hz", &rate);
if (ret < 0) {
@@ -369,52 +396,47 @@ static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
* frequency/voltage list.
*/
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
- return 0;
+ return new_opp;
free_opp:
_opp_free(new_opp);
- return ret;
+ return ERR_PTR(ret);
}
/* Initializes OPP tables based on new bindings */
-static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
{
struct device_node *np;
- struct opp_table *opp_table;
- int ret = 0, count = 0, pstate_count = 0;
+ int ret, count = 0, pstate_count = 0;
struct dev_pm_opp *opp;
- opp_table = _managed_opp(opp_np);
- if (opp_table) {
- /* OPPs are already managed */
- if (!_add_opp_dev(dev, opp_table))
- ret = -ENOMEM;
- goto put_opp_table;
+ /* OPP table is already initialized for the device */
+ if (opp_table->parsed_static_opps) {
+ kref_get(&opp_table->list_kref);
+ return 0;
}
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return -ENOMEM;
+ kref_init(&opp_table->list_kref);
/* We have opp-table node now, iterate over it and add OPPs */
- for_each_available_child_of_node(opp_np, np) {
- count++;
-
- ret = _opp_add_static_v2(opp_table, dev, np);
- if (ret) {
+ for_each_available_child_of_node(opp_table->np, np) {
+ opp = _opp_add_static_v2(opp_table, dev, np);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
ret);
- _dev_pm_opp_remove_table(opp_table, dev, false);
of_node_put(np);
- goto put_opp_table;
+ goto put_list_kref;
+ } else if (opp) {
+ count++;
}
}
 	/* There should be one or more OPPs defined */
if (WARN_ON(!count)) {
ret = -ENOENT;
- goto put_opp_table;
+ goto put_list_kref;
}
list_for_each_entry(opp, &opp_table->opp_list, node)
@@ -425,28 +447,25 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
count, pstate_count);
ret = -ENOENT;
- goto put_opp_table;
+ goto put_list_kref;
}
if (pstate_count)
opp_table->genpd_performance_state = true;
- opp_table->np = opp_np;
- if (of_property_read_bool(opp_np, "opp-shared"))
- opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
- else
- opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
+ opp_table->parsed_static_opps = true;
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
+ return 0;
+
+put_list_kref:
+ _put_opp_list_kref(opp_table);
return ret;
}
/* Initializes OPP tables based on old-deprecated bindings */
-static int _of_add_opp_table_v1(struct device *dev)
+static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
{
- struct opp_table *opp_table;
const struct property *prop;
const __be32 *val;
int nr, ret = 0;
@@ -467,9 +486,7 @@ static int _of_add_opp_table_v1(struct device *dev)
return -EINVAL;
}
- opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return -ENOMEM;
+ kref_init(&opp_table->list_kref);
val = prop->value;
while (nr) {
@@ -480,13 +497,12 @@ static int _of_add_opp_table_v1(struct device *dev)
if (ret) {
dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
__func__, freq, ret);
- _dev_pm_opp_remove_table(opp_table, dev, false);
- break;
+ _put_opp_list_kref(opp_table);
+ return ret;
}
nr -= 2;
}
- dev_pm_opp_put_opp_table(opp_table);
return ret;
}
@@ -509,24 +525,24 @@ static int _of_add_opp_table_v1(struct device *dev)
*/
int dev_pm_opp_of_add_table(struct device *dev)
{
- struct device_node *opp_np;
+ struct opp_table *opp_table;
int ret;
+ opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
+ if (!opp_table)
+ return -ENOMEM;
+
/*
- * OPPs have two version of bindings now. The older one is deprecated,
- * try for the new binding first.
+	 * OPPs have two versions of bindings now. Also try the old (v1)
+ * bindings for backward compatibility with older dtbs.
*/
- opp_np = dev_pm_opp_of_get_opp_desc_node(dev);
- if (!opp_np) {
- /*
- * Try old-deprecated bindings for backward compatibility with
- * older dtbs.
- */
- return _of_add_opp_table_v1(dev);
- }
+ if (opp_table->np)
+ ret = _of_add_opp_table_v2(dev, opp_table);
+ else
+ ret = _of_add_opp_table_v1(dev, opp_table);
- ret = _of_add_opp_table_v2(dev, opp_np);
- of_node_put(opp_np);
+ if (ret)
+ dev_pm_opp_put_opp_table(opp_table);
return ret;
}
@@ -553,28 +569,29 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
*/
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- struct device_node *opp_np;
+ struct opp_table *opp_table;
int ret, count;
-again:
- opp_np = _opp_of_get_opp_desc_node(dev->of_node, index);
- if (!opp_np) {
+ if (index) {
/*
* If only one phandle is present, then the same OPP table
* applies for all index requests.
*/
count = of_count_phandle_with_args(dev->of_node,
"operating-points-v2", NULL);
- if (count == 1 && index) {
- index = 0;
- goto again;
- }
+ if (count != 1)
+ return -ENODEV;
- return -ENODEV;
+ index = 0;
}
- ret = _of_add_opp_table_v2(dev, opp_np);
- of_node_put(opp_np);
+ opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
+ if (!opp_table)
+ return -ENOMEM;
+
+ ret = _of_add_opp_table_v2(dev, opp_table);
+ if (ret)
+ dev_pm_opp_put_opp_table(opp_table);
return ret;
}
@@ -591,7 +608,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
*/
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
- _dev_pm_opp_cpumask_remove_table(cpumask, true);
+ _dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
@@ -604,16 +621,18 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
struct device *cpu_dev;
- int cpu, ret = 0;
+ int cpu, ret;
- WARN_ON(cpumask_empty(cpumask));
+ if (WARN_ON(cpumask_empty(cpumask)))
+ return -ENODEV;
for_each_cpu(cpu, cpumask) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
cpu);
- continue;
+ ret = -ENODEV;
+ goto remove_table;
}
ret = dev_pm_opp_of_add_table(cpu_dev);
@@ -625,12 +644,16 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
__func__, cpu, ret);
- /* Free all other OPPs */
- dev_pm_opp_of_cpumask_remove_table(cpumask);
- break;
+ goto remove_table;
}
}
+ return 0;
+
+remove_table:
+ /* Free all other OPPs */
+ _dev_pm_opp_cpumask_remove_table(cpumask, cpu);
+
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 7c540fd063b2..9c6544b4f4f9 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -126,9 +126,11 @@ enum opp_table_access {
* @dev_list: list of devices that share these OPPs
* @opp_list: table of opps
* @kref: for reference count of the table.
- * @lock: mutex protecting the opp_list.
+ * @list_kref: for reference count of the OPP list.
+ * @lock: mutex protecting the opp_list and dev_list.
* @np: struct device_node pointer for opp's DT node.
* @clock_latency_ns_max: Max clock latency in nanoseconds.
+ * @parsed_static_opps: True if OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
* @suspend_opp: Pointer to OPP to be used during device suspend.
* @supported_hw: Array of version number to support.
@@ -156,6 +158,7 @@ struct opp_table {
struct list_head dev_list;
struct list_head opp_list;
struct kref kref;
+ struct kref list_kref;
struct mutex lock;
struct device_node *np;
@@ -164,6 +167,7 @@ struct opp_table {
/* For backward compatibility with v1 bindings */
unsigned int voltage_tolerance_v1;
+ bool parsed_static_opps;
enum opp_table_access shared_opp;
struct dev_pm_opp *suspend_opp;
@@ -186,23 +190,26 @@ struct opp_table {
/* Routines internal to opp core */
void dev_pm_opp_get(struct dev_pm_opp *opp);
+void _opp_remove_all_static(struct opp_table *opp_table);
void _get_opp_table_kref(struct opp_table *opp_table);
int _get_opp_count(struct opp_table *opp_table);
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
-void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, bool remove_all);
-void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all);
+void _dev_pm_opp_find_and_remove_table(struct device *dev);
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
-void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
struct opp_table *_add_opp_table(struct device *dev);
+void _put_opp_list_kref(struct opp_table *opp_table);
#ifdef CONFIG_OF
-void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);
+struct opp_table *_managed_opp(struct device *dev, int index);
#else
-static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {}
+static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) {}
+static inline struct opp_table *_managed_opp(struct device *dev, int index) { return NULL; }
#endif
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/parisc/Makefile b/drivers/parisc/Makefile
index 3cd5e6cb8478..99fa6a89e0b9 100644
--- a/drivers/parisc/Makefile
+++ b/drivers/parisc/Makefile
@@ -8,9 +8,6 @@
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_IOMMU_SBA) += sba_iommu.o
obj-$(CONFIG_PCI_LBA) += lba_pci.o
-
-# Only use one of them: ccio-rm-dma is for PCX-W systems *only*
-# obj-$(CONFIG_IOMMU_CCIO) += ccio-rm-dma.o
obj-$(CONFIG_IOMMU_CCIO) += ccio-dma.o
obj-$(CONFIG_GSC) += gsc.o
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 614823617b8b..701a7d6a74d5 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -609,14 +609,13 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
** PCX-T'? Don't know. (eg C110 or similar K-class)
**
** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit".
- ** Hopefully we can patch (NOP) these out at boot time somehow.
**
** "Since PCX-U employs an offset hash that is incompatible with
** the real mode coherence index generation of U2, the PDIR entry
** must be flushed to memory to retain coherence."
*/
- asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
- asm volatile("sync");
+ asm_io_fdc(pdir_ptr);
+ asm_io_sync();
}
/**
@@ -682,17 +681,14 @@ ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
** PCX-U/U+ do. (eg C200/C240)
** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit".
- **
- ** Hopefully someone figures out how to patch (NOP) the
- ** FDC/SYNC out at boot time.
*/
- asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr[7]));
+ asm_io_fdc(pdir_ptr);
iovp += IOVP_SIZE;
byte_cnt -= IOVP_SIZE;
}
- asm volatile("sync");
+ asm_io_sync();
ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}
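asm_io_fdc() and asm_io_sync() are introduced outside this excerpt; judging purely from the open-coded sequences they replace here and in sba_iommu.c below, their plain form would be roughly the following (a sketch inferred from the replaced code, not the actual definitions, which may also fold in the ioc_needs_fdc test that the sba_iommu.c hunks drop):

	#define asm_io_fdc(addr)	asm volatile("fdc %%r0(%0)" : : "r" (addr))
	#define asm_io_sync()		asm volatile("sync" : : )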
diff --git a/drivers/parisc/ccio-rm-dma.c b/drivers/parisc/ccio-rm-dma.c
deleted file mode 100644
index df7932af48b7..000000000000
--- a/drivers/parisc/ccio-rm-dma.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * ccio-rm-dma.c:
- * DMA management routines for first generation cache-coherent machines.
- * "Real Mode" operation refers to U2/Uturn chip operation. The chip
- * can perform coherency checks w/o using the I/O MMU. That's all we
- * need until support for more than 4GB phys mem is needed.
- *
- * This is the trivial case - basically what x86 does.
- *
- * Drawbacks of using Real Mode are:
- * o outbound DMA is slower since one isn't using the prefetching
- * U2 can do for outbound DMA.
- * o Ability to do scatter/gather in HW is also lost.
- * o only known to work with PCX-W processor. (eg C360)
- * (PCX-U/U+ are not coherent with U2 in real mode.)
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- *
- * Original version/author:
- * CVSROOT=:pserver:anonymous@198.186.203.37:/cvsroot/linux-parisc
- * cvs -z3 co linux/arch/parisc/kernel/dma-rm.c
- *
- * (C) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
- *
- *
- * Adopted for The Puffin Group's parisc-linux port by Grant Grundler.
- * (C) Copyright 2000 Grant Grundler <grundler@puffin.external.hp.com>
- *
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-
-#include <linux/uaccess.h>
-
-#include <asm/io.h>
-#include <asm/hardware.h>
-#include <asm/page.h>
-
-/* Only chose "ccio" since that's what HP-UX calls it....
-** Make it easier for folks to migrate from one to the other :^)
-*/
-#define MODULE_NAME "ccio"
-
-#define U2_IOA_RUNWAY 0x580
-#define U2_BC_GSC 0x501
-#define UTURN_IOA_RUNWAY 0x581
-#define UTURN_BC_GSC 0x502
-
-#define IS_U2(id) ( \
- (((id)->hw_type == HPHW_IOA) && ((id)->hversion == U2_IOA_RUNWAY)) || \
- (((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == U2_BC_GSC)) \
-)
-
-#define IS_UTURN(id) ( \
- (((id)->hw_type == HPHW_IOA) && ((id)->hversion == UTURN_IOA_RUNWAY)) || \
- (((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == UTURN_BC_GSC)) \
-)
-
-static int ccio_dma_supported( struct pci_dev *dev, u64 mask)
-{
- if (dev == NULL) {
- printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
- BUG();
- return(0);
- }
-
- /* only support 32-bit devices (ie PCI/GSC) */
- return((int) (mask >= 0xffffffffUL));
-}
-
-
-static void *ccio_alloc_consistent(struct pci_dev *dev, size_t size,
- dma_addr_t *handle)
-{
- void *ret;
-
- ret = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));
-
- if (ret != NULL) {
- memset(ret, 0, size);
- *handle = virt_to_phys(ret);
- }
- return ret;
-}
-
-static void ccio_free_consistent(struct pci_dev *dev, size_t size,
- void *vaddr, dma_addr_t handle)
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t ccio_map_single(struct pci_dev *dev, void *ptr, size_t size,
- int direction)
-{
- return virt_to_phys(ptr);
-}
-
-static void ccio_unmap_single(struct pci_dev *dev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- /* Nothing to do */
-}
-
-
-static int ccio_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
-{
- int tmp = nents;
-
- /* KISS: map each buffer separately. */
- while (nents) {
- sg_dma_address(sglist) = ccio_map_single(dev, sglist->address, sglist->length, direction);
- sg_dma_len(sglist) = sglist->length;
- nents--;
- sglist++;
- }
-
- return tmp;
-}
-
-
-static void ccio_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
-{
-#if 0
- while (nents) {
- ccio_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
- nents--;
- sglist++;
- }
- return;
-#else
- /* Do nothing (copied from current ccio_unmap_single() :^) */
-#endif
-}
-
-
-static struct pci_dma_ops ccio_ops = {
- ccio_dma_supported,
- ccio_alloc_consistent,
- ccio_free_consistent,
- ccio_map_single,
- ccio_unmap_single,
- ccio_map_sg,
- ccio_unmap_sg,
- NULL, /* dma_sync_single_for_cpu : NOP for U2 */
- NULL, /* dma_sync_single_for_device : NOP for U2 */
- NULL, /* dma_sync_sg_for_cpu : ditto */
- NULL, /* dma_sync_sg_for_device : ditto */
-};
-
-
-/*
-** Determine if u2 should claim this chip (return 0) or not (return 1).
-** If so, initialize the chip and tell other partners in crime they
-** have work to do.
-*/
-static int __init
-ccio_probe(struct parisc_device *dev)
-{
- printk(KERN_INFO "%s found %s at 0x%lx\n", MODULE_NAME,
- dev->id.hversion == U2_BC_GSC ? "U2" : "UTurn",
- dev->hpa.start);
-
-/*
-** FIXME - should check U2 registers to verify it's really running
-** in "Real Mode".
-*/
-
-#if 0
-/* will need this for "Virtual Mode" operation */
- ccio_hw_init(ccio_dev);
- ccio_common_init(ccio_dev);
-#endif
- hppa_dma_ops = &ccio_ops;
- return 0;
-}
-
-static const struct parisc_device_id ccio_tbl[] __initconst = {
- { HPHW_BCPORT, HVERSION_REV_ANY_ID, U2_BC_GSC, 0xc },
- { HPHW_BCPORT, HVERSION_REV_ANY_ID, UTURN_BC_GSC, 0xc },
- { 0, }
-};
-
-static struct parisc_driver ccio_driver __refdata = {
- .name = "U2/Uturn",
- .id_table = ccio_tbl,
- .probe = ccio_probe,
-};
-
-void __init ccio_init(void)
-{
- register_parisc_driver(&ccio_driver);
-}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 7390fb8ca9d1..dfeea458a789 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -382,7 +382,7 @@ ilr_again:
DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n",
__func__, irq, intr_dev, mask);
generic_handle_irq(irq);
- mask &= ~(1 << local_irq);
+ mask &= ~DINO_MASK_IRQ(local_irq);
} while (mask);
/* Support for level triggered IRQ lines.
@@ -396,9 +396,8 @@ ilr_again:
if (mask) {
if (--ilr_loop > 0)
goto ilr_again;
- printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n",
+ pr_warn_ratelimited("Dino 0x%px: stuck interrupt %d\n",
dino_dev->hba.base_addr, mask);
- return IRQ_NONE;
}
return IRQ_HANDLED;
}
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 11de0eccf968..c1e599a429af 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -587,8 +587,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
* (bit #61, big endian), we have to flush and sync every time
* IO-PDIR is changed in Ike/Astro.
*/
- if (ioc_needs_fdc)
- asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
+ asm_io_fdc(pdir_ptr);
}
@@ -641,8 +640,8 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
do {
/* clear I/O Pdir entry "valid" bit first */
((u8 *) pdir_ptr)[7] = 0;
+ asm_io_fdc(pdir_ptr);
if (ioc_needs_fdc) {
- asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
#if 0
entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
@@ -661,8 +660,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
** could dump core on HPMC.
*/
((u8 *) pdir_ptr)[7] = 0;
- if (ioc_needs_fdc)
- asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
+ asm_io_fdc(pdir_ptr);
WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}
@@ -773,8 +771,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
}
/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
- if (ioc_needs_fdc)
- asm volatile("sync" : : );
+ asm_io_sync();
#ifdef ASSERT_PDIR_SANITY
sba_check_pdir(ioc,"Check after sba_map_single()");
@@ -858,8 +855,7 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
sba_free_range(ioc, iova, size);
/* If fdc's were issued, force fdc's to be visible now */
- if (ioc_needs_fdc)
- asm volatile("sync" : : );
+ asm_io_sync();
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */
@@ -1008,8 +1004,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
- if (ioc_needs_fdc)
- asm volatile("sync" : : );
+ asm_io_sync();
#ifdef ASSERT_PDIR_SANITY
if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 56ff8f6d31fc..2dcc30429e8b 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -98,6 +98,9 @@ config PCI_ECAM
config PCI_LOCKLESS_CONFIG
bool
+config PCI_BRIDGE_EMUL
+ bool
+
config PCI_IOV
bool "PCI IOV support"
depends on PCI
@@ -132,6 +135,23 @@ config PCI_PASID
If unsure, say N.
+config PCI_P2PDMA
+ bool "PCI peer-to-peer transfer support"
+ depends on PCI && ZONE_DEVICE
+ select GENERIC_ALLOCATOR
+ help
+	  Enables drivers to do PCI peer-to-peer transactions to and from
+	  BARs that are exposed in other devices that are part of
+	  the hierarchy where peer-to-peer DMA is guaranteed by the PCI
+	  specification to work (i.e. anything below a single PCI bridge).
+
+	  Many PCIe root complexes do not support P2P transactions and
+	  it's hard to tell which ones do, so at this time P2P DMA
+	  transactions must be between devices behind the same root
+	  port.
+
+ If unsure, say N.
+
config PCI_LABEL
def_bool y if (DMI || ACPI)
depends on PCI
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 1b2cfe51e8d7..f2bda77a2df1 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
+obj-$(CONFIG_PCI_BRIDGE_EMUL) += pci-bridge-emul.o
obj-$(CONFIG_ACPI) += pci-acpi.o
obj-$(CONFIG_PCI_LABEL) += pci-label.o
obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o
obj-$(CONFIG_PCI_ECAM) += ecam.o
+obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
# Endpoint library must be initialized before its users
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index a3ad2fe185b9..544922f097c0 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -33,7 +33,7 @@ DEFINE_RAW_SPINLOCK(pci_lock);
#endif
#define PCI_OP_READ(size, type, len) \
-int pci_bus_read_config_##size \
+int noinline pci_bus_read_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
int res; \
@@ -48,7 +48,7 @@ int pci_bus_read_config_##size \
}
#define PCI_OP_WRITE(size, type, len) \
-int pci_bus_write_config_##size \
+int noinline pci_bus_write_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
int res; \
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 028b287466fb..6671946dbf66 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -9,12 +9,14 @@ config PCI_MVEBU
depends on MVEBU_MBUS
depends on ARM
depends on OF
+ select PCI_BRIDGE_EMUL
config PCI_AARDVARK
bool "Aardvark PCIe controller"
depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
+ select PCI_BRIDGE_EMUL
help
Add support for Aardvark 64bit PCIe Host Controller. This
	  controller is part of the South Bridge of the Marvell Armada
@@ -231,7 +233,7 @@ config PCIE_ROCKCHIP_EP
available to support GEN2 with 4 slots.
config PCIE_MEDIATEK
- bool "MediaTek PCIe controller"
+ tristate "MediaTek PCIe controller"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 5d2ce72c7a52..fcf91eacfc63 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
-obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index ce9224a36f62..a32d6dde7a57 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -542,7 +542,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
};
/*
- * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
* @dra7xx: the dra7xx device where the workaround should be applied
*
 * Accesses to the PCIe slave port that are not 32-bit aligned will result
@@ -552,7 +552,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
*
* To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
*/
-static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
+static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
int ret;
struct device_node *np = dev->of_node;
@@ -704,6 +704,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
DEVICE_TYPE_RC);
+
+ ret = dra7xx_pcie_unaligned_memaccess(dev);
+ if (ret)
+ dev_err(dev, "WA for Errata i870 not applied\n");
+
ret = dra7xx_add_pcie_port(dra7xx, pdev);
if (ret < 0)
goto err_gpio;
@@ -717,7 +722,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
DEVICE_TYPE_EP);
- ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
+ ret = dra7xx_pcie_unaligned_memaccess(dev);
if (ret)
goto err_gpio;
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 4a9a673b4777..2cbef2d7c207 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -50,6 +50,7 @@ struct imx6_pcie {
struct regmap *iomuxc_gpr;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
+ struct reset_control *turnoff_reset;
enum imx6_pcie_variants variant;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
@@ -97,6 +98,16 @@ struct imx6_pcie {
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_ATEOVRD 0x10
+#define PCIE_PHY_ATEOVRD_EN (0x1 << 2)
+#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0
+#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1
+
+#define PCIE_PHY_MPLL_OVRD_IN_LO 0x11
+#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2
+#define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
+#define PCIE_PHY_MPLL_MULTIPLIER_OVRD (0x1 << 9)
+
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
@@ -508,6 +519,50 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}
+static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+{
+ unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ int mult, div;
+ u32 val;
+
+ switch (phy_rate) {
+ case 125000000:
+ /*
+ * The default settings of the MPLL are for a 125MHz input
+ * clock, so no need to reconfigure anything in that case.
+ */
+ return 0;
+ case 100000000:
+ mult = 25;
+ div = 0;
+ break;
+ case 200000000:
+ mult = 25;
+ div = 1;
+ break;
+ default:
+ dev_err(imx6_pcie->pci->dev,
+ "Unsupported PHY reference clock rate %lu\n", phy_rate);
+ return -EINVAL;
+ }
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+ PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+ val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+ val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+ PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+ val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+ val |= PCIE_PHY_ATEOVRD_EN;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+
+ return 0;
+}
+
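A quick consistency check of the mult/div table above, under the assumption that the effective MPLL scaling is mult / 2^div (consistent with the one-bit REF_CLKDIV field):

	100 MHz * 25 / 2^0 = 2500 MHz
	200 MHz * 25 / 2^1 = 2500 MHz

i.e. both supported reference clocks are steered to the same MPLL rate that the default 125 MHz configuration already produces.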
static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -542,6 +597,24 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
return -EINVAL;
}
+static void imx6_pcie_ltssm_enable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->variant) {
+ case IMX6Q:
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2,
+ IMX6Q_GPR12_PCIE_CTL_2);
+ break;
+ case IMX7D:
+ reset_control_deassert(imx6_pcie->apps_reset);
+ break;
+ }
+}
+
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -560,11 +633,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
/* Start LTSSM. */
- if (imx6_pcie->variant == IMX7D)
- reset_control_deassert(imx6_pcie->apps_reset);
- else
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+ imx6_pcie_ltssm_enable(dev);
ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret)
@@ -632,6 +701,7 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
imx6_pcie_assert_core_reset(imx6_pcie);
imx6_pcie_init_phy(imx6_pcie);
imx6_pcie_deassert_core_reset(imx6_pcie);
+ imx6_setup_phy_mpll(imx6_pcie);
dw_pcie_setup_rc(pp);
imx6_pcie_establish_link(imx6_pcie);
@@ -682,6 +752,94 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = imx6_pcie_link_up,
};
+#ifdef CONFIG_PM_SLEEP
+static void imx6_pcie_ltssm_disable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0);
+ break;
+ case IMX7D:
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
+ default:
+ dev_err(dev, "ltssm_disable not supported\n");
+ }
+}
+
+static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
+{
+ reset_control_assert(imx6_pcie->turnoff_reset);
+ reset_control_deassert(imx6_pcie->turnoff_reset);
+
+ /*
+ * Components with an upstream port must respond to
+ * PME_Turn_Off with PME_TO_Ack but we can't check.
+ *
+ * The standard recommends a 1-10ms timeout after which to
+ * proceed anyway as if acks were received.
+ */
+ usleep_range(1000, 10000);
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+ clk_disable_unprepare(imx6_pcie->pcie);
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+
+ if (imx6_pcie->variant == IMX7D) {
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ }
+}
+
+static int imx6_pcie_suspend_noirq(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ if (imx6_pcie->variant != IMX7D)
+ return 0;
+
+ imx6_pcie_pm_turnoff(imx6_pcie);
+ imx6_pcie_clk_disable(imx6_pcie);
+ imx6_pcie_ltssm_disable(dev);
+
+ return 0;
+}
+
+static int imx6_pcie_resume_noirq(struct device *dev)
+{
+ int ret;
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct pcie_port *pp = &imx6_pcie->pci->pp;
+
+ if (imx6_pcie->variant != IMX7D)
+ return 0;
+
+ imx6_pcie_assert_core_reset(imx6_pcie);
+ imx6_pcie_init_phy(imx6_pcie);
+ imx6_pcie_deassert_core_reset(imx6_pcie);
+ dw_pcie_setup_rc(pp);
+
+ ret = imx6_pcie_establish_link(imx6_pcie);
+ if (ret < 0)
+ dev_info(dev, "pcie link is down after resume.\n");
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx6_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
+ imx6_pcie_resume_noirq)
+};
+
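For readers unfamiliar with the macro: under CONFIG_PM_SLEEP, SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() fills the noirq-phase slots with the two callbacks; with it disabled, the slots stay NULL. A simplified sketch of the resulting table (the struct is a stand-in, not the real dev_pm_ops layout):

	/* Stand-in type; the real struct dev_pm_ops has many more hooks. */
	struct pm_ops_sketch {
		int (*suspend_noirq)(void *dev);
		int (*freeze_noirq)(void *dev);
		int (*poweroff_noirq)(void *dev);
		int (*resume_noirq)(void *dev);
		int (*thaw_noirq)(void *dev);
		int (*restore_noirq)(void *dev);
	};

	static int sketch_suspend(void *dev) { (void)dev; return 0; }
	static int sketch_resume(void *dev)  { (void)dev; return 0; }

	/* Roughly what SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sketch_suspend,
	 * sketch_resume) expands to when CONFIG_PM_SLEEP is enabled. */
	static const struct pm_ops_sketch sketch_pm_ops = {
		.suspend_noirq	= sketch_suspend,
		.freeze_noirq	= sketch_suspend,
		.poweroff_noirq	= sketch_suspend,
		.resume_noirq	= sketch_resume,
		.thaw_noirq	= sketch_resume,
		.restore_noirq	= sketch_resume,
	};

	int main(void) { return sketch_pm_ops.suspend_noirq(0); }
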
static int imx6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -776,6 +934,13 @@ static int imx6_pcie_probe(struct platform_device *pdev)
break;
}
+ /* Grab turnoff reset */
+ imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
+ if (IS_ERR(imx6_pcie->turnoff_reset)) {
+ dev_err(dev, "Failed to get TURNOFF reset control\n");
+ return PTR_ERR(imx6_pcie->turnoff_reset);
+ }
+
/* Grab GPR config register range */
imx6_pcie->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
@@ -848,6 +1013,7 @@ static struct platform_driver imx6_pcie_driver = {
.name = "imx6q-pcie",
.of_match_table = imx6_pcie_of_match,
.suppress_bind_attrs = true,
+ .pm = &imx6_pcie_pm_ops,
},
.probe = imx6_pcie_probe,
.shutdown = imx6_pcie_shutdown,
diff --git a/drivers/pci/controller/dwc/pci-keystone-dw.c b/drivers/pci/controller/dwc/pci-keystone-dw.c
deleted file mode 100644
index 0682213328e9..000000000000
--- a/drivers/pci/controller/dwc/pci-keystone-dw.c
+++ /dev/null
@@ -1,484 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DesignWare application register space functions for Keystone PCI controller
- *
- * Copyright (C) 2013-2014 Texas Instruments., Ltd.
- * http://www.ti.com
- *
- * Author: Murali Karicheri <m-karicheri2@ti.com>
- */
-
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/irqreturn.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_pci.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-
-#include "pcie-designware.h"
-#include "pci-keystone.h"
-
-/* Application register defines */
-#define LTSSM_EN_VAL 1
-#define LTSSM_STATE_MASK 0x1f
-#define LTSSM_STATE_L0 0x11
-#define DBI_CS2_EN_VAL 0x20
-#define OB_XLAT_EN_VAL 2
-
-/* Application registers */
-#define CMD_STATUS 0x004
-#define CFG_SETUP 0x008
-#define OB_SIZE 0x030
-#define CFG_PCIM_WIN_SZ_IDX 3
-#define CFG_PCIM_WIN_CNT 32
-#define SPACE0_REMOTE_CFG_OFFSET 0x1000
-#define OB_OFFSET_INDEX(n) (0x200 + (8 * n))
-#define OB_OFFSET_HI(n) (0x204 + (8 * n))
-
-/* IRQ register defines */
-#define IRQ_EOI 0x050
-#define IRQ_STATUS 0x184
-#define IRQ_ENABLE_SET 0x188
-#define IRQ_ENABLE_CLR 0x18c
-
-#define MSI_IRQ 0x054
-#define MSI0_IRQ_STATUS 0x104
-#define MSI0_IRQ_ENABLE_SET 0x108
-#define MSI0_IRQ_ENABLE_CLR 0x10c
-#define IRQ_STATUS 0x184
-#define MSI_IRQ_OFFSET 4
-
-/* Error IRQ bits */
-#define ERR_AER BIT(5) /* ECRC error */
-#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
-#define ERR_CORR BIT(3) /* Correctable error */
-#define ERR_NONFATAL BIT(2) /* Non-fatal error */
-#define ERR_FATAL BIT(1) /* Fatal error */
-#define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */
-#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
- ERR_NONFATAL | ERR_FATAL | ERR_SYS)
-#define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI)
-#define ERR_IRQ_STATUS_RAW 0x1c0
-#define ERR_IRQ_STATUS 0x1c4
-#define ERR_IRQ_ENABLE_SET 0x1c8
-#define ERR_IRQ_ENABLE_CLR 0x1cc
-
-/* Config space registers */
-#define DEBUG0 0x728
-
-#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
-
-static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
- u32 *bit_pos)
-{
- *reg_offset = offset % 8;
- *bit_pos = offset >> 3;
-}
-
-phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- return ks_pcie->app.start + MSI_IRQ;
-}
-
-static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
-{
- return readl(ks_pcie->va_app_base + offset);
-}
-
-static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
-{
- writel(val, ks_pcie->va_app_base + offset);
-}
-
-void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- u32 pending, vector;
- int src, virq;
-
- pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
-
- /*
- * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
- * shows 1, 9, 17, 25 and so forth
- */
- for (src = 0; src < 4; src++) {
- if (BIT(src) & pending) {
- vector = offset + (src << 3);
- virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
- src, vector, virq);
- generic_handle_irq(virq);
- }
- }
-}
-
-void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
-{
- u32 reg_offset, bit_pos;
- struct keystone_pcie *ks_pcie;
- struct dw_pcie *pci;
-
- pci = to_dw_pcie_from_pp(pp);
- ks_pcie = to_keystone_pcie(pci);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
-
- ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
- BIT(bit_pos));
- ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
-}
-
-void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
-{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
- BIT(bit_pos));
-}
-
-void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
-{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
- BIT(bit_pos));
-}
-
-int ks_dw_pcie_msi_host_init(struct pcie_port *pp)
-{
- return dw_pcie_allocate_domains(pp);
-}
-
-void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
-{
- int i;
-
- for (i = 0; i < PCI_NUM_INTX; i++)
- ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
-}
-
-void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct device *dev = pci->dev;
- u32 pending;
- int virq;
-
- pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
-
- if (BIT(0) & pending) {
- virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
- dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
- generic_handle_irq(virq);
- }
-
- /* EOI the INTx interrupt */
- ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
-}
-
-void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
-{
- ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
-}
-
-irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
-{
- u32 status;
-
- status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
- if (!status)
- return IRQ_NONE;
-
- if (status & ERR_FATAL_IRQ)
- dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n",
- status);
-
- /* Ack the IRQ; status bits are RW1C */
- ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
- return IRQ_HANDLED;
-}
-
-static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
-{
-}
-
-static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
-{
-}
-
-static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
-{
-}
-
-static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
- .name = "Keystone-PCI-Legacy-IRQ",
- .irq_ack = ks_dw_pcie_ack_legacy_irq,
- .irq_mask = ks_dw_pcie_mask_legacy_irq,
- .irq_unmask = ks_dw_pcie_unmask_legacy_irq,
-};
-
-static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
- unsigned int irq, irq_hw_number_t hw_irq)
-{
- irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
- handle_level_irq);
- irq_set_chip_data(irq, d->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
- .map = ks_dw_pcie_init_legacy_irq_map,
- .xlate = irq_domain_xlate_onetwocell,
-};
-
-/**
- * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
- * registers
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
-
- do {
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- } while (!(val & DBI_CS2_EN_VAL));
-}
-
-/**
- * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
-
- do {
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- } while (val & DBI_CS2_EN_VAL);
-}
-
-void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- u32 start = pp->mem->start, end = pp->mem->end;
- int i, tr_size;
- u32 val;
-
- /* Disable BARs for inbound access */
- ks_dw_pcie_set_dbi_mode(ks_pcie);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
- ks_dw_pcie_clear_dbi_mode(ks_pcie);
-
- /* Set outbound translation size per window division */
- ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
-
- tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
-
- /* Using Direct 1:1 mapping of RC <-> PCI memory space */
- for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
- ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
- ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
- start += tr_size;
- }
-
- /* Enable OB translation */
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
-}
-
-/**
- * ks_pcie_cfg_setup() - Set up configuration space address for a device
- *
- * @ks_pcie: ptr to keystone_pcie structure
- * @bus: Bus number the device is residing on
- * @devfn: device, function number info
- *
- * Forms and returns the address of configuration space mapped in PCIESS
- * address space 0. Also configures CFG_SETUP for remote configuration space
- * access.
- *
- * The address space has two regions to access configuration - local and remote.
- * We access local region for bus 0 (as RC is attached on bus 0) and remote
- * region for others with TYPE 1 access when bus > 1. As for device on bus = 1,
- * we will do TYPE 0 access as it will be on our secondary bus (logical).
- * CFG_SETUP is needed only for remote configuration access.
- */
-static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
- unsigned int devfn)
-{
- u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- u32 regval;
-
- if (bus == 0)
- return pci->dbi_base;
-
- regval = (bus << 16) | (device << 8) | function;
-
- /*
- * Since Bus#1 will be a virtual bus, we need to have TYPE0
- * access only.
- * TYPE 1
- */
- if (bus != 1)
- regval |= BIT(24);
-
- ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
- return pp->va_cfg0_base;
-}
-
-int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u8 bus_num = bus->number;
- void __iomem *addr;
-
- addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
-
- return dw_pcie_read(addr + where, size, val);
-}
-
-int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u8 bus_num = bus->number;
- void __iomem *addr;
-
- addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
-
- return dw_pcie_write(addr + where, size, val);
-}
-
-/**
- * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
- *
- * This sets BAR0 to enable inbound access for MSI_IRQ register
- */
-void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- /* Configure and set up BAR0 */
- ks_dw_pcie_set_dbi_mode(ks_pcie);
-
- /* Enable BAR0 */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
-
- ks_dw_pcie_clear_dbi_mode(ks_pcie);
-
- /*
- * For BAR0, just setting bus address for inbound writes (MSI) should
- * be sufficient. Use physical address to avoid any conflicts.
- */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
-}
-
-/**
- * ks_dw_pcie_link_up() - Check if link up
- */
-int ks_dw_pcie_link_up(struct dw_pcie *pci)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(pci, DEBUG0);
- return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
-}
-
-void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- /* Disable Link training */
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- val &= ~LTSSM_EN_VAL;
- ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-
- /* Initiate Link Training */
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-}
-
-/**
- * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
- *
- * Ioremap the register resources, initialize legacy irq domain
- * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
- * PCI host controller.
- */
-int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
- struct device_node *msi_intc_np)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
-
- /* Index 0 is the config reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- /*
- * We set these same and is used in pcie rd/wr_other_conf
- * functions
- */
- pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
- pp->va_cfg1_base = pp->va_cfg0_base;
-
- /* Index 1 is the application reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ks_pcie->va_app_base))
- return PTR_ERR(ks_pcie->va_app_base);
-
- ks_pcie->app = *res;
-
- /* Create legacy IRQ domain */
- ks_pcie->legacy_irq_domain =
- irq_domain_add_linear(ks_pcie->legacy_intc_np,
- PCI_NUM_INTX,
- &ks_dw_pcie_legacy_irq_domain_ops,
- NULL);
- if (!ks_pcie->legacy_irq_domain) {
- dev_err(dev, "Failed to add irq domain for legacy irqs\n");
- return -EINVAL;
- }
-
- return dw_pcie_host_init(pp);
-}
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index e88bd221fffe..14f2b0b4ed5e 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -9,40 +9,510 @@
* Implementation based on pci-exynos.c and pcie-designware.c
*/
-#include <linux/irqchip/chained_irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/init.h>
+#include <linux/mfd/syscon.h>
#include <linux/msi.h>
-#include <linux/of_irq.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
-#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include "pcie-designware.h"
-#include "pci-keystone.h"
-#define DRIVER_NAME "keystone-pcie"
+#define PCIE_VENDORID_MASK 0xffff
+#define PCIE_DEVICEID_SHIFT 16
+
+/* Application registers */
+#define CMD_STATUS 0x004
+#define LTSSM_EN_VAL BIT(0)
+#define OB_XLAT_EN_VAL BIT(1)
+#define DBI_CS2 BIT(5)
+
+#define CFG_SETUP 0x008
+#define CFG_BUS(x) (((x) & 0xff) << 16)
+#define CFG_DEVICE(x) (((x) & 0x1f) << 8)
+#define CFG_FUNC(x) ((x) & 0x7)
+#define CFG_TYPE1 BIT(24)
+
+#define OB_SIZE 0x030
+#define SPACE0_REMOTE_CFG_OFFSET 0x1000
+#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
+#define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
+#define OB_ENABLEN BIT(0)
+#define OB_WIN_SIZE 8 /* 8MB */
+
+/* IRQ register defines */
+#define IRQ_EOI 0x050
+#define IRQ_STATUS 0x184
+#define IRQ_ENABLE_SET 0x188
+#define IRQ_ENABLE_CLR 0x18c
+
+#define MSI_IRQ 0x054
+#define MSI0_IRQ_STATUS 0x104
+#define MSI0_IRQ_ENABLE_SET 0x108
+#define MSI0_IRQ_ENABLE_CLR 0x10c
+#define IRQ_STATUS 0x184
+#define MSI_IRQ_OFFSET 4
+
+#define ERR_IRQ_STATUS 0x1c4
+#define ERR_IRQ_ENABLE_SET 0x1c8
+#define ERR_AER BIT(5) /* ECRC error */
+#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
+#define ERR_CORR BIT(3) /* Correctable error */
+#define ERR_NONFATAL BIT(2) /* Non-fatal error */
+#define ERR_FATAL BIT(1) /* Fatal error */
+#define ERR_SYS BIT(0) /* System error */
+#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
+ ERR_NONFATAL | ERR_FATAL | ERR_SYS)
+
+#define MAX_MSI_HOST_IRQS 8
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK 0xb008
+#define PCIE_RC_K2E 0xb009
+#define PCIE_RC_K2L 0xb00a
+#define PCIE_RC_K2G 0xb00b
+
+#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+
+struct keystone_pcie {
+ struct dw_pcie *pci;
+ /* PCI Device ID */
+ u32 device_id;
+ int num_legacy_host_irqs;
+ int legacy_host_irqs[PCI_NUM_INTX];
+ struct device_node *legacy_intc_np;
+
+ int num_msi_host_irqs;
+ int msi_host_irqs[MAX_MSI_HOST_IRQS];
+ int num_lanes;
+ u32 num_viewport;
+ struct phy **phy;
+ struct device_link **link;
+ struct device_node *msi_intc_np;
+ struct irq_domain *legacy_irq_domain;
+ struct device_node *np;
+
+ int error_irq;
+
+ /* Application register space */
+ void __iomem *va_app_base; /* DT 1st resource */
+ struct resource app;
+};
-/* DEV_STAT_CTRL */
-#define PCIE_CAP_BASE 0x70
+static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
+ u32 *bit_pos)
+{
+ *reg_offset = offset % 8;
+ *bit_pos = offset >> 3;
+}
-/* PCIE controller device IDs */
-#define PCIE_RC_K2HK 0xb008
-#define PCIE_RC_K2E 0xb009
-#define PCIE_RC_K2L 0xb00a
+static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ return ks_pcie->app.start + MSI_IRQ;
+}
+
+static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+ return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
+ u32 val)
+{
+ writel(val, ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ u32 pending, vector;
+ int src, virq;
+
+ pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
+
+ /*
+ * MSI0 status bits 0-3 correspond to vectors 0, 8, 16, 24;
+ * MSI1 status bits to vectors 1, 9, 17, 25; and so on.
+ */
+ for (src = 0; src < 4; src++) {
+ if (BIT(src) & pending) {
+ vector = offset + (src << 3);
+ virq = irq_linear_revmap(pp->irq_domain, vector);
+ dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
+ src, vector, virq);
+ generic_handle_irq(virq);
+ }
+ }
+}
+
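A quick consistency check on the vector layout in the comment above: status register r, bit b maps to vector r + 8*b, and update_reg_offset_bit_pos() computes exactly the inverse. A self-contained sketch:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		int reg, bit;

		for (reg = 0; reg < 8; reg++)
			for (bit = 0; bit < 4; bit++) {
				int vector = reg + (bit << 3);

				/* inverse mapping used by the ack/mask paths */
				assert(vector % 8 == reg);
				assert(vector >> 3 == bit);
			}
		puts("layout consistent");
		return 0;
	}
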
+static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
+{
+ u32 reg_offset, bit_pos;
+ struct keystone_pcie *ks_pcie;
+ struct dw_pcie *pci;
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+
+ ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
+ BIT(bit_pos));
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+}
+
+static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+ u32 reg_offset, bit_pos;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+ ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+ BIT(bit_pos));
+}
+
+static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+ u32 reg_offset, bit_pos;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+ ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+ BIT(bit_pos));
+}
+
+static int ks_pcie_msi_host_init(struct pcie_port *pp)
+{
+ return dw_pcie_allocate_domains(pp);
+}
+
+static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
+{
+ int i;
+
+ for (i = 0; i < PCI_NUM_INTX; i++)
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
+}
+
+static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
+ int offset)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 pending;
+ int virq;
+
+ pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
+
+ if (BIT(0) & pending) {
+ virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
+ dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
+ generic_handle_irq(virq);
+ }
+
+ /* EOI the INTx interrupt */
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
+}
+
+static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
+{
+ ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
+}
+
+static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
+{
+ u32 reg;
+ struct device *dev = ks_pcie->pci->dev;
+
+ reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
+ if (!reg)
+ return IRQ_NONE;
+
+ if (reg & ERR_SYS)
+ dev_err(dev, "System Error\n");
+
+ if (reg & ERR_FATAL)
+ dev_err(dev, "Fatal Error\n");
+
+ if (reg & ERR_NONFATAL)
+ dev_dbg(dev, "Non Fatal Error\n");
+
+ if (reg & ERR_CORR)
+ dev_dbg(dev, "Correctable Error\n");
+
+ if (reg & ERR_AXI)
+ dev_err(dev, "AXI tag lookup fatal Error\n");
+
+ if (reg & ERR_AER)
+ dev_err(dev, "ECRC Error\n");
+
+ ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
+
+ return IRQ_HANDLED;
+}
+
+static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_pcie_legacy_irq_chip = {
+ .name = "Keystone-PCI-Legacy-IRQ",
+ .irq_ack = ks_pcie_ack_legacy_irq,
+ .irq_mask = ks_pcie_mask_legacy_irq,
+ .irq_unmask = ks_pcie_unmask_legacy_irq,
+};
+
+static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
+ .map = ks_pcie_init_legacy_irq_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * registers
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
+}
+
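The two helpers above share one idiom: flip DBI_CS2 in CMD_STATUS, then keep re-reading until the other clock domain reflects the change. A minimal sketch of the shape, with a plain variable standing in for the application register:

	#include <stdint.h>
	#include <stdio.h>

	#define DBI_CS2 (1u << 5)

	static volatile uint32_t cmd_status;	/* stand-in for the register */

	static void set_dbi_mode(void)
	{
		cmd_status |= DBI_CS2;
		while (!(cmd_status & DBI_CS2))	/* read back until it sticks */
			;
	}

	static void clear_dbi_mode(void)
	{
		cmd_status &= ~DBI_CS2;
		while (cmd_status & DBI_CS2)
			;
	}

	int main(void)
	{
		set_dbi_mode();
		printf("CMD_STATUS after set: %#x\n", cmd_status);
		clear_dbi_mode();
		printf("CMD_STATUS after clear: %#x\n", cmd_status);
		return 0;
	}
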
+static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+ u32 num_viewport = ks_pcie->num_viewport;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ u64 start = pp->mem->start;
+ u64 end = pp->mem->end;
+ int i;
+
+ /* Disable BARs for inbound access */
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ val = ilog2(OB_WIN_SIZE);
+ ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
+
+ /* Using Direct 1:1 mapping of RC <-> PCI memory space */
+ for (i = 0; i < num_viewport && (start < end); i++) {
+ ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
+ lower_32_bits(start) | OB_ENABLEN);
+ ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
+ upper_32_bits(start));
+ start += OB_WIN_SIZE * SZ_1M;
+ }
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= OB_XLAT_EN_VAL;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
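A sanity check on the mapping loop above: each viewport covers one 8 MB window (OB_SIZE is programmed with ilog2(8) = 3), so num_viewport windows map at most num_viewport * 8 MB of the RC memory resource 1:1. A sketch with illustrative (not real Keystone) addresses:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define OB_WIN_BYTES	(8ULL << 20)	/* OB_WIN_SIZE, in bytes */
	#define OB_ENABLEN	1u

	int main(void)
	{
		uint64_t start = 0x60000000, end = 0x60ffffff;	/* 16 MB, example */
		unsigned int num_viewport = 32, i;

		for (i = 0; i < num_viewport && start < end; i++) {
			printf("viewport %u: lo=%#010" PRIx64 " hi=%#" PRIx64 "\n",
			       i, (start & 0xffffffff) | OB_ENABLEN, start >> 32);
			start += OB_WIN_BYTES;
		}
		printf("%u windows cover the range\n", i);
		return 0;
	}
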
+static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size,
+ u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 reg;
+
+ reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+ CFG_FUNC(PCI_FUNC(devfn));
+ if (bus->parent->number != pp->root_bus_nr)
+ reg |= CFG_TYPE1;
+ ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+ return dw_pcie_read(pp->va_cfg0_base + where, size, val);
+}
+
+static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size,
+ u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 reg;
+
+ reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+ CFG_FUNC(PCI_FUNC(devfn));
+ if (bus->parent->number != pp->root_bus_nr)
+ reg |= CFG_TYPE1;
+ ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+ return dw_pcie_write(pp->va_cfg0_base + where, size, val);
+}
+
+/**
+ * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ *
+ * This sets BAR0 to enable inbound access for MSI_IRQ register
+ */
+static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+}
+
+/**
+ * ks_pcie_link_up() - Check if link up
+ */
+static int ks_pcie_link_up(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+ val &= PORT_LOGIC_LTSSM_STATE_MASK;
+ return (val == PORT_LOGIC_LTSSM_STATE_L0);
+}
+
+static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
-#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+ /* Disable Link training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~LTSSM_EN_VAL;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-static void quirk_limit_mrrs(struct pci_dev *dev)
+ /* Initiate Link Training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+}
+
+/**
+ * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware
+ *
+ * Ioremap the register resources, initialize legacy irq domain
+ * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
+ * PCI host controller.
+ */
+static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ /* Index 0 is the config reg. space address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ /*
+ * Both point at the same window; the rd/wr_other_conf
+ * callbacks use them for remote config space accesses.
+ */
+ pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
+ pp->va_cfg1_base = pp->va_cfg0_base;
+
+ /* Index 1 is the application reg. space address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ks_pcie->va_app_base))
+ return PTR_ERR(ks_pcie->va_app_base);
+
+ ks_pcie->app = *res;
+
+ /* Create legacy IRQ domain */
+ ks_pcie->legacy_irq_domain =
+ irq_domain_add_linear(ks_pcie->legacy_intc_np,
+ PCI_NUM_INTX,
+ &ks_pcie_legacy_irq_domain_ops,
+ NULL);
+ if (!ks_pcie->legacy_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ return -EINVAL;
+ }
+
+ return dw_pcie_host_init(pp);
+}
+
+static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
- struct pci_dev *bridge = bus->self;
+ struct pci_dev *bridge;
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
@@ -50,11 +520,13 @@ static void quirk_limit_mrrs(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ 0, },
};
if (pci_is_root_bus(bus))
- return;
+ bridge = dev;
/* look for the host bridge */
while (!pci_is_root_bus(bus)) {
@@ -62,43 +534,39 @@ static void quirk_limit_mrrs(struct pci_dev *dev)
bus = bus->parent;
}
- if (bridge) {
- /*
- * Keystone PCI controller has a h/w limitation of
- * 256 bytes maximum read request size. It can't handle
- * anything higher than this. So force this limit on
- * all downstream devices.
- */
- if (pci_match_id(rc_pci_devids, bridge)) {
- if (pcie_get_readrq(dev) > 256) {
- dev_info(&dev->dev, "limiting MRRS to 256\n");
- pcie_set_readrq(dev, 256);
- }
+ if (!bridge)
+ return;
+
+ /*
+ * Keystone PCI controller has a h/w limitation of
+ * 256 bytes maximum read request size. It can't handle
+ * anything higher than this. So force this limit on
+ * all downstream devices.
+ */
+ if (pci_match_id(rc_pci_devids, bridge)) {
+ if (pcie_get_readrq(dev) > 256) {
+ dev_info(&dev->dev, "limiting MRRS to 256\n");
+ pcie_set_readrq(dev, 256);
}
}
}
-DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
- unsigned int retries;
-
- dw_pcie_setup_rc(pp);
if (dw_pcie_link_up(pci)) {
dev_info(dev, "Link already up\n");
return 0;
}
+ ks_pcie_initiate_link_train(ks_pcie);
+
/* check if the link is up or not */
- for (retries = 0; retries < 5; retries++) {
- ks_dw_pcie_initiate_link_train(ks_pcie);
- if (!dw_pcie_wait_for_link(pci))
- return 0;
- }
+ if (!dw_pcie_wait_for_link(pci))
+ return 0;
dev_err(dev, "phy link never came up\n");
return -ETIMEDOUT;
@@ -121,7 +589,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
+ ks_pcie_handle_msi_irq(ks_pcie, offset);
chained_irq_exit(chip, desc);
}
@@ -150,7 +618,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+ ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
chained_irq_exit(chip, desc);
}
@@ -222,7 +690,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
ks_pcie_legacy_irq_handler,
ks_pcie);
}
- ks_dw_pcie_enable_legacy_irqs(ks_pcie);
+ ks_pcie_enable_legacy_irqs(ks_pcie);
/* MSI IRQ */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -234,7 +702,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
}
if (ks_pcie->error_irq > 0)
- ks_dw_pcie_enable_error_irq(ks_pcie);
+ ks_pcie_enable_error_irq(ks_pcie);
}
/*
@@ -242,8 +710,8 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
* bus error instead of returning 0xffffffff. This handler always returns 0
* for this kind of faults.
*/
-static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
- struct pt_regs *regs)
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
{
unsigned long instr = *(unsigned long *) instruction_pointer(regs);
@@ -257,59 +725,78 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
return 0;
}
+static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
+{
+ int ret;
+ unsigned int id;
+ struct regmap *devctrl_regs;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+
+ devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
+ if (IS_ERR(devctrl_regs))
+ return PTR_ERR(devctrl_regs);
+
+ ret = regmap_read(devctrl_regs, 0, &id);
+ if (ret)
+ return ret;
+
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+
+ return 0;
+}
+
static int __init ks_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u32 val;
+ int ret;
+
+ dw_pcie_setup_rc(pp);
ks_pcie_establish_link(ks_pcie);
- ks_dw_pcie_setup_rc_app_regs(ks_pcie);
+ ks_pcie_setup_rc_app_regs(ks_pcie);
ks_pcie_setup_interrupts(ks_pcie);
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
- /* update the Vendor ID */
- writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID);
-
- /* update the DEV_STAT_CTRL to publish right mrrs */
- val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
- val &= ~PCI_EXP_DEVCTL_READRQ;
- /* set the mrrs to 256 bytes */
- val |= BIT(12);
- writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+ ret = ks_pcie_init_id(ks_pcie);
+ if (ret < 0)
+ return ret;
/*
* PCIe access errors that result into OCP errors are caught by ARM as
* "External aborts"
*/
- hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
+ hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
"Asynchronous external abort");
return 0;
}
-static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
- .rd_other_conf = ks_dw_pcie_rd_other_conf,
- .wr_other_conf = ks_dw_pcie_wr_other_conf,
+static const struct dw_pcie_host_ops ks_pcie_host_ops = {
+ .rd_other_conf = ks_pcie_rd_other_conf,
+ .wr_other_conf = ks_pcie_wr_other_conf,
.host_init = ks_pcie_host_init,
- .msi_set_irq = ks_dw_pcie_msi_set_irq,
- .msi_clear_irq = ks_dw_pcie_msi_clear_irq,
- .get_msi_addr = ks_dw_pcie_get_msi_addr,
- .msi_host_init = ks_dw_pcie_msi_host_init,
- .msi_irq_ack = ks_dw_pcie_msi_irq_ack,
- .scan_bus = ks_dw_pcie_v3_65_scan_bus,
+ .msi_set_irq = ks_pcie_msi_set_irq,
+ .msi_clear_irq = ks_pcie_msi_clear_irq,
+ .get_msi_addr = ks_pcie_get_msi_addr,
+ .msi_host_init = ks_pcie_msi_host_init,
+ .msi_irq_ack = ks_pcie_msi_irq_ack,
+ .scan_bus = ks_pcie_v3_65_scan_bus,
};
-static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
+static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
struct keystone_pcie *ks_pcie = priv;
- return ks_dw_pcie_handle_error_irq(ks_pcie);
+ return ks_pcie_handle_error_irq(ks_pcie);
}
-static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
+static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
+ struct platform_device *pdev)
{
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
@@ -338,7 +825,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
if (ks_pcie->error_irq <= 0)
dev_info(dev, "no error IRQ defined\n");
else {
- ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
+ ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler,
IRQF_SHARED, "pcie-error-irq", ks_pcie);
if (ret < 0) {
dev_err(dev, "failed to request error IRQ %d\n",
@@ -347,8 +834,8 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
}
}
- pp->ops = &keystone_pcie_host_ops;
- ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
+ pp->ops = &ks_pcie_host_ops;
+ ret = ks_pcie_dw_host_init(ks_pcie);
if (ret) {
dev_err(dev, "failed to initialize host\n");
return ret;
@@ -365,28 +852,62 @@ static const struct of_device_id ks_pcie_of_match[] = {
{ },
};
-static const struct dw_pcie_ops dw_pcie_ops = {
- .link_up = ks_dw_pcie_link_up,
+static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+ .link_up = ks_pcie_link_up,
};
-static int __exit ks_pcie_remove(struct platform_device *pdev)
+static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
- struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ int num_lanes = ks_pcie->num_lanes;
- clk_disable_unprepare(ks_pcie->clk);
+ while (num_lanes--) {
+ phy_power_off(ks_pcie->phy[num_lanes]);
+ phy_exit(ks_pcie->phy[num_lanes]);
+ }
+}
+
+static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
+{
+ int i;
+ int ret;
+ int num_lanes = ks_pcie->num_lanes;
+
+ for (i = 0; i < num_lanes; i++) {
+ ret = phy_init(ks_pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(ks_pcie->phy[i]);
+ if (ret < 0) {
+ phy_exit(ks_pcie->phy[i]);
+ goto err_phy;
+ }
+ }
return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(ks_pcie->phy[i]);
+ phy_exit(ks_pcie->phy[i]);
+ }
+
+ return ret;
}
static int __init ks_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct dw_pcie *pci;
struct keystone_pcie *ks_pcie;
- struct resource *res;
- void __iomem *reg_p;
- struct phy *phy;
+ struct device_link **link;
+ u32 num_viewport;
+ struct phy **phy;
+ u32 num_lanes;
+ char name[10];
int ret;
+ int i;
ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
if (!ks_pcie)
@@ -397,54 +918,99 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pci->dev = dev;
- pci->ops = &dw_pcie_ops;
+ pci->ops = &ks_pcie_dw_pcie_ops;
- ks_pcie->pci = pci;
+ ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-viewport* property\n");
+ return ret;
+ }
- /* initialize SerDes Phy if present */
- phy = devm_phy_get(dev, "pcie-phy");
- if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER)
- return PTR_ERR(phy);
+ ret = of_property_read_u32(np, "num-lanes", &num_lanes);
+ if (ret)
+ num_lanes = 1;
- if (!IS_ERR_OR_NULL(phy)) {
- ret = phy_init(phy);
- if (ret < 0)
- return ret;
+ phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ for (i = 0; i < num_lanes; i++) {
+ snprintf(name, sizeof(name), "pcie-phy%d", i);
+ phy[i] = devm_phy_optional_get(dev, name);
+ if (IS_ERR(phy[i])) {
+ ret = PTR_ERR(phy[i]);
+ goto err_link;
+ }
+
+ if (!phy[i])
+ continue;
+
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ ret = -EINVAL;
+ goto err_link;
+ }
}
- /* index 2 is to read PCI DEVICE_ID */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- reg_p = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg_p))
- return PTR_ERR(reg_p);
- ks_pcie->device_id = readl(reg_p) >> 16;
- devm_iounmap(dev, reg_p);
- devm_release_mem_region(dev, res->start, resource_size(res));
+ ks_pcie->np = np;
+ ks_pcie->pci = pci;
+ ks_pcie->link = link;
+ ks_pcie->num_lanes = num_lanes;
+ ks_pcie->num_viewport = num_viewport;
+ ks_pcie->phy = phy;
- ks_pcie->np = dev->of_node;
- platform_set_drvdata(pdev, ks_pcie);
- ks_pcie->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ks_pcie->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ks_pcie->clk);
+ ret = ks_pcie_enable_phy(ks_pcie);
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ goto err_link;
}
- ret = clk_prepare_enable(ks_pcie->clk);
- if (ret)
- return ret;
platform_set_drvdata(pdev, ks_pcie);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
- ret = ks_add_pcie_port(ks_pcie, pdev);
+ ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
if (ret < 0)
- goto fail_clk;
+ goto err_get_sync;
return 0;
-fail_clk:
- clk_disable_unprepare(ks_pcie->clk);
+
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ ks_pcie_disable_phy(ks_pcie);
+
+err_link:
+ while (--i >= 0 && link[i])
+ device_link_del(link[i]);
return ret;
}
+static int __exit ks_pcie_remove(struct platform_device *pdev)
+{
+ struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ struct device_link **link = ks_pcie->link;
+ int num_lanes = ks_pcie->num_lanes;
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ ks_pcie_disable_phy(ks_pcie);
+ while (num_lanes--)
+ device_link_del(link[num_lanes]);
+
+ return 0;
+}
+
static struct platform_driver ks_pcie_driver __refdata = {
.probe = ks_pcie_probe,
.remove = __exit_p(ks_pcie_remove),
diff --git a/drivers/pci/controller/dwc/pci-keystone.h b/drivers/pci/controller/dwc/pci-keystone.h
deleted file mode 100644
index 8a13da391543..000000000000
--- a/drivers/pci/controller/dwc/pci-keystone.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Keystone PCI Controller's common includes
- *
- * Copyright (C) 2013-2014 Texas Instruments., Ltd.
- * http://www.ti.com
- *
- * Author: Murali Karicheri <m-karicheri2@ti.com>
- */
-
-#define MAX_MSI_HOST_IRQS 8
-
-struct keystone_pcie {
- struct dw_pcie *pci;
- struct clk *clk;
- /* PCI Device ID */
- u32 device_id;
- int num_legacy_host_irqs;
- int legacy_host_irqs[PCI_NUM_INTX];
- struct device_node *legacy_intc_np;
-
- int num_msi_host_irqs;
- int msi_host_irqs[MAX_MSI_HOST_IRQS];
- struct device_node *msi_intc_np;
- struct irq_domain *legacy_irq_domain;
- struct device_node *np;
-
- int error_irq;
-
- /* Application register space */
- void __iomem *va_app_base; /* DT 1st resource */
- struct resource app;
-};
-
-/* Keystone DW specific MSI controller APIs/definitions */
-void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
-phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
-
-/* Keystone specific PCI controller APIs */
-void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
-void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
-void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie);
-irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie);
-int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
- struct device_node *msi_intc_np);
-int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val);
-int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val);
-void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
-void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
-void ks_dw_pcie_msi_irq_ack(int i, struct pcie_port *pp);
-void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
-void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
-void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
-int ks_dw_pcie_msi_host_init(struct pcie_port *pp);
-int ks_dw_pcie_link_up(struct dw_pcie *pci);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 778c4f76a884..2153956a0b20 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
if (val & PCIE_ATU_ENABLE)
return;
- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
@@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
if (val & PCIE_ATU_ENABLE)
return;
- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
@@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
if (val & PCIE_ATU_ENABLE)
return 0;
- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "Inbound iATU is not being enabled\n");
@@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
if (val & PCIE_ATU_ENABLE)
return 0;
- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "Inbound iATU is not being enabled\n");
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 96126fd8403c..0989d880ac46 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -26,8 +26,7 @@
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
-#define LINK_WAIT_IATU_MIN 9000
-#define LINK_WAIT_IATU_MAX 10000
+#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL 0x710
@@ -37,6 +36,10 @@
#define PORT_LINK_MODE_4_LANES (0x7 << 16)
#define PORT_LINK_MODE_8_LANES (0xf << 16)
+#define PCIE_PORT_DEBUG0 0x728
+#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
+#define PORT_LOGIC_LTSSM_STATE_L0 0x11
+
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8)
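Context for the usleep_range() to mdelay() conversion in the pcie-designware.c hunks above: iATU programming can run in atomic context (for example under the spinlock held around config accesses), where sleeping primitives are not allowed, so the bounded wait becomes a busy-wait. A sketch of the resulting poll shape (atu_enabled() is a stub):

	#include <stdbool.h>
	#include <stdio.h>

	#define LINK_WAIT_MAX_IATU_RETRIES	5

	static bool atu_enabled(int polls)	/* stub: "ready" on third poll */
	{
		return polls >= 2;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < LINK_WAIT_MAX_IATU_RETRIES; i++) {
			if (atu_enabled(i)) {
				printf("iATU enabled after %d polls\n", i + 1);
				return 0;
			}
			/* the driver spins here via mdelay(LINK_WAIT_IATU) */
		}
		fprintf(stderr, "iATU is not being enabled\n");
		return 1;
	}
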
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 5352e0c3be82..9b599296205d 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -467,8 +467,8 @@ static int kirin_pcie_add_msi(struct dw_pcie *pci,
return 0;
}
-static int __init kirin_add_pcie_port(struct dw_pcie *pci,
- struct platform_device *pdev)
+static int kirin_add_pcie_port(struct dw_pcie *pci,
+ struct platform_device *pdev)
{
int ret;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 4352c1cb926d..d185ea5fe996 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1089,7 +1089,6 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
struct qcom_pcie *pcie = to_qcom_pcie(pci);
int ret;
- pm_runtime_get_sync(pci->dev);
qcom_ep_reset_assert(pcie);
ret = pcie->ops->init(pcie);
@@ -1126,7 +1125,6 @@ err_disable_phy:
phy_power_off(pcie->phy);
err_deinit:
pcie->ops->deinit(pcie);
- pm_runtime_put(pci->dev);
return ret;
}
@@ -1216,6 +1214,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
@@ -1225,44 +1229,56 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->ops = of_device_get_match_data(dev);
pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
- if (IS_ERR(pcie->reset))
- return PTR_ERR(pcie->reset);
+ if (IS_ERR(pcie->reset)) {
+ ret = PTR_ERR(pcie->reset);
+ goto err_pm_runtime_put;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
pcie->parf = devm_ioremap_resource(dev, res);
- if (IS_ERR(pcie->parf))
- return PTR_ERR(pcie->parf);
+ if (IS_ERR(pcie->parf)) {
+ ret = PTR_ERR(pcie->parf);
+ goto err_pm_runtime_put;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
+ if (IS_ERR(pci->dbi_base)) {
+ ret = PTR_ERR(pci->dbi_base);
+ goto err_pm_runtime_put;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
pcie->elbi = devm_ioremap_resource(dev, res);
- if (IS_ERR(pcie->elbi))
- return PTR_ERR(pcie->elbi);
+ if (IS_ERR(pcie->elbi)) {
+ ret = PTR_ERR(pcie->elbi);
+ goto err_pm_runtime_put;
+ }
pcie->phy = devm_phy_optional_get(dev, "pciephy");
- if (IS_ERR(pcie->phy))
- return PTR_ERR(pcie->phy);
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
+ goto err_pm_runtime_put;
+ }
ret = pcie->ops->get_resources(pcie);
if (ret)
- return ret;
+ goto err_pm_runtime_put;
pp->ops = &qcom_pcie_dw_ops;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
+ if (pp->msi_irq < 0) {
+ ret = pp->msi_irq;
+ goto err_pm_runtime_put;
+ }
}
ret = phy_init(pcie->phy);
if (ret) {
pm_runtime_disable(&pdev->dev);
- return ret;
+ goto err_pm_runtime_put;
}
platform_set_drvdata(pdev, pcie);
@@ -1271,10 +1287,16 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "cannot initialize host\n");
pm_runtime_disable(&pdev->dev);
- return ret;
+ goto err_pm_runtime_put;
}
return 0;
+
+err_pm_runtime_put:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
}
static const struct of_device_id qcom_pcie_match[] = {
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 6b4555ff2548..750081c1cb48 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -20,12 +20,16 @@
#include <linux/of_pci.h>
#include "../pci.h"
+#include "../pci-bridge-emul.h"
/* PCIe core registers */
+#define PCIE_CORE_DEV_ID_REG 0x0
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
+#define PCIE_CORE_DEV_REV_REG 0x8
+#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
@@ -41,7 +45,10 @@
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)
-
+#define PCIE_CORE_INT_A_ASSERT_ENABLE 1
+#define PCIE_CORE_INT_B_ASSERT_ENABLE 2
+#define PCIE_CORE_INT_C_ASSERT_ENABLE 3
+#define PCIE_CORE_INT_D_ASSERT_ENABLE 4
/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR 0x4000
#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
@@ -93,7 +100,9 @@
#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
+#define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
+#define PCIE_MSG_PM_PME_MASK BIT(7)
#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
@@ -189,6 +198,7 @@ struct advk_pcie {
struct mutex msi_used_lock;
u16 msi_msg;
int root_bus_nr;
+ struct pci_bridge_emul bridge;
};
static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
@@ -390,6 +400,109 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
+
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case PCI_EXP_SLTCTL:
+ *value = PCI_EXP_SLTSTA_PDS << 16;
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ case PCI_EXP_RTCTL: {
+ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ *value = (val & PCIE_MSG_PM_PME_MASK) ? PCI_EXP_RTCTL_PMEIE : 0;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
+ case PCI_EXP_RTSTA: {
+ u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
+ u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
+ *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16);
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
+ case PCI_CAP_LIST_ID:
+ case PCI_EXP_DEVCAP:
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_LNKCAP:
+ case PCI_EXP_LNKCTL:
+ *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+ return PCI_BRIDGE_EMUL_HANDLED;
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+
+}
+
+static void
+advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_LNKCTL:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ break;
+
+ case PCI_EXP_RTCTL:
+ new = (new & PCI_EXP_RTCTL_PMEIE) << 3;
+ advk_writel(pcie, new, PCIE_ISR0_MASK_REG);
+ break;
+
+ case PCI_EXP_RTSTA:
+ new = (new & PCI_EXP_RTSTA_PME) >> 9;
+ advk_writel(pcie, new, PCIE_ISR0_REG);
+ break;
+
+ default:
+ break;
+ }
+}
+
+struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+ .read_pcie = advk_pci_bridge_emul_pcie_conf_read,
+ .write_pcie = advk_pci_bridge_emul_pcie_conf_write,
+};
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+{
+ struct pci_bridge_emul *bridge = &pcie->bridge;
+
+ bridge->conf.vendor = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff;
+ bridge->conf.device = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16;
+ bridge->conf.class_revision =
+ advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff;
+
+ /* Support 32 bits I/O addressing */
+ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
+ bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
+
+ /* Support 64 bits memory pref */
+ bridge->conf.pref_mem_base = PCI_PREF_RANGE_TYPE_64;
+ bridge->conf.pref_mem_limit = PCI_PREF_RANGE_TYPE_64;
+
+ /* Support interrupt A for MSI feature */
+ bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+
+ bridge->has_pcie = true;
+ bridge->data = pcie;
+ bridge->ops = &advk_pci_bridge_emul_ops;
+
+ pci_bridge_emul_init(bridge);
+
+}
+
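One small detail worth pinning down from advk_sw_pci_bridge_init() above: the DEV_ID register packs the vendor ID in the low half and the device ID in the high half. A sketch with a made-up register value:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* hypothetical value: device 0x9235, vendor 0x11ab */
		uint32_t dev_id = 0x923511ab;

		assert((dev_id & 0xffff) == 0x11ab);
		assert((dev_id >> 16) == 0x9235);
		printf("vendor=%#06x device=%#06x\n",
		       dev_id & 0xffff, dev_id >> 16);
		return 0;
	}
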
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
int devfn)
{
@@ -411,6 +524,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return PCIBIOS_DEVICE_NOT_FOUND;
}
+ if (bus->number == pcie->root_bus_nr)
+ return pci_bridge_emul_conf_read(&pcie->bridge, where,
+ size, val);
+
/* Start PIO */
advk_writel(pcie, 0, PIO_START);
advk_writel(pcie, 1, PIO_ISR);
@@ -418,7 +535,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
reg &= ~PIO_CTRL_TYPE_MASK;
- if (bus->number == pcie->root_bus_nr)
+ if (bus->primary == pcie->root_bus_nr)
reg |= PCIE_CONFIG_RD_TYPE0;
else
reg |= PCIE_CONFIG_RD_TYPE1;
@@ -463,6 +580,10 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (!advk_pcie_valid_device(pcie, bus, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
+ if (bus->number == pcie->root_bus_nr)
+ return pci_bridge_emul_conf_write(&pcie->bridge, where,
+ size, val);
+
if (where % size)
return PCIBIOS_SET_FAILED;
@@ -473,7 +594,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
reg &= ~PIO_CTRL_TYPE_MASK;
- if (bus->number == pcie->root_bus_nr)
+ if (bus->primary == pcie->root_bus_nr)
reg |= PCIE_CONFIG_WR_TYPE0;
else
reg |= PCIE_CONFIG_WR_TYPE1;
@@ -875,6 +996,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
advk_pcie_setup_hw(pcie);
+ advk_sw_pci_bridge_init(pcie);
+
ret = advk_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(dev, "Failed to initialize irq\n");
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index d8f10451f273..c742881b5061 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -58,9 +58,7 @@ err_out:
int pci_host_common_probe(struct platform_device *pdev,
struct pci_ecam_ops *ops)
{
- const char *type;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
struct list_head resources;
@@ -70,12 +68,6 @@ int pci_host_common_probe(struct platform_device *pdev,
if (!bridge)
return -ENOMEM;
- type = of_get_property(np, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
-
of_pci_check_probe_only();
/* Parse and map our Configuration Space windows */
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index c00f82cc54aa..9ba4d12c179c 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -89,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version;
#define STATUS_REVISION_MISMATCH 0xC0000059
+/* space for 32-bit serial number as string */
+#define SLOT_NAME_SIZE 11
+
/*
* Message Types
*/
@@ -494,6 +497,7 @@ struct hv_pci_dev {
struct list_head list_entry;
refcount_t refs;
enum hv_pcichild_state state;
+ struct pci_slot *pci_slot;
struct pci_function_description desc;
bool reported_missing;
struct hv_pcibus_device *hbus;
@@ -1457,6 +1461,36 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
}
+/*
+ * Assign entries in the sysfs pci slot directory.
+ *
+ * Note that this function does not need to lock the children list
+ * because it is called from pci_devices_present_work(), which is
+ * serialized with hv_eject_device_work() because they run on the
+ * same ordered workqueue. Therefore the hbus->children list will not
+ * change even when pci_create_slot() sleeps.
+ */
+static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
+{
+ struct hv_pci_dev *hpdev;
+ char name[SLOT_NAME_SIZE];
+ int slot_nr;
+
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if (hpdev->pci_slot)
+ continue;
+
+ slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
+ snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
+ hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
+ name, NULL);
+ if (IS_ERR(hpdev->pci_slot)) {
+ pr_warn("pci_create slot %s failed\n", name);
+ hpdev->pci_slot = NULL;
+ }
+ }
+}
+
/**
* create_root_hv_pci_bus() - Expose a new root PCI bus
* @hbus: Root PCI bus, as understood by this driver
@@ -1480,6 +1514,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
pci_bus_assign_resources(hbus->pci_bus);
+ hv_pci_assign_slots(hbus);
pci_bus_add_devices(hbus->pci_bus);
pci_unlock_rescan_remove();
hbus->state = hv_pcibus_installed;
@@ -1742,6 +1777,7 @@ static void pci_devices_present_work(struct work_struct *work)
*/
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
+ hv_pci_assign_slots(hbus);
pci_unlock_rescan_remove();
break;
@@ -1858,6 +1894,9 @@ static void hv_eject_device_work(struct work_struct *work)
list_del(&hpdev->list_entry);
spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+
memset(&ctxt, 0, sizeof(ctxt));
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
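A small consistency check on the SLOT_NAME_SIZE value introduced above: the slot name is the device's 32-bit serial number printed with "%u", and the largest such value, 4294967295, needs ten characters plus a terminating NUL — eleven bytes in total. A standalone illustration (plain userspace C, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char name[11];				/* SLOT_NAME_SIZE */
	int n = snprintf(name, sizeof(name), "%u", UINT32_MAX);

	/* Prints: "4294967295" -> 10 characters + NUL */
	printf("\"%s\" -> %d characters + NUL\n", name, n);
	return 0;
}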
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 50eb0729385b..fa0fc46edb0c 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -22,6 +22,7 @@
#include <linux/of_platform.h>
#include "../pci.h"
+#include "../pci-bridge-emul.h"
/*
* PCIe unit register offsets.
@@ -63,61 +64,6 @@
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET BIT(20)
-enum {
- PCISWCAP = PCI_BRIDGE_CONTROL + 2,
- PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID,
- PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP,
- PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL,
- PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP,
- PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL,
- PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP,
- PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL,
- PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL,
- PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA,
- PCISWCAP_EXP_DEVCAP2 = PCISWCAP + PCI_EXP_DEVCAP2,
- PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2,
- PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2,
- PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2,
- PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2,
- PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2,
-};
-
-/* PCI configuration space of a PCI-to-PCI bridge */
-struct mvebu_sw_pci_bridge {
- u16 vendor;
- u16 device;
- u16 command;
- u16 status;
- u16 class;
- u8 interface;
- u8 revision;
- u8 bist;
- u8 header_type;
- u8 latency_timer;
- u8 cache_line_size;
- u32 bar[2];
- u8 primary_bus;
- u8 secondary_bus;
- u8 subordinate_bus;
- u8 secondary_latency_timer;
- u8 iobase;
- u8 iolimit;
- u16 secondary_status;
- u16 membase;
- u16 memlimit;
- u16 iobaseupper;
- u16 iolimitupper;
- u32 romaddr;
- u8 intline;
- u8 intpin;
- u16 bridgectrl;
-
- /* PCI express capability */
- u32 pcie_sltcap;
- u16 pcie_devctl;
- u16 pcie_rtctl;
-};
-
struct mvebu_pcie_port;
/* Structure representing all PCIe interfaces */
@@ -153,7 +99,7 @@ struct mvebu_pcie_port {
struct clk *clk;
struct gpio_desc *reset_gpio;
char *reset_name;
- struct mvebu_sw_pci_bridge bridge;
+ struct pci_bridge_emul bridge;
struct device_node *dn;
struct mvebu_pcie *pcie;
struct mvebu_pcie_window memwin;
@@ -415,11 +361,12 @@ static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
struct mvebu_pcie_window desired = {};
+ struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new iobase/iolimit values invalid? */
- if (port->bridge.iolimit < port->bridge.iobase ||
- port->bridge.iolimitupper < port->bridge.iobaseupper ||
- !(port->bridge.command & PCI_COMMAND_IO)) {
+ if (conf->iolimit < conf->iobase ||
+ conf->iolimitupper < conf->iobaseupper ||
+ !(conf->command & PCI_COMMAND_IO)) {
mvebu_pcie_set_window(port, port->io_target, port->io_attr,
&desired, &port->iowin);
return;
@@ -438,11 +385,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
* specifications. iobase is the bus address, port->iowin_base
* is the CPU address.
*/
- desired.remap = ((port->bridge.iobase & 0xF0) << 8) |
- (port->bridge.iobaseupper << 16);
+ desired.remap = ((conf->iobase & 0xF0) << 8) |
+ (conf->iobaseupper << 16);
desired.base = port->pcie->io.start + desired.remap;
- desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
- (port->bridge.iolimitupper << 16)) -
+ desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
+ (conf->iolimitupper << 16)) -
desired.remap) +
1;
@@ -453,10 +400,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
+ struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new membase/memlimit values invalid? */
- if (port->bridge.memlimit < port->bridge.membase ||
- !(port->bridge.command & PCI_COMMAND_MEMORY)) {
+ if (conf->memlimit < conf->membase ||
+ !(conf->command & PCI_COMMAND_MEMORY)) {
mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
&desired, &port->memwin);
return;
@@ -468,130 +416,32 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
* window to setup, according to the PCI-to-PCI bridge
* specifications.
*/
- desired.base = ((port->bridge.membase & 0xFFF0) << 16);
- desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ desired.base = ((conf->membase & 0xFFF0) << 16);
+ desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
desired.base + 1;
mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
&port->memwin);
}
-/*
- * Initialize the configuration space of the PCI-to-PCI bridge
- * associated with the given PCIe interface.
- */
-static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
-{
- struct mvebu_sw_pci_bridge *bridge = &port->bridge;
-
- memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge));
-
- bridge->class = PCI_CLASS_BRIDGE_PCI;
- bridge->vendor = PCI_VENDOR_ID_MARVELL;
- bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
- bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
- bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
- bridge->cache_line_size = 0x10;
-
- /* We support 32 bits I/O addressing */
- bridge->iobase = PCI_IO_RANGE_TYPE_32;
- bridge->iolimit = PCI_IO_RANGE_TYPE_32;
-
- /* Add capabilities */
- bridge->status = PCI_STATUS_CAP_LIST;
-}
-
-/*
- * Read the configuration space of the PCI-to-PCI bridge associated to
- * the given PCIe interface.
- */
-static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
- unsigned int where, int size, u32 *value)
+static pci_bridge_emul_read_status_t
+mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
{
- struct mvebu_sw_pci_bridge *bridge = &port->bridge;
-
- switch (where & ~3) {
- case PCI_VENDOR_ID:
- *value = bridge->device << 16 | bridge->vendor;
- break;
-
- case PCI_COMMAND:
- *value = bridge->command | bridge->status << 16;
- break;
-
- case PCI_CLASS_REVISION:
- *value = bridge->class << 16 | bridge->interface << 8 |
- bridge->revision;
- break;
+ struct mvebu_pcie_port *port = bridge->data;
- case PCI_CACHE_LINE_SIZE:
- *value = bridge->bist << 24 | bridge->header_type << 16 |
- bridge->latency_timer << 8 | bridge->cache_line_size;
- break;
-
- case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
- *value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4];
- break;
-
- case PCI_PRIMARY_BUS:
- *value = (bridge->secondary_latency_timer << 24 |
- bridge->subordinate_bus << 16 |
- bridge->secondary_bus << 8 |
- bridge->primary_bus);
- break;
-
- case PCI_IO_BASE:
- if (!mvebu_has_ioport(port))
- *value = bridge->secondary_status << 16;
- else
- *value = (bridge->secondary_status << 16 |
- bridge->iolimit << 8 |
- bridge->iobase);
- break;
-
- case PCI_MEMORY_BASE:
- *value = (bridge->memlimit << 16 | bridge->membase);
- break;
-
- case PCI_PREF_MEMORY_BASE:
- *value = 0;
- break;
-
- case PCI_IO_BASE_UPPER16:
- *value = (bridge->iolimitupper << 16 | bridge->iobaseupper);
- break;
-
- case PCI_CAPABILITY_LIST:
- *value = PCISWCAP;
- break;
-
- case PCI_ROM_ADDRESS1:
- *value = 0;
- break;
-
- case PCI_INTERRUPT_LINE:
- /* LINE PIN MIN_GNT MAX_LAT */
- *value = 0;
- break;
-
- case PCISWCAP_EXP_LIST_ID:
- /* Set PCIe v2, root port, slot support */
- *value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
- PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP;
- break;
-
- case PCISWCAP_EXP_DEVCAP:
+ switch (reg) {
+ case PCI_EXP_DEVCAP:
*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
break;
- case PCISWCAP_EXP_DEVCTL:
+ case PCI_EXP_DEVCTL:
*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
- *value |= bridge->pcie_devctl;
break;
- case PCISWCAP_EXP_LNKCAP:
+ case PCI_EXP_LNKCAP:
/*
* PCIe requires the clock power management capability to be
* hard-wired to zero for downstream ports
@@ -600,176 +450,140 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
~PCI_EXP_LNKCAP_CLKPM;
break;
- case PCISWCAP_EXP_LNKCTL:
+ case PCI_EXP_LNKCTL:
*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
break;
- case PCISWCAP_EXP_SLTCAP:
- *value = bridge->pcie_sltcap;
- break;
-
- case PCISWCAP_EXP_SLTCTL:
+ case PCI_EXP_SLTCTL:
*value = PCI_EXP_SLTSTA_PDS << 16;
break;
- case PCISWCAP_EXP_RTCTL:
- *value = bridge->pcie_rtctl;
- break;
-
- case PCISWCAP_EXP_RTSTA:
+ case PCI_EXP_RTSTA:
*value = mvebu_readl(port, PCIE_RC_RTSTA);
break;
- /* PCIe requires the v2 fields to be hard-wired to zero */
- case PCISWCAP_EXP_DEVCAP2:
- case PCISWCAP_EXP_DEVCTL2:
- case PCISWCAP_EXP_LNKCAP2:
- case PCISWCAP_EXP_LNKCTL2:
- case PCISWCAP_EXP_SLTCAP2:
- case PCISWCAP_EXP_SLTCTL2:
default:
- /*
- * PCI defines configuration read accesses to reserved or
- * unimplemented registers to read as zero and complete
- * normally.
- */
- *value = 0;
- return PCIBIOS_SUCCESSFUL;
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
}
- if (size == 2)
- *value = (*value >> (8 * (where & 3))) & 0xffff;
- else if (size == 1)
- *value = (*value >> (8 * (where & 3))) & 0xff;
-
- return PCIBIOS_SUCCESSFUL;
+ return PCI_BRIDGE_EMUL_HANDLED;
}
-/* Write to the PCI-to-PCI bridge configuration space */
-static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
- unsigned int where, int size, u32 value)
+static void
+mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
{
- struct mvebu_sw_pci_bridge *bridge = &port->bridge;
- u32 mask, reg;
- int err;
-
- if (size == 4)
- mask = 0x0;
- else if (size == 2)
- mask = ~(0xffff << ((where & 3) * 8));
- else if (size == 1)
- mask = ~(0xff << ((where & 3) * 8));
- else
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, &reg);
- if (err)
- return err;
+ struct mvebu_pcie_port *port = bridge->data;
+ struct pci_bridge_emul_conf *conf = &bridge->conf;
- value = (reg & mask) | value << ((where & 3) * 8);
-
- switch (where & ~3) {
+ switch (reg) {
case PCI_COMMAND:
{
- u32 old = bridge->command;
-
if (!mvebu_has_ioport(port))
- value &= ~PCI_COMMAND_IO;
+ conf->command &= ~PCI_COMMAND_IO;
- bridge->command = value & 0xffff;
- if ((old ^ bridge->command) & PCI_COMMAND_IO)
+ if ((old ^ new) & PCI_COMMAND_IO)
mvebu_pcie_handle_iobase_change(port);
- if ((old ^ bridge->command) & PCI_COMMAND_MEMORY)
+ if ((old ^ new) & PCI_COMMAND_MEMORY)
mvebu_pcie_handle_membase_change(port);
- break;
- }
- case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
- bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
break;
+ }
case PCI_IO_BASE:
/*
- * We also keep bit 1 set, it is a read-only bit that
+ * We keep bit 1 set; it is a read-only bit that
* indicates we support 32 bits addressing for the
* I/O
*/
- bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
- bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
+ conf->iobase |= PCI_IO_RANGE_TYPE_32;
+ conf->iolimit |= PCI_IO_RANGE_TYPE_32;
mvebu_pcie_handle_iobase_change(port);
break;
case PCI_MEMORY_BASE:
- bridge->membase = value & 0xffff;
- bridge->memlimit = value >> 16;
mvebu_pcie_handle_membase_change(port);
break;
case PCI_IO_BASE_UPPER16:
- bridge->iobaseupper = value & 0xffff;
- bridge->iolimitupper = value >> 16;
mvebu_pcie_handle_iobase_change(port);
break;
case PCI_PRIMARY_BUS:
- bridge->primary_bus = value & 0xff;
- bridge->secondary_bus = (value >> 8) & 0xff;
- bridge->subordinate_bus = (value >> 16) & 0xff;
- bridge->secondary_latency_timer = (value >> 24) & 0xff;
- mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus);
+ mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
+ break;
+
+ default:
break;
+ }
+}
+
+static void
+mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct mvebu_pcie_port *port = bridge->data;
- case PCISWCAP_EXP_DEVCTL:
+ switch (reg) {
+ case PCI_EXP_DEVCTL:
/*
* Armada370 data says these bits must always
* be zero when in root complex mode.
*/
- value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
+ new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
- /*
- * If the mask is 0xffff0000, then we only want to write
- * the device control register, rather than clearing the
- * RW1C bits in the device status register. Mask out the
- * status register bits.
- */
- if (mask == 0xffff0000)
- value &= 0xffff;
-
- mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
break;
- case PCISWCAP_EXP_LNKCTL:
+ case PCI_EXP_LNKCTL:
/*
* If we don't support CLKREQ, we must ensure that the
* CLKREQ enable bit always reads zero. Since we haven't
* had this capability, and it's dependent on board wiring,
* disable it for the time being.
*/
- value &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
+ new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- /*
- * If the mask is 0xffff0000, then we only want to write
- * the link control register, rather than clearing the
- * RW1C bits in the link status register. Mask out the
- * RW1C status register bits.
- */
- if (mask == 0xffff0000)
- value &= ~((PCI_EXP_LNKSTA_LABS |
- PCI_EXP_LNKSTA_LBMS) << 16);
-
- mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
break;
- case PCISWCAP_EXP_RTSTA:
- mvebu_writel(port, value, PCIE_RC_RTSTA);
+ case PCI_EXP_RTSTA:
+ mvebu_writel(port, new, PCIE_RC_RTSTA);
break;
+ }
+}
- default:
- break;
+struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
+ .write_base = mvebu_pci_bridge_emul_base_conf_write,
+ .read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
+ .write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
+};
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
+{
+ struct pci_bridge_emul *bridge = &port->bridge;
+
+ bridge->conf.vendor = PCI_VENDOR_ID_MARVELL;
+ bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
+ bridge->conf.class_revision =
+ mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
+
+ if (mvebu_has_ioport(port)) {
+ /* We support 32 bits I/O addressing */
+ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
+ bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
}
- return PCIBIOS_SUCCESSFUL;
+ bridge->has_pcie = true;
+ bridge->data = port;
+ bridge->ops = &mvebu_pci_bridge_emul_ops;
+
+ pci_bridge_emul_init(bridge);
}
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
@@ -789,8 +603,8 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
if (bus->number == 0 && port->devfn == devfn)
return port;
if (bus->number != 0 &&
- bus->number >= port->bridge.secondary_bus &&
- bus->number <= port->bridge.subordinate_bus)
+ bus->number >= port->bridge.conf.secondary_bus &&
+ bus->number <= port->bridge.conf.subordinate_bus)
return port;
}
@@ -811,7 +625,8 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
/* Access the emulated PCI-to-PCI bridge */
if (bus->number == 0)
- return mvebu_sw_pci_bridge_write(port, where, size, val);
+ return pci_bridge_emul_conf_write(&port->bridge, where,
+ size, val);
if (!mvebu_pcie_link_up(port))
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -839,7 +654,8 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
/* Access the emulated PCI-to-PCI bridge */
if (bus->number == 0)
- return mvebu_sw_pci_bridge_read(port, where, size, val);
+ return pci_bridge_emul_conf_read(&port->bridge, where,
+ size, val);
if (!mvebu_pcie_link_up(port)) {
*val = 0xffffffff;
@@ -1145,7 +961,6 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
struct device_node *np = dev->of_node;
- unsigned int i;
int ret;
INIT_LIST_HEAD(&pcie->resources);
@@ -1179,13 +994,58 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
resource_size(&pcie->io) - 1);
pcie->realio.name = "PCI I/O";
+ pci_add_resource(&pcie->resources, &pcie->realio);
+ }
+
+ return devm_request_pci_bus_resources(dev, &pcie->resources);
+}
+
+/*
+ * This is a copy of pci_host_probe(), except that it does the I/O
+ * remap as the last step, once we are sure we won't fail.
+ *
+ * It should be removed once the I/O remap error handling issue has
+ * been sorted out.
+ */
+static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
+{
+ struct mvebu_pcie *pcie;
+ struct pci_bus *bus, *child;
+ int ret;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret < 0) {
+ dev_err(bridge->dev.parent, "Scanning root bridge failed");
+ return ret;
+ }
+
+ pcie = pci_host_bridge_priv(bridge);
+ if (resource_size(&pcie->io) != 0) {
+ unsigned int i;
+
for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
pci_ioremap_io(i, pcie->io.start + i);
+ }
- pci_add_resource(&pcie->resources, &pcie->realio);
+ bus = bridge->bus;
+
+ /*
+ * We insert PCI resources into the iomem_resource and
+ * ioport_resource trees in either pci_bus_claim_resources()
+ * or pci_bus_assign_resources().
+ */
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_claim_resources(bus);
+ } else {
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
}
- return devm_request_pci_bus_resources(dev, &pcie->resources);
+ pci_bus_add_devices(bus);
+ return 0;
}
static int mvebu_pcie_probe(struct platform_device *pdev)
@@ -1253,7 +1113,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
mvebu_pcie_setup_hw(port);
mvebu_pcie_set_local_dev_nr(port, 1);
- mvebu_sw_pci_bridge_init(port);
+ mvebu_pci_bridge_emul_init(port);
}
pcie->nports = i;
@@ -1268,7 +1128,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
bridge->align_resource = mvebu_pcie_align_resource;
bridge->msi = pcie->msi;
- return pci_host_probe(bridge);
+ return mvebu_pci_host_probe(bridge);
}
static const struct of_device_id mvebu_pcie_of_match_table[] = {
diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c
index 9e87dd7f9ac3..c3a088910f48 100644
--- a/drivers/pci/controller/pcie-cadence-ep.c
+++ b/drivers/pci/controller/pcie-cadence-ep.c
@@ -258,7 +258,6 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
u8 intx, bool is_asserted)
{
struct cdns_pcie *pcie = &ep->pcie;
- u32 r = ep->max_regions - 1;
u32 offset;
u16 status;
u8 msg_code;
@@ -268,8 +267,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
/* Set the outbound region if needed. */
if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
ep->irq_pci_fn != fn)) {
- /* Last region was reserved for IRQ writes. */
- cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r,
+ /* First region was reserved for IRQ writes. */
+ cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0,
ep->irq_phys_addr);
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
ep->irq_pci_fn = fn;
@@ -347,8 +346,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
/* Set the outbound region if needed. */
if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
ep->irq_pci_fn != fn)) {
- /* Last region was reserved for IRQ writes. */
- cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1,
+ /* First region was reserved for IRQ writes. */
+ cdns_pcie_set_outbound_region(pcie, fn, 0,
false,
ep->irq_phys_addr,
pci_addr & ~pci_addr_mask,
@@ -356,7 +355,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
ep->irq_pci_fn = fn;
}
- writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
+ writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
return 0;
}
@@ -517,6 +516,8 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
goto free_epc_mem;
}
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
+ /* Reserve region 0 for IRQs */
+ set_bit(0, &ep->ob_region_map);
return 0;
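With bit 0 of ob_region_map set at probe, any allocator that scans the map for a free outbound region will skip the reserved IRQ region automatically. A sketch of such an allocation, using the kernel's generic bitmap helpers (the surrounding logic is illustrative, not code from this driver):

	u32 r;

	r = find_first_zero_bit(&ep->ob_region_map, ep->max_regions);
	if (r >= ep->max_regions)
		return -ENOMEM;		/* no free region left */
	set_bit(r, &ep->ob_region_map);	/* r can never be 0 here */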
diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c
index ec394f6a19c8..97e251090b4f 100644
--- a/drivers/pci/controller/pcie-cadence-host.c
+++ b/drivers/pci/controller/pcie-cadence-host.c
@@ -235,7 +235,6 @@ static int cdns_pcie_host_init(struct device *dev,
static int cdns_pcie_host_probe(struct platform_device *pdev)
{
- const char *type;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge;
@@ -268,12 +267,6 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
rc->device_id = 0xffff;
of_property_read_u16(np, "device-id", &rc->device_id);
- type = of_get_property(np, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
pcie->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->reg_base)) {
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c
index 86f1b002c846..cd795f6fc1e2 100644
--- a/drivers/pci/controller/pcie-cadence.c
+++ b/drivers/pci/controller/pcie-cadence.c
@@ -180,24 +180,26 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
return 0;
}
- phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+ phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
+ link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
for (i = 0; i < phy_count; i++) {
of_property_read_string_index(np, "phy-names", i, &name);
- phy[i] = devm_phy_optional_get(dev, name);
- if (IS_ERR(phy))
- return PTR_ERR(phy);
-
+ phy[i] = devm_phy_get(dev, name);
+ if (IS_ERR(phy[i])) {
+ ret = PTR_ERR(phy[i]);
+ goto err_phy;
+ }
link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
if (!link[i]) {
+ devm_phy_put(dev, phy[i]);
ret = -EINVAL;
- goto err_link;
+ goto err_phy;
}
}
@@ -207,13 +209,15 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
ret = cdns_pcie_enable_phy(pcie);
if (ret)
- goto err_link;
+ goto err_phy;
return 0;
-err_link:
- while (--i >= 0)
+err_phy:
+ while (--i >= 0) {
device_link_del(link[i]);
+ devm_phy_put(dev, phy[i]);
+ }
return ret;
}
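The devm_kzalloc(dev, sizeof(*x) * n, ...) to devm_kcalloc(dev, n, sizeof(*x), ...) conversions above are not cosmetic: the kcalloc family fails cleanly when n * size would overflow, instead of silently allocating a short buffer. The idiom, in a hypothetical probe path:

	struct phy **phys;

	phys = devm_kcalloc(dev, phy_count, sizeof(*phys), GFP_KERNEL);
	if (!phys)
		return -ENOMEM;	/* covers OOM and n * size overflow alike */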
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 3160e9342a2f..c20fd6bd68fd 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -630,14 +630,6 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
return (pcie->base + offset);
}
- /*
- * PAXC is connected to an internally emulated EP within the SoC. It
- * allows only one device.
- */
- if (pcie->ep_is_internal)
- if (slot > 0)
- return NULL;
-
return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
}
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 861dda69f366..d069a76cbb95 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -15,6 +15,7 @@
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
@@ -162,6 +163,7 @@ struct mtk_pcie_soc {
* @phy: pointer to PHY control block
* @lane: lane count
* @slot: port slot
+ * @irq: GIC irq
* @irq_domain: legacy INTx IRQ domain
* @inner_domain: inner IRQ domain
* @msi_domain: MSI IRQ domain
@@ -182,6 +184,7 @@ struct mtk_pcie_port {
struct phy *phy;
u32 lane;
u32 slot;
+ int irq;
struct irq_domain *irq_domain;
struct irq_domain *inner_domain;
struct irq_domain *msi_domain;
@@ -225,10 +228,8 @@ static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
clk_disable_unprepare(pcie->free_ck);
- if (dev->pm_domain) {
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
- }
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
}
static void mtk_pcie_port_free(struct mtk_pcie_port *port)
@@ -337,6 +338,17 @@ static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
{
struct mtk_pcie *pcie = bus->sysdata;
struct mtk_pcie_port *port;
+ struct pci_dev *dev = NULL;
+
+ /*
+ * Walk the bus hierarchy to get the devfn value
+ * of the port in the root bus.
+ */
+ while (bus && bus->number) {
+ dev = bus->self;
+ bus = dev->bus;
+ devfn = dev->devfn;
+ }
list_for_each_entry(port, &pcie->ports, list)
if (port->slot == PCI_SLOT(devfn))
@@ -383,75 +395,6 @@ static struct pci_ops mtk_pcie_ops_v2 = {
.write = mtk_pcie_config_write,
};
-static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
-{
- struct mtk_pcie *pcie = port->pcie;
- struct resource *mem = &pcie->mem;
- const struct mtk_pcie_soc *soc = port->pcie->soc;
- u32 val;
- size_t size;
- int err;
-
- /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
- if (pcie->base) {
- val = readl(pcie->base + PCIE_SYS_CFG_V2);
- val |= PCIE_CSR_LTSSM_EN(port->slot) |
- PCIE_CSR_ASPM_L1_EN(port->slot);
- writel(val, pcie->base + PCIE_SYS_CFG_V2);
- }
-
- /* Assert all reset signals */
- writel(0, port->base + PCIE_RST_CTRL);
-
- /*
- * Enable PCIe link down reset, if link status changed from link up to
- * link down, this will reset MAC control registers and configuration
- * space.
- */
- writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
-
- /* De-assert PHY, PE, PIPE, MAC and configuration reset */
- val = readl(port->base + PCIE_RST_CTRL);
- val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
- PCIE_MAC_SRSTB | PCIE_CRSTB;
- writel(val, port->base + PCIE_RST_CTRL);
-
- /* Set up vendor ID and class code */
- if (soc->need_fix_class_id) {
- val = PCI_VENDOR_ID_MEDIATEK;
- writew(val, port->base + PCIE_CONF_VEND_ID);
-
- val = PCI_CLASS_BRIDGE_HOST;
- writew(val, port->base + PCIE_CONF_CLASS_ID);
- }
-
- /* 100ms timeout value should be enough for Gen1/2 training */
- err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
- !!(val & PCIE_PORT_LINKUP_V2), 20,
- 100 * USEC_PER_MSEC);
- if (err)
- return -ETIMEDOUT;
-
- /* Set INTx mask */
- val = readl(port->base + PCIE_INT_MASK);
- val &= ~INTX_MASK;
- writel(val, port->base + PCIE_INT_MASK);
-
- /* Set AHB to PCIe translation windows */
- size = mem->end - mem->start;
- val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
- writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
-
- val = upper_32_bits(mem->start);
- writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
-
- /* Set PCIe to AXI translation memory space.*/
- val = fls(0xffffffff) | WIN_ENABLE;
- writel(val, port->base + PCIE_AXI_WINDOW0);
-
- return 0;
-}
-
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -590,6 +533,27 @@ static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
writel(val, port->base + PCIE_INT_MASK);
}
+static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
+{
+ struct mtk_pcie_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+ if (port->irq_domain)
+ irq_domain_remove(port->irq_domain);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ if (port->msi_domain)
+ irq_domain_remove(port->msi_domain);
+ if (port->inner_domain)
+ irq_domain_remove(port->inner_domain);
+ }
+
+ irq_dispose_mapping(port->irq);
+ }
+}
+
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -628,8 +592,6 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
ret = mtk_pcie_allocate_msi_domains(port);
if (ret)
return ret;
-
- mtk_pcie_enable_msi(port);
}
return 0;
@@ -682,7 +644,7 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
struct mtk_pcie *pcie = port->pcie;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- int err, irq;
+ int err;
err = mtk_pcie_init_irq_domain(port, node);
if (err) {
@@ -690,8 +652,81 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
return err;
}
- irq = platform_get_irq(pdev, port->slot);
- irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
+ port->irq = platform_get_irq(pdev, port->slot);
+ irq_set_chained_handler_and_data(port->irq,
+ mtk_pcie_intr_handler, port);
+
+ return 0;
+}
+
+static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct resource *mem = &pcie->mem;
+ const struct mtk_pcie_soc *soc = port->pcie->soc;
+ u32 val;
+ size_t size;
+ int err;
+
+ /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
+ if (pcie->base) {
+ val = readl(pcie->base + PCIE_SYS_CFG_V2);
+ val |= PCIE_CSR_LTSSM_EN(port->slot) |
+ PCIE_CSR_ASPM_L1_EN(port->slot);
+ writel(val, pcie->base + PCIE_SYS_CFG_V2);
+ }
+
+ /* Assert all reset signals */
+ writel(0, port->base + PCIE_RST_CTRL);
+
+ /*
+ * Enable PCIe link down reset: if the link status changes from link
+ * up to link down, this will reset the MAC control registers and
+ * configuration space.
+ */
+ writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+
+ /* De-assert PHY, PE, PIPE, MAC and configuration reset */
+ val = readl(port->base + PCIE_RST_CTRL);
+ val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
+ PCIE_MAC_SRSTB | PCIE_CRSTB;
+ writel(val, port->base + PCIE_RST_CTRL);
+
+ /* Set up vendor ID and class code */
+ if (soc->need_fix_class_id) {
+ val = PCI_VENDOR_ID_MEDIATEK;
+ writew(val, port->base + PCIE_CONF_VEND_ID);
+
+ val = PCI_CLASS_BRIDGE_PCI;
+ writew(val, port->base + PCIE_CONF_CLASS_ID);
+ }
+
+ /* 100ms timeout value should be enough for Gen1/2 training */
+ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
+ !!(val & PCIE_PORT_LINKUP_V2), 20,
+ 100 * USEC_PER_MSEC);
+ if (err)
+ return -ETIMEDOUT;
+
+ /* Set INTx mask */
+ val = readl(port->base + PCIE_INT_MASK);
+ val &= ~INTX_MASK;
+ writel(val, port->base + PCIE_INT_MASK);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ mtk_pcie_enable_msi(port);
+
+ /* Set AHB to PCIe translation windows */
+ size = mem->end - mem->start;
+ val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
+ writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
+
+ val = upper_32_bits(mem->start);
+ writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
+
+ /* Set PCIe to AXI translation memory space. */
+ val = fls(0xffffffff) | WIN_ENABLE;
+ writel(val, port->base + PCIE_AXI_WINDOW0);
return 0;
}
@@ -987,10 +1022,8 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
pcie->free_ck = NULL;
}
- if (dev->pm_domain) {
- pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
/* enable top level clock */
err = clk_prepare_enable(pcie->free_ck);
@@ -1002,10 +1035,8 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
return 0;
err_free_ck:
- if (dev->pm_domain) {
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
- }
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return err;
}
@@ -1109,36 +1140,10 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
if (err < 0)
return err;
- devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
-
- return 0;
-}
-
-static int mtk_pcie_register_host(struct pci_host_bridge *host)
-{
- struct mtk_pcie *pcie = pci_host_bridge_priv(host);
- struct pci_bus *child;
- int err;
-
- host->busnr = pcie->busn.start;
- host->dev.parent = pcie->dev;
- host->ops = pcie->soc->ops;
- host->map_irq = of_irq_parse_and_map_pci;
- host->swizzle_irq = pci_common_swizzle;
- host->sysdata = pcie;
-
- err = pci_scan_root_bus_bridge(host);
- if (err < 0)
+ err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
+ if (err)
return err;
- pci_bus_size_bridges(host->bus);
- pci_bus_assign_resources(host->bus);
-
- list_for_each_entry(child, &host->bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(host->bus);
-
return 0;
}
@@ -1168,7 +1173,14 @@ static int mtk_pcie_probe(struct platform_device *pdev)
if (err)
goto put_resources;
- err = mtk_pcie_register_host(host);
+ host->busnr = pcie->busn.start;
+ host->dev.parent = pcie->dev;
+ host->ops = pcie->soc->ops;
+ host->map_irq = of_irq_parse_and_map_pci;
+ host->swizzle_irq = pci_common_swizzle;
+ host->sysdata = pcie;
+
+ err = pci_host_probe(host);
if (err)
goto put_resources;
@@ -1181,6 +1193,80 @@ put_resources:
return err;
}
+
+static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
+{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct list_head *windows = &host->windows;
+
+ pci_free_resource_list(windows);
+}
+
+static int mtk_pcie_remove(struct platform_device *pdev)
+{
+ struct mtk_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+ pci_stop_root_bus(host->bus);
+ pci_remove_root_bus(host->bus);
+ mtk_pcie_free_resources(pcie);
+
+ mtk_pcie_irq_teardown(pcie);
+
+ mtk_pcie_put_resources(pcie);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+{
+ struct mtk_pcie *pcie = dev_get_drvdata(dev);
+ struct mtk_pcie_port *port;
+
+ if (list_empty(&pcie->ports))
+ return 0;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ clk_disable_unprepare(port->pipe_ck);
+ clk_disable_unprepare(port->obff_ck);
+ clk_disable_unprepare(port->axi_ck);
+ clk_disable_unprepare(port->aux_ck);
+ clk_disable_unprepare(port->ahb_ck);
+ clk_disable_unprepare(port->sys_ck);
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ }
+
+ clk_disable_unprepare(pcie->free_ck);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+{
+ struct mtk_pcie *pcie = dev_get_drvdata(dev);
+ struct mtk_pcie_port *port, *tmp;
+
+ if (list_empty(&pcie->ports))
+ return 0;
+
+ clk_prepare_enable(pcie->free_ck);
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ mtk_pcie_enable_port(port);
+
+ /* In case the EP was removed while the system was suspended. */
+ if (list_empty(&pcie->ports))
+ clk_disable_unprepare(pcie->free_ck);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
+};
+
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
.ops = &mtk_pcie_ops,
.startup = mtk_pcie_startup_port,
@@ -1209,10 +1295,13 @@ static const struct of_device_id mtk_pcie_ids[] = {
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
+ .remove = mtk_pcie_remove,
.driver = {
.name = "mtk-pcie",
.of_match_table = mtk_pcie_ids,
.suppress_bind_attrs = true,
+ .pm = &mtk_pcie_pm_ops,
},
};
-builtin_platform_driver(mtk_pcie_driver);
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index a939e8d31735..77052a0712d0 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -301,13 +301,6 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
struct platform_device *pdev = pcie->pdev;
struct device_node *node = dev->of_node;
struct resource *res;
- const char *type;
-
- type = of_get_property(node, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
/* map config resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index fb32840ce8e6..81538d77f790 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -777,16 +777,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
struct platform_device *pdev)
{
struct device *dev = pcie->dev;
- struct device_node *node = dev->of_node;
struct resource *res;
- const char *type;
-
- /* Check for device type */
- type = of_get_property(node, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
pcie->breg_base = devm_ioremap_resource(dev, res);
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 7b1389d8e2a5..9bd1a35cd5d8 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -574,15 +574,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct resource regs;
- const char *type;
int err;
- type = of_get_property(node, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
-
err = of_address_to_resource(node, 0, &regs);
if (err) {
dev_err(dev, "missing \"reg\" property\n");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index fd2dbd7eed7b..e50b0b5815ff 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -404,12 +404,10 @@ static int vmd_dma_supported(struct device *dev, u64 mask)
return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
}
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
static u64 vmd_get_required_mask(struct device *dev)
{
return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
}
-#endif
static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
@@ -450,9 +448,7 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
-#endif
add_dma_domain(domain);
}
#undef ASSIGN_VMD_DMA_OPS
@@ -813,12 +809,12 @@ static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
- vmd_detach_resources(vmd);
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
pci_remove_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
vmd_teardown_dma_ops(vmd);
+ vmd_detach_resources(vmd);
irq_domain_remove(vmd->irq_domain);
}
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
new file mode 100644
index 000000000000..a32070be5adf
--- /dev/null
+++ b/drivers/pci/hotplug/TODO
@@ -0,0 +1,74 @@
+Contributions are solicited in particular to remedy the following issues:
+
+cpcihp:
+
+* There are no implementations of the ->hardware_test, ->get_power and
+ ->set_power callbacks in struct cpci_hp_controller_ops. Why were they
+ introduced? Can they be removed from the struct?
+
+cpqphp:
+
+* The driver spawns a kthread cpqhp_event_thread() which is woken by the
+ hardirq handler cpqhp_ctrl_intr(). Convert this to threaded IRQ handling;
+ a sketch follows at the end of this file. The kthread is also woken from
+ the timer pushbutton_helper_thread(); convert it to call
+ irq_wake_thread(). Use pciehp as a template.
+
+* A large portion of cpqphp_ctrl.c and cpqphp_pci.c concerns resource
+ management. Doesn't this duplicate functionality in the core?
+
+ibmphp:
+
+* Implementations of hotplug_slot_ops callbacks such as get_adapter_present()
+ in ibmphp_core.c create a copy of the struct slot on the stack, then perform
+ the actual operation on that copy. Determine whether this overhead is
+ necessary and delete it if not. The functions also perform a NULL pointer
+ check on the struct hotplug_slot; this seems superfluous.
+
+* Several functions access the pci_slot member in struct hotplug_slot even
+ though pci_hotplug.h declares it private. See get_max_bus_speed() for an
+ example. Either the pci_slot member should no longer be declared private
+ or ibmphp should store a pointer to its bus in struct slot. Probably the
+ former.
+
+* The functions get_max_adapter_speed() and get_bus_name() are commented out.
+ Can they be deleted? There are also forward declarations at the top of
+ ibmphp_core.c as well as pointers in ibmphp_hotplug_slot_ops, likewise
+ commented out.
+
+* ibmphp_init_devno() takes a struct slot **, it could instead take a
+ struct slot *.
+
+* The return value of pci_hp_register() is not checked.
+
+* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
+ and once more in the error path of its caller ibmphp_access_ebda().
+
+* The various slot data structures are difficult to follow and need to be
+ simplified. A lot of functions are too large and too complex; they need
+ to be broken up into smaller, manageable pieces. Negative examples are
+ ebda_rsrc_controller() and configure_bridge().
+
+* A large portion of ibmphp_res.c and ibmphp_pci.c concerns resource
+ management. Doesn't this duplicate functionality in the core?
+
+sgi_hotplug:
+
+* Several functions access the pci_slot member in struct hotplug_slot even
+ though pci_hotplug.h declares it private. See sn_hp_destroy() for an
+ example. Either the pci_slot member should no longer be declared private
+ or sgi_hotplug should store a pointer to it in struct slot. Probably the
+ former.
+
+shpchp:
+
+* There is only a single implementation of struct hpc_ops. Can the struct be
+ removed and its functions invoked directly? This has already been done in
+ pciehp with commit 82a9e79ef132 ("PCI: pciehp: remove hpc_ops"). Clarify
+ whether there was a specific reason not to apply the same change to shpchp.
+
+* The ->get_mode1_ECC_cap callback in shpchp_hpc_ops is never invoked.
+ Why was it introduced? Can it be removed?
+
+* The hardirq handler shpc_isr() queues events on a workqueue. It can be
+ simplified by converting it to threaded IRQ handling. Use pciehp as a
+ template.
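A rough shape for the threaded-IRQ conversions suggested for cpqphp and shpchp above — the cpqhp_* names and the pending-event helper are hypothetical, and pciehp remains the authoritative template:

static irqreturn_t cpqhp_hardirq(int irq, void *data)
{
	struct controller *ctrl = data;

	if (!cpqhp_event_pending(ctrl))		/* hypothetical helper */
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;	/* defer sleepable work to the thread */
}

static irqreturn_t cpqhp_thread_fn(int irq, void *data)
{
	/* the body of the former cpqhp_event_thread() kthread runs here */
	return IRQ_HANDLED;
}

At probe time, request_threaded_irq(ctrl->irq, cpqhp_hardirq,
cpqhp_thread_fn, IRQF_SHARED, "cpqphp", ctrl) replaces the kthread plus
request_irq() pair, and the timer callback wakes the same thread with
irq_wake_thread(ctrl->irq, ctrl).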
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index e438a2d734f2..cf3058404f41 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -33,15 +33,19 @@ struct acpiphp_slot;
* struct slot - slot information for each *physical* slot
*/
struct slot {
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct acpiphp_slot *acpi_slot;
- struct hotplug_slot_info info;
unsigned int sun; /* ACPI _SUN (Slot User Number) value */
};
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
+}
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
/*
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index ad32ffbc4b91..c9e2bd40c038 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -57,7 +57,7 @@ static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
+static const struct hotplug_slot_ops acpi_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
@@ -118,7 +118,7 @@ EXPORT_SYMBOL_GPL(acpiphp_unregister_attention);
*/
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -135,7 +135,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
*/
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -179,7 +179,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
*/
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -225,7 +225,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -245,7 +245,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -266,39 +266,26 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
if (!slot)
goto error;
- slot->hotplug_slot = kzalloc(sizeof(*slot->hotplug_slot), GFP_KERNEL);
- if (!slot->hotplug_slot)
- goto error_slot;
-
- slot->hotplug_slot->info = &slot->info;
-
- slot->hotplug_slot->private = slot;
- slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
+ slot->hotplug_slot.ops = &acpi_hotplug_slot_ops;
slot->acpi_slot = acpiphp_slot;
- slot->hotplug_slot->info->power_status = acpiphp_get_power_status(slot->acpi_slot);
- slot->hotplug_slot->info->attention_status = 0;
- slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot);
- slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot);
acpiphp_slot->slot = slot;
slot->sun = sun;
snprintf(name, SLOT_NAME_SIZE, "%u", sun);
- retval = pci_hp_register(slot->hotplug_slot, acpiphp_slot->bus,
+ retval = pci_hp_register(&slot->hotplug_slot, acpiphp_slot->bus,
acpiphp_slot->device, name);
if (retval == -EBUSY)
- goto error_hpslot;
+ goto error_slot;
if (retval) {
pr_err("pci_hp_register failed with error %d\n", retval);
- goto error_hpslot;
+ goto error_slot;
}
pr_info("Slot [%s] registered\n", slot_name(slot));
return 0;
-error_hpslot:
- kfree(slot->hotplug_slot);
error_slot:
kfree(slot);
error:
@@ -312,8 +299,7 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
pr_info("Slot [%s] unregistered\n", slot_name(slot));
- pci_hp_deregister(slot->hotplug_slot);
- kfree(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
kfree(slot);
}
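The acpiphp conversion above is one instance of a pattern applied throughout this series: struct hotplug_slot is embedded in the driver's own slot structure and recovered with container_of(), which eliminates a separate allocation, its error path, and the ->private back-pointer. The generic shape, with hypothetical foo_* names:

struct foo_slot {
	struct hotplug_slot hotplug_slot;	/* embedded, not pointed-to */
	unsigned int state;
};

static inline struct foo_slot *to_foo_slot(struct hotplug_slot *hs)
{
	return container_of(hs, struct foo_slot, hotplug_slot);
}

static int foo_enable_slot(struct hotplug_slot *hotplug_slot)
{
	struct foo_slot *slot = to_foo_slot(hotplug_slot);

	return foo_power_on(slot);		/* hypothetical */
}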
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index ef0b1b6ba86f..12afa7fdf77e 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -457,17 +457,18 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
/**
* enable_slot - enable, configure a slot
* @slot: slot to be enabled
+ * @bridge: true if enable is for the whole bridge (not a single slot)
*
* This function should be called per *physical slot*,
* not per each slot object in ACPI namespace.
*/
-static void enable_slot(struct acpiphp_slot *slot)
+static void enable_slot(struct acpiphp_slot *slot, bool bridge)
{
struct pci_dev *dev;
struct pci_bus *bus = slot->bus;
struct acpiphp_func *func;
- if (bus->self && hotplug_is_native(bus->self)) {
+ if (bridge && bus->self && hotplug_is_native(bus->self)) {
/*
* If native hotplug is used, it will take care of hotplug
* slot management and resource allocation for hotplug
@@ -701,7 +702,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
trim_stale_devices(dev);
/* configure all functions */
- enable_slot(slot);
+ enable_slot(slot, true);
} else {
disable_slot(slot);
}
@@ -785,7 +786,7 @@ static void hotplug_event(u32 type, struct acpiphp_context *context)
if (bridge)
acpiphp_check_bridge(bridge);
else if (!(slot->flags & SLOT_IS_GOING_AWAY))
- enable_slot(slot);
+ enable_slot(slot, false);
break;
@@ -973,7 +974,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
/* configure all functions */
if (!(slot->flags & SLOT_ENABLED))
- enable_slot(slot);
+ enable_slot(slot, false);
pci_unlock_rescan_remove();
return 0;
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 41713f16ff97..df48b3b03ab4 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -41,7 +41,7 @@ MODULE_VERSION(DRIVER_VERSION);
#define IBM_HARDWARE_ID1 "IBM37D0"
#define IBM_HARDWARE_ID2 "IBM37D4"
-#define hpslot_to_sun(A) (((struct slot *)((A)->private))->sun)
+#define hpslot_to_sun(A) (to_slot(A)->sun)
/* union apci_descriptor - allows access to the
* various device descriptors that are embedded in the
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 4658557be01a..f33ff2bca414 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -32,8 +32,10 @@ struct slot {
unsigned int devfn;
struct pci_bus *bus;
struct pci_dev *dev;
+ unsigned int latch_status:1;
+ unsigned int adapter_status:1;
unsigned int extracting;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct list_head slot_list;
};
@@ -58,7 +60,12 @@ struct cpci_hp_controller {
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
+}
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
int cpci_hp_register_controller(struct cpci_hp_controller *controller);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 52a339baf06c..603eadf3d965 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -57,7 +57,7 @@ static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
+static const struct hotplug_slot_ops cpci_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
@@ -68,29 +68,9 @@ static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
};
static int
-update_latch_status(struct hotplug_slot *hotplug_slot, u8 value)
-{
- struct hotplug_slot_info info;
-
- memcpy(&info, hotplug_slot->info, sizeof(struct hotplug_slot_info));
- info.latch_status = value;
- return pci_hp_change_slot_info(hotplug_slot, &info);
-}
-
-static int
-update_adapter_status(struct hotplug_slot *hotplug_slot, u8 value)
-{
- struct hotplug_slot_info info;
-
- memcpy(&info, hotplug_slot->info, sizeof(struct hotplug_slot_info));
- info.adapter_status = value;
- return pci_hp_change_slot_info(hotplug_slot, &info);
-}
-
-static int
enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int retval = 0;
dbg("%s - physical_slot = %s", __func__, slot_name(slot));
@@ -103,7 +83,7 @@ enable_slot(struct hotplug_slot *hotplug_slot)
static int
disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int retval = 0;
dbg("%s - physical_slot = %s", __func__, slot_name(slot));
@@ -135,8 +115,7 @@ disable_slot(struct hotplug_slot *hotplug_slot)
goto disable_error;
}
- if (update_adapter_status(slot->hotplug_slot, 0))
- warn("failure to update adapter file");
+ slot->adapter_status = 0;
if (slot->extracting) {
slot->extracting = 0;
@@ -160,7 +139,7 @@ cpci_get_power_status(struct slot *slot)
static int
get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
*value = cpci_get_power_status(slot);
return 0;
@@ -169,7 +148,7 @@ get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int
get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
*value = cpci_get_attention_status(slot);
return 0;
@@ -178,27 +157,29 @@ get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int
set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
- return cpci_set_attention_status(hotplug_slot->private, status);
+ return cpci_set_attention_status(to_slot(hotplug_slot), status);
}
static int
get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- *value = hotplug_slot->info->adapter_status;
+ struct slot *slot = to_slot(hotplug_slot);
+
+ *value = slot->adapter_status;
return 0;
}
static int
get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- *value = hotplug_slot->info->latch_status;
+ struct slot *slot = to_slot(hotplug_slot);
+
+ *value = slot->latch_status;
return 0;
}
static void release_slot(struct slot *slot)
{
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
pci_dev_put(slot->dev);
kfree(slot);
}
@@ -209,8 +190,6 @@ int
cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
{
struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
int status;
int i;
@@ -229,43 +208,19 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
goto error;
}
- hotplug_slot =
- kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot) {
- status = -ENOMEM;
- goto error_slot;
- }
- slot->hotplug_slot = hotplug_slot;
-
- info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
- if (!info) {
- status = -ENOMEM;
- goto error_hpslot;
- }
- hotplug_slot->info = info;
-
slot->bus = bus;
slot->number = i;
slot->devfn = PCI_DEVFN(i, 0);
snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
- hotplug_slot->private = slot;
- hotplug_slot->ops = &cpci_hotplug_slot_ops;
-
- /*
- * Initialize the slot info structure with some known
- * good values.
- */
- dbg("initializing slot %s", name);
- info->power_status = cpci_get_power_status(slot);
- info->attention_status = cpci_get_attention_status(slot);
+ slot->hotplug_slot.ops = &cpci_hotplug_slot_ops;
dbg("registering slot %s", name);
- status = pci_hp_register(slot->hotplug_slot, bus, i, name);
+ status = pci_hp_register(&slot->hotplug_slot, bus, i, name);
if (status) {
err("pci_hp_register failed with error %d", status);
- goto error_info;
+ goto error_slot;
}
dbg("slot registered with name: %s", slot_name(slot));
@@ -276,10 +231,6 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
up_write(&list_rwsem);
}
return 0;
-error_info:
- kfree(info);
-error_hpslot:
- kfree(hotplug_slot);
error_slot:
kfree(slot);
error:
@@ -305,7 +256,7 @@ cpci_hp_unregister_bus(struct pci_bus *bus)
slots--;
dbg("deregistering slot %s", slot_name(slot));
- pci_hp_deregister(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
release_slot(slot);
}
}
@@ -359,10 +310,8 @@ init_slots(int clear_ins)
__func__, slot_name(slot));
dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0));
if (dev) {
- if (update_adapter_status(slot->hotplug_slot, 1))
- warn("failure to update adapter file");
- if (update_latch_status(slot->hotplug_slot, 1))
- warn("failure to update latch file");
+ slot->adapter_status = 1;
+ slot->latch_status = 1;
slot->dev = dev;
}
}
@@ -424,11 +373,8 @@ check_slots(void)
dbg("%s - slot %s HS_CSR (2) = %04x",
__func__, slot_name(slot), hs_csr);
- if (update_latch_status(slot->hotplug_slot, 1))
- warn("failure to update latch file");
-
- if (update_adapter_status(slot->hotplug_slot, 1))
- warn("failure to update adapter file");
+ slot->latch_status = 1;
+ slot->adapter_status = 1;
cpci_led_off(slot);
@@ -449,9 +395,7 @@ check_slots(void)
__func__, slot_name(slot), hs_csr);
if (!slot->extracting) {
- if (update_latch_status(slot->hotplug_slot, 0))
- warn("failure to update latch file");
-
+ slot->latch_status = 0;
slot->extracting = 1;
atomic_inc(&extracting);
}
@@ -465,8 +409,7 @@ check_slots(void)
*/
err("card in slot %s was improperly removed",
slot_name(slot));
- if (update_adapter_status(slot->hotplug_slot, 0))
- warn("failure to update adapter file");
+ slot->adapter_status = 0;
slot->extracting = 0;
atomic_dec(&extracting);
}
@@ -615,7 +558,7 @@ cleanup_slots(void)
goto cleanup_null;
list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
list_del(&slot->slot_list);
- pci_hp_deregister(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
release_slot(slot);
}
cleanup_null:
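With hotplug_slot_info gone, cpci keeps adapter_status and latch_status as plain u8 fields in struct slot, so an update is a single store instead of a copy of the whole info structure through pci_hp_change_slot_info(). A hedged before/after sketch, with memcpy standing in for the removed core call:

#include <string.h>

/* Old shape: one byte changes, but the whole info struct is copied. */
struct hotplug_slot_info {
        unsigned char latch_status;
        unsigned char adapter_status;
};

static void update_latch_old(struct hotplug_slot_info *info, unsigned char v)
{
        struct hotplug_slot_info tmp;

        memcpy(&tmp, info, sizeof(tmp));
        tmp.latch_status = v;
        memcpy(info, &tmp, sizeof(tmp)); /* stand-in for pci_hp_change_slot_info() */
}

/* New shape: the driver owns the byte, so the update is one store. */
struct slot {
        unsigned char latch_status;
        unsigned char adapter_status;
};

static void update_latch_new(struct slot *slot, unsigned char v)
{
        slot->latch_status = v;
}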
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 389b8fb50cd9..2c16adb7f4ec 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -194,8 +194,7 @@ int cpci_led_on(struct slot *slot)
slot->devfn,
hs_cap + 2,
hs_csr)) {
- err("Could not set LOO for slot %s",
- hotplug_slot_name(slot->hotplug_slot));
+ err("Could not set LOO for slot %s", slot_name(slot));
return -ENODEV;
}
}
@@ -223,8 +222,7 @@ int cpci_led_off(struct slot *slot)
slot->devfn,
hs_cap + 2,
hs_csr)) {
- err("Could not clear LOO for slot %s",
- hotplug_slot_name(slot->hotplug_slot));
+ err("Could not clear LOO for slot %s", slot_name(slot));
return -ENODEV;
}
}
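The LED error messages above now go through the driver's slot_name() helper, so call sites never dereference the embedded hotplug_slot member themselves. A tiny illustrative sketch of that indirection; the name field here is a stand-in, since the real hotplug_slot_name() reads the name from the associated pci_slot:

struct hotplug_slot {
        char name[10];          /* stand-in; the real name lives in pci_slot */
};

struct slot {
        struct hotplug_slot hotplug_slot;
};

static const char *hotplug_slot_name(const struct hotplug_slot *hotplug_slot)
{
        return hotplug_slot->name;
}

/* Call sites say slot_name(slot) and never touch the embedded member. */
static const char *slot_name(struct slot *slot)
{
        return hotplug_slot_name(&slot->hotplug_slot);
}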
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index db78b394a075..77e4e0142fbc 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -260,7 +260,7 @@ struct slot {
u8 hp_slot;
struct controller *ctrl;
void __iomem *p_sm_slot;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
};
struct pci_resource {
@@ -445,7 +445,12 @@ extern u8 cpqhp_disk_irq;
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
+}
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
/*
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 5a06636e910a..16bbb183695a 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -121,7 +121,6 @@ static int init_SERR(struct controller *ctrl)
{
u32 tempdword;
u32 number_of_slots;
- u8 physical_slot;
if (!ctrl)
return 1;
@@ -131,7 +130,6 @@ static int init_SERR(struct controller *ctrl)
number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
/* Loop through slots */
while (number_of_slots) {
- physical_slot = tempdword;
writeb(0, ctrl->hpc_reg + SLOT_SERR);
tempdword++;
number_of_slots--;
@@ -275,9 +273,7 @@ static int ctrl_slot_cleanup(struct controller *ctrl)
while (old_slot) {
next_slot = old_slot->next;
- pci_hp_deregister(old_slot->hotplug_slot);
- kfree(old_slot->hotplug_slot->info);
- kfree(old_slot->hotplug_slot);
+ pci_hp_deregister(&old_slot->hotplug_slot);
kfree(old_slot);
old_slot = next_slot;
}
@@ -419,7 +415,7 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func,
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
struct pci_func *slot_func;
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
u8 bus;
u8 devfn;
@@ -446,7 +442,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
static int process_SI(struct hotplug_slot *hotplug_slot)
{
struct pci_func *slot_func;
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
u8 bus;
u8 devfn;
@@ -478,7 +474,7 @@ static int process_SI(struct hotplug_slot *hotplug_slot)
static int process_SS(struct hotplug_slot *hotplug_slot)
{
struct pci_func *slot_func;
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
u8 bus;
u8 devfn;
@@ -505,7 +501,7 @@ static int process_SS(struct hotplug_slot *hotplug_slot)
static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -516,7 +512,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -527,7 +523,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -538,7 +534,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -550,7 +546,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -560,7 +556,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
+static const struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
.enable_slot = process_SI,
.disable_slot = process_SS,
@@ -578,8 +574,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
void __iomem *smbios_table)
{
struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *hotplug_slot_info;
struct pci_bus *bus = ctrl->pci_bus;
u8 number_of_slots;
u8 slot_device;
@@ -605,22 +599,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
goto error;
}
- slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
- GFP_KERNEL);
- if (!slot->hotplug_slot) {
- result = -ENOMEM;
- goto error_slot;
- }
- hotplug_slot = slot->hotplug_slot;
-
- hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
- GFP_KERNEL);
- if (!hotplug_slot->info) {
- result = -ENOMEM;
- goto error_hpslot;
- }
- hotplug_slot_info = hotplug_slot->info;
-
slot->ctrl = ctrl;
slot->bus = ctrl->bus;
slot->device = slot_device;
@@ -669,29 +647,20 @@ static int ctrl_slot_setup(struct controller *ctrl,
((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
/* register this slot with the hotplug pci core */
- hotplug_slot->private = slot;
snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
- hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
-
- hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
- hotplug_slot_info->attention_status =
- cpq_get_attention_status(ctrl, slot);
- hotplug_slot_info->latch_status =
- cpq_get_latch_status(ctrl, slot);
- hotplug_slot_info->adapter_status =
- get_presence_status(ctrl, slot);
+ slot->hotplug_slot.ops = &cpqphp_hotplug_slot_ops;
dbg("registering bus %d, dev %d, number %d, ctrl->slot_device_offset %d, slot %d\n",
slot->bus, slot->device,
slot->number, ctrl->slot_device_offset,
slot_number);
- result = pci_hp_register(hotplug_slot,
+ result = pci_hp_register(&slot->hotplug_slot,
ctrl->pci_dev->bus,
slot->device,
name);
if (result) {
err("pci_hp_register failed with error %d\n", result);
- goto error_info;
+ goto error_slot;
}
slot->next = ctrl->slot;
@@ -703,10 +672,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
}
return 0;
-error_info:
- kfree(hotplug_slot_info);
-error_hpslot:
- kfree(hotplug_slot);
error_slot:
kfree(slot);
error:
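Dropping the separate hotplug_slot and info allocations collapses the goto-based unwind in ctrl_slot_setup() to a single label. A self-contained sketch of the resulting shape, with calloc/free and a stub register function standing in for the kernel allocator and pci_hp_register():

#include <stdlib.h>

struct slot { int unused; };

static int register_slot(struct slot *slot)
{
        (void)slot;
        return 0;               /* pretend registration succeeded */
}

/* One allocation left, so one unwind label is enough. */
static int setup_one_slot(void)
{
        struct slot *slot;
        int status;

        slot = calloc(1, sizeof(*slot));
        if (!slot)
                return -1;

        status = register_slot(slot);
        if (status)
                goto error_slot;

        return 0;

error_slot:
        free(slot);
        return status;
}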
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 616df442520b..b7f4e1f099d9 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1130,9 +1130,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
for (slot = ctrl->slot; slot; slot = slot->next) {
if (slot->device == (hp_slot + ctrl->slot_device_offset))
continue;
- if (!slot->hotplug_slot || !slot->hotplug_slot->info)
- continue;
- if (slot->hotplug_slot->info->adapter_status == 0)
+ if (get_presence_status(ctrl, slot) == 0)
continue;
/* If another adapter is running on the same segment but at a
* lower speed/mode, we allow the new adapter to function at
@@ -1767,24 +1765,6 @@ void cpqhp_event_stop_thread(void)
}
-static int update_slot_info(struct controller *ctrl, struct slot *slot)
-{
- struct hotplug_slot_info *info;
- int result;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->power_status = get_slot_enabled(ctrl, slot);
- info->attention_status = cpq_get_attention_status(ctrl, slot);
- info->latch_status = cpq_get_latch_status(ctrl, slot);
- info->adapter_status = get_presence_status(ctrl, slot);
- result = pci_hp_change_slot_info(slot->hotplug_slot, info);
- kfree(info);
- return result;
-}
-
static void interrupt_event_handler(struct controller *ctrl)
{
int loop = 0;
@@ -1884,9 +1864,6 @@ static void interrupt_event_handler(struct controller *ctrl)
/***********POWER FAULT */
else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) {
dbg("power fault\n");
- } else {
- /* refresh notification */
- update_slot_info(ctrl, p_slot);
}
ctrl->event_queue[loop].event_type = 0;
@@ -2057,9 +2034,6 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
if (rc)
dbg("%s: rc = %d\n", __func__, rc);
- if (p_slot)
- update_slot_info(ctrl, p_slot);
-
return rc;
}
@@ -2125,9 +2099,6 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
rc = 1;
}
- if (p_slot)
- update_slot_info(ctrl, p_slot);
-
return rc;
}
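Note the direction of the set_controller_speed() change: instead of trusting a cached adapter_status that had to be kept in sync by update_slot_info(), the code now asks the hardware via get_presence_status() each time. An illustrative reduction of that trade, with a stub standing in for the register read:

/* Stand-in for a register read such as get_presence_status() above. */
static unsigned char read_presence_from_hw(void)
{
        return 1;
}

struct slot {
        unsigned char adapter_status;   /* cached copy; can go stale */
};

static int occupied_cached(const struct slot *slot)
{
        return slot->adapter_status;    /* old code: trusts the cache */
}

static int occupied_live(void)
{
        return read_presence_from_hw(); /* new code: asks the hardware */
}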
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index fddb78606c74..b89f850c3a4e 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -698,7 +698,7 @@ struct slot {
u8 supported_bus_mode;
u8 flag; /* this is for disable slot and polling */
u8 ctlr_index;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct controller *ctrl;
struct pci_func *func;
u8 irq[4];
@@ -740,7 +740,12 @@ int ibmphp_do_disable_slot(struct slot *slot_cur);
int ibmphp_update_slot_info(struct slot *); /* This function is called from HPC, so we need it to not be static */
int ibmphp_configure_card(struct pci_func *, u8);
int ibmphp_unconfigure_card(struct slot **, int);
-extern struct hotplug_slot_ops ibmphp_hotplug_slot_ops;
+extern const struct hotplug_slot_ops ibmphp_hotplug_slot_ops;
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
+}
#endif //__IBMPHP_H
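ibmphp_hotplug_slot_ops can become const because the core no longer writes owner and mod_name into the ops table (those fields move into struct hotplug_slot, as the pci_hotplug_core.c hunk below shows). A short sketch of the payoff; the struct here is a simplified stand-in:

struct hotplug_slot_ops {
        int (*enable_slot)(void);
        int (*disable_slot)(void);
};

static int enable_slot(void)
{
        return 0;
}

static int disable_slot(void)
{
        return 0;
}

/*
 * With owner/mod_name moved out of the ops table, nothing writes to it,
 * so it can live in .rodata; a stray write now faults instead of
 * silently corrupting the callbacks.
 */
static const struct hotplug_slot_ops ibm_ops = {
        .enable_slot  = enable_slot,
        .disable_slot = disable_slot,
};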
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 4ea57e9019f1..08a58e911fc2 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -247,11 +247,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
break;
}
if (rc == 0) {
- pslot = hotplug_slot->private;
- if (pslot)
- rc = ibmphp_hpc_writeslot(pslot, cmd);
- else
- rc = -ENODEV;
+ pslot = to_slot(hotplug_slot);
+ rc = ibmphp_hpc_writeslot(pslot, cmd);
}
} else
rc = -ENODEV;
@@ -273,19 +270,15 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc)
- rc = ibmphp_hpc_readslot(pslot,
- READ_EXTSLOTSTATUS,
- &(myslot.ext_status));
- if (!rc)
- *value = SLOT_ATTN(myslot.status,
- myslot.ext_status);
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc)
+ rc = ibmphp_hpc_readslot(pslot, READ_EXTSLOTSTATUS,
+ &myslot.ext_status);
+ if (!rc)
+ *value = SLOT_ATTN(myslot.status, myslot.ext_status);
}
ibmphp_unlock_operations();
@@ -303,14 +296,12 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
(ulong) hotplug_slot, (ulong) value);
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc)
- *value = SLOT_LATCH(myslot.status);
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc)
+ *value = SLOT_LATCH(myslot.status);
}
ibmphp_unlock_operations();
@@ -330,14 +321,12 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
(ulong) hotplug_slot, (ulong) value);
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc)
- *value = SLOT_PWRGD(myslot.status);
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc)
+ *value = SLOT_PWRGD(myslot.status);
}
ibmphp_unlock_operations();
@@ -357,18 +346,16 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value)
(ulong) hotplug_slot, (ulong) value);
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc) {
- present = SLOT_PRESENT(myslot.status);
- if (present == HPC_SLOT_EMPTY)
- *value = 0;
- else
- *value = 1;
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc) {
+ present = SLOT_PRESENT(myslot.status);
+ if (present == HPC_SLOT_EMPTY)
+ *value = 0;
+ else
+ *value = 1;
}
}
@@ -382,7 +369,7 @@ static int get_max_bus_speed(struct slot *slot)
int rc = 0;
u8 mode = 0;
enum pci_bus_speed speed;
- struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
+ struct pci_bus *bus = slot->hotplug_slot.pci_slot->bus;
debug("%s - Entry slot[%p]\n", __func__, slot);
@@ -582,29 +569,10 @@ static int validate(struct slot *slot_cur, int opn)
****************************************************************************/
int ibmphp_update_slot_info(struct slot *slot_cur)
{
- struct hotplug_slot_info *info;
- struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus;
- int rc;
+ struct pci_bus *bus = slot_cur->hotplug_slot.pci_slot->bus;
u8 bus_speed;
u8 mode;
- info = kmalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->power_status = SLOT_PWRGD(slot_cur->status);
- info->attention_status = SLOT_ATTN(slot_cur->status,
- slot_cur->ext_status);
- info->latch_status = SLOT_LATCH(slot_cur->status);
- if (!SLOT_PRESENT(slot_cur->status)) {
- info->adapter_status = 0;
-/* info->max_adapter_speed_status = MAX_ADAPTER_NONE; */
- } else {
- info->adapter_status = 1;
-/* get_max_adapter_speed_1(slot_cur->hotplug_slot,
- &info->max_adapter_speed_status, 0); */
- }
-
bus_speed = slot_cur->bus_on->current_speed;
mode = slot_cur->bus_on->current_bus_mode;
@@ -630,9 +598,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
bus->cur_bus_speed = bus_speed;
// To do: bus_names
- rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info);
- kfree(info);
- return rc;
+ return 0;
}
@@ -673,7 +639,7 @@ static void free_slots(void)
list_for_each_entry_safe(slot_cur, next, &ibmphp_slot_head,
ibm_slot_list) {
- pci_hp_del(slot_cur->hotplug_slot);
+ pci_hp_del(&slot_cur->hotplug_slot);
slot_cur->ctrl = NULL;
slot_cur->bus_on = NULL;
@@ -683,9 +649,7 @@ static void free_slots(void)
*/
ibmphp_unconfigure_card(&slot_cur, -1);
- pci_hp_destroy(slot_cur->hotplug_slot);
- kfree(slot_cur->hotplug_slot->info);
- kfree(slot_cur->hotplug_slot);
+ pci_hp_destroy(&slot_cur->hotplug_slot);
kfree(slot_cur);
}
debug("%s -- exit\n", __func__);
@@ -1007,7 +971,7 @@ static int enable_slot(struct hotplug_slot *hs)
ibmphp_lock_operations();
debug("ENABLING SLOT........\n");
- slot_cur = hs->private;
+ slot_cur = to_slot(hs);
rc = validate(slot_cur, ENABLE);
if (rc) {
@@ -1095,8 +1059,7 @@ static int enable_slot(struct hotplug_slot *hs)
slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL);
if (!slot_cur->func) {
- /* We cannot do update_slot_info here, since no memory for
- * kmalloc n.e.ways, and update_slot_info allocates some */
+ /* do update_slot_info here? */
rc = -ENOMEM;
goto error_power;
}
@@ -1169,7 +1132,7 @@ error_power:
**************************************************************/
static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int rc;
ibmphp_lock_operations();
@@ -1259,7 +1222,7 @@ error:
goto exit;
}
-struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
+const struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = ibmphp_disable_slot,
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 6f8e90e3ec08..11a2661dc062 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -666,36 +666,8 @@ static int fillslotinfo(struct hotplug_slot *hotplug_slot)
struct slot *slot;
int rc = 0;
- if (!hotplug_slot || !hotplug_slot->private)
- return -EINVAL;
-
- slot = hotplug_slot->private;
+ slot = to_slot(hotplug_slot);
rc = ibmphp_hpc_readslot(slot, READ_ALLSTAT, NULL);
- if (rc)
- return rc;
-
- // power - enabled:1 not:0
- hotplug_slot->info->power_status = SLOT_POWER(slot->status);
-
- // attention - off:0, on:1, blinking:2
- hotplug_slot->info->attention_status = SLOT_ATTN(slot->status, slot->ext_status);
-
- // latch - open:1 closed:0
- hotplug_slot->info->latch_status = SLOT_LATCH(slot->status);
-
- // pci board - present:1 not:0
- if (SLOT_PRESENT(slot->status))
- hotplug_slot->info->adapter_status = 1;
- else
- hotplug_slot->info->adapter_status = 0;
-/*
- if (slot->bus_on->supported_bus_mode
- && (slot->bus_on->supported_speed == BUS_SPEED_66))
- hotplug_slot->info->max_bus_speed_status = BUS_SPEED_66PCIX;
- else
- hotplug_slot->info->max_bus_speed_status = slot->bus_on->supported_speed;
-*/
-
return rc;
}
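fillslotinfo() shrinks to a bare ibmphp_hpc_readslot() because the SLOT_POWER()/SLOT_ATTN()/SLOT_LATCH()/SLOT_PRESENT() decoding now happens in the getters on demand. A hedged sketch of that kind of bit decode; the bit positions below are made up for illustration and do not match the ibmphp register layout:

#include <stdbool.h>

#define SLOT_PRESENT(s) (!!((s) & 0x01))        /* hypothetical bit 0 */
#define SLOT_LATCH(s)   (!!((s) & 0x02))        /* hypothetical bit 1 */

static bool adapter_present(unsigned char status)
{
        /* !! collapses any set bit to the 0/1 the hotplug core expects */
        return SLOT_PRESENT(status);
}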
@@ -712,7 +684,6 @@ static int __init ebda_rsrc_controller(void)
u8 ctlr_id, temp, bus_index;
u16 ctlr, slot, bus;
u16 slot_num, bus_num, index;
- struct hotplug_slot *hp_slot_ptr;
struct controller *hpc_ptr;
struct ebda_hpc_bus *bus_ptr;
struct ebda_hpc_slot *slot_ptr;
@@ -771,7 +742,7 @@ static int __init ebda_rsrc_controller(void)
bus_info_ptr1 = kzalloc(sizeof(struct bus_info), GFP_KERNEL);
if (!bus_info_ptr1) {
rc = -ENOMEM;
- goto error_no_hp_slot;
+ goto error_no_slot;
}
bus_info_ptr1->slot_min = slot_ptr->slot_num;
bus_info_ptr1->slot_max = slot_ptr->slot_num;
@@ -842,7 +813,7 @@ static int __init ebda_rsrc_controller(void)
(hpc_ptr->u.isa_ctlr.io_end - hpc_ptr->u.isa_ctlr.io_start + 1),
"ibmphp")) {
rc = -ENODEV;
- goto error_no_hp_slot;
+ goto error_no_slot;
}
hpc_ptr->irq = readb(io_mem + addr + 4);
addr += 5;
@@ -857,7 +828,7 @@ static int __init ebda_rsrc_controller(void)
break;
default:
rc = -ENODEV;
- goto error_no_hp_slot;
+ goto error_no_slot;
}
//reorganize chassis' linked list
@@ -870,19 +841,6 @@ static int __init ebda_rsrc_controller(void)
// register slots with hpc core as well as create linked list of ibm slot
for (index = 0; index < hpc_ptr->slot_count; index++) {
-
- hp_slot_ptr = kzalloc(sizeof(*hp_slot_ptr), GFP_KERNEL);
- if (!hp_slot_ptr) {
- rc = -ENOMEM;
- goto error_no_hp_slot;
- }
-
- hp_slot_ptr->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
- if (!hp_slot_ptr->info) {
- rc = -ENOMEM;
- goto error_no_hp_info;
- }
-
tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL);
if (!tmp_slot) {
rc = -ENOMEM;
@@ -909,7 +867,6 @@ static int __init ebda_rsrc_controller(void)
bus_info_ptr1 = ibmphp_find_same_bus_num(hpc_ptr->slots[index].slot_bus_num);
if (!bus_info_ptr1) {
- kfree(tmp_slot);
rc = -ENODEV;
goto error;
}
@@ -919,22 +876,19 @@ static int __init ebda_rsrc_controller(void)
tmp_slot->ctlr_index = hpc_ptr->slots[index].ctl_index;
tmp_slot->number = hpc_ptr->slots[index].slot_num;
- tmp_slot->hotplug_slot = hp_slot_ptr;
-
- hp_slot_ptr->private = tmp_slot;
- rc = fillslotinfo(hp_slot_ptr);
+ rc = fillslotinfo(&tmp_slot->hotplug_slot);
if (rc)
goto error;
- rc = ibmphp_init_devno((struct slot **) &hp_slot_ptr->private);
+ rc = ibmphp_init_devno(&tmp_slot);
if (rc)
goto error;
- hp_slot_ptr->ops = &ibmphp_hotplug_slot_ops;
+ tmp_slot->hotplug_slot.ops = &ibmphp_hotplug_slot_ops;
// end of registering ibm slot with hotplug core
- list_add(&((struct slot *)(hp_slot_ptr->private))->ibm_slot_list, &ibmphp_slot_head);
+ list_add(&tmp_slot->ibm_slot_list, &ibmphp_slot_head);
}
print_bus_info();
@@ -944,7 +898,7 @@ static int __init ebda_rsrc_controller(void)
list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) {
snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot));
- pci_hp_register(tmp_slot->hotplug_slot,
+ pci_hp_register(&tmp_slot->hotplug_slot,
pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name);
}
@@ -953,12 +907,8 @@ static int __init ebda_rsrc_controller(void)
return 0;
error:
- kfree(hp_slot_ptr->private);
+ kfree(tmp_slot);
error_no_slot:
- kfree(hp_slot_ptr->info);
-error_no_hp_info:
- kfree(hp_slot_ptr);
-error_no_hp_slot:
free_ebda_hpc(hpc_ptr);
error_no_hpc:
iounmap(io_mem);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 90fde5f106d8..5ac31f683b85 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -49,15 +49,13 @@ static DEFINE_MUTEX(pci_hp_mutex);
#define GET_STATUS(name, type) \
static int get_##name(struct hotplug_slot *slot, type *value) \
{ \
- struct hotplug_slot_ops *ops = slot->ops; \
+ const struct hotplug_slot_ops *ops = slot->ops; \
int retval = 0; \
- if (!try_module_get(ops->owner)) \
+ if (!try_module_get(slot->owner)) \
return -ENODEV; \
if (ops->get_##name) \
retval = ops->get_##name(slot, value); \
- else \
- *value = slot->info->name; \
- module_put(ops->owner); \
+ module_put(slot->owner); \
return retval; \
}
@@ -90,7 +88,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
power = (u8)(lpower & 0xff);
dbg("power = %d\n", power);
- if (!try_module_get(slot->ops->owner)) {
+ if (!try_module_get(slot->owner)) {
retval = -ENODEV;
goto exit;
}
@@ -109,7 +107,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
err("Illegal value specified for power\n");
retval = -EINVAL;
}
- module_put(slot->ops->owner);
+ module_put(slot->owner);
exit:
if (retval)
@@ -138,7 +136,8 @@ static ssize_t attention_read_file(struct pci_slot *pci_slot, char *buf)
static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
size_t count)
{
- struct hotplug_slot_ops *ops = pci_slot->hotplug->ops;
+ struct hotplug_slot *slot = pci_slot->hotplug;
+ const struct hotplug_slot_ops *ops = slot->ops;
unsigned long lattention;
u8 attention;
int retval = 0;
@@ -147,13 +146,13 @@ static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
attention = (u8)(lattention & 0xff);
dbg(" - attention = %d\n", attention);
- if (!try_module_get(ops->owner)) {
+ if (!try_module_get(slot->owner)) {
retval = -ENODEV;
goto exit;
}
if (ops->set_attention_status)
- retval = ops->set_attention_status(pci_slot->hotplug, attention);
- module_put(ops->owner);
+ retval = ops->set_attention_status(slot, attention);
+ module_put(slot->owner);
exit:
if (retval)
@@ -213,13 +212,13 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
test = (u32)(ltest & 0xffffffff);
dbg("test = %d\n", test);
- if (!try_module_get(slot->ops->owner)) {
+ if (!try_module_get(slot->owner)) {
retval = -ENODEV;
goto exit;
}
if (slot->ops->hardware_test)
retval = slot->ops->hardware_test(slot, test);
- module_put(slot->ops->owner);
+ module_put(slot->owner);
exit:
if (retval)
@@ -444,11 +443,11 @@ int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus,
if (slot == NULL)
return -ENODEV;
- if ((slot->info == NULL) || (slot->ops == NULL))
+ if (slot->ops == NULL)
return -EINVAL;
- slot->ops->owner = owner;
- slot->ops->mod_name = mod_name;
+ slot->owner = owner;
+ slot->mod_name = mod_name;
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
@@ -559,28 +558,6 @@ void pci_hp_destroy(struct hotplug_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_hp_destroy);
-/**
- * pci_hp_change_slot_info - changes the slot's information structure in the core
- * @slot: pointer to the slot whose info has changed
- * @info: pointer to the info copy into the slot's info structure
- *
- * @slot must have been registered with the pci
- * hotplug subsystem previously with a call to pci_hp_register().
- *
- * Returns 0 if successful, anything else for an error.
- */
-int pci_hp_change_slot_info(struct hotplug_slot *slot,
- struct hotplug_slot_info *info)
-{
- if (!slot || !info)
- return -ENODEV;
-
- memcpy(slot->info, info, sizeof(struct hotplug_slot_info));
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
-
static int __init pci_hotplug_init(void)
{
int result;
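The GET_STATUS() hunk above is a token-pasting generator: one macro stamps out a sysfs getter per attribute, and the patch reroutes the module reference from ops->owner to slot->owner while deleting the info-struct fallback. An illustrative, compilable reduction of the generator, with the try_module_get()/module_put() pair elided:

typedef unsigned char u8;

struct slot;

struct slot_ops {
        int (*get_power_status)(struct slot *, u8 *);
        int (*get_latch_status)(struct slot *, u8 *);
};

struct slot {
        const struct slot_ops *ops;
};

/*
 * One macro stamps out a getter per attribute; the module-reference
 * calls of the real macro are elided here.
 */
#define GET_STATUS(name)                                \
static int get_##name(struct slot *slot, u8 *value)     \
{                                                       \
        const struct slot_ops *ops = slot->ops;         \
        int retval = 0;                                 \
                                                        \
        if (ops->get_##name)                            \
                retval = ops->get_##name(slot, value);  \
        return retval;                                  \
}

GET_STATUS(power_status)
GET_STATUS(latch_status)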
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 811cf83f956d..506e1d923a1f 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -19,7 +19,6 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/delay.h>
-#include <linux/sched/signal.h> /* signal_pending() */
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
@@ -60,71 +59,63 @@ do { \
#define SLOT_NAME_SIZE 10
/**
- * struct slot - PCIe hotplug slot
- * @state: current state machine position
- * @ctrl: pointer to the slot's controller structure
- * @hotplug_slot: pointer to the structure registered with the PCI hotplug core
- * @work: work item to turn the slot on or off after 5 seconds in response to
- * an Attention Button press
- * @lock: protects reads and writes of @state;
- * protects scheduling, execution and cancellation of @work
- */
-struct slot {
- u8 state;
- struct controller *ctrl;
- struct hotplug_slot *hotplug_slot;
- struct delayed_work work;
- struct mutex lock;
-};
-
-/**
* struct controller - PCIe hotplug controller
- * @ctrl_lock: serializes writes to the Slot Control register
* @pcie: pointer to the controller's PCIe port service device
- * @reset_lock: prevents access to the Data Link Layer Link Active bit in the
- * Link Status register and to the Presence Detect State bit in the Slot
- * Status register during a slot reset which may cause them to flap
- * @slot: pointer to the controller's slot structure
- * @queue: wait queue to wake up on reception of a Command Completed event,
- * used for synchronous writes to the Slot Control register
* @slot_cap: cached copy of the Slot Capabilities register
* @slot_ctrl: cached copy of the Slot Control register
- * @poll_thread: thread to poll for slot events if no IRQ is available,
- * enabled with pciehp_poll_mode module parameter
+ * @ctrl_lock: serializes writes to the Slot Control register
* @cmd_started: jiffies when the Slot Control register was last written;
* the next write is allowed 1 second later, absent a Command Completed
* interrupt (PCIe r4.0, sec 6.7.3.2)
* @cmd_busy: flag set on Slot Control register write, cleared by IRQ handler
* on reception of a Command Completed event
- * @link_active_reporting: cached copy of Data Link Layer Link Active Reporting
- * Capable bit in Link Capabilities register; if this bit is zero, the
- * Data Link Layer Link Active bit in the Link Status register will never
- * be set and the driver is thus confined to wait 1 second before assuming
- * the link to a hotplugged device is up and accessing it
+ * @queue: wait queue to wake up on reception of a Command Completed event,
+ * used for synchronous writes to the Slot Control register
+ * @pending_events: used by the IRQ handler to save events retrieved from the
+ * Slot Status register for later consumption by the IRQ thread
* @notification_enabled: whether the IRQ was requested successfully
* @power_fault_detected: whether a power fault was detected by the hardware
* that has not yet been cleared by the user
- * @pending_events: used by the IRQ handler to save events retrieved from the
- * Slot Status register for later consumption by the IRQ thread
+ * @poll_thread: thread to poll for slot events if no IRQ is available,
+ * enabled with pciehp_poll_mode module parameter
+ * @state: current state machine position
+ * @state_lock: protects reads and writes of @state;
+ * protects scheduling, execution and cancellation of @button_work
+ * @button_work: work item to turn the slot on or off after 5 seconds
+ * in response to an Attention Button press
+ * @hotplug_slot: structure registered with the PCI hotplug core
+ * @reset_lock: prevents access to the Data Link Layer Link Active bit in the
+ * Link Status register and to the Presence Detect State bit in the Slot
+ * Status register during a slot reset which may cause them to flap
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
* used for synchronous slot enable/disable request via sysfs
+ *
+ * PCIe hotplug has a 1:1 relationship between controller and slot, hence
+ * unlike other drivers, the two aren't represented by separate structures.
*/
struct controller {
- struct mutex ctrl_lock;
struct pcie_device *pcie;
- struct rw_semaphore reset_lock;
- struct slot *slot;
- wait_queue_head_t queue;
- u32 slot_cap;
- u16 slot_ctrl;
- struct task_struct *poll_thread;
- unsigned long cmd_started; /* jiffies */
+
+ u32 slot_cap; /* capabilities and quirks */
+
+ u16 slot_ctrl; /* control register access */
+ struct mutex ctrl_lock;
+ unsigned long cmd_started;
unsigned int cmd_busy:1;
- unsigned int link_active_reporting:1;
+ wait_queue_head_t queue;
+
+ atomic_t pending_events; /* event handling */
unsigned int notification_enabled:1;
unsigned int power_fault_detected;
- atomic_t pending_events;
+ struct task_struct *poll_thread;
+
+ u8 state; /* state machine */
+ struct mutex state_lock;
+ struct delayed_work button_work;
+
+ struct hotplug_slot hotplug_slot; /* hotplug core interface */
+ struct rw_semaphore reset_lock;
int request_result;
wait_queue_head_t requester;
};
@@ -174,42 +165,50 @@ struct controller {
#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
#define PSN(ctrl) (((ctrl)->slot_cap & PCI_EXP_SLTCAP_PSN) >> 19)
-int pciehp_sysfs_enable_slot(struct slot *slot);
-int pciehp_sysfs_disable_slot(struct slot *slot);
void pciehp_request(struct controller *ctrl, int action);
-void pciehp_handle_button_press(struct slot *slot);
-void pciehp_handle_disable_request(struct slot *slot);
-void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events);
-int pciehp_configure_device(struct slot *p_slot);
-void pciehp_unconfigure_device(struct slot *p_slot);
+void pciehp_handle_button_press(struct controller *ctrl);
+void pciehp_handle_disable_request(struct controller *ctrl);
+void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events);
+int pciehp_configure_device(struct controller *ctrl);
+void pciehp_unconfigure_device(struct controller *ctrl, bool presence);
void pciehp_queue_pushbutton_work(struct work_struct *work);
struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
void pcie_shutdown_notification(struct controller *ctrl);
void pcie_clear_hotplug_events(struct controller *ctrl);
-int pciehp_power_on_slot(struct slot *slot);
-void pciehp_power_off_slot(struct slot *slot);
-void pciehp_get_power_status(struct slot *slot, u8 *status);
-void pciehp_get_attention_status(struct slot *slot, u8 *status);
-
-void pciehp_set_attention_status(struct slot *slot, u8 status);
-void pciehp_get_latch_status(struct slot *slot, u8 *status);
-void pciehp_get_adapter_status(struct slot *slot, u8 *status);
-int pciehp_query_power_fault(struct slot *slot);
-void pciehp_green_led_on(struct slot *slot);
-void pciehp_green_led_off(struct slot *slot);
-void pciehp_green_led_blink(struct slot *slot);
+void pcie_enable_interrupt(struct controller *ctrl);
+void pcie_disable_interrupt(struct controller *ctrl);
+int pciehp_power_on_slot(struct controller *ctrl);
+void pciehp_power_off_slot(struct controller *ctrl);
+void pciehp_get_power_status(struct controller *ctrl, u8 *status);
+
+void pciehp_set_attention_status(struct controller *ctrl, u8 status);
+void pciehp_get_latch_status(struct controller *ctrl, u8 *status);
+int pciehp_query_power_fault(struct controller *ctrl);
+void pciehp_green_led_on(struct controller *ctrl);
+void pciehp_green_led_off(struct controller *ctrl);
+void pciehp_green_led_blink(struct controller *ctrl);
+bool pciehp_card_present(struct controller *ctrl);
+bool pciehp_card_present_or_link_active(struct controller *ctrl);
int pciehp_check_link_status(struct controller *ctrl);
bool pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
-int pciehp_reset_slot(struct slot *slot, int probe);
+int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
+int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot);
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe);
+int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status);
int pciehp_set_raw_indicator_status(struct hotplug_slot *h_slot, u8 status);
int pciehp_get_raw_indicator_status(struct hotplug_slot *h_slot, u8 *status);
-static inline const char *slot_name(struct slot *slot)
+static inline const char *slot_name(struct controller *ctrl)
+{
+ return hotplug_slot_name(&ctrl->hotplug_slot);
+}
+
+static inline struct controller *to_ctrl(struct hotplug_slot *hotplug_slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return container_of(hotplug_slot, struct controller, hotplug_slot);
}
#endif /* _PCIEHP_H */
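Beyond merging struct slot into struct controller (they are 1:1 for PCIe hotplug), the reshuffled struct groups each cluster of fields next to the lock that serializes it, so the layout itself documents the locking rules. A userspace-flavored sketch of that convention; the field names are illustrative:

#include <pthread.h>

struct controller_sketch {
        /* control register access, serialized by ctrl_lock */
        pthread_mutex_t ctrl_lock;
        unsigned long cmd_started;
        unsigned int cmd_busy:1;

        /* slot state machine, serialized by state_lock */
        pthread_mutex_t state_lock;
        unsigned char state;
};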
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index ec48c9433ae5..fc5366b50e95 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -23,8 +23,6 @@
#include <linux/types.h>
#include <linux/pci.h>
#include "pciehp.h"
-#include <linux/interrupt.h>
-#include <linux/time.h>
#include "../pci.h"
@@ -47,45 +45,30 @@ MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
#define PCIE_MODULE_NAME "pciehp"
static int set_attention_status(struct hotplug_slot *slot, u8 value);
-static int enable_slot(struct hotplug_slot *slot);
-static int disable_slot(struct hotplug_slot *slot);
static int get_power_status(struct hotplug_slot *slot, u8 *value);
-static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
-static int reset_slot(struct hotplug_slot *slot, int probe);
static int init_slot(struct controller *ctrl)
{
- struct slot *slot = ctrl->slot;
- struct hotplug_slot *hotplug = NULL;
- struct hotplug_slot_info *info = NULL;
- struct hotplug_slot_ops *ops = NULL;
+ struct hotplug_slot_ops *ops;
char name[SLOT_NAME_SIZE];
- int retval = -ENOMEM;
-
- hotplug = kzalloc(sizeof(*hotplug), GFP_KERNEL);
- if (!hotplug)
- goto out;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto out;
+ int retval;
/* Setup hotplug slot ops */
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
- goto out;
+ return -ENOMEM;
- ops->enable_slot = enable_slot;
- ops->disable_slot = disable_slot;
+ ops->enable_slot = pciehp_sysfs_enable_slot;
+ ops->disable_slot = pciehp_sysfs_disable_slot;
ops->get_power_status = get_power_status;
ops->get_adapter_status = get_adapter_status;
- ops->reset_slot = reset_slot;
+ ops->reset_slot = pciehp_reset_slot;
if (MRL_SENS(ctrl))
ops->get_latch_status = get_latch_status;
if (ATTN_LED(ctrl)) {
- ops->get_attention_status = get_attention_status;
+ ops->get_attention_status = pciehp_get_attention_status;
ops->set_attention_status = set_attention_status;
} else if (ctrl->pcie->port->hotplug_user_indicators) {
ops->get_attention_status = pciehp_get_raw_indicator_status;
@@ -93,33 +76,24 @@ static int init_slot(struct controller *ctrl)
}
/* register this slot with the hotplug pci core */
- hotplug->info = info;
- hotplug->private = slot;
- hotplug->ops = ops;
- slot->hotplug_slot = hotplug;
+ ctrl->hotplug_slot.ops = ops;
snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
- retval = pci_hp_initialize(hotplug,
+ retval = pci_hp_initialize(&ctrl->hotplug_slot,
ctrl->pcie->port->subordinate, 0, name);
- if (retval)
- ctrl_err(ctrl, "pci_hp_initialize failed: error %d\n", retval);
-out:
if (retval) {
+ ctrl_err(ctrl, "pci_hp_initialize failed: error %d\n", retval);
kfree(ops);
- kfree(info);
- kfree(hotplug);
}
return retval;
}
static void cleanup_slot(struct controller *ctrl)
{
- struct hotplug_slot *hotplug_slot = ctrl->slot->hotplug_slot;
+ struct hotplug_slot *hotplug_slot = &ctrl->hotplug_slot;
pci_hp_destroy(hotplug_slot);
kfree(hotplug_slot->ops);
- kfree(hotplug_slot->info);
- kfree(hotplug_slot);
}
/*
@@ -127,79 +101,48 @@ static void cleanup_slot(struct controller *ctrl)
*/
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_set_attention_status(slot, status);
+ pciehp_set_attention_status(ctrl, status);
pci_config_pm_runtime_put(pdev);
return 0;
}
-
-static int enable_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- return pciehp_sysfs_enable_slot(slot);
-}
-
-
-static int disable_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- return pciehp_sysfs_disable_slot(slot);
-}
-
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_get_power_status(slot, value);
+ pciehp_get_power_status(ctrl, value);
pci_config_pm_runtime_put(pdev);
return 0;
}
-static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
-
- pciehp_get_attention_status(slot, value);
- return 0;
-}
-
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_get_latch_status(slot, value);
+ pciehp_get_latch_status(ctrl, value);
pci_config_pm_runtime_put(pdev);
return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_get_adapter_status(slot, value);
+ *value = pciehp_card_present_or_link_active(ctrl);
pci_config_pm_runtime_put(pdev);
return 0;
}
-static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
-{
- struct slot *slot = hotplug_slot->private;
-
- return pciehp_reset_slot(slot, probe);
-}
-
/**
* pciehp_check_presence() - synthesize event if presence has changed
*
@@ -212,20 +155,19 @@ static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
*/
static void pciehp_check_presence(struct controller *ctrl)
{
- struct slot *slot = ctrl->slot;
- u8 occupied;
+ bool occupied;
down_read(&ctrl->reset_lock);
- mutex_lock(&slot->lock);
+ mutex_lock(&ctrl->state_lock);
- pciehp_get_adapter_status(slot, &occupied);
- if ((occupied && (slot->state == OFF_STATE ||
- slot->state == BLINKINGON_STATE)) ||
- (!occupied && (slot->state == ON_STATE ||
- slot->state == BLINKINGOFF_STATE)))
+ occupied = pciehp_card_present_or_link_active(ctrl);
+ if ((occupied && (ctrl->state == OFF_STATE ||
+ ctrl->state == BLINKINGON_STATE)) ||
+ (!occupied && (ctrl->state == ON_STATE ||
+ ctrl->state == BLINKINGOFF_STATE)))
pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
up_read(&ctrl->reset_lock);
}
@@ -233,7 +175,6 @@ static int pciehp_probe(struct pcie_device *dev)
{
int rc;
struct controller *ctrl;
- struct slot *slot;
/* If this is not a "hotplug" service, we have no business here. */
if (dev->service != PCIE_PORT_SERVICE_HP)
@@ -271,8 +212,7 @@ static int pciehp_probe(struct pcie_device *dev)
}
/* Publish to user space */
- slot = ctrl->slot;
- rc = pci_hp_add(slot->hotplug_slot);
+ rc = pci_hp_add(&ctrl->hotplug_slot);
if (rc) {
ctrl_err(ctrl, "Publication to user space failed (%d)\n", rc);
goto err_out_shutdown_notification;
@@ -295,29 +235,43 @@ static void pciehp_remove(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
- pci_hp_del(ctrl->slot->hotplug_slot);
+ pci_hp_del(&ctrl->hotplug_slot);
pcie_shutdown_notification(ctrl);
cleanup_slot(ctrl);
pciehp_release_ctrl(ctrl);
}
#ifdef CONFIG_PM
+static bool pme_is_native(struct pcie_device *dev)
+{
+ const struct pci_host_bridge *host;
+
+ host = pci_find_host_bridge(dev->port->bus);
+ return pcie_ports_native || host->native_pme;
+}
+
static int pciehp_suspend(struct pcie_device *dev)
{
+ /*
+ * Disable hotplug interrupt so that it does not trigger
+ * immediately when the downstream link goes down.
+ */
+ if (pme_is_native(dev))
+ pcie_disable_interrupt(get_service_data(dev));
+
return 0;
}
static int pciehp_resume_noirq(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
- struct slot *slot = ctrl->slot;
/* pci_restore_state() just wrote to the Slot Control register */
ctrl->cmd_started = jiffies;
ctrl->cmd_busy = true;
/* clear spurious events from rediscovery of inserted card */
- if (slot->state == ON_STATE || slot->state == BLINKINGOFF_STATE)
+ if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE)
pcie_clear_hotplug_events(ctrl);
return 0;
@@ -327,10 +281,29 @@ static int pciehp_resume(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
+ if (pme_is_native(dev))
+ pcie_enable_interrupt(ctrl);
+
pciehp_check_presence(ctrl);
return 0;
}
+
+static int pciehp_runtime_resume(struct pcie_device *dev)
+{
+ struct controller *ctrl = get_service_data(dev);
+
+ /* pci_restore_state() just wrote to the Slot Control register */
+ ctrl->cmd_started = jiffies;
+ ctrl->cmd_busy = true;
+
+ /* clear spurious events from rediscovery of inserted card */
+ if ((ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE) &&
+ pme_is_native(dev))
+ pcie_clear_hotplug_events(ctrl);
+
+ return pciehp_resume(dev);
+}
#endif /* PM */
static struct pcie_port_service_driver hpdriver_portdrv = {
@@ -345,10 +318,12 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
.suspend = pciehp_suspend,
.resume_noirq = pciehp_resume_noirq,
.resume = pciehp_resume,
+ .runtime_suspend = pciehp_suspend,
+ .runtime_resume = pciehp_runtime_resume,
#endif /* PM */
};
-static int __init pcied_init(void)
+int __init pcie_hp_init(void)
{
int retval = 0;
@@ -359,4 +334,3 @@ static int __init pcied_init(void)
return retval;
}
-device_initcall(pcied_init);
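The new runtime-PM hooks reuse the system-sleep callbacks: pciehp_runtime_resume() repeats the noirq bookkeeping (pci_restore_state() has just rewritten the Slot Control register, so the command timestamp must be refreshed) and then falls through to the shared resume path. A hedged, self-contained sketch of that composition; jiffies_now() and the struct are stand-ins:

#include <stdbool.h>

struct ctrl_sketch {
        unsigned long cmd_started;
        bool cmd_busy;
};

static unsigned long jiffies_now(void)
{
        return 0;               /* stand-in for the kernel's jiffies */
}

static int do_resume(struct ctrl_sketch *ctrl)
{
        (void)ctrl;             /* re-enable interrupt, re-check presence */
        return 0;
}

/* Runtime resume = the post-restore bookkeeping plus the shared path. */
static int do_runtime_resume(struct ctrl_sketch *ctrl)
{
        ctrl->cmd_started = jiffies_now(); /* Slot Control was just rewritten */
        ctrl->cmd_busy = true;
        return do_resume(ctrl);
}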
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index da7c72372ffc..3f3df4c29f6e 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -13,24 +13,24 @@
*
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
-#include "../pci.h"
#include "pciehp.h"
/* The following routines constitute the bulk of the
hotplug controller logic
*/
-static void set_slot_off(struct controller *ctrl, struct slot *pslot)
+#define SAFE_REMOVAL true
+#define SURPRISE_REMOVAL false
+
+static void set_slot_off(struct controller *ctrl)
{
/* turn off slot, turn on Amber LED, turn off Green LED if supported*/
if (POWER_CTRL(ctrl)) {
- pciehp_power_off_slot(pslot);
+ pciehp_power_off_slot(ctrl);
/*
* After turning power off, we must wait for at least 1 second
@@ -40,31 +40,30 @@ static void set_slot_off(struct controller *ctrl, struct slot *pslot)
msleep(1000);
}
- pciehp_green_led_off(pslot);
- pciehp_set_attention_status(pslot, 1);
+ pciehp_green_led_off(ctrl);
+ pciehp_set_attention_status(ctrl, 1);
}
/**
* board_added - Called after a board has been added to the system.
- * @p_slot: &slot where board is added
+ * @ctrl: PCIe hotplug controller where board is added
*
* Turns power on for the board.
* Configures board.
*/
-static int board_added(struct slot *p_slot)
+static int board_added(struct controller *ctrl)
{
int retval = 0;
- struct controller *ctrl = p_slot->ctrl;
struct pci_bus *parent = ctrl->pcie->port->subordinate;
if (POWER_CTRL(ctrl)) {
/* Power on slot */
- retval = pciehp_power_on_slot(p_slot);
+ retval = pciehp_power_on_slot(ctrl);
if (retval)
return retval;
}
- pciehp_green_led_blink(p_slot);
+ pciehp_green_led_blink(ctrl);
/* Check link training status */
retval = pciehp_check_link_status(ctrl);
@@ -74,13 +73,13 @@ static int board_added(struct slot *p_slot)
}
/* Check for a power fault */
- if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
- ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(p_slot));
+ if (ctrl->power_fault_detected || pciehp_query_power_fault(ctrl)) {
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
retval = -EIO;
goto err_exit;
}
- retval = pciehp_configure_device(p_slot);
+ retval = pciehp_configure_device(ctrl);
if (retval) {
if (retval != -EEXIST) {
ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
@@ -89,27 +88,26 @@ static int board_added(struct slot *p_slot)
}
}
- pciehp_green_led_on(p_slot);
- pciehp_set_attention_status(p_slot, 0);
+ pciehp_green_led_on(ctrl);
+ pciehp_set_attention_status(ctrl, 0);
return 0;
err_exit:
- set_slot_off(ctrl, p_slot);
+ set_slot_off(ctrl);
return retval;
}
/**
* remove_board - Turns off slot and LEDs
- * @p_slot: slot where board is being removed
+ * @ctrl: PCIe hotplug controller where board is being removed
+ * @safe_removal: whether the board is safely removed (versus surprise removed)
*/
-static void remove_board(struct slot *p_slot)
+static void remove_board(struct controller *ctrl, bool safe_removal)
{
- struct controller *ctrl = p_slot->ctrl;
-
- pciehp_unconfigure_device(p_slot);
+ pciehp_unconfigure_device(ctrl, safe_removal);
if (POWER_CTRL(ctrl)) {
- pciehp_power_off_slot(p_slot);
+ pciehp_power_off_slot(ctrl);
/*
* After turning power off, we must wait for at least 1 second
@@ -120,11 +118,11 @@ static void remove_board(struct slot *p_slot)
}
/* turn off Green LED */
- pciehp_green_led_off(p_slot);
+ pciehp_green_led_off(ctrl);
}
-static int pciehp_enable_slot(struct slot *slot);
-static int pciehp_disable_slot(struct slot *slot);
+static int pciehp_enable_slot(struct controller *ctrl);
+static int pciehp_disable_slot(struct controller *ctrl, bool safe_removal);
void pciehp_request(struct controller *ctrl, int action)
{
@@ -135,11 +133,11 @@ void pciehp_request(struct controller *ctrl, int action)
void pciehp_queue_pushbutton_work(struct work_struct *work)
{
- struct slot *p_slot = container_of(work, struct slot, work.work);
- struct controller *ctrl = p_slot->ctrl;
+ struct controller *ctrl = container_of(work, struct controller,
+ button_work.work);
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGOFF_STATE:
pciehp_request(ctrl, DISABLE_SLOT);
break;
@@ -149,30 +147,28 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
default:
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
}
-void pciehp_handle_button_press(struct slot *p_slot)
+void pciehp_handle_button_press(struct controller *ctrl)
{
- struct controller *ctrl = p_slot->ctrl;
-
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case OFF_STATE:
case ON_STATE:
- if (p_slot->state == ON_STATE) {
- p_slot->state = BLINKINGOFF_STATE;
+ if (ctrl->state == ON_STATE) {
+ ctrl->state = BLINKINGOFF_STATE;
ctrl_info(ctrl, "Slot(%s): Powering off due to button press\n",
- slot_name(p_slot));
+ slot_name(ctrl));
} else {
- p_slot->state = BLINKINGON_STATE;
+ ctrl->state = BLINKINGON_STATE;
ctrl_info(ctrl, "Slot(%s) Powering on due to button press\n",
- slot_name(p_slot));
+ slot_name(ctrl));
}
/* blink green LED and turn off amber */
- pciehp_green_led_blink(p_slot);
- pciehp_set_attention_status(p_slot, 0);
- schedule_delayed_work(&p_slot->work, 5 * HZ);
+ pciehp_green_led_blink(ctrl);
+ pciehp_set_attention_status(ctrl, 0);
+ schedule_delayed_work(&ctrl->button_work, 5 * HZ);
break;
case BLINKINGOFF_STATE:
case BLINKINGON_STATE:
@@ -181,197 +177,184 @@ void pciehp_handle_button_press(struct slot *p_slot)
* press the attention again before the 5 sec. limit
* expires to cancel hot-add or hot-remove
*/
- ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(p_slot));
- cancel_delayed_work(&p_slot->work);
- if (p_slot->state == BLINKINGOFF_STATE) {
- p_slot->state = ON_STATE;
- pciehp_green_led_on(p_slot);
+ ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(ctrl));
+ cancel_delayed_work(&ctrl->button_work);
+ if (ctrl->state == BLINKINGOFF_STATE) {
+ ctrl->state = ON_STATE;
+ pciehp_green_led_on(ctrl);
} else {
- p_slot->state = OFF_STATE;
- pciehp_green_led_off(p_slot);
+ ctrl->state = OFF_STATE;
+ pciehp_green_led_off(ctrl);
}
- pciehp_set_attention_status(p_slot, 0);
+ pciehp_set_attention_status(ctrl, 0);
ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
default:
ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
- slot_name(p_slot), p_slot->state);
+ slot_name(ctrl), ctrl->state);
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
}
-void pciehp_handle_disable_request(struct slot *slot)
+void pciehp_handle_disable_request(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
- mutex_lock(&slot->lock);
- switch (slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGON_STATE:
case BLINKINGOFF_STATE:
- cancel_delayed_work(&slot->work);
+ cancel_delayed_work(&ctrl->button_work);
break;
}
- slot->state = POWEROFF_STATE;
- mutex_unlock(&slot->lock);
+ ctrl->state = POWEROFF_STATE;
+ mutex_unlock(&ctrl->state_lock);
- ctrl->request_result = pciehp_disable_slot(slot);
+ ctrl->request_result = pciehp_disable_slot(ctrl, SAFE_REMOVAL);
}
-void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events)
+void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
{
- struct controller *ctrl = slot->ctrl;
- bool link_active;
- u8 present;
+ bool present, link_active;
/*
* If the slot is on and presence or link has changed, turn it off.
* Even if it's occupied again, we cannot assume the card is the same.
*/
- mutex_lock(&slot->lock);
- switch (slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGOFF_STATE:
- cancel_delayed_work(&slot->work);
+ cancel_delayed_work(&ctrl->button_work);
/* fall through */
case ON_STATE:
- slot->state = POWEROFF_STATE;
- mutex_unlock(&slot->lock);
+ ctrl->state = POWEROFF_STATE;
+ mutex_unlock(&ctrl->state_lock);
if (events & PCI_EXP_SLTSTA_DLLSC)
ctrl_info(ctrl, "Slot(%s): Link Down\n",
- slot_name(slot));
+ slot_name(ctrl));
if (events & PCI_EXP_SLTSTA_PDC)
ctrl_info(ctrl, "Slot(%s): Card not present\n",
- slot_name(slot));
- pciehp_disable_slot(slot);
+ slot_name(ctrl));
+ pciehp_disable_slot(ctrl, SURPRISE_REMOVAL);
break;
default:
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
break;
}
/* Turn the slot on if it's occupied or link is up */
- mutex_lock(&slot->lock);
- pciehp_get_adapter_status(slot, &present);
+ mutex_lock(&ctrl->state_lock);
+ present = pciehp_card_present(ctrl);
link_active = pciehp_check_link_active(ctrl);
if (!present && !link_active) {
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
return;
}
- switch (slot->state) {
+ switch (ctrl->state) {
case BLINKINGON_STATE:
- cancel_delayed_work(&slot->work);
+ cancel_delayed_work(&ctrl->button_work);
/* fall through */
case OFF_STATE:
- slot->state = POWERON_STATE;
- mutex_unlock(&slot->lock);
+ ctrl->state = POWERON_STATE;
+ mutex_unlock(&ctrl->state_lock);
if (present)
ctrl_info(ctrl, "Slot(%s): Card present\n",
- slot_name(slot));
+ slot_name(ctrl));
if (link_active)
ctrl_info(ctrl, "Slot(%s): Link Up\n",
- slot_name(slot));
- ctrl->request_result = pciehp_enable_slot(slot);
+ slot_name(ctrl));
+ ctrl->request_result = pciehp_enable_slot(ctrl);
break;
default:
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
break;
}
}
-static int __pciehp_enable_slot(struct slot *p_slot)
+static int __pciehp_enable_slot(struct controller *ctrl)
{
u8 getstatus = 0;
- struct controller *ctrl = p_slot->ctrl;
- pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus) {
- ctrl_info(ctrl, "Slot(%s): No adapter\n", slot_name(p_slot));
- return -ENODEV;
- }
- if (MRL_SENS(p_slot->ctrl)) {
- pciehp_get_latch_status(p_slot, &getstatus);
+ if (MRL_SENS(ctrl)) {
+ pciehp_get_latch_status(ctrl, &getstatus);
if (getstatus) {
ctrl_info(ctrl, "Slot(%s): Latch open\n",
- slot_name(p_slot));
+ slot_name(ctrl));
return -ENODEV;
}
}
- if (POWER_CTRL(p_slot->ctrl)) {
- pciehp_get_power_status(p_slot, &getstatus);
+ if (POWER_CTRL(ctrl)) {
+ pciehp_get_power_status(ctrl, &getstatus);
if (getstatus) {
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
return 0;
}
}
- return board_added(p_slot);
+ return board_added(ctrl);
}
-static int pciehp_enable_slot(struct slot *slot)
+static int pciehp_enable_slot(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
int ret;
pm_runtime_get_sync(&ctrl->pcie->port->dev);
- ret = __pciehp_enable_slot(slot);
+ ret = __pciehp_enable_slot(ctrl);
if (ret && ATTN_BUTTN(ctrl))
- pciehp_green_led_off(slot); /* may be blinking */
+ pciehp_green_led_off(ctrl); /* may be blinking */
pm_runtime_put(&ctrl->pcie->port->dev);
- mutex_lock(&slot->lock);
- slot->state = ret ? OFF_STATE : ON_STATE;
- mutex_unlock(&slot->lock);
+ mutex_lock(&ctrl->state_lock);
+ ctrl->state = ret ? OFF_STATE : ON_STATE;
+ mutex_unlock(&ctrl->state_lock);
return ret;
}
-static int __pciehp_disable_slot(struct slot *p_slot)
+static int __pciehp_disable_slot(struct controller *ctrl, bool safe_removal)
{
u8 getstatus = 0;
- struct controller *ctrl = p_slot->ctrl;
- if (POWER_CTRL(p_slot->ctrl)) {
- pciehp_get_power_status(p_slot, &getstatus);
+ if (POWER_CTRL(ctrl)) {
+ pciehp_get_power_status(ctrl, &getstatus);
if (!getstatus) {
ctrl_info(ctrl, "Slot(%s): Already disabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
return -EINVAL;
}
}
- remove_board(p_slot);
+ remove_board(ctrl, safe_removal);
return 0;
}
-static int pciehp_disable_slot(struct slot *slot)
+static int pciehp_disable_slot(struct controller *ctrl, bool safe_removal)
{
- struct controller *ctrl = slot->ctrl;
int ret;
pm_runtime_get_sync(&ctrl->pcie->port->dev);
- ret = __pciehp_disable_slot(slot);
+ ret = __pciehp_disable_slot(ctrl, safe_removal);
pm_runtime_put(&ctrl->pcie->port->dev);
- mutex_lock(&slot->lock);
- slot->state = OFF_STATE;
- mutex_unlock(&slot->lock);
+ mutex_lock(&ctrl->state_lock);
+ ctrl->state = OFF_STATE;
+ mutex_unlock(&ctrl->state_lock);
return ret;
}
-int pciehp_sysfs_enable_slot(struct slot *p_slot)
+int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct controller *ctrl = p_slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGON_STATE:
case OFF_STATE:
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
/*
* The IRQ thread becomes a no-op if the user pulls out the
* card before the thread wakes up, so initialize to -ENODEV.
@@ -383,53 +366,53 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
return ctrl->request_result;
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
case BLINKINGOFF_STATE:
case ON_STATE:
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
default:
ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n",
- slot_name(p_slot), p_slot->state);
+ slot_name(ctrl), ctrl->state);
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
return -ENODEV;
}
-int pciehp_sysfs_disable_slot(struct slot *p_slot)
+int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct controller *ctrl = p_slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGOFF_STATE:
case ON_STATE:
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
pciehp_request(ctrl, DISABLE_SLOT);
wait_event(ctrl->requester,
!atomic_read(&ctrl->pending_events));
return ctrl->request_result;
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
case BLINKINGON_STATE:
case OFF_STATE:
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already disabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
default:
ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n",
- slot_name(p_slot), p_slot->state);
+ slot_name(ctrl), ctrl->state);
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
return -ENODEV;
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index a938abdb41ce..7dd443aea5a5 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -13,15 +13,12 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/types.h>
-#include <linux/signal.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
-#include <linux/time.h>
#include <linux/slab.h>
#include "../pci.h"
@@ -43,7 +40,7 @@ static inline int pciehp_request_irq(struct controller *ctrl)
if (pciehp_poll_mode) {
ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
"pciehp_poll-%s",
- slot_name(ctrl->slot));
+ slot_name(ctrl));
return PTR_ERR_OR_ZERO(ctrl->poll_thread);
}
@@ -217,13 +214,6 @@ bool pciehp_check_link_active(struct controller *ctrl)
return ret;
}
-static void pcie_wait_link_active(struct controller *ctrl)
-{
- struct pci_dev *pdev = ctrl_dev(ctrl);
-
- pcie_wait_for_link(pdev, true);
-}
-
static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
{
u32 l;
@@ -256,18 +246,9 @@ int pciehp_check_link_status(struct controller *ctrl)
bool found;
u16 lnk_status;
- /*
- * Data Link Layer Link Active Reporting must be capable for
- * hot-plug capable downstream port. But old controller might
- * not implement it. In this case, we wait for 1000 ms.
- */
- if (ctrl->link_active_reporting)
- pcie_wait_link_active(ctrl);
- else
- msleep(1000);
+ if (!pcie_wait_for_link(pdev, true))
+ return -1;
- /* wait 100ms before read pci conf, and try in 1s */
- msleep(100);
found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
PCI_DEVFN(0, 0));
@@ -318,8 +299,8 @@ static int pciehp_link_enable(struct controller *ctrl)
int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
u8 *status)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
pci_config_pm_runtime_get(pdev);
@@ -329,9 +310,9 @@ int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-void pciehp_get_attention_status(struct slot *slot, u8 *status)
+int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
{
- struct controller *ctrl = slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
@@ -355,11 +336,12 @@ void pciehp_get_attention_status(struct slot *slot, u8 *status)
*status = 0xFF;
break;
}
+
+ return 0;
}
-void pciehp_get_power_status(struct slot *slot, u8 *status)
+void pciehp_get_power_status(struct controller *ctrl, u8 *status)
{
- struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
@@ -380,27 +362,41 @@ void pciehp_get_power_status(struct slot *slot, u8 *status)
}
}
-void pciehp_get_latch_status(struct slot *slot, u8 *status)
+void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
{
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
}
-void pciehp_get_adapter_status(struct slot *slot, u8 *status)
+bool pciehp_card_present(struct controller *ctrl)
{
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- *status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
+ return slot_status & PCI_EXP_SLTSTA_PDS;
}
-int pciehp_query_power_fault(struct slot *slot)
+/**
+ * pciehp_card_present_or_link_active() - whether given slot is occupied
+ * @ctrl: PCIe hotplug controller
+ *
+ * Unlike pciehp_card_present(), which determines presence solely from the
+ * Presence Detect State bit, this helper also returns true if the Link Active
+ * bit is set. This is a concession to broken hotplug ports which hardwire
+ * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
+ */
+bool pciehp_card_present_or_link_active(struct controller *ctrl)
+{
+ return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl);
+}
+
+int pciehp_query_power_fault(struct controller *ctrl)
{
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
@@ -410,8 +406,7 @@ int pciehp_query_power_fault(struct slot *slot)
int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
u8 status)
{
- struct slot *slot = hotplug_slot->private;
- struct controller *ctrl = slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
pci_config_pm_runtime_get(pdev);
@@ -421,9 +416,8 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-void pciehp_set_attention_status(struct slot *slot, u8 value)
+void pciehp_set_attention_status(struct controller *ctrl, u8 value)
{
- struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
if (!ATTN_LED(ctrl))
@@ -447,10 +441,8 @@ void pciehp_set_attention_status(struct slot *slot, u8 value)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
}
-void pciehp_green_led_on(struct slot *slot)
+void pciehp_green_led_on(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
if (!PWR_LED(ctrl))
return;
@@ -461,10 +453,8 @@ void pciehp_green_led_on(struct slot *slot)
PCI_EXP_SLTCTL_PWR_IND_ON);
}
-void pciehp_green_led_off(struct slot *slot)
+void pciehp_green_led_off(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
if (!PWR_LED(ctrl))
return;
@@ -475,10 +465,8 @@ void pciehp_green_led_off(struct slot *slot)
PCI_EXP_SLTCTL_PWR_IND_OFF);
}
-void pciehp_green_led_blink(struct slot *slot)
+void pciehp_green_led_blink(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
if (!PWR_LED(ctrl))
return;
@@ -489,9 +477,8 @@ void pciehp_green_led_blink(struct slot *slot)
PCI_EXP_SLTCTL_PWR_IND_BLINK);
}
-int pciehp_power_on_slot(struct slot *slot)
+int pciehp_power_on_slot(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
int retval;
@@ -515,10 +502,8 @@ int pciehp_power_on_slot(struct slot *slot)
return retval;
}
-void pciehp_power_off_slot(struct slot *slot)
+void pciehp_power_off_slot(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
@@ -533,9 +518,11 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
u16 status, events;
/*
- * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
+ * Interrupts only occur in D3hot or shallower and only if enabled
+ * in the Slot Control register (PCIe r4.0, sec 6.7.3.4).
*/
- if (pdev->current_state == PCI_D3cold)
+ if (pdev->current_state == PCI_D3cold ||
+ (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
return IRQ_NONE;
/*
@@ -616,7 +603,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
- struct slot *slot = ctrl->slot;
irqreturn_t ret;
u32 events;
@@ -642,16 +628,16 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
/* Check Attention Button Pressed */
if (events & PCI_EXP_SLTSTA_ABP) {
ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
- slot_name(slot));
- pciehp_handle_button_press(slot);
+ slot_name(ctrl));
+ pciehp_handle_button_press(ctrl);
}
/* Check Power Fault Detected */
if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
ctrl->power_fault_detected = 1;
- ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
- pciehp_set_attention_status(slot, 1);
- pciehp_green_led_off(slot);
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
+ pciehp_set_attention_status(ctrl, 1);
+ pciehp_green_led_off(ctrl);
}
/*
@@ -660,9 +646,9 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
*/
down_read(&ctrl->reset_lock);
if (events & DISABLE_SLOT)
- pciehp_handle_disable_request(slot);
+ pciehp_handle_disable_request(ctrl);
else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
- pciehp_handle_presence_or_link_change(slot, events);
+ pciehp_handle_presence_or_link_change(ctrl, events);
up_read(&ctrl->reset_lock);
pci_config_pm_runtime_put(pdev);
@@ -748,6 +734,16 @@ void pcie_clear_hotplug_events(struct controller *ctrl)
PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
}
+void pcie_enable_interrupt(struct controller *ctrl)
+{
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_HPIE, PCI_EXP_SLTCTL_HPIE);
+}
+
+void pcie_disable_interrupt(struct controller *ctrl)
+{
+ pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_HPIE);
+}
+
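These two helpers toggle the Hot-Plug Interrupt Enable bit so pciehp can mask and unmask its own notifications, for example across suspend. A hedged fragment of how a suspend/resume pair might use them (the demo_* names are illustrative, not the actual pciehp callbacks):

/* Hypothetical suspend/resume fragment using the helpers above. */
static int demo_suspend(struct controller *ctrl)
{
	pcie_disable_interrupt(ctrl);	/* mask hot-plug notifications */
	return 0;
}

static int demo_resume(struct controller *ctrl)
{
	pcie_enable_interrupt(ctrl);	/* unmask and resume event handling */
	return 0;
}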
/*
* pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
* bus reset of the bridge, but at the same time we want to ensure that it is
@@ -756,9 +752,9 @@ void pcie_clear_hotplug_events(struct controller *ctrl)
* momentarily, if we see that they could interfere. Also, clear any spurious
* events after.
*/
-int pciehp_reset_slot(struct slot *slot, int probe)
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
{
- struct controller *ctrl = slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 stat_mask = 0, ctrl_mask = 0;
int rc;
@@ -808,34 +804,6 @@ void pcie_shutdown_notification(struct controller *ctrl)
}
}
-static int pcie_init_slot(struct controller *ctrl)
-{
- struct pci_bus *subordinate = ctrl_dev(ctrl)->subordinate;
- struct slot *slot;
-
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- return -ENOMEM;
-
- down_read(&pci_bus_sem);
- slot->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
- up_read(&pci_bus_sem);
-
- slot->ctrl = ctrl;
- mutex_init(&slot->lock);
- INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
- ctrl->slot = slot;
- return 0;
-}
-
-static void pcie_cleanup_slot(struct controller *ctrl)
-{
- struct slot *slot = ctrl->slot;
-
- cancel_delayed_work_sync(&slot->work);
- kfree(slot);
-}
-
static inline void dbg_ctrl(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl->pcie->port;
@@ -857,12 +825,13 @@ struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
u32 slot_cap, link_cap;
- u8 occupied, poweron;
+ u8 poweron;
struct pci_dev *pdev = dev->port;
+ struct pci_bus *subordinate = pdev->subordinate;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
- goto abort;
+ return NULL;
ctrl->pcie = dev;
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
@@ -879,15 +848,19 @@ struct controller *pcie_init(struct pcie_device *dev)
ctrl->slot_cap = slot_cap;
mutex_init(&ctrl->ctrl_lock);
+ mutex_init(&ctrl->state_lock);
init_rwsem(&ctrl->reset_lock);
init_waitqueue_head(&ctrl->requester);
init_waitqueue_head(&ctrl->queue);
+ INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
dbg_ctrl(ctrl);
+ down_read(&pci_bus_sem);
+ ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
+ up_read(&pci_bus_sem);
+
/* Check if Data Link Layer Link Active Reporting is implemented */
pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
- if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
- ctrl->link_active_reporting = 1;
/* Clear all remaining event bits in Slot Status register. */
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -909,33 +882,24 @@ struct controller *pcie_init(struct pcie_device *dev)
FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
- if (pcie_init_slot(ctrl))
- goto abort_ctrl;
-
/*
* If empty slot's power status is on, turn power off. The IRQ isn't
* requested yet, so avoid triggering a notification with this command.
*/
if (POWER_CTRL(ctrl)) {
- pciehp_get_adapter_status(ctrl->slot, &occupied);
- pciehp_get_power_status(ctrl->slot, &poweron);
- if (!occupied && poweron) {
+ pciehp_get_power_status(ctrl, &poweron);
+ if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
pcie_disable_notification(ctrl);
- pciehp_power_off_slot(ctrl->slot);
+ pciehp_power_off_slot(ctrl);
}
}
return ctrl;
-
-abort_ctrl:
- kfree(ctrl);
-abort:
- return NULL;
}
void pciehp_release_ctrl(struct controller *ctrl)
{
- pcie_cleanup_slot(ctrl);
+ cancel_delayed_work_sync(&ctrl->button_work);
kfree(ctrl);
}
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 5c58c22e0c08..b9c1396db6fe 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -13,20 +13,26 @@
*
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
-int pciehp_configure_device(struct slot *p_slot)
+/**
+ * pciehp_configure_device() - enumerate PCI devices below a hotplug bridge
+ * @ctrl: PCIe hotplug controller
+ *
+ * Enumerate PCI devices below a hotplug bridge and add them to the system.
+ * Return 0 on success, %-EEXIST if the devices are already enumerated or
+ * %-ENODEV if enumeration failed.
+ */
+int pciehp_configure_device(struct controller *ctrl)
{
struct pci_dev *dev;
- struct pci_dev *bridge = p_slot->ctrl->pcie->port;
+ struct pci_dev *bridge = ctrl->pcie->port;
struct pci_bus *parent = bridge->subordinate;
int num, ret = 0;
- struct controller *ctrl = p_slot->ctrl;
pci_lock_rescan_remove();
@@ -62,17 +68,28 @@ int pciehp_configure_device(struct slot *p_slot)
return ret;
}
-void pciehp_unconfigure_device(struct slot *p_slot)
+/**
+ * pciehp_unconfigure_device() - remove PCI devices below a hotplug bridge
+ * @ctrl: PCIe hotplug controller
+ * @presence: whether the card is still present in the slot;
+ * true for safe removal via sysfs or an Attention Button press,
+ * false for surprise removal
+ *
+ * Unbind PCI devices below a hotplug bridge from their drivers and remove
+ * them from the system. Safely removed devices are quiesced. Surprise
+ * removed devices are marked as such to prevent further accesses.
+ */
+void pciehp_unconfigure_device(struct controller *ctrl, bool presence)
{
- u8 presence = 0;
struct pci_dev *dev, *temp;
- struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
+ struct pci_bus *parent = ctrl->pcie->port->subordinate;
u16 command;
- struct controller *ctrl = p_slot->ctrl;
ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n",
__func__, pci_domain_nr(parent), parent->number);
- pciehp_get_adapter_status(p_slot, &presence);
+
+ if (!presence)
+ pci_walk_bus(parent, pci_dev_set_disconnected, NULL);
pci_lock_rescan_remove();
@@ -85,12 +102,6 @@ void pciehp_unconfigure_device(struct slot *p_slot)
list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
bus_list) {
pci_dev_get(dev);
- if (!presence) {
- pci_dev_set_disconnected(dev, NULL);
- if (pci_has_subordinate(dev))
- pci_walk_bus(dev->subordinate,
- pci_dev_set_disconnected, NULL);
- }
pci_stop_and_remove_bus_device(dev);
/*
* Ensure that no new Requests will be generated from
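As the kernel-doc above notes, surprise-removed devices are marked disconnected before teardown so that drivers skip config-space and MMIO accesses to hardware that is already gone, while safely removed devices are quiesced normally. A toy userspace sketch of that ordering (hypothetical names, no PCI core involved):

#include <stdbool.h>
#include <stdio.h>

struct fake_dev { const char *name; bool disconnected; };

/* Hypothetical stand-in for pci_dev_set_disconnected(). */
static void mark_disconnected(struct fake_dev *d)
{
	d->disconnected = true;
}

/* Hypothetical teardown: skip hardware access once disconnected. */
static void remove_dev(struct fake_dev *d)
{
	if (!d->disconnected)
		printf("%s: quiescing hardware (safe removal)\n", d->name);
	else
		printf("%s: skipping hardware access (surprise removal)\n",
		       d->name);
}

static void unconfigure(struct fake_dev *devs, int n, bool presence)
{
	int i;

	/* Surprise removal: mark everything gone before unbinding. */
	if (!presence)
		for (i = 0; i < n; i++)
			mark_disconnected(&devs[i]);

	for (i = n - 1; i >= 0; i--)	/* remove in reverse order */
		remove_dev(&devs[i]);
}

int main(void)
{
	struct fake_dev devs[2] = { { "dev0", false }, { "dev1", false } };

	unconfigure(devs, 2, false);	/* surprise removal */
	return 0;
}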
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 3276a5e4c430..ee54f5bacad1 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -275,14 +275,13 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
goto free_fdt1;
}
- fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL);
+ fdt = kmemdup(fdt1, fdt_totalsize(fdt1), GFP_KERNEL);
if (!fdt) {
ret = -ENOMEM;
goto free_fdt1;
}
/* Unflatten device tree blob */
- memcpy(fdt, fdt1, fdt_totalsize(fdt1));
dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
if (!dt) {
ret = -EINVAL;
@@ -328,10 +327,15 @@ out:
return ret;
}
+static inline struct pnv_php_slot *to_pnv_php_slot(struct hotplug_slot *slot)
+{
+ return container_of(slot, struct pnv_php_slot, slot);
+}
+
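to_pnv_php_slot() is the same idiom used by to_ctrl() and the other to_slot() helpers introduced throughout this series: struct hotplug_slot is now embedded in the driver's private structure instead of being allocated separately, and container_of() recovers the outer object from a pointer to the embedded member. A self-contained userspace sketch of the idiom (local container_of definition, hypothetical struct names):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hotplug_slot { int dummy; };

struct my_slot {
	int id;
	struct hotplug_slot hotplug_slot;	/* embedded, not a pointer */
};

static struct my_slot *to_my_slot(struct hotplug_slot *hs)
{
	return container_of(hs, struct my_slot, hotplug_slot);
}

int main(void)
{
	struct my_slot s = { .id = 7 };
	struct hotplug_slot *hs = &s.hotplug_slot;

	/* Recover the outer structure from the embedded member. */
	printf("id = %d\n", to_my_slot(hs)->id);
	return 0;
}

Besides saving a kzalloc() and the matching error path, the embedded layout means the slot can never exist without its private data, which is why several of the alloc/free hunks in this diff simply disappear.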
int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
uint8_t state)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
struct opal_msg msg;
int ret;
@@ -363,7 +367,7 @@ EXPORT_SYMBOL_GPL(pnv_php_set_slot_power_state);
static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
uint8_t power_state = OPAL_PCI_SLOT_POWER_ON;
int ret;
@@ -378,7 +382,6 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
ret);
} else {
*state = power_state;
- slot->info->power_status = power_state;
}
return 0;
@@ -386,7 +389,7 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
uint8_t presence = OPAL_PCI_SLOT_EMPTY;
int ret;
@@ -397,7 +400,6 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
ret = pnv_pci_get_presence_state(php_slot->id, &presence);
if (ret >= 0) {
*state = presence;
- slot->info->adapter_status = presence;
ret = 0;
} else {
pci_warn(php_slot->pdev, "Error %d getting presence\n", ret);
@@ -406,10 +408,20 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
return ret;
}
+static int pnv_php_get_attention_state(struct hotplug_slot *slot, u8 *state)
+{
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+
+ *state = php_slot->attention_state;
+ return 0;
+}
+
static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
{
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+
/* FIXME: Make it real once firmware supports it */
- slot->info->attention_status = state;
+ php_slot->attention_state = state;
return 0;
}
@@ -501,15 +513,14 @@ scan:
static int pnv_php_enable_slot(struct hotplug_slot *slot)
{
- struct pnv_php_slot *php_slot = container_of(slot,
- struct pnv_php_slot, slot);
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
return pnv_php_enable(php_slot, true);
}
static int pnv_php_disable_slot(struct hotplug_slot *slot)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
int ret;
if (php_slot->state != PNV_PHP_STATE_POPULATED)
@@ -530,9 +541,10 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot)
return ret;
}
-static struct hotplug_slot_ops php_slot_ops = {
+static const struct hotplug_slot_ops php_slot_ops = {
.get_power_status = pnv_php_get_power_state,
.get_adapter_status = pnv_php_get_adapter_state,
+ .get_attention_status = pnv_php_get_attention_state,
.set_attention_status = pnv_php_set_attention_state,
.enable_slot = pnv_php_enable_slot,
.disable_slot = pnv_php_disable_slot,
@@ -594,8 +606,6 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
php_slot->id = id;
php_slot->power_state_check = false;
php_slot->slot.ops = &php_slot_ops;
- php_slot->slot.info = &php_slot->slot_info;
- php_slot->slot.private = php_slot;
INIT_LIST_HEAD(&php_slot->children);
INIT_LIST_HEAD(&php_slot->link);
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index c8311724bd76..bdc954d70869 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -63,16 +63,22 @@ struct slot {
u32 index;
u32 type;
u32 power_domain;
+ u8 attention_status;
char *name;
struct device_node *dn;
struct pci_bus *bus;
struct list_head *pci_devs;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
};
-extern struct hotplug_slot_ops rpaphp_hotplug_slot_ops;
+extern const struct hotplug_slot_ops rpaphp_hotplug_slot_ops;
extern struct list_head rpaphp_slot_head;
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
+}
+
/* function prototypes */
/* rpaphp_pci.c */
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 857c358b727b..bcd5d357ca23 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -52,7 +52,7 @@ module_param_named(debug, rpaphp_debug, bool, 0644);
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
{
int rc;
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
switch (value) {
case 0:
@@ -66,7 +66,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
rc = rtas_set_indicator(DR_INDICATOR, slot->index, value);
if (!rc)
- hotplug_slot->info->attention_status = value;
+ slot->attention_status = value;
return rc;
}
@@ -79,7 +79,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int retval, level;
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
retval = rtas_get_power_level(slot->power_domain, &level);
if (!retval)
@@ -94,14 +94,14 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
- *value = slot->hotplug_slot->info->attention_status;
+ struct slot *slot = to_slot(hotplug_slot);
+ *value = slot->attention_status;
return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int rc, state;
rc = rpaphp_get_sensor_state(slot, &state);
@@ -409,7 +409,7 @@ static void __exit cleanup_slots(void)
list_for_each_entry_safe(slot, next, &rpaphp_slot_head,
rpaphp_slot_list) {
list_del(&slot->rpaphp_slot_list);
- pci_hp_deregister(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
dealloc_slot_struct(slot);
}
return;
@@ -434,7 +434,7 @@ static void __exit rpaphp_exit(void)
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int state;
int retval;
@@ -464,7 +464,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
if (slot->state == NOT_CONFIGURED)
return -EINVAL;
@@ -477,7 +477,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
return 0;
}
-struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
+const struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 0aac33e15dab..beca61badeea 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -54,25 +54,21 @@ int rpaphp_get_sensor_state(struct slot *slot, int *state)
* rpaphp_enable_slot - record slot state, config pci device
* @slot: target &slot
*
- * Initialize values in the slot, and the hotplug_slot info
- * structures to indicate if there is a pci card plugged into
- * the slot. If the slot is not empty, run the pcibios routine
+ * Initialize values in the slot structure to indicate if there is a pci card
+ * plugged into the slot. If the slot is not empty, run the pcibios routine
* to get pcibios stuff correctly set up.
*/
int rpaphp_enable_slot(struct slot *slot)
{
int rc, level, state;
struct pci_bus *bus;
- struct hotplug_slot_info *info = slot->hotplug_slot->info;
- info->adapter_status = NOT_VALID;
slot->state = EMPTY;
/* Find out if the power is turned on for the slot */
rc = rtas_get_power_level(slot->power_domain, &level);
if (rc)
return rc;
- info->power_status = level;
/* Figure out if there is an adapter in the slot */
rc = rpaphp_get_sensor_state(slot, &state);
@@ -85,13 +81,11 @@ int rpaphp_enable_slot(struct slot *slot)
return -EINVAL;
}
- info->adapter_status = EMPTY;
slot->bus = bus;
slot->pci_devs = &bus->devices;
/* if there's an adapter in the slot, go add the pci devices */
if (state == PRESENT) {
- info->adapter_status = NOT_CONFIGURED;
slot->state = NOT_CONFIGURED;
/* non-empty slot has to have child */
@@ -105,7 +99,6 @@ int rpaphp_enable_slot(struct slot *slot)
pci_hp_add_devices(bus);
if (!list_empty(&bus->devices)) {
- info->adapter_status = CONFIGURED;
slot->state = CONFIGURED;
}
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index b916c8e4372d..5282aa3e33c5 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -21,9 +21,7 @@
/* free up the memory used by a slot */
void dealloc_slot_struct(struct slot *slot)
{
- kfree(slot->hotplug_slot->info);
kfree(slot->name);
- kfree(slot->hotplug_slot);
kfree(slot);
}
@@ -35,28 +33,16 @@ struct slot *alloc_slot_struct(struct device_node *dn,
slot = kzalloc(sizeof(struct slot), GFP_KERNEL);
if (!slot)
goto error_nomem;
- slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!slot->hotplug_slot)
- goto error_slot;
- slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!slot->hotplug_slot->info)
- goto error_hpslot;
slot->name = kstrdup(drc_name, GFP_KERNEL);
if (!slot->name)
- goto error_info;
+ goto error_slot;
slot->dn = dn;
slot->index = drc_index;
slot->power_domain = power_domain;
- slot->hotplug_slot->private = slot;
- slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops;
+ slot->hotplug_slot.ops = &rpaphp_hotplug_slot_ops;
return (slot);
-error_info:
- kfree(slot->hotplug_slot->info);
-error_hpslot:
- kfree(slot->hotplug_slot);
error_slot:
kfree(slot);
error_nomem:
@@ -77,7 +63,7 @@ static int is_registered(struct slot *slot)
int rpaphp_deregister_slot(struct slot *slot)
{
int retval = 0;
- struct hotplug_slot *php_slot = slot->hotplug_slot;
+ struct hotplug_slot *php_slot = &slot->hotplug_slot;
dbg("%s - Entry: deregistering slot=%s\n",
__func__, slot->name);
@@ -93,7 +79,7 @@ EXPORT_SYMBOL_GPL(rpaphp_deregister_slot);
int rpaphp_register_slot(struct slot *slot)
{
- struct hotplug_slot *php_slot = slot->hotplug_slot;
+ struct hotplug_slot *php_slot = &slot->hotplug_slot;
struct device_node *child;
u32 my_index;
int retval;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 93b5341d282c..30ee72268790 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -32,10 +32,15 @@ static int zpci_fn_configured(enum zpci_state state)
*/
struct slot {
struct list_head slot_list;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct zpci_dev *zdev;
};
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
+}
+
static inline int slot_configure(struct slot *slot)
{
int ret = sclp_pci_configure(slot->zdev->fid);
@@ -60,7 +65,7 @@ static inline int slot_deconfigure(struct slot *slot)
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int rc;
if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
@@ -88,7 +93,7 @@ out_deconfigure:
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct pci_dev *pdev;
int rc;
@@ -110,7 +115,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
switch (slot->zdev->state) {
case ZPCI_FN_STATE_STANDBY:
@@ -130,7 +135,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static struct hotplug_slot_ops s390_hotplug_slot_ops = {
+static const struct hotplug_slot_ops s390_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
@@ -139,8 +144,6 @@ static struct hotplug_slot_ops s390_hotplug_slot_ops = {
int zpci_init_slot(struct zpci_dev *zdev)
{
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
struct slot *slot;
int rc;
@@ -152,26 +155,11 @@ int zpci_init_slot(struct zpci_dev *zdev)
if (!slot)
goto error;
- hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot)
- goto error_hp;
- hotplug_slot->private = slot;
-
- slot->hotplug_slot = hotplug_slot;
slot->zdev = zdev;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto error_info;
- hotplug_slot->info = info;
-
- hotplug_slot->ops = &s390_hotplug_slot_ops;
-
- get_power_status(hotplug_slot, &info->power_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
+ slot->hotplug_slot.ops = &s390_hotplug_slot_ops;
snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
- rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
+ rc = pci_hp_register(&slot->hotplug_slot, zdev->bus,
ZPCI_DEVFN, name);
if (rc)
goto error_reg;
@@ -180,10 +168,6 @@ int zpci_init_slot(struct zpci_dev *zdev)
return 0;
error_reg:
- kfree(info);
-error_info:
- kfree(hotplug_slot);
-error_hp:
kfree(slot);
error:
return -ENOMEM;
@@ -198,9 +182,7 @@ void zpci_exit_slot(struct zpci_dev *zdev)
if (slot->zdev != zdev)
continue;
list_del(&slot->slot_list);
- pci_hp_deregister(slot->hotplug_slot);
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
kfree(slot);
}
}
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index babd23409f61..231f5bdd3d2d 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -56,7 +56,7 @@ struct slot {
int device_num;
struct pci_bus *pci_bus;
/* this struct for glue internal only */
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct list_head hp_list;
char physical_path[SN_SLOT_NAME_SIZE];
};
@@ -80,7 +80,7 @@ static int enable_slot(struct hotplug_slot *slot);
static int disable_slot(struct hotplug_slot *slot);
static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops sn_hotplug_slot_ops = {
+static const struct hotplug_slot_ops sn_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
@@ -88,10 +88,15 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = {
static DEFINE_MUTEX(sn_hotplug_mutex);
+static struct slot *to_slot(struct hotplug_slot *bss_hotplug_slot)
+{
+ return container_of(bss_hotplug_slot, struct slot, hotplug_slot);
+}
+
static ssize_t path_show(struct pci_slot *pci_slot, char *buf)
{
int retval = -ENOENT;
- struct slot *slot = pci_slot->hotplug->private;
+ struct slot *slot = to_slot(pci_slot->hotplug);
if (!slot)
return retval;
@@ -156,7 +161,7 @@ static int sn_pci_bus_valid(struct pci_bus *pci_bus)
return -EIO;
}
-static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
+static int sn_hp_slot_private_alloc(struct hotplug_slot **bss_hotplug_slot,
struct pci_bus *pci_bus, int device,
char *name)
{
@@ -168,7 +173,6 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
- bss_hotplug_slot->private = slot;
slot->device_num = device;
slot->pci_bus = pci_bus;
@@ -179,8 +183,8 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
sn_generate_path(pci_bus, slot->physical_path);
- slot->hotplug_slot = bss_hotplug_slot;
list_add(&slot->hp_list, &sn_hp_list);
+ *bss_hotplug_slot = &slot->hotplug_slot;
return 0;
}
@@ -192,10 +196,9 @@ static struct hotplug_slot *sn_hp_destroy(void)
struct hotplug_slot *bss_hotplug_slot = NULL;
list_for_each_entry(slot, &sn_hp_list, hp_list) {
- bss_hotplug_slot = slot->hotplug_slot;
+ bss_hotplug_slot = &slot->hotplug_slot;
pci_slot = bss_hotplug_slot->pci_slot;
- list_del(&((struct slot *)bss_hotplug_slot->private)->
- hp_list);
+ list_del(&slot->hp_list);
sysfs_remove_file(&pci_slot->kobj,
&sn_slot_path_attr.attr);
break;
@@ -227,7 +230,7 @@ static void sn_bus_free_data(struct pci_dev *dev)
static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
int device_num, char **ssdt)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pcibus_info *pcibus_info;
struct pcibr_slot_enable_resp resp;
int rc;
@@ -267,7 +270,7 @@ static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
int device_num, int action)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pcibus_info *pcibus_info;
struct pcibr_slot_disable_resp resp;
int rc;
@@ -323,7 +326,7 @@ static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
*/
static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pci_bus *new_bus = NULL;
struct pci_dev *dev;
int num_funcs;
@@ -469,7 +472,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pci_dev *dev, *temp;
int rc;
acpi_handle ssdt_hdl = NULL;
@@ -571,7 +574,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
u8 *value)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pcibus_info *pcibus_info;
u32 power;
@@ -585,9 +588,7 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
{
- kfree(bss_hotplug_slot->info);
- kfree(bss_hotplug_slot->private);
- kfree(bss_hotplug_slot);
+ kfree(to_slot(bss_hotplug_slot));
}
static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
@@ -607,22 +608,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
if (sn_pci_slot_valid(pci_bus, device) != 1)
continue;
- bss_hotplug_slot = kzalloc(sizeof(*bss_hotplug_slot),
- GFP_KERNEL);
- if (!bss_hotplug_slot) {
- rc = -ENOMEM;
- goto alloc_err;
- }
-
- bss_hotplug_slot->info =
- kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!bss_hotplug_slot->info) {
- rc = -ENOMEM;
- goto alloc_err;
- }
-
- if (sn_hp_slot_private_alloc(bss_hotplug_slot,
+ if (sn_hp_slot_private_alloc(&bss_hotplug_slot,
pci_bus, device, name)) {
rc = -ENOMEM;
goto alloc_err;
@@ -637,7 +623,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
rc = sysfs_create_file(&pci_slot->kobj,
&sn_slot_path_attr.attr);
if (rc)
- goto register_err;
+ goto alloc_err;
}
pci_dbg(pci_bus->self, "Registered bus with hotplug\n");
return rc;
@@ -646,14 +632,11 @@ register_err:
pci_dbg(pci_bus->self, "bus failed to register with err = %d\n",
rc);
-alloc_err:
- if (rc == -ENOMEM)
- pci_dbg(pci_bus->self, "Memory allocation error\n");
-
/* destroy THIS element */
- if (bss_hotplug_slot)
- sn_release_slot(bss_hotplug_slot);
+ sn_hp_destroy();
+ sn_release_slot(bss_hotplug_slot);
+alloc_err:
/* destroy anything else on the list */
while ((bss_hotplug_slot = sn_hp_destroy())) {
pci_hp_deregister(bss_hotplug_slot);
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 516e4835019c..f7f13ee5d06e 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -67,11 +67,13 @@ struct slot {
u32 number;
u8 is_a_board;
u8 state;
+ u8 attention_save;
u8 presence_save;
+ u8 latch_save;
u8 pwr_save;
struct controller *ctrl;
const struct hpc_ops *hpc_ops;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct list_head slot_list;
struct delayed_work work; /* work for button event */
struct mutex lock;
@@ -169,7 +171,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev);
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
}
struct ctrl_reg {
@@ -207,7 +209,7 @@ enum ctrl_offsets {
static inline struct slot *get_slot(struct hotplug_slot *hotplug_slot)
{
- return hotplug_slot->private;
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
static inline struct slot *shpchp_find_slot(struct controller *ctrl, u8 device)
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 97cee23f3d51..81a918d47895 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -51,7 +51,7 @@ static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
+static const struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
@@ -65,7 +65,6 @@ static int init_slots(struct controller *ctrl)
{
struct slot *slot;
struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
int retval;
int i;
@@ -77,19 +76,7 @@ static int init_slots(struct controller *ctrl)
goto error;
}
- hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot) {
- retval = -ENOMEM;
- goto error_slot;
- }
- slot->hotplug_slot = hotplug_slot;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- retval = -ENOMEM;
- goto error_hpslot;
- }
- hotplug_slot->info = info;
+ hotplug_slot = &slot->hotplug_slot;
slot->hp_slot = i;
slot->ctrl = ctrl;
@@ -101,14 +88,13 @@ static int init_slots(struct controller *ctrl)
slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number);
if (!slot->wq) {
retval = -ENOMEM;
- goto error_info;
+ goto error_slot;
}
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
/* register this slot with the hotplug pci core */
- hotplug_slot->private = slot;
snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
@@ -116,7 +102,7 @@ static int init_slots(struct controller *ctrl)
pci_domain_nr(ctrl->pci_dev->subordinate),
slot->bus, slot->device, slot->hp_slot, slot->number,
ctrl->slot_device_offset);
- retval = pci_hp_register(slot->hotplug_slot,
+ retval = pci_hp_register(hotplug_slot,
ctrl->pci_dev->subordinate, slot->device, name);
if (retval) {
ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
@@ -124,10 +110,10 @@ static int init_slots(struct controller *ctrl)
goto error_slotwq;
}
- get_power_status(hotplug_slot, &info->power_status);
- get_attention_status(hotplug_slot, &info->attention_status);
- get_latch_status(hotplug_slot, &info->latch_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
+ get_power_status(hotplug_slot, &slot->pwr_save);
+ get_attention_status(hotplug_slot, &slot->attention_save);
+ get_latch_status(hotplug_slot, &slot->latch_save);
+ get_adapter_status(hotplug_slot, &slot->presence_save);
list_add(&slot->slot_list, &ctrl->slot_list);
}
@@ -135,10 +121,6 @@ static int init_slots(struct controller *ctrl)
return 0;
error_slotwq:
destroy_workqueue(slot->wq);
-error_info:
- kfree(info);
-error_hpslot:
- kfree(hotplug_slot);
error_slot:
kfree(slot);
error:
@@ -153,9 +135,7 @@ void cleanup_slots(struct controller *ctrl)
list_del(&slot->slot_list);
cancel_delayed_work(&slot->work);
destroy_workqueue(slot->wq);
- pci_hp_deregister(slot->hotplug_slot);
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
kfree(slot);
}
}
@@ -170,7 +150,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- hotplug_slot->info->attention_status = status;
+ slot->attention_save = status;
slot->hpc_ops->set_attention_status(slot, status);
return 0;
@@ -206,7 +186,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_power_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->power_status;
+ *value = slot->pwr_save;
return 0;
}
@@ -221,7 +201,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_attention_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->attention_status;
+ *value = slot->attention_save;
return 0;
}
@@ -236,7 +216,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_latch_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->latch_status;
+ *value = slot->latch_save;
return 0;
}
@@ -251,7 +231,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_adapter_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->adapter_status;
+ *value = slot->presence_save;
return 0;
}
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 1267dcc5a531..078003dcde5b 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -446,23 +446,12 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
mutex_unlock(&p_slot->lock);
}
-static int update_slot_info (struct slot *slot)
+static void update_slot_info(struct slot *slot)
{
- struct hotplug_slot_info *info;
- int result;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- slot->hpc_ops->get_power_status(slot, &(info->power_status));
- slot->hpc_ops->get_attention_status(slot, &(info->attention_status));
- slot->hpc_ops->get_latch_status(slot, &(info->latch_status));
- slot->hpc_ops->get_adapter_status(slot, &(info->adapter_status));
-
- result = pci_hp_change_slot_info(slot->hotplug_slot, info);
- kfree (info);
- return result;
+ slot->hpc_ops->get_power_status(slot, &slot->pwr_save);
+ slot->hpc_ops->get_attention_status(slot, &slot->attention_save);
+ slot->hpc_ops->get_latch_status(slot, &slot->latch_save);
+ slot->hpc_ops->get_adapter_status(slot, &slot->presence_save);
}
/*
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index c5f3cd4ed766..9616eca3182f 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -13,7 +13,6 @@
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/pci-ats.h>
#include "pci.h"
#define VIRTFN_ID_LEN 16
@@ -133,6 +132,8 @@ static void pci_read_vf_config_common(struct pci_dev *virtfn)
&physfn->sriov->subsystem_vendor);
pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
&physfn->sriov->subsystem_device);
+
+ physfn->sriov->cfg_size = pci_cfg_space_size(virtfn);
}
int pci_iov_add_virtfn(struct pci_dev *dev, int id)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index f2ef896464b3..af24ed50a245 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -958,7 +958,6 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
}
}
}
- WARN_ON(!!dev->msix_enabled);
/* Check whether driver already requested for MSI irq */
if (dev->msi_enabled) {
@@ -1028,8 +1027,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (!pci_msi_supported(dev, minvec))
return -EINVAL;
- WARN_ON(!!dev->msi_enabled);
-
/* Check whether driver already requested MSI-X irqs */
if (dev->msix_enabled) {
pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
@@ -1039,6 +1036,9 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (maxvec < minvec)
return -ERANGE;
+ if (WARN_ON_ONCE(dev->msi_enabled))
+ return -EINVAL;
+
nvec = pci_msi_vec_count(dev);
if (nvec < 0)
return nvec;
@@ -1087,6 +1087,9 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
if (maxvec < minvec)
return -ERANGE;
+ if (WARN_ON_ONCE(dev->msix_enabled))
+ return -EINVAL;
+
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
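The WARN_ON checks on msi_enabled/msix_enabled move into the range helpers and now fail the call with -EINVAL instead of merely warning, since enabling the same interrupt type twice is a driver bug. A hedged sketch of how a driver usually sidesteps the whole question by requesting vectors exactly once in probe and letting the core pick the best type (demo_setup_irqs() is illustrative; pci_alloc_irq_vectors() is the real API):

#include <linux/pci.h>

/* Hypothetical probe fragment: request 1-8 vectors exactly once. */
static int demo_setup_irqs(struct pci_dev *pdev)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI |
				     PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	/* Use pci_irq_vector(pdev, i) for i in [0, nvec) ... */
	return 0;
}

pci_alloc_irq_vectors() tries MSI-X first, then MSI, then a legacy IRQ, so a driver written this way never ends up with two types enabled at once.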
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
new file mode 100644
index 000000000000..ae3c5b25dcc7
--- /dev/null
+++ b/drivers/pci/p2pdma.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Peer 2 Peer DMA support.
+ *
+ * Copyright (c) 2016-2018, Logan Gunthorpe
+ * Copyright (c) 2016-2017, Microsemi Corporation
+ * Copyright (c) 2017, Christoph Hellwig
+ * Copyright (c) 2018, Eideticom Inc.
+ */
+
+#define pr_fmt(fmt) "pci-p2pdma: " fmt
+#include <linux/ctype.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/genalloc.h>
+#include <linux/memremap.h>
+#include <linux/percpu-refcount.h>
+#include <linux/random.h>
+#include <linux/seq_buf.h>
+
+struct pci_p2pdma {
+ struct percpu_ref devmap_ref;
+ struct completion devmap_ref_done;
+ struct gen_pool *pool;
+ bool p2pmem_published;
+};
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ size_t size = 0;
+
+ if (pdev->p2pdma->pool)
+ size = gen_pool_size(pdev->p2pdma->pool);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", size);
+}
+static DEVICE_ATTR_RO(size);
+
+static ssize_t available_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ size_t avail = 0;
+
+ if (pdev->p2pdma->pool)
+ avail = gen_pool_avail(pdev->p2pdma->pool);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", avail);
+}
+static DEVICE_ATTR_RO(available);
+
+static ssize_t published_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pdev->p2pdma->p2pmem_published);
+}
+static DEVICE_ATTR_RO(published);
+
+static struct attribute *p2pmem_attrs[] = {
+ &dev_attr_size.attr,
+ &dev_attr_available.attr,
+ &dev_attr_published.attr,
+ NULL,
+};
+
+static const struct attribute_group p2pmem_group = {
+ .attrs = p2pmem_attrs,
+ .name = "p2pmem",
+};
+
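These attributes expose the pool's size, free space, and published flag through sysfs. Assuming the group lands at the usual per-device sysfs path (a guess based on the "p2pmem" group name above; the patch does not spell the path out), a small userspace reader might look like:

#include <stdio.h>

/* Hypothetical device address; "p2pmem" comes from the group name above. */
#define ATTR "/sys/bus/pci/devices/0000:01:00.0/p2pmem/available"

int main(void)
{
	char buf[32];
	FILE *f = fopen(ATTR, "r");

	if (!f) {
		perror(ATTR);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("p2p bytes available: %s", buf);
	fclose(f);
	return 0;
}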
+static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
+{
+ struct pci_p2pdma *p2p =
+ container_of(ref, struct pci_p2pdma, devmap_ref);
+
+ complete_all(&p2p->devmap_ref_done);
+}
+
+static void pci_p2pdma_percpu_kill(void *data)
+{
+ struct percpu_ref *ref = data;
+
+ /*
+ * pci_p2pdma_add_resource() may be called multiple times
+ * by a driver and may register the percpu_kill devm action multiple
+ * times. We only want the first action to actually kill the
+ * percpu_ref.
+ */
+ if (percpu_ref_is_dying(ref))
+ return;
+
+ percpu_ref_kill(ref);
+}
+
+static void pci_p2pdma_release(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ if (!pdev->p2pdma)
+ return;
+
+ wait_for_completion(&pdev->p2pdma->devmap_ref_done);
+ percpu_ref_exit(&pdev->p2pdma->devmap_ref);
+
+ gen_pool_destroy(pdev->p2pdma->pool);
+ sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
+ pdev->p2pdma = NULL;
+}
+
+static int pci_p2pdma_setup(struct pci_dev *pdev)
+{
+ int error = -ENOMEM;
+ struct pci_p2pdma *p2p;
+
+ p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
+ if (!p2p)
+ return -ENOMEM;
+
+ p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
+ if (!p2p->pool)
+ goto out;
+
+ init_completion(&p2p->devmap_ref_done);
+ error = percpu_ref_init(&p2p->devmap_ref,
+ pci_p2pdma_percpu_release, 0, GFP_KERNEL);
+ if (error)
+ goto out_pool_destroy;
+
+ error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
+ if (error)
+ goto out_pool_destroy;
+
+ pdev->p2pdma = p2p;
+
+ error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
+ if (error)
+ goto out_pool_destroy;
+
+ return 0;
+
+out_pool_destroy:
+ pdev->p2pdma = NULL;
+ gen_pool_destroy(p2p->pool);
+out:
+ devm_kfree(&pdev->dev, p2p);
+ return error;
+}
+
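pci_p2pdma_setup() leans on devm_add_action_or_reset() so the pool, the percpu_ref, and the sysfs group are all torn down automatically when the provider unbinds. A minimal fragment showing just that pattern (demo_* names are hypothetical, not part of this patch):

#include <linux/device.h>

/* Hypothetical cleanup action, run automatically at driver unbind. */
static void demo_release(void *data)
{
	/* ...free whatever demo_setup() allocated for @data... */
}

static int demo_setup(struct device *dev, void *res)
{
	/*
	 * Registers demo_release(res) to run at unbind; if registration
	 * itself fails, the action runs immediately and an error returns.
	 */
	return devm_add_action_or_reset(dev, demo_release, res);
}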
+/**
+ * pci_p2pdma_add_resource - add memory for use as p2p memory
+ * @pdev: the device to add the memory to
+ * @bar: PCI BAR to add
+ * @size: size of the memory to add, may be zero to use the whole BAR
+ * @offset: offset into the PCI BAR
+ *
+ * The memory will be given ZONE_DEVICE struct pages so that it may
+ * be used with any DMA request.
+ */
+int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
+ u64 offset)
+{
+ struct dev_pagemap *pgmap;
+ void *addr;
+ int error;
+
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ return -EINVAL;
+
+ if (offset >= pci_resource_len(pdev, bar))
+ return -EINVAL;
+
+ if (!size)
+ size = pci_resource_len(pdev, bar) - offset;
+
+ if (size + offset > pci_resource_len(pdev, bar))
+ return -EINVAL;
+
+ if (!pdev->p2pdma) {
+ error = pci_p2pdma_setup(pdev);
+ if (error)
+ return error;
+ }
+
+ pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
+ if (!pgmap)
+ return -ENOMEM;
+
+ pgmap->res.start = pci_resource_start(pdev, bar) + offset;
+ pgmap->res.end = pgmap->res.start + size - 1;
+ pgmap->res.flags = pci_resource_flags(pdev, bar);
+ pgmap->ref = &pdev->p2pdma->devmap_ref;
+ pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
+ pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
+ pci_resource_start(pdev, bar);
+
+ addr = devm_memremap_pages(&pdev->dev, pgmap);
+ if (IS_ERR(addr)) {
+ error = PTR_ERR(addr);
+ goto pgmap_free;
+ }
+
+ error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr,
+ pci_bus_address(pdev, bar) + offset,
+ resource_size(&pgmap->res), dev_to_node(&pdev->dev));
+ if (error)
+ goto pgmap_free;
+
+ error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_percpu_kill,
+ &pdev->p2pdma->devmap_ref);
+ if (error)
+ goto pgmap_free;
+
+ pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
+ &pgmap->res);
+
+ return 0;
+
+pgmap_free:
+ devm_kfree(&pdev->dev, pgmap);
+ return error;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
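A provider driver would typically publish one of its BARs from probe. A minimal, hypothetical fragment (the BAR number and the bare-bones error handling are illustrative; the call itself matches the signature documented above):

#include <linux/pci.h>
#include <linux/pci-p2pdma.h>

/* Hypothetical probe fragment for a p2pmem provider. */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Donate all of BAR 4 as peer-to-peer DMA memory. */
	rc = pci_p2pdma_add_resource(pdev, 4, 0, 0);
	if (rc)
		return rc;

	return 0;
}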
+
+/*
+ * Note this function returns the parent PCI device with a
+ * reference taken. It is the caller's responsibility to drop
+ * the reference.
+ */
+static struct pci_dev *find_parent_pci_dev(struct device *dev)
+{
+ struct device *parent;
+
+ dev = get_device(dev);
+
+ while (dev) {
+ if (dev_is_pci(dev))
+ return to_pci_dev(dev);
+
+ parent = get_device(dev->parent);
+ put_device(dev);
+ dev = parent;
+ }
+
+ return NULL;
+}
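Because the returned device carries a reference, every caller within this file must balance it. A hedged fragment of the expected shape (demo_dev_is_behind_pci() is illustrative):

/* Hypothetical caller: remember to drop the reference. */
static bool demo_dev_is_behind_pci(struct device *dev)
{
	struct pci_dev *pdev = find_parent_pci_dev(dev);

	if (!pdev)
		return false;

	pci_dev_put(pdev);	/* find_parent_pci_dev() took a reference */
	return true;
}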
+
+/*
+ * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
+ * TLPs upstream via ACS. Returns 1 if the packets will be redirected
+ * upstream, 0 otherwise.
+ */
+static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
+{
+ int pos;
+ u16 ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return 0;
+
+ pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+
+ if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
+ return 1;
+
+ return 0;
+}
+
+static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
+{
+ if (!buf)
+ return;
+
+ seq_buf_printf(buf, "%s;", pci_name(pdev));
+}
+
+/*
+ * Find the distance through the nearest common upstream bridge between
+ * two PCI devices.
+ *
+ * If the two devices are the same device then 0 will be returned.
+ *
+ * If there are two virtual functions of the same device behind the same
+ * bridge port then 2 will be returned (one step down to the PCIe switch,
+ * then one step back to the same device).
+ *
+ * In the case where two devices are connected to the same PCIe switch, the
+ * value 4 will be returned. This corresponds to the following PCI tree:
+ *
+ * -+ Root Port
+ * \+ Switch Upstream Port
+ * +-+ Switch Downstream Port
+ * + \- Device A
+ * \-+ Switch Downstream Port
+ * \- Device B
+ *
+ * The distance is 4 because we traverse from Device A through the downstream
+ * port of the switch, to the common upstream port, back up to the second
+ * downstream port and then to Device B.
+ *
+ * Any two devices that don't have a common upstream bridge will return -1.
+ * In this way devices on separate PCIe root ports will be rejected, which
+ * is what we want for peer-to-peer, since each PCIe root port defines a
+ * separate hierarchy domain and there's no way to determine whether the root
+ * complex supports forwarding between them.
+ *
+ * In the case where two devices are connected to different PCIe switches,
+ * this function will still return a positive distance as long as both
+ * switches eventually have a common upstream bridge. Note this covers
+ * the case of using multiple PCIe switches to achieve a desired level of
+ * fan-out from a root port. The exact distance will be a function of the
+ * number of switches between Device A and Device B.
+ *
+ * If a bridge with any ACS redirection bits set is in the path,
+ * then this function will return -2. This is so we reject any
+ * cases where the TLPs are forwarded up into the root complex.
+ * In this case, a list of all infringing bridge addresses will be
+ * populated in acs_list (assuming it's non-null) for printk purposes.
+ */
+static int upstream_bridge_distance(struct pci_dev *a,
+ struct pci_dev *b,
+ struct seq_buf *acs_list)
+{
+ int dist_a = 0;
+ int dist_b = 0;
+ struct pci_dev *bb = NULL;
+ int acs_cnt = 0;
+
+ /*
+ * Note, we don't need to take references to devices returned by
+ * pci_upstream_bridge() since we hold a reference to a child
+ * device which will already hold a reference to the upstream bridge.
+ */
+
+ while (a) {
+ dist_b = 0;
+
+ if (pci_bridge_has_acs_redir(a)) {
+ seq_buf_print_bus_devfn(acs_list, a);
+ acs_cnt++;
+ }
+
+ bb = b;
+
+ while (bb) {
+ if (a == bb)
+ goto check_b_path_acs;
+
+ bb = pci_upstream_bridge(bb);
+ dist_b++;
+ }
+
+ a = pci_upstream_bridge(a);
+ dist_a++;
+ }
+
+ return -1;
+
+check_b_path_acs:
+ bb = b;
+
+ while (bb) {
+ if (a == bb)
+ break;
+
+ if (pci_bridge_has_acs_redir(bb)) {
+ seq_buf_print_bus_devfn(acs_list, bb);
+ acs_cnt++;
+ }
+
+ bb = pci_upstream_bridge(bb);
+ }
+
+ if (acs_cnt)
+ return -2;
+
+ return dist_a + dist_b;
+}
+
+static int upstream_bridge_distance_warn(struct pci_dev *provider,
+ struct pci_dev *client)
+{
+ struct seq_buf acs_list;
+ int ret;
+
+ seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
+ if (!acs_list.buffer)
+ return -ENOMEM;
+
+ ret = upstream_bridge_distance(provider, client, &acs_list);
+ if (ret == -2) {
+ pci_warn(client, "cannot be used for peer-to-peer DMA as ACS redirect is set between the client and provider (%s)\n",
+ pci_name(provider));
+ /* Drop final semicolon */
+ acs_list.buffer[acs_list.len-1] = 0;
+ pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
+ acs_list.buffer);
+
+ } else if (ret < 0) {
+ pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge\n",
+ pci_name(provider));
+ }
+
+ kfree(acs_list.buffer);
+
+ return ret;
+}
+
+/**
+ * pci_p2pdma_distance_many - Determine the cumulative distance between
+ * a p2pdma provider and the clients in use.
+ * @provider: p2pdma provider to check against the client list
+ * @clients: array of devices to check (NULL-terminated)
+ * @num_clients: number of clients in the array
+ * @verbose: if true, print warnings for devices when we return -1
+ *
+ * Returns -1 if any of the clients are not compatible with the provider
+ * (i.e. not behind the same root port), otherwise returns a positive
+ * number where a lower number is the preferable choice. (If one of the
+ * clients is the same device as the provider, it will return 0, which
+ * is the best choice.)
+ *
+ * For now, "compatible" means the provider and the clients are all behind
+ * the same PCI root port. This cuts out cases that may work but is the
+ * safest approach for the user. Future work can expand this to whitelist
+ * root complexes that can safely forward between their ports.
+ */
+int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
+ int num_clients, bool verbose)
+{
+ bool not_supported = false;
+ struct pci_dev *pci_client;
+ int distance = 0;
+ int i, ret;
+
+ if (num_clients == 0)
+ return -1;
+
+ for (i = 0; i < num_clients; i++) {
+ pci_client = find_parent_pci_dev(clients[i]);
+ if (!pci_client) {
+ if (verbose)
+ dev_warn(clients[i],
+ "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
+ return -1;
+ }
+
+ if (verbose)
+ ret = upstream_bridge_distance_warn(provider,
+ pci_client);
+ else
+ ret = upstream_bridge_distance(provider, pci_client,
+ NULL);
+
+ pci_dev_put(pci_client);
+
+ if (ret < 0)
+ not_supported = true;
+
+ if (not_supported && !verbose)
+ break;
+
+ distance += ret;
+ }
+
+ if (not_supported)
+ return -1;
+
+ return distance;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
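+
+/*
+ * Illustrative sketch: a hypothetical caller with two client devices
+ * could validate a candidate provider before using its memory. The
+ * foo_validate_provider() name is an assumption for this example.
+ */
+static int foo_validate_provider(struct pci_dev *provider,
+ struct device *dma_dev, struct device *rdma_dev)
+{
+ struct device *clients[] = { dma_dev, rdma_dev };
+
+ /* A negative distance means at least one client is incompatible */
+ if (pci_p2pdma_distance_many(provider, clients, 2, true) < 0)
+ return -EINVAL;
+
+ return 0;
+}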
+
+/**
+ * pci_has_p2pmem - check if a given PCI device has published any p2pmem
+ * @pdev: PCI device to check
+ */
+bool pci_has_p2pmem(struct pci_dev *pdev)
+{
+ return pdev->p2pdma && pdev->p2pdma->p2pmem_published;
+}
+EXPORT_SYMBOL_GPL(pci_has_p2pmem);
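+
+/*
+ * Illustrative sketch: a driver can gate an optional p2p fast path on
+ * whether a candidate device has published p2p memory before doing any
+ * further compatibility checks. foo_can_use_p2p() is hypothetical.
+ */
+static bool foo_can_use_p2p(struct pci_dev *pdev)
+{
+ if (!pci_has_p2pmem(pdev))
+ return false;
+
+ /* ... additional driver-specific checks go here ... */
+ return true;
+}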
+
+/**
+ * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
+ * the specified list of clients and shortest distance (as determined
+ * by pci_p2pdma_distance_many())
+ * @clients: array of devices to check (NULL-terminated)
+ * @num_clients: number of client devices in the list
+ *
+ * If multiple devices are behind the same switch, the one "closest" to the
+ * client devices in use will be chosen first. (So if one of the providers is
+ * the same as one of the clients, that provider will be used ahead of any
+ * other providers that are unrelated). If multiple providers are an equal
+ * distance away, one will be chosen at random.
+ *
+ * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
+ * to return the reference) or NULL if no compatible device is found. The
+ * found provider will also be assigned to the client list.
+ */
+struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
+{
+ struct pci_dev *pdev = NULL;
+ int distance;
+ int closest_distance = INT_MAX;
+ struct pci_dev **closest_pdevs;
+ int dev_cnt = 0;
+ const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
+ int i;
+
+ closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!closest_pdevs)
+ return NULL;
+
+ while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+ if (!pci_has_p2pmem(pdev))
+ continue;
+
+ distance = pci_p2pdma_distance_many(pdev, clients,
+ num_clients, false);
+ if (distance < 0 || distance > closest_distance)
+ continue;
+
+ if (distance == closest_distance && dev_cnt >= max_devs)
+ continue;
+
+ if (distance < closest_distance) {
+ for (i = 0; i < dev_cnt; i++)
+ pci_dev_put(closest_pdevs[i]);
+
+ dev_cnt = 0;
+ closest_distance = distance;
+ }
+
+ closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
+ }
+
+ if (dev_cnt)
+ pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);
+
+ for (i = 0; i < dev_cnt; i++)
+ pci_dev_put(closest_pdevs[i]);
+
+ kfree(closest_pdevs);
+ return pdev;
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
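+
+/*
+ * Illustrative sketch: picking the closest published provider for a
+ * set of clients; the caller owns the returned reference and must drop
+ * it with pci_dev_put() when done. foo_pick_provider() is hypothetical.
+ */
+static struct pci_dev *foo_pick_provider(struct device **clients, int n)
+{
+ struct pci_dev *provider;
+
+ provider = pci_p2pmem_find_many(clients, n);
+ if (!provider)
+ return NULL; /* no compatible p2pmem in the system */
+
+ /* ... allocate from it with pci_alloc_p2pmem() ... */
+ return provider;
+}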
+
+/**
+ * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
+ * @pdev: the device to allocate memory from
+ * @size: number of bytes to allocate
+ *
+ * Returns the allocated memory or NULL on error.
+ */
+void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
+{
+ void *ret;
+
+ if (unlikely(!pdev->p2pdma))
+ return NULL;
+
+ if (unlikely(!percpu_ref_tryget_live(&pdev->p2pdma->devmap_ref)))
+ return NULL;
+
+ ret = (void *)gen_pool_alloc(pdev->p2pdma->pool, size);
+
+ if (unlikely(!ret))
+ percpu_ref_put(&pdev->p2pdma->devmap_ref);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
+
+/**
+ * pci_free_p2pmem - free peer-to-peer DMA memory
+ * @pdev: the device the memory was allocated from
+ * @addr: address of the memory that was allocated
+ * @size: number of bytes that was allocated
+ */
+void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
+{
+ gen_pool_free(pdev->p2pdma->pool, (uintptr_t)addr, size);
+ percpu_ref_put(&pdev->p2pdma->devmap_ref);
+}
+EXPORT_SYMBOL_GPL(pci_free_p2pmem);
+
+/**
+ * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
+ * address obtained with pci_alloc_p2pmem()
+ * @pdev: the device the memory was allocated from
+ * @addr: address of the memory that was allocated
+ */
+pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
+{
+ if (!addr)
+ return 0;
+ if (!pdev->p2pdma)
+ return 0;
+
+ /*
+ * Note: when we added the memory to the pool we used the PCI
+ * bus address as the physical address. So gen_pool_virt_to_phys()
+ * actually returns the bus address despite the misleading name.
+ */
+ return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
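+
+/*
+ * Illustrative sketch: allocate a page of p2p memory, translate it to
+ * a bus address suitable for programming into a peer device's DMA
+ * descriptor, then free it. foo_use_p2p_buffer() is hypothetical.
+ */
+static int foo_use_p2p_buffer(struct pci_dev *provider)
+{
+ pci_bus_addr_t bus_addr;
+ void *buf;
+
+ buf = pci_alloc_p2pmem(provider, PAGE_SIZE);
+ if (!buf)
+ return -ENOMEM;
+
+ bus_addr = pci_p2pmem_virt_to_bus(provider, buf);
+
+ /* ... program bus_addr into the peer and run the transfer ... */
+
+ pci_free_p2pmem(provider, buf, PAGE_SIZE);
+ return 0;
+}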
+
+/**
+ * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
+ * @pdev: the device to allocate memory from
+ * @nents: the number of SG entries in the list
+ * @length: number of bytes to allocate
+ *
+ * Returns the allocated scatterlist or NULL on error.
+ */
+struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
+ unsigned int *nents, u32 length)
+{
+ struct scatterlist *sg;
+ void *addr;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return NULL;
+
+ sg_init_table(sg, 1);
+
+ addr = pci_alloc_p2pmem(pdev, length);
+ if (!addr)
+ goto out_free_sg;
+
+ sg_set_buf(sg, addr, length);
+ *nents = 1;
+ return sg;
+
+out_free_sg:
+ kfree(sg);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);
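+
+/*
+ * Illustrative sketch: build a single-entry scatterlist backed by p2p
+ * memory and release it when the I/O completes. foo_sgl_io() is
+ * hypothetical.
+ */
+static int foo_sgl_io(struct pci_dev *provider)
+{
+ struct scatterlist *sgl;
+ unsigned int nents;
+
+ sgl = pci_p2pmem_alloc_sgl(provider, &nents, PAGE_SIZE);
+ if (!sgl)
+ return -ENOMEM;
+
+ /* ... map with pci_p2pdma_map_sg() and submit the I/O ... */
+
+ pci_p2pmem_free_sgl(provider, sgl);
+ return 0;
+}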
+
+/**
+ * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
+ * @pdev: the device the memory was allocated from
+ * @sgl: the allocated scatterlist
+ */
+void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
+{
+ struct scatterlist *sg;
+ int count;
+
+ for_each_sg(sgl, sg, INT_MAX, count) {
+ if (!sg)
+ break;
+
+ pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
+ }
+ kfree(sgl);
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);
+
+/**
+ * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
+ * other devices with pci_p2pmem_find()
+ * @pdev: the device with peer-to-peer DMA memory to publish
+ * @publish: set to true to publish the memory, false to unpublish it
+ *
+ * Published memory can be used by other PCI device drivers for
+ * peer-to-peer DMA operations. Non-published memory is reserved for the
+ * exclusive use of the device driver that registers the peer-to-peer
+ * memory.
+ */
+void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
+{
+ if (pdev->p2pdma)
+ pdev->p2pdma->p2pmem_published = publish;
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
+
+/**
+ * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA
+ * @dev: device doing the DMA request
+ * @sg: scatter list to map
+ * @nents: elements in the scatterlist
+ * @dir: DMA direction
+ *
+ * Scatterlists mapped with this function should not be unmapped in any way.
+ *
+ * Returns the number of SG entries mapped or 0 on error.
+ */
+int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ struct dev_pagemap *pgmap;
+ struct scatterlist *s;
+ phys_addr_t paddr;
+ int i;
+
+ /*
+ * p2pdma mappings are not compatible with devices that use
+ * dma_virt_ops. If the upper layers do the right thing
+ * this should never happen because it will be prevented
+ * by the check in pci_p2pdma_distance_many().
+ */
+ if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
+ dev->dma_ops == &dma_virt_ops))
+ return 0;
+
+ for_each_sg(sg, s, nents, i) {
+ pgmap = sg_page(s)->pgmap;
+ paddr = sg_phys(s);
+
+ s->dma_address = paddr - pgmap->pci_p2pdma_bus_offset;
+ sg_dma_len(s) = s->length;
+ }
+
+ return nents;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg);
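+
+/*
+ * Illustrative sketch: mapping a p2p scatterlist for a DMA engine.
+ * Note there is deliberately no matching unmap call. foo_map() is
+ * hypothetical.
+ */
+static int foo_map(struct device *dma_dev, struct scatterlist *sgl,
+ int nents)
+{
+ if (!pci_p2pdma_map_sg(dma_dev, sgl, nents, DMA_TO_DEVICE))
+ return -EIO;
+
+ /* ... program the engine from sg_dma_address()/sg_dma_len() ... */
+ return 0;
+}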
+
+/**
+ * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
+ * to enable p2pdma
+ * @page: contents of the value to be stored
+ * @p2p_dev: returns the PCI device that was selected to be used
+ * (if one was specified in the stored value)
+ * @use_p2pdma: returns whether to enable p2pdma or not
+ *
+ * Parses an attribute value to decide whether to enable p2pdma.
+ * The value can select a PCI device (using its full BDF device
+ * name) or a boolean (in any format strtobool() accepts). A false
+ * value disables p2pdma; a true value tells the caller to find a
+ * compatible device automatically; specifying a PCI device tells the
+ * caller to use that specific provider.
+ *
+ * pci_p2pdma_enable_show() should be used as the show operation for
+ * the attribute.
+ *
+ * Returns 0 on success
+ */
+int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
+ bool *use_p2pdma)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
+ if (dev) {
+ *use_p2pdma = true;
+ *p2p_dev = to_pci_dev(dev);
+
+ if (!pci_has_p2pmem(*p2p_dev)) {
+ pci_err(*p2p_dev,
+ "PCI device has no peer-to-peer memory: %s\n",
+ page);
+ pci_dev_put(*p2p_dev);
+ return -ENODEV;
+ }
+
+ return 0;
+ } else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
+ /*
+ * If the user enters a PCI device that doesn't exist
+ * like "0000:01:00.1", we don't want strtobool to think
+ * it's a '0' when it's clearly not what the user wanted.
+ * So we require 0's and 1's to be exactly one character.
+ */
+ } else if (!strtobool(page, use_p2pdma)) {
+ return 0;
+ }
+
+ pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);
+
+/**
+ * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
+ * whether p2pdma is enabled
+ * @page: contents of the stored value
+ * @p2p_dev: the selected p2p device (NULL if no device is selected)
+ * @use_p2pdma: whether p2pdma has been enabled
+ *
+ * Attributes that use pci_p2pdma_enable_store() should use this function
+ * to show the value of the attribute.
+ *
+ * Returns 0 on success
+ */
+ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
+ bool use_p2pdma)
+{
+ if (!use_p2pdma)
+ return sprintf(page, "0\n");
+
+ if (!p2p_dev)
+ return sprintf(page, "1\n");
+
+ return sprintf(page, "%s\n", pci_name(p2p_dev));
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);
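+
+/*
+ * Illustrative sketch: how a driver-specific configfs attribute might
+ * wire these two helpers together. The foo_item structure, its fields
+ * and to_foo_item() are assumptions made for this example.
+ */
+static ssize_t foo_p2pmem_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct foo_item *foo = to_foo_item(item);
+ int ret;
+
+ ret = pci_p2pdma_enable_store(page, &foo->p2p_dev, &foo->use_p2pmem);
+
+ return ret ? ret : count;
+}
+
+static ssize_t foo_p2pmem_show(struct config_item *item, char *page)
+{
+ struct foo_item *foo = to_foo_item(item);
+
+ return pci_p2pdma_enable_show(page, foo->p2p_dev, foo->use_p2pmem);
+}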
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index c2ab57705043..2a4aa6468579 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -519,6 +519,46 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
return PCI_POWER_ERROR;
}
+static struct acpi_device *acpi_pci_find_companion(struct device *dev);
+
+static bool acpi_pci_bridge_d3(struct pci_dev *dev)
+{
+ const struct fwnode_handle *fwnode;
+ struct acpi_device *adev;
+ struct pci_dev *root;
+ u8 val;
+
+ if (!dev->is_hotplug_bridge)
+ return false;
+
+ /*
+ * Look for a special _DSD property for the root port and if it
+ * is set we know the hierarchy behind it supports D3 just fine.
+ */
+ root = pci_find_pcie_root_port(dev);
+ if (!root)
+ return false;
+
+ adev = ACPI_COMPANION(&root->dev);
+ if (root == dev) {
+ /*
+ * It is possible that the ACPI companion is not yet bound
+ * for the root port so look it up manually here.
+ */
+ if (!adev && !pci_dev_is_added(root))
+ adev = acpi_pci_find_companion(&root->dev);
+ }
+
+ if (!adev)
+ return false;
+
+ fwnode = acpi_fwnode_handle(adev);
+ if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
+ return false;
+
+ return val == 1;
+}
+
static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
@@ -548,6 +588,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
error = -EBUSY;
break;
}
+ /* Fall through */
case PCI_D0:
case PCI_D1:
case PCI_D2:
@@ -635,6 +676,7 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
}
static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
+ .bridge_d3 = acpi_pci_bridge_d3,
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
.get_state = acpi_pci_get_power_state,
@@ -751,10 +793,15 @@ static void pci_acpi_setup(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct acpi_device *adev = ACPI_COMPANION(dev);
+ int node;
if (!adev)
return;
+ node = acpi_get_node(adev->handle);
+ if (node != NUMA_NO_NODE)
+ set_dev_node(dev, node);
+
pci_acpi_optimize_delay(pci_dev, adev->handle);
pci_acpi_add_pm_notifier(adev, pci_dev);
@@ -762,19 +809,33 @@ static void pci_acpi_setup(struct device *dev)
return;
device_set_wakeup_capable(dev, true);
+ /*
+ * For bridges that can do D3 we enable wake automatically (as
+ * we do for the power management itself in that case). The
+ * reason is that the bridge may have additional methods such as
+ * _DSW that need to be called.
+ */
+ if (pci_dev->bridge_d3)
+ device_wakeup_enable(dev);
+
acpi_pci_wakeup(pci_dev, false);
}
static void pci_acpi_cleanup(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct pci_dev *pci_dev = to_pci_dev(dev);
if (!adev)
return;
pci_acpi_remove_pm_notifier(adev);
- if (adev->wakeup.flags.valid)
+ if (adev->wakeup.flags.valid) {
+ if (pci_dev->bridge_d3)
+ device_wakeup_disable(dev);
+
device_set_wakeup_capable(dev, false);
+ }
}
static bool pci_acpi_bus_match(struct device *dev)
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
new file mode 100644
index 000000000000..129738362d90
--- /dev/null
+++ b/drivers/pci/pci-bridge-emul.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell
+ *
+ * Author: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+ *
+ * This file helps PCI controller drivers implement a fake root port
+ * PCI bridge when the HW doesn't provide such a root port PCI
+ * bridge.
+ *
+ * It emulates a PCI bridge by providing a fake PCI configuration
+ * space (and optionally a PCIe capability configuration space) in
+ * memory. By default the read/write operations simply read and update
+ * this fake configuration space in memory. However, PCI controller
+ * drivers can provide through the 'struct pci_bridge_emul_ops'
+ * structure a set of operations to override or complement this
+ * default behavior.
+ */
+
+#include <linux/pci.h>
+#include "pci-bridge-emul.h"
+
+#define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF
+#define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
+#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
+
+/*
+ * Initialize a pci_bridge_emul structure to represent a fake PCI
+ * bridge configuration space. The caller needs to have initialized
+ * the PCI configuration space with whatever values make sense
+ * (typically at least vendor, device, revision), the ->ops pointer,
+ * and optionally ->data and ->has_pcie.
+ */
+void pci_bridge_emul_init(struct pci_bridge_emul *bridge)
+{
+ bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
+ bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
+ bridge->conf.cache_line_size = 0x10;
+ bridge->conf.status = PCI_STATUS_CAP_LIST;
+
+ if (bridge->has_pcie) {
+ bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
+ bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
+ /* Set PCIe v2, root port, slot support */
+ bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
+ PCI_EXP_FLAGS_SLOT;
+ }
+}
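+
+/*
+ * Illustrative sketch: a host controller driver would typically embed
+ * a pci_bridge_emul in its private state and initialize it once at
+ * probe time. The foo_pcie structure, foo_bridge_ops and the IDs below
+ * are placeholders for this example.
+ */
+static void foo_setup_emul_bridge(struct foo_pcie *pcie)
+{
+ struct pci_bridge_emul *bridge = &pcie->bridge;
+
+ bridge->conf.vendor = 0x1234; /* placeholder vendor ID */
+ bridge->conf.device = 0x5678; /* placeholder device ID */
+ bridge->has_pcie = true;
+ bridge->ops = &foo_bridge_ops;
+ bridge->data = pcie;
+
+ pci_bridge_emul_init(bridge);
+}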
+
+struct pci_bridge_reg_behavior {
+ /* Read-only bits */
+ u32 ro;
+
+ /* Read-write bits */
+ u32 rw;
+
+ /* Write-1-to-clear bits */
+ u32 w1c;
+
+ /* Reserved bits (hardwired to 0) */
+ u32 rsvd;
+};
+
+static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
+ [PCI_VENDOR_ID / 4] = { .ro = ~0 },
+ [PCI_COMMAND / 4] = {
+ .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_PARITY |
+ PCI_COMMAND_SERR),
+ .ro = ((PCI_COMMAND_SPECIAL | PCI_COMMAND_INVALIDATE |
+ PCI_COMMAND_VGA_PALETTE | PCI_COMMAND_WAIT |
+ PCI_COMMAND_FAST_BACK) |
+ (PCI_STATUS_CAP_LIST | PCI_STATUS_66MHZ |
+ PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16),
+ .rsvd = GENMASK(15, 10) | ((BIT(6) | GENMASK(3, 0)) << 16),
+ .w1c = (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_DETECTED_PARITY) << 16,
+ },
+ [PCI_CLASS_REVISION / 4] = { .ro = ~0 },
+
+ /*
+ * Cache Line Size register: implemented as read-only, as we do
+ * not pretend to implement "Memory Write and Invalidate"
+ * transactions.
+ *
+ * Latency Timer Register: implemented as read-only, as "A
+ * bridge that is not capable of a burst transfer of more than
+ * two data phases on its primary interface is permitted to
+ * hardwire the Latency Timer to a value of 16 or less"
+ *
+ * Header Type: always read-only
+ *
+ * BIST register: implemented as read-only, as "A bridge that
+ * does not support BIST must implement this register as a
+ * read-only register that returns 0 when read"
+ */
+ [PCI_CACHE_LINE_SIZE / 4] = { .ro = ~0 },
+
+ /*
+ * Base Address registers not used must be implemented as
+ * read-only registers that return 0 when read.
+ */
+ [PCI_BASE_ADDRESS_0 / 4] = { .ro = ~0 },
+ [PCI_BASE_ADDRESS_1 / 4] = { .ro = ~0 },
+
+ [PCI_PRIMARY_BUS / 4] = {
+ /* Primary, secondary and subordinate bus are RW */
+ .rw = GENMASK(24, 0),
+ /* Secondary latency is read-only */
+ .ro = GENMASK(31, 24),
+ },
+
+ [PCI_IO_BASE / 4] = {
+ /* The high four bits of I/O base/limit are RW */
+ .rw = (GENMASK(15, 12) | GENMASK(7, 4)),
+
+ /* The low four bits of I/O base/limit are RO */
+ .ro = (((PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK |
+ PCI_STATUS_DEVSEL_MASK) << 16) |
+ GENMASK(11, 8) | GENMASK(3, 0)),
+
+ .w1c = (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_DETECTED_PARITY) << 16,
+
+ .rsvd = ((BIT(6) | GENMASK(4, 0)) << 16),
+ },
+
+ [PCI_MEMORY_BASE / 4] = {
+ /* The high 12-bits of mem base/limit are RW */
+ .rw = GENMASK(31, 20) | GENMASK(15, 4),
+
+ /* The low four bits of mem base/limit are RO */
+ .ro = GENMASK(19, 16) | GENMASK(3, 0),
+ },
+
+ [PCI_PREF_MEMORY_BASE / 4] = {
+ /* The high 12-bits of pref mem base/limit are RW */
+ .rw = GENMASK(31, 20) | GENMASK(15, 4),
+
+ /* The low four bits of pref mem base/limit are RO */
+ .ro = GENMASK(19, 16) | GENMASK(3, 0),
+ },
+
+ [PCI_PREF_BASE_UPPER32 / 4] = {
+ .rw = ~0,
+ },
+
+ [PCI_PREF_LIMIT_UPPER32 / 4] = {
+ .rw = ~0,
+ },
+
+ [PCI_IO_BASE_UPPER16 / 4] = {
+ .rw = ~0,
+ },
+
+ [PCI_CAPABILITY_LIST / 4] = {
+ .ro = GENMASK(7, 0),
+ .rsvd = GENMASK(31, 8),
+ },
+
+ [PCI_ROM_ADDRESS1 / 4] = {
+ .rw = GENMASK(31, 11) | BIT(0),
+ .rsvd = GENMASK(10, 1),
+ },
+
+ /*
+ * Interrupt line (bits 7:0) are RW, interrupt pin (bits 15:8)
+ * are RO, and bridge control (31:16) are a mix of RW, RO,
+ * reserved and W1C bits
+ */
+ [PCI_INTERRUPT_LINE / 4] = {
+ /* Interrupt line is RW */
+ .rw = (GENMASK(7, 0) |
+ ((PCI_BRIDGE_CTL_PARITY |
+ PCI_BRIDGE_CTL_SERR |
+ PCI_BRIDGE_CTL_ISA |
+ PCI_BRIDGE_CTL_VGA |
+ PCI_BRIDGE_CTL_MASTER_ABORT |
+ PCI_BRIDGE_CTL_BUS_RESET |
+ BIT(8) | BIT(9) | BIT(11)) << 16)),
+
+ /* Interrupt pin is RO */
+ .ro = (GENMASK(15, 8) | ((PCI_BRIDGE_CTL_FAST_BACK) << 16)),
+
+ .w1c = BIT(10) << 16,
+
+ .rsvd = (GENMASK(15, 12) | BIT(4)) << 16,
+ },
+};
+
+static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
+ [PCI_CAP_LIST_ID / 4] = {
+ /*
+ * Capability ID, Next Capability Pointer and
+ * Capabilities register are all read-only.
+ */
+ .ro = ~0,
+ },
+
+ [PCI_EXP_DEVCAP / 4] = {
+ .ro = ~0,
+ },
+
+ [PCI_EXP_DEVCTL / 4] = {
+ /* Device control register is RW */
+ .rw = GENMASK(15, 0),
+
+ /*
+ * Device status register has 4 bits W1C, then 2 bits
+ * RO, the rest is reserved
+ */
+ .w1c = GENMASK(19, 16),
+ .ro = GENMASK(21, 20),
+ .rsvd = GENMASK(31, 22),
+ },
+
+ [PCI_EXP_LNKCAP / 4] = {
+ /* All bits are RO, except bit 23 which is reserved */
+ .ro = lower_32_bits(~BIT(23)),
+ .rsvd = BIT(23),
+ },
+
+ [PCI_EXP_LNKCTL / 4] = {
+ /*
+ * Link control has bits [1:0] and [11:3] RW, the
+ * other bits are reserved.
+ * Link status has bits [13:0] RO, and bits [15:14]
+ * W1C.
+ */
+ .rw = GENMASK(11, 3) | GENMASK(1, 0),
+ .ro = GENMASK(13, 0) << 16,
+ .w1c = GENMASK(15, 14) << 16,
+ .rsvd = GENMASK(15, 12) | BIT(2),
+ },
+
+ [PCI_EXP_SLTCAP / 4] = {
+ .ro = ~0,
+ },
+
+ [PCI_EXP_SLTCTL / 4] = {
+ /*
+ * Slot control has bits [12:0] RW, the rest is
+ * reserved.
+ *
+ * Slot status has a mix of W1C and RO bits, as well
+ * as reserved bits.
+ */
+ .rw = GENMASK(12, 0),
+ .w1c = (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+ PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC) << 16,
+ .ro = (PCI_EXP_SLTSTA_MRLSS | PCI_EXP_SLTSTA_PDS |
+ PCI_EXP_SLTSTA_EIS) << 16,
+ .rsvd = GENMASK(15, 12) | (GENMASK(15, 9) << 16),
+ },
+
+ [PCI_EXP_RTCTL / 4] = {
+ /*
+ * Root control has bits [4:0] RW, the rest is
+ * reserved.
+ *
+ * Root capabilities has bit 0 RO, the rest is reserved.
+ */
+ .rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
+ PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE |
+ PCI_EXP_RTCTL_CRSSVE),
+ .ro = PCI_EXP_RTCAP_CRSVIS << 16,
+ .rsvd = GENMASK(15, 5) | (GENMASK(15, 1) << 16),
+ },
+
+ [PCI_EXP_RTSTA / 4] = {
+ .ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING,
+ .w1c = PCI_EXP_RTSTA_PME,
+ .rsvd = GENMASK(31, 18),
+ },
+};
+
+/*
+ * Should be called by the PCI controller driver when reading the PCI
+ * configuration space of the fake bridge. It will call back the
+ * ->ops->read_base or ->ops->read_pcie operations.
+ */
+int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
+ int size, u32 *value)
+{
+ int ret;
+ int reg = where & ~3;
+ pci_bridge_emul_read_status_t (*read_op)(struct pci_bridge_emul *bridge,
+ int reg, u32 *value);
+ u32 *cfgspace;
+ const struct pci_bridge_reg_behavior *behavior;
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END) {
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (!bridge->has_pcie && reg >= PCI_BRIDGE_CONF_END) {
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
+ reg -= PCI_CAP_PCIE_START;
+ read_op = bridge->ops->read_pcie;
+ cfgspace = (u32 *) &bridge->pcie_conf;
+ behavior = pcie_cap_regs_behavior;
+ } else {
+ read_op = bridge->ops->read_base;
+ cfgspace = (u32 *) &bridge->conf;
+ behavior = pci_regs_behavior;
+ }
+
+ if (read_op)
+ ret = read_op(bridge, reg, value);
+ else
+ ret = PCI_BRIDGE_EMUL_NOT_HANDLED;
+
+ if (ret == PCI_BRIDGE_EMUL_NOT_HANDLED)
+ *value = cfgspace[reg / 4];
+
+ /*
+ * Make sure we never return any reserved bit with a value
+ * different from 0.
+ */
+ *value &= ~behavior[reg / 4].rsvd;
+
+ if (size == 1)
+ *value = (*value >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *value = (*value >> (8 * (where & 3))) & 0xffff;
+ else if (size != 4)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Should be called by the PCI controller driver when writing the PCI
+ * configuration space of the fake bridge. It will call back the
+ * ->ops->write_base or ->ops->write_pcie operations.
+ */
+int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
+ int size, u32 value)
+{
+ int reg = where & ~3;
+ int ret, shift;
+ u32 mask, old, new;
+ void (*write_op)(struct pci_bridge_emul *bridge, int reg,
+ u32 old, u32 new, u32 mask);
+ u32 *cfgspace;
+ const struct pci_bridge_reg_behavior *behavior;
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END)
+ return PCIBIOS_SUCCESSFUL;
+
+ if (!bridge->has_pcie && reg >= PCI_BRIDGE_CONF_END)
+ return PCIBIOS_SUCCESSFUL;
+
+ shift = (where & 0x3) * 8;
+
+ if (size == 4)
+ mask = 0xffffffff;
+ else if (size == 2)
+ mask = 0xffff << shift;
+ else if (size == 1)
+ mask = 0xff << shift;
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ ret = pci_bridge_emul_conf_read(bridge, reg, 4, &old);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
+ reg -= PCI_CAP_PCIE_START;
+ write_op = bridge->ops->write_pcie;
+ cfgspace = (u32 *) &bridge->pcie_conf;
+ behavior = pcie_cap_regs_behavior;
+ } else {
+ write_op = bridge->ops->write_base;
+ cfgspace = (u32 *) &bridge->conf;
+ behavior = pci_regs_behavior;
+ }
+
+ /* Keep all bits, except the RW bits */
+ new = old & (~mask | ~behavior[reg / 4].rw);
+
+ /* Update the value of the RW bits */
+ new |= (value << shift) & (behavior[reg / 4].rw & mask);
+
+ /* Clear the W1C bits */
+ new &= ~((value << shift) & (behavior[reg / 4].w1c & mask));
+
+ cfgspace[reg / 4] = new;
+
+ if (write_op)
+ write_op(bridge, reg, old, new, mask);
+
+ return PCIBIOS_SUCCESSFUL;
+}
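+
+/*
+ * Illustrative sketch: a controller's config accessors can route
+ * accesses to the emulated root port through the helpers above and
+ * everything else to the real hardware. foo_pcie and
+ * foo_pcie_hw_rd_conf() are placeholders; the write path is
+ * symmetrical via pci_bridge_emul_conf_write().
+ */
+static int foo_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ struct foo_pcie *pcie = bus->sysdata;
+
+ /* Bus 0 only contains the emulated root port bridge */
+ if (bus->number == 0)
+ return pci_bridge_emul_conf_read(&pcie->bridge, where,
+ size, val);
+
+ return foo_pcie_hw_rd_conf(pcie, bus, devfn, where, size, val);
+}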
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
new file mode 100644
index 000000000000..9d510ccf738b
--- /dev/null
+++ b/drivers/pci/pci-bridge-emul.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PCI_BRIDGE_EMUL_H__
+#define __PCI_BRIDGE_EMUL_H__
+
+#include <linux/kernel.h>
+
+/* PCI configuration space of a PCI-to-PCI bridge. */
+struct pci_bridge_emul_conf {
+ u16 vendor;
+ u16 device;
+ u16 command;
+ u16 status;
+ u32 class_revision;
+ u8 cache_line_size;
+ u8 latency_timer;
+ u8 header_type;
+ u8 bist;
+ u32 bar[2];
+ u8 primary_bus;
+ u8 secondary_bus;
+ u8 subordinate_bus;
+ u8 secondary_latency_timer;
+ u8 iobase;
+ u8 iolimit;
+ u16 secondary_status;
+ u16 membase;
+ u16 memlimit;
+ u16 pref_mem_base;
+ u16 pref_mem_limit;
+ u32 prefbaseupper;
+ u32 preflimitupper;
+ u16 iobaseupper;
+ u16 iolimitupper;
+ u8 capabilities_pointer;
+ u8 reserve[3];
+ u32 romaddr;
+ u8 intline;
+ u8 intpin;
+ u16 bridgectrl;
+};
+
+/* PCI configuration space of the PCIe capabilities */
+struct pci_bridge_emul_pcie_conf {
+ u8 cap_id;
+ u8 next;
+ u16 cap;
+ u32 devcap;
+ u16 devctl;
+ u16 devsta;
+ u32 lnkcap;
+ u16 lnkctl;
+ u16 lnksta;
+ u32 slotcap;
+ u16 slotctl;
+ u16 slotsta;
+ u16 rootctl;
+ u16 rsvd;
+ u32 rootsta;
+ u32 devcap2;
+ u16 devctl2;
+ u16 devsta2;
+ u32 lnkcap2;
+ u16 lnkctl2;
+ u16 lnksta2;
+ u32 slotcap2;
+ u16 slotctl2;
+ u16 slotsta2;
+};
+
+struct pci_bridge_emul;
+
+typedef enum { PCI_BRIDGE_EMUL_HANDLED,
+ PCI_BRIDGE_EMUL_NOT_HANDLED } pci_bridge_emul_read_status_t;
+
+struct pci_bridge_emul_ops {
+ /*
+ * Called when reading from the regular PCI bridge
+ * configuration space. Return PCI_BRIDGE_EMUL_HANDLED when the
+ * operation has handled the read operation and filled in the
+ * *value, or PCI_BRIDGE_EMUL_NOT_HANDLED when the read should
+ * be emulated by the common code by reading from the
+ * in-memory copy of the configuration space.
+ */
+ pci_bridge_emul_read_status_t (*read_base)(struct pci_bridge_emul *bridge,
+ int reg, u32 *value);
+
+ /*
+ * Same as ->read_base(), except it is for reading from the
+ * PCIe capability configuration space.
+ */
+ pci_bridge_emul_read_status_t (*read_pcie)(struct pci_bridge_emul *bridge,
+ int reg, u32 *value);
+ /*
+ * Called when writing to the regular PCI bridge configuration
+ * space. old is the current value, new is the new value being
+ * written, and mask indicates which parts of the value are
+ * being changed.
+ */
+ void (*write_base)(struct pci_bridge_emul *bridge, int reg,
+ u32 old, u32 new, u32 mask);
+
+ /*
+ * Same as ->write_base(), except it is for writing to the
+ * PCIe capability configuration space.
+ */
+ void (*write_pcie)(struct pci_bridge_emul *bridge, int reg,
+ u32 old, u32 new, u32 mask);
+};
+
+struct pci_bridge_emul {
+ struct pci_bridge_emul_conf conf;
+ struct pci_bridge_emul_pcie_conf pcie_conf;
+ struct pci_bridge_emul_ops *ops;
+ void *data;
+ bool has_pcie;
+};
+
+void pci_bridge_emul_init(struct pci_bridge_emul *bridge);
+int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
+ int size, u32 *value);
+int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
+ int size, u32 value);
+
+#endif /* __PCI_BRIDGE_EMUL_H__ */
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 314e135014dc..30fbe2ea6eab 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -62,8 +62,8 @@ static const struct pci_platform_pm_ops mid_pci_platform_pm = {
* arch/x86/platform/intel-mid/pwr.c.
*/
static const struct x86_cpu_id lpss_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_PENWELL),
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD),
+ ICPU(INTEL_FAM6_ATOM_SALTWELL_MID),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID),
{}
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1835f3a7aa8d..d068f11d08a7 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -35,6 +35,8 @@
#include <linux/aer.h>
#include "pci.h"
+DEFINE_MUTEX(pci_slot_mutex);
+
const char *pci_power_names[] = {
"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
@@ -196,7 +198,7 @@ EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
/**
* pci_dev_str_match_path - test if a path string matches a device
* @dev: the PCI device to test
- * @p: string to match the device against
+ * @path: string to match the device against
* @endptr: pointer to the string after the match
*
* Test if a string (typically from a kernel parameter) formatted as a
@@ -791,6 +793,11 @@ static inline bool platform_pci_need_resume(struct pci_dev *dev)
return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}
+static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
+{
+ return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
+}
+
/**
* pci_raw_set_power_state - Use PCI PM registers to set the power state of
* given PCI device
@@ -999,7 +1006,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
* because have already delayed for the bridge.
*/
if (dev->runtime_d3cold) {
- if (dev->d3cold_delay)
+ if (dev->d3cold_delay && !dev->imm_ready)
msleep(dev->d3cold_delay);
/*
* When powering on a bridge from D3cold, the
@@ -1284,17 +1291,18 @@ int pci_save_state(struct pci_dev *dev)
if (i != 0)
return i;
+ pci_save_dpc_state(dev);
return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
- u32 saved_val, int retry)
+ u32 saved_val, int retry, bool force)
{
u32 val;
pci_read_config_dword(pdev, offset, &val);
- if (val == saved_val)
+ if (!force && val == saved_val)
return;
for (;;) {
@@ -1313,25 +1321,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
}
static void pci_restore_config_space_range(struct pci_dev *pdev,
- int start, int end, int retry)
+ int start, int end, int retry,
+ bool force)
{
int index;
for (index = end; index >= start; index--)
pci_restore_config_dword(pdev, 4 * index,
pdev->saved_config_space[index],
- retry);
+ retry, force);
}
static void pci_restore_config_space(struct pci_dev *pdev)
{
if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
- pci_restore_config_space_range(pdev, 10, 15, 0);
+ pci_restore_config_space_range(pdev, 10, 15, 0, false);
/* Restore BARs before the command register. */
- pci_restore_config_space_range(pdev, 4, 9, 10);
- pci_restore_config_space_range(pdev, 0, 3, 0);
+ pci_restore_config_space_range(pdev, 4, 9, 10, false);
+ pci_restore_config_space_range(pdev, 0, 3, 0, false);
+ } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+ /*
+ * Force rewriting of prefetch registers to avoid S3 resume
+ * issues on Intel PCI bridges that occur when these
+ * registers are not explicitly written.
+ */
+ pci_restore_config_space_range(pdev, 9, 11, 0, true);
+ pci_restore_config_space_range(pdev, 0, 8, 0, false);
} else {
- pci_restore_config_space_range(pdev, 0, 15, 0);
+ pci_restore_config_space_range(pdev, 0, 15, 0, false);
}
}
@@ -1378,6 +1397,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
pci_restore_rebar_state(dev);
+ pci_restore_dpc_state(dev);
pci_cleanup_aer_error_status_regs(dev);
@@ -2133,10 +2153,13 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable
int ret = 0;
/*
- * Bridges can only signal wakeup on behalf of subordinate devices,
- * but that is set up elsewhere, so skip them.
+ * Bridges that are not power-manageable directly only signal
+ * wakeup on behalf of subordinate devices which is set up
+ * elsewhere, so skip them. However, bridges that are
+ * power-manageable may signal wakeup for themselves (for example,
+ * on a hotplug event) and they need to be covered here.
*/
- if (pci_has_subordinate(dev))
+ if (!pci_power_manageable(dev))
return 0;
/* Don't do the same thing twice in a row for one device. */
@@ -2511,6 +2534,10 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
if (bridge->is_thunderbolt)
return true;
+ /* Platform might know better if the bridge supports D3 */
+ if (platform_pci_bridge_d3(bridge))
+ return true;
+
/*
* Hotplug ports handled natively by the OS were not validated
* by vendors for runtime D3 at least until 2018 because there
@@ -2644,6 +2671,7 @@ EXPORT_SYMBOL_GPL(pci_d3cold_disable);
void pci_pm_init(struct pci_dev *dev)
{
int pm;
+ u16 status;
u16 pmc;
pm_runtime_forbid(&dev->dev);
@@ -2706,6 +2734,10 @@ void pci_pm_init(struct pci_dev *dev)
/* Disable the PME# generation functionality */
pci_pme_active(dev, false);
}
+
+ pci_read_config_word(dev, PCI_STATUS, &status);
+ if (status & PCI_STATUS_IMM_READY)
+ dev->imm_ready = 1;
}
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
@@ -4376,6 +4408,9 @@ int pcie_flr(struct pci_dev *dev)
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ if (dev->imm_ready)
+ return 0;
+
/*
* Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
* 100ms, but may silently discard requests while the FLR is in
@@ -4417,6 +4452,9 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
+ if (dev->imm_ready)
+ return 0;
+
/*
* Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
* updated 27 July 2006; a device must complete an FLR within
@@ -4485,21 +4523,42 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
bool ret;
u16 lnk_status;
+ /*
+ * Some controllers might not implement link active reporting. In this
+ * case, we wait for 1000 + 100 ms.
+ */
+ if (!pdev->link_active_reporting) {
+ msleep(1100);
+ return true;
+ }
+
+ /*
+ * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
+ * after which we should expect the link to become active if the reset
+ * was successful. If so, software must wait a minimum of 100ms before
+ * sending configuration requests to devices downstream of this port.
+ *
+ * If the link fails to activate, either the device was physically
+ * removed or the link is permanently failed.
+ */
+ if (active)
+ msleep(20);
for (;;) {
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
if (ret == active)
- return true;
+ break;
if (timeout <= 0)
break;
msleep(10);
timeout -= 10;
}
-
- pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
- active ? "set" : "cleared");
-
- return false;
+ if (active && ret)
+ msleep(100);
+ else if (ret != active)
+ pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
+ active ? "set" : "cleared");
+ return ret == active;
}
void pci_reset_secondary_bus(struct pci_dev *dev)
@@ -4571,13 +4630,13 @@ static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
{
int rc = -ENOTTY;
- if (!hotplug || !try_module_get(hotplug->ops->owner))
+ if (!hotplug || !try_module_get(hotplug->owner))
return rc;
if (hotplug->ops->reset_slot)
rc = hotplug->ops->reset_slot(hotplug, probe);
- module_put(hotplug->ops->owner);
+ module_put(hotplug->owner);
return rc;
}
@@ -5154,6 +5213,41 @@ static int pci_bus_reset(struct pci_bus *bus, int probe)
}
/**
+ * pci_bus_error_reset - reset the bridge's subordinate bus
+ * @bridge: The parent device that connects to the bus to reset
+ *
+ * This function will first try to reset the slots on this bus if the method is
+ * available. If slot reset fails or is not available, this will fall back to a
+ * secondary bus reset.
+ */
+int pci_bus_error_reset(struct pci_dev *bridge)
+{
+ struct pci_bus *bus = bridge->subordinate;
+ struct pci_slot *slot;
+
+ if (!bus)
+ return -ENOTTY;
+
+ mutex_lock(&pci_slot_mutex);
+ if (list_empty(&bus->slots))
+ goto bus_reset;
+
+ list_for_each_entry(slot, &bus->slots, list)
+ if (pci_probe_reset_slot(slot))
+ goto bus_reset;
+
+ list_for_each_entry(slot, &bus->slots, list)
+ if (pci_slot_reset(slot, 0))
+ goto bus_reset;
+
+ mutex_unlock(&pci_slot_mutex);
+ return 0;
+bus_reset:
+ mutex_unlock(&pci_slot_mutex);
+ return pci_bus_reset(bridge->subordinate, 0);
+}
+
+/**
* pci_probe_reset_bus - probe whether a PCI bus can be reset
* @bus: PCI bus to probe
*
@@ -5690,8 +5784,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
{
if (!dev->dma_alias_mask)
- dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
- sizeof(long), GFP_KERNEL);
+ dev->dma_alias_mask = bitmap_zalloc(U8_MAX, GFP_KERNEL);
if (!dev->dma_alias_mask) {
pci_warn(dev, "Unable to allocate DMA alias mask\n");
return;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6e0d1528d471..662b7457db23 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -35,10 +35,13 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
int pci_probe_reset_function(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
+int pci_bus_error_reset(struct pci_dev *dev);
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
*
+ * @bridge_d3: Does the bridge allow entering D3
+ *
* @is_manageable: returns 'true' if given device is power manageable by the
* platform firmware
*
@@ -60,6 +63,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
* these callbacks are mandatory.
*/
struct pci_platform_pm_ops {
+ bool (*bridge_d3)(struct pci_dev *dev);
bool (*is_manageable)(struct pci_dev *dev);
int (*set_state)(struct pci_dev *dev, pci_power_t state);
pci_power_t (*get_state)(struct pci_dev *dev);
@@ -136,6 +140,7 @@ static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
/* Lock for read/write access to pci device and bus lists */
extern struct rw_semaphore pci_bus_sem;
+extern struct mutex pci_slot_mutex;
extern raw_spinlock_t pci_lock;
@@ -285,6 +290,7 @@ struct pci_sriov {
u16 driver_max_VFs; /* Max num VFs driver supports */
struct pci_dev *dev; /* Lowest numbered PF */
struct pci_dev *self; /* This PF */
+ u32 cfg_size; /* VF config space size */
u32 class; /* VF device */
u8 hdr_type; /* VF header type */
u16 subsystem_vendor; /* VF subsystem vendor */
@@ -293,21 +299,71 @@ struct pci_sriov {
bool drivers_autoprobe; /* Auto probing of VFs by driver */
};
-/* pci_dev priv_flags */
-#define PCI_DEV_DISCONNECTED 0
-#define PCI_DEV_ADDED 1
+/**
+ * pci_dev_set_io_state - Set the new error state if possible.
+ *
+ * @dev: PCI device to set new error_state on
+ * @new: the state we want dev to be in
+ *
+ * Must be called with device_lock held.
+ *
+ * Returns true if state has been changed to the requested state.
+ */
+static inline bool pci_dev_set_io_state(struct pci_dev *dev,
+ pci_channel_state_t new)
+{
+ bool changed = false;
+
+ device_lock_assert(&dev->dev);
+ switch (new) {
+ case pci_channel_io_perm_failure:
+ switch (dev->error_state) {
+ case pci_channel_io_frozen:
+ case pci_channel_io_normal:
+ case pci_channel_io_perm_failure:
+ changed = true;
+ break;
+ }
+ break;
+ case pci_channel_io_frozen:
+ switch (dev->error_state) {
+ case pci_channel_io_frozen:
+ case pci_channel_io_normal:
+ changed = true;
+ break;
+ }
+ break;
+ case pci_channel_io_normal:
+ switch (dev->error_state) {
+ case pci_channel_io_frozen:
+ case pci_channel_io_normal:
+ changed = true;
+ break;
+ }
+ break;
+ }
+ if (changed)
+ dev->error_state = new;
+ return changed;
+}
static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
{
- set_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
+ device_lock(&dev->dev);
+ pci_dev_set_io_state(dev, pci_channel_io_perm_failure);
+ device_unlock(&dev->dev);
+
return 0;
}
static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
{
- return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
+ return dev->error_state == pci_channel_io_perm_failure;
}
+/* pci_dev priv_flags */
+#define PCI_DEV_ADDED 0
+
static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
{
assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added);
@@ -346,6 +402,14 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
#endif /* CONFIG_PCIEAER */
+#ifdef CONFIG_PCIE_DPC
+void pci_save_dpc_state(struct pci_dev *dev);
+void pci_restore_dpc_state(struct pci_dev *dev);
+#else
+static inline void pci_save_dpc_state(struct pci_dev *dev) {}
+static inline void pci_restore_dpc_state(struct pci_dev *dev) {}
+#endif
+
#ifdef CONFIG_PCI_ATS
void pci_restore_ats_state(struct pci_dev *dev);
#else
@@ -423,8 +487,8 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
#endif
/* PCI error reporting and recovery */
-void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service);
-void pcie_do_nonfatal_recovery(struct pci_dev *dev);
+void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
+ u32 service);
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 0a1e9d379bc5..44742b2e1126 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -36,7 +36,6 @@ config PCIEAER
config PCIEAER_INJECT
tristate "PCI Express error injection support"
depends on PCIEAER
- default n
help
This enables PCI Express Root Port Advanced Error Reporting
(AER) software error injector.
@@ -84,7 +83,6 @@ config PCIEASPM
config PCIEASPM_DEBUG
bool "Debug PCI Express ASPM"
depends on PCIEASPM
- default n
help
This enables PCI Express ASPM debug support. It will add per-device
interface to control ASPM.
@@ -129,7 +127,6 @@ config PCIE_PME
config PCIE_DPC
bool "PCI Express Downstream Port Containment support"
depends on PCIEPORTBUS && PCIEAER
- default n
help
This enables PCI Express Downstream Port Containment (DPC)
driver support. DPC events from Root and Downstream ports
@@ -139,7 +136,6 @@ config PCIE_DPC
config PCIE_PTM
bool "PCI Express Precision Time Measurement support"
- default n
depends on PCIEPORTBUS
help
This enables PCI Express Precision Time Measurement (PTM)
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 83180edd6ed4..a90a9194ac4a 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -30,7 +30,7 @@
#include "../pci.h"
#include "portdrv.h"
-#define AER_ERROR_SOURCES_MAX 100
+#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
#define AER_MAX_TYPEOF_UNCOR_ERRS 26 /* as per PCI_ERR_UNCOR_STATUS*/
@@ -42,21 +42,7 @@ struct aer_err_source {
struct aer_rpc {
struct pci_dev *rpd; /* Root Port device */
- struct work_struct dpc_handler;
- struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
- struct aer_err_info e_info;
- unsigned short prod_idx; /* Error Producer Index */
- unsigned short cons_idx; /* Error Consumer Index */
- int isr;
- spinlock_t e_lock; /*
- * Lock access to Error Status/ID Regs
- * and error producer/consumer index
- */
- struct mutex rpc_mutex; /*
- * only one thread could do
- * recovery on the same
- * root port hierarchy
- */
+ DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};
/* AER stats for the device */
@@ -866,7 +852,7 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
- e_info->dev[e_info->error_dev_num] = dev;
+ e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
e_info->error_dev_num++;
return 0;
}
@@ -1010,9 +996,12 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
info->status);
pci_aer_clear_device_status(dev);
} else if (info->severity == AER_NONFATAL)
- pcie_do_nonfatal_recovery(dev);
+ pcie_do_recovery(dev, pci_channel_io_normal,
+ PCIE_PORT_SERVICE_AER);
else if (info->severity == AER_FATAL)
- pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
+ pcie_do_recovery(dev, pci_channel_io_frozen,
+ PCIE_PORT_SERVICE_AER);
+ pci_dev_put(dev);
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -1047,9 +1036,11 @@ static void aer_recover_work_func(struct work_struct *work)
}
cper_print_aer(pdev, entry.severity, entry.regs);
if (entry.severity == AER_NONFATAL)
- pcie_do_nonfatal_recovery(pdev);
+ pcie_do_recovery(pdev, pci_channel_io_normal,
+ PCIE_PORT_SERVICE_AER);
else if (entry.severity == AER_FATAL)
- pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER);
+ pcie_do_recovery(pdev, pci_channel_io_frozen,
+ PCIE_PORT_SERVICE_AER);
pci_dev_put(pdev);
}
}
@@ -1065,7 +1056,6 @@ static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
int severity, struct aer_capability_regs *aer_regs)
{
- unsigned long flags;
struct aer_recover_entry entry = {
.bus = bus,
.devfn = devfn,
@@ -1074,13 +1064,12 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
.regs = aer_regs,
};
- spin_lock_irqsave(&aer_recover_ring_lock, flags);
- if (kfifo_put(&aer_recover_ring, entry))
+ if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1,
+ &aer_recover_ring_lock))
schedule_work(&aer_recover_work);
else
pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
#endif
@@ -1115,8 +1104,9 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
&info->mask);
if (!(info->status & ~info->mask))
return 0;
- } else if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- info->severity == AER_NONFATAL) {
+ } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
+ info->severity == AER_NONFATAL) {
/* Link is still healthy for IO reads */
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
@@ -1170,7 +1160,7 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
struct aer_err_source *e_src)
{
struct pci_dev *pdev = rpc->rpd;
- struct aer_err_info *e_info = &rpc->e_info;
+ struct aer_err_info e_info;
pci_rootport_aer_stats_incr(pdev, e_src);
@@ -1179,83 +1169,57 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
* uncorrectable error being logged. Report correctable error first.
*/
if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
- e_info->id = ERR_COR_ID(e_src->id);
- e_info->severity = AER_CORRECTABLE;
+ e_info.id = ERR_COR_ID(e_src->id);
+ e_info.severity = AER_CORRECTABLE;
if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
- e_info->multi_error_valid = 1;
+ e_info.multi_error_valid = 1;
else
- e_info->multi_error_valid = 0;
- aer_print_port_info(pdev, e_info);
+ e_info.multi_error_valid = 0;
+ aer_print_port_info(pdev, &e_info);
- if (find_source_device(pdev, e_info))
- aer_process_err_devices(e_info);
+ if (find_source_device(pdev, &e_info))
+ aer_process_err_devices(&e_info);
}
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
- e_info->id = ERR_UNCOR_ID(e_src->id);
+ e_info.id = ERR_UNCOR_ID(e_src->id);
if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- e_info->severity = AER_FATAL;
+ e_info.severity = AER_FATAL;
else
- e_info->severity = AER_NONFATAL;
+ e_info.severity = AER_NONFATAL;
if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
- e_info->multi_error_valid = 1;
+ e_info.multi_error_valid = 1;
else
- e_info->multi_error_valid = 0;
+ e_info.multi_error_valid = 0;
- aer_print_port_info(pdev, e_info);
+ aer_print_port_info(pdev, &e_info);
- if (find_source_device(pdev, e_info))
- aer_process_err_devices(e_info);
+ if (find_source_device(pdev, &e_info))
+ aer_process_err_devices(&e_info);
}
}
/**
- * get_e_source - retrieve an error source
- * @rpc: pointer to the root port which holds an error
- * @e_src: pointer to store retrieved error source
- *
- * Return 1 if an error source is retrieved, otherwise 0.
- *
- * Invoked by DPC handler to consume an error.
- */
-static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
-{
- unsigned long flags;
-
- /* Lock access to Root error producer/consumer index */
- spin_lock_irqsave(&rpc->e_lock, flags);
- if (rpc->prod_idx == rpc->cons_idx) {
- spin_unlock_irqrestore(&rpc->e_lock, flags);
- return 0;
- }
-
- *e_src = rpc->e_sources[rpc->cons_idx];
- rpc->cons_idx++;
- if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
- rpc->cons_idx = 0;
- spin_unlock_irqrestore(&rpc->e_lock, flags);
-
- return 1;
-}
-
-/**
* aer_isr - consume errors detected by root port
* @work: definition of this work item
*
* Invoked, as DPC, when root port records new detected error
*/
-static void aer_isr(struct work_struct *work)
+static irqreturn_t aer_isr(int irq, void *context)
{
- struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
+ struct pcie_device *dev = (struct pcie_device *)context;
+ struct aer_rpc *rpc = get_service_data(dev);
struct aer_err_source uninitialized_var(e_src);
- mutex_lock(&rpc->rpc_mutex);
- while (get_e_source(rpc, &e_src))
+ if (kfifo_is_empty(&rpc->aer_fifo))
+ return IRQ_NONE;
+
+ while (kfifo_get(&rpc->aer_fifo, &e_src))
aer_isr_one_error(rpc, &e_src);
- mutex_unlock(&rpc->rpc_mutex);
+ return IRQ_HANDLED;
}
/**
@@ -1265,56 +1229,26 @@ static void aer_isr(struct work_struct *work)
*
* Invoked when Root Port detects AER messages.
*/
-irqreturn_t aer_irq(int irq, void *context)
+static irqreturn_t aer_irq(int irq, void *context)
{
- unsigned int status, id;
struct pcie_device *pdev = (struct pcie_device *)context;
struct aer_rpc *rpc = get_service_data(pdev);
- int next_prod_idx;
- unsigned long flags;
- int pos;
-
- pos = pdev->port->aer_cap;
- /*
- * Must lock access to Root Error Status Reg, Root Error ID Reg,
- * and Root error producer/consumer index
- */
- spin_lock_irqsave(&rpc->e_lock, flags);
+ struct pci_dev *rp = rpc->rpd;
+ struct aer_err_source e_src = {};
+ int pos = rp->aer_cap;
- /* Read error status */
- pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status);
- if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) {
- spin_unlock_irqrestore(&rpc->e_lock, flags);
+ pci_read_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, &e_src.status);
+ if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV)))
return IRQ_NONE;
- }
- /* Read error source and clear error status */
- pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id);
- pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status);
+ pci_read_config_dword(rp, pos + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
+ pci_write_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, e_src.status);
- /* Store error source for later DPC handler */
- next_prod_idx = rpc->prod_idx + 1;
- if (next_prod_idx == AER_ERROR_SOURCES_MAX)
- next_prod_idx = 0;
- if (next_prod_idx == rpc->cons_idx) {
- /*
- * Error Storm Condition - possibly the same error occurred.
- * Drop the error.
- */
- spin_unlock_irqrestore(&rpc->e_lock, flags);
+ if (!kfifo_put(&rpc->aer_fifo, e_src))
return IRQ_HANDLED;
- }
- rpc->e_sources[rpc->prod_idx].status = status;
- rpc->e_sources[rpc->prod_idx].id = id;
- rpc->prod_idx = next_prod_idx;
- spin_unlock_irqrestore(&rpc->e_lock, flags);
-
- /* Invoke DPC handler */
- schedule_work(&rpc->dpc_handler);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
-EXPORT_SYMBOL_GPL(aer_irq);
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
@@ -1423,33 +1357,6 @@ static void aer_disable_rootport(struct aer_rpc *rpc)
}
/**
- * aer_alloc_rpc - allocate Root Port data structure
- * @dev: pointer to the pcie_dev data structure
- *
- * Invoked when Root Port's AER service is loaded.
- */
-static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
-{
- struct aer_rpc *rpc;
-
- rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL);
- if (!rpc)
- return NULL;
-
- /* Initialize Root lock access, e_lock, to Root Error Status Reg */
- spin_lock_init(&rpc->e_lock);
-
- rpc->rpd = dev->port;
- INIT_WORK(&rpc->dpc_handler, aer_isr);
- mutex_init(&rpc->rpc_mutex);
-
- /* Use PCIe bus function to store rpc into PCIe device */
- set_service_data(dev, rpc);
-
- return rpc;
-}
-
-/**
* aer_remove - clean up resources
* @dev: pointer to the pcie_dev data structure
*
@@ -1459,16 +1366,7 @@ static void aer_remove(struct pcie_device *dev)
{
struct aer_rpc *rpc = get_service_data(dev);
- if (rpc) {
- /* If register interrupt service, it must be free. */
- if (rpc->isr)
- free_irq(dev->irq, dev);
-
- flush_work(&rpc->dpc_handler);
- aer_disable_rootport(rpc);
- kfree(rpc);
- set_service_data(dev, NULL);
- }
+ aer_disable_rootport(rpc);
}
/**
@@ -1481,27 +1379,24 @@ static int aer_probe(struct pcie_device *dev)
{
int status;
struct aer_rpc *rpc;
- struct device *device = &dev->port->dev;
+ struct device *device = &dev->device;
- /* Alloc rpc data structure */
- rpc = aer_alloc_rpc(dev);
+ rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL);
if (!rpc) {
dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
- aer_remove(dev);
return -ENOMEM;
}
+ rpc->rpd = dev->port;
+ set_service_data(dev, rpc);
- /* Request IRQ ISR */
- status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
+ status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
+ IRQF_SHARED, "aerdrv", dev);
if (status) {
dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
dev->irq);
- aer_remove(dev);
return status;
}
- rpc->isr = 1;
-
aer_enable_rootport(rpc);
dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
return 0;
@@ -1526,7 +1421,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- rc = pci_bridge_secondary_bus_reset(dev);
+ rc = pci_bus_error_reset(dev);
pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
@@ -1541,18 +1436,6 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
-/**
- * aer_error_resume - clean up corresponding error status bits
- * @dev: pointer to Root Port's pci_dev data structure
- *
- * Invoked by Port Bus driver during nonfatal recovery.
- */
-static void aer_error_resume(struct pci_dev *dev)
-{
- pci_aer_clear_device_status(dev);
- pci_cleanup_aer_uncorrect_error_status(dev);
-}
-
static struct pcie_port_service_driver aerdriver = {
.name = "aer",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
@@ -1560,7 +1443,6 @@ static struct pcie_port_service_driver aerdriver = {
.probe = aer_probe,
.remove = aer_remove,
- .error_resume = aer_error_resume,
.reset_link = aer_root_reset,
};
@@ -1569,10 +1451,9 @@ static struct pcie_port_service_driver aerdriver = {
*
* Invoked when AER root service driver is loaded.
*/
-static int __init aer_service_init(void)
+int __init pcie_aer_init(void)
{
if (!pci_aer_available() || aer_acpi_firmware_first())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
-device_initcall(aer_service_init);
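The AER rework above replaces the hand-rolled ring buffer (prod_idx/cons_idx guarded by e_lock, drained from a workqueue) with a kfifo drained by a threaded IRQ: the hard handler queues with kfifo_put() and returns IRQ_WAKE_THREAD, the thread drains with kfifo_get(). A minimal sketch of that pattern with hypothetical names (my_dev, my_event); the fifo must be set up with INIT_KFIFO() before the IRQ is requested:

	#include <linux/interrupt.h>
	#include <linux/kfifo.h>

	struct my_event { u32 status, id; };

	struct my_dev {
		DECLARE_KFIFO(fifo, struct my_event, 32);  /* size: power of two */
	};

	/* Hard IRQ half: queue the event.  With a single producer and a
	 * single consumer, kfifo_put()/kfifo_get() need no extra locking.
	 */
	static irqreturn_t my_irq(int irq, void *context)
	{
		struct my_dev *d = context;
		struct my_event e = { .status = 1 };

		if (!kfifo_put(&d->fifo, e))
			return IRQ_HANDLED;	/* full: drop it, thread is behind */
		return IRQ_WAKE_THREAD;
	}

	/* Threaded half: drain everything queued since we were woken. */
	static irqreturn_t my_thread(int irq, void *context)
	{
		struct my_dev *d = context;
		struct my_event e;

		if (kfifo_is_empty(&d->fifo))
			return IRQ_NONE;
		while (kfifo_get(&d->fifo, &e))
			;	/* process e */
		return IRQ_HANDLED;
	}

Registration pairs the two halves exactly as aer_probe() does above: devm_request_threaded_irq(device, irq, my_irq, my_thread, IRQF_SHARED, "mydrv", d).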
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 0eb24346cad3..95d4759664b3 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -175,14 +176,48 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
return target;
}
+static int aer_inj_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ struct pci_ops *ops, *my_ops;
+ int rv;
+
+ ops = __find_pci_bus_ops(bus);
+ if (!ops)
+ return -1;
+
+ my_ops = bus->ops;
+ bus->ops = ops;
+ rv = ops->read(bus, devfn, where, size, val);
+ bus->ops = my_ops;
+
+ return rv;
+}
+
+static int aer_inj_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
+{
+ struct pci_ops *ops, *my_ops;
+ int rv;
+
+ ops = __find_pci_bus_ops(bus);
+ if (!ops)
+ return -1;
+
+ my_ops = bus->ops;
+ bus->ops = ops;
+ rv = ops->write(bus, devfn, where, size, val);
+ bus->ops = my_ops;
+
+ return rv;
+}
+
static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
u32 *sim;
struct aer_error *err;
unsigned long flags;
- struct pci_ops *ops;
- struct pci_ops *my_ops;
int domain;
int rv;
@@ -203,18 +238,7 @@ static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
return 0;
}
out:
- ops = __find_pci_bus_ops(bus);
- /*
- * pci_lock must already be held, so we can directly
- * manipulate bus->ops. Many config access functions,
- * including pci_generic_config_read() require the original
- * bus->ops be installed to function, so temporarily put them
- * back.
- */
- my_ops = bus->ops;
- bus->ops = ops;
- rv = ops->read(bus, devfn, where, size, val);
- bus->ops = my_ops;
+ rv = aer_inj_read(bus, devfn, where, size, val);
spin_unlock_irqrestore(&inject_lock, flags);
return rv;
}
@@ -226,8 +250,6 @@ static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
struct aer_error *err;
unsigned long flags;
int rw1cs;
- struct pci_ops *ops;
- struct pci_ops *my_ops;
int domain;
int rv;
@@ -251,18 +273,7 @@ static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
return 0;
}
out:
- ops = __find_pci_bus_ops(bus);
- /*
- * pci_lock must already be held, so we can directly
- * manipulate bus->ops. Many config access functions,
- * including pci_generic_config_write() require the original
- * bus->ops be installed to function, so temporarily put them
- * back.
- */
- my_ops = bus->ops;
- bus->ops = ops;
- rv = ops->write(bus, devfn, where, size, val);
- bus->ops = my_ops;
+ rv = aer_inj_write(bus, devfn, where, size, val);
spin_unlock_irqrestore(&inject_lock, flags);
return rv;
}
@@ -303,32 +314,13 @@ out:
return 0;
}
-static int find_aer_device_iter(struct device *device, void *data)
-{
- struct pcie_device **result = data;
- struct pcie_device *pcie_dev;
-
- if (device->bus == &pcie_port_bus_type) {
- pcie_dev = to_pcie_device(device);
- if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
- *result = pcie_dev;
- return 1;
- }
- }
- return 0;
-}
-
-static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
-{
- return device_for_each_child(&dev->dev, result, find_aer_device_iter);
-}
-
static int aer_inject(struct aer_error_inj *einj)
{
struct aer_error *err, *rperr;
struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
struct pci_dev *dev, *rpdev;
struct pcie_device *edev;
+ struct device *device;
unsigned long flags;
unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
int pos_cap_err, rp_pos_cap_err;
@@ -464,7 +456,9 @@ static int aer_inject(struct aer_error_inj *einj)
if (ret)
goto out_put;
- if (find_aer_device(rpdev, &edev)) {
+ device = pcie_port_find_device(rpdev, PCIE_PORT_SERVICE_AER);
+ if (device) {
+ edev = to_pcie_device(device);
if (!get_service_data(edev)) {
dev_warn(&edev->device,
"aer_inject: AER service is not initialized\n");
@@ -474,7 +468,9 @@ static int aer_inject(struct aer_error_inj *einj)
dev_info(&edev->device,
"aer_inject: Injecting errors %08x/%08x into device %s\n",
einj->cor_status, einj->uncor_status, pci_name(dev));
- aer_irq(-1, edev);
+ local_irq_disable();
+ generic_handle_irq(edev->irq);
+ local_irq_enable();
} else {
pci_err(rpdev, "aer_inject: AER device not found\n");
ret = -ENODEV;
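Two aer_inject.c changes ride together above: the bus->ops save/swap/restore sequence moves into the aer_inj_read()/aer_inj_write() helpers (the invariant being that the original bus->ops is always reinstated, since pci_generic_config_read()/write() depend on it), and injection now raises the real IRQ with generic_handle_irq() instead of calling aer_irq() directly, which is no longer exported. generic_handle_irq() must run with interrupts off, hence the local_irq_disable()/local_irq_enable() bracket; a minimal sketch of that bracket as a helper:

	#include <linux/irq.h>

	/* Simulate a hardware interrupt on an IRQ line that a driver has
	 * already requested; the handler chain runs as if the device had
	 * asserted the line.
	 */
	static void inject_irq(unsigned int irq)
	{
		local_irq_disable();
		generic_handle_irq(irq);
		local_irq_enable();
	}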
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5326916715d2..dcb29cb76dc6 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -895,7 +895,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
struct pcie_link_state *link;
int blacklist = !!pcie_aspm_sanity_check(pdev);
- if (!aspm_support_enabled)
+ if (!aspm_support_enabled || aspm_disabled)
return;
if (pdev->link_state)
@@ -991,7 +991,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
 * All PCIe functions are in one slot; removing one function removes
 * the whole slot, so just wait until we are the last function left.
*/
- if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
+ if (!list_empty(&parent->subordinate->devices))
goto out;
link = parent->link_state;
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index f03279fc87cd..e435d12e61a0 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -44,6 +44,58 @@ static const char * const rp_pio_error_string[] = {
"Memory Request Completion Timeout", /* Bit Position 18 */
};
+static struct dpc_dev *to_dpc_dev(struct pci_dev *dev)
+{
+ struct device *device;
+
+ device = pcie_port_find_device(dev, PCIE_PORT_SERVICE_DPC);
+ if (!device)
+ return NULL;
+ return get_service_data(to_pcie_device(device));
+}
+
+void pci_save_dpc_state(struct pci_dev *dev)
+{
+ struct dpc_dev *dpc;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ dpc = to_dpc_dev(dev);
+ if (!dpc)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
+ if (!save_state)
+ return;
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pci_read_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, cap);
+}
+
+void pci_restore_dpc_state(struct pci_dev *dev)
+{
+ struct dpc_dev *dpc;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ dpc = to_dpc_dev(dev);
+ if (!dpc)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
+ if (!save_state)
+ return;
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pci_write_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, *cap);
+}
+
static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
{
unsigned long timeout = jiffies + HZ;
@@ -67,18 +119,13 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
struct dpc_dev *dpc;
- struct pcie_device *pciedev;
- struct device *devdpc;
-
u16 cap;
/*
* DPC disables the Link automatically in hardware, so it has
* already been reset by the time we get here.
*/
- devdpc = pcie_port_find_device(pdev, PCIE_PORT_SERVICE_DPC);
- pciedev = to_pcie_device(devdpc);
- dpc = get_service_data(pciedev);
+ dpc = to_dpc_dev(pdev);
cap = dpc->cap_pos;
/*
@@ -93,10 +140,12 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
+ if (!pcie_wait_for_link(pdev, true))
+ return PCI_ERS_RESULT_DISCONNECT;
+
return PCI_ERS_RESULT_RECOVERED;
}
-
static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
{
struct device *dev = &dpc->dev->device;
@@ -169,7 +218,7 @@ static irqreturn_t dpc_handler(int irq, void *context)
reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
- dev_warn(dev, "DPC %s detected, remove downstream devices\n",
+ dev_warn(dev, "DPC %s detected\n",
(reason == 0) ? "unmasked uncorrectable error" :
(reason == 1) ? "ERR_NONFATAL" :
(reason == 2) ? "ERR_FATAL" :
@@ -186,7 +235,7 @@ static irqreturn_t dpc_handler(int irq, void *context)
}
/* We configure DPC so it only triggers on ERR_FATAL */
- pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
+ pcie_do_recovery(pdev, pci_channel_io_frozen, PCIE_PORT_SERVICE_DPC);
return IRQ_HANDLED;
}
@@ -259,6 +308,8 @@ static int dpc_probe(struct pcie_device *dev)
FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size,
FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
+
+ pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
return status;
}
@@ -282,8 +333,7 @@ static struct pcie_port_service_driver dpcdriver = {
.reset_link = dpc_reset_link,
};
-static int __init dpc_service_init(void)
+int __init pcie_dpc_init(void)
{
return pcie_port_service_register(&dpcdriver);
}
-device_initcall(dpc_service_init);
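pci_save_dpc_state()/pci_restore_dpc_state() follow the kernel's saved-capability convention: probe reserves space with pci_add_ext_cap_save_buffer(), and the save/restore hooks copy the control word through pci_find_saved_ext_cap(). A hedged sketch of the same save half for a hypothetical extended capability (MY_ECAP_ID, MY_ECAP_CTL, and cap_pos are stand-ins):

	static void my_save_state(struct pci_dev *dev, u16 cap_pos)
	{
		struct pci_cap_saved_state *save;
		u16 *buf;

		save = pci_find_saved_ext_cap(dev, MY_ECAP_ID);
		if (!save)
			return;		/* probe never reserved a buffer */

		buf = (u16 *)&save->cap.data[0];
		pci_read_config_word(dev, cap_pos + MY_ECAP_CTL, buf);
	}

	/* At probe time, once per device:
	 *	pci_add_ext_cap_save_buffer(dev, MY_ECAP_ID, sizeof(u16));
	 */

The restore half mirrors it with pci_write_config_word(), as pci_restore_dpc_state() shows above.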
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 708fd3a0d646..773197a12568 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -12,18 +12,12 @@
#include <linux/pci.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/aer.h>
#include "portdrv.h"
#include "../pci.h"
-struct aer_broadcast_data {
- enum pci_channel_state state;
- enum pci_ers_result result;
-};
-
static pci_ers_result_t merge_result(enum pci_ers_result orig,
enum pci_ers_result new)
{
@@ -49,66 +43,52 @@ static pci_ers_result_t merge_result(enum pci_ers_result orig,
return orig;
}
-static int report_error_detected(struct pci_dev *dev, void *data)
+static int report_error_detected(struct pci_dev *dev,
+ enum pci_channel_state state,
+ enum pci_ers_result *result)
{
pci_ers_result_t vote;
const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
-
- result_data = (struct aer_broadcast_data *) data;
device_lock(&dev->dev);
- dev->error_state = result_data->state;
-
- if (!dev->driver ||
+ if (!pci_dev_set_io_state(dev, state) ||
+ !dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->error_detected) {
- if (result_data->state == pci_channel_io_frozen &&
- dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
- /*
- * In case of fatal recovery, if one of down-
- * stream device has no driver. We might be
- * unable to recover because a later insmod
- * of a driver for this device is unaware of
- * its hw state.
- */
- pci_printk(KERN_DEBUG, dev, "device has %s\n",
- dev->driver ?
- "no AER-aware driver" : "no driver");
- }
-
/*
- * If there's any device in the subtree that does not
- * have an error_detected callback, returning
- * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
- * the subsequent mmio_enabled/slot_reset/resume
- * callbacks of "any" device in the subtree. All the
- * devices in the subtree are left in the error state
- * without recovery.
+ * If any device in the subtree does not have an error_detected
+ * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
+	 * error callbacks for every device in the subtree, and the
+	 * subtree is left in the disconnected error state.
*/
-
if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
vote = PCI_ERS_RESULT_NO_AER_DRIVER;
else
vote = PCI_ERS_RESULT_NONE;
} else {
err_handler = dev->driver->err_handler;
- vote = err_handler->error_detected(dev, result_data->state);
- pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
+ vote = err_handler->error_detected(dev, state);
}
-
- result_data->result = merge_result(result_data->result, vote);
+ pci_uevent_ers(dev, vote);
+ *result = merge_result(*result, vote);
device_unlock(&dev->dev);
return 0;
}
+static int report_frozen_detected(struct pci_dev *dev, void *data)
+{
+ return report_error_detected(dev, pci_channel_io_frozen, data);
+}
+
+static int report_normal_detected(struct pci_dev *dev, void *data)
+{
+ return report_error_detected(dev, pci_channel_io_normal, data);
+}
+
static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
- pci_ers_result_t vote;
+ pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
-
- result_data = (struct aer_broadcast_data *) data;
device_lock(&dev->dev);
if (!dev->driver ||
@@ -118,7 +98,7 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data)
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
- result_data->result = merge_result(result_data->result, vote);
+ *result = merge_result(*result, vote);
out:
device_unlock(&dev->dev);
return 0;
@@ -126,11 +106,8 @@ out:
static int report_slot_reset(struct pci_dev *dev, void *data)
{
- pci_ers_result_t vote;
+ pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
-
- result_data = (struct aer_broadcast_data *) data;
device_lock(&dev->dev);
if (!dev->driver ||
@@ -140,7 +117,7 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
- result_data->result = merge_result(result_data->result, vote);
+ *result = merge_result(*result, vote);
out:
device_unlock(&dev->dev);
return 0;
@@ -151,17 +128,16 @@ static int report_resume(struct pci_dev *dev, void *data)
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
- dev->error_state = pci_channel_io_normal;
-
- if (!dev->driver ||
+ if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
+ !dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
goto out;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
- pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
out:
+ pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
device_unlock(&dev->dev);
return 0;
}
@@ -177,207 +153,86 @@ static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
int rc;
- rc = pci_bridge_secondary_bus_reset(dev);
+ rc = pci_bus_error_reset(dev);
pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
{
- struct pci_dev *udev;
pci_ers_result_t status;
struct pcie_port_service_driver *driver = NULL;
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /* Reset this port for all subordinates */
- udev = dev;
- } else {
- /* Reset the upstream component (likely downstream port) */
- udev = dev->bus->self;
- }
-
- /* Use the aer driver of the component firstly */
- driver = pcie_port_find_service(udev, service);
-
+ driver = pcie_port_find_service(dev, service);
if (driver && driver->reset_link) {
- status = driver->reset_link(udev);
- } else if (udev->has_secondary_link) {
- status = default_reset_link(udev);
+ status = driver->reset_link(dev);
+ } else if (dev->has_secondary_link) {
+ status = default_reset_link(dev);
} else {
pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
- pci_name(udev));
+ pci_name(dev));
return PCI_ERS_RESULT_DISCONNECT;
}
if (status != PCI_ERS_RESULT_RECOVERED) {
pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
- pci_name(udev));
+ pci_name(dev));
return PCI_ERS_RESULT_DISCONNECT;
}
return status;
}
-/**
- * broadcast_error_message - handle message broadcast to downstream drivers
- * @dev: pointer to from where in a hierarchy message is broadcasted down
- * @state: error state
- * @error_mesg: message to print
- * @cb: callback to be broadcasted
- *
- * Invoked during error recovery process. Once being invoked, the content
- * of error severity will be broadcasted to all downstream drivers in a
- * hierarchy in question.
- */
-static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
- enum pci_channel_state state,
- char *error_mesg,
- int (*cb)(struct pci_dev *, void *))
-{
- struct aer_broadcast_data result_data;
-
- pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
- result_data.state = state;
- if (cb == report_error_detected)
- result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
- else
- result_data.result = PCI_ERS_RESULT_RECOVERED;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /*
- * If the error is reported by a bridge, we think this error
- * is related to the downstream link of the bridge, so we
- * do error recovery on all subordinates of the bridge instead
- * of the bridge and clear the error status of the bridge.
- */
- if (cb == report_error_detected)
- dev->error_state = state;
- pci_walk_bus(dev->subordinate, cb, &result_data);
- if (cb == report_resume) {
- pci_aer_clear_device_status(dev);
- pci_cleanup_aer_uncorrect_error_status(dev);
- dev->error_state = pci_channel_io_normal;
- }
- } else {
- /*
- * If the error is reported by an end point, we think this
- * error is related to the upstream link of the end point.
- * The error is non fatal so the bus is ok; just invoke
- * the callback for the function that logged the error.
- */
- cb(dev, &result_data);
- }
-
- return result_data.result;
-}
-
-/**
- * pcie_do_fatal_recovery - handle fatal error recovery process
- * @dev: pointer to a pci_dev data structure of agent detecting an error
- *
- * Invoked when an error is fatal. Once being invoked, removes the devices
- * beneath this AER agent, followed by reset link e.g. secondary bus reset
- * followed by re-enumeration of devices.
- */
-void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
+void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
+ u32 service)
{
- struct pci_dev *udev;
- struct pci_bus *parent;
- struct pci_dev *pdev, *temp;
- pci_ers_result_t result;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
- udev = dev;
+ pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
+ struct pci_bus *bus;
+
+ /*
+ * Error recovery runs on all subordinates of the first downstream port.
+ * If the downstream port detected the error, it is cleared at the end.
+ */
+ if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM))
+ dev = dev->bus->self;
+ bus = dev->subordinate;
+
+ pci_dbg(dev, "broadcast error_detected message\n");
+ if (state == pci_channel_io_frozen)
+ pci_walk_bus(bus, report_frozen_detected, &status);
else
- udev = dev->bus->self;
-
- parent = udev->subordinate;
- pci_lock_rescan_remove();
- pci_dev_get(dev);
- list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
- bus_list) {
- pci_dev_get(pdev);
- pci_dev_set_disconnected(pdev, NULL);
- if (pci_has_subordinate(pdev))
- pci_walk_bus(pdev->subordinate,
- pci_dev_set_disconnected, NULL);
- pci_stop_and_remove_bus_device(pdev);
- pci_dev_put(pdev);
- }
-
- result = reset_link(udev, service);
+ pci_walk_bus(bus, report_normal_detected, &status);
- if ((service == PCIE_PORT_SERVICE_AER) &&
- (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
- /*
- * If the error is reported by a bridge, we think this error
- * is related to the downstream link of the bridge, so we
- * do error recovery on all subordinates of the bridge instead
- * of the bridge and clear the error status of the bridge.
- */
- pci_aer_clear_fatal_status(dev);
- pci_aer_clear_device_status(dev);
- }
+ if (state == pci_channel_io_frozen &&
+ reset_link(dev, service) != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
- if (result == PCI_ERS_RESULT_RECOVERED) {
- if (pcie_wait_for_link(udev, true))
- pci_rescan_bus(udev->bus);
- pci_info(dev, "Device recovery from fatal error successful\n");
- } else {
- pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
- pci_info(dev, "Device recovery from fatal error failed\n");
+ if (status == PCI_ERS_RESULT_CAN_RECOVER) {
+ status = PCI_ERS_RESULT_RECOVERED;
+ pci_dbg(dev, "broadcast mmio_enabled message\n");
+ pci_walk_bus(bus, report_mmio_enabled, &status);
}
- pci_dev_put(dev);
- pci_unlock_rescan_remove();
-}
-
-/**
- * pcie_do_nonfatal_recovery - handle nonfatal error recovery process
- * @dev: pointer to a pci_dev data structure of agent detecting an error
- *
- * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast
- * error detected message to all downstream drivers within a hierarchy in
- * question and return the returned code.
- */
-void pcie_do_nonfatal_recovery(struct pci_dev *dev)
-{
- pci_ers_result_t status;
- enum pci_channel_state state;
-
- state = pci_channel_io_normal;
-
- status = broadcast_error_message(dev,
- state,
- "error_detected",
- report_error_detected);
-
- if (status == PCI_ERS_RESULT_CAN_RECOVER)
- status = broadcast_error_message(dev,
- state,
- "mmio_enabled",
- report_mmio_enabled);
-
if (status == PCI_ERS_RESULT_NEED_RESET) {
/*
* TODO: Should call platform-specific
* functions to reset slot before calling
* drivers' slot_reset callbacks?
*/
- status = broadcast_error_message(dev,
- state,
- "slot_reset",
- report_slot_reset);
+ status = PCI_ERS_RESULT_RECOVERED;
+ pci_dbg(dev, "broadcast slot_reset message\n");
+ pci_walk_bus(bus, report_slot_reset, &status);
}
if (status != PCI_ERS_RESULT_RECOVERED)
goto failed;
- broadcast_error_message(dev,
- state,
- "resume",
- report_resume);
+ pci_dbg(dev, "broadcast resume message\n");
+ pci_walk_bus(bus, report_resume, &status);
+ pci_aer_clear_device_status(dev);
+ pci_cleanup_aer_uncorrect_error_status(dev);
pci_info(dev, "AER: Device recovery successful\n");
return;
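pcie_do_recovery() now threads one pci_ers_result_t through pci_walk_bus() and folds each driver's vote in with merge_result(). Going by the merge rule (whose body sits outside the hunks shown here), NO_AER_DRIVER dominates, NONE defers to the running result, and NEED_RESET overrides CAN_RECOVER/RECOVERED. A small illustrative fold over hypothetical votes:

	static pci_ers_result_t example_fold(void)
	{
		pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;

		status = merge_result(status, PCI_ERS_RESULT_RECOVERED);  /* -> RECOVERED  */
		status = merge_result(status, PCI_ERS_RESULT_NEED_RESET); /* -> NEED_RESET */
		status = merge_result(status, PCI_ERS_RESULT_NONE);       /* NONE defers   */

		return status;	/* NEED_RESET: the slot_reset phase runs */
	}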
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 3ed67676ea2a..0dbcf429089f 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -432,6 +432,31 @@ static void pcie_pme_remove(struct pcie_device *srv)
kfree(get_service_data(srv));
}
+static int pcie_pme_runtime_suspend(struct pcie_device *srv)
+{
+ struct pcie_pme_service_data *data = get_service_data(srv);
+
+ spin_lock_irq(&data->lock);
+ pcie_pme_interrupt_enable(srv->port, false);
+ pcie_clear_root_pme_status(srv->port);
+ data->noirq = true;
+ spin_unlock_irq(&data->lock);
+
+ return 0;
+}
+
+static int pcie_pme_runtime_resume(struct pcie_device *srv)
+{
+ struct pcie_pme_service_data *data = get_service_data(srv);
+
+ spin_lock_irq(&data->lock);
+ pcie_pme_interrupt_enable(srv->port, true);
+ data->noirq = false;
+ spin_unlock_irq(&data->lock);
+
+ return 0;
+}
+
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
@@ -439,6 +464,8 @@ static struct pcie_port_service_driver pcie_pme_driver = {
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
+ .runtime_suspend = pcie_pme_runtime_suspend,
+ .runtime_resume = pcie_pme_runtime_resume,
.resume = pcie_pme_resume,
.remove = pcie_pme_remove,
};
@@ -446,8 +473,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
/**
* pcie_pme_service_init - Register the PCIe PME service driver.
*/
-static int __init pcie_pme_service_init(void)
+int __init pcie_pme_init(void)
{
return pcie_port_service_register(&pcie_pme_driver);
}
-device_initcall(pcie_pme_service_init);
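The new runtime hooks quiesce PME while the port sleeps: interrupt generation is turned off, any latched root PME status is cleared, and data->noirq tells the interrupt path to stand down, all under data->lock. A sketch of the consuming side of that handshake; the guard shown here is hypothetical, on the assumption that the PME interrupt path checks noirq in the same way:

	static irqreturn_t my_pme_irq(int irq, void *context)
	{
		struct pcie_pme_service_data *data = context;
		irqreturn_t ret = IRQ_NONE;

		spin_lock(&data->lock);
		if (!data->noirq) {
			/* ... read and dispatch root PME status ... */
			ret = IRQ_HANDLED;
		}
		spin_unlock(&data->lock);
		return ret;
	}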
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d59afa42fc14..e495f04394d0 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -23,6 +23,30 @@
#define PCIE_PORT_DEVICE_MAXSERVICES 4
+#ifdef CONFIG_PCIEAER
+int pcie_aer_init(void);
+#else
+static inline int pcie_aer_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_HOTPLUG_PCI_PCIE
+int pcie_hp_init(void);
+#else
+static inline int pcie_hp_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_PCIE_PME
+int pcie_pme_init(void);
+#else
+static inline int pcie_pme_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_PCIE_DPC
+int pcie_dpc_init(void);
+#else
+static inline int pcie_dpc_init(void) { return 0; }
+#endif
+
/* Port Type */
#define PCIE_ANY_PORT (~0)
@@ -52,6 +76,8 @@ struct pcie_port_service_driver {
int (*suspend) (struct pcie_device *dev);
int (*resume_noirq) (struct pcie_device *dev);
int (*resume) (struct pcie_device *dev);
+ int (*runtime_suspend) (struct pcie_device *dev);
+ int (*runtime_resume) (struct pcie_device *dev);
/* Device driver may resume normal operations */
void (*error_resume)(struct pci_dev *dev);
@@ -85,6 +111,8 @@ int pcie_port_device_register(struct pci_dev *dev);
int pcie_port_device_suspend(struct device *dev);
int pcie_port_device_resume_noirq(struct device *dev);
int pcie_port_device_resume(struct device *dev);
+int pcie_port_device_runtime_suspend(struct device *dev);
+int pcie_port_device_runtime_resume(struct device *dev);
#endif
void pcie_port_device_remove(struct pci_dev *dev);
int __must_check pcie_port_bus_register(void);
@@ -123,10 +151,6 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
}
#endif
-#ifdef CONFIG_PCIEAER
-irqreturn_t aer_irq(int irq, void *context);
-#endif
-
struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
u32 service);
struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 7c37d815229e..f458ac9cb70c 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -395,6 +395,26 @@ int pcie_port_device_resume(struct device *dev)
size_t off = offsetof(struct pcie_port_service_driver, resume);
return device_for_each_child(dev, &off, pm_iter);
}
+
+/**
+ * pcie_port_device_runtime_suspend - runtime suspend port services
+ * @dev: PCI Express port to handle
+ */
+int pcie_port_device_runtime_suspend(struct device *dev)
+{
+ size_t off = offsetof(struct pcie_port_service_driver, runtime_suspend);
+ return device_for_each_child(dev, &off, pm_iter);
+}
+
+/**
+ * pcie_port_device_runtime_resume - runtime resume port services
+ * @dev: PCI Express port to handle
+ */
+int pcie_port_device_runtime_resume(struct device *dev)
+{
+ size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
+ return device_for_each_child(dev, &off, pm_iter);
+}
#endif /* PM */
static int remove_iter(struct device *dev, void *data)
@@ -466,6 +486,7 @@ struct device *pcie_port_find_device(struct pci_dev *dev,
device = pdrvs.dev;
return device;
}
+EXPORT_SYMBOL_GPL(pcie_port_find_device);
/**
* pcie_port_device_remove - unregister PCI Express port service devices
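Both runtime helpers reuse the pm_iter() dispatch already serving suspend/resume: the byte offset of the wanted callback inside struct pcie_port_service_driver is passed through device_for_each_child(), so one iterator covers every PM phase. The real pm_iter() is outside these hunks; a sketch of what such an iterator looks like:

	static int pm_iter_sketch(struct device *dev, void *data)
	{
		struct pcie_port_service_driver *drv;
		size_t offset = *(size_t *)data;
		int (*cb)(struct pcie_device *);

		if (dev->bus == &pcie_port_bus_type && dev->driver) {
			drv = to_service_driver(dev->driver);
			/* Fetch the callback stored at the requested offset. */
			cb = *(typeof(cb) *)((void *)drv + offset);
			if (cb)
				return cb(to_pcie_device(dev));
		}
		return 0;
	}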
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index eef22dc29140..0acca3596807 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -45,12 +45,10 @@ __setup("pcie_ports=", pcie_port_setup);
#ifdef CONFIG_PM
static int pcie_port_runtime_suspend(struct device *dev)
{
- return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
-}
+ if (!to_pci_dev(dev)->bridge_d3)
+ return -EBUSY;
-static int pcie_port_runtime_resume(struct device *dev)
-{
- return 0;
+ return pcie_port_device_runtime_suspend(dev);
}
static int pcie_port_runtime_idle(struct device *dev)
@@ -73,7 +71,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.restore_noirq = pcie_port_device_resume_noirq,
.restore = pcie_port_device_resume,
.runtime_suspend = pcie_port_runtime_suspend,
- .runtime_resume = pcie_port_runtime_resume,
+ .runtime_resume = pcie_port_device_runtime_resume,
.runtime_idle = pcie_port_runtime_idle,
};
@@ -109,8 +107,8 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
- dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED);
+ dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NEVER_SKIP |
+ DPM_FLAG_SMART_SUSPEND);
if (pci_bridge_d3_possible(dev)) {
/*
@@ -146,6 +144,13 @@ static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
return PCI_ERS_RESULT_CAN_RECOVER;
}
+static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
+{
+ pci_restore_state(dev);
+ pci_save_state(dev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
{
return PCI_ERS_RESULT_RECOVERED;
@@ -185,6 +190,7 @@ static const struct pci_device_id port_pci_ids[] = { {
static const struct pci_error_handlers pcie_portdrv_err_handler = {
.error_detected = pcie_portdrv_error_detected,
+ .slot_reset = pcie_portdrv_slot_reset,
.mmio_enabled = pcie_portdrv_mmio_enabled,
.resume = pcie_portdrv_err_resume,
};
@@ -226,11 +232,20 @@ static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = {
{}
};
+static void __init pcie_init_services(void)
+{
+ pcie_aer_init();
+ pcie_pme_init();
+ pcie_dpc_init();
+ pcie_hp_init();
+}
+
static int __init pcie_portdrv_init(void)
{
if (pcie_ports_disabled)
return -EACCES;
+ pcie_init_services();
dmi_check_system(pcie_portdrv_dmi_table);
return pci_register_driver(&pcie_portdriver);
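Replacing the per-service device_initcall()s with pcie_init_services(), called at the top of pcie_portdrv_init(), makes the registration order explicit: the service drivers exist before the port driver binds and creates their devices, instead of depending on initcall link order. The shape of the pattern, with hypothetical names:

	static int __init my_portdrv_init(void)
	{
		my_service_init();	/* pcie_port_service_register(...) */
		return pci_register_driver(&my_portdriver);
	}

The new pcie_portdrv_slot_reset() is also worth a note: after a reset the port's config space is lost, so it restores the state saved at probe and immediately re-saves it, letting a later reset start from the same snapshot.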
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 201f9e5ff55c..b1c05b5054a0 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -713,6 +713,7 @@ static void pci_set_bus_speed(struct pci_bus *bus)
pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
+ bridge->link_active_reporting = !!(linkcap & PCI_EXP_LNKCAP_DLLLARC);
pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
pcie_update_link_speed(bus, linksta);
@@ -1438,12 +1439,29 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev)
return PCI_CFG_SPACE_EXP_SIZE;
}
+#ifdef CONFIG_PCI_IOV
+static bool is_vf0(struct pci_dev *dev)
+{
+ if (pci_iov_virtfn_devfn(dev->physfn, 0) == dev->devfn &&
+ pci_iov_virtfn_bus(dev->physfn, 0) == dev->bus->number)
+ return true;
+
+ return false;
+}
+#endif
+
int pci_cfg_space_size(struct pci_dev *dev)
{
int pos;
u32 status;
u16 class;
+#ifdef CONFIG_PCI_IOV
+ /* Read cached value for all VFs except for VF0 */
+ if (dev->is_virtfn && !is_vf0(dev))
+ return dev->physfn->sriov->cfg_size;
+#endif
+
if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
return PCI_CFG_SPACE_SIZE;
@@ -2143,7 +2161,7 @@ static void pci_release_dev(struct device *dev)
pcibios_release_device(pci_dev);
pci_bus_put(pci_dev->bus);
kfree(pci_dev->driver_override);
- kfree(pci_dev->dma_alias_mask);
+ bitmap_free(pci_dev->dma_alias_mask);
kfree(pci_dev);
}
@@ -2397,8 +2415,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
- pci_set_dma_max_seg_size(dev, 65536);
- pci_set_dma_seg_boundary(dev, 0xffffffff);
+ dma_set_max_seg_size(&dev->dev, 65536);
+ dma_set_seg_boundary(&dev->dev, 0xffffffff);
/* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev);
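Two probe.c cleanups ride along here: dma_alias_mask is now released with bitmap_free(), pairing with a bitmap allocation helper on the allocation side (assumed, since that side is not shown in these hunks), and VFs other than VF0 reuse the config-space size cached in the PF's sriov struct rather than re-probing it. The alloc/free pairing convention, as a sketch:

	#include <linux/bitmap.h>

	/* Keep bitmap allocation and release symmetric; bitmap_free()
	 * is NULL-safe, like kfree().
	 */
	static unsigned long *make_mask(unsigned int nbits)
	{
		return bitmap_zalloc(nbits, GFP_KERNEL);	/* may return NULL */
	}

	static void drop_mask(unsigned long *mask)
	{
		bitmap_free(mask);
	}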
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6bc27b7fd452..4700d24e5d55 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3190,7 +3190,11 @@ static void disable_igfx_irq(struct pci_dev *dev)
pci_iounmap(dev, regs);
}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
@@ -4987,7 +4991,6 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
void __iomem *mmio;
struct ntb_info_regs __iomem *mmio_ntb;
struct ntb_ctrl_regs __iomem *mmio_ctrl;
- struct sys_info_regs __iomem *mmio_sys_info;
u64 partition_map;
u8 partition;
int pp;
@@ -5008,7 +5011,6 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;
- mmio_sys_info = mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
partition = ioread8(&mmio_ntb->partition_id);
@@ -5057,59 +5059,37 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
pci_iounmap(pdev, mmio);
pci_disable_device(pdev);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8531,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8532,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8533,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8534,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8535,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8536,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8543,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8544,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8545,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8546,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8551,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8552,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8553,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8554,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8555,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8556,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8561,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8562,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8563,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8564,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8565,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8566,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8571,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8572,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8573,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8574,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8575,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8576,
- quirk_switchtec_ntb_dma_alias);
+#define SWITCHTEC_QUIRK(vid) \
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
+ PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
+
+SWITCHTEC_QUIRK(0x8531); /* PFX 24xG3 */
+SWITCHTEC_QUIRK(0x8532); /* PFX 32xG3 */
+SWITCHTEC_QUIRK(0x8533); /* PFX 48xG3 */
+SWITCHTEC_QUIRK(0x8534); /* PFX 64xG3 */
+SWITCHTEC_QUIRK(0x8535); /* PFX 80xG3 */
+SWITCHTEC_QUIRK(0x8536); /* PFX 96xG3 */
+SWITCHTEC_QUIRK(0x8541); /* PSX 24xG3 */
+SWITCHTEC_QUIRK(0x8542); /* PSX 32xG3 */
+SWITCHTEC_QUIRK(0x8543); /* PSX 48xG3 */
+SWITCHTEC_QUIRK(0x8544); /* PSX 64xG3 */
+SWITCHTEC_QUIRK(0x8545); /* PSX 80xG3 */
+SWITCHTEC_QUIRK(0x8546); /* PSX 96xG3 */
+SWITCHTEC_QUIRK(0x8551); /* PAX 24XG3 */
+SWITCHTEC_QUIRK(0x8552); /* PAX 32XG3 */
+SWITCHTEC_QUIRK(0x8553); /* PAX 48XG3 */
+SWITCHTEC_QUIRK(0x8554); /* PAX 64XG3 */
+SWITCHTEC_QUIRK(0x8555); /* PAX 80XG3 */
+SWITCHTEC_QUIRK(0x8556); /* PAX 96XG3 */
+SWITCHTEC_QUIRK(0x8561); /* PFXL 24XG3 */
+SWITCHTEC_QUIRK(0x8562); /* PFXL 32XG3 */
+SWITCHTEC_QUIRK(0x8563); /* PFXL 48XG3 */
+SWITCHTEC_QUIRK(0x8564); /* PFXL 64XG3 */
+SWITCHTEC_QUIRK(0x8565); /* PFXL 80XG3 */
+SWITCHTEC_QUIRK(0x8566); /* PFXL 96XG3 */
+SWITCHTEC_QUIRK(0x8571); /* PFXI 24XG3 */
+SWITCHTEC_QUIRK(0x8572); /* PFXI 32XG3 */
+SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
+SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
+SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
+SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 461e7fd2756f..e9c6b120cf45 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -25,9 +25,6 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_dev_assign_added(dev, false);
}
-
- if (dev->bus->self)
- pcie_aspm_exit_link_state(dev);
}
static void pci_destroy_dev(struct pci_dev *dev)
@@ -41,6 +38,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
+ pcie_aspm_exit_link_state(dev);
pci_bridge_d3_update(dev);
pci_free_resources(dev);
put_device(&dev->dev);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 79b1824e83b4..ed960436df5e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -811,6 +811,8 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus,
static resource_size_t calculate_iosize(resource_size_t size,
resource_size_t min_size,
resource_size_t size1,
+ resource_size_t add_size,
+ resource_size_t children_add_size,
resource_size_t old_size,
resource_size_t align)
{
@@ -823,15 +825,18 @@ static resource_size_t calculate_iosize(resource_size_t size,
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
- size = ALIGN(size + size1, align);
+ size = size + size1;
if (size < old_size)
size = old_size;
+
+ size = ALIGN(max(size, add_size) + children_add_size, align);
return size;
}
static resource_size_t calculate_memsize(resource_size_t size,
resource_size_t min_size,
- resource_size_t size1,
+ resource_size_t add_size,
+ resource_size_t children_add_size,
resource_size_t old_size,
resource_size_t align)
{
@@ -841,7 +846,8 @@ static resource_size_t calculate_memsize(resource_size_t size,
old_size = 0;
if (size < old_size)
size = old_size;
- size = ALIGN(size + size1, align);
+
+ size = ALIGN(max(size, add_size) + children_add_size, align);
return size;
}
@@ -930,12 +936,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
}
}
- size0 = calculate_iosize(size, min_size, size1,
+ size0 = calculate_iosize(size, min_size, size1, 0, 0,
resource_size(b_res), min_align);
- if (children_add_size > add_size)
- add_size = children_add_size;
- size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
- calculate_iosize(size, min_size, add_size + size1,
+ size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
+ calculate_iosize(size, min_size, size1, add_size, children_add_size,
resource_size(b_res), min_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
@@ -1079,12 +1083,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
min_align = calculate_mem_align(aligns, max_order);
min_align = max(min_align, window_alignment(bus, b_res->flags));
- size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
+ size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
add_align = max(min_align, add_align);
- if (children_add_size > add_size)
- add_size = children_add_size;
- size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
- calculate_memsize(size, min_size, add_size,
+ size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
+ calculate_memsize(size, min_size, add_size, children_add_size,
resource_size(b_res), add_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
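The reworked calculate_iosize()/calculate_memsize() collapse the optional-resource bookkeeping into one expression, size = ALIGN(max(size, add_size) + children_add_size, align), where the old code instead replaced add_size with children_add_size whenever the latter was larger. A worked example with hypothetical numbers:

	#include <linux/kernel.h>

	static resource_size_t example_window_size(void)
	{
		resource_size_t size = 0x3000;			/* required child space */
		resource_size_t add_size = 0x4000;		/* optional, this bus   */
		resource_size_t children_add_size = 0x2000;	/* optional, children   */
		resource_size_t align = 0x1000;

		/* max(0x3000, 0x4000) + 0x2000 = 0x6000, already aligned */
		return ALIGN(max(size, add_size) + children_add_size, align);
	}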
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index e634229ece89..c46d5e1ff536 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -14,7 +14,6 @@
struct kset *pci_slots_kset;
EXPORT_SYMBOL_GPL(pci_slots_kset);
-static DEFINE_MUTEX(pci_slot_mutex);
static ssize_t pci_slot_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -371,7 +370,7 @@ void pci_hp_create_module_link(struct pci_slot *pci_slot)
if (!slot || !slot->ops)
return;
- kobj = kset_find_obj(module_kset, slot->ops->mod_name);
+ kobj = kset_find_obj(module_kset, slot->mod_name);
if (!kobj)
return;
ret = sysfs_create_link(&pci_slot->kobj, kobj, "module");
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 18802096148e..41ce410f7f97 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -284,7 +284,7 @@ int pcmcia_fixup_iowidth(struct pcmcia_device *p_dev)
io_on.stop = s->io[i].res->end;
s->ops->set_io_map(s, &io_off);
- mdelay(40);
+ msleep(40);
s->ops->set_io_map(s, &io_on);
}
unlock:
@@ -567,7 +567,7 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev)
!(flags & CONF_ENABLE_PULSE_IRQ))
option |= COR_LEVEL_REQ;
pcmcia_write_cis_mem(s, 1, (base + CISREG_COR)>>1, 1, &option);
- mdelay(40);
+ msleep(40);
}
if (p_dev->config_regs & PRESENT_STATUS)
pcmcia_write_cis_mem(s, 1, (base + CISREG_CCSR)>>1, 1, &status);
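The mdelay(40) to msleep(40) conversions are the standard fix for long delays in process context: mdelay() busy-waits for the whole interval, while msleep() schedules away and may oversleep slightly, which a 40 ms settle time tolerates. A sketch of the distinction:

	#include <linux/delay.h>

	/* Process context: sleep. */
	static void settle_after_power_cycle(void)
	{
		msleep(40);
	}

	/* Only atomic context (hard IRQ, spinlock held) would justify
	 * the busy-waiting mdelay() these hunks remove.
	 */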
diff --git a/drivers/pcmcia/ricoh.h b/drivers/pcmcia/ricoh.h
index 01098c841f87..8ac7b138c094 100644
--- a/drivers/pcmcia/ricoh.h
+++ b/drivers/pcmcia/ricoh.h
@@ -119,6 +119,10 @@
#define RL5C4XX_MISC_CONTROL 0x2F /* 8 bit */
#define RL5C4XX_ZV_ENABLE 0x08
+/* Misc Control 3 Register */
+#define RL5C4XX_MISC3 0x00A2 /* 16 bit */
+#define RL5C47X_MISC3_CB_CLKRUN_DIS BIT(1)
+
#ifdef __YENTA_H
#define rl_misc(socket) ((socket)->private[0])
@@ -156,6 +160,35 @@ static void ricoh_set_zv(struct yenta_socket *socket)
}
}
+static void ricoh_set_clkrun(struct yenta_socket *socket, bool quiet)
+{
+ u16 misc3;
+
+ /*
+	 * The RL5C475II likely has this setting too, but no datasheet
+	 * is publicly available for that chip.
+ */
+ if (socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C476 &&
+ socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C478)
+ return;
+
+ if (socket->dev->revision < 0x80)
+ return;
+
+ misc3 = config_readw(socket, RL5C4XX_MISC3);
+ if (misc3 & RL5C47X_MISC3_CB_CLKRUN_DIS) {
+ if (!quiet)
+ dev_dbg(&socket->dev->dev,
+ "CLKRUN feature already disabled\n");
+ } else if (disable_clkrun) {
+ if (!quiet)
+ dev_info(&socket->dev->dev,
+ "Disabling CLKRUN feature\n");
+ misc3 |= RL5C47X_MISC3_CB_CLKRUN_DIS;
+ config_writew(socket, RL5C4XX_MISC3, misc3);
+ }
+}
+
static void ricoh_save_state(struct yenta_socket *socket)
{
rl_misc(socket) = config_readw(socket, RL5C4XX_MISC);
@@ -172,6 +205,7 @@ static void ricoh_restore_state(struct yenta_socket *socket)
config_writew(socket, RL5C4XX_16BIT_IO_0, rl_io(socket));
config_writew(socket, RL5C4XX_16BIT_MEM_0, rl_mem(socket));
config_writew(socket, RL5C4XX_CONFIG, rl_config(socket));
+ ricoh_set_clkrun(socket, true);
}
@@ -197,6 +231,7 @@ static int ricoh_override(struct yenta_socket *socket)
config_writew(socket, RL5C4XX_CONFIG, config);
ricoh_set_zv(socket);
+ ricoh_set_clkrun(socket, false);
return 0;
}
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index c5f2344c189b..3a8c84bb174d 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -351,19 +351,20 @@ static int soc_common_pcmcia_config_skt(
if (ret == 0) {
struct gpio_desc *descs[2];
- int values[2], n = 0;
+ DECLARE_BITMAP(values, 2);
+ int n = 0;
if (skt->gpio_reset) {
descs[n] = skt->gpio_reset;
- values[n++] = !!(state->flags & SS_RESET);
+ __assign_bit(n++, values, state->flags & SS_RESET);
}
if (skt->gpio_bus_enable) {
descs[n] = skt->gpio_bus_enable;
- values[n++] = !!(state->flags & SS_OUTPUT_ENA);
+ __assign_bit(n++, values, state->flags & SS_OUTPUT_ENA);
}
if (n)
- gpiod_set_array_value_cansleep(n, descs, values);
+ gpiod_set_array_value_cansleep(n, descs, NULL, values);
/*
* This really needs a better solution. The IRQ
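The soc_common.c hunk tracks a gpiolib API change: gpiod_set_array_value_cansleep() now takes the values as a bitmap plus an optional struct gpio_array (passed as NULL above) instead of an int array. A minimal caller under the new signature, with hypothetical descriptors assumed to come from gpiod_get() elsewhere:

	#include <linux/bitops.h>
	#include <linux/gpio/consumer.h>

	static void set_pair(struct gpio_desc *d0, struct gpio_desc *d1,
			     bool v0, bool v1)
	{
		struct gpio_desc *descs[2] = { d0, d1 };
		DECLARE_BITMAP(values, 2);

		__assign_bit(0, values, v0);
		__assign_bit(1, values, v1);
		gpiod_set_array_value_cansleep(2, descs, NULL, values);
	}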
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index ab3da2262f0f..ac6a3f46b1e6 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -26,7 +26,8 @@
static bool disable_clkrun;
module_param(disable_clkrun, bool, 0444);
-MODULE_PARM_DESC(disable_clkrun, "If PC card doesn't function properly, please try this option");
+MODULE_PARM_DESC(disable_clkrun,
+ "If PC card doesn't function properly, please try this option (TI and Ricoh bridges only)");
static bool isa_probe = 1;
module_param(isa_probe, bool, 0444);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 7f01f6f60b87..d0b7dd8fb184 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -485,7 +485,13 @@ static int armpmu_filter_match(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
unsigned int cpu = smp_processor_id();
- return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+ int ret;
+
+ ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+ if (ret && armpmu->filter_match)
+ return armpmu->filter_match(event);
+
+ return ret;
}
static ssize_t armpmu_cpumask_show(struct device *dev,
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 96075cecb0ae..933bd8410fc2 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -77,14 +77,14 @@ static int pmu_parse_irq_affinity(struct device_node *node, int i)
dn = of_parse_phandle(node, "interrupt-affinity", i);
if (!dn) {
- pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
- i, node->name);
+ pr_warn("failed to parse interrupt-affinity[%d] for %pOFn\n",
+ i, node);
return -EINVAL;
}
cpu = of_cpu_node_to_id(dn);
if (cpu < 0) {
- pr_warn("failed to find logical CPU for %s\n", dn->name);
+ pr_warn("failed to find logical CPU for %pOFn\n", dn);
cpu = nr_cpu_ids;
}
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 5c8d452e35e2..60f949e2a684 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -43,16 +43,19 @@ config PHY_XGENE
source "drivers/phy/allwinner/Kconfig"
source "drivers/phy/amlogic/Kconfig"
source "drivers/phy/broadcom/Kconfig"
+source "drivers/phy/cadence/Kconfig"
source "drivers/phy/hisilicon/Kconfig"
source "drivers/phy/lantiq/Kconfig"
source "drivers/phy/marvell/Kconfig"
source "drivers/phy/mediatek/Kconfig"
source "drivers/phy/motorola/Kconfig"
+source "drivers/phy/mscc/Kconfig"
source "drivers/phy/qualcomm/Kconfig"
source "drivers/phy/ralink/Kconfig"
source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
+source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 84e3bd9c5665..0301e25d07c1 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -15,11 +15,14 @@ obj-$(CONFIG_ARCH_RENESAS) += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += broadcom/ \
+ cadence/ \
hisilicon/ \
marvell/ \
motorola/ \
+ mscc/ \
qualcomm/ \
ralink/ \
samsung/ \
+ socionext/ \
st/ \
ti/
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 8786a9674471..aa917a61071d 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -60,7 +60,8 @@ config PHY_NS2_USB_DRD
config PHY_BRCM_SATA
tristate "Broadcom SATA PHY driver"
- depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || COMPILE_TEST
+ depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || \
+ ARCH_BCM_63XX || COMPILE_TEST
depends on OF
select GENERIC_PHY
default ARCH_BCM_IPROC
diff --git a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
index 0f4ac5d63cff..b074682d9dd8 100644
--- a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
@@ -153,8 +153,8 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
struct cygnus_pcie_phy *p;
if (of_property_read_u32(child, "reg", &id)) {
- dev_err(dev, "missing reg property for %s\n",
- child->name);
+ dev_err(dev, "missing reg property for %pOFn\n",
+ child);
ret = -EINVAL;
goto put_child;
}
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 8708ea3b4d6d..0f4a06ff7fd3 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -47,6 +47,7 @@ enum brcm_sata_phy_version {
BRCM_SATA_PHY_IPROC_NS2,
BRCM_SATA_PHY_IPROC_NSP,
BRCM_SATA_PHY_IPROC_SR,
+ BRCM_SATA_PHY_DSL_28NM,
};
enum brcm_sata_phy_rxaeq_mode {
@@ -96,7 +97,10 @@ enum sata_phy_regs {
PLLCONTROL_0_FREQ_DET_RESTART = BIT(13),
PLLCONTROL_0_FREQ_MONITOR = BIT(12),
PLLCONTROL_0_SEQ_START = BIT(15),
+ PLL_CAP_CHARGE_TIME = 0x83,
+ PLL_VCO_CAL_THRESH = 0x84,
PLL_CAP_CONTROL = 0x85,
+ PLL_FREQ_DET_TIME = 0x86,
PLL_ACTRL2 = 0x8b,
PLL_ACTRL2_SELDIV_MASK = 0x1f,
PLL_ACTRL2_SELDIV_SHIFT = 9,
@@ -106,6 +110,9 @@ enum sata_phy_regs {
PLL1_ACTRL2 = 0x82,
PLL1_ACTRL3 = 0x83,
PLL1_ACTRL4 = 0x84,
+ PLL1_ACTRL5 = 0x85,
+ PLL1_ACTRL6 = 0x86,
+ PLL1_ACTRL7 = 0x87,
TX_REG_BANK = 0x070,
TX_ACTRL0 = 0x80,
@@ -119,6 +126,8 @@ enum sata_phy_regs {
AEQ_FRC_EQ_FORCE = BIT(0),
AEQ_FRC_EQ_FORCE_VAL = BIT(1),
AEQRX_REG_BANK_1 = 0xe0,
+ AEQRX_SLCAL0_CTRL0 = 0x82,
+ AEQRX_SLCAL1_CTRL0 = 0x86,
OOB_REG_BANK = 0x150,
OOB1_REG_BANK = 0x160,
@@ -168,6 +177,7 @@ static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
switch (priv->version) {
case BRCM_SATA_PHY_STB_28NM:
case BRCM_SATA_PHY_IPROC_NS2:
+ case BRCM_SATA_PHY_DSL_28NM:
size = SATA_PCB_REG_28NM_SPACE_SIZE;
break;
case BRCM_SATA_PHY_STB_40NM:
@@ -482,6 +492,61 @@ static int brcm_sr_sata_init(struct brcm_sata_port *port)
return 0;
}
+static int brcm_dsl_sata_init(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ struct device *dev = port->phy_priv->dev;
+ unsigned int try;
+ u32 tmp;
+
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL7, 0, 0x873);
+
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0xc000);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ 0, 0x3089);
+ usleep_range(1000, 2000);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ 0, 0x3088);
+ usleep_range(1000, 2000);
+
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, AEQRX_SLCAL0_CTRL0,
+ 0, 0x3000);
+
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, AEQRX_SLCAL1_CTRL0,
+ 0, 0x3000);
+ usleep_range(1000, 2000);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_CAP_CHARGE_TIME, 0, 0x32);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_VCO_CAL_THRESH, 0, 0xa);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_FREQ_DET_TIME, 0, 0x64);
+ usleep_range(1000, 2000);
+
+ /* Acquire PLL lock */
+ try = 50;
+ while (try) {
+ tmp = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ BLOCK0_XGXSSTATUS);
+ if (tmp & BLOCK0_XGXSSTATUS_PLL_LOCK)
+ break;
+ msleep(20);
+ try--;
+	}
+
+ if (!try) {
+ /* PLL did not lock; give up */
+ dev_err(dev, "port%d PLL did not lock\n", port->portnum);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "port%d initialized\n", port->portnum);
+
+ return 0;
+}
+
static int brcm_sata_phy_init(struct phy *phy)
{
int rc;
@@ -501,6 +566,9 @@ static int brcm_sata_phy_init(struct phy *phy)
case BRCM_SATA_PHY_IPROC_SR:
rc = brcm_sr_sata_init(port);
break;
+ case BRCM_SATA_PHY_DSL_28NM:
+ rc = brcm_dsl_sata_init(port);
+ break;
default:
rc = -ENODEV;
}
@@ -552,6 +620,8 @@ static const struct of_device_id brcm_sata_phy_of_match[] = {
.data = (void *)BRCM_SATA_PHY_IPROC_NSP },
{ .compatible = "brcm,iproc-sr-sata-phy",
.data = (void *)BRCM_SATA_PHY_IPROC_SR },
+ { .compatible = "brcm,bcm63138-sata-phy",
+ .data = (void *)BRCM_SATA_PHY_DSL_28NM },
{},
};
MODULE_DEVICE_TABLE(of, brcm_sata_phy_of_match);
@@ -600,8 +670,8 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
struct brcm_sata_port *port;
if (of_property_read_u32(child, "reg", &id)) {
- dev_err(dev, "missing reg property in node %s\n",
- child->name);
+ dev_err(dev, "missing reg property in node %pOFn\n",
+ child);
ret = -EINVAL;
goto put_child;
}
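brcm_dsl_sata_init() waits for PLL lock with an open-coded retry loop (50 tries at 20 ms, about one second). In sleepable context the same wait can be expressed with the iopoll helpers; a hedged sketch, wrapping the three-argument PHY read so it fits readx_poll_timeout()'s op(addr) shape:

	#include <linux/iopoll.h>

	static u32 rd_xgxsstatus(void __iomem *base)
	{
		return brcm_sata_phy_rd(base, BLOCK0_REG_BANK, BLOCK0_XGXSSTATUS);
	}

	/* Poll every ~20 ms, give up after 1 s, like the loop above;
	 * returns 0 on lock, -ETIMEDOUT otherwise.
	 */
	static int wait_pll_lock(void __iomem *base)
	{
		u32 tmp;

		return readx_poll_timeout(rd_xgxsstatus, base, tmp,
					  tmp & BLOCK0_XGXSSTATUS_PLL_LOCK,
					  20000, 1000000);
	}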
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index d1dab36fa5b7..f59b1dc30399 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -372,10 +372,8 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
clk_disable(priv->usb_30_clk);
phy_provider = devm_of_phy_provider_register(dev, brcm_usb_phy_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
- return 0;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig
new file mode 100644
index 000000000000..57fff7de4031
--- /dev/null
+++ b/drivers/phy/cadence/Kconfig
@@ -0,0 +1,10 @@
+#
+# Phy driver for Cadence MHDP DisplayPort controller
+#
+config PHY_CADENCE_DP
+ tristate "Cadence MHDP DisplayPort PHY driver"
+ depends on OF
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ help
+ Support for Cadence MHDP DisplayPort PHY.
diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile
new file mode 100644
index 000000000000..e5b0a11cf28a
--- /dev/null
+++ b/drivers/phy/cadence/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PHY_CADENCE_DP) += phy-cadence-dp.o
diff --git a/drivers/phy/cadence/phy-cadence-dp.c b/drivers/phy/cadence/phy-cadence-dp.c
new file mode 100644
index 000000000000..bc10cb264b7a
--- /dev/null
+++ b/drivers/phy/cadence/phy-cadence-dp.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Cadence MHDP DisplayPort SD0801 PHY driver.
+ *
+ * Copyright 2018 Cadence Design Systems, Inc.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#define DEFAULT_NUM_LANES 2
+#define MAX_NUM_LANES 4
+#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */
+
+#define POLL_TIMEOUT_US 2000
+#define LANE_MASK 0x7
+
+/*
+ * register offsets from DPTX PHY register block base (i.e MHDP
+ * register base + 0x30a00)
+ */
+#define PHY_AUX_CONFIG 0x00
+#define PHY_AUX_CTRL 0x04
+#define PHY_RESET 0x20
+#define PHY_PMA_XCVR_PLLCLK_EN 0x24
+#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28
+#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c
+#define PHY_POWER_STATE_LN_0 0x0000
+#define PHY_POWER_STATE_LN_1 0x0008
+#define PHY_POWER_STATE_LN_2 0x0010
+#define PHY_POWER_STATE_LN_3 0x0018
+#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30
+#define PHY_PMA_CMN_READY 0x34
+#define PHY_PMA_XCVR_TX_VMARGIN 0x38
+#define PHY_PMA_XCVR_TX_DEEMPH 0x3c
+
+/*
+ * register offsets from SD0801 PHY register block base (i.e. MHDP
+ * register base + 0x500000)
+ */
+#define CMN_SSM_BANDGAP_TMR 0x00084
+#define CMN_SSM_BIAS_TMR 0x00088
+#define CMN_PLLSM0_PLLPRE_TMR 0x000a8
+#define CMN_PLLSM0_PLLLOCK_TMR 0x000b0
+#define CMN_PLLSM1_PLLPRE_TMR 0x000c8
+#define CMN_PLLSM1_PLLLOCK_TMR 0x000d0
+#define CMN_BGCAL_INIT_TMR 0x00190
+#define CMN_BGCAL_ITER_TMR 0x00194
+#define CMN_IBCAL_INIT_TMR 0x001d0
+#define CMN_PLL0_VCOCAL_INIT_TMR 0x00210
+#define CMN_PLL0_VCOCAL_ITER_TMR 0x00214
+#define CMN_PLL0_VCOCAL_REFTIM_START 0x00218
+#define CMN_PLL0_VCOCAL_PLLCNT_START 0x00220
+#define CMN_PLL0_INTDIV_M0 0x00240
+#define CMN_PLL0_FRACDIVL_M0 0x00244
+#define CMN_PLL0_FRACDIVH_M0 0x00248
+#define CMN_PLL0_HIGH_THR_M0 0x0024c
+#define CMN_PLL0_DSM_DIAG_M0 0x00250
+#define CMN_PLL0_LOCK_PLLCNT_START 0x00278
+#define CMN_PLL1_VCOCAL_INIT_TMR 0x00310
+#define CMN_PLL1_VCOCAL_ITER_TMR 0x00314
+#define CMN_PLL1_DSM_DIAG_M0 0x00350
+#define CMN_TXPUCAL_INIT_TMR 0x00410
+#define CMN_TXPUCAL_ITER_TMR 0x00414
+#define CMN_TXPDCAL_INIT_TMR 0x00430
+#define CMN_TXPDCAL_ITER_TMR 0x00434
+#define CMN_RXCAL_INIT_TMR 0x00450
+#define CMN_RXCAL_ITER_TMR 0x00454
+#define CMN_SD_CAL_INIT_TMR 0x00490
+#define CMN_SD_CAL_ITER_TMR 0x00494
+#define CMN_SD_CAL_REFTIM_START 0x00498
+#define CMN_SD_CAL_PLLCNT_START 0x004a0
+#define CMN_PDIAG_PLL0_CTRL_M0 0x00680
+#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x00684
+#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x00690
+#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x00694
+#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x00698
+#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x006d0
+#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x006d4
+#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x00704
+#define XCVR_DIAG_PLLDRC_CTRL 0x10394
+#define XCVR_DIAG_HSCLK_SEL 0x10398
+#define XCVR_DIAG_HSCLK_DIV 0x1039c
+#define TX_PSC_A0 0x10400
+#define TX_PSC_A1 0x10404
+#define TX_PSC_A2 0x10408
+#define TX_PSC_A3 0x1040c
+#define RX_PSC_A0 0x20000
+#define RX_PSC_A1 0x20004
+#define RX_PSC_A2 0x20008
+#define RX_PSC_A3 0x2000c
+#define PHY_PLL_CFG 0x30038
+
+struct cdns_dp_phy {
+ void __iomem *base; /* DPTX registers base */
+ void __iomem *sd_base; /* SD0801 registers base */
+ u32 num_lanes; /* Number of lanes to use */
+ u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
+ struct device *dev;
+};
+
+static int cdns_dp_phy_init(struct phy *phy);
+static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
+ unsigned int lane);
+static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
+ unsigned int offset,
+ unsigned char start_bit,
+ unsigned char num_bits,
+ unsigned int val);
+
+static const struct phy_ops cdns_dp_phy_ops = {
+ .init = cdns_dp_phy_init,
+ .owner = THIS_MODULE,
+};
+
+static int cdns_dp_phy_init(struct phy *phy)
+{
+ unsigned char lane_bits;
+
+ struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy);
+
+ writel(0x0003, cdns_phy->base + PHY_AUX_CTRL); /* enable AUX */
+
+ /* PHY PMA registers configuration function */
+ cdns_dp_phy_pma_cfg(cdns_phy);
+
+ /*
+	 * Set lane power states to A0.
+	 * Clear the per-lane PLL clock enables.
+ */
+
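+	/* each lane has a 6-bit power-state field at bit offset 8 * lane */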
+ cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_0, 6, 0x0000);
+
+ if (cdns_phy->num_lanes >= 2) {
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_1, 6, 0x0000);
+
+ if (cdns_phy->num_lanes == 4) {
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_2, 6, 0);
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_3, 6, 0);
+ }
+ }
+
+ cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
+ 0, 1, 0x0000);
+
+ if (cdns_phy->num_lanes >= 2) {
+ cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
+ 1, 1, 0x0000);
+ if (cdns_phy->num_lanes == 4) {
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_PLLCLK_EN,
+ 2, 1, 0x0000);
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_PLLCLK_EN,
+ 3, 1, 0x0000);
+ }
+ }
+
+ /*
+ * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
+ * used lanes
+ */
+ lane_bits = (1 << cdns_phy->num_lanes) - 1;
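+	/* e.g. num_lanes == 2 gives lane_bits == 0x3: lanes 0-1 active, 2-3 idle */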
+ writel(((0xF & ~lane_bits) << 4) | (0xF & lane_bits),
+ cdns_phy->base + PHY_RESET);
+
+ /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
+ writel(0x0001, cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN);
+
+ /* PHY PMA registers configuration functions */
+ cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy);
+ cdns_dp_phy_pma_cmn_rate(cdns_phy);
+
+ /* take out of reset */
+ cdns_dp_phy_write_field(cdns_phy, PHY_RESET, 8, 1, 1);
+ cdns_dp_phy_wait_pma_cmn_ready(cdns_phy);
+ cdns_dp_phy_run(cdns_phy);
+
+ return 0;
+}
+
+static void cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_CMN_READY, reg,
+ reg & 1, 0, 500);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for PMA common ready\n");
+}
+
+static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int i;
+
+ /* PMA common configuration */
+ cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy);
+
+ /* PMA lane configuration to deal with multi-link operation */
+ for (i = 0; i < cdns_phy->num_lanes; i++)
+ cdns_dp_phy_pma_lane_cfg(cdns_phy, i);
+}
+
+static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy)
+{
+ /* refclock registers - assumes 25 MHz refclock */
+ writel(0x0019, cdns_phy->sd_base + CMN_SSM_BIAS_TMR);
+ writel(0x0032, cdns_phy->sd_base + CMN_PLLSM0_PLLPRE_TMR);
+ writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM0_PLLLOCK_TMR);
+ writel(0x0032, cdns_phy->sd_base + CMN_PLLSM1_PLLPRE_TMR);
+ writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM1_PLLLOCK_TMR);
+ writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_INIT_TMR);
+ writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_ITER_TMR);
+ writel(0x0019, cdns_phy->sd_base + CMN_IBCAL_INIT_TMR);
+ writel(0x001E, cdns_phy->sd_base + CMN_TXPUCAL_INIT_TMR);
+ writel(0x0006, cdns_phy->sd_base + CMN_TXPUCAL_ITER_TMR);
+ writel(0x001E, cdns_phy->sd_base + CMN_TXPDCAL_INIT_TMR);
+ writel(0x0006, cdns_phy->sd_base + CMN_TXPDCAL_ITER_TMR);
+ writel(0x02EE, cdns_phy->sd_base + CMN_RXCAL_INIT_TMR);
+ writel(0x0006, cdns_phy->sd_base + CMN_RXCAL_ITER_TMR);
+ writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_INIT_TMR);
+ writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_ITER_TMR);
+ writel(0x000E, cdns_phy->sd_base + CMN_SD_CAL_REFTIM_START);
+ writel(0x012B, cdns_phy->sd_base + CMN_SD_CAL_PLLCNT_START);
+ /* PLL registers */
+ writel(0x0409, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_PADJ_M0);
+ writel(0x1001, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_IADJ_M0);
+ writel(0x0F08, cdns_phy->sd_base + CMN_PDIAG_PLL0_FILT_PADJ_M0);
+ writel(0x0004, cdns_phy->sd_base + CMN_PLL0_DSM_DIAG_M0);
+ writel(0x00FA, cdns_phy->sd_base + CMN_PLL0_VCOCAL_INIT_TMR);
+ writel(0x0004, cdns_phy->sd_base + CMN_PLL0_VCOCAL_ITER_TMR);
+ writel(0x00FA, cdns_phy->sd_base + CMN_PLL1_VCOCAL_INIT_TMR);
+ writel(0x0004, cdns_phy->sd_base + CMN_PLL1_VCOCAL_ITER_TMR);
+ writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_REFTIM_START);
+}
+
+static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy)
+{
+ /* Assumes 25 MHz refclock */
+ switch (cdns_phy->max_bit_rate) {
+ /* Setting VCO for 10.8GHz */
+ case 2700:
+ case 5400:
+ writel(0x01B0, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x0120, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ /* Setting VCO for 9.72GHz */
+ case 2430:
+ case 3240:
+ writel(0x0184, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0xCCCD, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x0104, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ /* Setting VCO for 8.64GHz */
+ case 2160:
+ case 4320:
+ writel(0x0159, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0x999A, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x00E7, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ /* Setting VCO for 8.1GHz */
+ case 8100:
+ writel(0x0144, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x00D8, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ }
+
+ writel(0x0002, cdns_phy->sd_base + CMN_PDIAG_PLL0_CTRL_M0);
+ writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_PLLCNT_START);
+}
+
+static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int clk_sel_val = 0;
+ unsigned int hsclk_div_val = 0;
+ unsigned int i;
+
+	/* 0x0000 selects the single DP link configuration */
+ writel(0x0000, cdns_phy->sd_base + PHY_PLL_CFG);
+
+ switch (cdns_phy->max_bit_rate) {
+ case 1620:
+ clk_sel_val = 0x0f01;
+ hsclk_div_val = 2;
+ break;
+ case 2160:
+ case 2430:
+ case 2700:
+ clk_sel_val = 0x0701;
+ hsclk_div_val = 1;
+ break;
+ case 3240:
+ clk_sel_val = 0x0b00;
+ hsclk_div_val = 2;
+ break;
+ case 4320:
+ case 5400:
+ clk_sel_val = 0x0301;
+ hsclk_div_val = 0;
+ break;
+ case 8100:
+ clk_sel_val = 0x0200;
+ hsclk_div_val = 0;
+ break;
+ }
+
+ writel(clk_sel_val, cdns_phy->sd_base + CMN_PDIAG_PLL0_CLK_SEL_M0);
+
+ /* PMA lane configuration to deal with multi-link operation */
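+	/* per-lane register blocks are strided 0x800 apart, hence (i << 11) */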
+ for (i = 0; i < cdns_phy->num_lanes; i++) {
+ writel(hsclk_div_val,
+ cdns_phy->sd_base + (XCVR_DIAG_HSCLK_DIV | (i<<11)));
+ }
+}
+
+static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
+ unsigned int lane)
+{
+ unsigned int lane_bits = (lane & LANE_MASK) << 11;
+
+ /* Writing Tx/Rx Power State Controllers registers */
+ writel(0x00FB, cdns_phy->sd_base + (TX_PSC_A0 | lane_bits));
+ writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A2 | lane_bits));
+ writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A3 | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (RX_PSC_A0 | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (RX_PSC_A2 | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (RX_PSC_A3 | lane_bits));
+
+ writel(0x0001, cdns_phy->sd_base + (XCVR_DIAG_PLLDRC_CTRL | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (XCVR_DIAG_HSCLK_SEL | lane_bits));
+}
+
+static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int read_val;
+ u32 write_val1 = 0;
+ u32 write_val2 = 0;
+ u32 mask = 0;
+ int ret;
+
+ /*
+	 * wait for the ACK of pma_xcvr_pllclk_en_ln_*, only on the
+	 * master lane
+ */
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN_ACK,
+ read_val, read_val & 1, 0, POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link PLL clock enable ack\n");
+
+ ndelay(100);
+
+ switch (cdns_phy->num_lanes) {
+ case 1: /* lane 0 */
+ write_val1 = 0x00000004;
+ write_val2 = 0x00000001;
+ mask = 0x0000003f;
+ break;
+ case 2: /* lane 0-1 */
+ write_val1 = 0x00000404;
+ write_val2 = 0x00000101;
+ mask = 0x00003f3f;
+ break;
+ case 4: /* lane 0-3 */
+ write_val1 = 0x04040404;
+ write_val2 = 0x01010101;
+ mask = 0x3f3f3f3f;
+ break;
+ }
+
+ writel(write_val1, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK,
+ read_val, (read_val & mask) == write_val1, 0,
+ POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link power state ack\n");
+
+ writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+ ndelay(100);
+
+ writel(write_val2, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK,
+ read_val, (read_val & mask) == write_val2, 0,
+ POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link power state ack\n");
+
+ writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+ ndelay(100);
+}
+
+static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
+ unsigned int offset,
+ unsigned char start_bit,
+ unsigned char num_bits,
+ unsigned int val)
+{
+ unsigned int read_val;
+
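+	/* read-modify-write: clear the num_bits-wide field at start_bit, then OR in val */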
+ read_val = readl(cdns_phy->base + offset);
+ writel(((val << start_bit) | (read_val & ~(((1 << num_bits) - 1) <<
+ start_bit))), cdns_phy->base + offset);
+}
+
+static int cdns_dp_phy_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ struct cdns_dp_phy *cdns_phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct phy *phy;
+ int err;
+
+ cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL);
+ if (!cdns_phy)
+ return -ENOMEM;
+
+ cdns_phy->dev = &pdev->dev;
+
+ phy = devm_phy_create(dev, NULL, &cdns_dp_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create DisplayPort PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cdns_phy->base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(cdns_phy->base))
+ return PTR_ERR(cdns_phy->base);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cdns_phy->sd_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(cdns_phy->sd_base))
+ return PTR_ERR(cdns_phy->sd_base);
+
+ err = device_property_read_u32(dev, "num_lanes",
+ &(cdns_phy->num_lanes));
+ if (err)
+ cdns_phy->num_lanes = DEFAULT_NUM_LANES;
+
+ switch (cdns_phy->num_lanes) {
+ case 1:
+ case 2:
+ case 4:
+ /* valid number of lanes */
+ break;
+ default:
+ dev_err(dev, "unsupported number of lanes: %d\n",
+ cdns_phy->num_lanes);
+ return -EINVAL;
+ }
+
+ err = device_property_read_u32(dev, "max_bit_rate",
+ &(cdns_phy->max_bit_rate));
+ if (err)
+ cdns_phy->max_bit_rate = DEFAULT_MAX_BIT_RATE;
+
+ switch (cdns_phy->max_bit_rate) {
+ case 2160:
+ case 2430:
+ case 2700:
+ case 3240:
+ case 4320:
+ case 5400:
+ case 8100:
+ /* valid bit rate */
+ break;
+ default:
+ dev_err(dev, "unsupported max bit rate: %dMbps\n",
+ cdns_phy->max_bit_rate);
+ return -EINVAL;
+ }
+
+ phy_set_drvdata(phy, cdns_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n",
+ cdns_phy->num_lanes,
+ cdns_phy->max_bit_rate / 1000,
+ cdns_phy->max_bit_rate % 1000);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id cdns_dp_phy_of_match[] = {
+ {
+ .compatible = "cdns,dp-phy"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cdns_dp_phy_of_match);
+
+static struct platform_driver cdns_dp_phy_driver = {
+ .probe = cdns_dp_phy_probe,
+ .driver = {
+ .name = "cdns-dp-phy",
+ .of_match_table = cdns_dp_phy_of_match,
+ }
+};
+module_platform_driver(cdns_dp_phy_driver);
+
+MODULE_AUTHOR("Cadence Design Systems, Inc.");
+MODULE_DESCRIPTION("Cadence MHDP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
index 986224fca9e9..f9e0dd19ff26 100644
--- a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
+++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
@@ -156,7 +156,6 @@ static int ltq_rcu_usb2_of_parse(struct ltq_rcu_usb2_priv *priv,
{
struct device *dev = priv->dev;
const __be32 *offset;
- int ret;
priv->reg_bits = of_device_get_match_data(dev);
@@ -196,10 +195,8 @@ static int ltq_rcu_usb2_of_parse(struct ltq_rcu_usb2_priv *priv,
}
priv->phy_reset = devm_reset_control_get_optional(dev, "phy");
- if (IS_ERR(priv->phy_reset))
- return PTR_ERR(priv->phy_reset);
- return 0;
+ return PTR_ERR_OR_ZERO(priv->phy_reset);
}
static int ltq_rcu_usb2_phy_probe(struct platform_device *pdev)
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 68e321225400..6fb4b56e4c14 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -59,3 +59,14 @@ config PHY_PXA_28NM_USB2
The PHY driver will be used by Marvell udc/ehci/otg driver.
To compile this driver as a module, choose M here.
+
+config PHY_PXA_USB
+ tristate "Marvell PXA USB PHY Driver"
+ depends on ARCH_PXA || ARCH_MMP
+ select GENERIC_PHY
+ help
+	  Enable this to support the Marvell PXA USB PHY driver for Marvell
+	  SoCs. This driver will do the PHY initialization and shutdown.
+	  The PHY driver will be used by the Marvell udc/ehci/otg drivers.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile
index 5c3ec5d10e0d..3975b144f8ec 100644
--- a/drivers/phy/marvell/Makefile
+++ b/drivers/phy/marvell/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PHY_MVEBU_CP110_COMPHY) += phy-mvebu-cp110-comphy.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o
obj-$(CONFIG_PHY_PXA_28NM_USB2) += phy-pxa-28nm-usb2.o
+obj-$(CONFIG_PHY_PXA_USB) += phy-pxa-usb.o
diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
index c1bb6725e48f..a91fc67fc4e0 100644
--- a/drivers/phy/marvell/phy-berlin-sata.c
+++ b/drivers/phy/marvell/phy-berlin-sata.c
@@ -231,14 +231,14 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
struct phy_berlin_desc *phy_desc;
if (of_property_read_u32(child, "reg", &phy_id)) {
- dev_err(dev, "missing reg property in node %s\n",
- child->name);
+ dev_err(dev, "missing reg property in node %pOFn\n",
+ child);
ret = -EINVAL;
goto put_child;
}
if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) {
- dev_err(dev, "invalid reg in node %s\n", child->name);
+ dev_err(dev, "invalid reg in node %pOFn\n", child);
ret = -EINVAL;
goto put_child;
}
diff --git a/drivers/phy/marvell/phy-pxa-usb.c b/drivers/phy/marvell/phy-pxa-usb.c
new file mode 100644
index 000000000000..87ff7550b912
--- /dev/null
+++ b/drivers/phy/marvell/phy-pxa-usb.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Copyright (C) 2018 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+/* phy regs */
+#define UTMI_REVISION 0x0
+#define UTMI_CTRL 0x4
+#define UTMI_PLL 0x8
+#define UTMI_TX 0xc
+#define UTMI_RX 0x10
+#define UTMI_IVREF 0x14
+#define UTMI_T0 0x18
+#define UTMI_T1 0x1c
+#define UTMI_T2 0x20
+#define UTMI_T3 0x24
+#define UTMI_T4 0x28
+#define UTMI_T5 0x2c
+#define UTMI_RESERVE 0x30
+#define UTMI_USB_INT 0x34
+#define UTMI_DBG_CTL 0x38
+#define UTMI_OTG_ADDON 0x3c
+
+/* For UTMICTRL Register */
+#define UTMI_CTRL_USB_CLK_EN (1 << 31)
+/* pxa168 */
+#define UTMI_CTRL_SUSPEND_SET1 (1 << 30)
+#define UTMI_CTRL_SUSPEND_SET2 (1 << 29)
+#define UTMI_CTRL_RXBUF_PDWN (1 << 24)
+#define UTMI_CTRL_TXBUF_PDWN (1 << 11)
+
+#define UTMI_CTRL_INPKT_DELAY_SHIFT 30
+#define UTMI_CTRL_INPKT_DELAY_SOF_SHIFT 28
+#define UTMI_CTRL_PU_REF_SHIFT 20
+#define UTMI_CTRL_ARC_PULLDN_SHIFT 12
+#define UTMI_CTRL_PLL_PWR_UP_SHIFT 1
+#define UTMI_CTRL_PWR_UP_SHIFT 0
+
+/* For UTMI_PLL Register */
+#define UTMI_PLL_PLLCALI12_SHIFT 29
+#define UTMI_PLL_PLLCALI12_MASK (0x3 << 29)
+
+#define UTMI_PLL_PLLVDD18_SHIFT 27
+#define UTMI_PLL_PLLVDD18_MASK (0x3 << 27)
+
+#define UTMI_PLL_PLLVDD12_SHIFT 25
+#define UTMI_PLL_PLLVDD12_MASK (0x3 << 25)
+
+#define UTMI_PLL_CLK_BLK_EN_SHIFT 24
+#define CLK_BLK_EN (0x1 << 24)
+#define PLL_READY (0x1 << 23)
+#define KVCO_EXT (0x1 << 22)
+#define VCOCAL_START (0x1 << 21)
+
+#define UTMI_PLL_KVCO_SHIFT 15
+#define UTMI_PLL_KVCO_MASK (0x7 << 15)
+
+#define UTMI_PLL_ICP_SHIFT 12
+#define UTMI_PLL_ICP_MASK (0x7 << 12)
+
+#define UTMI_PLL_FBDIV_SHIFT 4
+#define UTMI_PLL_FBDIV_MASK (0xFF << 4)
+
+#define UTMI_PLL_REFDIV_SHIFT 0
+#define UTMI_PLL_REFDIV_MASK (0xF << 0)
+
+/* For UTMI_TX Register */
+#define UTMI_TX_REG_EXT_FS_RCAL_SHIFT 27
+#define UTMI_TX_REG_EXT_FS_RCAL_MASK (0xf << 27)
+
+#define UTMI_TX_REG_EXT_FS_RCAL_EN_SHIFT 26
+#define UTMI_TX_REG_EXT_FS_RCAL_EN_MASK (0x1 << 26)
+
+#define UTMI_TX_TXVDD12_SHIFT 22
+#define UTMI_TX_TXVDD12_MASK (0x3 << 22)
+
+#define UTMI_TX_CK60_PHSEL_SHIFT 17
+#define UTMI_TX_CK60_PHSEL_MASK (0xf << 17)
+
+#define UTMI_TX_IMPCAL_VTH_SHIFT 14
+#define UTMI_TX_IMPCAL_VTH_MASK (0x7 << 14)
+
+#define REG_RCAL_START (0x1 << 12)
+
+#define UTMI_TX_LOW_VDD_EN_SHIFT 11
+
+#define UTMI_TX_AMP_SHIFT 0
+#define UTMI_TX_AMP_MASK (0x7 << 0)
+
+/* For UTMI_RX Register */
+#define UTMI_REG_SQ_LENGTH_SHIFT 15
+#define UTMI_REG_SQ_LENGTH_MASK (0x3 << 15)
+
+#define UTMI_RX_SQ_THRESH_SHIFT 4
+#define UTMI_RX_SQ_THRESH_MASK (0xf << 4)
+
+#define UTMI_OTG_ADDON_OTG_ON (1 << 0)
+
+enum pxa_usb_phy_version {
+ PXA_USB_PHY_MMP2,
+ PXA_USB_PHY_PXA910,
+ PXA_USB_PHY_PXA168,
+};
+
+struct pxa_usb_phy {
+ struct phy *phy;
+ void __iomem *base;
+ enum pxa_usb_phy_version version;
+};
+
+/*****************************************************************************
+ * Register read/write helpers
+ *****************************************************************************/
+
+static unsigned int u2o_get(void __iomem *base, unsigned int offset)
+{
+ return readl_relaxed(base + offset);
+}
+
+static void u2o_set(void __iomem *base, unsigned int offset,
+ unsigned int value)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg |= value;
+ writel_relaxed(reg, base + offset);
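+	/* read back so the posted write reaches the PHY before returning */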
+ readl_relaxed(base + offset);
+}
+
+static void u2o_clear(void __iomem *base, unsigned int offset,
+ unsigned int value)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg &= ~value;
+ writel_relaxed(reg, base + offset);
+ readl_relaxed(base + offset);
+}
+
+static void u2o_write(void __iomem *base, unsigned int offset,
+ unsigned int value)
+{
+ writel_relaxed(value, base + offset);
+ readl_relaxed(base + offset);
+}
+
+static int pxa_usb_phy_init(struct phy *phy)
+{
+ struct pxa_usb_phy *pxa_usb_phy = phy_get_drvdata(phy);
+ void __iomem *base = pxa_usb_phy->base;
+ int loops;
+
+	dev_info(&phy->dev, "initializing Marvell PXA USB PHY\n");
+
+ /* Initialize the USB PHY power */
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA910) {
+ u2o_set(base, UTMI_CTRL, (1<<UTMI_CTRL_INPKT_DELAY_SOF_SHIFT)
+ | (1<<UTMI_CTRL_PU_REF_SHIFT));
+ }
+
+ u2o_set(base, UTMI_CTRL, 1<<UTMI_CTRL_PLL_PWR_UP_SHIFT);
+ u2o_set(base, UTMI_CTRL, 1<<UTMI_CTRL_PWR_UP_SHIFT);
+
+ /* UTMI_PLL settings */
+ u2o_clear(base, UTMI_PLL, UTMI_PLL_PLLVDD18_MASK
+ | UTMI_PLL_PLLVDD12_MASK | UTMI_PLL_PLLCALI12_MASK
+ | UTMI_PLL_FBDIV_MASK | UTMI_PLL_REFDIV_MASK
+ | UTMI_PLL_ICP_MASK | UTMI_PLL_KVCO_MASK);
+
+ u2o_set(base, UTMI_PLL, 0xee<<UTMI_PLL_FBDIV_SHIFT
+ | 0xb<<UTMI_PLL_REFDIV_SHIFT | 3<<UTMI_PLL_PLLVDD18_SHIFT
+ | 3<<UTMI_PLL_PLLVDD12_SHIFT | 3<<UTMI_PLL_PLLCALI12_SHIFT
+ | 1<<UTMI_PLL_ICP_SHIFT | 3<<UTMI_PLL_KVCO_SHIFT);
+
+ /* UTMI_TX */
+ u2o_clear(base, UTMI_TX, UTMI_TX_REG_EXT_FS_RCAL_EN_MASK
+ | UTMI_TX_TXVDD12_MASK | UTMI_TX_CK60_PHSEL_MASK
+ | UTMI_TX_IMPCAL_VTH_MASK | UTMI_TX_REG_EXT_FS_RCAL_MASK
+ | UTMI_TX_AMP_MASK);
+ u2o_set(base, UTMI_TX, 3<<UTMI_TX_TXVDD12_SHIFT
+ | 4<<UTMI_TX_CK60_PHSEL_SHIFT | 4<<UTMI_TX_IMPCAL_VTH_SHIFT
+ | 8<<UTMI_TX_REG_EXT_FS_RCAL_SHIFT | 3<<UTMI_TX_AMP_SHIFT);
+
+ /* UTMI_RX */
+ u2o_clear(base, UTMI_RX, UTMI_RX_SQ_THRESH_MASK
+ | UTMI_REG_SQ_LENGTH_MASK);
+ u2o_set(base, UTMI_RX, 7<<UTMI_RX_SQ_THRESH_SHIFT
+ | 2<<UTMI_REG_SQ_LENGTH_SHIFT);
+
+ /* UTMI_IVREF */
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA168) {
+ /*
+		 * Fix the Microsoft Altair board's interface issue with NEC
+		 * hubs: set UTMI_IVREF from 0x4a3 to 0x4bf.
+ */
+ u2o_write(base, UTMI_IVREF, 0x4bf);
+ }
+
+ /* toggle VCOCAL_START bit of UTMI_PLL */
+ udelay(200);
+ u2o_set(base, UTMI_PLL, VCOCAL_START);
+ udelay(40);
+ u2o_clear(base, UTMI_PLL, VCOCAL_START);
+
+ /* toggle REG_RCAL_START bit of UTMI_TX */
+ udelay(400);
+ u2o_set(base, UTMI_TX, REG_RCAL_START);
+ udelay(40);
+ u2o_clear(base, UTMI_TX, REG_RCAL_START);
+ udelay(400);
+
+ /* Make sure PHY PLL is ready */
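+	/* up to 100 polls at 1ms each before giving up */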
+ loops = 0;
+ while ((u2o_get(base, UTMI_PLL) & PLL_READY) == 0) {
+ mdelay(1);
+ loops++;
+ if (loops > 100) {
+ dev_warn(&phy->dev, "calibrate timeout, UTMI_PLL %x\n",
+ u2o_get(base, UTMI_PLL));
+ break;
+ }
+ }
+
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA168) {
+ u2o_set(base, UTMI_RESERVE, 1 << 5);
+ /* Turn on UTMI PHY OTG extension */
+ u2o_write(base, UTMI_OTG_ADDON, 1);
+ }
+
+ return 0;
+}
+
+static int pxa_usb_phy_exit(struct phy *phy)
+{
+ struct pxa_usb_phy *pxa_usb_phy = phy_get_drvdata(phy);
+ void __iomem *base = pxa_usb_phy->base;
+
+	dev_info(&phy->dev, "deinitializing Marvell PXA USB PHY\n");
+
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA168)
+ u2o_clear(base, UTMI_OTG_ADDON, UTMI_OTG_ADDON_OTG_ON);
+
+ u2o_clear(base, UTMI_CTRL, UTMI_CTRL_RXBUF_PDWN);
+ u2o_clear(base, UTMI_CTRL, UTMI_CTRL_TXBUF_PDWN);
+ u2o_clear(base, UTMI_CTRL, UTMI_CTRL_USB_CLK_EN);
+ u2o_clear(base, UTMI_CTRL, 1<<UTMI_CTRL_PWR_UP_SHIFT);
+ u2o_clear(base, UTMI_CTRL, 1<<UTMI_CTRL_PLL_PWR_UP_SHIFT);
+
+ return 0;
+}
+
+static const struct phy_ops pxa_usb_phy_ops = {
+ .init = pxa_usb_phy_init,
+ .exit = pxa_usb_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id pxa_usb_phy_of_match[] = {
+ {
+ .compatible = "marvell,mmp2-usb-phy",
+ .data = (void *)PXA_USB_PHY_MMP2,
+ }, {
+ .compatible = "marvell,pxa910-usb-phy",
+ .data = (void *)PXA_USB_PHY_PXA910,
+ }, {
+ .compatible = "marvell,pxa168-usb-phy",
+ .data = (void *)PXA_USB_PHY_PXA168,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pxa_usb_phy_of_match);
+
+static int pxa_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *resource;
+ struct pxa_usb_phy *pxa_usb_phy;
+ struct phy_provider *provider;
+ const struct of_device_id *of_id;
+
+ pxa_usb_phy = devm_kzalloc(dev, sizeof(struct pxa_usb_phy), GFP_KERNEL);
+ if (!pxa_usb_phy)
+ return -ENOMEM;
+
+ of_id = of_match_node(pxa_usb_phy_of_match, dev->of_node);
+ if (of_id)
+ pxa_usb_phy->version = (enum pxa_usb_phy_version)of_id->data;
+ else
+ pxa_usb_phy->version = PXA_USB_PHY_MMP2;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pxa_usb_phy->base = devm_ioremap_resource(dev, resource);
+ if (IS_ERR(pxa_usb_phy->base)) {
+ dev_err(dev, "failed to remap PHY regs\n");
+ return PTR_ERR(pxa_usb_phy->base);
+ }
+
+ pxa_usb_phy->phy = devm_phy_create(dev, NULL, &pxa_usb_phy_ops);
+ if (IS_ERR(pxa_usb_phy->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(pxa_usb_phy->phy);
+ }
+
+ phy_set_drvdata(pxa_usb_phy->phy, pxa_usb_phy);
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(dev, "failed to register PHY provider\n");
+ return PTR_ERR(provider);
+ }
+
+ if (!dev->of_node) {
+ phy_create_lookup(pxa_usb_phy->phy, "usb", "mv-udc");
+ phy_create_lookup(pxa_usb_phy->phy, "usb", "pxa-u2oehci");
+ phy_create_lookup(pxa_usb_phy->phy, "usb", "mv-otg");
+ }
+
+	dev_info(dev, "Marvell PXA USB PHY\n");
+ return 0;
+}
+
+static struct platform_driver pxa_usb_phy_driver = {
+ .probe = pxa_usb_phy_probe,
+ .driver = {
+ .name = "pxa-usb-phy",
+ .of_match_table = pxa_usb_phy_of_match,
+ },
+};
+module_platform_driver(pxa_usb_phy_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Marvell PXA USB PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 0075fb0bef8c..25d456a323c2 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -157,15 +157,13 @@ static const struct phy_ops gpio_usb_ops = {
*/
static void phy_mdm6600_cmd(struct phy_mdm6600 *ddata, int val)
{
- int values[PHY_MDM6600_NR_CMD_LINES];
- int i;
+ DECLARE_BITMAP(values, PHY_MDM6600_NR_CMD_LINES);
- val &= (1 << PHY_MDM6600_NR_CMD_LINES) - 1;
- for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++)
- values[i] = (val & BIT(i)) >> i;
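+	/* all PHY_MDM6600_NR_CMD_LINES bits fit in the first bitmap word */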
+ values[0] = val;
gpiod_set_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
- ddata->cmd_gpios->desc, values);
+ ddata->cmd_gpios->desc,
+ ddata->cmd_gpios->info, values);
}
/**
@@ -176,7 +174,7 @@ static void phy_mdm6600_status(struct work_struct *work)
{
struct phy_mdm6600 *ddata;
struct device *dev;
- int values[PHY_MDM6600_NR_STATUS_LINES];
+ DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES);
int error, i, val = 0;
ddata = container_of(work, struct phy_mdm6600, status_work.work);
@@ -184,16 +182,17 @@ static void phy_mdm6600_status(struct work_struct *work)
error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
ddata->status_gpios->desc,
+ ddata->status_gpios->info,
values);
if (error)
return;
for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
- val |= values[i] << i;
+ val |= test_bit(i, values) << i;
dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
- __func__, i, values[i], val);
+ __func__, i, test_bit(i, values), val);
}
- ddata->status = val;
+ ddata->status = values[0];
dev_info(dev, "modem status: %i %s\n",
ddata->status,
diff --git a/drivers/phy/mscc/Kconfig b/drivers/phy/mscc/Kconfig
new file mode 100644
index 000000000000..2e2a466efd66
--- /dev/null
+++ b/drivers/phy/mscc/Kconfig
@@ -0,0 +1,11 @@
+#
+# Phy drivers for Microsemi devices
+#
+
+config PHY_OCELOT_SERDES
+ tristate "SerDes PHY driver for Microsemi Ocelot"
+ select GENERIC_PHY
+ depends on OF
+ depends on MFD_SYSCON
+ help
+	  Enable this to support SerDes muxing on Microsemi Ocelot.
diff --git a/drivers/phy/mscc/Makefile b/drivers/phy/mscc/Makefile
new file mode 100644
index 000000000000..e14749170fc9
--- /dev/null
+++ b/drivers/phy/mscc/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Microsemi phy drivers.
+#
+
+obj-$(CONFIG_PHY_OCELOT_SERDES) := phy-ocelot-serdes.o
diff --git a/drivers/phy/mscc/phy-ocelot-serdes.c b/drivers/phy/mscc/phy-ocelot-serdes.c
new file mode 100644
index 000000000000..cbb49d9da6f9
--- /dev/null
+++ b/drivers/phy/mscc/phy-ocelot-serdes.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * SerDes PHY driver for Microsemi Ocelot
+ *
+ * Copyright (c) 2018 Microsemi
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <soc/mscc/ocelot_hsio.h>
+#include <dt-bindings/phy/phy-ocelot-serdes.h>
+
+struct serdes_ctrl {
+ struct regmap *regs;
+ struct device *dev;
+ struct phy *phys[SERDES_MAX];
+};
+
+struct serdes_macro {
+ u8 idx;
+ /* Not used when in QSGMII or PCIe mode */
+ int port;
+ struct serdes_ctrl *ctrl;
+};
+
+#define MCB_S1G_CFG_TIMEOUT 50
+
+static int __serdes_write_mcb_s1g(struct regmap *regmap, u8 macro, u32 op)
+{
+ unsigned int regval;
+
+ regmap_write(regmap, HSIO_MCB_S1G_ADDR_CFG, op |
+ HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(BIT(macro)));
+
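+	/* the one-shot bit self-clears once the MCB transfer completes */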
+ return regmap_read_poll_timeout(regmap, HSIO_MCB_S1G_ADDR_CFG, regval,
+ (regval & op) != op, 100,
+ MCB_S1G_CFG_TIMEOUT * 1000);
+}
+
+static int serdes_commit_mcb_s1g(struct regmap *regmap, u8 macro)
+{
+ return __serdes_write_mcb_s1g(regmap, macro,
+ HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT);
+}
+
+static int serdes_update_mcb_s1g(struct regmap *regmap, u8 macro)
+{
+ return __serdes_write_mcb_s1g(regmap, macro,
+ HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT);
+}
+
+static int serdes_init_s1g(struct regmap *regmap, u8 serdes)
+{
+ int ret;
+
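+	/* load the macro's current settings into the register view first */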
+ ret = serdes_update_mcb_s1g(regmap, serdes);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(regmap, HSIO_S1G_COMMON_CFG,
+ HSIO_S1G_COMMON_CFG_SYS_RST |
+ HSIO_S1G_COMMON_CFG_ENA_LANE |
+ HSIO_S1G_COMMON_CFG_ENA_ELOOP |
+ HSIO_S1G_COMMON_CFG_ENA_FLOOP,
+ HSIO_S1G_COMMON_CFG_ENA_LANE);
+
+ regmap_update_bits(regmap, HSIO_S1G_PLL_CFG,
+ HSIO_S1G_PLL_CFG_PLL_FSM_ENA |
+ HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M,
+ HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(200) |
+ HSIO_S1G_PLL_CFG_PLL_FSM_ENA);
+
+ regmap_update_bits(regmap, HSIO_S1G_MISC_CFG,
+ HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA |
+ HSIO_S1G_MISC_CFG_LANE_RST,
+ HSIO_S1G_MISC_CFG_LANE_RST);
+
+ ret = serdes_commit_mcb_s1g(regmap, serdes);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(regmap, HSIO_S1G_COMMON_CFG,
+ HSIO_S1G_COMMON_CFG_SYS_RST,
+ HSIO_S1G_COMMON_CFG_SYS_RST);
+
+ regmap_update_bits(regmap, HSIO_S1G_MISC_CFG,
+ HSIO_S1G_MISC_CFG_LANE_RST, 0);
+
+ ret = serdes_commit_mcb_s1g(regmap, serdes);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct serdes_mux {
+ u8 idx;
+ u8 port;
+ enum phy_mode mode;
+ u32 mask;
+ u32 mux;
+};
+
+#define SERDES_MUX(_idx, _port, _mode, _mask, _mux) { \
+ .idx = _idx, \
+ .port = _port, \
+ .mode = _mode, \
+ .mask = _mask, \
+ .mux = _mux, \
+}
+
+#define SERDES_MUX_SGMII(i, p, m, c) SERDES_MUX(i, p, PHY_MODE_SGMII, m, c)
+#define SERDES_MUX_QSGMII(i, p, m, c) SERDES_MUX(i, p, PHY_MODE_QSGMII, m, c)
+
+static const struct serdes_mux ocelot_serdes_muxes[] = {
+ SERDES_MUX_SGMII(SERDES1G(0), 0, 0, 0),
+ SERDES_MUX_SGMII(SERDES1G(1), 1, HSIO_HW_CFG_DEV1G_5_MODE, 0),
+ SERDES_MUX_SGMII(SERDES1G(1), 5, HSIO_HW_CFG_QSGMII_ENA |
+ HSIO_HW_CFG_DEV1G_5_MODE, HSIO_HW_CFG_DEV1G_5_MODE),
+ SERDES_MUX_SGMII(SERDES1G(2), 2, HSIO_HW_CFG_DEV1G_4_MODE, 0),
+ SERDES_MUX_SGMII(SERDES1G(2), 4, HSIO_HW_CFG_QSGMII_ENA |
+ HSIO_HW_CFG_DEV1G_4_MODE, HSIO_HW_CFG_DEV1G_4_MODE),
+ SERDES_MUX_SGMII(SERDES1G(3), 3, HSIO_HW_CFG_DEV1G_6_MODE, 0),
+ SERDES_MUX_SGMII(SERDES1G(3), 6, HSIO_HW_CFG_QSGMII_ENA |
+ HSIO_HW_CFG_DEV1G_6_MODE, HSIO_HW_CFG_DEV1G_6_MODE),
+ SERDES_MUX_SGMII(SERDES1G(4), 4, HSIO_HW_CFG_QSGMII_ENA |
+ HSIO_HW_CFG_DEV1G_4_MODE | HSIO_HW_CFG_DEV1G_9_MODE,
+ 0),
+ SERDES_MUX_SGMII(SERDES1G(4), 9, HSIO_HW_CFG_DEV1G_4_MODE |
+ HSIO_HW_CFG_DEV1G_9_MODE, HSIO_HW_CFG_DEV1G_4_MODE |
+ HSIO_HW_CFG_DEV1G_9_MODE),
+ SERDES_MUX_SGMII(SERDES1G(5), 5, HSIO_HW_CFG_QSGMII_ENA |
+ HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE,
+ 0),
+ SERDES_MUX_SGMII(SERDES1G(5), 10, HSIO_HW_CFG_PCIE_ENA |
+ HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE,
+ HSIO_HW_CFG_DEV1G_5_MODE | HSIO_HW_CFG_DEV2G5_10_MODE),
+ SERDES_MUX_QSGMII(SERDES6G(0), 4, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA),
+ SERDES_MUX_QSGMII(SERDES6G(0), 5, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA),
+ SERDES_MUX_QSGMII(SERDES6G(0), 6, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA),
+ SERDES_MUX_SGMII(SERDES6G(0), 7, HSIO_HW_CFG_QSGMII_ENA, 0),
+ SERDES_MUX_QSGMII(SERDES6G(0), 7, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA),
+ SERDES_MUX_SGMII(SERDES6G(1), 8, 0, 0),
+ SERDES_MUX_SGMII(SERDES6G(2), 10, HSIO_HW_CFG_PCIE_ENA |
+ HSIO_HW_CFG_DEV2G5_10_MODE, 0),
+ SERDES_MUX(SERDES6G(2), 10, PHY_MODE_PCIE, HSIO_HW_CFG_PCIE_ENA,
+ HSIO_HW_CFG_PCIE_ENA),
+};
+
+static int serdes_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ struct serdes_macro *macro = phy_get_drvdata(phy);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ocelot_serdes_muxes); i++) {
+ if (macro->idx != ocelot_serdes_muxes[i].idx ||
+ mode != ocelot_serdes_muxes[i].mode)
+ continue;
+
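+		/* QSGMII muxes several ports onto one macro, so only match the port otherwise */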
+ if (mode != PHY_MODE_QSGMII &&
+ macro->port != ocelot_serdes_muxes[i].port)
+ continue;
+
+ ret = regmap_update_bits(macro->ctrl->regs, HSIO_HW_CFG,
+ ocelot_serdes_muxes[i].mask,
+ ocelot_serdes_muxes[i].mux);
+ if (ret)
+ return ret;
+
+ if (macro->idx <= SERDES1G_MAX)
+ return serdes_init_s1g(macro->ctrl->regs, macro->idx);
+
+ /* SERDES6G and PCIe not supported yet */
+ return -EOPNOTSUPP;
+ }
+
+ return -EINVAL;
+}
+
+static const struct phy_ops serdes_ops = {
+ .set_mode = serdes_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *serdes_simple_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct serdes_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned int port, idx, i;
+
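+	/* expects two cells: args[0] is the port, args[1] the serdes index */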
+ if (args->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ port = args->args[0];
+ idx = args->args[1];
+
+ for (i = 0; i < SERDES_MAX; i++) {
+ struct serdes_macro *macro = phy_get_drvdata(ctrl->phys[i]);
+
+ if (idx != macro->idx)
+ continue;
+
+ /* SERDES6G(0) is the only SerDes capable of QSGMII */
+ if (idx != SERDES6G(0) && macro->port >= 0)
+ return ERR_PTR(-EBUSY);
+
+ macro->port = port;
+ return ctrl->phys[i];
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int serdes_phy_create(struct serdes_ctrl *ctrl, u8 idx, struct phy **phy)
+{
+ struct serdes_macro *macro;
+
+ *phy = devm_phy_create(ctrl->dev, NULL, &serdes_ops);
+ if (IS_ERR(*phy))
+ return PTR_ERR(*phy);
+
+ macro = devm_kzalloc(ctrl->dev, sizeof(*macro), GFP_KERNEL);
+ if (!macro)
+ return -ENOMEM;
+
+ macro->idx = idx;
+ macro->ctrl = ctrl;
+ macro->port = -1;
+
+ phy_set_drvdata(*phy, macro);
+
+ return 0;
+}
+
+static int serdes_probe(struct platform_device *pdev)
+{
+ struct phy_provider *provider;
+ struct serdes_ctrl *ctrl;
+ unsigned int i;
+ int ret;
+
+ ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = &pdev->dev;
+ ctrl->regs = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(ctrl->regs))
+ return PTR_ERR(ctrl->regs);
+
+ for (i = 0; i < SERDES_MAX; i++) {
+ ret = serdes_phy_create(ctrl, i, &ctrl->phys[i]);
+ if (ret)
+ return ret;
+ }
+
+ dev_set_drvdata(&pdev->dev, ctrl);
+
+ provider = devm_of_phy_provider_register(ctrl->dev,
+ serdes_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id serdes_ids[] = {
+ { .compatible = "mscc,vsc7514-serdes", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, serdes_ids);
+
+static struct platform_driver mscc_ocelot_serdes = {
+ .probe = serdes_probe,
+ .driver = {
+ .name = "mscc,ocelot-serdes",
+ .of_match_table = of_match_ptr(serdes_ids),
+ },
+};
+
+module_platform_driver(mscc_ocelot_serdes);
+
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@bootlin.com>");
+MODULE_DESCRIPTION("SerDes driver for Microsemi Ocelot");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index 632a0e73ee10..32f7d34eb784 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -50,6 +50,23 @@ config PHY_QCOM_UFS
help
Support for UFS PHY on QCOM chipsets.
+if PHY_QCOM_UFS
+
+config PHY_QCOM_UFS_14NM
+ tristate
+ default PHY_QCOM_UFS
+ help
+ Support for 14nm UFS QMP phy present on QCOM chipsets.
+
+config PHY_QCOM_UFS_20NM
+ tristate
+ default PHY_QCOM_UFS
+ depends on BROKEN
+ help
+ Support for 20nm UFS QMP phy present on QCOM chipsets.
+
+endif
+
config PHY_QCOM_USB_HS
tristate "Qualcomm USB HS PHY module"
depends on USB_ULPI_BUS
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index deb831f453ae..c56efd3af205 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_QMP) += phy-qcom-qmp.o
obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs.o
-obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
-obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-20nm.o
+obj-$(CONFIG_PHY_QCOM_UFS_14NM) += phy-qcom-ufs-qmp-14nm.o
+obj-$(CONFIG_PHY_QCOM_UFS_20NM) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 4c470104a0d6..a83332411026 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -156,6 +156,11 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
};
+static const unsigned int sdm845_ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x160,
+};
+
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
@@ -601,6 +606,83 @@ static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
};
+static const struct qmp_phy_init_tbl sdm845_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xda),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE1, 0xc1),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE1, 0x0f),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_TERM_BW, 0x5b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_CTRL2, 0x6e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SYM_RESYNC_CTRL, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_CTRL1, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_MIN_HIBERN8_TIME, 0x9a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MULTI_LANE_CTRL1, 0x02),
+};
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
@@ -649,9 +731,14 @@ struct qmp_phy_cfg {
/* true, if PHY has a separate DP_COM control block */
bool has_phy_dp_com_ctrl;
+ /* true, if PHY has secondary tx/rx lanes to be configured */
+ bool is_dual_lane_phy;
/* Register offset of secondary tx/rx lanes for USB DP combo PHY */
unsigned int tx_b_lane_offset;
unsigned int rx_b_lane_offset;
+
+ /* true, if PCS block has no separate SW_RESET register */
+ bool no_pcs_sw_reset;
};
/**
@@ -748,6 +835,10 @@ static const char * const qmp_v3_phy_clk_l[] = {
"aux", "cfg_ahb", "ref", "com_aux",
};
+static const char * const sdm845_ufs_phy_clk_l[] = {
+ "ref", "ref_aux",
+};
+
/* list of resets */
static const char * const msm8996_pciephy_reset_l[] = {
"phy", "common", "cfg",
@@ -758,7 +849,7 @@ static const char * const msm8996_usb3phy_reset_l[] = {
};
/* list of regulators */
-static const char * const msm8996_phy_vreg_l[] = {
+static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
@@ -778,8 +869,8 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
.reset_list = msm8996_pciephy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = pciephy_regs_layout,
.start_ctrl = PCS_START | PLL_READY_GATE_EN,
@@ -809,8 +900,8 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = usb3phy_regs_layout,
.start_ctrl = SERDES_START | PCS_START,
@@ -870,8 +961,8 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
.start_ctrl = SERDES_START | PCS_START,
@@ -883,6 +974,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
.tx_b_lane_offset = 0x400,
.rx_b_lane_offset = 0x400,
};
@@ -903,8 +995,8 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
.start_ctrl = SERDES_START | PCS_START,
@@ -916,6 +1008,35 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
+static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sdm845_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdm845_ufsphy_serdes_tbl),
+ .tx_tbl = sdm845_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_tx_tbl),
+ .rx_tbl = sdm845_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_rx_tbl),
+ .pcs_tbl = sdm845_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdm845_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sdm845_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PCS_READY,
+
+ .is_dual_lane_phy = true,
+ .tx_b_lane_offset = 0x400,
+ .rx_b_lane_offset = 0x400,
+
+ .no_pcs_sw_reset = true,
+};
+
static void qcom_qmp_phy_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
@@ -935,10 +1056,12 @@ static void qcom_qmp_phy_configure(void __iomem *base,
}
}
-static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
+static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
{
+ struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qmp->cfg;
void __iomem *serdes = qmp->serdes;
+ void __iomem *pcs = qphy->pcs;
void __iomem *dp_com = qmp->dp_com;
int ret, i;
@@ -979,10 +1102,6 @@ static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
goto err_rst;
}
- if (cfg->has_phy_com_ctrl)
- qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
- SW_PWRDN);
-
if (cfg->has_phy_dp_com_ctrl) {
qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
SW_PWRDN);
@@ -1000,6 +1119,12 @@ static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
}
+ if (cfg->has_phy_com_ctrl)
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+ else
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
/* Serdes configuration */
qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
cfg->serdes_tbl_num);
@@ -1090,7 +1215,7 @@ static int qcom_qmp_phy_init(struct phy *phy)
dev_vdbg(qmp->dev, "Initializing QMP phy\n");
- ret = qcom_qmp_phy_com_init(qmp);
+ ret = qcom_qmp_phy_com_init(qphy);
if (ret)
return ret;
@@ -1112,22 +1237,31 @@ static int qcom_qmp_phy_init(struct phy *phy)
/* Tx, Rx, and PCS configurations */
qcom_qmp_phy_configure(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num);
/* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->has_phy_dp_com_ctrl)
+ if (cfg->is_dual_lane_phy)
qcom_qmp_phy_configure(tx + cfg->tx_b_lane_offset, cfg->regs,
cfg->tx_tbl, cfg->tx_tbl_num);
qcom_qmp_phy_configure(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num);
- if (cfg->has_phy_dp_com_ctrl)
+ if (cfg->is_dual_lane_phy)
qcom_qmp_phy_configure(rx + cfg->rx_b_lane_offset, cfg->regs,
cfg->rx_tbl, cfg->rx_tbl_num);
qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
/*
+	 * The UFS PHY requires software reset to be deasserted before the
+	 * serdes is started. For UFS PHYs that have no software reset
+	 * control bits, defer starting the serdes until the power-on
+	 * callback.
+ */
+ if ((cfg->type == PHY_TYPE_UFS) && cfg->no_pcs_sw_reset)
+ goto out;
+
+ /*
* Pull out PHY from POWER DOWN state.
* This is active low enable signal to power-down PHY.
*/
- qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+	if (cfg->type == PHY_TYPE_PCIE)
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
if (cfg->has_pwrdn_delay)
usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
@@ -1151,6 +1285,7 @@ static int qcom_qmp_phy_init(struct phy *phy)
}
qmp->phy_initialized = true;
+out:
return ret;
err_pcs_ready:
@@ -1173,7 +1308,8 @@ static int qcom_qmp_phy_exit(struct phy *phy)
clk_disable_unprepare(qphy->pipe_clk);
/* PHY reset */
- qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ if (!cfg->no_pcs_sw_reset)
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
/* stop SerDes and Phy-Coding-Sublayer */
qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
@@ -1191,6 +1327,44 @@ static int qcom_qmp_phy_exit(struct phy *phy)
return 0;
}
+static int qcom_qmp_phy_poweron(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *status;
+ unsigned int mask, val;
+ int ret = 0;
+
+ if (cfg->type != PHY_TYPE_UFS)
+ return 0;
+
+ /*
+	 * For UFS PHYs that have no software reset control, the serdes
+	 * should only be started when the UFS driver explicitly calls
+	 * phy_power_on() after it has deasserted the software reset.
+ */
+ if (cfg->no_pcs_sw_reset && !qmp->phy_initialized &&
+ (qmp->init_count != 0)) {
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = cfg->mask_pcs_ready;
+
+		ret = readl_poll_timeout(status, val, (val & mask), 1,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ return ret;
+ }
+ qmp->phy_initialized = true;
+ }
+
+ return ret;
+}
+
static int qcom_qmp_phy_set_mode(struct phy *phy, enum phy_mode mode)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
@@ -1400,7 +1574,7 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
ret = of_property_read_string(np, "clock-output-names", &init.name);
if (ret) {
- dev_err(qmp->dev, "%s: No clock-output-names\n", np->name);
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
return ret;
}
@@ -1420,6 +1594,7 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
static const struct phy_ops qcom_qmp_phy_gen_ops = {
.init = qcom_qmp_phy_init,
.exit = qcom_qmp_phy_exit,
+ .power_on = qcom_qmp_phy_poweron,
.set_mode = qcom_qmp_phy_set_mode,
.owner = THIS_MODULE,
};
@@ -1522,6 +1697,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,sdm845-qmp-usb3-uni-phy",
.data = &qmp_v3_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
},
{ },
};
@@ -1586,7 +1764,9 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
ret = qcom_qmp_phy_vreg_init(dev);
if (ret) {
- dev_err(dev, "failed to get regulator supplies\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 5d78d43ba9fc..d201cc307151 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -184,6 +184,8 @@
#define QSERDES_V3_COM_VCO_TUNE2_MODE0 0x0f8
#define QSERDES_V3_COM_VCO_TUNE1_MODE1 0x0fc
#define QSERDES_V3_COM_VCO_TUNE2_MODE1 0x100
+#define QSERDES_V3_COM_VCO_TUNE_INITVAL1 0x104
+#define QSERDES_V3_COM_VCO_TUNE_INITVAL2 0x108
#define QSERDES_V3_COM_VCO_TUNE_TIMER1 0x11c
#define QSERDES_V3_COM_VCO_TUNE_TIMER2 0x120
#define QSERDES_V3_COM_CLK_SELECT 0x138
@@ -211,8 +213,13 @@
/* Only for QMP V3 PHY - RX registers */
#define QSERDES_V3_RX_UCDR_SO_GAIN_HALF 0x00c
#define QSERDES_V3_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF 0x024
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER 0x028
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN 0x02c
#define QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN 0x030
#define QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V3_RX_UCDR_PI_CONTROLS 0x044
#define QSERDES_V3_RX_RX_TERM_BW 0x07c
#define QSERDES_V3_RX_VGA_CAL_CNTRL1 0x0bc
#define QSERDES_V3_RX_VGA_CAL_CNTRL2 0x0c0
@@ -239,6 +246,8 @@
#define QPHY_V3_PCS_TXMGN_V3 0x018
#define QPHY_V3_PCS_TXMGN_V4 0x01c
#define QPHY_V3_PCS_TXMGN_LS 0x020
+#define QPHY_V3_PCS_TX_LARGE_AMP_DRV_LVL 0x02c
+#define QPHY_V3_PCS_TX_SMALL_AMP_DRV_LVL 0x034
#define QPHY_V3_PCS_TXDEEMPH_M6DB_V0 0x024
#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0 0x028
#define QPHY_V3_PCS_TXDEEMPH_M6DB_V1 0x02c
@@ -275,6 +284,12 @@
#define QPHY_V3_PCS_FLL_CNT_VAL_L 0x0cc
#define QPHY_V3_PCS_FLL_CNT_VAL_H_TOL 0x0d0
#define QPHY_V3_PCS_FLL_MAN_CODE 0x0d4
+#define QPHY_V3_PCS_RX_SYM_RESYNC_CTRL 0x134
+#define QPHY_V3_PCS_RX_MIN_HIBERN8_TIME 0x138
+#define QPHY_V3_PCS_RX_SIGDET_CTRL1 0x13c
+#define QPHY_V3_PCS_RX_SIGDET_CTRL2 0x140
+#define QPHY_V3_PCS_TX_MID_TERM_CTRL1 0x1bc
+#define QPHY_V3_PCS_MULTI_LANE_CTRL1 0x1c4
#define QPHY_V3_PCS_RX_SIGDET_LVL 0x1d8
#define QPHY_V3_PCS_REFGEN_REQ_CONFIG1 0x20c
#define QPHY_V3_PCS_REFGEN_REQ_CONFIG2 0x210
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index e70e425f26f5..9ce531194f8a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -800,7 +800,9 @@ static int qusb2_phy_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(dev, num, qphy->vregs);
if (ret) {
- dev_err(dev, "failed to get regulator supplies\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
index 822c83b8efcd..681644e43248 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
@@ -17,9 +17,9 @@
#include <linux/module.h>
#include <linux/clk.h>
+#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <linux/phy/phy-qcom-ufs.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/delay.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index c5493ea51282..f2979ccad00a 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -431,56 +431,6 @@ static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
}
}
-#define UFS_REF_CLK_EN (1 << 5)
-
-static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
-{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
- if (phy->dev_ref_clk_ctrl_mmio &&
- (enable ^ phy->is_dev_ref_clk_enabled)) {
- u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);
-
- if (enable)
- temp |= UFS_REF_CLK_EN;
- else
- temp &= ~UFS_REF_CLK_EN;
-
- /*
- * If we are here to disable this clock immediately after
- * entering into hibern8, we need to make sure that device
- * ref_clk is active atleast 1us after the hibern8 enter.
- */
- if (!enable)
- udelay(1);
-
- writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
- /* ensure that ref_clk is enabled/disabled before we return */
- wmb();
- /*
- * If we call hibern8 exit after this, we need to make sure that
- * device ref_clk is stable for atleast 1us before the hibern8
- * exit command.
- */
- if (enable)
- udelay(1);
-
- phy->is_dev_ref_clk_enabled = enable;
- }
-}
-
-void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
-{
- ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);
-
-void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
-{
- ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
-
/* Turn ON M-PHY RMMI interface clocks */
static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
index 4bd390c79d21..e340a925bbb1 100644
--- a/drivers/phy/renesas/Kconfig
+++ b/drivers/phy/renesas/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Phy drivers for Renesas platforms
#
diff --git a/drivers/phy/renesas/Makefile b/drivers/phy/renesas/Makefile
index 4b76fc439ed6..b599ff8a4349 100644
--- a/drivers/phy/renesas/Makefile
+++ b/drivers/phy/renesas/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_RCAR_GEN2) += phy-rcar-gen2.o
obj-$(CONFIG_PHY_RCAR_GEN3_PCIE) += phy-rcar-gen3-pcie.o
obj-$(CONFIG_PHY_RCAR_GEN3_USB2) += phy-rcar-gen3-usb2.o
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index 97d4dd6ea924..72eeb066912d 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen2 PHY driver
*
* Copyright (C) 2014 Renesas Solutions Corp.
* Copyright (C) 2014 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index fb8f05e39cf7..d0f412c25981 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen3 for USB2.0 PHY driver
*
@@ -6,10 +7,6 @@
* This is based on the phy-rcar-gen2 driver:
* Copyright (C) 2014 Renesas Solutions Corp.
* Copyright (C) 2014 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/extcon-provider.h>
@@ -81,18 +78,29 @@
#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */
#define USB2_ADPCTRL_DRVVBUS BIT(4)
-#define RCAR_GEN3_PHY_HAS_DEDICATED_PINS 1
-
struct rcar_gen3_chan {
void __iomem *base;
struct extcon_dev *extcon;
struct phy *phy;
struct regulator *vbus;
struct work_struct work;
+ enum usb_dr_mode dr_mode;
bool extcon_host;
- bool has_otg_pins;
+ bool is_otg_channel;
+ bool uses_otg_pins;
};
+/*
+ * Combinations of is_otg_channel and uses_otg_pins:
+ *
+ * Parameters || Behaviors
+ * is_otg_channel | uses_otg_pins || irqs | role sysfs
+ * ---------------------+---------------++--------------+------------
+ * true | true || enabled | enabled
+ * true | false || disabled | enabled
+ * false | any || disabled | disabled
+ */
+
static void rcar_gen3_phy_usb2_work(struct work_struct *work)
{
struct rcar_gen3_chan *ch = container_of(work, struct rcar_gen3_chan,
@@ -147,6 +155,18 @@ static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus)
writel(val, usb2_base + USB2_ADPCTRL);
}
+static void rcar_gen3_control_otg_irq(struct rcar_gen3_chan *ch, int enable)
+{
+ void __iomem *usb2_base = ch->base;
+ u32 val = readl(usb2_base + USB2_OBINTEN);
+
+ if (ch->uses_otg_pins && enable)
+ val |= USB2_OBINT_BITS;
+ else
+ val &= ~USB2_OBINT_BITS;
+ writel(val, usb2_base + USB2_OBINTEN);
+}
+
static void rcar_gen3_init_for_host(struct rcar_gen3_chan *ch)
{
rcar_gen3_set_linectrl(ch, 1, 1);
@@ -192,20 +212,19 @@ static void rcar_gen3_init_for_a_peri(struct rcar_gen3_chan *ch)
static void rcar_gen3_init_from_a_peri_to_a_host(struct rcar_gen3_chan *ch)
{
- void __iomem *usb2_base = ch->base;
- u32 val;
+ rcar_gen3_control_otg_irq(ch, 0);
- val = readl(usb2_base + USB2_OBINTEN);
- writel(val & ~USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
-
- rcar_gen3_enable_vbus_ctrl(ch, 0);
+ rcar_gen3_enable_vbus_ctrl(ch, 1);
rcar_gen3_init_for_host(ch);
- writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+ rcar_gen3_control_otg_irq(ch, 1);
}
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
{
+ if (!ch->uses_otg_pins)
+ return (ch->dr_mode == USB_DR_MODE_HOST) ? false : true;
+
return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
}
@@ -237,7 +256,7 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
bool is_b_device;
enum phy_mode cur_mode, new_mode;
- if (!ch->has_otg_pins || !ch->phy->init_count)
+ if (!ch->is_otg_channel || !ch->phy->init_count)
return -EIO;
if (!strncmp(buf, "host", strlen("host")))
@@ -275,7 +294,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
- if (!ch->has_otg_pins || !ch->phy->init_count)
+ if (!ch->is_otg_channel || !ch->phy->init_count)
return -EIO;
return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
@@ -291,8 +310,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
val = readl(usb2_base + USB2_VBCTRL);
writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
- val = readl(usb2_base + USB2_OBINTEN);
- writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+ rcar_gen3_control_otg_irq(ch, 1);
val = readl(usb2_base + USB2_ADPCTRL);
writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
val = readl(usb2_base + USB2_LINECTRL1);
@@ -314,7 +332,7 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
/* Initialize otg part */
- if (channel->has_otg_pins)
+ if (channel->is_otg_channel)
rcar_gen3_init_otg(channel);
return 0;
@@ -388,21 +406,10 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
}
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
- {
- .compatible = "renesas,usb2-phy-r8a7795",
- .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
- },
- {
- .compatible = "renesas,usb2-phy-r8a7796",
- .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
- },
- {
- .compatible = "renesas,usb2-phy-r8a77965",
- .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
- },
- {
- .compatible = "renesas,rcar-gen3-usb2-phy",
- },
+ { .compatible = "renesas,usb2-phy-r8a7795" },
+ { .compatible = "renesas,usb2-phy-r8a7796" },
+ { .compatible = "renesas,usb2-phy-r8a77965" },
+ { .compatible = "renesas,rcar-gen3-usb2-phy" },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_gen3_phy_usb2_match_table);
@@ -445,10 +452,13 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
dev_err(dev, "No irq handler (%d)\n", irq);
}
- if (of_usb_get_dr_mode_by_phy(dev->of_node, 0) == USB_DR_MODE_OTG) {
+ channel->dr_mode = of_usb_get_dr_mode_by_phy(dev->of_node, 0);
+ if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
int ret;
- channel->has_otg_pins = (uintptr_t)of_device_get_match_data(dev);
+ channel->is_otg_channel = true;
+ channel->uses_otg_pins = !of_property_read_bool(dev->of_node,
+ "renesas,no-otg-pins");
channel->extcon = devm_extcon_dev_allocate(dev,
rcar_gen3_phy_cable);
if (IS_ERR(channel->extcon))
@@ -490,7 +500,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
dev_err(dev, "Failed to register PHY provider\n");
ret = PTR_ERR(provider);
goto error;
- } else if (channel->has_otg_pins) {
+ } else if (channel->is_otg_channel) {
int ret;
ret = device_create_file(dev, &dev_attr_role);
@@ -510,7 +520,7 @@ static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
{
struct rcar_gen3_chan *channel = platform_get_drvdata(pdev);
- if (channel->has_otg_pins)
+ if (channel->is_otg_channel)
device_remove_file(&pdev->dev, &dev_attr_role);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb3.c b/drivers/phy/renesas/phy-rcar-gen3-usb3.c
index 88c83c9b8ff9..566b4cf4ff38 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb3.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb3.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen3 for USB3.0 PHY driver
*
* Copyright (C) 2017 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 0e15119ddfc6..990204a46eb6 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -15,6 +15,14 @@ config PHY_ROCKCHIP_EMMC
help
Enable this to support the Rockchip EMMC PHY.
+config PHY_ROCKCHIP_INNO_HDMI
+ tristate "Rockchip INNO HDMI PHY Driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip Innosilicon HDMI PHY.
+
config PHY_ROCKCHIP_INNO_USB2
tristate "Rockchip INNO USB2PHY Driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index 7f149d989046..fd21cbaf40dd 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
+obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
obj-$(CONFIG_PHY_ROCKCHIP_TYPEC) += phy-rockchip-typec.o
diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
index b237360f95f6..19bf84f0bc67 100644
--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
+++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
@@ -337,8 +337,8 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
return -ENOMEM;
if (of_property_read_u32(dev->of_node, "reg", &reg_offset)) {
- dev_err(dev, "missing reg property in node %s\n",
- dev->of_node->name);
+ dev_err(dev, "missing reg property in node %pOFn\n",
+ dev->of_node);
return -EINVAL;
}
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
new file mode 100644
index 000000000000..b10a84cab4a7
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -0,0 +1,1277 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Zheng Yang <zhengyang@rock-chips.com>
+ * Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/phy/phy.h>
+#include <linux/slab.h>
+
+#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
+
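UPDATE(x, h, l) shifts a field value x into bit positions l..h and masks off anything that would spill outside the field. A quick worked example:

    /* place the value 3 into bits [6:5]: (3 << 5) & GENMASK(6, 5) == 0x60 */
    u8 v = UPDATE(3, 6, 5);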
+/* REG: 0x00 */
+#define RK3228_PRE_PLL_REFCLK_SEL_PCLK BIT(0)
+/* REG: 0x01 */
+#define RK3228_BYPASS_RXSENSE_EN BIT(2)
+#define RK3228_BYPASS_PWRON_EN BIT(1)
+#define RK3228_BYPASS_PLLPD_EN BIT(0)
+/* REG: 0x02 */
+#define RK3228_BYPASS_PDATA_EN BIT(4)
+#define RK3228_PDATAEN_DISABLE BIT(0)
+/* REG: 0x03 */
+#define RK3228_BYPASS_AUTO_TERM_RES_CAL BIT(7)
+#define RK3228_AUTO_TERM_RES_CAL_SPEED_14_8(x) UPDATE(x, 6, 0)
+/* REG: 0x04 */
+#define RK3228_AUTO_TERM_RES_CAL_SPEED_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xaa */
+#define RK3228_POST_PLL_CTRL_MANUAL BIT(0)
+/* REG: 0xe0 */
+#define RK3228_POST_PLL_POWER_DOWN BIT(5)
+#define RK3228_PRE_PLL_POWER_DOWN BIT(4)
+#define RK3228_RXSENSE_CLK_CH_ENABLE BIT(3)
+#define RK3228_RXSENSE_DATA_CH2_ENABLE BIT(2)
+#define RK3228_RXSENSE_DATA_CH1_ENABLE BIT(1)
+#define RK3228_RXSENSE_DATA_CH0_ENABLE BIT(0)
+/* REG: 0xe1 */
+#define RK3228_BANDGAP_ENABLE BIT(4)
+#define RK3228_TMDS_DRIVER_ENABLE GENMASK(3, 0)
+/* REG: 0xe2 */
+#define RK3228_PRE_PLL_FB_DIV_8_MASK BIT(7)
+#define RK3228_PRE_PLL_FB_DIV_8(x) UPDATE((x) >> 8, 7, 7)
+#define RK3228_PCLK_VCO_DIV_5_MASK BIT(5)
+#define RK3228_PCLK_VCO_DIV_5(x) UPDATE(x, 5, 5)
+#define RK3228_PRE_PLL_PRE_DIV_MASK GENMASK(4, 0)
+#define RK3228_PRE_PLL_PRE_DIV(x) UPDATE(x, 4, 0)
+/* REG: 0xe3 */
+#define RK3228_PRE_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xe4 */
+#define RK3228_PRE_PLL_PCLK_DIV_B_MASK GENMASK(6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_B_SHIFT 5
+#define RK3228_PRE_PLL_PCLK_DIV_B(x) UPDATE(x, 6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_A_MASK GENMASK(4, 0)
+#define RK3228_PRE_PLL_PCLK_DIV_A(x) UPDATE(x, 4, 0)
+/* REG: 0xe5 */
+#define RK3228_PRE_PLL_PCLK_DIV_C_MASK GENMASK(6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_C(x) UPDATE(x, 6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_D_MASK GENMASK(4, 0)
+#define RK3228_PRE_PLL_PCLK_DIV_D(x) UPDATE(x, 4, 0)
+/* REG: 0xe6 */
+#define RK3228_PRE_PLL_TMDSCLK_DIV_C_MASK GENMASK(5, 4)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_C(x) UPDATE(x, 5, 4)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_A_MASK GENMASK(3, 2)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_A(x) UPDATE(x, 3, 2)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_B_MASK GENMASK(1, 0)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_B(x) UPDATE(x, 1, 0)
+/* REG: 0xe8 */
+#define RK3228_PRE_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xe9 */
+#define RK3228_POST_PLL_POST_DIV_ENABLE UPDATE(3, 7, 6)
+#define RK3228_POST_PLL_PRE_DIV_MASK GENMASK(4, 0)
+#define RK3228_POST_PLL_PRE_DIV(x) UPDATE(x, 4, 0)
+/* REG: 0xea */
+#define RK3228_POST_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xeb */
+#define RK3228_POST_PLL_FB_DIV_8_MASK BIT(7)
+#define RK3228_POST_PLL_FB_DIV_8(x) UPDATE((x) >> 8, 7, 7)
+#define RK3228_POST_PLL_POST_DIV_MASK GENMASK(5, 4)
+#define RK3228_POST_PLL_POST_DIV(x) UPDATE(x, 5, 4)
+#define RK3228_POST_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xee */
+#define RK3228_TMDS_CH_TA_ENABLE GENMASK(7, 4)
+/* REG: 0xef */
+#define RK3228_TMDS_CLK_CH_TA(x) UPDATE(x, 7, 6)
+#define RK3228_TMDS_DATA_CH2_TA(x) UPDATE(x, 5, 4)
+#define RK3228_TMDS_DATA_CH1_TA(x) UPDATE(x, 3, 2)
+#define RK3228_TMDS_DATA_CH0_TA(x) UPDATE(x, 1, 0)
+/* REG: 0xf0 */
+#define RK3228_TMDS_DATA_CH2_PRE_EMPHASIS_MASK GENMASK(5, 4)
+#define RK3228_TMDS_DATA_CH2_PRE_EMPHASIS(x) UPDATE(x, 5, 4)
+#define RK3228_TMDS_DATA_CH1_PRE_EMPHASIS_MASK GENMASK(3, 2)
+#define RK3228_TMDS_DATA_CH1_PRE_EMPHASIS(x) UPDATE(x, 3, 2)
+#define RK3228_TMDS_DATA_CH0_PRE_EMPHASIS_MASK GENMASK(1, 0)
+#define RK3228_TMDS_DATA_CH0_PRE_EMPHASIS(x) UPDATE(x, 1, 0)
+/* REG: 0xf1 */
+#define RK3228_TMDS_CLK_CH_OUTPUT_SWING(x) UPDATE(x, 7, 4)
+#define RK3228_TMDS_DATA_CH2_OUTPUT_SWING(x) UPDATE(x, 3, 0)
+/* REG: 0xf2 */
+#define RK3228_TMDS_DATA_CH1_OUTPUT_SWING(x) UPDATE(x, 7, 4)
+#define RK3228_TMDS_DATA_CH0_OUTPUT_SWING(x) UPDATE(x, 3, 0)
+
+/* REG: 0x01 */
+#define RK3328_BYPASS_RXSENSE_EN BIT(2)
+#define RK3328_BYPASS_POWERON_EN BIT(1)
+#define RK3328_BYPASS_PLLPD_EN BIT(0)
+/* REG: 0x02 */
+#define RK3328_INT_POL_HIGH BIT(7)
+#define RK3328_BYPASS_PDATA_EN BIT(4)
+#define RK3328_PDATA_EN BIT(0)
+/* REG:0x05 */
+#define RK3328_INT_TMDS_CLK(x) UPDATE(x, 7, 4)
+#define RK3328_INT_TMDS_D2(x) UPDATE(x, 3, 0)
+/* REG:0x07 */
+#define RK3328_INT_TMDS_D1(x) UPDATE(x, 7, 4)
+#define RK3328_INT_TMDS_D0(x) UPDATE(x, 3, 0)
+/* for all RK3328_INT_TMDS_*, ESD_DET as defined in 0xc8-0xcb */
+#define RK3328_INT_AGND_LOW_PULSE_LOCKED BIT(3)
+#define RK3328_INT_RXSENSE_LOW_PULSE_LOCKED BIT(2)
+#define RK3328_INT_VSS_AGND_ESD_DET BIT(1)
+#define RK3328_INT_AGND_VSS_ESD_DET BIT(0)
+/* REG: 0xa0 */
+#define RK3328_PCLK_VCO_DIV_5_MASK BIT(1)
+#define RK3328_PCLK_VCO_DIV_5(x) UPDATE(x, 1, 1)
+#define RK3328_PRE_PLL_POWER_DOWN BIT(0)
+/* REG: 0xa1 */
+#define RK3328_PRE_PLL_PRE_DIV_MASK GENMASK(5, 0)
+#define RK3328_PRE_PLL_PRE_DIV(x) UPDATE(x, 5, 0)
+/* REG: 0xa2 */
+/* unset means center spread */
+#define RK3328_SPREAD_SPECTRUM_MOD_DOWN BIT(7)
+#define RK3328_SPREAD_SPECTRUM_MOD_DISABLE BIT(6)
+#define RK3328_PRE_PLL_FRAC_DIV_DISABLE UPDATE(3, 5, 4)
+#define RK3328_PRE_PLL_FB_DIV_11_8_MASK GENMASK(3, 0)
+#define RK3328_PRE_PLL_FB_DIV_11_8(x) UPDATE((x) >> 8, 3, 0)
+/* REG: 0xa3 */
+#define RK3328_PRE_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xa4 */
+#define RK3328_PRE_PLL_TMDSCLK_DIV_C_MASK GENMASK(1, 0)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_C(x) UPDATE(x, 1, 0)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_B_MASK GENMASK(3, 2)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_B(x) UPDATE(x, 3, 2)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_A_MASK GENMASK(5, 4)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_A(x) UPDATE(x, 5, 4)
+/* REG: 0xa5 */
+#define RK3328_PRE_PLL_PCLK_DIV_B_SHIFT 5
+#define RK3328_PRE_PLL_PCLK_DIV_B_MASK GENMASK(6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_B(x) UPDATE(x, 6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_A_MASK GENMASK(4, 0)
+#define RK3328_PRE_PLL_PCLK_DIV_A(x) UPDATE(x, 4, 0)
+/* REG: 0xa6 */
+#define RK3328_PRE_PLL_PCLK_DIV_C_SHIFT 5
+#define RK3328_PRE_PLL_PCLK_DIV_C_MASK GENMASK(6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_C(x) UPDATE(x, 6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_D_MASK GENMASK(4, 0)
+#define RK3328_PRE_PLL_PCLK_DIV_D(x) UPDATE(x, 4, 0)
+/* REG: 0xa9 */
+#define RK3328_PRE_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xaa */
+#define RK3328_POST_PLL_POST_DIV_ENABLE GENMASK(3, 2)
+#define RK3328_POST_PLL_REFCLK_SEL_TMDS BIT(1)
+#define RK3328_POST_PLL_POWER_DOWN BIT(0)
+/* REG:0xab */
+#define RK3328_POST_PLL_FB_DIV_8(x) UPDATE((x) >> 8, 7, 7)
+#define RK3328_POST_PLL_PRE_DIV(x) UPDATE(x, 4, 0)
+/* REG: 0xac */
+#define RK3328_POST_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xad */
+#define RK3328_POST_PLL_POST_DIV_MASK GENMASK(1, 0)
+#define RK3328_POST_PLL_POST_DIV_2 0x0
+#define RK3328_POST_PLL_POST_DIV_4 0x1
+#define RK3328_POST_PLL_POST_DIV_8 0x3
+/* REG: 0xaf */
+#define RK3328_POST_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xb0 */
+#define RK3328_BANDGAP_ENABLE BIT(2)
+/* REG: 0xb2 */
+#define RK3328_TMDS_CLK_DRIVER_EN BIT(3)
+#define RK3328_TMDS_D2_DRIVER_EN BIT(2)
+#define RK3328_TMDS_D1_DRIVER_EN BIT(1)
+#define RK3328_TMDS_D0_DRIVER_EN BIT(0)
+#define RK3328_TMDS_DRIVER_ENABLE (RK3328_TMDS_CLK_DRIVER_EN | \
+ RK3328_TMDS_D2_DRIVER_EN | \
+ RK3328_TMDS_D1_DRIVER_EN | \
+ RK3328_TMDS_D0_DRIVER_EN)
+/* REG:0xc5 */
+#define RK3328_BYPASS_TERM_RESISTOR_CALIB BIT(7)
+#define RK3328_TERM_RESISTOR_CALIB_SPEED_14_8(x) UPDATE((x) >> 8, 6, 0)
+/* REG:0xc6 */
+#define RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(x) UPDATE(x, 7, 0)
+/* REG:0xc7 */
+#define RK3328_TERM_RESISTOR_50 UPDATE(0, 2, 1)
+#define RK3328_TERM_RESISTOR_62_5 UPDATE(1, 2, 1)
+#define RK3328_TERM_RESISTOR_75 UPDATE(2, 2, 1)
+#define RK3328_TERM_RESISTOR_100 UPDATE(3, 2, 1)
+/* REG 0xc8 - 0xcb */
+#define RK3328_ESD_DETECT_MASK GENMASK(7, 6)
+#define RK3328_ESD_DETECT_340MV (0x0 << 6)
+#define RK3328_ESD_DETECT_280MV (0x1 << 6)
+#define RK3328_ESD_DETECT_260MV (0x2 << 6)
+#define RK3328_ESD_DETECT_240MV (0x3 << 6)
+/* resistors can be used in parallel */
+#define RK3328_TMDS_TERM_RESIST_MASK GENMASK(5, 0)
+#define RK3328_TMDS_TERM_RESIST_75 BIT(5)
+#define RK3328_TMDS_TERM_RESIST_150 BIT(4)
+#define RK3328_TMDS_TERM_RESIST_300 BIT(3)
+#define RK3328_TMDS_TERM_RESIST_600 BIT(2)
+#define RK3328_TMDS_TERM_RESIST_1000 BIT(1)
+#define RK3328_TMDS_TERM_RESIST_2000 BIT(0)
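As the comment above notes, the RK3328_TMDS_TERM_RESIST_* bits select termination resistors that combine in parallel: enabling the 75 Ω and 150 Ω bits together yields 1 / (1/75 + 1/150) = 50 Ω, which is what the rk3328 power-on path below programs on the clock lane for TMDS clocks above 165 MHz.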
+/* REG: 0xd1 */
+#define RK3328_PRE_PLL_FRAC_DIV_23_16(x) UPDATE((x) >> 16, 7, 0)
+/* REG: 0xd2 */
+#define RK3328_PRE_PLL_FRAC_DIV_15_8(x) UPDATE((x) >> 8, 7, 0)
+/* REG: 0xd3 */
+#define RK3328_PRE_PLL_FRAC_DIV_7_0(x) UPDATE(x, 7, 0)
+
+struct inno_hdmi_phy_drv_data;
+
+struct inno_hdmi_phy {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+
+ struct phy *phy;
+ struct clk *sysclk;
+ struct clk *refoclk;
+ struct clk *refpclk;
+
+ /* platform data */
+ const struct inno_hdmi_phy_drv_data *plat_data;
+ int chip_version;
+
+ /* clk provider */
+ struct clk_hw hw;
+ struct clk *phyclk;
+ unsigned long pixclock;
+};
+
+struct pre_pll_config {
+ unsigned long pixclock;
+ unsigned long tmdsclock;
+ u8 prediv;
+ u16 fbdiv;
+ u8 tmds_div_a;
+ u8 tmds_div_b;
+ u8 tmds_div_c;
+ u8 pclk_div_a;
+ u8 pclk_div_b;
+ u8 pclk_div_c;
+ u8 pclk_div_d;
+ u8 vco_div_5_en;
+ u32 fracdiv;
+};
+
+struct post_pll_config {
+ unsigned long tmdsclock;
+ u8 prediv;
+ u16 fbdiv;
+ u8 postdiv;
+ u8 version;
+};
+
+struct phy_config {
+ unsigned long tmdsclock;
+ u8 regs[14];
+};
+
+struct inno_hdmi_phy_ops {
+ int (*init)(struct inno_hdmi_phy *inno);
+ int (*power_on)(struct inno_hdmi_phy *inno,
+ const struct post_pll_config *cfg,
+ const struct phy_config *phy_cfg);
+ void (*power_off)(struct inno_hdmi_phy *inno);
+};
+
+struct inno_hdmi_phy_drv_data {
+ const struct inno_hdmi_phy_ops *ops;
+ const struct clk_ops *clk_ops;
+ const struct phy_config *phy_cfg_table;
+};
+
+static const struct pre_pll_config pre_pll_cfg_table[] = {
+ { 27000000, 27000000, 1, 90, 3, 2, 2, 10, 3, 3, 4, 0, 0},
+ { 27000000, 33750000, 1, 90, 1, 3, 3, 10, 3, 3, 4, 0, 0},
+ { 40000000, 40000000, 1, 80, 2, 2, 2, 12, 2, 2, 2, 0, 0},
+ { 59341000, 59341000, 1, 98, 3, 1, 2, 1, 3, 3, 4, 0, 0xE6AE6B},
+ { 59400000, 59400000, 1, 99, 3, 1, 1, 1, 3, 3, 4, 0, 0},
+ { 59341000, 74176250, 1, 98, 0, 3, 3, 1, 3, 3, 4, 0, 0xE6AE6B},
+ { 59400000, 74250000, 1, 99, 1, 2, 2, 1, 3, 3, 4, 0, 0},
+ { 74176000, 74176000, 1, 98, 1, 2, 2, 1, 2, 3, 4, 0, 0xE6AE6B},
+ { 74250000, 74250000, 1, 99, 1, 2, 2, 1, 2, 3, 4, 0, 0},
+ { 74176000, 92720000, 4, 494, 1, 2, 2, 1, 3, 3, 4, 0, 0x816817},
+ { 74250000, 92812500, 4, 495, 1, 2, 2, 1, 3, 3, 4, 0, 0},
+ {148352000, 148352000, 1, 98, 1, 1, 1, 1, 2, 2, 2, 0, 0xE6AE6B},
+ {148500000, 148500000, 1, 99, 1, 1, 1, 1, 2, 2, 2, 0, 0},
+ {148352000, 185440000, 4, 494, 0, 2, 2, 1, 3, 2, 2, 0, 0x816817},
+ {148500000, 185625000, 4, 495, 0, 2, 2, 1, 3, 2, 2, 0, 0},
+ {296703000, 296703000, 1, 98, 0, 1, 1, 1, 0, 2, 2, 0, 0xE6AE6B},
+ {297000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 2, 0, 0},
+ {296703000, 370878750, 4, 494, 1, 2, 0, 1, 3, 1, 1, 0, 0x816817},
+ {297000000, 371250000, 4, 495, 1, 2, 0, 1, 3, 1, 1, 0, 0},
+ {593407000, 296703500, 1, 98, 0, 1, 1, 1, 0, 2, 1, 0, 0xE6AE6B},
+ {594000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 1, 0, 0},
+ {593407000, 370879375, 4, 494, 1, 2, 0, 1, 3, 1, 1, 1, 0x816817},
+ {594000000, 371250000, 4, 495, 1, 2, 0, 1, 3, 1, 1, 1, 0},
+ {593407000, 593407000, 1, 98, 0, 2, 0, 1, 0, 1, 1, 0, 0xE6AE6B},
+ {594000000, 594000000, 1, 99, 0, 2, 0, 1, 0, 1, 1, 0, 0},
+ { /* sentinel */ }
+};
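Each row pairs a pixel clock and TMDS clock with pre-PLL divider settings. The values are consistent with a 24 MHz reference clock (an assumption, not stated in the table itself); for the 594 MHz entry:

    /*
     * {594000000, 594000000, 1, 99, 0, 2, 0, 1, 0, 1, 1, 0, 0}:
     * VCO = 24 MHz * fbdiv(99) = 2376 MHz.  pclk_div_a == 1, so the
     * rate is derived via pclk_div_b + 2 = 2 and pclk_div_d = 1:
     * 2376 MHz / (prediv(1) * 2 * 1 * 2) = 594 MHz.
     */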
+
+static const struct post_pll_config post_pll_cfg_table[] = {
+ {33750000, 1, 40, 8, 1},
+ {33750000, 1, 80, 8, 2},
+ {74250000, 1, 40, 8, 1},
+ {74250000, 18, 80, 8, 2},
+ {148500000, 2, 40, 4, 3},
+ {297000000, 4, 40, 2, 3},
+ {594000000, 8, 40, 1, 3},
+ { /* sentinel */ }
+};
+
+/* phy tuning values for an undocumented set of registers */
+static const struct phy_config rk3228_phy_cfg[] = {
+ { 165000000, {
+ 0xaa, 0x00, 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ }, {
+ 340000000, {
+ 0xaa, 0x15, 0x6a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ }, {
+ 594000000, {
+ 0xaa, 0x15, 0x7a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ }, { /* sentinel */ },
+};
+
+/* phy tuning values for an undocumented set of registers */
+static const struct phy_config rk3328_phy_cfg[] = {
+ { 165000000, {
+ 0x07, 0x0a, 0x0a, 0x0a, 0x00, 0x00, 0x08, 0x08, 0x08,
+ 0x00, 0xac, 0xcc, 0xcc, 0xcc,
+ },
+ }, {
+ 340000000, {
+ 0x0b, 0x0d, 0x0d, 0x0d, 0x07, 0x15, 0x08, 0x08, 0x08,
+ 0x3f, 0xac, 0xcc, 0xcd, 0xdd,
+ },
+ }, {
+ 594000000, {
+ 0x10, 0x1a, 0x1a, 0x1a, 0x07, 0x15, 0x08, 0x08, 0x08,
+ 0x00, 0xac, 0xcc, 0xcc, 0xcc,
+ },
+ }, { /* sentinel */ },
+};
+
+static inline struct inno_hdmi_phy *to_inno_hdmi_phy(struct clk_hw *hw)
+{
+ return container_of(hw, struct inno_hdmi_phy, hw);
+}
+
+/*
+ * The register description of the IP block does not use any distinct
+ * names but instead the databook simply numbers the registers in
+ * one-increments. As the registers are 32 bits wide, the inno_*
+ * accessors below translate the databook register numbers into the
+ * actual register addresses.
+ */
+static inline void inno_write(struct inno_hdmi_phy *inno, u32 reg, u8 val)
+{
+ regmap_write(inno->regmap, reg * 4, val);
+}
+
+static inline u8 inno_read(struct inno_hdmi_phy *inno, u32 reg)
+{
+ u32 val;
+
+ regmap_read(inno->regmap, reg * 4, &val);
+
+ return val;
+}
+
+static inline void inno_update_bits(struct inno_hdmi_phy *inno, u8 reg,
+ u8 mask, u8 val)
+{
+ regmap_update_bits(inno->regmap, reg * 4, mask, val);
+}
+
+#define inno_poll(inno, reg, val, cond, sleep_us, timeout_us) \
+ regmap_read_poll_timeout((inno)->regmap, (reg) * 4, val, cond, \
+ sleep_us, timeout_us)
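With the databook numbering translated by the stride of 4 above, databook register 0xa0 lives at MMIO offset 0x280:

    inno_write(inno, 0xa0, 0x01);	/* regmap_write(inno->regmap, 0x280, 0x01) */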
+
+static unsigned long inno_hdmi_phy_get_tmdsclk(struct inno_hdmi_phy *inno,
+ unsigned long rate)
+{
+ int bus_width = phy_get_bus_width(inno->phy);
+
+ switch (bus_width) {
+ case 4:
+ case 5:
+ case 6:
+ case 10:
+ case 12:
+ case 16:
+ return (u64)rate * bus_width / 8;
+ default:
+ return rate;
+ }
+}
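The bus width set through phy_set_bus_width() is the TMDS character size in bits, so deep-color modes scale the TMDS clock above the pixel clock:

    /*
     * 30bpp deep color: bus_width = 10 at a 148.5 MHz pixel clock gives
     * 148500000 * 10 / 8 = 185625000 Hz, matching the
     * {148500000, 185625000, ...} entry in pre_pll_cfg_table.
     */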
+
+static irqreturn_t inno_hdmi_phy_rk3328_hardirq(int irq, void *dev_id)
+{
+ struct inno_hdmi_phy *inno = dev_id;
+ int intr_stat1, intr_stat2, intr_stat3;
+
+ intr_stat1 = inno_read(inno, 0x04);
+ intr_stat2 = inno_read(inno, 0x06);
+ intr_stat3 = inno_read(inno, 0x08);
+
+ if (intr_stat1)
+ inno_write(inno, 0x04, intr_stat1);
+ if (intr_stat2)
+ inno_write(inno, 0x06, intr_stat2);
+ if (intr_stat3)
+ inno_write(inno, 0x08, intr_stat3);
+
+ if (intr_stat1 || intr_stat2 || intr_stat3)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t inno_hdmi_phy_rk3328_irq(int irq, void *dev_id)
+{
+ struct inno_hdmi_phy *inno = dev_id;
+
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, 0);
+ usleep_range(10, 20);
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, RK3328_PDATA_EN);
+
+ return IRQ_HANDLED;
+}
+
+static int inno_hdmi_phy_power_on(struct phy *phy)
+{
+ struct inno_hdmi_phy *inno = phy_get_drvdata(phy);
+ const struct post_pll_config *cfg = post_pll_cfg_table;
+ const struct phy_config *phy_cfg = inno->plat_data->phy_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno,
+ inno->pixclock);
+ int ret;
+
+ if (!tmdsclock) {
+ dev_err(inno->dev, "TMDS clock is zero!\n");
+ return -EINVAL;
+ }
+
+ if (!inno->plat_data->ops->power_on)
+ return -EINVAL;
+
+ for (; cfg->tmdsclock != 0; cfg++)
+ if (tmdsclock <= cfg->tmdsclock &&
+ cfg->version & inno->chip_version)
+ break;
+
+ for (; phy_cfg->tmdsclock != 0; phy_cfg++)
+ if (tmdsclock <= phy_cfg->tmdsclock)
+ break;
+
+ if (cfg->tmdsclock == 0 || phy_cfg->tmdsclock == 0)
+ return -EINVAL;
+
+ dev_dbg(inno->dev, "Inno HDMI PHY Power On\n");
+
+ ret = clk_prepare_enable(inno->phyclk);
+ if (ret)
+ return ret;
+
+ ret = inno->plat_data->ops->power_on(inno, cfg, phy_cfg);
+ if (ret) {
+ clk_disable_unprepare(inno->phyclk);
+ return ret;
+ }
+
+ return 0;
+}
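Note that chip_version acts as a bitmask in the scan above: the rk3328 init code derives it from the "cpu-version" efuse cell (value + 1), so post_pll_cfg_table entries with version 3 (binary 11) match both chip revisions, while the 74.25 MHz entries with version 1 and 2 are revision-specific.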
+
+static int inno_hdmi_phy_power_off(struct phy *phy)
+{
+ struct inno_hdmi_phy *inno = phy_get_drvdata(phy);
+
+ if (!inno->plat_data->ops->power_off)
+ return -EINVAL;
+
+ inno->plat_data->ops->power_off(inno);
+
+ clk_disable_unprepare(inno->phyclk);
+
+ dev_dbg(inno->dev, "Inno HDMI PHY Power Off\n");
+
+ return 0;
+}
+
+static const struct phy_ops inno_hdmi_phy_ops = {
+ .owner = THIS_MODULE,
+ .power_on = inno_hdmi_phy_power_on,
+ .power_off = inno_hdmi_phy_power_off,
+};
+
+static const
+struct pre_pll_config *inno_hdmi_phy_get_pre_pll_cfg(struct inno_hdmi_phy *inno,
+ unsigned long rate)
+{
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno, rate);
+
+ for (; cfg->pixclock != 0; cfg++)
+ if (cfg->pixclock == rate && cfg->tmdsclock == tmdsclock)
+ break;
+
+ if (cfg->pixclock == 0)
+ return ERR_PTR(-EINVAL);
+
+ return cfg;
+}
+
+static int inno_hdmi_phy_rk3228_clk_is_prepared(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ u8 status;
+
+ status = inno_read(inno, 0xe0) & RK3228_PRE_PLL_POWER_DOWN;
+ return status ? 0 : 1;
+}
+
+static int inno_hdmi_phy_rk3228_clk_prepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN, 0);
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3228_clk_unprepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN,
+ RK3228_PRE_PLL_POWER_DOWN);
+}
+
+static
+unsigned long inno_hdmi_phy_rk3228_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ u8 nd, no_a, no_b, no_d;
+ u64 vco;
+ u16 nf;
+
+ nd = inno_read(inno, 0xe2) & RK3228_PRE_PLL_PRE_DIV_MASK;
+ nf = (inno_read(inno, 0xe2) & RK3228_PRE_PLL_FB_DIV_8_MASK) << 1;
+ nf |= inno_read(inno, 0xe3);
+ vco = parent_rate * nf;
+
+ if (inno_read(inno, 0xe2) & RK3228_PCLK_VCO_DIV_5_MASK) {
+ do_div(vco, nd * 5);
+ } else {
+ no_a = inno_read(inno, 0xe4) & RK3228_PRE_PLL_PCLK_DIV_A_MASK;
+ if (!no_a)
+ no_a = 1;
+ no_b = inno_read(inno, 0xe4) & RK3228_PRE_PLL_PCLK_DIV_B_MASK;
+ no_b >>= RK3228_PRE_PLL_PCLK_DIV_B_SHIFT;
+ no_b += 2;
+ no_d = inno_read(inno, 0xe5) & RK3228_PRE_PLL_PCLK_DIV_D_MASK;
+
+ do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
+ }
+
+ inno->pixclock = vco;
+
+ dev_dbg(inno->dev, "%s rate %lu\n", __func__, inno->pixclock);
+
+ return vco;
+}
+
+static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+
+ for (; cfg->pixclock != 0; cfg++)
+ if (cfg->pixclock == rate && !cfg->fracdiv)
+ break;
+
+ if (cfg->pixclock == 0)
+ return -EINVAL;
+
+ return cfg->pixclock;
+}
+
+static int inno_hdmi_phy_rk3228_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno, rate);
+ u32 v;
+ int ret;
+
+ dev_dbg(inno->dev, "%s rate %lu tmdsclk %lu\n",
+ __func__, rate, tmdsclock);
+
+ cfg = inno_hdmi_phy_get_pre_pll_cfg(inno, rate);
+ if (IS_ERR(cfg))
+ return PTR_ERR(cfg);
+
+ /* Power down PRE-PLL */
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN,
+ RK3228_PRE_PLL_POWER_DOWN);
+
+ inno_update_bits(inno, 0xe2, RK3228_PRE_PLL_FB_DIV_8_MASK |
+ RK3228_PCLK_VCO_DIV_5_MASK |
+ RK3228_PRE_PLL_PRE_DIV_MASK,
+ RK3228_PRE_PLL_FB_DIV_8(cfg->fbdiv) |
+ RK3228_PCLK_VCO_DIV_5(cfg->vco_div_5_en) |
+ RK3228_PRE_PLL_PRE_DIV(cfg->prediv));
+ inno_write(inno, 0xe3, RK3228_PRE_PLL_FB_DIV_7_0(cfg->fbdiv));
+ inno_update_bits(inno, 0xe4, RK3228_PRE_PLL_PCLK_DIV_B_MASK |
+ RK3228_PRE_PLL_PCLK_DIV_A_MASK,
+ RK3228_PRE_PLL_PCLK_DIV_B(cfg->pclk_div_b) |
+ RK3228_PRE_PLL_PCLK_DIV_A(cfg->pclk_div_a));
+ inno_update_bits(inno, 0xe5, RK3228_PRE_PLL_PCLK_DIV_C_MASK |
+ RK3228_PRE_PLL_PCLK_DIV_D_MASK,
+ RK3228_PRE_PLL_PCLK_DIV_C(cfg->pclk_div_c) |
+ RK3228_PRE_PLL_PCLK_DIV_D(cfg->pclk_div_d));
+ inno_update_bits(inno, 0xe6, RK3228_PRE_PLL_TMDSCLK_DIV_C_MASK |
+ RK3228_PRE_PLL_TMDSCLK_DIV_A_MASK |
+ RK3228_PRE_PLL_TMDSCLK_DIV_B_MASK,
+ RK3228_PRE_PLL_TMDSCLK_DIV_C(cfg->tmds_div_c) |
+ RK3228_PRE_PLL_TMDSCLK_DIV_A(cfg->tmds_div_a) |
+ RK3228_PRE_PLL_TMDSCLK_DIV_B(cfg->tmds_div_b));
+
+ /* Power up PRE-PLL */
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN, 0);
+
+ /* Wait for Pre-PLL lock */
+ ret = inno_poll(inno, 0xe8, v, v & RK3228_PRE_PLL_LOCK_STATUS,
+ 100, 100000);
+ if (ret) {
+ dev_err(inno->dev, "Pre-PLL locking failed\n");
+ return ret;
+ }
+
+ inno->pixclock = rate;
+
+ return 0;
+}
+
+static const struct clk_ops inno_hdmi_phy_rk3228_clk_ops = {
+ .prepare = inno_hdmi_phy_rk3228_clk_prepare,
+ .unprepare = inno_hdmi_phy_rk3228_clk_unprepare,
+ .is_prepared = inno_hdmi_phy_rk3228_clk_is_prepared,
+ .recalc_rate = inno_hdmi_phy_rk3228_clk_recalc_rate,
+ .round_rate = inno_hdmi_phy_rk3228_clk_round_rate,
+ .set_rate = inno_hdmi_phy_rk3228_clk_set_rate,
+};
+
+static int inno_hdmi_phy_rk3328_clk_is_prepared(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ u8 status;
+
+ status = inno_read(inno, 0xa0) & RK3328_PRE_PLL_POWER_DOWN;
+ return status ? 0 : 1;
+}
+
+static int inno_hdmi_phy_rk3328_clk_prepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN, 0);
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3328_clk_unprepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN,
+ RK3328_PRE_PLL_POWER_DOWN);
+}
+
+static
+unsigned long inno_hdmi_phy_rk3328_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ unsigned long frac;
+ u8 nd, no_a, no_b, no_c, no_d;
+ u64 vco;
+ u16 nf;
+
+ nd = inno_read(inno, 0xa1) & RK3328_PRE_PLL_PRE_DIV_MASK;
+ nf = ((inno_read(inno, 0xa2) & RK3328_PRE_PLL_FB_DIV_11_8_MASK) << 8);
+ nf |= inno_read(inno, 0xa3);
+ vco = parent_rate * nf;
+
+ if (!(inno_read(inno, 0xa2) & RK3328_PRE_PLL_FRAC_DIV_DISABLE)) {
+ frac = inno_read(inno, 0xd3) |
+ (inno_read(inno, 0xd2) << 8) |
+ (inno_read(inno, 0xd1) << 16);
+ vco += DIV_ROUND_CLOSEST(parent_rate * frac, (1 << 24));
+ }
+
+ if (inno_read(inno, 0xa0) & RK3328_PCLK_VCO_DIV_5_MASK) {
+ do_div(vco, nd * 5);
+ } else {
+ no_a = inno_read(inno, 0xa5) & RK3328_PRE_PLL_PCLK_DIV_A_MASK;
+ no_b = inno_read(inno, 0xa5) & RK3328_PRE_PLL_PCLK_DIV_B_MASK;
+ no_b >>= RK3328_PRE_PLL_PCLK_DIV_B_SHIFT;
+ no_b += 2;
+ no_c = inno_read(inno, 0xa6) & RK3328_PRE_PLL_PCLK_DIV_C_MASK;
+ no_c >>= RK3328_PRE_PLL_PCLK_DIV_C_SHIFT;
+ no_c = 1 << no_c;
+ no_d = inno_read(inno, 0xa6) & RK3328_PRE_PLL_PCLK_DIV_D_MASK;
+
+ do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
+ }
+
+ inno->pixclock = vco;
+ dev_dbg(inno->dev, "%s rate %lu\n", __func__, inno->pixclock);
+
+ return vco;
+}
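When the fractional divider is enabled, the 24-bit fraction read from 0xd1-0xd3 extends fbdiv below one unit. Assuming the same 24 MHz reference, the 59.341 MHz table entry works out as:

    /*
     * fbdiv = 98, fracdiv = 0xE6AE6B (15118955):
     * VCO = 24 MHz * 98 + (24 MHz * 15118955) / 2^24 ≈ 2373.63 MHz.
     * pclk_div_a == 1, so divide by (pclk_div_b + 2) = 5 and
     * pclk_div_d = 4: 2373.63 MHz / (1 * 5 * 4 * 2) ≈ 59.341 MHz.
     */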
+
+static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+
+ for (; cfg->pixclock != 0; cfg++)
+ if (cfg->pixclock == rate)
+ break;
+
+ if (cfg->pixclock == 0)
+ return -EINVAL;
+
+ return cfg->pixclock;
+}
+
+static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno, rate);
+ u32 val;
+ int ret;
+
+ dev_dbg(inno->dev, "%s rate %lu tmdsclk %lu\n",
+ __func__, rate, tmdsclock);
+
+ cfg = inno_hdmi_phy_get_pre_pll_cfg(inno, rate);
+ if (IS_ERR(cfg))
+ return PTR_ERR(cfg);
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN,
+ RK3328_PRE_PLL_POWER_DOWN);
+
+ /* Configure pre-pll */
+ inno_update_bits(inno, 0xa0, RK3328_PCLK_VCO_DIV_5_MASK,
+ RK3328_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
+ inno_write(inno, 0xa1, RK3328_PRE_PLL_PRE_DIV(cfg->prediv));
+
+ val = RK3328_SPREAD_SPECTRUM_MOD_DISABLE;
+ if (!cfg->fracdiv)
+ val |= RK3328_PRE_PLL_FRAC_DIV_DISABLE;
+ inno_write(inno, 0xa2, RK3328_PRE_PLL_FB_DIV_11_8(cfg->fbdiv) | val);
+ inno_write(inno, 0xa3, RK3328_PRE_PLL_FB_DIV_7_0(cfg->fbdiv));
+ inno_write(inno, 0xa5, RK3328_PRE_PLL_PCLK_DIV_A(cfg->pclk_div_a) |
+ RK3328_PRE_PLL_PCLK_DIV_B(cfg->pclk_div_b));
+ inno_write(inno, 0xa6, RK3328_PRE_PLL_PCLK_DIV_C(cfg->pclk_div_c) |
+ RK3328_PRE_PLL_PCLK_DIV_D(cfg->pclk_div_d));
+ inno_write(inno, 0xa4, RK3328_PRE_PLL_TMDSCLK_DIV_C(cfg->tmds_div_c) |
+ RK3328_PRE_PLL_TMDSCLK_DIV_A(cfg->tmds_div_a) |
+ RK3328_PRE_PLL_TMDSCLK_DIV_B(cfg->tmds_div_b));
+ inno_write(inno, 0xd3, RK3328_PRE_PLL_FRAC_DIV_7_0(cfg->fracdiv));
+ inno_write(inno, 0xd2, RK3328_PRE_PLL_FRAC_DIV_15_8(cfg->fracdiv));
+ inno_write(inno, 0xd1, RK3328_PRE_PLL_FRAC_DIV_23_16(cfg->fracdiv));
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN, 0);
+
+ /* Wait for Pre-PLL lock */
+ ret = inno_poll(inno, 0xa9, val, val & RK3328_PRE_PLL_LOCK_STATUS,
+ 1000, 10000);
+ if (ret) {
+ dev_err(inno->dev, "Pre-PLL locking failed\n");
+ return ret;
+ }
+
+ inno->pixclock = rate;
+
+ return 0;
+}
+
+static const struct clk_ops inno_hdmi_phy_rk3328_clk_ops = {
+ .prepare = inno_hdmi_phy_rk3328_clk_prepare,
+ .unprepare = inno_hdmi_phy_rk3328_clk_unprepare,
+ .is_prepared = inno_hdmi_phy_rk3328_clk_is_prepared,
+ .recalc_rate = inno_hdmi_phy_rk3328_clk_recalc_rate,
+ .round_rate = inno_hdmi_phy_rk3328_clk_round_rate,
+ .set_rate = inno_hdmi_phy_rk3328_clk_set_rate,
+};
+
+static int inno_hdmi_phy_clk_register(struct inno_hdmi_phy *inno)
+{
+ struct device *dev = inno->dev;
+ struct device_node *np = dev->of_node;
+ struct clk_init_data init;
+ const char *parent_name;
+ int ret;
+
+ parent_name = __clk_get_name(inno->refoclk);
+
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+ init.name = "pin_hd20_pclk";
+ init.ops = inno->plat_data->clk_ops;
+
+ /* optional override of the clock name */
+ of_property_read_string(np, "clock-output-names", &init.name);
+
+ inno->hw.init = &init;
+
+ inno->phyclk = devm_clk_register(dev, &inno->hw);
+ if (IS_ERR(inno->phyclk)) {
+ ret = PTR_ERR(inno->phyclk);
+ dev_err(dev, "failed to register clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get, inno->phyclk);
+ if (ret) {
+ dev_err(dev, "failed to register clock provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int inno_hdmi_phy_rk3228_init(struct inno_hdmi_phy *inno)
+{
+ /*
+ * Use the PHY-internal registers to control the
+ * rxsense/poweron/pllpd/pdataen signals.
+ */
+ inno_write(inno, 0x01, RK3228_BYPASS_RXSENSE_EN |
+ RK3228_BYPASS_PWRON_EN |
+ RK3228_BYPASS_PLLPD_EN);
+ inno_update_bits(inno, 0x02, RK3228_BYPASS_PDATA_EN,
+ RK3228_BYPASS_PDATA_EN);
+
+ /* manual power down post-PLL */
+ inno_update_bits(inno, 0xaa, RK3228_POST_PLL_CTRL_MANUAL,
+ RK3228_POST_PLL_CTRL_MANUAL);
+
+ inno->chip_version = 1;
+
+ return 0;
+}
+
+static int
+inno_hdmi_phy_rk3228_power_on(struct inno_hdmi_phy *inno,
+ const struct post_pll_config *cfg,
+ const struct phy_config *phy_cfg)
+{
+ int ret;
+ u32 v;
+
+ inno_update_bits(inno, 0x02, RK3228_PDATAEN_DISABLE,
+ RK3228_PDATAEN_DISABLE);
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN |
+ RK3228_POST_PLL_POWER_DOWN,
+ RK3228_PRE_PLL_POWER_DOWN |
+ RK3228_POST_PLL_POWER_DOWN);
+
+ /* Post-PLL update */
+ inno_update_bits(inno, 0xe9, RK3228_POST_PLL_PRE_DIV_MASK,
+ RK3228_POST_PLL_PRE_DIV(cfg->prediv));
+ inno_update_bits(inno, 0xeb, RK3228_POST_PLL_FB_DIV_8_MASK,
+ RK3228_POST_PLL_FB_DIV_8(cfg->fbdiv));
+ inno_write(inno, 0xea, RK3228_POST_PLL_FB_DIV_7_0(cfg->fbdiv));
+
+ if (cfg->postdiv == 1) {
+ inno_update_bits(inno, 0xe9, RK3228_POST_PLL_POST_DIV_ENABLE,
+ 0);
+ } else {
+ int div = cfg->postdiv / 2 - 1;
+
+ inno_update_bits(inno, 0xe9, RK3228_POST_PLL_POST_DIV_ENABLE,
+ RK3228_POST_PLL_POST_DIV_ENABLE);
+ inno_update_bits(inno, 0xeb, RK3228_POST_PLL_POST_DIV_MASK,
+ RK3228_POST_PLL_POST_DIV(div));
+ }
+
+ for (v = 0; v < 4; v++)
+ inno_write(inno, 0xef + v, phy_cfg->regs[v]);
+
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN |
+ RK3228_POST_PLL_POWER_DOWN, 0);
+ inno_update_bits(inno, 0xe1, RK3228_BANDGAP_ENABLE,
+ RK3228_BANDGAP_ENABLE);
+ inno_update_bits(inno, 0xe1, RK3228_TMDS_DRIVER_ENABLE,
+ RK3228_TMDS_DRIVER_ENABLE);
+
+ /* Wait for post PLL lock */
+ ret = inno_poll(inno, 0xeb, v, v & RK3228_POST_PLL_LOCK_STATUS,
+ 100, 100000);
+ if (ret) {
+ dev_err(inno->dev, "Post-PLL locking failed\n");
+ return ret;
+ }
+
+ if (cfg->tmdsclock > 340000000)
+ msleep(100);
+
+ inno_update_bits(inno, 0x02, RK3228_PDATAEN_DISABLE, 0);
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3228_power_off(struct inno_hdmi_phy *inno)
+{
+ inno_update_bits(inno, 0xe1, RK3228_TMDS_DRIVER_ENABLE, 0);
+ inno_update_bits(inno, 0xe1, RK3228_BANDGAP_ENABLE, 0);
+ inno_update_bits(inno, 0xe0, RK3228_POST_PLL_POWER_DOWN,
+ RK3228_POST_PLL_POWER_DOWN);
+}
+
+static const struct inno_hdmi_phy_ops rk3228_hdmi_phy_ops = {
+ .init = inno_hdmi_phy_rk3228_init,
+ .power_on = inno_hdmi_phy_rk3228_power_on,
+ .power_off = inno_hdmi_phy_rk3228_power_off,
+};
+
+static int inno_hdmi_phy_rk3328_init(struct inno_hdmi_phy *inno)
+{
+ struct nvmem_cell *cell;
+ unsigned char *efuse_buf;
+ size_t len;
+
+ /*
+ * Use the PHY-internal registers to control the
+ * rxsense/poweron/pllpd/pdataen signals.
+ */
+ inno_write(inno, 0x01, RK3328_BYPASS_RXSENSE_EN |
+ RK3328_BYPASS_POWERON_EN |
+ RK3328_BYPASS_PLLPD_EN);
+ inno_write(inno, 0x02, RK3328_INT_POL_HIGH | RK3328_BYPASS_PDATA_EN |
+ RK3328_PDATA_EN);
+
+ /* Disable phy irq */
+ inno_write(inno, 0x05, 0);
+ inno_write(inno, 0x07, 0);
+
+ /* try to read the chip-version */
+ inno->chip_version = 1;
+ cell = nvmem_cell_get(inno->dev, "cpu-version");
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ return 0;
+ }
+
+ efuse_buf = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(efuse_buf))
+ return 0;
+ if (len == 1)
+ inno->chip_version = efuse_buf[0] + 1;
+ kfree(efuse_buf);
+
+ return 0;
+}
+
+static int
+inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
+ const struct post_pll_config *cfg,
+ const struct phy_config *phy_cfg)
+{
+ int ret;
+ u32 v;
+
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, 0);
+ inno_update_bits(inno, 0xaa, RK3328_POST_PLL_POWER_DOWN,
+ RK3328_POST_PLL_POWER_DOWN);
+
+ inno_write(inno, 0xac, RK3328_POST_PLL_FB_DIV_7_0(cfg->fbdiv));
+ if (cfg->postdiv == 1) {
+ inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS);
+ inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
+ RK3328_POST_PLL_PRE_DIV(cfg->prediv));
+ } else {
+ v = (cfg->postdiv / 2) - 1;
+ v &= RK3328_POST_PLL_POST_DIV_MASK;
+ inno_write(inno, 0xad, v);
+ inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
+ RK3328_POST_PLL_PRE_DIV(cfg->prediv));
+ inno_write(inno, 0xaa, RK3328_POST_PLL_POST_DIV_ENABLE |
+ RK3328_POST_PLL_REFCLK_SEL_TMDS);
+ }
+
+ for (v = 0; v < 14; v++)
+ inno_write(inno, 0xb5 + v, phy_cfg->regs[v]);
+
+ /* set ESD detection threshold for TMDS CLK, D2, D1 and D0 */
+ for (v = 0; v < 4; v++)
+ inno_update_bits(inno, 0xc8 + v, RK3328_ESD_DETECT_MASK,
+ RK3328_ESD_DETECT_340MV);
+
+ if (phy_cfg->tmdsclock > 340000000) {
+ /* Set termination resistor to 100ohm */
+ v = clk_get_rate(inno->sysclk) / 100000;
+ inno_write(inno, 0xc5, RK3328_TERM_RESISTOR_CALIB_SPEED_14_8(v)
+ | RK3328_BYPASS_TERM_RESISTOR_CALIB);
+ inno_write(inno, 0xc6, RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(v));
+ inno_write(inno, 0xc7, RK3328_TERM_RESISTOR_100);
+ inno_update_bits(inno, 0xc5,
+ RK3328_BYPASS_TERM_RESISTOR_CALIB, 0);
+ } else {
+ inno_write(inno, 0xc5, RK3328_BYPASS_TERM_RESISTOR_CALIB);
+
+ /* clk termination resistor is 50ohm (parallel resistors) */
+ if (phy_cfg->tmdsclock > 165000000)
+ inno_update_bits(inno, 0xc8,
+ RK3328_TMDS_TERM_RESIST_MASK,
+ RK3328_TMDS_TERM_RESIST_75 |
+ RK3328_TMDS_TERM_RESIST_150);
+
+ /* data termination resistor for D2, D1 and D0 is 150ohm */
+ for (v = 0; v < 3; v++)
+ inno_update_bits(inno, 0xc9 + v,
+ RK3328_TMDS_TERM_RESIST_MASK,
+ RK3328_TMDS_TERM_RESIST_150);
+ }
+
+ inno_update_bits(inno, 0xaa, RK3328_POST_PLL_POWER_DOWN, 0);
+ inno_update_bits(inno, 0xb0, RK3328_BANDGAP_ENABLE,
+ RK3328_BANDGAP_ENABLE);
+ inno_update_bits(inno, 0xb2, RK3328_TMDS_DRIVER_ENABLE,
+ RK3328_TMDS_DRIVER_ENABLE);
+
+ /* Wait for post PLL lock */
+ ret = inno_poll(inno, 0xaf, v, v & RK3328_POST_PLL_LOCK_STATUS,
+ 1000, 10000);
+ if (ret) {
+ dev_err(inno->dev, "Post-PLL locking failed\n");
+ return ret;
+ }
+
+ if (phy_cfg->tmdsclock > 340000000)
+ msleep(100);
+
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, RK3328_PDATA_EN);
+
+ /* Enable PHY IRQ */
+ inno_write(inno, 0x05, RK3328_INT_TMDS_CLK(RK3328_INT_VSS_AGND_ESD_DET)
+ | RK3328_INT_TMDS_D2(RK3328_INT_VSS_AGND_ESD_DET));
+ inno_write(inno, 0x07, RK3328_INT_TMDS_D1(RK3328_INT_VSS_AGND_ESD_DET)
+ | RK3328_INT_TMDS_D0(RK3328_INT_VSS_AGND_ESD_DET));
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3328_power_off(struct inno_hdmi_phy *inno)
+{
+ inno_update_bits(inno, 0xb2, RK3328_TMDS_DRIVER_ENABLE, 0);
+ inno_update_bits(inno, 0xb0, RK3328_BANDGAP_ENABLE, 0);
+ inno_update_bits(inno, 0xaa, RK3328_POST_PLL_POWER_DOWN,
+ RK3328_POST_PLL_POWER_DOWN);
+
+ /* Disable PHY IRQ */
+ inno_write(inno, 0x05, 0);
+ inno_write(inno, 0x07, 0);
+}
+
+static const struct inno_hdmi_phy_ops rk3328_hdmi_phy_ops = {
+ .init = inno_hdmi_phy_rk3328_init,
+ .power_on = inno_hdmi_phy_rk3328_power_on,
+ .power_off = inno_hdmi_phy_rk3328_power_off,
+};
+
+static const struct inno_hdmi_phy_drv_data rk3228_hdmi_phy_drv_data = {
+ .ops = &rk3228_hdmi_phy_ops,
+ .clk_ops = &inno_hdmi_phy_rk3228_clk_ops,
+ .phy_cfg_table = rk3228_phy_cfg,
+};
+
+static const struct inno_hdmi_phy_drv_data rk3328_hdmi_phy_drv_data = {
+ .ops = &rk3328_hdmi_phy_ops,
+ .clk_ops = &inno_hdmi_phy_rk3328_clk_ops,
+ .phy_cfg_table = rk3328_phy_cfg,
+};
+
+static const struct regmap_config inno_hdmi_phy_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x400,
+};
+
+static void inno_hdmi_phy_action(void *data)
+{
+ struct inno_hdmi_phy *inno = data;
+
+ clk_disable_unprepare(inno->refpclk);
+ clk_disable_unprepare(inno->sysclk);
+}
+
+static int inno_hdmi_phy_probe(struct platform_device *pdev)
+{
+ struct inno_hdmi_phy *inno;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ inno = devm_kzalloc(&pdev->dev, sizeof(*inno), GFP_KERNEL);
+ if (!inno)
+ return -ENOMEM;
+
+ inno->dev = &pdev->dev;
+
+ inno->plat_data = of_device_get_match_data(inno->dev);
+ if (!inno->plat_data || !inno->plat_data->ops)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(inno->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ inno->sysclk = devm_clk_get(inno->dev, "sysclk");
+ if (IS_ERR(inno->sysclk)) {
+ ret = PTR_ERR(inno->sysclk);
+ dev_err(inno->dev, "failed to get sysclk: %d\n", ret);
+ return ret;
+ }
+
+ inno->refpclk = devm_clk_get(inno->dev, "refpclk");
+ if (IS_ERR(inno->refpclk)) {
+ ret = PTR_ERR(inno->refpclk);
+ dev_err(inno->dev, "failed to get ref clock: %d\n", ret);
+ return ret;
+ }
+
+ inno->refoclk = devm_clk_get(inno->dev, "refoclk");
+ if (IS_ERR(inno->refoclk)) {
+ ret = PTR_ERR(inno->refoclk);
+ dev_err(inno->dev, "failed to get oscillator-ref clock: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(inno->sysclk);
+ if (ret) {
+ dev_err(inno->dev, "Cannot enable inno phy sysclk: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Refpclk needs to be enabled, at least on the rk3328, for
+ * reasons that are still unknown.
+ */
+ ret = clk_prepare_enable(inno->refpclk);
+ if (ret) {
+ dev_err(inno->dev, "failed to enable refpclk\n");
+ clk_disable_unprepare(inno->sysclk);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(inno->dev, inno_hdmi_phy_action,
+ inno);
+ if (ret)
+ return ret;
+
+ inno->regmap = devm_regmap_init_mmio(inno->dev, regs,
+ &inno_hdmi_phy_regmap_config);
+ if (IS_ERR(inno->regmap))
+ return PTR_ERR(inno->regmap);
+
+ /* only the newer rk3328 hdmiphy has an interrupt */
+ inno->irq = platform_get_irq(pdev, 0);
+ if (inno->irq > 0) {
+ ret = devm_request_threaded_irq(inno->dev, inno->irq,
+ inno_hdmi_phy_rk3328_hardirq,
+ inno_hdmi_phy_rk3328_irq,
+ IRQF_SHARED,
+ dev_name(inno->dev), inno);
+ if (ret)
+ return ret;
+ }
+
+ inno->phy = devm_phy_create(inno->dev, NULL, &inno_hdmi_phy_ops);
+ if (IS_ERR(inno->phy)) {
+ dev_err(inno->dev, "failed to create HDMI PHY\n");
+ return PTR_ERR(inno->phy);
+ }
+
+ phy_set_drvdata(inno->phy, inno);
+ phy_set_bus_width(inno->phy, 8);
+
+ if (inno->plat_data->ops->init) {
+ ret = inno->plat_data->ops->init(inno);
+ if (ret)
+ return ret;
+ }
+
+ ret = inno_hdmi_phy_clk_register(inno);
+ if (ret)
+ return ret;
+
+ phy_provider = devm_of_phy_provider_register(inno->dev,
+ of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static int inno_hdmi_phy_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id inno_hdmi_phy_of_match[] = {
+ {
+ .compatible = "rockchip,rk3228-hdmi-phy",
+ .data = &rk3228_hdmi_phy_drv_data
+ }, {
+ .compatible = "rockchip,rk3328-hdmi-phy",
+ .data = &rk3328_hdmi_phy_drv_data
+ }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, inno_hdmi_phy_of_match);
+
+static struct platform_driver inno_hdmi_phy_driver = {
+ .probe = inno_hdmi_phy_probe,
+ .remove = inno_hdmi_phy_remove,
+ .driver = {
+ .name = "inno-hdmi-phy",
+ .of_match_table = inno_hdmi_phy_of_match,
+ },
+};
+module_platform_driver(inno_hdmi_phy_driver);
+
+MODULE_AUTHOR("Zheng Yang <zhengyang@rock-chips.com>");
+MODULE_DESCRIPTION("Innosilion HDMI 2.0 Transmitter PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 5049dac79bd0..24bd2717abdb 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -1116,8 +1116,8 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
}
if (of_property_read_u32(np, "reg", &reg)) {
- dev_err(dev, "the reg property is not assigned in %s node\n",
- np->name);
+ dev_err(dev, "the reg property is not assigned in %pOFn node\n",
+ np);
return -EINVAL;
}
@@ -1143,8 +1143,8 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
}
if (!rphy->phy_cfg) {
- dev_err(dev, "no phy-config can be matched with %s node\n",
- np->name);
+ dev_err(dev, "no phy-config can be matched with %pOFn node\n",
+ np);
return -EINVAL;
}
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 76a4b58ec771..c57e496f0b0c 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1145,8 +1145,8 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
}
if (!tcphy->port_cfgs) {
- dev_err(dev, "no phy-config can be matched with %s node\n",
- np->name);
+ dev_err(dev, "no phy-config can be matched with %pOFn node\n",
+ np);
return -EINVAL;
}
@@ -1186,8 +1186,8 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
continue;
if (IS_ERR(phy)) {
- dev_err(dev, "failed to create phy: %s\n",
- child_np->name);
+ dev_err(dev, "failed to create phy: %pOFn\n",
+ child_np);
pm_runtime_disable(dev);
return PTR_ERR(phy);
}
diff --git a/drivers/phy/rockchip/phy-rockchip-usb.c b/drivers/phy/rockchip/phy-rockchip-usb.c
index 3378eeb7a562..b2899c744ad9 100644
--- a/drivers/phy/rockchip/phy-rockchip-usb.c
+++ b/drivers/phy/rockchip/phy-rockchip-usb.c
@@ -36,7 +36,22 @@ static int enable_usb_uart;
#define HIWORD_UPDATE(val, mask) \
((val) | (mask) << 16)
-#define UOC_CON0_SIDDQ BIT(13)
+#define UOC_CON0 0x00
+#define UOC_CON0_SIDDQ BIT(13)
+#define UOC_CON0_DISABLE BIT(4)
+#define UOC_CON0_COMMON_ON_N BIT(0)
+
+#define UOC_CON2 0x08
+#define UOC_CON2_SOFT_CON_SEL BIT(2)
+
+#define UOC_CON3 0x0c
+/* bits present on rk3188 and rk3288 phys */
+#define UOC_CON3_UTMI_TERMSEL_FULLSPEED BIT(5)
+#define UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC (1 << 3)
+#define UOC_CON3_UTMI_XCVRSEELCT_MASK (3 << 3)
+#define UOC_CON3_UTMI_OPMODE_NODRIVING (1 << 1)
+#define UOC_CON3_UTMI_OPMODE_MASK (3 << 1)
+#define UOC_CON3_UTMI_SUSPENDN BIT(0)
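HIWORD_UPDATE() relies on the Rockchip GRF register convention: the upper 16 bits of a write act as a write-enable mask for the corresponding lower 16 bits, so single bits can be flipped without a read-modify-write cycle:

    /* set SIDDQ (bit 13) and leave all other bits untouched: 0x20002000 */
    u32 val = HIWORD_UPDATE(UOC_CON0_SIDDQ, UOC_CON0_SIDDQ);
    regmap_write(grf, regoffs + UOC_CON0, val);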
struct rockchip_usb_phys {
int reg;
@@ -46,7 +61,8 @@ struct rockchip_usb_phys {
struct rockchip_usb_phy_base;
struct rockchip_usb_phy_pdata {
struct rockchip_usb_phys *phys;
- int (*init_usb_uart)(struct regmap *grf);
+ int (*init_usb_uart)(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata);
int usb_uart_phy;
};
@@ -208,8 +224,8 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
rk_phy->np = child;
if (of_property_read_u32(child, "reg", &reg_offset)) {
- dev_err(base->dev, "missing reg property in node %s\n",
- child->name);
+ dev_err(base->dev, "missing reg property in node %pOFn\n",
+ child);
return -EINVAL;
}
@@ -313,28 +329,88 @@ static const struct rockchip_usb_phy_pdata rk3066a_pdata = {
},
};
+static int __init rockchip_init_usb_uart_common(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata)
+{
+ int regoffs = pdata->phys[pdata->usb_uart_phy].reg;
+ int ret;
+ u32 val;
+
+ /*
+ * COMMON_ON and DISABLE settings are described in the TRM,
+ * but were not present in the original code.
+ * Also disable the analog phy components to save power.
+ */
+ val = HIWORD_UPDATE(UOC_CON0_COMMON_ON_N
+ | UOC_CON0_DISABLE
+ | UOC_CON0_SIDDQ,
+ UOC_CON0_COMMON_ON_N
+ | UOC_CON0_DISABLE
+ | UOC_CON0_SIDDQ);
+ ret = regmap_write(grf, regoffs + UOC_CON0, val);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(UOC_CON2_SOFT_CON_SEL,
+ UOC_CON2_SOFT_CON_SEL);
+ ret = regmap_write(grf, regoffs + UOC_CON2, val);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(UOC_CON3_UTMI_OPMODE_NODRIVING
+ | UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC
+ | UOC_CON3_UTMI_TERMSEL_FULLSPEED,
+ UOC_CON3_UTMI_SUSPENDN
+ | UOC_CON3_UTMI_OPMODE_MASK
+ | UOC_CON3_UTMI_XCVRSEELCT_MASK
+ | UOC_CON3_UTMI_TERMSEL_FULLSPEED);
+ ret = regmap_write(grf, regoffs + UOC_CON3, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+#define RK3188_UOC0_CON0 0x10c
+#define RK3188_UOC0_CON0_BYPASSSEL BIT(9)
+#define RK3188_UOC0_CON0_BYPASSDMEN BIT(8)
+
+/*
+ * Enable the bypass of uart2 data through the otg usb phy.
+ * See description of rk3288-variant for details.
+ */
+static int __init rk3188_init_usb_uart(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata)
+{
+ u32 val;
+ int ret;
+
+ ret = rockchip_init_usb_uart_common(grf, pdata);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(RK3188_UOC0_CON0_BYPASSSEL
+ | RK3188_UOC0_CON0_BYPASSDMEN,
+ RK3188_UOC0_CON0_BYPASSSEL
+ | RK3188_UOC0_CON0_BYPASSDMEN);
+ ret = regmap_write(grf, RK3188_UOC0_CON0, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static const struct rockchip_usb_phy_pdata rk3188_pdata = {
.phys = (struct rockchip_usb_phys[]){
{ .reg = 0x10c, .pll_name = "sclk_otgphy0_480m" },
{ .reg = 0x11c, .pll_name = "sclk_otgphy1_480m" },
{ /* sentinel */ }
},
+ .init_usb_uart = rk3188_init_usb_uart,
+ .usb_uart_phy = 0,
};
-#define RK3288_UOC0_CON0 0x320
-#define RK3288_UOC0_CON0_COMMON_ON_N BIT(0)
-#define RK3288_UOC0_CON0_DISABLE BIT(4)
-
-#define RK3288_UOC0_CON2 0x328
-#define RK3288_UOC0_CON2_SOFT_CON_SEL BIT(2)
-
#define RK3288_UOC0_CON3 0x32c
-#define RK3288_UOC0_CON3_UTMI_SUSPENDN BIT(0)
-#define RK3288_UOC0_CON3_UTMI_OPMODE_NODRIVING (1 << 1)
-#define RK3288_UOC0_CON3_UTMI_OPMODE_MASK (3 << 1)
-#define RK3288_UOC0_CON3_UTMI_XCVRSEELCT_FSTRANSC (1 << 3)
-#define RK3288_UOC0_CON3_UTMI_XCVRSEELCT_MASK (3 << 3)
-#define RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED BIT(5)
#define RK3288_UOC0_CON3_BYPASSDMEN BIT(6)
#define RK3288_UOC0_CON3_BYPASSSEL BIT(7)
@@ -353,40 +429,13 @@ static const struct rockchip_usb_phy_pdata rk3188_pdata = {
*
* The actual code in the vendor kernel does some things differently.
*/
-static int __init rk3288_init_usb_uart(struct regmap *grf)
+static int __init rk3288_init_usb_uart(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata)
{
u32 val;
int ret;
- /*
- * COMMON_ON and DISABLE settings are described in the TRM,
- * but were not present in the original code.
- * Also disable the analog phy components to save power.
- */
- val = HIWORD_UPDATE(RK3288_UOC0_CON0_COMMON_ON_N
- | RK3288_UOC0_CON0_DISABLE
- | UOC_CON0_SIDDQ,
- RK3288_UOC0_CON0_COMMON_ON_N
- | RK3288_UOC0_CON0_DISABLE
- | UOC_CON0_SIDDQ);
- ret = regmap_write(grf, RK3288_UOC0_CON0, val);
- if (ret)
- return ret;
-
- val = HIWORD_UPDATE(RK3288_UOC0_CON2_SOFT_CON_SEL,
- RK3288_UOC0_CON2_SOFT_CON_SEL);
- ret = regmap_write(grf, RK3288_UOC0_CON2, val);
- if (ret)
- return ret;
-
- val = HIWORD_UPDATE(RK3288_UOC0_CON3_UTMI_OPMODE_NODRIVING
- | RK3288_UOC0_CON3_UTMI_XCVRSEELCT_FSTRANSC
- | RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED,
- RK3288_UOC0_CON3_UTMI_SUSPENDN
- | RK3288_UOC0_CON3_UTMI_OPMODE_MASK
- | RK3288_UOC0_CON3_UTMI_XCVRSEELCT_MASK
- | RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED);
- ret = regmap_write(grf, RK3288_UOC0_CON3, val);
+ ret = rockchip_init_usb_uart_common(grf, pdata);
if (ret)
return ret;
@@ -516,7 +565,7 @@ static int __init rockchip_init_usb_uart(void)
return PTR_ERR(grf);
}
- ret = data->init_usb_uart(grf);
+ ret = data->init_usb_uart(grf, data);
if (ret) {
pr_err("%s: could not init usb_uart, %d\n", __func__, ret);
enable_usb_uart = 0;
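
To make the refactor's shape explicit: the one-time init path now hands the matched pdata back into the per-SoC callback, so the common helper can derive the UART phy's register offset on its own. A condensed sketch, assuming the pdata/grf lookups shown in the hunk above:

/* sketch of the dispatch after this change */
static int __init sketch_init_usb_uart(struct regmap *grf,
				       const struct rockchip_usb_phy_pdata *data)
{
	if (!data->init_usb_uart)
		return -ENOTSUPP;
	return data->init_usb_uart(grf, data); /* e.g. rk3188_init_usb_uart */
}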
diff --git a/drivers/phy/socionext/Kconfig b/drivers/phy/socionext/Kconfig
new file mode 100644
index 000000000000..467e8147972b
--- /dev/null
+++ b/drivers/phy/socionext/Kconfig
@@ -0,0 +1,34 @@
+#
+# PHY drivers for Socionext platforms.
+#
+
+config PHY_UNIPHIER_USB2
+ tristate "UniPhier USB2 PHY driver"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the USB PHY implemented on the USB2
+ controller on UniPhier SoCs. This driver provides an interface
+ to interact with the USB 2.0 PHY that is part of the UniPhier SoC.
+ On Pro4, this USB2 PHY must be specified instead of the USB3
+ HS-PHY.
+
+config PHY_UNIPHIER_USB3
+ tristate "UniPhier USB3 PHY driver"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ help
+ Enable this to support the USB PHY implemented in the USB3
+ controller on UniPhier SoCs. This controller supports USB 3.0
+ and lower speeds.
+
+config PHY_UNIPHIER_PCIE
+ tristate "Uniphier PHY driver for PCIe controller"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ default PCIE_UNIPHIER
+ select GENERIC_PHY
+ help
+ Enable this to support the PHY implemented in the PCIe controller
+ on UniPhier SoCs. This driver supports the LD20 and PXs3 SoCs.
diff --git a/drivers/phy/socionext/Makefile b/drivers/phy/socionext/Makefile
new file mode 100644
index 000000000000..7dc9095b5bb7
--- /dev/null
+++ b/drivers/phy/socionext/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the phy drivers.
+#
+
+obj-$(CONFIG_PHY_UNIPHIER_USB2) += phy-uniphier-usb2.o
+obj-$(CONFIG_PHY_UNIPHIER_USB3) += phy-uniphier-usb3hs.o phy-uniphier-usb3ss.o
+obj-$(CONFIG_PHY_UNIPHIER_PCIE) += phy-uniphier-pcie.o
diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
new file mode 100644
index 000000000000..93ffbd2940fa
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-pcie.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-pcie.c - PHY driver for UniPhier PCIe controller
+ * Copyright 2018, Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+
+/* PHY */
+#define PCL_PHY_TEST_I 0x2000
+#define PCL_PHY_TEST_O 0x2004
+#define TESTI_DAT_MASK GENMASK(13, 6)
+#define TESTI_ADR_MASK GENMASK(5, 1)
+#define TESTI_WR_EN BIT(0)
+
+#define PCL_PHY_RESET 0x200c
+#define PCL_PHY_RESET_N_MNMODE BIT(8) /* =1:manual */
+#define PCL_PHY_RESET_N BIT(0) /* =1:deassert */
+
+/* SG */
+#define SG_USBPCIESEL 0x590
+#define SG_USBPCIESEL_PCIE BIT(0)
+
+#define PCL_PHY_R00 0
+#define RX_EQ_ADJ_EN BIT(3) /* enable for EQ adjustment */
+#define PCL_PHY_R06 6
+#define RX_EQ_ADJ GENMASK(5, 0) /* EQ adjustment value */
+#define RX_EQ_ADJ_VAL 0
+#define PCL_PHY_R26 26
+#define VCO_CTRL GENMASK(7, 4) /* Tx VCO adjustment value */
+#define VCO_CTRL_INIT_VAL 5
+
+struct uniphier_pciephy_priv {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+ struct reset_control *rst;
+ const struct uniphier_pciephy_soc_data *data;
+};
+
+struct uniphier_pciephy_soc_data {
+ bool has_syscon;
+};
+
+static void uniphier_pciephy_testio_write(struct uniphier_pciephy_priv *priv,
+ u32 data)
+{
+ /* need to read TESTO twice after accessing TESTI */
+ writel(data, priv->base + PCL_PHY_TEST_I);
+ readl(priv->base + PCL_PHY_TEST_O);
+ readl(priv->base + PCL_PHY_TEST_O);
+}
+
+static void uniphier_pciephy_set_param(struct uniphier_pciephy_priv *priv,
+ u32 reg, u32 mask, u32 param)
+{
+ u32 val;
+
+ /* read previous data */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, reg);
+ uniphier_pciephy_testio_write(priv, val);
+ val = readl(priv->base + PCL_PHY_TEST_O);
+
+ /* update value (val still holds the raw data read back via TESTO) */
+ val &= ~mask;
+ val |= mask & param;
+ val = FIELD_PREP(TESTI_DAT_MASK, val);
+ val |= FIELD_PREP(TESTI_ADR_MASK, reg);
+ uniphier_pciephy_testio_write(priv, val);
+ uniphier_pciephy_testio_write(priv, val | TESTI_WR_EN);
+ uniphier_pciephy_testio_write(priv, val);
+
+ /* read current data as dummy */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, reg);
+ uniphier_pciephy_testio_write(priv, val);
+ readl(priv->base + PCL_PHY_TEST_O);
+}
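
A worked example of the mask/param encoding, mirroring the VCO call in uniphier_pciephy_init() below (values taken from the defines at the top of the file):

/* worked example: program VCO_CTRL (phy reg 26, bits [7:4]) to 5 */
u32 mask = VCO_CTRL;                                 /* GENMASK(7, 4) = 0xf0 */
u32 param = FIELD_PREP(VCO_CTRL, VCO_CTRL_INIT_VAL); /* 5 << 4 = 0x50 */

uniphier_pciephy_set_param(priv, PCL_PHY_R26, mask, param);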
+
+static void uniphier_pciephy_assert(struct uniphier_pciephy_priv *priv)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_PHY_RESET);
+ val &= ~PCL_PHY_RESET_N;
+ val |= PCL_PHY_RESET_N_MNMODE;
+ writel(val, priv->base + PCL_PHY_RESET);
+}
+
+static void uniphier_pciephy_deassert(struct uniphier_pciephy_priv *priv)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_PHY_RESET);
+ val |= PCL_PHY_RESET_N_MNMODE | PCL_PHY_RESET_N;
+ writel(val, priv->base + PCL_PHY_RESET);
+}
+
+static int uniphier_pciephy_init(struct phy *phy)
+{
+ struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ uniphier_pciephy_set_param(priv, PCL_PHY_R00,
+ RX_EQ_ADJ_EN, RX_EQ_ADJ_EN);
+ uniphier_pciephy_set_param(priv, PCL_PHY_R06, RX_EQ_ADJ,
+ FIELD_PREP(RX_EQ_ADJ, RX_EQ_ADJ_VAL));
+ uniphier_pciephy_set_param(priv, PCL_PHY_R26, VCO_CTRL,
+ FIELD_PREP(VCO_CTRL, VCO_CTRL_INIT_VAL));
+ usleep_range(1, 10);
+
+ uniphier_pciephy_deassert(priv);
+ usleep_range(1, 10);
+
+ return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static int uniphier_pciephy_exit(struct phy *phy)
+{
+ struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy);
+
+ uniphier_pciephy_assert(priv);
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct phy_ops uniphier_pciephy_ops = {
+ .init = uniphier_pciephy_init,
+ .exit = uniphier_pciephy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_pciephy_probe(struct platform_device *pdev)
+{
+ struct uniphier_pciephy_priv *priv;
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ struct resource *res;
+ struct phy *phy;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data))
+ return -EINVAL;
+
+ priv->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ phy = devm_phy_create(dev, dev->of_node, &uniphier_pciephy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "socionext,syscon");
+ if (!IS_ERR(regmap) && priv->data->has_syscon)
+ regmap_update_bits(regmap, SG_USBPCIESEL,
+ SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct uniphier_pciephy_soc_data uniphier_ld20_data = {
+ .has_syscon = true,
+};
+
+static const struct uniphier_pciephy_soc_data uniphier_pxs3_data = {
+ .has_syscon = false,
+};
+
+static const struct of_device_id uniphier_pciephy_match[] = {
+ {
+ .compatible = "socionext,uniphier-ld20-pcie-phy",
+ .data = &uniphier_ld20_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-pcie-phy",
+ .data = &uniphier_pxs3_data,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, uniphier_pciephy_match);
+
+static struct platform_driver uniphier_pciephy_driver = {
+ .probe = uniphier_pciephy_probe,
+ .driver = {
+ .name = "uniphier-pcie-phy",
+ .of_match_table = uniphier_pciephy_match,
+ },
+};
+module_platform_driver(uniphier_pciephy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PHY driver for PCIe controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/socionext/phy-uniphier-usb2.c b/drivers/phy/socionext/phy-uniphier-usb2.c
new file mode 100644
index 000000000000..3f2086ed4fe4
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-usb2.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-usb2.c - PHY driver for UniPhier USB2 controller
+ * Copyright 2015-2018 Socionext Inc.
+ * Author:
+ * Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define SG_USBPHY1CTRL 0x500
+#define SG_USBPHY1CTRL2 0x504
+#define SG_USBPHY2CTRL 0x508
+#define SG_USBPHY2CTRL2 0x50c /* LD11 */
+#define SG_USBPHY12PLL 0x50c /* Pro4 */
+#define SG_USBPHY3CTRL 0x510
+#define SG_USBPHY3CTRL2 0x514
+#define SG_USBPHY4CTRL 0x518 /* Pro4 */
+#define SG_USBPHY4CTRL2 0x51c /* Pro4 */
+#define SG_USBPHY34PLL 0x51c /* Pro4 */
+
+struct uniphier_u2phy_param {
+ u32 offset;
+ u32 value;
+};
+
+struct uniphier_u2phy_soc_data {
+ struct uniphier_u2phy_param config0;
+ struct uniphier_u2phy_param config1;
+};
+
+struct uniphier_u2phy_priv {
+ struct regmap *regmap;
+ struct phy *phy;
+ struct regulator *vbus;
+ const struct uniphier_u2phy_soc_data *data;
+ struct uniphier_u2phy_priv *next;
+};
+
+static int uniphier_u2phy_power_on(struct phy *phy)
+{
+ struct uniphier_u2phy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->vbus)
+ ret = regulator_enable(priv->vbus);
+
+ return ret;
+}
+
+static int uniphier_u2phy_power_off(struct phy *phy)
+{
+ struct uniphier_u2phy_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
+ return 0;
+}
+
+static int uniphier_u2phy_init(struct phy *phy)
+{
+ struct uniphier_u2phy_priv *priv = phy_get_drvdata(phy);
+
+ if (!priv->data)
+ return 0;
+
+ regmap_write(priv->regmap, priv->data->config0.offset,
+ priv->data->config0.value);
+ regmap_write(priv->regmap, priv->data->config1.offset,
+ priv->data->config1.value);
+
+ return 0;
+}
+
+static struct phy *uniphier_u2phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct uniphier_u2phy_priv *priv = dev_get_drvdata(dev);
+
+ while (priv && args->np != priv->phy->dev.of_node)
+ priv = priv->next;
+
+ if (!priv) {
+ dev_err(dev, "Failed to find appropriate phy\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return priv->phy;
+}
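
The xlate above resolves a consumer's phandle by walking the singly linked list built in probe. A hypothetical consumer-side lookup that would exercise this walk (the "usb-phy" con_id is an illustration, not taken from this patch):

/* hypothetical consumer; devm_phy_get() ends up in the xlate walk above */
struct phy *phy = devm_phy_get(&pdev->dev, "usb-phy");
if (IS_ERR(phy))
	return PTR_ERR(phy);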
+
+static const struct phy_ops uniphier_u2phy_ops = {
+ .init = uniphier_u2phy_init,
+ .power_on = uniphier_u2phy_power_on,
+ .power_off = uniphier_u2phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_u2phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent, *child;
+ struct uniphier_u2phy_priv *priv = NULL, *next = NULL;
+ struct phy_provider *phy_provider;
+ struct regmap *regmap;
+ const struct uniphier_u2phy_soc_data *data;
+ int ret, ndatas;
+ u32 data_idx;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ /* get number of data */
+ for (ndatas = 0; data[ndatas].config0.offset; ndatas++)
+ ;
+
+ parent = of_get_parent(dev->of_node);
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to get regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ for_each_child_of_node(dev->of_node, child) {
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_put_child;
+ }
+ priv->regmap = regmap;
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(priv->vbus)) {
+ if (PTR_ERR(priv->vbus) == -EPROBE_DEFER) {
+ ret = PTR_ERR(priv->vbus);
+ goto out_put_child;
+ }
+ priv->vbus = NULL;
+ }
+
+ priv->phy = devm_phy_create(dev, child, &uniphier_u2phy_ops);
+ if (IS_ERR(priv->phy)) {
+ dev_err(dev, "Failed to create phy\n");
+ ret = PTR_ERR(priv->phy);
+ goto out_put_child;
+ }
+
+ ret = of_property_read_u32(child, "reg", &data_idx);
+ if (ret) {
+ dev_err(dev, "Failed to get reg property\n");
+ goto out_put_child;
+ }
+
+ if (data_idx < ndatas)
+ priv->data = &data[data_idx];
+ else
+ dev_warn(dev, "No phy configuration: %s\n",
+ child->full_name);
+
+ phy_set_drvdata(priv->phy, priv);
+ priv->next = next;
+ next = priv;
+ }
+
+ dev_set_drvdata(dev, priv);
+ phy_provider = devm_of_phy_provider_register(dev,
+ uniphier_u2phy_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+out_put_child:
+ of_node_put(child);
+
+ return ret;
+}
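
Note the head insertion in the child loop: dev_set_drvdata() ends up holding the priv of the last-created child, and each ->next points at an older sibling. A sketch of walking the resulting chain:

/* sketch: shape of the list after probe */
struct uniphier_u2phy_priv *it = dev_get_drvdata(dev); /* newest child */
while (it)
	it = it->next; /* older children follow; NULL after the first one */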
+
+static const struct uniphier_u2phy_soc_data uniphier_pro4_data[] = {
+ {
+ .config0 = { SG_USBPHY1CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY12PLL, 0x00010010 },
+ },
+ {
+ .config0 = { SG_USBPHY2CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY12PLL, 0x00010010 },
+ },
+ {
+ .config0 = { SG_USBPHY3CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY34PLL, 0x00010010 },
+ },
+ {
+ .config0 = { SG_USBPHY4CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY34PLL, 0x00010010 },
+ },
+ { /* sentinel */ }
+};
+
+static const struct uniphier_u2phy_soc_data uniphier_ld11_data[] = {
+ {
+ .config0 = { SG_USBPHY1CTRL, 0x82280000 },
+ .config1 = { SG_USBPHY1CTRL2, 0x00000106 },
+ },
+ {
+ .config0 = { SG_USBPHY2CTRL, 0x82280000 },
+ .config1 = { SG_USBPHY2CTRL2, 0x00000106 },
+ },
+ {
+ .config0 = { SG_USBPHY3CTRL, 0x82280000 },
+ .config1 = { SG_USBPHY3CTRL2, 0x00000106 },
+ },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id uniphier_u2phy_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro4-usb2-phy",
+ .data = &uniphier_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-usb2-phy",
+ .data = &uniphier_ld11_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_u2phy_match);
+
+static struct platform_driver uniphier_u2phy_driver = {
+ .probe = uniphier_u2phy_probe,
+ .driver = {
+ .name = "uniphier-usb2-phy",
+ .of_match_table = uniphier_u2phy_match,
+ },
+};
+module_platform_driver(uniphier_u2phy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PHY driver for USB2 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/socionext/phy-uniphier-usb3hs.c b/drivers/phy/socionext/phy-uniphier-usb3hs.c
new file mode 100644
index 000000000000..b1b048be6166
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-usb3hs.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-usb3hs.c - HS-PHY driver for Socionext UniPhier USB3 controller
+ * Copyright 2015-2018 Socionext Inc.
+ * Author:
+ * Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ * Contributors:
+ * Motoya Tanigawa <tanigawa.motoya@socionext.com>
+ * Masami Hiramatsu <masami.hiramatsu@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define HSPHY_CFG0 0x0
+#define HSPHY_CFG0_HS_I_MASK GENMASK(31, 28)
+#define HSPHY_CFG0_HSDISC_MASK GENMASK(27, 26)
+#define HSPHY_CFG0_SWING_MASK GENMASK(17, 16)
+#define HSPHY_CFG0_SEL_T_MASK GENMASK(15, 12)
+#define HSPHY_CFG0_RTERM_MASK GENMASK(7, 6)
+#define HSPHY_CFG0_TRIMMASK (HSPHY_CFG0_HS_I_MASK \
+ | HSPHY_CFG0_SEL_T_MASK \
+ | HSPHY_CFG0_RTERM_MASK)
+
+#define HSPHY_CFG1 0x4
+#define HSPHY_CFG1_DAT_EN BIT(29)
+#define HSPHY_CFG1_ADR_EN BIT(28)
+#define HSPHY_CFG1_ADR_MASK GENMASK(27, 16)
+#define HSPHY_CFG1_DAT_MASK GENMASK(23, 16)
+
+#define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) }
+
+#define LS_SLEW PHY_F(10, 6, 6) /* LS mode slew rate */
+#define FS_LS_DRV PHY_F(10, 5, 5) /* FS/LS slew rate */
+
+#define MAX_PHY_PARAMS 2
+
+struct uniphier_u3hsphy_param {
+ struct {
+ int reg_no;
+ int msb;
+ int lsb;
+ } field;
+ u8 value;
+};
+
+struct uniphier_u3hsphy_trim_param {
+ unsigned int rterm;
+ unsigned int sel_t;
+ unsigned int hs_i;
+};
+
+#define trim_param_is_valid(p) ((p)->rterm || (p)->sel_t || (p)->hs_i)
+
+struct uniphier_u3hsphy_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk, *clk_parent, *clk_ext;
+ struct reset_control *rst, *rst_parent;
+ struct regulator *vbus;
+ const struct uniphier_u3hsphy_soc_data *data;
+};
+
+struct uniphier_u3hsphy_soc_data {
+ int nparams;
+ const struct uniphier_u3hsphy_param param[MAX_PHY_PARAMS];
+ u32 config0;
+ u32 config1;
+ void (*trim_func)(struct uniphier_u3hsphy_priv *priv, u32 *pconfig,
+ struct uniphier_u3hsphy_trim_param *pt);
+};
+
+static void uniphier_u3hsphy_trim_ld20(struct uniphier_u3hsphy_priv *priv,
+ u32 *pconfig,
+ struct uniphier_u3hsphy_trim_param *pt)
+{
+ *pconfig &= ~HSPHY_CFG0_RTERM_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_RTERM_MASK, pt->rterm);
+
+ *pconfig &= ~HSPHY_CFG0_SEL_T_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_SEL_T_MASK, pt->sel_t);
+
+ *pconfig &= ~HSPHY_CFG0_HS_I_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_HS_I_MASK, pt->hs_i);
+}
+
+static int uniphier_u3hsphy_get_nvparam(struct uniphier_u3hsphy_priv *priv,
+ const char *name, unsigned int *val)
+{
+ struct nvmem_cell *cell;
+ u8 *buf;
+
+ cell = devm_nvmem_cell_get(priv->dev, name);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, NULL);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ *val = *buf;
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int uniphier_u3hsphy_get_nvparams(struct uniphier_u3hsphy_priv *priv,
+ struct uniphier_u3hsphy_trim_param *pt)
+{
+ int ret;
+
+ ret = uniphier_u3hsphy_get_nvparam(priv, "rterm", &pt->rterm);
+ if (ret)
+ return ret;
+
+ ret = uniphier_u3hsphy_get_nvparam(priv, "sel_t", &pt->sel_t);
+ if (ret)
+ return ret;
+
+ ret = uniphier_u3hsphy_get_nvparam(priv, "hs_i", &pt->hs_i);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int uniphier_u3hsphy_update_config(struct uniphier_u3hsphy_priv *priv,
+ u32 *pconfig)
+{
+ struct uniphier_u3hsphy_trim_param trim;
+ int ret, trimmed = 0;
+
+ if (priv->data->trim_func) {
+ ret = uniphier_u3hsphy_get_nvparams(priv, &trim);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ /*
+ * call trim_func only when valid (not all-zero) trimming
+ * parameters could be read from nvmem; all-zero parameters
+ * mean that nothing has been written there yet.
+ */
+ if (!ret && trim_param_is_valid(&trim)) {
+ priv->data->trim_func(priv, pconfig, &trim);
+ trimmed = 1;
+ } else {
+ dev_dbg(priv->dev, "can't get parameter from nvmem\n");
+ }
+ }
+
+ /* use default parameters without trimming values */
+ if (!trimmed) {
+ *pconfig &= ~HSPHY_CFG0_HSDISC_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_HSDISC_MASK, 3);
+ }
+
+ return 0;
+}
+
+static void uniphier_u3hsphy_set_param(struct uniphier_u3hsphy_priv *priv,
+ const struct uniphier_u3hsphy_param *p)
+{
+ u32 val;
+ u32 field_mask = GENMASK(p->field.msb, p->field.lsb);
+ u8 data;
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~HSPHY_CFG1_ADR_MASK;
+ val |= FIELD_PREP(HSPHY_CFG1_ADR_MASK, p->field.reg_no)
+ | HSPHY_CFG1_ADR_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~HSPHY_CFG1_ADR_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~FIELD_PREP(HSPHY_CFG1_DAT_MASK, field_mask);
+ data = field_mask & (p->value << p->field.lsb);
+ val |= FIELD_PREP(HSPHY_CFG1_DAT_MASK, data) | HSPHY_CFG1_DAT_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~HSPHY_CFG1_DAT_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+}
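
As with the PCIe PHY's test interface, a worked example of the parameter encoding may help (LS_SLEW from the defines above):

/* worked example: LS_SLEW = PHY_F(10, 6, 6), value 1 */
static const struct uniphier_u3hsphy_param ls_slew = { LS_SLEW, 1 };
/* field_mask = GENMASK(6, 6) = 0x40; data = 0x40 & (1 << 6) = 0x40 */
uniphier_u3hsphy_set_param(priv, &ls_slew);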
+
+static int uniphier_u3hsphy_power_on(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_ext);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto out_clk_ext_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ if (priv->vbus) {
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ goto out_rst_assert;
+ }
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+out_clk_ext_disable:
+ clk_disable_unprepare(priv->clk_ext);
+
+ return ret;
+}
+
+static int uniphier_u3hsphy_power_off(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_ext);
+
+ return 0;
+}
+
+static int uniphier_u3hsphy_init(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+ u32 config0, config1;
+ int i, ret;
+
+ ret = clk_prepare_enable(priv->clk_parent);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rst_parent);
+ if (ret)
+ goto out_clk_disable;
+
+ if (!priv->data->config0 && !priv->data->config1)
+ return 0;
+
+ config0 = priv->data->config0;
+ config1 = priv->data->config1;
+
+ ret = uniphier_u3hsphy_update_config(priv, &config0);
+ if (ret)
+ goto out_rst_assert;
+
+ writel(config0, priv->base + HSPHY_CFG0);
+ writel(config1, priv->base + HSPHY_CFG1);
+
+ for (i = 0; i < priv->data->nparams; i++)
+ uniphier_u3hsphy_set_param(priv, &priv->data->param[i]);
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst_parent);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk_parent);
+
+ return ret;
+}
+
+static int uniphier_u3hsphy_exit(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+
+ reset_control_assert(priv->rst_parent);
+ clk_disable_unprepare(priv->clk_parent);
+
+ return 0;
+}
+
+static const struct phy_ops uniphier_u3hsphy_ops = {
+ .init = uniphier_u3hsphy_init,
+ .exit = uniphier_u3hsphy_exit,
+ .power_on = uniphier_u3hsphy_power_on,
+ .power_off = uniphier_u3hsphy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_u3hsphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_u3hsphy_priv *priv;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct phy *phy;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data ||
+ priv->data->nparams > MAX_PHY_PARAMS))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->clk_parent = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk_parent))
+ return PTR_ERR(priv->clk_parent);
+
+ priv->clk_ext = devm_clk_get(dev, "phy-ext");
+ if (IS_ERR(priv->clk_ext)) {
+ if (PTR_ERR(priv->clk_ext) == -ENOENT)
+ priv->clk_ext = NULL;
+ else
+ return PTR_ERR(priv->clk_ext);
+ }
+
+ priv->rst = devm_reset_control_get_shared(dev, "phy");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ priv->rst_parent = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst_parent))
+ return PTR_ERR(priv->rst_parent);
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(priv->vbus)) {
+ if (PTR_ERR(priv->vbus) == -EPROBE_DEFER)
+ return PTR_ERR(priv->vbus);
+ priv->vbus = NULL;
+ }
+
+ phy = devm_phy_create(dev, dev->of_node, &uniphier_u3hsphy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct uniphier_u3hsphy_soc_data uniphier_pxs2_data = {
+ .nparams = 0,
+};
+
+static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = {
+ .nparams = 2,
+ .param = {
+ { LS_SLEW, 1 },
+ { FS_LS_DRV, 1 },
+ },
+ .trim_func = uniphier_u3hsphy_trim_ld20,
+ .config0 = 0x92316680,
+ .config1 = 0x00000106,
+};
+
+static const struct uniphier_u3hsphy_soc_data uniphier_pxs3_data = {
+ .nparams = 0,
+ .trim_func = uniphier_u3hsphy_trim_ld20,
+ .config0 = 0x92316680,
+ .config1 = 0x00000106,
+};
+
+static const struct of_device_id uniphier_u3hsphy_match[] = {
+ {
+ .compatible = "socionext,uniphier-pxs2-usb3-hsphy",
+ .data = &uniphier_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-usb3-hsphy",
+ .data = &uniphier_ld20_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-usb3-hsphy",
+ .data = &uniphier_pxs3_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_u3hsphy_match);
+
+static struct platform_driver uniphier_u3hsphy_driver = {
+ .probe = uniphier_u3hsphy_probe,
+ .driver = {
+ .name = "uniphier-usb3-hsphy",
+ .of_match_table = uniphier_u3hsphy_match,
+ },
+};
+
+module_platform_driver(uniphier_u3hsphy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier HS-PHY driver for USB3 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c
new file mode 100644
index 000000000000..4be95679c7d8
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-usb3ss.c - SS-PHY driver for Socionext UniPhier USB3 controller
+ * Copyright 2015-2018 Socionext Inc.
+ * Author:
+ * Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ * Contributors:
+ * Motoya Tanigawa <tanigawa.motoya@socionext.com>
+ * Masami Hiramatsu <masami.hiramatsu@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#define SSPHY_TESTI 0x0
+#define SSPHY_TESTO 0x4
+#define TESTI_DAT_MASK GENMASK(13, 6)
+#define TESTI_ADR_MASK GENMASK(5, 1)
+#define TESTI_WR_EN BIT(0)
+
+#define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) }
+
+#define CDR_CPD_TRIM PHY_F(7, 3, 0) /* RxPLL charge pump current */
+#define CDR_CPF_TRIM PHY_F(8, 3, 0) /* RxPLL charge pump current 2 */
+#define TX_PLL_TRIM PHY_F(9, 3, 0) /* TxPLL charge pump current */
+#define BGAP_TRIM PHY_F(11, 3, 0) /* Bandgap voltage */
+#define CDR_TRIM PHY_F(13, 6, 5) /* Clock Data Recovery setting */
+#define VCO_CTRL PHY_F(26, 7, 4) /* VCO control */
+#define VCOPLL_CTRL PHY_F(27, 2, 0) /* TxPLL VCO tuning */
+#define VCOPLL_CM PHY_F(28, 1, 0) /* TxPLL voltage */
+
+#define MAX_PHY_PARAMS 7
+
+struct uniphier_u3ssphy_param {
+ struct {
+ int reg_no;
+ int msb;
+ int lsb;
+ } field;
+ u8 value;
+};
+
+struct uniphier_u3ssphy_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk, *clk_ext, *clk_parent, *clk_parent_gio;
+ struct reset_control *rst, *rst_parent, *rst_parent_gio;
+ struct regulator *vbus;
+ const struct uniphier_u3ssphy_soc_data *data;
+};
+
+struct uniphier_u3ssphy_soc_data {
+ bool is_legacy;
+ int nparams;
+ const struct uniphier_u3ssphy_param param[MAX_PHY_PARAMS];
+};
+
+static void uniphier_u3ssphy_testio_write(struct uniphier_u3ssphy_priv *priv,
+ u32 data)
+{
+ /* need to read TESTO twice after accessing TESTI */
+ writel(data, priv->base + SSPHY_TESTI);
+ readl(priv->base + SSPHY_TESTO);
+ readl(priv->base + SSPHY_TESTO);
+}
+
+static void uniphier_u3ssphy_set_param(struct uniphier_u3ssphy_priv *priv,
+ const struct uniphier_u3ssphy_param *p)
+{
+ u32 val;
+ u8 field_mask = GENMASK(p->field.msb, p->field.lsb);
+ u8 data;
+
+ /* read previous data */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
+ uniphier_u3ssphy_testio_write(priv, val);
+ val = readl(priv->base + SSPHY_TESTO);
+
+ /* update value */
+ val &= ~field_mask;
+ data = field_mask & (p->value << p->field.lsb);
+ val = FIELD_PREP(TESTI_DAT_MASK, data | val);
+ val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
+ uniphier_u3ssphy_testio_write(priv, val);
+ uniphier_u3ssphy_testio_write(priv, val | TESTI_WR_EN);
+ uniphier_u3ssphy_testio_write(priv, val);
+
+ /* read current data as dummy */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
+ uniphier_u3ssphy_testio_write(priv, val);
+ readl(priv->base + SSPHY_TESTO);
+}
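
The TESTI word packing is easiest to follow with concrete numbers (CDR_TRIM from the defines above):

/* worked example: CDR_TRIM = PHY_F(13, 6, 5), value 2 */
u8 field_mask = GENMASK(6, 5);              /* 0x60 */
u8 data = field_mask & (2 << 5);            /* 0x40 */
u32 word = FIELD_PREP(TESTI_DAT_MASK, data) /* data into bits [13:6] */
	 | FIELD_PREP(TESTI_ADR_MASK, 13);  /* reg_no into bits [5:1] */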
+
+static int uniphier_u3ssphy_power_on(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_ext);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto out_clk_ext_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ if (priv->vbus) {
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ goto out_rst_assert;
+ }
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+out_clk_ext_disable:
+ clk_disable_unprepare(priv->clk_ext);
+
+ return ret;
+}
+
+static int uniphier_u3ssphy_power_off(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_ext);
+
+ return 0;
+}
+
+static int uniphier_u3ssphy_init(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+ int i, ret;
+
+ ret = clk_prepare_enable(priv->clk_parent);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk_parent_gio);
+ if (ret)
+ goto out_clk_disable;
+
+ ret = reset_control_deassert(priv->rst_parent);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_parent_gio);
+ if (ret)
+ goto out_rst_assert;
+
+ if (priv->data->is_legacy)
+ return 0;
+
+ for (i = 0; i < priv->data->nparams; i++)
+ uniphier_u3ssphy_set_param(priv, &priv->data->param[i]);
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst_parent);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_parent_gio);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk_parent);
+
+ return ret;
+}
+
+static int uniphier_u3ssphy_exit(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+
+ reset_control_assert(priv->rst_parent_gio);
+ reset_control_assert(priv->rst_parent);
+ clk_disable_unprepare(priv->clk_parent_gio);
+ clk_disable_unprepare(priv->clk_parent);
+
+ return 0;
+}
+
+static const struct phy_ops uniphier_u3ssphy_ops = {
+ .init = uniphier_u3ssphy_init,
+ .exit = uniphier_u3ssphy_exit,
+ .power_on = uniphier_u3ssphy_power_on,
+ .power_off = uniphier_u3ssphy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_u3ssphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_u3ssphy_priv *priv;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct phy *phy;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data ||
+ priv->data->nparams > MAX_PHY_PARAMS))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ if (!priv->data->is_legacy) {
+ priv->clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->clk_ext = devm_clk_get(dev, "phy-ext");
+ if (IS_ERR(priv->clk_ext)) {
+ if (PTR_ERR(priv->clk_ext) == -ENOENT)
+ priv->clk_ext = NULL;
+ else
+ return PTR_ERR(priv->clk_ext);
+ }
+
+ priv->rst = devm_reset_control_get_shared(dev, "phy");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+ } else {
+ priv->clk_parent_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_parent_gio))
+ return PTR_ERR(priv->clk_parent_gio);
+
+ priv->rst_parent_gio =
+ devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_parent_gio))
+ return PTR_ERR(priv->rst_parent_gio);
+ }
+
+ priv->clk_parent = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk_parent))
+ return PTR_ERR(priv->clk_parent);
+
+ priv->rst_parent = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst_parent))
+ return PTR_ERR(priv->rst_parent);
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(priv->vbus)) {
+ if (PTR_ERR(priv->vbus) == -EPROBE_DEFER)
+ return PTR_ERR(priv->vbus);
+ priv->vbus = NULL;
+ }
+
+ phy = devm_phy_create(dev, dev->of_node, &uniphier_u3ssphy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct uniphier_u3ssphy_soc_data uniphier_pro4_data = {
+ .is_legacy = true,
+};
+
+static const struct uniphier_u3ssphy_soc_data uniphier_pxs2_data = {
+ .is_legacy = false,
+ .nparams = 7,
+ .param = {
+ { CDR_CPD_TRIM, 10 },
+ { CDR_CPF_TRIM, 3 },
+ { TX_PLL_TRIM, 5 },
+ { BGAP_TRIM, 9 },
+ { CDR_TRIM, 2 },
+ { VCOPLL_CTRL, 7 },
+ { VCOPLL_CM, 1 },
+ },
+};
+
+static const struct uniphier_u3ssphy_soc_data uniphier_ld20_data = {
+ .is_legacy = false,
+ .nparams = 3,
+ .param = {
+ { CDR_CPD_TRIM, 6 },
+ { CDR_TRIM, 2 },
+ { VCO_CTRL, 5 },
+ },
+};
+
+static const struct of_device_id uniphier_u3ssphy_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro4-usb3-ssphy",
+ .data = &uniphier_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-usb3-ssphy",
+ .data = &uniphier_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-usb3-ssphy",
+ .data = &uniphier_ld20_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-usb3-ssphy",
+ .data = &uniphier_ld20_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_u3ssphy_match);
+
+static struct platform_driver uniphier_u3ssphy_driver = {
+ .probe = uniphier_u3ssphy_probe,
+ .driver = {
+ .name = "uniphier-usb3-ssphy",
+ .of_match_table = uniphier_u3ssphy_match,
+ },
+};
+
+module_platform_driver(uniphier_u3ssphy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier SS-PHY driver for USB3 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index de1b4ebe4de2..5b3b8863363e 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -115,8 +115,8 @@ int tegra_xusb_lane_parse_dt(struct tegra_xusb_lane *lane,
err = match_string(lane->soc->funcs, lane->soc->num_funcs, function);
if (err < 0) {
- dev_err(dev, "invalid function \"%s\" for lane \"%s\"\n",
- function, np->name);
+ dev_err(dev, "invalid function \"%s\" for lane \"%pOFn\"\n",
+ function, np);
return err;
}
diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
index a44680d64f9b..c267afb68f07 100644
--- a/drivers/phy/ti/phy-twl4030-usb.c
+++ b/drivers/phy/ti/phy-twl4030-usb.c
@@ -144,6 +144,7 @@
#define PMBR1 0x0D
#define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
+static irqreturn_t twl4030_usb_irq(int irq, void *_twl);
/*
* If VBUS is valid or ID is ground, then we know a
* cable is present and we need to be runtime-enabled
@@ -395,6 +396,33 @@ static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
}
+static int __maybe_unused twl4030_usb_suspend(struct device *dev)
+{
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+ /*
+ * Runtime PM must be enabled before the IRQ fires at resume
+ * time, so turn the IRQ off here to avoid handling it too early.
+ * Note: wakeup on USB plug works independently of this.
+ */
+ dev_dbg(twl->dev, "%s\n", __func__);
+ disable_irq(twl->irq);
+
+ return 0;
+}
+
+static int __maybe_unused twl4030_usb_resume(struct device *dev)
+{
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+ dev_dbg(twl->dev, "%s\n", __func__);
+ enable_irq(twl->irq);
+ /* check whether cable status changed */
+ twl4030_usb_irq(0, twl);
+
+ return 0;
+}
+
static int __maybe_unused twl4030_usb_runtime_suspend(struct device *dev)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
@@ -655,6 +683,7 @@ static const struct phy_ops ops = {
static const struct dev_pm_ops twl4030_usb_pm_ops = {
SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend,
twl4030_usb_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(twl4030_usb_suspend, twl4030_usb_resume)
};
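
For readers unfamiliar with the macro: with CONFIG_PM_SLEEP enabled, SET_SYSTEM_SLEEP_PM_OPS() wires the same handler pair into every system-sleep slot, roughly:

/* rough expansion sketch (CONFIG_PM_SLEEP set) */
.suspend = twl4030_usb_suspend, .resume = twl4030_usb_resume,
.freeze = twl4030_usb_suspend, .thaw = twl4030_usb_resume,
.poweroff = twl4030_usb_suspend, .restore = twl4030_usb_resume,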
static int twl4030_usb_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index e86752be1f19..4d8c00eac742 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -195,6 +195,16 @@ config PINCTRL_RZA1
help
This selects pinctrl driver for Renesas RZ/A1 platforms.
+config PINCTRL_RZN1
+ bool "Renesas RZ/N1 pinctrl driver"
+ depends on OF
+ depends on ARCH_RZN1 || COMPILE_TEST
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ help
+ This selects the pinctrl driver for Renesas RZ/N1 devices.
+
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
depends on OF
@@ -309,12 +319,14 @@ config PINCTRL_ZYNQ
config PINCTRL_INGENIC
bool "Pinctrl driver for the Ingenic JZ47xx SoCs"
- default y
+ default MACH_INGENIC
depends on OF
- depends on MACH_INGENIC || COMPILE_TEST
+ depends on MIPS || COMPILE_TEST
select GENERIC_PINCONF
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
select REGMAP_MMIO
config PINCTRL_RK805
@@ -346,6 +358,7 @@ source "drivers/pinctrl/freescale/Kconfig"
source "drivers/pinctrl/intel/Kconfig"
source "drivers/pinctrl/mvebu/Kconfig"
source "drivers/pinctrl/nomadik/Kconfig"
+source "drivers/pinctrl/nuvoton/Kconfig"
source "drivers/pinctrl/pxa/Kconfig"
source "drivers/pinctrl/qcom/Kconfig"
source "drivers/pinctrl/samsung/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 46ef9bd52096..18a13c1e2c21 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
+obj-$(CONFIG_PINCTRL_RZN1) += pinctrl-rzn1.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += sirf/
obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
@@ -51,6 +52,7 @@ obj-y += freescale/
obj-$(CONFIG_X86) += intel/
obj-y += mvebu/
obj-y += nomadik/
+obj-$(CONFIG_ARCH_NPCM7XX) += nuvoton/
obj-$(CONFIG_PINCTRL_PXA) += pxa/
obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index aefe3c33dffd..eb87ab774269 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -715,7 +715,7 @@ int aspeed_pin_config_set(struct pinctrl_dev *pctldev, unsigned int offset,
pmap = find_pinconf_map(param, MAP_TYPE_ARG, arg);
- if (unlikely(WARN_ON(!pmap)))
+ if (WARN_ON(!pmap))
return -EINVAL;
val = pmap->val << pconf->bit;
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 0f38d51f47c6..c8575399d6f7 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -73,6 +73,19 @@ config PINCTRL_CYGNUS_MUX
configuration, with the exception that certain individual pins
can be overridden to GPIO function
+config PINCTRL_NS
+ bool "Broadcom Northstar pins driver"
+ depends on OF && (ARCH_BCM_5301X || COMPILE_TEST)
+ select PINMUX
+ select GENERIC_PINCONF
+ default ARCH_BCM_5301X
+ help
+ Say yes here to enable the Broadcom NS SoC pins driver.
+
+ The Broadcom Northstar pins driver supports muxing multi-purpose pins
+ that can be used for various functions (e.g. SPI, I2C, UART) as well
+ as GPIOs.
+
config PINCTRL_NSP_GPIO
bool "Broadcom NSP GPIO (with PINCONF) driver"
depends on OF_GPIO && (ARCH_BCM_NSP || COMPILE_TEST)
diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile
index 80ceb9dae944..79d5e49fdd9a 100644
--- a/drivers/pinctrl/bcm/Makefile
+++ b/drivers/pinctrl/bcm/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
obj-$(CONFIG_PINCTRL_IPROC_GPIO) += pinctrl-iproc-gpio.o
obj-$(CONFIG_PINCTRL_CYGNUS_MUX) += pinctrl-cygnus-mux.o
+obj-$(CONFIG_PINCTRL_NS) += pinctrl-ns.o
obj-$(CONFIG_PINCTRL_NSP_GPIO) += pinctrl-nsp-gpio.o
obj-$(CONFIG_PINCTRL_NS2_MUX) += pinctrl-ns2-mux.o
obj-$(CONFIG_PINCTRL_NSP_MUX) += pinctrl-nsp-mux.o
diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
new file mode 100644
index 000000000000..d7f8175d2c1c
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-ns.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define FLAG_BCM4708 BIT(1)
+#define FLAG_BCM4709 BIT(2)
+#define FLAG_BCM53012 BIT(3)
+
+struct ns_pinctrl {
+ struct device *dev;
+ unsigned int chipset_flag;
+ struct pinctrl_dev *pctldev;
+ void __iomem *base;
+
+ struct pinctrl_desc pctldesc;
+ struct ns_pinctrl_group *groups;
+ unsigned int num_groups;
+ struct ns_pinctrl_function *functions;
+ unsigned int num_functions;
+};
+
+/*
+ * Pins
+ */
+
+static const struct pinctrl_pin_desc ns_pinctrl_pins[] = {
+ { 0, "spi_clk", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 1, "spi_ss", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 2, "spi_mosi", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 3, "spi_miso", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 4, "i2c_scl", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 5, "i2c_sda", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 6, "mdc", (void *)(FLAG_BCM4709 | FLAG_BCM53012) },
+ { 7, "mdio", (void *)(FLAG_BCM4709 | FLAG_BCM53012) },
+ { 8, "pwm0", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 9, "pwm1", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 10, "pwm2", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 11, "pwm3", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 12, "uart1_rx", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 13, "uart1_tx", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 14, "uart1_cts", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 15, "uart1_rts", (void *)(FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012) },
+ { 16, "uart2_rx", (void *)(FLAG_BCM4709 | FLAG_BCM53012) },
+ { 17, "uart2_tx", (void *)(FLAG_BCM4709 | FLAG_BCM53012) },
+/* TODO { ??, "xtal_out", (void *)(FLAG_BCM4709) }, */
+ { 22, "sdio_pwr", (void *)(FLAG_BCM4709 | FLAG_BCM53012) },
+ { 23, "sdio_en_1p8v", (void *)(FLAG_BCM4709 | FLAG_BCM53012) },
+};
+
+/*
+ * Groups
+ */
+
+struct ns_pinctrl_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned int num_pins;
+ unsigned int chipsets;
+};
+
+static const unsigned int spi_pins[] = { 0, 1, 2, 3 };
+static const unsigned int i2c_pins[] = { 4, 5 };
+static const unsigned int mdio_pins[] = { 6, 7 };
+static const unsigned int pwm0_pins[] = { 8 };
+static const unsigned int pwm1_pins[] = { 9 };
+static const unsigned int pwm2_pins[] = { 10 };
+static const unsigned int pwm3_pins[] = { 11 };
+static const unsigned int uart1_pins[] = { 12, 13, 14, 15 };
+static const unsigned int uart2_pins[] = { 16, 17 };
+static const unsigned int sdio_pwr_pins[] = { 22 };
+static const unsigned int sdio_1p8v_pins[] = { 23 };
+
+#define NS_GROUP(_name, _pins, _chipsets) \
+{ \
+ .name = _name, \
+ .pins = _pins, \
+ .num_pins = ARRAY_SIZE(_pins), \
+ .chipsets = _chipsets, \
+}
+
+static const struct ns_pinctrl_group ns_pinctrl_groups[] = {
+ NS_GROUP("spi_grp", spi_pins, FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012),
+ NS_GROUP("i2c_grp", i2c_pins, FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012),
+ NS_GROUP("mdio_grp", mdio_pins, FLAG_BCM4709 | FLAG_BCM53012),
+ NS_GROUP("pwm0_grp", pwm0_pins, FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012),
+ NS_GROUP("pwm1_grp", pwm1_pins, FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012),
+ NS_GROUP("pwm2_grp", pwm2_pins, FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012),
+ NS_GROUP("pwm3_grp", pwm3_pins, FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012),
+ NS_GROUP("uart1_grp", uart1_pins, FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012),
+ NS_GROUP("uart2_grp", uart2_pins, FLAG_BCM4709 | FLAG_BCM53012),
+ NS_GROUP("sdio_pwr_grp", sdio_pwr_pins, FLAG_BCM4709 | FLAG_BCM53012),
+ NS_GROUP("sdio_1p8v_grp", sdio_1p8v_pins, FLAG_BCM4709 | FLAG_BCM53012),
+};
+
+/*
+ * Functions
+ */
+
+struct ns_pinctrl_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned int num_groups;
+ unsigned int chipsets;
+};
+
+static const char * const spi_groups[] = { "spi_grp" };
+static const char * const i2c_groups[] = { "i2c_grp" };
+static const char * const mdio_groups[] = { "mdio_grp" };
+static const char * const pwm_groups[] = { "pwm0_grp", "pwm1_grp", "pwm2_grp",
+ "pwm3_grp" };
+static const char * const uart1_groups[] = { "uart1_grp" };
+static const char * const uart2_groups[] = { "uart2_grp" };
+static const char * const sdio_groups[] = { "sdio_pwr_grp", "sdio_1p8v_grp" };
+
+#define NS_FUNCTION(_name, _groups, _chipsets) \
+{ \
+ .name = _name, \
+ .groups = _groups, \
+ .num_groups = ARRAY_SIZE(_groups), \
+ .chipsets = _chipsets, \
+}
+
+static const struct ns_pinctrl_function ns_pinctrl_functions[] = {
+ NS_FUNCTION("spi", spi_groups, FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012),
+ NS_FUNCTION("i2c", i2c_groups, FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012),
+ NS_FUNCTION("mdio", mdio_groups, FLAG_BCM4709 | FLAG_BCM53012),
+ NS_FUNCTION("pwm", pwm_groups, FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012),
+ NS_FUNCTION("uart1", uart1_groups, FLAG_BCM4708 | FLAG_BCM4709 | FLAG_BCM53012),
+ NS_FUNCTION("uart2", uart2_groups, FLAG_BCM4709 | FLAG_BCM53012),
+ NS_FUNCTION("sdio", sdio_groups, FLAG_BCM4709 | FLAG_BCM53012),
+};
+
+/*
+ * Groups code
+ */
+
+static int ns_pinctrl_get_groups_count(struct pinctrl_dev *pctrl_dev)
+{
+ struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return ns_pinctrl->num_groups;
+}
+
+static const char *ns_pinctrl_get_group_name(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector)
+{
+ struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return ns_pinctrl->groups[selector].name;
+}
+
+static int ns_pinctrl_get_group_pins(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ *pins = ns_pinctrl->groups[selector].pins;
+ *num_pins = ns_pinctrl->groups[selector].num_pins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops ns_pinctrl_ops = {
+ .get_groups_count = ns_pinctrl_get_groups_count,
+ .get_group_name = ns_pinctrl_get_group_name,
+ .get_group_pins = ns_pinctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+/*
+ * Functions code
+ */
+
+static int ns_pinctrl_get_functions_count(struct pinctrl_dev *pctrl_dev)
+{
+ struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return ns_pinctrl->num_functions;
+}
+
+static const char *ns_pinctrl_get_function_name(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector)
+{
+ struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return ns_pinctrl->functions[selector].name;
+}
+
+static int ns_pinctrl_get_function_groups(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ *groups = ns_pinctrl->functions[selector].groups;
+ *num_groups = ns_pinctrl->functions[selector].num_groups;
+
+ return 0;
+}
+
+static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
+ unsigned int func_select,
+ unsigned int grp_select)
+{
+ struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+ u32 unset = 0;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < ns_pinctrl->groups[grp_select].num_pins; i++) {
+ int pin_number = ns_pinctrl->groups[grp_select].pins[i];
+
+ unset |= BIT(pin_number);
+ }
+
+ tmp = readl(ns_pinctrl->base);
+ tmp &= ~unset;
+ writel(tmp, ns_pinctrl->base);
+
+ return 0;
+}
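
The mux write is worth a worked example. set_mux only ever clears bits in the single cru_gpio_control register; the assumption, consistent with that, is that a set bit leaves the pin to the GPIO block and a cleared bit hands it to its dedicated function:

/* worked example: selecting "i2c_grp" (pins 4 and 5) */
u32 tmp = readl(ns_pinctrl->base);  /* e.g. 0xffffffff: all pins as GPIO */
tmp &= ~(BIT(4) | BIT(5));          /* hand pins 4-5 to the i2c function */
writel(tmp, ns_pinctrl->base);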
+
+static const struct pinmux_ops ns_pinctrl_pmxops = {
+ .get_functions_count = ns_pinctrl_get_functions_count,
+ .get_function_name = ns_pinctrl_get_function_name,
+ .get_function_groups = ns_pinctrl_get_function_groups,
+ .set_mux = ns_pinctrl_set_mux,
+};
+
+/*
+ * Controller code
+ */
+
+static struct pinctrl_desc ns_pinctrl_desc = {
+ .name = "pinctrl-ns",
+ .pctlops = &ns_pinctrl_ops,
+ .pmxops = &ns_pinctrl_pmxops,
+};
+
+static const struct of_device_id ns_pinctrl_of_match_table[] = {
+ { .compatible = "brcm,bcm4708-pinmux", .data = (void *)FLAG_BCM4708, },
+ { .compatible = "brcm,bcm4709-pinmux", .data = (void *)FLAG_BCM4709, },
+ { .compatible = "brcm,bcm53012-pinmux", .data = (void *)FLAG_BCM53012, },
+ { }
+};
+
+static int ns_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ struct ns_pinctrl *ns_pinctrl;
+ struct pinctrl_desc *pctldesc;
+ struct pinctrl_pin_desc *pin;
+ struct ns_pinctrl_group *group;
+ struct ns_pinctrl_function *function;
+ struct resource *res;
+ int i;
+
+ ns_pinctrl = devm_kzalloc(dev, sizeof(*ns_pinctrl), GFP_KERNEL);
+ if (!ns_pinctrl)
+ return -ENOMEM;
+ pctldesc = &ns_pinctrl->pctldesc;
+ platform_set_drvdata(pdev, ns_pinctrl);
+
+ /* Set basic properties */
+
+ ns_pinctrl->dev = dev;
+
+ of_id = of_match_device(ns_pinctrl_of_match_table, dev);
+ if (!of_id)
+ return -EINVAL;
+ ns_pinctrl->chipset_flag = (uintptr_t)of_id->data;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cru_gpio_control");
+ ns_pinctrl->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ns_pinctrl->base)) {
+ dev_err(dev, "Failed to map pinctrl regs\n");
+ return PTR_ERR(ns_pinctrl->base);
+ }
+
+ memcpy(pctldesc, &ns_pinctrl_desc, sizeof(*pctldesc));
+
+ /* Set pinctrl properties */
+
+ pctldesc->pins = devm_kcalloc(dev, ARRAY_SIZE(ns_pinctrl_pins),
+ sizeof(struct pinctrl_pin_desc),
+ GFP_KERNEL);
+ if (!pctldesc->pins)
+ return -ENOMEM;
+ for (i = 0, pin = (struct pinctrl_pin_desc *)&pctldesc->pins[0];
+ i < ARRAY_SIZE(ns_pinctrl_pins); i++) {
+ const struct pinctrl_pin_desc *src = &ns_pinctrl_pins[i];
+ unsigned int chipsets = (uintptr_t)src->drv_data;
+
+ if (chipsets & ns_pinctrl->chipset_flag) {
+ memcpy(pin++, src, sizeof(*src));
+ pctldesc->npins++;
+ }
+ }
+
+ ns_pinctrl->groups = devm_kcalloc(dev, ARRAY_SIZE(ns_pinctrl_groups),
+ sizeof(struct ns_pinctrl_group),
+ GFP_KERNEL);
+ if (!ns_pinctrl->groups)
+ return -ENOMEM;
+ for (i = 0, group = &ns_pinctrl->groups[0];
+ i < ARRAY_SIZE(ns_pinctrl_groups); i++) {
+ const struct ns_pinctrl_group *src = &ns_pinctrl_groups[i];
+
+ if (src->chipsets & ns_pinctrl->chipset_flag) {
+ memcpy(group++, src, sizeof(*src));
+ ns_pinctrl->num_groups++;
+ }
+ }
+
+ ns_pinctrl->functions = devm_kcalloc(dev,
+ ARRAY_SIZE(ns_pinctrl_functions),
+ sizeof(struct ns_pinctrl_function),
+ GFP_KERNEL);
+ if (!ns_pinctrl->functions)
+ return -ENOMEM;
+ for (i = 0, function = &ns_pinctrl->functions[0];
+ i < ARRAY_SIZE(ns_pinctrl_functions); i++) {
+ const struct ns_pinctrl_function *src = &ns_pinctrl_functions[i];
+
+ if (src->chipsets & ns_pinctrl->chipset_flag) {
+ memcpy(function++, src, sizeof(*src));
+ ns_pinctrl->num_functions++;
+ }
+ }
+
+ /* Register */
+
+ ns_pinctrl->pctldev = devm_pinctrl_register(dev, pctldesc, ns_pinctrl);
+ if (IS_ERR(ns_pinctrl->pctldev)) {
+ dev_err(dev, "Failed to register pinctrl\n");
+ return PTR_ERR(ns_pinctrl->pctldev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver ns_pinctrl_driver = {
+ .probe = ns_pinctrl_probe,
+ .driver = {
+ .name = "ns-pinmux",
+ .of_match_table = ns_pinctrl_of_match_table,
+ },
+};
+
+module_platform_driver(ns_pinctrl_driver);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, ns_pinctrl_of_match_table);
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index b5903fffb3d0..b17a03cf87be 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -64,16 +64,14 @@ static int berlin_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrl_dev,
ret = of_property_read_string(node, "function", &function_name);
if (ret) {
dev_err(pctrl->dev,
- "missing function property in node %s\n",
- node->name);
+ "missing function property in node %pOFn\n", node);
return -EINVAL;
}
ngroups = of_property_count_strings(node, "groups");
if (ngroups < 0) {
dev_err(pctrl->dev,
- "missing groups property in node %s\n",
- node->name);
+ "missing groups property in node %pOFn\n", node);
return -EINVAL;
}
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index c4f4d904e4a6..a5dda832024a 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -550,7 +550,7 @@ static void __maybe_unused madera_pin_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, " DRV=%umA", madera_pin_unmake_drv_str(priv, conf[1]));
if (conf[0] & MADERA_GP1_IP_CFG_MASK)
- seq_puts(s, "SCHMITT");
+ seq_puts(s, " SCHMITT");
}
@@ -608,7 +608,7 @@ static int madera_mux_set_mux(struct pinctrl_dev *pctldev,
unsigned int n_chip_groups = priv->chip->n_pin_groups;
const char *func_name = madera_mux_funcs[selector].name;
unsigned int reg;
- int i, ret;
+ int i, ret = 0;
dev_dbg(priv->dev, "%s selecting %u (%s) for group %u (%s)\n",
__func__, selector, func_name, group,
@@ -801,7 +801,7 @@ static int madera_pin_conf_get(struct pinctrl_dev *pctldev, unsigned int pin,
result = 1;
break;
default:
- break;
+ return -ENOTSUPP;
}
*config = pinconf_to_config_packed(param, result);
@@ -905,7 +905,7 @@ static int madera_pin_conf_set(struct pinctrl_dev *pctldev, unsigned int pin,
conf[1] &= ~MADERA_GP1_DIR;
break;
default:
- break;
+ return -ENOTSUPP;
}
++configs;
@@ -971,10 +971,10 @@ static int madera_pin_conf_group_set(struct pinctrl_dev *pctldev,
}
static const struct pinconf_ops madera_pin_conf_ops = {
+ .is_generic = true,
.pin_config_get = madera_pin_conf_get,
.pin_config_set = madera_pin_conf_set,
.pin_config_group_set = madera_pin_conf_group_set,
-
};
static struct pinctrl_desc madera_pin_desc = {
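
Returning -ENOTSUPP from the default case tells the pinconf core that the queried parameter is simply not implemented for this pin, instead of silently reporting success, and .is_generic opts the driver into the generic pinconf debugfs output. A sketch of the resulting handler shape, with foo_read_pull() as a hypothetical register helper:

static int foo_pin_conf_get(struct pinctrl_dev *pctldev, unsigned int pin,
                            unsigned long *config)
{
        enum pin_config_param param = pinconf_to_config_param(*config);
        u32 arg;

        switch (param) {
        case PIN_CONFIG_BIAS_PULL_UP:
                arg = foo_read_pull(pin);       /* hypothetical helper */
                break;
        default:
                return -ENOTSUPP;       /* param not handled for this pin */
        }

        *config = pinconf_to_config_packed(param, arg);
        return 0;
}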
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index a3dd777e3ce8..c6ff4d5fa482 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -627,7 +627,7 @@ static int pinctrl_generic_group_name_to_selector(struct pinctrl_dev *pctldev,
while (selector < ngroups) {
const char *gname = ops->get_group_name(pctldev, selector);
- if (!strcmp(function, gname))
+ if (gname && !strcmp(function, gname))
return selector;
selector++;
@@ -743,7 +743,7 @@ int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
while (group_selector < ngroups) {
const char *gname = pctlops->get_group_name(pctldev,
group_selector);
- if (!strcmp(gname, pin_group)) {
+ if (gname && !strcmp(gname, pin_group)) {
dev_dbg(pctldev->dev,
"found group selector %u for %s\n",
group_selector,
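
Both lookup loops now tolerate a NULL group name, which can occur for holes in a sparse group space; passing NULL to strcmp() is undefined behaviour. A standalone illustration of the guarded lookup:

#include <stdio.h>
#include <string.h>

/* A sparse selector space: some slots have no name. */
static const char *names[] = { "grp0", NULL, "grp2" };

static int name_to_selector(const char *want)
{
        size_t i;

        for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
                /* Skip holes: strcmp(NULL, ...) is undefined behaviour. */
                if (names[i] && !strcmp(names[i], want))
                        return (int)i;
        }
        return -1;
}

int main(void)
{
        printf("%d\n", name_to_selector("grp2"));       /* prints 2 */
        return 0;
}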
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index b04edc22dad7..4e8cf0e357c6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -69,8 +69,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
*/
grp = imx_pinctrl_find_group_by_name(pctldev, np->name);
if (!grp) {
- dev_err(ipctl->dev, "unable to find group for node %s\n",
- np->name);
+ dev_err(ipctl->dev, "unable to find group for node %pOFn\n", np);
return -EINVAL;
}
@@ -434,7 +433,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
int i;
u32 config;
- dev_dbg(ipctl->dev, "group(%d): %s\n", index, np->name);
+ dev_dbg(ipctl->dev, "group(%d): %pOFn\n", index, np);
if (info->flags & SHARE_MUX_CONF_REG)
pin_size = FSL_PIN_SHARE_SIZE;
@@ -544,7 +543,7 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
struct group_desc *grp;
u32 i = 0;
- dev_dbg(pctl->dev, "parse function(%d): %s\n", index, np->name);
+ dev_dbg(pctl->dev, "parse function(%d): %pOFn\n", index, np);
func = pinmux_generic_get_function(pctl, index);
if (!func)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index deb7870b3d1a..7e29e3fecdb2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -233,8 +233,8 @@ static int imx1_dt_node_to_map(struct pinctrl_dev *pctldev,
*/
grp = imx1_pinctrl_find_group_by_name(info, np->name);
if (!grp) {
- dev_err(info->dev, "unable to find group for node %s\n",
- np->name);
+ dev_err(info->dev, "unable to find group for node %pOFn\n",
+ np);
return -EINVAL;
}
@@ -466,7 +466,7 @@ static int imx1_pinctrl_parse_groups(struct device_node *np,
const __be32 *list;
int i;
- dev_dbg(info->dev, "group(%d): %s\n", index, np->name);
+ dev_dbg(info->dev, "group(%d): %pOFn\n", index, np);
/* Initialise group */
grp->name = np->name;
@@ -477,8 +477,8 @@ static int imx1_pinctrl_parse_groups(struct device_node *np,
list = of_get_property(np, "fsl,pins", &size);
/* we do not check the return value since the node passed down is known to be safe */
if (!size || size % 12) {
- dev_notice(info->dev, "Not a valid fsl,pins property (%s)\n",
- np->name);
+ dev_notice(info->dev, "Not a valid fsl,pins property (%pOFn)\n",
+ np);
return -EINVAL;
}
@@ -513,7 +513,7 @@ static int imx1_pinctrl_parse_functions(struct device_node *np,
static u32 grp_index;
u32 i = 0;
- dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name);
+ dev_dbg(info->dev, "parse function(%d): %pOFn\n", index, np);
func = &info->functions[index];
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index a612e46ca51c..641b3088876f 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -556,4 +556,3 @@ err:
iounmap(d->base);
return ret;
}
-EXPORT_SYMBOL_GPL(mxs_pinctrl_probe);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index f38d596efa05..6d1a43c0c251 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -6,18 +6,19 @@
* Author: Mathias Nyman <mathias.nyman@linux.intel.com>
*/
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
+#include <linux/acpi.h>
#include <linux/bitops.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
-#include <linux/acpi.h>
-#include <linux/platform_device.h>
-#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/seq_file.h>
+
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
@@ -682,7 +683,7 @@ static const struct pinctrl_pin_desc byt_ncore_pins[] = {
PINCTRL_PIN(27, "GPIO_NCORE27"),
};
-static unsigned const byt_ncore_pins_map[BYT_NGPIO_NCORE] = {
+static const unsigned int byt_ncore_pins_map[BYT_NGPIO_NCORE] = {
19, 18, 17, 20, 21, 22, 24, 25, 23, 16,
14, 15, 12, 26, 27, 1, 4, 8, 11, 0,
3, 6, 10, 13, 2, 5, 9, 7,
@@ -926,7 +927,7 @@ static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
return 0;
}
-static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
+static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned int offset)
{
/* SCORE pin 92-93 */
if (!strcmp(vg->soc_data->uid, BYT_SCORE_ACPI_UID) &&
@@ -1310,7 +1311,7 @@ static const struct pinctrl_desc byt_pinctrl_desc = {
.owner = THIS_MODULE,
};
-static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct byt_gpio *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
@@ -1324,7 +1325,7 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(val & BYT_LEVEL);
}
-static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct byt_gpio *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
@@ -1358,9 +1359,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
raw_spin_unlock_irqrestore(&vg->lock, flags);
if (!(value & BYT_OUTPUT_EN))
- return GPIOF_DIR_OUT;
+ return 0;
if (!(value & BYT_INPUT_EN))
- return GPIOF_DIR_IN;
+ return 1;
return -EINVAL;
}
@@ -1495,7 +1496,7 @@ static void byt_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct byt_gpio *vg = gpiochip_get_data(gc);
- unsigned offset = irqd_to_hwirq(d);
+ unsigned int offset = irqd_to_hwirq(d);
void __iomem *reg;
reg = byt_gpio_reg(vg, offset, BYT_INT_STAT_REG);
@@ -1519,7 +1520,7 @@ static void byt_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct byt_gpio *vg = gpiochip_get_data(gc);
- unsigned offset = irqd_to_hwirq(d);
+ unsigned int offset = irqd_to_hwirq(d);
unsigned long flags;
void __iomem *reg;
u32 value;
@@ -1775,13 +1776,11 @@ static const struct acpi_device_id byt_gpio_acpi_match[] = {
{ "INT33FC", (kernel_ulong_t)byt_soc_data },
{ }
};
-MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
static int byt_pinctrl_probe(struct platform_device *pdev)
{
const struct byt_pinctrl_soc_data *soc_data = NULL;
const struct byt_pinctrl_soc_data **soc_table;
- const struct acpi_device_id *acpi_id;
struct acpi_device *acpi_dev;
struct byt_gpio *vg;
int i, ret;
@@ -1790,11 +1789,7 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
if (!acpi_dev)
return -ENODEV;
- acpi_id = acpi_match_device(byt_gpio_acpi_match, &pdev->dev);
- if (!acpi_id)
- return -ENODEV;
-
- soc_table = (const struct byt_pinctrl_soc_data **)acpi_id->driver_data;
+ soc_table = (const struct byt_pinctrl_soc_data **)device_get_match_data(&pdev->dev);
for (i = 0; soc_table[i]; i++) {
if (!strcmp(acpi_dev->pnp.unique_id, soc_table[i]->uid)) {
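
The open-coded acpi_match_device() lookup is replaced by device_get_match_data(), which returns the matched entry's driver_data regardless of whether the device matched via an ACPI or an OF table. (The byt_gpio_get_direction() hunk also returns raw 0/1 instead of GPIOF_DIR_OUT/GPIOF_DIR_IN, which have those values anyway.) A sketch of the simplified lookup, where foo_soc_data and foo_setup() are illustrative placeholders:

#include <linux/platform_device.h>
#include <linux/property.h>

static int foo_probe(struct platform_device *pdev)
{
        const struct foo_soc_data *data;

        /* Returns driver_data from whichever table (ACPI or OF) matched. */
        data = device_get_match_data(&pdev->dev);
        if (!data)
                return -ENODEV;

        return foo_setup(pdev, data);   /* hypothetical setup helper */
}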
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index 8b1c7b59ad3e..68fefd4618bd 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -6,10 +6,10 @@
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
+
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-intel.h"
@@ -117,17 +117,17 @@ static const struct pinctrl_pin_desc bxt_north_pins[] = {
PINCTRL_PIN(82, "TDO"),
};
-static const unsigned bxt_north_pwm0_pins[] = { 34 };
-static const unsigned bxt_north_pwm1_pins[] = { 35 };
-static const unsigned bxt_north_pwm2_pins[] = { 36 };
-static const unsigned bxt_north_pwm3_pins[] = { 37 };
-static const unsigned bxt_north_uart0_pins[] = { 38, 39, 40, 41 };
-static const unsigned bxt_north_uart1_pins[] = { 42, 43, 44, 45 };
-static const unsigned bxt_north_uart2_pins[] = { 46, 47, 48, 49 };
-static const unsigned bxt_north_uart0b_pins[] = { 50, 51, 52, 53 };
-static const unsigned bxt_north_uart1b_pins[] = { 54, 55, 56, 57 };
-static const unsigned bxt_north_uart2b_pins[] = { 58, 59, 60, 61 };
-static const unsigned bxt_north_uart3_pins[] = { 58, 59, 60, 61 };
+static const unsigned int bxt_north_pwm0_pins[] = { 34 };
+static const unsigned int bxt_north_pwm1_pins[] = { 35 };
+static const unsigned int bxt_north_pwm2_pins[] = { 36 };
+static const unsigned int bxt_north_pwm3_pins[] = { 37 };
+static const unsigned int bxt_north_uart0_pins[] = { 38, 39, 40, 41 };
+static const unsigned int bxt_north_uart1_pins[] = { 42, 43, 44, 45 };
+static const unsigned int bxt_north_uart2_pins[] = { 46, 47, 48, 49 };
+static const unsigned int bxt_north_uart0b_pins[] = { 50, 51, 52, 53 };
+static const unsigned int bxt_north_uart1b_pins[] = { 54, 55, 56, 57 };
+static const unsigned int bxt_north_uart2b_pins[] = { 58, 59, 60, 61 };
+static const unsigned int bxt_north_uart3_pins[] = { 58, 59, 60, 61 };
static const struct intel_pingroup bxt_north_groups[] = {
PIN_GROUP("pwm0_grp", bxt_north_pwm0_pins, 1),
@@ -260,12 +260,12 @@ static const struct pinctrl_pin_desc bxt_northwest_pins[] = {
PINCTRL_PIN(71, "GP_SSP_2_TXD"),
};
-static const unsigned bxt_northwest_ssp0_pins[] = { 53, 54, 55, 56, 57, 58 };
-static const unsigned bxt_northwest_ssp1_pins[] = {
+static const unsigned int bxt_northwest_ssp0_pins[] = { 53, 54, 55, 56, 57, 58 };
+static const unsigned int bxt_northwest_ssp1_pins[] = {
59, 60, 61, 62, 63, 64, 65
};
-static const unsigned bxt_northwest_ssp2_pins[] = { 66, 67, 68, 69, 70, 71 };
-static const unsigned bxt_northwest_uart3_pins[] = { 67, 68, 69, 70 };
+static const unsigned int bxt_northwest_ssp2_pins[] = { 66, 67, 68, 69, 70, 71 };
+static const unsigned int bxt_northwest_uart3_pins[] = { 67, 68, 69, 70 };
static const struct intel_pingroup bxt_northwest_groups[] = {
PIN_GROUP("ssp0_grp", bxt_northwest_ssp0_pins, 1),
@@ -347,17 +347,17 @@ static const struct pinctrl_pin_desc bxt_west_pins[] = {
PINCTRL_PIN(41, "OSC_CLK_OUT_3"),
};
-static const unsigned bxt_west_i2c0_pins[] = { 0, 1 };
-static const unsigned bxt_west_i2c1_pins[] = { 2, 3 };
-static const unsigned bxt_west_i2c2_pins[] = { 4, 5 };
-static const unsigned bxt_west_i2c3_pins[] = { 6, 7 };
-static const unsigned bxt_west_i2c4_pins[] = { 8, 9 };
-static const unsigned bxt_west_i2c5_pins[] = { 10, 11 };
-static const unsigned bxt_west_i2c6_pins[] = { 12, 13 };
-static const unsigned bxt_west_i2c7_pins[] = { 14, 15 };
-static const unsigned bxt_west_i2c5b_pins[] = { 16, 17 };
-static const unsigned bxt_west_i2c6b_pins[] = { 18, 19 };
-static const unsigned bxt_west_i2c7b_pins[] = { 20, 21 };
+static const unsigned int bxt_west_i2c0_pins[] = { 0, 1 };
+static const unsigned int bxt_west_i2c1_pins[] = { 2, 3 };
+static const unsigned int bxt_west_i2c2_pins[] = { 4, 5 };
+static const unsigned int bxt_west_i2c3_pins[] = { 6, 7 };
+static const unsigned int bxt_west_i2c4_pins[] = { 8, 9 };
+static const unsigned int bxt_west_i2c5_pins[] = { 10, 11 };
+static const unsigned int bxt_west_i2c6_pins[] = { 12, 13 };
+static const unsigned int bxt_west_i2c7_pins[] = { 14, 15 };
+static const unsigned int bxt_west_i2c5b_pins[] = { 16, 17 };
+static const unsigned int bxt_west_i2c6b_pins[] = { 18, 19 };
+static const unsigned int bxt_west_i2c7b_pins[] = { 20, 21 };
static const struct intel_pingroup bxt_west_groups[] = {
PIN_GROUP("i2c0_grp", bxt_west_i2c0_pins, 1),
@@ -443,13 +443,13 @@ static const struct pinctrl_pin_desc bxt_southwest_pins[] = {
PINCTRL_PIN(30, "SDCARD_LVL_WP"),
};
-static const unsigned bxt_southwest_emmc0_pins[] = {
+static const unsigned int bxt_southwest_emmc0_pins[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 26,
};
-static const unsigned bxt_southwest_sdio_pins[] = {
+static const unsigned int bxt_southwest_sdio_pins[] = {
10, 11, 12, 13, 14, 15, 27,
};
-static const unsigned bxt_southwest_sdcard_pins[] = {
+static const unsigned int bxt_southwest_sdcard_pins[] = {
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 28, 29, 30,
};
@@ -611,13 +611,13 @@ static const struct pinctrl_pin_desc apl_north_pins[] = {
PINCTRL_PIN(77, "SVID0_CLK"),
};
-static const unsigned apl_north_pwm0_pins[] = { 34 };
-static const unsigned apl_north_pwm1_pins[] = { 35 };
-static const unsigned apl_north_pwm2_pins[] = { 36 };
-static const unsigned apl_north_pwm3_pins[] = { 37 };
-static const unsigned apl_north_uart0_pins[] = { 38, 39, 40, 41 };
-static const unsigned apl_north_uart1_pins[] = { 42, 43, 44, 45 };
-static const unsigned apl_north_uart2_pins[] = { 46, 47, 48, 49 };
+static const unsigned int apl_north_pwm0_pins[] = { 34 };
+static const unsigned int apl_north_pwm1_pins[] = { 35 };
+static const unsigned int apl_north_pwm2_pins[] = { 36 };
+static const unsigned int apl_north_pwm3_pins[] = { 37 };
+static const unsigned int apl_north_uart0_pins[] = { 38, 39, 40, 41 };
+static const unsigned int apl_north_uart1_pins[] = { 42, 43, 44, 45 };
+static const unsigned int apl_north_uart2_pins[] = { 46, 47, 48, 49 };
static const struct intel_pingroup apl_north_groups[] = {
PIN_GROUP("pwm0_grp", apl_north_pwm0_pins, 1),
@@ -743,10 +743,10 @@ static const struct pinctrl_pin_desc apl_northwest_pins[] = {
PINCTRL_PIN(76, "GP_SSP_2_TXD"),
};
-static const unsigned apl_northwest_ssp0_pins[] = { 61, 62, 63, 64, 65 };
-static const unsigned apl_northwest_ssp1_pins[] = { 66, 67, 68, 69, 70 };
-static const unsigned apl_northwest_ssp2_pins[] = { 71, 72, 73, 74, 75, 76 };
-static const unsigned apl_northwest_uart3_pins[] = { 67, 68, 69, 70 };
+static const unsigned int apl_northwest_ssp0_pins[] = { 61, 62, 63, 64, 65 };
+static const unsigned int apl_northwest_ssp1_pins[] = { 66, 67, 68, 69, 70 };
+static const unsigned int apl_northwest_ssp2_pins[] = { 71, 72, 73, 74, 75, 76 };
+static const unsigned int apl_northwest_uart3_pins[] = { 67, 68, 69, 70 };
static const struct intel_pingroup apl_northwest_groups[] = {
PIN_GROUP("ssp0_grp", apl_northwest_ssp0_pins, 1),
@@ -833,15 +833,15 @@ static const struct pinctrl_pin_desc apl_west_pins[] = {
PINCTRL_PIN(46, "SUSPWRDNACK"),
};
-static const unsigned apl_west_i2c0_pins[] = { 0, 1 };
-static const unsigned apl_west_i2c1_pins[] = { 2, 3 };
-static const unsigned apl_west_i2c2_pins[] = { 4, 5 };
-static const unsigned apl_west_i2c3_pins[] = { 6, 7 };
-static const unsigned apl_west_i2c4_pins[] = { 8, 9 };
-static const unsigned apl_west_i2c5_pins[] = { 10, 11 };
-static const unsigned apl_west_i2c6_pins[] = { 12, 13 };
-static const unsigned apl_west_i2c7_pins[] = { 14, 15 };
-static const unsigned apl_west_uart2_pins[] = { 20, 21, 22, 34 };
+static const unsigned int apl_west_i2c0_pins[] = { 0, 1 };
+static const unsigned int apl_west_i2c1_pins[] = { 2, 3 };
+static const unsigned int apl_west_i2c2_pins[] = { 4, 5 };
+static const unsigned int apl_west_i2c3_pins[] = { 6, 7 };
+static const unsigned int apl_west_i2c4_pins[] = { 8, 9 };
+static const unsigned int apl_west_i2c5_pins[] = { 10, 11 };
+static const unsigned int apl_west_i2c6_pins[] = { 12, 13 };
+static const unsigned int apl_west_i2c7_pins[] = { 14, 15 };
+static const unsigned int apl_west_uart2_pins[] = { 20, 21, 22, 34 };
static const struct intel_pingroup apl_west_groups[] = {
PIN_GROUP("i2c0_grp", apl_west_i2c0_pins, 1),
@@ -939,16 +939,16 @@ static const struct pinctrl_pin_desc apl_southwest_pins[] = {
PINCTRL_PIN(42, "LPC_FRAMEB"),
};
-static const unsigned apl_southwest_emmc0_pins[] = {
+static const unsigned int apl_southwest_emmc0_pins[] = {
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 29,
};
-static const unsigned apl_southwest_sdio_pins[] = {
+static const unsigned int apl_southwest_sdio_pins[] = {
14, 15, 16, 17, 18, 19, 30,
};
-static const unsigned apl_southwest_sdcard_pins[] = {
+static const unsigned int apl_southwest_sdcard_pins[] = {
20, 21, 22, 23, 24, 25, 26, 27, 28,
};
-static const unsigned apl_southwest_i2c7_pins[] = { 32, 33 };
+static const unsigned int apl_southwest_i2c7_pins[] = { 32, 33 };
static const struct intel_pingroup apl_southwest_groups[] = {
PIN_GROUP("emmc0_grp", apl_southwest_emmc0_pins, 1),
@@ -1008,50 +1008,10 @@ static const struct platform_device_id bxt_pinctrl_platform_ids[] = {
static int bxt_pinctrl_probe(struct platform_device *pdev)
{
- const struct intel_pinctrl_soc_data *soc_data = NULL;
- const struct intel_pinctrl_soc_data **soc_table;
- struct acpi_device *adev;
- int i;
-
- adev = ACPI_COMPANION(&pdev->dev);
- if (adev) {
- const struct acpi_device_id *id;
-
- id = acpi_match_device(bxt_pinctrl_acpi_match, &pdev->dev);
- if (!id)
- return -ENODEV;
-
- soc_table = (const struct intel_pinctrl_soc_data **)
- id->driver_data;
-
- for (i = 0; soc_table[i]; i++) {
- if (!strcmp(adev->pnp.unique_id, soc_table[i]->uid)) {
- soc_data = soc_table[i];
- break;
- }
- }
- } else {
- const struct platform_device_id *pid;
-
- pid = platform_get_device_id(pdev);
- if (!pid)
- return -ENODEV;
-
- soc_table = (const struct intel_pinctrl_soc_data **)
- pid->driver_data;
- soc_data = soc_table[pdev->id];
- }
-
- if (!soc_data)
- return -ENODEV;
-
- return intel_pinctrl_probe(pdev, soc_data);
+ return intel_pinctrl_probe_by_uid(pdev);
}
-static const struct dev_pm_ops bxt_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
- intel_pinctrl_resume)
-};
+static INTEL_PINCTRL_PM_OPS(bxt_pinctrl_pm_ops);
static struct platform_driver bxt_pinctrl_driver = {
.probe = bxt_pinctrl_probe,
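
Each of these drivers replaces its copy-pasted dev_pm_ops block with the INTEL_PINCTRL_PM_OPS() helper. Its definition lives in pinctrl-intel.h and is not part of these hunks; a plausible shape, given that the call sites supply the static storage class themselves:

/* Plausible shape; the authoritative definition is in pinctrl-intel.h. */
#define INTEL_PINCTRL_PM_OPS(_name)                                     \
const struct dev_pm_ops _name = {                                       \
        SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,             \
                                     intel_pinctrl_resume)              \
}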
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index fb1afe55bf53..fb121b3ed2f2 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -7,18 +7,19 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
+
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-intel.h"
-#define CNL_PAD_OWN 0x020
-#define CNL_PADCFGLOCK 0x080
-#define CNL_HOSTSW_OWN 0x0b0
-#define CNL_GPI_IE 0x120
+#define CNL_PAD_OWN 0x020
+#define CNL_PADCFGLOCK 0x080
+#define CNL_LP_HOSTSW_OWN 0x0b0
+#define CNL_H_HOSTSW_OWN 0x0c0
+#define CNL_GPI_IE 0x120
#define CNL_GPP(r, s, e, g) \
{ \
@@ -30,12 +31,12 @@
#define CNL_NO_GPIO -1
-#define CNL_COMMUNITY(b, s, e, g) \
+#define CNL_COMMUNITY(b, s, e, o, g) \
{ \
.barno = (b), \
.padown_offset = CNL_PAD_OWN, \
.padcfglock_offset = CNL_PADCFGLOCK, \
- .hostown_offset = CNL_HOSTSW_OWN, \
+ .hostown_offset = (o), \
.ie_offset = CNL_GPI_IE, \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
@@ -43,6 +44,12 @@
.ngpps = ARRAY_SIZE(g), \
}
+#define CNLLP_COMMUNITY(b, s, e, g) \
+ CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g)
+
+#define CNLH_COMMUNITY(b, s, e, g) \
+ CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g)
+
/* Cannon Lake-H */
static const struct pinctrl_pin_desc cnlh_pins[] = {
/* GPP_A */
@@ -379,7 +386,7 @@ static const struct intel_padgroup cnlh_community1_gpps[] = {
static const struct intel_padgroup cnlh_community3_gpps[] = {
CNL_GPP(0, 155, 178, 192), /* GPP_K */
CNL_GPP(1, 179, 202, 224), /* GPP_H */
- CNL_GPP(2, 203, 215, 258), /* GPP_E */
+ CNL_GPP(2, 203, 215, 256), /* GPP_E */
CNL_GPP(3, 216, 239, 288), /* GPP_F */
CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */
};
@@ -442,10 +449,10 @@ static const struct intel_function cnlh_functions[] = {
};
static const struct intel_community cnlh_communities[] = {
- CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
- CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
- CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
- CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
+ CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
+ CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
+ CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
+ CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
};
static const struct intel_pinctrl_soc_data cnlh_soc_data = {
@@ -803,9 +810,9 @@ static const struct intel_padgroup cnllp_community4_gpps[] = {
};
static const struct intel_community cnllp_communities[] = {
- CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
- CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
- CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
+ CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
+ CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
+ CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
};
static const struct intel_pinctrl_soc_data cnllp_soc_data = {
@@ -828,21 +835,10 @@ MODULE_DEVICE_TABLE(acpi, cnl_pinctrl_acpi_match);
static int cnl_pinctrl_probe(struct platform_device *pdev)
{
- const struct intel_pinctrl_soc_data *soc_data;
- const struct acpi_device_id *id;
-
- id = acpi_match_device(cnl_pinctrl_acpi_match, &pdev->dev);
- if (!id || !id->driver_data)
- return -ENODEV;
-
- soc_data = (const struct intel_pinctrl_soc_data *)id->driver_data;
- return intel_pinctrl_probe(pdev, soc_data);
+ return intel_pinctrl_probe_by_hid(pdev);
}
-static const struct dev_pm_ops cnl_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
- intel_pinctrl_resume)
-};
+static INTEL_PINCTRL_PM_OPS(cnl_pinctrl_pm_ops);
static struct platform_driver cnl_pinctrl_driver = {
.probe = cnl_pinctrl_probe,
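
Cannon Lake-H places HOSTSW_OWN at 0x0c0 while Cannon Lake-LP keeps it at 0x0b0, so CNL_COMMUNITY() gains a host-ownership offset parameter and two thin wrappers; the same series of hunks also corrects GPP_E's GPIO base from 258 to 256. A compilable miniature of that macro layering (struct and field names illustrative):

struct community {
        unsigned int barno;
        unsigned int hostown_offset;
};

#define COMMUNITY(b, o) { .barno = (b), .hostown_offset = (o) }
#define LP_COMMUNITY(b) COMMUNITY((b), 0x0b0)   /* Cannon Lake-LP */
#define H_COMMUNITY(b)  COMMUNITY((b), 0x0c0)   /* Cannon Lake-H */

static const struct community comms[] = {
        LP_COMMUNITY(0),
        H_COMMUNITY(1),
};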
diff --git a/drivers/pinctrl/intel/pinctrl-cedarfork.c b/drivers/pinctrl/intel/pinctrl-cedarfork.c
index c788e37e338e..7e068fc61ce1 100644
--- a/drivers/pinctrl/intel/pinctrl-cedarfork.c
+++ b/drivers/pinctrl/intel/pinctrl-cedarfork.c
@@ -9,7 +9,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
+
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-intel.h"
@@ -335,10 +335,7 @@ static int cdf_pinctrl_probe(struct platform_device *pdev)
return intel_pinctrl_probe(pdev, &cdf_soc_data);
}
-static const struct dev_pm_ops cdf_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
- intel_pinctrl_resume)
-};
+static INTEL_PINCTRL_PM_OPS(cdf_pinctrl_pm_ops);
static const struct acpi_device_id cdf_pinctrl_acpi_match[] = {
{ "INTC3001" },
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 6d31ad799987..9b0f4b9ef482 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -10,19 +10,20 @@
* Alan Cox <alan@linux.intel.com>
*/
+#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
-#include <linux/gpio.h>
-#include <linux/gpio/driver.h>
-#include <linux/acpi.h>
+
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/platform_device.h>
+
+#include "pinctrl-intel.h"
#define CHV_INTSTAT 0x300
#define CHV_INTMASK 0x380
@@ -73,7 +74,7 @@
* @invert_oe: Invert OE for this pin
*/
struct chv_alternate_function {
- unsigned pin;
+ unsigned int pin;
u8 mode;
bool invert_oe;
};
@@ -90,7 +91,7 @@ struct chv_alternate_function {
*/
struct chv_pingroup {
const char *name;
- const unsigned *pins;
+ const unsigned int *pins;
size_t npins;
struct chv_alternate_function altfunc;
const struct chv_alternate_function *overrides;
@@ -98,25 +99,13 @@ struct chv_pingroup {
};
/**
- * struct chv_function - A CHV pinmux function
- * @name: Name of the function
- * @groups: An array of groups for this function
- * @ngroups: Number of groups in @groups
- */
-struct chv_function {
- const char *name;
- const char * const *groups;
- size_t ngroups;
-};
-
-/**
* struct chv_gpio_pinrange - A range of pins that can be used as GPIOs
* @base: Start pin number
* @npins: Number of pins in this range
*/
struct chv_gpio_pinrange {
- unsigned base;
- unsigned npins;
+ unsigned int base;
+ unsigned int npins;
};
/**
@@ -131,6 +120,7 @@ struct chv_gpio_pinrange {
* @gpio_ranges: An array of GPIO ranges in this community
* @ngpio_ranges: Number of GPIO ranges
* @nirqs: Total number of IRQs this community can generate
+ * @acpi_space_id: An address space ID for ACPI OpRegion handler
*/
struct chv_community {
const char *uid;
@@ -138,7 +128,7 @@ struct chv_community {
size_t npins;
const struct chv_pingroup *groups;
size_t ngroups;
- const struct chv_function *functions;
+ const struct intel_function *functions;
size_t nfunctions;
const struct chv_gpio_pinrange *gpio_ranges;
size_t ngpio_ranges;
@@ -161,6 +151,8 @@ struct chv_pin_context {
* @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
* offset (in GPIO number space)
* @community: Community this pinctrl instance represents
+ * @saved_intmask: Interrupt mask saved for system sleep
+ * @saved_pin_context: Pointer to a context of the pins saved for system sleep
*
* The first group in @groups is expected to contain all pins that can be
* used as GPIOs.
@@ -184,7 +176,7 @@ struct chv_pinctrl {
.invert_oe = (i), \
}
-#define PIN_GROUP(n, p, m, i) \
+#define PIN_GROUP_WITH_ALT(n, p, m, i) \
{ \
.name = (n), \
.pins = (p), \
@@ -204,13 +196,6 @@ struct chv_pinctrl {
.noverrides = ARRAY_SIZE((o)), \
}
-#define FUNCTION(n, g) \
- { \
- .name = (n), \
- .groups = (g), \
- .ngroups = ARRAY_SIZE((g)), \
- }
-
#define GPIO_PINRANGE(start, end) \
{ \
.base = (start), \
@@ -282,7 +267,6 @@ static const struct pinctrl_pin_desc southwest_pins[] = {
PINCTRL_PIN(97, "GP_SSP_2_TXD"),
};
-static const unsigned southwest_fspi_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
static const unsigned southwest_uart0_pins[] = { 16, 20 };
static const unsigned southwest_uart1_pins[] = { 15, 16, 18, 20 };
static const unsigned southwest_uart2_pins[] = { 17, 19, 21, 22 };
@@ -298,7 +282,6 @@ static const unsigned southwest_i2c4_pins[] = { 46, 50 };
static const unsigned southwest_i2c5_pins[] = { 45, 48 };
static const unsigned southwest_i2c6_pins[] = { 47, 51 };
static const unsigned southwest_i2c_nfc_pins[] = { 49, 52 };
-static const unsigned southwest_smbus_pins[] = { 79, 81, 82 };
static const unsigned southwest_spi3_pins[] = { 76, 79, 80, 81, 82 };
/* LPE I2S TXD pins need to have invert_oe set */
@@ -318,18 +301,18 @@ static const struct chv_alternate_function southwest_spi3_altfuncs[] = {
};
static const struct chv_pingroup southwest_groups[] = {
- PIN_GROUP("uart0_grp", southwest_uart0_pins, 2, false),
- PIN_GROUP("uart1_grp", southwest_uart1_pins, 1, false),
- PIN_GROUP("uart2_grp", southwest_uart2_pins, 1, false),
- PIN_GROUP("hda_grp", southwest_hda_pins, 2, false),
- PIN_GROUP("i2c0_grp", southwest_i2c0_pins, 1, true),
- PIN_GROUP("i2c1_grp", southwest_i2c1_pins, 1, true),
- PIN_GROUP("i2c2_grp", southwest_i2c2_pins, 1, true),
- PIN_GROUP("i2c3_grp", southwest_i2c3_pins, 1, true),
- PIN_GROUP("i2c4_grp", southwest_i2c4_pins, 1, true),
- PIN_GROUP("i2c5_grp", southwest_i2c5_pins, 1, true),
- PIN_GROUP("i2c6_grp", southwest_i2c6_pins, 1, true),
- PIN_GROUP("i2c_nfc_grp", southwest_i2c_nfc_pins, 2, true),
+ PIN_GROUP_WITH_ALT("uart0_grp", southwest_uart0_pins, 2, false),
+ PIN_GROUP_WITH_ALT("uart1_grp", southwest_uart1_pins, 1, false),
+ PIN_GROUP_WITH_ALT("uart2_grp", southwest_uart2_pins, 1, false),
+ PIN_GROUP_WITH_ALT("hda_grp", southwest_hda_pins, 2, false),
+ PIN_GROUP_WITH_ALT("i2c0_grp", southwest_i2c0_pins, 1, true),
+ PIN_GROUP_WITH_ALT("i2c1_grp", southwest_i2c1_pins, 1, true),
+ PIN_GROUP_WITH_ALT("i2c2_grp", southwest_i2c2_pins, 1, true),
+ PIN_GROUP_WITH_ALT("i2c3_grp", southwest_i2c3_pins, 1, true),
+ PIN_GROUP_WITH_ALT("i2c4_grp", southwest_i2c4_pins, 1, true),
+ PIN_GROUP_WITH_ALT("i2c5_grp", southwest_i2c5_pins, 1, true),
+ PIN_GROUP_WITH_ALT("i2c6_grp", southwest_i2c6_pins, 1, true),
+ PIN_GROUP_WITH_ALT("i2c_nfc_grp", southwest_i2c_nfc_pins, 2, true),
PIN_GROUP_WITH_OVERRIDE("lpe_grp", southwest_lpe_pins, 1, false,
southwest_lpe_altfuncs),
@@ -356,7 +339,7 @@ static const char * const southwest_spi3_groups[] = { "spi3_grp" };
* Only do pinmuxing for certain LPSS devices for now. Rest of the pins are
* enabled only as GPIOs.
*/
-static const struct chv_function southwest_functions[] = {
+static const struct intel_function southwest_functions[] = {
FUNCTION("uart0", southwest_uart0_groups),
FUNCTION("uart1", southwest_uart1_groups),
FUNCTION("uart2", southwest_uart2_groups),
@@ -610,13 +593,13 @@ static const unsigned southeast_spi1_pins[] = { 60, 61, 62, 64, 66 };
static const unsigned southeast_spi2_pins[] = { 2, 3, 4, 6, 7 };
static const struct chv_pingroup southeast_groups[] = {
- PIN_GROUP("pwm0_grp", southeast_pwm0_pins, 1, false),
- PIN_GROUP("pwm1_grp", southeast_pwm1_pins, 1, false),
- PIN_GROUP("sdmmc1_grp", southeast_sdmmc1_pins, 1, false),
- PIN_GROUP("sdmmc2_grp", southeast_sdmmc2_pins, 1, false),
- PIN_GROUP("sdmmc3_grp", southeast_sdmmc3_pins, 1, false),
- PIN_GROUP("spi1_grp", southeast_spi1_pins, 1, false),
- PIN_GROUP("spi2_grp", southeast_spi2_pins, 4, false),
+ PIN_GROUP_WITH_ALT("pwm0_grp", southeast_pwm0_pins, 1, false),
+ PIN_GROUP_WITH_ALT("pwm1_grp", southeast_pwm1_pins, 1, false),
+ PIN_GROUP_WITH_ALT("sdmmc1_grp", southeast_sdmmc1_pins, 1, false),
+ PIN_GROUP_WITH_ALT("sdmmc2_grp", southeast_sdmmc2_pins, 1, false),
+ PIN_GROUP_WITH_ALT("sdmmc3_grp", southeast_sdmmc3_pins, 1, false),
+ PIN_GROUP_WITH_ALT("spi1_grp", southeast_spi1_pins, 1, false),
+ PIN_GROUP_WITH_ALT("spi2_grp", southeast_spi2_pins, 4, false),
};
static const char * const southeast_pwm0_groups[] = { "pwm0_grp" };
@@ -627,7 +610,7 @@ static const char * const southeast_sdmmc3_groups[] = { "sdmmc3_grp" };
static const char * const southeast_spi1_groups[] = { "spi1_grp" };
static const char * const southeast_spi2_groups[] = { "spi2_grp" };
-static const struct chv_function southeast_functions[] = {
+static const struct intel_function southeast_functions[] = {
FUNCTION("pwm0", southeast_pwm0_groups),
FUNCTION("pwm1", southeast_pwm1_groups),
FUNCTION("sdmmc1", southeast_sdmmc1_groups),
@@ -678,11 +661,11 @@ static const struct chv_community *chv_communities[] = {
*/
static DEFINE_RAW_SPINLOCK(chv_lock);
-static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset,
- unsigned reg)
+static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned int offset,
+ unsigned int reg)
{
- unsigned family_no = offset / MAX_FAMILY_PAD_GPIO_NO;
- unsigned pad_no = offset % MAX_FAMILY_PAD_GPIO_NO;
+ unsigned int family_no = offset / MAX_FAMILY_PAD_GPIO_NO;
+ unsigned int pad_no = offset % MAX_FAMILY_PAD_GPIO_NO;
offset = FAMILY_PAD_REGS_OFF + FAMILY_PAD_REGS_SIZE * family_no +
GPIO_REGS_SIZE * pad_no;
@@ -698,7 +681,7 @@ static void chv_writel(u32 value, void __iomem *reg)
}
/* When Pad Cfg is locked, driver can only change GPIOTXState or GPIORXState */
-static bool chv_pad_locked(struct chv_pinctrl *pctrl, unsigned offset)
+static bool chv_pad_locked(struct chv_pinctrl *pctrl, unsigned int offset)
{
void __iomem *reg;
@@ -714,15 +697,15 @@ static int chv_get_groups_count(struct pinctrl_dev *pctldev)
}
static const char *chv_get_group_name(struct pinctrl_dev *pctldev,
- unsigned group)
+ unsigned int group)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->community->groups[group].name;
}
-static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
- const unsigned **pins, unsigned *npins)
+static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
+ const unsigned int **pins, unsigned int *npins)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -732,7 +715,7 @@ static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
}
static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
- unsigned offset)
+ unsigned int offset)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
unsigned long flags;
@@ -779,7 +762,7 @@ static int chv_get_functions_count(struct pinctrl_dev *pctldev)
}
static const char *chv_get_function_name(struct pinctrl_dev *pctldev,
- unsigned function)
+ unsigned int function)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -787,9 +770,9 @@ static const char *chv_get_function_name(struct pinctrl_dev *pctldev,
}
static int chv_get_function_groups(struct pinctrl_dev *pctldev,
- unsigned function,
+ unsigned int function,
const char * const **groups,
- unsigned * const ngroups)
+ unsigned int * const ngroups)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -798,8 +781,8 @@ static int chv_get_function_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
- unsigned group)
+static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct chv_pingroup *grp;
@@ -865,7 +848,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset)
+ unsigned int offset)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
unsigned long flags;
@@ -925,7 +908,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset)
+ unsigned int offset)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
unsigned long flags;
@@ -943,7 +926,7 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset, bool input)
+ unsigned int offset, bool input)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
@@ -974,7 +957,7 @@ static const struct pinmux_ops chv_pinmux_ops = {
.gpio_set_direction = chv_gpio_set_direction,
};
-static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
+static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *config)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -1054,7 +1037,7 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
return 0;
}
-static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
+static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned int pin,
enum pin_config_param param, u32 arg)
{
void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
@@ -1141,8 +1124,8 @@ static int chv_config_set_oden(struct chv_pinctrl *pctrl, unsigned int pin,
return 0;
}
-static int chv_config_set(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long *configs, unsigned nconfigs)
+static int chv_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int nconfigs)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
@@ -1243,7 +1226,7 @@ static struct pinctrl_desc chv_pinctrl_desc = {
.owner = THIS_MODULE,
};
-static int chv_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int chv_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
unsigned long flags;
@@ -1261,7 +1244,7 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(ctrl0 & CHV_PADCTRL0_GPIORXSTATE);
}
-static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void chv_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
unsigned long flags;
@@ -1283,7 +1266,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
raw_spin_unlock_irqrestore(&chv_lock, flags);
}
-static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
u32 ctrl0, direction;
@@ -1299,12 +1282,12 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
return direction != CHV_PADCTRL0_GPIOCFG_GPO;
}
-static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
return pinctrl_gpio_direction_input(chip->base + offset);
}
-static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
chv_gpio_set(chip, offset, value);
@@ -1388,7 +1371,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) {
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
- unsigned pin = irqd_to_hwirq(d);
+ unsigned int pin = irqd_to_hwirq(d);
irq_flow_handler_t handler;
unsigned long flags;
u32 intsel, value;
@@ -1415,11 +1398,11 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
return 0;
}
-static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
+static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
- unsigned pin = irqd_to_hwirq(d);
+ unsigned int pin = irqd_to_hwirq(d);
unsigned long flags;
u32 value;
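
Since struct chv_function duplicated the shared structure field for field, the driver now uses struct intel_function from pinctrl-intel.h directly, and renames its private PIN_GROUP() to PIN_GROUP_WITH_ALT() to keep it distinct from the PIN_GROUP() macro used elsewhere. For reference, the shared structure has this shape:

struct intel_function {
        const char *name;               /* function name */
        const char * const *groups;     /* groups the function can use */
        size_t ngroups;                 /* number of entries in @groups */
};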
diff --git a/drivers/pinctrl/intel/pinctrl-denverton.c b/drivers/pinctrl/intel/pinctrl-denverton.c
index f321ab0d76e5..88bc55281b83 100644
--- a/drivers/pinctrl/intel/pinctrl-denverton.c
+++ b/drivers/pinctrl/intel/pinctrl-denverton.c
@@ -9,7 +9,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
+
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-intel.h"
@@ -262,10 +262,7 @@ static int dnv_pinctrl_probe(struct platform_device *pdev)
return intel_pinctrl_probe(pdev, &dnv_soc_data);
}
-static const struct dev_pm_ops dnv_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
- intel_pinctrl_resume)
-};
+static INTEL_PINCTRL_PM_OPS(dnv_pinctrl_pm_ops);
static const struct acpi_device_id dnv_pinctrl_acpi_match[] = {
{ "INTC3000" },
diff --git a/drivers/pinctrl/intel/pinctrl-geminilake.c b/drivers/pinctrl/intel/pinctrl-geminilake.c
index 5c4c96752fc1..67600314454c 100644
--- a/drivers/pinctrl/intel/pinctrl-geminilake.c
+++ b/drivers/pinctrl/intel/pinctrl-geminilake.c
@@ -6,17 +6,17 @@
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
+
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-intel.h"
#define GLK_PAD_OWN 0x020
-#define GLK_HOSTSW_OWN 0x0b0
#define GLK_PADCFGLOCK 0x080
+#define GLK_HOSTSW_OWN 0x0b0
#define GLK_GPI_IE 0x110
#define GLK_COMMUNITY(s, e) \
@@ -58,16 +58,16 @@ static const struct pinctrl_pin_desc glk_northwest_pins[] = {
PINCTRL_PIN(23, "GPIO_23"),
PINCTRL_PIN(24, "GPIO_24"),
PINCTRL_PIN(25, "GPIO_25"),
- PINCTRL_PIN(26, "GPIO_26"),
- PINCTRL_PIN(27, "GPIO_27"),
- PINCTRL_PIN(28, "GPIO_28"),
- PINCTRL_PIN(29, "GPIO_29"),
- PINCTRL_PIN(30, "GPIO_30"),
- PINCTRL_PIN(31, "GPIO_31"),
- PINCTRL_PIN(32, "GPIO_32"),
- PINCTRL_PIN(33, "GPIO_33"),
- PINCTRL_PIN(34, "GPIO_34"),
- PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(26, "ISH_GPIO_0"),
+ PINCTRL_PIN(27, "ISH_GPIO_1"),
+ PINCTRL_PIN(28, "ISH_GPIO_2"),
+ PINCTRL_PIN(29, "ISH_GPIO_3"),
+ PINCTRL_PIN(30, "ISH_GPIO_4"),
+ PINCTRL_PIN(31, "ISH_GPIO_5"),
+ PINCTRL_PIN(32, "ISH_GPIO_6"),
+ PINCTRL_PIN(33, "ISH_GPIO_7"),
+ PINCTRL_PIN(34, "ISH_GPIO_8"),
+ PINCTRL_PIN(35, "ISH_GPIO_9"),
PINCTRL_PIN(36, "GPIO_36"),
PINCTRL_PIN(37, "GPIO_37"),
PINCTRL_PIN(38, "GPIO_38"),
@@ -195,12 +195,12 @@ static const struct pinctrl_pin_desc glk_north_pins[] = {
PINCTRL_PIN(5, "LPSS_SPI_0_FS1"),
PINCTRL_PIN(6, "LPSS_SPI_0_RXD"),
PINCTRL_PIN(7, "LPSS_SPI_0_TXD"),
- PINCTRL_PIN(8, "LPSS_SPI_1_CLK"),
- PINCTRL_PIN(9, "LPSS_SPI_1_FS0"),
- PINCTRL_PIN(10, "LPSS_SPI_1_FS1"),
- PINCTRL_PIN(11, "LPSS_SPI_1_FS2"),
- PINCTRL_PIN(12, "LPSS_SPI_1_RXD"),
- PINCTRL_PIN(13, "LPSS_SPI_1_TXD"),
+ PINCTRL_PIN(8, "LPSS_SPI_2_CLK"),
+ PINCTRL_PIN(9, "LPSS_SPI_2_FS0"),
+ PINCTRL_PIN(10, "LPSS_SPI_2_FS1"),
+ PINCTRL_PIN(11, "LPSS_SPI_2_FS2"),
+ PINCTRL_PIN(12, "LPSS_SPI_2_RXD"),
+ PINCTRL_PIN(13, "LPSS_SPI_2_TXD"),
PINCTRL_PIN(14, "FST_SPI_CS0_B"),
PINCTRL_PIN(15, "FST_SPI_CS1_B"),
PINCTRL_PIN(16, "FST_SPI_MOSI_IO0"),
@@ -215,8 +215,8 @@ static const struct pinctrl_pin_desc glk_north_pins[] = {
PINCTRL_PIN(25, "PMU_SLP_S3_B"),
PINCTRL_PIN(26, "PMU_SLP_S4_B"),
PINCTRL_PIN(27, "SUSPWRDNACK"),
- PINCTRL_PIN(28, "EMMC_PWR_EN_B"),
- PINCTRL_PIN(29, "PMU_AC_PRESENT"),
+ PINCTRL_PIN(28, "EMMC_DNX_PWR_EN_B"),
+ PINCTRL_PIN(29, "GPIO_105"),
PINCTRL_PIN(30, "PMU_BATLOW_B"),
PINCTRL_PIN(31, "PMU_RESETBUTTON_B"),
PINCTRL_PIN(32, "PMU_SUSCLK"),
@@ -449,42 +449,15 @@ static const struct intel_pinctrl_soc_data *glk_pinctrl_soc_data[] = {
};
static const struct acpi_device_id glk_pinctrl_acpi_match[] = {
- { "INT3453" },
+ { "INT3453", (kernel_ulong_t)glk_pinctrl_soc_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, glk_pinctrl_acpi_match);
-static int glk_pinctrl_probe(struct platform_device *pdev)
-{
- const struct intel_pinctrl_soc_data *soc_data = NULL;
- struct acpi_device *adev;
- int i;
-
- adev = ACPI_COMPANION(&pdev->dev);
- if (!adev)
- return -ENODEV;
-
- for (i = 0; glk_pinctrl_soc_data[i]; i++) {
- if (!strcmp(adev->pnp.unique_id,
- glk_pinctrl_soc_data[i]->uid)) {
- soc_data = glk_pinctrl_soc_data[i];
- break;
- }
- }
-
- if (!soc_data)
- return -ENODEV;
-
- return intel_pinctrl_probe(pdev, soc_data);
-}
-
-static const struct dev_pm_ops glk_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
- intel_pinctrl_resume)
-};
+static INTEL_PINCTRL_PM_OPS(glk_pinctrl_pm_ops);
static struct platform_driver glk_pinctrl_driver = {
- .probe = glk_pinctrl_probe,
+ .probe = intel_pinctrl_probe_by_uid,
.driver = {
.name = "geminilake-pinctrl",
.acpi_match_table = glk_pinctrl_acpi_match,
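
The per-driver UID-matching loop moves into a shared intel_pinctrl_probe_by_uid() helper; all that remains here is stashing the soc-data table in the ACPI match entry's driver_data. A plausible core for such a helper, reusing calls visible in this series (the real implementation lives in pinctrl-intel.c):

static int probe_by_uid(struct platform_device *pdev)
{
        const struct intel_pinctrl_soc_data *data = NULL;
        const struct intel_pinctrl_soc_data **table;
        struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
        int i;

        table = (const struct intel_pinctrl_soc_data **)
                device_get_match_data(&pdev->dev);
        if (!adev || !table)
                return -ENODEV;

        /* Pick the soc data whose UID matches the ACPI _UID. */
        for (i = 0; table[i]; i++) {
                if (!strcmp(adev->pnp.unique_id, table[i]->uid)) {
                        data = table[i];
                        break;
                }
        }

        return data ? intel_pinctrl_probe(pdev, data) : -ENODEV;
}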
diff --git a/drivers/pinctrl/intel/pinctrl-icelake.c b/drivers/pinctrl/intel/pinctrl-icelake.c
index 630b966ce081..f33a5deafb97 100644
--- a/drivers/pinctrl/intel/pinctrl-icelake.c
+++ b/drivers/pinctrl/intel/pinctrl-icelake.c
@@ -10,7 +10,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
+
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-intel.h"
@@ -408,10 +408,7 @@ static int icl_pinctrl_probe(struct platform_device *pdev)
return intel_pinctrl_probe(pdev, &icllp_soc_data);
}
-static const struct dev_pm_ops icl_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
- intel_pinctrl_resume)
-};
+static INTEL_PINCTRL_PM_OPS(icl_pinctrl_pm_ops);
static const struct acpi_device_id icl_pinctrl_acpi_match[] = {
{ "INT3455" },
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 62b009b27eda..8cda7b535b02 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -7,11 +7,14 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/gpio/driver.h>
#include <linux/log2.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
@@ -115,7 +118,7 @@ struct intel_pinctrl {
#define padgroup_offset(g, p) ((p) - (g)->base)
static struct intel_community *intel_get_community(struct intel_pinctrl *pctrl,
- unsigned pin)
+ unsigned int pin)
{
struct intel_community *community;
int i;
@@ -133,7 +136,7 @@ static struct intel_community *intel_get_community(struct intel_pinctrl *pctrl,
static const struct intel_padgroup *
intel_community_get_padgroup(const struct intel_community *community,
- unsigned pin)
+ unsigned int pin)
{
int i;
@@ -147,11 +150,11 @@ intel_community_get_padgroup(const struct intel_community *community,
return NULL;
}
-static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin,
- unsigned reg)
+static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl,
+ unsigned int pin, unsigned int reg)
{
const struct intel_community *community;
- unsigned padno;
+ unsigned int padno;
size_t nregs;
community = intel_get_community(pctrl, pin);
@@ -167,11 +170,11 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin,
return community->pad_regs + reg + padno * nregs * 4;
}
-static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
+static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct intel_community *community;
const struct intel_padgroup *padgrp;
- unsigned gpp, offset, gpp_offset;
+ unsigned int gpp, offset, gpp_offset;
void __iomem *padown;
community = intel_get_community(pctrl, pin);
@@ -192,11 +195,11 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
return !(readl(padown) & PADOWN_MASK(gpp_offset));
}
-static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin)
+static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct intel_community *community;
const struct intel_padgroup *padgrp;
- unsigned offset, gpp_offset;
+ unsigned int offset, gpp_offset;
void __iomem *hostown;
community = intel_get_community(pctrl, pin);
@@ -216,11 +219,11 @@ static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin)
return !(readl(hostown) & BIT(gpp_offset));
}
-static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
+static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned int pin)
{
struct intel_community *community;
const struct intel_padgroup *padgrp;
- unsigned offset, gpp_offset;
+ unsigned int offset, gpp_offset;
u32 value;
community = intel_get_community(pctrl, pin);
@@ -253,7 +256,7 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
return false;
}
-static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned pin)
+static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned int pin)
{
return intel_pad_owned_by_host(pctrl, pin) &&
!intel_pad_locked(pctrl, pin);
@@ -267,15 +270,15 @@ static int intel_get_groups_count(struct pinctrl_dev *pctldev)
}
static const char *intel_get_group_name(struct pinctrl_dev *pctldev,
- unsigned group)
+ unsigned int group)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->groups[group].name;
}
-static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
- const unsigned **pins, unsigned *npins)
+static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
+ const unsigned int **pins, unsigned int *npins)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -285,7 +288,7 @@ static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
}
static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
- unsigned pin)
+ unsigned int pin)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg;
@@ -344,7 +347,7 @@ static int intel_get_functions_count(struct pinctrl_dev *pctldev)
}
static const char *intel_get_function_name(struct pinctrl_dev *pctldev,
- unsigned function)
+ unsigned int function)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -352,9 +355,9 @@ static const char *intel_get_function_name(struct pinctrl_dev *pctldev,
}
static int intel_get_function_groups(struct pinctrl_dev *pctldev,
- unsigned function,
+ unsigned int function,
const char * const **groups,
- unsigned * const ngroups)
+ unsigned int * const ngroups)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -363,8 +366,8 @@ static int intel_get_function_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
- unsigned group)
+static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct intel_pingroup *grp = &pctrl->soc->groups[group];
@@ -436,7 +439,7 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned pin)
+ unsigned int pin)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
@@ -461,7 +464,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned pin, bool input)
+ unsigned int pin, bool input)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
@@ -486,7 +489,7 @@ static const struct pinmux_ops intel_pinmux_ops = {
.gpio_set_direction = intel_gpio_set_direction,
};
-static int intel_config_get(struct pinctrl_dev *pctldev, unsigned pin,
+static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *config)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -575,11 +578,11 @@ static int intel_config_get(struct pinctrl_dev *pctldev, unsigned pin,
return 0;
}
-static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
+static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
unsigned long config)
{
- unsigned param = pinconf_to_config_param(config);
- unsigned arg = pinconf_to_config_argument(config);
+ unsigned int param = pinconf_to_config_param(config);
+ unsigned int arg = pinconf_to_config_argument(config);
const struct intel_community *community;
void __iomem *padcfg1;
unsigned long flags;
@@ -653,8 +656,8 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
return ret;
}
-static int intel_config_set_debounce(struct intel_pinctrl *pctrl, unsigned pin,
- unsigned debounce)
+static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
+ unsigned int pin, unsigned int debounce)
{
void __iomem *padcfg0, *padcfg2;
unsigned long flags;
@@ -700,8 +703,8 @@ exit_unlock:
return ret;
}
-static int intel_config_set(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long *configs, unsigned nconfigs)
+static int intel_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int nconfigs)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
int i, ret;
@@ -747,13 +750,63 @@ static const struct pinctrl_desc intel_pinctrl_desc = {
.owner = THIS_MODULE,
};
-static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
+/**
+ * intel_gpio_to_pin() - Translate from GPIO offset to pin number
+ * @pctrl: Pinctrl structure
+ * @offset: GPIO offset from gpiolib
+ * @community: Community is filled here if not %NULL
+ * @padgrp: Pad group is filled here if not %NULL
+ *
+ * When coming through gpiolib irqchip, the GPIO offset is not
+ * automatically translated to pinctrl pin number. This function can be
+ * used to find out the corresponding pinctrl pin.
+ */
+static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
+ const struct intel_community **community,
+ const struct intel_padgroup **padgrp)
+{
+ int i;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ const struct intel_community *comm = &pctrl->communities[i];
+ int j;
+
+ for (j = 0; j < comm->ngpps; j++) {
+ const struct intel_padgroup *pgrp = &comm->gpps[j];
+
+ if (pgrp->gpio_base < 0)
+ continue;
+
+ if (offset >= pgrp->gpio_base &&
+ offset < pgrp->gpio_base + pgrp->size) {
+ int pin;
+
+ pin = pgrp->base + offset - pgrp->gpio_base;
+ if (community)
+ *community = comm;
+ if (padgrp)
+ *padgrp = pgrp;
+
+ return pin;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
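
With per-padgroup gpio_base values, a gpiolib offset no longer equals the pinctrl pin number, so intel_gpio_to_pin() moves above the GPIO accessors and each of get/set/get_direction translates before touching pad registers. The arithmetic, extracted into a standalone, runnable form:

#include <stdio.h>

struct padgroup {
        int gpio_base;          /* first GPIO number, or -1 if no GPIOs */
        unsigned int base;      /* first pin number */
        unsigned int size;      /* pads in the group */
};

static int gpio_to_pin(const struct padgroup *gpps, size_t n,
                       unsigned int offset)
{
        size_t i;

        for (i = 0; i < n; i++) {
                const struct padgroup *g = &gpps[i];

                if (g->gpio_base < 0)
                        continue;       /* group not exposed as GPIOs */
                if (offset >= (unsigned int)g->gpio_base &&
                    offset < g->gpio_base + g->size)
                        return g->base + offset - g->gpio_base;
        }
        return -1;                      /* the driver returns -EINVAL here */
}

int main(void)
{
        const struct padgroup gpps[] = {
                { .gpio_base = 0,  .base = 0,  .size = 24 },
                { .gpio_base = -1, .base = 24, .size = 8 },
                { .gpio_base = 24, .base = 32, .size = 24 },
        };

        printf("%d\n", gpio_to_pin(gpps, 3, 30));       /* prints 38 */
        return 0;
}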
+static int intel_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
void __iomem *reg;
u32 padcfg0;
+ int pin;
+
+ pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+ if (pin < 0)
+ return -EINVAL;
- reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+ reg = intel_get_padcfg(pctrl, pin, PADCFG0);
if (!reg)
return -EINVAL;
@@ -764,14 +817,20 @@ static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(padcfg0 & PADCFG0_GPIORXSTATE);
}
-static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void intel_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
unsigned long flags;
void __iomem *reg;
u32 padcfg0;
+ int pin;
+
+ pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+ if (pin < 0)
+ return;
- reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+ reg = intel_get_padcfg(pctrl, pin, PADCFG0);
if (!reg)
return;
@@ -790,8 +849,13 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
void __iomem *reg;
u32 padcfg0;
+ int pin;
+
+ pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+ if (pin < 0)
+ return -EINVAL;
- reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+ reg = intel_get_padcfg(pctrl, pin, PADCFG0);
if (!reg)
return -EINVAL;
@@ -803,12 +867,12 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
return !!(padcfg0 & PADCFG0_GPIOTXDIS);
}
-static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
return pinctrl_gpio_direction_input(chip->base + offset);
}
-static int intel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+static int intel_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
intel_gpio_set(chip, offset, value);
@@ -827,81 +891,6 @@ static const struct gpio_chip intel_gpio_chip = {
.set_config = gpiochip_generic_config,
};
-/**
- * intel_gpio_to_pin() - Translate from GPIO offset to pin number
- * @pctrl: Pinctrl structure
- * @offset: GPIO offset from gpiolib
- * @commmunity: Community is filled here if not %NULL
- * @padgrp: Pad group is filled here if not %NULL
- *
- * When coming through gpiolib irqchip, the GPIO offset is not
- * automatically translated to pinctrl pin number. This function can be
- * used to find out the corresponding pinctrl pin.
- */
-static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
- const struct intel_community **community,
- const struct intel_padgroup **padgrp)
-{
- int i;
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- const struct intel_community *comm = &pctrl->communities[i];
- int j;
-
- for (j = 0; j < comm->ngpps; j++) {
- const struct intel_padgroup *pgrp = &comm->gpps[j];
-
- if (pgrp->gpio_base < 0)
- continue;
-
- if (offset >= pgrp->gpio_base &&
- offset < pgrp->gpio_base + pgrp->size) {
- int pin;
-
- pin = pgrp->base + offset - pgrp->gpio_base;
- if (community)
- *community = comm;
- if (padgrp)
- *padgrp = pgrp;
-
- return pin;
- }
- }
- }
-
- return -EINVAL;
-}
-
-static int intel_gpio_irq_reqres(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- int pin;
- int ret;
-
- pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
- if (pin >= 0) {
- ret = gpiochip_lock_as_irq(gc, pin);
- if (ret) {
- dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n",
- pin);
- return ret;
- }
- }
- return 0;
-}
-
-static void intel_gpio_irq_relres(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- int pin;
-
- pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
- if (pin >= 0)
- gpiochip_unlock_as_irq(gc, pin);
-}
-
static void intel_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -912,7 +901,7 @@ static void intel_gpio_irq_ack(struct irq_data *d)
pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), &community, &padgrp);
if (pin >= 0) {
- unsigned gpp, gpp_offset, is_offset;
+ unsigned int gpp, gpp_offset, is_offset;
gpp = padgrp->reg_num;
gpp_offset = padgroup_offset(padgrp, pin);
@@ -934,7 +923,7 @@ static void intel_gpio_irq_enable(struct irq_data *d)
pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), &community, &padgrp);
if (pin >= 0) {
- unsigned gpp, gpp_offset, is_offset;
+ unsigned int gpp, gpp_offset, is_offset;
unsigned long flags;
u32 value;
@@ -963,7 +952,7 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), &community, &padgrp);
if (pin >= 0) {
- unsigned gpp, gpp_offset;
+ unsigned int gpp, gpp_offset;
unsigned long flags;
void __iomem *reg;
u32 value;
@@ -994,11 +983,11 @@ static void intel_gpio_irq_unmask(struct irq_data *d)
intel_gpio_irq_mask_unmask(d, false);
}
-static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
+static int intel_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- unsigned pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
+ unsigned int pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
unsigned long flags;
void __iomem *reg;
u32 value;
@@ -1055,7 +1044,7 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- unsigned pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
+ unsigned int pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
if (on)
enable_irq_wake(pctrl->irq);
@@ -1117,8 +1106,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
static struct irq_chip intel_gpio_irqchip = {
.name = "intel-gpio",
- .irq_request_resources = intel_gpio_irq_reqres,
- .irq_release_resources = intel_gpio_irq_relres,
.irq_enable = intel_gpio_irq_enable,
.irq_ack = intel_gpio_irq_ack,
.irq_mask = intel_gpio_irq_mask,
@@ -1152,7 +1139,7 @@ static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
static unsigned intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
{
const struct intel_community *community;
- unsigned ngpio = 0;
+ unsigned int ngpio = 0;
int i, j;
for (i = 0; i < pctrl->ncommunities; i++) {
@@ -1228,8 +1215,8 @@ static int intel_pinctrl_add_padgroups(struct intel_pinctrl *pctrl,
struct intel_community *community)
{
struct intel_padgroup *gpps;
- unsigned npins = community->npins;
- unsigned padown_num = 0;
+ unsigned int npins = community->npins;
+ unsigned int padown_num = 0;
size_t ngpps, i;
if (community->gpps)
@@ -1245,7 +1232,7 @@ static int intel_pinctrl_add_padgroups(struct intel_pinctrl *pctrl,
if (community->gpps) {
gpps[i] = community->gpps[i];
} else {
- unsigned gpp_size = community->gpp_size;
+ unsigned int gpp_size = community->gpp_size;
gpps[i].reg_num = i;
gpps[i].base = community->pin_base + i * gpp_size;
@@ -1415,8 +1402,52 @@ int intel_pinctrl_probe(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(intel_pinctrl_probe);
+int intel_pinctrl_probe_by_hid(struct platform_device *pdev)
+{
+ const struct intel_pinctrl_soc_data *data;
+
+ data = device_get_match_data(&pdev->dev);
+ return intel_pinctrl_probe(pdev, data);
+}
+EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_hid);
+
+int intel_pinctrl_probe_by_uid(struct platform_device *pdev)
+{
+ const struct intel_pinctrl_soc_data *data = NULL;
+ const struct intel_pinctrl_soc_data **table;
+ struct acpi_device *adev;
+ unsigned int i;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (adev) {
+ const void *match = device_get_match_data(&pdev->dev);
+
+ table = (const struct intel_pinctrl_soc_data **)match;
+ for (i = 0; table[i]; i++) {
+ if (!strcmp(adev->pnp.unique_id, table[i]->uid)) {
+ data = table[i];
+ break;
+ }
+ }
+ } else {
+ const struct platform_device_id *id;
+
+ id = platform_get_device_id(pdev);
+ if (!id)
+ return -ENODEV;
+
+ table = (const struct intel_pinctrl_soc_data **)id->driver_data;
+ data = table[pdev->id];
+ }
+ if (!data)
+ return -ENODEV;
+
+ return intel_pinctrl_probe(pdev, data);
+}
+EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_uid);
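/*
 * Editor's sketch (not part of the patch): how a platform driver is
 * expected to feed intel_pinctrl_probe_by_uid(). The driver stores a
 * NULL-terminated table of soc-data pointers in the ACPI match entry's
 * driver_data; the helper then picks the entry whose ->uid matches the
 * device's ACPI _UID. All names below ("xyz", "INTC0000") are
 * hypothetical.
 */
static const struct intel_pinctrl_soc_data xyz_uid1_soc_data = {
	.uid = "1",
	/* .pins, .npins, .communities, ... per-SoC tables go here */
};

static const struct intel_pinctrl_soc_data *xyz_soc_data[] = {
	&xyz_uid1_soc_data,
	NULL
};

static const struct acpi_device_id xyz_acpi_match[] = {
	{ "INTC0000", (kernel_ulong_t)xyz_soc_data },
	{ }
};

static struct platform_driver xyz_pinctrl_driver = {
	.probe = intel_pinctrl_probe_by_uid,
	.driver = {
		.name = "xyz-pinctrl",
		.acpi_match_table = xyz_acpi_match,
	},
};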
+
#ifdef CONFIG_PM_SLEEP
-static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
+static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
@@ -1467,7 +1498,7 @@ int intel_pinctrl_suspend(struct device *dev)
for (i = 0; i < pctrl->ncommunities; i++) {
struct intel_community *community = &pctrl->communities[i];
void __iomem *base;
- unsigned gpp;
+ unsigned int gpp;
base = community->regs + community->ie_offset;
for (gpp = 0; gpp < community->ngpps; gpp++)
@@ -1485,7 +1516,7 @@ static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
for (i = 0; i < pctrl->ncommunities; i++) {
const struct intel_community *community;
void __iomem *base;
- unsigned gpp;
+ unsigned int gpp;
community = &pctrl->communities[i];
base = community->regs;
@@ -1549,7 +1580,7 @@ int intel_pinctrl_resume(struct device *dev)
for (i = 0; i < pctrl->ncommunities; i++) {
struct intel_community *community = &pctrl->communities[i];
void __iomem *base;
- unsigned gpp;
+ unsigned int gpp;
base = community->regs + community->ie_offset;
for (gpp = 0; gpp < community->ngpps; gpp++) {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 1785abf157e4..9fb4645f3c55 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -10,6 +10,8 @@
#ifndef PINCTRL_INTEL_H
#define PINCTRL_INTEL_H
+#include <linux/pm.h>
+
struct pinctrl_pin_desc;
struct platform_device;
struct device;
@@ -25,10 +27,10 @@ struct device;
*/
struct intel_pingroup {
const char *name;
- const unsigned *pins;
+ const unsigned int *pins;
size_t npins;
unsigned short mode;
- const unsigned *modes;
+ const unsigned int *modes;
};
/**
@@ -56,11 +58,11 @@ struct intel_function {
* to specify them.
*/
struct intel_padgroup {
- unsigned reg_num;
- unsigned base;
- unsigned size;
+ unsigned int reg_num;
+ unsigned int base;
+ unsigned int size;
int gpio_base;
- unsigned padown_num;
+ unsigned int padown_num;
};
/**
@@ -96,17 +98,17 @@ struct intel_padgroup {
* pass custom @gpps and @ngpps instead.
*/
struct intel_community {
- unsigned barno;
- unsigned padown_offset;
- unsigned padcfglock_offset;
- unsigned hostown_offset;
- unsigned is_offset;
- unsigned ie_offset;
- unsigned pin_base;
- unsigned gpp_size;
- unsigned gpp_num_padown_regs;
+ unsigned int barno;
+ unsigned int padown_offset;
+ unsigned int padcfglock_offset;
+ unsigned int hostown_offset;
+ unsigned int is_offset;
+ unsigned int ie_offset;
+ unsigned int pin_base;
+ unsigned int gpp_size;
+ unsigned int gpp_num_padown_regs;
size_t npins;
- unsigned features;
+ unsigned int features;
const struct intel_padgroup *gpps;
size_t ngpps;
/* Reserved for the core driver */
@@ -173,9 +175,17 @@ struct intel_pinctrl_soc_data {
int intel_pinctrl_probe(struct platform_device *pdev,
const struct intel_pinctrl_soc_data *soc_data);
+int intel_pinctrl_probe_by_hid(struct platform_device *pdev);
+int intel_pinctrl_probe_by_uid(struct platform_device *pdev);
+
#ifdef CONFIG_PM_SLEEP
int intel_pinctrl_suspend(struct device *dev);
int intel_pinctrl_resume(struct device *dev);
#endif
+#define INTEL_PINCTRL_PM_OPS(_name) \
+const struct dev_pm_ops _name = { \
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend, intel_pinctrl_resume) \
+}
+
#endif /* PINCTRL_INTEL_H */
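/*
 * Editor's sketch (not part of the patch): typical use of the new
 * INTEL_PINCTRL_PM_OPS() helper in an SoC front-end driver; "xyz" is a
 * hypothetical name. The Lewisburg and Sunrisepoint conversions later in
 * this patch do exactly this.
 */
static INTEL_PINCTRL_PM_OPS(xyz_pinctrl_pm_ops);

static struct platform_driver xyz_pinctrl_driver = {
	.driver = {
		.name = "xyz-pinctrl",
		.pm = &xyz_pinctrl_pm_ops,
	},
};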
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index 99894647eddd..70ea9c518460 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -9,7 +9,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
+
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-intel.h"
@@ -313,10 +313,7 @@ static int lbg_pinctrl_probe(struct platform_device *pdev)
return intel_pinctrl_probe(pdev, &lbg_soc_data);
}
-static const struct dev_pm_ops lbg_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
- intel_pinctrl_resume)
-};
+static INTEL_PINCTRL_PM_OPS(lbg_pinctrl_pm_ops);
static const struct acpi_device_id lbg_pinctrl_acpi_match[] = {
{ "INT3536" },
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 4fa69f988c7b..2e9988dac55f 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -476,6 +476,34 @@ static void __iomem *mrfld_get_bufcfg(struct mrfld_pinctrl *mp, unsigned int pin
return family->regs + BUFCFG_OFFSET + bufno * 4;
}
+static int mrfld_read_bufcfg(struct mrfld_pinctrl *mp, unsigned int pin, u32 *value)
+{
+ void __iomem *bufcfg;
+
+ if (!mrfld_buf_available(mp, pin))
+ return -EBUSY;
+
+ bufcfg = mrfld_get_bufcfg(mp, pin);
+ *value = readl(bufcfg);
+
+ return 0;
+}
+
+static void mrfld_update_bufcfg(struct mrfld_pinctrl *mp, unsigned int pin,
+ u32 bits, u32 mask)
+{
+ void __iomem *bufcfg;
+ u32 value;
+
+ bufcfg = mrfld_get_bufcfg(mp, pin);
+ value = readl(bufcfg);
+
+ value &= ~mask;
+ value |= bits & mask;
+
+ writel(value, bufcfg);
+}
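/*
 * Editor's sketch (not part of the patch): how the new read/update pair
 * composes. mrfld_read_bufcfg() doubles as the availability check,
 * mrfld_update_bufcfg() does the read-modify-write. The BUFCFG_PU_EN bit
 * name is assumed from the driver's BUFCFG_* defines, and the real code
 * additionally holds the pinctrl lock around the update.
 */
static int mrfld_example_set_pull_up(struct mrfld_pinctrl *mp, unsigned int pin)
{
	u32 value;
	int ret;

	ret = mrfld_read_bufcfg(mp, pin, &value);
	if (ret)
		return ret;		/* pad not available */

	if (value & BUFCFG_PU_EN)
		return 0;		/* already pulled up */

	mrfld_update_bufcfg(mp, pin, BUFCFG_PU_EN, BUFCFG_PU_EN);
	return 0;
}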
+
static int mrfld_get_groups_count(struct pinctrl_dev *pctldev)
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
@@ -505,17 +533,15 @@ static void mrfld_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
unsigned int pin)
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- void __iomem *bufcfg;
u32 value, mode;
+ int ret;
- if (!mrfld_buf_available(mp, pin)) {
+ ret = mrfld_read_bufcfg(mp, pin, &value);
+ if (ret) {
seq_puts(s, "not available");
return;
}
- bufcfg = mrfld_get_bufcfg(mp, pin);
- value = readl(bufcfg);
-
mode = (value & BUFCFG_PINMODE_MASK) >> BUFCFG_PINMODE_SHIFT;
if (!mode)
seq_puts(s, "GPIO ");
@@ -559,21 +585,6 @@ static int mrfld_get_function_groups(struct pinctrl_dev *pctldev,
return 0;
}
-static void mrfld_update_bufcfg(struct mrfld_pinctrl *mp, unsigned int pin,
- u32 bits, u32 mask)
-{
- void __iomem *bufcfg;
- u32 value;
-
- bufcfg = mrfld_get_bufcfg(mp, pin);
- value = readl(bufcfg);
-
- value &= ~mask;
- value |= bits & mask;
-
- writel(value, bufcfg);
-}
-
static int mrfld_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int function,
unsigned int group)
@@ -637,11 +648,12 @@ static int mrfld_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
enum pin_config_param param = pinconf_to_config_param(*config);
u32 value, term;
u16 arg = 0;
+ int ret;
- if (!mrfld_buf_available(mp, pin))
+ ret = mrfld_read_bufcfg(mp, pin, &value);
+ if (ret)
return -ENOTSUPP;
- value = readl(mrfld_get_bufcfg(mp, pin));
term = (value & BUFCFG_PUPD_VAL_MASK) >> BUFCFG_PUPD_VAL_SHIFT;
switch (param) {
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 7984392104fe..38a7c811ff58 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -7,10 +7,10 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
+
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-intel.h"
@@ -593,21 +593,10 @@ MODULE_DEVICE_TABLE(acpi, spt_pinctrl_acpi_match);
static int spt_pinctrl_probe(struct platform_device *pdev)
{
- const struct intel_pinctrl_soc_data *soc_data;
- const struct acpi_device_id *id;
-
- id = acpi_match_device(spt_pinctrl_acpi_match, &pdev->dev);
- if (!id || !id->driver_data)
- return -ENODEV;
-
- soc_data = (const struct intel_pinctrl_soc_data *)id->driver_data;
- return intel_pinctrl_probe(pdev, soc_data);
+ return intel_pinctrl_probe_by_hid(pdev);
}
-static const struct dev_pm_ops spt_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
- intel_pinctrl_resume)
-};
+static INTEL_PINCTRL_PM_OPS(spt_pinctrl_pm_ops);
static struct platform_driver spt_pinctrl_driver = {
.probe = spt_pinctrl_probe,
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 9905dc672f6b..9d142e1da567 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -3,7 +3,8 @@ menu "MediaTek pinctrl drivers"
config EINT_MTK
bool "MediaTek External Interrupt Support"
- depends on PINCTRL_MTK || PINCTRL_MT7622 || COMPILE_TEST
+ depends on PINCTRL_MTK || PINCTRL_MTK_MOORE || COMPILE_TEST
+ select GPIOLIB
select IRQ_DOMAIN
config PINCTRL_MTK
@@ -15,6 +16,24 @@ config PINCTRL_MTK
select EINT_MTK
select OF_GPIO
+config PINCTRL_MTK_MOORE
+ bool "MediaTek Moore Core that implements generic binding"
+ depends on OF
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
+ select OF_GPIO
+
+config PINCTRL_MTK_PARIS
+ bool "MediaTek Paris Core that implements vendor binding"
+ depends on OF
+ select PINMUX
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select EINT_MTK
+ select OF_GPIO
+
# For ARMv7 SoCs
config PINCTRL_MT2701
bool "Mediatek MT2701 pin control"
@@ -23,6 +42,12 @@ config PINCTRL_MT2701
default MACH_MT2701
select PINCTRL_MTK
+config PINCTRL_MT7623
+ bool "Mediatek MT7623 pin control with generic binding"
+ depends on MACH_MT7623 || COMPILE_TEST
+ depends on PINCTRL_MTK_MOORE
+ default y
+
config PINCTRL_MT8135
bool "Mediatek MT8135 pin control"
depends on MACH_MT8135 || COMPILE_TEST
@@ -45,15 +70,18 @@ config PINCTRL_MT2712
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK
+config PINCTRL_MT6765
+ bool "Mediatek MT6765 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_PARIS
+
config PINCTRL_MT7622
bool "MediaTek MT7622 pin control"
- depends on OF
depends on ARM64 || COMPILE_TEST
- select GENERIC_PINCONF
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select GPIOLIB
- select OF_GPIO
+ depends on PINCTRL_MTK_MOORE
+ default y
config PINCTRL_MT8173
bool "Mediatek MT8173 pin control"
@@ -62,6 +90,13 @@ config PINCTRL_MT8173
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK
+config PINCTRL_MT8183
+ bool "Mediatek MT8183 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_PARIS
+
# For PMIC
config PINCTRL_MT6397
bool "Mediatek MT6397 pin control"
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 3de7156df345..70d800054f69 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -2,12 +2,17 @@
# Core
obj-$(CONFIG_EINT_MTK) += mtk-eint.o
obj-$(CONFIG_PINCTRL_MTK) += pinctrl-mtk-common.o
+obj-$(CONFIG_PINCTRL_MTK_MOORE) += pinctrl-moore.o pinctrl-mtk-common-v2.o
+obj-$(CONFIG_PINCTRL_MTK_PARIS) += pinctrl-paris.o pinctrl-mtk-common-v2.o
# SoC Drivers
obj-$(CONFIG_PINCTRL_MT2701) += pinctrl-mt2701.o
obj-$(CONFIG_PINCTRL_MT2712) += pinctrl-mt2712.o
obj-$(CONFIG_PINCTRL_MT8135) += pinctrl-mt8135.o
obj-$(CONFIG_PINCTRL_MT8127) += pinctrl-mt8127.o
+obj-$(CONFIG_PINCTRL_MT6765) += pinctrl-mt6765.o
obj-$(CONFIG_PINCTRL_MT7622) += pinctrl-mt7622.o
+obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
+obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
obj-$(CONFIG_PINCTRL_MT6397) += pinctrl-mt6397.o
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index a613e546717a..f464f8cd274b 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -11,7 +11,7 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
diff --git a/drivers/pinctrl/mediatek/mtk-eint.h b/drivers/pinctrl/mediatek/mtk-eint.h
index c286a9b940f2..48468d0fae68 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.h
+++ b/drivers/pinctrl/mediatek/mtk-eint.h
@@ -92,13 +92,13 @@ static inline int mtk_eint_do_resume(struct mtk_eint *eint)
return -EOPNOTSUPP;
}
-int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_n,
+static inline int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_n,
unsigned int debounce)
{
return -EOPNOTSUPP;
}
-int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
+static inline int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
new file mode 100644
index 000000000000..3133ec0f2e67
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek Pinctrl Moore Driver, which implements the generic device-tree
+ * binding (pinctrl-bindings.txt) for MediaTek SoCs.
+ *
+ * Copyright (C) 2017-2018 MediaTek Inc.
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/gpio/driver.h>
+#include "pinctrl-moore.h"
+
+#define PINCTRL_PINCTRL_DEV KBUILD_MODNAME
+
+/* Custom pinconf parameters */
+#define MTK_PIN_CONFIG_TDSEL (PIN_CONFIG_END + 1)
+#define MTK_PIN_CONFIG_RDSEL (PIN_CONFIG_END + 2)
+#define MTK_PIN_CONFIG_PU_ADV (PIN_CONFIG_END + 3)
+#define MTK_PIN_CONFIG_PD_ADV (PIN_CONFIG_END + 4)
+
+static const struct pinconf_generic_params mtk_custom_bindings[] = {
+ {"mediatek,tdsel", MTK_PIN_CONFIG_TDSEL, 0},
+ {"mediatek,rdsel", MTK_PIN_CONFIG_RDSEL, 0},
+ {"mediatek,pull-up-adv", MTK_PIN_CONFIG_PU_ADV, 1},
+ {"mediatek,pull-down-adv", MTK_PIN_CONFIG_PD_ADV, 1},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item mtk_conf_items[] = {
+ PCONFDUMP(MTK_PIN_CONFIG_TDSEL, "tdsel", NULL, true),
+ PCONFDUMP(MTK_PIN_CONFIG_RDSEL, "rdsel", NULL, true),
+ PCONFDUMP(MTK_PIN_CONFIG_PU_ADV, "pu-adv", NULL, true),
+ PCONFDUMP(MTK_PIN_CONFIG_PD_ADV, "pd-adv", NULL, true),
+};
+#endif
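/*
 * Editor's note (not part of the patch): these custom parameters map onto
 * vendor device-tree properties in a board's pin configuration node,
 * roughly like the illustrative snippet below (group and pin names made
 * up, loosely following the MT7622 binding style):
 *
 *	uart_pins: uart-pins {
 *		mux {
 *			function = "uart";
 *			groups = "uart0_0_tx_rx";
 *		};
 *		conf {
 *			pins = "GPIO_A";
 *			mediatek,pull-up-adv = <3>;
 *		};
 *	};
 *
 * pinconf_generic_dt_node_to_map_all() turns each property into a
 * (param, argument) pair that lands in mtk_pinconf_set() below.
 */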
+
+static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ struct group_desc *grp;
+ int i;
+
+ func = pinmux_generic_get_function(pctldev, selector);
+ if (!func)
+ return -EINVAL;
+
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ return -EINVAL;
+
+ dev_dbg(pctldev->dev, "enable function %s group %s\n",
+ func->name, grp->name);
+
+ for (i = 0; i < grp->num_pins; i++) {
+ const struct mtk_pin_desc *desc;
+ int *pin_modes = grp->data;
+ int pin = grp->pins[i];
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+ mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
+ pin_modes[i]);
+ }
+
+ return 0;
+}
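/*
 * Editor's note (not part of the patch): grp->data is assumed to carry a
 * per-pin array of mux modes, indexed in lockstep with grp->pins; the
 * PINCTRL_PIN_GROUP() helper in pinctrl-moore.h packs the "funcs" array
 * into exactly that slot.
 */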
+
+static int mtk_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ const struct mtk_pin_desc *desc;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
+ hw->soc->gpio_m);
+}
+
+static int mtk_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin, bool input)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ const struct mtk_pin_desc *desc;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+ /* the hardware takes 0 as the input direction */
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, !input);
+}
+
+static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ u32 param = pinconf_to_config_param(*config);
+ int val, val2, err, reg, ret = 1;
+ const struct mtk_pin_desc *desc;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (hw->soc->bias_disable_get) {
+ err = hw->soc->bias_disable_get(hw, desc, &ret);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (hw->soc->bias_get) {
+ err = hw->soc->bias_get(hw, desc, 1, &ret);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (hw->soc->bias_get) {
+ err = hw->soc->bias_get(hw, desc, 0, &ret);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &val);
+ if (err)
+ return err;
+
+ if (!val)
+ return -EINVAL;
+
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
+ if (err)
+ return err;
+
+ /* HW takes input mode as zero; output mode as non-zero */
+ if ((val && param == PIN_CONFIG_INPUT_ENABLE) ||
+ (!val && param == PIN_CONFIG_OUTPUT_ENABLE))
+ return -EINVAL;
+
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
+ if (err)
+ return err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &val2);
+ if (err)
+ return err;
+
+ if (val || !val2)
+ return -EINVAL;
+
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ if (hw->soc->drive_get) {
+ err = hw->soc->drive_get(hw, desc, &ret);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case MTK_PIN_CONFIG_TDSEL:
+ case MTK_PIN_CONFIG_RDSEL:
+ reg = (param == MTK_PIN_CONFIG_TDSEL) ?
+ PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
+
+ err = mtk_hw_get_value(hw, desc, reg, &val);
+ if (err)
+ return err;
+
+ ret = val;
+
+ break;
+ case MTK_PIN_CONFIG_PU_ADV:
+ case MTK_PIN_CONFIG_PD_ADV:
+ if (hw->soc->adv_pull_get) {
+ bool pullup;
+
+ pullup = param == MTK_PIN_CONFIG_PU_ADV;
+ err = hw->soc->adv_pull_get(hw, desc, pullup, &ret);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, ret);
+
+ return 0;
+}
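/*
 * Editor's sketch (not part of the patch): the packed-config protocol the
 * function above implements. Callers pass the parameter packed into
 * *config; on success the argument field carries the readback value. The
 * helper below is illustrative only, assuming a nonzero argument means
 * the bias is active.
 */
static bool mtk_example_pull_up_active(struct pinctrl_dev *pctldev,
				       unsigned int pin)
{
	unsigned long config =
		pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 0);

	if (mtk_pinconf_get(pctldev, pin, &config))
		return false;

	return pinconf_to_config_argument(config) != 0;
}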
+
+static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ const struct mtk_pin_desc *desc;
+ u32 reg, param, arg;
+ int cfg, err = 0;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+ for (cfg = 0; cfg < num_configs; cfg++) {
+ param = pinconf_to_config_param(configs[cfg]);
+ arg = pinconf_to_config_argument(configs[cfg]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (hw->soc->bias_disable_set) {
+ err = hw->soc->bias_disable_set(hw, desc);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (hw->soc->bias_set) {
+ err = hw->soc->bias_set(hw, desc, 1);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (hw->soc->bias_set) {
+ err = hw->soc->bias_set(hw, desc, 0);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
+ MTK_DISABLE);
+ if (err)
+ goto err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+ MTK_OUTPUT);
+ if (err)
+ goto err;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+
+ if (hw->soc->ies_present) {
+ mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES,
+ MTK_ENABLE);
+ }
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+ MTK_INPUT);
+ if (err)
+ goto err;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SR,
+ arg);
+ if (err)
+ goto err;
+
+ break;
+ case PIN_CONFIG_OUTPUT:
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+ MTK_OUTPUT);
+ if (err)
+ goto err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO,
+ arg);
+ if (err)
+ goto err;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ /* arg = 1: input mode with Schmitt trigger enabled;
+ * arg = 0: output mode with Schmitt trigger disabled
+ */
+ arg = arg ? 2 : 1;
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+ arg & 1);
+ if (err)
+ goto err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
+ !!(arg & 2));
+ if (err)
+ goto err;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ if (hw->soc->drive_set) {
+ err = hw->soc->drive_set(hw, desc, arg);
+ if (err)
+ return err;
+ } else {
+ err = -ENOTSUPP;
+ }
+ break;
+ case MTK_PIN_CONFIG_TDSEL:
+ case MTK_PIN_CONFIG_RDSEL:
+ reg = (param == MTK_PIN_CONFIG_TDSEL) ?
+ PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
+
+ err = mtk_hw_set_value(hw, desc, reg, arg);
+ if (err)
+ goto err;
+ break;
+ case MTK_PIN_CONFIG_PU_ADV:
+ case MTK_PIN_CONFIG_PD_ADV:
+ if (hw->soc->adv_pull_set) {
+ bool pullup;
+
+ pullup = param == MTK_PIN_CONFIG_PU_ADV;
+ err = hw->soc->adv_pull_set(hw, desc, pullup,
+ arg);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ default:
+ err = -ENOTSUPP;
+ }
+ }
+err:
+ return err;
+}
+
+static int mtk_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *config)
+{
+ const unsigned int *pins;
+ unsigned int i, npins, old = 0;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ if (mtk_pinconf_get(pctldev, pins[i], config))
+ return -ENOTSUPP;
+
+ /* configs do not match between two pins */
+ if (i && old != *config)
+ return -ENOTSUPP;
+
+ old = *config;
+ }
+
+ return 0;
+}
+
+static int mtk_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int i, npins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = mtk_pinconf_set(pctldev, pins[i], configs, num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinctrl_ops mtk_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static const struct pinmux_ops mtk_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = mtk_pinmux_set_mux,
+ .gpio_request_enable = mtk_pinmux_gpio_request_enable,
+ .gpio_set_direction = mtk_pinmux_gpio_set_direction,
+ .strict = true,
+};
+
+static const struct pinconf_ops mtk_confops = {
+ .is_generic = true,
+ .pin_config_get = mtk_pinconf_get,
+ .pin_config_set = mtk_pinconf_set,
+ .pin_config_group_get = mtk_pinconf_group_get,
+ .pin_config_group_set = mtk_pinconf_group_set,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static struct pinctrl_desc mtk_desc = {
+ .name = PINCTRL_PINCTRL_DEV,
+ .pctlops = &mtk_pctlops,
+ .pmxops = &mtk_pmxops,
+ .confops = &mtk_confops,
+ .owner = THIS_MODULE,
+};
+
+static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ const struct mtk_pin_desc *desc;
+ int value, err;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value);
+ if (err)
+ return err;
+
+ return !!value;
+}
+
+static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ const struct mtk_pin_desc *desc;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+ mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
+}
+
+static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
+{
+ return pinctrl_gpio_direction_input(chip->base + gpio);
+}
+
+static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
+ int value)
+{
+ mtk_gpio_set(chip, gpio, value);
+
+ return pinctrl_gpio_direction_output(chip->base + gpio);
+}
+
+static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ const struct mtk_pin_desc *desc;
+
+ if (!hw->eint)
+ return -ENOTSUPP;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[offset];
+
+ if (desc->eint.eint_n == (u16)EINT_NA)
+ return -ENOTSUPP;
+
+ return mtk_eint_find_irq(hw->eint, desc->eint.eint_n);
+}
+
+static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ const struct mtk_pin_desc *desc;
+ u32 debounce;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[offset];
+
+ if (!hw->eint ||
+ pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE ||
+ desc->eint.eint_n == (u16)EINT_NA)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+
+ return mtk_eint_set_debounce(hw->eint, desc->eint.eint_n, debounce);
+}
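/*
 * Editor's sketch (not part of the patch): the consumer-side path that
 * ends up in this ->set_config hook. gpiolib packs
 * PIN_CONFIG_INPUT_DEBOUNCE with the requested value before calling the
 * chip, and the EINT hardware then does the debouncing. Requires
 * <linux/gpio/consumer.h> on the consumer side.
 */
static int mtk_example_debounce(struct gpio_desc *gpiod)
{
	/* value is in microseconds, per gpiod_set_debounce() */
	return gpiod_set_debounce(gpiod, 1000);
}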
+
+static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
+{
+ struct gpio_chip *chip = &hw->chip;
+ int ret;
+
+ chip->label = PINCTRL_PINCTRL_DEV;
+ chip->parent = hw->dev;
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
+ chip->direction_input = mtk_gpio_direction_input;
+ chip->direction_output = mtk_gpio_direction_output;
+ chip->get = mtk_gpio_get;
+ chip->set = mtk_gpio_set;
+ chip->to_irq = mtk_gpio_to_irq;
+ chip->set_config = mtk_gpio_set_config;
+ chip->base = -1;
+ chip->ngpio = hw->soc->npins;
+ chip->of_node = np;
+ chip->of_gpio_n_cells = 2;
+
+ ret = gpiochip_add_data(chip, hw);
+ if (ret < 0)
+ return ret;
+
+ /* Only for backward compatibility with old pinctrl nodes that lack
+ * the "gpio-ranges" property; calling this directly from a
+ * DeviceTree-aware pinctrl driver is otherwise DEPRECATED.
+ * Please see Section 2.1 of
+ * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+ * bind pinctrl and gpio drivers via the "gpio-ranges" property.
+ */
+ if (!of_find_property(np, "gpio-ranges", NULL)) {
+ ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
+ chip->ngpio);
+ if (ret < 0) {
+ gpiochip_remove(chip);
+ return ret;
+ }
+ }
+
+ return 0;
+}
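/*
 * Editor's note (not part of the patch): the preferred, non-deprecated
 * way to get the pin range is the "gpio-ranges" property in the pinctrl
 * node, e.g. (illustrative node name and pin count):
 *
 *	pio: pinctrl@10211000 {
 *		...
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *		gpio-ranges = <&pio 0 0 203>;
 *	};
 *
 * With that property present, gpiolib registers the range itself and the
 * gpiochip_add_pin_range() fallback above is skipped.
 */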
+
+static int mtk_build_groups(struct mtk_pinctrl *hw)
+{
+ int err, i;
+
+ for (i = 0; i < hw->soc->ngrps; i++) {
+ const struct group_desc *group = hw->soc->grps + i;
+
+ err = pinctrl_generic_add_group(hw->pctrl, group->name,
+ group->pins, group->num_pins,
+ group->data);
+ if (err < 0) {
+ dev_err(hw->dev, "Failed to register group %s\n",
+ group->name);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mtk_build_functions(struct mtk_pinctrl *hw)
+{
+ int i, err;
+
+ for (i = 0; i < hw->soc->nfuncs; i++) {
+ const struct function_desc *func = hw->soc->funcs + i;
+
+ err = pinmux_generic_add_function(hw->pctrl, func->name,
+ func->group_names,
+ func->num_group_names,
+ func->data);
+ if (err < 0) {
+ dev_err(hw->dev, "Failed to register function %s\n",
+ func->name);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mtk_moore_pinctrl_probe(struct platform_device *pdev,
+ const struct mtk_pin_soc *soc)
+{
+ struct pinctrl_pin_desc *pins;
+ struct resource *res;
+ struct mtk_pinctrl *hw;
+ int err, i;
+
+ hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ hw->soc = soc;
+ hw->dev = &pdev->dev;
+
+ if (!hw->soc->nbase_names) {
+ dev_err(&pdev->dev,
+ "SoC should be assigned at least one register base\n");
+ return -EINVAL;
+ }
+
+ hw->base = devm_kmalloc_array(&pdev->dev, hw->soc->nbase_names,
+ sizeof(*hw->base), GFP_KERNEL);
+ if (!hw->base)
+ return -ENOMEM;
+
+ for (i = 0; i < hw->soc->nbase_names; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ hw->soc->base_names[i]);
+ if (!res) {
+ dev_err(&pdev->dev, "missing IO resource\n");
+ return -ENXIO;
+ }
+
+ hw->base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base[i]))
+ return PTR_ERR(hw->base[i]);
+ }
+
+ hw->nbase = hw->soc->nbase_names;
+
+ /* Copy from the internal struct mtk_pin_desc to register pins with the core */
+ pins = devm_kmalloc_array(&pdev->dev, hw->soc->npins, sizeof(*pins),
+ GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < hw->soc->npins; i++) {
+ pins[i].number = hw->soc->pins[i].number;
+ pins[i].name = hw->soc->pins[i].name;
+ }
+
+ /* Setup pins descriptions per SoC types */
+ mtk_desc.pins = (const struct pinctrl_pin_desc *)pins;
+ mtk_desc.npins = hw->soc->npins;
+ mtk_desc.num_custom_params = ARRAY_SIZE(mtk_custom_bindings);
+ mtk_desc.custom_params = mtk_custom_bindings;
+#ifdef CONFIG_DEBUG_FS
+ mtk_desc.custom_conf_items = mtk_conf_items;
+#endif
+
+ err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
+ &hw->pctrl);
+ if (err)
+ return err;
+
+ /* Setup groups descriptions per SoC types */
+ err = mtk_build_groups(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to build groups\n");
+ return err;
+ }
+
+ /* Setup functions descriptions per SoC types */
+ err = mtk_build_functions(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to build functions\n");
+ return err;
+ }
+
+ /* To make pinctrl_claim_hogs() work, we must not enable pinctrl
+ * until all groups and functions have been added.
+ */
+ err = pinctrl_enable(hw->pctrl);
+ if (err)
+ return err;
+
+ err = mtk_build_eint(hw, pdev);
+ if (err)
+ dev_warn(&pdev->dev,
+ "Failed to add EINT, but pinctrl still can work\n");
+
+ /* Building the gpiochip must be done after pinctrl_enable() */
+ err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hw);
+
+ return 0;
+}
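/*
 * Editor's sketch (not part of the patch): how an SoC front-end driver
 * (such as the pinctrl-mt7623.c added by this series) is expected to
 * wrap this probe helper. The "xyz" names and compatible string are
 * hypothetical, and xyz_soc_data stands in for a fully populated
 * struct mtk_pin_soc.
 */
static const struct of_device_id xyz_pinctrl_of_match[] = {
	{ .compatible = "mediatek,xyz-pinctrl", },
	{ }
};

static int xyz_pinctrl_probe(struct platform_device *pdev)
{
	return mtk_moore_pinctrl_probe(pdev, &xyz_soc_data);
}

static struct platform_driver xyz_pinctrl_driver = {
	.driver = {
		.name = "xyz-pinctrl",
		.of_match_table = xyz_pinctrl_of_match,
	},
	.probe = xyz_pinctrl_probe,
};
builtin_platform_driver(xyz_pinctrl_driver);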
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.h b/drivers/pinctrl/mediatek/pinctrl-moore.h
new file mode 100644
index 000000000000..e1b4b82b9d3d
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2018 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+#ifndef __PINCTRL_MOORE_H
+#define __PINCTRL_MOORE_H
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+#include "mtk-eint.h"
+#include "pinctrl-mtk-common-v2.h"
+
+#define MTK_RANGE(_a) { .range = (_a), .nranges = ARRAY_SIZE(_a), }
+
+#define MTK_PIN(_number, _name, _eint_m, _eint_n, _drv_n) { \
+ .number = _number, \
+ .name = _name, \
+ .eint = { \
+ .eint_m = _eint_m, \
+ .eint_n = _eint_n, \
+ }, \
+ .drv_n = _drv_n, \
+ .funcs = NULL, \
+ }
+
+#define PINCTRL_PIN_GROUP(name, id) \
+ { \
+ name, \
+ id##_pins, \
+ ARRAY_SIZE(id##_pins), \
+ id##_funcs, \
+ }
+
+int mtk_moore_pinctrl_probe(struct platform_device *pdev,
+ const struct mtk_pin_soc *soc);
+
+#endif /* __PINCTRL_MOORE_H */
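/*
 * Editor's sketch (not part of the patch): how the PINCTRL_PIN_GROUP()
 * helper above is meant to be instantiated in an SoC pin table. The pin
 * numbers and mode values are made up; the "funcs" array lands in
 * group_desc->data, which mtk_pinmux_set_mux() reads as per-pin modes.
 */
static int mt_example_uart_pins[] = { 78, 79 };
static int mt_example_uart_funcs[] = { 1, 1 };	/* mux mode per pin */

static const struct group_desc mt_example_groups[] = {
	PINCTRL_PIN_GROUP("uart_rxd_txd", mt_example_uart),
};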
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6765.c b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
new file mode 100644
index 000000000000..32451e8693be
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
@@ -0,0 +1,1108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: ZH Chen <zh.chen@mediatek.com>
+ *
+ */
+
+#include "pinctrl-mtk-mt6765.h"
+#include "pinctrl-paris.h"
+
+/* MT6765 has multiple bases to program pin configuration, listed below:
+ * iocfg[0]:0x10005000, iocfg[1]:0x10002C00, iocfg[2]:0x10002800,
+ * iocfg[3]:0x10002A00, iocfg[4]:0x10002000, iocfg[5]:0x10002200,
+ * iocfg[6]:0x10002500, iocfg[7]:0x10002600.
+ * _i_base indicates which base the pin should be mapped into.
+ */
+
+#define PIN_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 0)
+
+#define PINS_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 1)
+
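/*
 * Editor's note (not part of the patch): a worked example of the macro
 * arguments, under the PIN_FIELD_CALC(start_pin, end_pin, base_index,
 * start_addr, addr_stride, start_bit, bits_per_pin, ...) layout assumed
 * from pinctrl-mtk-common-v2.h:
 *
 *	PIN_FIELD_BASE(8, 8, 3, 0x0080, 0x10, 3, 1)
 *
 * describes pin 8 only, living in iocfg[3] (0x10002A00 per the comment
 * above), register offset 0x0080, one bit wide starting at bit 3;
 * registers are 32 bits and advance in 0x10 steps when a field spills
 * over. PINS_FIELD_BASE() is the same except that the listed span of
 * pins shares a single bit field.
 */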
+static const struct mtk_pin_field_calc mt6765_pin_mode_range[] = {
+ PIN_FIELD(0, 202, 0x300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_dir_range[] = {
+ PIN_FIELD(0, 202, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_di_range[] = {
+ PIN_FIELD(0, 202, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_do_range[] = {
+ PIN_FIELD(0, 202, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_smt_range[] = {
+ PINS_FIELD_BASE(0, 3, 2, 0x00b0, 0x10, 4, 1),
+ PINS_FIELD_BASE(4, 7, 2, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(8, 8, 3, 0x0080, 0x10, 3, 1),
+ PINS_FIELD_BASE(9, 11, 2, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(12, 12, 5, 0x0060, 0x10, 9, 1),
+ PINS_FIELD_BASE(13, 16, 6, 0x00b0, 0x10, 10, 1),
+ PINS_FIELD_BASE(17, 20, 6, 0x00b0, 0x10, 8, 1),
+ PINS_FIELD_BASE(21, 24, 6, 0x00b0, 0x10, 9, 1),
+ PINS_FIELD_BASE(25, 28, 6, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 6, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 6, 0x00b0, 0x10, 1, 1),
+ PINS_FIELD_BASE(31, 34, 6, 0x00b0, 0x10, 2, 1),
+ PINS_FIELD_BASE(35, 36, 6, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(37, 37, 6, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(38, 38, 6, 0x00b0, 0x10, 4, 1),
+ PINS_FIELD_BASE(39, 40, 6, 0x00b0, 0x10, 3, 1),
+ PINS_FIELD_BASE(41, 42, 7, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x00c0, 0x10, 8, 1),
+ PINS_FIELD_BASE(46, 47, 7, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(48, 48, 7, 0x00c0, 0x10, 15, 1),
+ PIN_FIELD_BASE(49, 49, 7, 0x00c0, 0x10, 17, 1),
+ PIN_FIELD_BASE(50, 50, 7, 0x00c0, 0x10, 14, 1),
+ PIN_FIELD_BASE(51, 51, 7, 0x00c0, 0x10, 16, 1),
+ PINS_FIELD_BASE(52, 57, 7, 0x00c0, 0x10, 0, 1),
+ PINS_FIELD_BASE(58, 60, 7, 0x00c0, 0x10, 12, 1),
+ PINS_FIELD_BASE(61, 62, 3, 0x0080, 0x10, 5, 1),
+ PINS_FIELD_BASE(63, 64, 3, 0x0080, 0x10, 4, 1),
+ PINS_FIELD_BASE(65, 66, 3, 0x0080, 0x10, 7, 1),
+ PINS_FIELD_BASE(67, 68, 3, 0x0080, 0x10, 6, 1),
+ PINS_FIELD_BASE(69, 73, 3, 0x0080, 0x10, 1, 1),
+ PINS_FIELD_BASE(74, 78, 3, 0x0080, 0x10, 2, 1),
+ PINS_FIELD_BASE(79, 80, 3, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(85, 85, 7, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(86, 86, 7, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(87, 87, 7, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(88, 88, 7, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x00b0, 0x10, 13, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x0080, 0x10, 8, 1),
+ PINS_FIELD_BASE(91, 92, 2, 0x00b0, 0x10, 8, 1),
+ PINS_FIELD_BASE(93, 94, 2, 0x00b0, 0x10, 7, 1),
+ PINS_FIELD_BASE(95, 96, 2, 0x00b0, 0x10, 14, 1),
+ PINS_FIELD_BASE(97, 98, 2, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x00b0, 0x10, 1, 1),
+ PINS_FIELD_BASE(101, 102, 2, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x00b0, 0x10, 12, 1),
+ PIN_FIELD_BASE(107, 107, 1, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(108, 108, 1, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(109, 109, 1, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(110, 110, 1, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(111, 111, 1, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(112, 112, 1, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 1, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(114, 114, 1, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(115, 115, 1, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 1, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(117, 117, 1, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(118, 118, 1, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(119, 119, 1, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(120, 120, 1, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 1, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(126, 126, 4, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(127, 127, 4, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(128, 128, 4, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(129, 129, 4, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(130, 130, 4, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(131, 131, 4, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(132, 132, 4, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(133, 133, 4, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(134, 134, 5, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(135, 135, 5, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(136, 136, 5, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(137, 137, 5, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(138, 138, 5, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(139, 139, 5, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(140, 140, 5, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(141, 141, 5, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(142, 142, 5, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(143, 143, 5, 0x0060, 0x10, 3, 1),
+ PINS_FIELD_BASE(144, 147, 5, 0x0060, 0x10, 10, 1),
+ PINS_FIELD_BASE(148, 149, 5, 0x0060, 0x10, 12, 1),
+ PINS_FIELD_BASE(150, 151, 7, 0x00c0, 0x10, 9, 1),
+ PINS_FIELD_BASE(152, 153, 7, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(154, 154, 7, 0x00c0, 0x10, 11, 1),
+ PINS_FIELD_BASE(155, 158, 3, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(159, 159, 7, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(161, 161, 1, 0x0080, 0x10, 15, 1),
+ PIN_FIELD_BASE(162, 162, 1, 0x0080, 0x10, 16, 1),
+ PINS_FIELD_BASE(163, 170, 4, 0x0080, 0x10, 0, 1),
+ PINS_FIELD_BASE(171, 179, 7, 0x00c0, 0x10, 5, 1),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_pd_range[] = {
+ PIN_FIELD_BASE(0, 0, 2, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(1, 1, 2, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(2, 2, 2, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(3, 3, 2, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(4, 4, 2, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(5, 5, 2, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(6, 6, 2, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(7, 7, 2, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(8, 8, 3, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(9, 9, 2, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(10, 10, 2, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 2, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(12, 12, 5, 0x0030, 0x10, 9, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(14, 14, 6, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(16, 16, 6, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(17, 17, 6, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(18, 18, 6, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(19, 19, 6, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(20, 20, 6, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(21, 21, 6, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(22, 22, 6, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(23, 23, 6, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(24, 24, 6, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(25, 25, 6, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(26, 26, 6, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(27, 27, 6, 0x0040, 0x10, 3, 1),
+ PINS_FIELD_BASE(28, 40, 6, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 7, 0x0060, 0x10, 19, 1),
+ PIN_FIELD_BASE(42, 42, 7, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x0060, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x0060, 0x10, 21, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x0060, 0x10, 20, 1),
+ PIN_FIELD_BASE(48, 48, 7, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 7, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(50, 50, 7, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(51, 51, 7, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(52, 52, 7, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(53, 53, 7, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(54, 54, 7, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(55, 55, 7, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(56, 56, 7, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(57, 57, 7, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(58, 58, 7, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(59, 59, 7, 0x0060, 0x10, 31, 1),
+ PIN_FIELD_BASE(60, 60, 7, 0x0060, 0x10, 30, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0040, 0x10, 18, 1),
+ PIN_FIELD_BASE(62, 62, 3, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(63, 63, 3, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(64, 64, 3, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(65, 65, 3, 0x0040, 0x10, 20, 1),
+ PIN_FIELD_BASE(66, 66, 3, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(67, 67, 3, 0x0040, 0x10, 19, 1),
+ PIN_FIELD_BASE(68, 68, 3, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(69, 69, 3, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(70, 70, 3, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(71, 71, 3, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(72, 72, 3, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(74, 74, 3, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(75, 75, 3, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(76, 76, 3, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0040, 0x10, 25, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0040, 0x10, 24, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0040, 0x10, 22, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0040, 0x10, 23, 1),
+ PIN_FIELD_BASE(85, 85, 7, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(86, 86, 7, 0x0060, 0x10, 29, 1),
+ PIN_FIELD_BASE(87, 87, 7, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(88, 88, 7, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x0040, 0x10, 21, 1),
+ PINS_FIELD_BASE(90, 94, 3, 0x0040, 0x10, 21, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0040, 0x10, 22, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0040, 0x10, 23, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0040, 0x10, 19, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0040, 0x10, 18, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0040, 0x10, 20, 1),
+ PIN_FIELD_BASE(107, 107, 1, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(108, 108, 1, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(109, 109, 1, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(110, 110, 1, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(111, 111, 1, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(112, 112, 1, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 1, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(114, 114, 1, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(115, 115, 1, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 1, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(117, 117, 1, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(118, 118, 1, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(119, 119, 1, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(120, 120, 1, 0x0040, 0x10, 11, 1),
+ PINS_FIELD_BASE(121, 133, 1, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(134, 134, 5, 0x0030, 0x10, 14, 1),
+ PIN_FIELD_BASE(135, 135, 5, 0x0030, 0x10, 19, 1),
+ PIN_FIELD_BASE(136, 136, 5, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(137, 137, 5, 0x0030, 0x10, 7, 1),
+ PIN_FIELD_BASE(138, 138, 5, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(139, 139, 5, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(140, 140, 5, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(141, 141, 5, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(142, 142, 5, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(143, 143, 5, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(144, 144, 5, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(145, 145, 5, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(146, 146, 5, 0x0030, 0x10, 13, 1),
+ PIN_FIELD_BASE(147, 147, 5, 0x0030, 0x10, 10, 1),
+ PIN_FIELD_BASE(148, 148, 5, 0x0030, 0x10, 15, 1),
+ PIN_FIELD_BASE(149, 149, 5, 0x0030, 0x10, 16, 1),
+ PIN_FIELD_BASE(150, 150, 7, 0x0060, 0x10, 23, 1),
+ PIN_FIELD_BASE(151, 151, 7, 0x0060, 0x10, 24, 1),
+ PIN_FIELD_BASE(152, 152, 7, 0x0060, 0x10, 25, 1),
+ PIN_FIELD_BASE(153, 153, 7, 0x0060, 0x10, 26, 1),
+ PIN_FIELD_BASE(154, 154, 7, 0x0060, 0x10, 28, 1),
+ PIN_FIELD_BASE(155, 155, 3, 0x0040, 0x10, 28, 1),
+ PIN_FIELD_BASE(156, 156, 3, 0x0040, 0x10, 27, 1),
+ PIN_FIELD_BASE(157, 157, 3, 0x0040, 0x10, 29, 1),
+ PIN_FIELD_BASE(158, 158, 3, 0x0040, 0x10, 26, 1),
+ PIN_FIELD_BASE(159, 159, 7, 0x0060, 0x10, 27, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x0030, 0x10, 8, 1),
+ PIN_FIELD_BASE(161, 161, 1, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(162, 162, 1, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x0020, 0x10, 1, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x0020, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x0020, 0x10, 4, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x0020, 0x10, 5, 1),
+ PIN_FIELD_BASE(169, 169, 4, 0x0020, 0x10, 6, 1),
+ PIN_FIELD_BASE(170, 170, 4, 0x0020, 0x10, 7, 1),
+ PIN_FIELD_BASE(171, 171, 7, 0x0060, 0x10, 17, 1),
+ PIN_FIELD_BASE(172, 172, 7, 0x0060, 0x10, 18, 1),
+ PIN_FIELD_BASE(173, 173, 7, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(174, 174, 7, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 7, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(176, 176, 7, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(177, 177, 7, 0x0060, 0x10, 15, 1),
+ PINS_FIELD_BASE(178, 179, 7, 0x0060, 0x10, 16, 1),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_pu_range[] = {
+ PIN_FIELD_BASE(0, 0, 2, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(1, 1, 2, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(2, 2, 2, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(3, 3, 2, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(4, 4, 2, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(5, 5, 2, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(6, 6, 2, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(7, 7, 2, 0x0060, 0x10, 15, 1),
+ PIN_FIELD_BASE(8, 8, 3, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(9, 9, 2, 0x0060, 0x10, 16, 1),
+ PIN_FIELD_BASE(10, 10, 2, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 2, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(12, 12, 5, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0060, 0x10, 14, 1),
+ PIN_FIELD_BASE(14, 14, 6, 0x0060, 0x10, 13, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0060, 0x10, 15, 1),
+ PIN_FIELD_BASE(16, 16, 6, 0x0060, 0x10, 12, 1),
+ PIN_FIELD_BASE(17, 17, 6, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(18, 18, 6, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(19, 19, 6, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(20, 20, 6, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(21, 21, 6, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(22, 22, 6, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(23, 23, 6, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(24, 24, 6, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(25, 25, 6, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(26, 26, 6, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(27, 27, 6, 0x0060, 0x10, 3, 1),
+ PINS_FIELD_BASE(28, 40, 6, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 7, 0x0080, 0x10, 19, 1),
+ PIN_FIELD_BASE(42, 42, 7, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x0080, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x0080, 0x10, 21, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x0080, 0x10, 20, 1),
+ PIN_FIELD_BASE(48, 48, 7, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 7, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(50, 50, 7, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(51, 51, 7, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(52, 52, 7, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(53, 53, 7, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(54, 54, 7, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(55, 55, 7, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(56, 56, 7, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(57, 57, 7, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(58, 58, 7, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(59, 59, 7, 0x0080, 0x10, 31, 1),
+ PIN_FIELD_BASE(60, 60, 7, 0x0080, 0x10, 30, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(62, 62, 3, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(63, 63, 3, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(64, 64, 3, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(65, 65, 3, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(66, 66, 3, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(67, 67, 3, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(68, 68, 3, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(69, 69, 3, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(70, 70, 3, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(71, 71, 3, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(72, 72, 3, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(74, 74, 3, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(75, 75, 3, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(76, 76, 3, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0050, 0x10, 25, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0050, 0x10, 24, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0050, 0x10, 22, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(85, 85, 7, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(86, 86, 7, 0x0080, 0x10, 29, 1),
+ PIN_FIELD_BASE(87, 87, 7, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(88, 88, 7, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x0060, 0x10, 21, 1),
+ PINS_FIELD_BASE(90, 94, 3, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0060, 0x10, 22, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0060, 0x10, 23, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0060, 0x10, 17, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0060, 0x10, 19, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0060, 0x10, 18, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0060, 0x10, 20, 1),
+ PIN_FIELD_BASE(107, 107, 1, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(108, 108, 1, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(109, 109, 1, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(110, 110, 1, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(111, 111, 1, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(112, 112, 1, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 1, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(114, 114, 1, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(115, 115, 1, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 1, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(117, 117, 1, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(118, 118, 1, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(119, 119, 1, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(120, 120, 1, 0x0050, 0x10, 11, 1),
+ PINS_FIELD_BASE(121, 133, 1, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(134, 134, 5, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(135, 135, 5, 0x0040, 0x10, 19, 1),
+ PIN_FIELD_BASE(136, 136, 5, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(137, 137, 5, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(138, 138, 5, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(139, 139, 5, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(140, 140, 5, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(141, 141, 5, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(142, 142, 5, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(143, 143, 5, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(144, 144, 5, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(145, 145, 5, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(146, 146, 5, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(147, 147, 5, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(148, 148, 5, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(149, 149, 5, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(150, 150, 7, 0x0080, 0x10, 23, 1),
+ PIN_FIELD_BASE(151, 151, 7, 0x0080, 0x10, 24, 1),
+ PIN_FIELD_BASE(152, 152, 7, 0x0080, 0x10, 25, 1),
+ PIN_FIELD_BASE(153, 153, 7, 0x0080, 0x10, 26, 1),
+ PIN_FIELD_BASE(154, 154, 7, 0x0080, 0x10, 28, 1),
+ PIN_FIELD_BASE(155, 155, 3, 0x0050, 0x10, 28, 1),
+ PIN_FIELD_BASE(156, 156, 3, 0x0050, 0x10, 27, 1),
+ PIN_FIELD_BASE(157, 157, 3, 0x0050, 0x10, 29, 1),
+ PIN_FIELD_BASE(158, 158, 3, 0x0050, 0x10, 26, 1),
+ PIN_FIELD_BASE(159, 159, 7, 0x0080, 0x10, 27, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(161, 161, 1, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(162, 162, 1, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(169, 169, 4, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(170, 170, 4, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(171, 171, 7, 0x0080, 0x10, 17, 1),
+ PIN_FIELD_BASE(172, 172, 7, 0x0080, 0x10, 18, 1),
+ PIN_FIELD_BASE(173, 173, 7, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(174, 174, 7, 0x0080, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 7, 0x0080, 0x10, 13, 1),
+ PIN_FIELD_BASE(176, 176, 7, 0x0080, 0x10, 14, 1),
+ PIN_FIELD_BASE(177, 177, 7, 0x0080, 0x10, 15, 1),
+ PINS_FIELD_BASE(178, 179, 7, 0x0080, 0x10, 16, 1),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_tdsel_range[] = {
+ PINS_FIELD_BASE(0, 3, 2, 0x00c0, 0x10, 16, 4),
+ PINS_FIELD_BASE(4, 7, 2, 0x00c0, 0x10, 20, 4),
+ PIN_FIELD_BASE(8, 8, 3, 0x0090, 0x10, 12, 4),
+ PINS_FIELD_BASE(9, 11, 2, 0x00c0, 0x10, 24, 4),
+ PIN_FIELD_BASE(12, 12, 5, 0x0080, 0x10, 4, 4),
+ PINS_FIELD_BASE(13, 16, 6, 0x00e0, 0x10, 8, 4),
+ PINS_FIELD_BASE(17, 20, 6, 0x00e0, 0x10, 0, 4),
+ PINS_FIELD_BASE(21, 24, 6, 0x00e0, 0x10, 4, 4),
+ PINS_FIELD_BASE(25, 28, 6, 0x00d0, 0x10, 28, 4),
+ PIN_FIELD_BASE(29, 29, 6, 0x00d0, 0x10, 0, 4),
+ PIN_FIELD_BASE(30, 30, 6, 0x00d0, 0x10, 4, 4),
+ PINS_FIELD_BASE(31, 34, 6, 0x00d0, 0x10, 8, 4),
+ PINS_FIELD_BASE(35, 36, 6, 0x00d0, 0x10, 20, 4),
+ PIN_FIELD_BASE(37, 37, 6, 0x00d0, 0x10, 24, 4),
+ PIN_FIELD_BASE(38, 38, 6, 0x00d0, 0x10, 16, 4),
+ PINS_FIELD_BASE(39, 40, 6, 0x00d0, 0x10, 12, 4),
+ PINS_FIELD_BASE(41, 42, 7, 0x00d0, 0x10, 24, 4),
+ PIN_FIELD_BASE(43, 43, 7, 0x00d0, 0x10, 12, 4),
+ PIN_FIELD_BASE(44, 44, 7, 0x00d0, 0x10, 16, 4),
+ PIN_FIELD_BASE(45, 45, 7, 0x00e0, 0x10, 0, 4),
+ PINS_FIELD_BASE(46, 47, 7, 0x00d0, 0x10, 28, 4),
+ PINS_FIELD_BASE(48, 49, 7, 0x00e0, 0x10, 28, 4),
+ PINS_FIELD_BASE(50, 51, 7, 0x00e0, 0x10, 24, 4),
+ PINS_FIELD_BASE(52, 57, 7, 0x00d0, 0x10, 0, 4),
+ PINS_FIELD_BASE(58, 60, 7, 0x00e0, 0x10, 16, 4),
+ PINS_FIELD_BASE(61, 62, 3, 0x0090, 0x10, 20, 4),
+ PINS_FIELD_BASE(63, 64, 3, 0x0090, 0x10, 16, 4),
+ PINS_FIELD_BASE(65, 66, 3, 0x0090, 0x10, 28, 4),
+ PINS_FIELD_BASE(67, 68, 3, 0x0090, 0x10, 24, 4),
+ PINS_FIELD_BASE(69, 73, 3, 0x0090, 0x10, 4, 4),
+ PINS_FIELD_BASE(74, 78, 3, 0x0090, 0x10, 8, 4),
+ PINS_FIELD_BASE(79, 80, 3, 0x0090, 0x10, 0, 4),
+ PIN_FIELD_BASE(81, 81, 3, 0x00a0, 0x10, 8, 4),
+ PINS_FIELD_BASE(82, 83, 3, 0x00a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(84, 84, 3, 0x00a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(85, 85, 7, 0x00e0, 0x10, 16, 4),
+ PIN_FIELD_BASE(86, 86, 7, 0x00e0, 0x10, 20, 4),
+ PIN_FIELD_BASE(87, 87, 7, 0x00d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(88, 88, 7, 0x00d0, 0x10, 4, 4),
+ PIN_FIELD_BASE(89, 89, 2, 0x00d0, 0x10, 12, 4),
+ PIN_FIELD_BASE(90, 90, 3, 0x00a0, 0x10, 0, 4),
+ PINS_FIELD_BASE(91, 92, 2, 0x00d0, 0x10, 0, 4),
+ PINS_FIELD_BASE(93, 94, 2, 0x00c0, 0x10, 28, 4),
+ PINS_FIELD_BASE(95, 96, 2, 0x00d0, 0x10, 16, 4),
+ PINS_FIELD_BASE(97, 98, 2, 0x00c0, 0x10, 8, 4),
+ PIN_FIELD_BASE(99, 99, 2, 0x00c0, 0x10, 0, 4),
+ PIN_FIELD_BASE(100, 100, 2, 0x00c0, 0x10, 4, 4),
+ PINS_FIELD_BASE(101, 102, 2, 0x00c0, 0x10, 12, 4),
+ PINS_FIELD_BASE(103, 104, 2, 0x00d0, 0x10, 4, 4),
+ PINS_FIELD_BASE(105, 106, 2, 0x00d0, 0x10, 8, 4),
+ PIN_FIELD_BASE(107, 107, 1, 0x0090, 0x10, 16, 4),
+ PIN_FIELD_BASE(108, 108, 1, 0x0090, 0x10, 12, 4),
+ PIN_FIELD_BASE(109, 109, 1, 0x0090, 0x10, 20, 4),
+ PIN_FIELD_BASE(110, 110, 1, 0x0090, 0x10, 0, 4),
+ PIN_FIELD_BASE(111, 111, 1, 0x0090, 0x10, 4, 4),
+ PIN_FIELD_BASE(112, 112, 1, 0x0090, 0x10, 8, 4),
+ PIN_FIELD_BASE(113, 113, 1, 0x00a0, 0x10, 4, 4),
+ PIN_FIELD_BASE(114, 114, 1, 0x00a0, 0x10, 8, 4),
+ PIN_FIELD_BASE(115, 115, 1, 0x0090, 0x10, 24, 4),
+ PIN_FIELD_BASE(116, 116, 1, 0x0090, 0x10, 28, 4),
+ PIN_FIELD_BASE(117, 117, 1, 0x00a0, 0x10, 16, 4),
+ PIN_FIELD_BASE(118, 118, 1, 0x00a0, 0x10, 20, 4),
+ PIN_FIELD_BASE(119, 119, 1, 0x00a0, 0x10, 24, 4),
+ PIN_FIELD_BASE(120, 120, 1, 0x00a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(121, 121, 1, 0x00a0, 0x10, 0, 4),
+ PIN_FIELD_BASE(122, 122, 4, 0x0090, 0x10, 8, 4),
+ PIN_FIELD_BASE(123, 123, 4, 0x0090, 0x10, 12, 4),
+ PIN_FIELD_BASE(124, 124, 4, 0x0090, 0x10, 4, 4),
+ PINS_FIELD_BASE(125, 130, 4, 0x0090, 0x10, 12, 4),
+ PIN_FIELD_BASE(131, 131, 4, 0x0090, 0x10, 16, 4),
+ PIN_FIELD_BASE(132, 132, 4, 0x0090, 0x10, 12, 4),
+ PIN_FIELD_BASE(133, 133, 4, 0x0090, 0x10, 20, 4),
+ PIN_FIELD_BASE(134, 134, 5, 0x0080, 0x10, 12, 4),
+ PIN_FIELD_BASE(135, 135, 5, 0x0080, 0x10, 20, 4),
+ PIN_FIELD_BASE(136, 136, 5, 0x0070, 0x10, 4, 4),
+ PIN_FIELD_BASE(137, 137, 5, 0x0070, 0x10, 28, 4),
+ PIN_FIELD_BASE(138, 138, 5, 0x0070, 0x10, 16, 4),
+ PIN_FIELD_BASE(139, 139, 5, 0x0070, 0x10, 20, 4),
+ PIN_FIELD_BASE(140, 140, 5, 0x0070, 0x10, 0, 4),
+ PIN_FIELD_BASE(141, 141, 5, 0x0070, 0x10, 24, 4),
+ PIN_FIELD_BASE(142, 142, 5, 0x0070, 0x10, 8, 4),
+ PIN_FIELD_BASE(143, 143, 5, 0x0070, 0x10, 12, 4),
+ PINS_FIELD_BASE(144, 147, 5, 0x0080, 0x10, 8, 4),
+ PINS_FIELD_BASE(148, 149, 5, 0x0080, 0x10, 16, 4),
+ PINS_FIELD_BASE(150, 151, 7, 0x00e0, 0x10, 4, 4),
+ PINS_FIELD_BASE(152, 153, 7, 0x00e0, 0x10, 8, 4),
+ PIN_FIELD_BASE(154, 154, 7, 0x00e0, 0x10, 12, 4),
+ PINS_FIELD_BASE(155, 158, 3, 0x00a0, 0x10, 12, 4),
+ PIN_FIELD_BASE(159, 159, 7, 0x00e0, 0x10, 12, 4),
+ PIN_FIELD_BASE(160, 160, 5, 0x0080, 0x10, 0, 4),
+ PINS_FIELD_BASE(161, 162, 1, 0x00a0, 0x10, 28, 4),
+ PINS_FIELD_BASE(163, 170, 4, 0x0090, 0x10, 0, 4),
+ PINS_FIELD_BASE(171, 179, 7, 0x00d0, 0x10, 20, 4),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_rdsel_range[] = {
+ PINS_FIELD_BASE(0, 3, 2, 0x0090, 0x10, 8, 2),
+ PINS_FIELD_BASE(4, 7, 2, 0x0090, 0x10, 10, 2),
+ PIN_FIELD_BASE(8, 8, 3, 0x0060, 0x10, 6, 2),
+ PINS_FIELD_BASE(9, 11, 2, 0x0090, 0x10, 12, 2),
+ PIN_FIELD_BASE(12, 12, 5, 0x0050, 0x10, 18, 2),
+ PINS_FIELD_BASE(13, 16, 6, 0x00a0, 0x10, 18, 2),
+ PINS_FIELD_BASE(17, 20, 6, 0x00a0, 0x10, 14, 2),
+ PINS_FIELD_BASE(21, 24, 6, 0x00a0, 0x10, 16, 2),
+ PINS_FIELD_BASE(25, 28, 6, 0x00a0, 0x10, 12, 2),
+ PIN_FIELD_BASE(29, 29, 6, 0x0090, 0x10, 0, 6),
+ PIN_FIELD_BASE(30, 30, 6, 0x0090, 0x10, 6, 6),
+ PINS_FIELD_BASE(31, 34, 6, 0x0090, 0x10, 12, 6),
+ PINS_FIELD_BASE(35, 36, 6, 0x00a0, 0x10, 0, 6),
+ PIN_FIELD_BASE(37, 37, 6, 0x00a0, 0x10, 6, 6),
+ PIN_FIELD_BASE(38, 38, 6, 0x0090, 0x10, 24, 6),
+ PINS_FIELD_BASE(39, 40, 6, 0x0090, 0x10, 18, 6),
+ PINS_FIELD_BASE(41, 42, 7, 0x00a0, 0x10, 12, 2),
+ PIN_FIELD_BASE(43, 43, 7, 0x00a0, 0x10, 6, 2),
+ PIN_FIELD_BASE(44, 44, 7, 0x00a0, 0x10, 8, 2),
+ PIN_FIELD_BASE(45, 45, 7, 0x00a0, 0x10, 16, 2),
+ PINS_FIELD_BASE(46, 47, 7, 0x00a0, 0x10, 14, 2),
+ PINS_FIELD_BASE(48, 49, 7, 0x00a0, 0x10, 30, 2),
+ PINS_FIELD_BASE(50, 51, 7, 0x00a0, 0x10, 28, 2),
+ PINS_FIELD_BASE(52, 57, 7, 0x00a0, 0x10, 0, 2),
+ PINS_FIELD_BASE(58, 60, 7, 0x00a0, 0x10, 24, 2),
+ PINS_FIELD_BASE(61, 62, 3, 0x0060, 0x10, 10, 2),
+ PINS_FIELD_BASE(63, 64, 3, 0x0060, 0x10, 8, 2),
+ PINS_FIELD_BASE(65, 66, 3, 0x0060, 0x10, 14, 2),
+ PINS_FIELD_BASE(67, 68, 3, 0x0060, 0x10, 12, 2),
+ PINS_FIELD_BASE(69, 73, 3, 0x0060, 0x10, 2, 2),
+ PINS_FIELD_BASE(74, 78, 3, 0x0060, 0x10, 4, 2),
+ PINS_FIELD_BASE(79, 80, 3, 0x0060, 0x10, 0, 2),
+ PIN_FIELD_BASE(81, 81, 3, 0x0060, 0x10, 20, 2),
+ PINS_FIELD_BASE(82, 83, 3, 0x0060, 0x10, 18, 2),
+ PIN_FIELD_BASE(84, 84, 3, 0x0060, 0x10, 20, 2),
+ PIN_FIELD_BASE(85, 85, 7, 0x00a0, 0x10, 24, 2),
+ PIN_FIELD_BASE(86, 86, 7, 0x00a0, 0x10, 26, 2),
+ PIN_FIELD_BASE(87, 87, 7, 0x00a0, 0x10, 4, 2),
+ PIN_FIELD_BASE(88, 88, 7, 0x00a0, 0x10, 2, 2),
+ PIN_FIELD_BASE(89, 89, 2, 0x0090, 0x10, 22, 2),
+ PIN_FIELD_BASE(90, 90, 3, 0x0060, 0x10, 16, 2),
+ PINS_FIELD_BASE(91, 92, 2, 0x0090, 0x10, 16, 2),
+ PINS_FIELD_BASE(93, 94, 2, 0x0090, 0x10, 14, 2),
+ PINS_FIELD_BASE(95, 96, 2, 0x0090, 0x10, 24, 2),
+ PINS_FIELD_BASE(97, 98, 2, 0x0090, 0x10, 4, 2),
+ PIN_FIELD_BASE(99, 99, 2, 0x0090, 0x10, 0, 2),
+ PIN_FIELD_BASE(100, 100, 2, 0x0090, 0x10, 2, 2),
+ PINS_FIELD_BASE(101, 102, 2, 0x0090, 0x10, 6, 2),
+ PINS_FIELD_BASE(103, 104, 2, 0x0090, 0x10, 18, 2),
+ PINS_FIELD_BASE(105, 106, 2, 0x0090, 0x10, 20, 2),
+ PIN_FIELD_BASE(107, 107, 1, 0x0060, 0x10, 8, 2),
+ PIN_FIELD_BASE(108, 108, 1, 0x0060, 0x10, 6, 2),
+ PIN_FIELD_BASE(109, 109, 1, 0x0060, 0x10, 10, 2),
+ PIN_FIELD_BASE(110, 110, 1, 0x0060, 0x10, 0, 2),
+ PIN_FIELD_BASE(111, 111, 1, 0x0060, 0x10, 2, 2),
+ PIN_FIELD_BASE(112, 112, 1, 0x0060, 0x10, 4, 2),
+ PIN_FIELD_BASE(113, 113, 1, 0x0060, 0x10, 18, 2),
+ PIN_FIELD_BASE(114, 114, 1, 0x0060, 0x10, 20, 2),
+ PIN_FIELD_BASE(115, 115, 1, 0x0060, 0x10, 12, 2),
+ PIN_FIELD_BASE(116, 116, 1, 0x0060, 0x10, 14, 2),
+ PIN_FIELD_BASE(117, 117, 1, 0x0060, 0x10, 24, 2),
+ PIN_FIELD_BASE(118, 118, 1, 0x0060, 0x10, 26, 2),
+ PIN_FIELD_BASE(119, 119, 1, 0x0060, 0x10, 28, 2),
+ PIN_FIELD_BASE(120, 120, 1, 0x0060, 0x10, 22, 2),
+ PIN_FIELD_BASE(121, 121, 1, 0x0060, 0x10, 16, 2),
+ PIN_FIELD_BASE(122, 122, 4, 0x0070, 0x10, 8, 6),
+ PIN_FIELD_BASE(123, 123, 4, 0x0070, 0x10, 14, 6),
+ PIN_FIELD_BASE(124, 124, 4, 0x0070, 0x10, 2, 6),
+ PINS_FIELD_BASE(125, 130, 4, 0x0070, 0x10, 14, 6),
+ PIN_FIELD_BASE(131, 131, 4, 0x0070, 0x10, 20, 6),
+ PIN_FIELD_BASE(132, 132, 4, 0x0070, 0x10, 14, 6),
+ PIN_FIELD_BASE(133, 133, 4, 0x0070, 0x10, 26, 6),
+ PIN_FIELD_BASE(134, 134, 5, 0x0050, 0x10, 22, 2),
+ PIN_FIELD_BASE(135, 135, 5, 0x0050, 0x10, 30, 2),
+ PIN_FIELD_BASE(136, 136, 5, 0x0050, 0x10, 2, 2),
+ PIN_FIELD_BASE(137, 137, 5, 0x0050, 0x10, 14, 2),
+ PIN_FIELD_BASE(138, 138, 5, 0x0050, 0x10, 8, 2),
+ PIN_FIELD_BASE(139, 139, 5, 0x0050, 0x10, 10, 2),
+ PIN_FIELD_BASE(140, 140, 5, 0x0050, 0x10, 0, 2),
+ PIN_FIELD_BASE(141, 141, 5, 0x0050, 0x10, 12, 2),
+ PIN_FIELD_BASE(142, 142, 5, 0x0050, 0x10, 4, 2),
+ PIN_FIELD_BASE(143, 143, 5, 0x0050, 0x10, 6, 2),
+ PINS_FIELD_BASE(144, 147, 5, 0x0050, 0x10, 20, 2),
+ PINS_FIELD_BASE(148, 149, 5, 0x0050, 0x10, 24, 2),
+ PINS_FIELD_BASE(150, 151, 7, 0x00a0, 0x10, 18, 2),
+ PINS_FIELD_BASE(152, 153, 7, 0x00a0, 0x10, 20, 2),
+ PIN_FIELD_BASE(154, 154, 7, 0x00a0, 0x10, 22, 2),
+ PINS_FIELD_BASE(155, 158, 3, 0x0060, 0x10, 22, 2),
+ PIN_FIELD_BASE(159, 159, 7, 0x00a0, 0x10, 22, 2),
+ PIN_FIELD_BASE(160, 160, 5, 0x0050, 0x10, 16, 2),
+ PINS_FIELD_BASE(161, 162, 1, 0x0060, 0x10, 30, 2),
+ PINS_FIELD_BASE(163, 170, 4, 0x0070, 0x10, 0, 2),
+ PINS_FIELD_BASE(171, 179, 7, 0x00a0, 0x10, 10, 2),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_drv_range[] = {
+ PINS_FIELD_BASE(0, 2, 2, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(3, 3, 2, 0x0000, 0x10, 15, 3),
+ PINS_FIELD_BASE(4, 6, 2, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(7, 7, 2, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(8, 8, 3, 0x0000, 0x10, 9, 3),
+ PINS_FIELD_BASE(9, 11, 2, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(12, 12, 5, 0x0000, 0x10, 27, 3),
+ PINS_FIELD_BASE(13, 15, 6, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(16, 16, 6, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(17, 17, 6, 0x0000, 0x10, 23, 3),
+ PIN_FIELD_BASE(18, 18, 6, 0x0000, 0x10, 26, 3),
+ PINS_FIELD_BASE(19, 20, 6, 0x0000, 0x10, 23, 3),
+ PINS_FIELD_BASE(21, 23, 6, 0x0000, 0x10, 29, 3),
+ PIN_FIELD_BASE(24, 24, 6, 0x0010, 0x10, 0, 3),
+ PINS_FIELD_BASE(25, 27, 6, 0x0000, 0x10, 17, 3),
+ PIN_FIELD_BASE(28, 28, 6, 0x0000, 0x10, 20, 3),
+ PIN_FIELD_BASE(29, 29, 6, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(30, 30, 6, 0x0000, 0x10, 3, 3),
+ PINS_FIELD_BASE(31, 34, 6, 0x0000, 0x10, 6, 3),
+ PINS_FIELD_BASE(35, 36, 6, 0x0000, 0x10, 13, 2),
+ PIN_FIELD_BASE(37, 37, 6, 0x0000, 0x10, 15, 2),
+ PIN_FIELD_BASE(38, 38, 6, 0x0000, 0x10, 11, 2),
+ PINS_FIELD_BASE(39, 40, 6, 0x0000, 0x10, 9, 2),
+ PINS_FIELD_BASE(41, 42, 7, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(43, 43, 7, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(44, 44, 7, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(45, 45, 7, 0x0000, 0x10, 27, 3),
+ PINS_FIELD_BASE(46, 47, 7, 0x0000, 0x10, 24, 3),
+ PINS_FIELD_BASE(48, 49, 7, 0x0010, 0x10, 18, 3),
+ PINS_FIELD_BASE(50, 51, 7, 0x0010, 0x10, 15, 3),
+ PINS_FIELD_BASE(52, 57, 7, 0x0000, 0x10, 0, 3),
+ PINS_FIELD_BASE(58, 60, 7, 0x0010, 0x10, 9, 3),
+ PINS_FIELD_BASE(61, 62, 3, 0x0000, 0x10, 15, 3),
+ PINS_FIELD_BASE(63, 64, 3, 0x0000, 0x10, 12, 3),
+ PINS_FIELD_BASE(65, 66, 3, 0x0000, 0x10, 21, 3),
+ PINS_FIELD_BASE(67, 68, 3, 0x0000, 0x10, 18, 3),
+ PINS_FIELD_BASE(69, 73, 3, 0x0000, 0x10, 3, 3),
+ PINS_FIELD_BASE(74, 78, 3, 0x0000, 0x10, 6, 3),
+ PINS_FIELD_BASE(79, 80, 3, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(81, 81, 3, 0x0010, 0x10, 0, 3),
+ PINS_FIELD_BASE(82, 83, 3, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(84, 84, 3, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(85, 85, 7, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(86, 86, 7, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(87, 87, 7, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(88, 88, 7, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(89, 89, 2, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(90, 90, 3, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(91, 91, 2, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(92, 92, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(93, 93, 2, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(94, 94, 2, 0x0010, 0x10, 0, 3),
+ PINS_FIELD_BASE(95, 96, 2, 0x0010, 0x10, 18, 3),
+ PINS_FIELD_BASE(97, 98, 2, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(99, 99, 2, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(100, 100, 2, 0x0000, 0x10, 3, 3),
+ PINS_FIELD_BASE(101, 102, 2, 0x0000, 0x10, 9, 3),
+ PINS_FIELD_BASE(103, 104, 2, 0x0010, 0x10, 9, 3),
+ PINS_FIELD_BASE(105, 106, 2, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(107, 107, 1, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(108, 108, 1, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(109, 109, 1, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(110, 110, 1, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(111, 111, 1, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(112, 112, 1, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(113, 113, 1, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(114, 114, 1, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(115, 115, 1, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(116, 116, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(117, 117, 1, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(118, 118, 1, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(119, 119, 1, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(120, 120, 1, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(121, 121, 1, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(122, 122, 4, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(123, 123, 4, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(124, 124, 4, 0x0000, 0x10, 6, 3),
+ PINS_FIELD_BASE(125, 130, 4, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(131, 131, 4, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(132, 132, 4, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(133, 133, 4, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(134, 134, 5, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(135, 135, 5, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(136, 136, 5, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(137, 137, 5, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(138, 138, 5, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(139, 139, 5, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(140, 140, 5, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(141, 141, 5, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(142, 142, 5, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(143, 143, 5, 0x0000, 0x10, 9, 3),
+ PINS_FIELD_BASE(144, 146, 5, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(147, 147, 5, 0x0010, 0x10, 3, 3),
+ PINS_FIELD_BASE(148, 149, 5, 0x0010, 0x10, 9, 3),
+ PINS_FIELD_BASE(150, 151, 7, 0x0010, 0x10, 0, 3),
+ PINS_FIELD_BASE(152, 153, 7, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(154, 154, 7, 0x0010, 0x10, 6, 3),
+ PINS_FIELD_BASE(155, 157, 3, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(158, 158, 3, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(159, 159, 7, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(160, 160, 5, 0x0000, 0x10, 24, 3),
+ PINS_FIELD_BASE(161, 162, 1, 0x0010, 0x10, 15, 3),
+ PINS_FIELD_BASE(163, 166, 4, 0x0000, 0x10, 0, 3),
+ PINS_FIELD_BASE(167, 170, 4, 0x0000, 0x10, 3, 3),
+ PINS_FIELD_BASE(171, 174, 7, 0x0000, 0x10, 18, 3),
+ PINS_FIELD_BASE(175, 179, 7, 0x0000, 0x10, 15, 3),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_pupd_range[] = {
+ PINS_FIELD_BASE(0, 28, 0, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(29, 29, 6, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 6, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(31, 31, 6, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(32, 32, 6, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(33, 33, 6, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(34, 34, 6, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(35, 35, 6, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(36, 36, 6, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(37, 37, 6, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(38, 38, 6, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(39, 39, 6, 0x0050, 0x10, 8, 1),
+ PINS_FIELD_BASE(40, 90, 6, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0050, 0x10, 0, 1),
+ PINS_FIELD_BASE(94, 121, 2, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(126, 126, 4, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(127, 127, 4, 0x0030, 0x10, 8, 1),
+ PIN_FIELD_BASE(128, 128, 4, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 4, 0x0030, 0x10, 7, 1),
+ PIN_FIELD_BASE(130, 130, 4, 0x0030, 0x10, 9, 1),
+ PIN_FIELD_BASE(131, 131, 4, 0x0030, 0x10, 10, 1),
+ PIN_FIELD_BASE(132, 132, 4, 0x0030, 0x10, 5, 1),
+ PINS_FIELD_BASE(133, 179, 4, 0x0030, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_r0_range[] = {
+ PINS_FIELD_BASE(0, 28, 4, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(29, 29, 6, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 6, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(31, 31, 6, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(32, 32, 6, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(33, 33, 6, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(34, 34, 6, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(35, 35, 6, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(36, 36, 6, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(37, 37, 6, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(38, 38, 6, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(39, 39, 6, 0x0070, 0x10, 8, 1),
+ PINS_FIELD_BASE(40, 90, 6, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0070, 0x10, 0, 1),
+ PINS_FIELD_BASE(94, 121, 2, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(126, 126, 4, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(127, 127, 4, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(128, 128, 4, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 4, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(130, 130, 4, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(131, 131, 4, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(132, 132, 4, 0x0050, 0x10, 5, 1),
+ PINS_FIELD_BASE(133, 179, 4, 0x0050, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_r1_range[] = {
+ PINS_FIELD_BASE(0, 28, 4, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(29, 29, 6, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 6, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(31, 31, 6, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(32, 32, 6, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(33, 33, 6, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(34, 34, 6, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(35, 35, 6, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(36, 36, 6, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(37, 37, 6, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(38, 38, 6, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(39, 39, 6, 0x0080, 0x10, 8, 1),
+ PINS_FIELD_BASE(40, 90, 6, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0080, 0x10, 0, 1),
+ PINS_FIELD_BASE(94, 121, 2, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(126, 126, 4, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(127, 127, 4, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(128, 128, 4, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(129, 129, 4, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(130, 130, 4, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(131, 131, 4, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(132, 132, 4, 0x0060, 0x10, 5, 1),
+ PINS_FIELD_BASE(133, 179, 4, 0x0060, 0x10, 11, 1),
+};
+
+static const struct mtk_pin_field_calc mt6765_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 2, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(1, 1, 2, 0x0030, 0x10, 7, 1),
+ PIN_FIELD_BASE(2, 2, 2, 0x0030, 0x10, 10, 1),
+ PIN_FIELD_BASE(3, 3, 2, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(4, 4, 2, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(5, 5, 2, 0x0030, 0x10, 13, 1),
+ PIN_FIELD_BASE(6, 6, 2, 0x0030, 0x10, 14, 1),
+ PIN_FIELD_BASE(7, 7, 2, 0x0030, 0x10, 15, 1),
+ PIN_FIELD_BASE(8, 8, 3, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(9, 9, 2, 0x0030, 0x10, 16, 1),
+ PIN_FIELD_BASE(10, 10, 2, 0x0030, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 2, 0x0030, 0x10, 9, 1),
+ PIN_FIELD_BASE(12, 12, 5, 0x0020, 0x10, 9, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0020, 0x10, 26, 1),
+ PIN_FIELD_BASE(14, 14, 6, 0x0020, 0x10, 25, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0020, 0x10, 27, 1),
+ PIN_FIELD_BASE(16, 16, 6, 0x0020, 0x10, 24, 1),
+ PIN_FIELD_BASE(17, 17, 6, 0x0020, 0x10, 19, 1),
+ PIN_FIELD_BASE(18, 18, 6, 0x0020, 0x10, 16, 1),
+ PIN_FIELD_BASE(19, 19, 6, 0x0020, 0x10, 18, 1),
+ PIN_FIELD_BASE(20, 20, 6, 0x0020, 0x10, 17, 1),
+ PIN_FIELD_BASE(21, 21, 6, 0x0020, 0x10, 22, 1),
+ PIN_FIELD_BASE(22, 22, 6, 0x0020, 0x10, 21, 1),
+ PIN_FIELD_BASE(23, 23, 6, 0x0020, 0x10, 23, 1),
+ PIN_FIELD_BASE(24, 24, 6, 0x0020, 0x10, 20, 1),
+ PIN_FIELD_BASE(25, 25, 6, 0x0020, 0x10, 14, 1),
+ PIN_FIELD_BASE(26, 26, 6, 0x0020, 0x10, 13, 1),
+ PIN_FIELD_BASE(27, 27, 6, 0x0020, 0x10, 15, 1),
+ PIN_FIELD_BASE(28, 28, 6, 0x0020, 0x10, 12, 1),
+ PIN_FIELD_BASE(29, 29, 6, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 6, 0x0020, 0x10, 1, 1),
+ PIN_FIELD_BASE(31, 31, 6, 0x0020, 0x10, 5, 1),
+ PIN_FIELD_BASE(32, 32, 6, 0x0020, 0x10, 2, 1),
+ PIN_FIELD_BASE(33, 33, 6, 0x0020, 0x10, 4, 1),
+ PIN_FIELD_BASE(34, 34, 6, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(35, 35, 6, 0x0020, 0x10, 10, 1),
+ PIN_FIELD_BASE(36, 36, 6, 0x0020, 0x10, 11, 1),
+ PIN_FIELD_BASE(37, 37, 6, 0x0020, 0x10, 9, 1),
+ PIN_FIELD_BASE(38, 38, 6, 0x0020, 0x10, 6, 1),
+ PIN_FIELD_BASE(39, 39, 6, 0x0020, 0x10, 8, 1),
+ PIN_FIELD_BASE(40, 40, 6, 0x0020, 0x10, 7, 1),
+ PIN_FIELD_BASE(41, 41, 7, 0x0040, 0x10, 19, 1),
+ PIN_FIELD_BASE(42, 42, 7, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x0040, 0x10, 22, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x0040, 0x10, 21, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x0040, 0x10, 20, 1),
+ PIN_FIELD_BASE(48, 48, 7, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(49, 49, 7, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(50, 50, 7, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(51, 51, 7, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(52, 52, 7, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(53, 53, 7, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(54, 54, 7, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(55, 55, 7, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(56, 56, 7, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(57, 57, 7, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(58, 58, 7, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(59, 59, 7, 0x0040, 0x10, 31, 1),
+ PIN_FIELD_BASE(60, 60, 7, 0x0040, 0x10, 30, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0030, 0x10, 18, 1),
+ PIN_FIELD_BASE(62, 62, 3, 0x0030, 0x10, 14, 1),
+ PIN_FIELD_BASE(63, 63, 3, 0x0030, 0x10, 17, 1),
+ PIN_FIELD_BASE(64, 64, 3, 0x0030, 0x10, 13, 1),
+ PIN_FIELD_BASE(65, 65, 3, 0x0030, 0x10, 20, 1),
+ PIN_FIELD_BASE(66, 66, 3, 0x0030, 0x10, 16, 1),
+ PIN_FIELD_BASE(67, 67, 3, 0x0030, 0x10, 19, 1),
+ PIN_FIELD_BASE(68, 68, 3, 0x0030, 0x10, 15, 1),
+ PIN_FIELD_BASE(69, 69, 3, 0x0030, 0x10, 8, 1),
+ PIN_FIELD_BASE(70, 70, 3, 0x0030, 0x10, 7, 1),
+ PIN_FIELD_BASE(71, 71, 3, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(72, 72, 3, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(74, 74, 3, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(75, 75, 3, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(76, 76, 3, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0030, 0x10, 9, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0030, 0x10, 10, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0030, 0x10, 25, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0030, 0x10, 24, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0030, 0x10, 22, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0030, 0x10, 23, 1),
+ PIN_FIELD_BASE(85, 85, 7, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(86, 86, 7, 0x0040, 0x10, 29, 1),
+ PIN_FIELD_BASE(87, 87, 7, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(88, 88, 7, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x0030, 0x10, 25, 1),
+ PIN_FIELD_BASE(90, 90, 3, 0x0030, 0x10, 21, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0030, 0x10, 20, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0030, 0x10, 19, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0030, 0x10, 17, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x0030, 0x10, 18, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0030, 0x10, 26, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0030, 0x10, 27, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0030, 0x10, 21, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0030, 0x10, 23, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0030, 0x10, 22, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0030, 0x10, 24, 1),
+ PIN_FIELD_BASE(107, 107, 1, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(108, 108, 1, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(109, 109, 1, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(110, 110, 1, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(111, 111, 1, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(112, 112, 1, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 1, 0x0030, 0x10, 9, 1),
+ PIN_FIELD_BASE(114, 114, 1, 0x0030, 0x10, 10, 1),
+ PIN_FIELD_BASE(115, 115, 1, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 1, 0x0030, 0x10, 7, 1),
+ PIN_FIELD_BASE(117, 117, 1, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(118, 118, 1, 0x0030, 0x10, 13, 1),
+ PIN_FIELD_BASE(119, 119, 1, 0x0030, 0x10, 14, 1),
+ PIN_FIELD_BASE(120, 120, 1, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 1, 0x0030, 0x10, 8, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x0010, 0x10, 9, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x0010, 0x10, 10, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x0010, 0x10, 8, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x0010, 0x10, 12, 1),
+ PIN_FIELD_BASE(126, 126, 4, 0x0010, 0x10, 14, 1),
+ PIN_FIELD_BASE(127, 127, 4, 0x0010, 0x10, 16, 1),
+ PIN_FIELD_BASE(128, 128, 4, 0x0010, 0x10, 11, 1),
+ PIN_FIELD_BASE(129, 129, 4, 0x0010, 0x10, 15, 1),
+ PIN_FIELD_BASE(130, 130, 4, 0x0010, 0x10, 17, 1),
+ PIN_FIELD_BASE(131, 131, 4, 0x0010, 0x10, 18, 1),
+ PIN_FIELD_BASE(132, 132, 4, 0x0010, 0x10, 13, 1),
+ PIN_FIELD_BASE(133, 133, 4, 0x0010, 0x10, 19, 1),
+ PIN_FIELD_BASE(134, 134, 5, 0x0020, 0x10, 14, 1),
+ PIN_FIELD_BASE(135, 135, 5, 0x0020, 0x10, 17, 1),
+ PIN_FIELD_BASE(136, 136, 5, 0x0020, 0x10, 1, 1),
+ PIN_FIELD_BASE(137, 137, 5, 0x0020, 0x10, 7, 1),
+ PIN_FIELD_BASE(138, 138, 5, 0x0020, 0x10, 4, 1),
+ PIN_FIELD_BASE(139, 139, 5, 0x0020, 0x10, 5, 1),
+ PIN_FIELD_BASE(140, 140, 5, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(141, 141, 5, 0x0020, 0x10, 6, 1),
+ PIN_FIELD_BASE(142, 142, 5, 0x0020, 0x10, 2, 1),
+ PIN_FIELD_BASE(143, 143, 5, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(144, 144, 5, 0x0020, 0x10, 12, 1),
+ PIN_FIELD_BASE(145, 145, 5, 0x0020, 0x10, 11, 1),
+ PIN_FIELD_BASE(146, 146, 5, 0x0020, 0x10, 13, 1),
+ PIN_FIELD_BASE(147, 147, 5, 0x0020, 0x10, 10, 1),
+ PIN_FIELD_BASE(148, 148, 5, 0x0020, 0x10, 15, 1),
+ PIN_FIELD_BASE(149, 149, 5, 0x0020, 0x10, 16, 1),
+ PIN_FIELD_BASE(150, 150, 7, 0x0040, 0x10, 23, 1),
+ PIN_FIELD_BASE(151, 151, 7, 0x0040, 0x10, 24, 1),
+ PIN_FIELD_BASE(152, 152, 7, 0x0040, 0x10, 25, 1),
+ PIN_FIELD_BASE(153, 153, 7, 0x0040, 0x10, 26, 1),
+ PIN_FIELD_BASE(154, 154, 7, 0x0040, 0x10, 28, 1),
+ PIN_FIELD_BASE(155, 155, 3, 0x0030, 0x10, 28, 1),
+ PIN_FIELD_BASE(156, 156, 3, 0x0030, 0x10, 27, 1),
+ PIN_FIELD_BASE(157, 157, 3, 0x0030, 0x10, 29, 1),
+ PIN_FIELD_BASE(158, 158, 3, 0x0030, 0x10, 26, 1),
+ PIN_FIELD_BASE(159, 159, 7, 0x0040, 0x10, 27, 1),
+ PIN_FIELD_BASE(160, 160, 5, 0x0020, 0x10, 8, 1),
+ PIN_FIELD_BASE(161, 161, 1, 0x0030, 0x10, 15, 1),
+ PIN_FIELD_BASE(162, 162, 1, 0x0030, 0x10, 16, 1),
+ PIN_FIELD_BASE(163, 163, 4, 0x0010, 0x10, 0, 1),
+ PIN_FIELD_BASE(164, 164, 4, 0x0010, 0x10, 1, 1),
+ PIN_FIELD_BASE(165, 165, 4, 0x0010, 0x10, 2, 1),
+ PIN_FIELD_BASE(166, 166, 4, 0x0010, 0x10, 3, 1),
+ PIN_FIELD_BASE(167, 167, 4, 0x0010, 0x10, 4, 1),
+ PIN_FIELD_BASE(168, 168, 4, 0x0010, 0x10, 5, 1),
+ PIN_FIELD_BASE(169, 169, 4, 0x0010, 0x10, 6, 1),
+ PIN_FIELD_BASE(170, 170, 4, 0x0010, 0x10, 7, 1),
+ PIN_FIELD_BASE(171, 171, 7, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(172, 172, 7, 0x0040, 0x10, 18, 1),
+ PIN_FIELD_BASE(173, 173, 7, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(174, 174, 7, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 7, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(176, 176, 7, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(177, 177, 7, 0x0040, 0x10, 15, 1),
+ PINS_FIELD_BASE(178, 179, 7, 0x0040, 0x10, 16, 1),
+};
+
+static const struct mtk_pin_reg_calc mt6765_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt6765_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt6765_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt6765_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt6765_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt6765_pin_smt_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt6765_pin_pd_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt6765_pin_pu_range),
+ [PINCTRL_PIN_REG_TDSEL] = MTK_RANGE(mt6765_pin_tdsel_range),
+ [PINCTRL_PIN_REG_RDSEL] = MTK_RANGE(mt6765_pin_rdsel_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt6765_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt6765_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt6765_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt6765_pin_r1_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt6765_pin_ies_range),
+};
+
+static const char * const mt6765_pinctrl_register_base_names[] = {
+ "iocfg0", "iocfg1", "iocfg2", "iocfg3", "iocfg4", "iocfg5",
+ "iocfg6", "iocfg7",
+};
+
+static const struct mtk_eint_hw mt6765_eint_hw = {
+ .port_mask = 7,
+ .ports = 6,
+ .ap_num = 160,
+ .db_cnt = 13,
+};
+
+static const struct mtk_pin_soc mt6765_data = {
+ .reg_cal = mt6765_reg_cals,
+ .pins = mtk_pins_mt6765,
+ .npins = ARRAY_SIZE(mtk_pins_mt6765),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt6765),
+ .eint_hw = &mt6765_eint_hw,
+ .gpio_m = 0,
+ .ies_present = true,
+ .base_names = mt6765_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt6765_pinctrl_register_base_names),
+ .bias_disable_set = mtk_pinconf_bias_disable_set,
+ .bias_disable_get = mtk_pinconf_bias_disable_get,
+ .bias_set = mtk_pinconf_bias_set,
+ .bias_get = mtk_pinconf_bias_get,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_pull_get = mtk_pinconf_adv_pull_get,
+ .adv_pull_set = mtk_pinconf_adv_pull_set,
+};
+
+static const struct of_device_id mt6765_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt6765-pinctrl", },
+ { }
+};
+
+static int mt6765_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_paris_pinctrl_probe(pdev, &mt6765_data);
+}
+
+static struct platform_driver mt6765_pinctrl_driver = {
+ .driver = {
+ .name = "mt6765-pinctrl",
+ .of_match_table = mt6765_pinctrl_of_match,
+ },
+ .probe = mt6765_pinctrl_probe,
+};
+
+static int __init mt6765_pinctrl_init(void)
+{
+ return platform_driver_register(&mt6765_pinctrl_driver);
+}
+arch_initcall(mt6765_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index 6f931b85701b..ce4a8a0cc19c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -1,297 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * MediaTek MT7622 Pinctrl Driver
+ * Copyright (C) 2017-2018 MediaTek Inc.
*
- * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ * Author: Sean Wang <sean.wang@mediatek.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/gpio.h>
-#include <linux/gpio/driver.h>
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/mfd/syscon.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/regmap.h>
-
-#include "../core.h"
-#include "../pinconf.h"
-#include "../pinmux.h"
-#include "mtk-eint.h"
-
-#define PINCTRL_PINCTRL_DEV KBUILD_MODNAME
-#define MTK_RANGE(_a) { .range = (_a), .nranges = ARRAY_SIZE(_a), }
-#define PINCTRL_PIN_GROUP(name, id) \
- { \
- name, \
- id##_pins, \
- ARRAY_SIZE(id##_pins), \
- id##_funcs, \
- }
-
-#define MTK_GPIO_MODE 1
-#define MTK_INPUT 0
-#define MTK_OUTPUT 1
-#define MTK_DISABLE 0
-#define MTK_ENABLE 1
-
-/* Custom pinconf parameters */
-#define MTK_PIN_CONFIG_TDSEL (PIN_CONFIG_END + 1)
-#define MTK_PIN_CONFIG_RDSEL (PIN_CONFIG_END + 2)
-
-/* List these attributes which could be modified for the pin */
-enum {
- PINCTRL_PIN_REG_MODE,
- PINCTRL_PIN_REG_DIR,
- PINCTRL_PIN_REG_DI,
- PINCTRL_PIN_REG_DO,
- PINCTRL_PIN_REG_SR,
- PINCTRL_PIN_REG_SMT,
- PINCTRL_PIN_REG_PD,
- PINCTRL_PIN_REG_PU,
- PINCTRL_PIN_REG_E4,
- PINCTRL_PIN_REG_E8,
- PINCTRL_PIN_REG_TDSEL,
- PINCTRL_PIN_REG_RDSEL,
- PINCTRL_PIN_REG_MAX,
-};
-
-/* struct mtk_pin_field - the structure that holds the information of the field
- * used to describe the attribute for the pin
- * @offset: the register offset relative to the base address
- * @mask: the mask used to filter out the field from the register
- * @bitpos: the start bit relative to the register
- * @next: the indication that the field would be extended to the
- *			next register
*/
-struct mtk_pin_field {
- u32 offset;
- u32 mask;
- u8 bitpos;
- u8 next;
-};
-/* struct mtk_pin_field_calc - the structure that holds the range providing
- * the guide used to look up the relevant field
- * @s_pin: the start pin within the range
- * @e_pin: the end pin within the range
- * @s_addr: the start address for the range
- * @x_addrs: the address distance between two consecutive registers
- * within the range
- * @s_bit: the start bit for the first register within the range
- * @x_bits: the bit distance between two consecutive pins within
- * the range
- */
-struct mtk_pin_field_calc {
- u16 s_pin;
- u16 e_pin;
- u32 s_addr;
- u8 x_addrs;
- u8 s_bit;
- u8 x_bits;
-};
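
Taken together with the PIN_FIELD()/PINS_FIELD() tables above, this structure drives the lookup that mtk_hw_pin_field_lookup() performs further down in this diff. A minimal standalone sketch of that range-to-register arithmetic (illustrative names only, not driver API; assumes 32-bit registers, as the lookup code does):

#include <stdint.h>
#include <stdio.h>

struct range {
	uint16_t s_pin, e_pin;	/* first and last pin covered by the range */
	uint32_t s_addr;	/* register offset of the first field */
	uint8_t x_addrs;	/* distance between consecutive registers */
	uint8_t s_bit;		/* start bit of the first pin's field */
	uint8_t x_bits;		/* field width per pin, in bits */
};

static void resolve(const struct range *c, int pin)
{
	/* overall bit offset of this pin's field within the range */
	uint32_t bits = c->s_bit + (pin - c->s_pin) * c->x_bits;
	uint32_t offset = c->s_addr + c->x_addrs * (bits / 32);
	uint32_t bitpos = bits % 32;
	uint32_t mask = (1u << c->x_bits) - 1;

	printf("pin %d -> reg 0x%x, bit %u, mask 0x%x\n",
	       pin, offset, bitpos, mask);
}

int main(void)
{
	/* e.g. the mt7622 mode entry PIN_FIELD(18, 21, 0x310, 0x10, 16, 4) */
	struct range r = { 18, 21, 0x310, 0x10, 16, 4 };

	resolve(&r, 20);	/* bits = 16 + 2 * 4 = 24 -> reg 0x310, bit 24 */
	return 0;
}

The PINS_FIELD()/PINS_FIELD_BASE() variants appear to map a whole run of pins onto one shared field, which is why ranges such as pins 90-94 in the mt6765 tables above carry a single bit.
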
+#include "pinctrl-moore.h"
-/* struct mtk_pin_reg_calc - the structure that holds all ranges used to
- * determine which register the pin would make use of
- * for certain pin attribute.
- * @range: the start address for the range
- * @nranges: the number of items in the range
- */
-struct mtk_pin_reg_calc {
- const struct mtk_pin_field_calc *range;
- unsigned int nranges;
-};
-
-/* struct mtk_pin_soc - the structure that holds SoC-specific data */
-struct mtk_pin_soc {
- const struct mtk_pin_reg_calc *reg_cal;
- const struct pinctrl_pin_desc *pins;
- unsigned int npins;
- const struct group_desc *grps;
- unsigned int ngrps;
- const struct function_desc *funcs;
- unsigned int nfuncs;
- const struct mtk_eint_regs *eint_regs;
- const struct mtk_eint_hw *eint_hw;
-};
-
-struct mtk_pinctrl {
- struct pinctrl_dev *pctrl;
- void __iomem *base;
- struct device *dev;
- struct gpio_chip chip;
- const struct mtk_pin_soc *soc;
- struct mtk_eint *eint;
-};
+#define MT7622_PIN(_number, _name) \
+ MTK_PIN(_number, _name, 1, _number, DRV_GRP0)
static const struct mtk_pin_field_calc mt7622_pin_mode_range[] = {
- {0, 0, 0x320, 0x10, 16, 4},
- {1, 4, 0x3a0, 0x10, 16, 4},
- {5, 5, 0x320, 0x10, 0, 4},
- {6, 6, 0x300, 0x10, 4, 4},
- {7, 7, 0x300, 0x10, 4, 4},
- {8, 9, 0x350, 0x10, 20, 4},
- {10, 10, 0x300, 0x10, 8, 4},
- {11, 11, 0x300, 0x10, 8, 4},
- {12, 12, 0x300, 0x10, 8, 4},
- {13, 13, 0x300, 0x10, 8, 4},
- {14, 15, 0x320, 0x10, 4, 4},
- {16, 17, 0x320, 0x10, 20, 4},
- {18, 21, 0x310, 0x10, 16, 4},
- {22, 22, 0x380, 0x10, 16, 4},
- {23, 23, 0x300, 0x10, 24, 4},
- {24, 24, 0x300, 0x10, 24, 4},
- {25, 25, 0x300, 0x10, 12, 4},
- {25, 25, 0x300, 0x10, 12, 4},
- {26, 26, 0x300, 0x10, 12, 4},
- {27, 27, 0x300, 0x10, 12, 4},
- {28, 28, 0x300, 0x10, 12, 4},
- {29, 29, 0x300, 0x10, 12, 4},
- {30, 30, 0x300, 0x10, 12, 4},
- {31, 31, 0x300, 0x10, 12, 4},
- {32, 32, 0x300, 0x10, 12, 4},
- {33, 33, 0x300, 0x10, 12, 4},
- {34, 34, 0x300, 0x10, 12, 4},
- {35, 35, 0x300, 0x10, 12, 4},
- {36, 36, 0x300, 0x10, 12, 4},
- {37, 37, 0x300, 0x10, 20, 4},
- {38, 38, 0x300, 0x10, 20, 4},
- {39, 39, 0x300, 0x10, 20, 4},
- {40, 40, 0x300, 0x10, 20, 4},
- {41, 41, 0x300, 0x10, 20, 4},
- {42, 42, 0x300, 0x10, 20, 4},
- {43, 43, 0x300, 0x10, 20, 4},
- {44, 44, 0x300, 0x10, 20, 4},
- {45, 46, 0x300, 0x10, 20, 4},
- {47, 47, 0x300, 0x10, 20, 4},
- {48, 48, 0x300, 0x10, 20, 4},
- {49, 49, 0x300, 0x10, 20, 4},
- {50, 50, 0x300, 0x10, 20, 4},
- {51, 70, 0x330, 0x10, 4, 4},
- {71, 71, 0x300, 0x10, 16, 4},
- {72, 72, 0x300, 0x10, 16, 4},
- {73, 76, 0x310, 0x10, 0, 4},
- {77, 77, 0x320, 0x10, 28, 4},
- {78, 78, 0x320, 0x10, 12, 4},
- {79, 82, 0x3a0, 0x10, 0, 4},
- {83, 83, 0x350, 0x10, 28, 4},
- {84, 84, 0x330, 0x10, 0, 4},
- {85, 90, 0x360, 0x10, 4, 4},
- {91, 94, 0x390, 0x10, 16, 4},
- {95, 97, 0x380, 0x10, 20, 4},
- {98, 101, 0x390, 0x10, 0, 4},
- {102, 102, 0x360, 0x10, 0, 4},
+ PIN_FIELD(0, 0, 0x320, 0x10, 16, 4),
+ PIN_FIELD(1, 4, 0x3a0, 0x10, 16, 4),
+ PIN_FIELD(5, 5, 0x320, 0x10, 0, 4),
+ PINS_FIELD(6, 7, 0x300, 0x10, 4, 4),
+ PIN_FIELD(8, 9, 0x350, 0x10, 20, 4),
+ PINS_FIELD(10, 13, 0x300, 0x10, 8, 4),
+ PIN_FIELD(14, 15, 0x320, 0x10, 4, 4),
+ PIN_FIELD(16, 17, 0x320, 0x10, 20, 4),
+ PIN_FIELD(18, 21, 0x310, 0x10, 16, 4),
+ PIN_FIELD(22, 22, 0x380, 0x10, 16, 4),
+ PINS_FIELD(23, 24, 0x300, 0x10, 24, 4),
+ PINS_FIELD(25, 36, 0x300, 0x10, 12, 4),
+ PINS_FIELD(37, 50, 0x300, 0x10, 20, 4),
+ PIN_FIELD(51, 70, 0x330, 0x10, 4, 4),
+ PINS_FIELD(71, 72, 0x300, 0x10, 16, 4),
+ PIN_FIELD(73, 76, 0x310, 0x10, 0, 4),
+ PIN_FIELD(77, 77, 0x320, 0x10, 28, 4),
+ PIN_FIELD(78, 78, 0x320, 0x10, 12, 4),
+ PIN_FIELD(79, 82, 0x3a0, 0x10, 0, 4),
+ PIN_FIELD(83, 83, 0x350, 0x10, 28, 4),
+ PIN_FIELD(84, 84, 0x330, 0x10, 0, 4),
+ PIN_FIELD(85, 90, 0x360, 0x10, 4, 4),
+ PIN_FIELD(91, 94, 0x390, 0x10, 16, 4),
+ PIN_FIELD(95, 97, 0x380, 0x10, 20, 4),
+ PIN_FIELD(98, 101, 0x390, 0x10, 0, 4),
+ PIN_FIELD(102, 102, 0x360, 0x10, 0, 4),
};
static const struct mtk_pin_field_calc mt7622_pin_dir_range[] = {
- {0, 102, 0x0, 0x10, 0, 1},
+ PIN_FIELD(0, 102, 0x0, 0x10, 0, 1),
};
static const struct mtk_pin_field_calc mt7622_pin_di_range[] = {
- {0, 102, 0x200, 0x10, 0, 1},
+ PIN_FIELD(0, 102, 0x200, 0x10, 0, 1),
};
static const struct mtk_pin_field_calc mt7622_pin_do_range[] = {
- {0, 102, 0x100, 0x10, 0, 1},
+ PIN_FIELD(0, 102, 0x100, 0x10, 0, 1),
};
static const struct mtk_pin_field_calc mt7622_pin_sr_range[] = {
- {0, 31, 0x910, 0x10, 0, 1},
- {32, 50, 0xa10, 0x10, 0, 1},
- {51, 70, 0x810, 0x10, 0, 1},
- {71, 72, 0xb10, 0x10, 0, 1},
- {73, 86, 0xb10, 0x10, 4, 1},
- {87, 90, 0xc10, 0x10, 0, 1},
- {91, 102, 0xb10, 0x10, 18, 1},
+ PIN_FIELD(0, 31, 0x910, 0x10, 0, 1),
+ PIN_FIELD(32, 50, 0xa10, 0x10, 0, 1),
+ PIN_FIELD(51, 70, 0x810, 0x10, 0, 1),
+ PIN_FIELD(71, 72, 0xb10, 0x10, 0, 1),
+ PIN_FIELD(73, 86, 0xb10, 0x10, 4, 1),
+ PIN_FIELD(87, 90, 0xc10, 0x10, 0, 1),
+ PIN_FIELD(91, 102, 0xb10, 0x10, 18, 1),
};
static const struct mtk_pin_field_calc mt7622_pin_smt_range[] = {
- {0, 31, 0x920, 0x10, 0, 1},
- {32, 50, 0xa20, 0x10, 0, 1},
- {51, 70, 0x820, 0x10, 0, 1},
- {71, 72, 0xb20, 0x10, 0, 1},
- {73, 86, 0xb20, 0x10, 4, 1},
- {87, 90, 0xc20, 0x10, 0, 1},
- {91, 102, 0xb20, 0x10, 18, 1},
+ PIN_FIELD(0, 31, 0x920, 0x10, 0, 1),
+ PIN_FIELD(32, 50, 0xa20, 0x10, 0, 1),
+ PIN_FIELD(51, 70, 0x820, 0x10, 0, 1),
+ PIN_FIELD(71, 72, 0xb20, 0x10, 0, 1),
+ PIN_FIELD(73, 86, 0xb20, 0x10, 4, 1),
+ PIN_FIELD(87, 90, 0xc20, 0x10, 0, 1),
+ PIN_FIELD(91, 102, 0xb20, 0x10, 18, 1),
};
static const struct mtk_pin_field_calc mt7622_pin_pu_range[] = {
- {0, 31, 0x930, 0x10, 0, 1},
- {32, 50, 0xa30, 0x10, 0, 1},
- {51, 70, 0x830, 0x10, 0, 1},
- {71, 72, 0xb30, 0x10, 0, 1},
- {73, 86, 0xb30, 0x10, 4, 1},
- {87, 90, 0xc30, 0x10, 0, 1},
- {91, 102, 0xb30, 0x10, 18, 1},
+ PIN_FIELD(0, 31, 0x930, 0x10, 0, 1),
+ PIN_FIELD(32, 50, 0xa30, 0x10, 0, 1),
+ PIN_FIELD(51, 70, 0x830, 0x10, 0, 1),
+ PIN_FIELD(71, 72, 0xb30, 0x10, 0, 1),
+ PIN_FIELD(73, 86, 0xb30, 0x10, 4, 1),
+ PIN_FIELD(87, 90, 0xc30, 0x10, 0, 1),
+ PIN_FIELD(91, 102, 0xb30, 0x10, 18, 1),
};
static const struct mtk_pin_field_calc mt7622_pin_pd_range[] = {
- {0, 31, 0x940, 0x10, 0, 1},
- {32, 50, 0xa40, 0x10, 0, 1},
- {51, 70, 0x840, 0x10, 0, 1},
- {71, 72, 0xb40, 0x10, 0, 1},
- {73, 86, 0xb40, 0x10, 4, 1},
- {87, 90, 0xc40, 0x10, 0, 1},
- {91, 102, 0xb40, 0x10, 18, 1},
+ PIN_FIELD(0, 31, 0x940, 0x10, 0, 1),
+ PIN_FIELD(32, 50, 0xa40, 0x10, 0, 1),
+ PIN_FIELD(51, 70, 0x840, 0x10, 0, 1),
+ PIN_FIELD(71, 72, 0xb40, 0x10, 0, 1),
+ PIN_FIELD(73, 86, 0xb40, 0x10, 4, 1),
+ PIN_FIELD(87, 90, 0xc40, 0x10, 0, 1),
+ PIN_FIELD(91, 102, 0xb40, 0x10, 18, 1),
};
static const struct mtk_pin_field_calc mt7622_pin_e4_range[] = {
- {0, 31, 0x960, 0x10, 0, 1},
- {32, 50, 0xa60, 0x10, 0, 1},
- {51, 70, 0x860, 0x10, 0, 1},
- {71, 72, 0xb60, 0x10, 0, 1},
- {73, 86, 0xb60, 0x10, 4, 1},
- {87, 90, 0xc60, 0x10, 0, 1},
- {91, 102, 0xb60, 0x10, 18, 1},
+ PIN_FIELD(0, 31, 0x960, 0x10, 0, 1),
+ PIN_FIELD(32, 50, 0xa60, 0x10, 0, 1),
+ PIN_FIELD(51, 70, 0x860, 0x10, 0, 1),
+ PIN_FIELD(71, 72, 0xb60, 0x10, 0, 1),
+ PIN_FIELD(73, 86, 0xb60, 0x10, 4, 1),
+ PIN_FIELD(87, 90, 0xc60, 0x10, 0, 1),
+ PIN_FIELD(91, 102, 0xb60, 0x10, 18, 1),
};
static const struct mtk_pin_field_calc mt7622_pin_e8_range[] = {
- {0, 31, 0x970, 0x10, 0, 1},
- {32, 50, 0xa70, 0x10, 0, 1},
- {51, 70, 0x870, 0x10, 0, 1},
- {71, 72, 0xb70, 0x10, 0, 1},
- {73, 86, 0xb70, 0x10, 4, 1},
- {87, 90, 0xc70, 0x10, 0, 1},
- {91, 102, 0xb70, 0x10, 18, 1},
+ PIN_FIELD(0, 31, 0x970, 0x10, 0, 1),
+ PIN_FIELD(32, 50, 0xa70, 0x10, 0, 1),
+ PIN_FIELD(51, 70, 0x870, 0x10, 0, 1),
+ PIN_FIELD(71, 72, 0xb70, 0x10, 0, 1),
+ PIN_FIELD(73, 86, 0xb70, 0x10, 4, 1),
+ PIN_FIELD(87, 90, 0xc70, 0x10, 0, 1),
+ PIN_FIELD(91, 102, 0xb70, 0x10, 18, 1),
};
static const struct mtk_pin_field_calc mt7622_pin_tdsel_range[] = {
- {0, 31, 0x980, 0x4, 0, 4},
- {32, 50, 0xa80, 0x4, 0, 4},
- {51, 70, 0x880, 0x4, 0, 4},
- {71, 72, 0xb80, 0x4, 0, 4},
- {73, 86, 0xb80, 0x4, 16, 4},
- {87, 90, 0xc80, 0x4, 0, 4},
- {91, 102, 0xb88, 0x4, 8, 4},
+ PIN_FIELD(0, 31, 0x980, 0x4, 0, 4),
+ PIN_FIELD(32, 50, 0xa80, 0x4, 0, 4),
+ PIN_FIELD(51, 70, 0x880, 0x4, 0, 4),
+ PIN_FIELD(71, 72, 0xb80, 0x4, 0, 4),
+ PIN_FIELD(73, 86, 0xb80, 0x4, 16, 4),
+ PIN_FIELD(87, 90, 0xc80, 0x4, 0, 4),
+ PIN_FIELD(91, 102, 0xb88, 0x4, 8, 4),
};
static const struct mtk_pin_field_calc mt7622_pin_rdsel_range[] = {
- {0, 31, 0x990, 0x4, 0, 6},
- {32, 50, 0xa90, 0x4, 0, 6},
- {51, 58, 0x890, 0x4, 0, 6},
- {59, 60, 0x894, 0x4, 28, 6},
- {61, 62, 0x894, 0x4, 16, 6},
- {63, 66, 0x898, 0x4, 8, 6},
- {67, 68, 0x89c, 0x4, 12, 6},
- {69, 70, 0x89c, 0x4, 0, 6},
- {71, 72, 0xb90, 0x4, 0, 6},
- {73, 86, 0xb90, 0x4, 24, 6},
- {87, 90, 0xc90, 0x4, 0, 6},
- {91, 102, 0xb9c, 0x4, 12, 6},
+ PIN_FIELD(0, 31, 0x990, 0x4, 0, 6),
+ PIN_FIELD(32, 50, 0xa90, 0x4, 0, 6),
+ PIN_FIELD(51, 58, 0x890, 0x4, 0, 6),
+ PIN_FIELD(59, 60, 0x894, 0x4, 28, 6),
+ PIN_FIELD(61, 62, 0x894, 0x4, 16, 6),
+ PIN_FIELD(63, 66, 0x898, 0x4, 8, 6),
+ PIN_FIELD(67, 68, 0x89c, 0x4, 12, 6),
+ PIN_FIELD(69, 70, 0x89c, 0x4, 0, 6),
+ PIN_FIELD(71, 72, 0xb90, 0x4, 0, 6),
+ PIN_FIELD(73, 86, 0xb90, 0x4, 24, 6),
+ PIN_FIELD(87, 90, 0xc90, 0x4, 0, 6),
+ PIN_FIELD(91, 102, 0xb9c, 0x4, 12, 6),
};
static const struct mtk_pin_reg_calc mt7622_reg_cals[PINCTRL_PIN_REG_MAX] = {
@@ -309,110 +152,110 @@ static const struct mtk_pin_reg_calc mt7622_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_RDSEL] = MTK_RANGE(mt7622_pin_rdsel_range),
};
-static const struct pinctrl_pin_desc mt7622_pins[] = {
- PINCTRL_PIN(0, "GPIO_A"),
- PINCTRL_PIN(1, "I2S1_IN"),
- PINCTRL_PIN(2, "I2S1_OUT"),
- PINCTRL_PIN(3, "I2S_BCLK"),
- PINCTRL_PIN(4, "I2S_WS"),
- PINCTRL_PIN(5, "I2S_MCLK"),
- PINCTRL_PIN(6, "TXD0"),
- PINCTRL_PIN(7, "RXD0"),
- PINCTRL_PIN(8, "SPI_WP"),
- PINCTRL_PIN(9, "SPI_HOLD"),
- PINCTRL_PIN(10, "SPI_CLK"),
- PINCTRL_PIN(11, "SPI_MOSI"),
- PINCTRL_PIN(12, "SPI_MISO"),
- PINCTRL_PIN(13, "SPI_CS"),
- PINCTRL_PIN(14, "I2C_SDA"),
- PINCTRL_PIN(15, "I2C_SCL"),
- PINCTRL_PIN(16, "I2S2_IN"),
- PINCTRL_PIN(17, "I2S3_IN"),
- PINCTRL_PIN(18, "I2S4_IN"),
- PINCTRL_PIN(19, "I2S2_OUT"),
- PINCTRL_PIN(20, "I2S3_OUT"),
- PINCTRL_PIN(21, "I2S4_OUT"),
- PINCTRL_PIN(22, "GPIO_B"),
- PINCTRL_PIN(23, "MDC"),
- PINCTRL_PIN(24, "MDIO"),
- PINCTRL_PIN(25, "G2_TXD0"),
- PINCTRL_PIN(26, "G2_TXD1"),
- PINCTRL_PIN(27, "G2_TXD2"),
- PINCTRL_PIN(28, "G2_TXD3"),
- PINCTRL_PIN(29, "G2_TXEN"),
- PINCTRL_PIN(30, "G2_TXC"),
- PINCTRL_PIN(31, "G2_RXD0"),
- PINCTRL_PIN(32, "G2_RXD1"),
- PINCTRL_PIN(33, "G2_RXD2"),
- PINCTRL_PIN(34, "G2_RXD3"),
- PINCTRL_PIN(35, "G2_RXDV"),
- PINCTRL_PIN(36, "G2_RXC"),
- PINCTRL_PIN(37, "NCEB"),
- PINCTRL_PIN(38, "NWEB"),
- PINCTRL_PIN(39, "NREB"),
- PINCTRL_PIN(40, "NDL4"),
- PINCTRL_PIN(41, "NDL5"),
- PINCTRL_PIN(42, "NDL6"),
- PINCTRL_PIN(43, "NDL7"),
- PINCTRL_PIN(44, "NRB"),
- PINCTRL_PIN(45, "NCLE"),
- PINCTRL_PIN(46, "NALE"),
- PINCTRL_PIN(47, "NDL0"),
- PINCTRL_PIN(48, "NDL1"),
- PINCTRL_PIN(49, "NDL2"),
- PINCTRL_PIN(50, "NDL3"),
- PINCTRL_PIN(51, "MDI_TP_P0"),
- PINCTRL_PIN(52, "MDI_TN_P0"),
- PINCTRL_PIN(53, "MDI_RP_P0"),
- PINCTRL_PIN(54, "MDI_RN_P0"),
- PINCTRL_PIN(55, "MDI_TP_P1"),
- PINCTRL_PIN(56, "MDI_TN_P1"),
- PINCTRL_PIN(57, "MDI_RP_P1"),
- PINCTRL_PIN(58, "MDI_RN_P1"),
- PINCTRL_PIN(59, "MDI_RP_P2"),
- PINCTRL_PIN(60, "MDI_RN_P2"),
- PINCTRL_PIN(61, "MDI_TP_P2"),
- PINCTRL_PIN(62, "MDI_TN_P2"),
- PINCTRL_PIN(63, "MDI_TP_P3"),
- PINCTRL_PIN(64, "MDI_TN_P3"),
- PINCTRL_PIN(65, "MDI_RP_P3"),
- PINCTRL_PIN(66, "MDI_RN_P3"),
- PINCTRL_PIN(67, "MDI_RP_P4"),
- PINCTRL_PIN(68, "MDI_RN_P4"),
- PINCTRL_PIN(69, "MDI_TP_P4"),
- PINCTRL_PIN(70, "MDI_TN_P4"),
- PINCTRL_PIN(71, "PMIC_SCL"),
- PINCTRL_PIN(72, "PMIC_SDA"),
- PINCTRL_PIN(73, "SPIC1_CLK"),
- PINCTRL_PIN(74, "SPIC1_MOSI"),
- PINCTRL_PIN(75, "SPIC1_MISO"),
- PINCTRL_PIN(76, "SPIC1_CS"),
- PINCTRL_PIN(77, "GPIO_D"),
- PINCTRL_PIN(78, "WATCHDOG"),
- PINCTRL_PIN(79, "RTS3_N"),
- PINCTRL_PIN(80, "CTS3_N"),
- PINCTRL_PIN(81, "TXD3"),
- PINCTRL_PIN(82, "RXD3"),
- PINCTRL_PIN(83, "PERST0_N"),
- PINCTRL_PIN(84, "PERST1_N"),
- PINCTRL_PIN(85, "WLED_N"),
- PINCTRL_PIN(86, "EPHY_LED0_N"),
- PINCTRL_PIN(87, "AUXIN0"),
- PINCTRL_PIN(88, "AUXIN1"),
- PINCTRL_PIN(89, "AUXIN2"),
- PINCTRL_PIN(90, "AUXIN3"),
- PINCTRL_PIN(91, "TXD4"),
- PINCTRL_PIN(92, "RXD4"),
- PINCTRL_PIN(93, "RTS4_N"),
- PINCTRL_PIN(94, "CTS4_N"),
- PINCTRL_PIN(95, "PWM1"),
- PINCTRL_PIN(96, "PWM2"),
- PINCTRL_PIN(97, "PWM3"),
- PINCTRL_PIN(98, "PWM4"),
- PINCTRL_PIN(99, "PWM5"),
- PINCTRL_PIN(100, "PWM6"),
- PINCTRL_PIN(101, "PWM7"),
- PINCTRL_PIN(102, "GPIO_E"),
+static const struct mtk_pin_desc mt7622_pins[] = {
+ MT7622_PIN(0, "GPIO_A"),
+ MT7622_PIN(1, "I2S1_IN"),
+ MT7622_PIN(2, "I2S1_OUT"),
+ MT7622_PIN(3, "I2S_BCLK"),
+ MT7622_PIN(4, "I2S_WS"),
+ MT7622_PIN(5, "I2S_MCLK"),
+ MT7622_PIN(6, "TXD0"),
+ MT7622_PIN(7, "RXD0"),
+ MT7622_PIN(8, "SPI_WP"),
+ MT7622_PIN(9, "SPI_HOLD"),
+ MT7622_PIN(10, "SPI_CLK"),
+ MT7622_PIN(11, "SPI_MOSI"),
+ MT7622_PIN(12, "SPI_MISO"),
+ MT7622_PIN(13, "SPI_CS"),
+ MT7622_PIN(14, "I2C_SDA"),
+ MT7622_PIN(15, "I2C_SCL"),
+ MT7622_PIN(16, "I2S2_IN"),
+ MT7622_PIN(17, "I2S3_IN"),
+ MT7622_PIN(18, "I2S4_IN"),
+ MT7622_PIN(19, "I2S2_OUT"),
+ MT7622_PIN(20, "I2S3_OUT"),
+ MT7622_PIN(21, "I2S4_OUT"),
+ MT7622_PIN(22, "GPIO_B"),
+ MT7622_PIN(23, "MDC"),
+ MT7622_PIN(24, "MDIO"),
+ MT7622_PIN(25, "G2_TXD0"),
+ MT7622_PIN(26, "G2_TXD1"),
+ MT7622_PIN(27, "G2_TXD2"),
+ MT7622_PIN(28, "G2_TXD3"),
+ MT7622_PIN(29, "G2_TXEN"),
+ MT7622_PIN(30, "G2_TXC"),
+ MT7622_PIN(31, "G2_RXD0"),
+ MT7622_PIN(32, "G2_RXD1"),
+ MT7622_PIN(33, "G2_RXD2"),
+ MT7622_PIN(34, "G2_RXD3"),
+ MT7622_PIN(35, "G2_RXDV"),
+ MT7622_PIN(36, "G2_RXC"),
+ MT7622_PIN(37, "NCEB"),
+ MT7622_PIN(38, "NWEB"),
+ MT7622_PIN(39, "NREB"),
+ MT7622_PIN(40, "NDL4"),
+ MT7622_PIN(41, "NDL5"),
+ MT7622_PIN(42, "NDL6"),
+ MT7622_PIN(43, "NDL7"),
+ MT7622_PIN(44, "NRB"),
+ MT7622_PIN(45, "NCLE"),
+ MT7622_PIN(46, "NALE"),
+ MT7622_PIN(47, "NDL0"),
+ MT7622_PIN(48, "NDL1"),
+ MT7622_PIN(49, "NDL2"),
+ MT7622_PIN(50, "NDL3"),
+ MT7622_PIN(51, "MDI_TP_P0"),
+ MT7622_PIN(52, "MDI_TN_P0"),
+ MT7622_PIN(53, "MDI_RP_P0"),
+ MT7622_PIN(54, "MDI_RN_P0"),
+ MT7622_PIN(55, "MDI_TP_P1"),
+ MT7622_PIN(56, "MDI_TN_P1"),
+ MT7622_PIN(57, "MDI_RP_P1"),
+ MT7622_PIN(58, "MDI_RN_P1"),
+ MT7622_PIN(59, "MDI_RP_P2"),
+ MT7622_PIN(60, "MDI_RN_P2"),
+ MT7622_PIN(61, "MDI_TP_P2"),
+ MT7622_PIN(62, "MDI_TN_P2"),
+ MT7622_PIN(63, "MDI_TP_P3"),
+ MT7622_PIN(64, "MDI_TN_P3"),
+ MT7622_PIN(65, "MDI_RP_P3"),
+ MT7622_PIN(66, "MDI_RN_P3"),
+ MT7622_PIN(67, "MDI_RP_P4"),
+ MT7622_PIN(68, "MDI_RN_P4"),
+ MT7622_PIN(69, "MDI_TP_P4"),
+ MT7622_PIN(70, "MDI_TN_P4"),
+ MT7622_PIN(71, "PMIC_SCL"),
+ MT7622_PIN(72, "PMIC_SDA"),
+ MT7622_PIN(73, "SPIC1_CLK"),
+ MT7622_PIN(74, "SPIC1_MOSI"),
+ MT7622_PIN(75, "SPIC1_MISO"),
+ MT7622_PIN(76, "SPIC1_CS"),
+ MT7622_PIN(77, "GPIO_D"),
+ MT7622_PIN(78, "WATCHDOG"),
+ MT7622_PIN(79, "RTS3_N"),
+ MT7622_PIN(80, "CTS3_N"),
+ MT7622_PIN(81, "TXD3"),
+ MT7622_PIN(82, "RXD3"),
+ MT7622_PIN(83, "PERST0_N"),
+ MT7622_PIN(84, "PERST1_N"),
+ MT7622_PIN(85, "WLED_N"),
+ MT7622_PIN(86, "EPHY_LED0_N"),
+ MT7622_PIN(87, "AUXIN0"),
+ MT7622_PIN(88, "AUXIN1"),
+ MT7622_PIN(89, "AUXIN2"),
+ MT7622_PIN(90, "AUXIN3"),
+ MT7622_PIN(91, "TXD4"),
+ MT7622_PIN(92, "RXD4"),
+ MT7622_PIN(93, "RTS4_N"),
+ MT7622_PIN(94, "CTS4_N"),
+ MT7622_PIN(95, "PWM1"),
+ MT7622_PIN(96, "PWM2"),
+ MT7622_PIN(97, "PWM3"),
+ MT7622_PIN(98, "PWM4"),
+ MT7622_PIN(99, "PWM5"),
+ MT7622_PIN(100, "PWM6"),
+ MT7622_PIN(101, "PWM7"),
+ MT7622_PIN(102, "GPIO_E"),
};
/* List all groups consisting of these pins dedicated to the enablement of
@@ -906,18 +749,6 @@ static const struct function_desc mt7622_functions[] = {
{"watchdog", mt7622_wdt_groups, ARRAY_SIZE(mt7622_wdt_groups)},
};
-static const struct pinconf_generic_params mtk_custom_bindings[] = {
- {"mediatek,tdsel", MTK_PIN_CONFIG_TDSEL, 0},
- {"mediatek,rdsel", MTK_PIN_CONFIG_RDSEL, 0},
-};
-
-#ifdef CONFIG_DEBUG_FS
-static const struct pin_config_item mtk_conf_items[] = {
- PCONFDUMP(MTK_PIN_CONFIG_TDSEL, "tdsel", NULL, true),
- PCONFDUMP(MTK_PIN_CONFIG_RDSEL, "rdsel", NULL, true),
-};
-#endif
-
static const struct mtk_eint_hw mt7622_eint_hw = {
.port_mask = 7,
.ports = 7,
@@ -934,830 +765,38 @@ static const struct mtk_pin_soc mt7622_data = {
.funcs = mt7622_functions,
.nfuncs = ARRAY_SIZE(mt7622_functions),
.eint_hw = &mt7622_eint_hw,
-};
-
-static void mtk_w32(struct mtk_pinctrl *pctl, u32 reg, u32 val)
-{
- writel_relaxed(val, pctl->base + reg);
-}
-
-static u32 mtk_r32(struct mtk_pinctrl *pctl, u32 reg)
-{
- return readl_relaxed(pctl->base + reg);
-}
-
-static void mtk_rmw(struct mtk_pinctrl *pctl, u32 reg, u32 mask, u32 set)
-{
- u32 val;
-
- val = mtk_r32(pctl, reg);
- val &= ~mask;
- val |= set;
- mtk_w32(pctl, reg, val);
-}
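
mtk_rmw() above is the classic read-modify-write helper: clear the masked bits, then OR in the new value. A tiny self-contained model of the same sequence (simulated register rather than MMIO):

#include <assert.h>
#include <stdint.h>

static uint32_t reg;	/* stand-in for a memory-mapped register */

static void rmw(uint32_t mask, uint32_t set)
{
	uint32_t val = reg;

	val &= ~mask;	/* clear the field */
	val |= set;	/* write the new bits */
	reg = val;
}

int main(void)
{
	reg = 0xffffffff;
	rmw(0xf0, 0x50);		/* bits 4-7 become 0101 */
	assert(reg == 0xffffff5f);
	return 0;
}
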
-
-static int mtk_hw_pin_field_lookup(struct mtk_pinctrl *hw, int pin,
- const struct mtk_pin_reg_calc *rc,
- struct mtk_pin_field *pfd)
-{
- const struct mtk_pin_field_calc *c, *e;
- u32 bits;
-
- c = rc->range;
- e = c + rc->nranges;
-
- while (c < e) {
- if (pin >= c->s_pin && pin <= c->e_pin)
- break;
- c++;
- }
-
- if (c >= e) {
- dev_err(hw->dev, "Out of range for pin = %d\n", pin);
- return -EINVAL;
- }
-
- /* Calculate bits as the overall offset the pin is located at */
- bits = c->s_bit + (pin - c->s_pin) * (c->x_bits);
-
- /* Fill pfd from bits and 32-bit register applied is assumed */
- pfd->offset = c->s_addr + c->x_addrs * (bits / 32);
- pfd->bitpos = bits % 32;
- pfd->mask = (1 << c->x_bits) - 1;
-
- /* pfd->next is used for indicating that bit wrapping-around happens
- * which requires the manipulation for bit 0 starting in the next
- * register to form the complete field read/write.
- */
- pfd->next = pfd->bitpos + c->x_bits - 1 > 31 ? c->x_addrs : 0;
-
- return 0;
-}
-
-static int mtk_hw_pin_field_get(struct mtk_pinctrl *hw, int pin,
- int field, struct mtk_pin_field *pfd)
-{
- const struct mtk_pin_reg_calc *rc;
-
- if (field < 0 || field >= PINCTRL_PIN_REG_MAX) {
- dev_err(hw->dev, "Invalid Field %d\n", field);
- return -EINVAL;
- }
-
- if (hw->soc->reg_cal && hw->soc->reg_cal[field].range) {
- rc = &hw->soc->reg_cal[field];
- } else {
- dev_err(hw->dev, "Undefined range for field %d\n", field);
- return -EINVAL;
- }
-
- return mtk_hw_pin_field_lookup(hw, pin, rc, pfd);
-}
-
-static void mtk_hw_bits_part(struct mtk_pin_field *pf, int *h, int *l)
-{
- *l = 32 - pf->bitpos;
- *h = get_count_order(pf->mask) - *l;
-}
-
-static void mtk_hw_write_cross_field(struct mtk_pinctrl *hw,
- struct mtk_pin_field *pf, int value)
-{
- int nbits_l, nbits_h;
-
- mtk_hw_bits_part(pf, &nbits_h, &nbits_l);
-
- mtk_rmw(hw, pf->offset, pf->mask << pf->bitpos,
- (value & pf->mask) << pf->bitpos);
-
- mtk_rmw(hw, pf->offset + pf->next, BIT(nbits_h) - 1,
- (value & pf->mask) >> nbits_l);
-}
-
-static void mtk_hw_read_cross_field(struct mtk_pinctrl *hw,
- struct mtk_pin_field *pf, int *value)
-{
- int nbits_l, nbits_h, h, l;
-
- mtk_hw_bits_part(pf, &nbits_h, &nbits_l);
-
- l = (mtk_r32(hw, pf->offset) >> pf->bitpos) & (BIT(nbits_l) - 1);
- h = (mtk_r32(hw, pf->offset + pf->next)) & (BIT(nbits_h) - 1);
-
- *value = (h << nbits_l) | l;
-}
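
The two helpers above handle the pfd->next case: a field whose bitpos plus width overflows a 32-bit register is split, with the low part kept in the first register and the high part starting at bit 0 of the next. A standalone model of that split (simulated registers, illustrative names):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];	/* two consecutive 32-bit registers */

static void write_cross(uint32_t bitpos, uint32_t width, uint32_t value)
{
	uint32_t nbits_l = 32 - bitpos;		/* bits in first register */
	uint32_t nbits_h = width - nbits_l;	/* bits spilling into next */
	uint32_t mask = (1u << width) - 1;

	regs[0] &= ~(mask << bitpos);
	regs[0] |= (value & mask) << bitpos;
	regs[1] &= ~((1u << nbits_h) - 1);
	regs[1] |= (value & mask) >> nbits_l;
}

static uint32_t read_cross(uint32_t bitpos, uint32_t width)
{
	uint32_t nbits_l = 32 - bitpos;
	uint32_t nbits_h = width - nbits_l;
	uint32_t l = (regs[0] >> bitpos) & ((1u << nbits_l) - 1);
	uint32_t h = regs[1] & ((1u << nbits_h) - 1);

	return (h << nbits_l) | l;
}

int main(void)
{
	/* a 6-bit field at bit 30: 2 bits land in regs[0], 4 in regs[1] */
	write_cross(30, 6, 0x2d);
	printf("0x%x\n", read_cross(30, 6));	/* prints 0x2d */
	return 0;
}
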
-
-static int mtk_hw_set_value(struct mtk_pinctrl *hw, int pin, int field,
- int value)
-{
- struct mtk_pin_field pf;
- int err;
-
- err = mtk_hw_pin_field_get(hw, pin, field, &pf);
- if (err)
- return err;
-
- if (!pf.next)
- mtk_rmw(hw, pf.offset, pf.mask << pf.bitpos,
- (value & pf.mask) << pf.bitpos);
- else
- mtk_hw_write_cross_field(hw, &pf, value);
-
- return 0;
-}
-
-static int mtk_hw_get_value(struct mtk_pinctrl *hw, int pin, int field,
- int *value)
-{
- struct mtk_pin_field pf;
- int err;
-
- err = mtk_hw_pin_field_get(hw, pin, field, &pf);
- if (err)
- return err;
-
- if (!pf.next)
- *value = (mtk_r32(hw, pf.offset) >> pf.bitpos) & pf.mask;
- else
- mtk_hw_read_cross_field(hw, &pf, value);
-
- return 0;
-}
-
-static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
- unsigned int selector, unsigned int group)
-{
- struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
- struct group_desc *grp;
- int i;
-
- func = pinmux_generic_get_function(pctldev, selector);
- if (!func)
- return -EINVAL;
-
- grp = pinctrl_generic_get_group(pctldev, group);
- if (!grp)
- return -EINVAL;
-
- dev_dbg(pctldev->dev, "enable function %s group %s\n",
- func->name, grp->name);
-
- for (i = 0; i < grp->num_pins; i++) {
- int *pin_modes = grp->data;
-
- mtk_hw_set_value(hw, grp->pins[i], PINCTRL_PIN_REG_MODE,
- pin_modes[i]);
- }
-
- return 0;
-}
-
-static int mtk_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range,
- unsigned int pin)
-{
- struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
-
- return mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_MODE, MTK_GPIO_MODE);
-}
-
-static int mtk_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range,
- unsigned int pin, bool input)
-{
- struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
-
- /* the hardware takes 0 as the input direction */
- return mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_DIR, !input);
-}
-
-static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
- unsigned int pin, unsigned long *config)
-{
- struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
- u32 param = pinconf_to_config_param(*config);
- int val, val2, err, reg, ret = 1;
-
- switch (param) {
- case PIN_CONFIG_BIAS_DISABLE:
- err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_PU, &val);
- if (err)
- return err;
-
- err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_PD, &val2);
- if (err)
- return err;
-
- if (val || val2)
- return -EINVAL;
-
- break;
- case PIN_CONFIG_BIAS_PULL_UP:
- case PIN_CONFIG_BIAS_PULL_DOWN:
- case PIN_CONFIG_SLEW_RATE:
- reg = (param == PIN_CONFIG_BIAS_PULL_UP) ?
- PINCTRL_PIN_REG_PU :
- (param == PIN_CONFIG_BIAS_PULL_DOWN) ?
- PINCTRL_PIN_REG_PD : PINCTRL_PIN_REG_SR;
-
- err = mtk_hw_get_value(hw, pin, reg, &val);
- if (err)
- return err;
-
- if (!val)
- return -EINVAL;
-
- break;
- case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT_ENABLE:
- err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_DIR, &val);
- if (err)
- return err;
-
- /* HW takes input mode as zero; output mode as non-zero */
- if ((val && param == PIN_CONFIG_INPUT_ENABLE) ||
- (!val && param == PIN_CONFIG_OUTPUT_ENABLE))
- return -EINVAL;
-
- break;
- case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_DIR, &val);
- if (err)
- return err;
-
- err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_SMT, &val2);
- if (err)
- return err;
-
- if (val || !val2)
- return -EINVAL;
-
- break;
- case PIN_CONFIG_DRIVE_STRENGTH:
- err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_E4, &val);
- if (err)
- return err;
-
- err = mtk_hw_get_value(hw, pin, PINCTRL_PIN_REG_E8, &val2);
- if (err)
- return err;
-
- /* 4mA when (e8, e4) = (0, 0); 8mA when (e8, e4) = (0, 1)
- * 12mA when (e8, e4) = (1, 0); 16mA when (e8, e4) = (1, 1)
- */
- ret = ((val2 << 1) + val + 1) * 4;
-
- break;
- case MTK_PIN_CONFIG_TDSEL:
- case MTK_PIN_CONFIG_RDSEL:
- reg = (param == MTK_PIN_CONFIG_TDSEL) ?
- PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
-
- err = mtk_hw_get_value(hw, pin, reg, &val);
- if (err)
- return err;
-
- ret = val;
-
- break;
- default:
- return -ENOTSUPP;
- }
-
- *config = pinconf_to_config_packed(param, ret);
-
- return 0;
-}
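-/* The drive-strength decode above in one line (a standalone sketch with
- * an assumed helper name, not part of the driver): the two register
- * bits form a 0..3 index that maps linearly onto 4..16 mA.
- */
-static inline int mtk_e8e4_to_ma(int e8, int e4)
-{
-	/* (0,0)->4mA, (0,1)->8mA, (1,0)->12mA, (1,1)->16mA */
-	return (((e8 << 1) | e4) + 1) * 4;
-}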
-
-static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long *configs, unsigned int num_configs)
-{
- struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
- u32 reg, param, arg;
- int cfg, err = 0;
-
- for (cfg = 0; cfg < num_configs; cfg++) {
- param = pinconf_to_config_param(configs[cfg]);
- arg = pinconf_to_config_argument(configs[cfg]);
-
- switch (param) {
- case PIN_CONFIG_BIAS_DISABLE:
- case PIN_CONFIG_BIAS_PULL_UP:
- case PIN_CONFIG_BIAS_PULL_DOWN:
- arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 :
- (param == PIN_CONFIG_BIAS_PULL_UP) ? 1 : 2;
-
- err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_PU,
- arg & 1);
- if (err)
- goto err;
-
- err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_PD,
- !!(arg & 2));
- if (err)
- goto err;
- break;
- case PIN_CONFIG_OUTPUT_ENABLE:
- err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_SMT,
- MTK_DISABLE);
- if (err)
- goto err;
- /* else: fall through */
- case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_SLEW_RATE:
- reg = (param == PIN_CONFIG_SLEW_RATE) ?
- PINCTRL_PIN_REG_SR : PINCTRL_PIN_REG_DIR;
-
- arg = (param == PIN_CONFIG_INPUT_ENABLE) ? 0 :
- (param == PIN_CONFIG_OUTPUT_ENABLE) ? 1 : arg;
- err = mtk_hw_set_value(hw, pin, reg, arg);
- if (err)
- goto err;
-
- break;
- case PIN_CONFIG_OUTPUT:
- err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_DIR,
- MTK_OUTPUT);
- if (err)
- goto err;
-
- err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_DO,
- arg);
- if (err)
- goto err;
- break;
- case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- /* arg = 1: input mode & SMT enable;
- * arg = 0: output mode & SMT disable
- */
- arg = arg ? 2 : 1;
- err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_DIR,
- arg & 1);
- if (err)
- goto err;
-
- err = mtk_hw_set_value(hw, pin, PINCTRL_PIN_REG_SMT,
- !!(arg & 2));
- if (err)
- goto err;
- break;
- case PIN_CONFIG_DRIVE_STRENGTH:
- /* 4mA when (e8, e4) = (0, 0);
- * 8mA when (e8, e4) = (0, 1);
- * 12mA when (e8, e4) = (1, 0);
- * 16mA when (e8, e4) = (1, 1)
- */
- if (!(arg % 4) && (arg >= 4 && arg <= 16)) {
- arg = arg / 4 - 1;
- err = mtk_hw_set_value(hw, pin,
- PINCTRL_PIN_REG_E4,
- arg & 0x1);
- if (err)
- goto err;
-
- err = mtk_hw_set_value(hw, pin,
- PINCTRL_PIN_REG_E8,
- (arg & 0x2) >> 1);
- if (err)
- goto err;
- } else {
- err = -ENOTSUPP;
- }
- break;
- case MTK_PIN_CONFIG_TDSEL:
- case MTK_PIN_CONFIG_RDSEL:
- reg = (param == MTK_PIN_CONFIG_TDSEL) ?
- PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
-
- err = mtk_hw_set_value(hw, pin, reg, arg);
- if (err)
- goto err;
- break;
- default:
- err = -ENOTSUPP;
- }
- }
-err:
- return err;
-}
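-/* Summary of the bias encoding above (not extra driver code): the
- * request is first packed into a two-bit value whose bit 0 drives PU
- * and bit 1 drives PD:
- *
- *	bias-disable   -> arg = 0 -> PU = 0, PD = 0
- *	bias-pull-up   -> arg = 1 -> PU = 1, PD = 0
- *	bias-pull-down -> arg = 2 -> PU = 0, PD = 1
- */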
-
-static int mtk_pinconf_group_get(struct pinctrl_dev *pctldev,
- unsigned int group, unsigned long *config)
-{
- const unsigned int *pins;
- unsigned int i, npins, old = 0;
- int ret;
-
- ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
- if (ret)
- return ret;
-
- for (i = 0; i < npins; i++) {
- if (mtk_pinconf_get(pctldev, pins[i], config))
- return -ENOTSUPP;
-
- /* bail out when configs mismatch between two of the pins */
- if (i && old != *config)
- return -ENOTSUPP;
-
- old = *config;
- }
-
- return 0;
-}
-
-static int mtk_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned int group, unsigned long *configs,
- unsigned int num_configs)
-{
- const unsigned int *pins;
- unsigned int i, npins;
- int ret;
-
- ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
- if (ret)
- return ret;
-
- for (i = 0; i < npins; i++) {
- ret = mtk_pinconf_set(pctldev, pins[i], configs, num_configs);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static const struct pinctrl_ops mtk_pctlops = {
- .get_groups_count = pinctrl_generic_get_group_count,
- .get_group_name = pinctrl_generic_get_group_name,
- .get_group_pins = pinctrl_generic_get_group_pins,
- .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
- .dt_free_map = pinconf_generic_dt_free_map,
-};
-
-static const struct pinmux_ops mtk_pmxops = {
- .get_functions_count = pinmux_generic_get_function_count,
- .get_function_name = pinmux_generic_get_function_name,
- .get_function_groups = pinmux_generic_get_function_groups,
- .set_mux = mtk_pinmux_set_mux,
- .gpio_request_enable = mtk_pinmux_gpio_request_enable,
- .gpio_set_direction = mtk_pinmux_gpio_set_direction,
- .strict = true,
-};
-
-static const struct pinconf_ops mtk_confops = {
- .is_generic = true,
- .pin_config_get = mtk_pinconf_get,
- .pin_config_set = mtk_pinconf_set,
- .pin_config_group_get = mtk_pinconf_group_get,
- .pin_config_group_set = mtk_pinconf_group_set,
- .pin_config_config_dbg_show = pinconf_generic_dump_config,
-};
-
-static struct pinctrl_desc mtk_desc = {
- .name = PINCTRL_PINCTRL_DEV,
- .pctlops = &mtk_pctlops,
- .pmxops = &mtk_pmxops,
- .confops = &mtk_confops,
- .owner = THIS_MODULE,
-};
-
-static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
-{
- struct mtk_pinctrl *hw = gpiochip_get_data(chip);
- int value, err;
-
- err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value);
- if (err)
- return err;
-
- return !!value;
-}
-
-static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
-{
- struct mtk_pinctrl *hw = gpiochip_get_data(chip);
-
- mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value);
-}
-
-static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
-{
- return pinctrl_gpio_direction_input(chip->base + gpio);
-}
-
-static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
- int value)
-{
- mtk_gpio_set(chip, gpio, value);
-
- return pinctrl_gpio_direction_output(chip->base + gpio);
-}
-
-static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
-{
- struct mtk_pinctrl *hw = gpiochip_get_data(chip);
- unsigned long eint_n;
-
- if (!hw->eint)
- return -ENOTSUPP;
-
- eint_n = offset;
-
- return mtk_eint_find_irq(hw->eint, eint_n);
-}
-
-static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
- unsigned long config)
-{
- struct mtk_pinctrl *hw = gpiochip_get_data(chip);
- unsigned long eint_n;
- u32 debounce;
-
- if (!hw->eint ||
- pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
- return -ENOTSUPP;
-
- debounce = pinconf_to_config_argument(config);
- eint_n = offset;
-
- return mtk_eint_set_debounce(hw->eint, eint_n, debounce);
-}
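-/* A usage sketch for the hook above (illustrative; the 1000 us figure
- * is an arbitrary example): gpiolib hands ->set_config() a packed
- * pinconf value, e.g.
- *
- *	unsigned long cfg = pinconf_to_config_packed(
- *				PIN_CONFIG_INPUT_DEBOUNCE, 1000);
- *	mtk_gpio_set_config(chip, offset, cfg);
- */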
-
-static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
-{
- struct gpio_chip *chip = &hw->chip;
- int ret;
-
- chip->label = PINCTRL_PINCTRL_DEV;
- chip->parent = hw->dev;
- chip->request = gpiochip_generic_request;
- chip->free = gpiochip_generic_free;
- chip->direction_input = mtk_gpio_direction_input;
- chip->direction_output = mtk_gpio_direction_output;
- chip->get = mtk_gpio_get;
- chip->set = mtk_gpio_set;
- chip->to_irq = mtk_gpio_to_irq;
- chip->set_config = mtk_gpio_set_config;
- chip->base = -1;
- chip->ngpio = hw->soc->npins;
- chip->of_node = np;
- chip->of_gpio_n_cells = 2;
-
- ret = gpiochip_add_data(chip, hw);
- if (ret < 0)
- return ret;
-
- /* This is kept only for backward compatibility with old pinctrl
- * nodes that lack the "gpio-ranges" property; otherwise, adding the
- * range directly from a DeviceTree-supported pinctrl driver is
- * DEPRECATED. Please see Section 2.1 of
- * Documentation/devicetree/bindings/gpio/gpio.txt on how to
- * bind pinctrl and gpio drivers via the "gpio-ranges" property.
- */
- if (!of_find_property(np, "gpio-ranges", NULL)) {
- ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
- chip->ngpio);
- if (ret < 0) {
- gpiochip_remove(chip);
- return ret;
- }
- }
-
- return 0;
-}
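-/* For reference, the preferred binding mentioned above looks roughly
- * like this (an assumed, abridged example node; the unit address and
- * pin count are illustrative):
- *
- *	pio: pinctrl@10211000 {
- *		compatible = "mediatek,mt7622-pinctrl";
- *		gpio-controller;
- *		#gpio-cells = <2>;
- *		gpio-ranges = <&pio 0 0 103>;
- *	};
- */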
-
-static int mtk_build_groups(struct mtk_pinctrl *hw)
-{
- int err, i;
-
- for (i = 0; i < hw->soc->ngrps; i++) {
- const struct group_desc *group = hw->soc->grps + i;
-
- err = pinctrl_generic_add_group(hw->pctrl, group->name,
- group->pins, group->num_pins,
- group->data);
- if (err < 0) {
- dev_err(hw->dev, "Failed to register group %s\n",
- group->name);
- return err;
- }
- }
-
- return 0;
-}
-
-static int mtk_build_functions(struct mtk_pinctrl *hw)
-{
- int i, err;
-
- for (i = 0; i < hw->soc->nfuncs ; i++) {
- const struct function_desc *func = hw->soc->funcs + i;
-
- err = pinmux_generic_add_function(hw->pctrl, func->name,
- func->group_names,
- func->num_group_names,
- func->data);
- if (err < 0) {
- dev_err(hw->dev, "Failed to register function %s\n",
- func->name);
- return err;
- }
- }
-
- return 0;
-}
-
-static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
- unsigned int *gpio_n,
- struct gpio_chip **gpio_chip)
-{
- struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
-
- *gpio_chip = &hw->chip;
- *gpio_n = eint_n;
-
- return 0;
-}
-
-static int mtk_xt_get_gpio_state(void *data, unsigned long eint_n)
-{
- struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
- struct gpio_chip *gpio_chip;
- unsigned int gpio_n;
- int err;
-
- err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
- if (err)
- return err;
-
- return mtk_gpio_get(gpio_chip, gpio_n);
-}
-
-static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n)
-{
- struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
- struct gpio_chip *gpio_chip;
- unsigned int gpio_n;
- int err;
-
- err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
- if (err)
- return err;
-
- err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_MODE,
- MTK_GPIO_MODE);
- if (err)
- return err;
-
- err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_DIR, MTK_INPUT);
- if (err)
- return err;
-
- err = mtk_hw_set_value(hw, gpio_n, PINCTRL_PIN_REG_SMT, MTK_ENABLE);
- if (err)
- return err;
-
- return 0;
-}
-
-static const struct mtk_eint_xt mtk_eint_xt = {
- .get_gpio_n = mtk_xt_get_gpio_n,
- .get_gpio_state = mtk_xt_get_gpio_state,
- .set_gpio_as_eint = mtk_xt_set_gpio_as_eint,
-};
-
-static int
-mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct resource *res;
-
- if (!IS_ENABLED(CONFIG_EINT_MTK))
- return 0;
-
- if (!of_property_read_bool(np, "interrupt-controller"))
- return -ENODEV;
-
- hw->eint = devm_kzalloc(hw->dev, sizeof(*hw->eint), GFP_KERNEL);
- if (!hw->eint)
- return -ENOMEM;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "eint");
- if (!res) {
- dev_err(&pdev->dev, "Unable to get eint resource\n");
- return -ENODEV;
- }
-
- hw->eint->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hw->eint->base))
- return PTR_ERR(hw->eint->base);
-
- hw->eint->irq = irq_of_parse_and_map(np, 0);
- if (!hw->eint->irq)
- return -EINVAL;
-
- hw->eint->dev = &pdev->dev;
- hw->eint->hw = hw->soc->eint_hw;
- hw->eint->pctl = hw;
- hw->eint->gpio_xlate = &mtk_eint_xt;
-
- return mtk_eint_do_init(hw->eint);
-}
-
-static const struct of_device_id mtk_pinctrl_of_match[] = {
- { .compatible = "mediatek,mt7622-pinctrl", .data = &mt7622_data},
+ .gpio_m = 1,
+ .ies_present = false,
+ .base_names = mtk_default_register_base_names,
+ .nbase_names = ARRAY_SIZE(mtk_default_register_base_names),
+ .bias_disable_set = mtk_pinconf_bias_disable_set,
+ .bias_disable_get = mtk_pinconf_bias_disable_get,
+ .bias_set = mtk_pinconf_bias_set,
+ .bias_get = mtk_pinconf_bias_get,
+ .drive_set = mtk_pinconf_drive_set,
+ .drive_get = mtk_pinconf_drive_get,
+};
+
+static const struct of_device_id mt7622_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt7622-pinctrl", },
{ }
};
-static int mtk_pinctrl_probe(struct platform_device *pdev)
+static int mt7622_pinctrl_probe(struct platform_device *pdev)
{
- struct resource *res;
- struct mtk_pinctrl *hw;
- const struct of_device_id *of_id =
- of_match_device(mtk_pinctrl_of_match, &pdev->dev);
- int err;
-
- hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return -ENOMEM;
-
- hw->soc = of_id->data;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "missing IO resource\n");
- return -ENXIO;
- }
-
- hw->dev = &pdev->dev;
- hw->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hw->base))
- return PTR_ERR(hw->base);
-
- /* Setup pins descriptions per SoC types */
- mtk_desc.pins = hw->soc->pins;
- mtk_desc.npins = hw->soc->npins;
- mtk_desc.num_custom_params = ARRAY_SIZE(mtk_custom_bindings);
- mtk_desc.custom_params = mtk_custom_bindings;
-#ifdef CONFIG_DEBUG_FS
- mtk_desc.custom_conf_items = mtk_conf_items;
-#endif
-
- err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
- &hw->pctrl);
- if (err)
- return err;
-
- /* Setup groups descriptions per SoC types */
- err = mtk_build_groups(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to build groups\n");
- return err;
- }
-
- /* Setup functions descriptions per SoC types */
- err = mtk_build_functions(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to build functions\n");
- return err;
- }
-
- /* To be able to claim hogs (pinctrl_claim_hogs), we must not
- * enable pinctrl until all groups and functions have been added.
- */
- err = pinctrl_enable(hw->pctrl);
- if (err)
- return err;
-
- err = mtk_build_eint(hw, pdev);
- if (err)
- dev_warn(&pdev->dev,
- "Failed to add EINT, but pinctrl still can work\n");
-
- /* The gpiochip must be built after pinctrl_enable() is done */
- err = mtk_build_gpiochip(hw, pdev->dev.of_node);
- if (err) {
- dev_err(&pdev->dev, "Failed to add gpio_chip\n");
- return err;
- }
-
- platform_set_drvdata(pdev, hw);
-
- return 0;
+ return mtk_moore_pinctrl_probe(pdev, &mt7622_data);
}
-static struct platform_driver mtk_pinctrl_driver = {
+static struct platform_driver mt7622_pinctrl_driver = {
.driver = {
- .name = "mtk-pinctrl",
- .of_match_table = mtk_pinctrl_of_match,
+ .name = "mt7622-pinctrl",
+ .of_match_table = mt7622_pinctrl_of_match,
},
- .probe = mtk_pinctrl_probe,
+ .probe = mt7622_pinctrl_probe,
};
-static int __init mtk_pinctrl_init(void)
+static int __init mt7622_pinctrl_init(void)
{
- return platform_driver_register(&mtk_pinctrl_driver);
+ return platform_driver_register(&mt7622_pinctrl_driver);
}
-arch_initcall(mtk_pinctrl_init);
+arch_initcall(mt7622_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
new file mode 100644
index 000000000000..b8d9d31db74f
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
@@ -0,0 +1,1441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The MT7623 driver is based on the Linux generic pinctrl binding.
+ *
+ * Copyright (C) 2015 - 2018 MediaTek Inc.
+ * Author: Biao Huang <biao.huang@mediatek.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include "pinctrl-moore.h"
+
+#define PIN_BOND_REG0 0xb10
+#define PIN_BOND_REG1 0xf20
+#define PIN_BOND_REG2 0xef0
+#define BOND_PCIE_CLR (0x77 << 3)
+#define BOND_I2S_CLR 0x3
+#define BOND_MSDC0E_CLR 0x1
+
+#define PIN_FIELD15(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 15, false)
+
+#define PIN_FIELD16(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 16, 0)
+
+#define PINS_FIELD16(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 16, 1)
+
+#define MT7623_PIN(_number, _name, _eint_n, _drv_grp) \
+ MTK_PIN(_number, _name, 0, _eint_n, _drv_grp)
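+
+/* How the field tables below are consumed (a worked example, not extra
+ * code): for PIN_FIELD16(0, 175, 0x0, 0x10, 0, 1) in the DIR range,
+ * each pin owns a 1-bit field and a 16-bit register holds 16 of them,
+ * so pin 37 resolves to offset 0x0 + (37 / 16) * 0x10 = 0x20, bit
+ * 37 % 16 = 5. PINS_FIELD16 instead sets the "fixed" flag so that all
+ * pins in the span share the single field at (_s_addr, _s_bit).
+ */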
+
+static const struct mtk_pin_field_calc mt7623_pin_mode_range[] = {
+ PIN_FIELD15(0, 278, 0x760, 0x10, 0, 3),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_dir_range[] = {
+ PIN_FIELD16(0, 175, 0x0, 0x10, 0, 1),
+ PIN_FIELD16(176, 278, 0xc0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_di_range[] = {
+ PIN_FIELD16(0, 278, 0x630, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_do_range[] = {
+ PIN_FIELD16(0, 278, 0x500, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_ies_range[] = {
+ PINS_FIELD16(0, 6, 0xb20, 0x10, 0, 1),
+ PINS_FIELD16(7, 9, 0xb20, 0x10, 1, 1),
+ PINS_FIELD16(10, 13, 0xb30, 0x10, 3, 1),
+ PINS_FIELD16(14, 15, 0xb30, 0x10, 13, 1),
+ PINS_FIELD16(16, 17, 0xb40, 0x10, 7, 1),
+ PINS_FIELD16(18, 29, 0xb40, 0x10, 13, 1),
+ PINS_FIELD16(30, 32, 0xb40, 0x10, 7, 1),
+ PINS_FIELD16(33, 37, 0xb40, 0x10, 13, 1),
+ PIN_FIELD16(38, 38, 0xb20, 0x10, 13, 1),
+ PINS_FIELD16(39, 42, 0xb40, 0x10, 13, 1),
+ PINS_FIELD16(43, 45, 0xb20, 0x10, 10, 1),
+ PINS_FIELD16(47, 48, 0xb20, 0x10, 11, 1),
+ PIN_FIELD16(49, 49, 0xb20, 0x10, 12, 1),
+ PINS_FIELD16(50, 52, 0xb20, 0x10, 13, 1),
+ PINS_FIELD16(53, 56, 0xb20, 0x10, 14, 1),
+ PINS_FIELD16(57, 58, 0xb20, 0x10, 15, 1),
+ PIN_FIELD16(59, 59, 0xb30, 0x10, 10, 1),
+ PINS_FIELD16(60, 62, 0xb30, 0x10, 0, 1),
+ PINS_FIELD16(63, 65, 0xb30, 0x10, 1, 1),
+ PINS_FIELD16(66, 71, 0xb30, 0x10, 2, 1),
+ PINS_FIELD16(72, 74, 0xb20, 0x10, 12, 1),
+ PINS_FIELD16(75, 76, 0xb30, 0x10, 3, 1),
+ PINS_FIELD16(77, 78, 0xb30, 0x10, 4, 1),
+ PINS_FIELD16(79, 82, 0xb30, 0x10, 5, 1),
+ PINS_FIELD16(83, 84, 0xb30, 0x10, 2, 1),
+ PIN_FIELD16(85, 85, 0xda0, 0x10, 4, 1),
+ PIN_FIELD16(86, 86, 0xd90, 0x10, 4, 1),
+ PINS_FIELD16(87, 90, 0xdb0, 0x10, 4, 1),
+ PINS_FIELD16(101, 104, 0xb30, 0x10, 6, 1),
+ PIN_FIELD16(105, 105, 0xd40, 0x10, 4, 1),
+ PIN_FIELD16(106, 106, 0xd30, 0x10, 4, 1),
+ PINS_FIELD16(107, 110, 0xd50, 0x10, 4, 1),
+ PINS_FIELD16(111, 115, 0xce0, 0x10, 4, 1),
+ PIN_FIELD16(116, 116, 0xcd0, 0x10, 4, 1),
+ PIN_FIELD16(117, 117, 0xcc0, 0x10, 4, 1),
+ PINS_FIELD16(118, 121, 0xce0, 0x10, 4, 1),
+ PINS_FIELD16(122, 125, 0xb30, 0x10, 7, 1),
+ PIN_FIELD16(126, 126, 0xb20, 0x10, 12, 1),
+ PINS_FIELD16(127, 142, 0xb30, 0x10, 9, 1),
+ PINS_FIELD16(143, 160, 0xb30, 0x10, 10, 1),
+ PINS_FIELD16(161, 168, 0xb30, 0x10, 12, 1),
+ PINS_FIELD16(169, 183, 0xb30, 0x10, 10, 1),
+ PINS_FIELD16(184, 186, 0xb30, 0x10, 9, 1),
+ PIN_FIELD16(187, 187, 0xb30, 0x10, 14, 1),
+ PIN_FIELD16(188, 188, 0xb20, 0x10, 13, 1),
+ PINS_FIELD16(189, 193, 0xb30, 0x10, 15, 1),
+ PINS_FIELD16(194, 198, 0xb40, 0x10, 0, 1),
+ PIN_FIELD16(199, 199, 0xb20, 0x10, 1, 1),
+ PINS_FIELD16(200, 202, 0xb40, 0x10, 1, 1),
+ PINS_FIELD16(203, 207, 0xb40, 0x10, 2, 1),
+ PINS_FIELD16(208, 209, 0xb40, 0x10, 3, 1),
+ PIN_FIELD16(210, 210, 0xb40, 0x10, 4, 1),
+ PINS_FIELD16(211, 235, 0xb40, 0x10, 5, 1),
+ PINS_FIELD16(236, 241, 0xb40, 0x10, 6, 1),
+ PINS_FIELD16(242, 243, 0xb40, 0x10, 7, 1),
+ PINS_FIELD16(244, 247, 0xb40, 0x10, 8, 1),
+ PIN_FIELD16(248, 248, 0xb40, 0x10, 9, 1),
+ PINS_FIELD16(249, 257, 0xfc0, 0x10, 4, 1),
+ PIN_FIELD16(258, 258, 0xcb0, 0x10, 4, 1),
+ PIN_FIELD16(259, 259, 0xc90, 0x10, 4, 1),
+ PIN_FIELD16(260, 260, 0x3a0, 0x10, 4, 1),
+ PIN_FIELD16(261, 261, 0xd50, 0x10, 4, 1),
+ PINS_FIELD16(262, 277, 0xb40, 0x10, 12, 1),
+ PIN_FIELD16(278, 278, 0xb40, 0x10, 13, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_smt_range[] = {
+ PINS_FIELD16(0, 6, 0xb50, 0x10, 0, 1),
+ PINS_FIELD16(7, 9, 0xb50, 0x10, 1, 1),
+ PINS_FIELD16(10, 13, 0xb60, 0x10, 3, 1),
+ PINS_FIELD16(14, 15, 0xb60, 0x10, 13, 1),
+ PINS_FIELD16(16, 17, 0xb70, 0x10, 7, 1),
+ PINS_FIELD16(18, 29, 0xb70, 0x10, 13, 1),
+ PINS_FIELD16(30, 32, 0xb70, 0x10, 7, 1),
+ PINS_FIELD16(33, 37, 0xb70, 0x10, 13, 1),
+ PIN_FIELD16(38, 38, 0xb50, 0x10, 13, 1),
+ PINS_FIELD16(39, 42, 0xb70, 0x10, 13, 1),
+ PINS_FIELD16(43, 45, 0xb50, 0x10, 10, 1),
+ PINS_FIELD16(47, 48, 0xb50, 0x10, 11, 1),
+ PIN_FIELD16(49, 49, 0xb50, 0x10, 12, 1),
+ PINS_FIELD16(50, 52, 0xb50, 0x10, 13, 1),
+ PINS_FIELD16(53, 56, 0xb50, 0x10, 14, 1),
+ PINS_FIELD16(57, 58, 0xb50, 0x10, 15, 1),
+ PIN_FIELD16(59, 59, 0xb60, 0x10, 10, 1),
+ PINS_FIELD16(60, 62, 0xb60, 0x10, 0, 1),
+ PINS_FIELD16(63, 65, 0xb60, 0x10, 1, 1),
+ PINS_FIELD16(66, 71, 0xb60, 0x10, 2, 1),
+ PINS_FIELD16(72, 74, 0xb50, 0x10, 12, 1),
+ PINS_FIELD16(75, 76, 0xb60, 0x10, 3, 1),
+ PINS_FIELD16(77, 78, 0xb60, 0x10, 4, 1),
+ PINS_FIELD16(79, 82, 0xb60, 0x10, 5, 1),
+ PINS_FIELD16(83, 84, 0xb60, 0x10, 2, 1),
+ PIN_FIELD16(85, 85, 0xda0, 0x10, 11, 1),
+ PIN_FIELD16(86, 86, 0xd90, 0x10, 11, 1),
+ PIN_FIELD16(87, 87, 0xdc0, 0x10, 3, 1),
+ PIN_FIELD16(88, 88, 0xdc0, 0x10, 7, 1),
+ PIN_FIELD16(89, 89, 0xdc0, 0x10, 11, 1),
+ PIN_FIELD16(90, 90, 0xdc0, 0x10, 15, 1),
+ PINS_FIELD16(101, 104, 0xb60, 0x10, 6, 1),
+ PIN_FIELD16(105, 105, 0xd40, 0x10, 11, 1),
+ PIN_FIELD16(106, 106, 0xd30, 0x10, 11, 1),
+ PIN_FIELD16(107, 107, 0xd60, 0x10, 3, 1),
+ PIN_FIELD16(108, 108, 0xd60, 0x10, 7, 1),
+ PIN_FIELD16(109, 109, 0xd60, 0x10, 11, 1),
+ PIN_FIELD16(110, 110, 0xd60, 0x10, 15, 1),
+ PIN_FIELD16(111, 111, 0xd00, 0x10, 15, 1),
+ PIN_FIELD16(112, 112, 0xd00, 0x10, 11, 1),
+ PIN_FIELD16(113, 113, 0xd00, 0x10, 7, 1),
+ PIN_FIELD16(114, 114, 0xd00, 0x10, 3, 1),
+ PIN_FIELD16(115, 115, 0xd10, 0x10, 3, 1),
+ PIN_FIELD16(116, 116, 0xcd0, 0x10, 11, 1),
+ PIN_FIELD16(117, 117, 0xcc0, 0x10, 11, 1),
+ PIN_FIELD16(118, 118, 0xcf0, 0x10, 15, 1),
+ PIN_FIELD16(119, 119, 0xcf0, 0x10, 7, 1),
+ PIN_FIELD16(120, 120, 0xcf0, 0x10, 3, 1),
+ PIN_FIELD16(121, 121, 0xcf0, 0x10, 7, 1),
+ PINS_FIELD16(122, 125, 0xb60, 0x10, 7, 1),
+ PIN_FIELD16(126, 126, 0xb50, 0x10, 12, 1),
+ PINS_FIELD16(127, 142, 0xb60, 0x10, 9, 1),
+ PINS_FIELD16(143, 160, 0xb60, 0x10, 10, 1),
+ PINS_FIELD16(161, 168, 0xb60, 0x10, 12, 1),
+ PINS_FIELD16(169, 183, 0xb60, 0x10, 10, 1),
+ PINS_FIELD16(184, 186, 0xb60, 0x10, 9, 1),
+ PIN_FIELD16(187, 187, 0xb60, 0x10, 14, 1),
+ PIN_FIELD16(188, 188, 0xb50, 0x10, 13, 1),
+ PINS_FIELD16(189, 193, 0xb60, 0x10, 15, 1),
+ PINS_FIELD16(194, 198, 0xb70, 0x10, 0, 1),
+ PIN_FIELD16(199, 199, 0xb50, 0x10, 1, 1),
+ PINS_FIELD16(200, 202, 0xb70, 0x10, 1, 1),
+ PINS_FIELD16(203, 207, 0xb70, 0x10, 2, 1),
+ PINS_FIELD16(208, 209, 0xb70, 0x10, 3, 1),
+ PIN_FIELD16(210, 210, 0xb70, 0x10, 4, 1),
+ PINS_FIELD16(211, 235, 0xb70, 0x10, 5, 1),
+ PINS_FIELD16(236, 241, 0xb70, 0x10, 6, 1),
+ PINS_FIELD16(242, 243, 0xb70, 0x10, 7, 1),
+ PINS_FIELD16(244, 247, 0xb70, 0x10, 8, 1),
+ PIN_FIELD16(248, 248, 0xb70, 0x10, 9, 1),
+ PIN_FIELD16(249, 249, 0x140, 0x10, 3, 1),
+ PIN_FIELD16(250, 250, 0x130, 0x10, 15, 1),
+ PIN_FIELD16(251, 251, 0x130, 0x10, 11, 1),
+ PIN_FIELD16(252, 252, 0x130, 0x10, 7, 1),
+ PIN_FIELD16(253, 253, 0x130, 0x10, 3, 1),
+ PIN_FIELD16(254, 254, 0xf40, 0x10, 15, 1),
+ PIN_FIELD16(255, 255, 0xf40, 0x10, 11, 1),
+ PIN_FIELD16(256, 256, 0xf40, 0x10, 7, 1),
+ PIN_FIELD16(257, 257, 0xf40, 0x10, 3, 1),
+ PIN_FIELD16(258, 258, 0xcb0, 0x10, 11, 1),
+ PIN_FIELD16(259, 259, 0xc90, 0x10, 11, 1),
+ PIN_FIELD16(260, 260, 0x3a0, 0x10, 11, 1),
+ PIN_FIELD16(261, 261, 0x0b0, 0x10, 3, 1),
+ PINS_FIELD16(262, 277, 0xb70, 0x10, 12, 1),
+ PIN_FIELD16(278, 278, 0xb70, 0x10, 13, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_pullen_range[] = {
+ PIN_FIELD16(0, 278, 0x150, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_pullsel_range[] = {
+ PIN_FIELD16(0, 278, 0x280, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_drv_range[] = {
+ PINS_FIELD16(0, 6, 0xf50, 0x10, 0, 4),
+ PINS_FIELD16(7, 9, 0xf50, 0x10, 4, 4),
+ PINS_FIELD16(10, 13, 0xf50, 0x10, 4, 4),
+ PINS_FIELD16(14, 15, 0xf50, 0x10, 12, 4),
+ PINS_FIELD16(16, 17, 0xf60, 0x10, 0, 4),
+ PINS_FIELD16(18, 21, 0xf60, 0x10, 0, 4),
+ PINS_FIELD16(22, 26, 0xf60, 0x10, 8, 4),
+ PINS_FIELD16(27, 29, 0xf60, 0x10, 12, 4),
+ PINS_FIELD16(30, 32, 0xf60, 0x10, 0, 4),
+ PINS_FIELD16(33, 37, 0xf70, 0x10, 0, 4),
+ PIN_FIELD16(38, 38, 0xf70, 0x10, 4, 4),
+ PINS_FIELD16(39, 42, 0xf70, 0x10, 8, 4),
+ PINS_FIELD16(43, 45, 0xf70, 0x10, 12, 4),
+ PINS_FIELD16(47, 48, 0xf80, 0x10, 0, 4),
+ PIN_FIELD16(49, 49, 0xf80, 0x10, 4, 4),
+ PINS_FIELD16(50, 52, 0xf70, 0x10, 4, 4),
+ PINS_FIELD16(53, 56, 0xf80, 0x10, 12, 4),
+ PINS_FIELD16(60, 62, 0xf90, 0x10, 8, 4),
+ PINS_FIELD16(63, 65, 0xf90, 0x10, 12, 4),
+ PINS_FIELD16(66, 71, 0xfa0, 0x10, 0, 4),
+ PINS_FIELD16(72, 74, 0xf80, 0x10, 4, 4),
+ PIN_FIELD16(85, 85, 0xda0, 0x10, 0, 4),
+ PIN_FIELD16(86, 86, 0xd90, 0x10, 0, 4),
+ PINS_FIELD16(87, 90, 0xdb0, 0x10, 0, 4),
+ PIN_FIELD16(105, 105, 0xd40, 0x10, 0, 4),
+ PIN_FIELD16(106, 106, 0xd30, 0x10, 0, 4),
+ PINS_FIELD16(107, 110, 0xd50, 0x10, 0, 4),
+ PINS_FIELD16(111, 115, 0xce0, 0x10, 0, 4),
+ PIN_FIELD16(116, 116, 0xcd0, 0x10, 0, 4),
+ PIN_FIELD16(117, 117, 0xcc0, 0x10, 0, 4),
+ PINS_FIELD16(118, 121, 0xce0, 0x10, 0, 4),
+ PIN_FIELD16(126, 126, 0xf80, 0x10, 4, 4),
+ PIN_FIELD16(188, 188, 0xf70, 0x10, 4, 4),
+ PINS_FIELD16(189, 193, 0xfe0, 0x10, 8, 4),
+ PINS_FIELD16(194, 198, 0xfe0, 0x10, 12, 4),
+ PIN_FIELD16(199, 199, 0xf50, 0x10, 4, 4),
+ PINS_FIELD16(200, 202, 0xfd0, 0x10, 0, 4),
+ PINS_FIELD16(203, 207, 0xfd0, 0x10, 4, 4),
+ PINS_FIELD16(208, 209, 0xfd0, 0x10, 8, 4),
+ PIN_FIELD16(210, 210, 0xfd0, 0x10, 12, 4),
+ PINS_FIELD16(211, 235, 0xff0, 0x10, 0, 4),
+ PINS_FIELD16(236, 241, 0xff0, 0x10, 4, 4),
+ PINS_FIELD16(242, 243, 0xff0, 0x10, 8, 4),
+ PIN_FIELD16(248, 248, 0xf00, 0x10, 0, 4),
+ PINS_FIELD16(249, 256, 0xfc0, 0x10, 0, 4),
+ PIN_FIELD16(257, 257, 0xce0, 0x10, 0, 4),
+ PIN_FIELD16(258, 258, 0xcb0, 0x10, 0, 4),
+ PIN_FIELD16(259, 259, 0xc90, 0x10, 0, 4),
+ PIN_FIELD16(260, 260, 0x3a0, 0x10, 0, 4),
+ PIN_FIELD16(261, 261, 0xd50, 0x10, 0, 4),
+ PINS_FIELD16(262, 277, 0xf00, 0x10, 8, 4),
+ PIN_FIELD16(278, 278, 0xf70, 0x10, 8, 4),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_tdsel_range[] = {
+ PINS_FIELD16(262, 276, 0x4c0, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_pupd_range[] = {
+ /* MSDC0 */
+ PIN_FIELD16(111, 111, 0xd00, 0x10, 12, 1),
+ PIN_FIELD16(112, 112, 0xd00, 0x10, 8, 1),
+ PIN_FIELD16(113, 113, 0xd00, 0x10, 4, 1),
+ PIN_FIELD16(114, 114, 0xd00, 0x10, 0, 1),
+ PIN_FIELD16(115, 115, 0xd10, 0x10, 0, 1),
+ PIN_FIELD16(116, 116, 0xcd0, 0x10, 8, 1),
+ PIN_FIELD16(117, 117, 0xcc0, 0x10, 8, 1),
+ PIN_FIELD16(118, 118, 0xcf0, 0x10, 12, 1),
+ PIN_FIELD16(119, 119, 0xcf0, 0x10, 8, 1),
+ PIN_FIELD16(120, 120, 0xcf0, 0x10, 4, 1),
+ PIN_FIELD16(121, 121, 0xcf0, 0x10, 0, 1),
+ /* MSDC1 */
+ PIN_FIELD16(105, 105, 0xd40, 0x10, 8, 1),
+ PIN_FIELD16(106, 106, 0xd30, 0x10, 8, 1),
+ PIN_FIELD16(107, 107, 0xd60, 0x10, 0, 1),
+ PIN_FIELD16(108, 108, 0xd60, 0x10, 10, 1),
+ PIN_FIELD16(109, 109, 0xd60, 0x10, 4, 1),
+ PIN_FIELD16(110, 110, 0xc60, 0x10, 12, 1),
+ /* MSDC2 */
+ PIN_FIELD16(85, 85, 0xda0, 0x10, 8, 1),
+ PIN_FIELD16(86, 86, 0xd90, 0x10, 8, 1),
+ PIN_FIELD16(87, 87, 0xdc0, 0x10, 0, 1),
+ PIN_FIELD16(88, 88, 0xdc0, 0x10, 10, 1),
+ PIN_FIELD16(89, 89, 0xdc0, 0x10, 4, 1),
+ PIN_FIELD16(90, 90, 0xdc0, 0x10, 12, 1),
+ /* MSDC0E */
+ PIN_FIELD16(249, 249, 0x140, 0x10, 0, 1),
+ PIN_FIELD16(250, 250, 0x130, 0x10, 12, 1),
+ PIN_FIELD16(251, 251, 0x130, 0x10, 8, 1),
+ PIN_FIELD16(252, 252, 0x130, 0x10, 4, 1),
+ PIN_FIELD16(253, 253, 0x130, 0x10, 0, 1),
+ PIN_FIELD16(254, 254, 0xf40, 0x10, 12, 1),
+ PIN_FIELD16(255, 255, 0xf40, 0x10, 8, 1),
+ PIN_FIELD16(256, 256, 0xf40, 0x10, 4, 1),
+ PIN_FIELD16(257, 257, 0xf40, 0x10, 0, 1),
+ PIN_FIELD16(258, 258, 0xcb0, 0x10, 8, 1),
+ PIN_FIELD16(259, 259, 0xc90, 0x10, 8, 1),
+ PIN_FIELD16(261, 261, 0x140, 0x10, 8, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_r1_range[] = {
+ /* MSDC0 */
+ PIN_FIELD16(111, 111, 0xd00, 0x10, 13, 1),
+ PIN_FIELD16(112, 112, 0xd00, 0x10, 9, 1),
+ PIN_FIELD16(113, 113, 0xd00, 0x10, 5, 1),
+ PIN_FIELD16(114, 114, 0xd00, 0x10, 1, 1),
+ PIN_FIELD16(115, 115, 0xd10, 0x10, 1, 1),
+ PIN_FIELD16(116, 116, 0xcd0, 0x10, 9, 1),
+ PIN_FIELD16(117, 117, 0xcc0, 0x10, 9, 1),
+ PIN_FIELD16(118, 118, 0xcf0, 0x10, 13, 1),
+ PIN_FIELD16(119, 119, 0xcf0, 0x10, 9, 1),
+ PIN_FIELD16(120, 120, 0xcf0, 0x10, 5, 1),
+ PIN_FIELD16(121, 121, 0xcf0, 0x10, 1, 1),
+ /* MSDC1 */
+ PIN_FIELD16(105, 105, 0xd40, 0x10, 9, 1),
+ PIN_FIELD16(106, 106, 0xd30, 0x10, 9, 1),
+ PIN_FIELD16(107, 107, 0xd60, 0x10, 1, 1),
+ PIN_FIELD16(108, 108, 0xd60, 0x10, 9, 1),
+ PIN_FIELD16(109, 109, 0xd60, 0x10, 5, 1),
+ PIN_FIELD16(110, 110, 0xc60, 0x10, 13, 1),
+ /* MSDC2 */
+ PIN_FIELD16(85, 85, 0xda0, 0x10, 9, 1),
+ PIN_FIELD16(86, 86, 0xd90, 0x10, 9, 1),
+ PIN_FIELD16(87, 87, 0xdc0, 0x10, 1, 1),
+ PIN_FIELD16(88, 88, 0xdc0, 0x10, 9, 1),
+ PIN_FIELD16(89, 89, 0xdc0, 0x10, 5, 1),
+ PIN_FIELD16(90, 90, 0xdc0, 0x10, 13, 1),
+ /* MSDC0E */
+ PIN_FIELD16(249, 249, 0x140, 0x10, 1, 1),
+ PIN_FIELD16(250, 250, 0x130, 0x10, 13, 1),
+ PIN_FIELD16(251, 251, 0x130, 0x10, 9, 1),
+ PIN_FIELD16(252, 252, 0x130, 0x10, 5, 1),
+ PIN_FIELD16(253, 253, 0x130, 0x10, 1, 1),
+ PIN_FIELD16(254, 254, 0xf40, 0x10, 13, 1),
+ PIN_FIELD16(255, 255, 0xf40, 0x10, 9, 1),
+ PIN_FIELD16(256, 256, 0xf40, 0x10, 5, 1),
+ PIN_FIELD16(257, 257, 0xf40, 0x10, 1, 1),
+ PIN_FIELD16(258, 258, 0xcb0, 0x10, 9, 1),
+ PIN_FIELD16(259, 259, 0xc90, 0x10, 9, 1),
+ PIN_FIELD16(261, 261, 0x140, 0x10, 9, 1),
+};
+
+static const struct mtk_pin_field_calc mt7623_pin_r0_range[] = {
+ /* MSDC0 */
+ PIN_FIELD16(111, 111, 0xd00, 0x10, 14, 1),
+ PIN_FIELD16(112, 112, 0xd00, 0x10, 10, 1),
+ PIN_FIELD16(113, 113, 0xd00, 0x10, 6, 1),
+ PIN_FIELD16(114, 114, 0xd00, 0x10, 2, 1),
+ PIN_FIELD16(115, 115, 0xd10, 0x10, 2, 1),
+ PIN_FIELD16(116, 116, 0xcd0, 0x10, 10, 1),
+ PIN_FIELD16(117, 117, 0xcc0, 0x10, 10, 1),
+ PIN_FIELD16(118, 118, 0xcf0, 0x10, 14, 1),
+ PIN_FIELD16(119, 119, 0xcf0, 0x10, 10, 1),
+ PIN_FIELD16(120, 120, 0xcf0, 0x10, 6, 1),
+ PIN_FIELD16(121, 121, 0xcf0, 0x10, 2, 1),
+ /* MSDC1 */
+ PIN_FIELD16(105, 105, 0xd40, 0x10, 10, 1),
+ PIN_FIELD16(106, 106, 0xd30, 0x10, 10, 1),
+ PIN_FIELD16(107, 107, 0xd60, 0x10, 2, 1),
+ PIN_FIELD16(108, 108, 0xd60, 0x10, 8, 1),
+ PIN_FIELD16(109, 109, 0xd60, 0x10, 6, 1),
+ PIN_FIELD16(110, 110, 0xc60, 0x10, 14, 1),
+ /* MSDC2 */
+ PIN_FIELD16(85, 85, 0xda0, 0x10, 10, 1),
+ PIN_FIELD16(86, 86, 0xd90, 0x10, 10, 1),
+ PIN_FIELD16(87, 87, 0xdc0, 0x10, 2, 1),
+ PIN_FIELD16(88, 88, 0xdc0, 0x10, 8, 1),
+ PIN_FIELD16(89, 89, 0xdc0, 0x10, 6, 1),
+ PIN_FIELD16(90, 90, 0xdc0, 0x10, 14, 1),
+ /* MSDC0E */
+ PIN_FIELD16(249, 249, 0x140, 0x10, 2, 1),
+ PIN_FIELD16(250, 250, 0x130, 0x10, 14, 1),
+ PIN_FIELD16(251, 251, 0x130, 0x10, 10, 1),
+ PIN_FIELD16(252, 252, 0x130, 0x10, 6, 1),
+ PIN_FIELD16(253, 253, 0x130, 0x10, 2, 1),
+ PIN_FIELD16(254, 254, 0xf40, 0x10, 14, 1),
+ PIN_FIELD16(255, 255, 0xf40, 0x10, 10, 1),
+ PIN_FIELD16(256, 256, 0xf40, 0x10, 6, 1),
+ PIN_FIELD16(257, 257, 0xf40, 0x10, 2, 1),
+ PIN_FIELD16(258, 258, 0xcb0, 0x10, 10, 1),
+ PIN_FIELD16(259, 259, 0xc90, 0x10, 10, 1),
+ PIN_FIELD16(261, 261, 0x140, 0x10, 10, 1),
+};
+
+static const struct mtk_pin_reg_calc mt7623_reg_cals[] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt7623_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt7623_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt7623_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt7623_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt7623_pin_smt_range),
+ [PINCTRL_PIN_REG_PULLSEL] = MTK_RANGE(mt7623_pin_pullsel_range),
+ [PINCTRL_PIN_REG_PULLEN] = MTK_RANGE(mt7623_pin_pullen_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt7623_pin_drv_range),
+ [PINCTRL_PIN_REG_TDSEL] = MTK_RANGE(mt7623_pin_tdsel_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt7623_pin_ies_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt7623_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt7623_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt7623_pin_r1_range),
+};
+
+static const struct mtk_pin_desc mt7623_pins[] = {
+ MT7623_PIN(0, "PWRAP_SPI0_MI", 148, DRV_GRP3),
+ MT7623_PIN(1, "PWRAP_SPI0_MO", 149, DRV_GRP3),
+ MT7623_PIN(2, "PWRAP_INT", 150, DRV_GRP3),
+ MT7623_PIN(3, "PWRAP_SPI0_CK", 151, DRV_GRP3),
+ MT7623_PIN(4, "PWRAP_SPI0_CSN", 152, DRV_GRP3),
+ MT7623_PIN(5, "PWRAP_SPI0_CK2", 153, DRV_GRP3),
+ MT7623_PIN(6, "PWRAP_SPI0_CSN2", 154, DRV_GRP3),
+ MT7623_PIN(7, "SPI1_CSN", 155, DRV_GRP3),
+ MT7623_PIN(8, "SPI1_MI", 156, DRV_GRP3),
+ MT7623_PIN(9, "SPI1_MO", 157, DRV_GRP3),
+ MT7623_PIN(10, "RTC32K_CK", 158, DRV_GRP3),
+ MT7623_PIN(11, "WATCHDOG", 159, DRV_GRP3),
+ MT7623_PIN(12, "SRCLKENA", 160, DRV_GRP3),
+ MT7623_PIN(13, "SRCLKENAI", 161, DRV_GRP3),
+ MT7623_PIN(14, "URXD2", 162, DRV_GRP1),
+ MT7623_PIN(15, "UTXD2", 163, DRV_GRP1),
+ MT7623_PIN(16, "I2S5_DATA_IN", 164, DRV_GRP1),
+ MT7623_PIN(17, "I2S5_BCK", 165, DRV_GRP1),
+ MT7623_PIN(18, "PCM_CLK", 166, DRV_GRP1),
+ MT7623_PIN(19, "PCM_SYNC", 167, DRV_GRP1),
+ MT7623_PIN(20, "PCM_RX", EINT_NA, DRV_GRP1),
+ MT7623_PIN(21, "PCM_TX", EINT_NA, DRV_GRP1),
+ MT7623_PIN(22, "EINT0", 0, DRV_GRP1),
+ MT7623_PIN(23, "EINT1", 1, DRV_GRP1),
+ MT7623_PIN(24, "EINT2", 2, DRV_GRP1),
+ MT7623_PIN(25, "EINT3", 3, DRV_GRP1),
+ MT7623_PIN(26, "EINT4", 4, DRV_GRP1),
+ MT7623_PIN(27, "EINT5", 5, DRV_GRP1),
+ MT7623_PIN(28, "EINT6", 6, DRV_GRP1),
+ MT7623_PIN(29, "EINT7", 7, DRV_GRP1),
+ MT7623_PIN(30, "I2S5_LRCK", 12, DRV_GRP1),
+ MT7623_PIN(31, "I2S5_MCLK", 13, DRV_GRP1),
+ MT7623_PIN(32, "I2S5_DATA", 14, DRV_GRP1),
+ MT7623_PIN(33, "I2S1_DATA", 15, DRV_GRP1),
+ MT7623_PIN(34, "I2S1_DATA_IN", 16, DRV_GRP1),
+ MT7623_PIN(35, "I2S1_BCK", 17, DRV_GRP1),
+ MT7623_PIN(36, "I2S1_LRCK", 18, DRV_GRP1),
+ MT7623_PIN(37, "I2S1_MCLK", 19, DRV_GRP1),
+ MT7623_PIN(38, "I2S2_DATA", 20, DRV_GRP1),
+ MT7623_PIN(39, "JTMS", 21, DRV_GRP3),
+ MT7623_PIN(40, "JTCK", 22, DRV_GRP3),
+ MT7623_PIN(41, "JTDI", 23, DRV_GRP3),
+ MT7623_PIN(42, "JTDO", 24, DRV_GRP3),
+ MT7623_PIN(43, "NCLE", 25, DRV_GRP1),
+ MT7623_PIN(44, "NCEB1", 26, DRV_GRP1),
+ MT7623_PIN(45, "NCEB0", 27, DRV_GRP1),
+ MT7623_PIN(46, "IR", 28, DRV_FIXED),
+ MT7623_PIN(47, "NREB", 29, DRV_GRP1),
+ MT7623_PIN(48, "NRNB", 30, DRV_GRP1),
+ MT7623_PIN(49, "I2S0_DATA", 31, DRV_GRP1),
+ MT7623_PIN(50, "I2S2_BCK", 32, DRV_GRP1),
+ MT7623_PIN(51, "I2S2_DATA_IN", 33, DRV_GRP1),
+ MT7623_PIN(52, "I2S2_LRCK", 34, DRV_GRP1),
+ MT7623_PIN(53, "SPI0_CSN", 35, DRV_GRP1),
+ MT7623_PIN(54, "SPI0_CK", 36, DRV_GRP1),
+ MT7623_PIN(55, "SPI0_MI", 37, DRV_GRP1),
+ MT7623_PIN(56, "SPI0_MO", 38, DRV_GRP1),
+ MT7623_PIN(57, "SDA1", 39, DRV_FIXED),
+ MT7623_PIN(58, "SCL1", 40, DRV_FIXED),
+ MT7623_PIN(59, "RAMBUF_I_CLK", EINT_NA, DRV_FIXED),
+ MT7623_PIN(60, "WB_RSTB", 41, DRV_GRP3),
+ MT7623_PIN(61, "F2W_DATA", 42, DRV_GRP3),
+ MT7623_PIN(62, "F2W_CLK", 43, DRV_GRP3),
+ MT7623_PIN(63, "WB_SCLK", 44, DRV_GRP3),
+ MT7623_PIN(64, "WB_SDATA", 45, DRV_GRP3),
+ MT7623_PIN(65, "WB_SEN", 46, DRV_GRP3),
+ MT7623_PIN(66, "WB_CRTL0", 47, DRV_GRP3),
+ MT7623_PIN(67, "WB_CRTL1", 48, DRV_GRP3),
+ MT7623_PIN(68, "WB_CRTL2", 49, DRV_GRP3),
+ MT7623_PIN(69, "WB_CRTL3", 50, DRV_GRP3),
+ MT7623_PIN(70, "WB_CRTL4", 51, DRV_GRP3),
+ MT7623_PIN(71, "WB_CRTL5", 52, DRV_GRP3),
+ MT7623_PIN(72, "I2S0_DATA_IN", 53, DRV_GRP1),
+ MT7623_PIN(73, "I2S0_LRCK", 54, DRV_GRP1),
+ MT7623_PIN(74, "I2S0_BCK", 55, DRV_GRP1),
+ MT7623_PIN(75, "SDA0", 56, DRV_FIXED),
+ MT7623_PIN(76, "SCL0", 57, DRV_FIXED),
+ MT7623_PIN(77, "SDA2", 58, DRV_FIXED),
+ MT7623_PIN(78, "SCL2", 59, DRV_FIXED),
+ MT7623_PIN(79, "URXD0", 60, DRV_FIXED),
+ MT7623_PIN(80, "UTXD0", 61, DRV_FIXED),
+ MT7623_PIN(81, "URXD1", 62, DRV_FIXED),
+ MT7623_PIN(82, "UTXD1", 63, DRV_FIXED),
+ MT7623_PIN(83, "LCM_RST", 64, DRV_FIXED),
+ MT7623_PIN(84, "DSI_TE", 65, DRV_FIXED),
+ MT7623_PIN(85, "MSDC2_CMD", 66, DRV_GRP4),
+ MT7623_PIN(86, "MSDC2_CLK", 67, DRV_GRP4),
+ MT7623_PIN(87, "MSDC2_DAT0", 68, DRV_GRP4),
+ MT7623_PIN(88, "MSDC2_DAT1", 69, DRV_GRP4),
+ MT7623_PIN(89, "MSDC2_DAT2", 70, DRV_GRP4),
+ MT7623_PIN(90, "MSDC2_DAT3", 71, DRV_GRP4),
+ MT7623_PIN(91, "TDN3", EINT_NA, DRV_FIXED),
+ MT7623_PIN(92, "TDP3", EINT_NA, DRV_FIXED),
+ MT7623_PIN(93, "TDN2", EINT_NA, DRV_FIXED),
+ MT7623_PIN(94, "TDP2", EINT_NA, DRV_FIXED),
+ MT7623_PIN(95, "TCN", EINT_NA, DRV_FIXED),
+ MT7623_PIN(96, "TCP", EINT_NA, DRV_FIXED),
+ MT7623_PIN(97, "TDN1", EINT_NA, DRV_FIXED),
+ MT7623_PIN(98, "TDP1", EINT_NA, DRV_FIXED),
+ MT7623_PIN(99, "TDN0", EINT_NA, DRV_FIXED),
+ MT7623_PIN(100, "TDP0", EINT_NA, DRV_FIXED),
+ MT7623_PIN(101, "SPI2_CSN", 74, DRV_FIXED),
+ MT7623_PIN(102, "SPI2_MI", 75, DRV_FIXED),
+ MT7623_PIN(103, "SPI2_MO", 76, DRV_FIXED),
+ MT7623_PIN(104, "SPI2_CLK", 77, DRV_FIXED),
+ MT7623_PIN(105, "MSDC1_CMD", 78, DRV_GRP4),
+ MT7623_PIN(106, "MSDC1_CLK", 79, DRV_GRP4),
+ MT7623_PIN(107, "MSDC1_DAT0", 80, DRV_GRP4),
+ MT7623_PIN(108, "MSDC1_DAT1", 81, DRV_GRP4),
+ MT7623_PIN(109, "MSDC1_DAT2", 82, DRV_GRP4),
+ MT7623_PIN(110, "MSDC1_DAT3", 83, DRV_GRP4),
+ MT7623_PIN(111, "MSDC0_DAT7", 84, DRV_GRP4),
+ MT7623_PIN(112, "MSDC0_DAT6", 85, DRV_GRP4),
+ MT7623_PIN(113, "MSDC0_DAT5", 86, DRV_GRP4),
+ MT7623_PIN(114, "MSDC0_DAT4", 87, DRV_GRP4),
+ MT7623_PIN(115, "MSDC0_RSTB", 88, DRV_GRP4),
+ MT7623_PIN(116, "MSDC0_CMD", 89, DRV_GRP4),
+ MT7623_PIN(117, "MSDC0_CLK", 90, DRV_GRP4),
+ MT7623_PIN(118, "MSDC0_DAT3", 91, DRV_GRP4),
+ MT7623_PIN(119, "MSDC0_DAT2", 92, DRV_GRP4),
+ MT7623_PIN(120, "MSDC0_DAT1", 93, DRV_GRP4),
+ MT7623_PIN(121, "MSDC0_DAT0", 94, DRV_GRP4),
+ MT7623_PIN(122, "CEC", 95, DRV_FIXED),
+ MT7623_PIN(123, "HTPLG", 96, DRV_FIXED),
+ MT7623_PIN(124, "HDMISCK", 97, DRV_FIXED),
+ MT7623_PIN(125, "HDMISD", 98, DRV_FIXED),
+ MT7623_PIN(126, "I2S0_MCLK", 99, DRV_GRP1),
+ MT7623_PIN(127, "RAMBUF_IDATA0", EINT_NA, DRV_FIXED),
+ MT7623_PIN(128, "RAMBUF_IDATA1", EINT_NA, DRV_FIXED),
+ MT7623_PIN(129, "RAMBUF_IDATA2", EINT_NA, DRV_FIXED),
+ MT7623_PIN(130, "RAMBUF_IDATA3", EINT_NA, DRV_FIXED),
+ MT7623_PIN(131, "RAMBUF_IDATA4", EINT_NA, DRV_FIXED),
+ MT7623_PIN(132, "RAMBUF_IDATA5", EINT_NA, DRV_FIXED),
+ MT7623_PIN(133, "RAMBUF_IDATA6", EINT_NA, DRV_FIXED),
+ MT7623_PIN(134, "RAMBUF_IDATA7", EINT_NA, DRV_FIXED),
+ MT7623_PIN(135, "RAMBUF_IDATA8", EINT_NA, DRV_FIXED),
+ MT7623_PIN(136, "RAMBUF_IDATA9", EINT_NA, DRV_FIXED),
+ MT7623_PIN(137, "RAMBUF_IDATA10", EINT_NA, DRV_FIXED),
+ MT7623_PIN(138, "RAMBUF_IDATA11", EINT_NA, DRV_FIXED),
+ MT7623_PIN(139, "RAMBUF_IDATA12", EINT_NA, DRV_FIXED),
+ MT7623_PIN(140, "RAMBUF_IDATA13", EINT_NA, DRV_FIXED),
+ MT7623_PIN(141, "RAMBUF_IDATA14", EINT_NA, DRV_FIXED),
+ MT7623_PIN(142, "RAMBUF_IDATA15", EINT_NA, DRV_FIXED),
+ MT7623_PIN(143, "RAMBUF_ODATA0", EINT_NA, DRV_FIXED),
+ MT7623_PIN(144, "RAMBUF_ODATA1", EINT_NA, DRV_FIXED),
+ MT7623_PIN(145, "RAMBUF_ODATA2", EINT_NA, DRV_FIXED),
+ MT7623_PIN(146, "RAMBUF_ODATA3", EINT_NA, DRV_FIXED),
+ MT7623_PIN(147, "RAMBUF_ODATA4", EINT_NA, DRV_FIXED),
+ MT7623_PIN(148, "RAMBUF_ODATA5", EINT_NA, DRV_FIXED),
+ MT7623_PIN(149, "RAMBUF_ODATA6", EINT_NA, DRV_FIXED),
+ MT7623_PIN(150, "RAMBUF_ODATA7", EINT_NA, DRV_FIXED),
+ MT7623_PIN(151, "RAMBUF_ODATA8", EINT_NA, DRV_FIXED),
+ MT7623_PIN(152, "RAMBUF_ODATA9", EINT_NA, DRV_FIXED),
+ MT7623_PIN(153, "RAMBUF_ODATA10", EINT_NA, DRV_FIXED),
+ MT7623_PIN(154, "RAMBUF_ODATA11", EINT_NA, DRV_FIXED),
+ MT7623_PIN(155, "RAMBUF_ODATA12", EINT_NA, DRV_FIXED),
+ MT7623_PIN(156, "RAMBUF_ODATA13", EINT_NA, DRV_FIXED),
+ MT7623_PIN(157, "RAMBUF_ODATA14", EINT_NA, DRV_FIXED),
+ MT7623_PIN(158, "RAMBUF_ODATA15", EINT_NA, DRV_FIXED),
+ MT7623_PIN(159, "RAMBUF_BE0", EINT_NA, DRV_FIXED),
+ MT7623_PIN(160, "RAMBUF_BE1", EINT_NA, DRV_FIXED),
+ MT7623_PIN(161, "AP2PT_INT", EINT_NA, DRV_FIXED),
+ MT7623_PIN(162, "AP2PT_INT_CLR", EINT_NA, DRV_FIXED),
+ MT7623_PIN(163, "PT2AP_INT", EINT_NA, DRV_FIXED),
+ MT7623_PIN(164, "PT2AP_INT_CLR", EINT_NA, DRV_FIXED),
+ MT7623_PIN(165, "AP2UP_INT", EINT_NA, DRV_FIXED),
+ MT7623_PIN(166, "AP2UP_INT_CLR", EINT_NA, DRV_FIXED),
+ MT7623_PIN(167, "UP2AP_INT", EINT_NA, DRV_FIXED),
+ MT7623_PIN(168, "UP2AP_INT_CLR", EINT_NA, DRV_FIXED),
+ MT7623_PIN(169, "RAMBUF_ADDR0", EINT_NA, DRV_FIXED),
+ MT7623_PIN(170, "RAMBUF_ADDR1", EINT_NA, DRV_FIXED),
+ MT7623_PIN(171, "RAMBUF_ADDR2", EINT_NA, DRV_FIXED),
+ MT7623_PIN(172, "RAMBUF_ADDR3", EINT_NA, DRV_FIXED),
+ MT7623_PIN(173, "RAMBUF_ADDR4", EINT_NA, DRV_FIXED),
+ MT7623_PIN(174, "RAMBUF_ADDR5", EINT_NA, DRV_FIXED),
+ MT7623_PIN(175, "RAMBUF_ADDR6", EINT_NA, DRV_FIXED),
+ MT7623_PIN(176, "RAMBUF_ADDR7", EINT_NA, DRV_FIXED),
+ MT7623_PIN(177, "RAMBUF_ADDR8", EINT_NA, DRV_FIXED),
+ MT7623_PIN(178, "RAMBUF_ADDR9", EINT_NA, DRV_FIXED),
+ MT7623_PIN(179, "RAMBUF_ADDR10", EINT_NA, DRV_FIXED),
+ MT7623_PIN(180, "RAMBUF_RW", EINT_NA, DRV_FIXED),
+ MT7623_PIN(181, "RAMBUF_LAST", EINT_NA, DRV_FIXED),
+ MT7623_PIN(182, "RAMBUF_HP", EINT_NA, DRV_FIXED),
+ MT7623_PIN(183, "RAMBUF_REQ", EINT_NA, DRV_FIXED),
+ MT7623_PIN(184, "RAMBUF_ALE", EINT_NA, DRV_FIXED),
+ MT7623_PIN(185, "RAMBUF_DLE", EINT_NA, DRV_FIXED),
+ MT7623_PIN(186, "RAMBUF_WDLE", EINT_NA, DRV_FIXED),
+ MT7623_PIN(187, "RAMBUF_O_CLK", EINT_NA, DRV_FIXED),
+ MT7623_PIN(188, "I2S2_MCLK", 100, DRV_GRP1),
+ MT7623_PIN(189, "I2S3_DATA", 101, DRV_GRP1),
+ MT7623_PIN(190, "I2S3_DATA_IN", 102, DRV_GRP1),
+ MT7623_PIN(191, "I2S3_BCK", 103, DRV_GRP1),
+ MT7623_PIN(192, "I2S3_LRCK", 104, DRV_GRP1),
+ MT7623_PIN(193, "I2S3_MCLK", 105, DRV_GRP1),
+ MT7623_PIN(194, "I2S4_DATA", 106, DRV_GRP1),
+ MT7623_PIN(195, "I2S4_DATA_IN", 107, DRV_GRP1),
+ MT7623_PIN(196, "I2S4_BCK", 108, DRV_GRP1),
+ MT7623_PIN(197, "I2S4_LRCK", 109, DRV_GRP1),
+ MT7623_PIN(198, "I2S4_MCLK", 110, DRV_GRP1),
+ MT7623_PIN(199, "SPI1_CLK", 111, DRV_GRP3),
+ MT7623_PIN(200, "SPDIF_OUT", 112, DRV_GRP1),
+ MT7623_PIN(201, "SPDIF_IN0", 113, DRV_GRP1),
+ MT7623_PIN(202, "SPDIF_IN1", 114, DRV_GRP1),
+ MT7623_PIN(203, "PWM0", 115, DRV_GRP1),
+ MT7623_PIN(204, "PWM1", 116, DRV_GRP1),
+ MT7623_PIN(205, "PWM2", 117, DRV_GRP1),
+ MT7623_PIN(206, "PWM3", 118, DRV_GRP1),
+ MT7623_PIN(207, "PWM4", 119, DRV_GRP1),
+ MT7623_PIN(208, "AUD_EXT_CK1", 120, DRV_GRP1),
+ MT7623_PIN(209, "AUD_EXT_CK2", 121, DRV_GRP1),
+ MT7623_PIN(210, "AUD_CLOCK", EINT_NA, DRV_GRP3),
+ MT7623_PIN(211, "DVP_RESET", EINT_NA, DRV_GRP3),
+ MT7623_PIN(212, "DVP_CLOCK", EINT_NA, DRV_GRP3),
+ MT7623_PIN(213, "DVP_CS", EINT_NA, DRV_GRP3),
+ MT7623_PIN(214, "DVP_CK", EINT_NA, DRV_GRP3),
+ MT7623_PIN(215, "DVP_DI", EINT_NA, DRV_GRP3),
+ MT7623_PIN(216, "DVP_DO", EINT_NA, DRV_GRP3),
+ MT7623_PIN(217, "AP_CS", EINT_NA, DRV_GRP3),
+ MT7623_PIN(218, "AP_CK", EINT_NA, DRV_GRP3),
+ MT7623_PIN(219, "AP_DI", EINT_NA, DRV_GRP3),
+ MT7623_PIN(220, "AP_DO", EINT_NA, DRV_GRP3),
+ MT7623_PIN(221, "DVD_BCLK", EINT_NA, DRV_GRP3),
+ MT7623_PIN(222, "T8032_CLK", EINT_NA, DRV_GRP3),
+ MT7623_PIN(223, "AP_BCLK", EINT_NA, DRV_GRP3),
+ MT7623_PIN(224, "HOST_CS", EINT_NA, DRV_GRP3),
+ MT7623_PIN(225, "HOST_CK", EINT_NA, DRV_GRP3),
+ MT7623_PIN(226, "HOST_DO0", EINT_NA, DRV_GRP3),
+ MT7623_PIN(227, "HOST_DO1", EINT_NA, DRV_GRP3),
+ MT7623_PIN(228, "SLV_CS", EINT_NA, DRV_GRP3),
+ MT7623_PIN(229, "SLV_CK", EINT_NA, DRV_GRP3),
+ MT7623_PIN(230, "SLV_DI0", EINT_NA, DRV_GRP3),
+ MT7623_PIN(231, "SLV_DI1", EINT_NA, DRV_GRP3),
+ MT7623_PIN(232, "AP2DSP_INT", EINT_NA, DRV_GRP3),
+ MT7623_PIN(233, "AP2DSP_INT_CLR", EINT_NA, DRV_GRP3),
+ MT7623_PIN(234, "DSP2AP_INT", EINT_NA, DRV_GRP3),
+ MT7623_PIN(235, "DSP2AP_INT_CLR", EINT_NA, DRV_GRP3),
+ MT7623_PIN(236, "EXT_SDIO3", 122, DRV_GRP1),
+ MT7623_PIN(237, "EXT_SDIO2", 123, DRV_GRP1),
+ MT7623_PIN(238, "EXT_SDIO1", 124, DRV_GRP1),
+ MT7623_PIN(239, "EXT_SDIO0", 125, DRV_GRP1),
+ MT7623_PIN(240, "EXT_XCS", 126, DRV_GRP1),
+ MT7623_PIN(241, "EXT_SCK", 127, DRV_GRP1),
+ MT7623_PIN(242, "URTS2", 128, DRV_GRP1),
+ MT7623_PIN(243, "UCTS2", 129, DRV_GRP1),
+ MT7623_PIN(244, "HDMI_SDA_RX", 130, DRV_FIXED),
+ MT7623_PIN(245, "HDMI_SCL_RX", 131, DRV_FIXED),
+ MT7623_PIN(246, "MHL_SENCE", 132, DRV_FIXED),
+ MT7623_PIN(247, "HDMI_HPD_CBUS_RX", 69, DRV_FIXED),
+ MT7623_PIN(248, "HDMI_TESTOUTP_RX", 133, DRV_GRP1),
+ MT7623_PIN(249, "MSDC0E_RSTB", 134, DRV_GRP4),
+ MT7623_PIN(250, "MSDC0E_DAT7", 135, DRV_GRP4),
+ MT7623_PIN(251, "MSDC0E_DAT6", 136, DRV_GRP4),
+ MT7623_PIN(252, "MSDC0E_DAT5", 137, DRV_GRP4),
+ MT7623_PIN(253, "MSDC0E_DAT4", 138, DRV_GRP4),
+ MT7623_PIN(254, "MSDC0E_DAT3", 139, DRV_GRP4),
+ MT7623_PIN(255, "MSDC0E_DAT2", 140, DRV_GRP4),
+ MT7623_PIN(256, "MSDC0E_DAT1", 141, DRV_GRP4),
+ MT7623_PIN(257, "MSDC0E_DAT0", 142, DRV_GRP4),
+ MT7623_PIN(258, "MSDC0E_CMD", 143, DRV_GRP4),
+ MT7623_PIN(259, "MSDC0E_CLK", 144, DRV_GRP4),
+ MT7623_PIN(260, "MSDC0E_DSL", 145, DRV_GRP4),
+ MT7623_PIN(261, "MSDC1_INS", 146, DRV_GRP4),
+ MT7623_PIN(262, "G2_TXEN", 8, DRV_GRP1),
+ MT7623_PIN(263, "G2_TXD3", 9, DRV_GRP1),
+ MT7623_PIN(264, "G2_TXD2", 10, DRV_GRP1),
+ MT7623_PIN(265, "G2_TXD1", 11, DRV_GRP1),
+ MT7623_PIN(266, "G2_TXD0", EINT_NA, DRV_GRP1),
+ MT7623_PIN(267, "G2_TXC", EINT_NA, DRV_GRP1),
+ MT7623_PIN(268, "G2_RXC", EINT_NA, DRV_GRP1),
+ MT7623_PIN(269, "G2_RXD0", EINT_NA, DRV_GRP1),
+ MT7623_PIN(270, "G2_RXD1", EINT_NA, DRV_GRP1),
+ MT7623_PIN(271, "G2_RXD2", EINT_NA, DRV_GRP1),
+ MT7623_PIN(272, "G2_RXD3", EINT_NA, DRV_GRP1),
+ MT7623_PIN(273, "ESW_INT", 168, DRV_GRP1),
+ MT7623_PIN(274, "G2_RXDV", EINT_NA, DRV_GRP1),
+ MT7623_PIN(275, "MDC", EINT_NA, DRV_GRP1),
+ MT7623_PIN(276, "MDIO", EINT_NA, DRV_GRP1),
+ MT7623_PIN(277, "ESW_RST", EINT_NA, DRV_GRP1),
+ MT7623_PIN(278, "JTAG_RESET", 147, DRV_GRP3),
+ MT7623_PIN(279, "USB3_RES_BOND", EINT_NA, DRV_GRP1),
+};
+
+/* The list below enumerates, for each hardware block, the group of pins
+ * that enables it together with the mux mode required on each pin. A
+ * given block may have several alternative pinouts.
+ */
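+
+/* For example, the EPHY group below muxes pins 262..272 and 274 all to
+ * mode 1: each *_pins[] array is paired index-by-index with its
+ * *_funcs[] array, and the generic set_mux() implementation programs
+ * PINCTRL_PIN_REG_MODE on each pin accordingly.
+ */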
+
+/* AUDIO EXT CLK */
+static int mt7623_aud_ext_clk0_pins[] = { 208, };
+static int mt7623_aud_ext_clk0_funcs[] = { 1, };
+static int mt7623_aud_ext_clk1_pins[] = { 209, };
+static int mt7623_aud_ext_clk1_funcs[] = { 1, };
+
+/* DISP PWM */
+static int mt7623_disp_pwm_0_pins[] = { 72, };
+static int mt7623_disp_pwm_0_funcs[] = { 5, };
+static int mt7623_disp_pwm_1_pins[] = { 203, };
+static int mt7623_disp_pwm_1_funcs[] = { 2, };
+static int mt7623_disp_pwm_2_pins[] = { 208, };
+static int mt7623_disp_pwm_2_funcs[] = { 5, };
+
+/* ESW */
+static int mt7623_esw_int_pins[] = { 273, };
+static int mt7623_esw_int_funcs[] = { 1, };
+static int mt7623_esw_rst_pins[] = { 277, };
+static int mt7623_esw_rst_funcs[] = { 1, };
+
+/* EPHY */
+static int mt7623_ephy_pins[] = { 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 274, };
+static int mt7623_ephy_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, };
+
+/* EXT_SDIO */
+static int mt7623_ext_sdio_pins[] = { 236, 237, 238, 239, 240, 241, };
+static int mt7623_ext_sdio_funcs[] = { 1, 1, 1, 1, 1, 1, };
+
+/* HDMI RX */
+static int mt7623_hdmi_rx_pins[] = { 247, 248, };
+static int mt7623_hdmi_rx_funcs[] = { 1, 1 };
+static int mt7623_hdmi_rx_i2c_pins[] = { 244, 245, };
+static int mt7623_hdmi_rx_i2c_funcs[] = { 1, 1 };
+
+/* HDMI TX */
+static int mt7623_hdmi_cec_pins[] = { 122, };
+static int mt7623_hdmi_cec_funcs[] = { 1, };
+static int mt7623_hdmi_htplg_pins[] = { 123, };
+static int mt7623_hdmi_htplg_funcs[] = { 1, };
+static int mt7623_hdmi_i2c_pins[] = { 124, 125, };
+static int mt7623_hdmi_i2c_funcs[] = { 1, 1 };
+
+/* I2C */
+static int mt7623_i2c0_pins[] = { 75, 76, };
+static int mt7623_i2c0_funcs[] = { 1, 1, };
+static int mt7623_i2c1_0_pins[] = { 57, 58, };
+static int mt7623_i2c1_0_funcs[] = { 1, 1, };
+static int mt7623_i2c1_1_pins[] = { 242, 243, };
+static int mt7623_i2c1_1_funcs[] = { 4, 4, };
+static int mt7623_i2c1_2_pins[] = { 85, 86, };
+static int mt7623_i2c1_2_funcs[] = { 3, 3, };
+static int mt7623_i2c1_3_pins[] = { 105, 106, };
+static int mt7623_i2c1_3_funcs[] = { 3, 3, };
+static int mt7623_i2c1_4_pins[] = { 124, 125, };
+static int mt7623_i2c1_4_funcs[] = { 4, 4, };
+static int mt7623_i2c2_0_pins[] = { 77, 78, };
+static int mt7623_i2c2_0_funcs[] = { 1, 1, };
+static int mt7623_i2c2_1_pins[] = { 89, 90, };
+static int mt7623_i2c2_1_funcs[] = { 3, 3, };
+static int mt7623_i2c2_2_pins[] = { 109, 110, };
+static int mt7623_i2c2_2_funcs[] = { 3, 3, };
+static int mt7623_i2c2_3_pins[] = { 122, 123, };
+static int mt7623_i2c2_3_funcs[] = { 4, 4, };
+
+/* I2S */
+static int mt7623_i2s0_pins[] = { 49, 72, 73, 74, 126, };
+static int mt7623_i2s0_funcs[] = { 1, 1, 1, 1, 1, };
+static int mt7623_i2s1_pins[] = { 33, 34, 35, 36, 37, };
+static int mt7623_i2s1_funcs[] = { 1, 1, 1, 1, 1, };
+static int mt7623_i2s2_bclk_lrclk_mclk_pins[] = { 50, 52, 188, };
+static int mt7623_i2s2_bclk_lrclk_mclk_funcs[] = { 1, 1, 1, };
+static int mt7623_i2s2_data_in_pins[] = { 51, };
+static int mt7623_i2s2_data_in_funcs[] = { 1, };
+static int mt7623_i2s2_data_0_pins[] = { 203, };
+static int mt7623_i2s2_data_0_funcs[] = { 9, };
+static int mt7623_i2s2_data_1_pins[] = { 38, };
+static int mt7623_i2s2_data_1_funcs[] = { 4, };
+static int mt7623_i2s3_bclk_lrclk_mclk_pins[] = { 191, 192, 193, };
+static int mt7623_i2s3_bclk_lrclk_mclk_funcs[] = { 1, 1, 1, };
+static int mt7623_i2s3_data_in_pins[] = { 190, };
+static int mt7623_i2s3_data_in_funcs[] = { 1, };
+static int mt7623_i2s3_data_0_pins[] = { 204, };
+static int mt7623_i2s3_data_0_funcs[] = { 9, };
+static int mt7623_i2s3_data_1_pins[] = { 2, };
+static int mt7623_i2s3_data_1_funcs[] = { 0, };
+static int mt7623_i2s4_pins[] = { 194, 195, 196, 197, 198, };
+static int mt7623_i2s4_funcs[] = { 1, 1, 1, 1, 1, };
+static int mt7623_i2s5_pins[] = { 16, 17, 30, 31, 32, };
+static int mt7623_i2s5_funcs[] = { 1, 1, 1, 1, 1, };
+
+/* IR */
+static int mt7623_ir_pins[] = { 46, };
+static int mt7623_ir_funcs[] = { 1, };
+
+/* LCD */
+static int mt7623_mipi_tx_pins[] = { 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, };
+static int mt7623_mipi_tx_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, };
+static int mt7623_dsi_te_pins[] = { 84, };
+static int mt7623_dsi_te_funcs[] = { 1, };
+static int mt7623_lcm_rst_pins[] = { 83, };
+static int mt7623_lcm_rst_funcs[] = { 1, };
+
+/* MDC/MDIO */
+static int mt7623_mdc_mdio_pins[] = { 275, 276, };
+static int mt7623_mdc_mdio_funcs[] = { 1, 1, };
+
+/* MSDC */
+static int mt7623_msdc0_pins[] = { 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, };
+static int mt7623_msdc0_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, };
+static int mt7623_msdc1_pins[] = { 105, 106, 107, 108, 109, 110, };
+static int mt7623_msdc1_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int mt7623_msdc1_ins_pins[] = { 261, };
+static int mt7623_msdc1_ins_funcs[] = { 1, };
+static int mt7623_msdc1_wp_0_pins[] = { 29, };
+static int mt7623_msdc1_wp_0_funcs[] = { 1, };
+static int mt7623_msdc1_wp_1_pins[] = { 55, };
+static int mt7623_msdc1_wp_1_funcs[] = { 3, };
+static int mt7623_msdc1_wp_2_pins[] = { 209, };
+static int mt7623_msdc1_wp_2_funcs[] = { 2, };
+static int mt7623_msdc2_pins[] = { 85, 86, 87, 88, 89, 90, };
+static int mt7623_msdc2_funcs[] = { 1, 1, 1, 1, 1, 1, };
+static int mt7623_msdc3_pins[] = { 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, };
+static int mt7623_msdc3_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, };
+
+/* NAND */
+static int mt7623_nandc_pins[] = { 43, 47, 48, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, };
+static int mt7623_nandc_funcs[] = { 1, 1, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, };
+static int mt7623_nandc_ceb0_pins[] = { 45, };
+static int mt7623_nandc_ceb0_funcs[] = { 1, };
+static int mt7623_nandc_ceb1_pins[] = { 44, };
+static int mt7623_nandc_ceb1_funcs[] = { 1, };
+
+/* RTC */
+static int mt7623_rtc_pins[] = { 10, };
+static int mt7623_rtc_funcs[] = { 1, };
+
+/* OTG */
+static int mt7623_otg_iddig0_0_pins[] = { 29, };
+static int mt7623_otg_iddig0_0_funcs[] = { 1, };
+static int mt7623_otg_iddig0_1_pins[] = { 44, };
+static int mt7623_otg_iddig0_1_funcs[] = { 2, };
+static int mt7623_otg_iddig0_2_pins[] = { 236, };
+static int mt7623_otg_iddig0_2_funcs[] = { 2, };
+static int mt7623_otg_iddig1_0_pins[] = { 27, };
+static int mt7623_otg_iddig1_0_funcs[] = { 2, };
+static int mt7623_otg_iddig1_1_pins[] = { 47, };
+static int mt7623_otg_iddig1_1_funcs[] = { 2, };
+static int mt7623_otg_iddig1_2_pins[] = { 238, };
+static int mt7623_otg_iddig1_2_funcs[] = { 2, };
+static int mt7623_otg_drv_vbus0_0_pins[] = { 28, };
+static int mt7623_otg_drv_vbus0_0_funcs[] = { 1, };
+static int mt7623_otg_drv_vbus0_1_pins[] = { 45, };
+static int mt7623_otg_drv_vbus0_1_funcs[] = { 2, };
+static int mt7623_otg_drv_vbus0_2_pins[] = { 237, };
+static int mt7623_otg_drv_vbus0_2_funcs[] = { 2, };
+static int mt7623_otg_drv_vbus1_0_pins[] = { 26, };
+static int mt7623_otg_drv_vbus1_0_funcs[] = { 2, };
+static int mt7623_otg_drv_vbus1_1_pins[] = { 48, };
+static int mt7623_otg_drv_vbus1_1_funcs[] = { 2, };
+static int mt7623_otg_drv_vbus1_2_pins[] = { 239, };
+static int mt7623_otg_drv_vbus1_2_funcs[] = { 2, };
+
+/* PCIE */
+static int mt7623_pcie0_0_perst_pins[] = { 208, };
+static int mt7623_pcie0_0_perst_funcs[] = { 3, };
+static int mt7623_pcie0_1_perst_pins[] = { 22, };
+static int mt7623_pcie0_1_perst_funcs[] = { 2, };
+static int mt7623_pcie1_0_perst_pins[] = { 209, };
+static int mt7623_pcie1_0_perst_funcs[] = { 3, };
+static int mt7623_pcie1_1_perst_pins[] = { 23, };
+static int mt7623_pcie1_1_perst_funcs[] = { 2, };
+static int mt7623_pcie2_0_perst_pins[] = { 24, };
+static int mt7623_pcie2_0_perst_funcs[] = { 2, };
+static int mt7623_pcie2_1_perst_pins[] = { 29, };
+static int mt7623_pcie2_1_perst_funcs[] = { 6, };
+static int mt7623_pcie0_0_wake_pins[] = { 28, };
+static int mt7623_pcie0_0_wake_funcs[] = { 6, };
+static int mt7623_pcie0_1_wake_pins[] = { 251, };
+static int mt7623_pcie0_1_wake_funcs[] = { 6, };
+static int mt7623_pcie1_0_wake_pins[] = { 27, };
+static int mt7623_pcie1_0_wake_funcs[] = { 6, };
+static int mt7623_pcie1_1_wake_pins[] = { 253, };
+static int mt7623_pcie1_1_wake_funcs[] = { 6, };
+static int mt7623_pcie2_0_wake_pins[] = { 26, };
+static int mt7623_pcie2_0_wake_funcs[] = { 6, };
+static int mt7623_pcie2_1_wake_pins[] = { 255, };
+static int mt7623_pcie2_1_wake_funcs[] = { 6, };
+static int mt7623_pcie0_clkreq_pins[] = { 250, };
+static int mt7623_pcie0_clkreq_funcs[] = { 6, };
+static int mt7623_pcie1_clkreq_pins[] = { 252, };
+static int mt7623_pcie1_clkreq_funcs[] = { 6, };
+static int mt7623_pcie2_clkreq_pins[] = { 254, };
+static int mt7623_pcie2_clkreq_funcs[] = { 6, };
+
+/* the pcie_*_rev groups are only used on MT7623 */
+static int mt7623_pcie0_0_rev_perst_pins[] = { 208, };
+static int mt7623_pcie0_0_rev_perst_funcs[] = { 11, };
+static int mt7623_pcie0_1_rev_perst_pins[] = { 22, };
+static int mt7623_pcie0_1_rev_perst_funcs[] = { 10, };
+static int mt7623_pcie1_0_rev_perst_pins[] = { 209, };
+static int mt7623_pcie1_0_rev_perst_funcs[] = { 11, };
+static int mt7623_pcie1_1_rev_perst_pins[] = { 23, };
+static int mt7623_pcie1_1_rev_perst_funcs[] = { 10, };
+static int mt7623_pcie2_0_rev_perst_pins[] = { 24, };
+static int mt7623_pcie2_0_rev_perst_funcs[] = { 11, };
+static int mt7623_pcie2_1_rev_perst_pins[] = { 29, };
+static int mt7623_pcie2_1_rev_perst_funcs[] = { 14, };
+
+/* PCM */
+static int mt7623_pcm_clk_0_pins[] = { 18, };
+static int mt7623_pcm_clk_0_funcs[] = { 1, };
+static int mt7623_pcm_clk_1_pins[] = { 17, };
+static int mt7623_pcm_clk_1_funcs[] = { 3, };
+static int mt7623_pcm_clk_2_pins[] = { 35, };
+static int mt7623_pcm_clk_2_funcs[] = { 3, };
+static int mt7623_pcm_clk_3_pins[] = { 50, };
+static int mt7623_pcm_clk_3_funcs[] = { 3, };
+static int mt7623_pcm_clk_4_pins[] = { 74, };
+static int mt7623_pcm_clk_4_funcs[] = { 3, };
+static int mt7623_pcm_clk_5_pins[] = { 191, };
+static int mt7623_pcm_clk_5_funcs[] = { 3, };
+static int mt7623_pcm_clk_6_pins[] = { 196, };
+static int mt7623_pcm_clk_6_funcs[] = { 3, };
+static int mt7623_pcm_sync_0_pins[] = { 19, };
+static int mt7623_pcm_sync_0_funcs[] = { 1, };
+static int mt7623_pcm_sync_1_pins[] = { 30, };
+static int mt7623_pcm_sync_1_funcs[] = { 3, };
+static int mt7623_pcm_sync_2_pins[] = { 36, };
+static int mt7623_pcm_sync_2_funcs[] = { 3, };
+static int mt7623_pcm_sync_3_pins[] = { 52, };
+static int mt7623_pcm_sync_3_funcs[] = { 3, };
+static int mt7623_pcm_sync_4_pins[] = { 73, };
+static int mt7623_pcm_sync_4_funcs[] = { 3, };
+static int mt7623_pcm_sync_5_pins[] = { 192, };
+static int mt7623_pcm_sync_5_funcs[] = { 3, };
+static int mt7623_pcm_sync_6_pins[] = { 197, };
+static int mt7623_pcm_sync_6_funcs[] = { 3, };
+static int mt7623_pcm_rx_0_pins[] = { 20, };
+static int mt7623_pcm_rx_0_funcs[] = { 1, };
+static int mt7623_pcm_rx_1_pins[] = { 16, };
+static int mt7623_pcm_rx_1_funcs[] = { 3, };
+static int mt7623_pcm_rx_2_pins[] = { 34, };
+static int mt7623_pcm_rx_2_funcs[] = { 3, };
+static int mt7623_pcm_rx_3_pins[] = { 51, };
+static int mt7623_pcm_rx_3_funcs[] = { 3, };
+static int mt7623_pcm_rx_4_pins[] = { 72, };
+static int mt7623_pcm_rx_4_funcs[] = { 3, };
+static int mt7623_pcm_rx_5_pins[] = { 190, };
+static int mt7623_pcm_rx_5_funcs[] = { 3, };
+static int mt7623_pcm_rx_6_pins[] = { 195, };
+static int mt7623_pcm_rx_6_funcs[] = { 3, };
+static int mt7623_pcm_tx_0_pins[] = { 21, };
+static int mt7623_pcm_tx_0_funcs[] = { 1, };
+static int mt7623_pcm_tx_1_pins[] = { 32, };
+static int mt7623_pcm_tx_1_funcs[] = { 3, };
+static int mt7623_pcm_tx_2_pins[] = { 33, };
+static int mt7623_pcm_tx_2_funcs[] = { 3, };
+static int mt7623_pcm_tx_3_pins[] = { 38, };
+static int mt7623_pcm_tx_3_funcs[] = { 3, };
+static int mt7623_pcm_tx_4_pins[] = { 49, };
+static int mt7623_pcm_tx_4_funcs[] = { 3, };
+static int mt7623_pcm_tx_5_pins[] = { 189, };
+static int mt7623_pcm_tx_5_funcs[] = { 3, };
+static int mt7623_pcm_tx_6_pins[] = { 194, };
+static int mt7623_pcm_tx_6_funcs[] = { 3, };
+
+/* PWM */
+static int mt7623_pwm_ch1_0_pins[] = { 203, };
+static int mt7623_pwm_ch1_0_funcs[] = { 1, };
+static int mt7623_pwm_ch1_1_pins[] = { 208, };
+static int mt7623_pwm_ch1_1_funcs[] = { 2, };
+static int mt7623_pwm_ch1_2_pins[] = { 72, };
+static int mt7623_pwm_ch1_2_funcs[] = { 4, };
+static int mt7623_pwm_ch1_3_pins[] = { 88, };
+static int mt7623_pwm_ch1_3_funcs[] = { 3, };
+static int mt7623_pwm_ch1_4_pins[] = { 108, };
+static int mt7623_pwm_ch1_4_funcs[] = { 3, };
+static int mt7623_pwm_ch2_0_pins[] = { 204, };
+static int mt7623_pwm_ch2_0_funcs[] = { 1, };
+static int mt7623_pwm_ch2_1_pins[] = { 53, };
+static int mt7623_pwm_ch2_1_funcs[] = { 5, };
+static int mt7623_pwm_ch2_2_pins[] = { 88, };
+static int mt7623_pwm_ch2_2_funcs[] = { 6, };
+static int mt7623_pwm_ch2_3_pins[] = { 108, };
+static int mt7623_pwm_ch2_3_funcs[] = { 6, };
+static int mt7623_pwm_ch2_4_pins[] = { 209, };
+static int mt7623_pwm_ch2_4_funcs[] = { 5, };
+static int mt7623_pwm_ch3_0_pins[] = { 205, };
+static int mt7623_pwm_ch3_0_funcs[] = { 1, };
+static int mt7623_pwm_ch3_1_pins[] = { 55, };
+static int mt7623_pwm_ch3_1_funcs[] = { 5, };
+static int mt7623_pwm_ch3_2_pins[] = { 89, };
+static int mt7623_pwm_ch3_2_funcs[] = { 6, };
+static int mt7623_pwm_ch3_3_pins[] = { 109, };
+static int mt7623_pwm_ch3_3_funcs[] = { 6, };
+static int mt7623_pwm_ch4_0_pins[] = { 206, };
+static int mt7623_pwm_ch4_0_funcs[] = { 1, };
+static int mt7623_pwm_ch4_1_pins[] = { 90, };
+static int mt7623_pwm_ch4_1_funcs[] = { 6, };
+static int mt7623_pwm_ch4_2_pins[] = { 110, };
+static int mt7623_pwm_ch4_2_funcs[] = { 6, };
+static int mt7623_pwm_ch4_3_pins[] = { 124, };
+static int mt7623_pwm_ch4_3_funcs[] = { 5, };
+static int mt7623_pwm_ch5_0_pins[] = { 207, };
+static int mt7623_pwm_ch5_0_funcs[] = { 1, };
+static int mt7623_pwm_ch5_1_pins[] = { 125, };
+static int mt7623_pwm_ch5_1_funcs[] = { 5, };
+
+/* PWRAP */
+static int mt7623_pwrap_pins[] = { 0, 1, 2, 3, 4, 5, 6, };
+static int mt7623_pwrap_funcs[] = { 1, 1, 1, 1, 1, 1, 1, };
+
+/* SPDIF */
+static int mt7623_spdif_in0_0_pins[] = { 56, };
+static int mt7623_spdif_in0_0_funcs[] = { 3, };
+static int mt7623_spdif_in0_1_pins[] = { 201, };
+static int mt7623_spdif_in0_1_funcs[] = { 1, };
+static int mt7623_spdif_in1_0_pins[] = { 54, };
+static int mt7623_spdif_in1_0_funcs[] = { 3, };
+static int mt7623_spdif_in1_1_pins[] = { 202, };
+static int mt7623_spdif_in1_1_funcs[] = { 1, };
+static int mt7623_spdif_out_pins[] = { 202, };
+static int mt7623_spdif_out_funcs[] = { 1, };
+
+/* SPI */
+static int mt7623_spi0_pins[] = { 53, 54, 55, 56, };
+static int mt7623_spi0_funcs[] = { 1, 1, 1, 1, };
+static int mt7623_spi1_pins[] = { 7, 199, 8, 9, };
+static int mt7623_spi1_funcs[] = { 1, 1, 1, 1, };
+static int mt7623_spi2_pins[] = { 101, 104, 102, 103, };
+static int mt7623_spi2_funcs[] = { 1, 1, 1, 1, };
+
+/* UART */
+static int mt7623_uart0_0_txd_rxd_pins[] = { 79, 80, };
+static int mt7623_uart0_0_txd_rxd_funcs[] = { 1, 1, };
+static int mt7623_uart0_1_txd_rxd_pins[] = { 87, 88, };
+static int mt7623_uart0_1_txd_rxd_funcs[] = { 5, 5, };
+static int mt7623_uart0_2_txd_rxd_pins[] = { 107, 108, };
+static int mt7623_uart0_2_txd_rxd_funcs[] = { 5, 5, };
+static int mt7623_uart0_3_txd_rxd_pins[] = { 123, 122, };
+static int mt7623_uart0_3_txd_rxd_funcs[] = { 5, 5, };
+static int mt7623_uart0_rts_cts_pins[] = { 22, 23, };
+static int mt7623_uart0_rts_cts_funcs[] = { 1, 1, };
+static int mt7623_uart1_0_txd_rxd_pins[] = { 81, 82, };
+static int mt7623_uart1_0_txd_rxd_funcs[] = { 1, 1, };
+static int mt7623_uart1_1_txd_rxd_pins[] = { 89, 90, };
+static int mt7623_uart1_1_txd_rxd_funcs[] = { 5, 5, };
+static int mt7623_uart1_2_txd_rxd_pins[] = { 109, 110, };
+static int mt7623_uart1_2_txd_rxd_funcs[] = { 5, 5, };
+static int mt7623_uart1_rts_cts_pins[] = { 24, 25, };
+static int mt7623_uart1_rts_cts_funcs[] = { 1, 1, };
+static int mt7623_uart2_0_txd_rxd_pins[] = { 14, 15, };
+static int mt7623_uart2_0_txd_rxd_funcs[] = { 1, 1, };
+static int mt7623_uart2_1_txd_rxd_pins[] = { 200, 201, };
+static int mt7623_uart2_1_txd_rxd_funcs[] = { 6, 6, };
+static int mt7623_uart2_rts_cts_pins[] = { 242, 243, };
+static int mt7623_uart2_rts_cts_funcs[] = { 1, 1, };
+static int mt7623_uart3_txd_rxd_pins[] = { 242, 243, };
+static int mt7623_uart3_txd_rxd_funcs[] = { 2, 2, };
+static int mt7623_uart3_rts_cts_pins[] = { 26, 27, };
+static int mt7623_uart3_rts_cts_funcs[] = { 1, 1, };
+
+/* Watchdog */
+static int mt7623_watchdog_0_pins[] = { 11, };
+static int mt7623_watchdog_0_funcs[] = { 1, };
+static int mt7623_watchdog_1_pins[] = { 121, };
+static int mt7623_watchdog_1_funcs[] = { 5, };
+
+static const struct group_desc mt7623_groups[] = {
+ PINCTRL_PIN_GROUP("aud_ext_clk0", mt7623_aud_ext_clk0),
+ PINCTRL_PIN_GROUP("aud_ext_clk1", mt7623_aud_ext_clk1),
+ PINCTRL_PIN_GROUP("dsi_te", mt7623_dsi_te),
+ PINCTRL_PIN_GROUP("disp_pwm_0", mt7623_disp_pwm_0),
+ PINCTRL_PIN_GROUP("disp_pwm_1", mt7623_disp_pwm_1),
+ PINCTRL_PIN_GROUP("disp_pwm_2", mt7623_disp_pwm_2),
+ PINCTRL_PIN_GROUP("ephy", mt7623_ephy),
+ PINCTRL_PIN_GROUP("esw_int", mt7623_esw_int),
+ PINCTRL_PIN_GROUP("esw_rst", mt7623_esw_rst),
+ PINCTRL_PIN_GROUP("ext_sdio", mt7623_ext_sdio),
+ PINCTRL_PIN_GROUP("hdmi_cec", mt7623_hdmi_cec),
+ PINCTRL_PIN_GROUP("hdmi_htplg", mt7623_hdmi_htplg),
+ PINCTRL_PIN_GROUP("hdmi_i2c", mt7623_hdmi_i2c),
+ PINCTRL_PIN_GROUP("hdmi_rx", mt7623_hdmi_rx),
+ PINCTRL_PIN_GROUP("hdmi_rx_i2c", mt7623_hdmi_rx_i2c),
+ PINCTRL_PIN_GROUP("i2c0", mt7623_i2c0),
+ PINCTRL_PIN_GROUP("i2c1_0", mt7623_i2c1_0),
+ PINCTRL_PIN_GROUP("i2c1_1", mt7623_i2c1_1),
+ PINCTRL_PIN_GROUP("i2c1_2", mt7623_i2c1_2),
+ PINCTRL_PIN_GROUP("i2c1_3", mt7623_i2c1_3),
+ PINCTRL_PIN_GROUP("i2c1_4", mt7623_i2c1_4),
+ PINCTRL_PIN_GROUP("i2c2_0", mt7623_i2c2_0),
+ PINCTRL_PIN_GROUP("i2c2_1", mt7623_i2c2_1),
+ PINCTRL_PIN_GROUP("i2c2_2", mt7623_i2c2_2),
+ PINCTRL_PIN_GROUP("i2c2_3", mt7623_i2c2_3),
+ PINCTRL_PIN_GROUP("i2s0", mt7623_i2s0),
+ PINCTRL_PIN_GROUP("i2s1", mt7623_i2s1),
+ PINCTRL_PIN_GROUP("i2s4", mt7623_i2s4),
+ PINCTRL_PIN_GROUP("i2s5", mt7623_i2s5),
+ PINCTRL_PIN_GROUP("i2s2_bclk_lrclk_mclk", mt7623_i2s2_bclk_lrclk_mclk),
+ PINCTRL_PIN_GROUP("i2s3_bclk_lrclk_mclk", mt7623_i2s3_bclk_lrclk_mclk),
+ PINCTRL_PIN_GROUP("i2s2_data_in", mt7623_i2s2_data_in),
+ PINCTRL_PIN_GROUP("i2s3_data_in", mt7623_i2s3_data_in),
+ PINCTRL_PIN_GROUP("i2s2_data_0", mt7623_i2s2_data_0),
+ PINCTRL_PIN_GROUP("i2s2_data_1", mt7623_i2s2_data_1),
+ PINCTRL_PIN_GROUP("i2s3_data_0", mt7623_i2s3_data_0),
+ PINCTRL_PIN_GROUP("i2s3_data_1", mt7623_i2s3_data_1),
+ PINCTRL_PIN_GROUP("ir", mt7623_ir),
+ PINCTRL_PIN_GROUP("lcm_rst", mt7623_lcm_rst),
+ PINCTRL_PIN_GROUP("mdc_mdio", mt7623_mdc_mdio),
+ PINCTRL_PIN_GROUP("mipi_tx", mt7623_mipi_tx),
+ PINCTRL_PIN_GROUP("msdc0", mt7623_msdc0),
+ PINCTRL_PIN_GROUP("msdc1", mt7623_msdc1),
+ PINCTRL_PIN_GROUP("msdc1_ins", mt7623_msdc1_ins),
+ PINCTRL_PIN_GROUP("msdc1_wp_0", mt7623_msdc1_wp_0),
+ PINCTRL_PIN_GROUP("msdc1_wp_1", mt7623_msdc1_wp_1),
+ PINCTRL_PIN_GROUP("msdc1_wp_2", mt7623_msdc1_wp_2),
+ PINCTRL_PIN_GROUP("msdc2", mt7623_msdc2),
+ PINCTRL_PIN_GROUP("msdc3", mt7623_msdc3),
+ PINCTRL_PIN_GROUP("nandc", mt7623_nandc),
+ PINCTRL_PIN_GROUP("nandc_ceb0", mt7623_nandc_ceb0),
+ PINCTRL_PIN_GROUP("nandc_ceb1", mt7623_nandc_ceb1),
+ PINCTRL_PIN_GROUP("otg_iddig0_0", mt7623_otg_iddig0_0),
+ PINCTRL_PIN_GROUP("otg_iddig0_1", mt7623_otg_iddig0_1),
+ PINCTRL_PIN_GROUP("otg_iddig0_2", mt7623_otg_iddig0_2),
+ PINCTRL_PIN_GROUP("otg_iddig1_0", mt7623_otg_iddig1_0),
+ PINCTRL_PIN_GROUP("otg_iddig1_1", mt7623_otg_iddig1_1),
+ PINCTRL_PIN_GROUP("otg_iddig1_2", mt7623_otg_iddig1_2),
+ PINCTRL_PIN_GROUP("otg_drv_vbus0_0", mt7623_otg_drv_vbus0_0),
+ PINCTRL_PIN_GROUP("otg_drv_vbus0_1", mt7623_otg_drv_vbus0_1),
+ PINCTRL_PIN_GROUP("otg_drv_vbus0_2", mt7623_otg_drv_vbus0_2),
+ PINCTRL_PIN_GROUP("otg_drv_vbus1_0", mt7623_otg_drv_vbus1_0),
+ PINCTRL_PIN_GROUP("otg_drv_vbus1_1", mt7623_otg_drv_vbus1_1),
+ PINCTRL_PIN_GROUP("otg_drv_vbus1_2", mt7623_otg_drv_vbus1_2),
+ PINCTRL_PIN_GROUP("pcie0_0_perst", mt7623_pcie0_0_perst),
+ PINCTRL_PIN_GROUP("pcie0_1_perst", mt7623_pcie0_1_perst),
+ PINCTRL_PIN_GROUP("pcie1_0_perst", mt7623_pcie1_0_perst),
+ PINCTRL_PIN_GROUP("pcie1_1_perst", mt7623_pcie1_1_perst),
+ PINCTRL_PIN_GROUP("pcie1_1_perst", mt7623_pcie1_1_perst),
+ PINCTRL_PIN_GROUP("pcie0_0_rev_perst", mt7623_pcie0_0_rev_perst),
+ PINCTRL_PIN_GROUP("pcie0_1_rev_perst", mt7623_pcie0_1_rev_perst),
+ PINCTRL_PIN_GROUP("pcie1_0_rev_perst", mt7623_pcie1_0_rev_perst),
+ PINCTRL_PIN_GROUP("pcie1_1_rev_perst", mt7623_pcie1_1_rev_perst),
+ PINCTRL_PIN_GROUP("pcie2_0_rev_perst", mt7623_pcie2_0_rev_perst),
+ PINCTRL_PIN_GROUP("pcie2_1_rev_perst", mt7623_pcie2_1_rev_perst),
+ PINCTRL_PIN_GROUP("pcie2_0_perst", mt7623_pcie2_0_perst),
+ PINCTRL_PIN_GROUP("pcie2_1_perst", mt7623_pcie2_1_perst),
+ PINCTRL_PIN_GROUP("pcie0_0_wake", mt7623_pcie0_0_wake),
+ PINCTRL_PIN_GROUP("pcie0_1_wake", mt7623_pcie0_1_wake),
+ PINCTRL_PIN_GROUP("pcie1_0_wake", mt7623_pcie1_0_wake),
+ PINCTRL_PIN_GROUP("pcie1_1_wake", mt7623_pcie1_1_wake),
+ PINCTRL_PIN_GROUP("pcie2_0_wake", mt7623_pcie2_0_wake),
+ PINCTRL_PIN_GROUP("pcie2_1_wake", mt7623_pcie2_1_wake),
+ PINCTRL_PIN_GROUP("pcie0_clkreq", mt7623_pcie0_clkreq),
+ PINCTRL_PIN_GROUP("pcie1_clkreq", mt7623_pcie1_clkreq),
+ PINCTRL_PIN_GROUP("pcie2_clkreq", mt7623_pcie2_clkreq),
+ PINCTRL_PIN_GROUP("pcm_clk_0", mt7623_pcm_clk_0),
+ PINCTRL_PIN_GROUP("pcm_clk_1", mt7623_pcm_clk_1),
+ PINCTRL_PIN_GROUP("pcm_clk_2", mt7623_pcm_clk_2),
+ PINCTRL_PIN_GROUP("pcm_clk_3", mt7623_pcm_clk_3),
+ PINCTRL_PIN_GROUP("pcm_clk_4", mt7623_pcm_clk_4),
+ PINCTRL_PIN_GROUP("pcm_clk_5", mt7623_pcm_clk_5),
+ PINCTRL_PIN_GROUP("pcm_clk_6", mt7623_pcm_clk_6),
+ PINCTRL_PIN_GROUP("pcm_sync_0", mt7623_pcm_sync_0),
+ PINCTRL_PIN_GROUP("pcm_sync_1", mt7623_pcm_sync_1),
+ PINCTRL_PIN_GROUP("pcm_sync_2", mt7623_pcm_sync_2),
+ PINCTRL_PIN_GROUP("pcm_sync_3", mt7623_pcm_sync_3),
+ PINCTRL_PIN_GROUP("pcm_sync_4", mt7623_pcm_sync_4),
+ PINCTRL_PIN_GROUP("pcm_sync_5", mt7623_pcm_sync_5),
+ PINCTRL_PIN_GROUP("pcm_sync_6", mt7623_pcm_sync_6),
+ PINCTRL_PIN_GROUP("pcm_rx_0", mt7623_pcm_rx_0),
+ PINCTRL_PIN_GROUP("pcm_rx_1", mt7623_pcm_rx_1),
+ PINCTRL_PIN_GROUP("pcm_rx_2", mt7623_pcm_rx_2),
+ PINCTRL_PIN_GROUP("pcm_rx_3", mt7623_pcm_rx_3),
+ PINCTRL_PIN_GROUP("pcm_rx_4", mt7623_pcm_rx_4),
+ PINCTRL_PIN_GROUP("pcm_rx_5", mt7623_pcm_rx_5),
+ PINCTRL_PIN_GROUP("pcm_rx_6", mt7623_pcm_rx_6),
+ PINCTRL_PIN_GROUP("pcm_tx_0", mt7623_pcm_tx_0),
+ PINCTRL_PIN_GROUP("pcm_tx_1", mt7623_pcm_tx_1),
+ PINCTRL_PIN_GROUP("pcm_tx_2", mt7623_pcm_tx_2),
+ PINCTRL_PIN_GROUP("pcm_tx_3", mt7623_pcm_tx_3),
+ PINCTRL_PIN_GROUP("pcm_tx_4", mt7623_pcm_tx_4),
+ PINCTRL_PIN_GROUP("pcm_tx_5", mt7623_pcm_tx_5),
+ PINCTRL_PIN_GROUP("pcm_tx_6", mt7623_pcm_tx_6),
+ PINCTRL_PIN_GROUP("pwm_ch1_0", mt7623_pwm_ch1_0),
+ PINCTRL_PIN_GROUP("pwm_ch1_1", mt7623_pwm_ch1_1),
+ PINCTRL_PIN_GROUP("pwm_ch1_2", mt7623_pwm_ch1_2),
+ PINCTRL_PIN_GROUP("pwm_ch1_3", mt7623_pwm_ch1_3),
+ PINCTRL_PIN_GROUP("pwm_ch1_4", mt7623_pwm_ch1_4),
+ PINCTRL_PIN_GROUP("pwm_ch2_0", mt7623_pwm_ch2_0),
+ PINCTRL_PIN_GROUP("pwm_ch2_1", mt7623_pwm_ch2_1),
+ PINCTRL_PIN_GROUP("pwm_ch2_2", mt7623_pwm_ch2_2),
+ PINCTRL_PIN_GROUP("pwm_ch2_3", mt7623_pwm_ch2_3),
+ PINCTRL_PIN_GROUP("pwm_ch2_4", mt7623_pwm_ch2_4),
+ PINCTRL_PIN_GROUP("pwm_ch3_0", mt7623_pwm_ch3_0),
+ PINCTRL_PIN_GROUP("pwm_ch3_1", mt7623_pwm_ch3_1),
+ PINCTRL_PIN_GROUP("pwm_ch3_2", mt7623_pwm_ch3_2),
+ PINCTRL_PIN_GROUP("pwm_ch3_3", mt7623_pwm_ch3_3),
+ PINCTRL_PIN_GROUP("pwm_ch4_0", mt7623_pwm_ch4_0),
+ PINCTRL_PIN_GROUP("pwm_ch4_1", mt7623_pwm_ch4_1),
+ PINCTRL_PIN_GROUP("pwm_ch4_2", mt7623_pwm_ch4_2),
+ PINCTRL_PIN_GROUP("pwm_ch4_3", mt7623_pwm_ch4_3),
+ PINCTRL_PIN_GROUP("pwm_ch5_0", mt7623_pwm_ch5_0),
+ PINCTRL_PIN_GROUP("pwm_ch5_1", mt7623_pwm_ch5_1),
+ PINCTRL_PIN_GROUP("pwrap", mt7623_pwrap),
+ PINCTRL_PIN_GROUP("rtc", mt7623_rtc),
+ PINCTRL_PIN_GROUP("spdif_in0_0", mt7623_spdif_in0_0),
+ PINCTRL_PIN_GROUP("spdif_in0_1", mt7623_spdif_in0_1),
+ PINCTRL_PIN_GROUP("spdif_in1_0", mt7623_spdif_in1_0),
+ PINCTRL_PIN_GROUP("spdif_in1_1", mt7623_spdif_in1_1),
+ PINCTRL_PIN_GROUP("spdif_out", mt7623_spdif_out),
+ PINCTRL_PIN_GROUP("spi0", mt7623_spi0),
+ PINCTRL_PIN_GROUP("spi1", mt7623_spi1),
+ PINCTRL_PIN_GROUP("spi2", mt7623_spi2),
+ PINCTRL_PIN_GROUP("uart0_0_txd_rxd", mt7623_uart0_0_txd_rxd),
+ PINCTRL_PIN_GROUP("uart0_1_txd_rxd", mt7623_uart0_1_txd_rxd),
+ PINCTRL_PIN_GROUP("uart0_2_txd_rxd", mt7623_uart0_2_txd_rxd),
+ PINCTRL_PIN_GROUP("uart0_3_txd_rxd", mt7623_uart0_3_txd_rxd),
+ PINCTRL_PIN_GROUP("uart1_0_txd_rxd", mt7623_uart1_0_txd_rxd),
+ PINCTRL_PIN_GROUP("uart1_1_txd_rxd", mt7623_uart1_1_txd_rxd),
+ PINCTRL_PIN_GROUP("uart1_2_txd_rxd", mt7623_uart1_2_txd_rxd),
+ PINCTRL_PIN_GROUP("uart2_0_txd_rxd", mt7623_uart2_0_txd_rxd),
+ PINCTRL_PIN_GROUP("uart2_1_txd_rxd", mt7623_uart2_1_txd_rxd),
+ PINCTRL_PIN_GROUP("uart3_txd_rxd", mt7623_uart3_txd_rxd),
+ PINCTRL_PIN_GROUP("uart0_rts_cts", mt7623_uart0_rts_cts),
+ PINCTRL_PIN_GROUP("uart1_rts_cts", mt7623_uart1_rts_cts),
+ PINCTRL_PIN_GROUP("uart2_rts_cts", mt7623_uart2_rts_cts),
+ PINCTRL_PIN_GROUP("uart3_rts_cts", mt7623_uart3_rts_cts),
+ PINCTRL_PIN_GROUP("watchdog_0", mt7623_watchdog_0),
+ PINCTRL_PIN_GROUP("watchdog_1", mt7623_watchdog_1),
+};
+
+/* Join those groups sharing the same capability from the user's point of
+ * view, so that they can be referenced conveniently from the device tree.
+ */
+static const char *mt7623_aud_clk_groups[] = { "aud_ext_clk0",
+ "aud_ext_clk1", };
+static const char *mt7623_disp_pwm_groups[] = { "disp_pwm_0", "disp_pwm_1",
+ "disp_pwm_2", };
+static const char *mt7623_ethernet_groups[] = { "esw_int", "esw_rst",
+ "ephy", "mdc_mdio", };
+static const char *mt7623_ext_sdio_groups[] = { "ext_sdio", };
+static const char *mt7623_hdmi_groups[] = { "hdmi_cec", "hdmi_htplg",
+ "hdmi_i2c", "hdmi_rx",
+ "hdmi_rx_i2c", };
+static const char *mt7623_i2c_groups[] = { "i2c0", "i2c1_0", "i2c1_1",
+ "i2c1_2", "i2c1_3", "i2c1_4",
+ "i2c2_0", "i2c2_1", "i2c2_2",
+ "i2c2_3", };
+static const char *mt7623_i2s_groups[] = { "i2s0", "i2s1",
+ "i2s2_bclk_lrclk_mclk",
+ "i2s3_bclk_lrclk_mclk",
+ "i2s4", "i2s5",
+ "i2s2_data_in", "i2s3_data_in",
+ "i2s2_data_0", "i2s2_data_1",
+ "i2s3_data_0", "i2s3_data_1", };
+static const char *mt7623_ir_groups[] = { "ir", };
+static const char *mt7623_lcd_groups[] = { "dsi_te", "lcm_rst", "mipi_tx", };
+static const char *mt7623_msdc_groups[] = { "msdc0", "msdc1", "msdc1_ins",
+ "msdc1_wp_0", "msdc1_wp_1",
+ "msdc1_wp_2", "msdc2",
+ "msdc3", };
+static const char *mt7623_nandc_groups[] = { "nandc", "nandc_ceb0",
+ "nandc_ceb1", };
+static const char *mt7623_otg_groups[] = { "otg_iddig0_0", "otg_iddig0_1",
+ "otg_iddig0_2", "otg_iddig1_0",
+ "otg_iddig1_1", "otg_iddig1_2",
+ "otg_drv_vbus0_0",
+ "otg_drv_vbus0_1",
+ "otg_drv_vbus0_2",
+ "otg_drv_vbus1_0",
+ "otg_drv_vbus1_1",
+ "otg_drv_vbus1_2", };
+static const char *mt7623_pcie_groups[] = { "pcie0_0_perst", "pcie0_1_perst",
+ "pcie1_0_perst", "pcie1_1_perst",
+ "pcie2_0_perst", "pcie2_1_perst",
+ "pcie0_0_rev_perst",
+ "pcie0_1_rev_perst",
+ "pcie1_0_rev_perst",
+ "pcie1_1_rev_perst",
+ "pcie2_0_rev_perst",
+ "pcie2_1_rev_perst",
+ "pcie0_0_wake", "pcie0_1_wake",
+ "pcie2_0_wake", "pcie2_1_wake",
+ "pcie0_clkreq", "pcie1_clkreq",
+ "pcie2_clkreq", };
+static const char *mt7623_pcm_groups[] = { "pcm_clk_0", "pcm_clk_1",
+ "pcm_clk_2", "pcm_clk_3",
+ "pcm_clk_4", "pcm_clk_5",
+ "pcm_clk_6", "pcm_sync_0",
+ "pcm_sync_1", "pcm_sync_2",
+ "pcm_sync_3", "pcm_sync_4",
+ "pcm_sync_5", "pcm_sync_6",
+ "pcm_rx_0", "pcm_rx_1",
+ "pcm_rx_2", "pcm_rx_3",
+ "pcm_rx_4", "pcm_rx_5",
+ "pcm_rx_6", "pcm_tx_0",
+ "pcm_tx_1", "pcm_tx_2",
+ "pcm_tx_3", "pcm_tx_4",
+ "pcm_tx_5", "pcm_tx_6", };
+static const char *mt7623_pwm_groups[] = { "pwm_ch1_0", "pwm_ch1_1",
+ "pwm_ch1_2", "pwm_ch1_3",
+ "pwm_ch1_4", "pwm_ch2_0",
+ "pwm_ch2_1", "pwm_ch2_2",
+ "pwm_ch2_3", "pwm_ch2_4",
+ "pwm_ch3_0", "pwm_ch3_1",
+ "pwm_ch3_2", "pwm_ch3_3",
+ "pwm_ch4_0", "pwm_ch4_1",
+ "pwm_ch4_2", "pwm_ch4_3",
+ "pwm_ch5_0", "pwm_ch5_1", };
+static const char *mt7623_pwrap_groups[] = { "pwrap", };
+static const char *mt7623_rtc_groups[] = { "rtc", };
+static const char *mt7623_spi_groups[] = { "spi0", "spi1", "spi2", };
+static const char *mt7623_spdif_groups[] = { "spdif_in0_0", "spdif_in0_1",
+ "spdif_in1_0", "spdif_in1_1",
+ "spdif_out", };
+static const char *mt7623_uart_groups[] = { "uart0_0_txd_rxd",
+ "uart0_1_txd_rxd",
+ "uart0_2_txd_rxd",
+ "uart0_3_txd_rxd",
+ "uart1_0_txd_rxd",
+ "uart1_1_txd_rxd",
+ "uart1_2_txd_rxd",
+ "uart2_0_txd_rxd",
+ "uart2_1_txd_rxd",
+ "uart3_txd_rxd",
+ "uart0_rts_cts",
+ "uart1_rts_cts",
+ "uart2_rts_cts",
+ "uart3_rts_cts", };
+static const char *mt7623_wdt_groups[] = { "watchdog_0", "watchdog_1", };
+
+static const struct function_desc mt7623_functions[] = {
+ {"audck", mt7623_aud_clk_groups, ARRAY_SIZE(mt7623_aud_clk_groups)},
+ {"disp", mt7623_disp_pwm_groups, ARRAY_SIZE(mt7623_disp_pwm_groups)},
+ {"eth", mt7623_ethernet_groups, ARRAY_SIZE(mt7623_ethernet_groups)},
+ {"sdio", mt7623_ext_sdio_groups, ARRAY_SIZE(mt7623_ext_sdio_groups)},
+ {"hdmi", mt7623_hdmi_groups, ARRAY_SIZE(mt7623_hdmi_groups)},
+ {"i2c", mt7623_i2c_groups, ARRAY_SIZE(mt7623_i2c_groups)},
+ {"i2s", mt7623_i2s_groups, ARRAY_SIZE(mt7623_i2s_groups)},
+ {"ir", mt7623_ir_groups, ARRAY_SIZE(mt7623_ir_groups)},
+ {"lcd", mt7623_lcd_groups, ARRAY_SIZE(mt7623_lcd_groups)},
+ {"msdc", mt7623_msdc_groups, ARRAY_SIZE(mt7623_msdc_groups)},
+ {"nand", mt7623_nandc_groups, ARRAY_SIZE(mt7623_nandc_groups)},
+ {"otg", mt7623_otg_groups, ARRAY_SIZE(mt7623_otg_groups)},
+ {"pcie", mt7623_pcie_groups, ARRAY_SIZE(mt7623_pcie_groups)},
+ {"pcm", mt7623_pcm_groups, ARRAY_SIZE(mt7623_pcm_groups)},
+ {"pwm", mt7623_pwm_groups, ARRAY_SIZE(mt7623_pwm_groups)},
+ {"pwrap", mt7623_pwrap_groups, ARRAY_SIZE(mt7623_pwrap_groups)},
+ {"rtc", mt7623_rtc_groups, ARRAY_SIZE(mt7623_rtc_groups)},
+ {"spi", mt7623_spi_groups, ARRAY_SIZE(mt7623_spi_groups)},
+ {"spdif", mt7623_spdif_groups, ARRAY_SIZE(mt7623_spdif_groups)},
+ {"uart", mt7623_uart_groups, ARRAY_SIZE(mt7623_uart_groups)},
+ {"watchdog", mt7623_wdt_groups, ARRAY_SIZE(mt7623_wdt_groups)},
+};
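+
+/* A minimal device-tree usage sketch (illustration only, assuming the
+ * generic "function"/"groups" properties used by this driver family;
+ * node and label names below are made up):
+ *
+ *	uart0_pins: uart0-pins {
+ *		mux {
+ *			function = "uart";
+ *			groups = "uart0_0_txd_rxd";
+ *		};
+ *	};
+ */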
+
+static const struct mtk_eint_hw mt7623_eint_hw = {
+ .port_mask = 6,
+ .ports = 6,
+ .ap_num = 169,
+ .db_cnt = 20,
+};
+
+static struct mtk_pin_soc mt7623_data = {
+ .reg_cal = mt7623_reg_cals,
+ .pins = mt7623_pins,
+ .npins = ARRAY_SIZE(mt7623_pins),
+ .grps = mt7623_groups,
+ .ngrps = ARRAY_SIZE(mt7623_groups),
+ .funcs = mt7623_functions,
+ .nfuncs = ARRAY_SIZE(mt7623_functions),
+ .eint_hw = &mt7623_eint_hw,
+ .gpio_m = 0,
+ .ies_present = true,
+ .base_names = mtk_default_register_base_names,
+ .nbase_names = ARRAY_SIZE(mtk_default_register_base_names),
+ .bias_disable_set = mtk_pinconf_bias_disable_set_rev1,
+ .bias_disable_get = mtk_pinconf_bias_disable_get_rev1,
+ .bias_set = mtk_pinconf_bias_set_rev1,
+ .bias_get = mtk_pinconf_bias_get_rev1,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_pull_get = mtk_pinconf_adv_pull_get,
+ .adv_pull_set = mtk_pinconf_adv_pull_set,
+};
+
+/*
+ * Some pins have mux functions greater than 8 (e.g. the pcie*_rev_perst
+ * modes above use functions 10, 11 and 14). To switch to these high
+ * modes, the bonding constraints must be disabled first.
+ */
+static void mt7623_bonding_disable(struct platform_device *pdev)
+{
+ struct mtk_pinctrl *hw = platform_get_drvdata(pdev);
+
+ mtk_rmw(hw, 0, PIN_BOND_REG0, BOND_PCIE_CLR, BOND_PCIE_CLR);
+ mtk_rmw(hw, 0, PIN_BOND_REG1, BOND_I2S_CLR, BOND_I2S_CLR);
+ mtk_rmw(hw, 0, PIN_BOND_REG2, BOND_MSDC0E_CLR, BOND_MSDC0E_CLR);
+}
+
+static const struct of_device_id mt7623_pctrl_match[] = {
+ { .compatible = "mediatek,mt7623-moore-pinctrl", },
+ {}
+};
+
+static int mt7623_pinctrl_probe(struct platform_device *pdev)
+{
+ int err;
+
+ err = mtk_moore_pinctrl_probe(pdev, &mt7623_data);
+ if (err)
+ return err;
+
+ mt7623_bonding_disable(pdev);
+
+ return 0;
+}
+
+static struct platform_driver mtk_pinctrl_driver = {
+ .probe = mt7623_pinctrl_probe,
+ .driver = {
+ .name = "mt7623-moore-pinctrl",
+ .of_match_table = mt7623_pctrl_match,
+ },
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+ return platform_driver_register(&mtk_pinctrl_driver);
+}
+arch_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8183.c b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
new file mode 100644
index 000000000000..6262fd3678ea
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#include "pinctrl-mtk-mt8183.h"
+#include "pinctrl-paris.h"
+
+/* MT8183 has multiple bases for programming the pin configuration, listed
+ * below:
+ * iocfg[0]:0x10005000, iocfg[1]:0x11F20000, iocfg[2]:0x11E80000,
+ * iocfg[3]:0x11E70000, iocfg[4]:0x11E90000, iocfg[5]:0x11D30000,
+ * iocfg[6]:0x11D20000, iocfg[7]:0x11C50000, iocfg[8]:0x11F30000.
+ * _i_base is used to indicate which base a pin should be mapped into.
+ */
+
+#define PIN_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 0)
+
+#define PINS_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 1)
+
+static const struct mtk_pin_field_calc mt8183_pin_mode_range[] = {
+ PIN_FIELD(0, 192, 0x300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_dir_range[] = {
+ PIN_FIELD(0, 192, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_di_range[] = {
+ PIN_FIELD(0, 192, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_do_range[] = {
+ PIN_FIELD(0, 192, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_ies_range[] = {
+ PINS_FIELD_BASE(0, 3, 6, 0x000, 0x10, 3, 1),
+ PINS_FIELD_BASE(4, 7, 6, 0x000, 0x10, 5, 1),
+ PIN_FIELD_BASE(8, 8, 6, 0x000, 0x10, 0, 1),
+ PINS_FIELD_BASE(9, 10, 6, 0x000, 0x10, 12, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x000, 0x10, 3, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x000, 0x10, 7, 1),
+ PINS_FIELD_BASE(13, 16, 2, 0x000, 0x10, 2, 1),
+ PINS_FIELD_BASE(17, 20, 2, 0x000, 0x10, 3, 1),
+ PINS_FIELD_BASE(21, 24, 2, 0x000, 0x10, 4, 1),
+ PINS_FIELD_BASE(25, 28, 2, 0x000, 0x10, 5, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x000, 0x10, 6, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x000, 0x10, 7, 1),
+ PINS_FIELD_BASE(31, 31, 2, 0x000, 0x10, 8, 1),
+ PINS_FIELD_BASE(32, 34, 2, 0x000, 0x10, 7, 1),
+ PINS_FIELD_BASE(35, 37, 3, 0x000, 0x10, 0, 1),
+ PINS_FIELD_BASE(38, 40, 3, 0x000, 0x10, 1, 1),
+ PINS_FIELD_BASE(41, 42, 3, 0x000, 0x10, 2, 1),
+ PINS_FIELD_BASE(43, 45, 3, 0x000, 0x10, 3, 1),
+ PINS_FIELD_BASE(46, 47, 3, 0x000, 0x10, 4, 1),
+ PINS_FIELD_BASE(48, 49, 3, 0x000, 0x10, 5, 1),
+ PINS_FIELD_BASE(50, 51, 4, 0x000, 0x10, 0, 1),
+ PINS_FIELD_BASE(52, 57, 4, 0x000, 0x10, 1, 1),
+ PINS_FIELD_BASE(58, 60, 4, 0x000, 0x10, 2, 1),
+ PINS_FIELD_BASE(61, 64, 5, 0x000, 0x10, 0, 1),
+ PINS_FIELD_BASE(65, 66, 5, 0x000, 0x10, 1, 1),
+ PINS_FIELD_BASE(67, 68, 5, 0x000, 0x10, 2, 1),
+ PINS_FIELD_BASE(69, 71, 5, 0x000, 0x10, 3, 1),
+ PINS_FIELD_BASE(72, 76, 5, 0x000, 0x10, 4, 1),
+ PINS_FIELD_BASE(77, 80, 5, 0x000, 0x10, 5, 1),
+ PIN_FIELD_BASE(81, 81, 5, 0x000, 0x10, 6, 1),
+ PINS_FIELD_BASE(82, 83, 5, 0x000, 0x10, 7, 1),
+ PIN_FIELD_BASE(84, 84, 5, 0x000, 0x10, 6, 1),
+ PINS_FIELD_BASE(85, 88, 5, 0x000, 0x10, 8, 1),
+ PIN_FIELD_BASE(89, 89, 6, 0x000, 0x10, 11, 1),
+ PIN_FIELD_BASE(90, 90, 6, 0x000, 0x10, 1, 1),
+ PINS_FIELD_BASE(91, 94, 6, 0x000, 0x10, 2, 1),
+ PINS_FIELD_BASE(95, 96, 6, 0x000, 0x10, 6, 1),
+ PINS_FIELD_BASE(97, 98, 6, 0x000, 0x10, 7, 1),
+ PIN_FIELD_BASE(99, 99, 6, 0x000, 0x10, 8, 1),
+ PIN_FIELD_BASE(100, 100, 6, 0x000, 0x10, 9, 1),
+ PINS_FIELD_BASE(101, 102, 6, 0x000, 0x10, 10, 1),
+ PINS_FIELD_BASE(103, 104, 6, 0x000, 0x10, 13, 1),
+ PINS_FIELD_BASE(105, 106, 6, 0x000, 0x10, 14, 1),
+ PIN_FIELD_BASE(107, 107, 7, 0x000, 0x10, 0, 1),
+ PIN_FIELD_BASE(108, 108, 7, 0x000, 0x10, 1, 1),
+ PIN_FIELD_BASE(109, 109, 7, 0x000, 0x10, 2, 1),
+ PIN_FIELD_BASE(110, 110, 7, 0x000, 0x10, 0, 1),
+ PIN_FIELD_BASE(111, 111, 7, 0x000, 0x10, 3, 1),
+ PIN_FIELD_BASE(112, 112, 7, 0x000, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 7, 0x000, 0x10, 4, 1),
+ PIN_FIELD_BASE(114, 114, 7, 0x000, 0x10, 5, 1),
+ PIN_FIELD_BASE(115, 115, 7, 0x000, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 7, 0x000, 0x10, 7, 1),
+ PIN_FIELD_BASE(117, 117, 7, 0x000, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 7, 0x000, 0x10, 9, 1),
+ PIN_FIELD_BASE(119, 119, 7, 0x000, 0x10, 10, 1),
+ PIN_FIELD_BASE(120, 120, 7, 0x000, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 7, 0x000, 0x10, 12, 1),
+ PIN_FIELD_BASE(122, 122, 8, 0x000, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 8, 0x000, 0x10, 1, 1),
+ PIN_FIELD_BASE(124, 124, 8, 0x000, 0x10, 2, 1),
+ PINS_FIELD_BASE(125, 130, 8, 0x000, 0x10, 1, 1),
+ PIN_FIELD_BASE(131, 131, 8, 0x000, 0x10, 3, 1),
+ PIN_FIELD_BASE(132, 132, 8, 0x000, 0x10, 1, 1),
+ PIN_FIELD_BASE(133, 133, 8, 0x000, 0x10, 4, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x000, 0x10, 0, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x000, 0x10, 1, 1),
+ PINS_FIELD_BASE(136, 143, 1, 0x000, 0x10, 2, 1),
+ PINS_FIELD_BASE(144, 147, 1, 0x000, 0x10, 4, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x000, 0x10, 5, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x000, 0x10, 6, 1),
+ PINS_FIELD_BASE(150, 153, 1, 0x000, 0x10, 8, 1),
+ PIN_FIELD_BASE(154, 154, 1, 0x000, 0x10, 9, 1),
+ PINS_FIELD_BASE(155, 157, 1, 0x000, 0x10, 10, 1),
+ PINS_FIELD_BASE(158, 160, 1, 0x000, 0x10, 8, 1),
+ PINS_FIELD_BASE(161, 164, 2, 0x000, 0x10, 0, 1),
+ PINS_FIELD_BASE(165, 166, 2, 0x000, 0x10, 1, 1),
+ PINS_FIELD_BASE(167, 168, 4, 0x000, 0x10, 2, 1),
+ PIN_FIELD_BASE(169, 169, 4, 0x000, 0x10, 3, 1),
+ PINS_FIELD_BASE(170, 174, 4, 0x000, 0x10, 4, 1),
+ PINS_FIELD_BASE(175, 176, 4, 0x000, 0x10, 3, 1),
+ PINS_FIELD_BASE(177, 179, 6, 0x000, 0x10, 4, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_smt_range[] = {
+ PINS_FIELD_BASE(0, 3, 6, 0x010, 0x10, 3, 1),
+ PINS_FIELD_BASE(4, 7, 6, 0x010, 0x10, 5, 1),
+ PIN_FIELD_BASE(8, 8, 6, 0x010, 0x10, 0, 1),
+ PINS_FIELD_BASE(9, 10, 6, 0x010, 0x10, 12, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x010, 0x10, 3, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x010, 0x10, 7, 1),
+ PINS_FIELD_BASE(13, 16, 2, 0x010, 0x10, 2, 1),
+ PINS_FIELD_BASE(17, 20, 2, 0x010, 0x10, 3, 1),
+ PINS_FIELD_BASE(21, 24, 2, 0x010, 0x10, 4, 1),
+ PINS_FIELD_BASE(25, 28, 2, 0x010, 0x10, 5, 1),
+ PIN_FIELD_BASE(29, 29, 2, 0x010, 0x10, 6, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x010, 0x10, 7, 1),
+ PINS_FIELD_BASE(31, 31, 2, 0x010, 0x10, 8, 1),
+ PINS_FIELD_BASE(32, 34, 2, 0x010, 0x10, 7, 1),
+ PINS_FIELD_BASE(35, 37, 3, 0x010, 0x10, 0, 1),
+ PINS_FIELD_BASE(38, 40, 3, 0x010, 0x10, 1, 1),
+ PINS_FIELD_BASE(41, 42, 3, 0x010, 0x10, 2, 1),
+ PINS_FIELD_BASE(43, 45, 3, 0x010, 0x10, 3, 1),
+ PINS_FIELD_BASE(46, 47, 3, 0x010, 0x10, 4, 1),
+ PINS_FIELD_BASE(48, 49, 3, 0x010, 0x10, 5, 1),
+ PINS_FIELD_BASE(50, 51, 4, 0x010, 0x10, 0, 1),
+ PINS_FIELD_BASE(52, 57, 4, 0x010, 0x10, 1, 1),
+ PINS_FIELD_BASE(58, 60, 4, 0x010, 0x10, 2, 1),
+ PINS_FIELD_BASE(61, 64, 5, 0x010, 0x10, 0, 1),
+ PINS_FIELD_BASE(65, 66, 5, 0x010, 0x10, 1, 1),
+ PINS_FIELD_BASE(67, 68, 5, 0x010, 0x10, 2, 1),
+ PINS_FIELD_BASE(69, 71, 5, 0x010, 0x10, 3, 1),
+ PINS_FIELD_BASE(72, 76, 5, 0x010, 0x10, 4, 1),
+ PINS_FIELD_BASE(77, 80, 5, 0x010, 0x10, 5, 1),
+ PIN_FIELD_BASE(81, 81, 5, 0x010, 0x10, 6, 1),
+ PINS_FIELD_BASE(82, 83, 5, 0x010, 0x10, 7, 1),
+ PIN_FIELD_BASE(84, 84, 5, 0x010, 0x10, 6, 1),
+ PINS_FIELD_BASE(85, 88, 5, 0x010, 0x10, 8, 1),
+ PIN_FIELD_BASE(89, 89, 6, 0x010, 0x10, 11, 1),
+ PIN_FIELD_BASE(90, 90, 6, 0x010, 0x10, 1, 1),
+ PINS_FIELD_BASE(91, 94, 6, 0x010, 0x10, 2, 1),
+ PINS_FIELD_BASE(95, 96, 6, 0x010, 0x10, 6, 1),
+ PINS_FIELD_BASE(97, 98, 6, 0x010, 0x10, 7, 1),
+ PIN_FIELD_BASE(99, 99, 6, 0x010, 0x10, 8, 1),
+ PIN_FIELD_BASE(100, 100, 6, 0x010, 0x10, 9, 1),
+ PINS_FIELD_BASE(101, 102, 6, 0x010, 0x10, 10, 1),
+ PINS_FIELD_BASE(103, 104, 6, 0x010, 0x10, 13, 1),
+ PINS_FIELD_BASE(105, 106, 6, 0x010, 0x10, 14, 1),
+ PIN_FIELD_BASE(107, 107, 7, 0x010, 0x10, 0, 1),
+ PIN_FIELD_BASE(108, 108, 7, 0x010, 0x10, 1, 1),
+ PIN_FIELD_BASE(109, 109, 7, 0x010, 0x10, 2, 1),
+ PIN_FIELD_BASE(110, 110, 7, 0x010, 0x10, 0, 1),
+ PIN_FIELD_BASE(111, 111, 7, 0x010, 0x10, 3, 1),
+ PIN_FIELD_BASE(112, 112, 7, 0x010, 0x10, 2, 1),
+ PIN_FIELD_BASE(113, 113, 7, 0x010, 0x10, 4, 1),
+ PIN_FIELD_BASE(114, 114, 7, 0x010, 0x10, 5, 1),
+ PIN_FIELD_BASE(115, 115, 7, 0x010, 0x10, 6, 1),
+ PIN_FIELD_BASE(116, 116, 7, 0x010, 0x10, 7, 1),
+ PIN_FIELD_BASE(117, 117, 7, 0x010, 0x10, 8, 1),
+ PIN_FIELD_BASE(118, 118, 7, 0x010, 0x10, 9, 1),
+ PIN_FIELD_BASE(119, 119, 7, 0x010, 0x10, 10, 1),
+ PIN_FIELD_BASE(120, 120, 7, 0x010, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 7, 0x010, 0x10, 12, 1),
+ PIN_FIELD_BASE(122, 122, 8, 0x010, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 8, 0x010, 0x10, 1, 1),
+ PIN_FIELD_BASE(124, 124, 8, 0x010, 0x10, 2, 1),
+ PINS_FIELD_BASE(125, 130, 8, 0x010, 0x10, 1, 1),
+ PIN_FIELD_BASE(131, 131, 8, 0x010, 0x10, 3, 1),
+ PIN_FIELD_BASE(132, 132, 8, 0x010, 0x10, 1, 1),
+ PIN_FIELD_BASE(133, 133, 8, 0x010, 0x10, 4, 1),
+ PIN_FIELD_BASE(134, 134, 1, 0x010, 0x10, 0, 1),
+ PIN_FIELD_BASE(135, 135, 1, 0x010, 0x10, 1, 1),
+ PINS_FIELD_BASE(136, 143, 1, 0x010, 0x10, 2, 1),
+ PINS_FIELD_BASE(144, 147, 1, 0x010, 0x10, 4, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x010, 0x10, 5, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x010, 0x10, 6, 1),
+ PINS_FIELD_BASE(150, 153, 1, 0x010, 0x10, 8, 1),
+ PIN_FIELD_BASE(154, 154, 1, 0x010, 0x10, 9, 1),
+ PINS_FIELD_BASE(155, 157, 1, 0x010, 0x10, 10, 1),
+ PINS_FIELD_BASE(158, 160, 1, 0x010, 0x10, 8, 1),
+ PINS_FIELD_BASE(161, 164, 2, 0x010, 0x10, 0, 1),
+ PINS_FIELD_BASE(165, 166, 2, 0x010, 0x10, 1, 1),
+ PINS_FIELD_BASE(167, 168, 4, 0x010, 0x10, 2, 1),
+ PIN_FIELD_BASE(169, 169, 4, 0x010, 0x10, 3, 1),
+ PINS_FIELD_BASE(170, 174, 4, 0x010, 0x10, 4, 1),
+ PINS_FIELD_BASE(175, 176, 4, 0x010, 0x10, 3, 1),
+ PINS_FIELD_BASE(177, 179, 6, 0x010, 0x10, 4, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_pullen_range[] = {
+ PIN_FIELD_BASE(0, 3, 6, 0x060, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 7, 6, 0x060, 0x10, 11, 1),
+ PIN_FIELD_BASE(8, 8, 6, 0x060, 0x10, 0, 1),
+ PIN_FIELD_BASE(9, 10, 6, 0x060, 0x10, 26, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x060, 0x10, 10, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x060, 0x10, 17, 1),
+ PIN_FIELD_BASE(13, 28, 2, 0x060, 0x10, 6, 1),
+ PIN_FIELD_BASE(43, 49, 3, 0x060, 0x10, 8, 1),
+ PIN_FIELD_BASE(50, 60, 4, 0x060, 0x10, 0, 1),
+ PIN_FIELD_BASE(61, 88, 5, 0x060, 0x10, 0, 1),
+ PIN_FIELD_BASE(89, 89, 6, 0x060, 0x10, 24, 1),
+ PIN_FIELD_BASE(90, 90, 6, 0x060, 0x10, 1, 1),
+ PIN_FIELD_BASE(95, 95, 6, 0x060, 0x10, 15, 1),
+ PIN_FIELD_BASE(96, 102, 6, 0x060, 0x10, 17, 1),
+ PIN_FIELD_BASE(103, 106, 6, 0x060, 0x10, 28, 1),
+ PIN_FIELD_BASE(107, 121, 7, 0x060, 0x10, 0, 1),
+ PIN_FIELD_BASE(134, 143, 1, 0x060, 0x10, 0, 1),
+ PIN_FIELD_BASE(144, 149, 1, 0x060, 0x10, 11, 1),
+ PIN_FIELD_BASE(150, 160, 1, 0x060, 0x10, 18, 1),
+ PIN_FIELD_BASE(161, 166, 2, 0x060, 0x10, 0, 1),
+ PIN_FIELD_BASE(167, 176, 4, 0x060, 0x10, 11, 1),
+ PIN_FIELD_BASE(177, 177, 6, 0x060, 0x10, 10, 1),
+ PIN_FIELD_BASE(178, 178, 6, 0x060, 0x10, 16, 1),
+ PIN_FIELD_BASE(179, 179, 6, 0x060, 0x10, 25, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_pullsel_range[] = {
+ PIN_FIELD_BASE(0, 3, 6, 0x080, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 7, 6, 0x080, 0x10, 11, 1),
+ PIN_FIELD_BASE(8, 8, 6, 0x080, 0x10, 0, 1),
+ PIN_FIELD_BASE(9, 10, 6, 0x080, 0x10, 26, 1),
+ PIN_FIELD_BASE(11, 11, 1, 0x080, 0x10, 10, 1),
+ PIN_FIELD_BASE(12, 12, 1, 0x080, 0x10, 17, 1),
+ PIN_FIELD_BASE(13, 28, 2, 0x080, 0x10, 6, 1),
+ PIN_FIELD_BASE(43, 49, 3, 0x080, 0x10, 8, 1),
+ PIN_FIELD_BASE(50, 60, 4, 0x080, 0x10, 0, 1),
+ PIN_FIELD_BASE(61, 88, 5, 0x080, 0x10, 0, 1),
+ PIN_FIELD_BASE(89, 89, 6, 0x080, 0x10, 24, 1),
+ PIN_FIELD_BASE(90, 90, 6, 0x080, 0x10, 1, 1),
+ PIN_FIELD_BASE(95, 95, 6, 0x080, 0x10, 15, 1),
+ PIN_FIELD_BASE(96, 102, 6, 0x080, 0x10, 17, 1),
+ PIN_FIELD_BASE(103, 106, 6, 0x080, 0x10, 28, 1),
+ PIN_FIELD_BASE(107, 121, 7, 0x080, 0x10, 0, 1),
+ PIN_FIELD_BASE(134, 143, 1, 0x080, 0x10, 0, 1),
+ PIN_FIELD_BASE(144, 149, 1, 0x080, 0x10, 11, 1),
+ PIN_FIELD_BASE(150, 160, 1, 0x080, 0x10, 18, 1),
+ PIN_FIELD_BASE(161, 166, 2, 0x080, 0x10, 0, 1),
+ PIN_FIELD_BASE(167, 176, 4, 0x080, 0x10, 11, 1),
+ PIN_FIELD_BASE(177, 177, 6, 0x080, 0x10, 10, 1),
+ PIN_FIELD_BASE(178, 178, 6, 0x080, 0x10, 16, 1),
+ PIN_FIELD_BASE(179, 179, 6, 0x080, 0x10, 25, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_drv_range[] = {
+ PINS_FIELD_BASE(0, 3, 6, 0x0A0, 0x10, 12, 3),
+ PINS_FIELD_BASE(4, 7, 6, 0x0A0, 0x10, 20, 3),
+ PIN_FIELD_BASE(8, 8, 6, 0x0A0, 0x10, 0, 3),
+ PINS_FIELD_BASE(9, 10, 6, 0x0B0, 0x10, 16, 3),
+ PIN_FIELD_BASE(11, 11, 1, 0x0A0, 0x10, 12, 3),
+ PIN_FIELD_BASE(12, 12, 1, 0x0A0, 0x10, 28, 3),
+ PINS_FIELD_BASE(13, 16, 2, 0x0A0, 0x10, 8, 3),
+ PINS_FIELD_BASE(17, 20, 2, 0x0A0, 0x10, 12, 3),
+ PINS_FIELD_BASE(21, 24, 2, 0x0A0, 0x10, 16, 3),
+ PINS_FIELD_BASE(25, 28, 2, 0x0A0, 0x10, 20, 3),
+ PIN_FIELD_BASE(29, 29, 2, 0x0A0, 0x10, 24, 3),
+ PIN_FIELD_BASE(30, 30, 2, 0x0A0, 0x10, 28, 3),
+ PINS_FIELD_BASE(31, 31, 2, 0x0B0, 0x10, 0, 3),
+ PINS_FIELD_BASE(32, 34, 2, 0x0A0, 0x10, 28, 3),
+ PINS_FIELD_BASE(35, 37, 3, 0x0A0, 0x10, 0, 3),
+ PINS_FIELD_BASE(38, 40, 3, 0x0A0, 0x10, 4, 3),
+ PINS_FIELD_BASE(41, 42, 3, 0x0A0, 0x10, 8, 3),
+ PINS_FIELD_BASE(43, 45, 3, 0x0A0, 0x10, 12, 3),
+ PINS_FIELD_BASE(46, 47, 3, 0x0A0, 0x10, 16, 3),
+ PINS_FIELD_BASE(48, 49, 3, 0x0A0, 0x10, 20, 3),
+ PINS_FIELD_BASE(50, 51, 4, 0x0A0, 0x10, 0, 3),
+ PINS_FIELD_BASE(52, 57, 4, 0x0A0, 0x10, 4, 3),
+ PINS_FIELD_BASE(58, 60, 4, 0x0A0, 0x10, 8, 3),
+ PINS_FIELD_BASE(61, 64, 5, 0x0A0, 0x10, 0, 3),
+ PINS_FIELD_BASE(65, 66, 5, 0x0A0, 0x10, 4, 3),
+ PINS_FIELD_BASE(67, 68, 5, 0x0A0, 0x10, 8, 3),
+ PINS_FIELD_BASE(69, 71, 5, 0x0A0, 0x10, 12, 3),
+ PINS_FIELD_BASE(72, 76, 5, 0x0A0, 0x10, 16, 3),
+ PINS_FIELD_BASE(77, 80, 5, 0x0A0, 0x10, 20, 3),
+ PIN_FIELD_BASE(81, 81, 5, 0x0A0, 0x10, 24, 3),
+ PINS_FIELD_BASE(82, 83, 5, 0x0A0, 0x10, 28, 3),
+ PIN_FIELD_BASE(84, 84, 5, 0x0A0, 0x10, 24, 3),
+ PINS_FIELD_BASE(85, 88, 5, 0x0B0, 0x10, 0, 3),
+ PIN_FIELD_BASE(89, 89, 6, 0x0B0, 0x10, 12, 3),
+ PIN_FIELD_BASE(90, 90, 6, 0x0A0, 0x10, 4, 3),
+ PINS_FIELD_BASE(91, 94, 6, 0x0A0, 0x10, 8, 3),
+ PINS_FIELD_BASE(95, 96, 6, 0x0A0, 0x10, 24, 3),
+ PINS_FIELD_BASE(97, 98, 6, 0x0A0, 0x10, 28, 3),
+ PIN_FIELD_BASE(99, 99, 6, 0x0B0, 0x10, 0, 3),
+ PIN_FIELD_BASE(100, 100, 6, 0x0B0, 0x10, 4, 3),
+ PINS_FIELD_BASE(101, 102, 6, 0x0B0, 0x10, 8, 3),
+ PINS_FIELD_BASE(103, 104, 6, 0x0B0, 0x10, 20, 3),
+ PINS_FIELD_BASE(105, 106, 6, 0x0B0, 0x10, 24, 3),
+ PIN_FIELD_BASE(107, 107, 7, 0x0A0, 0x10, 0, 3),
+ PIN_FIELD_BASE(108, 108, 7, 0x0A0, 0x10, 4, 3),
+ PIN_FIELD_BASE(109, 109, 7, 0x0A0, 0x10, 8, 3),
+ PIN_FIELD_BASE(110, 110, 7, 0x0A0, 0x10, 0, 3),
+ PIN_FIELD_BASE(111, 111, 7, 0x0A0, 0x10, 4, 3),
+ PIN_FIELD_BASE(112, 112, 7, 0x0A0, 0x10, 8, 3),
+ PIN_FIELD_BASE(113, 113, 7, 0x0A0, 0x10, 16, 3),
+ PIN_FIELD_BASE(114, 114, 7, 0x0A0, 0x10, 20, 3),
+ PIN_FIELD_BASE(115, 115, 7, 0x0A0, 0x10, 24, 3),
+ PIN_FIELD_BASE(116, 116, 7, 0x0A0, 0x10, 28, 3),
+ PIN_FIELD_BASE(117, 117, 7, 0x0B0, 0x10, 0, 3),
+ PIN_FIELD_BASE(118, 118, 7, 0x0B0, 0x10, 4, 3),
+ PIN_FIELD_BASE(119, 119, 7, 0x0B0, 0x10, 8, 3),
+ PIN_FIELD_BASE(120, 120, 7, 0x0B0, 0x10, 12, 3),
+ PIN_FIELD_BASE(121, 121, 7, 0x0B0, 0x10, 16, 3),
+ PIN_FIELD_BASE(122, 122, 8, 0x0A0, 0x10, 0, 3),
+ PIN_FIELD_BASE(123, 123, 8, 0x0A0, 0x10, 4, 3),
+ PIN_FIELD_BASE(124, 124, 8, 0x0A0, 0x10, 8, 3),
+ PINS_FIELD_BASE(125, 130, 8, 0x0A0, 0x10, 4, 3),
+ PIN_FIELD_BASE(131, 131, 8, 0x0A0, 0x10, 12, 3),
+ PIN_FIELD_BASE(132, 132, 8, 0x0A0, 0x10, 4, 3),
+ PIN_FIELD_BASE(133, 133, 8, 0x0A0, 0x10, 16, 3),
+ PIN_FIELD_BASE(134, 134, 1, 0x0A0, 0x10, 0, 3),
+ PIN_FIELD_BASE(135, 135, 1, 0x0A0, 0x10, 4, 3),
+ PINS_FIELD_BASE(136, 143, 1, 0x0A0, 0x10, 8, 3),
+ PINS_FIELD_BASE(144, 147, 1, 0x0A0, 0x10, 16, 3),
+ PIN_FIELD_BASE(148, 148, 1, 0x0A0, 0x10, 20, 3),
+ PIN_FIELD_BASE(149, 149, 1, 0x0A0, 0x10, 24, 3),
+ PINS_FIELD_BASE(150, 153, 1, 0x0B0, 0x10, 0, 3),
+ PIN_FIELD_BASE(154, 154, 1, 0x0B0, 0x10, 4, 3),
+ PINS_FIELD_BASE(155, 157, 1, 0x0B0, 0x10, 8, 3),
+ PINS_FIELD_BASE(158, 160, 1, 0x0B0, 0x10, 0, 3),
+ PINS_FIELD_BASE(161, 164, 2, 0x0A0, 0x10, 0, 3),
+ PINS_FIELD_BASE(165, 166, 2, 0x0A0, 0x10, 4, 3),
+ PINS_FIELD_BASE(167, 168, 4, 0x0A0, 0x10, 8, 3),
+ PIN_FIELD_BASE(169, 169, 4, 0x0A0, 0x10, 12, 3),
+ PINS_FIELD_BASE(170, 174, 4, 0x0A0, 0x10, 16, 3),
+ PINS_FIELD_BASE(175, 176, 4, 0x0A0, 0x10, 12, 3),
+ PINS_FIELD_BASE(177, 179, 6, 0x0A0, 0x10, 16, 3),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_pupd_range[] = {
+ PIN_FIELD_BASE(29, 29, 2, 0x0C0, 0x10, 2, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0C0, 0x10, 6, 1),
+ PIN_FIELD_BASE(31, 31, 2, 0x0C0, 0x10, 10, 1),
+ PIN_FIELD_BASE(32, 32, 2, 0x0C0, 0x10, 14, 1),
+ PIN_FIELD_BASE(33, 33, 2, 0x0C0, 0x10, 18, 1),
+ PIN_FIELD_BASE(34, 34, 2, 0x0C0, 0x10, 22, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x0C0, 0x10, 2, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x0C0, 0x10, 6, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x0C0, 0x10, 10, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x0C0, 0x10, 14, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x0C0, 0x10, 18, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x0C0, 0x10, 22, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x0C0, 0x10, 26, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x0C0, 0x10, 30, 1),
+ PIN_FIELD_BASE(91, 91, 6, 0x0C0, 0x10, 2, 1),
+ PIN_FIELD_BASE(92, 92, 6, 0x0C0, 0x10, 6, 1),
+ PIN_FIELD_BASE(93, 93, 6, 0x0C0, 0x10, 10, 1),
+ PIN_FIELD_BASE(94, 94, 6, 0x0C0, 0x10, 14, 1),
+ PIN_FIELD_BASE(122, 122, 8, 0x0C0, 0x10, 2, 1),
+ PIN_FIELD_BASE(123, 123, 8, 0x0C0, 0x10, 6, 1),
+ PIN_FIELD_BASE(124, 124, 8, 0x0C0, 0x10, 10, 1),
+ PIN_FIELD_BASE(125, 125, 8, 0x0C0, 0x10, 14, 1),
+ PIN_FIELD_BASE(126, 126, 8, 0x0C0, 0x10, 18, 1),
+ PIN_FIELD_BASE(127, 127, 8, 0x0C0, 0x10, 22, 1),
+ PIN_FIELD_BASE(128, 128, 8, 0x0C0, 0x10, 26, 1),
+ PIN_FIELD_BASE(129, 129, 8, 0x0C0, 0x10, 30, 1),
+ PIN_FIELD_BASE(130, 130, 8, 0x0D0, 0x10, 2, 1),
+ PIN_FIELD_BASE(131, 131, 8, 0x0D0, 0x10, 6, 1),
+ PIN_FIELD_BASE(132, 132, 8, 0x0D0, 0x10, 10, 1),
+ PIN_FIELD_BASE(133, 133, 8, 0x0D0, 0x10, 14, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_r0_range[] = {
+ PIN_FIELD_BASE(29, 29, 2, 0x0C0, 0x10, 0, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0C0, 0x10, 4, 1),
+ PIN_FIELD_BASE(31, 31, 2, 0x0C0, 0x10, 8, 1),
+ PIN_FIELD_BASE(32, 32, 2, 0x0C0, 0x10, 12, 1),
+ PIN_FIELD_BASE(33, 33, 2, 0x0C0, 0x10, 16, 1),
+ PIN_FIELD_BASE(34, 34, 2, 0x0C0, 0x10, 20, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x0C0, 0x10, 0, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x0C0, 0x10, 4, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x0C0, 0x10, 8, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x0C0, 0x10, 12, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x0C0, 0x10, 16, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x0C0, 0x10, 20, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x0C0, 0x10, 24, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x0C0, 0x10, 28, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x0F0, 0x10, 18, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x0F0, 0x10, 13, 1),
+ PIN_FIELD_BASE(50, 50, 4, 0x0F0, 0x10, 10, 1),
+ PIN_FIELD_BASE(51, 51, 4, 0x0F0, 0x10, 5, 1),
+ PIN_FIELD_BASE(81, 81, 5, 0x0F0, 0x10, 7, 1),
+ PIN_FIELD_BASE(82, 82, 5, 0x0F0, 0x10, 5, 1),
+ PIN_FIELD_BASE(83, 83, 5, 0x0F0, 0x10, 15, 1),
+ PIN_FIELD_BASE(84, 84, 5, 0x0F0, 0x10, 17, 1),
+ PIN_FIELD_BASE(91, 91, 6, 0x0C0, 0x10, 0, 1),
+ PIN_FIELD_BASE(92, 92, 6, 0x0C0, 0x10, 4, 1),
+ PIN_FIELD_BASE(93, 93, 6, 0x0C0, 0x10, 8, 1),
+ PIN_FIELD_BASE(94, 94, 6, 0x0C0, 0x10, 12, 1),
+ PIN_FIELD_BASE(103, 103, 6, 0x0F0, 0x10, 20, 1),
+ PIN_FIELD_BASE(104, 104, 6, 0x0F0, 0x10, 10, 1),
+ PIN_FIELD_BASE(105, 105, 6, 0x0F0, 0x10, 22, 1),
+ PIN_FIELD_BASE(106, 106, 6, 0x0F0, 0x10, 12, 1),
+ PIN_FIELD_BASE(122, 122, 8, 0x0C0, 0x10, 0, 1),
+ PIN_FIELD_BASE(123, 123, 8, 0x0C0, 0x10, 4, 1),
+ PIN_FIELD_BASE(124, 124, 8, 0x0C0, 0x10, 8, 1),
+ PIN_FIELD_BASE(125, 125, 8, 0x0C0, 0x10, 12, 1),
+ PIN_FIELD_BASE(126, 126, 8, 0x0C0, 0x10, 16, 1),
+ PIN_FIELD_BASE(127, 127, 8, 0x0C0, 0x10, 20, 1),
+ PIN_FIELD_BASE(128, 128, 8, 0x0C0, 0x10, 24, 1),
+ PIN_FIELD_BASE(129, 129, 8, 0x0C0, 0x10, 28, 1),
+ PIN_FIELD_BASE(130, 130, 8, 0x0D0, 0x10, 0, 1),
+ PIN_FIELD_BASE(131, 131, 8, 0x0D0, 0x10, 4, 1),
+ PIN_FIELD_BASE(132, 132, 8, 0x0D0, 0x10, 8, 1),
+ PIN_FIELD_BASE(133, 133, 8, 0x0D0, 0x10, 12, 1),
+};
+
+static const struct mtk_pin_field_calc mt8183_pin_r1_range[] = {
+ PIN_FIELD_BASE(29, 29, 2, 0x0C0, 0x10, 1, 1),
+ PIN_FIELD_BASE(30, 30, 2, 0x0C0, 0x10, 5, 1),
+ PIN_FIELD_BASE(31, 31, 2, 0x0C0, 0x10, 9, 1),
+ PIN_FIELD_BASE(32, 32, 2, 0x0C0, 0x10, 13, 1),
+ PIN_FIELD_BASE(33, 33, 2, 0x0C0, 0x10, 17, 1),
+ PIN_FIELD_BASE(34, 34, 2, 0x0C0, 0x10, 21, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x0C0, 0x10, 1, 1),
+ PIN_FIELD_BASE(36, 36, 3, 0x0C0, 0x10, 5, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x0C0, 0x10, 9, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x0C0, 0x10, 13, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x0C0, 0x10, 17, 1),
+ PIN_FIELD_BASE(40, 40, 3, 0x0C0, 0x10, 21, 1),
+ PIN_FIELD_BASE(41, 41, 3, 0x0C0, 0x10, 25, 1),
+ PIN_FIELD_BASE(42, 42, 3, 0x0C0, 0x10, 29, 1),
+ PIN_FIELD_BASE(48, 48, 3, 0x0F0, 0x10, 19, 1),
+ PIN_FIELD_BASE(49, 49, 3, 0x0F0, 0x10, 14, 1),
+ PIN_FIELD_BASE(50, 50, 4, 0x0F0, 0x10, 11, 1),
+ PIN_FIELD_BASE(51, 51, 4, 0x0F0, 0x10, 6, 1),
+ PIN_FIELD_BASE(81, 81, 5, 0x0F0, 0x10, 8, 1),
+ PIN_FIELD_BASE(82, 82, 5, 0x0F0, 0x10, 6, 1),
+ PIN_FIELD_BASE(83, 83, 5, 0x0F0, 0x10, 16, 1),
+ PIN_FIELD_BASE(84, 84, 5, 0x0F0, 0x10, 18, 1),
+ PIN_FIELD_BASE(91, 91, 6, 0x0C0, 0x10, 1, 1),
+ PIN_FIELD_BASE(92, 92, 6, 0x0C0, 0x10, 5, 1),
+ PIN_FIELD_BASE(93, 93, 6, 0x0C0, 0x10, 9, 1),
+ PIN_FIELD_BASE(94, 94, 6, 0x0C0, 0x10, 13, 1),
+ PIN_FIELD_BASE(103, 103, 6, 0x0F0, 0x10, 21, 1),
+ PIN_FIELD_BASE(104, 104, 6, 0x0F0, 0x10, 11, 1),
+ PIN_FIELD_BASE(105, 105, 6, 0x0F0, 0x10, 23, 1),
+ PIN_FIELD_BASE(106, 106, 6, 0x0F0, 0x10, 13, 1),
+ PIN_FIELD_BASE(122, 122, 8, 0x0C0, 0x10, 1, 1),
+ PIN_FIELD_BASE(123, 123, 8, 0x0C0, 0x10, 5, 1),
+ PIN_FIELD_BASE(124, 124, 8, 0x0C0, 0x10, 9, 1),
+ PIN_FIELD_BASE(125, 125, 8, 0x0C0, 0x10, 13, 1),
+ PIN_FIELD_BASE(126, 126, 8, 0x0C0, 0x10, 17, 1),
+ PIN_FIELD_BASE(127, 127, 8, 0x0C0, 0x10, 21, 1),
+ PIN_FIELD_BASE(128, 128, 8, 0x0C0, 0x10, 25, 1),
+ PIN_FIELD_BASE(129, 129, 8, 0x0C0, 0x10, 29, 1),
+ PIN_FIELD_BASE(130, 130, 8, 0x0D0, 0x10, 1, 1),
+ PIN_FIELD_BASE(131, 131, 8, 0x0D0, 0x10, 5, 1),
+ PIN_FIELD_BASE(132, 132, 8, 0x0D0, 0x10, 9, 1),
+ PIN_FIELD_BASE(133, 133, 8, 0x0D0, 0x10, 13, 1),
+};
+
+static const struct mtk_pin_reg_calc mt8183_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8183_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8183_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8183_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8183_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8183_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8183_pin_ies_range),
+ [PINCTRL_PIN_REG_PULLEN] = MTK_RANGE(mt8183_pin_pullen_range),
+ [PINCTRL_PIN_REG_PULLSEL] = MTK_RANGE(mt8183_pin_pullsel_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt8183_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8183_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8183_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8183_pin_r1_range),
+};
+
+static const char * const mt8183_pinctrl_register_base_names[] = {
+ "iocfg0", "iocfg1", "iocfg2", "iocfg3", "iocfg4", "iocfg5",
+ "iocfg6", "iocfg7", "iocfg8",
+};
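+
+/* The order here follows the iocfg list at the top of this file; e.g.
+ * index 3 ("iocfg3") is the register base at 0x11E70000.
+ */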
+
+static const struct mtk_eint_hw mt8183_eint_hw = {
+ .port_mask = 7,
+ .ports = 6,
+ .ap_num = 212,
+ .db_cnt = 13,
+};
+
+static const struct mtk_pin_soc mt8183_data = {
+ .reg_cal = mt8183_reg_cals,
+ .pins = mtk_pins_mt8183,
+ .npins = ARRAY_SIZE(mtk_pins_mt8183),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt8183),
+ .eint_hw = &mt8183_eint_hw,
+ .gpio_m = 0,
+ .ies_present = true,
+ .base_names = mt8183_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt8183_pinctrl_register_base_names),
+ .bias_disable_set = mtk_pinconf_bias_disable_set_rev1,
+ .bias_disable_get = mtk_pinconf_bias_disable_get_rev1,
+ .bias_set = mtk_pinconf_bias_set_rev1,
+ .bias_get = mtk_pinconf_bias_get_rev1,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_pull_get = mtk_pinconf_adv_pull_get,
+ .adv_pull_set = mtk_pinconf_adv_pull_set,
+};
+
+static const struct of_device_id mt8183_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt8183-pinctrl", },
+ { }
+};
+
+static int mt8183_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_paris_pinctrl_probe(pdev, &mt8183_data);
+}
+
+static struct platform_driver mt8183_pinctrl_driver = {
+ .driver = {
+ .name = "mt8183-pinctrl",
+ .of_match_table = mt8183_pinctrl_of_match,
+ },
+ .probe = mt8183_pinctrl_probe,
+};
+
+static int __init mt8183_pinctrl_init(void)
+{
+ return platform_driver_register(&mt8183_pinctrl_driver);
+}
+arch_initcall(mt8183_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
new file mode 100644
index 000000000000..4a9e0d4c2bbc
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+
+#include "mtk-eint.h"
+#include "pinctrl-mtk-common-v2.h"
+
+/**
+ * struct mtk_drive_desc - the structure that holds information about
+ * the driving current
+ * @min: the minimum current of this group
+ * @max: the maximum current of this group
+ * @step: the step current of this group
+ * @scal: the weight factor
+ *
+ * formula: output = ((input) / step - 1) * scal
+ */
+struct mtk_drive_desc {
+ u8 min;
+ u8 max;
+ u8 step;
+ u8 scal;
+};
+
+/* The groups of drive strength */
+static const struct mtk_drive_desc mtk_drive[] = {
+ [DRV_GRP0] = { 4, 16, 4, 1 },
+ [DRV_GRP1] = { 4, 16, 4, 2 },
+ [DRV_GRP2] = { 2, 8, 2, 1 },
+ [DRV_GRP3] = { 2, 8, 2, 2 },
+ [DRV_GRP4] = { 2, 16, 2, 1 },
+};
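+
+/* e.g. for DRV_GRP1 (4..16mA in 4mA steps, scal = 2), a requested 8mA
+ * encodes to the raw field value (8 / 4 - 1) * 2 = 2, per the formula
+ * above.
+ */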
+
+static void mtk_w32(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 val)
+{
+ writel_relaxed(val, pctl->base[i] + reg);
+}
+
+static u32 mtk_r32(struct mtk_pinctrl *pctl, u8 i, u32 reg)
+{
+ return readl_relaxed(pctl->base[i] + reg);
+}
+
+void mtk_rmw(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = mtk_r32(pctl, i, reg);
+ val &= ~mask;
+ val |= set;
+ mtk_w32(pctl, i, reg, val);
+}
+
+static int mtk_hw_pin_field_lookup(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ int field, struct mtk_pin_field *pfd)
+{
+ const struct mtk_pin_field_calc *c, *e;
+ const struct mtk_pin_reg_calc *rc;
+ u32 bits;
+
+ if (hw->soc->reg_cal && hw->soc->reg_cal[field].range) {
+ rc = &hw->soc->reg_cal[field];
+ } else {
+ dev_dbg(hw->dev,
+ "Not support field %d for pin %d (%s)\n",
+ field, desc->number, desc->name);
+ return -ENOTSUPP;
+ }
+
+ c = rc->range;
+ e = c + rc->nranges;
+
+ while (c < e) {
+ if (desc->number >= c->s_pin && desc->number <= c->e_pin)
+ break;
+ c++;
+ }
+
+ if (c >= e) {
+ dev_dbg(hw->dev, "Not support field %d for pin = %d (%s)\n",
+ field, desc->number, desc->name);
+ return -ENOTSUPP;
+ }
+
+ if (c->i_base > hw->nbase - 1) {
+ dev_err(hw->dev,
+ "Invalid base for field %d for pin = %d (%s)\n",
+ field, desc->number, desc->name);
+ return -EINVAL;
+ }
+
+ /* Calculate 'bits' as the overall bit offset the pin is located at;
+ * if c->fixed is set, all pins in the range share the same field
+ * as s_pin.
+ */
+ bits = c->fixed ? c->s_bit : c->s_bit +
+ (desc->number - c->s_pin) * (c->x_bits);
+
+ /* Fill pfd from bits; for example, a 32-bit wide register is assumed
+ * when c->sz_reg equals 32.
+ */
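+ /* A worked example, using mt8183's mode range PIN_FIELD(0, 192, 0x300,
+ * 0x10, 0, 4) from this series: pin 37 gives bits = 37 * 4 = 148, so
+ * offset = 0x300 + 0x10 * (148 / 32) = 0x340, bitpos = 148 % 32 = 20
+ * and mask = 0xf.
+ */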
+ pfd->index = c->i_base;
+ pfd->offset = c->s_addr + c->x_addrs * (bits / c->sz_reg);
+ pfd->bitpos = bits % c->sz_reg;
+ pfd->mask = (1 << c->x_bits) - 1;
+
+ /* pfd->next indicates that the field wraps around: its remaining bits
+ * start at bit 0 of the next register, so both registers must be
+ * accessed to read or write the complete field.
+ */
+ pfd->next = pfd->bitpos + c->x_bits > c->sz_reg ? c->x_addrs : 0;
+
+ return 0;
+}
+
+static int mtk_hw_pin_field_get(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ int field, struct mtk_pin_field *pfd)
+{
+ if (field < 0 || field >= PINCTRL_PIN_REG_MAX) {
+ dev_err(hw->dev, "Invalid Field %d\n", field);
+ return -EINVAL;
+ }
+
+ return mtk_hw_pin_field_lookup(hw, desc, field, pfd);
+}
+
+static void mtk_hw_bits_part(struct mtk_pin_field *pf, int *h, int *l)
+{
+ *l = 32 - pf->bitpos;
+ *h = get_count_order(pf->mask) - *l;
+}
+
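+/* e.g. a 4-bit field at bitpos 30 splits into nbits_l = 32 - 30 = 2 bits
+ * kept in the current register and nbits_h = 4 - 2 = 2 bits that spill
+ * into the next one.
+ */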
+static void mtk_hw_write_cross_field(struct mtk_pinctrl *hw,
+ struct mtk_pin_field *pf, int value)
+{
+ int nbits_l, nbits_h;
+
+ mtk_hw_bits_part(pf, &nbits_h, &nbits_l);
+
+ mtk_rmw(hw, pf->index, pf->offset, pf->mask << pf->bitpos,
+ (value & pf->mask) << pf->bitpos);
+
+ mtk_rmw(hw, pf->index, pf->offset + pf->next, BIT(nbits_h) - 1,
+ (value & pf->mask) >> nbits_l);
+}
+
+static void mtk_hw_read_cross_field(struct mtk_pinctrl *hw,
+ struct mtk_pin_field *pf, int *value)
+{
+ int nbits_l, nbits_h, h, l;
+
+ mtk_hw_bits_part(pf, &nbits_h, &nbits_l);
+
+ l = (mtk_r32(hw, pf->index, pf->offset)
+ >> pf->bitpos) & (BIT(nbits_l) - 1);
+ h = (mtk_r32(hw, pf->index, pf->offset + pf->next))
+ & (BIT(nbits_h) - 1);
+
+ *value = (h << nbits_l) | l;
+}
+
+int mtk_hw_set_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
+ int field, int value)
+{
+ struct mtk_pin_field pf;
+ int err;
+
+ err = mtk_hw_pin_field_get(hw, desc, field, &pf);
+ if (err)
+ return err;
+
+ if (!pf.next)
+ mtk_rmw(hw, pf.index, pf.offset, pf.mask << pf.bitpos,
+ (value & pf.mask) << pf.bitpos);
+ else
+ mtk_hw_write_cross_field(hw, &pf, value);
+
+ return 0;
+}
+
+int mtk_hw_get_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
+ int field, int *value)
+{
+ struct mtk_pin_field pf;
+ int err;
+
+ err = mtk_hw_pin_field_get(hw, desc, field, &pf);
+ if (err)
+ return err;
+
+ if (!pf.next)
+ *value = (mtk_r32(hw, pf.index, pf.offset)
+ >> pf.bitpos) & pf.mask;
+ else
+ mtk_hw_read_cross_field(hw, &pf, value);
+
+ return 0;
+}
+
+static int mtk_xt_find_eint_num(struct mtk_pinctrl *hw, unsigned long eint_n)
+{
+ const struct mtk_pin_desc *desc;
+ int i = 0;
+
+ desc = (const struct mtk_pin_desc *)hw->soc->pins;
+
+ while (i < hw->soc->npins) {
+ if (desc[i].eint.eint_n == eint_n)
+ return desc[i].number;
+ i++;
+ }
+
+ return EINT_NA;
+}
+
+static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
+ unsigned int *gpio_n,
+ struct gpio_chip **gpio_chip)
+{
+ struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+ const struct mtk_pin_desc *desc;
+
+ desc = (const struct mtk_pin_desc *)hw->soc->pins;
+ *gpio_chip = &hw->chip;
+
+ /* Optimistically assume first that gpio_n equals eint_n */
+ if (desc[eint_n].eint.eint_n == eint_n)
+ *gpio_n = eint_n;
+ else
+ *gpio_n = mtk_xt_find_eint_num(hw, eint_n);
+
+ return *gpio_n == EINT_NA ? -EINVAL : 0;
+}
+
+static int mtk_xt_get_gpio_state(void *data, unsigned long eint_n)
+{
+ struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+ const struct mtk_pin_desc *desc;
+ struct gpio_chip *gpio_chip;
+ unsigned int gpio_n;
+ int value, err;
+
+ err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
+ if (err)
+ return err;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n];
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value);
+ if (err)
+ return err;
+
+ return !!value;
+}
+
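+/* Route a pin to its EINT function: select the per-pin eint mux mode,
+ * switch the pad to input and enable the Schmitt trigger input stage.
+ */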
+static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n)
+{
+ struct mtk_pinctrl *hw = (struct mtk_pinctrl *)data;
+ const struct mtk_pin_desc *desc;
+ struct gpio_chip *gpio_chip;
+ unsigned int gpio_n;
+ int err;
+
+ err = mtk_xt_get_gpio_n(hw, eint_n, &gpio_n, &gpio_chip);
+ if (err)
+ return err;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n];
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
+ desc->eint.eint_m);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, MTK_INPUT);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT, MTK_ENABLE);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct mtk_eint_xt mtk_eint_xt = {
+ .get_gpio_n = mtk_xt_get_gpio_n,
+ .get_gpio_state = mtk_xt_get_gpio_state,
+ .set_gpio_as_eint = mtk_xt_set_gpio_as_eint,
+};
+
+int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+
+ if (!IS_ENABLED(CONFIG_EINT_MTK))
+ return 0;
+
+ if (!of_property_read_bool(np, "interrupt-controller"))
+ return -ENODEV;
+
+ hw->eint = devm_kzalloc(hw->dev, sizeof(*hw->eint), GFP_KERNEL);
+ if (!hw->eint)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "eint");
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get eint resource\n");
+ return -ENODEV;
+ }
+
+ hw->eint->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->eint->base))
+ return PTR_ERR(hw->eint->base);
+
+ hw->eint->irq = irq_of_parse_and_map(np, 0);
+ if (!hw->eint->irq)
+ return -EINVAL;
+
+ if (!hw->soc->eint_hw)
+ return -ENODEV;
+
+ hw->eint->dev = &pdev->dev;
+ hw->eint->hw = hw->soc->eint_hw;
+ hw->eint->pctl = hw;
+ hw->eint->gpio_xlate = &mtk_eint_xt;
+
+ return mtk_eint_do_init(hw->eint);
+}
+
+/* Revision 0 */
+int mtk_pinconf_bias_disable_set(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc)
+{
+ int err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU,
+ MTK_DISABLE);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD,
+ MTK_DISABLE);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mtk_pinconf_bias_disable_get(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *res)
+{
+ int v, v2;
+ int err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PU, &v);
+ if (err)
+ return err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PD, &v2);
+ if (err)
+ return err;
+
+ if (v == MTK_ENABLE || v2 == MTK_ENABLE)
+ return -EINVAL;
+
+ *res = 1;
+
+ return 0;
+}
+
+int mtk_pinconf_bias_set(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup)
+{
+ int err, arg;
+
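+ /* Bit 0 of arg drives the PU register and bit 1 the PD register, so
+ * pull-up writes (PU, PD) = (1, 0) and pull-down (0, 1).
+ */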
+ arg = pullup ? 1 : 2;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PU, arg & 1);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PD,
+ !!(arg & 2));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mtk_pinconf_bias_get(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup, int *res)
+{
+ int reg, err, v;
+
+ reg = pullup ? PINCTRL_PIN_REG_PU : PINCTRL_PIN_REG_PD;
+
+ err = mtk_hw_get_value(hw, desc, reg, &v);
+ if (err)
+ return err;
+
+ if (!v)
+ return -EINVAL;
+
+ *res = 1;
+
+ return 0;
+}
+
+/* Revision 1 */
+int mtk_pinconf_bias_disable_set_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc)
+{
+ int err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLEN,
+ MTK_DISABLE);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mtk_pinconf_bias_disable_get_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *res)
+{
+ int v, err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLEN, &v);
+ if (err)
+ return err;
+
+ if (v == MTK_ENABLE)
+ return -EINVAL;
+
+ *res = 1;
+
+ return 0;
+}
+
+int mtk_pinconf_bias_set_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup)
+{
+ int err, arg;
+
+ arg = pullup ? MTK_PULLUP : MTK_PULLDOWN;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLEN,
+ MTK_ENABLE);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, arg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup,
+ int *res)
+{
+ int err, v;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLEN, &v);
+ if (err)
+ return err;
+
+ if (v == MTK_DISABLE)
+ return -EINVAL;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PULLSEL, &v);
+ if (err)
+ return err;
+
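+ /* Reject if the selected pull direction does not match the query */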
+ if (pullup ^ (v == MTK_PULLUP))
+ return -EINVAL;
+
+ *res = 1;
+
+ return 0;
+}
+
+/* Revision 0 */
+int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg)
+{
+ const struct mtk_drive_desc *tb;
+ int err = -ENOTSUPP;
+
+ tb = &mtk_drive[desc->drv_n];
+ /* 4mA when (e8, e4) = (0, 0)
+ * 8mA when (e8, e4) = (0, 1)
+ * 12mA when (e8, e4) = (1, 0)
+ * 16mA when (e8, e4) = (1, 1)
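+ *
+ * E.g. for a drive table entry with (min, max, step, scal) =
+ * (4, 16, 4, 1), arg = 12 (mA) maps to (12 / 4 - 1) * 1 = 2,
+ * i.e. (e8, e4) = (1, 0); these tuple values are illustrative.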
+ */
+ if ((arg >= tb->min && arg <= tb->max) && !(arg % tb->step)) {
+ arg = (arg / tb->step - 1) * tb->scal;
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_E4,
+ arg & 0x1);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_E8,
+ (arg & 0x2) >> 1);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+int mtk_pinconf_drive_get(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *val)
+{
+ const struct mtk_drive_desc *tb;
+ int err, val1, val2;
+
+ tb = &mtk_drive[desc->drv_n];
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_E4, &val1);
+ if (err)
+ return err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_E8, &val2);
+ if (err)
+ return err;
+
+ /* 4mA when (e8, e4) = (0, 0); 8mA when (e8, e4) = (0, 1)
+ * 12mA when (e8, e4) = (1, 0); 16mA when (e8, e4) = (1, 1)
+ */
+ *val = (((val2 << 1) + val1) / tb->scal + 1) * tb->step;
+
+ return 0;
+}
+
+/* Revision 1 */
+int mtk_pinconf_drive_set_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg)
+{
+ const struct mtk_drive_desc *tb;
+ int err = -ENOTSUPP;
+
+ tb = &mtk_drive[desc->drv_n];
+
+ if ((arg >= tb->min && arg <= tb->max) && !(arg % tb->step)) {
+ arg = (arg / tb->step - 1) * tb->scal;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV,
+ arg);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *val)
+{
+ const struct mtk_drive_desc *tb;
+ int err, val1;
+
+ tb = &mtk_drive[desc->drv_n];
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV, &val1);
+ if (err)
+ return err;
+
+ *val = ((val1 & 0x7) / tb->scal + 1) * tb->step;
+
+ return 0;
+}
+
+int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup,
+ u32 arg)
+{
+ int err;
+
+ /* 10K off & 50K (75K) off, when (R0, R1) = (0, 0);
+ * 10K off & 50K (75K) on, when (R0, R1) = (0, 1);
+ * 10K on & 50K (75K) off, when (R0, R1) = (1, 0);
+ * 10K on & 50K (75K) on, when (R0, R1) = (1, 1)
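+ *
+ * E.g. arg = 3 turns both the 10K and the 50K (75K) resistor on,
+ * since bit 0 of arg drives R0 and bit 1 drives R1.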
+ */
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R0, arg & 1);
+ if (err)
+ return err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_R1,
+ !!(arg & 2));
+ if (err)
+ return err;
+
+ arg = pullup ? 0 : 1;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_PUPD, arg);
+
+ /* If the PUPD register is not supported for that pin, fall back to
+ * the general bias control.
+ */
+ if (err == -ENOTSUPP) {
+ if (hw->soc->bias_set) {
+ err = hw->soc->bias_set(hw, desc, pullup);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ }
+
+ return err;
+}
+
+int mtk_pinconf_adv_pull_get(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup,
+ u32 *val)
+{
+ u32 t, t2;
+ int err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PUPD, &t);
+
+ /* If the PUPD register is not supported for that pin, fall back to
+ * the general bias control.
+ */
+ if (err == -ENOTSUPP) {
+ if (hw->soc->bias_get) {
+ err = hw->soc->bias_get(hw, desc, pullup, val);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ } else {
+ /* t == 0 means pull-up for the customized PULL setup */
+ if (err)
+ return err;
+
+ if (pullup ^ !t)
+ return -EINVAL;
+ }
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R0, &t);
+ if (err)
+ return err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_R1, &t2);
+ if (err)
+ return err;
+
+ *val = (t | t2 << 1) & 0x7;
+
+ return 0;
+}
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
new file mode 100644
index 000000000000..6d24522739d9
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#ifndef __PINCTRL_MTK_COMMON_V2_H
+#define __PINCTRL_MTK_COMMON_V2_H
+
+#include <linux/gpio/driver.h>
+
+#define MTK_INPUT 0
+#define MTK_OUTPUT 1
+#define MTK_DISABLE 0
+#define MTK_ENABLE 1
+#define MTK_PULLDOWN 0
+#define MTK_PULLUP 1
+
+#define EINT_NA U16_MAX
+#define NO_EINT_SUPPORT EINT_NA
+
+#define PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, \
+ _s_bit, _x_bits, _sz_reg, _fixed) { \
+ .s_pin = _s_pin, \
+ .e_pin = _e_pin, \
+ .i_base = _i_base, \
+ .s_addr = _s_addr, \
+ .x_addrs = _x_addrs, \
+ .s_bit = _s_bit, \
+ .x_bits = _x_bits, \
+ .sz_reg = _sz_reg, \
+ .fixed = _fixed, \
+ }
+
+#define PIN_FIELD(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 0)
+
+#define PINS_FIELD(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 1)
+
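+/*
+ * For instance (illustrative values only): a 1-bit-per-pin attribute
+ * for pins 0..31, packed 32 bits per register, with consecutive
+ * registers 0x10 apart starting at offset 0x100, could be written as
+ *
+ * PIN_FIELD(0, 31, 0x100, 0x10, 0, 1)
+ */
+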
+/* List of the attributes that can be modified for the pin */
+enum {
+ PINCTRL_PIN_REG_MODE,
+ PINCTRL_PIN_REG_DIR,
+ PINCTRL_PIN_REG_DI,
+ PINCTRL_PIN_REG_DO,
+ PINCTRL_PIN_REG_SR,
+ PINCTRL_PIN_REG_SMT,
+ PINCTRL_PIN_REG_PD,
+ PINCTRL_PIN_REG_PU,
+ PINCTRL_PIN_REG_E4,
+ PINCTRL_PIN_REG_E8,
+ PINCTRL_PIN_REG_TDSEL,
+ PINCTRL_PIN_REG_RDSEL,
+ PINCTRL_PIN_REG_DRV,
+ PINCTRL_PIN_REG_PUPD,
+ PINCTRL_PIN_REG_R0,
+ PINCTRL_PIN_REG_R1,
+ PINCTRL_PIN_REG_IES,
+ PINCTRL_PIN_REG_PULLEN,
+ PINCTRL_PIN_REG_PULLSEL,
+ PINCTRL_PIN_REG_MAX,
+};
+
+/* Group the pins by the driving current */
+enum {
+ DRV_FIXED,
+ DRV_GRP0,
+ DRV_GRP1,
+ DRV_GRP2,
+ DRV_GRP3,
+ DRV_GRP4,
+ DRV_GRP_MAX,
+};
+
+static const char * const mtk_default_register_base_names[] = {
+ "base",
+};
+
+/**
+ * struct mtk_pin_field - the structure that holds the information of the
+ * field used to describe the attribute for the pin
+ * @index: the index pointing to the entry in the base address list
+ * @offset: the register offset relative to the base address
+ * @mask: the mask used to filter out the field from the register
+ * @bitpos: the start bit relative to the register
+ * @next: the indication that the field would be extended to the
+ * next register
+ */
+struct mtk_pin_field {
+ u8 index;
+ u32 offset;
+ u32 mask;
+ u8 bitpos;
+ u8 next;
+};
+
+/**
+ * struct mtk_pin_field_calc - the structure that holds the range providing
+ * the guide used to look up the relevant field
+ * @s_pin: the start pin within the range
+ * @e_pin: the end pin within the range
+ * @i_base: the index pointing to the entry in base address list
+ * @s_addr: the start address for the range
+ * @x_addrs: the address distance between two consecutive registers
+ * within the range
+ * @s_bit: the start bit for the first register within the range
+ * @x_bits: the bit distance between two consecutive pins within
+ * the range
+ * @sz_reg: the size of bits in a register
+ * @fixed: the consecutive pins share the same bits with the 1st
+ * pin
+ */
+struct mtk_pin_field_calc {
+ u16 s_pin;
+ u16 e_pin;
+ u8 i_base;
+ u32 s_addr;
+ u8 x_addrs;
+ u8 s_bit;
+ u8 x_bits;
+ u8 sz_reg;
+ u8 fixed;
+};
+
+/**
+ * struct mtk_pin_reg_calc - the structure that holds all ranges used to
+ * determine which register the pin would make use of
+ * for certain pin attribute.
+ * @range: the array of ranges used for the look-up
+ * @nranges: the number of entries in @range
+ */
+struct mtk_pin_reg_calc {
+ const struct mtk_pin_field_calc *range;
+ unsigned int nranges;
+};
+
+/**
+ * struct mtk_func_desc - the structure that provides information about
+ * all the functions for this pin
+ * @name: the name of function
+ * @muxval: the mux to the function
+ */
+struct mtk_func_desc {
+ const char *name;
+ u8 muxval;
+};
+
+/**
+ * struct mtk_eint_desc - the structure that provides the eint data
+ * for each pin
+ * @eint_m: the eint mux for this pin
+ * @eint_n: the eint number for this pin
+ */
+struct mtk_eint_desc {
+ u16 eint_m;
+ u16 eint_n;
+};
+
+/**
+ * struct mtk_pin_desc - the structure that provides information
+ * for each pin of the chip
+ * @number: unique pin number from the global pin number space
+ * @name: name for this pin
+ * @eint: the eint data for this pin
+ * @drv_n: the index within the driving group
+ * @funcs: all available functions for this pin (only used in
+ * those drivers compatible with pinctrl-mtk-common.c-like
+ * ones)
+ */
+struct mtk_pin_desc {
+ unsigned int number;
+ const char *name;
+ struct mtk_eint_desc eint;
+ u8 drv_n;
+ struct mtk_func_desc *funcs;
+};
+
+struct mtk_pinctrl_group {
+ const char *name;
+ unsigned long config;
+ unsigned pin;
+};
+
+struct mtk_pinctrl;
+
+/* struct mtk_pin_soc - the structure that holds SoC-specific data */
+struct mtk_pin_soc {
+ const struct mtk_pin_reg_calc *reg_cal;
+ const struct mtk_pin_desc *pins;
+ unsigned int npins;
+ const struct group_desc *grps;
+ unsigned int ngrps;
+ const struct function_desc *funcs;
+ unsigned int nfuncs;
+ const struct mtk_eint_regs *eint_regs;
+ const struct mtk_eint_hw *eint_hw;
+
+ /* Specific parameters per SoC */
+ u8 gpio_m;
+ bool ies_present;
+ const char * const *base_names;
+ unsigned int nbase_names;
+
+ /* Specific pinconfig operations */
+ int (*bias_disable_set)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc);
+ int (*bias_disable_get)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *res);
+ int (*bias_set)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup);
+ int (*bias_get)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup, int *res);
+
+ int (*drive_set)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg);
+ int (*drive_get)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *val);
+
+ int (*adv_pull_set)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup,
+ u32 arg);
+ int (*adv_pull_get)(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup,
+ u32 *val);
+
+ /* Specific driver data */
+ void *driver_data;
+};
+
+struct mtk_pinctrl {
+ struct pinctrl_dev *pctrl;
+ void __iomem **base;
+ u8 nbase;
+ struct device *dev;
+ struct gpio_chip chip;
+ const struct mtk_pin_soc *soc;
+ struct mtk_eint *eint;
+ struct mtk_pinctrl_group *groups;
+ const char **grp_names;
+};
+
+void mtk_rmw(struct mtk_pinctrl *pctl, u8 i, u32 reg, u32 mask, u32 set);
+
+int mtk_hw_set_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
+ int field, int value);
+int mtk_hw_get_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
+ int field, int *value);
+
+int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev);
+
+int mtk_pinconf_bias_disable_set(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc);
+int mtk_pinconf_bias_disable_get(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *res);
+int mtk_pinconf_bias_set(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup);
+int mtk_pinconf_bias_get(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup,
+ int *res);
+
+int mtk_pinconf_bias_disable_set_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc);
+int mtk_pinconf_bias_disable_get_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ int *res);
+int mtk_pinconf_bias_set_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup);
+int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup,
+ int *res);
+
+int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg);
+int mtk_pinconf_drive_get(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *val);
+
+int mtk_pinconf_drive_set_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, u32 arg);
+int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, int *val);
+
+int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup,
+ u32 arg);
+int mtk_pinconf_adv_pull_get(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc, bool pullup,
+ u32 *val);
+
+#endif /* __PINCTRL_MTK_COMMON_V2_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 16ff56f93501..071623873ca5 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -514,8 +514,8 @@ static int mtk_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
pins = of_find_property(node, "pinmux", NULL);
if (!pins) {
- dev_err(pctl->dev, "missing pins property in node %s .\n",
- node->name);
+ dev_err(pctl->dev, "missing pins property in node %pOFn .\n",
+ node);
return -EINVAL;
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt6765.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6765.h
new file mode 100644
index 000000000000..772563720461
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6765.h
@@ -0,0 +1,1754 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: ZH Chen <zh.chen@mediatek.com>
+ *
+ */
+
+#ifndef __PINCTRL_MTK_MT6765_H
+#define __PINCTRL_MTK_MT6765_H
+
+#include "pinctrl-paris.h"
+
+static struct mtk_pin_desc mtk_pins_mt6765[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "MD_INT0"),
+ MTK_FUNCTION(4, "I2S0_MCK"),
+ MTK_FUNCTION(5, "MD_UTXD1"),
+ MTK_FUNCTION(6, "TP_GPIO0_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B9")
+ ),
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(4, "I2S0_BCK"),
+ MTK_FUNCTION(5, "MD_URXD1"),
+ MTK_FUNCTION(6, "TP_GPIO1_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B10")
+ ),
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "UCTS0"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(3, "UTXD1"),
+ MTK_FUNCTION(4, "I2S0_LRCK"),
+ MTK_FUNCTION(5, "ANT_SEL6"),
+ MTK_FUNCTION(6, "TP_GPIO2_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B11")
+ ),
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "URTS0"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(3, "URXD1"),
+ MTK_FUNCTION(4, "I2S0_DI"),
+ MTK_FUNCTION(5, "ANT_SEL7"),
+ MTK_FUNCTION(6, "TP_GPIO3_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B12")
+ ),
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "SPI1_B_MI"),
+ MTK_FUNCTION(2, "SCP_SPI1_MI"),
+ MTK_FUNCTION(3, "UCTS0"),
+ MTK_FUNCTION(4, "I2S3_MCK"),
+ MTK_FUNCTION(5, "SSPM_URXD_AO"),
+ MTK_FUNCTION(6, "TP_GPIO4_AO")
+ ),
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "SPI1_B_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI1_CS"),
+ MTK_FUNCTION(3, "URTS0"),
+ MTK_FUNCTION(4, "I2S3_BCK"),
+ MTK_FUNCTION(5, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(6, "TP_GPIO5_AO")
+ ),
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "SPI1_B_MO"),
+ MTK_FUNCTION(2, "SCP_SPI1_MO"),
+ MTK_FUNCTION(3, "PWM0"),
+ MTK_FUNCTION(4, "I2S3_LRCK"),
+ MTK_FUNCTION(5, "MD_UTXD0"),
+ MTK_FUNCTION(6, "TP_GPIO6_AO")
+ ),
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "SPI1_B_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI1_CK"),
+ MTK_FUNCTION(3, "PWM1"),
+ MTK_FUNCTION(4, "I2S3_DO"),
+ MTK_FUNCTION(5, "MD_URXD0"),
+ MTK_FUNCTION(6, "TP_GPIO7_AO")
+ ),
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "SRCLKENAI0"),
+ MTK_FUNCTION(3, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(4, "ANT_SEL3"),
+ MTK_FUNCTION(5, "MFG_JTAG_TRSTN"),
+ MTK_FUNCTION(6, "I2S2_MCK"),
+ MTK_FUNCTION(7, "JTRSTN_SEL1")
+ ),
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "MD_INT0"),
+ MTK_FUNCTION(2, "CMMCLK2"),
+ MTK_FUNCTION(3, "CONN_MCU_TRST_B"),
+ MTK_FUNCTION(4, "IDDIG"),
+ MTK_FUNCTION(5, "SDA_6306"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "DBG_MON_B22")
+ ),
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(3, "CONN_MCU_DBGI_N"),
+ MTK_FUNCTION(4, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(6, "CMVREF1"),
+ MTK_FUNCTION(7, "DBG_MON_B23")
+ ),
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(3, "ANT_SEL6"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(6, "UCTS1"),
+ MTK_FUNCTION(7, "DBG_MON_B24")
+ ),
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "PWM0"),
+ MTK_FUNCTION(2, "SRCLKENAI1"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "MD_INT0"),
+ MTK_FUNCTION(5, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(6, "URTS1")
+ ),
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "ANT_SEL0"),
+ MTK_FUNCTION(2, "SPI4_MI"),
+ MTK_FUNCTION(3, "SCP_SPI0_MI"),
+ MTK_FUNCTION(4, "MD_URXD0"),
+ MTK_FUNCTION(5, "CLKM0"),
+ MTK_FUNCTION(6, "I2S0_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A0")
+ ),
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "ANT_SEL1"),
+ MTK_FUNCTION(2, "SPI4_CSB"),
+ MTK_FUNCTION(3, "SCP_SPI0_CS"),
+ MTK_FUNCTION(4, "MD_UTXD0"),
+ MTK_FUNCTION(5, "CLKM1"),
+ MTK_FUNCTION(6, "I2S0_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A1")
+ ),
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "ANT_SEL2"),
+ MTK_FUNCTION(2, "SPI4_MO"),
+ MTK_FUNCTION(3, "SCP_SPI0_MO"),
+ MTK_FUNCTION(4, "MD_URXD1"),
+ MTK_FUNCTION(5, "CLKM2"),
+ MTK_FUNCTION(6, "I2S0_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A2")
+ ),
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "ANT_SEL3"),
+ MTK_FUNCTION(2, "SPI4_CLK"),
+ MTK_FUNCTION(3, "SCP_SPI0_CK"),
+ MTK_FUNCTION(4, "MD_UTXD1"),
+ MTK_FUNCTION(5, "CLKM3"),
+ MTK_FUNCTION(6, "I2S3_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A3")
+ ),
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "ANT_SEL4"),
+ MTK_FUNCTION(2, "SPI2_MO"),
+ MTK_FUNCTION(3, "SCP_SPI0_MO"),
+ MTK_FUNCTION(4, "PWM1"),
+ MTK_FUNCTION(5, "IDDIG"),
+ MTK_FUNCTION(6, "I2S0_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A4")
+ ),
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "ANT_SEL5"),
+ MTK_FUNCTION(2, "SPI2_CLK"),
+ MTK_FUNCTION(3, "SCP_SPI0_CK"),
+ MTK_FUNCTION(4, "MD_INT0"),
+ MTK_FUNCTION(5, "USB_DRVVBUS"),
+ MTK_FUNCTION(6, "I2S3_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A5")
+ ),
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "ANT_SEL6"),
+ MTK_FUNCTION(2, "SPI2_MI"),
+ MTK_FUNCTION(3, "SCP_SPI0_MI"),
+ MTK_FUNCTION(4, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(6, "I2S3_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A6")
+ ),
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "ANT_SEL7"),
+ MTK_FUNCTION(2, "SPI2_CSB"),
+ MTK_FUNCTION(3, "SCP_SPI0_CS"),
+ MTK_FUNCTION(4, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(5, "CMMCLK3"),
+ MTK_FUNCTION(6, "I2S3_DO"),
+ MTK_FUNCTION(7, "DBG_MON_A7")
+ ),
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "SPI3_MI"),
+ MTK_FUNCTION(2, "SRCLKENAI1"),
+ MTK_FUNCTION(3, "DAP_MD32_SWD"),
+ MTK_FUNCTION(4, "CMVREF0"),
+ MTK_FUNCTION(5, "SCP_SPI0_MI"),
+ MTK_FUNCTION(6, "I2S2_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A8")
+ ),
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "SPI3_CSB"),
+ MTK_FUNCTION(2, "SRCLKENAI0"),
+ MTK_FUNCTION(3, "DAP_MD32_SWCK"),
+ MTK_FUNCTION(4, "CMVREF1"),
+ MTK_FUNCTION(5, "SCP_SPI0_CS"),
+ MTK_FUNCTION(6, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A9")
+ ),
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "SPI3_MO"),
+ MTK_FUNCTION(2, "PWM0"),
+ MTK_FUNCTION(3, "KPROW7"),
+ MTK_FUNCTION(4, "ANT_SEL3"),
+ MTK_FUNCTION(5, "SCP_SPI0_MO"),
+ MTK_FUNCTION(6, "I2S2_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A10")
+ ),
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "SPI3_CLK"),
+ MTK_FUNCTION(2, "UDI_TCK"),
+ MTK_FUNCTION(3, "IO_JTAG_TCK"),
+ MTK_FUNCTION(4, "SSPM_JTAG_TCK"),
+ MTK_FUNCTION(5, "SCP_SPI0_CK"),
+ MTK_FUNCTION(6, "I2S2_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A11")
+ ),
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "SPI1_A_MI"),
+ MTK_FUNCTION(2, "UDI_TMS"),
+ MTK_FUNCTION(3, "IO_JTAG_TMS"),
+ MTK_FUNCTION(4, "SSPM_JTAG_TMS"),
+ MTK_FUNCTION(5, "KPROW3"),
+ MTK_FUNCTION(6, "I2S1_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A12")
+ ),
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "SPI1_A_CSB"),
+ MTK_FUNCTION(2, "UDI_TDI"),
+ MTK_FUNCTION(3, "IO_JTAG_TDI"),
+ MTK_FUNCTION(4, "SSPM_JTAG_TDI"),
+ MTK_FUNCTION(5, "KPROW4"),
+ MTK_FUNCTION(6, "I2S1_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A13")
+ ),
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "SPI1_A_MO"),
+ MTK_FUNCTION(2, "UDI_TDO"),
+ MTK_FUNCTION(3, "IO_JTAG_TDO"),
+ MTK_FUNCTION(4, "SSPM_JTAG_TDO"),
+ MTK_FUNCTION(5, "KPROW5"),
+ MTK_FUNCTION(6, "I2S1_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A14")
+ ),
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "SPI1_A_CLK"),
+ MTK_FUNCTION(2, "UDI_NTRST"),
+ MTK_FUNCTION(3, "IO_JTAG_TRSTN"),
+ MTK_FUNCTION(4, "SSPM_JTAG_TRSTN"),
+ MTK_FUNCTION(5, "KPROW6"),
+ MTK_FUNCTION(6, "I2S1_DO"),
+ MTK_FUNCTION(7, "DBG_MON_A15")
+ ),
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "IO_JTAG_TCK"),
+ MTK_FUNCTION(3, "UDI_TCK"),
+ MTK_FUNCTION(4, "CONN_DSP_JCK"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TCK"),
+ MTK_FUNCTION(6, "CONN_MCU_AICE_TCKC"),
+ MTK_FUNCTION(7, "DAP_MD32_SWCK")
+ ),
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(2, "IO_JTAG_TMS"),
+ MTK_FUNCTION(3, "UDI_TMS"),
+ MTK_FUNCTION(4, "CONN_DSP_JMS"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TMS"),
+ MTK_FUNCTION(6, "CONN_MCU_AICE_TMSC"),
+ MTK_FUNCTION(7, "DAP_MD32_SWD")
+ ),
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "MSDC1_DAT3")
+ ),
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "IO_JTAG_TDI"),
+ MTK_FUNCTION(3, "UDI_TDI"),
+ MTK_FUNCTION(4, "CONN_DSP_JDI"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "IO_JTAG_TRSTN"),
+ MTK_FUNCTION(3, "UDI_NTRST"),
+ MTK_FUNCTION(4, "CONN_DSP_JINTP"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "IO_JTAG_TDO"),
+ MTK_FUNCTION(3, "UDI_TDO"),
+ MTK_FUNCTION(4, "CONN_DSP_JDO"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(2, "CCU_JTAG_TDO"),
+ MTK_FUNCTION(3, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(5, "SCP_JTAG_TDO"),
+ MTK_FUNCTION(6, "CONN_DSP_JDO"),
+ MTK_FUNCTION(7, "DBG_MON_A16")
+ ),
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(2, "CCU_JTAG_TMS"),
+ MTK_FUNCTION(3, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(4, "CONN_MCU_AICE_TMSC"),
+ MTK_FUNCTION(5, "SCP_JTAG_TMS"),
+ MTK_FUNCTION(6, "CONN_DSP_JMS"),
+ MTK_FUNCTION(7, "DBG_MON_A17")
+ ),
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(2, "CCU_JTAG_TDI"),
+ MTK_FUNCTION(3, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(5, "SCP_JTAG_TDI"),
+ MTK_FUNCTION(6, "CONN_DSP_JDI"),
+ MTK_FUNCTION(7, "DBG_MON_A18")
+ ),
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(3, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(7, "DBG_MON_A19")
+ ),
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(2, "CCU_JTAG_TCK"),
+ MTK_FUNCTION(3, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(4, "CONN_MCU_AICE_TCKC"),
+ MTK_FUNCTION(5, "SCP_JTAG_TCK"),
+ MTK_FUNCTION(6, "CONN_DSP_JCK"),
+ MTK_FUNCTION(7, "DBG_MON_A20")
+ ),
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(2, "CCU_JTAG_TRST"),
+ MTK_FUNCTION(3, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(5, "SCP_JTAG_TRSTN"),
+ MTK_FUNCTION(6, "CONN_DSP_JINTP"),
+ MTK_FUNCTION(7, "DBG_MON_A21")
+ ),
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(2, "URXD1"),
+ MTK_FUNCTION(3, "UCTS0"),
+ MTK_FUNCTION(4, "KPCOL2"),
+ MTK_FUNCTION(5, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(6, "MD_INT0"),
+ MTK_FUNCTION(7, "DBG_MON_A22")
+ ),
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "USB_DRVVBUS"),
+ MTK_FUNCTION(2, "UTXD1"),
+ MTK_FUNCTION(3, "URTS0"),
+ MTK_FUNCTION(4, "KPROW2"),
+ MTK_FUNCTION(5, "SSPM_URXD_AO"),
+ MTK_FUNCTION(6, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(7, "DBG_MON_A23")
+ ),
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "DISP_PWM"),
+ MTK_FUNCTION(7, "DBG_MON_A24")
+ ),
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "DSI_TE"),
+ MTK_FUNCTION(7, "DBG_MON_A25")
+ ),
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "LCM_RST"),
+ MTK_FUNCTION(7, "DBG_MON_A26")
+ ),
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(2, "UCTS0"),
+ MTK_FUNCTION(3, "UCTS1"),
+ MTK_FUNCTION(4, "IDDIG"),
+ MTK_FUNCTION(5, "SCL_6306"),
+ MTK_FUNCTION(6, "TP_UCTS1_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A27")
+ ),
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(2, "URTS0"),
+ MTK_FUNCTION(3, "URTS1"),
+ MTK_FUNCTION(4, "USB_DRVVBUS"),
+ MTK_FUNCTION(5, "SDA_6306"),
+ MTK_FUNCTION(6, "TP_URTS1_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A28")
+ ),
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "SCL5"),
+ MTK_FUNCTION(7, "DBG_MON_A29")
+ ),
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "SDA5"),
+ MTK_FUNCTION(7, "DBG_MON_A30")
+ ),
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "SCL3"),
+ MTK_FUNCTION(2, "URXD1"),
+ MTK_FUNCTION(3, "MD_URXD1"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "IDDIG"),
+ MTK_FUNCTION(6, "TP_URXD1_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A31")
+ ),
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "SDA3"),
+ MTK_FUNCTION(2, "UTXD1"),
+ MTK_FUNCTION(3, "MD_UTXD1"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "USB_DRVVBUS"),
+ MTK_FUNCTION(6, "TP_UTXD1_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A32")
+ ),
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(0, 52),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "BPI_BUS15")
+ ),
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(0, 53),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "BPI_BUS13")
+ ),
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(0, 54),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "BPI_BUS12")
+ ),
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(0, 55),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "BPI_BUS8")
+ ),
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(0, 56),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "BPI_BUS9"),
+ MTK_FUNCTION(2, "SCL_6306")
+ ),
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(0, 57),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "BPI_BUS10"),
+ MTK_FUNCTION(2, "SDA_6306")
+ ),
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(0, 58),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "RFIC0_BSI_D2")
+ ),
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(0, 59),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "RFIC0_BSI_D1")
+ ),
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "RFIC0_BSI_D0")
+ ),
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "MIPI1_SDATA")
+ ),
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "MIPI1_SCLK")
+ ),
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "MIPI0_SDATA")
+ ),
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "MIPI0_SCLK")
+ ),
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "MIPI3_SDATA"),
+ MTK_FUNCTION(2, "BPI_BUS16")
+ ),
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(0, 66),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "MIPI3_SCLK"),
+ MTK_FUNCTION(2, "BPI_BUS17")
+ ),
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(0, 67),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "MIPI2_SDATA")
+ ),
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(0, 68),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "MIPI2_SCLK")
+ ),
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(0, 69),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "BPI_BUS7")
+ ),
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "BPI_BUS6")
+ ),
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "BPI_BUS5")
+ ),
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "BPI_BUS4")
+ ),
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "BPI_BUS3")
+ ),
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "BPI_BUS2")
+ ),
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(0, 75),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "BPI_BUS1")
+ ),
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(0, 76),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "BPI_BUS0")
+ ),
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(0, 77),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "BPI_BUS14")
+ ),
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(0, 78),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "BPI_BUS11")
+ ),
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "BPI_PA_VM1"),
+ MTK_FUNCTION(2, "MIPI4_SDATA")
+ ),
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "BPI_PA_VM0"),
+ MTK_FUNCTION(2, "MIPI4_SCLK")
+ ),
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "SDA1"),
+ MTK_FUNCTION(7, "DBG_MON_B0")
+ ),
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "SDA0"),
+ MTK_FUNCTION(7, "DBG_MON_B1")
+ ),
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "SCL0"),
+ MTK_FUNCTION(7, "DBG_MON_B2")
+ ),
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "SCL1"),
+ MTK_FUNCTION(7, "DBG_MON_B3")
+ ),
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "RFIC0_BSI_EN")
+ ),
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "RFIC0_BSI_CK")
+ ),
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(2, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(3, "CMVREF0"),
+ MTK_FUNCTION(4, "MD_URXD0"),
+ MTK_FUNCTION(5, "AGPS_SYNC"),
+ MTK_FUNCTION(6, "EXT_FRAME_SYNC")
+ ),
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "CMMCLK3"),
+ MTK_FUNCTION(2, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(3, "CMVREF1"),
+ MTK_FUNCTION(4, "MD_UTXD0"),
+ MTK_FUNCTION(5, "AGPS_SYNC"),
+ MTK_FUNCTION(6, "DVFSRC_EXT_REQ")
+ ),
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "PWM2"),
+ MTK_FUNCTION(3, "MD_INT0"),
+ MTK_FUNCTION(4, "USB_DRVVBUS"),
+ MTK_FUNCTION(5, "SCL_6306"),
+ MTK_FUNCTION(6, "TP_GPIO4_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B21")
+ ),
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "PWM0"),
+ MTK_FUNCTION(3, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(4, "ANT_SEL4"),
+ MTK_FUNCTION(5, "USB_DRVVBUS"),
+ MTK_FUNCTION(6, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B4")
+ ),
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "PWM2"),
+ MTK_FUNCTION(3, "MD_INT0"),
+ MTK_FUNCTION(4, "ANT_SEL5"),
+ MTK_FUNCTION(5, "IDDIG"),
+ MTK_FUNCTION(6, "I2S2_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B5")
+ ),
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "KPROW0"),
+ MTK_FUNCTION(5, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(6, "I2S2_DI"),
+ MTK_FUNCTION(7, "DBG_MON_B6")
+ ),
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "KPCOL0"),
+ MTK_FUNCTION(7, "DBG_MON_B7")
+ ),
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(5, "CMFLASH"),
+ MTK_FUNCTION(6, "CMVREF0"),
+ MTK_FUNCTION(7, "DBG_MON_B8")
+ ),
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "UTXD0"),
+ MTK_FUNCTION(3, "MD_URXD0"),
+ MTK_FUNCTION(4, "PTA_RXD"),
+ MTK_FUNCTION(5, "SSPM_URXD_AO"),
+ MTK_FUNCTION(6, "WIFI_RXD")
+ ),
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "URXD0"),
+ MTK_FUNCTION(3, "MD_UTXD0"),
+ MTK_FUNCTION(4, "PTA_TXD"),
+ MTK_FUNCTION(5, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(6, "WIFI_TXD")
+ ),
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "UCTS0"),
+ MTK_FUNCTION(2, "I2S1_MCK"),
+ MTK_FUNCTION(3, "CONN_MCU_TDO"),
+ MTK_FUNCTION(4, "SPI5_MI"),
+ MTK_FUNCTION(5, "SCL_6306"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TDO"),
+ MTK_FUNCTION(7, "DBG_MON_B15")
+ ),
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "URTS0"),
+ MTK_FUNCTION(2, "I2S1_BCK"),
+ MTK_FUNCTION(3, "CONN_MCU_TMS"),
+ MTK_FUNCTION(4, "SPI5_CSB"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TMS"),
+ MTK_FUNCTION(7, "DBG_MON_B16")
+ ),
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(0, 99),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "CMMCLK0"),
+ MTK_FUNCTION(4, "AUXIF_CLK"),
+ MTK_FUNCTION(5, "PTA_RXD"),
+ MTK_FUNCTION(6, "CONN_UART0_RXD"),
+ MTK_FUNCTION(7, "DBG_MON_B17")
+ ),
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(0, 100),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "CMMCLK1"),
+ MTK_FUNCTION(4, "AUXIF_ST"),
+ MTK_FUNCTION(5, "PTA_TXD"),
+ MTK_FUNCTION(6, "CONN_UART0_TXD"),
+ MTK_FUNCTION(7, "DBG_MON_B18")
+ ),
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(0, 101),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "CMFLASH"),
+ MTK_FUNCTION(2, "I2S1_LRCK"),
+ MTK_FUNCTION(3, "CONN_MCU_TCK"),
+ MTK_FUNCTION(4, "SPI5_MO"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TCK"),
+ MTK_FUNCTION(7, "DBG_MON_B19")
+ ),
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(0, 102),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "CMVREF0"),
+ MTK_FUNCTION(2, "I2S1_DO"),
+ MTK_FUNCTION(3, "CONN_MCU_TDI"),
+ MTK_FUNCTION(4, "SPI5_CLK"),
+ MTK_FUNCTION(5, "AGPS_SYNC"),
+ MTK_FUNCTION(6, "MCUPM_JTAG_TDI"),
+ MTK_FUNCTION(7, "DBG_MON_B20")
+ ),
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "SCL2"),
+ MTK_FUNCTION(2, "TP_UTXD1_AO"),
+ MTK_FUNCTION(3, "MD_UTXD0"),
+ MTK_FUNCTION(4, "MD_UTXD1"),
+ MTK_FUNCTION(5, "TP_URTS2_AO"),
+ MTK_FUNCTION(6, "WIFI_TXD"),
+ MTK_FUNCTION(7, "DBG_MON_B25")
+ ),
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(0, 104),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "SDA2"),
+ MTK_FUNCTION(2, "TP_URXD1_AO"),
+ MTK_FUNCTION(3, "MD_URXD0"),
+ MTK_FUNCTION(4, "MD_URXD1"),
+ MTK_FUNCTION(5, "TP_UCTS2_AO"),
+ MTK_FUNCTION(6, "WIFI_RXD"),
+ MTK_FUNCTION(7, "DBG_MON_B26")
+ ),
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(0, 105),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "SCL4"),
+ MTK_FUNCTION(3, "MD_UTXD1"),
+ MTK_FUNCTION(4, "MD_UTXD0"),
+ MTK_FUNCTION(5, "TP_UTXD2_AO"),
+ MTK_FUNCTION(6, "PTA_TXD"),
+ MTK_FUNCTION(7, "DBG_MON_B27")
+ ),
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "SDA4"),
+ MTK_FUNCTION(3, "MD_URXD1"),
+ MTK_FUNCTION(4, "MD_URXD0"),
+ MTK_FUNCTION(5, "TP_URXD2_AO"),
+ MTK_FUNCTION(6, "PTA_RXD"),
+ MTK_FUNCTION(7, "DBG_MON_B28")
+ ),
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "MD_UTXD0"),
+ MTK_FUNCTION(3, "SDA_6306"),
+ MTK_FUNCTION(4, "KPCOL3"),
+ MTK_FUNCTION(5, "CMVREF0"),
+ MTK_FUNCTION(6, "URTS0"),
+ MTK_FUNCTION(7, "DBG_MON_B29")
+ ),
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "CMMCLK2"),
+ MTK_FUNCTION(2, "MD_INT0"),
+ MTK_FUNCTION(3, "CONN_MCU_DBGACK_N"),
+ MTK_FUNCTION(4, "KPCOL4"),
+ MTK_FUNCTION(6, "I2S3_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B30")
+ ),
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "MD_URXD0"),
+ MTK_FUNCTION(3, "ANT_SEL7"),
+ MTK_FUNCTION(4, "KPCOL5"),
+ MTK_FUNCTION(5, "CMVREF1"),
+ MTK_FUNCTION(6, "UCTS0"),
+ MTK_FUNCTION(7, "DBG_MON_B31")
+ ),
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "ANT_SEL0"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "PWM3"),
+ MTK_FUNCTION(4, "MD_INT0"),
+ MTK_FUNCTION(5, "IDDIG"),
+ MTK_FUNCTION(6, "I2S3_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B13")
+ ),
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "ANT_SEL1"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(3, "PWM4"),
+ MTK_FUNCTION(4, "PTA_RXD"),
+ MTK_FUNCTION(5, "CMVREF0"),
+ MTK_FUNCTION(6, "I2S3_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B14")
+ ),
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "ANT_SEL2"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(3, "PWM5"),
+ MTK_FUNCTION(4, "PTA_TXD"),
+ MTK_FUNCTION(5, "CMVREF1"),
+ MTK_FUNCTION(6, "I2S3_DO")
+ ),
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "CONN_TOP_CLK")
+ ),
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "CONN_TOP_DATA")
+ ),
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "CONN_BT_CLK")
+ ),
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "CONN_BT_DATA")
+ ),
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL0")
+ ),
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL1")
+ ),
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL2")
+ ),
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "CONN_WB_PTA")
+ ),
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "CONN_HRST_B")
+ ),
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "MSDC0_CMD"),
+ MTK_FUNCTION(2, "MSDC0_CMD")
+ ),
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "MSDC0_DAT0"),
+ MTK_FUNCTION(2, "MSDC0_DAT4")
+ ),
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "MSDC0_CLK"),
+ MTK_FUNCTION(2, "MSDC0_CLK")
+ ),
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "MSDC0_DAT2"),
+ MTK_FUNCTION(2, "MSDC0_DAT5")
+ ),
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "MSDC0_DAT4"),
+ MTK_FUNCTION(2, "MSDC0_DAT2")
+ ),
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "MSDC0_DAT6"),
+ MTK_FUNCTION(2, "MSDC0_DAT1")
+ ),
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "MSDC0_DAT1"),
+ MTK_FUNCTION(2, "MSDC0_DAT6")
+ ),
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "MSDC0_DAT5"),
+ MTK_FUNCTION(2, "MSDC0_DAT0")
+ ),
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "MSDC0_DAT7"),
+ MTK_FUNCTION(2, "MSDC0_DAT7")
+ ),
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "MSDC0_DSL"),
+ MTK_FUNCTION(2, "MSDC0_DSL")
+ ),
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "MSDC0_DAT3"),
+ MTK_FUNCTION(2, "MSDC0_DAT3")
+ ),
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "MSDC0_RSTB"),
+ MTK_FUNCTION(2, "MSDC0_RSTB")
+ ),
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(2, "AUD_CLK_MISO"),
+ MTK_FUNCTION(3, "I2S1_MCK")
+ ),
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "AUD_SYNC_MOSI"),
+ MTK_FUNCTION(2, "AUD_SYNC_MISO"),
+ MTK_FUNCTION(3, "I2S1_BCK")
+ ),
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
+ MTK_FUNCTION(2, "AUD_DAT_MISO0"),
+ MTK_FUNCTION(3, "I2S1_LRCK")
+ ),
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
+ MTK_FUNCTION(2, "AUD_DAT_MISO1"),
+ MTK_FUNCTION(3, "I2S1_DO")
+ ),
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "AUD_CLK_MISO"),
+ MTK_FUNCTION(2, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(3, "I2S2_MCK")
+ ),
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "AUD_SYNC_MISO"),
+ MTK_FUNCTION(2, "AUD_SYNC_MOSI"),
+ MTK_FUNCTION(3, "I2S2_BCK")
+ ),
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO0"),
+ MTK_FUNCTION(2, "AUD_DAT_MOSI0"),
+ MTK_FUNCTION(3, "I2S2_LRCK")
+ ),
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO1"),
+ MTK_FUNCTION(2, "AUD_DAT_MOSI1"),
+ MTK_FUNCTION(3, "I2S2_DI")
+ ),
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO144"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MO")
+ ),
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO145"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+ ),
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO146"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MI")
+ ),
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO147"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+ ),
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO148"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO149"),
+ MTK_FUNCTION(1, "SRCLKENA1")
+ ),
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO150"),
+ MTK_FUNCTION(1, "PWM0"),
+ MTK_FUNCTION(2, "CMFLASH"),
+ MTK_FUNCTION(3, "ANT_SEL3"),
+ MTK_FUNCTION(5, "MD_URXD0"),
+ MTK_FUNCTION(6, "TP_URXD2_AO")
+ ),
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO151"),
+ MTK_FUNCTION(1, "PWM1"),
+ MTK_FUNCTION(2, "CMVREF0"),
+ MTK_FUNCTION(3, "ANT_SEL4"),
+ MTK_FUNCTION(5, "MD_UTXD0"),
+ MTK_FUNCTION(6, "TP_UTXD2_AO")
+ ),
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO152"),
+ MTK_FUNCTION(1, "PWM2"),
+ MTK_FUNCTION(2, "CMVREF1"),
+ MTK_FUNCTION(3, "ANT_SEL5"),
+ MTK_FUNCTION(5, "MD_URXD1"),
+ MTK_FUNCTION(6, "TP_UCTS1_AO")
+ ),
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO153"),
+ MTK_FUNCTION(1, "PWM3"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "ANT_SEL6"),
+ MTK_FUNCTION(5, "MD_UTXD1"),
+ MTK_FUNCTION(6, "TP_URTS1_AO")
+ ),
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO154"),
+ MTK_FUNCTION(1, "PWM5"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(5, "PTA_TXD"),
+ MTK_FUNCTION(6, "CONN_UART0_TXD")
+ ),
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO155"),
+ MTK_FUNCTION(1, "SPI0_MI"),
+ MTK_FUNCTION(2, "IDDIG"),
+ MTK_FUNCTION(3, "AGPS_SYNC"),
+ MTK_FUNCTION(4, "TP_GPIO0_AO"),
+ MTK_FUNCTION(5, "MFG_JTAG_TDO"),
+ MTK_FUNCTION(6, "DFD_TDO"),
+ MTK_FUNCTION(7, "JTDO_SEL1")
+ ),
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO156"),
+ MTK_FUNCTION(1, "SPI0_CSB"),
+ MTK_FUNCTION(2, "USB_DRVVBUS"),
+ MTK_FUNCTION(3, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(4, "TP_GPIO1_AO"),
+ MTK_FUNCTION(5, "MFG_JTAG_TMS"),
+ MTK_FUNCTION(6, "DFD_TMS"),
+ MTK_FUNCTION(7, "JTMS_SEL1")
+ ),
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO157"),
+ MTK_FUNCTION(1, "SPI0_MO"),
+ MTK_FUNCTION(2, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(3, "CLKM0"),
+ MTK_FUNCTION(4, "TP_GPIO2_AO"),
+ MTK_FUNCTION(5, "MFG_JTAG_TDI"),
+ MTK_FUNCTION(6, "DFD_TDI"),
+ MTK_FUNCTION(7, "JTDI_SEL1")
+ ),
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO158"),
+ MTK_FUNCTION(1, "SPI0_CLK"),
+ MTK_FUNCTION(2, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "TP_GPIO3_AO"),
+ MTK_FUNCTION(5, "MFG_JTAG_TCK"),
+ MTK_FUNCTION(6, "DFD_TCK_XI"),
+ MTK_FUNCTION(7, "JTCK_SEL1")
+ ),
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO159"),
+ MTK_FUNCTION(1, "PWM4"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(3, "ANT_SEL7"),
+ MTK_FUNCTION(5, "PTA_RXD"),
+ MTK_FUNCTION(6, "CONN_UART0_RXD")
+ ),
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO160"),
+ MTK_FUNCTION(1, "CLKM0"),
+ MTK_FUNCTION(2, "PWM2"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "TP_GPIO5_AO"),
+ MTK_FUNCTION(5, "AGPS_SYNC"),
+ MTK_FUNCTION(6, "DVFSRC_EXT_REQ")
+ ),
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO161"),
+ MTK_FUNCTION(1, "SCL6"),
+ MTK_FUNCTION(2, "SCL_6306"),
+ MTK_FUNCTION(3, "TP_GPIO6_AO"),
+ MTK_FUNCTION(4, "KPCOL6"),
+ MTK_FUNCTION(5, "PTA_RXD"),
+ MTK_FUNCTION(6, "CONN_UART0_RXD")
+ ),
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO162"),
+ MTK_FUNCTION(1, "SDA6"),
+ MTK_FUNCTION(2, "SDA_6306"),
+ MTK_FUNCTION(3, "TP_GPIO7_AO"),
+ MTK_FUNCTION(4, "KPCOL7"),
+ MTK_FUNCTION(5, "PTA_TXD"),
+ MTK_FUNCTION(6, "CONN_UART0_TXD")
+ ),
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO163")
+ ),
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO164")
+ ),
+ MTK_PIN(
+ 165, "GPIO165",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO165")
+ ),
+ MTK_PIN(
+ 166, "GPIO166",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO166")
+ ),
+ MTK_PIN(
+ 167, "GPIO167",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO167")
+ ),
+ MTK_PIN(
+ 168, "GPIO168",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO168")
+ ),
+ MTK_PIN(
+ 169, "GPIO169",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO169")
+ ),
+ MTK_PIN(
+ 170, "GPIO170",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO170")
+ ),
+ MTK_PIN(
+ 171, "GPIO171",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO171")
+ ),
+ MTK_PIN(
+ 172, "GPIO172",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO172")
+ ),
+ MTK_PIN(
+ 173, "GPIO173",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO173")
+ ),
+ MTK_PIN(
+ 174, "GPIO174",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO174")
+ ),
+ MTK_PIN(
+ 175, "GPIO175",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO175")
+ ),
+ MTK_PIN(
+ 176, "GPIO176",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO176")
+ ),
+ MTK_PIN(
+ 177, "GPIO177",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO177")
+ ),
+ MTK_PIN(
+ 178, "GPIO178",
+ MTK_EINT_FUNCTION(0, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO178")
+ ),
+ MTK_PIN(
+ 179, "GPIO179",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO179")
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT6765_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8183.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8183.h
new file mode 100644
index 000000000000..79adf5b8a186
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8183.h
@@ -0,0 +1,1916 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#ifndef __PINCTRL_MTK_MT8183_H
+#define __PINCTRL_MTK_MT8183_H
+
+#include "pinctrl-paris.h"
+
+static struct mtk_pin_desc mtk_pins_mt8183[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "MRG_SYNC"),
+ MTK_FUNCTION(2, "PCM0_SYNC"),
+ MTK_FUNCTION(3, "TP_GPIO0_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "SCP_SPI2_CS"),
+ MTK_FUNCTION(6, "I2S3_MCK"),
+ MTK_FUNCTION(7, "SPI2_CSB")
+ ),
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "MRG_CLK"),
+ MTK_FUNCTION(2, "PCM0_CLK"),
+ MTK_FUNCTION(3, "TP_GPIO1_AO"),
+ MTK_FUNCTION(4, "CLKM3"),
+ MTK_FUNCTION(5, "SCP_SPI2_MO"),
+ MTK_FUNCTION(6, "I2S3_BCK"),
+ MTK_FUNCTION(7, "SPI2_MO")
+ ),
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "MRG_DO"),
+ MTK_FUNCTION(2, "PCM0_DO"),
+ MTK_FUNCTION(3, "TP_GPIO2_AO"),
+ MTK_FUNCTION(4, "SCL6"),
+ MTK_FUNCTION(5, "SCP_SPI2_CK"),
+ MTK_FUNCTION(6, "I2S3_LRCK"),
+ MTK_FUNCTION(7, "SPI2_CLK")
+ ),
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "MRG_DI"),
+ MTK_FUNCTION(2, "PCM0_DI"),
+ MTK_FUNCTION(3, "TP_GPIO3_AO"),
+ MTK_FUNCTION(4, "SDA6"),
+ MTK_FUNCTION(5, "TDM_MCK"),
+ MTK_FUNCTION(6, "I2S3_DO"),
+ MTK_FUNCTION(7, "SCP_VREQ_VAO")
+ ),
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "PWM_B"),
+ MTK_FUNCTION(2, "I2S0_MCK"),
+ MTK_FUNCTION(3, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(4, "MD_URXD1"),
+ MTK_FUNCTION(5, "TDM_BCK"),
+ MTK_FUNCTION(6, "TP_GPIO4_AO"),
+ MTK_FUNCTION(7, "DAP_MD32_SWD")
+ ),
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "PWM_C"),
+ MTK_FUNCTION(2, "I2S0_BCK"),
+ MTK_FUNCTION(3, "SSPM_URXD_AO"),
+ MTK_FUNCTION(4, "MD_UTXD1"),
+ MTK_FUNCTION(5, "TDM_LRCK"),
+ MTK_FUNCTION(6, "TP_GPIO5_AO"),
+ MTK_FUNCTION(7, "DAP_MD32_SWCK")
+ ),
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "PWM_A"),
+ MTK_FUNCTION(2, "I2S0_LRCK"),
+ MTK_FUNCTION(3, "IDDIG"),
+ MTK_FUNCTION(4, "MD_URXD0"),
+ MTK_FUNCTION(5, "TDM_DATA0"),
+ MTK_FUNCTION(6, "TP_GPIO6_AO"),
+ MTK_FUNCTION(7, "CMFLASH")
+ ),
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "SPI1_B_MI"),
+ MTK_FUNCTION(2, "I2S0_DI"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "MD_UTXD0"),
+ MTK_FUNCTION(5, "TDM_DATA1"),
+ MTK_FUNCTION(6, "TP_GPIO7_AO"),
+ MTK_FUNCTION(7, "DVFSRC_EXT_REQ")
+ ),
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "SPI1_B_CSB"),
+ MTK_FUNCTION(2, "ANT_SEL3"),
+ MTK_FUNCTION(3, "SCL7"),
+ MTK_FUNCTION(4, "CONN_MCU_TRST_B"),
+ MTK_FUNCTION(5, "TDM_DATA2"),
+ MTK_FUNCTION(6, "MD_INT0"),
+ MTK_FUNCTION(7, "JTRSTN_SEL1")
+ ),
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "SPI1_B_MO"),
+ MTK_FUNCTION(2, "ANT_SEL4"),
+ MTK_FUNCTION(3, "CMMCLK2"),
+ MTK_FUNCTION(4, "CONN_MCU_DBGACK_N"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TRSTN"),
+ MTK_FUNCTION(6, "IO_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "DBG_MON_B10")
+ ),
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "SPI1_B_CLK"),
+ MTK_FUNCTION(2, "ANT_SEL5"),
+ MTK_FUNCTION(3, "CMMCLK3"),
+ MTK_FUNCTION(4, "CONN_MCU_DBGI_N"),
+ MTK_FUNCTION(5, "TDM_DATA3"),
+ MTK_FUNCTION(6, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(7, "DBG_MON_B11")
+ ),
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "TP_URXD1_AO"),
+ MTK_FUNCTION(2, "IDDIG"),
+ MTK_FUNCTION(3, "SCL6"),
+ MTK_FUNCTION(4, "UCTS1"),
+ MTK_FUNCTION(5, "UCTS0"),
+ MTK_FUNCTION(6, "SRCLKENAI1"),
+ MTK_FUNCTION(7, "I2S5_MCK")
+ ),
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "TP_UTXD1_AO"),
+ MTK_FUNCTION(2, "USB_DRVVBUS"),
+ MTK_FUNCTION(3, "SDA6"),
+ MTK_FUNCTION(4, "URTS1"),
+ MTK_FUNCTION(5, "URTS0"),
+ MTK_FUNCTION(6, "I2S2_DI2"),
+ MTK_FUNCTION(7, "I2S5_BCK")
+ ),
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "DBPI_D0"),
+ MTK_FUNCTION(2, "SPI5_MI"),
+ MTK_FUNCTION(3, "PCM0_SYNC"),
+ MTK_FUNCTION(4, "MD_URXD0"),
+ MTK_FUNCTION(5, "ANT_SEL3"),
+ MTK_FUNCTION(6, "I2S0_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B15")
+ ),
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "DBPI_D1"),
+ MTK_FUNCTION(2, "SPI5_CSB"),
+ MTK_FUNCTION(3, "PCM0_CLK"),
+ MTK_FUNCTION(4, "MD_UTXD0"),
+ MTK_FUNCTION(5, "ANT_SEL4"),
+ MTK_FUNCTION(6, "I2S0_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B16")
+ ),
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "DBPI_D2"),
+ MTK_FUNCTION(2, "SPI5_MO"),
+ MTK_FUNCTION(3, "PCM0_DO"),
+ MTK_FUNCTION(4, "MD_URXD1"),
+ MTK_FUNCTION(5, "ANT_SEL5"),
+ MTK_FUNCTION(6, "I2S0_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B17")
+ ),
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "DBPI_D3"),
+ MTK_FUNCTION(2, "SPI5_CLK"),
+ MTK_FUNCTION(3, "PCM0_DI"),
+ MTK_FUNCTION(4, "MD_UTXD1"),
+ MTK_FUNCTION(5, "ANT_SEL6"),
+ MTK_FUNCTION(6, "I2S0_DI"),
+ MTK_FUNCTION(7, "DBG_MON_B23")
+ ),
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "DBPI_D4"),
+ MTK_FUNCTION(2, "SPI4_MI"),
+ MTK_FUNCTION(3, "CONN_MCU_TRST_B"),
+ MTK_FUNCTION(4, "MD_INT0"),
+ MTK_FUNCTION(5, "ANT_SEL7"),
+ MTK_FUNCTION(6, "I2S3_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A1")
+ ),
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "DBPI_D5"),
+ MTK_FUNCTION(2, "SPI4_CSB"),
+ MTK_FUNCTION(3, "CONN_MCU_DBGI_N"),
+ MTK_FUNCTION(4, "MD_INT0"),
+ MTK_FUNCTION(5, "SCP_VREQ_VAO"),
+ MTK_FUNCTION(6, "I2S3_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A2")
+ ),
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "DBPI_D6"),
+ MTK_FUNCTION(2, "SPI4_MO"),
+ MTK_FUNCTION(3, "CONN_MCU_TDO"),
+ MTK_FUNCTION(4, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(5, "URXD1"),
+ MTK_FUNCTION(6, "I2S3_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A3")
+ ),
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "DBPI_D7"),
+ MTK_FUNCTION(2, "SPI4_CLK"),
+ MTK_FUNCTION(3, "CONN_MCU_DBGACK_N"),
+ MTK_FUNCTION(4, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(5, "UTXD1"),
+ MTK_FUNCTION(6, "I2S3_DO"),
+ MTK_FUNCTION(7, "DBG_MON_A19")
+ ),
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "DBPI_D8"),
+ MTK_FUNCTION(2, "SPI3_MI"),
+ MTK_FUNCTION(3, "CONN_MCU_TMS"),
+ MTK_FUNCTION(4, "DAP_MD32_SWD"),
+ MTK_FUNCTION(5, "CONN_MCU_AICE_TMSC"),
+ MTK_FUNCTION(6, "I2S2_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B5")
+ ),
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "DBPI_D9"),
+ MTK_FUNCTION(2, "SPI3_CSB"),
+ MTK_FUNCTION(3, "CONN_MCU_TCK"),
+ MTK_FUNCTION(4, "DAP_MD32_SWCK"),
+ MTK_FUNCTION(5, "CONN_MCU_AICE_TCKC"),
+ MTK_FUNCTION(6, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B6")
+ ),
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "DBPI_D10"),
+ MTK_FUNCTION(2, "SPI3_MO"),
+ MTK_FUNCTION(3, "CONN_MCU_TDI"),
+ MTK_FUNCTION(4, "UCTS1"),
+ MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(6, "I2S2_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B7")
+ ),
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "DBPI_D11"),
+ MTK_FUNCTION(2, "SPI3_CLK"),
+ MTK_FUNCTION(3, "SRCLKENAI0"),
+ MTK_FUNCTION(4, "URTS1"),
+ MTK_FUNCTION(5, "IO_JTAG_TCK"),
+ MTK_FUNCTION(6, "I2S2_DI"),
+ MTK_FUNCTION(7, "DBG_MON_B31")
+ ),
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "DBPI_HSYNC"),
+ MTK_FUNCTION(2, "ANT_SEL0"),
+ MTK_FUNCTION(3, "SCL6"),
+ MTK_FUNCTION(4, "KPCOL2"),
+ MTK_FUNCTION(5, "IO_JTAG_TMS"),
+ MTK_FUNCTION(6, "I2S1_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B0")
+ ),
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "DBPI_VSYNC"),
+ MTK_FUNCTION(2, "ANT_SEL1"),
+ MTK_FUNCTION(3, "SDA6"),
+ MTK_FUNCTION(4, "KPROW2"),
+ MTK_FUNCTION(5, "IO_JTAG_TDI"),
+ MTK_FUNCTION(6, "I2S1_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B1")
+ ),
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "DBPI_DE"),
+ MTK_FUNCTION(2, "ANT_SEL2"),
+ MTK_FUNCTION(3, "SCL7"),
+ MTK_FUNCTION(4, "DMIC_CLK"),
+ MTK_FUNCTION(5, "IO_JTAG_TDO"),
+ MTK_FUNCTION(6, "I2S1_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B9")
+ ),
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "DBPI_CK"),
+ MTK_FUNCTION(2, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(3, "SDA7"),
+ MTK_FUNCTION(4, "DMIC_DAT"),
+ MTK_FUNCTION(5, "IO_JTAG_TRSTN"),
+ MTK_FUNCTION(6, "I2S1_DO"),
+ MTK_FUNCTION(7, "DBG_MON_B32")
+ ),
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "IO_JTAG_TCK"),
+ MTK_FUNCTION(3, "UDI_TCK"),
+ MTK_FUNCTION(4, "CONN_DSP_JCK"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TCK"),
+ MTK_FUNCTION(6, "PCM1_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_A6")
+ ),
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(2, "DAP_MD32_SWD"),
+ MTK_FUNCTION(3, "CONN_MCU_AICE_TMSC"),
+ MTK_FUNCTION(4, "CONN_DSP_JINTP"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TRSTN"),
+ MTK_FUNCTION(6, "PCM1_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A7")
+ ),
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(2, "IO_JTAG_TMS"),
+ MTK_FUNCTION(3, "UDI_TMS"),
+ MTK_FUNCTION(4, "CONN_DSP_JMS"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TMS"),
+ MTK_FUNCTION(6, "PCM1_SYNC"),
+ MTK_FUNCTION(7, "DBG_MON_A8")
+ ),
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "IO_JTAG_TDI"),
+ MTK_FUNCTION(3, "UDI_TDI"),
+ MTK_FUNCTION(4, "CONN_DSP_JDI"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TDI"),
+ MTK_FUNCTION(6, "PCM1_DO0"),
+ MTK_FUNCTION(7, "DBG_MON_A9")
+ ),
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "IO_JTAG_TRSTN"),
+ MTK_FUNCTION(3, "UDI_NTRST"),
+ MTK_FUNCTION(4, "DAP_MD32_SWCK"),
+ MTK_FUNCTION(5, "CONN_MCU_AICE_TCKC"),
+ MTK_FUNCTION(6, "PCM1_DO2"),
+ MTK_FUNCTION(7, "DBG_MON_A10")
+ ),
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "IO_JTAG_TDO"),
+ MTK_FUNCTION(3, "UDI_TDO"),
+ MTK_FUNCTION(4, "CONN_DSP_JDO"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TDO"),
+ MTK_FUNCTION(6, "PCM1_DO1"),
+ MTK_FUNCTION(7, "DBG_MON_A11")
+ ),
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(2, "CCU_JTAG_TDO"),
+ MTK_FUNCTION(3, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(5, "SCP_JTAG_TDO"),
+ MTK_FUNCTION(6, "CONN_DSP_JMS"),
+ MTK_FUNCTION(7, "DBG_MON_A28")
+ ),
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(2, "CCU_JTAG_TMS"),
+ MTK_FUNCTION(3, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(4, "CONN_MCU_AICE_TMSC"),
+ MTK_FUNCTION(5, "SCP_JTAG_TMS"),
+ MTK_FUNCTION(6, "CONN_DSP_JINTP"),
+ MTK_FUNCTION(7, "DBG_MON_A29")
+ ),
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(2, "CCU_JTAG_TDI"),
+ MTK_FUNCTION(3, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(5, "SCP_JTAG_TDI"),
+ MTK_FUNCTION(6, "CONN_DSP_JDO"),
+ MTK_FUNCTION(7, "DBG_MON_A30")
+ ),
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(3, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(4, "CONN_MCU_AICE_TCKC"),
+ MTK_FUNCTION(7, "DBG_MON_A20")
+ ),
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(2, "CCU_JTAG_TCK"),
+ MTK_FUNCTION(3, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(5, "SCP_JTAG_TCK"),
+ MTK_FUNCTION(6, "CONN_DSP_JCK"),
+ MTK_FUNCTION(7, "DBG_MON_A31")
+ ),
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(2, "CCU_JTAG_TRST"),
+ MTK_FUNCTION(3, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(5, "SCP_JTAG_TRSTN"),
+ MTK_FUNCTION(6, "CONN_DSP_JDI"),
+ MTK_FUNCTION(7, "DBG_MON_A32")
+ ),
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(2, "URXD1"),
+ MTK_FUNCTION(3, "UCTS0"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(6, "DMIC_CLK")
+ ),
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "USB_DRVVBUS"),
+ MTK_FUNCTION(2, "UTXD1"),
+ MTK_FUNCTION(3, "URTS0"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(6, "DMIC_DAT")
+ ),
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "DISP_PWM")
+ ),
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "DSI_TE")
+ ),
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "LCM_RST")
+ ),
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(2, "URXD1"),
+ MTK_FUNCTION(3, "UCTS1"),
+ MTK_FUNCTION(4, "CCU_UTXD_AO"),
+ MTK_FUNCTION(5, "TP_UCTS1_AO"),
+ MTK_FUNCTION(6, "IDDIG"),
+ MTK_FUNCTION(7, "I2S5_LRCK")
+ ),
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(2, "UTXD1"),
+ MTK_FUNCTION(3, "URTS1"),
+ MTK_FUNCTION(4, "CCU_URXD_AO"),
+ MTK_FUNCTION(5, "TP_URTS1_AO"),
+ MTK_FUNCTION(6, "USB_DRVVBUS"),
+ MTK_FUNCTION(7, "I2S5_DO")
+ ),
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "SCL5")
+ ),
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "SDA5")
+ ),
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "SCL3")
+ ),
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "SDA3")
+ ),
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(0, 52),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "BPI_ANT2")
+ ),
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(0, 53),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "BPI_ANT0")
+ ),
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(0, 54),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "BPI_OLAT1")
+ ),
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(0, 55),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "BPI_BUS8")
+ ),
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(0, 56),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "BPI_BUS9"),
+ MTK_FUNCTION(2, "SCL_6306")
+ ),
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(0, 57),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "BPI_BUS10"),
+ MTK_FUNCTION(2, "SDA_6306")
+ ),
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(0, 58),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "RFIC0_BSI_D2"),
+ MTK_FUNCTION(2, "SPM_BSI_D2"),
+ MTK_FUNCTION(3, "PWM_B")
+ ),
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(0, 59),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "RFIC0_BSI_D1"),
+ MTK_FUNCTION(2, "SPM_BSI_D1")
+ ),
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "RFIC0_BSI_D0"),
+ MTK_FUNCTION(2, "SPM_BSI_D0")
+ ),
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "MIPI1_SDATA")
+ ),
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "MIPI1_SCLK")
+ ),
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "MIPI0_SDATA")
+ ),
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "MIPI0_SCLK")
+ ),
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "MIPI3_SDATA"),
+ MTK_FUNCTION(2, "BPI_OLAT2")
+ ),
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(0, 66),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "MIPI3_SCLK"),
+ MTK_FUNCTION(2, "BPI_OLAT3")
+ ),
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(0, 67),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "MIPI2_SDATA")
+ ),
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(0, 68),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "MIPI2_SCLK")
+ ),
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(0, 69),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "BPI_BUS7")
+ ),
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "BPI_BUS6")
+ ),
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "BPI_BUS5")
+ ),
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "BPI_BUS4")
+ ),
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "BPI_BUS3")
+ ),
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "BPI_BUS2")
+ ),
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(0, 75),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "BPI_BUS1")
+ ),
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(0, 76),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "BPI_BUS0")
+ ),
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(0, 77),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "BPI_ANT1")
+ ),
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(0, 78),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "BPI_OLAT0")
+ ),
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "BPI_PA_VM1"),
+ MTK_FUNCTION(2, "MIPI4_SDATA")
+ ),
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "BPI_PA_VM0"),
+ MTK_FUNCTION(2, "MIPI4_SCLK")
+ ),
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "SDA1")
+ ),
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "SDA0")
+ ),
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "SCL0")
+ ),
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "SCL1")
+ ),
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "SPI0_MI"),
+ MTK_FUNCTION(2, "SCP_SPI0_MI"),
+ MTK_FUNCTION(3, "CLKM3"),
+ MTK_FUNCTION(4, "I2S1_BCK"),
+ MTK_FUNCTION(5, "MFG_DFD_JTAG_TDO"),
+ MTK_FUNCTION(6, "DFD_TDO"),
+ MTK_FUNCTION(7, "JTDO_SEL1")
+ ),
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "SPI0_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI0_CS"),
+ MTK_FUNCTION(3, "CLKM0"),
+ MTK_FUNCTION(4, "I2S1_LRCK"),
+ MTK_FUNCTION(5, "MFG_DFD_JTAG_TMS"),
+ MTK_FUNCTION(6, "DFD_TMS"),
+ MTK_FUNCTION(7, "JTMS_SEL1")
+ ),
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "SPI0_MO"),
+ MTK_FUNCTION(2, "SCP_SPI0_MO"),
+ MTK_FUNCTION(3, "SDA1"),
+ MTK_FUNCTION(4, "I2S1_DO"),
+ MTK_FUNCTION(5, "MFG_DFD_JTAG_TDI"),
+ MTK_FUNCTION(6, "DFD_TDI"),
+ MTK_FUNCTION(7, "JTDI_SEL1")
+ ),
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "SPI0_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI0_CK"),
+ MTK_FUNCTION(3, "SCL1"),
+ MTK_FUNCTION(4, "I2S1_MCK"),
+ MTK_FUNCTION(5, "MFG_DFD_JTAG_TCK"),
+ MTK_FUNCTION(6, "DFD_TCK_XI"),
+ MTK_FUNCTION(7, "JTCK_SEL1")
+ ),
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "PWM_C"),
+ MTK_FUNCTION(3, "I2S5_BCK"),
+ MTK_FUNCTION(4, "ANT_SEL6"),
+ MTK_FUNCTION(5, "SDA8"),
+ MTK_FUNCTION(6, "CMVREF0"),
+ MTK_FUNCTION(7, "DBG_MON_A21")
+ ),
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "PWM_A"),
+ MTK_FUNCTION(2, "CMMCLK2"),
+ MTK_FUNCTION(3, "I2S5_LRCK"),
+ MTK_FUNCTION(4, "SCP_VREQ_VAO"),
+ MTK_FUNCTION(5, "SCL8"),
+ MTK_FUNCTION(6, "PTA_RXD"),
+ MTK_FUNCTION(7, "DBG_MON_A22")
+ ),
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "PWM_B"),
+ MTK_FUNCTION(3, "I2S5_DO"),
+ MTK_FUNCTION(4, "ANT_SEL7"),
+ MTK_FUNCTION(5, "CMMCLK3"),
+ MTK_FUNCTION(6, "PTA_TXD")
+ ),
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "KPROW0")
+ ),
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "KPCOL0"),
+ MTK_FUNCTION(7, "DBG_MON_B27")
+ ),
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(2, "I2S2_DI2"),
+ MTK_FUNCTION(3, "I2S5_MCK"),
+ MTK_FUNCTION(4, "CMMCLK2"),
+ MTK_FUNCTION(5, "SCP_SPI2_MI"),
+ MTK_FUNCTION(6, "SRCLKENAI1"),
+ MTK_FUNCTION(7, "SPI2_MI")
+ ),
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "UTXD0"),
+ MTK_FUNCTION(3, "MD_URXD0"),
+ MTK_FUNCTION(4, "MD_URXD1"),
+ MTK_FUNCTION(5, "SSPM_URXD_AO"),
+ MTK_FUNCTION(6, "CCU_URXD_AO")
+ ),
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "URXD0"),
+ MTK_FUNCTION(3, "MD_UTXD0"),
+ MTK_FUNCTION(4, "MD_UTXD1"),
+ MTK_FUNCTION(5, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(6, "CCU_UTXD_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B2")
+ ),
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "UCTS0"),
+ MTK_FUNCTION(2, "I2S2_MCK"),
+ MTK_FUNCTION(3, "IDDIG"),
+ MTK_FUNCTION(4, "CONN_MCU_TDO"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TDO"),
+ MTK_FUNCTION(6, "IO_JTAG_TDO"),
+ MTK_FUNCTION(7, "DBG_MON_B3")
+ ),
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "URTS0"),
+ MTK_FUNCTION(2, "I2S2_BCK"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "CONN_MCU_TMS"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TMS"),
+ MTK_FUNCTION(6, "IO_JTAG_TMS"),
+ MTK_FUNCTION(7, "DBG_MON_B4")
+ ),
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(0, 99),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "CMMCLK0"),
+ MTK_FUNCTION(4, "CONN_MCU_AICE_TMSC"),
+ MTK_FUNCTION(7, "DBG_MON_B28")
+ ),
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(0, 100),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "CMMCLK1"),
+ MTK_FUNCTION(2, "PWM_C"),
+ MTK_FUNCTION(3, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(4, "CONN_MCU_AICE_TCKC"),
+ MTK_FUNCTION(7, "DBG_MON_B29")
+ ),
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(0, 101),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "CLKM2"),
+ MTK_FUNCTION(2, "I2S2_LRCK"),
+ MTK_FUNCTION(3, "CMVREF1"),
+ MTK_FUNCTION(4, "CONN_MCU_TCK"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TCK"),
+ MTK_FUNCTION(6, "IO_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(0, 102),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "CLKM1"),
+ MTK_FUNCTION(2, "I2S2_DI"),
+ MTK_FUNCTION(3, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(4, "CONN_MCU_TDI"),
+ MTK_FUNCTION(5, "SSPM_JTAG_TDI"),
+ MTK_FUNCTION(6, "IO_JTAG_TDI"),
+ MTK_FUNCTION(7, "DBG_MON_B8")
+ ),
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "SCL2")
+ ),
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(0, 104),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "SDA2")
+ ),
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(0, 105),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "SCL4")
+ ),
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "SDA4")
+ ),
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "DMIC_CLK"),
+ MTK_FUNCTION(2, "ANT_SEL0"),
+ MTK_FUNCTION(3, "CLKM0"),
+ MTK_FUNCTION(4, "SDA7"),
+ MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(6, "PWM_A"),
+ MTK_FUNCTION(7, "DBG_MON_B12")
+ ),
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "CMMCLK2"),
+ MTK_FUNCTION(2, "ANT_SEL1"),
+ MTK_FUNCTION(3, "CLKM1"),
+ MTK_FUNCTION(4, "SCL8"),
+ MTK_FUNCTION(5, "DAP_MD32_SWD"),
+ MTK_FUNCTION(6, "PWM_B"),
+ MTK_FUNCTION(7, "DBG_MON_B13")
+ ),
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "DMIC_DAT"),
+ MTK_FUNCTION(2, "ANT_SEL2"),
+ MTK_FUNCTION(3, "CLKM2"),
+ MTK_FUNCTION(4, "SDA8"),
+ MTK_FUNCTION(5, "DAP_MD32_SWCK"),
+ MTK_FUNCTION(6, "PWM_C"),
+ MTK_FUNCTION(7, "DBG_MON_B14")
+ ),
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "SCL7"),
+ MTK_FUNCTION(2, "ANT_SEL0"),
+ MTK_FUNCTION(3, "TP_URXD1_AO"),
+ MTK_FUNCTION(4, "USB_DRVVBUS"),
+ MTK_FUNCTION(5, "SRCLKENAI1"),
+ MTK_FUNCTION(6, "KPCOL2"),
+ MTK_FUNCTION(7, "URXD1")
+ ),
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "CMMCLK3"),
+ MTK_FUNCTION(2, "ANT_SEL1"),
+ MTK_FUNCTION(3, "SRCLKENAI0"),
+ MTK_FUNCTION(4, "SCP_VREQ_VAO"),
+ MTK_FUNCTION(5, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(7, "DVFSRC_EXT_REQ")
+ ),
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "SDA7"),
+ MTK_FUNCTION(2, "ANT_SEL2"),
+ MTK_FUNCTION(3, "TP_UTXD1_AO"),
+ MTK_FUNCTION(4, "IDDIG"),
+ MTK_FUNCTION(5, "AGPS_SYNC"),
+ MTK_FUNCTION(6, "KPROW2"),
+ MTK_FUNCTION(7, "UTXD1")
+ ),
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "CONN_TOP_CLK"),
+ MTK_FUNCTION(3, "SCL6"),
+ MTK_FUNCTION(4, "AUXIF_CLK0"),
+ MTK_FUNCTION(6, "TP_UCTS1_AO")
+ ),
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "CONN_TOP_DATA"),
+ MTK_FUNCTION(3, "SDA6"),
+ MTK_FUNCTION(4, "AUXIF_ST0"),
+ MTK_FUNCTION(6, "TP_URTS1_AO")
+ ),
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "CONN_BT_CLK"),
+ MTK_FUNCTION(2, "UTXD1"),
+ MTK_FUNCTION(3, "PTA_TXD"),
+ MTK_FUNCTION(4, "AUXIF_CLK1"),
+ MTK_FUNCTION(5, "DAP_MD32_SWD"),
+ MTK_FUNCTION(6, "TP_UTXD1_AO")
+ ),
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "CONN_BT_DATA"),
+ MTK_FUNCTION(2, "IPU_JTAG_TRST"),
+ MTK_FUNCTION(4, "AUXIF_ST1"),
+ MTK_FUNCTION(5, "DAP_MD32_SWCK"),
+ MTK_FUNCTION(6, "TP_URXD2_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A0")
+ ),
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "CONN_WF_HB0"),
+ MTK_FUNCTION(2, "IPU_JTAG_TDO"),
+ MTK_FUNCTION(6, "TP_UTXD2_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A4")
+ ),
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "CONN_WF_HB1"),
+ MTK_FUNCTION(2, "IPU_JTAG_TDI"),
+ MTK_FUNCTION(5, "SSPM_URXD_AO"),
+ MTK_FUNCTION(6, "TP_UCTS2_AO"),
+ MTK_FUNCTION(7, "DBG_MON_A5")
+ ),
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "CONN_WF_HB2"),
+ MTK_FUNCTION(2, "IPU_JTAG_TCK"),
+ MTK_FUNCTION(5, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(6, "TP_URTS2_AO")
+ ),
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "CONN_WB_PTA"),
+ MTK_FUNCTION(2, "IPU_JTAG_TMS"),
+ MTK_FUNCTION(5, "CCU_URXD_AO")
+ ),
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "CONN_HRST_B"),
+ MTK_FUNCTION(2, "URXD1"),
+ MTK_FUNCTION(3, "PTA_RXD"),
+ MTK_FUNCTION(5, "CCU_UTXD_AO"),
+ MTK_FUNCTION(6, "TP_URXD1_AO")
+ ),
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "MSDC0_CMD"),
+ MTK_FUNCTION(2, "SSPM_URXD2_AO"),
+ MTK_FUNCTION(3, "ANT_SEL1"),
+ MTK_FUNCTION(7, "DBG_MON_A12")
+ ),
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(0, 123),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "MSDC0_DAT0"),
+ MTK_FUNCTION(3, "ANT_SEL0"),
+ MTK_FUNCTION(7, "DBG_MON_A13")
+ ),
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(0, 124),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "MSDC0_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_A14")
+ ),
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "MSDC0_DAT2"),
+ MTK_FUNCTION(3, "MRG_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_A15")
+ ),
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "MSDC0_DAT4"),
+ MTK_FUNCTION(3, "ANT_SEL5"),
+ MTK_FUNCTION(6, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(7, "DBG_MON_A16")
+ ),
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, 127),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "MSDC0_DAT6"),
+ MTK_FUNCTION(3, "ANT_SEL4"),
+ MTK_FUNCTION(6, "UFS_MPHY_SDA"),
+ MTK_FUNCTION(7, "DBG_MON_A17")
+ ),
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, 128),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "MSDC0_DAT1"),
+ MTK_FUNCTION(3, "ANT_SEL2"),
+ MTK_FUNCTION(6, "UFS_UNIPRO_SDA"),
+ MTK_FUNCTION(7, "DBG_MON_A18")
+ ),
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "MSDC0_DAT5"),
+ MTK_FUNCTION(3, "ANT_SEL3"),
+ MTK_FUNCTION(6, "UFS_UNIPRO_SCL"),
+ MTK_FUNCTION(7, "DBG_MON_A23")
+ ),
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "MSDC0_DAT7"),
+ MTK_FUNCTION(3, "MRG_DO"),
+ MTK_FUNCTION(7, "DBG_MON_A24")
+ ),
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(0, 131),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "MSDC0_DSL"),
+ MTK_FUNCTION(3, "MRG_SYNC"),
+ MTK_FUNCTION(7, "DBG_MON_A25")
+ ),
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(0, 132),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "MSDC0_DAT3"),
+ MTK_FUNCTION(3, "MRG_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A26")
+ ),
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(0, 133),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "MSDC0_RSTB"),
+ MTK_FUNCTION(3, "AGPS_SYNC"),
+ MTK_FUNCTION(7, "DBG_MON_A27")
+ ),
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(0, 134),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(0, 135),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(0, 136),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(2, "AUD_CLK_MISO"),
+ MTK_FUNCTION(3, "I2S1_MCK"),
+ MTK_FUNCTION(6, "UFS_UNIPRO_SCL")
+ ),
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "AUD_SYNC_MOSI"),
+ MTK_FUNCTION(2, "AUD_SYNC_MISO"),
+ MTK_FUNCTION(3, "I2S1_BCK")
+ ),
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
+ MTK_FUNCTION(2, "AUD_DAT_MISO0"),
+ MTK_FUNCTION(3, "I2S1_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B24")
+ ),
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
+ MTK_FUNCTION(2, "AUD_DAT_MISO1"),
+ MTK_FUNCTION(3, "I2S1_DO"),
+ MTK_FUNCTION(6, "UFS_MPHY_SDA")
+ ),
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "AUD_CLK_MISO"),
+ MTK_FUNCTION(2, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(3, "I2S0_MCK"),
+ MTK_FUNCTION(6, "UFS_UNIPRO_SDA")
+ ),
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "AUD_SYNC_MISO"),
+ MTK_FUNCTION(2, "AUD_SYNC_MOSI"),
+ MTK_FUNCTION(3, "I2S0_BCK")
+ ),
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO0"),
+ MTK_FUNCTION(2, "AUD_DAT_MOSI0"),
+ MTK_FUNCTION(3, "I2S0_LRCK"),
+ MTK_FUNCTION(4, "VOW_DAT_MISO"),
+ MTK_FUNCTION(7, "DBG_MON_B25")
+ ),
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO1"),
+ MTK_FUNCTION(2, "AUD_DAT_MOSI1"),
+ MTK_FUNCTION(3, "I2S0_DI"),
+ MTK_FUNCTION(4, "VOW_CLK_MISO"),
+ MTK_FUNCTION(6, "UFS_MPHY_SCL"),
+ MTK_FUNCTION(7, "DBG_MON_B26")
+ ),
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO144"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MO")
+ ),
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO145"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+ ),
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO146"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MI")
+ ),
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO147"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+ ),
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO148"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO149"),
+ MTK_FUNCTION(1, "SRCLKENA1")
+ ),
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO150"),
+ MTK_FUNCTION(1, "PWM_A"),
+ MTK_FUNCTION(2, "CMFLASH"),
+ MTK_FUNCTION(3, "CLKM0"),
+ MTK_FUNCTION(7, "DBG_MON_B30")
+ ),
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO151"),
+ MTK_FUNCTION(1, "PWM_B"),
+ MTK_FUNCTION(2, "CMVREF0"),
+ MTK_FUNCTION(3, "CLKM1"),
+ MTK_FUNCTION(7, "DBG_MON_B20")
+ ),
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO152"),
+ MTK_FUNCTION(1, "PWM_C"),
+ MTK_FUNCTION(2, "CMFLASH"),
+ MTK_FUNCTION(3, "CLKM2"),
+ MTK_FUNCTION(7, "DBG_MON_B21")
+ ),
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO153"),
+ MTK_FUNCTION(1, "PWM_A"),
+ MTK_FUNCTION(2, "CMVREF0"),
+ MTK_FUNCTION(3, "CLKM3"),
+ MTK_FUNCTION(7, "DBG_MON_B22")
+ ),
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO154"),
+ MTK_FUNCTION(1, "SCP_VREQ_VAO"),
+ MTK_FUNCTION(2, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(7, "DBG_MON_B18")
+ ),
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO155"),
+ MTK_FUNCTION(1, "ANT_SEL0"),
+ MTK_FUNCTION(2, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(3, "CMVREF1"),
+ MTK_FUNCTION(7, "SCP_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 156),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO156"),
+ MTK_FUNCTION(1, "ANT_SEL1"),
+ MTK_FUNCTION(2, "SRCLKENAI0"),
+ MTK_FUNCTION(3, "SCL6"),
+ MTK_FUNCTION(4, "KPCOL2"),
+ MTK_FUNCTION(5, "IDDIG"),
+ MTK_FUNCTION(7, "SCP_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 157),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO157"),
+ MTK_FUNCTION(1, "ANT_SEL2"),
+ MTK_FUNCTION(2, "SRCLKENAI1"),
+ MTK_FUNCTION(3, "SDA6"),
+ MTK_FUNCTION(4, "KPROW2"),
+ MTK_FUNCTION(5, "USB_DRVVBUS"),
+ MTK_FUNCTION(7, "SCP_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 158),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO158"),
+ MTK_FUNCTION(1, "ANT_SEL3")
+ ),
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 159),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO159"),
+ MTK_FUNCTION(1, "ANT_SEL4")
+ ),
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 160),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO160"),
+ MTK_FUNCTION(1, "ANT_SEL5")
+ ),
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 161),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO161"),
+ MTK_FUNCTION(1, "SPI1_A_MI"),
+ MTK_FUNCTION(2, "SCP_SPI1_MI"),
+ MTK_FUNCTION(3, "IDDIG"),
+ MTK_FUNCTION(4, "ANT_SEL6"),
+ MTK_FUNCTION(5, "KPCOL2"),
+ MTK_FUNCTION(6, "PTA_RXD"),
+ MTK_FUNCTION(7, "DBG_MON_B19")
+ ),
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 162),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO162"),
+ MTK_FUNCTION(1, "SPI1_A_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI1_CS"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "ANT_SEL5"),
+ MTK_FUNCTION(5, "KPROW2"),
+ MTK_FUNCTION(6, "PTA_TXD")
+ ),
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 163),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO163"),
+ MTK_FUNCTION(1, "SPI1_A_MO"),
+ MTK_FUNCTION(2, "SCP_SPI1_MO"),
+ MTK_FUNCTION(3, "SDA1"),
+ MTK_FUNCTION(4, "ANT_SEL4"),
+ MTK_FUNCTION(5, "CMMCLK2"),
+ MTK_FUNCTION(6, "DMIC_CLK")
+ ),
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 164),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO164"),
+ MTK_FUNCTION(1, "SPI1_A_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI1_CK"),
+ MTK_FUNCTION(3, "SCL1"),
+ MTK_FUNCTION(4, "ANT_SEL3"),
+ MTK_FUNCTION(5, "CMMCLK3"),
+ MTK_FUNCTION(6, "DMIC_DAT")
+ ),
+ MTK_PIN(
+ 165, "GPIO165",
+ MTK_EINT_FUNCTION(0, 165),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO165"),
+ MTK_FUNCTION(1, "PWM_B"),
+ MTK_FUNCTION(2, "CMMCLK2"),
+ MTK_FUNCTION(3, "SCP_VREQ_VAO"),
+ MTK_FUNCTION(6, "TDM_MCK_2ND"),
+ MTK_FUNCTION(7, "SCP_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 166, "GPIO166",
+ MTK_EINT_FUNCTION(0, 166),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO166"),
+ MTK_FUNCTION(1, "ANT_SEL6")
+ ),
+ MTK_PIN(
+ 167, "GPIO167",
+ MTK_EINT_FUNCTION(0, 167),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO167"),
+ MTK_FUNCTION(1, "RFIC0_BSI_EN"),
+ MTK_FUNCTION(2, "SPM_BSI_EN")
+ ),
+ MTK_PIN(
+ 168, "GPIO168",
+ MTK_EINT_FUNCTION(0, 168),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO168"),
+ MTK_FUNCTION(1, "RFIC0_BSI_CK"),
+ MTK_FUNCTION(2, "SPM_BSI_CK")
+ ),
+ MTK_PIN(
+ 169, "GPIO169",
+ MTK_EINT_FUNCTION(0, 169),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO169"),
+ MTK_FUNCTION(1, "PWM_C"),
+ MTK_FUNCTION(2, "CMMCLK3"),
+ MTK_FUNCTION(3, "CMVREF1"),
+ MTK_FUNCTION(4, "ANT_SEL7"),
+ MTK_FUNCTION(5, "AGPS_SYNC"),
+ MTK_FUNCTION(6, "TDM_BCK_2ND"),
+ MTK_FUNCTION(7, "SCP_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 170, "GPIO170",
+ MTK_EINT_FUNCTION(0, 170),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO170"),
+ MTK_FUNCTION(1, "I2S1_BCK"),
+ MTK_FUNCTION(2, "I2S3_BCK"),
+ MTK_FUNCTION(3, "SCL7"),
+ MTK_FUNCTION(4, "I2S5_BCK"),
+ MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(6, "TDM_LRCK_2ND"),
+ MTK_FUNCTION(7, "ANT_SEL3")
+ ),
+ MTK_PIN(
+ 171, "GPIO171",
+ MTK_EINT_FUNCTION(0, 184),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO171"),
+ MTK_FUNCTION(1, "I2S1_LRCK"),
+ MTK_FUNCTION(2, "I2S3_LRCK"),
+ MTK_FUNCTION(3, "SDA7"),
+ MTK_FUNCTION(4, "I2S5_LRCK"),
+ MTK_FUNCTION(5, "URXD1"),
+ MTK_FUNCTION(6, "TDM_DATA0_2ND"),
+ MTK_FUNCTION(7, "ANT_SEL4")
+ ),
+ MTK_PIN(
+ 172, "GPIO172",
+ MTK_EINT_FUNCTION(0, 185),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO172"),
+ MTK_FUNCTION(1, "I2S1_DO"),
+ MTK_FUNCTION(2, "I2S3_DO"),
+ MTK_FUNCTION(3, "SCL8"),
+ MTK_FUNCTION(4, "I2S5_DO"),
+ MTK_FUNCTION(5, "UTXD1"),
+ MTK_FUNCTION(6, "TDM_DATA1_2ND"),
+ MTK_FUNCTION(7, "ANT_SEL5")
+ ),
+ MTK_PIN(
+ 173, "GPIO173",
+ MTK_EINT_FUNCTION(0, 186),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO173"),
+ MTK_FUNCTION(1, "I2S1_MCK"),
+ MTK_FUNCTION(2, "I2S3_MCK"),
+ MTK_FUNCTION(3, "SDA8"),
+ MTK_FUNCTION(4, "I2S5_MCK"),
+ MTK_FUNCTION(5, "UCTS0"),
+ MTK_FUNCTION(6, "TDM_DATA2_2ND"),
+ MTK_FUNCTION(7, "ANT_SEL6")
+ ),
+ MTK_PIN(
+ 174, "GPIO174",
+ MTK_EINT_FUNCTION(0, 187),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO174"),
+ MTK_FUNCTION(1, "I2S2_DI"),
+ MTK_FUNCTION(2, "I2S0_DI"),
+ MTK_FUNCTION(3, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(4, "I2S2_DI2"),
+ MTK_FUNCTION(5, "URTS0"),
+ MTK_FUNCTION(6, "TDM_DATA3_2ND"),
+ MTK_FUNCTION(7, "ANT_SEL7")
+ ),
+ MTK_PIN(
+ 175, "GPIO175",
+ MTK_EINT_FUNCTION(0, 188),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO175"),
+ MTK_FUNCTION(1, "ANT_SEL7")
+ ),
+ MTK_PIN(
+ 176, "GPIO176",
+ MTK_EINT_FUNCTION(0, 189),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO176")
+ ),
+ MTK_PIN(
+ 177, "GPIO177",
+ MTK_EINT_FUNCTION(0, 190),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO177")
+ ),
+ MTK_PIN(
+ 178, "GPIO178",
+ MTK_EINT_FUNCTION(0, 191),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO178")
+ ),
+ MTK_PIN(
+ 179, "GPIO179",
+ MTK_EINT_FUNCTION(0, 192),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO179")
+ ),
+ MTK_PIN(
+ 180, "GPIO180",
+ MTK_EINT_FUNCTION(0, 171),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO180")
+ ),
+ MTK_PIN(
+ 181, "GPIO181",
+ MTK_EINT_FUNCTION(0, 172),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO181")
+ ),
+ MTK_PIN(
+ 182, "GPIO182",
+ MTK_EINT_FUNCTION(0, 173),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO182")
+ ),
+ MTK_PIN(
+ 183, "GPIO183",
+ MTK_EINT_FUNCTION(0, 174),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO183")
+ ),
+ MTK_PIN(
+ 184, "GPIO184",
+ MTK_EINT_FUNCTION(0, 175),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO184")
+ ),
+ MTK_PIN(
+ 185, "GPIO185",
+ MTK_EINT_FUNCTION(0, 177),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO185")
+ ),
+ MTK_PIN(
+ 186, "GPIO186",
+ MTK_EINT_FUNCTION(0, 178),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO186")
+ ),
+ MTK_PIN(
+ 187, "GPIO187",
+ MTK_EINT_FUNCTION(0, 179),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO187")
+ ),
+ MTK_PIN(
+ 188, "GPIO188",
+ MTK_EINT_FUNCTION(0, 180),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO188")
+ ),
+ MTK_PIN(
+ 189, "GPIO189",
+ MTK_EINT_FUNCTION(0, 181),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO189")
+ ),
+ MTK_PIN(
+ 190, "GPIO190",
+ MTK_EINT_FUNCTION(0, 182),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO190")
+ ),
+ MTK_PIN(
+ 191, "GPIO191",
+ MTK_EINT_FUNCTION(0, 183),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO191")
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT8183_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
new file mode 100644
index 000000000000..d2179028f134
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -0,0 +1,907 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek Pinctrl Paris Driver, which implements the vendor per-pin
+ * bindings for MediaTek SoCs.
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ * Zhiyong Tao <zhiyong.tao@mediatek.com>
+ * Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ */
+
+#include <linux/gpio/driver.h>
+#include <dt-bindings/pinctrl/mt65xx.h>
+#include "pinctrl-paris.h"
+
+#define PINCTRL_PINCTRL_DEV KBUILD_MODNAME
+
+/* Custom pinconf parameters, numbered past PIN_CONFIG_END so they
+ * never collide with the generic pinconf parameters. */
+#define MTK_PIN_CONFIG_TDSEL (PIN_CONFIG_END + 1)
+#define MTK_PIN_CONFIG_RDSEL (PIN_CONFIG_END + 2)
+#define MTK_PIN_CONFIG_PU_ADV (PIN_CONFIG_END + 3)
+#define MTK_PIN_CONFIG_PD_ADV (PIN_CONFIG_END + 4)
+
+static const struct pinconf_generic_params mtk_custom_bindings[] = {
+ {"mediatek,tdsel", MTK_PIN_CONFIG_TDSEL, 0},
+ {"mediatek,rdsel", MTK_PIN_CONFIG_RDSEL, 0},
+ {"mediatek,pull-up-adv", MTK_PIN_CONFIG_PU_ADV, 1},
+ {"mediatek,pull-down-adv", MTK_PIN_CONFIG_PD_ADV, 1},
+};
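+
+/*
+ * Illustrative sketch (not taken from this patch) of how the custom
+ * properties above combine with the generic "pinmux" cells in a board
+ * device tree; the node names and the pinmux macro follow the usual
+ * mtXXXX-pinfunc.h pattern and are hypothetical:
+ *
+ *	i2c0_pins: i2c0-pins {
+ *		pins-bus {
+ *			pinmux = <PINMUX_GPIO82__FUNC_SDA0>;
+ *			mediatek,pull-up-adv = <3>;
+ *			mediatek,rdsel = <2>;
+ *		};
+ *	};
+ */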
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item mtk_conf_items[] = {
+ PCONFDUMP(MTK_PIN_CONFIG_TDSEL, "tdsel", NULL, true),
+ PCONFDUMP(MTK_PIN_CONFIG_RDSEL, "rdsel", NULL, true),
+ PCONFDUMP(MTK_PIN_CONFIG_PU_ADV, "pu-adv", NULL, true),
+ PCONFDUMP(MTK_PIN_CONFIG_PD_ADV, "pd-adv", NULL, true),
+};
+#endif
+
+static const char * const mtk_gpio_functions[] = {
+ "func0", "func1", "func2", "func3",
+ "func4", "func5", "func6", "func7",
+ "func8", "func9", "func10", "func11",
+ "func12", "func13", "func14", "func15",
+};
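+
+/*
+ * Generic mux function names: "funcN" simply selects mux value N on the
+ * pin a group maps to; the per-SoC pin tables give N its per-pin meaning.
+ */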
+
+static int mtk_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ const struct mtk_pin_desc *desc;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE,
+ hw->soc->gpio_m);
+}
+
+static int mtk_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin, bool input)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ const struct mtk_pin_desc *desc;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+ /* the hardware takes 0 as the input direction */
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, !input);
+}
+
+static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ u32 param = pinconf_to_config_param(*config);
+ int val, val2, err, reg, ret = 1;
+ const struct mtk_pin_desc *desc;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (hw->soc->bias_disable_get) {
+ err = hw->soc->bias_disable_get(hw, desc, &ret);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (hw->soc->bias_get) {
+ err = hw->soc->bias_get(hw, desc, 1, &ret);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (hw->soc->bias_get) {
+ err = hw->soc->bias_get(hw, desc, 0, &ret);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &val);
+ if (err)
+ return err;
+
+ if (!val)
+ return -EINVAL;
+
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
+ if (err)
+ return err;
+
+ /* HW takes input mode as zero; output mode as non-zero */
+ if ((val && param == PIN_CONFIG_INPUT_ENABLE) ||
+ (!val && param == PIN_CONFIG_OUTPUT_ENABLE))
+ return -EINVAL;
+
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &val);
+ if (err)
+ return err;
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &val2);
+ if (err)
+ return err;
+
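+ /* The Schmitt trigger reads back as enabled only when the pin
+ * is an input (DIR == 0) and the SMT bit is set.
+ */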
+ if (val || !val2)
+ return -EINVAL;
+
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ if (hw->soc->drive_get) {
+ err = hw->soc->drive_get(hw, desc, &ret);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case MTK_PIN_CONFIG_TDSEL:
+ case MTK_PIN_CONFIG_RDSEL:
+ reg = (param == MTK_PIN_CONFIG_TDSEL) ?
+ PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
+
+ err = mtk_hw_get_value(hw, desc, reg, &val);
+ if (err)
+ return err;
+
+ ret = val;
+
+ break;
+ case MTK_PIN_CONFIG_PU_ADV:
+ case MTK_PIN_CONFIG_PD_ADV:
+ if (hw->soc->adv_pull_get) {
+ bool pullup;
+
+ pullup = param == MTK_PIN_CONFIG_PU_ADV;
+ err = hw->soc->adv_pull_get(hw, desc, pullup, &ret);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, ret);
+
+ return 0;
+}
+
+static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ enum pin_config_param param,
+ enum pin_config_param arg)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ const struct mtk_pin_desc *desc;
+ int err = 0;
+ u32 reg;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
+
+ switch ((u32)param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (hw->soc->bias_disable_set) {
+ err = hw->soc->bias_disable_set(hw, desc);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (hw->soc->bias_set) {
+ err = hw->soc->bias_set(hw, desc, 1);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (hw->soc->bias_set) {
+ err = hw->soc->bias_set(hw, desc, 0);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
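+ /* The Schmitt trigger only matters for inputs, so drop it
+ * before driving the pin as an output.
+ */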
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
+ MTK_DISABLE);
+ if (err)
+ goto err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+ MTK_OUTPUT);
+ if (err)
+ goto err;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ if (hw->soc->ies_present) {
+ mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES,
+ MTK_ENABLE);
+ }
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+ MTK_INPUT);
+ if (err)
+ goto err;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SR,
+ arg);
+ if (err)
+ goto err;
+
+ break;
+ case PIN_CONFIG_OUTPUT:
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+ MTK_OUTPUT);
+ if (err)
+ goto err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO,
+ arg);
+ if (err)
+ goto err;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ /* arg = 1: input mode & SMT enabled
+ * arg = 0: output mode & SMT disabled
+ * Repack arg so that bit 0 carries the DIR value and bit 1 the
+ * SMT value for the two register writes below.
+ */
+ arg = arg ? 2 : 1;
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
+ arg & 1);
+ if (err)
+ goto err;
+
+ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
+ !!(arg & 2));
+ if (err)
+ goto err;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ if (hw->soc->drive_set) {
+ err = hw->soc->drive_set(hw, desc, arg);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ case MTK_PIN_CONFIG_TDSEL:
+ case MTK_PIN_CONFIG_RDSEL:
+ reg = (param == MTK_PIN_CONFIG_TDSEL) ?
+ PINCTRL_PIN_REG_TDSEL : PINCTRL_PIN_REG_RDSEL;
+
+ err = mtk_hw_set_value(hw, desc, reg, arg);
+ if (err)
+ goto err;
+ break;
+ case MTK_PIN_CONFIG_PU_ADV:
+ case MTK_PIN_CONFIG_PD_ADV:
+ if (hw->soc->adv_pull_set) {
+ bool pullup;
+
+ pullup = param == MTK_PIN_CONFIG_PU_ADV;
+ err = hw->soc->adv_pull_set(hw, desc, pullup,
+ arg);
+ if (err)
+ return err;
+ } else {
+ return -ENOTSUPP;
+ }
+ break;
+ default:
+ err = -ENOTSUPP;
+ }
+
+err:
+ return err;
+}
+
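+/*
+ * The paris core registers one group per pin (see
+ * mtk_pctrl_get_group_pins below), so looking a pin up by number is a
+ * linear scan over the group table.
+ */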
+static struct mtk_pinctrl_group *
+mtk_pctrl_find_group_by_pin(struct mtk_pinctrl *hw, u32 pin)
+{
+ int i;
+
+ for (i = 0; i < hw->soc->ngrps; i++) {
+ struct mtk_pinctrl_group *grp = hw->groups + i;
+
+ if (grp->pin == pin)
+ return grp;
+ }
+
+ return NULL;
+}
+
+static const struct mtk_func_desc *
+mtk_pctrl_find_function_by_pin(struct mtk_pinctrl *hw, u32 pin_num, u32 fnum)
+{
+ const struct mtk_pin_desc *pin = hw->soc->pins + pin_num;
+ const struct mtk_func_desc *func = pin->funcs;
+
+ while (func && func->name) {
+ if (func->muxval == fnum)
+ return func;
+ func++;
+ }
+
+ return NULL;
+}
+
+static bool mtk_pctrl_is_function_valid(struct mtk_pinctrl *hw, u32 pin_num,
+ u32 fnum)
+{
+ int i;
+
+ for (i = 0; i < hw->soc->npins; i++) {
+ const struct mtk_pin_desc *pin = hw->soc->pins + i;
+
+ if (pin->number == pin_num) {
+ const struct mtk_func_desc *func = pin->funcs;
+
+ while (func && func->name) {
+ if (func->muxval == fnum)
+ return true;
+ func++;
+ }
+
+ break;
+ }
+ }
+
+ return false;
+}
+
+static int mtk_pctrl_dt_node_to_map_func(struct mtk_pinctrl *pctl,
+ u32 pin, u32 fnum,
+ struct mtk_pinctrl_group *grp,
+ struct pinctrl_map **map,
+ unsigned *reserved_maps,
+ unsigned *num_maps)
+{
+ bool ret;
+
+ if (*num_maps == *reserved_maps)
+ return -ENOSPC;
+
+ (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+ (*map)[*num_maps].data.mux.group = grp->name;
+
+ ret = mtk_pctrl_is_function_valid(pctl, pin, fnum);
+ if (!ret) {
+ dev_err(pctl->dev, "invalid function %d on pin %d\n",
+ fnum, pin);
+ return -EINVAL;
+ }
+
+ (*map)[*num_maps].data.mux.function = mtk_gpio_functions[fnum];
+ (*num_maps)++;
+
+ return 0;
+}
+
+static int mtk_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *node,
+ struct pinctrl_map **map,
+ unsigned *reserved_maps,
+ unsigned *num_maps)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ int num_pins, num_funcs, maps_per_pin, i, err;
+ struct mtk_pinctrl_group *grp;
+ unsigned int num_configs;
+ bool has_config = false;
+ unsigned long *configs;
+ u32 pinfunc, pin, func;
+ struct property *pins;
+ unsigned reserve = 0;
+
+ pins = of_find_property(node, "pinmux", NULL);
+ if (!pins) {
+ dev_err(hw->dev, "missing pins property in node %s .\n",
+ node->name);
+ return -EINVAL;
+ }
+
+ err = pinconf_generic_parse_dt_config(node, pctldev, &configs,
+ &num_configs);
+ if (err)
+ return err;
+
+ if (num_configs)
+ has_config = true;
+
+ num_pins = pins->length / sizeof(u32);
+ num_funcs = num_pins;
+ maps_per_pin = 0;
+ if (num_funcs)
+ maps_per_pin++;
+ if (has_config && num_pins >= 1)
+ maps_per_pin++;
+
+ if (!num_pins || !maps_per_pin) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ reserve = num_pins * maps_per_pin;
+
+ err = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps,
+ reserve);
+ if (err < 0)
+ goto exit;
+
+ for (i = 0; i < num_pins; i++) {
+ err = of_property_read_u32_index(node, "pinmux", i, &pinfunc);
+ if (err)
+ goto exit;
+
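+ /* Each "pinmux" cell packs a pin number and a mux value; the
+ * MTK_GET_PIN_NO()/MTK_GET_PIN_FUNC() helpers from
+ * dt-bindings/pinctrl/mt65xx.h split them apart again.
+ */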
+ pin = MTK_GET_PIN_NO(pinfunc);
+ func = MTK_GET_PIN_FUNC(pinfunc);
+
+ if (pin >= hw->soc->npins ||
+ func >= ARRAY_SIZE(mtk_gpio_functions)) {
+ dev_err(hw->dev, "invalid pins value.\n");
+ err = -EINVAL;
+ goto exit;
+ }
+
+ grp = mtk_pctrl_find_group_by_pin(hw, pin);
+ if (!grp) {
+ dev_err(hw->dev, "unable to match pin %d to group\n",
+ pin);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ err = mtk_pctrl_dt_node_to_map_func(hw, pin, func, grp, map,
+ reserved_maps, num_maps);
+ if (err < 0)
+ goto exit;
+
+ if (has_config) {
+ err = pinctrl_utils_add_map_configs(pctldev, map,
+ reserved_maps,
+ num_maps,
+ grp->name,
+ configs,
+ num_configs,
+ PIN_MAP_TYPE_CONFIGS_GROUP);
+ if (err < 0)
+ goto exit;
+ }
+ }
+
+ err = 0;
+
+exit:
+ kfree(configs);
+ return err;
+}
+
+static int mtk_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map,
+ unsigned *num_maps)
+{
+ struct device_node *np;
+ unsigned reserved_maps;
+ int ret;
+
+ *map = NULL;
+ *num_maps = 0;
+ reserved_maps = 0;
+
+ for_each_child_of_node(np_config, np) {
+ ret = mtk_pctrl_dt_subnode_to_map(pctldev, np, map,
+ &reserved_maps,
+ num_maps);
+ if (ret < 0) {
+ pinctrl_utils_free_map(pctldev, *map, *num_maps);
+ of_node_put(np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mtk_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+
+ return hw->soc->ngrps;
+}
+
+static const char *mtk_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+
+ return hw->groups[group].name;
+}
+
+static int mtk_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = (unsigned *)&hw->groups[group].pin;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops mtk_pctlops = {
+ .dt_node_to_map = mtk_pctrl_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_free_map,
+ .get_groups_count = mtk_pctrl_get_groups_count,
+ .get_group_name = mtk_pctrl_get_group_name,
+ .get_group_pins = mtk_pctrl_get_group_pins,
+};
+
+static int mtk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(mtk_gpio_functions);
+}
+
+static const char *mtk_pmx_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return mtk_gpio_functions[selector];
+}
+
+static int mtk_pmx_get_func_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = hw->grp_names;
+ *num_groups = hw->soc->ngrps;
+
+ return 0;
+}
+
+static int mtk_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ struct mtk_pinctrl_group *grp = hw->groups + group;
+ const struct mtk_func_desc *desc_func;
+ const struct mtk_pin_desc *desc;
+ bool ret;
+
+ ret = mtk_pctrl_is_function_valid(hw, grp->pin, function);
+ if (!ret) {
+ dev_err(hw->dev, "invalid function %d on group %d .\n",
+ function, group);
+ return -EINVAL;
+ }
+
+ desc_func = mtk_pctrl_find_function_by_pin(hw, grp->pin, function);
+ if (!desc_func)
+ return -EINVAL;
+
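+ /* Program the pin's mode field with the selected function's mux value */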
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[grp->pin];
+ mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE, desc_func->muxval);
+
+ return 0;
+}
+
+static const struct pinmux_ops mtk_pmxops = {
+ .get_functions_count = mtk_pmx_get_funcs_cnt,
+ .get_function_name = mtk_pmx_get_func_name,
+ .get_function_groups = mtk_pmx_get_func_groups,
+ .set_mux = mtk_pmx_set_mux,
+ .gpio_set_direction = mtk_pinmux_gpio_set_direction,
+ .gpio_request_enable = mtk_pinmux_gpio_request_enable,
+};
+
+static int mtk_pconf_group_get(struct pinctrl_dev *pctldev, unsigned group,
+ unsigned long *config)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+
+ *config = hw->groups[group].config;
+
+ return 0;
+}
+
+static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
+ unsigned long *configs, unsigned num_configs)
+{
+ struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+ struct mtk_pinctrl_group *grp = &hw->groups[group];
+ int i, ret;
+
+ for (i = 0; i < num_configs; i++) {
+ ret = mtk_pinconf_set(pctldev, grp->pin,
+ pinconf_to_config_param(configs[i]),
+ pinconf_to_config_argument(configs[i]));
+ if (ret < 0)
+ return ret;
+
+ grp->config = configs[i];
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops mtk_confops = {
+ .pin_config_get = mtk_pinconf_get,
+ .pin_config_group_get = mtk_pconf_group_get,
+ .pin_config_group_set = mtk_pconf_group_set,
+};
+
+static struct pinctrl_desc mtk_desc = {
+ .name = PINCTRL_PINCTRL_DEV,
+ .pctlops = &mtk_pctlops,
+ .pmxops = &mtk_pmxops,
+ .confops = &mtk_confops,
+ .owner = THIS_MODULE,
+};
+
+static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ const struct mtk_pin_desc *desc;
+ int value, err;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &value);
+ if (err)
+ return err;
+
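+ /* the DIR bit reads 1 for output; gpiolib expects 1 for input, so invert */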
+ return !value;
+}
+
+static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ const struct mtk_pin_desc *desc;
+ int value, err;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value);
+ if (err)
+ return err;
+
+ return !!value;
+}
+
+static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ const struct mtk_pin_desc *desc;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio];
+
+ mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
+}
+
+static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
+{
+ return pinctrl_gpio_direction_input(chip->base + gpio);
+}
+
+static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
+ int value)
+{
+ mtk_gpio_set(chip, gpio, value);
+
+ return pinctrl_gpio_direction_output(chip->base + gpio);
+}
+
+static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ const struct mtk_pin_desc *desc;
+
+ if (!hw->eint)
+ return -ENOTSUPP;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[offset];
+
+ if (desc->eint.eint_n == EINT_NA)
+ return -ENOTSUPP;
+
+ return mtk_eint_find_irq(hw->eint, desc->eint.eint_n);
+}
+
+static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
+ const struct mtk_pin_desc *desc;
+ u32 debounce;
+
+ desc = (const struct mtk_pin_desc *)&hw->soc->pins[offset];
+
+ if (!hw->eint ||
+ pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE ||
+ desc->eint.eint_n == EINT_NA)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+
+ return mtk_eint_set_debounce(hw->eint, desc->eint.eint_n, debounce);
+}
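
Only PIN_CONFIG_INPUT_DEBOUNCE is handled here, by forwarding the value to
the EINT block. A hedged consumer-side sketch (gpiod_set_debounce() is the
generic gpiolib call that lands in .set_config; the "key" con_id and the
helper name are made up for illustration):

	#include <linux/gpio/consumer.h>

	static int example_request_debounced(struct device *dev)
	{
		struct gpio_desc *btn;

		btn = devm_gpiod_get(dev, "key", GPIOD_IN);	/* "key" is a made-up con_id */
		if (IS_ERR(btn))
			return PTR_ERR(btn);

		/* 5 ms, in microseconds; routed to .set_config as PIN_CONFIG_INPUT_DEBOUNCE */
		return gpiod_set_debounce(btn, 5000);
	}
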
+
+static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
+{
+ struct gpio_chip *chip = &hw->chip;
+ int ret;
+
+ chip->label = PINCTRL_PINCTRL_DEV;
+ chip->parent = hw->dev;
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
+ chip->get_direction = mtk_gpio_get_direction;
+ chip->direction_input = mtk_gpio_direction_input;
+ chip->direction_output = mtk_gpio_direction_output;
+ chip->get = mtk_gpio_get;
+ chip->set = mtk_gpio_set;
+ chip->to_irq = mtk_gpio_to_irq;
+ chip->set_config = mtk_gpio_set_config;
+ chip->base = -1;
+ chip->ngpio = hw->soc->npins;
+ chip->of_node = np;
+ chip->of_gpio_n_cells = 2;
+
+ ret = gpiochip_add_data(chip, hw);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int mtk_pctrl_build_state(struct platform_device *pdev)
+{
+ struct mtk_pinctrl *hw = platform_get_drvdata(pdev);
+ int i;
+
+ /* Allocate groups */
+ hw->groups = devm_kmalloc_array(&pdev->dev, hw->soc->ngrps,
+ sizeof(*hw->groups), GFP_KERNEL);
+ if (!hw->groups)
+ return -ENOMEM;
+
+ /* We assume one pin per group and use the pin name as the group name. */
+ hw->grp_names = devm_kmalloc_array(&pdev->dev, hw->soc->ngrps,
+ sizeof(*hw->grp_names), GFP_KERNEL);
+ if (!hw->grp_names)
+ return -ENOMEM;
+
+ for (i = 0; i < hw->soc->npins; i++) {
+ const struct mtk_pin_desc *pin = hw->soc->pins + i;
+ struct mtk_pinctrl_group *group = hw->groups + i;
+
+ group->name = pin->name;
+ group->pin = pin->number;
+
+ hw->grp_names[i] = pin->name;
+ }
+
+ return 0;
+}
+
+int mtk_paris_pinctrl_probe(struct platform_device *pdev,
+ const struct mtk_pin_soc *soc)
+{
+ struct pinctrl_pin_desc *pins;
+ struct mtk_pinctrl *hw;
+ struct resource *res;
+ int err, i;
+
+ hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hw);
+ hw->soc = soc;
+ hw->dev = &pdev->dev;
+
+ if (!hw->soc->nbase_names) {
+ dev_err(&pdev->dev,
+ "SoC should be assigned at least one register base\n");
+ return -EINVAL;
+ }
+
+ hw->base = devm_kmalloc_array(&pdev->dev, hw->soc->nbase_names,
+ sizeof(*hw->base), GFP_KERNEL);
+ if (!hw->base)
+ return -ENOMEM;
+
+ for (i = 0; i < hw->soc->nbase_names; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ hw->soc->base_names[i]);
+ if (!res) {
+ dev_err(&pdev->dev, "missing IO resource\n");
+ return -ENXIO;
+ }
+
+ hw->base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base[i]))
+ return PTR_ERR(hw->base[i]);
+ }
+
+ hw->nbase = hw->soc->nbase_names;
+
+ err = mtk_pctrl_build_state(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "build state failed: %d\n", err);
+ return -EINVAL;
+ }
+
+ /* Copy the internal struct mtk_pin_desc entries into descriptions for the core */
+ pins = devm_kmalloc_array(&pdev->dev, hw->soc->npins, sizeof(*pins),
+ GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < hw->soc->npins; i++) {
+ pins[i].number = hw->soc->pins[i].number;
+ pins[i].name = hw->soc->pins[i].name;
+ }
+
+ /* Set up the pin descriptions for this SoC type */
+ mtk_desc.pins = (const struct pinctrl_pin_desc *)pins;
+ mtk_desc.npins = hw->soc->npins;
+ mtk_desc.num_custom_params = ARRAY_SIZE(mtk_custom_bindings);
+ mtk_desc.custom_params = mtk_custom_bindings;
+#ifdef CONFIG_DEBUG_FS
+ mtk_desc.custom_conf_items = mtk_conf_items;
+#endif
+
+ err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
+ &hw->pctrl);
+ if (err)
+ return err;
+
+ err = pinctrl_enable(hw->pctrl);
+ if (err)
+ return err;
+
+ err = mtk_build_eint(hw, pdev);
+ if (err)
+ dev_warn(&pdev->dev,
+ "Failed to add EINT, but pinctrl still can work\n");
+
+ /* The gpiochip must be built after pinctrl_enable() is done */
+ err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+ return err;
+ }
+
+ return 0;
+}
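
mtk_paris_pinctrl_probe() is the shared entry point; an SoC-specific driver
only has to pass in its struct mtk_pin_soc table. A sketch of such a
wrapper (the mt6765 names and compatible string are illustrative, not part
of this patch):

	/* mt6765_data: the SoC's struct mtk_pin_soc table (not shown) */
	static int mt6765_pinctrl_probe(struct platform_device *pdev)
	{
		return mtk_paris_pinctrl_probe(pdev, &mt6765_data);
	}

	static const struct of_device_id mt6765_pinctrl_of_match[] = {
		{ .compatible = "mediatek,mt6765-pinctrl", },
		{ }
	};

	static struct platform_driver mt6765_pinctrl_driver = {
		.driver = {
			.name = "mt6765-pinctrl",
			.of_match_table = mt6765_pinctrl_of_match,
		},
		.probe = mt6765_pinctrl_probe,
	};
	builtin_platform_driver(mt6765_pinctrl_driver);
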
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.h b/drivers/pinctrl/mediatek/pinctrl-paris.h
new file mode 100644
index 000000000000..37146caa667d
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ * Zhiyong Tao <zhiyong.tao@mediatek.com>
+ * Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ */
+#ifndef __PINCTRL_PARIS_H
+#define __PINCTRL_PARIS_H
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinctrl-utils.h"
+#include "../pinmux.h"
+#include "mtk-eint.h"
+#include "pinctrl-mtk-common-v2.h"
+
+#define MTK_RANGE(_a) { .range = (_a), .nranges = ARRAY_SIZE(_a), }
+
+#define MTK_EINT_FUNCTION(_eintmux, _eintnum) \
+ { \
+ .eint_m = _eintmux, \
+ .eint_n = _eintnum, \
+ }
+
+#define MTK_FUNCTION(_val, _name) \
+ { \
+ .muxval = _val, \
+ .name = _name, \
+ }
+
+#define MTK_PIN(_number, _name, _eint, _drv_n, ...) { \
+ .number = _number, \
+ .name = _name, \
+ .eint = _eint, \
+ .drv_n = _drv_n, \
+ .funcs = (struct mtk_func_desc[]){ \
+ __VA_ARGS__, { } }, \
+ }
+
+#define PINCTRL_PIN_GROUP(name, id) \
+ { \
+ name, \
+ id##_pins, \
+ ARRAY_SIZE(id##_pins), \
+ id##_funcs, \
+ }
+
+int mtk_paris_pinctrl_probe(struct platform_device *pdev,
+ const struct mtk_pin_soc *soc);
+
+#endif /* __PINCTRL_PARIS_H */
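
The MTK_PIN() and MTK_FUNCTION() helpers above keep the per-SoC pin tables
declarative. A hedged sketch of how an SoC file might use them (the pin
number, DRV_GRP4 drive group and function names are invented; real tables
come from the SoC datasheet):

	static const struct mtk_pin_desc mtk_pins_example[] = {
		MTK_PIN(0, "GPIO0", MTK_EINT_FUNCTION(0, 0), DRV_GRP4,
			MTK_FUNCTION(0, "GPIO0"),
			MTK_FUNCTION(1, "UART0_TXD")),
	};
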
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
index c80951d6caff..9ab537eb78a3 100644
--- a/drivers/pinctrl/meson/Kconfig
+++ b/drivers/pinctrl/meson/Kconfig
@@ -47,4 +47,10 @@ config PINCTRL_MESON_AXG
config PINCTRL_MESON_AXG_PMX
bool
+config PINCTRL_MESON_G12A
+ bool "Meson g12a Soc pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON_AXG_PMX
+ default y
+
endif
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index 3c6580c2d9d7..cf283f48f9d8 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PINCTRL_MESON_GXBB) += pinctrl-meson-gxbb.o
obj-$(CONFIG_PINCTRL_MESON_GXL) += pinctrl-meson-gxl.o
obj-$(CONFIG_PINCTRL_MESON_AXG_PMX) += pinctrl-meson-axg-pmx.o
obj-$(CONFIG_PINCTRL_MESON_AXG) += pinctrl-meson-axg.o
+obj-$(CONFIG_PINCTRL_MESON_G12A) += pinctrl-meson-g12a.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
new file mode 100644
index 000000000000..d494492e98e9
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -0,0 +1,1404 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Pin controller and GPIO driver for Amlogic Meson G12A SoC.
+ *
+ * Copyright (c) 2018 Amlogic, Inc. All rights reserved.
+ * Author: Xingyu Chen <xingyu.chen@amlogic.com>
+ * Author: Yixun Lan <yixun.lan@amlogic.com>
+ */
+
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+#include "pinctrl-meson.h"
+#include "pinctrl-meson-axg-pmx.h"
+
+static const struct pinctrl_pin_desc meson_g12a_periphs_pins[] = {
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+ MESON_PIN(GPIOZ_14),
+ MESON_PIN(GPIOZ_15),
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+ MESON_PIN(GPIOH_8),
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+ MESON_PIN(GPIOC_0),
+ MESON_PIN(GPIOC_1),
+ MESON_PIN(GPIOC_2),
+ MESON_PIN(GPIOC_3),
+ MESON_PIN(GPIOC_4),
+ MESON_PIN(GPIOC_5),
+ MESON_PIN(GPIOC_6),
+ MESON_PIN(GPIOC_7),
+ MESON_PIN(GPIOA_0),
+ MESON_PIN(GPIOA_1),
+ MESON_PIN(GPIOA_2),
+ MESON_PIN(GPIOA_3),
+ MESON_PIN(GPIOA_4),
+ MESON_PIN(GPIOA_5),
+ MESON_PIN(GPIOA_6),
+ MESON_PIN(GPIOA_7),
+ MESON_PIN(GPIOA_8),
+ MESON_PIN(GPIOA_9),
+ MESON_PIN(GPIOA_10),
+ MESON_PIN(GPIOA_11),
+ MESON_PIN(GPIOA_12),
+ MESON_PIN(GPIOA_13),
+ MESON_PIN(GPIOA_14),
+ MESON_PIN(GPIOA_15),
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+};
+
+static const struct pinctrl_pin_desc meson_g12a_aobus_pins[] = {
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+ MESON_PIN(GPIOAO_10),
+ MESON_PIN(GPIOAO_11),
+ MESON_PIN(GPIOE_0),
+ MESON_PIN(GPIOE_1),
+ MESON_PIN(GPIOE_2),
+};
+
+/* emmc */
+static const unsigned int emmc_nand_d0_pins[] = { BOOT_0 };
+static const unsigned int emmc_nand_d1_pins[] = { BOOT_1 };
+static const unsigned int emmc_nand_d2_pins[] = { BOOT_2 };
+static const unsigned int emmc_nand_d3_pins[] = { BOOT_3 };
+static const unsigned int emmc_nand_d4_pins[] = { BOOT_4 };
+static const unsigned int emmc_nand_d5_pins[] = { BOOT_5 };
+static const unsigned int emmc_nand_d6_pins[] = { BOOT_6 };
+static const unsigned int emmc_nand_d7_pins[] = { BOOT_7 };
+static const unsigned int emmc_clk_pins[] = { BOOT_8 };
+static const unsigned int emmc_cmd_pins[] = { BOOT_10 };
+static const unsigned int emmc_nand_ds_pins[] = { BOOT_13 };
+
+/* nand */
+static const unsigned int nand_wen_clk_pins[] = { BOOT_8 };
+static const unsigned int nand_ale_pins[] = { BOOT_9 };
+static const unsigned int nand_cle_pins[] = { BOOT_10 };
+static const unsigned int nand_ce0_pins[] = { BOOT_11 };
+static const unsigned int nand_ren_wr_pins[] = { BOOT_12 };
+static const unsigned int nand_rb0_pins[] = { BOOT_14 };
+static const unsigned int nand_ce1_pins[] = { BOOT_15 };
+
+/* nor */
+static const unsigned int nor_hold_pins[] = { BOOT_3 };
+static const unsigned int nor_d_pins[] = { BOOT_4 };
+static const unsigned int nor_q_pins[] = { BOOT_5 };
+static const unsigned int nor_c_pins[] = { BOOT_6 };
+static const unsigned int nor_wp_pins[] = { BOOT_7 };
+static const unsigned int nor_cs_pins[] = { BOOT_14 };
+
+/* sdio */
+static const unsigned int sdio_d0_pins[] = { GPIOX_0 };
+static const unsigned int sdio_d1_pins[] = { GPIOX_1 };
+static const unsigned int sdio_d2_pins[] = { GPIOX_2 };
+static const unsigned int sdio_d3_pins[] = { GPIOX_3 };
+static const unsigned int sdio_clk_pins[] = { GPIOX_4 };
+static const unsigned int sdio_cmd_pins[] = { GPIOX_5 };
+
+/* sdcard */
+static const unsigned int sdcard_d0_c_pins[] = { GPIOC_0 };
+static const unsigned int sdcard_d1_c_pins[] = { GPIOC_1 };
+static const unsigned int sdcard_d2_c_pins[] = { GPIOC_2 };
+static const unsigned int sdcard_d3_c_pins[] = { GPIOC_3 };
+static const unsigned int sdcard_clk_c_pins[] = { GPIOC_4 };
+static const unsigned int sdcard_cmd_c_pins[] = { GPIOC_5 };
+
+static const unsigned int sdcard_d0_z_pins[] = { GPIOZ_2 };
+static const unsigned int sdcard_d1_z_pins[] = { GPIOZ_3 };
+static const unsigned int sdcard_d2_z_pins[] = { GPIOZ_4 };
+static const unsigned int sdcard_d3_z_pins[] = { GPIOZ_5 };
+static const unsigned int sdcard_clk_z_pins[] = { GPIOZ_6 };
+static const unsigned int sdcard_cmd_z_pins[] = { GPIOZ_7 };
+
+/* spi0 */
+static const unsigned int spi0_mosi_c_pins[] = { GPIOC_0 };
+static const unsigned int spi0_miso_c_pins[] = { GPIOC_1 };
+static const unsigned int spi0_ss0_c_pins[] = { GPIOC_2 };
+static const unsigned int spi0_clk_c_pins[] = { GPIOC_3 };
+
+static const unsigned int spi0_mosi_x_pins[] = { GPIOX_8 };
+static const unsigned int spi0_miso_x_pins[] = { GPIOX_9 };
+static const unsigned int spi0_ss0_x_pins[] = { GPIOX_10 };
+static const unsigned int spi0_clk_x_pins[] = { GPIOX_11 };
+
+/* spi1 */
+static const unsigned int spi1_mosi_pins[] = { GPIOH_4 };
+static const unsigned int spi1_miso_pins[] = { GPIOH_5 };
+static const unsigned int spi1_ss0_pins[] = { GPIOH_6 };
+static const unsigned int spi1_clk_pins[] = { GPIOH_7 };
+
+/* i2c0 */
+static const unsigned int i2c0_sda_c_pins[] = { GPIOC_5 };
+static const unsigned int i2c0_sck_c_pins[] = { GPIOC_6 };
+static const unsigned int i2c0_sda_z0_pins[] = { GPIOZ_0 };
+static const unsigned int i2c0_sck_z1_pins[] = { GPIOZ_1 };
+static const unsigned int i2c0_sda_z7_pins[] = { GPIOZ_7 };
+static const unsigned int i2c0_sck_z8_pins[] = { GPIOZ_8 };
+
+/* i2c1 */
+static const unsigned int i2c1_sda_x_pins[] = { GPIOX_10 };
+static const unsigned int i2c1_sck_x_pins[] = { GPIOX_11 };
+static const unsigned int i2c1_sda_h2_pins[] = { GPIOH_2 };
+static const unsigned int i2c1_sck_h3_pins[] = { GPIOH_3 };
+static const unsigned int i2c1_sda_h6_pins[] = { GPIOH_6 };
+static const unsigned int i2c1_sck_h7_pins[] = { GPIOH_7 };
+
+/* i2c2 */
+static const unsigned int i2c2_sda_x_pins[] = { GPIOX_17 };
+static const unsigned int i2c2_sck_x_pins[] = { GPIOX_18 };
+static const unsigned int i2c2_sda_z_pins[] = { GPIOZ_14 };
+static const unsigned int i2c2_sck_z_pins[] = { GPIOZ_15 };
+
+/* i2c3 */
+static const unsigned int i2c3_sda_h_pins[] = { GPIOH_0 };
+static const unsigned int i2c3_sck_h_pins[] = { GPIOH_1 };
+static const unsigned int i2c3_sda_a_pins[] = { GPIOA_14 };
+static const unsigned int i2c3_sck_a_pins[] = { GPIOA_15 };
+
+/* uart_a */
+static const unsigned int uart_a_tx_pins[] = { GPIOX_12 };
+static const unsigned int uart_a_rx_pins[] = { GPIOX_13 };
+static const unsigned int uart_a_cts_pins[] = { GPIOX_14 };
+static const unsigned int uart_a_rts_pins[] = { GPIOX_15 };
+
+/* uart_b */
+static const unsigned int uart_b_tx_pins[] = { GPIOX_6 };
+static const unsigned int uart_b_rx_pins[] = { GPIOX_7 };
+
+/* uart_c */
+static const unsigned int uart_c_rts_pins[] = { GPIOH_4 };
+static const unsigned int uart_c_cts_pins[] = { GPIOH_5 };
+static const unsigned int uart_c_rx_pins[] = { GPIOH_6 };
+static const unsigned int uart_c_tx_pins[] = { GPIOH_7 };
+
+/* uart_ao_a_c */
+static const unsigned int uart_ao_a_rx_c_pins[] = { GPIOC_2 };
+static const unsigned int uart_ao_a_tx_c_pins[] = { GPIOC_3 };
+
+/* iso7816 */
+static const unsigned int iso7816_clk_c_pins[] = { GPIOC_5 };
+static const unsigned int iso7816_data_c_pins[] = { GPIOC_6 };
+static const unsigned int iso7816_clk_x_pins[] = { GPIOX_8 };
+static const unsigned int iso7816_data_x_pins[] = { GPIOX_9 };
+static const unsigned int iso7816_clk_h_pins[] = { GPIOH_6 };
+static const unsigned int iso7816_data_h_pins[] = { GPIOH_7 };
+static const unsigned int iso7816_clk_z_pins[] = { GPIOZ_0 };
+static const unsigned int iso7816_data_z_pins[] = { GPIOZ_1 };
+
+/* eth */
+static const unsigned int eth_mdio_pins[] = { GPIOZ_0 };
+static const unsigned int eth_mdc_pins[] = { GPIOZ_1 };
+static const unsigned int eth_rgmii_rx_clk_pins[] = { GPIOZ_2 };
+static const unsigned int eth_rx_dv_pins[] = { GPIOZ_3 };
+static const unsigned int eth_rxd0_pins[] = { GPIOZ_4 };
+static const unsigned int eth_rxd1_pins[] = { GPIOZ_5 };
+static const unsigned int eth_rxd2_rgmii_pins[] = { GPIOZ_6 };
+static const unsigned int eth_rxd3_rgmii_pins[] = { GPIOZ_7 };
+static const unsigned int eth_rgmii_tx_clk_pins[] = { GPIOZ_8 };
+static const unsigned int eth_txen_pins[] = { GPIOZ_9 };
+static const unsigned int eth_txd0_pins[] = { GPIOZ_10 };
+static const unsigned int eth_txd1_pins[] = { GPIOZ_11 };
+static const unsigned int eth_txd2_rgmii_pins[] = { GPIOZ_12 };
+static const unsigned int eth_txd3_rgmii_pins[] = { GPIOZ_13 };
+static const unsigned int eth_link_led_pins[] = { GPIOZ_14 };
+static const unsigned int eth_act_led_pins[] = { GPIOZ_15 };
+
+/* pwm_a */
+static const unsigned int pwm_a_pins[] = { GPIOX_6 };
+
+/* pwm_b */
+static const unsigned int pwm_b_x7_pins[] = { GPIOX_7 };
+static const unsigned int pwm_b_x19_pins[] = { GPIOX_19 };
+
+/* pwm_c */
+static const unsigned int pwm_c_c_pins[] = { GPIOC_4 };
+static const unsigned int pwm_c_x5_pins[] = { GPIOX_5 };
+static const unsigned int pwm_c_x8_pins[] = { GPIOX_8 };
+
+/* pwm_d */
+static const unsigned int pwm_d_x3_pins[] = { GPIOX_3 };
+static const unsigned int pwm_d_x6_pins[] = { GPIOX_6 };
+
+/* pwm_e */
+static const unsigned int pwm_e_pins[] = { GPIOX_16 };
+
+/* pwm_f */
+static const unsigned int pwm_f_x_pins[] = { GPIOX_7 };
+static const unsigned int pwm_f_h_pins[] = { GPIOH_5 };
+
+/* cec_ao */
+static const unsigned int cec_ao_a_h_pins[] = { GPIOH_3 };
+static const unsigned int cec_ao_b_h_pins[] = { GPIOH_3 };
+
+/* jtag_b */
+static const unsigned int jtag_b_tdo_pins[] = { GPIOC_0 };
+static const unsigned int jtag_b_tdi_pins[] = { GPIOC_1 };
+static const unsigned int jtag_b_clk_pins[] = { GPIOC_4 };
+static const unsigned int jtag_b_tms_pins[] = { GPIOC_5 };
+
+/* bt565_a */
+static const unsigned int bt565_a_vs_pins[] = { GPIOZ_0 };
+static const unsigned int bt565_a_hs_pins[] = { GPIOZ_1 };
+static const unsigned int bt565_a_clk_pins[] = { GPIOZ_3 };
+static const unsigned int bt565_a_din0_pins[] = { GPIOZ_4 };
+static const unsigned int bt565_a_din1_pins[] = { GPIOZ_5 };
+static const unsigned int bt565_a_din2_pins[] = { GPIOZ_6 };
+static const unsigned int bt565_a_din3_pins[] = { GPIOZ_7 };
+static const unsigned int bt565_a_din4_pins[] = { GPIOZ_8 };
+static const unsigned int bt565_a_din5_pins[] = { GPIOZ_9 };
+static const unsigned int bt565_a_din6_pins[] = { GPIOZ_10 };
+static const unsigned int bt565_a_din7_pins[] = { GPIOZ_11 };
+
+/* tsin_a */
+static const unsigned int tsin_a_valid_pins[] = { GPIOX_2 };
+static const unsigned int tsin_a_sop_pins[] = { GPIOX_1 };
+static const unsigned int tsin_a_din0_pins[] = { GPIOX_0 };
+static const unsigned int tsin_a_clk_pins[] = { GPIOX_3 };
+
+/* tsin_b */
+static const unsigned int tsin_b_valid_x_pins[] = { GPIOX_9 };
+static const unsigned int tsin_b_sop_x_pins[] = { GPIOX_8 };
+static const unsigned int tsin_b_din0_x_pins[] = { GPIOX_10 };
+static const unsigned int tsin_b_clk_x_pins[] = { GPIOX_11 };
+
+static const unsigned int tsin_b_valid_z_pins[] = { GPIOZ_2 };
+static const unsigned int tsin_b_sop_z_pins[] = { GPIOZ_3 };
+static const unsigned int tsin_b_din0_z_pins[] = { GPIOZ_4 };
+static const unsigned int tsin_b_clk_z_pins[] = { GPIOZ_5 };
+
+static const unsigned int tsin_b_fail_pins[] = { GPIOZ_6 };
+static const unsigned int tsin_b_din1_pins[] = { GPIOZ_7 };
+static const unsigned int tsin_b_din2_pins[] = { GPIOZ_8 };
+static const unsigned int tsin_b_din3_pins[] = { GPIOZ_9 };
+static const unsigned int tsin_b_din4_pins[] = { GPIOZ_10 };
+static const unsigned int tsin_b_din5_pins[] = { GPIOZ_11 };
+static const unsigned int tsin_b_din6_pins[] = { GPIOZ_12 };
+static const unsigned int tsin_b_din7_pins[] = { GPIOZ_13 };
+
+/* hdmitx */
+static const unsigned int hdmitx_sda_pins[] = { GPIOH_0 };
+static const unsigned int hdmitx_sck_pins[] = { GPIOH_1 };
+static const unsigned int hdmitx_hpd_in_pins[] = { GPIOH_2 };
+
+/* pdm */
+static const unsigned int pdm_din0_c_pins[] = { GPIOC_0 };
+static const unsigned int pdm_din1_c_pins[] = { GPIOC_1 };
+static const unsigned int pdm_din2_c_pins[] = { GPIOC_2 };
+static const unsigned int pdm_din3_c_pins[] = { GPIOC_3 };
+static const unsigned int pdm_dclk_c_pins[] = { GPIOC_4 };
+
+static const unsigned int pdm_din0_x_pins[] = { GPIOX_0 };
+static const unsigned int pdm_din1_x_pins[] = { GPIOX_1 };
+static const unsigned int pdm_din2_x_pins[] = { GPIOX_2 };
+static const unsigned int pdm_din3_x_pins[] = { GPIOX_3 };
+static const unsigned int pdm_dclk_x_pins[] = { GPIOX_4 };
+
+static const unsigned int pdm_din0_z_pins[] = { GPIOZ_2 };
+static const unsigned int pdm_din1_z_pins[] = { GPIOZ_3 };
+static const unsigned int pdm_din2_z_pins[] = { GPIOZ_4 };
+static const unsigned int pdm_din3_z_pins[] = { GPIOZ_5 };
+static const unsigned int pdm_dclk_z_pins[] = { GPIOZ_6 };
+
+static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 };
+static const unsigned int pdm_din1_a_pins[] = { GPIOA_9 };
+static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 };
+static const unsigned int pdm_din3_a_pins[] = { GPIOA_5 };
+static const unsigned int pdm_dclk_a_pins[] = { GPIOA_7 };
+
+/* spdif_in */
+static const unsigned int spdif_in_h_pins[] = { GPIOH_5 };
+static const unsigned int spdif_in_a10_pins[] = { GPIOA_10 };
+static const unsigned int spdif_in_a12_pins[] = { GPIOA_12 };
+
+/* spdif_out */
+static const unsigned int spdif_out_h_pins[] = { GPIOH_4 };
+static const unsigned int spdif_out_a11_pins[] = { GPIOA_11 };
+static const unsigned int spdif_out_a13_pins[] = { GPIOA_13 };
+
+/* mclk0 */
+static const unsigned int mclk0_a_pins[] = { GPIOA_0 };
+
+/* mclk1 */
+static const unsigned int mclk1_x_pins[] = { GPIOX_5 };
+static const unsigned int mclk1_z_pins[] = { GPIOZ_8 };
+static const unsigned int mclk1_a_pins[] = { GPIOA_11 };
+
+/* tdm */
+static const unsigned int tdm_a_slv_sclk_pins[] = { GPIOX_11 };
+static const unsigned int tdm_a_slv_fs_pins[] = { GPIOX_10 };
+static const unsigned int tdm_a_sclk_pins[] = { GPIOX_11 };
+static const unsigned int tdm_a_fs_pins[] = { GPIOX_10 };
+static const unsigned int tdm_a_din0_pins[] = { GPIOX_9 };
+static const unsigned int tdm_a_din1_pins[] = { GPIOX_8 };
+static const unsigned int tdm_a_dout0_pins[] = { GPIOX_9 };
+static const unsigned int tdm_a_dout1_pins[] = { GPIOX_8 };
+
+static const unsigned int tdm_b_slv_sclk_pins[] = { GPIOA_1 };
+static const unsigned int tdm_b_slv_fs_pins[] = { GPIOA_2 };
+static const unsigned int tdm_b_sclk_pins[] = { GPIOA_1 };
+static const unsigned int tdm_b_fs_pins[] = { GPIOA_2 };
+static const unsigned int tdm_b_din0_pins[] = { GPIOA_3 };
+static const unsigned int tdm_b_din1_pins[] = { GPIOA_4 };
+static const unsigned int tdm_b_din2_pins[] = { GPIOA_5 };
+static const unsigned int tdm_b_din3_a_pins[] = { GPIOA_6 };
+static const unsigned int tdm_b_din3_h_pins[] = { GPIOH_5 };
+static const unsigned int tdm_b_dout0_pins[] = { GPIOA_3 };
+static const unsigned int tdm_b_dout1_pins[] = { GPIOA_4 };
+static const unsigned int tdm_b_dout2_pins[] = { GPIOA_5 };
+static const unsigned int tdm_b_dout3_a_pins[] = { GPIOA_6 };
+static const unsigned int tdm_b_dout3_h_pins[] = { GPIOH_5 };
+
+static const unsigned int tdm_c_slv_sclk_a_pins[] = { GPIOA_12 };
+static const unsigned int tdm_c_slv_fs_a_pins[] = { GPIOA_13 };
+static const unsigned int tdm_c_slv_sclk_z_pins[] = { GPIOZ_7 };
+static const unsigned int tdm_c_slv_fs_z_pins[] = { GPIOZ_6 };
+static const unsigned int tdm_c_sclk_a_pins[] = { GPIOA_12 };
+static const unsigned int tdm_c_fs_a_pins[] = { GPIOA_13 };
+static const unsigned int tdm_c_sclk_z_pins[] = { GPIOZ_7 };
+static const unsigned int tdm_c_fs_z_pins[] = { GPIOZ_6 };
+static const unsigned int tdm_c_din0_a_pins[] = { GPIOA_10 };
+static const unsigned int tdm_c_din1_a_pins[] = { GPIOA_9 };
+static const unsigned int tdm_c_din2_a_pins[] = { GPIOA_8 };
+static const unsigned int tdm_c_din3_a_pins[] = { GPIOA_7 };
+static const unsigned int tdm_c_din0_z_pins[] = { GPIOZ_2 };
+static const unsigned int tdm_c_din1_z_pins[] = { GPIOZ_3 };
+static const unsigned int tdm_c_din2_z_pins[] = { GPIOZ_4 };
+static const unsigned int tdm_c_din3_z_pins[] = { GPIOZ_5 };
+static const unsigned int tdm_c_dout0_a_pins[] = { GPIOA_10 };
+static const unsigned int tdm_c_dout1_a_pins[] = { GPIOA_9 };
+static const unsigned int tdm_c_dout2_a_pins[] = { GPIOA_8 };
+static const unsigned int tdm_c_dout3_a_pins[] = { GPIOA_7 };
+static const unsigned int tdm_c_dout0_z_pins[] = { GPIOZ_2 };
+static const unsigned int tdm_c_dout1_z_pins[] = { GPIOZ_3 };
+static const unsigned int tdm_c_dout2_z_pins[] = { GPIOZ_4 };
+static const unsigned int tdm_c_dout3_z_pins[] = { GPIOZ_5 };
+
+static struct meson_pmx_group meson_g12a_periphs_groups[] = {
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+ GPIO_GROUP(GPIOZ_14),
+ GPIO_GROUP(GPIOZ_15),
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIOH_8),
+ GPIO_GROUP(BOOT_0),
+ GPIO_GROUP(BOOT_1),
+ GPIO_GROUP(BOOT_2),
+ GPIO_GROUP(BOOT_3),
+ GPIO_GROUP(BOOT_4),
+ GPIO_GROUP(BOOT_5),
+ GPIO_GROUP(BOOT_6),
+ GPIO_GROUP(BOOT_7),
+ GPIO_GROUP(BOOT_8),
+ GPIO_GROUP(BOOT_9),
+ GPIO_GROUP(BOOT_10),
+ GPIO_GROUP(BOOT_11),
+ GPIO_GROUP(BOOT_12),
+ GPIO_GROUP(BOOT_13),
+ GPIO_GROUP(BOOT_14),
+ GPIO_GROUP(BOOT_15),
+ GPIO_GROUP(GPIOC_0),
+ GPIO_GROUP(GPIOC_1),
+ GPIO_GROUP(GPIOC_2),
+ GPIO_GROUP(GPIOC_3),
+ GPIO_GROUP(GPIOC_4),
+ GPIO_GROUP(GPIOC_5),
+ GPIO_GROUP(GPIOC_6),
+ GPIO_GROUP(GPIOC_7),
+ GPIO_GROUP(GPIOA_0),
+ GPIO_GROUP(GPIOA_1),
+ GPIO_GROUP(GPIOA_2),
+ GPIO_GROUP(GPIOA_3),
+ GPIO_GROUP(GPIOA_4),
+ GPIO_GROUP(GPIOA_5),
+ GPIO_GROUP(GPIOA_6),
+ GPIO_GROUP(GPIOA_7),
+ GPIO_GROUP(GPIOA_8),
+ GPIO_GROUP(GPIOA_9),
+ GPIO_GROUP(GPIOA_10),
+ GPIO_GROUP(GPIOA_11),
+ GPIO_GROUP(GPIOA_12),
+ GPIO_GROUP(GPIOA_13),
+ GPIO_GROUP(GPIOA_14),
+ GPIO_GROUP(GPIOA_15),
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+
+ /* bank BOOT */
+ GROUP(emmc_nand_d0, 1),
+ GROUP(emmc_nand_d1, 1),
+ GROUP(emmc_nand_d2, 1),
+ GROUP(emmc_nand_d3, 1),
+ GROUP(emmc_nand_d4, 1),
+ GROUP(emmc_nand_d5, 1),
+ GROUP(emmc_nand_d6, 1),
+ GROUP(emmc_nand_d7, 1),
+ GROUP(emmc_clk, 1),
+ GROUP(emmc_cmd, 1),
+ GROUP(emmc_nand_ds, 1),
+ GROUP(nand_ce0, 2),
+ GROUP(nand_ale, 2),
+ GROUP(nand_cle, 2),
+ GROUP(nand_wen_clk, 2),
+ GROUP(nand_ren_wr, 2),
+ GROUP(nand_rb0, 2),
+ GROUP(nand_ce1, 2),
+ GROUP(nor_hold, 3),
+ GROUP(nor_d, 3),
+ GROUP(nor_q, 3),
+ GROUP(nor_c, 3),
+ GROUP(nor_wp, 3),
+ GROUP(nor_cs, 3),
+
+ /* bank GPIOZ */
+ GROUP(sdcard_d0_z, 5),
+ GROUP(sdcard_d1_z, 5),
+ GROUP(sdcard_d2_z, 5),
+ GROUP(sdcard_d3_z, 5),
+ GROUP(sdcard_clk_z, 5),
+ GROUP(sdcard_cmd_z, 5),
+ GROUP(i2c0_sda_z0, 4),
+ GROUP(i2c0_sck_z1, 4),
+ GROUP(i2c0_sda_z7, 7),
+ GROUP(i2c0_sck_z8, 7),
+ GROUP(i2c2_sda_z, 3),
+ GROUP(i2c2_sck_z, 3),
+ GROUP(iso7816_clk_z, 3),
+ GROUP(iso7816_data_z, 3),
+ GROUP(eth_mdio, 1),
+ GROUP(eth_mdc, 1),
+ GROUP(eth_rgmii_rx_clk, 1),
+ GROUP(eth_rx_dv, 1),
+ GROUP(eth_rxd0, 1),
+ GROUP(eth_rxd1, 1),
+ GROUP(eth_rxd2_rgmii, 1),
+ GROUP(eth_rxd3_rgmii, 1),
+ GROUP(eth_rgmii_tx_clk, 1),
+ GROUP(eth_txen, 1),
+ GROUP(eth_txd0, 1),
+ GROUP(eth_txd1, 1),
+ GROUP(eth_txd2_rgmii, 1),
+ GROUP(eth_txd3_rgmii, 1),
+ GROUP(eth_link_led, 1),
+ GROUP(eth_act_led, 1),
+ GROUP(bt565_a_vs, 2),
+ GROUP(bt565_a_hs, 2),
+ GROUP(bt565_a_clk, 2),
+ GROUP(bt565_a_din0, 2),
+ GROUP(bt565_a_din1, 2),
+ GROUP(bt565_a_din2, 2),
+ GROUP(bt565_a_din3, 2),
+ GROUP(bt565_a_din4, 2),
+ GROUP(bt565_a_din5, 2),
+ GROUP(bt565_a_din6, 2),
+ GROUP(bt565_a_din7, 2),
+ GROUP(tsin_b_valid_z, 3),
+ GROUP(tsin_b_sop_z, 3),
+ GROUP(tsin_b_din0_z, 3),
+ GROUP(tsin_b_clk_z, 3),
+ GROUP(tsin_b_fail, 3),
+ GROUP(tsin_b_din1, 3),
+ GROUP(tsin_b_din2, 3),
+ GROUP(tsin_b_din3, 3),
+ GROUP(tsin_b_din4, 3),
+ GROUP(tsin_b_din5, 3),
+ GROUP(tsin_b_din6, 3),
+ GROUP(tsin_b_din7, 3),
+ GROUP(pdm_din0_z, 7),
+ GROUP(pdm_din1_z, 7),
+ GROUP(pdm_din2_z, 7),
+ GROUP(pdm_din3_z, 7),
+ GROUP(pdm_dclk_z, 7),
+ GROUP(tdm_c_slv_sclk_z, 6),
+ GROUP(tdm_c_slv_fs_z, 6),
+ GROUP(tdm_c_din0_z, 6),
+ GROUP(tdm_c_din1_z, 6),
+ GROUP(tdm_c_din2_z, 6),
+ GROUP(tdm_c_din3_z, 6),
+ GROUP(tdm_c_sclk_z, 4),
+ GROUP(tdm_c_fs_z, 4),
+ GROUP(tdm_c_dout0_z, 4),
+ GROUP(tdm_c_dout1_z, 4),
+ GROUP(tdm_c_dout2_z, 4),
+ GROUP(tdm_c_dout3_z, 4),
+ GROUP(mclk1_z, 4),
+
+ /* bank GPIOX */
+ GROUP(sdio_d0, 1),
+ GROUP(sdio_d1, 1),
+ GROUP(sdio_d2, 1),
+ GROUP(sdio_d3, 1),
+ GROUP(sdio_clk, 1),
+ GROUP(sdio_cmd, 1),
+ GROUP(spi0_mosi_x, 4),
+ GROUP(spi0_miso_x, 4),
+ GROUP(spi0_ss0_x, 4),
+ GROUP(spi0_clk_x, 4),
+ GROUP(i2c1_sda_x, 5),
+ GROUP(i2c1_sck_x, 5),
+ GROUP(i2c2_sda_x, 1),
+ GROUP(i2c2_sck_x, 1),
+ GROUP(uart_a_tx, 1),
+ GROUP(uart_a_rx, 1),
+ GROUP(uart_a_cts, 1),
+ GROUP(uart_a_rts, 1),
+ GROUP(uart_b_tx, 2),
+ GROUP(uart_b_rx, 2),
+ GROUP(iso7816_clk_x, 6),
+ GROUP(iso7816_data_x, 6),
+ GROUP(pwm_a, 1),
+ GROUP(pwm_b_x7, 4),
+ GROUP(pwm_b_x19, 1),
+ GROUP(pwm_c_x5, 4),
+ GROUP(pwm_c_x8, 5),
+ GROUP(pwm_d_x3, 4),
+ GROUP(pwm_d_x6, 4),
+ GROUP(pwm_e, 1),
+ GROUP(pwm_f_x, 1),
+ GROUP(tsin_a_valid, 3),
+ GROUP(tsin_a_sop, 3),
+ GROUP(tsin_a_din0, 3),
+ GROUP(tsin_a_clk, 3),
+ GROUP(tsin_b_valid_x, 3),
+ GROUP(tsin_b_sop_x, 3),
+ GROUP(tsin_b_din0_x, 3),
+ GROUP(tsin_b_clk_x, 3),
+ GROUP(pdm_din0_x, 2),
+ GROUP(pdm_din1_x, 2),
+ GROUP(pdm_din2_x, 2),
+ GROUP(pdm_din3_x, 2),
+ GROUP(pdm_dclk_x, 2),
+ GROUP(tdm_a_slv_sclk, 2),
+ GROUP(tdm_a_slv_fs, 2),
+ GROUP(tdm_a_din0, 2),
+ GROUP(tdm_a_din1, 2),
+ GROUP(tdm_a_sclk, 1),
+ GROUP(tdm_a_fs, 1),
+ GROUP(tdm_a_dout0, 1),
+ GROUP(tdm_a_dout1, 1),
+ GROUP(mclk1_x, 2),
+
+ /* bank GPIOC */
+ GROUP(sdcard_d0_c, 1),
+ GROUP(sdcard_d1_c, 1),
+ GROUP(sdcard_d2_c, 1),
+ GROUP(sdcard_d3_c, 1),
+ GROUP(sdcard_clk_c, 1),
+ GROUP(sdcard_cmd_c, 1),
+ GROUP(spi0_mosi_c, 5),
+ GROUP(spi0_miso_c, 5),
+ GROUP(spi0_ss0_c, 5),
+ GROUP(spi0_clk_c, 5),
+ GROUP(i2c0_sda_c, 3),
+ GROUP(i2c0_sck_c, 3),
+ GROUP(uart_ao_a_rx_c, 2),
+ GROUP(uart_ao_a_tx_c, 2),
+ GROUP(iso7816_clk_c, 5),
+ GROUP(iso7816_data_c, 5),
+ GROUP(pwm_c_c, 5),
+ GROUP(jtag_b_tdo, 2),
+ GROUP(jtag_b_tdi, 2),
+ GROUP(jtag_b_clk, 2),
+ GROUP(jtag_b_tms, 2),
+ GROUP(pdm_din0_c, 4),
+ GROUP(pdm_din1_c, 4),
+ GROUP(pdm_din2_c, 4),
+ GROUP(pdm_din3_c, 4),
+ GROUP(pdm_dclk_c, 4),
+
+ /* bank GPIOH */
+ GROUP(spi1_mosi, 3),
+ GROUP(spi1_miso, 3),
+ GROUP(spi1_ss0, 3),
+ GROUP(spi1_clk, 3),
+ GROUP(i2c1_sda_h2, 2),
+ GROUP(i2c1_sck_h3, 2),
+ GROUP(i2c1_sda_h6, 4),
+ GROUP(i2c1_sck_h7, 4),
+ GROUP(i2c3_sda_h, 2),
+ GROUP(i2c3_sck_h, 2),
+ GROUP(uart_c_tx, 2),
+ GROUP(uart_c_rx, 2),
+ GROUP(uart_c_cts, 2),
+ GROUP(uart_c_rts, 2),
+ GROUP(iso7816_clk_h, 1),
+ GROUP(iso7816_data_h, 1),
+ GROUP(pwm_f_h, 4),
+ GROUP(cec_ao_a_h, 4),
+ GROUP(cec_ao_b_h, 5),
+ GROUP(hdmitx_sda, 1),
+ GROUP(hdmitx_sck, 1),
+ GROUP(hdmitx_hpd_in, 1),
+ GROUP(spdif_out_h, 1),
+ GROUP(spdif_in_h, 1),
+ GROUP(tdm_b_din3_h, 6),
+ GROUP(tdm_b_dout3_h, 5),
+
+ /* bank GPIOA */
+ GROUP(i2c3_sda_a, 2),
+ GROUP(i2c3_sck_a, 2),
+ GROUP(pdm_din0_a, 1),
+ GROUP(pdm_din1_a, 1),
+ GROUP(pdm_din2_a, 1),
+ GROUP(pdm_din3_a, 1),
+ GROUP(pdm_dclk_a, 1),
+ GROUP(spdif_in_a10, 1),
+ GROUP(spdif_in_a12, 1),
+ GROUP(spdif_out_a11, 1),
+ GROUP(spdif_out_a13, 1),
+ GROUP(tdm_b_slv_sclk, 2),
+ GROUP(tdm_b_slv_fs, 2),
+ GROUP(tdm_b_din0, 2),
+ GROUP(tdm_b_din1, 2),
+ GROUP(tdm_b_din2, 2),
+ GROUP(tdm_b_din3_a, 2),
+ GROUP(tdm_b_sclk, 1),
+ GROUP(tdm_b_fs, 1),
+ GROUP(tdm_b_dout0, 1),
+ GROUP(tdm_b_dout1, 1),
+ GROUP(tdm_b_dout2, 3),
+ GROUP(tdm_b_dout3_a, 3),
+ GROUP(tdm_c_slv_sclk_a, 3),
+ GROUP(tdm_c_slv_fs_a, 3),
+ GROUP(tdm_c_din0_a, 3),
+ GROUP(tdm_c_din1_a, 3),
+ GROUP(tdm_c_din2_a, 3),
+ GROUP(tdm_c_din3_a, 3),
+ GROUP(tdm_c_sclk_a, 2),
+ GROUP(tdm_c_fs_a, 2),
+ GROUP(tdm_c_dout0_a, 2),
+ GROUP(tdm_c_dout1_a, 2),
+ GROUP(tdm_c_dout2_a, 2),
+ GROUP(tdm_c_dout3_a, 2),
+ GROUP(mclk0_a, 1),
+ GROUP(mclk1_a, 2),
+};
+
+/* uart_ao_a */
+static const unsigned int uart_ao_a_tx_pins[] = { GPIOAO_0 };
+static const unsigned int uart_ao_a_rx_pins[] = { GPIOAO_1 };
+static const unsigned int uart_ao_a_cts_pins[] = { GPIOE_0 };
+static const unsigned int uart_ao_a_rts_pins[] = { GPIOE_1 };
+
+/* uart_ao_b */
+static const unsigned int uart_ao_b_tx_2_pins[] = { GPIOAO_2 };
+static const unsigned int uart_ao_b_rx_3_pins[] = { GPIOAO_3 };
+static const unsigned int uart_ao_b_tx_8_pins[] = { GPIOAO_8 };
+static const unsigned int uart_ao_b_rx_9_pins[] = { GPIOAO_9 };
+static const unsigned int uart_ao_b_cts_pins[] = { GPIOE_0 };
+static const unsigned int uart_ao_b_rts_pins[] = { GPIOE_1 };
+
+/* i2c_ao */
+static const unsigned int i2c_ao_sck_pins[] = { GPIOAO_2 };
+static const unsigned int i2c_ao_sda_pins[] = { GPIOAO_3 };
+
+static const unsigned int i2c_ao_sck_e_pins[] = { GPIOE_0 };
+static const unsigned int i2c_ao_sda_e_pins[] = { GPIOE_1 };
+
+/* i2c_ao_slave */
+static const unsigned int i2c_ao_slave_sck_pins[] = { GPIOAO_2 };
+static const unsigned int i2c_ao_slave_sda_pins[] = { GPIOAO_3 };
+
+/* ir_in */
+static const unsigned int remote_ao_input_pins[] = { GPIOAO_5 };
+
+/* ir_out */
+static const unsigned int remote_ao_out_pins[] = { GPIOAO_4 };
+
+/* pwm_ao_a */
+static const unsigned int pwm_ao_a_pins[] = { GPIOAO_11 };
+static const unsigned int pwm_ao_a_hiz_pins[] = { GPIOAO_11 };
+
+/* pwm_ao_b */
+static const unsigned int pwm_ao_b_pins[] = { GPIOE_0 };
+
+/* pwm_ao_c */
+static const unsigned int pwm_ao_c_4_pins[] = { GPIOAO_4 };
+static const unsigned int pwm_ao_c_hiz_pins[] = { GPIOAO_4 };
+static const unsigned int pwm_ao_c_6_pins[] = { GPIOAO_6 };
+
+/* pwm_ao_d */
+static const unsigned int pwm_ao_d_5_pins[] = { GPIOAO_5 };
+static const unsigned int pwm_ao_d_10_pins[] = { GPIOAO_10 };
+static const unsigned int pwm_ao_d_e_pins[] = { GPIOE_1 };
+
+/* jtag_a */
+static const unsigned int jtag_a_tdi_pins[] = { GPIOAO_8 };
+static const unsigned int jtag_a_tdo_pins[] = { GPIOAO_9 };
+static const unsigned int jtag_a_clk_pins[] = { GPIOAO_6 };
+static const unsigned int jtag_a_tms_pins[] = { GPIOAO_7 };
+
+/* cec_ao */
+static const unsigned int cec_ao_a_pins[] = { GPIOAO_10 };
+static const unsigned int cec_ao_b_pins[] = { GPIOAO_10 };
+
+/* tsin_ao_a */
+static const unsigned int tsin_ao_asop_pins[] = { GPIOAO_6 };
+static const unsigned int tsin_ao_adin0_pins[] = { GPIOAO_7 };
+static const unsigned int tsin_ao_aclk_pins[] = { GPIOAO_8 };
+static const unsigned int tsin_ao_a_valid_pins[] = { GPIOAO_9 };
+
+/* spdif_ao_out */
+static const unsigned int spdif_ao_out_pins[] = { GPIOAO_10 };
+
+/* tdm_ao_b */
+static const unsigned int tdm_ao_b_slv_fs_pins[] = { GPIOAO_7 };
+static const unsigned int tdm_ao_b_slv_sclk_pins[] = { GPIOAO_8 };
+static const unsigned int tdm_ao_b_fs_pins[] = { GPIOAO_7 };
+static const unsigned int tdm_ao_b_sclk_pins[] = { GPIOAO_8 };
+static const unsigned int tdm_ao_b_din0_pins[] = { GPIOAO_4 };
+static const unsigned int tdm_ao_b_din1_pins[] = { GPIOAO_10 };
+static const unsigned int tdm_ao_b_din2_pins[] = { GPIOAO_6 };
+static const unsigned int tdm_ao_b_dout0_pins[] = { GPIOAO_4 };
+static const unsigned int tdm_ao_b_dout1_pins[] = { GPIOAO_10 };
+static const unsigned int tdm_ao_b_dout2_pins[] = { GPIOAO_6 };
+
+/* mclk0_ao */
+static const unsigned int mclk0_ao_pins[] = { GPIOAO_9 };
+
+static struct meson_pmx_group meson_g12a_aobus_groups[] = {
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
+ GPIO_GROUP(GPIOAO_10),
+ GPIO_GROUP(GPIOAO_11),
+ GPIO_GROUP(GPIOE_0),
+ GPIO_GROUP(GPIOE_1),
+ GPIO_GROUP(GPIOE_2),
+
+ /* bank AO */
+ GROUP(uart_ao_a_tx, 1),
+ GROUP(uart_ao_a_rx, 1),
+ GROUP(uart_ao_a_cts, 1),
+ GROUP(uart_ao_a_rts, 1),
+ GROUP(uart_ao_b_tx_2, 2),
+ GROUP(uart_ao_b_rx_3, 2),
+ GROUP(uart_ao_b_tx_8, 3),
+ GROUP(uart_ao_b_rx_9, 3),
+ GROUP(uart_ao_b_cts, 2),
+ GROUP(uart_ao_b_rts, 2),
+ GROUP(i2c_ao_sck, 1),
+ GROUP(i2c_ao_sda, 1),
+ GROUP(i2c_ao_sck_e, 4),
+ GROUP(i2c_ao_sda_e, 4),
+ GROUP(i2c_ao_slave_sck, 3),
+ GROUP(i2c_ao_slave_sda, 3),
+ GROUP(remote_ao_input, 1),
+ GROUP(remote_ao_out, 1),
+ GROUP(pwm_ao_a, 3),
+ GROUP(pwm_ao_a_hiz, 2),
+ GROUP(pwm_ao_b, 3),
+ GROUP(pwm_ao_c_4, 3),
+ GROUP(pwm_ao_c_hiz, 4),
+ GROUP(pwm_ao_c_6, 3),
+ GROUP(pwm_ao_d_5, 3),
+ GROUP(pwm_ao_d_10, 3),
+ GROUP(pwm_ao_d_e, 3),
+ GROUP(jtag_a_tdi, 1),
+ GROUP(jtag_a_tdo, 1),
+ GROUP(jtag_a_clk, 1),
+ GROUP(jtag_a_tms, 1),
+ GROUP(cec_ao_a, 1),
+ GROUP(cec_ao_b, 2),
+ GROUP(tsin_ao_asop, 4),
+ GROUP(tsin_ao_adin0, 4),
+ GROUP(tsin_ao_aclk, 4),
+ GROUP(tsin_ao_a_valid, 4),
+ GROUP(spdif_ao_out, 4),
+ GROUP(tdm_ao_b_dout0, 5),
+ GROUP(tdm_ao_b_dout1, 5),
+ GROUP(tdm_ao_b_dout2, 5),
+ GROUP(tdm_ao_b_fs, 5),
+ GROUP(tdm_ao_b_sclk, 5),
+ GROUP(tdm_ao_b_din0, 6),
+ GROUP(tdm_ao_b_din1, 6),
+ GROUP(tdm_ao_b_din2, 6),
+ GROUP(tdm_ao_b_slv_fs, 6),
+ GROUP(tdm_ao_b_slv_sclk, 6),
+ GROUP(mclk0_ao, 5),
+};
+
+static const char * const gpio_periphs_groups[] = {
+ "GPIOZ_0", "GPIOZ_1", "GPIOZ_2", "GPIOZ_3", "GPIOZ_4",
+ "GPIOZ_5", "GPIOZ_6", "GPIOZ_7", "GPIOZ_8", "GPIOZ_9",
+ "GPIOZ_10", "GPIOZ_11", "GPIOZ_12", "GPIOZ_13", "GPIOZ_14",
+ "GPIOZ_15",
+
+ "GPIOH_0", "GPIOH_1", "GPIOH_2", "GPIOH_3", "GPIOH_4",
+ "GPIOH_5", "GPIOH_6", "GPIOH_7", "GPIOH_8",
+
+ "BOOT_0", "BOOT_1", "BOOT_2", "BOOT_3", "BOOT_4",
+ "BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9",
+ "BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
+ "BOOT_15",
+
+ "GPIOC_0", "GPIOC_1", "GPIOC_2", "GPIOC_3", "GPIOC_4",
+ "GPIOC_5", "GPIOC_6", "GPIOC_7",
+
+ "GPIOA_0", "GPIOA_1", "GPIOA_2", "GPIOA_3", "GPIOA_4",
+ "GPIOA_5", "GPIOA_6", "GPIOA_7", "GPIOA_8", "GPIOA_9",
+ "GPIOA_10", "GPIOA_11", "GPIOA_12", "GPIOA_13", "GPIOA_14",
+ "GPIOA_15",
+
+ "GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4",
+ "GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
+ "GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
+ "GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18", "GPIOX_19",
+};
+
+static const char * const emmc_groups[] = {
+ "emmc_nand_d0", "emmc_nand_d1", "emmc_nand_d2",
+ "emmc_nand_d3", "emmc_nand_d4", "emmc_nand_d5",
+ "emmc_nand_d6", "emmc_nand_d7",
+ "emmc_clk", "emmc_cmd", "emmc_nand_ds",
+};
+
+static const char * const nand_groups[] = {
+ "emmc_nand_d0", "emmc_nand_d1", "emmc_nand_d2",
+ "emmc_nand_d3", "emmc_nand_d4", "emmc_nand_d5",
+ "emmc_nand_d6", "emmc_nand_d7",
+ "nand_ce0", "nand_ale", "nand_cle",
+ "nand_wen_clk", "nand_ren_wr", "nand_rb0",
+ "emmc_nand_ds", "nand_ce1",
+};
+
+static const char * const nor_groups[] = {
+ "nor_d", "nor_q", "nor_c", "nor_cs",
+ "nor_hold", "nor_wp",
+};
+
+static const char * const sdio_groups[] = {
+ "sdio_d0", "sdio_d1", "sdio_d2", "sdio_d3",
+ "sdio_cmd", "sdio_clk", "sdio_dummy",
+};
+
+static const char * const sdcard_groups[] = {
+ "sdcard_d0_c", "sdcard_d1_c", "sdcard_d2_c", "sdcard_d3_c",
+ "sdcard_clk_c", "sdcard_cmd_c",
+ "sdcard_d0_z", "sdcard_d1_z", "sdcard_d2_z", "sdcard_d3_z",
+ "sdcard_clk_z", "sdcard_cmd_z",
+};
+
+static const char * const spi0_groups[] = {
+ "spi0_mosi_c", "spi0_miso_c", "spi0_ss0_c", "spi0_clk_c",
+ "spi0_mosi_x", "spi0_miso_x", "spi0_ss0_x", "spi0_clk_x",
+};
+
+static const char * const spi1_groups[] = {
+ "spi1_mosi", "spi1_miso", "spi1_ss0", "spi1_clk",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0_sda_c", "i2c0_sck_c",
+ "i2c0_sda_z0", "i2c0_sck_z1",
+ "i2c0_sda_z7", "i2c0_sck_z8",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_sda_x", "i2c1_sck_x",
+ "i2c1_sda_h2", "i2c1_sck_h3",
+ "i2c1_sda_h6", "i2c1_sck_h7",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_sda_x", "i2c2_sck_x",
+ "i2c2_sda_z", "i2c2_sck_z",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3_sda_h", "i2c3_sck_h",
+ "i2c3_sda_a", "i2c3_sck_a",
+};
+
+static const char * const uart_a_groups[] = {
+ "uart_a_tx", "uart_a_rx", "uart_a_cts", "uart_a_rts",
+};
+
+static const char * const uart_b_groups[] = {
+ "uart_b_tx", "uart_b_rx",
+};
+
+static const char * const uart_c_groups[] = {
+ "uart_c_tx", "uart_c_rx", "uart_c_cts", "uart_c_rts",
+};
+
+static const char * const uart_ao_a_c_groups[] = {
+ "uart_ao_a_rx_c", "uart_ao_a_tx_c",
+};
+
+static const char * const iso7816_groups[] = {
+ "iso7816_clk_c", "iso7816_data_c",
+ "iso7816_clk_x", "iso7816_data_x",
+ "iso7816_clk_h", "iso7816_data_h",
+ "iso7816_clk_z", "iso7816_data_z",
+};
+
+static const char * const eth_groups[] = {
+ "eth_rxd2_rgmii", "eth_rxd3_rgmii", "eth_rgmii_tx_clk",
+ "eth_txd2_rgmii", "eth_txd3_rgmii", "eth_rgmii_rx_clk",
+ "eth_txd0", "eth_txd1", "eth_txen", "eth_mdc",
+ "eth_rxd0", "eth_rxd1", "eth_rx_dv", "eth_mdio",
+ "eth_link_led", "eth_act_led",
+};
+
+static const char * const pwm_a_groups[] = {
+ "pwm_a",
+};
+
+static const char * const pwm_b_groups[] = {
+ "pwm_b_x7", "pwm_b_x19",
+};
+
+static const char * const pwm_c_groups[] = {
+ "pwm_c_c", "pwm_c_x5", "pwm_c_x8",
+};
+
+static const char * const pwm_d_groups[] = {
+ "pwm_d_x3", "pwm_d_x6",
+};
+
+static const char * const pwm_e_groups[] = {
+ "pwm_e",
+};
+
+static const char * const pwm_f_groups[] = {
+ "pwm_f_x", "pwm_f_h",
+};
+
+static const char * const cec_ao_a_h_groups[] = {
+ "cec_ao_a_h",
+};
+
+static const char * const cec_ao_b_h_groups[] = {
+ "cec_ao_b_h",
+};
+
+static const char * const jtag_b_groups[] = {
+ "jtag_b_tdi", "jtag_b_tdo", "jtag_b_clk", "jtag_b_tms",
+};
+
+static const char * const bt565_a_groups[] = {
+ "bt565_a_vs", "bt565_a_hs", "bt565_a_clk",
+ "bt565_a_din0", "bt565_a_din1", "bt565_a_din2",
+ "bt565_a_din3", "bt565_a_din4", "bt565_a_din5",
+ "bt565_a_din6", "bt565_a_din7",
+};
+
+static const char * const tsin_a_groups[] = {
+ "tsin_a_valid", "tsin_a_sop", "tsin_a_din0",
+ "tsin_a_clk",
+};
+
+static const char * const tsin_b_groups[] = {
+ "tsin_b_valid_x", "tsin_b_sop_x", "tsin_b_din0_x", "tsin_b_clk_x",
+ "tsin_b_valid_z", "tsin_b_sop_z", "tsin_b_din0_z", "tsin_b_clk_z",
+ "tsin_b_fail", "tsin_b_din1", "tsin_b_din2", "tsin_b_din3",
+ "tsin_b_din4", "tsin_b_din5", "tsin_b_din6", "tsin_b_din7",
+};
+
+static const char * const hdmitx_groups[] = {
+ "hdmitx_sda", "hdmitx_sck", "hdmitx_hpd_in",
+};
+
+static const char * const pdm_groups[] = {
+ "pdm_din0_c", "pdm_din1_c", "pdm_din2_c", "pdm_din3_c",
+ "pdm_dclk_c",
+ "pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_din3_x",
+ "pdm_dclk_x",
+ "pdm_din0_z", "pdm_din1_z", "pdm_din2_z", "pdm_din3_z",
+ "pdm_dclk_z",
+ "pdm_din0_a", "pdm_din1_a", "pdm_din2_a", "pdm_din3_a",
+ "pdm_dclk_a",
+};
+
+static const char * const spdif_in_groups[] = {
+ "spdif_in_h", "spdif_in_a10", "spdif_in_a12",
+};
+
+static const char * const spdif_out_groups[] = {
+ "spdif_out_h", "spdif_out_a11", "spdif_out_a13",
+};
+
+static const char * const mclk0_groups[] = {
+ "mclk0_a",
+};
+
+static const char * const mclk1_groups[] = {
+ "mclk1_x", "mclk1_z", "mclk1_a",
+};
+
+static const char * const tdm_a_groups[] = {
+ "tdm_a_slv_sclk", "tdm_a_slv_fs", "tdm_a_sclk", "tdm_a_fs",
+ "tdm_a_din0", "tdm_a_din1", "tdm_a_dout0", "tdm_a_dout1",
+};
+
+static const char * const tdm_b_groups[] = {
+ "tdm_b_slv_sclk", "tdm_b_slv_fs", "tdm_b_sclk", "tdm_b_fs",
+ "tdm_b_din0", "tdm_b_din1", "tdm_b_din2",
+ "tdm_b_din3_a", "tdm_b_din3_h",
+ "tdm_b_dout0", "tdm_b_dout1", "tdm_b_dout2",
+ "tdm_b_dout3_a", "tdm_b_dout3_h",
+};
+
+static const char * const tdm_c_groups[] = {
+ "tdm_c_slv_sclk_a", "tdm_c_slv_fs_a",
+ "tdm_c_slv_sclk_z", "tdm_c_slv_fs_z",
+ "tdm_c_sclk_a", "tdm_c_fs_a",
+ "tdm_c_sclk_z", "tdm_c_fs_z",
+ "tdm_c_din0_a", "tdm_c_din1_a",
+ "tdm_c_din2_a", "tdm_c_din3_a",
+ "tdm_c_din0_z", "tdm_c_din1_z",
+ "tdm_c_din2_z", "tdm_c_din3_z",
+ "tdm_c_dout0_a", "tdm_c_dout1_a",
+ "tdm_c_dout2_a", "tdm_c_dout3_a",
+ "tdm_c_dout0_z", "tdm_c_dout1_z",
+ "tdm_c_dout2_z", "tdm_c_dout3_z",
+};
+
+static const char * const gpio_aobus_groups[] = {
+ "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
+ "GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
+ "GPIOAO_10", "GPIOAO_11", "GPIOE_0", "GPIOE_1", "GPIOE_2",
+};
+
+static const char * const uart_ao_a_groups[] = {
+ "uart_ao_a_tx", "uart_ao_a_rx",
+ "uart_ao_a_cts", "uart_ao_a_rts",
+};
+
+static const char * const uart_ao_b_groups[] = {
+ "uart_ao_b_tx_2", "uart_ao_b_rx_3",
+ "uart_ao_b_tx_8", "uart_ao_b_rx_9",
+ "uart_ao_b_cts", "uart_ao_b_rts",
+};
+
+static const char * const i2c_ao_groups[] = {
+ "i2c_ao_sck", "i2c_ao_sda",
+ "i2c_ao_sck_e", "i2c_ao_sda_e",
+};
+
+static const char * const i2c_ao_slave_groups[] = {
+ "i2c_ao_slave_sck", "i2c_ao_slave_sda",
+};
+
+static const char * const remote_ao_input_groups[] = {
+ "remote_ao_input",
+};
+
+static const char * const remote_ao_out_groups[] = {
+ "remote_ao_out",
+};
+
+static const char * const pwm_ao_a_groups[] = {
+ "pwm_ao_a", "pwm_ao_a_hiz",
+};
+
+static const char * const pwm_ao_b_groups[] = {
+ "pwm_ao_b",
+};
+
+static const char * const pwm_ao_c_groups[] = {
+ "pwm_ao_c_4", "pwm_ao_c_hiz",
+ "pwm_ao_c_6",
+};
+
+static const char * const pwm_ao_d_groups[] = {
+ "pwm_ao_d_5", "pwm_ao_d_10", "pwm_ao_d_e",
+};
+
+static const char * const jtag_a_groups[] = {
+ "jtag_a_tdi", "jtag_a_tdo", "jtag_a_clk", "jtag_a_tms",
+};
+
+static const char * const cec_ao_a_groups[] = {
+ "cec_ao_a",
+};
+
+static const char * const cec_ao_b_groups[] = {
+ "cec_ao_b",
+};
+
+static const char * const tsin_ao_a_groups[] = {
+ "tsin_ao_asop", "tsin_ao_adin0", "tsin_ao_aclk", "tsin_ao_a_valid",
+};
+
+static const char * const spdif_ao_out_groups[] = {
+ "spdif_ao_out",
+};
+
+static const char * const tdm_ao_b_groups[] = {
+ "tdm_ao_b_dout0", "tdm_ao_b_dout1", "tdm_ao_b_dout2",
+ "tdm_ao_b_fs", "tdm_ao_b_sclk",
+ "tdm_ao_b_din0", "tdm_ao_b_din1", "tdm_ao_b_din2",
+ "tdm_ao_b_slv_fs", "tdm_ao_b_slv_sclk",
+};
+
+static const char * const mclk0_ao_groups[] = {
+ "mclk0_ao",
+};
+
+static struct meson_pmx_func meson_g12a_periphs_functions[] = {
+ FUNCTION(gpio_periphs),
+ FUNCTION(emmc),
+ FUNCTION(nor),
+ FUNCTION(spi0),
+ FUNCTION(spi1),
+ FUNCTION(sdio),
+ FUNCTION(nand),
+ FUNCTION(sdcard),
+ FUNCTION(i2c0),
+ FUNCTION(i2c1),
+ FUNCTION(i2c2),
+ FUNCTION(i2c3),
+ FUNCTION(uart_a),
+ FUNCTION(uart_b),
+ FUNCTION(uart_c),
+ FUNCTION(uart_ao_a_c),
+ FUNCTION(iso7816),
+ FUNCTION(eth),
+ FUNCTION(pwm_a),
+ FUNCTION(pwm_b),
+ FUNCTION(pwm_c),
+ FUNCTION(pwm_d),
+ FUNCTION(pwm_e),
+ FUNCTION(pwm_f),
+ FUNCTION(cec_ao_a_h),
+ FUNCTION(cec_ao_b_h),
+ FUNCTION(jtag_b),
+ FUNCTION(bt565_a),
+ FUNCTION(tsin_a),
+ FUNCTION(tsin_b),
+ FUNCTION(hdmitx),
+ FUNCTION(pdm),
+ FUNCTION(spdif_out),
+ FUNCTION(spdif_in),
+ FUNCTION(mclk0),
+ FUNCTION(mclk1),
+ FUNCTION(tdm_a),
+ FUNCTION(tdm_b),
+ FUNCTION(tdm_c),
+};
+
+static struct meson_pmx_func meson_g12a_aobus_functions[] = {
+ FUNCTION(gpio_aobus),
+ FUNCTION(uart_ao_a),
+ FUNCTION(uart_ao_b),
+ FUNCTION(i2c_ao),
+ FUNCTION(i2c_ao_slave),
+ FUNCTION(remote_ao_input),
+ FUNCTION(remote_ao_out),
+ FUNCTION(pwm_ao_a),
+ FUNCTION(pwm_ao_b),
+ FUNCTION(pwm_ao_c),
+ FUNCTION(pwm_ao_d),
+ FUNCTION(jtag_a),
+ FUNCTION(cec_ao_a),
+ FUNCTION(cec_ao_b),
+ FUNCTION(tsin_ao_a),
+ FUNCTION(spdif_ao_out),
+ FUNCTION(tdm_ao_b),
+ FUNCTION(mclk0_ao),
+};
+
+static struct meson_bank meson_g12a_periphs_banks[] = {
+ /* name first last irq pullen pull dir out in */
+ BANK("Z", GPIOZ_0, GPIOZ_15, 12, 27,
+ 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
+ BANK("H", GPIOH_0, GPIOH_8, 28, 36,
+ 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
+ BANK("BOOT", BOOT_0, BOOT_15, 37, 52,
+ 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
+ BANK("C", GPIOC_0, GPIOC_7, 53, 60,
+ 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
+ BANK("A", GPIOA_0, GPIOA_15, 61, 76,
+ 5, 0, 5, 0, 16, 0, 17, 0, 18, 0),
+ BANK("X", GPIOX_0, GPIOX_19, 77, 96,
+ 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
+};
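
Each BANK() row lists the GPIO range, its two hwirq endpoints, and then
(register, bit) pairs for the pull-enable, pull, direction, output and
input registers, in that order. A hedged reading of the "Z" row above,
assuming the usual meson reg/bit pairing:

	/*
	 * BANK("Z", GPIOZ_0, GPIOZ_15, 12, 27,
	 *      4, 0, 4, 0, 12, 0, 13, 0, 14, 0)
	 *
	 * GPIOZ_0..GPIOZ_15 map to hwirqs 12..27; pull-enable and pull live
	 * in reg 4 from bit 0, direction in reg 12, output in reg 13 and
	 * input in reg 14, each starting at bit 0.
	 */
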
+
+static struct meson_bank meson_g12a_aobus_banks[] = {
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", GPIOAO_0, GPIOAO_11, 0, 11,
+ 3, 0, 2, 0, 0, 0, 4, 0, 1, 0),
+ /* GPIOE is actually located in the AO bank */
+ BANK("E", GPIOE_0, GPIOE_2, 97, 99,
+ 3, 16, 2, 16, 0, 16, 4, 16, 1, 16),
+};
+
+static struct meson_pmx_bank meson_g12a_periphs_pmx_banks[] = {
+ /* name first last reg offset */
+ BANK_PMX("Z", GPIOZ_0, GPIOZ_15, 0x6, 0),
+ BANK_PMX("H", GPIOH_0, GPIOH_8, 0xb, 0),
+ BANK_PMX("BOOT", BOOT_0, BOOT_15, 0x0, 0),
+ BANK_PMX("C", GPIOC_0, GPIOC_7, 0x9, 0),
+ BANK_PMX("A", GPIOA_0, GPIOA_15, 0xd, 0),
+ BANK_PMX("X", GPIOX_0, GPIOX_19, 0x3, 0),
+};
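
BANK_PMX() rows give each bank's first mux register and starting bit; with
the AXG-style pmx ops every pin owns a 4-bit function field, eight pins per
32-bit register. A sketch of the addressing this implies (the real
calculation lives in pinctrl-meson-axg-pmx.c and may differ in detail):

	/* assumed AXG-style mux addressing: one 4-bit function field per pin */
	static inline void example_mux_addr(unsigned int pin, unsigned int first,
					    unsigned int start_reg, unsigned int start_bit,
					    unsigned int *reg, unsigned int *shift)
	{
		unsigned int bit = start_bit + 4 * (pin - first);

		*reg = start_reg + bit / 32;	/* 32-bit mux register index */
		*shift = bit % 32;		/* LSB of the pin's function field */
	}
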
+
+static struct meson_axg_pmx_data meson_g12a_periphs_pmx_banks_data = {
+ .pmx_banks = meson_g12a_periphs_pmx_banks,
+ .num_pmx_banks = ARRAY_SIZE(meson_g12a_periphs_pmx_banks),
+};
+
+static struct meson_pmx_bank meson_g12a_aobus_pmx_banks[] = {
+ BANK_PMX("AO", GPIOAO_0, GPIOAO_11, 0x0, 0),
+ BANK_PMX("E", GPIOE_0, GPIOE_2, 0x1, 16),
+};
+
+static struct meson_axg_pmx_data meson_g12a_aobus_pmx_banks_data = {
+ .pmx_banks = meson_g12a_aobus_pmx_banks,
+ .num_pmx_banks = ARRAY_SIZE(meson_g12a_aobus_pmx_banks),
+};
+
+static struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = {
+ .name = "periphs-banks",
+ .pins = meson_g12a_periphs_pins,
+ .groups = meson_g12a_periphs_groups,
+ .funcs = meson_g12a_periphs_functions,
+ .banks = meson_g12a_periphs_banks,
+ .num_pins = ARRAY_SIZE(meson_g12a_periphs_pins),
+ .num_groups = ARRAY_SIZE(meson_g12a_periphs_groups),
+ .num_funcs = ARRAY_SIZE(meson_g12a_periphs_functions),
+ .num_banks = ARRAY_SIZE(meson_g12a_periphs_banks),
+ .pmx_ops = &meson_axg_pmx_ops,
+ .pmx_data = &meson_g12a_periphs_pmx_banks_data,
+};
+
+static struct meson_pinctrl_data meson_g12a_aobus_pinctrl_data = {
+ .name = "aobus-banks",
+ .pins = meson_g12a_aobus_pins,
+ .groups = meson_g12a_aobus_groups,
+ .funcs = meson_g12a_aobus_functions,
+ .banks = meson_g12a_aobus_banks,
+ .num_pins = ARRAY_SIZE(meson_g12a_aobus_pins),
+ .num_groups = ARRAY_SIZE(meson_g12a_aobus_groups),
+ .num_funcs = ARRAY_SIZE(meson_g12a_aobus_functions),
+ .num_banks = ARRAY_SIZE(meson_g12a_aobus_banks),
+ .pmx_ops = &meson_axg_pmx_ops,
+ .pmx_data = &meson_g12a_aobus_pmx_banks_data,
+};
+
+static const struct of_device_id meson_g12a_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson-g12a-periphs-pinctrl",
+ .data = &meson_g12a_periphs_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson-g12a-aobus-pinctrl",
+ .data = &meson_g12a_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson_g12a_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-g12a-pinctrl",
+ .of_match_table = meson_g12a_pinctrl_dt_match,
+ },
+};
+
+builtin_platform_driver(meson_g12a_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 29a458da78db..f8b778a7d471 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -41,7 +41,7 @@
*/
#include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -451,7 +451,7 @@ static struct regmap *meson_map_resource(struct meson_pinctrl *pc,
meson_regmap_config.max_register = resource_size(&res) - 4;
meson_regmap_config.name = devm_kasprintf(pc->dev, GFP_KERNEL,
- "%s-%s", node->name,
+ "%pOFn-%s", node,
name);
if (!meson_regmap_config.name)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 12a391109329..eff61ea1c67e 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -11,7 +11,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index d7ec7119701b..35ecb92483d5 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -17,7 +17,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -413,14 +413,14 @@ static int mvebu_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = of_property_read_string(np, "marvell,function", &function);
if (ret) {
dev_err(pctl->dev,
- "missing marvell,function in node %s\n", np->name);
+ "missing marvell,function in node %pOFn\n", np);
return 0;
}
nmaps = of_property_count_strings(np, "marvell,pins");
if (nmaps < 0) {
dev_err(pctl->dev,
- "missing marvell,pins in node %s\n", np->name);
+ "missing marvell,pins in node %pOFn\n", np);
return 0;
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-ab8500.c b/drivers/pinctrl/nomadik/pinctrl-ab8500.c
index 2ac2d0ad3025..0723627c7bc2 100644
--- a/drivers/pinctrl/nomadik/pinctrl-ab8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-ab8500.c
@@ -9,7 +9,7 @@
*/
#include <linux/kernel.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/mfd/abx500/ab8500.h>
#include "pinctrl-abx500.h"
diff --git a/drivers/pinctrl/nomadik/pinctrl-ab8505.c b/drivers/pinctrl/nomadik/pinctrl-ab8505.c
index 42c6e1f7886b..2683509c1410 100644
--- a/drivers/pinctrl/nomadik/pinctrl-ab8505.c
+++ b/drivers/pinctrl/nomadik/pinctrl-ab8505.c
@@ -9,7 +9,7 @@
*/
#include <linux/kernel.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/mfd/abx500/ab8500.h>
#include "pinctrl-abx500.h"
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index e3689cc62a41..3d630a0544e1 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -18,7 +18,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index f0e7a8c114b2..4cc2c47f8778 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -17,7 +17,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
@@ -203,7 +203,7 @@ typedef unsigned long pin_cfg_t;
#define GPIO_BLOCK_SHIFT 5
#define NMK_GPIO_PER_CHIP (1 << GPIO_BLOCK_SHIFT)
-#define NMK_MAX_BANKS DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)
+#define NMK_MAX_BANKS DIV_ROUND_UP(512, NMK_GPIO_PER_CHIP)
/* Register in the logic block */
#define NMK_GPIO_DAT 0x00
@@ -971,7 +971,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
data_out ? "hi" : "lo",
(mode < 0) ? "unknown" : modes[mode]);
} else {
- int irq = gpio_to_irq(gpio);
+ int irq = chip->to_irq(chip, offset);
struct irq_desc *desc = irq_to_desc(irq);
int pullidx = 0;
int val;
@@ -1051,7 +1051,7 @@ static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np,
gpio_pdev = of_find_device_by_node(np);
if (!gpio_pdev) {
- pr_err("populate \"%s\": device not found\n", np->name);
+ pr_err("populate \"%pOFn\": device not found\n", np);
return ERR_PTR(-ENODEV);
}
if (of_property_read_u32(np, "gpio-bank", &id)) {
@@ -1904,8 +1904,8 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
gpio_np = of_parse_phandle(np, "nomadik-gpio-chips", i);
if (gpio_np) {
dev_info(&pdev->dev,
- "populate NMK GPIO %d \"%s\"\n",
- i, gpio_np->name);
+ "populate NMK GPIO %d \"%pOFn\"\n",
+ i, gpio_np);
nmk_chip = nmk_gpio_populate_chip(gpio_np, pdev);
if (IS_ERR(nmk_chip))
dev_err(&pdev->dev,
diff --git a/drivers/pinctrl/nuvoton/Kconfig b/drivers/pinctrl/nuvoton/Kconfig
new file mode 100644
index 000000000000..6056841a3c32
--- /dev/null
+++ b/drivers/pinctrl/nuvoton/Kconfig
@@ -0,0 +1,12 @@
+config PINCTRL_NPCM7XX
+ bool "Pinctrl and GPIO driver for Nuvoton NPCM7XX"
+ depends on (ARCH_NPCM7XX || COMPILE_TEST) && OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y here to enable pin controller and GPIO support
+ for Nuvoton NPCM750/730/715/705 SoCs.
diff --git a/drivers/pinctrl/nuvoton/Makefile b/drivers/pinctrl/nuvoton/Makefile
new file mode 100644
index 000000000000..886d00784cef
--- /dev/null
+++ b/drivers/pinctrl/nuvoton/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+# Nuvoton pinctrl support
+
+obj-$(CONFIG_PINCTRL_NPCM7XX) += pinctrl-npcm7xx.o
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
new file mode 100644
index 000000000000..7ad50d9268aa
--- /dev/null
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -0,0 +1,2072 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2016-2018 Nuvoton Technology corporation.
+// Copyright (c) 2016, Dell Inc
+
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* GCR registers */
+#define NPCM7XX_GCR_PDID 0x00
+#define NPCM7XX_GCR_MFSEL1 0x0C
+#define NPCM7XX_GCR_MFSEL2 0x10
+#define NPCM7XX_GCR_MFSEL3 0x64
+#define NPCM7XX_GCR_MFSEL4 0xb0
+#define NPCM7XX_GCR_CPCTL 0xD0
+#define NPCM7XX_GCR_CP2BST 0xD4
+#define NPCM7XX_GCR_B2CPNT 0xD8
+#define NPCM7XX_GCR_I2CSEGSEL 0xE0
+#define NPCM7XX_GCR_I2CSEGCTL 0xE4
+#define NPCM7XX_GCR_SRCNT 0x68
+#define NPCM7XX_GCR_FLOCKR1 0x74
+#define NPCM7XX_GCR_DSCNT 0x78
+
+#define SRCNT_ESPI BIT(3)
+
+/* GPIO registers */
+#define NPCM7XX_GP_N_TLOCK1 0x00
+#define NPCM7XX_GP_N_DIN 0x04 /* Data IN */
+#define NPCM7XX_GP_N_POL 0x08 /* Polarity */
+#define NPCM7XX_GP_N_DOUT 0x0c /* Data OUT */
+#define NPCM7XX_GP_N_OE 0x10 /* Output Enable */
+#define NPCM7XX_GP_N_OTYP 0x14
+#define NPCM7XX_GP_N_MP 0x18
+#define NPCM7XX_GP_N_PU 0x1c /* Pull-up */
+#define NPCM7XX_GP_N_PD 0x20 /* Pull-down */
+#define NPCM7XX_GP_N_DBNC 0x24 /* Debounce */
+#define NPCM7XX_GP_N_EVTYP 0x28 /* Event Type */
+#define NPCM7XX_GP_N_EVBE 0x2c /* Event Both Edge */
+#define NPCM7XX_GP_N_OBL0 0x30
+#define NPCM7XX_GP_N_OBL1 0x34
+#define NPCM7XX_GP_N_OBL2 0x38
+#define NPCM7XX_GP_N_OBL3 0x3c
+#define NPCM7XX_GP_N_EVEN 0x40 /* Event Enable */
+#define NPCM7XX_GP_N_EVENS 0x44 /* Event Set (enable) */
+#define NPCM7XX_GP_N_EVENC 0x48 /* Event Clear (disable) */
+#define NPCM7XX_GP_N_EVST 0x4c /* Event Status */
+#define NPCM7XX_GP_N_SPLCK 0x50
+#define NPCM7XX_GP_N_MPLCK 0x54
+#define NPCM7XX_GP_N_IEM 0x58 /* Input Enable */
+#define NPCM7XX_GP_N_OSRC 0x5c
+#define NPCM7XX_GP_N_ODSC 0x60
+#define NPCM7XX_GP_N_DOS 0x68 /* Data OUT Set */
+#define NPCM7XX_GP_N_DOC 0x6c /* Data OUT Clear */
+#define NPCM7XX_GP_N_OES 0x70 /* Output Enable Set */
+#define NPCM7XX_GP_N_OEC 0x74 /* Output Enable Clear */
+#define NPCM7XX_GP_N_TLOCK2 0x7c
+
+#define NPCM7XX_GPIO_PER_BANK 32
+#define NPCM7XX_GPIO_BANK_NUM 8
+#define NPCM7XX_GCR_NONE 0
+
+/* Structure for register banks */
+struct npcm7xx_gpio {
+ void __iomem *base;
+ struct gpio_chip gc;
+ int irqbase;
+ int irq;
+ void *priv;
+ struct irq_chip irq_chip;
+ u32 pinctrl_id;
+ int (*direction_input)(struct gpio_chip *chip, unsigned offset);
+ int (*direction_output)(struct gpio_chip *chip, unsigned offset,
+ int value);
+ int (*request)(struct gpio_chip *chip, unsigned offset);
+ void (*free)(struct gpio_chip *chip, unsigned offset);
+};
+
+struct npcm7xx_pinctrl {
+ struct pinctrl_dev *pctldev;
+ struct device *dev;
+ struct npcm7xx_gpio gpio_bank[NPCM7XX_GPIO_BANK_NUM];
+ struct irq_domain *domain;
+ struct regmap *gcr_regmap;
+ void __iomem *regs;
+ u32 bank_num;
+};
+
+/* GPIO handling in the pinctrl driver */
+static void npcm_gpio_set(struct gpio_chip *gc, void __iomem *reg,
+ unsigned int pinmask)
+{
+ unsigned long flags;
+ unsigned long val;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+ val = ioread32(reg) | pinmask;
+ iowrite32(val, reg);
+
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static void npcm_gpio_clr(struct gpio_chip *gc, void __iomem *reg,
+ unsigned int pinmask)
+{
+ unsigned long flags;
+ unsigned long val;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+ val = ioread32(reg) & ~pinmask;
+ iowrite32(val, reg);
+
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
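+
+/*
+ * Both helpers do a read-modify-write of one bank register under the
+ * chip's bgpio_lock, keeping per-pin bit updates atomic with respect
+ * to the generic MMIO GPIO accessors that share the same lock.
+ */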
+
+static void npcmgpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ struct npcm7xx_gpio *bank = gpiochip_get_data(chip);
+
+ seq_printf(s, "-- module %d [gpio%d - %d]\n",
+ bank->gc.base / bank->gc.ngpio,
+ bank->gc.base,
+ bank->gc.base + bank->gc.ngpio - 1);
+ seq_printf(s, "DIN :%.8x DOUT:%.8x IE :%.8x OE :%.8x\n",
+ ioread32(bank->base + NPCM7XX_GP_N_DIN),
+ ioread32(bank->base + NPCM7XX_GP_N_DOUT),
+ ioread32(bank->base + NPCM7XX_GP_N_IEM),
+ ioread32(bank->base + NPCM7XX_GP_N_OE));
+ seq_printf(s, "PU :%.8x PD :%.8x DB :%.8x POL :%.8x\n",
+ ioread32(bank->base + NPCM7XX_GP_N_PU),
+ ioread32(bank->base + NPCM7XX_GP_N_PD),
+ ioread32(bank->base + NPCM7XX_GP_N_DBNC),
+ ioread32(bank->base + NPCM7XX_GP_N_POL));
+ seq_printf(s, "ETYP:%.8x EVBE:%.8x EVEN:%.8x EVST:%.8x\n",
+ ioread32(bank->base + NPCM7XX_GP_N_EVTYP),
+ ioread32(bank->base + NPCM7XX_GP_N_EVBE),
+ ioread32(bank->base + NPCM7XX_GP_N_EVEN),
+ ioread32(bank->base + NPCM7XX_GP_N_EVST));
+ seq_printf(s, "OTYP:%.8x OSRC:%.8x ODSC:%.8x\n",
+ ioread32(bank->base + NPCM7XX_GP_N_OTYP),
+ ioread32(bank->base + NPCM7XX_GP_N_OSRC),
+ ioread32(bank->base + NPCM7XX_GP_N_ODSC));
+ seq_printf(s, "OBL0:%.8x OBL1:%.8x OBL2:%.8x OBL3:%.8x\n",
+ ioread32(bank->base + NPCM7XX_GP_N_OBL0),
+ ioread32(bank->base + NPCM7XX_GP_N_OBL1),
+ ioread32(bank->base + NPCM7XX_GP_N_OBL2),
+ ioread32(bank->base + NPCM7XX_GP_N_OBL3));
+ seq_printf(s, "SLCK:%.8x MLCK:%.8x\n",
+ ioread32(bank->base + NPCM7XX_GP_N_SPLCK),
+ ioread32(bank->base + NPCM7XX_GP_N_MPLCK));
+}
+
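+/*
+ * The direction/request wrappers below route each pin through pinctrl
+ * first (using the global GPIO number, offset + chip->base) and only
+ * then call the bank callbacks saved in struct npcm7xx_gpio,
+ * presumably the GPIO_GENERIC (bgpio) ones installed at probe time.
+ */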
+static int npcmgpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct npcm7xx_gpio *bank = gpiochip_get_data(chip);
+ int ret;
+
+ ret = pinctrl_gpio_direction_input(offset + chip->base);
+ if (ret)
+ return ret;
+
+ return bank->direction_input(chip, offset);
+}
+
+/* Set GPIO to Output with initial value */
+static int npcmgpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct npcm7xx_gpio *bank = gpiochip_get_data(chip);
+ int ret;
+
+ dev_dbg(chip->parent, "gpio_direction_output: offset %d = %x\n",
+ offset, value);
+
+ ret = pinctrl_gpio_direction_output(offset + chip->base);
+ if (ret)
+ return ret;
+
+ return bank->direction_output(chip, offset, value);
+}
+
+static int npcmgpio_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct npcm7xx_gpio *bank = gpiochip_get_data(chip);
+ int ret;
+
+ dev_dbg(chip->parent, "gpio_request: offset %d\n", offset);
+ ret = pinctrl_gpio_request(offset + chip->base);
+ if (ret)
+ return ret;
+
+ return bank->request(chip, offset);
+}
+
+static void npcmgpio_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ dev_dbg(chip->parent, "gpio_free: offset %d\n", offset);
+ pinctrl_gpio_free(offset + chip->base);
+}
+
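+/*
+ * Chained handler for one GPIO bank: pending events (EVST) are masked
+ * with the enabled set (EVEN) and each remaining bit is dispatched
+ * through the gpiochip's IRQ domain.
+ */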
+static void npcmgpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc;
+ struct irq_chip *chip;
+ struct npcm7xx_gpio *bank;
+ u32 sts, en, bit;
+
+ gc = irq_desc_get_handler_data(desc);
+ bank = gpiochip_get_data(gc);
+ chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+ sts = ioread32(bank->base + NPCM7XX_GP_N_EVST);
+ en = ioread32(bank->base + NPCM7XX_GP_N_EVEN);
+ dev_dbg(chip->parent_device, "==> got irq sts %.8x %.8x\n", sts,
+ en);
+
+ sts &= en;
+ for_each_set_bit(bit, (const void *)&sts, NPCM7XX_GPIO_PER_BANK)
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, bit));
+ chained_irq_exit(chip, desc);
+}
+
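+/*
+ * Program the polarity/both-edge registers for the requested trigger,
+ * then switch EVTYP between level and edge sensing and install the
+ * matching flow handler.
+ */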
+static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct npcm7xx_gpio *bank =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int gpio = BIT(d->hwirq);
+
+ dev_dbg(d->chip->parent_device, "setirqtype: %u.%u = %u\n", gpio,
+ d->irq, type);
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ dev_dbg(d->chip->parent_device, "edge.rising\n");
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ dev_dbg(d->chip->parent_device, "edge.falling\n");
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
+ npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ dev_dbg(d->chip->parent_device, "edge.both\n");
+ npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ dev_dbg(d->chip->parent_device, "level.low\n");
+ npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ dev_dbg(d->chip->parent_device, "level.high\n");
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
+ break;
+ default:
+ dev_dbg(d->chip->parent_device, "invalid irq type\n");
+ return -EINVAL;
+ }
+
+ if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVTYP, gpio);
+ irq_set_handler_locked(d, handle_level_irq);
+ } else if (type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_EDGE_RISING
+ | IRQ_TYPE_EDGE_FALLING)) {
+ npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVTYP, gpio);
+ irq_set_handler_locked(d, handle_edge_irq);
+ }
+
+ return 0;
+}
+
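+/*
+ * ack/mask/unmask each write a single pin bit: EVST is presumably
+ * write-one-to-clear, while EVENC/EVENS clear and set the per-pin
+ * event enable without a read-modify-write.
+ */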
+static void npcmgpio_irq_ack(struct irq_data *d)
+{
+ struct npcm7xx_gpio *bank =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int gpio = d->hwirq;
+
+ dev_dbg(d->chip->parent_device, "irq_ack: %u.%u\n", gpio, d->irq);
+ iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVST);
+}
+
+/* Disable GPIO interrupt */
+static void npcmgpio_irq_mask(struct irq_data *d)
+{
+ struct npcm7xx_gpio *bank =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int gpio = d->hwirq;
+
+ /* Clear events */
+ dev_dbg(d->chip->parent_device, "irq_mask: %u.%u\n", gpio, d->irq);
+ iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC);
+}
+
+/* Enable GPIO interrupt */
+static void npcmgpio_irq_unmask(struct irq_data *d)
+{
+ struct npcm7xx_gpio *bank =
+ gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ unsigned int gpio = d->hwirq;
+
+ /* Enable events */
+ dev_dbg(d->chip->parent_device, "irq_unmask: %u.%u\n", gpio, d->irq);
+ iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS);
+}
+
+static unsigned int npcmgpio_irq_startup(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = d->hwirq;
+
+ /* active-high, input, clear interrupt, enable interrupt */
+ dev_dbg(d->chip->parent_device, "startup: %u.%u\n", gpio, d->irq);
+ npcmgpio_direction_input(gc, gpio);
+ npcmgpio_irq_ack(d);
+ npcmgpio_irq_unmask(d);
+
+ return 0;
+}
+
+static struct irq_chip npcmgpio_irqchip = {
+ .name = "NPCM7XX-GPIO-IRQ",
+ .irq_ack = npcmgpio_irq_ack,
+ .irq_unmask = npcmgpio_irq_unmask,
+ .irq_mask = npcmgpio_irq_mask,
+ .irq_set_type = npcmgpio_set_irq_type,
+ .irq_startup = npcmgpio_irq_startup,
+};
+
+/* Pinmux handling in the pinctrl driver */
+static const int smb0_pins[] = { 115, 114 };
+static const int smb0b_pins[] = { 195, 194 };
+static const int smb0c_pins[] = { 202, 196 };
+static const int smb0d_pins[] = { 198, 199 };
+static const int smb0den_pins[] = { 197 };
+
+static const int smb1_pins[] = { 117, 116 };
+static const int smb1b_pins[] = { 126, 127 };
+static const int smb1c_pins[] = { 124, 125 };
+static const int smb1d_pins[] = { 4, 5 };
+
+static const int smb2_pins[] = { 119, 118 };
+static const int smb2b_pins[] = { 122, 123 };
+static const int smb2c_pins[] = { 120, 121 };
+static const int smb2d_pins[] = { 6, 7 };
+
+static const int smb3_pins[] = { 30, 31 };
+static const int smb3b_pins[] = { 39, 40 };
+static const int smb3c_pins[] = { 37, 38 };
+static const int smb3d_pins[] = { 59, 60 };
+
+static const int smb4_pins[] = { 28, 29 };
+static const int smb4b_pins[] = { 18, 19 };
+static const int smb4c_pins[] = { 20, 21 };
+static const int smb4d_pins[] = { 22, 23 };
+static const int smb4den_pins[] = { 17 };
+
+static const int smb5_pins[] = { 26, 27 };
+static const int smb5b_pins[] = { 13, 12 };
+static const int smb5c_pins[] = { 15, 14 };
+static const int smb5d_pins[] = { 94, 93 };
+static const int ga20kbc_pins[] = { 94, 93 };
+
+static const int smb6_pins[] = { 172, 171 };
+static const int smb7_pins[] = { 174, 173 };
+static const int smb8_pins[] = { 129, 128 };
+static const int smb9_pins[] = { 131, 130 };
+static const int smb10_pins[] = { 133, 132 };
+static const int smb11_pins[] = { 135, 134 };
+static const int smb12_pins[] = { 221, 220 };
+static const int smb13_pins[] = { 223, 222 };
+static const int smb14_pins[] = { 22, 23 };
+static const int smb15_pins[] = { 20, 21 };
+
+static const int fanin0_pins[] = { 64 };
+static const int fanin1_pins[] = { 65 };
+static const int fanin2_pins[] = { 66 };
+static const int fanin3_pins[] = { 67 };
+static const int fanin4_pins[] = { 68 };
+static const int fanin5_pins[] = { 69 };
+static const int fanin6_pins[] = { 70 };
+static const int fanin7_pins[] = { 71 };
+static const int fanin8_pins[] = { 72 };
+static const int fanin9_pins[] = { 73 };
+static const int fanin10_pins[] = { 74 };
+static const int fanin11_pins[] = { 75 };
+static const int fanin12_pins[] = { 76 };
+static const int fanin13_pins[] = { 77 };
+static const int fanin14_pins[] = { 78 };
+static const int fanin15_pins[] = { 79 };
+static const int faninx_pins[] = { 175, 176, 177, 203 };
+
+static const int pwm0_pins[] = { 80 };
+static const int pwm1_pins[] = { 81 };
+static const int pwm2_pins[] = { 82 };
+static const int pwm3_pins[] = { 83 };
+static const int pwm4_pins[] = { 144 };
+static const int pwm5_pins[] = { 145 };
+static const int pwm6_pins[] = { 146 };
+static const int pwm7_pins[] = { 147 };
+
+static const int uart1_pins[] = { 43, 44, 45, 46, 47, 61, 62, 63 };
+static const int uart2_pins[] = { 48, 49, 50, 51, 52, 53, 54, 55 };
+
+/* RGMII 1 pin group */
+static const int rg1_pins[] = { 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107 };
+/* RGMII 1 MD interface pin group */
+static const int rg1mdio_pins[] = { 108, 109 };
+
+/* RGMII 2 pin group */
+static const int rg2_pins[] = { 110, 111, 112, 113, 208, 209, 210, 211, 212,
+ 213, 214, 215 };
+/* RGMII 2 MD interface pin group */
+static const int rg2mdio_pins[] = { 216, 217 };
+
+static const int ddr_pins[] = { 110, 111, 112, 113, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217 };
+/* Serial I/O Expander 1 */
+static const int iox1_pins[] = { 0, 1, 2, 3 };
+/* Serial I/O Expander 2 */
+static const int iox2_pins[] = { 4, 5, 6, 7 };
+/* Host Serial I/O Expander */
+static const int ioxh_pins[] = { 10, 11, 24, 25 };
+
+static const int mmc_pins[] = { 152, 154, 156, 157, 158, 159 };
+static const int mmcwp_pins[] = { 153 };
+static const int mmccd_pins[] = { 155 };
+static const int mmcrst_pins[] = { 155 };
+static const int mmc8_pins[] = { 148, 149, 150, 151 };
+
+/* RMII 1 pin groups */
+static const int r1_pins[] = { 178, 179, 180, 181, 182, 193, 201 };
+static const int r1err_pins[] = { 56 };
+static const int r1md_pins[] = { 57, 58 };
+
+/* RMII 2 pin groups */
+static const int r2_pins[] = { 84, 85, 86, 87, 88, 89, 200 };
+static const int r2err_pins[] = { 90 };
+static const int r2md_pins[] = { 91, 92 };
+
+static const int sd1_pins[] = { 136, 137, 138, 139, 140, 141, 142, 143 };
+static const int sd1pwr_pins[] = { 143 };
+
+static const int wdog1_pins[] = { 218 };
+static const int wdog2_pins[] = { 219 };
+
+/* BMC serial port 0 */
+static const int bmcuart0a_pins[] = { 41, 42 };
+static const int bmcuart0b_pins[] = { 48, 49 };
+
+static const int bmcuart1_pins[] = { 43, 44, 62, 63 };
+
+/* System Control Interrupt and Power Management Event pin group */
+static const int scipme_pins[] = { 169 };
+/* System Management Interrupt pin group */
+static const int sci_pins[] = { 170 };
+/* Serial Interrupt Line pin group */
+static const int serirq_pins[] = { 162 };
+
+static const int clkout_pins[] = { 160 };
+static const int clkreq_pins[] = { 231 };
+
+static const int jtag2_pins[] = { 43, 44, 45, 46, 47 };
+/* Graphics SPI pin group */
+static const int gspi_pins[] = { 12, 13, 14, 15 };
+
+static const int spix_pins[] = { 224, 225, 226, 227, 229, 230 };
+static const int spixcs1_pins[] = { 228 };
+
+static const int pspi1_pins[] = { 175, 176, 177 };
+static const int pspi2_pins[] = { 17, 18, 19 };
+
+static const int spi0cs1_pins[] = { 32 };
+
+static const int spi3_pins[] = { 183, 184, 185, 186 };
+static const int spi3cs1_pins[] = { 187 };
+static const int spi3quad_pins[] = { 188, 189 };
+static const int spi3cs2_pins[] = { 188 };
+static const int spi3cs3_pins[] = { 189 };
+
+static const int ddc_pins[] = { 204, 205, 206, 207 };
+
+static const int lpc_pins[] = { 95, 161, 163, 164, 165, 166, 167 };
+static const int lpcclk_pins[] = { 168 };
+static const int espi_pins[] = { 95, 161, 163, 164, 165, 166, 167, 168 };
+
+static const int lkgpo0_pins[] = { 16 };
+static const int lkgpo1_pins[] = { 8 };
+static const int lkgpo2_pins[] = { 9 };
+
+static const int nprd_smi_pins[] = { 190 };
+
+/*
+ * pin: name, number
+ * group: name, npins, pins
+ * function: name, ngroups, groups
+ */
+struct npcm7xx_group {
+ const char *name;
+ const unsigned int *pins;
+ int npins;
+};
+
+#define NPCM7XX_GRPS \
+ NPCM7XX_GRP(smb0), \
+ NPCM7XX_GRP(smb0b), \
+ NPCM7XX_GRP(smb0c), \
+ NPCM7XX_GRP(smb0d), \
+ NPCM7XX_GRP(smb0den), \
+ NPCM7XX_GRP(smb1), \
+ NPCM7XX_GRP(smb1b), \
+ NPCM7XX_GRP(smb1c), \
+ NPCM7XX_GRP(smb1d), \
+ NPCM7XX_GRP(smb2), \
+ NPCM7XX_GRP(smb2b), \
+ NPCM7XX_GRP(smb2c), \
+ NPCM7XX_GRP(smb2d), \
+ NPCM7XX_GRP(smb3), \
+ NPCM7XX_GRP(smb3b), \
+ NPCM7XX_GRP(smb3c), \
+ NPCM7XX_GRP(smb3d), \
+ NPCM7XX_GRP(smb4), \
+ NPCM7XX_GRP(smb4b), \
+ NPCM7XX_GRP(smb4c), \
+ NPCM7XX_GRP(smb4d), \
+ NPCM7XX_GRP(smb4den), \
+ NPCM7XX_GRP(smb5), \
+ NPCM7XX_GRP(smb5b), \
+ NPCM7XX_GRP(smb5c), \
+ NPCM7XX_GRP(smb5d), \
+ NPCM7XX_GRP(ga20kbc), \
+ NPCM7XX_GRP(smb6), \
+ NPCM7XX_GRP(smb7), \
+ NPCM7XX_GRP(smb8), \
+ NPCM7XX_GRP(smb9), \
+ NPCM7XX_GRP(smb10), \
+ NPCM7XX_GRP(smb11), \
+ NPCM7XX_GRP(smb12), \
+ NPCM7XX_GRP(smb13), \
+ NPCM7XX_GRP(smb14), \
+ NPCM7XX_GRP(smb15), \
+ NPCM7XX_GRP(fanin0), \
+ NPCM7XX_GRP(fanin1), \
+ NPCM7XX_GRP(fanin2), \
+ NPCM7XX_GRP(fanin3), \
+ NPCM7XX_GRP(fanin4), \
+ NPCM7XX_GRP(fanin5), \
+ NPCM7XX_GRP(fanin6), \
+ NPCM7XX_GRP(fanin7), \
+ NPCM7XX_GRP(fanin8), \
+ NPCM7XX_GRP(fanin9), \
+ NPCM7XX_GRP(fanin10), \
+ NPCM7XX_GRP(fanin11), \
+ NPCM7XX_GRP(fanin12), \
+ NPCM7XX_GRP(fanin13), \
+ NPCM7XX_GRP(fanin14), \
+ NPCM7XX_GRP(fanin15), \
+ NPCM7XX_GRP(faninx), \
+ NPCM7XX_GRP(pwm0), \
+ NPCM7XX_GRP(pwm1), \
+ NPCM7XX_GRP(pwm2), \
+ NPCM7XX_GRP(pwm3), \
+ NPCM7XX_GRP(pwm4), \
+ NPCM7XX_GRP(pwm5), \
+ NPCM7XX_GRP(pwm6), \
+ NPCM7XX_GRP(pwm7), \
+ NPCM7XX_GRP(rg1), \
+ NPCM7XX_GRP(rg1mdio), \
+ NPCM7XX_GRP(rg2), \
+ NPCM7XX_GRP(rg2mdio), \
+ NPCM7XX_GRP(ddr), \
+ NPCM7XX_GRP(uart1), \
+ NPCM7XX_GRP(uart2), \
+ NPCM7XX_GRP(bmcuart0a), \
+ NPCM7XX_GRP(bmcuart0b), \
+ NPCM7XX_GRP(bmcuart1), \
+ NPCM7XX_GRP(iox1), \
+ NPCM7XX_GRP(iox2), \
+ NPCM7XX_GRP(ioxh), \
+ NPCM7XX_GRP(gspi), \
+ NPCM7XX_GRP(mmc), \
+ NPCM7XX_GRP(mmcwp), \
+ NPCM7XX_GRP(mmccd), \
+ NPCM7XX_GRP(mmcrst), \
+ NPCM7XX_GRP(mmc8), \
+ NPCM7XX_GRP(r1), \
+ NPCM7XX_GRP(r1err), \
+ NPCM7XX_GRP(r1md), \
+ NPCM7XX_GRP(r2), \
+ NPCM7XX_GRP(r2err), \
+ NPCM7XX_GRP(r2md), \
+ NPCM7XX_GRP(sd1), \
+ NPCM7XX_GRP(sd1pwr), \
+ NPCM7XX_GRP(wdog1), \
+ NPCM7XX_GRP(wdog2), \
+ NPCM7XX_GRP(scipme), \
+ NPCM7XX_GRP(sci), \
+ NPCM7XX_GRP(serirq), \
+ NPCM7XX_GRP(jtag2), \
+ NPCM7XX_GRP(spix), \
+ NPCM7XX_GRP(spixcs1), \
+ NPCM7XX_GRP(pspi1), \
+ NPCM7XX_GRP(pspi2), \
+ NPCM7XX_GRP(ddc), \
+ NPCM7XX_GRP(clkreq), \
+ NPCM7XX_GRP(clkout), \
+ NPCM7XX_GRP(spi3), \
+ NPCM7XX_GRP(spi3cs1), \
+ NPCM7XX_GRP(spi3quad), \
+ NPCM7XX_GRP(spi3cs2), \
+ NPCM7XX_GRP(spi3cs3), \
+ NPCM7XX_GRP(spi0cs1), \
+ NPCM7XX_GRP(lpc), \
+ NPCM7XX_GRP(lpcclk), \
+ NPCM7XX_GRP(espi), \
+ NPCM7XX_GRP(lkgpo0), \
+ NPCM7XX_GRP(lkgpo1), \
+ NPCM7XX_GRP(lkgpo2), \
+ NPCM7XX_GRP(nprd_smi), \
+ \
+
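+/*
+ * NPCM7XX_GRPS is expanded twice: once to generate the fn_* enum used
+ * as function identifiers in the pin configuration table, and once to
+ * generate npcm7xx_groups[], keeping the two in sync by construction.
+ */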
+enum {
+#define NPCM7XX_GRP(x) fn_ ## x
+ NPCM7XX_GRPS
+ /* Add placeholders for none/gpio */
+ NPCM7XX_GRP(none),
+ NPCM7XX_GRP(gpio),
+#undef NPCM7XX_GRP
+};
+
+static struct npcm7xx_group npcm7xx_groups[] = {
+#define NPCM7XX_GRP(x) { .name = #x, .pins = x ## _pins, \
+ .npins = ARRAY_SIZE(x ## _pins) }
+ NPCM7XX_GRPS
+#undef NPCM7XX_GRP
+};
+
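+/*
+ * NPCM7XX_SFUNC(x) declares a single-group function whose only group
+ * shares its name; NPCM7XX_MKFUNC(x) then wraps the generated x_grp
+ * array in a struct npcm7xx_func entry.
+ */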
+#define NPCM7XX_SFUNC(a) NPCM7XX_FUNC(a, #a)
+#define NPCM7XX_FUNC(a, b...) static const char *a ## _grp[] = { b }
+#define NPCM7XX_MKFUNC(nm) { .name = #nm, .ngroups = ARRAY_SIZE(nm ## _grp), \
+ .groups = nm ## _grp }
+struct npcm7xx_func {
+ const char *name;
+ const unsigned int ngroups;
+ const char *const *groups;
+};
+
+NPCM7XX_SFUNC(smb0);
+NPCM7XX_SFUNC(smb0b);
+NPCM7XX_SFUNC(smb0c);
+NPCM7XX_SFUNC(smb0d);
+NPCM7XX_SFUNC(smb0den);
+NPCM7XX_SFUNC(smb1);
+NPCM7XX_SFUNC(smb1b);
+NPCM7XX_SFUNC(smb1c);
+NPCM7XX_SFUNC(smb1d);
+NPCM7XX_SFUNC(smb2);
+NPCM7XX_SFUNC(smb2b);
+NPCM7XX_SFUNC(smb2c);
+NPCM7XX_SFUNC(smb2d);
+NPCM7XX_SFUNC(smb3);
+NPCM7XX_SFUNC(smb3b);
+NPCM7XX_SFUNC(smb3c);
+NPCM7XX_SFUNC(smb3d);
+NPCM7XX_SFUNC(smb4);
+NPCM7XX_SFUNC(smb4b);
+NPCM7XX_SFUNC(smb4c);
+NPCM7XX_SFUNC(smb4d);
+NPCM7XX_SFUNC(smb4den);
+NPCM7XX_SFUNC(smb5);
+NPCM7XX_SFUNC(smb5b);
+NPCM7XX_SFUNC(smb5c);
+NPCM7XX_SFUNC(smb5d);
+NPCM7XX_SFUNC(ga20kbc);
+NPCM7XX_SFUNC(smb6);
+NPCM7XX_SFUNC(smb7);
+NPCM7XX_SFUNC(smb8);
+NPCM7XX_SFUNC(smb9);
+NPCM7XX_SFUNC(smb10);
+NPCM7XX_SFUNC(smb11);
+NPCM7XX_SFUNC(smb12);
+NPCM7XX_SFUNC(smb13);
+NPCM7XX_SFUNC(smb14);
+NPCM7XX_SFUNC(smb15);
+NPCM7XX_SFUNC(fanin0);
+NPCM7XX_SFUNC(fanin1);
+NPCM7XX_SFUNC(fanin2);
+NPCM7XX_SFUNC(fanin3);
+NPCM7XX_SFUNC(fanin4);
+NPCM7XX_SFUNC(fanin5);
+NPCM7XX_SFUNC(fanin6);
+NPCM7XX_SFUNC(fanin7);
+NPCM7XX_SFUNC(fanin8);
+NPCM7XX_SFUNC(fanin9);
+NPCM7XX_SFUNC(fanin10);
+NPCM7XX_SFUNC(fanin11);
+NPCM7XX_SFUNC(fanin12);
+NPCM7XX_SFUNC(fanin13);
+NPCM7XX_SFUNC(fanin14);
+NPCM7XX_SFUNC(fanin15);
+NPCM7XX_SFUNC(faninx);
+NPCM7XX_SFUNC(pwm0);
+NPCM7XX_SFUNC(pwm1);
+NPCM7XX_SFUNC(pwm2);
+NPCM7XX_SFUNC(pwm3);
+NPCM7XX_SFUNC(pwm4);
+NPCM7XX_SFUNC(pwm5);
+NPCM7XX_SFUNC(pwm6);
+NPCM7XX_SFUNC(pwm7);
+NPCM7XX_SFUNC(rg1);
+NPCM7XX_SFUNC(rg1mdio);
+NPCM7XX_SFUNC(rg2);
+NPCM7XX_SFUNC(rg2mdio);
+NPCM7XX_SFUNC(ddr);
+NPCM7XX_SFUNC(uart1);
+NPCM7XX_SFUNC(uart2);
+NPCM7XX_SFUNC(bmcuart0a);
+NPCM7XX_SFUNC(bmcuart0b);
+NPCM7XX_SFUNC(bmcuart1);
+NPCM7XX_SFUNC(iox1);
+NPCM7XX_SFUNC(iox2);
+NPCM7XX_SFUNC(ioxh);
+NPCM7XX_SFUNC(gspi);
+NPCM7XX_SFUNC(mmc);
+NPCM7XX_SFUNC(mmcwp);
+NPCM7XX_SFUNC(mmccd);
+NPCM7XX_SFUNC(mmcrst);
+NPCM7XX_SFUNC(mmc8);
+NPCM7XX_SFUNC(r1);
+NPCM7XX_SFUNC(r1err);
+NPCM7XX_SFUNC(r1md);
+NPCM7XX_SFUNC(r2);
+NPCM7XX_SFUNC(r2err);
+NPCM7XX_SFUNC(r2md);
+NPCM7XX_SFUNC(sd1);
+NPCM7XX_SFUNC(sd1pwr);
+NPCM7XX_SFUNC(wdog1);
+NPCM7XX_SFUNC(wdog2);
+NPCM7XX_SFUNC(scipme);
+NPCM7XX_SFUNC(sci);
+NPCM7XX_SFUNC(serirq);
+NPCM7XX_SFUNC(jtag2);
+NPCM7XX_SFUNC(spix);
+NPCM7XX_SFUNC(spixcs1);
+NPCM7XX_SFUNC(pspi1);
+NPCM7XX_SFUNC(pspi2);
+NPCM7XX_SFUNC(ddc);
+NPCM7XX_SFUNC(clkreq);
+NPCM7XX_SFUNC(clkout);
+NPCM7XX_SFUNC(spi3);
+NPCM7XX_SFUNC(spi3cs1);
+NPCM7XX_SFUNC(spi3quad);
+NPCM7XX_SFUNC(spi3cs2);
+NPCM7XX_SFUNC(spi3cs3);
+NPCM7XX_SFUNC(spi0cs1);
+NPCM7XX_SFUNC(lpc);
+NPCM7XX_SFUNC(lpcclk);
+NPCM7XX_SFUNC(espi);
+NPCM7XX_SFUNC(lkgpo0);
+NPCM7XX_SFUNC(lkgpo1);
+NPCM7XX_SFUNC(lkgpo2);
+NPCM7XX_SFUNC(nprd_smi);
+
+/* Function names */
+static struct npcm7xx_func npcm7xx_funcs[] = {
+ NPCM7XX_MKFUNC(smb0),
+ NPCM7XX_MKFUNC(smb0b),
+ NPCM7XX_MKFUNC(smb0c),
+ NPCM7XX_MKFUNC(smb0d),
+ NPCM7XX_MKFUNC(smb0den),
+ NPCM7XX_MKFUNC(smb1),
+ NPCM7XX_MKFUNC(smb1b),
+ NPCM7XX_MKFUNC(smb1c),
+ NPCM7XX_MKFUNC(smb1d),
+ NPCM7XX_MKFUNC(smb2),
+ NPCM7XX_MKFUNC(smb2b),
+ NPCM7XX_MKFUNC(smb2c),
+ NPCM7XX_MKFUNC(smb2d),
+ NPCM7XX_MKFUNC(smb3),
+ NPCM7XX_MKFUNC(smb3b),
+ NPCM7XX_MKFUNC(smb3c),
+ NPCM7XX_MKFUNC(smb3d),
+ NPCM7XX_MKFUNC(smb4),
+ NPCM7XX_MKFUNC(smb4b),
+ NPCM7XX_MKFUNC(smb4c),
+ NPCM7XX_MKFUNC(smb4d),
+ NPCM7XX_MKFUNC(smb4den),
+ NPCM7XX_MKFUNC(smb5),
+ NPCM7XX_MKFUNC(smb5b),
+ NPCM7XX_MKFUNC(smb5c),
+ NPCM7XX_MKFUNC(smb5d),
+ NPCM7XX_MKFUNC(ga20kbc),
+ NPCM7XX_MKFUNC(smb6),
+ NPCM7XX_MKFUNC(smb7),
+ NPCM7XX_MKFUNC(smb8),
+ NPCM7XX_MKFUNC(smb9),
+ NPCM7XX_MKFUNC(smb10),
+ NPCM7XX_MKFUNC(smb11),
+ NPCM7XX_MKFUNC(smb12),
+ NPCM7XX_MKFUNC(smb13),
+ NPCM7XX_MKFUNC(smb14),
+ NPCM7XX_MKFUNC(smb15),
+ NPCM7XX_MKFUNC(fanin0),
+ NPCM7XX_MKFUNC(fanin1),
+ NPCM7XX_MKFUNC(fanin2),
+ NPCM7XX_MKFUNC(fanin3),
+ NPCM7XX_MKFUNC(fanin4),
+ NPCM7XX_MKFUNC(fanin5),
+ NPCM7XX_MKFUNC(fanin6),
+ NPCM7XX_MKFUNC(fanin7),
+ NPCM7XX_MKFUNC(fanin8),
+ NPCM7XX_MKFUNC(fanin9),
+ NPCM7XX_MKFUNC(fanin10),
+ NPCM7XX_MKFUNC(fanin11),
+ NPCM7XX_MKFUNC(fanin12),
+ NPCM7XX_MKFUNC(fanin13),
+ NPCM7XX_MKFUNC(fanin14),
+ NPCM7XX_MKFUNC(fanin15),
+ NPCM7XX_MKFUNC(faninx),
+ NPCM7XX_MKFUNC(pwm0),
+ NPCM7XX_MKFUNC(pwm1),
+ NPCM7XX_MKFUNC(pwm2),
+ NPCM7XX_MKFUNC(pwm3),
+ NPCM7XX_MKFUNC(pwm4),
+ NPCM7XX_MKFUNC(pwm5),
+ NPCM7XX_MKFUNC(pwm6),
+ NPCM7XX_MKFUNC(pwm7),
+ NPCM7XX_MKFUNC(rg1),
+ NPCM7XX_MKFUNC(rg1mdio),
+ NPCM7XX_MKFUNC(rg2),
+ NPCM7XX_MKFUNC(rg2mdio),
+ NPCM7XX_MKFUNC(ddr),
+ NPCM7XX_MKFUNC(uart1),
+ NPCM7XX_MKFUNC(uart2),
+ NPCM7XX_MKFUNC(bmcuart0a),
+ NPCM7XX_MKFUNC(bmcuart0b),
+ NPCM7XX_MKFUNC(bmcuart1),
+ NPCM7XX_MKFUNC(iox1),
+ NPCM7XX_MKFUNC(iox2),
+ NPCM7XX_MKFUNC(ioxh),
+ NPCM7XX_MKFUNC(gspi),
+ NPCM7XX_MKFUNC(mmc),
+ NPCM7XX_MKFUNC(mmcwp),
+ NPCM7XX_MKFUNC(mmccd),
+ NPCM7XX_MKFUNC(mmcrst),
+ NPCM7XX_MKFUNC(mmc8),
+ NPCM7XX_MKFUNC(r1),
+ NPCM7XX_MKFUNC(r1err),
+ NPCM7XX_MKFUNC(r1md),
+ NPCM7XX_MKFUNC(r2),
+ NPCM7XX_MKFUNC(r2err),
+ NPCM7XX_MKFUNC(r2md),
+ NPCM7XX_MKFUNC(sd1),
+ NPCM7XX_MKFUNC(sd1pwr),
+ NPCM7XX_MKFUNC(wdog1),
+ NPCM7XX_MKFUNC(wdog2),
+ NPCM7XX_MKFUNC(scipme),
+ NPCM7XX_MKFUNC(sci),
+ NPCM7XX_MKFUNC(serirq),
+ NPCM7XX_MKFUNC(jtag2),
+ NPCM7XX_MKFUNC(spix),
+ NPCM7XX_MKFUNC(spixcs1),
+ NPCM7XX_MKFUNC(pspi1),
+ NPCM7XX_MKFUNC(pspi2),
+ NPCM7XX_MKFUNC(ddc),
+ NPCM7XX_MKFUNC(clkreq),
+ NPCM7XX_MKFUNC(clkout),
+ NPCM7XX_MKFUNC(spi3),
+ NPCM7XX_MKFUNC(spi3cs1),
+ NPCM7XX_MKFUNC(spi3quad),
+ NPCM7XX_MKFUNC(spi3cs2),
+ NPCM7XX_MKFUNC(spi3cs3),
+ NPCM7XX_MKFUNC(spi0cs1),
+ NPCM7XX_MKFUNC(lpc),
+ NPCM7XX_MKFUNC(lpcclk),
+ NPCM7XX_MKFUNC(espi),
+ NPCM7XX_MKFUNC(lkgpo0),
+ NPCM7XX_MKFUNC(lkgpo1),
+ NPCM7XX_MKFUNC(lkgpo2),
+ NPCM7XX_MKFUNC(nprd_smi),
+};
+
+#define NPCM7XX_PINCFG(a, b, c, d, e, f, g, h, i, j, k) \
+ [a] { .fn0 = fn_ ## b, .reg0 = NPCM7XX_GCR_ ## c, .bit0 = d, \
+ .fn1 = fn_ ## e, .reg1 = NPCM7XX_GCR_ ## f, .bit1 = g, \
+ .fn2 = fn_ ## h, .reg2 = NPCM7XX_GCR_ ## i, .bit2 = j, \
+ .flag = k }
+
+/* Drive strength controlled by NPCM7XX_GP_N_ODSC */
+#define DRIVE_STRENGTH_LO_SHIFT 8
+#define DRIVE_STRENGTH_HI_SHIFT 12
+#define DRIVE_STRENGTH_MASK 0x0000FF00
+
+#define DS(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \
+ ((hi) << DRIVE_STRENGTH_HI_SHIFT))
+#define DSLO(x) (((x) >> DRIVE_STRENGTH_LO_SHIFT) & 0xF)
+#define DSHI(x) (((x) >> DRIVE_STRENGTH_HI_SHIFT) & 0xF)
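+
+/*
+ * Example: DS(8, 12) packs 8 into bits 8-11 and 12 into bits 12-15 of
+ * the flag word, and DSLO()/DSHI() recover the two nibbles; the values
+ * are presumably the selectable low/high drive strengths in mA.
+ */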
+
+#define GPI 0x1 /* Not GPO */
+#define GPO 0x2 /* Not GPI */
+#define SLEW 0x4 /* Has Slew Control, NPCM7XX_GP_N_OSRC */
+#define SLEWLPC 0x8 /* Has Slew Control, SRCNT.3 */
+
+struct npcm7xx_pincfg {
+ int flag;
+ int fn0, reg0, bit0;
+ int fn1, reg1, bit1;
+ int fn2, reg2, bit2;
+};
+
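+/*
+ * Each entry describes up to three multiplexed functions for one pin:
+ * fnN is selected when bit bitN of GCR register regN is set, and a
+ * regN of NPCM7XX_GCR_NONE (0) means the function needs no mux bit.
+ */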
+static const struct npcm7xx_pincfg pincfg[] = {
+ /* PIN FUNCTION 1 FUNCTION 2 FUNCTION 3 FLAGS */
+ NPCM7XX_PINCFG(0, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(3, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(4, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(5, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(6, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(7, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(8, lkgpo1, FLOCKR1, 4, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(9, lkgpo2, FLOCKR1, 8, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(10, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(11, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(12, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(13, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(14, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(15, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(16, lkgpo0, FLOCKR1, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(17, pspi2, MFSEL3, 13, smb4den, I2CSEGSEL, 23, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(18, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(19, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(20, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(21, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(22, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(23, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(24, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(25, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(26, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(27, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(28, smb4, MFSEL1, 1, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(29, smb4, MFSEL1, 1, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(30, smb3, MFSEL1, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(31, smb3, MFSEL1, 0, none, NONE, 0, none, NONE, 0, 0),
+
+ NPCM7XX_PINCFG(32, spi0cs1, MFSEL1, 3, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(33, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(34, none, NONE, 0, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(37, smb3c, I2CSEGSEL, 12, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(38, smb3c, I2CSEGSEL, 12, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(39, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(40, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(41, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(42, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, DS(2, 4) | GPO),
+ NPCM7XX_PINCFG(43, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0),
+ NPCM7XX_PINCFG(44, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0),
+ NPCM7XX_PINCFG(45, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(46, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)),
+ NPCM7XX_PINCFG(47, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)),
+ NPCM7XX_PINCFG(48, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, GPO),
+ NPCM7XX_PINCFG(49, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(50, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(51, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM7XX_PINCFG(52, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(53, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM7XX_PINCFG(54, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(55, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(56, r1err, MFSEL1, 12, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(57, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)),
+ NPCM7XX_PINCFG(58, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)),
+ NPCM7XX_PINCFG(59, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(60, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(61, uart1, MFSEL1, 10, none, NONE, 0, none, NONE, 0, GPO),
+ NPCM7XX_PINCFG(62, uart1, MFSEL1, 10, bmcuart1, MFSEL3, 24, none, NONE, 0, GPO),
+ NPCM7XX_PINCFG(63, uart1, MFSEL1, 10, bmcuart1, MFSEL3, 24, none, NONE, 0, GPO),
+
+ NPCM7XX_PINCFG(64, fanin0, MFSEL2, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(65, fanin1, MFSEL2, 1, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(66, fanin2, MFSEL2, 2, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(67, fanin3, MFSEL2, 3, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(68, fanin4, MFSEL2, 4, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(69, fanin5, MFSEL2, 5, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(70, fanin6, MFSEL2, 6, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(71, fanin7, MFSEL2, 7, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(72, fanin8, MFSEL2, 8, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(73, fanin9, MFSEL2, 9, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(74, fanin10, MFSEL2, 10, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(75, fanin11, MFSEL2, 11, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(76, fanin12, MFSEL2, 12, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(77, fanin13, MFSEL2, 13, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(78, fanin14, MFSEL2, 14, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(79, fanin15, MFSEL2, 15, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(87, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(88, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(89, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(90, r2err, MFSEL1, 15, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(91, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)),
+ NPCM7XX_PINCFG(92, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)),
+ NPCM7XX_PINCFG(93, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(94, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(95, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0),
+
+ NPCM7XX_PINCFG(96, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(97, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(98, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(99, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(100, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(101, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(102, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(103, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(104, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(105, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(106, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(107, rg1, MFSEL4, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(108, rg1mdio, MFSEL4, 21, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(109, rg1mdio, MFSEL4, 21, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(110, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(111, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(112, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(113, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(114, smb0, MFSEL1, 6, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(115, smb0, MFSEL1, 6, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(116, smb1, MFSEL1, 7, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(117, smb1, MFSEL1, 7, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(118, smb2, MFSEL1, 8, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(119, smb2, MFSEL1, 8, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(120, smb2c, I2CSEGSEL, 9, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(121, smb2c, I2CSEGSEL, 9, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(122, smb2b, I2CSEGSEL, 8, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(123, smb2b, I2CSEGSEL, 8, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(124, smb1c, I2CSEGSEL, 6, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(125, smb1c, I2CSEGSEL, 6, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(126, smb1b, I2CSEGSEL, 5, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(127, smb1b, I2CSEGSEL, 5, none, NONE, 0, none, NONE, 0, SLEW),
+
+ NPCM7XX_PINCFG(128, smb8, MFSEL4, 11, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(129, smb8, MFSEL4, 11, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(130, smb9, MFSEL4, 12, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(131, smb9, MFSEL4, 12, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(132, smb10, MFSEL4, 13, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(133, smb10, MFSEL4, 13, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(134, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(135, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(136, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(137, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(138, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(139, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(140, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(141, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(142, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(143, sd1, MFSEL3, 12, sd1pwr, MFSEL4, 5, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(153, mmcwp, FLOCKR1, 24, none, NONE, 0, none, NONE, 0, 0), /* Z1/A1 */
+ NPCM7XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(155, mmccd, MFSEL3, 25, mmcrst, MFSEL4, 6, none, NONE, 0, 0), /* Z1/A1 */
+ NPCM7XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+
+ NPCM7XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(161, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, DS(8, 12)),
+ NPCM7XX_PINCFG(162, serirq, NONE, 0, gpio, MFSEL1, 31, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(163, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0),
+ NPCM7XX_PINCFG(164, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC),
+ NPCM7XX_PINCFG(165, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC),
+ NPCM7XX_PINCFG(166, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC),
+ NPCM7XX_PINCFG(167, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC),
+ NPCM7XX_PINCFG(168, lpcclk, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL3, 16, 0),
+ NPCM7XX_PINCFG(169, scipme, MFSEL3, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(170, sci, MFSEL1, 22, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(171, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(172, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(173, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(174, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(175, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(176, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(177, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(180, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(181, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(182, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(183, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(184, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
+ NPCM7XX_PINCFG(185, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
+ NPCM7XX_PINCFG(186, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(187, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(188, spi3quad, MFSEL4, 20, spi3cs2, MFSEL4, 18, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(189, spi3quad, MFSEL4, 20, spi3cs3, MFSEL4, 19, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(190, gpio, FLOCKR1, 20, nprd_smi, NONE, 0, none, NONE, 0, DS(2, 4)),
+ NPCM7XX_PINCFG(191, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */
+
+ NPCM7XX_PINCFG(192, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */
+ NPCM7XX_PINCFG(193, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(194, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(195, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(196, smb0c, I2CSEGSEL, 1, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(197, smb0den, I2CSEGSEL, 22, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(198, smb0d, I2CSEGSEL, 2, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(199, smb0d, I2CSEGSEL, 2, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(200, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(201, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(202, smb0c, I2CSEGSEL, 1, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(203, faninx, MFSEL3, 3, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(204, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(205, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(206, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(207, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(208, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(209, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(210, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(211, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(212, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(213, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(214, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(215, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(216, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(217, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(218, wdog1, MFSEL3, 19, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(219, wdog2, MFSEL3, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)),
+ NPCM7XX_PINCFG(220, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(221, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(222, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0),
+ NPCM7XX_PINCFG(223, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0),
+
+ NPCM7XX_PINCFG(224, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, SLEW),
+ NPCM7XX_PINCFG(225, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
+ NPCM7XX_PINCFG(226, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO),
+ NPCM7XX_PINCFG(227, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(228, spixcs1, MFSEL4, 28, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(229, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(230, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW),
+ NPCM7XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, DS(8, 12)),
+ NPCM7XX_PINCFG(253, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC1 power */
+ NPCM7XX_PINCFG(254, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC2 power */
+ NPCM7XX_PINCFG(255, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* DACOSEL */
+};
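+
+/*
+ * Pins 35-36 and 232-252 have no entry above, and 253-255 are marked
+ * GPI (input-only); see the trailing comments on those entries.
+ */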
+
+/* number, name, drv_data */
+static const struct pinctrl_pin_desc npcm7xx_pins[] = {
+ PINCTRL_PIN(0, "GPIO0/IOX1DI"),
+ PINCTRL_PIN(1, "GPIO1/IOX1LD"),
+ PINCTRL_PIN(2, "GPIO2/IOX1CK"),
+ PINCTRL_PIN(3, "GPIO3/IOX1D0"),
+ PINCTRL_PIN(4, "GPIO4/IOX2DI/SMB1DSDA"),
+ PINCTRL_PIN(5, "GPIO5/IOX2LD/SMB1DSCL"),
+ PINCTRL_PIN(6, "GPIO6/IOX2CK/SMB2DSDA"),
+ PINCTRL_PIN(7, "GPIO7/IOX2D0/SMB2DSCL"),
+ PINCTRL_PIN(8, "GPIO8/LKGPO1"),
+ PINCTRL_PIN(9, "GPIO9/LKGPO2"),
+ PINCTRL_PIN(10, "GPIO10/IOXHLD"),
+ PINCTRL_PIN(11, "GPIO11/IOXHCK"),
+ PINCTRL_PIN(12, "GPIO12/GSPICK/SMB5BSCL"),
+ PINCTRL_PIN(13, "GPIO13/GSPIDO/SMB5BSDA"),
+ PINCTRL_PIN(14, "GPIO14/GSPIDI/SMB5CSCL"),
+ PINCTRL_PIN(15, "GPIO15/GSPICS/SMB5CSDA"),
+ PINCTRL_PIN(16, "GPIO16/LKGPO0"),
+ PINCTRL_PIN(17, "GPIO17/PSPI2DI/SMB4DEN"),
+ PINCTRL_PIN(18, "GPIO18/PSPI2D0/SMB4BSDA"),
+ PINCTRL_PIN(19, "GPIO19/PSPI2CK/SMB4BSCL"),
+ PINCTRL_PIN(20, "GPIO20/SMB4CSDA/SMB15SDA"),
+ PINCTRL_PIN(21, "GPIO21/SMB4CSCL/SMB15SCL"),
+ PINCTRL_PIN(22, "GPIO22/SMB4DSDA/SMB14SDA"),
+ PINCTRL_PIN(23, "GPIO23/SMB4DSCL/SMB14SCL"),
+ PINCTRL_PIN(24, "GPIO24/IOXHDO"),
+ PINCTRL_PIN(25, "GPIO25/IOXHDI"),
+ PINCTRL_PIN(26, "GPIO26/SMB5SDA"),
+ PINCTRL_PIN(27, "GPIO27/SMB5SCL"),
+ PINCTRL_PIN(28, "GPIO28/SMB4SDA"),
+ PINCTRL_PIN(29, "GPIO29/SMB4SCL"),
+ PINCTRL_PIN(30, "GPIO30/SMB3SDA"),
+ PINCTRL_PIN(31, "GPIO31/SMB3SCL"),
+
+ PINCTRL_PIN(32, "GPIO32/nSPI0CS1"),
+ PINCTRL_PIN(33, "SPI0D2"),
+ PINCTRL_PIN(34, "SPI0D3"),
+ PINCTRL_PIN(37, "GPIO37/SMB3CSDA"),
+ PINCTRL_PIN(38, "GPIO38/SMB3CSCL"),
+ PINCTRL_PIN(39, "GPIO39/SMB3BSDA"),
+ PINCTRL_PIN(40, "GPIO40/SMB3BSCL"),
+ PINCTRL_PIN(41, "GPIO41/BSPRXD"),
+ PINCTRL_PIN(42, "GPO42/BSPTXD/STRAP11"),
+ PINCTRL_PIN(43, "GPIO43/RXD1/JTMS2/BU1RXD"),
+ PINCTRL_PIN(44, "GPIO44/nCTS1/JTDI2/BU1CTS"),
+ PINCTRL_PIN(45, "GPIO45/nDCD1/JTDO2"),
+ PINCTRL_PIN(46, "GPIO46/nDSR1/JTCK2"),
+ PINCTRL_PIN(47, "GPIO47/nRI1/JCP_RDY2"),
+ PINCTRL_PIN(48, "GPIO48/TXD2/BSPTXD"),
+ PINCTRL_PIN(49, "GPIO49/RXD2/BSPRXD"),
+ PINCTRL_PIN(50, "GPIO50/nCTS2"),
+ PINCTRL_PIN(51, "GPO51/nRTS2/STRAP2"),
+ PINCTRL_PIN(52, "GPIO52/nDCD2"),
+ PINCTRL_PIN(53, "GPO53/nDTR2_BOUT2/STRAP1"),
+ PINCTRL_PIN(54, "GPIO54/nDSR2"),
+ PINCTRL_PIN(55, "GPIO55/nRI2"),
+ PINCTRL_PIN(56, "GPIO56/R1RXERR"),
+ PINCTRL_PIN(57, "GPIO57/R1MDC"),
+ PINCTRL_PIN(58, "GPIO58/R1MDIO"),
+ PINCTRL_PIN(59, "GPIO59/SMB3DSDA"),
+ PINCTRL_PIN(60, "GPIO60/SMB3DSCL"),
+ PINCTRL_PIN(61, "GPO61/nDTR1_BOUT1/STRAP6"),
+ PINCTRL_PIN(62, "GPO62/nRTST1/STRAP5"),
+ PINCTRL_PIN(63, "GPO63/TXD1/STRAP4"),
+
+ PINCTRL_PIN(64, "GPIO64/FANIN0"),
+ PINCTRL_PIN(65, "GPIO65/FANIN1"),
+ PINCTRL_PIN(66, "GPIO66/FANIN2"),
+ PINCTRL_PIN(67, "GPIO67/FANIN3"),
+ PINCTRL_PIN(68, "GPIO68/FANIN4"),
+ PINCTRL_PIN(69, "GPIO69/FANIN5"),
+ PINCTRL_PIN(70, "GPIO70/FANIN6"),
+ PINCTRL_PIN(71, "GPIO71/FANIN7"),
+ PINCTRL_PIN(72, "GPIO72/FANIN8"),
+ PINCTRL_PIN(73, "GPIO73/FANIN9"),
+ PINCTRL_PIN(74, "GPIO74/FANIN10"),
+ PINCTRL_PIN(75, "GPIO75/FANIN11"),
+ PINCTRL_PIN(76, "GPIO76/FANIN12"),
+ PINCTRL_PIN(77, "GPIO77/FANIN13"),
+ PINCTRL_PIN(78, "GPIO78/FANIN14"),
+ PINCTRL_PIN(79, "GPIO79/FANIN15"),
+ PINCTRL_PIN(80, "GPIO80/PWM0"),
+ PINCTRL_PIN(81, "GPIO81/PWM1"),
+ PINCTRL_PIN(82, "GPIO82/PWM2"),
+ PINCTRL_PIN(83, "GPIO83/PWM3"),
+ PINCTRL_PIN(84, "GPIO84/R2TXD0"),
+ PINCTRL_PIN(85, "GPIO85/R2TXD1"),
+ PINCTRL_PIN(86, "GPIO86/R2TXEN"),
+ PINCTRL_PIN(87, "GPIO87/R2RXD0"),
+ PINCTRL_PIN(88, "GPIO88/R2RXD1"),
+ PINCTRL_PIN(89, "GPIO89/R2CRSDV"),
+ PINCTRL_PIN(90, "GPIO90/R2RXERR"),
+ PINCTRL_PIN(91, "GPIO91/R2MDC"),
+ PINCTRL_PIN(92, "GPIO92/R2MDIO"),
+ PINCTRL_PIN(93, "GPIO93/GA20/SMB5DSCL"),
+ PINCTRL_PIN(94, "GPIO94/nKBRST/SMB5DSDA"),
+ PINCTRL_PIN(95, "GPIO95/nLRESET/nESPIRST"),
+
+ PINCTRL_PIN(96, "GPIO96/RG1TXD0"),
+ PINCTRL_PIN(97, "GPIO97/RG1TXD1"),
+ PINCTRL_PIN(98, "GPIO98/RG1TXD2"),
+ PINCTRL_PIN(99, "GPIO99/RG1TXD3"),
+ PINCTRL_PIN(100, "GPIO100/RG1TXC"),
+ PINCTRL_PIN(101, "GPIO101/RG1TXCTL"),
+ PINCTRL_PIN(102, "GPIO102/RG1RXD0"),
+ PINCTRL_PIN(103, "GPIO103/RG1RXD1"),
+ PINCTRL_PIN(104, "GPIO104/RG1RXD2"),
+ PINCTRL_PIN(105, "GPIO105/RG1RXD3"),
+ PINCTRL_PIN(106, "GPIO106/RG1RXC"),
+ PINCTRL_PIN(107, "GPIO107/RG1RXCTL"),
+ PINCTRL_PIN(108, "GPIO108/RG1MDC"),
+ PINCTRL_PIN(109, "GPIO109/RG1MDIO"),
+ PINCTRL_PIN(110, "GPIO110/RG2TXD0/DDRV0"),
+ PINCTRL_PIN(111, "GPIO111/RG2TXD1/DDRV1"),
+ PINCTRL_PIN(112, "GPIO112/RG2TXD2/DDRV2"),
+ PINCTRL_PIN(113, "GPIO113/RG2TXD3/DDRV3"),
+ PINCTRL_PIN(114, "GPIO114/SMB0SCL"),
+ PINCTRL_PIN(115, "GPIO115/SMB0SDA"),
+ PINCTRL_PIN(116, "GPIO116/SMB1SCL"),
+ PINCTRL_PIN(117, "GPIO117/SMB1SDA"),
+ PINCTRL_PIN(118, "GPIO118/SMB2SCL"),
+ PINCTRL_PIN(119, "GPIO119/SMB2SDA"),
+ PINCTRL_PIN(120, "GPIO120/SMB2CSDA"),
+ PINCTRL_PIN(121, "GPIO121/SMB2CSCL"),
+ PINCTRL_PIN(122, "GPIO122/SMB2BSDA"),
+ PINCTRL_PIN(123, "GPIO123/SMB2BSCL"),
+ PINCTRL_PIN(124, "GPIO124/SMB1CSDA"),
+ PINCTRL_PIN(125, "GPIO125/SMB1CSCL"),
+ PINCTRL_PIN(126, "GPIO126/SMB1BSDA"),
+ PINCTRL_PIN(127, "GPIO127/SMB1BSCL"),
+
+ PINCTRL_PIN(128, "GPIO128/SMB8SCL"),
+ PINCTRL_PIN(129, "GPIO129/SMB8SDA"),
+ PINCTRL_PIN(130, "GPIO130/SMB9SCL"),
+ PINCTRL_PIN(131, "GPIO131/SMB9SDA"),
+ PINCTRL_PIN(132, "GPIO132/SMB10SCL"),
+ PINCTRL_PIN(133, "GPIO133/SMB10SDA"),
+ PINCTRL_PIN(134, "GPIO134/SMB11SCL"),
+ PINCTRL_PIN(135, "GPIO135/SMB11SDA"),
+ PINCTRL_PIN(136, "GPIO136/SD1DT0"),
+ PINCTRL_PIN(137, "GPIO137/SD1DT1"),
+ PINCTRL_PIN(138, "GPIO138/SD1DT2"),
+ PINCTRL_PIN(139, "GPIO139/SD1DT3"),
+ PINCTRL_PIN(140, "GPIO140/SD1CLK"),
+ PINCTRL_PIN(141, "GPIO141/SD1WP"),
+ PINCTRL_PIN(142, "GPIO142/SD1CMD"),
+ PINCTRL_PIN(143, "GPIO143/SD1CD/SD1PWR"),
+ PINCTRL_PIN(144, "GPIO144/PWM4"),
+ PINCTRL_PIN(145, "GPIO145/PWM5"),
+ PINCTRL_PIN(146, "GPIO146/PWM6"),
+ PINCTRL_PIN(147, "GPIO147/PWM7"),
+ PINCTRL_PIN(148, "GPIO148/MMCDT4"),
+ PINCTRL_PIN(149, "GPIO149/MMCDT5"),
+ PINCTRL_PIN(150, "GPIO150/MMCDT6"),
+ PINCTRL_PIN(151, "GPIO151/MMCDT7"),
+ PINCTRL_PIN(152, "GPIO152/MMCCLK"),
+ PINCTRL_PIN(153, "GPIO153/MMCWP"),
+ PINCTRL_PIN(154, "GPIO154/MMCCMD"),
+ PINCTRL_PIN(155, "GPIO155/nMMCCD/nMMCRST"),
+ PINCTRL_PIN(156, "GPIO156/MMCDT0"),
+ PINCTRL_PIN(157, "GPIO157/MMCDT1"),
+ PINCTRL_PIN(158, "GPIO158/MMCDT2"),
+ PINCTRL_PIN(159, "GPIO159/MMCDT3"),
+
+ PINCTRL_PIN(160, "GPIO160/CLKOUT/RNGOSCOUT"),
+ PINCTRL_PIN(161, "GPIO161/nLFRAME/nESPICS"),
+ PINCTRL_PIN(162, "GPIO162/SERIRQ"),
+ PINCTRL_PIN(163, "GPIO163/LCLK/ESPICLK"),
+ PINCTRL_PIN(164, "GPIO164/LAD0/ESPI_IO0"/*dscnt6*/),
+ PINCTRL_PIN(165, "GPIO165/LAD1/ESPI_IO1"/*dscnt6*/),
+ PINCTRL_PIN(166, "GPIO166/LAD2/ESPI_IO2"/*dscnt6*/),
+ PINCTRL_PIN(167, "GPIO167/LAD3/ESPI_IO3"/*dscnt6*/),
+ PINCTRL_PIN(168, "GPIO168/nCLKRUN/nESPIALERT"),
+ PINCTRL_PIN(169, "GPIO169/nSCIPME"),
+ PINCTRL_PIN(170, "GPIO170/nSMI"),
+ PINCTRL_PIN(171, "GPIO171/SMB6SCL"),
+ PINCTRL_PIN(172, "GPIO172/SMB6SDA"),
+ PINCTRL_PIN(173, "GPIO173/SMB7SCL"),
+ PINCTRL_PIN(174, "GPIO174/SMB7SDA"),
+ PINCTRL_PIN(175, "GPIO175/PSPI1CK/FANIN19"),
+ PINCTRL_PIN(176, "GPIO176/PSPI1DO/FANIN18"),
+ PINCTRL_PIN(177, "GPIO177/PSPI1DI/FANIN17"),
+ PINCTRL_PIN(178, "GPIO178/R1TXD0"),
+ PINCTRL_PIN(179, "GPIO179/R1TXD1"),
+ PINCTRL_PIN(180, "GPIO180/R1TXEN"),
+ PINCTRL_PIN(181, "GPIO181/R1RXD0"),
+ PINCTRL_PIN(182, "GPIO182/R1RXD1"),
+ PINCTRL_PIN(183, "GPIO183/SPI3CK"),
+ PINCTRL_PIN(184, "GPO184/SPI3D0/STRAP9"),
+ PINCTRL_PIN(185, "GPO185/SPI3D1/STRAP10"),
+ PINCTRL_PIN(186, "GPIO186/nSPI3CS0"),
+ PINCTRL_PIN(187, "GPIO187/nSPI3CS1"),
+ PINCTRL_PIN(188, "GPIO188/SPI3D2/nSPI3CS2"),
+ PINCTRL_PIN(189, "GPIO189/SPI3D3/nSPI3CS3"),
+ PINCTRL_PIN(190, "GPIO190/nPRD_SMI"),
+ PINCTRL_PIN(191, "GPIO191"),
+
+ PINCTRL_PIN(192, "GPIO192"),
+ PINCTRL_PIN(193, "GPIO193/R1CRSDV"),
+ PINCTRL_PIN(194, "GPIO194/SMB0BSCL"),
+ PINCTRL_PIN(195, "GPIO195/SMB0BSDA"),
+ PINCTRL_PIN(196, "GPIO196/SMB0CSCL"),
+ PINCTRL_PIN(197, "GPIO197/SMB0DEN"),
+ PINCTRL_PIN(198, "GPIO198/SMB0DSDA"),
+ PINCTRL_PIN(199, "GPIO199/SMB0DSCL"),
+ PINCTRL_PIN(200, "GPIO200/R2CK"),
+ PINCTRL_PIN(201, "GPIO201/R1CK"),
+ PINCTRL_PIN(202, "GPIO202/SMB0CSDA"),
+ PINCTRL_PIN(203, "GPIO203/FANIN16"),
+ PINCTRL_PIN(204, "GPIO204/DDC2SCL"),
+ PINCTRL_PIN(205, "GPIO205/DDC2SDA"),
+ PINCTRL_PIN(206, "GPIO206/HSYNC2"),
+ PINCTRL_PIN(207, "GPIO207/VSYNC2"),
+ PINCTRL_PIN(208, "GPIO208/RG2TXC/DVCK"),
+ PINCTRL_PIN(209, "GPIO209/RG2TXCTL/DDRV4"),
+ PINCTRL_PIN(210, "GPIO210/RG2RXD0/DDRV5"),
+ PINCTRL_PIN(211, "GPIO211/RG2RXD1/DDRV6"),
+ PINCTRL_PIN(212, "GPIO212/RG2RXD2/DDRV7"),
+ PINCTRL_PIN(213, "GPIO213/RG2RXD3/DDRV8"),
+ PINCTRL_PIN(214, "GPIO214/RG2RXC/DDRV9"),
+ PINCTRL_PIN(215, "GPIO215/RG2RXCTL/DDRV10"),
+ PINCTRL_PIN(216, "GPIO216/RG2MDC/DDRV11"),
+ PINCTRL_PIN(217, "GPIO217/RG2MDIO/DVHSYNC"),
+ PINCTRL_PIN(218, "GPIO218/nWDO1"),
+ PINCTRL_PIN(219, "GPIO219/nWDO2"),
+ PINCTRL_PIN(220, "GPIO220/SMB12SCL"),
+ PINCTRL_PIN(221, "GPIO221/SMB12SDA"),
+ PINCTRL_PIN(222, "GPIO222/SMB13SCL"),
+ PINCTRL_PIN(223, "GPIO223/SMB13SDA"),
+
+ PINCTRL_PIN(224, "GPIO224/SPIXCK"),
+ PINCTRL_PIN(225, "GPO225/SPIXD0/STRAP12"),
+ PINCTRL_PIN(226, "GPO226/SPIXD1/STRAP13"),
+ PINCTRL_PIN(227, "GPIO227/nSPIXCS0"),
+ PINCTRL_PIN(228, "GPIO228/nSPIXCS1"),
+ PINCTRL_PIN(229, "GPO229/SPIXD2/STRAP3"),
+ PINCTRL_PIN(230, "GPIO230/SPIXD3"),
+ PINCTRL_PIN(231, "GPIO231/nCLKREQ"),
+ PINCTRL_PIN(255, "GPI255/DACOSEL"),
+};
+
+/* Enable mode in pin group */
+static void npcm7xx_setfunc(struct regmap *gcr_regmap, const unsigned int *pin,
+ int pin_number, int mode)
+{
+ const struct npcm7xx_pincfg *cfg;
+ int i;
+
+ for (i = 0; i < pin_number; i++) {
+ cfg = &pincfg[pin[i]];
+ if (mode == fn_gpio || cfg->fn0 == mode ||
+     cfg->fn1 == mode || cfg->fn2 == mode) {
+ if (cfg->reg0)
+ regmap_update_bits(gcr_regmap, cfg->reg0,
+ BIT(cfg->bit0),
+ (cfg->fn0 == mode) ?
+ BIT(cfg->bit0) : 0);
+ if (cfg->reg1)
+ regmap_update_bits(gcr_regmap, cfg->reg1,
+ BIT(cfg->bit1),
+ (cfg->fn1 == mode) ?
+ BIT(cfg->bit1) : 0);
+ if (cfg->reg2)
+ regmap_update_bits(gcr_regmap, cfg->reg2,
+ BIT(cfg->bit2),
+ (cfg->fn2 == mode) ?
+ BIT(cfg->bit2) : 0);
+ }
+ }
+}
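+
+/*
+ * Illustrative sketch only: the pincfg[] entry shape and the mode name below
+ * are hypothetical, but show how npcm7xx_setfunc() resolves a mode. Given an
+ * entry such as
+ *
+ *	[33] = { .fn0 = fn_smb5, .reg0 = NPCM7XX_GCR_MFSEL1, .bit0 = 1, ... },
+ *
+ * a caller selecting that function for the pin:
+ *
+ *	unsigned int pin = 33;
+ *
+ *	npcm7xx_setfunc(gcr_regmap, &pin, 1, fn_smb5);
+ *
+ * sets MFSEL1 bit 1 (fn0 matches the mode) and clears the select bits of the
+ * entry's other register fields, steering the pin to that function.
+ */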
+
+/* Get slew rate of pin (high/low) */
+static int npcm7xx_get_slew_rate(struct npcm7xx_gpio *bank,
+ struct regmap *gcr_regmap, unsigned int pin)
+{
+ u32 val;
+ int gpio = (pin % bank->gc.ngpio);
+ unsigned long pinmask = BIT(gpio);
+
+ if (pincfg[pin].flag & SLEW)
+ return !!(ioread32(bank->base + NPCM7XX_GP_N_OSRC)
+ & pinmask);
+ /* LPC Slew rate in SRCNT register */
+ if (pincfg[pin].flag & SLEWLPC) {
+ regmap_read(gcr_regmap, NPCM7XX_GCR_SRCNT, &val);
+ return !!(val & SRCNT_ESPI);
+ }
+
+ return -EINVAL;
+}
+
+/* Set slew rate of pin (high/low) */
+static int npcm7xx_set_slew_rate(struct npcm7xx_gpio *bank,
+ struct regmap *gcr_regmap, unsigned int pin,
+ int arg)
+{
+ int gpio = BIT(pin % bank->gc.ngpio);
+
+ if (pincfg[pin].flag & SLEW) {
+ switch (arg) {
+ case 0:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_OSRC,
+ gpio);
+ return 0;
+ case 1:
+ npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_OSRC,
+ gpio);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+ /* LPC Slew rate in SRCNT register */
+ if (pincfg[pin].flag & SLEWLPC) {
+ switch (arg) {
+ case 0:
+ regmap_update_bits(gcr_regmap, NPCM7XX_GCR_SRCNT,
+ SRCNT_ESPI, 0);
+ return 0;
+ case 1:
+ regmap_update_bits(gcr_regmap, NPCM7XX_GCR_SRCNT,
+ SRCNT_ESPI, SRCNT_ESPI);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* Get drive strength for a pin, if supported */
+static int npcm7xx_get_drive_strength(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+ struct npcm7xx_gpio *bank =
+ &npcm->gpio_bank[pin / NPCM7XX_GPIO_PER_BANK];
+ int gpio = (pin % bank->gc.ngpio);
+ unsigned long pinmask = BIT(gpio);
+ u32 ds = 0;
+ int flg, val;
+
+ flg = pincfg[pin].flag;
+ if (flg & DRIVE_STRENGTH_MASK) {
+ /* Get standard reading */
+ val = ioread32(bank->base + NPCM7XX_GP_N_ODSC)
+ & pinmask;
+ ds = val ? DSHI(flg) : DSLO(flg);
+ dev_dbg(bank->gc.parent,
+ "pin %d strength %d = %d\n", pin, val, ds);
+ return ds;
+ }
+
+ return -EINVAL;
+}
+
+/* Set drive strength for a pin, if supported */
+static int npcm7xx_set_drive_strength(struct npcm7xx_pinctrl *npcm,
+ unsigned int pin, int nval)
+{
+ int v;
+ struct npcm7xx_gpio *bank =
+ &npcm->gpio_bank[pin / NPCM7XX_GPIO_PER_BANK];
+ int gpio = BIT(pin % bank->gc.ngpio);
+
+ v = (pincfg[pin].flag & DRIVE_STRENGTH_MASK);
+ if (!nval || !v)
+ return -ENOTSUPP;
+ if (DSLO(v) == nval) {
+ dev_dbg(bank->gc.parent,
+ "setting pin %d to low strength [%d]\n", pin, nval);
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_ODSC, gpio);
+ return 0;
+ } else if (DSHI(v) == nval) {
+ dev_dbg(bank->gc.parent,
+ "setting pin %d to high strength [%d]\n", pin, nval);
+ npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_ODSC, gpio);
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
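+
+/*
+ * Sketch of the flag encoding assumed by DSLO()/DSHI() above (the shift
+ * values here are illustrative; the real macros are defined earlier in this
+ * file). The per-pin flag is presumed to pack the two supported drive
+ * strengths, in mA, into two dedicated fields:
+ *
+ *	#define DSLO(x)	(((x) >> 8) & 0xf)
+ *	#define DSHI(x)	(((x) >> 12) & 0xf)
+ *
+ * so a pin whose flag encodes low = 4, high = 8 accepts nval == 4 (ODSC bit
+ * cleared) or nval == 8 (ODSC bit set); anything else is -ENOTSUPP.
+ */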
+
+/* pinctrl_ops */
+static void npcm7xx_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int offset)
+{
+ seq_printf(s, "pinctrl_ops.dbg: %d", offset);
+}
+
+static int npcm7xx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+
+ dev_dbg(npcm->dev, "group size: %d\n", ARRAY_SIZE(npcm7xx_groups));
+ return ARRAY_SIZE(npcm7xx_groups);
+}
+
+static const char *npcm7xx_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ return npcm7xx_groups[selector].name;
+}
+
+static int npcm7xx_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *npins)
+{
+ *npins = npcm7xx_groups[selector].npins;
+ *pins = npcm7xx_groups[selector].pins;
+
+ return 0;
+}
+
+static int npcm7xx_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map,
+ u32 *num_maps)
+{
+ struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+
+ dev_dbg(npcm->dev, "dt_node_to_map: %s\n", np_config->name);
+ return pinconf_generic_dt_node_to_map(pctldev, np_config,
+ map, num_maps,
+ PIN_MAP_TYPE_INVALID);
+}
+
+static void npcm7xx_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, u32 num_maps)
+{
+ kfree(map);
+}
+
+static struct pinctrl_ops npcm7xx_pinctrl_ops = {
+ .get_groups_count = npcm7xx_get_groups_count,
+ .get_group_name = npcm7xx_get_group_name,
+ .get_group_pins = npcm7xx_get_group_pins,
+ .pin_dbg_show = npcm7xx_pin_dbg_show,
+ .dt_node_to_map = npcm7xx_dt_node_to_map,
+ .dt_free_map = npcm7xx_dt_free_map,
+};
+
+/* pinmux_ops */
+static int npcm7xx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(npcm7xx_funcs);
+}
+
+static const char *npcm7xx_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int function)
+{
+ return npcm7xx_funcs[function].name;
+}
+
+static int npcm7xx_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ const char * const **groups,
+ unsigned int * const ngroups)
+{
+ *ngroups = npcm7xx_funcs[function].ngroups;
+ *groups = npcm7xx_funcs[function].groups;
+
+ return 0;
+}
+
+static int npcm7xx_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
+{
+ struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+
+ dev_dbg(npcm->dev, "set_mux: %d, %d[%s]\n", function, group,
+ npcm7xx_groups[group].name);
+
+ npcm7xx_setfunc(npcm->gcr_regmap, npcm7xx_groups[group].pins,
+ npcm7xx_groups[group].npins, group);
+
+ return 0;
+}
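+
+/*
+ * Note: the group selector itself is passed to npcm7xx_setfunc() as the
+ * mode, so this relies on npcm7xx_groups[] and the fn_* mode values staying
+ * in the same order, i.e. "group index == function-select value" holds for
+ * every entry.
+ */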
+
+static int npcm7xx_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+
+ if (!range) {
+ dev_err(npcm->dev, "invalid range\n");
+ return -EINVAL;
+ }
+ if (!range->gc) {
+ dev_err(npcm->dev, "invalid gpiochip\n");
+ return -EINVAL;
+ }
+
+ npcm7xx_setfunc(npcm->gcr_regmap, &offset, 1, fn_gpio);
+
+ return 0;
+}
+
+/* Release GPIO back to pinctrl mode */
+static void npcm7xx_gpio_request_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+ int virq;
+
+ virq = irq_find_mapping(npcm->domain, offset);
+ if (virq)
+ irq_dispose_mapping(virq);
+}
+
+/* Set GPIO direction */
+static int npcm_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset, bool input)
+{
+ struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+ struct npcm7xx_gpio *bank =
+ &npcm->gpio_bank[offset / NPCM7XX_GPIO_PER_BANK];
+ int gpio = BIT(offset % bank->gc.ngpio);
+
+ dev_dbg(bank->gc.parent, "GPIO Set Direction: %d = %d\n", offset,
+ input);
+ if (input)
+ iowrite32(gpio, bank->base + NPCM7XX_GP_N_OEC);
+ else
+ iowrite32(gpio, bank->base + NPCM7XX_GP_N_OES);
+
+ return 0;
+}
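+
+/*
+ * The OES/OEC registers above act as write-one-to-set and write-one-to-clear
+ * strobes for the output-enable state, so a single iowrite32(BIT(n)) changes
+ * one pin's direction without a read-modify-write cycle or extra locking.
+ */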
+
+static struct pinmux_ops npcm7xx_pinmux_ops = {
+ .get_functions_count = npcm7xx_get_functions_count,
+ .get_function_name = npcm7xx_get_function_name,
+ .get_function_groups = npcm7xx_get_function_groups,
+ .set_mux = npcm7xx_pinmux_set_mux,
+ .gpio_request_enable = npcm7xx_gpio_request_enable,
+ .gpio_disable_free = npcm7xx_gpio_request_free,
+ .gpio_set_direction = npcm_gpio_set_direction,
+};
+
+/* pinconf_ops */
+static int npcm7xx_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+ struct npcm7xx_gpio *bank =
+ &npcm->gpio_bank[pin / NPCM7XX_GPIO_PER_BANK];
+ int gpio = (pin % bank->gc.ngpio);
+ unsigned long pinmask = BIT(gpio);
+ u32 ie, oe, pu, pd;
+ int rc = 0;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ pu = ioread32(bank->base + NPCM7XX_GP_N_PU) & pinmask;
+ pd = ioread32(bank->base + NPCM7XX_GP_N_PD) & pinmask;
+ if (param == PIN_CONFIG_BIAS_DISABLE)
+ rc = (!pu && !pd);
+ else if (param == PIN_CONFIG_BIAS_PULL_UP)
+ rc = (pu && !pd);
+ else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
+ rc = (!pu && pd);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_INPUT_ENABLE:
+ ie = ioread32(bank->base + NPCM7XX_GP_N_IEM) & pinmask;
+ oe = ioread32(bank->base + NPCM7XX_GP_N_OE) & pinmask;
+ if (param == PIN_CONFIG_INPUT_ENABLE)
+ rc = (ie && !oe);
+ else if (param == PIN_CONFIG_OUTPUT)
+ rc = (!ie && oe);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ rc = !(ioread32(bank->base + NPCM7XX_GP_N_OTYP) & pinmask);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ rc = ioread32(bank->base + NPCM7XX_GP_N_OTYP) & pinmask;
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ rc = ioread32(bank->base + NPCM7XX_GP_N_DBNC) & pinmask;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ rc = npcm7xx_get_drive_strength(pctldev, pin);
+ if (rc)
+ *config = pinconf_to_config_packed(param, rc);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ rc = npcm7xx_get_slew_rate(bank, npcm->gcr_regmap, pin);
+ if (rc >= 0)
+ *config = pinconf_to_config_packed(param, rc);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (!rc)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int npcm7xx_config_set_one(struct npcm7xx_pinctrl *npcm,
+ unsigned int pin, unsigned long config)
+{
+ enum pin_config_param param = pinconf_to_config_param(config);
+ u16 arg = pinconf_to_config_argument(config);
+ struct npcm7xx_gpio *bank =
+ &npcm->gpio_bank[pin / NPCM7XX_GPIO_PER_BANK];
+ int gpio = BIT(pin % bank->gc.ngpio);
+
+ dev_dbg(bank->gc.parent, "param=%d %d[GPIO]\n", param, pin);
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_PU, gpio);
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_PD, gpio);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_PU, gpio);
+ npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_PD, gpio);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_PD, gpio);
+ npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_PU, gpio);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ if (arg) {
+ iowrite32(gpio, bank->base + NPCM7XX_GP_N_OEC);
+ npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_IEM,
+ gpio);
+ } else
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_IEM,
+ gpio);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_IEM, gpio);
+ iowrite32(gpio, arg ? bank->base + NPCM7XX_GP_N_DOS :
+ bank->base + NPCM7XX_GP_N_DOC);
+ iowrite32(gpio, bank->base + NPCM7XX_GP_N_OES);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_OTYP, gpio);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_OTYP, gpio);
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_DBNC, gpio);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ return npcm7xx_set_slew_rate(bank, npcm->gcr_regmap, pin, arg);
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ return npcm7xx_set_drive_strength(npcm, pin, arg);
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+/* Set multiple configuration settings for a pin */
+static int npcm7xx_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
+ int rc;
+
+ while (num_configs--) {
+ rc = npcm7xx_config_set_one(npcm, pin, *configs++);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
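+
+/*
+ * Usage sketch (values illustrative): a caller hands npcm7xx_config_set()
+ * an array of packed generic configs, e.g.
+ *
+ *	unsigned long cfgs[] = {
+ *		pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 1),
+ *		pinconf_to_config_packed(PIN_CONFIG_SLEW_RATE, 1),
+ *	};
+ *
+ *	npcm7xx_config_set(pctldev, pin, cfgs, ARRAY_SIZE(cfgs));
+ *
+ * and npcm7xx_config_set_one() unpacks and applies each entry in turn.
+ */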
+
+static struct pinconf_ops npcm7xx_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = npcm7xx_config_get,
+ .pin_config_set = npcm7xx_config_set,
+};
+
+/* pinctrl_desc */
+static struct pinctrl_desc npcm7xx_pinctrl_desc = {
+ .name = "npcm7xx-pinctrl",
+ .pins = npcm7xx_pins,
+ .npins = ARRAY_SIZE(npcm7xx_pins),
+ .pctlops = &npcm7xx_pinctrl_ops,
+ .pmxops = &npcm7xx_pinmux_ops,
+ .confops = &npcm7xx_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
+{
+ int ret = -ENXIO;
+ struct resource res;
+ int id = 0, irq;
+ struct device_node *np;
+ struct of_phandle_args pinspec;
+
+ for_each_available_child_of_node(pctrl->dev->of_node, np)
+ if (of_find_property(np, "gpio-controller", NULL)) {
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret < 0) {
+ dev_err(pctrl->dev,
+ "Failed to get resource for GPIO bank %u\n", id);
+ return ret;
+ }
+
+ pctrl->gpio_bank[id].base =
+ ioremap(res.start, resource_size(&res));
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ dev_err(pctrl->dev,
+ "No IRQ for GPIO bank %u\n", id);
+ return -EINVAL;
+ }
+
+ ret = bgpio_init(&pctrl->gpio_bank[id].gc,
+ pctrl->dev, 4,
+ pctrl->gpio_bank[id].base +
+ NPCM7XX_GP_N_DIN,
+ pctrl->gpio_bank[id].base +
+ NPCM7XX_GP_N_DOUT,
+ NULL,
+ NULL,
+ pctrl->gpio_bank[id].base +
+ NPCM7XX_GP_N_IEM,
+ BGPIOF_READ_OUTPUT_REG_SET);
+ if (ret) {
+ dev_err(pctrl->dev, "bgpio_init() failed\n");
+ return ret;
+ }
+
+ ret = of_parse_phandle_with_fixed_args(np,
+ "gpio-ranges", 3,
+ 0, &pinspec);
+ if (ret < 0) {
+ dev_err(pctrl->dev,
+ "gpio-ranges fail for GPIO bank %u\n",
+ id);
+ return ret;
+ }
+
+ pctrl->gpio_bank[id].irq = irq;
+ pctrl->gpio_bank[id].irq_chip = npcmgpio_irqchip;
+ pctrl->gpio_bank[id].gc.parent = pctrl->dev;
+ pctrl->gpio_bank[id].irqbase =
+ id * NPCM7XX_GPIO_PER_BANK;
+ pctrl->gpio_bank[id].pinctrl_id = pinspec.args[0];
+ pctrl->gpio_bank[id].gc.base = pinspec.args[1];
+ pctrl->gpio_bank[id].gc.ngpio = pinspec.args[2];
+ pctrl->gpio_bank[id].gc.owner = THIS_MODULE;
+ pctrl->gpio_bank[id].gc.label =
+ devm_kasprintf(pctrl->dev, GFP_KERNEL, "%pOF",
+ np);
+ pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show;
+ pctrl->gpio_bank[id].direction_input =
+ pctrl->gpio_bank[id].gc.direction_input;
+ pctrl->gpio_bank[id].gc.direction_input =
+ npcmgpio_direction_input;
+ pctrl->gpio_bank[id].direction_output =
+ pctrl->gpio_bank[id].gc.direction_output;
+ pctrl->gpio_bank[id].gc.direction_output =
+ npcmgpio_direction_output;
+ pctrl->gpio_bank[id].request =
+ pctrl->gpio_bank[id].gc.request;
+ pctrl->gpio_bank[id].gc.request = npcmgpio_gpio_request;
+ pctrl->gpio_bank[id].gc.free = npcmgpio_gpio_free;
+ pctrl->gpio_bank[id].gc.of_node = np;
+ id++;
+ }
+
+ pctrl->bank_num = id;
+ return ret;
+}
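+
+/*
+ * Sketch of the DT shape npcm7xx_gpio_of() expects (node names and addresses
+ * here are illustrative, not a binding excerpt):
+ *
+ *	pinctrl: pinctrl@f0800000 {
+ *		compatible = "nuvoton,npcm750-pinctrl";
+ *		gpio0: gpio@f0010000 {
+ *			gpio-controller;
+ *			reg = <0xf0010000 0x80>;
+ *			interrupts = <...>;
+ *			gpio-ranges = <&pinctrl 0 0 32>;
+ *		};
+ *	};
+ *
+ * Each gpio-controller child supplies the bank's MMIO range and IRQ, plus a
+ * three-cell gpio-ranges entry whose args become pinctrl_id, gc.base and
+ * gc.ngpio respectively.
+ */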
+
+static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
+{
+ int ret, id;
+
+ for (id = 0; id < pctrl->bank_num; id++) {
+ ret = devm_gpiochip_add_data(pctrl->dev,
+ &pctrl->gpio_bank[id].gc,
+ &pctrl->gpio_bank[id]);
+ if (ret) {
+ dev_err(pctrl->dev, "Failed to add GPIO chip %u\n", id);
+ goto err_register;
+ }
+
+ ret = gpiochip_add_pin_range(&pctrl->gpio_bank[id].gc,
+ dev_name(pctrl->dev),
+ pctrl->gpio_bank[id].pinctrl_id,
+ pctrl->gpio_bank[id].gc.base,
+ pctrl->gpio_bank[id].gc.ngpio);
+ if (ret < 0) {
+ dev_err(pctrl->dev, "Failed to add GPIO bank %u\n", id);
+ gpiochip_remove(&pctrl->gpio_bank[id].gc);
+ goto err_register;
+ }
+
+ ret = gpiochip_irqchip_add(&pctrl->gpio_bank[id].gc,
+ &pctrl->gpio_bank[id].irq_chip,
+ 0, handle_level_irq,
+ IRQ_TYPE_NONE);
+ if (ret < 0) {
+ dev_err(pctrl->dev,
+ "Failed to add IRQ chip %u\n", id);
+ gpiochip_remove(&pctrl->gpio_bank[id].gc);
+ goto err_register;
+ }
+
+ gpiochip_set_chained_irqchip(&pctrl->gpio_bank[id].gc,
+ &pctrl->gpio_bank[id].irq_chip,
+ pctrl->gpio_bank[id].irq,
+ npcmgpio_irq_handler);
+ }
+
+ return 0;
+
+err_register:
+ for (; id > 0; id--)
+ gpiochip_remove(&pctrl->gpio_bank[id - 1].gc);
+
+ return ret;
+}
+
+static int npcm7xx_pinctrl_probe(struct platform_device *pdev)
+{
+ struct npcm7xx_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, pctrl);
+
+ pctrl->gcr_regmap =
+ syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
+ if (IS_ERR(pctrl->gcr_regmap)) {
+ dev_err(pctrl->dev, "didn't find nuvoton,npcm750-gcr\n");
+ return PTR_ERR(pctrl->gcr_regmap);
+ }
+
+ ret = npcm7xx_gpio_of(pctrl);
+ if (ret < 0) {
+ dev_err(pctrl->dev, "Failed to gpio dt-binding %u\n", ret);
+ return ret;
+ }
+
+ pctrl->pctldev = devm_pinctrl_register(&pdev->dev,
+ &npcm7xx_pinctrl_desc, pctrl);
+ if (IS_ERR(pctrl->pctldev)) {
+ dev_err(&pdev->dev, "Failed to register pinctrl device\n");
+ return PTR_ERR(pctrl->pctldev);
+ }
+
+ ret = npcm7xx_gpio_register(pctrl);
+ if (ret < 0) {
+ dev_err(pctrl->dev, "Failed to register gpio %u\n", ret);
+ return ret;
+ }
+
+ pr_info("NPCM7xx Pinctrl driver probed\n");
+ return 0;
+}
+
+static const struct of_device_id npcm7xx_pinctrl_match[] = {
+ { .compatible = "nuvoton,npcm750-pinctrl" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, npcm7xx_pinctrl_match);
+
+static struct platform_driver npcm7xx_pinctrl_driver = {
+ .probe = npcm7xx_pinctrl_probe,
+ .driver = {
+ .name = "npcm7xx-pinctrl",
+ .of_match_table = npcm7xx_pinctrl_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init npcm7xx_pinctrl_register(void)
+{
+ return platform_driver_register(&npcm7xx_pinctrl_driver);
+}
+arch_initcall(npcm7xx_pinctrl_register);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("jordan_hargrave@dell.com");
+MODULE_AUTHOR("tomer.maimon@nuvoton.com");
+MODULE_DESCRIPTION("Nuvoton NPCM7XX Pinctrl and GPIO driver");
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 41ccc759b8b8..67718b0f978d 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -24,7 +24,7 @@
#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
@@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
- /*
- * When debounce logic is enabled it takes ~900 us before interrupts
- * can be enabled. During this "debounce warm up" period the
- * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it
- * reads back as 1, signaling that interrupts are now enabled.
- */
- while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
- continue;
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
@@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d)
static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
int ret = 0;
- u32 pin_reg;
+ u32 pin_reg, pin_reg_irq_en, mask;
unsigned long flags, irq_flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
@@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
}
pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
+ /*
+ * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the
+ * debounce registers of any GPIO will block wake/interrupt status
+ * generation for *all* GPIOs for a length of time that depends on
+ * WAKE_INT_MASTER_REG.MaskStsLength[11:0]. During this period the
+ * INTERRUPT_ENABLE bit will read as 0.
+ *
+ * We temporarily enable the irq for the GPIO whose configuration is
+ * changing, wait for INTERRUPT_ENABLE to read back as 1 (meaning the
+ * debounce has settled), and then disable the irq again. The polling
+ * is done with the spinlock held so that other GPIO access routines
+ * do not read the transient value of the irq enable bit, and the
+ * GPIO is kept masked throughout to avoid spurious irqs.
+ */
+ mask = BIT(INTERRUPT_ENABLE_OFF);
+ pin_reg_irq_en = pin_reg;
+ pin_reg_irq_en |= mask;
+ pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
+ writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
+ while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
+ continue;
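+ /*
+ * An alternative, bounded version of this poll (an illustrative sketch,
+ * not what this change does) could use readl_poll_timeout_atomic() from
+ * <linux/iopoll.h>, which is usable under the raw spinlock held here:
+ *
+ * u32 v;
+ *
+ * if (readl_poll_timeout_atomic(gpio_dev->base + (d->hwirq) * 4, v,
+ * (v & mask) == mask, 10, 5000))
+ * dev_warn(&gpio_dev->pdev->dev, "debounce settle timed out\n");
+ *
+ * gpio_dev->pdev is assumed here as the route to the struct device.
+ */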
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 4e9fe7854e8a..13c193156363 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -21,7 +21,7 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/as3722.h>
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index ef7ab208b951..5a850491a5cb 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -17,8 +17,6 @@
#include <dt-bindings/pinctrl/at91.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
-/* FIXME: needed for gpio_to_irq(), get rid of this */
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/init.h>
@@ -264,6 +262,13 @@ static struct irq_chip atmel_gpio_irq_chip = {
.irq_set_wake = atmel_gpio_irq_set_wake,
};
+static int atmel_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
+
+ return irq_find_mapping(atmel_pioctrl->irq_domain, offset);
+}
+
static void atmel_gpio_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
@@ -297,8 +302,9 @@ static void atmel_gpio_irq_handler(struct irq_desc *desc)
break;
for_each_set_bit(n, &isr, BITS_PER_LONG)
- generic_handle_irq(gpio_to_irq(bank *
- ATMEL_PIO_NPINS_PER_BANK + n));
+ generic_handle_irq(atmel_gpio_to_irq(
+ atmel_pioctrl->gpio_chip,
+ bank * ATMEL_PIO_NPINS_PER_BANK + n));
}
chained_irq_exit(chip, desc);
@@ -360,13 +366,6 @@ static void atmel_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
BIT(pin->line));
}
-static int atmel_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
-
- return irq_find_mapping(atmel_pioctrl->irq_domain, offset);
-}
-
static struct gpio_chip atmel_gpio_chip = {
.direction_input = atmel_gpio_direction_input,
.get = atmel_gpio_get,
@@ -493,7 +492,6 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
unsigned num_pins, num_configs, reserve;
unsigned long *configs;
struct property *pins;
- bool has_config;
u32 pinfunc;
int ret, i;
@@ -509,9 +507,6 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
return ret;
}
- if (num_configs)
- has_config = true;
-
num_pins = pins->length / sizeof(u32);
if (!num_pins) {
dev_err(pctldev->dev, "no pins found in node %pOF\n", np);
@@ -524,7 +519,7 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
* map for each pin.
*/
reserve = 1;
- if (has_config && num_pins >= 1)
+ if (num_configs)
reserve++;
reserve *= num_pins;
ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps,
@@ -547,7 +542,7 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps,
group, func);
- if (has_config) {
+ if (num_configs) {
ret = pinctrl_utils_add_map_configs(pctldev, map,
reserved_maps, num_maps, group,
configs, num_configs,
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 50f0ec42c637..3d49bbbcdbc7 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -16,7 +16,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -263,8 +263,8 @@ static int at91_dt_node_to_map(struct pinctrl_dev *pctldev,
*/
grp = at91_pinctrl_find_group_by_name(info, np->name);
if (!grp) {
- dev_err(info->dev, "unable to find group for node %s\n",
- np->name);
+ dev_err(info->dev, "unable to find group for node %pOFn\n",
+ np);
return -EINVAL;
}
@@ -1071,7 +1071,7 @@ static int at91_pinctrl_parse_groups(struct device_node *np,
const __be32 *list;
int i, j;
- dev_dbg(info->dev, "group(%d): %s\n", index, np->name);
+ dev_dbg(info->dev, "group(%d): %pOFn\n", index, np);
/* Initialise group */
grp->name = np->name;
@@ -1122,7 +1122,7 @@ static int at91_pinctrl_parse_functions(struct device_node *np,
static u32 grp_index;
u32 i = 0;
- dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name);
+ dev_dbg(info->dev, "parse function(%d): %pOFn\n", index, np);
func = &info->functions[index];
@@ -1487,7 +1487,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
return 0;
case IRQ_TYPE_NONE:
default:
- pr_warn("AT91: No type for irq %d\n", gpio_to_irq(d->irq));
+ pr_warn("AT91: No type for GPIO irq offset %d\n", d->irq);
return -EINVAL;
}
@@ -1574,16 +1574,6 @@ void at91_pinctrl_gpio_resume(void)
#define gpio_irq_set_wake NULL
#endif /* CONFIG_PM */
-static struct irq_chip gpio_irqchip = {
- .name = "GPIO",
- .irq_ack = gpio_irq_ack,
- .irq_disable = gpio_irq_mask,
- .irq_mask = gpio_irq_mask,
- .irq_unmask = gpio_irq_unmask,
- /* .irq_set_type is set dynamically */
- .irq_set_wake = gpio_irq_set_wake,
-};
-
static void gpio_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1624,12 +1614,22 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct gpio_chip *gpiochip_prev = NULL;
struct at91_gpio_chip *prev = NULL;
struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
+ struct irq_chip *gpio_irqchip;
int ret, i;
+ gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip), GFP_KERNEL);
+ if (!gpio_irqchip)
+ return -ENOMEM;
+
at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
- /* Setup proper .irq_set_type function */
- gpio_irqchip.irq_set_type = at91_gpio->ops->irq_type;
+ gpio_irqchip->name = "GPIO";
+ gpio_irqchip->irq_ack = gpio_irq_ack;
+ gpio_irqchip->irq_disable = gpio_irq_mask;
+ gpio_irqchip->irq_mask = gpio_irq_mask;
+ gpio_irqchip->irq_unmask = gpio_irq_unmask;
+ gpio_irqchip->irq_set_wake = gpio_irq_set_wake;
+ gpio_irqchip->irq_set_type = at91_gpio->ops->irq_type;
/* Disable irqs of this PIO controller */
writel_relaxed(~0, at91_gpio->regbase + PIO_IDR);
@@ -1640,7 +1640,7 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
* interrupt.
*/
ret = gpiochip_irqchip_add(&at91_gpio->chip,
- &gpio_irqchip,
+ gpio_irqchip,
0,
handle_edge_irq,
IRQ_TYPE_NONE);
@@ -1658,7 +1658,7 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
if (!gpiochip_prev) {
/* Then register the chain on the parent IRQ */
gpiochip_set_chained_irqchip(&at91_gpio->chip,
- &gpio_irqchip,
+ gpio_irqchip,
at91_gpio->pioc_virq,
gpio_irq_handler);
return 0;
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 7939b178c6ae..63035181dfde 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -15,7 +15,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index 5353b23f775c..b7533726340d 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/spinlock.h>
#include <linux/pinctrl/machine.h>
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index fb73dcbb5ef3..4d032e637f5c 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -10,7 +10,7 @@
* Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c
index fa7d998e1d5a..f75bf6f16a2e 100644
--- a/drivers/pinctrl/pinctrl-gemini.c
+++ b/drivers/pinctrl/pinctrl-gemini.c
@@ -591,13 +591,16 @@ static const unsigned int tvc_3512_pins[] = {
319, /* TVC_DATA[1] */
301, /* TVC_DATA[2] */
283, /* TVC_DATA[3] */
- 265, /* TVC_CLK */
320, /* TVC_DATA[4] */
302, /* TVC_DATA[5] */
284, /* TVC_DATA[6] */
266, /* TVC_DATA[7] */
};
+static const unsigned int tvc_clk_3512_pins[] = {
+ 265, /* TVC_CLK */
+};
+
/* NAND flash pins */
static const unsigned int nflash_3512_pins[] = {
199, 200, 201, 202, 216, 217, 218, 219, 220, 234, 235, 236, 237, 252,
@@ -629,7 +632,7 @@ static const unsigned int pflash_3512_pins_extended[] = {
/* Serial flash pins CE0, CE1, DI, DO, CK */
static const unsigned int sflash_3512_pins[] = { 230, 231, 232, 233, 211 };
-/* The GPIO0A (0) pin overlap with TVC and extended parallel flash */
+/* The GPIO0A (0) pin overlaps with TVC CLK and extended parallel flash */
static const unsigned int gpio0a_3512_pins[] = { 265 };
/* The GPIO0B (1-4) pins overlap with TVC and ICE */
@@ -823,7 +826,13 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = {
.num_pins = ARRAY_SIZE(tvc_3512_pins),
/* Conflict with character LCD and ICE */
.mask = LCD_PADS_ENABLE,
- .value = TVC_PADS_ENABLE | TVC_CLK_PAD_ENABLE,
+ .value = TVC_PADS_ENABLE,
+ },
+ {
+ .name = "tvcclkgrp",
+ .pins = tvc_clk_3512_pins,
+ .num_pins = ARRAY_SIZE(tvc_clk_3512_pins),
+ .value = TVC_CLK_PAD_ENABLE,
},
/*
* The construction is done such that it is possible to use a serial
@@ -860,8 +869,8 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = {
.name = "gpio0agrp",
.pins = gpio0a_3512_pins,
.num_pins = ARRAY_SIZE(gpio0a_3512_pins),
- /* Conflict with TVC */
- .mask = TVC_PADS_ENABLE,
+ /* Conflict with TVC CLK */
+ .mask = TVC_CLK_PAD_ENABLE,
},
{
.name = "gpio0bgrp",
@@ -1531,13 +1540,16 @@ static const unsigned int tvc_3516_pins[] = {
311, /* TVC_DATA[1] */
394, /* TVC_DATA[2] */
374, /* TVC_DATA[3] */
- 333, /* TVC_CLK */
354, /* TVC_DATA[4] */
395, /* TVC_DATA[5] */
312, /* TVC_DATA[6] */
334, /* TVC_DATA[7] */
};
+static const unsigned int tvc_clk_3516_pins[] = {
+ 333, /* TVC_CLK */
+};
+
/* NAND flash pins */
static const unsigned int nflash_3516_pins[] = {
243, 260, 261, 224, 280, 262, 281, 264, 300, 263, 282, 301, 320, 283,
@@ -1570,7 +1582,7 @@ static const unsigned int pflash_3516_pins_extended[] = {
static const unsigned int sflash_3516_pins[] = { 296, 338, 295, 359, 339 };
/* The GPIO0A (0-4) pins overlap with TVC and extended parallel flash */
-static const unsigned int gpio0a_3516_pins[] = { 333, 354, 395, 312, 334 };
+static const unsigned int gpio0a_3516_pins[] = { 354, 395, 312, 334 };
/* The GPIO0B (5-7) pins overlap with ICE */
static const unsigned int gpio0b_3516_pins[] = { 375, 396, 376 };
@@ -1602,6 +1614,9 @@ static const unsigned int gpio0j_3516_pins[] = { 359, 339 };
/* The GPIO0K (30,31) pins overlap with NAND flash */
static const unsigned int gpio0k_3516_pins[] = { 275, 298 };
+/* The GPIO0L (0) pin overlaps with TVC_CLK */
+static const unsigned int gpio0l_3516_pins[] = { 333 };
+
/* The GPIO1A (0-4) pins that overlap with IDE and parallel flash */
static const unsigned int gpio1a_3516_pins[] = { 221, 200, 222, 201, 220 };
@@ -1761,7 +1776,13 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.num_pins = ARRAY_SIZE(tvc_3516_pins),
/* Conflict with character LCD */
.mask = LCD_PADS_ENABLE,
- .value = TVC_PADS_ENABLE | TVC_CLK_PAD_ENABLE,
+ .value = TVC_PADS_ENABLE,
+ },
+ {
+ .name = "tvcclkgrp",
+ .pins = tvc_clk_3516_pins,
+ .num_pins = ARRAY_SIZE(tvc_clk_3516_pins),
+ .value = TVC_CLK_PAD_ENABLE,
},
/*
* The construction is done such that it is possible to use a serial
@@ -1873,6 +1894,13 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.value = PFLASH_PADS_DISABLE | NAND_PADS_DISABLE,
},
{
+ .name = "gpio0lgrp",
+ .pins = gpio0l_3516_pins,
+ .num_pins = ARRAY_SIZE(gpio0l_3516_pins),
+ /* Conflict with TVC CLK */
+ .mask = TVC_CLK_PAD_ENABLE,
+ },
+ {
.name = "gpio1agrp",
.pins = gpio1a_3516_pins,
.num_pins = ARRAY_SIZE(gpio1a_3516_pins),
@@ -2179,12 +2207,13 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
return -ENODEV;
}
- dev_info(pmx->dev,
- "ACTIVATE function \"%s\" with group \"%s\"\n",
- func->name, grp->name);
+ dev_dbg(pmx->dev,
+ "ACTIVATE function \"%s\" with group \"%s\"\n",
+ func->name, grp->name);
regmap_read(pmx->map, GLOBAL_MISC_CTRL, &before);
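+ /*
+ * regmap_update_bits(map, reg, mask, val) writes
+ * new = (old & ~mask) | (val & mask), so a bit set in grp->value but
+ * not in grp->mask would never reach the register; folding the value
+ * into the mask below ensures the pad-enable bits themselves get
+ * written.
+ */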
- regmap_update_bits(pmx->map, GLOBAL_MISC_CTRL, grp->mask,
+ regmap_update_bits(pmx->map, GLOBAL_MISC_CTRL,
+ grp->mask | grp->value,
grp->value);
regmap_read(pmx->map, GLOBAL_MISC_CTRL, &after);
@@ -2211,10 +2240,10 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
"GLOBAL MISC CTRL before: %08x, after %08x, expected %08x\n",
before, after, expected);
} else {
- dev_info(pmx->dev,
- "padgroup %s %s\n",
- gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ dev_dbg(pmx->dev,
+ "padgroup %s %s\n",
+ gemini_padgroups[i],
+ enabled ? "enabled" : "disabled");
}
}
@@ -2233,10 +2262,10 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
"GLOBAL MISC CTRL before: %08x, after %08x, expected %08x\n",
before, after, expected);
} else {
- dev_info(pmx->dev,
- "padgroup %s %s\n",
- gemini_padgroups[i],
- enabled ? "enabled" : "disabled");
+ dev_dbg(pmx->dev,
+ "padgroup %s %s\n",
+ gemini_padgroups[i],
+ enabled ? "enabled" : "disabled");
}
}
@@ -2463,9 +2492,9 @@ static int gemini_pinconf_group_set(struct pinctrl_dev *pctldev,
regmap_update_bits(pmx->map, GLOBAL_IODRIVE,
grp->driving_mask,
val);
- dev_info(pmx->dev,
- "set group %s to %d mA drive strength mask %08x val %08x\n",
- grp->name, arg, grp->driving_mask, val);
+ dev_dbg(pmx->dev,
+ "set group %s to %d mA drive strength mask %08x val %08x\n",
+ grp->name, arg, grp->driving_mask, val);
break;
default:
dev_err(pmx->dev, "invalid config param %04x\n", param);
@@ -2556,8 +2585,8 @@ static int gemini_pmx_probe(struct platform_device *pdev)
/* Print initial state */
tmp = val;
for_each_set_bit(i, &tmp, PADS_MAXBIT) {
- dev_info(dev, "pad group %s %s\n", gemini_padgroups[i],
- (val & BIT(i)) ? "enabled" : "disabled");
+ dev_dbg(dev, "pad group %s %s\n", gemini_padgroups[i],
+ (val & BIT(i)) ? "enabled" : "disabled");
}
/* Check if flash pin is set */
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 628817c40e3b..db6b48ea5f47 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -7,10 +7,11 @@
*/
#include <linux/compiler.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
@@ -24,6 +25,9 @@
#include "pinconf.h"
#include "pinmux.h"
+#define GPIO_PIN 0x00
+#define GPIO_MSK 0x20
+
#define JZ4740_GPIO_DATA 0x10
#define JZ4740_GPIO_PULL_DIS 0x30
#define JZ4740_GPIO_FUNC 0x40
@@ -33,7 +37,6 @@
#define JZ4740_GPIO_FLAG 0x80
#define JZ4770_GPIO_INT 0x10
-#define JZ4770_GPIO_MSK 0x20
#define JZ4770_GPIO_PAT1 0x30
#define JZ4770_GPIO_PAT0 0x40
#define JZ4770_GPIO_FLAG 0x50
@@ -46,6 +49,7 @@
enum jz_version {
ID_JZ4740,
+ ID_JZ4725B,
ID_JZ4770,
ID_JZ4780,
};
@@ -72,6 +76,13 @@ struct ingenic_pinctrl {
const struct ingenic_chip_info *info;
};
+struct ingenic_gpio_chip {
+ struct ingenic_pinctrl *jzpc;
+ struct gpio_chip gc;
+ struct irq_chip irq_chip;
+ unsigned int irq, reg_base;
+};
+
static const u32 jz4740_pull_ups[4] = {
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
};
@@ -205,6 +216,99 @@ static const struct ingenic_chip_info jz4740_chip_info = {
.pull_downs = jz4740_pull_downs,
};
+static int jz4725b_mmc0_1bit_pins[] = { 0x48, 0x49, 0x5c, };
+static int jz4725b_mmc0_4bit_pins[] = { 0x5d, 0x5b, 0x56, };
+static int jz4725b_mmc1_1bit_pins[] = { 0x7a, 0x7b, 0x7c, };
+static int jz4725b_mmc1_4bit_pins[] = { 0x7d, 0x7e, 0x7f, };
+static int jz4725b_uart_data_pins[] = { 0x4c, 0x4d, };
+static int jz4725b_nand_cs1_pins[] = { 0x55, };
+static int jz4725b_nand_cs2_pins[] = { 0x56, };
+static int jz4725b_nand_cs3_pins[] = { 0x57, };
+static int jz4725b_nand_cs4_pins[] = { 0x58, };
+static int jz4725b_nand_cle_ale_pins[] = { 0x48, 0x49 };
+static int jz4725b_nand_fre_fwe_pins[] = { 0x5c, 0x5d };
+static int jz4725b_pwm_pwm0_pins[] = { 0x4a, };
+static int jz4725b_pwm_pwm1_pins[] = { 0x4b, };
+static int jz4725b_pwm_pwm2_pins[] = { 0x4c, };
+static int jz4725b_pwm_pwm3_pins[] = { 0x4d, };
+static int jz4725b_pwm_pwm4_pins[] = { 0x4e, };
+static int jz4725b_pwm_pwm5_pins[] = { 0x4f, };
+
+static int jz4725b_mmc0_1bit_funcs[] = { 1, 1, 1, };
+static int jz4725b_mmc0_4bit_funcs[] = { 1, 0, 1, };
+static int jz4725b_mmc1_1bit_funcs[] = { 0, 0, 0, };
+static int jz4725b_mmc1_4bit_funcs[] = { 0, 0, 0, };
+static int jz4725b_uart_data_funcs[] = { 1, 1, };
+static int jz4725b_nand_cs1_funcs[] = { 0, };
+static int jz4725b_nand_cs2_funcs[] = { 0, };
+static int jz4725b_nand_cs3_funcs[] = { 0, };
+static int jz4725b_nand_cs4_funcs[] = { 0, };
+static int jz4725b_nand_cle_ale_funcs[] = { 0, 0, };
+static int jz4725b_nand_fre_fwe_funcs[] = { 0, 0, };
+static int jz4725b_pwm_pwm0_funcs[] = { 0, };
+static int jz4725b_pwm_pwm1_funcs[] = { 0, };
+static int jz4725b_pwm_pwm2_funcs[] = { 0, };
+static int jz4725b_pwm_pwm3_funcs[] = { 0, };
+static int jz4725b_pwm_pwm4_funcs[] = { 0, };
+static int jz4725b_pwm_pwm5_funcs[] = { 0, };
+
+static const struct group_desc jz4725b_groups[] = {
+ INGENIC_PIN_GROUP("mmc0-1bit", jz4725b_mmc0_1bit),
+ INGENIC_PIN_GROUP("mmc0-4bit", jz4725b_mmc0_4bit),
+ INGENIC_PIN_GROUP("mmc1-1bit", jz4725b_mmc1_1bit),
+ INGENIC_PIN_GROUP("mmc1-4bit", jz4725b_mmc1_4bit),
+ INGENIC_PIN_GROUP("uart-data", jz4725b_uart_data),
+ INGENIC_PIN_GROUP("nand-cs1", jz4725b_nand_cs1),
+ INGENIC_PIN_GROUP("nand-cs2", jz4725b_nand_cs2),
+ INGENIC_PIN_GROUP("nand-cs3", jz4725b_nand_cs3),
+ INGENIC_PIN_GROUP("nand-cs4", jz4725b_nand_cs4),
+ INGENIC_PIN_GROUP("nand-cle-ale", jz4725b_nand_cle_ale),
+ INGENIC_PIN_GROUP("nand-fre-fwe", jz4725b_nand_fre_fwe),
+ INGENIC_PIN_GROUP("pwm0", jz4725b_pwm_pwm0),
+ INGENIC_PIN_GROUP("pwm1", jz4725b_pwm_pwm1),
+ INGENIC_PIN_GROUP("pwm2", jz4725b_pwm_pwm2),
+ INGENIC_PIN_GROUP("pwm3", jz4725b_pwm_pwm3),
+ INGENIC_PIN_GROUP("pwm4", jz4725b_pwm_pwm4),
+ INGENIC_PIN_GROUP("pwm5", jz4725b_pwm_pwm5),
+};
+
+static const char *jz4725b_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", };
+static const char *jz4725b_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
+static const char *jz4725b_uart_groups[] = { "uart-data", };
+static const char *jz4725b_nand_groups[] = {
+ "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4",
+ "nand-cle-ale", "nand-fre-fwe",
+};
+static const char *jz4725b_pwm0_groups[] = { "pwm0", };
+static const char *jz4725b_pwm1_groups[] = { "pwm1", };
+static const char *jz4725b_pwm2_groups[] = { "pwm2", };
+static const char *jz4725b_pwm3_groups[] = { "pwm3", };
+static const char *jz4725b_pwm4_groups[] = { "pwm4", };
+static const char *jz4725b_pwm5_groups[] = { "pwm5", };
+
+static const struct function_desc jz4725b_functions[] = {
+ { "mmc0", jz4725b_mmc0_groups, ARRAY_SIZE(jz4725b_mmc0_groups), },
+ { "mmc1", jz4725b_mmc1_groups, ARRAY_SIZE(jz4725b_mmc1_groups), },
+ { "uart", jz4725b_uart_groups, ARRAY_SIZE(jz4725b_uart_groups), },
+ { "nand", jz4725b_nand_groups, ARRAY_SIZE(jz4725b_nand_groups), },
+ { "pwm0", jz4725b_pwm0_groups, ARRAY_SIZE(jz4725b_pwm0_groups), },
+ { "pwm1", jz4725b_pwm1_groups, ARRAY_SIZE(jz4725b_pwm1_groups), },
+ { "pwm2", jz4725b_pwm2_groups, ARRAY_SIZE(jz4725b_pwm2_groups), },
+ { "pwm3", jz4725b_pwm3_groups, ARRAY_SIZE(jz4725b_pwm3_groups), },
+ { "pwm4", jz4725b_pwm4_groups, ARRAY_SIZE(jz4725b_pwm4_groups), },
+ { "pwm5", jz4725b_pwm5_groups, ARRAY_SIZE(jz4725b_pwm5_groups), },
+};
+
+static const struct ingenic_chip_info jz4725b_chip_info = {
+ .num_chips = 4,
+ .groups = jz4725b_groups,
+ .num_groups = ARRAY_SIZE(jz4725b_groups),
+ .functions = jz4725b_functions,
+ .num_functions = ARRAY_SIZE(jz4725b_functions),
+ .pull_ups = jz4740_pull_ups,
+ .pull_downs = jz4740_pull_downs,
+};
+
static const u32 jz4770_pull_ups[6] = {
0x3fffffff, 0xfff0030c, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0xffa7f00f,
};
@@ -438,6 +542,235 @@ static const struct ingenic_chip_info jz4770_chip_info = {
.pull_downs = jz4770_pull_downs,
};
+static u32 gpio_ingenic_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
+{
+ unsigned int val;
+
+ regmap_read(jzgc->jzpc->map, jzgc->reg_base + reg, &val);
+
+ return (u32) val;
+}
+
+static void gpio_ingenic_set_bit(struct ingenic_gpio_chip *jzgc,
+ u8 reg, u8 offset, bool set)
+{
+ if (set)
+ reg = REG_SET(reg);
+ else
+ reg = REG_CLEAR(reg);
+
+ regmap_write(jzgc->jzpc->map, jzgc->reg_base + reg, BIT(offset));
+}
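+
+/*
+ * The helper above relies on each GPIO register having companion set/clear
+ * strobe addresses at fixed offsets. The offsets below are an assumed
+ * sketch (the real REG_SET()/REG_CLEAR() macros live elsewhere in this
+ * driver):
+ *
+ *	#define REG_SET(x)	((x) + 0x14)
+ *	#define REG_CLEAR(x)	((x) + 0x18)
+ *
+ * Writing BIT(offset) to the derived address then sets or clears a single
+ * bit without a read-modify-write sequence.
+ */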
+
+static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
+ u8 offset)
+{
+ unsigned int val = gpio_ingenic_read_reg(jzgc, GPIO_PIN);
+
+ return !!(val & BIT(offset));
+}
+
+static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
+ u8 offset, int value)
+{
+ if (jzgc->jzpc->version >= ID_JZ4770)
+ gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
+ else
+ gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
+}
+
+static void irq_set_type(struct ingenic_gpio_chip *jzgc,
+ u8 offset, unsigned int type)
+{
+ u8 reg1, reg2;
+
+ if (jzgc->jzpc->version >= ID_JZ4770) {
+ reg1 = JZ4770_GPIO_PAT1;
+ reg2 = JZ4770_GPIO_PAT0;
+ } else {
+ reg1 = JZ4740_GPIO_TRIG;
+ reg2 = JZ4740_GPIO_DIR;
+ }
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ gpio_ingenic_set_bit(jzgc, reg2, offset, true);
+ gpio_ingenic_set_bit(jzgc, reg1, offset, true);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ gpio_ingenic_set_bit(jzgc, reg2, offset, false);
+ gpio_ingenic_set_bit(jzgc, reg1, offset, true);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ gpio_ingenic_set_bit(jzgc, reg2, offset, true);
+ gpio_ingenic_set_bit(jzgc, reg1, offset, false);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ default:
+ gpio_ingenic_set_bit(jzgc, reg2, offset, false);
+ gpio_ingenic_set_bit(jzgc, reg1, offset, false);
+ break;
+ }
+}
+
+static void ingenic_gpio_irq_mask(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ gpio_ingenic_set_bit(jzgc, GPIO_MSK, irqd->hwirq, true);
+}
+
+static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ gpio_ingenic_set_bit(jzgc, GPIO_MSK, irqd->hwirq, false);
+}
+
+static void ingenic_gpio_irq_enable(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ int irq = irqd->hwirq;
+
+ if (jzgc->jzpc->version >= ID_JZ4770)
+ gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
+ else
+ gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
+
+ ingenic_gpio_irq_unmask(irqd);
+}
+
+static void ingenic_gpio_irq_disable(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ int irq = irqd->hwirq;
+
+ ingenic_gpio_irq_mask(irqd);
+
+ if (jzgc->jzpc->version >= ID_JZ4770)
+ gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
+ else
+ gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
+}
+
+static void ingenic_gpio_irq_ack(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ int irq = irqd->hwirq;
+ bool high;
+
+ if (irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) {
+ /*
+ * Switch to an interrupt for the opposite edge to the one that
+ * triggered the interrupt being ACKed.
+ */
+ high = ingenic_gpio_get_value(jzgc, irq);
+ if (high)
+ irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_FALLING);
+ else
+ irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
+ }
+
+ if (jzgc->jzpc->version >= ID_JZ4770)
+ gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
+ else
+ gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
+}
+
+static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_set_handler_locked(irqd, handle_edge_irq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(irqd, handle_level_irq);
+ break;
+ default:
+ irq_set_handler_locked(irqd, handle_bad_irq);
+ }
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ /*
+ * The hardware does not support interrupts on both edges. The
+ * best we can do is to set up a single-edge interrupt and then
+ * switch to the opposing edge when ACKing the interrupt.
+ */
+ bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq);
+
+ type = high ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
+ }
+
+ irq_set_type(jzgc, irqd->hwirq, type);
+ return 0;
+}
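+
+/*
+ * Worked example of the both-edge emulation above: with the line currently
+ * high, a falling-edge trigger is programmed. When that edge fires,
+ * ingenic_gpio_irq_ack() samples the now-low line and re-arms for a rising
+ * edge, and so on. A pulse short enough to end before the ack re-arms the
+ * trigger can in principle be missed.
+ */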
+
+static int ingenic_gpio_irq_set_wake(struct irq_data *irqd, unsigned int on)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ return irq_set_irq_wake(jzgc->irq, on);
+}
+
+static void ingenic_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ struct irq_chip *irq_chip = irq_data_get_irq_chip(&desc->irq_data);
+ unsigned long flag, i;
+
+ chained_irq_enter(irq_chip, desc);
+
+ if (jzgc->jzpc->version >= ID_JZ4770)
+ flag = gpio_ingenic_read_reg(jzgc, JZ4770_GPIO_FLAG);
+ else
+ flag = gpio_ingenic_read_reg(jzgc, JZ4740_GPIO_FLAG);
+
+ for_each_set_bit(i, &flag, 32)
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, i));
+ chained_irq_exit(irq_chip, desc);
+}
+
+static void ingenic_gpio_set(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ ingenic_gpio_set_value(jzgc, offset, value);
+}
+
+static int ingenic_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ return (int) ingenic_gpio_get_value(jzgc, offset);
+}
+
+static int ingenic_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ return pinctrl_gpio_direction_input(gc->base + offset);
+}
+
+static int ingenic_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ ingenic_gpio_set(gc, offset, value);
+ return pinctrl_gpio_direction_output(gc->base + offset);
+}
+
static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc,
unsigned int pin, u8 reg, bool set)
{
@@ -460,6 +793,21 @@ static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
return val & BIT(idx);
}
+static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ struct ingenic_pinctrl *jzpc = jzgc->jzpc;
+ unsigned int pin = gc->base + offset;
+
+ if (jzpc->version >= ID_JZ4770)
+ return ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1);
+
+ if (ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_SELECT))
+ return true;
+
+ return !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_DIR);
+}
+
static const struct pinctrl_ops ingenic_pctlops = {
.get_groups_count = pinctrl_generic_get_group_count,
.get_group_name = pinctrl_generic_get_group_name,
@@ -479,7 +827,7 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
if (jzpc->version >= ID_JZ4770) {
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
- ingenic_config_pin(jzpc, pin, JZ4770_GPIO_MSK, false);
+ ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
} else {
@@ -532,7 +880,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
if (jzpc->version >= ID_JZ4770) {
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
- ingenic_config_pin(jzpc, pin, JZ4770_GPIO_MSK, true);
+ ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
@@ -712,12 +1060,95 @@ static const struct regmap_config ingenic_pinctrl_regmap_config = {
static const struct of_device_id ingenic_pinctrl_of_match[] = {
{ .compatible = "ingenic,jz4740-pinctrl", .data = (void *) ID_JZ4740 },
+ { .compatible = "ingenic,jz4725b-pinctrl", .data = (void *)ID_JZ4725B },
{ .compatible = "ingenic,jz4770-pinctrl", .data = (void *) ID_JZ4770 },
{ .compatible = "ingenic,jz4780-pinctrl", .data = (void *) ID_JZ4780 },
{},
};
-static int ingenic_pinctrl_probe(struct platform_device *pdev)
+static const struct of_device_id ingenic_gpio_of_match[] __initconst = {
+ { .compatible = "ingenic,jz4740-gpio", },
+ { .compatible = "ingenic,jz4770-gpio", },
+ { .compatible = "ingenic,jz4780-gpio", },
+ {},
+};
+
+static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
+ struct device_node *node)
+{
+ struct ingenic_gpio_chip *jzgc;
+ struct device *dev = jzpc->dev;
+ unsigned int bank;
+ int err;
+
+ err = of_property_read_u32(node, "reg", &bank);
+ if (err) {
+ dev_err(dev, "Cannot read \"reg\" property: %i\n", err);
+ return err;
+ }
+
+ jzgc = devm_kzalloc(dev, sizeof(*jzgc), GFP_KERNEL);
+ if (!jzgc)
+ return -ENOMEM;
+
+ jzgc->jzpc = jzpc;
+ jzgc->reg_base = bank * 0x100;
+
+ jzgc->gc.label = devm_kasprintf(dev, GFP_KERNEL, "GPIO%c", 'A' + bank);
+ if (!jzgc->gc.label)
+ return -ENOMEM;
+
+ /* DO NOT EXPAND THIS: FOR BACKWARD GPIO NUMBERSPACE COMPATIBILITY
+ * ONLY: WORK TO TRANSITION CONSUMERS TO USE THE GPIO DESCRIPTOR API IN
+ * <linux/gpio/consumer.h> INSTEAD.
+ */
+ jzgc->gc.base = bank * 32;
+
+ jzgc->gc.ngpio = 32;
+ jzgc->gc.parent = dev;
+ jzgc->gc.of_node = node;
+ jzgc->gc.owner = THIS_MODULE;
+
+ jzgc->gc.set = ingenic_gpio_set;
+ jzgc->gc.get = ingenic_gpio_get;
+ jzgc->gc.direction_input = ingenic_gpio_direction_input;
+ jzgc->gc.direction_output = ingenic_gpio_direction_output;
+ jzgc->gc.get_direction = ingenic_gpio_get_direction;
+
+ if (of_property_read_bool(node, "gpio-ranges")) {
+ jzgc->gc.request = gpiochip_generic_request;
+ jzgc->gc.free = gpiochip_generic_free;
+ }
+
+ err = devm_gpiochip_add_data(dev, &jzgc->gc, jzgc);
+ if (err)
+ return err;
+
+ jzgc->irq = irq_of_parse_and_map(node, 0);
+ if (!jzgc->irq)
+ return -EINVAL;
+
+ jzgc->irq_chip.name = jzgc->gc.label;
+ jzgc->irq_chip.irq_enable = ingenic_gpio_irq_enable;
+ jzgc->irq_chip.irq_disable = ingenic_gpio_irq_disable;
+ jzgc->irq_chip.irq_unmask = ingenic_gpio_irq_unmask;
+ jzgc->irq_chip.irq_mask = ingenic_gpio_irq_mask;
+ jzgc->irq_chip.irq_ack = ingenic_gpio_irq_ack;
+ jzgc->irq_chip.irq_set_type = ingenic_gpio_irq_set_type;
+ jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
+ jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+ err = gpiochip_irqchip_add(&jzgc->gc, &jzgc->irq_chip, 0,
+ handle_level_irq, IRQ_TYPE_NONE);
+ if (err)
+ return err;
+
+ gpiochip_set_chained_irqchip(&jzgc->gc, &jzgc->irq_chip,
+ jzgc->irq, ingenic_gpio_irq_handler);
+ return 0;
+}
+
+static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ingenic_pinctrl *jzpc;
@@ -727,6 +1158,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
const struct of_device_id *of_id = of_match_device(
ingenic_pinctrl_of_match, dev);
const struct ingenic_chip_info *chip_info;
+ struct device_node *node;
unsigned int i;
int err;
@@ -755,6 +1187,8 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
if (jzpc->version >= ID_JZ4770)
chip_info = &jz4770_chip_info;
+ else if (jzpc->version >= ID_JZ4725B)
+ chip_info = &jz4725b_chip_info;
else
chip_info = &jz4740_chip_info;
jzpc->info = chip_info;
@@ -815,11 +1249,11 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
dev_set_drvdata(dev, jzpc->map);
- if (dev->of_node) {
- err = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (err) {
- dev_err(dev, "Failed to probe GPIO devices\n");
- return err;
+ for_each_child_of_node(dev->of_node, node) {
+ if (of_match_node(ingenic_gpio_of_match, node)) {
+ err = ingenic_gpio_probe(jzpc, node);
+ if (err)
+ return err;
}
}
@@ -828,6 +1262,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
static const struct platform_device_id ingenic_pinctrl_ids[] = {
{ "jz4740-pinctrl", ID_JZ4740 },
+ { "jz4725b-pinctrl", ID_JZ4725B },
{ "jz4770-pinctrl", ID_JZ4770 },
{ "jz4780-pinctrl", ID_JZ4780 },
{},
@@ -837,14 +1272,13 @@ static struct platform_driver ingenic_pinctrl_driver = {
.driver = {
.name = "pinctrl-ingenic",
.of_match_table = of_match_ptr(ingenic_pinctrl_of_match),
- .suppress_bind_attrs = true,
},
- .probe = ingenic_pinctrl_probe,
.id_table = ingenic_pinctrl_ids,
};
static int __init ingenic_pinctrl_drv_register(void)
{
- return platform_driver_register(&ingenic_pinctrl_driver);
+ return platform_driver_probe(&ingenic_pinctrl_driver,
+ ingenic_pinctrl_probe);
}
-postcore_initcall(ingenic_pinctrl_drv_register);
+subsys_initcall(ingenic_pinctrl_drv_register);
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
index 81632af3a86a..22e80613e269 100644
--- a/drivers/pinctrl/pinctrl-lantiq.c
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -80,14 +80,14 @@ static void ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
int ret, i;
if (!pins && !groups) {
- dev_err(pctldev->dev, "%s defines neither pins nor groups\n",
- np->name);
+ dev_err(pctldev->dev, "%pOFn defines neither pins nor groups\n",
+ np);
return;
}
if (pins && groups) {
- dev_err(pctldev->dev, "%s defines both pins and groups\n",
- np->name);
+ dev_err(pctldev->dev, "%pOFn defines both pins and groups\n",
+ np);
return;
}
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 190f17e4bbda..a14bc5e5fc24 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -844,8 +844,11 @@ static int lpc18xx_pconf_get_pin(struct pinctrl_dev *pctldev, unsigned param,
*arg = (reg & LPC18XX_SCU_PIN_EHD_MASK) >> LPC18XX_SCU_PIN_EHD_POS;
switch (*arg) {
case 3: *arg += 5;
+ /* fall through */
case 2: *arg += 5;
+ /* fall through */
case 1: *arg += 3;
+ /* fall through */
case 0: *arg += 4;
}
break;
@@ -1060,8 +1063,11 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev, unsigned param,
switch (param_val) {
case 20: param_val -= 5;
+ /* fall through */
case 14: param_val -= 5;
+ /* fall through */
case 8: param_val -= 3;
+ /* fall through */
case 4: param_val -= 4;
break;
default:
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 4a8a8efadefa..b03481ef99a1 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -4,7 +4,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/spi/mcp23s08.h>
@@ -636,6 +636,14 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
return err;
}
+ return 0;
+}
+
+static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
+{
+ struct gpio_chip *chip = &mcp->chip;
+ int err;
+
err = gpiochip_irqchip_add_nested(chip,
&mcp23s08_irq_chip,
0,
@@ -912,7 +920,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
}
if (mcp->irq && mcp->irq_controller) {
- ret = mcp23s08_irq_setup(mcp);
+ ret = mcp23s08_irqchip_setup(mcp);
if (ret)
goto fail;
}
@@ -944,6 +952,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail;
}
+ if (mcp->irq)
+ ret = mcp23s08_irq_setup(mcp);
+
fail:
if (ret < 0)
dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 302190d1558d..aa5f949ef219 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -9,7 +9,6 @@
* version 2, as published by the Free Software Foundation.
*/
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index f4a61429e06e..95e4a06de019 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -27,7 +27,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/bitops.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/machine.h>
@@ -501,8 +501,8 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
*/
grp = pinctrl_name_to_group(info, np->name);
if (!grp) {
- dev_err(info->dev, "unable to find group for node %s\n",
- np->name);
+ dev_err(info->dev, "unable to find group for node %pOFn\n",
+ np);
return -EINVAL;
}
@@ -2454,7 +2454,7 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
int i, j;
int ret;
- dev_dbg(info->dev, "group(%d): %s\n", index, np->name);
+ dev_dbg(info->dev, "group(%d): %pOFn\n", index, np);
/* Initialise group */
grp->name = np->name;
@@ -2519,7 +2519,7 @@ static int rockchip_pinctrl_parse_functions(struct device_node *np,
static u32 grp_index;
u32 i = 0;
- dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name);
+ dev_dbg(info->dev, "parse function(%d): %pOFn\n", index, np);
func = &info->functions[index];
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index f76edf664539..14eb576c04a2 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Combined GPIO and pin controller support for Renesas RZ/A1 (r7s72100) SoC
*
* Copyright (C) 2017 Jacopo Mondi
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
/*
@@ -930,8 +927,8 @@ static int rza1_parse_pinmux_node(struct rza1_pinctrl *rza1_pctl,
&npin_configs);
if (ret) {
dev_err(rza1_pctl->dev,
- "Unable to parse pin configuration options for %s\n",
- np->name);
+ "Unable to parse pin configuration options for %pOFn\n",
+ np);
return ret;
}
@@ -1226,8 +1223,8 @@ static int rza1_parse_gpiochip(struct rza1_pinctrl *rza1_pctl,
*chip = rza1_gpiochip_template;
chip->base = -1;
- chip->label = devm_kasprintf(rza1_pctl->dev, GFP_KERNEL, "%s",
- np->name);
+ chip->label = devm_kasprintf(rza1_pctl->dev, GFP_KERNEL, "%pOFn",
+ np);
chip->ngpio = of_args.args[2];
chip->of_node = np;
chip->parent = rza1_pctl->dev;
@@ -1287,7 +1284,7 @@ static int rza1_gpio_register(struct rza1_pinctrl *rza1_pctl)
ret = rza1_parse_gpiochip(rza1_pctl, child, &gpio_chips[i],
&gpio_ranges[i]);
if (ret)
- goto gpiochip_remove;
+ return ret;
++i;
}
@@ -1295,12 +1292,6 @@ static int rza1_gpio_register(struct rza1_pinctrl *rza1_pctl)
dev_info(rza1_pctl->dev, "Registered %u gpio controllers\n", i);
return 0;
-
-gpiochip_remove:
- for (; i > 0; i--)
- devm_gpiochip_remove(rza1_pctl->dev, &gpio_chips[i - 1]);
-
- return ret;
}
/**
diff --git a/drivers/pinctrl/pinctrl-rzn1.c b/drivers/pinctrl/pinctrl-rzn1.c
new file mode 100644
index 000000000000..57886dcff53d
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-rzn1.c
@@ -0,0 +1,947 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2018 Renesas Electronics Europe Limited
+ *
+ * Phil Edworthy <phil.edworthy@renesas.com>
+ * Based on a driver originally written by Michel Pollet at Renesas.
+ */
+
+#include <dt-bindings/pinctrl/rzn1-pinctrl.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "core.h"
+#include "pinconf.h"
+#include "pinctrl-utils.h"
+
+/* Field positions and masks in the pinmux registers */
+#define RZN1_L1_PIN_DRIVE_STRENGTH 10
+#define RZN1_L1_PIN_DRIVE_STRENGTH_4MA 0
+#define RZN1_L1_PIN_DRIVE_STRENGTH_6MA 1
+#define RZN1_L1_PIN_DRIVE_STRENGTH_8MA 2
+#define RZN1_L1_PIN_DRIVE_STRENGTH_12MA 3
+#define RZN1_L1_PIN_PULL 8
+#define RZN1_L1_PIN_PULL_NONE 0
+#define RZN1_L1_PIN_PULL_UP 1
+#define RZN1_L1_PIN_PULL_DOWN 3
+#define RZN1_L1_FUNCTION 0
+#define RZN1_L1_FUNC_MASK 0xf
+#define RZN1_L1_FUNCTION_L2 0xf
+
+/*
+ * The hardware manual describes two levels of multiplexing, but it's more
+ * logical to think of the hardware as three levels, with level 3 consisting of
+ * the multiplexing for Ethernet MDIO signals.
+ *
+ * Level 1 functions go from 0 to 9, with level 1 function '15' (0xf) specifying
+ * that level 2 functions are used instead. Level 2 has a lot more options,
+ * going from 0 to 61. Level 3 allows selection of MDIO functions which can be
+ * floating, or one of seven internal peripherals. Unfortunately, there are two
+ * level 2 functions that can select MDIO, and two MDIO channels so we have four
+ * sets of level 3 functions.
+ *
+ * For this driver, we've compounded the numbers together, so:
+ * 0 to 9 is level 1
+ * 10 to 71 is 10 + level 2 number
+ * 72 to 79 is 72 + MDIO0 source for level 2 MDIO function.
+ * 80 to 87 is 80 + MDIO0 source for level 2 MDIO_E1 function.
+ * 88 to 95 is 88 + MDIO1 source for level 2 MDIO function.
+ * 96 to 103 is 96 + MDIO1 source for level 2 MDIO_E1 function.
+ * Examples:
+ * Function 28 corresponds to UART0
+ * Function 73 corresponds to MDIO0 to GMAC0
+ *
+ * There are 170 configurable pins (called PL_GPIO in the datasheet).
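+ *
+ * As a sketch (illustrative only; it merely restates the mapping above in
+ * pseudo-code, using constants this file already gets from
+ * <dt-bindings/pinctrl/rzn1-pinctrl.h>):
+ *
+ *	if (func < RZN1_FUNC_L2_OFFSET)        -> level 1, used directly
+ *	else if (func < RZN1_FUNC_MDIO0_HIGHZ) -> level 2, func - 10
+ *	else                                   -> level 3 MDIO selector
+ *
+ * E.g. 28 is level 2 function 18 (UART0); 73 is MDIO0 source 1 (GMAC0).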
+ */
+
+/*
+ * Structure detailing the HW registers on the RZ/N1 devices.
+ * Both the Level 1 mux registers and Level 2 mux registers have the same
+ * structure. The only difference is that Level 2 has additional MDIO registers
+ * at the end.
+ */
+struct rzn1_pinctrl_regs {
+ u32 conf[170];
+ u32 pad0[86];
+ u32 status_protect; /* 0x400 */
+ /* MDIO mux registers, level2 only */
+ u32 l2_mdio[2];
+};
+
+/**
+ * struct rzn1_pmx_func - describes rzn1 pinmux functions
+ * @name: the name of this specific function
+ * @groups: corresponding pin groups
+ * @num_groups: the number of groups
+ */
+struct rzn1_pmx_func {
+ const char *name;
+ const char **groups;
+ unsigned int num_groups;
+};
+
+/**
+ * struct rzn1_pin_group - describes an rzn1 pin group
+ * @name: the name of this specific pin group
+ * @func: the name of the function selected by this group
+ * @npins: the number of pins in this group array, i.e. the number of
+ * elements in .pins so we can iterate over that array
+ * @pins: array of pins. Needed due to pinctrl_ops.get_group_pins()
+ * @pin_ids: array of pin_ids, i.e. the value used to select the mux
+ */
+struct rzn1_pin_group {
+ const char *name;
+ const char *func;
+ unsigned int npins;
+ unsigned int *pins;
+ u8 *pin_ids;
+};
+
+struct rzn1_pinctrl {
+ struct device *dev;
+ struct clk *clk;
+ struct pinctrl_dev *pctl;
+ struct rzn1_pinctrl_regs __iomem *lev1;
+ struct rzn1_pinctrl_regs __iomem *lev2;
+ u32 lev1_protect_phys;
+ u32 lev2_protect_phys;
+ int mdio_func[2];
+
+ struct rzn1_pin_group *groups;
+ unsigned int ngroups;
+
+ struct rzn1_pmx_func *functions;
+ unsigned int nfunctions;
+};
+
+#define RZN1_PINS_PROP "pinmux"
+
+#define RZN1_PIN(pin) PINCTRL_PIN(pin, "pl_gpio"#pin)
+
+static const struct pinctrl_pin_desc rzn1_pins[] = {
+ RZN1_PIN(0), RZN1_PIN(1), RZN1_PIN(2), RZN1_PIN(3), RZN1_PIN(4),
+ RZN1_PIN(5), RZN1_PIN(6), RZN1_PIN(7), RZN1_PIN(8), RZN1_PIN(9),
+ RZN1_PIN(10), RZN1_PIN(11), RZN1_PIN(12), RZN1_PIN(13), RZN1_PIN(14),
+ RZN1_PIN(15), RZN1_PIN(16), RZN1_PIN(17), RZN1_PIN(18), RZN1_PIN(19),
+ RZN1_PIN(20), RZN1_PIN(21), RZN1_PIN(22), RZN1_PIN(23), RZN1_PIN(24),
+ RZN1_PIN(25), RZN1_PIN(26), RZN1_PIN(27), RZN1_PIN(28), RZN1_PIN(29),
+ RZN1_PIN(30), RZN1_PIN(31), RZN1_PIN(32), RZN1_PIN(33), RZN1_PIN(34),
+ RZN1_PIN(35), RZN1_PIN(36), RZN1_PIN(37), RZN1_PIN(38), RZN1_PIN(39),
+ RZN1_PIN(40), RZN1_PIN(41), RZN1_PIN(42), RZN1_PIN(43), RZN1_PIN(44),
+ RZN1_PIN(45), RZN1_PIN(46), RZN1_PIN(47), RZN1_PIN(48), RZN1_PIN(49),
+ RZN1_PIN(50), RZN1_PIN(51), RZN1_PIN(52), RZN1_PIN(53), RZN1_PIN(54),
+ RZN1_PIN(55), RZN1_PIN(56), RZN1_PIN(57), RZN1_PIN(58), RZN1_PIN(59),
+ RZN1_PIN(60), RZN1_PIN(61), RZN1_PIN(62), RZN1_PIN(63), RZN1_PIN(64),
+ RZN1_PIN(65), RZN1_PIN(66), RZN1_PIN(67), RZN1_PIN(68), RZN1_PIN(69),
+ RZN1_PIN(70), RZN1_PIN(71), RZN1_PIN(72), RZN1_PIN(73), RZN1_PIN(74),
+ RZN1_PIN(75), RZN1_PIN(76), RZN1_PIN(77), RZN1_PIN(78), RZN1_PIN(79),
+ RZN1_PIN(80), RZN1_PIN(81), RZN1_PIN(82), RZN1_PIN(83), RZN1_PIN(84),
+ RZN1_PIN(85), RZN1_PIN(86), RZN1_PIN(87), RZN1_PIN(88), RZN1_PIN(89),
+ RZN1_PIN(90), RZN1_PIN(91), RZN1_PIN(92), RZN1_PIN(93), RZN1_PIN(94),
+ RZN1_PIN(95), RZN1_PIN(96), RZN1_PIN(97), RZN1_PIN(98), RZN1_PIN(99),
+ RZN1_PIN(100), RZN1_PIN(101), RZN1_PIN(102), RZN1_PIN(103),
+ RZN1_PIN(104), RZN1_PIN(105), RZN1_PIN(106), RZN1_PIN(107),
+ RZN1_PIN(108), RZN1_PIN(109), RZN1_PIN(110), RZN1_PIN(111),
+ RZN1_PIN(112), RZN1_PIN(113), RZN1_PIN(114), RZN1_PIN(115),
+ RZN1_PIN(116), RZN1_PIN(117), RZN1_PIN(118), RZN1_PIN(119),
+ RZN1_PIN(120), RZN1_PIN(121), RZN1_PIN(122), RZN1_PIN(123),
+ RZN1_PIN(124), RZN1_PIN(125), RZN1_PIN(126), RZN1_PIN(127),
+ RZN1_PIN(128), RZN1_PIN(129), RZN1_PIN(130), RZN1_PIN(131),
+ RZN1_PIN(132), RZN1_PIN(133), RZN1_PIN(134), RZN1_PIN(135),
+ RZN1_PIN(136), RZN1_PIN(137), RZN1_PIN(138), RZN1_PIN(139),
+ RZN1_PIN(140), RZN1_PIN(141), RZN1_PIN(142), RZN1_PIN(143),
+ RZN1_PIN(144), RZN1_PIN(145), RZN1_PIN(146), RZN1_PIN(147),
+ RZN1_PIN(148), RZN1_PIN(149), RZN1_PIN(150), RZN1_PIN(151),
+ RZN1_PIN(152), RZN1_PIN(153), RZN1_PIN(154), RZN1_PIN(155),
+ RZN1_PIN(156), RZN1_PIN(157), RZN1_PIN(158), RZN1_PIN(159),
+ RZN1_PIN(160), RZN1_PIN(161), RZN1_PIN(162), RZN1_PIN(163),
+ RZN1_PIN(164), RZN1_PIN(165), RZN1_PIN(166), RZN1_PIN(167),
+ RZN1_PIN(168), RZN1_PIN(169),
+};
+
+enum {
+ LOCK_LEVEL1 = 0x1,
+ LOCK_LEVEL2 = 0x2,
+ LOCK_ALL = LOCK_LEVEL1 | LOCK_LEVEL2,
+};
+
+static void rzn1_hw_set_lock(struct rzn1_pinctrl *ipctl, u8 lock, u8 value)
+{
+ /*
+ * The pinmux configuration is locked by writing the physical address of
+ * the status_protect register to itself. It is unlocked by writing the
+ * address | 1.
+ */
+ if (lock & LOCK_LEVEL1) {
+ u32 val = ipctl->lev1_protect_phys | !(value & LOCK_LEVEL1);
+
+ writel(val, &ipctl->lev1->status_protect);
+ }
+
+ if (lock & LOCK_LEVEL2) {
+ u32 val = ipctl->lev2_protect_phys | !(value & LOCK_LEVEL2);
+
+ writel(val, &ipctl->lev2->status_protect);
+ }
+}
+
+static void rzn1_pinctrl_mdio_select(struct rzn1_pinctrl *ipctl, int mdio,
+ u32 func)
+{
+ if (ipctl->mdio_func[mdio] >= 0 && ipctl->mdio_func[mdio] != func)
+ dev_warn(ipctl->dev, "conflicting setting for mdio%d!\n", mdio);
+ ipctl->mdio_func[mdio] = func;
+
+ dev_dbg(ipctl->dev, "setting mdio%d to %u\n", mdio, func);
+
+ writel(func, &ipctl->lev2->l2_mdio[mdio]);
+}
+
+/*
+ * Using a composite pin description, set the hardware pinmux registers
+ * with the corresponding values.
+ * Make sure to unlock write protection and reset it afterward.
+ *
+ * NOTE: There is no protection against concurrent access; it is assumed
+ * these calls are already serialized.
+ */
+static int rzn1_set_hw_pin_func(struct rzn1_pinctrl *ipctl, unsigned int pin,
+ u32 pin_config, u8 use_locks)
+{
+ u32 l1_cache;
+ u32 l2_cache;
+ u32 l1;
+ u32 l2;
+
+ /* Level 3 MDIO multiplexing */
+ if (pin_config >= RZN1_FUNC_MDIO0_HIGHZ &&
+ pin_config <= RZN1_FUNC_MDIO1_E1_SWITCH) {
+ int mdio_channel;
+ u32 mdio_func;
+
+ if (pin_config <= RZN1_FUNC_MDIO1_HIGHZ)
+ mdio_channel = 0;
+ else
+ mdio_channel = 1;
+
+ /* Get MDIO func, and convert the func to the level 2 number */
+ if (pin_config <= RZN1_FUNC_MDIO0_SWITCH) {
+ mdio_func = pin_config - RZN1_FUNC_MDIO0_HIGHZ;
+ pin_config = RZN1_FUNC_ETH_MDIO;
+ } else if (pin_config <= RZN1_FUNC_MDIO0_E1_SWITCH) {
+ mdio_func = pin_config - RZN1_FUNC_MDIO0_E1_HIGHZ;
+ pin_config = RZN1_FUNC_ETH_MDIO_E1;
+ } else if (pin_config <= RZN1_FUNC_MDIO1_SWITCH) {
+ mdio_func = pin_config - RZN1_FUNC_MDIO1_HIGHZ;
+ pin_config = RZN1_FUNC_ETH_MDIO;
+ } else {
+ mdio_func = pin_config - RZN1_FUNC_MDIO1_E1_HIGHZ;
+ pin_config = RZN1_FUNC_ETH_MDIO_E1;
+ }
+ rzn1_pinctrl_mdio_select(ipctl, mdio_channel, mdio_func);
+ }
+
+ /* Note: we do not allow anything past the MDIO mux values */
+ if (pin >= ARRAY_SIZE(ipctl->lev1->conf) ||
+ pin_config >= RZN1_FUNC_MDIO0_HIGHZ)
+ return -EINVAL;
+
+ l1 = readl(&ipctl->lev1->conf[pin]);
+ l1_cache = l1;
+ l2 = readl(&ipctl->lev2->conf[pin]);
+ l2_cache = l2;
+
+ dev_dbg(ipctl->dev, "setting func for pin %u to %u\n", pin, pin_config);
+
+ l1 &= ~(RZN1_L1_FUNC_MASK << RZN1_L1_FUNCTION);
+
+ if (pin_config < RZN1_FUNC_L2_OFFSET) {
+ l1 |= (pin_config << RZN1_L1_FUNCTION);
+ } else {
+ l1 |= (RZN1_L1_FUNCTION_L2 << RZN1_L1_FUNCTION);
+
+ l2 = pin_config - RZN1_FUNC_L2_OFFSET;
+ }
+
+ /* If either configuration changes, we update both anyway */
+ if (l1 != l1_cache || l2 != l2_cache) {
+ writel(l1, &ipctl->lev1->conf[pin]);
+ writel(l2, &ipctl->lev2->conf[pin]);
+ }
+
+ return 0;
+}
+
+static const struct rzn1_pin_group *rzn1_pinctrl_find_group_by_name(
+ const struct rzn1_pinctrl *ipctl, const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; i < ipctl->ngroups; i++) {
+ if (!strcmp(ipctl->groups[i].name, name))
+ return &ipctl->groups[i];
+ }
+
+ return NULL;
+}
+
+static int rzn1_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return ipctl->ngroups;
+}
+
+static const char *rzn1_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return ipctl->groups[selector].name;
+}
+
+static int rzn1_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector, const unsigned int **pins,
+ unsigned int *npins)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+
+ if (selector >= ipctl->ngroups)
+ return -EINVAL;
+
+ *pins = ipctl->groups[selector].pins;
+ *npins = ipctl->groups[selector].npins;
+
+ return 0;
+}
+
+/*
+ * This function is called for each pinctrl 'Function' node.
+ * Sub-nodes can be used to describe multiple 'Groups' for the 'Function'.
+ * If there aren't any sub-nodes, the 'Group' is essentially the 'Function'.
+ * Each 'Group' uses pinmux = <...> to detail the pins and data used to select
+ * the functionality. Each 'Group' has optional pin configurations that apply
+ * to all pins in the 'Group'.
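+ *
+ * Illustrative fragment only (node name, pin and function numbers are
+ * assumptions, not taken from a real device tree); each pinmux cell packs
+ * the compounded function number into bits [14:8] and the pin into bits
+ * [7:0]:
+ *
+ *	uart0-pins {
+ *		pinmux = <(28 << 8 | 103)>;	(function 28 = UART0)
+ *		bias-pull-up;
+ *	};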
+ */
+static int rzn1_dt_node_to_map_one(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct rzn1_pin_group *grp;
+ unsigned long *configs = NULL;
+ unsigned int reserved_maps = *num_maps;
+ unsigned int num_configs = 0;
+ unsigned int reserve = 1;
+ int ret;
+
+ dev_dbg(ipctl->dev, "processing node %pOF\n", np);
+
+ grp = rzn1_pinctrl_find_group_by_name(ipctl, np->name);
+ if (!grp) {
+ dev_err(ipctl->dev, "unable to find group for node %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ /* Get the group's pin configuration */
+ ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
+ &num_configs);
+ if (ret < 0) {
+ dev_err(ipctl->dev, "%pOF: could not parse property\n", np);
+
+ return ret;
+ }
+
+ if (num_configs)
+ reserve++;
+
+ /* Increase the number of maps to cover this group */
+ ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps, num_maps,
+ reserve);
+ if (ret < 0)
+ goto out;
+
+ /* Associate the group with the function */
+ ret = pinctrl_utils_add_map_mux(pctldev, map, &reserved_maps, num_maps,
+ grp->name, grp->func);
+ if (ret < 0)
+ goto out;
+
+ if (num_configs) {
+ /* Associate the group's pin configuration with the group */
+ ret = pinctrl_utils_add_map_configs(pctldev, map,
+ &reserved_maps, num_maps, grp->name,
+ configs, num_configs,
+ PIN_MAP_TYPE_CONFIGS_GROUP);
+ if (ret < 0)
+ goto out;
+ }
+
+ dev_dbg(pctldev->dev, "maps: function %s group %s (%d pins)\n",
+ grp->func, grp->name, grp->npins);
+
+out:
+ kfree(configs);
+
+ return ret;
+}
+
+static int rzn1_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct device_node *child;
+ int ret;
+
+ *map = NULL;
+ *num_maps = 0;
+
+ ret = rzn1_dt_node_to_map_one(pctldev, np, map, num_maps);
+ if (ret < 0)
+ return ret;
+
+ for_each_child_of_node(np, child) {
+ ret = rzn1_dt_node_to_map_one(pctldev, child, map, num_maps);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinctrl_ops rzn1_pctrl_ops = {
+ .get_groups_count = rzn1_get_groups_count,
+ .get_group_name = rzn1_get_group_name,
+ .get_group_pins = rzn1_get_group_pins,
+ .dt_node_to_map = rzn1_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int rzn1_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return ipctl->nfunctions;
+}
+
+static const char *rzn1_pmx_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return ipctl->functions[selector].name;
+}
+
+static int rzn1_pmx_get_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = ipctl->functions[selector].groups;
+ *num_groups = ipctl->functions[selector].num_groups;
+
+ return 0;
+}
+
+static int rzn1_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
+ unsigned int group)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ struct rzn1_pin_group *grp = &ipctl->groups[group];
+ unsigned int i, grp_pins = grp->npins;
+
+ dev_dbg(ipctl->dev, "set mux %s(%d) group %s(%d)\n",
+ ipctl->functions[selector].name, selector, grp->name, group);
+
+ rzn1_hw_set_lock(ipctl, LOCK_ALL, LOCK_ALL);
+ for (i = 0; i < grp_pins; i++)
+ rzn1_set_hw_pin_func(ipctl, grp->pins[i], grp->pin_ids[i], 0);
+ rzn1_hw_set_lock(ipctl, LOCK_ALL, 0);
+
+ return 0;
+}
+
+static const struct pinmux_ops rzn1_pmx_ops = {
+ .get_functions_count = rzn1_pmx_get_funcs_count,
+ .get_function_name = rzn1_pmx_get_func_name,
+ .get_function_groups = rzn1_pmx_get_groups,
+ .set_mux = rzn1_set_mux,
+};
+
+static int rzn1_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ const u32 reg_drive[4] = { 4, 6, 8, 12 };
+ u32 pull, drive, l1mux;
+ u32 l1, l2, arg = 0;
+
+ if (pin >= ARRAY_SIZE(ipctl->lev1->conf))
+ return -EINVAL;
+
+ l1 = readl(&ipctl->lev1->conf[pin]);
+
+ l1mux = l1 & RZN1_L1_FUNC_MASK;
+ pull = (l1 >> RZN1_L1_PIN_PULL) & 0x3;
+ drive = (l1 >> RZN1_L1_PIN_DRIVE_STRENGTH) & 0x3;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (pull != RZN1_L1_PIN_PULL_UP)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (pull != RZN1_L1_PIN_PULL_DOWN)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (pull != RZN1_L1_PIN_PULL_NONE)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ arg = reg_drive[drive];
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ l2 = readl(&ipctl->lev2->conf[pin]);
+ if (l1mux == RZN1_L1_FUNCTION_L2) {
+ if (l2 != 0)
+ return -EINVAL;
+ } else if (l1mux != RZN1_FUNC_HIGHZ) {
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int rzn1_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ unsigned int i;
+ u32 l1, l1_cache;
+ u32 drv;
+ u32 arg;
+
+ if (pin >= ARRAY_SIZE(ipctl->lev1->conf))
+ return -EINVAL;
+
+ l1 = readl(&ipctl->lev1->conf[pin]);
+ l1_cache = l1;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ dev_dbg(ipctl->dev, "set pin %d pull up\n", pin);
+ l1 &= ~(0x3 << RZN1_L1_PIN_PULL);
+ l1 |= (RZN1_L1_PIN_PULL_UP << RZN1_L1_PIN_PULL);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ dev_dbg(ipctl->dev, "set pin %d pull down\n", pin);
+ l1 &= ~(0x3 << RZN1_L1_PIN_PULL);
+ l1 |= (RZN1_L1_PIN_PULL_DOWN << RZN1_L1_PIN_PULL);
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ dev_dbg(ipctl->dev, "set pin %d bias off\n", pin);
+ l1 &= ~(0x3 << RZN1_L1_PIN_PULL);
+ l1 |= (RZN1_L1_PIN_PULL_NONE << RZN1_L1_PIN_PULL);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ dev_dbg(ipctl->dev, "set pin %d drv %umA\n", pin, arg);
+ switch (arg) {
+ case 4:
+ drv = RZN1_L1_PIN_DRIVE_STRENGTH_4MA;
+ break;
+ case 6:
+ drv = RZN1_L1_PIN_DRIVE_STRENGTH_6MA;
+ break;
+ case 8:
+ drv = RZN1_L1_PIN_DRIVE_STRENGTH_8MA;
+ break;
+ case 12:
+ drv = RZN1_L1_PIN_DRIVE_STRENGTH_12MA;
+ break;
+ default:
+ dev_err(ipctl->dev,
+ "Drive strength %umA not supported\n",
+ arg);
+
+ return -EINVAL;
+ }
+
+ l1 &= ~(0x3 << RZN1_L1_PIN_DRIVE_STRENGTH);
+ l1 |= (drv << RZN1_L1_PIN_DRIVE_STRENGTH);
+ break;
+
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ dev_dbg(ipctl->dev, "set pin %d High-Z\n", pin);
+ l1 &= ~RZN1_L1_FUNC_MASK;
+ l1 |= RZN1_FUNC_HIGHZ;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ if (l1 != l1_cache) {
+ rzn1_hw_set_lock(ipctl, LOCK_LEVEL1, LOCK_LEVEL1);
+ writel(l1, &ipctl->lev1->conf[pin]);
+ rzn1_hw_set_lock(ipctl, LOCK_LEVEL1, 0);
+ }
+
+ return 0;
+}
+
+static int rzn1_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *config)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ struct rzn1_pin_group *grp = &ipctl->groups[selector];
+ unsigned long old = 0;
+ unsigned int i;
+
+ dev_dbg(ipctl->dev, "group get %s selector:%u\n", grp->name, selector);
+
+ for (i = 0; i < grp->npins; i++) {
+ if (rzn1_pinconf_get(pctldev, grp->pins[i], config))
+ return -ENOTSUPP;
+
+ /* configs do not match between two pins */
+ if (i && (old != *config))
+ return -ENOTSUPP;
+
+ old = *config;
+ }
+
+ return 0;
+}
+
+static int rzn1_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ struct rzn1_pin_group *grp = &ipctl->groups[selector];
+ unsigned int i;
+ int ret;
+
+ dev_dbg(ipctl->dev, "group set %s selector:%u configs:%p/%d\n",
+ grp->name, selector, configs, num_configs);
+
+ for (i = 0; i < grp->npins; i++) {
+ unsigned int pin = grp->pins[i];
+
+ ret = rzn1_pinconf_set(pctldev, pin, configs, num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops rzn1_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = rzn1_pinconf_get,
+ .pin_config_set = rzn1_pinconf_set,
+ .pin_config_group_get = rzn1_pinconf_group_get,
+ .pin_config_group_set = rzn1_pinconf_group_set,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static struct pinctrl_desc rzn1_pinctrl_desc = {
+ .pctlops = &rzn1_pctrl_ops,
+ .pmxops = &rzn1_pmx_ops,
+ .confops = &rzn1_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int rzn1_pinctrl_parse_groups(struct device_node *np,
+ struct rzn1_pin_group *grp,
+ struct rzn1_pinctrl *ipctl)
+{
+ const __be32 *list;
+ unsigned int i;
+ int size;
+
+ dev_dbg(ipctl->dev, "%s: %s\n", __func__, np->name);
+
+ /* Initialise group */
+ grp->name = np->name;
+
+ /*
+ * The binding format is
+ * pinmux = <PIN_FUNC_ID CONFIG ...>,
+ * so do a sanity check and calculate the number of pins.
+ */
+ list = of_get_property(np, RZN1_PINS_PROP, &size);
+ if (!list) {
+ dev_err(ipctl->dev,
+ "no " RZN1_PINS_PROP " property in node %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ if (!size) {
+ dev_err(ipctl->dev, "Invalid " RZN1_PINS_PROP " in node %pOF\n",
+ np);
+
+ return -EINVAL;
+ }
+
+ grp->npins = size / sizeof(list[0]);
+ grp->pin_ids = devm_kmalloc_array(ipctl->dev,
+ grp->npins, sizeof(grp->pin_ids[0]),
+ GFP_KERNEL);
+ grp->pins = devm_kmalloc_array(ipctl->dev,
+ grp->npins, sizeof(grp->pins[0]),
+ GFP_KERNEL);
+ if (!grp->pin_ids || !grp->pins)
+ return -ENOMEM;
+
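+ /*
+ * Each cell packs the pin number into bits [7:0] and the compounded
+ * function number (see the level mapping near the top of this file)
+ * into bits [14:8].
+ */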
+ for (i = 0; i < grp->npins; i++) {
+ u32 pin_id = be32_to_cpu(*list++);
+
+ grp->pins[i] = pin_id & 0xff;
+ grp->pin_ids[i] = (pin_id >> 8) & 0x7f;
+ }
+
+ return grp->npins;
+}
+
+static int rzn1_pinctrl_count_function_groups(struct device_node *np)
+{
+ struct device_node *child;
+ int count = 0;
+
+ if (of_property_count_u32_elems(np, RZN1_PINS_PROP) > 0)
+ count++;
+
+ for_each_child_of_node(np, child) {
+ if (of_property_count_u32_elems(child, RZN1_PINS_PROP) > 0)
+ count++;
+ }
+
+ return count;
+}
+
+static int rzn1_pinctrl_parse_functions(struct device_node *np,
+ struct rzn1_pinctrl *ipctl,
+ unsigned int index)
+{
+ struct rzn1_pmx_func *func;
+ struct rzn1_pin_group *grp;
+ struct device_node *child;
+ unsigned int i = 0;
+ int ret;
+
+ func = &ipctl->functions[index];
+
+ /* Initialise function */
+ func->name = np->name;
+ func->num_groups = rzn1_pinctrl_count_function_groups(np);
+ if (func->num_groups == 0) {
+ dev_err(ipctl->dev, "no groups defined in %pOF\n", np);
+ return -EINVAL;
+ }
+ dev_dbg(ipctl->dev, "function %s has %d groups\n",
+ np->name, func->num_groups);
+
+ func->groups = devm_kmalloc_array(ipctl->dev,
+ func->num_groups, sizeof(char *),
+ GFP_KERNEL);
+ if (!func->groups)
+ return -ENOMEM;
+
+ if (of_property_count_u32_elems(np, RZN1_PINS_PROP) > 0) {
+ func->groups[i] = np->name;
+ grp = &ipctl->groups[ipctl->ngroups];
+ grp->func = func->name;
+ ret = rzn1_pinctrl_parse_groups(np, grp, ipctl);
+ if (ret < 0)
+ return ret;
+ i++;
+ ipctl->ngroups++;
+ }
+
+ for_each_child_of_node(np, child) {
+ func->groups[i] = child->name;
+ grp = &ipctl->groups[ipctl->ngroups];
+ grp->func = func->name;
+ ret = rzn1_pinctrl_parse_groups(child, grp, ipctl);
+ if (ret < 0)
+ return ret;
+ i++;
+ ipctl->ngroups++;
+ }
+
+ dev_dbg(ipctl->dev, "function %s parsed %u/%u groups\n",
+ np->name, i, func->num_groups);
+
+ return 0;
+}
+
+static int rzn1_pinctrl_probe_dt(struct platform_device *pdev,
+ struct rzn1_pinctrl *ipctl)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ unsigned int maxgroups = 0;
+ unsigned int nfuncs = 0;
+ unsigned int i = 0;
+ int ret;
+
+ nfuncs = of_get_child_count(np);
+ if (!nfuncs)
+ return 0;
+
+ ipctl->nfunctions = nfuncs;
+ ipctl->functions = devm_kmalloc_array(&pdev->dev, nfuncs,
+ sizeof(*ipctl->functions),
+ GFP_KERNEL);
+ if (!ipctl->functions)
+ return -ENOMEM;
+
+ ipctl->ngroups = 0;
+ for_each_child_of_node(np, child)
+ maxgroups += rzn1_pinctrl_count_function_groups(child);
+
+ ipctl->groups = devm_kmalloc_array(&pdev->dev,
+ maxgroups,
+ sizeof(*ipctl->groups),
+ GFP_KERNEL);
+ if (!ipctl->groups)
+ return -ENOMEM;
+
+ for_each_child_of_node(np, child) {
+ ret = rzn1_pinctrl_parse_functions(child, ipctl, i++);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rzn1_pinctrl_probe(struct platform_device *pdev)
+{
+ struct rzn1_pinctrl *ipctl;
+ struct resource *res;
+ int ret;
+
+ /* Create state holders etc for this driver */
+ ipctl = devm_kzalloc(&pdev->dev, sizeof(*ipctl), GFP_KERNEL);
+ if (!ipctl)
+ return -ENOMEM;
+
+ ipctl->mdio_func[0] = -1;
+ ipctl->mdio_func[1] = -1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ipctl->lev1 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ipctl->lev1))
+ return PTR_ERR(ipctl->lev1);
+ ipctl->lev1_protect_phys = (u32)res->start + 0x400;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ipctl->lev2 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ipctl->lev2))
+ return PTR_ERR(ipctl->lev2);
+ ipctl->lev2_protect_phys = (u32)res->start + 0x400;
+
+ ipctl->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ipctl->clk))
+ return PTR_ERR(ipctl->clk);
+ ret = clk_prepare_enable(ipctl->clk);
+ if (ret)
+ return ret;
+
+ ipctl->dev = &pdev->dev;
+ rzn1_pinctrl_desc.name = dev_name(&pdev->dev);
+ rzn1_pinctrl_desc.pins = rzn1_pins;
+ rzn1_pinctrl_desc.npins = ARRAY_SIZE(rzn1_pins);
+
+ ret = rzn1_pinctrl_probe_dt(pdev, ipctl);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to probe DT properties\n");
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, ipctl);
+
+ ret = devm_pinctrl_register_and_init(&pdev->dev, &rzn1_pinctrl_desc,
+ ipctl, &ipctl->pctl);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register rzn1 pinctrl driver\n");
+ goto err_clk;
+ }
+
+ ret = pinctrl_enable(ipctl->pctl);
+ if (ret)
+ goto err_clk;
+
+ dev_info(&pdev->dev, "probed\n");
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(ipctl->clk);
+
+ return ret;
+}
+
+static int rzn1_pinctrl_remove(struct platform_device *pdev)
+{
+ struct rzn1_pinctrl *ipctl = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ipctl->clk);
+
+ return 0;
+}
+
+static const struct of_device_id rzn1_pinctrl_match[] = {
+ { .compatible = "renesas,rzn1-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rzn1_pinctrl_match);
+
+static struct platform_driver rzn1_pinctrl_driver = {
+ .probe = rzn1_pinctrl_probe,
+ .remove = rzn1_pinctrl_remove,
+ .driver = {
+ .name = "rzn1-pinctrl",
+ .of_match_table = rzn1_pinctrl_match,
+ },
+};
+
+static int __init _pinctrl_drv_register(void)
+{
+ return platform_driver_register(&rzn1_pinctrl_driver);
+}
+subsys_initcall(_pinctrl_drv_register);
+
+MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/N1 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 7ec72ff2419a..1e0614daee9b 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1022,14 +1022,14 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
vals[found].reg = pcs->base + offset;
vals[found].val = pinctrl_spec.args[1];
- dev_dbg(pcs->dev, "%s index: 0x%x value: 0x%x\n",
- pinctrl_spec.np->name, offset, pinctrl_spec.args[1]);
+ dev_dbg(pcs->dev, "%pOFn index: 0x%x value: 0x%x\n",
+ pinctrl_spec.np, offset, pinctrl_spec.args[1]);
pin = pcs_get_pin_by_offset(pcs, offset);
if (pin < 0) {
dev_err(pcs->dev,
- "could not add functions for %s %ux\n",
- np->name, offset);
+ "could not add functions for %pOFn %ux\n",
+ np, offset);
break;
}
pins[found++] = pin;
@@ -1135,8 +1135,8 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
val = pinctrl_spec.args[1];
mask = pinctrl_spec.args[2];
- dev_dbg(pcs->dev, "%s index: 0x%x value: 0x%x mask: 0x%x\n",
- pinctrl_spec.np->name, offset, val, mask);
+ dev_dbg(pcs->dev, "%pOFn index: 0x%x value: 0x%x mask: 0x%x\n",
+ pinctrl_spec.np, offset, val, mask);
/* Parse pins in each row from LSB */
while (mask) {
@@ -1148,8 +1148,8 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
if ((mask & mask_pos) == 0) {
dev_err(pcs->dev,
- "Invalid mask for %s at 0x%x\n",
- np->name, offset);
+ "Invalid mask for %pOFn at 0x%x\n",
+ np, offset);
break;
}
@@ -1157,8 +1157,8 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
if (submask != mask_pos) {
dev_warn(pcs->dev,
- "Invalid submask 0x%x for %s at 0x%x\n",
- submask, np->name, offset);
+ "Invalid submask 0x%x for %pOFn at 0x%x\n",
+ submask, np, offset);
continue;
}
@@ -1169,8 +1169,8 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
pin = pcs_get_pin_by_offset(pcs, offset);
if (pin < 0) {
dev_err(pcs->dev,
- "could not add functions for %s %ux\n",
- np->name, offset);
+ "could not add functions for %pOFn %ux\n",
+ np, offset);
break;
}
pins[found++] = pin + pin_num_from_lsb;
@@ -1254,16 +1254,16 @@ static int pcs_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = pcs_parse_bits_in_pinctrl_entry(pcs, np_config, map,
num_maps, pgnames);
if (ret < 0) {
- dev_err(pcs->dev, "no pins entries for %s\n",
- np_config->name);
+ dev_err(pcs->dev, "no pins entries for %pOFn\n",
+ np_config);
goto free_pgnames;
}
} else {
ret = pcs_parse_one_pinctrl_entry(pcs, np_config, map,
num_maps, pgnames);
if (ret < 0) {
- dev_err(pcs->dev, "no pins entries for %s\n",
- np_config->name);
+ dev_err(pcs->dev, "no pins entries for %pOFn\n",
+ np_config);
goto free_pgnames;
}
}
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 0966bb0bf71f..e66af93f2cbf 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -817,8 +817,8 @@ static int st_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
grp = st_pctl_find_group_by_name(info, np->name);
if (!grp) {
- dev_err(info->dev, "unable to find group for node %s\n",
- np->name);
+ dev_err(info->dev, "unable to find group for node %pOFn\n",
+ np);
return -EINVAL;
}
@@ -1184,7 +1184,7 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
if (pp->length / sizeof(__be32) >= OF_GPIO_ARGS_MIN) {
npins++;
} else {
- pr_warn("Invalid st,pins in %s node\n", np->name);
+ pr_warn("Invalid st,pins in %pOFn node\n", np);
return -EINVAL;
}
}
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 195492033075..836e9f3eae4c 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -114,6 +114,14 @@ config PINCTRL_MSM8998
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm MSM8998 platform.
+config PINCTRL_QCS404
+ tristate "Qualcomm QCS404 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ TLMM block found in the Qualcomm QCS404 platform.
+
config PINCTRL_QDF2XXX
tristate "Qualcomm Technologies QDF2xxx pin controller driver"
depends on GPIOLIB && ACPI
@@ -147,6 +155,15 @@ config PINCTRL_QCOM_SSBI_PMIC
which are using SSBI for communication with SoC. Example PMIC's
devices are pm8058 and pm8921.
+config PINCTRL_SDM660
+ tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDM660 platform.
+
config PINCTRL_SDM845
tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 0c6f3ddc296d..344b4c6a6c6e 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -13,10 +13,12 @@ obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
obj-$(CONFIG_PINCTRL_MSM8994) += pinctrl-msm8994.o
obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o
obj-$(CONFIG_PINCTRL_MSM8998) += pinctrl-msm8998.o
+obj-$(CONFIG_PINCTRL_QCS404) += pinctrl-qcs404.o
obj-$(CONFIG_PINCTRL_QDF2XXX) += pinctrl-qdf2xxx.o
obj-$(CONFIG_PINCTRL_MDM9615) += pinctrl-mdm9615.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
+obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o
obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 5d72ffad32c2..7c7d083e2c0d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -24,7 +24,7 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/reboot.h>
@@ -37,6 +37,7 @@
#include "../pinctrl-utils.h"
#define MAX_NR_GPIO 300
+#define MAX_NR_TILES 4
#define PS_HOLD_OFFSET 0x820
/**
@@ -52,7 +53,7 @@
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
* @soc; Reference to soc_data of platform specific data.
- * @regs: Base address for the TLMM register map.
+ * @regs: Base addresses for the TLMM tiles.
*/
struct msm_pinctrl {
struct device *dev;
@@ -70,9 +71,27 @@ struct msm_pinctrl {
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
- void __iomem *regs;
+ void __iomem *regs[MAX_NR_TILES];
};
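+/*
+ * Generate read/write accessors for each TLMM register type; a pin
+ * group's tile index selects which MMIO base in pctrl->regs[] to use.
+ */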
+#define MSM_ACCESSOR(name) \
+static u32 msm_readl_##name(struct msm_pinctrl *pctrl, \
+ const struct msm_pingroup *g) \
+{ \
+ return readl(pctrl->regs[g->tile] + g->name##_reg); \
+} \
+static void msm_writel_##name(u32 val, struct msm_pinctrl *pctrl, \
+ const struct msm_pingroup *g) \
+{ \
+ writel(val, pctrl->regs[g->tile] + g->name##_reg); \
+}
+
+MSM_ACCESSOR(ctl)
+MSM_ACCESSOR(io)
+MSM_ACCESSOR(intr_cfg)
+MSM_ACCESSOR(intr_status)
+MSM_ACCESSOR(intr_target)
+
static int msm_get_groups_count(struct pinctrl_dev *pctldev)
{
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -166,21 +185,37 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->ctl_reg);
+ val = msm_readl_ctl(pctrl, g);
val &= ~mask;
val |= i << g->mux_bit;
- writel(val, pctrl->regs + g->ctl_reg);
+ msm_writel_ctl(val, pctrl, g);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
+static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct msm_pingroup *g = &pctrl->soc->groups[offset];
+
+ /* No funcs? Probably ACPI so can't do anything here */
+ if (!g->nfuncs)
+ return 0;
+
+ /* For now assume function 0 is GPIO because it always is */
+ return msm_pinmux_set_mux(pctldev, g->funcs[0], offset);
+}
+
static const struct pinmux_ops msm_pinmux_ops = {
.request = msm_pinmux_request,
.get_functions_count = msm_get_functions_count,
.get_function_name = msm_get_function_name,
.get_function_groups = msm_get_function_groups,
+ .gpio_request_enable = msm_pinmux_request_gpio,
.set_mux = msm_pinmux_set_mux,
};
@@ -244,7 +279,7 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
if (ret < 0)
return ret;
- val = readl(pctrl->regs + g->ctl_reg);
+ val = msm_readl_ctl(pctrl, g);
arg = (val >> bit) & mask;
/* Convert register value to pinconf value */
@@ -283,7 +318,7 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
if (!arg)
return -EINVAL;
- val = readl(pctrl->regs + g->io_reg);
+ val = msm_readl_io(pctrl, g);
arg = !!(val & BIT(g->in_bit));
break;
case PIN_CONFIG_INPUT_ENABLE:
@@ -357,12 +392,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_OUTPUT:
/* set output value */
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->io_reg);
+ val = msm_readl_io(pctrl, g);
if (arg)
val |= BIT(g->out_bit);
else
val &= ~BIT(g->out_bit);
- writel(val, pctrl->regs + g->io_reg);
+ msm_writel_io(val, pctrl, g);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
/* enable output */
@@ -385,10 +420,10 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
}
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->ctl_reg);
+ val = msm_readl_ctl(pctrl, g);
val &= ~(mask << bit);
val |= arg << bit;
- writel(val, pctrl->regs + g->ctl_reg);
+ msm_writel_ctl(val, pctrl, g);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
@@ -412,9 +447,9 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->ctl_reg);
+ val = msm_readl_ctl(pctrl, g);
val &= ~BIT(g->oe_bit);
- writel(val, pctrl->regs + g->ctl_reg);
+ msm_writel_ctl(val, pctrl, g);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -432,16 +467,16 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->io_reg);
+ val = msm_readl_io(pctrl, g);
if (value)
val |= BIT(g->out_bit);
else
val &= ~BIT(g->out_bit);
- writel(val, pctrl->regs + g->io_reg);
+ msm_writel_io(val, pctrl, g);
- val = readl(pctrl->regs + g->ctl_reg);
+ val = msm_readl_ctl(pctrl, g);
val |= BIT(g->oe_bit);
- writel(val, pctrl->regs + g->ctl_reg);
+ msm_writel_ctl(val, pctrl, g);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -456,7 +491,7 @@ static int msm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
g = &pctrl->soc->groups[offset];
- val = readl(pctrl->regs + g->ctl_reg);
+ val = msm_readl_ctl(pctrl, g);
/* 0 = output, 1 = input */
return val & BIT(g->oe_bit) ? 0 : 1;
@@ -470,7 +505,7 @@ static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
g = &pctrl->soc->groups[offset];
- val = readl(pctrl->regs + g->io_reg);
+ val = msm_readl_io(pctrl, g);
return !!(val & BIT(g->in_bit));
}
@@ -485,12 +520,12 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->io_reg);
+ val = msm_readl_io(pctrl, g);
if (value)
val |= BIT(g->out_bit);
else
val &= ~BIT(g->out_bit);
- writel(val, pctrl->regs + g->io_reg);
+ msm_writel_io(val, pctrl, g);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
@@ -530,8 +565,8 @@ static void msm_gpio_dbg_show_one(struct seq_file *s,
return;
g = &pctrl->soc->groups[offset];
- ctl_reg = readl(pctrl->regs + g->ctl_reg);
- io_reg = readl(pctrl->regs + g->io_reg);
+ ctl_reg = msm_readl_ctl(pctrl, g);
+ io_reg = msm_readl_io(pctrl, g);
is_out = !!(ctl_reg & BIT(g->oe_bit));
func = (ctl_reg >> g->mux_bit) & 7;
@@ -566,6 +601,42 @@ static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
#define msm_gpio_dbg_show NULL
#endif
+static int msm_gpio_init_valid_mask(struct gpio_chip *chip)
+{
+ struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
+ int ret;
+ unsigned int len, i;
+ unsigned int max_gpios = pctrl->soc->ngpios;
+ u16 *tmp;
+
+ /* The number of GPIOs in the ACPI tables */
+ len = ret = device_property_read_u16_array(pctrl->dev, "gpios", NULL,
+ 0);
+ if (ret < 0)
+ return 0;
+
+ if (ret > max_gpios)
+ return -EINVAL;
+
+ tmp = kmalloc_array(len, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = device_property_read_u16_array(pctrl->dev, "gpios", tmp, len);
+ if (ret < 0) {
+ dev_err(pctrl->dev, "could not read list of GPIOs\n");
+ goto out;
+ }
+
+ bitmap_zero(chip->valid_mask, max_gpios);
+ for (i = 0; i < len; i++)
+ set_bit(tmp[i], chip->valid_mask);
+
+out:
+ kfree(tmp);
+ return ret;
+}
+
static const struct gpio_chip msm_gpio_template = {
.direction_input = msm_gpio_direction_input,
.direction_output = msm_gpio_direction_output,
@@ -575,6 +646,7 @@ static const struct gpio_chip msm_gpio_template = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.dbg_show = msm_gpio_dbg_show,
+ .init_valid_mask = msm_gpio_init_valid_mask,
};
/* For dual-edge interrupts in software, since some hardware has no
@@ -606,14 +678,14 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
unsigned pol;
do {
- val = readl(pctrl->regs + g->io_reg) & BIT(g->in_bit);
+ val = msm_readl_io(pctrl, g) & BIT(g->in_bit);
- pol = readl(pctrl->regs + g->intr_cfg_reg);
+ pol = msm_readl_intr_cfg(pctrl, g);
pol ^= BIT(g->intr_polarity_bit);
- writel(pol, pctrl->regs + g->intr_cfg_reg);
+ msm_writel_intr_cfg(pol, pctrl, g);
- val2 = readl(pctrl->regs + g->io_reg) & BIT(g->in_bit);
- intstat = readl(pctrl->regs + g->intr_status_reg);
+ val2 = msm_readl_io(pctrl, g) & BIT(g->in_bit);
+ intstat = msm_readl_intr_status(pctrl, g);
if (intstat || (val == val2))
return;
} while (loop_limit-- > 0);
@@ -633,7 +705,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->intr_cfg_reg);
+ val = msm_readl_intr_cfg(pctrl, g);
/*
* There are two bits that control interrupt forwarding to the CPU. The
* RAW_STATUS_EN bit causes the level or edge sensed on the line to be
@@ -658,7 +730,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
val &= ~BIT(g->intr_raw_status_bit);
val &= ~BIT(g->intr_enable_bit);
- writel(val, pctrl->regs + g->intr_cfg_reg);
+ msm_writel_intr_cfg(val, pctrl, g);
clear_bit(d->hwirq, pctrl->enabled_irqs);
@@ -677,10 +749,10 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->intr_cfg_reg);
+ val = msm_readl_intr_cfg(pctrl, g);
val |= BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_enable_bit);
- writel(val, pctrl->regs + g->intr_cfg_reg);
+ msm_writel_intr_cfg(val, pctrl, g);
set_bit(d->hwirq, pctrl->enabled_irqs);
@@ -699,12 +771,12 @@ static void msm_gpio_irq_ack(struct irq_data *d)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->intr_status_reg);
+ val = msm_readl_intr_status(pctrl, g);
if (g->intr_ack_high)
val |= BIT(g->intr_status_bit);
else
val &= ~BIT(g->intr_status_bit);
- writel(val, pctrl->regs + g->intr_status_reg);
+ msm_writel_intr_status(val, pctrl, g);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -733,17 +805,17 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
clear_bit(d->hwirq, pctrl->dual_edge_irqs);
/* Route interrupts to application cpu */
- val = readl(pctrl->regs + g->intr_target_reg);
+ val = msm_readl_intr_target(pctrl, g);
val &= ~(7 << g->intr_target_bit);
val |= g->intr_target_kpss_val << g->intr_target_bit;
- writel(val, pctrl->regs + g->intr_target_reg);
+ msm_writel_intr_target(val, pctrl, g);
/* Update configuration for gpio.
* RAW_STATUS_EN is left on for all gpio irqs. Due to the
* internal circuitry of TLMM, toggling the RAW_STATUS
* could cause the INTR_STATUS to be set for EDGE interrupts.
*/
- val = readl(pctrl->regs + g->intr_cfg_reg);
+ val = msm_readl_intr_cfg(pctrl, g);
val |= BIT(g->intr_raw_status_bit);
if (g->intr_detection_width == 2) {
val &= ~(3 << g->intr_detection_bit);
@@ -791,7 +863,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
} else {
BUG();
}
- writel(val, pctrl->regs + g->intr_cfg_reg);
+ msm_writel_intr_cfg(val, pctrl, g);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -821,6 +893,41 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
+static int msm_gpio_irq_reqres(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ int ret;
+
+ if (!try_module_get(gc->owner))
+ return -ENODEV;
+
+ ret = msm_pinmux_request_gpio(pctrl->pctrl, NULL, d->hwirq);
+ if (ret)
+ goto out;
+ msm_gpio_direction_input(gc, d->hwirq);
+
+ if (gpiochip_lock_as_irq(gc, d->hwirq)) {
+ dev_err(gc->parent,
+ "unable to lock HW IRQ %lu for IRQ\n",
+ d->hwirq);
+ ret = -EINVAL;
+ goto out;
+ }
+ return 0;
+out:
+ module_put(gc->owner);
+ return ret;
+}
+
+static void msm_gpio_irq_relres(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ gpiochip_unlock_as_irq(gc, d->hwirq);
+ module_put(gc->owner);
+}
+
static void msm_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -840,7 +947,7 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
*/
for_each_set_bit(i, pctrl->enabled_irqs, pctrl->chip.ngpio) {
g = &pctrl->soc->groups[i];
- val = readl(pctrl->regs + g->intr_status_reg);
+ val = msm_readl_intr_status(pctrl, g);
if (val & BIT(g->intr_status_bit)) {
irq_pin = irq_find_mapping(gc->irq.domain, i);
generic_handle_irq(irq_pin);
@@ -855,41 +962,6 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int msm_gpio_init_valid_mask(struct gpio_chip *chip,
- struct msm_pinctrl *pctrl)
-{
- int ret;
- unsigned int len, i;
- unsigned int max_gpios = pctrl->soc->ngpios;
- u16 *tmp;
-
- /* The number of GPIOs in the ACPI tables */
- len = ret = device_property_read_u16_array(pctrl->dev, "gpios", NULL, 0);
- if (ret < 0)
- return 0;
-
- if (ret > max_gpios)
- return -EINVAL;
-
- tmp = kmalloc_array(len, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- ret = device_property_read_u16_array(pctrl->dev, "gpios", tmp, len);
- if (ret < 0) {
- dev_err(pctrl->dev, "could not read list of GPIOs\n");
- goto out;
- }
-
- bitmap_zero(chip->valid_mask, max_gpios);
- for (i = 0; i < len; i++)
- set_bit(tmp[i], chip->valid_mask);
-
-out:
- kfree(tmp);
- return ret;
-}
-
static bool msm_gpio_needs_valid_mask(struct msm_pinctrl *pctrl)
{
return device_property_read_u16_array(pctrl->dev, "gpios", NULL, 0) > 0;
@@ -919,6 +991,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
+ pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
+ pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
@@ -926,13 +1000,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
return ret;
}
- ret = msm_gpio_init_valid_mask(chip, pctrl);
- if (ret) {
- dev_err(pctrl->dev, "Failed to setup irq valid bits\n");
- gpiochip_remove(&pctrl->chip);
- return ret;
- }
-
/*
* For DeviceTree-supported systems, the gpio core checks the
* pinctrl's device node for the "gpio-ranges" property.
@@ -975,7 +1042,7 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
{
struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb);
- writel(0, pctrl->regs + PS_HOLD_OFFSET);
+ writel(0, pctrl->regs[0] + PS_HOLD_OFFSET);
mdelay(1000);
return NOTIFY_DONE;
}
@@ -1011,6 +1078,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
struct msm_pinctrl *pctrl;
struct resource *res;
int ret;
+ int i;
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
@@ -1022,10 +1090,20 @@ int msm_pinctrl_probe(struct platform_device *pdev,
raw_spin_lock_init(&pctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pctrl->regs))
- return PTR_ERR(pctrl->regs);
+ if (soc_data->tiles) {
+ for (i = 0; i < soc_data->ntiles; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ soc_data->tiles[i]);
+ pctrl->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctrl->regs[i]))
+ return PTR_ERR(pctrl->regs[i]);
+ }
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pctrl->regs[0] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctrl->regs[0]))
+ return PTR_ERR(pctrl->regs[0]);
+ }
msm_pinctrl_setup_pm_reset(pctrl);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 9b9feea540ff..29172fdf5882 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -76,6 +76,8 @@ struct msm_pingroup {
u32 intr_status_reg;
u32 intr_target_reg;
+ unsigned int tile:2;
+
unsigned mux_bit:5;
unsigned pull_bit:5;
@@ -117,6 +119,8 @@ struct msm_pinctrl_soc_data {
unsigned ngroups;
unsigned ngpios;
bool pull_no_keeper;
+ const char *const *tiles;
+ unsigned int ntiles;
};
int msm_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
new file mode 100644
index 000000000000..7aae52a09ff0
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
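+/*
+ * The tile names double as the resource names that msm_pinctrl_probe()
+ * uses to look up each tile's MMIO region; the enum below indexes into
+ * this array.
+ */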
+static const char * const qcs404_tiles[] = {
+ "north",
+ "south",
+ "east"
+};
+
+enum {
+ NORTH,
+ SOUTH,
+ EAST
+};
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
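+/*
+ * Each pin group's registers sit at a 0x1000 stride within its tile; note
+ * that the interrupt target bits share the 0x8 register offset with the
+ * interrupt configuration register on this SoC.
+ */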
+#define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = 0x1000 * id, \
+ .io_reg = 0x1000 * id + 0x4, \
+ .intr_cfg_reg = 0x1000 * id + 0x8, \
+ .intr_status_reg = 0x1000 * id + 0xc, \
+ .intr_target_reg = 0x1000 * id + 0x8, \
+ .tile = _tile, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .tile = NORTH, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .tile = NORTH, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
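As a quick sanity check on the PINGROUP() arithmetic above: each QCS404 pin owns a 0x1000-byte register window, so gpio5, for instance, lands at ctl 0x5000, io 0x5004, intr_cfg 0x5008 and intr_status 0x500c, with intr_target sharing the intr_cfg register (its field sits at bit 5). Expressed as a small illustrative helper, not part of the patch:

/* Illustrative only: the per-pin offsets PINGROUP() computes. */
#define QCS404_PIN_STRIDE	0x1000

static inline u32 qcs404_ctl_reg(unsigned int id)
{
	return QCS404_PIN_STRIDE * id;		/* gpio5 -> 0x5000 */
}

static inline u32 qcs404_io_reg(unsigned int id)
{
	return QCS404_PIN_STRIDE * id + 0x4;	/* gpio5 -> 0x5004 */
}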
+static const struct pinctrl_pin_desc qcs404_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "SDC1_RCLK"),
+ PINCTRL_PIN(121, "SDC1_CLK"),
+ PINCTRL_PIN(122, "SDC1_CMD"),
+ PINCTRL_PIN(123, "SDC1_DATA"),
+ PINCTRL_PIN(124, "SDC2_CLK"),
+ PINCTRL_PIN(125, "SDC2_CMD"),
+ PINCTRL_PIN(126, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+
+static const unsigned int sdc1_rclk_pins[] = { 120 };
+static const unsigned int sdc1_clk_pins[] = { 121 };
+static const unsigned int sdc1_cmd_pins[] = { 122 };
+static const unsigned int sdc1_data_pins[] = { 123 };
+static const unsigned int sdc2_clk_pins[] = { 124 };
+static const unsigned int sdc2_cmd_pins[] = { 125 };
+static const unsigned int sdc2_data_pins[] = { 126 };
+
+enum qcs404_functions {
+ msm_mux_gpio,
+ msm_mux_hdmi_tx,
+ msm_mux_hdmi_ddc,
+ msm_mux_blsp_uart_tx_a2,
+ msm_mux_blsp_spi2,
+ msm_mux_m_voc,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_blsp_uart_rx_a2,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_blsp_uart2,
+ msm_mux_aud_cdc,
+ msm_mux_blsp_i2c_sda_a2,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_blsp_i2c_scl_a2,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_spi_mosi_a1,
+ msm_mux_blsp_spi_miso_a1,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_spi_cs_n_a1,
+ msm_mux_gcc_plltest,
+ msm_mux_blsp_spi_clk_a1,
+ msm_mux_rgb_data0,
+ msm_mux_blsp_uart5,
+ msm_mux_blsp_spi5,
+ msm_mux_adsp_ext,
+ msm_mux_rgb_data1,
+ msm_mux_prng_rosc,
+ msm_mux_rgb_data2,
+ msm_mux_blsp_i2c5,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_rgb_data3,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_blsp_spi0,
+ msm_mux_blsp_uart0,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_blsp_i2c0,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_pcie_clk,
+ msm_mux_nfc_irq,
+ msm_mux_blsp_spi4,
+ msm_mux_nfc_dwl,
+ msm_mux_audio_ts,
+ msm_mux_rgb_data4,
+ msm_mux_spi_lcd,
+ msm_mux_blsp_uart_tx_b2,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_rgb_data5,
+ msm_mux_blsp_uart_rx_b2,
+ msm_mux_blsp_i2c_sda_b2,
+ msm_mux_blsp_i2c_scl_b2,
+ msm_mux_pwm_led11,
+ msm_mux_i2s_3_data0_a,
+ msm_mux_ebi2_lcd,
+ msm_mux_i2s_3_data1_a,
+ msm_mux_i2s_3_data2_a,
+ msm_mux_atest_char,
+ msm_mux_pwm_led3,
+ msm_mux_i2s_3_data3_a,
+ msm_mux_pwm_led4,
+ msm_mux_i2s_4,
+ msm_mux_ebi2_a,
+ msm_mux_dsd_clk_b,
+ msm_mux_pwm_led5,
+ msm_mux_pwm_led6,
+ msm_mux_pwm_led7,
+ msm_mux_pwm_led8,
+ msm_mux_pwm_led24,
+ msm_mux_spkr_dac0,
+ msm_mux_blsp_i2c4,
+ msm_mux_pwm_led9,
+ msm_mux_pwm_led10,
+ msm_mux_spdifrx_opt,
+ msm_mux_pwm_led12,
+ msm_mux_pwm_led13,
+ msm_mux_pwm_led14,
+ msm_mux_wlan1_adc1,
+ msm_mux_rgb_data_b0,
+ msm_mux_pwm_led15,
+ msm_mux_blsp_spi_mosi_b1,
+ msm_mux_wlan1_adc0,
+ msm_mux_rgb_data_b1,
+ msm_mux_pwm_led16,
+ msm_mux_blsp_spi_miso_b1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_wlan2_adc1,
+ msm_mux_rgb_data_b2,
+ msm_mux_pwm_led17,
+ msm_mux_blsp_spi_cs_n_b1,
+ msm_mux_wlan2_adc0,
+ msm_mux_rgb_data_b3,
+ msm_mux_pwm_led18,
+ msm_mux_blsp_spi_clk_b1,
+ msm_mux_rgb_data_b4,
+ msm_mux_pwm_led19,
+ msm_mux_ext_mclk1_b,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_rgb_data_b5,
+ msm_mux_pwm_led20,
+ msm_mux_atest_char3,
+ msm_mux_i2s_3_sck_b,
+ msm_mux_ldo_update,
+ msm_mux_bimc_dte0,
+ msm_mux_rgb_hsync,
+ msm_mux_pwm_led21,
+ msm_mux_i2s_3_ws_b,
+ msm_mux_dbg_out,
+ msm_mux_rgb_vsync,
+ msm_mux_i2s_3_data0_b,
+ msm_mux_ldo_en,
+ msm_mux_hdmi_dtest,
+ msm_mux_rgb_de,
+ msm_mux_i2s_3_data1_b,
+ msm_mux_hdmi_lbk9,
+ msm_mux_rgb_clk,
+ msm_mux_atest_char1,
+ msm_mux_i2s_3_data2_b,
+ msm_mux_ebi_cdc,
+ msm_mux_hdmi_lbk8,
+ msm_mux_rgb_mdp,
+ msm_mux_atest_char0,
+ msm_mux_i2s_3_data3_b,
+ msm_mux_hdmi_lbk7,
+ msm_mux_rgb_data_b6,
+ msm_mux_rgb_data_b7,
+ msm_mux_hdmi_lbk6,
+ msm_mux_rgmii_int,
+ msm_mux_cri_trng1,
+ msm_mux_rgmii_wol,
+ msm_mux_cri_trng0,
+ msm_mux_gcc_tlmm,
+ msm_mux_rgmii_ck,
+ msm_mux_rgmii_tx,
+ msm_mux_hdmi_lbk5,
+ msm_mux_hdmi_pixel,
+ msm_mux_hdmi_rcv,
+ msm_mux_hdmi_lbk4,
+ msm_mux_rgmii_ctl,
+ msm_mux_ext_lpass,
+ msm_mux_rgmii_rx,
+ msm_mux_cri_trng,
+ msm_mux_hdmi_lbk3,
+ msm_mux_hdmi_lbk2,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_rgmii_mdio,
+ msm_mux_hdmi_lbk1,
+ msm_mux_rgmii_mdc,
+ msm_mux_hdmi_lbk0,
+ msm_mux_ir_in,
+ msm_mux_wsa_en,
+ msm_mux_rgb_data6,
+ msm_mux_rgb_data7,
+ msm_mux_atest_char2,
+ msm_mux_ebi_ch0,
+ msm_mux_blsp_uart3,
+ msm_mux_blsp_spi3,
+ msm_mux_sd_write,
+ msm_mux_blsp_i2c3,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_ext_mclk0,
+ msm_mux_mclk_in1,
+ msm_mux_i2s_1,
+ msm_mux_dsd_clk_a,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_rgmi_dll1,
+ msm_mux_pwm_led22,
+ msm_mux_pwm_led23,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_rgmi_dll2,
+ msm_mux_pwm_led1,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_pwm_led2,
+ msm_mux_i2s_2,
+ msm_mux_pll_bist,
+ msm_mux_ext_mclk1_a,
+ msm_mux_mclk_in2,
+ msm_mux_bimc_dte1,
+ msm_mux_i2s_3_sck_a,
+ msm_mux_i2s_3_ws_a,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio21", "gpio21", "gpio22", "gpio22", "gpio23", "gpio23", "gpio24",
+ "gpio25", "gpio26", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio32", "gpio33", "gpio34", "gpio35", "gpio36", "gpio36", "gpio36",
+ "gpio36", "gpio37", "gpio37", "gpio37", "gpio38", "gpio38", "gpio38",
+ "gpio39", "gpio39", "gpio40", "gpio40", "gpio41", "gpio41", "gpio41",
+ "gpio42", "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48",
+ "gpio49", "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55",
+ "gpio56", "gpio57", "gpio58", "gpio59", "gpio59", "gpio60", "gpio61",
+ "gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68",
+ "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
+ "gpio76", "gpio77", "gpio77", "gpio78", "gpio78", "gpio78", "gpio79",
+ "gpio79", "gpio79", "gpio80", "gpio81", "gpio81", "gpio82", "gpio83",
+ "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90",
+ "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97",
+ "gpio98", "gpio99", "gpio100", "gpio101", "gpio102", "gpio103",
+ "gpio104", "gpio105", "gpio106", "gpio107", "gpio108", "gpio108",
+ "gpio108", "gpio109", "gpio109", "gpio110", "gpio111", "gpio112",
+ "gpio113", "gpio114", "gpio115", "gpio116", "gpio117", "gpio118",
+ "gpio119",
+};
+
+static const char * const hdmi_tx_groups[] = {
+ "gpio14",
+};
+
+static const char * const hdmi_ddc_groups[] = {
+ "gpio15", "gpio16",
+};
+
+static const char * const blsp_uart_tx_a2_groups[] = {
+ "gpio17",
+};
+
+static const char * const blsp_spi2_groups[] = {
+ "gpio17", "gpio18", "gpio19", "gpio20",
+};
+
+static const char * const m_voc_groups[] = {
+ "gpio17", "gpio21",
+};
+
+static const char * const qdss_cti_trig_in_a0_groups[] = {
+ "gpio17",
+};
+
+static const char * const blsp_uart_rx_a2_groups[] = {
+ "gpio18",
+};
+
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio18",
+};
+
+static const char * const blsp_uart2_groups[] = {
+ "gpio19", "gpio20",
+};
+
+static const char * const aud_cdc_groups[] = {
+ "gpio19", "gpio20",
+};
+
+static const char * const blsp_i2c_sda_a2_groups[] = {
+ "gpio19",
+};
+
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio19", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio30",
+ "gpio31", "gpio32", "gpio36", "gpio38", "gpio39", "gpio42", "gpio43",
+ "gpio82", "gpio83",
+};
+
+static const char * const blsp_i2c_scl_a2_groups[] = {
+ "gpio20",
+};
+
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio20",
+};
+
+static const char * const qdss_cti_trig_in_b0_groups[] = {
+ "gpio21",
+};
+
+static const char * const blsp_uart1_groups[] = {
+ "gpio22", "gpio23", "gpio24", "gpio25",
+};
+
+static const char * const blsp_spi_mosi_a1_groups[] = {
+ "gpio22",
+};
+
+static const char * const blsp_spi_miso_a1_groups[] = {
+ "gpio23",
+};
+
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio23", "gpio35", "gpio40", "gpio41", "gpio44", "gpio45", "gpio46",
+ "gpio47", "gpio49", "gpio50", "gpio55", "gpio61", "gpio62", "gpio85",
+ "gpio89", "gpio93",
+};
+
+static const char * const blsp_i2c1_groups[] = {
+ "gpio24", "gpio25",
+};
+
+static const char * const blsp_spi_cs_n_a1_groups[] = {
+ "gpio24",
+};
+
+static const char * const gcc_plltest_groups[] = {
+ "gpio24", "gpio25",
+};
+
+static const char * const blsp_spi_clk_a1_groups[] = {
+ "gpio25",
+};
+
+static const char * const rgb_data0_groups[] = {
+ "gpio26", "gpio41",
+};
+
+static const char * const blsp_uart5_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio29",
+};
+
+static const char * const blsp_spi5_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio29", "gpio44", "gpio45", "gpio46",
+};
+
+static const char * const adsp_ext_groups[] = {
+ "gpio26",
+};
+
+static const char * const rgb_data1_groups[] = {
+ "gpio27", "gpio42",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio27",
+};
+
+static const char * const rgb_data2_groups[] = {
+ "gpio28", "gpio43",
+};
+
+static const char * const blsp_i2c5_groups[] = {
+ "gpio28", "gpio29",
+};
+
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio28",
+};
+
+static const char * const rgb_data3_groups[] = {
+ "gpio29", "gpio44",
+};
+
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio29",
+};
+
+static const char * const blsp_spi0_groups[] = {
+ "gpio30", "gpio31", "gpio32", "gpio33",
+};
+
+static const char * const blsp_uart0_groups[] = {
+ "gpio30", "gpio31", "gpio32", "gpio33",
+};
+
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio30",
+};
+
+static const char * const blsp_i2c0_groups[] = {
+ "gpio32", "gpio33",
+};
+
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio34",
+};
+
+static const char * const pcie_clk_groups[] = {
+ "gpio35",
+};
+
+static const char * const nfc_irq_groups[] = {
+ "gpio37",
+};
+
+static const char * const blsp_spi4_groups[] = {
+ "gpio37", "gpio38", "gpio117", "gpio118",
+};
+
+static const char * const nfc_dwl_groups[] = {
+ "gpio38",
+};
+
+static const char * const audio_ts_groups[] = {
+ "gpio38",
+};
+
+static const char * const rgb_data4_groups[] = {
+ "gpio39", "gpio45",
+};
+
+static const char * const spi_lcd_groups[] = {
+ "gpio39", "gpio40",
+};
+
+static const char * const blsp_uart_tx_b2_groups[] = {
+ "gpio39",
+};
+
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio39",
+};
+
+static const char * const rgb_data5_groups[] = {
+ "gpio40", "gpio46",
+};
+
+static const char * const blsp_uart_rx_b2_groups[] = {
+ "gpio40",
+};
+
+static const char * const blsp_i2c_sda_b2_groups[] = {
+ "gpio41",
+};
+
+static const char * const blsp_i2c_scl_b2_groups[] = {
+ "gpio42",
+};
+
+static const char * const pwm_led11_groups[] = {
+ "gpio43",
+};
+
+static const char * const i2s_3_data0_a_groups[] = {
+ "gpio106",
+};
+
+static const char * const ebi2_lcd_groups[] = {
+ "gpio106", "gpio107", "gpio108", "gpio109",
+};
+
+static const char * const i2s_3_data1_a_groups[] = {
+ "gpio107",
+};
+
+static const char * const i2s_3_data2_a_groups[] = {
+ "gpio108",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio108",
+};
+
+static const char * const pwm_led3_groups[] = {
+ "gpio108",
+};
+
+static const char * const i2s_3_data3_a_groups[] = {
+ "gpio109",
+};
+
+static const char * const pwm_led4_groups[] = {
+ "gpio109",
+};
+
+static const char * const i2s_4_groups[] = {
+ "gpio110", "gpio111", "gpio111", "gpio112", "gpio112", "gpio113",
+ "gpio113", "gpio114", "gpio114", "gpio115", "gpio115", "gpio116",
+};
+
+static const char * const ebi2_a_groups[] = {
+ "gpio110",
+};
+
+static const char * const dsd_clk_b_groups[] = {
+ "gpio110",
+};
+
+static const char * const pwm_led5_groups[] = {
+ "gpio110",
+};
+
+static const char * const pwm_led6_groups[] = {
+ "gpio111",
+};
+
+static const char * const pwm_led7_groups[] = {
+ "gpio112",
+};
+
+static const char * const pwm_led8_groups[] = {
+ "gpio113",
+};
+
+static const char * const pwm_led24_groups[] = {
+ "gpio114",
+};
+
+static const char * const spkr_dac0_groups[] = {
+ "gpio116",
+};
+
+static const char * const blsp_i2c4_groups[] = {
+ "gpio117", "gpio118",
+};
+
+static const char * const pwm_led9_groups[] = {
+ "gpio117",
+};
+
+static const char * const pwm_led10_groups[] = {
+ "gpio118",
+};
+
+static const char * const spdifrx_opt_groups[] = {
+ "gpio119",
+};
+
+static const char * const pwm_led12_groups[] = {
+ "gpio44",
+};
+
+static const char * const pwm_led13_groups[] = {
+ "gpio45",
+};
+
+static const char * const pwm_led14_groups[] = {
+ "gpio46",
+};
+
+static const char * const wlan1_adc1_groups[] = {
+ "gpio46",
+};
+
+static const char * const rgb_data_b0_groups[] = {
+ "gpio47",
+};
+
+static const char * const pwm_led15_groups[] = {
+ "gpio47",
+};
+
+static const char * const blsp_spi_mosi_b1_groups[] = {
+ "gpio47",
+};
+
+static const char * const wlan1_adc0_groups[] = {
+ "gpio47",
+};
+
+static const char * const rgb_data_b1_groups[] = {
+ "gpio48",
+};
+
+static const char * const pwm_led16_groups[] = {
+ "gpio48",
+};
+
+static const char * const blsp_spi_miso_b1_groups[] = {
+ "gpio48",
+};
+
+static const char * const qdss_cti_trig_out_b0_groups[] = {
+ "gpio48",
+};
+
+static const char * const wlan2_adc1_groups[] = {
+ "gpio48",
+};
+
+static const char * const rgb_data_b2_groups[] = {
+ "gpio49",
+};
+
+static const char * const pwm_led17_groups[] = {
+ "gpio49",
+};
+
+static const char * const blsp_spi_cs_n_b1_groups[] = {
+ "gpio49",
+};
+
+static const char * const wlan2_adc0_groups[] = {
+ "gpio49",
+};
+
+static const char * const rgb_data_b3_groups[] = {
+ "gpio50",
+};
+
+static const char * const pwm_led18_groups[] = {
+ "gpio50",
+};
+
+static const char * const blsp_spi_clk_b1_groups[] = {
+ "gpio50",
+};
+
+static const char * const rgb_data_b4_groups[] = {
+ "gpio51",
+};
+
+static const char * const pwm_led19_groups[] = {
+ "gpio51",
+};
+
+static const char * const ext_mclk1_b_groups[] = {
+ "gpio51",
+};
+
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio51",
+};
+
+static const char * const rgb_data_b5_groups[] = {
+ "gpio52",
+};
+
+static const char * const pwm_led20_groups[] = {
+ "gpio52",
+};
+
+static const char * const atest_char3_groups[] = {
+ "gpio52",
+};
+
+static const char * const i2s_3_sck_b_groups[] = {
+ "gpio52",
+};
+
+static const char * const ldo_update_groups[] = {
+ "gpio52",
+};
+
+static const char * const bimc_dte0_groups[] = {
+ "gpio52", "gpio54",
+};
+
+static const char * const rgb_hsync_groups[] = {
+ "gpio53",
+};
+
+static const char * const pwm_led21_groups[] = {
+ "gpio53",
+};
+
+static const char * const i2s_3_ws_b_groups[] = {
+ "gpio53",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio53",
+};
+
+static const char * const rgb_vsync_groups[] = {
+ "gpio54",
+};
+
+static const char * const i2s_3_data0_b_groups[] = {
+ "gpio54",
+};
+
+static const char * const ldo_en_groups[] = {
+ "gpio54",
+};
+
+static const char * const hdmi_dtest_groups[] = {
+ "gpio54",
+};
+
+static const char * const rgb_de_groups[] = {
+ "gpio55",
+};
+
+static const char * const i2s_3_data1_b_groups[] = {
+ "gpio55",
+};
+
+static const char * const hdmi_lbk9_groups[] = {
+ "gpio55",
+};
+
+static const char * const rgb_clk_groups[] = {
+ "gpio56",
+};
+
+static const char * const atest_char1_groups[] = {
+ "gpio56",
+};
+
+static const char * const i2s_3_data2_b_groups[] = {
+ "gpio56",
+};
+
+static const char * const ebi_cdc_groups[] = {
+ "gpio56", "gpio58", "gpio106", "gpio107", "gpio108", "gpio111",
+};
+
+static const char * const hdmi_lbk8_groups[] = {
+ "gpio56",
+};
+
+static const char * const rgb_mdp_groups[] = {
+ "gpio57",
+};
+
+static const char * const atest_char0_groups[] = {
+ "gpio57",
+};
+
+static const char * const i2s_3_data3_b_groups[] = {
+ "gpio57",
+};
+
+static const char * const hdmi_lbk7_groups[] = {
+ "gpio57",
+};
+
+static const char * const rgb_data_b6_groups[] = {
+ "gpio58",
+};
+
+static const char * const rgb_data_b7_groups[] = {
+ "gpio59",
+};
+
+static const char * const hdmi_lbk6_groups[] = {
+ "gpio59",
+};
+
+static const char * const rgmii_int_groups[] = {
+ "gpio61",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio61",
+};
+
+static const char * const rgmii_wol_groups[] = {
+ "gpio62",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio62",
+};
+
+static const char * const gcc_tlmm_groups[] = {
+ "gpio62",
+};
+
+static const char * const rgmii_ck_groups[] = {
+ "gpio63", "gpio69",
+};
+
+static const char * const rgmii_tx_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67",
+};
+
+static const char * const hdmi_lbk5_groups[] = {
+ "gpio64",
+};
+
+static const char * const hdmi_pixel_groups[] = {
+ "gpio65",
+};
+
+static const char * const hdmi_rcv_groups[] = {
+ "gpio66",
+};
+
+static const char * const hdmi_lbk4_groups[] = {
+ "gpio67",
+};
+
+static const char * const rgmii_ctl_groups[] = {
+ "gpio68", "gpio74",
+};
+
+static const char * const ext_lpass_groups[] = {
+ "gpio69",
+};
+
+static const char * const rgmii_rx_groups[] = {
+ "gpio70", "gpio71", "gpio72", "gpio73",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio70",
+};
+
+static const char * const hdmi_lbk3_groups[] = {
+ "gpio71",
+};
+
+static const char * const hdmi_lbk2_groups[] = {
+ "gpio72",
+};
+
+static const char * const qdss_cti_trig_out_b1_groups[] = {
+ "gpio73",
+};
+
+static const char * const rgmii_mdio_groups[] = {
+ "gpio75",
+};
+
+static const char * const hdmi_lbk1_groups[] = {
+ "gpio75",
+};
+
+static const char * const rgmii_mdc_groups[] = {
+ "gpio76",
+};
+
+static const char * const hdmi_lbk0_groups[] = {
+ "gpio76",
+};
+
+static const char * const ir_in_groups[] = {
+ "gpio77",
+};
+
+static const char * const wsa_en_groups[] = {
+ "gpio77",
+};
+
+static const char * const rgb_data6_groups[] = {
+ "gpio78", "gpio80",
+};
+
+static const char * const rgb_data7_groups[] = {
+ "gpio79", "gpio81",
+};
+
+static const char * const atest_char2_groups[] = {
+ "gpio80",
+};
+
+static const char * const ebi_ch0_groups[] = {
+ "gpio81",
+};
+
+static const char * const blsp_uart3_groups[] = {
+ "gpio82", "gpio83", "gpio84", "gpio85",
+};
+
+static const char * const blsp_spi3_groups[] = {
+ "gpio82", "gpio83", "gpio84", "gpio85",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio82",
+};
+
+static const char * const blsp_i2c3_groups[] = {
+ "gpio84", "gpio85",
+};
+
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio84",
+};
+
+static const char * const qdss_cti_trig_in_b1_groups[] = {
+ "gpio84",
+};
+
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio85",
+};
+
+static const char * const ext_mclk0_groups[] = {
+ "gpio86",
+};
+
+static const char * const mclk_in1_groups[] = {
+ "gpio86",
+};
+
+static const char * const i2s_1_groups[] = {
+ "gpio87", "gpio88", "gpio88", "gpio89", "gpio89", "gpio90", "gpio90",
+ "gpio91", "gpio91", "gpio92", "gpio92", "gpio93", "gpio93", "gpio94",
+ "gpio94", "gpio95", "gpio95", "gpio96",
+};
+
+static const char * const dsd_clk_a_groups[] = {
+ "gpio87",
+};
+
+static const char * const qdss_cti_trig_in_a1_groups[] = {
+ "gpio92",
+};
+
+static const char * const rgmi_dll1_groups[] = {
+ "gpio92",
+};
+
+static const char * const pwm_led22_groups[] = {
+ "gpio93",
+};
+
+static const char * const pwm_led23_groups[] = {
+ "gpio94",
+};
+
+static const char * const qdss_cti_trig_out_a0_groups[] = {
+ "gpio94",
+};
+
+static const char * const rgmi_dll2_groups[] = {
+ "gpio94",
+};
+
+static const char * const pwm_led1_groups[] = {
+ "gpio95",
+};
+
+static const char * const qdss_cti_trig_out_a1_groups[] = {
+ "gpio95",
+};
+
+static const char * const pwm_led2_groups[] = {
+ "gpio96",
+};
+
+static const char * const i2s_2_groups[] = {
+ "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102",
+};
+
+static const char * const pll_bist_groups[] = {
+ "gpio100",
+};
+
+static const char * const ext_mclk1_a_groups[] = {
+ "gpio103",
+};
+
+static const char * const mclk_in2_groups[] = {
+ "gpio103",
+};
+
+static const char * const bimc_dte1_groups[] = {
+ "gpio103", "gpio109",
+};
+
+static const char * const i2s_3_sck_a_groups[] = {
+ "gpio104",
+};
+
+static const char * const i2s_3_ws_a_groups[] = {
+ "gpio105",
+};
+
+static const struct msm_function qcs404_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(hdmi_tx),
+ FUNCTION(hdmi_ddc),
+ FUNCTION(blsp_uart_tx_a2),
+ FUNCTION(blsp_spi2),
+ FUNCTION(m_voc),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(blsp_uart_rx_a2),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(blsp_uart2),
+ FUNCTION(aud_cdc),
+ FUNCTION(blsp_i2c_sda_a2),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(blsp_i2c_scl_a2),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_spi_mosi_a1),
+ FUNCTION(blsp_spi_miso_a1),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_spi_cs_n_a1),
+ FUNCTION(gcc_plltest),
+ FUNCTION(blsp_spi_clk_a1),
+ FUNCTION(rgb_data0),
+ FUNCTION(blsp_uart5),
+ FUNCTION(blsp_spi5),
+ FUNCTION(adsp_ext),
+ FUNCTION(rgb_data1),
+ FUNCTION(prng_rosc),
+ FUNCTION(rgb_data2),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(rgb_data3),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(blsp_spi0),
+ FUNCTION(blsp_uart0),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(blsp_i2c0),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(pcie_clk),
+ FUNCTION(nfc_irq),
+ FUNCTION(blsp_spi4),
+ FUNCTION(nfc_dwl),
+ FUNCTION(audio_ts),
+ FUNCTION(rgb_data4),
+ FUNCTION(spi_lcd),
+ FUNCTION(blsp_uart_tx_b2),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(rgb_data5),
+ FUNCTION(blsp_uart_rx_b2),
+ FUNCTION(blsp_i2c_sda_b2),
+ FUNCTION(blsp_i2c_scl_b2),
+ FUNCTION(pwm_led11),
+ FUNCTION(i2s_3_data0_a),
+ FUNCTION(ebi2_lcd),
+ FUNCTION(i2s_3_data1_a),
+ FUNCTION(i2s_3_data2_a),
+ FUNCTION(atest_char),
+ FUNCTION(pwm_led3),
+ FUNCTION(i2s_3_data3_a),
+ FUNCTION(pwm_led4),
+ FUNCTION(i2s_4),
+ FUNCTION(ebi2_a),
+ FUNCTION(dsd_clk_b),
+ FUNCTION(pwm_led5),
+ FUNCTION(pwm_led6),
+ FUNCTION(pwm_led7),
+ FUNCTION(pwm_led8),
+ FUNCTION(pwm_led24),
+ FUNCTION(spkr_dac0),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(pwm_led9),
+ FUNCTION(pwm_led10),
+ FUNCTION(spdifrx_opt),
+ FUNCTION(pwm_led12),
+ FUNCTION(pwm_led13),
+ FUNCTION(pwm_led14),
+ FUNCTION(wlan1_adc1),
+ FUNCTION(rgb_data_b0),
+ FUNCTION(pwm_led15),
+ FUNCTION(blsp_spi_mosi_b1),
+ FUNCTION(wlan1_adc0),
+ FUNCTION(rgb_data_b1),
+ FUNCTION(pwm_led16),
+ FUNCTION(blsp_spi_miso_b1),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(wlan2_adc1),
+ FUNCTION(rgb_data_b2),
+ FUNCTION(pwm_led17),
+ FUNCTION(blsp_spi_cs_n_b1),
+ FUNCTION(wlan2_adc0),
+ FUNCTION(rgb_data_b3),
+ FUNCTION(pwm_led18),
+ FUNCTION(blsp_spi_clk_b1),
+ FUNCTION(rgb_data_b4),
+ FUNCTION(pwm_led19),
+ FUNCTION(ext_mclk1_b),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(rgb_data_b5),
+ FUNCTION(pwm_led20),
+ FUNCTION(atest_char3),
+ FUNCTION(i2s_3_sck_b),
+ FUNCTION(ldo_update),
+ FUNCTION(bimc_dte0),
+ FUNCTION(rgb_hsync),
+ FUNCTION(pwm_led21),
+ FUNCTION(i2s_3_ws_b),
+ FUNCTION(dbg_out),
+ FUNCTION(rgb_vsync),
+ FUNCTION(i2s_3_data0_b),
+ FUNCTION(ldo_en),
+ FUNCTION(hdmi_dtest),
+ FUNCTION(rgb_de),
+ FUNCTION(i2s_3_data1_b),
+ FUNCTION(hdmi_lbk9),
+ FUNCTION(rgb_clk),
+ FUNCTION(atest_char1),
+ FUNCTION(i2s_3_data2_b),
+ FUNCTION(ebi_cdc),
+ FUNCTION(hdmi_lbk8),
+ FUNCTION(rgb_mdp),
+ FUNCTION(atest_char0),
+ FUNCTION(i2s_3_data3_b),
+ FUNCTION(hdmi_lbk7),
+ FUNCTION(rgb_data_b6),
+ FUNCTION(rgb_data_b7),
+ FUNCTION(hdmi_lbk6),
+ FUNCTION(rgmii_int),
+ FUNCTION(cri_trng1),
+ FUNCTION(rgmii_wol),
+ FUNCTION(cri_trng0),
+ FUNCTION(gcc_tlmm),
+ FUNCTION(rgmii_ck),
+ FUNCTION(rgmii_tx),
+ FUNCTION(hdmi_lbk5),
+ FUNCTION(hdmi_pixel),
+ FUNCTION(hdmi_rcv),
+ FUNCTION(hdmi_lbk4),
+ FUNCTION(rgmii_ctl),
+ FUNCTION(ext_lpass),
+ FUNCTION(rgmii_rx),
+ FUNCTION(cri_trng),
+ FUNCTION(hdmi_lbk3),
+ FUNCTION(hdmi_lbk2),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(rgmii_mdio),
+ FUNCTION(hdmi_lbk1),
+ FUNCTION(rgmii_mdc),
+ FUNCTION(hdmi_lbk0),
+ FUNCTION(ir_in),
+ FUNCTION(wsa_en),
+ FUNCTION(rgb_data6),
+ FUNCTION(rgb_data7),
+ FUNCTION(atest_char2),
+ FUNCTION(ebi_ch0),
+ FUNCTION(blsp_uart3),
+ FUNCTION(blsp_spi3),
+ FUNCTION(sd_write),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(ext_mclk0),
+ FUNCTION(mclk_in1),
+ FUNCTION(i2s_1),
+ FUNCTION(dsd_clk_a),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(rgmi_dll1),
+ FUNCTION(pwm_led22),
+ FUNCTION(pwm_led23),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(rgmi_dll2),
+ FUNCTION(pwm_led1),
+ FUNCTION(qdss_cti_trig_out_a1),
+ FUNCTION(pwm_led2),
+ FUNCTION(i2s_2),
+ FUNCTION(pll_bist),
+ FUNCTION(ext_mclk1_a),
+ FUNCTION(mclk_in2),
+ FUNCTION(bimc_dte1),
+ FUNCTION(i2s_3_sck_a),
+ FUNCTION(i2s_3_ws_a),
+};
+
+/* Every pin is maintained as a single group, and a missing or non-existent
+ * pin is maintained as a dummy group to keep the pin group index in sync
+ * with the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup qcs404_groups[] = {
+ [0] = PINGROUP(0, SOUTH, _, _, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, SOUTH, _, _, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, SOUTH, _, _, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, SOUTH, _, _, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, SOUTH, _, _, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, SOUTH, _, _, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, SOUTH, _, _, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, SOUTH, _, _, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, SOUTH, _, _, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, SOUTH, _, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, SOUTH, _, _, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, SOUTH, _, _, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, SOUTH, _, _, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, SOUTH, _, _, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, SOUTH, hdmi_tx, _, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, SOUTH, hdmi_ddc, _, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, SOUTH, hdmi_ddc, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, NORTH, blsp_uart_tx_a2, blsp_spi2, m_voc, _, _, _, _, _, _),
+ [18] = PINGROUP(18, NORTH, blsp_uart_rx_a2, blsp_spi2, _, _, _, _, _, qdss_tracectl_a, _),
+ [19] = PINGROUP(19, NORTH, blsp_uart2, aud_cdc, blsp_i2c_sda_a2, blsp_spi2, _, qdss_tracedata_a, _, _, _),
+ [20] = PINGROUP(20, NORTH, blsp_uart2, aud_cdc, blsp_i2c_scl_a2, blsp_spi2, _, _, _, _, _),
+ [21] = PINGROUP(21, SOUTH, m_voc, _, _, _, _, _, _, _, qdss_cti_trig_in_b0),
+ [22] = PINGROUP(22, NORTH, blsp_uart1, blsp_spi_mosi_a1, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, NORTH, blsp_uart1, blsp_spi_miso_a1, _, _, _, _, _, qdss_tracedata_b, _),
+ [24] = PINGROUP(24, NORTH, blsp_uart1, blsp_i2c1, blsp_spi_cs_n_a1, gcc_plltest, _, _, _, _, _),
+ [25] = PINGROUP(25, NORTH, blsp_uart1, blsp_i2c1, blsp_spi_clk_a1, gcc_plltest, _, _, _, _, _),
+ [26] = PINGROUP(26, EAST, rgb_data0, blsp_uart5, blsp_spi5, adsp_ext, _, _, _, _, _),
+ [27] = PINGROUP(27, EAST, rgb_data1, blsp_uart5, blsp_spi5, prng_rosc, _, _, _, _, _),
+ [28] = PINGROUP(28, EAST, rgb_data2, blsp_uart5, blsp_i2c5, blsp_spi5, gcc_gp1_clk_b, _, _, _, _),
+ [29] = PINGROUP(29, EAST, rgb_data3, blsp_uart5, blsp_i2c5, blsp_spi5, gcc_gp2_clk_b, _, _, _, _),
+ [30] = PINGROUP(30, NORTH, blsp_spi0, blsp_uart0, gcc_gp3_clk_b, _, _, _, _, _, _),
+ [31] = PINGROUP(31, NORTH, blsp_spi0, blsp_uart0, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, NORTH, blsp_spi0, blsp_uart0, blsp_i2c0, _, _, _, _, _, _),
+ [33] = PINGROUP(33, NORTH, blsp_spi0, blsp_uart0, blsp_i2c0, _, _, _, _, _, _),
+ [34] = PINGROUP(34, SOUTH, _, qdss_traceclk_b, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, SOUTH, pcie_clk, _, qdss_tracedata_b, _, _, _, _, _, _),
+ [36] = PINGROUP(36, NORTH, _, _, _, _, _, _, qdss_tracedata_a, _, _),
+ [37] = PINGROUP(37, NORTH, nfc_irq, blsp_spi4, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, NORTH, nfc_dwl, blsp_spi4, audio_ts, _, _, _, _, _, _),
+ [39] = PINGROUP(39, EAST, rgb_data4, spi_lcd, blsp_uart_tx_b2, gcc_gp3_clk_a, qdss_tracedata_a, _, _, _, _),
+ [40] = PINGROUP(40, EAST, rgb_data5, spi_lcd, blsp_uart_rx_b2, _, qdss_tracedata_b, _, _, _, _),
+ [41] = PINGROUP(41, EAST, rgb_data0, blsp_i2c_sda_b2, _, qdss_tracedata_b, _, _, _, _, _),
+ [42] = PINGROUP(42, EAST, rgb_data1, blsp_i2c_scl_b2, _, _, _, _, _, qdss_tracedata_a, _),
+ [43] = PINGROUP(43, EAST, rgb_data2, pwm_led11, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, EAST, rgb_data3, pwm_led12, blsp_spi5, _, _, _, _, _, _),
+ [45] = PINGROUP(45, EAST, rgb_data4, pwm_led13, blsp_spi5, qdss_tracedata_b, _, _, _, _, _),
+ [46] = PINGROUP(46, EAST, rgb_data5, pwm_led14, blsp_spi5, qdss_tracedata_b, _, wlan1_adc1, _, _, _),
+ [47] = PINGROUP(47, EAST, rgb_data_b0, pwm_led15, blsp_spi_mosi_b1, qdss_tracedata_b, _, wlan1_adc0, _, _, _),
+ [48] = PINGROUP(48, EAST, rgb_data_b1, pwm_led16, blsp_spi_miso_b1, _, qdss_cti_trig_out_b0, _, wlan2_adc1, _, _),
+ [49] = PINGROUP(49, EAST, rgb_data_b2, pwm_led17, blsp_spi_cs_n_b1, _, qdss_tracedata_b, _, wlan2_adc0, _, _),
+ [50] = PINGROUP(50, EAST, rgb_data_b3, pwm_led18, blsp_spi_clk_b1, qdss_tracedata_b, _, _, _, _, _),
+ [51] = PINGROUP(51, EAST, rgb_data_b4, pwm_led19, ext_mclk1_b, qdss_traceclk_a, _, _, _, _, _),
+ [52] = PINGROUP(52, EAST, rgb_data_b5, pwm_led20, atest_char3, i2s_3_sck_b, ldo_update, bimc_dte0, _, _, _),
+ [53] = PINGROUP(53, EAST, rgb_hsync, pwm_led21, i2s_3_ws_b, dbg_out, _, _, _, _, _),
+ [54] = PINGROUP(54, EAST, rgb_vsync, i2s_3_data0_b, ldo_en, bimc_dte0, _, hdmi_dtest, _, _, _),
+ [55] = PINGROUP(55, EAST, rgb_de, i2s_3_data1_b, _, qdss_tracedata_b, _, hdmi_lbk9, _, _, _),
+ [56] = PINGROUP(56, EAST, rgb_clk, atest_char1, i2s_3_data2_b, ebi_cdc, _, hdmi_lbk8, _, _, _),
+ [57] = PINGROUP(57, EAST, rgb_mdp, atest_char0, i2s_3_data3_b, _, hdmi_lbk7, _, _, _, _),
+ [58] = PINGROUP(58, EAST, rgb_data_b6, _, ebi_cdc, _, _, _, _, _, _),
+ [59] = PINGROUP(59, EAST, rgb_data_b7, _, hdmi_lbk6, _, _, _, _, _, _),
+ [60] = PINGROUP(60, NORTH, _, _, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, NORTH, rgmii_int, cri_trng1, qdss_tracedata_b, _, _, _, _, _, _),
+ [62] = PINGROUP(62, NORTH, rgmii_wol, cri_trng0, qdss_tracedata_b, gcc_tlmm, _, _, _, _, _),
+ [63] = PINGROUP(63, NORTH, rgmii_ck, _, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, NORTH, rgmii_tx, _, hdmi_lbk5, _, _, _, _, _, _),
+ [65] = PINGROUP(65, NORTH, rgmii_tx, _, hdmi_pixel, _, _, _, _, _, _),
+ [66] = PINGROUP(66, NORTH, rgmii_tx, _, hdmi_rcv, _, _, _, _, _, _),
+ [67] = PINGROUP(67, NORTH, rgmii_tx, _, hdmi_lbk4, _, _, _, _, _, _),
+ [68] = PINGROUP(68, NORTH, rgmii_ctl, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, NORTH, rgmii_ck, ext_lpass, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, NORTH, rgmii_rx, cri_trng, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, NORTH, rgmii_rx, _, hdmi_lbk3, _, _, _, _, _, _),
+ [72] = PINGROUP(72, NORTH, rgmii_rx, _, hdmi_lbk2, _, _, _, _, _, _),
+ [73] = PINGROUP(73, NORTH, rgmii_rx, _, _, _, _, qdss_cti_trig_out_b1, _, _, _),
+ [74] = PINGROUP(74, NORTH, rgmii_ctl, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, NORTH, rgmii_mdio, _, hdmi_lbk1, _, _, _, _, _, _),
+ [76] = PINGROUP(76, NORTH, rgmii_mdc, _, _, _, _, _, hdmi_lbk0, _, _),
+ [77] = PINGROUP(77, NORTH, ir_in, wsa_en, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, EAST, rgb_data6, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, EAST, rgb_data7, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, EAST, rgb_data6, atest_char2, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, EAST, rgb_data7, ebi_ch0, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, NORTH, blsp_uart3, blsp_spi3, sd_write, _, _, _, _, _, qdss_tracedata_a),
+ [83] = PINGROUP(83, NORTH, blsp_uart3, blsp_spi3, _, _, _, _, qdss_tracedata_a, _, _),
+ [84] = PINGROUP(84, NORTH, blsp_uart3, blsp_i2c3, blsp_spi3, gcc_gp1_clk_a, qdss_cti_trig_in_b1, _, _, _, _),
+ [85] = PINGROUP(85, NORTH, blsp_uart3, blsp_i2c3, blsp_spi3, gcc_gp2_clk_a, qdss_tracedata_b, _, _, _, _),
+ [86] = PINGROUP(86, EAST, ext_mclk0, mclk_in1, _, _, _, _, _, _, _),
+ [87] = PINGROUP(87, EAST, i2s_1, dsd_clk_a, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, EAST, i2s_1, i2s_1, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, EAST, i2s_1, i2s_1, _, _, _, _, _, _, qdss_tracedata_b),
+ [90] = PINGROUP(90, EAST, i2s_1, i2s_1, _, _, _, _, _, _, _),
+ [91] = PINGROUP(91, EAST, i2s_1, i2s_1, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, EAST, i2s_1, i2s_1, _, _, _, _, _, qdss_cti_trig_in_a1, _),
+ [93] = PINGROUP(93, EAST, i2s_1, pwm_led22, i2s_1, _, _, _, _, _, qdss_tracedata_b),
+ [94] = PINGROUP(94, EAST, i2s_1, pwm_led23, i2s_1, _, qdss_cti_trig_out_a0, _, rgmi_dll2, _, _),
+ [95] = PINGROUP(95, EAST, i2s_1, pwm_led1, i2s_1, _, qdss_cti_trig_out_a1, _, _, _, _),
+ [96] = PINGROUP(96, EAST, i2s_1, pwm_led2, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, EAST, i2s_2, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, EAST, i2s_2, _, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, EAST, i2s_2, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, EAST, i2s_2, pll_bist, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, EAST, i2s_2, _, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, EAST, i2s_2, _, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, EAST, ext_mclk1_a, mclk_in2, bimc_dte1, _, _, _, _, _, _),
+ [104] = PINGROUP(104, EAST, i2s_3_sck_a, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, EAST, i2s_3_ws_a, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, EAST, i2s_3_data0_a, ebi2_lcd, _, _, ebi_cdc, _, _, _, _),
+ [107] = PINGROUP(107, EAST, i2s_3_data1_a, ebi2_lcd, _, _, ebi_cdc, _, _, _, _),
+ [108] = PINGROUP(108, EAST, i2s_3_data2_a, ebi2_lcd, atest_char, pwm_led3, ebi_cdc, _, _, _, _),
+ [109] = PINGROUP(109, EAST, i2s_3_data3_a, ebi2_lcd, pwm_led4, bimc_dte1, _, _, _, _, _),
+ [110] = PINGROUP(110, EAST, i2s_4, ebi2_a, dsd_clk_b, pwm_led5, _, _, _, _, _),
+ [111] = PINGROUP(111, EAST, i2s_4, i2s_4, pwm_led6, ebi_cdc, _, _, _, _, _),
+ [112] = PINGROUP(112, EAST, i2s_4, i2s_4, pwm_led7, _, _, _, _, _, _),
+ [113] = PINGROUP(113, EAST, i2s_4, i2s_4, pwm_led8, _, _, _, _, _, _),
+ [114] = PINGROUP(114, EAST, i2s_4, i2s_4, pwm_led24, _, _, _, _, _, _),
+ [115] = PINGROUP(115, EAST, i2s_4, i2s_4, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, EAST, i2s_4, spkr_dac0, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, NORTH, blsp_i2c4, blsp_spi4, pwm_led9, _, _, _, _, _, _),
+ [118] = PINGROUP(118, NORTH, blsp_i2c4, blsp_spi4, pwm_led10, _, _, _, _, _, _),
+ [119] = PINGROUP(119, EAST, spdifrx_opt, _, _, _, _, _, _, _, _),
+ [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xc2000, 15, 0),
+ [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0xc2000, 13, 6),
+ [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xc2000, 11, 3),
+ [123] = SDC_QDSD_PINGROUP(sdc1_data, 0xc2000, 9, 0),
+ [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0xc3000, 14, 6),
+ [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xc3000, 11, 3),
+ [126] = SDC_QDSD_PINGROUP(sdc2_data, 0xc3000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data qcs404_pinctrl = {
+ .pins = qcs404_pins,
+ .npins = ARRAY_SIZE(qcs404_pins),
+ .functions = qcs404_functions,
+ .nfunctions = ARRAY_SIZE(qcs404_functions),
+ .groups = qcs404_groups,
+ .ngroups = ARRAY_SIZE(qcs404_groups),
+ .ngpios = 120,
+ .tiles = qcs404_tiles,
+ .ntiles = ARRAY_SIZE(qcs404_tiles),
+};
+
+static int qcs404_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &qcs404_pinctrl);
+}
+
+static const struct of_device_id qcs404_pinctrl_of_match[] = {
+ { .compatible = "qcom,qcs404-pinctrl", },
+ { },
+};
+
+static struct platform_driver qcs404_pinctrl_driver = {
+ .driver = {
+ .name = "qcs404-pinctrl",
+ .of_match_table = qcs404_pinctrl_of_match,
+ },
+ .probe = qcs404_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init qcs404_pinctrl_init(void)
+{
+ return platform_driver_register(&qcs404_pinctrl_driver);
+}
+arch_initcall(qcs404_pinctrl_init);
+
+static void __exit qcs404_pinctrl_exit(void)
+{
+ platform_driver_unregister(&qcs404_pinctrl_driver);
+}
+module_exit(qcs404_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm QCS404 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, qcs404_pinctrl_of_match);
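One point worth drawing out of the comment above qcs404_groups[]: the array is indexed by pin number, so its 127 entries line up one-to-one with the 127 descriptors in qcs404_pins[], and a pin with no alternate functions still gets a PINGROUP slot whose functions are all "_" placeholders. A hypothetical lookup showing why that alignment is convenient — this is not code from the patch:

/* Hypothetical illustration: with group index == pin number, a pin maps
 * straight to its group with no search. */
static const struct msm_pingroup *qcs404_group_for_pin(unsigned int pin)
{
	if (pin >= ARRAY_SIZE(qcs404_groups))
		return NULL;
	return &qcs404_groups[pin];
}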
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
new file mode 100644
index 000000000000..6838b38555a1
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -0,0 +1,1455 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, Craig Tatlor.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+static const char * const sdm660_tiles[] = {
+ "north",
+ "center",
+ "south"
+};
+
+enum {
+ NORTH,
+ CENTER,
+ SOUTH
+};
+
+#define REG_SIZE 0x1000
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = base + REG_SIZE * id, \
+ .io_reg = base + 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = base + 0x8 + REG_SIZE * id, \
+ .intr_status_reg = base + 0xc + REG_SIZE * id, \
+ .intr_target_reg = base + 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
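Note how this sdm660 PINGROUP() differs from the qcs404 one above: instead of carrying a .tile index, it folds a base offset into the register addresses themselves, so PINGROUP(10, base, ...) with REG_SIZE 0x1000 yields ctl = base + 0xa000, io = base + 0xa004, and so on. A tiny illustrative check of that arithmetic, not part of the patch:

/* Illustrative only: offsets the sdm660 PINGROUP() macro computes. */
static inline u32 sdm660_ctl_reg(u32 base, unsigned int id)
{
	return base + 0x1000 * id;	/* id 10 -> base + 0xa000 */
}

static inline u32 sdm660_io_reg(u32 base, unsigned int id)
{
	return base + 0x4 + 0x1000 * id;	/* id 10 -> base + 0xa004 */
}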
+static const struct pinctrl_pin_desc sdm660_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "SDC1_CLK"),
+ PINCTRL_PIN(115, "SDC1_CMD"),
+ PINCTRL_PIN(116, "SDC1_DATA"),
+ PINCTRL_PIN(117, "SDC2_CLK"),
+ PINCTRL_PIN(118, "SDC2_CMD"),
+ PINCTRL_PIN(119, "SDC2_DATA"),
+ PINCTRL_PIN(120, "SDC1_RCLK"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+
+static const unsigned int sdc1_clk_pins[] = { 114 };
+static const unsigned int sdc1_cmd_pins[] = { 115 };
+static const unsigned int sdc1_data_pins[] = { 116 };
+static const unsigned int sdc1_rclk_pins[] = { 120 };
+static const unsigned int sdc2_clk_pins[] = { 117 };
+static const unsigned int sdc2_cmd_pins[] = { 118 };
+static const unsigned int sdc2_data_pins[] = { 119 };
+
+enum sdm660_functions {
+ msm_mux_adsp_ext,
+ msm_mux_agera_pll,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_gpsadc0,
+ msm_mux_atest_gpsadc1,
+ msm_mux_atest_tsens,
+ msm_mux_atest_tsens2,
+ msm_mux_atest_usb1,
+ msm_mux_atest_usb10,
+ msm_mux_atest_usb11,
+ msm_mux_atest_usb12,
+ msm_mux_atest_usb13,
+ msm_mux_atest_usb2,
+ msm_mux_atest_usb20,
+ msm_mux_atest_usb21,
+ msm_mux_atest_usb22,
+ msm_mux_atest_usb23,
+ msm_mux_audio_ref,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_i2c5,
+ msm_mux_blsp_i2c6,
+ msm_mux_blsp_i2c7,
+ msm_mux_blsp_i2c8_a,
+ msm_mux_blsp_i2c8_b,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi3_cs1,
+ msm_mux_blsp_spi3_cs2,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_spi7,
+ msm_mux_blsp_spi8_a,
+ msm_mux_blsp_spi8_b,
+ msm_mux_blsp_spi8_cs1,
+ msm_mux_blsp_spi8_cs2,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uart5,
+ msm_mux_blsp_uart6_a,
+ msm_mux_blsp_uart6_b,
+ msm_mux_blsp_uim1,
+ msm_mux_blsp_uim2,
+ msm_mux_blsp_uim5,
+ msm_mux_blsp_uim6,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gpio,
+ msm_mux_gps_tx_a,
+ msm_mux_gps_tx_b,
+ msm_mux_gps_tx_c,
+ msm_mux_isense_dbg,
+ msm_mux_jitter_bist,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_m_voc,
+ msm_mux_mdp_vsync,
+ msm_mux_mdss_vsync0,
+ msm_mux_mdss_vsync1,
+ msm_mux_mdss_vsync2,
+ msm_mux_mdss_vsync3,
+ msm_mux_mss_lte,
+ msm_mux_nav_pps_a,
+ msm_mux_nav_pps_b,
+ msm_mux_nav_pps_c,
+ msm_mux_pa_indicator,
+ msm_mux_phase_flag0,
+ msm_mux_phase_flag1,
+ msm_mux_phase_flag2,
+ msm_mux_phase_flag3,
+ msm_mux_phase_flag4,
+ msm_mux_phase_flag5,
+ msm_mux_phase_flag6,
+ msm_mux_phase_flag7,
+ msm_mux_phase_flag8,
+ msm_mux_phase_flag9,
+ msm_mux_phase_flag10,
+ msm_mux_phase_flag11,
+ msm_mux_phase_flag12,
+ msm_mux_phase_flag13,
+ msm_mux_phase_flag14,
+ msm_mux_phase_flag15,
+ msm_mux_phase_flag16,
+ msm_mux_phase_flag17,
+ msm_mux_phase_flag18,
+ msm_mux_phase_flag19,
+ msm_mux_phase_flag20,
+ msm_mux_phase_flag21,
+ msm_mux_phase_flag22,
+ msm_mux_phase_flag23,
+ msm_mux_phase_flag24,
+ msm_mux_phase_flag25,
+ msm_mux_phase_flag26,
+ msm_mux_phase_flag27,
+ msm_mux_phase_flag28,
+ msm_mux_phase_flag29,
+ msm_mux_phase_flag30,
+ msm_mux_phase_flag31,
+ msm_mux_pll_bypassnl,
+ msm_mux_pll_reset,
+ msm_mux_pri_mi2s,
+ msm_mux_pri_mi2s_ws,
+ msm_mux_prng_rosc,
+ msm_mux_pwr_crypto,
+ msm_mux_pwr_modem,
+ msm_mux_pwr_nav,
+ msm_mux_qdss_cti0_a,
+ msm_mux_qdss_cti0_b,
+ msm_mux_qdss_cti1_a,
+ msm_mux_qdss_cti1_b,
+ msm_mux_qdss_gpio,
+ msm_mux_qdss_gpio0,
+ msm_mux_qdss_gpio1,
+ msm_mux_qdss_gpio10,
+ msm_mux_qdss_gpio11,
+ msm_mux_qdss_gpio12,
+ msm_mux_qdss_gpio13,
+ msm_mux_qdss_gpio14,
+ msm_mux_qdss_gpio15,
+ msm_mux_qdss_gpio2,
+ msm_mux_qdss_gpio3,
+ msm_mux_qdss_gpio4,
+ msm_mux_qdss_gpio5,
+ msm_mux_qdss_gpio6,
+ msm_mux_qdss_gpio7,
+ msm_mux_qdss_gpio8,
+ msm_mux_qdss_gpio9,
+ msm_mux_qlink_enable,
+ msm_mux_qlink_request,
+ msm_mux_qspi_clk,
+ msm_mux_qspi_cs,
+ msm_mux_qspi_data0,
+ msm_mux_qspi_data1,
+ msm_mux_qspi_data2,
+ msm_mux_qspi_data3,
+ msm_mux_qspi_resetn,
+ msm_mux_sec_mi2s,
+ msm_mux_sndwire_clk,
+ msm_mux_sndwire_data,
+ msm_mux_sp_cmu,
+ msm_mux_ssc_irq,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_uim_batt,
+ msm_mux_vfr_1,
+ msm_mux_vsense_clkout,
+ msm_mux_vsense_data0,
+ msm_mux_vsense_data1,
+ msm_mux_vsense_mode,
+ msm_mux_wlan1_adc0,
+ msm_mux_wlan1_adc1,
+ msm_mux_wlan2_adc0,
+ msm_mux_wlan2_adc1,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113",
+};
+
+static const char * const adsp_ext_groups[] = {
+ "gpio65",
+};
+static const char * const agera_pll_groups[] = {
+ "gpio34", "gpio36",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio62",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio61",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio60",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio59",
+};
+static const char * const atest_char_groups[] = {
+ "gpio58",
+};
+static const char * const atest_gpsadc0_groups[] = {
+ "gpio1",
+};
+static const char * const atest_gpsadc1_groups[] = {
+ "gpio0",
+};
+static const char * const atest_tsens2_groups[] = {
+ "gpio3",
+};
+static const char * const atest_tsens_groups[] = {
+ "gpio36",
+};
+static const char * const atest_usb10_groups[] = {
+ "gpio11",
+};
+static const char * const atest_usb11_groups[] = {
+ "gpio10",
+};
+static const char * const atest_usb12_groups[] = {
+ "gpio9",
+};
+static const char * const atest_usb13_groups[] = {
+ "gpio8",
+};
+static const char * const atest_usb1_groups[] = {
+ "gpio3",
+};
+static const char * const atest_usb20_groups[] = {
+ "gpio56",
+};
+static const char * const atest_usb21_groups[] = {
+ "gpio36",
+};
+static const char * const atest_usb22_groups[] = {
+ "gpio57",
+};
+static const char * const atest_usb23_groups[] = {
+ "gpio37",
+};
+static const char * const atest_usb2_groups[] = {
+ "gpio35",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio62",
+};
+static const char * const bimc_dte0_groups[] = {
+ "gpio9", "gpio11",
+};
+static const char * const bimc_dte1_groups[] = {
+ "gpio8", "gpio10",
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio14", "gpio15",
+};
+static const char * const blsp_i2c5_groups[] = {
+ "gpio18", "gpio19",
+};
+static const char * const blsp_i2c6_groups[] = {
+ "gpio22", "gpio23",
+};
+static const char * const blsp_i2c7_groups[] = {
+ "gpio26", "gpio27",
+};
+static const char * const blsp_i2c8_a_groups[] = {
+ "gpio30", "gpio31",
+};
+static const char * const blsp_i2c8_b_groups[] = {
+ "gpio44", "gpio52",
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio46",
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_spi3_cs1_groups[] = {
+ "gpio30",
+};
+static const char * const blsp_spi3_cs2_groups[] = {
+ "gpio65",
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio49", "gpio52", "gpio22", "gpio23",
+};
+static const char * const blsp_spi7_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+static const char * const blsp_spi8_a_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+};
+static const char * const blsp_spi8_b_groups[] = {
+ "gpio40", "gpio41", "gpio44", "gpio52",
+};
+static const char * const blsp_spi8_cs1_groups[] = {
+ "gpio64",
+};
+static const char * const blsp_spi8_cs2_groups[] = {
+ "gpio76",
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uart5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const blsp_uart6_a_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+static const char * const blsp_uart6_b_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+};
+static const char * const blsp_uim1_groups[] = {
+ "gpio0", "gpio1",
+};
+static const char * const blsp_uim2_groups[] = {
+ "gpio4", "gpio5",
+};
+static const char * const blsp_uim5_groups[] = {
+ "gpio16", "gpio17",
+};
+static const char * const blsp_uim6_groups[] = {
+ "gpio20", "gpio21",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+static const char * const cci_async_groups[] = {
+ "gpio45",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char * const cri_trng0_groups[] = {
+ "gpio60",
+};
+static const char * const cri_trng1_groups[] = {
+ "gpio61",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio62",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio11",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio3", "gpio8", "gpio9", "gpio10",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio57", "gpio78",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio58", "gpio81",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio59", "gpio82",
+};
+static const char * const gps_tx_a_groups[] = {
+ "gpio65",
+};
+static const char * const gps_tx_b_groups[] = {
+ "gpio98",
+};
+static const char * const gps_tx_c_groups[] = {
+ "gpio80",
+};
+static const char * const isense_dbg_groups[] = {
+ "gpio68",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio35",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio97",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio98",
+};
+static const char * const m_voc_groups[] = {
+ "gpio28",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio59", "gpio74",
+};
+static const char * const mdss_vsync0_groups[] = {
+ "gpio42",
+};
+static const char * const mdss_vsync1_groups[] = {
+ "gpio42",
+};
+static const char * const mdss_vsync2_groups[] = {
+ "gpio42",
+};
+static const char * const mdss_vsync3_groups[] = {
+ "gpio42",
+};
+static const char * const mss_lte_groups[] = {
+ "gpio81", "gpio82",
+};
+static const char * const nav_pps_a_groups[] = {
+ "gpio65",
+};
+static const char * const nav_pps_b_groups[] = {
+ "gpio98",
+};
+static const char * const nav_pps_c_groups[] = {
+ "gpio80",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio92",
+};
+static const char * const phase_flag0_groups[] = {
+ "gpio68",
+};
+static const char * const phase_flag1_groups[] = {
+ "gpio48",
+};
+static const char * const phase_flag2_groups[] = {
+ "gpio49",
+};
+static const char * const phase_flag3_groups[] = {
+ "gpio4",
+};
+static const char * const phase_flag4_groups[] = {
+ "gpio57",
+};
+static const char * const phase_flag5_groups[] = {
+ "gpio17",
+};
+static const char * const phase_flag6_groups[] = {
+ "gpio53",
+};
+static const char * const phase_flag7_groups[] = {
+ "gpio69",
+};
+static const char * const phase_flag8_groups[] = {
+ "gpio70",
+};
+static const char * const phase_flag9_groups[] = {
+ "gpio50",
+};
+static const char * const phase_flag10_groups[] = {
+ "gpio56",
+};
+static const char * const phase_flag11_groups[] = {
+ "gpio21",
+};
+static const char * const phase_flag12_groups[] = {
+ "gpio22",
+};
+static const char * const phase_flag13_groups[] = {
+ "gpio23",
+};
+static const char * const phase_flag14_groups[] = {
+ "gpio5",
+};
+static const char * const phase_flag15_groups[] = {
+ "gpio51",
+};
+static const char * const phase_flag16_groups[] = {
+ "gpio52",
+};
+static const char * const phase_flag17_groups[] = {
+ "gpio24",
+};
+static const char * const phase_flag18_groups[] = {
+ "gpio25",
+};
+static const char * const phase_flag19_groups[] = {
+ "gpio26",
+};
+static const char * const phase_flag20_groups[] = {
+ "gpio27",
+};
+static const char * const phase_flag21_groups[] = {
+ "gpio28",
+};
+static const char * const phase_flag22_groups[] = {
+ "gpio29",
+};
+static const char * const phase_flag23_groups[] = {
+ "gpio30",
+};
+static const char * const phase_flag24_groups[] = {
+ "gpio31",
+};
+static const char * const phase_flag25_groups[] = {
+ "gpio55",
+};
+static const char * const phase_flag26_groups[] = {
+ "gpio12",
+};
+static const char * const phase_flag27_groups[] = {
+ "gpio13",
+};
+static const char * const phase_flag28_groups[] = {
+ "gpio14",
+};
+static const char * const phase_flag29_groups[] = {
+ "gpio54",
+};
+static const char * const phase_flag30_groups[] = {
+ "gpio47",
+};
+static const char * const phase_flag31_groups[] = {
+ "gpio6",
+};
+static const char * const pll_bypassnl_groups[] = {
+ "gpio36",
+};
+static const char * const pll_reset_groups[] = {
+ "gpio37",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio12", "gpio14", "gpio15", "gpio61",
+};
+static const char * const pri_mi2s_ws_groups[] = {
+ "gpio13",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio102",
+};
+static const char * const pwr_crypto_groups[] = {
+ "gpio33",
+};
+static const char * const pwr_modem_groups[] = {
+ "gpio31",
+};
+static const char * const pwr_nav_groups[] = {
+ "gpio32",
+};
+static const char * const qdss_cti0_a_groups[] = {
+ "gpio49", "gpio50",
+};
+static const char * const qdss_cti0_b_groups[] = {
+ "gpio13", "gpio21",
+};
+static const char * const qdss_cti1_a_groups[] = {
+ "gpio53", "gpio55",
+};
+static const char * const qdss_cti1_b_groups[] = {
+ "gpio12", "gpio66",
+};
+static const char * const qdss_gpio0_groups[] = {
+ "gpio32", "gpio67",
+};
+static const char * const qdss_gpio10_groups[] = {
+ "gpio43", "gpio77",
+};
+static const char * const qdss_gpio11_groups[] = {
+ "gpio44", "gpio79",
+};
+static const char * const qdss_gpio12_groups[] = {
+ "gpio45", "gpio80",
+};
+static const char * const qdss_gpio13_groups[] = {
+ "gpio46", "gpio78",
+};
+static const char * const qdss_gpio14_groups[] = {
+ "gpio47", "gpio72",
+};
+static const char * const qdss_gpio15_groups[] = {
+ "gpio48", "gpio73",
+};
+static const char * const qdss_gpio1_groups[] = {
+ "gpio33", "gpio63",
+};
+static const char * const qdss_gpio2_groups[] = {
+ "gpio34", "gpio64",
+};
+static const char * const qdss_gpio3_groups[] = {
+ "gpio35", "gpio56",
+};
+static const char * const qdss_gpio4_groups[] = {
+ "gpio0", "gpio36",
+};
+static const char * const qdss_gpio5_groups[] = {
+ "gpio1", "gpio37",
+};
+static const char * const qdss_gpio6_groups[] = {
+ "gpio38", "gpio70",
+};
+static const char * const qdss_gpio7_groups[] = {
+ "gpio39", "gpio71",
+};
+static const char * const qdss_gpio8_groups[] = {
+ "gpio51", "gpio75",
+};
+static const char * const qdss_gpio9_groups[] = {
+ "gpio42", "gpio76",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio31", "gpio52", "gpio68", "gpio69",
+};
+static const char * const qlink_enable_groups[] = {
+ "gpio100",
+};
+static const char * const qlink_request_groups[] = {
+ "gpio99",
+};
+static const char * const qspi_clk_groups[] = {
+ "gpio47",
+};
+static const char * const qspi_cs_groups[] = {
+ "gpio43", "gpio50",
+};
+static const char * const qspi_data0_groups[] = {
+ "gpio33",
+};
+static const char * const qspi_data1_groups[] = {
+ "gpio34",
+};
+static const char * const qspi_data2_groups[] = {
+ "gpio35",
+};
+static const char * const qspi_data3_groups[] = {
+ "gpio51",
+};
+static const char * const qspi_resetn_groups[] = {
+ "gpio48",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27", "gpio62",
+};
+static const char * const sndwire_clk_groups[] = {
+ "gpio24",
+};
+static const char * const sndwire_data_groups[] = {
+ "gpio25",
+};
+static const char * const sp_cmu_groups[] = {
+ "gpio64",
+};
+static const char * const ssc_irq_groups[] = {
+ "gpio67", "gpio68", "gpio69", "gpio70", "gpio71", "gpio72", "gpio74",
+ "gpio75", "gpio76",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio0",
+};
+static const char * const tgu_ch1_groups[] = {
+ "gpio1",
+};
+static const char * const tsense_pwm1_groups[] = {
+ "gpio71",
+};
+static const char * const tsense_pwm2_groups[] = {
+ "gpio71",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio88",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio87",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio90",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio89",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio84",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio83",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio86",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio85",
+};
+static const char * const uim_batt_groups[] = {
+ "gpio91",
+};
+static const char * const vfr_1_groups[] = {
+ "gpio27",
+};
+static const char * const vsense_clkout_groups[] = {
+ "gpio24",
+};
+static const char * const vsense_data0_groups[] = {
+ "gpio21",
+};
+static const char * const vsense_data1_groups[] = {
+ "gpio22",
+};
+static const char * const vsense_mode_groups[] = {
+ "gpio23",
+};
+static const char * const wlan1_adc0_groups[] = {
+ "gpio9",
+};
+static const char * const wlan1_adc1_groups[] = {
+ "gpio8",
+};
+static const char * const wlan2_adc0_groups[] = {
+ "gpio11",
+};
+static const char * const wlan2_adc1_groups[] = {
+ "gpio10",
+};
+
+static const struct msm_function sdm660_functions[] = {
+ FUNCTION(adsp_ext),
+ FUNCTION(agera_pll),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_gpsadc0),
+ FUNCTION(atest_gpsadc1),
+ FUNCTION(atest_tsens),
+ FUNCTION(atest_tsens2),
+ FUNCTION(atest_usb1),
+ FUNCTION(atest_usb10),
+ FUNCTION(atest_usb11),
+ FUNCTION(atest_usb12),
+ FUNCTION(atest_usb13),
+ FUNCTION(atest_usb2),
+ FUNCTION(atest_usb20),
+ FUNCTION(atest_usb21),
+ FUNCTION(atest_usb22),
+ FUNCTION(atest_usb23),
+ FUNCTION(audio_ref),
+ FUNCTION(bimc_dte0),
+ FUNCTION(bimc_dte1),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(blsp_i2c7),
+ FUNCTION(blsp_i2c8_a),
+ FUNCTION(blsp_i2c8_b),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi3_cs1),
+ FUNCTION(blsp_spi3_cs2),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_spi7),
+ FUNCTION(blsp_spi8_a),
+ FUNCTION(blsp_spi8_b),
+ FUNCTION(blsp_spi8_cs1),
+ FUNCTION(blsp_spi8_cs2),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uart5),
+ FUNCTION(blsp_uart6_a),
+ FUNCTION(blsp_uart6_b),
+ FUNCTION(blsp_uim1),
+ FUNCTION(blsp_uim2),
+ FUNCTION(blsp_uim5),
+ FUNCTION(blsp_uim6),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gpio),
+ FUNCTION(gps_tx_a),
+ FUNCTION(gps_tx_b),
+ FUNCTION(gps_tx_c),
+ FUNCTION(isense_dbg),
+ FUNCTION(jitter_bist),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(m_voc),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdss_vsync0),
+ FUNCTION(mdss_vsync1),
+ FUNCTION(mdss_vsync2),
+ FUNCTION(mdss_vsync3),
+ FUNCTION(mss_lte),
+ FUNCTION(nav_pps_a),
+ FUNCTION(nav_pps_b),
+ FUNCTION(nav_pps_c),
+ FUNCTION(pa_indicator),
+ FUNCTION(phase_flag0),
+ FUNCTION(phase_flag1),
+ FUNCTION(phase_flag2),
+ FUNCTION(phase_flag3),
+ FUNCTION(phase_flag4),
+ FUNCTION(phase_flag5),
+ FUNCTION(phase_flag6),
+ FUNCTION(phase_flag7),
+ FUNCTION(phase_flag8),
+ FUNCTION(phase_flag9),
+ FUNCTION(phase_flag10),
+ FUNCTION(phase_flag11),
+ FUNCTION(phase_flag12),
+ FUNCTION(phase_flag13),
+ FUNCTION(phase_flag14),
+ FUNCTION(phase_flag15),
+ FUNCTION(phase_flag16),
+ FUNCTION(phase_flag17),
+ FUNCTION(phase_flag18),
+ FUNCTION(phase_flag19),
+ FUNCTION(phase_flag20),
+ FUNCTION(phase_flag21),
+ FUNCTION(phase_flag22),
+ FUNCTION(phase_flag23),
+ FUNCTION(phase_flag24),
+ FUNCTION(phase_flag25),
+ FUNCTION(phase_flag26),
+ FUNCTION(phase_flag27),
+ FUNCTION(phase_flag28),
+ FUNCTION(phase_flag29),
+ FUNCTION(phase_flag30),
+ FUNCTION(phase_flag31),
+ FUNCTION(pll_bypassnl),
+ FUNCTION(pll_reset),
+ FUNCTION(pri_mi2s),
+ FUNCTION(pri_mi2s_ws),
+ FUNCTION(prng_rosc),
+ FUNCTION(pwr_crypto),
+ FUNCTION(pwr_modem),
+ FUNCTION(pwr_nav),
+ FUNCTION(qdss_cti0_a),
+ FUNCTION(qdss_cti0_b),
+ FUNCTION(qdss_cti1_a),
+ FUNCTION(qdss_cti1_b),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qdss_gpio0),
+ FUNCTION(qdss_gpio1),
+ FUNCTION(qdss_gpio10),
+ FUNCTION(qdss_gpio11),
+ FUNCTION(qdss_gpio12),
+ FUNCTION(qdss_gpio13),
+ FUNCTION(qdss_gpio14),
+ FUNCTION(qdss_gpio15),
+ FUNCTION(qdss_gpio2),
+ FUNCTION(qdss_gpio3),
+ FUNCTION(qdss_gpio4),
+ FUNCTION(qdss_gpio5),
+ FUNCTION(qdss_gpio6),
+ FUNCTION(qdss_gpio7),
+ FUNCTION(qdss_gpio8),
+ FUNCTION(qdss_gpio9),
+ FUNCTION(qlink_enable),
+ FUNCTION(qlink_request),
+ FUNCTION(qspi_clk),
+ FUNCTION(qspi_cs),
+ FUNCTION(qspi_data0),
+ FUNCTION(qspi_data1),
+ FUNCTION(qspi_data2),
+ FUNCTION(qspi_data3),
+ FUNCTION(qspi_resetn),
+ FUNCTION(sec_mi2s),
+ FUNCTION(sndwire_clk),
+ FUNCTION(sndwire_data),
+ FUNCTION(sp_cmu),
+ FUNCTION(ssc_irq),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim_batt),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_clkout),
+ FUNCTION(vsense_data0),
+ FUNCTION(vsense_data1),
+ FUNCTION(vsense_mode),
+ FUNCTION(wlan1_adc0),
+ FUNCTION(wlan1_adc1),
+ FUNCTION(wlan2_adc0),
+ FUNCTION(wlan2_adc1),
+};
+
+static const struct msm_pingroup sdm660_groups[] = {
+ PINGROUP(0, SOUTH, blsp_spi1, blsp_uart1, blsp_uim1, tgu_ch0, _, _, qdss_gpio4, atest_gpsadc1, _),
+ PINGROUP(1, SOUTH, blsp_spi1, blsp_uart1, blsp_uim1, tgu_ch1, _, _, qdss_gpio5, atest_gpsadc0, _),
+ PINGROUP(2, SOUTH, blsp_spi1, blsp_uart1, blsp_i2c1, _, _, _, _, _, _),
+ PINGROUP(3, SOUTH, blsp_spi1, blsp_uart1, blsp_i2c1, ddr_bist, _, _, atest_tsens2, atest_usb1, _),
+ PINGROUP(4, NORTH, blsp_spi2, blsp_uim2, blsp_uart2, phase_flag3, _, _, _, _, _),
+ PINGROUP(5, SOUTH, blsp_spi2, blsp_uim2, blsp_uart2, phase_flag14, _, _, _, _, _),
+ PINGROUP(6, SOUTH, blsp_spi2, blsp_i2c2, blsp_uart2, phase_flag31, _, _, _, _, _),
+ PINGROUP(7, SOUTH, blsp_spi2, blsp_i2c2, blsp_uart2, _, _, _, _, _, _),
+ PINGROUP(8, NORTH, blsp_spi3, ddr_bist, _, _, _, wlan1_adc1, atest_usb13, bimc_dte1, _),
+ PINGROUP(9, NORTH, blsp_spi3, ddr_bist, _, _, _, wlan1_adc0, atest_usb12, bimc_dte0, _),
+ PINGROUP(10, NORTH, blsp_spi3, blsp_i2c3, ddr_bist, _, _, wlan2_adc1, atest_usb11, bimc_dte1, _),
+ PINGROUP(11, NORTH, blsp_spi3, blsp_i2c3, _, dbg_out, wlan2_adc0, atest_usb10, bimc_dte0, _, _),
+ PINGROUP(12, NORTH, blsp_spi4, pri_mi2s, _, phase_flag26, qdss_cti1_b, _, _, _, _),
+ PINGROUP(13, NORTH, blsp_spi4, _, pri_mi2s_ws, _, _, phase_flag27, qdss_cti0_b, _, _),
+ PINGROUP(14, NORTH, blsp_spi4, blsp_i2c4, pri_mi2s, _, phase_flag28, _, _, _, _),
+ PINGROUP(15, NORTH, blsp_spi4, blsp_i2c4, pri_mi2s, _, _, _, _, _, _),
+ PINGROUP(16, CENTER, blsp_uart5, blsp_spi5, blsp_uim5, _, _, _, _, _, _),
+ PINGROUP(17, CENTER, blsp_uart5, blsp_spi5, blsp_uim5, _, phase_flag5, _, _, _, _),
+ PINGROUP(18, CENTER, blsp_uart5, blsp_spi5, blsp_i2c5, _, _, _, _, _, _),
+ PINGROUP(19, CENTER, blsp_uart5, blsp_spi5, blsp_i2c5, _, _, _, _, _, _),
+ PINGROUP(20, SOUTH, _, _, blsp_uim6, _, _, _, _, _, _),
+ PINGROUP(21, SOUTH, _, _, blsp_uim6, _, phase_flag11, qdss_cti0_b, vsense_data0, _, _),
+ PINGROUP(22, CENTER, blsp_spi6, _, blsp_i2c6, _, phase_flag12, vsense_data1, _, _, _),
+ PINGROUP(23, CENTER, blsp_spi6, _, blsp_i2c6, _, phase_flag13, vsense_mode, _, _, _),
+ PINGROUP(24, NORTH, blsp_spi7, blsp_uart6_a, sec_mi2s, sndwire_clk, _, _, phase_flag17, vsense_clkout, _),
+ PINGROUP(25, NORTH, blsp_spi7, blsp_uart6_a, sec_mi2s, sndwire_data, _, _, phase_flag18, _, _),
+ PINGROUP(26, NORTH, blsp_spi7, blsp_uart6_a, blsp_i2c7, sec_mi2s, _, phase_flag19, _, _, _),
+ PINGROUP(27, NORTH, blsp_spi7, blsp_uart6_a, blsp_i2c7, vfr_1, sec_mi2s, _, phase_flag20, _, _),
+ PINGROUP(28, CENTER, blsp_spi8_a, blsp_uart6_b, m_voc, _, phase_flag21, _, _, _, _),
+ PINGROUP(29, CENTER, blsp_spi8_a, blsp_uart6_b, _, _, phase_flag22, _, _, _, _),
+ PINGROUP(30, CENTER, blsp_spi8_a, blsp_uart6_b, blsp_i2c8_a, blsp_spi3_cs1, _, phase_flag23, _, _, _),
+ PINGROUP(31, CENTER, blsp_spi8_a, blsp_uart6_b, blsp_i2c8_a, pwr_modem, _, phase_flag24, qdss_gpio, _, _),
+ PINGROUP(32, SOUTH, cam_mclk, pwr_nav, _, _, qdss_gpio0, _, _, _, _),
+ PINGROUP(33, SOUTH, cam_mclk, qspi_data0, pwr_crypto, _, _, qdss_gpio1, _, _, _),
+ PINGROUP(34, SOUTH, cam_mclk, qspi_data1, agera_pll, _, _, qdss_gpio2, _, _, _),
+ PINGROUP(35, SOUTH, cam_mclk, qspi_data2, jitter_bist, _, _, qdss_gpio3, _, atest_usb2, _),
+ PINGROUP(36, SOUTH, cci_i2c, pll_bypassnl, agera_pll, _, _, qdss_gpio4, atest_tsens, atest_usb21, _),
+ PINGROUP(37, SOUTH, cci_i2c, pll_reset, _, _, qdss_gpio5, atest_usb23, _, _, _),
+ PINGROUP(38, SOUTH, cci_i2c, _, _, qdss_gpio6, _, _, _, _, _),
+ PINGROUP(39, SOUTH, cci_i2c, _, _, qdss_gpio7, _, _, _, _, _),
+ PINGROUP(40, SOUTH, _, _, blsp_spi8_b, _, _, _, _, _, _),
+ PINGROUP(41, SOUTH, _, _, blsp_spi8_b, _, _, _, _, _, _),
+ PINGROUP(42, SOUTH, mdss_vsync0, mdss_vsync1, mdss_vsync2, mdss_vsync3, _, _, qdss_gpio9, _, _),
+ PINGROUP(43, SOUTH, _, _, qspi_cs, _, _, qdss_gpio10, _, _, _),
+ PINGROUP(44, SOUTH, _, _, blsp_spi8_b, blsp_i2c8_b, _, _, qdss_gpio11, _, _),
+ PINGROUP(45, SOUTH, cci_async, _, _, qdss_gpio12, _, _, _, _, _),
+ PINGROUP(46, SOUTH, blsp_spi1, _, _, qdss_gpio13, _, _, _, _, _),
+ PINGROUP(47, SOUTH, qspi_clk, _, phase_flag30, qdss_gpio14, _, _, _, _, _),
+ PINGROUP(48, SOUTH, _, phase_flag1, qdss_gpio15, _, _, _, _, _, _),
+ PINGROUP(49, SOUTH, blsp_spi6, phase_flag2, qdss_cti0_a, _, _, _, _, _, _),
+ PINGROUP(50, SOUTH, qspi_cs, _, phase_flag9, qdss_cti0_a, _, _, _, _, _),
+ PINGROUP(51, SOUTH, qspi_data3, _, phase_flag15, qdss_gpio8, _, _, _, _, _),
+ PINGROUP(52, SOUTH, _, blsp_spi8_b, blsp_i2c8_b, blsp_spi6, phase_flag16, qdss_gpio, _, _, _),
+ PINGROUP(53, NORTH, _, phase_flag6, qdss_cti1_a, _, _, _, _, _, _),
+ PINGROUP(54, NORTH, _, _, phase_flag29, _, _, _, _, _, _),
+ PINGROUP(55, SOUTH, _, phase_flag25, qdss_cti1_a, _, _, _, _, _, _),
+ PINGROUP(56, SOUTH, _, phase_flag10, qdss_gpio3, _, atest_usb20, _, _, _, _),
+ PINGROUP(57, SOUTH, gcc_gp1, _, phase_flag4, atest_usb22, _, _, _, _, _),
+ PINGROUP(58, SOUTH, _, gcc_gp2, _, _, atest_char, _, _, _, _),
+ PINGROUP(59, NORTH, mdp_vsync, gcc_gp3, _, _, atest_char3, _, _, _, _),
+ PINGROUP(60, NORTH, cri_trng0, _, _, atest_char2, _, _, _, _, _),
+ PINGROUP(61, NORTH, pri_mi2s, cri_trng1, _, _, atest_char1, _, _, _, _),
+ PINGROUP(62, NORTH, sec_mi2s, audio_ref, _, cri_trng, _, _, atest_char0, _, _),
+ PINGROUP(63, NORTH, _, _, _, qdss_gpio1, _, _, _, _, _),
+ PINGROUP(64, SOUTH, blsp_spi8_cs1, sp_cmu, _, _, qdss_gpio2, _, _, _, _),
+ PINGROUP(65, SOUTH, _, nav_pps_a, nav_pps_a, gps_tx_a, blsp_spi3_cs2, adsp_ext, _, _, _),
+ PINGROUP(66, NORTH, _, _, qdss_cti1_b, _, _, _, _, _, _),
+ PINGROUP(67, NORTH, _, _, qdss_gpio0, _, _, _, _, _, _),
+ PINGROUP(68, NORTH, isense_dbg, _, phase_flag0, qdss_gpio, _, _, _, _, _),
+ PINGROUP(69, NORTH, _, phase_flag7, qdss_gpio, _, _, _, _, _, _),
+ PINGROUP(70, NORTH, _, phase_flag8, qdss_gpio6, _, _, _, _, _, _),
+ PINGROUP(71, NORTH, _, _, qdss_gpio7, tsense_pwm1, tsense_pwm2, _, _, _, _),
+ PINGROUP(72, NORTH, _, qdss_gpio14, _, _, _, _, _, _, _),
+ PINGROUP(73, NORTH, _, _, qdss_gpio15, _, _, _, _, _, _),
+ PINGROUP(74, NORTH, mdp_vsync, _, _, _, _, _, _, _, _),
+ PINGROUP(75, NORTH, _, _, qdss_gpio8, _, _, _, _, _, _),
+ PINGROUP(76, NORTH, blsp_spi8_cs2, _, _, _, qdss_gpio9, _, _, _, _),
+ PINGROUP(77, NORTH, _, _, qdss_gpio10, _, _, _, _, _, _),
+ PINGROUP(78, NORTH, gcc_gp1, _, qdss_gpio13, _, _, _, _, _, _),
+ PINGROUP(79, SOUTH, _, _, qdss_gpio11, _, _, _, _, _, _),
+ PINGROUP(80, SOUTH, nav_pps_b, nav_pps_b, gps_tx_c, _, _, qdss_gpio12, _, _, _),
+ PINGROUP(81, CENTER, mss_lte, gcc_gp2, _, _, _, _, _, _, _),
+ PINGROUP(82, CENTER, mss_lte, gcc_gp3, _, _, _, _, _, _, _),
+ PINGROUP(83, SOUTH, uim2_data, _, _, _, _, _, _, _, _),
+ PINGROUP(84, SOUTH, uim2_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(85, SOUTH, uim2_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(86, SOUTH, uim2_present, _, _, _, _, _, _, _, _),
+ PINGROUP(87, SOUTH, uim1_data, _, _, _, _, _, _, _, _),
+ PINGROUP(88, SOUTH, uim1_clk, _, _, _, _, _, _, _, _),
+ PINGROUP(89, SOUTH, uim1_reset, _, _, _, _, _, _, _, _),
+ PINGROUP(90, SOUTH, uim1_present, _, _, _, _, _, _, _, _),
+ PINGROUP(91, SOUTH, uim_batt, _, _, _, _, _, _, _, _),
+ PINGROUP(92, SOUTH, _, _, pa_indicator, _, _, _, _, _, _),
+ PINGROUP(93, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(94, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(95, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(96, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(97, SOUTH, _, ldo_en, _, _, _, _, _, _, _),
+ PINGROUP(98, SOUTH, _, nav_pps_c, nav_pps_c, gps_tx_b, ldo_update, _, _, _, _),
+ PINGROUP(99, SOUTH, qlink_request, _, _, _, _, _, _, _, _),
+ PINGROUP(100, SOUTH, qlink_enable, _, _, _, _, _, _, _, _),
+ PINGROUP(101, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(102, SOUTH, _, prng_rosc, _, _, _, _, _, _, _),
+ PINGROUP(103, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(104, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(105, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(106, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(107, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(108, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(109, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(110, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(111, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(112, SOUTH, _, _, _, _, _, _, _, _, _),
+ PINGROUP(113, SOUTH, _, _, _, _, _, _, _, _, _),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x99a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x99a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x99a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x99b000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x99b000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x99b000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc1_rclk, 0x99a000, 15, 0),
+};
+
+static const struct msm_pinctrl_soc_data sdm660_pinctrl = {
+ .pins = sdm660_pins,
+ .npins = ARRAY_SIZE(sdm660_pins),
+ .functions = sdm660_functions,
+ .nfunctions = ARRAY_SIZE(sdm660_functions),
+ .groups = sdm660_groups,
+ .ngroups = ARRAY_SIZE(sdm660_groups),
+ .ngpios = 114,
+ .tiles = sdm660_tiles,
+ .ntiles = ARRAY_SIZE(sdm660_tiles),
+};
+
+static int sdm660_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sdm660_pinctrl);
+}
+
+static const struct of_device_id sdm660_pinctrl_of_match[] = {
+ { .compatible = "qcom,sdm660-pinctrl", },
+ { .compatible = "qcom,sdm630-pinctrl", },
+ { },
+};
+
+static struct platform_driver sdm660_pinctrl_driver = {
+ .driver = {
+ .name = "sdm660-pinctrl",
+ .of_match_table = sdm660_pinctrl_of_match,
+ },
+ .probe = sdm660_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sdm660_pinctrl_init(void)
+{
+ return platform_driver_register(&sdm660_pinctrl_driver);
+}
+arch_initcall(sdm660_pinctrl_init);
+
+static void __exit sdm660_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sdm660_pinctrl_driver);
+}
+module_exit(sdm660_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sdm660 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sdm660_pinctrl_of_match);
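A note on how the tables above knit together, for readers not steeped in the qcom pinctrl drivers: the msm_mux_* enum, the *_groups string arrays, and the FUNCTION()/PINGROUP() tables stay in sync purely by token pasting. The macros themselves are defined earlier in pinctrl-sdm660.c and are not part of this hunk; the sketch below is modelled on the sibling qcom drivers, so treat the exact expansion as an assumption:

        /* Assumed expansion, following the qcom pinctrl convention: the
         * msm_mux_* enum value indexes the function table, #fname becomes
         * the name pinmux exposes, and fname##_groups lists the pin groups
         * the function can be routed to.
         */
        #define FUNCTION(fname)                                 \
                [msm_mux_##fname] = {                           \
                        .name = #fname,                         \
                        .groups = fname##_groups,               \
                        .ngroups = ARRAY_SIZE(fname##_groups),  \
                }

This is also why the enum ends in a bare msm_mux__ sentinel: PINGROUP() rows pad unused mux slots with _, which pastes to msm_mux__ and selects no function.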
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index cf82db78e69e..a29efbe08f48 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -11,7 +11,7 @@
* GNU General Public License for more details.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
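The one-line include change above recurs across several drivers below: <linux/gpio.h> is the legacy consumer-side header, while a driver that provides GPIOs only needs <linux/gpio/driver.h> for struct gpio_chip and the registration helpers. A minimal provider-side sketch, with all my_* names illustrative rather than taken from these drivers:

        #include <linux/gpio/driver.h>

        static int my_gpio_get(struct gpio_chip *chip, unsigned int offset)
        {
                return 0;       /* would read the pad state from hardware */
        }

        static int my_register_chip(struct device *dev, struct gpio_chip *chip)
        {
                chip->label = "my-gpio";
                chip->parent = dev;
                chip->get = my_gpio_get;
                chip->base = -1;        /* ask gpiolib for dynamic numbering */
                chip->ngpio = 8;
                return devm_gpiochip_add_data(dev, chip, NULL);
        }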
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 6556dbeae65e..d6ddc47b57ec 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -11,7 +11,7 @@
* GNU General Public License for more details.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -319,6 +319,8 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
pad->function = function;
ret = pmic_mpp_write_mode_ctl(state, pad);
+ if (ret < 0)
+ return ret;

val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
@@ -343,13 +345,12 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- arg = pad->pullup == PMIC_MPP_PULL_UP_OPEN;
+ if (pad->pullup != PMIC_MPP_PULL_UP_OPEN)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_UP:
switch (pad->pullup) {
- case PMIC_MPP_PULL_UP_OPEN:
- arg = 0;
- break;
case PMIC_MPP_PULL_UP_0P6KOHM:
arg = 600;
break;
@@ -364,13 +365,17 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
}
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
- arg = !pad->is_enabled;
+ if (pad->is_enabled)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_POWER_SOURCE:
arg = pad->power_source;
break;
case PIN_CONFIG_INPUT_ENABLE:
- arg = pad->input_enabled;
+ if (!pad->input_enabled)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_OUTPUT:
arg = pad->out_value;
@@ -382,7 +387,9 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
arg = pad->amux_input;
break;
case PMIC_MPP_CONF_PAIRED:
- arg = pad->paired;
+ if (!pad->paired)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
arg = pad->drive_strength;
@@ -455,7 +462,7 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
pad->dtest = arg;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- arg = pad->drive_strength;
+ pad->drive_strength = arg;
break;
case PMIC_MPP_CONF_AMUX_ROUTE:
if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
@@ -502,6 +509,10 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (ret < 0)
return ret;
+ ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL, pad->drive_strength);
+ if (ret < 0)
+ return ret;
+
val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val);
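Two distinct fixes sit in the pinctrl-spmi-mpp.c hunks above. The drive-strength hunk repairs an inverted assignment (the old code read the pad instead of writing it) and then actually commits the value to PMIC_MPP_REG_SINK_CTL. The config_get hunks adopt the generic pinconf convention that a boolean parameter which is not in effect is reported with -EINVAL rather than arg = 0, so only active settings get shown or applied. A condensed sketch of that convention; example_* and pin_is_bias_disabled() are hypothetical stand-ins, while the pinconf_to_config_* helpers are the real generic pinconf accessors:

        #include <linux/pinctrl/pinconf-generic.h>

        static int example_pin_config_get(struct pinctrl_dev *pctldev,
                                          unsigned int pin,
                                          unsigned long *config)
        {
                enum pin_config_param param = pinconf_to_config_param(*config);
                u32 arg;

                switch (param) {
                case PIN_CONFIG_BIAS_DISABLE:
                        if (!pin_is_bias_disabled(pin))  /* hypothetical helper */
                                return -EINVAL;  /* means "not set", not failure */
                        arg = 1;                 /* booleans report 1 when set */
                        break;
                default:
                        return -ENOTSUPP;
                }

                *config = pinconf_to_config_packed(param, arg);
                return 0;
        }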
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index f53e32a9d8fc..6b30bef829ab 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -20,7 +20,7 @@
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/slab.h>
#include <linux/regmap.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -260,22 +260,32 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- arg = pin->bias == PM8XXX_GPIO_BIAS_NP;
+ if (pin->bias != PM8XXX_GPIO_BIAS_NP)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- arg = pin->bias == PM8XXX_GPIO_BIAS_PD;
+ if (pin->bias != PM8XXX_GPIO_BIAS_PD)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_UP:
- arg = pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30;
+ if (pin->bias > PM8XXX_GPIO_BIAS_PU_1P5_30)
+ return -EINVAL;
+ arg = 1;
break;
case PM8XXX_QCOM_PULL_UP_STRENGTH:
arg = pin->pull_up_strength;
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
- arg = pin->disable;
+ if (!pin->disable)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_INPUT_ENABLE:
- arg = pin->mode == PM8XXX_GPIO_MODE_INPUT;
+ if (pin->mode != PM8XXX_GPIO_MODE_INPUT)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_OUTPUT:
if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT)
@@ -290,10 +300,14 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
arg = pin->output_strength;
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
- arg = !pin->open_drain;
+ if (pin->open_drain)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- arg = pin->open_drain;
+ if (!pin->open_drain)
+ return -EINVAL;
+ arg = 1;
break;
default:
return -EINVAL;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 1e513bd6d0a9..1a7dab150ef6 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -20,7 +20,7 @@
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/slab.h>
#include <linux/regmap.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 698c7d8c9a08..ee6ee2338606 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -20,7 +20,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/irqdomain.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index e571bbd7139b..379f34a9a482 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -19,7 +19,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
/**
* enum pincfg_type - possible pin configuration types supported.
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 43d950c16528..e941ba60d4b7 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Renesas SH and SH Mobile PINCTRL drivers
#
@@ -39,6 +40,11 @@ config PINCTRL_PFC_R8A7743
depends on ARCH_R8A7743
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A7744
+ def_bool y
+ depends on ARCH_R8A7744
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7745
def_bool y
depends on ARCH_R8A7745
@@ -49,6 +55,16 @@ config PINCTRL_PFC_R8A77470
depends on ARCH_R8A77470
select PINCTRL_SH_PFC
+config PINCTRL_PFC_R8A774A1
+ def_bool y
+ depends on ARCH_R8A774A1
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A774C0
+ def_bool y
+ depends on ARCH_R8A774C0
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7778
def_bool y
depends on ARCH_R8A7778
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index d0b29c51c159..82ebb2a91ee0 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -5,8 +5,11 @@ obj-$(CONFIG_PINCTRL_PFC_EMEV2) += pfc-emev2.o
obj-$(CONFIG_PINCTRL_PFC_R8A73A4) += pfc-r8a73a4.o
obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
obj-$(CONFIG_PINCTRL_PFC_R8A7743) += pfc-r8a7791.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7744) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7745) += pfc-r8a7794.o
obj-$(CONFIG_PINCTRL_PFC_R8A77470) += pfc-r8a77470.o
+obj-$(CONFIG_PINCTRL_PFC_R8A774A1) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A774C0) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o
obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
obj-$(CONFIG_PINCTRL_PFC_R8A7790) += pfc-r8a7790.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index c671c3c4aca6..a10f7050a74f 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Pin Control and GPIO driver for SuperH Pin Function Controller.
*
@@ -5,10 +6,6 @@
*
* Copyright (C) 2008 Magnus Damm
* Copyright (C) 2009 - 2012 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#define DRV_NAME "sh-pfc"
@@ -497,6 +494,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7743_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7744
+ {
+ .compatible = "renesas,pfc-r8a7744",
+ .data = &r8a7744_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A7745
{
.compatible = "renesas,pfc-r8a7745",
@@ -509,6 +512,18 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a77470_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A774A1
+ {
+ .compatible = "renesas,pfc-r8a774a1",
+ .data = &r8a774a1_pinmux_info,
+ },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A774C0
+ {
+ .compatible = "renesas,pfc-r8a774c0",
+ .data = &r8a774c0_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A7778
{
.compatible = "renesas,pfc-r8a7778",
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index 5af8ee26c03e..b5b1d163e98a 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* SuperH Pin Function Controller support.
*
* Copyright (C) 2012 Renesas Solutions Corp.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#ifndef __SH_PFC_CORE_H__
#define __SH_PFC_CORE_H__
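The license-header churn in core.c, core.h, and the pfc-* files that follow is the kernel's SPDX conversion: the machine-readable identifier replaces the GPL boilerplate paragraphs it is equivalent to. By convention the identifier is a C99 comment on the first line of .c files and a block comment in headers; schematically:

        // SPDX-License-Identifier: GPL-2.0
        /* example.c: sources carry the tag as a // comment on line 1 */

        /* SPDX-License-Identifier: GPL-2.0 */
        /* example.h: headers keep the block-comment form so the tag
         * survives in contexts where C99 comments are not accepted.
         */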
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 6ffdc6beb203..4f3a34ee1cd4 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Pin Function Controller GPIO driver.
*
* Copyright (C) 2008 Magnus Damm
* Copyright (C) 2009 - 2012 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/device.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-emev2.c b/drivers/pinctrl/sh-pfc/pfc-emev2.c
index 1cbbe04d7df6..dc271c3243df 100644
--- a/drivers/pinctrl/sh-pfc/pfc-emev2.c
+++ b/drivers/pinctrl/sh-pfc/pfc-emev2.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Pin Function Controller Support
*
* Copyright (C) 2015 Niklas Söderlund
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
index ff5655dee67e..5acbacb3727f 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012-2013 Renesas Solutions Corp.
* Copyright (C) 2013 Magnus Damm
* Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/io.h>
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index 35f436bcb849..d4f81491996d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A7740 processor support
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2011 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/io.h>
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77470.c b/drivers/pinctrl/sh-pfc/pfc-r8a77470.c
index 9d3ed438ec7b..3d36e5f4ca7b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77470.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77470.c
@@ -1093,6 +1093,233 @@ static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
+/* - AVB -------------------------------------------------------------------- */
+static const unsigned int avb_col_pins[] = {
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int avb_col_mux[] = {
+ AVB_COL_MARK,
+};
+static const unsigned int avb_crs_pins[] = {
+ RCAR_GP_PIN(5, 17),
+};
+static const unsigned int avb_crs_mux[] = {
+ AVB_CRS_MARK,
+};
+static const unsigned int avb_link_pins[] = {
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int avb_link_mux[] = {
+ AVB_LINK_MARK,
+};
+static const unsigned int avb_magic_pins[] = {
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int avb_magic_mux[] = {
+ AVB_MAGIC_MARK,
+};
+static const unsigned int avb_phy_int_pins[] = {
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int avb_phy_int_mux[] = {
+ AVB_PHY_INT_MARK,
+};
+static const unsigned int avb_mdio_pins[] = {
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 13),
+};
+static const unsigned int avb_mdio_mux[] = {
+ AVB_MDC_MARK, AVB_MDIO_MARK,
+};
+static const unsigned int avb_mii_tx_rx_pins[] = {
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
+ RCAR_GP_PIN(3, 16), RCAR_GP_PIN(3, 27), RCAR_GP_PIN(3, 13),
+
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 1),
+ RCAR_GP_PIN(3, 10),
+};
+static const unsigned int avb_mii_tx_rx_mux[] = {
+ AVB_TX_CLK_MARK, AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK,
+ AVB_TXD3_MARK, AVB_TX_EN_MARK,
+
+ AVB_RX_CLK_MARK, AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK,
+ AVB_RXD3_MARK, AVB_RX_DV_MARK, AVB_RX_ER_MARK,
+};
+static const unsigned int avb_mii_tx_er_pins[] = {
+ RCAR_GP_PIN(5, 23),
+};
+static const unsigned int avb_mii_tx_er_mux[] = {
+ AVB_TX_ER_MARK,
+};
+static const unsigned int avb_gmii_tx_rx_pins[] = {
+ RCAR_GP_PIN(4, 1), RCAR_GP_PIN(3, 11), RCAR_GP_PIN(3, 12),
+ RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
+ RCAR_GP_PIN(3, 27), RCAR_GP_PIN(3, 28), RCAR_GP_PIN(3, 29),
+ RCAR_GP_PIN(4, 0), RCAR_GP_PIN(5, 22), RCAR_GP_PIN(3, 13),
+ RCAR_GP_PIN(5, 23),
+
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 6),
+ RCAR_GP_PIN(3, 7), RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 10),
+};
+static const unsigned int avb_gmii_tx_rx_mux[] = {
+ AVB_GTX_CLK_MARK, AVB_GTXREFCLK_MARK, AVB_TX_CLK_MARK, AVB_TXD0_MARK,
+ AVB_TXD1_MARK, AVB_TXD2_MARK, AVB_TXD3_MARK, AVB_TXD4_MARK,
+ AVB_TXD5_MARK, AVB_TXD6_MARK, AVB_TXD7_MARK, AVB_TX_EN_MARK,
+ AVB_TX_ER_MARK,
+
+ AVB_RX_CLK_MARK, AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK,
+ AVB_RXD3_MARK, AVB_RXD4_MARK, AVB_RXD5_MARK, AVB_RXD6_MARK,
+ AVB_RXD7_MARK, AVB_RX_DV_MARK, AVB_RX_ER_MARK,
+};
+static const unsigned int avb_avtp_match_a_pins[] = {
+ RCAR_GP_PIN(1, 15),
+};
+static const unsigned int avb_avtp_match_a_mux[] = {
+ AVB_AVTP_MATCH_A_MARK,
+};
+static const unsigned int avb_avtp_capture_a_pins[] = {
+ RCAR_GP_PIN(1, 14),
+};
+static const unsigned int avb_avtp_capture_a_mux[] = {
+ AVB_AVTP_CAPTURE_A_MARK,
+};
+static const unsigned int avb_avtp_match_b_pins[] = {
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int avb_avtp_match_b_mux[] = {
+ AVB_AVTP_MATCH_B_MARK,
+};
+static const unsigned int avb_avtp_capture_b_pins[] = {
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int avb_avtp_capture_b_mux[] = {
+ AVB_AVTP_CAPTURE_B_MARK,
+};
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du0_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 5),
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 2),
+ RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 10),
+ RCAR_GP_PIN(2, 23), RCAR_GP_PIN(2, 22), RCAR_GP_PIN(2, 21),
+ RCAR_GP_PIN(2, 20), RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 18),
+};
+static const unsigned int du0_rgb666_mux[] = {
+ DU0_DR7_MARK, DU0_DR6_MARK, DU0_DR5_MARK, DU0_DR4_MARK,
+ DU0_DR3_MARK, DU0_DR2_MARK,
+ DU0_DG7_MARK, DU0_DG6_MARK, DU0_DG5_MARK, DU0_DG4_MARK,
+ DU0_DG3_MARK, DU0_DG2_MARK,
+ DU0_DB7_MARK, DU0_DB6_MARK, DU0_DB5_MARK, DU0_DB4_MARK,
+ DU0_DB3_MARK, DU0_DB2_MARK,
+};
+static const unsigned int du0_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 5),
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 2),
+ RCAR_GP_PIN(2, 1), RCAR_GP_PIN(2, 0),
+ RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 10),
+ RCAR_GP_PIN(2, 9), RCAR_GP_PIN(2, 8),
+ RCAR_GP_PIN(2, 23), RCAR_GP_PIN(2, 22), RCAR_GP_PIN(2, 21),
+ RCAR_GP_PIN(2, 20), RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 18),
+ RCAR_GP_PIN(2, 17), RCAR_GP_PIN(2, 16),
+};
+static const unsigned int du0_rgb888_mux[] = {
+ DU0_DR7_MARK, DU0_DR6_MARK, DU0_DR5_MARK, DU0_DR4_MARK,
+ DU0_DR3_MARK, DU0_DR2_MARK, DU0_DR1_MARK, DU0_DR0_MARK,
+ DU0_DG7_MARK, DU0_DG6_MARK, DU0_DG5_MARK, DU0_DG4_MARK,
+ DU0_DG3_MARK, DU0_DG2_MARK, DU0_DG1_MARK, DU0_DG0_MARK,
+ DU0_DB7_MARK, DU0_DB6_MARK, DU0_DB5_MARK, DU0_DB4_MARK,
+ DU0_DB3_MARK, DU0_DB2_MARK, DU0_DB1_MARK, DU0_DB0_MARK,
+};
+static const unsigned int du0_clk0_out_pins[] = {
+ /* DOTCLKOUT0 */
+ RCAR_GP_PIN(2, 25),
+};
+static const unsigned int du0_clk0_out_mux[] = {
+ DU0_DOTCLKOUT0_MARK
+};
+static const unsigned int du0_clk1_out_pins[] = {
+ /* DOTCLKOUT1 */
+ RCAR_GP_PIN(2, 26),
+};
+static const unsigned int du0_clk1_out_mux[] = {
+ DU0_DOTCLKOUT1_MARK
+};
+static const unsigned int du0_clk_in_pins[] = {
+ /* CLKIN */
+ RCAR_GP_PIN(2, 24),
+};
+static const unsigned int du0_clk_in_mux[] = {
+ DU0_DOTCLKIN_MARK
+};
+static const unsigned int du0_sync_pins[] = {
+ /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+ RCAR_GP_PIN(2, 28), RCAR_GP_PIN(2, 27),
+};
+static const unsigned int du0_sync_mux[] = {
+ DU0_EXVSYNC_DU0_VSYNC_MARK, DU0_EXHSYNC_DU0_HSYNC_MARK
+};
+static const unsigned int du0_oddf_pins[] = {
+ /* EXODDF/ODDF/DISP/CDE */
+ RCAR_GP_PIN(2, 29),
+};
+static const unsigned int du0_oddf_mux[] = {
+ DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK,
+};
+static const unsigned int du0_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(2, 31),
+};
+static const unsigned int du0_cde_mux[] = {
+ DU0_CDE_MARK,
+};
+static const unsigned int du0_disp_pins[] = {
+ /* DISP */
+ RCAR_GP_PIN(2, 30),
+};
+static const unsigned int du0_disp_mux[] = {
+ DU0_DISP_MARK
+};
+/* - I2C4 ------------------------------------------------------------------- */
+static const unsigned int i2c4_a_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(4, 10), RCAR_GP_PIN(4, 11),
+};
+static const unsigned int i2c4_a_mux[] = {
+ SCL4_A_MARK, SDA4_A_MARK,
+};
+static const unsigned int i2c4_b_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 30), RCAR_GP_PIN(5, 31),
+};
+static const unsigned int i2c4_b_mux[] = {
+ SCL4_B_MARK, SDA4_B_MARK,
+};
+static const unsigned int i2c4_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5),
+};
+static const unsigned int i2c4_c_mux[] = {
+ SCL4_C_MARK, SDA4_C_MARK,
+};
+static const unsigned int i2c4_d_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(2, 16), RCAR_GP_PIN(2, 17),
+};
+static const unsigned int i2c4_d_mux[] = {
+ SCL4_D_MARK, SDA4_D_MARK,
+};
+static const unsigned int i2c4_e_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int i2c4_e_mux[] = {
+ SCL4_E_MARK, SDA4_E_MARK,
+};
/* - MMC -------------------------------------------------------------------- */
static const unsigned int mmc_data1_pins[] = {
/* D0 */
@@ -1130,6 +1357,30 @@ static const unsigned int mmc_ctrl_pins[] = {
static const unsigned int mmc_ctrl_mux[] = {
MMC0_CLK_SDHI1_CLK_MARK, MMC0_CMD_SDHI1_CMD_MARK,
};
+/* - QSPI ------------------------------------------------------------------- */
+static const unsigned int qspi0_ctrl_pins[] = {
+ /* SPCLK, SSL */
+ RCAR_GP_PIN(1, 16), RCAR_GP_PIN(1, 21),
+};
+static const unsigned int qspi0_ctrl_mux[] = {
+ QSPI0_SPCLK_MARK, QSPI0_SSL_MARK,
+};
+static const unsigned int qspi0_data2_pins[] = {
+ /* MOSI_IO0, MISO_IO1 */
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 18),
+};
+static const unsigned int qspi0_data2_mux[] = {
+ QSPI0_MOSI_QSPI0_IO0_MARK, QSPI0_MISO_QSPI0_IO1_MARK,
+};
+static const unsigned int qspi0_data4_pins[] = {
+ /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 19),
+ RCAR_GP_PIN(1, 20),
+};
+static const unsigned int qspi0_data4_mux[] = {
+ QSPI0_MOSI_QSPI0_IO0_MARK, QSPI0_MISO_QSPI0_IO1_MARK,
+ QSPI0_IO2_MARK, QSPI0_IO3_MARK,
+};
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_a_pins[] = {
/* RX, TX */
@@ -1368,12 +1619,97 @@ static const unsigned int scif_clk_b_pins[] = {
static const unsigned int scif_clk_b_mux[] = {
SCIF_CLK_B_MARK,
};
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi2_data1_mux[] = {
+ SD2_DAT0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 16), RCAR_GP_PIN(4, 17),
+ RCAR_GP_PIN(4, 18), RCAR_GP_PIN(4, 19),
+};
+static const unsigned int sdhi2_data4_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK, SD2_DAT2_MARK, SD2_DAT3_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 15),
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SD2_CLK_MARK, SD2_CMD_MARK,
+};
+static const unsigned int sdhi2_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 20),
+};
+static const unsigned int sdhi2_cd_mux[] = {
+ SD2_CD_MARK,
+};
+static const unsigned int sdhi2_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 21),
+};
+static const unsigned int sdhi2_wp_mux[] = {
+ SD2_WP_MARK,
+};
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+ RCAR_GP_PIN(0, 0), /* PWEN */
+ RCAR_GP_PIN(0, 1), /* OVC */
+};
+static const unsigned int usb0_mux[] = {
+ USB0_PWEN_MARK,
+ USB0_OVC_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pins[] = {
+ RCAR_GP_PIN(0, 2), /* PWEN */
+ RCAR_GP_PIN(0, 3), /* OVC */
+};
+static const unsigned int usb1_mux[] = {
+ USB1_PWEN_MARK,
+ USB1_OVC_MARK,
+};
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(avb_col),
+ SH_PFC_PIN_GROUP(avb_crs),
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii_tx_rx),
+ SH_PFC_PIN_GROUP(avb_mii_tx_er),
+ SH_PFC_PIN_GROUP(avb_gmii_tx_rx),
+ SH_PFC_PIN_GROUP(avb_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match_b),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(du0_rgb666),
+ SH_PFC_PIN_GROUP(du0_rgb888),
+ SH_PFC_PIN_GROUP(du0_clk0_out),
+ SH_PFC_PIN_GROUP(du0_clk1_out),
+ SH_PFC_PIN_GROUP(du0_clk_in),
+ SH_PFC_PIN_GROUP(du0_sync),
+ SH_PFC_PIN_GROUP(du0_oddf),
+ SH_PFC_PIN_GROUP(du0_cde),
+ SH_PFC_PIN_GROUP(du0_disp),
+ SH_PFC_PIN_GROUP(i2c4_a),
+ SH_PFC_PIN_GROUP(i2c4_b),
+ SH_PFC_PIN_GROUP(i2c4_c),
+ SH_PFC_PIN_GROUP(i2c4_d),
+ SH_PFC_PIN_GROUP(i2c4_e),
SH_PFC_PIN_GROUP(mmc_data1),
SH_PFC_PIN_GROUP(mmc_data4),
SH_PFC_PIN_GROUP(mmc_data8),
SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(qspi0_ctrl),
+ SH_PFC_PIN_GROUP(qspi0_data2),
+ SH_PFC_PIN_GROUP(qspi0_data4),
SH_PFC_PIN_GROUP(scif0_data_a),
SH_PFC_PIN_GROUP(scif0_data_b),
SH_PFC_PIN_GROUP(scif0_data_c),
@@ -1407,6 +1743,49 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif5_data_f),
SH_PFC_PIN_GROUP(scif_clk_a),
SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd),
+ SH_PFC_PIN_GROUP(sdhi2_wp),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
+};
+
+static const char * const avb_groups[] = {
+ "avb_col",
+ "avb_crs",
+ "avb_link",
+ "avb_magic",
+ "avb_phy_int",
+ "avb_mdio",
+ "avb_mii_tx_rx",
+ "avb_mii_tx_er",
+ "avb_gmii_tx_rx",
+ "avb_avtp_match_a",
+ "avb_avtp_capture_a",
+ "avb_avtp_match_b",
+ "avb_avtp_capture_b",
+};
+
+static const char * const du0_groups[] = {
+ "du0_rgb666",
+ "du0_rgb888",
+ "du0_clk0_out",
+ "du0_clk1_out",
+ "du0_clk_in",
+ "du0_sync",
+ "du0_oddf",
+ "du0_cde",
+ "du0_disp",
+};
+
+static const char * const i2c4_groups[] = {
+ "i2c4_a",
+ "i2c4_b",
+ "i2c4_c",
+ "i2c4_d",
+ "i2c4_e",
};
static const char * const mmc_groups[] = {
@@ -1416,6 +1795,12 @@ static const char * const mmc_groups[] = {
"mmc_ctrl",
};
+static const char * const qspi0_groups[] = {
+ "qspi0_ctrl",
+ "qspi0_data2",
+ "qspi0_data4",
+};
+
static const char * const scif0_groups[] = {
"scif0_data_a",
"scif0_data_b",
@@ -1470,8 +1855,28 @@ static const char * const scif_clk_groups[] = {
"scif_clk_b",
};
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_ctrl",
+ "sdhi2_cd",
+ "sdhi2_wp",
+};
+
+static const char * const usb0_groups[] = {
+ "usb0",
+};
+
+static const char * const usb1_groups[] = {
+ "usb1",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(du0),
+ SH_PFC_FUNCTION(i2c4),
SH_PFC_FUNCTION(mmc),
+ SH_PFC_FUNCTION(qspi0),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -1479,6 +1884,9 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif5),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
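
Every group added in this file follows the same two-array convention: a *_pins[] array naming the physical pins and a parallel *_mux[] array of function marks. SH_PFC_PIN_GROUP() ties the pair together by name; a minimal sketch of its expansion (simplified from drivers/pinctrl/sh-pfc/sh_pfc.h, which additionally build-time checks that both arrays have the same length):

	#define SH_PFC_PIN_GROUP(n)				\
		{						\
			.name = #n,				\
			.pins = n##_pins,			\
			.mux = n##_mux,				\
			.nr_pins = ARRAY_SIZE(n##_pins),	\
		}
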
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 00d61d175249..6bcdb4b5e69e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7778 processor support - PFC hardware block
*
@@ -9,15 +10,6 @@
* based on
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2011 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
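
This hunk, like the license-only hunks in the next several files, replaces the verbose GPL notice with an SPDX tag. The resulting header keeps only the tag and the copyright lines, in the shape sketched below (illustrative placeholders, not a literal file):

	// SPDX-License-Identifier: GPL-2.0
	/*
	 * <soc> processor support - PFC hardware block
	 *
	 * Copyright (C) <years> <holders>
	 */
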
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index 5bef934f823d..64bace100316 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7779 processor support - PFC hardware block
*
* Copyright (C) 2011, 2013 Renesas Solutions Corp.
* Copyright (C) 2011 Magnus Damm
* Copyright (C) 2013 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index f6332f247368..ab7a35392cd8 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A7790 processor support
*
@@ -5,20 +6,6 @@
* Copyright (C) 2013 Magnus Damm
* Copyright (C) 2012 Renesas Solutions Corp.
* Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/io.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 5811784d88cb..209f74a6e6ce 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7791/r8a7743 processor support - PFC hardware block.
*
* Copyright (C) 2013 Renesas Electronics Corporation
* Copyright (C) 2014-2017 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -4458,7 +4455,7 @@ static const unsigned int vin2_clk_mux[] = {
static const struct {
struct sh_pfc_pin_group common[346];
- struct sh_pfc_pin_group r8a779x[9];
+ struct sh_pfc_pin_group automotive[9];
} pinmux_groups = {
.common = {
SH_PFC_PIN_GROUP(audio_clk_a),
@@ -4808,7 +4805,7 @@ static const struct {
SH_PFC_PIN_GROUP(vin2_clkenb),
SH_PFC_PIN_GROUP(vin2_clk),
},
- .r8a779x = {
+ .automotive = {
SH_PFC_PIN_GROUP(adi_common),
SH_PFC_PIN_GROUP(adi_chsel0),
SH_PFC_PIN_GROUP(adi_chsel1),
@@ -5365,7 +5362,7 @@ static const char * const vin2_groups[] = {
static const struct {
struct sh_pfc_function common[58];
- struct sh_pfc_function r8a779x[2];
+ struct sh_pfc_function automotive[2];
} pinmux_functions = {
.common = {
SH_PFC_FUNCTION(audio_clk),
@@ -5427,7 +5424,7 @@ static const struct {
SH_PFC_FUNCTION(vin1),
SH_PFC_FUNCTION(vin2),
},
- .r8a779x = {
+ .automotive = {
SH_PFC_FUNCTION(adi),
SH_PFC_FUNCTION(mlb),
}
@@ -6634,6 +6631,28 @@ const struct sh_pfc_soc_info r8a7743_pinmux_info = {
};
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7744
+const struct sh_pfc_soc_info r8a7744_pinmux_info = {
+ .name = "r8a77440_pfc",
+ .ops = &r8a7791_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common),
+
+ .cfg_regs = pinmux_config_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
#ifdef CONFIG_PINCTRL_PFC_R8A7791
const struct sh_pfc_soc_info r8a7791_pinmux_info = {
.name = "r8a77910_pfc",
@@ -6646,10 +6665,10 @@ const struct sh_pfc_soc_info r8a7791_pinmux_info = {
.nr_pins = ARRAY_SIZE(pinmux_pins),
.groups = pinmux_groups.common,
.nr_groups = ARRAY_SIZE(pinmux_groups.common) +
- ARRAY_SIZE(pinmux_groups.r8a779x),
+ ARRAY_SIZE(pinmux_groups.automotive),
.functions = pinmux_functions.common,
.nr_functions = ARRAY_SIZE(pinmux_functions.common) +
- ARRAY_SIZE(pinmux_functions.r8a779x),
+ ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
@@ -6670,10 +6689,10 @@ const struct sh_pfc_soc_info r8a7793_pinmux_info = {
.nr_pins = ARRAY_SIZE(pinmux_pins),
.groups = pinmux_groups.common,
.nr_groups = ARRAY_SIZE(pinmux_groups.common) +
- ARRAY_SIZE(pinmux_groups.r8a779x),
+ ARRAY_SIZE(pinmux_groups.automotive),
.functions = pinmux_functions.common,
.nr_functions = ARRAY_SIZE(pinmux_functions.common) +
- ARRAY_SIZE(pinmux_functions.r8a779x),
+ ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
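
Renaming r8a779x to automotive makes the intent of the split explicit: the R-Car parts (r8a7791/r8a7793) also expose the ADI and MLB groups, while the pin-compatible RZ/G1 parts (r8a7743 and the newly added r8a7744) list only .common. The combined walk works because the two same-typed arrays inside one struct sit back to back, so .groups can point at pinmux_groups.common while nr_groups spans both halves. A minimal sketch of the trick (illustrative types, not the driver's own):

	static const struct {
		int common[3];
		int automotive[2];
	} tbl = {
		.common     = { 1, 2, 3 },
		.automotive = { 4, 5 },
	};

	/*
	 * In practice no padding appears between same-typed array members,
	 * so a pointer to the first element of .common can be walked
	 * across both halves:
	 *
	 *   R-Car: .groups = tbl.common, nr = 3 + 2 -> sees all 5 entries
	 *   RZ/G:  .groups = tbl.common, nr = 3     -> stops before .automotive
	 *
	 * Strictly, indexing past common[2] is out of bounds by the letter
	 * of the C standard; the driver relies on the struct layout here.
	 */
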
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
index cc3597f66605..bf0681b38181 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7792 processor support - PFC hardware block.
*
* Copyright (C) 2013-2014 Renesas Electronics Corporation
* Copyright (C) 2016 Cogent Embedded, Inc., <source@cogentembedded.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index 164002437594..6d1e5fdc03f8 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* r8a7794/r8a7745 processor support - PFC hardware block.
*
* Copyright (C) 2014-2015 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
* Copyright (C) 2015-2017 Cogent Embedded, Inc. <source@cogentembedded.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index a6c5d50557e6..8c7de44615d1 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A7795 ES1.x processor support - PFC hardware block.
*
* Copyright (C) 2015-2017 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 4f55b1562ad4..0af737d11403 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A7795 ES2.0+ processor support - PFC hardware block.
*
* Copyright (C) 2015-2017 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 3ea133cfb241..3a6d21d87107 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A7796 processor support - PFC hardware block.
*
@@ -8,10 +9,6 @@
* R-Car Gen3 processor support - PFC hardware block.
*
* Copyright (C) 2015 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/kernel.h>
@@ -4126,347 +4123,354 @@ static const unsigned int vin5_clk_mux[] = {
VI5_CLK_MARK,
};
-static const struct sh_pfc_pin_group pinmux_groups[] = {
- SH_PFC_PIN_GROUP(audio_clk_a_a),
- SH_PFC_PIN_GROUP(audio_clk_a_b),
- SH_PFC_PIN_GROUP(audio_clk_a_c),
- SH_PFC_PIN_GROUP(audio_clk_b_a),
- SH_PFC_PIN_GROUP(audio_clk_b_b),
- SH_PFC_PIN_GROUP(audio_clk_c_a),
- SH_PFC_PIN_GROUP(audio_clk_c_b),
- SH_PFC_PIN_GROUP(audio_clkout_a),
- SH_PFC_PIN_GROUP(audio_clkout_b),
- SH_PFC_PIN_GROUP(audio_clkout_c),
- SH_PFC_PIN_GROUP(audio_clkout_d),
- SH_PFC_PIN_GROUP(audio_clkout1_a),
- SH_PFC_PIN_GROUP(audio_clkout1_b),
- SH_PFC_PIN_GROUP(audio_clkout2_a),
- SH_PFC_PIN_GROUP(audio_clkout2_b),
- SH_PFC_PIN_GROUP(audio_clkout3_a),
- SH_PFC_PIN_GROUP(audio_clkout3_b),
- SH_PFC_PIN_GROUP(avb_link),
- SH_PFC_PIN_GROUP(avb_magic),
- SH_PFC_PIN_GROUP(avb_phy_int),
- SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */
- SH_PFC_PIN_GROUP(avb_mdio),
- SH_PFC_PIN_GROUP(avb_mii),
- SH_PFC_PIN_GROUP(avb_avtp_pps),
- SH_PFC_PIN_GROUP(avb_avtp_match_a),
- SH_PFC_PIN_GROUP(avb_avtp_capture_a),
- SH_PFC_PIN_GROUP(avb_avtp_match_b),
- SH_PFC_PIN_GROUP(avb_avtp_capture_b),
- SH_PFC_PIN_GROUP(can0_data_a),
- SH_PFC_PIN_GROUP(can0_data_b),
- SH_PFC_PIN_GROUP(can1_data),
- SH_PFC_PIN_GROUP(can_clk),
- SH_PFC_PIN_GROUP(canfd0_data_a),
- SH_PFC_PIN_GROUP(canfd0_data_b),
- SH_PFC_PIN_GROUP(canfd1_data),
- SH_PFC_PIN_GROUP(drif0_ctrl_a),
- SH_PFC_PIN_GROUP(drif0_data0_a),
- SH_PFC_PIN_GROUP(drif0_data1_a),
- SH_PFC_PIN_GROUP(drif0_ctrl_b),
- SH_PFC_PIN_GROUP(drif0_data0_b),
- SH_PFC_PIN_GROUP(drif0_data1_b),
- SH_PFC_PIN_GROUP(drif0_ctrl_c),
- SH_PFC_PIN_GROUP(drif0_data0_c),
- SH_PFC_PIN_GROUP(drif0_data1_c),
- SH_PFC_PIN_GROUP(drif1_ctrl_a),
- SH_PFC_PIN_GROUP(drif1_data0_a),
- SH_PFC_PIN_GROUP(drif1_data1_a),
- SH_PFC_PIN_GROUP(drif1_ctrl_b),
- SH_PFC_PIN_GROUP(drif1_data0_b),
- SH_PFC_PIN_GROUP(drif1_data1_b),
- SH_PFC_PIN_GROUP(drif1_ctrl_c),
- SH_PFC_PIN_GROUP(drif1_data0_c),
- SH_PFC_PIN_GROUP(drif1_data1_c),
- SH_PFC_PIN_GROUP(drif2_ctrl_a),
- SH_PFC_PIN_GROUP(drif2_data0_a),
- SH_PFC_PIN_GROUP(drif2_data1_a),
- SH_PFC_PIN_GROUP(drif2_ctrl_b),
- SH_PFC_PIN_GROUP(drif2_data0_b),
- SH_PFC_PIN_GROUP(drif2_data1_b),
- SH_PFC_PIN_GROUP(drif3_ctrl_a),
- SH_PFC_PIN_GROUP(drif3_data0_a),
- SH_PFC_PIN_GROUP(drif3_data1_a),
- SH_PFC_PIN_GROUP(drif3_ctrl_b),
- SH_PFC_PIN_GROUP(drif3_data0_b),
- SH_PFC_PIN_GROUP(drif3_data1_b),
- SH_PFC_PIN_GROUP(du_rgb666),
- SH_PFC_PIN_GROUP(du_rgb888),
- SH_PFC_PIN_GROUP(du_clk_out_0),
- SH_PFC_PIN_GROUP(du_clk_out_1),
- SH_PFC_PIN_GROUP(du_sync),
- SH_PFC_PIN_GROUP(du_oddf),
- SH_PFC_PIN_GROUP(du_cde),
- SH_PFC_PIN_GROUP(du_disp),
- SH_PFC_PIN_GROUP(hdmi0_cec),
- SH_PFC_PIN_GROUP(hscif0_data),
- SH_PFC_PIN_GROUP(hscif0_clk),
- SH_PFC_PIN_GROUP(hscif0_ctrl),
- SH_PFC_PIN_GROUP(hscif1_data_a),
- SH_PFC_PIN_GROUP(hscif1_clk_a),
- SH_PFC_PIN_GROUP(hscif1_ctrl_a),
- SH_PFC_PIN_GROUP(hscif1_data_b),
- SH_PFC_PIN_GROUP(hscif1_clk_b),
- SH_PFC_PIN_GROUP(hscif1_ctrl_b),
- SH_PFC_PIN_GROUP(hscif2_data_a),
- SH_PFC_PIN_GROUP(hscif2_clk_a),
- SH_PFC_PIN_GROUP(hscif2_ctrl_a),
- SH_PFC_PIN_GROUP(hscif2_data_b),
- SH_PFC_PIN_GROUP(hscif2_clk_b),
- SH_PFC_PIN_GROUP(hscif2_ctrl_b),
- SH_PFC_PIN_GROUP(hscif2_data_c),
- SH_PFC_PIN_GROUP(hscif2_clk_c),
- SH_PFC_PIN_GROUP(hscif2_ctrl_c),
- SH_PFC_PIN_GROUP(hscif3_data_a),
- SH_PFC_PIN_GROUP(hscif3_clk),
- SH_PFC_PIN_GROUP(hscif3_ctrl),
- SH_PFC_PIN_GROUP(hscif3_data_b),
- SH_PFC_PIN_GROUP(hscif3_data_c),
- SH_PFC_PIN_GROUP(hscif3_data_d),
- SH_PFC_PIN_GROUP(hscif4_data_a),
- SH_PFC_PIN_GROUP(hscif4_clk),
- SH_PFC_PIN_GROUP(hscif4_ctrl),
- SH_PFC_PIN_GROUP(hscif4_data_b),
- SH_PFC_PIN_GROUP(i2c1_a),
- SH_PFC_PIN_GROUP(i2c1_b),
- SH_PFC_PIN_GROUP(i2c2_a),
- SH_PFC_PIN_GROUP(i2c2_b),
- SH_PFC_PIN_GROUP(i2c6_a),
- SH_PFC_PIN_GROUP(i2c6_b),
- SH_PFC_PIN_GROUP(i2c6_c),
- SH_PFC_PIN_GROUP(intc_ex_irq0),
- SH_PFC_PIN_GROUP(intc_ex_irq1),
- SH_PFC_PIN_GROUP(intc_ex_irq2),
- SH_PFC_PIN_GROUP(intc_ex_irq3),
- SH_PFC_PIN_GROUP(intc_ex_irq4),
- SH_PFC_PIN_GROUP(intc_ex_irq5),
- SH_PFC_PIN_GROUP(msiof0_clk),
- SH_PFC_PIN_GROUP(msiof0_sync),
- SH_PFC_PIN_GROUP(msiof0_ss1),
- SH_PFC_PIN_GROUP(msiof0_ss2),
- SH_PFC_PIN_GROUP(msiof0_txd),
- SH_PFC_PIN_GROUP(msiof0_rxd),
- SH_PFC_PIN_GROUP(msiof1_clk_a),
- SH_PFC_PIN_GROUP(msiof1_sync_a),
- SH_PFC_PIN_GROUP(msiof1_ss1_a),
- SH_PFC_PIN_GROUP(msiof1_ss2_a),
- SH_PFC_PIN_GROUP(msiof1_txd_a),
- SH_PFC_PIN_GROUP(msiof1_rxd_a),
- SH_PFC_PIN_GROUP(msiof1_clk_b),
- SH_PFC_PIN_GROUP(msiof1_sync_b),
- SH_PFC_PIN_GROUP(msiof1_ss1_b),
- SH_PFC_PIN_GROUP(msiof1_ss2_b),
- SH_PFC_PIN_GROUP(msiof1_txd_b),
- SH_PFC_PIN_GROUP(msiof1_rxd_b),
- SH_PFC_PIN_GROUP(msiof1_clk_c),
- SH_PFC_PIN_GROUP(msiof1_sync_c),
- SH_PFC_PIN_GROUP(msiof1_ss1_c),
- SH_PFC_PIN_GROUP(msiof1_ss2_c),
- SH_PFC_PIN_GROUP(msiof1_txd_c),
- SH_PFC_PIN_GROUP(msiof1_rxd_c),
- SH_PFC_PIN_GROUP(msiof1_clk_d),
- SH_PFC_PIN_GROUP(msiof1_sync_d),
- SH_PFC_PIN_GROUP(msiof1_ss1_d),
- SH_PFC_PIN_GROUP(msiof1_ss2_d),
- SH_PFC_PIN_GROUP(msiof1_txd_d),
- SH_PFC_PIN_GROUP(msiof1_rxd_d),
- SH_PFC_PIN_GROUP(msiof1_clk_e),
- SH_PFC_PIN_GROUP(msiof1_sync_e),
- SH_PFC_PIN_GROUP(msiof1_ss1_e),
- SH_PFC_PIN_GROUP(msiof1_ss2_e),
- SH_PFC_PIN_GROUP(msiof1_txd_e),
- SH_PFC_PIN_GROUP(msiof1_rxd_e),
- SH_PFC_PIN_GROUP(msiof1_clk_f),
- SH_PFC_PIN_GROUP(msiof1_sync_f),
- SH_PFC_PIN_GROUP(msiof1_ss1_f),
- SH_PFC_PIN_GROUP(msiof1_ss2_f),
- SH_PFC_PIN_GROUP(msiof1_txd_f),
- SH_PFC_PIN_GROUP(msiof1_rxd_f),
- SH_PFC_PIN_GROUP(msiof1_clk_g),
- SH_PFC_PIN_GROUP(msiof1_sync_g),
- SH_PFC_PIN_GROUP(msiof1_ss1_g),
- SH_PFC_PIN_GROUP(msiof1_ss2_g),
- SH_PFC_PIN_GROUP(msiof1_txd_g),
- SH_PFC_PIN_GROUP(msiof1_rxd_g),
- SH_PFC_PIN_GROUP(msiof2_clk_a),
- SH_PFC_PIN_GROUP(msiof2_sync_a),
- SH_PFC_PIN_GROUP(msiof2_ss1_a),
- SH_PFC_PIN_GROUP(msiof2_ss2_a),
- SH_PFC_PIN_GROUP(msiof2_txd_a),
- SH_PFC_PIN_GROUP(msiof2_rxd_a),
- SH_PFC_PIN_GROUP(msiof2_clk_b),
- SH_PFC_PIN_GROUP(msiof2_sync_b),
- SH_PFC_PIN_GROUP(msiof2_ss1_b),
- SH_PFC_PIN_GROUP(msiof2_ss2_b),
- SH_PFC_PIN_GROUP(msiof2_txd_b),
- SH_PFC_PIN_GROUP(msiof2_rxd_b),
- SH_PFC_PIN_GROUP(msiof2_clk_c),
- SH_PFC_PIN_GROUP(msiof2_sync_c),
- SH_PFC_PIN_GROUP(msiof2_ss1_c),
- SH_PFC_PIN_GROUP(msiof2_ss2_c),
- SH_PFC_PIN_GROUP(msiof2_txd_c),
- SH_PFC_PIN_GROUP(msiof2_rxd_c),
- SH_PFC_PIN_GROUP(msiof2_clk_d),
- SH_PFC_PIN_GROUP(msiof2_sync_d),
- SH_PFC_PIN_GROUP(msiof2_ss1_d),
- SH_PFC_PIN_GROUP(msiof2_ss2_d),
- SH_PFC_PIN_GROUP(msiof2_txd_d),
- SH_PFC_PIN_GROUP(msiof2_rxd_d),
- SH_PFC_PIN_GROUP(msiof3_clk_a),
- SH_PFC_PIN_GROUP(msiof3_sync_a),
- SH_PFC_PIN_GROUP(msiof3_ss1_a),
- SH_PFC_PIN_GROUP(msiof3_ss2_a),
- SH_PFC_PIN_GROUP(msiof3_txd_a),
- SH_PFC_PIN_GROUP(msiof3_rxd_a),
- SH_PFC_PIN_GROUP(msiof3_clk_b),
- SH_PFC_PIN_GROUP(msiof3_sync_b),
- SH_PFC_PIN_GROUP(msiof3_ss1_b),
- SH_PFC_PIN_GROUP(msiof3_ss2_b),
- SH_PFC_PIN_GROUP(msiof3_txd_b),
- SH_PFC_PIN_GROUP(msiof3_rxd_b),
- SH_PFC_PIN_GROUP(msiof3_clk_c),
- SH_PFC_PIN_GROUP(msiof3_sync_c),
- SH_PFC_PIN_GROUP(msiof3_txd_c),
- SH_PFC_PIN_GROUP(msiof3_rxd_c),
- SH_PFC_PIN_GROUP(msiof3_clk_d),
- SH_PFC_PIN_GROUP(msiof3_sync_d),
- SH_PFC_PIN_GROUP(msiof3_ss1_d),
- SH_PFC_PIN_GROUP(msiof3_txd_d),
- SH_PFC_PIN_GROUP(msiof3_rxd_d),
- SH_PFC_PIN_GROUP(msiof3_clk_e),
- SH_PFC_PIN_GROUP(msiof3_sync_e),
- SH_PFC_PIN_GROUP(msiof3_ss1_e),
- SH_PFC_PIN_GROUP(msiof3_ss2_e),
- SH_PFC_PIN_GROUP(msiof3_txd_e),
- SH_PFC_PIN_GROUP(msiof3_rxd_e),
- SH_PFC_PIN_GROUP(pwm0),
- SH_PFC_PIN_GROUP(pwm1_a),
- SH_PFC_PIN_GROUP(pwm1_b),
- SH_PFC_PIN_GROUP(pwm2_a),
- SH_PFC_PIN_GROUP(pwm2_b),
- SH_PFC_PIN_GROUP(pwm3_a),
- SH_PFC_PIN_GROUP(pwm3_b),
- SH_PFC_PIN_GROUP(pwm4_a),
- SH_PFC_PIN_GROUP(pwm4_b),
- SH_PFC_PIN_GROUP(pwm5_a),
- SH_PFC_PIN_GROUP(pwm5_b),
- SH_PFC_PIN_GROUP(pwm6_a),
- SH_PFC_PIN_GROUP(pwm6_b),
- SH_PFC_PIN_GROUP(scif0_data),
- SH_PFC_PIN_GROUP(scif0_clk),
- SH_PFC_PIN_GROUP(scif0_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_a),
- SH_PFC_PIN_GROUP(scif1_clk),
- SH_PFC_PIN_GROUP(scif1_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_b),
- SH_PFC_PIN_GROUP(scif2_data_a),
- SH_PFC_PIN_GROUP(scif2_clk),
- SH_PFC_PIN_GROUP(scif2_data_b),
- SH_PFC_PIN_GROUP(scif3_data_a),
- SH_PFC_PIN_GROUP(scif3_clk),
- SH_PFC_PIN_GROUP(scif3_ctrl),
- SH_PFC_PIN_GROUP(scif3_data_b),
- SH_PFC_PIN_GROUP(scif4_data_a),
- SH_PFC_PIN_GROUP(scif4_clk_a),
- SH_PFC_PIN_GROUP(scif4_ctrl_a),
- SH_PFC_PIN_GROUP(scif4_data_b),
- SH_PFC_PIN_GROUP(scif4_clk_b),
- SH_PFC_PIN_GROUP(scif4_ctrl_b),
- SH_PFC_PIN_GROUP(scif4_data_c),
- SH_PFC_PIN_GROUP(scif4_clk_c),
- SH_PFC_PIN_GROUP(scif4_ctrl_c),
- SH_PFC_PIN_GROUP(scif5_data_a),
- SH_PFC_PIN_GROUP(scif5_clk_a),
- SH_PFC_PIN_GROUP(scif5_data_b),
- SH_PFC_PIN_GROUP(scif5_clk_b),
- SH_PFC_PIN_GROUP(scif_clk_a),
- SH_PFC_PIN_GROUP(scif_clk_b),
- SH_PFC_PIN_GROUP(sdhi0_data1),
- SH_PFC_PIN_GROUP(sdhi0_data4),
- SH_PFC_PIN_GROUP(sdhi0_ctrl),
- SH_PFC_PIN_GROUP(sdhi0_cd),
- SH_PFC_PIN_GROUP(sdhi0_wp),
- SH_PFC_PIN_GROUP(sdhi1_data1),
- SH_PFC_PIN_GROUP(sdhi1_data4),
- SH_PFC_PIN_GROUP(sdhi1_ctrl),
- SH_PFC_PIN_GROUP(sdhi1_cd),
- SH_PFC_PIN_GROUP(sdhi1_wp),
- SH_PFC_PIN_GROUP(sdhi2_data1),
- SH_PFC_PIN_GROUP(sdhi2_data4),
- SH_PFC_PIN_GROUP(sdhi2_data8),
- SH_PFC_PIN_GROUP(sdhi2_ctrl),
- SH_PFC_PIN_GROUP(sdhi2_cd_a),
- SH_PFC_PIN_GROUP(sdhi2_wp_a),
- SH_PFC_PIN_GROUP(sdhi2_cd_b),
- SH_PFC_PIN_GROUP(sdhi2_wp_b),
- SH_PFC_PIN_GROUP(sdhi2_ds),
- SH_PFC_PIN_GROUP(sdhi3_data1),
- SH_PFC_PIN_GROUP(sdhi3_data4),
- SH_PFC_PIN_GROUP(sdhi3_data8),
- SH_PFC_PIN_GROUP(sdhi3_ctrl),
- SH_PFC_PIN_GROUP(sdhi3_cd),
- SH_PFC_PIN_GROUP(sdhi3_wp),
- SH_PFC_PIN_GROUP(sdhi3_ds),
- SH_PFC_PIN_GROUP(ssi0_data),
- SH_PFC_PIN_GROUP(ssi01239_ctrl),
- SH_PFC_PIN_GROUP(ssi1_data_a),
- SH_PFC_PIN_GROUP(ssi1_data_b),
- SH_PFC_PIN_GROUP(ssi1_ctrl_a),
- SH_PFC_PIN_GROUP(ssi1_ctrl_b),
- SH_PFC_PIN_GROUP(ssi2_data_a),
- SH_PFC_PIN_GROUP(ssi2_data_b),
- SH_PFC_PIN_GROUP(ssi2_ctrl_a),
- SH_PFC_PIN_GROUP(ssi2_ctrl_b),
- SH_PFC_PIN_GROUP(ssi3_data),
- SH_PFC_PIN_GROUP(ssi349_ctrl),
- SH_PFC_PIN_GROUP(ssi4_data),
- SH_PFC_PIN_GROUP(ssi4_ctrl),
- SH_PFC_PIN_GROUP(ssi5_data),
- SH_PFC_PIN_GROUP(ssi5_ctrl),
- SH_PFC_PIN_GROUP(ssi6_data),
- SH_PFC_PIN_GROUP(ssi6_ctrl),
- SH_PFC_PIN_GROUP(ssi7_data),
- SH_PFC_PIN_GROUP(ssi78_ctrl),
- SH_PFC_PIN_GROUP(ssi8_data),
- SH_PFC_PIN_GROUP(ssi9_data_a),
- SH_PFC_PIN_GROUP(ssi9_data_b),
- SH_PFC_PIN_GROUP(ssi9_ctrl_a),
- SH_PFC_PIN_GROUP(ssi9_ctrl_b),
- SH_PFC_PIN_GROUP(tmu_tclk1_a),
- SH_PFC_PIN_GROUP(tmu_tclk1_b),
- SH_PFC_PIN_GROUP(tmu_tclk2_a),
- SH_PFC_PIN_GROUP(tmu_tclk2_b),
- SH_PFC_PIN_GROUP(usb0),
- SH_PFC_PIN_GROUP(usb1),
- SH_PFC_PIN_GROUP(usb30),
- VIN_DATA_PIN_GROUP(vin4_data_a, 8),
- VIN_DATA_PIN_GROUP(vin4_data_a, 10),
- VIN_DATA_PIN_GROUP(vin4_data_a, 12),
- VIN_DATA_PIN_GROUP(vin4_data_a, 16),
- SH_PFC_PIN_GROUP(vin4_data18_a),
- VIN_DATA_PIN_GROUP(vin4_data_a, 20),
- VIN_DATA_PIN_GROUP(vin4_data_a, 24),
- VIN_DATA_PIN_GROUP(vin4_data_b, 8),
- VIN_DATA_PIN_GROUP(vin4_data_b, 10),
- VIN_DATA_PIN_GROUP(vin4_data_b, 12),
- VIN_DATA_PIN_GROUP(vin4_data_b, 16),
- SH_PFC_PIN_GROUP(vin4_data18_b),
- VIN_DATA_PIN_GROUP(vin4_data_b, 20),
- VIN_DATA_PIN_GROUP(vin4_data_b, 24),
- SH_PFC_PIN_GROUP(vin4_sync),
- SH_PFC_PIN_GROUP(vin4_field),
- SH_PFC_PIN_GROUP(vin4_clkenb),
- SH_PFC_PIN_GROUP(vin4_clk),
- SH_PFC_PIN_GROUP(vin5_data8),
- SH_PFC_PIN_GROUP(vin5_data10),
- SH_PFC_PIN_GROUP(vin5_data12),
- SH_PFC_PIN_GROUP(vin5_data16),
- SH_PFC_PIN_GROUP(vin5_sync),
- SH_PFC_PIN_GROUP(vin5_field),
- SH_PFC_PIN_GROUP(vin5_clkenb),
- SH_PFC_PIN_GROUP(vin5_clk),
+static const struct {
+ struct sh_pfc_pin_group common[307];
+ struct sh_pfc_pin_group automotive[33];
+} pinmux_groups = {
+ .common = {
+ SH_PFC_PIN_GROUP(audio_clk_a_a),
+ SH_PFC_PIN_GROUP(audio_clk_a_b),
+ SH_PFC_PIN_GROUP(audio_clk_a_c),
+ SH_PFC_PIN_GROUP(audio_clk_b_a),
+ SH_PFC_PIN_GROUP(audio_clk_b_b),
+ SH_PFC_PIN_GROUP(audio_clk_c_a),
+ SH_PFC_PIN_GROUP(audio_clk_c_b),
+ SH_PFC_PIN_GROUP(audio_clkout_a),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(audio_clkout1_a),
+ SH_PFC_PIN_GROUP(audio_clkout1_b),
+ SH_PFC_PIN_GROUP(audio_clkout2_a),
+ SH_PFC_PIN_GROUP(audio_clkout2_b),
+ SH_PFC_PIN_GROUP(audio_clkout3_a),
+ SH_PFC_PIN_GROUP(audio_clkout3_b),
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_avtp_pps),
+ SH_PFC_PIN_GROUP(avb_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match_b),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(can0_data_a),
+ SH_PFC_PIN_GROUP(can0_data_b),
+ SH_PFC_PIN_GROUP(can1_data),
+ SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(hdmi0_cec),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_a),
+ SH_PFC_PIN_GROUP(hscif2_clk_a),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_clk_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_data_c),
+ SH_PFC_PIN_GROUP(hscif3_data_d),
+ SH_PFC_PIN_GROUP(hscif4_data_a),
+ SH_PFC_PIN_GROUP(hscif4_clk),
+ SH_PFC_PIN_GROUP(hscif4_ctrl),
+ SH_PFC_PIN_GROUP(hscif4_data_b),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+ SH_PFC_PIN_GROUP(msiof1_clk_a),
+ SH_PFC_PIN_GROUP(msiof1_sync_a),
+ SH_PFC_PIN_GROUP(msiof1_ss1_a),
+ SH_PFC_PIN_GROUP(msiof1_ss2_a),
+ SH_PFC_PIN_GROUP(msiof1_txd_a),
+ SH_PFC_PIN_GROUP(msiof1_rxd_a),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_sync_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_txd_b),
+ SH_PFC_PIN_GROUP(msiof1_rxd_b),
+ SH_PFC_PIN_GROUP(msiof1_clk_c),
+ SH_PFC_PIN_GROUP(msiof1_sync_c),
+ SH_PFC_PIN_GROUP(msiof1_ss1_c),
+ SH_PFC_PIN_GROUP(msiof1_ss2_c),
+ SH_PFC_PIN_GROUP(msiof1_txd_c),
+ SH_PFC_PIN_GROUP(msiof1_rxd_c),
+ SH_PFC_PIN_GROUP(msiof1_clk_d),
+ SH_PFC_PIN_GROUP(msiof1_sync_d),
+ SH_PFC_PIN_GROUP(msiof1_ss1_d),
+ SH_PFC_PIN_GROUP(msiof1_ss2_d),
+ SH_PFC_PIN_GROUP(msiof1_txd_d),
+ SH_PFC_PIN_GROUP(msiof1_rxd_d),
+ SH_PFC_PIN_GROUP(msiof1_clk_e),
+ SH_PFC_PIN_GROUP(msiof1_sync_e),
+ SH_PFC_PIN_GROUP(msiof1_ss1_e),
+ SH_PFC_PIN_GROUP(msiof1_ss2_e),
+ SH_PFC_PIN_GROUP(msiof1_txd_e),
+ SH_PFC_PIN_GROUP(msiof1_rxd_e),
+ SH_PFC_PIN_GROUP(msiof1_clk_f),
+ SH_PFC_PIN_GROUP(msiof1_sync_f),
+ SH_PFC_PIN_GROUP(msiof1_ss1_f),
+ SH_PFC_PIN_GROUP(msiof1_ss2_f),
+ SH_PFC_PIN_GROUP(msiof1_txd_f),
+ SH_PFC_PIN_GROUP(msiof1_rxd_f),
+ SH_PFC_PIN_GROUP(msiof1_clk_g),
+ SH_PFC_PIN_GROUP(msiof1_sync_g),
+ SH_PFC_PIN_GROUP(msiof1_ss1_g),
+ SH_PFC_PIN_GROUP(msiof1_ss2_g),
+ SH_PFC_PIN_GROUP(msiof1_txd_g),
+ SH_PFC_PIN_GROUP(msiof1_rxd_g),
+ SH_PFC_PIN_GROUP(msiof2_clk_a),
+ SH_PFC_PIN_GROUP(msiof2_sync_a),
+ SH_PFC_PIN_GROUP(msiof2_ss1_a),
+ SH_PFC_PIN_GROUP(msiof2_ss2_a),
+ SH_PFC_PIN_GROUP(msiof2_txd_a),
+ SH_PFC_PIN_GROUP(msiof2_rxd_a),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_txd_b),
+ SH_PFC_PIN_GROUP(msiof2_rxd_b),
+ SH_PFC_PIN_GROUP(msiof2_clk_c),
+ SH_PFC_PIN_GROUP(msiof2_sync_c),
+ SH_PFC_PIN_GROUP(msiof2_ss1_c),
+ SH_PFC_PIN_GROUP(msiof2_ss2_c),
+ SH_PFC_PIN_GROUP(msiof2_txd_c),
+ SH_PFC_PIN_GROUP(msiof2_rxd_c),
+ SH_PFC_PIN_GROUP(msiof2_clk_d),
+ SH_PFC_PIN_GROUP(msiof2_sync_d),
+ SH_PFC_PIN_GROUP(msiof2_ss1_d),
+ SH_PFC_PIN_GROUP(msiof2_ss2_d),
+ SH_PFC_PIN_GROUP(msiof2_txd_d),
+ SH_PFC_PIN_GROUP(msiof2_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_a),
+ SH_PFC_PIN_GROUP(msiof3_sync_a),
+ SH_PFC_PIN_GROUP(msiof3_ss1_a),
+ SH_PFC_PIN_GROUP(msiof3_ss2_a),
+ SH_PFC_PIN_GROUP(msiof3_txd_a),
+ SH_PFC_PIN_GROUP(msiof3_rxd_a),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_ss1_b),
+ SH_PFC_PIN_GROUP(msiof3_ss2_b),
+ SH_PFC_PIN_GROUP(msiof3_txd_b),
+ SH_PFC_PIN_GROUP(msiof3_rxd_b),
+ SH_PFC_PIN_GROUP(msiof3_clk_c),
+ SH_PFC_PIN_GROUP(msiof3_sync_c),
+ SH_PFC_PIN_GROUP(msiof3_txd_c),
+ SH_PFC_PIN_GROUP(msiof3_rxd_c),
+ SH_PFC_PIN_GROUP(msiof3_clk_d),
+ SH_PFC_PIN_GROUP(msiof3_sync_d),
+ SH_PFC_PIN_GROUP(msiof3_ss1_d),
+ SH_PFC_PIN_GROUP(msiof3_txd_d),
+ SH_PFC_PIN_GROUP(msiof3_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_e),
+ SH_PFC_PIN_GROUP(msiof3_sync_e),
+ SH_PFC_PIN_GROUP(msiof3_ss1_e),
+ SH_PFC_PIN_GROUP(msiof3_ss2_e),
+ SH_PFC_PIN_GROUP(msiof3_txd_e),
+ SH_PFC_PIN_GROUP(msiof3_rxd_e),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4_a),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5_a),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6_a),
+ SH_PFC_PIN_GROUP(pwm6_b),
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_a),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif2_data_a),
+ SH_PFC_PIN_GROUP(scif2_clk),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(scif3_ctrl),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif4_data_a),
+ SH_PFC_PIN_GROUP(scif4_clk_a),
+ SH_PFC_PIN_GROUP(scif4_ctrl_a),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_clk_b),
+ SH_PFC_PIN_GROUP(scif4_ctrl_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif4_clk_c),
+ SH_PFC_PIN_GROUP(scif4_ctrl_c),
+ SH_PFC_PIN_GROUP(scif5_data_a),
+ SH_PFC_PIN_GROUP(scif5_clk_a),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scif5_clk_b),
+ SH_PFC_PIN_GROUP(scif_clk_a),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_data8),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_a),
+ SH_PFC_PIN_GROUP(sdhi2_wp_a),
+ SH_PFC_PIN_GROUP(sdhi2_cd_b),
+ SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(sdhi2_ds),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_data8),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(sdhi3_ds),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi01239_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data_a),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data_a),
+ SH_PFC_PIN_GROUP(ssi2_data_b),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi349_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi9_data_a),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+ SH_PFC_PIN_GROUP(tmu_tclk1_a),
+ SH_PFC_PIN_GROUP(tmu_tclk1_b),
+ SH_PFC_PIN_GROUP(tmu_tclk2_a),
+ SH_PFC_PIN_GROUP(tmu_tclk2_b),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb30),
+ VIN_DATA_PIN_GROUP(vin4_data_a, 8),
+ VIN_DATA_PIN_GROUP(vin4_data_a, 10),
+ VIN_DATA_PIN_GROUP(vin4_data_a, 12),
+ VIN_DATA_PIN_GROUP(vin4_data_a, 16),
+ SH_PFC_PIN_GROUP(vin4_data18_a),
+ VIN_DATA_PIN_GROUP(vin4_data_a, 20),
+ VIN_DATA_PIN_GROUP(vin4_data_a, 24),
+ VIN_DATA_PIN_GROUP(vin4_data_b, 8),
+ VIN_DATA_PIN_GROUP(vin4_data_b, 10),
+ VIN_DATA_PIN_GROUP(vin4_data_b, 12),
+ VIN_DATA_PIN_GROUP(vin4_data_b, 16),
+ SH_PFC_PIN_GROUP(vin4_data18_b),
+ VIN_DATA_PIN_GROUP(vin4_data_b, 20),
+ VIN_DATA_PIN_GROUP(vin4_data_b, 24),
+ SH_PFC_PIN_GROUP(vin4_sync),
+ SH_PFC_PIN_GROUP(vin4_field),
+ SH_PFC_PIN_GROUP(vin4_clkenb),
+ SH_PFC_PIN_GROUP(vin4_clk),
+ SH_PFC_PIN_GROUP(vin5_data8),
+ SH_PFC_PIN_GROUP(vin5_data10),
+ SH_PFC_PIN_GROUP(vin5_data12),
+ SH_PFC_PIN_GROUP(vin5_data16),
+ SH_PFC_PIN_GROUP(vin5_sync),
+ SH_PFC_PIN_GROUP(vin5_field),
+ SH_PFC_PIN_GROUP(vin5_clkenb),
+ SH_PFC_PIN_GROUP(vin5_clk),
+ },
+ .automotive = {
+ SH_PFC_PIN_GROUP(canfd0_data_a),
+ SH_PFC_PIN_GROUP(canfd0_data_b),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_c),
+ SH_PFC_PIN_GROUP(drif0_data0_c),
+ SH_PFC_PIN_GROUP(drif0_data1_c),
+ SH_PFC_PIN_GROUP(drif1_ctrl_a),
+ SH_PFC_PIN_GROUP(drif1_data0_a),
+ SH_PFC_PIN_GROUP(drif1_data1_a),
+ SH_PFC_PIN_GROUP(drif1_ctrl_b),
+ SH_PFC_PIN_GROUP(drif1_data0_b),
+ SH_PFC_PIN_GROUP(drif1_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl_c),
+ SH_PFC_PIN_GROUP(drif1_data0_c),
+ SH_PFC_PIN_GROUP(drif1_data1_c),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
+ }
};
static const char * const audio_clk_groups[] = {
@@ -4962,58 +4966,65 @@ static const char * const vin5_groups[] = {
"vin5_clk",
};
-static const struct sh_pfc_function pinmux_functions[] = {
- SH_PFC_FUNCTION(audio_clk),
- SH_PFC_FUNCTION(avb),
- SH_PFC_FUNCTION(can0),
- SH_PFC_FUNCTION(can1),
- SH_PFC_FUNCTION(can_clk),
- SH_PFC_FUNCTION(canfd0),
- SH_PFC_FUNCTION(canfd1),
- SH_PFC_FUNCTION(drif0),
- SH_PFC_FUNCTION(drif1),
- SH_PFC_FUNCTION(drif2),
- SH_PFC_FUNCTION(drif3),
- SH_PFC_FUNCTION(du),
- SH_PFC_FUNCTION(hdmi0),
- SH_PFC_FUNCTION(hscif0),
- SH_PFC_FUNCTION(hscif1),
- SH_PFC_FUNCTION(hscif2),
- SH_PFC_FUNCTION(hscif3),
- SH_PFC_FUNCTION(hscif4),
- SH_PFC_FUNCTION(i2c1),
- SH_PFC_FUNCTION(i2c2),
- SH_PFC_FUNCTION(i2c6),
- SH_PFC_FUNCTION(intc_ex),
- SH_PFC_FUNCTION(msiof0),
- SH_PFC_FUNCTION(msiof1),
- SH_PFC_FUNCTION(msiof2),
- SH_PFC_FUNCTION(msiof3),
- SH_PFC_FUNCTION(pwm0),
- SH_PFC_FUNCTION(pwm1),
- SH_PFC_FUNCTION(pwm2),
- SH_PFC_FUNCTION(pwm3),
- SH_PFC_FUNCTION(pwm4),
- SH_PFC_FUNCTION(pwm5),
- SH_PFC_FUNCTION(pwm6),
- SH_PFC_FUNCTION(scif0),
- SH_PFC_FUNCTION(scif1),
- SH_PFC_FUNCTION(scif2),
- SH_PFC_FUNCTION(scif3),
- SH_PFC_FUNCTION(scif4),
- SH_PFC_FUNCTION(scif5),
- SH_PFC_FUNCTION(scif_clk),
- SH_PFC_FUNCTION(sdhi0),
- SH_PFC_FUNCTION(sdhi1),
- SH_PFC_FUNCTION(sdhi2),
- SH_PFC_FUNCTION(sdhi3),
- SH_PFC_FUNCTION(ssi),
- SH_PFC_FUNCTION(tmu),
- SH_PFC_FUNCTION(usb0),
- SH_PFC_FUNCTION(usb1),
- SH_PFC_FUNCTION(usb30),
- SH_PFC_FUNCTION(vin4),
- SH_PFC_FUNCTION(vin5),
+static const struct {
+ struct sh_pfc_function common[45];
+ struct sh_pfc_function automotive[6];
+} pinmux_functions = {
+ .common = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
+ SH_PFC_FUNCTION(can_clk),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(hdmi0),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+ SH_PFC_FUNCTION(hscif4),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(tmu),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(usb30),
+ SH_PFC_FUNCTION(vin4),
+ SH_PFC_FUNCTION(vin5),
+ },
+ .automotive = {
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
+ }
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -6137,6 +6148,32 @@ static const struct sh_pfc_soc_operations r8a7796_pinmux_ops = {
.set_bias = r8a7796_pinmux_set_bias,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A774A1
+const struct sh_pfc_soc_info r8a774a1_pinmux_info = {
+ .name = "r8a774a1_pfc",
+ .ops = &r8a7796_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A7796
const struct sh_pfc_soc_info r8a7796_pinmux_info = {
.name = "r8a77960_pfc",
.ops = &r8a7796_pinmux_ops,
@@ -6146,10 +6183,12 @@ const struct sh_pfc_soc_info r8a7796_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .groups = pinmux_groups,
- .nr_groups = ARRAY_SIZE(pinmux_groups),
- .functions = pinmux_functions,
- .nr_functions = ARRAY_SIZE(pinmux_functions),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
@@ -6159,3 +6198,4 @@ const struct sh_pfc_soc_info r8a7796_pinmux_info = {
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
+#endif
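
The same common/automotive split is applied to the R8A7796 tables so that the new RZ/G2M (r8a774a1) entry can reuse them: it points .groups and .functions at the .common halves only, while the r8a7796 entry now counts both. One hazard of the explicit array bounds (common[307], automotive[33]) is that they must be bumped by hand whenever a group is added; a hypothetical file-scope guard (not part of this patch) could at least pin down the layout assumption the combined walk depends on:

	/*
	 * Hypothetical guard, not in the patch: the combined group walk
	 * assumes .automotive starts exactly where .common ends.
	 * (offsetof comes from <linux/stddef.h>; typeof is GNU C.)
	 */
	_Static_assert(offsetof(typeof(pinmux_groups), automotive) ==
		       sizeof(pinmux_groups.common),
		       "automotive groups must start where common ends");
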
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index cfd7de67e3e3..dfdd982984d4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -1575,6 +1575,128 @@ static const struct sh_pfc_pin pinmux_pins[] = {
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 30, ASEBRK, CFG_FLAGS),
};
+/* - AUDIO CLOCK ------------------------------------------------------------ */
+static const unsigned int audio_clk_a_a_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(6, 22),
+};
+static const unsigned int audio_clk_a_a_mux[] = {
+ AUDIO_CLKA_A_MARK,
+};
+static const unsigned int audio_clk_a_b_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int audio_clk_a_b_mux[] = {
+ AUDIO_CLKA_B_MARK,
+};
+static const unsigned int audio_clk_a_c_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clk_a_c_mux[] = {
+ AUDIO_CLKA_C_MARK,
+};
+static const unsigned int audio_clk_b_a_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int audio_clk_b_a_mux[] = {
+ AUDIO_CLKB_A_MARK,
+};
+static const unsigned int audio_clk_b_b_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(6, 23),
+};
+static const unsigned int audio_clk_b_b_mux[] = {
+ AUDIO_CLKB_B_MARK,
+};
+static const unsigned int audio_clk_c_a_pins[] = {
+ /* CLK C */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clk_c_a_mux[] = {
+ AUDIO_CLKC_A_MARK,
+};
+static const unsigned int audio_clk_c_b_pins[] = {
+ /* CLK C */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int audio_clk_c_b_mux[] = {
+ AUDIO_CLKC_B_MARK,
+};
+static const unsigned int audio_clkout_a_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int audio_clkout_a_mux[] = {
+ AUDIO_CLKOUT_A_MARK,
+};
+static const unsigned int audio_clkout_b_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int audio_clkout_b_mux[] = {
+ AUDIO_CLKOUT_B_MARK,
+};
+static const unsigned int audio_clkout_c_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int audio_clkout_c_mux[] = {
+ AUDIO_CLKOUT_C_MARK,
+};
+static const unsigned int audio_clkout_d_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clkout_d_mux[] = {
+ AUDIO_CLKOUT_D_MARK,
+};
+static const unsigned int audio_clkout1_a_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int audio_clkout1_a_mux[] = {
+ AUDIO_CLKOUT1_A_MARK,
+};
+static const unsigned int audio_clkout1_b_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int audio_clkout1_b_mux[] = {
+ AUDIO_CLKOUT1_B_MARK,
+};
+static const unsigned int audio_clkout2_a_pins[] = {
+ /* CLKOUT2 */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int audio_clkout2_a_mux[] = {
+ AUDIO_CLKOUT2_A_MARK,
+};
+static const unsigned int audio_clkout2_b_pins[] = {
+ /* CLKOUT2 */
+ RCAR_GP_PIN(6, 30),
+};
+static const unsigned int audio_clkout2_b_mux[] = {
+ AUDIO_CLKOUT2_B_MARK,
+};
+static const unsigned int audio_clkout3_a_pins[] = {
+ /* CLKOUT3 */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clkout3_a_mux[] = {
+ AUDIO_CLKOUT3_A_MARK,
+};
+static const unsigned int audio_clkout3_b_pins[] = {
+ /* CLKOUT3 */
+ RCAR_GP_PIN(6, 31),
+};
+static const unsigned int audio_clkout3_b_mux[] = {
+ AUDIO_CLKOUT3_B_MARK,
+};
+
/* - EtherAVB --------------------------------------------------------------- */
static const unsigned int avb_link_pins[] = {
/* AVB_LINK */
@@ -2907,6 +3029,25 @@ static const unsigned int pwm6_b_mux[] = {
PWM6_B_MARK,
};
+/* - SATA ------------------------------------------------------------------- */
+static const unsigned int sata0_devslp_a_pins[] = {
+ /* DEVSLP */
+ RCAR_GP_PIN(6, 16),
+};
+
+static const unsigned int sata0_devslp_a_mux[] = {
+ SATA_DEVSLP_A_MARK,
+};
+
+static const unsigned int sata0_devslp_b_pins[] = {
+ /* DEVSLP */
+ RCAR_GP_PIN(4, 6),
+};
+
+static const unsigned int sata0_devslp_b_mux[] = {
+ SATA_DEVSLP_B_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -3376,6 +3517,184 @@ static const unsigned int sdhi3_ds_mux[] = {
SD3_DS_MARK,
};
+/* - SSI -------------------------------------------------------------------- */
+static const unsigned int ssi0_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 2),
+};
+static const unsigned int ssi0_data_mux[] = {
+ SSI_SDATA0_MARK,
+};
+static const unsigned int ssi01239_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+};
+static const unsigned int ssi01239_ctrl_mux[] = {
+ SSI_SCK01239_MARK, SSI_WS01239_MARK,
+};
+static const unsigned int ssi1_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 3),
+};
+static const unsigned int ssi1_data_a_mux[] = {
+ SSI_SDATA1_A_MARK,
+};
+static const unsigned int ssi1_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int ssi1_data_b_mux[] = {
+ SSI_SDATA1_B_MARK,
+};
+static const unsigned int ssi1_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int ssi1_ctrl_a_mux[] = {
+ SSI_SCK1_A_MARK, SSI_WS1_A_MARK,
+};
+static const unsigned int ssi1_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi1_ctrl_b_mux[] = {
+ SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
+};
+static const unsigned int ssi2_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int ssi2_data_a_mux[] = {
+ SSI_SDATA2_A_MARK,
+};
+static const unsigned int ssi2_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int ssi2_data_b_mux[] = {
+ SSI_SDATA2_B_MARK,
+};
+static const unsigned int ssi2_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int ssi2_ctrl_a_mux[] = {
+ SSI_SCK2_A_MARK, SSI_WS2_A_MARK,
+};
+static const unsigned int ssi2_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int ssi2_ctrl_b_mux[] = {
+ SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
+};
+static const unsigned int ssi3_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int ssi3_data_mux[] = {
+ SSI_SDATA3_MARK,
+};
+static const unsigned int ssi349_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int ssi349_ctrl_mux[] = {
+ SSI_SCK349_MARK, SSI_WS349_MARK,
+};
+static const unsigned int ssi4_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int ssi4_data_mux[] = {
+ SSI_SDATA4_MARK,
+};
+static const unsigned int ssi4_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int ssi4_ctrl_mux[] = {
+ SSI_SCK4_MARK, SSI_WS4_MARK,
+};
+static const unsigned int ssi5_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 13),
+};
+static const unsigned int ssi5_data_mux[] = {
+ SSI_SDATA5_MARK,
+};
+static const unsigned int ssi5_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+static const unsigned int ssi5_ctrl_mux[] = {
+ SSI_SCK5_MARK, SSI_WS5_MARK,
+};
+static const unsigned int ssi6_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 16),
+};
+static const unsigned int ssi6_data_mux[] = {
+ SSI_SDATA6_MARK,
+};
+static const unsigned int ssi6_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int ssi6_ctrl_mux[] = {
+ SSI_SCK6_MARK, SSI_WS6_MARK,
+};
+static const unsigned int ssi7_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int ssi7_data_mux[] = {
+ SSI_SDATA7_MARK,
+};
+static const unsigned int ssi78_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int ssi78_ctrl_mux[] = {
+ SSI_SCK78_MARK, SSI_WS78_MARK,
+};
+static const unsigned int ssi8_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int ssi8_data_mux[] = {
+ SSI_SDATA8_MARK,
+};
+static const unsigned int ssi9_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi9_data_a_mux[] = {
+ SSI_SDATA9_A_MARK,
+};
+static const unsigned int ssi9_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int ssi9_data_b_mux[] = {
+ SSI_SDATA9_B_MARK,
+};
+static const unsigned int ssi9_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int ssi9_ctrl_a_mux[] = {
+ SSI_SCK9_A_MARK, SSI_WS9_A_MARK,
+};
+static const unsigned int ssi9_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int ssi9_ctrl_b_mux[] = {
+ SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
+};
+
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_pins[] = {
/* PWEN, OVC */
@@ -3407,6 +3726,23 @@ static const unsigned int usb30_mux[] = {
};
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clk_a_a),
+ SH_PFC_PIN_GROUP(audio_clk_a_b),
+ SH_PFC_PIN_GROUP(audio_clk_a_c),
+ SH_PFC_PIN_GROUP(audio_clk_b_a),
+ SH_PFC_PIN_GROUP(audio_clk_b_b),
+ SH_PFC_PIN_GROUP(audio_clk_c_a),
+ SH_PFC_PIN_GROUP(audio_clk_c_b),
+ SH_PFC_PIN_GROUP(audio_clkout_a),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(audio_clkout1_a),
+ SH_PFC_PIN_GROUP(audio_clkout1_b),
+ SH_PFC_PIN_GROUP(audio_clkout2_a),
+ SH_PFC_PIN_GROUP(audio_clkout2_b),
+ SH_PFC_PIN_GROUP(audio_clkout3_a),
+ SH_PFC_PIN_GROUP(audio_clkout3_b),
SH_PFC_PIN_GROUP(avb_link),
SH_PFC_PIN_GROUP(avb_magic),
SH_PFC_PIN_GROUP(avb_phy_int),
@@ -3579,6 +3915,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(pwm5_b),
SH_PFC_PIN_GROUP(pwm6_a),
SH_PFC_PIN_GROUP(pwm6_b),
+ SH_PFC_PIN_GROUP(sata0_devslp_a),
+ SH_PFC_PIN_GROUP(sata0_devslp_b),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -3634,11 +3972,56 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(sdhi3_cd),
SH_PFC_PIN_GROUP(sdhi3_wp),
SH_PFC_PIN_GROUP(sdhi3_ds),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi01239_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data_a),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data_a),
+ SH_PFC_PIN_GROUP(ssi2_data_b),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi349_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi9_data_a),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb30),
};
+static const char * const audio_clk_groups[] = {
+ "audio_clk_a_a",
+ "audio_clk_a_b",
+ "audio_clk_a_c",
+ "audio_clk_b_a",
+ "audio_clk_b_b",
+ "audio_clk_c_a",
+ "audio_clk_c_b",
+ "audio_clkout_a",
+ "audio_clkout_b",
+ "audio_clkout_c",
+ "audio_clkout_d",
+ "audio_clkout1_a",
+ "audio_clkout1_b",
+ "audio_clkout2_a",
+ "audio_clkout2_b",
+ "audio_clkout3_a",
+ "audio_clkout3_b",
+};
+
static const char * const avb_groups[] = {
"avb_link",
"avb_magic",
@@ -3877,6 +4260,11 @@ static const char * const pwm6_groups[] = {
"pwm6_b",
};
+static const char * const sata0_groups[] = {
+ "sata0_devslp_a",
+ "sata0_devslp_b",
+};
+
static const char * const scif0_groups[] = {
"scif0_data",
"scif0_clk",
@@ -3964,6 +4352,34 @@ static const char * const sdhi3_groups[] = {
"sdhi3_ds",
};
+static const char * const ssi_groups[] = {
+ "ssi0_data",
+ "ssi01239_ctrl",
+ "ssi1_data_a",
+ "ssi1_data_b",
+ "ssi1_ctrl_a",
+ "ssi1_ctrl_b",
+ "ssi2_data_a",
+ "ssi2_data_b",
+ "ssi2_ctrl_a",
+ "ssi2_ctrl_b",
+ "ssi3_data",
+ "ssi349_ctrl",
+ "ssi4_data",
+ "ssi4_ctrl",
+ "ssi5_data",
+ "ssi5_ctrl",
+ "ssi6_data",
+ "ssi6_ctrl",
+ "ssi7_data",
+ "ssi78_ctrl",
+ "ssi8_data",
+ "ssi9_data_a",
+ "ssi9_data_b",
+ "ssi9_ctrl_a",
+ "ssi9_ctrl_b",
+};
+
static const char * const usb0_groups[] = {
"usb0",
};
@@ -3977,6 +4393,7 @@ static const char * const usb30_groups[] = {
};
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
SH_PFC_FUNCTION(du),
SH_PFC_FUNCTION(hscif0),
@@ -3999,6 +4416,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(pwm4),
SH_PFC_FUNCTION(pwm5),
SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(sata0),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -4010,6 +4428,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(sdhi1),
SH_PFC_FUNCTION(sdhi2),
SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb30),
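
Each new SH_PFC_FUNCTION(...) above pairs a function name with the string list of groups that can be routed to it; the single ssi function, for instance, covers all twenty-five "ssi*" groups added here. A sketch of what the macro expands to (simplified from drivers/pinctrl/sh-pfc/sh_pfc.h):

	#define SH_PFC_FUNCTION(n)				\
		{						\
			.name = #n,				\
			.groups = n##_groups,			\
			.nr_groups = ARRAY_SIZE(n##_groups),	\
		}
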
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
index eeb58b3bbc9a..44f9eefc86b5 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A77970 processor support - PFC hardware block.
*
@@ -9,10 +10,6 @@
* R-Car Gen3 processor support - PFC hardware block.
*
* Copyright (C) 2015 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/io.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
index b81c807ac54d..1fdafa48479c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
@@ -1371,6 +1371,94 @@ static const unsigned int avb_avtp_capture_a_mux[] = {
AVB_AVTP_CAPTURE_A_MARK,
};
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 3), RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 0),
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 10),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 11),
+ RCAR_GP_PIN(0, 1), RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int du_rgb666_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK,
+};
+static const unsigned int du_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 3), RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 0),
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 21),
+ RCAR_GP_PIN(1, 9), RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 10),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 11),
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 9),
+ RCAR_GP_PIN(0, 1), RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+};
+static const unsigned int du_rgb888_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK, DU_DR1_MARK, DU_DR0_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK, DU_DG1_MARK, DU_DG0_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK, DU_DB1_MARK, DU_DB0_MARK,
+};
+static const unsigned int du_clk_in_0_pins[] = {
+ /* CLKIN0 */
+ RCAR_GP_PIN(0, 16),
+};
+static const unsigned int du_clk_in_0_mux[] = {
+ DU_DOTCLKIN0_MARK
+};
+static const unsigned int du_clk_in_1_pins[] = {
+ /* CLKIN1 */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int du_clk_in_1_mux[] = {
+ DU_DOTCLKIN1_MARK
+};
+static const unsigned int du_clk_out_0_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int du_clk_out_0_mux[] = {
+ DU_DOTCLKOUT0_MARK
+};
+static const unsigned int du_sync_pins[] = {
+ /* VSYNC, HSYNC */
+ RCAR_GP_PIN(1, 11), RCAR_GP_PIN(1, 8),
+};
+static const unsigned int du_sync_mux[] = {
+ DU_VSYNC_MARK, DU_HSYNC_MARK
+};
+static const unsigned int du_disp_cde_pins[] = {
+ /* DISP_CDE */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int du_disp_cde_mux[] = {
+ DU_DISP_CDE_MARK,
+};
+static const unsigned int du_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int du_cde_mux[] = {
+ DU_CDE_MARK,
+};
+static const unsigned int du_disp_pins[] = {
+ /* DISP */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int du_disp_mux[] = {
+ DU_DISP_MARK,
+};
+
/* - I2C -------------------------------------------------------------------- */
static const unsigned int i2c1_a_pins[] = {
/* SCL, SDA */
@@ -1507,6 +1595,520 @@ static const unsigned int i2c7_b_mux[] = {
SCL7_B_MARK, SDA7_B_MARK,
};
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+ /* IRQ0 */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+ /* IRQ1 */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+ /* IRQ2 */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+ /* IRQ3 */
+ RCAR_GP_PIN(1, 9),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+ IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+ /* IRQ4 */
+ RCAR_GP_PIN(1, 10),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+ IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+ /* IRQ5 */
+ RCAR_GP_PIN(0, 7),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+ IRQ5_MARK,
+};
+
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 10),
+};
+
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+
+static const unsigned int msiof0_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 13),
+};
+
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+
+static const unsigned int msiof0_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 14),
+};
+
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+
+static const unsigned int msiof0_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 15),
+};
+
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+
+static const unsigned int msiof0_txd_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 12),
+};
+
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+
+static const unsigned int msiof0_rxd_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 11),
+};
+
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 19),
+};
+
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+
+static const unsigned int msiof1_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 16),
+};
+
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+
+static const unsigned int msiof1_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 14),
+};
+
+static const unsigned int msiof1_ss1_mux[] = {
+ MSIOF1_SS1_MARK,
+};
+
+static const unsigned int msiof1_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 15),
+};
+
+static const unsigned int msiof1_ss2_mux[] = {
+ MSIOF1_SS2_MARK,
+};
+
+static const unsigned int msiof1_txd_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 18),
+};
+
+static const unsigned int msiof1_txd_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+
+static const unsigned int msiof1_rxd_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 17),
+};
+
+static const unsigned int msiof1_rxd_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 8),
+};
+
+static const unsigned int msiof2_clk_a_mux[] = {
+ MSIOF2_SCK_A_MARK,
+};
+
+static const unsigned int msiof2_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 9),
+};
+
+static const unsigned int msiof2_sync_a_mux[] = {
+ MSIOF2_SYNC_A_MARK,
+};
+
+static const unsigned int msiof2_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 15),
+};
+
+static const unsigned int msiof2_ss1_a_mux[] = {
+ MSIOF2_SS1_A_MARK,
+};
+
+static const unsigned int msiof2_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 14),
+};
+
+static const unsigned int msiof2_ss2_a_mux[] = {
+ MSIOF2_SS2_A_MARK,
+};
+
+static const unsigned int msiof2_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 11),
+};
+
+static const unsigned int msiof2_txd_a_mux[] = {
+ MSIOF2_TXD_A_MARK,
+};
+
+static const unsigned int msiof2_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 10),
+};
+
+static const unsigned int msiof2_rxd_a_mux[] = {
+ MSIOF2_RXD_A_MARK,
+};
+
+static const unsigned int msiof2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 13),
+};
+
+static const unsigned int msiof2_clk_b_mux[] = {
+ MSIOF2_SCK_B_MARK,
+};
+
+static const unsigned int msiof2_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 10),
+};
+
+static const unsigned int msiof2_sync_b_mux[] = {
+ MSIOF2_SYNC_B_MARK,
+};
+
+static const unsigned int msiof2_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 16),
+};
+
+static const unsigned int msiof2_ss1_b_mux[] = {
+ MSIOF2_SS1_B_MARK,
+};
+
+static const unsigned int msiof2_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 12),
+};
+
+static const unsigned int msiof2_ss2_b_mux[] = {
+ MSIOF2_SS2_B_MARK,
+};
+
+static const unsigned int msiof2_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 15),
+};
+
+static const unsigned int msiof2_txd_b_mux[] = {
+ MSIOF2_TXD_B_MARK,
+};
+
+static const unsigned int msiof2_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 14),
+};
+
+static const unsigned int msiof2_rxd_b_mux[] = {
+ MSIOF2_RXD_B_MARK,
+};
+
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 0),
+};
+
+static const unsigned int msiof3_clk_a_mux[] = {
+ MSIOF3_SCK_A_MARK,
+};
+
+static const unsigned int msiof3_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 1),
+};
+
+static const unsigned int msiof3_sync_a_mux[] = {
+ MSIOF3_SYNC_A_MARK,
+};
+
+static const unsigned int msiof3_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 15),
+};
+
+static const unsigned int msiof3_ss1_a_mux[] = {
+ MSIOF3_SS1_A_MARK,
+};
+
+static const unsigned int msiof3_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 4),
+};
+
+static const unsigned int msiof3_ss2_a_mux[] = {
+ MSIOF3_SS2_A_MARK,
+};
+
+static const unsigned int msiof3_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 3),
+};
+
+static const unsigned int msiof3_txd_a_mux[] = {
+ MSIOF3_TXD_A_MARK,
+};
+
+static const unsigned int msiof3_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 2),
+};
+
+static const unsigned int msiof3_rxd_a_mux[] = {
+ MSIOF3_RXD_A_MARK,
+};
+
+static const unsigned int msiof3_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 5),
+};
+
+static const unsigned int msiof3_clk_b_mux[] = {
+ MSIOF3_SCK_B_MARK,
+};
+
+static const unsigned int msiof3_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 4),
+};
+
+static const unsigned int msiof3_sync_b_mux[] = {
+ MSIOF3_SYNC_B_MARK,
+};
+
+static const unsigned int msiof3_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 0),
+};
+
+static const unsigned int msiof3_ss1_b_mux[] = {
+ MSIOF3_SS1_B_MARK,
+};
+
+static const unsigned int msiof3_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 7),
+};
+
+static const unsigned int msiof3_txd_b_mux[] = {
+ MSIOF3_TXD_B_MARK,
+};
+
+static const unsigned int msiof3_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 6),
+};
+
+static const unsigned int msiof3_rxd_b_mux[] = {
+ MSIOF3_RXD_B_MARK,
+};
+
+/* - PWM0 --------------------------------------------------------------------*/
+static const unsigned int pwm0_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 22),
+};
+
+static const unsigned int pwm0_a_mux[] = {
+ PWM0_A_MARK,
+};
+
+static const unsigned int pwm0_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(6, 3),
+};
+
+static const unsigned int pwm0_b_mux[] = {
+ PWM0_B_MARK,
+};
+
+/* - PWM1 --------------------------------------------------------------------*/
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 23),
+};
+
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(6, 4),
+};
+
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+
+/* - PWM2 --------------------------------------------------------------------*/
+static const unsigned int pwm2_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 0),
+};
+
+static const unsigned int pwm2_a_mux[] = {
+ PWM2_A_MARK,
+};
+
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 4),
+};
+
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+
+static const unsigned int pwm2_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(6, 5),
+};
+
+static const unsigned int pwm2_c_mux[] = {
+ PWM2_C_MARK,
+};
+
+/* - PWM3 --------------------------------------------------------------------*/
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 1),
+};
+
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 5),
+};
+
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+
+static const unsigned int pwm3_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(6, 6),
+};
+
+static const unsigned int pwm3_c_mux[] = {
+ PWM3_C_MARK,
+};
+
+/* - PWM4 --------------------------------------------------------------------*/
+static const unsigned int pwm4_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 3),
+};
+
+static const unsigned int pwm4_a_mux[] = {
+ PWM4_A_MARK,
+};
+
+static const unsigned int pwm4_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(6, 7),
+};
+
+static const unsigned int pwm4_b_mux[] = {
+ PWM4_B_MARK,
+};
+
+/* - PWM5 --------------------------------------------------------------------*/
+static const unsigned int pwm5_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 24),
+};
+
+static const unsigned int pwm5_a_mux[] = {
+ PWM5_A_MARK,
+};
+
+static const unsigned int pwm5_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(6, 10),
+};
+
+static const unsigned int pwm5_b_mux[] = {
+ PWM5_B_MARK,
+};
+
+/* - PWM6 --------------------------------------------------------------------*/
+static const unsigned int pwm6_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 25),
+};
+
+static const unsigned int pwm6_a_mux[] = {
+ PWM6_A_MARK,
+};
+
+static const unsigned int pwm6_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(6, 11),
+};
+
+static const unsigned int pwm6_b_mux[] = {
+ PWM6_B_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_a_pins[] = {
/* RX, TX */
@@ -1831,64 +2433,135 @@ static const unsigned int usb30_id_mux[] = {
USB3HS0_ID_MARK,
};
-static const struct sh_pfc_pin_group pinmux_groups[] = {
- SH_PFC_PIN_GROUP(avb_link),
- SH_PFC_PIN_GROUP(avb_magic),
- SH_PFC_PIN_GROUP(avb_phy_int),
- SH_PFC_PIN_GROUP(avb_mii),
- SH_PFC_PIN_GROUP(avb_avtp_pps),
- SH_PFC_PIN_GROUP(avb_avtp_match_a),
- SH_PFC_PIN_GROUP(avb_avtp_capture_a),
- SH_PFC_PIN_GROUP(i2c1_a),
- SH_PFC_PIN_GROUP(i2c1_b),
- SH_PFC_PIN_GROUP(i2c1_c),
- SH_PFC_PIN_GROUP(i2c1_d),
- SH_PFC_PIN_GROUP(i2c2_a),
- SH_PFC_PIN_GROUP(i2c2_b),
- SH_PFC_PIN_GROUP(i2c2_c),
- SH_PFC_PIN_GROUP(i2c2_d),
- SH_PFC_PIN_GROUP(i2c2_e),
- SH_PFC_PIN_GROUP(i2c4),
- SH_PFC_PIN_GROUP(i2c5),
- SH_PFC_PIN_GROUP(i2c6_a),
- SH_PFC_PIN_GROUP(i2c6_b),
- SH_PFC_PIN_GROUP(i2c7_a),
- SH_PFC_PIN_GROUP(i2c7_b),
- SH_PFC_PIN_GROUP(scif0_data_a),
- SH_PFC_PIN_GROUP(scif0_clk_a),
- SH_PFC_PIN_GROUP(scif0_ctrl_a),
- SH_PFC_PIN_GROUP(scif0_data_b),
- SH_PFC_PIN_GROUP(scif0_clk_b),
- SH_PFC_PIN_GROUP(scif1_data),
- SH_PFC_PIN_GROUP(scif1_clk),
- SH_PFC_PIN_GROUP(scif1_ctrl),
- SH_PFC_PIN_GROUP(scif2_data_a),
- SH_PFC_PIN_GROUP(scif2_clk_a),
- SH_PFC_PIN_GROUP(scif2_data_b),
- SH_PFC_PIN_GROUP(scif3_data_a),
- SH_PFC_PIN_GROUP(scif3_clk_a),
- SH_PFC_PIN_GROUP(scif3_ctrl_a),
- SH_PFC_PIN_GROUP(scif3_data_b),
- SH_PFC_PIN_GROUP(scif3_data_c),
- SH_PFC_PIN_GROUP(scif3_clk_c),
- SH_PFC_PIN_GROUP(scif4_data_a),
- SH_PFC_PIN_GROUP(scif4_clk_a),
- SH_PFC_PIN_GROUP(scif4_ctrl_a),
- SH_PFC_PIN_GROUP(scif4_data_b),
- SH_PFC_PIN_GROUP(scif4_clk_b),
- SH_PFC_PIN_GROUP(scif4_data_c),
- SH_PFC_PIN_GROUP(scif4_ctrl_c),
- SH_PFC_PIN_GROUP(scif5_data_a),
- SH_PFC_PIN_GROUP(scif5_clk_a),
- SH_PFC_PIN_GROUP(scif5_data_b),
- SH_PFC_PIN_GROUP(scif5_data_c),
- SH_PFC_PIN_GROUP(scif_clk_a),
- SH_PFC_PIN_GROUP(scif_clk_b),
- SH_PFC_PIN_GROUP(usb0_a),
- SH_PFC_PIN_GROUP(usb0_b),
- SH_PFC_PIN_GROUP(usb0_id),
- SH_PFC_PIN_GROUP(usb30),
- SH_PFC_PIN_GROUP(usb30_id),
+static const struct {
+ struct sh_pfc_pin_group common[123];
+ struct sh_pfc_pin_group automotive[0];
+} pinmux_groups = {
+ .common = {
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_avtp_pps),
+ SH_PFC_PIN_GROUP(avb_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_in_0),
+ SH_PFC_PIN_GROUP(du_clk_in_1),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_disp_cde),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c1_c),
+ SH_PFC_PIN_GROUP(i2c1_d),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c2_c),
+ SH_PFC_PIN_GROUP(i2c2_d),
+ SH_PFC_PIN_GROUP(i2c2_e),
+ SH_PFC_PIN_GROUP(i2c4),
+ SH_PFC_PIN_GROUP(i2c5),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c7_a),
+ SH_PFC_PIN_GROUP(i2c7_b),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_txd),
+ SH_PFC_PIN_GROUP(msiof1_rxd),
+ SH_PFC_PIN_GROUP(msiof2_clk_a),
+ SH_PFC_PIN_GROUP(msiof2_sync_a),
+ SH_PFC_PIN_GROUP(msiof2_ss1_a),
+ SH_PFC_PIN_GROUP(msiof2_ss2_a),
+ SH_PFC_PIN_GROUP(msiof2_txd_a),
+ SH_PFC_PIN_GROUP(msiof2_rxd_a),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_txd_b),
+ SH_PFC_PIN_GROUP(msiof2_rxd_b),
+ SH_PFC_PIN_GROUP(msiof3_clk_a),
+ SH_PFC_PIN_GROUP(msiof3_sync_a),
+ SH_PFC_PIN_GROUP(msiof3_ss1_a),
+ SH_PFC_PIN_GROUP(msiof3_ss2_a),
+ SH_PFC_PIN_GROUP(msiof3_txd_a),
+ SH_PFC_PIN_GROUP(msiof3_rxd_a),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_ss1_b),
+ SH_PFC_PIN_GROUP(msiof3_txd_b),
+ SH_PFC_PIN_GROUP(msiof3_rxd_b),
+ SH_PFC_PIN_GROUP(pwm0_a),
+ SH_PFC_PIN_GROUP(pwm0_b),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm2_c),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm3_c),
+ SH_PFC_PIN_GROUP(pwm4_a),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5_a),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6_a),
+ SH_PFC_PIN_GROUP(pwm6_b),
+ SH_PFC_PIN_GROUP(scif0_data_a),
+ SH_PFC_PIN_GROUP(scif0_clk_a),
+ SH_PFC_PIN_GROUP(scif0_ctrl_a),
+ SH_PFC_PIN_GROUP(scif0_data_b),
+ SH_PFC_PIN_GROUP(scif0_clk_b),
+ SH_PFC_PIN_GROUP(scif1_data),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif2_data_a),
+ SH_PFC_PIN_GROUP(scif2_clk_a),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk_a),
+ SH_PFC_PIN_GROUP(scif3_ctrl_a),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_c),
+ SH_PFC_PIN_GROUP(scif3_clk_c),
+ SH_PFC_PIN_GROUP(scif4_data_a),
+ SH_PFC_PIN_GROUP(scif4_clk_a),
+ SH_PFC_PIN_GROUP(scif4_ctrl_a),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_clk_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif4_ctrl_c),
+ SH_PFC_PIN_GROUP(scif5_data_a),
+ SH_PFC_PIN_GROUP(scif5_clk_a),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scif5_data_c),
+ SH_PFC_PIN_GROUP(scif_clk_a),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(usb0_a),
+ SH_PFC_PIN_GROUP(usb0_b),
+ SH_PFC_PIN_GROUP(usb0_id),
+ SH_PFC_PIN_GROUP(usb30),
+ SH_PFC_PIN_GROUP(usb30_id),
+ }
};
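
Note: the replacement of the flat pinmux_groups[] array above is the point of this hunk — an anonymous struct with a fixed-size common[] sub-array plus a zero-length automotive[] tail keeps both halves addressable by name and countable with ARRAY_SIZE(), so two SoCs can share one table; the soc_info hunks further down consume exactly these per-sub-array counts. A minimal standalone sketch of the idiom (invented names; the zero-length array is GNU C):

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct pin_group { const char *name; };

	/* Two sub-tables in one contiguous object: the zero-length tail
	 * costs no storage today but stays addressable and countable,
	 * so automotive-only entries can be appended later without
	 * touching consumers that use only the common half. */
	static const struct {
		struct pin_group common[2];
		struct pin_group automotive[0];
	} groups = {
		.common = { { "i2c1_a" }, { "scif0_data_a" } },
	};

	int main(void)
	{
		unsigned int full = ARRAY_SIZE(groups.common) +
				    ARRAY_SIZE(groups.automotive);
		unsigned int common_only = ARRAY_SIZE(groups.common);

		printf("full=%u common=%u\n", full, common_only);
		return 0;
	}
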
static const char * const avb_groups[] = {
@@ -1901,6 +2574,18 @@ static const char * const avb_groups[] = {
"avb_avtp_capture_a",
};
+static const char * const du_groups[] = {
+ "du_rgb666",
+ "du_rgb888",
+ "du_clk_in_0",
+ "du_clk_in_1",
+ "du_clk_out_0",
+ "du_sync",
+ "du_disp_cde",
+ "du_cde",
+ "du_disp",
+};
+
static const char * const i2c1_groups[] = {
"i2c1_a",
"i2c1_b",
@@ -1934,6 +2619,99 @@ static const char * const i2c7_groups[] = {
"i2c7_b",
};
+static const char * const intc_ex_groups[] = {
+ "intc_ex_irq0",
+ "intc_ex_irq1",
+ "intc_ex_irq2",
+ "intc_ex_irq3",
+ "intc_ex_irq4",
+ "intc_ex_irq5",
+};
+
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_sync",
+ "msiof1_ss1",
+ "msiof1_ss2",
+ "msiof1_txd",
+ "msiof1_rxd",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk_a",
+ "msiof2_sync_a",
+ "msiof2_ss1_a",
+ "msiof2_ss2_a",
+ "msiof2_txd_a",
+ "msiof2_rxd_a",
+ "msiof2_clk_b",
+ "msiof2_sync_b",
+ "msiof2_ss1_b",
+ "msiof2_ss2_b",
+ "msiof2_txd_b",
+ "msiof2_rxd_b",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk_a",
+ "msiof3_sync_a",
+ "msiof3_ss1_a",
+ "msiof3_ss2_a",
+ "msiof3_txd_a",
+ "msiof3_rxd_a",
+ "msiof3_clk_b",
+ "msiof3_sync_b",
+ "msiof3_ss1_b",
+ "msiof3_txd_b",
+ "msiof3_rxd_b",
+};
+
+static const char * const pwm0_groups[] = {
+ "pwm0_a",
+ "pwm0_b",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2_a",
+ "pwm2_b",
+ "pwm2_c",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+ "pwm3_c",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4_a",
+ "pwm4_b",
+};
+
+static const char * const pwm5_groups[] = {
+ "pwm5_a",
+ "pwm5_b",
+};
+
+static const char * const pwm6_groups[] = {
+ "pwm6_a",
+ "pwm6_b",
+};
+
static const char * const scif0_groups[] = {
"scif0_data_a",
"scif0_clk_a",
@@ -1996,23 +2774,41 @@ static const char * const usb30_groups[] = {
"usb30_id",
};
-static const struct sh_pfc_function pinmux_functions[] = {
- SH_PFC_FUNCTION(avb),
- SH_PFC_FUNCTION(i2c1),
- SH_PFC_FUNCTION(i2c2),
- SH_PFC_FUNCTION(i2c4),
- SH_PFC_FUNCTION(i2c5),
- SH_PFC_FUNCTION(i2c6),
- SH_PFC_FUNCTION(i2c7),
- SH_PFC_FUNCTION(scif0),
- SH_PFC_FUNCTION(scif1),
- SH_PFC_FUNCTION(scif2),
- SH_PFC_FUNCTION(scif3),
- SH_PFC_FUNCTION(scif4),
- SH_PFC_FUNCTION(scif5),
- SH_PFC_FUNCTION(scif_clk),
- SH_PFC_FUNCTION(usb0),
- SH_PFC_FUNCTION(usb30),
+static const struct {
+ struct sh_pfc_function common[29];
+ struct sh_pfc_function automotive[0];
+} pinmux_functions = {
+ .common = {
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c4),
+ SH_PFC_FUNCTION(i2c5),
+ SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(i2c7),
+ SH_PFC_FUNCTION(intc_ex),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb30),
+ }
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -2738,6 +3534,30 @@ static const struct sh_pfc_soc_operations r8a77990_pinmux_ops = {
.set_bias = r8a77990_pinmux_set_bias,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A774C0
+const struct sh_pfc_soc_info r8a774c0_pinmux_info = {
+ .name = "r8a774c0_pfc",
+ .ops = &r8a77990_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common),
+
+ .cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77990
const struct sh_pfc_soc_info r8a77990_pinmux_info = {
.name = "r8a77990_pfc",
.ops = &r8a77990_pinmux_ops,
@@ -2747,10 +3567,12 @@ const struct sh_pfc_soc_info r8a77990_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .groups = pinmux_groups,
- .nr_groups = ARRAY_SIZE(pinmux_groups),
- .functions = pinmux_functions,
- .nr_functions = ARRAY_SIZE(pinmux_functions),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
.bias_regs = pinmux_bias_regs,
@@ -2758,3 +3580,4 @@ const struct sh_pfc_soc_info r8a77990_pinmux_info = {
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
+#endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
index adade5b7ffbc..9484eaa8522a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A77995 processor support - PFC hardware block.
*
@@ -8,10 +9,6 @@
* R-Car Gen3 processor support - PFC hardware block.
*
* Copyright (C) 2015 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/kernel.h>
@@ -520,6 +517,10 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(QSPI0_SPCLK),
PINMUX_SINGLE(SCL0),
PINMUX_SINGLE(SDA0),
+ PINMUX_SINGLE(MSIOF0_RXD),
+ PINMUX_SINGLE(MSIOF0_TXD),
+ PINMUX_SINGLE(MSIOF0_SYNC),
+ PINMUX_SINGLE(MSIOF0_SCK),
/* IPSR0 */
PINMUX_IPSR_MSEL(IP0_3_0, IRQ0_A, SEL_IRQ_0_0),
@@ -1277,6 +1278,289 @@ static const unsigned int mmc_ctrl_mux[] = {
MMC_CLK_MARK, MMC_CMD_MARK,
};
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 12),
+};
+
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+
+static const unsigned int msiof0_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(4, 13),
+};
+
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+
+static const unsigned int msiof0_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(4, 20),
+};
+
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+
+static const unsigned int msiof0_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(4, 21),
+};
+
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+
+static const unsigned int msiof0_txd_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(4, 14),
+};
+
+static const unsigned int msiof0_txd_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+
+static const unsigned int msiof0_rxd_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(4, 15),
+};
+
+static const unsigned int msiof0_rxd_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 16),
+};
+
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+
+static const unsigned int msiof1_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(4, 19),
+};
+
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+
+static const unsigned int msiof1_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(4, 25),
+};
+
+static const unsigned int msiof1_ss1_mux[] = {
+ MSIOF1_SS1_MARK,
+};
+
+static const unsigned int msiof1_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(4, 22),
+};
+
+static const unsigned int msiof1_ss2_mux[] = {
+ MSIOF1_SS2_MARK,
+};
+
+static const unsigned int msiof1_txd_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(4, 17),
+};
+
+static const unsigned int msiof1_txd_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+
+static const unsigned int msiof1_rxd_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(4, 18),
+};
+
+static const unsigned int msiof1_rxd_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 3),
+};
+
+static const unsigned int msiof2_clk_mux[] = {
+ MSIOF2_SCK_MARK,
+};
+
+static const unsigned int msiof2_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 6),
+};
+
+static const unsigned int msiof2_sync_a_mux[] = {
+ MSIOF2_SYNC_A_MARK,
+};
+
+static const unsigned int msiof2_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 2),
+};
+
+static const unsigned int msiof2_sync_b_mux[] = {
+ MSIOF2_SYNC_B_MARK,
+};
+
+static const unsigned int msiof2_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 7),
+};
+
+static const unsigned int msiof2_ss1_mux[] = {
+ MSIOF2_SS1_MARK,
+};
+
+static const unsigned int msiof2_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 8),
+};
+
+static const unsigned int msiof2_ss2_mux[] = {
+ MSIOF2_SS2_MARK,
+};
+
+static const unsigned int msiof2_txd_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 4),
+};
+
+static const unsigned int msiof2_txd_mux[] = {
+ MSIOF2_TXD_MARK,
+};
+
+static const unsigned int msiof2_rxd_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 5),
+};
+
+static const unsigned int msiof2_rxd_mux[] = {
+ MSIOF2_RXD_MARK,
+};
+
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 24),
+};
+
+static const unsigned int msiof3_clk_a_mux[] = {
+ MSIOF3_SCK_A_MARK,
+};
+
+static const unsigned int msiof3_sync_a_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(2, 21),
+};
+
+static const unsigned int msiof3_sync_a_mux[] = {
+ MSIOF3_SYNC_A_MARK,
+};
+
+static const unsigned int msiof3_ss1_a_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(2, 14),
+};
+
+static const unsigned int msiof3_ss1_a_mux[] = {
+ MSIOF3_SS1_A_MARK,
+};
+
+static const unsigned int msiof3_ss2_a_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(2, 10),
+};
+
+static const unsigned int msiof3_ss2_a_mux[] = {
+ MSIOF3_SS2_A_MARK,
+};
+
+static const unsigned int msiof3_txd_a_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(2, 22),
+};
+
+static const unsigned int msiof3_txd_a_mux[] = {
+ MSIOF3_TXD_A_MARK,
+};
+
+static const unsigned int msiof3_rxd_a_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(2, 23),
+};
+
+static const unsigned int msiof3_rxd_a_mux[] = {
+ MSIOF3_RXD_A_MARK,
+};
+
+static const unsigned int msiof3_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 8),
+};
+
+static const unsigned int msiof3_clk_b_mux[] = {
+ MSIOF3_SCK_B_MARK,
+};
+
+static const unsigned int msiof3_sync_b_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(1, 9),
+};
+
+static const unsigned int msiof3_sync_b_mux[] = {
+ MSIOF3_SYNC_B_MARK,
+};
+
+static const unsigned int msiof3_ss1_b_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(1, 6),
+};
+
+static const unsigned int msiof3_ss1_b_mux[] = {
+ MSIOF3_SS1_B_MARK,
+};
+
+static const unsigned int msiof3_ss2_b_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(1, 7),
+};
+
+static const unsigned int msiof3_ss2_b_mux[] = {
+ MSIOF3_SS2_B_MARK,
+};
+
+static const unsigned int msiof3_txd_b_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(1, 0),
+};
+
+static const unsigned int msiof3_txd_b_mux[] = {
+ MSIOF3_TXD_B_MARK,
+};
+
+static const unsigned int msiof3_rxd_b_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(1, 1),
+};
+
+static const unsigned int msiof3_rxd_b_mux[] = {
+ MSIOF3_RXD_B_MARK,
+};
+
/* - PWM0 ------------------------------------------------------------------ */
static const unsigned int pwm0_a_pins[] = {
/* PWM */
@@ -1752,6 +2036,37 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(mmc_data4),
SH_PFC_PIN_GROUP(mmc_data8),
SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_txd),
+ SH_PFC_PIN_GROUP(msiof1_rxd),
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync_a),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_txd),
+ SH_PFC_PIN_GROUP(msiof2_rxd),
+ SH_PFC_PIN_GROUP(msiof3_clk_a),
+ SH_PFC_PIN_GROUP(msiof3_sync_a),
+ SH_PFC_PIN_GROUP(msiof3_ss1_a),
+ SH_PFC_PIN_GROUP(msiof3_ss2_a),
+ SH_PFC_PIN_GROUP(msiof3_txd_a),
+ SH_PFC_PIN_GROUP(msiof3_rxd_a),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_ss1_b),
+ SH_PFC_PIN_GROUP(msiof3_ss2_b),
+ SH_PFC_PIN_GROUP(msiof3_txd_b),
+ SH_PFC_PIN_GROUP(msiof3_rxd_b),
SH_PFC_PIN_GROUP(pwm0_a),
SH_PFC_PIN_GROUP(pwm0_b),
SH_PFC_PIN_GROUP(pwm0_c),
@@ -1982,6 +2297,49 @@ static const char * const vin4_groups[] = {
"vin4_clk",
};
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_txd",
+ "msiof0_rxd",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_sync",
+ "msiof1_ss1",
+ "msiof1_ss2",
+ "msiof1_txd",
+ "msiof1_rxd",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk",
+ "msiof2_sync_a",
+ "msiof2_sync_b",
+ "msiof2_ss1",
+ "msiof2_ss2",
+ "msiof2_txd",
+ "msiof2_rxd",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk_a",
+ "msiof3_sync_a",
+ "msiof3_ss1_a",
+ "msiof3_ss2_a",
+ "msiof3_txd_a",
+ "msiof3_rxd_a",
+ "msiof3_clk_b",
+ "msiof3_sync_b",
+ "msiof3_ss1_b",
+ "msiof3_ss2_b",
+ "msiof3_txd_b",
+ "msiof3_rxd_b",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb0),
@@ -1996,6 +2354,10 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c3),
SH_PFC_FUNCTION(mmc),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
SH_PFC_FUNCTION(pwm0),
SH_PFC_FUNCTION(pwm1),
SH_PFC_FUNCTION(pwm2),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7203.c b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
index 61b27ec48876..9ee468a9bd0e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7203.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH7203 Pinmux
*
* Copyright (C) 2008 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
index 8070765311db..4f44ce0d7237 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH7264 Pinmux
*
* Copyright (C) 2012 Renesas Electronics Europe Ltd
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index a50d22bef1f4..5b48a0368e55 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH7269 Pinmux
*
* Copyright (C) 2012 Renesas Electronics Europe Ltd
* Copyright (C) 2012 Phil Edworthy
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index d25e6f674d0a..654029fc8d96 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* sh73a0 processor support - PFC hardware block
*
* Copyright (C) 2010 Renesas Solutions Corp.
* Copyright (C) 2010 NISHIMOTO Hiroki
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; version 2 of the
- * License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/io.h>
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7720.c b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
index e07a82df42c8..65694bfaa08d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7720.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH7720 Pinmux
*
* Copyright (C) 2008 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7723.c b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
index 8ea18df03492..86f9a88726b7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7723.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH7723 Pinmux
*
* Copyright (C) 2008 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7724.c b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
index 7f6c36c1a8fa..2cc4aa7df613 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7724.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH7724 Pinmux
*
@@ -7,10 +8,6 @@
*
* Based on SH7723 Pinmux
* Copyright (C) 2008 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index 6502e676d368..b0533c86053a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH7734 processor support - PFC hardware block
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7757.c b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
index 6d8c31caefc1..b16090690ee3 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7757.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH7757 (B0 step) Pinmux
*
@@ -7,10 +8,6 @@
*
* Based on SH7723 Pinmux
* Copyright (C) 2008 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7785.c b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
index 1934cbec3965..193179f7fdd9 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7785.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH7785 Pinmux
*
* Copyright (C) 2008 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7786.c b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
index c98585d80de8..cc2657c4f85c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7786.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH7786 Pinmux
*
@@ -7,10 +8,6 @@
* Based on SH7785 pinmux
*
* Copyright (C) 2008 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/init.h>
diff --git a/drivers/pinctrl/sh-pfc/pfc-shx3.c b/drivers/pinctrl/sh-pfc/pfc-shx3.c
index 3f60c900645e..905ae00cc6f1 100644
--- a/drivers/pinctrl/sh-pfc/pfc-shx3.c
+++ b/drivers/pinctrl/sh-pfc/pfc-shx3.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH-X3 prototype CPU pinmux
*
* Copyright (C) 2010 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 654dc20e171b..274d5ff87078 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Pin Function Controller pinmux support.
*
* Copyright (C) 2012 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#define DRV_NAME "sh-pfc"
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 3d0b31636d6d..458ae0a6b540 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* SuperH Pin Function Controller Support
*
* Copyright (c) 2008 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#ifndef __SH_PFC_H
@@ -273,8 +270,11 @@ extern const struct sh_pfc_soc_info emev2_pinmux_info;
extern const struct sh_pfc_soc_info r8a73a4_pinmux_info;
extern const struct sh_pfc_soc_info r8a7740_pinmux_info;
extern const struct sh_pfc_soc_info r8a7743_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7744_pinmux_info;
extern const struct sh_pfc_soc_info r8a7745_pinmux_info;
extern const struct sh_pfc_soc_info r8a77470_pinmux_info;
+extern const struct sh_pfc_soc_info r8a774a1_pinmux_info;
+extern const struct sh_pfc_soc_info r8a774c0_pinmux_info;
extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
extern const struct sh_pfc_soc_info r8a7790_pinmux_info;
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 3abb028f6158..4ba171827428 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -19,14 +19,13 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
/* Definition of Pad&Mux Properties */
#define N 0
@@ -5540,14 +5539,10 @@ static int atlas7_pinmux_resume_noirq(struct device *dev)
{
struct atlas7_pmx *pmx = dev_get_drvdata(dev);
struct atlas7_pad_status *status;
- struct atlas7_pad_config *conf;
int idx;
- u32 bank;
for (idx = 0; idx < pmx->pctl_desc.npins; idx++) {
/* Get this Pad's descriptor from PINCTRL */
- conf = &pmx->pctl_data->confs[idx];
- bank = atlas7_pin_to_bank(idx);
status = &pmx->sleep_data[idx];
/* Restore Function selector */
@@ -6058,8 +6053,8 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
ret = gpiochip_add_data(chip, a7gc);
if (ret) {
dev_err(&pdev->dev,
- "%s: error in probe function with status %d\n",
- np->name, ret);
+ "%pOF: error in probe function with status %d\n",
+ np, ret);
goto failed;
}
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 505845c66dd0..2e42d738b589 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -27,7 +27,7 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/bitops.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of_gpio.h>
#include "pinctrl-sirf.h"
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index aa5cf7032231..db029b148c87 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -12,7 +12,7 @@
#ifndef __PINMUX_SPEAR_H__
#define __PINMUX_SPEAR_H__
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/types.h>
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index 78c2f548b25f..4537b5453996 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -1059,6 +1059,12 @@ int sprd_pinctrl_core_probe(struct platform_device *pdev,
return ret;
}
+ ret = sprd_pinctrl_parse_dt(sprd_pctl);
+ if (ret) {
+ dev_err(&pdev->dev, "fail to parse dt properties\n");
+ return ret;
+ }
+
pin_desc = devm_kcalloc(&pdev->dev,
pinctrl_info->npins,
sizeof(struct pinctrl_pin_desc),
@@ -1083,13 +1089,6 @@ int sprd_pinctrl_core_probe(struct platform_device *pdev,
return PTR_ERR(sprd_pctl->pctl);
}
- ret = sprd_pinctrl_parse_dt(sprd_pctl);
- if (ret) {
- dev_err(&pdev->dev, "fail to parse dt properties\n");
- pinctrl_unregister(sprd_pctl->pctl);
- return ret;
- }
-
return 0;
}
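
Note: the two sprd hunks above are one move — sprd_pinctrl_parse_dt() now runs before the pin descriptors are allocated and the pinctrl device is registered, so a parse failure returns directly instead of having to call pinctrl_unregister(). A standalone sketch of the ordering rule the move applies (hypothetical names):

	struct ctx { int parsed; int registered; };

	/* Side-effect-free step: failing here needs no teardown. */
	static int parse_config(struct ctx *c)
	{
		c->parsed = 1;
		return 0;
	}

	/* Step with side effects: once it succeeds, any later failure
	 * would need an explicit unregister, so run it last. */
	static int register_device(struct ctx *c)
	{
		c->registered = 1;
		return 0;
	}

	static int probe(struct ctx *c)
	{
		int ret = parse_config(c);	/* moved first, as in the hunk */

		if (ret)
			return ret;		/* nothing to unwind */
		return register_device(c);
	}

	int main(void)
	{
		struct ctx c = { 0, 0 };

		return probe(&c) ? 1 : 0;
	}
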
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index a9bec6e6fdd1..0fbfcc9ea07c 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -416,8 +416,8 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
pins = of_find_property(node, "pinmux", NULL);
if (!pins) {
- dev_err(pctl->dev, "missing pins property in node %s .\n",
- node->name);
+ dev_err(pctl->dev, "missing pins property in node %pOFn .\n",
+ node);
return -EINVAL;
}
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 4d9bf9b3e9f3..34e17376ef99 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -332,15 +332,15 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
function = sunxi_pctrl_parse_function_prop(node);
if (!function) {
- dev_err(pctl->dev, "missing function property in node %s\n",
- node->name);
+ dev_err(pctl->dev, "missing function property in node %pOFn\n",
+ node);
return -EINVAL;
}
pin_prop = sunxi_pctrl_find_pins_prop(node, &npins);
if (!pin_prop) {
- dev_err(pctl->dev, "missing pins property in node %s\n",
- node->name);
+ dev_err(pctl->dev, "missing pins property in node %pOFn\n",
+ node);
return -EINVAL;
}
@@ -1042,6 +1042,7 @@ static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl,
static int sunxi_pinctrl_build_state(struct platform_device *pdev)
{
struct sunxi_pinctrl *pctl = platform_get_drvdata(pdev);
+ void *ptr;
int i;
/*
@@ -1079,10 +1080,9 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
* We suppose that we won't have any more functions than pins,
* we'll reallocate that later anyway
*/
- pctl->functions = devm_kcalloc(&pdev->dev,
- pctl->ngroups,
- sizeof(*pctl->functions),
- GFP_KERNEL);
+ pctl->functions = kcalloc(pctl->ngroups,
+ sizeof(*pctl->functions),
+ GFP_KERNEL);
if (!pctl->functions)
return -ENOMEM;
@@ -1109,13 +1109,15 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
}
/* And now allocated and fill the array for real */
- pctl->functions = krealloc(pctl->functions,
- pctl->nfunctions * sizeof(*pctl->functions),
- GFP_KERNEL);
- if (!pctl->functions) {
+ ptr = krealloc(pctl->functions,
+ pctl->nfunctions * sizeof(*pctl->functions),
+ GFP_KERNEL);
+ if (!ptr) {
kfree(pctl->functions);
+ pctl->functions = NULL;
return -ENOMEM;
}
+ pctl->functions = ptr;
for (i = 0; i < pctl->desc->npins; i++) {
const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
@@ -1133,8 +1135,10 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
func_item = sunxi_pinctrl_find_function_by_name(pctl,
func->name);
- if (!func_item)
+ if (!func_item) {
+ kfree(pctl->functions);
return -EINVAL;
+ }
if (!func_item->groups) {
func_item->groups =
@@ -1142,8 +1146,10 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
func_item->ngroups,
sizeof(*func_item->groups),
GFP_KERNEL);
- if (!func_item->groups)
+ if (!func_item->groups) {
+ kfree(pctl->functions);
return -ENOMEM;
+ }
}
func_grp = func_item->groups;
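
Note: the sunxi hunks plug a classic realloc leak — writing krealloc()'s result straight back into pctl->functions loses the only reference to the old buffer when the call fails, so the result now goes through a temporary first, and the buffer moves from devm_kcalloc() to plain kcalloc() so the new error paths can free it explicitly. The same discipline with userspace realloc(), as a self-contained sketch:

	#include <stdlib.h>

	/* Shrink-or-keep: stash realloc()'s result in a temporary so the
	 * original buffer is still reachable (and freeable) on failure. */
	static int shrink_table(char **table, size_t new_size)
	{
		char *ptr = realloc(*table, new_size);

		if (!ptr && new_size) {
			free(*table);	/* error path now owns the cleanup */
			*table = NULL;
			return -1;
		}
		*table = ptr;
		return 0;
	}

	int main(void)
	{
		char *t = calloc(16, 1);

		if (!t)
			return 1;
		if (shrink_table(&t, 8))
			return 1;
		free(t);
		return 0;
	}
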
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 1aba75897d14..a5008c066bac 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -737,4 +737,3 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
return 0;
}
-EXPORT_SYMBOL_GPL(tegra_pinctrl_probe);
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 8782c348ebe9..a4bc506a01a3 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -452,8 +452,8 @@ static int ti_iodelay_node_iterator(struct pinctrl_dev *pctldev,
pin = ti_iodelay_offset_to_pin(iod, cfg[pin_index].offset);
if (pin < 0) {
- dev_err(iod->dev, "could not add functions for %s %ux\n",
- np->name, cfg[pin_index].offset);
+ dev_err(iod->dev, "could not add functions for %pOFn %ux\n",
+ np, cfg[pin_index].offset);
return -ENODEV;
}
pins[pin_index] = pin;
@@ -461,8 +461,8 @@ static int ti_iodelay_node_iterator(struct pinctrl_dev *pctldev,
pd = &iod->pa[pin];
pd->drv_data = &cfg[pin_index];
- dev_dbg(iod->dev, "%s offset=%x a_delay = %d g_delay = %d\n",
- np->name, cfg[pin_index].offset, cfg[pin_index].a_delay,
+ dev_dbg(iod->dev, "%pOFn offset=%x a_delay = %d g_delay = %d\n",
+ np, cfg[pin_index].offset, cfg[pin_index].a_delay,
cfg[pin_index].g_delay);
return 0;
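
Note: the node->name uses removed across the atlas7, stm32, sunxi and ti-iodelay hunks above all move to the %pOF/%pOFn printk extensions, which format a struct device_node pointer as its full path or bare node name. A kernel-context sketch of the two specifiers (not standalone-buildable):

	#include <linux/device.h>
	#include <linux/of.h>

	/* %pOFn prints the node name, %pOF the full path, straight from
	 * the device_node pointer -- no ->name dereference needed. */
	static void report_missing_pins(struct device *dev,
					struct device_node *np)
	{
		dev_err(dev, "missing pins property in node %pOFn\n", np);
		dev_dbg(dev, "offending node path: %pOF\n", np);
	}
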
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index 60722898d5c7..4326f5c3683c 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
@@ -1048,9 +1048,8 @@ static const unsigned nand_cs1_pins[] = {131, 132};
static const int nand_cs1_muxvals[] = {1, 1};
static const unsigned sd_pins[] = {150, 151, 152, 153, 154, 155, 156, 157, 158};
static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
-static const unsigned sd1_pins[] = {319, 320, 321, 322, 323, 324, 325, 326,
- 327};
-static const int sd1_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned int sd1_pins[] = {319, 320, 321, 322, 323, 324, 325, 326};
+static const int sd1_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned spi0_pins[] = {199, 200, 201, 202};
static const int spi0_muxvals[] = {11, 11, 11, 11};
static const unsigned spi1_pins[] = {195, 196, 197, 198, 235, 238, 239};
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
index 0a3d2ac27503..c63e3c8b97cd 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier.h
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
@@ -16,7 +16,7 @@
#ifndef __PINCTRL_UNIPHIER_H__
#define __PINCTRL_UNIPHIER_H__
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index c08318a5a91b..ccdf68e766b8 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -494,10 +494,8 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
u32 val;
val = readl_relaxed(data->base + reg_dir);
- if (val & BIT(bit))
- return GPIOF_DIR_OUT;
- else
- return GPIOF_DIR_IN;
+ /* Return 0 == output, 1 == input */
+ return !(val & BIT(bit));
}
static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
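
Note: the wmt_gpio_get_direction() hunk leans on the numeric values behind the removed flags — GPIOF_DIR_OUT is 0 and GPIOF_DIR_IN is 1 — so a direction register whose set bit means "output" reduces to a single negation. A self-contained check of that equivalence:

	#include <assert.h>

	#define BIT(n)	(1U << (n))
	#define DIR_OUT	0	/* value behind GPIOF_DIR_OUT */
	#define DIR_IN	1	/* value behind GPIOF_DIR_IN */

	/* A set bit in the direction register means "output". */
	static int get_direction(unsigned int dir_reg, unsigned int bit)
	{
		return !(dir_reg & BIT(bit));
	}

	int main(void)
	{
		assert(get_direction(BIT(3), 3) == DIR_OUT);
		assert(get_direction(0, 3) == DIR_IN);
		return 0;
	}
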
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.h b/drivers/pinctrl/vt8500/pinctrl-wmt.h
index 885613396fe7..ade8be3b98b0 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.h
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.h
@@ -13,7 +13,7 @@
* more details.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
/* VT8500 has no enable register in the extgpio bank. */
#define NO_REG 0xFFFF
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 398393ab5df8..b6fd4838f60f 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -520,7 +520,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
ret = cros_ec_cmd_xfer(ec_dev, msg);
if (ret > 0) {
ec_dev->event_size = ret - 1;
- memcpy(&ec_dev->event_data, msg->data, ec_dev->event_size);
+ memcpy(&ec_dev->event_data, msg->data, ret);
}
return ret;
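
Note: reading the cros_ec hunk, the transfer returns ret bytes where byte 0 is the event type and the remaining ret - 1 bytes are payload (hence event_size = ret - 1), so copying only event_size bytes into event_data dropped the payload's final byte once the type byte is accounted for. A standalone model of the arithmetic, with an invented layout:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	/* Invented layout matching the hunk's shape: byte 0 is the event
	 * type, the rest is payload, so the copy needs all ret bytes. */
	struct event_data {
		uint8_t event_type;
		uint8_t data[16];
	};

	int main(void)
	{
		uint8_t msg[4] = { 0x01, 0xaa, 0xbb, 0xcc };	/* ret == 4 */
		struct event_data ev;
		int ret = sizeof(msg);
		int event_size = ret - 1;		/* payload length only */

		memcpy(&ev, msg, ret);			/* the fixed copy length */
		assert(event_size == 3);
		assert(ev.data[event_size - 1] == 0xcc);	/* last byte kept */
		return 0;
	}
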
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0c1aa6c314f5..bdac939de223 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -867,6 +867,8 @@ config INTEL_CHT_INT33FE
tristate "Intel Cherry Trail ACPI INT33FE Driver"
depends on X86 && ACPI && I2C && REGULATOR
depends on CHARGER_BQ24190=y || (CHARGER_BQ24190=m && m)
+ depends on USB_ROLES_INTEL_XHCI=y || (USB_ROLES_INTEL_XHCI=m && m)
+ depends on TYPEC_MUX_PI3USB30532=y || (TYPEC_MUX_PI3USB30532=m && m)
---help---
This driver add support for the INT33FE ACPI device found on
some Intel Cherry Trail devices.
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index d975462a4c57..f10af5c383c5 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
if (obj && obj->type == ACPI_TYPE_INTEGER)
*out_data = (u32) obj->integer.value;
}
+ kfree(output.pointer);
return status;
}
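
Note: the one-line alienware-wmi fix above (and the matching kfree() in the dell-smbios-wmi hunk below) closes an ACPI buffer leak — with ACPI_ALLOCATE_BUFFER, ACPICA allocates output.pointer on success and the caller must free it on every exit path, even when the result was only inspected. A kernel-context sketch of the shape (not standalone-buildable):

	#include <linux/acpi.h>
	#include <linux/slab.h>

	/* With ACPI_ALLOCATE_BUFFER the callee allocates output->pointer;
	 * the caller owns it and must kfree() it unconditionally. */
	static u32 read_integer_result(acpi_status status,
				       struct acpi_buffer *output)
	{
		union acpi_object *obj = output->pointer;
		u32 value = 0;

		if (ACPI_SUCCESS(status) && obj &&
		    obj->type == ACPI_TYPE_INTEGER)
			value = (u32)obj->integer.value;

		kfree(output->pointer);	/* the line the fix adds */
		return value;
	}
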
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 2d6e272315a8..93ee2d5466f8 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -254,7 +254,7 @@ struct asus_wmi {
int asus_hwmon_num_fans;
int asus_hwmon_pwm;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
struct mutex wmi_lock;
struct workqueue_struct *hotplug_workqueue;
@@ -753,7 +753,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
if (asus->wlan.rfkill)
rfkill_set_sw_state(asus->wlan.rfkill, blocked);
- if (asus->hotplug_slot) {
+ if (asus->hotplug_slot.ops) {
bus = pci_find_bus(0, 1);
if (!bus) {
pr_warn("Unable to find PCI bus 1?\n");
@@ -858,7 +858,8 @@ static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node)
static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
u8 *value)
{
- struct asus_wmi *asus = hotplug_slot->private;
+ struct asus_wmi *asus = container_of(hotplug_slot,
+ struct asus_wmi, hotplug_slot);
int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
if (result < 0)
@@ -868,8 +869,7 @@ static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-static struct hotplug_slot_ops asus_hotplug_slot_ops = {
- .owner = THIS_MODULE,
+static const struct hotplug_slot_ops asus_hotplug_slot_ops = {
.get_adapter_status = asus_get_adapter_status,
.get_power_status = asus_get_adapter_status,
};
@@ -899,21 +899,9 @@ static int asus_setup_pci_hotplug(struct asus_wmi *asus)
INIT_WORK(&asus->hotplug_work, asus_hotplug_work);
- asus->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!asus->hotplug_slot)
- goto error_slot;
-
- asus->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!asus->hotplug_slot->info)
- goto error_info;
+ asus->hotplug_slot.ops = &asus_hotplug_slot_ops;
- asus->hotplug_slot->private = asus;
- asus->hotplug_slot->ops = &asus_hotplug_slot_ops;
- asus_get_adapter_status(asus->hotplug_slot,
- &asus->hotplug_slot->info->adapter_status);
-
- ret = pci_hp_register(asus->hotplug_slot, bus, 0, "asus-wifi");
+ ret = pci_hp_register(&asus->hotplug_slot, bus, 0, "asus-wifi");
if (ret) {
pr_err("Unable to register hotplug slot - %d\n", ret);
goto error_register;
@@ -922,11 +910,7 @@ static int asus_setup_pci_hotplug(struct asus_wmi *asus)
return 0;
error_register:
- kfree(asus->hotplug_slot->info);
-error_info:
- kfree(asus->hotplug_slot);
- asus->hotplug_slot = NULL;
-error_slot:
+ asus->hotplug_slot.ops = NULL;
destroy_workqueue(asus->hotplug_workqueue);
error_workqueue:
return ret;
@@ -1054,11 +1038,8 @@ static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
* asus_unregister_rfkill_notifier()
*/
asus_rfkill_hotplug(asus);
- if (asus->hotplug_slot) {
- pci_hp_deregister(asus->hotplug_slot);
- kfree(asus->hotplug_slot->info);
- kfree(asus->hotplug_slot);
- }
+ if (asus->hotplug_slot.ops)
+ pci_hp_deregister(&asus->hotplug_slot);
if (asus->hotplug_workqueue)
destroy_workqueue(asus->hotplug_workqueue);
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index 88afe5651d24..cf2229ece9ff 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev)
dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
priv->buf->std.output[0], priv->buf->std.output[1],
priv->buf->std.output[2], priv->buf->std.output[3]);
+ kfree(output.pointer);
return 0;
}
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index a4bbf6ecd1f0..e6946a9beb5a 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -177,7 +177,7 @@ struct eeepc_laptop {
struct rfkill *wwan3g_rfkill;
struct rfkill *wimax_rfkill;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
struct led_classdev tpd_led;
@@ -582,7 +582,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
mutex_lock(&eeepc->hotplug_lock);
pci_lock_rescan_remove();
- if (!eeepc->hotplug_slot)
+ if (!eeepc->hotplug_slot.ops)
goto out_unlock;
port = acpi_get_pci_dev(handle);
@@ -715,8 +715,11 @@ static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
u8 *value)
{
- struct eeepc_laptop *eeepc = hotplug_slot->private;
- int val = get_acpi(eeepc, CM_ASL_WLAN);
+ struct eeepc_laptop *eeepc;
+ int val;
+
+ eeepc = container_of(hotplug_slot, struct eeepc_laptop, hotplug_slot);
+ val = get_acpi(eeepc, CM_ASL_WLAN);
if (val == 1 || val == 0)
*value = val;
@@ -726,8 +729,7 @@ static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
- .owner = THIS_MODULE,
+static const struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
.get_adapter_status = eeepc_get_adapter_status,
.get_power_status = eeepc_get_adapter_status,
};
@@ -742,21 +744,9 @@ static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
return -ENODEV;
}
- eeepc->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!eeepc->hotplug_slot)
- goto error_slot;
+ eeepc->hotplug_slot.ops = &eeepc_hotplug_slot_ops;
- eeepc->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!eeepc->hotplug_slot->info)
- goto error_info;
-
- eeepc->hotplug_slot->private = eeepc;
- eeepc->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
- eeepc_get_adapter_status(eeepc->hotplug_slot,
- &eeepc->hotplug_slot->info->adapter_status);
-
- ret = pci_hp_register(eeepc->hotplug_slot, bus, 0, "eeepc-wifi");
+ ret = pci_hp_register(&eeepc->hotplug_slot, bus, 0, "eeepc-wifi");
if (ret) {
pr_err("Unable to register hotplug slot - %d\n", ret);
goto error_register;
@@ -765,11 +755,7 @@ static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
return 0;
error_register:
- kfree(eeepc->hotplug_slot->info);
-error_info:
- kfree(eeepc->hotplug_slot);
- eeepc->hotplug_slot = NULL;
-error_slot:
+ eeepc->hotplug_slot.ops = NULL;
return ret;
}
@@ -830,11 +816,8 @@ static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
eeepc->wlan_rfkill = NULL;
}
- if (eeepc->hotplug_slot) {
- pci_hp_deregister(eeepc->hotplug_slot);
- kfree(eeepc->hotplug_slot->info);
- kfree(eeepc->hotplug_slot);
- }
+ if (eeepc->hotplug_slot.ops)
+ pci_hp_deregister(&eeepc->hotplug_slot);
if (eeepc->bluetooth_rfkill) {
rfkill_unregister(eeepc->bluetooth_rfkill);
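
Note: the asus-wmi and eeepc-laptop conversions above share one shape — struct hotplug_slot is embedded in the driver state instead of kzalloc'd, the separate ->info allocation and the ->private back-pointer drop out, a non-NULL .ops doubles as the "slot registered" flag, and callbacks recover the enclosing object with container_of(). A self-contained analogue of the callback side:

	#include <assert.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct hotplug_slot { const void *ops; };

	struct driver_state {
		int wlan_on;
		struct hotplug_slot hotplug_slot;	/* embedded, not heap */
	};

	/* The callback gets only the embedded member; container_of()
	 * walks back to the owner, replacing the removed ->private. */
	static int get_adapter_status(struct hotplug_slot *slot)
	{
		struct driver_state *st =
			container_of(slot, struct driver_state, hotplug_slot);

		return st->wlan_on;
	}

	int main(void)
	{
		struct driver_state st = { .wlan_on = 1 };

		assert(get_adapter_status(&st.hotplug_slot) == 1);
		return 0;
	}
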
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index 39d4100c60a2..f40b1c192106 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -24,6 +24,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -34,7 +35,7 @@ struct cht_int33fe_data {
struct i2c_client *fusb302;
struct i2c_client *pi3usb30532;
/* Each connection contains a list_head, so this must be per device */
- struct device_connection connections[3];
+ struct device_connection connections[5];
};
/*
@@ -88,9 +89,9 @@ static const struct property_entry fusb302_props[] = {
{ }
};
-static int cht_int33fe_probe(struct i2c_client *client)
+static int cht_int33fe_probe(struct platform_device *pdev)
{
- struct device *dev = &client->dev;
+ struct device *dev = &pdev->dev;
struct i2c_board_info board_info;
struct cht_int33fe_data *data;
struct i2c_client *max17047;
@@ -174,19 +175,20 @@ static int cht_int33fe_probe(struct i2c_client *client)
return -EPROBE_DEFER; /* Wait for i2c-adapter to load */
}
- data->connections[0].endpoint[0] = "i2c-fusb302";
+ data->connections[0].endpoint[0] = "port0";
data->connections[0].endpoint[1] = "i2c-pi3usb30532";
data->connections[0].id = "typec-switch";
- data->connections[1].endpoint[0] = "i2c-fusb302";
+ data->connections[1].endpoint[0] = "port0";
data->connections[1].endpoint[1] = "i2c-pi3usb30532";
data->connections[1].id = "typec-mux";
- data->connections[2].endpoint[0] = "i2c-fusb302";
- data->connections[2].endpoint[1] = "intel_xhci_usb_sw-role-switch";
- data->connections[2].id = "usb-role-switch";
+ data->connections[2].endpoint[0] = "port0";
+ data->connections[2].endpoint[1] = "i2c-pi3usb30532";
+ data->connections[2].id = "idff01m01";
+ data->connections[3].endpoint[0] = "i2c-fusb302";
+ data->connections[3].endpoint[1] = "intel_xhci_usb_sw-role-switch";
+ data->connections[3].id = "usb-role-switch";
- device_connection_add(&data->connections[0]);
- device_connection_add(&data->connections[1]);
- device_connection_add(&data->connections[2]);
+ device_connections_add(data->connections);
memset(&board_info, 0, sizeof(board_info));
strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
@@ -206,7 +208,7 @@ static int cht_int33fe_probe(struct i2c_client *client)
if (!data->pi3usb30532)
goto out_unregister_fusb302;
- i2c_set_clientdata(client, data);
+ platform_set_drvdata(pdev, data);
return 0;
@@ -217,52 +219,41 @@ out_unregister_max17047:
if (data->max17047)
i2c_unregister_device(data->max17047);
- device_connection_remove(&data->connections[2]);
- device_connection_remove(&data->connections[1]);
- device_connection_remove(&data->connections[0]);
+ device_connections_remove(data->connections);
return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
}
-static int cht_int33fe_remove(struct i2c_client *i2c)
+static int cht_int33fe_remove(struct platform_device *pdev)
{
- struct cht_int33fe_data *data = i2c_get_clientdata(i2c);
+ struct cht_int33fe_data *data = platform_get_drvdata(pdev);
i2c_unregister_device(data->pi3usb30532);
i2c_unregister_device(data->fusb302);
if (data->max17047)
i2c_unregister_device(data->max17047);
- device_connection_remove(&data->connections[2]);
- device_connection_remove(&data->connections[1]);
- device_connection_remove(&data->connections[0]);
+ device_connections_remove(data->connections);
return 0;
}
-static const struct i2c_device_id cht_int33fe_i2c_id[] = {
- { }
-};
-MODULE_DEVICE_TABLE(i2c, cht_int33fe_i2c_id);
-
static const struct acpi_device_id cht_int33fe_acpi_ids[] = {
{ "INT33FE", },
{ }
};
MODULE_DEVICE_TABLE(acpi, cht_int33fe_acpi_ids);
-static struct i2c_driver cht_int33fe_driver = {
+static struct platform_driver cht_int33fe_driver = {
.driver = {
.name = "Intel Cherry Trail ACPI INT33FE driver",
.acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids),
},
- .probe_new = cht_int33fe_probe,
+ .probe = cht_int33fe_probe,
.remove = cht_int33fe_remove,
- .id_table = cht_int33fe_i2c_id,
- .disable_i2c_core_irq_mapping = true,
};
-module_i2c_driver(cht_int33fe_driver);
+module_platform_driver(cht_int33fe_driver);
MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE pseudo device driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index a473dc51b18d..e89ad4964dc1 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -60,7 +60,7 @@ static const struct x86_cpu_id int0002_cpu_ids[] = {
/*
* Limit ourselves to Cherry Trail for now, until testing shows we
* need to handle the INT0002 device on Baytrail too.
- * ICPU(INTEL_FAM6_ATOM_SILVERMONT1), * Valleyview, Bay Trail *
+ * ICPU(INTEL_FAM6_ATOM_SILVERMONT), * Valleyview, Bay Trail *
*/
ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
{}
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index d79fbf924b13..5ad44204a9c3 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -125,8 +125,8 @@ static const struct mid_pb_ddata mrfld_ddata = {
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata }
static const struct x86_cpu_id mid_pb_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_PENWELL, mfld_ddata),
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, mrfld_ddata),
+ ICPU(INTEL_FAM6_ATOM_SALTWELL_MID, mfld_ddata),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, mrfld_ddata),
{}
};
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index ffd0474b0531..cee08f236292 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -320,7 +320,7 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_debugfs_conf),
- TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, telem_apl_debugfs_conf),
+ TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, telem_apl_debugfs_conf),
{}
};
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 2f889d6c270e..fcc6bee51a42 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -192,7 +192,7 @@ static struct telemetry_plt_config telem_glk_config = {
static const struct x86_cpu_id telemetry_cpu_ids[] = {
TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config),
- TELEM_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, telem_glk_config),
+ TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, telem_glk_config),
{}
};
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 0206cce328b3..2b686c55b717 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -19,6 +19,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk/at91_pmc.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -69,7 +70,10 @@ struct shdwc_config {
struct shdwc {
const struct shdwc_config *cfg;
- void __iomem *at91_shdwc_base;
+ struct clk *sclk;
+ void __iomem *shdwc_base;
+ void __iomem *mpddrc_base;
+ void __iomem *pmc_base;
};
/*
@@ -77,8 +81,6 @@ struct shdwc {
* since pm_power_off itself is global.
*/
static struct shdwc *at91_shdwc;
-static struct clk *sclk;
-static void __iomem *mpddrc_base;
static const unsigned long long sdwc_dbc_period[] = {
0, 3, 32, 512, 4096, 32768,
@@ -90,7 +92,7 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
u32 reg;
char *reason = "unknown";
- reg = readl(shdw->at91_shdwc_base + AT91_SHDW_SR);
+ reg = readl(shdw->shdwc_base + AT91_SHDW_SR);
dev_dbg(&pdev->dev, "%s: status = %#x\n", __func__, reg);
@@ -108,12 +110,6 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
static void at91_poweroff(void)
{
- writel(AT91_SHDW_KEY | AT91_SHDW_SHDW,
- at91_shdwc->at91_shdwc_base + AT91_SHDW_CR);
-}
-
-static void at91_lpddr_poweroff(void)
-{
asm volatile(
/* Align to cache lines */
".balign 32\n\t"
@@ -122,16 +118,29 @@ static void at91_lpddr_poweroff(void)
" ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
/* Power down SDRAM0 */
+ " tst %0, #0\n\t"
+ " beq 1f\n\t"
" str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+
+ /* Switch the master clock source to slow clock. */
+ "1: ldr r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+ " bic r6, r6, #" __stringify(AT91_PMC_CSS) "\n\t"
+ " str r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+ /* Wait for clock switch. */
+ "2: ldr r6, [%4, #" __stringify(AT91_PMC_SR) "]\n\t"
+ " tst r6, #" __stringify(AT91_PMC_MCKRDY) "\n\t"
+ " beq 2b\n\t"
+
/* Shutdown CPU */
" str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
" b .\n\t"
:
- : "r" (mpddrc_base),
+ : "r" (at91_shdwc->mpddrc_base),
"r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
- "r" (at91_shdwc->at91_shdwc_base),
- "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
+ "r" (at91_shdwc->shdwc_base),
+ "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW),
+ "r" (at91_shdwc->pmc_base)
: "r6");
}
@@ -213,10 +222,10 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
mode |= SHDW_RTCWKEN(shdw->cfg);
dev_dbg(&pdev->dev, "%s: mode = %#x\n", __func__, mode);
- writel(mode, shdw->at91_shdwc_base + AT91_SHDW_MR);
+ writel(mode, shdw->shdwc_base + AT91_SHDW_MR);
input = at91_shdwc_get_wakeup_input(pdev, np);
- writel(input, shdw->at91_shdwc_base + AT91_SHDW_WUIR);
+ writel(input, shdw->shdwc_base + AT91_SHDW_WUIR);
}
static const struct shdwc_config sama5d2_shdwc_config = {
@@ -246,6 +255,9 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
if (!pdev->dev.of_node)
return -ENODEV;
+ if (at91_shdwc)
+ return -EBUSY;
+
at91_shdwc = devm_kzalloc(&pdev->dev, sizeof(*at91_shdwc), GFP_KERNEL);
if (!at91_shdwc)
return -ENOMEM;
@@ -253,20 +265,20 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, at91_shdwc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- at91_shdwc->at91_shdwc_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(at91_shdwc->at91_shdwc_base)) {
+ at91_shdwc->shdwc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(at91_shdwc->shdwc_base)) {
dev_err(&pdev->dev, "Could not map reset controller address\n");
- return PTR_ERR(at91_shdwc->at91_shdwc_base);
+ return PTR_ERR(at91_shdwc->shdwc_base);
}
match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
at91_shdwc->cfg = match->data;
- sclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sclk))
- return PTR_ERR(sclk);
+ at91_shdwc->sclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(at91_shdwc->sclk))
+ return PTR_ERR(at91_shdwc->sclk);
- ret = clk_prepare_enable(sclk);
+ ret = clk_prepare_enable(at91_shdwc->sclk);
if (ret) {
dev_err(&pdev->dev, "Could not enable slow clock\n");
return ret;
@@ -276,41 +288,70 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
at91_shdwc_dt_configure(pdev);
- pm_power_off = at91_poweroff;
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-pmc");
+ if (!np) {
+ ret = -ENODEV;
+ goto clk_disable;
+ }
+
+ at91_shdwc->pmc_base = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!at91_shdwc->pmc_base) {
+ ret = -ENOMEM;
+ goto clk_disable;
+ }
np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
- if (!np)
- return 0;
+ if (!np) {
+ ret = -ENODEV;
+ goto unmap;
+ }
- mpddrc_base = of_iomap(np, 0);
+ at91_shdwc->mpddrc_base = of_iomap(np, 0);
of_node_put(np);
- if (!mpddrc_base)
- return 0;
+ if (!at91_shdwc->mpddrc_base) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
- ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
- if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) ||
- (ddr_type == AT91_DDRSDRC_MD_LPDDR3))
- pm_power_off = at91_lpddr_poweroff;
- else
- iounmap(mpddrc_base);
+ pm_power_off = at91_poweroff;
+
+ ddr_type = readl(at91_shdwc->mpddrc_base + AT91_DDRSDRC_MDR) &
+ AT91_DDRSDRC_MD;
+ if (ddr_type != AT91_DDRSDRC_MD_LPDDR2 &&
+ ddr_type != AT91_DDRSDRC_MD_LPDDR3) {
+ iounmap(at91_shdwc->mpddrc_base);
+ at91_shdwc->mpddrc_base = NULL;
+ }
return 0;
+
+unmap:
+ iounmap(at91_shdwc->pmc_base);
+clk_disable:
+ clk_disable_unprepare(at91_shdwc->sclk);
+
+ return ret;
}
static int __exit at91_shdwc_remove(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
- if (pm_power_off == at91_poweroff ||
- pm_power_off == at91_lpddr_poweroff)
+ if (pm_power_off == at91_poweroff)
pm_power_off = NULL;
/* Reset values to disable wake-up features */
- writel(0, shdw->at91_shdwc_base + AT91_SHDW_MR);
- writel(0, shdw->at91_shdwc_base + AT91_SHDW_WUIR);
+ writel(0, shdw->shdwc_base + AT91_SHDW_MR);
+ writel(0, shdw->shdwc_base + AT91_SHDW_WUIR);
+
+ if (shdw->mpddrc_base)
+ iounmap(shdw->mpddrc_base);
+ iounmap(shdw->pmc_base);
- clk_disable_unprepare(sclk);
+ clk_disable_unprepare(shdw->sclk);
return 0;
}
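
The probe rework above turns the early-return style into a cascading-label unwind so the PMC mapping and the slow clock are released in reverse order of acquisition on failure. A compilable toy sketch of that shape (stub helpers, not the driver's functions):

#include <stdio.h>

static int enable_clock(void)   { return 0; }
static void disable_clock(void) { puts("slow clock released"); }
static int map_pmc(void)        { return 0; }
static void unmap_pmc(void)     { puts("pmc unmapped"); }
static int map_mpddrc(void)     { return -1; }	/* simulate failure */

static int demo_probe(void)
{
	int ret;

	ret = enable_clock();
	if (ret)
		return ret;

	ret = map_pmc();
	if (ret)
		goto clk_disable;

	ret = map_mpddrc();
	if (ret)
		goto unmap;

	return 0;

unmap:
	unmap_pmc();		/* undo map_pmc() */
clk_disable:
	disable_clock();	/* undo enable_clock() */
	return ret;
}

int main(void)
{
	return demo_probe() ? 1 : 0;
}
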
diff --git a/drivers/power/reset/qcom-pon.c b/drivers/power/reset/qcom-pon.c
index 0c4caaa7e88f..3fa1642d4c54 100644
--- a/drivers/power/reset/qcom-pon.c
+++ b/drivers/power/reset/qcom-pon.c
@@ -74,6 +74,7 @@ static int pm8916_pon_probe(struct platform_device *pdev)
static const struct of_device_id pm8916_pon_id_table[] = {
{ .compatible = "qcom,pm8916-pon" },
+ { .compatible = "qcom,pms405-pon" },
{ }
};
MODULE_DEVICE_TABLE(of, pm8916_pon_id_table);
diff --git a/drivers/power/reset/rmobile-reset.c b/drivers/power/reset/rmobile-reset.c
index e6569df76941..bd3b396558e0 100644
--- a/drivers/power/reset/rmobile-reset.c
+++ b/drivers/power/reset/rmobile-reset.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Mobile Reset Driver
*
* Copyright (C) 2014 Glider bvba
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/io.h>
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index ff6dab0bf0dd..f27cf0709500 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -645,4 +645,11 @@ config CHARGER_CROS_USBPD
what is connected to USB PD ports from the EC and converts
that into power_supply properties.
+config CHARGER_SC2731
+ tristate "Spreadtrum SC2731 charger driver"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ Say Y here to enable support for battery charging with SC2731
+ PMIC chips.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index a26b402c45d9..767105b88d00 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -85,3 +85,4 @@ obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
+obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 02356f9b5f22..776102c31305 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2433,17 +2433,14 @@ static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf,
size_t count)
{
unsigned long charge_full;
- ssize_t ret;
+ int ret;
ret = kstrtoul(buf, 10, &charge_full);
+ if (ret)
+ return ret;
- dev_dbg(di->dev, "Ret %zd charge_full %lu", ret, charge_full);
-
- if (!ret) {
- di->bat_cap.max_mah = (int) charge_full;
- ret = count;
- }
- return ret;
+ di->bat_cap.max_mah = (int) charge_full;
+ return count;
}
static ssize_t charge_now_show(struct ab8500_fg *di, char *buf)
@@ -2455,20 +2452,16 @@ static ssize_t charge_now_store(struct ab8500_fg *di, const char *buf,
size_t count)
{
unsigned long charge_now;
- ssize_t ret;
+ int ret;
ret = kstrtoul(buf, 10, &charge_now);
+ if (ret)
+ return ret;
- dev_dbg(di->dev, "Ret %zd charge_now %lu was %d",
- ret, charge_now, di->bat_cap.prev_mah);
-
- if (!ret) {
- di->bat_cap.user_mah = (int) charge_now;
- di->flags.user_cap = true;
- ret = count;
- queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
- }
- return ret;
+ di->bat_cap.user_mah = (int) charge_now;
+ di->flags.user_cap = true;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ return count;
}
static struct ab8500_fg_sysfs_entry charge_full_attr =
@@ -2582,11 +2575,12 @@ static ssize_t ab8505_powercut_flagtime_write(struct device *dev,
const char *buf, size_t count)
{
int ret;
- long unsigned reg_value;
+ int reg_value;
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
if (reg_value > 0x7F) {
dev_err(dev, "Incorrect parameter, echo 0 (1.98s) - 127 (15.625ms) for flagtime\n");
@@ -2636,7 +2630,9 @@ static ssize_t ab8505_powercut_maxtime_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0x7F) {
dev_err(dev, "Incorrect parameter, echo 0 (0.0s) - 127 (1.98s) for maxtime\n");
goto fail;
@@ -2684,7 +2680,9 @@ static ssize_t ab8505_powercut_restart_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0xF) {
dev_err(dev, "Incorrect parameter, echo 0 - 15 for number of restart\n");
goto fail;
@@ -2777,7 +2775,9 @@ static ssize_t ab8505_powercut_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0x1) {
dev_err(dev, "Incorrect parameter, echo 0/1 to disable/enable Pcut feature\n");
goto fail;
@@ -2849,7 +2849,9 @@ static ssize_t ab8505_powercut_debounce_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0x7) {
dev_err(dev, "Incorrect parameter, echo 0 to 7 for debounce setting\n");
goto fail;
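
All of the ab8500_fg hunks above converge on the same pattern: replace the unchecked simple_strtoul() with a kstrto*() call whose result is tested before the value is used. A sketch of the resulting store-method shape (hypothetical attribute, illustrative range bound):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	int value;
	int ret;

	ret = kstrtoint(buf, 10, &value);
	if (ret)
		return ret;	/* -EINVAL/-ERANGE instead of silent garbage */

	if (value > 0x7F)	/* range-check before touching hardware */
		return -EINVAL;

	/* ... program the device with value ... */

	return count;	/* a sysfs store returns bytes consumed */
}
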
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 8e2c41ded171..70b90db5ae38 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -32,6 +32,7 @@
#define BQ25890_IRQ_PIN "bq25890_irq"
#define BQ25890_ID 3
+#define BQ25896_ID 0
enum bq25890_fields {
F_EN_HIZ, F_EN_ILIM, F_IILIM, /* Reg00 */
@@ -153,8 +154,8 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_CONV_RATE] = REG_FIELD(0x02, 6, 6),
[F_BOOSTF] = REG_FIELD(0x02, 5, 5),
[F_ICO_EN] = REG_FIELD(0x02, 4, 4),
- [F_HVDCP_EN] = REG_FIELD(0x02, 3, 3),
- [F_MAXC_EN] = REG_FIELD(0x02, 2, 2),
+ [F_HVDCP_EN] = REG_FIELD(0x02, 3, 3), // reserved on BQ25896
+ [F_MAXC_EN] = REG_FIELD(0x02, 2, 2), // reserved on BQ25896
[F_FORCE_DPM] = REG_FIELD(0x02, 1, 1),
[F_AUTO_DPDM_EN] = REG_FIELD(0x02, 0, 0),
/* REG03 */
@@ -163,6 +164,7 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_OTG_CFG] = REG_FIELD(0x03, 5, 5),
[F_CHG_CFG] = REG_FIELD(0x03, 4, 4),
[F_SYSVMIN] = REG_FIELD(0x03, 1, 3),
+ /* MIN_VBAT_SEL on BQ25896 */
/* REG04 */
[F_PUMPX_EN] = REG_FIELD(0x04, 7, 7),
[F_ICHG] = REG_FIELD(0x04, 0, 6),
@@ -181,7 +183,7 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_CHG_TMR] = REG_FIELD(0x07, 1, 2),
[F_JEITA_ISET] = REG_FIELD(0x07, 0, 0),
/* REG08 */
- [F_BATCMP] = REG_FIELD(0x08, 6, 7),
+ [F_BATCMP] = REG_FIELD(0x08, 6, 7), // 5-7 on BQ25896
[F_VCLAMP] = REG_FIELD(0x08, 2, 4),
[F_TREG] = REG_FIELD(0x08, 0, 1),
/* REG09 */
@@ -195,12 +197,13 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_PUMPX_DN] = REG_FIELD(0x09, 0, 0),
/* REG0A */
[F_BOOSTV] = REG_FIELD(0x0A, 4, 7),
+ /* PFM_OTG_DIS 3 on BQ25896 */
[F_BOOSTI] = REG_FIELD(0x0A, 0, 2),
/* REG0B */
[F_VBUS_STAT] = REG_FIELD(0x0B, 5, 7),
[F_CHG_STAT] = REG_FIELD(0x0B, 3, 4),
[F_PG_STAT] = REG_FIELD(0x0B, 2, 2),
- [F_SDP_STAT] = REG_FIELD(0x0B, 1, 1),
+ [F_SDP_STAT] = REG_FIELD(0x0B, 1, 1), // reserved on BQ25896
[F_VSYS_STAT] = REG_FIELD(0x0B, 0, 0),
/* REG0C */
[F_WD_FAULT] = REG_FIELD(0x0C, 7, 7),
@@ -244,10 +247,7 @@ enum bq25890_table_ids {
/* range tables */
TBL_ICHG,
TBL_ITERM,
- TBL_IPRECHG,
TBL_VREG,
- TBL_BATCMP,
- TBL_VCLAMP,
TBL_BOOSTV,
TBL_SYSVMIN,
@@ -287,8 +287,6 @@ static const union {
[TBL_ICHG] = { .rt = {0, 5056000, 64000} }, /* uA */
[TBL_ITERM] = { .rt = {64000, 1024000, 64000} }, /* uA */
[TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */
- [TBL_BATCMP] = { .rt = {0, 140, 20} }, /* mOhm */
- [TBL_VCLAMP] = { .rt = {0, 224000, 32000} }, /* uV */
[TBL_BOOSTV] = { .rt = {4550000, 5510000, 64000} }, /* uV */
[TBL_SYSVMIN] = { .rt = {3000000, 3700000, 100000} }, /* uV */
@@ -401,6 +399,16 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
val->strval = BQ25890_MANUFACTURER;
break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ if (bq->chip_id == BQ25890_ID)
+ val->strval = "BQ25890";
+ else if (bq->chip_id == BQ25896_ID)
+ val->strval = "BQ25896";
+ else
+ val->strval = "UNKNOWN";
+
+ break;
+
case POWER_SUPPLY_PROP_ONLINE:
val->intval = state.online;
break;
@@ -453,6 +461,15 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
val->intval = bq25890_find_val(bq->init_data.iterm, TBL_ITERM);
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = bq25890_field_read(bq, F_SYSV); /* read measured value */
+ if (ret < 0)
+ return ret;
+
+ /* converted_val = 2.304V + ADC_val * 20mV (table 10.3.15) */
+ val->intval = 2304000 + ret * 20000;
+ break;
+
default:
return -EINVAL;
}
@@ -608,30 +625,40 @@ static int bq25890_hw_init(struct bq25890_device *bq)
};
ret = bq25890_chip_reset(bq);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Reset failed %d\n", ret);
return ret;
+ }
/* disable watchdog */
ret = bq25890_field_write(bq, F_WD, 0);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Disabling watchdog failed %d\n", ret);
return ret;
+ }
/* initialize currents/voltages and other parameters */
for (i = 0; i < ARRAY_SIZE(init_data); i++) {
ret = bq25890_field_write(bq, init_data[i].id,
init_data[i].value);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Writing init data failed %d\n", ret);
return ret;
+ }
}
/* Configure ADC for continuous conversions. This does not enable it. */
ret = bq25890_field_write(bq, F_CONV_RATE, 1);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Config ADC failed %d\n", ret);
return ret;
+ }
ret = bq25890_get_chip_state(bq, &state);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Get state failed %d\n", ret);
return ret;
+ }
mutex_lock(&bq->lock);
bq->state = state;
@@ -642,6 +669,7 @@ static int bq25890_hw_init(struct bq25890_device *bq)
static enum power_supply_property bq25890_power_supply_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_HEALTH,
@@ -650,6 +678,7 @@ static enum power_supply_property bq25890_power_supply_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
};
static char *bq25890_charger_supplied_to[] = {
@@ -767,6 +796,9 @@ static int bq25890_fw_read_u32_props(struct bq25890_device *bq)
if (props[i].optional)
continue;
+ dev_err(bq->dev, "Unable to read property %d %s\n", ret,
+ props[i].name);
+
return ret;
}
@@ -840,7 +872,7 @@ static int bq25890_probe(struct i2c_client *client,
return bq->chip_id;
}
- if (bq->chip_id != BQ25890_ID) {
+ if ((bq->chip_id != BQ25890_ID) && (bq->chip_id != BQ25896_ID)) {
dev_err(dev, "Chip with ID=%d, not supported!\n", bq->chip_id);
return -ENODEV;
}
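
For the new VOLTAGE_NOW property above, the raw F_SYSV ADC reading is linear per the hunk's comment: a 2.304 V offset plus 20 mV per LSB, reported in microvolts. A one-line helper makes the arithmetic concrete (illustrative, not part of the patch):

static inline int demo_sysv_adc_to_uv(int raw)
{
	/* e.g. raw = 42 -> 2304000 + 840000 = 3144000 uV (3.144 V) */
	return 2304000 + raw * 20000;
}
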
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index f022e1b550df..6dbbe95844a3 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -432,6 +432,7 @@ static u8
[BQ27XXX_REG_AP] = 0x18,
BQ27XXX_DM_REG_ROWS,
};
+#define bq27411_regs bq27421_regs
#define bq27425_regs bq27421_regs
#define bq27426_regs bq27421_regs
#define bq27441_regs bq27421_regs
@@ -665,6 +666,7 @@ static enum power_supply_property bq27421_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_MANUFACTURER,
};
+#define bq27411_props bq27421_props
#define bq27425_props bq27421_props
#define bq27426_props bq27421_props
#define bq27441_props bq27421_props
@@ -725,6 +727,12 @@ static struct bq27xxx_dm_reg bq27545_dm_regs[] = {
#define bq27545_dm_regs 0
#endif
+static struct bq27xxx_dm_reg bq27411_dm_regs[] = {
+ [BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 10, 2, 0, 32767 },
+ [BQ27XXX_DM_DESIGN_ENERGY] = { 82, 12, 2, 0, 32767 },
+ [BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 16, 2, 2800, 3700 },
+};
+
static struct bq27xxx_dm_reg bq27421_dm_regs[] = {
[BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 10, 2, 0, 8000 },
[BQ27XXX_DM_DESIGN_ENERGY] = { 82, 12, 2, 0, 32767 },
@@ -802,6 +810,7 @@ static struct {
[BQ27546] = BQ27XXX_DATA(bq27546, 0 , BQ27XXX_O_OTDC),
[BQ27742] = BQ27XXX_DATA(bq27742, 0 , BQ27XXX_O_OTDC),
[BQ27545] = BQ27XXX_DATA(bq27545, 0x04143672, BQ27XXX_O_OTDC),
+ [BQ27411] = BQ27XXX_DATA(bq27411, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
[BQ27421] = BQ27XXX_DATA(bq27421, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
[BQ27425] = BQ27XXX_DATA(bq27425, 0x04143672, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP),
[BQ27426] = BQ27XXX_DATA(bq27426, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 40069128ad44..2677c38a8a42 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -247,6 +247,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27546", BQ27546 },
{ "bq27742", BQ27742 },
{ "bq27545", BQ27545 },
+ { "bq27411", BQ27411 },
{ "bq27421", BQ27421 },
{ "bq27425", BQ27425 },
{ "bq27426", BQ27426 },
@@ -279,6 +280,7 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
{ .compatible = "ti,bq27546" },
{ .compatible = "ti,bq27742" },
{ .compatible = "ti,bq27545" },
+ { .compatible = "ti,bq27411" },
{ .compatible = "ti,bq27421" },
{ .compatible = "ti,bq27425" },
{ .compatible = "ti,bq27426" },
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 688a16bacfbb..7e9c3984ef6a 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -12,8 +12,12 @@
#include <linux/power_supply.h>
#include <linux/slab.h>
-#define CHARGER_DIR_NAME "CROS_USBPD_CHARGER%d"
-#define CHARGER_DIR_NAME_LENGTH sizeof(CHARGER_DIR_NAME)
+#define CHARGER_USBPD_DIR_NAME "CROS_USBPD_CHARGER%d"
+#define CHARGER_DEDICATED_DIR_NAME "CROS_DEDICATED_CHARGER"
+#define CHARGER_DIR_NAME_LENGTH (sizeof(CHARGER_USBPD_DIR_NAME) >= \
+ sizeof(CHARGER_DEDICATED_DIR_NAME) ? \
+ sizeof(CHARGER_USBPD_DIR_NAME) : \
+ sizeof(CHARGER_DEDICATED_DIR_NAME))
#define CHARGER_CACHE_UPDATE_DELAY msecs_to_jiffies(500)
#define CHARGER_MANUFACTURER_MODEL_LENGTH 32
@@ -42,6 +46,7 @@ struct charger_data {
struct cros_ec_dev *ec_dev;
struct cros_ec_device *ec_device;
int num_charger_ports;
+ int num_usbpd_ports;
int num_registered_psy;
struct port_data *ports[EC_USB_PD_MAX_PORTS];
struct notifier_block notifier;
@@ -58,6 +63,12 @@ static enum power_supply_property cros_usbpd_charger_props[] = {
POWER_SUPPLY_PROP_USB_TYPE
};
+static enum power_supply_property cros_usbpd_dedicated_charger_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
static enum power_supply_usb_type cros_usbpd_charger_usb_types[] = {
POWER_SUPPLY_USB_TYPE_UNKNOWN,
POWER_SUPPLY_USB_TYPE_SDP,
@@ -69,6 +80,11 @@ static enum power_supply_usb_type cros_usbpd_charger_usb_types[] = {
POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID
};
+static bool cros_usbpd_charger_port_is_dedicated(struct port_data *port)
+{
+ return port->port_number >= port->charger->num_usbpd_ports;
+}
+
static int cros_usbpd_charger_ec_command(struct charger_data *charger,
unsigned int version,
unsigned int command,
@@ -103,6 +119,23 @@ static int cros_usbpd_charger_ec_command(struct charger_data *charger,
static int cros_usbpd_charger_get_num_ports(struct charger_data *charger)
{
+ struct ec_response_charge_port_count resp;
+ int ret;
+
+ ret = cros_usbpd_charger_ec_command(charger, 0,
+ EC_CMD_CHARGE_PORT_COUNT,
+ NULL, 0, &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_err(charger->dev,
+ "Unable to get the number of ports (err:0x%x)\n", ret);
+ return ret;
+ }
+
+ return resp.port_count;
+}
+
+static int cros_usbpd_charger_get_usbpd_num_ports(struct charger_data *charger)
+{
struct ec_response_usb_pd_ports resp;
int ret;
@@ -246,7 +279,10 @@ static int cros_usbpd_charger_get_power_info(struct port_data *port)
port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
}
- port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
+ if (cros_usbpd_charger_port_is_dedicated(port))
+ port->psy_desc.type = POWER_SUPPLY_TYPE_MAINS;
+ else
+ port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
dev_dbg(dev,
"Port %d: type=%d vmax=%d vnow=%d cmax=%d clim=%d pmax=%d\n",
@@ -281,7 +317,8 @@ static int cros_usbpd_charger_get_port_status(struct port_data *port,
if (ret < 0)
return ret;
- ret = cros_usbpd_charger_get_discovery_info(port);
+ if (!cros_usbpd_charger_port_is_dedicated(port))
+ ret = cros_usbpd_charger_get_discovery_info(port);
port->last_update = jiffies;
return ret;
@@ -378,12 +415,10 @@ static int cros_usbpd_charger_ec_event(struct notifier_block *nb,
{
struct cros_ec_device *ec_device;
struct charger_data *charger;
- struct device *dev;
u32 host_event;
charger = container_of(nb, struct charger_data, notifier);
ec_device = charger->ec_device;
- dev = charger->dev;
host_event = cros_ec_get_host_event(ec_device);
if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU)) {
@@ -426,17 +461,56 @@ static int cros_usbpd_charger_probe(struct platform_device *pd)
platform_set_drvdata(pd, charger);
+ /*
+ * We need to know the number of USB PD ports in order to know whether
+ * there is a dedicated port. The dedicated port will always be
+ * after the USB PD ports, and there should be only one.
+ */
+ charger->num_usbpd_ports =
+ cros_usbpd_charger_get_usbpd_num_ports(charger);
+ if (charger->num_usbpd_ports <= 0) {
+ /*
+ * This can happen on a system that doesn't support USB PD.
+ * Log a message, but no need to warn.
+ */
+ dev_info(dev, "No USB PD charging ports found\n");
+ }
+
charger->num_charger_ports = cros_usbpd_charger_get_num_ports(charger);
- if (charger->num_charger_ports <= 0) {
+ if (charger->num_charger_ports < 0) {
/*
* This can happen on a system that doesn't support USB PD.
* Log a message, but no need to warn.
+ * Older ECs do not support the above command; in that case,
+ * fall back to using the number of USB PD ports as the
+ * number of charger ports.
+ */
+ dev_info(dev, "Could not get charger port count\n");
+ charger->num_charger_ports = charger->num_usbpd_ports;
+ }
+
+ if (charger->num_charger_ports <= 0) {
+ /*
+ * This can happen on a system that doesn't support USB PD and
+ * doesn't have a dedicated port.
+ * Log a message, but no need to warn.
*/
dev_info(dev, "No charging ports found\n");
ret = -ENODEV;
goto fail_nowarn;
}
+ /*
+ * Sanity checks on the number of ports:
+ * there should be at most 1 dedicated port
+ */
+ if (charger->num_charger_ports < charger->num_usbpd_ports ||
+ charger->num_charger_ports > (charger->num_usbpd_ports + 1)) {
+ dev_err(dev, "Unexpected number of charge port count\n");
+ ret = -EPROTO;
+ goto fail_nowarn;
+ }
+
for (i = 0; i < charger->num_charger_ports; i++) {
struct power_supply_config psy_cfg = {};
@@ -448,22 +522,33 @@ static int cros_usbpd_charger_probe(struct platform_device *pd)
port->charger = charger;
port->port_number = i;
- sprintf(port->name, CHARGER_DIR_NAME, i);
psy_desc = &port->psy_desc;
- psy_desc->name = port->name;
- psy_desc->type = POWER_SUPPLY_TYPE_USB;
psy_desc->get_property = cros_usbpd_charger_get_prop;
psy_desc->external_power_changed =
cros_usbpd_charger_power_changed;
- psy_desc->properties = cros_usbpd_charger_props;
- psy_desc->num_properties =
- ARRAY_SIZE(cros_usbpd_charger_props);
- psy_desc->usb_types = cros_usbpd_charger_usb_types;
- psy_desc->num_usb_types =
- ARRAY_SIZE(cros_usbpd_charger_usb_types);
psy_cfg.drv_data = port;
+ if (cros_usbpd_charger_port_is_dedicated(port)) {
+ sprintf(port->name, CHARGER_DEDICATED_DIR_NAME);
+ psy_desc->type = POWER_SUPPLY_TYPE_MAINS;
+ psy_desc->properties =
+ cros_usbpd_dedicated_charger_props;
+ psy_desc->num_properties =
+ ARRAY_SIZE(cros_usbpd_dedicated_charger_props);
+ } else {
+ sprintf(port->name, CHARGER_USBPD_DIR_NAME, i);
+ psy_desc->type = POWER_SUPPLY_TYPE_USB;
+ psy_desc->properties = cros_usbpd_charger_props;
+ psy_desc->num_properties =
+ ARRAY_SIZE(cros_usbpd_charger_props);
+ psy_desc->usb_types = cros_usbpd_charger_usb_types;
+ psy_desc->num_usb_types =
+ ARRAY_SIZE(cros_usbpd_charger_usb_types);
+ }
+
+ psy_desc->name = port->name;
+
psy = devm_power_supply_register_no_ws(dev, psy_desc,
&psy_cfg);
if (IS_ERR(psy)) {
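
Putting the registration loop together, the resulting port layout looks like this (counts hypothetical; at most one dedicated port, always numbered after the USB PD ports):

/*
 * Worked example with num_usbpd_ports = 2, num_charger_ports = 3:
 *   port 0 -> "CROS_USBPD_CHARGER0"    (POWER_SUPPLY_TYPE_USB)
 *   port 1 -> "CROS_USBPD_CHARGER1"    (POWER_SUPPLY_TYPE_USB)
 *   port 2 -> "CROS_DEDICATED_CHARGER" (POWER_SUPPLY_TYPE_MAINS)
 */
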
diff --git a/drivers/power/supply/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c
index 370e9109342b..cad14ba1b648 100644
--- a/drivers/power/supply/ds2780_battery.c
+++ b/drivers/power/supply/ds2780_battery.c
@@ -829,5 +829,5 @@ module_platform_driver(ds2780_battery_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
-MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauage IC driver");
+MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC driver");
MODULE_ALIAS("platform:ds2780-battery");
diff --git a/drivers/power/supply/ds2781_battery.c b/drivers/power/supply/ds2781_battery.c
index d1b5a19aae7c..5e794607f732 100644
--- a/drivers/power/supply/ds2781_battery.c
+++ b/drivers/power/supply/ds2781_battery.c
@@ -829,6 +829,6 @@ module_platform_driver(ds2781_battery_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>");
-MODULE_DESCRIPTION("Maxim/Dallas DS2781 Stand-Alone Fuel Gauage IC driver");
+MODULE_DESCRIPTION("Maxim/Dallas DS2781 Stand-Alone Fuel Gauge IC driver");
MODULE_ALIAS("platform:ds2781-battery");
diff --git a/drivers/power/supply/ds2782_battery.c b/drivers/power/supply/ds2782_battery.c
index a1b7e0592245..019c58493e3d 100644
--- a/drivers/power/supply/ds2782_battery.c
+++ b/drivers/power/supply/ds2782_battery.c
@@ -471,5 +471,5 @@ static struct i2c_driver ds278x_battery_driver = {
module_i2c_driver(ds278x_battery_driver);
MODULE_AUTHOR("Ryan Mallon");
-MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver");
+MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c
index 449fc56f09eb..8a59feac6468 100644
--- a/drivers/power/supply/max14577_charger.c
+++ b/drivers/power/supply/max14577_charger.c
@@ -1,19 +1,9 @@
-/*
- * max14577_charger.c - Battery charger driver for the Maxim 14577/77836
- *
- * Copyright (C) 2013,2014 Samsung Electronics
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max14577_charger.c - Battery charger driver for the Maxim 14577/77836
+//
+// Copyright (C) 2013,2014 Samsung Electronics
+// Krzysztof Kozlowski <krzk@kernel.org>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 33c40f79d23d..91cafc7bed30 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -1,14 +1,10 @@
-/*
- * max17040_battery.c
- * fuel-gauge systems for lithium-ion (Li+) batteries
- *
- * Copyright (C) 2009 Samsung Electronics
- * Minkyu Kang <mk7.kang@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// max17040_battery.c
+// fuel-gauge systems for lithium-ion (Li+) batteries
+//
+// Copyright (C) 2009 Samsung Electronics
+// Minkyu Kang <mk7.kang@samsung.com>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 1a568df383db..2a8d75e5e930 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -1,26 +1,12 @@
-/*
- * Fuel gauge driver for Maxim 17042 / 8966 / 8997
- * Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max17040_battery.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Fuel gauge driver for Maxim 17042 / 8966 / 8997
+// Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
+//
+// Copyright (C) 2011 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
+//
+// This driver is based on max17040_battery.c
#include <linux/acpi.h>
#include <linux/init.h>
diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c
index 749c7926e3c9..a2c5c9858639 100644
--- a/drivers/power/supply/max77693_charger.c
+++ b/drivers/power/supply/max77693_charger.c
@@ -1,19 +1,9 @@
-/*
- * max77693_charger.c - Battery charger driver for the Maxim 77693
- *
- * Copyright (C) 2014 Samsung Electronics
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77693_charger.c - Battery charger driver for the Maxim 77693
+//
+// Copyright (C) 2014 Samsung Electronics
+// Krzysztof Kozlowski <krzk@kernel.org>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/power/supply/max8925_power.c b/drivers/power/supply/max8925_power.c
index 3b94620ce5c1..39b4d5b6ac39 100644
--- a/drivers/power/supply/max8925_power.c
+++ b/drivers/power/supply/max8925_power.c
@@ -124,6 +124,7 @@ static irqreturn_t max8925_charger_handler(int irq, void *data)
case MAX8925_IRQ_VCHG_THM_OK_F:
/* Battery is not ready yet */
dev_dbg(chip->dev, "Battery temperature is out of range\n");
+ /* Fall through */
case MAX8925_IRQ_VCHG_DC_OVP:
dev_dbg(chip->dev, "Error detection\n");
__set_charger(info, 0);
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index c73fb4221695..f5e84cd47924 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -1,23 +1,9 @@
-/*
- * max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966
+//
+// Copyright (C) 2011 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
#include <linux/err.h>
#include <linux/module.h>
diff --git a/drivers/power/supply/max8998_charger.c b/drivers/power/supply/max8998_charger.c
index cad7d1a8feec..9a926c7c0f22 100644
--- a/drivers/power/supply/max8998_charger.c
+++ b/drivers/power/supply/max8998_charger.c
@@ -1,23 +1,9 @@
-/*
- * max8998_charger.c - Power supply consumer driver for the Maxim 8998/LP3974
- *
- * Copyright (C) 2009-2010 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8998_charger.c - Power supply consumer driver for the Maxim 8998/LP3974
+//
+// Copyright (C) 2009-2010 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
#include <linux/err.h>
#include <linux/module.h>
@@ -86,7 +72,7 @@ static const struct power_supply_desc max8998_battery_desc = {
static int max8998_battery_probe(struct platform_device *pdev)
{
struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8998_platform_data *pdata = iodev->pdata;
struct power_supply_config psy_cfg = {};
struct max8998_battery_data *max8998;
struct i2c_client *i2c;
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 6170ed8b6854..dce24f596160 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -131,7 +131,8 @@ static ssize_t power_supply_show_property(struct device *dev,
dev_dbg(dev, "driver has no data for `%s' property\n",
attr->attr.name);
else if (ret != -ENODEV && ret != -EAGAIN)
- dev_err(dev, "driver failed to report `%s' property: %zd\n",
+ dev_err_ratelimited(dev,
+ "driver failed to report `%s' property: %zd\n",
attr->attr.name, ret);
return ret;
}
diff --git a/drivers/power/supply/sc2731_charger.c b/drivers/power/supply/sc2731_charger.c
new file mode 100644
index 000000000000..525a820537bf
--- /dev/null
+++ b/drivers/power/supply/sc2731_charger.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Spreadtrum Communications Inc.
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/usb/phy.h>
+#include <linux/regmap.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+
+/* PMIC global registers definition */
+#define SC2731_CHARGE_STATUS 0xedc
+#define SC2731_CHARGE_FULL BIT(4)
+#define SC2731_MODULE_EN1 0xc0c
+#define SC2731_CHARGE_EN BIT(5)
+
+/* SC2731 switch charger registers definition */
+#define SC2731_CHG_CFG0 0x0
+#define SC2731_CHG_CFG1 0x4
+#define SC2731_CHG_CFG2 0x8
+#define SC2731_CHG_CFG3 0xc
+#define SC2731_CHG_CFG4 0x10
+#define SC2731_CHG_CFG5 0x28
+
+/* SC2731_CHG_CFG0 register definition */
+#define SC2731_PRECHG_RNG_SHIFT 11
+#define SC2731_PRECHG_RNG_MASK GENMASK(12, 11)
+
+#define SC2731_TERMINATION_VOL_MASK GENMASK(2, 1)
+#define SC2731_TERMINATION_VOL_SHIFT 1
+#define SC2731_TERMINATION_VOL_CAL_MASK GENMASK(8, 3)
+#define SC2731_TERMINATION_VOL_CAL_SHIFT 3
+#define SC2731_TERMINATION_CUR_MASK GENMASK(2, 0)
+
+#define SC2731_CC_EN BIT(13)
+#define SC2731_CHARGER_PD BIT(0)
+
+/* SC2731_CHG_CFG1 register definition */
+#define SC2731_CUR_MASK GENMASK(5, 0)
+
+/* SC2731_CHG_CFG5 register definition */
+#define SC2731_CUR_LIMIT_SHIFT 8
+#define SC2731_CUR_LIMIT_MASK GENMASK(9, 8)
+
+/* Default current definition (unit is mA) */
+#define SC2731_CURRENT_LIMIT_100 100
+#define SC2731_CURRENT_LIMIT_500 500
+#define SC2731_CURRENT_LIMIT_900 900
+#define SC2731_CURRENT_LIMIT_2000 2000
+#define SC2731_CURRENT_PRECHG 450
+#define SC2731_CURRENT_STEP 50
+
+struct sc2731_charger_info {
+ struct device *dev;
+ struct regmap *regmap;
+ struct usb_phy *usb_phy;
+ struct notifier_block usb_notify;
+ struct power_supply *psy_usb;
+ struct mutex lock;
+ bool charging;
+ u32 base;
+};
+
+static void sc2731_charger_stop_charge(struct sc2731_charger_info *info)
+{
+ regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CC_EN, 0);
+
+ regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CHARGER_PD, SC2731_CHARGER_PD);
+}
+
+static int sc2731_charger_start_charge(struct sc2731_charger_info *info)
+{
+ int ret;
+
+ /* Enable charger constant current mode */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CC_EN, SC2731_CC_EN);
+ if (ret)
+ return ret;
+
+ /* Start charging */
+ return regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CHARGER_PD, 0);
+}
+
+static int sc2731_charger_set_current_limit(struct sc2731_charger_info *info,
+ u32 limit)
+{
+ u32 val;
+
+ if (limit <= SC2731_CURRENT_LIMIT_100)
+ val = 0;
+ else if (limit <= SC2731_CURRENT_LIMIT_500)
+ val = 3;
+ else if (limit <= SC2731_CURRENT_LIMIT_900)
+ val = 2;
+ else
+ val = 1;
+
+ return regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG5,
+ SC2731_CUR_LIMIT_MASK,
+ val << SC2731_CUR_LIMIT_SHIFT);
+}
+
+static int sc2731_charger_set_current(struct sc2731_charger_info *info, u32 cur)
+{
+ u32 val;
+ int ret;
+
+ if (cur > SC2731_CURRENT_LIMIT_2000)
+ cur = SC2731_CURRENT_LIMIT_2000;
+ else if (cur < SC2731_CURRENT_PRECHG)
+ cur = SC2731_CURRENT_PRECHG;
+
+ /* Calculate the step value, each step is 50 mA */
+ val = (cur - SC2731_CURRENT_PRECHG) / SC2731_CURRENT_STEP;
+
+ /* Set pre-charge current as 450 mA */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_PRECHG_RNG_MASK,
+ 0x3 << SC2731_PRECHG_RNG_SHIFT);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG1,
+ SC2731_CUR_MASK, val);
+}
+
+static int sc2731_charger_get_status(struct sc2731_charger_info *info)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(info->regmap, SC2731_CHARGE_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (val & SC2731_CHARGE_FULL)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ return POWER_SUPPLY_STATUS_CHARGING;
+}
+
+static int sc2731_charger_get_current(struct sc2731_charger_info *info,
+ u32 *cur)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(info->regmap, info->base + SC2731_CHG_CFG1, &val);
+ if (ret)
+ return ret;
+
+ val &= SC2731_CUR_MASK;
+ *cur = val * SC2731_CURRENT_STEP + SC2731_CURRENT_PRECHG;
+
+ return 0;
+}
+
+static int sc2731_charger_get_current_limit(struct sc2731_charger_info *info,
+ u32 *cur)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(info->regmap, info->base + SC2731_CHG_CFG5, &val);
+ if (ret)
+ return ret;
+
+ val = (val & SC2731_CUR_LIMIT_MASK) >> SC2731_CUR_LIMIT_SHIFT;
+
+ switch (val) {
+ case 0:
+ *cur = SC2731_CURRENT_LIMIT_100;
+ break;
+
+ case 1:
+ *cur = SC2731_CURRENT_LIMIT_2000;
+ break;
+
+ case 2:
+ *cur = SC2731_CURRENT_LIMIT_900;
+ break;
+
+ case 3:
+ *cur = SC2731_CURRENT_LIMIT_500;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+sc2731_charger_usb_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct sc2731_charger_info *info = power_supply_get_drvdata(psy);
+ int ret;
+
+ mutex_lock(&info->lock);
+
+ if (!info->charging) {
+ mutex_unlock(&info->lock);
+ return -ENODEV;
+ }
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = sc2731_charger_set_current(info, val->intval / 1000);
+ if (ret < 0)
+ dev_err(info->dev, "set charge current failed\n");
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = sc2731_charger_set_current_limit(info,
+ val->intval / 1000);
+ if (ret < 0)
+ dev_err(info->dev, "set input current limit failed\n");
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int sc2731_charger_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct sc2731_charger_info *info = power_supply_get_drvdata(psy);
+ int ret = 0;
+ u32 cur;
+
+ mutex_lock(&info->lock);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (info->charging)
+ val->intval = sc2731_charger_get_status(info);
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ if (!info->charging) {
+ val->intval = 0;
+ } else {
+ ret = sc2731_charger_get_current(info, &cur);
+ if (ret)
+ goto out;
+
+ val->intval = cur * 1000;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ if (!info->charging) {
+ val->intval = 0;
+ } else {
+ ret = sc2731_charger_get_current_limit(info, &cur);
+ if (ret)
+ goto out;
+
+ val->intval = cur * 1000;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int sc2731_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = 1;
+ break;
+
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property sc2731_usb_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+};
+
+static const struct power_supply_desc sc2731_charger_desc = {
+ .name = "sc2731_charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = sc2731_usb_props,
+ .num_properties = ARRAY_SIZE(sc2731_usb_props),
+ .get_property = sc2731_charger_usb_get_property,
+ .set_property = sc2731_charger_usb_set_property,
+ .property_is_writeable = sc2731_charger_property_is_writeable,
+};
+
+static int sc2731_charger_usb_change(struct notifier_block *nb,
+ unsigned long limit, void *data)
+{
+ struct sc2731_charger_info *info =
+ container_of(nb, struct sc2731_charger_info, usb_notify);
+ int ret = 0;
+
+ mutex_lock(&info->lock);
+
+ if (limit > 0) {
+ /* Set the input current limit and start charging */
+ ret = sc2731_charger_set_current_limit(info, limit);
+ if (ret)
+ goto out;
+
+ ret = sc2731_charger_set_current(info, limit);
+ if (ret)
+ goto out;
+
+ ret = sc2731_charger_start_charge(info);
+ if (ret)
+ goto out;
+
+ info->charging = true;
+ } else {
+ /* Stop charging */
+ info->charging = false;
+ sc2731_charger_stop_charge(info);
+ }
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int sc2731_charger_hw_init(struct sc2731_charger_info *info)
+{
+ struct power_supply_battery_info bat_info = { };
+ u32 term_current, term_voltage, cur_val, vol_val;
+ int ret;
+
+ /* Enable charger module */
+ ret = regmap_update_bits(info->regmap, SC2731_MODULE_EN1,
+ SC2731_CHARGE_EN, SC2731_CHARGE_EN);
+ if (ret)
+ return ret;
+
+ ret = power_supply_get_battery_info(info->psy_usb, &bat_info);
+ if (ret) {
+ dev_warn(info->dev, "no battery information is supplied\n");
+
+ /*
+ * If no battery information is supplied, we should set
+ * default charge termination current to 120 mA, and default
+ * charge termination voltage to 4.35V.
+ */
+ cur_val = 0x2;
+ vol_val = 0x1;
+ } else {
+ term_current = bat_info.charge_term_current_ua / 1000;
+
+ if (term_current <= 90)
+ cur_val = 0;
+ else if (term_current >= 265)
+ cur_val = 0x7;
+ else
+ cur_val = ((term_current - 90) / 25) + 1;
+
+ term_voltage = bat_info.constant_charge_voltage_max_uv / 1000;
+
+ if (term_voltage > 4500)
+ term_voltage = 4500;
+
+ if (term_voltage > 4200)
+ vol_val = (term_voltage - 4200) / 100;
+ else
+ vol_val = 0;
+ }
+
+ /* Set charge termination current */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG2,
+ SC2731_TERMINATION_CUR_MASK, cur_val);
+ if (ret)
+ goto error;
+
+ /* Set charge termination voltage */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_TERMINATION_VOL_MASK |
+ SC2731_TERMINATION_VOL_CAL_MASK,
+ (vol_val << SC2731_TERMINATION_VOL_SHIFT) |
+ (0x6 << SC2731_TERMINATION_VOL_CAL_SHIFT));
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ regmap_update_bits(info->regmap, SC2731_MODULE_EN1, SC2731_CHARGE_EN, 0);
+ return ret;
+}
+
+static int sc2731_charger_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sc2731_charger_info *info;
+ struct power_supply_config charger_cfg = { };
+ int ret;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ mutex_init(&info->lock);
+ info->dev = &pdev->dev;
+
+ info->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!info->regmap) {
+ dev_err(&pdev->dev, "failed to get charger regmap\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "reg", &info->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get register address\n");
+ return -ENODEV;
+ }
+
+ charger_cfg.drv_data = info;
+ charger_cfg.of_node = np;
+ info->psy_usb = devm_power_supply_register(&pdev->dev,
+ &sc2731_charger_desc,
+ &charger_cfg);
+ if (IS_ERR(info->psy_usb)) {
+ dev_err(&pdev->dev, "failed to register power supply\n");
+ return PTR_ERR(info->psy_usb);
+ }
+
+ ret = sc2731_charger_hw_init(info);
+ if (ret)
+ return ret;
+
+ info->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "phys", 0);
+ if (IS_ERR(info->usb_phy)) {
+ dev_err(&pdev->dev, "failed to find USB phy\n");
+ return PTR_ERR(info->usb_phy);
+ }
+
+ info->usb_notify.notifier_call = sc2731_charger_usb_change;
+ ret = usb_register_notifier(info->usb_phy, &info->usb_notify);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register notifier: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sc2731_charger_remove(struct platform_device *pdev)
+{
+ struct sc2731_charger_info *info = platform_get_drvdata(pdev);
+
+ usb_unregister_notifier(info->usb_phy, &info->usb_notify);
+
+ return 0;
+}
+
+static const struct of_device_id sc2731_charger_of_match[] = {
+ { .compatible = "sprd,sc2731-charger", },
+ { }
+};
+
+static struct platform_driver sc2731_charger_driver = {
+ .driver = {
+ .name = "sc2731-charger",
+ .of_match_table = sc2731_charger_of_match,
+ },
+ .probe = sc2731_charger_probe,
+ .remove = sc2731_charger_remove,
+};
+
+module_platform_driver(sc2731_charger_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC2731 Charger Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index bbcaee56db9d..80582c8f98fa 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -420,7 +420,8 @@ static void twl4030_current_worker(struct work_struct *data)
if (v < USB_MIN_VOLT) {
/* Back up and stop adjusting. */
- bci->usb_cur -= USB_CUR_STEP;
+ if (bci->usb_cur >= USB_CUR_STEP)
+ bci->usb_cur -= USB_CUR_STEP;
bci->usb_cur_target = bci->usb_cur;
} else if (bci->usb_cur >= bci->usb_cur_target ||
bci->usb_cur + USB_CUR_STEP > USB_MAX_CURRENT) {
@@ -439,6 +440,7 @@ static void twl4030_current_worker(struct work_struct *data)
static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
{
int ret;
+ u32 reg;
if (bci->usb_mode == CHARGE_OFF)
enable = false;
@@ -452,14 +454,38 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
bci->usb_enabled = 1;
}
- if (bci->usb_mode == CHARGE_AUTO)
+ if (bci->usb_mode == CHARGE_AUTO) {
+ /* Enable interrupts now. */
+ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC |
+ TWL4030_TBATOR2 | TWL4030_TBATOR1 |
+ TWL4030_BATSTS);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ if (ret < 0) {
+ dev_err(bci->dev,
+ "failed to unmask interrupts: %d\n",
+ ret);
+ return ret;
+ }
/* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
+ }
/* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */
ret = twl4030_clear_set(TWL_MODULE_MAIN_CHARGE, 0,
TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
if (bci->usb_mode == CHARGE_LINEAR) {
+ /* Enable interrupts now. */
+ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_TBATOR2 |
+ TWL4030_TBATOR1 | TWL4030_BATSTS);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ if (ret < 0) {
+ dev_err(bci->dev,
+ "failed to unmask interrupts: %d\n",
+ ret);
+ return ret;
+ }
twl4030_clear_set_boot_bci(TWL4030_BCIAUTOAC|TWL4030_CVENAC, 0);
/* Watch dog key: WOVF acknowledge */
ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x33,
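
The underflow guard added at the top of this hunk matters because, assuming usb_cur is an unsigned type as the fix implies, subtracting a step from a smaller value does not go negative: it wraps around to a value near UINT_MAX, which would then be stored as the new current target. A standalone illustration, using a hypothetical step size for demonstration only:

#include <stdio.h>

#define USB_CUR_STEP 20000	/* hypothetical step, for illustration only */

int main(void)
{
	unsigned int usb_cur = 10000;

	/* Unguarded: 10000 - 20000 wraps to a huge unsigned value. */
	printf("unguarded: %u\n", usb_cur - USB_CUR_STEP);

	/* Guarded, as in the fix: only step down when it cannot wrap. */
	if (usb_cur >= USB_CUR_STEP)
		usb_cur -= USB_CUR_STEP;
	printf("guarded:   %u\n", usb_cur);
	return 0;
}
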
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 295d8dcba48c..6cdb2c14eee4 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1133,47 +1133,40 @@ static const struct rapl_defaults rapl_defaults_cht = {
.compute_time_window = rapl_compute_time_window_atom,
};
-#define RAPL_CPU(_model, _ops) { \
- .vendor = X86_VENDOR_INTEL, \
- .family = 6, \
- .model = _model, \
- .driver_data = (kernel_ulong_t)&_ops, \
- }
-
static const struct x86_cpu_id rapl_ids[] __initconst = {
- RAPL_CPU(INTEL_FAM6_SANDYBRIDGE, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_SANDYBRIDGE_X, rapl_defaults_core),
-
- RAPL_CPU(INTEL_FAM6_IVYBRIDGE, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_IVYBRIDGE_X, rapl_defaults_core),
-
- RAPL_CPU(INTEL_FAM6_HASWELL_CORE, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_HASWELL_ULT, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_HASWELL_GT3E, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_HASWELL_X, rapl_defaults_hsw_server),
-
- RAPL_CPU(INTEL_FAM6_BROADWELL_CORE, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_BROADWELL_GT3E, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_BROADWELL_XEON_D, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_BROADWELL_X, rapl_defaults_hsw_server),
-
- RAPL_CPU(INTEL_FAM6_SKYLAKE_DESKTOP, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_SKYLAKE_MOBILE, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_SKYLAKE_X, rapl_defaults_hsw_server),
- RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_CANNONLAKE_MOBILE, rapl_defaults_core),
-
- RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt),
- RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht),
- RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng),
- RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann),
- RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),
-
- RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
- RAPL_CPU(INTEL_FAM6_XEON_PHI_KNM, rapl_defaults_hsw_server),
+ INTEL_CPU_FAM6(SANDYBRIDGE, rapl_defaults_core),
+ INTEL_CPU_FAM6(SANDYBRIDGE_X, rapl_defaults_core),
+
+ INTEL_CPU_FAM6(IVYBRIDGE, rapl_defaults_core),
+ INTEL_CPU_FAM6(IVYBRIDGE_X, rapl_defaults_core),
+
+ INTEL_CPU_FAM6(HASWELL_CORE, rapl_defaults_core),
+ INTEL_CPU_FAM6(HASWELL_ULT, rapl_defaults_core),
+ INTEL_CPU_FAM6(HASWELL_GT3E, rapl_defaults_core),
+ INTEL_CPU_FAM6(HASWELL_X, rapl_defaults_hsw_server),
+
+ INTEL_CPU_FAM6(BROADWELL_CORE, rapl_defaults_core),
+ INTEL_CPU_FAM6(BROADWELL_GT3E, rapl_defaults_core),
+ INTEL_CPU_FAM6(BROADWELL_XEON_D, rapl_defaults_core),
+ INTEL_CPU_FAM6(BROADWELL_X, rapl_defaults_hsw_server),
+
+ INTEL_CPU_FAM6(SKYLAKE_DESKTOP, rapl_defaults_core),
+ INTEL_CPU_FAM6(SKYLAKE_MOBILE, rapl_defaults_core),
+ INTEL_CPU_FAM6(SKYLAKE_X, rapl_defaults_hsw_server),
+ INTEL_CPU_FAM6(KABYLAKE_MOBILE, rapl_defaults_core),
+ INTEL_CPU_FAM6(KABYLAKE_DESKTOP, rapl_defaults_core),
+ INTEL_CPU_FAM6(CANNONLAKE_MOBILE, rapl_defaults_core),
+
+ INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
+ INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
+ INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, rapl_defaults_tng),
+ INTEL_CPU_FAM6(ATOM_AIRMONT_MID, rapl_defaults_ann),
+ INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core),
+ INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core),
+ INTEL_CPU_FAM6(ATOM_GOLDMONT_X, rapl_defaults_core),
+
+ INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
+ INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
{}
};
MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
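
The local RAPL_CPU wrapper is dropped in favour of the generic INTEL_CPU_FAM6 helper from arch/x86/include/asm/intel-family.h, which builds the same x86_cpu_id initializer. Roughly paraphrased from that header around this kernel version (not copied verbatim):

#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data) {	\
	.vendor		= X86_VENDOR_INTEL,			\
	.family		= _family,				\
	.model		= _model,				\
	.driver_data	= (kernel_ulong_t)_driver_data,		\
}

#define INTEL_CPU_FAM6(_model, _driver_data)			\
	INTEL_CPU_FAM_ANY(6, INTEL_FAM6_##_model, &_driver_data)

/*
 * So INTEL_CPU_FAM6(SANDYBRIDGE, rapl_defaults_core) yields the same
 * entry RAPL_CPU(INTEL_FAM6_SANDYBRIDGE, rapl_defaults_core) did; the
 * remaining churn (ATOM_SILVERMONT1 -> ATOM_SILVERMONT and friends)
 * tracks the renamed model constants in the same header.
 */
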
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 01b0e2bb3319..2012551d93e0 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
#include <linux/timekeeping.h>
+#include <linux/nospec.h>
+
#include "ptp_private.h"
static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
@@ -248,6 +250,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EINVAL;
break;
}
+ pin_index = array_index_nospec(pin_index, ops->n_pins);
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
pd = ops->pin_config[pin_index];
@@ -266,6 +269,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EINVAL;
break;
}
+ pin_index = array_index_nospec(pin_index, ops->n_pins);
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
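
array_index_nospec() hardens both ioctl paths against Spectre v1: the preceding bounds check rejects a bad pin_index architecturally, but a mispredicted branch could still let the out-of-range value flow into the pin_config[] load speculatively. The helper clamps the index via a data dependency instead of a branch. A userspace model of the clamping behaviour only (the kernel derives the mask arithmetically; the comparison below is purely illustrative):

#include <stdio.h>

/* Simplified stand-in for the kernel's array_index_nospec(). */
static size_t index_nospec(size_t index, size_t size)
{
	size_t mask = 0 - (size_t)(index < size);	/* all-ones iff in range */

	return index & mask;	/* in-range index unchanged, OOB becomes 0 */
}

int main(void)
{
	int pin_config[4] = { 10, 11, 12, 13 };
	size_t ok = index_nospec(2, 4);
	size_t oob = index_nospec(7, 4);

	printf("2 -> %zu (pin %d), 7 -> %zu (clamped)\n",
	       ok, pin_config[ok], oob);
	return 0;
}
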
diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c
index a7dc43368df4..5b6393e3ea27 100644
--- a/drivers/ptp/ptp_dte.c
+++ b/drivers/ptp/ptp_dte.c
@@ -288,8 +288,7 @@ static int ptp_dte_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int ptp_dte_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ptp_dte *ptp_dte = platform_get_drvdata(pdev);
+ struct ptp_dte *ptp_dte = dev_get_drvdata(dev);
u8 i;
for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++) {
@@ -305,8 +304,7 @@ static int ptp_dte_suspend(struct device *dev)
static int ptp_dte_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ptp_dte *ptp_dte = platform_get_drvdata(pdev);
+ struct ptp_dte *ptp_dte = dev_get_drvdata(dev);
u8 i;
for (i = 0; i < DTE_NUM_REGS_TO_RESTORE; i++) {
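
This simplification is safe because platform driver data lives in the embedded struct device; paraphrasing the helper from include/linux/platform_device.h:

static inline void *platform_get_drvdata(const struct platform_device *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}

So when a PM callback already holds the struct device pointer, converting it to a platform_device and back is just a round trip through the same field, and dev_get_drvdata(dev) reads it directly.
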
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 329cdd33ed62..926cee0d0b5f 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -189,7 +189,7 @@ config REGULATOR_BD718XX
and LDO regulators.
This driver can also be built as a module. If so, the module
- will be called bd71837-regulator.
+ will be called bd718x7-regulator.
config REGULATOR_BD9571MWV
tristate "ROHM BD9571MWV Regulators"
@@ -356,6 +356,13 @@ config REGULATOR_LM363X
One boost output voltage is configurable and always on.
Other LDOs are used for the display module.
+config REGULATOR_LOCHNAGAR
+ tristate "Cirrus Logic Lochnagar regulator driver"
+ depends on MFD_LOCHNAGAR
+ help
+ This enables regulator support on the Cirrus Logic Lochnagar audio
+ development board.
+
config REGULATOR_LP3971
tristate "National Semiconductors LP3971 PMIC regulator driver"
depends on I2C
@@ -803,6 +810,18 @@ config REGULATOR_STM32_VREFBUF
This driver can also be built as a module. If so, the module
will be called stm32-vrefbuf.
+config REGULATOR_STPMIC1
+ tristate "STMicroelectronics STPMIC1 PMIC Regulators"
+ depends on MFD_STPMIC1
+ help
+ This driver supports STMicroelectronics STPMIC1 PMIC voltage
+ regulators and switches. The STPMIC1 regulators supply power to
+ an application processor as well as to external system
+ peripherals such as DDR, Flash memories and system devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stpmic1_regulator.
+
config REGULATOR_TI_ABB
tristate "TI Adaptive Body Bias on-chip LDO"
depends on ARCH_OMAP
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 801d9a34a203..72488ef11b8a 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
-obj-$(CONFIG_REGULATOR_BD718XX) += bd71837-regulator.o
+obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
@@ -46,6 +46,7 @@ obj-$(CONFIG_REGULATOR_HI655X) += hi655x-regulator.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_ISL9305) += isl9305.o
obj-$(CONFIG_REGULATOR_LM363X) += lm363x-regulator.o
+obj-$(CONFIG_REGULATOR_LOCHNAGAR) += lochnagar-regulator.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
@@ -101,6 +102,7 @@ obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
+obj-$(CONFIG_REGULATOR_STPMIC1) += stpmic1_regulator.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
obj-$(CONFIG_REGULATOR_SY8106A) += sy8106a-regulator.o
obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index e976d073f28d..9a72eae4926d 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -260,7 +260,7 @@ static int arizona_ldo1_common_init(struct platform_device *pdev,
* so clean up would happen at the wrong time
*/
config.ena_gpiod = gpiod_get_optional(parent_dev, "wlf,ldoena",
- GPIOD_OUT_LOW);
+ GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(config.ena_gpiod))
return PTR_ERR(config.ena_gpiod);
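
GPIOD_FLAGS_BIT_NONEXCLUSIVE marks the request as shareable: without it, a second gpiod_get of a line that is already held fails with -EBUSY, which breaks systems where several Arizona regulators share one enable GPIO. A sketch of the shared case this flag permits (hypothetical descriptors, declarations and error handling elided):

/* Two consumers of the same "wlf,ldoena" line; illustrative sketch only. */
a = gpiod_get_optional(parent_dev, "wlf,ldoena",
		       GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
b = gpiod_get_optional(parent_dev, "wlf,ldoena",
		       GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
/* With exclusive requests, the second call would return ERR_PTR(-EBUSY). */
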
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 91b8ff8bac15..a3734039a86a 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -509,10 +509,10 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
/*
* AXP803/AXP813 DCDC work frequency setting has the same
* range and step as AXP22X, but at a different register.
- * Fall through to the check below.
* (See include/linux/mfd/axp20x.h)
*/
reg = AXP803_DCDC_FREQ_CTRL;
+ /* Fall through to the check below. */
case AXP806_ID:
/*
* AXP806 also have DCDC work frequency setting register at a
@@ -520,6 +520,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
*/
if (axp20x->variant == AXP806_ID)
reg = AXP806_DCDC_FREQ_CTRL;
+ /* Fall through */
case AXP221_ID:
case AXP223_ID:
case AXP809_ID:
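
The relocated and added comments here are more than documentation: GCC's -Wimplicit-fallthrough, which the kernel was preparing to enable, treats a comment matching "Fall through" immediately before the next case label as an annotation that the missing break is intentional. A standalone example of the pattern, with hypothetical values:

#include <stdio.h>

static void set_dcdc_freq(int variant)
{
	unsigned int reg = 0;

	switch (variant) {
	case 1:				/* AXP803/AXP813-like variant */
		reg = 0x3b;		/* hypothetical register address */
		/* Fall through */
	case 2:				/* variant sharing the range check */
		printf("variant %d uses freq register 0x%x\n", variant, reg);
		break;
	default:
		break;
	}
}

int main(void)
{
	set_dcdc_freq(1);	/* picks its register, then falls through */
	set_dcdc_freq(2);	/* only the shared tail runs */
	return 0;
}
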
diff --git a/drivers/regulator/bd71837-regulator.c b/drivers/regulator/bd71837-regulator.c
deleted file mode 100644
index 0f8ac8dec3e1..000000000000
--- a/drivers/regulator/bd71837-regulator.c
+++ /dev/null
@@ -1,626 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 ROHM Semiconductors
-// bd71837-regulator.c ROHM BD71837MWV regulator driver
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/rohm-bd718x7.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/of_regulator.h>
-#include <linux/slab.h>
-
-struct bd71837_pmic {
- struct regulator_desc descs[BD71837_REGULATOR_CNT];
- struct bd71837 *mfd;
- struct platform_device *pdev;
- struct regulator_dev *rdev[BD71837_REGULATOR_CNT];
-};
-
-/*
- * BUCK1/2/3/4
- * BUCK1RAMPRATE[1:0] BUCK1 DVS ramp rate setting
- * 00: 10.00mV/usec 10mV 1uS
- * 01: 5.00mV/usec 10mV 2uS
- * 10: 2.50mV/usec 10mV 4uS
- * 11: 1.25mV/usec 10mV 8uS
- */
-static int bd71837_buck1234_set_ramp_delay(struct regulator_dev *rdev,
- int ramp_delay)
-{
- struct bd71837_pmic *pmic = rdev_get_drvdata(rdev);
- struct bd71837 *mfd = pmic->mfd;
- int id = rdev->desc->id;
- unsigned int ramp_value = BUCK_RAMPRATE_10P00MV;
-
- dev_dbg(&pmic->pdev->dev, "Buck[%d] Set Ramp = %d\n", id + 1,
- ramp_delay);
- switch (ramp_delay) {
- case 1 ... 1250:
- ramp_value = BUCK_RAMPRATE_1P25MV;
- break;
- case 1251 ... 2500:
- ramp_value = BUCK_RAMPRATE_2P50MV;
- break;
- case 2501 ... 5000:
- ramp_value = BUCK_RAMPRATE_5P00MV;
- break;
- case 5001 ... 10000:
- ramp_value = BUCK_RAMPRATE_10P00MV;
- break;
- default:
- ramp_value = BUCK_RAMPRATE_10P00MV;
- dev_err(&pmic->pdev->dev,
- "%s: ramp_delay: %d not supported, setting 10000mV//us\n",
- rdev->desc->name, ramp_delay);
- }
-
- return regmap_update_bits(mfd->regmap, BD71837_REG_BUCK1_CTRL + id,
- BUCK_RAMPRATE_MASK, ramp_value << 6);
-}
-
-/* Bucks 1 to 4 support DVS. PWM mode is used when voltage is changed.
- * Bucks 5 to 8 and LDOs can use PFM and must be disabled when voltage
- * is changed. Hence we return -EBUSY for these if voltage is changed
- * when BUCK/LDO is enabled.
- */
-static int bd71837_set_voltage_sel_restricted(struct regulator_dev *rdev,
- unsigned int sel)
-{
- if (regulator_is_enabled_regmap(rdev))
- return -EBUSY;
-
- return regulator_set_voltage_sel_regmap(rdev, sel);
-}
-
-static struct regulator_ops bd71837_ldo_regulator_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = bd71837_set_voltage_sel_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
-};
-
-static struct regulator_ops bd71837_ldo_regulator_nolinear_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = bd71837_set_voltage_sel_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
-};
-
-static struct regulator_ops bd71837_buck_regulator_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = bd71837_set_voltage_sel_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
-};
-
-static struct regulator_ops bd71837_buck_regulator_nolinear_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = bd71837_set_voltage_sel_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
-};
-
-static struct regulator_ops bd71837_buck1234_regulator_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
- .set_ramp_delay = bd71837_buck1234_set_ramp_delay,
-};
-
-/*
- * BUCK1/2/3/4
- * 0.70 to 1.30V (10mV step)
- */
-static const struct regulator_linear_range bd71837_buck1234_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(700000, 0x00, 0x3C, 10000),
- REGULATOR_LINEAR_RANGE(1300000, 0x3D, 0x3F, 0),
-};
-
-/*
- * BUCK5
- * 0.9V to 1.35V ()
- */
-static const struct regulator_linear_range bd71837_buck5_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(700000, 0x00, 0x03, 100000),
- REGULATOR_LINEAR_RANGE(1050000, 0x04, 0x05, 50000),
- REGULATOR_LINEAR_RANGE(1200000, 0x06, 0x07, 150000),
-};
-
-/*
- * BUCK6
- * 3.0V to 3.3V (step 100mV)
- */
-static const struct regulator_linear_range bd71837_buck6_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(3000000, 0x00, 0x03, 100000),
-};
-
-/*
- * BUCK7
- * 000 = 1.605V
- * 001 = 1.695V
- * 010 = 1.755V
- * 011 = 1.8V (Initial)
- * 100 = 1.845V
- * 101 = 1.905V
- * 110 = 1.95V
- * 111 = 1.995V
- */
-static const unsigned int buck_7_volts[] = {
- 1605000, 1695000, 1755000, 1800000, 1845000, 1905000, 1950000, 1995000
-};
-
-/*
- * BUCK8
- * 0.8V to 1.40V (step 10mV)
- */
-static const struct regulator_linear_range bd71837_buck8_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(800000, 0x00, 0x3C, 10000),
- REGULATOR_LINEAR_RANGE(1400000, 0x3D, 0x3F, 0),
-};
-
-/*
- * LDO1
- * 3.0 to 3.3V (100mV step)
- */
-static const struct regulator_linear_range bd71837_ldo1_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(3000000, 0x00, 0x03, 100000),
-};
-
-/*
- * LDO2
- * 0.8 or 0.9V
- */
-static const unsigned int ldo_2_volts[] = {
- 900000, 800000
-};
-
-/*
- * LDO3
- * 1.8 to 3.3V (100mV step)
- */
-static const struct regulator_linear_range bd71837_ldo3_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
-};
-
-/*
- * LDO4
- * 0.9 to 1.8V (100mV step)
- */
-static const struct regulator_linear_range bd71837_ldo4_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(900000, 0x00, 0x09, 100000),
- REGULATOR_LINEAR_RANGE(1800000, 0x0A, 0x0F, 0),
-};
-
-/*
- * LDO5
- * 1.8 to 3.3V (100mV step)
- */
-static const struct regulator_linear_range bd71837_ldo5_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
-};
-
-/*
- * LDO6
- * 0.9 to 1.8V (100mV step)
- */
-static const struct regulator_linear_range bd71837_ldo6_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(900000, 0x00, 0x09, 100000),
- REGULATOR_LINEAR_RANGE(1800000, 0x0A, 0x0F, 0),
-};
-
-/*
- * LDO7
- * 1.8 to 3.3V (100mV step)
- */
-static const struct regulator_linear_range bd71837_ldo7_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
-};
-
-static const struct regulator_desc bd71837_regulators[] = {
- {
- .name = "buck1",
- .of_match = of_match_ptr("BUCK1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_BUCK1,
- .ops = &bd71837_buck1234_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_BUCK1_VOLTAGE_NUM,
- .linear_ranges = bd71837_buck1234_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_buck1234_voltage_ranges),
- .vsel_reg = BD71837_REG_BUCK1_VOLT_RUN,
- .vsel_mask = BUCK1_RUN_MASK,
- .enable_reg = BD71837_REG_BUCK1_CTRL,
- .enable_mask = BD71837_BUCK_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "buck2",
- .of_match = of_match_ptr("BUCK2"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_BUCK2,
- .ops = &bd71837_buck1234_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_BUCK2_VOLTAGE_NUM,
- .linear_ranges = bd71837_buck1234_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_buck1234_voltage_ranges),
- .vsel_reg = BD71837_REG_BUCK2_VOLT_RUN,
- .vsel_mask = BUCK2_RUN_MASK,
- .enable_reg = BD71837_REG_BUCK2_CTRL,
- .enable_mask = BD71837_BUCK_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "buck3",
- .of_match = of_match_ptr("BUCK3"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_BUCK3,
- .ops = &bd71837_buck1234_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_BUCK3_VOLTAGE_NUM,
- .linear_ranges = bd71837_buck1234_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_buck1234_voltage_ranges),
- .vsel_reg = BD71837_REG_BUCK3_VOLT_RUN,
- .vsel_mask = BUCK3_RUN_MASK,
- .enable_reg = BD71837_REG_BUCK3_CTRL,
- .enable_mask = BD71837_BUCK_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "buck4",
- .of_match = of_match_ptr("BUCK4"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_BUCK4,
- .ops = &bd71837_buck1234_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_BUCK4_VOLTAGE_NUM,
- .linear_ranges = bd71837_buck1234_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_buck1234_voltage_ranges),
- .vsel_reg = BD71837_REG_BUCK4_VOLT_RUN,
- .vsel_mask = BUCK4_RUN_MASK,
- .enable_reg = BD71837_REG_BUCK4_CTRL,
- .enable_mask = BD71837_BUCK_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "buck5",
- .of_match = of_match_ptr("BUCK5"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_BUCK5,
- .ops = &bd71837_buck_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_BUCK5_VOLTAGE_NUM,
- .linear_ranges = bd71837_buck5_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_buck5_voltage_ranges),
- .vsel_reg = BD71837_REG_BUCK5_VOLT,
- .vsel_mask = BUCK5_MASK,
- .enable_reg = BD71837_REG_BUCK5_CTRL,
- .enable_mask = BD71837_BUCK_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "buck6",
- .of_match = of_match_ptr("BUCK6"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_BUCK6,
- .ops = &bd71837_buck_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_BUCK6_VOLTAGE_NUM,
- .linear_ranges = bd71837_buck6_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_buck6_voltage_ranges),
- .vsel_reg = BD71837_REG_BUCK6_VOLT,
- .vsel_mask = BUCK6_MASK,
- .enable_reg = BD71837_REG_BUCK6_CTRL,
- .enable_mask = BD71837_BUCK_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "buck7",
- .of_match = of_match_ptr("BUCK7"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_BUCK7,
- .ops = &bd71837_buck_regulator_nolinear_ops,
- .type = REGULATOR_VOLTAGE,
- .volt_table = &buck_7_volts[0],
- .n_voltages = ARRAY_SIZE(buck_7_volts),
- .vsel_reg = BD71837_REG_BUCK7_VOLT,
- .vsel_mask = BUCK7_MASK,
- .enable_reg = BD71837_REG_BUCK7_CTRL,
- .enable_mask = BD71837_BUCK_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "buck8",
- .of_match = of_match_ptr("BUCK8"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_BUCK8,
- .ops = &bd71837_buck_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_BUCK8_VOLTAGE_NUM,
- .linear_ranges = bd71837_buck8_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_buck8_voltage_ranges),
- .vsel_reg = BD71837_REG_BUCK8_VOLT,
- .vsel_mask = BUCK8_MASK,
- .enable_reg = BD71837_REG_BUCK8_CTRL,
- .enable_mask = BD71837_BUCK_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo1",
- .of_match = of_match_ptr("LDO1"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_LDO1,
- .ops = &bd71837_ldo_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_LDO1_VOLTAGE_NUM,
- .linear_ranges = bd71837_ldo1_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_ldo1_voltage_ranges),
- .vsel_reg = BD71837_REG_LDO1_VOLT,
- .vsel_mask = LDO1_MASK,
- .enable_reg = BD71837_REG_LDO1_VOLT,
- .enable_mask = BD71837_LDO_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo2",
- .of_match = of_match_ptr("LDO2"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_LDO2,
- .ops = &bd71837_ldo_regulator_nolinear_ops,
- .type = REGULATOR_VOLTAGE,
- .volt_table = &ldo_2_volts[0],
- .vsel_reg = BD71837_REG_LDO2_VOLT,
- .vsel_mask = LDO2_MASK,
- .n_voltages = ARRAY_SIZE(ldo_2_volts),
- .n_voltages = BD71837_LDO2_VOLTAGE_NUM,
- .enable_reg = BD71837_REG_LDO2_VOLT,
- .enable_mask = BD71837_LDO_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo3",
- .of_match = of_match_ptr("LDO3"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_LDO3,
- .ops = &bd71837_ldo_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_LDO3_VOLTAGE_NUM,
- .linear_ranges = bd71837_ldo3_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_ldo3_voltage_ranges),
- .vsel_reg = BD71837_REG_LDO3_VOLT,
- .vsel_mask = LDO3_MASK,
- .enable_reg = BD71837_REG_LDO3_VOLT,
- .enable_mask = BD71837_LDO_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo4",
- .of_match = of_match_ptr("LDO4"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_LDO4,
- .ops = &bd71837_ldo_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_LDO4_VOLTAGE_NUM,
- .linear_ranges = bd71837_ldo4_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_ldo4_voltage_ranges),
- .vsel_reg = BD71837_REG_LDO4_VOLT,
- .vsel_mask = LDO4_MASK,
- .enable_reg = BD71837_REG_LDO4_VOLT,
- .enable_mask = BD71837_LDO_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo5",
- .of_match = of_match_ptr("LDO5"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_LDO5,
- .ops = &bd71837_ldo_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_LDO5_VOLTAGE_NUM,
- .linear_ranges = bd71837_ldo5_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_ldo5_voltage_ranges),
- /* LDO5 is supplied by buck6 */
- .supply_name = "buck6",
- .vsel_reg = BD71837_REG_LDO5_VOLT,
- .vsel_mask = LDO5_MASK,
- .enable_reg = BD71837_REG_LDO5_VOLT,
- .enable_mask = BD71837_LDO_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo6",
- .of_match = of_match_ptr("LDO6"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_LDO6,
- .ops = &bd71837_ldo_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_LDO6_VOLTAGE_NUM,
- .linear_ranges = bd71837_ldo6_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_ldo6_voltage_ranges),
- /* LDO6 is supplied by buck7 */
- .supply_name = "buck7",
- .vsel_reg = BD71837_REG_LDO6_VOLT,
- .vsel_mask = LDO6_MASK,
- .enable_reg = BD71837_REG_LDO6_VOLT,
- .enable_mask = BD71837_LDO_EN,
- .owner = THIS_MODULE,
- },
- {
- .name = "ldo7",
- .of_match = of_match_ptr("LDO7"),
- .regulators_node = of_match_ptr("regulators"),
- .id = BD71837_LDO7,
- .ops = &bd71837_ldo_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = BD71837_LDO7_VOLTAGE_NUM,
- .linear_ranges = bd71837_ldo7_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(bd71837_ldo7_voltage_ranges),
- .vsel_reg = BD71837_REG_LDO7_VOLT,
- .vsel_mask = LDO7_MASK,
- .enable_reg = BD71837_REG_LDO7_VOLT,
- .enable_mask = BD71837_LDO_EN,
- .owner = THIS_MODULE,
- },
-};
-
-struct reg_init {
- unsigned int reg;
- unsigned int mask;
-};
-
-static int bd71837_probe(struct platform_device *pdev)
-{
- struct bd71837_pmic *pmic;
- struct regulator_config config = { 0 };
- struct reg_init pmic_regulator_inits[] = {
- {
- .reg = BD71837_REG_BUCK1_CTRL,
- .mask = BD71837_BUCK_SEL,
- }, {
- .reg = BD71837_REG_BUCK2_CTRL,
- .mask = BD71837_BUCK_SEL,
- }, {
- .reg = BD71837_REG_BUCK3_CTRL,
- .mask = BD71837_BUCK_SEL,
- }, {
- .reg = BD71837_REG_BUCK4_CTRL,
- .mask = BD71837_BUCK_SEL,
- }, {
- .reg = BD71837_REG_BUCK5_CTRL,
- .mask = BD71837_BUCK_SEL,
- }, {
- .reg = BD71837_REG_BUCK6_CTRL,
- .mask = BD71837_BUCK_SEL,
- }, {
- .reg = BD71837_REG_BUCK7_CTRL,
- .mask = BD71837_BUCK_SEL,
- }, {
- .reg = BD71837_REG_BUCK8_CTRL,
- .mask = BD71837_BUCK_SEL,
- }, {
- .reg = BD71837_REG_LDO1_VOLT,
- .mask = BD71837_LDO_SEL,
- }, {
- .reg = BD71837_REG_LDO2_VOLT,
- .mask = BD71837_LDO_SEL,
- }, {
- .reg = BD71837_REG_LDO3_VOLT,
- .mask = BD71837_LDO_SEL,
- }, {
- .reg = BD71837_REG_LDO4_VOLT,
- .mask = BD71837_LDO_SEL,
- }, {
- .reg = BD71837_REG_LDO5_VOLT,
- .mask = BD71837_LDO_SEL,
- }, {
- .reg = BD71837_REG_LDO6_VOLT,
- .mask = BD71837_LDO_SEL,
- }, {
- .reg = BD71837_REG_LDO7_VOLT,
- .mask = BD71837_LDO_SEL,
- }
- };
-
- int i, err;
-
- pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
- if (!pmic)
- return -ENOMEM;
-
- memcpy(pmic->descs, bd71837_regulators, sizeof(pmic->descs));
-
- pmic->pdev = pdev;
- pmic->mfd = dev_get_drvdata(pdev->dev.parent);
-
- if (!pmic->mfd) {
- dev_err(&pdev->dev, "No MFD driver data\n");
- err = -EINVAL;
- goto err;
- }
- platform_set_drvdata(pdev, pmic);
-
- /* Register LOCK release */
- err = regmap_update_bits(pmic->mfd->regmap, BD71837_REG_REGLOCK,
- (REGLOCK_PWRSEQ | REGLOCK_VREG), 0);
- if (err) {
- dev_err(&pmic->pdev->dev, "Failed to unlock PMIC (%d)\n", err);
- goto err;
- } else {
- dev_dbg(&pmic->pdev->dev, "Unlocked lock register 0x%x\n",
- BD71837_REG_REGLOCK);
- }
-
- for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) {
-
- struct regulator_desc *desc;
- struct regulator_dev *rdev;
-
- desc = &pmic->descs[i];
-
- config.dev = pdev->dev.parent;
- config.driver_data = pmic;
- config.regmap = pmic->mfd->regmap;
-
- rdev = devm_regulator_register(&pdev->dev, desc, &config);
- if (IS_ERR(rdev)) {
- dev_err(pmic->mfd->dev,
- "failed to register %s regulator\n",
- desc->name);
- err = PTR_ERR(rdev);
- goto err;
- }
- /* Regulator register gets the regulator constraints and
- * applies them (set_machine_constraints). This should have
- * turned the control register(s) to correct values and we
- * can now switch the control from PMIC state machine to the
- * register interface
- */
- err = regmap_update_bits(pmic->mfd->regmap,
- pmic_regulator_inits[i].reg,
- pmic_regulator_inits[i].mask,
- 0xFFFFFFFF);
- if (err) {
- dev_err(&pmic->pdev->dev,
- "Failed to write BUCK/LDO SEL bit for (%s)\n",
- desc->name);
- goto err;
- }
-
- pmic->rdev[i] = rdev;
- }
-
-err:
- return err;
-}
-
-static struct platform_driver bd71837_regulator = {
- .driver = {
- .name = "bd71837-pmic",
- },
- .probe = bd71837_probe,
-};
-
-module_platform_driver(bd71837_regulator);
-
-MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
-MODULE_DESCRIPTION("BD71837 voltage regulator driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
new file mode 100644
index 000000000000..3a47e0372e77
--- /dev/null
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 ROHM Semiconductors
+// bd718x7-regulator.c ROHM BD71837MWV/BD71847MWV regulator driver
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd718x7.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+/*
+ * BUCK1/2/3/4
+ * BUCK1RAMPRATE[1:0] BUCK1 DVS ramp rate setting
+ * 00: 10.00mV/usec 10mV 1uS
+ * 01: 5.00mV/usec 10mV 2uS
+ * 10: 2.50mV/usec 10mV 4uS
+ * 11: 1.25mV/usec 10mV 8uS
+ */
+static int bd718xx_buck1234_set_ramp_delay(struct regulator_dev *rdev,
+ int ramp_delay)
+{
+ int id = rdev->desc->id;
+ unsigned int ramp_value = BUCK_RAMPRATE_10P00MV;
+
+ dev_dbg(&rdev->dev, "Buck[%d] Set Ramp = %d\n", id + 1,
+ ramp_delay);
+ switch (ramp_delay) {
+ case 1 ... 1250:
+ ramp_value = BUCK_RAMPRATE_1P25MV;
+ break;
+ case 1251 ... 2500:
+ ramp_value = BUCK_RAMPRATE_2P50MV;
+ break;
+ case 2501 ... 5000:
+ ramp_value = BUCK_RAMPRATE_5P00MV;
+ break;
+ case 5001 ... 10000:
+ ramp_value = BUCK_RAMPRATE_10P00MV;
+ break;
+ default:
+ ramp_value = BUCK_RAMPRATE_10P00MV;
+ dev_err(&rdev->dev,
+ "%s: ramp_delay: %d not supported, setting 10000mV//us\n",
+ rdev->desc->name, ramp_delay);
+ }
+
+ return regmap_update_bits(rdev->regmap, BD718XX_REG_BUCK1_CTRL + id,
+ BUCK_RAMPRATE_MASK, ramp_value << 6);
+}
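
As the table above indicates, the regulator core hands set_ramp_delay() a requested rate in uV/us, and the driver buckets it into the four supported hardware rates, falling back to the fastest one for out-of-range requests. A standalone sketch of the bucketing, mirroring the switch with illustrative sample values:

#include <stdio.h>

/* Mirror of the switch above: input is the requested ramp in uV/us. */
static const char *bucket_ramp(int uv_per_us)
{
	if (uv_per_us >= 1 && uv_per_us <= 1250)
		return "1.25 mV/us";
	if (uv_per_us >= 1251 && uv_per_us <= 2500)
		return "2.50 mV/us";
	if (uv_per_us >= 2501 && uv_per_us <= 5000)
		return "5.00 mV/us";
	if (uv_per_us >= 5001 && uv_per_us <= 10000)
		return "10.00 mV/us";
	return "10.00 mV/us (fallback for unsupported requests)";
}

int main(void)
{
	int samples[] = { 1000, 3000, 10000, 20000 };	/* illustrative */
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%5d uV/us -> %s\n", samples[i], bucket_ramp(samples[i]));
	return 0;
}
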
+
+/* Bucks 1 to 4 support DVS. PWM mode is used when voltage is changed.
+ * Bucks 5 to 8 and LDOs can use PFM and must be disabled when voltage
+ * is changed. Hence we return -EBUSY for these if voltage is changed
+ * when BUCK/LDO is enabled.
+ */
+static int bd718xx_set_voltage_sel_restricted(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ if (regulator_is_enabled_regmap(rdev))
+ return -EBUSY;
+
+ return regulator_set_voltage_sel_regmap(rdev, sel);
+}
+
+static int bd718xx_set_voltage_sel_pickable_restricted(
+ struct regulator_dev *rdev, unsigned int sel)
+{
+ if (regulator_is_enabled_regmap(rdev))
+ return -EBUSY;
+
+ return regulator_set_voltage_sel_pickable_regmap(rdev, sel);
+}
+
+static struct regulator_ops bd718xx_pickable_range_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_pickable_linear_range,
+ .set_voltage_sel = bd718xx_set_voltage_sel_pickable_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
+};
+
+static struct regulator_ops bd718xx_pickable_range_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_pickable_linear_range,
+ .set_voltage_sel = bd718xx_set_voltage_sel_pickable_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops bd718xx_ldo_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = bd718xx_set_voltage_sel_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = bd718xx_set_voltage_sel_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static struct regulator_ops bd718xx_buck_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = bd718xx_set_voltage_sel_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops bd718xx_buck_regulator_nolinear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = bd718xx_set_voltage_sel_restricted,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops bd718xx_dvs_buck_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = bd718xx_buck1234_set_ramp_delay,
+};
+
+/*
+ * BD71837 BUCK1/2/3/4
+ * BD71847 BUCK1/2
+ * 0.70 to 1.30V (10mV step)
+ */
+static const struct regulator_linear_range bd718xx_dvs_buck_volts[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0x00, 0x3C, 10000),
+ REGULATOR_LINEAR_RANGE(1300000, 0x3D, 0x3F, 0),
+};
+
+/*
+ * BD71837 BUCK5
+ * 0.7V to 1.35V (range 0)
+ * and
+ * 0.675 to 1.325 (range 1)
+ */
+static const struct regulator_linear_range bd71837_buck5_volts[] = {
+ /* Ranges when VOLT_SEL bit is 0 */
+ REGULATOR_LINEAR_RANGE(700000, 0x00, 0x03, 100000),
+ REGULATOR_LINEAR_RANGE(1050000, 0x04, 0x05, 50000),
+ REGULATOR_LINEAR_RANGE(1200000, 0x06, 0x07, 150000),
+ /* Ranges when VOLT_SEL bit is 1 */
+ REGULATOR_LINEAR_RANGE(675000, 0x0, 0x3, 100000),
+ REGULATOR_LINEAR_RANGE(1025000, 0x4, 0x5, 50000),
+ REGULATOR_LINEAR_RANGE(1175000, 0x6, 0x7, 150000),
+};
+
+/*
+ * The range selector (VOLT_SEL bit) is 0 for the first 3 linear ranges
+ * and 1 for the last 3. The table holds raw register values, so bit 7
+ * (0x80) set picks the second group.
+ */
+static const unsigned int bd71837_buck5_volt_range_sel[] = {
+ 0x0, 0x0, 0x0, 0x80, 0x80, 0x80
+};
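
Pickable ranges pair each regulator_linear_range with one entry of the selector table: when the core picks range i, the raw value linear_range_selectors[i] is written to vsel_range_reg under vsel_range_mask. A standalone sketch of how a (range, selector) pair decodes to a voltage, using a hypothetical pick:

#include <stdio.h>

struct lin_range { int min_uv; unsigned int min_sel, max_sel; int step_uv; };

static const struct lin_range buck5[] = {
	{  700000, 0x0, 0x3, 100000 },	/* VOLT_SEL = 0 group */
	{ 1050000, 0x4, 0x5,  50000 },
	{ 1200000, 0x6, 0x7, 150000 },
	{  675000, 0x0, 0x3, 100000 },	/* VOLT_SEL = 1 group */
	{ 1025000, 0x4, 0x5,  50000 },
	{ 1175000, 0x6, 0x7, 150000 },
};

static const unsigned int range_sel[] = { 0x0, 0x0, 0x0, 0x80, 0x80, 0x80 };

int main(void)
{
	unsigned int r = 3, s = 0x2;	/* hypothetical pick: range 3, sel 2 */
	int uv = buck5[r].min_uv +
		 (int)(s - buck5[r].min_sel) * buck5[r].step_uv;

	printf("range %u, sel 0x%x -> %d uV (write 0x%x under the range mask)\n",
	       r, s, uv, range_sel[r]);
	return 0;
}
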
+
+/*
+ * BD71847 BUCK3
+ */
+static const struct regulator_linear_range bd71847_buck3_volts[] = {
+ /* Ranges when VOLT_SEL bits are 00 */
+ REGULATOR_LINEAR_RANGE(700000, 0x00, 0x03, 100000),
+ REGULATOR_LINEAR_RANGE(1050000, 0x04, 0x05, 50000),
+ REGULATOR_LINEAR_RANGE(1200000, 0x06, 0x07, 150000),
+ /* Ranges when VOLT_SEL bits are 01 */
+ REGULATOR_LINEAR_RANGE(550000, 0x0, 0x7, 50000),
+ /* Ranges when VOLT_SEL bits are 11 */
+ REGULATOR_LINEAR_RANGE(675000, 0x0, 0x3, 100000),
+ REGULATOR_LINEAR_RANGE(1025000, 0x4, 0x5, 50000),
+ REGULATOR_LINEAR_RANGE(1175000, 0x6, 0x7, 150000),
+};
+
+static const unsigned int bd71847_buck3_volt_range_sel[] = {
+ 0x0, 0x0, 0x0, 0x40, 0x80, 0x80, 0x80
+};
+
+static const struct regulator_linear_range bd71847_buck4_volts[] = {
+ REGULATOR_LINEAR_RANGE(3000000, 0x00, 0x03, 100000),
+ REGULATOR_LINEAR_RANGE(2600000, 0x00, 0x03, 100000),
+};
+
+static const unsigned int bd71847_buck4_volt_range_sel[] = { 0x0, 0x40 };
+
+/*
+ * BUCK6
+ * 3.0V to 3.3V (step 100mV)
+ */
+static const struct regulator_linear_range bd71837_buck6_volts[] = {
+ REGULATOR_LINEAR_RANGE(3000000, 0x00, 0x03, 100000),
+};
+
+/*
+ * BD71837 BUCK7
+ * BD71847 BUCK5
+ * 000 = 1.605V
+ * 001 = 1.695V
+ * 010 = 1.755V
+ * 011 = 1.8V (Initial)
+ * 100 = 1.845V
+ * 101 = 1.905V
+ * 110 = 1.95V
+ * 111 = 1.995V
+ */
+static const unsigned int bd718xx_3rd_nodvs_buck_volts[] = {
+ 1605000, 1695000, 1755000, 1800000, 1845000, 1905000, 1950000, 1995000
+};
+
+/*
+ * BUCK8
+ * 0.8V to 1.40V (step 10mV)
+ */
+static const struct regulator_linear_range bd718xx_4th_nodvs_buck_volts[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0x3C, 10000),
+};
+
+/*
+ * LDO1
+ * 3.0 to 3.3V or 1.6 to 1.9V (100mV step)
+ */
+static const struct regulator_linear_range bd718xx_ldo1_volts[] = {
+ REGULATOR_LINEAR_RANGE(3000000, 0x00, 0x03, 100000),
+ REGULATOR_LINEAR_RANGE(1600000, 0x00, 0x03, 100000),
+};
+
+static const unsigned int bd718xx_ldo1_volt_range_sel[] = { 0x0, 0x20 };
+
+/*
+ * LDO2
+ * 0.8 or 0.9V
+ */
+static const unsigned int ldo_2_volts[] = {
+ 900000, 800000
+};
+
+/*
+ * LDO3
+ * 1.8 to 3.3V (100mV step)
+ */
+static const struct regulator_linear_range bd718xx_ldo3_volts[] = {
+ REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
+};
+
+/*
+ * LDO4
+ * 0.9 to 1.8V (100mV step)
+ */
+static const struct regulator_linear_range bd718xx_ldo4_volts[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0x00, 0x09, 100000),
+};
+
+/*
+ * LDO5 for BD71837
+ * 1.8 to 3.3V (100mV step)
+ */
+static const struct regulator_linear_range bd71837_ldo5_volts[] = {
+ REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
+};
+
+/*
+ * LDO5 for BD71847
+ * 1.8 to 3.3V or 0.8 to 2.3V (100mV step)
+ */
+static const struct regulator_linear_range bd71847_ldo5_volts[] = {
+ REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0x0F, 100000),
+};
+
+static const unsigned int bd71847_ldo5_volt_range_sel[] = { 0x0, 0x20 };
+
+/*
+ * LDO6
+ * 0.9 to 1.8V (100mV step)
+ */
+static const struct regulator_linear_range bd718xx_ldo6_volts[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0x00, 0x09, 100000),
+};
+
+/*
+ * LDO7
+ * 1.8 to 3.3V (100mV step)
+ */
+static const struct regulator_linear_range bd71837_ldo7_volts[] = {
+ REGULATOR_LINEAR_RANGE(1800000, 0x00, 0x0F, 100000),
+};
+
+struct reg_init {
+ unsigned int reg;
+ unsigned int mask;
+ unsigned int val;
+};
+
+struct bd718xx_regulator_data {
+ struct regulator_desc desc;
+ const struct reg_init init;
+ const struct reg_init *additional_inits;
+ int additional_init_amnt;
+};
+
+/*
+ * There is a HW quirk in BD71837: the shutdown sequence timings for
+ * bucks/LDOs which are controlled via the register interface are
+ * changed. At PMIC poweroff the voltage for BUCK6/7 is cut immediately
+ * at the beginning of the shutdown sequence. As bucks 6 and 7 are the
+ * parent supplies for LDO5 and LDO6, this causes the LDO5/6 voltage
+ * monitoring to erroneously detect undervoltage and force the PMIC
+ * into the emergency state instead of poweroff. To avoid this we
+ * disable voltage monitoring for LDO5 and LDO6.
+ */
+static const struct reg_init bd71837_ldo5_inits[] = {
+ {
+ .reg = BD718XX_REG_MVRFLTMASK2,
+ .mask = BD718XX_LDO5_VRMON80,
+ .val = BD718XX_LDO5_VRMON80,
+ },
+};
+
+static const struct reg_init bd71837_ldo6_inits[] = {
+ {
+ .reg = BD718XX_REG_MVRFLTMASK2,
+ .mask = BD718XX_LDO6_VRMON80,
+ .val = BD718XX_LDO6_VRMON80,
+ },
+};
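
Each reg_init entry describes one masked register write; the probe path (which continues past the end of this excerpt) presumably applies the per-regulator .init and these additional_inits the same way. A minimal sketch, assuming plain regmap read-modify-write semantics and a hypothetical helper name:

/*
 * Hypothetical helper, not the actual probe code: apply quirk entries
 * as masked regmap updates, stopping on the first failure.
 */
static int apply_reg_inits(struct regmap *map,
			   const struct reg_init *inits, int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = regmap_update_bits(map, inits[i].reg,
					 inits[i].mask, inits[i].val);
		if (ret)
			return ret;
	}
	return 0;
}
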
+
+static const struct bd718xx_regulator_data bd71847_regulators[] = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK1,
+ .ops = &bd718xx_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_dvs_buck_volts,
+ .n_linear_ranges =
+ ARRAY_SIZE(bd718xx_dvs_buck_volts),
+ .vsel_reg = BD718XX_REG_BUCK1_VOLT_RUN,
+ .vsel_mask = DVS_BUCK_RUN_MASK,
+ .enable_reg = BD718XX_REG_BUCK1_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_BUCK1_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK2,
+ .ops = &bd718xx_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_dvs_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_dvs_buck_volts),
+ .vsel_reg = BD718XX_REG_BUCK2_VOLT_RUN,
+ .vsel_mask = DVS_BUCK_RUN_MASK,
+ .enable_reg = BD718XX_REG_BUCK2_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_BUCK2_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK3,
+ .ops = &bd718xx_pickable_range_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71847_BUCK3_VOLTAGE_NUM,
+ .linear_ranges = bd71847_buck3_volts,
+ .n_linear_ranges =
+ ARRAY_SIZE(bd71847_buck3_volts),
+ .vsel_reg = BD718XX_REG_1ST_NODVS_BUCK_VOLT,
+ .vsel_mask = BD718XX_1ST_NODVS_BUCK_MASK,
+ .vsel_range_reg = BD718XX_REG_1ST_NODVS_BUCK_VOLT,
+ .vsel_range_mask = BD71847_BUCK3_RANGE_MASK,
+ .linear_range_selectors = bd71847_buck3_volt_range_sel,
+ .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("BUCK4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK4,
+ .ops = &bd718xx_pickable_range_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71847_BUCK4_VOLTAGE_NUM,
+ .linear_ranges = bd71847_buck4_volts,
+ .n_linear_ranges =
+ ARRAY_SIZE(bd71847_buck4_volts),
+ .enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
+ .vsel_reg = BD718XX_REG_2ND_NODVS_BUCK_VOLT,
+ .vsel_mask = BD71847_BUCK4_MASK,
+ .vsel_range_reg = BD718XX_REG_2ND_NODVS_BUCK_VOLT,
+ .vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
+ .linear_range_selectors = bd71847_buck4_volt_range_sel,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck5",
+ .of_match = of_match_ptr("BUCK5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK5,
+ .ops = &bd718xx_buck_regulator_nolinear_ops,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &bd718xx_3rd_nodvs_buck_volts[0],
+ .n_voltages = ARRAY_SIZE(bd718xx_3rd_nodvs_buck_volts),
+ .vsel_reg = BD718XX_REG_3RD_NODVS_BUCK_VOLT,
+ .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
+ .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck6",
+ .of_match = of_match_ptr("BUCK6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK6,
+ .ops = &bd718xx_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_4TH_NODVS_BUCK_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_4th_nodvs_buck_volts,
+ .n_linear_ranges =
+ ARRAY_SIZE(bd718xx_4th_nodvs_buck_volts),
+ .vsel_reg = BD718XX_REG_4TH_NODVS_BUCK_VOLT,
+ .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
+ .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO1,
+ .ops = &bd718xx_pickable_range_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_LDO1_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_ldo1_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_ldo1_volts),
+ .vsel_reg = BD718XX_REG_LDO1_VOLT,
+ .vsel_mask = BD718XX_LDO1_MASK,
+ .vsel_range_reg = BD718XX_REG_LDO1_VOLT,
+ .vsel_range_mask = BD718XX_LDO1_RANGE_MASK,
+ .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
+ .enable_reg = BD718XX_REG_LDO1_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO1_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO2,
+ .ops = &bd718xx_ldo_regulator_nolinear_ops,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &ldo_2_volts[0],
+ .vsel_reg = BD718XX_REG_LDO2_VOLT,
+ .vsel_mask = BD718XX_LDO2_MASK,
+ .n_voltages = ARRAY_SIZE(ldo_2_volts),
+ .enable_reg = BD718XX_REG_LDO2_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO2_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO3,
+ .ops = &bd718xx_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_LDO3_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_ldo3_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_ldo3_volts),
+ .vsel_reg = BD718XX_REG_LDO3_VOLT,
+ .vsel_mask = BD718XX_LDO3_MASK,
+ .enable_reg = BD718XX_REG_LDO3_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO3_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo4",
+ .of_match = of_match_ptr("LDO4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO4,
+ .ops = &bd718xx_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_LDO4_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_ldo4_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_ldo4_volts),
+ .vsel_reg = BD718XX_REG_LDO4_VOLT,
+ .vsel_mask = BD718XX_LDO4_MASK,
+ .enable_reg = BD718XX_REG_LDO4_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO4_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("LDO5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO5,
+ .ops = &bd718xx_pickable_range_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71847_LDO5_VOLTAGE_NUM,
+ .linear_ranges = bd71847_ldo5_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71847_ldo5_volts),
+ .vsel_reg = BD718XX_REG_LDO5_VOLT,
+ .vsel_mask = BD71847_LDO5_MASK,
+ .vsel_range_reg = BD718XX_REG_LDO5_VOLT,
+ .vsel_range_mask = BD71847_LDO5_RANGE_MASK,
+ .linear_range_selectors = bd71847_ldo5_volt_range_sel,
+ .enable_reg = BD718XX_REG_LDO5_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO5_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo6",
+ .of_match = of_match_ptr("LDO6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO6,
+ .ops = &bd718xx_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_LDO6_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_ldo6_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_ldo6_volts),
+ /* LDO6 is supplied by buck5 */
+ .supply_name = "buck5",
+ .vsel_reg = BD718XX_REG_LDO6_VOLT,
+ .vsel_mask = BD718XX_LDO6_MASK,
+ .enable_reg = BD718XX_REG_LDO6_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO6_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ },
+};
+
+static const struct bd718xx_regulator_data bd71837_regulators[] = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK1,
+ .ops = &bd718xx_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_dvs_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_dvs_buck_volts),
+ .vsel_reg = BD718XX_REG_BUCK1_VOLT_RUN,
+ .vsel_mask = DVS_BUCK_RUN_MASK,
+ .enable_reg = BD718XX_REG_BUCK1_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_BUCK1_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK2,
+ .ops = &bd718xx_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_dvs_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_dvs_buck_volts),
+ .vsel_reg = BD718XX_REG_BUCK2_VOLT_RUN,
+ .vsel_mask = DVS_BUCK_RUN_MASK,
+ .enable_reg = BD718XX_REG_BUCK2_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_BUCK2_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK3,
+ .ops = &bd718xx_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_dvs_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_dvs_buck_volts),
+ .vsel_reg = BD71837_REG_BUCK3_VOLT_RUN,
+ .vsel_mask = DVS_BUCK_RUN_MASK,
+ .enable_reg = BD71837_REG_BUCK3_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD71837_REG_BUCK3_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("BUCK4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK4,
+ .ops = &bd718xx_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_dvs_buck_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_dvs_buck_volts),
+ .vsel_reg = BD71837_REG_BUCK4_VOLT_RUN,
+ .vsel_mask = DVS_BUCK_RUN_MASK,
+ .enable_reg = BD71837_REG_BUCK4_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD71837_REG_BUCK4_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck5",
+ .of_match = of_match_ptr("BUCK5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK5,
+ .ops = &bd718xx_pickable_range_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_BUCK5_VOLTAGE_NUM,
+ .linear_ranges = bd71837_buck5_volts,
+ .n_linear_ranges =
+ ARRAY_SIZE(bd71837_buck5_volts),
+ .vsel_reg = BD718XX_REG_1ST_NODVS_BUCK_VOLT,
+ .vsel_mask = BD71837_BUCK5_MASK,
+ .vsel_range_reg = BD718XX_REG_1ST_NODVS_BUCK_VOLT,
+ .vsel_range_mask = BD71837_BUCK5_RANGE_MASK,
+ .linear_range_selectors = bd71837_buck5_volt_range_sel,
+ .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck6",
+ .of_match = of_match_ptr("BUCK6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK6,
+ .ops = &bd718xx_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_BUCK6_VOLTAGE_NUM,
+ .linear_ranges = bd71837_buck6_volts,
+ .n_linear_ranges =
+ ARRAY_SIZE(bd71837_buck6_volts),
+ .vsel_reg = BD718XX_REG_2ND_NODVS_BUCK_VOLT,
+ .vsel_mask = BD71837_BUCK6_MASK,
+ .enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck7",
+ .of_match = of_match_ptr("BUCK7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK7,
+ .ops = &bd718xx_buck_regulator_nolinear_ops,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &bd718xx_3rd_nodvs_buck_volts[0],
+ .n_voltages = ARRAY_SIZE(bd718xx_3rd_nodvs_buck_volts),
+ .vsel_reg = BD718XX_REG_3RD_NODVS_BUCK_VOLT,
+ .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
+ .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck8",
+ .of_match = of_match_ptr("BUCK8"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_BUCK8,
+ .ops = &bd718xx_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_4TH_NODVS_BUCK_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_4th_nodvs_buck_volts,
+ .n_linear_ranges =
+ ARRAY_SIZE(bd718xx_4th_nodvs_buck_volts),
+ .vsel_reg = BD718XX_REG_4TH_NODVS_BUCK_VOLT,
+ .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
+ .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
+ .enable_mask = BD718XX_BUCK_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
+ .mask = BD718XX_BUCK_SEL,
+ .val = BD718XX_BUCK_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO1,
+ .ops = &bd718xx_pickable_range_ldo_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_LDO1_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_ldo1_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_ldo1_volts),
+ .vsel_reg = BD718XX_REG_LDO1_VOLT,
+ .vsel_mask = BD718XX_LDO1_MASK,
+ .vsel_range_reg = BD718XX_REG_LDO1_VOLT,
+ .vsel_range_mask = BD718XX_LDO1_RANGE_MASK,
+ .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
+ .enable_reg = BD718XX_REG_LDO1_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO1_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO2,
+ .ops = &bd718xx_ldo_regulator_nolinear_ops,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &ldo_2_volts[0],
+ .vsel_reg = BD718XX_REG_LDO2_VOLT,
+ .vsel_mask = BD718XX_LDO2_MASK,
+ .n_voltages = ARRAY_SIZE(ldo_2_volts),
+ .enable_reg = BD718XX_REG_LDO2_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO2_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo3",
+ .of_match = of_match_ptr("LDO3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO3,
+ .ops = &bd718xx_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_LDO3_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_ldo3_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_ldo3_volts),
+ .vsel_reg = BD718XX_REG_LDO3_VOLT,
+ .vsel_mask = BD718XX_LDO3_MASK,
+ .enable_reg = BD718XX_REG_LDO3_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO3_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo4",
+ .of_match = of_match_ptr("LDO4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO4,
+ .ops = &bd718xx_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_LDO4_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_ldo4_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_ldo4_volts),
+ .vsel_reg = BD718XX_REG_LDO4_VOLT,
+ .vsel_mask = BD718XX_LDO4_MASK,
+ .enable_reg = BD718XX_REG_LDO4_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO4_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo5",
+ .of_match = of_match_ptr("LDO5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO5,
+ .ops = &bd718xx_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_LDO5_VOLTAGE_NUM,
+ .linear_ranges = bd71837_ldo5_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_ldo5_volts),
+ /* LDO5 is supplied by buck6 */
+ .supply_name = "buck6",
+ .vsel_reg = BD718XX_REG_LDO5_VOLT,
+ .vsel_mask = BD71837_LDO5_MASK,
+ .enable_reg = BD718XX_REG_LDO5_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO5_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ .additional_inits = bd71837_ldo5_inits,
+ .additional_init_amnt = ARRAY_SIZE(bd71837_ldo5_inits),
+ },
+ {
+ .desc = {
+ .name = "ldo6",
+ .of_match = of_match_ptr("LDO6"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO6,
+ .ops = &bd718xx_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD718XX_LDO6_VOLTAGE_NUM,
+ .linear_ranges = bd718xx_ldo6_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd718xx_ldo6_volts),
+ /* LDO6 is supplied by buck7 */
+ .supply_name = "buck7",
+ .vsel_reg = BD718XX_REG_LDO6_VOLT,
+ .vsel_mask = BD718XX_LDO6_MASK,
+ .enable_reg = BD718XX_REG_LDO6_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD718XX_REG_LDO6_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ .additional_inits = bd71837_ldo6_inits,
+ .additional_init_amnt = ARRAY_SIZE(bd71837_ldo6_inits),
+ },
+ {
+ .desc = {
+ .name = "ldo7",
+ .of_match = of_match_ptr("LDO7"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD718XX_LDO7,
+ .ops = &bd718xx_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD71837_LDO7_VOLTAGE_NUM,
+ .linear_ranges = bd71837_ldo7_volts,
+ .n_linear_ranges = ARRAY_SIZE(bd71837_ldo7_volts),
+ .vsel_reg = BD71837_REG_LDO7_VOLT,
+ .vsel_mask = BD71837_LDO7_MASK,
+ .enable_reg = BD71837_REG_LDO7_VOLT,
+ .enable_mask = BD718XX_LDO_EN,
+ .owner = THIS_MODULE,
+ },
+ .init = {
+ .reg = BD71837_REG_LDO7_VOLT,
+ .mask = BD718XX_LDO_SEL,
+ .val = BD718XX_LDO_SEL,
+ },
+ },
+};
+
+struct bd718xx_pmic_inits {
+ const struct bd718xx_regulator_data (*r_datas)[];
+ unsigned int r_amount;
+};
+
+static int bd718xx_probe(struct platform_device *pdev)
+{
+ struct bd718xx *mfd;
+ struct regulator_config config = { 0 };
+ struct bd718xx_pmic_inits pmic_regulators[] = {
+ [BD718XX_TYPE_BD71837] = {
+ .r_datas = &bd71837_regulators,
+ .r_amount = ARRAY_SIZE(bd71837_regulators),
+ },
+ [BD718XX_TYPE_BD71847] = {
+ .r_datas = &bd71847_regulators,
+ .r_amount = ARRAY_SIZE(bd71847_regulators),
+ },
+ };
+
+ int i, j, err;
+
+ mfd = dev_get_drvdata(pdev->dev.parent);
+ if (!mfd) {
+ dev_err(&pdev->dev, "No MFD driver data\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (mfd->chip_type >= BD718XX_TYPE_AMOUNT ||
+ !pmic_regulators[mfd->chip_type].r_datas) {
+ dev_err(&pdev->dev, "Unsupported chip type\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* Register LOCK release */
+ err = regmap_update_bits(mfd->regmap, BD718XX_REG_REGLOCK,
+ (REGLOCK_PWRSEQ | REGLOCK_VREG), 0);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to unlock PMIC (%d)\n", err);
+ goto err;
+ } else {
+ dev_dbg(&pdev->dev, "Unlocked lock register 0x%x\n",
+ BD718XX_REG_REGLOCK);
+ }
+
+ for (i = 0; i < pmic_regulators[mfd->chip_type].r_amount; i++) {
+ const struct regulator_desc *desc;
+ struct regulator_dev *rdev;
+ const struct bd718xx_regulator_data *r;
+
+ r = &(*pmic_regulators[mfd->chip_type].r_datas)[i];
+ desc = &r->desc;
+
+ config.dev = pdev->dev.parent;
+ config.regmap = mfd->regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "failed to register %s regulator\n",
+ desc->name);
+ err = PTR_ERR(rdev);
+ goto err;
+ }
+ /* Regulator registration reads the regulator constraints and
+ * applies them (set_machine_constraints()). This should have
+ * set the control register(s) to the correct values, so we can
+ * now switch control from the PMIC state machine to the
+ * register interface.
+ */
+ err = regmap_update_bits(mfd->regmap, r->init.reg,
+ r->init.mask, r->init.val);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to write BUCK/LDO SEL bit for (%s)\n",
+ desc->name);
+ goto err;
+ }
+ for (j = 0; j < r->additional_init_amnt; j++) {
+ err = regmap_update_bits(mfd->regmap,
+ r->additional_inits[j].reg,
+ r->additional_inits[j].mask,
+ r->additional_inits[j].val);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Buck (%s) initialization failed\n",
+ desc->name);
+ goto err;
+ }
+ }
+ }
+
+err:
+ return err;
+}
+
+static struct platform_driver bd718xx_regulator = {
+ .driver = {
+ .name = "bd718xx-pmic",
+ },
+ .probe = bd718xx_probe,
+};
+
+module_platform_driver(bd718xx_regulator);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD71837/BD71847 voltage regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index bb1324f93143..2c66b528aede 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -426,19 +426,24 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(name);
-static ssize_t regulator_print_opmode(char *buf, int mode)
+static const char *regulator_opmode_to_str(int mode)
{
switch (mode) {
case REGULATOR_MODE_FAST:
- return sprintf(buf, "fast\n");
+ return "fast";
case REGULATOR_MODE_NORMAL:
- return sprintf(buf, "normal\n");
+ return "normal";
case REGULATOR_MODE_IDLE:
- return sprintf(buf, "idle\n");
+ return "idle";
case REGULATOR_MODE_STANDBY:
- return sprintf(buf, "standby\n");
+ return "standby";
}
- return sprintf(buf, "unknown\n");
+ return "unknown";
+}
+
+static ssize_t regulator_print_opmode(char *buf, int mode)
+{
+ return sprintf(buf, "%s\n", regulator_opmode_to_str(mode));
}
static ssize_t regulator_opmode_show(struct device *dev,
@@ -2783,6 +2788,11 @@ static int regulator_map_voltage(struct regulator_dev *rdev, int min_uV,
if (desc->ops->list_voltage == regulator_list_voltage_linear_range)
return regulator_map_voltage_linear_range(rdev, min_uV, max_uV);
+ if (desc->ops->list_voltage ==
+ regulator_list_voltage_pickable_linear_range)
+ return regulator_map_voltage_pickable_linear_range(rdev,
+ min_uV, max_uV);
+
return regulator_map_voltage_iterate(rdev, min_uV, max_uV);
}
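With this dispatch in place, a driver that uses the pickable list helper and leaves .map_voltage unset gets the matching mapper chosen automatically by the core. A minimal sketch (the foo_* names are illustrative):

static const struct regulator_ops foo_pickable_ops = {
	/* the core maps this list helper to the pickable mapper above */
	.list_voltage = regulator_list_voltage_pickable_linear_range,
	.get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_pickable_regmap,
	/* .map_voltage deliberately left unset */
};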
@@ -3161,7 +3171,7 @@ static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
if (!rstate->changeable)
return -EPERM;
- rstate->enabled = en;
+ rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND;
return 0;
}
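The fix matters because rstate->enabled holds a tristate, not a bool. Assuming the constant values from include/linux/regulator/machine.h, storing the raw `en` flag would have recorded DISABLE_IN_SUSPEND (1) for an enable request:

/* From include/linux/regulator/machine.h (values assumed from context) */
#define DO_NOTHING_IN_SUSPEND	0
#define DISABLE_IN_SUSPEND	1
#define ENABLE_IN_SUSPEND	2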
@@ -3470,21 +3480,23 @@ out:
}
EXPORT_SYMBOL_GPL(regulator_set_current_limit);
+static int _regulator_get_current_limit_unlocked(struct regulator_dev *rdev)
+{
+ /* sanity check */
+ if (!rdev->desc->ops->get_current_limit)
+ return -EINVAL;
+
+ return rdev->desc->ops->get_current_limit(rdev);
+}
+
static int _regulator_get_current_limit(struct regulator_dev *rdev)
{
int ret;
regulator_lock(rdev);
-
- /* sanity check */
- if (!rdev->desc->ops->get_current_limit) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = rdev->desc->ops->get_current_limit(rdev);
-out:
+ ret = _regulator_get_current_limit_unlocked(rdev);
regulator_unlock(rdev);
+
return ret;
}
@@ -3549,21 +3561,23 @@ out:
}
EXPORT_SYMBOL_GPL(regulator_set_mode);
+static unsigned int _regulator_get_mode_unlocked(struct regulator_dev *rdev)
+{
+ /* sanity check */
+ if (!rdev->desc->ops->get_mode)
+ return -EINVAL;
+
+ return rdev->desc->ops->get_mode(rdev);
+}
+
static unsigned int _regulator_get_mode(struct regulator_dev *rdev)
{
int ret;
regulator_lock(rdev);
-
- /* sanity check */
- if (!rdev->desc->ops->get_mode) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = rdev->desc->ops->get_mode(rdev);
-out:
+ ret = _regulator_get_mode_unlocked(rdev);
regulator_unlock(rdev);
+
return ret;
}
@@ -4395,13 +4409,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
!rdev->desc->fixed_uV)
rdev->is_switch = true;
+ dev_set_drvdata(&rdev->dev, rdev);
ret = device_register(&rdev->dev);
if (ret != 0) {
put_device(&rdev->dev);
goto unset_supplies;
}
- dev_set_drvdata(&rdev->dev, rdev);
rdev_init_debugfs(rdev);
/* try to resolve regulators supply since a new one was registered */
@@ -4455,41 +4469,33 @@ void regulator_unregister(struct regulator_dev *rdev)
EXPORT_SYMBOL_GPL(regulator_unregister);
#ifdef CONFIG_SUSPEND
-static int _regulator_suspend(struct device *dev, void *data)
-{
- struct regulator_dev *rdev = dev_to_rdev(dev);
- suspend_state_t *state = data;
- int ret;
-
- regulator_lock(rdev);
- ret = suspend_set_state(rdev, *state);
- regulator_unlock(rdev);
-
- return ret;
-}
-
/**
* regulator_suspend - prepare regulators for system wide suspend
- * @state: system suspend state
+ * @dev: ``&struct device`` pointer of the regulator device to suspend
*
* Configure each regulator with its suspend operating parameters for state.
*/
static int regulator_suspend(struct device *dev)
{
+ struct regulator_dev *rdev = dev_to_rdev(dev);
suspend_state_t state = pm_suspend_target_state;
+ int ret;
+
+ regulator_lock(rdev);
+ ret = suspend_set_state(rdev, state);
+ regulator_unlock(rdev);
- return class_for_each_device(&regulator_class, NULL, &state,
- _regulator_suspend);
+ return ret;
}
-static int _regulator_resume(struct device *dev, void *data)
+static int regulator_resume(struct device *dev)
{
- int ret = 0;
+ suspend_state_t state = pm_suspend_target_state;
struct regulator_dev *rdev = dev_to_rdev(dev);
- suspend_state_t *state = data;
struct regulator_state *rstate;
+ int ret = 0;
- rstate = regulator_get_suspend_state(rdev, *state);
+ rstate = regulator_get_suspend_state(rdev, state);
if (rstate == NULL)
return 0;
@@ -4504,15 +4510,6 @@ static int _regulator_resume(struct device *dev, void *data)
return ret;
}
-
-static int regulator_resume(struct device *dev)
-{
- suspend_state_t state = pm_suspend_target_state;
-
- return class_for_each_device(&regulator_class, NULL, &state,
- _regulator_resume);
-}
-
#else /* !CONFIG_SUSPEND */
#define regulator_suspend NULL
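After this rework the callbacks run once per regulator device through the class PM ops rather than via class_for_each_device() from a single entry point. A sketch of the usual wiring, since the actual regulator_class hookup sits outside this hunk:

static const struct dev_pm_ops regulator_pm_ops = {
	.suspend = regulator_suspend,
	.resume = regulator_resume,
};

/* abridged: the real class definition carries more fields */
static struct class regulator_class = {
	.name = "regulator",
	.pm = &regulator_pm_ops,
};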
@@ -4670,17 +4667,23 @@ static void regulator_summary_show_subtree(struct seq_file *s,
struct regulation_constraints *c;
struct regulator *consumer;
struct summary_data summary_data;
+ unsigned int opmode;
if (!rdev)
return;
- seq_printf(s, "%*s%-*s %3d %4d %6d ",
+ regulator_lock_nested(rdev, level);
+
+ opmode = _regulator_get_mode_unlocked(rdev);
+ seq_printf(s, "%*s%-*s %3d %4d %6d %7s ",
level * 3 + 1, "",
30 - level * 3, rdev_get_name(rdev),
- rdev->use_count, rdev->open_count, rdev->bypass_count);
+ rdev->use_count, rdev->open_count, rdev->bypass_count,
+ regulator_opmode_to_str(opmode));
seq_printf(s, "%5dmV ", _regulator_get_voltage(rdev) / 1000);
- seq_printf(s, "%5dmA ", _regulator_get_current_limit(rdev) / 1000);
+ seq_printf(s, "%5dmA ",
+ _regulator_get_current_limit_unlocked(rdev) / 1000);
c = rdev->constraints;
if (c) {
@@ -4709,7 +4712,8 @@ static void regulator_summary_show_subtree(struct seq_file *s,
switch (rdev->desc->type) {
case REGULATOR_VOLTAGE:
- seq_printf(s, "%37dmV %5dmV",
+ seq_printf(s, "%37dmA %5dmV %5dmV",
+ consumer->uA_load / 1000,
consumer->voltage[PM_SUSPEND_ON].min_uV / 1000,
consumer->voltage[PM_SUSPEND_ON].max_uV / 1000);
break;
@@ -4726,6 +4730,8 @@ static void regulator_summary_show_subtree(struct seq_file *s,
class_for_each_device(&regulator_class, NULL, &summary_data,
regulator_summary_show_children);
+
+ regulator_unlock(rdev);
}
static int regulator_summary_show_roots(struct device *dev, void *data)
@@ -4741,8 +4747,8 @@ static int regulator_summary_show_roots(struct device *dev, void *data)
static int regulator_summary_show(struct seq_file *s, void *data)
{
- seq_puts(s, " regulator use open bypass voltage current min max\n");
- seq_puts(s, "-------------------------------------------------------------------------------\n");
+ seq_puts(s, " regulator use open bypass opmode voltage current min max\n");
+ seq_puts(s, "---------------------------------------------------------------------------------------\n");
class_for_each_device(&regulator_class, NULL, s,
regulator_summary_show_roots);
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 9ececfef42d6..37e4025203e3 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -420,7 +420,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
config.dev = &pdev->dev;
config.driver_data = regulator;
config.regmap = da9052->regmap;
- if (pdata && pdata->regulators) {
+ if (pdata) {
config.init_data = pdata->regulators[cell->id];
} else {
#ifdef CONFIG_OF
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index f40c3b8644ae..588c3d2445cf 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -612,7 +612,7 @@ static int da9055_regulator_probe(struct platform_device *pdev)
config.driver_data = regulator;
config.regmap = da9055->regmap;
- if (pdata && pdata->regulators) {
+ if (pdata) {
config.init_data = pdata->regulators[pdev->id];
} else {
ret = da9055_regulator_dt_init(pdev, regulator, &config,
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 6c122b3df5d0..8f68c7a05d27 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -294,11 +294,11 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
pdata->init_data[n] = da9211_matches[i].init_data;
pdata->reg_node[n] = da9211_matches[i].of_node;
pdata->gpiod_ren[n] = devm_gpiod_get_from_of_node(dev,
- da9211_matches[i].of_node,
- "enable",
- 0,
- GPIOD_OUT_HIGH,
- "da9211-enable");
+ da9211_matches[i].of_node,
+ "enable",
+ 0,
+ GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "da9211-enable");
n++;
}
diff --git a/drivers/regulator/fixed-helper.c b/drivers/regulator/fixed-helper.c
index 777fac6fb4cb..2c6098e6f4bc 100644
--- a/drivers/regulator/fixed-helper.c
+++ b/drivers/regulator/fixed-helper.c
@@ -43,7 +43,6 @@ struct platform_device *regulator_register_always_on(int id, const char *name,
}
data->cfg.microvolts = uv;
- data->cfg.gpio = -EINVAL;
data->cfg.enabled_at_boot = 1;
data->cfg.init_data = &data->init_data;
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 988a7472c2ab..ccc29038f19a 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -24,10 +24,9 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/fixed.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
@@ -78,15 +77,16 @@ of_get_fixed_voltage_config(struct device *dev,
if (init_data->constraints.boot_on)
config->enabled_at_boot = true;
- config->gpio = of_get_named_gpio(np, "gpio", 0);
- if ((config->gpio < 0) && (config->gpio != -ENOENT))
- return ERR_PTR(config->gpio);
-
of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
- config->enable_high = of_property_read_bool(np, "enable-active-high");
- config->gpio_is_open_drain = of_property_read_bool(np,
- "gpio-open-drain");
+ /*
+ * FIXME: we pulled active low/high and open drain handling into
+ * gpiolib so it will be handled there. Delete this in the second
+ * step when we also remove the custom inversion handling for all
+ * legacy boardfiles.
+ */
+ config->enable_high = 1;
+ config->gpio_is_open_drain = 0;
if (of_find_property(np, "vin-supply", NULL))
config->input_supply = "vin";
@@ -102,6 +102,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
struct fixed_voltage_config *config;
struct fixed_voltage_data *drvdata;
struct regulator_config cfg = { };
+ enum gpiod_flags gflags;
int ret;
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct fixed_voltage_data),
@@ -150,25 +151,41 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->desc.fixed_uV = config->microvolts;
- if (gpio_is_valid(config->gpio)) {
- cfg.ena_gpio = config->gpio;
- if (pdev->dev.of_node)
- cfg.ena_gpio_initialized = true;
- }
cfg.ena_gpio_invert = !config->enable_high;
if (config->enabled_at_boot) {
if (config->enable_high)
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+ gflags = GPIOD_OUT_HIGH;
else
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
+ gflags = GPIOD_OUT_LOW;
} else {
if (config->enable_high)
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
+ gflags = GPIOD_OUT_LOW;
else
- cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+ gflags = GPIOD_OUT_HIGH;
}
- if (config->gpio_is_open_drain)
- cfg.ena_gpio_flags |= GPIOF_OPEN_DRAIN;
+ if (config->gpio_is_open_drain) {
+ if (gflags == GPIOD_OUT_HIGH)
+ gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
+ else
+ gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
+ }
+
+ /*
+ * Some fixed regulators share the enable line between two
+ * regulators which makes it necessary to get a handle on the
+ * same descriptor for two different consumers. This will get
+ * the GPIO descriptor, but only the first call will initialize
+ * it so any flags such as inversion or open drain will only
+ * be set up by the first caller and assumed identical on the
+ * next caller.
+ *
+ * FIXME: find a better way to deal with this.
+ */
+ gflags |= GPIOD_FLAGS_BIT_NONEXCLUSIVE;
+
+ cfg.ena_gpiod = devm_gpiod_get_optional(&pdev->dev, NULL, gflags);
+ if (IS_ERR(cfg.ena_gpiod))
+ return PTR_ERR(cfg.ena_gpiod);
cfg.dev = &pdev->dev;
cfg.init_data = config->init_data;
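The NONEXCLUSIVE flag is what makes the shared-enable-line case described in the comment work: a second gpiod request for the same line returns the same descriptor instead of -EBUSY. A sketch with two consumers (dev_a and dev_b are illustrative):

struct gpio_desc *en_a, *en_b;

/* the first caller sets up direction and flags for the line */
en_a = devm_gpiod_get_optional(dev_a, NULL,
			       GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
/* the second caller gets the same descriptor; flags are assumed identical */
en_b = devm_gpiod_get_optional(dev_b, NULL,
			       GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE);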
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 2ae7c3ac5940..5686a1335bd3 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -103,6 +103,128 @@ int regulator_disable_regmap(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(regulator_disable_regmap);
+static int regulator_range_selector_to_index(struct regulator_dev *rdev,
+ unsigned int rval)
+{
+ int i;
+
+ if (!rdev->desc->linear_range_selectors)
+ return -EINVAL;
+
+ rval &= rdev->desc->vsel_range_mask;
+
+ for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
+ if (rdev->desc->linear_range_selectors[i] == rval)
+ return i;
+ }
+ return -EINVAL;
+}
+
+/**
+ * regulator_get_voltage_sel_pickable_regmap - pickable range get_voltage_sel
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O and use pickable
+ * ranges can set the vsel_reg, vsel_mask, vsel_range_reg and vsel_range_mask
+ * fields in their descriptor and then use this as their get_voltage_sel
+ * operation, saving some code.
+ */
+int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev)
+{
+ unsigned int r_val;
+ int range;
+ unsigned int val;
+ int ret, i;
+ unsigned int voltages_in_range = 0;
+
+ if (!rdev->desc->linear_ranges)
+ return -EINVAL;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->vsel_range_reg, &r_val);
+ if (ret != 0)
+ return ret;
+
+ val &= rdev->desc->vsel_mask;
+ val >>= ffs(rdev->desc->vsel_mask) - 1;
+
+ range = regulator_range_selector_to_index(rdev, r_val);
+ if (range < 0)
+ return -EINVAL;
+
+ for (i = 0; i < range; i++)
+ voltages_in_range += (rdev->desc->linear_ranges[i].max_sel -
+ rdev->desc->linear_ranges[i].min_sel) + 1;
+
+ return val + voltages_in_range;
+}
+EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_pickable_regmap);
+
+/**
+ * regulator_set_voltage_sel_pickable_regmap - pickable range set_voltage_sel
+ *
+ * @rdev: regulator to operate on
+ * @sel: Selector to set
+ *
+ * Regulators that use regmap for their register I/O and use pickable
+ * ranges can set the vsel_reg, vsel_mask, vsel_range_reg and vsel_range_mask
+ * fields in their descriptor and then use this as their set_voltage_sel
+ * operation, saving some code.
+ */
+int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ unsigned int range;
+ int ret, i;
+ unsigned int voltages_in_range = 0;
+
+ for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
+ voltages_in_range = (rdev->desc->linear_ranges[i].max_sel -
+ rdev->desc->linear_ranges[i].min_sel) + 1;
+ if (sel < voltages_in_range)
+ break;
+ sel -= voltages_in_range;
+ }
+
+ if (i == rdev->desc->n_linear_ranges)
+ return -EINVAL;
+
+ sel <<= ffs(rdev->desc->vsel_mask) - 1;
+ sel += rdev->desc->linear_ranges[i].min_sel;
+
+ range = rdev->desc->linear_range_selectors[i];
+
+ if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg) {
+ ret = regmap_update_bits(rdev->regmap,
+ rdev->desc->vsel_reg,
+ rdev->desc->vsel_range_mask |
+ rdev->desc->vsel_mask, sel | range);
+ } else {
+ ret = regmap_update_bits(rdev->regmap,
+ rdev->desc->vsel_range_reg,
+ rdev->desc->vsel_range_mask, range);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+ rdev->desc->vsel_mask, sel);
+ }
+
+ if (ret)
+ return ret;
+
+ if (rdev->desc->apply_bit)
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
+ rdev->desc->apply_bit,
+ rdev->desc->apply_bit);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_pickable_regmap);
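Wiring a descriptor for these helpers mirrors the BD718xx LDO1 entry earlier in this series: the per-range selector values live in linear_range_selectors and the range-select bits in vsel_range_reg/vsel_range_mask. A sketch with illustrative foo_* names and register values:

static const struct regulator_linear_range foo_volts[] = {
	REGULATOR_LINEAR_RANGE(1600000, 0x0, 0x3, 100000),	/* range 0 */
	REGULATOR_LINEAR_RANGE(3000000, 0x0, 0x3, 100000),	/* range 1 */
};
/* value written to the vsel_range_mask bits to select each range */
static const unsigned int foo_volt_range_sel[] = { 0x0, 0x20 };

static const struct regulator_desc foo_desc = {
	.n_voltages = 8,
	.linear_ranges = foo_volts,
	.n_linear_ranges = ARRAY_SIZE(foo_volts),
	.linear_range_selectors = foo_volt_range_sel,
	.vsel_reg = FOO_REG_VOLT,		/* illustrative */
	.vsel_mask = 0x03,
	.vsel_range_reg = FOO_REG_VOLT,
	.vsel_range_mask = 0x20,
	.ops = &foo_pickable_ops,
};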
+
/**
* regulator_get_voltage_sel_regmap - standard get_voltage_sel for regmap users
*
@@ -321,20 +443,91 @@ int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
ret += range->min_sel;
- break;
+ /*
+ * Map back into a voltage to verify we're still in bounds.
+ * If we are not, then continue checking the rest of the ranges.
+ */
+ voltage = rdev->desc->ops->list_voltage(rdev, ret);
+ if (voltage >= min_uV && voltage <= max_uV)
+ break;
}
if (i == rdev->desc->n_linear_ranges)
return -EINVAL;
- /* Map back into a voltage to verify we're still in bounds */
- voltage = rdev->desc->ops->list_voltage(rdev, ret);
- if (voltage < min_uV || voltage > max_uV)
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_linear_range);
+
+/**
+ * regulator_map_voltage_pickable_linear_range - map_voltage, pickable ranges
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers providing pickable linear_ranges in their descriptor can use
+ * this as their map_voltage() callback.
+ */
+int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ const struct regulator_linear_range *range;
+ int ret = -EINVAL;
+ int voltage, i;
+ unsigned int selector = 0;
+
+ if (!rdev->desc->n_linear_ranges) {
+ BUG_ON(!rdev->desc->n_linear_ranges);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
+ int linear_max_uV;
+
+ range = &rdev->desc->linear_ranges[i];
+ linear_max_uV = range->min_uV +
+ (range->max_sel - range->min_sel) * range->uV_step;
+
+ if (!(min_uV <= linear_max_uV && max_uV >= range->min_uV)) {
+ selector += (range->max_sel - range->min_sel + 1);
+ continue;
+ }
+
+ if (min_uV <= range->min_uV)
+ min_uV = range->min_uV;
+
+ /* range->uV_step == 0 means fixed voltage range */
+ if (range->uV_step == 0) {
+ ret = 0;
+ } else {
+ ret = DIV_ROUND_UP(min_uV - range->min_uV,
+ range->uV_step);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret += selector;
+
+ voltage = rdev->desc->ops->list_voltage(rdev, ret);
+
+ /*
+ * Map back into a voltage to verify we're still in bounds.
+ * We may have overlapping voltage ranges. Hence we don't
+ * exit but retry until we have checked all ranges.
+ */
+ if (voltage < min_uV || voltage > max_uV)
+ selector += (range->max_sel - range->min_sel + 1);
+ else
+ break;
+ }
+
+ if (i == rdev->desc->n_linear_ranges)
return -EINVAL;
return ret;
}
-EXPORT_SYMBOL_GPL(regulator_map_voltage_linear_range);
+EXPORT_SYMBOL_GPL(regulator_map_voltage_pickable_linear_range);
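A worked example of the retry behaviour, using two hypothetical overlapping ranges:

/* range 0: 1600000..1900000 uV, selectors 0..3, 100000 uV steps
 * range 1: 1800000..2100000 uV, selectors 0..3 (global offset 4)
 *
 * A request for 1800000..1850000 uV matches range 0 first and maps to
 * global selector 2 (1600000 + 2 * 100000 = 1800000 uV). Had the
 * mapped voltage fallen outside the requested window, the loop would
 * advance the selector offset and retry with range 1 instead of
 * failing outright.
 */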
/**
* regulator_list_voltage_linear - List voltages with simple calculation
@@ -361,6 +554,46 @@ int regulator_list_voltage_linear(struct regulator_dev *rdev,
EXPORT_SYMBOL_GPL(regulator_list_voltage_linear);
/**
+ * regulator_list_voltage_pickable_linear_range - pickable range list voltages
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * list_voltage() operation, intended to be used by drivers utilizing the
+ * pickable-ranges helpers.
+ */
+int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ const struct regulator_linear_range *range;
+ int i;
+ unsigned int all_sels = 0;
+
+ if (!rdev->desc->n_linear_ranges) {
+ BUG_ON(!rdev->desc->n_linear_ranges);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
+ unsigned int sels_in_range;
+
+ range = &rdev->desc->linear_ranges[i];
+
+ sels_in_range = range->max_sel - range->min_sel;
+
+ if (all_sels + sels_in_range >= selector) {
+ selector -= all_sels;
+ return range->min_uV + (range->uV_step * selector);
+ }
+
+ all_sels += (sels_in_range + 1);
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_list_voltage_pickable_linear_range);
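Continuing the hypothetical two-range example from above:

/* Selector 5 walks past the 4 selectors of range 0 (all_sels = 4),
 * is rebased to selector 1 of range 1, and yields
 * 1800000 + 1 * 100000 = 1900000 uV.
 */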
+
+/**
* regulator_list_voltage_linear_range - List voltages for linear ranges
*
* @rdev: Regulator device
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 257c1943e753..9c2607e912cf 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -84,6 +84,7 @@ static const struct regulator_desc isl9305_regulators[] = {
.enable_mask = ISL9305_DCD1_EN,
.supply_name = "VINDCD1",
.ops = &isl9305_ops,
+ .owner = THIS_MODULE,
},
[ISL9305_DCD2] = {
.name = "DCD2",
@@ -98,6 +99,7 @@ static const struct regulator_desc isl9305_regulators[] = {
.enable_mask = ISL9305_DCD2_EN,
.supply_name = "VINDCD2",
.ops = &isl9305_ops,
+ .owner = THIS_MODULE,
},
[ISL9305_LDO1] = {
.name = "LDO1",
@@ -112,6 +114,7 @@ static const struct regulator_desc isl9305_regulators[] = {
.enable_mask = ISL9305_LDO1_EN,
.supply_name = "VINLDO1",
.ops = &isl9305_ops,
+ .owner = THIS_MODULE,
},
[ISL9305_LDO2] = {
.name = "LDO2",
@@ -126,6 +129,7 @@ static const struct regulator_desc isl9305_regulators[] = {
.enable_mask = ISL9305_LDO2_EN,
.supply_name = "VINLDO2",
.ops = &isl9305_ops,
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
index b615a413ca9f..bbedb08d257b 100644
--- a/drivers/regulator/lm363x-regulator.c
+++ b/drivers/regulator/lm363x-regulator.c
@@ -227,9 +227,11 @@ static struct gpio_desc *lm363x_regulator_of_get_enable_gpio(struct device *dev,
*/
switch (id) {
case LM3632_LDO_POS:
- return devm_gpiod_get_index_optional(dev, "enable", 0, GPIOD_OUT_LOW);
+ return devm_gpiod_get_index_optional(dev, "enable", 0,
+ GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
case LM3632_LDO_NEG:
- return devm_gpiod_get_index_optional(dev, "enable", 1, GPIOD_OUT_LOW);
+ return devm_gpiod_get_index_optional(dev, "enable", 1,
+ GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
default:
return NULL;
}
diff --git a/drivers/regulator/lochnagar-regulator.c b/drivers/regulator/lochnagar-regulator.c
new file mode 100644
index 000000000000..2b5073b9ff86
--- /dev/null
+++ b/drivers/regulator/lochnagar-regulator.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Lochnagar regulator driver
+//
+// Copyright (c) 2017-2018 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+// Author: Charles Keepax <ckeepax@opensource.cirrus.com>
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#include <linux/mfd/lochnagar.h>
+
+static const struct regulator_ops lochnagar_micvdd_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_linear_range lochnagar_micvdd_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1000000, 0, 0xC, 50000),
+ REGULATOR_LINEAR_RANGE(1700000, 0xD, 0x1F, 100000),
+};
+
+static int lochnagar_micbias_enable(struct regulator_dev *rdev)
+{
+ struct lochnagar *lochnagar = rdev_get_drvdata(rdev);
+ int ret;
+
+ mutex_lock(&lochnagar->analogue_config_lock);
+
+ ret = regulator_enable_regmap(rdev);
+ if (ret < 0)
+ goto err;
+
+ ret = lochnagar_update_config(lochnagar);
+
+err:
+ mutex_unlock(&lochnagar->analogue_config_lock);
+
+ return ret;
+}
+
+static int lochnagar_micbias_disable(struct regulator_dev *rdev)
+{
+ struct lochnagar *lochnagar = rdev_get_drvdata(rdev);
+ int ret;
+
+ mutex_lock(&lochnagar->analogue_config_lock);
+
+ ret = regulator_disable_regmap(rdev);
+ if (ret < 0)
+ goto err;
+
+ ret = lochnagar_update_config(lochnagar);
+
+err:
+ mutex_unlock(&lochnagar->analogue_config_lock);
+
+ return ret;
+}
+
+static const struct regulator_ops lochnagar_micbias_ops = {
+ .enable = lochnagar_micbias_enable,
+ .disable = lochnagar_micbias_disable,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_ops lochnagar_vddcore_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_linear_range lochnagar_vddcore_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x8, 0x41, 12500),
+};
+
+enum lochnagar_regulators {
+ LOCHNAGAR_MICVDD,
+ LOCHNAGAR_MIC1VDD,
+ LOCHNAGAR_MIC2VDD,
+ LOCHNAGAR_VDDCORE,
+};
+
+static int lochnagar_micbias_of_parse(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ struct lochnagar *lochnagar = config->driver_data;
+ int shift = (desc->id - LOCHNAGAR_MIC1VDD) *
+ LOCHNAGAR2_P2_MICBIAS_SRC_SHIFT;
+ int mask = LOCHNAGAR2_P1_MICBIAS_SRC_MASK << shift;
+ unsigned int val;
+ int ret;
+
+ ret = of_property_read_u32(np, "cirrus,micbias-input", &val);
+ if (ret >= 0) {
+ mutex_lock(&lochnagar->analogue_config_lock);
+ ret = regmap_update_bits(lochnagar->regmap,
+ LOCHNAGAR2_ANALOGUE_PATH_CTRL2,
+ mask, val << shift);
+ mutex_unlock(&lochnagar->analogue_config_lock);
+ if (ret < 0) {
+ dev_err(lochnagar->dev,
+ "Failed to update micbias source: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct regulator_desc lochnagar_regulators[] = {
+ [LOCHNAGAR_MICVDD] = {
+ .name = "MICVDD",
+ .supply_name = "SYSVDD",
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 32,
+ .ops = &lochnagar_micvdd_ops,
+
+ .id = LOCHNAGAR_MICVDD,
+ .of_match = of_match_ptr("MICVDD"),
+
+ .enable_reg = LOCHNAGAR2_MICVDD_CTRL1,
+ .enable_mask = LOCHNAGAR2_MICVDD_REG_ENA_MASK,
+ .vsel_reg = LOCHNAGAR2_MICVDD_CTRL2,
+ .vsel_mask = LOCHNAGAR2_MICVDD_VSEL_MASK,
+
+ .linear_ranges = lochnagar_micvdd_ranges,
+ .n_linear_ranges = ARRAY_SIZE(lochnagar_micvdd_ranges),
+
+ .enable_time = 3000,
+ .ramp_delay = 1000,
+
+ .owner = THIS_MODULE,
+ },
+ [LOCHNAGAR_MIC1VDD] = {
+ .name = "MIC1VDD",
+ .supply_name = "MICBIAS1",
+ .type = REGULATOR_VOLTAGE,
+ .ops = &lochnagar_micbias_ops,
+
+ .id = LOCHNAGAR_MIC1VDD,
+ .of_match = of_match_ptr("MIC1VDD"),
+ .of_parse_cb = lochnagar_micbias_of_parse,
+
+ .enable_reg = LOCHNAGAR2_ANALOGUE_PATH_CTRL2,
+ .enable_mask = LOCHNAGAR2_P1_INPUT_BIAS_ENA_MASK,
+
+ .owner = THIS_MODULE,
+ },
+ [LOCHNAGAR_MIC2VDD] = {
+ .name = "MIC2VDD",
+ .supply_name = "MICBIAS2",
+ .type = REGULATOR_VOLTAGE,
+ .ops = &lochnagar_micbias_ops,
+
+ .id = LOCHNAGAR_MIC2VDD,
+ .of_match = of_match_ptr("MIC2VDD"),
+ .of_parse_cb = lochnagar_micbias_of_parse,
+
+ .enable_reg = LOCHNAGAR2_ANALOGUE_PATH_CTRL2,
+ .enable_mask = LOCHNAGAR2_P2_INPUT_BIAS_ENA_MASK,
+
+ .owner = THIS_MODULE,
+ },
+ [LOCHNAGAR_VDDCORE] = {
+ .name = "VDDCORE",
+ .supply_name = "SYSVDD",
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 57,
+ .ops = &lochnagar_vddcore_ops,
+
+ .id = LOCHNAGAR_VDDCORE,
+ .of_match = of_match_ptr("VDDCORE"),
+
+ .enable_reg = LOCHNAGAR2_VDDCORE_CDC_CTRL1,
+ .enable_mask = LOCHNAGAR2_VDDCORE_CDC_REG_ENA_MASK,
+ .vsel_reg = LOCHNAGAR2_VDDCORE_CDC_CTRL2,
+ .vsel_mask = LOCHNAGAR2_VDDCORE_CDC_VSEL_MASK,
+
+ .linear_ranges = lochnagar_vddcore_ranges,
+ .n_linear_ranges = ARRAY_SIZE(lochnagar_vddcore_ranges),
+
+ .enable_time = 3000,
+ .ramp_delay = 1000,
+
+ .owner = THIS_MODULE,
+ },
+};
+
+static int lochnagar_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct lochnagar *lochnagar = dev_get_drvdata(dev->parent);
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ int ret, i;
+
+ config.dev = lochnagar->dev;
+ config.regmap = lochnagar->regmap;
+ config.driver_data = lochnagar;
+
+ for (i = 0; i < ARRAY_SIZE(lochnagar_regulators); i++) {
+ const struct regulator_desc *desc = &lochnagar_regulators[i];
+
+ rdev = devm_regulator_register(dev, desc, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(dev, "Failed to register %s regulator: %d\n",
+ desc->name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver lochnagar_regulator_driver = {
+ .driver = {
+ .name = "lochnagar-regulator",
+ },
+
+ .probe = lochnagar_regulator_probe,
+};
+module_platform_driver(lochnagar_regulator_driver);
+
+MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.cirrus.com>");
+MODULE_DESCRIPTION("Regulator driver for Cirrus Logic Lochnagar Board");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lochnagar-regulator");
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index f2347474a106..553b4790050f 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -503,9 +503,10 @@ static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
/* FIXME: check default mode for GPIO here: high or low? */
ldo->ena_gpiod = devm_gpiod_get_index_optional(&pdev->dev,
- "enable",
- enable_id,
- GPIOD_OUT_HIGH);
+ "enable",
+ enable_id,
+ GPIOD_OUT_HIGH |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(ldo->ena_gpiod))
return PTR_ERR(ldo->ena_gpiod);
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 18d5b01ddcb2..63f724f260ef 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -404,7 +404,8 @@ static const struct regmap_config ltc3589_regmap_config = {
.max_register = LTC3589_L2DTV2,
.reg_defaults = ltc3589_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(ltc3589_reg_defaults),
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.cache_type = REGCACHE_RBTREE,
};
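These hunks track a regmap API change: the single use_single_rw flag was split into independent read and write flags. For a new config the equivalent is (foo_* names illustrative):

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	/* formerly .use_single_rw = true */
	.use_single_read = true,
	.use_single_write = true,
};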
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index 9dec1609ff66..71fd0f2a4b76 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -321,7 +321,8 @@ static const struct regmap_config ltc3676_regmap_config = {
.readable_reg = ltc3676_readable_reg,
.volatile_reg = ltc3676_volatile_reg,
.max_register = LTC3676_CLIRQ,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.cache_type = REGCACHE_RBTREE,
};
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index f1e77ed5dfec..6c39fff73b8a 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -230,6 +230,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
gflags = GPIOD_OUT_HIGH;
else
gflags = GPIOD_OUT_LOW;
+ gflags |= GPIOD_FLAGS_BIT_NONEXCLUSIVE;
gpiod = devm_gpiod_get_optional(&client->dev,
"max8952,en",
gflags);
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 7cd493ec6315..e7a58b509032 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -758,6 +758,7 @@ static int max8973_probe(struct i2c_client *client,
gflags = GPIOD_OUT_HIGH;
else
gflags = GPIOD_OUT_LOW;
+ gflags |= GPIOD_FLAGS_BIT_NONEXCLUSIVE;
gpiod = devm_gpiod_get_optional(&client->dev,
"maxim,enable",
gflags);
diff --git a/drivers/regulator/max8997-regulator.c b/drivers/regulator/max8997-regulator.c
index ad0c806b0737..3bf5ddfaaea8 100644
--- a/drivers/regulator/max8997-regulator.c
+++ b/drivers/regulator/max8997-regulator.c
@@ -929,8 +929,8 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
break;
if (i == ARRAY_SIZE(regulators)) {
- dev_warn(&pdev->dev, "don't know how to configure regulator %s\n",
- reg_np->name);
+ dev_warn(&pdev->dev, "don't know how to configure regulator %pOFn\n",
+ reg_np);
continue;
}
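The %pOFn specifier prints the name of a struct device_node directly, so callers pass the node pointer instead of dereferencing np->name. A minimal sketch:

/* prints the node name, replacing the np->name dereference */
pr_info("configuring regulator %pOFn\n", np);
/* the related %pOF specifier prints the full node path instead */
pr_info("configuring regulator %pOF\n", np);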
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index da4fb9824757..65eb1e0350cf 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -203,7 +203,7 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
if (!found)
dev_warn(&pdev->dev,
- "Unknown regulator: %s\n", child->name);
+ "Unknown regulator: %pOFn\n", child);
}
of_node_put(parent);
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 638f17d4c848..c4223b3e0dff 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -95,8 +95,8 @@ static void of_get_regulation_constraints(struct device_node *np,
if (!ret)
constraints->settling_time_up = pval;
if (constraints->settling_time_up && constraints->settling_time) {
- pr_warn("%s: ambiguous configuration for settling time, ignoring 'regulator-settling-time-up-us'\n",
- np->name);
+ pr_warn("%pOFn: ambiguous configuration for settling time, ignoring 'regulator-settling-time-up-us'\n",
+ np);
constraints->settling_time_up = 0;
}
@@ -105,8 +105,8 @@ static void of_get_regulation_constraints(struct device_node *np,
if (!ret)
constraints->settling_time_down = pval;
if (constraints->settling_time_down && constraints->settling_time) {
- pr_warn("%s: ambiguous configuration for settling time, ignoring 'regulator-settling-time-down-us'\n",
- np->name);
+ pr_warn("%pOFn: ambiguous configuration for settling time, ignoring 'regulator-settling-time-down-us'\n",
+ np);
constraints->settling_time_down = 0;
}
@@ -127,12 +127,12 @@ static void of_get_regulation_constraints(struct device_node *np,
if (desc && desc->of_map_mode) {
mode = desc->of_map_mode(pval);
if (mode == REGULATOR_MODE_INVALID)
- pr_err("%s: invalid mode %u\n", np->name, pval);
+ pr_err("%pOFn: invalid mode %u\n", np, pval);
else
constraints->initial_mode = mode;
} else {
- pr_warn("%s: mapping for mode %d not defined\n",
- np->name, pval);
+ pr_warn("%pOFn: mapping for mode %d not defined\n",
+ np, pval);
}
}
@@ -144,14 +144,14 @@ static void of_get_regulation_constraints(struct device_node *np,
ret = of_property_read_u32_index(np,
"regulator-allowed-modes", i, &pval);
if (ret) {
- pr_err("%s: couldn't read allowed modes index %d, ret=%d\n",
- np->name, i, ret);
+ pr_err("%pOFn: couldn't read allowed modes index %d, ret=%d\n",
+ np, i, ret);
break;
}
mode = desc->of_map_mode(pval);
if (mode == REGULATOR_MODE_INVALID)
- pr_err("%s: invalid regulator-allowed-modes element %u\n",
- np->name, pval);
+ pr_err("%pOFn: invalid regulator-allowed-modes element %u\n",
+ np, pval);
else
constraints->valid_modes_mask |= mode;
}
@@ -159,7 +159,7 @@ static void of_get_regulation_constraints(struct device_node *np,
constraints->valid_ops_mask
|= REGULATOR_CHANGE_MODE;
} else {
- pr_warn("%s: mode mapping not defined\n", np->name);
+ pr_warn("%pOFn: mode mapping not defined\n", np);
}
}
@@ -197,13 +197,13 @@ static void of_get_regulation_constraints(struct device_node *np,
if (desc && desc->of_map_mode) {
mode = desc->of_map_mode(pval);
if (mode == REGULATOR_MODE_INVALID)
- pr_err("%s: invalid mode %u\n",
- np->name, pval);
+ pr_err("%pOFn: invalid mode %u\n",
+ np, pval);
else
suspend_state->mode = mode;
} else {
- pr_warn("%s: mapping for mode %d not defined\n",
- np->name, pval);
+ pr_warn("%pOFn: mapping for mode %d not defined\n",
+ np, pval);
}
}
@@ -213,8 +213,6 @@ static void of_get_regulation_constraints(struct device_node *np,
else if (of_property_read_bool(suspend_np,
"regulator-off-in-suspend"))
suspend_state->enabled = DISABLE_IN_SUSPEND;
- else
- suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
&pval))
@@ -351,8 +349,8 @@ int of_regulator_match(struct device *dev, struct device_node *node,
match->desc);
if (!match->init_data) {
dev_err(dev,
- "failed to parse DT for regulator %s\n",
- child->name);
+ "failed to parse DT for regulator %pOFn\n",
+ child);
of_node_put(child);
return -EINVAL;
}
@@ -401,16 +399,16 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
init_data = of_get_regulator_init_data(dev, child, desc);
if (!init_data) {
dev_err(dev,
- "failed to parse DT for regulator %s\n",
- child->name);
+ "failed to parse DT for regulator %pOFn\n",
+ child);
break;
}
if (desc->of_parse_cb) {
if (desc->of_parse_cb(child, desc, config)) {
dev_err(dev,
- "driver callback failed to parse DT for regulator %s\n",
- child->name);
+ "driver callback failed to parse DT for regulator %pOFn\n",
+ child);
init_data = NULL;
break;
}
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 31c3a236120a..dd41a9bb3f5c 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -31,11 +31,17 @@
#define PFUZE100_COINVOL 0x1a
#define PFUZE100_SW1ABVOL 0x20
+#define PFUZE100_SW1ABMODE 0x23
#define PFUZE100_SW1CVOL 0x2e
+#define PFUZE100_SW1CMODE 0x31
#define PFUZE100_SW2VOL 0x35
+#define PFUZE100_SW2MODE 0x38
#define PFUZE100_SW3AVOL 0x3c
+#define PFUZE100_SW3AMODE 0x3f
#define PFUZE100_SW3BVOL 0x43
+#define PFUZE100_SW3BMODE 0x46
#define PFUZE100_SW4VOL 0x4a
+#define PFUZE100_SW4MODE 0x4d
#define PFUZE100_SWBSTCON1 0x66
#define PFUZE100_VREFDDRCON 0x6a
#define PFUZE100_VSNVSVOL 0x6b
@@ -46,6 +52,13 @@
#define PFUZE100_VGEN5VOL 0x70
#define PFUZE100_VGEN6VOL 0x71
+#define PFUZE100_SWxMODE_MASK 0xf
+#define PFUZE100_SWxMODE_APS_APS 0x8
+#define PFUZE100_SWxMODE_APS_OFF 0x4
+
+#define PFUZE100_VGENxLPWR BIT(6)
+#define PFUZE100_VGENxSTBY BIT(5)
+
enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3, PFUZE3001 = 0x31, };
struct pfuze_regulator {
@@ -559,6 +572,69 @@ static inline struct device_node *match_of_node(int index)
}
#endif
+static struct pfuze_chip *syspm_pfuze_chip;
+
+static void pfuze_power_off_prepare(void)
+{
+ dev_info(syspm_pfuze_chip->dev, "Configure standby mode for power off\n");
+
+ /* Switch from default mode: APS/APS to APS/Off */
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW1ABMODE,
+ PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW1CMODE,
+ PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW2MODE,
+ PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW3AMODE,
+ PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW3BMODE,
+ PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_SW4MODE,
+ PFUZE100_SWxMODE_MASK, PFUZE100_SWxMODE_APS_OFF);
+
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN1VOL,
+ PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
+ PFUZE100_VGENxSTBY);
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN2VOL,
+ PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
+ PFUZE100_VGENxSTBY);
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN3VOL,
+ PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
+ PFUZE100_VGENxSTBY);
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN4VOL,
+ PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
+ PFUZE100_VGENxSTBY);
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN5VOL,
+ PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
+ PFUZE100_VGENxSTBY);
+ regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN6VOL,
+ PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY,
+ PFUZE100_VGENxSTBY);
+}
+
+static int pfuze_power_off_prepare_init(struct pfuze_chip *pfuze_chip)
+{
+ if (pfuze_chip->chip_id != PFUZE100) {
+ dev_warn(pfuze_chip->dev, "Requested pm_power_off_prepare handler for not supported chip\n");
+ return -ENODEV;
+ }
+
+ if (pm_power_off_prepare) {
+ dev_warn(pfuze_chip->dev, "pm_power_off_prepare is already registered.\n");
+ return -EBUSY;
+ }
+
+ if (syspm_pfuze_chip) {
+ dev_warn(pfuze_chip->dev, "syspm_pfuze_chip is already set.\n");
+ return -EBUSY;
+ }
+
+ syspm_pfuze_chip = pfuze_chip;
+ pm_power_off_prepare = pfuze_power_off_prepare;
+
+ return 0;
+}
+
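For context, pm_power_off_prepare is the global hook the kernel invokes just before powering off. An abridged sketch of the consumer side in kernel/reboot.c, which is the point where the standby configuration written above takes effect:

void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	/* abridged: reboot-CPU migration and syscore shutdown */
	machine_power_off();
}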
static int pfuze_identify(struct pfuze_chip *pfuze_chip)
{
unsigned int value;
@@ -753,6 +829,20 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
}
}
+ if (of_property_read_bool(client->dev.of_node,
+ "fsl,pmic-stby-poweroff"))
+ return pfuze_power_off_prepare_init(pfuze_chip);
+
+ return 0;
+}
+
+static int pfuze100_regulator_remove(struct i2c_client *client)
+{
+ if (syspm_pfuze_chip) {
+ syspm_pfuze_chip = NULL;
+ pm_power_off_prepare = NULL;
+ }
+
return 0;
}
@@ -763,6 +853,7 @@ static struct i2c_driver pfuze_driver = {
.of_match_table = pfuze_dt_ids,
},
.probe = pfuze100_regulator_probe,
+ .remove = pfuze100_regulator_remove,
};
module_i2c_driver(pfuze_driver);
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 9f27daebd8c8..39ccf53fdeb3 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -414,7 +414,7 @@ static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg, struct device *dev,
break;
if (!rpmh_data->name) {
- dev_err(dev, "Unknown regulator %s\n", node->name);
+ dev_err(dev, "Unknown regulator %pOFn\n", node);
return -EINVAL;
}
@@ -423,8 +423,8 @@ static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg, struct device *dev,
vreg->addr = cmd_db_read_addr(rpmh_resource_name);
if (!vreg->addr) {
- dev_err(dev, "%s: could not find RPMh address for resource %s\n",
- node->name, rpmh_resource_name);
+ dev_err(dev, "%pOFn: could not find RPMh address for resource %s\n",
+ node, rpmh_resource_name);
return -ENODEV;
}
@@ -469,13 +469,13 @@ static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg, struct device *dev,
rdev = devm_regulator_register(dev, &vreg->rdesc, &reg_config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
- dev_err(dev, "%s: devm_regulator_register() failed, ret=%d\n",
- node->name, ret);
+ dev_err(dev, "%pOFn: devm_regulator_register() failed, ret=%d\n",
+ node, ret);
return ret;
}
- dev_dbg(dev, "%s regulator registered for RPMh resource %s @ 0x%05X\n",
- node->name, rpmh_resource_name, vreg->addr);
+ dev_dbg(dev, "%pOFn regulator registered for RPMh resource %s @ 0x%05X\n",
+ node, rpmh_resource_name, vreg->addr);
return 0;
}
@@ -504,6 +504,7 @@ static unsigned int rpmh_regulator_pmic4_ldo_of_map_mode(unsigned int rpmh_mode)
break;
default:
mode = REGULATOR_MODE_INVALID;
+ break;
}
return mode;
@@ -537,6 +538,7 @@ rpmh_regulator_pmic4_smps_of_map_mode(unsigned int rpmh_mode)
break;
default:
mode = REGULATOR_MODE_INVALID;
+ break;
}
return mode;
@@ -566,6 +568,7 @@ static unsigned int rpmh_regulator_pmic4_bob_of_map_mode(unsigned int rpmh_mode)
break;
default:
mode = REGULATOR_MODE_INVALID;
+ break;
}
return mode;
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index fe2fb36803e0..f5bca77d67c1 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -420,6 +420,60 @@ static const struct regulator_desc pmi8998_bob = {
.ops = &rpm_bob_ops,
};
+static const struct regulator_desc pms405_hfsmps3 = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 216,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pms405_nldo300 = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 128,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pms405_nldo1200 = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 128,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pms405_pldo50 = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1664000, 0, 128, 16000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 129,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pms405_pldo150 = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1664000, 0, 128, 16000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 129,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pms405_pldo600 = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1256000, 0, 98, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 99,
+ .ops = &rpm_smps_ldo_ops,
+};
+
struct rpm_regulator_data {
const char *name;
u32 type;
@@ -661,6 +715,28 @@ static const struct rpm_regulator_data rpm_pmi8998_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pms405_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pms405_hfsmps3, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pms405_hfsmps3, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pms405_hfsmps3, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pms405_hfsmps3, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pms405_hfsmps3, "vdd_s5" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pms405_nldo1200, "vdd_l1_l2" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pms405_nldo1200, "vdd_l1_l2" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pms405_nldo1200, "vdd_l3_l8" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pms405_nldo300, "vdd_l4" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pms405_pldo600, "vdd_l5_l6" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pms405_pldo600, "vdd_l5_l6" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pms405_pldo150, "vdd_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pms405_nldo1200, "vdd_l3_l8" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pms405_nldo1200, "vdd_l9" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pms405_pldo50, "vdd_l10_l11_l12_l13" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pms405_pldo150, "vdd_l10_l11_l12_l13" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pms405_pldo150, "vdd_l10_l11_l12_l13" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pms405_pldo150, "vdd_l10_l11_l12_l13" },
+ {}
+};
+
static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
@@ -669,6 +745,7 @@ static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8998-regulators", .data = &rpm_pm8998_regulators },
{ .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators },
{ .compatible = "qcom,rpm-pmi8998-regulators", .data = &rpm_pmi8998_regulators },
+ { .compatible = "qcom,rpm-pms405-regulators", .data = &rpm_pms405_regulators },
{}
};
MODULE_DEVICE_TABLE(of, rpm_of_match);
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 667d16dc83ce..219b9afda0cb 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -447,15 +447,15 @@ static void s5m8767_regulator_config_ext_control(struct s5m8767_info *s5m8767,
}
if (mode != S5M8767_ENCTRL_USE_GPIO) {
dev_warn(s5m8767->dev,
- "ext-control for %s: mismatched op_mode (%x), ignoring\n",
- rdata->reg_node->name, mode);
+ "ext-control for %pOFn: mismatched op_mode (%x), ignoring\n",
+ rdata->reg_node, mode);
return;
}
if (!rdata->ext_control_gpiod) {
dev_warn(s5m8767->dev,
- "ext-control for %s: GPIO not valid, ignoring\n",
- rdata->reg_node->name);
+ "ext-control for %pOFn: GPIO not valid, ignoring\n",
+ rdata->reg_node);
return;
}
@@ -566,17 +566,18 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
if (i == ARRAY_SIZE(regulators)) {
dev_warn(iodev->dev,
- "don't know how to configure regulator %s\n",
- reg_np->name);
+ "don't know how to configure regulator %pOFn\n",
+ reg_np);
continue;
}
- rdata->ext_control_gpiod = devm_gpiod_get_from_of_node(&pdev->dev,
- reg_np,
- "s5m8767,pmic-ext-control-gpios",
- 0,
- GPIOD_OUT_HIGH,
- "s5m8767");
+ rdata->ext_control_gpiod = devm_gpiod_get_from_of_node(
+ &pdev->dev,
+ reg_np,
+ "s5m8767,pmic-ext-control-gpios",
+ 0,
+ GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "s5m8767");
if (IS_ERR(rdata->ext_control_gpiod))
return PTR_ERR(rdata->ext_control_gpiod);
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
new file mode 100644
index 000000000000..e15634edb8ce
--- /dev/null
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -0,0 +1,674 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) STMicroelectronics 2018
+// Author: Pascal Paillet <p.paillet@st.com> for STMicroelectronics.
+
+#include <linux/interrupt.h>
+#include <linux/mfd/stpmic1.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+/**
+ * struct stpmic1_regulator_cfg - stpmic1 regulator description
+ * @desc: regulator framework description
+ * @mask_reset_reg: mask reset register address
+ * @mask_reset_mask: mask rank and mask reset register mask
+ * @icc_reg: icc register address
+ * @icc_mask: icc register mask
+ */
+struct stpmic1_regulator_cfg {
+ struct regulator_desc desc;
+ u8 mask_reset_reg;
+ u8 mask_reset_mask;
+ u8 icc_reg;
+ u8 icc_mask;
+};
+
+/**
+ * struct stpmic1_regulator - stpmic1 regulator data, used as driver data
+ * @regul_id: regulator id
+ * @reg_node: DT node of regulator (unused on non-DT platforms)
+ * @cfg: stpmic specific regulator description
+ * @mask_reset: mask_reset bit value
+ * @irq_curlim: current limit interrupt number
+ * @regmap: pointer to parent regmap structure
+ */
+struct stpmic1_regulator {
+ unsigned int regul_id;
+ struct device_node *reg_node;
+ struct stpmic1_regulator_cfg *cfg;
+ u8 mask_reset;
+ int irq_curlim;
+ struct regmap *regmap;
+};
+
+static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode);
+static unsigned int stpmic1_get_mode(struct regulator_dev *rdev);
+static int stpmic1_set_icc(struct regulator_dev *rdev);
+static int stpmic1_regulator_parse_dt(void *driver_data);
+static unsigned int stpmic1_map_mode(unsigned int mode);
+
+enum {
+ STPMIC1_BUCK1 = 0,
+ STPMIC1_BUCK2 = 1,
+ STPMIC1_BUCK3 = 2,
+ STPMIC1_BUCK4 = 3,
+ STPMIC1_LDO1 = 4,
+ STPMIC1_LDO2 = 5,
+ STPMIC1_LDO3 = 6,
+ STPMIC1_LDO4 = 7,
+ STPMIC1_LDO5 = 8,
+ STPMIC1_LDO6 = 9,
+ STPMIC1_VREF_DDR = 10,
+ STPMIC1_BOOST = 11,
+ STPMIC1_VBUS_OTG = 12,
+ STPMIC1_SW_OUT = 13,
+};
+
+/* Enable time worst case is 5000mV / (2250uV/uS), about 2200 uS */
+#define PMIC_ENABLE_TIME_US 2200
+
+#define STPMIC1_BUCK_MODE_NORMAL 0
+#define STPMIC1_BUCK_MODE_LP BUCK_HPLP_ENABLE_MASK
+
+static const struct regulator_linear_range buck1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 30, 25000),
+ REGULATOR_LINEAR_RANGE(1350000, 31, 63, 0),
+};
+
+static const struct regulator_linear_range buck2_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1000000, 0, 17, 0),
+ REGULATOR_LINEAR_RANGE(1050000, 18, 19, 0),
+ REGULATOR_LINEAR_RANGE(1100000, 20, 21, 0),
+ REGULATOR_LINEAR_RANGE(1150000, 22, 23, 0),
+ REGULATOR_LINEAR_RANGE(1200000, 24, 25, 0),
+ REGULATOR_LINEAR_RANGE(1250000, 26, 27, 0),
+ REGULATOR_LINEAR_RANGE(1300000, 28, 29, 0),
+ REGULATOR_LINEAR_RANGE(1350000, 30, 31, 0),
+ REGULATOR_LINEAR_RANGE(1400000, 32, 33, 0),
+ REGULATOR_LINEAR_RANGE(1450000, 34, 35, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 36, 63, 0),
+};
+
+static const struct regulator_linear_range buck3_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1000000, 0, 19, 0),
+ REGULATOR_LINEAR_RANGE(1100000, 20, 23, 0),
+ REGULATOR_LINEAR_RANGE(1200000, 24, 27, 0),
+ REGULATOR_LINEAR_RANGE(1300000, 28, 31, 0),
+ REGULATOR_LINEAR_RANGE(1400000, 32, 35, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 36, 55, 100000),
+ REGULATOR_LINEAR_RANGE(3400000, 56, 63, 0),
+};
+
+static const struct regulator_linear_range buck4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 27, 25000),
+ REGULATOR_LINEAR_RANGE(1300000, 28, 29, 0),
+ REGULATOR_LINEAR_RANGE(1350000, 30, 31, 0),
+ REGULATOR_LINEAR_RANGE(1400000, 32, 33, 0),
+ REGULATOR_LINEAR_RANGE(1450000, 34, 35, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 36, 60, 100000),
+ REGULATOR_LINEAR_RANGE(3900000, 61, 63, 0),
+};
+
+static const struct regulator_linear_range ldo1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
+ REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
+ REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
+};
+
+static const struct regulator_linear_range ldo2_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
+ REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
+ REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
+};
+
+static const struct regulator_linear_range ldo3_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
+ REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000),
+ REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0),
+ /* with index 31 LDO3 is in DDR mode */
+ REGULATOR_LINEAR_RANGE(500000, 31, 31, 0),
+};
+
+static const struct regulator_linear_range ldo5_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0),
+ REGULATOR_LINEAR_RANGE(1700000, 8, 30, 100000),
+ REGULATOR_LINEAR_RANGE(3900000, 31, 31, 0),
+};
+
+static const struct regulator_linear_range ldo6_ranges[] = {
+ REGULATOR_LINEAR_RANGE(900000, 0, 24, 100000),
+ REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0),
+};
+
+static struct regulator_ops stpmic1_ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_pull_down = regulator_set_pull_down_regmap,
+ .set_over_current_protection = stpmic1_set_icc,
+};
+
+static struct regulator_ops stpmic1_ldo3_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_iterate,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_pull_down = regulator_set_pull_down_regmap,
+ .get_bypass = regulator_get_bypass_regmap,
+ .set_bypass = regulator_set_bypass_regmap,
+ .set_over_current_protection = stpmic1_set_icc,
+};
+
+static struct regulator_ops stpmic1_ldo4_fixed_regul_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_pull_down = regulator_set_pull_down_regmap,
+ .set_over_current_protection = stpmic1_set_icc,
+};
+
+static struct regulator_ops stpmic1_buck_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_pull_down = regulator_set_pull_down_regmap,
+ .set_mode = stpmic1_set_mode,
+ .get_mode = stpmic1_get_mode,
+ .set_over_current_protection = stpmic1_set_icc,
+};
+
+static struct regulator_ops stpmic1_vref_ddr_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_pull_down = regulator_set_pull_down_regmap,
+};
+
+static struct regulator_ops stpmic1_switch_regul_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_over_current_protection = stpmic1_set_icc,
+};
+
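+/*
+ * The REG_* macros below expand to struct regulator_desc initializers:
+ * "ids" selects the *_ACTIVE_CR / *_PULL_DOWN_* register names by token
+ * pasting, "base" names the supply and, where applicable, the matching
+ * linear-range table.
+ */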
+#define REG_LDO(ids, base) { \
+ .name = #ids, \
+ .id = STPMIC1_##ids, \
+ .n_voltages = 32, \
+ .ops = &stpmic1_ldo_ops, \
+ .linear_ranges = base ## _ranges, \
+ .n_linear_ranges = ARRAY_SIZE(base ## _ranges), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = ids##_ACTIVE_CR, \
+ .vsel_mask = LDO_VOLTAGE_MASK, \
+ .enable_reg = ids##_ACTIVE_CR, \
+ .enable_mask = LDO_ENABLE_MASK, \
+ .enable_val = 1, \
+ .disable_val = 0, \
+ .enable_time = PMIC_ENABLE_TIME_US, \
+ .pull_down_reg = ids##_PULL_DOWN_REG, \
+ .pull_down_mask = ids##_PULL_DOWN_MASK, \
+ .supply_name = #base, \
+}
+
+#define REG_LDO3(ids, base) { \
+ .name = #ids, \
+ .id = STPMIC1_##ids, \
+ .n_voltages = 32, \
+ .ops = &stpmic1_ldo3_ops, \
+ .linear_ranges = ldo3_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(ldo3_ranges), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = LDO3_ACTIVE_CR, \
+ .vsel_mask = LDO_VOLTAGE_MASK, \
+ .enable_reg = LDO3_ACTIVE_CR, \
+ .enable_mask = LDO_ENABLE_MASK, \
+ .enable_val = 1, \
+ .disable_val = 0, \
+ .enable_time = PMIC_ENABLE_TIME_US, \
+ .bypass_reg = LDO3_ACTIVE_CR, \
+ .bypass_mask = LDO_BYPASS_MASK, \
+ .bypass_val_on = LDO_BYPASS_MASK, \
+ .bypass_val_off = 0, \
+ .pull_down_reg = ids##_PULL_DOWN_REG, \
+ .pull_down_mask = ids##_PULL_DOWN_MASK, \
+ .supply_name = #base, \
+}
+
+#define REG_LDO4(ids, base) { \
+ .name = #ids, \
+ .id = STPMIC1_##ids, \
+ .n_voltages = 1, \
+ .ops = &stpmic1_ldo4_fixed_regul_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 3300000, \
+ .fixed_uV = 3300000, \
+ .enable_reg = LDO4_ACTIVE_CR, \
+ .enable_mask = LDO_ENABLE_MASK, \
+ .enable_val = 1, \
+ .disable_val = 0, \
+ .enable_time = PMIC_ENABLE_TIME_US, \
+ .pull_down_reg = ids##_PULL_DOWN_REG, \
+ .pull_down_mask = ids##_PULL_DOWN_MASK, \
+ .supply_name = #base, \
+}
+
+#define REG_BUCK(ids, base) { \
+ .name = #ids, \
+ .id = STPMIC1_##ids, \
+ .ops = &stpmic1_buck_ops, \
+ .n_voltages = 64, \
+ .linear_ranges = base ## _ranges, \
+ .n_linear_ranges = ARRAY_SIZE(base ## _ranges), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = ids##_ACTIVE_CR, \
+ .vsel_mask = BUCK_VOLTAGE_MASK, \
+ .enable_reg = ids##_ACTIVE_CR, \
+ .enable_mask = BUCK_ENABLE_MASK, \
+ .enable_val = 1, \
+ .disable_val = 0, \
+ .enable_time = PMIC_ENABLE_TIME_US, \
+ .of_map_mode = stpmic1_map_mode, \
+ .pull_down_reg = ids##_PULL_DOWN_REG, \
+ .pull_down_mask = ids##_PULL_DOWN_MASK, \
+ .supply_name = #base, \
+}
+
+#define REG_VREF_DDR(ids, base) { \
+ .name = #ids, \
+ .id = STPMIC1_##ids, \
+ .n_voltages = 1, \
+ .ops = &stpmic1_vref_ddr_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 500000, \
+ .fixed_uV = 500000, \
+ .enable_reg = VREF_DDR_ACTIVE_CR, \
+ .enable_mask = BUCK_ENABLE_MASK, \
+ .enable_val = 1, \
+ .disable_val = 0, \
+ .enable_time = PMIC_ENABLE_TIME_US, \
+ .pull_down_reg = ids##_PULL_DOWN_REG, \
+ .pull_down_mask = ids##_PULL_DOWN_MASK, \
+ .supply_name = #base, \
+}
+
+#define REG_SWITCH(ids, base, reg, mask, val) { \
+ .name = #ids, \
+ .id = STPMIC1_##ids, \
+ .n_voltages = 1, \
+ .ops = &stpmic1_switch_regul_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = 0, \
+ .fixed_uV = 5000000, \
+ .enable_reg = (reg), \
+ .enable_mask = (mask), \
+ .enable_val = (val), \
+ .disable_val = 0, \
+ .enable_time = PMIC_ENABLE_TIME_US, \
+ .supply_name = #base, \
+}
+
+static struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = {
+ [STPMIC1_BUCK1] = {
+ .desc = REG_BUCK(BUCK1, buck1),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(0),
+ .mask_reset_reg = BUCKS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(0),
+ },
+ [STPMIC1_BUCK2] = {
+ .desc = REG_BUCK(BUCK2, buck2),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(1),
+ .mask_reset_reg = BUCKS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(1),
+ },
+ [STPMIC1_BUCK3] = {
+ .desc = REG_BUCK(BUCK3, buck3),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(2),
+ .mask_reset_reg = BUCKS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(2),
+ },
+ [STPMIC1_BUCK4] = {
+ .desc = REG_BUCK(BUCK4, buck4),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(3),
+ .mask_reset_reg = BUCKS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(3),
+ },
+ [STPMIC1_LDO1] = {
+ .desc = REG_LDO(LDO1, ldo1),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(0),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(0),
+ },
+ [STPMIC1_LDO2] = {
+ .desc = REG_LDO(LDO2, ldo2),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(1),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(1),
+ },
+ [STPMIC1_LDO3] = {
+ .desc = REG_LDO3(LDO3, ldo3),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(2),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(2),
+ },
+ [STPMIC1_LDO4] = {
+ .desc = REG_LDO4(LDO4, ldo4),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(3),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(3),
+ },
+ [STPMIC1_LDO5] = {
+ .desc = REG_LDO(LDO5, ldo5),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(4),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(4),
+ },
+ [STPMIC1_LDO6] = {
+ .desc = REG_LDO(LDO6, ldo6),
+ .icc_reg = LDOS_ICCTO_CR,
+ .icc_mask = BIT(5),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(5),
+ },
+ [STPMIC1_VREF_DDR] = {
+ .desc = REG_VREF_DDR(VREF_DDR, vref_ddr),
+ .mask_reset_reg = LDOS_MASK_RESET_CR,
+ .mask_reset_mask = BIT(6),
+ },
+ [STPMIC1_BOOST] = {
+ .desc = REG_SWITCH(BOOST, boost, BST_SW_CR,
+ BOOST_ENABLED,
+ BOOST_ENABLED),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(6),
+ },
+ [STPMIC1_VBUS_OTG] = {
+ .desc = REG_SWITCH(VBUS_OTG, pwr_sw1, BST_SW_CR,
+ USBSW_OTG_SWITCH_ENABLED,
+ USBSW_OTG_SWITCH_ENABLED),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(4),
+ },
+ [STPMIC1_SW_OUT] = {
+ .desc = REG_SWITCH(SW_OUT, pwr_sw2, BST_SW_CR,
+ SWIN_SWOUT_ENABLED,
+ SWIN_SWOUT_ENABLED),
+ .icc_reg = BUCKS_ICCTO_CR,
+ .icc_mask = BIT(5),
+ },
+};
+
+static unsigned int stpmic1_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case STPMIC1_BUCK_MODE_NORMAL:
+ return REGULATOR_MODE_NORMAL;
+ case STPMIC1_BUCK_MODE_LP:
+ return REGULATOR_MODE_STANDBY;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+static unsigned int stpmic1_get_mode(struct regulator_dev *rdev)
+{
+ unsigned int value = 0;
+
+ regmap_read(rdev->regmap, rdev->desc->enable_reg, &value);
+
+ if (value & STPMIC1_BUCK_MODE_LP)
+ return REGULATOR_MODE_STANDBY;
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ int value;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ value = STPMIC1_BUCK_MODE_NORMAL;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ value = STPMIC1_BUCK_MODE_LP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ STPMIC1_BUCK_MODE_LP, value);
+}
+
+static int stpmic1_set_icc(struct regulator_dev *rdev)
+{
+ struct stpmic1_regulator *regul = rdev_get_drvdata(rdev);
+
+ /* enable automatic switch-off in case of overcurrent */
+ return regmap_update_bits(regul->regmap, regul->cfg->icc_reg,
+ regul->cfg->icc_mask, regul->cfg->icc_mask);
+}
+
+static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data)
+{
+ struct regulator_dev *rdev = (struct regulator_dev *)data;
+
+ mutex_lock(&rdev->mutex);
+
+ /* Send an overcurrent notification */
+ regulator_notifier_call_chain(rdev,
+ REGULATOR_EVENT_OVER_CURRENT,
+ NULL);
+
+ mutex_unlock(&rdev->mutex);
+
+ return IRQ_HANDLED;
+}
+
+static int stpmic1_regulator_init(struct platform_device *pdev,
+ struct regulator_dev *rdev)
+{
+ struct stpmic1_regulator *regul = rdev_get_drvdata(rdev);
+ int ret = 0;
+
+ /* set mask reset */
+ if (regul->mask_reset && regul->cfg->mask_reset_reg != 0) {
+ ret = regmap_update_bits(regul->regmap,
+ regul->cfg->mask_reset_reg,
+ regul->cfg->mask_reset_mask,
+ regul->cfg->mask_reset_mask);
+ if (ret) {
+ dev_err(&pdev->dev, "set mask reset failed\n");
+ return ret;
+ }
+ }
+
+ /* setup an irq handler for over-current detection */
+ if (regul->irq_curlim > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev,
+ regul->irq_curlim, NULL,
+ stpmic1_curlim_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ pdev->name, rdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Request IRQ failed\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+#define MATCH(_name, _id) \
+ [STPMIC1_##_id] = { \
+ .name = #_name, \
+ .desc = &stpmic1_regulator_cfgs[STPMIC1_##_id].desc, \
+ }
+
+static struct of_regulator_match stpmic1_regulators_matches[] = {
+ MATCH(buck1, BUCK1),
+ MATCH(buck2, BUCK2),
+ MATCH(buck3, BUCK3),
+ MATCH(buck4, BUCK4),
+ MATCH(ldo1, LDO1),
+ MATCH(ldo2, LDO2),
+ MATCH(ldo3, LDO3),
+ MATCH(ldo4, LDO4),
+ MATCH(ldo5, LDO5),
+ MATCH(ldo6, LDO6),
+ MATCH(vref_ddr, VREF_DDR),
+ MATCH(boost, BOOST),
+ MATCH(pwr_sw1, VBUS_OTG),
+ MATCH(pwr_sw2, SW_OUT),
+};
+
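+/*
+ * Hooked to init_data->regulator_init; called back by the regulator core
+ * with the driver data while each regulator is being registered.
+ */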
+static int stpmic1_regulator_parse_dt(void *driver_data)
+{
+ struct stpmic1_regulator *regul =
+ (struct stpmic1_regulator *)driver_data;
+
+ if (!regul)
+ return -EINVAL;
+
+ if (of_get_property(regul->reg_node, "st,mask-reset", NULL))
+ regul->mask_reset = 1;
+
+ regul->irq_curlim = of_irq_get(regul->reg_node, 0);
+
+ return 0;
+}
+
+static struct regulator_dev *
+stpmic1_regulator_register(struct platform_device *pdev, int id,
+ struct regulator_init_data *init_data,
+ struct stpmic1_regulator *regul)
+{
+ struct stpmic1 *pmic_dev = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_dev *rdev;
+ struct regulator_config config = {};
+
+ config.dev = &pdev->dev;
+ config.init_data = init_data;
+ config.of_node = stpmic1_regulators_matches[id].of_node;
+ config.regmap = pmic_dev->regmap;
+ config.driver_data = regul;
+
+ regul->regul_id = id;
+ regul->reg_node = config.of_node;
+ regul->cfg = &stpmic1_regulator_cfgs[id];
+ regul->regmap = pmic_dev->regmap;
+
+ rdev = devm_regulator_register(&pdev->dev, &regul->cfg->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s regulator\n",
+ regul->cfg->desc.name);
+ }
+
+ return rdev;
+}
+
+static int stpmic1_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev;
+ struct stpmic1_regulator *regul;
+ struct regulator_init_data *init_data;
+ struct device_node *np;
+ int i, ret;
+
+ np = pdev->dev.of_node;
+
+ ret = of_regulator_match(&pdev->dev, np,
+ stpmic1_regulators_matches,
+ ARRAY_SIZE(stpmic1_regulators_matches));
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Error in PMIC regulator device tree node");
+ return ret;
+ }
+
+ regul = devm_kcalloc(&pdev->dev, ARRAY_SIZE(stpmic1_regulator_cfgs),
+ sizeof(struct stpmic1_regulator), GFP_KERNEL);
+ if (!regul)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(stpmic1_regulator_cfgs); i++) {
+ /* Parse DT & find regulators to register */
+ init_data = stpmic1_regulators_matches[i].init_data;
+ if (init_data)
+ init_data->regulator_init = &stpmic1_regulator_parse_dt;
+
+ rdev = stpmic1_regulator_register(pdev, i, init_data, regul);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+
+ ret = stpmic1_regulator_init(pdev, rdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to initialize regulator %d\n", ret);
+ return ret;
+ }
+
+ regul++;
+ }
+
+ dev_dbg(&pdev->dev, "stpmic1_regulator driver probed\n");
+
+ return 0;
+}
+
+static const struct of_device_id of_pmic_regulator_match[] = {
+ { .compatible = "st,stpmic1-regulators" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, of_pmic_regulator_match);
+
+static struct platform_driver stpmic1_regulator_driver = {
+ .driver = {
+ .name = "stpmic1-regulator",
+ .of_match_table = of_match_ptr(of_pmic_regulator_match),
+ },
+ .probe = stpmic1_regulator_probe,
+};
+
+module_platform_driver(stpmic1_regulator_driver);
+
+MODULE_DESCRIPTION("STPMIC1 PMIC voltage regulator driver");
+MODULE_AUTHOR("Pascal Paillet <p.paillet@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index edaef9e4dc74..db714d5edafc 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -374,6 +374,7 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
gflags = GPIOD_OUT_HIGH;
else
gflags = GPIOD_OUT_LOW;
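+	/* allow the same GPIO line to be shared among several regulators */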
+ gflags |= GPIOD_FLAGS_BIT_NONEXCLUSIVE;
rpdata->gpiod = devm_gpiod_get_from_of_node(&pdev->dev,
tps65090_matches[idx].of_node,
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 97d9f08271c5..77911fa8f31d 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -67,6 +67,7 @@ static const struct imx7_src_signal imx7_src_signals[IMX7_RESET_NUM] = {
[IMX7_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR, BIT(2) | BIT(1) },
[IMX7_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
[IMX7_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
+ [IMX7_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) },
[IMX7_RESET_DDRC_PRST] = { SRC_DDRC_RCR, BIT(0) },
[IMX7_RESET_DDRC_CORE_RST] = { SRC_DDRC_RCR, BIT(1) },
};
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a23e7d394a0a..5e9ebdb0594c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3309,10 +3309,8 @@ dasd_exit(void)
dasd_proc_exit();
#endif
dasd_eer_exit();
- if (dasd_page_cache != NULL) {
- kmem_cache_destroy(dasd_page_cache);
- dasd_page_cache = NULL;
- }
+ kmem_cache_destroy(dasd_page_cache);
+ dasd_page_cache = NULL;
dasd_gendisk_exit();
dasd_devmap_exit();
if (dasd_debug_area != NULL) {
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 7036a6c6f86f..5542d9eadfe0 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -76,7 +76,7 @@ int dasd_gendisk_alloc(struct dasd_block *block)
gdp->queue = block->request_queue;
block->gdp = gdp;
set_capacity(block->gdp, 0);
- device_add_disk(&base->cdev->dev, block->gdp);
+ device_add_disk(&base->cdev->dev, block->gdp, NULL);
return 0;
}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 23e526cda5c1..4e8aedd50cb0 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -685,7 +685,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
}
get_device(&dev_info->dev);
- device_add_disk(&dev_info->dev, dev_info->gd);
+ device_add_disk(&dev_info->dev, dev_info->gd, NULL);
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 98f66b7b6794..e01889394c84 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -500,7 +500,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
/* 512 byte sectors */
set_capacity(bdev->gendisk, scmdev->size >> 9);
- device_add_disk(&scmdev->dev, bdev->gendisk);
+ device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
return 0;
out_queue:
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index c6ab34f94b1b..3072b89785dd 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -11,6 +11,7 @@ endif
GCOV_PROFILE_sclp_early_core.o := n
KCOV_INSTRUMENT_sclp_early_core.o := n
UBSAN_SANITIZE_sclp_early_core.o := n
+KASAN_SANITIZE_sclp_early_core.o := n
CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 4f1a69c9d81d..fdc0c0b7a6f5 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -58,22 +58,31 @@ struct mon_private {
static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
{
- struct appldata_product_id id;
+ struct appldata_parameter_list *parm_list;
+ struct appldata_product_id *id;
int rc;
- memcpy(id.prod_nr, "LNXAPPL", 7);
- id.prod_fn = myhdr->applid;
- id.record_nr = myhdr->record_num;
- id.version_nr = myhdr->version;
- id.release_nr = myhdr->release;
- id.mod_lvl = myhdr->mod_level;
- rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
+ parm_list = kmalloc(sizeof(*parm_list), GFP_KERNEL);
+ rc = -ENOMEM;
+ if (!id || !parm_list)
+ goto out;
+ memcpy(id->prod_nr, "LNXAPPL", 7);
+ id->prod_fn = myhdr->applid;
+ id->record_nr = myhdr->record_num;
+ id->version_nr = myhdr->version;
+ id->release_nr = myhdr->release;
+ id->mod_lvl = myhdr->mod_level;
+ rc = appldata_asm(parm_list, id, fcn,
+ (void *) buffer, myhdr->datalen);
if (rc <= 0)
- return rc;
+ goto out;
pr_err("Writing monitor data failed with rc=%i\n", rc);
- if (rc == 5)
- return -EPERM;
- return -EINVAL;
+ rc = (rc == 5) ? -EPERM : -EINVAL;
+out:
+ kfree(id);
+ kfree(parm_list);
+ return rc;
}
static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 1fe4918088e7..b3fcc24b1182 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -63,6 +63,9 @@
typedef unsigned int sclp_cmdw_t;
#define SCLP_CMDW_READ_CPU_INFO 0x00010001
+#define SCLP_CMDW_READ_SCP_INFO 0x00020001
+#define SCLP_CMDW_READ_STORAGE_INFO 0x00040001
+#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
@@ -156,6 +159,54 @@ struct read_cpu_info_sccb {
u8 reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
+struct read_info_sccb {
+ struct sccb_header header; /* 0-7 */
+ u16 rnmax; /* 8-9 */
+ u8 rnsize; /* 10 */
+ u8 _pad_11[16 - 11]; /* 11-15 */
+ u16 ncpurl; /* 16-17 */
+ u16 cpuoff; /* 18-19 */
+ u8 _pad_20[24 - 20]; /* 20-23 */
+ u8 loadparm[8]; /* 24-31 */
+ u8 _pad_32[42 - 32]; /* 32-41 */
+ u8 fac42; /* 42 */
+ u8 fac43; /* 43 */
+ u8 _pad_44[48 - 44]; /* 44-47 */
+ u64 facilities; /* 48-55 */
+ u8 _pad_56[66 - 56]; /* 56-65 */
+ u8 fac66; /* 66 */
+ u8 _pad_67[76 - 67]; /* 67-75 */
+ u32 ibc; /* 76-79 */
+ u8 _pad80[84 - 80]; /* 80-83 */
+ u8 fac84; /* 84 */
+ u8 fac85; /* 85 */
+ u8 _pad_86[91 - 86]; /* 86-90 */
+ u8 fac91; /* 91 */
+ u8 _pad_92[98 - 92]; /* 92-97 */
+ u8 fac98; /* 98 */
+ u8 hamaxpow; /* 99 */
+ u32 rnsize2; /* 100-103 */
+ u64 rnmax2; /* 104-111 */
+ u32 hsa_size; /* 112-115 */
+ u8 fac116; /* 116 */
+ u8 fac117; /* 117 */
+ u8 fac118; /* 118 */
+ u8 fac119; /* 119 */
+ u16 hcpua; /* 120-121 */
+ u8 _pad_122[124 - 122]; /* 122-123 */
+ u32 hmfai; /* 124-127 */
+ u8 _pad_128[4096 - 128]; /* 128-4095 */
+} __packed __aligned(PAGE_SIZE);
+
+struct read_storage_sccb {
+ struct sccb_header header;
+ u16 max_id;
+ u16 assigned;
+ u16 standby;
+ u16 :16;
+ u32 entries[0];
+} __packed;
+
static inline void sclp_fill_core_info(struct sclp_core_info *info,
struct read_cpu_info_sccb *sccb)
{
@@ -275,6 +326,7 @@ unsigned int sclp_early_con_check_vt220(struct init_sccb *sccb);
int sclp_early_set_event_mask(struct init_sccb *sccb,
sccb_mask_t receive_mask,
sccb_mask_t send_mask);
+int sclp_early_get_info(struct read_info_sccb *info);
/* useful inlines */
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index d7686a68c093..37d42de06079 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -460,15 +460,6 @@ static int sclp_mem_freeze(struct device *dev)
return -EPERM;
}
-struct read_storage_sccb {
- struct sccb_header header;
- u16 max_id;
- u16 assigned;
- u16 standby;
- u16 :16;
- u32 entries[0];
-} __packed;
-
static const struct dev_pm_ops sclp_mem_pm_ops = {
.freeze = sclp_mem_freeze,
};
@@ -498,7 +489,7 @@ static int __init sclp_detect_standby_memory(void)
for (id = 0; id <= sclp_max_storage_id; id++) {
memset(sccb, 0, PAGE_SIZE);
sccb->header.length = PAGE_SIZE;
- rc = sclp_sync_request(0x00040001 | id << 8, sccb);
+ rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
if (rc)
goto out;
switch (sccb->header.response_code) {
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 9a74abb9224d..e792cee3b51c 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -15,80 +15,17 @@
#include "sclp_sdias.h"
#include "sclp.h"
-#define SCLP_CMDW_READ_SCP_INFO 0x00020001
-#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
-
-struct read_info_sccb {
- struct sccb_header header; /* 0-7 */
- u16 rnmax; /* 8-9 */
- u8 rnsize; /* 10 */
- u8 _pad_11[16 - 11]; /* 11-15 */
- u16 ncpurl; /* 16-17 */
- u16 cpuoff; /* 18-19 */
- u8 _pad_20[24 - 20]; /* 20-23 */
- u8 loadparm[8]; /* 24-31 */
- u8 _pad_32[42 - 32]; /* 32-41 */
- u8 fac42; /* 42 */
- u8 fac43; /* 43 */
- u8 _pad_44[48 - 44]; /* 44-47 */
- u64 facilities; /* 48-55 */
- u8 _pad_56[66 - 56]; /* 56-65 */
- u8 fac66; /* 66 */
- u8 _pad_67[76 - 67]; /* 67-83 */
- u32 ibc; /* 76-79 */
- u8 _pad80[84 - 80]; /* 80-83 */
- u8 fac84; /* 84 */
- u8 fac85; /* 85 */
- u8 _pad_86[91 - 86]; /* 86-90 */
- u8 fac91; /* 91 */
- u8 _pad_92[98 - 92]; /* 92-97 */
- u8 fac98; /* 98 */
- u8 hamaxpow; /* 99 */
- u32 rnsize2; /* 100-103 */
- u64 rnmax2; /* 104-111 */
- u8 _pad_112[116 - 112]; /* 112-115 */
- u8 fac116; /* 116 */
- u8 fac117; /* 117 */
- u8 fac118; /* 118 */
- u8 fac119; /* 119 */
- u16 hcpua; /* 120-121 */
- u8 _pad_122[124 - 122]; /* 122-123 */
- u32 hmfai; /* 124-127 */
- u8 _pad_128[4096 - 128]; /* 128-4095 */
-} __packed __aligned(PAGE_SIZE);
-
static struct sclp_ipl_info sclp_ipl_info;
struct sclp_info sclp;
EXPORT_SYMBOL(sclp);
-static int __init sclp_early_read_info(struct read_info_sccb *sccb)
-{
- int i;
- sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
- SCLP_CMDW_READ_SCP_INFO};
-
- for (i = 0; i < ARRAY_SIZE(commands); i++) {
- memset(sccb, 0, sizeof(*sccb));
- sccb->header.length = sizeof(*sccb);
- sccb->header.function_code = 0x80;
- sccb->header.control_mask[2] = 0x80;
- if (sclp_early_cmd(commands[i], sccb))
- break;
- if (sccb->header.response_code == 0x10)
- return 0;
- if (sccb->header.response_code != 0x1f0)
- break;
- }
- return -EIO;
-}
-
static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
{
struct sclp_core_entry *cpue;
u16 boot_cpu_address, cpu;
- if (sclp_early_read_info(sccb))
+ if (sclp_early_get_info(sccb))
return;
sclp.facilities = sccb->facilities;
@@ -147,6 +84,8 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp_ipl_info.has_dump = 1;
memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
+ if (sccb->hsa_size)
+ sclp.hsa_size = (sccb->hsa_size - 1) * PAGE_SIZE;
sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
@@ -189,61 +128,6 @@ int __init sclp_early_get_core_info(struct sclp_core_info *info)
return 0;
}
-static long __init sclp_early_hsa_size_init(struct sdias_sccb *sccb)
-{
- memset(sccb, 0, sizeof(*sccb));
- sccb->hdr.length = sizeof(*sccb);
- sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
- sccb->evbuf.hdr.type = EVTYP_SDIAS;
- sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
- sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
- sccb->evbuf.event_id = 4712;
- sccb->evbuf.dbs = 1;
- if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
- return -EIO;
- if (sccb->hdr.response_code != 0x20)
- return -EIO;
- if (sccb->evbuf.blk_cnt == 0)
- return 0;
- return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
-}
-
-static long __init sclp_early_hsa_copy_wait(struct sdias_sccb *sccb)
-{
- memset(sccb, 0, PAGE_SIZE);
- sccb->hdr.length = PAGE_SIZE;
- if (sclp_early_cmd(SCLP_CMDW_READ_EVENT_DATA, sccb))
- return -EIO;
- if ((sccb->hdr.response_code != 0x20) && (sccb->hdr.response_code != 0x220))
- return -EIO;
- if (sccb->evbuf.blk_cnt == 0)
- return 0;
- return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
-}
-
-static void __init sclp_early_hsa_size_detect(void *sccb)
-{
- unsigned long flags;
- long size = -EIO;
-
- raw_local_irq_save(flags);
- if (sclp_early_set_event_mask(sccb, EVTYP_SDIAS_MASK, EVTYP_SDIAS_MASK))
- goto out;
- size = sclp_early_hsa_size_init(sccb);
- /* First check for synchronous response (LPAR) */
- if (size)
- goto out_mask;
- if (!(S390_lowcore.ext_params & 1))
- sclp_early_wait_irq();
- size = sclp_early_hsa_copy_wait(sccb);
-out_mask:
- sclp_early_set_event_mask(sccb, 0, 0);
-out:
- raw_local_irq_restore(flags);
- if (size > 0)
- sclp.hsa_size = size;
-}
-
static void __init sclp_early_console_detect(struct init_sccb *sccb)
{
if (sccb->header.response_code != 0x20)
@@ -262,7 +146,6 @@ void __init sclp_early_detect(void)
sclp_early_facilities_detect(sccb);
sclp_early_init_core_info(sccb);
- sclp_early_hsa_size_detect(sccb);
/*
* Turn off SCLP event notifications. Also save remote masks in the
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index eceba3858cef..387c114ded3f 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -9,9 +9,13 @@
#include <asm/lowcore.h>
#include <asm/ebcdic.h>
#include <asm/irq.h>
+#include <asm/sections.h>
+#include <asm/mem_detect.h>
#include "sclp.h"
#include "sclp_rw.h"
+static struct read_info_sccb __bootdata(sclp_info_sccb);
+static int __bootdata(sclp_info_sccb_valid);
char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data);
int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
/*
@@ -210,11 +214,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
* Output one or more lines of text on the SCLP console (VT220 and /
* or line-mode).
*/
-void __sclp_early_printk(const char *str, unsigned int len)
+void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
{
int have_linemode, have_vt220;
- if (sclp_init_state != sclp_init_state_uninitialized)
+ if (!force && sclp_init_state != sclp_init_state_uninitialized)
return;
if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
return;
@@ -227,5 +231,122 @@ void __sclp_early_printk(const char *str, unsigned int len)
void sclp_early_printk(const char *str)
{
- __sclp_early_printk(str, strlen(str));
+ __sclp_early_printk(str, strlen(str), 0);
+}
+
+void sclp_early_printk_force(const char *str)
+{
+ __sclp_early_printk(str, strlen(str), 1);
+}
+
+int __init sclp_early_read_info(void)
+{
+ int i;
+ struct read_info_sccb *sccb = &sclp_info_sccb;
+ sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
+ SCLP_CMDW_READ_SCP_INFO};
+
+ for (i = 0; i < ARRAY_SIZE(commands); i++) {
+ memset(sccb, 0, sizeof(*sccb));
+ sccb->header.length = sizeof(*sccb);
+ sccb->header.function_code = 0x80;
+ sccb->header.control_mask[2] = 0x80;
+ if (sclp_early_cmd(commands[i], sccb))
+ break;
+ if (sccb->header.response_code == 0x10) {
+ sclp_info_sccb_valid = 1;
+ return 0;
+ }
+ if (sccb->header.response_code != 0x1f0)
+ break;
+ }
+ return -EIO;
+}
+
+int __init sclp_early_get_info(struct read_info_sccb *info)
+{
+ if (!sclp_info_sccb_valid)
+ return -EIO;
+
+ *info = sclp_info_sccb;
+ return 0;
+}
+
+int __init sclp_early_get_memsize(unsigned long *mem)
+{
+ unsigned long rnmax;
+ unsigned long rnsize;
+ struct read_info_sccb *sccb = &sclp_info_sccb;
+
+ if (!sclp_info_sccb_valid)
+ return -EIO;
+
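+ /* a zero rnmax/rnsize means the value is in the wider rnmax2/rnsize2 field */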
+ rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+ rnsize = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
+ rnsize <<= 20;
+ *mem = rnsize * rnmax;
+ return 0;
+}
+
+int __init sclp_early_get_hsa_size(unsigned long *hsa_size)
+{
+ if (!sclp_info_sccb_valid)
+ return -EIO;
+
+ *hsa_size = 0;
+ if (sclp_info_sccb.hsa_size)
+ *hsa_size = (sclp_info_sccb.hsa_size - 1) * PAGE_SIZE;
+ return 0;
+}
+
+#define SCLP_STORAGE_INFO_FACILITY 0x0000400000000000UL
+
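+/* weak empty stub for builds without the early memory detection code */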
+void __weak __init add_mem_detect_block(u64 start, u64 end) {}
+int __init sclp_early_read_storage_info(void)
+{
+ struct read_storage_sccb *sccb = (struct read_storage_sccb *)&sclp_early_sccb;
+ int rc, id, max_id = 0;
+ unsigned long rn, rzm;
+ sclp_cmdw_t command;
+ u16 sn;
+
+ if (!sclp_info_sccb_valid)
+ return -EIO;
+
+ if (!(sclp_info_sccb.facilities & SCLP_STORAGE_INFO_FACILITY))
+ return -EOPNOTSUPP;
+
+ rzm = sclp_info_sccb.rnsize ?: sclp_info_sccb.rnsize2;
+ rzm <<= 20;
+
+ for (id = 0; id <= max_id; id++) {
+ memset(sclp_early_sccb, 0, sizeof(sclp_early_sccb));
+ sccb->header.length = sizeof(sclp_early_sccb);
+ command = SCLP_CMDW_READ_STORAGE_INFO | (id << 8);
+ rc = sclp_early_cmd(command, sccb);
+ if (rc)
+ goto fail;
+
+ max_id = sccb->max_id;
+ switch (sccb->header.response_code) {
+ case 0x0010:
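+ /* a valid entry holds the storage increment number in its upper 16 bits */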
+ for (sn = 0; sn < sccb->assigned; sn++) {
+ if (!sccb->entries[sn])
+ continue;
+ rn = sccb->entries[sn] >> 16;
+ add_mem_detect_block((rn - 1) * rzm, rn * rzm);
+ }
+ break;
+ case 0x0310:
+ case 0x0410:
+ break;
+ default:
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ mem_detect.count = 0;
+ return -EIO;
}
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
index e7c84a4e5eb5..995e9196852e 100644
--- a/drivers/s390/char/sclp_pci.c
+++ b/drivers/s390/char/sclp_pci.c
@@ -24,6 +24,7 @@
#define SCLP_ATYPE_PCI 2
+#define SCLP_ERRNOTIFY_AQ_RESET 0
#define SCLP_ERRNOTIFY_AQ_REPAIR 1
#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
@@ -111,9 +112,14 @@ static int sclp_pci_check_report(struct zpci_report_error_header *report)
if (report->version != 1)
return -EINVAL;
- if (report->action != SCLP_ERRNOTIFY_AQ_REPAIR &&
- report->action != SCLP_ERRNOTIFY_AQ_INFO_LOG)
+ switch (report->action) {
+ case SCLP_ERRNOTIFY_AQ_RESET:
+ case SCLP_ERRNOTIFY_AQ_REPAIR:
+ case SCLP_ERRNOTIFY_AQ_INFO_LOG:
+ break;
+ default:
return -EINVAL;
+ }
if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
return -EINVAL;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index cdcde18e7220..4554cdf4d6bd 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -971,7 +971,7 @@ tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
snprintf(exception, BUFSIZE, "Data degraded");
break;
case 0x03:
- snprintf(exception, BUFSIZE, "Data degraded in partion %i",
+ snprintf(exception, BUFSIZE, "Data degraded in partition %i",
sense->fmt.f70.mp);
break;
case 0x04:
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 069b9ef08206..58333cb4503f 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -153,7 +153,7 @@ static struct vmlogrdr_priv_t sys_ser[] = {
}
};
-#define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))
+#define MAXMINOR ARRAY_SIZE(sys_ser)
static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 93b2862bd3fa..4ebf6d4fc66c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -608,6 +608,36 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
}
EXPORT_SYMBOL(ccwgroup_driver_unregister);
+static int __ccwgroupdev_check_busid(struct device *dev, void *id)
+{
+ char *bus_id = id;
+
+ return (strcmp(bus_id, dev_name(dev)) == 0);
+}
+
+/**
+ * get_ccwgroupdev_by_busid() - obtain device from a bus id
+ * @gdrv: driver the device is owned by
+ * @bus_id: bus id of the device to be searched
+ *
+ * This function searches all devices owned by @gdrv for a device with a bus
+ * id matching @bus_id.
+ * Returns:
+ * If a match is found, the reference count of the device is increased
+ * and the device is returned; else %NULL is returned.
+ */
+struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
+ char *bus_id)
+{
+ struct device *dev;
+
+ dev = driver_find_device(&gdrv->driver, NULL, bus_id,
+ __ccwgroupdev_check_busid);
+
+ return dev ? to_ccwgroupdev(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(get_ccwgroupdev_by_busid);
+
/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
* @cdev: ccw device to be probed
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9c7d9da42ba0..9537e656e927 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -595,19 +595,11 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
return 0;
}
-static inline int contains_aobs(struct qdio_q *q)
-{
- return !q->is_input_q && q->u.out.use_cq;
-}
-
static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
unsigned char state = 0;
int j, b = start;
- if (!contains_aobs(q))
- return;
-
for (j = 0; j < count; ++j) {
get_buf_state(q, b, &state, 0);
if (state == SLSB_P_OUTPUT_PENDING) {
@@ -618,8 +610,6 @@ static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
q->u.out.sbal_state[b].flags |=
QDIO_OUTBUF_STATE_FLAG_PENDING;
q->u.out.aobs[b] = NULL;
- } else if (state == SLSB_P_OUTPUT_EMPTY) {
- q->u.out.sbal_state[b].aob = NULL;
}
b = next_buf(b);
}
@@ -638,7 +628,6 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
- q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
WARN_ON_ONCE(phys_aob & 0xFF);
@@ -666,10 +655,10 @@ static void qdio_kick_handler(struct qdio_q *q)
qperf_inc(q, outbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
start, count);
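+		/* AOBs are only used on outbound queues backed by a completion queue */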
+ if (q->u.out.use_cq)
+ qdio_handle_aobs(q, start, count);
}
- qdio_handle_aobs(q, start, count);
-
q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
q->irq_ptr->int_parm);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 78f1be41b05e..e324d890a4f6 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -27,7 +27,6 @@ struct qaob *qdio_allocate_aob(void)
{
return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
-EXPORT_SYMBOL_GPL(qdio_allocate_aob);
void qdio_release_aob(struct qaob *aob)
{
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index dbe7c7ac9ac8..fd77e46eb3b2 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -163,7 +163,7 @@ static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
for (i = 0; i < pat->pat_nr; i++, pa++)
for (j = 0; j < pa->pa_nr; j++)
- if (pa->pa_iova_pfn[i] == iova_pfn)
+ if (pa->pa_iova_pfn[j] == iova_pfn)
return true;
return false;
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 770fa9cfc310..f47d16b5810b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -22,6 +22,7 @@
#include "vfio_ccw_private.h"
struct workqueue_struct *vfio_ccw_work_q;
+struct kmem_cache *vfio_ccw_io_region;
/*
* Helpers
@@ -79,7 +80,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
cp_update_scsw(&private->cp, &irb->scsw);
cp_free(&private->cp);
}
- memcpy(private->io_region.irb_area, irb, sizeof(*irb));
+ memcpy(private->io_region->irb_area, irb, sizeof(*irb));
if (private->io_trigger)
eventfd_signal(private->io_trigger, 1);
@@ -114,6 +115,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private)
return -ENOMEM;
+
+ private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
+ GFP_KERNEL | GFP_DMA);
+ if (!private->io_region) {
+ kfree(private);
+ return -ENOMEM;
+ }
+
private->sch = sch;
dev_set_drvdata(&sch->dev, private);
@@ -139,6 +148,7 @@ out_disable:
cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private);
return ret;
}
@@ -153,6 +163,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
dev_set_drvdata(&sch->dev, NULL);
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private);
return 0;
@@ -232,10 +243,20 @@ static int __init vfio_ccw_sch_init(void)
if (!vfio_ccw_work_q)
return -ENOMEM;
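+	/* io_region is copied to/from user space, hence the usercopy whitelist */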
+ vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
+ sizeof(struct ccw_io_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_io_region), NULL);
+ if (!vfio_ccw_io_region) {
+ destroy_workqueue(vfio_ccw_work_q);
+ return -ENOMEM;
+ }
+
isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) {
isc_unregister(VFIO_CCW_ISC);
+ kmem_cache_destroy(vfio_ccw_io_region);
destroy_workqueue(vfio_ccw_work_q);
}
@@ -246,6 +267,7 @@ static void __exit vfio_ccw_sch_exit(void)
{
css_driver_unregister(&vfio_ccw_sch_driver);
isc_unregister(VFIO_CCW_ISC);
+ kmem_cache_destroy(vfio_ccw_io_region);
destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 797a82731159..f94aa01f9c36 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -93,13 +93,13 @@ static void fsm_io_error(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
- private->io_region.ret_code = -EIO;
+ private->io_region->ret_code = -EIO;
}
static void fsm_io_busy(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
- private->io_region.ret_code = -EBUSY;
+ private->io_region->ret_code = -EBUSY;
}
static void fsm_disabled_irq(struct vfio_ccw_private *private,
@@ -126,7 +126,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
{
union orb *orb;
union scsw *scsw = &private->scsw;
- struct ccw_io_region *io_region = &private->io_region;
+ struct ccw_io_region *io_region = private->io_region;
struct mdev_device *mdev = private->mdev;
char *errstr = "request";
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 41eeb57d68a3..f673e106c041 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -174,7 +174,7 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
return -EINVAL;
private = dev_get_drvdata(mdev_parent_dev(mdev));
- region = &private->io_region;
+ region = private->io_region;
if (copy_to_user(buf, (void *)region + *ppos, count))
return -EFAULT;
@@ -196,7 +196,7 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
if (private->state != VFIO_CCW_STATE_IDLE)
return -EACCES;
- region = &private->io_region;
+ region = private->io_region;
if (copy_from_user((void *)region + *ppos, buf, count))
return -EFAULT;
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 78a66d96756b..078e46f9623d 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -41,7 +41,7 @@ struct vfio_ccw_private {
atomic_t avail;
struct mdev_device *mdev;
struct notifier_block nb;
- struct ccw_io_region io_region;
+ struct ccw_io_region *io_region;
struct channel_program cp;
struct irb irb;
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index b59af548ed1c..6ccd93d0b1cb 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -10,8 +10,12 @@ zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
-obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
+obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
# pkey kernel module
pkey-objs := pkey_api.o
obj-$(CONFIG_PKEY) += pkey.o
+
+# adjunct processor matrix
+vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
+obj-$(CONFIG_VFIO_AP) += vfio_ap.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f039266b275d..048665e4f13d 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -65,12 +65,11 @@ static struct device *ap_root_device;
DEFINE_SPINLOCK(ap_list_lock);
LIST_HEAD(ap_card_list);
-/* Default permissions (card and domain masking) */
-static struct ap_perms {
- DECLARE_BITMAP(apm, AP_DEVICES);
- DECLARE_BITMAP(aqm, AP_DOMAINS);
-} ap_perms;
-static DEFINE_MUTEX(ap_perms_mutex);
+/* Default permissions (ioctl, card and domain masking) */
+struct ap_perms ap_perms;
+EXPORT_SYMBOL(ap_perms);
+DEFINE_MUTEX(ap_perms_mutex);
+EXPORT_SYMBOL(ap_perms_mutex);
static struct ap_config_info *ap_configuration;
static bool initialised;
@@ -944,21 +943,9 @@ static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
return 0;
}
-/*
- * process_mask_arg() - parse a bitmap string and clear/set the
- * bits in the bitmap accordingly. The string may be given as
- * absolute value, a hex string like 0x1F2E3D4C5B6A" simple over-
- * writing the current content of the bitmap. Or as relative string
- * like "+1-16,-32,-0x40,+128" where only single bits or ranges of
- * bits are cleared or set. Distinction is done based on the very
- * first character which may be '+' or '-' for the relative string
- * and othewise assume to be an absolute value string. If parsing fails
- * a negative errno value is returned. All arguments and bitmaps are
- * big endian order.
- */
-static int process_mask_arg(const char *str,
- unsigned long *bitmap, int bits,
- struct mutex *lock)
+int ap_parse_mask_str(const char *str,
+ unsigned long *bitmap, int bits,
+ struct mutex *lock)
{
unsigned long *newmap, size;
int rc;
@@ -989,6 +976,7 @@ static int process_mask_arg(const char *str,
kfree(newmap);
return rc;
}
+EXPORT_SYMBOL(ap_parse_mask_str);
/*
* AP bus attributes.
@@ -1049,6 +1037,21 @@ static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
static BUS_ATTR_RO(ap_usage_domain_mask);
+static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
+{
+ if (!ap_configuration) /* QCI not supported */
+ return snprintf(buf, PAGE_SIZE, "not supported\n");
+
+ return snprintf(buf, PAGE_SIZE,
+ "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_configuration->apm[0], ap_configuration->apm[1],
+ ap_configuration->apm[2], ap_configuration->apm[3],
+ ap_configuration->apm[4], ap_configuration->apm[5],
+ ap_configuration->apm[6], ap_configuration->apm[7]);
+}
+
+static BUS_ATTR_RO(ap_adapter_mask);
+
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
@@ -1161,7 +1164,7 @@ static ssize_t apmask_store(struct bus_type *bus, const char *buf,
{
int rc;
- rc = process_mask_arg(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex);
+ rc = ap_parse_mask_str(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex);
if (rc)
return rc;
@@ -1192,7 +1195,7 @@ static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
{
int rc;
- rc = process_mask_arg(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex);
+ rc = ap_parse_mask_str(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex);
if (rc)
return rc;
@@ -1207,6 +1210,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
&bus_attr_ap_domain,
&bus_attr_ap_control_domain_mask,
&bus_attr_ap_usage_domain_mask,
+ &bus_attr_ap_adapter_mask,
&bus_attr_config_time,
&bus_attr_poll_thread,
&bus_attr_ap_interrupts,
@@ -1218,11 +1222,10 @@ static struct bus_attribute *const ap_bus_attrs[] = {
};
/**
- * ap_select_domain(): Select an AP domain.
- *
- * Pick one of the 16 AP domains.
+ * ap_select_domain(): Select an AP domain if possible and it has not
+ * already been done.
*/
-static int ap_select_domain(void)
+static void ap_select_domain(void)
{
int count, max_count, best_domain;
struct ap_queue_status status;
@@ -1237,7 +1240,7 @@ static int ap_select_domain(void)
if (ap_domain_index >= 0) {
/* Domain has already been selected. */
spin_unlock_bh(&ap_domain_lock);
- return 0;
+ return;
}
best_domain = -1;
max_count = 0;
@@ -1264,11 +1267,8 @@ static int ap_select_domain(void)
if (best_domain >= 0) {
ap_domain_index = best_domain;
AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index);
- spin_unlock_bh(&ap_domain_lock);
- return 0;
}
spin_unlock_bh(&ap_domain_lock);
- return -ENODEV;
}
/*
@@ -1346,8 +1346,7 @@ static void ap_scan_bus(struct work_struct *unused)
AP_DBF(DBF_DEBUG, "%s running\n", __func__);
ap_query_configuration(ap_configuration);
- if (ap_select_domain() != 0)
- goto out;
+ ap_select_domain();
for (id = 0; id < AP_DEVICES; id++) {
/* check if device is registered */
@@ -1467,12 +1466,11 @@ static void ap_scan_bus(struct work_struct *unused)
}
} /* end device loop */
- if (defdomdevs < 1)
+ if (ap_domain_index >= 0 && defdomdevs < 1)
AP_DBF(DBF_INFO,
"no queue device with default domain %d available\n",
ap_domain_index);
-out:
mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
}
@@ -1496,21 +1494,22 @@ static int __init ap_debug_init(void)
static void __init ap_perms_init(void)
{
/* all resources useable if no kernel parameter string given */
+ memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
/* apm kernel parameter string */
if (apm_str) {
memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
- process_mask_arg(apm_str, ap_perms.apm, AP_DEVICES,
- &ap_perms_mutex);
+ ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
+ &ap_perms_mutex);
}
/* aqm kernel parameter string */
if (aqm_str) {
memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
- process_mask_arg(aqm_str, ap_perms.aqm, AP_DOMAINS,
- &ap_perms_mutex);
+ ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
+ &ap_perms_mutex);
}
}
@@ -1533,7 +1532,7 @@ static int __init ap_module_init(void)
return -ENODEV;
}
- /* set up the AP permissions (ap and aq masks) */
+ /* set up the AP permissions (ioctls, ap and aq masks) */
ap_perms_init();
/* Get AP configuration data if available */
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 5246cd8c16a6..3eed1b36c876 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -20,6 +20,7 @@
#define AP_DEVICES 256 /* Number of AP devices. */
#define AP_DOMAINS 256 /* Number of AP domains. */
+#define AP_IOCTLS 256 /* Number of ioctls. */
#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
@@ -257,6 +258,14 @@ void ap_queue_resume(struct ap_device *ap_dev);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);
+struct ap_perms {
+ unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)];
+ unsigned long apm[BITS_TO_LONGS(AP_DEVICES)];
+ unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)];
+};
+extern struct ap_perms ap_perms;
+extern struct mutex ap_perms_mutex;
+
/*
* check APQN for owned/reserved by ap bus and default driver(s).
* Checks if this APQN is or will be in use by the ap bus
@@ -280,4 +289,20 @@ int ap_owned_by_def_drv(int card, int queue);
int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
unsigned long *aqm);
+/*
+ * ap_parse_mask_str() - helper function to parse a bitmap string
+ * and clear/set the bits in the bitmap accordingly. The string may be
+ * given as an absolute value, a hex string like "0x1F2E3D4C5B6A", simply
+ * overwriting the current content of the bitmap, or as a relative string
+ * like "+1-16,-32,-0x40,+128" where only single bits or ranges of
+ * bits are cleared or set. Distinction is done based on the very
+ * first character which may be '+' or '-' for the relative string
+ * and othewise assume to be an absolute value string. If parsing fails
+ * a negative errno value is returned. All arguments and bitmaps are
+ * big endian order.
+ */
+int ap_parse_mask_str(const char *str,
+ unsigned long *bitmap, int bits,
+ struct mutex *lock);
+
#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 1b4001e0285f..2f92bbed4bf6 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -16,9 +16,12 @@
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/cpufeature.h>
#include <asm/zcrypt.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>
+#include <crypto/aes.h>
#include "zcrypt_api.h"
@@ -32,6 +35,9 @@ MODULE_DESCRIPTION("s390 protected key interface");
/* Size of vardata block used for some of the cca requests/replies */
#define VARDATASIZE 4096
+/* mask of available pckmo subfunctions, fetched once at module init */
+static cpacf_mask_t pckmo_functions;
+
/*
* debug feature data and functions
*/
@@ -55,6 +61,24 @@ static void __exit pkey_debug_exit(void)
debug_unregister(debug_info);
}
+/* Key token types */
+#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */
+#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal key token */
+
+/* For TOKTYPE_NON_CCA: */
+#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */
+
+/* For TOKTYPE_CCA_INTERNAL: */
+#define TOKVER_CCA_AES 0x04 /* CCA AES key token */
+
+/* header part of a key token */
+struct keytoken_header {
+ u8 type; /* one of the TOKTYPE values */
+ u8 res0[3];
+ u8 version; /* one of the TOKVER values */
+ u8 res1[3];
+} __packed;
+
/* inside view of a secure key token (only type 0x01 version 0x04) */
struct secaeskeytoken {
u8 type; /* 0x01 for internal key token */
@@ -71,6 +95,17 @@ struct secaeskeytoken {
u8 tvv[4]; /* token validation value */
} __packed;
+/* inside view of a protected key token (only type 0x00 version 0x01) */
+struct protaeskeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* should be 0x01 for protected AES key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in protkey[] */
+ u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
+} __packed;
+
/*
* Simple check if the token is a valid CCA secure AES key
* token. If keybitsize is given, the bitsize of the key is
@@ -80,16 +115,16 @@ static int check_secaeskeytoken(const u8 *token, int keybitsize)
{
struct secaeskeytoken *t = (struct secaeskeytoken *) token;
- if (t->type != 0x01) {
+ if (t->type != TOKTYPE_CCA_INTERNAL) {
DEBUG_ERR(
- "%s secure token check failed, type mismatch 0x%02x != 0x01\n",
- __func__, (int) t->type);
+ "%s secure token check failed, type mismatch 0x%02x != 0x%02x\n",
+ __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
return -EINVAL;
}
- if (t->version != 0x04) {
+ if (t->version != TOKVER_CCA_AES) {
DEBUG_ERR(
- "%s secure token check failed, version mismatch 0x%02x != 0x04\n",
- __func__, (int) t->version);
+ "%s secure token check failed, version mismatch 0x%02x != 0x%02x\n",
+ __func__, (int) t->version, TOKVER_CCA_AES);
return -EINVAL;
}
if (keybitsize > 0 && t->bitsize != keybitsize) {
@@ -647,6 +682,16 @@ int pkey_clr2protkey(u32 keytype,
return -EINVAL;
}
+ /*
+ * Check if the needed pckmo subfunction is available.
+ * These subfunctions can be enabled/disabled by customers
+ * in the LPAR profile or may even change on the fly.
+ */
+ if (!cpacf_test_func(&pckmo_functions, fc)) {
+ DEBUG_ERR("%s pckmo functions not available\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
/* prepare param block */
memset(paramblock, 0, sizeof(paramblock));
memcpy(paramblock, clrkey->clrkey, keysize);
@@ -1052,6 +1097,166 @@ out:
EXPORT_SYMBOL(pkey_verifykey);
/*
+ * Generate a random protected key
+ */
+int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey)
+{
+ struct pkey_clrkey clrkey;
+ int keysize;
+ int rc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ keysize = 16;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ keysize = 24;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ keysize = 32;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
+ keytype);
+ return -EINVAL;
+ }
+
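+ /*
+ * Strategy: convert a throw-away clear key into a protected key, then
+ * overwrite the wrapped key part with fresh random bytes so that the
+ * resulting protected key never had a known clear value.
+ */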
+ /* generate a dummy random clear key */
+ get_random_bytes(clrkey.clrkey, keysize);
+
+ /* convert it to a dummy protected key */
+ rc = pkey_clr2protkey(keytype, &clrkey, protkey);
+ if (rc)
+ return rc;
+
+ /* replace the key part of the protected key with random bytes */
+ get_random_bytes(protkey->protkey, keysize);
+
+ return 0;
+}
+EXPORT_SYMBOL(pkey_genprotkey);
+
+/*
+ * Verify if a protected key is still valid
+ */
+int pkey_verifyprotkey(const struct pkey_protkey *protkey)
+{
+ unsigned long fc;
+ struct {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[MAXPROTKEYSIZE];
+ } param;
+ u8 null_msg[AES_BLOCK_SIZE];
+ u8 dest_buf[AES_BLOCK_SIZE];
+ unsigned int k;
+
+ switch (protkey->type) {
+ case PKEY_KEYTYPE_AES_128:
+ fc = CPACF_KMC_PAES_128;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ fc = CPACF_KMC_PAES_192;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ fc = CPACF_KMC_PAES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
+ protkey->type);
+ return -EINVAL;
+ }
+
+ memset(null_msg, 0, sizeof(null_msg));
+
+ memset(param.iv, 0, sizeof(param.iv));
+ memcpy(param.key, protkey->protkey, sizeof(param.key));
+
+ k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
+ sizeof(null_msg));
+ if (k != sizeof(null_msg)) {
+ DEBUG_ERR("%s protected key is not valid\n", __func__);
+ return -EKEYREJECTED;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pkey_verifyprotkey);
+
+/*
+ * Transform a non-CCA key token into a protected key
+ */
+static int pkey_nonccatok2pkey(const __u8 *key, __u32 keylen,
+ struct pkey_protkey *protkey)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct protaeskeytoken *t;
+
+ switch (hdr->version) {
+ case TOKVER_PROTECTED_KEY:
+ if (keylen != sizeof(struct protaeskeytoken))
+ return -EINVAL;
+
+ t = (struct protaeskeytoken *)key;
+ protkey->len = t->len;
+ protkey->type = t->keytype;
+ memcpy(protkey->protkey, t->protkey,
+ sizeof(protkey->protkey));
+
+ return pkey_verifyprotkey(protkey);
+ default:
+ DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
+ __func__, hdr->version);
+ return -EINVAL;
+ }
+}
+
+/*
+ * Transform a CCA internal key token into a protected key
+ */
+static int pkey_ccainttok2pkey(const __u8 *key, __u32 keylen,
+ struct pkey_protkey *protkey)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ if (keylen != sizeof(struct secaeskeytoken))
+ return -EINVAL;
+
+ return pkey_skey2pkey((struct pkey_seckey *)key,
+ protkey);
+ default:
+ DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n",
+ __func__, hdr->version);
+ return -EINVAL;
+ }
+}
+
+/*
+ * Transform a key blob (of any type) into a protected key
+ */
+int pkey_keyblob2pkey(const __u8 *key, __u32 keylen,
+ struct pkey_protkey *protkey)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ switch (hdr->type) {
+ case TOKTYPE_NON_CCA:
+ return pkey_nonccatok2pkey(key, keylen, protkey);
+ case TOKTYPE_CCA_INTERNAL:
+ return pkey_ccainttok2pkey(key, keylen, protkey);
+ default:
+ DEBUG_ERR("%s unknown/unsupported blob type %d\n", __func__,
+ hdr->type);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(pkey_keyblob2pkey);
+
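+/*
+ * A minimal user space sketch for the PKEY_KBLOB2PROTK ioctl handled
+ * further down (error handling omitted; keyblob/keybloblen are
+ * illustrative placeholders):
+ *
+ *   struct pkey_kblob2pkey ktp;
+ *   int fd = open("/dev/pkey", O_RDWR);
+ *
+ *   ktp.key = keyblob;
+ *   ktp.keylen = keybloblen;
+ *   rc = ioctl(fd, PKEY_KBLOB2PROTK, &ktp);
+ *   (on success, ktp.protkey holds the resulting protected key)
+ */
+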
+/*
* File io functions
*/
@@ -1167,6 +1372,58 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
break;
}
+ case PKEY_GENPROTK: {
+ struct pkey_genprotk __user *ugp = (void __user *) arg;
+ struct pkey_genprotk kgp;
+
+ if (copy_from_user(&kgp, ugp, sizeof(kgp)))
+ return -EFAULT;
+ rc = pkey_genprotkey(kgp.keytype, &kgp.protkey);
+ DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc);
+ if (rc)
+ break;
+ if (copy_to_user(ugp, &kgp, sizeof(kgp)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_VERIFYPROTK: {
+ struct pkey_verifyprotk __user *uvp = (void __user *) arg;
+ struct pkey_verifyprotk kvp;
+
+ if (copy_from_user(&kvp, uvp, sizeof(kvp)))
+ return -EFAULT;
+ rc = pkey_verifyprotkey(&kvp.protkey);
+ DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc);
+ break;
+ }
+ case PKEY_KBLOB2PROTK: {
+ struct pkey_kblob2pkey __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey ktp;
+ __u8 __user *ukey;
+ __u8 *kkey;
+
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ if (ktp.keylen < MINKEYBLOBSIZE ||
+ ktp.keylen > MAXKEYBLOBSIZE)
+ return -EINVAL;
+ ukey = ktp.key;
+ kkey = kmalloc(ktp.keylen, GFP_KERNEL);
+ if (kkey == NULL)
+ return -ENOMEM;
+ if (copy_from_user(kkey, ukey, ktp.keylen)) {
+ kfree(kkey);
+ return -EFAULT;
+ }
+ rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
+ DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+ kfree(kkey);
+ if (rc)
+ break;
+ if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ return -EFAULT;
+ break;
+ }
default:
/* unknown/unsupported ioctl cmd */
return -ENOTTY;
@@ -1178,6 +1435,236 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
/*
* Sysfs and file io operations
*/
+
+/*
+ * Sysfs attribute read function for all protected key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
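+ *
+ * For illustration (hypothetical shell usage; the path assumes the
+ * pkey misc device's default sysfs location), a whole blob can be
+ * fetched with one sufficiently large read:
+ *
+ *   dd if=/sys/devices/virtual/misc/pkey/protkey/protkey_aes_128 \
+ *      bs=4096 count=1 of=blob.bin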
+ */
+static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+ loff_t off, size_t count)
+{
+ struct protaeskeytoken protkeytoken;
+ struct pkey_protkey protkey;
+ int rc;
+
+ if (off != 0 || count < sizeof(protkeytoken))
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * sizeof(protkeytoken))
+ return -EINVAL;
+
+ memset(&protkeytoken, 0, sizeof(protkeytoken));
+ protkeytoken.type = TOKTYPE_NON_CCA;
+ protkeytoken.version = TOKVER_PROTECTED_KEY;
+ protkeytoken.keytype = keytype;
+
+ rc = pkey_genprotkey(protkeytoken.keytype, &protkey);
+ if (rc)
+ return rc;
+
+ protkeytoken.len = protkey.len;
+ memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+ memcpy(buf, &protkeytoken, sizeof(protkeytoken));
+
+ if (is_xts) {
+ rc = pkey_genprotkey(protkeytoken.keytype, &protkey);
+ if (rc)
+ return rc;
+
+ protkeytoken.len = protkey.len;
+ memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+ memcpy(buf + sizeof(protkeytoken), &protkeytoken,
+ sizeof(protkeytoken));
+
+ return 2 * sizeof(protkeytoken);
+ }
+
+ return sizeof(protkeytoken);
+}
+
+static ssize_t protkey_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
+static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
+
+static struct bin_attribute *protkey_attrs[] = {
+ &bin_attr_protkey_aes_128,
+ &bin_attr_protkey_aes_192,
+ &bin_attr_protkey_aes_256,
+ &bin_attr_protkey_aes_128_xts,
+ &bin_attr_protkey_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group protkey_attr_group = {
+ .name = "protkey",
+ .bin_attrs = protkey_attrs,
+};
+
+/*
+ * Sysfs attribute read function for all secure key ccadata binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
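+ *
+ * Each read returns a freshly generated random secure key token (two
+ * concatenated tokens for the xts variants), so as with the protkey
+ * attributes above the whole blob must be fetched in a single read.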
+ */
+static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+ loff_t off, size_t count)
+{
+ int rc;
+
+ if (off != 0 || count < sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * sizeof(struct secaeskeytoken))
+ return -EINVAL;
+
+ rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf);
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ buf += sizeof(struct pkey_seckey);
+ rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf);
+ if (rc)
+ return rc;
+
+ return 2 * sizeof(struct secaeskeytoken);
+ }
+
+ return sizeof(struct secaeskeytoken);
+}
+
+static ssize_t ccadata_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
+static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
+
+static struct bin_attribute *ccadata_attrs[] = {
+ &bin_attr_ccadata_aes_128,
+ &bin_attr_ccadata_aes_192,
+ &bin_attr_ccadata_aes_256,
+ &bin_attr_ccadata_aes_128_xts,
+ &bin_attr_ccadata_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ccadata_attr_group = {
+ .name = "ccadata",
+ .bin_attrs = ccadata_attrs,
+};
+
+static const struct attribute_group *pkey_attr_groups[] = {
+ &protkey_attr_group,
+ &ccadata_attr_group,
+ NULL,
+};
+
static const struct file_operations pkey_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
@@ -1190,6 +1677,7 @@ static struct miscdevice pkey_dev = {
.minor = MISC_DYNAMIC_MINOR,
.mode = 0666,
.fops = &pkey_fops,
+ .groups = pkey_attr_groups,
};
/*
@@ -1197,14 +1685,23 @@ static struct miscdevice pkey_dev = {
*/
static int __init pkey_init(void)
{
- cpacf_mask_t pckmo_functions;
+ cpacf_mask_t kmc_functions;
- /* check for pckmo instructions available */
+ /*
+ * The pckmo instruction should be available - even if we don't
+ * actually invoke it. This instruction comes with MSA 3 which
+ * is also the minimum level for the kmc instructions which
+ * are able to work with protected keys.
+ */
if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
return -EOPNOTSUPP;
- if (!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_128_KEY) ||
- !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_192_KEY) ||
- !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_256_KEY))
+
+ /* check for kmc instructions available */
+ if (!cpacf_query(CPACF_KMC, &kmc_functions))
+ return -EOPNOTSUPP;
+ if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
+ !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
+ !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256))
return -EOPNOTSUPP;
pkey_debug_init();
@@ -1222,5 +1719,5 @@ static void __exit pkey_exit(void)
pkey_debug_exit();
}
-module_init(pkey_init);
+module_cpu_feature_match(MSA, pkey_init);
module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
new file mode 100644
index 000000000000..7667b38728f0
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VFIO based AP device driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "vfio_ap_private.h"
+
+#define VFIO_AP_ROOT_NAME "vfio_ap"
+#define VFIO_AP_DEV_TYPE_NAME "ap_matrix"
+#define VFIO_AP_DEV_NAME "matrix"
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
+MODULE_LICENSE("GPL v2");
+
+static struct ap_driver vfio_ap_drv;
+
+static struct device_type vfio_ap_dev_type = {
+ .name = VFIO_AP_DEV_TYPE_NAME,
+};
+
+struct ap_matrix_dev *matrix_dev;
+
+/*
+ * Only type 10 adapters (CEX4 and later) are supported
+ * by the AP matrix device driver.
+ */
+static struct ap_device_id ap_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX5,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
+
+static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
+{
+ return 0;
+}
+
+static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
+{
+ /* Nothing to do yet */
+}
+
+static void vfio_ap_matrix_dev_release(struct device *dev)
+{
+ struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev);
+
+ kfree(matrix_dev);
+}
+
+static int vfio_ap_matrix_dev_create(void)
+{
+ int ret;
+ struct device *root_device;
+
+ root_device = root_device_register(VFIO_AP_ROOT_NAME);
+ if (IS_ERR(root_device))
+ return PTR_ERR(root_device);
+
+ matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL);
+ if (!matrix_dev) {
+ ret = -ENOMEM;
+ goto matrix_alloc_err;
+ }
+
+ /* Fill in config info via PQAP(QCI), if available */
+ if (test_facility(12)) {
+ ret = ap_qci(&matrix_dev->info);
+ if (ret) {
+ /* device not registered yet, release callback will not run */
+ kfree(matrix_dev);
+ goto matrix_alloc_err;
+ }
+ }
+
+ mutex_init(&matrix_dev->lock);
+ INIT_LIST_HEAD(&matrix_dev->mdev_list);
+
+ matrix_dev->device.type = &vfio_ap_dev_type;
+ dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
+ matrix_dev->device.parent = root_device;
+ matrix_dev->device.release = vfio_ap_matrix_dev_release;
+ matrix_dev->device.driver = &vfio_ap_drv.driver;
+
+ ret = device_register(&matrix_dev->device);
+ if (ret)
+ goto matrix_reg_err;
+
+ return 0;
+
+matrix_reg_err:
+ put_device(&matrix_dev->device);
+matrix_alloc_err:
+ root_device_unregister(root_device);
+
+ return ret;
+}
+
+static void vfio_ap_matrix_dev_destroy(void)
+{
+ device_unregister(&matrix_dev->device);
+ root_device_unregister(matrix_dev->device.parent);
+}
+
+static int __init vfio_ap_init(void)
+{
+ int ret;
+
+ /* If there are no AP instructions, there is nothing to pass through. */
+ if (!ap_instructions_available())
+ return -ENODEV;
+
+ ret = vfio_ap_matrix_dev_create();
+ if (ret)
+ return ret;
+
+ memset(&vfio_ap_drv, 0, sizeof(vfio_ap_drv));
+ vfio_ap_drv.probe = vfio_ap_queue_dev_probe;
+ vfio_ap_drv.remove = vfio_ap_queue_dev_remove;
+ vfio_ap_drv.ids = ap_queue_ids;
+
+ ret = ap_driver_register(&vfio_ap_drv, THIS_MODULE, VFIO_AP_DRV_NAME);
+ if (ret) {
+ vfio_ap_matrix_dev_destroy();
+ return ret;
+ }
+
+ ret = vfio_ap_mdev_register();
+ if (ret) {
+ ap_driver_unregister(&vfio_ap_drv);
+ vfio_ap_matrix_dev_destroy();
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit vfio_ap_exit(void)
+{
+ vfio_ap_mdev_unregister();
+ ap_driver_unregister(&vfio_ap_drv);
+ vfio_ap_matrix_dev_destroy();
+}
+
+module_init(vfio_ap_init);
+module_exit(vfio_ap_exit);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
new file mode 100644
index 000000000000..272ef427dcc0
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -0,0 +1,939 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Adjunct processor matrix VFIO device driver callbacks.
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Halil Pasic <pasic@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
+ */
+#include <linux/string.h>
+#include <linux/vfio.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <asm/kvm.h>
+#include <asm/zcrypt.h>
+
+#include "vfio_ap_private.h"
+
+#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
+#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
+
+static void vfio_ap_matrix_init(struct ap_config_info *info,
+ struct ap_matrix *matrix)
+{
+ matrix->apm_max = info->apxa ? info->Na : 63;
+ matrix->aqm_max = info->apxa ? info->Nd : 15;
+ matrix->adm_max = info->apxa ? info->Nd : 15;
+}
+
+static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
+ return -EPERM;
+
+ matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
+ if (!matrix_mdev) {
+ atomic_inc(&matrix_dev->available_instances);
+ return -ENOMEM;
+ }
+
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
+ mdev_set_drvdata(mdev, matrix_mdev);
+ mutex_lock(&matrix_dev->lock);
+ list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
+ mutex_unlock(&matrix_dev->lock);
+
+ return 0;
+}
+
+static int vfio_ap_mdev_remove(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ mutex_lock(&matrix_dev->lock);
+ list_del(&matrix_mdev->node);
+ mutex_unlock(&matrix_dev->lock);
+
+ kfree(matrix_mdev);
+ mdev_set_drvdata(mdev, NULL);
+ atomic_inc(&matrix_dev->available_instances);
+
+ return 0;
+}
+
+static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
+}
+
+static MDEV_TYPE_ATTR_RO(name);
+
+static ssize_t available_instances_show(struct kobject *kobj,
+ struct device *dev, char *buf)
+{
+ return sprintf(buf, "%d\n",
+ atomic_read(&matrix_dev->available_instances));
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
+}
+
+static MDEV_TYPE_ATTR_RO(device_api);
+
+static struct attribute *vfio_ap_mdev_type_attrs[] = {
+ &mdev_type_attr_name.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_available_instances.attr,
+ NULL,
+};
+
+static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
+ .name = VFIO_AP_MDEV_TYPE_HWVIRT,
+ .attrs = vfio_ap_mdev_type_attrs,
+};
+
+static struct attribute_group *vfio_ap_mdev_type_groups[] = {
+ &vfio_ap_mdev_hwvirt_type_group,
+ NULL,
+};
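+
+/*
+ * With the "passthrough" type registered above, a mediated matrix device
+ * can be created from user space through the usual mdev sysfs interface,
+ * e.g. (hypothetical UUID):
+ *
+ *   uuid=$(uuidgen)
+ *   echo $uuid > /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/create
+ */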
+
+struct vfio_ap_queue_reserved {
+ unsigned long *apid;
+ unsigned long *apqi;
+ bool reserved;
+};
+
+/**
+ * vfio_ap_has_queue
+ *
+ * @dev: an AP queue device
+ * @data: a struct vfio_ap_queue_reserved reference
+ *
+ * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
+ * apid or apqi specified in @data:
+ *
+ * - If @data contains both an apid and apqi value, then @data will be flagged
+ * as reserved if the APID and APQI fields for the AP queue device matches
+ *
+ * - If @data contains only an apid value, @data will be flagged as
+ * reserved if the APID field in the AP queue device matches
+ *
+ * - If @data contains only an apqi value, @data will be flagged as
+ * reserved if the APQI field in the AP queue device matches
+ *
+ * Returns 0 to indicate that the input to the function succeeded. Returns
+ * @data does not contain either an apid or apqi.
+ */
+static int vfio_ap_has_queue(struct device *dev, void *data)
+{
+ struct vfio_ap_queue_reserved *qres = data;
+ struct ap_queue *ap_queue = to_ap_queue(dev);
+ ap_qid_t qid;
+ unsigned long id;
+
+ if (qres->apid && qres->apqi) {
+ qid = AP_MKQID(*qres->apid, *qres->apqi);
+ if (qid == ap_queue->qid)
+ qres->reserved = true;
+ } else if (qres->apid && !qres->apqi) {
+ id = AP_QID_CARD(ap_queue->qid);
+ if (id == *qres->apid)
+ qres->reserved = true;
+ } else if (!qres->apid && qres->apqi) {
+ id = AP_QID_QUEUE(ap_queue->qid);
+ if (id == *qres->apqi)
+ qres->reserved = true;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vfio_ap_verify_queue_reserved
+ *
+ * @apid: an AP adapter ID
+ * @apqi: an AP queue index
+ *
+ * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
+ * driver according to the following rules:
+ *
+ * - If both @apid and @apqi are not NULL, then there must be an AP queue
+ * device bound to the vfio_ap driver with the APQN identified by @apid and
+ * @apqi
+ *
+ * - If only @apid is not NULL, then there must be an AP queue device bound
+ * to the vfio_ap driver with an APQN containing @apid
+ *
+ * - If only @apqi is not NULL, then there must be an AP queue device bound
+ * to the vfio_ap driver with an APQN containing @apqi
+ *
+ * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
+ */
+static int vfio_ap_verify_queue_reserved(unsigned long *apid,
+ unsigned long *apqi)
+{
+ int ret;
+ struct vfio_ap_queue_reserved qres;
+
+ qres.apid = apid;
+ qres.apqi = apqi;
+ qres.reserved = false;
+
+ ret = driver_for_each_device(matrix_dev->device.driver, NULL, &qres,
+ vfio_ap_has_queue);
+ if (ret)
+ return ret;
+
+ if (qres.reserved)
+ return 0;
+
+ return -EADDRNOTAVAIL;
+}
+
+static int
+vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ int ret;
+ unsigned long apqi;
+ unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
+
+ if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
+ return vfio_ap_verify_queue_reserved(&apid, NULL);
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
+ ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vfio_ap_mdev_verify_no_sharing
+ *
+ * Verifies that the APQNs derived from the cross product of the AP adapter IDs
+ * and AP queue indexes comprising the AP matrix are not configured for another
+ * mediated device. AP queue sharing is not allowed.
+ *
+ * @matrix_mdev: the mediated matrix device
+ *
+ * Returns 0 if the APQNs are not shared; otherwise, returns -EADDRINUSE.
+ */
+static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
+{
+ struct ap_matrix_mdev *lstdev;
+ DECLARE_BITMAP(apm, AP_DEVICES);
+ DECLARE_BITMAP(aqm, AP_DOMAINS);
+
+ list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
+ if (matrix_mdev == lstdev)
+ continue;
+
+ memset(apm, 0, sizeof(apm));
+ memset(aqm, 0, sizeof(aqm));
+
+ /*
+ * We work on full longs, as we can only exclude the leftover
+ * bits in non-inverse order. The leftover is all zeros.
+ */
+ if (!bitmap_and(apm, matrix_mdev->matrix.apm,
+ lstdev->matrix.apm, AP_DEVICES))
+ continue;
+
+ if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
+ lstdev->matrix.aqm, AP_DOMAINS))
+ continue;
+
+ return -EADDRINUSE;
+ }
+
+ return 0;
+}
+
+/**
+ * assign_adapter_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_adapter attribute
+ * @buf: a buffer containing the AP adapter number (APID) to
+ * be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APID from @buf and sets the corresponding bit in the mediated
+ * matrix device's APM.
+ *
+ * Returns the number of bytes processed if the APID is valid; otherwise,
+ * returns one of the following errors:
+ *
+ * 1. -EINVAL
+ * The APID is not a valid number
+ *
+ * 2. -ENODEV
+ * The APID exceeds the maximum value configured for the system
+ *
+ * 3. -EADDRNOTAVAIL
+ * An APQN derived from the cross product of the APID being assigned
+ * and the APQIs previously assigned is not bound to the vfio_ap device
+ * driver; or, if no APQIs have yet been assigned, the APID is not
+ * contained in an APQN bound to the vfio_ap device driver.
+ *
+ * 4. -EADDRINUSE
+ * An APQN derived from the cross product of the APID being assigned
+ * and the APQIs previously assigned is being used by another mediated
+ * matrix device
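+ *
+ * For example (hypothetical mdev UUID), adapter 5 can be assigned with:
+ *
+ *   echo 5 > /sys/devices/vfio_ap/matrix/$uuid/assign_adapter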
+ */
+static ssize_t assign_adapter_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow assignment of adapter */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apid);
+ if (ret)
+ return ret;
+
+ if (apid > matrix_mdev->matrix.apm_max)
+ return -ENODEV;
+
+ /*
+ * Set the bit in the AP mask (APM) corresponding to the AP adapter
+ * number (APID). The bits in the mask, from most significant to least
+ * significant bit, correspond to APIDs 0-255.
+ */
+ mutex_lock(&matrix_dev->lock);
+
+ ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
+ if (ret)
+ goto done;
+
+ set_bit_inv(apid, matrix_mdev->matrix.apm);
+
+ ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
+ if (ret)
+ goto share_err;
+
+ ret = count;
+ goto done;
+
+share_err:
+ clear_bit_inv(apid, matrix_mdev->matrix.apm);
+done:
+ mutex_unlock(&matrix_dev->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(assign_adapter);
+
+/**
+ * unassign_adapter_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_adapter attribute
+ * @buf: a buffer containing the adapter number (APID) to be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APID from @buf and clears the corresponding bit in the mediated
+ * matrix device's APM.
+ *
+ * Returns the number of bytes processed if the APID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the APID is not a number
+ * -ENODEV if the APID exceeds the maximum value configured for the
+ * system
+ */
+static ssize_t unassign_adapter_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow un-assignment of adapter */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apid);
+ if (ret)
+ return ret;
+
+ if (apid > matrix_mdev->matrix.apm_max)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_adapter);
+
+static int
+vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+{
+ int ret;
+ unsigned long apid;
+ unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
+
+ if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
+ return vfio_ap_verify_queue_reserved(NULL, &apqi);
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
+ ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * assign_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_domain attribute
+ * @buf: a buffer containing the AP queue index (APQI) of the domain to
+ * be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APQI from @buf and sets the corresponding bit in the mediated
+ * matrix device's AQM.
+ *
+ * Returns the number of bytes processed if the APQI is valid; otherwise returns
+ * one of the following errors:
+ *
+ * 1. -EINVAL
+ * The APQI is not a valid number
+ *
+ * 2. -ENODEV
+ * The APQI exceeds the maximum value configured for the system
+ *
+ * 3. -EADDRNOTAVAIL
+ * An APQN derived from the cross product of the APQI being assigned
+ * and the APIDs previously assigned is not bound to the vfio_ap device
+ * driver; or, if no APIDs have yet been assigned, the APQI is not
+ * contained in an APQN bound to the vfio_ap device driver.
+ *
+ * 4. -EADDRINUSE
+ * An APQN derived from the cross product of the APQI being assigned
+ * and the APIDs previously assigned is being used by another mediated
+ * matrix device
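+ *
+ * For example (hypothetical mdev UUID), usage domain 0x47 can be
+ * assigned with:
+ *
+ *   echo 0x47 > /sys/devices/vfio_ap/matrix/$uuid/assign_domain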
+ */
+static ssize_t assign_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apqi;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
+
+ /* If the guest is running, disallow assignment of domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apqi);
+ if (ret)
+ return ret;
+ if (apqi > max_apqi)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+
+ ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
+ if (ret)
+ goto done;
+
+ set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+
+ ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
+ if (ret)
+ goto share_err;
+
+ ret = count;
+ goto done;
+
+share_err:
+ clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
+done:
+ mutex_unlock(&matrix_dev->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(assign_domain);
+
+/**
+ * unassign_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_domain attribute
+ * @buf: a buffer containing the AP queue index (APQI) of the domain to
+ * be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APQI from @buf and clears the corresponding bit in the
+ * mediated matrix device's AQM.
+ *
+ * Returns the number of bytes processed if the APQI is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the APQI is not a number
+ * -ENODEV if the APQI exceeds the maximum value configured for the system
+ */
+static ssize_t unassign_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apqi;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow un-assignment of domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apqi);
+ if (ret)
+ return ret;
+
+ if (apqi > matrix_mdev->matrix.aqm_max)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_domain);
+
+/**
+ * assign_control_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_control_domain attribute
+ * @buf: a buffer containing the domain ID to be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the domain ID from @buf and sets the corresponding bit in the mediated
+ * matrix device's ADM.
+ *
+ * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the ID is not a number
+ * -ENODEV if the ID exceeds the maximum value configured for the system
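+ *
+ * For example (hypothetical mdev UUID), control domain 0x47 can be
+ * assigned with:
+ *
+ *   echo 0x47 > /sys/devices/vfio_ap/matrix/$uuid/assign_control_domain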
+ */
+static ssize_t assign_control_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long id;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow assignment of control domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &id);
+ if (ret)
+ return ret;
+
+ if (id > matrix_mdev->matrix.adm_max)
+ return -ENODEV;
+
+ /*
+ * Set the bit in the ADM (bitmask) corresponding to the AP control
+ * domain number (id). The bits in the mask, from most significant to
+ * least significant, correspond to IDs 0 up to one less than the
+ * number of control domains that can be assigned.
+ */
+ mutex_lock(&matrix_dev->lock);
+ set_bit_inv(id, matrix_mdev->matrix.adm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(assign_control_domain);
+
+/**
+ * unassign_control_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_control_domain attribute
+ * @buf: a buffer containing the domain ID to be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the domain ID from @buf and clears the corresponding bit in the
+ * mediated matrix device's ADM.
+ *
+ * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the ID is not a number
+ * -ENODEV if the ID exceeds the maximum value configured for the system
+ */
+static ssize_t unassign_control_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long domid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_domid = matrix_mdev->matrix.adm_max;
+
+ /* If the guest is running, disallow un-assignment of control domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &domid);
+ if (ret)
+ return ret;
+ if (domid > max_domid)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv(domid, matrix_mdev->matrix.adm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_control_domain);
+
+static ssize_t control_domains_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ unsigned long id;
+ int nchars = 0;
+ int n;
+ char *bufpos = buf;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_domid = matrix_mdev->matrix.adm_max;
+
+ mutex_lock(&matrix_dev->lock);
+ for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
+ n = sprintf(bufpos, "%04lx\n", id);
+ bufpos += n;
+ nchars += n;
+ }
+ mutex_unlock(&matrix_dev->lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(control_domains);
+
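+/*
+ * Shows the assigned matrix as one APQN per line in the form "xx.yyyy"
+ * (adapter.domain), e.g. "05.0047". If only adapters or only domains
+ * have been assigned so far, the missing half is left out, giving
+ * "05." or ".0047" respectively.
+ */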
+static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ char *bufpos = buf;
+ unsigned long apid;
+ unsigned long apqi;
+ unsigned long apid1;
+ unsigned long apqi1;
+ unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
+ unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
+ int nchars = 0;
+ int n;
+
+ apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
+ apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
+
+ mutex_lock(&matrix_dev->lock);
+
+ if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ naqm_bits) {
+ n = sprintf(bufpos, "%02lx.%04lx\n", apid,
+ apqi);
+ bufpos += n;
+ nchars += n;
+ }
+ }
+ } else if (apid1 < napm_bits) {
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ n = sprintf(bufpos, "%02lx.\n", apid);
+ bufpos += n;
+ nchars += n;
+ }
+ } else if (apqi1 < naqm_bits) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
+ n = sprintf(bufpos, ".%04lx\n", apqi);
+ bufpos += n;
+ nchars += n;
+ }
+ }
+
+ mutex_unlock(&matrix_dev->lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(matrix);
+
+static struct attribute *vfio_ap_mdev_attrs[] = {
+ &dev_attr_assign_adapter.attr,
+ &dev_attr_unassign_adapter.attr,
+ &dev_attr_assign_domain.attr,
+ &dev_attr_unassign_domain.attr,
+ &dev_attr_assign_control_domain.attr,
+ &dev_attr_unassign_control_domain.attr,
+ &dev_attr_control_domains.attr,
+ &dev_attr_matrix.attr,
+ NULL,
+};
+
+static struct attribute_group vfio_ap_mdev_attr_group = {
+ .attrs = vfio_ap_mdev_attrs
+};
+
+static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
+ &vfio_ap_mdev_attr_group,
+ NULL
+};
+
+/**
+ * vfio_ap_mdev_set_kvm
+ *
+ * @matrix_mdev: a mediated matrix device
+ * @kvm: reference to KVM instance
+ *
+ * Verifies no other mediated matrix device has @kvm and sets a reference to
+ * it in @matrix_mdev->kvm.
+ *
+ * Returns 0 if no other mediated matrix device has a reference to @kvm;
+ * otherwise, returns -EPERM.
+ */
+static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
+ struct kvm *kvm)
+{
+ struct ap_matrix_mdev *m;
+
+ mutex_lock(&matrix_dev->lock);
+
+ list_for_each_entry(m, &matrix_dev->mdev_list, node) {
+ if ((m != matrix_mdev) && (m->kvm == kvm)) {
+ mutex_unlock(&matrix_dev->lock);
+ return -EPERM;
+ }
+ }
+
+ matrix_mdev->kvm = kvm;
+ mutex_unlock(&matrix_dev->lock);
+
+ return 0;
+}
+
+static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int ret;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if (action != VFIO_GROUP_NOTIFY_SET_KVM)
+ return NOTIFY_OK;
+
+ matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+
+ if (!data) {
+ matrix_mdev->kvm = NULL;
+ return NOTIFY_OK;
+ }
+
+ ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
+ if (ret)
+ return NOTIFY_DONE;
+
+ /* If there is no CRYCB pointer, then we can't copy the masks */
+ if (!matrix_mdev->kvm->arch.crypto.crycbd)
+ return NOTIFY_DONE;
+
+ kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm,
+ matrix_mdev->matrix.adm);
+
+ return NOTIFY_OK;
+}
+
+static int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+ unsigned int retry)
+{
+ struct ap_queue_status status;
+
+ do {
+ status = ap_zapq(AP_MKQID(apid, apqi));
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ return 0;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ default:
+ /* things are really broken, give up */
+ return -EIO;
+ }
+ } while (retry--);
+
+ return -EBUSY;
+}
+
+static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
+{
+ int ret;
+ int rc = 0;
+ unsigned long apid, apqi;
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.apm_max + 1) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ matrix_mdev->matrix.aqm_max + 1) {
+ ret = vfio_ap_mdev_reset_queue(apid, apqi, 1);
+ /*
+ * Regardless of whether a queue turns out to be busy, or
+ * is not operational, we need to continue resetting
+ * the remaining queues.
+ */
+ if (ret)
+ rc = ret;
+ }
+ }
+
+ return rc;
+}
+
+static int vfio_ap_mdev_open(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long events;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
+ events = VFIO_GROUP_NOTIFY_SET_KVM;
+
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &events, &matrix_mdev->group_notifier);
+ if (ret) {
+ module_put(THIS_MODULE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vfio_ap_mdev_release(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ if (matrix_mdev->kvm)
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+
+ vfio_ap_mdev_reset_queues(mdev);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &matrix_mdev->group_notifier);
+ matrix_mdev->kvm = NULL;
+ module_put(THIS_MODULE);
+}
+
+static int vfio_ap_mdev_get_device_info(unsigned long arg)
+{
+ unsigned long minsz;
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
+ info.num_regions = 0;
+ info.num_irqs = 0;
+
+ return copy_to_user((void __user *)arg, &info, minsz);
+}
+
+static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ ret = vfio_ap_mdev_get_device_info(arg);
+ break;
+ case VFIO_DEVICE_RESET:
+ ret = vfio_ap_mdev_reset_queues(mdev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct mdev_parent_ops vfio_ap_matrix_ops = {
+ .owner = THIS_MODULE,
+ .supported_type_groups = vfio_ap_mdev_type_groups,
+ .mdev_attr_groups = vfio_ap_mdev_attr_groups,
+ .create = vfio_ap_mdev_create,
+ .remove = vfio_ap_mdev_remove,
+ .open = vfio_ap_mdev_open,
+ .release = vfio_ap_mdev_release,
+ .ioctl = vfio_ap_mdev_ioctl,
+};
+
+int vfio_ap_mdev_register(void)
+{
+ atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);
+
+ return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
+}
+
+void vfio_ap_mdev_unregister(void)
+{
+ mdev_unregister_device(&matrix_dev->device);
+}
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
new file mode 100644
index 000000000000..5675492233c7
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Private data and functions for adjunct processor VFIO matrix driver.
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Halil Pasic <pasic@linux.ibm.com>
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef _VFIO_AP_PRIVATE_H_
+#define _VFIO_AP_PRIVATE_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mdev.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+
+#include "ap_bus.h"
+
+#define VFIO_AP_MODULE_NAME "vfio_ap"
+#define VFIO_AP_DRV_NAME "vfio_ap"
+
+/**
+ * ap_matrix_dev - the AP matrix device structure
+ * @device: generic device structure associated with the AP matrix device
+ * @available_instances: number of mediated matrix devices that can be created
+ * @info: the struct containing the output from the PQAP(QCI) instruction
+ * @mdev_list: the list of mediated matrix devices created
+ * @lock: mutex for locking the AP matrix device. This lock will be
+ * taken every time we fiddle with state managed by the vfio_ap
+ * driver, be it using @mdev_list or writing the state of a
+ * single ap_matrix_mdev device. It's quite coarse but we don't
+ * expect much contention.
+ */
+struct ap_matrix_dev {
+ struct device device;
+ atomic_t available_instances;
+ struct ap_config_info info;
+ struct list_head mdev_list;
+ struct mutex lock;
+};
+
+extern struct ap_matrix_dev *matrix_dev;
+
+/**
+ * The AP matrix comprises three bit masks identifying the adapters,
+ * queues (domains) and control domains that belong to an AP matrix. The bits in
+ * each mask, from most significant to least significant bit, correspond to IDs
+ * 0 to 255. When a bit is set, the corresponding ID belongs to the matrix.
+ *
+ * @apm_max: max adapter number in @apm
+ * @apm: identifies the AP adapters in the matrix
+ * @aqm_max: max domain number in @aqm
+ * @aqm: identifies the AP queues (domains) in the matrix
+ * @adm_max: max domain number in @adm
+ * @adm: identifies the AP control domains in the matrix
+ */
+struct ap_matrix {
+ unsigned long apm_max;
+ DECLARE_BITMAP(apm, 256);
+ unsigned long aqm_max;
+ DECLARE_BITMAP(aqm, 256);
+ unsigned long adm_max;
+ DECLARE_BITMAP(adm, 256);
+};
+
+/**
+ * struct ap_matrix_mdev - the mediated matrix device structure
+ * @node: allows the ap_matrix_mdev struct to be added to a list
+ * @matrix: the adapters, usage domains and control domains assigned to the
+ * mediated matrix device.
+ * @group_notifier: notifier block used for specifying callback function for
+ * handling the VFIO_GROUP_NOTIFY_SET_KVM event
+ * @kvm: the struct holding guest's state
+ */
+struct ap_matrix_mdev {
+ struct list_head node;
+ struct ap_matrix matrix;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+};
+
+extern int vfio_ap_mdev_register(void);
+extern void vfio_ap_mdev_unregister(void);
+
+#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index e6854127b434..eb93c2d27d0a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * zcrypt 2.1.0
- *
- * Copyright IBM Corp. 2001, 2012
+ * Copyright IBM Corp. 2001, 2018
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -11,6 +9,7 @@
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
+ * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
*/
#include <linux/module.h>
@@ -24,6 +23,8 @@
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/ctype.h>
#include <asm/debug.h>
#define CREATE_TRACE_POINTS
@@ -108,6 +109,375 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
}
EXPORT_SYMBOL(zcrypt_msgtype);
+/*
+ * Multi device nodes extension functions.
+ */
+
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+
+struct zcdn_device;
+
+static struct class *zcrypt_class;
+static dev_t zcrypt_devt;
+static struct cdev zcrypt_cdev;
+
+struct zcdn_device {
+ struct device device;
+ struct ap_perms perms;
+};
+
+#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)
+
+#define ZCDN_MAX_NAME 32
+
+static int zcdn_create(const char *name);
+static int zcdn_destroy(const char *name);
+
+/* helper function, matches the name for find_zcdndev_by_name() */
+static int __match_zcdn_name(struct device *dev, const void *data)
+{
+ return strcmp(dev_name(dev), (const char *)data) == 0;
+}
+
+/* helper function, matches the devt value for find_zcdndev_by_devt() */
+static int __match_zcdn_devt(struct device *dev, const void *data)
+{
+ return dev->devt == *((dev_t *) data);
+}
+
+/*
+ * Find zcdn device by name.
+ * Returns reference to the zcdn device which needs to be released
+ * with put_device() after use.
+ */
+static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
+{
+ struct device *dev =
+ class_find_device(zcrypt_class, NULL,
+ (void *) name,
+ __match_zcdn_name);
+
+ return dev ? to_zcdn_dev(dev) : NULL;
+}
+
+/*
+ * Find zcdn device by devt value.
+ * Returns reference to the zcdn device which needs to be released
+ * with put_device() after use.
+ */
+static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
+{
+ struct device *dev =
+ class_find_device(zcrypt_class, NULL,
+ (void *) &devt,
+ __match_zcdn_devt);
+
+ return dev ? to_zcdn_dev(dev) : NULL;
+}
+
+static ssize_t ioctlmask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+
+ buf[0] = '0';
+ buf[1] = 'x';
+ for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
+ snprintf(buf + 2 + 2 * i * sizeof(long),
+ PAGE_SIZE - 2 - 2 * i * sizeof(long),
+ "%016lx", zcdndev->perms.ioctlm[i]);
+ buf[2 + 2 * i * sizeof(long)] = '\n';
+ buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+ rc = 2 + 2 * i * sizeof(long) + 1;
+
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+
+static ssize_t ioctlmask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
+ AP_IOCTLS, &ap_perms_mutex);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(ioctlmask);
+
+static ssize_t apmask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+
+ buf[0] = '0';
+ buf[1] = 'x';
+ for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
+ snprintf(buf + 2 + 2 * i * sizeof(long),
+ PAGE_SIZE - 2 - 2 * i * sizeof(long),
+ "%016lx", zcdndev->perms.apm[i]);
+ buf[2 + 2 * i * sizeof(long)] = '\n';
+ buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+ rc = 2 + 2 * i * sizeof(long) + 1;
+
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+
+static ssize_t apmask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
+ AP_DEVICES, &ap_perms_mutex);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(apmask);
+
+static ssize_t aqmask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+
+ buf[0] = '0';
+ buf[1] = 'x';
+ for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
+ snprintf(buf + 2 + 2 * i * sizeof(long),
+ PAGE_SIZE - 2 - 2 * i * sizeof(long),
+ "%016lx", zcdndev->perms.aqm[i]);
+ buf[2 + 2 * i * sizeof(long)] = '\n';
+ buf[2 + 2 * i * sizeof(long) + 1] = '\0';
+ rc = 2 + 2 * i * sizeof(long) + 1;
+
+ mutex_unlock(&ap_perms_mutex);
+
+ return rc;
+}
+
+static ssize_t aqmask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
+ AP_DOMAINS, &ap_perms_mutex);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(aqmask);
+
+static struct attribute *zcdn_dev_attrs[] = {
+ &dev_attr_ioctlmask.attr,
+ &dev_attr_apmask.attr,
+ &dev_attr_aqmask.attr,
+ NULL
+};
+
+static struct attribute_group zcdn_dev_attr_group = {
+ .attrs = zcdn_dev_attrs
+};
+
+static const struct attribute_group *zcdn_dev_attr_groups[] = {
+ &zcdn_dev_attr_group,
+ NULL
+};
+
+static ssize_t zcdn_create_store(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ char name[ZCDN_MAX_NAME];
+
+ strncpy(name, skip_spaces(buf), sizeof(name));
+ name[sizeof(name) - 1] = '\0';
+
+ rc = zcdn_create(strim(name));
+
+ return rc ? rc : count;
+}
+
+static const struct class_attribute class_attr_zcdn_create =
+ __ATTR(create, 0600, NULL, zcdn_create_store);
+
+static ssize_t zcdn_destroy_store(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ char name[ZCDN_MAX_NAME];
+
+ strncpy(name, skip_spaces(buf), sizeof(name));
+ name[sizeof(name) - 1] = '\0';
+
+ rc = zcdn_destroy(strim(name));
+
+ return rc ? rc : count;
+}
+
+static const struct class_attribute class_attr_zcdn_destroy =
+ __ATTR(destroy, 0600, NULL, zcdn_destroy_store);
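+
+/*
+ * Together with the per-device mask attributes above, these class
+ * attributes let an administrator build a restricted zcrypt node from
+ * user space, e.g. (hypothetical node name and mask values):
+ *
+ *   echo "my_zcrypt" > /sys/class/zcrypt/create
+ *   echo "0x8000000000000000" > /sys/class/zcrypt/my_zcrypt/apmask
+ *   echo "0x8000000000000000" > /sys/class/zcrypt/my_zcrypt/aqmask
+ *   echo "my_zcrypt" > /sys/class/zcrypt/destroy
+ */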
+
+static void zcdn_device_release(struct device *dev)
+{
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ ZCRYPT_DBF(DBF_INFO, "releasing zcdn device %d:%d\n",
+ MAJOR(dev->devt), MINOR(dev->devt));
+
+ kfree(zcdndev);
+}
+
+static int zcdn_create(const char *name)
+{
+ dev_t devt;
+ int i, rc = 0;
+ char nodename[ZCDN_MAX_NAME];
+ struct zcdn_device *zcdndev;
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+
+ /* check if device node with this name already exists */
+ if (name[0]) {
+ zcdndev = find_zcdndev_by_name(name);
+ if (zcdndev) {
+ put_device(&zcdndev->device);
+ rc = -EEXIST;
+ goto unlockout;
+ }
+ }
+
+ /* find an unused minor number */
+ for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
+ devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
+ zcdndev = find_zcdndev_by_devt(devt);
+ if (zcdndev)
+ put_device(&zcdndev->device);
+ else
+ break;
+ }
+ if (i == ZCRYPT_MAX_MINOR_NODES) {
+ rc = -ENOSPC;
+ goto unlockout;
+ }
+
+ /* alloc and prepare a new zcdn device */
+ zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
+ if (!zcdndev) {
+ rc = -ENOMEM;
+ goto unlockout;
+ }
+ zcdndev->device.release = zcdn_device_release;
+ zcdndev->device.class = zcrypt_class;
+ zcdndev->device.devt = devt;
+ zcdndev->device.groups = zcdn_dev_attr_groups;
+ if (name[0])
+ strncpy(nodename, name, sizeof(nodename));
+ else
+ snprintf(nodename, sizeof(nodename),
+ ZCRYPT_NAME "_%d", (int) MINOR(devt));
+ nodename[sizeof(nodename)-1] = '\0';
+ if (dev_set_name(&zcdndev->device, nodename)) {
+ rc = -EINVAL;
+ goto unlockout;
+ }
+ rc = device_register(&zcdndev->device);
+ if (rc) {
+ put_device(&zcdndev->device);
+ goto unlockout;
+ }
+
+ ZCRYPT_DBF(DBF_INFO, "created zcdn device %d:%d\n",
+ MAJOR(devt), MINOR(devt));
+
+unlockout:
+ mutex_unlock(&ap_perms_mutex);
+ return rc;
+}
+
+static int zcdn_destroy(const char *name)
+{
+ int rc = 0;
+ struct zcdn_device *zcdndev;
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+
+ /* try to find this zcdn device */
+ zcdndev = find_zcdndev_by_name(name);
+ if (!zcdndev) {
+ rc = -ENOENT;
+ goto unlockout;
+ }
+
+ /*
+ * The zcdn device is not hard destroyed. It is subject to
+ * reference counting and thus just needs to be unregistered.
+ */
+ put_device(&zcdndev->device);
+ device_unregister(&zcdndev->device);
+
+unlockout:
+ mutex_unlock(&ap_perms_mutex);
+ return rc;
+}
+
+static void zcdn_destroy_all(void)
+{
+ int i;
+ dev_t devt;
+ struct zcdn_device *zcdndev;
+
+ mutex_lock(&ap_perms_mutex);
+ for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
+ devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
+ zcdndev = find_zcdndev_by_devt(devt);
+ if (zcdndev) {
+ put_device(&zcdndev->device);
+ device_unregister(&zcdndev->device);
+ }
+ }
+ mutex_unlock(&ap_perms_mutex);
+}
+
+#endif
+
/**
* zcrypt_read (): Not supported beyond zcrypt 1.3.1.
*
@@ -137,6 +507,23 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
*/
static int zcrypt_open(struct inode *inode, struct file *filp)
{
+ struct ap_perms *perms = &ap_perms;
+
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+ if (filp->f_inode->i_cdev == &zcrypt_cdev) {
+ struct zcdn_device *zcdndev;
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+ zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
+ /* find returns a reference, no get_device() needed */
+ mutex_unlock(&ap_perms_mutex);
+ if (zcdndev)
+ perms = &zcdndev->perms;
+ }
+#endif
+ filp->private_data = (void *) perms;
+
atomic_inc(&zcrypt_open_count);
return nonseekable_open(inode, filp);
}
@@ -148,10 +535,55 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
*/
static int zcrypt_release(struct inode *inode, struct file *filp)
{
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+ if (filp->f_inode->i_cdev == &zcrypt_cdev) {
+ struct zcdn_device *zcdndev;
+
+ if (mutex_lock_interruptible(&ap_perms_mutex))
+ return -ERESTARTSYS;
+ zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
+ mutex_unlock(&ap_perms_mutex);
+ if (zcdndev) {
+ /* 2 puts here: one for find, one for open */
+ put_device(&zcdndev->device);
+ put_device(&zcdndev->device);
+ }
+ }
+#endif
+
atomic_dec(&zcrypt_open_count);
return 0;
}
+static inline int zcrypt_check_ioctl(struct ap_perms *perms,
+ unsigned int cmd)
+{
+ int rc = -EPERM;
+ int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;
+
+ if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
+ if (test_bit_inv(ioctlnr, perms->ioctlm))
+ rc = 0;
+ }
+
+ if (rc)
+ ZCRYPT_DBF(DBF_WARN,
+ "ioctl check failed: ioctlnr=0x%04x rc=%d\n",
+ ioctlnr, rc);
+
+ return rc;
+}
+
+static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
+{
+ return test_bit_inv(card, perms->apm) ? true : false;
+}
+
+static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
+{
+ return test_bit_inv(queue, perms->aqm) ? true : false;
+}
+
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
unsigned int weight)
@@ -213,7 +645,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
/*
* zcrypt ioctls.
*/
-static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
+static long zcrypt_rsa_modexpo(struct ap_perms *perms,
+ struct ica_rsa_modexpo *mex)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
@@ -250,6 +683,9 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
if (zc->min_mod_size > mex->inputdatalength ||
zc->max_mod_size < mex->inputdatalength)
continue;
+ /* check if device node has admission for this card */
+ if (!zcrypt_check_card(perms, zc->card->id))
+ continue;
/* get weight index of the card device */
weight = zc->speed_rating[func_code];
if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
@@ -258,6 +694,10 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
/* check if device is online and eligible */
if (!zq->online || !zq->ops->rsa_modexpo)
continue;
+ /* check if device node has admission for this queue */
+ if (!zcrypt_check_queue(perms,
+ AP_QID_QUEUE(zq->queue->qid)))
+ continue;
if (zcrypt_queue_compare(zq, pref_zq,
weight, pref_weight))
continue;
@@ -287,7 +727,8 @@ out:
return rc;
}
-static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
+static long zcrypt_rsa_crt(struct ap_perms *perms,
+ struct ica_rsa_modexpo_crt *crt)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
@@ -324,6 +765,9 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
if (zc->min_mod_size > crt->inputdatalength ||
zc->max_mod_size < crt->inputdatalength)
continue;
+ /* check if device node has admission for this card */
+ if (!zcrypt_check_card(perms, zc->card->id))
+ continue;
/* get weight index of the card device */
weight = zc->speed_rating[func_code];
if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
@@ -332,6 +776,10 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
/* check if device is online and eligible */
if (!zq->online || !zq->ops->rsa_modexpo_crt)
continue;
+ /* check if device node has admission for this queue */
+ if (!zcrypt_check_queue(perms,
+ AP_QID_QUEUE(zq->queue->qid)))
+ continue;
if (zcrypt_queue_compare(zq, pref_zq,
weight, pref_weight))
continue;
@@ -361,7 +809,8 @@ out:
return rc;
}
-long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+static long _zcrypt_send_cprb(struct ap_perms *perms,
+ struct ica_xcRB *xcRB)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
@@ -373,6 +822,7 @@ long zcrypt_send_cprb(struct ica_xcRB *xcRB)
trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
+ xcRB->status = 0;
ap_init_message(&ap_msg);
rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
if (rc)
@@ -389,6 +839,9 @@ long zcrypt_send_cprb(struct ica_xcRB *xcRB)
if (xcRB->user_defined != AUTOSELECT &&
xcRB->user_defined != zc->card->id)
continue;
+ /* check if device node has admission for this card */
+ if (!zcrypt_check_card(perms, zc->card->id))
+ continue;
/* get weight index of the card device */
weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
@@ -400,6 +853,10 @@ long zcrypt_send_cprb(struct ica_xcRB *xcRB)
((*domain != (unsigned short) AUTOSELECT) &&
(*domain != AP_QID_QUEUE(zq->queue->qid))))
continue;
+ /* check if device node has admission for this queue */
+ if (!zcrypt_check_queue(perms,
+ AP_QID_QUEUE(zq->queue->qid)))
+ continue;
if (zcrypt_queue_compare(zq, pref_zq,
weight, pref_weight))
continue;
@@ -433,6 +890,11 @@ out:
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
+
+long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+{
+ return _zcrypt_send_cprb(&ap_perms, xcRB);
+}
EXPORT_SYMBOL(zcrypt_send_cprb);
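
Splitting out _zcrypt_send_cprb() keeps the exported symbol's signature unchanged for in-kernel users while the ioctl path threads the per-node permissions through; external callers implicitly get the global ap_perms and thus unrestricted device selection. A usage sketch:

	/* Sketch: a hypothetical in-kernel caller (e.g. the pkey code) is
	 * unaffected by zcdn filtering; the wrapper supplies &ap_perms. */
	struct ica_xcRB xcrb;

	prepare_cprb(&xcrb);		/* hypothetical setup helper */
	rc = zcrypt_send_cprb(&xcrb);	/* selects among all cards/queues */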
static bool is_desired_ep11_card(unsigned int dev_id,
@@ -459,7 +921,8 @@ static bool is_desired_ep11_queue(unsigned int dev_qid,
return false;
}
-static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
+ struct ep11_urb *xcrb)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
@@ -510,6 +973,9 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
if (targets &&
!is_desired_ep11_card(zc->card->id, target_num, targets))
continue;
+ /* check if device node has admission for this card */
+ if (!zcrypt_check_card(perms, zc->card->id))
+ continue;
/* get weight index of the card device */
weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
@@ -522,6 +988,10 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
!is_desired_ep11_queue(zq->queue->qid,
target_num, targets)))
continue;
+ /* check if device node has admission for this queue */
+ if (!zcrypt_check_queue(perms,
+ AP_QID_QUEUE(zq->queue->qid)))
+ continue;
if (zcrypt_queue_compare(zq, pref_zq,
weight, pref_weight))
continue;
@@ -788,7 +1258,13 @@ static int zcrypt_requestq_count(void)
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- int rc = 0;
+ int rc;
+ struct ap_perms *perms =
+ (struct ap_perms *) filp->private_data;
+
+ rc = zcrypt_check_ioctl(perms, cmd);
+ if (rc)
+ return rc;
switch (cmd) {
case ICARSAMODEXPO: {
@@ -798,12 +1274,12 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&mex, umex, sizeof(mex)))
return -EFAULT;
do {
- rc = zcrypt_rsa_modexpo(&mex);
+ rc = zcrypt_rsa_modexpo(perms, &mex);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_rsa_modexpo(&mex);
+ rc = zcrypt_rsa_modexpo(perms, &mex);
} while (rc == -EAGAIN);
if (rc) {
ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
@@ -818,12 +1294,12 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&crt, ucrt, sizeof(crt)))
return -EFAULT;
do {
- rc = zcrypt_rsa_crt(&crt);
+ rc = zcrypt_rsa_crt(perms, &crt);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_rsa_crt(&crt);
+ rc = zcrypt_rsa_crt(perms, &crt);
} while (rc == -EAGAIN);
if (rc) {
ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
@@ -838,15 +1314,16 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
return -EFAULT;
do {
- rc = zcrypt_send_cprb(&xcRB);
+ rc = _zcrypt_send_cprb(perms, &xcRB);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_send_cprb(&xcRB);
+ rc = _zcrypt_send_cprb(perms, &xcRB);
} while (rc == -EAGAIN);
if (rc)
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d\n", rc);
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
+ rc, xcRB.status);
if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
return -EFAULT;
return rc;
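
Each ioctl case uses the same two-stage retry: spin while the request distributor reports -EAGAIN (the message was bounced by a transiently failing queue), and after -ENODEV give the AP bus one chance to rescan for devices before a final retry round. In outline:

	/* Sketch of the retry idiom shared by all four request types;
	 * do_request() stands in for the respective zcrypt_* helper. */
	do {
		rc = do_request(perms, &req);
	} while (rc == -EAGAIN);
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = do_request(perms, &req);
		} while (rc == -EAGAIN);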
@@ -858,12 +1335,12 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
do {
- rc = zcrypt_send_ep11_cprb(&xcrb);
+ rc = zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_send_ep11_cprb(&xcrb);
+ rc = zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
@@ -989,8 +1466,8 @@ struct compat_ica_rsa_modexpo {
compat_uptr_t n_modulus;
};
-static long trans_modexpo32(struct file *filp, unsigned int cmd,
- unsigned long arg)
+static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
+ unsigned int cmd, unsigned long arg)
{
struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
struct compat_ica_rsa_modexpo mex32;
@@ -1006,12 +1483,12 @@ static long trans_modexpo32(struct file *filp, unsigned int cmd,
mex64.b_key = compat_ptr(mex32.b_key);
mex64.n_modulus = compat_ptr(mex32.n_modulus);
do {
- rc = zcrypt_rsa_modexpo(&mex64);
+ rc = zcrypt_rsa_modexpo(perms, &mex64);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_rsa_modexpo(&mex64);
+ rc = zcrypt_rsa_modexpo(perms, &mex64);
} while (rc == -EAGAIN);
if (rc)
return rc;
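
The compat handlers all follow one recipe: copy in the 32-bit layout, widen each compat_uptr_t with compat_ptr(), run the native worker, then copy the 32-bit-visible result fields back out. A condensed sketch with hypothetical structures:

	struct demo32 { compat_uptr_t data; u32 len; };	/* 32-bit ABI view */
	struct demo64 { void __user *data; u32 len; };	/* native layout */

	static long demo_trans32(struct ap_perms *perms, unsigned long arg)
	{
		struct demo32 __user *udata32 = compat_ptr(arg);
		struct demo32 d32;
		struct demo64 d64;

		if (copy_from_user(&d32, udata32, sizeof(d32)))
			return -EFAULT;
		d64.data = compat_ptr(d32.data);	/* widen the user pointer */
		d64.len = d32.len;
		return demo_request(perms, &d64);	/* hypothetical native path */
	}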
@@ -1031,8 +1508,8 @@ struct compat_ica_rsa_modexpo_crt {
compat_uptr_t u_mult_inv;
};
-static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
- unsigned long arg)
+static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
+ unsigned int cmd, unsigned long arg)
{
struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
struct compat_ica_rsa_modexpo_crt crt32;
@@ -1051,12 +1528,12 @@ static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
crt64.nq_prime = compat_ptr(crt32.nq_prime);
crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
do {
- rc = zcrypt_rsa_crt(&crt64);
+ rc = zcrypt_rsa_crt(perms, &crt64);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_rsa_crt(&crt64);
+ rc = zcrypt_rsa_crt(perms, &crt64);
} while (rc == -EAGAIN);
if (rc)
return rc;
@@ -1084,8 +1561,8 @@ struct compat_ica_xcRB {
unsigned int status;
} __packed;
-static long trans_xcRB32(struct file *filp, unsigned int cmd,
- unsigned long arg)
+static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
+ unsigned int cmd, unsigned long arg)
{
struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
struct compat_ica_xcRB xcRB32;
@@ -1115,12 +1592,12 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd,
xcRB64.priority_window = xcRB32.priority_window;
xcRB64.status = xcRB32.status;
do {
- rc = zcrypt_send_cprb(&xcRB64);
+ rc = _zcrypt_send_cprb(perms, &xcRB64);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_send_cprb(&xcRB64);
+ rc = _zcrypt_send_cprb(perms, &xcRB64);
} while (rc == -EAGAIN);
xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
xcRB32.reply_data_length = xcRB64.reply_data_length;
@@ -1133,12 +1610,20 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd,
static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
+ int rc;
+ struct ap_perms *perms =
+ (struct ap_perms *) filp->private_data;
+
+ rc = zcrypt_check_ioctl(perms, cmd);
+ if (rc)
+ return rc;
+
if (cmd == ICARSAMODEXPO)
- return trans_modexpo32(filp, cmd, arg);
+ return trans_modexpo32(perms, filp, cmd, arg);
if (cmd == ICARSACRT)
- return trans_modexpo_crt32(filp, cmd, arg);
+ return trans_modexpo_crt32(perms, filp, cmd, arg);
if (cmd == ZSECSENDCPRB)
- return trans_xcRB32(filp, cmd, arg);
+ return trans_xcRB32(perms, filp, cmd, arg);
return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif
@@ -1256,6 +1741,67 @@ void zcrypt_debug_exit(void)
debug_unregister(zcrypt_dbf_info);
}
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+
+static int __init zcdn_init(void)
+{
+ int rc;
+
+ /* create a new class 'zcrypt' */
+ zcrypt_class = class_create(THIS_MODULE, ZCRYPT_NAME);
+ if (IS_ERR(zcrypt_class)) {
+ rc = PTR_ERR(zcrypt_class);
+ goto out_class_create_failed;
+ }
+ zcrypt_class->dev_release = zcdn_device_release;
+
+ /* alloc device minor range */
+ rc = alloc_chrdev_region(&zcrypt_devt,
+ 0, ZCRYPT_MAX_MINOR_NODES,
+ ZCRYPT_NAME);
+ if (rc)
+ goto out_alloc_chrdev_failed;
+
+ cdev_init(&zcrypt_cdev, &zcrypt_fops);
+ zcrypt_cdev.owner = THIS_MODULE;
+ rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
+ if (rc)
+ goto out_cdev_add_failed;
+
+ /* need some class specific sysfs attributes */
+ rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
+ if (rc)
+ goto out_class_create_file_1_failed;
+ rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
+ if (rc)
+ goto out_class_create_file_2_failed;
+
+ return 0;
+
+out_class_create_file_2_failed:
+ class_remove_file(zcrypt_class, &class_attr_zcdn_create);
+out_class_create_file_1_failed:
+ cdev_del(&zcrypt_cdev);
+out_cdev_add_failed:
+ unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
+out_alloc_chrdev_failed:
+ class_destroy(zcrypt_class);
+out_class_create_failed:
+ return rc;
+}
+
+static void zcdn_exit(void)
+{
+ class_remove_file(zcrypt_class, &class_attr_zcdn_create);
+ class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
+ zcdn_destroy_all();
+ cdev_del(&zcrypt_cdev);
+ unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
+ class_destroy(zcrypt_class);
+}
+
+#endif
+
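
zcdn_init() takes its resources in a fixed order (class, chrdev region, cdev, two class attributes) and the error ladder releases them in exact reverse; zcdn_exit() mirrors the full teardown. The idiom, reduced to two resources:

	/* Sketch: goto-based unwind, newest resource released first. */
	rc = acquire_a();
	if (rc)
		goto out;
	rc = acquire_b();
	if (rc)
		goto out_release_a;
	return 0;
out_release_a:
	release_a();
out:
	return rc;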
/**
* zcrypt_api_init(): Module initialization.
*
@@ -1269,15 +1815,27 @@ int __init zcrypt_api_init(void)
if (rc)
goto out;
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+ rc = zcdn_init();
+ if (rc)
+ goto out;
+#endif
+
/* Register the request sprayer. */
rc = misc_register(&zcrypt_misc_device);
if (rc < 0)
- goto out;
+ goto out_misc_register_failed;
zcrypt_msgtype6_init();
zcrypt_msgtype50_init();
+
return 0;
+out_misc_register_failed:
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+ zcdn_exit();
+#endif
+ zcrypt_debug_exit();
out:
return rc;
}
@@ -1289,6 +1847,9 @@ out:
*/
void __exit zcrypt_api_exit(void)
{
+#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+ zcdn_exit();
+#endif
misc_deregister(&zcrypt_misc_device);
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index a848625c1a5a..af67a768a3fc 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,8 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * zcrypt 2.1.0
- *
- * Copyright IBM Corp. 2001, 2012
+ * Copyright IBM Corp. 2001, 2018
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -22,17 +20,8 @@
#include "ap_bus.h"
/**
- * device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2,
- * PCIXCC_MCL3, CEX2C, or CEX2A
- *
- * NOTE: PCIXCC_MCL3 refers to a PCIXCC with May 2004 version of Licensed
- * Internal Code (LIC) (EC J12220 level 29).
- * PCIXCC_MCL2 refers to any LIC before this level.
+ * Supported device types
*/
-#define ZCRYPT_PCICA 1
-#define ZCRYPT_PCICC 2
-#define ZCRYPT_PCIXCC_MCL2 3
-#define ZCRYPT_PCIXCC_MCL3 4
#define ZCRYPT_CEX2C 5
#define ZCRYPT_CEX2A 6
#define ZCRYPT_CEX3C 7
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index 40cd4c1c2de8..d4f35a183c15 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * zcrypt 2.1.0
- *
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index e5b5c02c9d67..f09bb850763b 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * zcrypt 2.1.0
- *
* Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index f4ae5fa30ec9..146f54f5cbb8 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * zcrypt 2.1.0
- *
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
@@ -43,8 +41,8 @@
#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \
- "Copyright IBM Corp. 2001, 2012");
+MODULE_DESCRIPTION("CEX2A/CEX3A Cryptographic Coprocessor device driver, " \
+ "Copyright IBM Corp. 2001, 2018");
MODULE_LICENSE("GPL");
static struct ap_device_id zcrypt_cex2a_card_ids[] = {
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index 66d58bc87c66..7842214d9d09 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * zcrypt 2.1.0
- *
* Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
@@ -14,7 +12,7 @@
#define _ZCRYPT_CEX2A_H_
/**
- * The type 50 message family is associated with a CEX2A card.
+ * The type 50 message family is associated with CEXxA cards.
*
* The four members of the family are described below.
*
@@ -111,7 +109,7 @@ struct type50_crb3_msg {
} __packed;
/**
- * The type 80 response family is associated with a CEX2A card.
+ * The type 80 response family is associated with CEXxA cards.
*
* Note that all unsigned char arrays are right-justified and left-padded
* with zeroes.
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 94d9f7224aea..546f67676734 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * zcrypt 2.1.0
- *
- * Copyright IBM Corp. 2001, 2012
+ * Copyright IBM Corp. 2001, 2018
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -25,39 +23,22 @@
#include "zcrypt_api.h"
#include "zcrypt_error.h"
#include "zcrypt_msgtype6.h"
-#include "zcrypt_pcixcc.h"
+#include "zcrypt_cex2c.h"
#include "zcrypt_cca_key.h"
-#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */
-#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
-#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */
-#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE
+#define CEX2C_MIN_MOD_SIZE 16 /* 128 bits */
+#define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */
+#define CEX3C_MIN_MOD_SIZE 16 /* 128 bits */
#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
-
-#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
-#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
-
-#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
-
-#define PCIXCC_CLEANUP_TIME (15*HZ)
-
-#define CEIL4(x) ((((x)+3)/4)*4)
-
-struct response_type {
- struct completion work;
- int type;
-};
-#define PCIXCC_RESPONSE_TYPE_ICA 0
-#define PCIXCC_RESPONSE_TYPE_XCRB 1
+#define CEX2C_MAX_XCRB_MESSAGE_SIZE (12*1024)
+#define CEX2C_CLEANUP_TIME (15*HZ)
MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \
- "Copyright IBM Corp. 2001, 2012");
+MODULE_DESCRIPTION("CEX2C/CEX3C Cryptographic Coprocessor device driver, " \
+ "Copyright IBM Corp. 2001, 2018");
MODULE_LICENSE("GPL");
-static struct ap_device_id zcrypt_pcixcc_card_ids[] = {
- { .dev_type = AP_DEVICE_TYPE_PCIXCC,
- .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+static struct ap_device_id zcrypt_cex2c_card_ids[] = {
{ .dev_type = AP_DEVICE_TYPE_CEX2C,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX3C,
@@ -65,11 +46,9 @@ static struct ap_device_id zcrypt_pcixcc_card_ids[] = {
{ /* end of list */ },
};
-MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids);
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_card_ids);
-static struct ap_device_id zcrypt_pcixcc_queue_ids[] = {
- { .dev_type = AP_DEVICE_TYPE_PCIXCC,
- .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+static struct ap_device_id zcrypt_cex2c_queue_ids[] = {
{ .dev_type = AP_DEVICE_TYPE_CEX2C,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX3C,
@@ -77,16 +56,16 @@ static struct ap_device_id zcrypt_pcixcc_queue_ids[] = {
{ /* end of list */ },
};
-MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids);
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_queue_ids);
/**
- * Large random number detection function. Its sends a message to a pcixcc
+ * Large random number detection function. It sends a message to a CEX2C/CEX3C
* card to find out if large random numbers are supported.
* @ap_dev: pointer to the AP device.
*
* Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
*/
-static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq)
+static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
{
struct ap_message ap_msg;
unsigned long long psmid;
@@ -147,13 +126,11 @@ out_free:
}
/**
- * Probe function for PCIXCC/CEX2C card devices. It always accepts the
- * AP device since the bus_match already checked the hardware type. The
- * PCIXCC cards come in two flavours: micro code level 2 and micro code
- * level 3. This is checked by sending a test message to the device.
+ * Probe function for CEX2C/CEX3C card devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP card device.
*/
-static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev)
+static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
{
/*
* Normalized speed ratings per crypto adapter
@@ -179,9 +156,9 @@ static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev)
zc->type_string = "CEX2C";
memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
sizeof(CEX2C_SPEED_IDX));
- zc->min_mod_size = PCIXCC_MIN_MOD_SIZE;
- zc->max_mod_size = PCIXCC_MAX_MOD_SIZE;
- zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
+ zc->min_mod_size = CEX2C_MIN_MOD_SIZE;
+ zc->max_mod_size = CEX2C_MAX_MOD_SIZE;
+ zc->max_exp_bit_length = CEX2C_MAX_MOD_SIZE;
break;
case AP_DEVICE_TYPE_CEX3C:
zc->user_space_type = ZCRYPT_CEX3C;
@@ -208,10 +185,10 @@ static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev)
}
/**
- * This is called to remove the PCIXCC/CEX2C card driver information
+ * This is called to remove the CEX2C/CEX3C card driver information
* if an AP card device is removed.
*/
-static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev)
+static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
{
struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
@@ -219,33 +196,31 @@ static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev)
zcrypt_card_unregister(zc);
}
-static struct ap_driver zcrypt_pcixcc_card_driver = {
- .probe = zcrypt_pcixcc_card_probe,
- .remove = zcrypt_pcixcc_card_remove,
- .ids = zcrypt_pcixcc_card_ids,
+static struct ap_driver zcrypt_cex2c_card_driver = {
+ .probe = zcrypt_cex2c_card_probe,
+ .remove = zcrypt_cex2c_card_remove,
+ .ids = zcrypt_cex2c_card_ids,
.flags = AP_DRIVER_FLAG_DEFAULT,
};
/**
- * Probe function for PCIXCC/CEX2C queue devices. It always accepts the
- * AP device since the bus_match already checked the hardware type. The
- * PCIXCC cards come in two flavours: micro code level 2 and micro code
- * level 3. This is checked by sending a test message to the device.
+ * Probe function for CEX2C/CEX3C queue devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type.
* @ap_dev: pointer to the AP card device.
*/
-static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev)
+static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
{
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq;
int rc;
- zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
+ zq = zcrypt_queue_alloc(CEX2C_MAX_XCRB_MESSAGE_SIZE);
if (!zq)
return -ENOMEM;
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
- rc = zcrypt_pcixcc_rng_supported(aq);
+ rc = zcrypt_cex2c_rng_supported(aq);
if (rc < 0) {
zcrypt_queue_free(zq);
return rc;
@@ -257,7 +232,7 @@ static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev)
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_NORNG);
ap_queue_init_reply(aq, &zq->reply);
- aq->request_timeout = PCIXCC_CLEANUP_TIME,
+ aq->request_timeout = CEX2C_CLEANUP_TIME;
aq->private = zq;
rc = zcrypt_queue_register(zq);
if (rc) {
@@ -268,10 +243,10 @@ static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev)
}
/**
- * This is called to remove the PCIXCC/CEX2C queue driver information
+ * This is called to remove the CEX2C/CEX3C queue driver information
* if an AP queue device is removed.
*/
-static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
+static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
{
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq = aq->private;
@@ -281,37 +256,37 @@ static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
zcrypt_queue_unregister(zq);
}
-static struct ap_driver zcrypt_pcixcc_queue_driver = {
- .probe = zcrypt_pcixcc_queue_probe,
- .remove = zcrypt_pcixcc_queue_remove,
+static struct ap_driver zcrypt_cex2c_queue_driver = {
+ .probe = zcrypt_cex2c_queue_probe,
+ .remove = zcrypt_cex2c_queue_remove,
.suspend = ap_queue_suspend,
.resume = ap_queue_resume,
- .ids = zcrypt_pcixcc_queue_ids,
+ .ids = zcrypt_cex2c_queue_ids,
.flags = AP_DRIVER_FLAG_DEFAULT,
};
-int __init zcrypt_pcixcc_init(void)
+int __init zcrypt_cex2c_init(void)
{
int rc;
- rc = ap_driver_register(&zcrypt_pcixcc_card_driver,
- THIS_MODULE, "pcixcccard");
+ rc = ap_driver_register(&zcrypt_cex2c_card_driver,
+ THIS_MODULE, "cex2card");
if (rc)
return rc;
- rc = ap_driver_register(&zcrypt_pcixcc_queue_driver,
- THIS_MODULE, "pcixccqueue");
+ rc = ap_driver_register(&zcrypt_cex2c_queue_driver,
+ THIS_MODULE, "cex2cqueue");
if (rc)
- ap_driver_unregister(&zcrypt_pcixcc_card_driver);
+ ap_driver_unregister(&zcrypt_cex2c_card_driver);
return rc;
}
-void zcrypt_pcixcc_exit(void)
+void zcrypt_cex2c_exit(void)
{
- ap_driver_unregister(&zcrypt_pcixcc_queue_driver);
- ap_driver_unregister(&zcrypt_pcixcc_card_driver);
+ ap_driver_unregister(&zcrypt_cex2c_queue_driver);
+ ap_driver_unregister(&zcrypt_cex2c_card_driver);
}
-module_init(zcrypt_pcixcc_init);
-module_exit(zcrypt_pcixcc_exit);
+module_init(zcrypt_cex2c_init);
+module_exit(zcrypt_cex2c_exit);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_cex2c.h
index cf73a0f91e9c..6ec405c2bec2 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.h
+++ b/drivers/s390/crypto/zcrypt_cex2c.h
@@ -1,8 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * zcrypt 2.1.0
- *
- * Copyright IBM Corp. 2001, 2012
+ * Copyright IBM Corp. 2001, 2018
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -11,10 +9,10 @@
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*/
-#ifndef _ZCRYPT_PCIXCC_H_
-#define _ZCRYPT_PCIXCC_H_
+#ifndef _ZCRYPT_CEX2C_H_
+#define _ZCRYPT_CEX2C_H_
-int zcrypt_pcixcc_init(void);
-void zcrypt_pcixcc_exit(void);
+int zcrypt_cex2c_init(void);
+void zcrypt_cex2c_exit(void);
-#endif /* _ZCRYPT_PCIXCC_H_ */
+#endif /* _ZCRYPT_CEX2C_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 35d58dbbc4da..f9d4c6c7521d 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -37,8 +37,8 @@
#define CEX4_CLEANUP_TIME (900*HZ)
MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \
- "Copyright IBM Corp. 2012");
+MODULE_DESCRIPTION("CEX4/CEX5/CEX6 Cryptographic Card device driver, " \
+ "Copyright IBM Corp. 2018");
MODULE_LICENSE("GPL");
static struct ap_device_id zcrypt_cex4_card_ids[] = {
@@ -66,8 +66,9 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
/**
- * Probe function for CEX4 card device. It always accepts the AP device
- * since the bus_match already checked the hardware type.
+ * Probe function for CEX4/CEX5/CEX6 card device. It always
+ * accepts the AP device since the bus_match already checked
+ * the hardware type.
* @ap_dev: pointer to the AP device.
*/
static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
@@ -199,7 +200,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
}
/**
- * This is called to remove the CEX4 card driver information
+ * This is called to remove the CEX4/CEX5/CEX6 card driver information
* if an AP card device is removed.
*/
static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
@@ -218,8 +219,9 @@ static struct ap_driver zcrypt_cex4_card_driver = {
};
/**
- * Probe function for CEX4 queue device. It always accepts the AP device
- * since the bus_match already checked the hardware type.
+ * Probe function for CEX4/CEX5/CEX6 queue device. It always
+ * accepts the AP device since the bus_match already checked
+ * the hardware type.
* @ap_dev: pointer to the AP device.
*/
static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
@@ -265,8 +267,8 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
}
/**
- * This is called to remove the CEX4 queue driver information
- * if an AP queue device is removed.
+ * This is called to remove the CEX4/CEX5/CEX6 queue driver
+ * information if an AP queue device is removed.
*/
static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
{
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 6f7ebc1dbe10..240b27f3f5f6 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * zcrypt 2.1.0
- *
* Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
@@ -16,6 +14,7 @@
#include <linux/atomic.h>
#include "zcrypt_debug.h"
#include "zcrypt_api.h"
+#include "zcrypt_msgtype6.h"
/**
* Reply Messages
@@ -114,6 +113,27 @@ static inline int convert_error(struct zcrypt_queue *zq,
card, queue, ehdr->reply_code);
return -EAGAIN;
case REP82_ERROR_TRANSPORT_FAIL:
+ /* Card or infrastructure failure, disable card */
+ atomic_set(&zcrypt_rescan_req, 1);
+ zq->online = 0;
+ pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ card, queue);
+ /* For type 86 response show the apfs value (failure reason) */
+ if (ehdr->type == TYPE86_RSP_CODE) {
+ struct {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ } __packed * head = reply->message;
+ unsigned int apfs = *((u32 *)head->fmt2.apfs);
+
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x reply=0x%02x apfs=0x%x => online=0 rc=EAGAIN\n",
+ card, queue, apfs, ehdr->reply_code);
+ } else
+ ZCRYPT_DBF(DBF_ERR,
+ "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+ card, queue, ehdr->reply_code);
+ return -EAGAIN;
case REP82_ERROR_MACHINE_FAILURE:
// REP88_ERROR_MODULE_FAILURE // '10' CEX2A
/* If a card fails disable it and repeat the request. */
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index f159662c907b..fc4295b3d801 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * zcrypt 2.1.0
- *
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
@@ -27,13 +25,13 @@
#include "zcrypt_error.h"
#include "zcrypt_msgtype50.h"
-/* 4096 bits */
+/* >= CEX3A: 4096 bits */
#define CEX3A_MAX_MOD_SIZE 512
-/* max outputdatalength + type80_hdr */
+/* CEX2A: max outputdatalength + type80_hdr */
#define CEX2A_MAX_RESPONSE_SIZE 0x110
-/* 512 bit modulus, (max outputdatalength) + type80_hdr */
+/* >= CEX3A: 512 bit modulus, (max outputdatalength) + type80_hdr */
#define CEX3A_MAX_RESPONSE_SIZE 0x210
MODULE_AUTHOR("IBM Corporation");
@@ -42,7 +40,7 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
MODULE_LICENSE("GPL");
/**
- * The type 50 message family is associated with a CEX2A card.
+ * The type 50 message family is associated with CEXxA cards.
*
* The four members of the family are described below.
*
@@ -139,7 +137,7 @@ struct type50_crb3_msg {
} __packed;
/**
- * The type 80 response family is associated with a CEX2A card.
+ * The type 80 response family is associated with CEXxA cards.
*
* Note that all unsigned char arrays are right-justified and left-padded
* with zeroes.
@@ -273,7 +271,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
/*
* CEX2A and CEX3A w/o FW update can handle requests up to
* 256 byte modulus (2k keys).
- * CEX3A with FW update and CEX4A cards are able to handle
+ * CEX3A with FW update and newer CEXxA cards are able to handle
* 512 byte modulus (4k keys).
*/
if (mod_len <= 128) { /* up to 1024 bit key size */
@@ -356,7 +354,7 @@ static int convert_type80(struct zcrypt_queue *zq,
unsigned char *data;
if (t80h->len < sizeof(*t80h) + outputdatalength) {
- /* The result is too short, the CEX2A card may not do that.. */
+ /* The result is too short, the CEXxA card may not do that. */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
AP_QID_CARD(zq->queue->qid),
@@ -447,10 +445,10 @@ out:
static atomic_t zcrypt_step = ATOMIC_INIT(0);
/**
- * The request distributor calls this function if it picked the CEX2A
+ * The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
- * CEX2A device to the request distributor
+ * CEXxA device to the request distributor
* @mex: pointer to the modexpo request buffer
*/
static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
@@ -493,10 +491,10 @@ out_free:
}
/**
- * The request distributor calls this function if it picked the CEX2A
+ * The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
- * CEX2A device to the request distributor
+ * CEXxA device to the request distributor
 * @crt: pointer to the modexpo_crt request buffer
*/
static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index 8530f652ea4f..66bec4f45c56 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * zcrypt 2.1.0
- *
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 2101776a8148..0cbcc238ef98 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * zcrypt 2.1.0
- *
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
@@ -29,8 +27,7 @@
#include "zcrypt_msgtype6.h"
#include "zcrypt_cca_key.h"
-#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
-#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
+#define CEXXC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
#define CEIL4(x) ((((x)+3)/4)*4)
@@ -38,9 +35,9 @@ struct response_type {
struct completion work;
int type;
};
-#define PCIXCC_RESPONSE_TYPE_ICA 0
-#define PCIXCC_RESPONSE_TYPE_XCRB 1
-#define PCIXCC_RESPONSE_TYPE_EP11 2
+#define CEXXC_RESPONSE_TYPE_ICA 0
+#define CEXXC_RESPONSE_TYPE_XCRB 1
+#define CEXXC_RESPONSE_TYPE_EP11 2
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
@@ -111,7 +108,7 @@ struct function_and_rules_block {
} __packed;
/**
- * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
+ * The following is used to initialize the CPRBX passed to the CEXxC/CEXxP
* card in a type6 message. The 3 fields that must be filled in at execution
* time are req_parml, rpl_parml and usage_domain.
* Everything about this interface is ascii/big-endian, since the
@@ -294,7 +291,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
/* message header, cprbx and f&r */
msg->hdr = static_type6_hdrX;
msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
- msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+ msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
@@ -364,7 +361,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
/* message header, cprbx and f&r */
msg->hdr = static_type6_hdrX;
msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
- msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+ msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
@@ -658,16 +655,6 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
(int) service_rc, (int) service_rs);
return -EINVAL;
}
- if (service_rc == 8 && service_rs == 783) {
- zq->zcard->min_mod_size =
- PCIXCC_MIN_MOD_SIZE_OLD;
- ZCRYPT_DBF(DBF_DEBUG,
- "device=%02x.%04x rc/rs=%d/%d => rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) service_rc, (int) service_rs);
- return -EAGAIN;
- }
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
AP_QID_CARD(zq->queue->qid),
@@ -697,7 +684,7 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
if (pad_len > 0) {
if (pad_len < 10)
return -EINVAL;
- /* 'restore' padding left in the PCICC/PCIXCC card. */
+ /* 'restore' padding left in the CEXXC card. */
if (copy_to_user(outputdata, static_pad, pad_len - 1))
return -EFAULT;
if (put_user(0, outputdata + pad_len - 1))
@@ -955,13 +942,13 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
if (t86r->hdr.type == TYPE86_RSP_CODE &&
t86r->cprbx.cprb_ver_id == 0x02) {
switch (resp_type->type) {
- case PCIXCC_RESPONSE_TYPE_ICA:
+ case CEXXC_RESPONSE_TYPE_ICA:
length = sizeof(struct type86x_reply)
+ t86r->length - 2;
- length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
+ length = min(CEXXC_MAX_ICA_RESPONSE_SIZE, length);
memcpy(msg->message, reply->message, length);
break;
- case PCIXCC_RESPONSE_TYPE_XCRB:
+ case CEXXC_RESPONSE_TYPE_XCRB:
length = t86r->fmt2.offset2 + t86r->fmt2.count2;
length = min(MSGTYPE06_MAX_MSG_SIZE, length);
memcpy(msg->message, reply->message, length);
@@ -1004,7 +991,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
if (t86r->hdr.type == TYPE86_RSP_CODE &&
t86r->cprbx.cprb_ver_id == 0x04) {
switch (resp_type->type) {
- case PCIXCC_RESPONSE_TYPE_EP11:
+ case CEXXC_RESPONSE_TYPE_EP11:
length = t86r->fmt2.offset1 + t86r->fmt2.count1;
length = min(MSGTYPE06_MAX_MSG_SIZE, length);
memcpy(msg->message, reply->message, length);
@@ -1022,10 +1009,10 @@ out:
static atomic_t zcrypt_step = ATOMIC_INIT(0);
/**
- * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
- * PCIXCC/CEX2C device to the request distributor
+ * CEXxC device to the request distributor
* @mex: pointer to the modexpo request buffer
*/
static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
@@ -1033,7 +1020,7 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
{
struct ap_message ap_msg;
struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_ICA,
+ .type = CEXXC_RESPONSE_TYPE_ICA,
};
int rc;
@@ -1066,10 +1053,10 @@ out_free:
}
/**
- * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
- * PCIXCC/CEX2C device to the request distributor
+ * CEXxC device to the request distributor
 * @crt: pointer to the modexpo_crt request buffer
*/
static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
@@ -1077,7 +1064,7 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
{
struct ap_message ap_msg;
struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_ICA,
+ .type = CEXXC_RESPONSE_TYPE_ICA,
};
int rc;
@@ -1122,7 +1109,7 @@ unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
unsigned int *func_code, unsigned short **dom)
{
struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_XCRB,
+ .type = CEXXC_RESPONSE_TYPE_XCRB,
};
ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
@@ -1131,18 +1118,17 @@ unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+ ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
return -ENOMEM;
- memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
return XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
}
/**
- * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * The request distributor calls this function if it picked the CEXxC
* device to handle a send_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
- * PCIXCC/CEX2C device to the request distributor
+ * CEXxC device to the request distributor
* @xcRB: pointer to the send_cprb request buffer
*/
static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
@@ -1178,7 +1164,7 @@ unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
unsigned int *func_code)
{
struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_EP11,
+ .type = CEXXC_RESPONSE_TYPE_EP11,
};
ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
@@ -1187,10 +1173,9 @@ unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
ap_msg->receive = zcrypt_msgtype6_receive_ep11;
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+ ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
return -ENOMEM;
- memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
return xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
}
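
The kmalloc()+memcpy() pairs for the response-type cookie collapse into kmemdup(), which allocates and copies in one call and needs only a single failure check:

	/* Sketch: kmemdup() as a one-step replacement for the old pattern
	 *	p = kmalloc(len, GFP_KERNEL); if (p) memcpy(p, src, len); */
	ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
	if (!ap_msg->private)
		return -ENOMEM;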
@@ -1273,7 +1258,7 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
unsigned int *domain)
{
struct response_type resp_type = {
- .type = PCIXCC_RESPONSE_TYPE_XCRB,
+ .type = CEXXC_RESPONSE_TYPE_XCRB,
};
ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
@@ -1282,10 +1267,9 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
ap_msg->receive = zcrypt_msgtype6_receive;
ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+ ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
return -ENOMEM;
- memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
@@ -1294,10 +1278,10 @@ unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
}
/**
- * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * The request distributor calls this function if it picked the CEXxC
* device to generate random data.
* @zq: pointer to zcrypt_queue structure that identifies the
- * PCIXCC/CEX2C device to the request distributor
+ * CEXxC device to the request distributor
* @buffer: pointer to a memory page to return random data
*/
static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
@@ -1332,7 +1316,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
}
/**
- * The crypto operations for a PCIXCC/CEX2C card.
+ * The crypto operations for a CEXxC card.
*/
static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
.owner = THIS_MODULE,
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index e4c2f37d7ad9..41a0df5f070f 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * zcrypt 2.1.0
- *
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
@@ -24,7 +22,7 @@
#define MSGTYPE06_MAX_MSG_SIZE (12*1024)
/**
- * The type 6 message family is associated with PCICC or PCIXCC cards.
+ * The type 6 message family is associated with CEXxC/CEXxP cards.
*
* It contains a message header followed by a CPRB, both of which
* are described below.
@@ -43,13 +41,8 @@ struct type6_hdr {
unsigned int offset2; /* 0x00000000 */
unsigned int offset3; /* 0x00000000 */
unsigned int offset4; /* 0x00000000 */
- unsigned char agent_id[16]; /* PCICC: */
- /* 0x0100 */
- /* 0x4343412d4150504c202020 */
- /* 0x010101 */
- /* PCIXCC: */
- /* 0x4341000000000000 */
- /* 0x0000000000000000 */
+ unsigned char agent_id[16]; /* 0x4341000000000000 */
+ /* 0x0000000000000000 */
unsigned char rqid[2]; /* rqid. internal to 603 */
unsigned char reserved5[2]; /* 0x0000 */
unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */
@@ -65,7 +58,7 @@ struct type6_hdr {
} __packed;
/**
- * The type 86 message family is associated with PCICC and PCIXCC cards.
+ * The type 86 message family is associated with CEXxC/CEXxP cards.
*
* It contains a message header followed by a CPRB. The CPRB is
* the same as the request CPRB, which is described above.
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 8df82c6ef66e..522c4bc69a08 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * zcrypt 2.1.0
- *
* Copyright IBM Corp. 2001, 2012
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index c0631895154e..f96ec68af2e5 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -515,8 +515,8 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_unmap;
- pci_set_dma_seg_boundary(pdev, SZ_1M - 1);
- pci_set_dma_max_seg_size(pdev, SZ_1M);
+ dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
+ dma_set_max_seg_size(&pdev->dev, SZ_1M);
pci_set_master(pdev);
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
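
pci_set_dma_seg_boundary() and pci_set_dma_max_seg_size() were thin PCI wrappers; calling the generic DMA API directly on &pdev->dev is equivalent and independent of the wrappers. The constraints themselves are unchanged:

	/* Sketch: keep every scatter-gather segment inside a 1 MiB window
	 * and no larger than 1 MiB, now via the generic device DMA API. */
	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);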
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 34e0d476c5c6..6843bc7ee9f2 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -26,6 +26,7 @@
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>
+#include <net/tcp.h>
#include <asm/debug.h>
#include <asm/qdio.h>
@@ -389,8 +390,9 @@ enum qeth_layer2_frame_flags {
enum qeth_header_ids {
QETH_HEADER_TYPE_LAYER3 = 0x01,
QETH_HEADER_TYPE_LAYER2 = 0x02,
- QETH_HEADER_TYPE_TSO = 0x03,
+ QETH_HEADER_TYPE_L3_TSO = 0x03,
QETH_HEADER_TYPE_OSN = 0x04,
+ QETH_HEADER_TYPE_L2_TSO = 0x06,
};
/* flags for qeth_hdr.ext_flags */
#define QETH_HDR_EXT_VLAN_FRAME 0x01
@@ -581,7 +583,8 @@ struct qeth_cmd_buffer {
struct qeth_channel *channel;
unsigned char *data;
int rc;
- void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
+ void (*callback)(struct qeth_card *card, struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob);
};
static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
@@ -638,7 +641,6 @@ struct qeth_reply {
atomic_t received;
int rc;
void *param;
- struct qeth_card *card;
refcount_t refcnt;
};
@@ -671,6 +673,12 @@ struct qeth_card_info {
__u32 hwtrap;
};
+enum qeth_discipline_id {
+ QETH_DISCIPLINE_UNDETERMINED = -1,
+ QETH_DISCIPLINE_LAYER3 = 0,
+ QETH_DISCIPLINE_LAYER2 = 1,
+};
+
struct qeth_card_options {
struct qeth_routing_info route4;
struct qeth_ipa_info ipa4;
@@ -680,7 +688,7 @@ struct qeth_card_options {
struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
struct qeth_vnicc_info vnicc; /* VNICC options */
int fake_broadcast;
- int layer2;
+ enum qeth_discipline_id layer;
int performance_stats;
int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
@@ -690,6 +698,9 @@ struct qeth_card_options {
char hsuid[9];
};
+#define IS_LAYER2(card) ((card)->options.layer == QETH_DISCIPLINE_LAYER2)
+#define IS_LAYER3(card) ((card)->options.layer == QETH_DISCIPLINE_LAYER3)
+
/*
* thread bits for qeth_card thread masks
*/
@@ -702,12 +713,6 @@ struct qeth_osn_info {
int (*data_cb)(struct sk_buff *skb);
};
-enum qeth_discipline_id {
- QETH_DISCIPLINE_UNDETERMINED = -1,
- QETH_DISCIPLINE_LAYER3 = 0,
- QETH_DISCIPLINE_LAYER2 = 1,
-};
-
struct qeth_discipline {
const struct device_type *devtype;
int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done);
@@ -759,7 +764,6 @@ struct qeth_switch_info {
struct qeth_card {
struct list_head list;
enum qeth_card_states state;
- int lan_online;
spinlock_t lock;
struct ccwgroup_device *gdev;
struct qeth_channel read;
@@ -892,11 +896,6 @@ static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
(ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
*flags |= QETH_HDR_EXT_UDP;
- if (ipv == 4) {
- /* some HW requires combined L3+L4 csum offload: */
- *flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
- ip_hdr(skb)->check = 0;
- }
}
static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
@@ -1007,9 +1006,7 @@ int qeth_query_switch_attributes(struct qeth_card *card,
int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
void *reply_param);
-int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
- int extra_elems, int data_offset);
-int qeth_get_elements_for_frags(struct sk_buff *);
+unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
struct qeth_hdr *hdr, unsigned int offset,
unsigned int hd_len);
@@ -1027,7 +1024,6 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd);
int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
-int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
@@ -1052,6 +1048,13 @@ int qeth_vm_request_mac(struct qeth_card *card);
int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr **hdr, unsigned int hdr_len,
unsigned int proto_len, unsigned int *elements);
+void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
+ struct sk_buff *skb, unsigned int proto_len);
+int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue, int ipv, int cast_type,
+ void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
+ struct sk_buff *skb, int ipv, int cast_type,
+ unsigned int data_len));
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index de8282420f96..3274f13aad57 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
@@ -61,10 +62,10 @@ static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;
-static struct mutex qeth_mod_mutex;
-static void qeth_send_control_data_cb(struct qeth_channel *,
- struct qeth_cmd_buffer *);
+static void qeth_send_control_data_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob);
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
@@ -591,7 +592,6 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
if (reply) {
refcount_set(&reply->refcnt, 1);
atomic_set(&reply->received, 0);
- reply->card = card;
}
return reply;
}
@@ -610,7 +610,7 @@ static void qeth_put_reply(struct qeth_reply *reply)
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
{
- char *ipa_name;
+ const char *ipa_name;
int com = cmd->hdr.command;
ipa_name = qeth_get_ipa_cmd_name(com);
if (rc)
@@ -626,80 +626,61 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
}
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
- struct qeth_cmd_buffer *iob)
+ struct qeth_ipa_cmd *cmd)
{
- struct qeth_ipa_cmd *cmd = NULL;
-
QETH_CARD_TEXT(card, 5, "chkipad");
- if (IS_IPA(iob->data)) {
- cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
- if (IS_IPA_REPLY(cmd)) {
- if (cmd->hdr.command != IPA_CMD_SETCCID &&
- cmd->hdr.command != IPA_CMD_DELCCID &&
- cmd->hdr.command != IPA_CMD_MODCCID &&
- cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
- qeth_issue_ipa_msg(cmd,
- cmd->hdr.return_code, card);
- return cmd;
+
+ if (IS_IPA_REPLY(cmd)) {
+ if (cmd->hdr.command != IPA_CMD_SETCCID &&
+ cmd->hdr.command != IPA_CMD_DELCCID &&
+ cmd->hdr.command != IPA_CMD_MODCCID &&
+ cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
+ qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
+ return cmd;
+ }
+
+ /* handle unsolicited event: */
+ switch (cmd->hdr.command) {
+ case IPA_CMD_STOPLAN:
+ if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
+ dev_err(&card->gdev->dev,
+ "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
+ QETH_CARD_IFNAME(card));
+ qeth_close_dev(card);
} else {
- switch (cmd->hdr.command) {
- case IPA_CMD_STOPLAN:
- if (cmd->hdr.return_code ==
- IPA_RC_VEPA_TO_VEB_TRANSITION) {
- dev_err(&card->gdev->dev,
- "Interface %s is down because the "
- "adjacent port is no longer in "
- "reflective relay mode\n",
- QETH_CARD_IFNAME(card));
- qeth_close_dev(card);
- } else {
- dev_warn(&card->gdev->dev,
- "The link for interface %s on CHPID"
- " 0x%X failed\n",
- QETH_CARD_IFNAME(card),
- card->info.chpid);
- qeth_issue_ipa_msg(cmd,
- cmd->hdr.return_code, card);
- }
- card->lan_online = 0;
- netif_carrier_off(card->dev);
- return NULL;
- case IPA_CMD_STARTLAN:
- dev_info(&card->gdev->dev,
- "The link for %s on CHPID 0x%X has"
- " been restored\n",
- QETH_CARD_IFNAME(card),
- card->info.chpid);
- netif_carrier_on(card->dev);
- card->lan_online = 1;
- if (card->info.hwtrap)
- card->info.hwtrap = 2;
- qeth_schedule_recovery(card);
- return NULL;
- case IPA_CMD_SETBRIDGEPORT_IQD:
- case IPA_CMD_SETBRIDGEPORT_OSA:
- case IPA_CMD_ADDRESS_CHANGE_NOTIF:
- if (card->discipline->control_event_handler
- (card, cmd))
- return cmd;
- else
- return NULL;
- case IPA_CMD_MODCCID:
- return cmd;
- case IPA_CMD_REGISTER_LOCAL_ADDR:
- QETH_CARD_TEXT(card, 3, "irla");
- break;
- case IPA_CMD_UNREGISTER_LOCAL_ADDR:
- QETH_CARD_TEXT(card, 3, "urla");
- break;
- default:
- QETH_DBF_MESSAGE(2, "Received data is IPA "
- "but not a reply!\n");
- break;
- }
+ dev_warn(&card->gdev->dev,
+ "The link for interface %s on CHPID 0x%X failed\n",
+ QETH_CARD_IFNAME(card), card->info.chpid);
+ qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
+ netif_carrier_off(card->dev);
}
+ return NULL;
+ case IPA_CMD_STARTLAN:
+ dev_info(&card->gdev->dev,
+ "The link for %s on CHPID 0x%X has been restored\n",
+ QETH_CARD_IFNAME(card), card->info.chpid);
+ if (card->info.hwtrap)
+ card->info.hwtrap = 2;
+ qeth_schedule_recovery(card);
+ return NULL;
+ case IPA_CMD_SETBRIDGEPORT_IQD:
+ case IPA_CMD_SETBRIDGEPORT_OSA:
+ case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+ if (card->discipline->control_event_handler(card, cmd))
+ return cmd;
+ return NULL;
+ case IPA_CMD_MODCCID:
+ return cmd;
+ case IPA_CMD_REGISTER_LOCAL_ADDR:
+ QETH_CARD_TEXT(card, 3, "irla");
+ return NULL;
+ case IPA_CMD_UNREGISTER_LOCAL_ADDR:
+ QETH_CARD_TEXT(card, 3, "urla");
+ return NULL;
+ default:
+ QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
+ return cmd;
}
- return cmd;
}
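
After the refactor, qeth_send_control_data_cb() extracts the IPA command itself and qeth_check_ipa_data() becomes a flat dispatcher with a clear contract: it returns the command when a waiter (or the OSN callback) should still see it, and NULL once an unsolicited event has been fully consumed. The calling convention, in short:

	/* Sketch: NULL means "handled here", non-NULL flows on to reply
	 * matching in the caller. */
	if (IS_IPA(iob->data)) {
		cmd = qeth_check_ipa_data(card, (struct qeth_ipa_cmd *)
					  PDU_ENCAPSULATION(iob->data));
		if (!cmd)
			goto out;	/* e.g. STOPLAN/STARTLAN already processed */
	}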
void qeth_clear_ipacmd_list(struct qeth_card *card)
@@ -746,18 +727,10 @@ static int qeth_check_idx_response(struct qeth_card *card,
return 0;
}
-static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
-{
- struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
- dev_get_drvdata(&cdev->dev))->dev);
- return card;
-}
-
static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
__u8 index;
- QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
index = channel->io_buf_no;
do {
if (channel->iob[index].state == BUF_STATE_FREE) {
@@ -778,9 +751,7 @@ void qeth_release_buffer(struct qeth_channel *channel,
{
unsigned long flags;
- QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
spin_lock_irqsave(&channel->iob_lock, flags);
- memset(iob->data, 0, QETH_BUFSIZE);
iob->state = BUF_STATE_FREE;
iob->callback = qeth_send_control_data_cb;
iob->rc = 0;
@@ -789,6 +760,13 @@ void qeth_release_buffer(struct qeth_channel *channel,
}
EXPORT_SYMBOL_GPL(qeth_release_buffer);
+static void qeth_release_buffer_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ qeth_release_buffer(channel, iob);
+}
+
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
{
struct qeth_cmd_buffer *buffer = NULL;
@@ -819,17 +797,16 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
-static void qeth_send_control_data_cb(struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+static void qeth_send_control_data_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
{
- struct qeth_card *card;
+ struct qeth_ipa_cmd *cmd = NULL;
struct qeth_reply *reply, *r;
- struct qeth_ipa_cmd *cmd;
unsigned long flags;
int keep_reply;
int rc = 0;
- card = CARD_FROM_CDEV(channel->ccwdev);
QETH_CARD_TEXT(card, 4, "sndctlcb");
rc = qeth_check_idx_response(card, iob->data);
switch (rc) {
@@ -843,16 +820,20 @@ static void qeth_send_control_data_cb(struct qeth_channel *channel,
goto out;
}
- cmd = qeth_check_ipa_data(card, iob);
- if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
- goto out;
- /*in case of OSN : check if cmd is set */
- if (card->info.type == QETH_CARD_TYPE_OSN &&
- cmd &&
- cmd->hdr.command != IPA_CMD_STARTLAN &&
- card->osn_info.assist_cb != NULL) {
- card->osn_info.assist_cb(card->dev, cmd);
- goto out;
+ if (IS_IPA(iob->data)) {
+ cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+ cmd = qeth_check_ipa_data(card, cmd);
+ if (!cmd)
+ goto out;
+ if (IS_OSN(card) && card->osn_info.assist_cb &&
+ cmd->hdr.command != IPA_CMD_STARTLAN) {
+ card->osn_info.assist_cb(card->dev, cmd);
+ goto out;
+ }
+ } else {
+ /* non-IPA commands should only flow during initialization */
+ if (card->state != CARD_STATE_DOWN)
+ goto out;
}
spin_lock_irqsave(&card->lock, flags);
@@ -900,44 +881,6 @@ out:
qeth_release_buffer(channel, iob);
}
-static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
-{
- int cnt;
-
- QETH_DBF_TEXT(SETUP, 2, "setupch");
-
- channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
- if (!channel->ccw)
- return -ENOMEM;
- channel->state = CH_STATE_DOWN;
- atomic_set(&channel->irq_pending, 0);
- init_waitqueue_head(&channel->wait_q);
-
- if (!alloc_buffers)
- return 0;
-
- for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
- channel->iob[cnt].data =
- kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
- if (channel->iob[cnt].data == NULL)
- break;
- channel->iob[cnt].state = BUF_STATE_FREE;
- channel->iob[cnt].channel = channel;
- channel->iob[cnt].callback = qeth_send_control_data_cb;
- channel->iob[cnt].rc = 0;
- }
- if (cnt < QETH_CMD_BUFFER_NO) {
- kfree(channel->ccw);
- while (cnt-- > 0)
- kfree(channel->iob[cnt].data);
- return -ENOMEM;
- }
- channel->io_buf_no = 0;
- spin_lock_init(&channel->iob_lock);
-
- return 0;
-}
-
static int qeth_set_thread_start_bit(struct qeth_card *card,
unsigned long thread)
{
@@ -1013,16 +956,15 @@ void qeth_schedule_recovery(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
-static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
+static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
+ struct irb *irb)
{
int dstat, cstat;
char *sense;
- struct qeth_card *card;
sense = (char *) irb->ecw;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
- card = CARD_FROM_CDEV(cdev);
if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
@@ -1062,14 +1004,11 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
return 0;
}
-static long __qeth_check_irb_error(struct ccw_device *cdev,
- unsigned long intparm, struct irb *irb)
+static long qeth_check_irb_error(struct qeth_card *card,
+ struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb)
{
- struct qeth_card *card;
-
- card = CARD_FROM_CDEV(cdev);
-
- if (!card || !IS_ERR(irb))
+ if (!IS_ERR(irb))
return 0;
switch (PTR_ERR(irb)) {
@@ -1106,10 +1045,13 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
int rc;
int cstat, dstat;
struct qeth_cmd_buffer *iob = NULL;
+ struct ccwgroup_device *gdev;
struct qeth_channel *channel;
struct qeth_card *card;
- card = CARD_FROM_CDEV(cdev);
+ /* while we hold the ccwdev lock, this stays valid: */
+ gdev = dev_get_drvdata(&cdev->dev);
+ card = dev_get_drvdata(&gdev->dev);
if (!card)
return;
@@ -1129,7 +1071,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
if (qeth_intparm_is_iob(intparm))
iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
- if (__qeth_check_irb_error(cdev, intparm, irb)) {
+ if (qeth_check_irb_error(card, cdev, intparm, irb)) {
/* IO was terminated, free its resources. */
if (iob)
qeth_release_buffer(iob->channel, iob);
@@ -1184,7 +1126,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
channel->state = CH_STATE_DOWN;
goto out;
}
- rc = qeth_get_problem(cdev, irb);
+ rc = qeth_get_problem(card, cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
qeth_clear_ipacmd_list(card);
@@ -1204,7 +1146,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
__qeth_issue_next_read(card);
if (iob && iob->callback)
- iob->callback(iob->channel, iob);
+ iob->callback(card, iob->channel, iob);
out:
wake_up(&card->wait_q);
@@ -1217,54 +1159,23 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
{
struct sk_buff *skb;
- if (skb_queue_empty(&buf->skb_list))
- goto out;
- skb = skb_peek(&buf->skb_list);
- while (skb) {
+ skb_queue_walk(&buf->skb_list, skb) {
QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
- if (be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) {
- if (skb->sk) {
- struct iucv_sock *iucv = iucv_sk(skb->sk);
- iucv->sk_txnotify(skb, notification);
- }
- }
- if (skb_queue_is_last(&buf->skb_list, skb))
- skb = NULL;
- else
- skb = skb_queue_next(&buf->skb_list, skb);
+ if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
+ iucv_sk(skb->sk)->sk_txnotify(skb, notification);
}
-out:
- return;
}
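skb_queue_walk() replaces the open-coded peek/next loop with non-destructive iteration. A minimal sketch of the idiom, assuming the list is not modified during the walk (otherwise skb_queue_walk_safe() is required):

	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb)
		pr_debug("skb %p len %u\n", skb, skb->len);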
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
- struct sk_buff *skb;
- struct iucv_sock *iucv;
- int notify_general_error = 0;
-
- if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
- notify_general_error = 1;
-
/* release may never happen from within CQ tasklet scope */
WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
- skb = skb_dequeue(&buf->skb_list);
- while (skb) {
- QETH_CARD_TEXT(buf->q->card, 5, "skbr");
- QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
- if (notify_general_error &&
- be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) {
- if (skb->sk) {
- iucv = iucv_sk(skb->sk);
- iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
- }
- }
- refcount_dec(&skb->users);
- dev_kfree_skb_any(skb);
- skb = skb_dequeue(&buf->skb_list);
- }
+ if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+ qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
+
+ __skb_queue_purge(&buf->skb_list);
}
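__skb_queue_purge() can stand in for the old dequeue loop only because qeth_fill_buffer() (further down) stops taking an extra reference per skb: the refcount_inc()/skb_queue_tail() pair becomes a bare __skb_queue_tail(), so the list holds the one and only reference.

	/* Ownership sketch after this patch:
	 *   __skb_queue_tail()  - TX path hands its reference to the list
	 *   __skb_queue_purge() - teardown drops exactly that reference
	 */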
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
@@ -1336,14 +1247,61 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
static void qeth_clean_channel(struct qeth_channel *channel)
{
+ struct ccw_device *cdev = channel->ccwdev;
int cnt;
QETH_DBF_TEXT(SETUP, 2, "freech");
+
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ cdev->handler = NULL;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+
for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
kfree(channel->iob[cnt].data);
kfree(channel->ccw);
}
+static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
+{
+ struct ccw_device *cdev = channel->ccwdev;
+ int cnt;
+
+ QETH_DBF_TEXT(SETUP, 2, "setupch");
+
+ channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+ if (!channel->ccw)
+ return -ENOMEM;
+ channel->state = CH_STATE_DOWN;
+ atomic_set(&channel->irq_pending, 0);
+ init_waitqueue_head(&channel->wait_q);
+
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ cdev->handler = qeth_irq;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+
+ if (!alloc_buffers)
+ return 0;
+
+ for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
+ channel->iob[cnt].data = kmalloc(QETH_BUFSIZE,
+ GFP_KERNEL | GFP_DMA);
+ if (channel->iob[cnt].data == NULL)
+ break;
+ channel->iob[cnt].state = BUF_STATE_FREE;
+ channel->iob[cnt].channel = channel;
+ channel->iob[cnt].callback = qeth_send_control_data_cb;
+ channel->iob[cnt].rc = 0;
+ }
+ if (cnt < QETH_CMD_BUFFER_NO) {
+ qeth_clean_channel(channel);
+ return -ENOMEM;
+ }
+ channel->io_buf_no = 0;
+ spin_lock_init(&channel->iob_lock);
+
+ return 0;
+}
+
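Publishing the IRQ handler in qeth_setup_channel() and withdrawing it in qeth_clean_channel(), both under the ccwdev lock, brackets the channel lifetime so qeth_irq() can never run against freed channel state. It also lets the allocation error path simply call qeth_clean_channel(), since kfree() of the never-allocated iob data pointers is a no-op. The idiom, condensed:

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;	/* publish */
	spin_unlock_irq(get_ccwdev_lock(cdev));

	/* ... channel is live ... */

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;		/* withdraw before freeing buffers */
	spin_unlock_irq(get_ccwdev_lock(cdev));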
static void qeth_set_single_write_queues(struct qeth_card *card)
{
if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
@@ -1421,6 +1379,7 @@ static void qeth_set_initial_options(struct qeth_card *card)
card->options.rx_sg_cb = QETH_RX_SG_CB;
card->options.isolation = ISOLATION_MODE_NONE;
card->options.cq = QETH_CQ_DISABLED;
+ card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}
static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -1494,7 +1453,7 @@ static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
CARD_BUS_ID(card), card->info.mcl_level);
}
-static struct qeth_card *qeth_alloc_card(void)
+static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
@@ -1503,13 +1462,18 @@ static struct qeth_card *qeth_alloc_card(void)
if (!card)
goto out;
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+
+ card->gdev = gdev;
+ dev_set_drvdata(&gdev->dev, card);
+ CARD_RDEV(card) = gdev->cdev[0];
+ CARD_WDEV(card) = gdev->cdev[1];
+ CARD_DDEV(card) = gdev->cdev[2];
if (qeth_setup_channel(&card->read, true))
goto out_ip;
if (qeth_setup_channel(&card->write, true))
goto out_channel;
if (qeth_setup_channel(&card->data, false))
goto out_data;
- card->options.layer2 = -1;
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
@@ -1519,22 +1483,21 @@ out_data:
out_channel:
qeth_clean_channel(&card->read);
out_ip:
+ dev_set_drvdata(&gdev->dev, NULL);
kfree(card);
out:
return NULL;
}
-static int qeth_clear_channel(struct qeth_channel *channel)
+static int qeth_clear_channel(struct qeth_card *card,
+ struct qeth_channel *channel)
{
- unsigned long flags;
- struct qeth_card *card;
int rc;
- card = CARD_FROM_CDEV(channel->ccwdev);
QETH_CARD_TEXT(card, 3, "clearch");
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc)
return rc;
@@ -1548,17 +1511,15 @@ static int qeth_clear_channel(struct qeth_channel *channel)
return 0;
}
-static int qeth_halt_channel(struct qeth_channel *channel)
+static int qeth_halt_channel(struct qeth_card *card,
+ struct qeth_channel *channel)
{
- unsigned long flags;
- struct qeth_card *card;
int rc;
- card = CARD_FROM_CDEV(channel->ccwdev);
QETH_CARD_TEXT(card, 3, "haltch");
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc)
return rc;
@@ -1576,9 +1537,9 @@ static int qeth_halt_channels(struct qeth_card *card)
int rc1 = 0, rc2 = 0, rc3 = 0;
QETH_CARD_TEXT(card, 3, "haltchs");
- rc1 = qeth_halt_channel(&card->read);
- rc2 = qeth_halt_channel(&card->write);
- rc3 = qeth_halt_channel(&card->data);
+ rc1 = qeth_halt_channel(card, &card->read);
+ rc2 = qeth_halt_channel(card, &card->write);
+ rc3 = qeth_halt_channel(card, &card->data);
if (rc1)
return rc1;
if (rc2)
@@ -1591,9 +1552,9 @@ static int qeth_clear_channels(struct qeth_card *card)
int rc1 = 0, rc2 = 0, rc3 = 0;
QETH_CARD_TEXT(card, 3, "clearchs");
- rc1 = qeth_clear_channel(&card->read);
- rc2 = qeth_clear_channel(&card->write);
- rc3 = qeth_clear_channel(&card->data);
+ rc1 = qeth_clear_channel(card, &card->read);
+ rc2 = qeth_clear_channel(card, &card->write);
+ rc3 = qeth_clear_channel(card, &card->data);
if (rc1)
return rc1;
if (rc2)
@@ -1652,7 +1613,6 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
char *rcd_buf;
int ret;
struct qeth_channel *channel = &card->data;
- unsigned long flags;
/*
* scan for RCD command in extended SenseID data
@@ -1666,11 +1626,11 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf);
channel->state = CH_STATE_RCD;
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
QETH_RCD_PARM, LPM_ANYPATH, 0,
QETH_RCD_TIMEOUT);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (!ret)
wait_event(card->wait_q,
(channel->state == CH_STATE_RCD_DONE ||
@@ -1823,30 +1783,29 @@ static void qeth_init_func_level(struct qeth_card *card)
}
}
-static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
- void (*idx_reply_cb)(struct qeth_channel *,
- struct qeth_cmd_buffer *))
+static int qeth_idx_activate_get_answer(struct qeth_card *card,
+ struct qeth_channel *channel,
+ void (*reply_cb)(struct qeth_card *,
+ struct qeth_channel *,
+ struct qeth_cmd_buffer *))
{
struct qeth_cmd_buffer *iob;
- unsigned long flags;
int rc;
- struct qeth_card *card;
QETH_DBF_TEXT(SETUP, 2, "idxanswr");
- card = CARD_FROM_CDEV(channel->ccwdev);
iob = qeth_get_buffer(channel);
if (!iob)
return -ENOMEM;
- iob->callback = idx_reply_cb;
+ iob->callback = reply_cb;
qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
wait_event(card->wait_q,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, QETH_TIMEOUT);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
@@ -1867,26 +1826,24 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
return rc;
}
-static int qeth_idx_activate_channel(struct qeth_channel *channel,
- void (*idx_reply_cb)(struct qeth_channel *,
- struct qeth_cmd_buffer *))
+static int qeth_idx_activate_channel(struct qeth_card *card,
+ struct qeth_channel *channel,
+ void (*reply_cb)(struct qeth_card *,
+ struct qeth_channel *,
+ struct qeth_cmd_buffer *))
{
- struct qeth_card *card;
struct qeth_cmd_buffer *iob;
- unsigned long flags;
__u16 temp;
__u8 tmp;
int rc;
struct ccw_dev_id temp_devid;
- card = CARD_FROM_CDEV(channel->ccwdev);
-
QETH_DBF_TEXT(SETUP, 2, "idxactch");
iob = qeth_get_buffer(channel);
if (!iob)
return -ENOMEM;
- iob->callback = idx_reply_cb;
+ iob->callback = reply_cb;
qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
iob->data);
if (channel == &card->write) {
@@ -1913,10 +1870,10 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
wait_event(card->wait_q,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, QETH_TIMEOUT);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
@@ -1938,7 +1895,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
return -ETIME;
}
- return qeth_idx_activate_get_answer(channel, idx_reply_cb);
+ return qeth_idx_activate_get_answer(card, channel, reply_cb);
}
static int qeth_peer_func_level(int level)
@@ -1950,10 +1907,10 @@ static int qeth_peer_func_level(int level)
return level;
}
-static void qeth_idx_write_cb(struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+static void qeth_idx_write_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
{
- struct qeth_card *card;
__u16 temp;
	QETH_DBF_TEXT(SETUP, 2, "idxwrcb");
@@ -1962,7 +1919,6 @@ static void qeth_idx_write_cb(struct qeth_channel *channel,
channel->state = CH_STATE_ACTIVATING;
goto out;
}
- card = CARD_FROM_CDEV(channel->ccwdev);
if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
@@ -1988,10 +1944,10 @@ out:
qeth_release_buffer(channel, iob);
}
-static void qeth_idx_read_cb(struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+static void qeth_idx_read_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
{
- struct qeth_card *card;
__u16 temp;
	QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
@@ -2000,7 +1956,6 @@ static void qeth_idx_read_cb(struct qeth_channel *channel,
goto out;
}
- card = CARD_FROM_CDEV(channel->ccwdev);
if (qeth_check_idx_response(card, iob->data))
goto out;
@@ -2049,7 +2004,7 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob)
{
qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
- iob->callback = qeth_release_buffer;
+ iob->callback = qeth_release_buffer_cb;
memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
&card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
@@ -2097,7 +2052,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
{
struct qeth_channel *channel = iob->channel;
int rc;
- unsigned long flags;
struct qeth_reply *reply = NULL;
unsigned long timeout, event_timeout;
struct qeth_ipa_cmd *cmd = NULL;
@@ -2130,26 +2084,26 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
qeth_prepare_control_data(card, len, iob);
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irq(&card->lock);
list_add_tail(&reply->list, &card->cmd_waiter_list);
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
timeout = jiffies + event_timeout;
QETH_CARD_TEXT(card, 6, "noirqpnd");
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, event_timeout);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
"ccw_device_start rc = %i\n",
dev_name(&channel->ccwdev->dev), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
- spin_lock_irqsave(&card->lock, flags);
+ spin_lock_irq(&card->lock);
list_del_init(&reply->list);
qeth_put_reply(reply);
- spin_unlock_irqrestore(&card->lock, flags);
+ spin_unlock_irq(&card->lock);
qeth_release_buffer(channel, iob);
atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
@@ -2177,9 +2131,9 @@ int qeth_send_control_data(struct qeth_card *card, int len,
time_err:
reply->rc = -ETIME;
- spin_lock_irqsave(&reply->card->lock, flags);
+ spin_lock_irq(&card->lock);
list_del_init(&reply->list);
- spin_unlock_irqrestore(&reply->card->lock, flags);
+ spin_unlock_irq(&card->lock);
atomic_inc(&reply->received);
rc = reply->rc;
qeth_put_reply(reply);
@@ -2198,7 +2152,6 @@ static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
memcpy(&card->token.cm_filter_r,
QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
- QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
@@ -2224,7 +2177,6 @@ static int qeth_cm_enable(struct qeth_card *card)
static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
-
struct qeth_cmd_buffer *iob;
QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");
@@ -2233,7 +2185,6 @@ static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
memcpy(&card->token.cm_connection_r,
QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
QETH_MPC_TOKEN_LENGTH);
- QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
@@ -2255,7 +2206,6 @@ static int qeth_cm_setup(struct qeth_card *card)
rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
qeth_cm_setup_cb, NULL);
return rc;
-
}
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
@@ -2284,7 +2234,7 @@ static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
if (dev->mtu)
new_mtu = dev->mtu;
/* default MTUs for first setup: */
- else if (card->options.layer2)
+ else if (IS_LAYER2(card))
new_mtu = ETH_DATA_LEN;
else
new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
@@ -2315,7 +2265,6 @@ static int qeth_get_mtu_outof_framesize(int framesize)
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
unsigned long data)
{
-
__u16 mtu, framesize;
__u16 len;
__u8 link_type;
@@ -2343,7 +2292,6 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
} else
card->info.link_type = 0;
QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
- QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
return 0;
}
@@ -2351,7 +2299,7 @@ static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
if (IS_OSN(card))
return QETH_PROT_OSN2;
- return (card->options.layer2 == 1) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
+ return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
}
static int qeth_ulp_enable(struct qeth_card *card)
@@ -2880,23 +2828,18 @@ static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
}
static void qeth_fill_ipacmd_header(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd, __u8 command,
- enum qeth_prot_versions prot)
+ struct qeth_ipa_cmd *cmd,
+ enum qeth_ipa_cmds command,
+ enum qeth_prot_versions prot)
{
- memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
cmd->hdr.command = command;
cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
/* cmd->hdr.seqno is set by qeth_send_control_data() */
cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port;
- if (card->options.layer2)
- cmd->hdr.prim_version_no = 2;
- else
- cmd->hdr.prim_version_no = 1;
+ cmd->hdr.prim_version_no = IS_LAYER2(card) ? 2 : 1;
cmd->hdr.param_count = 1;
cmd->hdr.prot_version = prot;
- cmd->hdr.ipa_supported = 0;
- cmd->hdr.ipa_enabled = 0;
}
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
@@ -3043,7 +2986,7 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
- return -0;
+ return 0;
default:
if (cmd->hdr.return_code) {
QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
@@ -3787,7 +3730,7 @@ EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
* Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * the fragmented part of the SKB. Returns zero for a linear SKB.
*/
-int qeth_get_elements_for_frags(struct sk_buff *skb)
+static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
int cnt, elements = 0;
@@ -3800,9 +3743,17 @@ int qeth_get_elements_for_frags(struct sk_buff *skb)
}
return elements;
}
-EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
-static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
+/**
+ * qeth_count_elements() - Counts the number of QDIO buffer elements needed
+ * to transmit an skb.
+ * @skb: the skb to operate on.
+ * @data_offset: skip this part of the skb's linear data
+ *
+ * Returns the number of pages, and thus QDIO buffer elements, needed to map the
+ * skb's data (both its linear part and paged fragments).
+ */
+unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
{
unsigned int elements = qeth_get_elements_for_frags(skb);
addr_t end = (addr_t)skb->data + skb_headlen(skb);
@@ -3812,54 +3763,10 @@ static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset)
elements += qeth_get_elements_for_range(start, end);
return elements;
}
+EXPORT_SYMBOL_GPL(qeth_count_elements);
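Each QDIO buffer element maps one contiguous range within a single 4 KiB page, so the element count for a data range is just the number of pages it touches. A worked example under that assumption:

	/* A 100-byte linear part starting 8 bytes before a page
	 * boundary touches two pages and thus needs two elements:
	 *
	 *   start = 0x...ff8, end = start + 100
	 *   elements = PFN_UP(end) - PFN_DOWN(start) = 2
	 */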
-/**
- * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags.
- * @card: qeth card structure, to check max. elems.
- * @skb: SKB address
- * @extra_elems: extra elems needed, to check against max.
- * @data_offset: range starts at skb->data + data_offset
- *
- * Returns the number of pages, and thus QDIO buffer elements, needed to cover
- * skb data, including linear part and fragments. Checks if the result plus
- * extra_elems fits under the limit for the card. Returns 0 if it does not.
- * Note: extra_elems is not included in the returned result.
- */
-int qeth_get_elements_no(struct qeth_card *card,
- struct sk_buff *skb, int extra_elems, int data_offset)
-{
- int elements = qeth_count_elements(skb, data_offset);
-
- if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
- QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
- "(Number=%d / Length=%d). Discarded.\n",
- elements + extra_elems, skb->len);
- return 0;
- }
- return elements;
-}
-EXPORT_SYMBOL_GPL(qeth_get_elements_no);
-
-int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
-{
- int hroom, inpage, rest;
-
- if (((unsigned long)skb->data & PAGE_MASK) !=
- (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
- hroom = skb_headroom(skb);
- inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
- rest = len - inpage;
- if (rest > hroom)
- return 1;
- memmove(skb->data - rest, skb->data, skb_headlen(skb));
- skb->data -= rest;
- skb->tail -= rest;
- *hdr = (struct qeth_hdr *)skb->data;
- QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
+#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
+ MAX_TCP_HEADER)
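The cache object has to cover the worst case that the fallback path in qeth_add_hw_header() copies into it: a TSO HW header followed by the skb's protocol headers, bounded by MAX_TCP_HEADER. Layout sketch:

	/* one cache object:
	 *   [ struct qeth_hdr_tso | copied L2/L3/L4 headers ]
	 * The -E2BIG guard below rejects anything larger before
	 * kmem_cache_alloc() is even attempted.
	 */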
/**
* qeth_add_hw_header() - add a HW header to an skb.
@@ -3894,7 +3801,11 @@ check_layout:
if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
/* Push HW header into same page as first protocol header. */
push_ok = true;
- __elements = qeth_count_elements(skb, 0);
+ /* ... but TSO always needs a separate element for headers: */
+ if (skb_is_gso(skb))
+ __elements = 1 + qeth_count_elements(skb, proto_len);
+ else
+ __elements = qeth_count_elements(skb, 0);
} else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) {
/* Push HW header into a new page. */
push_ok = true;
@@ -3935,6 +3846,8 @@ check_layout:
return hdr_len;
}
/* fall back */
+ if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
+ return -E2BIG;
*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!*hdr)
return -ENOMEM;
@@ -4026,8 +3939,7 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
bool is_first_elem = true;
int flush_cnt = 0;
- refcount_inc(&skb->users);
- skb_queue_tail(&buf->skb_list, skb);
+ __skb_queue_tail(&buf->skb_list, skb);
/* build dedicated header element */
if (hd_len) {
@@ -4176,6 +4088,97 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
+void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
+ struct sk_buff *skb, unsigned int proto_len)
+{
+ struct qeth_hdr_ext_tso *ext = &hdr->ext;
+
+ ext->hdr_tot_len = sizeof(*ext);
+ ext->imb_hdr_no = 1;
+ ext->hdr_type = 1;
+ ext->hdr_version = 1;
+ ext->hdr_len = 28;
+ ext->payload_len = payload_len;
+ ext->mss = skb_shinfo(skb)->gso_size;
+ ext->dg_hdr_len = proto_len;
+}
+EXPORT_SYMBOL_GPL(qeth_fill_tso_ext);
+
+int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue, int ipv, int cast_type,
+ void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
+ struct sk_buff *skb, int ipv, int cast_type,
+ unsigned int data_len))
+{
+ unsigned int proto_len, hw_hdr_len;
+ unsigned int frame_len = skb->len;
+ bool is_tso = skb_is_gso(skb);
+ unsigned int data_offset = 0;
+ struct qeth_hdr *hdr = NULL;
+ unsigned int hd_len = 0;
+ unsigned int elements;
+ int push_len, rc;
+ bool is_sg;
+
+ if (is_tso) {
+ hw_hdr_len = sizeof(struct qeth_hdr_tso);
+ proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ } else {
+ hw_hdr_len = sizeof(struct qeth_hdr);
+ proto_len = IS_IQD(card) ? ETH_HLEN : 0;
+ }
+
+ rc = skb_cow_head(skb, hw_hdr_len);
+ if (rc)
+ return rc;
+
+ push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
+ &elements);
+ if (push_len < 0)
+ return push_len;
+ if (is_tso || !push_len) {
+ /* HW header needs its own buffer element. */
+ hd_len = hw_hdr_len + proto_len;
+ data_offset = push_len + proto_len;
+ }
+ memset(hdr, 0, hw_hdr_len);
+ fill_header(card, hdr, skb, ipv, cast_type, frame_len);
+ if (is_tso)
+ qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
+ frame_len - proto_len, skb, proto_len);
+
+ is_sg = skb_is_nonlinear(skb);
+ if (IS_IQD(card)) {
+ rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
+ hd_len);
+ } else {
+ /* TODO: drop skb_orphan() once TX completion is fast enough */
+ skb_orphan(skb);
+ rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
+ hd_len, elements);
+ }
+
+ if (!rc) {
+ if (card->options.performance_stats) {
+ card->perf_stats.buf_elements_sent += elements;
+ if (is_sg)
+ card->perf_stats.sg_skbs_sent++;
+ if (is_tso) {
+ card->perf_stats.large_send_bytes += frame_len;
+ card->perf_stats.large_send_cnt++;
+ }
+ }
+ } else {
+ if (!push_len)
+ kmem_cache_free(qeth_core_header_cache, hdr);
+ if (rc == -EBUSY)
+ /* roll back to ETH header */
+ skb_pull(skb, push_len);
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_xmit);
+
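qeth_xmit() consolidates the per-discipline TX paths; a discipline now contributes only its header-fill routine. A sketch of a caller, mirroring what the L2 code further down in this patch does:

	rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
		       cast_type, qeth_l2_fill_header);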
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@@ -4243,8 +4246,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
if (qeth_setadpparms_inspect_rc(cmd))
return 0;
- if (!card->options.layer2 ||
- !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
+ if (IS_LAYER3(card) || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
ether_addr_copy(card->dev->dev_addr,
cmd->data.setadapterparms.data.change_addr.addr);
card->info.mac_bits |= QETH_LAYER2_MAC_READ;
@@ -4598,9 +4600,9 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
return -EOPNOTSUPP;
if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
- (!card->options.layer2)) {
+ IS_LAYER3(card))
return -EOPNOTSUPP;
- }
+
/* skip 4 bytes (data_len struct member) to get req_len */
if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
return -EFAULT;
@@ -5044,6 +5046,7 @@ static void qeth_core_free_card(struct qeth_card *card)
qeth_clean_channel(&card->data);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
+ dev_set_drvdata(&card->gdev->dev, NULL);
kfree(card);
}
@@ -5123,7 +5126,7 @@ retriable:
qeth_determine_capabilities(card);
qeth_init_tokens(card);
qeth_init_func_level(card);
- rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
+ rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb);
if (rc == -ERESTARTSYS) {
QETH_DBF_TEXT(SETUP, 2, "break2");
return rc;
@@ -5134,7 +5137,7 @@ retriable:
else
goto retry;
}
- rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
+ rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb);
if (rc == -ERESTARTSYS) {
QETH_DBF_TEXT(SETUP, 2, "break3");
return rc;
@@ -5158,13 +5161,14 @@ retriable:
if (rc == IPA_RC_LAN_OFFLINE) {
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
- card->lan_online = 0;
+ netif_carrier_off(card->dev);
} else {
rc = -ENODEV;
goto out;
}
- } else
- card->lan_online = 1;
+ } else {
+ netif_carrier_on(card->dev);
+ }
card->options.ipa4.supported_funcs = 0;
card->options.ipa6.supported_funcs = 0;
@@ -5421,6 +5425,21 @@ static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
return cmd->hdr.return_code;
}
+static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_ipa_caps *caps = reply->param;
+
+ if (qeth_setassparms_inspect_rc(cmd))
+ return 0;
+
+ caps->supported = cmd->data.setassparms.data.caps.supported;
+ caps->enabled = cmd->data.setassparms.data.caps.enabled;
+ return 0;
+}
+
int qeth_setassparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@@ -5456,8 +5475,6 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
cmd->data.setassparms.hdr.assist_no = ipa_func;
cmd->data.setassparms.hdr.length = 8 + len;
cmd->data.setassparms.hdr.command_code = cmd_code;
- cmd->data.setassparms.hdr.return_code = 0;
- cmd->data.setassparms.hdr.seq_no = 0;
}
return iob;
@@ -5560,11 +5577,11 @@ static int qeth_register_dbf_views(void)
return 0;
}
+static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
+
int qeth_core_load_discipline(struct qeth_card *card,
enum qeth_discipline_id discipline)
{
- int rc = 0;
-
mutex_lock(&qeth_mod_mutex);
switch (discipline) {
case QETH_DISCIPLINE_LAYER3:
@@ -5578,22 +5595,25 @@ int qeth_core_load_discipline(struct qeth_card *card,
default:
break;
}
+ mutex_unlock(&qeth_mod_mutex);
if (!card->discipline) {
dev_err(&card->gdev->dev, "There is no kernel module to "
"support discipline %d\n", discipline);
- rc = -EINVAL;
+ return -EINVAL;
}
- mutex_unlock(&qeth_mod_mutex);
- return rc;
+
+ card->options.layer = discipline;
+ return 0;
}
void qeth_core_free_discipline(struct qeth_card *card)
{
- if (card->options.layer2)
+ if (IS_LAYER2(card))
symbol_put(qeth_l2_discipline);
else
symbol_put(qeth_l3_discipline);
+ card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
card->discipline = NULL;
}
@@ -5731,7 +5751,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
struct device *dev;
int rc;
enum qeth_discipline_id enforced_disc;
- unsigned long flags;
char dbf_name[DBF_NAME_LEN];
QETH_DBF_TEXT(SETUP, 2, "probedev");
@@ -5742,7 +5761,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
- card = qeth_alloc_card();
+ card = qeth_alloc_card(gdev);
if (!card) {
QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
rc = -ENOMEM;
@@ -5758,15 +5777,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
goto err_card;
}
- card->read.ccwdev = gdev->cdev[0];
- card->write.ccwdev = gdev->cdev[1];
- card->data.ccwdev = gdev->cdev[2];
- dev_set_drvdata(&gdev->dev, card);
- card->gdev = gdev;
- gdev->cdev[0]->handler = qeth_irq;
- gdev->cdev[1]->handler = qeth_irq;
- gdev->cdev[2]->handler = qeth_irq;
-
qeth_setup_card(card);
qeth_update_from_chp_desc(card);
@@ -5797,9 +5807,9 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
}
- write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+ write_lock_irq(&qeth_core_card_list.rwlock);
list_add_tail(&card->list, &qeth_core_card_list.list);
- write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+ write_unlock_irq(&qeth_core_card_list.rwlock);
return 0;
err_disc:
@@ -5815,7 +5825,6 @@ err_dev:
static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
- unsigned long flags;
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
QETH_DBF_TEXT(SETUP, 2, "removedv");
@@ -5825,12 +5834,11 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
qeth_core_free_discipline(card);
}
- write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
+ write_lock_irq(&qeth_core_card_list.rwlock);
list_del(&card->list);
- write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+ write_unlock_irq(&qeth_core_card_list.rwlock);
free_netdev(card->dev);
qeth_core_free_card(card);
- dev_set_drvdata(&gdev->dev, NULL);
put_device(&gdev->dev);
}
@@ -6123,7 +6131,7 @@ void qeth_core_get_drvinfo(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
- strlcpy(info->driver, card->options.layer2 ? "qeth_l2" : "qeth_l3",
+ strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
sizeof(info->driver));
strlcpy(info->version, "1.0", sizeof(info->version));
strlcpy(info->fw_version, card->info.mcl_level,
@@ -6434,27 +6442,85 @@ static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
return rc ? -EIO : 0;
}
-static int qeth_set_ipa_tso(struct qeth_card *card, int on)
+static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_tso_start_data *tso_data = reply->param;
+
+ if (qeth_setassparms_inspect_rc(cmd))
+ return 0;
+
+ tso_data->mss = cmd->data.setassparms.data.tso.mss;
+ tso_data->supported = cmd->data.setassparms.data.tso.supported;
+ return 0;
+}
+
+static int qeth_set_tso_off(struct qeth_card *card,
+ enum qeth_prot_versions prot)
{
+ return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
+ IPA_CMD_ASS_STOP, 0, prot);
+}
+
+static int qeth_set_tso_on(struct qeth_card *card,
+ enum qeth_prot_versions prot)
+{
+ struct qeth_tso_start_data tso_data;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_caps caps;
int rc;
- QETH_CARD_TEXT(card, 3, "sttso");
+ iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
+ IPA_CMD_ASS_START, 0, prot);
+ if (!iob)
+ return -ENOMEM;
- if (on) {
- rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
- IPA_CMD_ASS_START, 0);
- if (rc) {
- dev_warn(&card->gdev->dev,
- "Starting outbound TCP segmentation offload for %s failed\n",
- QETH_CARD_IFNAME(card));
- return -EIO;
- }
- dev_info(&card->gdev->dev, "Outbound TSO enabled\n");
- } else {
- rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
- IPA_CMD_ASS_STOP, 0);
+ rc = qeth_send_setassparms(card, iob, 0, 0 /* unused */,
+ qeth_start_tso_cb, &tso_data);
+ if (rc)
+ return rc;
+
+ if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
+ qeth_set_tso_off(card, prot);
+ return -EOPNOTSUPP;
}
- return rc;
+
+ iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
+ IPA_CMD_ASS_ENABLE, sizeof(caps), prot);
+ if (!iob) {
+ qeth_set_tso_off(card, prot);
+ return -ENOMEM;
+ }
+
+ /* enable TSO capability */
+ caps.supported = 0;
+ caps.enabled = QETH_IPA_LARGE_SEND_TCP;
+ rc = qeth_send_setassparms(card, iob, sizeof(caps), (long) &caps,
+ qeth_setassparms_get_caps_cb, &caps);
+ if (rc) {
+ qeth_set_tso_off(card, prot);
+ return rc;
+ }
+
+ if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
+ !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
+ qeth_set_tso_off(card, prot);
+ return -EOPNOTSUPP;
+ }
+
+ dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
+ tso_data.mss);
+ return 0;
+}
+
+static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
+ enum qeth_prot_versions prot)
+{
+ int rc = on ? qeth_set_tso_on(card, prot) :
+ qeth_set_tso_off(card, prot);
+
+ return rc ? -EIO : 0;
}
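Enabling TSO is now a real capability negotiation rather than a fire-and-forget START. The sequence, condensed (any failure rolls back via qeth_set_tso_off()):

	/* 1. IPA_CMD_ASS_START  -> adapter returns tso_data.{mss, supported}
	 * 2. bail out unless mss != 0 and QETH_IPA_LARGE_SEND_TCP is offered
	 * 3. IPA_CMD_ASS_ENABLE with caps.enabled = QETH_IPA_LARGE_SEND_TCP
	 * 4. verify the reply reports the cap as supported and enabled
	 */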
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
@@ -6481,7 +6547,7 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
}
#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
- NETIF_F_IPV6_CSUM)
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO6)
/**
* qeth_enable_hw_features() - (Re-)Enable HW functions for device features
* @dev: a net_device
@@ -6531,11 +6597,18 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
if (rc)
changed ^= NETIF_F_RXCSUM;
}
- if ((changed & NETIF_F_TSO)) {
- rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0);
+ if (changed & NETIF_F_TSO) {
+ rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
+ QETH_PROT_IPV4);
if (rc)
changed ^= NETIF_F_TSO;
}
+ if (changed & NETIF_F_TSO6) {
+ rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
+ QETH_PROT_IPV6);
+ if (rc)
+ changed ^= NETIF_F_TSO6;
+ }
/* everything changed successfully? */
if ((dev->features ^ features) == changed)
@@ -6561,6 +6634,8 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
features &= ~NETIF_F_RXCSUM;
if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
features &= ~NETIF_F_TSO;
+ if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
+ features &= ~NETIF_F_TSO6;
/* if the card isn't up, remove features that require hw changes */
if (card->state == CARD_STATE_DOWN ||
card->state == CARD_STATE_RECOVER)
@@ -6602,9 +6677,7 @@ static int __init qeth_core_init(void)
pr_info("loading core functions\n");
INIT_LIST_HEAD(&qeth_core_card_list.list);
- INIT_LIST_HEAD(&qeth_dbf_list);
rwlock_init(&qeth_core_card_list.rwlock);
- mutex_init(&qeth_mod_mutex);
qeth_wq = create_singlethread_workqueue("qeth_wq");
if (!qeth_wq) {
@@ -6619,8 +6692,10 @@ static int __init qeth_core_init(void)
rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
if (rc)
goto register_err;
- qeth_core_header_cache = kmem_cache_create("qeth_hdr",
- sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
+ qeth_core_header_cache =
+ kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
+ roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
+ 0, NULL);
if (!qeth_core_header_cache) {
rc = -ENOMEM;
goto slab_err;
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 5bcb8dafc3ee..e891c0b52f4c 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -148,10 +148,10 @@ EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
struct ipa_rc_msg {
enum qeth_ipa_return_codes rc;
- char *msg;
+ const char *msg;
};
-static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
+static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_SUCCESS, "success"},
{IPA_RC_NOTSUPP, "Command not supported"},
{IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
@@ -219,23 +219,23 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
-char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
+const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
{
- int x = 0;
- qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
- sizeof(struct ipa_rc_msg) - 1].rc = rc;
- while (qeth_ipa_rc_msg[x].rc != rc)
- x++;
+ int x;
+
+ for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
+ if (qeth_ipa_rc_msg[x].rc == rc)
+ return qeth_ipa_rc_msg[x].msg;
return qeth_ipa_rc_msg[x].msg;
}
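The old lookup wrote rc into the table's final "unknown" entry so that the unbounded while loop was guaranteed to terminate, a trick that is impossible once the table is const. The bounded loop keeps the sentinel semantics:

	/* Sentinel pattern: the search stops one short of the catch-all,
	 * so a miss leaves x == ARRAY_SIZE(table) - 1 ("unknown").
	 */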
struct ipa_cmd_names {
enum qeth_ipa_cmds cmd;
- char *name;
+ const char *name;
};
-static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
+static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_STARTLAN, "startlan"},
{IPA_CMD_STOPLAN, "stoplan"},
{IPA_CMD_SETVMAC, "setvmac"},
@@ -267,13 +267,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_UNKNOWN, "unknown"},
};
-char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
+const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
{
- int x = 0;
- qeth_ipa_cmd_names[
- sizeof(qeth_ipa_cmd_names) /
- sizeof(struct ipa_cmd_names)-1].cmd = cmd;
- while (qeth_ipa_cmd_names[x].cmd != cmd)
- x++;
+ int x;
+
+ for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++)
+ if (qeth_ipa_cmd_names[x].cmd == cmd)
+ return qeth_ipa_cmd_names[x].name;
return qeth_ipa_cmd_names[x].name;
}
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index aa8b9196b089..e85090467afe 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -56,6 +56,21 @@ static inline bool qeth_intparm_is_iob(unsigned long intparm)
#define IPA_CMD_INITIATOR_OSA_REPLY 0x81
#define IPA_CMD_PRIM_VERSION_NO 0x01
+struct qeth_ipa_caps {
+ u32 supported;
+ u32 enabled;
+};
+
+static inline bool qeth_ipa_caps_supported(struct qeth_ipa_caps *caps, u32 mask)
+{
+ return (caps->supported & mask) == mask;
+}
+
+static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask)
+{
+ return (caps->enabled & mask) == mask;
+}
+
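Both helpers test the full mask rather than any single bit, which is exactly what the TSO negotiation in qeth_set_tso_on() relies on. Usage sketch:

	bool tso_active =
		qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) &&
		qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP);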
enum qeth_card_types {
QETH_CARD_TYPE_OSD = 1,
QETH_CARD_TYPE_IQD = 5,
@@ -405,14 +420,25 @@ struct qeth_checksum_cmd {
__u32 enabled;
} __packed;
+enum qeth_ipa_large_send_caps {
+ QETH_IPA_LARGE_SEND_TCP = 0x00000001,
+};
+
+struct qeth_tso_start_data {
+ u32 mss;
+ u32 supported;
+};
+
/* SETASSPARMS IPA Command: */
struct qeth_ipacmd_setassparms {
struct qeth_ipacmd_setassparms_hdr hdr;
union {
__u32 flags_32bit;
+ struct qeth_ipa_caps caps;
struct qeth_checksum_cmd chksum;
struct qeth_arp_cache_entry add_arp_entry;
struct qeth_arp_query_data query_arp;
+ struct qeth_tso_start_data tso;
__u8 ip[16];
} data;
} __attribute__ ((packed));
@@ -797,8 +823,8 @@ enum qeth_ipa_arp_return_codes {
QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
};
-extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
-extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
+extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
+extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
sizeof(struct qeth_ipacmd_setassparms_hdr))
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 25d0be25bcb3..30f61608fa22 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -31,10 +31,9 @@ static ssize_t qeth_dev_state_show(struct device *dev,
case CARD_STATE_SOFTSETUP:
return sprintf(buf, "SOFTSETUP\n");
case CARD_STATE_UP:
- if (card->lan_online)
- return sprintf(buf, "UP (LAN ONLINE)\n");
- else
- return sprintf(buf, "UP (LAN OFFLINE)\n");
+ return sprintf(buf, "UP (LAN %s)\n",
+ netif_carrier_ok(card->dev) ? "ONLINE" :
+ "OFFLINE");
case CARD_STATE_RECOVER:
return sprintf(buf, "RECOVER\n");
default:
@@ -228,7 +227,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (sysfs_streq(buf, "prio_queueing_vlan")) {
- if (!card->options.layer2) {
+ if (IS_LAYER3(card)) {
rc = -ENOTSUPP;
goto out;
}
@@ -379,7 +378,7 @@ static ssize_t qeth_dev_layer2_show(struct device *dev,
if (!card)
return -EINVAL;
- return sprintf(buf, "%i\n", card->options.layer2);
+ return sprintf(buf, "%i\n", card->options.layer);
}
static ssize_t qeth_dev_layer2_store(struct device *dev,
@@ -413,7 +412,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
goto out;
}
- if (card->options.layer2 == newdis)
+ if (card->options.layer == newdis)
goto out;
if (card->info.layer_enforced) {
/* fixed layer, can't switch */
@@ -432,8 +431,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
card->discipline->remove(card->gdev);
qeth_core_free_discipline(card);
- card->options.layer2 = -1;
-
free_netdev(card->dev);
card->dev = ndev;
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index b5e38531733f..23aaf373f631 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -193,15 +193,25 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
return RTN_UNICAST;
}
-static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
- int cast_type, unsigned int data_len)
+static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+ struct sk_buff *skb, int ipv, int cast_type,
+ unsigned int data_len)
{
- struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+ struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
- memset(hdr, 0, sizeof(struct qeth_hdr));
- hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
hdr->hdr.l2.pkt_length = data_len;
+ if (skb_is_gso(skb)) {
+ hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
+ } else {
+ hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+ }
+ }
+
	/* set byte 3 to casting flags */
if (cast_type == RTN_MULTICAST)
hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
@@ -641,84 +651,43 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
qeth_promisc_to_bridge(card);
}
-static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int cast_type, int ipv)
+static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue)
{
- const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0;
- const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
- unsigned int frame_len = skb->len;
- unsigned int data_offset = 0;
- struct qeth_hdr *hdr = NULL;
+ struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data;
+ addr_t end = (addr_t)(skb->data + sizeof(*hdr));
+ addr_t start = (addr_t)skb->data;
+ unsigned int elements = 0;
unsigned int hd_len = 0;
- unsigned int elements;
- int push_len, rc;
- bool is_sg;
+ int rc;
- rc = skb_cow_head(skb, hw_hdr_len);
- if (rc)
- return rc;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
- push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
- &elements);
- if (push_len < 0)
- return push_len;
- if (!push_len) {
- /* HW header needs its own buffer element. */
- hd_len = hw_hdr_len + proto_len;
- data_offset = proto_len;
- }
- qeth_l2_fill_header(hdr, skb, cast_type, frame_len);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
+ if (qeth_get_elements_for_range(start, end) > 1) {
+ /* Misaligned HW header, move it to its own buffer element. */
+ hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+ if (!hdr)
+ return -ENOMEM;
+ hd_len = sizeof(*hdr);
+ skb_copy_from_linear_data(skb, (char *)hdr, hd_len);
+ elements++;
}
- is_sg = skb_is_nonlinear(skb);
- if (IS_IQD(card)) {
- rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
- hd_len);
- } else {
- /* TODO: drop skb_orphan() once TX completion is fast enough */
- skb_orphan(skb);
- rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
- hd_len, elements);
+ elements += qeth_count_elements(skb, hd_len);
+ if (elements > QETH_MAX_BUFFER_ELEMENTS(card)) {
+ rc = -E2BIG;
+ goto out;
}
- if (!rc) {
- if (card->options.performance_stats) {
- card->perf_stats.buf_elements_sent += elements;
- if (is_sg)
- card->perf_stats.sg_skbs_sent++;
- }
- } else {
- if (!push_len)
- kmem_cache_free(qeth_core_header_cache, hdr);
- if (rc == -EBUSY)
- /* roll back to ETH header */
- skb_pull(skb, push_len);
- }
+ rc = qeth_do_send_packet(card, queue, skb, hdr, hd_len, hd_len,
+ elements);
+out:
+ if (rc && hd_len)
+ kmem_cache_free(qeth_core_header_cache, hdr);
return rc;
}
-static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue)
-{
- unsigned int elements;
- struct qeth_hdr *hdr;
-
- if (skb->protocol == htons(ETH_P_IPV6))
- return -EPROTONOSUPPORT;
-
- hdr = (struct qeth_hdr *)skb->data;
- elements = qeth_get_elements_no(card, skb, 0, 0);
- if (!elements)
- return -E2BIG;
- if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr)))
- return -EINVAL;
- return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements);
-}
-
static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -729,7 +698,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
int tx_bytes = skb->len;
int rc;
- if ((card->state != CARD_STATE_UP) || !card->lan_online) {
+ if (card->state != CARD_STATE_UP) {
card->stats.tx_carrier_errors++;
goto tx_drop;
}
@@ -745,7 +714,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
if (IS_OSN(card))
rc = qeth_l2_xmit_osn(card, skb, queue);
else
- rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv);
+ rc = qeth_xmit(card, skb, queue, ipv, cast_type,
+ qeth_l2_fill_header);
if (!rc) {
card->stats.tx_packets++;
@@ -789,7 +759,10 @@ static int __qeth_l2_open(struct net_device *dev)
if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
napi_enable(&card->napi);
+ local_bh_disable();
napi_schedule(&card->napi);
+ /* kick-start the NAPI softirq: */
+ local_bh_enable();
} else
rc = -EIO;
return rc;
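napi_schedule() only marks NET_RX_SOFTIRQ pending; from process context nothing would run it until the next interrupt or a ksoftirqd wakeup. Raising it inside a BH-disabled section forces the pending softirq to execute on the matching enable, so polling starts immediately:

	local_bh_disable();
	napi_schedule(&card->napi);
	local_bh_enable();	/* executes the pending NET_RX softirq now */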
@@ -837,7 +810,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
}
INIT_LIST_HEAD(&card->vid_list);
hash_init(card->mac_htable);
- card->options.layer2 = 1;
card->info.hwtrap = 0;
qeth_l2_vnicc_set_defaults(card);
return 0;
@@ -929,6 +901,20 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->hw_features |= NETIF_F_RXCSUM;
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
+ if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
+ card->dev->hw_features |= NETIF_F_TSO;
+ card->dev->vlan_features |= NETIF_F_TSO;
+ }
+ if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
+ card->dev->hw_features |= NETIF_F_TSO6;
+ card->dev->vlan_features |= NETIF_F_TSO6;
+ }
+
+ if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
+ netif_set_gso_max_size(card->dev,
+ PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
+ }
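The GSO limit follows from the QDIO buffer geometry: one element is reserved for the HW header, leaving QDIO_MAX_ELEMENTS_PER_BUFFER - 1 page-sized elements for payload. Assuming the usual 16 elements per buffer and 4 KiB pages:

	/* gso_max_size = PAGE_SIZE * (16 - 1) = 61440 bytes per GSO skb */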
qeth_l2_request_initial_mac(card);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
@@ -1029,10 +1015,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
card->state = CARD_STATE_SOFTSETUP;
- if (card->lan_online)
- netif_carrier_on(card->dev);
- else
- netif_carrier_off(card->dev);
qeth_set_allowed_threads(card, 0xffffffff, 0);
@@ -1178,9 +1160,6 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
- if (gdev->state == CCWGROUP_OFFLINE)
- goto out;
-
if (card->state == CARD_STATE_RECOVER) {
rc = __qeth_l2_set_online(card->gdev, 1);
if (rc) {
@@ -1190,7 +1169,7 @@ static int qeth_l2_pm_resume(struct ccwgroup_device *gdev)
}
} else
rc = __qeth_l2_set_online(card->gdev, 0);
-out:
+
qeth_set_allowed_threads(card, 0xffffffff, 0);
netif_device_attach(card->dev);
if (rc)
@@ -1240,7 +1219,6 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob)
{
struct qeth_channel *channel = iob->channel;
- unsigned long flags;
int rc = 0;
QETH_CARD_TEXT(card, 5, "osndctrd");
@@ -1249,10 +1227,10 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
qeth_prepare_control_data(card, len, iob);
QETH_CARD_TEXT(card, 6, "osnoirqp");
- spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
- spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
"ccw_device_start rc = %i\n", rc);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ada258c01a08..0b161cc1fd2e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -33,7 +33,6 @@
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_fib.h>
-#include <net/ip6_checksum.h>
#include <net/iucv/af_iucv.h>
#include <linux/hashtable.h>
@@ -1349,6 +1348,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
int budget, int *done)
{
+ struct net_device *dev = card->dev;
int work_done = 0;
struct sk_buff *skb;
struct qeth_hdr *hdr;
@@ -1370,11 +1370,10 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
magic = *(__u16 *)skb->data;
if ((card->info.type == QETH_CARD_TYPE_IQD) &&
(magic == ETH_P_AF_IUCV)) {
- skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
len = skb->len;
- card->dev->header_ops->create(skb, card->dev, 0,
- card->dev->dev_addr, "FAKELL", len);
- skb_reset_mac_header(skb);
+ dev_hard_header(skb, dev, ETH_P_AF_IUCV,
+ dev->dev_addr, "FAKELL", len);
+ skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb(skb);
} else {
qeth_l3_rebuild_skb(card, skb, hdr);
@@ -1983,39 +1982,38 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
rcu_read_unlock();
/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
- if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
- return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
- RTN_MULTICAST : RTN_UNICAST;
- else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
+ switch (qeth_get_ip_version(skb)) {
+ case 4:
return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
-
- /* ... and MAC address */
- if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
- return RTN_BROADCAST;
- if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
- return RTN_MULTICAST;
-
- /* default to unicast */
- return RTN_UNICAST;
+ case 6:
+ return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
+ RTN_MULTICAST : RTN_UNICAST;
+ default:
+ /* ... and MAC address */
+ if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
+ skb->dev->broadcast))
+ return RTN_BROADCAST;
+ if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+ return RTN_MULTICAST;
+ /* default to unicast */
+ return RTN_UNICAST;
+ }
}
static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
unsigned int data_len)
{
char daddr[16];
- struct af_iucv_trans_hdr *iucv_hdr;
- memset(hdr, 0, sizeof(struct qeth_hdr));
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
hdr->hdr.l3.length = data_len;
hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
- iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN);
memset(daddr, 0, sizeof(daddr));
daddr[0] = 0xfe;
daddr[1] = 0x80;
- memcpy(&daddr[8], iucv_hdr->destUserID, 8);
+ memcpy(&daddr[8], iucv_trans_hdr(skb)->destUserID, 8);
memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
}
@@ -2034,26 +2032,33 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type,
unsigned int data_len)
{
- memset(hdr, 0, sizeof(struct qeth_hdr));
- hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+ struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
+
hdr->hdr.l3.length = data_len;
- /*
- * before we're going to overwrite this location with next hop ip.
- * v6 uses passthrough, v4 sets the tag in the QDIO header.
- */
- if (skb_vlan_tag_present(skb)) {
- if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
- hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
- else
- hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
- hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
+ if (skb_is_gso(skb)) {
+ hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO;
+ } else {
+ hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
+ /* some HW requires combined L3+L4 csum offload: */
+ if (ipv == 4)
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
+ if (card->options.performance_stats)
+ card->perf_stats.tx_csum++;
+ }
}
- if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) {
- qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
- if (card->options.performance_stats)
- card->perf_stats.tx_csum++;
+ if (ipv == 4 || IS_IQD(card)) {
+ /* NETIF_F_HW_VLAN_CTAG_TX */
+ if (skb_vlan_tag_present(skb)) {
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME;
+ hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
+ }
+ } else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG;
+ hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI);
}
/* OSA only: */
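The TX path now distinguishes two VLAN cases: for IPv4 or IQD devices the tag comes from the skb's offload metadata (NETIF_F_HW_VLAN_CTAG_TX), otherwise the in-band 802.1Q tag is read straight out of the frame via vlan_eth_hdr(). A self-contained model of the in-band case (the driver stores the whole TCI; masking out the VID here is just for illustration):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN        6
#define ETH_P_8021Q     0x8100

/* Layout of an 802.1Q-tagged Ethernet header, as vlan_eth_hdr() sees it. */
struct vlan_ethhdr {
        uint8_t  h_dest[ETH_ALEN];
        uint8_t  h_source[ETH_ALEN];
        uint16_t h_vlan_proto;          /* big-endian; 0x8100 when tagged */
        uint16_t h_vlan_TCI;            /* PCP(3) | DEI(1) | VID(12) */
        uint16_t h_vlan_encapsulated_proto;
} __attribute__((packed));

int main(void)
{
        uint8_t frame[18] = { 0 };
        struct vlan_ethhdr veth;

        frame[12] = 0x81; frame[13] = 0x00;     /* TPID: 802.1Q */
        frame[14] = 0x20; frame[15] = 0x2a;     /* PCP=1, VID=42 */
        memcpy(&veth, frame, sizeof(veth));

        if (ntohs(veth.h_vlan_proto) == ETH_P_8021Q)
                printf("vid=%u\n", ntohs(veth.h_vlan_TCI) & 0x0fff); /* vid=42 */
        return 0;
}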
@@ -2094,85 +2099,41 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
rcu_read_unlock();
}
-static void qeth_tso_fill_header(struct qeth_card *card,
- struct qeth_hdr *qhdr, struct sk_buff *skb)
+static void qeth_l3_fixup_headers(struct sk_buff *skb)
{
- struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
- struct tcphdr *tcph = tcp_hdr(skb);
struct iphdr *iph = ip_hdr(skb);
- struct ipv6hdr *ip6h = ipv6_hdr(skb);
-
- /*fix header to TSO values ...*/
- hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
- /*set values which are fix for the first approach ...*/
- hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
- hdr->ext.imb_hdr_no = 1;
- hdr->ext.hdr_type = 1;
- hdr->ext.hdr_version = 1;
- hdr->ext.hdr_len = 28;
- /*insert non-fix values */
- hdr->ext.mss = skb_shinfo(skb)->gso_size;
- hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb));
- hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
- sizeof(struct qeth_hdr_tso));
- tcph->check = 0;
- if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) {
- ip6h->payload_len = 0;
- tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
- 0, IPPROTO_TCP, 0);
- } else {
- /*OSA want us to set these values ...*/
- tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- 0, IPPROTO_TCP, 0);
- iph->tot_len = 0;
- iph->check = 0;
- }
-}
-/**
- * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso
- * @card: qeth card structure, to check max. elems.
- * @skb: SKB address
- * @extra_elems: extra elems needed, to check against max.
- *
- * Returns the number of pages, and thus QDIO buffer elements, needed to cover
- * skb data, including linear part and fragments, but excluding TCP header.
- * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().)
- * Checks if the result plus extra_elems fits under the limit for the card.
- * Returns 0 if it does not.
- * Note: extra_elems is not included in the returned result.
- */
-static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
- struct sk_buff *skb, int extra_elems)
-{
- addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
- addr_t end = (addr_t)skb->data + skb_headlen(skb);
- int elements = qeth_get_elements_for_frags(skb);
-
- if (start != end)
- elements += qeth_get_elements_for_range(start, end);
-
- if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
- QETH_DBF_MESSAGE(2,
- "Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n",
- elements + extra_elems, skb->len);
- return 0;
+ /* this is safe, IPv6 traffic takes a different path */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ iph->check = 0;
+ if (skb_is_gso(skb)) {
+ iph->tot_len = 0;
+ tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr,
+ iph->daddr, 0);
}
- return elements;
}
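qeth_l3_fixup_headers() pre-seeds the TCP checksum for TSO: the adapter expects tcph->check to hold the folded one's-complement sum of the IPv4 pseudo header with a zero length, which is what ~tcp_v4_check(0, saddr, daddr, 0) produces. A userspace model of that seed (byte-order details glossed over; addresses are plain 32-bit values here):

#include <stdint.h>
#include <stdio.h>

static uint16_t tso_seed(uint32_t saddr, uint32_t daddr)
{
        uint32_t sum = 0;

        sum += saddr >> 16;
        sum += saddr & 0xffff;
        sum += daddr >> 16;
        sum += daddr & 0xffff;
        sum += 6;                       /* IPPROTO_TCP */
        while (sum >> 16)               /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;           /* stored uncomplemented; HW finishes it */
}

int main(void)
{
        printf("0x%04x\n", tso_seed(0x0a000001, 0x0a000002)); /* 10.0.0.1 -> 10.0.0.2 */
        return 0;
}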
-static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv,
- int cast_type)
+static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
- const unsigned int hw_hdr_len = sizeof(struct qeth_hdr);
- unsigned int frame_len, elements;
+ unsigned int hw_hdr_len, proto_len, frame_len, elements;
unsigned char eth_hdr[ETH_HLEN];
+ bool is_tso = skb_is_gso(skb);
+ unsigned int data_offset = 0;
struct qeth_hdr *hdr = NULL;
unsigned int hd_len = 0;
int push_len, rc;
bool is_sg;
+ if (is_tso) {
+ hw_hdr_len = sizeof(struct qeth_hdr_tso);
+ proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb) -
+ ETH_HLEN;
+ } else {
+ hw_hdr_len = sizeof(struct qeth_hdr);
+ proto_len = 0;
+ }
+
/* re-use the L2 header area for the HW header: */
rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
if (rc)
@@ -2181,28 +2142,37 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
skb_pull(skb, ETH_HLEN);
frame_len = skb->len;
- push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0,
+ qeth_l3_fixup_headers(skb);
+ push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
&elements);
if (push_len < 0)
return push_len;
- if (!push_len) {
- /* hdr was added discontiguous from skb->data */
- hd_len = hw_hdr_len;
+ if (is_tso || !push_len) {
+ /* HW header needs its own buffer element. */
+ hd_len = hw_hdr_len + proto_len;
+ data_offset = push_len + proto_len;
}
+ memset(hdr, 0, hw_hdr_len);
- if (skb->protocol == htons(ETH_P_AF_IUCV))
+ if (skb->protocol == htons(ETH_P_AF_IUCV)) {
qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
- else
+ } else {
qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
+ if (is_tso)
+ qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
+ frame_len - proto_len, skb,
+ proto_len);
+ }
is_sg = skb_is_nonlinear(skb);
if (IS_IQD(card)) {
- rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len);
+ rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
+ hd_len);
} else {
/* TODO: drop skb_orphan() once TX completion is fast enough */
skb_orphan(skb);
- rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len,
- elements);
+ rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
+ hd_len, elements);
}
if (!rc) {
@@ -2210,6 +2180,10 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
card->perf_stats.buf_elements_sent += elements;
if (is_sg)
card->perf_stats.sg_skbs_sent++;
+ if (is_tso) {
+ card->perf_stats.large_send_bytes += frame_len;
+ card->perf_stats.large_send_cnt++;
+ }
}
} else {
if (!push_len)
@@ -2224,118 +2198,6 @@ static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb,
return rc;
}
-static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue, int ipv, int cast_type)
-{
- int elements, len, rc;
- __be16 *tag;
- struct qeth_hdr *hdr = NULL;
- int hdr_elements = 0;
- struct sk_buff *new_skb = NULL;
- int tx_bytes = skb->len;
- unsigned int hd_len;
- bool use_tso, is_sg;
-
- /* Ignore segment size from skb_is_gso(), 1 page is always used. */
- use_tso = skb_is_gso(skb) &&
- (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
-
- /* create a clone with writeable headroom */
- new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
- VLAN_HLEN);
- if (!new_skb)
- return -ENOMEM;
-
- if (ipv == 4) {
- skb_pull(new_skb, ETH_HLEN);
- } else if (skb_vlan_tag_present(new_skb)) {
- skb_push(new_skb, VLAN_HLEN);
- skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
- skb_copy_to_linear_data_offset(new_skb, 4,
- new_skb->data + 8, 4);
- skb_copy_to_linear_data_offset(new_skb, 8,
- new_skb->data + 12, 4);
- tag = (__be16 *)(new_skb->data + 12);
- *tag = cpu_to_be16(ETH_P_8021Q);
- *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
- }
-
- /* fix hardware limitation: as long as we do not have sbal
- * chaining we can not send long frag lists
- */
- if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
- (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) {
- rc = skb_linearize(new_skb);
-
- if (card->options.performance_stats) {
- if (rc)
- card->perf_stats.tx_linfail++;
- else
- card->perf_stats.tx_lin++;
- }
- if (rc)
- goto out;
- }
-
- if (use_tso) {
- hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
- memset(hdr, 0, sizeof(struct qeth_hdr_tso));
- qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
- new_skb->len - sizeof(struct qeth_hdr_tso));
- qeth_tso_fill_header(card, hdr, new_skb);
- hdr_elements++;
- } else {
- hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
- qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
- new_skb->len - sizeof(struct qeth_hdr));
- }
-
- elements = use_tso ?
- qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
- qeth_get_elements_no(card, new_skb, hdr_elements, 0);
- if (!elements) {
- rc = -E2BIG;
- goto out;
- }
- elements += hdr_elements;
-
- if (use_tso) {
- hd_len = sizeof(struct qeth_hdr_tso) +
- ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
- len = hd_len;
- } else {
- hd_len = 0;
- len = sizeof(struct qeth_hdr_layer3);
- }
-
- if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) {
- rc = -EINVAL;
- goto out;
- }
-
- is_sg = skb_is_nonlinear(new_skb);
- rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len,
- elements);
-out:
- if (!rc) {
- if (new_skb != skb)
- dev_kfree_skb_any(skb);
- if (card->options.performance_stats) {
- card->perf_stats.buf_elements_sent += elements;
- if (is_sg)
- card->perf_stats.sg_skbs_sent++;
- if (use_tso) {
- card->perf_stats.large_send_bytes += tx_bytes;
- card->perf_stats.large_send_cnt++;
- }
- }
- } else {
- if (new_skb != skb)
- dev_kfree_skb_any(new_skb);
- }
- return rc;
-}
-
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -2355,7 +2217,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
goto tx_drop;
}
- if (card->state != CARD_STATE_UP || !card->lan_online) {
+ if (card->state != CARD_STATE_UP) {
card->stats.tx_carrier_errors++;
goto tx_drop;
}
@@ -2371,10 +2233,11 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
}
netif_stop_queue(dev);
- if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4))
- rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type);
- else
+ if (ipv == 4 || IS_IQD(card))
rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
+ else
+ rc = qeth_xmit(card, skb, queue, ipv, cast_type,
+ qeth_l3_fill_header);
if (!rc) {
card->stats.tx_packets++;
@@ -2412,7 +2275,10 @@ static int __qeth_l3_open(struct net_device *dev)
if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
napi_enable(&card->napi);
+ local_bh_disable();
napi_schedule(&card->napi);
+ /* kick-start the NAPI softirq: */
+ local_bh_enable();
} else
rc = -EIO;
return rc;
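The local_bh_disable()/local_bh_enable() pair around napi_schedule() matters because the call happens from process context: scheduling NAPI only raises NET_RX_SOFTIRQ, and re-enabling bottom halves is what actually runs the pending softirq. A sketch of the pattern (illustrative fragment, not the driver's exact open path):

#include <linux/bottom_half.h>
#include <linux/netdevice.h>

static void kick_napi(struct napi_struct *napi)
{
        napi_enable(napi);
        local_bh_disable();
        napi_schedule(napi);    /* raises NET_RX_SOFTIRQ only */
        local_bh_enable();      /* runs the pending softirq, starting the poll */
}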
@@ -2476,6 +2342,15 @@ qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np)
return 0;
}
+static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (qeth_get_ip_version(skb) != 4)
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+ return qeth_features_check(skb, dev, features);
+}
+
static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_open = qeth_l3_open,
.ndo_stop = qeth_l3_stop,
@@ -2496,7 +2371,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_stop = qeth_l3_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
- .ndo_features_check = qeth_features_check,
+ .ndo_features_check = qeth_l3_osa_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -2510,6 +2385,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
+ unsigned int headroom;
int rc;
if (card->dev->netdev_ops)
@@ -2542,9 +2418,22 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->hw_features |= NETIF_F_IPV6_CSUM;
card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
}
+ if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
+ card->dev->hw_features |= NETIF_F_TSO6;
+ card->dev->vlan_features |= NETIF_F_TSO6;
+ }
+
+ /* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */
+ if (card->dev->hw_features & NETIF_F_TSO6)
+ headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
+ else if (card->dev->hw_features & NETIF_F_TSO)
+ headroom = sizeof(struct qeth_hdr_tso);
+ else
+ headroom = sizeof(struct qeth_hdr) + VLAN_HLEN;
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
+ headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
rc = qeth_l3_iqd_read_initial_mac(card);
if (rc)
@@ -2555,14 +2444,14 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
} else
return -ENODEV;
+ card->dev->needed_headroom = headroom;
card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
- card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
netif_keep_dst(card->dev);
- if (card->dev->hw_features & NETIF_F_TSO)
+ if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6))
netif_set_gso_max_size(card->dev,
PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
@@ -2591,7 +2480,6 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
}
hash_init(card->ip_htable);
hash_init(card->ip_mc_htable);
- card->options.layer2 = 0;
card->info.hwtrap = 0;
return 0;
}
@@ -2678,10 +2566,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_set_allowed_threads(card, 0xffffffff, 0);
qeth_l3_recover_ip(card);
- if (card->lan_online)
- netif_carrier_on(card->dev);
- else
- netif_carrier_off(card->dev);
qeth_enable_hw_features(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
@@ -2819,9 +2703,6 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
- if (gdev->state == CCWGROUP_OFFLINE)
- goto out;
-
if (card->state == CARD_STATE_RECOVER) {
rc = __qeth_l3_set_online(card->gdev, 1);
if (rc) {
@@ -2831,7 +2712,7 @@ static int qeth_l3_pm_resume(struct ccwgroup_device *gdev)
}
} else
rc = __qeth_l3_set_online(card->gdev, 0);
-out:
+
qeth_set_allowed_threads(card, 0xffffffff, 0);
netif_device_attach(card->dev);
if (rc)
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 7b31f19ade83..050879a2ddef 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -715,22 +715,13 @@ static struct miscdevice openprom_dev = {
static int __init openprom_init(void)
{
- struct device_node *dp;
int err;
err = misc_register(&openprom_dev);
if (err)
return err;
- dp = of_find_node_by_path("/");
- dp = dp->child;
- while (dp) {
- if (!strcmp(dp->name, "options"))
- break;
- dp = dp->sibling;
- }
- options_node = dp;
-
+ options_node = of_get_child_by_name(of_find_node_by_path("/"), "options");
if (!options_node) {
misc_deregister(&openprom_dev);
return -EIO;
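of_get_child_by_name() replaces the hand-rolled sibling walk and returns the matching child with a reference held. A simplified sketch of what it does internally (the helper name find_options_node is ours):

#include <linux/of.h>

static struct device_node *find_options_node(void)
{
        struct device_node *root = of_find_node_by_path("/");
        struct device_node *child;

        for_each_child_of_node(root, child)
                if (child->name && !of_node_cmp(child->name, "options"))
                        break;          /* iterator leaves a reference held */
        of_node_put(root);
        return child;                   /* NULL when no child matched */
}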
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index 524f9ea62e52..6516bc3cb58b 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -689,8 +689,7 @@ static int dax_open(struct inode *inode, struct file *f)
alloc_error:
kfree(ctx->ccb_buf);
done:
- if (ctx != NULL)
- kfree(ctx);
+ kfree(ctx);
return -ENOMEM;
}
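kfree(), like userspace free(), is defined to be a no-op on a NULL pointer, so the removed guard was pure noise. The same simplification in plain C:

#include <stdlib.h>

static void cleanup_verbose(char *buf)
{
        if (buf != NULL)        /* redundant guard */
                free(buf);
}

static void cleanup(char *buf)
{
        free(buf);              /* safe even when buf == NULL */
}

int main(void)
{
        cleanup_verbose(NULL);
        cleanup(NULL);
        return 0;
}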
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 27521fc3ef5a..05293babb031 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -518,7 +518,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
unsigned long *cpu_addr;
int retval = 1;
- cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
@@ -526,7 +527,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
- pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+ cpu_addr, dma_handle);
goto out;
}
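These hunks are part of the tree-wide move from the PCI-specific DMA wrappers to the generic DMA API: dma_alloc_coherent() takes the struct device and an explicit GFP context, whereas pci_alloc_consistent() always implied GFP_ATOMIC. A minimal sketch of the converted allocation (the helper name is ours):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *alloc_cmd_ring(struct pci_dev *pdev, size_t bytes,
                            dma_addr_t *bus_addr)
{
        /* a sleeping allocation is fine in probe context */
        return dma_alloc_coherent(&pdev->dev, bytes, bus_addr, GFP_KERNEL);
}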
@@ -1027,16 +1029,16 @@ out:
static void twa_free_device_extension(TW_Device_Extension *tw_dev)
{
if (tw_dev->command_packet_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
- sizeof(TW_Command_Full)*TW_Q_LENGTH,
- tw_dev->command_packet_virt[0],
- tw_dev->command_packet_phys[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Command_Full) * TW_Q_LENGTH,
+ tw_dev->command_packet_virt[0],
+ tw_dev->command_packet_phys[0]);
if (tw_dev->generic_buffer_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
- TW_SECTOR_SIZE*TW_Q_LENGTH,
- tw_dev->generic_buffer_virt[0],
- tw_dev->generic_buffer_phys[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ TW_SECTOR_SIZE * TW_Q_LENGTH,
+ tw_dev->generic_buffer_virt[0],
+ tw_dev->generic_buffer_phys[0]);
kfree(tw_dev->event_queue[0]);
} /* End twa_free_device_extension() */
@@ -2015,14 +2017,12 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
if (!host) {
@@ -2237,14 +2237,12 @@ static int twa_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
/* Initialize the card */
if (twa_reset_sequence(tw_dev, 0)) {
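One subtlety in the mask-setting hunks above: dma_set_mask_and_coherent() returns 0 on success, so in the a || b form the 32-bit call is still evaluated when the 64-bit one succeeds, narrowing the mask. An explicit return-value check keeps the fallback strictly conditional; a sketch of that idiom (the helper name is ours):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int set_dma_masks(struct pci_dev *pdev)
{
        int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

        if (rc)         /* 64-bit rejected: retry with a 32-bit mask */
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        return rc;      /* 0 on success */
}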
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 40c1e6e64f58..266bdac75304 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -644,8 +644,8 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
unsigned long *cpu_addr;
int retval = 1;
- cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH,
- &dma_handle);
+ cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
@@ -899,19 +899,19 @@ out:
static void twl_free_device_extension(TW_Device_Extension *tw_dev)
{
if (tw_dev->command_packet_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
sizeof(TW_Command_Full)*TW_Q_LENGTH,
tw_dev->command_packet_virt[0],
tw_dev->command_packet_phys[0]);
if (tw_dev->generic_buffer_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
TW_SECTOR_SIZE*TW_Q_LENGTH,
tw_dev->generic_buffer_virt[0],
tw_dev->generic_buffer_phys[0]);
if (tw_dev->sense_buffer_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
sizeof(TW_Command_Apache_Header)*
TW_Q_LENGTH,
tw_dev->sense_buffer_virt[0],
@@ -1571,14 +1571,12 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
if (!host) {
@@ -1805,14 +1803,12 @@ static int twl_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
/* Initialize the card */
if (twl_reset_sequence(tw_dev, 0)) {
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 471366945bd4..a58257645e94 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -834,15 +834,17 @@ static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
dprintk(KERN_NOTICE "3w-xxxx: tw_allocate_memory()\n");
- cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (cpu_addr == NULL) {
- printk(KERN_WARNING "3w-xxxx: pci_alloc_consistent() failed.\n");
+ printk(KERN_WARNING "3w-xxxx: dma_alloc_coherent() failed.\n");
return 1;
}
if ((unsigned long)cpu_addr % (tw_dev->tw_pci_dev->device == TW_DEVICE_ID ? TW_ALIGNMENT_6000 : TW_ALIGNMENT_7000)) {
printk(KERN_WARNING "3w-xxxx: Couldn't allocate correctly aligned memory.\n");
- pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+ cpu_addr, dma_handle);
return 1;
}
@@ -1062,10 +1064,16 @@ static void tw_free_device_extension(TW_Device_Extension *tw_dev)
/* Free command packet and generic buffer memory */
if (tw_dev->command_packet_virtual_address[0])
- pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Command)*TW_Q_LENGTH, tw_dev->command_packet_virtual_address[0], tw_dev->command_packet_physical_address[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Command) * TW_Q_LENGTH,
+ tw_dev->command_packet_virtual_address[0],
+ tw_dev->command_packet_physical_address[0]);
if (tw_dev->alignment_virtual_address[0])
- pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Sector)*TW_Q_LENGTH, tw_dev->alignment_virtual_address[0], tw_dev->alignment_physical_address[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Sector) * TW_Q_LENGTH,
+ tw_dev->alignment_virtual_address[0],
+ tw_dev->alignment_physical_address[0]);
} /* End tw_free_device_extension() */
/* This function will send an initconnection command to controller */
@@ -2260,7 +2268,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
- retval = pci_set_dma_mask(pdev, TW_DMA_MASK);
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
printk(KERN_WARNING "3w-xxxx: Failed to set dma mask.");
goto out_disable_device;
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 69e80c1ed1ca..bd87fbacfbc7 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -230,7 +230,6 @@ static unsigned char tw_sense_table[][4] =
#define TW_IOCTL_TIMEOUT 25 /* 25 seconds */
#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
#define TW_IOCTL_CHRDEV_FREE -1
-#define TW_DMA_MASK DMA_BIT_MASK(32)
#define TW_MAX_CDB_LEN 16
/* Bitmask macros to eliminate bitfields */
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 0c9a100af667..05fe439b66af 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -90,7 +90,7 @@ struct NCR_700_Device_Parameters {
/* The SYNC negotiation sequence looks like:
*
* If DEV_NEGOTIATED_SYNC not set, tack an SDTR message on to the
- * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTATION
+ * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTIATION
* If we get an SDTR reply, work out the SXFER parameters, squirrel
* them away here, clear DEV_BEGIN_SYNC_NEGOTIATION and set
* DEV_NEGOTIATED_SYNC. If we get a REJECT msg, squirrel
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 0d4ffe0ae306..9cee941f97d6 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -201,8 +201,8 @@ static bool __init blogic_create_initccbs(struct blogic_adapter *adapter)
dma_addr_t blkp;
while (adapter->alloc_ccbs < adapter->initccbs) {
- blk_pointer = pci_alloc_consistent(adapter->pci_device,
- blk_size, &blkp);
+ blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+ blk_size, &blkp, GFP_KERNEL);
if (blk_pointer == NULL) {
blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
adapter);
@@ -227,15 +227,16 @@ static void blogic_destroy_ccbs(struct blogic_adapter *adapter)
next_ccb = ccb->next_all;
if (ccb->allocgrp_head) {
if (lastccb)
- pci_free_consistent(adapter->pci_device,
+ dma_free_coherent(&adapter->pci_device->dev,
lastccb->allocgrp_size, lastccb,
lastccb->allocgrp_head);
lastccb = ccb;
}
}
if (lastccb)
- pci_free_consistent(adapter->pci_device, lastccb->allocgrp_size,
- lastccb, lastccb->allocgrp_head);
+ dma_free_coherent(&adapter->pci_device->dev,
+ lastccb->allocgrp_size, lastccb,
+ lastccb->allocgrp_head);
}
@@ -256,8 +257,8 @@ static void blogic_create_addlccbs(struct blogic_adapter *adapter,
if (addl_ccbs <= 0)
return;
while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) {
- blk_pointer = pci_alloc_consistent(adapter->pci_device,
- blk_size, &blkp);
+ blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+ blk_size, &blkp, GFP_KERNEL);
if (blk_pointer == NULL)
break;
blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
@@ -318,8 +319,8 @@ static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
if (ccb->command != NULL)
scsi_dma_unmap(ccb->command);
if (dma_unmap)
- pci_unmap_single(adapter->pci_device, ccb->sensedata,
- ccb->sense_datalen, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->pci_device->dev, ccb->sensedata,
+ ccb->sense_datalen, DMA_FROM_DEVICE);
ccb->command = NULL;
ccb->status = BLOGIC_CCB_FREE;
@@ -712,7 +713,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
- if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
@@ -895,7 +896,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
- if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
@@ -952,7 +953,7 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
- if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
@@ -2040,7 +2041,7 @@ static void blogic_relres(struct blogic_adapter *adapter)
Release any allocated memory structs not released elsewhere
*/
if (adapter->mbox_space)
- pci_free_consistent(adapter->pci_device, adapter->mbox_sz,
+ dma_free_coherent(&adapter->pci_device->dev, adapter->mbox_sz,
adapter->mbox_space, adapter->mbox_space_handle);
pci_dev_put(adapter->pci_device);
adapter->mbox_space = NULL;
@@ -2092,8 +2093,9 @@ static bool blogic_initadapter(struct blogic_adapter *adapter)
Initialize the Outgoing and Incoming Mailbox pointers.
*/
adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox));
- adapter->mbox_space = pci_alloc_consistent(adapter->pci_device,
- adapter->mbox_sz, &adapter->mbox_space_handle);
+ adapter->mbox_space = dma_alloc_coherent(&adapter->pci_device->dev,
+ adapter->mbox_sz, &adapter->mbox_space_handle,
+ GFP_KERNEL);
if (adapter->mbox_space == NULL)
return blogic_failure(adapter, "MAILBOX ALLOCATION");
adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space;
@@ -3183,9 +3185,9 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
memcpy(ccb->cdb, cdb, cdblen);
ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
ccb->command = command;
- sense_buf = pci_map_single(adapter->pci_device,
+ sense_buf = dma_map_single(&adapter->pci_device->dev,
command->sense_buffer, ccb->sense_datalen,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
blogic_err("DMA mapping for sense data buffer failed\n",
adapter);
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 867b864f5047..0f17bd51088a 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -2944,7 +2944,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
}
if (currSCCB->Lun == 0x00) {
- if ((currSCCB->Sccb_scsistat == SELECT_SN_ST)) {
+ if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
currTar_Info->TarStatus |=
(unsigned char)SYNC_SUPPORTED;
@@ -2953,8 +2953,8 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
~EE_SYNC_MASK;
}
- else if ((currSCCB->Sccb_scsistat ==
- SELECT_WN_ST)) {
+ else if (currSCCB->Sccb_scsistat ==
+ SELECT_WN_ST) {
currTar_Info->TarStatus =
(currTar_Info->
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7c097006c54d..70988c381268 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -42,6 +42,9 @@ config SCSI_DMA
bool
default n
+config SCSI_ESP_PIO
+ bool
+
config SCSI_NETLINK
bool
default n
@@ -557,6 +560,36 @@ config SCSI_FLASHPOINT
substantial, so users of MultiMaster Host Adapters may not
wish to include it.
+config SCSI_MYRB
+ tristate "Mylex DAC960/DAC1100 PCI RAID Controller (Block Interface)"
+ depends on PCI
+ select RAID_ATTRS
+ help
+ This driver adds support for the Mylex DAC960, AcceleRAID, and
+ eXtremeRAID PCI RAID controllers. This driver supports the
+ older, block based interface.
+ This driver is a reimplementation of the original DAC960
+ driver. If you have used the DAC960 driver you should enable
+ this module.
+
+ To compile this driver as a module, choose M here: the
+ module will be called myrb.
+
+config SCSI_MYRS
+ tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
+ depends on PCI
+ select RAID_ATTRS
+ help
+ This driver adds support for the Mylex DAC960, AcceleRAID, and
+ eXtremeRAID PCI RAID controllers. This driver supports the
+ newer, SCSI-based interface only.
+ This driver is a reimplementation of the original DAC960
+ driver. If you have used the DAC960 driver you should enable
+ this module.
+
+ To compile this driver as a module, choose M here: the
+ module will be called myrs.
+
config VMWARE_PVSCSI
tristate "VMware PVSCSI driver support"
depends on PCI && SCSI && X86
@@ -1332,6 +1365,7 @@ config SCSI_ZORRO_ESP
tristate "Zorro ESP SCSI support"
depends on ZORRO && SCSI
select SCSI_SPI_ATTRS
+ select SCSI_ESP_PIO
help
Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
expansion boards for the Amiga.
@@ -1374,6 +1408,7 @@ config SCSI_MAC_ESP
tristate "Macintosh NCR53c9[46] SCSI"
depends on MAC && SCSI
select SCSI_SPI_ATTRS
+ select SCSI_ESP_PIO
help
This is the NCR 53c9x SCSI controller found on most of the 68040
based Macintoshes.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6d71b2a9592b..fcb41ae329c4 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -106,6 +106,8 @@ obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
obj-$(CONFIG_SCSI_MESH) += mesh.o
obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
+obj-$(CONFIG_SCSI_MYRB) += myrb.o
+obj-$(CONFIG_SCSI_MYRS) += myrs.o
obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 90ea0f5d9bdb..8429c855701f 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -131,6 +131,7 @@
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
+static void bus_reset_cleanup(struct Scsi_Host *);
/**
* initialize_SCp - init the scsi pointer field
@@ -513,16 +514,15 @@ static void complete_cmd(struct Scsi_Host *instance,
if (hostdata->sensing == cmd) {
/* Autosense processing ends here */
- if ((cmd->result & 0xff) != SAM_STAT_GOOD) {
+ if (status_byte(cmd->result) != GOOD) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- set_host_byte(cmd, DID_ERROR);
- } else
+ } else {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ }
hostdata->sensing = NULL;
}
- hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
-
cmd->scsi_done(cmd);
}
@@ -884,7 +884,14 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
/* Probably Bus Reset */
NCR5380_read(RESET_PARITY_INTERRUPT_REG);
- dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+ if (sr & SR_RST) {
+ /* Certainly Bus Reset */
+ shost_printk(KERN_WARNING, instance,
+ "bus reset interrupt\n");
+ bus_reset_cleanup(instance);
+ } else {
+ dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+ }
#ifdef SUN3_SCSI_VME
dregs->csr |= CSR_DMA_ENABLE;
#endif
@@ -902,20 +909,16 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-/*
- * Function : int NCR5380_select(struct Scsi_Host *instance,
- * struct scsi_cmnd *cmd)
- *
- * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
- * including ARBITRATION, SELECTION, and initial message out for
- * IDENTIFY and queue messages.
+/**
+ * NCR5380_select - attempt arbitration and selection for a given command
+ * @instance: the Scsi_Host instance
+ * @cmd: the scsi_cmnd to execute
*
- * Inputs : instance - instantiation of the 5380 driver on which this
- * target lives, cmd - SCSI command to execute.
+ * This routine establishes an I_T_L nexus for a SCSI command. This involves
+ * ARBITRATION, SELECTION and MESSAGE OUT phases and an IDENTIFY message.
*
- * Returns cmd if selection failed but should be retried,
- * NULL if selection failed and should not be retried, or
- * NULL if selection succeeded (hostdata->connected == cmd).
+ * Returns true if the operation should be retried.
+ * Returns false if it should not be retried.
*
* Side effects :
* If bus busy, arbitration failed, etc, NCR5380_select() will exit
@@ -923,16 +926,15 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
* SELECT_ENABLE will be set appropriately, the NCR5380
* will cease to drive any SCSI bus signals.
*
- * If successful : I_T_L or I_T_L_Q nexus will be established,
- * instance->connected will be set to cmd.
+ * If successful : the I_T_L nexus will be established, and
+ * hostdata->connected will be set to cmd.
* SELECT interrupt will be disabled.
*
* If failed (no target) : cmd->scsi_done() will be called, and the
* cmd->result host byte set to DID_BAD_TARGET.
*/
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
- struct scsi_cmnd *cmd)
+static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
__releases(&hostdata->lock) __acquires(&hostdata->lock)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -940,6 +942,9 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
unsigned char *data;
int len;
int err;
+ bool ret = true;
+ bool can_disconnect = instance->irq != NO_IRQ &&
+ cmd->cmnd[0] != REQUEST_SENSE;
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -948,7 +953,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/*
* Arbitration and selection phases are slow and involve dropping the
* lock, so we have to watch out for EH. An exception handler may
- * change 'selecting' to NULL. This function will then return NULL
+ * change 'selecting' to NULL. This function will then return false
* so that the caller will forget about 'cmd'. (During information
* transfer phases, EH may change 'connected' to NULL.)
*/
@@ -984,7 +989,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
if (!hostdata->selecting) {
/* Command was aborted */
NCR5380_write(MODE_REG, MR_BASE);
- goto out;
+ return false;
}
if (err < 0) {
NCR5380_write(MODE_REG, MR_BASE);
@@ -1033,7 +1038,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
if (!hostdata->selecting) {
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- goto out;
+ return false;
}
dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
@@ -1116,13 +1121,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
spin_lock_irq(&hostdata->lock);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
/* Can't touch cmd if it has been reclaimed by the scsi ML */
- if (hostdata->selecting) {
- cmd->result = DID_BAD_TARGET << 16;
- complete_cmd(instance, cmd);
- dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
- cmd = NULL;
- }
+ if (!hostdata->selecting)
+ return false;
+
+ cmd->result = DID_BAD_TARGET << 16;
+ complete_cmd(instance, cmd);
+ dsprintk(NDEBUG_SELECTION, instance,
+ "target did not respond within 250ms\n");
+ ret = false;
goto out;
}
@@ -1155,12 +1163,12 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
}
if (!hostdata->selecting) {
do_abort(instance);
- goto out;
+ return false;
}
dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
scmd_id(cmd));
- tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
+ tmp[0] = IDENTIFY(can_disconnect, cmd->device->lun);
len = 1;
data = tmp;
@@ -1171,7 +1179,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n");
- cmd = NULL;
+ ret = false;
goto out;
}
@@ -1186,13 +1194,13 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
initialize_SCp(cmd);
- cmd = NULL;
+ ret = false;
out:
if (!hostdata->selecting)
return false;
hostdata->selecting = NULL;
- return cmd;
+ return ret;
}
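The can_disconnect flag computed at the top of NCR5380_select() feeds the IDENTIFY message: bit 7 is always set, bit 6 grants disconnect privilege, and the low three bits carry the LUN. A userspace model of the macro from <scsi/scsi.h>:

#include <stdio.h>

#define IDENTIFY_BASE   0x80
#define IDENTIFY(can_disconnect, lun) \
        (IDENTIFY_BASE | ((can_disconnect) ? 0x40 : 0) | ((lun) & 0x07))

int main(void)
{
        /* REQUEST SENSE (or an irq-less host) must not disconnect: */
        printf("0x%02x\n", IDENTIFY(0, 2));     /* 0x82 */
        printf("0x%02x\n", IDENTIFY(1, 2));     /* 0xc2 */
        return 0;
}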
/*
@@ -1711,6 +1719,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
return;
#endif
case PHASE_DATAIN:
@@ -1793,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd, scmd_id(cmd), cmd->device->lun);
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
cmd->result &= ~0xffff;
cmd->result |= cmd->SCp.Status;
@@ -1951,6 +1961,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
NCR5380_transfer_pio(instance, &phase, &len, &data);
if (msgout == ABORT) {
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
maybe_release_dma_irq(instance);
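These hunks move the clearing of the per-target busy bit out of complete_cmd() and into the specific points where the I_T_L nexus is actually torn down. The bookkeeping itself is one byte per target with one bit per LUN; a small model:

#include <stdint.h>
#include <stdio.h>

static uint8_t busy[8];         /* models hostdata->busy[] */

static void lun_busy(int target, int lun) { busy[target] |=  (1u << lun); }
static void lun_idle(int target, int lun) { busy[target] &= ~(1u << lun); }

int main(void)
{
        lun_busy(3, 2);
        printf("0x%02x\n", busy[3]);    /* 0x04 */
        lun_idle(3, 2);
        printf("0x%02x\n", busy[3]);    /* 0x00 */
        return 0;
}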
@@ -2014,8 +2025,11 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(MODE_REG, MR_BASE);
target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-
- dsprintk(NDEBUG_RESELECTION, instance, "reselect\n");
+ if (!target_mask || target_mask & (target_mask - 1)) {
+ shost_printk(KERN_WARNING, instance,
+ "reselect: bad target_mask 0x%02x\n", target_mask);
+ return;
+ }
/*
* At this point, we have detected that our SCSI ID is on the bus,
@@ -2029,6 +2043,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
+ shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n");
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
return;
}
@@ -2040,6 +2055,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
+ if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0)
+ /* BUS FREE phase */
+ return;
+ shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n");
do_abort(instance);
return;
}
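The new sanity check rejects a reselection unless exactly one target ID bit is on the bus: x & (x - 1) clears the lowest set bit, so a nonzero result means more than one bit was set. In isolation:

#include <stdbool.h>
#include <stdio.h>

static bool one_bit_set(unsigned char x)
{
        return x && !(x & (x - 1));
}

int main(void)
{
        printf("%d %d %d\n", one_bit_set(0x00), one_bit_set(0x04),
               one_bit_set(0x06));              /* 0 1 0 */
        return 0;
}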
@@ -2101,13 +2120,16 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance,
"reselect: removed %p from disconnected queue\n", tmp);
} else {
+ int target = ffs(target_mask) - 1;
+
shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n",
target_mask, lun);
/*
* Since we have an established nexus that we can't do anything
* with, we must abort it.
*/
- do_abort(instance);
+ if (do_abort(instance) == 0)
+ hostdata->busy[target] &= ~(1 << lun);
return;
}
@@ -2272,15 +2294,16 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (list_del_cmd(&hostdata->autosense, cmd)) {
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from sense queue\n", cmd);
- set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
}
out:
if (result == FAILED)
dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd);
- else
+ else {
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
+ }
queue_work(hostdata->work_q, &hostdata->main_task);
maybe_release_dma_irq(instance);
@@ -2290,31 +2313,12 @@ out:
}
-/**
- * NCR5380_host_reset - reset the SCSI host
- * @cmd: SCSI command undergoing EH
- *
- * Returns SUCCESS
- */
-
-static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+static void bus_reset_cleanup(struct Scsi_Host *instance)
{
- struct Scsi_Host *instance = cmd->device->host;
struct NCR5380_hostdata *hostdata = shost_priv(instance);
int i;
- unsigned long flags;
struct NCR5380_cmd *ncmd;
- spin_lock_irqsave(&hostdata->lock, flags);
-
-#if (NDEBUG & NDEBUG_ANY)
- scmd_printk(KERN_INFO, cmd, __func__);
-#endif
- NCR5380_dprint(NDEBUG_ANY, instance);
- NCR5380_dprint_phase(NDEBUG_ANY, instance);
-
- do_reset(instance);
-
/* reset NCR registers */
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(TARGET_COMMAND_REG, 0);
@@ -2326,11 +2330,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
* commands!
*/
- if (list_del_cmd(&hostdata->unissued, cmd)) {
- cmd->result = DID_RESET << 16;
- cmd->scsi_done(cmd);
- }
-
if (hostdata->selecting) {
hostdata->selecting->result = DID_RESET << 16;
complete_cmd(instance, hostdata->selecting);
@@ -2348,7 +2347,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
- set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
INIT_LIST_HEAD(&hostdata->autosense);
@@ -2365,6 +2363,41 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
queue_work(hostdata->work_q, &hostdata->main_task);
maybe_release_dma_irq(instance);
+}
+
+/**
+ * NCR5380_host_reset - reset the SCSI host
+ * @cmd: SCSI command undergoing EH
+ *
+ * Returns SUCCESS
+ */
+
+static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ unsigned long flags;
+ struct NCR5380_cmd *ncmd;
+
+ spin_lock_irqsave(&hostdata->lock, flags);
+
+#if (NDEBUG & NDEBUG_ANY)
+ shost_printk(KERN_INFO, instance, __func__);
+#endif
+ NCR5380_dprint(NDEBUG_ANY, instance);
+ NCR5380_dprint_phase(NDEBUG_ANY, instance);
+
+ list_for_each_entry(ncmd, &hostdata->unissued, list) {
+ struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd);
+
+ scmd->result = DID_RESET << 16;
+ scmd->scsi_done(scmd);
+ }
+ INIT_LIST_HEAD(&hostdata->unissued);
+
+ do_reset(instance);
+ bus_reset_cleanup(instance);
+
spin_unlock_irqrestore(&hostdata->lock, flags);
return SUCCESS;
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 31096a0b0fdd..efca509b92b0 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -275,7 +275,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);
static void NCR5380_main(struct work_struct *work);
static const char *NCR5380_info(struct Scsi_Host *instance);
static void NCR5380_reselect(struct Scsi_Host *instance);
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
+static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 23b17621b6d2..00072ed9540b 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1094,7 +1094,7 @@ static int inia100_probe_one(struct pci_dev *pdev,
if (pci_enable_device(pdev))
goto out;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "Unable to set 32bit DMA "
"on inia100 adapter, ignoring.\n");
goto out_disable_device;
@@ -1124,7 +1124,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for SCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
- host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys);
+ host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys,
+ GFP_KERNEL);
if (!host->scb_virt) {
printk("inia100: SCB memory allocation error\n");
goto out_host_put;
@@ -1132,7 +1133,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for ESCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
- host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys);
+ host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys,
+ GFP_KERNEL);
if (!host->escb_virt) {
printk("inia100: ESCB memory allocation error\n");
goto out_free_scb_array;
@@ -1177,10 +1179,12 @@ static int inia100_probe_one(struct pci_dev *pdev,
out_free_irq:
free_irq(shost->irq, shost);
out_free_escb_array:
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
host->escb_virt, host->escb_phys);
out_free_scb_array:
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_scb),
host->scb_virt, host->scb_phys);
out_host_put:
scsi_host_put(shost);
@@ -1200,9 +1204,11 @@ static void inia100_remove_one(struct pci_dev *pdev)
scsi_remove_host(shost);
free_irq(shost->irq, shost);
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
host->escb_virt, host->escb_phys);
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_scb),
host->scb_virt, host->scb_phys);
release_region(shost->io_port, 256);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 6e356325d8d9..bd7f352c28f3 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3480,7 +3480,6 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
static void aac_srb_callback(void *context, struct fib * fibptr)
{
- struct aac_dev *dev;
struct aac_srb_reply *srbreply;
struct scsi_cmnd *scsicmd;
@@ -3491,8 +3490,6 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
BUG_ON(fibptr == NULL);
- dev = fibptr->dev;
-
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
@@ -3921,13 +3918,11 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
{
- struct aac_dev *dev;
unsigned long byte_count = 0;
int nseg;
struct scatterlist *sg;
int i;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
psg->count = 0;
psg->sg[0].addr = 0;
@@ -3963,14 +3958,12 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
{
- struct aac_dev *dev;
unsigned long byte_count = 0;
u64 addr;
int nseg;
struct scatterlist *sg;
int i;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
psg->count = 0;
psg->sg[0].addr[0] = 0;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 6e1b022a823d..1e77d96a18f2 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2586,9 +2586,7 @@ int aac_acquire_irq(struct aac_dev *dev)
void aac_free_irq(struct aac_dev *dev)
{
int i;
- int cpu;
- cpu = cpumask_first(cpu_online_mask);
if (aac_is_src(dev)) {
if (dev->max_msix > 1) {
for (i = 0; i < dev->max_msix; i++)
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 04443577d48b..2d4e4ddc5ace 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1747,7 +1747,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_sectors = (shost->sg_tablesize * 8) + 112;
}
- error = pci_set_dma_max_seg_size(pdev,
+ error = dma_set_max_seg_size(&pdev->dev,
(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
(shost->max_sectors << 9) : 65536);
if (error)
@@ -2055,8 +2055,6 @@ static void aac_pci_resume(struct pci_dev *pdev)
struct scsi_device *sdev = NULL;
struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
if (aac_adapter_ioremap(aac, aac->base_size)) {
dev_err(&pdev->dev, "aacraid: ioremap failed\n");
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 713f69033f20..223ef6f4e258 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -5949,7 +5949,6 @@ static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
{
struct asc_board *boardp = adv_dvc_varp->drv_ptr;
- u32 srb_tag;
adv_req_t *reqp;
adv_sgblk_t *sgblkp;
struct scsi_cmnd *scp;
@@ -5965,7 +5964,6 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
* completed. The adv_req_t structure actually contains the
* completed ADV_SCSI_REQ_Q structure.
*/
- srb_tag = le32_to_cpu(scsiqp->srb_tag);
scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag);
ASC_DBG(1, "scp 0x%p\n", scp);
@@ -6448,7 +6446,7 @@ static void AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
sdtr_data =
AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
ext_msg.req_ack_offset);
- if ((sdtr_data == 0xFF)) {
+ if (sdtr_data == 0xFF) {
q_cntl |= QC_MSG_OUT;
asc_dvc->init_sdtr &= ~target_id;
diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c
index 5000bd69c13f..176704b24e6a 100644
--- a/drivers/scsi/aic7xxx/aic7770.c
+++ b/drivers/scsi/aic7xxx/aic7770.c
@@ -42,15 +42,9 @@
* $FreeBSD$
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
#define ID_AIC7770 0x04907770
#define ID_AHA_274x 0x04907771
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 31f2bb9d7146..9a515551641c 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -607,9 +607,6 @@ struct scb {
ahd_io_ctx_t io_ctx;
struct ahd_softc *ahd_softc;
scb_flag flags;
-#ifndef __linux__
- bus_dmamap_t dmamap;
-#endif
struct scb_platform_data *platform_data;
struct map_node *hscb_map;
struct map_node *sg_map;
@@ -1056,9 +1053,6 @@ struct ahd_completion
struct ahd_softc {
bus_space_tag_t tags[2];
bus_space_handle_t bshs[2];
-#ifndef __linux__
- bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */
-#endif
struct scb_data scb_data;
struct hardware_scb *next_queued_hscb;
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 2d82ec85753e..9ee75c9a9aa1 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -40,16 +40,9 @@
* $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
*/
-#ifdef __linux__
#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
#include "aicasm/aicasm_insformat.h"
-#else
-#include <dev/aic7xxx/aic79xx_osm.h>
-#include <dev/aic7xxx/aic79xx_inline.h>
-#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
-#endif
-
/***************************** Lookup Tables **********************************/
static const char *const ahd_chip_names[] =
@@ -59,7 +52,6 @@ static const char *const ahd_chip_names[] =
"aic7902",
"aic7901A"
};
-static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
/*
* Hardware error codes.
@@ -6172,17 +6164,11 @@ ahd_free(struct ahd_softc *ahd)
case 2:
ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
case 1:
-#ifndef __linux__
- ahd_dma_tag_destroy(ahd, ahd->buffer_dmat);
-#endif
break;
case 0:
break;
}
-#ifndef __linux__
- ahd_dma_tag_destroy(ahd, ahd->parent_dmat);
-#endif
ahd_platform_free(ahd);
ahd_fini_scbdata(ahd);
for (i = 0; i < AHD_NUM_TARGETS; i++) {
@@ -6934,9 +6920,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
for (i = 0; i < newcount; i++) {
struct scb_platform_data *pdata;
u_int col_tag;
-#ifndef __linux__
- int error;
-#endif
next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC);
if (next_scb == NULL)
@@ -6970,15 +6953,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
next_scb->ahd_softc = ahd;
next_scb->flags = SCB_FLAG_NONE;
-#ifndef __linux__
- error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
- &next_scb->dmamap);
- if (error != 0) {
- kfree(next_scb);
- kfree(pdata);
- break;
- }
-#endif
next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
col_tag = scb_data->numscbs ^ 0x100;
next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
@@ -7091,24 +7065,6 @@ ahd_init(struct ahd_softc *ahd)
if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
ahd->features &= ~AHD_TARGETMODE;
-#ifndef __linux__
- /* DMA tag for mapping buffers into device visible space. */
- if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
- /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
- /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
- ? (dma_addr_t)0x7FFFFFFFFFULL
- : BUS_SPACE_MAXADDR_32BIT,
- /*highaddr*/BUS_SPACE_MAXADDR,
- /*filter*/NULL, /*filterarg*/NULL,
- /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
- /*nsegments*/AHD_NSEG,
- /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
- /*flags*/BUS_DMA_ALLOCNOW,
- &ahd->buffer_dmat) != 0) {
- return (ENOMEM);
- }
-#endif
-
ahd->init_level++;
/*
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index cc9bd26f5d1a..8397ae93f7dd 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -41,14 +41,8 @@
* $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $
*/
-#ifdef __linux__
#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
-#else
-#include <dev/aic7xxx/aic79xx_osm.h>
-#include <dev/aic7xxx/aic79xx_inline.h>
-#endif
-
#include "aic79xx_pci.h"
static inline uint64_t
@@ -294,13 +288,11 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
int
ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
{
- struct scb_data *shared_scb_data;
u_int command;
uint32_t devconfig;
uint16_t subvendor;
int error;
- shared_scb_data = NULL;
ahd->description = entry->name;
/*
* Record if this is an HP board.
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 4ce4e903a759..5614921b4041 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -568,9 +568,6 @@ struct scb {
ahc_io_ctx_t io_ctx;
struct ahc_softc *ahc_softc;
scb_flag flags;
-#ifndef __linux__
- bus_dmamap_t dmamap;
-#endif
struct scb_platform_data *platform_data;
struct sg_map_node *sg_map;
struct ahc_dma_seg *sg_list;
@@ -906,9 +903,6 @@ typedef void ahc_callback_t (void *);
struct ahc_softc {
bus_space_tag_t tag;
bus_space_handle_t bsh;
-#ifndef __linux__
- bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */
-#endif
struct scb_data *scb_data;
struct scb *next_queued_scb;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index 9e85a7ef9c8e..cc9e41967ce4 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -64,15 +64,9 @@
* bit to be sent from the chip.
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
/*
* Right now, we only have to read the SEEPROM. But we make it easier to
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 915a34f141e4..f3362f4ab16e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -40,15 +40,9 @@
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aicasm/aicasm_insformat.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
-#endif
/***************************** Lookup Tables **********************************/
static const char *const ahc_chip_names[] = {
@@ -67,7 +61,6 @@ static const char *const ahc_chip_names[] = {
"aic7892",
"aic7899"
};
-static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
/*
* Hardware error codes.
@@ -4509,17 +4502,11 @@ ahc_free(struct ahc_softc *ahc)
case 2:
ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
case 1:
-#ifndef __linux__
- ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
-#endif
break;
case 0:
break;
}
-#ifndef __linux__
- ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
-#endif
ahc_platform_free(ahc);
ahc_fini_scbdata(ahc);
for (i = 0; i < AHC_NUM_TARGETS; i++) {
@@ -5005,9 +4992,7 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
for (i = 0; i < newcount; i++) {
struct scb_platform_data *pdata;
-#ifndef __linux__
- int error;
-#endif
+
pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
if (pdata == NULL)
break;
@@ -5021,12 +5006,6 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
next_scb->ahc_softc = ahc;
next_scb->flags = SCB_FREE;
-#ifndef __linux__
- error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
- &next_scb->dmamap);
- if (error != 0)
- break;
-#endif
next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
next_scb->hscb->tag = ahc->scb_data->numscbs;
SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
@@ -5325,24 +5304,6 @@ ahc_init(struct ahc_softc *ahc)
if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
ahc->features &= ~AHC_TARGETMODE;
-#ifndef __linux__
- /* DMA tag for mapping buffers into device visible space. */
- if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
- /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
- /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
- ? (dma_addr_t)0x7FFFFFFFFFULL
- : BUS_SPACE_MAXADDR_32BIT,
- /*highaddr*/BUS_SPACE_MAXADDR,
- /*filter*/NULL, /*filterarg*/NULL,
- /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
- /*nsegments*/AHC_NSEG,
- /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
- /*flags*/BUS_DMA_ALLOCNOW,
- &ahc->buffer_dmat) != 0) {
- return (ENOMEM);
- }
-#endif
-
ahc->init_level++;
/*
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 673e826d7adb..656f680c7802 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -42,16 +42,9 @@
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
-
#include "aic7xxx_pci.h"
static inline uint64_t
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.h b/drivers/scsi/aic7xxx/aicasm/aicasm.h
index 51678dd46ff7..716a2aefc925 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.h
@@ -42,11 +42,7 @@
* $FreeBSD$
*/
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#ifndef TRUE
#define TRUE 1
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index f1586a437906..924d55a8acbf 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -52,11 +52,7 @@
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
index 708326df0766..8c0479865f04 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
@@ -52,11 +52,7 @@
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
index c0457b8c3b77..98e9959c6907 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
@@ -51,11 +51,7 @@
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 93c8667cd704..c78d4f68eea5 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -51,11 +51,7 @@
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index 232aff1fe784..975fcfcc0d8f 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -44,11 +44,7 @@
#include <sys/types.h>
-#ifdef __linux__
#include "aicdb.h"
-#else
-#include <db.h>
-#endif
#include <fcntl.h>
#include <inttypes.h>
#include <regex.h>
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index 34bbcad7f83f..7bf7fd5953ac 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -42,11 +42,7 @@
* $FreeBSD$
*/
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
typedef enum {
UNINITIALIZED,
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 1391e5f35918..41c4d8abdd4a 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -771,13 +771,8 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto Err_remove;
err = -ENODEV;
- if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))
- && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)))
- ;
- else if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)))
- ;
- else {
+ if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
goto Err_remove;
}
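
The hunk above folds the two-step pci_set_dma_mask()/pci_set_consistent_dma_mask() dance into dma_set_mask_and_coherent(), which sets the streaming and coherent DMA masks together. A minimal probe-time sketch of the same 64-bit-then-32-bit fallback, with hypothetical driver names:

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Prefer 64-bit DMA, fall back to 32-bit; fail the probe otherwise. */
	static int mydrv_set_dma_mask(struct pci_dev *pdev)
	{
		if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
			return 0;
		if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
			return 0;
		dev_err(&pdev->dev, "no suitable DMA mask available\n");
		return -ENODEV;
	}
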
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 22873ce8bbfa..91ea87dfb700 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -724,9 +724,11 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask &= ~SAS_SPEED_60_DIS;
+ /* fall through */
default:
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SAS_SPEED_30_DIS;
+ /* fall through */
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SAS_SPEED_15_DIS;
}
@@ -734,6 +736,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->min_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask |= SAS_SPEED_30_DIS;
+ /* fall through */
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask |= SAS_SPEED_15_DIS;
default:
@@ -745,6 +748,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sata_lrate) {
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SATA_SPEED_30_DIS;
+ /* fall through */
default:
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SATA_SPEED_15_DIS;
@@ -803,6 +807,7 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
/* link reset retries, this should be nominal */
control_phy->link_reset_retries = 10;
+ /* fall through */
case RELEASE_SPINUP_HOLD: /* 0x02 */
/* decide the func_mask */
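
The new /* fall through */ comments mark each missing break as deliberate: the cases clear or set progressively slower link rates, so execution is meant to continue into the next label, and the annotation also silences -Wimplicit-fallthrough. A self-contained sketch of the idiom (hypothetical rate mask):

	/* Build a mask with every rate up to 'max' enabled. */
	static unsigned char rate_mask(int max)
	{
		unsigned char mask = 0;

		switch (max) {
		case 3:
			mask |= 0x4;
			/* fall through */
		case 2:
			mask |= 0x2;
			/* fall through */
		case 1:
			mask |= 0x1;
		}
		return mask;
	}
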
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index cdd4ab683be9..7fea344531f6 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -42,13 +42,13 @@ static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
}
-/* PCI_DMA_... to our direction translation.
+/* DMA_... to our direction translation.
*/
static const u8 data_dir_flags[] = {
- [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
- [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */
- [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */
- [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
+ [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+ [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
+ [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
+ [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
};
static int asd_map_scatterlist(struct sas_task *task,
@@ -60,12 +60,12 @@ static int asd_map_scatterlist(struct sas_task *task,
struct scatterlist *sc;
int num_sg, res;
- if (task->data_dir == PCI_DMA_NONE)
+ if (task->data_dir == DMA_NONE)
return 0;
if (task->num_scatter == 0) {
void *p = task->scatter;
- dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
+ dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
task->total_xfer_len,
task->data_dir);
sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
@@ -79,7 +79,7 @@ static int asd_map_scatterlist(struct sas_task *task,
if (sas_protocol_ata(task->task_proto))
num_sg = task->num_scatter;
else
- num_sg = pci_map_sg(asd_ha->pcidev, task->scatter,
+ num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
task->num_scatter, task->data_dir);
if (num_sg == 0)
return -ENOMEM;
@@ -126,8 +126,8 @@ static int asd_map_scatterlist(struct sas_task *task,
return 0;
err_unmap:
if (sas_protocol_ata(task->task_proto))
- pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
- task->data_dir);
+ dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
+ task->num_scatter, task->data_dir);
return res;
}
@@ -136,21 +136,21 @@ static void asd_unmap_scatterlist(struct asd_ascb *ascb)
struct asd_ha_struct *asd_ha = ascb->ha;
struct sas_task *task = ascb->uldd_task;
- if (task->data_dir == PCI_DMA_NONE)
+ if (task->data_dir == DMA_NONE)
return;
if (task->num_scatter == 0) {
dma_addr_t dma = (dma_addr_t)
le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
- pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len,
- task->data_dir);
+ dma_unmap_single(&ascb->ha->pcidev->dev, dma,
+ task->total_xfer_len, task->data_dir);
return;
}
asd_free_coherent(asd_ha, ascb->sg_arr);
if (task->task_proto != SAS_PROTOCOL_STP)
- pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
- task->data_dir);
+ dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
+ task->num_scatter, task->data_dir);
}
/* ---------- Task complete tasklet ---------- */
@@ -436,10 +436,10 @@ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
struct domain_device *dev = task->dev;
struct scb *scb;
- pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
- pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
scb = ascb->scb;
@@ -471,10 +471,10 @@ static void asd_unbuild_smp_ascb(struct asd_ascb *a)
struct sas_task *task = a->uldd_task;
BUG_ON(!task);
- pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
- pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
}
/* ---------- SSP ---------- */
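
aic94xx's direction table is now indexed by the generic enum dma_data_direction values, and all mappings go through the device's struct device instead of the PCI-specific wrappers. A minimal sketch of the scatterlist pattern used above, assuming a hypothetical map_xfer() helper:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int map_xfer(struct device *dev, struct scatterlist *sgl,
			    int nents, enum dma_data_direction dir)
	{
		int mapped = dma_map_sg(dev, sgl, nents, dir);

		if (!mapped)
			return -ENOMEM;	/* nothing could be mapped */
		/* program the HW from sg_dma_address()/sg_dma_len() ... */
		dma_unmap_sg(dev, sgl, nents, dir);	/* on completion, same nents */
		return 0;
	}
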
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index d81ca66e24d6..27c0a4a937d9 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -96,9 +96,7 @@ static void pci_esp_dma_drain(struct esp *esp);
static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
{
- struct pci_dev *pdev = esp->dev;
-
- return pci_get_drvdata(pdev);
+ return dev_get_drvdata(esp->dev);
}
static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
@@ -116,30 +114,6 @@ static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
return iowrite32(val, esp->regs + (reg * 4UL));
}
-static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return pci_map_single(esp->dev, buf, sz, dir);
-}
-
-static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return pci_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- pci_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- pci_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int pci_esp_irq_pending(struct esp *esp)
{
struct pci_esp_priv *pep = pci_esp_get_priv(esp);
@@ -295,10 +269,6 @@ static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
static const struct esp_driver_ops pci_esp_ops = {
.esp_write8 = pci_esp_write8,
.esp_read8 = pci_esp_read8,
- .map_single = pci_esp_map_single,
- .map_sg = pci_esp_map_sg,
- .unmap_single = pci_esp_unmap_single,
- .unmap_sg = pci_esp_unmap_sg,
.irq_pending = pci_esp_irq_pending,
.reset_dma = pci_esp_reset_dma,
.dma_drain = pci_esp_dma_drain,
@@ -375,18 +345,18 @@ static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
static void dc390_check_eeprom(struct esp *esp)
{
+ struct pci_dev *pdev = to_pci_dev(esp->dev);
u8 EEbuf[128];
u16 *ptr = (u16 *)EEbuf, wval = 0;
int i;
- dc390_read_eeprom((struct pci_dev *)esp->dev, ptr);
+ dc390_read_eeprom(pdev, ptr);
for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
wval += *ptr;
/* no Tekram EEprom found */
if (wval != 0x1234) {
- struct pci_dev *pdev = esp->dev;
dev_printk(KERN_INFO, &pdev->dev,
"No valid Tekram EEprom found\n");
return;
@@ -411,7 +381,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
return -ENODEV;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
dev_printk(KERN_INFO, &pdev->dev,
"failed to set 32bit DMA mask\n");
goto fail_disable_device;
@@ -435,7 +405,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
esp = shost_priv(shost);
esp->host = shost;
- esp->dev = pdev;
+ esp->dev = &pdev->dev;
esp->ops = &pci_esp_ops;
/*
* The am53c974 HBA has a design flaw of generating
@@ -467,8 +437,8 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
pci_set_master(pdev);
- esp->command_block = pci_alloc_consistent(pdev, 16,
- &esp->command_block_dma);
+ esp->command_block = dma_alloc_coherent(&pdev->dev, 16,
+ &esp->command_block_dma, GFP_KERNEL);
if (!esp->command_block) {
dev_printk(KERN_ERR, &pdev->dev,
"failed to allocate command block\n");
@@ -498,7 +468,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
/* Assume 40MHz clock */
esp->cfreq = 40000000;
- err = scsi_esp_register(esp, &pdev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
@@ -508,8 +478,8 @@ fail_free_irq:
free_irq(pdev->irq, esp);
fail_unmap_command_block:
pci_set_drvdata(pdev, NULL);
- pci_free_consistent(pdev, 16, esp->command_block,
- esp->command_block_dma);
+ dma_free_coherent(&pdev->dev, 16, esp->command_block,
+ esp->command_block_dma);
fail_unmap_regs:
pci_iounmap(pdev, esp->regs);
fail_release_regions:
@@ -532,8 +502,8 @@ static void pci_esp_remove_one(struct pci_dev *pdev)
scsi_esp_unregister(esp);
free_irq(pdev->irq, esp);
pci_set_drvdata(pdev, NULL);
- pci_free_consistent(pdev, 16, esp->command_block,
- esp->command_block_dma);
+ dma_free_coherent(&pdev->dev, 16, esp->command_block,
+ esp->command_block_dma);
pci_iounmap(pdev, esp->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
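
pci_alloc_consistent() is a thin wrapper that always allocates with GFP_ATOMIC; switching to dma_alloc_coherent() lets sleepable paths such as probe pass GFP_KERNEL, as the hunks above do. Allocation and free must agree on device, size, virtual address, and DMA handle — a minimal pairing sketch with hypothetical helper names:

	#include <linux/dma-mapping.h>

	static void *cmd_block_alloc(struct device *dev, size_t len,
				     dma_addr_t *dma)
	{
		return dma_alloc_coherent(dev, len, dma, GFP_KERNEL);
	}

	static void cmd_block_free(struct device *dev, size_t len, void *va,
				   dma_addr_t dma)
	{
		dma_free_coherent(dev, len, va, dma);
	}
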
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 12316ef4c893..d4404eea24fb 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1317,13 +1317,10 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
- int id, lun;
if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
struct scsi_cmnd *abortcmd = pCCB->pcmd;
if (abortcmd) {
- id = abortcmd->device->id;
- lun = abortcmd->device->lun;
abortcmd->result |= DID_ABORT << 16;
arcmsr_ccb_complete(pCCB);
printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
@@ -1798,7 +1795,7 @@ static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, acb->host->host_no);
}
}
@@ -1811,7 +1808,7 @@ static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, acb->host->host_no);
}
}
@@ -1824,7 +1821,7 @@ static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, pACB->host->host_no);
}
return;
@@ -1837,7 +1834,7 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
if (!arcmsr_hbaD_wait_msgint_ready(pACB))
- pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+ pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
"timeout\n", pACB->host->host_no);
}
@@ -1850,7 +1847,7 @@ static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
writel(pACB->out_doorbell, &reg->iobound_doorbell);
if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
- pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+ pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
"timeout\n", pACB->host->host_no);
}
}
@@ -3927,7 +3924,7 @@ static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
- rebulid' timeout \n", acb->host->host_no);
+ rebuild' timeout \n", acb->host->host_no);
}
}
@@ -3938,7 +3935,7 @@ static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
- rebulid' timeout \n",acb->host->host_no);
+ rebuild' timeout \n",acb->host->host_no);
}
}
@@ -3950,7 +3947,7 @@ static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
- rebulid' timeout \n", pACB->host->host_no);
+ rebuild' timeout \n", pACB->host->host_no);
}
return;
}
@@ -3963,7 +3960,7 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
pr_notice("arcmsr%d: wait 'start adapter "
- "background rebulid' timeout\n", pACB->host->host_no);
+ "background rebuild' timeout\n", pACB->host->host_no);
}
}
@@ -3977,7 +3974,7 @@ static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
writel(pACB->out_doorbell, &pmu->iobound_doorbell);
if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
pr_notice("arcmsr%d: wait 'start adapter "
- "background rebulid' timeout \n", pACB->host->host_no);
+ "background rebuild' timeout \n", pACB->host->host_no);
}
}
@@ -4135,9 +4132,9 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
pci_read_config_byte(acb->pdev, i, &value[i]);
}
/* hardware reset signal */
- if ((acb->dev_id == 0x1680)) {
+ if (acb->dev_id == 0x1680) {
writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
- } else if ((acb->dev_id == 0x1880)) {
+ } else if (acb->dev_id == 0x1880) {
do {
count++;
writel(0xF, &pmuC->write_sequence);
@@ -4161,7 +4158,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
} while (((readl(&pmuE->host_diagnostic_3xxx) &
ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
- } else if ((acb->dev_id == 0x1214)) {
+ } else if (acb->dev_id == 0x1214) {
writel(0x20, pmuD->reset_request);
} else {
pci_write_config_byte(acb->pdev, 0x84, 0x20);
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 8996d2329e11..802d15018ec0 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1193,7 +1193,7 @@ static void atp870u_free_tables(struct Scsi_Host *host)
for (k = 0; k < 16; k++) {
if (!atp_dev->id[j][k].prd_table)
continue;
- pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
+ dma_free_coherent(&atp_dev->pdev->dev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
atp_dev->id[j][k].prd_table = NULL;
}
}
@@ -1205,7 +1205,7 @@ static int atp870u_init_tables(struct Scsi_Host *host)
int c,k;
for(c=0;c < 2;c++) {
for(k=0;k<16;k++) {
- atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
+ atp_dev->id[c][k].prd_table = dma_alloc_coherent(&atp_dev->pdev->dev, 1024, &(atp_dev->id[c][k].prd_bus), GFP_KERNEL);
if (!atp_dev->id[c][k].prd_table) {
printk("atp870u_init_tables fail\n");
atp870u_free_tables(host);
@@ -1509,7 +1509,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto fail;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
err = -EIO;
goto disable_device;
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index c10aac4dbc5e..0a6972ee94d7 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -520,7 +520,7 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
**/
tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
if (tag_mem->size) {
- pci_free_consistent(ctrl->pdev, tag_mem->size,
+ dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
tag_mem->va, tag_mem->dma);
tag_mem->size = 0;
}
@@ -1269,12 +1269,12 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
struct be_sge *sge = nonembedded_sgl(wrb);
int status = 0;
- nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
+ nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
sizeof(struct be_mgmt_controller_attributes),
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : pci_alloc_consistent failed in %s\n",
+ "BG_%d : dma_alloc_coherent failed in %s\n",
__func__);
return -ENOMEM;
}
@@ -1314,7 +1314,7 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
"BG_%d : Failed in beiscsi_check_supported_fw\n");
mutex_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
- pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
+ dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return status;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index c8f0a2144b44..96b96e2ab91a 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -771,7 +771,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
status = beiscsi_get_initiator_name(phba, buf, false);
if (status < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Retreiving Initiator Name Failed\n");
+ "BS_%d : Retrieving Initiator Name Failed\n");
status = 0;
}
}
@@ -1071,9 +1071,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
else
req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
- nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
req_memsize,
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -1091,7 +1091,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : mgmt_open_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
beiscsi_free_ep(beiscsi_ep);
return -EAGAIN;
@@ -1104,8 +1104,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : mgmt_open_connection Failed");
if (ret != -EBUSY)
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
+ dma_free_coherent(&phba->ctrl.pdev->dev,
+ nonemb_cmd.size, nonemb_cmd.va,
+ nonemb_cmd.dma);
beiscsi_free_ep(beiscsi_ep);
return ret;
@@ -1118,7 +1119,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_open_connection Success\n");
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return 0;
}
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 3660059784f7..effb6fc95af4 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -511,18 +511,9 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
}
pci_set_master(pcidev);
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
if (ret) {
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
- goto pci_region_release;
- } else {
- ret = pci_set_consistent_dma_mask(pcidev,
- DMA_BIT_MASK(32));
- }
- } else {
- ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
goto pci_region_release;
@@ -550,9 +541,8 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
if (status)
return status;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = pci_alloc_consistent(pdev,
- mbox_mem_alloc->size,
- &mbox_mem_alloc->dma);
+ mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
+ mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
if (!mbox_mem_alloc->va) {
beiscsi_unmap_pci_function(phba);
return -ENOMEM;
@@ -1866,7 +1856,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
{
struct be_queue_info *cq;
struct sol_cqe *sol;
- struct dmsg_cqe *dmsg;
unsigned int total = 0;
unsigned int num_processed = 0;
unsigned short code = 0, cid = 0;
@@ -1939,7 +1928,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
"BM_%d : Received %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
- dmsg = (struct dmsg_cqe *)sol;
hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
break;
case UNSOL_HDR_NOTIFY:
@@ -2304,11 +2292,11 @@ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
/* Map addr only if there is data_count */
if (dsp_value) {
- io_task->mtask_addr = pci_map_single(phba->pcidev,
+ io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
task->data,
task->data_count,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(phba->pcidev,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&phba->pcidev->dev,
io_task->mtask_addr))
return -ENOMEM;
io_task->mtask_data_count = task->data_count;
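
As the hunk above does, a handle returned by dma_map_single() must be validated with dma_mapping_error() before it is handed to hardware; the error cookie is implementation-specific, so dma_mapping_error() is the only portable check. A small sketch (hypothetical helper):

	#include <linux/dma-mapping.h>

	static int map_one(struct device *dev, void *buf, size_t len,
			   dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;	/* do not use 'addr' */
		*out = addr;
		return 0;
	}
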
@@ -2519,10 +2507,9 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
BEISCSI_MAX_FRAGS_INIT);
curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
do {
- mem_arr->virtual_address = pci_alloc_consistent(
- phba->pcidev,
- curr_alloc_size,
- &bus_add);
+ mem_arr->virtual_address =
+ dma_alloc_coherent(&phba->pcidev->dev,
+ curr_alloc_size, &bus_add, GFP_KERNEL);
if (!mem_arr->virtual_address) {
if (curr_alloc_size <= BE_MIN_MEM_SIZE)
goto free_mem;
@@ -2560,7 +2547,7 @@ free_mem:
mem_descr->num_elements = j;
while ((i) || (j)) {
for (j = mem_descr->num_elements; j > 0; j--) {
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
mem_descr->mem_array[j - 1].size,
mem_descr->mem_array[j - 1].
virtual_address,
@@ -3031,9 +3018,9 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
eq = &phwi_context->be_eq[i].q;
mem = &eq->dma_mem;
phwi_context->be_eq[i].phba = phba;
- eq_vaddress = pci_alloc_consistent(phba->pcidev,
+ eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
num_eq_pages * PAGE_SIZE,
- &paddr);
+ &paddr, GFP_KERNEL);
if (!eq_vaddress) {
ret = -ENOMEM;
goto create_eq_error;
@@ -3069,7 +3056,7 @@ create_eq_error:
eq = &phwi_context->be_eq[i].q;
mem = &eq->dma_mem;
if (mem->va)
- pci_free_consistent(phba->pcidev, num_eq_pages
+ dma_free_coherent(&phba->pcidev->dev, num_eq_pages
* PAGE_SIZE,
mem->va, mem->dma);
}
@@ -3097,9 +3084,9 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
pbe_eq->cq = cq;
pbe_eq->phba = phba;
mem = &cq->dma_mem;
- cq_vaddress = pci_alloc_consistent(phba->pcidev,
+ cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
num_cq_pages * PAGE_SIZE,
- &paddr);
+ &paddr, GFP_KERNEL);
if (!cq_vaddress) {
ret = -ENOMEM;
goto create_cq_error;
@@ -3134,7 +3121,7 @@ create_cq_error:
cq = &phwi_context->be_cq[i];
mem = &cq->dma_mem;
if (mem->va)
- pci_free_consistent(phba->pcidev, num_cq_pages
+ dma_free_coherent(&phba->pcidev->dev, num_cq_pages
* PAGE_SIZE,
mem->va, mem->dma);
}
@@ -3326,7 +3313,7 @@ static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
if (mem->va) {
- pci_free_consistent(phba->pcidev, mem->size,
+ dma_free_coherent(&phba->pcidev->dev, mem->size,
mem->va, mem->dma);
mem->va = NULL;
}
@@ -3341,7 +3328,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
+ mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
@@ -3479,7 +3467,7 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
&ctrl->ptag_state[tag].tag_state)) {
ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
if (ptag_mem->size) {
- pci_free_consistent(ctrl->pdev,
+ dma_free_coherent(&ctrl->pdev->dev,
ptag_mem->size,
ptag_mem->va,
ptag_mem->dma);
@@ -3880,7 +3868,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
j = 0;
for (i = 0; i < SE_MEM_MAX; i++) {
for (j = mem_descr->num_elements; j > 0; j--) {
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
mem_descr->mem_array[j - 1].size,
mem_descr->mem_array[j - 1].virtual_address,
(unsigned long)mem_descr->mem_array[j - 1].
@@ -4255,10 +4243,10 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
}
if (io_task->mtask_addr) {
- pci_unmap_single(phba->pcidev,
+ dma_unmap_single(&phba->pcidev->dev,
io_task->mtask_addr,
io_task->mtask_data_count,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
io_task->mtask_addr = 0;
}
}
@@ -4852,9 +4840,9 @@ static int beiscsi_bsg_request(struct bsg_job *job)
switch (bsg_req->msgcode) {
case ISCSI_BSG_HST_VENDOR:
- nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
job->request_payload.payload_len,
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BM_%d : Failed to allocate memory for "
@@ -4867,7 +4855,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BM_%d : MBX Tag Allocation Failed\n");
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EAGAIN;
}
@@ -4881,7 +4869,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
clear_bit(MCC_TAG_STATE_RUNNING,
&phba->ctrl.ptag_state[tag].tag_state);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EIO;
}
@@ -4898,7 +4886,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
bsg_reply->result = status;
bsg_job_done(job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
if (status || extd_status) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -5529,7 +5517,6 @@ static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
return PCI_ERS_RESULT_RECOVERED;
}
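
This and the matching bfad/csiostor hunks drop pci_cleanup_aer_uncorrect_error_status() from the ->slot_reset() error handlers: the AER core now clears the uncorrectable-error status itself during recovery, leaving the callback with pure device reinitialization. A skeletal handler after the change, assuming a hypothetical mydrv_restart():

	static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
	{
		if (pci_enable_device(pdev))
			return PCI_ERS_RESULT_DISCONNECT;
		pci_set_master(pdev);
		pci_restore_state(pdev);
		if (mydrv_restart(pdev))	/* hypothetical re-init */
			return PCI_ERS_RESULT_DISCONNECT;
		return PCI_ERS_RESULT_RECOVERED;
	}
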
@@ -5755,7 +5742,7 @@ free_twq:
beiscsi_cleanup_port(phba);
beiscsi_free_mem(phba);
free_port:
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma);
@@ -5799,7 +5786,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
/* ctrl uninit */
beiscsi_unmap_pci_function(phba);
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma);
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 8fdc07b6c686..ca7b7bbc8371 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -284,7 +284,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
return rc;
free_cmd:
- pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
+ dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
nonemb_cmd->va, nonemb_cmd->dma);
return rc;
}
@@ -293,7 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
struct be_dma_mem *cmd,
u8 subsystem, u8 opcode, u32 size)
{
- cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
+ cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
+ GFP_KERNEL);
if (!cmd->va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to allocate memory for if info\n");
@@ -315,7 +316,7 @@ static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
if (tag_mem->size) {
- pci_free_consistent(phba->pcidev, tag_mem->size,
+ dma_free_coherent(&phba->pcidev->dev, tag_mem->size,
tag_mem->va, tag_mem->dma);
tag_mem->size = 0;
}
@@ -761,7 +762,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
"BG_%d : Memory Allocation Failure\n");
/* Free the DMA memory for the IOCTL issuing */
- pci_free_consistent(phba->ctrl.pdev,
+ dma_free_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd.size,
nonemb_cmd.va,
nonemb_cmd.dma);
@@ -780,7 +781,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
ioctl_size += sizeof(struct be_cmd_req_hdr);
/* Free the previous allocated DMA memory */
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va,
nonemb_cmd.dma);
@@ -869,7 +870,7 @@ static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
status);
boot_work = 0;
}
- pci_free_consistent(phba->ctrl.pdev, bs->nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, bs->nonemb_cmd.size,
bs->nonemb_cmd.va, bs->nonemb_cmd.dma);
bs->nonemb_cmd.va = NULL;
break;
@@ -1012,9 +1013,10 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
nonemb_cmd = &phba->boot_struct.nonemb_cmd;
nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp);
- nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd->size,
- &nonemb_cmd->dma);
+ &nonemb_cmd->dma,
+ GFP_KERNEL);
if (!nonemb_cmd->va) {
mutex_unlock(&ctrl->mbox_lock);
return 0;
@@ -1508,9 +1510,10 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
return -EINVAL;
nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
- nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd.size,
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma,
+ GFP_KERNEL);
if (!nonemb_cmd.va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
"BM_%d : invldt_cmds_params alloc failed\n");
@@ -1521,7 +1524,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
wrb = alloc_mcc_wrb(phba, &tag);
if (!wrb) {
mutex_unlock(&ctrl->mbox_lock);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -ENOMEM;
}
@@ -1548,7 +1551,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (rc != -EBUSY)
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return rc;
}
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 3d0c96a5c873..c19c26e0e405 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -1453,7 +1453,7 @@ union bfa_aen_data_u {
struct bfa_aen_entry_s {
struct list_head qe;
enum bfa_aen_category aen_category;
- u32 aen_type;
+ int aen_type;
union bfa_aen_data_u aen_data;
u64 aen_tv_sec;
u64 aen_tv_usec;
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index d3b00a475aeb..2de5d514e99c 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -190,27 +190,6 @@ fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
fchs->ox_id = ox_id;
}
-enum fc_parse_status
-fc_els_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
- struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
-
- len = len;
-
- switch (els_cmd->els_code) {
- case FC_ELS_LS_RJT:
- if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
- return FC_PARSE_BUSY;
- else
- return FC_PARSE_FAILURE;
-
- case FC_ELS_ACC:
- return FC_PARSE_OK;
- }
- return FC_PARSE_OK;
-}
-
static void
fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
{
@@ -831,18 +810,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
}
u16
-fc_logo_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-
- len = len;
- if (els_cmd->els_code != FC_ELS_ACC)
- return FC_PARSE_FAILURE;
-
- return FC_PARSE_OK;
-}
-
-u16
fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
wwn_t port_name, wwn_t node_name, u16 pdu_size)
{
@@ -908,40 +875,6 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
}
u16
-fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
- int num_pages = 0;
- int page = 0;
-
- len = len;
-
- if (prlo->command != FC_ELS_ACC)
- return FC_PARSE_FAILURE;
-
- num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
-
- for (page = 0; page < num_pages; page++) {
- if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].opa_valid != 0)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].rpa_valid != 0)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].orig_process_assc != 0)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].resp_process_assc != 0)
- return FC_PARSE_FAILURE;
- }
- return FC_PARSE_OK;
-
-}
-
-u16
fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
{
@@ -972,47 +905,6 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
}
u16
-fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1);
- int num_pages = 0;
- int page = 0;
-
- len = len;
-
- if (tprlo->command != FC_ELS_ACC)
- return FC_PARSE_ACC_INVAL;
-
- num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
-
- for (page = 0; page < num_pages; page++) {
- if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
- return FC_PARSE_NOT_FCP;
- if (tprlo->tprlo_acc_params[page].opa_valid != 0)
- return FC_PARSE_OPAFLAG_INVAL;
- if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
- return FC_PARSE_RPAFLAG_INVAL;
- if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
- return FC_PARSE_OPA_INVAL;
- if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
- return FC_PARSE_RPA_INVAL;
- }
- return FC_PARSE_OK;
-}
-
-enum fc_parse_status
-fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-
- len = len;
- if (els_cmd->els_code != FC_ELS_ACC)
- return FC_PARSE_FAILURE;
-
- return FC_PARSE_OK;
-}
-
-u16
fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
u32 reason_code, u32 reason_expl)
{
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
index b109a8813401..ac08d0b5b89a 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.h
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -163,7 +163,6 @@ enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
u32 s_id, u16 ox_id, u16 rrq_oxid);
-enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, u8 *name);
@@ -276,8 +275,6 @@ void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
__be16 ox_id);
-enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
-
enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
wwn_t port_name);
@@ -297,8 +294,6 @@ u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
-u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
-
u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name,
u16 pdu_size);
@@ -308,14 +303,10 @@ u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, int num_pages);
-u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
-
u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
u32 tpr_id);
-u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
-
u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
__be16 ox_id, u32 reason_code, u32 reason_expl);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index bd7e6a6fc1f1..911efc98d1fd 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1569,8 +1569,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
goto out_disable_device;
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
if (restart_bfa(bfad) == -1)
goto out_disable_device;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index e61ed8dad0b4..bd4ac187fd8e 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -143,7 +143,7 @@ struct bfad_im_s {
static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
struct bfad_s *drv, int cnt,
enum bfa_aen_category cat,
- enum bfa_ioc_aen_event evt)
+ int evt)
{
struct timespec64 ts;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f00045813378..cd160f2ec75d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -150,15 +150,11 @@ static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
struct fcoe_rcv_info *fr;
struct sk_buff_head *list;
struct sk_buff *skb, *next;
- struct sk_buff *head;
bg = &bnx2fc_global;
spin_lock_bh(&bg->fcoe_rx_list.lock);
list = &bg->fcoe_rx_list;
- head = list->next;
- for (skb = head; skb != (struct sk_buff *)list;
- skb = next) {
- next = skb->next;
+ skb_queue_walk_safe(list, skb, next) {
fr = fcoe_dev_from_skb(skb);
if (fr->fr_dev == lp) {
__skb_unlink(skb, list);
@@ -436,7 +432,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
struct sk_buff *tmp_skb;
- unsigned short oxid;
interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type);
@@ -470,8 +465,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
fh = (struct fc_frame_header *) skb_transport_header(skb);
- oxid = ntohs(fh->fh_ox_id);
-
fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport;
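
The open-coded walk of bg->fcoe_rx_list is replaced by skb_queue_walk_safe(), which caches the next pointer so the current skb may be unlinked mid-iteration. A self-contained sketch (the caller is assumed to hold the queue lock, as bnx2fc does):

	#include <linux/skbuff.h>

	static void drain_for_dev(struct sk_buff_head *list,
				  struct net_device *dev)
	{
		struct sk_buff *skb, *next;

		skb_queue_walk_safe(list, skb, next) {
			if (skb->dev == dev) {
				__skb_unlink(skb, list);	/* safe: 'next' is cached */
				kfree_skb(skb);
			}
		}
	}
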
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index ed2dae657964..1a458ce08210 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -210,11 +210,8 @@ csio_pci_init(struct pci_dev *pdev, int *bars)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- } else {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto err_release_regions;
}
@@ -1102,7 +1099,6 @@ csio_pci_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
/* Bring HW s/m to ready state.
* but don't resume IOs.
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index cc5611efc7a9..66e58f0a75dc 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1845,8 +1845,8 @@ csio_ln_fdmi_init(struct csio_lnode *ln)
/* Allocate Dma buffers for FDMI response Payload */
dma_buf = &ln->mgmt_req->dma_buf;
dma_buf->len = 2048;
- dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
- &dma_buf->paddr);
+ dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
+ &dma_buf->paddr, GFP_KERNEL);
if (!dma_buf->vaddr) {
csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
kfree(ln->mgmt_req);
@@ -1873,7 +1873,7 @@ csio_ln_fdmi_exit(struct csio_lnode *ln)
dma_buf = &ln->mgmt_req->dma_buf;
if (dma_buf->vaddr)
- pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
+ dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
dma_buf->paddr);
kfree(ln->mgmt_req);
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index dab0d3f9bee1..8c15b7acb4b7 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -2349,8 +2349,8 @@ csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
}
/* Allocate Dma buffers for DDP */
- ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
- &ddp_desc->paddr);
+ ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
+ &ddp_desc->paddr, GFP_KERNEL);
if (!ddp_desc->vaddr) {
csio_err(hw,
"SCSI response DMA buffer (ddp) allocation"
@@ -2372,8 +2372,8 @@ no_mem:
list_for_each(tmp, &scm->ddp_freelist) {
ddp_desc = (struct csio_dma_buf *) tmp;
tmp = csio_list_prev(tmp);
- pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
- ddp_desc->paddr);
+ dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
+ ddp_desc->vaddr, ddp_desc->paddr);
list_del_init(&ddp_desc->list);
kfree(ddp_desc);
}
@@ -2399,8 +2399,8 @@ csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
list_for_each(tmp, &scm->ddp_freelist) {
ddp_desc = (struct csio_dma_buf *) tmp;
tmp = csio_list_prev(tmp);
- pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
- ddp_desc->paddr);
+ dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
+ ddp_desc->vaddr, ddp_desc->paddr);
list_del_init(&ddp_desc->list);
kfree(ddp_desc);
}
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 5022e82ccc4f..dc12933533d5 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -124,8 +124,8 @@ csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
while (n--) {
buf->len = sge->sge_fl_buf_size[sreg];
- buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
- &buf->paddr);
+ buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
+ &buf->paddr, GFP_KERNEL);
if (!buf->vaddr) {
csio_err(hw, "Could only fill %d buffers!\n", n + 1);
return -ENOMEM;
@@ -233,7 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
q = wrm->q_arr[free_idx];
- q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart);
+ q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
+ GFP_KERNEL);
if (!q->vstart) {
csio_err(hw,
"Failed to allocate DMA memory for "
@@ -1703,14 +1704,14 @@ csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
buf = &q->un.fl.bufs[j];
if (!buf->vaddr)
continue;
- pci_free_consistent(hw->pdev, buf->len,
- buf->vaddr,
- buf->paddr);
+ dma_free_coherent(&hw->pdev->dev,
+ buf->len, buf->vaddr,
+ buf->paddr);
}
kfree(q->un.fl.bufs);
}
- pci_free_consistent(hw->pdev, q->size,
- q->vstart, q->pstart);
+ dma_free_coherent(&hw->pdev->dev, q->size,
+ q->vstart, q->pstart);
}
kfree(q);
}
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 211da1d5a869..064ef5735182 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -35,6 +35,11 @@ static unsigned int dbg_level;
#include "../libcxgbi.h"
+#ifdef CONFIG_CHELSIO_T4_DCB
+#include <net/dcbevent.h>
+#include "cxgb4_dcb.h"
+#endif
+
#define DRV_MODULE_NAME "cxgb4i"
#define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver"
#define DRV_MODULE_VERSION "0.9.5-ko"
@@ -155,6 +160,15 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.session_recovery_timedout = iscsi_session_recovery_timedout,
};
+#ifdef CONFIG_CHELSIO_T4_DCB
+static int
+cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);
+
+static struct notifier_block cxgb4_dcb_change = {
+ .notifier_call = cxgb4_dcb_change_notify,
+};
+#endif
+
static struct scsi_transport_template *cxgb4i_stt;
/*
@@ -574,6 +588,9 @@ static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
int nparams, flowclen16, flowclen;
nparams = FLOWC_WR_NPARAMS_MIN;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ nparams++;
+#endif
flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
flowclen16 = DIV_ROUND_UP(flowclen, 16);
flowclen = flowclen16 * 16;
@@ -595,6 +612,9 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
struct fw_flowc_wr *flowc;
int nparams, flowclen16, flowclen;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
+#endif
flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
flowc = (struct fw_flowc_wr *)skb->head;
@@ -622,6 +642,17 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
flowc->mnemval[8].val = 0;
flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
flowc->mnemval[8].val = 16384;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
+ if (vlan == CPL_L2T_VLAN_NONE) {
+ pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
+ csk->tid);
+ flowc->mnemval[9].val = cpu_to_be32(0);
+ } else {
+ flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT);
+ }
+#endif
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
@@ -1600,6 +1631,46 @@ static void release_offload_resources(struct cxgbi_sock *csk)
csk->dst = NULL;
}
+#ifdef CONFIG_CHELSIO_T4_DCB
+static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
+{
+ return ndev->dcbnl_ops->getstate(ndev);
+}
+
+static int select_priority(int pri_mask)
+{
+ if (!pri_mask)
+ return 0;
+ return (ffs(pri_mask) - 1);
+}
+
+static u8 get_iscsi_dcb_priority(struct net_device *ndev)
+{
+ int rv;
+ u8 caps;
+
+ struct dcb_app iscsi_dcb_app = {
+ .protocol = 3260
+ };
+
+ rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
+ if (rv)
+ return 0;
+
+ if (caps & DCB_CAP_DCBX_VER_IEEE) {
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+ rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+ } else if (caps & DCB_CAP_DCBX_VER_CEE) {
+ iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
+ rv = dcb_getapp(ndev, &iscsi_dcb_app);
+ }
+
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "iSCSI priority is set to %u\n", select_priority(rv));
+ return select_priority(rv);
+}
+#endif
+
static int init_act_open(struct cxgbi_sock *csk)
{
struct cxgbi_device *cdev = csk->cdev;
@@ -1613,7 +1684,9 @@ static int init_act_open(struct cxgbi_sock *csk)
unsigned int size, size6;
unsigned int linkspeed;
unsigned int rcv_winf, snd_winf;
-
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 priority = 0;
+#endif
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx,%u.\n",
csk, csk->state, csk->flags, csk->tid);
@@ -1647,7 +1720,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
+#ifdef CONFIG_CHELSIO_T4_DCB
+ if (get_iscsi_dcb_state(ndev))
+ priority = get_iscsi_dcb_priority(ndev);
+
+ csk->dcb_priority = priority;
+ csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
+#else
csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
+#endif
if (!csk->l2t) {
pr_err("%s, cannot alloc l2t.\n", ndev->name);
goto rel_resource_without_clip;
@@ -2146,6 +2227,70 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
return 0;
}
+#ifdef CONFIG_CHELSIO_T4_DCB
+static int
+cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ int i, port = 0xFF;
+ struct net_device *ndev;
+ struct cxgbi_device *cdev = NULL;
+ struct dcb_app_type *iscsi_app = data;
+ struct cxgbi_ports_map *pmap;
+ u8 priority;
+
+ if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
+ return NOTIFY_DONE;
+
+ priority = iscsi_app->app.priority;
+ } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
+ if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
+ return NOTIFY_DONE;
+
+ if (!iscsi_app->app.priority)
+ return NOTIFY_DONE;
+
+ priority = ffs(iscsi_app->app.priority) - 1;
+ } else {
+ return NOTIFY_DONE;
+ }
+
+ if (iscsi_app->app.protocol != 3260)
+ return NOTIFY_DONE;
+
+ log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
+ iscsi_app->ifindex, priority);
+
+ ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);
+
+ dev_put(ndev);
+ if (!cdev)
+ return NOTIFY_DONE;
+
+ pmap = &cdev->pmap;
+
+ for (i = 0; i < pmap->used; i++) {
+ if (pmap->port_csk[i]) {
+ struct cxgbi_sock *csk = pmap->port_csk[i];
+
+ if (csk->dcb_priority != priority) {
+ iscsi_conn_failure(csk->user_data,
+ ISCSI_ERR_CONN_FAILED);
+ pr_info("Restarting iSCSI connection %p with "
+ "priority %u->%u.\n", csk,
+ csk->dcb_priority, priority);
+ }
+ }
+ }
+ return NOTIFY_OK;
+}
+#endif
+
static int __init cxgb4i_init_module(void)
{
int rc;
@@ -2157,11 +2302,18 @@ static int __init cxgb4i_init_module(void)
return rc;
cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
+#ifdef CONFIG_CHELSIO_T4_DCB
+ pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
+ register_dcbevent_notifier(&cxgb4_dcb_change);
+#endif
return 0;
}
static void __exit cxgb4i_exit_module(void)
{
+#ifdef CONFIG_CHELSIO_T4_DCB
+ unregister_dcbevent_notifier(&cxgb4_dcb_change);
+#endif
cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
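
A note on the DCB lookup added above: it is the generic dcbnl query pattern
rather than anything Chelsio-specific. A minimal sketch of that pattern,
assuming kernel context and a populated ndev->dcbnl_ops (the function name
is illustrative, not from the patch):

#include <linux/netdevice.h>
#include <net/dcbnl.h>

/* Query the DCB app-table priority for iSCSI (TCP port 3260). */
static u8 example_iscsi_prio(struct net_device *ndev)
{
	struct dcb_app app = { .protocol = 3260 };
	u8 caps = 0;
	int mask = 0;

	if (!ndev->dcbnl_ops || !ndev->dcbnl_ops->getcap ||
	    ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps))
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		app.selector = IEEE_8021QAZ_APP_SEL_ANY;
		mask = dcb_ieee_getapp_mask(ndev, &app); /* prio bitmask */
	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		app.selector = DCB_APP_IDTYPE_PORTNUM;
		mask = dcb_getapp(ndev, &app);           /* prio bitmask */
	}

	return mask ? ffs(mask) - 1 : 0; /* lowest configured priority */
}

Both modes yield a bitmask of 802.1p priorities for the app entry; the
lowest set bit is taken, matching select_priority() above.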
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 3f3af5e74a07..75f876409fb9 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -784,7 +784,8 @@ cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex)
csk->mtu = mtu;
csk->dst = dst;
- if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) {
+ rt6_get_prefsrc(rt, &pref_saddr);
+ if (ipv6_addr_any(&pref_saddr)) {
struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
@@ -794,8 +795,6 @@ cxgbi_check_route6(struct sockaddr *dst_addr, int ifindex)
&daddr6->sin6_addr);
goto rel_rt;
}
- } else {
- pref_saddr = rt->rt6i_prefsrc.addr;
}
csk->csk_family = AF_INET6;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index dcb190e75343..5d5d8b50d842 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -120,6 +120,9 @@ struct cxgbi_sock {
int wr_max_cred;
int wr_cred;
int wr_una_cred;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 dcb_priority;
+#endif
unsigned char hcrc_len;
unsigned char dcrc_len;
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 1ed2cd82129d..8c55ec6e1827 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -753,105 +753,6 @@ static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
return NULL;
}
-
-static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
-{
- struct list_head *head = &acb->srb_free_list;
- struct ScsiReqBlk *srb = NULL;
-
- if (!list_empty(head)) {
- srb = list_entry(head->next, struct ScsiReqBlk, list);
- list_del(head->next);
- dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
- }
- return srb;
-}
-
-
-static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
- list_add_tail(&srb->list, &acb->srb_free_list);
-}
-
-
-static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_add(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_waiting_append(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_add_tail(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_add_tail(&srb->list, &dcb->srb_going_list);
-}
-
-
-static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
- struct ScsiReqBlk *i;
- struct ScsiReqBlk *tmp;
- dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
-
- list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
- if (i == srb) {
- list_del(&srb->list);
- break;
- }
-}
-
-
-static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- struct ScsiReqBlk *i;
- struct ScsiReqBlk *tmp;
- dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
-
- list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
- if (i == srb) {
- list_del(&srb->list);
- break;
- }
-}
-
-
-static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0,
- "srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_move(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0,
- "srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_move(&srb->list, &dcb->srb_going_list);
-}
-
-
/* Sets the timer to wake us up */
static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{
@@ -923,7 +824,7 @@ static void waiting_process_next(struct AdapterCtlBlk *acb)
/* Try to send to the bus */
if (!start_scsi(acb, pos, srb))
- srb_waiting_to_going_move(pos, srb);
+ list_move(&srb->list, &pos->srb_going_list);
else
waiting_set_timer(acb, HZ/50);
break;
@@ -960,15 +861,15 @@ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
acb->active_dcb ||
(acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
- srb_waiting_append(dcb, srb);
+ list_add_tail(&srb->list, &dcb->srb_waiting_list);
waiting_process_next(acb);
return;
}
- if (!start_scsi(acb, dcb, srb))
- srb_going_append(dcb, srb);
- else {
- srb_waiting_insert(dcb, srb);
+ if (!start_scsi(acb, dcb, srb)) {
+ list_add_tail(&srb->list, &dcb->srb_going_list);
+ } else {
+ list_add(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 50);
}
}
@@ -1045,10 +946,8 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
sgp->length++;
}
- srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
- srb->segment_x,
- SEGMENTX_LEN,
- PCI_DMA_TODEVICE);
+ srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
+ srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
@@ -1116,9 +1015,9 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
cmd->scsi_done = done;
cmd->result = 0;
- srb = srb_get_free(acb);
- if (!srb)
- {
+ srb = list_first_entry_or_null(&acb->srb_free_list,
+ struct ScsiReqBlk, list);
+ if (!srb) {
/*
* Return 1 since we are unable to queue this command at this
* point in time.
@@ -1126,12 +1025,13 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
dprintkdbg(DBG_0, "queue_command: No free srb's\n");
return 1;
}
+ list_del(&srb->list);
build_srb(cmd, dcb, srb);
if (!list_empty(&dcb->srb_waiting_list)) {
/* append to waiting queue */
- srb_waiting_append(dcb, srb);
+ list_add_tail(&srb->list, &dcb->srb_waiting_list);
waiting_process_next(acb);
} else {
/* process immediately */
@@ -1376,11 +1276,11 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
srb = find_cmd(cmd, &dcb->srb_waiting_list);
if (srb) {
- srb_waiting_remove(dcb, srb);
+ list_del(&srb->list);
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
free_tag(dcb, srb);
- srb_free_insert(acb, srb);
+ list_add_tail(&srb->list, &acb->srb_free_list);
dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
cmd->result = DID_ABORT << 16;
return SUCCESS;
@@ -1969,14 +1869,15 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
xferred -= psge->length;
} else {
/* Partial SG entry done */
+ dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
+ srb->sg_bus_addr, SEGMENTX_LEN,
+ DMA_TO_DEVICE);
psge->length -= xferred;
psge->address += xferred;
srb->sg_index = idx;
- pci_dma_sync_single_for_device(srb->dcb->
- acb->dev,
- srb->sg_bus_addr,
- SEGMENTX_LEN,
- PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
+ srb->sg_bus_addr, SEGMENTX_LEN,
+ DMA_TO_DEVICE);
break;
}
psge++;
@@ -3083,7 +2984,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
goto disc1;
}
free_tag(dcb, srb);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
dprintkdbg(DBG_KG,
"disconnect: (0x%p) Retry\n",
srb->cmd);
@@ -3148,7 +3049,7 @@ static void reselect(struct AdapterCtlBlk *acb)
srb->state = SRB_READY;
free_tag(dcb, srb);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 20);
/* return; */
@@ -3271,9 +3172,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
/* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN);
- pci_unmap_single(acb->dev, srb->sg_bus_addr,
- SEGMENTX_LEN,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
+ DMA_TO_DEVICE);
dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
scsi_sg_count(cmd), scsi_bufflen(cmd));
/* unmap the sg segments */
@@ -3291,8 +3191,8 @@ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
/* Unmap sense buffer */
dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
srb->segment_x[0].address);
- pci_unmap_single(acb->dev, srb->segment_x[0].address,
- srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
+ srb->segment_x[0].length, DMA_FROM_DEVICE);
/* Restore SG stuff */
srb->total_xfer_length = srb->xferred;
srb->segment_x[0].address =
@@ -3411,7 +3311,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
tempcnt--;
dcb->max_command = tempcnt;
free_tag(dcb, srb);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 20);
srb->adapter_status = 0;
srb->target_status = 0;
@@ -3447,14 +3347,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
}
}
- if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
- pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
- scsi_sg_count(cmd), dir);
-
ckc_only = 0;
/* Check Error Conditions */
ckc_e:
+ pci_unmap_srb(acb, srb);
+
if (cmd->cmnd[0] == INQUIRY) {
unsigned char *base = NULL;
struct ScsiInqData *ptr;
@@ -3498,16 +3396,14 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
cmd->cmnd[0], srb->total_xfer_length);
}
- srb_going_remove(dcb, srb);
- /* Add to free list */
- if (srb == acb->tmp_srb)
- dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
- else {
+ if (srb != acb->tmp_srb) {
+ /* Add to free list */
dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
cmd, cmd->result);
- srb_free_insert(acb, srb);
+ list_move_tail(&srb->list, &acb->srb_free_list);
+ } else {
+ dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
}
- pci_unmap_srb(acb, srb);
cmd->scsi_done(cmd);
waiting_process_next(acb);
@@ -3535,9 +3431,9 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
result = MK_RES(0, did_flag, 0, 0);
printk("G:%p(%02i-%i) ", p,
p->device->id, (u8)p->device->lun);
- srb_going_remove(dcb, srb);
+ list_del(&srb->list);
free_tag(dcb, srb);
- srb_free_insert(acb, srb);
+ list_add_tail(&srb->list, &acb->srb_free_list);
p->result = result;
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
@@ -3565,8 +3461,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
result = MK_RES(0, did_flag, 0, 0);
printk("W:%p<%02i-%i>", p, p->device->id,
(u8)p->device->lun);
- srb_waiting_remove(dcb, srb);
- srb_free_insert(acb, srb);
+ list_move_tail(&srb->list, &acb->srb_free_list);
p->result = result;
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
@@ -3692,9 +3587,9 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
/* Map sense buffer */
- srb->segment_x[0].address =
- pci_map_single(acb->dev, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+ srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
+ cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+ DMA_FROM_DEVICE);
dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
cmd->sense_buffer, srb->segment_x[0].address,
SCSI_SENSE_BUFFERSIZE);
@@ -3705,7 +3600,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
dprintkl(KERN_DEBUG,
"request_sense: (0x%p) failed <%02i-%i>\n",
srb->cmd, dcb->target_id, dcb->target_lun);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 100);
}
}
@@ -4392,7 +4287,7 @@ static void adapter_init_params(struct AdapterCtlBlk *acb)
/* link static array of srbs into the srb free list */
for (i = 0; i < acb->srb_count - 1; i++)
- srb_free_insert(acb, &acb->srb_array[i]);
+ list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list);
}
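
The dc395x hunks above delete thin wrapper helpers in favor of direct
<linux/list.h> calls. A minimal sketch of the resulting idiom, with
illustrative names (struct req, pop_free) that are not from the driver:

#include <linux/list.h>

struct req {
	struct list_head list;
};

/* One-step replacement for the old srb_get_free() wrapper. */
static struct req *pop_free(struct list_head *free_list)
{
	struct req *r = list_first_entry_or_null(free_list, struct req, list);

	if (r)
		list_del(&r->list);
	return r;
}

/* list_move() unlinks and re-links in one call, replacing the old
 * srb_going_to_waiting_move() style helpers. */
static void requeue(struct req *r, struct list_head *waiting)
{
	list_move(&r->list, waiting);
}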
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index c3fc34b9964d..ac7da9db7317 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -369,19 +369,28 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
struct scatterlist *sg = scsi_sglist(cmd);
- int dir = cmd->sc_data_direction;
- int total, i;
+ int total = 0, i;
- if (dir == DMA_NONE)
+ if (cmd->sc_data_direction == DMA_NONE)
return;
- spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
+ if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+ /*
+ * For pseudo DMA and PIO we need the virtual address instead of
+ * a dma address, so perform an identity mapping.
+ */
+ spriv->num_sg = scsi_sg_count(cmd);
+ for (i = 0; i < spriv->num_sg; i++) {
+ sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
+ total += sg_dma_len(&sg[i]);
+ }
+ } else {
+ spriv->num_sg = scsi_dma_map(cmd);
+ for (i = 0; i < spriv->num_sg; i++)
+ total += sg_dma_len(&sg[i]);
+ }
spriv->cur_residue = sg_dma_len(sg);
spriv->cur_sg = sg;
-
- total = 0;
- for (i = 0; i < spriv->u.num_sg; i++)
- total += sg_dma_len(&sg[i]);
spriv->tot_residue = total;
}
@@ -441,13 +450,8 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
- struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
- int dir = cmd->sc_data_direction;
-
- if (dir == DMA_NONE)
- return;
-
- esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
+ if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+ scsi_dma_unmap(cmd);
}
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
@@ -478,17 +482,6 @@ static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
spriv->tot_residue = ent->saved_tot_residue;
}
-static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
-{
- if (cmd->cmd_len == 6 ||
- cmd->cmd_len == 10 ||
- cmd->cmd_len == 12) {
- esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
- } else {
- esp->flags |= ESP_FLAG_DOING_SLOWCMD;
- }
-}
-
static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
if (esp->rev > ESP100A) {
@@ -624,6 +617,26 @@ static void esp_free_lun_tag(struct esp_cmd_entry *ent,
}
}
+static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ ent->sense_ptr = ent->cmd->sense_buffer;
+ if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+ ent->sense_dma = (uintptr_t)ent->sense_ptr;
+ return;
+ }
+
+ ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+}
+
+static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+ dma_unmap_single(esp->dev, ent->sense_dma,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ ent->sense_ptr = NULL;
+}
+
/* When a contingent allegiance condition is created, we force feed a
* REQUEST_SENSE command to the device to fetch the sense data. I
* tried many other schemes, relying on the scsi error handling layer
@@ -645,12 +658,7 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
if (!ent->sense_ptr) {
esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
tgt, lun);
-
- ent->sense_ptr = cmd->sense_buffer;
- ent->sense_dma = esp->ops->map_single(esp,
- ent->sense_ptr,
- SCSI_SENSE_BUFFERSIZE,
- DMA_FROM_DEVICE);
+ esp_map_sense(esp, ent);
}
ent->saved_sense_ptr = ent->sense_ptr;
@@ -717,10 +725,10 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
static void esp_maybe_execute_command(struct esp *esp)
{
struct esp_target_data *tp;
- struct esp_lun_data *lp;
struct scsi_device *dev;
struct scsi_cmnd *cmd;
struct esp_cmd_entry *ent;
+ bool select_and_stop = false;
int tgt, lun, i;
u32 val, start_cmd;
u8 *p;
@@ -743,7 +751,6 @@ static void esp_maybe_execute_command(struct esp *esp)
tgt = dev->id;
lun = dev->lun;
tp = &esp->target[tgt];
- lp = dev->hostdata;
list_move(&ent->list, &esp->active_cmds);
@@ -752,7 +759,8 @@ static void esp_maybe_execute_command(struct esp *esp)
esp_map_dma(esp, cmd);
esp_save_pointers(esp, ent);
- esp_check_command_len(esp, cmd);
+ if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
+ select_and_stop = true;
p = esp->command_block;
@@ -793,42 +801,22 @@ static void esp_maybe_execute_command(struct esp *esp)
tp->flags &= ~ESP_TGT_CHECK_NEGO;
}
- /* Process it like a slow command. */
- if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
- esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+ /* If there are multiple message bytes, use Select and Stop */
+ if (esp->msg_out_len)
+ select_and_stop = true;
}
build_identify:
- /* If we don't have a lun-data struct yet, we're probing
- * so do not disconnect. Also, do not disconnect unless
- * we have a tag on this command.
- */
- if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
- *p++ = IDENTIFY(1, lun);
- else
- *p++ = IDENTIFY(0, lun);
+ *p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);
if (ent->tag[0] && esp->rev == ESP100) {
/* ESP100 lacks select w/atn3 command, use select
* and stop instead.
*/
- esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+ select_and_stop = true;
}
- if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
- start_cmd = ESP_CMD_SELA;
- if (ent->tag[0]) {
- *p++ = ent->tag[0];
- *p++ = ent->tag[1];
-
- start_cmd = ESP_CMD_SA3;
- }
-
- for (i = 0; i < cmd->cmd_len; i++)
- *p++ = cmd->cmnd[i];
-
- esp->select_state = ESP_SELECT_BASIC;
- } else {
+ if (select_and_stop) {
esp->cmd_bytes_left = cmd->cmd_len;
esp->cmd_bytes_ptr = &cmd->cmnd[0];
@@ -843,6 +831,19 @@ build_identify:
start_cmd = ESP_CMD_SELAS;
esp->select_state = ESP_SELECT_MSGOUT;
+ } else {
+ start_cmd = ESP_CMD_SELA;
+ if (ent->tag[0]) {
+ *p++ = ent->tag[0];
+ *p++ = ent->tag[1];
+
+ start_cmd = ESP_CMD_SA3;
+ }
+
+ for (i = 0; i < cmd->cmd_len; i++)
+ *p++ = cmd->cmnd[i];
+
+ esp->select_state = ESP_SELECT_BASIC;
}
val = tgt;
if (esp->rev == FASHME)
@@ -902,9 +903,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
}
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
- esp->ops->unmap_single(esp, ent->sense_dma,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- ent->sense_ptr = NULL;
+ esp_unmap_sense(esp, ent);
/* Restore the message/status bytes to what we actually
* saw originally. Also, report that we are providing
@@ -965,7 +964,7 @@ static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_
cmd->scsi_done = done;
spriv = ESP_CMD_PRIV(cmd);
- spriv->u.dma_addr = ~(dma_addr_t)0x0;
+ spriv->num_sg = 0;
list_add_tail(&ent->list, &esp->queued_cmds);
@@ -1252,14 +1251,10 @@ static int esp_finish_select(struct esp *esp)
esp_unmap_dma(esp, cmd);
esp_free_lun_tag(ent, cmd->device->hostdata);
tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
- esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
esp->cmd_bytes_ptr = NULL;
esp->cmd_bytes_left = 0;
} else {
- esp->ops->unmap_single(esp, ent->sense_dma,
- SCSI_SENSE_BUFFERSIZE,
- DMA_FROM_DEVICE);
- ent->sense_ptr = NULL;
+ esp_unmap_sense(esp, ent);
}
/* Now that the state is unwound properly, put back onto
@@ -1303,9 +1298,8 @@ static int esp_finish_select(struct esp *esp)
esp_flush_fifo(esp);
}
- /* If we are doing a slow command, negotiation, etc.
- * we'll do the right thing as we transition to the
- * next phase.
+ /* If we are doing a Select And Stop command, negotiation, etc.
+ * we'll do the right thing as we transition to the next phase.
*/
esp_event(esp, ESP_EVENT_CHECK_PHASE);
return 0;
@@ -1338,6 +1332,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
bytes_sent = esp->data_dma_len;
bytes_sent -= ecount;
+ bytes_sent -= esp->send_cmd_residual;
/*
* The am53c974 has a DMA 'peculiarity'. The doc states:
@@ -1358,7 +1353,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
u8 *ptr;
- ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
+ ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
&offset, &count);
if (likely(ptr)) {
*(ptr + offset) = bval;
@@ -2039,11 +2034,8 @@ static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
esp_free_lun_tag(ent, cmd->device->hostdata);
cmd->result = DID_RESET << 16;
- if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
- esp->ops->unmap_single(esp, ent->sense_dma,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- ent->sense_ptr = NULL;
- }
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
+ esp_unmap_sense(esp, ent);
cmd->scsi_done(cmd);
list_del(&ent->list);
@@ -2382,7 +2374,7 @@ static const char *esp_chip_names[] = {
static struct scsi_transport_template *esp_transport_template;
-int scsi_esp_register(struct esp *esp, struct device *dev)
+int scsi_esp_register(struct esp *esp)
{
static int instance;
int err;
@@ -2402,10 +2394,10 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
esp_bootup_reset(esp);
- dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
+ dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
esp->host->unique_id, esp->regs, esp->dma_regs,
esp->host->irq);
- dev_printk(KERN_INFO, dev,
+ dev_printk(KERN_INFO, esp->dev,
"esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
esp->host->unique_id, esp_chip_names[esp->rev],
esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
@@ -2413,7 +2405,7 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
/* Let the SCSI bus reset settle. */
ssleep(esp_bus_reset_settle);
- err = scsi_add_host(esp->host, dev);
+ err = scsi_add_host(esp->host, esp->dev);
if (err)
return err;
@@ -2790,3 +2782,131 @@ MODULE_PARM_DESC(esp_debug,
module_init(esp_init);
module_exit(esp_exit);
+
+#ifdef CONFIG_SCSI_ESP_PIO
+static inline unsigned int esp_wait_for_fifo(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+ if (fbytes)
+ return fbytes;
+
+ udelay(1);
+ } while (--i);
+
+ shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
+ esp_read8(ESP_STATUS));
+ return 0;
+}
+
+static inline int esp_wait_for_intr(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ esp->sreg = esp_read8(ESP_STATUS);
+ if (esp->sreg & ESP_STAT_INTR)
+ return 0;
+
+ udelay(1);
+ } while (--i);
+
+ shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
+ esp->sreg);
+ return 1;
+}
+
+#define ESP_FIFO_SIZE 16
+
+void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+ cmd &= ~ESP_CMD_DMA;
+ esp->send_cmd_error = 0;
+
+ if (write) {
+ u8 *dst = (u8 *)addr;
+ u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
+
+ scsi_esp_cmd(esp, cmd);
+
+ while (1) {
+ if (!esp_wait_for_fifo(esp))
+ break;
+
+ *dst++ = readb(esp->fifo_reg);
+ --esp_count;
+
+ if (!esp_count)
+ break;
+
+ if (esp_wait_for_intr(esp)) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ if ((esp->sreg & ESP_STAT_PMASK) != phase)
+ break;
+
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if (esp->ireg & mask) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ if (phase == ESP_MIP)
+ esp_write8(ESP_CMD_MOK, ESP_CMD);
+
+ esp_write8(ESP_CMD_TI, ESP_CMD);
+ }
+ } else {
+ unsigned int n = ESP_FIFO_SIZE;
+ u8 *src = (u8 *)addr;
+
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ if (n > esp_count)
+ n = esp_count;
+ writesb(esp->fifo_reg, src, n);
+ src += n;
+ esp_count -= n;
+
+ scsi_esp_cmd(esp, cmd);
+
+ while (esp_count) {
+ if (esp_wait_for_intr(esp)) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ if ((esp->sreg & ESP_STAT_PMASK) != phase)
+ break;
+
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if (esp->ireg & ~ESP_INTR_BSERV) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ n = ESP_FIFO_SIZE -
+ (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
+
+ if (n > esp_count)
+ n = esp_count;
+ writesb(esp->fifo_reg, src, n);
+ src += n;
+ esp_count -= n;
+
+ esp_write8(ESP_CMD_TI, ESP_CMD);
+ }
+ }
+
+ esp->send_cmd_residual = esp_count;
+}
+EXPORT_SYMBOL(esp_send_pio_cmd);
+#endif
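
The ESP_FLAG_NO_DMA_MAP path above hinges on an identity mapping: PIO and
pseudo-DMA back ends consume CPU virtual addresses, so each scatterlist
entry's dma_address is filled with the buffer's kernel virtual address
instead of a bus address. A hedged sketch of that idea (helper name is
illustrative):

#include <linux/scatterlist.h>

static void identity_map_sg(struct scatterlist *sg, int num_sg)
{
	int i;

	for (i = 0; i < num_sg; i++) {
		/* No IOMMU or bounce buffer is involved; the driver itself
		 * moves the data, one FIFO access at a time. */
		sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
	}
}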
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 8163dca2071b..aa87a6b72dcc 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -249,11 +249,7 @@
#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
struct esp_cmd_priv {
- union {
- dma_addr_t dma_addr;
- int num_sg;
- } u;
-
+ int num_sg;
int cur_residue;
struct scatterlist *cur_sg;
int tot_residue;
@@ -363,19 +359,6 @@ struct esp_driver_ops {
void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
u8 (*esp_read8)(struct esp *esp, unsigned long reg);
- /* Map and unmap DMA memory. Eventually the driver will be
- * converted to the generic DMA API as soon as SBUS is able to
- * cope with that. At such time we can remove this.
- */
- dma_addr_t (*map_single)(struct esp *esp, void *buf,
- size_t sz, int dir);
- int (*map_sg)(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir);
- void (*unmap_single)(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir);
- void (*unmap_sg)(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir);
-
/* Return non-zero if there is an IRQ pending. Usually this
* status bit lives in the DMA controller sitting in front of
* the ESP. This has to be accurate or else the ESP interrupt
@@ -435,7 +418,7 @@ struct esp {
const struct esp_driver_ops *ops;
struct Scsi_Host *host;
- void *dev;
+ struct device *dev;
struct esp_cmd_entry *active_cmd;
@@ -490,11 +473,11 @@ struct esp {
u32 flags;
#define ESP_FLAG_DIFFERENTIAL 0x00000001
#define ESP_FLAG_RESETTING 0x00000002
-#define ESP_FLAG_DOING_SLOWCMD 0x00000004
#define ESP_FLAG_WIDE_CAPABLE 0x00000008
#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
#define ESP_FLAG_DISABLE_SYNC 0x00000020
#define ESP_FLAG_USE_FIFO 0x00000040
+#define ESP_FLAG_NO_DMA_MAP 0x00000080
u8 select_state;
#define ESP_SELECT_NONE 0x00 /* Not selecting */
@@ -532,7 +515,7 @@ struct esp {
u32 min_period;
u32 radelay;
- /* Slow command state. */
+ /* ESP_CMD_SELAS command state */
u8 *cmd_bytes_ptr;
int cmd_bytes_left;
@@ -540,6 +523,11 @@ struct esp {
void *dma;
int dmarev;
+
+ /* These are used by esp_send_pio_cmd() */
+ u8 __iomem *fifo_reg;
+ int send_cmd_error;
+ u32 send_cmd_residual;
};
/* A front-end driver for the ESP chip should do the following in
@@ -568,16 +556,18 @@ struct esp {
* example, the DMA engine has to be reset before ESP can
* be programmed.
* 11) If necessary, call dev_set_drvdata() as needed.
- * 12) Call scsi_esp_register() with prepared 'esp' structure
- * and a device pointer if possible.
+ * 12) Call scsi_esp_register() with prepared 'esp' structure.
* 13) Check scsi_esp_register() return value, release all resources
* if an error was returned.
*/
extern struct scsi_host_template scsi_esp_template;
-extern int scsi_esp_register(struct esp *, struct device *);
+extern int scsi_esp_register(struct esp *);
extern void scsi_esp_unregister(struct esp *);
extern irqreturn_t scsi_esp_intr(int, void *);
extern void scsi_esp_cmd(struct esp *, u8);
+extern void esp_send_pio_cmd(struct esp *esp, u32 dma_addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd);
+
#endif /* !(_ESP_SCSI_H) */
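
With the per-driver map/unmap ops removed, a front end is now expected to
fill in esp->dev and, for PIO-only hardware, set ESP_FLAG_NO_DMA_MAP and
fifo_reg before registering. A sketch of a probe tail under those
assumptions (the 4-byte register stride is a placeholder, not taken from
any real front end):

static int example_probe_tail(struct esp *esp, struct device *dev)
{
	esp->dev = dev;                    /* now struct device *, not void * */
	esp->flags |= ESP_FLAG_NO_DMA_MAP; /* PIO: no real DMA mapping */
	esp->fifo_reg = esp->regs + ESP_FDATA * 4; /* assumed register stride */

	return scsi_esp_register(esp);     /* device pointer no longer passed */
}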
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index c7bf316d8e83..844ef688fa91 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -836,8 +836,8 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
u32 fcp_bytes_written = 0;
unsigned long flags;
- pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
skb = buf->os_buf;
fp = (struct fc_frame *)skb;
buf->os_buf = NULL;
@@ -977,9 +977,8 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb_put(skb, len);
- pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
-
- if (pci_dma_mapping_error(fnic->pdev, pa)) {
+ pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, pa)) {
r = -ENOMEM;
printk(KERN_ERR "PCI mapping failed with error %d\n", r);
goto free_skb;
@@ -998,8 +997,8 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(rq->vdev);
- pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
@@ -1018,7 +1017,6 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr;
unsigned long flags;
- int r;
if (!fnic->vlan_hw_insert) {
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1038,11 +1036,10 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
}
}
- pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-
- r = pci_dma_mapping_error(fnic->pdev, pa);
- if (r) {
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, pa)) {
+ printk(KERN_ERR "DMA mapping failed\n");
goto free_skb;
}
@@ -1058,7 +1055,7 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
irq_restore:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
- pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
kfree_skb(skb);
}
@@ -1115,9 +1112,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
if (FC_FCOE_VER)
FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
- pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
-
- if (pci_dma_mapping_error(fnic->pdev, pa)) {
+ pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, pa)) {
ret = -ENOMEM;
printk(KERN_ERR "DMA map failed with error %d\n", ret);
goto free_skb_on_err;
@@ -1131,8 +1127,7 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) {
- pci_unmap_single(fnic->pdev, pa,
- tot_len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
ret = -1;
goto irq_restore;
}
@@ -1247,8 +1242,8 @@ static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
struct fc_frame *fp = (struct fc_frame *)skb;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
- pci_unmap_single(fnic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_irq(fp_skb(fp));
buf->os_buf = NULL;
}
@@ -1290,8 +1285,8 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
- pci_unmap_single(fnic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
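
Every fnic_fcs.c hunk above is an instance of one mechanical conversion:
pci_map_single()/pci_dma_mapping_error() with PCI_DMA_* directions become
the generic DMA API on &pdev->dev. A condensed sketch of the new shape
(wrapper name is illustrative):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int map_one(struct pci_dev *pdev, void *buf, size_t len,
		   dma_addr_t *pa)
{
	*pa = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *pa))
		return -ENOMEM;   /* never hand a failed mapping to hardware */
	return 0;
}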
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index e52599f44170..cc461fd7bef1 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -611,30 +611,15 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* limitation for the device. Try 64-bit first, and
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"No usable DMA configuration "
"aborting\n");
goto err_out_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to obtain 32-bit DMA "
- "for consistent allocations, aborting.\n");
- goto err_out_release_regions;
- }
- } else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to obtain 64-bit DMA "
- "for consistent allocations, aborting.\n");
- goto err_out_release_regions;
- }
}
/* Map vNIC resources from BAR0 */
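
The probe change above collapses four mask calls into two because
dma_set_mask_and_coherent() sets the streaming and coherent masks
together. The idiom, as a sketch:

static int example_set_masks(struct pci_dev *pdev)
{
	/* Prefer 64-bit DMA and fall back to 32-bit; one call per width
	 * now covers both the streaming and the coherent mask. */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}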
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 8cbd3c9f0b4c..96acfcecd540 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -126,17 +126,17 @@ static void fnic_release_ioreq_buf(struct fnic *fnic,
struct scsi_cmnd *sc)
{
if (io_req->sgl_list_pa)
- pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+ dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
scsi_dma_unmap(sc);
if (io_req->sgl_cnt)
mempool_free(io_req->sgl_list_alloc,
fnic->io_sgl_pool[io_req->sgl_type]);
if (io_req->sense_buf_pa)
- pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
- SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
@@ -330,7 +330,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
int flags;
u8 exch_flags;
struct scsi_lun fc_lun;
- int r;
if (sg_count) {
/* For each SGE, create a device desc entry */
@@ -342,30 +341,25 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
desc++;
}
- io_req->sgl_list_pa = pci_map_single
- (fnic->pdev,
- io_req->sgl_list,
- sizeof(io_req->sgl_list[0]) * sg_count,
- PCI_DMA_TODEVICE);
-
- r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
- if (r) {
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
+ io_req->sgl_list,
+ sizeof(io_req->sgl_list[0]) * sg_count,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
+ printk(KERN_ERR "DMA mapping failed\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
}
- io_req->sense_buf_pa = pci_map_single(fnic->pdev,
+ io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
sc->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
-
- r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
- if (r) {
- pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
+ dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
sizeof(io_req->sgl_list[0]) * sg_count,
- PCI_DMA_TODEVICE);
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ DMA_TO_DEVICE);
+ printk(KERN_ERR "DMA mapping failed\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -2272,33 +2266,17 @@ clean_pending_aborts_end:
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
- struct blk_queue_tag *bqt = fnic->lport->host->bqt;
- int tag, ret = SCSI_NO_TAG;
-
- BUG_ON(!bqt);
- if (!bqt) {
- pr_err("Tags are not supported\n");
- goto end;
- }
-
- do {
- tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
- if (tag >= bqt->max_depth) {
- pr_err("Tag allocation failure\n");
- goto end;
- }
- } while (test_and_set_bit(tag, bqt->tag_map));
+ struct request_queue *q = sc->request->q;
+ struct request *dummy;
- bqt->tag_index[tag] = sc->request;
- sc->request->tag = tag;
- sc->tag = tag;
- if (!sc->request->special)
- sc->request->special = sc;
+ dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(dummy))
+ return SCSI_NO_TAG;
- ret = tag;
+ sc->tag = sc->request->tag = dummy->tag;
+ sc->request->special = sc;
-end:
- return ret;
+ return dummy->tag;
}
/**
@@ -2308,20 +2286,9 @@ end:
static inline void
fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
- struct blk_queue_tag *bqt = fnic->lport->host->bqt;
- int tag = sc->request->tag;
+ struct request *dummy = sc->request->special;
- if (tag == SCSI_NO_TAG)
- return;
-
- BUG_ON(!bqt || !bqt->tag_index[tag]);
- if (!bqt)
- return;
-
- bqt->tag_index[tag] = NULL;
- clear_bit(tag, bqt->tag_map);
-
- return;
+ blk_mq_free_request(dummy);
}
/*
@@ -2380,19 +2347,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
tag = sc->request->tag;
if (unlikely(tag < 0)) {
/*
- * XXX(hch): current the midlayer fakes up a struct
- * request for the explicit reset ioctls, and those
- * don't have a tag allocated to them. The below
- * code pokes into midlayer structures to paper over
- * this design issue, but that won't work for blk-mq.
- *
- * Either someone who can actually test the hardware
- * will have to come up with a similar hack for the
- * blk-mq case, or we'll have to bite the bullet and
- * fix the way the EH ioctls work for real, but until
- * that happens we fail these explicit requests here.
+ * Really should fix the midlayer to pass in a proper
+ * request for ioctls...
*/
-
tag = fnic_scsi_host_start_tag(fnic, sc);
if (unlikely(tag == SCSI_NO_TAG))
goto fnic_device_reset_end;
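
The reworked tag hack above reserves a genuine blk-mq tag by allocating a
throwaway request on the command's queue, instead of poking at the legacy
blk_queue_tag bitmap. A sketch of the trick (function name is
illustrative):

#include <linux/blk-mq.h>
#include <scsi/scsi_tcq.h>

static int example_borrow_tag(struct request_queue *q, struct request **dummy)
{
	/* NOWAIT: fail fast rather than sleep in the reset/ioctl path. */
	*dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(*dummy))
		return SCSI_NO_TAG;
	return (*dummy)->tag;  /* released later via blk_mq_free_request() */
}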
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index ba69d6112fa1..434447ea24b8 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -195,9 +195,9 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
{
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
- ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+ ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
ring->size_unaligned,
- &ring->base_addr_unaligned);
+ &ring->base_addr_unaligned, GFP_KERNEL);
if (!ring->descs_unaligned) {
printk(KERN_ERR
@@ -221,7 +221,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
ring->size_unaligned,
ring->descs_unaligned,
ring->base_addr_unaligned);
@@ -298,9 +298,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
int err = 0;
if (!vdev->fw_info) {
- vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+ vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
- &vdev->fw_info_pa);
+ &vdev->fw_info_pa, GFP_KERNEL);
if (!vdev->fw_info)
return -ENOMEM;
@@ -361,8 +361,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
int wait = 1000;
if (!vdev->stats) {
- vdev->stats = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_stats), &vdev->stats_pa);
+ vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
if (!vdev->stats)
return -ENOMEM;
}
@@ -523,9 +523,9 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
int wait = 1000;
if (!vdev->notify) {
- vdev->notify = pci_alloc_consistent(vdev->pdev,
+ vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
- &vdev->notify_pa);
+ &vdev->notify_pa, GFP_KERNEL);
if (!vdev->notify)
return -ENOMEM;
}
@@ -647,21 +647,21 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
if (vdev->linkstatus)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(u32),
vdev->linkstatus,
vdev->linkstatus_pa);
if (vdev->stats)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
kfree(vdev);
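
The vnic_dev.c hunks are the coherent-allocation half of the same
conversion: pci_alloc_consistent()/pci_free_consistent() become
dma_alloc_coherent()/dma_free_coherent(). One detail worth noting in a
sketch: the old wrapper implied GFP_ATOMIC, while the generic API makes
the allocation context explicit.

#include <linux/dma-mapping.h>

static void *example_alloc_ring(struct pci_dev *pdev, size_t size,
				dma_addr_t *pa)
{
	/* GFP_KERNEL is now spelled out; probe paths may sleep. */
	return dma_alloc_coherent(&pdev->dev, size, pa, GFP_KERNEL);
}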
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 6c7d2e201abe..0ddb53c8a2e2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -34,6 +34,7 @@
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
+#define HISI_SAS_RESERVED_IPTT_CNT 96
#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -217,7 +218,7 @@ struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *device);
- int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx,
+ int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index a4e2e6aa9a6b..b3f01d5b821b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -183,7 +183,14 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
- hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+ unsigned long flags;
+
+ if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
+ hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
}
static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
@@ -193,24 +200,34 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
set_bit(slot_idx, bitmap);
}
-static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
+static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
+ struct scsi_cmnd *scsi_cmnd)
{
- unsigned int index;
+ int index;
void *bitmap = hisi_hba->slot_index_tags;
+ unsigned long flags;
+
+ if (scsi_cmnd)
+ return scsi_cmnd->request->tag;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
- hisi_hba->last_slot_index + 1);
+ hisi_hba->last_slot_index + 1);
if (index >= hisi_hba->slot_index_count) {
- index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
- 0);
- if (index >= hisi_hba->slot_index_count)
+ index = find_next_zero_bit(bitmap,
+ hisi_hba->slot_index_count,
+ hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT);
+ if (index >= hisi_hba->slot_index_count) {
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -SAS_QUEUE_FULL;
+ }
}
hisi_sas_slot_index_set(hisi_hba, index);
- *slot_idx = index;
hisi_hba->last_slot_index = index;
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
- return 0;
+ return index;
}
static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
@@ -249,9 +266,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
- spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot->idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
@@ -287,13 +302,13 @@ static int hisi_sas_task_prep(struct sas_task *task,
int *pass)
{
struct domain_device *device = task->dev;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct hisi_hba *hisi_hba;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
struct asd_sas_port *sas_port = device->port;
- struct device *dev = hisi_hba->dev;
+ struct device *dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
struct hisi_sas_dq *dq;
@@ -314,6 +329,9 @@ static int hisi_sas_task_prep(struct sas_task *task,
return -ECOMM;
}
+ hisi_hba = dev_to_hisi_hba(device);
+ dev = hisi_hba->dev;
+
if (DEV_IS_GONE(sas_dev)) {
if (sas_dev)
dev_info(dev, "task prep: device %d not ready\n",
@@ -381,16 +399,27 @@ static int hisi_sas_task_prep(struct sas_task *task,
goto err_out_dma_unmap;
}
- spin_lock_irqsave(&hisi_hba->lock, flags);
if (hisi_hba->hw->slot_index_alloc)
- rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
- device);
- else
- rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- if (rc)
+ rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
+ else {
+ struct scsi_cmnd *scsi_cmnd = NULL;
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(device)) {
+ qc = task->uldd_task;
+ scsi_cmnd = qc->scsicmd;
+ } else {
+ scsi_cmnd = task->uldd_task;
+ }
+ }
+ rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
+ }
+ if (rc < 0)
goto err_out_dma_unmap;
+ slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
spin_lock_irqsave(&dq->lock, flags);
@@ -451,9 +480,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
return 0;
err_out_tag:
- spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out_dma_unmap:
if (!sas_protocol_ata(task->task_proto)) {
if (task->num_scatter) {
@@ -904,6 +931,9 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
_r.maximum_linkrate = max;
_r.minimum_linkrate = min;
+ sas_phy->phy->maximum_linkrate = max;
+ sas_phy->phy->minimum_linkrate = min;
+
hisi_hba->hw->phy_disable(hisi_hba, phy_no);
msleep(100);
hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
@@ -950,8 +980,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
static void hisi_sas_task_done(struct sas_task *task)
{
- if (!del_timer(&task->slow_task->timer))
- return;
+ del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@@ -960,13 +989,17 @@ static void hisi_sas_tmf_timedout(struct timer_list *t)
struct sas_task_slow *slow = from_timer(slow, t, timer);
struct sas_task *task = slow->task;
unsigned long flags;
+ bool is_completed = true;
spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ is_completed = false;
+ }
spin_unlock_irqrestore(&task->task_state_lock, flags);
- complete(&task->slow_task->completion);
+ if (!is_completed)
+ complete(&task->slow_task->completion);
}
#define TASK_TIMEOUT 20
@@ -1019,8 +1052,16 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
struct hisi_sas_slot *slot = task->lldd_task;
dev_err(dev, "abort tmf: TMF task timeout and not done\n");
- if (slot)
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+ * flush tasklet to avoid freeing task
+ * before using task in IO completion
+ */
+ tasklet_kill(&cq->tasklet);
slot->task = NULL;
+ }
goto ex_err;
} else
@@ -1396,6 +1437,17 @@ static int hisi_sas_abort_task(struct sas_task *task)
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ struct hisi_sas_slot *slot = task->lldd_task;
+ struct hisi_sas_cq *cq;
+
+ if (slot) {
+ /*
+ * flush tasklet to avoid freeing task
+ * before using task in IO completion
+ */
+ cq = &hisi_hba->cq[slot->dlvry_queue];
+ tasklet_kill(&cq->tasklet);
+ }
spin_unlock_irqrestore(&task->task_state_lock, flags);
rc = TMF_RESP_FUNC_COMPLETE;
goto out;
@@ -1451,12 +1503,19 @@ static int hisi_sas_abort_task(struct sas_task *task)
/* SMP */
struct hisi_sas_slot *slot = task->lldd_task;
u32 tag = slot->idx;
+ struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
- task->lldd_task)
- hisi_sas_do_release_task(hisi_hba, task, slot);
+ task->lldd_task) {
+ /*
+ * flush tasklet to avoid freeing task
+ * before using task in IO completion
+ */
+ tasklet_kill(&cq->tasklet);
+ slot->task = NULL;
+ }
}
out:
@@ -1705,14 +1764,11 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
port = to_hisi_sas_port(sas_port);
/* simply get a slot and send abort command */
- spin_lock_irqsave(&hisi_hba->lock, flags);
- rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
- if (rc) {
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
+ if (rc < 0)
goto err_out;
- }
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
spin_lock_irqsave(&dq->lock, flags_dq);
@@ -1748,7 +1804,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
-
WRITE_ONCE(slot->ready, 1);
/* send abort command to the chip */
spin_lock_irqsave(&dq->lock, flags);
@@ -1759,9 +1814,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
return 0;
err_out_tag:
- spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
@@ -1823,8 +1876,16 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;
- if (slot)
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+ * flush tasklet to avoid freeing task
+ * before using task in IO completion
+ */
+ tasklet_kill(&cq->tasklet);
slot->task = NULL;
+ }
dev_err(dev, "internal task abort: timeout and not done.\n");
res = -EIO;
goto exit;
@@ -1861,10 +1922,6 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
hisi_sas_port_notify_formed(sas_phy);
}
-static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
-{
-}
-
static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
u8 reg_index, u8 reg_count, u8 *write_data)
{
@@ -1954,10 +2011,9 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
.lldd_lu_reset = hisi_sas_lu_reset,
.lldd_query_task = hisi_sas_query_task,
- .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
+ .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
- .lldd_port_deformed = hisi_sas_port_deformed,
- .lldd_write_gpio = hisi_sas_write_gpio,
+ .lldd_write_gpio = hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -2120,6 +2176,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
hisi_sas_init_mem(hisi_hba);
hisi_sas_slot_index_init(hisi_hba);
+ hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
if (!hisi_hba->wq) {
@@ -2323,8 +2381,15 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->max_channel = 1;
shost->max_cmd_len = 16;
shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
- shost->can_queue = hisi_hba->hw->max_command_entries;
- shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+ if (hisi_hba->hw->slot_index_alloc) {
+ shost->can_queue = hisi_hba->hw->max_command_entries;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+ } else {
+ shost->can_queue = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
+ }
sha->sas_ha_name = DRV_NAME;
sha->dev = hisi_hba->dev;
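
A hedged sketch of the tag split introduced above (the window size mirrors
HISI_SAS_RESERVED_IPTT_CNT; the function name is illustrative): blk-mq owns
tags [0, max - 96) one-to-one via scsi_cmnd->request->tag, while the
driver's private bitmap covers only the reserved window [max - 96, max)
for internal commands (aborts, TMFs) that carry no scsi_cmnd.

static int example_alloc_iptt(struct hisi_hba *hba, struct scsi_cmnd *scmd)
{
	int max = hba->hw->max_command_entries;
	int idx;

	if (scmd)                       /* normal I/O: reuse the blk-mq tag */
		return scmd->request->tag;

	/* Internal command: take a bit from the reserved window.
	 * Caller is assumed to hold hba->lock. */
	idx = find_next_zero_bit(hba->slot_index_tags, max,
				 max - HISI_SAS_RESERVED_IPTT_CNT);
	if (idx >= max)
		return -SAS_QUEUE_FULL;
	set_bit(idx, hba->slot_index_tags);
	return idx;
}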
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 8f60f0e04599..f0e457e6884e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1809,7 +1809,6 @@ static struct scsi_host_template sht_v1_hw = {
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
- .can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 9c5c5a601332..cc36b6473e98 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -770,7 +770,7 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
/* This function needs to be protected from pre-emption. */
static int
-slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
+slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
struct domain_device *device)
{
int sata_dev = dev_is_sata(device);
@@ -778,6 +778,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
struct hisi_sas_device *sas_dev = device->lldd_dev;
int sata_idx = sas_dev->sata_idx;
int start, end;
+ unsigned long flags;
if (!sata_dev) {
/*
@@ -801,11 +802,14 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
end = 64 * (sata_idx + 2);
}
+ spin_lock_irqsave(&hisi_hba->lock, flags);
while (1) {
start = find_next_zero_bit(bitmap,
hisi_hba->slot_index_count, start);
- if (start >= end)
+ if (start >= end) {
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -SAS_QUEUE_FULL;
+ }
/*
* SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
*/
@@ -815,8 +819,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
}
set_bit(start, bitmap);
- *slot_idx = start;
- return 0;
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ return start;
}
static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
@@ -2483,7 +2487,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -2493,6 +2496,7 @@ out:
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
spin_lock_irqsave(&device->done_lock, flags);
@@ -3560,7 +3564,6 @@ static struct scsi_host_template sht_v2_hw = {
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
- .can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 08b503e274b8..bd4ce38b98d2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -127,6 +127,7 @@
#define PHY_CTRL_RESET_OFF 0
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
#define SL_CFG (PORT_BASE + 0x84)
+#define AIP_LIMIT (PORT_BASE + 0x90)
#define SL_CONTROL (PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF 0
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
@@ -431,6 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
(u32)((1ULL << hisi_hba->queue_count) - 1));
hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+ hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
@@ -441,7 +443,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
if (pdev->revision >= 0x21)
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7aff);
else
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
@@ -495,6 +497,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
+ hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
}
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -1751,7 +1754,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -1761,6 +1763,7 @@ out:
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
spin_lock_irqsave(&device->done_lock, flags);
@@ -2098,7 +2101,6 @@ static struct scsi_host_template sht_v3_hw = {
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
- .can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
@@ -2108,6 +2110,7 @@ static struct scsi_host_template sht_v3_hw = {
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = host_attrs,
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
static const struct hisi_sas_hw hisi_sas_v3_hw = {
@@ -2245,8 +2248,10 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_channel = 1;
shost->max_cmd_len = 16;
shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
- shost->can_queue = hisi_hba->hw->max_command_entries;
- shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+ shost->can_queue = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
sha->sas_ha_name = DRV_NAME;
sha->dev = dev;
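
The probe hunk above reserves part of the IPTT space: the tag count exposed to the SCSI midlayer (can_queue) is the hardware command-entry count minus HISI_SAS_RESERVED_IPTT_CNT, so internal commands (aborts, TMFs) can still obtain a tag when the host queue is saturated. Schematically, with made-up numbers:

    #define DEMO_MAX_CMDS      4096   /* hw->max_command_entries    */
    #define DEMO_RESERVED_CMDS   96   /* held back for internal use */

    shost->can_queue   = DEMO_MAX_CMDS - DEMO_RESERVED_CMDS;
    shost->cmd_per_lun = DEMO_MAX_CMDS - DEMO_RESERVED_CMDS;

The new .tag_alloc_policy = BLK_TAG_ALLOC_RR in the host template asks the block layer to hand out tags round-robin rather than always reusing the lowest free tag, spreading commands across the IPTT range.
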
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c120929d4ffe..c9cccf35e9d7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2240,8 +2240,8 @@ static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
chain_size = le32_to_cpu(cp->sg[0].length);
- temp64 = pci_map_single(h->pdev, chain_block, chain_size,
- PCI_DMA_TODEVICE);
+ temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&h->pdev->dev, temp64)) {
/* prevent subsequent unmapping */
cp->sg->address = 0;
@@ -2261,7 +2261,7 @@ static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
chain_sg = cp->sg;
temp64 = le64_to_cpu(chain_sg->address);
chain_size = le32_to_cpu(cp->sg[0].length);
- pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
+ dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
}
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
@@ -2277,8 +2277,8 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
chain_len = sizeof(*chain_sg) *
(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
chain_sg->Len = cpu_to_le32(chain_len);
- temp64 = pci_map_single(h->pdev, chain_block, chain_len,
- PCI_DMA_TODEVICE);
+ temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&h->pdev->dev, temp64)) {
/* prevent subsequent unmapping */
chain_sg->Addr = cpu_to_le64(0);
@@ -2297,8 +2297,8 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
return;
chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
- pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
- le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
+ dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
+ le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
}
@@ -2759,13 +2759,13 @@ static void complete_scsi_command(struct CommandList *cp)
return hpsa_cmd_free_and_done(h, cp, cmd);
}
-static void hpsa_pci_unmap(struct pci_dev *pdev,
- struct CommandList *c, int sg_used, int data_direction)
+static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
+ int sg_used, enum dma_data_direction data_direction)
{
int i;
for (i = 0; i < sg_used; i++)
- pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
+ dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
le32_to_cpu(c->SG[i].Len),
data_direction);
}
@@ -2774,17 +2774,17 @@ static int hpsa_map_one(struct pci_dev *pdev,
struct CommandList *cp,
unsigned char *buf,
size_t buflen,
- int data_direction)
+ enum dma_data_direction data_direction)
{
u64 addr64;
- if (buflen == 0 || data_direction == PCI_DMA_NONE) {
+ if (buflen == 0 || data_direction == DMA_NONE) {
cp->Header.SGList = 0;
cp->Header.SGTotal = cpu_to_le16(0);
return 0;
}
- addr64 = pci_map_single(pdev, buf, buflen, data_direction);
+ addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
if (dma_mapping_error(&pdev->dev, addr64)) {
/* Prevent subsequent unmap of something never mapped */
cp->Header.SGList = 0;
@@ -2845,7 +2845,8 @@ static u32 lockup_detected(struct ctlr_info *h)
#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
- struct CommandList *c, int data_direction, unsigned long timeout_msecs)
+ struct CommandList *c, enum dma_data_direction data_direction,
+ unsigned long timeout_msecs)
{
int backoff_time = 10, retry_count = 0;
int rc;
@@ -2969,8 +2970,8 @@ static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
rc = -1;
goto out;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3022,8 +3023,8 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
rc = -1;
goto out;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3306,8 +3307,8 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
cmd_free(h, c);
return -1;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3349,8 +3350,8 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
c->Request.CDB[2] = bmic_device_index & 0xff;
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3377,8 +3378,8 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h,
if (rc)
goto out;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3408,7 +3409,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
c->Request.CDB[2] = bmic_device_index & 0xff;
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
- hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
NO_TIMEOUT);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
@@ -3484,7 +3485,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
else
c->Request.CDB[5] = 0;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
NO_TIMEOUT);
if (rc)
goto out;
@@ -3731,8 +3732,8 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
}
if (extended_response)
c->Request.CDB[1] = extended_response;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -6320,8 +6321,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
/* Fill in the scatter gather information */
if (iocommand.buf_size > 0) {
- temp64 = pci_map_single(h->pdev, buff,
- iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ temp64 = dma_map_single(&h->pdev->dev, buff,
+ iocommand.buf_size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
c->SG[0].Addr = cpu_to_le64(0);
c->SG[0].Len = cpu_to_le32(0);
@@ -6335,7 +6336,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
NO_TIMEOUT);
if (iocommand.buf_size > 0)
- hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+ hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
if (rc) {
rc = -EIO;
@@ -6381,13 +6382,9 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
- if (!ioc) {
- status = -ENOMEM;
- goto cleanup1;
- }
- if (copy_from_user(ioc, argp, sizeof(*ioc))) {
- status = -EFAULT;
+ ioc = vmemdup_user(argp, sizeof(*ioc));
+ if (IS_ERR(ioc)) {
+ status = PTR_ERR(ioc);
goto cleanup1;
}
if ((ioc->buf_size < 1) &&
@@ -6447,14 +6444,14 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
if (ioc->buf_size > 0) {
int i;
for (i = 0; i < sg_used; i++) {
- temp64 = pci_map_single(h->pdev, buff[i],
- buff_size[i], PCI_DMA_BIDIRECTIONAL);
+ temp64 = dma_map_single(&h->pdev->dev, buff[i],
+ buff_size[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(&h->pdev->dev,
(dma_addr_t) temp64)) {
c->SG[i].Addr = cpu_to_le64(0);
c->SG[i].Len = cpu_to_le32(0);
hpsa_pci_unmap(h->pdev, c, i,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
status = -ENOMEM;
goto cleanup0;
}
@@ -6467,7 +6464,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
NO_TIMEOUT);
if (sg_used)
- hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+ hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
if (status) {
status = -EIO;
@@ -6505,7 +6502,7 @@ cleanup1:
kfree(buff);
}
kfree(buff_size);
- kfree(ioc);
+ kvfree(ioc);
return status;
}
@@ -6579,7 +6576,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
int cmd_type)
{
- int pci_dir = XFER_NONE;
+ enum dma_data_direction dir = DMA_NONE;
c->cmd_type = CMD_IOCTL_PEND;
c->scsi_cmd = SCSI_CMD_BUSY;
@@ -6785,18 +6782,18 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
switch (GET_DIR(c->Request.type_attr_dir)) {
case XFER_READ:
- pci_dir = PCI_DMA_FROMDEVICE;
+ dir = DMA_FROM_DEVICE;
break;
case XFER_WRITE:
- pci_dir = PCI_DMA_TODEVICE;
+ dir = DMA_TO_DEVICE;
break;
case XFER_NONE:
- pci_dir = PCI_DMA_NONE;
+ dir = DMA_NONE;
break;
default:
- pci_dir = PCI_DMA_BIDIRECTIONAL;
+ dir = DMA_BIDIRECTIONAL;
}
- if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+ if (hpsa_map_one(h->pdev, c, buff, size, dir))
return -1;
return 0;
}
@@ -6992,13 +6989,13 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
* CCISS commands, so they must be allocated from the lower 4GiB of
* memory.
*/
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
iounmap(vaddr);
return err;
}
- cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
+ cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
if (cmd == NULL) {
iounmap(vaddr);
return -ENOMEM;
@@ -7047,7 +7044,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
return -ETIMEDOUT;
}
- pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+ dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
if (tag & HPSA_ERROR_BIT) {
dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
@@ -7914,7 +7911,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
kfree(h->cmd_pool_bits);
h->cmd_pool_bits = NULL;
if (h->cmd_pool) {
- pci_free_consistent(h->pdev,
+ dma_free_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(struct CommandList),
h->cmd_pool,
h->cmd_pool_dhandle);
@@ -7922,7 +7919,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
h->cmd_pool_dhandle = 0;
}
if (h->errinfo_pool) {
- pci_free_consistent(h->pdev,
+ dma_free_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
@@ -7936,12 +7933,12 @@ static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
sizeof(unsigned long),
GFP_KERNEL);
- h->cmd_pool = pci_alloc_consistent(h->pdev,
+ h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->cmd_pool),
- &(h->cmd_pool_dhandle));
- h->errinfo_pool = pci_alloc_consistent(h->pdev,
+ &h->cmd_pool_dhandle, GFP_KERNEL);
+ h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->errinfo_pool),
- &(h->errinfo_pool_dhandle));
+ &h->errinfo_pool_dhandle, GFP_KERNEL);
if ((h->cmd_pool_bits == NULL)
|| (h->cmd_pool == NULL)
|| (h->errinfo_pool == NULL)) {
@@ -8068,7 +8065,7 @@ static void hpsa_free_reply_queues(struct ctlr_info *h)
for (i = 0; i < h->nreply_queues; i++) {
if (!h->reply_queue[i].head)
continue;
- pci_free_consistent(h->pdev,
+ dma_free_coherent(&h->pdev->dev,
h->reply_queue_size,
h->reply_queue[i].head,
h->reply_queue[i].busaddr);
@@ -8594,11 +8591,11 @@ reinit_after_soft_reset:
number_of_controllers++;
/* configure PCI DMA stuff */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (rc == 0) {
dac = 1;
} else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc == 0) {
dac = 0;
} else {
@@ -8797,8 +8794,8 @@ static void hpsa_flush_cache(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD)) {
goto out;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+ DEFAULT_TIMEOUT);
if (rc)
goto out;
if (c->err_info->CommandStatus != 0)
@@ -8833,8 +8830,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD))
goto errout;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8845,8 +8842,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD))
goto errout;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+ NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8855,8 +8852,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD))
goto errout;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -9228,9 +9225,9 @@ static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
IOACCEL1_COMMANDLIST_ALIGNMENT);
h->ioaccel_cmd_pool =
- pci_alloc_consistent(h->pdev,
+ dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
- &(h->ioaccel_cmd_pool_dhandle));
+ &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
h->ioaccel1_blockFetchTable =
kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9281,9 +9278,9 @@ static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
IOACCEL2_COMMANDLIST_ALIGNMENT);
h->ioaccel2_cmd_pool =
- pci_alloc_consistent(h->pdev,
+ dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
- &(h->ioaccel2_cmd_pool_dhandle));
+ &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
h->ioaccel2_blockFetchTable =
kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9356,9 +9353,10 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
h->reply_queue_size = h->max_commands * sizeof(u64);
for (i = 0; i < h->nreply_queues; i++) {
- h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+ h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
h->reply_queue_size,
- &(h->reply_queue[i].busaddr));
+ &h->reply_queue[i].busaddr,
+ GFP_KERNEL);
if (!h->reply_queue[i].head) {
rc = -ENOMEM;
goto clean1; /* rq, ioaccel */
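
The bulk of the hpsa diff is a mechanical move from the deprecated pci_* DMA wrappers to the generic DMA API: pci_map_single()/pci_unmap_single() become dma_map_single()/dma_unmap_single() on &pdev->dev, the PCI_DMA_* integer directions become enum dma_data_direction values, and pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent() with an explicit GFP mask. The converted streaming-map pattern, reduced to a sketch (demo_map is a hypothetical name):

    static int demo_map(struct pci_dev *pdev, void *buf, size_t len,
                        dma_addr_t *out)
    {
            dma_addr_t addr;

            addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(&pdev->dev, addr))
                    return -ENOMEM;   /* never unmap a failed mapping */
            *out = addr;
            return 0;
    }

Separately, the big-passthru ioctl swaps an open-coded kmalloc()+copy_from_user() pair for vmemdup_user(), which returns an ERR_PTR on failure; its result may come from vmalloc(), hence the matching kfree() to kvfree() change in the cleanup path.
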
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index fac377320158..e63aadd10dfd 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2266,7 +2266,6 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
/*
* Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
*/
- target_wait_for_sess_cmds(se_sess);
target_remove_session(se_sess);
tport->ibmv_nexus = NULL;
kfree(nexus);
@@ -3474,11 +3473,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
vscsi->dds.window[LOCAL].liobn,
vscsi->dds.window[REMOTE].liobn);
- strcpy(vscsi->eye, "VSCSI ");
- strncat(vscsi->eye, vdev->name, MAX_EYE);
+ snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
vscsi->dds.unit_id = vdev->unit_address;
- strncpy(vscsi->dds.partition_name, partition_name,
+ strscpy(vscsi->dds.partition_name, partition_name,
sizeof(vscsi->dds.partition_name));
vscsi->dds.partition_num = partition_number;
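
Two string fixes above follow a common hardening pattern: strcpy()+strncat() collapses into one bounded snprintf(), and strncpy() gives way to strscpy(), which always NUL-terminates and returns -E2BIG on truncation instead of silently leaving an unterminated buffer. With hypothetical buffers:

    char eye[32];
    char part[32];

    snprintf(eye, sizeof(eye), "VSCSI %s", name); /* bounded, terminated */
    if (strscpy(part, src, sizeof(part)) < 0)
            pr_warn("partition name truncated\n");
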
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f2ec80b0ffc0..271990bc065b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3335,6 +3335,65 @@ static void ipr_release_dump(struct kref *kref)
LEAVE;
}
+static void ipr_add_remove_thread(struct work_struct *work)
+{
+ unsigned long lock_flags;
+ struct ipr_resource_entry *res;
+ struct scsi_device *sdev;
+ struct ipr_ioa_cfg *ioa_cfg =
+ container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
+ u8 bus, target, lun;
+ int did_work;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+restart:
+ do {
+ did_work = 0;
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->del_from_ml && res->sdev) {
+ did_work = 1;
+ sdev = res->sdev;
+ if (!scsi_device_get(sdev)) {
+ if (!res->add_to_ml)
+ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ else
+ res->del_from_ml = 0;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ }
+ break;
+ }
+ }
+ } while (did_work);
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->add_to_ml) {
+ bus = res->bus;
+ target = res->target;
+ lun = res->lun;
+ res->add_to_ml = 0;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ scsi_add_device(ioa_cfg->host, bus, target, lun);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ goto restart;
+ }
+ }
+
+ ioa_cfg->scan_done = 1;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
+ LEAVE;
+}
+
/**
* ipr_worker_thread - Worker thread
* @work: ioa config struct
@@ -3349,13 +3408,9 @@ static void ipr_release_dump(struct kref *kref)
static void ipr_worker_thread(struct work_struct *work)
{
unsigned long lock_flags;
- struct ipr_resource_entry *res;
- struct scsi_device *sdev;
struct ipr_dump *dump;
struct ipr_ioa_cfg *ioa_cfg =
container_of(work, struct ipr_ioa_cfg, work_q);
- u8 bus, target, lun;
- int did_work;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3393,49 +3448,9 @@ static void ipr_worker_thread(struct work_struct *work)
return;
}
-restart:
- do {
- did_work = 0;
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return;
- }
+ schedule_work(&ioa_cfg->scsi_add_work_q);
- list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if (res->del_from_ml && res->sdev) {
- did_work = 1;
- sdev = res->sdev;
- if (!scsi_device_get(sdev)) {
- if (!res->add_to_ml)
- list_move_tail(&res->queue, &ioa_cfg->free_res_q);
- else
- res->del_from_ml = 0;
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- }
- break;
- }
- }
- } while (did_work);
-
- list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if (res->add_to_ml) {
- bus = res->bus;
- target = res->target;
- lun = res->lun;
- res->add_to_ml = 0;
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- scsi_add_device(ioa_cfg->host, bus, target, lun);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- goto restart;
- }
- }
-
- ioa_cfg->scan_done = 1;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
LEAVE;
}
@@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
INIT_LIST_HEAD(&ioa_cfg->free_res_q);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
+ INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
init_waitqueue_head(&ioa_cfg->reset_wait_q);
init_waitqueue_head(&ioa_cfg->msi_wait_q);
init_waitqueue_head(&ioa_cfg->eeh_wait_q);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 68afbbde54d3..f6baa2351313 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg {
u8 saved_mode_page_len;
struct work_struct work_q;
+ struct work_struct scsi_add_work_q;
struct workqueue_struct *reset_work_q;
wait_queue_head_t reset_wait_q;
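
The ipr rework moves the slow scsi_add_device()/scsi_remove_device() traffic out of the main worker into a dedicated work item, so dump handling in ipr_worker_thread() is no longer stalled behind device scanning. The moving parts, sketched with hypothetical demo_* names:

    struct demo_cfg {
            struct work_struct work_q;          /* existing main worker */
            struct work_struct scsi_add_work_q; /* new: add/remove only */
    };

    static void demo_add_remove(struct work_struct *work)
    {
            struct demo_cfg *cfg =
                    container_of(work, struct demo_cfg, scsi_add_work_q);

            /* long-running scsi_add_device()/scsi_remove_device() calls
             * run here, dropping and retaking the host lock around each */
    }

    /* setup:       INIT_WORK(&cfg->scsi_add_work_q, demo_add_remove);
     * main worker: schedule_work(&cfg->scsi_add_work_q);              */
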
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bd6ac6b5980a..ee8a1ecd58fd 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -208,7 +208,7 @@ module_param(ips, charp, 0);
#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
- PCI_DMA_BIDIRECTIONAL : \
+ DMA_BIDIRECTIONAL : \
scb->scsi_cmd->sc_data_direction)
#ifdef IPS_DEBUG
@@ -1529,11 +1529,12 @@ ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
if (ha->ioctl_data && length <= ha->ioctl_len)
return 0;
/* there is no buffer or it's not big enough, allocate a new one */
- bigger_buf = pci_alloc_consistent(ha->pcidev, length, &dma_busaddr);
+ bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr,
+ GFP_KERNEL);
if (bigger_buf) {
/* free the old memory */
- pci_free_consistent(ha->pcidev, ha->ioctl_len, ha->ioctl_data,
- ha->ioctl_busaddr);
+ dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
+ ha->ioctl_data, ha->ioctl_busaddr);
/* use the new memory */
ha->ioctl_data = (char *) bigger_buf;
ha->ioctl_len = length;
@@ -1678,9 +1679,8 @@ ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
} else if (!ha->flash_data) {
datasize = pt->CoppCP.cmd.flashfw.total_packets *
pt->CoppCP.cmd.flashfw.count;
- ha->flash_data = pci_alloc_consistent(ha->pcidev,
- datasize,
- &ha->flash_busaddr);
+ ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev,
+ datasize, &ha->flash_busaddr, GFP_KERNEL);
if (!ha->flash_data){
printk(KERN_WARNING "Unable to allocate a flash buffer\n");
return IPS_FAILURE;
@@ -1858,7 +1858,7 @@ ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
scb->data_len = ha->flash_datasize;
scb->data_busaddr =
- pci_map_single(ha->pcidev, ha->flash_data, scb->data_len,
+ dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len,
IPS_DMA_DIR(scb));
scb->flags |= IPS_SCB_MAP_SINGLE;
scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
@@ -1880,8 +1880,8 @@ ips_free_flash_copperhead(ips_ha_t * ha)
if (ha->flash_data == ips_FlashData)
test_and_clear_bit(0, &ips_FlashDataInUse);
else if (ha->flash_data)
- pci_free_consistent(ha->pcidev, ha->flash_len, ha->flash_data,
- ha->flash_busaddr);
+ dma_free_coherent(&ha->pcidev->dev, ha->flash_len,
+ ha->flash_data, ha->flash_busaddr);
ha->flash_data = NULL;
}
@@ -3485,6 +3485,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
case START_STOP:
scb->scsi_cmd->result = DID_OK << 16;
+ break;
case TEST_UNIT_READY:
case INQUIRY:
@@ -4212,7 +4213,7 @@ ips_free(ips_ha_t * ha)
if (ha) {
if (ha->enq) {
- pci_free_consistent(ha->pcidev, sizeof(IPS_ENQ),
+ dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ),
ha->enq, ha->enq_busaddr);
ha->enq = NULL;
}
@@ -4221,7 +4222,7 @@ ips_free(ips_ha_t * ha)
ha->conf = NULL;
if (ha->adapt) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
sizeof (IPS_ADAPTER) +
sizeof (IPS_IO_CMD), ha->adapt,
ha->adapt->hw_status_start);
@@ -4229,7 +4230,7 @@ ips_free(ips_ha_t * ha)
}
if (ha->logical_drive_info) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
sizeof (IPS_LD_INFO),
ha->logical_drive_info,
ha->logical_drive_info_dma_addr);
@@ -4243,7 +4244,7 @@ ips_free(ips_ha_t * ha)
ha->subsys = NULL;
if (ha->ioctl_data) {
- pci_free_consistent(ha->pcidev, ha->ioctl_len,
+ dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
ha->ioctl_data, ha->ioctl_busaddr);
ha->ioctl_data = NULL;
ha->ioctl_datasize = 0;
@@ -4276,11 +4277,11 @@ static int
ips_deallocatescbs(ips_ha_t * ha, int cmds)
{
if (ha->scbs) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
ha->scbs->sg_list.list,
ha->scbs->sg_busaddr);
- pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds,
+ dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds,
ha->scbs, ha->scbs->scb_busaddr);
ha->scbs = NULL;
} /* end if */
@@ -4307,17 +4308,16 @@ ips_allocatescbs(ips_ha_t * ha)
METHOD_TRACE("ips_allocatescbs", 1);
/* Allocate memory for the SCBs */
- ha->scbs =
- pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t),
- &command_dma);
+ ha->scbs = dma_alloc_coherent(&ha->pcidev->dev,
+ ha->max_cmds * sizeof (ips_scb_t),
+ &command_dma, GFP_KERNEL);
if (ha->scbs == NULL)
return 0;
- ips_sg.list =
- pci_alloc_consistent(ha->pcidev,
- IPS_SGLIST_SIZE(ha) * IPS_MAX_SG *
- ha->max_cmds, &sg_dma);
+ ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev,
+ IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds,
+ &sg_dma, GFP_KERNEL);
if (ips_sg.list == NULL) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
command_dma);
return 0;
@@ -4446,8 +4446,8 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
if (scb->flags & IPS_SCB_MAP_SG)
scsi_dma_unmap(scb->scsi_cmd);
else if (scb->flags & IPS_SCB_MAP_SINGLE)
- pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
- IPS_DMA_DIR(scb));
+ dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr,
+ scb->data_len, IPS_DMA_DIR(scb));
/* check to make sure this is not our "special" scb */
if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
@@ -4559,7 +4559,8 @@ ips_flush_and_reset(ips_ha_t *ha)
dma_addr_t command_dma;
/* Create a usable SCB */
- scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma);
+ scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t),
+ &command_dma, GFP_KERNEL);
if (scb) {
memset(scb, 0, sizeof(ips_scb_t));
ips_init_scb(ha, scb);
@@ -4594,7 +4595,7 @@ ips_flush_and_reset(ips_ha_t *ha)
/* Now RESET and INIT the adapter */
(*ha->func.reset) (ha);
- pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb, command_dma);
+ dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma);
return;
}
@@ -6926,29 +6927,30 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
* are guaranteed to be < 4G.
*/
if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
- !pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
+ !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
(ha)->flags |= IPS_HA_ENH_SG;
} else {
- if (pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING "Unable to set DMA Mask\n");
return ips_abort_init(ha, index);
}
}
if(ips_cd_boot && !ips_FlashData){
- ips_FlashData = pci_alloc_consistent(pci_dev, PAGE_SIZE << 7,
- &ips_flashbusaddr);
+ ips_FlashData = dma_alloc_coherent(&pci_dev->dev,
+ PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
}
- ha->enq = pci_alloc_consistent(pci_dev, sizeof (IPS_ENQ),
- &ha->enq_busaddr);
+ ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ),
+ &ha->enq_busaddr, GFP_KERNEL);
if (!ha->enq) {
IPS_PRINTK(KERN_WARNING, pci_dev,
"Unable to allocate host inquiry structure\n");
return ips_abort_init(ha, index);
}
- ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) +
- sizeof (IPS_IO_CMD), &dma_address);
+ ha->adapt = dma_alloc_coherent(&pci_dev->dev,
+ sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD),
+ &dma_address, GFP_KERNEL);
if (!ha->adapt) {
IPS_PRINTK(KERN_WARNING, pci_dev,
"Unable to allocate host adapt & dummy structures\n");
@@ -6959,7 +6961,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
- ha->logical_drive_info = pci_alloc_consistent(pci_dev, sizeof (IPS_LD_INFO), &dma_address);
+ ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev,
+ sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL);
if (!ha->logical_drive_info) {
IPS_PRINTK(KERN_WARNING, pci_dev,
"Unable to allocate logical drive info structure\n");
@@ -6997,8 +7000,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
if (ips_ioctlsize < PAGE_SIZE)
ips_ioctlsize = PAGE_SIZE;
- ha->ioctl_data = pci_alloc_consistent(pci_dev, ips_ioctlsize,
- &ha->ioctl_busaddr);
+ ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize,
+ &ha->ioctl_busaddr, GFP_KERNEL);
ha->ioctl_len = ips_ioctlsize;
if (!ha->ioctl_data) {
IPS_PRINTK(KERN_WARNING, pci_dev,
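
The ips conversion mirrors hpsa's. One behavioural detail worth calling out: the old pci_alloc_consistent() wrapper always allocated with GFP_ATOMIC, while the replacement passes GFP_KERNEL explicitly, which may sleep and is far less likely to fail in these probe and ioctl paths. The coherent-allocation pairing, as a sketch:

    void *buf;
    dma_addr_t handle;

    buf = dma_alloc_coherent(&pdev->dev, len, &handle, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;
    /* ... device uses 'handle', CPU uses 'buf' ... */
    dma_free_coherent(&pdev->dev, len, buf, handle);
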
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 1ee3868ade07..7b5deae68d33 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
* the task management request.
* @task_request: the handle to the task request object to start.
*/
-enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
- struct isci_remote_device *idev,
- struct isci_request *ireq)
+enum sci_status sci_controller_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
{
enum sci_status status;
@@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
"%s: SCIC Controller starting task from invalid "
"state\n",
__func__);
- return SCI_TASK_FAILURE_INVALID_STATE;
+ return SCI_FAILURE_INVALID_STATE;
}
status = sci_remote_device_start_task(ihost, idev, ireq);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index b3539928073c..6bc3f022630a 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -489,7 +489,7 @@ enum sci_status sci_controller_start_io(
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_task_status sci_controller_start_task(
+enum sci_status sci_controller_start_task(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index ed197bc8e801..2f151708b59a 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
if (status == SCI_SUCCESS) {
if (ireq->stp.rsp.status & ATA_ERR)
- status = SCI_IO_FAILURE_RESPONSE_VALID;
+ status = SCI_FAILURE_IO_RESPONSE_VALID;
} else {
- status = SCI_IO_FAILURE_RESPONSE_VALID;
+ status = SCI_FAILURE_IO_RESPONSE_VALID;
}
if (status != SCI_SUCCESS) {
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 6dcaed0c1fc8..fb6eba331ac6 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
struct isci_tmf *tmf, unsigned long timeout_ms)
{
DECLARE_COMPLETION_ONSTACK(completion);
- enum sci_task_status status = SCI_TASK_FAILURE;
+ enum sci_status status = SCI_FAILURE;
struct isci_request *ireq;
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
@@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
/* start the TMF io. */
status = sci_controller_start_task(ihost, idev, ireq);
- if (status != SCI_TASK_SUCCESS) {
+ if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
"%s: start_io failed - status = 0x%x, request = %p\n",
__func__,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b025a0b74341..23354f206533 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -800,7 +800,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
return rc;
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
- &addr, param, buf);
+ &addr,
+ (enum iscsi_param)param, buf);
default:
return iscsi_host_get_param(shost, param, buf);
}
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 6eb5ff3e2e61..1ad28262b00a 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -38,30 +38,6 @@ static u8 jazz_esp_read8(struct esp *esp, unsigned long reg)
return *(volatile u8 *)(esp->regs + reg);
}
-static dma_addr_t jazz_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void jazz_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int jazz_esp_irq_pending(struct esp *esp)
{
if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
@@ -117,10 +93,6 @@ static int jazz_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops jazz_esp_ops = {
.esp_write8 = jazz_esp_write8,
.esp_read8 = jazz_esp_read8,
- .map_single = jazz_esp_map_single,
- .map_sg = jazz_esp_map_sg,
- .unmap_single = jazz_esp_unmap_single,
- .unmap_sg = jazz_esp_unmap_sg,
.irq_pending = jazz_esp_irq_pending,
.reset_dma = jazz_esp_reset_dma,
.dma_drain = jazz_esp_dma_drain,
@@ -182,7 +154,7 @@ static int esp_jazz_probe(struct platform_device *dev)
dev_set_drvdata(&dev->dev, esp);
- err = scsi_esp_register(esp, &dev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
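
The jazz_esp hunks delete four ops that did nothing but forward to the generic DMA API. Once every esp-family driver looked like this, the esp core could call dma_map_single() and friends on esp->dev directly, and scsi_esp_register() could drop its separate struct device argument. The core-side call is simply (helper name hypothetical):

    static dma_addr_t esp_map_dma(struct esp *esp, void *buf, size_t sz,
                                  enum dma_data_direction dir)
    {
            return dma_map_single(esp->dev, buf, sz, dir);
    }
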
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 4fae253d4f3d..b1bd283be51c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1872,7 +1872,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_fcp_pkt *fsp;
- struct fc_rport_libfc_priv *rpriv;
int rval;
int rc = 0;
struct fc_stats *stats;
@@ -1894,8 +1893,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
goto out;
}
- rpriv = rport->dd_data;
-
if (!fc_fcp_lport_queue_ready(lport)) {
if (lport->qfull) {
if (fc_fcp_can_queue_ramp_down(lport))
@@ -2295,8 +2292,7 @@ int fc_setup_fcp(void)
void fc_destroy_fcp(void)
{
- if (scsi_pkt_cachep)
- kmem_cache_destroy(scsi_pkt_cachep);
+ kmem_cache_destroy(scsi_pkt_cachep);
}
/**
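
The fc_destroy_fcp() cleanup leans on the fact that kmem_cache_destroy(), like kfree() and vfree(), returns immediately when passed NULL, so the guard was dead weight:

    if (scsi_pkt_cachep)                    /* redundant check     */
            kmem_cache_destroy(scsi_pkt_cachep);

    kmem_cache_destroy(scsi_pkt_cachep);    /* equivalent, NULL-safe */
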
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 372387a450df..1e1c0f1b9e69 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1038,8 +1038,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_ls_rjt *rjt;
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
- FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
- rjt->er_reason, rjt->er_explan);
+ if (!rjt)
+ FC_RPORT_DBG(rdata, "PLOGI bad response\n");
+ else
+ FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
}
out:
@@ -1158,8 +1161,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC) {
pp = fc_frame_payload_get(fp, sizeof(*pp));
- if (!pp)
+ if (!pp) {
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
+ }
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
@@ -1172,8 +1177,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
}
- if (pp->prli.prli_spp_len < sizeof(pp->spp))
+ if (pp->prli.prli_spp_len < sizeof(pp->spp)) {
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
+ }
fcp_parm = ntohl(pp->spp.spp_params);
if (fcp_parm & FCP_SPPF_RETRY)
@@ -1211,8 +1218,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
} else {
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
- FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
- rjt->er_reason, rjt->er_explan);
+ if (!rjt)
+ FC_RPORT_DBG(rdata, "PRLI bad response\n");
+ else
+ FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
}
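
Both reject paths above shared a latent NULL dereference: fc_frame_payload_get() returns NULL when the frame is shorter than the requested payload, and the old code logged rjt->er_reason unconditionally. The defensive shape is:

    struct fc_els_ls_rjt *rjt;

    rjt = fc_frame_payload_get(fp, sizeof(*rjt));
    if (!rjt)
            FC_RPORT_DBG(rdata, "PLOGI bad response\n");
    else
            FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
                         rjt->er_reason, rjt->er_explan);

The PRLI hunks additionally call fc_rport_error_retry() on short or malformed accept payloads, so the rport state machine retries instead of silently parking in the out: path.
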
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 64a958a99f6a..4f6cdf53e913 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -654,7 +654,7 @@ void sas_probe_sata(struct asd_sas_port *port)
/* if libata could not bring the link up, don't surface
* the device
*/
- if (ata_dev_disabled(sas_to_ata_dev(dev)))
+ if (!ata_dev_enabled(sas_to_ata_dev(dev)))
sas_fail_probe(dev, __func__, -ENODEV);
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 0148ae62a52a..dde433aa59c2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -260,7 +260,7 @@ static void sas_suspend_devices(struct work_struct *work)
* phy_list is not being mutated
*/
list_for_each_entry(phy, &port->phy_list, port_phy_el) {
- if (si->dft->lldd_port_formed)
+ if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
phy->suspended = 1;
port->suspended = 1;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index fadc99cb60df..0d1f72752ca2 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -48,17 +48,16 @@ static void smp_task_timedout(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ complete(&task->slow_task->completion);
+ }
spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- complete(&task->slow_task->completion);
}
static void smp_task_done(struct sas_task *task)
{
- if (!del_timer(&task->slow_task->timer))
- return;
+ del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@@ -2054,14 +2053,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
return res;
}
- /* delete the old link */
- if (SAS_ADDR(phy->attached_sas_addr) &&
- SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
- SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
- SAS_ADDR(dev->sas_addr), phy_id,
- SAS_ADDR(phy->attached_sas_addr));
- sas_unregister_devs_sas_addr(dev, phy_id, last);
- }
+ /* we always have to delete the old device when we get here */
+ SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
+ SAS_ADDR(dev->sas_addr), phy_id,
+ SAS_ADDR(phy->attached_sas_addr));
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
return sas_discover_new(dev, phy_id);
}
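
The smp_task_timedout() change closes a completion race: the old timeout handler called complete() even when SAS_TASK_STATE_DONE was already set, and did so outside the state lock, so a waiter could be woken for a task that was completing normally. Now the timeout path completes only under task_state_lock and only when DONE is not yet set, while smp_task_done() completes unconditionally after del_timer() instead of returning early when the timer had already fired. The core of the pattern, with a hypothetical demo_task:

    struct demo_task {
            struct timer_list timer;
            spinlock_t lock;
            unsigned long flags;
    #define DEMO_DONE    BIT(0)
    #define DEMO_ABORTED BIT(1)
            struct completion completion;
    };

    static void demo_timedout(struct timer_list *t)
    {
            struct demo_task *task = from_timer(task, t, timer);
            unsigned long flags;

            spin_lock_irqsave(&task->lock, flags);
            if (!(task->flags & DEMO_DONE)) {
                    task->flags |= DEMO_ABORTED;
                    complete(&task->completion); /* only if not yet done */
            }
            spin_unlock_irqrestore(&task->lock, flags);
    }

complete() is safe to call while holding a spinlock, which is what lets the DONE check and the completion be made atomic here.
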
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 43732e8d1347..c1eb2b00ca7f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -52,7 +52,7 @@ struct lpfc_sli2_slim;
downloads using bsg */
#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */
-#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */
@@ -583,6 +583,25 @@ struct lpfc_mbox_ext_buf_ctx {
struct list_head ext_dmabuf_list;
};
+struct lpfc_ras_fwlog {
+ uint8_t *fwlog_buff;
+ uint32_t fw_buffcount; /* Buffer size posted to FW */
+#define LPFC_RAS_BUFF_ENTERIES 16 /* Each entry can hold max of 64k */
+#define LPFC_RAS_MAX_ENTRY_SIZE (64 * 1024)
+#define LPFC_RAS_MIN_BUFF_POST_SIZE (256 * 1024)
+#define LPFC_RAS_MAX_BUFF_POST_SIZE (1024 * 1024)
+ uint32_t fw_loglevel; /* Log level set */
+ struct lpfc_dmabuf lwpd;
+ struct list_head fwlog_buff_list;
+
+ /* RAS support status on adapter */
+ bool ras_hwsupport; /* RAS Support available on HW or not */
+ bool ras_enabled; /* Ras Enabled for the function */
+#define LPFC_RAS_DISABLE_LOGGING 0x00
+#define LPFC_RAS_ENABLE_LOGGING 0x01
+ bool ras_active; /* RAS logging running state */
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
int (*lpfc_new_scsi_buf)
@@ -790,6 +809,7 @@ struct lpfc_hba {
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_nvme_seg_cnt;
+ uint32_t cfg_scsi_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn;
@@ -833,6 +853,9 @@ struct lpfc_hba {
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
uint32_t cfg_enable_SmartSAN;
uint32_t cfg_enable_mds_diags;
+ uint32_t cfg_ras_fwlog_level;
+ uint32_t cfg_ras_fwlog_buffsize;
+ uint32_t cfg_ras_fwlog_func;
uint32_t cfg_enable_fc4_type;
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
@@ -963,6 +986,7 @@ struct lpfc_hba {
uint32_t intr_mode;
#define LPFC_INTR_ERROR 0xFFFFFFFF
struct list_head port_list;
+ spinlock_t port_list_lock; /* lock for port_list mutations */
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
@@ -1092,6 +1116,9 @@ struct lpfc_hba {
struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
uint32_t ctx_idx;
+ /* RAS Support */
+ struct lpfc_ras_fwlog ras_fwlog;
+
uint8_t menlo_flag; /* menlo generic flags */
#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
uint32_t iocb_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 057a60abe664..dda7f450b96d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
goto buffer_done;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ nrport = NULL;
+ spin_lock(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
- if (!rport)
- continue;
-
- /* local short-hand pointer. */
- nrport = rport->remoteport;
+ if (rport)
+ nrport = rport->remoteport;
+ spin_unlock(&vport->phba->hbalock);
if (!nrport)
continue;
@@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp;
#if (IS_ENABLED(CONFIG_NVME_FC))
struct lpfc_nvme_rport *rport;
+ struct nvme_fc_remote_port *remoteport = NULL;
#endif
shost = lpfc_shost_from_vport(vport);
@@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
#if (IS_ENABLED(CONFIG_NVME_FC))
+ spin_lock(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
+ remoteport = rport->remoteport;
+ spin_unlock(&vport->phba->hbalock);
+ if (remoteport)
nvme_fc_set_remoteport_devloss(rport->remoteport,
vport->cfg_devloss_tmo);
#endif
@@ -5353,15 +5358,74 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
/*
* lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 4096. The default value is
- * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
- * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * This value can be set to values between 64 and 4096. The default value
+ * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
+ * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
* Because of the additional overhead involved in setting up T10-DIF,
* this parameter will be limited to 128 if BlockGuard is enabled under SLI4
* and will be limited to 512 if BlockGuard is enabled under SLI3.
*/
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
- LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+module_param(lpfc_sg_seg_cnt, uint, 0444);
+MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
+
+/**
+ * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
+ * configured for the adapter
+ * @dev: class device converted to a Scsi_Host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
+ phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "Cfg: %d SCSI: %d NVME: %d\n",
+ phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
+ phba->cfg_nvme_seg_cnt);
+ return len;
+}
+
+static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
+
+/**
+ * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
+ phba->cfg_sg_seg_cnt = val;
+ return 0;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
+ "be set to %d, allowed range is [%d, %d]\n",
+ val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
+ phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+ return -EINVAL;
+}
/*
* lpfc_enable_mds_diags: Enable MDS Diagnostics
@@ -5372,6 +5436,31 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
/*
+ * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
+ * 0 = Disable firmware logging (default)
+ * [1-4] = Multiple of 1/4th MB of host memory for FW logging
+ * Value range [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+
+/*
+ * lpfc_ras_fwlog_level: Firmware logging verbosity level
+ * Valid only if firmware logging is enabled
+ * 0 (least verbosity) to 4 (most verbosity)
+ * Value range is [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
+
+/*
+ * lpfc_ras_fwlog_func: Firmware logging enabled on function number
+ * Default function which has RAS support : 0
+ * Value Range is [0..7].
+ * FW logging is a global action and enablement is via a specific
+ * port.
+ */
+LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
+
+/*
* lpfc_enable_bbcr: Enable BB Credit Recovery
* 0 = BB Credit Recovery disabled
* 1 = BB Credit Recovery enabled (default)
@@ -5496,6 +5585,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_protocol,
&dev_attr_lpfc_xlane_supported,
&dev_attr_lpfc_enable_mds_diags,
+ &dev_attr_lpfc_ras_fwlog_buffsize,
+ &dev_attr_lpfc_ras_fwlog_level,
+ &dev_attr_lpfc_ras_fwlog_func,
&dev_attr_lpfc_enable_bbcr,
&dev_attr_lpfc_enable_dpp,
NULL,
@@ -6582,6 +6674,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_sli_mode_init(phba, lpfc_sli_mode);
phba->cfg_enable_dss = 1;
lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
+ lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
+ lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
+ lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
+
+
+ /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
+ * accommodate 512K and 1M IOs in a single nvme buf and supply
+ * enough NVME LS iocb buffers for larger connectivity counts.
+ */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ phba->cfg_iocb_cnt = 5;
+ }
+
return;
}
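
lpfc_sg_seg_cnt moves from the generated LPFC_ATTR_R() attribute to a hand-written module parameter plus a read-only sysfs attribute, so the show routine can report the derived SCSI and NVME segment counts next to the configured value. The general shape of such an attribute (demo_* names hypothetical):

    static ssize_t demo_cnt_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
            struct Scsi_Host *shost = class_to_shost(dev);
            struct demo_port *port = shost_priv(shost);

            /* sysfs supplies one page; bound all output by PAGE_SIZE */
            return scnprintf(buf, PAGE_SIZE, "cfg: %u scsi: %u nvme: %u\n",
                             port->cfg_cnt, port->scsi_cnt, port->nvme_cnt);
    }
    static DEVICE_ATTR_RO(demo_cnt);

When the output is built in several steps, as in the lpfc version above, each later snprintf() should be bounded by the space remaining (PAGE_SIZE - len), not by PAGE_SIZE again.
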
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 90745feca808..7bd7ae86bed5 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -2843,9 +2844,6 @@ diag_cmd_data_alloc(struct lpfc_hba *phba,
if (nocopydata) {
bpl->tus.f.bdeFlags = 0;
- pci_dma_sync_single_for_device(phba->pcidev,
- dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
-
} else {
memset((uint8_t *)dmp->dma.virt, 0, cnt);
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
@@ -5309,6 +5307,330 @@ job_error:
}
/**
+ * lpfc_check_fwlog_support: Check FW log support on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check if FW logging is supported by the adapter
+ **/
+int
+lpfc_check_fwlog_support(struct lpfc_hba *phba)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = NULL;
+
+ ras_fwlog = &phba->ras_fwlog;
+
+ if (ras_fwlog->ras_hwsupport == false)
+ return -EACCES;
+ else if (ras_fwlog->ras_enabled == false)
+ return -EPERM;
+ else
+ return 0;
+}
+
+/**
+ * lpfc_bsg_get_ras_config: Get RAS configuration settings
+ * @job: fc_bsg_job to handle
+ *
+ * Return the RAS configuration values currently set.
+ **/
+static int
+lpfc_bsg_get_ras_config(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_bsg_get_ras_config_reply *ras_reply;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_ras_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6181 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ /* Check FW log status */
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ /* Current logging state */
+ if (ras_fwlog->ras_active == true)
+ ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
+ else
+ ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+
+ ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
+ ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
+
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
+
+ /* complete the job back to userspace */
+ bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ return rc;
+}
+
+/**
+ * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Disable FW logging into host memory on the adapter. To
+ * be done before reading logs from the host memory.
+ **/
+static void
+lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+ ras_fwlog->ras_active = false;
+
+ /* Disable FW logging to host memory */
+ writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+}
+
+/**
+ * lpfc_bsg_set_ras_config: Set FW logging parameters
+ * @job: fc_bsg_job to handle
+ *
+ * Set log-level parameters for FW-logging in host memory
+ **/
+static int
+lpfc_bsg_set_ras_config(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_bsg_set_ras_config_req *ras_req;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint8_t action = 0, log_level = 0;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_set_ras_config_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6182 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ /* Check FW log status */
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ ras_req = (struct lpfc_bsg_set_ras_config_req *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+ action = ras_req->action;
+ log_level = ras_req->log_level;
+
+ if (action == LPFC_RASACTION_STOP_LOGGING) {
+ /* Check if already disabled */
+ if (ras_fwlog->ras_active == false) {
+ rc = -ESRCH;
+ goto ras_job_error;
+ }
+
+ /* Disable logging */
+ lpfc_ras_stop_fwlog(phba);
+ } else {
+ /* action = LPFC_RASACTION_START_LOGGING */
+ if (ras_fwlog->ras_active == true) {
+ rc = -EINPROGRESS;
+ goto ras_job_error;
+ }
+
+ /* Enable logging */
+ rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
+ LPFC_RAS_ENABLE_LOGGING);
+ if (rc)
+ rc = -EINVAL;
+ }
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
+
+ /* complete the job back to userspace */
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_get_ras_lwpd: Get log write position data
+ * @job: fc_bsg_job to handle
+ *
+ * Get the offset and wrap count of the log messages written
+ * to host memory
+ **/
+static int
+lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_bsg_get_ras_lwpd *ras_reply;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint32_t lwpd_offset = 0;
+ uint64_t wrap_value = 0;
+ int rc = 0;
+
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_ras_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6183 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ lwpd_offset = *((uint32_t *)ras_fwlog->lwpd.virt) & 0xffffffff;
+ ras_reply->offset = be32_to_cpu(lwpd_offset);
+
+ wrap_value = *((uint64_t *)ras_fwlog->lwpd.virt);
+ ras_reply->wrap_count = be32_to_cpu((wrap_value >> 32) & 0xffffffff);
+
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
+
+ /* complete the job back to userspace */
+ bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_get_ras_fwlog: Read FW log
+ * @job: fc_bsg_job to handle
+ *
+ * Copy the FW log into the passed buffer.
+ **/
+static int
+lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct lpfc_bsg_get_fwlog_req *ras_req;
+ uint32_t rd_offset, rd_index, offset, pending_wlen;
+ uint32_t boundary = 0, align_len = 0, write_len = 0;
+ void *dest, *src, *fwlog_buff;
+ struct lpfc_ras_fwlog *ras_fwlog = NULL;
+ struct lpfc_dmabuf *dmabuf, *next;
+ int rc = 0;
+
+ ras_fwlog = &phba->ras_fwlog;
+
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ /* Logging must be stopped before reading */
+ if (ras_fwlog->ras_active) {
+ rc = -EINPROGRESS;
+ goto ras_job_error;
+ }
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_get_fwlog_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6184 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ ras_req = (struct lpfc_bsg_get_fwlog_req *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+ rd_offset = ras_req->read_offset;
+
+ /* Allocate memory to read FW log */
+ fwlog_buff = vmalloc(ras_req->read_size);
+ if (!fwlog_buff) {
+ rc = -ENOMEM;
+ goto ras_job_error;
+ }
+
+ rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
+ offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
+ pending_wlen = ras_req->read_size;
+ dest = fwlog_buff;
+
+ list_for_each_entry_safe(dmabuf, next,
+ &ras_fwlog->fwlog_buff_list, list) {
+
+ if (dmabuf->buffer_tag < rd_index)
+ continue;
+
+ /* Align read to buffer size */
+ if (offset) {
+ boundary = ((dmabuf->buffer_tag + 1) *
+ LPFC_RAS_MAX_ENTRY_SIZE);
+
+ align_len = (boundary - offset);
+ write_len = min_t(u32, align_len,
+ LPFC_RAS_MAX_ENTRY_SIZE);
+ } else {
+ write_len = min_t(u32, pending_wlen,
+ LPFC_RAS_MAX_ENTRY_SIZE);
+ align_len = 0;
+ boundary = 0;
+ }
+ src = dmabuf->virt + offset;
+ memcpy(dest, src, write_len);
+
+ pending_wlen -= write_len;
+ if (!pending_wlen)
+ break;
+
+ dest += write_len;
+ offset = (offset + write_len) % LPFC_RAS_MAX_ENTRY_SIZE;
+ }
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ fwlog_buff, ras_req->read_size);
+
+ vfree(fwlog_buff);
+
+ras_job_error:
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+ return rc;
+}
+
+
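The read loop in lpfc_bsg_get_ras_fwlog() above walks the posted 64K DMA buffers by buffer_tag, taking a shorter first chunk whenever read_offset is not buffer-aligned. A minimal standalone sketch of that arithmetic (userspace C; ENTRY_SIZE mirrors LPFC_RAS_MAX_ENTRY_SIZE, the example offsets are invented):

#include <stdio.h>
#include <stdint.h>

#define ENTRY_SIZE (64 * 1024)	/* mirrors LPFC_RAS_MAX_ENTRY_SIZE */

int main(void)
{
	uint32_t rd_offset = 150000;	/* example read_offset from the request */
	uint32_t pending = 200000;	/* example read_size */
	uint32_t index = rd_offset / ENTRY_SIZE;	/* first buffer_tag used */
	uint32_t offset = rd_offset % ENTRY_SIZE;	/* offset within that buffer */

	while (pending) {
		/* The first copy is clipped to the buffer boundary, as in the
		 * driver's "Align read to buffer size" branch. */
		uint32_t chunk = ENTRY_SIZE - offset;

		if (chunk > pending)
			chunk = pending;
		printf("copy %u bytes from buffer %u at offset %u\n",
		       chunk, index, offset);
		pending -= chunk;
		index++;
		offset = 0;	/* subsequent buffers are read from the start */
	}
	return 0;
}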
+/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
**/
@@ -5355,6 +5677,18 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
rc = lpfc_forced_link_speed(job);
break;
+ case LPFC_BSG_VENDOR_RAS_GET_LWPD:
+ rc = lpfc_bsg_get_ras_lwpd(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
+ rc = lpfc_bsg_get_ras_fwlog(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
+ rc = lpfc_bsg_get_ras_config(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
+ rc = lpfc_bsg_set_ras_config(job);
+ break;
default:
rc = -EINVAL;
bsg_reply->reply_payload_rcv_len = 0;
@@ -5368,7 +5702,7 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
/**
* lpfc_bsg_request - handle a bsg request from the FC transport
- * @job: fc_bsg_job to handle
+ * @job: bsg_job to handle
**/
int
lpfc_bsg_request(struct bsg_job *job)
@@ -5402,7 +5736,7 @@ lpfc_bsg_request(struct bsg_job *job)
/**
* lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
- * @job: fc_bsg_job that has timed out
+ * @job: bsg_job that has timed out
*
* This function just aborts the job's IOCB. The aborted IOCB will return to
* the waiting function which will handle passing the error back to userspace
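As an aside on the LWPD layout consumed by lpfc_bsg_get_ras_lwpd() earlier in this file: the 8-byte block holds the 32-bit write offset in the low word and the wrap count in the high word. A hedged standalone sketch of that unpacking, with the driver's be32_to_cpu byte-order conversion deliberately elided (the raw bytes are invented):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* Example raw LWPD bytes as they might land in host DMA memory. */
	uint8_t lwpd[8] = { 0x00, 0x20, 0x01, 0x00,
			    0x03, 0x00, 0x00, 0x00 };
	uint64_t raw;

	memcpy(&raw, lwpd, sizeof(raw));	/* host-endian view; real code
						 * must byte-swap as needed */
	printf("write offset 0x%08x, wrap count %u\n",
	       (uint32_t)(raw & 0xffffffff),
	       (uint32_t)(raw >> 32));
	return 0;
}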
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 32347c87e3b4..820323f1139b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -38,6 +38,10 @@
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
+#define LPFC_BSG_VENDOR_RAS_GET_LWPD 16
+#define LPFC_BSG_VENDOR_RAS_GET_FWLOG 17
+#define LPFC_BSG_VENDOR_RAS_GET_CONFIG 18
+#define LPFC_BSG_VENDOR_RAS_SET_CONFIG 19
struct set_ct_event {
uint32_t command;
@@ -296,6 +300,38 @@ struct forced_link_speed_support_reply {
uint8_t supported;
};
+struct lpfc_bsg_ras_req {
+ uint32_t command;
+};
+
+struct lpfc_bsg_get_fwlog_req {
+ uint32_t command;
+ uint32_t read_size;
+ uint32_t read_offset;
+};
+
+struct lpfc_bsg_get_ras_lwpd {
+ uint32_t offset;
+ uint32_t wrap_count;
+};
+
+struct lpfc_bsg_set_ras_config_req {
+ uint32_t command;
+ uint8_t action;
+#define LPFC_RASACTION_STOP_LOGGING 0x00
+#define LPFC_RASACTION_START_LOGGING 0x01
+ uint8_t log_level;
+};
+
+struct lpfc_bsg_get_ras_config_reply {
+ uint8_t state;
+#define LPFC_RASLOG_STATE_STOPPED 0x00
+#define LPFC_RASLOG_STATE_RUNNING 0x01
+ uint8_t log_level;
+ uint32_t log_buff_sz;
+};
+
+
/* driver only */
#define SLI_CONFIG_NOT_HANDLED 0
#define SLI_CONFIG_HANDLED 1
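User space reaches the vendor opcodes above through the FC bsg vendor path: the request buffer starts with a struct fc_bsg_request, and the lpfc request structs overlay rqst_data.h_vendor.vendor_cmd (this matches the casts in lpfc_bsg.c above). A minimal packing sketch, assuming the uapi layout from <scsi/scsi_bsg_fc.h>; opening and submitting on the bsg node is omitted, and the request struct is re-declared here only for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request, FC_BSG_HST_VENDOR */

struct lpfc_bsg_set_ras_config_req {	/* mirrors lpfc_bsg.h above */
	uint32_t command;
	uint8_t action;
	uint8_t log_level;
};

int main(void)
{
	unsigned char buf[sizeof(struct fc_bsg_request) +
			  sizeof(struct lpfc_bsg_set_ras_config_req)];
	struct fc_bsg_request *bsg = (struct fc_bsg_request *)buf;
	struct lpfc_bsg_set_ras_config_req *req =
			(void *)bsg->rqst_data.h_vendor.vendor_cmd;

	memset(buf, 0, sizeof(buf));
	bsg->msgcode = FC_BSG_HST_VENDOR;
	req->command = 19;	/* LPFC_BSG_VENDOR_RAS_SET_CONFIG, defined above */
	req->action = 0x01;	/* LPFC_RASACTION_START_LOGGING */
	req->log_level = 4;	/* illustrative verbosity value */
	printf("request_len = %zu\n", sizeof(buf));
	return 0;
}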
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bea24bc4410a..e01136507780 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -545,6 +545,13 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
+/* RAS Interface */
+void lpfc_sli4_ras_init(struct lpfc_hba *phba);
+void lpfc_sli4_ras_setup(struct lpfc_hba *phba);
+int lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level,
+ uint32_t fwlog_enable);
+int lpfc_check_fwlog_support(struct lpfc_hba *phba);
+
/* NVME interfaces. */
void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1cbdc892ff95..789ad1502534 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -445,14 +445,14 @@ lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
struct lpfc_vport *vport_curr;
unsigned long flags;
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock_irqsave(&phba->port_list_lock, flags);
list_for_each_entry(vport_curr, &phba->port_list, listentry) {
if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return vport_curr;
}
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return NULL;
}
@@ -471,11 +471,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
- /* Don't assume the rport is always the previous
- * FC4 type.
- */
- ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
-
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 9df0c051349f..0c8005bb0f53 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -550,8 +550,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_nodelist *ndlp;
unsigned char *statep;
struct nvme_fc_local_port *localport;
- struct lpfc_nvmet_tgtport *tgtp;
- struct nvme_fc_remote_port *nrport;
+ struct nvme_fc_remote_port *nrport = NULL;
struct lpfc_nvme_rport *rport;
cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
@@ -654,7 +653,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
"\nOutstanding IO x%x\n", outio);
if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
len += snprintf(buf + len, size - len,
"\nNVME Targetport Entry ...\n");
@@ -696,11 +694,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf + len, size - len, "\tRport List:\n");
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
+ spin_lock(&phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
- if (!rport)
- continue;
-
- nrport = rport->remoteport;
+ if (rport)
+ nrport = rport->remoteport;
+ spin_unlock(&phba->hbalock);
if (!nrport)
continue;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4dda969e947c..f1c1faa74b46 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7673,8 +7673,11 @@ void
lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
+
+ spin_lock_irq(&phba->port_list_lock);
list_for_each_entry(vport, &phba->port_list, listentry)
lpfc_els_flush_cmd(vport);
+ spin_unlock_irq(&phba->port_list_lock);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index eb71877f12f8..f4deb862efc6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4193,7 +4193,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (new_state == NLP_STE_MAPPED_NODE ||
new_state == NLP_STE_UNMAPPED_NODE) {
- if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+ if (ndlp->nlp_fc4_type ||
ndlp->nlp_DID == Fabric_DID ||
ndlp->nlp_DID == NameServer_DID ||
ndlp->nlp_DID == FDMI_DID) {
@@ -5428,12 +5428,10 @@ static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- struct lpfc_sli *psli;
IOCB_t *icmd;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_sli_ring *pring;
- psli = &phba->sli;
pring = lpfc_phba_elsring(phba);
if (unlikely(!pring))
return;
@@ -5938,14 +5936,14 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
}
}
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock_irqsave(&phba->port_list_lock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport->vpi == i) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return vport;
}
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return NULL;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 083f8c8706e5..bbd0a57e953f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -186,6 +186,7 @@ struct lpfc_sli_intf {
#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
+#define LPFC_CTL_PDEV_CTL_DDL_RAS 0x1000000
#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
@@ -964,6 +965,7 @@ struct mbox_header {
/* Subsystem Definitions */
#define LPFC_MBOX_SUBSYSTEM_NA 0x0
#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
+#define LPFC_MBOX_SUBSYSTEM_LOWLEVEL 0xB
#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
/* Device Specific Definitions */
@@ -1030,6 +1032,9 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
+/* Low level Opcodes */
+#define LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION 0x37
+
/* Mailbox command structures */
struct eq_context {
uint32_t word0;
@@ -1162,6 +1167,45 @@ struct lpfc_mbx_nop {
uint32_t context[2];
};
+
+
+struct lpfc_mbx_set_ras_fwlog {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_fwlog_enable_SHIFT 0
+#define lpfc_fwlog_enable_MASK 0x00000001
+#define lpfc_fwlog_enable_WORD word4
+#define lpfc_fwlog_loglvl_SHIFT 8
+#define lpfc_fwlog_loglvl_MASK 0x0000000F
+#define lpfc_fwlog_loglvl_WORD word4
+#define lpfc_fwlog_ra_SHIFT 15
+#define lpfc_fwlog_ra_MASK 0x00000008
+#define lpfc_fwlog_ra_WORD word4
+#define lpfc_fwlog_buffcnt_SHIFT 16
+#define lpfc_fwlog_buffcnt_MASK 0x000000FF
+#define lpfc_fwlog_buffcnt_WORD word4
+#define lpfc_fwlog_buffsz_SHIFT 24
+#define lpfc_fwlog_buffsz_MASK 0x000000FF
+#define lpfc_fwlog_buffsz_WORD word4
+ uint32_t word5;
+#define lpfc_fwlog_acqe_SHIFT 0
+#define lpfc_fwlog_acqe_MASK 0x0000FFFF
+#define lpfc_fwlog_acqe_WORD word5
+#define lpfc_fwlog_cqid_SHIFT 16
+#define lpfc_fwlog_cqid_MASK 0x0000FFFF
+#define lpfc_fwlog_cqid_WORD word5
+#define LPFC_MAX_FWLOG_PAGE 16
+ struct dma_address lwpd;
+ struct dma_address buff_fwlog[LPFC_MAX_FWLOG_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+
struct cq_context {
uint32_t word0;
#define lpfc_cq_context_event_SHIFT 31
@@ -3868,6 +3912,7 @@ struct lpfc_mqe {
struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
struct lpfc_mbx_set_host_data set_host_data;
struct lpfc_mbx_nop nop;
+ struct lpfc_mbx_set_ras_fwlog ras_fwlog;
} un;
};
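The _SHIFT/_MASK/_WORD triplets above are consumed by the driver's bf_set() accessor, which masks the value and shifts it into place within the named word. A small standalone sketch of how word4 is composed for SET_DIAG_LOG_OPTION (the helper mirrors bf_set() semantics; the field values are illustrative, e.g. 16 buffers of 64K described in 4K pages):

#include <stdio.h>
#include <stdint.h>

/* Mirrors lpfc's bf_set(): clear the field, then OR in the masked value. */
static uint32_t set_field(uint32_t word, uint32_t val, int shift, uint32_t mask)
{
	word &= ~(mask << shift);
	return word | ((val & mask) << shift);
}

int main(void)
{
	uint32_t word4 = 0;

	word4 = set_field(word4, 1, 0, 0x00000001);	/* fwlog_enable */
	word4 = set_field(word4, 4, 8, 0x0000000F);	/* fwlog_loglvl (example) */
	word4 = set_field(word4, 16, 16, 0x000000FF);	/* fwlog_buffcnt */
	word4 = set_field(word4, 16, 24, 0x000000FF);	/* fwlog_buffsz: 64K/4K */
	printf("word4 = 0x%08x\n", word4);
	return 0;
}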
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f3cae733ae2d..20fa6785a0e2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3956,7 +3956,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
if (phba->sli_rev == LPFC_SLI_REV4) {
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
- shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+ shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
}
/*
@@ -3988,9 +3988,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
if (error)
goto out_put_shost;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_add_tail(&vport->listentry, &phba->port_list);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
return vport;
out_put_shost:
@@ -4016,9 +4016,9 @@ destroy_port(struct lpfc_vport *vport)
fc_remove_host(shost);
scsi_remove_host(shost);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
lpfc_cleanup(vport);
return;
@@ -5621,7 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Initialize ndlp management spinlock */
spin_lock_init(&phba->ndlp_lock);
+ /* Initialize port_list spinlock */
+ spin_lock_init(&phba->port_list_lock);
INIT_LIST_HEAD(&phba->port_list);
+
INIT_LIST_HEAD(&phba->work_list);
init_waitqueue_head(&phba->wait_4_mlo_m_q);
@@ -5919,8 +5922,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
/*
* Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5942,9 +5943,16 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
- phba->cfg_sg_seg_cnt =
- LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+ /*
+ * If supporting DIF, reduce the seg count for scsi to
+ * allow room for the DIF sges.
+ */
+ if (phba->cfg_enable_bg &&
+ phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
+ phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
+ else
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
} else {
/*
* The scsi_buf for a regular I/O holds the FCP cmnd,
@@ -5958,6 +5966,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Total SGEs for scsi_sg_list */
phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
/*
* NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
@@ -5965,10 +5974,22 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
}
+ /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+ "6300 Reducing NVME sg segment "
+ "cnt to %d\n",
+ LPFC_MAX_NVME_SEG_CNT);
+ phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ } else
+ phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+ }
+
/* Initialize the host templates with the updated values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5977,9 +5998,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
- "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+ "9087 sg_seg_cnt:%d dmabuf_size:%d "
+ "total:%d scsi:%d nvme:%d\n",
phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
- phba->cfg_total_seg_cnt);
+ phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
+ phba->cfg_nvme_seg_cnt);
/* Initialize buffer queue management fields */
INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
@@ -6205,6 +6228,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (phba->cfg_fof)
fof_vectors = 1;
+ /* Verify RAS support on adapter */
+ lpfc_sli4_ras_init(phba);
+
/* Verify all the SLI4 queues */
rc = lpfc_sli4_queue_verify(phba);
if (rc)
@@ -7967,7 +7993,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
else
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3028 GET_FUNCTION_CONFIG: failed to find "
- "Resrouce Descriptor:x%x\n",
+ "Resource Descriptor:x%x\n",
LPFC_RSRC_DESC_TYPE_FCFCOE);
read_cfg_out:
@@ -10492,6 +10518,14 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Stop kthread signal shall trigger work_done one more time */
kthread_stop(phba->worker_thread);
+ /* Disable FW logging to host memory */
+ writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* Free RAS DMA memory */
+ if (phba->ras_fwlog.ras_enabled)
+ lpfc_sli4_ras_dma_free(phba);
+
/* Unset the queues shared with the hardware then release all
* allocated resources.
*/
@@ -10737,6 +10771,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->mds_diags_support = 1;
else
phba->mds_diags_support = 0;
+
return 0;
}
@@ -10965,9 +11000,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
kfree(phba->vpi_ids);
lpfc_stop_hba_timers(phba);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
lpfc_debugfs_terminate(vport);
@@ -11329,10 +11364,6 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
/* Bring device online, it will be no-op for non-fatal error resume */
lpfc_online(phba);
-
- /* Clean up Advanced Error Reporting (AER) if needed */
- if (phba->hba_flag & HBA_AER_ENABLED)
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
@@ -11698,6 +11729,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
+
+ /* Enable RAS FW log support */
+ lpfc_sli4_ras_setup(phba);
+
return 0;
out_disable_intr:
@@ -11777,9 +11812,9 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
lpfc_sli4_hba_unset(phba);
lpfc_stop_hba_timers(phba);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
/* Perform scsi free before driver resource_unset since scsi
* buffers are released to their corresponding pools here.
@@ -12144,10 +12179,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
/* Bring the device back online */
lpfc_online(phba);
}
-
- /* Clean up Advanced Error Reporting (AER) if needed */
- if (phba->hba_flag & HBA_AER_ENABLED)
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
@@ -12428,6 +12459,30 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks whether the adapter supports RAS FW logging and,
+ * if so, whether logging should be enabled on this PCI function.
+ **/
+void
+lpfc_sli4_ras_init(struct lpfc_hba *phba)
+{
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_LANCER_G6_FC:
+ case PCI_DEVICE_ID_LANCER_G7_FC:
+ phba->ras_fwlog.ras_hwsupport = true;
+ if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn))
+ phba->ras_fwlog.ras_enabled = true;
+ else
+ phba->ras_fwlog.ras_enabled = false;
+ break;
+ default:
+ phba->ras_fwlog.ras_hwsupport = false;
+ }
+}
+
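lpfc_sli4_ras_init() above gates enablement on the PCI function number: hardware support is per-device, but logging is only switched on when the function matches the cfg_ras_fwlog_func parameter. A standalone sketch of the devfn decode (the macros are copied from the uapi <linux/pci.h> definitions; the devfn value is invented):

#include <stdio.h>

#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)

int main(void)
{
	unsigned int devfn = 0x51;		/* example: device 10, function 1 */
	unsigned int cfg_ras_fwlog_func = 0;	/* assumed parameter value */

	printf("slot %u function %u -> RAS logging %s\n",
	       PCI_SLOT(devfn), PCI_FUNC(devfn),
	       PCI_FUNC(devfn) == cfg_ras_fwlog_func ? "enabled" : "disabled");
	return 0;
}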
+/**
* lpfc_fof_queue_setup - Set up all the fof queues
* @phba: pointer to lpfc hba data structure.
*
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd9bce9d9974..269808e8480f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -2318,6 +2318,7 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
@@ -2395,6 +2396,7 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
@@ -2652,6 +2654,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_cancel_retry_delay_tmo(vport, ndlp);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 028462e5994d..ba831def9301 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -282,7 +282,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6001 ENTER. lpfc_pnvme %p, qidx x%xi qhandle %p\n",
+ "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
lport, qidx, handle);
kfree(handle);
}
@@ -2235,13 +2235,11 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
struct sli4_sge *sgl;
dma_addr_t pdma_phys_sgl;
uint16_t iotag, lxri = 0;
- int bcnt, num_posted, sgl_size;
+ int bcnt, num_posted;
LIST_HEAD(prep_nblist);
LIST_HEAD(post_nblist);
LIST_HEAD(nvme_nblist);
- sgl_size = phba->cfg_sg_dma_buf_size;
-
for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
if (!lpfc_ncmd)
@@ -2462,17 +2460,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
- /* Limit to LPFC_MAX_NVME_SEG_CNT.
- * For now need + 1 to get around NVME transport logic.
+ /* We need to tell the transport layer + 1 because it takes page
+ * alignment into account. When space for the SGL is allocated we
+ * allocate + 3, one for cmd, one for rsp and one for this alignment
*/
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
- "6300 Reducing sg segment cnt to %d\n",
- LPFC_MAX_NVME_SEG_CNT);
- phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- } else {
- phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
- }
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
@@ -2725,7 +2716,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ spin_lock_irq(&vport->phba->hbalock);
oldrport = lpfc_ndlp_get_nrport(ndlp);
+ spin_unlock_irq(&vport->phba->hbalock);
if (!oldrport)
lpfc_nlp_get(ndlp);
@@ -2840,7 +2833,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
- struct nvme_fc_remote_port *remoteport;
+ struct nvme_fc_remote_port *remoteport = NULL;
localport = vport->localport;
@@ -2854,11 +2847,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!lport)
goto input_err;
+ spin_lock_irq(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
- if (!rport)
+ if (rport)
+ remoteport = rport->remoteport;
+ spin_unlock_irq(&vport->phba->hbalock);
+ if (!remoteport)
goto input_err;
- remoteport = rport->remoteport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6033 Unreg nvme remoteport %p, portname x%llx, "
"port_id x%06x, portstate x%x port type x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index b766afe10d3d..6245f442d784 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1339,15 +1339,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
idx = 0;
}
- infop = phba->sli4_hba.nvmet_ctx_info;
- for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+ infop = lpfc_get_ctx_list(phba, i, j);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
"6408 TOTAL NVMET ctx for CPU %d "
"MRQ %d: cnt %d nextcpu %p\n",
i, j, infop->nvmet_ctx_list_cnt,
infop->nvmet_ctx_next_cpu);
- infop++;
}
}
return 0;
@@ -1373,17 +1372,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
pinfo.port_id = vport->fc_myDID;
- /* Limit to LPFC_MAX_NVME_SEG_CNT.
- * For now need + 1 to get around NVME transport logic.
+ /* We need to tell the transport layer + 1 because it takes page
+ * alignment into account. When space for the SGL is allocated we
+ * allocate + 3, one for cmd, one for rsp and one for this alignment
*/
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
- "6400 Reducing sg segment cnt to %d\n",
- LPFC_MAX_NVME_SEG_CNT);
- phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- } else {
- phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
- }
lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 5c7858e735c9..4fa6703a9ec9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -202,8 +202,8 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
- struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
- struct lpfc_nodelist *pnode = rdata->pnode;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
unsigned long flags;
struct Scsi_Host *shost = cmd->device->host;
@@ -211,17 +211,19 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
unsigned long latency;
int i;
- if (cmd->result)
+ if (!vport->stat_data_enabled ||
+ vport->stat_data_blocked ||
+ (cmd->result))
return;
latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+ rdata = lpfc_cmd->rdata;
+ pnode = rdata->pnode;
spin_lock_irqsave(shost->host_lock, flags);
- if (!vport->stat_data_enabled ||
- vport->stat_data_blocked ||
- !pnode ||
- !pnode->lat_data ||
- (phba->bucket_type == LPFC_NO_BUCKET)) {
+ if (!pnode ||
+ !pnode->lat_data ||
+ (phba->bucket_type == LPFC_NO_BUCKET)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
@@ -1050,7 +1052,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (!found)
return NULL;
- if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
}
@@ -4158,9 +4160,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
- spin_lock_irqsave(&phba->hbalock, flags);
- lpfc_cmd->pCmd = NULL;
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* If pCmd was set to NULL from abort path, do not call scsi_done */
+ if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0711 FCP cmd already NULL, sid: 0x%06x, "
+ "did: 0x%06x, oxid: 0x%04x\n",
+ vport->fc_myDID,
+ (pnode) ? pnode->nlp_DID : 0,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
+ return;
+ }
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
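The switch from a lock-protected store to xchg() above makes completion ownership explicit: whichever of the normal completion and the abort path swaps pCmd to NULL first gets to call scsi_done(), and the loser backs off. A standalone sketch of that pattern using a GCC atomic builtin (names are illustrative, not driver symbols):

#include <stdio.h>

static void *pCmd = (void *)0x1;	/* stands in for lpfc_cmd->pCmd */

static int try_complete(const char *who)
{
	/* Atomically take ownership; only one caller sees a non-NULL value. */
	void *old = __atomic_exchange_n(&pCmd, (void *)0, __ATOMIC_SEQ_CST);

	if (!old) {
		printf("%s: already completed, skip scsi_done\n", who);
		return 0;
	}
	printf("%s: owns completion\n", who);
	return 1;
}

int main(void)
{
	try_complete("iocb completion");
	try_complete("abort path");
	return 0;
}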
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 9830bdb6e072..783a1540cfbe 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -392,11 +392,7 @@ lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
struct lpfc_register doorbell;
doorbell.word0 = 0;
- bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
- bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
- bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
- (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
- bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+ bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
@@ -3797,6 +3793,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
struct hbq_dmabuf *dmabuf;
struct lpfc_cq_event *cq_event;
unsigned long iflag;
+ int count = 0;
spin_lock_irqsave(&phba->hbalock, iflag);
phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
@@ -3818,16 +3815,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
if (irspiocbq)
lpfc_sli_sp_handle_rspiocb(phba, pring,
irspiocbq);
+ count++;
break;
case CQE_CODE_RECEIVE:
case CQE_CODE_RECEIVE_V1:
dmabuf = container_of(cq_event, struct hbq_dmabuf,
cq_event);
lpfc_sli4_handle_received_buffer(phba, dmabuf);
+ count++;
break;
default:
break;
}
+
+ /* Limit the number of events to 64 to avoid soft lockups */
+ if (count == 64)
+ break;
}
}
@@ -6146,6 +6149,271 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
}
/**
+ * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called to free memory allocated for RAS FW logging
+ * support in the driver.
+ **/
+void
+lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct lpfc_dmabuf *dmabuf, *next;
+
+ if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
+ list_for_each_entry_safe(dmabuf, next,
+ &ras_fwlog->fwlog_buff_list,
+ list) {
+ list_del(&dmabuf->list);
+ dma_free_coherent(&phba->pcidev->dev,
+ LPFC_RAS_MAX_ENTRY_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+ }
+
+ if (ras_fwlog->lwpd.virt) {
+ dma_free_coherent(&phba->pcidev->dev,
+ sizeof(uint32_t) * 2,
+ ras_fwlog->lwpd.virt,
+ ras_fwlog->lwpd.phys);
+ ras_fwlog->lwpd.virt = NULL;
+ }
+
+ ras_fwlog->ras_active = false;
+}
+
+/**
+ * lpfc_sli4_ras_dma_alloc - Allocate DMA memory for FW logging
+ * @phba: Pointer to HBA context object.
+ * @fwlog_buff_count: Count of buffers to be created.
+ *
+ * This routine allocates DMA memory for the Log Write Position Data (LWPD)
+ * and for the buffers the adapter posts FW log entries into.
+ * The buffer count is derived from the module param ras_fwlog_buffsize;
+ * each buffer posted to the FW is 64K.
+ **/
+
+static int
+lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
+ uint32_t fwlog_buff_count)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct lpfc_dmabuf *dmabuf;
+ int rc = 0, i = 0;
+
+ /* Initialize List */
+ INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
+
+ /* Allocate memory for the LWPD */
+ ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
+ sizeof(uint32_t) * 2,
+ &ras_fwlog->lwpd.phys,
+ GFP_KERNEL);
+ if (!ras_fwlog->lwpd.virt) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6185 LWPD Memory Alloc Failed\n");
+
+ return -ENOMEM;
+ }
+
+ ras_fwlog->fw_buffcount = fwlog_buff_count;
+ for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+ GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6186 Memory Alloc failed FW logging");
+ goto free_mem;
+ }
+
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ LPFC_RAS_MAX_ENTRY_SIZE,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ rc = -ENOMEM;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6187 DMA Alloc Failed FW logging");
+ goto free_mem;
+ }
+ memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
+ dmabuf->buffer_tag = i;
+ list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
+ }
+
+free_mem:
+ if (rc)
+ lpfc_sli4_ras_dma_free(phba);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * Completion handler for driver's RAS MBX command to the device.
+ **/
+static void
+lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+ mb = &pmb->u.mb;
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+ if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "6188 FW LOG mailbox "
+ "completed with status x%x add_status x%x,"
+ " mbx status x%x\n",
+ shdr_status, shdr_add_status, mb->mbxStatus);
+ goto disable_ras;
+ }
+
+ ras_fwlog->ras_active = true;
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return;
+
+disable_ras:
+ /* Free RAS DMA memory */
+ lpfc_sli4_ras_dma_free(phba);
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
+ * @phba: pointer to lpfc hba data structure.
+ * @fwlog_level: Logging verbosity level.
+ * @fwlog_enable: Enable/Disable logging.
+ *
+ * Initialize memory and post mailbox command to enable FW logging in host
+ * memory.
+ **/
+int
+lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
+ uint32_t fwlog_level,
+ uint32_t fwlog_enable)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
+ int rc = 0;
+
+ fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
+ phba->cfg_ras_fwlog_buffsize);
+ fwlog_entry_count = (fwlog_buffsize / LPFC_RAS_MAX_ENTRY_SIZE);
+
+ /*
+ * If re-enabling FW logging, reuse the previously allocated
+ * DMA buffers when posting the MBX command.
+ */
+ if (!ras_fwlog->lwpd.virt) {
+ rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6189 RAS FW Log Support Not Enabled");
+ return rc;
+ }
+ }
+
+ /* Setup Mailbox command */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6190 RAS MBX Alloc Failed");
+ rc = -ENOMEM;
+ goto mem_free;
+ }
+
+ ras_fwlog->fw_loglevel = fwlog_level;
+ len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
+ LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
+ len, LPFC_SLI4_MBX_EMBED);
+
+ mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
+ bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
+ fwlog_enable);
+ bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
+ ras_fwlog->fw_loglevel);
+ bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
+ ras_fwlog->fw_buffcount);
+ bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
+ LPFC_RAS_MAX_ENTRY_SIZE / SLI4_PAGE_SIZE);
+
+ /* Update DMA buffer address */
+ list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
+ memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
+
+ mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+
+ mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+
+ /* Update LWPD address */
+ mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
+ mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6191 RAS Mailbox failed. "
+ "status %d mbxStatus : x%x", rc,
+ bf_get(lpfc_mqe_status, &mbox->u.mqe));
+ mempool_free(mbox, phba->mbox_mem_pool);
+ rc = -EIO;
+ goto mem_free;
+ } else {
+ rc = 0;
+ }
+mem_free:
+ if (rc)
+ lpfc_sli4_ras_dma_free(phba);
+
+ return rc;
+}
+
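For a sense of scale in the sizing math at the top of lpfc_sli4_ras_fwlog_init(): assuming LPFC_RAS_MIN_BUFF_POST_SIZE is 256K (the define is not visible in this hunk, so treat that unit as an assumption), the module parameter simply scales how many 64K buffers get allocated and posted:

#include <stdio.h>
#include <stdint.h>

#define UNIT_SIZE  (256 * 1024)	/* assumed LPFC_RAS_MIN_BUFF_POST_SIZE */
#define ENTRY_SIZE (64 * 1024)	/* LPFC_RAS_MAX_ENTRY_SIZE, 64K per buffer */

int main(void)
{
	for (uint32_t parm = 1; parm <= 4; parm++)
		printf("ras_fwlog_buffsize=%u -> %u buffers (%u KB total)\n",
		       parm, (parm * UNIT_SIZE) / ENTRY_SIZE,
		       (parm * UNIT_SIZE) / 1024);
	return 0;
}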
+/**
+ * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check if RAS is supported on the adapter and initialize it.
+ **/
+void
+lpfc_sli4_ras_setup(struct lpfc_hba *phba)
+{
+ /* Check RAS FW Log needs to be enabled or not */
+ if (lpfc_check_fwlog_support(phba))
+ return;
+
+ lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
+ LPFC_RAS_ENABLE_LOGGING);
+}
+
+/**
* lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
* @phba: Pointer to HBA context object.
*
@@ -10266,8 +10534,12 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
LPFC_MBOXQ_t *pmb;
unsigned long iflag;
+ /* Disable softirqs, including timers from obtaining phba->hbalock */
+ local_bh_disable();
+
/* Flush all the mailbox commands in the mbox system */
spin_lock_irqsave(&phba->hbalock, iflag);
+
/* The pending mailbox command queue */
list_splice_init(&phba->sli.mboxq, &completions);
/* The outstanding active mailbox command */
@@ -10280,6 +10552,9 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
list_splice_init(&phba->sli.mboxq_cmpl, &completions);
spin_unlock_irqrestore(&phba->hbalock, iflag);
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+
/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
while (!list_empty(&completions)) {
list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
@@ -10419,6 +10694,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
lpfc_hba_down_prep(phba);
+ /* Disable softirqs, including timers from obtaining phba->hbalock */
+ local_bh_disable();
+
lpfc_fabric_abort_hba(phba);
spin_lock_irqsave(&phba->hbalock, flags);
@@ -10472,6 +10750,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
kfree(buf_ptr);
}
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+
/* Return any active mbox cmds */
del_timer_sync(&psli->mbox_tmo);
@@ -11775,6 +12056,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
}
timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+ /* Disable softirqs, including timers from obtaining phba->hbalock */
+ local_bh_disable();
+
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
@@ -11788,6 +12072,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
1000) + jiffies;
spin_unlock_irq(&phba->hbalock);
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
msleep(2);
@@ -11797,9 +12084,13 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
*/
break;
}
- } else
+ } else {
spin_unlock_irq(&phba->hbalock);
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+ }
+
lpfc_sli_mbox_sys_flush(phba);
}
@@ -13136,7 +13427,6 @@ static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
bool workposted = false;
- struct fc_frame_header *fc_hdr;
struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
struct lpfc_nvmet_tgtport *tgtp;
@@ -13173,9 +13463,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
hrq->RQ_buf_posted--;
memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
- /* If a NVME LS event (type 0x28), treat it as Fast path */
- fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
-
/* save off the frame for the word thread to process */
list_add_tail(&dma_buf->cq_event.list,
&phba->sli4_hba.sp_queue_event);
@@ -14558,13 +14845,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
- uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
/* sanity check on queue memory */
if (!cq || !eq)
return -ENODEV;
- if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = cq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 399c0015c546..e76c380e1a84 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -886,3 +886,4 @@ int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 501249509af4..5a0d512ff497 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.0.0.6"
+#define LPFC_DRIVER_VERSION "12.0.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 1ff0f7de9105..c340e0e47473 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -207,7 +207,7 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
struct lpfc_vport *vport;
unsigned long flags;
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock_irqsave(&phba->port_list_lock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport == new_vport)
continue;
@@ -215,11 +215,11 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
if (memcmp(&vport->fc_sparam.portName,
&new_vport->fc_sparam.portName,
sizeof(struct lpfc_name)) == 0) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return 0;
}
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return 1;
}
@@ -825,9 +825,9 @@ skip_logo:
lpfc_free_vpi(phba, vport->vpi);
vport->work_port_events = 0;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1828 Vport Deleted.\n");
scsi_host_put(shost);
@@ -844,7 +844,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
GFP_KERNEL);
if (vports == NULL)
return NULL;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
if (port_iterator->load_flag & FC_UNLOADING)
continue;
@@ -856,7 +856,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
}
vports[index++] = port_iterator;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
return vports;
}
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index eb551f3cc471..764d320bb2ca 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -52,14 +52,12 @@ struct mac_esp_priv {
struct esp *esp;
void __iomem *pdma_regs;
void __iomem *pdma_io;
- int error;
};
static struct esp *esp_chips[2];
static DEFINE_SPINLOCK(esp_chips_lock);
#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
- platform_get_drvdata((struct platform_device *) \
- (esp->dev)))
+ dev_get_drvdata((esp)->dev))
static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
@@ -71,38 +69,6 @@ static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
return nubus_readb(esp->regs + reg * 16);
}
-/* For pseudo DMA and PIO we need the virtual address
- * so this address mapping is the identity mapping.
- */
-
-static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return (dma_addr_t)buf;
-}
-
-static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- int i;
-
- for (i = 0; i < num_sg; i++)
- sg[i].dma_address = (u32)sg_virt(&sg[i]);
- return num_sg;
-}
-
-static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- /* Nothing to do. */
-}
-
-static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- /* Nothing to do. */
-}
-
static void mac_esp_reset_dma(struct esp *esp)
{
/* Nothing to do. */
@@ -120,12 +86,11 @@ static void mac_esp_dma_invalidate(struct esp *esp)
static int mac_esp_dma_error(struct esp *esp)
{
- return MAC_ESP_GET_PRIV(esp)->error;
+ return esp->send_cmd_error;
}
static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
{
- struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
int i = 500000;
do {
@@ -140,7 +105,7 @@ static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
esp_read8(ESP_STATUS));
- mep->error = 1;
+ esp->send_cmd_error = 1;
return 1;
}
@@ -166,7 +131,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
esp_read8(ESP_STATUS));
- mep->error = 1;
+ esp->send_cmd_error = 1;
return 1;
}
@@ -233,7 +198,7 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
{
struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
- mep->error = 0;
+ esp->send_cmd_error = 0;
if (!write)
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
@@ -271,164 +236,6 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
} while (esp_count);
}
-/*
- * Programmed IO routines follow.
- */
-
-static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
-{
- int i = 500000;
-
- do {
- unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
-
- if (fbytes)
- return fbytes;
-
- udelay(2);
- } while (--i);
-
- printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
- esp_read8(ESP_STATUS));
- return 0;
-}
-
-static inline int mac_esp_wait_for_intr(struct esp *esp)
-{
- struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
- int i = 500000;
-
- do {
- esp->sreg = esp_read8(ESP_STATUS);
- if (esp->sreg & ESP_STAT_INTR)
- return 0;
-
- udelay(2);
- } while (--i);
-
- printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
- mep->error = 1;
- return 1;
-}
-
-#define MAC_ESP_PIO_LOOP(operands, reg1) \
- asm volatile ( \
- "1: moveb " operands " \n" \
- " subqw #1,%1 \n" \
- " jbne 1b \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo))
-
-#define MAC_ESP_PIO_FILL(operands, reg1) \
- asm volatile ( \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " subqw #8,%1 \n" \
- " subqw #8,%1 \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo))
-
-#define MAC_ESP_FIFO_SIZE 16
-
-static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
- u32 dma_count, int write, u8 cmd)
-{
- struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
- u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
- u8 phase = esp->sreg & ESP_STAT_PMASK;
-
- cmd &= ~ESP_CMD_DMA;
- mep->error = 0;
-
- if (write) {
- u8 *dst = (u8 *)addr;
- u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
-
- scsi_esp_cmd(esp, cmd);
-
- while (1) {
- if (!mac_esp_wait_for_fifo(esp))
- break;
-
- *dst++ = esp_read8(ESP_FDATA);
- --esp_count;
-
- if (!esp_count)
- break;
-
- if (mac_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = esp_read8(ESP_INTRPT);
- if (esp->ireg & mask) {
- mep->error = 1;
- break;
- }
-
- if (phase == ESP_MIP)
- scsi_esp_cmd(esp, ESP_CMD_MOK);
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- } else {
- scsi_esp_cmd(esp, ESP_CMD_FLUSH);
-
- if (esp_count >= MAC_ESP_FIFO_SIZE)
- MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
- else
- MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
-
- scsi_esp_cmd(esp, cmd);
-
- while (esp_count) {
- unsigned int n;
-
- if (mac_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = esp_read8(ESP_INTRPT);
- if (esp->ireg & ~ESP_INTR_BSERV) {
- mep->error = 1;
- break;
- }
-
- n = MAC_ESP_FIFO_SIZE -
- (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
- if (n > esp_count)
- n = esp_count;
-
- if (n == MAC_ESP_FIFO_SIZE) {
- MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
- } else {
- esp_count -= n;
- MAC_ESP_PIO_LOOP("%0@+,%2@", n);
- }
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- }
-}
-
static int mac_esp_irq_pending(struct esp *esp)
{
if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
@@ -470,10 +277,6 @@ static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
static struct esp_driver_ops mac_esp_ops = {
.esp_write8 = mac_esp_write8,
.esp_read8 = mac_esp_read8,
- .map_single = mac_esp_map_single,
- .map_sg = mac_esp_map_sg,
- .unmap_single = mac_esp_unmap_single,
- .unmap_sg = mac_esp_unmap_sg,
.irq_pending = mac_esp_irq_pending,
.dma_length_limit = mac_esp_dma_length_limit,
.reset_dma = mac_esp_reset_dma,
@@ -508,7 +311,7 @@ static int esp_mac_probe(struct platform_device *dev)
esp = shost_priv(host);
esp->host = host;
- esp->dev = dev;
+ esp->dev = &dev->dev;
esp->command_block = kzalloc(16, GFP_KERNEL);
if (!esp->command_block)
@@ -551,14 +354,16 @@ static int esp_mac_probe(struct platform_device *dev)
mep->pdma_regs = NULL;
break;
}
+ esp->fifo_reg = esp->regs + ESP_FDATA * 16;
esp->ops = &mac_esp_ops;
+ esp->flags = ESP_FLAG_NO_DMA_MAP;
if (mep->pdma_io == NULL) {
printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
esp_write8(0, ESP_TCLOW);
esp_write8(0, ESP_TCMED);
- esp->flags = ESP_FLAG_DISABLE_SYNC;
- mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
+ esp->flags |= ESP_FLAG_DISABLE_SYNC;
+ mac_esp_ops.send_dma_cmd = esp_send_pio_cmd;
} else {
printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
}
@@ -577,7 +382,7 @@ static int esp_mac_probe(struct platform_device *dev)
esp_chips[dev->id] = esp;
spin_unlock(&esp_chips_lock);
- err = scsi_esp_register(esp, &dev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 530358cdcb39..3b7abe5ca7f5 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -202,13 +202,6 @@ module_param_named(debug_level, mraid_debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
/*
- * ### global data ###
- */
-static uint8_t megaraid_mbox_version[8] =
- { 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
-
-
-/*
* PCI table for all supported controllers.
*/
static struct pci_device_id pci_id_table_g[] = {
@@ -457,10 +450,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
// Setup the default DMA mask. This would be changed later on
// depending on hardware capabilities
- if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
-
+ if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(32))) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
+ "megaraid: dma_set_mask failed:%d\n", __LINE__));
goto out_free_adapter;
}
@@ -484,7 +476,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
// Start the mailbox based controller
if (megaraid_init_mbox(adapter) != 0) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: maibox adapter did not initialize\n"));
+ "megaraid: mailbox adapter did not initialize\n"));
goto out_free_adapter;
}
@@ -878,11 +870,12 @@ megaraid_init_mbox(adapter_t *adapter)
adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
- if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
+ if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(64))) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: DMA mask for 64-bit failed\n"));
- if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: 32-bit DMA mask failed\n"));
goto out_free_sysfs_res;
@@ -950,7 +943,7 @@ megaraid_fini_mbox(adapter_t *adapter)
* megaraid_alloc_cmd_packets - allocate shared mailbox
* @adapter : soft state of the raid controller
*
- * Allocate and align the shared mailbox. This maibox is used to issue
+ * Allocate and align the shared mailbox. This mailbox is used to issue
* all the commands. For IO based controllers, the mailbox is also registered
* with the FW. Allocate memory for all commands as well.
* This is our big allocator.
@@ -975,9 +968,9 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
* Allocate the common 16-byte aligned memory for the handshake
* mailbox.
*/
- raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev,
- sizeof(mbox64_t),
- &raid_dev->una_mbox64_dma);
+ raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev,
+ sizeof(mbox64_t), &raid_dev->una_mbox64_dma,
+ GFP_KERNEL);
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING
@@ -1003,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
align;
// Allocate memory for commands issued internally
- adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE,
- &adapter->ibuf_dma_h);
+ adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
+ &adapter->ibuf_dma_h, GFP_KERNEL);
if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING
@@ -1082,7 +1075,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
scb->scp = NULL;
scb->state = SCB_FREE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
scb->dma_type = MRAID_DMA_NONE;
scb->dev_channel = -1;
scb->dev_target = -1;
@@ -1098,10 +1091,10 @@ out_teardown_dma_pools:
out_free_scb_list:
kfree(adapter->kscb_list);
out_free_ibuf:
- pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
+ dma_free_coherent(&pdev->dev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
adapter->ibuf_dma_h);
out_free_common_mbox:
- pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
return -1;
@@ -1123,10 +1116,10 @@ megaraid_free_cmd_packets(adapter_t *adapter)
kfree(adapter->kscb_list);
- pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
+ dma_free_coherent(&adapter->pdev->dev, MBOX_IBUF_SIZE,
(void *)adapter->ibuf, adapter->ibuf_dma_h);
- pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
return;
}
@@ -1428,12 +1421,6 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
adapter->outstanding_cmds++;
- if (scb->dma_direction == PCI_DMA_TODEVICE)
- pci_dma_sync_sg_for_device(adapter->pdev,
- scsi_sglist(scb->scp),
- scsi_sg_count(scb->scp),
- PCI_DMA_TODEVICE);
-
mbox->busy = 1; // Set busy
mbox->poll = 0;
mbox->ack = 0;
@@ -2181,31 +2168,6 @@ megaraid_isr(int irq, void *devp)
/**
- * megaraid_mbox_sync_scb - sync kernel buffers
- * @adapter : controller's soft state
- * @scb : pointer to the resource packet
- *
- * DMA sync if required.
- */
-static void
-megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
-{
- mbox_ccb_t *ccb;
-
- ccb = (mbox_ccb_t *)scb->ccb;
-
- if (scb->dma_direction == PCI_DMA_FROMDEVICE)
- pci_dma_sync_sg_for_cpu(adapter->pdev,
- scsi_sglist(scb->scp),
- scsi_sg_count(scb->scp),
- PCI_DMA_FROMDEVICE);
-
- scsi_dma_unmap(scb->scp);
- return;
-}
-
-
-/**
* megaraid_mbox_dpc - the tasklet to complete the commands from completed list
* @devp : pointer to HBA soft state
*
@@ -2403,9 +2365,7 @@ megaraid_mbox_dpc(unsigned long devp)
megaraid_mbox_display_scb(adapter, scb);
}
- // Free our internal resources and call the mid-layer callback
- // routine
- megaraid_mbox_sync_scb(adapter, scb);
+ scsi_dma_unmap(scp);
// remove from local clist
list_del_init(&scb->list);
@@ -2577,7 +2537,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
uint8_t raw_mbox[sizeof(mbox_t)];
int rval;
int recovery_window;
- int recovering;
int i;
uioc_t *kioc;
@@ -2591,7 +2550,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
return FAILED;
}
-
// Under exceptional conditions, FW can take up to 3 minutes to
// complete command processing. Wait for additional 2 minutes for the
// pending commands counter to go down to 0. If it doesn't, let the
@@ -2640,8 +2598,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
- recovering = adapter->outstanding_cmds;
-
for (i = 0; i < recovery_window; i++) {
megaraid_ack_sequence(adapter);
@@ -2725,13 +2681,10 @@ static int
mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
{
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
- mbox64_t *mbox64;
mbox_t *mbox;
uint8_t status;
int i;
-
- mbox64 = raid_dev->mbox64;
mbox = raid_dev->mbox;
/*
@@ -2948,9 +2901,8 @@ megaraid_mbox_product_info(adapter_t *adapter)
* Issue an ENQUIRY3 command to find out certain adapter parameters,
* e.g., max channels, max commands etc.
*/
- pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
- &pinfo_dma_h);
-
+ pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
+ &pinfo_dma_h, GFP_KERNEL);
if (pinfo == NULL) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __func__,
@@ -2971,7 +2923,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
pinfo, pinfo_dma_h);
return -1;
@@ -3002,7 +2954,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
con_log(CL_ANN, (KERN_WARNING
"megaraid: product info failed\n"));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
pinfo, pinfo_dma_h);
return -1;
@@ -3038,7 +2990,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
"megaraid: fw version:[%s] bios version:[%s]\n",
adapter->fw_version, adapter->bios_version));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), pinfo,
pinfo_dma_h);
return 0;
@@ -3135,7 +3087,6 @@ megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
static int
megaraid_mbox_support_random_del(adapter_t *adapter)
{
- mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
int rval;
@@ -3157,8 +3108,6 @@ megaraid_mbox_support_random_del(adapter_t *adapter)
return 0;
}
- mbox = (mbox_t *)raw_mbox;
-
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = FC_DEL_LOGDRV;
@@ -3263,12 +3212,8 @@ megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
static void
megaraid_mbox_flush_cache(adapter_t *adapter)
{
- mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
-
- mbox = (mbox_t *)raw_mbox;
-
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = FLUSH_ADAPTER;
@@ -3299,7 +3244,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
- mbox64_t *mbox64;
int status = 0;
int i;
uint32_t dword;
@@ -3310,7 +3254,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
raw_mbox[0] = 0xFF;
- mbox64 = raid_dev->mbox64;
mbox = raid_dev->mbox;
/* Wait until mailbox is free */
@@ -3515,7 +3458,7 @@ megaraid_cmm_register(adapter_t *adapter)
scb->scp = NULL;
scb->state = SCB_FREE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
scb->dma_type = MRAID_DMA_NONE;
scb->dev_channel = -1;
scb->dev_target = -1;
@@ -3653,7 +3596,7 @@ megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
scb->state = SCB_ACTIVE;
scb->dma_type = MRAID_DMA_NONE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
ccb = (mbox_ccb_t *)scb->ccb;
mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
@@ -3794,10 +3737,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
static int
gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
{
- uint8_t dmajor;
-
- dmajor = megaraid_mbox_version[0];
-
hinfo->pci_vendor_id = adapter->pdev->vendor;
hinfo->pci_device_id = adapter->pdev->device;
hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor;
@@ -3843,8 +3782,8 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
- raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
- PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
+ raid_dev->sysfs_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ PAGE_SIZE, &raid_dev->sysfs_buffer_dma, GFP_KERNEL);
if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
!raid_dev->sysfs_buffer) {
@@ -3881,7 +3820,7 @@ megaraid_sysfs_free_resources(adapter_t *adapter)
kfree(raid_dev->sysfs_mbox64);
if (raid_dev->sysfs_buffer) {
- pci_free_consistent(adapter->pdev, PAGE_SIZE,
+ dma_free_coherent(&adapter->pdev->dev, PAGE_SIZE,
raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
}
}
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index c1d86d961a92..e075aeb4012f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -117,7 +117,7 @@
* @raw_mbox : raw mailbox pointer
* @mbox : mailbox
* @mbox64 : extended mailbox
- * @mbox_dma_h : maibox dma address
+ * @mbox_dma_h : mailbox dma address
* @sgl64 : 64-bit scatter-gather list
* @sgl32 : 32-bit scatter-gather list
* @sgl_dma_h : dma handle for the scatter-gather list
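
The dma_direction and sc_data_direction changes throughout this patch are a
pure rename: the PCI_DMA_* constants in <linux/pci.h> are plain defines whose
values match enum dma_data_direction, so no behavior changes:

	#define PCI_DMA_BIDIRECTIONAL	0	/* == DMA_BIDIRECTIONAL */
	#define PCI_DMA_TODEVICE	1	/* == DMA_TO_DEVICE */
	#define PCI_DMA_FROMDEVICE	2	/* == DMA_FROM_DEVICE */
	#define PCI_DMA_NONE		3	/* == DMA_NONE */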
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9aa9590c5373..9b90c716f06d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1330,11 +1330,11 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
device_id = MEGASAS_DEV_INDEX(scp);
pthru = (struct megasas_pthru_frame *)cmd->frame;
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
flags = MFI_FRAME_DIR_WRITE;
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
flags = MFI_FRAME_DIR_READ;
- else if (scp->sc_data_direction == PCI_DMA_NONE)
+ else if (scp->sc_data_direction == DMA_NONE)
flags = MFI_FRAME_DIR_NONE;
if (instance->flag_ieee == 1) {
@@ -1428,9 +1428,9 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
device_id = MEGASAS_DEV_INDEX(scp);
ldio = (struct megasas_io_frame *)cmd->frame;
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
flags = MFI_FRAME_DIR_WRITE;
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
flags = MFI_FRAME_DIR_READ;
if (instance->flag_ieee == 1) {
@@ -2240,9 +2240,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION_111));
else {
new_affiliation_111 =
- pci_zalloc_consistent(instance->pdev,
+ dma_zalloc_coherent(&instance->pdev->dev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
- &new_affiliation_111_h);
+ &new_affiliation_111_h, GFP_KERNEL);
if (!new_affiliation_111) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2302,7 +2302,7 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
}
out:
if (new_affiliation_111) {
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
new_affiliation_111,
new_affiliation_111_h);
@@ -2347,10 +2347,10 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION));
else {
new_affiliation =
- pci_zalloc_consistent(instance->pdev,
+ dma_zalloc_coherent(&instance->pdev->dev,
(MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
- &new_affiliation_h);
+ &new_affiliation_h, GFP_KERNEL);
if (!new_affiliation) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2470,7 +2470,7 @@ out:
}
if (new_affiliation)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
(MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
new_affiliation, new_affiliation_h);
@@ -2513,9 +2513,9 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
if (initial) {
instance->hb_host_mem =
- pci_zalloc_consistent(instance->pdev,
+ dma_zalloc_coherent(&instance->pdev->dev,
sizeof(struct MR_CTRL_HB_HOST_MEM),
- &instance->hb_host_mem_h);
+ &instance->hb_host_mem_h, GFP_KERNEL);
if (!instance->hb_host_mem) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
" memory for heartbeat host memory for scsi%d\n",
@@ -4995,9 +4995,8 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
context_sz = sizeof(u32);
reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
- instance->reply_queue = pci_alloc_consistent(instance->pdev,
- reply_q_sz,
- &instance->reply_queue_h);
+ instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
+ reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
if (!instance->reply_queue) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
@@ -5029,7 +5028,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
fail_fw_init:
- pci_free_consistent(instance->pdev, reply_q_sz,
+ dma_free_coherent(&instance->pdev->dev, reply_q_sz,
instance->reply_queue, instance->reply_queue_h);
fail_reply_queue:
megasas_free_cmds(instance);
@@ -5533,7 +5532,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
else {
if (instance->crash_dump_buf)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
CRASH_DMA_BUF_SIZE,
instance->crash_dump_buf,
instance->crash_dump_h);
@@ -5616,7 +5615,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
if (instance->reply_queue)
- pci_free_consistent(instance->pdev, reply_q_sz,
+ dma_free_coherent(&instance->pdev->dev, reply_q_sz,
instance->reply_queue, instance->reply_queue_h);
megasas_free_cmds(instance);
@@ -5655,10 +5654,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
}
dcmd = &cmd->frame->dcmd;
- el_info = pci_zalloc_consistent(instance->pdev,
- sizeof(struct megasas_evt_log_info),
- &el_info_h);
-
+ el_info = dma_zalloc_coherent(&instance->pdev->dev,
+ sizeof(struct megasas_evt_log_info), &el_info_h,
+ GFP_KERNEL);
if (!el_info) {
megasas_return_cmd(instance, cmd);
return -ENOMEM;
@@ -5695,8 +5693,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
eli->boot_seq_num = el_info->boot_seq_num;
dcmd_failed:
- pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
- el_info, el_info_h);
+ dma_free_coherent(&instance->pdev->dev,
+ sizeof(struct megasas_evt_log_info),
+ el_info, el_info_h);
megasas_return_cmd(instance, cmd);
@@ -6134,10 +6133,10 @@ static inline void megasas_set_adapter_type(struct megasas_instance *instance)
static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
{
- instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
- &instance->producer_h);
- instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
- &instance->consumer_h);
+ instance->producer = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(u32), &instance->producer_h, GFP_KERNEL);
+ instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(u32), &instance->consumer_h, GFP_KERNEL);
if (!instance->producer || !instance->consumer) {
dev_err(&instance->pdev->dev,
@@ -6199,11 +6198,11 @@ static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
kfree(instance->reply_map);
if (instance->adapter_type == MFI_SERIES) {
if (instance->producer)
- pci_free_consistent(instance->pdev, sizeof(u32),
+ dma_free_coherent(&instance->pdev->dev, sizeof(u32),
instance->producer,
instance->producer_h);
if (instance->consumer)
- pci_free_consistent(instance->pdev, sizeof(u32),
+ dma_free_coherent(&instance->pdev->dev, sizeof(u32),
instance->consumer,
instance->consumer_h);
} else {
@@ -6224,10 +6223,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
struct pci_dev *pdev = instance->pdev;
struct fusion_context *fusion = instance->ctrl_context;
- instance->evt_detail =
- pci_alloc_consistent(pdev,
- sizeof(struct megasas_evt_detail),
- &instance->evt_detail_h);
+ instance->evt_detail = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct megasas_evt_detail),
+ &instance->evt_detail_h, GFP_KERNEL);
if (!instance->evt_detail) {
dev_err(&instance->pdev->dev,
@@ -6250,9 +6248,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->pd_list_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
- &instance->pd_list_buf_h);
+ &instance->pd_list_buf_h, GFP_KERNEL);
if (!instance->pd_list_buf) {
dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
@@ -6260,9 +6258,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->ctrl_info_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
sizeof(struct megasas_ctrl_info),
- &instance->ctrl_info_buf_h);
+ &instance->ctrl_info_buf_h, GFP_KERNEL);
if (!instance->ctrl_info_buf) {
dev_err(&pdev->dev,
@@ -6271,9 +6269,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->ld_list_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
sizeof(struct MR_LD_LIST),
- &instance->ld_list_buf_h);
+ &instance->ld_list_buf_h, GFP_KERNEL);
if (!instance->ld_list_buf) {
dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
@@ -6281,9 +6279,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->ld_targetid_list_buf =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_LD_TARGETID_LIST),
- &instance->ld_targetid_list_buf_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_LD_TARGETID_LIST),
+ &instance->ld_targetid_list_buf_h, GFP_KERNEL);
if (!instance->ld_targetid_list_buf) {
dev_err(&pdev->dev,
@@ -6293,21 +6291,20 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
if (!reset_devices) {
instance->system_info_buf =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_DRV_SYSTEM_INFO),
- &instance->system_info_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_DRV_SYSTEM_INFO),
+ &instance->system_info_h, GFP_KERNEL);
instance->pd_info =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_PD_INFO),
- &instance->pd_info_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_PD_INFO),
+ &instance->pd_info_h, GFP_KERNEL);
instance->tgt_prop =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_TARGET_PROPERTIES),
- &instance->tgt_prop_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_TARGET_PROPERTIES),
+ &instance->tgt_prop_h, GFP_KERNEL);
instance->crash_dump_buf =
- pci_alloc_consistent(pdev,
- CRASH_DMA_BUF_SIZE,
- &instance->crash_dump_h);
+ dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h, GFP_KERNEL);
if (!instance->system_info_buf)
dev_err(&instance->pdev->dev,
@@ -6343,7 +6340,7 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
struct fusion_context *fusion = instance->ctrl_context;
if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
instance->evt_detail,
instance->evt_detail_h);
@@ -6354,41 +6351,41 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
fusion->ioc_init_request_phys);
if (instance->pd_list_buf)
- pci_free_consistent(pdev,
+ dma_free_coherent(&pdev->dev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
instance->pd_list_buf,
instance->pd_list_buf_h);
if (instance->ld_list_buf)
- pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
instance->ld_list_buf,
instance->ld_list_buf_h);
if (instance->ld_targetid_list_buf)
- pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
instance->ld_targetid_list_buf,
instance->ld_targetid_list_buf_h);
if (instance->ctrl_info_buf)
- pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
+ dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
instance->ctrl_info_buf,
instance->ctrl_info_buf_h);
if (instance->system_info_buf)
- pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
instance->system_info_buf,
instance->system_info_h);
if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
instance->pd_info, instance->pd_info_h);
if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
instance->tgt_prop, instance->tgt_prop_h);
if (instance->crash_dump_buf)
- pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
instance->crash_dump_buf,
instance->crash_dump_h);
}
@@ -6516,17 +6513,20 @@ static int megasas_probe_one(struct pci_dev *pdev,
if (instance->requestorId) {
if (instance->PlasmaFW111) {
instance->vf_affiliation_111 =
- pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
- &instance->vf_affiliation_111_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &instance->vf_affiliation_111_h,
+ GFP_KERNEL);
if (!instance->vf_affiliation_111)
dev_warn(&pdev->dev, "Can't allocate "
"memory for VF affiliation buffer\n");
} else {
instance->vf_affiliation =
- pci_alloc_consistent(pdev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- &instance->vf_affiliation_h);
+ dma_alloc_coherent(&pdev->dev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &instance->vf_affiliation_h,
+ GFP_KERNEL);
if (!instance->vf_affiliation)
dev_warn(&pdev->dev, "Can't allocate "
"memory for VF affiliation buffer\n");
@@ -6994,19 +6994,19 @@ skip_firing_dcmds:
}
if (instance->vf_affiliation)
- pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+ dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
instance->vf_affiliation,
instance->vf_affiliation_h);
if (instance->vf_affiliation_111)
- pci_free_consistent(pdev,
+ dma_free_coherent(&pdev->dev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
instance->vf_affiliation_111,
instance->vf_affiliation_111_h);
if (instance->hb_host_mem)
- pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
instance->hb_host_mem,
instance->hb_host_mem_h);
@@ -7254,7 +7254,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
/*
* We don't change the dma_coherent_mask, so
- * pci_alloc_consistent only returns 32bit addresses
+ * dma_alloc_coherent only returns 32bit addresses
*/
if (instance->consistent_mask_64bit) {
kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
@@ -7523,6 +7523,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
get_user(user_sense_off, &cioc->sense_off))
return -EFAULT;
+ if (local_sense_off != user_sense_off)
+ return -EINVAL;
+
if (local_sense_len) {
void __user **sense_ioc_ptr =
(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index c7f95bace353..f74b5ea24f0f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -684,8 +684,8 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = pci_zalloc_consistent(instance->pdev, array_size,
- &fusion->rdpq_phys);
+ fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev,
+ array_size, &fusion->rdpq_phys, GFP_KERNEL);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
@@ -813,7 +813,7 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
dma_pool_destroy(fusion->reply_frames_desc_pool_align);
if (fusion->rdpq_virt)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
fusion->rdpq_virt, fusion->rdpq_phys);
}
@@ -2209,7 +2209,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ if (scp->sc_data_direction == DMA_FROM_DEVICE)
cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
else
cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
@@ -2238,7 +2238,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[31] = (u8)(num_blocks & 0xff);
/* set SCSI IO EEDPFlags */
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
+ if (scp->sc_data_direction == DMA_FROM_DEVICE) {
io_request->EEDPFlags = cpu_to_le16(
MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
@@ -2621,7 +2621,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
scsi_buff_len = scsi_bufflen(scp);
io_request->DataLength = cpu_to_le32(scsi_buff_len);
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ if (scp->sc_data_direction == DMA_FROM_DEVICE)
io_info.isRead = 1;
local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
@@ -3088,9 +3088,9 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
io_request->SGLOffset0 =
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 82e01dbe90af..ec6940f2fcb3 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1915,8 +1915,8 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* We use the PCI APIs for now until the generic one gets fixed
* enough or until we get some macio-specific versions
*/
- dma_cmd_space = pci_zalloc_consistent(macio_get_pci_dev(mdev),
- ms->dma_cmd_size, &dma_cmd_bus);
+ dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev,
+ ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL);
if (dma_cmd_space == NULL) {
printk(KERN_ERR "mesh: can't allocate DMA table\n");
goto out_unmap;
@@ -1974,7 +1974,7 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
*/
mesh_shutdown(mdev);
set_mesh_power(ms, 0);
- pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+ dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
ms->dma_cmd_space, ms->dma_cmd_bus);
out_unmap:
iounmap(ms->dma);
@@ -2007,7 +2007,7 @@ static int mesh_remove(struct macio_dev *mdev)
iounmap(ms->dma);
/* Free DMA commands memory */
- pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+ dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
ms->dma_cmd_space, ms->dma_cmd_bus);
/* Release memory resources */
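
For reference, dma_zalloc_coherent(), the replacement used for every
pci_zalloc_consistent() call in this patch, was in kernels of this vintage a
trivial inline in <linux/dma-mapping.h>:

	static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
						dma_addr_t *dma_handle, gfp_t flag)
	{
		return dma_alloc_coherent(dev, size, dma_handle,
					  flag | __GFP_ZERO);
	}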
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 59d7844ee022..2500377d0723 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -122,8 +122,8 @@ mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
if (!(status & MPT3_CMD_RESET))
issue_reset = 1;
- pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
- ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout"));
+ ioc_err(ioc, "Command %s\n",
+ issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
_debug_dump_mf(mpi_request, sz);
return issue_reset;
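
The ioc_err()/ioc_warn()/ioc_info() helpers that replace the MPT3SAS_FMT
prints from here on are introduced elsewhere in this series; assuming the
usual mpt3sas_base.h definitions, they simply prepend the adapter name:

	#define ioc_err(ioc, fmt, ...) \
		pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
	#define ioc_warn(ioc, fmt, ...) \
		pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
	#define ioc_info(ioc, fmt, ...) \
		pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)

so each call site drops the trailing ioc->name argument that MPT3SAS_FMT used
to consume.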
@@ -336,9 +336,7 @@ _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
return ct->chain_buffer;
}
}
- pr_info(MPT3SAS_FMT
- "Provided chain_buffer_dma address is not in the lookup list\n",
- ioc->name);
+ ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
return NULL;
}
@@ -394,7 +392,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
/* Get scsi_cmd using smid */
scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (scmd == NULL) {
- pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
+ ioc_err(ioc, "scmd is NULL\n");
return;
}
@@ -532,11 +530,11 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
struct pci_dev *pdev;
- if ((ioc == NULL))
+ if (!ioc)
return -1;
pdev = ioc->pdev;
- if ((pdev == NULL))
+ if (!pdev)
return -1;
pci_stop_and_remove_bus_device_locked(pdev);
return 0;
@@ -566,8 +564,7 @@ _base_fault_reset_work(struct work_struct *work)
doorbell = mpt3sas_base_get_iocstate(ioc, 0);
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
- pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
- ioc->name);
+ ioc_err(ioc, "SAS host is non-operational !!!!\n");
/* It may be possible that EEH recovery can resolve some of
* pci bus failure issues rather removing the dead ioc function
@@ -600,13 +597,11 @@ _base_fault_reset_work(struct work_struct *work)
p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
"%s_dead_ioc_%d", ioc->driver_name, ioc->id);
if (IS_ERR(p))
- pr_err(MPT3SAS_FMT
- "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
+ __func__);
else
- pr_err(MPT3SAS_FMT
- "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
+ __func__);
return; /* don't rearm timer */
}
@@ -614,8 +609,8 @@ _base_fault_reset_work(struct work_struct *work)
if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
- __func__, (rc == 0) ? "success" : "failed");
+ ioc_warn(ioc, "%s: hard reset: %s\n",
+ __func__, rc == 0 ? "success" : "failed");
doorbell = mpt3sas_base_get_iocstate(ioc, 0);
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
mpt3sas_base_fault_info(ioc, doorbell &
@@ -657,8 +652,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
ioc->fault_reset_work_q =
create_singlethread_workqueue(ioc->fault_reset_work_q_name);
if (!ioc->fault_reset_work_q) {
- pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
- ioc->name, __func__, __LINE__);
+ ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
return;
}
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
@@ -700,8 +694,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
{
- pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
- ioc->name, fault_code);
+ ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}
/**
@@ -728,8 +721,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_base_fault_info(ioc , doorbell);
else {
writel(0xC0FFEE00, &ioc->chip->Doorbell);
- pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
- ioc->name);
+ ioc_err(ioc, "Firmware is halted due to command timeout\n");
}
if (ioc->fwfault_debug == 2)
@@ -956,8 +948,8 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
break;
}
- pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
- ioc->name, desc, ioc_status, request_hdr, func_str);
+ ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
+ desc, ioc_status, request_hdr, func_str);
_debug_dump_mf(request_hdr, frame_sz/4);
}
@@ -1003,9 +995,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
{
Mpi2EventDataSasDiscovery_t *event_data =
(Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
- pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
- (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
- "start" : "stop");
+ ioc_info(ioc, "Discovery: (%s)",
+ event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+ "start" : "stop");
if (event_data->DiscoveryStatus)
pr_cont(" discovery_status(0x%08x)",
le32_to_cpu(event_data->DiscoveryStatus));
@@ -1059,14 +1051,13 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
{
Mpi26EventDataPCIeEnumeration_t *event_data =
(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
- pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
- (event_data->ReasonCode ==
- MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
- "start" : "stop");
+ ioc_info(ioc, "PCIE Enumeration: (%s)",
+ event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
+ "start" : "stop");
if (event_data->EnumerationStatus)
- pr_info("enumeration_status(0x%08x)",
- le32_to_cpu(event_data->EnumerationStatus));
- pr_info("\n");
+ pr_cont("enumeration_status(0x%08x)",
+ le32_to_cpu(event_data->EnumerationStatus));
+ pr_cont("\n");
return;
}
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
@@ -1077,7 +1068,7 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
if (!desc)
return;
- pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
+ ioc_info(ioc, "%s\n", desc);
}
/**
@@ -1128,11 +1119,9 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
break;
}
- pr_warn(MPT3SAS_FMT
- "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
- ioc->name, log_info,
- originator_str, sas_loginfo.dw.code,
- sas_loginfo.dw.subcode);
+ ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
+ log_info,
+ originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
/**
@@ -1152,8 +1141,8 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
@@ -1249,9 +1238,9 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
delayed_event_ack->EventContext = mpi_reply->EventContext;
list_add_tail(&delayed_event_ack->list,
&ioc->delayed_event_ack_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED: EVENT ACK: event (0x%04x)\n",
- ioc->name, le16_to_cpu(mpi_reply->Event)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
+ le16_to_cpu(mpi_reply->Event)));
goto out;
}
@@ -2270,7 +2259,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sges_left = scsi_dma_map(scmd);
if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device,
- "pci_map_sg failed: request for %d bytes!\n",
+ "scsi_dma_map failed: request for %d bytes!\n",
scsi_bufflen(scmd));
return -ENOMEM;
}
@@ -2418,7 +2407,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sges_left = scsi_dma_map(scmd);
if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device,
- "pci_map_sg failed: request for %d bytes!\n",
+ "scsi_dma_map failed: request for %d bytes!\n",
scsi_bufflen(scmd));
return -ENOMEM;
}
@@ -2563,44 +2552,41 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
+ u64 required_mask, coherent_mask;
struct sysinfo s;
- u64 consistent_dma_mask;
if (ioc->is_mcpu_endpoint)
goto try_32bit;
+ required_mask = dma_get_required_mask(&pdev->dev);
+	if (sizeof(dma_addr_t) == 4 || required_mask <= DMA_BIT_MASK(32))
+ goto try_32bit;
+
if (ioc->dma_mask)
- consistent_dma_mask = DMA_BIT_MASK(64);
+ coherent_mask = DMA_BIT_MASK(64);
else
- consistent_dma_mask = DMA_BIT_MASK(32);
-
- if (sizeof(dma_addr_t) > 4) {
- const uint64_t required_mask =
- dma_get_required_mask(&pdev->dev);
- if ((required_mask > DMA_BIT_MASK(32)) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
- ioc->base_add_sg_single = &_base_add_sg_single_64;
- ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = 64;
- goto out;
- }
- }
+ coherent_mask = DMA_BIT_MASK(32);
+
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_coherent_mask(&pdev->dev, coherent_mask))
+ goto try_32bit;
+
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ ioc->dma_mask = 64;
+ goto out;
try_32bit:
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- ioc->base_add_sg_single = &_base_add_sg_single_32;
- ioc->sge_size = sizeof(Mpi2SGESimple32_t);
- ioc->dma_mask = 32;
- } else
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
return -ENODEV;
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ ioc->dma_mask = 32;
out:
si_meminfo(&s);
- pr_info(MPT3SAS_FMT
- "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
- ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
+ ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+ ioc->dma_mask, convert_to_kb(s.totalram));
return 0;
}
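
Stripped of the driver-specific sge_size/dma_mask bookkeeping, the rewritten
fallback above is the standard 64-then-32 mask negotiation, roughly:

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;	/* no usable DMA addressing mode */

The driver keeps dma_set_mask() and dma_set_coherent_mask() separate on the
64-bit leg because, when ioc->dma_mask is clear, it deliberately pairs a
64-bit streaming mask with a 32-bit coherent mask, a combination that
dma_set_mask_and_coherent() cannot express.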
@@ -2639,8 +2625,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
if (!base) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
- ioc->name));
+ dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
return -EINVAL;
}
@@ -2658,9 +2643,8 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
pci_read_config_word(ioc->pdev, base + 2, &message_control);
ioc->msix_vector_count = (message_control & 0x3FF) + 1;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "msix is supported, vector_count(%d)\n",
- ioc->name, ioc->msix_vector_count));
+ dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
+ ioc->msix_vector_count));
return 0;
}
@@ -2702,8 +2686,8 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
if (!reply_q) {
- pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
- ioc->name, (int)sizeof(struct adapter_reply_queue));
+ ioc_err(ioc, "unable to allocate memory %zu!\n",
+ sizeof(struct adapter_reply_queue));
return -ENOMEM;
}
reply_q->ioc = ioc;
@@ -2719,7 +2703,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
IRQF_SHARED, reply_q->name, reply_q);
if (r) {
- pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
+ pr_err("%s: unable to allocate interrupt %d!\n",
reply_q->name, pci_irq_vector(pdev, index));
kfree(reply_q);
return -EBUSY;
@@ -2761,8 +2745,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
reply_q->msix_index);
if (!mask) {
- pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
- ioc->name, reply_q->msix_index);
+ ioc_warn(ioc, "no affinity for msi %x\n",
+ reply_q->msix_index);
continue;
}
@@ -2833,9 +2817,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_queue_count = min_t(int, ioc->cpu_count,
ioc->msix_vector_count);
- printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
- ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
- ioc->cpu_count, max_msix_vectors);
+ ioc_info(ioc, "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
+ ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
local_max_msix_vectors = (reset_devices) ? 1 : 8;
@@ -2857,9 +2840,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
irq_flags);
if (r < 0) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "pci_alloc_irq_vectors failed (r=%d) !!!\n",
- ioc->name, r));
+ dfailprintk(ioc,
+ ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
+ r));
goto try_ioapic;
}
@@ -2882,9 +2865,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_queue_count = 1;
r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
if (r < 0) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
- ioc->name, r));
+ dfailprintk(ioc,
+ ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
+ r));
} else
r = _base_request_irq(ioc, 0);
@@ -2900,8 +2883,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
{
struct pci_dev *pdev = ioc->pdev;
- dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
- ioc->name, __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
_base_free_irq(ioc);
_base_disable_msix(ioc);
@@ -2939,13 +2921,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
phys_addr_t chip_phys = 0;
struct adapter_reply_queue *reply_q;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
- ioc->name, __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
if (pci_enable_device_mem(pdev)) {
- pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
- ioc->name);
+ ioc_warn(ioc, "pci_enable_device_mem: failed\n");
ioc->bars = 0;
return -ENODEV;
}
@@ -2953,8 +2933,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (pci_request_selected_regions(pdev, ioc->bars,
ioc->driver_name)) {
- pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
- ioc->name);
+ ioc_warn(ioc, "pci_request_selected_regions: failed\n");
ioc->bars = 0;
r = -ENODEV;
goto out_fail;
@@ -2967,8 +2946,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (_base_config_dma_addressing(ioc, pdev) != 0) {
- pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
- ioc->name, pci_name(pdev));
+ ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
r = -ENODEV;
goto out_fail;
}
@@ -2991,8 +2969,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->chip == NULL) {
- pr_err(MPT3SAS_FMT "unable to map adapter memory! "
- " or resource not found\n", ioc->name);
+ ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
r = -EINVAL;
goto out_fail;
}
@@ -3026,9 +3003,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
ioc->combined_reply_index_count,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->replyPostRegisterIndex) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "allocation for reply Post Register Index failed!!!\n",
- ioc->name));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
r = -ENOMEM;
goto out_fail;
}
@@ -3053,15 +3029,15 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
}
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
- pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
- reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
- "IO-APIC enabled"),
- pci_irq_vector(ioc->pdev, reply_q->msix_index));
+ pr_info("%s: %s enabled: IRQ %d\n",
+ reply_q->name,
+ ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
+ pci_irq_vector(ioc->pdev, reply_q->msix_index));
- pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
- ioc->name, &chip_phys, ioc->chip, memap_sz);
- pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
- ioc->name, (unsigned long long)pio_chip, pio_sz);
+ ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
+ &chip_phys, ioc->chip, memap_sz);
+ ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
+ (unsigned long long)pio_chip, pio_sz);
/* Save PCI configuration state for recovery from PCI AER/EEH errors */
pci_save_state(pdev);
@@ -3176,8 +3152,7 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (list_empty(&ioc->internal_free_list)) {
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- pr_err(MPT3SAS_FMT "%s: smid not available\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: smid not available\n", __func__);
return 0;
}
@@ -3545,89 +3520,85 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI2_MFGPAGE_DEVID_SAS2008:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_INTEL_RMS2LL080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS2LL080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS2LL080_BRANDING);
break;
case MPT2SAS_INTEL_RMS2LL040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS2LL040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS2LL040_BRANDING);
break;
case MPT2SAS_INTEL_SSD910_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_SSD910_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_SSD910_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
case MPI2_MFGPAGE_DEVID_SAS2308_2:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_INTEL_RS25GB008_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RS25GB008_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RS25GB008_BRANDING);
break;
case MPT2SAS_INTEL_RMS25JB080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25JB080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25JB080_BRANDING);
break;
case MPT2SAS_INTEL_RMS25JB040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25JB040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25JB040_BRANDING);
break;
case MPT2SAS_INTEL_RMS25KB080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25KB080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25KB080_BRANDING);
break;
case MPT2SAS_INTEL_RMS25KB040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25KB040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25KB040_BRANDING);
break;
case MPT2SAS_INTEL_RMS25LB040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25LB040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25LB040_BRANDING);
break;
case MPT2SAS_INTEL_RMS25LB080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25LB080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25LB080_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
case MPI25_MFGPAGE_DEVID_SAS3008:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_INTEL_RMS3JC080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RMS3JC080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RMS3JC080_BRANDING);
break;
case MPT3SAS_INTEL_RS3GC008_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RS3GC008_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RS3GC008_BRANDING);
break;
case MPT3SAS_INTEL_RS3FC044_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RS3FC044_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RS3FC044_BRANDING);
break;
case MPT3SAS_INTEL_RS3UC080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RS3UC080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RS3UC080_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
@@ -3636,57 +3607,54 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI2_MFGPAGE_DEVID_SAS2008:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_BRANDING);
break;
case MPT2SAS_DELL_6GBPS_SAS_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_6GBPS_SAS_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_6GBPS_SAS_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
case MPI25_MFGPAGE_DEVID_SAS3008:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_DELL_12G_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_DELL_12G_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_DELL_12G_HBA_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
default:
- pr_info(MPT3SAS_FMT
- "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
- ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
@@ -3695,46 +3663,42 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI25_MFGPAGE_DEVID_SAS3008:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
break;
case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
break;
case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
case MPI25_MFGPAGE_DEVID_SAS3108_1:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
break;
case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
- );
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
default:
- pr_info(MPT3SAS_FMT
- "Cisco SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
@@ -3743,43 +3707,40 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI2_MFGPAGE_DEVID_SAS2004:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
case MPI2_MFGPAGE_DEVID_SAS2308_2:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_HP_2_4_INTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_2_4_INTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_2_4_INTERNAL_BRANDING);
break;
case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
break;
case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
break;
case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
default:
- pr_info(MPT3SAS_FMT
- "HP SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
default:
@@ -3806,28 +3767,25 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
u16 smid, ioc_status;
size_t data_length;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
data_length = sizeof(Mpi2FWImageHeader_t);
- fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
- &fwpkg_data_dma);
+ fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
+ &fwpkg_data_dma, GFP_KERNEL);
if (!fwpkg_data) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENOMEM;
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
r = -EAGAIN;
goto out;
}
@@ -3846,11 +3804,9 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
/* Wait for 15 seconds */
wait_for_completion_timeout(&ioc->base_cmds.done,
FW_IMG_HDR_READ_TIMEOUT*HZ);
- pr_info(MPT3SAS_FMT "%s: complete\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: complete\n", __func__);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi25FWUploadRequest_t)/4);
r = -ETIME;
@@ -3864,13 +3820,11 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
if (FWImgHdr->PackageVersion.Word) {
- pr_info(MPT3SAS_FMT "FW Package Version"
- "(%02d.%02d.%02d.%02d)\n",
- ioc->name,
- FWImgHdr->PackageVersion.Struct.Major,
- FWImgHdr->PackageVersion.Struct.Minor,
- FWImgHdr->PackageVersion.Struct.Unit,
- FWImgHdr->PackageVersion.Struct.Dev);
+ ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
+ FWImgHdr->PackageVersion.Struct.Major,
+ FWImgHdr->PackageVersion.Struct.Minor,
+ FWImgHdr->PackageVersion.Struct.Unit,
+ FWImgHdr->PackageVersion.Struct.Dev);
}
} else {
_debug_dump_mf(&mpi_reply,
@@ -3881,7 +3835,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
out:
if (fwpkg_data)
- pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
+ dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
fwpkg_data_dma);
return r;
}
@@ -3900,18 +3854,17 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
strncpy(desc, ioc->manu_pg0.ChipName, 16);
- pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
- "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
- ioc->name, desc,
- (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
- (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
- (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
- ioc->facts.FWVersion.Word & 0x000000FF,
- ioc->pdev->revision,
- (bios_version & 0xFF000000) >> 24,
- (bios_version & 0x00FF0000) >> 16,
- (bios_version & 0x0000FF00) >> 8,
- bios_version & 0x000000FF);
+ ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+ desc,
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF,
+ ioc->pdev->revision,
+ (bios_version & 0xFF000000) >> 24,
+ (bios_version & 0x00FF0000) >> 16,
+ (bios_version & 0x0000FF00) >> 8,
+ bios_version & 0x000000FF);
_base_display_OEMs_branding(ioc);
@@ -3920,82 +3873,81 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
i++;
}
- pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
+ ioc_info(ioc, "Protocol=(");
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
- pr_info("Initiator");
+ pr_cont("Initiator");
i++;
}
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
- pr_info("%sTarget", i ? "," : "");
+ pr_cont("%sTarget", i ? "," : "");
i++;
}
i = 0;
- pr_info("), ");
- pr_info("Capabilities=(");
+ pr_cont("), Capabilities=(");
if (!ioc->hide_ir_msg) {
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
- pr_info("Raid");
+ pr_cont("Raid");
i++;
}
}
if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
- pr_info("%sTLR", i ? "," : "");
+ pr_cont("%sTLR", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
- pr_info("%sMulticast", i ? "," : "");
+ pr_cont("%sMulticast", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
- pr_info("%sBIDI Target", i ? "," : "");
+ pr_cont("%sBIDI Target", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
- pr_info("%sEEDP", i ? "," : "");
+ pr_cont("%sEEDP", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
- pr_info("%sSnapshot Buffer", i ? "," : "");
+ pr_cont("%sSnapshot Buffer", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
- pr_info("%sDiag Trace Buffer", i ? "," : "");
+ pr_cont("%sDiag Trace Buffer", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
- pr_info("%sDiag Extended Buffer", i ? "," : "");
+ pr_cont("%sDiag Extended Buffer", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
- pr_info("%sTask Set Full", i ? "," : "");
+ pr_cont("%sTask Set Full", i ? "," : "");
i++;
}
iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
- pr_info("%sNCQ", i ? "," : "");
+ pr_cont("%sNCQ", i ? "," : "");
i++;
}
- pr_info(")\n");
+ pr_cont(")\n");
}
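/*
 * Editor's note: the Protocol=()/Capabilities=() banner is assembled
 * piecewise, so only the first fragment goes through ioc_info() (which
 * prepends the adapter name); every following fragment uses pr_cont()
 * so the pieces stay on one log line instead of each becoming a
 * separate, prefixed record. Hedged sketch of the pattern (the *_flag
 * names are illustrative only):
 *
 *	ioc_info(ioc, "Protocol=(");
 *	if (initiator_flag)
 *		pr_cont("Initiator");
 *	if (target_flag)
 *		pr_cont("%sTarget", initiator_flag ? "," : "");
 *	pr_cont(")\n");
 */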
/**
@@ -4028,21 +3980,21 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -4074,11 +4026,11 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
else
dmd_new =
dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
- pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
- ioc->name, dmd_orignal, dmd_new);
- pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
- ioc->name, io_missing_delay_original,
- io_missing_delay);
+ ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
+ dmd_orignal, dmd_new);
+ ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
+ io_missing_delay_original,
+ io_missing_delay);
ioc->device_missing_delay = dmd_new;
ioc->io_missing_delay = io_missing_delay;
}
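/*
 * Editor's note: besides switching to the ioc_*() wrappers, the patch
 * joins format strings that were previously wrapped across lines.
 * This follows the kernel coding-style rule that user-visible strings
 * are never split, even past 80 columns, so log output stays greppable
 * in the source.
 */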
@@ -4189,33 +4141,32 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
struct chain_tracker *ct;
struct reply_post_struct *rps;
- dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->request) {
- pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+ dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
ioc->request, ioc->request_dma);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "request_pool(0x%p): free\n",
- ioc->name, ioc->request));
+ dexitprintk(ioc,
+ ioc_info(ioc, "request_pool(0x%p): free\n",
+ ioc->request));
ioc->request = NULL;
}
if (ioc->sense) {
dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
dma_pool_destroy(ioc->sense_dma_pool);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "sense_pool(0x%p): free\n",
- ioc->name, ioc->sense));
+ dexitprintk(ioc,
+ ioc_info(ioc, "sense_pool(0x%p): free\n",
+ ioc->sense));
ioc->sense = NULL;
}
if (ioc->reply) {
dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
dma_pool_destroy(ioc->reply_dma_pool);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_pool(0x%p): free\n",
- ioc->name, ioc->reply));
+ dexitprintk(ioc,
+ ioc_info(ioc, "reply_pool(0x%p): free\n",
+ ioc->reply));
ioc->reply = NULL;
}
@@ -4223,9 +4174,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
ioc->reply_free_dma);
dma_pool_destroy(ioc->reply_free_dma_pool);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_free_pool(0x%p): free\n",
- ioc->name, ioc->reply_free));
+ dexitprintk(ioc,
+ ioc_info(ioc, "reply_free_pool(0x%p): free\n",
+ ioc->reply_free));
ioc->reply_free = NULL;
}
@@ -4237,9 +4188,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_post_free_dma_pool,
rps->reply_post_free,
rps->reply_post_free_dma);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_post_free_pool(0x%p): free\n",
- ioc->name, rps->reply_post_free));
+ dexitprintk(ioc,
+ ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
+ rps->reply_post_free));
rps->reply_post_free = NULL;
}
} while (ioc->rdpq_array_enable &&
@@ -4267,10 +4218,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->config_page) {
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "config_page(0x%p): free\n", ioc->name,
- ioc->config_page));
- pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+ dexitprintk(ioc,
+ ioc_info(ioc, "config_page(0x%p): free\n",
+ ioc->config_page));
+ dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
ioc->config_page, ioc->config_page_dma);
}
@@ -4338,8 +4289,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
int i, j;
struct chain_tracker *ct;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
retry_sz = 0;
@@ -4368,10 +4318,8 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
sg_tablesize = min_t(unsigned short, sg_tablesize,
SG_MAX_SEGMENTS);
- pr_warn(MPT3SAS_FMT
- "sg_tablesize(%u) is bigger than kernel "
- "defined SG_CHUNK_SIZE(%u)\n", ioc->name,
- sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
+ ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
+ sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
}
ioc->shost->sg_tablesize = sg_tablesize;
}
@@ -4381,9 +4329,8 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
INTERNAL_SCSIIO_CMDS_COUNT)) {
- pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
- Credits, it has just %d number of credits\n",
- ioc->name, facts->RequestCredit);
+ ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
+ facts->RequestCredit);
return -ENOMEM;
}
ioc->internal_depth = 10;
@@ -4482,11 +4429,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
- "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
- "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
- ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
- ioc->chains_needed_per_io));
+ dinitprintk(ioc,
+ ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
+ ioc->max_sges_in_main_message,
+ ioc->max_sges_in_chain_message,
+ ioc->shost->sg_tablesize,
+ ioc->chains_needed_per_io));
/* reply post queue, 16 byte align */
reply_post_free_sz = ioc->reply_post_queue_depth *
@@ -4501,48 +4449,40 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
sizeof(struct reply_post_struct), GFP_KERNEL);
if (!ioc->reply_post) {
- pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
goto out;
}
ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
&ioc->pdev->dev, sz, 16, 0);
if (!ioc->reply_post_free_dma_pool) {
- pr_err(MPT3SAS_FMT
- "reply_post_free pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
goto out;
}
i = 0;
do {
ioc->reply_post[i].reply_post_free =
- dma_pool_alloc(ioc->reply_post_free_dma_pool,
+ dma_pool_zalloc(ioc->reply_post_free_dma_pool,
GFP_KERNEL,
&ioc->reply_post[i].reply_post_free_dma);
if (!ioc->reply_post[i].reply_post_free) {
- pr_err(MPT3SAS_FMT
- "reply_post_free pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
goto out;
}
- memset(ioc->reply_post[i].reply_post_free, 0, sz);
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply post free pool (0x%p): depth(%d),"
- "element_size(%d), pool_size(%d kB)\n", ioc->name,
- ioc->reply_post[i].reply_post_free,
- ioc->reply_post_queue_depth, 8, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_post_free_dma = (0x%llx)\n", ioc->name,
- (unsigned long long)
- ioc->reply_post[i].reply_post_free_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->reply_post[i].reply_post_free,
+ ioc->reply_post_queue_depth,
+ 8, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
+ (u64)ioc->reply_post[i].reply_post_free_dma));
total_sz += sz;
} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
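/*
 * Editor's note: dma_pool_zalloc() is simply dma_pool_alloc() with
 * __GFP_ZERO added to the gfp flags, so the explicit memset() after
 * each allocation above becomes redundant and is dropped. Sketch of
 * the equivalence (pool and dma are placeholders):
 *
 *	p = dma_pool_zalloc(pool, GFP_KERNEL, &dma);
 *	equivalent to:
 *	p = dma_pool_alloc(pool, GFP_KERNEL | __GFP_ZERO, &dma);
 */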
if (ioc->dma_mask == 64) {
if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
- pr_warn(MPT3SAS_FMT
- "no suitable consistent DMA mask for %s\n",
- ioc->name, pci_name(ioc->pdev));
+ ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
+ pci_name(ioc->pdev));
goto out;
}
}
@@ -4554,9 +4494,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* with some internal commands that could be outstanding
*/
ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "scsi host: can_queue depth (%d)\n",
- ioc->name, ioc->shost->can_queue));
+ dinitprintk(ioc,
+ ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
+ ioc->shost->can_queue));
/* contiguous pool for request and chains, 16 byte align, one extra "
@@ -4572,12 +4512,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
sz += (ioc->internal_depth * ioc->request_sz);
ioc->request_dma_sz = sz;
- ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+ ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
+ &ioc->request_dma, GFP_KERNEL);
if (!ioc->request) {
- pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
- "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
- "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
- ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+ ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
+ ioc->hba_queue_depth, ioc->chains_needed_per_io,
+ ioc->request_sz, sz / 1024);
if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
goto out;
retry_sz = 64;
@@ -4587,10 +4527,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
if (retry_sz)
- pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
- "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
- "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
- ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+ ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
+ ioc->hba_queue_depth, ioc->chains_needed_per_io,
+ ioc->request_sz, sz / 1024);
/* hi-priority queue */
ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
@@ -4604,24 +4543,26 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
ioc->request_sz);
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
- (ioc->hba_queue_depth * ioc->request_sz)/1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->request, ioc->hba_queue_depth,
+ ioc->request_sz,
+ (ioc->hba_queue_depth * ioc->request_sz) / 1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
- ioc->name, (unsigned long long) ioc->request_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "request pool: dma(0x%llx)\n",
+ (unsigned long long)ioc->request_dma));
total_sz += sz;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
- ioc->name, ioc->request, ioc->scsiio_depth));
+ dinitprintk(ioc,
+ ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
+ ioc->request, ioc->scsiio_depth));
ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
if (!ioc->chain_lookup) {
- pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages "
- "failed\n", ioc->name);
+ ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
goto out;
}
@@ -4629,8 +4570,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->scsiio_depth; i++) {
ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
if (!ioc->chain_lookup[i].chains_per_smid) {
- pr_err(MPT3SAS_FMT "chain_lookup: "
- " kzalloc failed\n", ioc->name);
+ ioc_err(ioc, "chain_lookup: kzalloc failed\n");
goto out;
}
}
@@ -4639,29 +4579,27 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
sizeof(struct request_tracker), GFP_KERNEL);
if (!ioc->hpr_lookup) {
- pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
- ioc->name);
+ ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
goto out;
}
ioc->hi_priority_smid = ioc->scsiio_depth + 1;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "hi_priority(0x%p): depth(%d), start smid(%d)\n",
- ioc->name, ioc->hi_priority,
- ioc->hi_priority_depth, ioc->hi_priority_smid));
+ dinitprintk(ioc,
+ ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
+ ioc->hi_priority,
+ ioc->hi_priority_depth, ioc->hi_priority_smid));
/* initialize internal queue smid's */
ioc->internal_lookup = kcalloc(ioc->internal_depth,
sizeof(struct request_tracker), GFP_KERNEL);
if (!ioc->internal_lookup) {
- pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
- ioc->name);
+ ioc_err(ioc, "internal_lookup: kcalloc failed\n");
goto out;
}
ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "internal(0x%p): depth(%d), start smid(%d)\n",
- ioc->name, ioc->internal,
- ioc->internal_depth, ioc->internal_smid));
+ dinitprintk(ioc,
+ ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
+ ioc->internal,
+ ioc->internal_depth, ioc->internal_smid));
/*
* The number of NVMe page sized blocks needed is:
* (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
@@ -4685,17 +4623,14 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
if (!ioc->pcie_sg_lookup) {
- pr_info(MPT3SAS_FMT
- "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+ ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
goto out;
}
sz = nvme_blocks_needed * ioc->page_size;
ioc->pcie_sgl_dma_pool =
dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
if (!ioc->pcie_sgl_dma_pool) {
- pr_info(MPT3SAS_FMT
- "PCIe SGL pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
goto out;
}
@@ -4708,9 +4643,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->pcie_sgl_dma_pool, GFP_KERNEL,
&ioc->pcie_sg_lookup[i].pcie_sgl_dma);
if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
- pr_info(MPT3SAS_FMT
- "PCIe SGL pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
goto out;
}
for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
@@ -4724,20 +4657,20 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
- "element_size(%d), pool_size(%d kB)\n", ioc->name,
- ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
- "fit in a PRP page(%d)\n", ioc->name,
- ioc->chains_per_prp_buffer));
+ dinitprintk(ioc,
+ ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->scsiio_depth, sz,
+ (sz * ioc->scsiio_depth) / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
+ ioc->chains_per_prp_buffer));
total_sz += sz * ioc->scsiio_depth;
}
ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
ioc->chain_segment_sz, 16, 0);
if (!ioc->chain_dma_pool) {
- pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
goto out;
}
for (i = 0; i < ioc->scsiio_depth; i++) {
@@ -4748,8 +4681,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->chain_dma_pool, GFP_KERNEL,
&ct->chain_buffer_dma);
if (!ct->chain_buffer) {
- pr_err(MPT3SAS_FMT "chain_lookup: "
- " pci_pool_alloc failed\n", ioc->name);
+ ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
_base_release_memory_pools(ioc);
goto out;
}
@@ -4757,25 +4689,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
total_sz += ioc->chain_segment_sz;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
- ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->chain_depth, ioc->chain_segment_sz,
+ (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
/* sense buffers, 4 byte align */
sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4, 0);
if (!ioc->sense_dma_pool) {
- pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: dma_pool_create failed\n");
goto out;
}
ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
&ioc->sense_dma);
if (!ioc->sense) {
- pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
goto out;
}
/* sense buffer requires to be in same 4 gb region.
@@ -4797,24 +4727,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
dma_pool_create("sense pool", &ioc->pdev->dev, sz,
roundup_pow_of_two(sz), 0);
if (!ioc->sense_dma_pool) {
- pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: pci_pool_create failed\n");
goto out;
}
ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
&ioc->sense_dma);
if (!ioc->sense) {
- pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
goto out;
}
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
- "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
- SCSI_SENSE_BUFFERSIZE, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
- ioc->name, (unsigned long long)ioc->sense_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->sense, ioc->scsiio_depth,
+ SCSI_SENSE_BUFFERSIZE, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "sense_dma(0x%llx)\n",
+ (unsigned long long)ioc->sense_dma));
total_sz += sz;
/* reply pool, 4 byte align */
@@ -4822,25 +4751,24 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
4, 0);
if (!ioc->reply_dma_pool) {
- pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "reply pool: dma_pool_create failed\n");
goto out;
}
ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
&ioc->reply_dma);
if (!ioc->reply) {
- pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
goto out;
}
ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->reply,
- ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
- ioc->name, (unsigned long long)ioc->reply_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->reply, ioc->reply_free_queue_depth,
+ ioc->reply_sz, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_dma(0x%llx)\n",
+ (unsigned long long)ioc->reply_dma));
total_sz += sz;
/* reply free queue, 16 byte align */
@@ -4848,24 +4776,22 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
&ioc->pdev->dev, sz, 16, 0);
if (!ioc->reply_free_dma_pool) {
- pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
goto out;
}
- ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
+ ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
&ioc->reply_free_dma);
if (!ioc->reply_free) {
- pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
goto out;
}
- memset(ioc->reply_free, 0, sz);
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
- "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
- ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_free_dma (0x%llx)\n",
- ioc->name, (unsigned long long)ioc->reply_free_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->reply_free, ioc->reply_free_queue_depth,
+ 4, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_free_dma (0x%llx)\n",
+ (unsigned long long)ioc->reply_free_dma));
total_sz += sz;
if (ioc->rdpq_array_enable) {
@@ -4876,8 +4802,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
&ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
if (!ioc->reply_post_free_array_dma_pool) {
dinitprintk(ioc,
- pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
- "dma_pool_create failed\n", ioc->name));
+ ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
goto out;
}
ioc->reply_post_free_array =
@@ -4885,34 +4810,31 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
GFP_KERNEL, &ioc->reply_post_free_array_dma);
if (!ioc->reply_post_free_array) {
dinitprintk(ioc,
- pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
- "dma_pool_alloc failed\n", ioc->name));
+ ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
goto out;
}
}
ioc->config_page_sz = 512;
- ioc->config_page = pci_alloc_consistent(ioc->pdev,
- ioc->config_page_sz, &ioc->config_page_dma);
+ ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
+ ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
if (!ioc->config_page) {
- pr_err(MPT3SAS_FMT
- "config page: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "config page: dma_pool_alloc failed\n");
goto out;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "config page(0x%p): size(%d)\n",
- ioc->name, ioc->config_page, ioc->config_page_sz));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
- ioc->name, (unsigned long long)ioc->config_page_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "config page(0x%p): size(%d)\n",
+ ioc->config_page, ioc->config_page_sz));
+ dinitprintk(ioc,
+ ioc_info(ioc, "config_page_dma(0x%llx)\n",
+ (unsigned long long)ioc->config_page_dma));
total_sz += ioc->config_page_sz;
- pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
- ioc->name, total_sz/1024);
- pr_info(MPT3SAS_FMT
- "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
- ioc->name, ioc->shost->can_queue, facts->RequestCredit);
- pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
- ioc->name, ioc->shost->sg_tablesize);
+ ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
+ total_sz / 1024);
+ ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
+ ioc->shost->can_queue, facts->RequestCredit);
+ ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
+ ioc->shost->sg_tablesize);
return 0;
out:
@@ -4990,9 +4912,9 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
int_status = readl(&ioc->chip->HostInterruptStatus);
if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
}
@@ -5000,9 +4922,8 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
count++;
} while (--cntdn);
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ __func__, count, int_status);
return -EFAULT;
}
@@ -5017,9 +4938,9 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
int_status = readl(&ioc->chip->HostInterruptStatus);
if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
}
@@ -5027,9 +4948,8 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
count++;
} while (--cntdn);
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ __func__, count, int_status);
return -EFAULT;
}
@@ -5056,9 +4976,9 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
int_status = readl(&ioc->chip->HostInterruptStatus);
if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
doorbell = readl(&ioc->chip->Doorbell);
@@ -5075,9 +4995,8 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
} while (--cntdn);
out:
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ __func__, count, int_status);
return -EFAULT;
}
@@ -5099,9 +5018,9 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
doorbell_reg = readl(&ioc->chip->Doorbell);
if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
}
@@ -5109,9 +5028,8 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
count++;
} while (--cntdn);
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
- ioc->name, __func__, count, doorbell_reg);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
+ __func__, count, doorbell_reg);
return -EFAULT;
}
@@ -5130,8 +5048,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
int r = 0;
if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
- pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: unknown reset_type\n", __func__);
return -EFAULT;
}
@@ -5139,7 +5056,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
return -EFAULT;
- pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
+ ioc_info(ioc, "sending message unit reset !!\n");
writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
&ioc->chip->Doorbell);
@@ -5149,15 +5066,14 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
}
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
if (ioc_state) {
- pr_err(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
+ ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state);
r = -EFAULT;
goto out;
}
out:
- pr_info(MPT3SAS_FMT "message unit reset: %s\n",
- ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+ ioc_info(ioc, "message unit reset: %s\n",
+ r == 0 ? "SUCCESS" : "FAILED");
return r;
}
@@ -5183,9 +5099,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
/* make sure doorbell is not in use */
if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
- pr_err(MPT3SAS_FMT
- "doorbell is in use (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
return -EFAULT;
}
@@ -5200,17 +5114,15 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
&ioc->chip->Doorbell);
if ((_base_spin_on_doorbell_int(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_ack(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake ack failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
@@ -5222,17 +5134,15 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
if (failed) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake sending request failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
/* now wait for the reply */
if ((_base_wait_for_doorbell_int(ioc, timeout))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
@@ -5241,9 +5151,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
@@ -5252,9 +5161,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
for (i = 2; i < default_reply->MsgLength * 2; i++) {
if ((_base_wait_for_doorbell_int(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
if (i >= reply_bytes/2) /* overflow case */
@@ -5267,8 +5175,9 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
_base_wait_for_doorbell_int(ioc, 5);
if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
+ dhsprintk(ioc,
+ ioc_info(ioc, "doorbell is in use (line=%d)\n",
+ __LINE__));
}
writel(0, &ioc->chip->HostInterruptStatus);
@@ -5308,14 +5217,12 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
void *request;
u16 wait_state_count;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mutex_lock(&ioc->base_cmds.mutex);
if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: base_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5324,23 +5231,20 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5408,14 +5312,12 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
void *request;
u16 wait_state_count;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mutex_lock(&ioc->base_cmds.mutex);
if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: base_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5424,24 +5326,20 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name,
- __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5495,8 +5393,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
struct mpt3sas_port_facts *pfacts;
int mpi_reply_sz, mpi_request_sz, r;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
@@ -5507,8 +5404,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
if (r != 0) {
- pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
+ ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
return r;
}
@@ -5536,26 +5432,26 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
u32 ioc_state;
int rc;
- dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->pci_error_recovery) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "%s: host in pci error recovery\n", ioc->name, __func__));
+ dfailprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery\n",
+ __func__));
return -EFAULT;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
- dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
- ioc->name, __func__, ioc_state));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+ __func__, ioc_state));
if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
(ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
return 0;
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, printk(MPT3SAS_FMT
- "unexpected doorbell active!\n", ioc->name));
+ dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
goto issue_diag_reset;
}
@@ -5567,9 +5463,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
if (ioc_state) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state));
+ dfailprintk(ioc,
+ ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state));
return -EFAULT;
}
@@ -5592,14 +5488,13 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
struct mpt3sas_facts *facts;
int mpi_reply_sz, mpi_request_sz, r;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
r = _base_wait_for_iocstate(ioc, 10);
if (r) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "%s: failed getting to correct state\n",
- ioc->name, __func__));
+ dfailprintk(ioc,
+ ioc_info(ioc, "%s: failed getting to correct state\n",
+ __func__));
return r;
}
mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
@@ -5610,8 +5505,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
if (r != 0) {
- pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
+ ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
return r;
}
@@ -5663,20 +5557,20 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
*/
ioc->page_size = 1 << facts->CurrentHostPageSize;
if (ioc->page_size == 1) {
- pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
- "default host page size to 4k\n", ioc->name);
+ ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
- ioc->name, facts->CurrentHostPageSize));
-
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "hba queue depth(%d), max chains per io(%d)\n",
- ioc->name, facts->RequestCredit,
- facts->MaxChainDepth));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "request frame size(%d), reply frame size(%d)\n", ioc->name,
- facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+ dinitprintk(ioc,
+ ioc_info(ioc, "CurrentHostPageSize(%d)\n",
+ facts->CurrentHostPageSize));
+
+ dinitprintk(ioc,
+ ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
+ facts->RequestCredit, facts->MaxChainDepth));
+ dinitprintk(ioc,
+ ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
+ facts->IOCRequestFrameSize * 4,
+ facts->ReplyFrameSize * 4));
return 0;
}
@@ -5696,8 +5590,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
u16 ioc_status;
u32 reply_post_free_array_sz = 0;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
@@ -5763,15 +5656,14 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
if (r != 0) {
- pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
+ ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
return r;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
mpi_reply.IOCLogInfo) {
- pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+ ioc_err(ioc, "%s: failed\n", __func__);
r = -EIO;
}
@@ -5842,18 +5734,16 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
u16 smid;
u16 ioc_status;
- pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+ ioc_info(ioc, "sending port enable !!\n");
if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
@@ -5867,8 +5757,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2PortEnableRequest_t)/4);
if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
@@ -5881,16 +5770,15 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
mpi_reply = ioc->port_enable_cmds.reply;
ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
- ioc->name, __func__, ioc_status);
+ ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
+ __func__, ioc_status);
r = -EFAULT;
goto out;
}
out:
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
- pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
- "SUCCESS" : "FAILED"));
+ ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
return r;
}
@@ -5906,18 +5794,16 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
Mpi2PortEnableRequest_t *mpi_request;
u16 smid;
- pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+ ioc_info(ioc, "sending port enable !!\n");
if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
@@ -6020,19 +5906,16 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
int r = 0;
int i;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
ioc->base_cmds.status = MPT3_CMD_PENDING;
@@ -6049,8 +5932,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2EventNotificationRequest_t)/4);
if (ioc->base_cmds.status & MPT3_CMD_RESET)
@@ -6058,8 +5940,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
else
r = -ETIME;
} else
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
- ioc->name, __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
return r;
}
@@ -6115,18 +5996,16 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
u32 count;
u32 hcb_size;
- pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
+ ioc_info(ioc, "sending diag reset !!\n");
- drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
- ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
count = 0;
do {
/* Write magic sequence to WriteSequence register
* Loop until in diagnostic mode
*/
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "write magic sequence\n", ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
@@ -6142,16 +6021,15 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
goto out;
host_diagnostic = readl(&ioc->chip->HostDiagnostic);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
- ioc->name, count, host_diagnostic));
+ drsprintk(ioc,
+ ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+ count, host_diagnostic));
} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
hcb_size = readl(&ioc->chip->HCBSize);
- drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
- ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
&ioc->chip->HostDiagnostic);
@@ -6174,43 +6052,38 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "restart the adapter assuming the HCB Address points to good F/W\n",
- ioc->name));
+ drsprintk(ioc,
+ ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
writel(host_diagnostic, &ioc->chip->HostDiagnostic);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "re-enable the HCDW\n", ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
&ioc->chip->HCBSize);
}
- drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
- ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
&ioc->chip->HostDiagnostic);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "disable writes to the diagnostic register\n", ioc->name));
+ drsprintk(ioc,
+ ioc_info(ioc, "disable writes to the diagnostic register\n"));
writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "Wait for FW to go to the READY state\n", ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
if (ioc_state) {
- pr_err(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
+ ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state);
goto out;
}
- pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
+ ioc_info(ioc, "diag reset: SUCCESS\n");
return 0;
out:
- pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
+ ioc_err(ioc, "diag reset: FAILED\n");
return -EFAULT;
}
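/*
 * Editor's note: only the message plumbing changes in the diag-reset
 * path; the reset protocol itself is untouched. Roughly, as sketched
 * below, the MPI2_WRSEQ_* key sequence is written until the chip
 * reports DIAG_WRITE_ENABLE, the reset bit is set, and the driver then
 * polls for the IOC to reach the READY state:
 *
 *	do {
 *		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
 *		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
 *		... remaining key values ...
 *		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
 *	} while (!(host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE));
 *	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
 *	       &ioc->chip->HostDiagnostic);
 */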
@@ -6228,15 +6101,15 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
int rc;
int count;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->pci_error_recovery)
return 0;
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
- dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
- ioc->name, __func__, ioc_state));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+ __func__, ioc_state));
/* if in RESET state, it should move to READY state shortly */
count = 0;
@@ -6244,9 +6117,8 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
while ((ioc_state & MPI2_IOC_STATE_MASK) !=
MPI2_IOC_STATE_READY) {
if (count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
+ ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state);
return -EFAULT;
}
ssleep(1);
@@ -6258,9 +6130,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
return 0;
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "unexpected doorbell active!\n",
- ioc->name));
+ dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
goto issue_diag_reset;
}
@@ -6304,8 +6174,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
struct adapter_reply_queue *reply_q;
Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
/* clean the delayed target reset list */
list_for_each_entry_safe(delayed_tr, delayed_tr_next,
@@ -6465,8 +6334,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
- dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
/* synchronizing freeing resource with pci_access_mutex lock */
mutex_lock(&ioc->pci_access_mutex);
@@ -6494,8 +6362,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
int r, i;
int cpu_id, last_cpu_id = 0;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
/* setup cpu_msix_table */
ioc->cpu_count = num_online_cpus();
@@ -6505,9 +6372,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
ioc->reply_queue_count = 1;
if (!ioc->cpu_msix_table) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "allocation for cpu_msix_table failed!!!\n",
- ioc->name));
+ dfailprintk(ioc,
+ ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
r = -ENOMEM;
goto out_free_resources;
}
@@ -6516,9 +6382,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->reply_post_host_index) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
- "for reply_post_host_index failed!!!\n",
- ioc->name));
+ dfailprintk(ioc,
+ ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
r = -ENOMEM;
goto out_free_resources;
}
@@ -6747,8 +6612,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
- dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mpt3sas_base_stop_watchdog(ioc);
mpt3sas_base_free_resources(ioc);
@@ -6781,8 +6645,7 @@ static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
mpt3sas_scsih_pre_reset_handler(ioc);
mpt3sas_ctl_pre_reset_handler(ioc);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
/**
@@ -6793,8 +6656,7 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
mpt3sas_scsih_after_reset_handler(ioc);
mpt3sas_ctl_after_reset_handler(ioc);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
ioc->transport_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
@@ -6835,8 +6697,7 @@ static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
mpt3sas_scsih_reset_done_handler(ioc);
mpt3sas_ctl_reset_done_handler(ioc);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}
/**
@@ -6883,12 +6744,10 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
u32 ioc_state;
u8 is_fault = 0, is_trigger = 0;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
if (ioc->pci_error_recovery) {
- pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
r = 0;
goto out_unlocked;
}
@@ -6942,8 +6801,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
_base_reset_done_handler(ioc);
out:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
- ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: %s\n",
+ __func__, r == 0 ? "SUCCESS" : "FAILED"));
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 0;
@@ -6959,7 +6819,6 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_trigger_master(ioc,
MASTER_TRIGGER_ADAPTER_RESET);
}
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
return r;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 96dc15e90bd8..8f1d6b071b39 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -158,7 +158,14 @@ struct mpt3sas_nvme_cmd {
/*
* logging format
*/
-#define MPT3SAS_FMT "%s: "
+#define ioc_err(ioc, fmt, ...) \
+ pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...) \
+ pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...) \
+ pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
/*
* WarpDrive Specific Log codes
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index d29a2dcc7d0e..02209447f4ef 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -175,20 +175,18 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
if (!desc)
return;
- pr_info(MPT3SAS_FMT
- "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
- ioc->name, calling_function_name, desc,
- mpi_request->Header.PageNumber, mpi_request->Action,
- le32_to_cpu(mpi_request->PageAddress), smid);
+ ioc_info(ioc, "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
+ calling_function_name, desc,
+ mpi_request->Header.PageNumber, mpi_request->Action,
+ le32_to_cpu(mpi_request->PageAddress), smid);
if (!mpi_reply)
return;
if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
}
/**
@@ -210,9 +208,8 @@ _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
&mem->page_dma, GFP_KERNEL);
if (!mem->page) {
- pr_err(MPT3SAS_FMT
- "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
- ioc->name, __func__, mem->sz);
+ ioc_err(ioc, "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+ __func__, mem->sz);
r = -ENOMEM;
}
} else { /* use tmp buffer if less than 512 bytes */
@@ -313,8 +310,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
mutex_lock(&ioc->config_cmds.mutex);
if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: config_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: config_cmd in use\n", __func__);
mutex_unlock(&ioc->config_cmds.mutex);
return -EAGAIN;
}
@@ -362,34 +358,30 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
r = -EFAULT;
goto free_mem;
}
- pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n",
- ioc->name, __func__, retry_count);
+ ioc_info(ioc, "%s: attempting retry (%d)\n",
+ __func__, retry_count);
}
wait_state_count = 0;
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
ioc->config_cmds.status = MPT3_CMD_NOT_USED;
r = -EFAULT;
goto free_mem;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ioc->config_cmds.status = MPT3_CMD_NOT_USED;
r = -EAGAIN;
goto free_mem;
@@ -429,12 +421,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
(mpi_reply->Header.PageType & 0xF)) {
_debug_dump_mf(mpi_request, ioc->request_sz/4);
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
- panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
- " mpi_reply mismatch: Requested PageType(0x%02x)" \
- " Reply PageType(0x%02x)\n", \
- ioc->name, __func__,
- (mpi_request->Header.PageType & 0xF),
- (mpi_reply->Header.PageType & 0xF));
+ panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->Header.PageType & 0xF,
+ mpi_reply->Header.PageType & 0xF);
}
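/*
 * Editor's note: the KERN_WARNING marker is dropped from these panic()
 * calls because panic() does not interpret printk log-level prefixes;
 * the old code embedded the level escape bytes literally in the panic
 * string. The adapter name is kept by passing ioc->name explicitly,
 * since the ioc_*() wrappers only cover the pr_*() levels.
 */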
if (((mpi_request->Header.PageType & 0xF) ==
@@ -442,19 +432,18 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
mpi_request->ExtPageType != mpi_reply->ExtPageType) {
_debug_dump_mf(mpi_request, ioc->request_sz/4);
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
- panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
- " mpi_reply mismatch: Requested ExtPageType(0x%02x)"
- " Reply ExtPageType(0x%02x)\n",
- ioc->name, __func__, mpi_request->ExtPageType,
- mpi_reply->ExtPageType);
+ panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->ExtPageType,
+ mpi_reply->ExtPageType);
}
ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
& MPI2_IOCSTATUS_MASK;
}
if (retry_count)
- pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \
- ioc->name, __func__, retry_count);
+ ioc_info(ioc, "%s: retry (%d) completed!!\n",
+ __func__, retry_count);
if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
config_page && mpi_request->Action ==
@@ -469,14 +458,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
_debug_dump_config(p, min_t(u16, mem.sz,
config_page_sz)/4);
- panic(KERN_WARNING MPT3SAS_FMT
- "%s: Firmware BUG:" \
- " config page mismatch:"
- " Requested PageType(0x%02x)"
- " Reply PageType(0x%02x)\n",
- ioc->name, __func__,
- (mpi_request->Header.PageType & 0xF),
- (p[3] & 0xF));
+ panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->Header.PageType & 0xF,
+ p[3] & 0xF);
}
if (((mpi_request->Header.PageType & 0xF) ==
@@ -486,13 +471,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
_debug_dump_config(p, min_t(u16, mem.sz,
config_page_sz)/4);
- panic(KERN_WARNING MPT3SAS_FMT
- "%s: Firmware BUG:" \
- " config page mismatch:"
- " Requested ExtPageType(0x%02x)"
- " Reply ExtPageType(0x%02x)\n",
- ioc->name, __func__,
- mpi_request->ExtPageType, p[6]);
+ panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->ExtPageType, p[6]);
}
}
memcpy(config_page, mem.page, min_t(u16, mem.sz,
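One recurring change in the two files that follow: casts of sas_address, wwid, and enclosure_logical_id go from (unsigned long long) to (u64) in front of %llx/%016llx. That is safe in kernel code because u64 comes from asm-generic/int-ll64.h and is unsigned long long on every architecture, so the shorter cast still matches the format specifier exactly. A sketch (value hypothetical):

	u64 sas_address = 0x5000c50012345678ULL;	/* hypothetical */
	/* the two casts are interchangeable kernel-wide */
	pr_info("sas_addr(0x%016llx)\n", (unsigned long long)sas_address);
	pr_info("sas_addr(0x%016llx)\n", (u64)sas_address);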
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 5e8c059ce2c9..4afa597cbfba 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -185,17 +185,15 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
if (!desc)
return;
- pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
- ioc->name, calling_function_name, desc, smid);
+ ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);
if (!mpi_reply)
return;
if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
mpi_request->Function ==
@@ -208,38 +206,32 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
sas_device = mpt3sas_get_sdev_by_handle(ioc,
le16_to_cpu(scsi_reply->DevHandle));
if (sas_device) {
- pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->sas_address, sas_device->phy);
- pr_warn(MPT3SAS_FMT
- "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot);
+ ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+ (u64)sas_device->sas_address,
+ sas_device->phy);
+ ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot);
sas_device_put(sas_device);
}
if (!sas_device) {
pcie_device = mpt3sas_get_pdev_by_handle(ioc,
le16_to_cpu(scsi_reply->DevHandle));
if (pcie_device) {
- pr_warn(MPT3SAS_FMT
- "\tWWID(0x%016llx), port(%d)\n", ioc->name,
- (unsigned long long)pcie_device->wwid,
- pcie_device->port_num);
+ ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
+ (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
if (pcie_device->enclosure_handle != 0)
- pr_warn(MPT3SAS_FMT
- "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
- ioc->name, (unsigned long long)
- pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
pcie_device_put(pcie_device);
}
}
if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
- pr_info(MPT3SAS_FMT
- "\tscsi_state(0x%02x), scsi_status"
- "(0x%02x)\n", ioc->name,
- scsi_reply->SCSIState,
- scsi_reply->SCSIStatus);
+ ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
+ scsi_reply->SCSIState,
+ scsi_reply->SCSIStatus);
}
}
@@ -466,8 +458,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
int i;
u8 issue_reset;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
if (!(ioc->diag_buffer_status[i] &
MPT3_DIAG_BUFFER_IS_REGISTERED))
@@ -487,8 +478,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
*/
void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
ioc->ctl_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
@@ -506,8 +496,7 @@ void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
int i;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
if (!(ioc->diag_buffer_status[i] &
@@ -612,10 +601,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
}
if (!found) {
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), lun(%d), no active mid!!\n",
- ioc->name,
- desc, le16_to_cpu(tm_request->DevHandle), lun));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
+ desc, le16_to_cpu(tm_request->DevHandle),
+ lun));
tm_reply = ioc->ctl_cmds.reply;
tm_reply->DevHandle = tm_request->DevHandle;
tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -631,10 +620,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
return 1;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
- desc, le16_to_cpu(tm_request->DevHandle), lun,
- le16_to_cpu(tm_request->TaskMID)));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
return 0;
}
@@ -672,8 +661,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
issue_reset = 0;
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
ret = -EAGAIN;
goto out;
}
@@ -682,28 +670,23 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
ret = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name,
- __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
if (!mpi_request) {
- pr_err(MPT3SAS_FMT
- "%s: failed obtaining a memory for mpi_request\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n",
+ __func__);
ret = -ENOMEM;
goto out;
}
@@ -726,8 +709,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ret = -EAGAIN;
goto out;
}
@@ -762,8 +744,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
/* obtain dma-able memory for data transfer */
if (data_out_sz) /* WRITE */ {
- data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
- &data_out_dma);
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
+ &data_out_dma, GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -782,8 +764,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
if (data_in_sz) /* READ */ {
- data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
- &data_in_dma);
+ data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
+ &data_in_dma, GFP_KERNEL);
if (!data_in) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
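The two allocation hunks above also migrate away from the legacy PCI DMA API. pci_alloc_consistent() is a thin compatibility shim (in include/linux/pci-dma-compat.h) with roughly this shape:

	/* approximate shape of the legacy wrapper being bypassed */
	static inline void *
	pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			     dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(&hwdev->dev, size, dma_handle,
					  GFP_ATOMIC);
	}

Calling dma_alloc_coherent() directly lets this ioctl path, which runs in process context and may sleep, ask for GFP_KERNEL instead of the wrapper's hard-coded GFP_ATOMIC; the matching pci_free_consistent() sites become dma_free_coherent() later in the patch.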
@@ -823,9 +805,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
data_out_dma, data_out_sz, data_in_dma, data_in_sz);
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
- "ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -843,9 +825,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
mpt3sas_base_get_sense_buffer_dma(ioc, smid);
memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) :ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -863,10 +845,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
Mpi2SCSITaskManagementRequest_t *tm_request =
(Mpi2SCSITaskManagementRequest_t *)request;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
- ioc->name,
- le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+ dtmprintk(ioc,
+ ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+ le16_to_cpu(tm_request->DevHandle),
+ tm_request->TaskType));
ioc->got_task_abort_from_ioctl = 1;
if (tm_request->TaskType ==
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
@@ -881,9 +863,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->got_task_abort_from_ioctl = 0;
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) :ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -929,9 +911,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
case MPI2_FUNCTION_SATA_PASSTHROUGH:
{
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) :ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -1017,12 +999,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
Mpi2SCSITaskManagementReply_t *tm_reply =
(Mpi2SCSITaskManagementReply_t *)mpi_reply;
- pr_info(MPT3SAS_FMT "TASK_MGMT: " \
- "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
- "TerminationCount(0x%08x)\n", ioc->name,
- le16_to_cpu(tm_reply->IOCStatus),
- le32_to_cpu(tm_reply->IOCLogInfo),
- le32_to_cpu(tm_reply->TerminationCount));
+ ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
+ le16_to_cpu(tm_reply->IOCStatus),
+ le32_to_cpu(tm_reply->IOCLogInfo),
+ le32_to_cpu(tm_reply->TerminationCount));
}
/* copy out xdata to user */
@@ -1054,9 +1034,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
MPI2_FUNCTION_NVME_ENCAPSULATED)) {
if (karg.sense_data_ptr == NULL) {
- pr_info(MPT3SAS_FMT "Response buffer provided"
- " by application is NULL; Response data will"
- " not be returned.\n", ioc->name);
+ ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n");
goto out;
}
sz_arg = (mpi_request->Function ==
@@ -1079,9 +1057,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
mpi_request->Function ==
MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
- pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
- ioc->name,
- le16_to_cpu(mpi_request->FunctionDependent1));
+ ioc_info(ioc, "issue target reset: handle = (0x%04x)\n",
+ le16_to_cpu(mpi_request->FunctionDependent1));
mpt3sas_halt_firmware(ioc);
pcie_device = mpt3sas_get_pdev_by_handle(ioc,
le16_to_cpu(mpi_request->FunctionDependent1));
@@ -1106,11 +1083,11 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
/* free memory associated with sg buffers */
if (data_in)
- pci_free_consistent(ioc->pdev, data_in_sz, data_in,
+ dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
data_in_dma);
if (data_out)
- pci_free_consistent(ioc->pdev, data_out_sz, data_out,
+ dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
data_out_dma);
kfree(mpi_request);
@@ -1128,8 +1105,8 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt3_ioctl_iocinfo karg;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+ __func__));
memset(&karg, 0 , sizeof(karg));
if (ioc->pfacts)
@@ -1188,8 +1165,8 @@ _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+ __func__));
karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
memcpy(karg.event_types, ioc->event_type,
@@ -1219,8 +1196,8 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+ __func__));
memcpy(ioc->event_type, karg.event_types,
MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
@@ -1259,8 +1236,8 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+ __func__));
number_bytes = karg.hdr.max_data_size -
sizeof(struct mpt3_ioctl_header);
@@ -1306,12 +1283,11 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
ioc->is_driver_loading)
return -EAGAIN;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+ __func__));
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_info(MPT3SAS_FMT "host reset: %s\n",
- ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
+ ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
return 0;
}
@@ -1440,8 +1416,8 @@ _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
rc = _ctl_btdh_search_sas_device(ioc, &karg);
if (!rc)
@@ -1512,53 +1488,46 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
u32 ioc_state;
u8 issue_reset = 0;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EAGAIN;
goto out;
}
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
buffer_type = diag_register->buffer_type;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if (ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) {
- pr_err(MPT3SAS_FMT
- "%s: already has a registered buffer for buffer_type(0x%02x)\n",
- ioc->name, __func__,
- buffer_type);
+ ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (diag_register->requested_buffer_size % 4) {
- pr_err(MPT3SAS_FMT
- "%s: the requested_buffer_size is not 4 byte aligned\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
+ __func__);
return -EINVAL;
}
smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1580,9 +1549,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (request_data) {
request_data_dma = ioc->diag_buffer_dma[buffer_type];
if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
- pci_free_consistent(ioc->pdev,
- ioc->diag_buffer_sz[buffer_type],
- request_data, request_data_dma);
+ dma_free_coherent(&ioc->pdev->dev,
+ ioc->diag_buffer_sz[buffer_type],
+ request_data, request_data_dma);
request_data = NULL;
}
}
@@ -1590,12 +1559,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (request_data == NULL) {
ioc->diag_buffer_sz[buffer_type] = 0;
ioc->diag_buffer_dma[buffer_type] = 0;
- request_data = pci_alloc_consistent(
- ioc->pdev, request_data_sz, &request_data_dma);
+ request_data = dma_alloc_coherent(&ioc->pdev->dev,
+ request_data_sz, &request_data_dma, GFP_KERNEL);
if (request_data == NULL) {
- pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
- " for diag buffers, requested size(%d)\n",
- ioc->name, __func__, request_data_sz);
+ ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
+ __func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
return -ENOMEM;
}
@@ -1612,11 +1580,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
- ioc->name, __func__, request_data,
- (unsigned long long)request_data_dma,
- le32_to_cpu(mpi_request->BufferLength)));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
+ __func__, request_data,
+ (unsigned long long)request_data_dma,
+ le32_to_cpu(mpi_request->BufferLength)));
for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
mpi_request->ProductSpecific[i] =
@@ -1637,8 +1605,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
/* process the completed Reply Message Frame */
if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
- pr_err(MPT3SAS_FMT "%s: no reply message\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: no reply message\n", __func__);
rc = -EFAULT;
goto out;
}
@@ -1649,13 +1616,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_REGISTERED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
- ioc->name, __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
- pr_info(MPT3SAS_FMT
- "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
- ioc->name, __func__,
- ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
rc = -EFAULT;
}
@@ -1666,7 +1631,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
out:
if (rc && request_data)
- pci_free_consistent(ioc->pdev, request_data_sz,
+ dma_free_coherent(&ioc->pdev->dev, request_data_sz,
request_data, request_data_dma);
ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
@@ -1689,8 +1654,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
if (bits_to_register & 1) {
- pr_info(MPT3SAS_FMT "registering trace buffer support\n",
- ioc->name);
+ ioc_info(ioc, "registering trace buffer support\n");
ioc->diag_trigger_master.MasterData =
(MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
@@ -1701,8 +1665,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
}
if (bits_to_register & 2) {
- pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
- ioc->name);
+ ioc_info(ioc, "registering snapshot buffer support\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
/* register for 2MB buffers */
diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1711,8 +1674,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
}
if (bits_to_register & 4) {
- pr_info(MPT3SAS_FMT "registering extended buffer support\n",
- ioc->name);
+ ioc_info(ioc, "registering extended buffer support\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
/* register for 2MB buffers */
diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1768,51 +1730,46 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
buffer_type = karg.unique_id & 0x000000ff;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is not registered\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
return -EINVAL;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) has not been released\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
request_data_sz = ioc->diag_buffer_sz[buffer_type];
request_data_dma = ioc->diag_buffer_dma[buffer_type];
- pci_free_consistent(ioc->pdev, request_data_sz,
- request_data, request_data_dma);
+ dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+ request_data, request_data_dma);
ioc->diag_buffer[buffer_type] = NULL;
ioc->diag_buffer_status[buffer_type] = 0;
return 0;
@@ -1841,41 +1798,37 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
karg.application_flags = 0;
buffer_type = karg.buffer_type;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is not registered\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (karg.unique_id & 0xffffff00) {
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have buffer for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
@@ -1897,9 +1850,8 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
- pr_err(MPT3SAS_FMT
- "%s: unable to write mpt3_diag_query data @ %p\n",
- ioc->name, __func__, arg);
+ ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
+ __func__, arg);
return -EFAULT;
}
return 0;
@@ -1923,8 +1875,8 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
u32 ioc_state;
int rc;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
rc = 0;
*issue_reset = 0;
@@ -1935,24 +1887,22 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
MPT3_DIAG_BUFFER_IS_REGISTERED)
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_RELEASED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: skipping due to FAULT state\n", ioc->name,
- __func__));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: skipping due to FAULT state\n",
+ __func__));
rc = -EAGAIN;
goto out;
}
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1982,8 +1932,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
/* process the completed Reply Message Frame */
if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
- pr_err(MPT3SAS_FMT "%s: no reply message\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: no reply message\n", __func__);
rc = -EFAULT;
goto out;
}
@@ -1994,13 +1943,11 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_RELEASED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
- ioc->name, __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
- pr_info(MPT3SAS_FMT
- "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
- ioc->name, __func__,
- ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
rc = -EFAULT;
}
@@ -2033,47 +1980,41 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
buffer_type = karg.unique_id & 0x000000ff;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is not registered\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
if (ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_RELEASED) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is already released\n",
- ioc->name, __func__,
- buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
+ __func__, buffer_type);
return 0;
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
@@ -2084,9 +2025,8 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_DIAG_BUFFER_IS_RELEASED;
ioc->diag_buffer_status[buffer_type] &=
~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) was released due to host reset\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
+ __func__, buffer_type);
return 0;
}
@@ -2124,38 +2064,34 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
buffer_type = karg.unique_id & 0x000000ff;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have buffer for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
request_size = ioc->diag_buffer_sz[buffer_type];
if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
- pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
- "or bytes_to_read are not 4 byte aligned\n", ioc->name,
- __func__);
+ ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
+ __func__);
return -EINVAL;
}
@@ -2163,10 +2099,10 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EINVAL;
diag_data = (void *)(request_data + karg.starting_offset);
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
- ioc->name, __func__,
- diag_data, karg.starting_offset, karg.bytes_to_read));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
+ __func__, diag_data, karg.starting_offset,
+ karg.bytes_to_read));
/* Truncate data on requests that are too large */
if ((diag_data + karg.bytes_to_read < diag_data) ||
@@ -2177,39 +2113,36 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
if (copy_to_user((void __user *)uarg->diagnostic_data,
diag_data, copy_size)) {
- pr_err(MPT3SAS_FMT
- "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
- ioc->name, __func__, diag_data);
+ ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
+ __func__, diag_data);
return -EFAULT;
}
if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
return 0;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: Reregister buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
+ __func__, buffer_type));
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is still registered\n",
- ioc->name, __func__, buffer_type));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
+ __func__, buffer_type));
return 0;
}
/* Get a free request frame and save the message context.
*/
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -2247,8 +2180,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/* process the completed Reply Message Frame */
if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
- pr_err(MPT3SAS_FMT "%s: no reply message\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: no reply message\n", __func__);
rc = -EFAULT;
goto out;
}
@@ -2259,13 +2191,11 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_REGISTERED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
- ioc->name, __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
- pr_info(MPT3SAS_FMT
- "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
- ioc->name, __func__,
- ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__, ioc_status,
+ le32_to_cpu(mpi_reply->IOCLogInfo));
rc = -EFAULT;
}
@@ -2450,8 +2380,9 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
ret = _ctl_diag_read_buffer(ioc, arg);
break;
default:
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+ dctlprintk(ioc,
+ ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
+ cmd));
break;
}
@@ -2840,8 +2771,8 @@ _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
return -EINVAL;
ioc->logging_level = val;
- pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
- ioc->logging_level);
+ ioc_info(ioc, "logging_level=%08xh\n",
+ ioc->logging_level);
return strlen(buf);
}
static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
@@ -2877,8 +2808,8 @@ _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
return -EINVAL;
ioc->fwfault_debug = val;
- pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
- ioc->fwfault_debug);
+ ioc_info(ioc, "fwfault_debug=%d\n",
+ ioc->fwfault_debug);
return strlen(buf);
}
static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
@@ -2958,8 +2889,8 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
ssize_t rc = 0;
if (!ioc->is_warpdrive) {
- pr_err(MPT3SAS_FMT "%s: BRM attribute is only for"
- " warpdrive\n", ioc->name, __func__);
+ ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
+ __func__);
goto out;
}
/* pci_access_mutex lock acquired by sysfs show path */
@@ -2973,30 +2904,28 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
if (!io_unit_pg3) {
- pr_err(MPT3SAS_FMT "%s: failed allocating memory "
- "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
+ ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
+ __func__, sz);
goto out;
}
if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
0) {
- pr_err(MPT3SAS_FMT
- "%s: failed reading iounit_pg3\n", ioc->name,
- __func__);
+ ioc_err(ioc, "%s: failed reading iounit_pg3\n",
+ __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with "
- "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
+ ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
+ __func__, ioc_status);
goto out;
}
if (io_unit_pg3->GPIOCount < 25) {
- pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than "
- "25 entries, detected (%d) entries\n", ioc->name, __func__,
- io_unit_pg3->GPIOCount);
+ ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
+ __func__, io_unit_pg3->GPIOCount);
goto out;
}
@@ -3039,17 +2968,15 @@ _ctl_host_trace_buffer_size_show(struct device *cdev,
struct DIAG_BUFFER_START *request_data;
if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
@@ -3089,17 +3016,15 @@ _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
u32 size;
if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
@@ -3188,8 +3113,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
goto out;
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
- pr_info(MPT3SAS_FMT "posting host trace buffers\n",
- ioc->name);
+ ioc_info(ioc, "posting host trace buffers\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
diag_register.requested_buffer_size = (1024 * 1024);
diag_register.unique_id = 0x7075900;
@@ -3205,8 +3129,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_RELEASED))
goto out;
- pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
- ioc->name);
+ ioc_info(ioc, "releasing host trace buffer\n");
mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
&issue_reset);
}
@@ -3658,8 +3581,10 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
if ((ioc->diag_buffer_status[i] &
MPT3_DIAG_BUFFER_IS_RELEASED))
continue;
- pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
- ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
+ dma_free_coherent(&ioc->pdev->dev,
+ ioc->diag_buffer_sz[i],
+ ioc->diag_buffer[i],
+ ioc->diag_buffer_dma[i]);
ioc->diag_buffer[i] = NULL;
ioc->diag_buffer_status[i] = 0;
}
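Before the series moves on to mpt3sas_scsih.c, a word on the dctlprintk()/dtmprintk()/dewtprintk()/dinitprintk()/dfailprintk() wrappers that keep getting re-indented in these hunks: they are debug gates whose second argument is a full statement, executed only when the corresponding bit is set in ioc->logging_level, which is why ioc_info() appears as a macro argument rather than a bare call. Assumed shape (per mpt3sas_debug.h; MPT_DEBUG_IOCTL is the bit presumably tested by dctlprintk):

	#define dctlprintk(IOC, CMD)				\
	do {							\
		if ((IOC)->logging_level & MPT_DEBUG_IOCTL)	\
			CMD;					\
	} while (0)

The re-indentation in this series simply keeps the ioc_info() argument aligned under the macro once the split format strings are joined.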
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 53133cfd420f..03c52847ed07 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -418,8 +418,8 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
@@ -442,10 +442,8 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
return -ENXIO;
/* else error case */
- pr_err(MPT3SAS_FMT
- "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
- ioc->name, handle, ioc_status,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
return -EIO;
}
@@ -508,10 +506,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
(ioc->bios_pg2.ReqBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.RequestedBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: req_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->req_boot_device.device = device;
ioc->req_boot_device.channel = channel;
}
@@ -523,10 +520,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
(ioc->bios_pg2.ReqAltBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.RequestedAltBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: req_alt_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->req_alt_boot_device.device = device;
ioc->req_alt_boot_device.channel = channel;
}
@@ -538,10 +534,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
(ioc->bios_pg2.CurrentBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.CurrentBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: current_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->current_boot_device.device = device;
ioc->current_boot_device.channel = channel;
}
@@ -752,19 +747,16 @@ _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
sas_device->chassis_slot);
} else {
if (sas_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "enclosure logical id(0x%016llx), slot(%d) \n",
- ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
+ ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot);
if (sas_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, sas_device->enclosure_level,
- sas_device->connector_name);
+ ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
if (sas_device->is_chassis_slot_valid)
- pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n",
- ioc->name, sas_device->chassis_slot);
+ ioc_info(ioc, "chassis slot(0x%04x)\n",
+ sas_device->chassis_slot);
}
}
@@ -784,10 +776,8 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
if (!sas_device)
return;
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, sas_device->handle,
- (unsigned long long) sas_device->sas_address);
+ ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ sas_device->handle, (u64)sas_device->sas_address);
_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
@@ -872,10 +862,10 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__, sas_device->handle,
+ (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
@@ -923,10 +913,10 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__, sas_device->handle,
+ (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
@@ -1073,21 +1063,16 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
if (!pcie_device)
return;
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, pcie_device->handle,
- (unsigned long long) pcie_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ pcie_device->handle, (u64)pcie_device->wwid);
if (pcie_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "removing enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
if (pcie_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "removing enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, pcie_device->enclosure_level,
- pcie_device->connector_name);
+ ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
if (!list_empty(&pcie_device->list)) {
@@ -1146,20 +1131,21 @@ _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure logical id(0x%016llx), slot( %d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__, pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+ __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
pcie_device_get(pcie_device);
@@ -1191,20 +1177,21 @@ _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure logical id(0x%016llx), slot( %d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__, pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+ __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
pcie_device_get(pcie_device);
@@ -1304,9 +1291,10 @@ _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- raid_device->handle, (unsigned long long)raid_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ raid_device->handle, (u64)raid_device->wwid));
spin_lock_irqsave(&ioc->raid_device_lock, flags);
list_add_tail(&raid_device->list, &ioc->raid_device_list);
@@ -1857,16 +1845,16 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -1952,8 +1940,8 @@ scsih_get_resync(struct device *dev)
if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
percent_complete = 0;
goto out;
}
@@ -2006,8 +1994,8 @@ scsih_get_state(struct device *dev)
if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -2103,9 +2091,9 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
&num_pds)) || !num_pds) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2114,17 +2102,17 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
sizeof(Mpi2RaidVol0PhysDisk_t));
vol_pg0 = kzalloc(sz, GFP_KERNEL);
if (!vol_pg0) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
kfree(vol_pg0);
return 1;
}
@@ -2215,16 +2203,16 @@ scsih_slave_configure(struct scsi_device *sdev)
raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (!raid_device) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
- __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if (_scsih_get_volume_capabilities(ioc, raid_device)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
- __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2308,16 +2296,16 @@ scsih_slave_configure(struct scsi_device *sdev)
if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
if (mpt3sas_config_get_volume_handle(ioc, handle,
&volume_handle)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
volume_handle, &volume_wwid)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
}
@@ -2329,9 +2317,9 @@ scsih_slave_configure(struct scsi_device *sdev)
sas_device_priv_data->sas_target->sas_address);
if (!pcie_device) {
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
- __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2377,9 +2365,9 @@ scsih_slave_configure(struct scsi_device *sdev)
sas_device_priv_data->sas_target->sas_address);
if (!sas_device) {
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2515,8 +2503,7 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
desc = "unknown";
break;
}
- pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
- ioc->name, response_code, desc);
+ ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
}
/**
@@ -2640,22 +2627,19 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
lockdep_assert_held(&ioc->tm_cmds.mutex);
if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
- pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
return FAILED;
}
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return FAILED;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "unexpected doorbell active!\n", ioc->name));
+ dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
return (!rc) ? SUCCESS : FAILED;
}
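The first two removals in the hunk above also quietly fix transposed arguments: the old calls passed __func__ first and ioc->name second, while the MPT3SAS_FMT prefix (presumably "%s: ") consumed the first argument, so the log line led with the function name where the adapter name belonged. The wrapper form makes that mistake structurally impossible:

	/* old, transposed: printed "<func>: <adapter>: tm_cmd busy!!!" */
	pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n", __func__, ioc->name);
	/* new: the wrapper supplies ioc->name; only __func__ remains */
	ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);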
@@ -2669,14 +2653,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return FAILED;
}
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
- ioc->name, handle, type, smid_task, timeout, tr_method));
+ dtmprintk(ioc,
+ ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
+ handle, type, smid_task, timeout, tr_method));
ioc->tm_cmds.status = MPT3_CMD_PENDING;
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->tm_cmds.smid = smid;
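/*
 * The dtmprintk()/dhsprintk()/dfailprintk()/dewtprintk() wrappers seen
 * throughout are run-time gated debug macros, roughly of this form
 * (a sketch, assuming the usual mpt3sas_debug.h definitions):
 *
 * #define dtmprintk(IOC, CMD) \
 * do { \
 *	if ((IOC)->logging_level & MPT_DEBUG_TM) \
 *		CMD; \
 * } while (0)
 *
 * so the converted ioc_info()/ioc_warn() arguments are still only
 * evaluated when the matching MPT_DEBUG_* bit is set in
 * ioc->logging_level (MPT_DEBUG_TM is visible in context further down).
 */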
@@ -2709,11 +2692,11 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
mpi_reply = ioc->tm_cmds.reply;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
- "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dtmprintk(ioc,
+ ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
if (ioc->logging_level & MPT_DEBUG_TM) {
_scsih_response_code(ioc, mpi_reply->ResponseCode);
if (mpi_reply->IOCStatus)
@@ -3060,13 +3043,11 @@ scsih_host_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
int r, retval;
- pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
- ioc->name, scmd);
+ ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
scsi_print_command(scmd);
if (ioc->is_driver_loading || ioc->remove_host) {
- pr_info(MPT3SAS_FMT "Blocking the host reset\n",
- ioc->name);
+ ioc_info(ioc, "Blocking the host reset\n");
r = FAILED;
goto out;
}
@@ -3074,8 +3055,8 @@ scsih_host_reset(struct scsi_cmnd *scmd)
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
r = (retval < 0) ? FAILED : SUCCESS;
out:
- pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
- ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ ioc_info(ioc, "host reset: %s scmd(%p)\n",
+ r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
return r;
}
@@ -3567,18 +3548,16 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
u8 tr_method = 0;
if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host in pci error recovery: handle(0x%04x)\n",
- __func__, ioc->name,
- handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
+ __func__, handle));
return;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host is not operational: handle(0x%04x)\n",
- __func__, ioc->name,
- handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
+ __func__, handle));
return;
}
@@ -3614,39 +3593,31 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
}
if (sas_target_priv_data) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle,
- (unsigned long long)sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle, (u64)sas_address));
if (sas_device) {
if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag:enclosure logical "
- "id(0x%016llx), slot(%d)\n", ioc->name,
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot));
if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: enclosure "
- "level(0x%04x), connector name( %s)\n",
- ioc->name, sas_device->enclosure_level,
- sas_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name));
} else if (pcie_device) {
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: logical "
- "id(0x%016llx), slot(%d)\n", ioc->name,
- (unsigned long long)
- pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag:, enclosure "
- "level(0x%04x), "
- "connector name( %s)\n", ioc->name,
- pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
}
_scsih_ublock_io_device(ioc, sas_address);
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
@@ -3660,16 +3631,15 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
goto out;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid,
- ioc->tm_tr_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_tr_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -3717,39 +3687,39 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
struct _sc_list *delayed_sc;
if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host in pci error recovery\n", __func__,
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery\n",
+ __func__));
return 1;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host is not operational\n", __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational\n",
+ __func__));
return 1;
}
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
handle = le16_to_cpu(mpi_request_tm->DevHandle);
if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
- dewtprintk(ioc, pr_err(MPT3SAS_FMT
- "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
- ioc->name, handle,
- le16_to_cpu(mpi_reply->DevHandle), smid));
+ dewtprintk(ioc,
+ ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
return 0;
}
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
- "loginfo(0x%08x), completed(%d)\n", ioc->name,
- handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
if (!smid_sas_ctrl) {
@@ -3759,16 +3729,15 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
INIT_LIST_HEAD(&delayed_sc->list);
delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:sc:handle(0x%04x), (open)\n",
- ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
+ handle));
return _scsih_check_for_pending_tm(ioc, smid);
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid_sas_ctrl,
- ioc->tm_sas_control_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
@@ -3803,20 +3772,19 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (likely(mpi_reply)) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "sc_complete:handle(0x%04x), (open) "
- "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
if (le16_to_cpu(mpi_reply->IOCStatus) ==
MPI2_IOCSTATUS_SUCCESS) {
clear_bit(le16_to_cpu(mpi_reply->DevHandle),
ioc->device_remove_in_progress);
}
} else {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
}
return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
}
@@ -3839,9 +3807,9 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct _tr_list *delayed_tr;
if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host reset in progress!\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host reset in progress!\n",
+ __func__));
return;
}
@@ -3853,16 +3821,15 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
return;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid,
- ioc->tm_tr_volume_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_tr_volume_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -3892,33 +3859,32 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host reset in progress!\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host reset in progress!\n",
+ __func__));
return 1;
}
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
handle = le16_to_cpu(mpi_request_tm->DevHandle);
if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
- dewtprintk(ioc, pr_err(MPT3SAS_FMT
- "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
- ioc->name, handle,
- le16_to_cpu(mpi_reply->DevHandle), smid));
+ dewtprintk(ioc,
+ ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ handle, le16_to_cpu(mpi_reply->DevHandle),
+ smid));
return 0;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
- "loginfo(0x%08x), completed(%d)\n", ioc->name,
- handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
return _scsih_check_for_pending_tm(ioc, smid);
}
@@ -3948,10 +3914,9 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
- ioc->name, le16_to_cpu(event), smid,
- ioc->base_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
+ le16_to_cpu(event), smid, ioc->base_cb_idx));
ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
@@ -3981,21 +3946,21 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
unsigned long flags;
if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host has been removed\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host has been removed\n",
+ __func__));
return;
} else if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host in pci error recovery\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery\n",
+ __func__));
return;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host is not operational\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational\n",
+ __func__));
return;
}
@@ -4007,10 +3972,9 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid,
- ioc->tm_sas_control_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_sas_control_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
@@ -4171,8 +4135,8 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
expander_handle) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting ignoring flag\n", ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting ignoring flag\n"));
fw_event->ignore = 1;
}
}
@@ -4243,9 +4207,8 @@ _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
switch_handle) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting ignoring flag for switch event\n",
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting ignoring flag for switch event\n"));
fw_event->ignore = 1;
}
}
@@ -4274,10 +4237,9 @@ _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_target_priv_data =
raid_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: handle(0x%04x), "
- "wwid(0x%016llx)\n", ioc->name, handle,
- (unsigned long long) raid_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
+ handle, (u64)raid_device->wwid));
}
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
@@ -4379,9 +4341,9 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
- handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
} else
_scsih_tm_tr_send(ioc, handle);
}
@@ -4424,15 +4386,14 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
Mpi2EventDataTemperature_t *event_data)
{
if (ioc->temp_sensors_count >= event_data->SensorNum) {
- pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s"
- " exceeded for Sensor: %d !!!\n", ioc->name,
- ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
- ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
- ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
- ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
- event_data->SensorNum);
- pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n",
- ioc->name, event_data->CurrentTemperature);
+ ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
+ le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
+ le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
+ le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
+ le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
+ event_data->SensorNum);
+ ioc_err(ioc, "Current Temp In Celsius: %d\n",
+ event_data->CurrentTemperature);
}
}
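/*
 * On the simplified conditionals above: for a single-bit mask m,
 * ((x & m) == m) and (x & m) are equivalent as truth values, so dropping
 * the "== 1"/"== 2"/"== 4"/"== 8" comparisons leaves the printed sensor
 * flags unchanged.
 */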
@@ -4480,8 +4441,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
scmd->result = DID_RESET << 16;
scmd->scsi_done(scmd);
}
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
- ioc->name, count));
+ dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}
/**
@@ -4680,8 +4640,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
_scsih_set_satl_pending(scmd, false);
goto out;
}
@@ -4919,37 +4878,28 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
scsi_print_command(scmd);
if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
- pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
- device_str, (unsigned long long)priv_target->sas_address);
+ ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
+ device_str, (u64)priv_target->sas_address);
} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
if (pcie_device) {
- pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n",
- ioc->name,
- (unsigned long long)pcie_device->wwid,
- pcie_device->port_num);
+ ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
+ (u64)pcie_device->wwid, pcie_device->port_num);
if (pcie_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "\tenclosure logical id(0x%016llx), "
- "slot(%d)\n", ioc->name,
- (unsigned long long)
- pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
if (pcie_device->connector_name[0])
- pr_info(MPT3SAS_FMT
- "\tenclosure level(0x%04x),"
- "connector name( %s)\n",
- ioc->name, pcie_device->enclosure_level,
- pcie_device->connector_name);
+ ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
pcie_device_put(pcie_device);
}
} else {
sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
if (sas_device) {
- pr_warn(MPT3SAS_FMT
- "\tsas_address(0x%016llx), phy(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->sas_address, sas_device->phy);
+ ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+ (u64)sas_device->sas_address, sas_device->phy);
_scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL);
@@ -4958,30 +4908,23 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
}
}
- pr_warn(MPT3SAS_FMT
- "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->DevHandle),
- desc_ioc_state, ioc_status, smid);
- pr_warn(MPT3SAS_FMT
- "\trequest_len(%d), underflow(%d), resid(%d)\n",
- ioc->name, scsi_bufflen(scmd), scmd->underflow,
- scsi_get_resid(scmd));
- pr_warn(MPT3SAS_FMT
- "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->TaskTag),
- le32_to_cpu(mpi_reply->TransferCount), scmd->result);
- pr_warn(MPT3SAS_FMT
- "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
- ioc->name, desc_scsi_status,
- scsi_status, desc_scsi_state, scsi_state);
+ ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
+ le16_to_cpu(mpi_reply->DevHandle),
+ desc_ioc_state, ioc_status, smid);
+ ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
+ scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
+ ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
+ le16_to_cpu(mpi_reply->TaskTag),
+ le32_to_cpu(mpi_reply->TransferCount), scmd->result);
+ ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
+ desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
struct sense_info data;
_scsih_normalize_sense(scmd->sense_buffer, &data);
- pr_warn(MPT3SAS_FMT
- "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
- ioc->name, data.skey,
- data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+ ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+ data.skey, data.asc, data.ascq,
+ le32_to_cpu(mpi_reply->SenseCount));
}
if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
response_info = le32_to_cpu(mpi_reply->ResponseInfo);
@@ -5016,17 +4959,17 @@ _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
&mpi_request)) != 0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
sas_device->pfa_led_on = 1;
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
goto out;
}
out:
@@ -5056,16 +4999,16 @@ _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
&mpi_request)) != 0) {
- printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, printk(MPT3SAS_FMT
- "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
return;
}
}
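/*
 * The hunk above also pins previously unspecified log levels: the old
 * code used bare printk(MPT3SAS_FMT ...) (implicitly KERN_DEFAULT) and a
 * printk() inside dewtprintk(); the ioc_err()/ioc_info() replacements
 * make those KERN_ERR and KERN_INFO respectively.
 */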
@@ -5133,8 +5076,8 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
event_reply = kzalloc(sz, GFP_KERNEL);
if (!event_reply) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5424,16 +5367,16 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
u16 attached_handle;
u8 link_rate;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "updating handles for sas_host(0x%016llx)\n",
- ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+ dtmprintk(ioc,
+ ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
+ (u64)ioc->sas_hba.sas_address));
sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
* sizeof(Mpi2SasIOUnit0PhyData_t));
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -5483,15 +5426,15 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
if (!num_phys) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc->sas_hba.phy = kcalloc(num_phys,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!ioc->sas_hba.phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc->sas_hba.num_phys = num_phys;
@@ -5501,21 +5444,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
sizeof(Mpi2SasIOUnit0PhyData_t));
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
sas_iounit_pg0, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5524,21 +5467,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5557,15 +5500,15 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
i))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5579,18 +5522,17 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
}
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc->sas_hba.enclosure_handle =
le16_to_cpu(sas_device_pg0.EnclosureHandle);
ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- pr_info(MPT3SAS_FMT
- "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
- ioc->name, ioc->sas_hba.handle,
- (unsigned long long) ioc->sas_hba.sas_address,
- ioc->sas_hba.num_phys) ;
+ ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ ioc->sas_hba.handle,
+ (u64)ioc->sas_hba.sas_address,
+ ioc->sas_hba.num_phys);
if (ioc->sas_hba.enclosure_handle) {
if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -5639,16 +5581,16 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -5656,8 +5598,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
!= 0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
if (sas_address_parent != ioc->sas_hba.sas_address) {
@@ -5684,8 +5626,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_expander = kzalloc(sizeof(struct _sas_node),
GFP_KERNEL);
if (!sas_expander) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -5694,18 +5636,17 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_expander->sas_address_parent = sas_address_parent;
sas_expander->sas_address = sas_address;
- pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
- " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
- handle, parent_handle, (unsigned long long)
- sas_expander->sas_address, sas_expander->num_phys);
+ ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ handle, parent_handle,
+ (u64)sas_expander->sas_address, sas_expander->num_phys);
if (!sas_expander->num_phys)
goto out_fail;
sas_expander->phy = kcalloc(sas_expander->num_phys,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!sas_expander->phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5714,8 +5655,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
sas_address_parent);
if (!mpt3sas_port) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5724,8 +5665,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
for (i = 0 ; i < sas_expander->num_phys ; i++) {
if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
&expander_pg1, i, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5735,8 +5676,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if ((mpt3sas_transport_add_expander_phy(ioc,
&sas_expander->phy[i], expander_pg1,
sas_expander->parent_dev))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5883,9 +5824,8 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
if (!rc)
return 0;
- pr_err(MPT3SAS_FMT
- "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
- ioc->name, desc, (unsigned long long)sas_address, handle);
+ ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+ desc, (u64)sas_address, handle);
return rc;
}
@@ -5979,9 +5919,8 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
- pr_err(MPT3SAS_FMT
- "device is not present handle(0x%04x), flags!!!\n",
- ioc->name, handle);
+ ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
+ handle);
goto out_unlock;
}
@@ -6028,16 +5967,16 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -6051,8 +5990,8 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
- pr_err(MPT3SAS_FMT "device is not present handle(0x04%x)!!!\n",
- ioc->name, handle);
+ ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+ handle);
return -1;
}
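/*
 * A carry-over rather than a new bug: the "handle(0x04%x)" specifier
 * above (and again in _scsih_pcie_add_device further down) looks like a
 * transposition of "handle(0x%04x)" -- it prints a literal "04" followed
 * by unpadded hex instead of a zero-padded handle. The conversion keeps
 * the old string byte-for-byte.
 */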
@@ -6074,16 +6013,15 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
mpt3sas_scsih_enclosure_find_by_handle(ioc,
le16_to_cpu(sas_device_pg0.EnclosureHandle));
if (enclosure_dev == NULL)
- pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
- "doesn't match with enclosure device!\n",
- ioc->name, sas_device_pg0.EnclosureHandle);
+ ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+ sas_device_pg0.EnclosureHandle);
}
sas_device = kzalloc(sizeof(struct _sas_device),
GFP_KERNEL);
if (!sas_device) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
@@ -6092,8 +6030,8 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
if (_scsih_get_sas_address(ioc,
le16_to_cpu(sas_device_pg0.ParentDevHandle),
&sas_device->sas_address_parent) != 0)
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_device->enclosure_handle =
le16_to_cpu(sas_device_pg0.EnclosureHandle);
if (sas_device->enclosure_handle != 0)
@@ -6158,11 +6096,10 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->pfa_led_on = 0;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__,
- sas_device->handle, (unsigned long long)
- sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__,
+ sas_device->handle, (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
@@ -6180,18 +6117,15 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->sas_address,
sas_device->sas_address_parent);
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, sas_device->handle,
- (unsigned long long) sas_device->sas_address);
+ ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ sas_device->handle, (u64)sas_device->sas_address);
_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__,
- sas_device->handle, (unsigned long long)
- sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__,
+ sas_device->handle, (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
}
@@ -6231,8 +6165,7 @@ _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
status_str = "unknown status";
break;
}
- pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
- ioc->name, status_str);
+ ioc_info(ioc, "sas topology change: (%s)\n", status_str);
pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
"start_phy(%02d), count(%d)\n",
le16_to_cpu(event_data->ExpanderDevHandle),
@@ -6309,8 +6242,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
_scsih_sas_host_refresh(ioc);
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "ignoring expander event\n", ioc->name));
+ dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
return 0;
}
@@ -6339,8 +6271,8 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
/* handle siblings events */
for (i = 0; i < event_data->NumEntries; i++) {
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "ignoring expander event\n", ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "ignoring expander event\n"));
return 0;
}
if (ioc->remove_host || ioc->pci_error_recovery)
@@ -6464,15 +6396,14 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
reason_str = "unknown reason";
break;
}
- pr_info(MPT3SAS_FMT "device status change: (%s)\n"
- "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
- ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- le16_to_cpu(event_data->TaskTag));
+ ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+ reason_str, le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ le16_to_cpu(event_data->TaskTag));
if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
- pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
- event_data->ASC, event_data->ASCQ);
- pr_info("\n");
+ pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
+ event_data->ASC, event_data->ASCQ);
+ pr_cont("\n");
}
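/*
 * The pr_cont() conversions above fix a real logging artifact: the old
 * follow-on pr_info(MPT3SAS_FMT ...) and the trailing pr_info("\n") each
 * started a *new* KERN_INFO record, splitting one logical message across
 * several lines, the first of them re-prefixed with the adapter name.
 * pr_cont() appends to the current record instead, e.g. (illustrative
 * only):
 *
 * pr_info("device status change: (smart data)");
 * pr_cont(", ASC(0x5d), ASCQ(0x0)\n");
 */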
/**
@@ -6605,20 +6536,16 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
desc = "nvme failure status";
break;
default:
- pr_err(MPT3SAS_FMT
- " NVMe discovery error(0x%02x): wwid(0x%016llx),"
- "handle(0x%04x)\n", ioc->name, access_status,
- (unsigned long long)wwid, handle);
+ ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
+ access_status, (u64)wwid, handle);
return rc;
}
if (!rc)
return rc;
- pr_info(MPT3SAS_FMT
- "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
- ioc->name, desc,
- (unsigned long long)wwid, handle);
+ ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
+ desc, (u64)wwid, handle);
return rc;
}
@@ -6634,22 +6561,22 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
{
struct MPT3SAS_TARGET *sas_target_priv_data;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)
- pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__,
- pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
+ __func__,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
if (pcie_device->starget && pcie_device->starget->hostdata) {
sas_target_priv_data = pcie_device->starget->hostdata;
@@ -6658,39 +6585,35 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
}
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), wwid (0x%016llx)\n",
- ioc->name, pcie_device->handle,
- (unsigned long long) pcie_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ pcie_device->handle, (u64)pcie_device->wwid);
if (pcie_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "removing : enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
if (pcie_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "removing: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, pcie_device->enclosure_level,
- pcie_device->connector_name);
+ ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
if (pcie_device->starget)
scsi_remove_target(&pcie_device->starget->dev);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)
- pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__, pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
+ __func__,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
kfree(pcie_device->serial_number);
}
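/*
 * On the (u64) casts replacing (unsigned long long) in these hunks: the
 * kernel types u64 as unsigned long long on every supported architecture
 * (asm-generic/int-ll64.h), so the shorter cast still matches the
 * %llx/%016llx specifiers exactly.
 */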
@@ -6760,9 +6683,8 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* check if device is present */
if (!(le32_to_cpu(pcie_device_pg0.Flags) &
MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
- pr_info(MPT3SAS_FMT
- "device is not present handle(0x%04x), flags!!!\n",
- ioc->name, handle);
+ ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
+ handle);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
pcie_device_put(pcie_device);
return;
@@ -6806,16 +6728,15 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
@@ -6825,9 +6746,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* check if device is present */
if (!(le32_to_cpu(pcie_device_pg0.Flags) &
MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
- pr_err(MPT3SAS_FMT
- "device is not present handle(0x04%x)!!!\n",
- ioc->name, handle);
+ ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+ handle);
return 0;
}
@@ -6848,8 +6768,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
if (!pcie_device) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
@@ -6890,16 +6810,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* TODO -- Add device name once FW supports it */
if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
&pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
kfree(pcie_device);
return 0;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
kfree(pcie_device);
return 0;
}
@@ -6956,8 +6876,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
status_str = "unknown status";
break;
}
- pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
- ioc->name, status_str);
+ ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
"start_port(%02d), count(%d)\n",
le16_to_cpu(event_data->SwitchDevHandle),
@@ -7030,16 +6949,15 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
return;
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
- ioc->name));
+ dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
return;
}
/* handle siblings events */
for (i = 0; i < event_data->NumEntries; i++) {
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "ignoring switch event\n", ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "ignoring switch event\n"));
return;
}
if (ioc->remove_host || ioc->pci_error_recovery)
@@ -7084,9 +7002,9 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (!test_bit(handle, ioc->pend_os_device_add))
break;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) device not found: convert "
- "event to a device add\n", ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
+ handle));
event_data->PortEntry[i].PortStatus &= 0xF0;
event_data->PortEntry[i].PortStatus |=
MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
@@ -7169,15 +7087,15 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
break;
}
- pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
- "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
- ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->WWID),
- le16_to_cpu(event_data->TaskTag));
+ ioc_info(ioc, "PCIE device status change: (%s)\n"
+ "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
+ reason_str, le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->WWID),
+ le16_to_cpu(event_data->TaskTag));
if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
- pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+ pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
event_data->ASC, event_data->ASCQ);
- pr_info("\n");
+ pr_cont("\n");
}
/**
@@ -7255,12 +7173,12 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
break;
}
- pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
- "\thandle(0x%04x), enclosure logical id(0x%016llx)"
- " number slots(%d)\n", ioc->name, reason_str,
- le16_to_cpu(event_data->EnclosureHandle),
- (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
- le16_to_cpu(event_data->StartSlot));
+ ioc_info(ioc, "enclosure status change: (%s)\n"
+ "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
+ reason_str,
+ le16_to_cpu(event_data->EnclosureHandle),
+ (u64)le64_to_cpu(event_data->EnclosureLogicalID),
+ le16_to_cpu(event_data->StartSlot));
}
/**
@@ -7298,9 +7216,8 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
kzalloc(sizeof(struct _enclosure_node),
GFP_KERNEL);
if (!enclosure_dev) {
- pr_info(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_info(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -7358,10 +7275,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
u8 task_abort_retries;
mutex_lock(&ioc->tm_cmds.mutex);
- pr_info(MPT3SAS_FMT
- "%s: enter: phy number(%d), width(%d)\n",
- ioc->name, __func__, event_data->PhyNum,
- event_data->PortWidth);
+ ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
+ __func__, event_data->PhyNum, event_data->PortWidth);
_scsih_block_io_all_device(ioc);
@@ -7371,12 +7286,12 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
/* sanity checks for retrying this loop */
if (max_retries++ == 5) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
- ioc->name, __func__));
+ dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
goto out;
} else if (max_retries > 1)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
- ioc->name, __func__, max_retries - 1));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: %d retry\n",
+ __func__, max_retries - 1));
termination_count = 0;
query_count = 0;
@@ -7443,9 +7358,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
task_abort_retries = 0;
tm_retry:
if (task_abort_retries++ == 60) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: ABORT_TASK: giving up\n", ioc->name,
- __func__));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
+ __func__));
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
goto broadcast_aen_retry;
}
@@ -7474,9 +7389,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
}
if (ioc->broadcast_aen_pending) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: loop back due to pending AEN\n",
- ioc->name, __func__));
+ dewtprintk(ioc,
+ ioc_info(ioc,
+ "%s: loop back due to pending AEN\n",
+ __func__));
ioc->broadcast_aen_pending = 0;
goto broadcast_aen_retry;
}
@@ -7485,9 +7401,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
out_no_lock:
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - exit, query_count = %d termination_count = %d\n",
- ioc->name, __func__, query_count, termination_count));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
+ __func__, query_count, termination_count));
ioc->broadcast_aen_busy = 0;
if (!ioc->shost_recovery)
@@ -7509,13 +7425,13 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
- pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
- (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
- "start" : "stop");
+ ioc_info(ioc, "discovery event: (%s)",
+ event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+ "start" : "stop");
if (event_data->DiscoveryStatus)
- pr_info("discovery_status(0x%08x)",
- le32_to_cpu(event_data->DiscoveryStatus));
- pr_info("\n");
+ pr_cont("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_cont("\n");
}
if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
@@ -7545,20 +7461,16 @@ _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
switch (event_data->ReasonCode) {
case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
- pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
- "(handle:0x%04x, sas_address:0x%016llx,"
- "physical_port:0x%02x) has failed",
- ioc->name, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- event_data->PhysicalPort);
+ ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
+ le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ event_data->PhysicalPort);
break;
case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
- pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
- "(handle:0x%04x, sas_address:0x%016llx,"
- "physical_port:0x%02x) has timed out",
- ioc->name, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- event_data->PhysicalPort);
+ ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
+ le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ event_data->PhysicalPort);
break;
default:
break;
@@ -7581,11 +7493,10 @@ _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
return;
- pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
- ioc->name,
- (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
- "started" : "completed",
- event_data->Flags);
+ ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
+ (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
+ "started" : "completed",
+ event_data->Flags);
if (event_data->EnumerationStatus)
pr_cont("enumeration_status(0x%08x)",
le32_to_cpu(event_data->EnumerationStatus));
@@ -7617,8 +7528,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
mutex_lock(&ioc->scsih_cmds.mutex);
if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -7626,8 +7536,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
rc = -EAGAIN;
goto out;
@@ -7641,9 +7550,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
mpi_request->PhysDiskNum = phys_disk_num;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
- "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
- handle, phys_disk_num));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
+ handle, phys_disk_num));
init_completion(&ioc->scsih_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
@@ -7668,15 +7577,13 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
log_info = 0;
ioc_status &= MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "IR RAID_ACTION: failed: ioc_status(0x%04x), "
- "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
- log_info));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
+ ioc_status, log_info));
rc = -EFAULT;
} else
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "IR RAID_ACTION: completed successfully\n",
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
}
out:
@@ -7721,9 +7628,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
if (!wwid) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -7736,9 +7642,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
if (!raid_device) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -7781,9 +7686,8 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_target_priv_data = starget->hostdata;
sas_target_priv_data->deleted = 1;
}
- pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, raid_device->handle,
- (unsigned long long) raid_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ raid_device->handle, (u64)raid_device->wwid);
list_del(&raid_device->list);
kfree(raid_device);
}
@@ -7925,16 +7829,16 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -7964,10 +7868,10 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
- pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
- ioc->name, (le32_to_cpu(event_data->Flags) &
- MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
- "foreign" : "native", event_data->NumElements);
+ ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
+ le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
+ "foreign" : "native",
+ event_data->NumElements);
for (i = 0; i < event_data->NumElements; i++, element++) {
switch (element->ReasonCode) {
case MPI2_EVENT_IR_CHANGE_RC_ADDED:
@@ -8123,10 +8027,11 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
handle = le16_to_cpu(event_data->VolDevHandle);
state = le32_to_cpu(event_data->NewValue);
if (!ioc->hide_ir_msg)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
- ioc->name, __func__, handle,
- le32_to_cpu(event_data->PreviousValue), state));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ __func__, handle,
+ le32_to_cpu(event_data->PreviousValue),
+ state));
switch (state) {
case MPI2_RAID_VOL_STATE_MISSING:
case MPI2_RAID_VOL_STATE_FAILED:
@@ -8146,17 +8051,15 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
if (!wwid) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
break;
}
raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
if (!raid_device) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
break;
}
@@ -8207,10 +8110,11 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
state = le32_to_cpu(event_data->NewValue);
if (!ioc->hide_ir_msg)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
- ioc->name, __func__, handle,
- le32_to_cpu(event_data->PreviousValue), state));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ __func__, handle,
+ le32_to_cpu(event_data->PreviousValue),
+ state));
switch (state) {
case MPI2_RAID_PD_STATE_ONLINE:
@@ -8231,16 +8135,16 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
&sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -8294,11 +8198,10 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
if (!reason_str)
return;
- pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
- "\thandle(0x%04x), percent complete(%d)\n",
- ioc->name, reason_str,
- le16_to_cpu(event_data->VolDevHandle),
- event_data->PercentComplete);
+ ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
+ reason_str,
+ le16_to_cpu(event_data->VolDevHandle),
+ event_data->PercentComplete);
}
/**
@@ -8379,9 +8282,8 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
mpt3sas_scsih_enclosure_find_by_handle(ioc,
le16_to_cpu(sas_device_pg0->EnclosureHandle));
if (enclosure_dev == NULL)
- pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
- "doesn't match with enclosure device!\n",
- ioc->name, sas_device_pg0->EnclosureHandle);
+ ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+ sas_device_pg0->EnclosureHandle);
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
@@ -8475,8 +8377,7 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
enclosure_dev =
kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
if (!enclosure_dev) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
return;
}
@@ -8513,7 +8414,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
u16 handle;
u32 device_info;
- pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+ ioc_info(ioc, "search for end-devices: start\n");
if (list_empty(&ioc->sas_device_list))
goto out;
@@ -8534,8 +8435,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
}
out:
- pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
- ioc->name);
+ ioc_info(ioc, "search for end-devices: complete\n");
}
/**
@@ -8628,7 +8528,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
u16 handle;
u32 device_info;
- pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+ ioc_info(ioc, "search for end-devices: start\n");
if (list_empty(&ioc->pcie_device_list))
goto out;
@@ -8640,10 +8540,9 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from %s: "
- "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
- __func__, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ __func__, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(pcie_device_pg0.DevHandle);
@@ -8653,8 +8552,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
}
out:
- pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n",
- ioc->name);
+ ioc_info(ioc, "search for PCIe end-devices: complete\n");
}
/**
@@ -8735,8 +8633,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->ir_firmware)
return;
- pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
- ioc->name);
+ ioc_info(ioc, "search for raid volumes: start\n");
if (list_empty(&ioc->raid_device_list))
goto out;
@@ -8779,8 +8676,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
}
}
out:
- pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
- ioc->name);
+ ioc_info(ioc, "search for responding raid volumes: complete\n");
}
/**
@@ -8852,7 +8748,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
u64 sas_address;
u16 handle;
- pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
+ ioc_info(ioc, "search for expanders: start\n");
if (list_empty(&ioc->sas_expander_list))
goto out;
@@ -8875,7 +8771,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
}
out:
- pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
+ ioc_info(ioc, "search for expanders: complete\n");
}
/**
@@ -8893,12 +8789,10 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
unsigned long flags;
LIST_HEAD(head);
- pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: start\n");
/* removing unresponding end devices */
- pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: end-devices\n");
/*
* Iterate, pulling off devices marked as non-responding. We become the
* owner for the reference the list had on any object we prune.
@@ -8922,9 +8816,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
sas_device_put(sas_device);
}
- pr_info(MPT3SAS_FMT
- " Removing unresponding devices: pcie end-devices\n"
- , ioc->name);
+ ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
INIT_LIST_HEAD(&head);
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
list_for_each_entry_safe(pcie_device, pcie_device_next,
@@ -8944,8 +8836,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
/* removing unresponding volumes */
if (ioc->ir_firmware) {
- pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: volumes\n");
list_for_each_entry_safe(raid_device, raid_device_next,
&ioc->raid_device_list, list) {
if (!raid_device->responding)
@@ -8957,8 +8848,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
}
/* removing unresponding expanders */
- pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: expanders\n");
spin_lock_irqsave(&ioc->sas_node_lock, flags);
INIT_LIST_HEAD(&tmp_list);
list_for_each_entry_safe(sas_expander, sas_expander_next,
@@ -8974,8 +8864,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
_scsih_expander_node_remove(ioc, sas_expander);
}
- pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: complete\n");
/* unblock devices */
_scsih_ublock_io_all_device(ioc);
@@ -8992,8 +8881,8 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
for (i = 0 ; i < sas_expander->num_phys ; i++) {
if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
&expander_pg1, i, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -9029,11 +8918,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
u8 retry_count;
unsigned long flags;
- pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
+ ioc_info(ioc, "scan devices: start\n");
_scsih_sas_host_refresh(ioc);
- pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: expanders start\n");
/* expanders */
handle = 0xFFFF;
@@ -9042,10 +8931,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(expander_pg0.DevHandle);
@@ -9057,25 +8944,22 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
_scsih_refresh_expander_links(ioc, expander_device,
handle);
else {
- pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(expander_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(expander_pg0.SASAddress));
_scsih_expander_add(ioc, handle);
- pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(expander_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(expander_pg0.SASAddress));
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: expanders complete\n");
if (!ioc->ir_firmware)
goto skip_to_sas;
- pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: phys disk start\n");
/* phys disk */
phys_disk_num = 0xFF;
@@ -9085,10 +8969,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
phys_disk_num = pd_pg0.PhysDiskNum;
@@ -9105,19 +8987,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle,
&sas_address)) {
- pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
- " handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
mpt3sas_transport_update_links(ioc, sas_address,
handle, sas_device_pg0.PhyNum,
MPI2_SAS_NEG_LINK_RATE_1_5);
@@ -9131,17 +9010,15 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1)) {
ssleep(1);
}
- pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
- " handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: phys disk complete\n");
- pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: volumes start\n");
/* volumes */
handle = 0xFFFF;
@@ -9150,10 +9027,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(volume_pg1.DevHandle);
@@ -9170,10 +9045,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
@@ -9182,23 +9055,19 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
element.VolDevHandle = volume_pg1.DevHandle;
- pr_info(MPT3SAS_FMT
- "\tBEFORE adding volume: handle (0x%04x)\n",
- ioc->name, volume_pg1.DevHandle);
+ ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
+ volume_pg1.DevHandle);
_scsih_sas_volume_add(ioc, &element);
- pr_info(MPT3SAS_FMT
- "\tAFTER adding volume: handle (0x%04x)\n",
- ioc->name, volume_pg1.DevHandle);
+ ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
+ volume_pg1.DevHandle);
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: volumes complete\n");
skip_to_sas:
- pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: end devices start\n");
/* sas devices */
handle = 0xFFFF;
@@ -9208,10 +9077,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
- " ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(sas_device_pg0.DevHandle);
@@ -9226,10 +9093,9 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
}
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
- pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
mpt3sas_transport_update_links(ioc, sas_address, handle,
sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
retry_count = 0;
@@ -9241,16 +9107,13 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
0)) {
ssleep(1);
}
- pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
- ioc->name);
- pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: end devices complete\n");
+ ioc_info(ioc, "\tscan devices: pcie end devices start\n");
/* pcie devices */
handle = 0xFFFF;
@@ -9260,10 +9123,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
& MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from pcie end device"
- " scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(pcie_device_pg0.DevHandle);
@@ -9280,14 +9141,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
_scsih_pcie_add_device(ioc, handle);
- pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: "
- "handle (0x%04x), wwid(0x%016llx)\n", ioc->name,
- handle,
- (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
+ ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
+ handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
}
- pr_info(MPT3SAS_FMT "\tpcie devices: pcie end devices complete\n",
- ioc->name);
- pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
+ ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
+ ioc_info(ioc, "scan devices: complete\n");
}
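Worth noting across the scan hunks above: MPI page fields arrive little-endian, so every value passes through le16_to_cpu()/le32_to_cpu()/le64_to_cpu() before printing, and the older (unsigned long long) casts become (u64) so the printed argument reliably matches the %llx specifier. Condensed sketch, with pg0 standing in for an MPI SAS device page 0 structure:

	u16 handle = le16_to_cpu(pg0.DevHandle);	/* wire LE -> CPU order */

	ioc_info(ioc, "handle (0x%04x), sas_addr(0x%016llx)\n",
		 handle, (u64)le64_to_cpu(pg0.SASAddress));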
/**
@@ -9298,8 +9156,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
*/
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
/**
@@ -9311,8 +9168,7 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
ioc->scsih_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
@@ -9340,8 +9196,7 @@ mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
!ioc->sas_hba.num_phys)) {
_scsih_prep_device_scan(ioc);
@@ -9396,9 +9251,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
if (missing_delay[0] != -1 && missing_delay[1] != -1)
mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
missing_delay[1]);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "port enable: complete from worker thread\n",
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "port enable: complete from worker thread\n"));
break;
case MPT3SAS_TURN_ON_PFA_LED:
_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
@@ -9496,8 +9350,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
@@ -9564,30 +9418,16 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
switch (le32_to_cpu(*log_code)) {
case MPT2_WARPDRIVE_LC_SSDT:
- pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
- "IO Throttling has occurred in the WarpDrive "
- "subsystem. Check WarpDrive documentation for "
- "additional details.\n", ioc->name);
+ ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
break;
case MPT2_WARPDRIVE_LC_SSDLW:
- pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
- "Program/Erase Cycles for the WarpDrive subsystem "
- "in degraded range. Check WarpDrive documentation "
- "for additional details.\n", ioc->name);
+ ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
break;
case MPT2_WARPDRIVE_LC_SSDLF:
- pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
- "There are no Program/Erase Cycles for the "
- "WarpDrive subsystem. The storage device will be "
- "in read-only mode. Check WarpDrive documentation "
- "for additional details.\n", ioc->name);
+ ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
break;
case MPT2_WARPDRIVE_LC_BRMF:
- pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
- "The Backup Rail Monitor has failed on the "
- "WarpDrive subsystem. Check WarpDrive "
- "documentation for additional details.\n",
- ioc->name);
+ ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
break;
}
@@ -9613,9 +9453,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
(Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
switch (ActiveCableEventData->ReasonCode) {
case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
- pr_notice(MPT3SAS_FMT
- "Currently an active cable with ReceptacleID %d\n",
- ioc->name, ActiveCableEventData->ReceptacleID);
+ ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
+ ActiveCableEventData->ReceptacleID);
pr_notice("cannot be powered and devices connected\n");
pr_notice("to this active cable will not be seen\n");
pr_notice("This active cable requires %d mW of power\n",
@@ -9623,9 +9462,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
break;
case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
- pr_notice(MPT3SAS_FMT
- "Currently a cable with ReceptacleID %d\n",
- ioc->name, ActiveCableEventData->ReceptacleID);
+ ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
+ ActiveCableEventData->ReceptacleID);
pr_notice(
"is not running at optimal speed(12 Gb/s rate)\n");
break;
@@ -9640,8 +9478,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
fw_event = alloc_fw_event_work(sz);
if (!fw_event) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
@@ -9690,11 +9528,9 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
sas_expander->sas_address_parent);
- pr_info(MPT3SAS_FMT
- "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name,
- sas_expander->handle, (unsigned long long)
- sas_expander->sas_address);
+ ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
+ sas_expander->handle,
+ (unsigned long long)sas_expander->sas_address);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_del(&sas_expander->list);
@@ -9729,16 +9565,14 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
mutex_lock(&ioc->scsih_cmds.mutex);
if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
goto out;
}
ioc->scsih_cmds.status = MPT3_CMD_PENDING;
smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
goto out;
}
@@ -9751,24 +9585,22 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
if (!ioc->hide_ir_msg)
- pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
+ ioc_info(ioc, "IR shutdown (sending)\n");
init_completion(&ioc->scsih_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
goto out;
}
if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
mpi_reply = ioc->scsih_cmds.reply;
if (!ioc->hide_ir_msg)
- pr_info(MPT3SAS_FMT "IR shutdown "
- "(complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
}
out:
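_scsih_ir_shutdown() above follows the driver's standard blocking firmware-command shape: take the command mutex, claim a request slot (smid), post the frame, then sleep on a completion with a 10-second timeout. Condensed from the hunks above, error paths trimmed:

	mutex_lock(&ioc->scsih_cmds.mutex);
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);	/* request slot */
	/* ... build the MPI request frame for this smid ... */
	init_completion(&ioc->scsih_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);		/* post to firmware */
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE))
		ioc_err(ioc, "%s: timeout\n", __func__);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);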
@@ -9817,9 +9649,8 @@ static void scsih_remove(struct pci_dev *pdev)
sas_target_priv_data->deleted = 1;
scsi_remove_target(&raid_device->starget->dev);
}
- pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, raid_device->handle,
- (unsigned long long) raid_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ raid_device->handle, (u64)raid_device->wwid);
_scsih_raid_device_remove(ioc, raid_device);
}
list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
@@ -10230,7 +10061,7 @@ scsih_scan_start(struct Scsi_Host *shost)
rc = mpt3sas_port_enable(ioc);
if (rc != 0)
- pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
+ ioc_info(ioc, "port enable: FAILED\n");
}
/**
@@ -10255,9 +10086,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (time >= (300 * HZ)) {
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
- pr_info(MPT3SAS_FMT
- "port enable: FAILED with timeout (timeout=300s)\n",
- ioc->name);
+ ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
ioc->is_driver_loading = 0;
return 1;
}
@@ -10266,16 +10095,15 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
return 0;
if (ioc->start_scan_failed) {
- pr_info(MPT3SAS_FMT
- "port enable: FAILED with (ioc_status=0x%08x)\n",
- ioc->name, ioc->start_scan_failed);
+ ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
+ ioc->start_scan_failed);
ioc->is_driver_loading = 0;
ioc->wait_for_discovery_to_complete = 0;
ioc->remove_host = 1;
return 1;
}
- pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
+ ioc_info(ioc, "port enable: SUCCESS\n");
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
if (ioc->wait_for_discovery_to_complete) {
@@ -10586,28 +10414,22 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ioc->is_mcpu_endpoint) {
/* mCPU MPI support 64K max IO */
shost->max_sectors = 128;
- pr_info(MPT3SAS_FMT
- "The max_sectors value is set to %d\n",
- ioc->name, shost->max_sectors);
+ ioc_info(ioc, "The max_sectors value is set to %d\n",
+ shost->max_sectors);
} else {
if (max_sectors != 0xFFFF) {
if (max_sectors < 64) {
shost->max_sectors = 64;
- pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
- "for max_sectors, range is 64 to 32767. " \
- "Assigning value of 64.\n", \
- ioc->name, max_sectors);
+ ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
+ max_sectors);
} else if (max_sectors > 32767) {
shost->max_sectors = 32767;
- pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
- "for max_sectors, range is 64 to 32767." \
- "Assigning default value of 32767.\n", \
- ioc->name, max_sectors);
+ ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
+ max_sectors);
} else {
shost->max_sectors = max_sectors & 0xFFFE;
- pr_info(MPT3SAS_FMT
- "The max_sectors value is set to %d\n",
- ioc->name, shost->max_sectors);
+ ioc_info(ioc, "The max_sectors value is set to %d\n",
+ shost->max_sectors);
}
}
}
@@ -10627,16 +10449,16 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->firmware_event_thread = alloc_ordered_workqueue(
ioc->firmware_event_name, 0);
if (!ioc->firmware_event_thread) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rv = -ENODEV;
goto out_thread_fail;
}
ioc->is_driver_loading = 1;
if ((mpt3sas_base_attach(ioc))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rv = -ENODEV;
goto out_attach_fail;
}
@@ -10657,8 +10479,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rv = scsi_add_host(shost, &pdev->dev);
if (rv) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_add_shost_fail;
}
@@ -10695,9 +10517,8 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
flush_scheduled_work();
scsi_block_requests(shost);
device_state = pci_choose_state(pdev, state);
- pr_info(MPT3SAS_FMT
- "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
- ioc->name, pdev, pci_name(pdev), device_state);
+ ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
pci_save_state(pdev);
mpt3sas_base_free_resources(ioc);
@@ -10719,9 +10540,8 @@ scsih_resume(struct pci_dev *pdev)
pci_power_t device_state = pdev->current_state;
int r;
- pr_info(MPT3SAS_FMT
- "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
- ioc->name, pdev, pci_name(pdev), device_state);
+ ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake(pdev, PCI_D0, 0);
@@ -10753,8 +10573,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
- ioc->name, state);
+ ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
switch (state) {
case pci_channel_io_normal:
@@ -10791,8 +10610,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
int rc;
- pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
- ioc->name);
+ ioc_info(ioc, "PCI error: slot reset callback!!\n");
ioc->pci_error_recovery = 0;
ioc->pdev = pdev;
@@ -10803,8 +10621,8 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
- (rc == 0) ? "success" : "failed");
+ ioc_warn(ioc, "hard reset: %s\n",
+ (rc == 0) ? "success" : "failed");
if (!rc)
return PCI_ERS_RESULT_RECOVERED;
@@ -10826,9 +10644,8 @@ scsih_pci_resume(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
+ ioc_info(ioc, "PCI error: resume callback!!\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
mpt3sas_base_start_watchdog(ioc);
scsi_unblock_requests(ioc->shost);
}
@@ -10843,8 +10660,7 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
- ioc->name);
+ ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
/* TODO - dump whatever for debugging purposes */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index f8cc2677c1cd..6a8a3c09b4b1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -146,25 +146,22 @@ _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
u32 ioc_status;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT
- "handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n",
- ioc->name, handle, ioc_status,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
return -EIO;
}
@@ -310,16 +307,14 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
mutex_lock(&ioc->transport_cmds.mutex);
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -329,26 +324,22 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
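The poll loop converted above retries once per second and gives up after ten attempts; condensed for reference:

	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			ioc_err(ioc, "%s: failed due to ioc not operational\n",
				__func__);
			return -EFAULT;
		}
		ssleep(1);
		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
			 __func__, wait_state_count);
	}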
@@ -359,9 +350,8 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
data_out_sz = sizeof(struct rep_manu_request);
data_in_sz = sizeof(struct rep_manu_reply);
- data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz,
- &data_out_dma);
-
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,

+ &data_out_dma, GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -388,16 +378,15 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - send to sas_addr(0x%016llx)\n",
- ioc->name, (unsigned long long)sas_address));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "report_manufacture - send to sas_addr(0x%016llx)\n",
+ (u64)sas_address));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -405,17 +394,16 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
goto issue_host_reset;
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - complete\n", ioc->name));
+ dtransportprintk(ioc, ioc_info(ioc, "report_manufacture - complete\n"));
if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
u8 *tmp;
mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - reply data transfer size(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "report_manufacture - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
sizeof(struct rep_manu_reply))
@@ -439,8 +427,8 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
manufacture_reply->component_revision_id;
}
} else
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - no reply\n", ioc->name));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "report_manufacture - no reply\n"));
issue_host_reset:
if (issue_reset)
@@ -448,7 +436,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
- pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
data_out, data_out_dma);
mutex_unlock(&ioc->transport_cmds.mutex);
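Alongside the logging cleanup, this hunk swaps the deprecated pci_alloc_consistent()/pci_free_consistent() pair for the generic DMA API. The old wrapper allocated with an implicit GFP_ATOMIC; the replacement names the device generically and passes GFP_KERNEL explicitly, which is safe here since these paths already sleep under a mutex. Minimal sketch of the converted shape, with sz/data_out/data_out_dma as in the surrounding code:

	data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
				      GFP_KERNEL);
	if (!data_out)
		return -ENOMEM;		/* surrounding code jumps to 'out' */
	/* ... issue the SMP passthrough using data_out_dma ... */
	dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma);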
@@ -643,8 +631,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
mpt3sas_port = kzalloc(sizeof(struct _sas_port),
GFP_KERNEL);
if (!mpt3sas_port) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return NULL;
}
@@ -655,22 +643,21 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (!sas_node) {
- pr_err(MPT3SAS_FMT
- "%s: Could not find parent sas_address(0x%016llx)!\n",
- ioc->name, __func__, (unsigned long long)sas_address);
+ ioc_err(ioc, "%s: Could not find parent sas_address(0x%016llx)!\n",
+ __func__, (u64)sas_address);
goto out_fail;
}
if ((_transport_set_identify(ioc, handle,
&mpt3sas_port->remote_identify))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
@@ -687,20 +674,20 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
}
if (!mpt3sas_port->num_phys) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
if (!sas_node->parent_dev) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
port = sas_port_alloc_num(sas_node->parent_dev);
if ((sas_port_add(port))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
@@ -729,17 +716,17 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
sas_device = mpt3sas_get_sdev_by_addr(ioc,
mpt3sas_port->remote_identify.sas_address);
if (!sas_device) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_info(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
goto out_fail;
}
sas_device->pend_sas_rphy_add = 1;
}
if ((sas_rphy_add(rphy))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
}
if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -861,14 +848,14 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
phy = sas_phy_alloc(parent_dev, phy_index);
if (!phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
&mpt3sas_phy->identify))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -890,8 +877,8 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
phy_pg0.ProgrammedLinkRate >> 4);
if ((sas_phy_add(phy))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -929,14 +916,14 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
phy = sas_phy_alloc(parent_dev, phy_index);
if (!phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
&mpt3sas_phy->identify))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -960,8 +947,8 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
expander_pg1.ProgrammedLinkRate >> 4);
if ((sas_phy_add(phy))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -1098,16 +1085,14 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
mutex_lock(&ioc->transport_cmds.mutex);
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1117,26 +1102,22 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1146,7 +1127,8 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
sz = sizeof(struct phy_error_log_request) +
sizeof(struct phy_error_log_reply);
- data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -1179,17 +1161,16 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
data_out_dma + sizeof(struct phy_error_log_request),
sizeof(struct phy_error_log_reply));
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
- ioc->name, (unsigned long long)phy->identify.sas_address,
- phy->number));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
+ (u64)phy->identify.sas_address,
+ phy->number));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1197,16 +1178,15 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
goto issue_host_reset;
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - complete\n", ioc->name));
+ dtransportprintk(ioc, ioc_info(ioc, "phy_error_log - complete\n"));
if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - reply data transfer size(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
sizeof(struct phy_error_log_reply))
@@ -1215,9 +1195,9 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
phy_error_log_reply = data_out +
sizeof(struct phy_error_log_request);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - function_result(%d)\n",
- ioc->name, phy_error_log_reply->function_result));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - function_result(%d)\n",
+ phy_error_log_reply->function_result));
phy->invalid_dword_count =
be32_to_cpu(phy_error_log_reply->invalid_dword);
@@ -1229,8 +1209,8 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
be32_to_cpu(phy_error_log_reply->phy_reset_problem);
rc = 0;
} else
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - no reply\n", ioc->name));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - no reply\n"));
issue_host_reset:
if (issue_reset)
@@ -1238,7 +1218,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
- pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+ dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma);
mutex_unlock(&ioc->transport_cmds.mutex);
return rc;
@@ -1273,17 +1253,16 @@ _transport_get_linkerrors(struct sas_phy *phy)
/* get hba phy error logs */
if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
phy->number))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, phy->number,
- le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
+ phy->number,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
phy->running_disparity_error_count =
@@ -1411,16 +1390,14 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
mutex_lock(&ioc->transport_cmds.mutex);
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1430,26 +1407,22 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1459,7 +1432,8 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
sz = sizeof(struct phy_control_request) +
sizeof(struct phy_control_reply);
- data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -1497,17 +1471,16 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
data_out_dma + sizeof(struct phy_control_request),
sizeof(struct phy_control_reply));
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
- ioc->name, (unsigned long long)phy->identify.sas_address,
- phy->number, phy_operation));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+ (u64)phy->identify.sas_address,
+ phy->number, phy_operation));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1515,16 +1488,15 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
goto issue_host_reset;
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - complete\n", ioc->name));
+ dtransportprintk(ioc, ioc_info(ioc, "phy_control - complete\n"));
if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - reply data transfer size(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
sizeof(struct phy_control_reply))
@@ -1533,14 +1505,14 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
phy_control_reply = data_out +
sizeof(struct phy_control_request);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - function_result(%d)\n",
- ioc->name, phy_control_reply->function_result));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - function_result(%d)\n",
+ phy_control_reply->function_result));
rc = 0;
} else
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - no reply\n", ioc->name));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - no reply\n"));
issue_host_reset:
if (issue_reset)
@@ -1548,7 +1520,8 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
- pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+ dma_free_coherent(&ioc->pdev->dev, sz, data_out,
+ data_out_dma);
mutex_unlock(&ioc->transport_cmds.mutex);
return rc;
@@ -1591,16 +1564,15 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
mpi_request.PhyNum = phy->number;
if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
return 0;
}
@@ -1647,23 +1619,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
sizeof(Mpi2SasIOUnit0PhyData_t));
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
sas_iounit_pg0, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
@@ -1672,10 +1644,8 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
if (sas_iounit_pg0->PhyData[i].PortFlags &
MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
- pr_err(MPT3SAS_FMT "discovery is active on " \
- "port = %d, phy = %d: unable to enable/disable "
- "phys, try again later!\n", ioc->name,
- sas_iounit_pg0->PhyData[i].Port, i);
+ ioc_err(ioc, "discovery is active on port = %d, phy = %d: unable to enable/disable phys, try again later!\n",
+ sas_iounit_pg0->PhyData[i].Port, i);
discovery_active = 1;
}
}
@@ -1690,23 +1660,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
@@ -1798,23 +1768,23 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
@@ -1833,8 +1803,8 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
sz)) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
@@ -1922,8 +1892,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
unsigned int reslen = 0;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
rc = -EFAULT;
goto job_done;
}
@@ -1933,8 +1902,8 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
goto job_done;
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
- __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n",
+ __func__);
rc = -EAGAIN;
goto out;
}
@@ -1959,26 +1928,22 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto unmap_in;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto unmap_in;
}
@@ -1999,16 +1964,15 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in,
dma_len_in - 4);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - sending smp request\n", ioc->name, __func__));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "%s: sending smp request\n", __func__));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s : timeout\n",
- __func__, ioc->name);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) {
@@ -2018,12 +1982,11 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
}
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - complete\n", ioc->name, __func__));
+ dtransportprintk(ioc, ioc_info(ioc, "%s - complete\n", __func__));
if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) {
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - no reply\n", ioc->name, __func__));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "%s: no reply\n", __func__));
rc = -ENXIO;
goto unmap_in;
}
@@ -2031,9 +1994,9 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
mpi_reply = ioc->transport_cmds.reply;
dtransportprintk(ioc,
- pr_info(MPT3SAS_FMT "%s - reply data transfer size(%d)\n",
- ioc->name, __func__,
- le16_to_cpu(mpi_reply->ResponseDataLength)));
+ ioc_info(ioc, "%s: reply data transfer size(%d)\n",
+ __func__,
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
memcpy(job->reply, mpi_reply, sizeof(*mpi_reply));
job->reply_len = sizeof(*mpi_reply);
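The ioc_err()/ioc_warn()/ioc_info() helpers that replace the open-coded pr_*(MPT3SAS_FMT ..., ioc->name, ...) calls in this file come from the mpt3sas_base.h changes earlier in this diff. They are expected to expand to roughly the following sketch, so every message is still prefixed with the adapter name that the old format strings threaded through by hand:

	#define ioc_err(ioc, fmt, ...) \
		pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
	#define ioc_warn(ioc, fmt, ...) \
		pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
	#define ioc_info(ioc, fmt, ...) \
		pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)

This also removes the argument-ordering mistakes visible above, where a few call sites passed __func__ before ioc->name.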
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index cae7c1eaef34..6ac453fd5937 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -72,8 +72,7 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
u16 sz, event_data_sz;
unsigned long flags;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
@@ -85,23 +84,23 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
memcpy(&mpi_reply->EventData, event_data,
sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: add to driver event log\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: add to driver event log\n",
+ __func__));
mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
kfree(mpi_reply);
out:
/* clearing the diag_trigger_active flag */
spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: clearing diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: clearing diag_trigger_active flag\n",
+ __func__));
ioc->diag_trigger_active = 0;
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -115,22 +114,22 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
{
u8 issue_reset = 0;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
/* release the diag buffer trace */
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: release trace diag buffer\n", ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: release trace diag buffer\n",
+ __func__));
mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
&issue_reset);
}
_mpt3sas_raise_sigio(ioc, event_data);
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -168,9 +167,9 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
by_pass_checks:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - trigger_bitmask = 0x%08x\n",
- ioc->name, __func__, trigger_bitmask));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - trigger_bitmask = 0x%08x\n",
+ __func__, trigger_bitmask));
/* don't send trigger if a trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -182,9 +181,9 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
if (ioc->diag_trigger_master.MasterData & trigger_bitmask) {
found_match = 1;
ioc->diag_trigger_active = 1;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
}
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
@@ -202,8 +201,8 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -239,9 +238,9 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
return;
}
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
- ioc->name, __func__, event, log_entry_qualifier));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
+ __func__, event, log_entry_qualifier));
/* don't send trigger if a trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -263,26 +262,26 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
}
found_match = 1;
ioc->diag_trigger_active = 1;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
}
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
if (!found_match)
goto out;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
event_data.trigger_type = MPT3SAS_TRIGGER_EVENT;
event_data.u.event.EventValue = event;
event_data.u.event.LogEntryQualifier = log_entry_qualifier;
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -319,9 +318,9 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
return;
}
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
- ioc->name, __func__, sense_key, asc, ascq));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
+ __func__, sense_key, asc, ascq));
/* don't send trigger if a trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -347,9 +346,9 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
if (!found_match)
goto out;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
event_data.trigger_type = MPT3SAS_TRIGGER_SCSI;
event_data.u.scsi.SenseKey = sense_key;
@@ -357,8 +356,8 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
event_data.u.scsi.ASCQ = ascq;
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -393,9 +392,9 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
return;
}
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
- ioc->name, __func__, ioc_status, loginfo));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
+ __func__, ioc_status, loginfo));
/* don't send trigger if a trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -420,15 +419,15 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
if (!found_match)
goto out;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
event_data.trigger_type = MPT3SAS_TRIGGER_MPI;
event_data.u.mpi.IOCStatus = ioc_status;
event_data.u.mpi.IocLogInfo = loginfo;
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index b4927f2b7677..cc07ba41f507 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -127,20 +127,17 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
return;
if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "globally as drives are exposed\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as drives are exposed\n");
return;
}
if (mpt3sas_get_num_volumes(ioc) > 1) {
_warpdrive_disable_ddio(ioc);
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "globally as number of drives > 1\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as number of drives > 1\n");
return;
}
if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
&num_pds)) || !num_pds) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "Failure in computing number of drives\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in computing number of drives\n");
return;
}
@@ -148,15 +145,13 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
sizeof(Mpi2RaidVol0PhysDisk_t));
vol_pg0 = kzalloc(sz, GFP_KERNEL);
if (!vol_pg0) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "Memory allocation failure for RVPG0\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled Memory allocation failure for RVPG0\n");
return;
}
if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "Failure in retrieving RVPG0\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in retrieving RVPG0\n");
kfree(vol_pg0);
return;
}
@@ -166,10 +161,8 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
* assumed for WARPDRIVE, disable direct I/O
*/
if (num_pds > MPT_MAX_WARPDRIVE_PDS) {
- pr_warn(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x): num_mem=%d, "
- "max_mem_allowed=%d\n", ioc->name, raid_device->handle,
- num_pds, MPT_MAX_WARPDRIVE_PDS);
+ ioc_warn(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): num_mem=%d, max_mem_allowed=%d\n",
+ raid_device->handle, num_pds, MPT_MAX_WARPDRIVE_PDS);
kfree(vol_pg0);
return;
}
@@ -179,22 +172,18 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
vol_pg0->PhysDisk[count].PhysDiskNum) ||
le16_to_cpu(pd_pg0.DevHandle) ==
MPT3SAS_INVALID_DEVICE_HANDLE) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
- "disabled for the drive with handle(0x%04x) member"
- "handle retrieval failed for member number=%d\n",
- ioc->name, raid_device->handle,
- vol_pg0->PhysDisk[count].PhysDiskNum);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle retrieval failed for member number=%d\n",
+ raid_device->handle,
+ vol_pg0->PhysDisk[count].PhysDiskNum);
goto out_error;
}
/* Disable direct I/O if member drive lba exceeds 4 bytes */
dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
if (dev_max_lba >> 32) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
- "disabled for the drive with handle(0x%04x) member"
- " handle (0x%04x) unsupported max lba 0x%016llx\n",
- ioc->name, raid_device->handle,
- le16_to_cpu(pd_pg0.DevHandle),
- (unsigned long long)dev_max_lba);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle (0x%04x) unsupported max lba 0x%016llx\n",
+ raid_device->handle,
+ le16_to_cpu(pd_pg0.DevHandle),
+ (u64)dev_max_lba);
goto out_error;
}
@@ -206,41 +195,36 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
* not RAID0
*/
if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x): type=%d, "
- "s_sz=%uK, blk_size=%u\n", ioc->name,
- raid_device->handle, raid_device->volume_type,
- (le32_to_cpu(vol_pg0->StripeSize) *
- le16_to_cpu(vol_pg0->BlockSize)) / 1024,
- le16_to_cpu(vol_pg0->BlockSize));
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): type=%d, s_sz=%uK, blk_size=%u\n",
+ raid_device->handle, raid_device->volume_type,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024,
+ le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
stripe_exp = find_first_bit(&stripe_sz, 32);
if (stripe_exp == 32) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
- ioc->name, raid_device->handle,
- (le32_to_cpu(vol_pg0->StripeSize) *
- le16_to_cpu(vol_pg0->BlockSize)) / 1024);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+ raid_device->handle,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024);
goto out_error;
}
raid_device->stripe_exponent = stripe_exp;
block_sz = le16_to_cpu(vol_pg0->BlockSize);
block_exp = find_first_bit(&block_sz, 16);
if (block_exp == 16) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) invalid block sz %u\n",
- ioc->name, raid_device->handle,
- le16_to_cpu(vol_pg0->BlockSize));
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid block sz %u\n",
+ raid_device->handle, le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
raid_device->block_exponent = block_exp;
raid_device->direct_io_enabled = 1;
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is Enabled for the drive"
- " with handle(0x%04x)\n", ioc->name, raid_device->handle);
+ ioc_info(ioc, "WarpDrive : Direct IO is Enabled for the drive with handle(0x%04x)\n",
+ raid_device->handle);
/*
* WARPDRIVE: Though the following fields are not used for direct IO,
* stored for future purpose:
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 8c91637cd598..3ac34373746c 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -403,29 +403,14 @@ static int pci_go_64(struct pci_dev *pdev)
{
int rc;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit DMA enable failed\n");
return rc;
}
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
}
return rc;
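dma_set_mask_and_coherent() sets both the streaming and the coherent DMA mask in one call and fails if either cannot be satisfied, which is why the nested pci_set_consistent_dma_mask() ladder above collapses into a single 64-bit attempt with a 32-bit retry. As a minimal standalone sketch of the idiom (my_setup_dma() is a hypothetical helper, not part of this patch):

	static int my_setup_dma(struct pci_dev *pdev)
	{
		/* Prefer 64-bit DMA addressing, fall back to 32-bit. */
		if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
			return 0;
		return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	}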
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index cff43bd9f675..3df1428df317 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -336,13 +336,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
* DMA-map SMP request, response buffers
*/
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out;
@@ -416,10 +416,10 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
err_out_2:
dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
err_out:
dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return rc;
}
@@ -904,9 +904,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
break;
case SAS_PROTOCOL_SATA:
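The PCI_DMA_* constants being replaced were plain aliases of the generic DMA directions, so the rename is one-to-one with no behavioural change. For reference, the generic values from include/linux/dma-direction.h:

	enum dma_data_direction {
		DMA_BIDIRECTIONAL = 0,	/* device may read and write the buffer */
		DMA_TO_DEVICE = 1,	/* CPU fills, device reads (e.g. SMP request) */
		DMA_FROM_DEVICE = 2,	/* device fills, CPU reads (e.g. SMP response) */
		DMA_NONE = 3,
	};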
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index b3cd9a6b1d30..2458974d1af6 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -143,8 +143,8 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
case RESOURCE_UNCACHED_MEMORY:
size = round_up(size, 8);
- res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
- &res->bus_addr);
+ res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
+ &res->bus_addr, GFP_KERNEL);
if (!res->virt_addr) {
dev_err(&mhba->pdev->dev,
"unable to allocate consistent mem,"
@@ -175,7 +175,7 @@ static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
switch (res->type) {
case RESOURCE_UNCACHED_MEMORY:
- pci_free_consistent(mhba->pdev, res->size,
+ dma_free_coherent(&mhba->pdev->dev, res->size,
res->virt_addr, res->bus_addr);
break;
case RESOURCE_CACHED_MEMORY:
@@ -211,14 +211,14 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
dma_addr_t busaddr;
sg = scsi_sglist(scmd);
- *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
- (int) scmd->sc_data_direction);
+ *sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
+ scmd->sc_data_direction);
if (*sg_count > mhba->max_sge) {
dev_err(&mhba->pdev->dev,
"sg count[0x%x] is bigger than max sg[0x%x].\n",
*sg_count, mhba->max_sge);
- pci_unmap_sg(mhba->pdev, sg, sgnum,
- (int) scmd->sc_data_direction);
+ dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
+ scmd->sc_data_direction);
return -1;
}
for (i = 0; i < *sg_count; i++) {
@@ -246,7 +246,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
if (size == 0)
return 0;
- virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
+ virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+ GFP_KERNEL);
if (!virt_addr)
return -1;
@@ -274,8 +275,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
}
INIT_LIST_HEAD(&cmd->queue_pointer);
- cmd->frame = pci_alloc_consistent(mhba->pdev,
- mhba->ib_max_size, &cmd->frame_phys);
+ cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
+ &cmd->frame_phys, GFP_KERNEL);
if (!cmd->frame) {
dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
" frame,size = %d.\n", mhba->ib_max_size);
@@ -287,7 +288,7 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
dev_err(&mhba->pdev->dev, "failed to allocate memory"
" for internal frame\n");
- pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+ dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
cmd->frame, cmd->frame_phys);
kfree(cmd);
return NULL;
@@ -313,10 +314,10 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
phy_addr = (dma_addr_t) m_sg->baseaddr_l |
(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
- pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+ dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
phy_addr);
}
- pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+ dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
cmd->frame, cmd->frame_phys);
kfree(cmd);
}
@@ -663,16 +664,17 @@ static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
}
}
-static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
+static int mvumi_pci_set_master(struct pci_dev *pdev)
{
- unsigned int ret = 0;
+ int ret = 0;
+
pci_set_master(pdev);
if (IS_DMA64) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
} else
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
return ret;
}
@@ -771,7 +773,7 @@ static void mvumi_release_fw(struct mvumi_hba *mhba)
mvumi_free_cmds(mhba);
mvumi_release_mem_resource(mhba);
mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
- pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+ dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
mhba->handshake_page, mhba->handshake_page_phys);
kfree(mhba->regs);
pci_release_regions(mhba->pdev);
@@ -1339,9 +1341,9 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
}
if (scsi_bufflen(scmd))
- pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+ dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
scsi_sg_count(scmd),
- (int) scmd->sc_data_direction);
+ scmd->sc_data_direction);
cmd->scmd->scsi_done(scmd);
mvumi_return_cmd(mhba, cmd);
}
@@ -2148,9 +2150,9 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
scmd->SCp.ptr = NULL;
if (scsi_bufflen(scmd)) {
- pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+ dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
scsi_sg_count(scmd),
- (int)scmd->sc_data_direction);
+ scmd->sc_data_direction);
}
mvumi_return_cmd(mhba, cmd);
spin_unlock_irqrestore(mhba->shost->host_lock, flags);
@@ -2362,8 +2364,8 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
ret = -ENOMEM;
goto fail_alloc_mem;
}
- mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
- &mhba->handshake_page_phys);
+ mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
+ HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
if (!mhba->handshake_page) {
dev_err(&mhba->pdev->dev,
"failed to allocate memory for handshake\n");
@@ -2383,7 +2385,7 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
fail_ready_state:
mvumi_release_mem_resource(mhba);
- pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+ dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
mhba->handshake_page, mhba->handshake_page_phys);
fail_alloc_page:
kfree(mhba->regs);
@@ -2480,20 +2482,9 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- pci_set_master(pdev);
-
- if (IS_DMA64) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail_set_dma_mask;
- }
- } else {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail_set_dma_mask;
- }
+ ret = mvumi_pci_set_master(pdev);
+ if (ret)
+ goto fail_set_dma_mask;
host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
if (!host) {
@@ -2627,19 +2618,11 @@ static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "enable device failed\n");
return ret;
}
- pci_set_master(pdev);
- if (IS_DMA64) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail;
- }
- } else {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail;
- }
+
+ ret = mvumi_pci_set_master(pdev);
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
if (ret)
goto fail;
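dma_zalloc_coherent(), used for the mvumi resource and frame allocations above, was at this point a thin zeroing wrapper around dma_alloc_coherent(), approximately:

	static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
						dma_addr_t *dma_handle, gfp_t flag)
	{
		return dma_alloc_coherent(dev, size, dma_handle,
					  flag | __GFP_ZERO);
	}

so it preserves the zeroing behaviour of the pci_zalloc_consistent() calls it replaces.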
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
new file mode 100644
index 000000000000..aeb282f617c5
--- /dev/null
+++ b/drivers/scsi/myrb.c
@@ -0,0 +1,3656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/raid_class.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "myrb.h"
+
+static struct raid_template *myrb_raid_template;
+
+static void myrb_monitor(struct work_struct *work);
+static inline void myrb_translate_devstate(void *DeviceState);
+
+static inline int myrb_logical_channel(struct Scsi_Host *shost)
+{
+ return shost->max_channel - 1;
+}
+
+static struct myrb_devstate_name_entry {
+ enum myrb_devstate state;
+ const char *name;
+} myrb_devstate_name_list[] = {
+ { MYRB_DEVICE_DEAD, "Dead" },
+ { MYRB_DEVICE_WO, "WriteOnly" },
+ { MYRB_DEVICE_ONLINE, "Online" },
+ { MYRB_DEVICE_CRITICAL, "Critical" },
+ { MYRB_DEVICE_STANDBY, "Standby" },
+ { MYRB_DEVICE_OFFLINE, "Offline" },
+};
+
+static const char *myrb_devstate_name(enum myrb_devstate state)
+{
+ struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
+ if (entry[i].state == state)
+ return entry[i].name;
+ }
+ return "Unknown";
+}
+
+static struct myrb_raidlevel_name_entry {
+ enum myrb_raidlevel level;
+ const char *name;
+} myrb_raidlevel_name_list[] = {
+ { MYRB_RAID_LEVEL0, "RAID0" },
+ { MYRB_RAID_LEVEL1, "RAID1" },
+ { MYRB_RAID_LEVEL3, "RAID3" },
+ { MYRB_RAID_LEVEL5, "RAID5" },
+ { MYRB_RAID_LEVEL6, "RAID6" },
+ { MYRB_RAID_JBOD, "JBOD" },
+};
+
+static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
+{
+ struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
+ if (entry[i].level == level)
+ return entry[i].name;
+ }
+ return NULL;
+}
+
+/**
+ * myrb_create_mempools - allocates auxiliary data structures
+ *
+ * Return: true on success, false otherwise.
+ */
+static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
+{
+ size_t elem_size, elem_align;
+
+ elem_align = sizeof(struct myrb_sge);
+ elem_size = cb->host->sg_tablesize * elem_align;
+ cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
+ elem_size, elem_align, 0);
+ if (cb->sg_pool == NULL) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate SG pool\n");
+ return false;
+ }
+
+ cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
+ sizeof(struct myrb_dcdb),
+ sizeof(unsigned int), 0);
+ if (!cb->dcdb_pool) {
+ dma_pool_destroy(cb->sg_pool);
+ cb->sg_pool = NULL;
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate DCDB pool\n");
+ return false;
+ }
+
+ snprintf(cb->work_q_name, sizeof(cb->work_q_name),
+ "myrb_wq_%d", cb->host->host_no);
+ cb->work_q = create_singlethread_workqueue(cb->work_q_name);
+ if (!cb->work_q) {
+ dma_pool_destroy(cb->dcdb_pool);
+ cb->dcdb_pool = NULL;
+ dma_pool_destroy(cb->sg_pool);
+ cb->sg_pool = NULL;
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to create workqueue\n");
+ return false;
+ }
+
+ /*
+ * Initialize the Monitoring Timer.
+ */
+ INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
+ queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
+
+ return true;
+}
+
+/**
+ * myrb_destroy_mempools - tears down the memory pools for the controller
+ */
+static void myrb_destroy_mempools(struct myrb_hba *cb)
+{
+ cancel_delayed_work_sync(&cb->monitor_work);
+ destroy_workqueue(cb->work_q);
+
+ dma_pool_destroy(cb->sg_pool);
+ dma_pool_destroy(cb->dcdb_pool);
+}
+
+/**
+ * myrb_reset_cmd - reset command block
+ */
+static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
+{
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
+ memset(mbox, 0, sizeof(union myrb_cmd_mbox));
+ cmd_blk->status = 0;
+}
+
+/**
+ * myrb_qcmd - queues command block for execution
+ */
+static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ void __iomem *base = cb->io_base;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
+
+ cb->write_cmd_mbox(next_mbox, mbox);
+ if (cb->prev_cmd_mbox1->words[0] == 0 ||
+ cb->prev_cmd_mbox2->words[0] == 0)
+ cb->get_cmd_mbox(base);
+ cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
+ cb->prev_cmd_mbox1 = next_mbox;
+ if (++next_mbox > cb->last_cmd_mbox)
+ next_mbox = cb->first_cmd_mbox;
+ cb->next_cmd_mbox = next_mbox;
+}
+
+/**
+ * myrb_exec_cmd - executes command block and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
+ struct myrb_cmdblk *cmd_blk)
+{
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ unsigned long flags;
+
+ cmd_blk->completion = &cmpl;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ cb->qcmd(cb, cmd_blk);
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+
+ WARN_ON(in_interrupt());
+ wait_for_completion(&cmpl);
+ return cmd_blk->status;
+}
+
+/**
+ * myrb_exec_type3 - executes a type 3 command and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_type3(struct myrb_hba *cb,
+ enum myrb_cmd_opcode op, dma_addr_t addr)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+
+ mutex_lock(&cb->dcmd_mutex);
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_DCMD_TAG;
+ mbox->type3.opcode = op;
+ mbox->type3.addr = addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ return status;
+}
+
+/**
+ * myrb_exec_type3D - executes a type 3D command and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
+ enum myrb_cmd_opcode op, struct scsi_device *sdev,
+ struct myrb_pdev_state *pdev_info)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+ dma_addr_t pdev_info_addr;
+
+ pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
+ sizeof(struct myrb_pdev_state),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
+ return MYRB_STATUS_SUBSYS_FAILED;
+
+ mutex_lock(&cb->dcmd_mutex);
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3D.id = MYRB_DCMD_TAG;
+ mbox->type3D.opcode = op;
+ mbox->type3D.channel = sdev->channel;
+ mbox->type3D.target = sdev->id;
+ mbox->type3D.addr = pdev_info_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
+ sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
+ if (status == MYRB_STATUS_SUCCESS &&
+ mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
+ myrb_translate_devstate(pdev_info);
+
+ return status;
+}
+
+static char *myrb_event_msg[] = {
+ "killed because write recovery failed",
+ "killed because of SCSI bus reset failure",
+ "killed because of double check condition",
+ "killed because it was removed",
+ "killed because of gross error on SCSI chip",
+ "killed because of bad tag returned from drive",
+ "killed because of timeout on SCSI command",
+ "killed because of reset SCSI command issued from system",
+ "killed because busy or parity error count exceeded limit",
+ "killed because of 'kill drive' command from system",
+ "killed because of selection timeout",
+ "killed due to SCSI phase sequence error",
+ "killed due to unknown status",
+};
+
+/**
+ * myrb_get_event - get event log from HBA
+ * @cb: pointer to the hba structure
+ * @event: number of the event
+ *
+ * Execute a type 3E command and logs the event message
+ */
+static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_log_entry *ev_buf;
+ dma_addr_t ev_addr;
+ unsigned short status;
+
+ ev_buf = dma_alloc_coherent(&cb->pdev->dev,
+ sizeof(struct myrb_log_entry),
+ &ev_addr, GFP_KERNEL);
+ if (!ev_buf)
+ return;
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3E.id = MYRB_MCMD_TAG;
+ mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
+ mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
+ mbox->type3E.opqual = 1;
+ mbox->type3E.ev_seq = event;
+ mbox->type3E.addr = ev_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (status != MYRB_STATUS_SUCCESS)
+ shost_printk(KERN_INFO, cb->host,
+ "Failed to get event log %d, status %04x\n",
+ event, status);
+
+ else if (ev_buf->seq_num == event) {
+ struct scsi_sense_hdr sshdr;
+
+ memset(&sshdr, 0, sizeof(sshdr));
+ scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
+
+ if (sshdr.sense_key == VENDOR_SPECIFIC &&
+ sshdr.asc == 0x80 &&
+ sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
+ shost_printk(KERN_CRIT, cb->host,
+ "Physical drive %d:%d: %s\n",
+ ev_buf->channel, ev_buf->target,
+ myrb_event_msg[sshdr.ascq]);
+ else
+ shost_printk(KERN_CRIT, cb->host,
+ "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
+ ev_buf->channel, ev_buf->target,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ }
+
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
+ ev_buf, ev_addr);
+}
+
+/**
+ * myrb_get_errtable - retrieves the error table from the controller
+ *
+ * Executes a type 3 command and logs the error table from the controller.
+ */
+static void myrb_get_errtable(struct myrb_hba *cb)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+ struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
+
+ memcpy(&old_table, cb->err_table, sizeof(old_table));
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_MCMD_TAG;
+ mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
+ mbox->type3.addr = cb->err_table_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (status == MYRB_STATUS_SUCCESS) {
+ struct myrb_error_entry *table = cb->err_table;
+ struct myrb_error_entry *new, *old;
+ size_t err_table_offset;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, cb->host) {
+ if (sdev->channel >= myrb_logical_channel(cb->host))
+ continue;
+ err_table_offset = sdev->channel * MYRB_MAX_TARGETS
+ + sdev->id;
+ new = table + err_table_offset;
+ old = &old_table[err_table_offset];
+ if (new->parity_err == old->parity_err &&
+ new->soft_err == old->soft_err &&
+ new->hard_err == old->hard_err &&
+ new->misc_err == old->misc_err)
+ continue;
+ sdev_printk(KERN_CRIT, sdev,
+ "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
+ new->parity_err, new->soft_err,
+ new->hard_err, new->misc_err);
+ }
+ }
+}
+
+/**
+ * myrb_get_ldev_info - retrieves the logical device table from the controller
+ *
+ * Executes a type 3 command and updates the logical device table.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
+{
+ unsigned short status;
+ int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
+ struct Scsi_Host *shost = cb->host;
+
+ status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
+ cb->ldev_info_addr);
+ if (status != MYRB_STATUS_SUCCESS)
+ return status;
+
+ for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
+ struct myrb_ldev_info *old = NULL;
+ struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
+ struct scsi_device *sdev;
+
+ sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
+ ldev_num, 0);
+ if (!sdev) {
+ if (new->state == MYRB_DEVICE_OFFLINE)
+ continue;
+ shost_printk(KERN_INFO, shost,
+ "Adding Logical Drive %d in state %s\n",
+ ldev_num, myrb_devstate_name(new->state));
+ scsi_add_device(shost, myrb_logical_channel(shost),
+ ldev_num, 0);
+ continue;
+ }
+ old = sdev->hostdata;
+ if (new->state != old->state)
+ shost_printk(KERN_INFO, shost,
+ "Logical Drive %d is now %s\n",
+ ldev_num, myrb_devstate_name(new->state));
+ if (new->wb_enabled != old->wb_enabled)
+ sdev_printk(KERN_INFO, sdev,
+ "Logical Drive is now WRITE %s\n",
+ (new->wb_enabled ? "BACK" : "THRU"));
+ memcpy(old, new, sizeof(*new));
+ scsi_device_put(sdev);
+ }
+ return status;
+}
+
+/**
+ * myrb_get_rbld_progress - get rebuild progress information
+ *
+ * Executes a type 3 command and returns the rebuild progress
+ * information.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
+ struct myrb_rbld_progress *rbld)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_rbld_progress *rbld_buf;
+ dma_addr_t rbld_addr;
+ unsigned short status;
+
+ rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
+ sizeof(struct myrb_rbld_progress),
+ &rbld_addr, GFP_KERNEL);
+ if (!rbld_buf)
+ return MYRB_STATUS_RBLD_NOT_CHECKED;
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_MCMD_TAG;
+ mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
+ mbox->type3.addr = rbld_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (rbld)
+ memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
+ rbld_buf, rbld_addr);
+ return status;
+}
+
+/**
+ * myrb_update_rbld_progress - updates the rebuild status
+ *
+ * Updates the rebuild status for the attached logical devices.
+ *
+ */
+static void myrb_update_rbld_progress(struct myrb_hba *cb)
+{
+ struct myrb_rbld_progress rbld_buf;
+ unsigned short status;
+
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+ if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
+ cb->last_rbld_status == MYRB_STATUS_SUCCESS)
+ status = MYRB_STATUS_RBLD_SUCCESS;
+ if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
+ unsigned int blocks_done =
+ rbld_buf.ldev_size - rbld_buf.blocks_left;
+ struct scsi_device *sdev;
+
+ sdev = scsi_device_lookup(cb->host,
+ myrb_logical_channel(cb->host),
+ rbld_buf.ldev_num, 0);
+ if (!sdev)
+ return;
+
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild in Progress, %d%% completed\n",
+ (100 * (blocks_done >> 7))
+ / (rbld_buf.ldev_size >> 7));
+ break;
+ case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed due to Logical Drive Failure\n");
+ break;
+ case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed due to Bad Blocks on Other Drives\n");
+ break;
+ case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
+ break;
+ case MYRB_STATUS_RBLD_SUCCESS:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Completed Successfully\n");
+ break;
+ case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Successfully Terminated\n");
+ break;
+ default:
+ break;
+ }
+ scsi_device_put(sdev);
+ }
+ cb->last_rbld_status = status;
+}
+
+/**
+ * myrb_get_cc_progress - retrieve the rebuild status
+ *
+ * Execute a type 3 Command and fetch the rebuild / consistency check
+ * status.
+ */
+static void myrb_get_cc_progress(struct myrb_hba *cb)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_rbld_progress *rbld_buf;
+ dma_addr_t rbld_addr;
+ unsigned short status;
+
+ rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
+ sizeof(struct myrb_rbld_progress),
+ &rbld_addr, GFP_KERNEL);
+ if (!rbld_buf) {
+ cb->need_cc_status = true;
+ return;
+ }
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_MCMD_TAG;
+ mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
+ mbox->type3.addr = rbld_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (status == MYRB_STATUS_SUCCESS) {
+ unsigned int ldev_num = rbld_buf->ldev_num;
+ unsigned int ldev_size = rbld_buf->ldev_size;
+ unsigned int blocks_done =
+ ldev_size - rbld_buf->blocks_left;
+ struct scsi_device *sdev;
+
+ sdev = scsi_device_lookup(cb->host,
+ myrb_logical_channel(cb->host),
+ ldev_num, 0);
+ if (sdev) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check in Progress: %d%% completed\n",
+ (100 * (blocks_done >> 7))
+ / (ldev_size >> 7));
+ scsi_device_put(sdev);
+ }
+ }
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
+ rbld_buf, rbld_addr);
+}
+
+/**
+ * myrb_bgi_control - updates background initialisation status
+ *
+ * Executes a type 3B command and updates the background initialisation status
+ */
+static void myrb_bgi_control(struct myrb_hba *cb)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_bgi_status *bgi, *last_bgi;
+ dma_addr_t bgi_addr;
+ struct scsi_device *sdev = NULL;
+ unsigned short status;
+
+ bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
+ &bgi_addr, GFP_KERNEL);
+ if (!bgi) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate bgi memory\n");
+ return;
+ }
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3B.id = MYRB_DCMD_TAG;
+ mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
+ mbox->type3B.optype = 0x20;
+ mbox->type3B.addr = bgi_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ last_bgi = &cb->bgi_status;
+ sdev = scsi_device_lookup(cb->host,
+ myrb_logical_channel(cb->host),
+ bgi->ldev_num, 0);
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
+ switch (bgi->status) {
+ case MYRB_BGI_INVALID:
+ break;
+ case MYRB_BGI_STARTED:
+ if (!sdev)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Started\n");
+ break;
+ case MYRB_BGI_INPROGRESS:
+ if (!sdev)
+ break;
+ if (bgi->blocks_done == last_bgi->blocks_done &&
+ bgi->ldev_num == last_bgi->ldev_num)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization in Progress: %d%% completed\n",
+ (100 * (bgi->blocks_done >> 7))
+ / (bgi->ldev_size >> 7));
+ break;
+ case MYRB_BGI_SUSPENDED:
+ if (!sdev)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Suspended\n");
+ break;
+ case MYRB_BGI_CANCELLED:
+ if (!sdev)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Cancelled\n");
+ break;
+ }
+ memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
+ break;
+ case MYRB_STATUS_BGI_SUCCESS:
+ if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Completed Successfully\n");
+ cb->bgi_status.status = MYRB_BGI_INVALID;
+ break;
+ case MYRB_STATUS_BGI_ABORTED:
+ if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Aborted\n");
+ /* Fallthrough */
+ case MYRB_STATUS_NO_BGI_INPROGRESS:
+ cb->bgi_status.status = MYRB_BGI_INVALID;
+ break;
+ }
+ if (sdev)
+ scsi_device_put(sdev);
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
+ bgi, bgi_addr);
+}
+
+/**
+ * myrb_hba_enquiry - updates the controller status
+ *
+ * Executes a DAC_V1_Enquiry command and updates the controller status.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
+{
+ struct myrb_enquiry old, *new;
+ unsigned short status;
+
+ memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
+
+ status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
+ if (status != MYRB_STATUS_SUCCESS)
+ return status;
+
+ new = cb->enquiry;
+ if (new->ldev_count > old.ldev_count) {
+ int ldev_num = old.ldev_count - 1;
+
+ while (++ldev_num < new->ldev_count)
+ shost_printk(KERN_CRIT, cb->host,
+ "Logical Drive %d Now Exists\n",
+ ldev_num);
+ }
+ if (new->ldev_count < old.ldev_count) {
+ int ldev_num = new->ldev_count - 1;
+
+ while (++ldev_num < old.ldev_count)
+ shost_printk(KERN_CRIT, cb->host,
+ "Logical Drive %d No Longer Exists\n",
+ ldev_num);
+ }
+ if (new->status.deferred != old.status.deferred)
+ shost_printk(KERN_CRIT, cb->host,
+ "Deferred Write Error Flag is now %s\n",
+ (new->status.deferred ? "TRUE" : "FALSE"));
+ if (new->ev_seq != old.ev_seq) {
+ cb->new_ev_seq = new->ev_seq;
+ cb->need_err_info = true;
+ shost_printk(KERN_INFO, cb->host,
+ "Event log %d/%d (%d/%d) available\n",
+ cb->old_ev_seq, cb->new_ev_seq,
+ old.ev_seq, new->ev_seq);
+ }
+ if ((new->ldev_critical > 0 &&
+ new->ldev_critical != old.ldev_critical) ||
+ (new->ldev_offline > 0 &&
+ new->ldev_offline != old.ldev_offline) ||
+ (new->ldev_count != old.ldev_count)) {
+ shost_printk(KERN_INFO, cb->host,
+ "Logical drive count changed (%d/%d/%d)\n",
+ new->ldev_critical,
+ new->ldev_offline,
+ new->ldev_count);
+ cb->need_ldev_info = true;
+ }
+ if (new->pdev_dead > 0 ||
+ new->pdev_dead != old.pdev_dead ||
+ time_after_eq(jiffies, cb->secondary_monitor_time
+ + MYRB_SECONDARY_MONITOR_INTERVAL)) {
+ cb->need_bgi_status = cb->bgi_status_supported;
+ cb->secondary_monitor_time = jiffies;
+ }
+ if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
+ new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
+ old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
+ old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
+ cb->need_rbld = true;
+ cb->rbld_first = (new->ldev_critical < old.ldev_critical);
+ }
+ if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
+ switch (new->rbld) {
+ case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Completed Successfully\n");
+ break;
+ case MYRB_STDBY_RBLD_IN_PROGRESS:
+ case MYRB_BG_RBLD_IN_PROGRESS:
+ break;
+ case MYRB_BG_CHECK_IN_PROGRESS:
+ cb->need_cc_status = true;
+ break;
+ case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Completed with Error\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Failed - Physical Device Failed\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Failed - Logical Drive Failed\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Failed - Other Causes\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Successfully Terminated\n");
+ break;
+ }
+ else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
+ cb->need_cc_status = true;
+
+ return MYRB_STATUS_SUCCESS;
+}
+
+/**
+ * myrb_set_pdev_state - sets the device state for a physical device
+ *
+ * Return: command status
+ */
+static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
+ struct scsi_device *sdev, enum myrb_devstate state)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+
+ mutex_lock(&cb->dcmd_mutex);
+ mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
+ mbox->type3D.id = MYRB_DCMD_TAG;
+ mbox->type3D.channel = sdev->channel;
+ mbox->type3D.target = sdev->id;
+ mbox->type3D.state = state & 0x1F;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+
+ return status;
+}
+
+/**
+ * myrb_enable_mmio - enables the Memory Mailbox Interface
+ *
+ * PD and P controller types have no memory mailbox, but still need the
+ * other dma mapped memory.
+ *
+ * Return: true on success, false otherwise.
+ */
+static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
+{
+ void __iomem *base = cb->io_base;
+ struct pci_dev *pdev = cb->pdev;
+ size_t err_table_size;
+ size_t ldev_info_size;
+ union myrb_cmd_mbox *cmd_mbox_mem;
+ struct myrb_stat_mbox *stat_mbox_mem;
+ union myrb_cmd_mbox mbox;
+ unsigned short status;
+
+ memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
+
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_err(&pdev->dev, "DMA mask out of range\n");
+ return false;
+ }
+
+ cb->enquiry = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct myrb_enquiry),
+ &cb->enquiry_addr, GFP_KERNEL);
+ if (!cb->enquiry)
+ return false;
+
+ err_table_size = sizeof(struct myrb_error_entry) *
+ MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
+ cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
+ &cb->err_table_addr, GFP_KERNEL);
+ if (!cb->err_table)
+ return false;
+
+ ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
+ cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
+ &cb->ldev_info_addr, GFP_KERNEL);
+ if (!cb->ldev_info_buf)
+ return false;
+
+ /*
+ * Skip mailbox initialisation for PD and P Controllers
+ */
+ if (!mmio_init_fn)
+ return true;
+
+ /* These are the base addresses for the command memory mailbox array */
+ cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
+ cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
+ cb->cmd_mbox_size,
+ &cb->cmd_mbox_addr,
+ GFP_KERNEL);
+ if (!cb->first_cmd_mbox)
+ return false;
+
+ cmd_mbox_mem = cb->first_cmd_mbox;
+ cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
+ cb->last_cmd_mbox = cmd_mbox_mem;
+ cb->next_cmd_mbox = cb->first_cmd_mbox;
+ cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
+ cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
+
+ /* These are the base addresses for the status memory mailbox array */
+ cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
+ sizeof(struct myrb_stat_mbox);
+ cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
+ cb->stat_mbox_size,
+ &cb->stat_mbox_addr,
+ GFP_KERNEL);
+ if (!cb->first_stat_mbox)
+ return false;
+
+ stat_mbox_mem = cb->first_stat_mbox;
+ stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
+ cb->last_stat_mbox = stat_mbox_mem;
+ cb->next_stat_mbox = cb->first_stat_mbox;
+
+ /* Enable the Memory Mailbox Interface. */
+ cb->dual_mode_interface = true;
+ mbox.typeX.opcode = 0x2B;
+ mbox.typeX.id = 0;
+ mbox.typeX.opcode2 = 0x14;
+ mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
+ mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
+
+ status = mmio_init_fn(pdev, base, &mbox);
+ if (status != MYRB_STATUS_SUCCESS) {
+ cb->dual_mode_interface = false;
+ mbox.typeX.opcode2 = 0x10;
+ status = mmio_init_fn(pdev, base, &mbox);
+ if (status != MYRB_STATUS_SUCCESS) {
+ dev_err(&pdev->dev,
+ "Failed to enable mailbox, statux %02X\n",
+ status);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * myrb_get_hba_config - reads the configuration information
+ * @cb: pointer to the HBA structure
+ *
+ * Reads the configuration information from the controller and
+ * initializes the controller structure.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int myrb_get_hba_config(struct myrb_hba *cb)
+{
+ struct myrb_enquiry2 *enquiry2;
+ dma_addr_t enquiry2_addr;
+ struct myrb_config2 *config2;
+ dma_addr_t config2_addr;
+ struct Scsi_Host *shost = cb->host;
+ struct pci_dev *pdev = cb->pdev;
+ int pchan_max = 0, pchan_cur = 0;
+ unsigned short status;
+ int ret = -ENODEV, memsize = 0;
+
+ enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+ &enquiry2_addr, GFP_KERNEL);
+ if (!enquiry2) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate V1 enquiry2 memory\n");
+ return -ENOMEM;
+ }
+ config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
+ &config2_addr, GFP_KERNEL);
+ if (!config2) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate V1 config2 memory\n");
+ dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+ enquiry2, enquiry2_addr);
+ return -ENOMEM;
+ }
+ mutex_lock(&cb->dma_mutex);
+ status = myrb_hba_enquiry(cb);
+ mutex_unlock(&cb->dma_mutex);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed it issue V1 Enquiry\n");
+ goto out_free;
+ }
+
+ status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed to issue V1 Enquiry2\n");
+ goto out_free;
+ }
+
+ status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed to issue ReadConfig2\n");
+ goto out_free;
+ }
+
+ status = myrb_get_ldev_info(cb);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed to get logical drive information\n");
+ goto out_free;
+ }
+
+ /*
+ * Initialize the Controller Model Name and Full Model Name fields.
+ */
+ switch (enquiry2->hw.sub_model) {
+ case DAC960_V1_P_PD_PU:
+ if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
+ strcpy(cb->model_name, "DAC960PU");
+ else
+ strcpy(cb->model_name, "DAC960PD");
+ break;
+ case DAC960_V1_PL:
+ strcpy(cb->model_name, "DAC960PL");
+ break;
+ case DAC960_V1_PG:
+ strcpy(cb->model_name, "DAC960PG");
+ break;
+ case DAC960_V1_PJ:
+ strcpy(cb->model_name, "DAC960PJ");
+ break;
+ case DAC960_V1_PR:
+ strcpy(cb->model_name, "DAC960PR");
+ break;
+ case DAC960_V1_PT:
+ strcpy(cb->model_name, "DAC960PT");
+ break;
+ case DAC960_V1_PTL0:
+ strcpy(cb->model_name, "DAC960PTL0");
+ break;
+ case DAC960_V1_PRL:
+ strcpy(cb->model_name, "DAC960PRL");
+ break;
+ case DAC960_V1_PTL1:
+ strcpy(cb->model_name, "DAC960PTL1");
+ break;
+ case DAC960_V1_1164P:
+ strcpy(cb->model_name, "eXtremeRAID 1100");
+ break;
+ default:
+ shost_printk(KERN_WARNING, cb->host,
+ "Unknown Model %X\n",
+ enquiry2->hw.sub_model);
+ goto out;
+ }
+ /*
+ * Initialize the Controller Firmware Version field and verify that it
+ * is a supported firmware version.
+ * The supported firmware versions are:
+ *
+ * DAC1164P 5.06 and above
+ * DAC960PTL/PRL/PJ/PG 4.06 and above
+ * DAC960PU/PD/PL 3.51 and above
+ * DAC960PU/PD/PL/P 2.73 and above
+ */
+#if defined(CONFIG_ALPHA)
+ /*
+ * DEC Alpha machines were often equipped with DAC960 cards that were
+ * OEMed from Mylex, and had their own custom firmware. Version 2.70,
+ * the last custom FW revision to be released by DEC for these older
+ * controllers, appears to work quite well with this driver.
+ *
+ * Cards tested successfully were several versions each of the PD and
+ * PU, called by DEC the KZPSC and KZPAC, respectively, and having
+ * the Manufacturer Numbers (from Mylex), usually on a sticker on the
+ * back of the board, of:
+ *
+ * KZPSC: D040347 (1-channel) or D040348 (2-channel)
+ * or D040349 (3-channel)
+ * KZPAC: D040395 (1-channel) or D040396 (2-channel)
+ * or D040397 (3-channel)
+ */
+# define FIRMWARE_27X "2.70"
+#else
+# define FIRMWARE_27X "2.73"
+#endif
+
+ if (enquiry2->fw.major_version == 0) {
+ enquiry2->fw.major_version = cb->enquiry->fw_major_version;
+ enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
+ enquiry2->fw.firmware_type = '0';
+ enquiry2->fw.turn_id = 0;
+ }
+ sprintf(cb->fw_version, "%d.%02d-%c-%02d",
+ enquiry2->fw.major_version,
+ enquiry2->fw.minor_version,
+ enquiry2->fw.firmware_type,
+ enquiry2->fw.turn_id);
+ if (!((enquiry2->fw.major_version == 5 &&
+ enquiry2->fw.minor_version >= 6) ||
+ (enquiry2->fw.major_version == 4 &&
+ enquiry2->fw.minor_version >= 6) ||
+ (enquiry2->fw.major_version == 3 &&
+ enquiry2->fw.minor_version >= 51) ||
+ (enquiry2->fw.major_version == 2 &&
+ strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Firmware Version '%s' unsupported\n",
+ cb->fw_version);
+ goto out;
+ }
+ /*
+ * Initialize the Channels, Targets, Memory Size, and SAF-TE
+ * Enclosure Management Enabled fields.
+ */
+ switch (enquiry2->hw.model) {
+ case MYRB_5_CHANNEL_BOARD:
+ pchan_max = 5;
+ break;
+ case MYRB_3_CHANNEL_BOARD:
+ case MYRB_3_CHANNEL_ASIC_DAC:
+ pchan_max = 3;
+ break;
+ case MYRB_2_CHANNEL_BOARD:
+ pchan_max = 2;
+ break;
+ default:
+ pchan_max = enquiry2->cfg_chan;
+ break;
+ }
+ pchan_cur = enquiry2->cur_chan;
+ if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
+ cb->bus_width = 32;
+ else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
+ cb->bus_width = 16;
+ else
+ cb->bus_width = 8;
+ cb->ldev_block_size = enquiry2->ldev_block_size;
+ shost->max_channel = pchan_cur;
+ shost->max_id = enquiry2->max_targets;
+ memsize = enquiry2->mem_size >> 20;
+ cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
+ /*
+ * Initialize the Controller Queue Depth, Driver Queue Depth,
+ * Logical Drive Count, Maximum Blocks per Command, Controller
+ * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
+ * The Driver Queue Depth must be at most one less than the
+ * Controller Queue Depth to allow for an automatic drive
+ * rebuild operation.
+ */
+ shost->can_queue = cb->enquiry->max_tcq;
+ if (shost->can_queue < 3)
+ shost->can_queue = enquiry2->max_cmds;
+ if (shost->can_queue < 3)
+ /* Play safe and disable TCQ */
+ shost->can_queue = 1;
+
+ if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
+ shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
+ shost->max_sectors = enquiry2->max_sectors;
+ shost->sg_tablesize = enquiry2->max_sge;
+ if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
+ shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
+ /*
+ * Initialize the Stripe Size, Segment Size, and Geometry Translation.
+ */
+ cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
+ >> (10 - MYRB_BLKSIZE_BITS);
+ cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
+ >> (10 - MYRB_BLKSIZE_BITS);
+ /* Assume 255/63 translation */
+ cb->ldev_geom_heads = 255;
+ cb->ldev_geom_sectors = 63;
+ if (config2->drive_geometry) {
+ cb->ldev_geom_heads = 128;
+ cb->ldev_geom_sectors = 32;
+ }
+
+ /*
+ * Initialize the Background Initialization Status.
+ */
+ if ((cb->fw_version[0] == '4' &&
+ strcmp(cb->fw_version, "4.08") >= 0) ||
+ (cb->fw_version[0] == '5' &&
+ strcmp(cb->fw_version, "5.08") >= 0)) {
+ cb->bgi_status_supported = true;
+ myrb_bgi_control(cb);
+ }
+ cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
+ ret = 0;
+
+out:
+ shost_printk(KERN_INFO, cb->host,
+ "Configuring %s PCI RAID Controller\n", cb->model_name);
+ shost_printk(KERN_INFO, cb->host,
+ " Firmware Version: %s, Memory Size: %dMB\n",
+ cb->fw_version, memsize);
+ if (cb->io_addr == 0)
+ shost_printk(KERN_INFO, cb->host,
+ " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
+ (unsigned long)cb->pci_addr, cb->irq);
+ else
+ shost_printk(KERN_INFO, cb->host,
+ " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
+ (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
+ cb->irq);
+ shost_printk(KERN_INFO, cb->host,
+ " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
+ cb->host->can_queue, cb->host->max_sectors);
+ shost_printk(KERN_INFO, cb->host,
+ " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
+ cb->host->can_queue, cb->host->sg_tablesize,
+ MYRB_SCATTER_GATHER_LIMIT);
+ shost_printk(KERN_INFO, cb->host,
+ " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
+ cb->stripe_size, cb->segment_size,
+ cb->ldev_geom_heads, cb->ldev_geom_sectors,
+ cb->safte_enabled ?
+ " SAF-TE Enclosure Management Enabled" : "");
+ shost_printk(KERN_INFO, cb->host,
+ " Physical: %d/%d channels %d/%d/%d devices\n",
+ pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
+ cb->host->max_id);
+
+ shost_printk(KERN_INFO, cb->host,
+ " Logical: 1/1 channels, %d/%d disks\n",
+ cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
+
+out_free:
+ dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+ enquiry2, enquiry2_addr);
+ dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
+ config2, config2_addr);
+
+ return ret;
+}
+
+/**
+ * myrb_unmap - unmaps controller structures
+ * @cb: pointer to the HBA structure
+ */
+static void myrb_unmap(struct myrb_hba *cb)
+{
+ if (cb->ldev_info_buf) {
+ size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
+ MYRB_MAX_LDEVS;
+ dma_free_coherent(&cb->pdev->dev, ldev_info_size,
+ cb->ldev_info_buf, cb->ldev_info_addr);
+ cb->ldev_info_buf = NULL;
+ }
+ if (cb->err_table) {
+ size_t err_table_size = sizeof(struct myrb_error_entry) *
+ MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
+ dma_free_coherent(&cb->pdev->dev, err_table_size,
+ cb->err_table, cb->err_table_addr);
+ cb->err_table = NULL;
+ }
+ if (cb->enquiry) {
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
+ cb->enquiry, cb->enquiry_addr);
+ cb->enquiry = NULL;
+ }
+ if (cb->first_stat_mbox) {
+ dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
+ cb->first_stat_mbox, cb->stat_mbox_addr);
+ cb->first_stat_mbox = NULL;
+ }
+ if (cb->first_cmd_mbox) {
+ dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
+ cb->first_cmd_mbox, cb->cmd_mbox_addr);
+ cb->first_cmd_mbox = NULL;
+ }
+}
+
+/**
+ * myrb_cleanup - cleanup controller structures
+ * @cb: pointer to the HBA structure
+ */
+static void myrb_cleanup(struct myrb_hba *cb)
+{
+ struct pci_dev *pdev = cb->pdev;
+
+ /* Free the memory mailbox, status, and related structures */
+ myrb_unmap(cb);
+
+ if (cb->mmio_base) {
+ cb->disable_intr(cb->io_base);
+ iounmap(cb->mmio_base);
+ }
+ if (cb->irq)
+ free_irq(cb->irq, cb);
+ if (cb->io_addr)
+ release_region(cb->io_addr, 0x80);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ scsi_host_put(cb->host);
+}
+
+static int myrb_host_reset(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost = scmd->device->host;
+ struct myrb_hba *cb = shost_priv(shost);
+
+ cb->reset(cb->io_base);
+ return SUCCESS;
+}
+
+static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct myrb_hba *cb = shost_priv(shost);
+ struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_dcdb *dcdb;
+ dma_addr_t dcdb_addr;
+ struct scsi_device *sdev = scmd->device;
+ struct scatterlist *sgl;
+ unsigned long flags;
+ int nsge;
+
+ myrb_reset_cmd(cmd_blk);
+ dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
+ if (!dcdb)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ nsge = scsi_dma_map(scmd);
+ if (nsge > 1) {
+ dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
+ scmd->result = (DID_ERROR << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
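+ /*
+ * Command identifiers below 3 are reserved for driver-internal
+ * commands (MYRB_DCMD_TAG and MYRB_MCMD_TAG), so SCSI commands use
+ * their block layer tag offset by 3; the interrupt handlers reverse
+ * this mapping.
+ */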
+ mbox->type3.opcode = MYRB_CMD_DCDB;
+ mbox->type3.id = scmd->request->tag + 3;
+ mbox->type3.addr = dcdb_addr;
+ dcdb->channel = sdev->channel;
+ dcdb->target = sdev->id;
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
+ break;
+ case DMA_TO_DEVICE:
+ dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
+ break;
+ case DMA_FROM_DEVICE:
+ dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
+ break;
+ default:
+ dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
+ break;
+ }
+ dcdb->early_status = false;
+ /* request->timeout is in jiffies; the DCDB encoding is in seconds */
+ if (scmd->request->timeout <= 10 * HZ)
+ dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
+ else if (scmd->request->timeout <= 60 * HZ)
+ dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
+ else if (scmd->request->timeout <= 600 * HZ)
+ dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
+ else
+ dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
+ dcdb->no_autosense = false;
+ dcdb->allow_disconnect = true;
+ sgl = scsi_sglist(scmd);
+ dcdb->dma_addr = sg_dma_address(sgl);
+ if (sg_dma_len(sgl) > USHRT_MAX) {
+ dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
+ dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
+ } else {
+ dcdb->xfer_len_lo = sg_dma_len(sgl);
+ dcdb->xfer_len_hi4 = 0;
+ }
+ dcdb->cdb_len = scmd->cmd_len;
+ dcdb->sense_len = sizeof(dcdb->sense);
+ memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ cb->qcmd(cb, cmd_blk);
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return 0;
+}
+
+static void myrb_inquiry(struct myrb_hba *cb,
+ struct scsi_cmnd *scmd)
+{
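+ /*
+ * Template for a standard 36-byte INQUIRY response: direct-access
+ * device, vendor ID "MYLEX" in bytes 8-15; the model name (bytes
+ * 16-31) and firmware revision digits (bytes 32-35) are patched in
+ * below.
+ */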
+ unsigned char inq[36] = {
+ 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
+ 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ };
+
+ if (cb->bus_width > 16)
+ inq[7] |= 1 << 6;
+ if (cb->bus_width > 8)
+ inq[7] |= 1 << 5;
+ memcpy(&inq[16], cb->model_name, 16);
+ memcpy(&inq[32], cb->fw_version, 1);
+ memcpy(&inq[33], &cb->fw_version[2], 2);
+ memcpy(&inq[35], &cb->fw_version[7], 1);
+
+ scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
+}
+
+static void
+myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
+ struct myrb_ldev_info *ldev_info)
+{
+ unsigned char modes[32], *mode_pg;
+ bool dbd;
+ size_t mode_len;
+
+ dbd = (scmd->cmnd[1] & 0x08) == 0x08;
+ if (dbd) {
+ mode_len = 24;
+ mode_pg = &modes[4];
+ } else {
+ mode_len = 32;
+ mode_pg = &modes[12];
+ }
+ memset(modes, 0, sizeof(modes));
+ modes[0] = mode_len - 1;
+ if (!dbd) {
+ unsigned char *block_desc = &modes[4];
+
+ modes[3] = 8;
+ put_unaligned_be32(ldev_info->size, &block_desc[0]);
+ /*
+ * The block length occupies bytes 5-7 of the descriptor; writing
+ * the 32-bit value at offset 4 (byte 4 is reserved and remains
+ * zero for any sane block size) lands it there.
+ */
+ put_unaligned_be32(cb->ldev_block_size, &block_desc[4]);
+ }
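+ /*
+ * Caching mode page (0x08): advertise write-back caching when it is
+ * enabled and, when a cache segment size is configured, set the SIZE
+ * bit and report the segment size in bytes 14-15.
+ */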
+ mode_pg[0] = 0x08;
+ mode_pg[1] = 0x12;
+ if (ldev_info->wb_enabled)
+ mode_pg[2] |= 0x04;
+ if (cb->segment_size) {
+ mode_pg[2] |= 0x08;
+ put_unaligned_be16(cb->segment_size, &mode_pg[14]);
+ }
+
+ scsi_sg_copy_from_buffer(scmd, modes, mode_len);
+}
+
+static void myrb_request_sense(struct myrb_hba *cb,
+ struct scsi_cmnd *scmd)
+{
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ NO_SENSE, 0, 0);
+ scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
+}
+
+static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
+ struct myrb_ldev_info *ldev_info)
+{
+ unsigned char data[8];
+
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Capacity %u, blocksize %u\n",
+ ldev_info->size, cb->ldev_block_size);
+ put_unaligned_be32(ldev_info->size - 1, &data[0]);
+ put_unaligned_be32(cb->ldev_block_size, &data[4]);
+ scsi_sg_copy_from_buffer(scmd, data, 8);
+}
+
+static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct myrb_hba *cb = shost_priv(shost);
+ struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_ldev_info *ldev_info;
+ struct scsi_device *sdev = scmd->device;
+ struct scatterlist *sgl;
+ unsigned long flags;
+ u64 lba;
+ u32 block_cnt;
+ int nsge;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info ||
+ (ldev_info->state != MYRB_DEVICE_ONLINE &&
+ ldev_info->state != MYRB_DEVICE_WO)) {
+ dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
+ sdev->id, ldev_info ? ldev_info->state : 0xff);
+ scmd->result = (DID_BAD_TARGET << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ switch (scmd->cmnd[0]) {
+ case TEST_UNIT_READY:
+ scmd->result = (DID_OK << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ case INQUIRY:
+ if (scmd->cmnd[1] & 1) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ myrb_inquiry(cb, scmd);
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ case SYNCHRONIZE_CACHE:
+ scmd->result = (DID_OK << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ case MODE_SENSE:
+ if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
+ (scmd->cmnd[2] & 0x3F) != 0x08) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ myrb_mode_sense(cb, scmd, ldev_info);
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ case READ_CAPACITY:
+ if ((scmd->cmnd[1] & 1) ||
+ (scmd->cmnd[8] & 1)) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ if (lba) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ myrb_read_capacity(cb, scmd, ldev_info);
+ scmd->scsi_done(scmd);
+ return 0;
+ case REQUEST_SENSE:
+ myrb_request_sense(cb, scmd);
+ scmd->result = (DID_OK << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ case SEND_DIAGNOSTIC:
+ if (scmd->cmnd[1] != 0x04) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ /* Assume good status */
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ case READ_6:
+ if (ldev_info->state == MYRB_DEVICE_WO) {
+ /* Data protect, attempt to read invalid data */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ DATA_PROTECT, 0x21, 0x06);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ /* fall through */
+ case WRITE_6:
+ lba = (((scmd->cmnd[1] & 0x1F) << 16) |
+ (scmd->cmnd[2] << 8) |
+ scmd->cmnd[3]);
+ block_cnt = scmd->cmnd[4];
+ break;
+ case READ_10:
+ if (ldev_info->state == MYRB_DEVICE_WO) {
+ /* Data protect, attempt to read invalid data */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ DATA_PROTECT, 0x21, 0x06);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ /* fall through */
+ case WRITE_10:
+ case VERIFY: /* 0x2F */
+ case WRITE_VERIFY: /* 0x2E */
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
+ break;
+ case READ_12:
+ if (ldev_info->state == MYRB_DEVICE_WO) {
+ /* Data protect, attempt to read invalid data */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ DATA_PROTECT, 0x21, 0x06);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ /* fall through */
+ case WRITE_12:
+ case VERIFY_12: /* 0xAF */
+ case WRITE_VERIFY_12: /* 0xAE */
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+ break;
+ default:
+ /* Illegal request, invalid opcode */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x20, 0);
+ scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type5.id = scmd->request->tag + 3;
+ if (scmd->sc_data_direction == DMA_NONE)
+ goto submit;
+ nsge = scsi_dma_map(scmd);
+ if (nsge == 1) {
+ sgl = scsi_sglist(scmd);
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mbox->type5.opcode = MYRB_CMD_READ;
+ else
+ mbox->type5.opcode = MYRB_CMD_WRITE;
+
+ mbox->type5.ld.xfer_len = block_cnt;
+ mbox->type5.ld.ldev_num = sdev->id;
+ mbox->type5.lba = lba;
+ mbox->type5.addr = (u32)sg_dma_address(sgl);
+ } else {
+ struct myrb_sge *hw_sgl;
+ dma_addr_t hw_sgl_addr;
+ int i;
+
+ hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
+ if (!hw_sgl)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ cmd_blk->sgl = hw_sgl;
+ cmd_blk->sgl_addr = hw_sgl_addr;
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mbox->type5.opcode = MYRB_CMD_READ_SG;
+ else
+ mbox->type5.opcode = MYRB_CMD_WRITE_SG;
+
+ mbox->type5.ld.xfer_len = block_cnt;
+ mbox->type5.ld.ldev_num = sdev->id;
+ mbox->type5.lba = lba;
+ mbox->type5.addr = hw_sgl_addr;
+ mbox->type5.sg_count = nsge;
+
+ scsi_for_each_sg(scmd, sgl, nsge, i) {
+ hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
+ hw_sgl->sge_count = (u32)sg_dma_len(sgl);
+ hw_sgl++;
+ }
+ }
+submit:
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ cb->qcmd(cb, cmd_blk);
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+
+ return 0;
+}
+
+static int myrb_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct scsi_device *sdev = scmd->device;
+
+ if (sdev->channel > myrb_logical_channel(shost)) {
+ scmd->result = (DID_BAD_TARGET << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ if (sdev->channel == myrb_logical_channel(shost))
+ return myrb_ldev_queuecommand(shost, scmd);
+
+ return myrb_pthru_queuecommand(shost, scmd);
+}
+
+static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
+{
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_ldev_info *ldev_info;
+ unsigned short ldev_num = sdev->id;
+ enum raid_level level;
+
+ /* the index, not the computed pointer, is what can be out of range */
+ if (ldev_num >= MYRB_MAX_LDEVS)
+ return -ENXIO;
+
+ ldev_info = cb->ldev_info_buf + ldev_num;
+
+ sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
+ if (!sdev->hostdata)
+ return -ENOMEM;
+ dev_dbg(&sdev->sdev_gendev,
+ "slave alloc ldev %d state %x\n",
+ ldev_num, ldev_info->state);
+ memcpy(sdev->hostdata, ldev_info,
+ sizeof(*ldev_info));
+ switch (ldev_info->raid_level) {
+ case MYRB_RAID_LEVEL0:
+ level = RAID_LEVEL_LINEAR;
+ break;
+ case MYRB_RAID_LEVEL1:
+ level = RAID_LEVEL_1;
+ break;
+ case MYRB_RAID_LEVEL3:
+ level = RAID_LEVEL_3;
+ break;
+ case MYRB_RAID_LEVEL5:
+ level = RAID_LEVEL_5;
+ break;
+ case MYRB_RAID_LEVEL6:
+ level = RAID_LEVEL_6;
+ break;
+ case MYRB_RAID_JBOD:
+ level = RAID_LEVEL_JBOD;
+ break;
+ default:
+ level = RAID_LEVEL_UNKNOWN;
+ break;
+ }
+ raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
+ return 0;
+}
+
+static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
+{
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_pdev_state *pdev_info;
+ unsigned short status;
+
+ if (sdev->id >= MYRB_MAX_TARGETS)
+ return -ENXIO;
+
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ if (!pdev_info)
+ return -ENOMEM;
+
+ status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
+ sdev, pdev_info);
+ if (status != MYRB_STATUS_SUCCESS) {
+ dev_dbg(&sdev->sdev_gendev,
+ "Failed to get device state, status %x\n",
+ status);
+ kfree(pdev_info);
+ return -ENXIO;
+ }
+ if (!pdev_info->present) {
+ dev_dbg(&sdev->sdev_gendev,
+ "device not present, skip\n");
+ kfree(pdev_info);
+ return -ENXIO;
+ }
+ dev_dbg(&sdev->sdev_gendev,
+ "slave alloc pdev %d:%d state %x\n",
+ sdev->channel, sdev->id, pdev_info->state);
+ sdev->hostdata = pdev_info;
+
+ return 0;
+}
+
+static int myrb_slave_alloc(struct scsi_device *sdev)
+{
+ if (sdev->channel > myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ if (sdev->lun > 0)
+ return -ENXIO;
+
+ if (sdev->channel == myrb_logical_channel(sdev->host))
+ return myrb_ldev_slave_alloc(sdev);
+
+ return myrb_pdev_slave_alloc(sdev);
+}
+
+static int myrb_slave_configure(struct scsi_device *sdev)
+{
+ struct myrb_ldev_info *ldev_info;
+
+ if (sdev->channel > myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host)) {
+ sdev->no_uld_attach = 1;
+ return 0;
+ }
+ if (sdev->lun != 0)
+ return -ENXIO;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ if (ldev_info->state != MYRB_DEVICE_ONLINE)
+ sdev_printk(KERN_INFO, sdev,
+ "Logical drive is %s\n",
+ myrb_devstate_name(ldev_info->state));
+
+ sdev->tagged_supported = 1;
+ return 0;
+}
+
+static void myrb_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+}
+
+static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ struct myrb_hba *cb = shost_priv(sdev->host);
+
+ geom[0] = cb->ldev_geom_heads;
+ geom[1] = cb->ldev_geom_sectors;
+ /* sector_div() returns the remainder; the quotient is the cylinder count */
+ sector_div(capacity, geom[0] * geom[1]);
+ geom[2] = capacity;
+
+ return 0;
+}
+
+static ssize_t raid_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ int ret;
+
+ if (!sdev->hostdata)
+ return snprintf(buf, 16, "Unknown\n");
+
+ if (sdev->channel == myrb_logical_channel(sdev->host)) {
+ struct myrb_ldev_info *ldev_info = sdev->hostdata;
+ const char *name;
+
+ name = myrb_devstate_name(ldev_info->state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->state);
+ } else {
+ struct myrb_pdev_state *pdev_info = sdev->hostdata;
+ unsigned short status;
+ const char *name;
+
+ status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
+ sdev, pdev_info);
+ if (status != MYRB_STATUS_SUCCESS)
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device state, status %x\n",
+ status);
+
+ if (!pdev_info->present)
+ name = "Removed";
+ else
+ name = myrb_devstate_name(pdev_info->state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ pdev_info->state);
+ }
+ return ret;
+}
+
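+/*
+ * The raid_state attribute is also writable for physical devices;
+ * it accepts "kill"/"offline", "online" or "standby", e.g. (path is
+ * illustrative):
+ *   echo online > /sys/bus/scsi/devices/<h:c:t:l>/raid_state
+ */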
+static ssize_t raid_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_pdev_state *pdev_info;
+ enum myrb_devstate new_state;
+ unsigned short status;
+
+ if (!strncmp(buf, "kill", 4) ||
+ !strncmp(buf, "offline", 7))
+ new_state = MYRB_DEVICE_DEAD;
+ else if (!strncmp(buf, "online", 6))
+ new_state = MYRB_DEVICE_ONLINE;
+ else if (!strncmp(buf, "standby", 7))
+ new_state = MYRB_DEVICE_STANDBY;
+ else
+ return -EINVAL;
+
+ pdev_info = sdev->hostdata;
+ if (!pdev_info) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - no physical device information\n");
+ return -ENXIO;
+ }
+ if (!pdev_info->present) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - device not present\n");
+ return -ENXIO;
+ }
+
+ if (pdev_info->state == new_state)
+ return count;
+
+ status = myrb_set_pdev_state(cb, sdev, new_state);
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
+ break;
+ case MYRB_STATUS_START_DEVICE_FAILED:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Unable to Start Device\n");
+ count = -EAGAIN;
+ break;
+ case MYRB_STATUS_NO_DEVICE:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - No Device at Address\n");
+ count = -ENODEV;
+ break;
+ case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Invalid Channel or Target or Modifier\n");
+ count = -EINVAL;
+ break;
+ case MYRB_STATUS_CHANNEL_BUSY:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Channel Busy\n");
+ count = -EBUSY;
+ break;
+ default:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Unexpected Status %04X\n", status);
+ count = -EIO;
+ break;
+ }
+ return count;
+}
+static DEVICE_ATTR_RW(raid_state);
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (sdev->channel == myrb_logical_channel(sdev->host)) {
+ struct myrb_ldev_info *ldev_info = sdev->hostdata;
+ const char *name;
+
+ if (!ldev_info)
+ return -ENXIO;
+
+ name = myrb_raidlevel_name(ldev_info->raid_level);
+ if (!name)
+ return snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->state);
+ return snprintf(buf, 32, "%s\n", name);
+ }
+ return snprintf(buf, 32, "Physical Drive\n");
+}
+static DEVICE_ATTR_RO(raid_level);
+
+static ssize_t rebuild_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_rbld_progress rbld_buf;
+ unsigned short status;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host))
+ return snprintf(buf, 64, "physical device - not rebuilding\n");
+
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+
+ if (rbld_buf.ldev_num != sdev->id ||
+ status != MYRB_STATUS_SUCCESS)
+ return snprintf(buf, 64, "not rebuilding\n");
+
+ return snprintf(buf, 64, "rebuilding block %u of %u\n",
+ rbld_buf.ldev_size - rbld_buf.blocks_left,
+ rbld_buf.ldev_size);
+}
+
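+/*
+ * Writing a non-zero value starts a rebuild of the physical device,
+ * zero cancels a rebuild in progress, e.g. (path is illustrative):
+ *   echo 1 > /sys/bus/scsi/devices/<h:c:t:l>/rebuild
+ */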
+static ssize_t rebuild_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_cmdblk *cmd_blk;
+ union myrb_cmd_mbox *mbox;
+ unsigned short status;
+ int rc, start;
+ const char *msg;
+
+ rc = kstrtoint(buf, 0, &start);
+ if (rc)
+ return rc;
+
+ if (sdev->channel >= myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ status = myrb_get_rbld_progress(cb, NULL);
+ if (start) {
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Initiated; already in progress\n");
+ return -EALREADY;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
+ mbox->type3D.id = MYRB_DCMD_TAG;
+ mbox->type3D.channel = sdev->channel;
+ mbox->type3D.target = sdev->id;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ } else {
+ struct pci_dev *pdev = cb->pdev;
+ unsigned char *rate;
+ dma_addr_t rate_addr;
+
+ if (status != MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Cancelled; not in progress\n");
+ return 0;
+ }
+
+ rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
+ &rate_addr, GFP_KERNEL);
+ if (rate == NULL) {
+ sdev_printk(KERN_INFO, sdev,
+ "Cancellation of Rebuild Failed - Out of Memory\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
+ mbox->type3R.id = MYRB_DCMD_TAG;
+ mbox->type3R.rbld_rate = 0xFF;
+ mbox->type3R.addr = rate_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
+ mutex_unlock(&cb->dcmd_mutex);
+ }
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
+ start ? "Initiated" : "Cancelled");
+ return count;
+ }
+ if (!start) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Cancelled, status 0x%x\n",
+ status);
+ return -EIO;
+ }
+
+ switch (status) {
+ case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
+ msg = "Attempt to Rebuild Online or Unresponsive Drive";
+ break;
+ case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
+ msg = "New Disk Failed During Rebuild";
+ break;
+ case MYRB_STATUS_INVALID_ADDRESS:
+ msg = "Invalid Device Address";
+ break;
+ case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
+ msg = "Already in Progress";
+ break;
+ default:
+ msg = NULL;
+ break;
+ }
+ if (msg)
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed - %s\n", msg);
+ else
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed, status 0x%x\n", status);
+
+ return -EIO;
+}
+static DEVICE_ATTR_RW(rebuild);
+
+static ssize_t consistency_check_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_rbld_progress rbld_buf;
+ struct myrb_cmdblk *cmd_blk;
+ union myrb_cmd_mbox *mbox;
+ unsigned short ldev_num = 0xFFFF;
+ unsigned short status;
+ int rc, start;
+ const char *msg;
+
+ rc = kstrtoint(buf, 0, &start);
+ if (rc)
+ return rc;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+ if (status == MYRB_STATUS_SUCCESS)
+ ldev_num = rbld_buf.ldev_num;
+ if (start) {
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Not Initiated; already in progress\n");
+ return -EALREADY;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
+ mbox->type3C.id = MYRB_DCMD_TAG;
+ mbox->type3C.ldev_num = sdev->id;
+ mbox->type3C.auto_restore = true;
+
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ } else {
+ struct pci_dev *pdev = cb->pdev;
+ unsigned char *rate;
+ dma_addr_t rate_addr;
+
+ if (ldev_num != sdev->id) {
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Not Cancelled; not in progress\n");
+ return 0;
+ }
+ rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
+ &rate_addr, GFP_KERNEL);
+ if (rate == NULL) {
+ sdev_printk(KERN_INFO, sdev,
+ "Cancellation of Check Consistency Failed - Out of Memory\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
+ mbox->type3R.id = MYRB_DCMD_TAG;
+ mbox->type3R.rbld_rate = 0xFF;
+ mbox->type3R.addr = rate_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
+ mutex_unlock(&cb->dcmd_mutex);
+ }
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
+ start ? "Initiated" : "Cancelled");
+ return count;
+ }
+ if (!start) {
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Not Cancelled, status 0x%x\n",
+ status);
+ return -EIO;
+ }
+
+ switch (status) {
+ case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
+ msg = "Dependent Physical Device is DEAD";
+ break;
+ case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
+ msg = "New Disk Failed During Rebuild";
+ break;
+ case MYRB_STATUS_INVALID_ADDRESS:
+ msg = "Invalid or Nonredundant Logical Drive";
+ break;
+ case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
+ msg = "Already in Progress";
+ break;
+ default:
+ msg = NULL;
+ break;
+ }
+ if (msg)
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Failed - %s\n", msg);
+ else
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Failed, status 0x%x\n", status);
+
+ return -EIO;
+}
+
+static ssize_t consistency_check_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return rebuild_show(dev, attr, buf);
+}
+static DEVICE_ATTR_RW(consistency_check);
+
+static ssize_t ctlr_num_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+
+ return snprintf(buf, 20, "%d\n", cb->ctlr_num);
+}
+static DEVICE_ATTR_RO(ctlr_num);
+
+static ssize_t firmware_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", cb->fw_version);
+}
+static DEVICE_ATTR_RO(firmware);
+
+static ssize_t model_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", cb->model_name);
+}
+static DEVICE_ATTR_RO(model);
+
+static ssize_t flush_cache_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+ unsigned short status;
+
+ status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
+ if (status == MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_INFO, shost,
+ "Cache Flush Completed\n");
+ return count;
+ }
+ shost_printk(KERN_INFO, shost,
+ "Cache Flush Failed, status %x\n", status);
+ return -EIO;
+}
+static DEVICE_ATTR_WO(flush_cache);
+
+static struct device_attribute *myrb_sdev_attrs[] = {
+ &dev_attr_rebuild,
+ &dev_attr_consistency_check,
+ &dev_attr_raid_state,
+ &dev_attr_raid_level,
+ NULL,
+};
+
+static struct device_attribute *myrb_shost_attrs[] = {
+ &dev_attr_ctlr_num,
+ &dev_attr_model,
+ &dev_attr_firmware,
+ &dev_attr_flush_cache,
+ NULL,
+};
+
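+/*
+ * cmd_size makes the SCSI midlayer allocate a struct myrb_cmdblk
+ * along with each scsi_cmnd; scsi_cmd_priv() returns that
+ * per-command block in the queuecommand and completion paths.
+ */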
+struct scsi_host_template myrb_template = {
+ .module = THIS_MODULE,
+ .name = "DAC960",
+ .proc_name = "myrb",
+ .queuecommand = myrb_queuecommand,
+ .eh_host_reset_handler = myrb_host_reset,
+ .slave_alloc = myrb_slave_alloc,
+ .slave_configure = myrb_slave_configure,
+ .slave_destroy = myrb_slave_destroy,
+ .bios_param = myrb_biosparam,
+ .cmd_size = sizeof(struct myrb_cmdblk),
+ .shost_attrs = myrb_shost_attrs,
+ .sdev_attrs = myrb_sdev_attrs,
+ .this_id = -1,
+};
+
+/**
+ * myrb_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int myrb_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sdev->channel == myrb_logical_channel(sdev->host);
+}
+
+/**
+ * myrb_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void myrb_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_rbld_progress rbld_buf;
+ unsigned int percent_complete = 0;
+ unsigned short status;
+ unsigned int ldev_size = 0, remaining = 0;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host))
+ return;
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+ if (status == MYRB_STATUS_SUCCESS) {
+ if (rbld_buf.ldev_num == sdev->id) {
+ ldev_size = rbld_buf.ldev_size;
+ remaining = rbld_buf.blocks_left;
+ }
+ }
+ if (remaining && ldev_size)
+ percent_complete = (ldev_size - remaining) * 100 / ldev_size;
+ raid_set_resync(myrb_raid_template, dev, percent_complete);
+}
+
+/**
+ * myrb_get_state - get raid volume status
+ * @dev: the device struct object
+ */
+static void myrb_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_ldev_info *ldev_info = sdev->hostdata;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+ unsigned short status;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
+ state = RAID_STATE_UNKNOWN;
+ else {
+ status = myrb_get_rbld_progress(cb, NULL);
+ if (status == MYRB_STATUS_SUCCESS)
+ state = RAID_STATE_RESYNCING;
+ else {
+ switch (ldev_info->state) {
+ case MYRB_DEVICE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MYRB_DEVICE_WO:
+ case MYRB_DEVICE_CRITICAL:
+ state = RAID_STATE_DEGRADED;
+ break;
+ default:
+ state = RAID_STATE_OFFLINE;
+ }
+ }
+ }
+ raid_set_state(myrb_raid_template, dev, state);
+}
+
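+/*
+ * Hooks for the raid_class transport: report whether a device is a
+ * RAID volume and expose its state and resync progress via sysfs.
+ */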
+struct raid_function_template myrb_raid_functions = {
+ .cookie = &myrb_template,
+ .is_raid = myrb_is_raid,
+ .get_resync = myrb_get_resync,
+ .get_state = myrb_get_state,
+};
+
+static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
+ struct scsi_cmnd *scmd)
+{
+ unsigned short status;
+
+ if (!cmd_blk)
+ return;
+
+ scsi_dma_unmap(scmd);
+
+ if (cmd_blk->dcdb) {
+ memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
+ dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
+ cmd_blk->dcdb_addr);
+ cmd_blk->dcdb = NULL;
+ }
+ if (cmd_blk->sgl) {
+ dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
+ cmd_blk->sgl = NULL;
+ cmd_blk->sgl_addr = 0;
+ }
+ status = cmd_blk->status;
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
+ case MYRB_STATUS_DEVICE_BUSY:
+ scmd->result = (DID_OK << 16) | status;
+ break;
+ case MYRB_STATUS_BAD_DATA:
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Bad Data Encountered\n");
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ /* Unrecovered read error */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x11, 0);
+ else
+ /* Write error */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x0C, 0);
+ scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ break;
+ case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
+ scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ /* Unrecovered read error, auto-reallocation failed */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x11, 0x04);
+ else
+ /* Write error, auto-reallocation failed */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x0C, 0x02);
+ scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ break;
+ case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Logical Drive Nonexistent or Offline\n");
+ scmd->result = (DID_BAD_TARGET << 16);
+ break;
+ case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Attempt to Access Beyond End of Logical Drive\n");
+ /* Logical block address out of range */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ NOT_READY, 0x21, 0);
+ scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ break;
+ case MYRB_STATUS_DEVICE_NONRESPONSIVE:
+ dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
+ scmd->result = (DID_BAD_TARGET << 16);
+ break;
+ default:
+ scmd_printk(KERN_ERR, scmd,
+ "Unexpected Error Status %04X", status);
+ scmd->result = (DID_ERROR << 16);
+ break;
+ }
+ scmd->scsi_done(scmd);
+}
+
+static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ if (!cmd_blk)
+ return;
+
+ if (cmd_blk->completion) {
+ complete(cmd_blk->completion);
+ cmd_blk->completion = NULL;
+ }
+}
+
+static void myrb_monitor(struct work_struct *work)
+{
+ struct myrb_hba *cb = container_of(work,
+ struct myrb_hba, monitor_work.work);
+ struct Scsi_Host *shost = cb->host;
+ unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
+
+ dev_dbg(&shost->shost_gendev, "monitor tick\n");
+
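+ /*
+ * Each pass services at most one pending condition; whenever work
+ * was found (or new work shows up after a fresh enquiry) the next
+ * poll is scheduled much sooner than the regular monitoring
+ * interval.
+ */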
+ if (cb->new_ev_seq > cb->old_ev_seq) {
+ int event = cb->old_ev_seq;
+
+ dev_dbg(&shost->shost_gendev,
+ "get event log no %d/%d\n",
+ cb->new_ev_seq, event);
+ myrb_get_event(cb, event);
+ cb->old_ev_seq = event + 1;
+ interval = 10;
+ } else if (cb->need_err_info) {
+ cb->need_err_info = false;
+ dev_dbg(&shost->shost_gendev, "get error table\n");
+ myrb_get_errtable(cb);
+ interval = 10;
+ } else if (cb->need_rbld && cb->rbld_first) {
+ cb->need_rbld = false;
+ dev_dbg(&shost->shost_gendev,
+ "get rebuild progress\n");
+ myrb_update_rbld_progress(cb);
+ interval = 10;
+ } else if (cb->need_ldev_info) {
+ cb->need_ldev_info = false;
+ dev_dbg(&shost->shost_gendev,
+ "get logical drive info\n");
+ myrb_get_ldev_info(cb);
+ interval = 10;
+ } else if (cb->need_rbld) {
+ cb->need_rbld = false;
+ dev_dbg(&shost->shost_gendev,
+ "get rebuild progress\n");
+ myrb_update_rbld_progress(cb);
+ interval = 10;
+ } else if (cb->need_cc_status) {
+ cb->need_cc_status = false;
+ dev_dbg(&shost->shost_gendev,
+ "get consistency check progress\n");
+ myrb_get_cc_progress(cb);
+ interval = 10;
+ } else if (cb->need_bgi_status) {
+ cb->need_bgi_status = false;
+ dev_dbg(&shost->shost_gendev, "get background init status\n");
+ myrb_bgi_control(cb);
+ interval = 10;
+ } else {
+ dev_dbg(&shost->shost_gendev, "new enquiry\n");
+ mutex_lock(&cb->dma_mutex);
+ myrb_hba_enquiry(cb);
+ mutex_unlock(&cb->dma_mutex);
+ if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
+ cb->need_err_info || cb->need_rbld ||
+ cb->need_ldev_info || cb->need_cc_status ||
+ cb->need_bgi_status) {
+ dev_dbg(&shost->shost_gendev,
+ "reschedule monitor\n");
+ interval = 0;
+ }
+ }
+ if (interval > 1)
+ cb->primary_monitor_time = jiffies;
+ queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
+}
+
+/**
+ * myrb_err_status - reports controller BIOS messages
+ * @cb: pointer to the HBA structure
+ * @error: error status byte
+ * @parm0: first error parameter byte
+ * @parm1: second error parameter byte
+ *
+ * Controller BIOS messages are passed through the Error Status Register
+ * when the driver performs the BIOS handshaking.
+ *
+ * Return: true for fatal errors and false otherwise.
+ */
+bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
+ unsigned char parm0, unsigned char parm1)
+{
+ struct pci_dev *pdev = cb->pdev;
+
+ switch (error) {
+ case 0x00:
+ dev_info(&pdev->dev,
+ "Physical Device %d:%d Not Responding\n",
+ parm1, parm0);
+ break;
+ case 0x08:
+ dev_notice(&pdev->dev, "Spinning Up Drives\n");
+ break;
+ case 0x30:
+ dev_notice(&pdev->dev, "Configuration Checksum Error\n");
+ break;
+ case 0x60:
+ dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
+ break;
+ case 0x70:
+ dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
+ break;
+ case 0x90:
+ dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
+ parm1, parm0);
+ break;
+ case 0xA0:
+ dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
+ break;
+ case 0xB0:
+ dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
+ break;
+ case 0xD0:
+ dev_notice(&pdev->dev, "New Controller Configuration Found\n");
+ break;
+ case 0xF0:
+ dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
+ return true;
+ default:
+ dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
+ error);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Hardware-specific functions
+ */
+
+/*
+ * DAC960 LA Series Controllers
+ */
+
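+/*
+ * Thin accessors for the LA register file: inbound doorbell (IDB),
+ * outbound doorbell (ODB), interrupt mask, and the hardware mailbox
+ * and status registers.
+ */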
+static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
+
+ return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
+}
+
+static inline bool DAC960_LA_init_in_progress(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
+
+ return !(idb & DAC960_LA_IDB_INIT_DONE);
+}
+
+static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
+
+ return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
+
+ return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_LA_enable_intr(void __iomem *base)
+{
+ unsigned char odb = 0xFF;
+
+ odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
+ writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_LA_disable_intr(void __iomem *base)
+{
+ unsigned char odb = 0xFF;
+
+ odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
+ writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_LA_intr_enabled(void __iomem *base)
+{
+ unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
+
+ return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
+ union myrb_cmd_mbox *mbox)
+{
+ mem_mbox->words[1] = mbox->words[1];
+ mem_mbox->words[2] = mbox->words[2];
+ mem_mbox->words[3] = mbox->words[3];
+ /* Memory barrier to prevent reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Memory barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
+ writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
+ writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
+ writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
+}
+
+static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
+{
+ return readb(base + DAC960_LA_STSID_OFFSET);
+}
+
+static inline unsigned short DAC960_LA_read_status(void __iomem *base)
+{
+ return readw(base + DAC960_LA_STS_OFFSET);
+}
+
+static inline bool
+DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
+
+ if (!(errsts & DAC960_LA_ERRSTS_PENDING))
+ return false;
+ errsts &= ~DAC960_LA_ERRSTS_PENDING;
+
+ *error = errsts;
+ *param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
+ *param1 = readb(base + DAC960_LA_CMDID_OFFSET);
+ writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned short
+DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ unsigned short status;
+ int timeout = 0;
+
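+ /*
+ * Hardware mailbox handshake: wait for the inbound mailbox to drain,
+ * post the command and ring the doorbell, then poll for the status
+ * doorbell before reading and acknowledging the completion status.
+ */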
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (!DAC960_LA_hw_mbox_is_full(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (DAC960_LA_hw_mbox_is_full(base)) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for empty mailbox\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ DAC960_LA_write_hw_mbox(base, mbox);
+ DAC960_LA_hw_mbox_new_cmd(base);
+ timeout = 0;
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_LA_hw_mbox_status_available(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (!DAC960_LA_hw_mbox_status_available(base)) {
+ dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ status = DAC960_LA_read_status(base);
+ DAC960_LA_ack_hw_mbox_intr(base);
+ DAC960_LA_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_LA_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ DAC960_LA_disable_intr(base);
+ DAC960_LA_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_LA_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_LA_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -ENODEV;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_LA_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_LA_enable_intr(base);
+ cb->qcmd = myrb_qcmd;
+ cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
+ if (cb->dual_mode_interface)
+ cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
+ else
+ cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
+ cb->disable_intr = DAC960_LA_disable_intr;
+ cb->reset = DAC960_LA_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ struct myrb_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ DAC960_LA_ack_intr(base);
+ next_stat_mbox = cb->next_stat_mbox;
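+ /*
+ * Walk the status mailbox ring until a slot without a valid status
+ * is found; every processed entry is cleared and the scan pointer
+ * wraps at the end of the ring.
+ */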
+ while (next_stat_mbox->valid) {
+ unsigned char id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = next_stat_mbox->status;
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
+ if (++next_stat_mbox > cb->last_stat_mbox)
+ next_stat_mbox = cb->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ }
+ cb->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_LA_privdata = {
+ .hw_init = DAC960_LA_hw_init,
+ .irq_handler = DAC960_LA_intr_handler,
+ .mmio_size = DAC960_LA_mmio_size,
+};
+
+/*
+ * DAC960 PG Series Controllers
+ */
+static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_gen_intr(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_reset_ctrl(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
+{
+ unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
+
+ return idb & DAC960_PG_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_PG_init_in_progress(void __iomem *base)
+{
+ unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
+
+ return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
+{
+ writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
+{
+ writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_intr(void __iomem *base)
+{
+ writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
+
+ return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
+
+ return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_PG_enable_intr(void __iomem *base)
+{
+ unsigned int imask = (unsigned int)-1;
+
+ imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
+ writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_PG_disable_intr(void __iomem *base)
+{
+ unsigned int imask = (unsigned int)-1;
+
+ writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_PG_intr_enabled(void __iomem *base)
+{
+ unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
+
+ return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
+ union myrb_cmd_mbox *mbox)
+{
+ mem_mbox->words[1] = mbox->words[1];
+ mem_mbox->words[2] = mbox->words[2];
+ mem_mbox->words[3] = mbox->words[3];
+ /* Memory barrier to prevent reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Memory barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
+ writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
+ writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
+ writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
+}
+
+static inline unsigned char
+DAC960_PG_read_status_cmd_ident(void __iomem *base)
+{
+ return readb(base + DAC960_PG_STSID_OFFSET);
+}
+
+static inline unsigned short
+DAC960_PG_read_status(void __iomem *base)
+{
+ return readw(base + DAC960_PG_STS_OFFSET);
+}
+
+static inline bool
+DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
+
+ if (!(errsts & DAC960_PG_ERRSTS_PENDING))
+ return false;
+ errsts &= ~DAC960_PG_ERRSTS_PENDING;
+ *error = errsts;
+ *param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
+ *param1 = readb(base + DAC960_PG_CMDID_OFFSET);
+ writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned short
+DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ unsigned short status;
+ int timeout = 0;
+
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (!DAC960_PG_hw_mbox_is_full(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (DAC960_PG_hw_mbox_is_full(base)) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for empty mailbox\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ DAC960_PG_write_hw_mbox(base, mbox);
+ DAC960_PG_hw_mbox_new_cmd(base);
+
+ timeout = 0;
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PG_hw_mbox_status_available(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (!DAC960_PG_hw_mbox_status_available(base)) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for mailbox status\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ status = DAC960_PG_read_status(base);
+ DAC960_PG_ack_hw_mbox_intr(base);
+ DAC960_PG_ack_hw_mbox_status(base);
+
+ return status;
+}
+
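+/*
+ * Board-specific initialization for PG controllers: mask interrupts,
+ * wait for the controller's own initialization to finish (forwarding
+ * any error status it reports), enable the memory mailbox interface
+ * and install the PG command methods.
+ */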
+static int DAC960_PG_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ DAC960_PG_disable_intr(base);
+ DAC960_PG_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_PG_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PG_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_PG_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_PG_enable_intr(base);
+ cb->qcmd = myrb_qcmd;
+ cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
+ if (cb->dual_mode_interface)
+ cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
+ else
+ cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
+ cb->disable_intr = DAC960_PG_disable_intr;
+ cb->reset = DAC960_PG_reset_ctrl;
+
+ return 0;
+}
+
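+/*
+ * Interrupt handler: walk the status mailbox ring and complete each
+ * command it references. Ids MYRB_DCMD_TAG and MYRB_MCMD_TAG denote
+ * the driver's internal command blocks; any other id is a SCSI host
+ * tag offset by 3.
+ */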
+static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ struct myrb_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ DAC960_PG_ack_intr(base);
+ next_stat_mbox = cb->next_stat_mbox;
+ while (next_stat_mbox->valid) {
+ unsigned char id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = next_stat_mbox->status;
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
+ if (++next_stat_mbox > cb->last_stat_mbox)
+ next_stat_mbox = cb->first_stat_mbox;
+
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ cb->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_PG_privdata = {
+ .hw_init = DAC960_PG_hw_init,
+ .irq_handler = DAC960_PG_intr_handler,
+ .mmio_size = DAC960_PG_mmio_size,
+};
+
+/*
+ * DAC960 PD Series Controllers
+ */
+
+static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
+
+ return idb & DAC960_PD_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_PD_init_in_progress(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
+
+ return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_PD_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
+}
+
+static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
+
+ return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_PD_enable_intr(void __iomem *base)
+{
+ writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
+}
+
+static inline void DAC960_PD_disable_intr(void __iomem *base)
+{
+ writeb(0, base + DAC960_PD_IRQEN_OFFSET);
+}
+
+static inline bool DAC960_PD_intr_enabled(void __iomem *base)
+{
+ unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
+
+ return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
+}
+
+static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
+ writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
+ writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
+ writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
+}
+
+static inline unsigned char
+DAC960_PD_read_status_cmd_ident(void __iomem *base)
+{
+ return readb(base + DAC960_PD_STSID_OFFSET);
+}
+
+static inline unsigned short
+DAC960_PD_read_status(void __iomem *base)
+{
+ return readw(base + DAC960_PD_STS_OFFSET);
+}
+
+static inline bool
+DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
+
+ if (!(errsts & DAC960_PD_ERRSTS_PENDING))
+ return false;
+ errsts &= ~DAC960_PD_ERRSTS_PENDING;
+ *error = errsts;
+ *param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
+ *param1 = readb(base + DAC960_PD_CMDID_OFFSET);
+ writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
+ return true;
+}
+
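+/*
+ * PD controllers have no memory mailbox; commands are written directly
+ * to the hardware mailbox as soon as it drains.
+ */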
+static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ void __iomem *base = cb->io_base;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
+ while (DAC960_PD_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_PD_write_cmd_mbox(base, mbox);
+ DAC960_PD_hw_mbox_new_cmd(base);
+}
+
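+/*
+ * PD boards also expose their registers as an I/O port range (BAR 0);
+ * reserve it here even though the driver accesses the registers through
+ * the MMIO mapping set up by myrb_detect().
+ */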
+static int DAC960_PD_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ if (!request_region(cb->io_addr, 0x80, "myrb")) {
+ dev_err(&pdev->dev, "IO port 0x%lx busy\n",
+ (unsigned long)cb->io_addr);
+ return -EBUSY;
+ }
+ DAC960_PD_disable_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_PD_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PD_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, NULL)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_PD_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_PD_enable_intr(base);
+ cb->qcmd = DAC960_PD_qcmd;
+ cb->disable_intr = DAC960_PD_disable_intr;
+ cb->reset = DAC960_PD_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ while (DAC960_PD_hw_mbox_status_available(base)) {
+ unsigned char id = DAC960_PD_read_status_cmd_ident(base);
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = DAC960_PD_read_status(base);
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ DAC960_PD_ack_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_PD_privdata = {
+ .hw_init = DAC960_PD_hw_init,
+ .irq_handler = DAC960_PD_intr_handler,
+ .mmio_size = DAC960_PD_mmio_size,
+};
+
+/*
+ * DAC960 P Series Controllers
+ *
+ * Similar to the DAC960 PD Series Controllers, but some commands have
+ * to be translated.
+ */
+
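+/*
+ * The 2.xx firmware on P series boards returns ENQUIRY and device
+ * state data in an older layout; these helpers rearrange the replies
+ * in place into the format the rest of the driver expects.
+ */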
+static inline void myrb_translate_enquiry(void *enq)
+{
+ memcpy(enq + 132, enq + 36, 64);
+ memset(enq + 36, 0, 96);
+}
+
+static inline void myrb_translate_devstate(void *state)
+{
+ memcpy(state + 2, state + 3, 1);
+ memmove(state + 4, state + 5, 2);
+ memmove(state + 6, state + 8, 4);
+}
+
+static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
+{
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ int ldev_num = mbox->type5.ld.ldev_num;
+
+ mbox->bytes[3] &= 0x7;
+ mbox->bytes[3] |= mbox->bytes[7] << 6;
+ mbox->bytes[7] = ldev_num;
+}
+
+static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
+{
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ int ldev_num = mbox->bytes[7];
+
+ mbox->bytes[7] = mbox->bytes[3] >> 6;
+ mbox->bytes[3] &= 0x7;
+ mbox->bytes[3] |= ldev_num << 3;
+}
+
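+/*
+ * Queue a command on a P series board: rewrite the opcode to its
+ * 2.xx firmware equivalent, shuffling the read/write mailbox fields
+ * where needed, before posting it. DAC960_P_intr_handler() reverses
+ * the translation once the command completes.
+ */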
+static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ void __iomem *base = cb->io_base;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
+ switch (mbox->common.opcode) {
+ case MYRB_CMD_ENQUIRY:
+ mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
+ break;
+ case MYRB_CMD_GET_DEVICE_STATE:
+ mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
+ break;
+ case MYRB_CMD_READ:
+ mbox->common.opcode = MYRB_CMD_READ_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE:
+ mbox->common.opcode = MYRB_CMD_WRITE_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_READ_SG:
+ mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE_SG:
+ mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ default:
+ break;
+ }
+ while (DAC960_PD_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_PD_write_cmd_mbox(base, mbox);
+ DAC960_PD_hw_mbox_new_cmd(base);
+}
+
+static int DAC960_P_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ if (!request_region(cb->io_addr, 0x80, "myrb")) {
+ dev_err(&pdev->dev, "IO port 0x%lx busy\n",
+ (unsigned long)cb->io_addr);
+ return -EBUSY;
+ }
+ DAC960_PD_disable_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_PD_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PD_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, NULL)) {
+ dev_err(&pdev->dev,
+ "Unable to allocate DMA mapped memory\n");
+ DAC960_PD_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_PD_enable_intr(base);
+ cb->qcmd = DAC960_P_qcmd;
+ cb->disable_intr = DAC960_PD_disable_intr;
+ cb->reset = DAC960_PD_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ while (DAC960_PD_hw_mbox_status_available(base)) {
+ unsigned char id = DAC960_PD_read_status_cmd_ident(base);
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+ union myrb_cmd_mbox *mbox;
+ enum myrb_cmd_opcode op;
+
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = DAC960_PD_read_status(base);
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ DAC960_PD_ack_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+
+ if (!cmd_blk)
+ continue;
+
+ mbox = &cmd_blk->mbox;
+ op = mbox->common.opcode;
+ switch (op) {
+ case MYRB_CMD_ENQUIRY_OLD:
+ mbox->common.opcode = MYRB_CMD_ENQUIRY;
+ myrb_translate_enquiry(cb->enquiry);
+ break;
+ case MYRB_CMD_READ_OLD:
+ mbox->common.opcode = MYRB_CMD_READ;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE_OLD:
+ mbox->common.opcode = MYRB_CMD_WRITE;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_READ_SG_OLD:
+ mbox->common.opcode = MYRB_CMD_READ_SG;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE_SG_OLD:
+ mbox->common.opcode = MYRB_CMD_WRITE_SG;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ default:
+ break;
+ }
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_P_privdata = {
+ .hw_init = DAC960_P_hw_init,
+ .irq_handler = DAC960_P_intr_handler,
+ .mmio_size = DAC960_PD_mmio_size,
+};
+
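+/*
+ * Allocate and bring up one controller: set up the Scsi_Host, map the
+ * register window, run the board-specific hw_init method and attach
+ * the interrupt handler.
+ */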
+static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
+ const struct pci_device_id *entry)
+{
+ struct myrb_privdata *privdata =
+ (struct myrb_privdata *)entry->driver_data;
+ irq_handler_t irq_handler = privdata->irq_handler;
+ unsigned int mmio_size = privdata->mmio_size;
+ struct Scsi_Host *shost;
+ struct myrb_hba *cb = NULL;
+
+ shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
+ if (!shost) {
+ dev_err(&pdev->dev, "Unable to allocate Controller\n");
+ return NULL;
+ }
+ shost->max_cmd_len = 12;
+ shost->max_lun = 256;
+ cb = shost_priv(shost);
+ mutex_init(&cb->dcmd_mutex);
+ mutex_init(&cb->dma_mutex);
+ cb->pdev = pdev;
+
+ if (pci_enable_device(pdev))
+ goto failure;
+
+ if (privdata->hw_init == DAC960_PD_hw_init ||
+ privdata->hw_init == DAC960_P_hw_init) {
+ cb->io_addr = pci_resource_start(pdev, 0);
+ cb->pci_addr = pci_resource_start(pdev, 1);
+ } else
+ cb->pci_addr = pci_resource_start(pdev, 0);
+
+ pci_set_drvdata(pdev, cb);
+ spin_lock_init(&cb->queue_lock);
+ if (mmio_size < PAGE_SIZE)
+ mmio_size = PAGE_SIZE;
+ cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
+ if (cb->mmio_base == NULL) {
+ dev_err(&pdev->dev,
+ "Unable to map Controller Register Window\n");
+ goto failure;
+ }
+
+ cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
+ if (privdata->hw_init(pdev, cb, cb->io_base))
+ goto failure;
+
+ if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
+ dev_err(&pdev->dev,
+ "Unable to acquire IRQ Channel %d\n", pdev->irq);
+ goto failure;
+ }
+ cb->irq = pdev->irq;
+ return cb;
+
+failure:
+ dev_err(&pdev->dev,
+ "Failed to initialize Controller\n");
+ myrb_cleanup(cb);
+ return NULL;
+}
+
+static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+ struct myrb_hba *cb;
+ int ret;
+
+ cb = myrb_detect(dev, entry);
+ if (!cb)
+ return -ENODEV;
+
+ ret = myrb_get_hba_config(cb);
+ if (ret < 0) {
+ myrb_cleanup(cb);
+ return ret;
+ }
+
+ if (!myrb_create_mempools(dev, cb)) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ ret = scsi_add_host(cb->host, &dev->dev);
+ if (ret) {
+ dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
+ myrb_destroy_mempools(cb);
+ goto failed;
+ }
+ scsi_scan_host(cb->host);
+ return 0;
+failed:
+ myrb_cleanup(cb);
+ return ret;
+}
+
+static void myrb_remove(struct pci_dev *pdev)
+{
+ struct myrb_hba *cb = pci_get_drvdata(pdev);
+
+ shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
+ myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
+ myrb_cleanup(cb);
+ myrb_destroy_mempools(cb);
+}
+
+static const struct pci_device_id myrb_id_table[] = {
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
+ PCI_DEVICE_ID_DEC_21285,
+ PCI_VENDOR_ID_MYLEX,
+ PCI_DEVICE_ID_MYLEX_DAC960_LA),
+ .driver_data = (unsigned long) &DAC960_LA_privdata,
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
+ },
+ {0, },
+};
+
+MODULE_DEVICE_TABLE(pci, myrb_id_table);
+
+static struct pci_driver myrb_pci_driver = {
+ .name = "myrb",
+ .id_table = myrb_id_table,
+ .probe = myrb_probe,
+ .remove = myrb_remove,
+};
+
+static int __init myrb_init_module(void)
+{
+ int ret;
+
+ myrb_raid_template = raid_class_attach(&myrb_raid_functions);
+ if (!myrb_raid_template)
+ return -ENODEV;
+
+ ret = pci_register_driver(&myrb_pci_driver);
+ if (ret)
+ raid_class_release(myrb_raid_template);
+
+ return ret;
+}
+
+static void __exit myrb_cleanup_module(void)
+{
+ pci_unregister_driver(&myrb_pci_driver);
+ raid_class_release(myrb_raid_template);
+}
+
+module_init(myrb_init_module);
+module_exit(myrb_cleanup_module);
+
+MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h
new file mode 100644
index 000000000000..9289c19fcb2f
--- /dev/null
+++ b/drivers/scsi/myrb.h
@@ -0,0 +1,958 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ *
+ */
+
+#ifndef MYRB_H
+#define MYRB_H
+
+#define MYRB_MAX_LDEVS 32
+#define MYRB_MAX_CHANNELS 3
+#define MYRB_MAX_TARGETS 16
+#define MYRB_MAX_PHYSICAL_DEVICES 45
+#define MYRB_SCATTER_GATHER_LIMIT 32
+#define MYRB_CMD_MBOX_COUNT 256
+#define MYRB_STAT_MBOX_COUNT 1024
+
+#define MYRB_BLKSIZE_BITS 9
+#define MYRB_MAILBOX_TIMEOUT 1000000
+
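+/*
+ * Command ids (tags) 1 and 2 are reserved for the driver's internal
+ * command blocks; ids used for SCSI commands start at 3.
+ */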
+#define MYRB_DCMD_TAG 1
+#define MYRB_MCMD_TAG 2
+
+#define MYRB_PRIMARY_MONITOR_INTERVAL (10 * HZ)
+#define MYRB_SECONDARY_MONITOR_INTERVAL (60 * HZ)
+
+/*
+ * DAC960 V1 Firmware Command Opcodes.
+ */
+enum myrb_cmd_opcode {
+ /* I/O Commands */
+ MYRB_CMD_READ_EXTENDED = 0x33,
+ MYRB_CMD_WRITE_EXTENDED = 0x34,
+ MYRB_CMD_READAHEAD_EXTENDED = 0x35,
+ MYRB_CMD_READ_EXTENDED_SG = 0xB3,
+ MYRB_CMD_WRITE_EXTENDED_SG = 0xB4,
+ MYRB_CMD_READ = 0x36,
+ MYRB_CMD_READ_SG = 0xB6,
+ MYRB_CMD_WRITE = 0x37,
+ MYRB_CMD_WRITE_SG = 0xB7,
+ MYRB_CMD_DCDB = 0x04,
+ MYRB_CMD_DCDB_SG = 0x84,
+ MYRB_CMD_FLUSH = 0x0A,
+ /* Controller Status Related Commands */
+ MYRB_CMD_ENQUIRY = 0x53,
+ MYRB_CMD_ENQUIRY2 = 0x1C,
+ MYRB_CMD_GET_LDRV_ELEMENT = 0x55,
+ MYRB_CMD_GET_LDEV_INFO = 0x19,
+ MYRB_CMD_IOPORTREAD = 0x39,
+ MYRB_CMD_IOPORTWRITE = 0x3A,
+ MYRB_CMD_GET_SD_STATS = 0x3E,
+ MYRB_CMD_GET_PD_STATS = 0x3F,
+ MYRB_CMD_EVENT_LOG_OPERATION = 0x72,
+ /* Device Related Commands */
+ MYRB_CMD_START_DEVICE = 0x10,
+ MYRB_CMD_GET_DEVICE_STATE = 0x50,
+ MYRB_CMD_STOP_CHANNEL = 0x13,
+ MYRB_CMD_START_CHANNEL = 0x12,
+ MYRB_CMD_RESET_CHANNEL = 0x1A,
+ /* Commands Associated with Data Consistency and Errors */
+ MYRB_CMD_REBUILD = 0x09,
+ MYRB_CMD_REBUILD_ASYNC = 0x16,
+ MYRB_CMD_CHECK_CONSISTENCY = 0x0F,
+ MYRB_CMD_CHECK_CONSISTENCY_ASYNC = 0x1E,
+ MYRB_CMD_REBUILD_STAT = 0x0C,
+ MYRB_CMD_GET_REBUILD_PROGRESS = 0x27,
+ MYRB_CMD_REBUILD_CONTROL = 0x1F,
+ MYRB_CMD_READ_BADBLOCK_TABLE = 0x0B,
+ MYRB_CMD_READ_BADDATA_TABLE = 0x25,
+ MYRB_CMD_CLEAR_BADDATA_TABLE = 0x26,
+ MYRB_CMD_GET_ERROR_TABLE = 0x17,
+ MYRB_CMD_ADD_CAPACITY_ASYNC = 0x2A,
+ MYRB_CMD_BGI_CONTROL = 0x2B,
+ /* Configuration Related Commands */
+ MYRB_CMD_READ_CONFIG2 = 0x3D,
+ MYRB_CMD_WRITE_CONFIG2 = 0x3C,
+ MYRB_CMD_READ_CONFIG_ONDISK = 0x4A,
+ MYRB_CMD_WRITE_CONFIG_ONDISK = 0x4B,
+ MYRB_CMD_READ_CONFIG = 0x4E,
+ MYRB_CMD_READ_BACKUP_CONFIG = 0x4D,
+ MYRB_CMD_WRITE_CONFIG = 0x4F,
+ MYRB_CMD_ADD_CONFIG = 0x4C,
+ MYRB_CMD_READ_CONFIG_LABEL = 0x48,
+ MYRB_CMD_WRITE_CONFIG_LABEL = 0x49,
+ /* Firmware Upgrade Related Commands */
+ MYRB_CMD_LOAD_IMAGE = 0x20,
+ MYRB_CMD_STORE_IMAGE = 0x21,
+ MYRB_CMD_PROGRAM_IMAGE = 0x22,
+ /* Diagnostic Commands */
+ MYRB_CMD_SET_DIAGNOSTIC_MODE = 0x31,
+ MYRB_CMD_RUN_DIAGNOSTIC = 0x32,
+ /* Subsystem Service Commands */
+ MYRB_CMD_GET_SUBSYS_DATA = 0x70,
+ MYRB_CMD_SET_SUBSYS_PARAM = 0x71,
+ /* Version 2.xx Firmware Commands */
+ MYRB_CMD_ENQUIRY_OLD = 0x05,
+ MYRB_CMD_GET_DEVICE_STATE_OLD = 0x14,
+ MYRB_CMD_READ_OLD = 0x02,
+ MYRB_CMD_WRITE_OLD = 0x03,
+ MYRB_CMD_READ_SG_OLD = 0x82,
+ MYRB_CMD_WRITE_SG_OLD = 0x83
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Command Status Codes.
+ */
+#define MYRB_STATUS_SUCCESS 0x0000 /* Common */
+#define MYRB_STATUS_CHECK_CONDITION 0x0002 /* Common */
+#define MYRB_STATUS_NO_DEVICE 0x0102 /* Common */
+#define MYRB_STATUS_INVALID_ADDRESS 0x0105 /* Common */
+#define MYRB_STATUS_INVALID_PARAM 0x0105 /* Common */
+#define MYRB_STATUS_IRRECOVERABLE_DATA_ERROR 0x0001 /* I/O */
+#define MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE 0x0002 /* I/O */
+#define MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV 0x0105 /* I/O */
+#define MYRB_STATUS_BAD_DATA 0x010C /* I/O */
+#define MYRB_STATUS_DEVICE_BUSY 0x0008 /* DCDB */
+#define MYRB_STATUS_DEVICE_NONRESPONSIVE 0x000E /* DCDB */
+#define MYRB_STATUS_COMMAND_TERMINATED 0x000F /* DCDB */
+#define MYRB_STATUS_START_DEVICE_FAILED 0x0002 /* Device */
+#define MYRB_STATUS_INVALID_CHANNEL_OR_TARGET 0x0105 /* Device */
+#define MYRB_STATUS_CHANNEL_BUSY 0x0106 /* Device */
+#define MYRB_STATUS_OUT_OF_MEMORY 0x0107 /* Device */
+#define MYRB_STATUS_CHANNEL_NOT_STOPPED 0x0002 /* Device */
+#define MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE 0x0002 /* Consistency */
+#define MYRB_STATUS_RBLD_BADBLOCKS 0x0003 /* Consistency */
+#define MYRB_STATUS_RBLD_NEW_DISK_FAILED 0x0004 /* Consistency */
+#define MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS 0x0106 /* Consistency */
+#define MYRB_STATUS_DEPENDENT_DISK_DEAD 0x0002 /* Consistency */
+#define MYRB_STATUS_INCONSISTENT_BLOCKS 0x0003 /* Consistency */
+#define MYRB_STATUS_INVALID_OR_NONREDUNDANT_LDRV 0x0105 /* Consistency */
+#define MYRB_STATUS_NO_RBLD_OR_CHECK_INPROGRESS 0x0105 /* Consistency */
+#define MYRB_STATUS_RBLD_IN_PROGRESS_DATA_VALID 0x0000 /* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE 0x0002 /* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_BADBLOCKS 0x0003 /* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED 0x0004 /* Consistency */
+#define MYRB_STATUS_RBLD_SUCCESS 0x0100 /* Consistency */
+#define MYRB_STATUS_RBLD_SUCCESS_TERMINATED 0x0107 /* Consistency */
+#define MYRB_STATUS_RBLD_NOT_CHECKED 0x0108 /* Consistency */
+#define MYRB_STATUS_BGI_SUCCESS 0x0100 /* Consistency */
+#define MYRB_STATUS_BGI_ABORTED 0x0005 /* Consistency */
+#define MYRB_STATUS_NO_BGI_INPROGRESS 0x0105 /* Consistency */
+#define MYRB_STATUS_ADD_CAPACITY_INPROGRESS 0x0004 /* Consistency */
+#define MYRB_STATUS_ADD_CAPACITY_FAILED_OR_SUSPENDED 0x00F4 /* Consistency */
+#define MYRB_STATUS_CONFIG2_CSUM_ERROR 0x0002 /* Configuration */
+#define MYRB_STATUS_CONFIGURATION_SUSPENDED 0x0106 /* Configuration */
+#define MYRB_STATUS_FAILED_TO_CONFIGURE_NVRAM 0x0105 /* Configuration */
+#define MYRB_STATUS_CONFIGURATION_NOT_SAVED 0x0106 /* Configuration */
+#define MYRB_STATUS_SUBSYS_NOTINSTALLED 0x0001 /* Subsystem */
+#define MYRB_STATUS_SUBSYS_FAILED 0x0002 /* Subsystem */
+#define MYRB_STATUS_SUBSYS_BUSY 0x0106 /* Subsystem */
+#define MYRB_STATUS_SUBSYS_TIMEOUT 0x0108 /* Subsystem */
+
+/*
+ * DAC960 V1 Firmware Enquiry Command reply structure.
+ */
+struct myrb_enquiry {
+ unsigned char ldev_count; /* Byte 0 */
+ unsigned int rsvd1:24; /* Bytes 1-3 */
+ unsigned int ldev_sizes[32]; /* Bytes 4-131 */
+ unsigned short flash_age; /* Bytes 132-133 */
+ struct {
+ unsigned char deferred:1; /* Byte 134 Bit 0 */
+ unsigned char low_bat:1; /* Byte 134 Bit 1 */
+ unsigned char rsvd2:6; /* Byte 134 Bits 2-7 */
+ } status;
+ unsigned char rsvd3:8; /* Byte 135 */
+ unsigned char fw_minor_version; /* Byte 136 */
+ unsigned char fw_major_version; /* Byte 137 */
+ enum {
+ MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS = 0x00,
+ MYRB_STDBY_RBLD_IN_PROGRESS = 0x01,
+ MYRB_BG_RBLD_IN_PROGRESS = 0x02,
+ MYRB_BG_CHECK_IN_PROGRESS = 0x03,
+ MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR = 0xFF,
+ MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED = 0xF0,
+ MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED = 0xF1,
+ MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER = 0xF2,
+ MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED = 0xF3
+ } __packed rbld; /* Byte 138 */
+ unsigned char max_tcq; /* Byte 139 */
+ unsigned char ldev_offline; /* Byte 140 */
+ unsigned char rsvd4:8; /* Byte 141 */
+ unsigned short ev_seq; /* Bytes 142-143 */
+ unsigned char ldev_critical; /* Byte 144 */
+ unsigned int rsvd5:24; /* Bytes 145-147 */
+ unsigned char pdev_dead; /* Byte 148 */
+ unsigned char rsvd6:8; /* Byte 149 */
+ unsigned char rbld_count; /* Byte 150 */
+ struct {
+ unsigned char rsvd7:3; /* Byte 151 Bits 0-2 */
+ unsigned char bbu_present:1; /* Byte 151 Bit 3 */
+ unsigned char rsvd8:4; /* Byte 151 Bits 4-7 */
+ } misc;
+ struct {
+ unsigned char target;
+ unsigned char channel;
+ } dead_drives[21]; /* Bytes 152-193 */
+ unsigned char rsvd9[62]; /* Bytes 194-255 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Enquiry2 Command reply structure.
+ */
+struct myrb_enquiry2 {
+ struct {
+ enum {
+ DAC960_V1_P_PD_PU = 0x01,
+ DAC960_V1_PL = 0x02,
+ DAC960_V1_PG = 0x10,
+ DAC960_V1_PJ = 0x11,
+ DAC960_V1_PR = 0x12,
+ DAC960_V1_PT = 0x13,
+ DAC960_V1_PTL0 = 0x14,
+ DAC960_V1_PRL = 0x15,
+ DAC960_V1_PTL1 = 0x16,
+ DAC960_V1_1164P = 0x20
+ } __packed sub_model; /* Byte 0 */
+ unsigned char actual_channels; /* Byte 1 */
+ enum {
+ MYRB_5_CHANNEL_BOARD = 0x01,
+ MYRB_3_CHANNEL_BOARD = 0x02,
+ MYRB_2_CHANNEL_BOARD = 0x03,
+ MYRB_3_CHANNEL_ASIC_DAC = 0x04
+ } __packed model; /* Byte 2 */
+ enum {
+ MYRB_EISA_CONTROLLER = 0x01,
+ MYRB_MCA_CONTROLLER = 0x02,
+ MYRB_PCI_CONTROLLER = 0x03,
+ MYRB_SCSI_TO_SCSI = 0x08
+ } __packed controller; /* Byte 3 */
+ } hw; /* Bytes 0-3 */
+ /* MajorVersion.MinorVersion-FirmwareType-TurnID */
+ struct {
+ unsigned char major_version; /* Byte 4 */
+ unsigned char minor_version; /* Byte 5 */
+ unsigned char turn_id; /* Byte 6 */
+ char firmware_type; /* Byte 7 */
+ } fw; /* Bytes 4-7 */
+ unsigned int rsvd1; /* Bytes 8-11 */
+ unsigned char cfg_chan; /* Byte 12 */
+ unsigned char cur_chan; /* Byte 13 */
+ unsigned char max_targets; /* Byte 14 */
+ unsigned char max_tcq; /* Byte 15 */
+ unsigned char max_ldev; /* Byte 16 */
+ unsigned char max_arms; /* Byte 17 */
+ unsigned char max_spans; /* Byte 18 */
+ unsigned char rsvd2; /* Byte 19 */
+ unsigned int rsvd3; /* Bytes 20-23 */
+ unsigned int mem_size; /* Bytes 24-27 */
+ unsigned int cache_size; /* Bytes 28-31 */
+ unsigned int flash_size; /* Bytes 32-35 */
+ unsigned int nvram_size; /* Bytes 36-39 */
+ struct {
+ enum {
+ MYRB_RAM_TYPE_DRAM = 0x0,
+ MYRB_RAM_TYPE_EDO = 0x1,
+ MYRB_RAM_TYPE_SDRAM = 0x2,
+ MYRB_RAM_TYPE_Last = 0x7
+ } __packed ram:3; /* Byte 40 Bits 0-2 */
+ enum {
+ MYRB_ERR_CORR_None = 0x0,
+ MYRB_ERR_CORR_Parity = 0x1,
+ MYRB_ERR_CORR_ECC = 0x2,
+ MYRB_ERR_CORR_Last = 0x7
+ } __packed ec:3; /* Byte 40 Bits 3-5 */
+ unsigned char fast_page:1; /* Byte 40 Bit 6 */
+ unsigned char low_power:1; /* Byte 40 Bit 7 */
+ unsigned char rsvd4; /* Byte 41 */
+ } mem_type;
+ unsigned short clock_speed; /* Bytes 42-43 */
+ unsigned short mem_speed; /* Bytes 44-45 */
+ unsigned short hw_speed; /* Bytes 46-47 */
+ unsigned char rsvd5[12]; /* Bytes 48-59 */
+ unsigned short max_cmds; /* Bytes 60-61 */
+ unsigned short max_sge; /* Bytes 62-63 */
+ unsigned short max_drv_cmds; /* Bytes 64-65 */
+ unsigned short max_io_desc; /* Bytes 66-67 */
+ unsigned short max_sectors; /* Bytes 68-69 */
+ unsigned char latency; /* Byte 70 */
+ unsigned char rsvd6; /* Byte 71 */
+ unsigned char scsi_tmo; /* Byte 72 */
+ unsigned char rsvd7; /* Byte 73 */
+ unsigned short min_freelines; /* Bytes 74-75 */
+ unsigned char rsvd8[8]; /* Bytes 76-83 */
+ unsigned char rbld_rate_const; /* Byte 84 */
+ unsigned char rsvd9[11]; /* Bytes 85-95 */
+ unsigned short pdrv_block_size; /* Bytes 96-97 */
+ unsigned short ldev_block_size; /* Bytes 98-99 */
+ unsigned short max_blocks_per_cmd; /* Bytes 100-101 */
+ unsigned short block_factor; /* Bytes 102-103 */
+ unsigned short cacheline_size; /* Bytes 104-105 */
+ struct {
+ enum {
+ MYRB_WIDTH_NARROW_8BIT = 0x0,
+ MYRB_WIDTH_WIDE_16BIT = 0x1,
+ MYRB_WIDTH_WIDE_32BIT = 0x2
+ } __packed bus_width:2; /* Byte 106 Bits 0-1 */
+ enum {
+ MYRB_SCSI_SPEED_FAST = 0x0,
+ MYRB_SCSI_SPEED_ULTRA = 0x1,
+ MYRB_SCSI_SPEED_ULTRA2 = 0x2
+ } __packed bus_speed:2; /* Byte 106 Bits 2-3 */
+ unsigned char differential:1; /* Byte 106 Bit 4 */
+ unsigned char rsvd10:3; /* Byte 106 Bits 5-7 */
+ } scsi_cap;
+ unsigned char rsvd11[5]; /* Bytes 107-111 */
+ unsigned short fw_build; /* Bytes 112-113 */
+ enum {
+ MYRB_FAULT_AEMI = 0x01,
+ MYRB_FAULT_OEM1 = 0x02,
+ MYRB_FAULT_OEM2 = 0x04,
+ MYRB_FAULT_OEM3 = 0x08,
+ MYRB_FAULT_CONNER = 0x10,
+ MYRB_FAULT_SAFTE = 0x20
+ } __packed fault_mgmt; /* Byte 114 */
+ unsigned char rsvd12; /* Byte 115 */
+ struct {
+ unsigned int clustering:1; /* Byte 116 Bit 0 */
+ unsigned int online_RAID_expansion:1; /* Byte 116 Bit 1 */
+ unsigned int readahead:1; /* Byte 116 Bit 2 */
+ unsigned int bgi:1; /* Byte 116 Bit 3 */
+ unsigned int rsvd13:28; /* Bytes 116-119 */
+ } fw_features;
+ unsigned char rsvd14[8]; /* Bytes 120-127 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Logical Drive State type.
+ */
+enum myrb_devstate {
+ MYRB_DEVICE_DEAD = 0x00,
+ MYRB_DEVICE_WO = 0x02,
+ MYRB_DEVICE_ONLINE = 0x03,
+ MYRB_DEVICE_CRITICAL = 0x04,
+ MYRB_DEVICE_STANDBY = 0x10,
+ MYRB_DEVICE_OFFLINE = 0xFF
+} __packed;
+
+/*
+ * DAC960 V1 RAID Levels
+ */
+enum myrb_raidlevel {
+ MYRB_RAID_LEVEL0 = 0x0, /* RAID 0 */
+ MYRB_RAID_LEVEL1 = 0x1, /* RAID 1 */
+ MYRB_RAID_LEVEL3 = 0x3, /* RAID 3 */
+ MYRB_RAID_LEVEL5 = 0x5, /* RAID 5 */
+ MYRB_RAID_LEVEL6 = 0x6, /* RAID 6 */
+ MYRB_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Logical Drive Information structure.
+ */
+struct myrb_ldev_info {
+ unsigned int size; /* Bytes 0-3 */
+ enum myrb_devstate state; /* Byte 4 */
+ unsigned int raid_level:7; /* Byte 5 Bits 0-6 */
+ unsigned int wb_enabled:1; /* Byte 5 Bit 7 */
+ unsigned int rsvd:16; /* Bytes 6-7 */
+};
+
+/*
+ * DAC960 V1 Firmware Perform Event Log Operation Types.
+ */
+#define DAC960_V1_GetEventLogEntry 0x00
+
+/*
+ * DAC960 V1 Firmware Get Event Log Entry Command reply structure.
+ */
+struct myrb_log_entry {
+ unsigned char msg_type; /* Byte 0 */
+ unsigned char msg_len; /* Byte 1 */
+ unsigned char target:5; /* Byte 2 Bits 0-4 */
+ unsigned char channel:3; /* Byte 2 Bits 5-7 */
+ unsigned char lun:6; /* Byte 3 Bits 0-5 */
+ unsigned char rsvd1:2; /* Byte 3 Bits 6-7 */
+ unsigned short seq_num; /* Bytes 4-5 */
+ unsigned char sense[26]; /* Bytes 6-31 */
+};
+
+/*
+ * DAC960 V1 Firmware Get Device State Command reply structure.
+ * The structure is padded by 2 bytes for compatibility with Version 2.xx
+ * Firmware.
+ */
+struct myrb_pdev_state {
+ unsigned int present:1; /* Byte 0 Bit 0 */
+ unsigned int :7; /* Byte 0 Bits 1-7 */
+ enum {
+ MYRB_TYPE_OTHER = 0x0,
+ MYRB_TYPE_DISK = 0x1,
+ MYRB_TYPE_TAPE = 0x2,
+ MYRB_TYPE_CDROM_OR_WORM = 0x3
+ } __packed devtype:2; /* Byte 1 Bits 0-1 */
+ unsigned int rsvd1:1; /* Byte 1 Bit 2 */
+ unsigned int fast20:1; /* Byte 1 Bit 3 */
+ unsigned int sync:1; /* Byte 1 Bit 4 */
+ unsigned int fast:1; /* Byte 1 Bit 5 */
+ unsigned int wide:1; /* Byte 1 Bit 6 */
+ unsigned int tcq_supported:1; /* Byte 1 Bit 7 */
+ enum myrb_devstate state; /* Byte 2 */
+ unsigned int rsvd2:8; /* Byte 3 */
+ unsigned char sync_multiplier; /* Byte 4 */
+ unsigned int sync_offset:5; /* Byte 5 Bits 0-4 */
+ unsigned int rsvd3:3; /* Byte 5 Bits 5-7 */
+ unsigned int size; /* Bytes 6-9 */
+ unsigned int rsvd4:16; /* Bytes 10-11 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
+ */
+struct myrb_rbld_progress {
+ unsigned int ldev_num; /* Bytes 0-3 */
+ unsigned int ldev_size; /* Bytes 4-7 */
+ unsigned int blocks_left; /* Bytes 8-11 */
+};
+
+/*
+ * DAC960 V1 Firmware Background Initialization Status Command reply structure.
+ */
+struct myrb_bgi_status {
+ unsigned int ldev_size; /* Bytes 0-3 */
+ unsigned int blocks_done; /* Bytes 4-7 */
+ unsigned char rsvd1[12]; /* Bytes 8-19 */
+ unsigned int ldev_num; /* Bytes 20-23 */
+ unsigned char raid_level; /* Byte 24 */
+ enum {
+ MYRB_BGI_INVALID = 0x00,
+ MYRB_BGI_STARTED = 0x02,
+ MYRB_BGI_INPROGRESS = 0x04,
+ MYRB_BGI_SUSPENDED = 0x05,
+ MYRB_BGI_CANCELLED = 0x06
+ } __packed status; /* Byte 25 */
+ unsigned char rsvd2[6]; /* Bytes 26-31 */
+};
+
+/*
+ * DAC960 V1 Firmware Error Table Entry structure.
+ */
+struct myrb_error_entry {
+ unsigned char parity_err; /* Byte 0 */
+ unsigned char soft_err; /* Byte 1 */
+ unsigned char hard_err; /* Byte 2 */
+ unsigned char misc_err; /* Byte 3 */
+};
+
+/*
+ * DAC960 V1 Firmware Read Config2 Command reply structure.
+ */
+struct myrb_config2 {
+ unsigned rsvd1:1; /* Byte 0 Bit 0 */
+ unsigned active_negation:1; /* Byte 0 Bit 1 */
+ unsigned rsvd2:5; /* Byte 0 Bits 2-6 */
+ unsigned no_rescan_on_reset_during_scan:1; /* Byte 0 Bit 7 */
+ unsigned StorageWorks_support:1; /* Byte 1 Bit 0 */
+ unsigned HewlettPackard_support:1; /* Byte 1 Bit 1 */
+ unsigned no_disconnect_on_first_command:1; /* Byte 1 Bit 2 */
+ unsigned rsvd3:2; /* Byte 1 Bits 3-4 */
+ unsigned AEMI_ARM:1; /* Byte 1 Bit 5 */
+ unsigned AEMI_OFM:1; /* Byte 1 Bit 6 */
+ unsigned rsvd4:1; /* Byte 1 Bit 7 */
+ enum {
+ MYRB_OEMID_MYLEX = 0x00,
+ MYRB_OEMID_IBM = 0x08,
+ MYRB_OEMID_HP = 0x0A,
+ MYRB_OEMID_DEC = 0x0C,
+ MYRB_OEMID_SIEMENS = 0x10,
+ MYRB_OEMID_INTEL = 0x12
+ } __packed OEMID; /* Byte 2 */
+ unsigned char oem_model_number; /* Byte 3 */
+ unsigned char physical_sector; /* Byte 4 */
+ unsigned char logical_sector; /* Byte 5 */
+ unsigned char block_factor; /* Byte 6 */
+ unsigned readahead_enabled:1; /* Byte 7 Bit 0 */
+ unsigned low_BIOS_delay:1; /* Byte 7 Bit 1 */
+ unsigned rsvd5:2; /* Byte 7 Bits 2-3 */
+ unsigned restrict_reassign_to_one_sector:1; /* Byte 7 Bit 4 */
+ unsigned rsvd6:1; /* Byte 7 Bit 5 */
+ unsigned FUA_during_write_recovery:1; /* Byte 7 Bit 6 */
+ unsigned enable_LeftSymmetricRAID5Algorithm:1; /* Byte 7 Bit 7 */
+ unsigned char default_rebuild_rate; /* Byte 8 */
+ unsigned char rsvd7; /* Byte 9 */
+ unsigned char blocks_per_cacheline; /* Byte 10 */
+ unsigned char blocks_per_stripe; /* Byte 11 */
+ struct {
+ enum {
+ MYRB_SPEED_ASYNC = 0x0,
+ MYRB_SPEED_SYNC_8MHz = 0x1,
+ MYRB_SPEED_SYNC_5MHz = 0x2,
+ MYRB_SPEED_SYNC_10_OR_20MHz = 0x3
+ } __packed speed:2; /* Byte 11 Bits 0-1 */
+ unsigned force_8bit:1; /* Byte 11 Bit 2 */
+ unsigned disable_fast20:1; /* Byte 11 Bit 3 */
+ unsigned rsvd8:3; /* Byte 11 Bits 4-6 */
+ unsigned enable_tcq:1; /* Byte 11 Bit 7 */
+ } __packed channelparam[6]; /* Bytes 12-17 */
+ unsigned char SCSIInitiatorID; /* Byte 18 */
+ unsigned char rsvd9; /* Byte 19 */
+ enum {
+ MYRB_STARTUP_CONTROLLER_SPINUP = 0x00,
+ MYRB_STARTUP_POWERON_SPINUP = 0x01
+ } __packed startup; /* Byte 20 */
+ unsigned char simultaneous_device_spinup_count; /* Byte 21 */
+ unsigned char seconds_delay_between_spinups; /* Byte 22 */
+ unsigned char rsvd10[29]; /* Bytes 23-51 */
+ unsigned BIOS_disabled:1; /* Byte 52 Bit 0 */
+ unsigned CDROM_boot_enabled:1; /* Byte 52 Bit 1 */
+ unsigned rsvd11:3; /* Byte 52 Bits 2-4 */
+ enum {
+ MYRB_GEOM_128_32 = 0x0,
+ MYRB_GEOM_255_63 = 0x1,
+ MYRB_GEOM_RESERVED1 = 0x2,
+ MYRB_GEOM_RESERVED2 = 0x3
+ } __packed drive_geometry:2; /* Byte 52 Bits 5-6 */
+ unsigned rsvd12:1; /* Byte 52 Bit 7 */
+ unsigned char rsvd13[9]; /* Bytes 53-61 */
+ unsigned short csum; /* Bytes 62-63 */
+};
+
+/*
+ * DAC960 V1 Firmware DCDB request structure.
+ */
+struct myrb_dcdb {
+ unsigned target:4; /* Byte 0 Bits 0-3 */
+ unsigned channel:4; /* Byte 0 Bits 4-7 */
+ enum {
+ MYRB_DCDB_XFER_NONE = 0,
+ MYRB_DCDB_XFER_DEVICE_TO_SYSTEM = 1,
+ MYRB_DCDB_XFER_SYSTEM_TO_DEVICE = 2,
+ MYRB_DCDB_XFER_ILLEGAL = 3
+ } __packed data_xfer:2; /* Byte 1 Bits 0-1 */
+ unsigned early_status:1; /* Byte 1 Bit 2 */
+ unsigned rsvd1:1; /* Byte 1 Bit 3 */
+ enum {
+ MYRB_DCDB_TMO_24_HRS = 0,
+ MYRB_DCDB_TMO_10_SECS = 1,
+ MYRB_DCDB_TMO_60_SECS = 2,
+ MYRB_DCDB_TMO_10_MINS = 3
+ } __packed timeout:2; /* Byte 1 Bits 4-5 */
+ unsigned no_autosense:1; /* Byte 1 Bit 6 */
+ unsigned allow_disconnect:1; /* Byte 1 Bit 7 */
+ unsigned short xfer_len_lo; /* Bytes 2-3 */
+ u32 dma_addr; /* Bytes 4-7 */
+ unsigned char cdb_len:4; /* Byte 8 Bits 0-3 */
+ unsigned char xfer_len_hi4:4; /* Byte 8 Bits 4-7 */
+ unsigned char sense_len; /* Byte 9 */
+ unsigned char cdb[12]; /* Bytes 10-21 */
+ unsigned char sense[64]; /* Bytes 22-85 */
+ unsigned char status; /* Byte 86 */
+ unsigned char rsvd2; /* Byte 87 */
+};
+
+/*
+ * DAC960 V1 Firmware Scatter/Gather List Type 1 32 Bit Address
+ * 32 Bit Byte Count structure.
+ */
+struct myrb_sge {
+ u32 sge_addr; /* Bytes 0-3 */
+ u32 sge_count; /* Bytes 4-7 */
+};
+
+/*
+ * 13 Byte DAC960 V1 Firmware Command Mailbox structure.
+ * Bytes 13-15 are not used. The structure is padded to 16 bytes for
+ * efficient access.
+ */
+union myrb_cmd_mbox {
+ unsigned int words[4]; /* Words 0-3 */
+ unsigned char bytes[16]; /* Bytes 0-15 */
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd[14]; /* Bytes 2-15 */
+ } __packed common;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd1[6]; /* Bytes 2-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char optype; /* Byte 2 */
+ unsigned char rsvd1[5]; /* Bytes 3-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3B;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd1[5]; /* Bytes 2-6 */
+ unsigned char ldev_num:6; /* Byte 7 Bits 0-6 */
+ unsigned char auto_restore:1; /* Byte 7 Bit 7 */
+ unsigned char rsvd2[8]; /* Bytes 8-15 */
+ } __packed type3C;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char channel; /* Byte 2 */
+ unsigned char target; /* Byte 3 */
+ enum myrb_devstate state; /* Byte 4 */
+ unsigned char rsvd1[3]; /* Bytes 5-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3D;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char optype; /* Byte 2 */
+ unsigned char opqual; /* Byte 3 */
+ unsigned short ev_seq; /* Bytes 4-5 */
+ unsigned char rsvd1[2]; /* Bytes 6-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3E;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd1[2]; /* Bytes 2-3 */
+ unsigned char rbld_rate; /* Byte 4 */
+ unsigned char rsvd2[3]; /* Bytes 5-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd3[4]; /* Bytes 12-15 */
+ } __packed type3R;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned short xfer_len; /* Bytes 2-3 */
+ unsigned int lba; /* Bytes 4-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char ldev_num; /* Byte 12 */
+ unsigned char rsvd[3]; /* Bytes 13-15 */
+ } __packed type4;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ struct {
+ unsigned short xfer_len:11; /* Bytes 2-3 */
+ unsigned char ldev_num:5; /* Byte 3 Bits 3-7 */
+ } __packed ld;
+ unsigned int lba; /* Bytes 4-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char sg_count:6; /* Byte 12 Bits 0-5 */
+ enum {
+ MYRB_SGL_ADDR32_COUNT32 = 0x0,
+ MYRB_SGL_ADDR32_COUNT16 = 0x1,
+ MYRB_SGL_COUNT32_ADDR32 = 0x2,
+ MYRB_SGL_COUNT16_ADDR32 = 0x3
+ } __packed sg_type:2; /* Byte 12 Bits 6-7 */
+ unsigned char rsvd[3]; /* Bytes 13-15 */
+ } __packed type5;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char opcode2; /* Byte 2 */
+ unsigned char rsvd1:8; /* Byte 3 */
+ u32 cmd_mbox_addr; /* Bytes 4-7 */
+ u32 stat_mbox_addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed typeX;
+};
+
+/*
+ * DAC960 V1 Firmware Controller Status Mailbox structure.
+ */
+struct myrb_stat_mbox {
+ unsigned char id; /* Byte 0 */
+ unsigned char rsvd:7; /* Byte 1 Bits 0-6 */
+ unsigned char valid:1; /* Byte 1 Bit 7 */
+ unsigned short status; /* Bytes 2-3 */
+};
+
+struct myrb_cmdblk {
+ union myrb_cmd_mbox mbox;
+ unsigned short status;
+ struct completion *completion;
+ struct myrb_dcdb *dcdb;
+ dma_addr_t dcdb_addr;
+ struct myrb_sge *sgl;
+ dma_addr_t sgl_addr;
+};
+
+struct myrb_hba {
+ unsigned int ldev_block_size;
+ unsigned char ldev_geom_heads;
+ unsigned char ldev_geom_sectors;
+ unsigned char bus_width;
+ unsigned short stripe_size;
+ unsigned short segment_size;
+ unsigned short new_ev_seq;
+ unsigned short old_ev_seq;
+ bool dual_mode_interface;
+ bool bgi_status_supported;
+ bool safte_enabled;
+ bool need_ldev_info;
+ bool need_err_info;
+ bool need_rbld;
+ bool need_cc_status;
+ bool need_bgi_status;
+ bool rbld_first;
+
+ struct pci_dev *pdev;
+ struct Scsi_Host *host;
+
+ struct workqueue_struct *work_q;
+ char work_q_name[20];
+ struct delayed_work monitor_work;
+ unsigned long primary_monitor_time;
+ unsigned long secondary_monitor_time;
+
+ struct dma_pool *sg_pool;
+ struct dma_pool *dcdb_pool;
+
+ spinlock_t queue_lock;
+
+ void (*qcmd)(struct myrb_hba *cs, struct myrb_cmdblk *cmd_blk);
+ void (*write_cmd_mbox)(union myrb_cmd_mbox *next_mbox,
+ union myrb_cmd_mbox *cmd_mbox);
+ void (*get_cmd_mbox)(void __iomem *base);
+ void (*disable_intr)(void __iomem *base);
+ void (*reset)(void __iomem *base);
+
+ unsigned int ctlr_num;
+ unsigned char model_name[20];
+ unsigned char fw_version[12];
+
+ unsigned int irq;
+ phys_addr_t io_addr;
+ phys_addr_t pci_addr;
+ void __iomem *io_base;
+ void __iomem *mmio_base;
+
+ size_t cmd_mbox_size;
+ dma_addr_t cmd_mbox_addr;
+ union myrb_cmd_mbox *first_cmd_mbox;
+ union myrb_cmd_mbox *last_cmd_mbox;
+ union myrb_cmd_mbox *next_cmd_mbox;
+ union myrb_cmd_mbox *prev_cmd_mbox1;
+ union myrb_cmd_mbox *prev_cmd_mbox2;
+
+ size_t stat_mbox_size;
+ dma_addr_t stat_mbox_addr;
+ struct myrb_stat_mbox *first_stat_mbox;
+ struct myrb_stat_mbox *last_stat_mbox;
+ struct myrb_stat_mbox *next_stat_mbox;
+
+ struct myrb_cmdblk dcmd_blk;
+ struct myrb_cmdblk mcmd_blk;
+ struct mutex dcmd_mutex;
+
+ struct myrb_enquiry *enquiry;
+ dma_addr_t enquiry_addr;
+
+ struct myrb_error_entry *err_table;
+ dma_addr_t err_table_addr;
+
+ unsigned short last_rbld_status;
+
+ struct myrb_ldev_info *ldev_info_buf;
+ dma_addr_t ldev_info_addr;
+
+ struct myrb_bgi_status bgi_status;
+
+ struct mutex dma_mutex;
+};
+
+/*
+ * DAC960 LA Series Controller Interface Register Offsets.
+ */
+#define DAC960_LA_mmio_size 0x80
+
+enum DAC960_LA_reg_offset {
+ DAC960_LA_IRQMASK_OFFSET = 0x34,
+ DAC960_LA_CMDOP_OFFSET = 0x50,
+ DAC960_LA_CMDID_OFFSET = 0x51,
+ DAC960_LA_MBOX2_OFFSET = 0x52,
+ DAC960_LA_MBOX3_OFFSET = 0x53,
+ DAC960_LA_MBOX4_OFFSET = 0x54,
+ DAC960_LA_MBOX5_OFFSET = 0x55,
+ DAC960_LA_MBOX6_OFFSET = 0x56,
+ DAC960_LA_MBOX7_OFFSET = 0x57,
+ DAC960_LA_MBOX8_OFFSET = 0x58,
+ DAC960_LA_MBOX9_OFFSET = 0x59,
+ DAC960_LA_MBOX10_OFFSET = 0x5A,
+ DAC960_LA_MBOX11_OFFSET = 0x5B,
+ DAC960_LA_MBOX12_OFFSET = 0x5C,
+ DAC960_LA_STSID_OFFSET = 0x5D,
+ DAC960_LA_STS_OFFSET = 0x5E,
+ DAC960_LA_IDB_OFFSET = 0x60,
+ DAC960_LA_ODB_OFFSET = 0x61,
+ DAC960_LA_ERRSTS_OFFSET = 0x63,
+};
+
+/*
+ * DAC960 LA Series Inbound Door Bell Register.
+ */
+#define DAC960_LA_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_LA_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_LA_IDB_GEN_IRQ 0x04
+#define DAC960_LA_IDB_CTRL_RESET 0x08
+#define DAC960_LA_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_LA_IDB_HWMBOX_EMPTY 0x01
+#define DAC960_LA_IDB_INIT_DONE 0x02
+
+/*
+ * DAC960 LA Series Outbound Door Bell Register.
+ */
+#define DAC960_LA_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_LA_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_LA_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_LA_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 LA Series Interrupt Mask Register.
+ */
+#define DAC960_LA_IRQMASK_DISABLE_IRQ 0x04
+
+/*
+ * DAC960 LA Series Error Status Register.
+ */
+#define DAC960_LA_ERRSTS_PENDING 0x02
+
+/*
+ * DAC960 PG Series Controller Interface Register Offsets.
+ */
+#define DAC960_PG_mmio_size 0x2000
+
+enum DAC960_PG_reg_offset {
+ DAC960_PG_IDB_OFFSET = 0x0020,
+ DAC960_PG_ODB_OFFSET = 0x002C,
+ DAC960_PG_IRQMASK_OFFSET = 0x0034,
+ DAC960_PG_CMDOP_OFFSET = 0x1000,
+ DAC960_PG_CMDID_OFFSET = 0x1001,
+ DAC960_PG_MBOX2_OFFSET = 0x1002,
+ DAC960_PG_MBOX3_OFFSET = 0x1003,
+ DAC960_PG_MBOX4_OFFSET = 0x1004,
+ DAC960_PG_MBOX5_OFFSET = 0x1005,
+ DAC960_PG_MBOX6_OFFSET = 0x1006,
+ DAC960_PG_MBOX7_OFFSET = 0x1007,
+ DAC960_PG_MBOX8_OFFSET = 0x1008,
+ DAC960_PG_MBOX9_OFFSET = 0x1009,
+ DAC960_PG_MBOX10_OFFSET = 0x100A,
+ DAC960_PG_MBOX11_OFFSET = 0x100B,
+ DAC960_PG_MBOX12_OFFSET = 0x100C,
+ DAC960_PG_STSID_OFFSET = 0x1018,
+ DAC960_PG_STS_OFFSET = 0x101A,
+ DAC960_PG_ERRSTS_OFFSET = 0x103F,
+};
+
+/*
+ * DAC960 PG Series Inbound Door Bell Register.
+ */
+#define DAC960_PG_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_PG_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_PG_IDB_GEN_IRQ 0x04
+#define DAC960_PG_IDB_CTRL_RESET 0x08
+#define DAC960_PG_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_PG_IDB_HWMBOX_FULL 0x01
+#define DAC960_PG_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 PG Series Outbound Door Bell Register.
+ */
+#define DAC960_PG_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_PG_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_PG_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_PG_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 PG Series Interrupt Mask Register.
+ */
+#define DAC960_PG_IRQMASK_MSI_MASK1 0x03
+#define DAC960_PG_IRQMASK_DISABLE_IRQ 0x04
+#define DAC960_PG_IRQMASK_MSI_MASK2 0xF8
+
+/*
+ * DAC960 PG Series Error Status Register.
+ */
+#define DAC960_PG_ERRSTS_PENDING 0x04
+
+/*
+ * DAC960 PD Series Controller Interface Register Offsets.
+ */
+#define DAC960_PD_mmio_size 0x80
+
+enum DAC960_PD_reg_offset {
+ DAC960_PD_CMDOP_OFFSET = 0x00,
+ DAC960_PD_CMDID_OFFSET = 0x01,
+ DAC960_PD_MBOX2_OFFSET = 0x02,
+ DAC960_PD_MBOX3_OFFSET = 0x03,
+ DAC960_PD_MBOX4_OFFSET = 0x04,
+ DAC960_PD_MBOX5_OFFSET = 0x05,
+ DAC960_PD_MBOX6_OFFSET = 0x06,
+ DAC960_PD_MBOX7_OFFSET = 0x07,
+ DAC960_PD_MBOX8_OFFSET = 0x08,
+ DAC960_PD_MBOX9_OFFSET = 0x09,
+ DAC960_PD_MBOX10_OFFSET = 0x0A,
+ DAC960_PD_MBOX11_OFFSET = 0x0B,
+ DAC960_PD_MBOX12_OFFSET = 0x0C,
+ DAC960_PD_STSID_OFFSET = 0x0D,
+ DAC960_PD_STS_OFFSET = 0x0E,
+ DAC960_PD_ERRSTS_OFFSET = 0x3F,
+ DAC960_PD_IDB_OFFSET = 0x40,
+ DAC960_PD_ODB_OFFSET = 0x41,
+ DAC960_PD_IRQEN_OFFSET = 0x43,
+};
+
+/*
+ * DAC960 PD Series Inbound Door Bell Register.
+ */
+#define DAC960_PD_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_PD_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_PD_IDB_GEN_IRQ 0x04
+#define DAC960_PD_IDB_CTRL_RESET 0x08
+
+#define DAC960_PD_IDB_HWMBOX_FULL 0x01
+#define DAC960_PD_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 PD Series Outbound Door Bell Register.
+ */
+#define DAC960_PD_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_PD_ODB_HWMBOX_STS_AVAIL 0x01
+
+/*
+ * DAC960 PD Series Interrupt Enable Register.
+ */
+#define DAC960_PD_IRQMASK_ENABLE_IRQ 0x01
+
+/*
+ * DAC960 PD Series Error Status Register.
+ */
+#define DAC960_PD_ERRSTS_PENDING 0x04
+
+typedef int (*myrb_hw_init_t)(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base);
+typedef unsigned short (*mbox_mmio_init_t)(struct pci_dev *pdev,
+ void __iomem *base,
+ union myrb_cmd_mbox *mbox);
+
+struct myrb_privdata {
+ myrb_hw_init_t hw_init;
+ irq_handler_t irq_handler;
+ unsigned int mmio_size;
+};
+
+#endif /* MYRB_H */
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
new file mode 100644
index 000000000000..0264a2e2bc19
--- /dev/null
+++ b/drivers/scsi/myrs.c
@@ -0,0 +1,3268 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * This driver supports the newer, SCSI-based firmware interface only.
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver, which has
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/raid_class.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "myrs.h"
+
+static struct raid_template *myrs_raid_template;
+
+static struct myrs_devstate_name_entry {
+ enum myrs_devstate state;
+ char *name;
+} myrs_devstate_name_list[] = {
+ { MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
+ { MYRS_DEVICE_ONLINE, "Online" },
+ { MYRS_DEVICE_REBUILD, "Rebuild" },
+ { MYRS_DEVICE_MISSING, "Missing" },
+ { MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
+ { MYRS_DEVICE_OFFLINE, "Offline" },
+ { MYRS_DEVICE_CRITICAL, "Critical" },
+ { MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
+ { MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
+ { MYRS_DEVICE_STANDBY, "Standby" },
+ { MYRS_DEVICE_INVALID_STATE, "Invalid" },
+};
+
+static char *myrs_devstate_name(enum myrs_devstate state)
+{
+ struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
+ if (entry[i].state == state)
+ return entry[i].name;
+ }
+ return NULL;
+}
+
+static struct myrs_raid_level_name_entry {
+ enum myrs_raid_level level;
+ char *name;
+} myrs_raid_level_name_list[] = {
+ { MYRS_RAID_LEVEL0, "RAID0" },
+ { MYRS_RAID_LEVEL1, "RAID1" },
+ { MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
+ { MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
+ { MYRS_RAID_LEVEL6, "RAID6" },
+ { MYRS_RAID_JBOD, "JBOD" },
+ { MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
+ { MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
+ { MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
+ { MYRS_RAID_SPAN, "Mylex SPAN" },
+ { MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
+ { MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
+ { MYRS_RAID_PHYSICAL, "Physical device" },
+};
+
+static char *myrs_raid_level_name(enum myrs_raid_level level)
+{
+ struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
+ if (entry[i].level == level)
+ return entry[i].name;
+ }
+ return NULL;
+}
+
+/**
+ * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
+ */
+static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
+{
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+
+ memset(mbox, 0, sizeof(union myrs_cmd_mbox));
+ cmd_blk->status = 0;
+}
+
+/**
+ * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
+ */
+static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
+{
+ void __iomem *base = cs->io_base;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;
+
+ cs->write_cmd_mbox(next_mbox, mbox);
+
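+ /*
+ * If either of the two previously posted mailboxes has already
+ * been consumed (words[0] cleared), the controller may have gone
+ * idle, so ring the doorbell; otherwise it is still scanning the
+ * ring and will pick up the new slot by itself.
+ */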
+ if (cs->prev_cmd_mbox1->words[0] == 0 ||
+ cs->prev_cmd_mbox2->words[0] == 0)
+ cs->get_cmd_mbox(base);
+
+ cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
+ cs->prev_cmd_mbox1 = next_mbox;
+
+ if (++next_mbox > cs->last_cmd_mbox)
+ next_mbox = cs->first_cmd_mbox;
+
+ cs->next_cmd_mbox = next_mbox;
+}
+
+/**
+ * myrs_exec_cmd - executes V2 Command and waits for completion.
+ */
+static void myrs_exec_cmd(struct myrs_hba *cs,
+ struct myrs_cmdblk *cmd_blk)
+{
+ DECLARE_COMPLETION_ONSTACK(complete);
+ unsigned long flags;
+
+ cmd_blk->complete = &complete;
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ myrs_qcmd(cs, cmd_blk);
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+
+ WARN_ON(in_interrupt());
+ wait_for_completion(&complete);
+}
+
+/**
+ * myrs_report_progress - prints progress message
+ */
+static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
+ unsigned char *msg, unsigned long blocks,
+ unsigned long size)
+{
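+ /* Shift both values down so that 100 * blocks cannot overflow an int. */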
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d: %s in Progress: %d%% completed\n",
+ ldev_num, msg,
+ (100 * (int)(blocks >> 7)) / (int)(size >> 7));
+}
+
+/**
+ * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
+ */
+static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ dma_addr_t ctlr_info_addr;
+ union myrs_sgl *sgl;
+ unsigned char status;
+ struct myrs_ctlr_info old;
+
+ memcpy(&old, cs->ctlr_info, sizeof(struct myrs_ctlr_info));
+ ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
+ sizeof(struct myrs_ctlr_info),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->ctlr_info.id = MYRS_DCMD_TAG;
+ mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->ctlr_info.control.dma_ctrl_to_host = true;
+ mbox->ctlr_info.control.no_autosense = true;
+ mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
+ mbox->ctlr_info.ctlr_num = 0;
+ mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
+ sgl = &mbox->ctlr_info.dma_addr;
+ sgl->sge[0].sge_addr = ctlr_info_addr;
+ sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
+ dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
+ sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
+ if (status == MYRS_STATUS_SUCCESS) {
+ if (cs->ctlr_info->bg_init_active +
+ cs->ctlr_info->ldev_init_active +
+ cs->ctlr_info->pdev_init_active +
+ cs->ctlr_info->cc_active +
+ cs->ctlr_info->rbld_active +
+ cs->ctlr_info->exp_active != 0)
+ cs->needs_update = true;
+ if (cs->ctlr_info->ldev_present != old.ldev_present ||
+ cs->ctlr_info->ldev_critical != old.ldev_critical ||
+ cs->ctlr_info->ldev_offline != old.ldev_offline)
+ shost_printk(KERN_INFO, cs->host,
+ "Logical drive count changes (%d/%d/%d)\n",
+ cs->ctlr_info->ldev_critical,
+ cs->ctlr_info->ldev_offline,
+ cs->ctlr_info->ldev_present);
+ }
+
+ return status;
+}
+
+/**
+ * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
+ */
+static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
+ unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ dma_addr_t ldev_info_addr;
+ struct myrs_ldev_info ldev_info_orig;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
+ ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
+ sizeof(struct myrs_ldev_info),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->ldev_info.id = MYRS_DCMD_TAG;
+ mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->ldev_info.control.dma_ctrl_to_host = true;
+ mbox->ldev_info.control.no_autosense = true;
+ mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
+ mbox->ldev_info.ldev.ldev_num = ldev_num;
+ mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
+ sgl = &mbox->ldev_info.dma_addr;
+ sgl->sge[0].sge_addr = ldev_info_addr;
+ sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
+ dev_dbg(&cs->host->shost_gendev,
+ "Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
+ sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
+ if (status == MYRS_STATUS_SUCCESS) {
+ unsigned short ldev_num = ldev_info->ldev_num;
+ struct myrs_ldev_info *new = ldev_info;
+ struct myrs_ldev_info *old = &ldev_info_orig;
+ unsigned long ldev_size = new->cfg_devsize;
+
+ if (new->dev_state != old->dev_state) {
+ const char *name;
+
+ name = myrs_devstate_name(new->dev_state);
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d is now %s\n",
+ ldev_num, name ? name : "Invalid");
+ }
+ if ((new->soft_errs != old->soft_errs) ||
+ (new->cmds_failed != old->cmds_failed) ||
+ (new->deferred_write_errs != old->deferred_write_errs))
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
+ ldev_num, new->soft_errs,
+ new->cmds_failed,
+ new->deferred_write_errs);
+ if (new->bg_init_active)
+ myrs_report_progress(cs, ldev_num,
+ "Background Initialization",
+ new->bg_init_lba, ldev_size);
+ else if (new->fg_init_active)
+ myrs_report_progress(cs, ldev_num,
+ "Foreground Initialization",
+ new->fg_init_lba, ldev_size);
+ else if (new->migration_active)
+ myrs_report_progress(cs, ldev_num,
+ "Data Migration",
+ new->migration_lba, ldev_size);
+ else if (new->patrol_active)
+ myrs_report_progress(cs, ldev_num,
+ "Patrol Operation",
+ new->patrol_lba, ldev_size);
+ if (old->bg_init_active && !new->bg_init_active)
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d: Background Initialization %s\n",
+ ldev_num,
+ (new->ldev_control.ldev_init_done ?
+ "Completed" : "Failed"));
+ }
+ return status;
+}
+
+/**
+ * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
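+ * @cs: the myrs_hba struct for this controller
+ * @channel: the physical device channel
+ * @target: the physical device target ID
+ * @lun: the physical device logical unit number
+ * @pdev_info: DMA-able buffer that receives the physical device information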
+ */
+static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
+ unsigned char channel, unsigned char target, unsigned char lun,
+ struct myrs_pdev_info *pdev_info)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ dma_addr_t pdev_info_addr;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
+ sizeof(struct myrs_pdev_info),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->pdev_info.id = MYRS_DCMD_TAG;
+ mbox->pdev_info.control.dma_ctrl_to_host = true;
+ mbox->pdev_info.control.no_autosense = true;
+ mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
+ mbox->pdev_info.pdev.lun = lun;
+ mbox->pdev_info.pdev.target = target;
+ mbox->pdev_info.pdev.channel = channel;
+ mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
+ sgl = &mbox->pdev_info.dma_addr;
+ sgl->sge[0].sge_addr = pdev_info_addr;
+ sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
+ dev_dbg(&cs->host->shost_gendev,
+ "Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
+ channel, target, lun);
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
+ sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
+ return status;
+}
+
+/**
+ * myrs_dev_op - executes a "Device Operation" Command
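+ * @cs: the myrs_hba struct for this controller
+ * @opcode: the device operation IOCTL opcode
+ * @opdev: the device to operate on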
+ */
+static unsigned char myrs_dev_op(struct myrs_hba *cs,
+ enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned char status;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->dev_op.id = MYRS_DCMD_TAG;
+ mbox->dev_op.control.dma_ctrl_to_host = true;
+ mbox->dev_op.control.no_autosense = true;
+ mbox->dev_op.ioctl_opcode = opcode;
+ mbox->dev_op.opdev = opdev;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ return status;
+}
+
+/**
+ * myrs_translate_pdev - translates a Physical Device Channel and
+ * TargetID into a Logical Device.
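+ * @cs: the myrs_hba struct for this controller
+ * @channel: the physical device channel
+ * @target: the physical device target ID
+ * @lun: the physical device logical unit number
+ * @devmap: DMA-able buffer that receives the translated device mapping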
+ */
+static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
+ unsigned char channel, unsigned char target, unsigned char lun,
+ struct myrs_devmap *devmap)
+{
+ struct pci_dev *pdev = cs->pdev;
+ dma_addr_t devmap_addr;
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ memset(devmap, 0x0, sizeof(struct myrs_devmap));
+ devmap_addr = dma_map_single(&pdev->dev, devmap,
+ sizeof(struct myrs_devmap),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, devmap_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ mbox = &cmd_blk->mbox;
+ mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->pdev_info.control.dma_ctrl_to_host = true;
+ mbox->pdev_info.control.no_autosense = true;
+ mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
+ mbox->pdev_info.pdev.target = target;
+ mbox->pdev_info.pdev.channel = channel;
+ mbox->pdev_info.pdev.lun = lun;
+ mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
+ sgl = &mbox->pdev_info.dma_addr;
+ sgl->sge[0].sge_addr = devmap_addr;
+ sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
+
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&pdev->dev, devmap_addr,
+ sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
+ return status;
+}
+
+/**
+ * myrs_get_event - executes a Get Event Command
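+ * @cs: the myrs_hba struct for this controller
+ * @event_num: sequence number of the event to fetch
+ * @event_buf: DMA-able buffer that receives the event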
+ */
+static unsigned char myrs_get_event(struct myrs_hba *cs,
+ unsigned int event_num, struct myrs_event *event_buf)
+{
+ struct pci_dev *pdev = cs->pdev;
+ dma_addr_t event_addr;
+ struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ event_addr = dma_map_single(&pdev->dev, event_buf,
+ sizeof(struct myrs_event), DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, event_addr))
+ return MYRS_STATUS_FAILED;
+
+ mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->get_event.dma_size = sizeof(struct myrs_event);
+ mbox->get_event.evnum_upper = event_num >> 16;
+ mbox->get_event.ctlr_num = 0;
+ mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
+ mbox->get_event.evnum_lower = event_num & 0xFFFF;
+ sgl = &mbox->get_event.dma_addr;
+ sgl->sge[0].sge_addr = event_addr;
+ sgl->sge[0].sge_count = mbox->get_event.dma_size;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ dma_unmap_single(&pdev->dev, event_addr,
+ sizeof(struct myrs_event), DMA_FROM_DEVICE);
+
+ return status;
+}
+
+/**
+ * myrs_get_fwstatus - executes a Get Health Status Command
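+ * @cs: the myrs_hba struct for this controller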
+ */
+static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ myrs_reset_cmd(cmd_blk);
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_MCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ mbox->common.dma_size = sizeof(struct myrs_fwstat);
+ mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
+ sgl = &mbox->common.dma_addr;
+ sgl->sge[0].sge_addr = cs->fwstat_addr;
+ sgl->sge[0].sge_count = mbox->common.dma_size;
+ dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+
+ return status;
+}
+
+/**
+ * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
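+ * @cs: the myrs_hba struct for this controller
+ * @enable_mbox_fn: hardware-specific helper that passes the mailbox
+ * setup command to the controller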
+ */
+static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
+ enable_mbox_t enable_mbox_fn)
+{
+ void __iomem *base = cs->io_base;
+ struct pci_dev *pdev = cs->pdev;
+ union myrs_cmd_mbox *cmd_mbox;
+ struct myrs_stat_mbox *stat_mbox;
+ union myrs_cmd_mbox *mbox;
+ dma_addr_t mbox_addr;
+ unsigned char status = MYRS_STATUS_FAILED;
+
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_err(&pdev->dev, "DMA mask out of range\n");
+ return false;
+ }
+
+ /* Temporary DMA buffer, used only in the scope of this function */
+ mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
+ &mbox_addr, GFP_KERNEL);
+ if (!mbox)
+ return false;
+
+ /* These are the base addresses for the command memory mailbox array */
+ cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
+ cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
+ &cs->cmd_mbox_addr, GFP_KERNEL);
+ if (!cmd_mbox) {
+ dev_err(&pdev->dev, "Failed to allocate command mailbox\n");
+ goto out_free;
+ }
+ cs->first_cmd_mbox = cmd_mbox;
+ cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
+ cs->last_cmd_mbox = cmd_mbox;
+ cs->next_cmd_mbox = cs->first_cmd_mbox;
+ cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
+ cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;
+
+ /* These are the base addresses for the status memory mailbox array */
+ cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
+ stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
+ &cs->stat_mbox_addr, GFP_KERNEL);
+ if (!stat_mbox) {
+ dev_err(&pdev->dev, "Failed to allocate status mailbox\n");
+ goto out_free;
+ }
+
+ cs->first_stat_mbox = stat_mbox;
+ stat_mbox += MYRS_MAX_STAT_MBOX - 1;
+ cs->last_stat_mbox = stat_mbox;
+ cs->next_stat_mbox = cs->first_stat_mbox;
+
+ cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct myrs_fwstat),
+ &cs->fwstat_addr, GFP_KERNEL);
+ if (!cs->fwstat_buf) {
+ dev_err(&pdev->dev, "Failed to allocate firmware health buffer\n");
+ goto out_free;
+ }
+ cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
+ GFP_KERNEL | GFP_DMA);
+ if (!cs->ctlr_info)
+ goto out_free;
+
+ cs->event_buf = kzalloc(sizeof(struct myrs_event),
+ GFP_KERNEL | GFP_DMA);
+ if (!cs->event_buf)
+ goto out_free;
+
+ /* Enable the Memory Mailbox Interface. */
+ memset(mbox, 0, sizeof(union myrs_cmd_mbox));
+ mbox->set_mbox.id = 1;
+ mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->set_mbox.control.no_autosense = true;
+ mbox->set_mbox.first_cmd_mbox_size_kb =
+ (MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
+ mbox->set_mbox.first_stat_mbox_size_kb =
+ (MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
+ mbox->set_mbox.second_cmd_mbox_size_kb = 0;
+ mbox->set_mbox.second_stat_mbox_size_kb = 0;
+ mbox->set_mbox.sense_len = 0;
+ mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
+ mbox->set_mbox.fwstat_buf_size_kb = 1;
+ mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
+ mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
+ mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
+ status = enable_mbox_fn(base, mbox_addr);
+
+out_free:
+ dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
+ mbox, mbox_addr);
+ if (status != MYRS_STATUS_SUCCESS)
+ dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
+ status);
+ return (status == MYRS_STATUS_SUCCESS);
+}
+
+/**
+ * myrs_get_config - reads the Configuration Information
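+ * @cs: the myrs_hba struct for this controller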
+ */
+static int myrs_get_config(struct myrs_hba *cs)
+{
+ struct myrs_ctlr_info *info = cs->ctlr_info;
+ struct Scsi_Host *shost = cs->host;
+ unsigned char status;
+ unsigned char model[20];
+ unsigned char fw_version[12];
+ int i, model_len;
+
+ /* Get data into dma-able area, then copy into permanent location */
+ mutex_lock(&cs->cinfo_mutex);
+ status = myrs_get_ctlr_info(cs);
+ mutex_unlock(&cs->cinfo_mutex);
+ if (status != MYRS_STATUS_SUCCESS) {
+ shost_printk(KERN_ERR, shost,
+ "Failed to get controller information\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the Controller Model Name and Full Model Name fields. */
+ model_len = sizeof(info->ctlr_name);
+ if (model_len > sizeof(model)-1)
+ model_len = sizeof(model)-1;
+ memcpy(model, info->ctlr_name, model_len);
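+ /* Trim trailing spaces and NUL bytes from the controller name */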
+ model_len--;
+ while (model_len > 0 &&
+ (model[model_len] == ' ' || model[model_len] == '\0'))
+ model_len--;
+ model[++model_len] = '\0';
+ strcpy(cs->model_name, "DAC960 ");
+ strcat(cs->model_name, model);
+ /* Initialize the Controller Firmware Version field. */
+ sprintf(fw_version, "%d.%02d-%02d",
+ info->fw_major_version, info->fw_minor_version,
+ info->fw_turn_number);
+ if (info->fw_major_version == 6 &&
+ info->fw_minor_version == 0 &&
+ info->fw_turn_number < 1) {
+ shost_printk(KERN_WARNING, shost,
+ "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
+ "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
+ "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
+ fw_version);
+ return -ENODEV;
+ }
+ /* Initialize the Controller Channels and Targets. */
+ shost->max_channel = info->physchan_present + info->virtchan_present;
+ shost->max_id = info->max_targets[0];
+ for (i = 1; i < 16; i++) {
+ if (!info->max_targets[i])
+ continue;
+ if (shost->max_id < info->max_targets[i])
+ shost->max_id = info->max_targets[i];
+ }
+
+ /*
+ * Initialize the Controller Queue Depth, Driver Queue Depth,
+ * Logical Drive Count, Maximum Blocks per Command, Controller
+ * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
+ * The Driver Queue Depth must be at most three less than
+ * the Controller Queue Depth; tag '1' is reserved for
+ * direct commands, and tag '2' for monitoring commands.
+ */
+ shost->can_queue = info->max_tcq - 3;
+ if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
+ shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
+ shost->max_sectors = info->max_transfer_size;
+ shost->sg_tablesize = info->max_sge;
+ if (shost->sg_tablesize > MYRS_SG_LIMIT)
+ shost->sg_tablesize = MYRS_SG_LIMIT;
+
+ shost_printk(KERN_INFO, shost,
+ "Configuring %s PCI RAID Controller\n", model);
+ shost_printk(KERN_INFO, shost,
+ " Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
+ fw_version, info->physchan_present, info->mem_size_mb);
+
+ shost_printk(KERN_INFO, shost,
+ " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
+ shost->can_queue, shost->max_sectors);
+
+ shost_printk(KERN_INFO, shost,
+ " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
+ shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
+ for (i = 0; i < info->physchan_max; i++) {
+ if (!info->max_targets[i])
+ continue;
+ shost_printk(KERN_INFO, shost,
+ " Device Channel %d: max %d devices\n",
+ i, info->max_targets[i]);
+ }
+ shost_printk(KERN_INFO, shost,
+ " Physical: %d/%d channels, %d disks, %d devices\n",
+ info->physchan_present, info->physchan_max,
+ info->pdisk_present, info->pdev_present);
+
+ shost_printk(KERN_INFO, shost,
+ " Logical: %d/%d channels, %d disks\n",
+ info->virtchan_present, info->virtchan_max,
+ info->ldev_present);
+ return 0;
+}
+
+/*
+ * Controller event table, used by myrs_log_event() to translate event
+ * codes into messages. The first character of each message selects the
+ * event class: 'P' physical device, 'L'/'M' logical device, 'S' sense
+ * data, 'E' enclosure, 'C' controller.
+ */
+static struct {
+ int ev_code;
+ unsigned char *ev_msg;
+} myrs_ev_list[] = {
+ /* Physical Device Events (0x0000 - 0x007F) */
+ { 0x0001, "P Online" },
+ { 0x0002, "P Standby" },
+ { 0x0005, "P Automatic Rebuild Started" },
+ { 0x0006, "P Manual Rebuild Started" },
+ { 0x0007, "P Rebuild Completed" },
+ { 0x0008, "P Rebuild Cancelled" },
+ { 0x0009, "P Rebuild Failed for Unknown Reasons" },
+ { 0x000A, "P Rebuild Failed due to New Physical Device" },
+ { 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
+ { 0x000C, "S Offline" },
+ { 0x000D, "P Found" },
+ { 0x000E, "P Removed" },
+ { 0x000F, "P Unconfigured" },
+ { 0x0010, "P Expand Capacity Started" },
+ { 0x0011, "P Expand Capacity Completed" },
+ { 0x0012, "P Expand Capacity Failed" },
+ { 0x0013, "P Command Timed Out" },
+ { 0x0014, "P Command Aborted" },
+ { 0x0015, "P Command Retried" },
+ { 0x0016, "P Parity Error" },
+ { 0x0017, "P Soft Error" },
+ { 0x0018, "P Miscellaneous Error" },
+ { 0x0019, "P Reset" },
+ { 0x001A, "P Active Spare Found" },
+ { 0x001B, "P Warm Spare Found" },
+ { 0x001C, "S Sense Data Received" },
+ { 0x001D, "P Initialization Started" },
+ { 0x001E, "P Initialization Completed" },
+ { 0x001F, "P Initialization Failed" },
+ { 0x0020, "P Initialization Cancelled" },
+ { 0x0021, "P Failed because Write Recovery Failed" },
+ { 0x0022, "P Failed because SCSI Bus Reset Failed" },
+ { 0x0023, "P Failed because of Double Check Condition" },
+ { 0x0024, "P Failed because Device Cannot Be Accessed" },
+ { 0x0025, "P Failed because of Gross Error on SCSI Processor" },
+ { 0x0026, "P Failed because of Bad Tag from Device" },
+ { 0x0027, "P Failed because of Command Timeout" },
+ { 0x0028, "P Failed because of System Reset" },
+ { 0x0029, "P Failed because of Busy Status or Parity Error" },
+ { 0x002A, "P Failed because Host Set Device to Failed State" },
+ { 0x002B, "P Failed because of Selection Timeout" },
+ { 0x002C, "P Failed because of SCSI Bus Phase Error" },
+ { 0x002D, "P Failed because Device Returned Unknown Status" },
+ { 0x002E, "P Failed because Device Not Ready" },
+ { 0x002F, "P Failed because Device Not Found at Startup" },
+ { 0x0030, "P Failed because COD Write Operation Failed" },
+ { 0x0031, "P Failed because BDT Write Operation Failed" },
+ { 0x0039, "P Missing at Startup" },
+ { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
+ { 0x003C, "P Temporarily Offline Device Automatically Made Online" },
+ { 0x003D, "P Standby Rebuild Started" },
+ /* Logical Device Events (0x0080 - 0x00FF) */
+ { 0x0080, "M Consistency Check Started" },
+ { 0x0081, "M Consistency Check Completed" },
+ { 0x0082, "M Consistency Check Cancelled" },
+ { 0x0083, "M Consistency Check Completed With Errors" },
+ { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
+ { 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
+ { 0x0086, "L Offline" },
+ { 0x0087, "L Critical" },
+ { 0x0088, "L Online" },
+ { 0x0089, "M Automatic Rebuild Started" },
+ { 0x008A, "M Manual Rebuild Started" },
+ { 0x008B, "M Rebuild Completed" },
+ { 0x008C, "M Rebuild Cancelled" },
+ { 0x008D, "M Rebuild Failed for Unknown Reasons" },
+ { 0x008E, "M Rebuild Failed due to New Physical Device" },
+ { 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
+ { 0x0090, "M Initialization Started" },
+ { 0x0091, "M Initialization Completed" },
+ { 0x0092, "M Initialization Cancelled" },
+ { 0x0093, "M Initialization Failed" },
+ { 0x0094, "L Found" },
+ { 0x0095, "L Deleted" },
+ { 0x0096, "M Expand Capacity Started" },
+ { 0x0097, "M Expand Capacity Completed" },
+ { 0x0098, "M Expand Capacity Failed" },
+ { 0x0099, "L Bad Block Found" },
+ { 0x009A, "L Size Changed" },
+ { 0x009B, "L Type Changed" },
+ { 0x009C, "L Bad Data Block Found" },
+ { 0x009E, "L Read of Data Block in BDT" },
+ { 0x009F, "L Write Back Data for Disk Block Lost" },
+ { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
+ { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
+ { 0x00A2, "L Standby Rebuild Started" },
+ /* Fault Management Events (0x0100 - 0x017F) */
+ { 0x0140, "E Fan %d Failed" },
+ { 0x0141, "E Fan %d OK" },
+ { 0x0142, "E Fan %d Not Present" },
+ { 0x0143, "E Power Supply %d Failed" },
+ { 0x0144, "E Power Supply %d OK" },
+ { 0x0145, "E Power Supply %d Not Present" },
+ { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
+ { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
+ { 0x0148, "E Temperature Sensor %d Temperature Normal" },
+ { 0x0149, "E Temperature Sensor %d Not Present" },
+ { 0x014A, "E Enclosure Management Unit %d Access Critical" },
+ { 0x014B, "E Enclosure Management Unit %d Access OK" },
+ { 0x014C, "E Enclosure Management Unit %d Access Offline" },
+ /* Controller Events (0x0180 - 0x01FF) */
+ { 0x0181, "C Cache Write Back Error" },
+ { 0x0188, "C Battery Backup Unit Found" },
+ { 0x0189, "C Battery Backup Unit Charge Level Low" },
+ { 0x018A, "C Battery Backup Unit Charge Level OK" },
+ { 0x0193, "C Installation Aborted" },
+ { 0x0195, "C Battery Backup Unit Physically Removed" },
+ { 0x0196, "C Memory Error During Warm Boot" },
+ { 0x019E, "C Memory Soft ECC Error Corrected" },
+ { 0x019F, "C Memory Hard ECC Error Corrected" },
+ { 0x01A2, "C Battery Backup Unit Failed" },
+ { 0x01AB, "C Mirror Race Recovery Failed" },
+ { 0x01AC, "C Mirror Race on Critical Drive" },
+ /* Controller Internal Processor Events */
+ { 0x0380, "C Internal Controller Hung" },
+ { 0x0381, "C Internal Controller Firmware Breakpoint" },
+ { 0x0390, "C Internal Controller i960 Processor Specific Error" },
+ { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
+ { 0, "" }
+};
+
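+/**
+ * myrs_log_event - prints a Controller Event message
+ * @cs: the myrs_hba struct for this controller
+ * @ev: the event returned by the Get Event command
+ */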
+static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
+{
+ unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
+ int ev_idx = 0, ev_code;
+ unsigned char ev_type, *ev_msg;
+ struct Scsi_Host *shost = cs->host;
+ struct scsi_device *sdev;
+ struct scsi_sense_hdr sshdr;
+ unsigned char sense_info[4];
+ unsigned char cmd_specific[4];
+
+ if (ev->ev_code == 0x1C) {
+ if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
+ memset(&sshdr, 0x0, sizeof(sshdr));
+ memset(sense_info, 0x0, sizeof(sense_info));
+ memset(cmd_specific, 0x0, sizeof(cmd_specific));
+ } else {
+ memcpy(sense_info, &ev->sense_data[3], 4);
+ memcpy(cmd_specific, &ev->sense_data[7], 4);
+ }
+ }
+ if (ev->ev_code == 0x1C &&
+ sshdr.sense_key == VENDOR_SPECIFIC &&
+ (sshdr.asc == 0x80 || sshdr.asc == 0x81))
+ ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
+ while (true) {
+ ev_code = myrs_ev_list[ev_idx].ev_code;
+ if (ev_code == ev->ev_code || ev_code == 0)
+ break;
+ ev_idx++;
+ }
+ if (ev_code == 0) {
+ shost_printk(KERN_WARNING, shost,
+ "Unknown Controller Event Code %04X\n",
+ ev->ev_code);
+ return;
+ }
+ ev_type = myrs_ev_list[ev_idx].ev_msg[0];
+ ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
+ switch (ev_type) {
+ case 'P':
+ sdev = scsi_device_lookup(shost, ev->channel,
+ ev->target, 0);
+ if (!sdev) {
+ shost_printk(KERN_INFO, shost,
+ "event %d: Physical Device %d:%d %s\n",
+ ev->ev_seq, ev->channel, ev->target, ev_msg);
+ break;
+ }
+ sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
+ ev->ev_seq, ev_msg);
+ if (sdev->hostdata &&
+ sdev->channel < cs->ctlr_info->physchan_present) {
+ struct myrs_pdev_info *pdev_info = sdev->hostdata;
+
+ switch (ev->ev_code) {
+ case 0x0001:
+ case 0x0007:
+ pdev_info->dev_state = MYRS_DEVICE_ONLINE;
+ break;
+ case 0x0002:
+ pdev_info->dev_state = MYRS_DEVICE_STANDBY;
+ break;
+ case 0x000C:
+ pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
+ break;
+ case 0x000E:
+ pdev_info->dev_state = MYRS_DEVICE_MISSING;
+ break;
+ case 0x000F:
+ pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
+ break;
+ }
+ }
+ scsi_device_put(sdev);
+ break;
+ case 'L':
+ shost_printk(KERN_INFO, shost,
+ "event %d: Logical Drive %d %s\n",
+ ev->ev_seq, ev->lun, ev_msg);
+ cs->needs_update = true;
+ break;
+ case 'M':
+ shost_printk(KERN_INFO, shost,
+ "event %d: Logical Drive %d %s\n",
+ ev->ev_seq, ev->lun, ev_msg);
+ cs->needs_update = true;
+ break;
+ case 'S':
+ if (sshdr.sense_key == NO_SENSE ||
+ (sshdr.sense_key == NOT_READY &&
+ sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
+ sshdr.ascq == 0x02)))
+ break;
+ shost_printk(KERN_INFO, shost,
+ "event %d: Physical Device %d:%d %s\n",
+ ev->ev_seq, ev->channel, ev->target, ev_msg);
+ shost_printk(KERN_INFO, shost,
+ "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
+ ev->channel, ev->target,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ shost_printk(KERN_INFO, shost,
+ "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
+ ev->channel, ev->target,
+ sense_info[0], sense_info[1],
+ sense_info[2], sense_info[3],
+ cmd_specific[0], cmd_specific[1],
+ cmd_specific[2], cmd_specific[3]);
+ break;
+ case 'E':
+ if (cs->disable_enc_msg)
+ break;
+ sprintf(msg_buf, ev_msg, ev->lun);
+ shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
+ ev->ev_seq, ev->target, msg_buf);
+ break;
+ case 'C':
+ shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
+ ev->ev_seq, ev_msg);
+ break;
+ default:
+ shost_printk(KERN_INFO, shost,
+ "event %d: Unknown Event Code %04X\n",
+ ev->ev_seq, ev->ev_code);
+ break;
+ }
+}
+
+/*
+ * SCSI sysfs interface functions
+ */
+static ssize_t raid_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ int ret;
+
+ if (!sdev->hostdata)
+ return snprintf(buf, 16, "Unknown\n");
+
+ if (sdev->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+ const char *name;
+
+ name = myrs_devstate_name(ldev_info->dev_state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->dev_state);
+ } else {
+ struct myrs_pdev_info *pdev_info;
+ const char *name;
+
+ pdev_info = sdev->hostdata;
+ name = myrs_devstate_name(pdev_info->dev_state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ pdev_info->dev_state);
+ }
+ return ret;
+}
+
+static ssize_t raid_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ enum myrs_devstate new_state;
+ unsigned short ldev_num;
+ unsigned char status;
+
+ if (!strncmp(buf, "offline", 7) ||
+ !strncmp(buf, "kill", 4))
+ new_state = MYRS_DEVICE_OFFLINE;
+ else if (!strncmp(buf, "online", 6))
+ new_state = MYRS_DEVICE_ONLINE;
+ else if (!strncmp(buf, "standby", 7))
+ new_state = MYRS_DEVICE_STANDBY;
+ else
+ return -EINVAL;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present) {
+ struct myrs_pdev_info *pdev_info = sdev->hostdata;
+ struct myrs_devmap *pdev_devmap =
+ (struct myrs_devmap *)&pdev_info->rsvd13;
+
+ if (pdev_info->dev_state == new_state) {
+ sdev_printk(KERN_INFO, sdev,
+ "Device already in %s\n",
+ myrs_devstate_name(new_state));
+ return count;
+ }
+ status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
+ sdev->lun, pdev_devmap);
+ if (status != MYRS_STATUS_SUCCESS)
+ return -ENXIO;
+ ldev_num = pdev_devmap->ldev_num;
+ } else {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ if (ldev_info->dev_state == new_state) {
+ sdev_printk(KERN_INFO, sdev,
+ "Device already in %s\n",
+ myrs_devstate_name(new_state));
+ return count;
+ }
+ ldev_num = ldev_info->ldev_num;
+ }
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
+ mbox->set_devstate.state = new_state;
+ mbox->set_devstate.ldev.ldev_num = ldev_num;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status == MYRS_STATUS_SUCCESS) {
+ if (sdev->channel < cs->ctlr_info->physchan_present) {
+ struct myrs_pdev_info *pdev_info = sdev->hostdata;
+
+ pdev_info->dev_state = new_state;
+ } else {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ ldev_info->dev_state = new_state;
+ }
+ sdev_printk(KERN_INFO, sdev,
+ "Set device state to %s\n",
+ myrs_devstate_name(new_state));
+ return count;
+ }
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to set device state to %s, status 0x%02x\n",
+ myrs_devstate_name(new_state), status);
+ return -EINVAL;
+}
+static DEVICE_ATTR_RW(raid_state);
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ const char *name = NULL;
+
+ if (!sdev->hostdata)
+ return snprintf(buf, 16, "Unknown\n");
+
+ if (sdev->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info;
+
+ ldev_info = sdev->hostdata;
+ name = myrs_raid_level_name(ldev_info->raid_level);
+ if (!name)
+ return snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->dev_state);
+
+ } else
+ name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
+
+ return snprintf(buf, 32, "%s\n", name);
+}
+static DEVICE_ATTR_RO(raid_level);
+
+static ssize_t rebuild_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ unsigned short ldev_num;
+ unsigned char status;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return snprintf(buf, 32, "physical device - not rebuilding\n");
+
+ ldev_info = sdev->hostdata;
+ ldev_num = ldev_info->ldev_num;
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+ if (ldev_info->rbld_active)
+ return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
+ (size_t)ldev_info->rbld_lba,
+ (size_t)ldev_info->cfg_devsize);
+ return snprintf(buf, 32, "not rebuilding\n");
+}
+
+static ssize_t rebuild_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ unsigned short ldev_num;
+ unsigned char status;
+ int rebuild, ret;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return -EINVAL;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ ldev_num = ldev_info->ldev_num;
+
+ ret = kstrtoint(buf, 0, &rebuild);
+ if (ret)
+ return ret;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+
+ if (rebuild && ldev_info->rbld_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Initiated; already in progress\n");
+ return -EALREADY;
+ }
+ if (!rebuild && !ldev_info->rbld_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Cancelled; no rebuild in progress\n");
+ return count;
+ }
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ if (rebuild) {
+ mbox->ldev_info.ldev.ldev_num = ldev_num;
+ mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
+ } else {
+ mbox->ldev_info.ldev.ldev_num = ldev_num;
+ mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
+ }
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not %s, status 0x%02x\n",
+ rebuild ? "Initiated" : "Cancelled", status);
+ ret = -EIO;
+ } else {
+ sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
+ rebuild ? "Initiated" : "Cancelled");
+ ret = count;
+ }
+
+ return ret;
+}
+static DEVICE_ATTR_RW(rebuild);
+
+static ssize_t consistency_check_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ unsigned short ldev_num;
+ unsigned char status;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return snprintf(buf, 32, "physical device - not checking\n");
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ ldev_num = ldev_info->ldev_num;
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+ if (ldev_info->cc_active)
+ return snprintf(buf, 32, "checking block %zu of %zu\n",
+ (size_t)ldev_info->cc_lba,
+ (size_t)ldev_info->cfg_devsize);
+ return snprintf(buf, 32, "not checking\n");
+}
+
+static ssize_t consistency_check_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ unsigned short ldev_num;
+ unsigned char status;
+ int check, ret;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return -EINVAL;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ ldev_num = ldev_info->ldev_num;
+
+ ret = kstrtoint(buf, 0, &check);
+ if (ret)
+ return ret;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+ if (check && ldev_info->cc_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check Not Initiated; "
+ "already in progress\n");
+ return -EALREADY;
+ }
+ if (!check && !ldev_info->cc_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check Not Cancelled; "
+ "check not in progress\n");
+ return count;
+ }
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ if (check) {
+ mbox->cc.ldev.ldev_num = ldev_num;
+ mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
+ mbox->cc.restore_consistency = true;
+ mbox->cc.initialized_area_only = false;
+ } else {
+ mbox->cc.ldev.ldev_num = ldev_num;
+ mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
+ }
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check Not %s, status 0x%02x\n",
+ check ? "Initiated" : "Cancelled", status);
+ ret = -EIO;
+ } else {
+ sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
+ check ? "Initiated" : "Cancelled");
+ ret = count;
+ }
+
+ return ret;
+}
+static DEVICE_ATTR_RW(consistency_check);
+
+static struct device_attribute *myrs_sdev_attrs[] = {
+ &dev_attr_consistency_check,
+ &dev_attr_rebuild,
+ &dev_attr_raid_state,
+ &dev_attr_raid_level,
+ NULL,
+};
+
+static ssize_t serial_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ char serial[17];
+
+ memcpy(serial, cs->ctlr_info->serial_number, 16);
+ serial[16] = '\0';
+ return snprintf(buf, 16, "%s\n", serial);
+}
+static DEVICE_ATTR_RO(serial);
+
+static ssize_t ctlr_num_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 20, "%d\n", cs->host->host_no);
+}
+static DEVICE_ATTR_RO(ctlr_num);
+
+static struct myrs_cpu_type_tbl {
+ enum myrs_cpu_type type;
+ char *name;
+} myrs_cpu_type_names[] = {
+ { MYRS_CPUTYPE_i960CA, "i960CA" },
+ { MYRS_CPUTYPE_i960RD, "i960RD" },
+ { MYRS_CPUTYPE_i960RN, "i960RN" },
+ { MYRS_CPUTYPE_i960RP, "i960RP" },
+ { MYRS_CPUTYPE_NorthBay, "NorthBay" },
+ { MYRS_CPUTYPE_StrongArm, "StrongARM" },
+ { MYRS_CPUTYPE_i960RM, "i960RM" },
+};
+
+static ssize_t processor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ struct myrs_cpu_type_tbl *tbl;
+ const char *first_processor = NULL;
+ const char *second_processor = NULL;
+ struct myrs_ctlr_info *info = cs->ctlr_info;
+ ssize_t ret;
+ int i;
+
+ if (info->cpu[0].cpu_count) {
+ tbl = myrs_cpu_type_names;
+ for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
+ if (tbl[i].type == info->cpu[0].cpu_type) {
+ first_processor = tbl[i].name;
+ break;
+ }
+ }
+ }
+ if (info->cpu[1].cpu_count) {
+ tbl = myrs_cpu_type_names;
+ for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
+ if (tbl[i].type == info->cpu[1].cpu_type) {
+ second_processor = tbl[i].name;
+ break;
+ }
+ }
+ }
+ if (first_processor && second_processor)
+ ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
+ "2: %s (%s, %d cpus)\n",
+ info->cpu[0].cpu_name,
+ first_processor, info->cpu[0].cpu_count,
+ info->cpu[1].cpu_name,
+ second_processor, info->cpu[1].cpu_count);
+ else if (first_processor && !second_processor)
+ ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
+ info->cpu[0].cpu_name,
+ first_processor, info->cpu[0].cpu_count);
+ else if (!first_processor && second_processor)
+ ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
+ info->cpu[1].cpu_name,
+ second_processor, info->cpu[1].cpu_count);
+ else
+ ret = snprintf(buf, 64, "1: absent\n2: absent\n");
+
+ return ret;
+}
+static DEVICE_ATTR_RO(processor);
+
+static ssize_t model_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 28, "%s\n", cs->model_name);
+}
+static DEVICE_ATTR_RO(model);
+
+static ssize_t ctlr_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
+}
+static DEVICE_ATTR_RO(ctlr_type);
+
+static ssize_t cache_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
+}
+static DEVICE_ATTR_RO(cache_size);
+
+static ssize_t firmware_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 16, "%d.%02d-%02d\n",
+ cs->ctlr_info->fw_major_version,
+ cs->ctlr_info->fw_minor_version,
+ cs->ctlr_info->fw_turn_number);
+}
+static DEVICE_ATTR_RO(firmware);
+
+static ssize_t discovery_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ unsigned char status;
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status != MYRS_STATUS_SUCCESS) {
+ shost_printk(KERN_INFO, shost,
+ "Discovery Not Initiated, status %02X\n",
+ status);
+ return -EINVAL;
+ }
+ shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
+ cs->next_evseq = 0;
+ cs->needs_update = true;
+ queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
+ flush_delayed_work(&cs->monitor_work);
+ shost_printk(KERN_INFO, shost, "Discovery Completed\n");
+
+ return count;
+}
+static DEVICE_ATTR_WO(discovery);
+
+static ssize_t flush_cache_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ unsigned char status;
+
+ status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
+ MYRS_RAID_CONTROLLER);
+ if (status == MYRS_STATUS_SUCCESS) {
+ shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
+ return count;
+ }
+ shost_printk(KERN_INFO, shost,
+ "Cache Flush failed, status 0x%02x\n", status);
+ return -EIO;
+}
+static DEVICE_ATTR_WO(flush_cache);
+
+static ssize_t disable_enclosure_messages_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
+}
+
+static ssize_t disable_enclosure_messages_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ int value, ret;
+
+ ret = kstrtoint(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ if (value < 0 || value > 2)
+ return -EINVAL;
+
+ cs->disable_enc_msg = value;
+ return count;
+}
+static DEVICE_ATTR_RW(disable_enclosure_messages);
+
+static struct device_attribute *myrs_shost_attrs[] = {
+ &dev_attr_serial,
+ &dev_attr_ctlr_num,
+ &dev_attr_processor,
+ &dev_attr_model,
+ &dev_attr_ctlr_type,
+ &dev_attr_cache_size,
+ &dev_attr_firmware,
+ &dev_attr_discovery,
+ &dev_attr_flush_cache,
+ &dev_attr_disable_enclosure_messages,
+ NULL,
+};
+
+/*
+ * SCSI midlayer interface
+ */
+int myrs_host_reset(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost = scmd->device->host;
+ struct myrs_hba *cs = shost_priv(shost);
+
+ cs->reset(cs->io_base);
+ return SUCCESS;
+}
+
+static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
+ struct myrs_ldev_info *ldev_info)
+{
+ unsigned char modes[32], *mode_pg;
+ bool dbd;
+ size_t mode_len;
+
+ dbd = (scmd->cmnd[1] & 0x08) == 0x08;
+ if (dbd) {
+ mode_len = 24;
+ mode_pg = &modes[4];
+ } else {
+ mode_len = 32;
+ mode_pg = &modes[12];
+ }
+ memset(modes, 0, sizeof(modes));
+ modes[0] = mode_len - 1;
+ modes[2] = 0x10; /* Enable FUA */
+ if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
+ modes[2] |= 0x80;
+ if (!dbd) {
+ unsigned char *block_desc = &modes[4];
+
+ modes[3] = 8;
+ put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
+ put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
+ }
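+ /* Build the caching mode page (page code 0x08, page length 0x12) */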
+ mode_pg[0] = 0x08;
+ mode_pg[1] = 0x12;
+ if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
+ mode_pg[2] |= 0x01;
+ if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
+ ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
+ mode_pg[2] |= 0x04;
+ if (ldev_info->cacheline_size) {
+ mode_pg[2] |= 0x08;
+ put_unaligned_be16(1 << ldev_info->cacheline_size,
+ &mode_pg[14]);
+ }
+
+ scsi_sg_copy_from_buffer(scmd, modes, mode_len);
+}
+
+static int myrs_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct myrs_hba *cs = shost_priv(shost);
+ struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct scsi_device *sdev = scmd->device;
+ union myrs_sgl *hw_sge;
+ dma_addr_t sense_addr;
+ struct scatterlist *sgl;
+ unsigned long flags, timeout;
+ int nsge;
+
+ if (!scmd->device->hostdata) {
+ scmd->result = (DID_NO_CONNECT << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ switch (scmd->cmnd[0]) {
+ case REPORT_LUNS:
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x20, 0x0);
+ scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ case MODE_SENSE:
+ if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
+ (scmd->cmnd[2] & 0x3F) != 0x08) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ myrs_mode_sense(cs, scmd, ldev_info);
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ break;
+ }
+
+ myrs_reset_cmd(cmd_blk);
+ cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
+ &sense_addr);
+ if (!cmd_blk->sense)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ cmd_blk->sense_addr = sense_addr;
+
+ timeout = scmd->request->timeout;
+ if (scmd->cmd_len <= 10) {
+ if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
+ mbox->SCSI_10.pdev.lun = ldev_info->lun;
+ mbox->SCSI_10.pdev.target = ldev_info->target;
+ mbox->SCSI_10.pdev.channel = ldev_info->channel;
+ mbox->SCSI_10.pdev.ctlr = 0;
+ } else {
+ mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
+ mbox->SCSI_10.pdev.lun = sdev->lun;
+ mbox->SCSI_10.pdev.target = sdev->id;
+ mbox->SCSI_10.pdev.channel = sdev->channel;
+ }
+ mbox->SCSI_10.id = scmd->request->tag + 3;
+ mbox->SCSI_10.control.dma_ctrl_to_host =
+ (scmd->sc_data_direction == DMA_FROM_DEVICE);
+ if (scmd->request->cmd_flags & REQ_FUA)
+ mbox->SCSI_10.control.fua = true;
+ mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
+ mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
+ mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
+ mbox->SCSI_10.cdb_len = scmd->cmd_len;
+ if (timeout > 60) {
+ mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
+ mbox->SCSI_10.tmo.tmo_val = timeout / 60;
+ } else {
+ mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
+ mbox->SCSI_10.tmo.tmo_val = timeout;
+ }
+ memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
+ hw_sge = &mbox->SCSI_10.dma_addr;
+ cmd_blk->dcdb = NULL;
+ } else {
+ dma_addr_t dcdb_dma;
+
+ cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
+ &dcdb_dma);
+ if (!cmd_blk->dcdb) {
+ dma_pool_free(cs->sense_pool, cmd_blk->sense,
+ cmd_blk->sense_addr);
+ cmd_blk->sense = NULL;
+ cmd_blk->sense_addr = 0;
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ cmd_blk->dcdb_dma = dcdb_dma;
+ if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
+ mbox->SCSI_255.pdev.lun = ldev_info->lun;
+ mbox->SCSI_255.pdev.target = ldev_info->target;
+ mbox->SCSI_255.pdev.channel = ldev_info->channel;
+ mbox->SCSI_255.pdev.ctlr = 0;
+ } else {
+ mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
+ mbox->SCSI_255.pdev.lun = sdev->lun;
+ mbox->SCSI_255.pdev.target = sdev->id;
+ mbox->SCSI_255.pdev.channel = sdev->channel;
+ }
+ mbox->SCSI_255.id = scmd->request->tag + 3;
+ mbox->SCSI_255.control.dma_ctrl_to_host =
+ (scmd->sc_data_direction == DMA_FROM_DEVICE);
+ if (scmd->request->cmd_flags & REQ_FUA)
+ mbox->SCSI_255.control.fua = true;
+ mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
+ mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
+ mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
+ mbox->SCSI_255.cdb_len = scmd->cmd_len;
+ mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
+ if (timeout > 60) {
+ mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
+ mbox->SCSI_255.tmo.tmo_val = timeout / 60;
+ } else {
+ mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
+ mbox->SCSI_255.tmo.tmo_val = timeout;
+ }
+ memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
+ hw_sge = &mbox->SCSI_255.dma_addr;
+ }
+ if (scmd->sc_data_direction == DMA_NONE)
+ goto submit;
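+ /*
+ * Map the data buffer: a single SG segment fits into the SGE
+ * embedded in the mailbox, two segments use the embedded SG list,
+ * and anything larger chains an external SG list allocated from
+ * sg_pool (flagged via control.add_sge_mem).
+ */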
+ nsge = scsi_dma_map(scmd);
+ if (nsge == 1) {
+ sgl = scsi_sglist(scmd);
+ hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
+ hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
+ } else {
+ struct myrs_sge *hw_sgl;
+ dma_addr_t hw_sgl_addr;
+ int i;
+
+ if (nsge > 2) {
+ hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
+ &hw_sgl_addr);
+ if (WARN_ON(!hw_sgl)) {
+ if (cmd_blk->dcdb) {
+ dma_pool_free(cs->dcdb_pool,
+ cmd_blk->dcdb,
+ cmd_blk->dcdb_dma);
+ cmd_blk->dcdb = NULL;
+ cmd_blk->dcdb_dma = 0;
+ }
+ dma_pool_free(cs->sense_pool,
+ cmd_blk->sense,
+ cmd_blk->sense_addr);
+ cmd_blk->sense = NULL;
+ cmd_blk->sense_addr = 0;
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ cmd_blk->sgl = hw_sgl;
+ cmd_blk->sgl_addr = hw_sgl_addr;
+ if (scmd->cmd_len <= 10)
+ mbox->SCSI_10.control.add_sge_mem = true;
+ else
+ mbox->SCSI_255.control.add_sge_mem = true;
+ hw_sge->ext.sge0_len = nsge;
+ hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
+ } else
+ hw_sgl = hw_sge->sge;
+
+ scsi_for_each_sg(scmd, sgl, nsge, i) {
+ if (WARN_ON(!hw_sgl)) {
+ scsi_dma_unmap(scmd);
+ scmd->result = (DID_ERROR << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
+ hw_sgl->sge_count = (u64)sg_dma_len(sgl);
+ hw_sgl++;
+ }
+ }
+submit:
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ myrs_qcmd(cs, cmd_blk);
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+
+ return 0;
+}
+
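+/*
+ * Map a virtual-channel SCSI address to a logical device number:
+ * virtual channels are numbered after the physical channels, and each
+ * virtual channel exposes up to max_id targets.
+ */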
+static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
+ struct scsi_device *sdev)
+{
+ unsigned short ldev_num;
+ unsigned int chan_offset =
+ sdev->channel - cs->ctlr_info->physchan_present;
+
+ ldev_num = sdev->id + chan_offset * sdev->host->max_id;
+
+ return ldev_num;
+}
+
+static int myrs_slave_alloc(struct scsi_device *sdev)
+{
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ unsigned char status;
+
+ if (sdev->channel > sdev->host->max_channel)
+ return 0;
+
+ if (sdev->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info;
+ unsigned short ldev_num;
+
+ if (sdev->lun > 0)
+ return -ENXIO;
+
+ ldev_num = myrs_translate_ldev(cs, sdev);
+
+ ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
+ if (!ldev_info)
+ return -ENOMEM;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev->hostdata = NULL;
+ kfree(ldev_info);
+ } else {
+ enum raid_level level;
+
+ dev_dbg(&sdev->sdev_gendev,
+ "Logical device mapping %d:%d:%d -> %d\n",
+ ldev_info->channel, ldev_info->target,
+ ldev_info->lun, ldev_info->ldev_num);
+
+ sdev->hostdata = ldev_info;
+ switch (ldev_info->raid_level) {
+ case MYRS_RAID_LEVEL0:
+ level = RAID_LEVEL_LINEAR;
+ break;
+ case MYRS_RAID_LEVEL1:
+ level = RAID_LEVEL_1;
+ break;
+ case MYRS_RAID_LEVEL3:
+ case MYRS_RAID_LEVEL3F:
+ case MYRS_RAID_LEVEL3L:
+ level = RAID_LEVEL_3;
+ break;
+ case MYRS_RAID_LEVEL5:
+ case MYRS_RAID_LEVEL5L:
+ level = RAID_LEVEL_5;
+ break;
+ case MYRS_RAID_LEVEL6:
+ level = RAID_LEVEL_6;
+ break;
+ case MYRS_RAID_LEVELE:
+ case MYRS_RAID_NEWSPAN:
+ case MYRS_RAID_SPAN:
+ level = RAID_LEVEL_LINEAR;
+ break;
+ case MYRS_RAID_JBOD:
+ level = RAID_LEVEL_JBOD;
+ break;
+ default:
+ level = RAID_LEVEL_UNKNOWN;
+ break;
+ }
+ raid_set_level(myrs_raid_template,
+ &sdev->sdev_gendev, level);
+ if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
+ const char *name;
+
+ name = myrs_devstate_name(ldev_info->dev_state);
+ sdev_printk(KERN_DEBUG, sdev,
+ "logical device in state %s\n",
+ name ? name : "Invalid");
+ }
+ }
+ } else {
+ struct myrs_pdev_info *pdev_info;
+
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ if (!pdev_info)
+ return -ENOMEM;
+
+ status = myrs_get_pdev_info(cs, sdev->channel,
+ sdev->id, sdev->lun,
+ pdev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev->hostdata = NULL;
+ kfree(pdev_info);
+ return -ENXIO;
+ }
+ sdev->hostdata = pdev_info;
+ }
+ return 0;
+}
+
+static int myrs_slave_configure(struct scsi_device *sdev)
+{
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+
+ if (sdev->channel > sdev->host->max_channel)
+ return -ENXIO;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present) {
+ /* Skip HBA device */
+ if (sdev->type == TYPE_RAID)
+ return -ENXIO;
+ sdev->no_uld_attach = 1;
+ return 0;
+ }
+ if (sdev->lun != 0)
+ return -ENXIO;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
+ ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
+ sdev->wce_default_on = 1;
+ sdev->tagged_supported = 1;
+ return 0;
+}
+
+static void myrs_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+}
+
+struct scsi_host_template myrs_template = {
+ .module = THIS_MODULE,
+ .name = "DAC960",
+ .proc_name = "myrs",
+ .queuecommand = myrs_queuecommand,
+ .eh_host_reset_handler = myrs_host_reset,
+ .slave_alloc = myrs_slave_alloc,
+ .slave_configure = myrs_slave_configure,
+ .slave_destroy = myrs_slave_destroy,
+ .cmd_size = sizeof(struct myrs_cmdblk),
+ .shost_attrs = myrs_shost_attrs,
+ .sdev_attrs = myrs_sdev_attrs,
+ .this_id = -1,
+};
+
+static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
+ const struct pci_device_id *entry)
+{
+ struct Scsi_Host *shost;
+ struct myrs_hba *cs;
+
+ shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
+ if (!shost)
+ return NULL;
+
+ shost->max_cmd_len = 16;
+ shost->max_lun = 256;
+ cs = shost_priv(shost);
+ mutex_init(&cs->dcmd_mutex);
+ mutex_init(&cs->cinfo_mutex);
+ cs->host = shost;
+
+ return cs;
+}
+
+/*
+ * RAID template functions
+ */
+
+/**
+ * myrs_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int
+myrs_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+
+ return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
+}
+
+/**
+ * myrs_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void
+myrs_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+ u64 percent_complete = 0;
+ u8 status;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
+ return;
+ if (ldev_info->rbld_active) {
+ unsigned short ldev_num = ldev_info->ldev_num;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status == MYRS_STATUS_SUCCESS) {
+ percent_complete = ldev_info->rbld_lba * 100;
+ do_div(percent_complete, ldev_info->cfg_devsize);
+ }
+ }
+ raid_set_resync(myrs_raid_template, dev, percent_complete);
+}
+
+/**
+ * myrs_get_state - get raid volume status
+ * @dev: the device struct object
+ */
+static void
+myrs_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
+ state = RAID_STATE_UNKNOWN;
+ else {
+ switch (ldev_info->dev_state) {
+ case MYRS_DEVICE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MYRS_DEVICE_SUSPECTED_CRITICAL:
+ case MYRS_DEVICE_CRITICAL:
+ state = RAID_STATE_DEGRADED;
+ break;
+ case MYRS_DEVICE_REBUILD:
+ state = RAID_STATE_RESYNCING;
+ break;
+ case MYRS_DEVICE_UNCONFIGURED:
+ case MYRS_DEVICE_INVALID_STATE:
+ state = RAID_STATE_UNKNOWN;
+ break;
+ default:
+ state = RAID_STATE_OFFLINE;
+ }
+ }
+ raid_set_state(myrs_raid_template, dev, state);
+}
+
+struct raid_function_template myrs_raid_functions = {
+ .cookie = &myrs_template,
+ .is_raid = myrs_is_raid,
+ .get_resync = myrs_get_resync,
+ .get_state = myrs_get_state,
+};
+
+/*
+ * PCI interface functions
+ */
+void myrs_flush_cache(struct myrs_hba *cs)
+{
+ myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
+}
+
+static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
+ struct scsi_cmnd *scmd)
+{
+ unsigned char status;
+
+ if (!cmd_blk)
+ return;
+
+ scsi_dma_unmap(scmd);
+ status = cmd_blk->status;
+ if (cmd_blk->sense) {
+ if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
+ unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ if (sense_len > cmd_blk->sense_len)
+ sense_len = cmd_blk->sense_len;
+ memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
+ }
+ dma_pool_free(cs->sense_pool, cmd_blk->sense,
+ cmd_blk->sense_addr);
+ cmd_blk->sense = NULL;
+ cmd_blk->sense_addr = 0;
+ }
+ if (cmd_blk->dcdb) {
+ dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
+ cmd_blk->dcdb_dma);
+ cmd_blk->dcdb = NULL;
+ cmd_blk->dcdb_dma = 0;
+ }
+ if (cmd_blk->sgl) {
+ dma_pool_free(cs->sg_pool, cmd_blk->sgl,
+ cmd_blk->sgl_addr);
+ cmd_blk->sgl = NULL;
+ cmd_blk->sgl_addr = 0;
+ }
+ if (cmd_blk->residual)
+ scsi_set_resid(scmd, cmd_blk->residual);
+ if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
+ status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
+ scmd->result = (DID_BAD_TARGET << 16);
+ else
+ scmd->result = (DID_OK << 16) | status;
+ scmd->scsi_done(scmd);
+}
+
+static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
+{
+ if (!cmd_blk)
+ return;
+
+ if (cmd_blk->complete) {
+ complete(cmd_blk->complete);
+ cmd_blk->complete = NULL;
+ }
+}
+
+static void myrs_monitor(struct work_struct *work)
+{
+ struct myrs_hba *cs = container_of(work, struct myrs_hba,
+ monitor_work.work);
+ struct Scsi_Host *shost = cs->host;
+ struct myrs_ctlr_info *info = cs->ctlr_info;
+ unsigned int epoch = cs->fwstat_buf->epoch;
+ unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
+ unsigned char status;
+
+ dev_dbg(&shost->shost_gendev, "monitor tick\n");
+
+ status = myrs_get_fwstatus(cs);
+
+ if (cs->needs_update) {
+ cs->needs_update = false;
+ mutex_lock(&cs->cinfo_mutex);
+ status = myrs_get_ctlr_info(cs);
+ mutex_unlock(&cs->cinfo_mutex);
+ }
+ if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
+ status = myrs_get_event(cs, cs->next_evseq,
+ cs->event_buf);
+ if (status == MYRS_STATUS_SUCCESS) {
+ myrs_log_event(cs, cs->event_buf);
+ cs->next_evseq++;
+ interval = 1;
+ }
+ }
+
+ if (time_after(jiffies, cs->secondary_monitor_time
+ + MYRS_SECONDARY_MONITOR_INTERVAL))
+ cs->secondary_monitor_time = jiffies;
+
+ if (info->bg_init_active +
+ info->ldev_init_active +
+ info->pdev_init_active +
+ info->cc_active +
+ info->rbld_active +
+ info->exp_active != 0) {
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, shost) {
+ struct myrs_ldev_info *ldev_info;
+ int ldev_num;
+
+ if (sdev->channel < info->physchan_present)
+ continue;
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ continue;
+ ldev_num = ldev_info->ldev_num;
+ myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ }
+ cs->needs_update = true;
+ }
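+ /*
+ * Nothing changed: same epoch, no pending events, and no update
+ * due, so drop back to the secondary monitoring interval.
+ */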
+ if (epoch == cs->epoch &&
+ cs->fwstat_buf->next_evseq == cs->next_evseq &&
+ (cs->needs_update == false ||
+ time_before(jiffies, cs->primary_monitor_time
+ + MYRS_PRIMARY_MONITOR_INTERVAL))) {
+ interval = MYRS_SECONDARY_MONITOR_INTERVAL;
+ }
+
+ if (interval > 1)
+ cs->primary_monitor_time = jiffies;
+ queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
+}
+
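+/*
+ * myrs_create_mempools - allocate the per-command DMA pools (scatter/
+ * gather lists, sense buffers, and DCDBs) plus the single-threaded
+ * monitoring workqueue, then queue the first monitor run.
+ */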
+static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
+{
+ struct Scsi_Host *shost = cs->host;
+ size_t elem_size, elem_align;
+
+ elem_align = sizeof(struct myrs_sge);
+ elem_size = shost->sg_tablesize * elem_align;
+ cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
+ elem_size, elem_align, 0);
+ if (cs->sg_pool == NULL) {
+ shost_printk(KERN_ERR, shost,
+ "Failed to allocate SG pool\n");
+ return false;
+ }
+
+ cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
+ MYRS_SENSE_SIZE, sizeof(int), 0);
+ if (cs->sense_pool == NULL) {
+ dma_pool_destroy(cs->sg_pool);
+ cs->sg_pool = NULL;
+ shost_printk(KERN_ERR, shost,
+ "Failed to allocate sense data pool\n");
+ return false;
+ }
+
+ cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
+ MYRS_DCDB_SIZE,
+ sizeof(unsigned char), 0);
+ if (!cs->dcdb_pool) {
+ dma_pool_destroy(cs->sg_pool);
+ cs->sg_pool = NULL;
+ dma_pool_destroy(cs->sense_pool);
+ cs->sense_pool = NULL;
+ shost_printk(KERN_ERR, shost,
+ "Failed to allocate DCDB pool\n");
+ return false;
+ }
+
+ snprintf(cs->work_q_name, sizeof(cs->work_q_name),
+ "myrs_wq_%d", shost->host_no);
+ cs->work_q = create_singlethread_workqueue(cs->work_q_name);
+ if (!cs->work_q) {
+ dma_pool_destroy(cs->dcdb_pool);
+ cs->dcdb_pool = NULL;
+ dma_pool_destroy(cs->sg_pool);
+ cs->sg_pool = NULL;
+ dma_pool_destroy(cs->sense_pool);
+ cs->sense_pool = NULL;
+ shost_printk(KERN_ERR, shost,
+ "Failed to create workqueue\n");
+ return false;
+ }
+
+ /* Initialize the Monitoring Timer. */
+ INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
+ queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
+
+ return true;
+}
+
+static void myrs_destroy_mempools(struct myrs_hba *cs)
+{
+ cancel_delayed_work_sync(&cs->monitor_work);
+ destroy_workqueue(cs->work_q);
+
+ dma_pool_destroy(cs->sg_pool);
+ dma_pool_destroy(cs->dcdb_pool);
+ dma_pool_destroy(cs->sense_pool);
+}
+
+static void myrs_unmap(struct myrs_hba *cs)
+{
+ kfree(cs->event_buf);
+ kfree(cs->ctlr_info);
+ if (cs->fwstat_buf) {
+ dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
+ cs->fwstat_buf, cs->fwstat_addr);
+ cs->fwstat_buf = NULL;
+ }
+ if (cs->first_stat_mbox) {
+ dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
+ cs->first_stat_mbox, cs->stat_mbox_addr);
+ cs->first_stat_mbox = NULL;
+ }
+ if (cs->first_cmd_mbox) {
+ dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
+ cs->first_cmd_mbox, cs->cmd_mbox_addr);
+ cs->first_cmd_mbox = NULL;
+ }
+}
+
+static void myrs_cleanup(struct myrs_hba *cs)
+{
+ struct pci_dev *pdev = cs->pdev;
+
+ /* Free the memory mailbox, status, and related structures */
+ myrs_unmap(cs);
+
+ if (cs->mmio_base) {
+ cs->disable_intr(cs);
+ iounmap(cs->mmio_base);
+ cs->mmio_base = NULL;
+ }
+ if (cs->irq)
+ free_irq(cs->irq, cs);
+ if (cs->io_addr)
+ release_region(cs->io_addr, 0x80);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ scsi_host_put(cs->host);
+}
+
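+/*
+ * myrs_detect - allocate a controller instance, enable the PCI device,
+ * map the register window (rounded up to a full page), run the
+ * board-specific hardware initialization, and attach the shared
+ * interrupt handler.
+ */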
+static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
+ const struct pci_device_id *entry)
+{
+ struct myrs_privdata *privdata =
+ (struct myrs_privdata *)entry->driver_data;
+ irq_handler_t irq_handler = privdata->irq_handler;
+ unsigned int mmio_size = privdata->mmio_size;
+ struct myrs_hba *cs = NULL;
+
+ cs = myrs_alloc_host(pdev, entry);
+ if (!cs) {
+ dev_err(&pdev->dev, "Unable to allocate Controller\n");
+ return NULL;
+ }
+ cs->pdev = pdev;
+
+ if (pci_enable_device(pdev))
+ goto Failure;
+
+ cs->pci_addr = pci_resource_start(pdev, 0);
+
+ pci_set_drvdata(pdev, cs);
+ spin_lock_init(&cs->queue_lock);
+ /* Map the Controller Register Window. */
+ if (mmio_size < PAGE_SIZE)
+ mmio_size = PAGE_SIZE;
+ cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
+ if (cs->mmio_base == NULL) {
+ dev_err(&pdev->dev,
+ "Unable to map Controller Register Window\n");
+ goto Failure;
+ }
+
+ cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
+ if (privdata->hw_init(pdev, cs, cs->io_base))
+ goto Failure;
+
+ /* Acquire shared access to the IRQ Channel. */
+ if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
+ dev_err(&pdev->dev,
+ "Unable to acquire IRQ Channel %d\n", pdev->irq);
+ goto Failure;
+ }
+ cs->irq = pdev->irq;
+ return cs;
+
+Failure:
+ dev_err(&pdev->dev,
+ "Failed to initialize Controller\n");
+ myrs_cleanup(cs);
+ return NULL;
+}
+
+/*
+ * myrs_err_status - report Controller BIOS Messages passed through
+ * the Error Status Register when the driver performs the BIOS
+ * handshaking. Returns true for fatal errors and false otherwise.
+ */
+
+static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
+ unsigned char parm0, unsigned char parm1)
+{
+ struct pci_dev *pdev = cs->pdev;
+
+ switch (status) {
+ case 0x00:
+ dev_info(&pdev->dev,
+ "Physical Device %d:%d Not Responding\n",
+ parm1, parm0);
+ break;
+ case 0x08:
+ dev_notice(&pdev->dev, "Spinning Up Drives\n");
+ break;
+ case 0x30:
+ dev_notice(&pdev->dev, "Configuration Checksum Error\n");
+ break;
+ case 0x60:
+ dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
+ break;
+ case 0x70:
+ dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
+ break;
+ case 0x90:
+ dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
+ parm1, parm0);
+ break;
+ case 0xA0:
+ dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
+ break;
+ case 0xB0:
+ dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
+ break;
+ case 0xD0:
+ dev_notice(&pdev->dev, "New Controller Configuration Found\n");
+ break;
+ case 0xF0:
+ dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
+ return true;
+ default:
+ dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
+ status);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Hardware-specific functions
+ */
+
+/*
+ * DAC960 GEM Series Controllers.
+ */
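+
+/*
+ * The GEM doorbell and IRQ mask registers are accessed as 32-bit
+ * little-endian values with the control bits packed into the most
+ * significant byte, hence the << 24 and >> 24 shifts in the accessors
+ * below.
+ */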
+
+static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
+
+ writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_gen_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
+
+ writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24);
+
+ writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
+ DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
+
+ writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_GEM_enable_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
+ DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
+ writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_disable_intr(void __iomem *base)
+{
+ __le32 val = 0;
+
+ writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
+}
+
+static inline bool DAC960_GEM_intr_enabled(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET);
+ return !((le32_to_cpu(val) >> 24) &
+ (DAC960_GEM_IRQMASK_HWMBOX_IRQ |
+ DAC960_GEM_IRQMASK_MMBOX_IRQ));
+}
+
+static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+ union myrs_cmd_mbox *mbox)
+{
+ memcpy(&mem_mbox->words[1], &mbox->words[1],
+ sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+ /* Barrier to avoid reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
+ dma_addr_t cmd_mbox_addr)
+{
+ dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base)
+{
+ return readw(base + DAC960_GEM_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
+{
+ return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
+ if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
+ return false;
+ *error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
+ *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
+ *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
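+ /* Writing 0x03000000 presumably clears the pending error-status bits */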
+ writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
+ return true;
+}
+
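+/*
+ * DAC960_GEM_mbox_init - hardware mailbox handshake used before the
+ * memory mailbox interface is up: wait for the hardware mailbox to
+ * drain, post the mailbox address, ring the doorbell, busy-wait for
+ * a status, then acknowledge both the interrupt and the status.
+ */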
+static inline unsigned char
+DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+ unsigned char status;
+
+ while (DAC960_GEM_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_GEM_write_hw_mbox(base, mbox_addr);
+ DAC960_GEM_hw_mbox_new_cmd(base);
+ while (!DAC960_GEM_hw_mbox_status_available(base))
+ udelay(1);
+ status = DAC960_GEM_read_cmd_status(base);
+ DAC960_GEM_ack_hw_mbox_intr(base);
+ DAC960_GEM_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_GEM_hw_init(struct pci_dev *pdev,
+ struct myrs_hba *cs, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char status, parm0, parm1;
+
+ DAC960_GEM_disable_intr(base);
+ DAC960_GEM_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_GEM_init_in_progress(base) &&
+ timeout < MYRS_MAILBOX_TIMEOUT) {
+ if (DAC960_GEM_read_error_status(base, &status,
+ &parm0, &parm1) &&
+ myrs_err_status(cs, status, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRS_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_GEM_reset_ctrl(base);
+ return -EAGAIN;
+ }
+ DAC960_GEM_enable_intr(base);
+ cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
+ cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
+ cs->disable_intr = DAC960_GEM_disable_intr;
+ cs->reset = DAC960_GEM_reset_ctrl;
+ return 0;
+}
+
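+/*
+ * Interrupt context: consume the status mailbox ring until an empty
+ * (id == 0) slot is reached. Tags 1 and 2 (MYRS_DCMD_TAG and
+ * MYRS_MCMD_TAG) identify the driver's internal command blocks; SCSI
+ * command tags start at 3 and are mapped back via scsi_host_find_tag().
+ */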
+static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
+{
+ struct myrs_hba *cs = arg;
+ void __iomem *base = cs->io_base;
+ struct myrs_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ DAC960_GEM_ack_intr(base);
+ next_stat_mbox = cs->next_stat_mbox;
+ while (next_stat_mbox->id > 0) {
+ unsigned short id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrs_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRS_DCMD_TAG)
+ cmd_blk = &cs->dcmd_blk;
+ else if (id == MYRS_MCMD_TAG)
+ cmd_blk = &cs->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cs->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = next_stat_mbox->status;
+ cmd_blk->sense_len = next_stat_mbox->sense_len;
+ cmd_blk->residual = next_stat_mbox->residual;
+ } else
+ dev_err(&cs->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+ if (++next_stat_mbox > cs->last_stat_mbox)
+ next_stat_mbox = cs->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrs_handle_cmdblk(cs, cmd_blk);
+ else
+ myrs_handle_scsi(cs, cmd_blk, scmd);
+ }
+ }
+ cs->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_GEM_privdata = {
+ .hw_init = DAC960_GEM_hw_init,
+ .irq_handler = DAC960_GEM_intr_handler,
+ .mmio_size = DAC960_GEM_mmio_size,
+};
+
+/*
+ * DAC960 BA Series Controllers.
+ */
+
+static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_IDB_OFFSET);
+ return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
+}
+
+static inline bool DAC960_BA_init_in_progress(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_IDB_OFFSET);
+ return !(val & DAC960_BA_IDB_INIT_DONE);
+}
+
+static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_ODB_OFFSET);
+ return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_ODB_OFFSET);
+ return val & DAC960_BA_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_BA_enable_intr(void __iomem *base)
+{
+ writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_BA_disable_intr(void __iomem *base)
+{
+ writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_BA_intr_enabled(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_IRQMASK_OFFSET);
+ return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+ union myrs_cmd_mbox *mbox)
+{
+ memcpy(&mem_mbox->words[1], &mbox->words[1],
+ sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+ /* Barrier to avoid reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Barrier to force PCI access */
+ mb();
+}
+
+
+static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
+ dma_addr_t cmd_mbox_addr)
+{
+ dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base)
+{
+ return readw(base + DAC960_BA_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
+{
+ return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_ERRSTS_OFFSET);
+ if (!(val & DAC960_BA_ERRSTS_PENDING))
+ return false;
+ val &= ~DAC960_BA_ERRSTS_PENDING;
+ *error = val;
+ *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
+ *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
+ writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned char
+DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+ unsigned char status;
+
+ while (DAC960_BA_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_BA_write_hw_mbox(base, mbox_addr);
+ DAC960_BA_hw_mbox_new_cmd(base);
+ while (!DAC960_BA_hw_mbox_status_available(base))
+ udelay(1);
+ status = DAC960_BA_read_cmd_status(base);
+ DAC960_BA_ack_hw_mbox_intr(base);
+ DAC960_BA_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_BA_hw_init(struct pci_dev *pdev,
+ struct myrs_hba *cs, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char status, parm0, parm1;
+
+ DAC960_BA_disable_intr(base);
+ DAC960_BA_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_BA_init_in_progress(base) &&
+ timeout < MYRS_MAILBOX_TIMEOUT) {
+ if (DAC960_BA_read_error_status(base, &status,
+ &parm0, &parm1) &&
+ myrs_err_status(cs, status, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRS_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_BA_reset_ctrl(base);
+ return -EAGAIN;
+ }
+ DAC960_BA_enable_intr(base);
+ cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
+ cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
+ cs->disable_intr = DAC960_BA_disable_intr;
+ cs->reset = DAC960_BA_reset_ctrl;
+ return 0;
+}
+
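+/*
+ * Completion logic identical to the GEM interrupt handler; only the
+ * doorbell register accesses differ.
+ */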
+static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
+{
+ struct myrs_hba *cs = arg;
+ void __iomem *base = cs->io_base;
+ struct myrs_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ DAC960_BA_ack_intr(base);
+ next_stat_mbox = cs->next_stat_mbox;
+ while (next_stat_mbox->id > 0) {
+ unsigned short id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrs_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRS_DCMD_TAG)
+ cmd_blk = &cs->dcmd_blk;
+ else if (id == MYRS_MCMD_TAG)
+ cmd_blk = &cs->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cs->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = next_stat_mbox->status;
+ cmd_blk->sense_len = next_stat_mbox->sense_len;
+ cmd_blk->residual = next_stat_mbox->residual;
+ } else
+ dev_err(&cs->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+ if (++next_stat_mbox > cs->last_stat_mbox)
+ next_stat_mbox = cs->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrs_handle_cmdblk(cs, cmd_blk);
+ else
+ myrs_handle_scsi(cs, cmd_blk, scmd);
+ }
+ }
+ cs->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_BA_privdata = {
+ .hw_init = DAC960_BA_hw_init,
+ .irq_handler = DAC960_BA_intr_handler,
+ .mmio_size = DAC960_BA_mmio_size,
+};
+
+/*
+ * DAC960 LP Series Controllers.
+ */
+
+static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_IDB_OFFSET);
+ return val & DAC960_LP_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_LP_init_in_progress(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_IDB_OFFSET);
+ return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_ODB_OFFSET);
+ return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_ODB_OFFSET);
+ return val & DAC960_LP_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_LP_enable_intr(void __iomem *base)
+{
+ writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_LP_disable_intr(void __iomem *base)
+{
+ writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_LP_intr_enabled(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_IRQMASK_OFFSET);
+ return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+ union myrs_cmd_mbox *mbox)
+{
+ memcpy(&mem_mbox->words[1], &mbox->words[1],
+ sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+ /* Barrier to avoid reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
+ dma_addr_t cmd_mbox_addr)
+{
+ dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base)
+{
+ return readw(base + DAC960_LP_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
+{
+ return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_ERRSTS_OFFSET);
+ if (!(val & DAC960_LP_ERRSTS_PENDING))
+ return false;
+ val &= ~DAC960_LP_ERRSTS_PENDING;
+ *error = val;
+ *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
+ *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
+ writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned char
+DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+ unsigned char status;
+
+ while (DAC960_LP_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_LP_write_hw_mbox(base, mbox_addr);
+ DAC960_LP_hw_mbox_new_cmd(base);
+ while (!DAC960_LP_hw_mbox_status_available(base))
+ udelay(1);
+ status = DAC960_LP_read_cmd_status(base);
+ DAC960_LP_ack_hw_mbox_intr(base);
+ DAC960_LP_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_LP_hw_init(struct pci_dev *pdev,
+ struct myrs_hba *cs, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char status, parm0, parm1;
+
+ DAC960_LP_disable_intr(base);
+ DAC960_LP_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_LP_init_in_progress(base) &&
+ timeout < MYRS_MAILBOX_TIMEOUT) {
+ if (DAC960_LP_read_error_status(base, &status,
+ &parm0, &parm1) &&
+ myrs_err_status(cs, status, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRS_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_LP_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_LP_enable_intr(base);
+ cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
+ cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
+ cs->disable_intr = DAC960_LP_disable_intr;
+ cs->reset = DAC960_LP_reset_ctrl;
+
+ return 0;
+}
+
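+/*
+ * Completion logic identical to the GEM interrupt handler; only the
+ * doorbell register accesses differ.
+ */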
+static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
+{
+ struct myrs_hba *cs = arg;
+ void __iomem *base = cs->io_base;
+ struct myrs_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ DAC960_LP_ack_intr(base);
+ next_stat_mbox = cs->next_stat_mbox;
+ while (next_stat_mbox->id > 0) {
+ unsigned short id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrs_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRS_DCMD_TAG)
+ cmd_blk = &cs->dcmd_blk;
+ else if (id == MYRS_MCMD_TAG)
+ cmd_blk = &cs->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cs->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = next_stat_mbox->status;
+ cmd_blk->sense_len = next_stat_mbox->sense_len;
+ cmd_blk->residual = next_stat_mbox->residual;
+ } else
+ dev_err(&cs->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+ if (++next_stat_mbox > cs->last_stat_mbox)
+ next_stat_mbox = cs->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrs_handle_cmdblk(cs, cmd_blk);
+ else
+ myrs_handle_scsi(cs, cmd_blk, scmd);
+ }
+ }
+ cs->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_LP_privdata = {
+ .hw_init = DAC960_LP_hw_init,
+ .irq_handler = DAC960_LP_intr_handler,
+ .mmio_size = DAC960_LP_mmio_size,
+};
+
+/*
+ * Module functions
+ */
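+/*
+ * myrs_probe - detect the board, read the controller configuration,
+ * set up the memory pools, and register the SCSI host, unwinding on
+ * any failure.
+ */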
+static int
+myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+ struct myrs_hba *cs;
+ int ret;
+
+ cs = myrs_detect(dev, entry);
+ if (!cs)
+ return -ENODEV;
+
+ ret = myrs_get_config(cs);
+ if (ret < 0) {
+ myrs_cleanup(cs);
+ return ret;
+ }
+
+ if (!myrs_create_mempools(dev, cs)) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ ret = scsi_add_host(cs->host, &dev->dev);
+ if (ret) {
+ dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
+ myrs_destroy_mempools(cs);
+ goto failed;
+ }
+ scsi_scan_host(cs->host);
+ return 0;
+failed:
+ myrs_cleanup(cs);
+ return ret;
+}
+
+
+static void myrs_remove(struct pci_dev *pdev)
+{
+ struct myrs_hba *cs = pci_get_drvdata(pdev);
+
+ if (cs == NULL)
+ return;
+
+ shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...\n");
+ myrs_flush_cache(cs);
+ myrs_destroy_mempools(cs);
+ myrs_cleanup(cs);
+}
+
+
+static const struct pci_device_id myrs_id_table[] = {
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
+ PCI_DEVICE_ID_MYLEX_DAC960_GEM,
+ PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
+ .driver_data = (unsigned long) &DAC960_GEM_privdata,
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
+ },
+ {0, },
+};
+
+MODULE_DEVICE_TABLE(pci, myrs_id_table);
+
+static struct pci_driver myrs_pci_driver = {
+ .name = "myrs",
+ .id_table = myrs_id_table,
+ .probe = myrs_probe,
+ .remove = myrs_remove,
+};
+
+static int __init myrs_init_module(void)
+{
+ int ret;
+
+ myrs_raid_template = raid_class_attach(&myrs_raid_functions);
+ if (!myrs_raid_template)
+ return -ENODEV;
+
+ ret = pci_register_driver(&myrs_pci_driver);
+ if (ret)
+ raid_class_release(myrs_raid_template);
+
+ return ret;
+}
+
+static void __exit myrs_cleanup_module(void)
+{
+ pci_unregister_driver(&myrs_pci_driver);
+ raid_class_release(myrs_raid_template);
+}
+
+module_init(myrs_init_module);
+module_exit(myrs_cleanup_module);
+
+MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h
new file mode 100644
index 000000000000..e6702ee85e9f
--- /dev/null
+++ b/drivers/scsi/myrs.h
@@ -0,0 +1,1134 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * This driver supports the newer, SCSI-based firmware interface only.
+ *
+ * Copyright 2018 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver, which has
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ */
+
+#ifndef _MYRS_H
+#define _MYRS_H
+
+#define MYRS_MAILBOX_TIMEOUT 1000000
+
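+/*
+ * Command tags 1 and 2 are reserved for the driver's internal direct
+ * and monitoring command blocks; tags for SCSI commands start at 3.
+ */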
+#define MYRS_DCMD_TAG 1
+#define MYRS_MCMD_TAG 2
+
+#define MYRS_LINE_BUFFER_SIZE 128
+
+#define MYRS_PRIMARY_MONITOR_INTERVAL (10 * HZ)
+#define MYRS_SECONDARY_MONITOR_INTERVAL (60 * HZ)
+
+/* Maximum number of Scatter/Gather Segments supported */
+#define MYRS_SG_LIMIT 128
+
+/*
+ * Number of Command and Status Mailboxes used by the
+ * DAC960 V2 Firmware Memory Mailbox Interface.
+ */
+#define MYRS_MAX_CMD_MBOX 512
+#define MYRS_MAX_STAT_MBOX 512
+
+#define MYRS_DCDB_SIZE 16
+#define MYRS_SENSE_SIZE 14
+
+/*
+ * DAC960 V2 Firmware Command Opcodes.
+ */
+enum myrs_cmd_opcode {
+ MYRS_CMD_OP_MEMCOPY = 0x01,
+ MYRS_CMD_OP_SCSI_10_PASSTHRU = 0x02,
+ MYRS_CMD_OP_SCSI_255_PASSTHRU = 0x03,
+ MYRS_CMD_OP_SCSI_10 = 0x04,
+ MYRS_CMD_OP_SCSI_256 = 0x05,
+ MYRS_CMD_OP_IOCTL = 0x20,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware IOCTL Opcodes.
+ */
+enum myrs_ioctl_opcode {
+ MYRS_IOCTL_GET_CTLR_INFO = 0x01,
+ MYRS_IOCTL_GET_LDEV_INFO_VALID = 0x03,
+ MYRS_IOCTL_GET_PDEV_INFO_VALID = 0x05,
+ MYRS_IOCTL_GET_HEALTH_STATUS = 0x11,
+ MYRS_IOCTL_GET_EVENT = 0x15,
+ MYRS_IOCTL_START_DISCOVERY = 0x81,
+ MYRS_IOCTL_SET_DEVICE_STATE = 0x82,
+ MYRS_IOCTL_INIT_PDEV_START = 0x84,
+ MYRS_IOCTL_INIT_PDEV_STOP = 0x85,
+ MYRS_IOCTL_INIT_LDEV_START = 0x86,
+ MYRS_IOCTL_INIT_LDEV_STOP = 0x87,
+ MYRS_IOCTL_RBLD_DEVICE_START = 0x88,
+ MYRS_IOCTL_RBLD_DEVICE_STOP = 0x89,
+ MYRS_IOCTL_MAKE_CONSISTENT_START = 0x8A,
+ MYRS_IOCTL_MAKE_CONSISTENT_STOP = 0x8B,
+ MYRS_IOCTL_CC_START = 0x8C,
+ MYRS_IOCTL_CC_STOP = 0x8D,
+ MYRS_IOCTL_SET_MEM_MBOX = 0x8E,
+ MYRS_IOCTL_RESET_DEVICE = 0x90,
+ MYRS_IOCTL_FLUSH_DEVICE_DATA = 0x91,
+ MYRS_IOCTL_PAUSE_DEVICE = 0x92,
+ MYRS_IOCTL_UNPAUSE_DEVICE = 0x93,
+ MYRS_IOCTL_LOCATE_DEVICE = 0x94,
+ MYRS_IOCTL_CREATE_CONFIGURATION = 0xC0,
+ MYRS_IOCTL_DELETE_LDEV = 0xC1,
+ MYRS_IOCTL_REPLACE_INTERNALDEVICE = 0xC2,
+ MYRS_IOCTL_RENAME_LDEV = 0xC3,
+ MYRS_IOCTL_ADD_CONFIGURATION = 0xC4,
+ MYRS_IOCTL_XLATE_PDEV_TO_LDEV = 0xC5,
+ MYRS_IOCTL_CLEAR_CONFIGURATION = 0xCA,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Command Status Codes.
+ */
+#define MYRS_STATUS_SUCCESS 0x00
+#define MYRS_STATUS_FAILED 0x02
+#define MYRS_STATUS_DEVICE_BUSY 0x08
+#define MYRS_STATUS_DEVICE_NON_RESPONSIVE 0x0E
+#define MYRS_STATUS_DEVICE_NON_RESPONSIVE2 0x0F
+#define MYRS_STATUS_RESERVATION_CONFLICT 0x18
+
+/*
+ * DAC960 V2 Firmware Memory Type structure.
+ */
+struct myrs_mem_type {
+ enum {
+ MYRS_MEMTYPE_RESERVED = 0x00,
+ MYRS_MEMTYPE_DRAM = 0x01,
+ MYRS_MEMTYPE_EDRAM = 0x02,
+ MYRS_MEMTYPE_EDO = 0x03,
+ MYRS_MEMTYPE_SDRAM = 0x04,
+ MYRS_MEMTYPE_LAST = 0x1F,
+ } __packed mem_type:5; /* Byte 0 Bits 0-4 */
+ unsigned rsvd:1; /* Byte 0 Bit 5 */
+ unsigned mem_parity:1; /* Byte 0 Bit 6 */
+ unsigned mem_ecc:1; /* Byte 0 Bit 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Processor Type structure.
+ */
+enum myrs_cpu_type {
+ MYRS_CPUTYPE_i960CA = 0x01,
+ MYRS_CPUTYPE_i960RD = 0x02,
+ MYRS_CPUTYPE_i960RN = 0x03,
+ MYRS_CPUTYPE_i960RP = 0x04,
+ MYRS_CPUTYPE_NorthBay = 0x05,
+ MYRS_CPUTYPE_StrongArm = 0x06,
+ MYRS_CPUTYPE_i960RM = 0x07,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Get Controller Info reply structure.
+ */
+struct myrs_ctlr_info {
+ unsigned char rsvd1; /* Byte 0 */
+ enum {
+ MYRS_SCSI_BUS = 0x00,
+ MYRS_Fibre_BUS = 0x01,
+ MYRS_PCI_BUS = 0x03
+ } __packed bus; /* Byte 1 */
+ enum {
+ MYRS_CTLR_DAC960E = 0x01,
+ MYRS_CTLR_DAC960M = 0x08,
+ MYRS_CTLR_DAC960PD = 0x10,
+ MYRS_CTLR_DAC960PL = 0x11,
+ MYRS_CTLR_DAC960PU = 0x12,
+ MYRS_CTLR_DAC960PE = 0x13,
+ MYRS_CTLR_DAC960PG = 0x14,
+ MYRS_CTLR_DAC960PJ = 0x15,
+ MYRS_CTLR_DAC960PTL0 = 0x16,
+ MYRS_CTLR_DAC960PR = 0x17,
+ MYRS_CTLR_DAC960PRL = 0x18,
+ MYRS_CTLR_DAC960PT = 0x19,
+ MYRS_CTLR_DAC1164P = 0x1A,
+ MYRS_CTLR_DAC960PTL1 = 0x1B,
+ MYRS_CTLR_EXR2000P = 0x1C,
+ MYRS_CTLR_EXR3000P = 0x1D,
+ MYRS_CTLR_ACCELERAID352 = 0x1E,
+ MYRS_CTLR_ACCELERAID170 = 0x1F,
+ MYRS_CTLR_ACCELERAID160 = 0x20,
+ MYRS_CTLR_DAC960S = 0x60,
+ MYRS_CTLR_DAC960SU = 0x61,
+ MYRS_CTLR_DAC960SX = 0x62,
+ MYRS_CTLR_DAC960SF = 0x63,
+ MYRS_CTLR_DAC960SS = 0x64,
+ MYRS_CTLR_DAC960FL = 0x65,
+ MYRS_CTLR_DAC960LL = 0x66,
+ MYRS_CTLR_DAC960FF = 0x67,
+ MYRS_CTLR_DAC960HP = 0x68,
+ MYRS_CTLR_RAIDBRICK = 0x69,
+ MYRS_CTLR_METEOR_FL = 0x6A,
+ MYRS_CTLR_METEOR_FF = 0x6B
+ } __packed ctlr_type; /* Byte 2 */
+ unsigned char rsvd2; /* Byte 3 */
+ unsigned short bus_speed_mhz; /* Bytes 4-5 */
+ unsigned char bus_width; /* Byte 6 */
+ unsigned char flash_code; /* Byte 7 */
+ unsigned char ports_present; /* Byte 8 */
+ unsigned char rsvd3[7]; /* Bytes 9-15 */
+ unsigned char bus_name[16]; /* Bytes 16-31 */
+ unsigned char ctlr_name[16]; /* Bytes 32-47 */
+ unsigned char rsvd4[16]; /* Bytes 48-63 */
+ /* Firmware Release Information */
+ unsigned char fw_major_version; /* Byte 64 */
+ unsigned char fw_minor_version; /* Byte 65 */
+ unsigned char fw_turn_number; /* Byte 66 */
+ unsigned char fw_build_number; /* Byte 67 */
+ unsigned char fw_release_day; /* Byte 68 */
+ unsigned char fw_release_month; /* Byte 69 */
+ unsigned char fw_release_year_hi; /* Byte 70 */
+ unsigned char fw_release_year_lo; /* Byte 71 */
+ /* Hardware Release Information */
+ unsigned char hw_rev; /* Byte 72 */
+ unsigned char rsvd5[3]; /* Bytes 73-75 */
+ unsigned char hw_release_day; /* Byte 76 */
+ unsigned char hw_release_month; /* Byte 77 */
+ unsigned char hw_release_year_hi; /* Byte 78 */
+ unsigned char hw_release_year_lo; /* Byte 79 */
+ /* Hardware Manufacturing Information */
+ unsigned char manuf_batch_num; /* Byte 80 */
+ unsigned char rsvd6; /* Byte 81 */
+ unsigned char manuf_plant_num; /* Byte 82 */
+ unsigned char rsvd7; /* Byte 83 */
+ unsigned char hw_manuf_day; /* Byte 84 */
+ unsigned char hw_manuf_month; /* Byte 85 */
+ unsigned char hw_manuf_year_hi; /* Byte 86 */
+ unsigned char hw_manuf_year_lo; /* Byte 87 */
+ unsigned char max_pd_per_xld; /* Byte 88 */
+ unsigned char max_ild_per_xld; /* Byte 89 */
+ unsigned short nvram_size_kb; /* Bytes 90-91 */
+ unsigned char max_xld; /* Byte 92 */
+ unsigned char rsvd8[3]; /* Bytes 93-95 */
+ /* Unique Information per Controller */
+ unsigned char serial_number[16]; /* Bytes 96-111 */
+ unsigned char rsvd9[16]; /* Bytes 112-127 */
+ /* Vendor Information */
+ unsigned char rsvd10[3]; /* Bytes 128-130 */
+ unsigned char oem_code; /* Byte 131 */
+ unsigned char vendor[16]; /* Bytes 132-147 */
+ /* Other Physical/Controller/Operation Information */
+ unsigned char bbu_present:1; /* Byte 148 Bit 0 */
+ unsigned char cluster_mode:1; /* Byte 148 Bit 1 */
+ unsigned char rsvd11:6; /* Byte 148 Bits 2-7 */
+ unsigned char rsvd12[3]; /* Bytes 149-151 */
+ /* Physical Device Scan Information */
+ unsigned char pscan_active:1; /* Byte 152 Bit 0 */
+ unsigned char rsvd13:7; /* Byte 152 Bits 1-7 */
+ unsigned char pscan_chan; /* Byte 153 */
+ unsigned char pscan_target; /* Byte 154 */
+ unsigned char pscan_lun; /* Byte 155 */
+ /* Maximum Command Data Transfer Sizes */
+ unsigned short max_transfer_size; /* Bytes 156-157 */
+ unsigned short max_sge; /* Bytes 158-159 */
+ /* Logical/Physical Device Counts */
+ unsigned short ldev_present; /* Bytes 160-161 */
+ unsigned short ldev_critical; /* Bytes 162-163 */
+ unsigned short ldev_offline; /* Bytes 164-165 */
+ unsigned short pdev_present; /* Bytes 166-167 */
+ unsigned short pdisk_present; /* Bytes 168-169 */
+ unsigned short pdisk_critical; /* Bytes 170-171 */
+ unsigned short pdisk_offline; /* Bytes 172-173 */
+ unsigned short max_tcq; /* Bytes 174-175 */
+ /* Channel and Target ID Information */
+ unsigned char physchan_present; /* Byte 176 */
+ unsigned char virtchan_present; /* Byte 177 */
+ unsigned char physchan_max; /* Byte 178 */
+ unsigned char virtchan_max; /* Byte 179 */
+ unsigned char max_targets[16]; /* Bytes 180-195 */
+ unsigned char rsvd14[12]; /* Bytes 196-207 */
+ /* Memory/Cache Information */
+ unsigned short mem_size_mb; /* Bytes 208-209 */
+ unsigned short cache_size_mb; /* Bytes 210-211 */
+ unsigned int valid_cache_bytes; /* Bytes 212-215 */
+ unsigned int dirty_cache_bytes; /* Bytes 216-219 */
+ unsigned short mem_speed_mhz; /* Bytes 220-221 */
+ unsigned char mem_data_width; /* Byte 222 */
+ struct myrs_mem_type mem_type; /* Byte 223 */
+ unsigned char cache_mem_type_name[16]; /* Bytes 224-239 */
+ /* Execution Memory Information */
+ unsigned short exec_mem_size_mb; /* Bytes 240-241 */
+ unsigned short exec_l2_cache_size_mb; /* Bytes 242-243 */
+ unsigned char rsvd15[8]; /* Bytes 244-251 */
+ unsigned short exec_mem_speed_mhz; /* Bytes 252-253 */
+ unsigned char exec_mem_data_width; /* Byte 254 */
+ struct myrs_mem_type exec_mem_type; /* Byte 255 */
+ unsigned char exec_mem_type_name[16]; /* Bytes 256-271 */
+ /* CPU Type Information */
+ struct { /* Bytes 272-335 */
+ unsigned short cpu_speed_mhz;
+ enum myrs_cpu_type cpu_type;
+ unsigned char cpu_count;
+ unsigned char rsvd16[12];
+ unsigned char cpu_name[16];
+ } __packed cpu[2];
+ /* Debugging/Profiling/Command Time Tracing Information */
+ unsigned short cur_prof_page_num; /* Bytes 336-337 */
+ unsigned short num_prof_waiters; /* Bytes 338-339 */
+ unsigned short cur_trace_page_num; /* Bytes 340-341 */
+ unsigned short num_trace_waiters; /* Bytes 342-343 */
+ unsigned char rsvd18[8]; /* Bytes 344-351 */
+ /* Error Counters on Physical Devices */
+ unsigned short pdev_bus_resets; /* Bytes 352-353 */
+ unsigned short pdev_parity_errors; /* Bytes 354-355 */
+ unsigned short pdev_soft_errors; /* Bytes 356-357 */
+ unsigned short pdev_cmds_failed; /* Bytes 358-359 */
+ unsigned short pdev_misc_errors; /* Bytes 360-361 */
+ unsigned short pdev_cmd_timeouts; /* Bytes 362-363 */
+ unsigned short pdev_sel_timeouts; /* Bytes 364-365 */
+ unsigned short pdev_retries_done; /* Bytes 366-367 */
+ unsigned short pdev_aborts_done; /* Bytes 368-369 */
+ unsigned short pdev_host_aborts_done; /* Bytes 370-371 */
+ unsigned short pdev_predicted_failures; /* Bytes 372-373 */
+ unsigned short pdev_host_cmds_failed; /* Bytes 374-375 */
+ unsigned short pdev_hard_errors; /* Bytes 376-377 */
+ unsigned char rsvd19[6]; /* Bytes 378-383 */
+ /* Error Counters on Logical Devices */
+ unsigned short ldev_soft_errors; /* Bytes 384-385 */
+ unsigned short ldev_cmds_failed; /* Bytes 386-387 */
+ unsigned short ldev_host_aborts_done; /* Bytes 388-389 */
+ unsigned char rsvd20[2]; /* Bytes 390-391 */
+ /* Error Counters on Controller */
+ unsigned short ctlr_mem_errors; /* Bytes 392-393 */
+ unsigned short ctlr_host_aborts_done; /* Bytes 394-395 */
+ unsigned char rsvd21[4]; /* Bytes 396-399 */
+ /* Long Duration Activity Information */
+ unsigned short bg_init_active; /* Bytes 400-401 */
+ unsigned short ldev_init_active; /* Bytes 402-403 */
+ unsigned short pdev_init_active; /* Bytes 404-405 */
+ unsigned short cc_active; /* Bytes 406-407 */
+ unsigned short rbld_active; /* Bytes 408-409 */
+ unsigned short exp_active; /* Bytes 410-411 */
+ unsigned short patrol_active; /* Bytes 412-413 */
+ unsigned char rsvd22[2]; /* Bytes 414-415 */
+ /* Flash ROM Information */
+ unsigned char flash_type; /* Byte 416 */
+ unsigned char rsvd23; /* Byte 417 */
+ unsigned short flash_size_MB; /* Bytes 418-419 */
+ unsigned int flash_limit; /* Bytes 420-423 */
+ unsigned int flash_count; /* Bytes 424-427 */
+ unsigned char rsvd24[4]; /* Bytes 428-431 */
+ unsigned char flash_type_name[16]; /* Bytes 432-447 */
+ /* Firmware Run Time Information */
+ unsigned char rbld_rate; /* Byte 448 */
+ unsigned char bg_init_rate; /* Byte 449 */
+ unsigned char fg_init_rate; /* Byte 450 */
+ unsigned char cc_rate; /* Byte 451 */
+ unsigned char rsvd25[4]; /* Bytes 452-455 */
+ unsigned int max_dp; /* Bytes 456-459 */
+ unsigned int free_dp; /* Bytes 460-463 */
+ unsigned int max_iop; /* Bytes 464-467 */
+ unsigned int free_iop; /* Bytes 468-471 */
+ unsigned short max_combined_len; /* Bytes 472-473 */
+ unsigned short num_cfg_groups; /* Bytes 474-475 */
+ unsigned installation_abort_status:1; /* Byte 476 Bit 0 */
+ unsigned maint_mode_status:1; /* Byte 476 Bit 1 */
+ unsigned rsvd26:6; /* Byte 476 Bits 2-7 */
+ unsigned char rsvd27[6]; /* Bytes 477-511 */
+ unsigned char rsvd28[512]; /* Bytes 512-1023 */
+};
+
+/*
+ * DAC960 V2 Firmware Device State type.
+ */
+enum myrs_devstate {
+ MYRS_DEVICE_UNCONFIGURED = 0x00,
+ MYRS_DEVICE_ONLINE = 0x01,
+ MYRS_DEVICE_REBUILD = 0x03,
+ MYRS_DEVICE_MISSING = 0x04,
+ MYRS_DEVICE_SUSPECTED_CRITICAL = 0x05,
+ MYRS_DEVICE_OFFLINE = 0x08,
+ MYRS_DEVICE_CRITICAL = 0x09,
+ MYRS_DEVICE_SUSPECTED_DEAD = 0x0C,
+ MYRS_DEVICE_COMMANDED_OFFLINE = 0x10,
+ MYRS_DEVICE_STANDBY = 0x21,
+ MYRS_DEVICE_INVALID_STATE = 0xFF,
+} __packed;
+
+/*
+ * DAC960 V2 RAID Levels
+ */
+enum myrs_raid_level {
+ MYRS_RAID_LEVEL0 = 0x0, /* RAID 0 */
+ MYRS_RAID_LEVEL1 = 0x1, /* RAID 1 */
+ MYRS_RAID_LEVEL3 = 0x3, /* RAID 3 right asymmetric parity */
+ MYRS_RAID_LEVEL5 = 0x5, /* RAID 5 right asymmetric parity */
+ MYRS_RAID_LEVEL6 = 0x6, /* RAID 6 (Mylex RAID 6) */
+ MYRS_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */
+ MYRS_RAID_NEWSPAN = 0x8, /* New Mylex SPAN */
+ MYRS_RAID_LEVEL3F = 0x9, /* RAID 3 fixed parity */
+ MYRS_RAID_LEVEL3L = 0xb, /* RAID 3 left symmetric parity */
+ MYRS_RAID_SPAN = 0xc, /* current spanning implementation */
+ MYRS_RAID_LEVEL5L = 0xd, /* RAID 5 left symmetric parity */
+ MYRS_RAID_LEVELE = 0xe, /* RAID E (concatenation) */
+ MYRS_RAID_PHYSICAL = 0xf, /* physical device */
+} __packed;
+
+enum myrs_stripe_size {
+ MYRS_STRIPE_SIZE_0 = 0x0, /* no stripe (RAID 1, RAID 7, etc) */
+ MYRS_STRIPE_SIZE_512B = 0x1,
+ MYRS_STRIPE_SIZE_1K = 0x2,
+ MYRS_STRIPE_SIZE_2K = 0x3,
+ MYRS_STRIPE_SIZE_4K = 0x4,
+ MYRS_STRIPE_SIZE_8K = 0x5,
+ MYRS_STRIPE_SIZE_16K = 0x6,
+ MYRS_STRIPE_SIZE_32K = 0x7,
+ MYRS_STRIPE_SIZE_64K = 0x8,
+ MYRS_STRIPE_SIZE_128K = 0x9,
+ MYRS_STRIPE_SIZE_256K = 0xa,
+ MYRS_STRIPE_SIZE_512K = 0xb,
+ MYRS_STRIPE_SIZE_1M = 0xc,
+} __packed;
+
+enum myrs_cacheline_size {
+ MYRS_CACHELINE_ZERO = 0x0, /* caching cannot be enabled */
+ MYRS_CACHELINE_512B = 0x1,
+ MYRS_CACHELINE_1K = 0x2,
+ MYRS_CACHELINE_2K = 0x3,
+ MYRS_CACHELINE_4K = 0x4,
+ MYRS_CACHELINE_8K = 0x5,
+ MYRS_CACHELINE_16K = 0x6,
+ MYRS_CACHELINE_32K = 0x7,
+ MYRS_CACHELINE_64K = 0x8,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Get Logical Device Info reply structure.
+ */
+struct myrs_ldev_info {
+ unsigned char ctlr; /* Byte 0 */
+ unsigned char channel; /* Byte 1 */
+ unsigned char target; /* Byte 2 */
+ unsigned char lun; /* Byte 3 */
+ enum myrs_devstate dev_state; /* Byte 4 */
+ unsigned char raid_level; /* Byte 5 */
+ enum myrs_stripe_size stripe_size; /* Byte 6 */
+ enum myrs_cacheline_size cacheline_size; /* Byte 7 */
+ struct {
+ enum {
+ MYRS_READCACHE_DISABLED = 0x0,
+ MYRS_READCACHE_ENABLED = 0x1,
+ MYRS_READAHEAD_ENABLED = 0x2,
+ MYRS_INTELLIGENT_READAHEAD_ENABLED = 0x3,
+ MYRS_READCACHE_LAST = 0x7,
+ } __packed rce:3; /* Byte 8 Bits 0-2 */
+ enum {
+ MYRS_WRITECACHE_DISABLED = 0x0,
+ MYRS_LOGICALDEVICE_RO = 0x1,
+ MYRS_WRITECACHE_ENABLED = 0x2,
+ MYRS_INTELLIGENT_WRITECACHE_ENABLED = 0x3,
+ MYRS_WRITECACHE_LAST = 0x7,
+ } __packed wce:3; /* Byte 8 Bits 3-5 */
+ unsigned rsvd1:1; /* Byte 8 Bit 6 */
+ unsigned ldev_init_done:1; /* Byte 8 Bit 7 */
+ } ldev_control; /* Byte 8 */
+ /* Logical Device Operations Status */
+ unsigned char cc_active:1; /* Byte 9 Bit 0 */
+ unsigned char rbld_active:1; /* Byte 9 Bit 1 */
+ unsigned char bg_init_active:1; /* Byte 9 Bit 2 */
+ unsigned char fg_init_active:1; /* Byte 9 Bit 3 */
+ unsigned char migration_active:1; /* Byte 9 Bit 4 */
+ unsigned char patrol_active:1; /* Byte 9 Bit 5 */
+ unsigned char rsvd2:2; /* Byte 9 Bits 6-7 */
+ unsigned char raid5_writeupdate; /* Byte 10 */
+ unsigned char raid5_algo; /* Byte 11 */
+ unsigned short ldev_num; /* Bytes 12-13 */
+ /* BIOS Info */
+ unsigned char bios_disabled:1; /* Byte 14 Bit 0 */
+ unsigned char cdrom_boot:1; /* Byte 14 Bit 1 */
+ unsigned char drv_coercion:1; /* Byte 14 Bit 2 */
+ unsigned char write_same_disabled:1; /* Byte 14 Bit 3 */
+ unsigned char hba_mode:1; /* Byte 14 Bit 4 */
+ enum {
+ MYRS_GEOMETRY_128_32 = 0x0,
+ MYRS_GEOMETRY_255_63 = 0x1,
+ MYRS_GEOMETRY_RSVD1 = 0x2,
+ MYRS_GEOMETRY_RSVD2 = 0x3
+ } __packed drv_geom:2; /* Byte 14 Bits 5-6 */
+ unsigned char super_ra_enabled:1; /* Byte 14 Bit 7 */
+ unsigned char rsvd3; /* Byte 15 */
+ /* Error Counters */
+ unsigned short soft_errs; /* Bytes 16-17 */
+ unsigned short cmds_failed; /* Bytes 18-19 */
+ unsigned short cmds_aborted; /* Bytes 20-21 */
+ unsigned short deferred_write_errs; /* Bytes 22-23 */
+ unsigned int rsvd4; /* Bytes 24-27 */
+ unsigned int rsvd5; /* Bytes 28-31 */
+ /* Device Size Information */
+ unsigned short rsvd6; /* Bytes 32-33 */
+ unsigned short devsize_bytes; /* Bytes 34-35 */
+ unsigned int orig_devsize; /* Bytes 36-39 */
+ unsigned int cfg_devsize; /* Bytes 40-43 */
+ unsigned int rsvd7; /* Bytes 44-47 */
+ unsigned char ldev_name[32]; /* Bytes 48-79 */
+ unsigned char inquiry[36]; /* Bytes 80-115 */
+ unsigned char rsvd8[12]; /* Bytes 116-127 */
+ u64 last_read_lba; /* Bytes 128-135 */
+ u64 last_write_lba; /* Bytes 136-143 */
+ u64 cc_lba; /* Bytes 144-151 */
+ u64 rbld_lba; /* Bytes 152-159 */
+ u64 bg_init_lba; /* Bytes 160-167 */
+ u64 fg_init_lba; /* Bytes 168-175 */
+ u64 migration_lba; /* Bytes 176-183 */
+ u64 patrol_lba; /* Bytes 184-191 */
+ unsigned char rsvd9[64]; /* Bytes 192-255 */
+};
+
+/*
+ * DAC960 V2 Firmware Get Physical Device Info reply structure.
+ */
+struct myrs_pdev_info {
+ unsigned char rsvd1; /* Byte 0 */
+ unsigned char channel; /* Byte 1 */
+ unsigned char target; /* Byte 2 */
+ unsigned char lun; /* Byte 3 */
+ /* Configuration Status Bits */
+ unsigned char pdev_fault_tolerant:1; /* Byte 4 Bit 0 */
+ unsigned char pdev_connected:1; /* Byte 4 Bit 1 */
+ unsigned char pdev_local_to_ctlr:1; /* Byte 4 Bit 2 */
+ unsigned char rsvd2:5; /* Byte 4 Bits 3-7 */
+ /* Multiple Host/Controller Status Bits */
+ unsigned char remote_host_dead:1; /* Byte 5 Bit 0 */
+ unsigned char remote_ctlr_dead:1; /* Byte 5 Bit 1 */
+ unsigned char rsvd3:6; /* Byte 5 Bits 2-7 */
+ enum myrs_devstate dev_state; /* Byte 6 */
+ unsigned char nego_data_width; /* Byte 7 */
+ unsigned short nego_sync_rate; /* Bytes 8-9 */
+ /* Multiported Physical Device Information */
+ unsigned char num_ports; /* Byte 10 */
+ unsigned char drv_access_bitmap; /* Byte 11 */
+ unsigned int rsvd4; /* Bytes 12-15 */
+ unsigned char ip_address[16]; /* Bytes 16-31 */
+ unsigned short max_tags; /* Bytes 32-33 */
+ /* Physical Device Operations Status */
+ unsigned char cc_in_progress:1; /* Byte 34 Bit 0 */
+ unsigned char rbld_in_progress:1; /* Byte 34 Bit 1 */
+ unsigned char makecc_in_progress:1; /* Byte 34 Bit 2 */
+ unsigned char pdevinit_in_progress:1; /* Byte 34 Bit 3 */
+ unsigned char migration_in_progress:1; /* Byte 34 Bit 4 */
+ unsigned char patrol_in_progress:1; /* Byte 34 Bit 5 */
+ unsigned char rsvd5:2; /* Byte 34 Bits 6-7 */
+ unsigned char long_op_status; /* Byte 35 */
+ unsigned char parity_errs; /* Byte 36 */
+ unsigned char soft_errs; /* Byte 37 */
+ unsigned char hard_errs; /* Byte 38 */
+ unsigned char misc_errs; /* Byte 39 */
+ unsigned char cmd_timeouts; /* Byte 40 */
+ unsigned char retries; /* Byte 41 */
+ unsigned char aborts; /* Byte 42 */
+ unsigned char pred_failures; /* Byte 43 */
+ unsigned int rsvd6; /* Bytes 44-47 */
+ unsigned short rsvd7; /* Bytes 48-49 */
+ unsigned short devsize_bytes; /* Bytes 50-51 */
+ unsigned int orig_devsize; /* Bytes 52-55 */
+ unsigned int cfg_devsize; /* Bytes 56-59 */
+ unsigned int rsvd8; /* Bytes 60-63 */
+ unsigned char pdev_name[16]; /* Bytes 64-79 */
+ unsigned char rsvd9[16]; /* Bytes 80-95 */
+ unsigned char rsvd10[32]; /* Bytes 96-127 */
+ unsigned char inquiry[36]; /* Bytes 128-163 */
+ unsigned char rsvd11[20]; /* Bytes 164-183 */
+ unsigned char rsvd12[8]; /* Bytes 184-191 */
+ u64 last_read_lba; /* Bytes 192-199 */
+ u64 last_write_lba; /* Bytes 200-207 */
+ u64 cc_lba; /* Bytes 208-215 */
+ u64 rbld_lba; /* Bytes 216-223 */
+ u64 makecc_lba; /* Bytes 224-231 */
+ u64 devinit_lba; /* Bytes 232-239 */
+ u64 migration_lba; /* Bytes 240-247 */
+ u64 patrol_lba; /* Bytes 248-255 */
+ unsigned char rsvd13[256]; /* Bytes 256-511 */
+};
+
+/*
+ * DAC960 V2 Firmware Health Status Buffer structure.
+ */
+struct myrs_fwstat {
+ unsigned int uptime_usecs; /* Bytes 0-3 */
+ unsigned int uptime_msecs; /* Bytes 4-7 */
+ unsigned int seconds; /* Bytes 8-11 */
+ unsigned char rsvd1[4]; /* Bytes 12-15 */
+ unsigned int epoch; /* Bytes 16-19 */
+ unsigned char rsvd2[4]; /* Bytes 20-23 */
+ unsigned int dbg_msgbuf_idx; /* Bytes 24-27 */
+ unsigned int coded_msgbuf_idx; /* Bytes 28-31 */
+ unsigned int cur_timetrace_page; /* Bytes 32-35 */
+ unsigned int cur_prof_page; /* Bytes 36-39 */
+ unsigned int next_evseq; /* Bytes 40-43 */
+ unsigned char rsvd3[4]; /* Bytes 44-47 */
+ unsigned char rsvd4[16]; /* Bytes 48-63 */
+ unsigned char rsvd5[64]; /* Bytes 64-127 */
+};
+
+/*
+ * DAC960 V2 Firmware Get Event reply structure.
+ */
+struct myrs_event {
+ unsigned int ev_seq; /* Bytes 0-3 */
+ unsigned int ev_time; /* Bytes 4-7 */
+ unsigned int ev_code; /* Bytes 8-11 */
+ unsigned char rsvd1; /* Byte 12 */
+ unsigned char channel; /* Byte 13 */
+ unsigned char target; /* Byte 14 */
+ unsigned char lun; /* Byte 15 */
+ unsigned int rsvd2; /* Bytes 16-19 */
+ unsigned int ev_parm; /* Bytes 20-23 */
+ unsigned char sense_data[40]; /* Bytes 24-63 */
+};
+
+/*
+ * DAC960 V2 Firmware Command Control Bits structure.
+ */
+struct myrs_cmd_ctrl {
+ unsigned char fua:1; /* Byte 0 Bit 0 */
+ unsigned char disable_pgout:1; /* Byte 0 Bit 1 */
+ unsigned char rsvd1:1; /* Byte 0 Bit 2 */
+ unsigned char add_sge_mem:1; /* Byte 0 Bit 3 */
+ unsigned char dma_ctrl_to_host:1; /* Byte 0 Bit 4 */
+ unsigned char rsvd2:1; /* Byte 0 Bit 5 */
+ unsigned char no_autosense:1; /* Byte 0 Bit 6 */
+ unsigned char disc_prohibited:1; /* Byte 0 Bit 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Command Timeout structure.
+ */
+struct myrs_cmd_tmo {
+ unsigned char tmo_val:6; /* Byte 0 Bits 0-5 */
+ enum {
+ MYRS_TMO_SCALE_SECONDS = 0,
+ MYRS_TMO_SCALE_MINUTES = 1,
+ MYRS_TMO_SCALE_HOURS = 2,
+ MYRS_TMO_SCALE_RESERVED = 3
+ } __packed tmo_scale:2; /* Byte 0 Bits 6-7 */
+};
+
+/*
+ * DAC960 V2 Firmware Physical Device structure.
+ */
+struct myrs_pdev {
+ unsigned char lun; /* Byte 0 */
+ unsigned char target; /* Byte 1 */
+ unsigned char channel:3; /* Byte 2 Bits 0-2 */
+ unsigned char ctlr:5; /* Byte 2 Bits 3-7 */
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Logical Device structure.
+ */
+struct myrs_ldev {
+ unsigned short ldev_num; /* Bytes 0-1 */
+ unsigned char rsvd:3; /* Byte 2 Bits 0-2 */
+ unsigned char ctlr:5; /* Byte 2 Bits 3-7 */
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Operation Device type.
+ */
+enum myrs_opdev {
+ MYRS_PHYSICAL_DEVICE = 0x00,
+ MYRS_RAID_DEVICE = 0x01,
+ MYRS_PHYSICAL_CHANNEL = 0x02,
+ MYRS_RAID_CHANNEL = 0x03,
+ MYRS_PHYSICAL_CONTROLLER = 0x04,
+ MYRS_RAID_CONTROLLER = 0x05,
+ MYRS_CONFIGURATION_GROUP = 0x10,
+ MYRS_ENCLOSURE = 0x11,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Translate Physical To Logical Device structure.
+ */
+struct myrs_devmap {
+ unsigned short ldev_num; /* Bytes 0-1 */
+ unsigned short rsvd; /* Bytes 2-3 */
+ unsigned char prev_boot_ctlr; /* Byte 4 */
+ unsigned char prev_boot_channel; /* Byte 5 */
+ unsigned char prev_boot_target; /* Byte 6 */
+ unsigned char prev_boot_lun; /* Byte 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Scatter/Gather List Entry structure.
+ */
+struct myrs_sge {
+ u64 sge_addr; /* Bytes 0-7 */
+ u64 sge_count; /* Bytes 8-15 */
+};
+
+/*
+ * DAC960 V2 Firmware Data Transfer Memory Address structure.
+ */
+union myrs_sgl {
+ struct myrs_sge sge[2]; /* Bytes 0-31 */
+ struct {
+ unsigned short sge0_len; /* Bytes 0-1 */
+ unsigned short sge1_len; /* Bytes 2-3 */
+ unsigned short sge2_len; /* Bytes 4-5 */
+ unsigned short rsvd; /* Bytes 6-7 */
+ u64 sge0_addr; /* Bytes 8-15 */
+ u64 sge1_addr; /* Bytes 16-23 */
+ u64 sge2_addr; /* Bytes 24-31 */
+ } ext;
+};
+
+/*
+ * 64 Byte DAC960 V2 Firmware Command Mailbox structure.
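+ *
+ * Word 0 carries the command identifier, opcode, and control bits;
+ * the write_cmd_mbox() helpers copy words 1-15 first and store word 0
+ * last so the controller never sees a partially written mailbox.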
+ */
+union myrs_cmd_mbox {
+ unsigned int words[16]; /* Words 0-15 */
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned int rsvd1:24; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd2[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } common;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size; /* Bytes 4-7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char cdb_len; /* Byte 21 */
+ unsigned char cdb[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } SCSI_10;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size; /* Bytes 4-7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char cdb_len; /* Byte 21 */
+ unsigned short rsvd; /* Bytes 22-23 */
+ u64 cdb_addr; /* Bytes 24-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } SCSI_255;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned short rsvd1; /* Bytes 16-17 */
+ unsigned char ctlr_num; /* Byte 18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd2[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } ctlr_info;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_ldev ldev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } ldev_info;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } pdev_info;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned short evnum_upper; /* Bytes 16-17 */
+ unsigned char ctlr_num; /* Byte 18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned short evnum_lower; /* Bytes 22-23 */
+ unsigned char rsvd[8]; /* Bytes 24-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } get_event;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ union {
+ struct myrs_ldev ldev; /* Bytes 16-18 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ };
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ enum myrs_devstate state; /* Byte 22 */
+ unsigned char rsvd[9]; /* Bytes 23-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } set_devstate;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_ldev ldev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char restore_consistency:1; /* Byte 22 Bit 0 */
+ unsigned char initialized_area_only:1; /* Byte 22 Bit 1 */
+ unsigned char rsvd1:6; /* Byte 22 Bits 2-7 */
+ unsigned char rsvd2[9]; /* Bytes 23-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } cc;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ unsigned char first_cmd_mbox_size_kb; /* Byte 4 */
+ unsigned char first_stat_mbox_size_kb; /* Byte 5 */
+ unsigned char second_cmd_mbox_size_kb; /* Byte 6 */
+ unsigned char second_stat_mbox_size_kb; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned int rsvd1:24; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char fwstat_buf_size_kb; /* Byte 22 */
+ unsigned char rsvd2; /* Byte 23 */
+ u64 fwstat_buf_addr; /* Bytes 24-31 */
+ u64 first_cmd_mbox_addr; /* Bytes 32-39 */
+ u64 first_stat_mbox_addr; /* Bytes 40-47 */
+ u64 second_cmd_mbox_addr; /* Bytes 48-55 */
+ u64 second_stat_mbox_addr; /* Bytes 56-63 */
+ } set_mbox;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ enum myrs_opdev opdev; /* Byte 22 */
+ unsigned char rsvd[9]; /* Bytes 23-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } dev_op;
+};
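
Every view of this union must describe the same 64 bytes (words[16] is 16 * 4, and each variant's final member ends at byte 63), so a compile-time size check is cheap insurance when touching the layout. A minimal sketch, not part of the patch, assuming the usual BUILD_BUG_ON() from <linux/build_bug.h> invoked from any function in myrs.c:

	/* Sketch: catch accidental mailbox layout changes at compile time. */
	BUILD_BUG_ON(sizeof(union myrs_cmd_mbox) != 64);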
+
+/*
+ * DAC960 V2 Firmware Controller Status Mailbox structure.
+ */
+struct myrs_stat_mbox {
+ unsigned short id; /* Bytes 0-1 */
+ unsigned char status; /* Byte 2 */
+ unsigned char sense_len; /* Byte 3 */
+ int residual; /* Bytes 4-7 */
+};
+
+struct myrs_cmdblk {
+ union myrs_cmd_mbox mbox;
+ unsigned char status;
+ unsigned char sense_len;
+ int residual;
+ struct completion *complete;
+ struct myrs_sge *sgl;
+ dma_addr_t sgl_addr;
+ unsigned char *dcdb;
+ dma_addr_t dcdb_dma;
+ unsigned char *sense;
+ dma_addr_t sense_addr;
+};
+
+/*
+ * DAC960 Driver Controller structure.
+ */
+struct myrs_hba {
+ void __iomem *io_base;
+ void __iomem *mmio_base;
+ phys_addr_t io_addr;
+ phys_addr_t pci_addr;
+ unsigned int irq;
+
+ unsigned char model_name[28];
+ unsigned char fw_version[12];
+
+ struct Scsi_Host *host;
+ struct pci_dev *pdev;
+
+ unsigned int epoch;
+ unsigned int next_evseq;
+ /* Monitor flags */
+ bool needs_update;
+ bool disable_enc_msg;
+
+ struct workqueue_struct *work_q;
+ char work_q_name[20];
+ struct delayed_work monitor_work;
+ unsigned long primary_monitor_time;
+ unsigned long secondary_monitor_time;
+
+ spinlock_t queue_lock;
+
+ struct dma_pool *sg_pool;
+ struct dma_pool *sense_pool;
+ struct dma_pool *dcdb_pool;
+
+ void (*write_cmd_mbox)(union myrs_cmd_mbox *next_mbox,
+ union myrs_cmd_mbox *cmd_mbox);
+ void (*get_cmd_mbox)(void __iomem *base);
+ void (*disable_intr)(void __iomem *base);
+ void (*reset)(void __iomem *base);
+
+ dma_addr_t cmd_mbox_addr;
+ size_t cmd_mbox_size;
+ union myrs_cmd_mbox *first_cmd_mbox;
+ union myrs_cmd_mbox *last_cmd_mbox;
+ union myrs_cmd_mbox *next_cmd_mbox;
+ union myrs_cmd_mbox *prev_cmd_mbox1;
+ union myrs_cmd_mbox *prev_cmd_mbox2;
+
+ dma_addr_t stat_mbox_addr;
+ size_t stat_mbox_size;
+ struct myrs_stat_mbox *first_stat_mbox;
+ struct myrs_stat_mbox *last_stat_mbox;
+ struct myrs_stat_mbox *next_stat_mbox;
+
+ struct myrs_cmdblk dcmd_blk;
+ struct myrs_cmdblk mcmd_blk;
+ struct mutex dcmd_mutex;
+
+ struct myrs_fwstat *fwstat_buf;
+ dma_addr_t fwstat_addr;
+
+ struct myrs_ctlr_info *ctlr_info;
+ struct mutex cinfo_mutex;
+
+ struct myrs_event *event_buf;
+};
+
+typedef unsigned char (*enable_mbox_t)(void __iomem *base, dma_addr_t addr);
+typedef int (*myrs_hwinit_t)(struct pci_dev *pdev,
+ struct myrs_hba *c, void __iomem *base);
+
+struct myrs_privdata {
+ myrs_hwinit_t hw_init;
+ irq_handler_t irq_handler;
+ unsigned int mmio_size;
+};
+
+/*
+ * DAC960 GEM Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_GEM_mmio_size 0x600
+
+enum DAC960_GEM_reg_offset {
+ DAC960_GEM_IDB_READ_OFFSET = 0x214,
+ DAC960_GEM_IDB_CLEAR_OFFSET = 0x218,
+ DAC960_GEM_ODB_READ_OFFSET = 0x224,
+ DAC960_GEM_ODB_CLEAR_OFFSET = 0x228,
+ DAC960_GEM_IRQSTS_OFFSET = 0x208,
+ DAC960_GEM_IRQMASK_READ_OFFSET = 0x22C,
+ DAC960_GEM_IRQMASK_CLEAR_OFFSET = 0x230,
+ DAC960_GEM_CMDMBX_OFFSET = 0x510,
+ DAC960_GEM_CMDSTS_OFFSET = 0x518,
+ DAC960_GEM_ERRSTS_READ_OFFSET = 0x224,
+ DAC960_GEM_ERRSTS_CLEAR_OFFSET = 0x228,
+};
+
+/*
+ * DAC960 GEM Series Inbound Door Bell Register.
+ */
+#define DAC960_GEM_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_GEM_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_GEM_IDB_GEN_IRQ 0x04
+#define DAC960_GEM_IDB_CTRL_RESET 0x08
+#define DAC960_GEM_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_GEM_IDB_HWMBOX_FULL 0x01
+#define DAC960_GEM_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 GEM Series Outbound Door Bell Register.
+ */
+#define DAC960_GEM_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_GEM_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_GEM_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_GEM_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 GEM Series Interrupt Mask Register.
+ */
+#define DAC960_GEM_IRQMASK_HWMBOX_IRQ 0x01
+#define DAC960_GEM_IRQMASK_MMBOX_IRQ 0x02
+
+/*
+ * DAC960 GEM Series Error Status Register.
+ */
+#define DAC960_GEM_ERRSTS_PENDING 0x20
+
+/*
+ * dma_addr_writeql is provided to write dma_addr_t types
+ * to a 64-bit pci address space register. The controller
+ * will accept having the register written as two 32-bit
+ * values.
+ *
+ * In HIGHMEM kernels, dma_addr_t is a 64-bit value;
+ * without HIGHMEM, dma_addr_t is a 32-bit value.
+ *
+ * The compiler should always fix up the assignment
+ * to u.wq appropriately, depending upon the size of
+ * dma_addr_t.
+ */
+static inline
+void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address)
+{
+ union {
+ u64 wq;
+ uint wl[2];
+ } u;
+
+ u.wq = addr;
+
+ writel(u.wl[0], write_address);
+ writel(u.wl[1], write_address + 4);
+}
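
As a usage illustration, posting the 64-byte command mailbox address to the GEM command-mailbox register would go through this helper; a minimal sketch, with the wrapper name assumed rather than taken from the patch:

	static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
			dma_addr_t cmd_mbox_addr)
	{
		/* low 32 bits first, then high 32 bits, as above */
		dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
	}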
+
+/*
+ * DAC960 BA Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_BA_mmio_size 0x80
+
+enum DAC960_BA_reg_offset {
+ DAC960_BA_IRQSTS_OFFSET = 0x30,
+ DAC960_BA_IRQMASK_OFFSET = 0x34,
+ DAC960_BA_CMDMBX_OFFSET = 0x50,
+ DAC960_BA_CMDSTS_OFFSET = 0x58,
+ DAC960_BA_IDB_OFFSET = 0x60,
+ DAC960_BA_ODB_OFFSET = 0x61,
+ DAC960_BA_ERRSTS_OFFSET = 0x63,
+};
+
+/*
+ * DAC960 BA Series Inbound Door Bell Register.
+ */
+#define DAC960_BA_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_BA_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_BA_IDB_GEN_IRQ 0x04
+#define DAC960_BA_IDB_CTRL_RESET 0x08
+#define DAC960_BA_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_BA_IDB_HWMBOX_EMPTY 0x01
+#define DAC960_BA_IDB_INIT_DONE 0x02
+
+/*
+ * DAC960 BA Series Outbound Door Bell Register.
+ */
+#define DAC960_BA_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_BA_ODB_MMBOX_ACK_IRQ 0x02
+
+#define DAC960_BA_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_BA_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 BA Series Interrupt Mask Register.
+ */
+#define DAC960_BA_IRQMASK_DISABLE_IRQ 0x04
+#define DAC960_BA_IRQMASK_DISABLE_I2O	0x08
+
+/*
+ * DAC960 BA Series Error Status Register.
+ */
+#define DAC960_BA_ERRSTS_PENDING 0x04
+
+/*
+ * DAC960 LP Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_LP_mmio_size 0x80
+
+enum DAC960_LP_reg_offset {
+ DAC960_LP_CMDMBX_OFFSET = 0x10,
+ DAC960_LP_CMDSTS_OFFSET = 0x18,
+ DAC960_LP_IDB_OFFSET = 0x20,
+ DAC960_LP_ODB_OFFSET = 0x2C,
+ DAC960_LP_ERRSTS_OFFSET = 0x2E,
+ DAC960_LP_IRQSTS_OFFSET = 0x30,
+ DAC960_LP_IRQMASK_OFFSET = 0x34,
+};
+
+/*
+ * DAC960 LP Series Inbound Door Bell Register.
+ */
+#define DAC960_LP_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_LP_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_LP_IDB_GEN_IRQ 0x04
+#define DAC960_LP_IDB_CTRL_RESET 0x08
+#define DAC960_LP_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_LP_IDB_HWMBOX_FULL 0x01
+#define DAC960_LP_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 LP Series Outbound Door Bell Register.
+ */
+#define DAC960_LP_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_LP_ODB_MMBOX_ACK_IRQ 0x02
+
+#define DAC960_LP_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_LP_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 LP Series Interrupt Mask Register.
+ */
+#define DAC960_LP_IRQMASK_DISABLE_IRQ 0x04
+
+/*
+ * DAC960 LP Series Error Status Register.
+ */
+#define DAC960_LP_ERRSTS_PENDING 0x04
+
+#endif /* _MYRS_H */
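
Read together, the IDB/ODB bit definitions encode a simple doorbell handshake: the host rings *_NEW_CMD to post a mailbox command, the controller raises *_STS_AVAIL, and the host acknowledges with *_ACK_IRQ. A condensed sketch for the BA series, assuming its doorbells are byte-wide registers (their offsets 0x60/0x61 are adjacent bytes):

	/* Post a command previously copied into the hardware mailbox. */
	writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);

	/* In the interrupt handler: consume status, then ack the doorbell. */
	if (readb(base + DAC960_BA_ODB_OFFSET) & DAC960_BA_ODB_HWMBOX_STS_AVAIL)
		writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ,
		       base + DAC960_BA_ODB_OFFSET);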
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 8620ac5d6e41..5aac3e801903 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -2638,7 +2638,7 @@ static int nsp32_detect(struct pci_dev *pdev)
/*
* setup DMA
*/
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
goto scsi_unregister;
}
@@ -2646,7 +2646,9 @@ static int nsp32_detect(struct pci_dev *pdev)
/*
* allocate autoparam DMA resource.
*/
- data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
+ data->autoparam = dma_alloc_coherent(&pdev->dev,
+ sizeof(nsp32_autoparam), &(data->auto_paddr),
+ GFP_KERNEL);
if (data->autoparam == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
goto scsi_unregister;
@@ -2655,8 +2657,8 @@ static int nsp32_detect(struct pci_dev *pdev)
/*
* allocate scatter-gather DMA resource.
*/
- data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
- &(data->sg_paddr));
+ data->sg_list = dma_alloc_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
+ &data->sg_paddr, GFP_KERNEL);
if (data->sg_list == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
goto free_autoparam;
@@ -2761,11 +2763,11 @@ static int nsp32_detect(struct pci_dev *pdev)
free_irq(host->irq, data);
free_sg_list:
- pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
+ dma_free_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
data->sg_list, data->sg_paddr);
free_autoparam:
- pci_free_consistent(pdev, sizeof(nsp32_autoparam),
+ dma_free_coherent(&pdev->dev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
scsi_unregister:
@@ -2780,12 +2782,12 @@ static int nsp32_release(struct Scsi_Host *host)
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
if (data->autoparam) {
- pci_free_consistent(data->Pci, sizeof(nsp32_autoparam),
+ dma_free_coherent(&data->Pci->dev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
}
if (data->sg_list) {
- pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE,
+ dma_free_coherent(&data->Pci->dev, NSP32_SG_TABLE_SIZE,
data->sg_list, data->sg_paddr);
}
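
The nsp32 hunks above are mechanical pci_*_consistent() -> dma_*_coherent() conversions. The legacy PCI calls were thin shims (include/linux/pci-dma-compat.h), so the one behavioral nuance is the gfp flag: the wrapper hard-coded GFP_ATOMIC, whereas probe-context callers can, and here now do, pass GFP_KERNEL. The wrapper, paraphrased from memory:

	static inline void *
	pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			     dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(&hwdev->dev, size, dma_handle,
					  GFP_ATOMIC);
	}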
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 67b14576fff2..e19fa883376f 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -445,7 +445,7 @@ static void _put_request(struct request *rq)
* code paths.
*/
if (unlikely(rq->bio))
- blk_end_request(rq, BLK_STS_IOERR, blk_rq_bytes(rq));
+ blk_mq_end_request(rq, BLK_STS_IOERR);
else
blk_put_request(rq);
}
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 7d1609fa233c..df82a349e969 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -220,16 +220,4 @@ static struct pcmcia_driver aha152x_cs_driver = {
.id_table = aha152x_ids,
.resume = aha152x_resume,
};
-
-static int __init init_aha152x_cs(void)
-{
- return pcmcia_register_driver(&aha152x_cs_driver);
-}
-
-static void __exit exit_aha152x_cs(void)
-{
- pcmcia_unregister_driver(&aha152x_cs_driver);
-}
-
-module_init(init_aha152x_cs);
-module_exit(exit_aha152x_cs);
+module_pcmcia_driver(aha152x_cs_driver);
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 5fb6eefc6541..f3230494a8c9 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1742,19 +1742,6 @@ static struct pcmcia_driver nsp_driver = {
.suspend = nsp_cs_suspend,
.resume = nsp_cs_resume,
};
-
-static int __init nsp_cs_init(void)
-{
- return pcmcia_register_driver(&nsp_driver);
-}
-
-static void __exit nsp_cs_exit(void)
-{
- pcmcia_unregister_driver(&nsp_driver);
-}
-
-
-module_init(nsp_cs_init)
-module_exit(nsp_cs_exit)
+module_pcmcia_driver(nsp_driver);
/* end */
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index afd64f0adc4b..ea5122f3396d 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -326,10 +326,6 @@ static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht);
/* Interrupt handler */
//static irqreturn_t nspintr(int irq, void *dev_id);
-/* Module entry point*/
-static int __init nsp_cs_init(void);
-static void __exit nsp_cs_exit(void);
-
/* Debug */
#ifdef NSP_DEBUG
static void show_command (struct scsi_cmnd *SCpnt);
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 0556054764dc..173351a8554b 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -254,8 +254,12 @@ static void qlogic_release(struct pcmcia_device *link)
static int qlogic_resume(struct pcmcia_device *link)
{
scsi_info_t *info = link->priv;
+ int ret;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ return ret;
- pcmcia_enable_device(link);
if ((info->manf_id == MANFID_MACNICA) ||
(info->manf_id == MANFID_PIONEER) ||
(info->manf_id == 0x0098)) {
@@ -300,18 +304,7 @@ static struct pcmcia_driver qlogic_cs_driver = {
.resume = qlogic_resume,
};
-static int __init init_qlogic_cs(void)
-{
- return pcmcia_register_driver(&qlogic_cs_driver);
-}
-
-static void __exit exit_qlogic_cs(void)
-{
- pcmcia_unregister_driver(&qlogic_cs_driver);
-}
-
MODULE_AUTHOR("Tom Zerucha, Michael Griffith");
MODULE_DESCRIPTION("Driver for the PCMCIA Qlogic FAS SCSI controllers");
MODULE_LICENSE("GPL");
-module_init(init_qlogic_cs);
-module_exit(exit_qlogic_cs);
+module_pcmcia_driver(qlogic_cs_driver);
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 20011c8afbb5..a3b63bea0e50 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -880,18 +880,4 @@ static struct pcmcia_driver sym53c500_cs_driver = {
.id_table = sym53c500_ids,
.resume = sym53c500_resume,
};
-
-static int __init
-init_sym53c500_cs(void)
-{
- return pcmcia_register_driver(&sym53c500_cs_driver);
-}
-
-static void __exit
-exit_sym53c500_cs(void)
-{
- pcmcia_unregister_driver(&sym53c500_cs_driver);
-}
-
-module_init(init_sym53c500_cs);
-module_exit(exit_sym53c500_cs);
+module_pcmcia_driver(sym53c500_cs_driver);
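
All four PCMCIA stubs above shrink to module_pcmcia_driver(), which expands to exactly the init/exit boilerplate being deleted. From include/pcmcia/ds.h (paraphrased):

	#define module_pcmcia_driver(__pcmcia_driver) \
		module_driver(__pcmcia_driver, pcmcia_register_driver, \
			      pcmcia_unregister_driver)

module_driver() in turn emits the __init/__exit pair and the module_init()/module_exit() hookup, so the conversions are behavior-preserving.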
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 199527dbaaa1..48e0624ecc68 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -132,4 +132,12 @@ enum pm8001_hba_info_flags {
PM8001F_RUN_TIME = (1U << 1),
};
+/*
+ * Phy Status
+ */
+#define PHY_LINK_DISABLE 0x00
+#define PHY_LINK_DOWN 0x01
+#define PHY_STATE_LINK_UP_SPCV 0x2
+#define PHY_STATE_LINK_UP_SPC 0x1
+
#endif
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 4dd6cad330e8..d0bb357034d8 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1479,6 +1479,12 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
} else {
u32 producer_index;
void *pi_virt = circularQ->pi_virt;
+ /* spurious interrupt during setup if
+ * kexec-ing and driver doing a doorbell access
+ * with the pre-kexec oq interrupt setup
+ */
+ if (!pi_virt)
+ break;
/* Update the producer index from SPC */
producer_index = pm8001_read_32(pi_virt);
circularQ->producer_index = cpu_to_le32(producer_index);
@@ -2414,7 +2420,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
sata_resp = &psataPayload->sata_resp[0];
resp = (struct ata_task_resp *)ts->buf;
if (t->ata_task.dma_xfer == 0 &&
- t->data_dir == PCI_DMA_FROMDEVICE) {
+ t->data_dir == DMA_FROM_DEVICE) {
len = sizeof(struct pio_setup_fis);
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("PIO read len = %d\n", len));
@@ -3810,7 +3816,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
" status = %x\n", status));
if (status == 0) {
phy->phy_state = 1;
- if (pm8001_ha->flags == PM8001F_RUN_TIME)
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL)
complete(phy->enable_completion);
}
break;
@@ -4196,12 +4203,12 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
return ret;
}
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
static const u8 data_dir_flags[] = {
- [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
- [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */
- [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
- [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
+ [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+ [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
+ [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
+ [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
};
void
pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
@@ -4248,13 +4255,13 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
* DMA-map SMP request, response buffers
*/
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out;
@@ -4287,10 +4294,10 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
err_out_2:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
err_out:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return rc;
}
@@ -4369,7 +4376,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
circularQ = &pm8001_ha->inbnd_q_tbl[0];
- if (task->data_dir == PCI_DMA_NONE) {
+ if (task->data_dir == DMA_NONE) {
ATAP = 0x04; /* no data*/
PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
} else if (likely(!task->ata_task.device_control_reg_update)) {
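
The data_dir_flags[] rewrite above is safe because the designated initializers keep their positions: the old PCI_DMA_* constants were defined to the same values as enum dma_data_direction (include/linux/dma-direction.h), namely:

	enum dma_data_direction {
		DMA_BIDIRECTIONAL = 0,
		DMA_TO_DEVICE = 1,
		DMA_FROM_DEVICE = 2,
		DMA_NONE = 3,
	};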
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index e4867e690c84..6d91e2446542 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -131,10 +131,6 @@
#define LINKRATE_30 (0x02 << 8)
#define LINKRATE_60 (0x04 << 8)
-/* for phy state */
-
-#define PHY_STATE_LINK_UP_SPC 0x1
-
/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
#define GSM_SM_BASE 0x4F0000
struct mpi_msg_hdr{
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 7a697ca68501..d71e7e4ec29c 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -121,7 +121,7 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
{
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- phy->phy_state = 0;
+ phy->phy_state = PHY_LINK_DISABLE;
phy->pm8001_ha = pm8001_ha;
sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
@@ -152,7 +152,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
- pci_free_consistent(pm8001_ha->pdev,
+ dma_free_coherent(&pm8001_ha->pdev->dev,
(pm8001_ha->memoryMap.region[i].total_len +
pm8001_ha->memoryMap.region[i].alignment),
pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -501,30 +501,12 @@ static int pci_go_44(struct pci_dev *pdev)
{
int rc;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
- if (rc) {
- rc = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "44-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (rc) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
dev_printk(KERN_ERR, &pdev->dev,
"32-bit DMA enable failed\n");
- return rc;
- }
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
}
return rc;
}
@@ -1067,6 +1049,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
if (rc)
goto err_out_shost;
scsi_scan_host(pm8001_ha->shost);
+ pm8001_ha->flags = PM8001F_RUN_TIME;
return 0;
err_out_shost:
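
The pci_go_44() rewrite collapses four mask calls into the modern fallback idiom: dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, so only the widest-first retry chain remains. Condensed:

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_err(&pdev->dev, "no usable DMA mask\n");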
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 947d6017d004..b3be49d41375 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
u64 align_offset = 0;
if (align)
align_offset = (dma_addr_t)align - 1;
- mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align,
- &mem_dma_handle);
+ mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align,
+ &mem_dma_handle, GFP_KERNEL);
if (!mem_virt_alloc) {
pm8001_printk("memory allocation error\n");
return -1;
@@ -157,9 +157,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
int rc = 0, phy_id = sas_phy->id;
struct pm8001_hba_info *pm8001_ha = NULL;
struct sas_phy_linkrates *rates;
+ struct sas_ha_struct *sas_ha;
+ struct pm8001_phy *phy;
DECLARE_COMPLETION_ONSTACK(completion);
unsigned long flags;
pm8001_ha = sas_phy->ha->lldd_ha;
+ phy = &pm8001_ha->phy[phy_id];
pm8001_ha->phy[phy_id].enable_completion = &completion;
switch (func) {
case PHY_FUNC_SET_LINK_RATE:
@@ -172,7 +175,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
pm8001_ha->phy[phy_id].maximum_linkrate =
rates->maximum_linkrate;
}
- if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@@ -180,7 +183,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
PHY_LINK_RESET);
break;
case PHY_FUNC_HARD_RESET:
- if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@@ -188,7 +191,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
PHY_HARD_RESET);
break;
case PHY_FUNC_LINK_RESET:
- if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@@ -200,6 +203,25 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
PHY_LINK_RESET);
break;
case PHY_FUNC_DISABLE:
+ if (pm8001_ha->chip_id != chip_8001) {
+ if (pm8001_ha->phy[phy_id].phy_state ==
+ PHY_STATE_LINK_UP_SPCV) {
+ sas_ha = pm8001_ha->sas;
+ sas_phy_disconnected(&phy->sas_phy);
+ sas_ha->notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ phy->phy_attached = 0;
+ }
+ } else {
+ if (pm8001_ha->phy[phy_id].phy_state ==
+ PHY_STATE_LINK_UP_SPC) {
+ sas_ha = pm8001_ha->sas;
+ sas_phy_disconnected(&phy->sas_phy);
+ sas_ha->notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ phy->phy_attached = 0;
+ }
+ }
PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
break;
case PHY_FUNC_GET_EVENTS:
@@ -374,6 +396,13 @@ static int pm8001_task_exec(struct sas_task *task,
return 0;
}
pm8001_ha = pm8001_find_ha_by_dev(task->dev);
+ if (pm8001_ha->controller_fatal_error) {
+ struct task_status_struct *ts = &t->task_status;
+
+ ts->resp = SAS_TASK_UNDELIVERED;
+ t->task_done(t);
+ return 0;
+ }
PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
spin_lock_irqsave(&pm8001_ha->lock, flags);
do {
@@ -466,7 +495,7 @@ err_out:
dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
if (!sas_protocol_ata(t->task_proto))
if (n_elem)
- dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
+ dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
t->data_dir);
out_done:
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
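
The n_elem -> t->num_scatter change in the error path above is a genuine fix, not churn: Documentation/DMA-API.txt requires that dma_unmap_sg() be passed the same nents originally handed to dma_map_sg(), not the possibly smaller count dma_map_sg() returned after entry merging. In sketch form:

	nents = dma_map_sg(dev, sgl, n, dir);	/* nents may be < n */
	if (!nents)
		return -ENOMEM;
	/* ... I/O ... */
	dma_unmap_sg(dev, sgl, n, dir);		/* pass n, never nents */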
@@ -504,9 +533,9 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
break;
case SAS_PROTOCOL_SATA:
@@ -1020,13 +1049,11 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
struct pm8001_device *pm8001_dev;
struct pm8001_hba_info *pm8001_ha;
struct sas_phy *phy;
- u32 device_id = 0;
if (!dev || !dev->lldd_dev)
return -1;
pm8001_dev = dev->lldd_dev;
- device_id = pm8001_dev->device_id;
pm8001_ha = pm8001_find_ha_by_dev(dev);
PM8001_EH_DBG(pm8001_ha,
@@ -1159,7 +1186,6 @@ int pm8001_abort_task(struct sas_task *task)
{
unsigned long flags;
u32 tag;
- u32 device_id;
struct domain_device *dev ;
struct pm8001_hba_info *pm8001_ha;
struct scsi_lun lun;
@@ -1173,7 +1199,6 @@ int pm8001_abort_task(struct sas_task *task)
dev = task->dev;
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
- device_id = pm8001_dev->device_id;
phy_id = pm8001_dev->attached_phy;
rc = pm8001_find_tag(task, &tag);
if (rc == 0) {
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 80b4dd6df0c2..f88b0d33c385 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -58,7 +58,7 @@
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
-#define DRV_VERSION "0.1.38"
+#define DRV_VERSION "0.1.39"
#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
@@ -538,6 +538,7 @@ struct pm8001_hba_info {
u32 logging_level;
u32 fw_status;
u32 smp_exp_mode;
+ bool controller_fatal_error;
const struct firmware *fw_image;
struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
u32 reset_in_progress;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 42f0405601ad..63e4f7d34d6c 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -577,6 +577,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+ /* Update Fatal error interrupt vector */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+ ((pm8001_ha->number_of_intr - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
@@ -1110,6 +1113,9 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
return -EBUSY;
}
+ /* Initialize the controller fatal error flag */
+ pm8001_ha->controller_fatal_error = false;
+
/* Initialize pci space address eg: mpi offset */
init_pci_device_addresses(pm8001_ha);
init_default_table_values(pm8001_ha);
@@ -1218,13 +1224,17 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
u32 bootloader_state;
u32 ibutton0, ibutton1;
- /* Check if MPI is in ready state to reset */
- if (mpi_uninit_check(pm8001_ha) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MPI state is not ready\n"));
- return -1;
+ /* Process MPI table uninitialization only if FW is ready */
+ if (!pm8001_ha->controller_fatal_error) {
+ /* Check if MPI is in ready state to reset */
+ if (mpi_uninit_check(pm8001_ha) != 0) {
+ regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "MPI state is not ready scratch1 :0x%x\n",
+ regval));
+ return -1;
+ }
}
-
/* checked for reset register normal state; 0x0 */
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
PM8001_INIT_DBG(pm8001_ha,
@@ -2123,7 +2133,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
sata_resp = &psataPayload->sata_resp[0];
resp = (struct ata_task_resp *)ts->buf;
if (t->ata_task.dma_xfer == 0 &&
- t->data_dir == PCI_DMA_FROMDEVICE) {
+ t->data_dir == DMA_FROM_DEVICE) {
len = sizeof(struct pio_setup_fis);
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("PIO read len = %d\n", len));
@@ -3118,8 +3128,9 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
status, phy_id));
if (status == 0) {
- phy->phy_state = 1;
- if (pm8001_ha->flags == PM8001F_RUN_TIME)
+ phy->phy_state = PHY_LINK_DOWN;
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL)
complete(phy->enable_completion);
}
return 0;
@@ -3211,7 +3222,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
return 0;
}
phy->phy_attached = 0;
- phy->phy_state = 0;
+ phy->phy_state = PHY_LINK_DISABLE;
break;
case HW_EVENT_PORT_INVALID:
PM8001_MSG_DBG(pm8001_ha,
@@ -3384,13 +3395,14 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 status =
le32_to_cpu(pPayload->status);
u32 phyid =
- le32_to_cpu(pPayload->phyid);
+ le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("phy:0x%x status:0x%x\n",
phyid, status));
- if (status == 0)
- phy->phy_state = 0;
+ if (status == PHY_STOP_SUCCESS ||
+ status == PHY_STOP_ERR_DEVICE_ATTACHED)
+ phy->phy_state = PHY_LINK_DISABLE;
return 0;
}
@@ -3752,6 +3764,46 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
}
+static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha)
+{
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
+ PM8001_FAIL_DBG(pm8001_ha,
+		pm8001_printk("MSGU_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_4: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7)));
+}
+
static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
struct outbound_queue_table *circularQ;
@@ -3759,10 +3811,28 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
u8 uninitialized_var(bc);
u32 ret = MPI_IO_STATUS_FAIL;
unsigned long flags;
+ u32 regval;
+ if (vec == (pm8001_ha->number_of_intr - 1)) {
+ regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
+ SCRATCH_PAD_MIPSALL_READY) {
+ pm8001_ha->controller_fatal_error = true;
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "Firmware Fatal error! Regval:0x%x\n", regval));
+ print_scratchpad_registers(pm8001_ha);
+ return ret;
+ }
+ }
spin_lock_irqsave(&pm8001_ha->lock, flags);
circularQ = &pm8001_ha->outbnd_q_tbl[vec];
do {
+ /* spurious interrupt during setup if kexec-ing and
+ * driver doing a doorbell access w/ the pre-kexec oq
+ * interrupt setup.
+ */
+ if (!circularQ->pi_virt)
+ break;
ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
if (MPI_IO_STATUS_SUCCESS == ret) {
/* process the outbound message */
@@ -3785,12 +3855,12 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
return ret;
}
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
static const u8 data_dir_flags[] = {
- [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
- [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */
- [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
- [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
+ [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+ [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
+ [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
+ [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
};
static void build_smp_cmd(u32 deviceID, __le32 hTag,
@@ -3832,13 +3902,13 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
* DMA-map SMP request, response buffers
*/
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out;
@@ -3929,10 +3999,10 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
err_out_2:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
err_out:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return rc;
}
@@ -4156,7 +4226,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
- if (task->data_dir == PCI_DMA_NONE) {
+ if (task->data_dir == DMA_NONE) {
ATAP = 0x04; /* no data*/
PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
} else if (likely(!task->ata_task.device_control_reg_update)) {
@@ -4606,9 +4676,8 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
u32 length, u8 *buf)
{
- u32 page_code, i;
+ u32 i;
- page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
mpi_set_phy_profile_req(pm8001_ha,
SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 889e69ce3689..84d7426441bf 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -170,6 +170,10 @@
#define LINKRATE_60 (0x04 << 8)
#define LINKRATE_120 (0x08 << 8)
+/* phy_stop */
+#define PHY_STOP_SUCCESS 0x00
+#define PHY_STOP_ERR_DEVICE_ATTACHED 0x1046
+
/* phy_profile */
#define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04
#define PHY_DWORD_LENGTH 0xC
@@ -216,8 +220,6 @@
#define SAS_DOPNRJT_RTRY_TMO 128
#define SAS_COPNRJT_RTRY_TMO 128
-/* for phy state */
-#define PHY_STATE_LINK_UP_SPCV 0x2
/*
Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
@@ -1384,6 +1386,9 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
#define SCRATCH_PAD_IOP0_READY 0xC00
#define SCRATCH_PAD_IOP1_READY 0x3000
+#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \
+ SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_RAAE_READY)
/* boot loader state */
#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */
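
For reference, SCRATCH_PAD_MIPSALL_READY is the OR of the per-core ready bits. Assuming SCRATCH_PAD_RAAE_READY is 0x3 (defined elsewhere in this header, outside the hunk), the composite works out to 0x3000 | 0xC00 | 0x3 = 0x3C03, so the fatal-error test added to process_oq() in pm80xx_hwi.c fires as soon as any MIPS core drops its ready bit:

	/* regval is MSGU_SCRATCH_PAD_1; all ready bits must be set */
	if ((regval & SCRATCH_PAD_MIPSALL_READY) != SCRATCH_PAD_MIPSALL_READY)
		pm8001_ha->controller_fatal_error = true;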
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 0a5dd5595dd3..d5a4f17fce51 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2855,12 +2855,12 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
qedf->num_queues);
- qedf->p_cpuq = pci_alloc_consistent(qedf->pdev,
+ qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
qedf->num_queues * sizeof(struct qedf_glbl_q_params),
- &qedf->hw_p_cpuq);
+ &qedf->hw_p_cpuq, GFP_KERNEL);
if (!qedf->p_cpuq) {
- QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n");
+ QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
return 1;
}
@@ -2929,7 +2929,7 @@ static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
if (qedf->p_cpuq) {
size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
- pci_free_consistent(qedf->pdev, size, qedf->p_cpuq,
+ dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
qedf->hw_p_cpuq);
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cc8e64dc65ad..105b0e4d7818 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -806,11 +806,11 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
memset(&qedi->pf_params.iscsi_pf_params, 0,
sizeof(qedi->pf_params.iscsi_pf_params));
- qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+ qedi->p_cpuq = dma_alloc_coherent(&qedi->pdev->dev,
qedi->num_queues * sizeof(struct qedi_glbl_q_params),
- &qedi->hw_p_cpuq);
+ &qedi->hw_p_cpuq, GFP_KERNEL);
if (!qedi->p_cpuq) {
- QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
+ QEDI_ERR(&qedi->dbg_ctx, "dma_alloc_coherent fail\n");
rval = -1;
goto err_alloc_mem;
}
@@ -871,7 +871,7 @@ static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
if (qedi->p_cpuq) {
size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
- pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+ dma_free_coherent(&qedi->pdev->dev, size, qedi->p_cpuq,
qedi->hw_p_cpuq);
}
@@ -2472,6 +2472,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
/* start qedi context */
spin_lock_init(&qedi->hba_lock);
spin_lock_init(&qedi->task_idx_lock);
+ mutex_init(&qedi->stats_lock);
}
qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
qedi_ops->ll2->start(qedi->cdev, &params);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 390775d5c918..15a50cc7e4b3 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1750,7 +1750,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
uint8_t *sp, *tbuf;
dma_addr_t p_tbuf;
- tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
+ tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL);
if (!tbuf)
return -ENOMEM;
#endif
@@ -1841,7 +1841,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
out:
#if DUMP_IT_BACK
- pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
+ dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf);
#endif
return err;
}
@@ -4259,8 +4259,8 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->devnum = devnum; /* specifies microcode load address */
#ifdef QLA_64BIT_PTR
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+ if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
"suitable DMA mask - aborting\n", ha->host_no);
error = -ENODEV;
@@ -4270,7 +4270,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
ha->host_no);
#else
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
"suitable DMA mask - aborting\n", ha->host_no);
error = -ENODEV;
@@ -4278,17 +4278,17 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
#endif
- ha->request_ring = pci_alloc_consistent(ha->pdev,
+ ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
- &ha->request_dma);
+ &ha->request_dma, GFP_KERNEL);
if (!ha->request_ring) {
printk(KERN_INFO "qla1280: Failed to get request memory\n");
goto error_put_host;
}
- ha->response_ring = pci_alloc_consistent(ha->pdev,
+ ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
- &ha->response_dma);
+ &ha->response_dma, GFP_KERNEL);
if (!ha->response_ring) {
printk(KERN_INFO "qla1280: Failed to get response memory\n");
goto error_free_request_ring;
@@ -4370,11 +4370,11 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
release_region(host->io_port, 0xff);
#endif
error_free_response_ring:
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
ha->response_ring, ha->response_dma);
error_free_request_ring:
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
ha->request_ring, ha->request_dma);
error_put_host:
@@ -4404,10 +4404,10 @@ qla1280_remove_one(struct pci_dev *pdev)
release_region(host->io_port, 0xff);
#endif
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
ha->request_ring, ha->request_dma);
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
ha->response_ring, ha->response_dma);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4888b999e82f..b28f159fdaee 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -158,9 +158,17 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return 0;
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
+
if (IS_NOCACHE_VPD_TYPE(ha))
ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
ha->nvram_size);
+ mutex_unlock(&ha->optrom_mutex);
+
return memory_read_from_buffer(buf, count, &off, ha->nvram,
ha->nvram_size);
}
@@ -208,10 +216,17 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
return -EAGAIN;
}
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
+ return -EAGAIN;
+ }
+
/* Write NVRAM. */
ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
- count);
+ count);
+ mutex_unlock(&ha->optrom_mutex);
ql_dbg(ql_dbg_user, vha, 0x7060,
"Setting ISP_ABORT_NEEDED\n");
@@ -322,6 +337,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
size = ha->optrom_size - start;
mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
switch (val) {
case 0:
if (ha->optrom_state != QLA_SREADING &&
@@ -499,8 +518,14 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
faddr = ha->flt_region_vpd_sec << 2;
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
ha->vpd_size);
+ mutex_unlock(&ha->optrom_mutex);
}
return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}
@@ -518,9 +543,6 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
if (unlikely(pci_channel_offline(ha->pdev)))
return 0;
- if (qla2x00_chip_is_down(vha))
- return 0;
-
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
!ha->isp_ops->write_nvram)
return 0;
@@ -531,16 +553,25 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
return -EAGAIN;
}
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
+
/* Write NVRAM. */
ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
/* Update flash version information for 4Gb & above. */
- if (!IS_FWI2_CAPABLE(ha))
+ if (!IS_FWI2_CAPABLE(ha)) {
+ mutex_unlock(&ha->optrom_mutex);
return -EINVAL;
+ }
tmp_data = vmalloc(256);
if (!tmp_data) {
+ mutex_unlock(&ha->optrom_mutex);
ql_log(ql_log_warn, vha, 0x706b,
"Unable to allocate memory for VPD information update.\n");
return -ENOMEM;
@@ -548,6 +579,8 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
ha->isp_ops->get_flash_version(vha, tmp_data);
vfree(tmp_data);
+ mutex_unlock(&ha->optrom_mutex);
+
return count;
}
@@ -573,10 +606,15 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
return 0;
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
return 0;
+ }
rval = qla2x00_read_sfp_dev(vha, buf, count);
+ mutex_unlock(&vha->hw->optrom_mutex);
+
if (rval)
return -EIO;
@@ -785,9 +823,11 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
if (unlikely(pci_channel_offline(ha->pdev)))
return 0;
-
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
return 0;
+ }
if (ha->xgmac_data)
goto do_read;
@@ -795,6 +835,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
&ha->xgmac_data_dma, GFP_KERNEL);
if (!ha->xgmac_data) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x7076,
"Unable to allocate memory for XGMAC read-data.\n");
return 0;
@@ -806,6 +847,8 @@ do_read:
rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
XGMAC_DATA_SIZE, &actual_size);
+
+ mutex_unlock(&vha->hw->optrom_mutex);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7077,
"Unable to read XGMAC data (%x).\n", rval);
@@ -842,13 +885,16 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
if (ha->dcbx_tlv)
goto do_read;
-
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
return 0;
+ }
ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
&ha->dcbx_tlv_dma, GFP_KERNEL);
if (!ha->dcbx_tlv) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x7078,
"Unable to allocate memory for DCBX TLV read-data.\n");
return -ENOMEM;
@@ -859,6 +905,9 @@ do_read:
rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
DCBX_TLV_DATA_SIZE);
+
+ mutex_unlock(&vha->hw->optrom_mutex);
+
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7079,
"Unable to read DCBX TLV (%x).\n", rval);
@@ -1159,6 +1208,34 @@ qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
+ vha->hw->last_zio_threshold);
+}
+
+static ssize_t
+qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
+ return -EINVAL;
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+ if (val < 0 || val > 256)
+ return -ERANGE;
+
+ atomic_set(&vha->hw->zio_threshold, val);
+ return strlen(buf);
+}
+
+static ssize_t
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1184,15 +1261,17 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return -EPERM;
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&vha->hw->optrom_mutex);
if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x707a,
"Abort ISP active -- ignoring beacon request.\n");
return -EBUSY;
}
- if (sscanf(buf, "%d", &val) != 1)
- return -EINVAL;
-
if (val)
rval = ha->isp_ops->beacon_on(vha);
else
@@ -1201,6 +1280,8 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
if (rval != QLA_SUCCESS)
count = 0;
+ mutex_unlock(&vha->hw->optrom_mutex);
+
return count;
}
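
Every qla2xxx sysfs handler touched in this patch converges on the same serialization pattern, taking optrom_mutex around any chip access and bailing out while the ISP is down. Condensed:

	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return -EAGAIN;	/* chip resetting; caller may retry */
	}
	/* ... mailbox or flash access, now safe against reset ... */
	mutex_unlock(&vha->hw->optrom_mutex);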
@@ -1370,18 +1451,24 @@ qla2x00_thermal_temp_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
uint16_t temp = 0;
+ int rc;
+ mutex_lock(&vha->hw->optrom_mutex);
if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
goto done;
}
if (vha->hw->flags.eeh_busy) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
goto done;
}
- if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+ rc = qla2x00_get_thermal_temp(vha, &temp);
+ mutex_unlock(&vha->hw->optrom_mutex);
+ if (rc == QLA_SUCCESS)
return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
done:
@@ -1402,13 +1489,24 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
}
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x707c,
"ISP reset active.\n");
- else if (!vha->hw->flags.eeh_busy)
- rval = qla2x00_get_firmware_state(vha, state);
- if (rval != QLA_SUCCESS)
+ goto out;
+ } else if (vha->hw->flags.eeh_busy) {
+ mutex_unlock(&vha->hw->optrom_mutex);
+ goto out;
+ }
+
+ rval = qla2x00_get_firmware_state(vha, state);
+ mutex_unlock(&vha->hw->optrom_mutex);
+out:
+ if (rval != QLA_SUCCESS) {
memset(state, -1, sizeof(state));
+ rval = qla2x00_get_firmware_state(vha, state);
+ }
return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
state[0], state[1], state[2], state[3], state[4], state[5]);
@@ -1534,6 +1632,433 @@ qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
ha->max_speed_sup ? "32Gps" : "16Gps");
}
+/* ----- */
+
+static ssize_t
+qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int len = 0;
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "Supported options: enabled | disabled | dual | exclusive\n");
+
+ /* --- */
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
+
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_EXCLUSIVE);
+ break;
+ case QLA2XXX_INI_MODE_DISABLED:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_DISABLED);
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_ENABLED);
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_DUAL);
+ break;
+ }
+ len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
+
+ return len;
+}
+
+static char *mode_to_str[] = {
+ "exclusive",
+ "disabled",
+ "enabled",
+ "dual",
+};
+
+#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
+static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
+{
+ int rc = 0;
+ enum {
+ NO_ACTION,
+ MODE_CHANGE_ACCEPT,
+ MODE_CHANGE_NO_ACTION,
+ TARGET_STILL_ACTIVE,
+ };
+ int action = NO_ACTION;
+ int set_mode = 0;
+ u8 eo_toggle = 0; /* exchange offload flipped */
+
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ switch (op) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle) {
+				/*
+				 * The number of exchanges to be offloaded
+				 * was tweaked, or the offload option was
+				 * flipped.
+				 */
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ break;
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle) {
+				/*
+				 * The number of exchanges to be offloaded
+				 * was tweaked, or the offload option was
+				 * flipped.
+				 */
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ } else {
+ action = MODE_CHANGE_ACCEPT;
+ }
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ action = MODE_CHANGE_ACCEPT;
+ /* active_mode is target only, reset it to dual */
+ if (qla_tgt_mode_enabled(vha)) {
+ set_mode = 1;
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (qla_tgt_mode_enabled(vha))
+ action = TARGET_STILL_ACTIVE;
+ else {
+ action = MODE_CHANGE_ACCEPT;
+ set_mode = 1;
+ }
+ break;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ switch (op) {
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle)
+					/*
+					 * The number of exchanges to be offloaded
+					 * was tweaked, or the offload option was
+					 * flipped.
+					 */
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = NO_ACTION;
+ } else
+ action = NO_ACTION;
+
+ break;
+
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle)
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = MODE_CHANGE_NO_ACTION;
+ } else
+ action = MODE_CHANGE_NO_ACTION;
+ break;
+
+ case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
+ if (qla_tgt_mode_enabled(vha)) {
+ action = MODE_CHANGE_ACCEPT;
+ set_mode = 1;
+ } else
+ action = MODE_CHANGE_ACCEPT;
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (qla_tgt_mode_enabled(vha))
+ action = TARGET_STILL_ACTIVE;
+ else {
+ if (vha->hw->flags.fw_started)
+ action = MODE_CHANGE_NO_ACTION;
+ else
+ action = MODE_CHANGE_ACCEPT;
+ }
+ break;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ switch (op) {
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
+ eo_toggle)
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = NO_ACTION;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ case QLA2XXX_INI_MODE_DISABLED:
+ action = MODE_CHANGE_ACCEPT;
+ break;
+ default:
+ action = MODE_CHANGE_NO_ACTION;
+ break;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_DUAL:
+ switch (op) {
+ case QLA2XXX_INI_MODE_DUAL:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
+ vha->u_ql2xiniexchg) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+
+ if ((((vha->ql2xexchoffld +
+ vha->ql2xiniexchg) !=
+ (vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) || eo_toggle)
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = NO_ACTION;
+ } else {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
+ vha->u_ql2xiniexchg) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+
+ if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
+ != (vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) || eo_toggle)
+ action = MODE_CHANGE_NO_ACTION;
+ else
+ action = NO_ACTION;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ /* turning off initiator mode */
+ set_mode = 1;
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha))
+ set_mode = 1;
+ action = MODE_CHANGE_ACCEPT;
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ action = TARGET_STILL_ACTIVE;
+ } else {
+ action = MODE_CHANGE_ACCEPT;
+ }
+ }
+ break;
+ }
+
+ switch (action) {
+ case MODE_CHANGE_ACCEPT:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
+ mode_to_str[vha->qlini_mode], mode_to_str[op],
+ vha->ql2xexchoffld, vha->u_ql2xexchoffld,
+ vha->ql2xiniexchg, vha->u_ql2xiniexchg);
+
+ vha->qlini_mode = op;
+ vha->ql2xexchoffld = vha->u_ql2xexchoffld;
+ vha->ql2xiniexchg = vha->u_ql2xiniexchg;
+ if (set_mode)
+ qlt_set_mode(vha);
+ vha->flags.online = 1;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case MODE_CHANGE_NO_ACTION:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
+ mode_to_str[vha->qlini_mode], mode_to_str[op],
+ vha->ql2xexchoffld, vha->u_ql2xexchoffld,
+ vha->ql2xiniexchg, vha->u_ql2xiniexchg);
+ vha->qlini_mode = op;
+ vha->ql2xexchoffld = vha->u_ql2xexchoffld;
+ vha->ql2xiniexchg = vha->u_ql2xiniexchg;
+ break;
+
+ case TARGET_STILL_ACTIVE:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Target Mode is active. Unable to change Mode.\n");
+ break;
+
+ case NO_ACTION:
+ default:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
+ vha->qlini_mode, op,
+ vha->ql2xexchoffld, vha->u_ql2xexchoffld);
+ break;
+ }
+
+ return rc;
+}
+
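+/*
+ * sysfs store for "qlini_mode": accepts "exclusive", "disabled",
+ * "enabled" or "dual" (case-insensitive) and applies the result via
+ * qla_set_ini_mode().
+ */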
+static ssize_t
+qlini_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int ini;
+
+ if (!buf)
+ return -EINVAL;
+
+ if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
+ strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
+ ini = QLA2XXX_INI_MODE_EXCLUSIVE;
+ else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
+ strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
+ ini = QLA2XXX_INI_MODE_DISABLED;
+ else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
+ strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
+ ini = QLA2XXX_INI_MODE_ENABLED;
+ else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
+ strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
+ ini = QLA2XXX_INI_MODE_DUAL;
+ else
+ return -EINVAL;
+
+ qla_set_ini_mode(vha, ini);
+ return strlen(buf);
+}
+
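+/*
+ * ql2xexchoffld/ql2xiniexchg expose the pending (user) and current
+ * exchange counts; a new value only takes effect after qlini_mode is
+ * (re)written, which reloads the setting into the firmware.
+ */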
+static ssize_t
+ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int len = 0;
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "target exchange: new %d : current: %d\n\n",
+ vha->u_ql2xexchoffld, vha->ql2xexchoffld);
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
+ vha->host_no);
+
+ return len;
+}
+
+static ssize_t
+ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val > FW_MAX_EXCHANGES_CNT)
+ val = FW_MAX_EXCHANGES_CNT;
+ else if (val < 0)
+ val = 0;
+
+ vha->u_ql2xexchoffld = val;
+ return strlen(buf);
+}
+
+static ssize_t
+ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int len = 0;
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "target exchange: new %d : current: %d\n\n",
+ vha->u_ql2xiniexchg, vha->ql2xiniexchg);
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
+ vha->host_no);
+
+ return len;
+}
+
+static ssize_t
+ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val > FW_MAX_EXCHANGES_CNT)
+ val = FW_MAX_EXCHANGES_CNT;
+ else if (val < 0)
+ val = 0;
+
+ vha->u_ql2xiniexchg = val;
+ return strlen(buf);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1581,6 +2106,13 @@ static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
+static DEVICE_ATTR(zio_threshold, 0644,
+ qla_zio_threshold_show,
+ qla_zio_threshold_store);
+static DEVICE_ATTR_RW(qlini_mode);
+static DEVICE_ATTR_RW(ql2xexchoffld);
+static DEVICE_ATTR_RW(ql2xiniexchg);
+
struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version,
@@ -1617,9 +2149,28 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_pep_version,
&dev_attr_min_link_speed,
&dev_attr_max_speed_sup,
+ &dev_attr_zio_threshold,
+ NULL, /* reserve for qlini_mode */
+ NULL, /* reserve for ql2xiniexchg */
+ NULL, /* reserve for ql2xexchoffld */
NULL,
};
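+/*
+ * Populate the slots reserved above (qlini_mode, ql2xiniexchg,
+ * ql2xexchoffld) by advancing to the first empty entry in
+ * qla2x00_host_attrs[].
+ */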
+void qla_insert_tgt_attrs(void)
+{
+ struct device_attribute **attr;
+
+ /* advance to empty slot */
+ for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
+ continue;
+
+ *attr = &dev_attr_qlini_mode;
+ attr++;
+ *attr = &dev_attr_ql2xiniexchg;
+ attr++;
+ *attr = &dev_attr_ql2xexchoffld;
+}
+
/* Host attributes. */
static void
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c11a89be292c..4a9fd8d944d6 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2487,7 +2487,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
vha = shost_priv(host);
}
- if (qla2x00_reset_active(vha)) {
+ if (qla2x00_chip_is_down(vha)) {
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
bsg_request->msgcode);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a9dc9c4a6382..26b93c563f92 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -262,8 +262,8 @@ struct name_list_extended {
struct get_name_list_extended *l;
dma_addr_t ldma;
struct list_head fcports;
- spinlock_t fcports_lock;
u32 size;
+ u8 sent;
};
/*
* Timeout timer counts in seconds
@@ -519,6 +519,7 @@ struct srb_iocb {
enum {
TYPE_SRB,
TYPE_TGT_CMD,
+ TYPE_TGT_TMCMD, /* task management */
};
typedef struct srb {
@@ -2280,7 +2281,6 @@ struct ct_sns_desc {
enum discovery_state {
DSC_DELETED,
DSC_GNN_ID,
- DSC_GID_PN,
DSC_GNL,
DSC_LOGIN_PEND,
DSC_LOGIN_FAILED,
@@ -2305,7 +2305,6 @@ enum login_state { /* FW control Target side */
enum fcport_mgt_event {
FCME_RELOGIN = 1,
FCME_RSCN,
- FCME_GIDPN_DONE,
FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */
FCME_PRLI_DONE,
FCME_GNL_DONE,
@@ -2351,7 +2350,7 @@ typedef struct fc_port {
unsigned int login_succ:1;
unsigned int query:1;
unsigned int id_changed:1;
- unsigned int rscn_rcvd:1;
+ unsigned int scan_needed:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -2375,11 +2374,13 @@ typedef struct fc_port {
unsigned long expires;
struct list_head del_list_entry;
struct work_struct free_work;
-
+ struct work_struct reg_work;
+ uint64_t jiffies_at_registration;
struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
uint16_t tgt_id;
uint16_t old_tgt_id;
+ uint16_t sec_since_registration;
uint8_t fcp_prio;
@@ -2412,6 +2413,7 @@ typedef struct fc_port {
struct qla_tgt_sess *tgt_session;
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
+ enum discovery_state next_disc_state;
enum login_state fw_login_state;
unsigned long dm_login_expire;
unsigned long plogi_nack_done_deadline;
@@ -3212,17 +3214,14 @@ enum qla_work_type {
QLA_EVT_ASYNC_LOGOUT,
QLA_EVT_ASYNC_LOGOUT_DONE,
QLA_EVT_ASYNC_ADISC,
- QLA_EVT_ASYNC_ADISC_DONE,
QLA_EVT_UEVENT,
QLA_EVT_AENFX,
- QLA_EVT_GIDPN,
QLA_EVT_GPNID,
QLA_EVT_UNMAP,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
QLA_EVT_PRLI,
QLA_EVT_GPSC,
- QLA_EVT_UPD_FCPORT,
QLA_EVT_GNL,
QLA_EVT_NACK,
QLA_EVT_RELOGIN,
@@ -3483,6 +3482,9 @@ struct qla_qpair {
struct list_head qp_list_elem; /* vha->qp_list */
struct list_head hints_list;
uint16_t cpuid;
+ uint16_t retry_term_cnt;
+ uint32_t retry_term_exchg_addr;
+ uint64_t retry_term_jiff;
struct qla_tgt_counters tgt_counters;
};
@@ -4184,6 +4186,10 @@ struct qla_hw_data {
atomic_t nvme_active_aen_cnt;
uint16_t nvme_last_rptd_aen; /* Last recorded aen count */
+
+ atomic_t zio_threshold;
+ uint16_t last_zio_threshold;
+#define DEFAULT_ZIO_THRESHOLD 64
};
#define FW_ABILITY_MAX_SPEED_MASK 0xFUL
@@ -4263,10 +4269,11 @@ typedef struct scsi_qla_host {
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
#define QPAIR_ONLINE_CHECK_NEEDED 27
-#define SET_ZIO_THRESHOLD_NEEDED 28
+#define SET_NVME_ZIO_THRESHOLD_NEEDED 28
#define DETECT_SFP_CHANGE 29
#define N2N_LOGIN_NEEDED 30
#define IOCB_WORK_ACTIVE 31
+#define SET_ZIO_THRESHOLD_NEEDED 32
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4369,6 +4376,13 @@ typedef struct scsi_qla_host {
atomic_t vref_count;
struct qla8044_reset_template reset_tmplt;
uint16_t bbcr;
+
+ uint16_t u_ql2xexchoffld;
+ uint16_t u_ql2xiniexchg;
+ uint16_t qlini_mode;
+ uint16_t ql2xexchoffld;
+ uint16_t ql2xiniexchg;
+
struct name_list_extended gnl;
/* Count of active session/fcport */
int fcport_count;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 178974896b5c..3673fcdb033a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -54,7 +54,7 @@ extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
extern void qla2x00_quiesce_io(scsi_qla_host_t *);
extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
-
+void qla_register_fcport_fn(struct work_struct *);
extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
@@ -73,8 +73,6 @@ extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
@@ -109,6 +107,7 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
int qla24xx_detect_sfp(scsi_qla_host_t *vha);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+
void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
@@ -118,6 +117,8 @@ extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
+void qla_rscn_replay(fc_port_t *fcport);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -158,6 +159,7 @@ extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
+extern int ql2xexlogins;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -208,7 +210,7 @@ extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
extern void qla2x00_sp_compl(void *, int);
extern void qla2xxx_qpair_sp_free_dma(void *);
extern void qla2xxx_qpair_sp_compl(void *, int);
-extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
+extern void qla24xx_sched_upd_fcport(fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
@@ -644,9 +646,6 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
extern void qla2x00_async_iocb_timeout(void *data);
-extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *);
-int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *);
-void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *);
extern void qla2x00_free_fcport(fc_port_t *);
@@ -677,6 +676,7 @@ void qla_scan_work_fn(struct work_struct *);
*/
struct device_attribute;
extern struct device_attribute *qla2x00_host_attrs[];
+extern struct device_attribute *qla2x00_host_attrs_dm[];
struct fc_function_template;
extern struct fc_function_template qla2xxx_transport_functions;
extern struct fc_function_template qla2xxx_transport_vport_functions;
@@ -690,7 +690,7 @@ extern int qla2x00_echo_test(scsi_qla_host_t *,
extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
struct qla_fcp_prio_cfg *, uint8_t);
-
+void qla_insert_tgt_attrs(void);
/*
* Global Function Prototypes in qla_dfs.c source file.
*/
@@ -897,5 +897,6 @@ void qlt_unknown_atio_work_fn(struct work_struct *);
void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
void qlt_remove_target_resources(struct qla_hw_data *);
void qlt_clr_qp_table(struct scsi_qla_host *vha);
+void qlt_set_mode(struct scsi_qla_host *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index a0038d879b9d..90cfa394f942 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2973,237 +2973,6 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
}
}
-/* GID_PN completion processing. */
-void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- ql_dbg(ql_dbg_disc, vha, 0x201d,
- "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
- __func__, fcport->port_name, fcport->disc_state,
- fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
- fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
-
- if (fcport->disc_state == DSC_DELETE_PEND)
- return;
-
- if (ea->sp->gen2 != fcport->login_gen) {
- /* PLOGI/PRLI/LOGO came in while cmd was out.*/
- ql_dbg(ql_dbg_disc, vha, 0x201e,
- "%s %8phC generation changed rscn %d|%d n",
- __func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen);
- return;
- }
-
- if (!ea->rc) {
- if (ea->sp->gen1 == fcport->rscn_gen) {
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->flags |= FCF_FABRIC_DEVICE;
-
- if (fcport->d_id.b24 == ea->id.b24) {
- /* cable plugged into the same place */
- switch (vha->host->active_mode) {
- case MODE_TARGET:
- if (fcport->fw_login_state ==
- DSC_LS_PRLI_COMP) {
- u16 data[2];
- /*
- * Late RSCN was delivered.
- * Remote port already login'ed.
- */
- ql_dbg(ql_dbg_disc, vha, 0x201f,
- "%s %d %8phC post adisc\n",
- __func__, __LINE__,
- fcport->port_name);
- data[0] = data[1] = 0;
- qla2x00_post_async_adisc_work(
- vha, fcport, data);
- }
- break;
- case MODE_INITIATOR:
- case MODE_DUAL:
- default:
- ql_dbg(ql_dbg_disc, vha, 0x201f,
- "%s %d %8phC post %s\n", __func__,
- __LINE__, fcport->port_name,
- (atomic_read(&fcport->state) ==
- FCS_ONLINE) ? "adisc" : "gnl");
-
- if (atomic_read(&fcport->state) ==
- FCS_ONLINE) {
- u16 data[2];
-
- data[0] = data[1] = 0;
- qla2x00_post_async_adisc_work(
- vha, fcport, data);
- } else {
- qla24xx_post_gnl_work(vha,
- fcport);
- }
- break;
- }
- } else { /* fcport->d_id.b24 != ea->id.b24 */
- fcport->d_id.b24 = ea->id.b24;
- fcport->id_changed = 1;
- if (fcport->deleted != QLA_SESS_DELETED) {
- ql_dbg(ql_dbg_disc, vha, 0x2021,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- }
- }
- } else { /* ea->sp->gen1 != fcport->rscn_gen */
- ql_dbg(ql_dbg_disc, vha, 0x2022,
- "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- /* rscn came in while cmd was out */
- qla24xx_post_gidpn_work(vha, fcport);
- }
- } else { /* ea->rc */
- /* cable pulled */
- if (ea->sp->gen1 == fcport->rscn_gen) {
- if (ea->sp->gen2 == fcport->login_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x2042,
- "%s %d %8phC post del sess\n", __func__,
- __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2045,
- "%s %d %8phC login\n", __func__, __LINE__,
- fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
- }
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2049,
- "%s %d %8phC post gidpn\n", __func__, __LINE__,
- fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
- }
- }
-} /* gidpn_event */
-
-static void qla2x00_async_gidpn_sp_done(void *s, int res)
-{
- struct srb *sp = s;
- struct scsi_qla_host *vha = sp->vha;
- fc_port_t *fcport = sp->fcport;
- u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
- struct event_arg ea;
-
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-
- memset(&ea, 0, sizeof(ea));
- ea.fcport = fcport;
- ea.id.b.domain = id[0];
- ea.id.b.area = id[1];
- ea.id.b.al_pa = id[2];
- ea.sp = sp;
- ea.rc = res;
- ea.event = FCME_GIDPN_DONE;
-
- if (res == QLA_FUNCTION_TIMEOUT) {
- ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
- "Async done-%s WWPN %8phC timed out.\n",
- sp->name, fcport->port_name);
- qla24xx_post_gidpn_work(sp->vha, fcport);
- sp->free(sp);
- return;
- } else if (res) {
- ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
- "Async done-%s fail res %x, WWPN %8phC\n",
- sp->name, res, fcport->port_name);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x204f,
- "Async done-%s good WWPN %8phC ID %3phC\n",
- sp->name, fcport->port_name, id);
- }
-
- qla2x00_fcport_event_handler(vha, &ea);
-
- sp->free(sp);
-}
-
-int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
-{
- int rval = QLA_FUNCTION_FAILED;
- struct ct_sns_req *ct_req;
- srb_t *sp;
-
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
- return rval;
-
- fcport->disc_state = DSC_GID_PN;
- fcport->scan_state = QLA_FCPORT_SCAN;
- sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
- if (!sp)
- goto done;
-
- fcport->flags |= FCF_ASYNC_SENT;
- sp->type = SRB_CT_PTHRU_CMD;
- sp->name = "gidpn";
- sp->gen1 = fcport->rscn_gen;
- sp->gen2 = fcport->login_gen;
-
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- /* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
- GID_PN_RSP_SIZE);
-
- /* GIDPN req */
- memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
- WWN_SIZE);
-
- /* req & rsp use the same buffer */
- sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
- sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
- sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_gidpn_sp_done;
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a4,
- "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
- sp->name, fcport->port_name,
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
- return rval;
-
-done_free_sp:
- sp->free(sp);
-done:
- fcport->flags &= ~FCF_ASYNC_ACTIVE;
- return rval;
-}
-
-int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
-{
- struct qla_work_evt *e;
- int ls;
-
- ls = atomic_read(&vha->loop_state);
- if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
- test_bit(UNLOADING, &vha->dpc_flags))
- return 0;
-
- e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
- if (!e)
- return QLA_FUNCTION_FAILED;
-
- e->u.fcport.fcport = fcport;
- fcport->flags |= FCF_ASYNC_ACTIVE;
- return qla2x00_post_work(vha, e);
-}
-
int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_work_evt *e;
@@ -3237,9 +3006,6 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
__func__, fcport->port_name);
return;
} else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
return;
}
@@ -3261,6 +3027,9 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
+ if (res == QLA_FUNCTION_TIMEOUT)
+ return;
+
if (res == (DID_ERROR << 16)) {
/* entry status error */
goto done;
@@ -3272,7 +3041,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
ql_dbg(ql_dbg_disc, vha, 0x2019,
"GPSC command unsupported, disabling query.\n");
ha->flags.gpsc_supported = 0;
- res = QLA_SUCCESS;
+ goto done;
}
} else {
switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
@@ -3305,7 +3074,6 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
be16_to_cpu(ct_rsp->rsp.gpsc.speed));
}
-done:
memset(&ea, 0, sizeof(ea));
ea.event = FCME_GPSC_DONE;
ea.rc = res;
@@ -3313,6 +3081,7 @@ done:
ea.sp = sp;
qla2x00_fcport_event_handler(vha, &ea);
+done:
sp->free(sp);
}
@@ -3355,15 +3124,15 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla24xx_async_gpsc_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_disc, vha, 0x205e,
"Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
sp->name, fcport->port_name, sp->handle,
fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
return rval;
done_free_sp:
@@ -3442,26 +3211,10 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (ea->rc) {
/* cable is disconnected */
list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
- if (fcport->d_id.b24 == ea->id.b24) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC DS %d\n",
- __func__, __LINE__,
- fcport->port_name,
- fcport->disc_state);
+ if (fcport->d_id.b24 == ea->id.b24)
fcport->scan_state = QLA_FCPORT_SCAN;
- switch (fcport->disc_state) {
- case DSC_DELETED:
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- break;
- }
- }
+
+ qlt_schedule_sess_for_deletion(fcport);
}
} else {
/* cable is connected */
@@ -3470,34 +3223,19 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
list) {
if ((conflict->d_id.b24 == ea->id.b24) &&
- (fcport != conflict)) {
- /* 2 fcports with conflict Nport ID or
+ (fcport != conflict))
+ /*
+ * 2 fcports with conflicting NPort IDs or
* an existing fcport is having nport ID
* conflict with new fcport.
*/
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC DS %d\n",
- __func__, __LINE__,
- conflict->port_name,
- conflict->disc_state);
conflict->scan_state = QLA_FCPORT_SCAN;
- switch (conflict->disc_state) {
- case DSC_DELETED:
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict->port_name);
- qlt_schedule_sess_for_deletion
- (conflict);
- break;
- }
- }
+
+ qlt_schedule_sess_for_deletion(conflict);
}
+ fcport->scan_needed = 0;
fcport->rscn_gen++;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
@@ -3548,19 +3286,7 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
conflict->disc_state);
conflict->scan_state = QLA_FCPORT_SCAN;
- switch (conflict->disc_state) {
- case DSC_DELETED:
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict->port_name);
- qlt_schedule_sess_for_deletion
- (conflict);
- break;
- }
+ qlt_schedule_sess_for_deletion(conflict);
}
}
@@ -3724,13 +3450,14 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_gpnid_sp_done;
+ ql_dbg(ql_dbg_disc, vha, 0x2067,
+ "Async-%s hdl=%x ID %3phC.\n", sp->name,
+ sp->handle, ct_req->req.port_id.port_id);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0x2067,
- "Async-%s hdl=%x ID %3phC.\n", sp->name,
- sp->handle, ct_req->req.port_id.port_id);
return rval;
done_free_sp:
@@ -3896,9 +3623,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
fc_port_t *fcport;
u32 i, rc;
bool found;
- struct fab_scan_rp *rp;
+ struct fab_scan_rp *rp, *trp;
unsigned long flags;
u8 recheck = 0;
+ u16 dup = 0, dup_cnt = 0;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s enter\n", __func__);
@@ -3929,6 +3657,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
for (i = 0; i < vha->hw->max_fibre_devices; i++) {
u64 wwn;
+ int k;
rp = &vha->scan.l[i];
found = false;
@@ -3937,6 +3666,20 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
if (wwn == 0)
continue;
+ /* Remove duplicate NPORT ID entries from switch database */
+ for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
+ trp = &vha->scan.l[k];
+ if (rp->id.b24 == trp->id.b24) {
+ dup = 1;
+ dup_cnt++;
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+ vha, 0xffff,
+ "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
+ rp->id.b24, rp->port_name, trp->port_name);
+ memset(trp, 0, sizeof(*trp));
+ }
+ }
+
if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
continue;
@@ -3951,7 +3694,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
continue;
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
fcport->scan_state = QLA_FCPORT_FOUND;
found = true;
/*
@@ -3976,25 +3719,30 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
}
}
+ if (dup) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Detected %d duplicate NPORT ID(s) from switch data base\n",
+ dup_cnt);
+ }
+
/*
* Logout all previous fabric dev marked lost, except FCP2 devices.
*/
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
continue;
}
if (fcport->scan_state != QLA_FCPORT_FOUND) {
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
if ((qla_dual_mode_enabled(vha) ||
qla_ini_mode_enabled(vha)) &&
atomic_read(&fcport->state) == FCS_ONLINE) {
- qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID) {
+ if (fcport->flags & FCF_FCP2_DEVICE)
+ fcport->logout_on_delete = 0;
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_FCP2_DEVICE) == 0) {
ql_dbg(ql_dbg_disc, vha, 0x20f0,
"%s %d %8phC post del sess\n",
__func__, __LINE__,
@@ -4005,7 +3753,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
}
}
} else {
- if (fcport->rscn_rcvd ||
+ if (fcport->scan_needed ||
fcport->disc_state != DSC_LOGIN_COMPLETE) {
if (fcport->login_retry == 0) {
fcport->login_retry =
@@ -4015,7 +3763,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
fcport->port_name, fcport->loop_id,
fcport->login_retry);
}
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
qla24xx_fcport_handle_login(vha, fcport);
}
}
@@ -4030,7 +3778,7 @@ out:
if (recheck) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->rscn_rcvd) {
+ if (fcport->scan_needed) {
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
break;
@@ -4039,6 +3787,41 @@ out:
}
}
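+/*
+ * Helpers that hand a completed GPNFT/GNNFT sp off to the work queue,
+ * either for done-processing or for a follow-up NVME scan, so that
+ * cleanup and rescans run in process context instead of the
+ * completion (interrupt) path.
+ */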
+static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
+ srb_t *sp, int cmd)
+{
+ struct qla_work_evt *e;
+
+ if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
+ return QLA_PARAMETER_ERROR;
+
+ e = qla2x00_alloc_work(vha, cmd);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.iosb.sp = sp;
+
+ return qla2x00_post_work(vha, e);
+}
+
+static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
+ srb_t *sp, int cmd)
+{
+ struct qla_work_evt *e;
+
+ if (cmd != QLA_EVT_GPNFT)
+ return QLA_PARAMETER_ERROR;
+
+ e = qla2x00_alloc_work(vha, cmd);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.gpnft.fc4_type = FC4_TYPE_NVME;
+ e->u.gpnft.sp = sp;
+
+ return qla2x00_post_work(vha, e);
+}
+
static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
struct srb *sp)
{
@@ -4139,120 +3922,85 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
{
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
- struct qla_work_evt *e;
struct ct_sns_req *ct_req =
(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
u16 cmd = be16_to_cpu(ct_req->command);
u8 fc4_type = sp->gen2;
unsigned long flags;
+ int rc;
/* gen2 field is holding the fc4type */
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async done-%s res %x FC4Type %x\n",
sp->name, res, sp->gen2);
+ del_timer(&sp->u.iocb_cmd.timer);
+ sp->rc = res;
if (res) {
unsigned long flags;
+ const char *name = sp->name;
- sp->free(sp);
- spin_lock_irqsave(&vha->work_lock, flags);
- vha->scan.scan_flags &= ~SF_SCANNING;
- vha->scan.scan_retry++;
- spin_unlock_irqrestore(&vha->work_lock, flags);
+ /*
+ * We are in an interrupt context; queue this
+ * sp up for GNNFT_DONE work so that all of
+ * its resources can be freed.
+ */
+ rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+ QLA_EVT_GNNFT_DONE);
+ if (rc) {
+ /* Cleanup here to prevent memory leak */
+ qla24xx_sp_unmap(vha, sp);
- if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- } else {
- ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
- "Async done-%s rescan failed on all retries\n",
- sp->name);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s rescan failed on all retries.\n",
+ name);
+ }
}
return;
}
- if (!res)
- qla2x00_find_free_fcp_nvme_slot(vha, sp);
+ qla2x00_find_free_fcp_nvme_slot(vha, sp);
if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
cmd == GNN_FT_CMD) {
- del_timer(&sp->u.iocb_cmd.timer);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
- if (!e) {
- /*
- * please ignore kernel warning. Otherwise,
- * we have mem leak.
- */
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- }
-
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async done-%s unable to alloc work element\n",
- sp->name);
- sp->free(sp);
+ sp->rc = res;
+ rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
+ if (rc) {
+ qla24xx_sp_unmap(vha, sp);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- return;
}
- e->u.gpnft.fc4_type = FC4_TYPE_NVME;
- sp->rc = res;
- e->u.gpnft.sp = sp;
-
- qla2x00_post_work(vha, e);
return;
}
- if (cmd == GPN_FT_CMD)
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
- else
- e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
- if (!e) {
- /* please ignore kernel warning. Otherwise, we have mem leak. */
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- }
+ if (cmd == GPN_FT_CMD) {
+ rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+ QLA_EVT_GPNFT_DONE);
+ } else {
+ rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+ QLA_EVT_GNNFT_DONE);
+ }
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async done-%s unable to alloc work element\n",
- sp->name);
- sp->free(sp);
+ if (rc) {
+ qla24xx_sp_unmap(vha, sp);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
return;
}
-
- sp->rc = res;
- e->u.iosb.sp = sp;
-
- qla2x00_post_work(vha, e);
}
/*
@@ -4285,11 +4033,13 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
WARN_ON(1);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
goto done_free_sp;
}
ql_dbg(ql_dbg_disc, vha, 0xfffff,
- "%s: FC4Type %x, CT-PASSTRHU %s command ctarg rsp size %d, ctarg req size %d\n",
+ "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
__func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
sp->u.iocb_cmd.u.ctarg.req_size);
@@ -4318,8 +4068,12 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->done = qla2x00_async_gpnft_gnnft_sp_done;
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
+ if (rval != QLA_SUCCESS) {
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
goto done_free_sp;
+ }
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
@@ -4351,7 +4105,6 @@ void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s enter\n", __func__);
- del_timer(&sp->u.iocb_cmd.timer);
qla24xx_async_gnnft(vha, sp, sp->gen2);
}
@@ -4444,9 +4197,9 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- rspsz = sizeof(struct ct_sns_gpnft_rsp) +
- ((vha->hw->max_fibre_devices - 1) *
- sizeof(struct ct_sns_gpn_ft_data));
+ rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
+ memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+ memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* CT_IU preamble */
@@ -4644,9 +4397,6 @@ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
__func__, fcport->port_name);
return;
} else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
return;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b934977c5c26..c72d8012fe2a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -52,12 +52,14 @@ qla2x00_sp_timeout(struct timer_list *t)
struct srb_iocb *iocb;
struct req_que *req;
unsigned long flags;
+ struct qla_hw_data *ha = sp->vha->hw;
- spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ WARN_ON_ONCE(irqs_disabled());
+ spin_lock_irqsave(&ha->hardware_lock, flags);
req = sp->qpair->req;
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
- spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
iocb->timeout(sp);
}
@@ -245,6 +247,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
}
+ ql_dbg(ql_dbg_disc, vha, 0x2072,
+ "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
+ "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->login_retry);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
fcport->flags |= FCF_LOGIN_NEEDED;
@@ -252,11 +260,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
}
- ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
- "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->login_retry);
return rval;
done_free_sp:
@@ -301,15 +304,16 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_logout_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2070,
"Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
fcport->port_name);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
return rval;
done_free_sp:
@@ -396,6 +400,9 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2066,
"%s %8phC: adisc fail: post delete\n",
__func__, ea->fcport->port_name);
+ /* deleted = 0 and logout_on_delete = 1 force fw session cleanup */
+ fcport->deleted = 0;
+ fcport->logout_on_delete = 1;
qlt_schedule_sess_for_deletion(ea->fcport);
return;
}
@@ -410,9 +417,8 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
__func__, ea->fcport->port_name);
return;
} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, ea->fcport->port_name);
- qla24xx_post_gidpn_work(vha, ea->fcport);
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -487,13 +493,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
sp->done = qla2x00_async_adisc_sp_done;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x206f,
"Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
return rval;
done_free_sp:
@@ -536,11 +544,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
}
if (fcport->last_rscn_gen != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20df,
- "%s %8phC rscn gen changed rscn %d|%d \n",
- __func__, fcport->port_name,
- fcport->last_rscn_gen, fcport->rscn_gen);
- qla24xx_post_gidpn_work(vha, fcport);
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
return;
} else if (fcport->last_login_gen != fcport->login_gen) {
ql_dbg(ql_dbg_disc, vha, 0x20e0,
@@ -787,6 +792,10 @@ qla24xx_async_gnl_sp_done(void *s, int res)
sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
sp->u.iocb_cmd.u.mbx.in_mb[2]);
+ if (res == QLA_FUNCTION_TIMEOUT)
+ return;
+
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.sp = sp;
ea.rc = res;
@@ -814,25 +823,24 @@ qla24xx_async_gnl_sp_done(void *s, int res)
(loop_id & 0x7fff));
}
- spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
INIT_LIST_HEAD(&h);
fcport = tf = NULL;
if (!list_empty(&vha->gnl.fcports))
list_splice_init(&vha->gnl.fcports, &h);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
list_del_init(&fcport->gnl_entry);
- spin_lock(&vha->hw->tgt.sess_lock);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
- spin_unlock(&vha->hw->tgt.sess_lock);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
}
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* create new fcport if fw has knowledge of new sessions */
for (i = 0; i < n; i++) {
port_id_t id;
@@ -865,6 +873,8 @@ qla24xx_async_gnl_sp_done(void *s, int res)
}
}
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ vha->gnl.sent = 0;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp);
@@ -884,27 +894,24 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
- spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
- if (!list_empty(&fcport->gnl_entry)) {
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
- rval = QLA_SUCCESS;
- goto done;
- }
-
- spin_lock(&vha->hw->tgt.sess_lock);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GNL;
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
- spin_unlock(&vha->hw->tgt.sess_lock);
list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
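+ /* a single outstanding gnlist command serves every queued fcport */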
+ if (vha->gnl.sent) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ return QLA_SUCCESS;
+ }
+ vha->gnl.sent = 1;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
@@ -970,8 +977,13 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
+ sp->u.iocb_cmd.u.mbx.in_dma);
+ return;
+ }
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.event = FCME_GPDB_DONE;
ea.fcport = fcport;
@@ -1147,14 +1159,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
sp->done = qla24xx_async_gpdb_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_disc, vha, 0x20dc,
"Async-%s %8phC hndl %x opt %x\n",
sp->name, fcport->port_name, sp->handle, opt);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
return rval;
done_free_sp:
@@ -1182,11 +1193,9 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
vha->fcport_count++;
ea->fcport->login_succ = 1;
- ql_dbg(ql_dbg_disc, vha, 0x20d6,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, ea->fcport->port_name,
- vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ qla24xx_sched_upd_fcport(ea->fcport);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
} else if (ea->fcport->login_succ) {
/*
* We have an existing session. A late RSCN delivery
@@ -1226,6 +1235,19 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
else
ls = pd->current_login_state & 0xf;
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+
switch (ls) {
case PDS_PRLI_COMPLETE:
__qla24xx_parse_gpdb(vha, fcport, pd);
@@ -1280,7 +1302,8 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
login = 1;
}
- if (login) {
+ if (login && fcport->login_retry) {
+ fcport->login_retry--;
if (fcport->loop_id == FC_NO_LOOP_ID) {
fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
rc = qla2x00_find_new_loop_id(vha, fcport);
@@ -1304,14 +1327,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
u16 data[2];
u64 wwn;
+ u16 sec;
- ql_dbg(ql_dbg_disc, vha, 0x20d8,
- "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8,
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->login_gen, fcport->login_retry,
- fcport->loop_id, fcport->scan_state);
+ fcport->login_gen, fcport->loop_id, fcport->scan_state);
if (fcport->scan_state != QLA_FCPORT_FOUND)
return 0;
@@ -1410,22 +1433,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
case DSC_LOGIN_FAILED:
- fcport->login_retry--;
- ql_dbg(ql_dbg_disc, vha, 0x20d0,
- "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
if (N2N_TOPO(vha->hw))
qla_chk_n2n_b4_login(vha, fcport);
else
- qla24xx_post_gidpn_work(vha, fcport);
+ qlt_schedule_sess_for_deletion(fcport);
break;
case DSC_LOGIN_COMPLETE:
/* recheck login state */
- ql_dbg(ql_dbg_disc, vha, 0x20d1,
- "%s %d %8phC post adisc\n",
- __func__, __LINE__, fcport->port_name);
- fcport->login_retry--;
data[0] = data[1] = 0;
qla2x00_post_async_adisc_work(vha, fcport, data);
break;
@@ -1435,6 +1450,22 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
qla24xx_post_prli_work(vha, fcport);
break;
+ case DSC_UPD_FCPORT:
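+ /*
+ * Remote port registration is still in flight;
+ * warn at most once a minute and retry via the
+ * relogin path.
+ */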
+ sec = jiffies_to_msecs(jiffies -
+ fcport->jiffies_at_registration)/1000;
+ if (fcport->sec_since_registration < sec && sec &&
+ !(sec % 60)) {
+ fcport->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phC - Slow Rport registration(%d Sec)\n",
+ __func__, fcport->port_name, sec);
+ }
+
+ if (fcport->next_disc_state != DSC_DELETE_PEND)
+ fcport->next_disc_state = DSC_ADISC;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+
default:
break;
}
@@ -1513,7 +1544,6 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
return;
}
@@ -1533,7 +1563,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
fc_port_t *f, *tf;
uint32_t id = 0, mask, rid;
- unsigned long flags;
fc_port_t *fcport;
switch (ea->event) {
@@ -1548,10 +1577,16 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
return;
switch (ea->id.b.rsvd_1) {
case RSCN_PORT_ADDR:
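+/*
+ * With BIGSCAN, a per-port RSCN schedules a deferred full fabric
+ * rescan; otherwise each affected port is queried individually via
+ * GPN_ID.
+ */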
+#define BIGSCAN 1
+#if defined(BIGSCAN) && BIGSCAN > 0
+ {
+ unsigned long flags;
fcport = qla2x00_find_fcport_by_nportid
(vha, &ea->id, 1);
- if (fcport)
- fcport->rscn_rcvd = 1;
+ if (fcport) {
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
spin_lock_irqsave(&vha->work_lock, flags);
if (vha->scan.scan_flags == 0) {
@@ -1561,7 +1596,26 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
schedule_delayed_work(&vha->scan.scan_work, 5);
}
spin_unlock_irqrestore(&vha->work_lock, flags);
-
+ }
+#else
+ {
+ int rc;
+ fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+ if (!fcport) {
+ /* cable moved */
+ rc = qla24xx_post_gpnid_work(vha, &ea->id);
+ if (rc) {
+ ql_log(ql_log_warn, vha, 0xd044,
+ "RSCN GPNID work failed %06x\n",
+ ea->id.b24);
+ }
+ } else {
+ ea->fcport = fcport;
+ fcport->scan_needed = 1;
+ qla24xx_handle_rscn_event(fcport, ea);
+ }
+ }
+#endif
break;
case RSCN_AREA_ADDR:
case RSCN_DOM_ADDR:
@@ -1597,9 +1651,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
}
break;
- case FCME_GIDPN_DONE:
- qla24xx_handle_gidpn_event(vha, ea);
- break;
case FCME_GNL_DONE:
qla24xx_handle_gnl_done_event(vha, ea);
break;
@@ -1639,6 +1690,34 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
}
}
+/*
+ * RSCN(s) came in for this fcport, but the RSCN(s) could not be
+ * consumed by the fcport.
+ */
+void qla_rscn_replay(fc_port_t *fcport)
+{
+ struct event_arg ea;
+
+ switch (fcport->disc_state) {
+ case DSC_DELETE_PEND:
+ return;
+ default:
+ break;
+ }
+
+ if (fcport->scan_needed) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_RSCN;
+ ea.id = fcport->d_id;
+ ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
+#if defined(BIGSCAN) && BIGSCAN > 0
+ qla2x00_fcport_event_handler(fcport->vha, &ea);
+#else
+ qla24xx_post_gpnid_work(fcport->vha, &ea.id);
+#endif
+ }
+}
+
static void
qla2x00_tmf_iocb_timeout(void *data)
{
@@ -1684,15 +1763,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
tm_iocb->u.tmf.data = tag;
sp->done = qla2x00_tmf_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_taskm, vha, 0x802f,
"Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
wait_for_completion(&tm_iocb->u.tmf.comp);
rval = tm_iocb->u.tmf.data;
@@ -1747,47 +1825,46 @@ int
qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
scsi_qla_host_t *vha = cmd_sp->vha;
- fc_port_t *fcport = cmd_sp->fcport;
struct srb_iocb *abt_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
+ GFP_KERNEL);
if (!sp)
goto done;
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
+ sp->qpair = cmd_sp->qpair;
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+ /* FW can send 2 x ABTS's, each with a 20s timeout */
+ qla2x00_init_timer(sp, 42);
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
-
- if (vha->flags.qpairs_available && cmd_sp->qpair)
- abt_iocb->u.abt.req_que_no =
- cpu_to_le16(cmd_sp->qpair->req->id);
- else
- abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
+ abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
sp->done = qla24xx_abort_sp_done;
+ ql_dbg(ql_dbg_async, vha, 0x507c,
+ "Abort command issued - hdl=%x, type=%x\n",
+ cmd_sp->handle, cmd_sp->type);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_async, vha, 0x507c,
- "Abort command issued - hdl=%x, target_id=%x\n",
- cmd_sp->handle, fcport->tgt_id);
-
if (wait) {
wait_for_completion(&abt_iocb->u.abt.comp);
rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ } else {
+ goto done;
}
done_free_sp:
@@ -1803,19 +1880,17 @@ qla24xx_async_abort_command(srb_t *sp)
uint32_t handle;
fc_port_t *fcport = sp->fcport;
+ struct qla_qpair *qpair = sp->qpair;
struct scsi_qla_host *vha = fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
-
- if (vha->flags.qpairs_available && sp->qpair)
- req = sp->qpair->req;
+ struct req_que *req = qpair->req;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
if (handle == req->num_outstanding_cmds) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
@@ -1876,7 +1951,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
"%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
- ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1,
+ ea->sp->gen1, fcport->rscn_gen,
ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
@@ -1898,9 +1973,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return;
} else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC RSCN generation changed\n",
+ __func__, fcport->port_name);
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -1952,25 +2029,15 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
cid.b.rsvd_1 = 0;
ql_dbg(ql_dbg_disc, vha, 0x20ec,
- "%s %d %8phC LoopID 0x%x in use post gnl\n",
+ "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
__func__, __LINE__, ea->fcport->port_name,
- ea->fcport->loop_id);
+ ea->fcport->loop_id, cid.b24);
- if (IS_SW_RESV_ADDR(cid)) {
- set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
- ea->fcport->loop_id = FC_NO_LOOP_ID;
- } else {
- qla2x00_clear_loop_id(ea->fcport);
- }
+ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+ ea->fcport->loop_id = FC_NO_LOOP_ID;
qla24xx_post_gnl_work(vha, ea->fcport);
break;
case MBS_PORT_ID_USED:
- ql_dbg(ql_dbg_disc, vha, 0x20ed,
- "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
- __func__, __LINE__, ea->fcport->port_name,
- ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
- ea->fcport->d_id.b.al_pa);
-
lid = ea->iop[1] & 0xffff;
qlt_find_sess_invalidate_other(vha,
wwn_to_u64(ea->fcport->port_name),
@@ -1989,8 +2056,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
"%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
__func__, __LINE__, ea->fcport->port_name,
ea->fcport->d_id.b24, lid);
- qla2x00_clear_loop_id(ea->fcport);
- qla24xx_post_gidpn_work(vha, ea->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ed,
"%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
@@ -2018,26 +2083,6 @@ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
return;
}
-void
-qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
- uint16_t *data)
-{
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
- if (data[0] == MBS_COMMAND_COMPLETE) {
- qla2x00_update_fcport(vha, fcport);
-
- return;
- }
-
- /* Retry login. */
- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- else
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
-
- return;
-}
-
/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
@@ -3527,6 +3572,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (rval == QLA_SUCCESS) {
qla24xx_detect_sfp(vha);
+ if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
+ (ha->zio_mode == QLA_ZIO_MODE_6))
+ qla27xx_set_zio_threshold(vha,
+ ha->last_zio_threshold);
+
rval = qla2x00_set_exlogins_buffer(vha);
if (rval != QLA_SUCCESS)
goto failed;
@@ -4015,6 +4065,7 @@ next_check:
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
QLA_FW_STARTED(ha);
+ vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
}
return (rval);
@@ -4728,6 +4779,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport = NULL;
}
INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+ INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
@@ -4853,19 +4905,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
*/
if (qla_tgt_mode_enabled(vha) ||
qla_dual_mode_enabled(vha)) {
- if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
- spin_lock_irqsave(&ha->tgt.atio_lock,
- flags);
- qlt_24xx_process_atio_queue(vha, 0);
- spin_unlock_irqrestore(
- &ha->tgt.atio_lock, flags);
- } else {
- spin_lock_irqsave(&ha->hardware_lock,
- flags);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- }
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+ qlt_24xx_process_atio_queue(vha, 0);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock,
+ flags);
}
}
}
@@ -4958,6 +5001,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
(uint8_t *)ha->gid_list,
entries * sizeof(struct gid_list_info));
+ if (entries == 0) {
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ }
+ } else {
+ vha->scan.scan_retry = 0;
+ }
+
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = QLA_FCPORT_SCAN;
}
@@ -5223,20 +5279,20 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
- fcport->vha = vha;
-
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
+ ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
+ __func__, fcport->port_name);
+
+ fcport->disc_state = DSC_UPD_FCPORT;
+ fcport->login_retry = vha->hw->login_retry_count;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
- fcport->disc_state = DSC_LOGIN_COMPLETE;
fcport->deleted = 0;
fcport->logout_on_delete = 1;
fcport->login_retry = vha->hw->login_retry_count;
fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
- qla2x00_iidma_fcport(vha, fcport);
-
switch (vha->hw->current_topology) {
case ISP_CFG_N:
case ISP_CFG_NL:
@@ -5246,6 +5302,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
+ qla2x00_iidma_fcport(vha, fcport);
+
if (fcport->fc4f_nvme) {
qla_nvme_register_remote(vha, fcport);
fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -5274,6 +5332,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
if (fcport->id_changed) {
fcport->id_changed = 0;
@@ -5290,7 +5350,36 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla24xx_post_gpsc_work(vha, fcport);
}
}
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+}
+
+void qla_register_fcport_fn(struct work_struct *work)
+{
+ fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
+ u32 rscn_gen = fcport->rscn_gen;
+ u16 data[2];
+
+ if (IS_SW_RESV_ADDR(fcport->d_id))
+ return;
+
+ qla2x00_update_fcport(fcport->vha, fcport);
+
+ if (rscn_gen != fcport->rscn_gen) {
+ /* RSCN(s) arrived while registration was in progress */
+ switch (fcport->next_disc_state) {
+ case DSC_DELETE_PEND:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ case DSC_ADISC:
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(fcport->vha, fcport,
+ data);
+ break;
+ default:
+ break;
+ }
+ }
}
/*
@@ -6494,6 +6583,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ ha->link_data_rate = PORT_SPEED_UNKNOWN;
SAVE_TOPO(ha);
ha->flags.rida_fmt2 = 0;
ha->flags.n2n_ae = 0;
@@ -6622,6 +6712,20 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
return status;
}
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (!qla_tgt_mode_enabled(vha))
+ return 0;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ if (!qla_dual_mode_enabled(vha))
+ return 0;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ default:
+ break;
+ }
+
ha->isp_ops->get_flash_version(vha, req->ring);
ha->isp_ops->nvram_config(vha);
@@ -6682,7 +6786,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
* The next call disables the board
* completely.
*/
- ha->isp_ops->reset_adapter(vha);
+ qla2x00_abort_isp_cleanup(vha);
vha->flags.online = 0;
clear_bit(ISP_ABORT_RETRY,
&vha->dpc_flags);
@@ -7142,7 +7246,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
}
icb->firmware_options_2 &= cpu_to_le32(
~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
- vha->flags.process_response_queue = 0;
if (ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = QLA_ZIO_MODE_6;
@@ -7153,7 +7256,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
icb->firmware_options_2 |= cpu_to_le32(
(uint32_t)ha->zio_mode);
icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
- vha->flags.process_response_queue = 1;
}
if (rval) {
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 4351736b2426..512c3c37b447 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -209,7 +209,8 @@ qla2x00_chip_is_down(scsi_qla_host_t *vha)
}
static inline srb_t *
-qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
+ fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
uint8_t bail;
@@ -225,7 +226,9 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->iocbs = 1;
- sp->vha = qpair->vha;
+ sp->vha = vha;
+ sp->qpair = qpair;
+ sp->cmd_type = TYPE_SRB;
INIT_LIST_HEAD(&sp->elem);
done:
@@ -246,19 +249,17 @@ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
uint8_t bail;
+ struct qla_qpair *qpair;
QLA_VHA_MARK_BUSY(vha, bail);
if (unlikely(bail))
return NULL;
- sp = mempool_alloc(vha->hw->srb_mempool, flag);
+ qpair = vha->hw->base_qpair;
+ sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
if (!sp)
goto done;
- memset(sp, 0, sizeof(*sp));
- sp->fcport = fcport;
- sp->cmd_type = TYPE_SRB;
- sp->iocbs = 1;
sp->vha = vha;
done:
if (!sp)
@@ -270,7 +271,7 @@ static inline void
qla2x00_rel_sp(srb_t *sp)
{
QLA_VHA_MARK_NOT_BUSY(sp->vha);
- mempool_free(sp, sp->vha->hw->srb_mempool);
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
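A note on the pairing above: with SRBs now drawn from a per-queue-pair
mempool, allocation and release must name the same pool. A sketch, assuming
the qpair carries the srb_mempool pointer wired up in qla_init_base_qpair():

    srb_t *sp = mempool_alloc(qpair->srb_mempool, GFP_ATOMIC);
    if (!sp)
        return NULL;
    sp->qpair = qpair;                 /* remember the owning pool */

    /* ... use sp ... */

    mempool_free(sp, sp->qpair->srb_mempool);   /* release to the same pool */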
static inline void
@@ -317,13 +318,13 @@ static inline bool
qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
{
if (qla_ini_mode_enabled(vha) &&
- (ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
+ (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
return true;
else if (qla_tgt_mode_enabled(vha) &&
- (ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
+ (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
return true;
else if (qla_dual_mode_enabled(vha) &&
- ((ql2xiniexchg + ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
+ ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
return true;
else
return false;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 42ac8e097419..86fb8b21aa71 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1526,12 +1526,6 @@ qla24xx_start_scsi(srb_t *sp)
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
-
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -1725,12 +1719,6 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
-
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1880,11 +1868,6 @@ qla2xxx_start_scsi_mq(srb_t *sp)
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
-
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_SUCCESS;
@@ -2287,8 +2270,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
- if (!sp->fcport->se_sess ||
- !sp->fcport->keep_nport_handle)
+ if (!sp->fcport->keep_nport_handle)
logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2659,7 +2641,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
struct qla_hw_data *ha = vha->hw;
int rval = QLA_SUCCESS;
void *ptr, *resp_ptr;
- dma_addr_t ptr_dma;
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -2691,7 +2672,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ptr = elsio->u.els_plogi.els_plogi_pyld =
dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
- ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
if (!elsio->u.els_plogi.els_plogi_pyld) {
rval = QLA_FUNCTION_FAILED;
@@ -3314,19 +3294,21 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
struct srb_iocb *aio = &sp->u.iocb_cmd;
scsi_qla_host_t *vha = sp->vha;
- struct req_que *req = vha->req;
+ struct req_que *req = sp->qpair->req;
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
- abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ if (sp->fcport) {
+ abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ }
abt_iocb->handle_to_abort =
cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
aio->u.abt.cmd_hndl));
- abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
- abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
- abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
abt_iocb->vp_index = vha->vp_idx;
abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
/* Send the command to the firmware */
@@ -3455,12 +3437,13 @@ qla2x00_start_sp(srb_t *sp)
int rval;
scsi_qla_host_t *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qp = sp->qpair;
void *pkt;
unsigned long flags;
rval = QLA_FUNCTION_FAILED;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- pkt = qla2x00_alloc_iocbs(vha, sp);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
if (!pkt) {
ql_log(ql_log_warn, vha, 0x700c,
"qla2x00_alloc_iocbs failed.\n");
@@ -3538,9 +3521,9 @@ qla2x00_start_sp(srb_t *sp)
}
wmb();
- qla2x00_start_iocbs(vha, ha->req_q_map[0]);
+ qla2x00_start_iocbs(vha, qp->req);
done:
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 36cbb29c84f6..d73b04e40590 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1850,11 +1850,12 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
uint16_t state_flags;
struct nvmefc_fcp_req *fd;
- uint16_t ret = 0;
+ uint16_t ret = QLA_SUCCESS;
+ uint16_t comp_status = le16_to_cpu(sts->comp_status);
iocb = &sp->u.iocb_cmd;
fcport = sp->fcport;
- iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
+ iocb->u.nvme.comp_status = comp_status;
state_flags = le16_to_cpu(sts->state_flags);
fd = iocb->u.nvme.desc;
@@ -1892,28 +1893,35 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
fd->transferred_length = fd->payload_length -
le32_to_cpu(sts->residual_len);
- switch (le16_to_cpu(sts->comp_status)) {
+ if (unlikely(comp_status != CS_COMPLETE))
+ ql_log(ql_log_warn, fcport->vha, 0x5060,
+ "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
+ sp->name, sp->handle, comp_status,
+ fd->transferred_length, le32_to_cpu(sts->residual_len),
+ sts->ox_id);
+
+ /*
+ * If this is a transport error, fail the request (the HBA
+ * rejected it); otherwise let the transport layer handle it.
+ */
+ switch (comp_status) {
case CS_COMPLETE:
- ret = QLA_SUCCESS;
break;
- case CS_ABORTED:
+
case CS_RESET:
case CS_PORT_UNAVAILABLE:
case CS_PORT_LOGGED_OUT:
+ fcport->nvme_flag |= NVME_FLAG_RESETTING;
+ /* fall through */
+ case CS_ABORTED:
case CS_PORT_BUSY:
- ql_log(ql_log_warn, fcport->vha, 0x5060,
- "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
- sp->name, sp->handle, sts->comp_status,
- le32_to_cpu(sts->residual_len), sts->ox_id);
fd->transferred_length = 0;
iocb->u.nvme.rsp_pyld_len = 0;
ret = QLA_ABORTED;
break;
+ case CS_DATA_UNDERRUN:
+ break;
default:
- ql_log(ql_log_warn, fcport->vha, 0x5060,
- "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
- sp->name, sp->handle, sts->comp_status,
- le32_to_cpu(sts->residual_len), sts->ox_id);
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2837,6 +2845,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
case ELS_IOCB_TYPE:
case ABORT_IOCB_TYPE:
case MBX_IOCB_TYPE:
+ default:
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
sp->done(sp, res);
@@ -2847,7 +2856,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
- default:
return 1;
}
fatal:
@@ -3121,6 +3129,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
uint16_t mb[8];
struct rsp_que *rsp;
unsigned long flags;
+ bool process_atio = false;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -3181,22 +3190,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
case INTR_ATIO_QUE_UPDATE_27XX:
- case INTR_ATIO_QUE_UPDATE:{
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+ case INTR_ATIO_QUE_UPDATE:
+ process_atio = true;
break;
- }
- case INTR_ATIO_RSP_QUE_UPDATE: {
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
-
+ case INTR_ATIO_RSP_QUE_UPDATE:
+ process_atio = true;
qla24xx_process_response_queue(vha, rsp);
break;
- }
default:
ql_dbg(ql_dbg_async, vha, 0x504f,
"Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -3210,6 +3210,12 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (process_atio) {
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+ qlt_24xx_process_atio_queue(vha, 0);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+ }
+
return IRQ_HANDLED;
}
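Both interrupt handlers now share the same shape: latch a flag while walking
the response ring under hardware_lock, then take atio_lock exactly once after
the walk, instead of nesting lock acquisitions per ring entry. Condensed:

    bool process_atio = false;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    /* ring walk: INTR_ATIO_QUE_UPDATE et al. only set the flag */
    process_atio = true;
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    if (process_atio) {
        spin_lock_irqsave(&ha->tgt.atio_lock, flags);
        qlt_24xx_process_atio_queue(vha, 0);
        spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
    }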
@@ -3256,6 +3262,7 @@ qla24xx_msix_default(int irq, void *dev_id)
uint32_t hccr;
uint16_t mb[8];
unsigned long flags;
+ bool process_atio = false;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -3312,22 +3319,13 @@ qla24xx_msix_default(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
case INTR_ATIO_QUE_UPDATE_27XX:
- case INTR_ATIO_QUE_UPDATE:{
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+ case INTR_ATIO_QUE_UPDATE:
+ process_atio = true;
break;
- }
- case INTR_ATIO_RSP_QUE_UPDATE: {
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
-
+ case INTR_ATIO_RSP_QUE_UPDATE:
+ process_atio = true;
qla24xx_process_response_queue(vha, rsp);
break;
- }
default:
ql_dbg(ql_dbg_async, vha, 0x5051,
"Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -3338,6 +3336,12 @@ qla24xx_msix_default(int irq, void *dev_id)
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (process_atio) {
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+ qlt_24xx_process_atio_queue(vha, 0);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2c6c2cd5a0d0..2f3e5075ae76 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -60,6 +60,7 @@ static struct rom_cmd {
{ MBC_GET_ADAPTER_LOOP_ID },
{ MBC_READ_SFP },
{ MBC_GET_RNID_PARAMS },
+ { MBC_GET_SET_ZIO_THRESHOLD },
};
static int is_rom_cmd(uint16_t cmd)
@@ -189,7 +190,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
goto premature_exit;
}
- ha->flags.mbox_busy = 1;
+
/* Save mailbox command for debug */
ha->mcp = mcp;
@@ -198,12 +199,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
+ ha->flags.mbox_busy) {
rval = QLA_ABORTED;
- ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
goto premature_exit;
}
+ ha->flags.mbox_busy = 1;
/* Load mailbox registers. */
if (IS_P3P_TYPE(ha))
@@ -254,9 +256,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (IS_P3P_TYPE(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
+ ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
- ha->flags.mbox_busy = 0;
+
atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
@@ -274,6 +277,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
+ if (chip_reset != ha->chip_reset) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ atomic_dec(&ha->num_pend_mbx_stage2);
+ atomic_dec(&ha->num_pend_mbx_stage3);
+ rval = QLA_ABORTED;
+ goto premature_exit;
+ }
ql_dbg(ql_dbg_mbx, vha, 0x117a,
"cmd=%x Timeout.\n", command);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -282,7 +295,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
} else if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
atomic_dec(&ha->num_pend_mbx_stage2);
atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
@@ -300,9 +315,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (IS_P3P_TYPE(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
+ ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
- ha->flags.mbox_busy = 0;
atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
@@ -320,7 +335,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
while (!ha->flags.mbox_int) {
if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
atomic_dec(&ha->num_pend_mbx_stage2);
rval = QLA_ABORTED;
goto premature_exit;
@@ -363,7 +381,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ha->mcp = NULL;
@@ -436,7 +457,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
* then only PCI ERR flag would be set.
* we will do premature exit for above case.
*/
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
@@ -451,8 +475,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_FUNCTION_TIMEOUT;
}
}
-
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Clean up */
ha->mcp = NULL;
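The recurring fix through these hunks is that mbox_busy is only ever tested,
set, and cleared while holding hardware_lock, closing the window where two
submitters could both observe the flag clear. A sketch of the discipline:

    spin_lock_irqsave(&ha->hardware_lock, flags);
    if (ha->flags.mbox_busy) {         /* another command owns the mailbox */
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_ABORTED;
    }
    ha->flags.mbox_busy = 1;           /* claim under the lock */
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    /* ... issue the command, wait for completion ... */

    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->flags.mbox_busy = 0;           /* release under the same lock */
    spin_unlock_irqrestore(&ha->hardware_lock, flags);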
@@ -493,7 +518,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- } else if (!abort_active) {
+ } else if (current == ha->dpc_thread) {
/* call abort directly since we are in the DPC thread */
ql_dbg(ql_dbg_mbx, vha, 0x101d,
"Timeout, calling abort_isp.\n");
@@ -1486,7 +1511,6 @@ qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
struct req_que *req;
struct rsp_que *rsp;
- l = l;
vha = fcport->vha;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
@@ -3072,22 +3096,25 @@ qla24xx_abort_command(srb_t *sp)
struct scsi_qla_host *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
+ struct qla_qpair *qpair = sp->qpair;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
if (vha->flags.qpairs_available && sp->qpair)
req = sp->qpair->req;
+ else
+ return QLA_FUNCTION_FAILED;
if (ql2xasynctmfenable)
return qla24xx_async_abort_command(sp);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
if (handle == req->num_outstanding_cmds) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
@@ -3762,10 +3789,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- if (IS_CNA_CAPABLE(vha->hw))
- mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
- else
- mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
+ mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_3|MBX_1|MBX_0;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 20d9dc39f0fb..7e78e7eff783 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -506,7 +506,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
return -EBUSY;
/* Alloc SRB structure */
- sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
if (!sp)
return -EBUSY;
@@ -607,7 +607,7 @@ void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
{
int rval;
- if (!test_bit(ABORT_ISP_ACTIVE, &sp->vha->dpc_flags)) {
+ if (ha->flags.fw_started) {
rval = ha->isp_ops->abort_command(sp);
if (!rval && !qla_nvme_wait_on_command(sp))
ql_log(ql_log_warn, NULL, 0x2112,
@@ -660,9 +660,6 @@ void qla_nvme_delete(struct scsi_qla_host *vha)
__func__, fcport);
nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
- init_completion(&fcport->nvme_del_done);
- nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
- wait_for_completion(&fcport->nvme_del_done);
}
if (vha->nvme_local_port) {
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de2bc78449e7..121e18b3b9f8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3699,8 +3699,8 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait for pending cmds (physical and virtual) to complete */
- if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
- WAIT_HOST) == QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ WAIT_HOST)) {
ql_dbg(ql_dbg_init, vha, 0x00b3,
"Done wait for "
"pending commands.\n");
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 42b8f0d3e580..8794e54f43a9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -14,6 +14,8 @@
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
+#include <linux/refcount.h>
+
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -204,7 +206,7 @@ int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
- "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
+ "Default is 1 - Issue TM IOCBs via mailbox mechanism.");
int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
@@ -391,12 +393,14 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
struct qla_hw_data *ha = vha->hw;
rsp->qpair = ha->base_qpair;
rsp->req = req;
+ ha->base_qpair->hw = ha;
ha->base_qpair->req = req;
ha->base_qpair->rsp = rsp;
ha->base_qpair->vha = vha;
ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
+ ha->base_qpair->srb_mempool = ha->srb_mempool;
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
@@ -1012,7 +1016,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
else
goto qc24_target_busy;
- sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
if (!sp)
goto qc24_host_busy;
@@ -1212,10 +1216,14 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-static void
+static int
sp_get(struct srb *sp)
{
- atomic_inc(&sp->ref_count);
+ if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count))
+ /* ref_count is already zero; sp is being freed */
+ return ENXIO;
+ else
+ return 0;
}
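Unlike the old bare atomic_inc(), refcount_inc_not_zero() refuses to
resurrect an object whose count already hit zero, so every caller must now
check sp_get()'s return value before touching the SRB. Call-site sketch,
mirroring qla2xxx_eh_abort() below:

    spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    if (sp_get(sp)) {
        /* ref_count was already zero; sp is on its way to being freed */
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
        return SUCCESS;
    }
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
    /* sp stays valid until the extra reference is dropped via sp->done() */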
#define ISP_REG_DISCONNECT 0xffffffffU
@@ -1273,38 +1281,51 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int rval, wait = 0;
struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
"PCI/Register disconnect, exiting.\n");
return FAILED;
}
- if (!CMD_SP(cmd))
- return SUCCESS;
ret = fc_block_scsi_eh(cmd);
if (ret != 0)
return ret;
ret = SUCCESS;
- id = cmd->device->id;
- lun = cmd->device->lun;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
sp = (srb_t *) CMD_SP(cmd);
- if (!sp) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (!sp)
+ return SUCCESS;
+
+ qpair = sp->qpair;
+ if (!qpair)
+ return SUCCESS;
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ if (!CMD_SP(cmd)) {
+ /* There's a chance an interrupt could clear
+ * the ptr as part of done & free. */
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return SUCCESS;
}
+ if (sp_get(sp)) {
+ /* ref_count is already 0 */
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ return SUCCESS;
+ }
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ id = cmd->device->id;
+ lun = cmd->device->lun;
+
ql_dbg(ql_dbg_taskm, vha, 0x8002,
"Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
vha->host_no, id, lun, sp, cmd, sp->handle);
/* The reference taken above keeps sp valid with the lock dropped. */
- sp_get(sp);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
rval = ha->isp_ops->abort_command(sp);
if (rval) {
if (rval == QLA_FUNCTION_PARAMETER_ERROR)
@@ -1320,14 +1341,29 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
wait = 1;
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
- sp->done(sp, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ /*
+ * Clear the slot in the outstanding_cmds array if we can't find the
+ * command to reclaim the resources.
+ */
+ if (rval == QLA_FUNCTION_PARAMETER_ERROR)
+ vha->req->outstanding_cmds[sp->handle] = NULL;
+
+ /*
+ * sp->done will do ref_count--
+ * sp_get() took an extra count above
+ */
+ sp->done(sp, DID_RESET << 16);
/* Did the command return during mailbox execution? */
if (ret == FAILED && !CMD_SP(cmd))
ret = SUCCESS;
+ if (!CMD_SP(cmd))
+ wait = 0;
+
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
/* Wait for the command to be returned. */
if (wait) {
if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
@@ -1721,7 +1757,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
struct req_que *req;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_cmd *cmd;
- uint8_t trace = 0;
if (!ha->req_q_map)
return;
@@ -1731,64 +1766,68 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
- if (sp->cmd_type == TYPE_SRB) {
+ switch (sp->cmd_type) {
+ case TYPE_SRB:
if (sp->type == SRB_NVME_CMD ||
sp->type == SRB_NVME_LS) {
- sp_get(sp);
- spin_unlock_irqrestore(qp->qp_lock_ptr,
- flags);
- qla_nvme_abort(ha, sp, res);
- spin_lock_irqsave(qp->qp_lock_ptr,
- flags);
+ if (!sp_get(sp)) {
+ /* got sp */
+ spin_unlock_irqrestore
+ (qp->qp_lock_ptr,
+ flags);
+ qla_nvme_abort(ha, sp, res);
+ spin_lock_irqsave
+ (qp->qp_lock_ptr, flags);
+ }
} else if (GET_CMD_SP(sp) &&
!ha->flags.eeh_busy &&
(!test_bit(ABORT_ISP_ACTIVE,
&vha->dpc_flags)) &&
+ !qla2x00_isp_reg_stat(ha) &&
(sp->type == SRB_SCSI_CMD)) {
/*
- * Don't abort commands in
- * adapter during EEH
- * recovery as it's not
+ * Don't abort commands in adapter
+ * during EEH recovery as it's not
* accessible/responding.
*
- * Get a reference to the sp
- * and drop the lock. The
- * reference ensures this
- * sp->done() call and not the
- * call in qla2xxx_eh_abort()
- * ends the SCSI command (with
- * result 'res').
- */
- sp_get(sp);
- spin_unlock_irqrestore(qp->qp_lock_ptr,
- flags);
- status = qla2xxx_eh_abort(
- GET_CMD_SP(sp));
- spin_lock_irqsave(qp->qp_lock_ptr,
- flags);
- /*
- * Get rid of extra reference
- * if immediate exit from
- * ql2xxx_eh_abort
+ * Get a reference to the sp and drop
+ * the lock. The reference ensures this
+ * sp->done() call and not the call in
+ * qla2xxx_eh_abort() ends the SCSI cmd
+ * (with result 'res').
*/
- if (status == FAILED &&
- (qla2x00_isp_reg_stat(ha)))
- atomic_dec(
- &sp->ref_count);
+ if (!sp_get(sp)) {
+ spin_unlock_irqrestore
+ (qp->qp_lock_ptr, flags);
+ status = qla2xxx_eh_abort(
+ GET_CMD_SP(sp));
+ spin_lock_irqsave
+ (qp->qp_lock_ptr, flags);
+ }
}
sp->done(sp, res);
- } else {
+ break;
+ case TYPE_TGT_CMD:
if (!vha->hw->tgt.tgt_ops || !tgt ||
qla_ini_mode_enabled(vha)) {
- if (!trace)
- ql_dbg(ql_dbg_tgt_mgt,
- vha, 0xf003,
- "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
- vha->dpc_flags);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+ "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+ vha->dpc_flags);
continue;
}
cmd = (struct qla_tgt_cmd *)sp;
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ break;
+ case TYPE_TGT_TMCMD:
+ /*
+ * Currently, only ABTS response gets on the
+ * outstanding_cmds[]
+ */
+ ha->tgt.tgt_ops->free_mcmd(
+ (struct qla_tgt_mgmt_cmd *)sp);
+ break;
+ default:
+ break;
}
}
}
@@ -2708,7 +2747,7 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
struct scsi_qla_host, iocb_work);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- int i = 20;
+ int i = 2;
unsigned long flags;
if (test_bit(UNLOADING, &base_vha->dpc_flags))
@@ -2819,6 +2858,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
atomic_set(&ha->num_pend_mbx_stage1, 0);
atomic_set(&ha->num_pend_mbx_stage2, 0);
atomic_set(&ha->num_pend_mbx_stage3, 0);
+ atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
+ ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -4249,29 +4290,34 @@ static void
qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
{
u32 temp;
+ struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
*ret_cnt = FW_DEF_EXCHANGES_CNT;
if (max_cnt > vha->hw->max_exchg)
max_cnt = vha->hw->max_exchg;
if (qla_ini_mode_enabled(vha)) {
- if (ql2xiniexchg > max_cnt)
- ql2xiniexchg = max_cnt;
+ if (vha->ql2xiniexchg > max_cnt)
+ vha->ql2xiniexchg = max_cnt;
+
+ if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = vha->ql2xiniexchg;
- if (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
- *ret_cnt = ql2xiniexchg;
} else if (qla_tgt_mode_enabled(vha)) {
- if (ql2xexchoffld > max_cnt)
- ql2xexchoffld = max_cnt;
+ if (vha->ql2xexchoffld > max_cnt) {
+ vha->ql2xexchoffld = max_cnt;
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
+ }
- if (ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
- *ret_cnt = ql2xexchoffld;
+ if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = vha->ql2xexchoffld;
} else if (qla_dual_mode_enabled(vha)) {
- temp = ql2xiniexchg + ql2xexchoffld;
+ temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
if (temp > max_cnt) {
- ql2xiniexchg -= (temp - max_cnt)/2;
- ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
+ vha->ql2xiniexchg -= (temp - max_cnt)/2;
+ vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
temp = max_cnt;
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
}
if (temp > FW_DEF_EXCHANGES_CNT)
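Worked example of the dual-mode split above: with vha->ql2xiniexchg =
vha->ql2xexchoffld = 1024 and max_cnt = 2000, temp = 2048 overflows by 48;
the initiator side gives up 48/2 = 24 (leaving 1000) and the target side
48/2 + 1 = 25 (leaving 999), so the new sum 1999 fits under max_cnt, and the
extra 1 on the target side keeps the sum within bounds when the overflow is
odd.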
@@ -4309,6 +4355,12 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
if (totsz != ha->exchoffld_size) {
qla2x00_free_exchoffld_buffer(ha);
+ if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
+ ha->exchoffld_size = 0;
+ ha->flags.exchoffld_enabled = 0;
+ return QLA_SUCCESS;
+ }
+
ha->exchoffld_size = totsz;
ql_log(ql_log_info, vha, 0xd016,
@@ -4341,6 +4393,15 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
return -ENOMEM;
}
+ } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
+ /* pathological case */
+ qla2x00_free_exchoffld_buffer(ha);
+ ha->exchoffld_size = 0;
+ ha->flags.exchoffld_enabled = 0;
+ ql_log(ql_log_info, vha, 0xd016,
+ "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
+ ha->exchoffld_size, actual_cnt, size, totsz);
+ return 0;
}
/* Now configure the dma buffer */
@@ -4356,7 +4417,7 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
if (qla_ini_mode_enabled(vha))
icb->exchange_count = 0;
else
- icb->exchange_count = cpu_to_le16(ql2xexchoffld);
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
}
return rval;
@@ -4564,6 +4625,10 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
vha->host_no = host->host_no;
vha->hw = ha;
+ vha->qlini_mode = ql2x_ini_mode;
+ vha->ql2xexchoffld = ql2xexchoffld;
+ vha->ql2xiniexchg = ql2xiniexchg;
+
INIT_LIST_HEAD(&vha->vp_fcports);
INIT_LIST_HEAD(&vha->work_list);
INIT_LIST_HEAD(&vha->list);
@@ -4579,7 +4644,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
- spin_lock_init(&vha->gnl.fcports_lock);
init_waitqueue_head(&vha->fcport_waitQ);
init_waitqueue_head(&vha->vref_waitq);
@@ -4710,7 +4774,6 @@ qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
-qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
@@ -4761,16 +4824,25 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
return qla2x00_post_work(vha, e);
}
-int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+void qla24xx_sched_upd_fcport(fc_port_t *fcport)
{
- struct qla_work_evt *e;
+ unsigned long flags;
- e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT);
- if (!e)
- return QLA_FUNCTION_FAILED;
+ if (IS_SW_RESV_ADDR(fcport->d_id))
+ return;
- e->u.fcport.fcport = fcport;
- return qla2x00_post_work(vha, e);
+ spin_lock_irqsave(&fcport->vha->work_lock, flags);
+ if (fcport->disc_state == DSC_UPD_FCPORT) {
+ spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+ return;
+ }
+ fcport->jiffies_at_registration = jiffies;
+ fcport->sec_since_registration = 0;
+ fcport->next_disc_state = DSC_DELETED;
+ fcport->disc_state = DSC_UPD_FCPORT;
+ spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+
+ queue_work(system_unbound_wq, &fcport->reg_work);
}
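The work_lock critical section above is what makes rescheduling idempotent: a
port already in DSC_UPD_FCPORT is skipped instead of queued twice, and the
work runs on system_unbound_wq because registration may sleep for a long
time (see the "Slow Rport registration" message later in this patch).
Condensed sketch of the check-mark-queue shape:

    spin_lock_irqsave(&vha->work_lock, flags);
    if (fcport->disc_state == DSC_UPD_FCPORT) {    /* already scheduled */
        spin_unlock_irqrestore(&vha->work_lock, flags);
        return;
    }
    fcport->disc_state = DSC_UPD_FCPORT;           /* mark before unlocking */
    spin_unlock_irqrestore(&vha->work_lock, flags);

    queue_work(system_unbound_wq, &fcport->reg_work);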
static
@@ -4808,10 +4880,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
+ if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
fcport->fc4_type = FC4_TYPE_FCP_SCSI;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
+ if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
fcport->fc4_type = FC4_TYPE_OTHER;
fcport->fc4f_nvme = FC4_TYPE_NVME;
}
@@ -4990,19 +5062,12 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla2x00_async_adisc(vha, e->u.logio.fcport,
e->u.logio.data);
break;
- case QLA_EVT_ASYNC_ADISC_DONE:
- qla2x00_async_adisc_done(vha, e->u.logio.fcport,
- e->u.logio.data);
- break;
case QLA_EVT_UEVENT:
qla2x00_uevent_emit(vha, e->u.uevent.code);
break;
case QLA_EVT_AENFX:
qlafx00_process_aen(vha, e);
break;
- case QLA_EVT_GIDPN:
- qla24xx_async_gidpn(vha, e->u.fcport.fcport);
- break;
case QLA_EVT_GPNID:
qla24xx_async_gpnid(vha, &e->u.gpnid.id);
break;
@@ -5025,9 +5090,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_GPSC:
qla24xx_async_gpsc(vha, e->u.fcport.fcport);
break;
- case QLA_EVT_UPD_FCPORT:
- qla2x00_update_fcport(vha, e->u.fcport.fcport);
- break;
case QLA_EVT_GNL:
qla24xx_async_gnl(vha, e->u.fcport.fcport);
break;
@@ -6041,12 +6103,29 @@ qla2x00_do_dpc(void *data)
if (test_and_clear_bit
(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
!test_bit(UNLOADING, &base_vha->dpc_flags)) {
+ bool do_reset = true;
+
+ switch (base_vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_ENABLED:
+ break;
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (!qla_tgt_mode_enabled(base_vha) &&
+ !ha->flags.fw_started)
+ do_reset = false;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ if (!qla_dual_mode_enabled(base_vha) &&
+ !ha->flags.fw_started)
+ do_reset = false;
+ break;
+ default:
+ break;
+ }
- ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
- "ISP abort scheduled.\n");
- if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+ if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
-
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
+ "ISP abort scheduled.\n");
if (ha->isp_ops->abort_isp(base_vha)) {
/* failed. retry later */
set_bit(ISP_ABORT_NEEDED,
@@ -6054,10 +6133,9 @@ qla2x00_do_dpc(void *data)
}
clear_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
+ "ISP abort end.\n");
}
-
- ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
- "ISP abort end.\n");
}
if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
@@ -6183,17 +6261,28 @@ intr_on_check:
mutex_unlock(&ha->mq_lock);
}
- if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) {
+ if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED,
+ &base_vha->dpc_flags)) {
ql_log(ql_log_info, base_vha, 0xffffff,
"nvme: SET ZIO Activity exchange threshold to %d.\n",
ha->nvme_last_rptd_aen);
- if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) {
+ if (qla27xx_set_zio_threshold(base_vha,
+ ha->nvme_last_rptd_aen)) {
ql_log(ql_log_info, base_vha, 0xffffff,
- "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
- ha->nvme_last_rptd_aen);
+ "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
+ ha->nvme_last_rptd_aen);
}
}
+ if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
+ &base_vha->dpc_flags)) {
+ ql_log(ql_log_info, base_vha, 0xffffff,
+ "SET ZIO Activity exchange threshold to %d.\n",
+ ha->last_zio_threshold);
+ qla27xx_set_zio_threshold(base_vha,
+ ha->last_zio_threshold);
+ }
+
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
@@ -6406,13 +6495,24 @@ qla2x00_timer(struct timer_list *t)
* FC-NVME
* see if the active AEN count has changed from what was last reported.
*/
- if (!vha->vp_idx &&
- atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen &&
- ha->zio_mode == QLA_ZIO_MODE_6) {
+ if (!vha->vp_idx && (atomic_read(&ha->nvme_active_aen_cnt) !=
+ ha->nvme_last_rptd_aen) && ha->zio_mode == QLA_ZIO_MODE_6) {
ql_log(ql_log_info, vha, 0x3002,
- "nvme: Sched: Set ZIO exchange threshold to %d.\n",
- ha->nvme_last_rptd_aen);
+ "nvme: Sched: Set ZIO exchange threshold to %d.\n",
+ ha->nvme_last_rptd_aen);
ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
+ set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
+ start_dpc++;
+ }
+
+ if (!vha->vp_idx &&
+ (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
+ (ha->zio_mode == QLA_ZIO_MODE_6) &&
+ (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
+ ql_log(ql_log_info, vha, 0x3002,
+ "Sched: Set ZIO exchange threshold to %d.\n",
+ ha->last_zio_threshold);
+ ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
start_dpc++;
}
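Pattern worth noting here: the timer runs in atomic context where a mailbox
command cannot be issued, so it only records the new value and raises a DPC
flag; the DPC thread later makes the actual qla27xx_set_zio_threshold() call
in process context. Condensed:

    /* timer (atomic context): detect and defer */
    if (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) {
        ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
        set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
        start_dpc++;
    }

    /* dpc thread (process context): issue the mailbox command */
    if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags))
        qla27xx_set_zio_threshold(base_vha, ha->last_zio_threshold);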
@@ -6839,8 +6939,6 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
"The device failed to resume I/O from slot/link_reset.\n");
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
ha->flags.eeh_busy = 0;
}
@@ -6946,6 +7044,9 @@ qla2x00_module_init(void)
if (ql2xextended_error_logging == 1)
ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+ if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
+ qla_insert_tgt_attrs();
+
qla2xxx_transport_template =
fc_attach_transport(&qla2xxx_transport_functions);
if (!qla2xxx_transport_template) {
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8c811b251d42..39828207bc1d 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -141,6 +141,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *,
struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
uint16_t);
+static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
+static inline uint32_t qlt_make_handle(struct qla_qpair *);
/*
* Global Variables
@@ -541,7 +543,6 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
qlt_response_pkt(host, rsp, pkt);
break;
}
-
default:
qlt_response_pkt(vha, rsp, pkt);
break;
@@ -600,14 +601,9 @@ void qla2x00_async_nack_sp_done(void *s, int res)
sp->fcport->login_succ = 1;
vha->fcport_count++;
-
- ql_dbg(ql_dbg_disc, vha, 0x20f3,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__,
- sp->fcport->port_name,
- vha->fcport_count);
- sp->fcport->disc_state = DSC_UPD_FCPORT;
- qla24xx_post_upd_fcport_work(vha, sp->fcport);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ qla24xx_sched_upd_fcport(sp->fcport);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
} else {
sp->fcport->login_retry = 0;
sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -1230,11 +1226,12 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
unsigned long flags;
+ u16 sec;
- if (sess->disc_state == DSC_DELETE_PEND)
+ switch (sess->disc_state) {
+ case DSC_DELETE_PEND:
return;
-
- if (sess->disc_state == DSC_DELETED) {
+ case DSC_DELETED:
if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
wake_up_all(&tgt->waitQ);
if (sess->vha->fcport_count == 0)
@@ -1243,11 +1240,26 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
return;
+ break;
+ case DSC_UPD_FCPORT:
+ /*
+ * This port has not finished reporting to the upper
+ * layer; let it finish.
+ */
+ sess->next_disc_state = DSC_DELETE_PEND;
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration)/1000;
+ if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+ "%s %8phC : Slow Rport registration(%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+ return;
+ default:
+ break;
}
- if (sess->deleted == QLA_SESS_DELETED)
- sess->logout_on_delete = 0;
-
spin_lock_irqsave(&sess->vha->work_lock, flags);
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
spin_unlock_irqrestore(&sess->vha->work_lock, flags);
@@ -1261,7 +1273,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
qla24xx_chk_fcp_state(sess);
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
- "Scheduling sess %p for deletion\n", sess);
+ "Scheduling sess %p for deletion %8phC\n",
+ sess, sess->port_name);
INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
@@ -1479,27 +1492,14 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
struct qla_hw_data *ha = tgt->ha;
unsigned long flags;
+ mutex_lock(&ha->optrom_mutex);
mutex_lock(&qla_tgt_mutex);
- if (!vha->fc_vport) {
- struct Scsi_Host *sh = vha->host;
- struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
- bool npiv_vports;
-
- spin_lock_irqsave(sh->host_lock, flags);
- npiv_vports = (fc_host->npiv_vports_inuse);
- spin_unlock_irqrestore(sh->host_lock, flags);
-
- if (npiv_vports) {
- mutex_unlock(&qla_tgt_mutex);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
- "NPIV is in use. Can not stop target\n");
- return -EPERM;
- }
- }
+
if (tgt->tgt_stop || tgt->tgt_stopped) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
"Already in tgt->tgt_stop or tgt_stopped state\n");
mutex_unlock(&qla_tgt_mutex);
+ mutex_unlock(&ha->optrom_mutex);
return -EPERM;
}
@@ -1537,6 +1537,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
/* Wait for sessions to clear out (just in case) */
wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
+ mutex_unlock(&ha->optrom_mutex);
+
return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
@@ -1566,6 +1568,15 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
tgt);
+
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ vha->flags.online = 1;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+ default:
+ break;
+ }
}
EXPORT_SYMBOL(qlt_stop_phase2);
@@ -1715,6 +1726,94 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
qla2x00_start_iocbs(vha, qpair->req);
}
+static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ struct scsi_qla_host *vha = mcmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct abts_resp_to_24xx *resp;
+ uint32_t f_ctl, h;
+ uint8_t *p;
+ int rc;
+ struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
+ struct qla_qpair *qpair = mcmd->qpair;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe006,
+ "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
+ ha, mcmd->fc_tm_rsp);
+
+ rc = qlt_check_reserve_free_req(qpair, 1);
+ if (rc) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+ "qla_target(%d): %s failed: unable to allocate request packet\n",
+ vha->vp_idx, __func__);
+ return -EAGAIN;
+ }
+
+ resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
+ memset(resp, 0, sizeof(*resp));
+
+ h = qlt_make_handle(qpair);
+ if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+ /*
+ * CTIO type 7 from the firmware doesn't provide a way to
+ * know the initiator's LOOP ID, hence we can't find
+ * the session and thus the command.
+ */
+ return -EAGAIN;
+ } else {
+ qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
+ }
+
+ resp->handle = MAKE_HANDLE(qpair->req->id, h);
+ resp->entry_type = ABTS_RESP_24XX;
+ resp->entry_count = 1;
+ resp->nport_handle = abts->nport_handle;
+ resp->vp_index = vha->vp_idx;
+ resp->sof_type = abts->sof_type;
+ resp->exchange_address = abts->exchange_address;
+ resp->fcp_hdr_le = abts->fcp_hdr_le;
+ f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+ F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+ F_CTL_SEQ_INITIATIVE);
+ p = (uint8_t *)&f_ctl;
+ resp->fcp_hdr_le.f_ctl[0] = *p++;
+ resp->fcp_hdr_le.f_ctl[1] = *p++;
+ resp->fcp_hdr_le.f_ctl[2] = *p;
+
+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+
+ resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+ if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+ resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+ resp->payload.ba_acct.low_seq_cnt = 0x0000;
+ resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+ resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+ } else {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+ resp->payload.ba_rjt.reason_code =
+ BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+ /* Other bytes are zero */
+ }
+
+ vha->vha_tgt.qla_tgt->abts_resp_expected++;
+
+ /* Memory Barrier */
+ wmb();
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+
+ return rc;
+}
+
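The handle stored in resp->handle is what lets the completion path recover
mcmd: it encodes the request-queue id plus the outstanding_cmds[] slot, which
qlt_ctio_to_cmd() later decodes via GET_QID(). A sketch with a hypothetical
bit layout (the driver's actual MAKE_HANDLE/GET_QID macros may differ):

    /* hypothetical layout: queue id in the high 16 bits, slot in the low */
    #define MAKE_HANDLE(qid, h)  ((uint32_t)(((qid) << 16) | ((h) & 0xffff)))
    #define GET_QID(handle)      ((uint32_t)(handle) >> 16)

    resp->handle = MAKE_HANDLE(qpair->req->id, h);  /* encode on send */

    /* on completion: */
    req  = ha->req_q_map[GET_QID(handle)];
    mcmd = (struct qla_tgt_mgmt_cmd *)req->outstanding_cmds[h & 0xffff];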
/*
* ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
*/
@@ -1742,6 +1841,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
}
resp->entry_type = ABTS_RESP_24XX;
+ resp->handle = QLA_TGT_SKIP_HANDLE;
resp->entry_count = 1;
resp->nport_handle = abts->nport_handle;
resp->vp_index = vha->vp_idx;
@@ -1799,15 +1899,13 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
* ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
*/
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
- struct abts_resp_from_24xx_fw *entry)
+ struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
struct ctio7_to_24xx *ctio;
+ u16 tmp;
+ struct abts_recv_from_24xx *entry;
- ql_dbg(ql_dbg_tgt, vha, 0xe007,
- "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
-
- ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
- vha->hw->base_qpair, NULL);
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
if (ctio == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe04b,
"qla_target(%d): %s failed: unable to allocate "
@@ -1815,6 +1913,13 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
return;
}
+ if (mcmd)
+ /* abts from remote port */
+ entry = &mcmd->orig_iocb.abts;
+ else
+ /* abts from this driver. */
+ entry = (struct abts_recv_from_24xx *)pkt;
+
/*
* On entry we have the firmware's response to an ABTS response that we
* ourselves generated, so its ID fields are reversed.
@@ -1826,56 +1931,48 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = vha->vp_idx;
- ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
- ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
- ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
ctio->exchange_addr = entry->exchange_addr_to_abort;
- ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
- CTIO7_FLAGS_TERMINATE);
- ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
+ tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
- /* Memory Barrier */
- wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ if (mcmd) {
+ ctio->initiator_id[0] = entry->fcp_hdr_le.s_id[0];
+ ctio->initiator_id[1] = entry->fcp_hdr_le.s_id[1];
+ ctio->initiator_id[2] = entry->fcp_hdr_le.s_id[2];
- qlt_24xx_send_abts_resp(vha->hw->base_qpair,
- (struct abts_recv_from_24xx *)entry,
- FCP_TMF_CMPL, true);
-}
-
-static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
-{
- struct qla_tgt_sess_op *op;
- struct qla_tgt_cmd *cmd;
- unsigned long flags;
+ if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
+ tmp |= (mcmd->abort_io_attr << 9);
+ else if (qpair->retry_term_cnt & 1)
+ tmp |= (0x4 << 9);
+ } else {
+ ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+ ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+ ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
- if (tag == op->atio.u.isp24.exchange_addr) {
- op->aborted = true;
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- return 1;
- }
+ if (qpair->retry_term_cnt & 1)
+ tmp |= (0x4 << 9);
}
+ ctio->u.status1.flags = cpu_to_le16(tmp);
+ ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
- list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
- if (tag == op->atio.u.isp24.exchange_addr) {
- op->aborted = true;
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- return 1;
- }
- }
+ ql_dbg(ql_dbg_tgt, vha, 0xe007,
+ "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
+ le16_to_cpu(ctio->u.status1.flags),
+ le16_to_cpu(ctio->u.status1.ox_id),
+ (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
- list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
- if (tag == cmd->atio.u.isp24.exchange_addr) {
- cmd->aborted = 1;
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- return 1;
- }
- }
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+ /* Memory Barrier */
+ wmb();
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+
+ if (mcmd)
+ qlt_build_abts_resp_iocb(mcmd);
+ else
+ qlt_24xx_send_abts_resp(qpair,
+ (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
- return 0;
}
/* drop cmds for the given lun
@@ -1970,9 +2067,8 @@ static void qlt_do_tmr_work(struct work_struct *work)
spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
switch (mcmd->tmr_func) {
case QLA_TGT_ABTS:
- qlt_24xx_send_abts_resp(mcmd->qpair,
- &mcmd->orig_iocb.abts,
- FCP_TMF_REJECTED, false);
+ mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
+ qlt_build_abts_resp_iocb(mcmd);
break;
case QLA_TGT_LUN_RESET:
case QLA_TGT_CLEAR_TS:
@@ -2007,12 +2103,6 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct qla_tgt_mgmt_cmd *mcmd;
struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
- if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
- /* send TASK_ABORT response immediately */
- qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
- return 0;
- }
-
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
vha->vp_idx, abts->exchange_addr_to_abort);
@@ -2025,7 +2115,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
return -ENOMEM;
}
memset(mcmd, 0, sizeof(*mcmd));
-
+ mcmd->cmd_type = TYPE_TGT_TMCMD;
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
mcmd->reset_count = ha->base_qpair->chip_reset;
@@ -2047,6 +2137,8 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
if (abort_cmd && abort_cmd->qpair) {
mcmd->qpair = abort_cmd->qpair;
mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
+ mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
+ mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
}
}
@@ -2264,6 +2356,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
struct qla_qpair *qpair = mcmd->qpair;
+ bool free_mcmd = true;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
"TM response mcmd (%p) status %#x state %#x",
@@ -2302,10 +2395,10 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
&mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
}
} else {
- if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
- qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts,
- mcmd->fc_tm_rsp, false);
- else
+ if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
+ qlt_build_abts_resp_iocb(mcmd);
+ free_mcmd = false;
+ } else
qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
mcmd->fc_tm_rsp);
}
@@ -2317,7 +2410,9 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
* descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
* qlt_xmit_tm_rsp() returns here..
*/
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ if (free_mcmd)
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
@@ -2330,7 +2425,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
BUG_ON(cmd->sg_cnt == 0);
prm->sg = (struct scatterlist *)cmd->sg;
- prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
+ prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
cmd->sg_cnt, cmd->dma_data_direction);
if (unlikely(prm->seg_cnt == 0))
goto out_err;
@@ -2357,7 +2452,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
if (cmd->prot_sg_cnt) {
prm->prot_sg = cmd->prot_sg;
- prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
+ prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (unlikely(prm->prot_seg_cnt == 0))
@@ -2392,12 +2487,12 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
qpair = cmd->qpair;
- pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
+ dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
cmd->dma_data_direction);
cmd->sg_mapped = 0;
if (cmd->prot_sg_cnt)
- pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+ dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (!cmd->ctx)
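
[Editor's note] The two hunks above belong to this series' tree-wide move off the legacy pci_map_sg()/pci_unmap_sg() wrappers onto the generic DMA API. The conversion is mechanical: pass &pdev->dev rather than the pci_dev, use enum dma_data_direction values, and keep treating a return of zero mapped entries as failure. A minimal sketch of the idiom, with hypothetical helper names (not driver code):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

/* Map an sg list for a PCI device via the generic DMA API. */
static int example_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
			  int nents, enum dma_data_direction dir)
{
	int mapped = dma_map_sg(&pdev->dev, sg, nents, dir);

	if (!mapped)		/* 0 means the mapping failed */
		return -ENOMEM;
	return mapped;		/* may be < nents if entries were merged */
}

static void example_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	/* Unmap with the original nents, not the mapped count. */
	dma_unmap_sg(&pdev->dev, sg, nents, dir);
}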
@@ -3289,7 +3384,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+ spin_lock(&cmd->cmd_lock);
cmd->cmd_sent_to_fw = 1;
+ spin_unlock(&cmd->cmd_lock);
+ cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
/* Memory Barrier */
wmb();
@@ -3367,7 +3465,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
qlt_load_data_segments(&prm);
cmd->state = QLA_TGT_STATE_NEED_DATA;
+ spin_lock(&cmd->cmd_lock);
cmd->cmd_sent_to_fw = 1;
+ spin_unlock(&cmd->cmd_lock);
+ cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
/* Memory Barrier */
wmb();
@@ -3825,10 +3926,10 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
/* ha->hardware_lock supposed to be held on entry */
-static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
struct rsp_que *rsp, uint32_t handle, void *ctio)
{
- struct qla_tgt_cmd *cmd = NULL;
+ void *cmd = NULL;
struct req_que *req;
int qid = GET_QID(handle);
uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
@@ -3857,7 +3958,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return NULL;
}
- cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h];
+ cmd = (void *) req->outstanding_cmds[h];
if (unlikely(cmd == NULL)) {
ql_dbg(ql_dbg_async, vha, 0xe053,
"qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
@@ -3930,7 +4031,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
return;
}
- cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
+ cmd = (struct qla_tgt_cmd *)qlt_ctio_to_cmd(vha, rsp, handle, ctio);
if (cmd == NULL)
return;
@@ -3941,12 +4042,20 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
if (unlikely(status != CTIO_SUCCESS)) {
switch (status & 0xFFFF) {
+ case CTIO_INVALID_RX_ID:
+ if (printk_ratelimit())
+ dev_info(&vha->hw->pdev->dev,
+ "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
+ vha->vp_idx, cmd->atio.u.isp24.attr,
+ ((cmd->ctio_flags >> 9) & 0xf),
+ cmd->ctio_flags);
+
+ break;
case CTIO_LIP_RESET:
case CTIO_TARGET_RESET:
case CTIO_ABORTED:
/* driver request abort via Terminate exchange */
case CTIO_TIMEOUT:
- case CTIO_INVALID_RX_ID:
/* They are OK */
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
"qla_target(%d): CTIO with "
@@ -3973,7 +4082,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
* Session is already logged out, but we need
* to notify initiator, who's not aware of this
*/
- cmd->sess->logout_on_delete = 0;
cmd->sess->send_els_logo = 1;
ql_dbg(ql_dbg_disc, vha, 0x20f8,
"%s %d %8phC post del sess\n",
@@ -4711,6 +4819,12 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
sess = qlt_find_sess_invalidate_other(vha, wwn,
port_id, loop_id, &conflict_sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
}
if (IS_SW_RESV_ADDR(port_id)) {
@@ -4752,6 +4866,32 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
goto out;
}
+ if (sess->disc_state == DSC_UPD_FCPORT) {
+ u16 sec;
+
+ /*
+ * Remote port registration is still going on from
+ * previous login. Allow it to finish before we
+ * accept the new login.
+ */
+ sess->next_disc_state = DSC_DELETE_PEND;
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration) / 1000;
+ if (sess->sec_since_registration < sec && sec &&
+ !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC - Slow Rport registration (%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+
+ if (!conflict_sess)
+ kmem_cache_free(qla_tgt_plogi_cachep, pla);
+
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
sess->d_id = port_id;
sess->login_gen++;
@@ -4910,6 +5050,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
if (sess != NULL) {
bool delete = false;
+ int sec;
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
switch (sess->fw_login_state) {
case DSC_LS_PLOGI_PEND:
@@ -4922,9 +5063,24 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
switch (sess->disc_state) {
+ case DSC_UPD_FCPORT:
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
+ flags);
+
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration)/1000;
+ if (sess->sec_since_registration < sec && sec &&
+ !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+ "%s %8phC : Slow Rport registration(%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ return 0;
+
case DSC_LOGIN_PEND:
case DSC_GPDB:
- case DSC_UPD_FCPORT:
case DSC_LOGIN_COMPLETE:
case DSC_ADISC:
delete = false;
@@ -5608,6 +5764,101 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
tgt->atio_irq_cmd_count--;
}
+/*
+ * qpair lock is assumed to be held
+ * rc == 0: send terminate & ABTS response
+ * rc != 0: do not send terminate & ABTS response
+ */
+static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
+ struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rc = 0;
+
+ /*
+ * Detect an unresolved exchange: if the same ABTS cannot
+ * terminate an existing command and keeps looping between
+ * the FW and the driver, force a FW dump. Within one jiffy
+ * we should see multiple loops.
+ */
+ if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
+ qpair->retry_term_jiff == jiffies) {
+ /* found existing exchange */
+ qpair->retry_term_cnt++;
+ if (qpair->retry_term_cnt >= 5) {
+ rc = EIO;
+ qpair->retry_term_cnt = 0;
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to send ABTS Respond. Dumping firmware.\n");
+ ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
+ vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
+
+ if (qpair == ha->base_qpair)
+ ha->isp_ops->fw_dump(vha, 1);
+ else
+ ha->isp_ops->fw_dump(vha, 0);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else if (qpair->retry_term_jiff != jiffies) {
+ qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
+ qpair->retry_term_cnt = 0;
+ qpair->retry_term_jiff = jiffies;
+ }
+
+ return rc;
+}
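
[Editor's note] The detection scheme in qlt_chk_unresolv_exchg() is a generic pattern: remember the last offending value together with the jiffy in which it was seen, and escalate only when the same value recurs enough times within a single jiffy. A minimal kernel-style sketch of the pattern with hypothetical names (the driver escalates by forcing a firmware dump and an ISP abort):

#include <linux/jiffies.h>
#include <linux/types.h>

struct retry_loop_det {
	u32 last_val;            /* last offending value seen */
	unsigned long last_jiff; /* jiffy in which it was seen */
	int cnt;                 /* repeats within that jiffy */
};

/* Return true once @val has repeated @threshold times within one jiffy. */
static bool retry_loop_detected(struct retry_loop_det *d, u32 val, int threshold)
{
	if (d->last_val == val && d->last_jiff == jiffies) {
		if (++d->cnt >= threshold) {
			d->cnt = 0;
			return true;	/* tight loop: caller escalates */
		}
	} else if (d->last_jiff != jiffies) {
		/* New jiffy: restart tracking with the current value. */
		d->last_val = val;
		d->last_jiff = jiffies;
		d->cnt = 0;
	}
	return false;
}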
+
+
+static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, response_t *pkt)
+{
+ struct abts_resp_from_24xx_fw *entry =
+ (struct abts_resp_from_24xx_fw *)pkt;
+ u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ struct qla_hw_data *ha = vha->hw;
+
+ mcmd = (struct qla_tgt_mgmt_cmd *)qlt_ctio_to_cmd(vha, rsp,
+ pkt->handle, pkt);
+ if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
+ ql_dbg(ql_dbg_async, vha, 0xe064,
+ "qla_target(%d): ABTS Comp without mcmd\n",
+ vha->vp_idx);
+ return;
+ }
+
+ if (mcmd)
+ vha = mcmd->vha;
+ vha->vha_tgt.qla_tgt->abts_resp_expected--;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe038,
+ "ABTS_RESP_24XX: compl_status %x\n",
+ entry->compl_status);
+
+ if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
+ if ((entry->error_subcode1 == 0x1E) &&
+ (entry->error_subcode2 == 0)) {
+ if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ return;
+ }
+ qlt_24xx_retry_term_exchange(vha, rsp->qpair,
+ pkt, mcmd);
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe063,
+ "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
+ vha->vp_idx, entry->compl_status,
+ entry->error_subcode1,
+ entry->error_subcode2);
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ }
+ } else {
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ }
+}
+
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha,
@@ -5740,41 +5991,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
case ABTS_RESP_24XX:
if (tgt->abts_resp_expected > 0) {
- struct abts_resp_from_24xx_fw *entry =
- (struct abts_resp_from_24xx_fw *)pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe038,
- "ABTS_RESP_24XX: compl_status %x\n",
- entry->compl_status);
- tgt->abts_resp_expected--;
- if (le16_to_cpu(entry->compl_status) !=
- ABTS_RESP_COMPL_SUCCESS) {
- if ((entry->error_subcode1 == 0x1E) &&
- (entry->error_subcode2 == 0)) {
- /*
- * We've got a race here: aborted
- * exchange not terminated, i.e.
- * response for the aborted command was
- * sent between the abort request was
- * received and processed.
- * Unfortunately, the firmware has a
- * silly requirement that all aborted
- * exchanges must be explicitely
- * terminated, otherwise it refuses to
- * send responses for the abort
- * requests. So, we have to
- * (re)terminate the exchange and retry
- * the abort response.
- */
- qlt_24xx_retry_term_exchange(vha,
- entry);
- } else
- ql_dbg(ql_dbg_tgt, vha, 0xe063,
- "qla_target(%d): ABTS_RESP_24XX "
- "failed %x (subcode %x:%x)",
- vha->vp_idx, entry->compl_status,
- entry->error_subcode1,
- entry->error_subcode2);
- }
+ qlt_handle_abts_completion(vha, rsp, pkt);
} else {
ql_dbg(ql_dbg_tgt, vha, 0xe064,
"qla_target(%d): Unexpected ABTS_RESP_24XX "
@@ -5964,10 +6181,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
case MODE_DUAL:
if (newfcport) {
if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0x20fe,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name, vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, fcport);
+ qla24xx_sched_upd_fcport(fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ff,
"%s %d %8phC post gpsc fcp_cnt %d\n",
@@ -6413,6 +6627,9 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
if (!(host->hostt->supported_mode & MODE_TARGET))
continue;
+ if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
+ continue;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
@@ -6475,15 +6692,15 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
EXPORT_SYMBOL(qlt_lport_deregister);
/* Must be called under HW lock */
-static void qlt_set_mode(struct scsi_qla_host *vha)
+void qlt_set_mode(struct scsi_qla_host *vha)
{
- switch (ql2x_ini_mode) {
+ switch (vha->qlini_mode) {
case QLA2XXX_INI_MODE_DISABLED:
case QLA2XXX_INI_MODE_EXCLUSIVE:
vha->host->active_mode = MODE_TARGET;
break;
case QLA2XXX_INI_MODE_ENABLED:
- vha->host->active_mode = MODE_UNKNOWN;
+ vha->host->active_mode = MODE_INITIATOR;
break;
case QLA2XXX_INI_MODE_DUAL:
vha->host->active_mode = MODE_DUAL;
@@ -6496,7 +6713,7 @@ static void qlt_set_mode(struct scsi_qla_host *vha)
/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
- switch (ql2x_ini_mode) {
+ switch (vha->qlini_mode) {
case QLA2XXX_INI_MODE_DISABLED:
vha->host->active_mode = MODE_UNKNOWN;
break;
@@ -6532,12 +6749,17 @@ qlt_enable_vha(struct scsi_qla_host *vha)
dump_stack();
return;
}
+ if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
+ return;
spin_lock_irqsave(&ha->hardware_lock, flags);
tgt->tgt_stopped = 0;
qlt_set_mode(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ mutex_lock(&ha->optrom_mutex);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+ "%s.\n", __func__);
if (vha->vp_idx) {
qla24xx_disable_vp(vha);
qla24xx_enable_vp(vha);
@@ -6546,6 +6768,7 @@ qlt_enable_vha(struct scsi_qla_host *vha)
qla2xxx_wake_dpc(base_vha);
qla2x00_wait_for_hba_online(base_vha);
}
+ mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);
@@ -6767,7 +6990,7 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
if (qla_tgt_mode_enabled(vha))
nv->exchange_count = cpu_to_le16(0xFFFF);
else /* dual */
- nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+ nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6846,14 +7069,6 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
-
- /* disable ZIO at start time. */
- if (!vha->flags.init_done) {
- uint32_t tmp;
- tmp = le32_to_cpu(icb->firmware_options_2);
- tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
- icb->firmware_options_2 = cpu_to_le32(tmp);
- }
}
void
@@ -6881,7 +7096,7 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
if (qla_tgt_mode_enabled(vha))
nv->exchange_count = cpu_to_le16(0xFFFF);
else /* dual */
- nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+ nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6957,15 +7172,6 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
-
- /* disable ZIO at start time. */
- if (!vha->flags.init_done) {
- uint32_t tmp;
- tmp = le32_to_cpu(icb->firmware_options_2);
- tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
- icb->firmware_options_2 = cpu_to_le32(tmp);
- }
-
}
void
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index fecf96f0225c..721da593b1bc 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -374,8 +374,8 @@ struct atio_from_isp {
static inline int fcpcmd_is_corrupted(struct atio *atio)
{
if (atio->entry_type == ATIO_TYPE7 &&
- (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
- FCP_CMD_LENGTH_MIN))
+ ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+ FCP_CMD_LENGTH_MIN))
return 1;
else
return 0;
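
[Editor's note] The fcpcmd_is_corrupted() change above is an endianness fix rather than a cleanup: the old code masked the raw little-endian value and byte-swapped the result, which selects the wrong bits on big-endian hosts whenever the mask is not byte-symmetric. The safe order is always swap first, then mask, as in this small illustration:

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Swap first, then mask. Masking the raw __le16 and swapping
 * afterwards compares the wrong bits on big-endian CPUs.
 */
static bool fcp_len_too_small(__le16 attr_n_length, u16 mask, u16 min)
{
	return (le16_to_cpu(attr_n_length) & mask) < min;
}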
@@ -900,6 +900,7 @@ struct qla_tgt_cmd {
unsigned int aborted:1;
unsigned int data_work:1;
unsigned int data_work_free:1;
+ unsigned int released:1;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
@@ -908,6 +909,7 @@ struct qla_tgt_cmd {
u64 unpacked_lun;
enum dma_data_direction dma_data_direction;
+ uint16_t ctio_flags;
uint16_t vp_idx;
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
@@ -956,16 +958,20 @@ struct qla_tgt_sess_work_param {
};
struct qla_tgt_mgmt_cmd {
+ uint8_t cmd_type;
+ uint8_t pad[3];
uint16_t tmr_func;
uint8_t fc_tm_rsp;
+ uint8_t abort_io_attr;
struct fc_port *sess;
struct qla_qpair *qpair;
struct scsi_qla_host *vha;
struct se_cmd se_cmd;
struct work_struct free_work;
unsigned int flags;
+#define QLA24XX_MGMT_SEND_NACK BIT_0
+#define QLA24XX_MGMT_ABORT_IO_ATTR_VALID BIT_1
uint32_t reset_count;
-#define QLA24XX_MGMT_SEND_NACK 1
struct work_struct work;
uint64_t unpacked_lun;
union {
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3850b28518e5..12bafff71a1a 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.08-k"
+#define QLA2XXX_VERSION "10.00.00.11-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index e03d12a5f986..65053c066680 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -277,14 +277,25 @@ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ bool released = false;
+ unsigned long flags;
cmd->cmd_in_wq = 0;
WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
cmd->trc_flags |= TRC_CMD_FREE;
- transport_generic_free_cmd(&cmd->se_cmd, 0);
+ cmd->cmd_sent_to_fw = 0;
+ if (cmd->released)
+ released = true;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+ if (released)
+ qlt_free_cmd(cmd);
+ else
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
}
/*
@@ -325,6 +336,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd;
+ unsigned long flags;
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
@@ -332,9 +344,16 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
qlt_free_mcmd(mcmd);
return;
}
-
cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
- qlt_free_cmd(cmd);
+
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->cmd_sent_to_fw) {
+ cmd->released = 1;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ qlt_free_cmd(cmd);
+ }
}
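
[Editor's note] The reworked release path closes a use-after-free window: if the target core drops its reference while a CTIO is still outstanding in the firmware, freeing immediately would leave the completion handler touching dead memory, so release merely marks the command and the completion side (see tcm_qla2xxx_complete_free() above and the handle_data path below) performs the real free. A minimal sketch of that deferred-free handshake, with generic names and assuming exactly one completion per command:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct inflight_cmd {
	spinlock_t lock;
	bool sent_to_fw;	/* completion still pending in firmware */
	bool released;		/* owner dropped its reference early */
};

/* Owner drops the command; free only if no completion is pending. */
static void cmd_release(struct inflight_cmd *c)
{
	unsigned long flags;
	bool free_now;

	spin_lock_irqsave(&c->lock, flags);
	if (c->sent_to_fw) {
		c->released = true;	/* completion path will free */
		free_now = false;
	} else {
		free_now = true;
	}
	spin_unlock_irqrestore(&c->lock, flags);

	if (free_now)
		kfree(c);
}

/* Firmware completion: free if the owner already let go. */
static void cmd_complete(struct inflight_cmd *c)
{
	unsigned long flags;
	bool free_now;

	spin_lock_irqsave(&c->lock, flags);
	c->sent_to_fw = false;
	free_now = c->released;
	spin_unlock_irqrestore(&c->lock, flags);

	if (free_now)
		kfree(c);
}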
static void tcm_qla2xxx_release_session(struct kref *kref)
@@ -405,7 +424,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
se_cmd->pi_err = 0;
/*
- * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+ * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup
* the SGL mappings into PCIe memory for incoming FCP WRITE data.
*/
return qlt_rdy_to_xfer(cmd);
@@ -499,6 +518,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
@@ -506,6 +526,25 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
*/
cmd->cmd_in_wq = 0;
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ cmd->cmd_sent_to_fw = 0;
+
+ if (cmd->released) {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ qlt_free_cmd(cmd);
+ return;
+ }
+
+ cmd->data_work = 1;
+ if (cmd->aborted) {
+ cmd->data_work_free = 1;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+ tcm_qla2xxx_free_cmd(cmd);
+ return;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
cmd->qpair->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
@@ -718,10 +757,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd->sg_cnt = 0;
cmd->offset = 0;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
- if (cmd->trc_flags & TRC_XMIT_STATUS) {
- pr_crit("Multiple calls for status = %p.\n", cmd);
- dump_stack();
- }
cmd->trc_flags |= TRC_XMIT_STATUS;
if (se_cmd->data_direction == DMA_FROM_DEVICE) {
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 52b1a0bc93c9..1ef74aa2d00a 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -766,12 +766,10 @@ int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
while (drvr_wait) {
if (ql4xxx_lock_drvr(a) == 0) {
ssleep(QL4_LOCK_DRVR_SLEEP);
- if (drvr_wait) {
- DEBUG2(printk("scsi%ld: %s: Waiting for "
- "Global Init Semaphore(%d)...\n",
- a->host_no,
- __func__, drvr_wait));
- }
+ DEBUG2(printk("scsi%ld: %s: Waiting for "
+ "Global Init Semaphore(%d)...\n",
+ a->host_no,
+ __func__, drvr_wait));
drvr_wait -= QL4_LOCK_DRVR_SLEEP;
} else {
DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0e13349dce57..051164f755a4 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3382,7 +3382,7 @@ static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (task->data_count) {
task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
task->data_count,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -3437,7 +3437,7 @@ static void qla4xxx_task_cleanup(struct iscsi_task *task)
if (task->data_count) {
dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
- task->data_count, PCI_DMA_TODEVICE);
+ task->data_count, DMA_TO_DEVICE);
}
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -9020,25 +9020,16 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev)
/**
* qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
- *
- * At exit, the @ha's flags.enable_64bit_addressing set to indicated
- * supported addressing method.
*/
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
{
- int retval;
-
/* Update our PCI device dma_mask for full 64 bit mask */
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
- if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
- dev_dbg(&ha->pdev->dev,
- "Failed to set 64 bit PCI consistent mask; "
- "using 32 bit.\n");
- retval = pci_set_consistent_dma_mask(ha->pdev,
- DMA_BIT_MASK(32));
- }
- } else
- retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+ if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(&ha->pdev->dev,
+ "Failed to set 64 bit PCI consistent mask; "
+ "using 32 bit.\n");
+ dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
+ }
}
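
[Editor's note] dma_set_mask_and_coherent() sets both the streaming and the coherent mask in one call, which is why the old nested pci_set_dma_mask()/pci_set_consistent_dma_mask() ladder collapses to a few lines here. A hedged sketch of the probe-time idiom:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_setup_dma(struct pci_dev *pdev)
{
	/* Prefer full 64-bit addressing... */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;

	/* ...and fall back to 32-bit if the platform refuses. */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}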
static int qla4xxx_slave_alloc(struct scsi_device *sdev)
@@ -9824,7 +9815,6 @@ qla4xxx_pci_resume(struct pci_dev *pdev)
__func__);
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
clear_bit(AF_EEH_BUSY, &ha->flags);
}
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index ea88906d2cc5..5c3d6e1e0145 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -63,8 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
* emulated RAID devices, so start with SCSI */
struct raid_internal *i = ac_to_raid_internal(cont);
-#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
- if (scsi_is_sdev_device(dev)) {
+ if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) {
struct scsi_device *sdev = to_scsi_device(dev);
if (i->f->cookie != sdev->host->hostt)
@@ -72,7 +71,6 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
return i->f->is_raid(dev);
}
-#endif
/* FIXME: look at other subsystems too */
return 0;
}
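
[Editor's note] IS_ENABLED(CONFIG_FOO) expands to 1 when the option is built-in or modular and 0 otherwise, so it replaces the old defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE) pair while keeping the guarded code visible to the compiler. A small illustration with a hypothetical option and helpers:

#include <linux/kconfig.h>

static bool feature_applies(int id);	/* hypothetical helpers */
static int handle_feature(int id);

static int maybe_handle(int id)
{
	/*
	 * Unlike #ifdef, the guarded branch is still parsed and
	 * type-checked, then eliminated as dead code when CONFIG_FOO
	 * is disabled.
	 */
	if (IS_ENABLED(CONFIG_FOO) && feature_applies(id))
		return handle_feature(id);
	return 0;
}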
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index b7a8fdfeb2f4..c736d61b1648 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -338,9 +338,6 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
online = scsi_device_online(sdev);
- SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
- "%s: rtn: %d\n", __func__, online));
-
return online;
}
EXPORT_SYMBOL(scsi_block_when_processing_errors);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index eb97d2dd3651..c7fccbb8f554 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1201,8 +1201,8 @@ int scsi_init_io(struct scsi_cmnd *cmd)
count = blk_rq_map_integrity_sg(rq->q, rq->bio,
prot_sdb->table.sgl);
- BUG_ON(unlikely(count > ivecs));
- BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
+ BUG_ON(count > ivecs);
+ BUG_ON(count > queue_max_integrity_segments(rq->q));
cmd->prot_sdb = prot_sdb;
cmd->prot_sdb->table.nents = count;
@@ -2753,6 +2753,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (oldstate) {
case SDEV_RUNNING:
case SDEV_CREATED_BLOCK:
+ case SDEV_OFFLINE:
break;
default:
goto illegal;
@@ -3046,11 +3047,14 @@ scsi_device_quiesce(struct scsi_device *sdev)
*/
WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
- blk_set_preempt_only(q);
+ if (sdev->quiesced_by == current)
+ return 0;
+
+ blk_set_pm_only(q);
blk_mq_freeze_queue(q);
/*
- * Ensure that the effect of blk_set_preempt_only() will be visible
+ * Ensure that the effect of blk_set_pm_only() will be visible
* for percpu_ref_tryget() callers that occur after the queue
* unfreeze even if the queue was already frozen before this function
* was called. See also https://lwn.net/Articles/573497/.
@@ -3063,7 +3067,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
if (err == 0)
sdev->quiesced_by = current;
else
- blk_clear_preempt_only(q);
+ blk_clear_pm_only(q);
mutex_unlock(&sdev->state_mutex);
return err;
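
[Editor's note] blk_set_pm_only() is the renamed successor of the "preempt only" flag: while its count is raised, only power-management requests are admitted to the queue. The sequence above makes the flag visible to all percpu_ref_tryget() callers by bracketing it with a freeze, a grace period, and an unfreeze. A compressed sketch of that ordering (error handling and the request_queue plumbing elided):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/rcupdate.h>

static void example_enter_pm_only(struct request_queue *q)
{
	blk_set_pm_only(q);

	/*
	 * Freeze, wait a grace period, then unfreeze: in-flight
	 * percpu_ref_tryget() users either drain or observe the
	 * pm-only state once the queue is unfrozen.
	 */
	blk_mq_freeze_queue(q);
	synchronize_rcu();
	blk_mq_unfreeze_queue(q);
}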
@@ -3088,7 +3092,7 @@ void scsi_device_resume(struct scsi_device *sdev)
mutex_lock(&sdev->state_mutex);
WARN_ON_ONCE(!sdev->quiesced_by);
sdev->quiesced_by = NULL;
- blk_clear_preempt_only(sdev->request_queue);
+ blk_clear_pm_only(sdev->request_queue);
if (sdev->sdev_state == SDEV_QUIESCE)
scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index b44c1bb687a2..a2b4179bfdf7 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -8,6 +8,7 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/async.h>
+#include <linux/blk-pm.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 0cd16e80b019..0a165b2b3e81 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -612,7 +612,6 @@ sas_phy_protocol_attr(identify.target_port_protocols,
sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
unsigned long long);
sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
-//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
sas_phy_linkspeed_attr(negotiated_linkrate);
sas_phy_linkspeed_attr(minimum_linkrate_hw);
sas_phy_linkspeed_rw_attr(minimum_linkrate);
@@ -1802,7 +1801,6 @@ sas_attach_transport(struct sas_function_template *ft)
SETUP_PHY_ATTRIBUTE(device_type);
SETUP_PHY_ATTRIBUTE(sas_address);
SETUP_PHY_ATTRIBUTE(phy_identifier);
- //SETUP_PHY_ATTRIBUTE(port_identifier);
SETUP_PHY_ATTRIBUTE(negotiated_linkrate);
SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw);
SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b79b366a94f7..b762d0fd773c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -45,6 +45,7 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
+#include <linux/blk-pm.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
@@ -1276,7 +1277,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_ZONE_RESET:
return sd_zbc_setup_reset_cmnd(cmd);
default:
- BUG();
+ WARN_ON_ONCE(1);
+ return BLKPREP_KILL;
}
}
@@ -2959,6 +2961,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
if (rot == 1) {
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+ } else {
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
}
if (sdkp->device->type == TYPE_ZBC) {
@@ -3271,7 +3276,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
}
blk_pm_runtime_init(sdp->request_queue, dev);
- device_add_disk(dev, gd);
+ device_add_disk(dev, gd, NULL);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8a254bb46a9b..c6ad00703c5b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -822,7 +822,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
scsi_req_free_cmd(scsi_req(srp->rq));
- blk_end_request_all(srp->rq, BLK_STS_IOERR);
+ blk_put_request(srp->rq);
srp->rq = NULL;
}
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 2112ea6723c6..a25a07a0b7f0 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -349,16 +349,16 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
static int pqi_map_single(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *sg_descriptor, void *buffer,
- size_t buffer_length, int data_direction)
+ size_t buffer_length, enum dma_data_direction data_direction)
{
dma_addr_t bus_address;
- if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
+ if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
return 0;
- bus_address = pci_map_single(pci_dev, buffer, buffer_length,
+ bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
data_direction);
- if (pci_dma_mapping_error(pci_dev, bus_address))
+ if (dma_mapping_error(&pci_dev->dev, bus_address))
return -ENOMEM;
put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
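
[Editor's note] The single-buffer conversions in smartpqi follow the same recipe as the scatterlist ones, with one trap worth restating: a failed dma_map_single() is detected with dma_mapping_error() on the returned handle, never by comparing the handle against zero. A minimal sketch:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_buf(struct device *dev, void *buf, size_t len,
			   enum dma_data_direction dir, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, dir);

	if (dma_mapping_error(dev, addr))	/* not: addr == 0 */
		return -ENOMEM;
	*out = addr;
	return 0;
}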
@@ -370,15 +370,15 @@ static int pqi_map_single(struct pci_dev *pci_dev,
static void pqi_pci_unmap(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *descriptors, int num_descriptors,
- int data_direction)
+ enum dma_data_direction data_direction)
{
int i;
- if (data_direction == PCI_DMA_NONE)
+ if (data_direction == DMA_NONE)
return;
for (i = 0; i < num_descriptors; i++)
- pci_unmap_single(pci_dev,
+ dma_unmap_single(&pci_dev->dev,
(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
get_unaligned_le32(&descriptors[i].length),
data_direction);
@@ -387,10 +387,9 @@ static void pqi_pci_unmap(struct pci_dev *pci_dev,
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
struct pqi_raid_path_request *request, u8 cmd,
u8 *scsi3addr, void *buffer, size_t buffer_length,
- u16 vpd_page, int *pci_direction)
+ u16 vpd_page, enum dma_data_direction *dir)
{
u8 *cdb;
- int pci_dir;
memset(request, 0, sizeof(*request));
@@ -458,23 +457,21 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
switch (request->data_direction) {
case SOP_READ_FLAG:
- pci_dir = PCI_DMA_FROMDEVICE;
+ *dir = DMA_FROM_DEVICE;
break;
case SOP_WRITE_FLAG:
- pci_dir = PCI_DMA_TODEVICE;
+ *dir = DMA_TO_DEVICE;
break;
case SOP_NO_DIRECTION_FLAG:
- pci_dir = PCI_DMA_NONE;
+ *dir = DMA_NONE;
break;
default:
- pci_dir = PCI_DMA_BIDIRECTIONAL;
+ *dir = DMA_BIDIRECTIONAL;
break;
}
- *pci_direction = pci_dir;
-
return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
- buffer, buffer_length, pci_dir);
+ buffer, buffer_length, *dir);
}
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
@@ -516,21 +513,19 @@ static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
struct bmic_identify_controller *buffer)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
- sizeof(*buffer), 0, &pci_direction);
+ sizeof(*buffer), 0, &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -538,21 +533,19 @@ static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
- &pci_direction);
+ &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -562,13 +555,13 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
size_t buffer_length)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
u16 bmic_device_index;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
- buffer_length, 0, &pci_direction);
+ buffer_length, 0, &dir);
if (rc)
return rc;
@@ -579,9 +572,7 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
0, NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -590,8 +581,8 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
{
int rc;
struct pqi_raid_path_request request;
- int pci_direction;
struct bmic_flush_cache *flush_cache;
+ enum dma_data_direction dir;
/*
* Don't bother trying to flush the cache if the controller is
@@ -608,16 +599,14 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
rc = pqi_build_raid_path_request(ctrl_info, &request,
SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
- sizeof(*flush_cache), 0, &pci_direction);
+ sizeof(*flush_cache), 0, &dir);
if (rc)
goto out;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
0, NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
out:
kfree(flush_cache);
@@ -629,20 +618,18 @@ static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
{
int rc;
struct pqi_raid_path_request request;
- int pci_direction;
+ enum dma_data_direction dir;
rc = pqi_build_raid_path_request(ctrl_info, &request,
BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
- buffer_length, 0, &pci_direction);
+ buffer_length, 0, &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
0, NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -793,20 +780,18 @@ static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
void *buffer, size_t buffer_length)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
- cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
+ cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -1089,7 +1074,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
struct raid_map *raid_map;
@@ -1099,15 +1084,14 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
rc = pqi_build_raid_path_request(ctrl_info, &request,
CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
- sizeof(*raid_map), 0, &pci_direction);
+ sizeof(*raid_map), 0, &dir);
if (rc)
goto error;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
if (rc)
goto error;
@@ -3822,7 +3806,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
rc = pqi_map_single(ctrl_info->pci_dev,
&request.data.report_device_capability.sg_descriptor,
capability, sizeof(*capability),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -3831,7 +3815,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
pqi_pci_unmap(ctrl_info->pci_dev,
&request.data.report_device_capability.sg_descriptor, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -4158,7 +4142,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
rc = pqi_map_single(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors,
event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -4167,7 +4151,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -4194,7 +4178,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
rc = pqi_map_single(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors,
event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (rc)
goto out;
@@ -4203,7 +4187,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
out:
kfree(event_config);
@@ -5534,7 +5518,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
rc = pqi_map_single(ctrl_info->pci_dev,
&request.sg_descriptors[0], kernel_buffer,
- iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ iocommand.buf_size, DMA_BIDIRECTIONAL);
if (rc)
goto out;
@@ -5548,7 +5532,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
if (iocommand.buf_size > 0)
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 5141bd4c9f06..ea91658c7060 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -316,9 +316,9 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
put_unaligned_le32(ctrl_info->max_io_slots,
&base_struct->error_buffer_num_elements);
- bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
- sizeof(*base_struct), PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
+ bus_address = dma_map_single(&ctrl_info->pci_dev->dev, base_struct,
+ sizeof(*base_struct), DMA_TO_DEVICE);
+ if (dma_mapping_error(&ctrl_info->pci_dev->dev, bus_address)) {
rc = -ENOMEM;
goto out;
}
@@ -331,9 +331,8 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
&params);
- pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
- PCI_DMA_TODEVICE);
-
+ dma_unmap_single(&ctrl_info->pci_dev->dev, bus_address,
+ sizeof(*base_struct), DMA_TO_DEVICE);
out:
kfree(base_struct_unaligned);
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index b106596cc0cf..e9ccfb97773f 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -111,8 +111,8 @@ snic_queue_report_tgt_req(struct snic *snic)
SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
- pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(snic->pdev, pa)) {
+ pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&snic->pdev->dev, pa)) {
SNIC_HOST_ERR(snic->shost,
"Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
buf);
@@ -138,7 +138,8 @@ snic_queue_report_tgt_req(struct snic *snic)
ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
if (ret) {
- pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&snic->pdev->dev, pa, buf_len,
+ DMA_FROM_DEVICE);
kfree(buf);
rqi->sge_va = 0;
snic_release_untagged_req(snic, rqi);
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 8e69548395b9..159ee94d2a55 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -102,7 +102,8 @@ snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
struct snic_req_info *rqi = NULL;
unsigned long flags;
- pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
rqi = req_to_rqi(req);
spin_lock_irqsave(&snic->spl_cmd_lock, flags);
@@ -172,8 +173,8 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
snic_print_desc(__func__, os_buf, len);
/* Map request buffer */
- pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(snic->pdev, pa)) {
+ pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&snic->pdev->dev, pa)) {
SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
return -ENOMEM;
@@ -186,7 +187,7 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
spin_lock_irqsave(&snic->wq_lock[q_num], flags);
desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
if (desc_avail <= 0) {
- pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
req->req_pa = 0;
spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
@@ -350,29 +351,29 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi)
if (rqi->abort_req) {
if (rqi->abort_req->req_pa)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
rqi->abort_req->req_pa,
sizeof(struct snic_host_req),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
}
if (rqi->dr_req) {
if (rqi->dr_req->req_pa)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
rqi->dr_req->req_pa,
sizeof(struct snic_host_req),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
}
if (rqi->req->req_pa)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
rqi->req->req_pa,
rqi->req_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}
@@ -384,10 +385,10 @@ snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
sgd = req_to_sgl(rqi_to_req(rqi));
SNIC_BUG_ON(sgd[0].addr == 0);
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
le64_to_cpu(sgd[0].addr),
le32_to_cpu(sgd[0].len),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/*
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 7cf70aaec0ba..5295277d6325 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -435,37 +435,17 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* limitation for the device. Try 43-bit first, and
* fail to 32-bit.
*/
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
if (ret) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
SNIC_HOST_ERR(shost,
"No Usable DMA Configuration, aborting %d\n",
ret);
-
- goto err_rel_regions;
- }
-
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret) {
- SNIC_HOST_ERR(shost,
- "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
- ret);
-
- goto err_rel_regions;
- }
- } else {
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
- if (ret) {
- SNIC_HOST_ERR(shost,
- "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
- ret);
-
goto err_rel_regions;
}
}
-
/* Map vNIC resources from BAR0 */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index d9b2e46424aa..b3650c989ed4 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -146,10 +146,10 @@ snic_release_req_buf(struct snic *snic,
CMD_FLAGS(sc));
if (req->u.icmnd.sense_addr)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
le64_to_cpu(req->u.icmnd.sense_addr),
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
scsi_dma_unmap(sc);
@@ -185,12 +185,11 @@ snic_queue_icmnd_req(struct snic *snic,
}
}
- pa = pci_map_single(snic->pdev,
+ pa = dma_map_single(&snic->pdev->dev,
sc->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
-
- if (pci_dma_mapping_error(snic->pdev, pa)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&snic->pdev->dev, pa)) {
SNIC_HOST_ERR(snic->shost,
"QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
sc->sense_buffer, snic_cmd_tag(sc));
@@ -2001,7 +2000,7 @@ snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
}
dr_failed:
- SNIC_BUG_ON(!spin_is_locked(io_lock));
+ lockdep_assert_held(io_lock);
if (rqi)
CMD_SP(sc) = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -2604,7 +2603,7 @@ snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
ret = SUCCESS;
skip_internal_abts:
- SNIC_BUG_ON(!spin_is_locked(io_lock));
+ lockdep_assert_held(io_lock);
spin_unlock_irqrestore(io_lock, flags);
return ret;
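
[Editor's note] Replacing SNIC_BUG_ON(!spin_is_locked(...)) with lockdep_assert_held() is more than style: spin_is_locked() cannot say whether the current CPU is the holder and is identically false on uniprocessor builds, whereas lockdep_assert_held() checks real ownership under CONFIG_PROVE_LOCKING and compiles to nothing otherwise. A sketch of the idiom:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static void touch_protected_state(spinlock_t *lock, int *state)
{
	/*
	 * Documents and (with lockdep enabled) enforces that the
	 * caller holds @lock; a no-op on non-lockdep kernels.
	 */
	lockdep_assert_held(lock);
	(*state)++;
}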
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index dad5fc66effb..05e374f80946 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -225,10 +225,9 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
{
svnic_dev_desc_ring_size(ring, desc_count, desc_size);
- ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
- ring->size_unaligned,
- &ring->base_addr_unaligned);
-
+ ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
+ ring->size_unaligned, &ring->base_addr_unaligned,
+ GFP_KERNEL);
if (!ring->descs_unaligned) {
pr_err("Failed to allocate ring (size=%d), aborting\n",
(int)ring->size);
@@ -251,7 +250,7 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
ring->size_unaligned,
ring->descs_unaligned,
ring->base_addr_unaligned);
@@ -470,9 +469,9 @@ int svnic_dev_fw_info(struct vnic_dev *vdev,
int err = 0;
if (!vdev->fw_info) {
- vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+ vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
- &vdev->fw_info_pa);
+ &vdev->fw_info_pa, GFP_KERNEL);
if (!vdev->fw_info)
return -ENOMEM;
@@ -534,8 +533,8 @@ int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
int wait = VNIC_DVCMD_TMO;
if (!vdev->stats) {
- vdev->stats = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_stats), &vdev->stats_pa);
+ vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
if (!vdev->stats)
return -ENOMEM;
}
@@ -607,9 +606,9 @@ int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
int wait = VNIC_DVCMD_TMO;
if (!vdev->notify) {
- vdev->notify = pci_alloc_consistent(vdev->pdev,
+ vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
- &vdev->notify_pa);
+ &vdev->notify_pa, GFP_KERNEL);
if (!vdev->notify)
return -ENOMEM;
}
@@ -697,21 +696,21 @@ void svnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
if (vdev->linkstatus)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(u32),
vdev->linkstatus,
vdev->linkstatus_pa);
if (vdev->stats)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
if (vdev->devcmd2)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d0389b20574d..54dd70ae9731 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -43,6 +43,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
+#include <linux/blk-pm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
@@ -758,7 +759,7 @@ static int sr_probe(struct device *dev)
dev_set_drvdata(dev, cd);
disk->flags |= GENHD_FL_REMOVABLE;
- device_add_disk(&sdev->sdev_gendev, disk);
+ device_add_disk(&sdev->sdev_gendev, disk, NULL);
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 0b1421cdf8a0..c9a55d0f076d 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -60,30 +60,6 @@ static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
return readb(esp->regs + (reg * 4UL));
}
-static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int sun3x_esp_irq_pending(struct esp *esp)
{
if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -182,10 +158,6 @@ static int sun3x_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops sun3x_esp_ops = {
.esp_write8 = sun3x_esp_write8,
.esp_read8 = sun3x_esp_read8,
- .map_single = sun3x_esp_map_single,
- .map_sg = sun3x_esp_map_sg,
- .unmap_single = sun3x_esp_unmap_single,
- .unmap_sg = sun3x_esp_unmap_sg,
.irq_pending = sun3x_esp_irq_pending,
.reset_dma = sun3x_esp_reset_dma,
.dma_drain = sun3x_esp_dma_drain,
@@ -246,7 +218,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
dev_set_drvdata(&dev->dev, esp);
- err = scsi_esp_register(esp, &dev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 747ee64a78e1..a11efbcb7f8b 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -80,7 +80,7 @@ static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
static int esp_sbus_map_regs(struct esp *esp, int hme)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct resource *res;
/* On HME, two reg sets exist, first is DVMA,
@@ -100,11 +100,9 @@ static int esp_sbus_map_regs(struct esp *esp, int hme)
static int esp_sbus_map_command_block(struct esp *esp)
{
- struct platform_device *op = esp->dev;
-
- esp->command_block = dma_alloc_coherent(&op->dev, 16,
+ esp->command_block = dma_alloc_coherent(esp->dev, 16,
&esp->command_block_dma,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!esp->command_block)
return -ENOMEM;
return 0;
@@ -113,7 +111,7 @@ static int esp_sbus_map_command_block(struct esp *esp)
static int esp_sbus_register_irq(struct esp *esp)
{
struct Scsi_Host *host = esp->host;
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
host->irq = op->archdata.irqs[0];
return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
@@ -121,7 +119,7 @@ static int esp_sbus_register_irq(struct esp *esp)
static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *dp;
dp = op->dev.of_node;
@@ -143,7 +141,7 @@ done:
static void esp_get_differential(struct esp *esp)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *dp;
dp = op->dev.of_node;
@@ -155,7 +153,7 @@ static void esp_get_differential(struct esp *esp)
static void esp_get_clock_params(struct esp *esp)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *bus_dp, *dp;
int fmhz;
@@ -172,7 +170,7 @@ static void esp_get_clock_params(struct esp *esp)
static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
struct device_node *dma_dp = dma_of->dev.of_node;
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *dp;
u8 bursts, val;
@@ -212,38 +210,6 @@ static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
return sbus_readb(esp->regs + (reg * 4UL));
}
-static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- struct platform_device *op = esp->dev;
-
- return dma_map_single(&op->dev, buf, sz, dir);
-}
-
-static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- struct platform_device *op = esp->dev;
-
- return dma_map_sg(&op->dev, sg, num_sg, dir);
-}
-
-static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- struct platform_device *op = esp->dev;
-
- dma_unmap_single(&op->dev, addr, sz, dir);
-}
-
-static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- struct platform_device *op = esp->dev;
-
- dma_unmap_sg(&op->dev, sg, num_sg, dir);
-}
-
static int sbus_esp_irq_pending(struct esp *esp)
{
if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -255,14 +221,13 @@ static void sbus_esp_reset_dma(struct esp *esp)
{
int can_do_burst16, can_do_burst32, can_do_burst64;
int can_do_sbus64, lim;
- struct platform_device *op;
+ struct platform_device *op = to_platform_device(esp->dev);
u32 val;
can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
can_do_burst64 = 0;
can_do_sbus64 = 0;
- op = esp->dev;
if (sbus_can_dma_64bit())
can_do_sbus64 = 1;
if (sbus_can_burst64())
@@ -474,10 +439,6 @@ static int sbus_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops sbus_esp_ops = {
.esp_write8 = sbus_esp_write8,
.esp_read8 = sbus_esp_read8,
- .map_single = sbus_esp_map_single,
- .map_sg = sbus_esp_map_sg,
- .unmap_single = sbus_esp_unmap_single,
- .unmap_sg = sbus_esp_unmap_sg,
.irq_pending = sbus_esp_irq_pending,
.reset_dma = sbus_esp_reset_dma,
.dma_drain = sbus_esp_dma_drain,
@@ -504,7 +465,7 @@ static int esp_sbus_probe_one(struct platform_device *op,
esp = shost_priv(host);
esp->host = host;
- esp->dev = op;
+ esp->dev = &op->dev;
esp->ops = &sbus_esp_ops;
if (hme)
@@ -540,7 +501,7 @@ static int esp_sbus_probe_one(struct platform_device *op,
dev_set_drvdata(&op->dev, esp);
- err = scsi_esp_register(esp, &op->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index bd3f6e2d6834..0a2a54517b15 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -4370,6 +4370,13 @@ static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym
OUTB(np, HS_PRT, HS_BUSY);
}
+#define sym_printk(lvl, tp, cp, fmt, v...) do { \
+ if (cp) \
+ scmd_printk(lvl, cp->cmd, fmt, ##v); \
+ else \
+ starget_printk(lvl, tp->starget, fmt, ##v); \
+} while (0)
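
[Editor's note] sym_printk() exists because cp can be NULL on these interrupt paths — the old scmd_printk(..., cp->cmd, ...) dereferenced it unconditionally — and the do { } while (0) wrapper keeps the multi-statement macro safe inside unbraced if/else bodies. A generic sketch of the same NULL-guarded logging pattern (hypothetical macro name):

#include <linux/device.h>
#include <linux/printk.h>

/*
 * Prefer the device-qualified log when a device is known, else fall
 * back to a bare printk; do { } while (0) keeps the macro usable as
 * a single statement.
 */
#define dev_or_pr_warn(dev, fmt, ...) do {		\
	if (dev)					\
		dev_warn(dev, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)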
+
/*
* chip exception handler for programmed interrupts.
*/
@@ -4415,7 +4422,7 @@ static void sym_int_sir(struct sym_hcb *np)
* been selected with ATN. We do not want to handle that.
*/
case SIR_SEL_ATN_NO_MSG_OUT:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No MSG OUT phase after selection with ATN\n");
goto out_stuck;
/*
@@ -4423,7 +4430,7 @@ static void sym_int_sir(struct sym_hcb *np)
* having reselected the initiator.
*/
case SIR_RESEL_NO_MSG_IN:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No MSG IN phase after reselection\n");
goto out_stuck;
/*
@@ -4431,7 +4438,7 @@ static void sym_int_sir(struct sym_hcb *np)
* an IDENTIFY.
*/
case SIR_RESEL_NO_IDENTIFY:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No IDENTIFY after reselection\n");
goto out_stuck;
/*
@@ -4460,7 +4467,7 @@ static void sym_int_sir(struct sym_hcb *np)
case SIR_RESEL_ABORTED:
np->lastmsg = np->msgout[0];
np->msgout[0] = M_NOOP;
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"message %x sent on bad reselection\n", np->lastmsg);
goto out;
/*
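
In the sym_hipd.c hunks above, sym_printk() fixes a NULL dereference: these programmed interrupts can fire with no command attached (cp == NULL, e.g. on a bad reselection), in which case the message falls back to the target-level printer. The do { ... } while (0) wrapper is the usual idiom that makes a multi-statement macro behave as a single statement; a small illustration with a hypothetical call site:

    /* Without the do/while(0) wrapper, the if/else inside the macro
     * would pair with the caller's else and mis-parse: */
    if (bad_phase)
            sym_printk(KERN_WARNING, tp, cp, "unexpected phase\n");
    else
            continue_nexus();
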
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index e09fe6ab3572..2ddd426323e9 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -109,3 +109,22 @@ config SCSI_UFS_HISI
Select this if you have UFS controller on Hisilicon chipset.
If unsure, say N.
+
+config SCSI_UFS_BSG
+ bool "Universal Flash Storage BSG device node"
+ depends on SCSI_UFSHCD
+ select BLK_DEV_BSGLIB
+ help
+ Universal Flash Storage (UFS) is a SCSI transport specification for
+ accessing flash storage on digital cameras, mobile phones and
+ consumer electronic devices.
+ A UFS controller communicates with a UFS device by exchanging
+ UFS Protocol Information Units (UPIUs).
+ UPIUs can not only be used as a transport layer for the SCSI protocol
+ but are also used by the UFS native command set.
+ This transport driver supports exchanging UFS protocol information units
+ with a UFS device. See also the ufshcd driver, which is a SCSI driver
+ that supports UFS devices.
+
+ Select this if you need a bsg device node for your UFS controller.
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 2c50f03d8c4a..aca481329828 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -4,7 +4,8 @@ obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-objs := ufshcd.o ufs-sysfs.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 75ee5906b966..3aeadb14aae1 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -16,7 +16,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/phy/phy-qcom-ufs.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
@@ -70,20 +69,27 @@ static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
}
static int ufs_qcom_host_clk_get(struct device *dev,
- const char *name, struct clk **clk_out)
+ const char *name, struct clk **clk_out, bool optional)
{
struct clk *clk;
int err = 0;
clk = devm_clk_get(dev, name);
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- dev_err(dev, "%s: failed to get %s err %d",
- __func__, name, err);
- } else {
+ if (!IS_ERR(clk)) {
*clk_out = clk;
+ return 0;
}
+ err = PTR_ERR(clk);
+
+ if (optional && err == -ENOENT) {
+ *clk_out = NULL;
+ return 0;
+ }
+
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to get %s err %d\n", name, err);
+
return err;
}
@@ -104,11 +110,9 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
if (!host->is_lane_clks_enabled)
return;
- if (host->hba->lanes_per_direction > 1)
- clk_disable_unprepare(host->tx_l1_sync_clk);
+ clk_disable_unprepare(host->tx_l1_sync_clk);
clk_disable_unprepare(host->tx_l0_sync_clk);
- if (host->hba->lanes_per_direction > 1)
- clk_disable_unprepare(host->rx_l1_sync_clk);
+ clk_disable_unprepare(host->rx_l1_sync_clk);
clk_disable_unprepare(host->rx_l0_sync_clk);
host->is_lane_clks_enabled = false;
@@ -132,24 +136,21 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
if (err)
goto disable_rx_l0;
- if (host->hba->lanes_per_direction > 1) {
- err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+ err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
host->rx_l1_sync_clk);
- if (err)
- goto disable_tx_l0;
+ if (err)
+ goto disable_tx_l0;
- err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+ err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
host->tx_l1_sync_clk);
- if (err)
- goto disable_rx_l1;
- }
+ if (err)
+ goto disable_rx_l1;
host->is_lane_clks_enabled = true;
goto out;
disable_rx_l1:
- if (host->hba->lanes_per_direction > 1)
- clk_disable_unprepare(host->rx_l1_sync_clk);
+ clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
@@ -163,25 +164,25 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
int err = 0;
struct device *dev = host->hba->dev;
- err = ufs_qcom_host_clk_get(dev,
- "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
+ err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
+ &host->rx_l0_sync_clk, false);
if (err)
goto out;
- err = ufs_qcom_host_clk_get(dev,
- "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
+ err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
+ &host->tx_l0_sync_clk, false);
if (err)
goto out;
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
- &host->rx_l1_sync_clk);
+ &host->rx_l1_sync_clk, false);
if (err)
goto out;
err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
- &host->tx_l1_sync_clk);
+ &host->tx_l1_sync_clk, true);
}
out:
return err;
@@ -189,22 +190,9 @@ out:
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
u32 tx_lanes;
- int err = 0;
-
- err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
- if (err)
- goto out;
-
- err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
- if (err)
- dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
- __func__);
-out:
- return err;
+ return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
@@ -932,10 +920,8 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
{
u32 val;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
struct ufs_qcom_dev_params ufs_qcom_cap;
int ret = 0;
- int res = 0;
if (!dev_req_params) {
pr_err("%s: incoming dev_req_params is NULL\n", __func__);
@@ -1002,12 +988,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
}
val = ~(MAX_U32 << dev_req_params->lane_tx);
- res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
- if (res) {
- dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
- __func__, res);
- ret = res;
- }
/* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
@@ -1264,10 +1244,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
}
}
- /* update phy revision information before calling phy_init() */
- ufs_qcom_phy_save_controller_version(host->generic_phy,
- host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
-
err = ufs_qcom_init_lane_clks(host);
if (err)
goto out_variant_clear;
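
The ufs-qcom.c rework above hinges on two facts: devm_clk_get() returns -ENOENT when a clock is simply not described, and the clk API treats a NULL struct clk * as a dummy clock (clk_prepare_enable(NULL) and clk_disable_unprepare(NULL) are no-ops). Storing NULL for an absent optional lane clock is what lets the lanes_per_direction checks around the disable calls be dropped. A minimal sketch of the same pattern (later kernels wrap it as devm_clk_get_optional()):

    struct clk *clk = devm_clk_get(dev, "tx_lane1_sync_clk");

    if (IS_ERR(clk)) {
            if (PTR_ERR(clk) == -ENOENT)
                    clk = NULL;             /* absent but optional: carry on */
            else
                    return PTR_ERR(clk);    /* real error, incl. -EPROBE_DEFER */
    }
    clk_prepare_enable(clk);                /* no-op when clk is NULL */
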
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 295f4bef6a0e..c114826316eb 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -129,11 +129,6 @@ enum {
MASK_CLK_NS_REG = 0xFFFC00,
};
-enum ufs_qcom_phy_init_type {
- UFS_PHY_INIT_FULL,
- UFS_PHY_INIT_CFG_RESTORE,
-};
-
/* QCOM UFS debug print bit mask */
#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0)
#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN BIT(1)
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 14e5bf7af0bb..58087d3916d0 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -38,9 +38,9 @@
#include <linux/mutex.h>
#include <linux/types.h>
+#include <uapi/scsi/scsi_bsg_ufs.h>
-#define MAX_CDB_SIZE 16
-#define GENERAL_UPIU_REQUEST_SIZE 32
+#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
#define QUERY_DESC_MAX_SIZE 255
#define QUERY_DESC_MIN_SIZE 2
#define QUERY_DESC_HDR_SIZE 2
@@ -414,6 +414,7 @@ enum {
MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
MASK_RSP_EXCEPTION_EVENT = 0x10000,
MASK_TM_SERVICE_RESP = 0xFF,
+ MASK_TM_FUNC = 0xFF,
};
/* Task management service response */
@@ -433,65 +434,6 @@ enum ufs_dev_pwr_mode {
};
/**
- * struct utp_upiu_header - UPIU header structure
- * @dword_0: UPIU header DW-0
- * @dword_1: UPIU header DW-1
- * @dword_2: UPIU header DW-2
- */
-struct utp_upiu_header {
- __be32 dword_0;
- __be32 dword_1;
- __be32 dword_2;
-};
-
-/**
- * struct utp_upiu_cmd - Command UPIU structure
- * @data_transfer_len: Data Transfer Length DW-3
- * @cdb: Command Descriptor Block CDB DW-4 to DW-7
- */
-struct utp_upiu_cmd {
- __be32 exp_data_transfer_len;
- u8 cdb[MAX_CDB_SIZE];
-};
-
-/**
- * struct utp_upiu_query - upiu request buffer structure for
- * query request.
- * @opcode: command to perform B-0
- * @idn: a value that indicates the particular type of data B-1
- * @index: Index to further identify data B-2
- * @selector: Index to further identify data B-3
- * @reserved_osf: spec reserved field B-4,5
- * @length: number of descriptor bytes to read/write B-6,7
- * @value: Attribute value to be written DW-5
- * @reserved: spec reserved DW-6,7
- */
-struct utp_upiu_query {
- u8 opcode;
- u8 idn;
- u8 index;
- u8 selector;
- __be16 reserved_osf;
- __be16 length;
- __be32 value;
- __be32 reserved[2];
-};
-
-/**
- * struct utp_upiu_req - general upiu request structure
- * @header:UPIU header structure DW-0 to DW-2
- * @sc: fields structure for scsi command DW-3 to DW-7
- * @qr: fields structure for query request DW-3 to DW-7
- */
-struct utp_upiu_req {
- struct utp_upiu_header header;
- union {
- struct utp_upiu_cmd sc;
- struct utp_upiu_query qr;
- };
-};
-
-/**
* struct utp_cmd_rsp - Response UPIU structure
* @residual_transfer_count: Residual transfer count DW-3
* @reserved: Reserved double words DW-4 to DW-7
@@ -520,36 +462,6 @@ struct utp_upiu_rsp {
};
/**
- * struct utp_upiu_task_req - Task request UPIU structure
- * @header - UPIU header structure DW0 to DW-2
- * @input_param1: Input parameter 1 DW-3
- * @input_param2: Input parameter 2 DW-4
- * @input_param3: Input parameter 3 DW-5
- * @reserved: Reserved double words DW-6 to DW-7
- */
-struct utp_upiu_task_req {
- struct utp_upiu_header header;
- __be32 input_param1;
- __be32 input_param2;
- __be32 input_param3;
- __be32 reserved[2];
-};
-
-/**
- * struct utp_upiu_task_rsp - Task Management Response UPIU structure
- * @header: UPIU header structure DW0-DW-2
- * @output_param1: Ouput parameter 1 DW3
- * @output_param2: Output parameter 2 DW4
- * @reserved: Reserved double words DW-5 to DW-7
- */
-struct utp_upiu_task_rsp {
- struct utp_upiu_header header;
- __be32 output_param1;
- __be32 output_param2;
- __be32 reserved[3];
-};
-
-/**
* struct ufs_query_req - parameters for building a query request
* @query_func: UPIU header query function
* @upiu_req: the query request data
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
new file mode 100644
index 000000000000..e5f8e54bf644
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bsg endpoint that supports UPIUs
+ *
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#include "ufs_bsg.h"
+
+static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
+ struct utp_upiu_query *qr)
+{
+ int desc_size = be16_to_cpu(qr->length);
+ int desc_id = qr->idn;
+ int ret;
+
+ if (desc_size <= 0)
+ return -EINVAL;
+
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
+ if (ret || !*desc_len)
+ return -EINVAL;
+
+ *desc_len = min_t(int, *desc_len, desc_size);
+
+ return 0;
+}
+
+static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
+ unsigned int request_len,
+ unsigned int reply_len,
+ int desc_len, enum query_opcode desc_op)
+{
+ int min_req_len = sizeof(struct ufs_bsg_request);
+ int min_rsp_len = sizeof(struct ufs_bsg_reply);
+
+ if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
+ min_req_len += desc_len;
+
+ if (min_req_len > request_len || min_rsp_len > reply_len) {
+ dev_err(hba->dev, "not enough space assigned\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ufs_bsg_verify_query_params(struct ufs_hba *hba,
+ struct ufs_bsg_request *bsg_request,
+ unsigned int request_len,
+ unsigned int reply_len,
+ uint8_t *desc_buff, int *desc_len,
+ enum query_opcode desc_op)
+{
+ struct utp_upiu_query *qr;
+
+ if (desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
+ dev_err(hba->dev, "unsupported opcode %d\n", desc_op);
+ return -ENOTSUPP;
+ }
+
+ if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC)
+ goto out;
+
+ qr = &bsg_request->upiu_req.qr;
+ if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
+ dev_err(hba->dev, "Illegal desc size\n");
+ return -EINVAL;
+ }
+
+ if (ufs_bsg_verify_query_size(hba, request_len, reply_len, *desc_len,
+ desc_op))
+ return -EINVAL;
+
+ desc_buff = (uint8_t *)(bsg_request + 1);
+
+out:
+ return 0;
+}
+
+static int ufs_bsg_request(struct bsg_job *job)
+{
+ struct ufs_bsg_request *bsg_request = job->request;
+ struct ufs_bsg_reply *bsg_reply = job->reply;
+ struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
+ unsigned int req_len = job->request_len;
+ unsigned int reply_len = job->reply_len;
+ struct uic_command uc = {};
+ int msgcode;
+ uint8_t *desc_buff = NULL;
+ int desc_len = 0;
+ enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
+ int ret;
+
+ ret = ufs_bsg_verify_query_size(hba, req_len, reply_len, 0, desc_op);
+ if (ret)
+ goto out;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ msgcode = bsg_request->msgcode;
+ switch (msgcode) {
+ case UPIU_TRANSACTION_QUERY_REQ:
+ desc_op = bsg_request->upiu_req.qr.opcode;
+ ret = ufs_bsg_verify_query_params(hba, bsg_request, req_len,
+ reply_len, desc_buff,
+ &desc_len, desc_op);
+ if (ret)
+ goto out;
+
+ /* fall through */
+ case UPIU_TRANSACTION_NOP_OUT:
+ case UPIU_TRANSACTION_TASK_REQ:
+ ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
+ &bsg_reply->upiu_rsp, msgcode,
+ desc_buff, &desc_len, desc_op);
+ if (ret)
+ dev_err(hba->dev,
+ "exe raw upiu: error code %d\n", ret);
+
+ break;
+ case UPIU_TRANSACTION_UIC_CMD:
+ memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
+ ret = ufshcd_send_uic_cmd(hba, &uc);
+ if (ret)
+ dev_dbg(hba->dev,
+ "send uic cmd: error code %d\n", ret);
+
+ memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
+
+ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
+
+ break;
+ }
+
+out:
+ bsg_reply->result = ret;
+ job->reply_len = sizeof(struct ufs_bsg_reply) +
+ bsg_reply->reply_payload_rcv_len;
+
+ bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+
+ return ret;
+}
+
+/**
+ * ufs_bsg_remove - detach and remove the added ufs-bsg node
+ * @hba: per adapter object
+ * Should be called when unloading the driver.
+ */
+void ufs_bsg_remove(struct ufs_hba *hba)
+{
+ struct device *bsg_dev = &hba->bsg_dev;
+
+ if (!hba->bsg_queue)
+ return;
+
+ bsg_unregister_queue(hba->bsg_queue);
+
+ device_del(bsg_dev);
+ put_device(bsg_dev);
+}
+
+static inline void ufs_bsg_node_release(struct device *dev)
+{
+ put_device(dev->parent);
+}
+
+/**
+ * ufs_bsg_probe - Add ufs bsg device node
+ * @hba: per adapter object
+ *
+ * Called during initial loading of the driver and before scsi_scan_host().
+ */
+int ufs_bsg_probe(struct ufs_hba *hba)
+{
+ struct device *bsg_dev = &hba->bsg_dev;
+ struct Scsi_Host *shost = hba->host;
+ struct device *parent = &shost->shost_gendev;
+ struct request_queue *q;
+ int ret;
+
+ device_initialize(bsg_dev);
+
+ bsg_dev->parent = get_device(parent);
+ bsg_dev->release = ufs_bsg_node_release;
+
+ dev_set_name(bsg_dev, "ufs-bsg");
+
+ ret = device_add(bsg_dev);
+ if (ret)
+ goto out;
+
+ q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, 0);
+ if (IS_ERR(q)) {
+ ret = PTR_ERR(q);
+ goto out;
+ }
+
+ hba->bsg_queue = q;
+
+ return 0;
+
+out:
+ dev_err(bsg_dev, "failed to initialize a bsg dev %d\n", shost->host_no);
+ put_device(bsg_dev);
+ return ret;
+}
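
With the code above, a registered host exposes /dev/bsg/ufs-bsg, driven from userspace through the generic bsg SG_IO v4 interface: the raw UPIU travels in a struct ufs_bsg_request and the answer returns in a struct ufs_bsg_reply. A hedged userspace sketch of a NOP OUT round trip (error handling trimmed; the transaction-code constant is mirrored locally because, at this point, the enum lives in the kernel-internal ufs.h):

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/bsg.h>           /* struct sg_io_v4, BSG_PROTOCOL_SCSI */
    #include <scsi/sg.h>             /* SG_IO */
    #include <scsi/scsi_bsg_ufs.h>   /* struct ufs_bsg_request/reply */

    #define UPIU_TRANSACTION_NOP_OUT 0x00   /* mirrored from the kernel's ufs.h */

    int ufs_bsg_nop(void)
    {
            struct ufs_bsg_request req = { .msgcode = UPIU_TRANSACTION_NOP_OUT };
            struct ufs_bsg_reply rsp;
            struct sg_io_v4 io = {
                    .guard = 'Q',
                    .protocol = BSG_PROTOCOL_SCSI,
                    .subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
                    .request = (uintptr_t)&req,
                    .request_len = sizeof(req),
                    .response = (uintptr_t)&rsp,
                    .max_response_len = sizeof(rsp),
            };
            int fd = open("/dev/bsg/ufs-bsg", O_RDWR);

            memset(&rsp, 0, sizeof(rsp));
            if (fd < 0 || ioctl(fd, SG_IO, &io) < 0)
                    return -1;
            return rsp.result;       /* bsg_reply->result set by ufs_bsg_request() */
    }
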
diff --git a/drivers/scsi/ufs/ufs_bsg.h b/drivers/scsi/ufs/ufs_bsg.h
new file mode 100644
index 000000000000..d09918758631
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_bsg.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#ifndef UFS_BSG_H
+#define UFS_BSG_H
+
+#include <linux/bsg-lib.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include "ufshcd.h"
+#include "ufs.h"
+
+#ifdef CONFIG_SCSI_UFS_BSG
+void ufs_bsg_remove(struct ufs_hba *hba);
+int ufs_bsg_probe(struct ufs_hba *hba);
+#else
+static inline void ufs_bsg_remove(struct ufs_hba *hba) {}
+static inline int ufs_bsg_probe(struct ufs_hba *hba) { return 0; }
+#endif
+
+#endif /* UFS_BSG_H */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9d5d2ca7fc4f..23d7cca36ff0 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -46,6 +46,7 @@
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
+#include "ufs_bsg.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -326,14 +327,11 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
const char *str)
{
- struct utp_task_req_desc *descp;
- struct utp_upiu_task_req *task_req;
int off = (int)tag - hba->nutrs;
+ struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
- descp = &hba->utmrdl_base_addr[off];
- task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
- trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
- &task_req->input_param1);
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
+ &descp->input_param1);
}
static void ufshcd_add_command_trace(struct ufs_hba *hba,
@@ -475,22 +473,13 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
- struct utp_task_req_desc *tmrdp;
int tag;
for_each_set_bit(tag, &bitmap, hba->nutmrs) {
- tmrdp = &hba->utmrdl_base_addr[tag];
+ struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
+
dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
- ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
- sizeof(struct request_desc_header));
- dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
- tag);
- ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
- sizeof(struct utp_upiu_req));
- dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
- tag);
- ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
- sizeof(struct utp_task_req_desc));
+ ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
}
}
@@ -646,19 +635,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
- * @task_req_descp: pointer to utp_task_req_desc structure
- *
- * This function is used to get the OCS field from UTMRD
- * Returns the OCS field in the UTMRD
- */
-static inline int
-ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
-{
- return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
-}
-
-/**
* ufshcd_get_tm_free_slot - get a free slot for task management request
* @hba: per adapter instance
* @free_slot: pointer to variable with available slot value
@@ -1691,8 +1667,9 @@ static void __ufshcd_release(struct ufs_hba *hba)
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- schedule_delayed_work(&hba->clk_gating.gate_work,
- msecs_to_jiffies(hba->clk_gating.delay_ms));
+ queue_delayed_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.gate_work,
+ msecs_to_jiffies(hba->clk_gating.delay_ms));
}
void ufshcd_release(struct ufs_hba *hba)
@@ -1763,6 +1740,34 @@ out:
return count;
}
+static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+{
+ char wq_name[sizeof("ufs_clkscaling_00")];
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ INIT_WORK(&hba->clk_scaling.suspend_work,
+ ufshcd_clk_scaling_suspend_work);
+ INIT_WORK(&hba->clk_scaling.resume_work,
+ ufshcd_clk_scaling_resume_work);
+
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+ hba->host->host_no);
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+ ufshcd_clkscaling_init_sysfs(hba);
+}
+
+static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ destroy_workqueue(hba->clk_scaling.workq);
+ ufshcd_devfreq_remove(hba);
+}
+
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
char wq_name[sizeof("ufs_clk_gating_00")];
@@ -2055,8 +2060,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
*
* Returns 0 only if success.
*/
-static int
-ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
unsigned long flags;
@@ -2238,8 +2242,8 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
ucd_req_ptr->sc.exp_data_transfer_len =
cpu_to_be32(lrbp->cmd->sdb.length);
- cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
- memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
+ cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
+ memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
@@ -2258,7 +2262,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
struct ufs_query *query = &hba->dev_cmd.query;
u16 len = be16_to_cpu(query->request.upiu_req.length);
- u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
/* Query request header */
ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
@@ -2280,7 +2283,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
/* Copy the Descriptor */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
- memcpy(descp, query->descriptor, len);
+ memcpy(ucd_req_ptr + 1, query->descriptor, len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
@@ -4601,46 +4604,6 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
}
/**
- * ufshcd_task_req_compl - handle task management request completion
- * @hba: per adapter instance
- * @index: index of the completed request
- * @resp: task management service response
- *
- * Returns non-zero value on error, zero on success
- */
-static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
-{
- struct utp_task_req_desc *task_req_descp;
- struct utp_upiu_task_rsp *task_rsp_upiup;
- unsigned long flags;
- int ocs_value;
- int task_result;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
-
- /* Clear completed tasks from outstanding_tasks */
- __clear_bit(index, &hba->outstanding_tasks);
-
- task_req_descp = hba->utmrdl_base_addr;
- ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
-
- if (ocs_value == OCS_SUCCESS) {
- task_rsp_upiup = (struct utp_upiu_task_rsp *)
- task_req_descp[index].task_rsp_upiu;
- task_result = be32_to_cpu(task_rsp_upiup->output_param1);
- task_result = task_result & MASK_TM_SERVICE_RESP;
- if (resp)
- *resp = (u8)task_result;
- } else {
- dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
- __func__, ocs_value);
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- return ocs_value;
-}
-
-/**
* ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
* @lrbp: pointer to local reference block of completed command
* @scsi_status: SCSI command status
@@ -5597,28 +5560,12 @@ out:
return err;
}
-/**
- * ufshcd_issue_tm_cmd - issues task management commands to controller
- * @hba: per adapter instance
- * @lun_id: LUN ID to which TM command is sent
- * @task_id: task ID to which the TM command is applicable
- * @tm_function: task management function opcode
- * @tm_response: task management service response return value
- *
- * Returns non-zero value on error, zero on success.
- */
-static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
- u8 tm_function, u8 *tm_response)
+static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+ struct utp_task_req_desc *treq, u8 tm_function)
{
- struct utp_task_req_desc *task_req_descp;
- struct utp_upiu_task_req *task_req_upiup;
- struct Scsi_Host *host;
+ struct Scsi_Host *host = hba->host;
unsigned long flags;
- int free_slot;
- int err;
- int task_tag;
-
- host = hba->host;
+ int free_slot, task_tag, err;
/*
* Get free slot, sleep if slots are unavailable.
@@ -5629,30 +5576,11 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags);
- task_req_descp = hba->utmrdl_base_addr;
- task_req_descp += free_slot;
-
- /* Configure task request descriptor */
- task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
- task_req_descp->header.dword_2 =
- cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
- /* Configure task request UPIU */
- task_req_upiup =
- (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
task_tag = hba->nutrs + free_slot;
- task_req_upiup->header.dword_0 =
- UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
- lun_id, task_tag);
- task_req_upiup->header.dword_1 =
- UPIU_HEADER_DWORD(0, tm_function, 0, 0);
- /*
- * The host shall provide the same value for LUN field in the basic
- * header and for Input Parameter.
- */
- task_req_upiup->input_param1 = cpu_to_be32(lun_id);
- task_req_upiup->input_param2 = cpu_to_be32(task_id);
+ treq->req_header.dword_0 |= cpu_to_be32(task_tag);
+
+ memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
/* send command to the controller */
@@ -5682,8 +5610,15 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
__func__, free_slot);
err = -ETIMEDOUT;
} else {
- err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+ err = 0;
+ memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
+
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __clear_bit(free_slot, &hba->outstanding_tasks);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
}
clear_bit(free_slot, &hba->tm_condition);
@@ -5695,6 +5630,228 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
}
/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
+ *
+ * Returns non-zero value on error, zero on success.
+ */
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+ u8 tm_function, u8 *tm_response)
+{
+ struct utp_task_req_desc treq = { { 0 }, };
+ int ocs_value, err;
+
+ /* Configure task request descriptor */
+ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ /* Configure task request UPIU */
+ treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
+ cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
+ treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
+
+ /*
+ * The host shall provide the same value for LUN field in the basic
+ * header and for Input Parameter.
+ */
+ treq.input_param1 = cpu_to_be32(lun_id);
+ treq.input_param2 = cpu_to_be32(task_id);
+
+ err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
+ if (err == -ETIMEDOUT)
+ return err;
+
+ ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+ if (ocs_value != OCS_SUCCESS)
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+ __func__, ocs_value);
+ else if (tm_response)
+ *tm_response = be32_to_cpu(treq.output_param1) &
+ MASK_TM_SERVICE_RESP;
+ return err;
+}
+
+/**
+ * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
+ * @hba: per-adapter instance
+ * @req_upiu: upiu request
+ * @rsp_upiu: upiu reply
+ * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
+ * @buff_len: descriptor size, 0 if NA
+ * @desc_op: descriptor operation
+ *
+ * This type of request uses the UTP Transfer Request Descriptor (UTRD).
+ * Therefore, it "rides" the device management infrastructure: it uses its tag
+ * and task work queues.
+ *
+ * Since there is only one available tag for device management commands,
+ * the caller is expected to hold the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
+ struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu,
+ u8 *desc_buff, int *buff_len,
+ int cmd_type,
+ enum query_opcode desc_op)
+{
+ struct ufshcd_lrb *lrbp;
+ int err = 0;
+ int tag;
+ struct completion wait;
+ unsigned long flags;
+ u32 upiu_flags;
+
+ down_read(&hba->clk_scaling_lock);
+
+ wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+
+ lrbp->cmd = NULL;
+ lrbp->sense_bufflen = 0;
+ lrbp->sense_buffer = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = 0;
+ lrbp->intr_cmd = true;
+ hba->dev_cmd.type = cmd_type;
+
+ switch (hba->ufs_version) {
+ case UFSHCI_VERSION_10:
+ case UFSHCI_VERSION_11:
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ break;
+ default:
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ break;
+ }
+
+ /* update the task tag in the request upiu */
+ req_upiu->header.dword_0 |= cpu_to_be32(tag);
+
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+
+ /* just copy the upiu request as it is */
+ memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
+ if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
+ /* The Data Segment Area is optional depending upon the query
+ * function value. For WRITE DESCRIPTOR, the data segment
+ * follows right after the TSF (Transaction Specific Fields).
+ */
+ memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
+ *buff_len = 0;
+ }
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+ hba->dev_cmd.complete = &wait;
+
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+ * Ignore the return value here - ufshcd_check_query_response() is
+ * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
+ * Read the response directly, ignoring all errors.
+ */
+ ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
+
+ /* just copy the upiu response as it is */
+ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
+
+ ufshcd_put_dev_cmd_tag(hba, tag);
+ wake_up(&hba->dev_cmd.tag_wq);
+ up_read(&hba->clk_scaling_lock);
+ return err;
+}
+
+/**
+ * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
+ * @hba: per-adapter instance
+ * @req_upiu: upiu request
+ * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
+ * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
+ * @buff_len: descriptor size, 0 if NA
+ * @desc_op: descriptor operation
+ *
+ * Supports UTP Transfer requests (nop and query), and UTP Task
+ * Management requests.
+ * It is up to the caller to fill the upiu content properly, as it will
+ * be copied without any further input validation.
+ */
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+ struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu,
+ int msgcode,
+ u8 *desc_buff, int *buff_len,
+ enum query_opcode desc_op)
+{
+ int err;
+ int cmd_type = DEV_CMD_TYPE_QUERY;
+ struct utp_task_req_desc treq = { { 0 }, };
+ int ocs_value;
+ u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
+
+ if (desc_buff && desc_op != UPIU_QUERY_OPCODE_WRITE_DESC) {
+ err = -ENOTSUPP;
+ goto out;
+ }
+
+ switch (msgcode) {
+ case UPIU_TRANSACTION_NOP_OUT:
+ cmd_type = DEV_CMD_TYPE_NOP;
+ /* fall through */
+ case UPIU_TRANSACTION_QUERY_REQ:
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
+ desc_buff, buff_len,
+ cmd_type, desc_op);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ break;
+ case UPIU_TRANSACTION_TASK_REQ:
+ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
+
+ err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
+ if (err == -ETIMEDOUT)
+ break;
+
+ ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+ if (ocs_value != OCS_SUCCESS) {
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
+ ocs_value);
+ break;
+ }
+
+ memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
+
+ break;
+ default:
+ err = -EINVAL;
+
+ break;
+ }
+
+out:
+ return err;
+}
+
+/**
* ufshcd_eh_device_reset_handler - device reset handler registered to
* scsi layer.
* @cmd: SCSI command pointer
@@ -6652,6 +6809,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
hba->clk_scaling.is_allowed = true;
}
+ ufs_bsg_probe(hba);
+
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
}
@@ -6666,6 +6825,7 @@ out:
*/
if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
pm_runtime_put_sync(hba->dev);
+ ufshcd_exit_clk_scaling(hba);
ufshcd_hba_exit(hba);
}
@@ -7201,12 +7361,9 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
ufshcd_suspend_clkscaling(hba);
- if (ufshcd_is_clkscaling_supported(hba)) {
+ if (ufshcd_is_clkscaling_supported(hba))
if (hba->devfreq)
ufshcd_suspend_clkscaling(hba);
- destroy_workqueue(hba->clk_scaling.workq);
- ufshcd_devfreq_remove(hba);
- }
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -7875,12 +8032,14 @@ EXPORT_SYMBOL(ufshcd_shutdown);
*/
void ufshcd_remove(struct ufs_hba *hba)
{
+ ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba, true);
+ ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
if (ufshcd_is_clkscaling_supported(hba))
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
@@ -7940,6 +8099,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+
+ /*
+ * Do not use blk-mq at this time because blk-mq does not support
+ * runtime pm.
+ */
+ host->use_blk_mq = false;
+
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
@@ -8020,7 +8186,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
host->unique_id = host->host_no;
- host->max_cmd_len = MAX_CDB_SIZE;
+ host->max_cmd_len = UFS_CDB_SIZE;
hba->max_pwr_info.is_valid = false;
@@ -8045,6 +8211,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_init_clk_gating(hba);
+ ufshcd_init_clk_scaling(hba);
+
/*
* In order to avoid any spurious interrupt immediately after
* registering UFS controller interrupt handler, clear any pending UFS
@@ -8083,21 +8251,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_remove_scsi_host;
}
- if (ufshcd_is_clkscaling_supported(hba)) {
- char wq_name[sizeof("ufs_clkscaling_00")];
-
- INIT_WORK(&hba->clk_scaling.suspend_work,
- ufshcd_clk_scaling_suspend_work);
- INIT_WORK(&hba->clk_scaling.resume_work,
- ufshcd_clk_scaling_resume_work);
-
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
-
- ufshcd_clkscaling_init_sysfs(hba);
- }
-
/*
* Set the default power management level for runtime and system PM.
* Default power saving mode is to keep UFS link in Hibern8 state
@@ -8135,6 +8288,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
out_remove_scsi_host:
scsi_remove_host(hba->host);
exit_gating:
+ ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
out_disable:
hba->is_irq_enabled = false;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 33fdd3f281ae..1a1c2b487a4e 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -702,6 +702,9 @@ struct ufs_hba {
struct rw_semaphore clk_scaling_lock;
struct ufs_desc_size desc_size;
atomic_t scsi_block_reqs_cnt;
+
+ struct device bsg_dev;
+ struct request_queue *bsg_queue;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -892,6 +895,15 @@ int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+ struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu,
+ int msgcode,
+ u8 *desc_buff, int *buff_len,
+ enum query_opcode desc_op);
+
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index bb5d9c7f3353..6fa889de5ee5 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -433,22 +433,25 @@ struct utp_transfer_req_desc {
__le16 prd_table_offset;
};
-/**
- * struct utp_task_req_desc - UTMRD structure
- * @header: UTMRD header DW-0 to DW-3
- * @task_req_upiu: Pointer to task request UPIU DW-4 to DW-11
- * @task_rsp_upiu: Pointer to task response UPIU DW12 to DW-19
+/*
+ * UTMRD structure.
*/
struct utp_task_req_desc {
-
/* DW 0-3 */
struct request_desc_header header;
- /* DW 4-11 */
- __le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
-
- /* DW 12-19 */
- __le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+ /* DW 4-11 - Task request UPIU structure */
+ struct utp_upiu_header req_header;
+ __be32 input_param1;
+ __be32 input_param2;
+ __be32 input_param3;
+ __be32 __reserved1[2];
+
+ /* DW 12-19 - Task Management Response UPIU structure */
+ struct utp_upiu_header rsp_header;
+ __be32 output_param1;
+ __be32 output_param2;
+ __be32 __reserved2[3];
};
#endif /* End of Header */
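
ufshcd_exec_raw_upiu_cmd() above is the kernel-side entry point the bsg node uses; with the UPIUs now laid out inline in struct utp_task_req_desc, the TM path simply memcpy()s whole descriptors in and out of the UTMRD slot. A hypothetical in-kernel caller, following the declarations above (NOP OUT carries no descriptor, so desc_buff is NULL and desc_op is the NOP opcode):

    /* Sketch only: issue a raw NOP OUT UPIU and collect the NOP IN reply. */
    static int example_raw_nop(struct ufs_hba *hba)
    {
            struct utp_upiu_req req = {}, rsp = {};
            int buff_len = 0;

            /* the transaction code lives in the top byte of header dword 0 */
            req.header.dword_0 = cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);

            return ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
                                            UPIU_TRANSACTION_NOP_OUT,
                                            NULL, &buff_len,
                                            UPIU_QUERY_OPCODE_NOP);
    }
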
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 0cd947f78b5b..6e491023fdd8 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -372,9 +372,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
pvscsi_create_sg(ctx, sg, segs);
e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
- ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
- SGL_SIZE, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) {
+ ctx->sglPA = dma_map_single(&adapter->dev->dev,
+ ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
scmd_printk(KERN_ERR, cmd,
"vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
scsi_dma_unmap(cmd);
@@ -389,9 +389,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
* In case there is no S/G list, scsi_sglist points
* directly to the buffer.
*/
- ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
+ ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
cmd->sc_data_direction);
- if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) {
+ if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
scmd_printk(KERN_ERR, cmd,
"vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
return -ENOMEM;
@@ -417,23 +417,23 @@ static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
if (count != 0) {
scsi_dma_unmap(cmd);
if (ctx->sglPA) {
- pci_unmap_single(adapter->dev, ctx->sglPA,
- SGL_SIZE, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
+ SGL_SIZE, DMA_TO_DEVICE);
ctx->sglPA = 0;
}
} else
- pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
- cmd->sc_data_direction);
+ dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
+ bufflen, cmd->sc_data_direction);
}
if (cmd->sense_buffer)
- pci_unmap_single(adapter->dev, ctx->sensePA,
- SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
- adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
- &adapter->ringStatePA);
+ adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+ &adapter->ringStatePA, GFP_KERNEL);
if (!adapter->rings_state)
return -ENOMEM;
@@ -441,17 +441,17 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
pvscsi_ring_pages);
adapter->req_depth = adapter->req_pages
* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
- adapter->req_ring = pci_alloc_consistent(adapter->dev,
- adapter->req_pages * PAGE_SIZE,
- &adapter->reqRingPA);
+ adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
+ adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
+ GFP_KERNEL);
if (!adapter->req_ring)
return -ENOMEM;
adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
pvscsi_ring_pages);
- adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
- adapter->cmp_pages * PAGE_SIZE,
- &adapter->cmpRingPA);
+ adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
+ adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
+ GFP_KERNEL);
if (!adapter->cmp_ring)
return -ENOMEM;
@@ -464,9 +464,9 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
pvscsi_msg_ring_pages);
- adapter->msg_ring = pci_alloc_consistent(adapter->dev,
- adapter->msg_pages * PAGE_SIZE,
- &adapter->msgRingPA);
+ adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
+ adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
+ GFP_KERNEL);
if (!adapter->msg_ring)
return -ENOMEM;
BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
@@ -708,10 +708,10 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
e->lun[1] = sdev->lun;
if (cmd->sense_buffer) {
- ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) {
+ ctx->sensePA = dma_map_single(&adapter->dev->dev,
+ cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
scmd_printk(KERN_ERR, cmd,
"vmw_pvscsi: Failed to map sense buffer for DMA.\n");
ctx->sensePA = 0;
@@ -740,9 +740,9 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
if (cmd->sense_buffer) {
- pci_unmap_single(adapter->dev, ctx->sensePA,
+ dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ctx->sensePA = 0;
}
return -ENOMEM;
@@ -1218,21 +1218,21 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
}
if (adapter->rings_state)
- pci_free_consistent(adapter->dev, PAGE_SIZE,
+ dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
adapter->rings_state, adapter->ringStatePA);
if (adapter->req_ring)
- pci_free_consistent(adapter->dev,
+ dma_free_coherent(&adapter->dev->dev,
adapter->req_pages * PAGE_SIZE,
adapter->req_ring, adapter->reqRingPA);
if (adapter->cmp_ring)
- pci_free_consistent(adapter->dev,
+ dma_free_coherent(&adapter->dev->dev,
adapter->cmp_pages * PAGE_SIZE,
adapter->cmp_ring, adapter->cmpRingPA);
if (adapter->msg_ring)
- pci_free_consistent(adapter->dev,
+ dma_free_coherent(&adapter->dev->dev,
adapter->msg_pages * PAGE_SIZE,
adapter->msg_ring, adapter->msgRingPA);
}
@@ -1291,8 +1291,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
u32 numPhys = 16;
dev = pvscsi_dev(adapter);
- config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
- &configPagePA);
+ config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+ &configPagePA, GFP_KERNEL);
if (!config_page) {
dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
goto exit;
@@ -1326,7 +1326,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
} else
dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
header->hostStatus, header->scsiStatus);
- pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+ dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
+ configPagePA);
exit:
return numPhys;
}
@@ -1346,11 +1347,9 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_enable_device(pdev))
return error;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
- } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
+ } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
} else {
printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
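
The vmw_pvscsi conversion above is a mechanical move from the legacy PCI DMA API to the generic DMA API; every call maps one-to-one, with the GFP flag becoming explicit on coherent allocations (pci_alloc_consistent() implied GFP_ATOMIC, so sleepable paths can now relax to GFP_KERNEL). A sketch of the correspondence used throughout the patch:

    /*
     *  pci_set_dma_mask(pdev, m) +
     *  pci_set_consistent_dma_mask(pdev, m)  ->  dma_set_mask_and_coherent(&pdev->dev, m)
     *  pci_map_single(pdev, p, n, PCI_DMA_TODEVICE)
     *                                         ->  dma_map_single(&pdev->dev, p, n, DMA_TO_DEVICE)
     *  pci_dma_mapping_error(pdev, a)         ->  dma_mapping_error(&pdev->dev, a)
     *  pci_alloc_consistent(pdev, n, &h)      ->  dma_alloc_coherent(&pdev->dev, n, &h, GFP_KERNEL)
     *  pci_free_consistent(pdev, n, v, h)     ->  dma_free_coherent(&pdev->dev, n, v, h)
     */
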
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index bb70882e6b56..ca8e3abeb2c7 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -9,8 +9,6 @@
*
* Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
* Blizzard 1230 DMA and probe function fixes
- *
- * Copyright (C) 2017 Finn Thain for PIO code from Mac ESP driver adapted here
*/
/*
* ZORRO bus code from:
@@ -159,7 +157,6 @@ struct fastlane_dma_registers {
struct zorro_esp_priv {
struct esp *esp; /* our ESP instance - for Scsi_host* */
void __iomem *board_base; /* virtual address (Zorro III board) */
- int error; /* PIO error flag */
int zorro3; /* board is Zorro III */
unsigned char ctrl_data; /* shadow copy of ctrl_reg */
};
@@ -182,30 +179,6 @@ static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
return readb(esp->regs + (reg * 4UL));
}
-static dma_addr_t zorro_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int zorro_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void zorro_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void zorro_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int zorro_esp_irq_pending(struct esp *esp)
{
/* check ESP status register; DMA has no status reg. */
@@ -245,7 +218,7 @@ static int fastlane_esp_irq_pending(struct esp *esp)
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
u32 dma_len)
{
- return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len;
+ return dma_len > 0xFFFF ? 0xFFFF : dma_len;
}
static void zorro_esp_reset_dma(struct esp *esp)
@@ -274,192 +247,29 @@ static void fastlane_esp_dma_invalidate(struct esp *esp)
z_writel(0, zep->board_base);
}
-/*
- * Programmed IO routines follow.
- */
-
-static inline unsigned int zorro_esp_wait_for_fifo(struct esp *esp)
-{
- int i = 500000;
-
- do {
- unsigned int fbytes = zorro_esp_read8(esp, ESP_FFLAGS)
- & ESP_FF_FBYTES;
-
- if (fbytes)
- return fbytes;
-
- udelay(2);
- } while (--i);
-
- pr_err("FIFO is empty (sreg %02x)\n",
- zorro_esp_read8(esp, ESP_STATUS));
- return 0;
-}
-
-static inline int zorro_esp_wait_for_intr(struct esp *esp)
-{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
- int i = 500000;
-
- do {
- esp->sreg = zorro_esp_read8(esp, ESP_STATUS);
- if (esp->sreg & ESP_STAT_INTR)
- return 0;
-
- udelay(2);
- } while (--i);
-
- pr_err("IRQ timeout (sreg %02x)\n", esp->sreg);
- zep->error = 1;
- return 1;
-}
-
-/*
- * PIO macros as used in mac_esp.c.
- * Note that addr and fifo arguments are local-scope variables declared
- * in zorro_esp_send_pio_cmd(), the macros are only used in that function,
- * and addr and fifo are referenced in each use of the macros so there
- * is no need to pass them as macro parameters.
- */
-#define ZORRO_ESP_PIO_LOOP(operands, reg1) \
- asm volatile ( \
- "1: moveb " operands "\n" \
- " subqw #1,%1 \n" \
- " jbne 1b \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo));
-
-#define ZORRO_ESP_PIO_FILL(operands, reg1) \
- asm volatile ( \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " subqw #8,%1 \n" \
- " subqw #8,%1 \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo));
-
-#define ZORRO_ESP_FIFO_SIZE 16
-
-static void zorro_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
- u32 dma_count, int write, u8 cmd)
-{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
- u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
- u8 phase = esp->sreg & ESP_STAT_PMASK;
-
- cmd &= ~ESP_CMD_DMA;
-
- if (write) {
- u8 *dst = (u8 *)addr;
- u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
-
- scsi_esp_cmd(esp, cmd);
-
- while (1) {
- if (!zorro_esp_wait_for_fifo(esp))
- break;
-
- *dst++ = zorro_esp_read8(esp, ESP_FDATA);
- --esp_count;
-
- if (!esp_count)
- break;
-
- if (zorro_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
- if (esp->ireg & mask) {
- zep->error = 1;
- break;
- }
-
- if (phase == ESP_MIP)
- scsi_esp_cmd(esp, ESP_CMD_MOK);
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- } else { /* unused, as long as we only handle MIP here */
- scsi_esp_cmd(esp, ESP_CMD_FLUSH);
-
- if (esp_count >= ZORRO_ESP_FIFO_SIZE)
- ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
- else
- ZORRO_ESP_PIO_LOOP("%0@+,%2@", esp_count)
-
- scsi_esp_cmd(esp, cmd);
-
- while (esp_count) {
- unsigned int n;
-
- if (zorro_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
- if (esp->ireg & ~ESP_INTR_BSERV) {
- zep->error = 1;
- break;
- }
-
- n = ZORRO_ESP_FIFO_SIZE -
- (zorro_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES);
- if (n > esp_count)
- n = esp_count;
-
- if (n == ZORRO_ESP_FIFO_SIZE)
- ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
- else {
- esp_count -= n;
- ZORRO_ESP_PIO_LOOP("%0@+,%2@", n)
- }
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- }
-}
-
/* Blizzard 1230/60 SCSI-IV DMA */
static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/*
* Use PIO if transferring message bytes to esp->command_block_dma.
* PIO requires a virtual address, so substitute esp->command_block
* for addr.
*/
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ /* Clear the results of a possible prior esp->ops->send_dma_cmd() */
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
if (write)
/* DMA receive */
dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -484,7 +294,6 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
scsi_esp_cmd(esp, ESP_CMD_DMA);
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
scsi_esp_cmd(esp, cmd);
}
@@ -494,18 +303,19 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
if (write)
/* DMA receive */
dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -529,7 +339,6 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
scsi_esp_cmd(esp, ESP_CMD_DMA);
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
scsi_esp_cmd(esp, cmd);
}
@@ -539,18 +348,19 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
if (write)
/* DMA receive */
dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -574,7 +384,6 @@ static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
scsi_esp_cmd(esp, ESP_CMD_DMA);
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
scsi_esp_cmd(esp, cmd);
}
@@ -589,17 +398,18 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
u8 phase = esp->sreg & ESP_STAT_PMASK;
unsigned char *ctrl_data = &zep->ctrl_data;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
if (write) {
/* DMA receive */
@@ -635,21 +445,21 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
if (write) {
/* DMA receive */
@@ -681,17 +491,18 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
u8 phase = esp->sreg & ESP_STAT_PMASK;
unsigned char *ctrl_data = &zep->ctrl_data;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
if (write) {
/* DMA receive */
@@ -724,14 +535,7 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
static int zorro_esp_dma_error(struct esp *esp)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
-
- /* check for error in case we've been doing PIO */
- if (zep->error == 1)
- return 1;
-
- /* do nothing - there seems to be no way to check for DMA errors */
- return 0;
+ return esp->send_cmd_error;
}
/* per-board ESP driver ops */
@@ -739,10 +543,6 @@ static int zorro_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops blz1230_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -755,10 +555,6 @@ static const struct esp_driver_ops blz1230_esp_ops = {
static const struct esp_driver_ops blz1230II_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -771,10 +567,6 @@ static const struct esp_driver_ops blz1230II_esp_ops = {
static const struct esp_driver_ops blz2060_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -787,10 +579,6 @@ static const struct esp_driver_ops blz2060_esp_ops = {
static const struct esp_driver_ops cyber_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = cyber_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -803,10 +591,6 @@ static const struct esp_driver_ops cyber_esp_ops = {
static const struct esp_driver_ops cyberII_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -819,10 +603,6 @@ static const struct esp_driver_ops cyberII_esp_ops = {
static const struct esp_driver_ops fastlane_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = fastlane_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -1039,6 +819,8 @@ static int zorro_esp_probe(struct zorro_dev *z,
goto fail_unmap_fastlane;
}
+ esp->fifo_reg = esp->regs + ESP_FDATA * 4;
+
/* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
if (zdd->scsi_option) {
zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
@@ -1082,7 +864,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
}
/* register the chip */
- err = scsi_esp_register(esp, &z->dev);
+ err = scsi_esp_register(esp);
if (err) {
err = -ENOMEM;
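
The zorro_esp rework above replaces the driver-private zep->error flag with the core's esp->send_cmd_error and esp->send_cmd_residual fields, cleared before each DMA command and set by the shared esp_send_pio_cmd() helper that the driver now calls for message-in transfers. That is what lets zorro_esp_dma_error() collapse to a one-line read; a minimal sketch of the resulting contract (the core-side helper is assumed, not shown here):

	/* esp_send_pio_cmd() in the esp_scsi core sets this on PIO failure */
	static int board_esp_dma_error(struct esp *esp)
	{
		return esp->send_cmd_error;	/* nonzero => error recovery path */
	}
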
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 9b17f72349ed..321a92613a7e 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -310,6 +310,37 @@ int dpaa2_io_service_rearm(struct dpaa2_io *d,
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
/**
+ * dpaa2_io_service_pull_fq() - pull dequeued frames from a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+ struct dpaa2_io_store *s)
+{
+ struct qbman_pull_desc pd;
+ int err;
+
+ qbman_pull_desc_clear(&pd);
+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+ qbman_pull_desc_set_fq(&pd, fqid);
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+ s->swp = d->swp;
+ err = qbman_swp_pull(d->swp, &pd);
+ if (err)
+ s->swp = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
+
+/**
* dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
* @d: the given DPIO service.
* @channelid: the given channel id.
@@ -342,6 +373,33 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
/**
+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
+
+/**
* dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
* @d: the given DPIO service.
* @qdid: the given queuing destination id.
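
The two new exports round out the DPIO service API for drivers that talk to plain frame queues rather than channels. A hedged consumer sketch (the fqid and the 16-entry store size are placeholders; dpaa2_io_store_create()/_next()/_destroy() are the store helpers already provided by this file, and error handling is trimmed):

	static int example_fq_roundtrip(struct dpaa2_io *io, u32 fqid,
					const struct dpaa2_fd *fd,
					struct device *dev)
	{
		struct dpaa2_io_store *store;
		struct dpaa2_dq *dq;
		int err, is_last = 0;

		err = dpaa2_io_service_enqueue_fq(io, fqid, fd);
		if (err)			/* -EBUSY: ring full, may retry */
			return err;

		store = dpaa2_io_store_create(16, dev);
		if (!store)
			return -ENOMEM;

		err = dpaa2_io_service_pull_fq(io, fqid, store);
		while (!err && !is_last) {
			dq = dpaa2_io_store_next(store, &is_last);
			if (dq) {
				/* consume the dequeue entry here */
			}
		}

		dpaa2_io_store_destroy(store);
		return err;
	}
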
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 05c42235dd41..7c3cc968053c 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -120,6 +120,7 @@ static void bm_set_memory(u64 ba, u32 size)
*/
static dma_addr_t fbpr_a;
static size_t fbpr_sz;
+static int __bman_probed;
static int bman_fbpr(struct reserved_mem *rmem)
{
@@ -166,6 +167,12 @@ static irqreturn_t bman_isr(int irq, void *ptr)
return IRQ_HANDLED;
}
+int bman_is_probed(void)
+{
+ return __bman_probed;
+}
+EXPORT_SYMBOL_GPL(bman_is_probed);
+
static int fsl_bman_probe(struct platform_device *pdev)
{
int ret, err_irq;
@@ -175,6 +182,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
u16 id, bm_pool_cnt;
u8 major, minor;
+ __bman_probed = -1;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@@ -255,6 +264,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
return ret;
}
+ __bman_probed = 1;
+
return 0;
};
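
bman_is_probed() is deliberately tri-state: 0 while fsl_bman_probe() has not run, -1 once it starts (and stays -1 if it fails), and 1 only after it completes, as the __bman_probed assignments above show. A consumer can therefore defer rather than fail, mirroring the qman_portal hunk later in this series; a minimal sketch:

	static int example_child_probe(struct platform_device *pdev)
	{
		int ret = bman_is_probed();

		if (!ret)		/* BMan not probed yet */
			return -EPROBE_DEFER;
		if (ret < 0) {		/* BMan probe failed for good */
			dev_err(&pdev->dev, "BMan probe error, aborting\n");
			return -ENODEV;
		}
		/* ... BMan pools are safe to use from here on ... */
		return 0;
	}
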
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index ecb22749df0b..8cc015183043 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2729,6 +2729,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
unsigned long addr;
+ if (!p)
+ return -ENODEV;
+
addr = gen_pool_alloc(p, cnt);
if (!addr)
return -ENOMEM;
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 79cba58387a5..6fd5fef5f39b 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -273,6 +273,7 @@ static const struct qman_error_info_mdata error_mdata[] = {
static u32 __iomem *qm_ccsr_start;
/* A SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr;
+static int __qman_probed;
static inline u32 qm_ccsr_in(u32 offset)
{
@@ -686,6 +687,12 @@ static int qman_resource_init(struct device *dev)
return 0;
}
+int qman_is_probed(void)
+{
+ return __qman_probed;
+}
+EXPORT_SYMBOL_GPL(qman_is_probed);
+
static int fsl_qman_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -695,6 +702,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
u16 id;
u8 major, minor;
+ __qman_probed = -1;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@@ -828,6 +837,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
if (ret)
return ret;
+ __qman_probed = 1;
+
return 0;
}
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index a120002b630e..3e9391d117c5 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -227,6 +227,14 @@ static int qman_portal_probe(struct platform_device *pdev)
int irq, cpu, err;
u32 val;
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(&pdev->dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg)
return -ENOMEM;
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index c646d8713861..681f7d4b7724 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
{
u32 shift;
- shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE;
+ shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
shift -= tdm_num * 2;
return shift;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index ba79b609aca2..41986d96f24b 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -73,7 +73,7 @@ config QCOM_PM
config QCOM_QMI_HELPERS
tristate
- depends on ARCH_QCOM && NET
+ depends on (ARCH_QCOM || COMPILE_TEST) && NET
help
Helper library for handling QMI encoded messages. QMI encoded
messages are used in communication between the majority of QRTR
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index feed3db21c10..ee89ffb6dde8 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(geni_se_resources_on);
*/
int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
{
- unsigned long freq = 0;
+ long freq = 0;
int i;
if (se->clk_perf_tbl) {
@@ -529,7 +529,7 @@ int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
freq = clk_round_rate(se->clk, freq + 1);
- if (!freq || freq == se->clk_perf_tbl[i - 1])
+ if (freq <= 0 || freq == se->clk_perf_tbl[i - 1])
break;
se->clk_perf_tbl[i] = freq;
}
@@ -544,16 +544,17 @@ EXPORT_SYMBOL(geni_se_clk_tbl_get);
* @se: Pointer to the concerned serial engine.
* @req_freq: Requested clock frequency.
* @index: Index of the resultant frequency in the table.
- * @res_freq: Resultant frequency which matches or is closer to the
- * requested frequency.
+ * @res_freq: Resultant frequency of the source clock.
* @exact: Flag to indicate exact multiple requirement of the requested
* frequency.
*
- * This function is called by the protocol drivers to determine the matching
- * or exact multiple of the requested frequency, as provided by the serial
- * engine clock in order to meet the performance requirements. If there is
- * no matching or exact multiple of the requested frequency found, then it
- * selects the closest floor frequency, if exact flag is not set.
+ * This function is called by the protocol drivers to determine the best match
+ * of the requested frequency as provided by the serial engine clock in order
+ * to meet the performance requirements.
+ *
+ * If we return success:
+ * - if @exact is true then @res_freq / <an_integer> == @req_freq
+ * - if @exact is false then @res_freq / <an_integer> <= @req_freq
*
* Return: 0 on success, standard Linux error codes on failure.
*/
@@ -564,6 +565,9 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
unsigned long *tbl;
int num_clk_levels;
int i;
+ unsigned long best_delta;
+ unsigned long new_delta;
+ unsigned int divider;
num_clk_levels = geni_se_clk_tbl_get(se, &tbl);
if (num_clk_levels < 0)
@@ -572,18 +576,21 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
if (num_clk_levels == 0)
return -EINVAL;
- *res_freq = 0;
+ best_delta = ULONG_MAX;
for (i = 0; i < num_clk_levels; i++) {
- if (!(tbl[i] % req_freq)) {
+ divider = DIV_ROUND_UP(tbl[i], req_freq);
+ new_delta = req_freq - tbl[i] / divider;
+ if (new_delta < best_delta) {
+ /* We have a new best! */
*index = i;
*res_freq = tbl[i];
- return 0;
- }
- if (!(*res_freq) || ((tbl[i] > *res_freq) &&
- (tbl[i] < req_freq))) {
- *index = i;
- *res_freq = tbl[i];
+ /* If the new best is exact then we're done */
+ if (new_delta == 0)
+ return 0;
+
+ /* Record how close we got */
+ best_delta = new_delta;
}
}
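
A worked example of the new search: with a hypothetical table of { 19200000, 100000000 } and req_freq = 7000000, the first entry gives divider = DIV_ROUND_UP(19200000, 7000000) = 3 and delta = 7000000 - 6400000 = 600000, while the second gives divider 15 and delta 7000000 - 6666666 = 333334, so the 100 MHz source wins and @res_freq / divider stays <= @req_freq as the updated kernel-doc promises. The same arithmetic as a standalone check:

	#include <stdio.h>
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long tbl[] = { 19200000, 100000000 };	/* hypothetical */
		unsigned long req = 7000000, best = ~0UL, res = 0;
		unsigned int i;

		for (i = 0; i < 2; i++) {
			unsigned long div = DIV_ROUND_UP(tbl[i], req);
			unsigned long delta = req - tbl[i] / div;

			if (delta < best) {
				best = delta;
				res = tbl[i];
			}
		}
		printf("res=%lu delta=%lu\n", res, best);	/* 100000000, 333334 */
		return 0;
	}
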
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 4b5e250e8615..e5c7e1ef6318 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -899,9 +899,10 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
struct sdw_master_runtime *m_rt = stream->m_rt;
struct sdw_slave_runtime *s_rt, *_s_rt;
- list_for_each_entry_safe(s_rt, _s_rt,
- &m_rt->slave_rt_list, m_rt_node)
- sdw_stream_remove_slave(s_rt->slave, stream);
+ list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream);
+ sdw_release_slave_stream(s_rt->slave, stream);
+ }
list_del(&m_rt->bus_node);
}
@@ -1112,7 +1113,7 @@ int sdw_stream_add_master(struct sdw_bus *bus,
"Master runtime config failed for stream:%s",
stream->name);
ret = -ENOMEM;
- goto error;
+ goto unlock;
}
ret = sdw_config_stream(bus->dev, stream, stream_config, false);
@@ -1123,11 +1124,11 @@ int sdw_stream_add_master(struct sdw_bus *bus,
if (ret)
goto stream_error;
- stream->state = SDW_STREAM_CONFIGURED;
+ goto unlock;
stream_error:
sdw_release_master_stream(stream);
-error:
+unlock:
mutex_unlock(&bus->bus_lock);
return ret;
}
@@ -1141,6 +1142,10 @@ EXPORT_SYMBOL(sdw_stream_add_master);
* @stream: SoundWire stream
* @port_config: Port configuration for audio stream
* @num_ports: Number of ports
+ *
+ * It is expected that a Slave is added to the stream before the
+ * Master is added.
+ *
*/
int sdw_stream_add_slave(struct sdw_slave *slave,
struct sdw_stream_config *stream_config,
@@ -1186,6 +1191,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
if (ret)
goto stream_error;
+ /*
+ * Change stream state to CONFIGURED on first Slave add.
+ * Bus is not aware of number of Slave(s) in a stream at this
+ * point so cannot depend on all Slave(s) to be added in order to
+ * change stream state to CONFIGURED.
+ */
stream->state = SDW_STREAM_CONFIGURED;
goto error;
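
Taken together, the stream.c changes pin down the expected assembly order: Slaves attach first (the first sdw_stream_add_slave() moves the stream to SDW_STREAM_CONFIGURED, per the new comment), the Master attaches last, and teardown now releases each Slave's ports before its stream runtime. A hedged outline of a well-ordered caller (configs and error handling elided):

	struct sdw_stream_runtime *stream;

	stream = sdw_alloc_stream("example-playback");
	if (!stream)
		return -ENOMEM;

	/* Slave(s) first: this transition sets SDW_STREAM_CONFIGURED */
	ret = sdw_stream_add_slave(slave, &stream_config, port_config,
				   num_ports, stream);
	if (ret)
		goto err;

	/* Master last, as the new kernel-doc note requires */
	ret = sdw_stream_add_master(bus, &stream_config, port_config,
				    num_ports, stream);
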
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 671d078349cc..7d3a5c94727e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -83,6 +83,14 @@ config SPI_ATMEL
This selects a driver for the Atmel SPI Controller, present on
many AT91 ARM chips.
+config SPI_AT91_USART
+ tristate "Atmel USART Controller SPI driver"
+ depends on (ARCH_AT91 || COMPILE_TEST)
+ depends on MFD_AT91_USART
+ help
+ This selects a driver for the AT91 USART Controller as SPI Master,
+ present on AT91 and SAMA5 SoC series.
+
config SPI_AU1550
tristate "Au1550/Au1200/Au1300 SPI Controller"
depends on MIPS_ALCHEMY
@@ -129,7 +137,7 @@ config SPI_BCM63XX
config SPI_BCM63XX_HSSPI
tristate "Broadcom BCM63XX HS SPI controller driver"
- depends on BCM63XX || COMPILE_TEST
+ depends on BCM63XX || ARCH_BCM_63XX || COMPILE_TEST
help
This enables support for the High Speed SPI controller present on
newer Broadcom BCM63XX SoCs.
@@ -520,6 +528,12 @@ config SPI_RSPI
help
SPI driver for Renesas RSPI and QSPI blocks.
+config SPI_QCOM_QSPI
+ tristate "QTI QSPI controller"
+ depends on ARCH_QCOM
+ help
+ QSPI (Quad SPI) driver for the Qualcomm QSPI controller.
+
config SPI_QUP
tristate "Qualcomm SPI controller with QUP interface"
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
@@ -533,6 +547,18 @@ config SPI_QUP
This driver can also be built as a module. If so, the module
will be called spi_qup.
+config SPI_QCOM_GENI
+ tristate "Qualcomm GENI based SPI controller"
+ depends on QCOM_GENI_SE
+ help
+ This driver supports the GENI serial engine based SPI controller in
+ master mode on Qualcomm Technologies, Inc. SoCs. If you say
+ yes to this option, support will be included for the built-in SPI
+ interface on these SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-geni-qcom.
+
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
depends on ARCH_S3C24XX
@@ -596,6 +622,22 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
+config SPI_SLAVE_MT27XX
+ tristate "MediaTek SPI slave device"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on SPI_SLAVE
+ help
+ This selects the MediaTek(R) SPI slave device driver for the
+ MT27XX series ARM SoCs. If you want to use the MediaTek(R)
+ SPI slave interface, say Y or M here. If you are not sure, say N.
+
+config SPI_SPRD
+ tristate "Spreadtrum SPI controller"
+ depends on ARCH_SPRD || COMPILE_TEST
+ help
+ SPI driver for Spreadtrum SoCs.
+
config SPI_SPRD_ADI
tristate "Spreadtrum ADI controller"
depends on ARCH_SPRD || COMPILE_TEST
@@ -613,6 +655,15 @@ config SPI_STM32
is not available, the driver automatically falls back to
PIO mode.
+config SPI_STM32_QSPI
+ tristate "STMicroelectronics STM32 QUAD SPI controller"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ help
+ This enables support for the Quad SPI controller in master mode.
+ This driver does not support generic SPI. The implementation
+ only supports the spi-mem interface.
+
config SPI_ST_SSC4
tristate "STMicroelectronics SPI SSC-based driver"
depends on ARCH_STI || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index a90d55970036..3575205c5c27 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
+obj-$(CONFIG_SPI_AT91_USART) += spi-at91-usart.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
obj-$(CONFIG_SPI_AXI_SPI_ENGINE) += spi-axi-spi-engine.o
@@ -74,6 +75,8 @@ obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
+obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
@@ -88,8 +91,11 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
+obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
+obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
obj-$(CONFIG_SPI_STM32) += spi-stm32.o
+obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
new file mode 100644
index 000000000000..a924657642fa
--- /dev/null
+++ b/drivers/spi/spi-at91-usart.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for AT91 USART Controllers as SPI
+//
+// Copyright (C) 2018 Microchip Technology Inc.
+//
+// Author: Radu Pirea <radu.pirea@microchip.com>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+
+#define US_CR 0x00
+#define US_MR 0x04
+#define US_IER 0x08
+#define US_IDR 0x0C
+#define US_CSR 0x14
+#define US_RHR 0x18
+#define US_THR 0x1C
+#define US_BRGR 0x20
+#define US_VERSION 0xFC
+
+#define US_CR_RSTRX BIT(2)
+#define US_CR_RSTTX BIT(3)
+#define US_CR_RXEN BIT(4)
+#define US_CR_RXDIS BIT(5)
+#define US_CR_TXEN BIT(6)
+#define US_CR_TXDIS BIT(7)
+
+#define US_MR_SPI_MASTER 0x0E
+#define US_MR_CHRL GENMASK(7, 6)
+#define US_MR_CPHA BIT(8)
+#define US_MR_CPOL BIT(16)
+#define US_MR_CLKO BIT(18)
+#define US_MR_WRDBT BIT(20)
+#define US_MR_LOOP BIT(15)
+
+#define US_IR_RXRDY BIT(0)
+#define US_IR_TXRDY BIT(1)
+#define US_IR_OVRE BIT(5)
+
+#define US_BRGR_SIZE BIT(16)
+
+#define US_MIN_CLK_DIV 0x06
+#define US_MAX_CLK_DIV BIT(16)
+
+#define US_RESET (US_CR_RSTRX | US_CR_RSTTX)
+#define US_DISABLE (US_CR_RXDIS | US_CR_TXDIS)
+#define US_ENABLE (US_CR_RXEN | US_CR_TXEN)
+#define US_OVRE_RXRDY_IRQS (US_IR_OVRE | US_IR_RXRDY)
+
+#define US_INIT \
+ (US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
+
+/* Register access macros */
+#define at91_usart_spi_readl(port, reg) \
+ readl_relaxed((port)->regs + US_##reg)
+#define at91_usart_spi_writel(port, reg, value) \
+ writel_relaxed((value), (port)->regs + US_##reg)
+
+#define at91_usart_spi_readb(port, reg) \
+ readb_relaxed((port)->regs + US_##reg)
+#define at91_usart_spi_writeb(port, reg, value) \
+ writeb_relaxed((value), (port)->regs + US_##reg)
+
+struct at91_usart_spi {
+ struct spi_transfer *current_transfer;
+ void __iomem *regs;
+ struct device *dev;
+ struct clk *clk;
+
+ /* used in the interrupt handler to protect data reads */
+ spinlock_t lock;
+
+ int irq;
+ unsigned int current_tx_remaining_bytes;
+ unsigned int current_rx_remaining_bytes;
+
+ u32 spi_clk;
+ u32 status;
+
+ bool xfer_failed;
+};
+
+static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_TXRDY;
+}
+
+static inline u32 at91_usart_spi_rx_ready(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_RXRDY;
+}
+
+static inline u32 at91_usart_spi_check_overrun(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_OVRE;
+}
+
+static inline u32 at91_usart_spi_read_status(struct at91_usart_spi *aus)
+{
+ aus->status = at91_usart_spi_readl(aus, CSR);
+ return aus->status;
+}
+
+static inline void at91_usart_spi_tx(struct at91_usart_spi *aus)
+{
+ unsigned int len = aus->current_transfer->len;
+ unsigned int remaining = aus->current_tx_remaining_bytes;
+ const u8 *tx_buf = aus->current_transfer->tx_buf;
+
+ if (!remaining)
+ return;
+
+ if (at91_usart_spi_tx_ready(aus)) {
+ at91_usart_spi_writeb(aus, THR, tx_buf[len - remaining]);
+ aus->current_tx_remaining_bytes--;
+ }
+}
+
+static inline void at91_usart_spi_rx(struct at91_usart_spi *aus)
+{
+ int len = aus->current_transfer->len;
+ int remaining = aus->current_rx_remaining_bytes;
+ u8 *rx_buf = aus->current_transfer->rx_buf;
+
+ if (!remaining)
+ return;
+
+ rx_buf[len - remaining] = at91_usart_spi_readb(aus, RHR);
+ aus->current_rx_remaining_bytes--;
+}
+
+static inline void
+at91_usart_spi_set_xfer_speed(struct at91_usart_spi *aus,
+ struct spi_transfer *xfer)
+{
+ at91_usart_spi_writel(aus, BRGR,
+ DIV_ROUND_UP(aus->spi_clk, xfer->speed_hz));
+}
+
+static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_controller *controller = dev_id;
+ struct at91_usart_spi *aus = spi_master_get_devdata(controller);
+
+ spin_lock(&aus->lock);
+ at91_usart_spi_read_status(aus);
+
+ if (at91_usart_spi_check_overrun(aus)) {
+ aus->xfer_failed = true;
+ at91_usart_spi_writel(aus, IDR, US_IR_OVRE | US_IR_RXRDY);
+ spin_unlock(&aus->lock);
+ return IRQ_HANDLED;
+ }
+
+ if (at91_usart_spi_rx_ready(aus)) {
+ at91_usart_spi_rx(aus);
+ spin_unlock(&aus->lock);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock(&aus->lock);
+
+ return IRQ_NONE;
+}
+
+static int at91_usart_spi_setup(struct spi_device *spi)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
+ u32 *ausd = spi->controller_state;
+ unsigned int mr = at91_usart_spi_readl(aus, MR);
+ u8 bits = spi->bits_per_word;
+
+ if (bits != 8) {
+ dev_dbg(&spi->dev, "Only 8 bits per word are supported\n");
+ return -EINVAL;
+ }
+
+ if (spi->mode & SPI_CPOL)
+ mr |= US_MR_CPOL;
+ else
+ mr &= ~US_MR_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ mr |= US_MR_CPHA;
+ else
+ mr &= ~US_MR_CPHA;
+
+ if (spi->mode & SPI_LOOP)
+ mr |= US_MR_LOOP;
+ else
+ mr &= ~US_MR_LOOP;
+
+ if (!ausd) {
+ ausd = kzalloc(sizeof(*ausd), GFP_KERNEL);
+ if (!ausd)
+ return -ENOMEM;
+
+ spi->controller_state = ausd;
+ }
+
+ *ausd = mr;
+
+ dev_dbg(&spi->dev,
+ "setup: bpw %u mode 0x%x -> mr %d %08x\n",
+ bits, spi->mode, spi->chip_select, mr);
+
+ return 0;
+}
+
+static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ at91_usart_spi_set_xfer_speed(aus, xfer);
+ aus->xfer_failed = false;
+ aus->current_transfer = xfer;
+ aus->current_tx_remaining_bytes = xfer->len;
+ aus->current_rx_remaining_bytes = xfer->len;
+
+ while ((aus->current_tx_remaining_bytes ||
+ aus->current_rx_remaining_bytes) && !aus->xfer_failed) {
+ at91_usart_spi_read_status(aus);
+ at91_usart_spi_tx(aus);
+ cpu_relax();
+ }
+
+ if (aus->xfer_failed) {
+ dev_err(aus->dev, "Overrun!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct spi_device *spi = message->spi;
+ u32 *ausd = spi->controller_state;
+
+ at91_usart_spi_writel(aus, CR, US_ENABLE);
+ at91_usart_spi_writel(aus, IER, US_OVRE_RXRDY_IRQS);
+ at91_usart_spi_writel(aus, MR, *ausd);
+
+ return 0;
+}
+
+static int at91_usart_spi_unprepare_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
+ at91_usart_spi_writel(aus, IDR, US_OVRE_RXRDY_IRQS);
+
+ return 0;
+}
+
+static void at91_usart_spi_cleanup(struct spi_device *spi)
+{
+ u32 *ausd = spi->controller_state;
+
+ spi->controller_state = NULL;
+ kfree(ausd);
+}
+
+static void at91_usart_spi_init(struct at91_usart_spi *aus)
+{
+ at91_usart_spi_writel(aus, MR, US_INIT);
+ at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
+}
+
+static int at91_usart_gpio_setup(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.parent->of_node;
+ int i;
+ int ret;
+ int nb;
+
+ if (!np)
+ return -EINVAL;
+
+ nb = of_gpio_named_count(np, "cs-gpios");
+ for (i = 0; i < nb; i++) {
+ int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+
+ if (cs_gpio < 0)
+ return cs_gpio;
+
+ if (gpio_is_valid(cs_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, cs_gpio,
+ GPIOF_DIR_OUT,
+ dev_name(&pdev->dev));
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int at91_usart_spi_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ struct spi_controller *controller;
+ struct at91_usart_spi *aus;
+ struct clk *clk;
+ int irq;
+ int ret;
+
+ regs = platform_get_resource(to_platform_device(pdev->dev.parent),
+ IORESOURCE_MEM, 0);
+ if (!regs)
+ return -EINVAL;
+
+ irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0);
+ if (irq < 0)
+ return irq;
+
+ clk = devm_clk_get(pdev->dev.parent, "usart");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = -ENOMEM;
+ controller = spi_alloc_master(&pdev->dev, sizeof(*aus));
+ if (!controller)
+ goto at91_usart_spi_probe_fail;
+
+ ret = at91_usart_gpio_setup(pdev);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
+ controller->dev.of_node = pdev->dev.parent->of_node;
+ controller->bits_per_word_mask = SPI_BPW_MASK(8);
+ controller->setup = at91_usart_spi_setup;
+ controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ controller->transfer_one = at91_usart_spi_transfer_one;
+ controller->prepare_message = at91_usart_spi_prepare_message;
+ controller->unprepare_message = at91_usart_spi_unprepare_message;
+ controller->cleanup = at91_usart_spi_cleanup;
+ controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
+ US_MIN_CLK_DIV);
+ controller->min_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
+ US_MAX_CLK_DIV);
+ platform_set_drvdata(pdev, controller);
+
+ aus = spi_master_get_devdata(controller);
+
+ aus->dev = &pdev->dev;
+ aus->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(aus->regs)) {
+ ret = PTR_ERR(aus->regs);
+ goto at91_usart_spi_probe_fail;
+ }
+
+ aus->irq = irq;
+ aus->clk = clk;
+
+ ret = devm_request_irq(&pdev->dev, irq, at91_usart_spi_interrupt, 0,
+ dev_name(&pdev->dev), controller);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ aus->spi_clk = clk_get_rate(clk);
+ at91_usart_spi_init(aus);
+
+ spin_lock_init(&aus->lock);
+ ret = devm_spi_register_master(&pdev->dev, controller);
+ if (ret)
+ goto at91_usart_fail_register_master;
+
+ dev_info(&pdev->dev,
+ "AT91 USART SPI Controller version 0x%x at %pa (irq %d)\n",
+ at91_usart_spi_readl(aus, VERSION),
+ &regs->start, irq);
+
+ return 0;
+
+at91_usart_fail_register_master:
+ clk_disable_unprepare(clk);
+at91_usart_spi_probe_fail:
+ spi_master_put(controller);
+ return ret;
+}
+
+static int at91_usart_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ clk_disable_unprepare(aus->clk);
+
+ return 0;
+}
+
+static const struct of_device_id at91_usart_spi_dt_ids[] = {
+ { .compatible = "microchip,at91sam9g45-usart-spi"},
+ { /* sentinel */}
+};
+
+MODULE_DEVICE_TABLE(of, at91_usart_spi_dt_ids);
+
+static struct platform_driver at91_usart_spi_driver = {
+ .driver = {
+ .name = "at91_usart_spi",
+ },
+ .probe = at91_usart_spi_probe,
+ .remove = at91_usart_spi_remove,
+};
+
+module_platform_driver(at91_usart_spi_driver);
+
+MODULE_DESCRIPTION("Microchip AT91 USART SPI Controller driver");
+MODULE_AUTHOR("Radu Pirea <radu.pirea@microchip.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:at91_usart_spi");
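
The new driver's clocking is a single divider: at91_usart_spi_set_xfer_speed() writes DIV_ROUND_UP(spi_clk, speed_hz) into US_BRGR, and the controller's advertised speed range follows directly from US_MIN_CLK_DIV (6) and US_MAX_CLK_DIV (BIT(16)). Worked numbers for a hypothetical 132 MHz peripheral clock:

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	unsigned long spi_clk = 132000000;			/* hypothetical */
	unsigned long max_hz = DIV_ROUND_UP(132000000UL, 6);	/* 22000000 */
	unsigned long min_hz = DIV_ROUND_UP(132000000UL, 65536);/* 2015 */
	/* a 10 MHz transfer: BRGR = DIV_ROUND_UP(132000000, 10000000) = 14,
	 * actual SCK = 132000000 / 14 ~= 9.43 MHz */
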
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 3f890d162934..74fddcd3282b 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1767,10 +1767,8 @@ static int atmel_spi_suspend(struct device *dev)
/* Stop the queue running */
ret = spi_master_suspend(master);
- if (ret) {
- dev_warn(dev, "cannot suspend master\n");
+ if (ret)
return ret;
- }
if (!pm_runtime_suspended(dev))
atmel_spi_runtime_suspend(dev);
@@ -1799,11 +1797,7 @@ static int atmel_spi_resume(struct device *dev)
}
/* Start the queue running */
- ret = spi_master_resume(master);
- if (ret)
- dev_err(dev, "problem starting queue (%d)\n", ret);
-
- return ret;
+ return spi_master_resume(master);
}
#endif
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 8612525fa4e3..584bcb018a62 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -89,7 +89,7 @@
#define BSPI_BPP_MODE_SELECT_MASK BIT(8)
#define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
-#define BSPI_READ_LENGTH 512
+#define BSPI_READ_LENGTH 256
/* MSPI register offsets */
#define MSPI_SPCR0_LSB 0x000
@@ -355,7 +355,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
int bpc = 0, bpp = 0;
u8 command = op->cmd.opcode;
int width = op->cmd.buswidth ? op->cmd.buswidth : SPI_NBITS_SINGLE;
- int addrlen = op->addr.nbytes * 8;
+ int addrlen = op->addr.nbytes;
int flex_mode = 1;
dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
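
The addrlen change above is a units fix: op->addr.nbytes already counts address bytes, so the old "* 8" handed bcm_qspi_bspi_set_flex_mode() a bit count (24 for a 3-byte address) that no flex-mode case matched. With the fix, an ordinary spi-mem read classifies correctly; a hypothetical op for illustration:

	/* plain 0x03 read, 3-byte address, as an spi-mem user would build it */
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
					  SPI_MEM_OP_ADDR(3, 0x1000, 1),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(256, buf, 1));
	/* addrlen is now op.addr.nbytes == 3, not 24 */
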
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index c23849f7aa7b..9a06ffdb73b8 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -101,6 +101,7 @@ struct bcm63xx_hsspi {
struct platform_device *pdev;
struct clk *clk;
+ struct clk *pll_clk;
void __iomem *regs;
u8 __iomem *fifo;
@@ -332,7 +333,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
struct resource *res_mem;
void __iomem *regs;
struct device *dev = &pdev->dev;
- struct clk *clk;
+ struct clk *clk, *pll_clk = NULL;
int irq, ret;
u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;
@@ -358,7 +359,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
rate = clk_get_rate(clk);
if (!rate) {
- struct clk *pll_clk = devm_clk_get(dev, "pll");
+ pll_clk = devm_clk_get(dev, "pll");
if (IS_ERR(pll_clk)) {
ret = PTR_ERR(pll_clk);
@@ -373,19 +374,20 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
clk_disable_unprepare(pll_clk);
if (!rate) {
ret = -EINVAL;
- goto out_disable_clk;
+ goto out_disable_pll_clk;
}
}
master = spi_alloc_master(&pdev->dev, sizeof(*bs));
if (!master) {
ret = -ENOMEM;
- goto out_disable_clk;
+ goto out_disable_pll_clk;
}
bs = spi_master_get_devdata(master);
bs->pdev = pdev;
bs->clk = clk;
+ bs->pll_clk = pll_clk;
bs->regs = regs;
bs->speed_hz = rate;
bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
@@ -440,6 +442,8 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
out_put_master:
spi_master_put(master);
+out_disable_pll_clk:
+ clk_disable_unprepare(pll_clk);
out_disable_clk:
clk_disable_unprepare(clk);
return ret;
@@ -453,6 +457,7 @@ static int bcm63xx_hsspi_remove(struct platform_device *pdev)
/* reset the hardware and block queue progress */
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+ clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
return 0;
@@ -465,6 +470,7 @@ static int bcm63xx_hsspi_suspend(struct device *dev)
struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
spi_master_suspend(master);
+ clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
return 0;
@@ -480,6 +486,12 @@ static int bcm63xx_hsspi_resume(struct device *dev)
if (ret)
return ret;
+ if (bs->pll_clk) {
+ ret = clk_prepare_enable(bs->pll_clk);
+ if (ret)
+ return ret;
+ }
+
spi_master_resume(master);
return 0;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index a02099c90c5c..56adec83f8fc 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -208,13 +208,11 @@ static inline void clear_io_bits(void __iomem *addr, u32 bits)
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
struct davinci_spi *dspi;
- struct davinci_spi_platform_data *pdata;
struct davinci_spi_config *spicfg = spi->controller_data;
u8 chip_sel = spi->chip_select;
u16 spidat1 = CS_DEFAULT;
dspi = spi_master_get_devdata(spi->master);
- pdata = &dspi->pdata;
/* program delay transfers if tx_delay is non zero */
if (spicfg && spicfg->wdelay)
@@ -232,7 +230,8 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
!(spi->mode & SPI_CS_HIGH));
} else {
if (value == BITBANG_CS_ACTIVE) {
- spidat1 |= SPIDAT1_CSHOLD_MASK;
+ if (!(spi->mode & SPI_CS_WORD))
+ spidat1 |= SPIDAT1_CSHOLD_MASK;
spidat1 &= ~(0x1 << chip_sel);
}
}
@@ -421,26 +420,17 @@ static int davinci_spi_setup(struct spi_device *spi)
{
int retval = 0;
struct davinci_spi *dspi;
- struct davinci_spi_platform_data *pdata;
struct spi_master *master = spi->master;
struct device_node *np = spi->dev.of_node;
bool internal_cs = true;
dspi = spi_master_get_devdata(spi->master);
- pdata = &dspi->pdata;
if (!(spi->mode & SPI_NO_CS)) {
if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) {
retval = gpio_direction_output(
spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
internal_cs = false;
- } else if (pdata->chip_sel &&
- spi->chip_select < pdata->num_chipselect &&
- pdata->chip_sel[spi->chip_select] != SPI_INTERN_CS) {
- spi->cs_gpio = pdata->chip_sel[spi->chip_select];
- retval = gpio_direction_output(
- spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
- internal_cs = false;
}
if (retval) {
@@ -449,8 +439,9 @@ static int davinci_spi_setup(struct spi_device *spi)
return retval;
}
- if (internal_cs)
+ if (internal_cs) {
set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
+ }
}
if (spi->mode & SPI_READY)
@@ -985,7 +976,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->prescaler_limit = pdata->prescaler_limit;
dspi->version = pdata->version;
- dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
+ dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_WORD;
if (dspi->version == SPI_VERSION_2)
dspi->bitbang.flags |= SPI_READY;
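
SPI_CS_WORD asks the controller to toggle chip select between every word, and the davinci change implements it natively by simply not setting SPIDAT1_CSHOLD_MASK, so the hardware drops CS after each word on its own. A hypothetical client (say, an ADC that frames on CS) opts in at setup time:

	spi->mode |= SPI_CS_WORD;
	spi->bits_per_word = 16;
	ret = spi_setup(spi);
	if (ret)
		return ret;	/* the controller may reject SPI_CS_WORD */
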
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index e80f60ed6fdf..3ffb6a40fe0c 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -34,8 +34,9 @@ struct dw_spi_mmio {
};
#define MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL 0x24
-#define OCELOT_IF_SI_OWNER_MASK GENMASK(5, 4)
#define OCELOT_IF_SI_OWNER_OFFSET 4
+#define JAGUAR2_IF_SI_OWNER_OFFSET 6
+#define MSCC_IF_SI_OWNER_MASK GENMASK(1, 0)
#define MSCC_IF_SI_OWNER_SISL 0
#define MSCC_IF_SI_OWNER_SIBM 1
#define MSCC_IF_SI_OWNER_SIMC 2
@@ -76,7 +77,8 @@ static void dw_spi_mscc_set_cs(struct spi_device *spi, bool enable)
}
static int dw_spi_mscc_init(struct platform_device *pdev,
- struct dw_spi_mmio *dwsmmio)
+ struct dw_spi_mmio *dwsmmio,
+ const char *cpu_syscon, u32 if_si_owner_offset)
{
struct dw_spi_mscc *dwsmscc;
struct resource *res;
@@ -92,7 +94,7 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
return PTR_ERR(dwsmscc->spi_mst);
}
- dwsmscc->syscon = syscon_regmap_lookup_by_compatible("mscc,ocelot-cpu-syscon");
+ dwsmscc->syscon = syscon_regmap_lookup_by_compatible(cpu_syscon);
if (IS_ERR(dwsmscc->syscon))
return PTR_ERR(dwsmscc->syscon);
@@ -101,8 +103,8 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
/* Select the owner of the SI interface */
regmap_update_bits(dwsmscc->syscon, MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL,
- OCELOT_IF_SI_OWNER_MASK,
- MSCC_IF_SI_OWNER_SIMC << OCELOT_IF_SI_OWNER_OFFSET);
+ MSCC_IF_SI_OWNER_MASK << if_si_owner_offset,
+ MSCC_IF_SI_OWNER_SIMC << if_si_owner_offset);
dwsmmio->dws.set_cs = dw_spi_mscc_set_cs;
dwsmmio->priv = dwsmscc;
@@ -110,6 +112,28 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
return 0;
}
+static int dw_spi_mscc_ocelot_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ return dw_spi_mscc_init(pdev, dwsmmio, "mscc,ocelot-cpu-syscon",
+ OCELOT_IF_SI_OWNER_OFFSET);
+}
+
+static int dw_spi_mscc_jaguar2_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ return dw_spi_mscc_init(pdev, dwsmmio, "mscc,jaguar2-cpu-syscon",
+ JAGUAR2_IF_SI_OWNER_OFFSET);
+}
+
+static int dw_spi_alpine_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ dwsmmio->dws.cs_override = 1;
+
+ return 0;
+}
+
static int dw_spi_mmio_probe(struct platform_device *pdev)
{
int (*init_func)(struct platform_device *pdev,
@@ -212,7 +236,9 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "snps,dw-apb-ssi", },
- { .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_init},
+ { .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_ocelot_init},
+ { .compatible = "mscc,jaguar2-spi", .data = dw_spi_mscc_jaguar2_init},
+ { .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init},
{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index ac2eb89ef7a5..b705f2bdb8b9 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -144,6 +144,8 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
if (!enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
+ else if (dws->cs_override)
+ dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);
@@ -308,15 +310,10 @@ static int dw_spi_transfer_one(struct spi_controller *master,
dws->current_freq = transfer->speed_hz;
spi_set_clk(dws, chip->clk_div);
}
- if (transfer->bits_per_word == 8) {
- dws->n_bytes = 1;
- dws->dma_width = 1;
- } else if (transfer->bits_per_word == 16) {
- dws->n_bytes = 2;
- dws->dma_width = 2;
- } else {
- return -EINVAL;
- }
+
+ dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
+ dws->dma_width = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
+
/* Default SPI mode is SCPOL = 0, SCPH = 0 */
cr0 = (transfer->bits_per_word - 1)
| (chip->type << SPI_FRF_OFFSET)
@@ -468,6 +465,10 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws)
dws->fifo_len = (fifo == 1) ? 0 : fifo;
dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
}
+
+ /* enable HW fixup for explicit CS deselect for Amazon's alpine chip */
+ if (dws->cs_override)
+ dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
@@ -496,7 +497,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
}
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
- master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
master->bus_num = dws->bus_num;
master->num_chipselect = dws->num_cs;
master->setup = dw_spi_setup;
@@ -572,13 +573,8 @@ EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
int dw_spi_resume_host(struct dw_spi *dws)
{
- int ret;
-
spi_hw_init(&dws->master->dev, dws);
- ret = spi_controller_resume(dws->master);
- if (ret)
- dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
- return ret;
+ return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);
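
The transfer_one rework trades the 8/16-bit special cases for a byte-rounding rule, which is exactly what lets bits_per_word_mask widen to SPI_BPW_RANGE_MASK(4, 16): every word size from 4 to 16 bits maps onto a 1- or 2-byte FIFO access. For instance:

	DIV_ROUND_UP(4, BITS_PER_BYTE);		/* == 1: byte accesses */
	DIV_ROUND_UP(8, BITS_PER_BYTE);		/* == 1 */
	DIV_ROUND_UP(12, BITS_PER_BYTE);	/* == 2: 16-bit accesses */
	DIV_ROUND_UP(16, BITS_PER_BYTE);	/* == 2 */
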
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 0168b08364d5..c9c15881e982 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -32,6 +32,7 @@
#define DW_SPI_IDR 0x58
#define DW_SPI_VERSION 0x5c
#define DW_SPI_DR 0x60
+#define DW_SPI_CS_OVERRIDE 0xf4
/* Bit fields in CTRLR0 */
#define SPI_DFS_OFFSET 0
@@ -109,6 +110,7 @@ struct dw_spi {
u32 fifo_len; /* depth of the FIFO buffer */
u32 max_freq; /* max bus freq supported */
+ int cs_override;
u32 reg_io_width; /* DR I/O width in bytes */
u16 bus_num;
u16 num_cs; /* supported slave numbers */
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index f1526757aaf6..79fc3940245a 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -246,6 +246,19 @@ static int ep93xx_spi_read_write(struct spi_master *master)
return -EINPROGRESS;
}
+static enum dma_transfer_direction
+ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ return DMA_MEM_TO_DEV;
+ case DMA_FROM_DEVICE:
+ return DMA_DEV_TO_MEM;
+ default:
+ return DMA_TRANS_NONE;
+ }
+}
+
/**
* ep93xx_spi_dma_prepare() - prepares a DMA transfer
* @master: SPI master
@@ -257,7 +270,7 @@ static int ep93xx_spi_read_write(struct spi_master *master)
*/
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct spi_master *master,
- enum dma_transfer_direction dir)
+ enum dma_data_direction dir)
{
struct ep93xx_spi *espi = spi_master_get_devdata(master);
struct spi_transfer *xfer = master->cur_msg->state;
@@ -277,9 +290,9 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
memset(&conf, 0, sizeof(conf));
- conf.direction = dir;
+ conf.direction = ep93xx_dma_data_to_trans_dir(dir);
- if (dir == DMA_DEV_TO_MEM) {
+ if (dir == DMA_FROM_DEVICE) {
chan = espi->dma_rx;
buf = xfer->rx_buf;
sgt = &espi->rx_sgt;
@@ -343,7 +356,8 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
if (!nents)
return ERR_PTR(-ENOMEM);
- txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
+ txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction,
+ DMA_CTRL_ACK);
if (!txd) {
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
return ERR_PTR(-ENOMEM);
@@ -360,13 +374,13 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
* unmapped.
*/
static void ep93xx_spi_dma_finish(struct spi_master *master,
- enum dma_transfer_direction dir)
+ enum dma_data_direction dir)
{
struct ep93xx_spi *espi = spi_master_get_devdata(master);
struct dma_chan *chan;
struct sg_table *sgt;
- if (dir == DMA_DEV_TO_MEM) {
+ if (dir == DMA_FROM_DEVICE) {
chan = espi->dma_rx;
sgt = &espi->rx_sgt;
} else {
@@ -381,8 +395,8 @@ static void ep93xx_spi_dma_callback(void *callback_param)
{
struct spi_master *master = callback_param;
- ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV);
- ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
+ ep93xx_spi_dma_finish(master, DMA_TO_DEVICE);
+ ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
spi_finalize_current_transfer(master);
}
@@ -392,15 +406,15 @@ static int ep93xx_spi_dma_transfer(struct spi_master *master)
struct ep93xx_spi *espi = spi_master_get_devdata(master);
struct dma_async_tx_descriptor *rxd, *txd;
- rxd = ep93xx_spi_dma_prepare(master, DMA_DEV_TO_MEM);
+ rxd = ep93xx_spi_dma_prepare(master, DMA_FROM_DEVICE);
if (IS_ERR(rxd)) {
dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
return PTR_ERR(rxd);
}
- txd = ep93xx_spi_dma_prepare(master, DMA_MEM_TO_DEV);
+ txd = ep93xx_spi_dma_prepare(master, DMA_TO_DEVICE);
if (IS_ERR(txd)) {
- ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
+ ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
return PTR_ERR(txd);
}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 7cb3ab0a35a0..3082e72e4f6c 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -30,7 +30,11 @@
#define DRIVER_NAME "fsl-dspi"
+#ifdef CONFIG_M5441x
+#define DSPI_FIFO_SIZE 16
+#else
#define DSPI_FIFO_SIZE 4
+#endif
#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
#define SPI_MCR 0x00
@@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi)
static void dspi_eoq_write(struct fsl_dspi *dspi)
{
int fifo_size = DSPI_FIFO_SIZE;
+ u16 xfer_cmd = dspi->tx_cmd;
/* Fill TX FIFO with as many transfers as possible */
while (dspi->len && fifo_size--) {
+ dspi->tx_cmd = xfer_cmd;
/* Request EOQF for last transfer in FIFO */
if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
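
The dspi_eoq_write() hunk fixes a sticky-flag bug: SPI_PUSHR_CMD_EOQ was OR-ed into dspi->tx_cmd for the last entry of one FIFO fill and then leaked into every entry of the next fill, terminating the queue after a single word. Caching the clean command and restoring it at the top of each pass confines EOQ to the true final entry; the shape of the fixed loop (the driver's single-entry push helper is elided):

	u16 xfer_cmd = dspi->tx_cmd;		/* command without EOQ */

	while (dspi->len && fifo_size--) {
		dspi->tx_cmd = xfer_cmd;	/* undo EOQ from the last pass */
		if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
			dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
		/* ... push one PUSHR entry ... */
	}
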
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 1e8ff6256079..cf2118dc91f4 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -798,10 +798,8 @@ static int of_fsl_espi_suspend(struct device *dev)
int ret;
ret = spi_master_suspend(master);
- if (ret) {
- dev_warn(dev, "cannot suspend master\n");
+ if (ret)
return ret;
- }
return pm_runtime_force_suspend(dev);
}
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index e6d5cc6ab108..51670976faa3 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -276,7 +276,7 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
fsl_lpspi_set_watermark(fsl_lpspi);
- temp = CFGR1_PCSCFG | CFGR1_MASTER | CFGR1_NOSTALL;
+ temp = CFGR1_PCSCFG | CFGR1_MASTER;
if (fsl_lpspi->config.mode & SPI_CS_HIGH)
temp |= CFGR1_PCSPOL;
writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
new file mode 100644
index 000000000000..6432ecc4e2ca
--- /dev/null
+++ b/drivers/spi/spi-geni-qcom.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+
+/* SPI SE specific registers and respective register fields */
+#define SE_SPI_CPHA 0x224
+#define CPHA BIT(0)
+
+#define SE_SPI_LOOPBACK 0x22c
+#define LOOPBACK_ENABLE 0x1
+#define NORMAL_MODE 0x0
+#define LOOPBACK_MSK GENMASK(1, 0)
+
+#define SE_SPI_CPOL 0x230
+#define CPOL BIT(2)
+
+#define SE_SPI_DEMUX_OUTPUT_INV 0x24c
+#define CS_DEMUX_OUTPUT_INV_MSK GENMASK(3, 0)
+
+#define SE_SPI_DEMUX_SEL 0x250
+#define CS_DEMUX_OUTPUT_SEL GENMASK(3, 0)
+
+#define SE_SPI_TRANS_CFG 0x25c
+#define CS_TOGGLE BIT(0)
+
+#define SE_SPI_WORD_LEN 0x268
+#define WORD_LEN_MSK GENMASK(9, 0)
+#define MIN_WORD_LEN 4
+
+#define SE_SPI_TX_TRANS_LEN 0x26c
+#define SE_SPI_RX_TRANS_LEN 0x270
+#define TRANS_LEN_MSK GENMASK(23, 0)
+
+#define SE_SPI_PRE_POST_CMD_DLY 0x274
+
+#define SE_SPI_DELAY_COUNTERS 0x278
+#define SPI_INTER_WORDS_DELAY_MSK GENMASK(9, 0)
+#define SPI_CS_CLK_DELAY_MSK GENMASK(19, 10)
+#define SPI_CS_CLK_DELAY_SHFT 10
+
+/* M_CMD OP codes for SPI */
+#define SPI_TX_ONLY 1
+#define SPI_RX_ONLY 2
+#define SPI_FULL_DUPLEX 3
+#define SPI_TX_RX 7
+#define SPI_CS_ASSERT 8
+#define SPI_CS_DEASSERT 9
+#define SPI_SCK_ONLY 10
+/* M_CMD params for SPI */
+#define SPI_PRE_CMD_DELAY BIT(0)
+#define TIMESTAMP_BEFORE BIT(1)
+#define FRAGMENTATION BIT(2)
+#define TIMESTAMP_AFTER BIT(3)
+#define POST_CMD_DELAY BIT(4)
+
+/* SPI M_COMMAND OPCODE */
+enum spi_mcmd_code {
+ CMD_NONE,
+ CMD_XFER,
+ CMD_CS,
+ CMD_CANCEL,
+};
+
+
+struct spi_geni_master {
+ struct geni_se se;
+ struct device *dev;
+ u32 tx_fifo_depth;
+ u32 fifo_width_bits;
+ u32 tx_wm;
+ unsigned long cur_speed_hz;
+ unsigned int cur_bits_per_word;
+ unsigned int tx_rem_bytes;
+ unsigned int rx_rem_bytes;
+ const struct spi_transfer *cur_xfer;
+ struct completion xfer_done;
+ unsigned int oversampling;
+ spinlock_t lock;
+ unsigned int cur_mcmd;
+ int irq;
+};
+
+static void handle_fifo_timeout(struct spi_master *spi,
+ struct spi_message *msg);
+
+static int get_spi_clk_cfg(unsigned int speed_hz,
+ struct spi_geni_master *mas,
+ unsigned int *clk_idx,
+ unsigned int *clk_div)
+{
+ unsigned long sclk_freq;
+ unsigned int actual_hz;
+ struct geni_se *se = &mas->se;
+ int ret;
+
+ ret = geni_se_clk_freq_match(&mas->se,
+ speed_hz * mas->oversampling,
+ clk_idx, &sclk_freq, false);
+ if (ret) {
+ dev_err(mas->dev, "Failed(%d) to find src clk for %dHz\n",
+ ret, speed_hz);
+ return ret;
+ }
+
+ *clk_div = DIV_ROUND_UP(sclk_freq, mas->oversampling * speed_hz);
+ actual_hz = sclk_freq / (mas->oversampling * *clk_div);
+
+ dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
+ actual_hz, sclk_freq, *clk_idx, *clk_div);
+ ret = clk_set_rate(se->clk, sclk_freq);
+ if (ret)
+ dev_err(mas->dev, "clk_set_rate failed %d\n", ret);
+ return ret;
+}
+
+static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
+ struct spi_master *spi = dev_get_drvdata(mas->dev);
+ struct geni_se *se = &mas->se;
+ unsigned long timeout;
+
+ reinit_completion(&mas->xfer_done);
+ pm_runtime_get_sync(mas->dev);
+ if (!(slv->mode & SPI_CS_HIGH))
+ set_flag = !set_flag;
+
+ mas->cur_mcmd = CMD_CS;
+ if (set_flag)
+ geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
+ else
+ geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
+
+ timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
+ if (!timeout)
+ handle_fifo_timeout(spi, NULL);
+
+ pm_runtime_put(mas->dev);
+}
+
+static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
+ unsigned int bits_per_word)
+{
+ unsigned int pack_words;
+ bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
+ struct geni_se *se = &mas->se;
+ u32 word_len;
+
+ word_len = readl(se->base + SE_SPI_WORD_LEN);
+
+ /*
+ * If bits_per_word does not evenly divide the FIFO width, fall back
+ * to packing one SPI word per FIFO word; otherwise pack as many SPI
+ * words as fit.
+ */
+ if (!(mas->fifo_width_bits % bits_per_word))
+ pack_words = mas->fifo_width_bits / bits_per_word;
+ else
+ pack_words = 1;
+ word_len &= ~WORD_LEN_MSK;
+ word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
+ geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
+ true, true);
+ writel(word_len, se->base + SE_SPI_WORD_LEN);
+}
+
+static int setup_fifo_params(struct spi_device *spi_slv,
+ struct spi_master *spi)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct geni_se *se = &mas->se;
+ u32 loopback_cfg, cpol, cpha, demux_output_inv;
+ u32 demux_sel, clk_sel, m_clk_cfg, idx, div;
+ int ret;
+
+ loopback_cfg = readl(se->base + SE_SPI_LOOPBACK);
+ cpol = readl(se->base + SE_SPI_CPOL);
+ cpha = readl(se->base + SE_SPI_CPHA);
+ demux_output_inv = 0;
+ loopback_cfg &= ~LOOPBACK_MSK;
+ cpol &= ~CPOL;
+ cpha &= ~CPHA;
+
+ if (spi_slv->mode & SPI_LOOP)
+ loopback_cfg |= LOOPBACK_ENABLE;
+
+ if (spi_slv->mode & SPI_CPOL)
+ cpol |= CPOL;
+
+ if (spi_slv->mode & SPI_CPHA)
+ cpha |= CPHA;
+
+ if (spi_slv->mode & SPI_CS_HIGH)
+ demux_output_inv = BIT(spi_slv->chip_select);
+
+ demux_sel = spi_slv->chip_select;
+ mas->cur_speed_hz = spi_slv->max_speed_hz;
+ mas->cur_bits_per_word = spi_slv->bits_per_word;
+
+ ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
+ if (ret) {
+ dev_err(mas->dev, "Err setting clks ret(%d) for %ld\n",
+ ret, mas->cur_speed_hz);
+ return ret;
+ }
+
+ clk_sel = idx & CLK_SEL_MSK;
+ m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
+ spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
+ writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
+ writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
+ writel(cpha, se->base + SE_SPI_CPHA);
+ writel(cpol, se->base + SE_SPI_CPOL);
+ writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
+ writel(clk_sel, se->base + SE_GENI_CLK_SEL);
+ writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
+ return 0;
+}
+
+static int spi_geni_prepare_message(struct spi_master *spi,
+ struct spi_message *spi_msg)
+{
+ int ret;
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct geni_se *se = &mas->se;
+
+ geni_se_select_mode(se, GENI_SE_FIFO);
+ reinit_completion(&mas->xfer_done);
+ ret = setup_fifo_params(spi_msg->spi, spi);
+ if (ret)
+ dev_err(mas->dev, "Couldn't select mode %d\n", ret);
+ return ret;
+}
+
+static int spi_geni_init(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ unsigned int proto, major, minor, ver;
+
+ pm_runtime_get_sync(mas->dev);
+
+ proto = geni_se_read_proto(se);
+ if (proto != GENI_SE_SPI) {
+ dev_err(mas->dev, "Invalid proto %d\n", proto);
+ pm_runtime_put(mas->dev);
+ return -ENXIO;
+ }
+ mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);
+
+ /* Width of Tx and Rx FIFO is same */
+ mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);
+
+ /*
+ * Hardware programming guide suggests to configure
+ * RX FIFO RFR level to fifo_depth-2.
+ */
+ geni_se_init(se, 0x0, mas->tx_fifo_depth - 2);
+ /* Transmit an entire FIFO worth of data per IRQ */
+ mas->tx_wm = 1;
+ ver = geni_se_get_qup_hw_version(se);
+ major = GENI_SE_VERSION_MAJOR(ver);
+ minor = GENI_SE_VERSION_MINOR(ver);
+
+ if (major == 1 && minor == 0)
+ mas->oversampling = 2;
+ else
+ mas->oversampling = 1;
+
+ pm_runtime_put(mas->dev);
+ return 0;
+}
+
+static void setup_fifo_xfer(struct spi_transfer *xfer,
+ struct spi_geni_master *mas,
+ u16 mode, struct spi_master *spi)
+{
+ u32 m_cmd = 0;
+ u32 spi_tx_cfg, len;
+ struct geni_se *se = &mas->se;
+
+ spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
+ if (xfer->bits_per_word != mas->cur_bits_per_word) {
+ spi_setup_word_len(mas, mode, xfer->bits_per_word);
+ mas->cur_bits_per_word = xfer->bits_per_word;
+ }
+
+ /* Speed and bits per word can be overridden per transfer */
+ if (xfer->speed_hz != mas->cur_speed_hz) {
+ int ret;
+ u32 clk_sel, m_clk_cfg;
+ unsigned int idx, div;
+
+ ret = get_spi_clk_cfg(xfer->speed_hz, mas, &idx, &div);
+ if (ret) {
+ dev_err(mas->dev, "Err setting clks:%d\n", ret);
+ return;
+ }
+ /*
+ * SPI core clock gets configured with the requested frequency
+ * or the frequency closer to the requested frequency.
+ * For that reason requested frequency is stored in the
+ * cur_speed_hz and referred in the consecutive transfer instead
+ * of calling clk_get_rate() API.
+ */
+ mas->cur_speed_hz = xfer->speed_hz;
+ clk_sel = idx & CLK_SEL_MSK;
+ m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
+ writel(clk_sel, se->base + SE_GENI_CLK_SEL);
+ writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
+ }
+
+ mas->tx_rem_bytes = 0;
+ mas->rx_rem_bytes = 0;
+ if (xfer->tx_buf && xfer->rx_buf)
+ m_cmd = SPI_FULL_DUPLEX;
+ else if (xfer->tx_buf)
+ m_cmd = SPI_TX_ONLY;
+ else if (xfer->rx_buf)
+ m_cmd = SPI_RX_ONLY;
+
+ spi_tx_cfg &= ~CS_TOGGLE;
+
+ if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
+ len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
+ else
+ len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
+ len &= TRANS_LEN_MSK;
+
+ mas->cur_xfer = xfer;
+ if (m_cmd & SPI_TX_ONLY) {
+ mas->tx_rem_bytes = xfer->len;
+ writel(len, se->base + SE_SPI_TX_TRANS_LEN);
+ }
+
+ if (m_cmd & SPI_RX_ONLY) {
+ writel(len, se->base + SE_SPI_RX_TRANS_LEN);
+ mas->rx_rem_bytes = xfer->len;
+ }
+ writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
+ mas->cur_mcmd = CMD_XFER;
+ geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
+
+ /*
+ * TX_WATERMARK_REG should be set after SPI configuration and
+ * setting up GENI SE engine, as driver starts data transfer
+ * for the watermark interrupt.
+ */
+ if (m_cmd & SPI_TX_ONLY)
+ writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
+}
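
The length programmed into SE_SPI_TX/RX_TRANS_LEN above is a count of SPI words, not bytes. A small illustration of the conversion (MIN_WORD_LEN is defined earlier in this driver; 4 is assumed here):

/* Illustration only: mirrors the len computation in setup_fifo_xfer().
 * MIN_WORD_LEN is assumed to be 4, matching the driver's definition.
 */
static unsigned int bytes_to_spi_words(unsigned int len, unsigned int bpw)
{
	if (!(bpw % MIN_WORD_LEN))
		return len * BITS_PER_BYTE / bpw;	/* packed words */
	return len / (bpw / BITS_PER_BYTE + 1);		/* unpacked words */
}

/* bytes_to_spi_words(12, 8)  -> 12 words
 * bytes_to_spi_words(12, 16) ->  6 words
 * bytes_to_spi_words(12, 14) ->  6 words (one 14-bit word per 2 bytes)
 */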
+
+static void handle_fifo_timeout(struct spi_master *spi,
+ struct spi_message *msg)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ unsigned long time_left, flags;
+ struct geni_se *se = &mas->se;
+
+ spin_lock_irqsave(&mas->lock, flags);
+ reinit_completion(&mas->xfer_done);
+ mas->cur_mcmd = CMD_CANCEL;
+ geni_se_cancel_m_cmd(se);
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ spin_unlock_irqrestore(&mas->lock, flags);
+ time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
+ if (time_left)
+ return;
+
+ spin_lock_irqsave(&mas->lock, flags);
+ reinit_completion(&mas->xfer_done);
+ geni_se_abort_m_cmd(se);
+ spin_unlock_irqrestore(&mas->lock, flags);
+ time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
+ if (!time_left)
+ dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+}
+
+static int spi_geni_transfer_one(struct spi_master *spi,
+ struct spi_device *slv,
+ struct spi_transfer *xfer)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ /* Terminate and return success for 0 byte length transfer */
+ if (!xfer->len)
+ return 0;
+
+ setup_fifo_xfer(xfer, mas, slv->mode, spi);
+ return 1;
+}
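
Returning 1 from transfer_one tells the SPI core that the transfer was started and will complete asynchronously; the core then waits until spi_finalize_current_transfer() is called, here from geni_spi_isr() on M_CMD_DONE. A minimal sketch of that contract, with a hypothetical my_start_hw() standing in for the FIFO/DMA setup:

/* Sketch of the asynchronous transfer_one contract; my_start_hw() is a
 * hypothetical stand-in for driver-specific hardware setup.
 */
static int sketch_transfer_one(struct spi_master *ctlr,
			       struct spi_device *dev,
			       struct spi_transfer *xfer)
{
	if (!xfer->len)
		return 0;	/* nothing started, report completion */

	my_start_hw(xfer);	/* kick off the hardware */
	return 1;		/* in flight; the ISR finalizes it */
}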
+
+static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
+{
+ /*
+ * Calculate how many bytes we'll put in each FIFO word. If the
+ * transfer words don't pack cleanly into a FIFO word we'll just put
+ * one transfer word in each FIFO word. If they do pack we'll pack 'em.
+ */
+ if (mas->fifo_width_bits % mas->cur_bits_per_word)
+ return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
+ BITS_PER_BYTE));
+
+ return mas->fifo_width_bits / BITS_PER_BYTE;
+}
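
A worked example of the packing rule, for the common 32-bit FIFO width:

/* geni_byte_per_fifo_word() for fifo_width_bits == 32:
 *
 *   bpw = 8:  32 % 8 == 0  -> 4 bytes per FIFO word (4 packed words)
 *   bpw = 16: 32 % 16 == 0 -> 4 bytes per FIFO word (2 packed words)
 *   bpw = 14: 32 % 14 != 0 -> roundup_pow_of_two(DIV_ROUND_UP(14, 8))
 *                             = 2 bytes, one unpacked word per FIFO word
 */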
+
+static void geni_spi_handle_tx(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ unsigned int max_bytes;
+ const u8 *tx_buf;
+ unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
+ unsigned int i = 0;
+
+ max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
+ if (mas->tx_rem_bytes < max_bytes)
+ max_bytes = mas->tx_rem_bytes;
+
+ tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
+ while (i < max_bytes) {
+ unsigned int j;
+ unsigned int bytes_to_write;
+ u32 fifo_word = 0;
+ u8 *fifo_byte = (u8 *)&fifo_word;
+
+ bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
+ for (j = 0; j < bytes_to_write; j++)
+ fifo_byte[j] = tx_buf[i++];
+ iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
+ }
+ mas->tx_rem_bytes -= max_bytes;
+ if (!mas->tx_rem_bytes)
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+}
+
+static void geni_spi_handle_rx(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 rx_fifo_status;
+ unsigned int rx_bytes;
+ unsigned int rx_last_byte_valid;
+ u8 *rx_buf;
+ unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
+ unsigned int i = 0;
+
+ rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
+ rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
+ if (rx_fifo_status & RX_LAST) {
+ rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
+ rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
+ if (rx_last_byte_valid && rx_last_byte_valid < 4)
+ rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
+ }
+ if (mas->rx_rem_bytes < rx_bytes)
+ rx_bytes = mas->rx_rem_bytes;
+
+ rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
+ while (i < rx_bytes) {
+ u32 fifo_word = 0;
+ u8 *fifo_byte = (u8 *)&fifo_word;
+ unsigned int bytes_to_read;
+ unsigned int j;
+
+ bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
+ ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
+ for (j = 0; j < bytes_to_read; j++)
+ rx_buf[i++] = fifo_byte[j];
+ }
+ mas->rx_rem_bytes -= rx_bytes;
+}
+
+static irqreturn_t geni_spi_isr(int irq, void *data)
+{
+ struct spi_master *spi = data;
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct geni_se *se = &mas->se;
+ u32 m_irq;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ if (mas->cur_mcmd == CMD_NONE)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&mas->lock, flags);
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+
+ if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
+ geni_spi_handle_rx(mas);
+
+ if (m_irq & M_TX_FIFO_WATERMARK_EN)
+ geni_spi_handle_tx(mas);
+
+ if (m_irq & M_CMD_DONE_EN) {
+ if (mas->cur_mcmd == CMD_XFER)
+ spi_finalize_current_transfer(spi);
+ else if (mas->cur_mcmd == CMD_CS)
+ complete(&mas->xfer_done);
+ mas->cur_mcmd = CMD_NONE;
+ /*
+ * If this happens, then a CMD_DONE came before all the Tx
+ * buffer bytes were sent out. This is unusual; log the
+ * condition and disable the WM interrupt to prevent the
+ * system from stalling due to an interrupt storm.
+ * If this happens before all Rx bytes have been received, log
+ * the condition.
+ * The only known time this can happen is if bits_per_word != 8
+ * and some registers that expect transfer lengths in SPI words
+ * weren't written correctly.
+ */
+ if (mas->tx_rem_bytes) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
+ mas->tx_rem_bytes, mas->cur_bits_per_word);
+ }
+ if (mas->rx_rem_bytes)
+ dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
+ mas->rx_rem_bytes, mas->cur_bits_per_word);
+ }
+
+ if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN)) {
+ mas->cur_mcmd = CMD_NONE;
+ complete(&mas->xfer_done);
+ }
+
+ writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
+ spin_unlock_irqrestore(&mas->lock, flags);
+ return ret;
+}
+
+static int spi_geni_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct spi_master *spi;
+ struct spi_geni_master *mas;
+ struct resource *res;
+ struct geni_se *se;
+
+ spi = spi_alloc_master(&pdev->dev, sizeof(*mas));
+ if (!spi)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, spi);
+ mas = spi_master_get_devdata(spi);
+ mas->dev = &pdev->dev;
+ mas->se.dev = &pdev->dev;
+ mas->se.wrapper = dev_get_drvdata(pdev->dev.parent);
+ se = &mas->se;
+
+ spi->bus_num = -1;
+ spi->dev.of_node = pdev->dev.of_node;
+ mas->se.clk = devm_clk_get(&pdev->dev, "se");
+ if (IS_ERR(mas->se.clk)) {
+ ret = PTR_ERR(mas->se.clk);
+ dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+ goto spi_geni_probe_err;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ se->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(se->base)) {
+ ret = PTR_ERR(se->base);
+ goto spi_geni_probe_err;
+ }
+
+ spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
+ spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ spi->num_chipselect = 4;
+ spi->max_speed_hz = 50000000;
+ spi->prepare_message = spi_geni_prepare_message;
+ spi->transfer_one = spi_geni_transfer_one;
+ spi->auto_runtime_pm = true;
+ spi->handle_err = handle_fifo_timeout;
+ spi->set_cs = spi_geni_set_cs;
+
+ init_completion(&mas->xfer_done);
+ spin_lock_init(&mas->lock);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = spi_geni_init(mas);
+ if (ret)
+ goto spi_geni_probe_runtime_disable;
+
+ mas->irq = platform_get_irq(pdev, 0);
+ if (mas->irq < 0) {
+ ret = mas->irq;
+ dev_err(&pdev->dev, "Err getting IRQ %d\n", ret);
+ goto spi_geni_probe_runtime_disable;
+ }
+
+ ret = request_irq(mas->irq, geni_spi_isr,
+ IRQF_TRIGGER_HIGH, "spi_geni", spi);
+ if (ret)
+ goto spi_geni_probe_runtime_disable;
+
+ ret = spi_register_master(spi);
+ if (ret)
+ goto spi_geni_probe_free_irq;
+
+ return 0;
+spi_geni_probe_free_irq:
+ free_irq(mas->irq, spi);
+spi_geni_probe_runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+spi_geni_probe_err:
+ spi_master_put(spi);
+ return ret;
+}
+
+static int spi_geni_remove(struct platform_device *pdev)
+{
+ struct spi_master *spi = platform_get_drvdata(pdev);
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ /* Unregister _before_ disabling pm_runtime() so we stop transfers */
+ spi_unregister_master(spi);
+
+ free_irq(mas->irq, spi);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
+{
+ struct spi_master *spi = dev_get_drvdata(dev);
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ return geni_se_resources_off(&mas->se);
+}
+
+static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
+{
+ struct spi_master *spi = dev_get_drvdata(dev);
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ return geni_se_resources_on(&mas->se);
+}
+
+static int __maybe_unused spi_geni_suspend(struct device *dev)
+{
+ struct spi_master *spi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(spi);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ spi_master_resume(spi);
+
+ return ret;
+}
+
+static int __maybe_unused spi_geni_resume(struct device *dev)
+{
+ struct spi_master *spi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ ret = spi_master_resume(spi);
+ if (ret)
+ pm_runtime_force_suspend(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops spi_geni_pm_ops = {
+ SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
+ spi_geni_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
+};
+
+static const struct of_device_id spi_geni_dt_match[] = {
+ { .compatible = "qcom,geni-spi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_geni_dt_match);
+
+static struct platform_driver spi_geni_driver = {
+ .probe = spi_geni_probe,
+ .remove = spi_geni_remove,
+ .driver = {
+ .name = "geni_spi",
+ .pm = &spi_geni_pm_ops,
+ .of_match_table = spi_geni_dt_match,
+ },
+};
+module_platform_driver(spi_geni_driver);
+
+MODULE_DESCRIPTION("SPI driver for GENI based QUP cores");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 0626e6e3ea0c..45973ee3ae11 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -295,13 +295,15 @@ static int spi_gpio_request(struct device *dev,
spi_gpio->miso = devm_gpiod_get_optional(dev, "miso", GPIOD_IN);
if (IS_ERR(spi_gpio->miso))
return PTR_ERR(spi_gpio->miso);
- if (!spi_gpio->miso)
- /* HW configuration without MISO pin */
- *mflags |= SPI_MASTER_NO_RX;
+ /*
+ * Don't set SPI_MASTER_NO_RX here - if only a MOSI pin is
+ * connected, the host can still do RX by changing the
+ * direction of the line.
+ */
spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
- if (IS_ERR(spi_gpio->mosi))
- return PTR_ERR(spi_gpio->mosi);
+ if (IS_ERR(spi_gpio->sck))
+ return PTR_ERR(spi_gpio->sck);
for (i = 0; i < num_chipselects; i++) {
spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs",
@@ -423,7 +425,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
spi_gpio->bitbang.set_line_direction = spi_gpio_set_direction;
- if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
+ if ((master_flags & SPI_MASTER_NO_TX) == 0) {
spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
@@ -447,10 +449,8 @@ static int spi_gpio_probe(struct platform_device *pdev)
static int spi_gpio_remove(struct platform_device *pdev)
{
struct spi_gpio *spi_gpio;
- struct spi_gpio_platform_data *pdata;
spi_gpio = platform_get_drvdata(pdev);
- pdata = dev_get_platdata(&pdev->dev);
/* stop() unregisters child devices too */
spi_bitbang_stop(&spi_gpio->bitbang);
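
The comment in the hunk above notes that a host wired with only a MOSI line can still receive by flipping that line to an input. A hedged sketch of the idea with the gpiod consumer API (mode 0, MSB first; the descriptors are the ones requested in spi_gpio_request(), and this is an illustration rather than the driver's actual txrx path):

/* Half-duplex RX over a shared data line; illustration only. */
static u8 three_wire_read_byte(struct gpio_desc *sck, struct gpio_desc *mosi)
{
	u8 val = 0;
	int i;

	gpiod_direction_input(mosi);		/* borrow the line for RX */
	for (i = 7; i >= 0; i--) {
		gpiod_set_value(sck, 1);
		val |= gpiod_get_value(mosi) << i;
		gpiod_set_value(sck, 0);
	}
	gpiod_direction_output(mosi, 0);	/* hand the line back to TX */
	return val;
}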
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 08dd3a31a3e5..dd1ce12aa386 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -63,6 +63,7 @@ struct spi_imx_devtype_data {
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
+ void (*setup_wml)(struct spi_imx_data *);
void (*disable)(struct spi_imx_data *);
bool has_dmamode;
bool has_slavemode;
@@ -216,7 +217,6 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- unsigned int bytes_per_word, i;
if (!master->dma_rx)
return false;
@@ -224,14 +224,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
if (spi_imx->slave_mode)
return false;
- bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
-
- for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
- if (!(transfer->len % (i * bytes_per_word)))
- break;
- }
+ if (transfer->len < spi_imx->devtype_data->fifo_size)
+ return false;
- spi_imx->wml = i;
spi_imx->dynamic_burst = 0;
return true;
@@ -583,18 +578,21 @@ static int mx51_ecspi_config(struct spi_device *spi)
else /* SCLK is _very_ slow */
usleep_range(delay, delay + 10);
+ return 0;
+}
+
+static void mx51_setup_wml(struct spi_imx_data *spi_imx)
+{
/*
* Configure the DMA register: setup the watermark
* and enable DMA request.
*/
- writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) |
+ writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
-
- return 0;
}
static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
@@ -931,6 +929,7 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.trigger = mx51_ecspi_trigger,
.rx_available = mx51_ecspi_rx_available,
.reset = mx51_ecspi_reset,
+ .setup_wml = mx51_setup_wml,
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
@@ -1138,7 +1137,6 @@ static int spi_imx_setupxfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
- int ret;
if (!t)
return 0;
@@ -1179,12 +1177,6 @@ static int spi_imx_setupxfer(struct spi_device *spi,
else
spi_imx->usedma = 0;
- if (spi_imx->usedma) {
- ret = spi_imx_dma_configure(spi->master);
- if (ret)
- return ret;
- }
-
if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
spi_imx->rx = mx53_ecspi_rx_slave;
spi_imx->tx = mx53_ecspi_tx_slave;
@@ -1289,6 +1281,31 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
unsigned long timeout;
struct spi_master *master = spi_imx->bitbang.master;
struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
+ struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
+ unsigned int bytes_per_word, i;
+ int ret;
+
+ /* Derive the burst length from the last sg entry so no tail data is left */
+ bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
+ for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
+ if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
+ break;
+ }
+ /* Fall back to a wml of 1 if no suitable burst length was found */
+ if (i == 0)
+ i = 1;
+
+ spi_imx->wml = i;
+
+ ret = spi_imx_dma_configure(master);
+ if (ret)
+ return ret;
+
+ if (!spi_imx->devtype_data->setup_wml) {
+ dev_err(spi_imx->dev, "No setup_wml()?\n");
+ return -EINVAL;
+ }
+ spi_imx->devtype_data->setup_wml(spi_imx);
/*
* The TX DMA setup starts the transfer, so make sure RX is configured
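
The burst-length search above walks down from fifo_size/2 looking for a divisor of the last scatterlist entry, so the final DMA burst never overshoots the buffer, and the fallback of 1 guarantees a usable watermark. A standalone version with worked values:

/* Illustration of the wml selection in spi_imx_dma_transfer(). */
static unsigned int pick_wml(unsigned int fifo_size, unsigned int sg_len,
			     unsigned int bytes_per_word)
{
	unsigned int i;

	for (i = fifo_size / 2; i > 0; i--)
		if (!(sg_len % (i * bytes_per_word)))
			break;
	return i ? i : 1;	/* fall back to 1 if nothing divides sg_len */
}

/* pick_wml(64, 24, 1) == 24: the 24-byte tail drains in one burst.
 * pick_wml(64, 6, 4)  == 1:  6 bytes is not a multiple of the 4-byte
 * word, so burst one word at a time rather than refusing DMA outright.
 */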
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index e43842c7a31a..62a7b80801d2 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -12,6 +12,8 @@
#include "internals.h"
+#define SPI_MEM_MAX_BUSWIDTH 4
+
/**
* spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
* memory operation
@@ -149,6 +151,44 @@ static bool spi_mem_default_supports_op(struct spi_mem *mem,
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
+static bool spi_mem_buswidth_is_valid(u8 buswidth)
+{
+ if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
+ return false;
+
+ return true;
+}
+
+static int spi_mem_check_op(const struct spi_mem_op *op)
+{
+ if (!op->cmd.buswidth)
+ return -EINVAL;
+
+ if ((op->addr.nbytes && !op->addr.buswidth) ||
+ (op->dummy.nbytes && !op->dummy.buswidth) ||
+ (op->data.nbytes && !op->data.buswidth))
+ return -EINVAL;
+
+ if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
+ !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
+ !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
+ !spi_mem_buswidth_is_valid(op->data.buswidth))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool spi_mem_internal_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+
+ if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
+ return ctlr->mem_ops->supports_op(mem, op);
+
+ return spi_mem_default_supports_op(mem, op);
+}
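
With validation factored out, spi_mem_check_op() rejects malformed ops before any controller callback runs: a missing command buswidth, a phase with bytes but no buswidth, a width with more than one bit set, or a width above SPI_MEM_MAX_BUSWIDTH. A hedged usage sketch building a 1-1-4 fast read with the SPI_MEM_OP helpers from <linux/spi/spi-mem.h> (mem and buf are assumed to come from the calling flash driver):

/* Sketch: a 1-1-4 fast read (opcode 0x6b), 256 data bytes. */
struct spi_mem_op op =
	SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
		   SPI_MEM_OP_ADDR(3, 0, 1),
		   SPI_MEM_OP_DUMMY(1, 1),
		   SPI_MEM_OP_DATA_IN(256, buf, 4));

/* A buswidth of 0 or 3 anywhere would now fail spi_mem_check_op()
 * before the controller's supports_op() callback is even consulted.
 */
if (!spi_mem_supports_op(mem, &op))
	return -ENOTSUPP;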
+
/**
* spi_mem_supports_op() - Check if a memory device and the controller it is
* connected to support a specific memory operation
@@ -166,12 +206,10 @@ EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
*/
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct spi_controller *ctlr = mem->spi->controller;
-
- if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
- return ctlr->mem_ops->supports_op(mem, op);
+ if (spi_mem_check_op(op))
+ return false;
- return spi_mem_default_supports_op(mem, op);
+ return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
@@ -196,7 +234,11 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
u8 *tmpbuf;
int ret;
- if (!spi_mem_supports_op(mem, op))
+ ret = spi_mem_check_op(op);
+ if (ret)
+ return ret;
+
+ if (!spi_mem_internal_supports_op(mem, op))
return -ENOTSUPP;
if (ctlr->mem_ops) {
@@ -346,10 +388,25 @@ EXPORT_SYMBOL_GPL(spi_mem_get_name);
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct spi_controller *ctlr = mem->spi->controller;
+ size_t len;
+
+ len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
return ctlr->mem_ops->adjust_op_size(mem, op);
+ if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
+ if (len > spi_max_transfer_size(mem->spi))
+ return -EINVAL;
+
+ op->data.nbytes = min3((size_t)op->data.nbytes,
+ spi_max_transfer_size(mem->spi),
+ spi_max_message_size(mem->spi) -
+ len);
+ if (!op->data.nbytes)
+ return -EINVAL;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
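
For controllers without exec_op(), the clamp above keeps an op within the plain-SPI limits: len counts the opcode, address and dummy bytes that must precede the data, and data.nbytes is capped by both the per-transfer and the remaining per-message budget.

/* Worked example, assuming both spi_max_transfer_size() and
 * spi_max_message_size() return 64:
 *
 *   len         = 1 (opcode) + 3 (addr) + 1 (dummy) = 5
 *   data.nbytes = min3(256, 64, 64 - 5) = 59
 *
 * The caller is expected to split the remaining 197 data bytes into
 * follow-up ops.
 */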
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 86bf45667a04..3dc31627c655 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -98,6 +98,7 @@ struct mtk_spi {
struct clk *parent_clk, *sel_clk, *spi_clk;
struct spi_transfer *cur_transfer;
u32 xfer_len;
+ u32 num_xfered;
struct scatterlist *tx_sgl, *rx_sgl;
u32 tx_sgl_len, rx_sgl_len;
const struct mtk_spi_compatible *dev_comp;
@@ -385,6 +386,7 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
mdata->cur_transfer = xfer;
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
+ mdata->num_xfered = 0;
mtk_spi_prepare_transfer(master, xfer);
mtk_spi_setup_packet(master);
@@ -415,6 +417,7 @@ static int mtk_spi_dma_transfer(struct spi_master *master,
mdata->tx_sgl_len = 0;
mdata->rx_sgl_len = 0;
mdata->cur_transfer = xfer;
+ mdata->num_xfered = 0;
mtk_spi_prepare_transfer(master, xfer);
@@ -482,7 +485,7 @@ static int mtk_spi_setup(struct spi_device *spi)
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
- u32 cmd, reg_val, cnt, remainder;
+ u32 cmd, reg_val, cnt, remainder, len;
struct spi_master *master = dev_id;
struct mtk_spi *mdata = spi_master_get_devdata(master);
struct spi_transfer *trans = mdata->cur_transfer;
@@ -497,36 +500,38 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
if (trans->rx_buf) {
cnt = mdata->xfer_len / 4;
ioread32_rep(mdata->base + SPI_RX_DATA_REG,
- trans->rx_buf, cnt);
+ trans->rx_buf + mdata->num_xfered, cnt);
remainder = mdata->xfer_len % 4;
if (remainder > 0) {
reg_val = readl(mdata->base + SPI_RX_DATA_REG);
- memcpy(trans->rx_buf + (cnt * 4),
- &reg_val, remainder);
+ memcpy(trans->rx_buf +
+ mdata->num_xfered +
+ (cnt * 4),
+ &reg_val,
+ remainder);
}
}
- trans->len -= mdata->xfer_len;
- if (!trans->len) {
+ mdata->num_xfered += mdata->xfer_len;
+ if (mdata->num_xfered == trans->len) {
spi_finalize_current_transfer(master);
return IRQ_HANDLED;
}
- if (trans->tx_buf)
- trans->tx_buf += mdata->xfer_len;
- if (trans->rx_buf)
- trans->rx_buf += mdata->xfer_len;
-
- mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, trans->len);
+ len = trans->len - mdata->num_xfered;
+ mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
mtk_spi_setup_packet(master);
- cnt = trans->len / 4;
- iowrite32_rep(mdata->base + SPI_TX_DATA_REG, trans->tx_buf, cnt);
+ cnt = len / 4;
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
+ trans->tx_buf + mdata->num_xfered, cnt);
- remainder = trans->len % 4;
+ remainder = len % 4;
if (remainder > 0) {
reg_val = 0;
- memcpy(&reg_val, trans->tx_buf + (cnt * 4), remainder);
+ memcpy(&reg_val,
+ trans->tx_buf + (cnt * 4) + mdata->num_xfered,
+ remainder);
writel(reg_val, mdata->base + SPI_TX_DATA_REG);
}
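
Keeping a num_xfered cursor instead of advancing trans->tx_buf/rx_buf (as the old code did) leaves the spi_transfer untouched for the SPI core and for later inspection. The refill logic reduces to an offset over a fixed buffer, roughly:

/* Sketch of offset-based FIFO chunking; fill_fifo() and wait_for_irq()
 * are hypothetical stand-ins for the register writes and IRQ handling.
 */
unsigned int num_xfered = 0;

while (num_xfered < total_len) {
	unsigned int chunk = min(fifo_size, total_len - num_xfered);

	fill_fifo(tx_buf + num_xfered, chunk);
	wait_for_irq();
	num_xfered += chunk;	/* the buffer pointers never move */
}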
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 508c61c669e7..f024c3fc3679 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -33,6 +33,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gcd.h>
+#include <linux/iopoll.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
@@ -126,6 +127,7 @@ struct omap2_mcspi_regs {
};
struct omap2_mcspi {
+ struct completion txdone;
struct spi_master *master;
/* Virtual base address of the controller */
void __iomem *base;
@@ -135,6 +137,7 @@ struct omap2_mcspi {
struct device *dev;
struct omap2_mcspi_regs ctx;
int fifo_depth;
+ bool slave_aborted;
unsigned int pin_dir:1;
};
@@ -274,19 +277,23 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
}
}
-static void omap2_mcspi_set_master_mode(struct spi_master *master)
+static void omap2_mcspi_set_mode(struct spi_master *master)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
u32 l;
/*
- * Setup when switching from (reset default) slave mode
- * to single-channel master mode
+ * Choose master or slave mode
*/
l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
- l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
- l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+ l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
+ if (spi_controller_is_slave(master)) {
+ l |= (OMAP2_MCSPI_MODULCTRL_MS);
+ } else {
+ l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
+ l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+ }
mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
ctx->modulctrl = l;
@@ -299,7 +306,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
unsigned int wcnt;
- int max_fifo_depth, fifo_depth, bytes_per_word;
+ int max_fifo_depth, bytes_per_word;
u32 chconf, xferlevel;
mcspi = spi_master_get_devdata(master);
@@ -315,10 +322,6 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
else
max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
- fifo_depth = gcd(t->len, max_fifo_depth);
- if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
- goto disable_fifo;
-
wcnt = t->len / bytes_per_word;
if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
goto disable_fifo;
@@ -326,16 +329,17 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
xferlevel = wcnt << 16;
if (t->rx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFER;
- xferlevel |= (fifo_depth - 1) << 8;
+ xferlevel |= (bytes_per_word - 1) << 8;
}
+
if (t->tx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFET;
- xferlevel |= fifo_depth - 1;
+ xferlevel |= bytes_per_word - 1;
}
mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
mcspi_write_chconf0(spi, chconf);
- mcspi->fifo_depth = fifo_depth;
+ mcspi->fifo_depth = max_fifo_depth;
return;
}
@@ -353,18 +357,22 @@ disable_fifo:
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
- unsigned long timeout;
-
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!(readl_relaxed(reg) & bit)) {
- if (time_after(jiffies, timeout)) {
- if (!(readl_relaxed(reg) & bit))
- return -ETIMEDOUT;
- else
- return 0;
- }
- cpu_relax();
+ u32 val;
+
+ return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC);
+}
+
+static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
+ struct completion *x)
+{
+ if (spi_controller_is_slave(mcspi->master)) {
+ if (wait_for_completion_interruptible(x) ||
+ mcspi->slave_aborted)
+ return -EINTR;
+ } else {
+ wait_for_completion(x);
}
+
return 0;
}
@@ -517,7 +525,12 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
dma_async_issue_pending(mcspi_dma->dma_rx);
omap2_mcspi_set_dma_req(spi, 1, 1);
- wait_for_completion(&mcspi_dma->dma_rx_completion);
+ ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
+ if (ret || mcspi->slave_aborted) {
+ dmaengine_terminate_sync(mcspi_dma->dma_rx);
+ omap2_mcspi_set_dma_req(spi, 1, 0);
+ return 0;
+ }
for (x = 0; x < nb_sizes; x++)
kfree(sg_out[x]);
@@ -585,7 +598,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
struct dma_slave_config cfg;
enum dma_slave_buswidth width;
unsigned es;
- u32 burst;
void __iomem *chstat_reg;
void __iomem *irqstat_reg;
int wait_res;
@@ -605,34 +617,49 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
}
count = xfer->len;
- burst = 1;
-
- if (mcspi->fifo_depth > 0) {
- if (count > mcspi->fifo_depth)
- burst = mcspi->fifo_depth / es;
- else
- burst = count / es;
- }
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
cfg.src_addr_width = width;
cfg.dst_addr_width = width;
- cfg.src_maxburst = burst;
- cfg.dst_maxburst = burst;
+ cfg.src_maxburst = es;
+ cfg.dst_maxburst = es;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
- if (tx != NULL)
+ mcspi->slave_aborted = false;
+ reinit_completion(&mcspi_dma->dma_tx_completion);
+ reinit_completion(&mcspi_dma->dma_rx_completion);
+ reinit_completion(&mcspi->txdone);
+ if (tx) {
+ /* Enable the EOW IRQ to detect the end of TX in slave mode */
+ if (spi_controller_is_slave(spi->master))
+ mcspi_write_reg(spi->master,
+ OMAP2_MCSPI_IRQENABLE,
+ OMAP2_MCSPI_IRQSTATUS_EOW);
omap2_mcspi_tx_dma(spi, xfer, cfg);
+ }
if (rx != NULL)
count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
if (tx != NULL) {
- wait_for_completion(&mcspi_dma->dma_tx_completion);
+ int ret;
+
+ ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
+ if (ret || mcspi->slave_aborted) {
+ dmaengine_terminate_sync(mcspi_dma->dma_tx);
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+ return 0;
+ }
+
+ if (spi_controller_is_slave(mcspi->master)) {
+ ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
+ if (ret || mcspi->slave_aborted)
+ return 0;
+ }
if (mcspi->fifo_depth > 0) {
irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1089,6 +1116,36 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
gpio_free(spi->cs_gpio);
}
+static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
+{
+ struct omap2_mcspi *mcspi = data;
+ u32 irqstat;
+
+ irqstat = mcspi_read_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS);
+ if (!irqstat)
+ return IRQ_NONE;
+
+ /* Disable the IRQ and wake up the slave transfer task */
+ mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQENABLE, 0);
+ if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
+ complete(&mcspi->txdone);
+
+ return IRQ_HANDLED;
+}
+
+static int omap2_mcspi_slave_abort(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
+
+ mcspi->slave_aborted = true;
+ complete(&mcspi_dma->dma_rx_completion);
+ complete(&mcspi_dma->dma_tx_completion);
+ complete(&mcspi->txdone);
+
+ return 0;
+}
+
static int omap2_mcspi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
@@ -1255,10 +1312,20 @@ static bool omap2_mcspi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma =
+ &mcspi->dma_channels[spi->chip_select];
+
+ if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
+ return false;
+
+ if (spi_controller_is_slave(master))
+ return true;
+
return (xfer->len >= DMA_MIN_BYTES);
}
-static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
+static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
@@ -1275,7 +1342,7 @@ static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
OMAP2_MCSPI_WAKEUPENABLE_WKEN);
ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
- omap2_mcspi_set_master_mode(master);
+ omap2_mcspi_set_mode(master);
pm_runtime_mark_last_busy(mcspi->dev);
pm_runtime_put_autosuspend(mcspi->dev);
return 0;
@@ -1350,11 +1417,12 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *match;
- master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
- if (master == NULL) {
- dev_dbg(&pdev->dev, "master allocation failed\n");
+ if (of_property_read_bool(node, "spi-slave"))
+ master = spi_alloc_slave(&pdev->dev, sizeof(*mcspi));
+ else
+ master = spi_alloc_master(&pdev->dev, sizeof(*mcspi));
+ if (!master)
return -ENOMEM;
- }
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
@@ -1366,6 +1434,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->transfer_one = omap2_mcspi_transfer_one;
master->set_cs = omap2_mcspi_set_cs;
master->cleanup = omap2_mcspi_cleanup;
+ master->slave_abort = omap2_mcspi_slave_abort;
master->dev.of_node = node;
master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
@@ -1417,15 +1486,31 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
}
+ status = platform_get_irq(pdev, 0);
+ if (status == -EPROBE_DEFER)
+ goto free_master;
+ if (status < 0) {
+ dev_err(&pdev->dev, "no irq resource found\n");
+ goto free_master;
+ }
+ init_completion(&mcspi->txdone);
+ status = devm_request_irq(&pdev->dev, status,
+ omap2_mcspi_irq_handler, 0, pdev->name,
+ mcspi);
+ if (status) {
+ dev_err(&pdev->dev, "Cannot request IRQ");
+ goto free_master;
+ }
+
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_enable(&pdev->dev);
- status = omap2_mcspi_master_setup(mcspi);
+ status = omap2_mcspi_controller_setup(mcspi);
if (status < 0)
goto disable_pm;
- status = devm_spi_register_master(&pdev->dev, master);
+ status = devm_spi_register_controller(&pdev->dev, master);
if (status < 0)
goto disable_pm;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 47ef6b1a2e76..7f280567093e 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -431,6 +431,7 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
int word_len;
struct orion_spi *orion_spi;
int cs = spi->chip_select;
+ void __iomem *vaddr;
word_len = spi->bits_per_word;
count = xfer->len;
@@ -441,8 +442,9 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
* Use SPI direct write mode if base address is available. Otherwise
* fall back to PIO mode for this transfer.
*/
- if ((orion_spi->child[cs].direct_access.vaddr) && (xfer->tx_buf) &&
- (word_len == 8)) {
+ vaddr = orion_spi->child[cs].direct_access.vaddr;
+
+ if (vaddr && xfer->tx_buf && word_len == 8) {
unsigned int cnt = count / 4;
unsigned int rem = count % 4;
@@ -450,13 +452,11 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
* Send the TX-data to the SPI device via the direct
* mapped address window
*/
- iowrite32_rep(orion_spi->child[cs].direct_access.vaddr,
- xfer->tx_buf, cnt);
+ iowrite32_rep(vaddr, xfer->tx_buf, cnt);
if (rem) {
u32 *buf = (u32 *)xfer->tx_buf;
- iowrite8_rep(orion_spi->child[cs].direct_access.vaddr,
- &buf[cnt], rem);
+ iowrite8_rep(vaddr, &buf[cnt], rem);
}
return count;
@@ -683,6 +683,7 @@ static int orion_spi_probe(struct platform_device *pdev)
}
for_each_available_child_of_node(pdev->dev.of_node, np) {
+ struct orion_direct_acc *dir_acc;
u32 cs;
int cs_gpio;
@@ -750,14 +751,13 @@ static int orion_spi_probe(struct platform_device *pdev)
* This needs to get extended for the direct SPI-NOR / SPI-NAND
* support, once this gets implemented.
*/
- spi->child[cs].direct_access.vaddr = devm_ioremap(&pdev->dev,
- r->start,
- PAGE_SIZE);
- if (!spi->child[cs].direct_access.vaddr) {
+ dir_acc = &spi->child[cs].direct_access;
+ dir_acc->vaddr = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
+ if (!dir_acc->vaddr) {
status = -ENOMEM;
goto out_rel_axi_clk;
}
- spi->child[cs].direct_access.size = PAGE_SIZE;
+ dir_acc->size = PAGE_SIZE;
dev_info(&pdev->dev, "CS%d configured for direct access\n", cs);
}
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
index bd1c6b53283f..d7e4e18ec3df 100644
--- a/drivers/spi/spi-pic32-sqi.c
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -468,7 +468,7 @@ static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
/* allocate coherent DMAable memory for hardware buffer descriptors. */
sqi->bd = dma_zalloc_coherent(&sqi->master->dev,
sizeof(*bd) * PESQI_BD_COUNT,
- &sqi->bd_dma, GFP_DMA32);
+ &sqi->bd_dma, GFP_KERNEL);
if (!sqi->bd) {
dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
return -ENOMEM;
@@ -656,7 +656,7 @@ static int pic32_sqi_probe(struct platform_device *pdev)
master->max_speed_hz = clk_get_rate(sqi->base_clk);
master->dma_alignment = 32;
master->max_dma_len = PESQI_BD_BUF_LEN_MAX;
- master->dev.of_node = of_node_get(pdev->dev.of_node);
+ master->dev.of_node = pdev->dev.of_node;
master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
master->flags = SPI_MASTER_HALF_DUPLEX;
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index f8a45af1fa9f..131849adc570 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -320,7 +320,7 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
xfer->rx_sg.sgl,
xfer->rx_sg.nents,
- DMA_FROM_DEVICE,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
ret = -EINVAL;
@@ -330,7 +330,7 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
xfer->tx_sg.sgl,
xfer->tx_sg.nents,
- DMA_TO_DEVICE,
+ DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
ret = -EINVAL;
@@ -774,7 +774,7 @@ static int pic32_spi_probe(struct platform_device *pdev)
if (ret)
goto err_master;
- master->dev.of_node = of_node_get(pdev->dev.of_node);
+ master->dev.of_node = pdev->dev.of_node;
master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_CS_HIGH;
master->num_chipselect = 1; /* single chip-select */
master->max_speed_hz = clk_get_rate(pic32s->clk);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 1af8c96b940e..6120e6abcd96 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1490,10 +1490,8 @@ static void do_polling_transfer(struct pl022 *pl022)
struct spi_message *message = NULL;
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
- struct chip_data *chip;
unsigned long time, timeout;
- chip = pl022->cur_chip;
message = pl022->cur_msg;
while (message->state != STATE_DONE) {
@@ -2325,10 +2323,8 @@ static int pl022_suspend(struct device *dev)
int ret;
ret = spi_master_suspend(pl022->master);
- if (ret) {
- dev_warn(dev, "cannot suspend master\n");
+ if (ret)
return ret;
- }
ret = pm_runtime_force_suspend(dev);
if (ret) {
@@ -2353,9 +2349,7 @@ static int pl022_resume(struct device *dev)
/* Start the queue running */
ret = spi_master_resume(pl022->master);
- if (ret)
- dev_err(dev, "problem starting queue (%d)\n", ret);
- else
+ if (!ret)
dev_dbg(dev, "resumed\n");
return ret;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 14f4ea59caff..612cc49db28f 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -33,6 +33,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
+#include <linux/of_device.h>
#include "spi-pxa2xx.h"
@@ -665,9 +666,11 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
bytes_left = drv_data->rx_end - drv_data->rx;
switch (drv_data->n_bytes) {
case 4:
- bytes_left >>= 1;
+ bytes_left >>= 2;
+ break;
case 2:
bytes_left >>= 1;
+ break;
}
rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
@@ -1333,9 +1336,6 @@ static void cleanup(struct spi_device *spi)
kfree(chip);
}
-#ifdef CONFIG_PCI
-#ifdef CONFIG_ACPI
-
static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
{ "INT33C0", LPSS_LPT_SSP },
{ "INT33C1", LPSS_LPT_SSP },
@@ -1347,23 +1347,6 @@ static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
-static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
-{
- unsigned int devid;
- int port_id = -1;
-
- if (adev && adev->pnp.unique_id &&
- !kstrtouint(adev->pnp.unique_id, 0, &devid))
- port_id = devid;
- return port_id;
-}
-#else /* !CONFIG_ACPI */
-static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
-{
- return -1;
-}
-#endif
-
/*
* PCI IDs of compound devices that integrate both host controller and private
* integrated DMA engine. Please note these are not used in module
@@ -1410,6 +1393,37 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ },
};
+static const struct of_device_id pxa2xx_spi_of_match[] = {
+ { .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
+
+#ifdef CONFIG_ACPI
+
+static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
+{
+ unsigned int devid;
+ int port_id = -1;
+
+ if (adev && adev->pnp.unique_id &&
+ !kstrtouint(adev->pnp.unique_id, 0, &devid))
+ port_id = devid;
+ return port_id;
+}
+
+#else /* !CONFIG_ACPI */
+
+static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
+{
+ return -1;
+}
+
+#endif /* CONFIG_ACPI */
+
+
+#ifdef CONFIG_PCI
+
static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
{
struct device *dev = param;
@@ -1420,6 +1434,8 @@ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
return true;
}
+#endif /* CONFIG_PCI */
+
static struct pxa2xx_spi_master *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
@@ -1429,11 +1445,15 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
struct resource *res;
const struct acpi_device_id *adev_id = NULL;
const struct pci_device_id *pcidev_id = NULL;
- int type;
+ const struct of_device_id *of_id = NULL;
+ enum pxa_ssp_type type;
adev = ACPI_COMPANION(&pdev->dev);
- if (dev_is_pci(pdev->dev.parent))
+ if (pdev->dev.of_node)
+ of_id = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ else if (dev_is_pci(pdev->dev.parent))
pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
to_pci_dev(pdev->dev.parent));
else if (adev)
@@ -1443,9 +1463,11 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
return NULL;
if (adev_id)
- type = (int)adev_id->driver_data;
+ type = (enum pxa_ssp_type)adev_id->driver_data;
else if (pcidev_id)
- type = (int)pcidev_id->driver_data;
+ type = (enum pxa_ssp_type)pcidev_id->driver_data;
+ else if (of_id)
+ type = (enum pxa_ssp_type)of_id->data;
else
return NULL;
@@ -1464,11 +1486,13 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
if (IS_ERR(ssp->mmio_base))
return NULL;
+#ifdef CONFIG_PCI
if (pcidev_id) {
pdata->tx_param = pdev->dev.parent;
pdata->rx_param = pdev->dev.parent;
pdata->dma_filter = pxa2xx_spi_idma_filter;
}
+#endif
ssp->clk = devm_clk_get(&pdev->dev, NULL);
ssp->irq = platform_get_irq(pdev, 0);
@@ -1482,14 +1506,6 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
return pdata;
}
-#else /* !CONFIG_PCI */
-static inline struct pxa2xx_spi_master *
-pxa2xx_spi_init_pdata(struct platform_device *pdev)
-{
- return NULL;
-}
-#endif
-
static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master,
unsigned int cs)
{
@@ -1764,14 +1780,6 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
return 0;
}
-static void pxa2xx_spi_shutdown(struct platform_device *pdev)
-{
- int status = 0;
-
- if ((status = pxa2xx_spi_remove(pdev)) != 0)
- dev_err(&pdev->dev, "shutdown failed with %d\n", status);
-}
-
#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
@@ -1808,13 +1816,7 @@ static int pxa2xx_spi_resume(struct device *dev)
lpss_ssp_setup(drv_data);
/* Start the queue running */
- status = spi_controller_resume(drv_data->master);
- if (status != 0) {
- dev_err(dev, "problem starting queue (%d)\n", status);
- return status;
- }
-
- return 0;
+ return spi_controller_resume(drv_data->master);
}
#endif
@@ -1848,10 +1850,10 @@ static struct platform_driver driver = {
.name = "pxa2xx-spi",
.pm = &pxa2xx_spi_pm_ops,
.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
+ .of_match_table = of_match_ptr(pxa2xx_spi_of_match),
},
.probe = pxa2xx_spi_probe,
.remove = pxa2xx_spi_remove,
- .shutdown = pxa2xx_spi_shutdown,
};
static int __init pxa2xx_spi_init(void)
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
new file mode 100644
index 000000000000..b8163b40bb92
--- /dev/null
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+
+#define QSPI_NUM_CS 2
+#define QSPI_BYTES_PER_WORD 4
+
+#define MSTR_CONFIG 0x0000
+#define FULL_CYCLE_MODE BIT(3)
+#define FB_CLK_EN BIT(4)
+#define PIN_HOLDN BIT(6)
+#define PIN_WPN BIT(7)
+#define DMA_ENABLE BIT(8)
+#define BIG_ENDIAN_MODE BIT(9)
+#define SPI_MODE_MSK 0xc00
+#define SPI_MODE_SHFT 10
+#define CHIP_SELECT_NUM BIT(12)
+#define SBL_EN BIT(13)
+#define LPA_BASE_MSK 0x3c000
+#define LPA_BASE_SHFT 14
+#define TX_DATA_DELAY_MSK 0xc0000
+#define TX_DATA_DELAY_SHFT 18
+#define TX_CLK_DELAY_MSK 0x300000
+#define TX_CLK_DELAY_SHFT 20
+#define TX_CS_N_DELAY_MSK 0xc00000
+#define TX_CS_N_DELAY_SHFT 22
+#define TX_DATA_OE_DELAY_MSK 0x3000000
+#define TX_DATA_OE_DELAY_SHFT 24
+
+#define AHB_MASTER_CFG 0x0004
+#define HMEM_TYPE_START_MID_TRANS_MSK 0x7
+#define HMEM_TYPE_START_MID_TRANS_SHFT 0
+#define HMEM_TYPE_LAST_TRANS_MSK 0x38
+#define HMEM_TYPE_LAST_TRANS_SHFT 3
+#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_MSK 0xc0
+#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_SHFT 6
+#define HMEMTYPE_READ_TRANS_MSK 0x700
+#define HMEMTYPE_READ_TRANS_SHFT 8
+#define HSHARED BIT(11)
+#define HINNERSHARED BIT(12)
+
+#define MSTR_INT_EN 0x000C
+#define MSTR_INT_STATUS 0x0010
+#define RESP_FIFO_UNDERRUN BIT(0)
+#define RESP_FIFO_NOT_EMPTY BIT(1)
+#define RESP_FIFO_RDY BIT(2)
+#define HRESP_FROM_NOC_ERR BIT(3)
+#define WR_FIFO_EMPTY BIT(9)
+#define WR_FIFO_FULL BIT(10)
+#define WR_FIFO_OVERRUN BIT(11)
+#define TRANSACTION_DONE BIT(16)
+#define QSPI_ERR_IRQS (RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
+ WR_FIFO_OVERRUN)
+#define QSPI_ALL_IRQS (QSPI_ERR_IRQS | RESP_FIFO_RDY | \
+ WR_FIFO_EMPTY | WR_FIFO_FULL | \
+ TRANSACTION_DONE)
+
+#define PIO_XFER_CTRL 0x0014
+#define REQUEST_COUNT_MSK 0xffff
+
+#define PIO_XFER_CFG 0x0018
+#define TRANSFER_DIRECTION BIT(0)
+#define MULTI_IO_MODE_MSK 0xe
+#define MULTI_IO_MODE_SHFT 1
+#define TRANSFER_FRAGMENT BIT(8)
+#define SDR_1BIT 1
+#define SDR_2BIT 2
+#define SDR_4BIT 3
+#define DDR_1BIT 5
+#define DDR_2BIT 6
+#define DDR_4BIT 7
+#define DMA_DESC_SINGLE_SPI 1
+#define DMA_DESC_DUAL_SPI 2
+#define DMA_DESC_QUAD_SPI 3
+
+#define PIO_XFER_STATUS 0x001c
+#define WR_FIFO_BYTES_MSK 0xffff0000
+#define WR_FIFO_BYTES_SHFT 16
+
+#define PIO_DATAOUT_1B 0x0020
+#define PIO_DATAOUT_4B 0x0024
+
+#define RD_FIFO_STATUS 0x002c
+#define FIFO_EMPTY BIT(11)
+#define WR_CNTS_MSK 0x7f0
+#define WR_CNTS_SHFT 4
+#define RDY_64BYTE BIT(3)
+#define RDY_32BYTE BIT(2)
+#define RDY_16BYTE BIT(1)
+#define FIFO_RDY BIT(0)
+
+#define RD_FIFO_CFG 0x0028
+#define CONTINUOUS_MODE BIT(0)
+
+#define RD_FIFO_RESET 0x0030
+#define RESET_FIFO BIT(0)
+
+#define CUR_MEM_ADDR 0x0048
+#define HW_VERSION 0x004c
+#define RD_FIFO 0x0050
+#define SAMPLING_CLK_CFG 0x0090
+#define SAMPLING_CLK_STATUS 0x0094
+
+
+enum qspi_dir {
+ QSPI_READ,
+ QSPI_WRITE,
+};
+
+struct qspi_xfer {
+ union {
+ const void *tx_buf;
+ void *rx_buf;
+ };
+ unsigned int rem_bytes;
+ unsigned int buswidth;
+ enum qspi_dir dir;
+ bool is_last;
+};
+
+enum qspi_clocks {
+ QSPI_CLK_CORE,
+ QSPI_CLK_IFACE,
+ QSPI_NUM_CLKS
+};
+
+struct qcom_qspi {
+ void __iomem *base;
+ struct device *dev;
+ struct clk_bulk_data clks[QSPI_NUM_CLKS];
+ struct qspi_xfer xfer;
+ /* Lock to protect data accessed by IRQs */
+ spinlock_t lock;
+};
+
+static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
+ unsigned int buswidth)
+{
+ switch (buswidth) {
+ case 1:
+ return SDR_1BIT << MULTI_IO_MODE_SHFT;
+ case 2:
+ return SDR_2BIT << MULTI_IO_MODE_SHFT;
+ case 4:
+ return SDR_4BIT << MULTI_IO_MODE_SHFT;
+ default:
+ dev_warn_once(ctrl->dev,
+ "Unexpected bus width: %u\n", buswidth);
+ return SDR_1BIT << MULTI_IO_MODE_SHFT;
+ }
+}
+
+static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
+{
+ u32 pio_xfer_cfg;
+ const struct qspi_xfer *xfer;
+
+ xfer = &ctrl->xfer;
+ pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
+ pio_xfer_cfg &= ~TRANSFER_DIRECTION;
+ pio_xfer_cfg |= xfer->dir;
+ if (xfer->is_last)
+ pio_xfer_cfg &= ~TRANSFER_FRAGMENT;
+ else
+ pio_xfer_cfg |= TRANSFER_FRAGMENT;
+ pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
+ pio_xfer_cfg |= qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
+
+ writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
+}
+
+static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
+{
+ u32 pio_xfer_ctrl;
+
+ pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
+ pio_xfer_ctrl &= ~REQUEST_COUNT_MSK;
+ pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
+ writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
+}
+
+static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
+{
+ u32 ints;
+
+ qcom_qspi_pio_xfer_cfg(ctrl);
+
+ /* Ack any previous interrupts that might be hanging around */
+ writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);
+
+ /* Setup new interrupts */
+ if (ctrl->xfer.dir == QSPI_WRITE)
+ ints = QSPI_ERR_IRQS | WR_FIFO_EMPTY;
+ else
+ ints = QSPI_ERR_IRQS | RESP_FIFO_RDY;
+ writel(ints, ctrl->base + MSTR_INT_EN);
+
+ /* Kick off the transfer */
+ qcom_qspi_pio_xfer_ctrl(ctrl);
+}
+
+static void qcom_qspi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ writel(0, ctrl->base + MSTR_INT_EN);
+ ctrl->xfer.rem_bytes = 0;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+}
+
+static int qcom_qspi_transfer_one(struct spi_master *master,
+ struct spi_device *slv,
+ struct spi_transfer *xfer)
+{
+ struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ int ret;
+ unsigned long speed_hz;
+ unsigned long flags;
+
+ speed_hz = slv->max_speed_hz;
+ if (xfer->speed_hz)
+ speed_hz = xfer->speed_hz;
+
+ /* In regular operation (SBL_EN=1) core must be 4x transfer clock */
+ ret = clk_set_rate(ctrl->clks[QSPI_CLK_CORE].clk, speed_hz * 4);
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+
+ /* We are half duplex, so either rx or tx will be set */
+ if (xfer->rx_buf) {
+ ctrl->xfer.dir = QSPI_READ;
+ ctrl->xfer.buswidth = xfer->rx_nbits;
+ ctrl->xfer.rx_buf = xfer->rx_buf;
+ } else {
+ ctrl->xfer.dir = QSPI_WRITE;
+ ctrl->xfer.buswidth = xfer->tx_nbits;
+ ctrl->xfer.tx_buf = xfer->tx_buf;
+ }
+ ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
+ &master->cur_msg->transfers);
+ ctrl->xfer.rem_bytes = xfer->len;
+ qcom_qspi_pio_xfer(ctrl);
+
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ /* We'll call spi_finalize_current_transfer() when done */
+ return 1;
+}
+
+static int qcom_qspi_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ u32 mstr_cfg;
+ struct qcom_qspi *ctrl;
+ int tx_data_oe_delay = 1;
+ int tx_data_delay = 1;
+ unsigned long flags;
+
+ ctrl = spi_master_get_devdata(master);
+ spin_lock_irqsave(&ctrl->lock, flags);
+
+ mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
+ mstr_cfg &= ~CHIP_SELECT_NUM;
+ if (message->spi->chip_select)
+ mstr_cfg |= CHIP_SELECT_NUM;
+
+ mstr_cfg |= FB_CLK_EN | PIN_WPN | PIN_HOLDN | SBL_EN | FULL_CYCLE_MODE;
+ mstr_cfg &= ~(SPI_MODE_MSK | TX_DATA_OE_DELAY_MSK | TX_DATA_DELAY_MSK);
+ mstr_cfg |= message->spi->mode << SPI_MODE_SHFT;
+ mstr_cfg |= tx_data_oe_delay << TX_DATA_OE_DELAY_SHFT;
+ mstr_cfg |= tx_data_delay << TX_DATA_DELAY_SHFT;
+ mstr_cfg &= ~DMA_ENABLE;
+
+ writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
+ return 0;
+}
+
+static irqreturn_t pio_read(struct qcom_qspi *ctrl)
+{
+ u32 rd_fifo_status;
+ u32 rd_fifo;
+ unsigned int wr_cnts;
+ unsigned int bytes_to_read;
+ unsigned int words_to_read;
+ u32 *word_buf;
+ u8 *byte_buf;
+ int i;
+
+ rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);
+
+ if (!(rd_fifo_status & FIFO_RDY)) {
+ dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
+ return IRQ_NONE;
+ }
+
+ wr_cnts = (rd_fifo_status & WR_CNTS_MSK) >> WR_CNTS_SHFT;
+ wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);
+
+ words_to_read = wr_cnts / QSPI_BYTES_PER_WORD;
+ bytes_to_read = wr_cnts % QSPI_BYTES_PER_WORD;
+
+ if (words_to_read) {
+ word_buf = ctrl->xfer.rx_buf;
+ ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
+ ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
+ ctrl->xfer.rx_buf = word_buf + words_to_read;
+ }
+
+ if (bytes_to_read) {
+ byte_buf = ctrl->xfer.rx_buf;
+ rd_fifo = readl(ctrl->base + RD_FIFO);
+ ctrl->xfer.rem_bytes -= bytes_to_read;
+ for (i = 0; i < bytes_to_read; i++)
+ *byte_buf++ = rd_fifo >> (i * BITS_PER_BYTE);
+ ctrl->xfer.rx_buf = byte_buf;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t pio_write(struct qcom_qspi *ctrl)
+{
+ const void *xfer_buf = ctrl->xfer.tx_buf;
+ const int *word_buf;
+ const char *byte_buf;
+ unsigned int wr_fifo_bytes;
+ unsigned int wr_fifo_words;
+ unsigned int wr_size;
+ unsigned int rem_words;
+
+ wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
+ wr_fifo_bytes >>= WR_FIFO_BYTES_SHFT;
+
+ if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
+ /* Process the last 1-3 bytes */
+ wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
+ ctrl->xfer.rem_bytes -= wr_size;
+
+ byte_buf = xfer_buf;
+ while (wr_size--)
+ writel(*byte_buf++,
+ ctrl->base + PIO_DATAOUT_1B);
+ ctrl->xfer.tx_buf = byte_buf;
+ } else {
+ /*
+ * Process all the whole words; to keep things simple we'll
+ * just wait for the next interrupt to handle the last 1-3
+ * bytes if we don't have a whole number of words.
+ */
+ rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
+ wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;
+
+ wr_size = min(rem_words, wr_fifo_words);
+ ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;
+
+ word_buf = xfer_buf;
+ iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
+ ctrl->xfer.tx_buf = word_buf + wr_size;
+
+ }
+
+ return IRQ_HANDLED;
+}
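
pio_write() above pushes whole 32-bit words through PIO_DATAOUT_4B and defers any 1-3 byte tail to a later pass through PIO_DATAOUT_1B. A worked example:

/* For rem_bytes = 10 and a FIFO reporting wr_fifo_bytes = 64:
 *
 *   pass 1: rem_words = 10 / 4 = 2, wr_fifo_words = 64 / 4 = 16,
 *           wr_size = min(2, 16) = 2
 *           -> two 32-bit writes via PIO_DATAOUT_4B, rem_bytes = 2
 *   pass 2: rem_bytes (2) < QSPI_BYTES_PER_WORD
 *           -> two byte writes via PIO_DATAOUT_1B, rem_bytes = 0
 *
 * Once rem_bytes hits zero the IRQ handler finalizes the transfer.
 */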
+
+static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
+{
+ u32 int_status;
+ struct qcom_qspi *ctrl = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+
+ int_status = readl(ctrl->base + MSTR_INT_STATUS);
+ writel(int_status, ctrl->base + MSTR_INT_STATUS);
+
+ if (ctrl->xfer.dir == QSPI_WRITE) {
+ if (int_status & WR_FIFO_EMPTY)
+ ret = pio_write(ctrl);
+ } else {
+ if (int_status & RESP_FIFO_RDY)
+ ret = pio_read(ctrl);
+ }
+
+ if (int_status & QSPI_ERR_IRQS) {
+ if (int_status & RESP_FIFO_UNDERRUN)
+ dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
+ if (int_status & WR_FIFO_OVERRUN)
+ dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
+ if (int_status & HRESP_FROM_NOC_ERR)
+ dev_err(ctrl->dev, "IRQ error: NOC response error\n");
+ ret = IRQ_HANDLED;
+ }
+
+ if (!ctrl->xfer.rem_bytes) {
+ writel(0, ctrl->base + MSTR_INT_EN);
+ spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
+ }
+
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ return ret;
+}
+
+static int qcom_qspi_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev;
+ struct resource *res;
+ struct spi_master *master;
+ struct qcom_qspi *ctrl;
+
+ dev = &pdev->dev;
+
+ master = spi_alloc_master(dev, sizeof(*ctrl));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ ctrl = spi_master_get_devdata(master);
+
+ spin_lock_init(&ctrl->lock);
+ ctrl->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctrl->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->base)) {
+ ret = PTR_ERR(ctrl->base);
+ goto exit_probe_master_put;
+ }
+
+ ctrl->clks[QSPI_CLK_CORE].id = "core";
+ ctrl->clks[QSPI_CLK_IFACE].id = "iface";
+ ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
+ if (ret)
+ goto exit_probe_master_put;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get irq %d\n", ret);
+ goto exit_probe_master_put;
+ }
+ ret = devm_request_irq(dev, ret, qcom_qspi_irq,
+ IRQF_TRIGGER_HIGH, dev_name(dev), ctrl);
+ if (ret) {
+ dev_err(dev, "Failed to request irq %d\n", ret);
+ goto exit_probe_master_put;
+ }
+
+ master->max_speed_hz = 300000000;
+ master->num_chipselect = QSPI_NUM_CS;
+ master->bus_num = -1;
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_MODE_0 |
+ SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->prepare_message = qcom_qspi_prepare_message;
+ master->transfer_one = qcom_qspi_transfer_one;
+ master->handle_err = qcom_qspi_handle_err;
+ master->auto_runtime_pm = true;
+
+ pm_runtime_enable(dev);
+
+ ret = spi_register_master(master);
+ if (!ret)
+ return 0;
+
+ pm_runtime_disable(dev);
+
+exit_probe_master_put:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int qcom_qspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+
+ /* Unregister _before_ disabling pm_runtime() so we stop transfers */
+ spi_unregister_master(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+
+ clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);
+
+ return 0;
+}
+
+static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+
+ return clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+}
+
+static int __maybe_unused qcom_qspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ spi_master_resume(master);
+
+ return ret;
+}
+
+static int __maybe_unused qcom_qspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ ret = spi_master_resume(master);
+ if (ret)
+ pm_runtime_force_suspend(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops qcom_qspi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(qcom_qspi_runtime_suspend,
+ qcom_qspi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(qcom_qspi_suspend, qcom_qspi_resume)
+};
+
+static const struct of_device_id qcom_qspi_dt_match[] = {
+ { .compatible = "qcom,qspi-v1", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_qspi_dt_match);
+
+static struct platform_driver qcom_qspi_driver = {
+ .driver = {
+ .name = "qcom_qspi",
+ .pm = &qcom_qspi_dev_pm_ops,
+ .of_match_table = qcom_qspi_dt_match,
+ },
+ .probe = qcom_qspi_probe,
+ .remove = qcom_qspi_remove,
+};
+module_platform_driver(qcom_qspi_driver);
+
+MODULE_DESCRIPTION("SPI driver for QSPI cores");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c
index 3641d0e20135..fbbf9a188247 100644
--- a/drivers/spi/spi-rb4xx.c
+++ b/drivers/spi/spi-rb4xx.c
@@ -159,7 +159,7 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
master->bus_num = 0;
master->num_chipselect = 3;
master->mode_bits = SPI_TX_DUAL;
- master->bits_per_word_mask = BIT(7);
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->flags = SPI_MASTER_MUST_TX;
master->transfer_one = rb4xx_transfer_one;
master->set_cs = rb4xx_set_cs;
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index fdcf3076681b..51ef632bca52 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -164,7 +164,6 @@ enum rockchip_ssi_type {
struct rockchip_spi_dma_data {
struct dma_chan *ch;
- enum dma_transfer_direction direction;
dma_addr_t addr;
};
@@ -202,12 +201,11 @@ struct rockchip_spi {
bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
- u32 use_dma;
+ bool use_dma;
struct sg_table tx_sg;
struct sg_table rx_sg;
struct rockchip_spi_dma_data dma_rx;
struct rockchip_spi_dma_data dma_tx;
- struct dma_slave_caps dma_caps;
};
static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
@@ -381,6 +379,8 @@ static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
{
int remain = 0;
+ spi_enable_chip(rs, 1);
+
do {
if (rs->tx) {
remain = rs->tx_end - rs->tx;
@@ -445,6 +445,9 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
struct dma_slave_config rxconf, txconf;
struct dma_async_tx_descriptor *rxdesc, *txdesc;
+ memset(&rxconf, 0, sizeof(rxconf));
+ memset(&txconf, 0, sizeof(txconf));
+
spin_lock_irqsave(&rs->lock, flags);
rs->state &= ~RXBUSY;
rs->state &= ~TXBUSY;
@@ -452,19 +455,16 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
rxdesc = NULL;
if (rs->rx) {
- rxconf.direction = rs->dma_rx.direction;
+ rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = rs->dma_rx.addr;
rxconf.src_addr_width = rs->n_bytes;
- if (rs->dma_caps.max_burst > 4)
- rxconf.src_maxburst = 4;
- else
- rxconf.src_maxburst = 1;
+ rxconf.src_maxburst = 1;
dmaengine_slave_config(rs->dma_rx.ch, &rxconf);
rxdesc = dmaengine_prep_slave_sg(
rs->dma_rx.ch,
rs->rx_sg.sgl, rs->rx_sg.nents,
- rs->dma_rx.direction, DMA_PREP_INTERRUPT);
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (!rxdesc)
return -EINVAL;
@@ -474,19 +474,16 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
txdesc = NULL;
if (rs->tx) {
- txconf.direction = rs->dma_tx.direction;
+ txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = rs->dma_tx.addr;
txconf.dst_addr_width = rs->n_bytes;
- if (rs->dma_caps.max_burst > 4)
- txconf.dst_maxburst = 4;
- else
- txconf.dst_maxburst = 1;
+ txconf.dst_maxburst = rs->fifo_len / 2;
dmaengine_slave_config(rs->dma_tx.ch, &txconf);
txdesc = dmaengine_prep_slave_sg(
rs->dma_tx.ch,
rs->tx_sg.sgl, rs->tx_sg.nents,
- rs->dma_tx.direction, DMA_PREP_INTERRUPT);
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!txdesc) {
if (rxdesc)
dmaengine_terminate_sync(rs->dma_rx.ch);
@@ -506,6 +503,8 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
dma_async_issue_pending(rs->dma_rx.ch);
}
+ spi_enable_chip(rs, 1);
+
if (txdesc) {
spin_lock_irqsave(&rs->lock, flags);
rs->state |= TXBUSY;
@@ -514,7 +513,8 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
dma_async_issue_pending(rs->dma_tx.ch);
}
- return 0;
+ /* 1 means the transfer is in progress */
+ return 1;
}
static void rockchip_spi_config(struct rockchip_spi *rs)
@@ -578,7 +578,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
- writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
+ writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
@@ -597,7 +597,6 @@ static int rockchip_spi_transfer_one(
struct spi_device *spi,
struct spi_transfer *xfer)
{
- int ret = 0;
struct rockchip_spi *rs = spi_master_get_devdata(master);
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
@@ -635,30 +634,16 @@ static int rockchip_spi_transfer_one(
/* we need to prepare DMA before the SPI is enabled */
if (master->can_dma && master->can_dma(master, spi, xfer))
- rs->use_dma = 1;
+ rs->use_dma = true;
else
- rs->use_dma = 0;
+ rs->use_dma = false;
rockchip_spi_config(rs);
- if (rs->use_dma) {
- if (rs->tmode == CR0_XFM_RO) {
- /* rx: dma must be prepared first */
- ret = rockchip_spi_prepare_dma(rs);
- spi_enable_chip(rs, 1);
- } else {
- /* tx or tr: spi must be enabled first */
- spi_enable_chip(rs, 1);
- ret = rockchip_spi_prepare_dma(rs);
- }
- /* successful DMA prepare means the transfer is in progress */
- ret = ret ? ret : 1;
- } else {
- spi_enable_chip(rs, 1);
- ret = rockchip_spi_pio_transfer(rs);
- }
+ if (rs->use_dma)
+ return rockchip_spi_prepare_dma(rs);
- return ret;
+ return rockchip_spi_pio_transfer(rs);
}
static bool rockchip_spi_can_dma(struct spi_master *master,
@@ -780,11 +765,8 @@ static int rockchip_spi_probe(struct platform_device *pdev)
}
if (rs->dma_tx.ch && rs->dma_rx.ch) {
- dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps));
rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
- rs->dma_tx.direction = DMA_MEM_TO_DEV;
- rs->dma_rx.direction = DMA_DEV_TO_MEM;
master->can_dma = rockchip_spi_can_dma;
master->dma_tx = rs->dma_tx.ch;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 95dc4d78618d..55f8e55327b3 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH RSPI driver
*
@@ -6,15 +7,6 @@
*
* Based on spi-sh.c:
* Copyright (C) 2011 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -598,11 +590,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
- if (ret > 0 && rspi->dma_callbacked)
+ if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
- else if (!ret) {
- dev_err(&rspi->master->dev, "DMA timeout\n");
- ret = -ETIMEDOUT;
+ } else {
+ if (!ret) {
+ dev_err(&rspi->master->dev, "DMA timeout\n");
+ ret = -ETIMEDOUT;
+ }
if (tx)
dmaengine_terminate_all(rspi->master->dma_tx);
if (rx)
@@ -1350,12 +1344,36 @@ static const struct platform_device_id spi_driver_ids[] = {
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS &rspi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver rspi_driver = {
.probe = rspi_probe,
.remove = rspi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "renesas_spi",
+ .pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(rspi_of_match),
},
};
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 20e800e70442..dc0926e43665 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH HSPI bus driver
*
@@ -7,15 +8,6 @@
* Based on pxa2xx_spi.c:
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -316,6 +308,6 @@ static struct platform_driver hspi_driver = {
module_platform_driver(hspi_driver);
MODULE_DESCRIPTION("SuperH HSPI bus driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_ALIAS("platform:sh-hspi");
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 539d6d1a277a..adf384323934 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH MSIOF SPI Master Interface
*
* Copyright (c) 2009 Magnus Damm
* Copyright (C) 2014 Renesas Electronics Corporation
* Copyright (C) 2014-2017 Glider bvba
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/bitmap.h>
@@ -397,7 +393,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
- sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+ sh_msiof_write(p, STR,
+ sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1342,8 +1339,8 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
i = platform_get_irq(pdev, 0);
if (i < 0) {
- dev_err(&pdev->dev, "cannot get platform IRQ\n");
- ret = -ENOENT;
+ dev_err(&pdev->dev, "cannot get IRQ\n");
+ ret = i;
goto err1;
}
@@ -1426,12 +1423,37 @@ static const struct platform_device_id spi_driver_ids[] = {
};
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+ sh_msiof_spi_resume);
+#define DEV_PM_OPS &sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver sh_msiof_spi_drv = {
.probe = sh_msiof_spi_probe,
.remove = sh_msiof_spi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "spi_sh_msiof",
+ .pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(sh_msiof_match),
},
};
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 50e0ea9acf8b..f1ee58208216 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SH SPI bus driver
*
@@ -5,15 +6,6 @@
*
* Based on pxa2xx_spi.c:
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -522,6 +514,6 @@ static struct platform_driver spi_sh_driver = {
module_platform_driver(spi_sh_driver);
MODULE_DESCRIPTION("SH SPI bus driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:sh_spi");
diff --git a/drivers/spi/spi-slave-mt27xx.c b/drivers/spi/spi-slave-mt27xx.c
new file mode 100644
index 000000000000..d1075433f6a6
--- /dev/null
+++ b/drivers/spi/spi-slave-mt27xx.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2018 MediaTek Inc.
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#define SPIS_IRQ_EN_REG 0x0
+#define SPIS_IRQ_CLR_REG 0x4
+#define SPIS_IRQ_ST_REG 0x8
+#define SPIS_IRQ_MASK_REG 0xc
+#define SPIS_CFG_REG 0x10
+#define SPIS_RX_DATA_REG 0x14
+#define SPIS_TX_DATA_REG 0x18
+#define SPIS_RX_DST_REG 0x1c
+#define SPIS_TX_SRC_REG 0x20
+#define SPIS_DMA_CFG_REG 0x30
+#define SPIS_SOFT_RST_REG 0x40
+
+/* SPIS_IRQ_EN_REG */
+#define DMA_DONE_EN BIT(7)
+#define DATA_DONE_EN BIT(2)
+#define RSTA_DONE_EN BIT(1)
+#define CMD_INVALID_EN BIT(0)
+
+/* SPIS_IRQ_ST_REG */
+#define DMA_DONE_ST BIT(7)
+#define DATA_DONE_ST BIT(2)
+#define RSTA_DONE_ST BIT(1)
+#define CMD_INVALID_ST BIT(0)
+
+/* SPIS_IRQ_MASK_REG */
+#define DMA_DONE_MASK BIT(7)
+#define DATA_DONE_MASK BIT(2)
+#define RSTA_DONE_MASK BIT(1)
+#define CMD_INVALID_MASK BIT(0)
+
+/* SPIS_CFG_REG */
+#define SPIS_TX_ENDIAN BIT(7)
+#define SPIS_RX_ENDIAN BIT(6)
+#define SPIS_TXMSBF BIT(5)
+#define SPIS_RXMSBF BIT(4)
+#define SPIS_CPHA BIT(3)
+#define SPIS_CPOL BIT(2)
+#define SPIS_TX_EN BIT(1)
+#define SPIS_RX_EN BIT(0)
+
+/* SPIS_DMA_CFG_REG */
+#define TX_DMA_TRIG_EN BIT(31)
+#define TX_DMA_EN BIT(30)
+#define RX_DMA_EN BIT(29)
+#define TX_DMA_LEN 0xfffff
+
+/* SPIS_SOFT_RST_REG */
+#define SPIS_DMA_ADDR_EN BIT(1)
+#define SPIS_SOFT_RST BIT(0)
+
+#define MTK_SPI_SLAVE_MAX_FIFO_SIZE 512U
+
+struct mtk_spi_slave {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *spi_clk;
+ struct completion xfer_done;
+ struct spi_transfer *cur_transfer;
+ bool slave_aborted;
+};
+
+static const struct of_device_id mtk_spi_slave_of_match[] = {
+ { .compatible = "mediatek,mt2712-spi-slave", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mtk_spi_slave_of_match);
+
+static void mtk_spi_slave_disable_dma(struct mtk_spi_slave *mdata)
+{
+ u32 reg_val;
+
+ reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
+ reg_val &= ~RX_DMA_EN;
+ reg_val &= ~TX_DMA_EN;
+ writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
+}
+
+static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
+{
+ u32 reg_val;
+
+ reg_val = readl(mdata->base + SPIS_CFG_REG);
+ reg_val &= ~SPIS_TX_EN;
+ reg_val &= ~SPIS_RX_EN;
+ writel(reg_val, mdata->base + SPIS_CFG_REG);
+}
+
+static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
+{
+ if (wait_for_completion_interruptible(&mdata->xfer_done) ||
+ mdata->slave_aborted) {
+ dev_err(mdata->dev, "interrupted\n");
+ return -EINTR;
+ }
+
+ return 0;
+}
+
+static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *msg)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = msg->spi;
+ bool cpha, cpol;
+ u32 reg_val;
+
+ cpha = spi->mode & SPI_CPHA ? 1 : 0;
+ cpol = spi->mode & SPI_CPOL ? 1 : 0;
+
+ reg_val = readl(mdata->base + SPIS_CFG_REG);
+ if (cpha)
+ reg_val |= SPIS_CPHA;
+ else
+ reg_val &= ~SPIS_CPHA;
+ if (cpol)
+ reg_val |= SPIS_CPOL;
+ else
+ reg_val &= ~SPIS_CPOL;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg_val &= ~(SPIS_TXMSBF | SPIS_RXMSBF);
+ else
+ reg_val |= SPIS_TXMSBF | SPIS_RXMSBF;
+
+ reg_val &= ~SPIS_TX_ENDIAN;
+ reg_val &= ~SPIS_RX_ENDIAN;
+ writel(reg_val, mdata->base + SPIS_CFG_REG);
+
+ return 0;
+}
+
+static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ int reg_val, cnt, remainder, ret;
+
+ writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
+
+ reg_val = readl(mdata->base + SPIS_CFG_REG);
+ if (xfer->rx_buf)
+ reg_val |= SPIS_RX_EN;
+ if (xfer->tx_buf)
+ reg_val |= SPIS_TX_EN;
+ writel(reg_val, mdata->base + SPIS_CFG_REG);
+
+ cnt = xfer->len / 4;
+ if (xfer->tx_buf)
+ iowrite32_rep(mdata->base + SPIS_TX_DATA_REG,
+ xfer->tx_buf, cnt);
+
+ remainder = xfer->len % 4;
+ if (xfer->tx_buf && remainder > 0) {
+ reg_val = 0;
+ memcpy(&reg_val, xfer->tx_buf + cnt * 4, remainder);
+ writel(reg_val, mdata->base + SPIS_TX_DATA_REG);
+ }
+
+ ret = mtk_spi_slave_wait_for_completion(mdata);
+ if (ret) {
+ mtk_spi_slave_disable_xfer(mdata);
+ writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
+ }
+
+ return ret;
+}
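+
+/*
+ * FIFO packing example for the helper above (illustrative length): a
+ * 10-byte tx_buf is written as cnt = 10 / 4 = 2 full words through
+ * iowrite32_rep(), then the 10 % 4 = 2 trailing bytes are memcpy()'d
+ * into a zeroed u32 for the final writel(), avoiding any read past the
+ * end of tx_buf.
+ */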
+
+static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ struct device *dev = mdata->dev;
+ int reg_val, ret;
+
+ writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
+
+ if (xfer->tx_buf) {
+ /* tx_buf is a const void *, but dma_map_single() needs a
+ * non-const void * for the dma mapping
+ */
+ void *nonconst_tx = (void *)xfer->tx_buf;
+
+ xfer->tx_dma = dma_map_single(dev, nonconst_tx,
+ xfer->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, xfer->tx_dma)) {
+ ret = -ENOMEM;
+ goto disable_transfer;
+ }
+ }
+
+ if (xfer->rx_buf) {
+ xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
+ xfer->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, xfer->rx_dma)) {
+ ret = -ENOMEM;
+ goto unmap_txdma;
+ }
+ }
+
+ writel(xfer->tx_dma, mdata->base + SPIS_TX_SRC_REG);
+ writel(xfer->rx_dma, mdata->base + SPIS_RX_DST_REG);
+
+ writel(SPIS_DMA_ADDR_EN, mdata->base + SPIS_SOFT_RST_REG);
+
+ /* enable TX and/or RX in the config register */
+ reg_val = readl(mdata->base + SPIS_CFG_REG);
+ if (xfer->tx_buf)
+ reg_val |= SPIS_TX_EN;
+ if (xfer->rx_buf)
+ reg_val |= SPIS_RX_EN;
+ writel(reg_val, mdata->base + SPIS_CFG_REG);
+
+ /* config dma */
+ reg_val = 0;
+ reg_val |= (xfer->len - 1) & TX_DMA_LEN;
+ writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
+
+ reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
+ if (xfer->tx_buf)
+ reg_val |= TX_DMA_EN;
+ if (xfer->rx_buf)
+ reg_val |= RX_DMA_EN;
+ reg_val |= TX_DMA_TRIG_EN;
+ writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
+
+ ret = mtk_spi_slave_wait_for_completion(mdata);
+ if (ret)
+ goto unmap_rxdma;
+
+ return 0;
+
+unmap_rxdma:
+ if (xfer->rx_buf)
+ dma_unmap_single(dev, xfer->rx_dma,
+ xfer->len, DMA_FROM_DEVICE);
+
+unmap_txdma:
+ if (xfer->tx_buf)
+ dma_unmap_single(dev, xfer->tx_dma,
+ xfer->len, DMA_TO_DEVICE);
+
+disable_transfer:
+ mtk_spi_slave_disable_dma(mdata);
+ mtk_spi_slave_disable_xfer(mdata);
+ writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
+
+ return ret;
+}
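+
+/*
+ * Note on the DMA length encoding above (inferred from the mask width,
+ * not from a datasheet): SPIS_DMA_CFG_REG carries xfer->len - 1 in its
+ * low 20 bits (TX_DMA_LEN), so a 512-byte transfer programs 0x1ff and
+ * the largest possible DMA transfer is 2^20 bytes.
+ */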
+
+static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+
+ reinit_completion(&mdata->xfer_done);
+ mdata->slave_aborted = false;
+ mdata->cur_transfer = xfer;
+
+ if (xfer->len > MTK_SPI_SLAVE_MAX_FIFO_SIZE)
+ return mtk_spi_slave_dma_transfer(ctlr, spi, xfer);
+ else
+ return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer);
+}
+
+static int mtk_spi_slave_setup(struct spi_device *spi)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
+ u32 reg_val;
+
+ reg_val = DMA_DONE_EN | DATA_DONE_EN |
+ RSTA_DONE_EN | CMD_INVALID_EN;
+ writel(reg_val, mdata->base + SPIS_IRQ_EN_REG);
+
+ reg_val = DMA_DONE_MASK | DATA_DONE_MASK |
+ RSTA_DONE_MASK | CMD_INVALID_MASK;
+ writel(reg_val, mdata->base + SPIS_IRQ_MASK_REG);
+
+ mtk_spi_slave_disable_dma(mdata);
+ mtk_spi_slave_disable_xfer(mdata);
+
+ return 0;
+}
+
+static int mtk_slave_abort(struct spi_controller *ctlr)
+{
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+
+ mdata->slave_aborted = true;
+ complete(&mdata->xfer_done);
+
+ return 0;
+}
+
+static irqreturn_t mtk_spi_slave_interrupt(int irq, void *dev_id)
+{
+ struct spi_controller *ctlr = dev_id;
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ struct spi_transfer *trans = mdata->cur_transfer;
+ u32 int_status, reg_val, cnt, remainder;
+
+ int_status = readl(mdata->base + SPIS_IRQ_ST_REG);
+ writel(int_status, mdata->base + SPIS_IRQ_CLR_REG);
+
+ if (!trans)
+ return IRQ_NONE;
+
+ if ((int_status & DMA_DONE_ST) &&
+ ((int_status & DATA_DONE_ST) ||
+ (int_status & RSTA_DONE_ST))) {
+ writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
+
+ if (trans->tx_buf)
+ dma_unmap_single(mdata->dev, trans->tx_dma,
+ trans->len, DMA_TO_DEVICE);
+ if (trans->rx_buf)
+ dma_unmap_single(mdata->dev, trans->rx_dma,
+ trans->len, DMA_FROM_DEVICE);
+
+ mtk_spi_slave_disable_dma(mdata);
+ mtk_spi_slave_disable_xfer(mdata);
+ }
+
+ if ((!(int_status & DMA_DONE_ST)) &&
+ ((int_status & DATA_DONE_ST) ||
+ (int_status & RSTA_DONE_ST))) {
+ cnt = trans->len / 4;
+ if (trans->rx_buf)
+ ioread32_rep(mdata->base + SPIS_RX_DATA_REG,
+ trans->rx_buf, cnt);
+ remainder = trans->len % 4;
+ if (trans->rx_buf && remainder > 0) {
+ reg_val = readl(mdata->base + SPIS_RX_DATA_REG);
+ memcpy(trans->rx_buf + (cnt * 4),
+ &reg_val, remainder);
+ }
+
+ mtk_spi_slave_disable_xfer(mdata);
+ }
+
+ if (int_status & CMD_INVALID_ST) {
+ dev_warn(&ctlr->dev, "cmd invalid\n");
+ return IRQ_NONE;
+ }
+
+ mdata->cur_transfer = NULL;
+ complete(&mdata->xfer_done);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_spi_slave_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct mtk_spi_slave *mdata;
+ struct resource *res;
+ int irq, ret;
+
+ ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
+ if (!ctlr) {
+ dev_err(&pdev->dev, "failed to alloc spi slave\n");
+ return -ENOMEM;
+ }
+
+ ctlr->auto_runtime_pm = true;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
+ ctlr->mode_bits |= SPI_LSB_FIRST;
+
+ ctlr->prepare_message = mtk_spi_slave_prepare_message;
+ ctlr->transfer_one = mtk_spi_slave_transfer_one;
+ ctlr->setup = mtk_spi_slave_setup;
+ ctlr->slave_abort = mtk_slave_abort;
+
+ mdata = spi_controller_get_devdata(ctlr);
+
+ platform_set_drvdata(pdev, ctlr);
+
+ init_completion(&mdata->xfer_done);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "failed to determine base address\n");
+ goto err_put_ctlr;
+ }
+
+ mdata->dev = &pdev->dev;
+
+ mdata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mdata->base)) {
+ ret = PTR_ERR(mdata->base);
+ goto err_put_ctlr;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
+ ret = irq;
+ goto err_put_ctlr;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, mtk_spi_slave_interrupt,
+ IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
+ goto err_put_ctlr;
+ }
+
+ mdata->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(mdata->spi_clk)) {
+ ret = PTR_ERR(mdata->spi_clk);
+ dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
+ goto err_put_ctlr;
+ }
+
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
+ goto err_put_ctlr;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register slave controller(%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ goto err_disable_runtime_pm;
+ }
+
+ clk_disable_unprepare(mdata->spi_clk);
+
+ return 0;
+
+err_disable_runtime_pm:
+ pm_runtime_disable(&pdev->dev);
+err_put_ctlr:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+static int mtk_spi_slave_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_spi_slave_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = spi_controller_suspend(ctlr);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ clk_disable_unprepare(mdata->spi_clk);
+
+ return ret;
+}
+
+static int mtk_spi_slave_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ ret = spi_controller_resume(ctlr);
+ if (ret < 0)
+ clk_disable_unprepare(mdata->spi_clk);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int mtk_spi_slave_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+
+ clk_disable_unprepare(mdata->spi_clk);
+
+ return 0;
+}
+
+static int mtk_spi_slave_runtime_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops mtk_spi_slave_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_slave_suspend, mtk_spi_slave_resume)
+ SET_RUNTIME_PM_OPS(mtk_spi_slave_runtime_suspend,
+ mtk_spi_slave_runtime_resume, NULL)
+};
+
+static struct platform_driver mtk_spi_slave_driver = {
+ .driver = {
+ .name = "mtk-spi-slave",
+ .pm = &mtk_spi_slave_pm,
+ .of_match_table = mtk_spi_slave_of_match,
+ },
+ .probe = mtk_spi_slave_probe,
+ .remove = mtk_spi_slave_remove,
+};
+
+module_platform_driver(mtk_spi_slave_driver);
+
+MODULE_DESCRIPTION("MTK SPI Slave Controller driver");
+MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mtk-spi-slave");
diff --git a/drivers/spi/spi-slave-system-control.c b/drivers/spi/spi-slave-system-control.c
index c0257e937995..169f3d595f60 100644
--- a/drivers/spi/spi-slave-system-control.c
+++ b/drivers/spi/spi-slave-system-control.c
@@ -60,6 +60,7 @@ static void spi_slave_system_control_complete(void *arg)
case CMD_REBOOT:
dev_info(&priv->spi->dev, "Rebooting system...\n");
kernel_restart(NULL);
+ break;
case CMD_POWEROFF:
dev_info(&priv->spi->dev, "Powering off system...\n");
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
new file mode 100644
index 000000000000..8daa24eec624
--- /dev/null
+++ b/drivers/spi/spi-sprd.c
@@ -0,0 +1,745 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Spreadtrum Communications Inc.
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#define SPRD_SPI_TXD 0x0
+#define SPRD_SPI_CLKD 0x4
+#define SPRD_SPI_CTL0 0x8
+#define SPRD_SPI_CTL1 0xc
+#define SPRD_SPI_CTL2 0x10
+#define SPRD_SPI_CTL3 0x14
+#define SPRD_SPI_CTL4 0x18
+#define SPRD_SPI_CTL5 0x1c
+#define SPRD_SPI_INT_EN 0x20
+#define SPRD_SPI_INT_CLR 0x24
+#define SPRD_SPI_INT_RAW_STS 0x28
+#define SPRD_SPI_INT_MASK_STS 0x2c
+#define SPRD_SPI_STS1 0x30
+#define SPRD_SPI_STS2 0x34
+#define SPRD_SPI_DSP_WAIT 0x38
+#define SPRD_SPI_STS3 0x3c
+#define SPRD_SPI_CTL6 0x40
+#define SPRD_SPI_STS4 0x44
+#define SPRD_SPI_FIFO_RST 0x48
+#define SPRD_SPI_CTL7 0x4c
+#define SPRD_SPI_STS5 0x50
+#define SPRD_SPI_CTL8 0x54
+#define SPRD_SPI_CTL9 0x58
+#define SPRD_SPI_CTL10 0x5c
+#define SPRD_SPI_CTL11 0x60
+#define SPRD_SPI_CTL12 0x64
+#define SPRD_SPI_STS6 0x68
+#define SPRD_SPI_STS7 0x6c
+#define SPRD_SPI_STS8 0x70
+#define SPRD_SPI_STS9 0x74
+
+/* Bits & mask definition for register CTL0 */
+#define SPRD_SPI_SCK_REV BIT(13)
+#define SPRD_SPI_NG_TX BIT(1)
+#define SPRD_SPI_NG_RX BIT(0)
+#define SPRD_SPI_CHNL_LEN_MASK GENMASK(4, 0)
+#define SPRD_SPI_CSN_MASK GENMASK(11, 8)
+#define SPRD_SPI_CS0_VALID BIT(8)
+
+/* Bits & mask definition for register SPI_INT_EN */
+#define SPRD_SPI_TX_END_INT_EN BIT(8)
+#define SPRD_SPI_RX_END_INT_EN BIT(9)
+
+/* Bits & mask definition for register SPI_INT_RAW_STS */
+#define SPRD_SPI_TX_END_RAW BIT(8)
+#define SPRD_SPI_RX_END_RAW BIT(9)
+
+/* Bits & mask definition for register SPI_INT_CLR */
+#define SPRD_SPI_TX_END_CLR BIT(8)
+#define SPRD_SPI_RX_END_CLR BIT(9)
+
+/* Bits & mask definition for register INT_MASK_STS */
+#define SPRD_SPI_MASK_RX_END BIT(9)
+#define SPRD_SPI_MASK_TX_END BIT(8)
+
+/* Bits & mask definition for register STS2 */
+#define SPRD_SPI_TX_BUSY BIT(8)
+
+/* Bits & mask definition for register CTL1 */
+#define SPRD_SPI_RX_MODE BIT(12)
+#define SPRD_SPI_TX_MODE BIT(13)
+#define SPRD_SPI_RTX_MD_MASK GENMASK(13, 12)
+
+/* Bits & mask definition for register CTL2 */
+#define SPRD_SPI_DMA_EN BIT(6)
+
+/* Bits & mask definition for register CTL4 */
+#define SPRD_SPI_START_RX BIT(9)
+#define SPRD_SPI_ONLY_RECV_MASK GENMASK(8, 0)
+
+/* Bits & mask definition for register SPI_INT_CLR */
+#define SPRD_SPI_RX_END_INT_CLR BIT(9)
+#define SPRD_SPI_TX_END_INT_CLR BIT(8)
+
+/* Bits & mask definition for register SPI_INT_RAW */
+#define SPRD_SPI_RX_END_IRQ BIT(9)
+#define SPRD_SPI_TX_END_IRQ BIT(8)
+
+/* Bits & mask definition for register CTL12 */
+#define SPRD_SPI_SW_RX_REQ BIT(0)
+#define SPRD_SPI_SW_TX_REQ BIT(1)
+
+/* Bits & mask definition for register CTL7 */
+#define SPRD_SPI_DATA_LINE2_EN BIT(15)
+#define SPRD_SPI_MODE_MASK GENMASK(5, 3)
+#define SPRD_SPI_MODE_OFFSET 3
+#define SPRD_SPI_3WIRE_MODE 4
+#define SPRD_SPI_4WIRE_MODE 0
+
+/* Bits & mask definition for register CTL8 */
+#define SPRD_SPI_TX_MAX_LEN_MASK GENMASK(19, 0)
+#define SPRD_SPI_TX_LEN_H_MASK GENMASK(3, 0)
+#define SPRD_SPI_TX_LEN_H_OFFSET 16
+
+/* Bits & mask definition for register CTL9 */
+#define SPRD_SPI_TX_LEN_L_MASK GENMASK(15, 0)
+
+/* Bits & mask definition for register CTL10 */
+#define SPRD_SPI_RX_MAX_LEN_MASK GENMASK(19, 0)
+#define SPRD_SPI_RX_LEN_H_MASK GENMASK(3, 0)
+#define SPRD_SPI_RX_LEN_H_OFFSET 16
+
+/* Bits & mask definition for register CTL11 */
+#define SPRD_SPI_RX_LEN_L_MASK GENMASK(15, 0)
+
+/* Default & maximum word delay cycles */
+#define SPRD_SPI_MIN_DELAY_CYCLE 14
+#define SPRD_SPI_MAX_DELAY_CYCLE 130
+
+#define SPRD_SPI_FIFO_SIZE 32
+#define SPRD_SPI_CHIP_CS_NUM 0x4
+#define SPRD_SPI_CHNL_LEN 2
+#define SPRD_SPI_DEFAULT_SOURCE 26000000
+#define SPRD_SPI_MAX_SPEED_HZ 48000000
+#define SPRD_SPI_AUTOSUSPEND_DELAY 100
+
+struct sprd_spi {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+ u32 src_clk;
+ u32 hw_mode;
+ u32 trans_len;
+ u32 trans_mode;
+ u32 word_delay;
+ u32 hw_speed_hz;
+ u32 len;
+ int status;
+ const void *tx_buf;
+ void *rx_buf;
+ int (*read_bufs)(struct sprd_spi *ss, u32 len);
+ int (*write_bufs)(struct sprd_spi *ss, u32 len);
+};
+
+static u32 sprd_spi_transfer_max_timeout(struct sprd_spi *ss,
+ struct spi_transfer *t)
+{
+ /*
+ * The time spent on transmission of the full FIFO data is the maximum
+ * SPI transmission time.
+ */
+ u32 size = t->bits_per_word * SPRD_SPI_FIFO_SIZE;
+ u32 bit_time_us = DIV_ROUND_UP(USEC_PER_SEC, ss->hw_speed_hz);
+ u32 total_time_us = size * bit_time_us;
+ /*
+ * Our SPI hardware inserts an interval between consecutive data words,
+ * so the total transmission time needs to include that interval time.
+ */
+ u32 interval_cycle = SPRD_SPI_FIFO_SIZE * ss->word_delay;
+ u32 interval_time_us = DIV_ROUND_UP(interval_cycle * USEC_PER_SEC,
+ ss->src_clk);
+
+ return total_time_us + interval_time_us;
+}
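+
+/*
+ * Worked example of the bound above, with illustrative numbers: for
+ * bits_per_word = 8, hw_speed_hz = 1 MHz, src_clk = 26 MHz and the
+ * minimum word_delay of 14 cycles:
+ *   size             = 8 * 32 = 256 bits
+ *   bit_time_us      = DIV_ROUND_UP(1000000, 1000000) = 1
+ *   total_time_us    = 256
+ *   interval_cycle   = 32 * 14 = 448
+ *   interval_time_us = DIV_ROUND_UP(448 * 1000000, 26000000) = 18
+ * so the polls below may wait up to 274 us per FIFO-sized chunk.
+ */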
+
+static int sprd_spi_wait_for_tx_end(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ u32 val, us;
+ int ret;
+
+ us = sprd_spi_transfer_max_timeout(ss, t);
+ ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
+ val & SPRD_SPI_TX_END_IRQ, 0, us);
+ if (ret) {
+ dev_err(ss->dev, "SPI error, spi send timeout!\n");
+ return ret;
+ }
+
+ ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_STS2, val,
+ !(val & SPRD_SPI_TX_BUSY), 0, us);
+ if (ret) {
+ dev_err(ss->dev, "SPI error, spi busy timeout!\n");
+ return ret;
+ }
+
+ writel_relaxed(SPRD_SPI_TX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
+
+ return 0;
+}
+
+static int sprd_spi_wait_for_rx_end(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ u32 val, us;
+ int ret;
+
+ us = sprd_spi_transfer_max_timeout(ss, t);
+ ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
+ val & SPRD_SPI_RX_END_IRQ, 0, us);
+ if (ret) {
+ dev_err(ss->dev, "SPI error, spi rx timeout!\n");
+ return ret;
+ }
+
+ writel_relaxed(SPRD_SPI_RX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
+
+ return 0;
+}
+
+static void sprd_spi_tx_req(struct sprd_spi *ss)
+{
+ writel_relaxed(SPRD_SPI_SW_TX_REQ, ss->base + SPRD_SPI_CTL12);
+}
+
+static void sprd_spi_rx_req(struct sprd_spi *ss)
+{
+ writel_relaxed(SPRD_SPI_SW_RX_REQ, ss->base + SPRD_SPI_CTL12);
+}
+
+static void sprd_spi_enter_idle(struct sprd_spi *ss)
+{
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
+
+ val &= ~SPRD_SPI_RTX_MD_MASK;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL1);
+}
+
+static void sprd_spi_set_transfer_bits(struct sprd_spi *ss, u32 bits)
+{
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
+
+ /* Set the valid bits for every transaction */
+ val &= ~(SPRD_SPI_CHNL_LEN_MASK << SPRD_SPI_CHNL_LEN);
+ val |= bits << SPRD_SPI_CHNL_LEN;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
+}
+
+static void sprd_spi_set_tx_length(struct sprd_spi *ss, u32 length)
+{
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL8);
+
+ length &= SPRD_SPI_TX_MAX_LEN_MASK;
+ val &= ~SPRD_SPI_TX_LEN_H_MASK;
+ val |= length >> SPRD_SPI_TX_LEN_H_OFFSET;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL8);
+
+ val = length & SPRD_SPI_TX_LEN_L_MASK;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL9);
+}
+
+static void sprd_spi_set_rx_length(struct sprd_spi *ss, u32 length)
+{
+ u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL10);
+
+ length &= SPRD_SPI_RX_MAX_LEN_MASK;
+ val &= ~SPRD_SPI_RX_LEN_H_MASK;
+ val |= length >> SPRD_SPI_RX_LEN_H_OFFSET;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL10);
+
+ val = length & SPRD_SPI_RX_LEN_L_MASK;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL11);
+}
+
+static void sprd_spi_chipselect(struct spi_device *sdev, bool cs)
+{
+ struct spi_controller *sctlr = sdev->controller;
+ struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+ u32 val;
+
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
+ /* The SPI controller will pull down CS pin if cs is 0 */
+ if (!cs) {
+ val &= ~SPRD_SPI_CS0_VALID;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
+ } else {
+ val |= SPRD_SPI_CSN_MASK;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
+ }
+}
+
+static int sprd_spi_write_only_receive(struct sprd_spi *ss, u32 len)
+{
+ u32 val;
+
+ /* Clear the start receive bit and reset receive data number */
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
+ val &= ~(SPRD_SPI_START_RX | SPRD_SPI_ONLY_RECV_MASK);
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
+
+ /* Set the receive data length */
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
+ val |= len & SPRD_SPI_ONLY_RECV_MASK;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
+
+ /* Trigger to receive data */
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
+ val |= SPRD_SPI_START_RX;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
+
+ return len;
+}
+
+static int sprd_spi_write_bufs_u8(struct sprd_spi *ss, u32 len)
+{
+ u8 *tx_p = (u8 *)ss->tx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ writeb_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
+
+ ss->tx_buf += i;
+ return i;
+}
+
+static int sprd_spi_write_bufs_u16(struct sprd_spi *ss, u32 len)
+{
+ u16 *tx_p = (u16 *)ss->tx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ writew_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
+
+ ss->tx_buf += i << 1;
+ return i << 1;
+}
+
+static int sprd_spi_write_bufs_u32(struct sprd_spi *ss, u32 len)
+{
+ u32 *tx_p = (u32 *)ss->tx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ writel_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
+
+ ss->tx_buf += i << 2;
+ return i << 2;
+}
+
+static int sprd_spi_read_bufs_u8(struct sprd_spi *ss, u32 len)
+{
+ u8 *rx_p = (u8 *)ss->rx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ rx_p[i] = readb_relaxed(ss->base + SPRD_SPI_TXD);
+
+ ss->rx_buf += i;
+ return i;
+}
+
+static int sprd_spi_read_bufs_u16(struct sprd_spi *ss, u32 len)
+{
+ u16 *rx_p = (u16 *)ss->rx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ rx_p[i] = readw_relaxed(ss->base + SPRD_SPI_TXD);
+
+ ss->rx_buf += i << 1;
+ return i << 1;
+}
+
+static int sprd_spi_read_bufs_u32(struct sprd_spi *ss, u32 len)
+{
+ u32 *rx_p = (u32 *)ss->rx_buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ rx_p[i] = readl_relaxed(ss->base + SPRD_SPI_TXD);
+
+ ss->rx_buf += i << 2;
+ return i << 2;
+}
+
+static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
+{
+ struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
+ u32 trans_len = ss->trans_len, len;
+ int ret, write_size = 0;
+
+ while (trans_len) {
+ len = trans_len > SPRD_SPI_FIFO_SIZE ? SPRD_SPI_FIFO_SIZE :
+ trans_len;
+ if (ss->trans_mode & SPRD_SPI_TX_MODE) {
+ sprd_spi_set_tx_length(ss, len);
+ write_size += ss->write_bufs(ss, len);
+
+ /*
+ * For our 3-wire mode or dual TX line mode, we need
+ * to request the controller to transfer.
+ */
+ if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
+ sprd_spi_tx_req(ss);
+
+ ret = sprd_spi_wait_for_tx_end(ss, t);
+ } else {
+ sprd_spi_set_rx_length(ss, len);
+
+ /*
+ * For our 3-wire mode or dual TX line mode, we need
+ * to request the controller to read.
+ */
+ if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
+ sprd_spi_rx_req(ss);
+ else
+ write_size += ss->write_bufs(ss, len);
+
+ ret = sprd_spi_wait_for_rx_end(ss, t);
+ }
+
+ if (ret)
+ goto complete;
+
+ if (ss->trans_mode & SPRD_SPI_RX_MODE)
+ ss->read_bufs(ss, len);
+
+ trans_len -= len;
+ }
+
+ ret = write_size;
+
+complete:
+ sprd_spi_enter_idle(ss);
+
+ return ret;
+}
+
+static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
+{
+ /*
+ * From SPI datasheet, the prescale calculation formula:
+ * prescale = SPI source clock / (2 * SPI_freq) - 1;
+ */
+ u32 clk_div = DIV_ROUND_UP(ss->src_clk, speed_hz << 1) - 1;
+
+ /* Save the real hardware speed */
+ ss->hw_speed_hz = (ss->src_clk >> 1) / (clk_div + 1);
+ writel_relaxed(clk_div, ss->base + SPRD_SPI_CLKD);
+}
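+
+/*
+ * Example of the prescale formula above (illustrative request, default
+ * 26 MHz source): for speed_hz = 4 MHz,
+ *   clk_div     = DIV_ROUND_UP(26000000, 2 * 4000000) - 1 = 3
+ *   hw_speed_hz = (26000000 >> 1) / (3 + 1) = 3250000
+ * i.e. the divider is rounded so the bus runs at 3.25 MHz, never
+ * faster than requested.
+ */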
+
+static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
+{
+ u16 word_delay, interval;
+ u32 val;
+
+ /* SCK_REV/NG_TX/NG_RX are CTL0 bits, so read-modify-write CTL0 */
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
+ val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX);
+ /* Set the default clock phase and clock polarity */
+ val |= ss->hw_mode & SPI_CPHA ? SPRD_SPI_NG_RX : SPRD_SPI_NG_TX;
+ val |= ss->hw_mode & SPI_CPOL ? SPRD_SPI_SCK_REV : 0;
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
+
+ /*
+ * Set the interval between two SPI frames; per the datasheet, the
+ * interval is calculated as:
+ * interval time (source clock cycles) = interval * 4 + 10.
+ */
+ word_delay = clamp_t(u16, t->word_delay, SPRD_SPI_MIN_DELAY_CYCLE,
+ SPRD_SPI_MAX_DELAY_CYCLE);
+ interval = DIV_ROUND_UP(word_delay - 10, 4);
+ ss->word_delay = interval * 4 + 10;
+ writel_relaxed(interval, ss->base + SPRD_SPI_CTL5);
+
+ /* Reset SPI fifo */
+ writel_relaxed(1, ss->base + SPRD_SPI_FIFO_RST);
+ writel_relaxed(0, ss->base + SPRD_SPI_FIFO_RST);
+
+ /* Set SPI work mode */
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL7);
+ val &= ~SPRD_SPI_MODE_MASK;
+
+ if (ss->hw_mode & SPI_3WIRE)
+ val |= SPRD_SPI_3WIRE_MODE << SPRD_SPI_MODE_OFFSET;
+ else
+ val |= SPRD_SPI_4WIRE_MODE << SPRD_SPI_MODE_OFFSET;
+
+ if (ss->hw_mode & SPI_TX_DUAL)
+ val |= SPRD_SPI_DATA_LINE2_EN;
+ else
+ val &= ~SPRD_SPI_DATA_LINE2_EN;
+
+ writel_relaxed(val, ss->base + SPRD_SPI_CTL7);
+}
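+
+/*
+ * Example of the frame-interval formula above: a requested word_delay
+ * of 20 cycles is clamped into [14, 130], then
+ *   interval = DIV_ROUND_UP(20 - 10, 4) = 3
+ * is written to CTL5, and the effective delay recorded in
+ * ss->word_delay is 3 * 4 + 10 = 22 source-clock cycles, the smallest
+ * value >= 20 the hardware can produce.
+ */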
+
+static int sprd_spi_setup_transfer(struct spi_device *sdev,
+ struct spi_transfer *t)
+{
+ struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
+ u8 bits_per_word = t->bits_per_word;
+ u32 val, mode = 0;
+
+ ss->len = t->len;
+ ss->tx_buf = t->tx_buf;
+ ss->rx_buf = t->rx_buf;
+
+ ss->hw_mode = sdev->mode;
+ sprd_spi_init_hw(ss, t);
+
+ /* Set transfer speed and valid bits */
+ sprd_spi_set_speed(ss, t->speed_hz);
+ sprd_spi_set_transfer_bits(ss, bits_per_word);
+
+ if (bits_per_word > 16)
+ bits_per_word = round_up(bits_per_word, 16);
+ else
+ bits_per_word = round_up(bits_per_word, 8);
+
+ switch (bits_per_word) {
+ case 8:
+ ss->trans_len = t->len;
+ ss->read_bufs = sprd_spi_read_bufs_u8;
+ ss->write_bufs = sprd_spi_write_bufs_u8;
+ break;
+ case 16:
+ ss->trans_len = t->len >> 1;
+ ss->read_bufs = sprd_spi_read_bufs_u16;
+ ss->write_bufs = sprd_spi_write_bufs_u16;
+ break;
+ case 32:
+ ss->trans_len = t->len >> 2;
+ ss->read_bufs = sprd_spi_read_bufs_u32;
+ ss->write_bufs = sprd_spi_write_bufs_u32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set transfer read or write mode */
+ val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
+ val &= ~SPRD_SPI_RTX_MD_MASK;
+ if (t->tx_buf)
+ mode |= SPRD_SPI_TX_MODE;
+ if (t->rx_buf)
+ mode |= SPRD_SPI_RX_MODE;
+
+ writel_relaxed(val | mode, ss->base + SPRD_SPI_CTL1);
+
+ ss->trans_mode = mode;
+
+ /*
+ * If in only receive mode, we need to trigger the SPI controller to
+ * receive data automatically.
+ */
+ if (ss->trans_mode == SPRD_SPI_RX_MODE)
+ ss->write_bufs = sprd_spi_write_only_receive;
+
+ return 0;
+}
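+
+/*
+ * Example of the word-size selection above: bits_per_word = 12 rounds
+ * up to 16, so a 6-byte transfer uses trans_len = 6 >> 1 = 3 FIFO words
+ * through the u16 accessors; anything that rounds to a value other
+ * than 8/16/32 (e.g. 48) is rejected with -EINVAL.
+ */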
+
+static int sprd_spi_transfer_one(struct spi_controller *sctlr,
+ struct spi_device *sdev,
+ struct spi_transfer *t)
+{
+ int ret;
+
+ ret = sprd_spi_setup_transfer(sdev, t);
+ if (ret)
+ goto setup_err;
+
+ ret = sprd_spi_txrx_bufs(sdev, t);
+ if (ret == t->len)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EREMOTEIO;
+
+setup_err:
+ spi_finalize_current_transfer(sctlr);
+
+ return ret;
+}
+
+static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
+{
+ struct clk *clk_spi, *clk_parent;
+
+ clk_spi = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(clk_spi)) {
+ dev_warn(&pdev->dev, "can't get the spi clock\n");
+ clk_spi = NULL;
+ }
+
+ clk_parent = devm_clk_get(&pdev->dev, "source");
+ if (IS_ERR(clk_parent)) {
+ dev_warn(&pdev->dev, "can't get the source clock\n");
+ clk_parent = NULL;
+ }
+
+ ss->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(ss->clk)) {
+ dev_err(&pdev->dev, "can't get the enable clock\n");
+ return PTR_ERR(ss->clk);
+ }
+
+ if (!clk_set_parent(clk_spi, clk_parent))
+ ss->src_clk = clk_get_rate(clk_spi);
+ else
+ ss->src_clk = SPRD_SPI_DEFAULT_SOURCE;
+
+ return 0;
+}
+
+static int sprd_spi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *sctlr;
+ struct resource *res;
+ struct sprd_spi *ss;
+ int ret;
+
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "spi");
+ sctlr = spi_alloc_master(&pdev->dev, sizeof(*ss));
+ if (!sctlr)
+ return -ENOMEM;
+
+ ss = spi_controller_get_devdata(sctlr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ss->base)) {
+ ret = PTR_ERR(ss->base);
+ goto free_controller;
+ }
+
+ ss->dev = &pdev->dev;
+ sctlr->dev.of_node = pdev->dev.of_node;
+ sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL;
+ sctlr->bus_num = pdev->id;
+ sctlr->set_cs = sprd_spi_chipselect;
+ sctlr->transfer_one = sprd_spi_transfer_one;
+ sctlr->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, sctlr);
+ ret = sprd_spi_clk_init(pdev, ss);
+ if (ret)
+ goto free_controller;
+
+ /* ss->src_clk is only valid once sprd_spi_clk_init() has run */
+ sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1,
+ SPRD_SPI_MAX_SPEED_HZ);
+
+ ret = clk_prepare_enable(ss->clk);
+ if (ret)
+ goto free_controller;
+
+ ret = pm_runtime_set_active(&pdev->dev);
+ if (ret < 0)
+ goto disable_clk;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ SPRD_SPI_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to resume SPI controller\n");
+ goto err_rpm_put;
+ }
+
+ ret = devm_spi_register_controller(&pdev->dev, sctlr);
+ if (ret)
+ goto err_rpm_put;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+err_rpm_put:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+disable_clk:
+ clk_disable_unprepare(ss->clk);
+free_controller:
+ spi_controller_put(sctlr);
+
+ return ret;
+}
+
+static int sprd_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *sctlr = platform_get_drvdata(pdev);
+ struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+ int ret;
+
+ ret = pm_runtime_get_sync(ss->dev);
+ if (ret < 0) {
+ dev_err(ss->dev, "failed to resume SPI controller\n");
+ return ret;
+ }
+
+ clk_disable_unprepare(ss->clk);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *sctlr = dev_get_drvdata(dev);
+ struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+
+ clk_disable_unprepare(ss->clk);
+
+ return 0;
+}
+
+static int __maybe_unused sprd_spi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *sctlr = dev_get_drvdata(dev);
+ struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
+ int ret;
+
+ ret = clk_prepare_enable(ss->clk);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct dev_pm_ops sprd_spi_pm_ops = {
+ SET_RUNTIME_PM_OPS(sprd_spi_runtime_suspend,
+ sprd_spi_runtime_resume, NULL)
+};
+
+static const struct of_device_id sprd_spi_of_match[] = {
+ { .compatible = "sprd,sc9860-spi", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver sprd_spi_driver = {
+ .driver = {
+ .name = "sprd-spi",
+ .of_match_table = sprd_spi_of_match,
+ .pm = &sprd_spi_pm_ops,
+ },
+ .probe = sprd_spi_probe,
+ .remove = sprd_spi_remove,
+};
+
+module_platform_driver(sprd_spi_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SPI Controller driver");
+MODULE_AUTHOR("Lanqing Liu <lanqing.liu@spreadtrum.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
new file mode 100644
index 000000000000..3b2a9a6b990d
--- /dev/null
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi-mem.h>
+
+#define QSPI_CR 0x00
+#define CR_EN BIT(0)
+#define CR_ABORT BIT(1)
+#define CR_DMAEN BIT(2)
+#define CR_TCEN BIT(3)
+#define CR_SSHIFT BIT(4)
+#define CR_DFM BIT(6)
+#define CR_FSEL BIT(7)
+#define CR_FTHRES_MASK GENMASK(12, 8)
+#define CR_TEIE BIT(16)
+#define CR_TCIE BIT(17)
+#define CR_FTIE BIT(18)
+#define CR_SMIE BIT(19)
+#define CR_TOIE BIT(20)
+#define CR_PRESC_MASK GENMASK(31, 24)
+
+#define QSPI_DCR 0x04
+#define DCR_FSIZE_MASK GENMASK(20, 16)
+
+#define QSPI_SR 0x08
+#define SR_TEF BIT(0)
+#define SR_TCF BIT(1)
+#define SR_FTF BIT(2)
+#define SR_SMF BIT(3)
+#define SR_TOF BIT(4)
+#define SR_BUSY BIT(5)
+#define SR_FLEVEL_MASK GENMASK(13, 8)
+
+#define QSPI_FCR 0x0c
+#define FCR_CTEF BIT(0)
+#define FCR_CTCF BIT(1)
+
+#define QSPI_DLR 0x10
+
+#define QSPI_CCR 0x14
+#define CCR_INST_MASK GENMASK(7, 0)
+#define CCR_IMODE_MASK GENMASK(9, 8)
+#define CCR_ADMODE_MASK GENMASK(11, 10)
+#define CCR_ADSIZE_MASK GENMASK(13, 12)
+#define CCR_DCYC_MASK GENMASK(22, 18)
+#define CCR_DMODE_MASK GENMASK(25, 24)
+#define CCR_FMODE_MASK GENMASK(27, 26)
+#define CCR_FMODE_INDW (0U << 26)
+#define CCR_FMODE_INDR (1U << 26)
+#define CCR_FMODE_APM (2U << 26)
+#define CCR_FMODE_MM (3U << 26)
+#define CCR_BUSWIDTH_0 0x0
+#define CCR_BUSWIDTH_1 0x1
+#define CCR_BUSWIDTH_2 0x2
+#define CCR_BUSWIDTH_4 0x3
+
+#define QSPI_AR 0x18
+#define QSPI_ABR 0x1c
+#define QSPI_DR 0x20
+#define QSPI_PSMKR 0x24
+#define QSPI_PSMAR 0x28
+#define QSPI_PIR 0x2c
+#define QSPI_LPTR 0x30
+#define LPTR_DFT_TIMEOUT 0x10
+
+#define STM32_QSPI_MAX_MMAP_SZ SZ_256M
+#define STM32_QSPI_MAX_NORCHIP 2
+
+#define STM32_FIFO_TIMEOUT_US 30000
+#define STM32_BUSY_TIMEOUT_US 100000
+#define STM32_ABT_TIMEOUT_US 100000
+
+struct stm32_qspi_flash {
+ struct stm32_qspi *qspi;
+ u32 cs;
+ u32 presc;
+};
+
+struct stm32_qspi {
+ struct device *dev;
+ void __iomem *io_base;
+ void __iomem *mm_base;
+ resource_size_t mm_size;
+ struct clk *clk;
+ u32 clk_rate;
+ struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
+ struct completion data_completion;
+ u32 fmode;
+
+ /*
+ * lock to protect the device configuration, which can differ
+ * between accesses to the two flash banks (bk1, bk2)
+ */
+ struct mutex lock;
+};
+
+static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
+{
+ struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
+ u32 cr, sr;
+
+ sr = readl_relaxed(qspi->io_base + QSPI_SR);
+
+ if (sr & (SR_TEF | SR_TCF)) {
+ /* disable irq */
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+ cr &= ~CR_TCIE & ~CR_TEIE;
+ writel_relaxed(cr, qspi->io_base + QSPI_CR);
+ complete(&qspi->data_completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
+{
+ *val = readb_relaxed(addr);
+}
+
+static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
+{
+ writeb_relaxed(*val, addr);
+}
+
+static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
+ const struct spi_mem_op *op)
+{
+ void (*tx_fifo)(u8 *val, void __iomem *addr);
+ u32 len = op->data.nbytes, sr;
+ u8 *buf;
+ int ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ tx_fifo = stm32_qspi_read_fifo;
+ buf = op->data.buf.in;
+
+ } else {
+ tx_fifo = stm32_qspi_write_fifo;
+ buf = (u8 *)op->data.buf.out;
+ }
+
+ while (len--) {
+ ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR,
+ sr, (sr & SR_FTF), 1,
+ STM32_FIFO_TIMEOUT_US);
+ if (ret) {
+ dev_err(qspi->dev, "fifo timeout (len:%d stat:%#x)\n",
+ len, sr);
+ return ret;
+ }
+ tx_fifo(buf++, qspi->io_base + QSPI_DR);
+ }
+
+ return 0;
+}
+
+static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
+ const struct spi_mem_op *op)
+{
+ memcpy_fromio(op->data.buf.in, qspi->mm_base + op->addr.val,
+ op->data.nbytes);
+ return 0;
+}
+
+static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
+{
+ if (!op->data.nbytes)
+ return 0;
+
+ if (qspi->fmode == CCR_FMODE_MM)
+ return stm32_qspi_tx_mm(qspi, op);
+
+ return stm32_qspi_tx_poll(qspi, op);
+}
+
+static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
+{
+ u32 sr;
+
+ return readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr,
+ !(sr & SR_BUSY), 1,
+ STM32_BUSY_TIMEOUT_US);
+}
+
+static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
+ const struct spi_mem_op *op)
+{
+ u32 cr, sr;
+ int err = 0;
+
+ if (!op->data.nbytes)
+ return stm32_qspi_wait_nobusy(qspi);
+
+ if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
+ goto out;
+
+ reinit_completion(&qspi->data_completion);
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+ writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);
+
+ if (!wait_for_completion_interruptible_timeout(&qspi->data_completion,
+ msecs_to_jiffies(1000))) {
+ err = -ETIMEDOUT;
+ } else {
+ sr = readl_relaxed(qspi->io_base + QSPI_SR);
+ if (sr & SR_TEF)
+ err = -EIO;
+ }
+
+out:
+ /* clear flags */
+ writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
+
+ return err;
+}
+
+static int stm32_qspi_get_mode(struct stm32_qspi *qspi, u8 buswidth)
+{
+ if (buswidth == 4)
+ return CCR_BUSWIDTH_4;
+
+ return buswidth;
+}
+
+static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
+ u32 ccr, cr, addr_max;
+ int timeout, err = 0;
+
+ dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.val, op->data.nbytes);
+
+ err = stm32_qspi_wait_nobusy(qspi);
+ if (err)
+ goto abort;
+
+ addr_max = op->addr.val + op->data.nbytes + 1;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (addr_max < qspi->mm_size &&
+ op->addr.buswidth)
+ qspi->fmode = CCR_FMODE_MM;
+ else
+ qspi->fmode = CCR_FMODE_INDR;
+ } else {
+ qspi->fmode = CCR_FMODE_INDW;
+ }
+
+ cr = readl_relaxed(qspi->io_base + QSPI_CR);
+ cr &= ~CR_PRESC_MASK & ~CR_FSEL;
+ cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
+ cr |= FIELD_PREP(CR_FSEL, flash->cs);
+ writel_relaxed(cr, qspi->io_base + QSPI_CR);
+
+ if (op->data.nbytes)
+ writel_relaxed(op->data.nbytes - 1,
+ qspi->io_base + QSPI_DLR);
+ else
+ qspi->fmode = CCR_FMODE_INDW;
+
+ ccr = qspi->fmode;
+ ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
+ ccr |= FIELD_PREP(CCR_IMODE_MASK,
+ stm32_qspi_get_mode(qspi, op->cmd.buswidth));
+
+ if (op->addr.nbytes) {
+ ccr |= FIELD_PREP(CCR_ADMODE_MASK,
+ stm32_qspi_get_mode(qspi, op->addr.buswidth));
+ ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
+ }
+
+ if (op->dummy.buswidth && op->dummy.nbytes)
+ ccr |= FIELD_PREP(CCR_DCYC_MASK,
+ op->dummy.nbytes * 8 / op->dummy.buswidth);
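+ /*
+ * The DCYC computation above converts dummy bytes to clock cycles:
+ * e.g. a quad read specifying 4 dummy bytes on a 4-bit bus programs
+ * 4 * 8 / 4 = 8 dummy cycles (illustrative values, not from a
+ * specific flash datasheet).
+ */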
+
+ if (op->data.nbytes) {
+ ccr |= FIELD_PREP(CCR_DMODE_MASK,
+ stm32_qspi_get_mode(qspi, op->data.buswidth));
+ }
+
+ writel_relaxed(ccr, qspi->io_base + QSPI_CCR);
+
+ if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
+ writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);
+
+ err = stm32_qspi_tx(qspi, op);
+
+ /*
+ * Abort in two cases:
+ * - on error
+ * - on memory-mapped read: prefetching must be stopped if we read the
+ *   last byte of the device (device size - fifo size); since the
+ *   device size is not known, prefetching is always stopped.
+ */
+ if (err || qspi->fmode == CCR_FMODE_MM)
+ goto abort;
+
+ /* wait for the end of the transfer in indirect mode */
+ err = stm32_qspi_wait_cmd(qspi, op);
+ if (err)
+ goto abort;
+
+ return 0;
+
+abort:
+ cr = readl_relaxed(qspi->io_base + QSPI_CR) | CR_ABORT;
+ writel_relaxed(cr, qspi->io_base + QSPI_CR);
+
+ /* wait for the hardware to clear the abort bit */
+ timeout = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_CR,
+ cr, !(cr & CR_ABORT), 1,
+ STM32_ABT_TIMEOUT_US);
+
+ writel_relaxed(FCR_CTCF, qspi->io_base + QSPI_FCR);
+
+ if (err || timeout)
+ dev_err(qspi->dev, "%s err:%d abort timeout:%d\n",
+ __func__, err, timeout);
+
+ return err;
+}
+
+static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ mutex_lock(&qspi->lock);
+ ret = stm32_qspi_send(mem, op);
+ mutex_unlock(&qspi->lock);
+
+ return ret;
+}
+
+static int stm32_qspi_setup(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->master;
+ struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
+ struct stm32_qspi_flash *flash;
+ u32 cr, presc;
+
+ if (ctrl->busy)
+ return -EBUSY;
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;
+
+ flash = &qspi->flash[spi->chip_select];
+ flash->qspi = qspi;
+ flash->cs = spi->chip_select;
+ flash->presc = presc;
+
+ mutex_lock(&qspi->lock);
+ writel_relaxed(LPTR_DFT_TIMEOUT, qspi->io_base + QSPI_LPTR);
+ cr = FIELD_PREP(CR_FTHRES_MASK, 3) | CR_TCEN | CR_SSHIFT | CR_EN;
+ writel_relaxed(cr, qspi->io_base + QSPI_CR);
+
+ /* set dcr fsize to max address */
+ writel_relaxed(DCR_FSIZE_MASK, qspi->io_base + QSPI_DCR);
+ mutex_unlock(&qspi->lock);
+
+ return 0;
+}
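+
+/*
+ * Prescaler example for the setup above (illustrative rates): with
+ * clk_rate = 200 MHz and spi-max-frequency = 50 MHz,
+ *   presc = DIV_ROUND_UP(200000000, 50000000) - 1 = 3
+ * and the QSPI clock is clk_rate / (presc + 1) = 50 MHz; non-integer
+ * ratios round presc up, i.e. the bus clock down, so the requested
+ * maximum is never exceeded.
+ */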
+
+/*
+ * No special host constraints, so use the default
+ * spi_mem_default_supports_op() to check the supported modes.
+ */
+static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
+ .exec_op = stm32_qspi_exec_op,
+};
+
+static void stm32_qspi_release(struct stm32_qspi *qspi)
+{
+ /* disable qspi */
+ writel_relaxed(0, qspi->io_base + QSPI_CR);
+ mutex_destroy(&qspi->lock);
+ clk_disable_unprepare(qspi->clk);
+}
+
+static int stm32_qspi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct reset_control *rstc;
+ struct stm32_qspi *qspi;
+ struct resource *res;
+ int ret, irq;
+
+ ctrl = spi_alloc_master(dev, sizeof(*qspi));
+ if (!ctrl)
+ return -ENOMEM;
+
+ qspi = spi_controller_get_devdata(ctrl);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
+ qspi->io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->io_base))
+ return PTR_ERR(qspi->io_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
+ qspi->mm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qspi->mm_base))
+ return PTR_ERR(qspi->mm_base);
+
+ qspi->mm_size = resource_size(res);
+ if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
+ dev_name(dev), qspi);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ init_completion(&qspi->data_completion);
+
+ qspi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(qspi->clk))
+ return PTR_ERR(qspi->clk);
+
+ qspi->clk_rate = clk_get_rate(qspi->clk);
+ if (!qspi->clk_rate)
+ return -EINVAL;
+
+ ret = clk_prepare_enable(qspi->clk);
+ if (ret) {
+		dev_err(dev, "cannot enable the clock\n");
+ return ret;
+ }
+
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ udelay(2);
+ reset_control_deassert(rstc);
+ }
+
+ qspi->dev = dev;
+ platform_set_drvdata(pdev, qspi);
+ mutex_init(&qspi->lock);
+
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
+ | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctrl->setup = stm32_qspi_setup;
+ ctrl->bus_num = -1;
+ ctrl->mem_ops = &stm32_qspi_mem_ops;
+ ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
+ ctrl->dev.of_node = dev->of_node;
+
+ ret = devm_spi_register_master(dev, ctrl);
+ if (ret)
+ goto err_spi_register;
+
+ return 0;
+
+err_spi_register:
+ stm32_qspi_release(qspi);
+
+ return ret;
+}
+
+static int stm32_qspi_remove(struct platform_device *pdev)
+{
+ struct stm32_qspi *qspi = platform_get_drvdata(pdev);
+
+ stm32_qspi_release(qspi);
+ return 0;
+}
+
+static const struct of_device_id stm32_qspi_match[] = {
+ {.compatible = "st,stm32f469-qspi"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_qspi_match);
+
+static struct platform_driver stm32_qspi_driver = {
+ .probe = stm32_qspi_probe,
+ .remove = stm32_qspi_remove,
+ .driver = {
+ .name = "stm32-qspi",
+ .of_match_table = stm32_qspi_match,
+ },
+};
+module_platform_driver(stm32_qspi_driver);
+
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 quad SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 6f7b946b5ced..1427f343b39a 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev)
goto exit_free_master;
}
+	/* a disabled clock may cause an interrupt storm upon IRQ request */
+ tspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tspi->clk)) {
+ ret = PTR_ERR(tspi->clk);
+		dev_err(&pdev->dev, "Cannot get clock: %d\n", ret);
+ goto exit_free_master;
+ }
+ ret = clk_prepare(tspi->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+ goto exit_free_master;
+ }
+ ret = clk_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+ goto exit_free_master;
+ }
+
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tspi->irq);
- goto exit_free_master;
- }
-
- tspi->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tspi->clk)) {
- dev_err(&pdev->dev, "can not get clock\n");
- ret = PTR_ERR(tspi->clk);
- goto exit_free_irq;
+ goto exit_clk_disable;
}
tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@ exit_rx_dma_free:
tegra_slink_deinit_dma_param(tspi, true);
exit_free_irq:
free_irq(spi_irq, tspi);
+exit_clk_disable:
+ clk_disable(tspi->clk);
exit_free_master:
spi_master_put(master);
return ret;
@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev)
free_irq(tspi->irq, tspi);
+ clk_disable(tspi->clk);
+
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
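The clk_prepare()/clk_enable() pair added above is often collapsed into the
combined helper; a sketch of the equivalent call (the split form may be kept
deliberately so the error and remove paths can pair with a bare
clk_disable()):

	ret = clk_prepare_enable(tspi->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "Clock prepare/enable failed %d\n", ret);
		goto exit_free_master;
	}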
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ec395a6baf9c..6ca59406b0b7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SPI init/core code
*
* Copyright (C) 2005 David Brownell
* Copyright (C) 2008 Secret Lab Technologies Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -60,6 +51,7 @@ static void spidev_release(struct device *dev)
spi->controller->cleanup(spi);
spi_controller_put(spi->controller);
+ kfree(spi->driver_override);
kfree(spi);
}
@@ -77,6 +69,51 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ const char *end = memchr(buf, '\n', count);
+ const size_t len = end ? end - buf : count;
+ const char *driver_override, *old;
+
+	/* We need to keep extra room for a newline when displaying the value */
+ if (len >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, len, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ device_lock(dev);
+ old = spi->driver_override;
+ if (len) {
+ spi->driver_override = driver_override;
+ } else {
+		/* Empty string: disable the driver override */
+ spi->driver_override = NULL;
+ kfree(driver_override);
+ }
+ device_unlock(dev);
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *a, char *buf)
+{
+ const struct spi_device *spi = to_spi_device(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
+ device_unlock(dev);
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
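With the attribute pair in place, userspace can force a particular driver to
bind, mirroring the equivalent PCI and platform bus attributes; a hypothetical
sketch (device and driver names are illustrative):

	/*
	 *   echo spi-nor > /sys/bus/spi/devices/spi0.0/driver_override
	 *   echo spi0.0  > /sys/bus/spi/drivers_probe
	 *   echo         > /sys/bus/spi/devices/spi0.0/driver_override  (clear)
	 */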
#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_controller_##field##_show(struct device *dev, \
struct device_attribute *attr, \
@@ -158,6 +195,7 @@ SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
static struct attribute *spi_dev_attrs[] = {
&dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
NULL,
};
@@ -305,6 +343,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
const struct spi_device *spi = to_spi_device(dev);
const struct spi_driver *sdrv = to_spi_driver(drv);
+ /* Check override first, and if set, only use the named driver */
+ if (spi->driver_override)
+ return strcmp(spi->driver_override, drv->name) == 0;
+
/* Attempt an OF style match */
if (of_driver_match_device(dev, drv))
return 1;
@@ -733,7 +775,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
enable = !enable;
if (gpio_is_valid(spi->cs_gpio)) {
- gpio_set_value(spi->cs_gpio, !enable);
+ /* Honour the SPI_NO_CS flag */
+ if (!(spi->mode & SPI_NO_CS))
+ gpio_set_value(spi->cs_gpio, !enable);
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
spi->controller->set_cs)
@@ -2143,8 +2187,17 @@ int spi_register_controller(struct spi_controller *ctlr)
*/
if (ctlr->num_chipselect == 0)
return -EINVAL;
- /* allocate dynamic bus number using Linux idr */
- if ((ctlr->bus_num < 0) && ctlr->dev.of_node) {
+ if (ctlr->bus_num >= 0) {
+		/* controllers with a fixed bus number must claim exactly that number */
+ mutex_lock(&board_lock);
+ id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
+ ctlr->bus_num + 1, GFP_KERNEL);
+ mutex_unlock(&board_lock);
+ if (WARN(id < 0, "couldn't get idr"))
+ return id == -ENOSPC ? -EBUSY : id;
+ ctlr->bus_num = id;
+ } else if (ctlr->dev.of_node) {
+ /* allocate dynamic bus number using Linux idr */
id = of_alias_get_id(ctlr->dev.of_node, "spi");
if (id >= 0) {
ctlr->bus_num = id;
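The idr_alloc() call above relies on its half-open [start, end) allocation
window; a sketch of the semantics (illustrative only):

	/* Request exactly one candidate ID by passing a one-wide window.
	 * Returns ctlr->bus_num on success, or -ENOSPC when that number
	 * is already registered -- which the caller reports as -EBUSY.
	 */
	id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
		       ctlr->bus_num + 1, GFP_KERNEL);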
@@ -2774,8 +2827,10 @@ int spi_setup(struct spi_device *spi)
return -EINVAL;
/* help drivers fail *cleanly* when they need options
* that aren't supported with their current controller
+ * SPI_CS_WORD has a fallback software implementation,
+ * so it is ignored here.
*/
- bad_bits = spi->mode & ~spi->controller->mode_bits;
+ bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
ugly_bits = bad_bits &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
if (ugly_bits) {
@@ -2829,6 +2884,35 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (list_empty(&message->transfers))
return -EINVAL;
+ /* If an SPI controller does not support toggling the CS line on each
+ * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
+ * for the CS line, we can emulate the CS-per-word hardware function by
+ * splitting transfers into one-word transfers and ensuring that
+ * cs_change is set for each transfer.
+ */
+ if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
+ gpio_is_valid(spi->cs_gpio))) {
+ size_t maxsize;
+ int ret;
+
+ maxsize = (spi->bits_per_word + 7) / 8;
+
+ /* spi_split_transfers_maxsize() requires message->spi */
+ message->spi = spi;
+
+ ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ /* don't change cs_change on the last entry in the list */
+ if (list_is_last(&xfer->transfer_list, &message->transfers))
+ break;
+ xfer->cs_change = 1;
+ }
+ }
+
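A worked example of the splitting above (the message is hypothetical):

	/* With bits_per_word = 16, maxsize = (16 + 7) / 8 = 2 bytes, so a
	 * single 8-byte transfer becomes four 2-byte transfers; the loop
	 * then sets cs_change on the first three, toggling CS between
	 * words while leaving the final deassertion to the usual
	 * end-of-message handling.
	 */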
/* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
* either MOSI or MISO is missing. They can also be caused by
@@ -3314,20 +3398,23 @@ EXPORT_SYMBOL_GPL(spi_write_then_read);
/*-------------------------------------------------------------------------*/
-#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+#if IS_ENABLED(CONFIG_OF)
static int __spi_of_device_match(struct device *dev, void *data)
{
return dev->of_node == data;
}
/* must call put_device() when done with returned spi_device device */
-static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
+struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
__spi_of_device_match);
return dev ? to_spi_device(dev) : NULL;
}
+EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
+#endif /* IS_ENABLED(CONFIG_OF) */
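A hypothetical caller of the newly exported lookup (a sketch; the property
name is illustrative): resolve a DT phandle to the corresponding spi_device,
remembering that the lookup takes a device reference:

	struct device_node *np;
	struct spi_device *spi;

	np = of_parse_phandle(dev->of_node, "some-spi-device", 0);
	if (np) {
		spi = of_find_spi_device_by_node(np);
		of_node_put(np);
		if (spi) {
			/* ... use spi ... */
			put_device(&spi->dev);
		}
	}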
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_controller_match(struct device *dev, const void *data)
{
return dev->of_node == data;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index cda10719d1d1..b0c76e2626ce 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -669,6 +669,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "lineartechnology,ltc2488" },
{ .compatible = "ge,achc" },
{ .compatible = "semtech,sx1301" },
+ { .compatible = "lwn,bk4" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
@@ -724,11 +725,9 @@ static int spidev_probe(struct spi_device *spi)
* compatible string, it is a Linux implementation thing
* rather than a description of the hardware.
*/
- if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
- dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n");
- WARN_ON(spi->dev.of_node &&
- !of_match_device(spidev_dt_ids, &spi->dev));
- }
+ WARN(spi->dev.of_node &&
+ of_device_is_compatible(spi->dev.of_node, "spidev"),
+ "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);
spidev_probe_acpi(spi);
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 99a4656d113d..3861cb659cb9 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -425,7 +425,7 @@ void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc,
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2);
break;
}
- /* Fallthough */
+ /* Fall through */
default:
*m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB);
}
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index f91eaa1c3b67..b8f865018950 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -60,10 +60,6 @@ static void dgnc_tty_unthrottle(struct tty_struct *tty);
static void dgnc_tty_flush_chars(struct tty_struct *tty);
static void dgnc_tty_flush_buffer(struct tty_struct *tty);
static void dgnc_tty_hangup(struct tty_struct *tty);
-static int dgnc_set_modem_info(struct channel_t *ch, unsigned int command,
- unsigned int __user *value);
-static int dgnc_get_modem_info(struct channel_t *ch,
- unsigned int __user *value);
static int dgnc_tty_tiocmget(struct tty_struct *tty);
static int dgnc_tty_tiocmset(struct tty_struct *tty, unsigned int set,
unsigned int clear);
@@ -1701,106 +1697,6 @@ static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
-/* Return modem signals to ld. */
-static inline int dgnc_get_mstat(struct channel_t *ch)
-{
- unsigned char mstat;
- unsigned long flags;
- int rc;
-
- if (!ch)
- return -ENXIO;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- mstat = ch->ch_mostat | ch->ch_mistat;
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- rc = 0;
-
- if (mstat & UART_MCR_DTR)
- rc |= TIOCM_DTR;
- if (mstat & UART_MCR_RTS)
- rc |= TIOCM_RTS;
- if (mstat & UART_MSR_CTS)
- rc |= TIOCM_CTS;
- if (mstat & UART_MSR_DSR)
- rc |= TIOCM_DSR;
- if (mstat & UART_MSR_RI)
- rc |= TIOCM_RI;
- if (mstat & UART_MSR_DCD)
- rc |= TIOCM_CD;
-
- return rc;
-}
-
-/* Return modem signals to ld. */
-static int dgnc_get_modem_info(struct channel_t *ch,
- unsigned int __user *value)
-{
- return put_user(dgnc_get_mstat(ch), value);
-}
-
-/* Set modem signals, called by ld. */
-static int dgnc_set_modem_info(struct channel_t *ch,
- unsigned int command,
- unsigned int __user *value)
-{
- int rc;
- unsigned int arg = 0;
- unsigned long flags;
-
- rc = get_user(arg, value);
- if (rc)
- return rc;
-
- switch (command) {
- case TIOCMBIS:
- if (arg & TIOCM_RTS)
- ch->ch_mostat |= UART_MCR_RTS;
-
- if (arg & TIOCM_DTR)
- ch->ch_mostat |= UART_MCR_DTR;
-
- break;
-
- case TIOCMBIC:
- if (arg & TIOCM_RTS)
- ch->ch_mostat &= ~(UART_MCR_RTS);
-
- if (arg & TIOCM_DTR)
- ch->ch_mostat &= ~(UART_MCR_DTR);
-
- break;
-
- case TIOCMSET:
-
- if (arg & TIOCM_RTS)
- ch->ch_mostat |= UART_MCR_RTS;
- else
- ch->ch_mostat &= ~(UART_MCR_RTS);
-
- if (arg & TIOCM_DTR)
- ch->ch_mostat |= UART_MCR_DTR;
- else
- ch->ch_mostat &= ~(UART_MCR_DTR);
-
- break;
-
- default:
- return -EINVAL;
- }
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch->ch_bd->bd_ops->assert_modem_signals(ch);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-}
-
/* Ioctl to get the information for ditty. */
static int dgnc_tty_digigeta(struct tty_struct *tty,
struct digi_t __user *retinfo)
@@ -2184,116 +2080,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
}
switch (cmd) {
- /* Here are all the standard ioctl's that we MUST implement */
-
- case TCSBRK:
- /*
- * TCSBRK is SVID version: non-zero arg --> no break
- * this behaviour is exploited by tcdrain().
- *
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- if (rc)
- return rc;
-
- rc = ch_bd_ops->drain(tty, 0);
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
- ch_bd_ops->send_break(ch, 250);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-
- case TCSBRKP:
- /*
- * support for POSIX tcsendbreak()
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- if (rc)
- return rc;
-
- rc = ch_bd_ops->drain(tty, 0);
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch_bd_ops->send_break(ch, 250);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-
- case TIOCSBRK:
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- if (rc)
- return rc;
-
- rc = ch_bd_ops->drain(tty, 0);
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
-
- ch_bd_ops->send_break(ch, 250);
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-
- case TIOCCBRK:
- /* Do Nothing */
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return 0;
-
- case TIOCGSOFTCAR:
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return put_user(C_CLOCAL(tty) ? 1 : 0,
- (unsigned long __user *)arg);
-
- case TIOCSSOFTCAR:
-
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = get_user(arg, (unsigned long __user *)arg);
- if (rc)
- return rc;
-
- spin_lock_irqsave(&ch->ch_lock, flags);
- tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) |
- (arg ? CLOCAL : 0));
- ch_bd_ops->param(tty);
- spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return 0;
-
- case TIOCMGET:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return dgnc_get_modem_info(ch, uarg);
-
- case TIOCMBIS:
- case TIOCMBIC:
- case TIOCMSET:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return dgnc_set_modem_info(ch, cmd, uarg);
-
/* Here are any additional ioctl's that we want to implement */
-
case TCFLSH:
/*
* The linux tty driver doesn't have a flush
@@ -2370,11 +2157,6 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/* pretend we didn't recognize this */
return -ENOIOCTLCMD;
- case TCXONC:
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- /* Make the ld do it */
- return -ENOIOCTLCMD;
-
case DIGI_GETA:
/* get information for ditty */
spin_unlock_irqrestore(&ch->ch_lock, flags);
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
index 546a47156101..8cf0617d4ea0 100644
--- a/drivers/staging/erofs/namei.c
+++ b/drivers/staging/erofs/namei.c
@@ -223,18 +223,13 @@ static struct dentry *erofs_lookup(struct inode *dir,
if (err == -ENOENT) {
/* negative dentry */
inode = NULL;
- goto negative_out;
- } else if (unlikely(err))
- return ERR_PTR(err);
-
- debugln("%s, %s (nid %llu) found, d_type %u", __func__,
- dentry->d_name.name, nid, d_type);
-
- inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
-
-negative_out:
+ } else if (unlikely(err)) {
+ inode = ERR_PTR(err);
+ } else {
+ debugln("%s, %s (nid %llu) found, d_type %u", __func__,
+ dentry->d_name.name, nid, d_type);
+ inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
+ }
return d_splice_alias(inode, dentry);
}
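The rewrite leans on d_splice_alias() accepting all three lookup outcomes; a
simplified sketch of the semantics relied on:

	/*
	 * d_splice_alias(inode, dentry):
	 *   inode == NULL  -> installs a negative dentry, returns NULL
	 *   IS_ERR(inode)  -> returns ERR_CAST(inode)
	 *   valid inode    -> splices it in, possibly returning an alias
	 * so the three branches can converge on a single call site.
	 */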
diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig
index a4c4b83ddc9c..991e154c0eca 100644
--- a/drivers/staging/fsl-dpaa2/Kconfig
+++ b/drivers/staging/fsl-dpaa2/Kconfig
@@ -9,14 +9,6 @@ config FSL_DPAA2
Build drivers for Freescale DataPath Acceleration
Architecture (DPAA2) family of SoCs.
-config FSL_DPAA2_ETH
- tristate "Freescale DPAA2 Ethernet"
- depends on FSL_DPAA2 && FSL_MC_DPIO
- depends on NETDEVICES && ETHERNET
- ---help---
- Ethernet driver for Freescale DPAA2 SoCs, using the
- Freescale MC bus driver
-
config FSL_DPAA2_ETHSW
tristate "Freescale DPAA2 Ethernet Switch"
depends on FSL_DPAA2
@@ -24,11 +16,3 @@ config FSL_DPAA2_ETHSW
---help---
Driver for Freescale DPAA2 Ethernet Switch. Select
BRIDGE to have support for bridge tools.
-
-config FSL_DPAA2_PTP_CLOCK
- tristate "Freescale DPAA2 PTP Clock"
- depends on FSL_DPAA2_ETH && POSIX_TIMERS
- select PTP_1588_CLOCK
- help
- This driver adds support for using the DPAA2 1588 timer module
- as a PTP clock.
diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile
index 9c7062945758..c92ab98c27d9 100644
--- a/drivers/staging/fsl-dpaa2/Makefile
+++ b/drivers/staging/fsl-dpaa2/Makefile
@@ -2,6 +2,4 @@
# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers
#
-obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
-obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += rtc/
diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile
deleted file mode 100644
index 9315ecdba612..000000000000
--- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the Freescale DPAA2 Ethernet controller
-#
-
-obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
-
-fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
-
-# Needed by the tracing framework
-CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/staging/fsl-dpaa2/ethernet/TODO b/drivers/staging/fsl-dpaa2/ethernet/TODO
deleted file mode 100644
index e400a5e427a5..000000000000
--- a/drivers/staging/fsl-dpaa2/ethernet/TODO
+++ /dev/null
@@ -1,18 +0,0 @@
-* Add a DPAA2 MAC kernel driver in order to allow PHY management; currently
- the DPMAC objects and their link to DPNIs are handled by MC internally
- and all PHYs are seen as fixed-link
-* add more debug support: decide how to expose detailed debug statistics,
- add ingress error queue support
-* MC firmware uprev; the DPAA2 objects used by the Ethernet driver need to
- be kept in sync with binary interface changes in MC
-* refine README file
-* cleanup
-
-NOTE: None of the above is must-have before getting the DPAA2 Ethernet driver
-out of staging. The main requirement for that is to have the drivers it
-depends on, fsl-mc bus and DPIO driver, moved to drivers/bus and drivers/soc
-respectively.
-
- Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
- ruxandra.radulescu@nxp.com, devel@driverdev.osuosl.org,
- linux-kernel@vger.kernel.org
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
deleted file mode 100644
index 8056a95e1265..000000000000
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- */
-
-#include <linux/net_tstamp.h>
-
-#include "dpni.h" /* DPNI_LINK_OPT_* */
-#include "dpaa2-eth.h"
-
-/* To be kept in sync with DPNI statistics */
-static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
- "[hw] rx frames",
- "[hw] rx bytes",
- "[hw] rx mcast frames",
- "[hw] rx mcast bytes",
- "[hw] rx bcast frames",
- "[hw] rx bcast bytes",
- "[hw] tx frames",
- "[hw] tx bytes",
- "[hw] tx mcast frames",
- "[hw] tx mcast bytes",
- "[hw] tx bcast frames",
- "[hw] tx bcast bytes",
- "[hw] rx filtered frames",
- "[hw] rx discarded frames",
- "[hw] rx nobuffer discards",
- "[hw] tx discarded frames",
- "[hw] tx confirmed frames",
-};
-
-#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
-
-static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
- /* per-cpu stats */
- "[drv] tx conf frames",
- "[drv] tx conf bytes",
- "[drv] tx sg frames",
- "[drv] tx sg bytes",
- "[drv] tx realloc frames",
- "[drv] rx sg frames",
- "[drv] rx sg bytes",
- "[drv] enqueue portal busy",
- /* Channel stats */
- "[drv] dequeue portal busy",
- "[drv] channel pull errors",
- "[drv] cdan",
-};
-
-#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
-
-static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);
-
- strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
- sizeof(drvinfo->bus_info));
-}
-
-static int
-dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
- struct ethtool_link_ksettings *link_settings)
-{
- struct dpni_link_state state = {0};
- int err = 0;
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-
- err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
- if (err) {
- netdev_err(net_dev, "ERROR %d getting link state\n", err);
- goto out;
- }
-
- /* At the moment, we have no way of interrogating the DPMAC
- * from the DPNI side - and for that matter there may exist
- * no DPMAC at all. So for now we just don't report anything
- * beyond the DPNI attributes.
- */
- if (state.options & DPNI_LINK_OPT_AUTONEG)
- link_settings->base.autoneg = AUTONEG_ENABLE;
- if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
- link_settings->base.duplex = DUPLEX_FULL;
- link_settings->base.speed = state.rate;
-
-out:
- return err;
-}
-
-#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7
-#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1
-static int
-dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
- const struct ethtool_link_ksettings *link_settings)
-{
- struct dpni_link_cfg cfg = {0};
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int err = 0;
-
- /* If using an older MC version, the DPNI must be down
- * in order to be able to change link settings. Taking steps to let
- * the user know that.
- */
- if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
- DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
- if (netif_running(net_dev)) {
- netdev_info(net_dev, "Interface must be brought down first.\n");
- return -EACCES;
- }
- }
-
- cfg.rate = link_settings->base.speed;
- if (link_settings->base.autoneg == AUTONEG_ENABLE)
- cfg.options |= DPNI_LINK_OPT_AUTONEG;
- else
- cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
- if (link_settings->base.duplex == DUPLEX_HALF)
- cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
- else
- cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
-
- err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
- if (err)
- /* ethtool will be loud enough if we return an error; no point
- * in putting our own error message on the console by default
- */
- netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
-
- return err;
-}
-
-static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- u8 *p = data;
- int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
- strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
- strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/** Fill in hardware counters, as returned by MC.
- */
-static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
- struct ethtool_stats *stats,
- u64 *data)
-{
- int i = 0;
- int j, k, err;
- int num_cnt;
- union dpni_statistics dpni_stats;
- u64 cdan = 0;
- u64 portal_busy = 0, pull_err = 0;
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- struct dpaa2_eth_drv_stats *extras;
- struct dpaa2_eth_ch_stats *ch_stats;
-
- memset(data, 0,
- sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
-
- /* Print standard counters, from DPNI statistics */
- for (j = 0; j <= 2; j++) {
- err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
- j, &dpni_stats);
- if (err != 0)
- netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);
- switch (j) {
- case 0:
- num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
- break;
- case 1:
- num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64);
- break;
- case 2:
- num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
- break;
- }
- for (k = 0; k < num_cnt; k++)
- *(data + i++) = dpni_stats.raw.counter[k];
- }
-
- /* Print per-cpu extra stats */
- for_each_online_cpu(k) {
- extras = per_cpu_ptr(priv->percpu_extras, k);
- for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
- *((__u64 *)data + i + j) += *((__u64 *)extras + j);
- }
- i += j;
-
- for (j = 0; j < priv->num_channels; j++) {
- ch_stats = &priv->channel[j]->stats;
- cdan += ch_stats->cdan;
- portal_busy += ch_stats->dequeue_portal_busy;
- pull_err += ch_stats->pull_err;
- }
-
- *(data + i++) = portal_busy;
- *(data + i++) = pull_err;
- *(data + i++) = cdan;
-}
-
-static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
- struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
-{
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-
- switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
- /* we purposely ignore cmd->flow_type for now, because the
- * classifier only supports a single set of fields for all
- * protocols
- */
- rxnfc->data = priv->rx_hash_fields;
- break;
- case ETHTOOL_GRXRINGS:
- rxnfc->data = dpaa2_eth_queue_count(priv);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-int dpaa2_phc_index = -1;
-EXPORT_SYMBOL(dpaa2_phc_index);
-
-static int dpaa2_eth_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
-{
- info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- info->phc_index = dpaa2_phc_index;
-
- info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
- return 0;
-}
-
-const struct ethtool_ops dpaa2_ethtool_ops = {
- .get_drvinfo = dpaa2_eth_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_link_ksettings = dpaa2_eth_get_link_ksettings,
- .set_link_ksettings = dpaa2_eth_set_link_ksettings,
- .get_sset_count = dpaa2_eth_get_sset_count,
- .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
- .get_strings = dpaa2_eth_get_strings,
- .get_rxnfc = dpaa2_eth_get_rxnfc,
- .get_ts_info = dpaa2_eth_get_ts_info,
-};
diff --git a/drivers/staging/fsl-dpaa2/rtc/Makefile b/drivers/staging/fsl-dpaa2/rtc/Makefile
deleted file mode 100644
index 5468da071163..000000000000
--- a/drivers/staging/fsl-dpaa2/rtc/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the Freescale DPAA2 PTP clock
-#
-
-obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += dpaa2-rtc.o
-
-dpaa2-rtc-objs := rtc.o dprtc.o
diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h b/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
deleted file mode 100644
index db6a473430cc..000000000000
--- a/drivers/staging/fsl-dpaa2/rtc/dprtc-cmd.h
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2018 NXP
- */
-
-#ifndef _FSL_DPRTC_CMD_H
-#define _FSL_DPRTC_CMD_H
-
-/* DPRTC Version */
-#define DPRTC_VER_MAJOR 2
-#define DPRTC_VER_MINOR 0
-
-/* Command versioning */
-#define DPRTC_CMD_BASE_VERSION 1
-#define DPRTC_CMD_ID_OFFSET 4
-
-#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION)
-
-/* Command IDs */
-#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800)
-#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810)
-#define DPRTC_CMDID_CREATE DPRTC_CMD(0x910)
-#define DPRTC_CMDID_DESTROY DPRTC_CMD(0x990)
-#define DPRTC_CMDID_GET_API_VERSION DPRTC_CMD(0xa10)
-
-#define DPRTC_CMDID_ENABLE DPRTC_CMD(0x002)
-#define DPRTC_CMDID_DISABLE DPRTC_CMD(0x003)
-#define DPRTC_CMDID_GET_ATTR DPRTC_CMD(0x004)
-#define DPRTC_CMDID_RESET DPRTC_CMD(0x005)
-#define DPRTC_CMDID_IS_ENABLED DPRTC_CMD(0x006)
-
-#define DPRTC_CMDID_SET_IRQ_ENABLE DPRTC_CMD(0x012)
-#define DPRTC_CMDID_GET_IRQ_ENABLE DPRTC_CMD(0x013)
-#define DPRTC_CMDID_SET_IRQ_MASK DPRTC_CMD(0x014)
-#define DPRTC_CMDID_GET_IRQ_MASK DPRTC_CMD(0x015)
-#define DPRTC_CMDID_GET_IRQ_STATUS DPRTC_CMD(0x016)
-#define DPRTC_CMDID_CLEAR_IRQ_STATUS DPRTC_CMD(0x017)
-
-#define DPRTC_CMDID_SET_CLOCK_OFFSET DPRTC_CMD(0x1d0)
-#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1)
-#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2)
-#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3)
-#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4)
-#define DPRTC_CMDID_SET_ALARM DPRTC_CMD(0x1d5)
-#define DPRTC_CMDID_SET_PERIODIC_PULSE DPRTC_CMD(0x1d6)
-#define DPRTC_CMDID_CLEAR_PERIODIC_PULSE DPRTC_CMD(0x1d7)
-#define DPRTC_CMDID_SET_EXT_TRIGGER DPRTC_CMD(0x1d8)
-#define DPRTC_CMDID_CLEAR_EXT_TRIGGER DPRTC_CMD(0x1d9)
-#define DPRTC_CMDID_GET_EXT_TRIGGER_TIMESTAMP DPRTC_CMD(0x1dA)
-
-/* Macros for accessing command fields smaller than 1byte */
-#define DPRTC_MASK(field) \
- GENMASK(DPRTC_##field##_SHIFT + DPRTC_##field##_SIZE - 1, \
- DPRTC_##field##_SHIFT)
-#define dprtc_get_field(var, field) \
- (((var) & DPRTC_MASK(field)) >> DPRTC_##field##_SHIFT)
-
-#pragma pack(push, 1)
-struct dprtc_cmd_open {
- __le32 dprtc_id;
-};
-
-struct dprtc_cmd_destroy {
- __le32 object_id;
-};
-
-#define DPRTC_ENABLE_SHIFT 0
-#define DPRTC_ENABLE_SIZE 1
-
-struct dprtc_rsp_is_enabled {
- u8 en;
-};
-
-struct dprtc_cmd_get_irq {
- __le32 pad;
- u8 irq_index;
-};
-
-struct dprtc_cmd_set_irq_enable {
- u8 en;
- u8 pad[3];
- u8 irq_index;
-};
-
-struct dprtc_rsp_get_irq_enable {
- u8 en;
-};
-
-struct dprtc_cmd_set_irq_mask {
- __le32 mask;
- u8 irq_index;
-};
-
-struct dprtc_rsp_get_irq_mask {
- __le32 mask;
-};
-
-struct dprtc_cmd_get_irq_status {
- __le32 status;
- u8 irq_index;
-};
-
-struct dprtc_rsp_get_irq_status {
- __le32 status;
-};
-
-struct dprtc_cmd_clear_irq_status {
- __le32 status;
- u8 irq_index;
-};
-
-struct dprtc_rsp_get_attributes {
- __le32 pad;
- __le32 id;
-};
-
-struct dprtc_cmd_set_clock_offset {
- __le64 offset;
-};
-
-struct dprtc_get_freq_compensation {
- __le32 freq_compensation;
-};
-
-struct dprtc_time {
- __le64 time;
-};
-
-struct dprtc_rsp_get_api_version {
- __le16 major;
- __le16 minor;
-};
-
-#pragma pack(pop)
-
-#endif /* _FSL_DPRTC_CMD_H */
diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.c b/drivers/staging/fsl-dpaa2/rtc/dprtc.c
deleted file mode 100644
index 68ae6ffefbf5..000000000000
--- a/drivers/staging/fsl-dpaa2/rtc/dprtc.c
+++ /dev/null
@@ -1,701 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2018 NXP
- */
-
-#include <linux/fsl/mc.h>
-
-#include "dprtc.h"
-#include "dprtc-cmd.h"
-
-/**
- * dprtc_open() - Open a control session for the specified object.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @dprtc_id: DPRTC unique ID
- * @token: Returned token; use in subsequent API calls
- *
- * This function can be used to open a control session for an
- * already created object; an object may have been declared in
- * the DPL or by calling the dprtc_create function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent commands for
- * this specific object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int dprtc_id,
- u16 *token)
-{
- struct dprtc_cmd_open *cmd_params;
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN,
- cmd_flags,
- 0);
- cmd_params = (struct dprtc_cmd_open *)cmd.params;
- cmd_params->dprtc_id = cpu_to_le32(dprtc_id);
-
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- *token = mc_cmd_hdr_read_token(&cmd);
-
- return 0;
-}
-
-/**
- * dprtc_close() - Close the control session of the object
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- *
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags,
- token);
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_create() - Create the DPRTC object.
- * @mc_io: Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @cfg: Configuration structure
- * @obj_id: Returned object id
- *
- * Create the DPRTC object, allocate required resources and
- * perform required initialization.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_create(struct fsl_mc_io *mc_io,
- u16 dprc_token,
- u32 cmd_flags,
- const struct dprtc_cfg *cfg,
- u32 *obj_id)
-{
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CREATE,
- cmd_flags,
- dprc_token);
-
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- *obj_id = mc_cmd_read_object_id(&cmd);
-
- return 0;
-}
-
-/**
- * dprtc_destroy() - Destroy the DPRTC object and release all its resources.
- * @mc_io: Pointer to MC portal's I/O object
- * @dprc_token: Parent container token; '0' for default container
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @object_id: The object id; it must be a valid id within the container that
- * created this object;
- *
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dprtc_destroy(struct fsl_mc_io *mc_io,
- u16 dprc_token,
- u32 cmd_flags,
- u32 object_id)
-{
- struct dprtc_cmd_destroy *cmd_params;
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DESTROY,
- cmd_flags,
- dprc_token);
- cmd_params = (struct dprtc_cmd_destroy *)cmd.params;
- cmd_params->object_id = cpu_to_le32(object_id);
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_enable() - Enable the DPRTC.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_ENABLE, cmd_flags,
- token);
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_disable() - Disable the DPRTC.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_disable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_DISABLE,
- cmd_flags,
- token);
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_is_enabled() - Check if the DPRTC is enabled.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @en: Returns '1' if object is enabled; '0' otherwise
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_is_enabled(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *en)
-{
- struct dprtc_rsp_is_enabled *rsp_params;
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_IS_ENABLED, cmd_flags,
- token);
-
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- rsp_params = (struct dprtc_rsp_is_enabled *)cmd.params;
- *en = dprtc_get_field(rsp_params->en, ENABLE);
-
- return 0;
-}
-
-/**
- * dprtc_reset() - Reset the DPRTC, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_RESET,
- cmd_flags,
- token);
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_set_irq_enable() - Set overall interrupt state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @en: Interrupt state - enable = 1, disable = 0
- *
- * Allows GPP software to control when interrupts are generated.
- * Each interrupt can have up to 32 causes. The enable/disable control's the
- * overall interrupt state. if the interrupt is disabled no causes will cause
- * an interrupt.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en)
-{
- struct dprtc_cmd_set_irq_enable *cmd_params;
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_ENABLE,
- cmd_flags,
- token);
- cmd_params = (struct dprtc_cmd_set_irq_enable *)cmd.params;
- cmd_params->irq_index = irq_index;
- cmd_params->en = en;
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_irq_enable() - Get overall interrupt state
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @en: Returned interrupt state - enable = 1, disable = 0
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en)
-{
- struct dprtc_rsp_get_irq_enable *rsp_params;
- struct dprtc_cmd_get_irq *cmd_params;
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_ENABLE,
- cmd_flags,
- token);
- cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
- cmd_params->irq_index = irq_index;
-
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- rsp_params = (struct dprtc_rsp_get_irq_enable *)cmd.params;
- *en = rsp_params->en;
-
- return 0;
-}
-
-/**
- * dprtc_set_irq_mask() - Set interrupt mask.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @mask: Event mask to trigger interrupt;
- * each bit:
- * 0 = ignore event
- * 1 = consider event for asserting IRQ
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask)
-{
- struct dprtc_cmd_set_irq_mask *cmd_params;
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_IRQ_MASK,
- cmd_flags,
- token);
- cmd_params = (struct dprtc_cmd_set_irq_mask *)cmd.params;
- cmd_params->mask = cpu_to_le32(mask);
- cmd_params->irq_index = irq_index;
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_irq_mask() - Get interrupt mask.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @mask: Returned event mask to trigger interrupt
- *
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask)
-{
- struct dprtc_rsp_get_irq_mask *rsp_params;
- struct dprtc_cmd_get_irq *cmd_params;
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_MASK,
- cmd_flags,
- token);
- cmd_params = (struct dprtc_cmd_get_irq *)cmd.params;
- cmd_params->irq_index = irq_index;
-
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- rsp_params = (struct dprtc_rsp_get_irq_mask *)cmd.params;
- *mask = le32_to_cpu(rsp_params->mask);
-
- return 0;
-}
-
-/**
- * dprtc_get_irq_status() - Get the current status of any pending interrupts.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @status: Returned interrupts status - one bit per cause:
- * 0 = no interrupt pending
- * 1 = interrupt pending
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status)
-{
- struct dprtc_cmd_get_irq_status *cmd_params;
- struct dprtc_rsp_get_irq_status *rsp_params;
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_IRQ_STATUS,
- cmd_flags,
- token);
- cmd_params = (struct dprtc_cmd_get_irq_status *)cmd.params;
- cmd_params->status = cpu_to_le32(*status);
- cmd_params->irq_index = irq_index;
-
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- rsp_params = (struct dprtc_rsp_get_irq_status *)cmd.params;
- *status = le32_to_cpu(rsp_params->status);
-
- return 0;
-}
-
-/**
- * dprtc_clear_irq_status() - Clear a pending interrupt's status
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @irq_index: The interrupt index to configure
- * @status: Bits to clear (W1C) - one bit per cause:
- * 0 = don't change
- * 1 = clear status bit
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status)
-{
- struct dprtc_cmd_clear_irq_status *cmd_params;
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLEAR_IRQ_STATUS,
- cmd_flags,
- token);
- cmd_params = (struct dprtc_cmd_clear_irq_status *)cmd.params;
- cmd_params->irq_index = irq_index;
- cmd_params->status = cpu_to_le32(status);
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_attributes - Retrieve DPRTC attributes.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @attr: Returned object's attributes
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dprtc_attr *attr)
-{
- struct dprtc_rsp_get_attributes *rsp_params;
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_ATTR,
- cmd_flags,
- token);
-
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- rsp_params = (struct dprtc_rsp_get_attributes *)cmd.params;
- attr->id = le32_to_cpu(rsp_params->id);
-
- return 0;
-}
-
-/**
- * dprtc_set_clock_offset() - Sets the clock's offset
- * (usually relative to another clock).
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @offset: New clock offset (in nanoseconds).
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int64_t offset)
-{
- struct dprtc_cmd_set_clock_offset *cmd_params;
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_CLOCK_OFFSET,
- cmd_flags,
- token);
- cmd_params = (struct dprtc_cmd_set_clock_offset *)cmd.params;
- cmd_params->offset = cpu_to_le64(offset);
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_set_freq_compensation() - Sets a new frequency compensation value.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @freq_compensation: The new frequency compensation value to set.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u32 freq_compensation)
-{
- struct dprtc_get_freq_compensation *cmd_params;
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION,
- cmd_flags,
- token);
- cmd_params = (struct dprtc_get_freq_compensation *)cmd.params;
- cmd_params->freq_compensation = cpu_to_le32(freq_compensation);
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_freq_compensation() - Retrieves the frequency compensation value
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @freq_compensation: Frequency compensation value
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u32 *freq_compensation)
-{
- struct dprtc_get_freq_compensation *rsp_params;
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION,
- cmd_flags,
- token);
-
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- rsp_params = (struct dprtc_get_freq_compensation *)cmd.params;
- *freq_compensation = le32_to_cpu(rsp_params->freq_compensation);
-
- return 0;
-}
-
-/**
- * dprtc_get_time() - Returns the current RTC time.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @time: Current RTC time.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_get_time(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- uint64_t *time)
-{
- struct dprtc_time *rsp_params;
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME,
- cmd_flags,
- token);
-
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- rsp_params = (struct dprtc_time *)cmd.params;
- *time = le64_to_cpu(rsp_params->time);
-
- return 0;
-}
-
-/**
- * dprtc_set_time() - Updates current RTC time.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @time: New RTC time.
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_set_time(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- uint64_t time)
-{
- struct dprtc_time *cmd_params;
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME,
- cmd_flags,
- token);
- cmd_params = (struct dprtc_time *)cmd.params;
- cmd_params->time = cpu_to_le64(time);
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_set_alarm() - Defines and sets alarm.
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPRTC object
- * @time: In nanoseconds, the time when the alarm
- * should go off - must be a multiple of
- * 1 microsecond
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_set_alarm(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token, uint64_t time)
-{
- struct dprtc_time *cmd_params;
- struct fsl_mc_command cmd = { 0 };
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_ALARM,
- cmd_flags,
- token);
- cmd_params = (struct dprtc_time *)cmd.params;
- cmd_params->time = cpu_to_le64(time);
-
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dprtc_get_api_version() - Get Data Path Real Time Counter API version
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @major_ver: Major version of data path real time counter API
- * @minor_ver: Minor version of data path real time counter API
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dprtc_get_api_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 *major_ver,
- u16 *minor_ver)
-{
- struct dprtc_rsp_get_api_version *rsp_params;
- struct fsl_mc_command cmd = { 0 };
- int err;
-
- cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_API_VERSION,
- cmd_flags,
- 0);
-
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- rsp_params = (struct dprtc_rsp_get_api_version *)cmd.params;
- *major_ver = le16_to_cpu(rsp_params->major);
- *minor_ver = le16_to_cpu(rsp_params->minor);
-
- return 0;
-}
diff --git a/drivers/staging/fsl-dpaa2/rtc/dprtc.h b/drivers/staging/fsl-dpaa2/rtc/dprtc.h
deleted file mode 100644
index 08f7c7bebbca..000000000000
--- a/drivers/staging/fsl-dpaa2/rtc/dprtc.h
+++ /dev/null
@@ -1,164 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2018 NXP
- */
-
-#ifndef __FSL_DPRTC_H
-#define __FSL_DPRTC_H
-
-/* Data Path Real Time Counter API
- * Contains initialization APIs and runtime control APIs for RTC
- */
-
-struct fsl_mc_io;
-
-/**
- * Number of irq's
- */
-#define DPRTC_MAX_IRQ_NUM 1
-#define DPRTC_IRQ_INDEX 0
-
-/**
- * Interrupt event masks:
- */
-
-/**
- * Interrupt event mask indicating alarm event had occurred
- */
-#define DPRTC_EVENT_ALARM 0x40000000
-/**
- * Interrupt event mask indicating periodic pulse event had occurred
- */
-#define DPRTC_EVENT_PPS 0x08000000
-
-int dprtc_open(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- int dprtc_id,
- u16 *token);
-
-int dprtc_close(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-/**
- * struct dprtc_cfg - Structure representing DPRTC configuration
- * @options: place holder
- */
-struct dprtc_cfg {
- u32 options;
-};
-
-int dprtc_create(struct fsl_mc_io *mc_io,
- u16 dprc_token,
- u32 cmd_flags,
- const struct dprtc_cfg *cfg,
- u32 *obj_id);
-
-int dprtc_destroy(struct fsl_mc_io *mc_io,
- u16 dprc_token,
- u32 cmd_flags,
- u32 object_id);
-
-int dprtc_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-int dprtc_disable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-int dprtc_is_enabled(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int *en);
-
-int dprtc_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
-int dprtc_set_clock_offset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- int64_t offset);
-
-int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u32 freq_compensation);
-
-int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u32 *freq_compensation);
-
-int dprtc_get_time(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- uint64_t *time);
-
-int dprtc_set_time(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- uint64_t time);
-
-int dprtc_set_alarm(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- uint64_t time);
-
-int dprtc_set_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 en);
-
-int dprtc_get_irq_enable(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u8 *en);
-
-int dprtc_set_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 mask);
-
-int dprtc_get_irq_mask(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *mask);
-
-int dprtc_get_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 *status);
-
-int dprtc_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status);
-
-/**
- * struct dprtc_attr - Structure representing DPRTC attributes
- * @id: DPRTC object ID
- */
-struct dprtc_attr {
- int id;
-};
-
-int dprtc_get_attributes(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- struct dprtc_attr *attr);
-
-int dprtc_get_api_version(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 *major_ver,
- u16 *minor_ver);
-
-#endif /* __FSL_DPRTC_H */
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index fa0dd425b454..173f451b86b7 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -1209,42 +1209,40 @@ static int wait_msr_change(struct fwtty_port *port, unsigned long mask)
check_msr_delta(port, mask, &prev));
}
-static int get_serial_info(struct fwtty_port *port,
- struct serial_struct __user *info)
+static int get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
-
- tmp.type = PORT_UNKNOWN;
- tmp.line = port->port.tty->index;
- tmp.flags = port->port.flags;
- tmp.xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN;
- tmp.baud_base = 400000000;
- tmp.close_delay = port->port.close_delay;
-
- return (copy_to_user(info, &tmp, sizeof(*info))) ? -EFAULT : 0;
+ struct fwtty_port *port = tty->driver_data;
+ mutex_lock(&port->port.mutex);
+ ss->type = PORT_UNKNOWN;
+ ss->line = port->port.tty->index;
+ ss->flags = port->port.flags;
+ ss->xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN;
+ ss->baud_base = 400000000;
+ ss->close_delay = port->port.close_delay;
+ mutex_unlock(&port->port.mutex);
+ return 0;
}
-static int set_serial_info(struct fwtty_port *port,
- struct serial_struct __user *info)
+static int set_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct tmp;
-
- if (copy_from_user(&tmp, info, sizeof(tmp)))
- return -EFAULT;
+ struct fwtty_port *port = tty->driver_data;
- if (tmp.irq != 0 || tmp.port != 0 || tmp.custom_divisor != 0 ||
- tmp.baud_base != 400000000)
+ if (ss->irq != 0 || ss->port != 0 || ss->custom_divisor != 0 ||
+ ss->baud_base != 400000000)
return -EPERM;
+ mutex_lock(&port->port.mutex);
if (!capable(CAP_SYS_ADMIN)) {
- if (((tmp.flags & ~ASYNC_USR_MASK) !=
- (port->port.flags & ~ASYNC_USR_MASK)))
+ if (((ss->flags & ~ASYNC_USR_MASK) !=
+ (port->port.flags & ~ASYNC_USR_MASK))) {
+ mutex_unlock(&port->port.mutex);
return -EPERM;
- } else {
- port->port.close_delay = tmp.close_delay * HZ / 100;
+ }
}
+ port->port.close_delay = ss->close_delay * HZ / 100;
+ mutex_unlock(&port->port.mutex);
return 0;
}
@@ -1256,18 +1254,6 @@ static int fwtty_ioctl(struct tty_struct *tty, unsigned int cmd,
int err;
switch (cmd) {
- case TIOCGSERIAL:
- mutex_lock(&port->port.mutex);
- err = get_serial_info(port, (void __user *)arg);
- mutex_unlock(&port->port.mutex);
- break;
-
- case TIOCSSERIAL:
- mutex_lock(&port->port.mutex);
- err = set_serial_info(port, (void __user *)arg);
- mutex_unlock(&port->port.mutex);
- break;
-
case TIOCMIWAIT:
err = wait_msr_change(port, arg);
break;
@@ -1557,6 +1543,8 @@ static const struct tty_operations fwtty_ops = {
.tiocmget = fwtty_tiocmget,
.tiocmset = fwtty_tiocmset,
.get_icount = fwtty_get_icount,
+ .set_serial = set_serial_info,
+ .get_serial = get_serial_info,
.proc_show = fwtty_proc_show,
};
@@ -1578,6 +1566,8 @@ static const struct tty_operations fwloop_ops = {
.tiocmget = fwtty_tiocmget,
.tiocmset = fwtty_tiocmset,
.get_icount = fwtty_get_icount,
+ .set_serial = set_serial_info,
+ .get_serial = get_serial_info,
};
static inline int mgmt_pkt_expected_len(__be16 code)
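The fwserial hunks above (and the greybus and amiserial conversions further down) all track the same tty-core change: TIOCGSERIAL/TIOCSSERIAL handling moved into the core, which now calls the driver's ->get_serial()/->set_serial() tty_operations hooks with a kernel-space struct serial_struct and performs the copy_to_user()/copy_from_user() itself. A minimal hedged sketch of the new hook shape; struct foo_port and the foo_* names are hypothetical, not from this patch:

#include <linux/tty.h>
#include <linux/serial.h>

struct foo_port {				/* hypothetical driver state */
	unsigned int close_delay;
};

static int foo_get_serial(struct tty_struct *tty, struct serial_struct *ss)
{
	struct foo_port *port = tty->driver_data;

	/* Fill *ss directly; the tty core copies it to user space. */
	ss->line = tty->index;
	ss->close_delay = port->close_delay;
	return 0;
}

static const struct tty_operations foo_ops = {
	.get_serial = foo_get_serial,
	/* .set_serial likewise receives a pre-copied struct and returns
	 * -EPERM/-EINVAL on validation failure. */
};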
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 8a006323c3c1..3313cb0b60af 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -616,40 +616,33 @@ static void gb_tty_unthrottle(struct tty_struct *tty)
}
}
-static int get_serial_info(struct gb_tty *gb_tty,
- struct serial_struct __user *info)
+static int get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = PORT_16550A;
- tmp.line = gb_tty->minor;
- tmp.xmit_fifo_size = 16;
- tmp.baud_base = 9600;
- tmp.close_delay = gb_tty->port.close_delay / 10;
- tmp.closing_wait =
+ struct gb_tty *gb_tty = tty->driver_data;
+
+ ss->type = PORT_16550A;
+ ss->line = gb_tty->minor;
+ ss->xmit_fifo_size = 16;
+ ss->baud_base = 9600;
+ ss->close_delay = gb_tty->port.close_delay / 10;
+ ss->closing_wait =
gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10;
-
- if (copy_to_user(info, &tmp, sizeof(tmp)))
- return -EFAULT;
return 0;
}
-static int set_serial_info(struct gb_tty *gb_tty,
- struct serial_struct __user *newinfo)
+static int set_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct new_serial;
+ struct gb_tty *gb_tty = tty->driver_data;
unsigned int closing_wait;
unsigned int close_delay;
int retval = 0;
- if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
- return -EFAULT;
-
- close_delay = new_serial.close_delay * 10;
- closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
- ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+ close_delay = ss->close_delay * 10;
+ closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
mutex_lock(&gb_tty->port.mutex);
if (!capable(CAP_SYS_ADMIN)) {
@@ -728,12 +721,6 @@ static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
struct gb_tty *gb_tty = tty->driver_data;
switch (cmd) {
- case TIOCGSERIAL:
- return get_serial_info(gb_tty,
- (struct serial_struct __user *)arg);
- case TIOCSSERIAL:
- return set_serial_info(gb_tty,
- (struct serial_struct __user *)arg);
case TIOCMIWAIT:
return wait_serial_change(gb_tty, arg);
}
@@ -818,6 +805,8 @@ static const struct tty_operations gb_ops = {
.tiocmget = gb_tty_tiocmget,
.tiocmset = gb_tty_tiocmset,
.get_icount = gb_tty_get_icount,
+ .set_serial = set_serial_info,
+ .get_serial = get_serial_info,
};
static const struct tty_port_operations gb_port_ops = {
diff --git a/drivers/staging/iio/adc/ad7606.c b/drivers/staging/iio/adc/ad7606.c
index 25b9fcd5e3a4..b7810b1aad07 100644
--- a/drivers/staging/iio/adc/ad7606.c
+++ b/drivers/staging/iio/adc/ad7606.c
@@ -202,7 +202,7 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
long mask)
{
struct ad7606_state *st = iio_priv(indio_dev);
- int values[3];
+ DECLARE_BITMAP(values, 3);
int ret, i;
switch (mask) {
@@ -227,12 +227,10 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- values[0] = (ret >> 0) & 1;
- values[1] = (ret >> 1) & 1;
- values[2] = (ret >> 2) & 1;
+ values[0] = ret;
mutex_lock(&st->lock);
- gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
+ gpiod_set_array_value(3, st->gpio_os->desc, st->gpio_os->info,
values);
st->oversampling = val;
mutex_unlock(&st->lock);
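The ad7606 hunk tracks the GPIO consumer API change in which gpiod_set_array_value() takes its values as a bitmap of unsigned long plus an optional struct gpio_array cookie (st->gpio_os->info above) instead of an int array. A hedged sketch of the new calling convention, assuming descriptors obtained with gpiod_get_array():

#include <linux/bitmap.h>
#include <linux/gpio/consumer.h>

/* Drive three oversampling pins from the low three bits of 'ratio'. */
static void set_os_pins(struct gpio_descs *os, unsigned int ratio)
{
	DECLARE_BITMAP(values, 3);	/* one unsigned long; bits 0..2 used */

	values[0] = ratio & 0x7;
	gpiod_set_array_value(3, os->desc, os->info, values);
}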
diff --git a/drivers/staging/media/mt9t031/Kconfig b/drivers/staging/media/mt9t031/Kconfig
index f48e06a03cdb..9a58aaf72edd 100644
--- a/drivers/staging/media/mt9t031/Kconfig
+++ b/drivers/staging/media/mt9t031/Kconfig
@@ -1,9 +1,3 @@
-config SOC_CAMERA_IMX074
- tristate "imx074 support (DEPRECATED)"
- depends on SOC_CAMERA && I2C
- help
- This driver supports IMX074 cameras from Sony
-
config SOC_CAMERA_MT9T031
tristate "mt9t031 support (DEPRECATED)"
depends on SOC_CAMERA && I2C
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 448478451c4c..def8a1f57d1c 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -630,8 +630,7 @@ static int spinand_erase_block(struct spi_device *spi_nand, u16 block_id)
}
#ifdef CONFIG_MTD_SPINAND_ONDIEECC
-static int spinand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip,
+static int spinand_write_page_hwecc(struct nand_chip *chip,
const u8 *buf, int oob_required,
int page)
{
@@ -643,21 +642,22 @@ static int spinand_write_page_hwecc(struct mtd_info *mtd,
return nand_prog_page_op(chip, page, 0, p, eccsize * eccsteps);
}
-static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- u8 *buf, int oob_required, int page)
+static int spinand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
int retval;
u8 status;
u8 *p = buf;
int eccsize = chip->ecc.size;
int eccsteps = chip->ecc.steps;
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct spinand_info *info = nand_get_controller_data(chip);
enable_read_hw_ecc = 1;
nand_read_page_op(chip, page, 0, p, eccsize * eccsteps);
if (oob_required)
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
while (1) {
retval = spinand_read_status(info->spi, &status);
@@ -681,13 +681,13 @@ static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
}
#endif
-static void spinand_select_chip(struct mtd_info *mtd, int dev)
+static void spinand_select_chip(struct nand_chip *chip, int dev)
{
}
-static u8 spinand_read_byte(struct mtd_info *mtd)
+static u8 spinand_read_byte(struct nand_chip *chip)
{
- struct spinand_state *state = mtd_to_state(mtd);
+ struct spinand_state *state = mtd_to_state(nand_to_mtd(chip));
u8 data;
data = state->buf[state->buf_ptr];
@@ -695,8 +695,9 @@ static u8 spinand_read_byte(struct mtd_info *mtd)
return data;
}
-static int spinand_wait(struct mtd_info *mtd, struct nand_chip *chip)
+static int spinand_wait(struct nand_chip *chip)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct spinand_info *info = nand_get_controller_data(chip);
unsigned long timeo = jiffies;
@@ -724,17 +725,17 @@ static int spinand_wait(struct mtd_info *mtd, struct nand_chip *chip)
return 0;
}
-static void spinand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+static void spinand_write_buf(struct nand_chip *chip, const u8 *buf, int len)
{
- struct spinand_state *state = mtd_to_state(mtd);
+ struct spinand_state *state = mtd_to_state(nand_to_mtd(chip));
memcpy(state->buf + state->buf_ptr, buf, len);
state->buf_ptr += len;
}
-static void spinand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+static void spinand_read_buf(struct nand_chip *chip, u8 *buf, int len)
{
- struct spinand_state *state = mtd_to_state(mtd);
+ struct spinand_state *state = mtd_to_state(nand_to_mtd(chip));
memcpy(buf, state->buf + state->buf_ptr, len);
state->buf_ptr += len;
@@ -759,10 +760,10 @@ static void spinand_reset(struct spi_device *spi_nand)
dev_err(&spi_nand->dev, "wait timedout!\n");
}
-static void spinand_cmdfunc(struct mtd_info *mtd, unsigned int command,
+static void spinand_cmdfunc(struct nand_chip *chip, unsigned int command,
int column, int page)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct spinand_info *info = nand_get_controller_data(chip);
struct spinand_state *state = info->priv;
@@ -914,15 +915,15 @@ static int spinand_probe(struct spi_device *spi_nand)
nand_set_flash_node(chip, spi_nand->dev.of_node);
nand_set_controller_data(chip, info);
- chip->read_buf = spinand_read_buf;
- chip->write_buf = spinand_write_buf;
- chip->read_byte = spinand_read_byte;
- chip->cmdfunc = spinand_cmdfunc;
- chip->waitfunc = spinand_wait;
+ chip->legacy.read_buf = spinand_read_buf;
+ chip->legacy.write_buf = spinand_write_buf;
+ chip->legacy.read_byte = spinand_read_byte;
+ chip->legacy.cmdfunc = spinand_cmdfunc;
+ chip->legacy.waitfunc = spinand_wait;
chip->options |= NAND_CACHEPRG;
chip->select_chip = spinand_select_chip;
- chip->set_features = nand_get_set_features_notsupp;
- chip->get_features = nand_get_set_features_notsupp;
+ chip->legacy.set_features = nand_get_set_features_notsupp;
+ chip->legacy.get_features = nand_get_set_features_notsupp;
mtd = nand_to_mtd(chip);
@@ -934,7 +935,7 @@ static int spinand_probe(struct spi_device *spi_nand)
mtd_set_ooblayout(mtd, &spinand_oob_64_ops);
#endif
- if (nand_scan(mtd, 1))
+ if (nand_scan(chip, 1))
return -ENXIO;
return mtd_device_register(mtd, NULL, 0);
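The spinand changes follow the raw-NAND core rework: the old per-chip hooks moved under chip->legacy, every hook now receives the struct nand_chip directly (deriving mtd via nand_to_mtd() only where still needed), and nand_scan() takes the chip rather than the mtd. A hedged sketch of one converted hook; the foo_* names are invented:

#include <linux/mtd/rawnand.h>

static u8 foo_read_byte(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);	/* only if mtd is needed */

	(void)mtd;
	return 0;	/* placeholder: fetch one byte from the controller */
}

static void foo_attach_legacy_hooks(struct nand_chip *chip)
{
	chip->legacy.read_byte = foo_read_byte;
	/* ... later: nand_scan(chip, 1) instead of nand_scan(mtd, 1) */
}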
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
index 7ad0c4141205..2c6e1800a3fd 100644
--- a/drivers/staging/mt7621-eth/mdio.c
+++ b/drivers/staging/mt7621-eth/mdio.c
@@ -112,7 +112,7 @@ static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac,
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
phy->duplex = 0;
- phy->supported &= PHY_BASIC_FEATURES;
+ phy_set_max_speed(phy, SPEED_100);
phy->advertising = phy->supported | ADVERTISED_Autoneg;
phy_start_aneg(phy);
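phy_set_max_speed() replaces the open-coded masking of phy->supported with PHY_BASIC_FEATURES: it trims the supported and advertised link modes to the given ceiling. Hedged sketch:

#include <linux/phy.h>

static void foo_limit_phy(struct phy_device *phy)
{
	/* Cap the link at 100 Mbit/s, then restart autonegotiation. */
	phy_set_max_speed(phy, SPEED_100);
	phy_start_aneg(phy);
}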
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index d2605158546b..96f265eee007 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -1149,7 +1149,7 @@ static enum reset_type _rtl92e_tx_check_stuck(struct net_device *dev)
if (skb_queue_len(&ring->queue) == 0) {
continue;
} else {
- skb = (&ring->queue)->next;
+ skb = __skb_peek(&ring->queue);
tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
tcb_desc->nStuckCount++;
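__skb_peek() is the new accessor used here: it returns the first skb on a queue without unlinking it, replacing direct (&queue)->next pokes. It is only valid when the queue is known to be non-empty and the caller holds whatever lock protects it. Hedged sketch:

#include <linux/skbuff.h>

/* Caller holds the queue lock and has verified skb_queue_len(q) != 0. */
static struct sk_buff *first_queued(struct sk_buff_head *q)
{
	return __skb_peek(q);	/* head of the queue, not dequeued */
}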
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 9f18be14dda6..f38f1f74fcd6 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -49,9 +49,9 @@ struct rtllib_tkip_data {
u32 dot11RSNAStatsTKIPLocalMICFailures;
int key_idx;
- struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_sync_skcipher *rx_tfm_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_sync_skcipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16];
@@ -66,8 +66,7 @@ static void *rtllib_tkip_init(int key_idx)
if (priv == NULL)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm_arc4)) {
pr_debug("Could not allocate crypto API arc4\n");
priv->tx_tfm_arc4 = NULL;
@@ -81,8 +80,7 @@ static void *rtllib_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm_arc4)) {
pr_debug("Could not allocate crypto API arc4\n");
priv->rx_tfm_arc4 = NULL;
@@ -100,9 +98,9 @@ static void *rtllib_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_skcipher(priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -116,9 +114,9 @@ static void rtllib_tkip_deinit(void *priv)
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_skcipher(_priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -337,7 +335,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
@@ -349,8 +347,8 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
sg_init_one(&sg, pos, len+4);
- crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+ crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
ret = crypto_skcipher_encrypt(req);
@@ -420,7 +418,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
if ((iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
@@ -447,8 +445,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
sg_init_one(&sg, pos, plen+4);
- crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+ crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
err = crypto_skcipher_decrypt(req);
@@ -664,9 +662,9 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
struct rtllib_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
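Every arc4 conversion in this series (rtllib TKIP here, the WEP and rtl8192u copies below) is the same mechanical switch to the crypto_sync_skcipher wrapper, which guarantees a synchronous transform with a request small enough for SYNC_SKCIPHER_REQUEST_ON_STACK(). A condensed, hedged sketch of the full lifecycle; buf must be linear non-stack memory since it is mapped through a scatterlist:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int arc4_crypt_buf(const u8 *key, int klen, u8 *buf, int len)
{
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		err = crypto_sync_skcipher_setkey(tfm, key, klen);
		if (!err) {
			sg_init_one(&sg, buf, len);
			skcipher_request_set_sync_tfm(req, tfm);
			skcipher_request_set_callback(req, 0, NULL, NULL);
			skcipher_request_set_crypt(req, &sg, &sg, len, NULL);
			err = crypto_skcipher_encrypt(req);
		}
		skcipher_request_zero(req);	/* wipe key material */
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}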
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index b3343a5d0fd6..d11ec39171d5 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -27,8 +27,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_skcipher *tx_tfm;
- struct crypto_skcipher *rx_tfm;
+ struct crypto_sync_skcipher *tx_tfm;
+ struct crypto_sync_skcipher *rx_tfm;
};
@@ -41,13 +41,13 @@ static void *prism2_wep_init(int keyidx)
goto fail;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm)) {
pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
priv->tx_tfm = NULL;
goto fail;
}
- priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm)) {
pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
priv->rx_tfm = NULL;
@@ -61,8 +61,8 @@ static void *prism2_wep_init(int keyidx)
fail:
if (priv) {
- crypto_free_skcipher(priv->tx_tfm);
- crypto_free_skcipher(priv->rx_tfm);
+ crypto_free_sync_skcipher(priv->tx_tfm);
+ crypto_free_sync_skcipher(priv->rx_tfm);
kfree(priv);
}
return NULL;
@@ -74,8 +74,8 @@ static void prism2_wep_deinit(void *priv)
struct prism2_wep_data *_priv = priv;
if (_priv) {
- crypto_free_skcipher(_priv->tx_tfm);
- crypto_free_skcipher(_priv->rx_tfm);
+ crypto_free_sync_skcipher(_priv->tx_tfm);
+ crypto_free_sync_skcipher(_priv->rx_tfm);
}
kfree(priv);
}
@@ -135,7 +135,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
@@ -146,8 +146,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[3] = crc >> 24;
sg_init_one(&sg, pos, len+4);
- crypto_skcipher_setkey(wep->tx_tfm, key, klen);
- skcipher_request_set_tfm(req, wep->tx_tfm);
+ crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
+ skcipher_request_set_sync_tfm(req, wep->tx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
err = crypto_skcipher_encrypt(req);
@@ -199,11 +199,11 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
sg_init_one(&sg, pos, plen+4);
- crypto_skcipher_setkey(wep->rx_tfm, key, klen);
- skcipher_request_set_tfm(req, wep->rx_tfm);
+ crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
+ skcipher_request_set_sync_tfm(req, wep->rx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
err = crypto_skcipher_decrypt(req);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 1088fa0aee0e..829fa4bd253c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -53,9 +53,9 @@ struct ieee80211_tkip_data {
int key_idx;
- struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_sync_skcipher *rx_tfm_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_sync_skcipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
@@ -71,8 +71,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
@@ -88,8 +87,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
@@ -110,9 +108,9 @@ static void *ieee80211_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_skcipher(priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -126,9 +124,9 @@ static void ieee80211_tkip_deinit(void *priv)
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_skcipher(_priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -340,7 +338,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
@@ -348,9 +346,9 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, len+4);
- skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+ skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
ret = crypto_skcipher_encrypt(req);
@@ -418,7 +416,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
if (iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
@@ -440,10 +438,10 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
- crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, plen+4);
- skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+ skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
@@ -663,9 +661,9 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
struct ieee80211_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index b9f86be9e52b..d4a1bf0caa7a 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -32,8 +32,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_skcipher *tx_tfm;
- struct crypto_skcipher *rx_tfm;
+ struct crypto_sync_skcipher *tx_tfm;
+ struct crypto_sync_skcipher *rx_tfm;
};
@@ -46,10 +46,10 @@ static void *prism2_wep_init(int keyidx)
return NULL;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm))
goto free_priv;
- priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm))
goto free_tx;
@@ -58,7 +58,7 @@ static void *prism2_wep_init(int keyidx)
return priv;
free_tx:
- crypto_free_skcipher(priv->tx_tfm);
+ crypto_free_sync_skcipher(priv->tx_tfm);
free_priv:
kfree(priv);
return NULL;
@@ -70,8 +70,8 @@ static void prism2_wep_deinit(void *priv)
struct prism2_wep_data *_priv = priv;
if (_priv) {
- crypto_free_skcipher(_priv->tx_tfm);
- crypto_free_skcipher(_priv->rx_tfm);
+ crypto_free_sync_skcipher(_priv->tx_tfm);
+ crypto_free_sync_skcipher(_priv->rx_tfm);
}
kfree(priv);
}
@@ -128,7 +128,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
@@ -138,10 +138,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(wep->tx_tfm, key, klen);
+ crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
sg_init_one(&sg, pos, len+4);
- skcipher_request_set_tfm(req, wep->tx_tfm);
+ skcipher_request_set_sync_tfm(req, wep->tx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
@@ -193,12 +193,12 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
- crypto_skcipher_setkey(wep->rx_tfm, key, klen);
+ crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
sg_init_one(&sg, pos, plen+4);
- skcipher_request_set_tfm(req, wep->rx_tfm);
+ skcipher_request_set_sync_tfm(req, wep->rx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 8de16016b6de..71888b979ab5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -598,9 +598,12 @@ out:
mutex_unlock(&cdev_list_lock);
}
+static void __cxgbit_free_conn(struct cxgbit_sock *csk);
+
void cxgbit_free_np(struct iscsi_np *np)
{
struct cxgbit_np *cnp = np->np_context;
+ struct cxgbit_sock *csk, *tmp;
cnp->com.state = CSK_STATE_DEAD;
if (cnp->com.cdev)
@@ -608,6 +611,13 @@ void cxgbit_free_np(struct iscsi_np *np)
else
cxgbit_free_all_np(cnp);
+ spin_lock_bh(&cnp->np_accept_lock);
+ list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
+ list_del_init(&csk->accept_node);
+ __cxgbit_free_conn(csk);
+ }
+ spin_unlock_bh(&cnp->np_accept_lock);
+
np->np_context = NULL;
cxgbit_put_cnp(cnp);
}
@@ -705,9 +715,9 @@ void cxgbit_abort_conn(struct cxgbit_sock *csk)
csk->tid, 600, __func__);
}
-void cxgbit_free_conn(struct iscsi_conn *conn)
+static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
- struct cxgbit_sock *csk = conn->context;
+ struct iscsi_conn *conn = csk->conn;
bool release = false;
pr_debug("%s: state %d\n",
@@ -716,7 +726,7 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
spin_lock_bh(&csk->lock);
switch (csk->com.state) {
case CSK_STATE_ESTABLISHED:
- if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+ if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
csk->com.state = CSK_STATE_CLOSING;
cxgbit_send_halfclose(csk);
} else {
@@ -741,6 +751,11 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
cxgbit_put_csk(csk);
}
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+ __cxgbit_free_conn(conn->context);
+}
+
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
@@ -803,6 +818,7 @@ void _cxgbit_free_csk(struct kref *kref)
spin_unlock_bh(&cdev->cskq.lock);
cxgbit_free_skb(csk);
+ cxgbit_put_cnp(csk->cnp);
cxgbit_put_cdev(cdev);
kfree(csk);
@@ -1351,6 +1367,7 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
goto rel_skb;
}
+ cxgbit_get_cnp(cnp);
cxgbit_get_cdev(cdev);
spin_lock(&cdev->cskq.lock);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 9cdfccbdd06f..c1d5a173553d 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1416,7 +1416,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
sg_init_table(sg, ARRAY_SIZE(sg));
sg_set_buf(sg, buf, payload_length);
- sg_set_buf(sg + 1, pad_bytes, padding);
+ if (padding)
+ sg_set_buf(sg + 1, pad_bytes, padding);
ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
@@ -3910,10 +3911,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
{
int ret;
- u8 buffer[ISCSI_HDR_LEN], opcode;
+ u8 *buffer, opcode;
u32 checksum = 0, digest = 0;
struct kvec iov;
+ buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return;
+
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
@@ -3921,7 +3926,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
*/
iscsit_thread_check_cpumask(conn, current, 0);
- memset(buffer, 0, ISCSI_HDR_LEN);
memset(&iov, 0, sizeof(struct kvec));
iov.iov_base = buffer;
@@ -3930,7 +3934,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
if (ret != ISCSI_HDR_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- return;
+ break;
}
if (conn->conn_ops->HeaderDigest) {
@@ -3940,7 +3944,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
if (ret != ISCSI_CRC_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- return;
+ break;
}
iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
@@ -3964,7 +3968,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
}
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
- return;
+ break;
opcode = buffer[0] & ISCSI_OPCODE_MASK;
@@ -3975,13 +3979,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
" while in Discovery Session, rejecting.\n", opcode);
iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buffer);
- return;
+ break;
}
ret = iscsi_target_rx_opcode(conn, buffer);
if (ret < 0)
- return;
+ break;
}
+
+ kfree(buffer);
}
int iscsi_target_rx_thread(void *arg)
@@ -4349,7 +4355,7 @@ int iscsit_close_session(struct iscsi_session *sess)
transport_deregister_session(sess->se_sess);
if (sess->sess_ops->ErrorRecoveryLevel == 2)
- iscsit_free_connection_recovery_entires(sess);
+ iscsit_free_connection_recovery_entries(sess);
iscsit_free_all_ooo_cmdsns(sess);
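iscsit_get_rx_pdu() above moves its ISCSI_HDR_LEN header buffer from the kthread stack to the heap; as a consequence every early return inside the receive loop becomes a break so the single kfree() runs on all exit paths. The shape of that conversion, hedged (recv_hdr() is a hypothetical stand-in for the rx_data() calls):

#include <linux/slab.h>
#include <linux/kthread.h>

extern int recv_hdr(u8 *buf);	/* hypothetical receive step */

static void rx_loop(void)
{
	u8 *buffer;

	buffer = kcalloc(48, sizeof(*buffer), GFP_KERNEL);	/* ISCSI_HDR_LEN */
	if (!buffer)
		return;

	while (!kthread_should_stop()) {
		if (recv_hdr(buffer) < 0)
			break;	/* not return: must fall through to kfree() */
		/* ... dispatch on the received opcode ... */
	}

	kfree(buffer);
}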
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 9518ffd8b8ba..4e680d753941 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -26,27 +26,6 @@
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
- int j = DIV_ROUND_UP(len, 2), rc;
-
- rc = hex2bin(dst, src, j);
- if (rc < 0)
- pr_debug("CHAP string contains non hex digit symbols\n");
-
- dst[j] = '\0';
- return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
- int i;
-
- for (i = 0; i < src_len; i++) {
- sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
- }
-}
-
static int chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
@@ -62,7 +41,7 @@ static int chap_gen_challenge(
ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
if (unlikely(ret))
return ret;
- chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+ bin2hex(challenge_asciihex, chap->challenge,
CHAP_CHALLENGE_LENGTH);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
@@ -248,9 +227,16 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_R.\n");
goto out;
}
+ if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
+ if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
- chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
tfm = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(tfm)) {
@@ -294,7 +280,7 @@ static int chap_server_compute_md5(
goto out;
}
- chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
pr_debug("[server] MD5 Server Digest: %s\n", response);
if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -349,9 +335,7 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_C.\n");
goto out;
}
- pr_debug("[server] Got CHAP_C=%s\n", challenge);
- challenge_len = chap_string_to_hex(challenge_binhex, challenge,
- strlen(challenge));
+ challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
if (!challenge_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
@@ -360,6 +344,11 @@ static int chap_server_compute_md5(
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
+ if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+ pr_err("Malformed CHAP_C\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_C=%s\n", challenge);
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
@@ -413,7 +402,7 @@ static int chap_server_compute_md5(
/*
* Convert response from binary hex to ascii hext.
*/
- chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, digest, MD5_SIGNATURE_SIZE);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
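The CHAP rework retires the driver-private chap_string_to_hex()/chap_binaryhex_to_asciihex() helpers in favor of the kernel's hex2bin()/bin2hex(), and now rejects a CHAP_R whose length is not exactly two ASCII characters per digest byte. Hedged sketch of the library calls (16 stands in for MD5_SIGNATURE_SIZE):

#include <linux/kernel.h>	/* hex2bin(), bin2hex() */
#include <linux/string.h>
#include <linux/errno.h>

static int parse_digest(u8 *digest, const char *hex)
{
	char ascii[16 * 2 + 1];

	if (strlen(hex) != 16 * 2)		/* exact length check first */
		return -EINVAL;
	if (hex2bin(digest, hex, 16) < 0)	/* rejects non-hex characters */
		return -EINVAL;

	bin2hex(ascii, digest, 16);		/* back to lowercase ASCII */
	ascii[16 * 2] = '\0';			/* bin2hex does not terminate */
	return 0;
}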
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 718fe9a1b709..1193cf884a28 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -770,21 +770,8 @@ void iscsit_handle_time2retain_timeout(struct timer_list *t)
pr_err("Time2Retain timer expired for SID: %u, cleaning up"
" iSCSI session.\n", sess->sid);
- {
- struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
- if (tiqn) {
- spin_lock(&tiqn->sess_err_stats.lock);
- strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
- (void *)sess->sess_ops->InitiatorName);
- tiqn->sess_err_stats.last_sess_failure_type =
- ISCSI_SESS_ERR_CXN_TIMEOUT;
- tiqn->sess_err_stats.cxn_timeout_errors++;
- atomic_long_inc(&sess->conn_timeout_errors);
- spin_unlock(&tiqn->sess_err_stats.lock);
- }
- }
+ iscsit_fill_cxn_timeout_err_stats(sess);
spin_unlock_bh(&se_tpg->session_lock);
iscsit_close_session(sess);
}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 5efa42b939a1..a211e8154f4c 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -1169,15 +1169,21 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
na = iscsit_tpg_get_node_attrib(sess);
if (!sess->sess_ops->ErrorRecoveryLevel) {
- pr_debug("Unable to recover from DataOut timeout while"
- " in ERL=0.\n");
+ pr_err("Unable to recover from DataOut timeout while"
+ " in ERL=0, closing iSCSI connection for I_T Nexus"
+ " %s,i,0x%6phN,%s,t,0x%02x\n",
+ sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
goto failure;
}
if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
- pr_debug("Command ITT: 0x%08x exceeded max retries"
- " for DataOUT timeout %u, closing iSCSI connection.\n",
- cmd->init_task_tag, na->dataout_timeout_retries);
+ pr_err("Command ITT: 0x%08x exceeded max retries"
+ " for DataOUT timeout %u, closing iSCSI connection for"
+ " I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+ cmd->init_task_tag, na->dataout_timeout_retries,
+ sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
goto failure;
}
@@ -1224,6 +1230,7 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
failure:
spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_fill_cxn_timeout_err_stats(sess);
iscsit_cause_connection_reinstatement(conn, 0);
iscsit_dec_conn_usage_count(conn);
}
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 8df9c90f3db3..b08b620b1bf0 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -125,7 +125,7 @@ struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
return NULL;
}
-void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+void iscsit_free_connection_recovery_entries(struct iscsi_session *sess)
{
struct iscsi_cmd *cmd, *cmd_tmp;
struct iscsi_conn_recovery *cr, *cr_tmp;
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 93e180d68d07..a39b0caf2337 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -13,7 +13,7 @@ extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32
extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
struct iscsi_session *, u16);
-extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+extern void iscsit_free_connection_recovery_entries(struct iscsi_session *);
extern int iscsit_remove_active_connection_recovery_entry(
struct iscsi_conn_recovery *, struct iscsi_session *);
extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bb90c80ff388..ae3209efd0e0 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -578,7 +578,7 @@ int iscsi_login_post_auth_non_zero_tsih(
}
/*
- * Check for any connection recovery entires containing CID.
+ * Check for any connection recovery entries containing CID.
* We use the original ExpStatSN sent in the first login request
* to acknowledge commands for the failed connection.
*
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index df0a39811dc2..bb98882bdaa7 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -328,10 +328,10 @@ static ssize_t iscsi_stat_tgt_attr_fail_intr_name_show(struct config_item *item,
{
struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
- unsigned char buf[224];
+ unsigned char buf[ISCSI_IQN_LEN];
spin_lock(&lstat->lock);
- snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
+ snprintf(buf, ISCSI_IQN_LEN, "%s", lstat->last_intr_fail_name[0] ?
lstat->last_intr_fail_name : NONE);
spin_unlock(&lstat->lock);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 49be1e41290c..1227872227dc 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -915,6 +915,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
void iscsit_handle_nopin_response_timeout(struct timer_list *t)
{
struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);
+ struct iscsi_session *sess = conn->sess;
iscsit_inc_conn_usage_count(conn);
@@ -925,28 +926,14 @@ void iscsit_handle_nopin_response_timeout(struct timer_list *t)
return;
}
- pr_debug("Did not receive response to NOPIN on CID: %hu on"
- " SID: %u, failing connection.\n", conn->cid,
- conn->sess->sid);
+ pr_err("Did not receive response to NOPIN on CID: %hu, failing"
+ " connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+ conn->cid, sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
- {
- struct iscsi_portal_group *tpg = conn->sess->tpg;
- struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
- if (tiqn) {
- spin_lock_bh(&tiqn->sess_err_stats.lock);
- strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
- conn->sess->sess_ops->InitiatorName);
- tiqn->sess_err_stats.last_sess_failure_type =
- ISCSI_SESS_ERR_CXN_TIMEOUT;
- tiqn->sess_err_stats.cxn_timeout_errors++;
- atomic_long_inc(&conn->sess->conn_timeout_errors);
- spin_unlock_bh(&tiqn->sess_err_stats.lock);
- }
- }
-
+ iscsit_fill_cxn_timeout_err_stats(sess);
iscsit_cause_connection_reinstatement(conn, 0);
iscsit_dec_conn_usage_count(conn);
}
@@ -1405,3 +1392,22 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
return tpg->tpg_tiqn;
}
+
+void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *sess)
+{
+ struct iscsi_portal_group *tpg = sess->tpg;
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ if (!tiqn)
+ return;
+
+ spin_lock_bh(&tiqn->sess_err_stats.lock);
+ strlcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+ sess->sess_ops->InitiatorName,
+ sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
+ tiqn->sess_err_stats.last_sess_failure_type =
+ ISCSI_SESS_ERR_CXN_TIMEOUT;
+ tiqn->sess_err_stats.cxn_timeout_errors++;
+ atomic_long_inc(&sess->conn_timeout_errors);
+ spin_unlock_bh(&tiqn->sess_err_stats.lock);
+}
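Note also the strcpy() to strlcpy() switch in the consolidated helper above: the copy is now bounded by the destination field and always NUL-terminated. A hedged one-function sketch with an illustrative struct:

#include <linux/string.h>

struct err_stats {				/* illustrative stand-in */
	char last_sess_fail_rem_name[224];
};

static void record_rem_name(struct err_stats *stats, const char *name)
{
	/* Truncates overlong input instead of overflowing the field. */
	strlcpy(stats->last_sess_fail_rem_name, name,
		sizeof(stats->last_sess_fail_rem_name));
}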
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index d66dfc212624..68e84803b0a1 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -67,5 +67,6 @@ extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+extern void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *);
#endif /*** ISCSI_TARGET_UTIL_H ***/
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index ce1321a5cb7b..b5ed9c377060 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -514,7 +514,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
}
/* Always in 512 byte units for Linux/Block */
- block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ block_lba += sg->length >> SECTOR_SHIFT;
sectors -= 1;
}
@@ -635,14 +635,15 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
}
static int
-iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
+ struct sg_mapping_iter *miter)
{
struct se_device *dev = cmd->se_dev;
struct blk_integrity *bi;
struct bio_integrity_payload *bip;
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- struct scatterlist *sg;
- int i, rc;
+ int rc;
+ size_t resid, len;
bi = bdev_get_integrity(ib_dev->ibd_bd);
if (!bi) {
@@ -650,31 +651,39 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
return -ENODEV;
}
- bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+ bip = bio_integrity_alloc(bio, GFP_NOIO,
+ min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
if (IS_ERR(bip)) {
pr_err("Unable to allocate bio_integrity_payload\n");
return PTR_ERR(bip);
}
- bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
- dev->prot_length;
- bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
+ bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+ bip_set_seed(bip, bio->bi_iter.bi_sector);
pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
(unsigned long long)bip->bip_iter.bi_sector);
- for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
+ resid = bip->bip_iter.bi_size;
+ while (resid > 0 && sg_miter_next(miter)) {
- rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
- sg->offset);
- if (rc != sg->length) {
+ len = min_t(size_t, miter->length, resid);
+ rc = bio_integrity_add_page(bio, miter->page, len,
+ offset_in_page(miter->addr));
+ if (rc != len) {
pr_err("bio_integrity_add_page() failed; %d\n", rc);
+ sg_miter_stop(miter);
return -ENOMEM;
}
- pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
- sg_page(sg), sg->length, sg->offset);
+ pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
+ miter->page, len, offset_in_page(miter->addr));
+
+ resid -= len;
+ if (len < miter->length)
+ miter->consumed -= miter->length - len;
}
+ sg_miter_stop(miter);
return 0;
}
@@ -686,12 +695,13 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct se_device *dev = cmd->se_dev;
sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
struct iblock_req *ibr;
- struct bio *bio, *bio_start;
+ struct bio *bio;
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
unsigned bio_cnt;
- int i, op, op_flags = 0;
+ int i, rc, op, op_flags = 0;
+ struct sg_mapping_iter prot_miter;
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -726,13 +736,17 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!bio)
goto fail_free_ibr;
- bio_start = bio;
bio_list_init(&list);
bio_list_add(&list, bio);
refcount_set(&ibr->pending, 2);
bio_cnt = 1;
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
+ sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
+ op == REQ_OP_READ ? SG_MITER_FROM_SG :
+ SG_MITER_TO_SG);
+
for_each_sg(sgl, sg, sgl_nents, i) {
/*
* XXX: if the length the device accepts is shorter than the
@@ -741,6 +755,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
*/
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+ rc = iblock_alloc_bip(cmd, bio, &prot_miter);
+ if (rc)
+ goto fail_put_bios;
+ }
+
if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
iblock_submit_bios(&list);
bio_cnt = 0;
@@ -757,12 +777,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
/* Always in 512 byte units for Linux/Block */
- block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ block_lba += sg->length >> SECTOR_SHIFT;
sg_num--;
}
if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
- int rc = iblock_alloc_bip(cmd, bio_start);
+ rc = iblock_alloc_bip(cmd, bio, &prot_miter);
if (rc)
goto fail_put_bios;
}
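iblock_alloc_bip() now consumes the protection scatterlist through a struct sg_mapping_iter shared across all bios of a command, so each bio takes only as many PI bytes as it covers; when a page is used partially, miter->consumed is rewound so the next call resumes mid-page. The basic miter loop, hedged:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Consume up to 'resid' bytes from a scatterlist, page by page. */
static void walk_prot(struct scatterlist *sgl, unsigned int nents,
		      size_t resid)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
	while (resid > 0 && sg_miter_next(&miter)) {
		size_t len = min_t(size_t, miter.length, resid);

		/* ... hand miter.page / miter.addr to the consumer ... */
		resid -= len;
		if (len < miter.length)		/* partial page: rewind */
			miter.consumed -= miter.length - len;
	}
	sg_miter_stop(&miter);	/* drops the kmap and flushes if needed */
}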
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 9cc3843404d4..cefc641145b3 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -9,7 +9,6 @@
#define IBLOCK_VERSION "4.0"
#define IBLOCK_MAX_CDBS 16
-#define IBLOCK_LBA_SHIFT 9
struct iblock_req {
refcount_t pending;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index ebac2b49b9c6..1ac1f7d2e6c9 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -360,6 +360,10 @@ static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
unsigned int offset;
sense_reason_t ret = TCM_NO_SENSE;
int i, count;
+
+ if (!success)
+ return 0;
+
/*
* From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
*
@@ -425,14 +429,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
struct se_device *dev = cmd->se_dev;
sense_reason_t ret = TCM_NO_SENSE;
- /*
- * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
- * within target_complete_ok_work() if the command was successfully
- * sent to the backend driver.
- */
spin_lock_irq(&cmd->t_state_lock);
- if (cmd->transport_state & CMD_T_SENT) {
- cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+ if (success) {
*post_ret = 1;
if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
@@ -453,7 +451,8 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
int *post_ret)
{
struct se_device *dev = cmd->se_dev;
- struct scatterlist *write_sg = NULL, *sg;
+ struct sg_table write_tbl = { };
+ struct scatterlist *write_sg, *sg;
unsigned char *buf = NULL, *addr;
struct sg_mapping_iter m;
unsigned int offset = 0, len;
@@ -494,14 +493,12 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
goto out;
}
- write_sg = kmalloc_array(cmd->t_data_nents, sizeof(*write_sg),
- GFP_KERNEL);
- if (!write_sg) {
+ if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
pr_err("Unable to allocate compare_and_write sg\n");
ret = TCM_OUT_OF_RESOURCES;
goto out;
}
- sg_init_table(write_sg, cmd->t_data_nents);
+ write_sg = write_tbl.sgl;
/*
* Setup verify and write data payloads from total NumberLBAs.
*/
@@ -597,7 +594,7 @@ out:
* sbc_compare_and_write() before the original READ I/O submission.
*/
up(&dev->caw_sem);
- kfree(write_sg);
+ sg_free_table(&write_tbl);
kfree(buf);
return ret;
}
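compare_and_write_callback() switches its scratch scatterlist to a struct sg_table, whose empty initializer makes the cleanup path unconditional: sg_free_table() on a never-allocated table is a no-op, and sg_alloc_table() initializes the entries that kmalloc_array() plus sg_init_table() used to. Hedged sketch:

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/errno.h>

static int build_scratch_sg(unsigned int nents)
{
	struct sg_table tbl = { };	/* safe to sg_free_table() as-is */

	if (sg_alloc_table(&tbl, nents, GFP_KERNEL) < 0)
		return -ENOMEM;

	/* ... populate tbl.sgl with sg_set_page()/sg_set_buf() ... */

	sg_free_table(&tbl);
	return 0;
}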
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index cb0461a10808..f459118bc11b 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -636,9 +636,9 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
/*
* The unmap_zeroes_data set means that the underlying device supports
- * REQ_DISCARD and has the discard_zeroes_data bit set. This satisfies
- * the SBC requirements for LBPRZ, meaning that a subsequent read
- * will return zeroes after an UNMAP or WRITE SAME (16) to an LBA
+ * REQ_OP_DISCARD and has the discard_zeroes_data bit set. This
+ * satisfies the SBC requirements for LBPRZ, meaning that a subsequent
+ * read will return zeroes after an UNMAP or WRITE SAME (16) to an LBA
* See sbc4r36 6.6.4.
*/
if (((dev->dev_attrib.emulate_tpu != 0) ||
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 86c0156e6c88..4cf33e2cc705 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1778,7 +1778,7 @@ EXPORT_SYMBOL(target_submit_tmr);
void transport_generic_request_failure(struct se_cmd *cmd,
sense_reason_t sense_reason)
{
- int ret = 0, post_ret = 0;
+ int ret = 0;
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
sense_reason);
@@ -1789,13 +1789,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
*/
transport_complete_task_attr(cmd);
- /*
- * Handle special case for COMPARE_AND_WRITE failure, where the
- * callback is expected to drop the per device ->caw_sem.
- */
- if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
- cmd->transport_complete_callback)
- cmd->transport_complete_callback(cmd, false, &post_ret);
+ if (cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd, false, NULL);
if (transport_check_aborted_status(cmd, 1))
return;
@@ -2012,7 +2007,7 @@ void target_execute_cmd(struct se_cmd *cmd)
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*
- * If the received CDB has aleady been aborted stop processing it here.
+ * If the received CDB has already been aborted stop processing it here.
*/
spin_lock_irq(&cmd->t_state_lock);
if (__transport_check_aborted_status(cmd, 1)) {
@@ -2516,7 +2511,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
}
/*
- * Determine is the TCM fabric module has already allocated physical
+ * Determine if the TCM fabric module has already allocated physical
* memory, and is directly calling transport_generic_map_mem_to_cmd()
* beforehand.
*/
@@ -2754,7 +2749,7 @@ static void target_release_cmd_kref(struct kref *kref)
if (se_sess) {
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_del_init(&se_cmd->se_cmd_list);
- if (list_empty(&se_sess->sess_cmd_list))
+ if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
wake_up(&se_sess->cmd_list_wq);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
@@ -2907,7 +2902,7 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
spin_lock_irq(&se_sess->sess_cmd_lock);
do {
- ret = wait_event_interruptible_lock_irq_timeout(
+ ret = wait_event_lock_irq_timeout(
se_sess->cmd_list_wq,
list_empty(&se_sess->sess_cmd_list),
se_sess->sess_cmd_lock, 180 * HZ);
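Dropping the _interruptible variant means a pending signal can no longer cut the session-command drain short: wait_event_lock_irq_timeout() sleeps in TASK_UNINTERRUPTIBLE, releasing the spinlock while sleeping and retaking it before each recheck of the condition. Hedged usage sketch:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/jiffies.h>

/* Caller holds 'lock'. Returns 0 on timeout, else remaining jiffies. */
static long wait_list_drained(wait_queue_head_t *wq, struct list_head *list,
			      spinlock_t *lock)
{
	return wait_event_lock_irq_timeout(*wq, list_empty(list),
					   *lock, 180 * HZ);
}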
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 2718a933c0c6..70adcfdca8d1 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -391,7 +391,6 @@ out:
struct xcopy_pt_cmd {
bool remote_port;
struct se_cmd se_cmd;
- struct xcopy_op *xcopy_op;
struct completion xpt_passthrough_sem;
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};
@@ -596,8 +595,6 @@ static int target_xcopy_setup_pt_cmd(
* X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
*/
target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
-
- xpt_cmd->xcopy_op = xop;
target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
cmd->tag = 0;
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index 1e47511a6bd5..d748527d7a38 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -45,7 +45,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
}
static const struct x86_cpu_id soc_thermal_ids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0,
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, 0,
BYT_SOC_DTS_APIC_IRQ},
{}
};
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index e1e264a9a4c7..28fc4ce75edb 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -738,14 +738,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
u8 link, depth;
u64 route;
- /*
- * After NVM upgrade adding root switch device fails because we
- * initiated reset. During that time ICM might still send
- * XDomain connected message which we ignore here.
- */
- if (!tb->root_switch)
- return;
-
link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
@@ -1037,14 +1029,6 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
if (pkg->hdr.packet_id)
return;
- /*
- * After NVM upgrade adding root switch device fails because we
- * initiated reset. During that time ICM might still send device
- * connected message which we ignore here.
- */
- if (!tb->root_switch)
- return;
-
route = get_route(pkg->route_hi, pkg->route_lo);
authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
@@ -1408,19 +1392,26 @@ static void icm_handle_notification(struct work_struct *work)
mutex_lock(&tb->lock);
- switch (n->pkg->code) {
- case ICM_EVENT_DEVICE_CONNECTED:
- icm->device_connected(tb, n->pkg);
- break;
- case ICM_EVENT_DEVICE_DISCONNECTED:
- icm->device_disconnected(tb, n->pkg);
- break;
- case ICM_EVENT_XDOMAIN_CONNECTED:
- icm->xdomain_connected(tb, n->pkg);
- break;
- case ICM_EVENT_XDOMAIN_DISCONNECTED:
- icm->xdomain_disconnected(tb, n->pkg);
- break;
+ /*
+ * When the domain is stopped we flush its workqueue but before
+ * that the root switch is removed. In that case we should treat
+ * the queued events as being canceled.
+ */
+ if (tb->root_switch) {
+ switch (n->pkg->code) {
+ case ICM_EVENT_DEVICE_CONNECTED:
+ icm->device_connected(tb, n->pkg);
+ break;
+ case ICM_EVENT_DEVICE_DISCONNECTED:
+ icm->device_disconnected(tb, n->pkg);
+ break;
+ case ICM_EVENT_XDOMAIN_CONNECTED:
+ icm->xdomain_connected(tb, n->pkg);
+ break;
+ case ICM_EVENT_XDOMAIN_DISCONNECTED:
+ icm->xdomain_disconnected(tb, n->pkg);
+ break;
+ }
}
mutex_unlock(&tb->lock);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 88cff05a1808..5cd6bdfa068f 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1191,5 +1191,5 @@ static void __exit nhi_unload(void)
tb_domain_exit();
}
-fs_initcall(nhi_init);
+rootfs_initcall(nhi_init);
module_exit(nhi_unload);
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 34dead614149..8330fd809a05 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -996,63 +996,55 @@ static void rs_unthrottle(struct tty_struct * tty)
* ------------------------------------------------------------
*/
-static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
- struct serial_struct __user * retinfo)
+static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
+ struct serial_state *state = tty->driver_data;
+
tty_lock(tty);
- tmp.line = tty->index;
- tmp.port = state->port;
- tmp.flags = state->tport.flags;
- tmp.xmit_fifo_size = state->xmit_fifo_size;
- tmp.baud_base = state->baud_base;
- tmp.close_delay = state->tport.close_delay;
- tmp.closing_wait = state->tport.closing_wait;
- tmp.custom_divisor = state->custom_divisor;
+ ss->line = tty->index;
+ ss->port = state->port;
+ ss->flags = state->tport.flags;
+ ss->xmit_fifo_size = state->xmit_fifo_size;
+ ss->baud_base = state->baud_base;
+ ss->close_delay = state->tport.close_delay;
+ ss->closing_wait = state->tport.closing_wait;
+ ss->custom_divisor = state->custom_divisor;
tty_unlock(tty);
- if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
- return -EFAULT;
return 0;
}
-static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
- struct serial_struct __user * new_info)
+static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
+ struct serial_state *state = tty->driver_data;
struct tty_port *port = &state->tport;
- struct serial_struct new_serial;
bool change_spd;
int retval = 0;
- if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
- return -EFAULT;
-
tty_lock(tty);
- change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) ||
- new_serial.custom_divisor != state->custom_divisor;
- if (new_serial.irq || new_serial.port != state->port ||
- new_serial.xmit_fifo_size != state->xmit_fifo_size) {
+ change_spd = ((ss->flags ^ port->flags) & ASYNC_SPD_MASK) ||
+ ss->custom_divisor != state->custom_divisor;
+ if (ss->irq || ss->port != state->port ||
+ ss->xmit_fifo_size != state->xmit_fifo_size) {
tty_unlock(tty);
return -EINVAL;
}
if (!serial_isroot()) {
- if ((new_serial.baud_base != state->baud_base) ||
- (new_serial.close_delay != port->close_delay) ||
- (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
- ((new_serial.flags & ~ASYNC_USR_MASK) !=
+ if ((ss->baud_base != state->baud_base) ||
+ (ss->close_delay != port->close_delay) ||
+ (ss->xmit_fifo_size != state->xmit_fifo_size) ||
+ ((ss->flags & ~ASYNC_USR_MASK) !=
(port->flags & ~ASYNC_USR_MASK))) {
tty_unlock(tty);
return -EPERM;
}
port->flags = ((port->flags & ~ASYNC_USR_MASK) |
- (new_serial.flags & ASYNC_USR_MASK));
- state->custom_divisor = new_serial.custom_divisor;
+ (ss->flags & ASYNC_USR_MASK));
+ state->custom_divisor = ss->custom_divisor;
goto check_and_exit;
}
- if (new_serial.baud_base < 9600) {
+ if (ss->baud_base < 9600) {
tty_unlock(tty);
return -EINVAL;
}
@@ -1062,19 +1054,19 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
* At this point, we start making changes.....
*/
- state->baud_base = new_serial.baud_base;
+ state->baud_base = ss->baud_base;
port->flags = ((port->flags & ~ASYNC_FLAGS) |
- (new_serial.flags & ASYNC_FLAGS));
- state->custom_divisor = new_serial.custom_divisor;
- port->close_delay = new_serial.close_delay * HZ/100;
- port->closing_wait = new_serial.closing_wait * HZ/100;
+ (ss->flags & ASYNC_FLAGS));
+ state->custom_divisor = ss->custom_divisor;
+ port->close_delay = ss->close_delay * HZ/100;
+ port->closing_wait = ss->closing_wait * HZ/100;
port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
check_and_exit:
if (tty_port_initialized(port)) {
if (change_spd) {
/* warn about deprecation unless clearing */
- if (new_serial.flags & ASYNC_SPD_MASK)
+ if (ss->flags & ASYNC_SPD_MASK)
dev_warn_ratelimited(tty->dev, "use of SPD flags is deprecated\n");
change_speed(tty, state, NULL);
}
@@ -1084,7 +1076,6 @@ check_and_exit:
return retval;
}
-
/*
* get_lsr_info - get line status register info
*
@@ -1224,30 +1215,19 @@ static int rs_ioctl(struct tty_struct *tty,
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
return -ENODEV;
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
- (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
+ if ((cmd != TIOCSERCONFIG) &&
(cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
if (tty_io_error(tty))
return -EIO;
}
switch (cmd) {
- case TIOCGSERIAL:
- return get_serial_info(tty, info, argp);
- case TIOCSSERIAL:
- return set_serial_info(tty, info, argp);
case TIOCSERCONFIG:
return 0;
case TIOCSERGETLSR: /* Get line status register */
return get_lsr_info(info, argp);
- case TIOCSERGSTRUCT:
- if (copy_to_user(argp,
- info, sizeof(struct serial_state)))
- return -EFAULT;
- return 0;
-
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
@@ -1288,12 +1268,6 @@ static int rs_ioctl(struct tty_struct *tty,
finish_wait(&info->tport.delta_msr_wait, &wait);
return ret;
- case TIOCSERGWILD:
- case TIOCSERSWILD:
- /* "setserial -W" is called in Debian boot */
- printk ("TIOCSER?WILD ioctl obsolete, ignored.\n");
- return 0;
-
default:
return -ENOIOCTLCMD;
}
@@ -1607,6 +1581,8 @@ static const struct tty_operations serial_ops = {
.tiocmget = rs_tiocmget,
.tiocmset = rs_tiocmset,
.get_icount = rs_get_icount,
+ .set_serial = set_serial_info,
+ .get_serial = get_serial_info,
.proc_show = rs_proc_show,
};
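
The amiserial conversion above is the template for the rest of this series: TIOCGSERIAL/TIOCSSERIAL parsing moves out of each driver's ->ioctl() and into the tty core, which performs the single copy_{to,from}_user() and hands drivers a kernel-space struct serial_struct through the new tty_operations hooks. A minimal sketch of the driver side, with hypothetical foo_* names:

	static int foo_get_serial(struct tty_struct *tty, struct serial_struct *ss)
	{
		struct foo_port *p = tty->driver_data;

		/* No copy_to_user() here; the tty core copies *ss out. */
		ss->line = tty->index;
		ss->baud_base = p->baud_base;
		ss->close_delay = p->port.close_delay;
		return 0;
	}

	static int foo_set_serial(struct tty_struct *tty, struct serial_struct *ss)
	{
		struct foo_port *p = tty->driver_data;

		if (!capable(CAP_SYS_ADMIN) &&
		    (ss->flags & ~ASYNC_USR_MASK) !=
		    (p->port.flags & ~ASYNC_USR_MASK))
			return -EPERM;
		p->port.close_delay = ss->close_delay * HZ / 100;
		return 0;
	}

	static const struct tty_operations foo_ops = {
		/* ... */
		.get_serial = foo_get_serial,
		.set_serial = foo_set_serial,
	};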
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 6d3c58051ce3..4562c8060d09 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -2257,44 +2257,45 @@ static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty)
}
} /* set_line_char */
-static int cy_get_serial_info(struct cyclades_port *info,
- struct serial_struct __user *retinfo)
+static int cy_get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
+ struct cyclades_port *info = tty->driver_data;
struct cyclades_card *cinfo = info->card;
- struct serial_struct tmp = {
- .type = info->type,
- .line = info->line,
- .port = (info->card - cy_card) * 0x100 + info->line -
- cinfo->first_line,
- .irq = cinfo->irq,
- .flags = info->port.flags,
- .close_delay = info->port.close_delay,
- .closing_wait = info->port.closing_wait,
- .baud_base = info->baud,
- .custom_divisor = info->custom_divisor,
- };
- return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
+
+ if (serial_paranoia_check(info, tty->name, "cy_ioctl"))
+ return -ENODEV;
+ ss->type = info->type;
+ ss->line = info->line;
+ ss->port = (info->card - cy_card) * 0x100 + info->line -
+ cinfo->first_line;
+ ss->irq = cinfo->irq;
+ ss->flags = info->port.flags;
+ ss->close_delay = info->port.close_delay;
+ ss->closing_wait = info->port.closing_wait;
+ ss->baud_base = info->baud;
+ ss->custom_divisor = info->custom_divisor;
+ return 0;
}
-static int
-cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
- struct serial_struct __user *new_info)
+static int cy_set_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct new_serial;
+ struct cyclades_port *info = tty->driver_data;
int old_flags;
int ret;
- if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
- return -EFAULT;
+ if (serial_paranoia_check(info, tty->name, "cy_ioctl"))
+ return -ENODEV;
mutex_lock(&info->port.mutex);
old_flags = info->port.flags;
if (!capable(CAP_SYS_ADMIN)) {
- if (new_serial.close_delay != info->port.close_delay ||
- new_serial.baud_base != info->baud ||
- (new_serial.flags & ASYNC_FLAGS &
+ if (ss->close_delay != info->port.close_delay ||
+ ss->baud_base != info->baud ||
+ (ss->flags & ASYNC_FLAGS &
~ASYNC_USR_MASK) !=
(info->port.flags & ASYNC_FLAGS & ~ASYNC_USR_MASK))
{
@@ -2302,9 +2303,9 @@ cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
return -EPERM;
}
info->port.flags = (info->port.flags & ~ASYNC_USR_MASK) |
- (new_serial.flags & ASYNC_USR_MASK);
- info->baud = new_serial.baud_base;
- info->custom_divisor = new_serial.custom_divisor;
+ (ss->flags & ASYNC_USR_MASK);
+ info->baud = ss->baud_base;
+ info->custom_divisor = ss->custom_divisor;
goto check_and_exit;
}
@@ -2313,18 +2314,18 @@ cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
* At this point, we start making changes.....
*/
- info->baud = new_serial.baud_base;
- info->custom_divisor = new_serial.custom_divisor;
+ info->baud = ss->baud_base;
+ info->custom_divisor = ss->custom_divisor;
info->port.flags = (info->port.flags & ~ASYNC_FLAGS) |
- (new_serial.flags & ASYNC_FLAGS);
- info->port.close_delay = new_serial.close_delay * HZ / 100;
- info->port.closing_wait = new_serial.closing_wait * HZ / 100;
+ (ss->flags & ASYNC_FLAGS);
+ info->port.close_delay = ss->close_delay * HZ / 100;
+ info->port.closing_wait = ss->closing_wait * HZ / 100;
check_and_exit:
if (tty_port_initialized(&info->port)) {
- if ((new_serial.flags ^ old_flags) & ASYNC_SPD_MASK) {
+ if ((ss->flags ^ old_flags) & ASYNC_SPD_MASK) {
/* warn about deprecation unless clearing */
- if (new_serial.flags & ASYNC_SPD_MASK)
+ if (ss->flags & ASYNC_SPD_MASK)
dev_warn_ratelimited(tty->dev, "use of SPD flags is deprecated\n");
}
cy_set_line_char(info, tty);
@@ -2698,12 +2699,6 @@ cy_ioctl(struct tty_struct *tty,
case CYGETWAIT:
ret_val = info->port.closing_wait / (HZ / 100);
break;
- case TIOCGSERIAL:
- ret_val = cy_get_serial_info(info, argp);
- break;
- case TIOCSSERIAL:
- ret_val = cy_set_serial_info(info, tty, argp);
- break;
case TIOCSERGETLSR: /* Get line status register */
ret_val = get_lsr_info(info, argp);
break;
@@ -4011,6 +4006,8 @@ static const struct tty_operations cy_ops = {
.tiocmget = cy_tiocmget,
.tiocmset = cy_tiocmset,
.get_icount = cy_get_icount,
+ .set_serial = cy_set_serial_info,
+ .get_serial = cy_get_serial_info,
.proc_show = cyclades_proc_show,
};
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 1ef751c27ac6..fad3401e604d 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -248,22 +248,29 @@ static int ipw_write_room(struct tty_struct *linux_tty)
return room;
}
-static int ipwireless_get_serial_info(struct ipw_tty *tty,
- struct serial_struct __user *retinfo)
+static int ipwireless_get_serial_info(struct tty_struct *linux_tty,
+ struct serial_struct *ss)
{
- struct serial_struct tmp;
+ struct ipw_tty *tty = linux_tty->driver_data;
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = PORT_UNKNOWN;
- tmp.line = tty->index;
- tmp.baud_base = 115200;
+ if (!tty)
+ return -ENODEV;
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
+ if (!tty->port.count)
+ return -EINVAL;
+ ss->type = PORT_UNKNOWN;
+ ss->line = tty->index;
+ ss->baud_base = 115200;
return 0;
}
+static int ipwireless_set_serial_info(struct tty_struct *linux_tty,
+ struct serial_struct *ss)
+{
+ return 0; /* Keeps the PCMCIA scripts happy. */
+}
+
static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
{
struct ipw_tty *tty = linux_tty->driver_data;
@@ -386,15 +393,6 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
return -EINVAL;
/* FIXME: Exactly how is the tty object locked here .. */
-
- switch (cmd) {
- case TIOCGSERIAL:
- return ipwireless_get_serial_info(tty, (void __user *) arg);
-
- case TIOCSSERIAL:
- return 0; /* Keeps the PCMCIA scripts happy. */
- }
-
if (tty->tty_type == TTYTYPE_MODEM) {
switch (cmd) {
case PPPIOCGCHAN:
@@ -561,6 +559,8 @@ static const struct tty_operations tty_ops = {
.chars_in_buffer = ipw_chars_in_buffer,
.tiocmget = ipw_tiocmget,
.tiocmset = ipw_tiocmset,
+ .set_serial = ipwireless_set_serial_info,
+ .get_serial = ipwireless_get_serial_info,
};
int ipwireless_tty_init(void)
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 8d96e86966f1..e04a43e89f6b 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -1091,34 +1091,33 @@ static int isicom_tiocmset(struct tty_struct *tty,
}
static int isicom_set_serial_info(struct tty_struct *tty,
- struct serial_struct __user *info)
+ struct serial_struct *ss)
{
struct isi_port *port = tty->driver_data;
- struct serial_struct newinfo;
int reconfig_port;
- if (copy_from_user(&newinfo, info, sizeof(newinfo)))
- return -EFAULT;
+ if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
+ return -ENODEV;
mutex_lock(&port->port.mutex);
reconfig_port = ((port->port.flags & ASYNC_SPD_MASK) !=
- (newinfo.flags & ASYNC_SPD_MASK));
+ (ss->flags & ASYNC_SPD_MASK));
if (!capable(CAP_SYS_ADMIN)) {
- if ((newinfo.close_delay != port->port.close_delay) ||
- (newinfo.closing_wait != port->port.closing_wait) ||
- ((newinfo.flags & ~ASYNC_USR_MASK) !=
+ if ((ss->close_delay != port->port.close_delay) ||
+ (ss->closing_wait != port->port.closing_wait) ||
+ ((ss->flags & ~ASYNC_USR_MASK) !=
(port->port.flags & ~ASYNC_USR_MASK))) {
mutex_unlock(&port->port.mutex);
return -EPERM;
}
port->port.flags = ((port->port.flags & ~ASYNC_USR_MASK) |
- (newinfo.flags & ASYNC_USR_MASK));
+ (ss->flags & ASYNC_USR_MASK));
} else {
- port->port.close_delay = newinfo.close_delay;
- port->port.closing_wait = newinfo.closing_wait;
+ port->port.close_delay = ss->close_delay;
+ port->port.closing_wait = ss->closing_wait;
port->port.flags = ((port->port.flags & ~ASYNC_FLAGS) |
- (newinfo.flags & ASYNC_FLAGS));
+ (ss->flags & ASYNC_FLAGS));
}
if (reconfig_port) {
unsigned long flags;
@@ -1130,46 +1129,24 @@ static int isicom_set_serial_info(struct tty_struct *tty,
return 0;
}
-static int isicom_get_serial_info(struct isi_port *port,
- struct serial_struct __user *info)
-{
- struct serial_struct out_info;
-
- mutex_lock(&port->port.mutex);
- memset(&out_info, 0, sizeof(out_info));
-/* out_info.type = ? */
- out_info.line = port - isi_ports;
- out_info.port = port->card->base;
- out_info.irq = port->card->irq;
- out_info.flags = port->port.flags;
-/* out_info.baud_base = ? */
- out_info.close_delay = port->port.close_delay;
- out_info.closing_wait = port->port.closing_wait;
- mutex_unlock(&port->port.mutex);
- if (copy_to_user(info, &out_info, sizeof(out_info)))
- return -EFAULT;
- return 0;
-}
-
-static int isicom_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
+static int isicom_get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
struct isi_port *port = tty->driver_data;
- void __user *argp = (void __user *)arg;
if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
return -ENODEV;
- switch (cmd) {
- case TIOCGSERIAL:
- return isicom_get_serial_info(port, argp);
-
- case TIOCSSERIAL:
- return isicom_set_serial_info(tty, argp);
-
- default:
- return -ENOIOCTLCMD;
- }
+ mutex_lock(&port->port.mutex);
+/* ss->type = ? */
+ ss->line = port - isi_ports;
+ ss->port = port->card->base;
+ ss->irq = port->card->irq;
+ ss->flags = port->port.flags;
+/* ss->baud_base = ? */
+ ss->close_delay = port->port.close_delay;
+ ss->closing_wait = port->port.closing_wait;
+ mutex_unlock(&port->port.mutex);
return 0;
}
@@ -1273,7 +1250,6 @@ static const struct tty_operations isicom_ops = {
.flush_chars = isicom_flush_chars,
.write_room = isicom_write_room,
.chars_in_buffer = isicom_chars_in_buffer,
- .ioctl = isicom_ioctl,
.set_termios = isicom_set_termios,
.throttle = isicom_throttle,
.unthrottle = isicom_unthrottle,
@@ -1284,6 +1260,8 @@ static const struct tty_operations isicom_ops = {
.tiocmget = isicom_tiocmget,
.tiocmset = isicom_tiocmset,
.break_ctl = isicom_send_break,
+ .get_serial = isicom_get_serial_info,
+ .set_serial = isicom_set_serial_info,
};
static const struct tty_port_operations isicom_port_ops = {
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 250a19f042d7..3a1a5e0ee93f 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -221,8 +221,8 @@ static int MoxaPortRxQueue(struct moxa_port *);
static int MoxaPortTxFree(struct moxa_port *);
static void MoxaPortTxDisable(struct moxa_port *);
static void MoxaPortTxEnable(struct moxa_port *);
-static int moxa_get_serial_info(struct moxa_port *, struct serial_struct __user *);
-static int moxa_set_serial_info(struct moxa_port *, struct serial_struct __user *);
+static int moxa_get_serial_info(struct tty_struct *, struct serial_struct *);
+static int moxa_set_serial_info(struct tty_struct *, struct serial_struct *);
static void MoxaSetFifo(struct moxa_port *port, int enable);
/*
@@ -375,16 +375,6 @@ copy:
}
break;
}
- case TIOCGSERIAL:
- mutex_lock(&ch->port.mutex);
- ret = moxa_get_serial_info(ch, argp);
- mutex_unlock(&ch->port.mutex);
- break;
- case TIOCSSERIAL:
- mutex_lock(&ch->port.mutex);
- ret = moxa_set_serial_info(ch, argp);
- mutex_unlock(&ch->port.mutex);
- break;
default:
ret = -ENOIOCTLCMD;
}
@@ -415,6 +405,8 @@ static const struct tty_operations moxa_ops = {
.break_ctl = moxa_break_ctl,
.tiocmget = moxa_tiocmget,
.tiocmset = moxa_tiocmset,
+ .set_serial = moxa_set_serial_info,
+ .get_serial = moxa_get_serial_info,
};
static const struct tty_port_operations moxa_port_ops = {
@@ -2034,46 +2026,55 @@ static void MoxaPortTxEnable(struct moxa_port *port)
moxafunc(port->tableAddr, FC_SetXonState, Magic_code);
}
-static int moxa_get_serial_info(struct moxa_port *info,
- struct serial_struct __user *retinfo)
+static int moxa_get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct tmp = {
- .type = info->type,
- .line = info->port.tty->index,
- .flags = info->port.flags,
- .baud_base = 921600,
- .close_delay = info->port.close_delay
- };
- return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
+ struct moxa_port *info = tty->driver_data;
+
+ if (tty->index == MAX_PORTS)
+ return -EINVAL;
+ if (!info)
+ return -ENODEV;
+ mutex_lock(&info->port.mutex);
+	ss->type = info->type;
+	ss->line = info->port.tty->index;
+	ss->flags = info->port.flags;
+	ss->baud_base = 921600;
+ ss->close_delay = info->port.close_delay;
+ mutex_unlock(&info->port.mutex);
+ return 0;
}
-static int moxa_set_serial_info(struct moxa_port *info,
- struct serial_struct __user *new_info)
+static int moxa_set_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct new_serial;
+ struct moxa_port *info = tty->driver_data;
- if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
- return -EFAULT;
+ if (tty->index == MAX_PORTS)
+ return -EINVAL;
+ if (!info)
+ return -ENODEV;
- if (new_serial.irq != 0 || new_serial.port != 0 ||
- new_serial.custom_divisor != 0 ||
- new_serial.baud_base != 921600)
+ if (ss->irq != 0 || ss->port != 0 ||
+ ss->custom_divisor != 0 ||
+ ss->baud_base != 921600)
return -EPERM;
+ mutex_lock(&info->port.mutex);
if (!capable(CAP_SYS_ADMIN)) {
- if (((new_serial.flags & ~ASYNC_USR_MASK) !=
- (info->port.flags & ~ASYNC_USR_MASK)))
+ if (((ss->flags & ~ASYNC_USR_MASK) !=
+ (info->port.flags & ~ASYNC_USR_MASK))) {
+ mutex_unlock(&info->port.mutex);
return -EPERM;
- } else
- info->port.close_delay = new_serial.close_delay * HZ / 100;
-
- new_serial.flags = (new_serial.flags & ~ASYNC_FLAGS);
- new_serial.flags |= (info->port.flags & ASYNC_FLAGS);
+ }
+ }
+ info->port.close_delay = ss->close_delay * HZ / 100;
- MoxaSetFifo(info, new_serial.type == PORT_16550A);
+ MoxaSetFifo(info, ss->type == PORT_16550A);
- info->type = new_serial.type;
+ info->type = ss->type;
+ mutex_unlock(&info->port.mutex);
return 0;
}
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 8bc15cb67a58..9d00ff5ef961 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -1207,76 +1207,90 @@ static int mxser_chars_in_buffer(struct tty_struct *tty)
* ------------------------------------------------------------
*/
static int mxser_get_serial_info(struct tty_struct *tty,
- struct serial_struct __user *retinfo)
+ struct serial_struct *ss)
{
struct mxser_port *info = tty->driver_data;
- struct serial_struct tmp = {
- .type = info->type,
- .line = tty->index,
- .port = info->ioaddr,
- .irq = info->board->irq,
- .flags = info->port.flags,
- .baud_base = info->baud_base,
- .close_delay = info->port.close_delay,
- .closing_wait = info->port.closing_wait,
- .custom_divisor = info->custom_divisor,
- };
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
+ struct tty_port *port = &info->port;
+
+ if (tty->index == MXSER_PORTS)
+ return -ENOTTY;
+
+ mutex_lock(&port->mutex);
+	ss->type = info->type;
+	ss->line = tty->index;
+	ss->port = info->ioaddr;
+	ss->irq = info->board->irq;
+	ss->flags = info->port.flags;
+	ss->baud_base = info->baud_base;
+	ss->close_delay = info->port.close_delay;
+	ss->closing_wait = info->port.closing_wait;
+	ss->custom_divisor = info->custom_divisor;
+ mutex_unlock(&port->mutex);
return 0;
}
static int mxser_set_serial_info(struct tty_struct *tty,
- struct serial_struct __user *new_info)
+ struct serial_struct *ss)
{
struct mxser_port *info = tty->driver_data;
struct tty_port *port = &info->port;
- struct serial_struct new_serial;
speed_t baud;
unsigned long sl_flags;
unsigned int flags;
int retval = 0;
- if (!new_info || !info->ioaddr)
+ if (tty->index == MXSER_PORTS)
+ return -ENOTTY;
+ if (tty_io_error(tty))
+ return -EIO;
+
+ mutex_lock(&port->mutex);
+ if (!info->ioaddr) {
+ mutex_unlock(&port->mutex);
return -ENODEV;
- if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
- return -EFAULT;
+ }
- if (new_serial.irq != info->board->irq ||
- new_serial.port != info->ioaddr)
+ if (ss->irq != info->board->irq ||
+ ss->port != info->ioaddr) {
+ mutex_unlock(&port->mutex);
return -EINVAL;
+ }
flags = port->flags & ASYNC_SPD_MASK;
if (!capable(CAP_SYS_ADMIN)) {
- if ((new_serial.baud_base != info->baud_base) ||
- (new_serial.close_delay != info->port.close_delay) ||
- ((new_serial.flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK)))
+ if ((ss->baud_base != info->baud_base) ||
+ (ss->close_delay != info->port.close_delay) ||
+ ((ss->flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK))) {
+ mutex_unlock(&port->mutex);
return -EPERM;
+ }
info->port.flags = ((info->port.flags & ~ASYNC_USR_MASK) |
- (new_serial.flags & ASYNC_USR_MASK));
+ (ss->flags & ASYNC_USR_MASK));
} else {
/*
* OK, past this point, all the error checking has been done.
* At this point, we start making changes.....
*/
port->flags = ((port->flags & ~ASYNC_FLAGS) |
- (new_serial.flags & ASYNC_FLAGS));
- port->close_delay = new_serial.close_delay * HZ / 100;
- port->closing_wait = new_serial.closing_wait * HZ / 100;
+ (ss->flags & ASYNC_FLAGS));
+ port->close_delay = ss->close_delay * HZ / 100;
+ port->closing_wait = ss->closing_wait * HZ / 100;
port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
- (new_serial.baud_base != info->baud_base ||
- new_serial.custom_divisor !=
+ (ss->baud_base != info->baud_base ||
+ ss->custom_divisor !=
info->custom_divisor)) {
- if (new_serial.custom_divisor == 0)
+ if (ss->custom_divisor == 0) {
+ mutex_unlock(&port->mutex);
return -EINVAL;
- baud = new_serial.baud_base / new_serial.custom_divisor;
+ }
+ baud = ss->baud_base / ss->custom_divisor;
tty_encode_baud_rate(tty, baud, baud);
}
}
- info->type = new_serial.type;
+ info->type = ss->type;
process_txrx_fifo(info);
@@ -1291,6 +1305,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
if (retval == 0)
tty_port_set_initialized(port, 1);
}
+ mutex_unlock(&port->mutex);
return retval;
}
@@ -1660,11 +1675,9 @@ static int mxser_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct mxser_port *info = tty->driver_data;
- struct tty_port *port = &info->port;
struct async_icount cnow;
unsigned long flags;
void __user *argp = (void __user *)arg;
- int retval;
if (tty->index == MXSER_PORTS)
return mxser_ioctl_special(cmd, argp);
@@ -1708,20 +1721,10 @@ static int mxser_ioctl(struct tty_struct *tty,
return 0;
}
- if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT && tty_io_error(tty))
+ if (cmd != TIOCMIWAIT && tty_io_error(tty))
return -EIO;
switch (cmd) {
- case TIOCGSERIAL:
- mutex_lock(&port->mutex);
- retval = mxser_get_serial_info(tty, argp);
- mutex_unlock(&port->mutex);
- return retval;
- case TIOCSSERIAL:
- mutex_lock(&port->mutex);
- retval = mxser_set_serial_info(tty, argp);
- mutex_unlock(&port->mutex);
- return retval;
case TIOCSERGETLSR: /* Get line status register */
return mxser_get_lsr_info(info, argp);
/*
@@ -2325,6 +2328,8 @@ static const struct tty_operations mxser_ops = {
.wait_until_sent = mxser_wait_until_sent,
.tiocmget = mxser_tiocmget,
.tiocmset = mxser_tiocmset,
+ .set_serial = mxser_set_serial_info,
+ .get_serial = mxser_get_serial_info,
.get_icount = mxser_get_icount,
};
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 86b7e20ffd7f..6f7da9a9d76f 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2614,14 +2614,6 @@ static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
}
}
-#ifdef CONFIG_COMPAT
-static long gsmld_compat_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return gsmld_ioctl(tty, file, cmd, arg);
-}
-#endif
-
/*
* Network interface
*
@@ -2833,9 +2825,6 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
.flush_buffer = gsmld_flush_buffer,
.read = gsmld_read,
.write = gsmld_write,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = gsmld_compat_ioctl,
-#endif
.ioctl = gsmld_ioctl,
.poll = gsmld_poll,
.receive_buf = gsmld_receive_buf,
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index dbf1ab36758e..749a608c40b0 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -134,6 +134,10 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr);
static int r3964_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+static int r3964_compat_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+#endif
static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
static __poll_t r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait);
@@ -149,6 +153,9 @@ static struct tty_ldisc_ops tty_ldisc_N_R3964 = {
.read = r3964_read,
.write = r3964_write,
.ioctl = r3964_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = r3964_compat_ioctl,
+#endif
.set_termios = r3964_set_termios,
.poll = r3964_poll,
.receive_buf = r3964_receive_buf,
@@ -1210,6 +1217,21 @@ static int r3964_ioctl(struct tty_struct *tty, struct file *file,
}
}
+#ifdef CONFIG_COMPAT
+static int r3964_compat_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case R3964_ENABLE_SIGNALS:
+ case R3964_SETPRIORITY:
+ case R3964_USE_BCC:
+ return r3964_ioctl(tty, file, cmd, arg);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#endif
+
static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old)
{
TRACE_L("set_termios");
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 678406e0948b..00099a8439d2 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -28,6 +28,7 @@
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/ioctl.h>
+#include <linux/compat.h>
#undef TTY_DEBUG_HANGUP
#ifdef TTY_DEBUG_HANGUP
@@ -488,6 +489,7 @@ static int pty_bsd_ioctl(struct tty_struct *tty,
return -ENOIOCTLCMD;
}
+#ifdef CONFIG_COMPAT
static long pty_bsd_compat_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
@@ -495,8 +497,11 @@ static long pty_bsd_compat_ioctl(struct tty_struct *tty,
* PTY ioctls don't require any special translation between 32-bit and
* 64-bit userspace, they are already compatible.
*/
- return pty_bsd_ioctl(tty, cmd, arg);
+ return pty_bsd_ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
}
+#else
+#define pty_bsd_compat_ioctl NULL
+#endif
static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
/*
@@ -676,6 +681,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
return -ENOIOCTLCMD;
}
+#ifdef CONFIG_COMPAT
static long pty_unix98_compat_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
@@ -683,8 +689,12 @@ static long pty_unix98_compat_ioctl(struct tty_struct *tty,
* PTY ioctls don't require any special translation between 32-bit and
* 64-bit userspace, they are already compatible.
*/
- return pty_unix98_ioctl(tty, cmd, arg);
+ return pty_unix98_ioctl(tty, cmd,
+ cmd == TIOCSIG ? arg : (unsigned long)compat_ptr(arg));
}
+#else
+#define pty_unix98_compat_ioctl NULL
+#endif
/**
* ptm_unix98_lookup - find a pty master
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index fa8dcb470640..d31b975dd3fd 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -630,10 +630,6 @@ static int dw8250_probe(struct platform_device *pdev)
if (!data->skip_autocfg)
dw8250_setup_port(p);
-#ifdef CONFIG_PM
- uart.capabilities |= UART_CAP_RPM;
-#endif
-
/* If we have a valid fifosize, try hooking up DMA */
if (p->fifosize) {
data->dma.rxconf.src_maxburst = p->fifosize / 4;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index df8bd0c7b97d..32886c304641 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -118,6 +118,7 @@ config SERIAL_ATMEL
depends on ARCH_AT91 || COMPILE_TEST
select SERIAL_CORE
select SERIAL_MCTRL_GPIO if GPIOLIB
+ select MFD_AT91_USART
help
This enables the driver for the on-chip UARTs of the Atmel
AT91 processors.
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 8e4428725848..267d4d1de3f8 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -193,8 +193,7 @@ static struct console atmel_console;
#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
- { .compatible = "atmel,at91rm9200-usart" },
- { .compatible = "atmel,at91sam9260-usart" },
+ { .compatible = "atmel,at91rm9200-usart-serial" },
{ /* sentinel */ }
};
#endif
@@ -915,6 +914,7 @@ static void atmel_tx_dma(struct uart_port *port)
static int atmel_prepare_tx_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct device *mfd_dev = port->dev->parent;
dma_cap_mask_t mask;
struct dma_slave_config config;
int ret, nent;
@@ -922,7 +922,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
+ atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
if (atmel_port->chan_tx == NULL)
goto chan_err;
dev_info(port->dev, "using %s for tx DMA transfers\n",
@@ -1093,6 +1093,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
static int atmel_prepare_rx_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct device *mfd_dev = port->dev->parent;
struct dma_async_tx_descriptor *desc;
dma_cap_mask_t mask;
struct dma_slave_config config;
@@ -1104,7 +1105,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
dma_cap_zero(mask);
dma_cap_set(DMA_CYCLIC, mask);
- atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
+ atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
if (atmel_port->chan_rx == NULL)
goto chan_err;
dev_info(port->dev, "using %s for rx DMA transfers\n",
@@ -2222,8 +2223,8 @@ static const char *atmel_type(struct uart_port *port)
*/
static void atmel_release_port(struct uart_port *port)
{
- struct platform_device *pdev = to_platform_device(port->dev);
- int size = pdev->resource[0].end - pdev->resource[0].start + 1;
+ struct platform_device *mpdev = to_platform_device(port->dev->parent);
+ int size = resource_size(mpdev->resource);
release_mem_region(port->mapbase, size);
@@ -2238,8 +2239,8 @@ static void atmel_release_port(struct uart_port *port)
*/
static int atmel_request_port(struct uart_port *port)
{
- struct platform_device *pdev = to_platform_device(port->dev);
- int size = pdev->resource[0].end - pdev->resource[0].start + 1;
+ struct platform_device *mpdev = to_platform_device(port->dev->parent);
+ int size = resource_size(mpdev->resource);
if (!request_mem_region(port->mapbase, size, "atmel_serial"))
return -EBUSY;
@@ -2341,27 +2342,28 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
{
int ret;
struct uart_port *port = &atmel_port->uart;
+ struct platform_device *mpdev = to_platform_device(pdev->dev.parent);
atmel_init_property(atmel_port, pdev);
atmel_set_ops(port);
- uart_get_rs485_mode(&pdev->dev, &port->rs485);
+ uart_get_rs485_mode(&mpdev->dev, &port->rs485);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
port->ops = &atmel_pops;
port->fifosize = 1;
port->dev = &pdev->dev;
- port->mapbase = pdev->resource[0].start;
- port->irq = pdev->resource[1].start;
+ port->mapbase = mpdev->resource[0].start;
+ port->irq = mpdev->resource[1].start;
port->rs485_config = atmel_config_rs485;
- port->membase = NULL;
+ port->membase = NULL;
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
/* for console, the clock could already be configured */
if (!atmel_port->clk) {
- atmel_port->clk = clk_get(&pdev->dev, "usart");
+ atmel_port->clk = clk_get(&mpdev->dev, "usart");
if (IS_ERR(atmel_port->clk)) {
ret = PTR_ERR(atmel_port->clk);
atmel_port->clk = NULL;
@@ -2694,13 +2696,22 @@ static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
static int atmel_serial_probe(struct platform_device *pdev)
{
struct atmel_uart_port *atmel_port;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.parent->of_node;
void *data;
int ret = -ENODEV;
bool rs485_enabled;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
+	/*
+	 * In the device tree there is no node with
+	 * "atmel,at91rm9200-usart-serial" as its compatible string. This
+	 * driver is probed by the at91-usart MFD driver, which is just a
+	 * wrapper over the atmel_serial and spi-at91-usart drivers. All
+	 * attributes needed by this driver are found in the of_node of
+	 * the parent.
+	 */
+ pdev->dev.of_node = np;
+
ret = of_alias_get_id(np, "serial");
if (ret < 0)
/* port id not found in platform data nor device-tree aliases:
@@ -2836,6 +2847,7 @@ static int atmel_serial_remove(struct platform_device *pdev)
clk_put(atmel_port->clk);
atmel_port->clk = NULL;
+ pdev->dev.of_node = NULL;
return ret;
}
@@ -2846,7 +2858,7 @@ static struct platform_driver atmel_serial_driver = {
.suspend = atmel_serial_suspend,
.resume = atmel_serial_resume,
.driver = {
- .name = "atmel_usart",
+ .name = "atmel_usart_serial",
.of_match_table = of_match_ptr(atmel_serial_dt_ids),
},
};
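
The probe chain assumed by the comment in atmel_serial_probe() follows the usual MFD shape: the parent driver owns the DT node and spawns children that reach back up for shared resources. A sketch of that pattern in general form (foo_* names illustrative):

	static int foo_child_probe(struct platform_device *pdev)
	{
		/* The MFD core registered us; the USART node is the parent's. */
		struct platform_device *mpdev =
			to_platform_device(pdev->dev.parent);

		return foo_setup(mpdev->resource);	/* hypothetical helper */
	}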
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 24a5f05e769b..e5389591bb4f 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1054,8 +1054,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
/* Get the address of the host memory buffer.
*/
bdp = pinfo->rx_cur;
- while (bdp->cbd_sc & BD_SC_EMPTY)
- ;
+ if (bdp->cbd_sc & BD_SC_EMPTY)
+ return NO_POLL_CHAR;
/* If the buffer address is in the CPM DPRAM, don't
* convert it.
@@ -1090,7 +1090,11 @@ static int cpm_get_poll_char(struct uart_port *port)
poll_chars = 0;
}
if (poll_chars <= 0) {
- poll_chars = poll_wait_key(poll_buf, pinfo);
+ int ret = poll_wait_key(poll_buf, pinfo);
+
+ if (ret == NO_POLL_CHAR)
+ return ret;
+ poll_chars = ret;
pollp = poll_buf;
}
poll_chars--;
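
Returning NO_POLL_CHAR instead of spinning matches the uart_ops ->poll_get_char() contract: pollers such as kgdb call it non-blockingly and retry. A sketch of the consuming side under that contract (handle_char() is illustrative):

	for (;;) {
		int c = port->ops->poll_get_char(port);

		if (c == NO_POLL_CHAR)
			continue;	/* nothing pending; poll again */
		handle_char(c);
	}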
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 51e47a63d61a..3f8d1274fc85 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -979,7 +979,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
struct circ_buf *ring = &sport->rx_ring;
int ret, nent;
int bits, baud;
- struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port);
+ struct tty_port *port = &sport->port.state->port;
+ struct tty_struct *tty = port->tty;
struct ktermios *termios = &tty->termios;
baud = tty_get_baud_rate(tty);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 239c0fa2e981..0f67197a3783 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2351,6 +2351,14 @@ static int imx_uart_probe(struct platform_device *pdev)
ret);
return ret;
}
+
+ ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+ ret);
+ return ret;
+ }
} else {
ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
dev_name(&pdev->dev), sport);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index d04b5eeea3c6..170e446a2f62 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -511,6 +511,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
termios->c_cflag &= CREAD | CBAUD;
termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
+ termios->c_cflag |= CS8;
}
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 29ec34387246..1515074e18fb 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -868,8 +868,8 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
geni_se_select_mode(&port->se, port->xfer_mode);
if (!uart_console(uport)) {
- port->rx_fifo = devm_kzalloc(uport->dev,
- port->rx_fifo_depth * sizeof(u32), GFP_KERNEL);
+ port->rx_fifo = devm_kcalloc(uport->dev,
+ port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
if (!port->rx_fifo)
return -ENOMEM;
}
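
devm_kcalloc() is the overflow-checked counterpart of an open-coded devm_kzalloc(n * size): if the multiplication would overflow, it returns NULL instead of silently allocating a short buffer. The same idiom in isolation:

	/* n * sizeof(u32) cannot silently wrap here */
	u32 *buf = devm_kcalloc(dev, n, sizeof(u32), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;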
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 80bb56facfb6..54726c3f74c6 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -778,17 +778,13 @@ out:
return ret;
}
-static int uart_get_info_user(struct tty_port *port,
- struct serial_struct __user *retinfo)
+static int uart_get_info_user(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct tmp;
-
- if (uart_get_info(port, &tmp) < 0)
- return -EIO;
+ struct uart_state *state = tty->driver_data;
+ struct tty_port *port = &state->port;
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
- return 0;
+ return uart_get_info(port, ss) < 0 ? -EIO : 0;
}
static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
@@ -990,16 +986,13 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
return retval;
}
-static int uart_set_info_user(struct tty_struct *tty, struct uart_state *state,
- struct serial_struct __user *newinfo)
+static int uart_set_info_user(struct tty_struct *tty, struct serial_struct *ss)
{
- struct serial_struct new_serial;
+ struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
int retval;
- if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
- return -EFAULT;
-
+ down_write(&tty->termios_rwsem);
/*
* This semaphore protects port->count. It is also
* very useful to prevent opens. Also, take the
@@ -1008,8 +1001,9 @@ static int uart_set_info_user(struct tty_struct *tty, struct uart_state *state,
* under us.
*/
mutex_lock(&port->mutex);
- retval = uart_set_info(tty, port, state, &new_serial);
+ retval = uart_set_info(tty, port, state, ss);
mutex_unlock(&port->mutex);
+ up_write(&tty->termios_rwsem);
return retval;
}
@@ -1325,26 +1319,11 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
* These ioctls don't rely on the hardware to be present.
*/
switch (cmd) {
- case TIOCGSERIAL:
- ret = uart_get_info_user(port, uarg);
- break;
-
- case TIOCSSERIAL:
- down_write(&tty->termios_rwsem);
- ret = uart_set_info_user(tty, state, uarg);
- up_write(&tty->termios_rwsem);
- break;
-
case TIOCSERCONFIG:
down_write(&tty->termios_rwsem);
ret = uart_do_autoconfig(tty, state);
up_write(&tty->termios_rwsem);
break;
-
- case TIOCSERGWILD: /* obsolete */
- case TIOCSERSWILD: /* obsolete */
- ret = 0;
- break;
}
if (ret != -ENOIOCTLCMD)
@@ -2413,6 +2392,8 @@ static const struct tty_operations uart_ops = {
#endif
.tiocmget = uart_tiocmget,
.tiocmset = uart_tiocmset,
+ .set_serial = uart_set_info_user,
+ .get_serial = uart_get_info_user,
.get_icount = uart_get_icount,
#ifdef CONFIG_CONSOLE_POLL
.poll_init = uart_poll_init,
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 1c06325beaca..39ed56214cd3 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -40,7 +40,7 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
{
enum mctrl_gpio_idx i;
struct gpio_desc *desc_array[UART_GPIO_MAX];
- int value_array[UART_GPIO_MAX];
+ DECLARE_BITMAP(values, UART_GPIO_MAX);
unsigned int count = 0;
if (gpios == NULL)
@@ -49,10 +49,11 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
for (i = 0; i < UART_GPIO_MAX; i++)
if (gpios->gpio[i] && mctrl_gpios_desc[i].dir_out) {
desc_array[count] = gpios->gpio[i];
- value_array[count] = !!(mctrl & mctrl_gpios_desc[i].mctrl);
+ __assign_bit(count, values,
+ mctrl & mctrl_gpios_desc[i].mctrl);
count++;
}
- gpiod_set_array_value(count, desc_array, value_array);
+ gpiod_set_array_value(count, desc_array, NULL, values);
}
EXPORT_SYMBOL_GPL(mctrl_gpio_set);
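
This tracks the gpiod_set_array_value() interface change in this cycle: values now travel as a bitmap plus an optional struct gpio_array fast-path cookie rather than an int array. The bare shape of the new call (descriptors obtained elsewhere; MAX_LINES and state are illustrative):

	DECLARE_BITMAP(values, MAX_LINES);
	struct gpio_desc *descs[MAX_LINES];
	unsigned int count = 0;

	__assign_bit(count, values, state);	/* one bit per descriptor */
	descs[count++] = desc;

	/* NULL array_info: no fast-path information available */
	gpiod_set_array_value(count, descs, NULL, values);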
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ac4424bf6b13..ab3f6e91853d 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -292,6 +292,33 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
},
/*
+ * The "SCIFA" that is in RZ/T and RZ/A2.
+ * It looks like a normal SCIF with FIFO data, but with a
+ * compressed address space. Also, the break out of interrupts
+ * are different: ERI/BRI, RXI, TXI, TEI, DRI.
+ */
+ [SCIx_RZ_SCIFA_REGTYPE] = {
+ .regs = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x02, 8 },
+ [SCSCR] = { 0x04, 16 },
+ [SCxTDR] = { 0x06, 8 },
+ [SCxSR] = { 0x08, 16 },
+ [SCxRDR] = { 0x0A, 8 },
+ [SCFCR] = { 0x0C, 16 },
+ [SCFDR] = { 0x0E, 16 },
+ [SCSPTR] = { 0x10, 16 },
+ [SCLSR] = { 0x12, 16 },
+ },
+ .fifosize = 16,
+ .overrun_reg = SCLSR,
+ .overrun_mask = SCLSR_ORER,
+ .sampling_rate_mask = SCI_SR(32),
+ .error_mask = SCIF_DEFAULT_ERROR_MASK,
+ .error_clear = SCIF_ERROR_CLEAR,
+ },
+
+ /*
* Common SH-3 SCIF definitions.
*/
[SCIx_SH3_SCIF_REGTYPE] = {
@@ -319,15 +346,15 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
[SCIx_SH4_SCIF_REGTYPE] = {
.regs = {
[SCSMR] = { 0x00, 16 },
- [SCBRR] = { 0x02, 8 },
- [SCSCR] = { 0x04, 16 },
- [SCxTDR] = { 0x06, 8 },
- [SCxSR] = { 0x08, 16 },
- [SCxRDR] = { 0x0a, 8 },
- [SCFCR] = { 0x0c, 16 },
- [SCFDR] = { 0x0e, 16 },
- [SCSPTR] = { 0x10, 16 },
- [SCLSR] = { 0x12, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCSPTR] = { 0x20, 16 },
+ [SCLSR] = { 0x24, 16 },
},
.fifosize = 16,
.overrun_reg = SCLSR,
@@ -2810,7 +2837,7 @@ static int sci_init_single(struct platform_device *dev,
{
struct uart_port *port = &sci_port->port;
const struct resource *res;
- unsigned int i, regtype;
+ unsigned int i;
int ret;
sci_port->cfg = p;
@@ -2847,7 +2874,6 @@ static int sci_init_single(struct platform_device *dev,
if (unlikely(sci_port->params == NULL))
return -EINVAL;
- regtype = sci_port->params - sci_port_params;
switch (p->type) {
case PORT_SCIFB:
sci_port->rx_trigger = 48;
@@ -2902,10 +2928,6 @@ static int sci_init_single(struct platform_device *dev,
port->regshift = 1;
}
- if (regtype == SCIx_SH4_SCIF_REGTYPE)
- if (sci_port->reg_size >= 0x20)
- port->regshift = 1;
-
/*
* The UART port needs an IRQ value, so we peg this to the RX IRQ
* for the multi-IRQ ports, which is where we are primarily
@@ -3110,6 +3132,10 @@ static const struct of_device_id of_sci_match[] = {
.compatible = "renesas,scif-r7s72100",
.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
},
+ {
+ .compatible = "renesas,scif-r7s9210",
+ .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
+ },
/* Family-specific types */
{
.compatible = "renesas,rcar-gen1-scif",
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index fbdf4d01c6a9..d55c858d6058 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -2959,8 +2959,7 @@ static int mgsl_ioctl(struct tty_struct *tty,
if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
return -ENODEV;
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
- (cmd != TIOCMIWAIT)) {
+ if (cmd != TIOCMIWAIT) {
if (tty_io_error(tty))
return -EIO;
}
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index a94086597ebd..e8a9047de451 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1029,8 +1029,7 @@ static int ioctl(struct tty_struct *tty,
return -ENODEV;
DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd));
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
- (cmd != TIOCMIWAIT)) {
+ if (cmd != TIOCMIWAIT) {
if (tty_io_error(tty))
return -EIO;
}
@@ -1186,14 +1185,13 @@ static long slgt_compat_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct slgt_info *info = tty->driver_data;
- int rc = -ENOIOCTLCMD;
+ int rc;
if (sanity_check(info, tty->name, "compat_ioctl"))
return -ENODEV;
DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));
switch (cmd) {
-
case MGSL_IOCSPARAMS32:
rc = set_params32(info, compat_ptr(arg));
break;
@@ -1213,18 +1211,11 @@ static long slgt_compat_ioctl(struct tty_struct *tty,
case MGSL_IOCWAITGPIO:
case MGSL_IOCGXSYNC:
case MGSL_IOCGXCTRL:
- case MGSL_IOCSTXIDLE:
- case MGSL_IOCTXENABLE:
- case MGSL_IOCRXENABLE:
- case MGSL_IOCTXABORT:
- case TIOCMIWAIT:
- case MGSL_IOCSIF:
- case MGSL_IOCSXSYNC:
- case MGSL_IOCSXCTRL:
- rc = ioctl(tty, cmd, arg);
+ rc = ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
break;
+ default:
+ rc = ioctl(tty, cmd, arg);
}
-
DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
return rc;
}
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 1e4d5b9c981a..fcb91bf7a15b 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1259,8 +1259,7 @@ static int ioctl(struct tty_struct *tty,
if (sanity_check(info, tty->name, "ioctl"))
return -ENODEV;
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
- (cmd != TIOCMIWAIT)) {
+ if (cmd != TIOCMIWAIT) {
if (tty_io_error(tty))
return -EIO;
}
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 06ed20dd01ba..ad1ee5d01b53 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -348,7 +348,7 @@ static void send_sig_all(int sig)
if (is_global_init(p))
continue;
- do_send_sig_info(sig, SEND_SIG_FORCED, p, PIDTYPE_MAX);
+ do_send_sig_info(sig, SEND_SIG_PRIV, p, PIDTYPE_MAX);
}
read_unlock(&tasklist_lock);
}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 32bc3e3fe4d3..da3c1c2f73c4 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -97,6 +97,7 @@
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/ratelimit.h>
+#include <linux/compat.h>
#include <linux/uaccess.h>
@@ -1255,6 +1256,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
static int tty_reopen(struct tty_struct *tty)
{
struct tty_driver *driver = tty->driver;
+ int retval;
if (driver->type == TTY_DRIVER_TYPE_PTY &&
driver->subtype == PTY_TYPE_MASTER)
@@ -1268,10 +1270,14 @@ static int tty_reopen(struct tty_struct *tty)
tty->count++;
- if (!tty->ldisc)
- return tty_ldisc_reinit(tty, tty->termios.c_line);
+ if (tty->ldisc)
+ return 0;
- return 0;
+ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+ if (retval)
+ tty->count--;
+
+ return retval;
}
/**
@@ -2288,34 +2294,6 @@ static int tioccons(struct file *file)
}
/**
- * fionbio - non blocking ioctl
- * @file: file to set blocking value
- * @p: user parameter
- *
- * Historical tty interfaces had a blocking control ioctl before
- * the generic functionality existed. This piece of history is preserved
- * in the expected tty API of posix OS's.
- *
- * Locking: none, the open file handle ensures it won't go away.
- */
-
-static int fionbio(struct file *file, int __user *p)
-{
- int nonblock;
-
- if (get_user(nonblock, p))
- return -EFAULT;
-
- spin_lock(&file->f_lock);
- if (nonblock)
- file->f_flags |= O_NONBLOCK;
- else
- file->f_flags &= ~O_NONBLOCK;
- spin_unlock(&file->f_lock);
- return 0;
-}
-
-/**
* tiocsetd - set line discipline
* @tty: tty device
* @p: pointer to user data
@@ -2483,22 +2461,40 @@ static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
return 0;
}
-static void tty_warn_deprecated_flags(struct serial_struct __user *ss)
+static int tty_tiocsserial(struct tty_struct *tty, struct serial_struct __user *ss)
{
static DEFINE_RATELIMIT_STATE(depr_flags,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
char comm[TASK_COMM_LEN];
+ struct serial_struct v;
int flags;
- if (get_user(flags, &ss->flags))
- return;
+ if (copy_from_user(&v, ss, sizeof(struct serial_struct)))
+ return -EFAULT;
- flags &= ASYNC_DEPRECATED;
+ flags = v.flags & ASYNC_DEPRECATED;
if (flags && __ratelimit(&depr_flags))
pr_warn("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
__func__, get_task_comm(comm, current), flags);
+ if (!tty->ops->set_serial)
+ return -ENOTTY;
+ return tty->ops->set_serial(tty, &v);
+}
+
+static int tty_tiocgserial(struct tty_struct *tty, struct serial_struct __user *ss)
+{
+ struct serial_struct v;
+ int err;
+
+ memset(&v, 0, sizeof(struct serial_struct));
+ if (!tty->ops->get_serial)
+ return -ENOTTY;
+ err = tty->ops->get_serial(tty, &v);
+ if (!err && copy_to_user(ss, &v, sizeof(struct serial_struct)))
+ err = -EFAULT;
+ return err;
}
/*
@@ -2561,8 +2557,6 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return tiocswinsz(real_tty, p);
case TIOCCONS:
return real_tty != tty ? -EINVAL : tioccons(file);
- case FIONBIO:
- return fionbio(file, p);
case TIOCEXCL:
set_bit(TTY_EXCLUSIVE, &tty->flags);
return 0;
@@ -2617,11 +2611,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case TIOCMBIS:
return tty_tiocmset(tty, cmd, p);
case TIOCGICOUNT:
- retval = tty_tiocgicount(tty, p);
- /* For the moment allow fall through to the old method */
- if (retval != -EINVAL)
- return retval;
- break;
+ return tty_tiocgicount(tty, p);
case TCFLSH:
switch (arg) {
case TCIFLUSH:
@@ -2632,8 +2622,9 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
break;
case TIOCSSERIAL:
- tty_warn_deprecated_flags(p);
- break;
+ return tty_tiocsserial(tty, p);
+ case TIOCGSERIAL:
+ return tty_tiocgserial(tty, p);
case TIOCGPTPEER:
/* Special because the struct file is needed */
return ptm_open_peer(file, tty, (int)arg);
@@ -2661,6 +2652,81 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#ifdef CONFIG_COMPAT
+
+struct serial_struct32 {
+ compat_int_t type;
+ compat_int_t line;
+ compat_uint_t port;
+ compat_int_t irq;
+ compat_int_t flags;
+ compat_int_t xmit_fifo_size;
+ compat_int_t custom_divisor;
+ compat_int_t baud_base;
+ unsigned short close_delay;
+ char io_type;
+ char reserved_char[1];
+ compat_int_t hub6;
+ unsigned short closing_wait; /* time to wait before closing */
+ unsigned short closing_wait2; /* no longer used... */
+ compat_uint_t iomem_base;
+ unsigned short iomem_reg_shift;
+ unsigned int port_high;
+ /* compat_ulong_t iomap_base FIXME */
+ compat_int_t reserved[1];
+};
+
+static int compat_tty_tiocsserial(struct tty_struct *tty,
+ struct serial_struct32 __user *ss)
+{
+ static DEFINE_RATELIMIT_STATE(depr_flags,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ char comm[TASK_COMM_LEN];
+ struct serial_struct32 v32;
+ struct serial_struct v;
+ int flags;
+
+ if (copy_from_user(&v32, ss, sizeof(struct serial_struct32)))
+ return -EFAULT;
+
+ memcpy(&v, &v32, offsetof(struct serial_struct32, iomem_base));
+ v.iomem_base = compat_ptr(v32.iomem_base);
+ v.iomem_reg_shift = v32.iomem_reg_shift;
+ v.port_high = v32.port_high;
+ v.iomap_base = 0;
+
+ flags = v.flags & ASYNC_DEPRECATED;
+
+ if (flags && __ratelimit(&depr_flags))
+ pr_warn("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
+ __func__, get_task_comm(comm, current), flags);
+ if (!tty->ops->set_serial)
+ return -ENOTTY;
+ return tty->ops->set_serial(tty, &v);
+}
+
+static int compat_tty_tiocgserial(struct tty_struct *tty,
+ struct serial_struct32 __user *ss)
+{
+ struct serial_struct32 v32;
+ struct serial_struct v;
+	int err;
+
+	memset(&v, 0, sizeof(struct serial_struct));
+	if (!tty->ops->get_serial)
+ return -ENOTTY;
+ err = tty->ops->get_serial(tty, &v);
+ if (!err) {
+ memcpy(&v32, &v, offsetof(struct serial_struct32, iomem_base));
+		v32.iomem_base = (unsigned long)v.iomem_base >> 32 ?
+			0xffffffff : ptr_to_compat(v.iomem_base);
+ v32.iomem_reg_shift = v.iomem_reg_shift;
+ v32.port_high = v.port_high;
+ if (copy_to_user(ss, &v32, sizeof(struct serial_struct32)))
+ err = -EFAULT;
+ }
+ return err;
+}
static long tty_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -2668,9 +2734,90 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
struct tty_ldisc *ld;
int retval = -ENOIOCTLCMD;
+ switch (cmd) {
+ case TIOCSTI:
+ case TIOCGWINSZ:
+ case TIOCSWINSZ:
+ case TIOCGEXCL:
+ case TIOCGETD:
+ case TIOCSETD:
+ case TIOCGDEV:
+ case TIOCMGET:
+ case TIOCMSET:
+ case TIOCMBIC:
+ case TIOCMBIS:
+ case TIOCGICOUNT:
+ case TIOCGPGRP:
+ case TIOCSPGRP:
+ case TIOCGSID:
+ case TIOCSERGETLSR:
+ case TIOCGRS485:
+ case TIOCSRS485:
+#ifdef TIOCGETP
+ case TIOCGETP:
+ case TIOCSETP:
+ case TIOCSETN:
+#endif
+#ifdef TIOCGETC
+ case TIOCGETC:
+ case TIOCSETC:
+#endif
+#ifdef TIOCGLTC
+ case TIOCGLTC:
+ case TIOCSLTC:
+#endif
+ case TCSETSF:
+ case TCSETSW:
+ case TCSETS:
+ case TCGETS:
+#ifdef TCGETS2
+ case TCGETS2:
+ case TCSETSF2:
+ case TCSETSW2:
+ case TCSETS2:
+#endif
+ case TCGETA:
+ case TCSETAF:
+ case TCSETAW:
+ case TCSETA:
+ case TIOCGLCKTRMIOS:
+ case TIOCSLCKTRMIOS:
+#ifdef TCGETX
+ case TCGETX:
+ case TCSETX:
+ case TCSETXW:
+ case TCSETXF:
+#endif
+ case TIOCGSOFTCAR:
+ case TIOCSSOFTCAR:
+ return tty_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+ case TIOCCONS:
+ case TIOCEXCL:
+ case TIOCNXCL:
+ case TIOCVHANGUP:
+ case TIOCSBRK:
+ case TIOCCBRK:
+ case TCSBRK:
+ case TCSBRKP:
+ case TCFLSH:
+ case TIOCGPTPEER:
+ case TIOCNOTTY:
+ case TIOCSCTTY:
+ case TCXONC:
+ case TIOCMIWAIT:
+ case TIOCSERCONFIG:
+ return tty_ioctl(file, cmd, arg);
+ }
+
if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl"))
return -EINVAL;
+ switch (cmd) {
+ case TIOCSSERIAL:
+ return compat_tty_tiocsserial(tty, compat_ptr(arg));
+ case TIOCGSERIAL:
+ return compat_tty_tiocgserial(tty, compat_ptr(arg));
+ }
if (tty->ops->compat_ioctl) {
retval = tty->ops->compat_ioctl(tty, cmd, arg);
if (retval != -ENOIOCTLCMD)
@@ -2682,8 +2829,9 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
return hung_up_tty_compat_ioctl(file, cmd, arg);
if (ld->ops->compat_ioctl)
retval = ld->ops->compat_ioctl(tty, file, cmd, arg);
- else
- retval = n_tty_compat_ioctl_helper(tty, file, cmd, arg);
+ if (retval == -ENOIOCTLCMD && ld->ops->ioctl)
+		retval = ld->ops->ioctl(tty, file, cmd,
+				(unsigned long)compat_ptr(arg));
tty_ldisc_deref(ld);
return retval;
@@ -2738,7 +2886,7 @@ void __do_SAK(struct tty_struct *tty)
do_each_pid_task(session, PIDTYPE_SID, p) {
tty_notice(tty, "SAK: killed process %d (%s): by session\n",
task_pid_nr(p), p->comm);
- send_sig(SIGKILL, p, 1);
+ group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
} while_each_pid_task(session, PIDTYPE_SID, p);
/* Now kill any processes that happen to have the tty open */
@@ -2746,7 +2894,7 @@ void __do_SAK(struct tty_struct *tty)
if (p->signal->tty == tty) {
tty_notice(tty, "SAK: killed process %d (%s): by controlling tty\n",
task_pid_nr(p), p->comm);
- send_sig(SIGKILL, p, 1);
+ group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
continue;
}
task_lock(p);
@@ -2754,7 +2902,7 @@ void __do_SAK(struct tty_struct *tty)
if (i != 0) {
tty_notice(tty, "SAK: killed process %d (%s): by fd#%d\n",
task_pid_nr(p), p->comm, i - 1);
- force_sig(SIGKILL, p);
+ group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
}
task_unlock(p);
} while_each_thread(g, p);
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index d99fec44036c..9245fffdbceb 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -941,19 +941,3 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
}
}
EXPORT_SYMBOL(n_tty_ioctl_helper);
-
-#ifdef CONFIG_COMPAT
-long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case TIOCGLCKTRMIOS:
- case TIOCSLCKTRMIOS:
- return tty_mode_ioctl(tty, file, cmd, (unsigned long) compat_ptr(arg));
- default:
- return -ENOIOCTLCMD;
- }
-}
-EXPORT_SYMBOL(n_tty_compat_ioctl_helper);
-#endif
-
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index a78ad10a119b..8b0ed139592f 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -32,6 +32,8 @@
#include <asm/io.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
+
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
#include <linux/kbd_diacr.h>
@@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty,
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
ret = -ENXIO;
else {
+ vsa.console = array_index_nospec(vsa.console,
+ MAX_NR_CONSOLES + 1);
vsa.console--;
console_lock();
ret = vc_allocate(vsa.console);
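
The + 1 in the clamp is deliberate: vsa.console is still 1-based at this
point (valid values 1..MAX_NR_CONSOLES) and is only decremented
afterwards. The general Spectre-v1 pattern, sketched with hypothetical
names (struct item, get_item):

#include <linux/errno.h>
#include <linux/nospec.h>

struct item { int val; };

static int get_item(const struct item *arr, unsigned int idx,
		    unsigned int nr)
{
	if (idx >= nr)
		return -EINVAL;
	/* clamp so even a mispredicted branch cannot index past nr - 1 */
	idx = array_index_nospec(idx, nr);
	return arr[idx].val;
}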
@@ -1171,17 +1175,13 @@ long vt_compat_ioctl(struct tty_struct *tty,
{
struct vc_data *vc = tty->driver_data;
struct console_font_op op; /* used in multiple places here */
- unsigned int console;
- void __user *up = (void __user *)arg;
+ unsigned int console = vc->vc_num;
+ void __user *up = compat_ptr(arg);
int perm;
- int ret = 0;
- console = vc->vc_num;
- if (!vc_cons_allocated(console)) { /* impossible? */
- ret = -ENOIOCTLCMD;
- goto out;
- }
+ if (!vc_cons_allocated(console)) /* impossible? */
+ return -ENOIOCTLCMD;
/*
* To have permissions to do most of the vt ioctls, we either have
@@ -1197,17 +1197,14 @@ long vt_compat_ioctl(struct tty_struct *tty,
*/
case PIO_FONTX:
case GIO_FONTX:
- ret = compat_fontx_ioctl(cmd, up, perm, &op);
- break;
+ return compat_fontx_ioctl(cmd, up, perm, &op);
case KDFONTOP:
- ret = compat_kdfontop_ioctl(up, perm, &op, vc);
- break;
+ return compat_kdfontop_ioctl(up, perm, &op, vc);
case PIO_UNIMAP:
case GIO_UNIMAP:
- ret = compat_unimap_ioctl(cmd, up, perm, vc);
- break;
+ return compat_unimap_ioctl(cmd, up, perm, vc);
/*
* all these treat 'arg' as an integer
@@ -1232,21 +1229,15 @@ long vt_compat_ioctl(struct tty_struct *tty,
case VT_DISALLOCATE:
case VT_RESIZE:
case VT_RESIZEX:
- goto fallback;
+ return vt_ioctl(tty, cmd, arg);
/*
* the rest has a compatible data structure behind arg,
* but we have to convert it to a proper 64 bit pointer.
*/
default:
- arg = (unsigned long)compat_ptr(arg);
- goto fallback;
+ return vt_ioctl(tty, cmd, (unsigned long)up);
}
-out:
- return ret;
-
-fallback:
- return vt_ioctl(tty, cmd, arg);
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 19f5f5f2a48a..09b37c0d075d 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -364,8 +364,7 @@ static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
ci_hdrc_imx_remove(pdev);
}
-#ifdef CONFIG_PM
-static int imx_controller_suspend(struct device *dev)
+static int __maybe_unused imx_controller_suspend(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
@@ -377,7 +376,7 @@ static int imx_controller_suspend(struct device *dev)
return 0;
}
-static int imx_controller_resume(struct device *dev)
+static int __maybe_unused imx_controller_resume(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret = 0;
@@ -408,8 +407,7 @@ clk_disable:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
-static int ci_hdrc_imx_suspend(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_suspend(struct device *dev)
{
int ret;
@@ -431,7 +429,7 @@ static int ci_hdrc_imx_suspend(struct device *dev)
return imx_controller_suspend(dev);
}
-static int ci_hdrc_imx_resume(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_resume(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret;
@@ -445,9 +443,8 @@ static int ci_hdrc_imx_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM_SLEEP */
-static int ci_hdrc_imx_runtime_suspend(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_runtime_suspend(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret;
@@ -466,13 +463,11 @@ static int ci_hdrc_imx_runtime_suspend(struct device *dev)
return imx_controller_suspend(dev);
}
-static int ci_hdrc_imx_runtime_resume(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_runtime_resume(struct device *dev)
{
return imx_controller_resume(dev);
}
-#endif /* CONFIG_PM */
-
static const struct dev_pm_ops ci_hdrc_imx_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ci_hdrc_imx_suspend, ci_hdrc_imx_resume)
SET_RUNTIME_PM_OPS(ci_hdrc_imx_runtime_suspend,
@@ -492,7 +487,7 @@ static struct platform_driver ci_hdrc_imx_driver = {
module_platform_driver(ci_hdrc_imx_driver);
MODULE_ALIAS("platform:imx-usb");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CI HDRC i.MX USB binding");
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_AUTHOR("Richard Zhao <richard.zhao@freescale.com>");
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 85fc6db48e44..7bfcbb23c2a4 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -53,6 +53,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
@@ -723,6 +724,24 @@ static int ci_get_platdata(struct device *dev,
else
cable->connected = false;
}
+
+ platdata->pctl = devm_pinctrl_get(dev);
+ if (!IS_ERR(platdata->pctl)) {
+ struct pinctrl_state *p;
+
+ p = pinctrl_lookup_state(platdata->pctl, "default");
+ if (!IS_ERR(p))
+ platdata->pins_default = p;
+
+ p = pinctrl_lookup_state(platdata->pctl, "host");
+ if (!IS_ERR(p))
+ platdata->pins_host = p;
+
+ p = pinctrl_lookup_state(platdata->pctl, "device");
+ if (!IS_ERR(p))
+ platdata->pins_device = p;
+ }
+
return 0;
}
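
The IS_ERR() checks make all three states optional: boards that do not
describe "host" or "device" pinctrl states in their device tree keep
working unchanged. A sketch of how such a state is applied at role
switch, assuming a DT node with pinctrl-names = "default", "host",
"device" (select_role_pins is a hypothetical helper):

#include <linux/pinctrl/consumer.h>

static void select_role_pins(struct device *dev, const char *role)
{
	struct pinctrl *p = devm_pinctrl_get(dev);
	struct pinctrl_state *s;

	if (IS_ERR(p))
		return;		/* pinctrl is optional for this device */

	s = pinctrl_lookup_state(p, role);
	if (!IS_ERR(s))
		pinctrl_select_state(p, s);
}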
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 4638d9b066be..d858a82c4f44 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -13,6 +13,7 @@
#include <linux/usb/hcd.h>
#include <linux/usb/chipidea.h>
#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
#include "../host/ehci.h"
@@ -153,6 +154,10 @@ static int host_start(struct ci_hdrc *ci)
}
}
+ if (ci->platdata->pins_host)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_host);
+
ret = usb_add_hcd(hcd, 0, 0);
if (ret) {
goto disable_reg;
@@ -197,6 +202,10 @@ static void host_stop(struct ci_hdrc *ci)
}
ci->hcd = NULL;
ci->otg.host = NULL;
+
+ if (ci->platdata->pins_host && ci->platdata->pins_default)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_default);
}
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index db4ceffcf2a6..f25d4827fd49 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -203,14 +203,17 @@ static void ci_otg_work(struct work_struct *work)
}
pm_runtime_get_sync(ci->dev);
+
if (ci->id_event) {
ci->id_event = false;
ci_handle_id_switch(ci);
- } else if (ci->b_sess_valid_event) {
+ }
+
+ if (ci->b_sess_valid_event) {
ci->b_sess_valid_event = false;
ci_handle_vbus_change(ci);
- } else
- dev_err(ci->dev, "unexpected event occurs at %s\n", __func__);
+ }
+
pm_runtime_put_sync(ci->dev);
enable_irq(ci->irq);
diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
index 7e7428e48bfa..4f8b8179ec96 100644
--- a/drivers/usb/chipidea/otg.h
+++ b/drivers/usb/chipidea/otg.h
@@ -17,7 +17,8 @@ void ci_handle_vbus_change(struct ci_hdrc *ci);
static inline void ci_otg_queue_work(struct ci_hdrc *ci)
{
disable_irq_nosync(ci->irq);
- queue_work(ci->wq, &ci->work);
+ if (!queue_work(ci->wq, &ci->work))
+ enable_irq(ci->irq);
}
#endif /* __DRIVERS_USB_CHIPIDEA_OTG_H */
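
The check matters because disable_irq_nosync() and enable_irq() must stay
balanced: queue_work() returns false when the work item is already
pending, and the single worker run that follows performs only one
enable_irq(). Without the new branch, the extra disable would never be
undone. The pairing, sketched with a hypothetical struct ctx:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct ctx {
	int irq;
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void kick_worker(struct ctx *c)
{
	disable_irq_nosync(c->irq);
	if (!queue_work(c->wq, &c->work))
		enable_irq(c->irq);	/* already queued: undo our disable */
}

static void worker_fn(struct work_struct *w)
{
	struct ctx *c = container_of(w, struct ctx, work);

	/* ... handle the event(s) ... */
	enable_irq(c->irq);		/* matches the queueing disable */
}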
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 9852ec5e6e01..829e947cabf5 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg-fsm.h>
@@ -1965,6 +1966,10 @@ void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
static int udc_id_switch_for_device(struct ci_hdrc *ci)
{
+ if (ci->platdata->pins_device)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_device);
+
if (ci->is_otg)
/* Clear and enable BSV irq */
hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
@@ -1983,6 +1988,10 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);
ci->vbus_active = 0;
+
+ if (ci->platdata->pins_device && ci->platdata->pins_default)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_default);
}
/**
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 34ad5bf8acd8..def80ff547e4 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -343,6 +343,8 @@ static int usbmisc_imx6q_init(struct imx_usbmisc_data *data)
} else if (data->oc_polarity == 1) {
/* High active */
reg &= ~(MX6_BM_OVER_CUR_DIS | MX6_BM_OVER_CUR_POLARITY);
+ } else {
+ reg &= ~(MX6_BM_OVER_CUR_DIS);
}
writel(reg, usbmisc->base + data->index * 4);
@@ -633,6 +635,6 @@ static struct platform_driver usbmisc_imx_driver = {
module_platform_driver(usbmisc_imx_driver);
MODULE_ALIAS("platform:usbmisc-imx");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("driver for imx usb non-core registers");
MODULE_AUTHOR("Richard Zhao <richard.zhao@freescale.com>");
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f9b40a9dc4d3..47d75c20c211 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -310,17 +310,17 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
if (difference & ACM_CTRL_DSR)
acm->iocount.dsr++;
- if (difference & ACM_CTRL_BRK)
- acm->iocount.brk++;
- if (difference & ACM_CTRL_RI)
- acm->iocount.rng++;
if (difference & ACM_CTRL_DCD)
acm->iocount.dcd++;
- if (difference & ACM_CTRL_FRAMING)
+ if (newctrl & ACM_CTRL_BRK)
+ acm->iocount.brk++;
+ if (newctrl & ACM_CTRL_RI)
+ acm->iocount.rng++;
+ if (newctrl & ACM_CTRL_FRAMING)
acm->iocount.frame++;
- if (difference & ACM_CTRL_PARITY)
+ if (newctrl & ACM_CTRL_PARITY)
acm->iocount.parity++;
- if (difference & ACM_CTRL_OVERRUN)
+ if (newctrl & ACM_CTRL_OVERRUN)
acm->iocount.overrun++;
spin_unlock_irqrestore(&acm->read_lock, flags);
@@ -355,7 +355,6 @@ static void acm_ctrl_irq(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- acm->nb_index = 0;
dev_dbg(&acm->control->dev,
"%s - urb shutting down with status: %d\n",
__func__, status);
@@ -885,37 +884,28 @@ static int acm_tty_tiocmset(struct tty_struct *tty,
return acm_set_control(acm, acm->ctrlout = newctrl);
}
-static int get_serial_info(struct acm *acm, struct serial_struct __user *info)
+static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
- struct serial_struct tmp;
+ struct acm *acm = tty->driver_data;
- memset(&tmp, 0, sizeof(tmp));
- tmp.xmit_fifo_size = acm->writesize;
- tmp.baud_base = le32_to_cpu(acm->line.dwDTERate);
- tmp.close_delay = acm->port.close_delay / 10;
- tmp.closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ss->xmit_fifo_size = acm->writesize;
+ ss->baud_base = le32_to_cpu(acm->line.dwDTERate);
+ ss->close_delay = acm->port.close_delay / 10;
+ ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
acm->port.closing_wait / 10;
-
- if (copy_to_user(info, &tmp, sizeof(tmp)))
- return -EFAULT;
- else
- return 0;
+ return 0;
}
-static int set_serial_info(struct acm *acm,
- struct serial_struct __user *newinfo)
+static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
- struct serial_struct new_serial;
+ struct acm *acm = tty->driver_data;
unsigned int closing_wait, close_delay;
int retval = 0;
- if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
- return -EFAULT;
-
- close_delay = new_serial.close_delay * 10;
- closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
- ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+ close_delay = ss->close_delay * 10;
+ closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
mutex_lock(&acm->port.mutex);
@@ -1000,12 +990,6 @@ static int acm_tty_ioctl(struct tty_struct *tty,
int rv = -ENOIOCTLCMD;
switch (cmd) {
- case TIOCGSERIAL: /* gets serial port data */
- rv = get_serial_info(acm, (struct serial_struct __user *) arg);
- break;
- case TIOCSSERIAL:
- rv = set_serial_info(acm, (struct serial_struct __user *) arg);
- break;
case TIOCMIWAIT:
rv = usb_autopm_get_interface(acm->control);
if (rv < 0) {
@@ -1514,6 +1498,7 @@ static void acm_disconnect(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
struct tty_struct *tty;
+ int i;
/* sibling interface is already cleaning up */
if (!acm)
@@ -1544,6 +1529,11 @@ static void acm_disconnect(struct usb_interface *intf)
tty_unregister_device(acm_tty_driver, acm->minor);
+ usb_free_urb(acm->ctrlurb);
+ for (i = 0; i < ACM_NW; i++)
+ usb_free_urb(acm->wb[i].urb);
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_free_urb(acm->read_urbs[i]);
acm_write_buffers_free(acm);
usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
acm_read_buffers_free(acm);
@@ -1636,6 +1626,7 @@ static int acm_pre_reset(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
clear_bit(EVENT_RX_STALL, &acm->flags);
+ acm->nb_index = 0; /* pending control transfers are lost */
return 0;
}
@@ -1925,6 +1916,8 @@ static const struct tty_operations acm_ops = {
.set_termios = acm_tty_set_termios,
.tiocmget = acm_tty_tiocmget,
.tiocmset = acm_tty_tiocmset,
+ .get_serial = get_serial_info,
+ .set_serial = set_serial_info,
.get_icount = acm_tty_get_icount,
};
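
With .get_serial/.set_serial in tty_operations, the tty core (see the
compat_tty_tiocgserial()/compat_tty_tiocsserial() helpers earlier in this
diff) now performs the copy_{from,to}_user() and 32-bit translation, so a
driver only fills in or reads a kernel-space serial_struct. A minimal
driver-side sketch (foo_* and the port fields are hypothetical):

static int foo_get_serial(struct tty_struct *tty, struct serial_struct *ss)
{
	struct foo_port *fp = tty->driver_data;

	ss->baud_base = fp->baud_base;
	ss->close_delay = fp->port.close_delay / 10;
	return 0;
}

static int foo_set_serial(struct tty_struct *tty, struct serial_struct *ss)
{
	struct foo_port *fp = tty->driver_data;

	fp->port.close_delay = ss->close_delay * 10;
	return 0;
}

static const struct tty_operations foo_ops = {
	.get_serial = foo_get_serial,
	.set_serial = foo_set_serial,
	/* ... remaining operations ... */
};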
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 656d247819c9..bec581fb7c63 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -460,7 +460,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
- rv = usb_submit_urb(desc->response, GFP_ATOMIC);
+ rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
dev_err(&desc->intf->dev,
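
The flag change is safe because this submission sits between
spin_unlock_irq() and spin_lock_irq(): the function may sleep there, and
GFP_KERNEL allocations are far less likely to fail under memory pressure
than GFP_ATOMIC ones. The rule of thumb, as a sketch (resubmit() and
struct wdm_ctx are hypothetical):

struct wdm_ctx { struct urb *urb; };

static int resubmit(struct wdm_ctx *c, bool atomic_ctx)
{
	/* GFP_ATOMIC only where sleeping is forbidden (IRQ context,
	 * spinlock held); GFP_KERNEL everywhere else
	 */
	return usb_submit_urb(c->urb, atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
}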
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 83ffa5a14c3d..4942122b2346 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -5,6 +5,7 @@
* Copyright (C) 2007 Stefan Kopp, Gechingen, Germany
* Copyright (C) 2008 Novell, Inc.
* Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (C) 2018 IVI Foundation, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -21,21 +22,24 @@
#include <linux/compat.h>
#include <linux/usb/tmc.h>
+/* Increment API VERSION when changing tmc.h with new flags or ioctls
+ * or when changing a significant behavior of the driver.
+ */
+#define USBTMC_API_VERSION (2)
#define USBTMC_HEADER_SIZE 12
#define USBTMC_MINOR_BASE 176
-/*
- * Size of driver internal IO buffer. Must be multiple of 4 and at least as
- * large as wMaxPacketSize (which is usually 512 bytes).
- */
-#define USBTMC_SIZE_IOBUFFER 2048
-
/* Minimum USB timeout (in milliseconds) */
#define USBTMC_MIN_TIMEOUT 100
/* Default USB timeout (in milliseconds) */
#define USBTMC_TIMEOUT 5000
+/* Max number of urbs used in write transfers */
+#define MAX_URBS_IN_FLIGHT 16
+/* I/O buffer size used in generic read/write functions */
+#define USBTMC_BUFSIZE (4096)
+
/*
* Maximum number of read cycles to empty bulk in endpoint during CLEAR and
* ABORT_BULK_IN requests. Ends the loop if (for whatever reason) a short
@@ -79,6 +83,9 @@ struct usbtmc_device_data {
u8 bTag_last_write; /* needed for abort */
u8 bTag_last_read; /* needed for abort */
+ /* packet size of IN bulk */
+ u16 wMaxPacketSize;
+
/* data for interrupt in endpoint handling */
u8 bNotify1;
u8 bNotify2;
@@ -95,11 +102,6 @@ struct usbtmc_device_data {
/* coalesced usb488_caps from usbtmc_dev_capabilities */
__u8 usb488_caps;
- /* attributes from the USB TMC spec for this device */
- u8 TermChar;
- bool TermCharEnabled;
- bool auto_abort;
-
bool zombie; /* fd of disconnected device */
struct usbtmc_dev_capabilities capabilities;
@@ -121,13 +123,34 @@ struct usbtmc_file_data {
u32 timeout;
u8 srq_byte;
atomic_t srq_asserted;
+ atomic_t closing;
+ u8 bmTransferAttributes; /* member of DEV_DEP_MSG_IN */
+
u8 eom_val;
u8 term_char;
bool term_char_enabled;
+ bool auto_abort;
+
+ spinlock_t err_lock; /* lock for errors */
+
+ struct usb_anchor submitted;
+
+ /* data for generic_write */
+ struct semaphore limit_write_sem;
+ u32 out_transfer_size;
+ int out_status;
+
+ /* data for generic_read */
+ u32 in_transfer_size;
+ int in_status;
+ int in_urbs_used;
+ struct usb_anchor in_anchor;
+ wait_queue_head_t wait_bulk_in;
};
/* Forward declarations */
static struct usb_driver usbtmc_driver;
+static void usbtmc_draw_down(struct usbtmc_file_data *file_data);
static void usbtmc_delete(struct kref *kref)
{
@@ -153,6 +176,12 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
if (!file_data)
return -ENOMEM;
+ spin_lock_init(&file_data->err_lock);
+ sema_init(&file_data->limit_write_sem, MAX_URBS_IN_FLIGHT);
+ init_usb_anchor(&file_data->submitted);
+ init_usb_anchor(&file_data->in_anchor);
+ init_waitqueue_head(&file_data->wait_bulk_in);
+
data = usb_get_intfdata(intf);
/* Protect reference to data from file structure until release */
kref_get(&data->kref);
@@ -160,10 +189,12 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
mutex_lock(&data->io_mutex);
file_data->data = data;
- /* copy default values from device settings */
+ atomic_set(&file_data->closing, 0);
+
file_data->timeout = USBTMC_TIMEOUT;
- file_data->term_char = data->TermChar;
- file_data->term_char_enabled = data->TermCharEnabled;
+ file_data->term_char = '\n';
+ file_data->term_char_enabled = 0;
+ file_data->auto_abort = 0;
file_data->eom_val = 1;
INIT_LIST_HEAD(&file_data->file_elem);
@@ -178,6 +209,40 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
return 0;
}
+/*
+ * usbtmc_flush - called before file handle is closed
+ */
+static int usbtmc_flush(struct file *file, fl_owner_t id)
+{
+ struct usbtmc_file_data *file_data;
+ struct usbtmc_device_data *data;
+
+ file_data = file->private_data;
+ if (file_data == NULL)
+ return -ENODEV;
+
+ atomic_set(&file_data->closing, 1);
+ data = file_data->data;
+
+ /* wait for io to stop */
+ mutex_lock(&data->io_mutex);
+
+ usbtmc_draw_down(file_data);
+
+ spin_lock_irq(&file_data->err_lock);
+ file_data->in_status = 0;
+ file_data->in_transfer_size = 0;
+ file_data->in_urbs_used = 0;
+ file_data->out_status = 0;
+ file_data->out_transfer_size = 0;
+ spin_unlock_irq(&file_data->err_lock);
+
+ wake_up_interruptible_all(&data->waitq);
+ mutex_unlock(&data->io_mutex);
+
+ return 0;
+}
+
static int usbtmc_release(struct inode *inode, struct file *file)
{
struct usbtmc_file_data *file_data = file->private_data;
@@ -197,18 +262,17 @@ static int usbtmc_release(struct inode *inode, struct file *file)
return 0;
}
-static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
+static int usbtmc_ioctl_abort_bulk_in_tag(struct usbtmc_device_data *data,
+ u8 tag)
{
u8 *buffer;
struct device *dev;
int rv;
int n;
int actual;
- struct usb_host_interface *current_setting;
- int max_size;
dev = &data->intf->dev;
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+ buffer = kmalloc(USBTMC_BUFSIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -216,86 +280,88 @@ static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INITIATE_ABORT_BULK_IN,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
- data->bTag_last_read, data->bulk_in,
- buffer, 2, USBTMC_TIMEOUT);
+ tag, data->bulk_in,
+ buffer, 2, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
}
- dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+ dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x with tag %02x\n",
+ buffer[0], buffer[1]);
if (buffer[0] == USBTMC_STATUS_FAILED) {
+ /* No transfer in progress and the Bulk-OUT FIFO is empty. */
rv = 0;
goto exit;
}
- if (buffer[0] != USBTMC_STATUS_SUCCESS) {
- dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n",
- buffer[0]);
- rv = -EPERM;
+ if (buffer[0] == USBTMC_STATUS_TRANSFER_NOT_IN_PROGRESS) {
+ /* The device returns this status if either:
+ * - There is a transfer in progress, but the specified bTag
+ * does not match.
+ * - There is no transfer in progress, but the Bulk-OUT FIFO
+ * is not empty.
+ */
+ rv = -ENOMSG;
goto exit;
}
- max_size = 0;
- current_setting = data->intf->cur_altsetting;
- for (n = 0; n < current_setting->desc.bNumEndpoints; n++)
- if (current_setting->endpoint[n].desc.bEndpointAddress ==
- data->bulk_in)
- max_size = usb_endpoint_maxp(&current_setting->endpoint[n].desc);
-
- if (max_size == 0) {
- dev_err(dev, "Couldn't get wMaxPacketSize\n");
+ if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+ dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n",
+ buffer[0]);
rv = -EPERM;
goto exit;
}
- dev_dbg(&data->intf->dev, "wMaxPacketSize is %d\n", max_size);
-
n = 0;
- do {
- dev_dbg(dev, "Reading from bulk in EP\n");
+usbtmc_abort_bulk_in_status:
+ dev_dbg(dev, "Reading from bulk in EP\n");
- rv = usb_bulk_msg(data->usb_dev,
- usb_rcvbulkpipe(data->usb_dev,
- data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER,
- &actual, USBTMC_TIMEOUT);
+ /* Data must be present. So use low timeout 300 ms */
+ actual = 0;
+ rv = usb_bulk_msg(data->usb_dev,
+ usb_rcvbulkpipe(data->usb_dev,
+ data->bulk_in),
+ buffer, USBTMC_BUFSIZE,
+ &actual, 300);
- n++;
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE, 16, 1,
+ buffer, actual, true);
- if (rv < 0) {
- dev_err(dev, "usb_bulk_msg returned %d\n", rv);
+ n++;
+
+ if (rv < 0) {
+ dev_err(dev, "usb_bulk_msg returned %d\n", rv);
+ if (rv != -ETIMEDOUT)
goto exit;
- }
- } while ((actual == max_size) &&
- (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
+ }
- if (actual == max_size) {
+ if (actual == USBTMC_BUFSIZE)
+ goto usbtmc_abort_bulk_in_status;
+
+ if (n >= USBTMC_MAX_READS_TO_CLEAR_BULK_IN) {
dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
rv = -EPERM;
goto exit;
}
- n = 0;
-
-usbtmc_abort_bulk_in_status:
rv = usb_control_msg(data->usb_dev,
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
0, data->bulk_in, buffer, 0x08,
- USBTMC_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
}
- dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+ dev_dbg(dev, "CHECK_ABORT_BULK_IN returned %x\n", buffer[0]);
if (buffer[0] == USBTMC_STATUS_SUCCESS) {
rv = 0;
@@ -303,46 +369,30 @@ usbtmc_abort_bulk_in_status:
}
if (buffer[0] != USBTMC_STATUS_PENDING) {
- dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+ dev_err(dev, "CHECK_ABORT_BULK_IN returned %x\n", buffer[0]);
rv = -EPERM;
goto exit;
}
- if (buffer[1] == 1)
- do {
- dev_dbg(dev, "Reading from bulk in EP\n");
-
- rv = usb_bulk_msg(data->usb_dev,
- usb_rcvbulkpipe(data->usb_dev,
- data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER,
- &actual, USBTMC_TIMEOUT);
-
- n++;
-
- if (rv < 0) {
- dev_err(dev, "usb_bulk_msg returned %d\n", rv);
- goto exit;
- }
- } while ((actual == max_size) &&
- (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
-
- if (actual == max_size) {
- dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
- USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
- rv = -EPERM;
- goto exit;
+ if ((buffer[1] & 1) > 0) {
+ /* The device has 1 or more queued packets the Host can read */
+ goto usbtmc_abort_bulk_in_status;
}
- goto usbtmc_abort_bulk_in_status;
-
+ /* The Host must send CHECK_ABORT_BULK_IN_STATUS at a later time. */
+ rv = -EAGAIN;
exit:
kfree(buffer);
return rv;
+}
+static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
+{
+ return usbtmc_ioctl_abort_bulk_in_tag(data, data->bTag_last_read);
}
-static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
+static int usbtmc_ioctl_abort_bulk_out_tag(struct usbtmc_device_data *data,
+ u8 tag)
{
struct device *dev;
u8 *buffer;
@@ -359,8 +409,8 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
- data->bTag_last_write, data->bulk_out,
- buffer, 2, USBTMC_TIMEOUT);
+ tag, data->bulk_out,
+ buffer, 2, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -379,12 +429,14 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
n = 0;
usbtmc_abort_bulk_out_check_status:
+ /* do not stress device with subsequent requests */
+ msleep(50);
rv = usb_control_msg(data->usb_dev,
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
0, data->bulk_out, buffer, 0x08,
- USBTMC_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
n++;
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -418,6 +470,11 @@ exit:
return rv;
}
+static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
+{
+ return usbtmc_ioctl_abort_bulk_out_tag(data, data->bTag_last_write);
+}
+
static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
void __user *arg)
{
@@ -457,7 +514,7 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
data->iin_bTag,
data->ifnum,
- buffer, 0x03, USBTMC_TIMEOUT);
+ buffer, 0x03, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "stb usb_control_msg returned %d\n", rv);
goto exit;
@@ -510,6 +567,54 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
return rv;
}
+static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
+ __u32 __user *arg)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ struct device *dev = &data->intf->dev;
+ int rv;
+ u32 timeout;
+ unsigned long expire;
+
+ if (!data->iin_ep_present) {
+ dev_dbg(dev, "no interrupt endpoint present\n");
+ return -EFAULT;
+ }
+
+ if (get_user(timeout, arg))
+ return -EFAULT;
+
+ expire = msecs_to_jiffies(timeout);
+
+ mutex_unlock(&data->io_mutex);
+
+ rv = wait_event_interruptible_timeout(
+ data->waitq,
+ atomic_read(&file_data->srq_asserted) != 0 ||
+ atomic_read(&file_data->closing),
+ expire);
+
+ mutex_lock(&data->io_mutex);
+
+ /* Note! disconnect or close could be called in the meantime */
+ if (atomic_read(&file_data->closing) || data->zombie)
+ rv = -ENODEV;
+
+ if (rv < 0) {
+ /* dev can be invalid now! */
+ pr_debug("%s - wait interrupted %d\n", __func__, rv);
+ return rv;
+ }
+
+ if (rv == 0) {
+ dev_dbg(dev, "%s - wait timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "%s - srq asserted\n", __func__);
+ return 0;
+}
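
wait_event_interruptible_timeout() returns the remaining jiffies (> 0)
when the condition became true, 0 on timeout and -ERESTARTSYS when a
signal arrived, which is exactly the three-way decode above; io_mutex is
dropped around the sleep so concurrent reads and writes can still make
progress. The decode in isolation (wait_for_flag is a hypothetical
helper):

static int wait_for_flag(wait_queue_head_t *wq, atomic_t *flag, u32 ms)
{
	long left = wait_event_interruptible_timeout(*wq,
				atomic_read(flag) != 0,
				msecs_to_jiffies(ms));

	if (left < 0)
		return left;		/* -ERESTARTSYS: interrupted */
	if (left == 0)
		return -ETIMEDOUT;	/* timer ran out */
	return 0;			/* condition became true */
}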
+
static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
void __user *arg, unsigned int cmd)
{
@@ -543,7 +648,7 @@ static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
wValue,
data->ifnum,
- buffer, 0x01, USBTMC_TIMEOUT);
+ buffer, 0x01, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "simple usb_control_msg failed %d\n", rv);
goto exit;
@@ -610,6 +715,559 @@ static int usbtmc488_ioctl_trigger(struct usbtmc_file_data *file_data)
return 0;
}
+static struct urb *usbtmc_create_urb(void)
+{
+ const size_t bufsize = USBTMC_BUFSIZE;
+ u8 *dmabuf = NULL;
+ struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (!urb)
+ return NULL;
+
+ dmabuf = kmalloc(bufsize, GFP_KERNEL);
+ if (!dmabuf) {
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ urb->transfer_buffer = dmabuf;
+ urb->transfer_buffer_length = bufsize;
+ urb->transfer_flags |= URB_FREE_BUFFER;
+ return urb;
+}
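
URB_FREE_BUFFER ties the transfer buffer's lifetime to the urb itself, so
a single usb_free_urb() (or the final reference drop after completion)
releases both allocations. Typical use, sketched (submit_one and its
parameters are hypothetical):

static int submit_one(struct usb_device *udev, unsigned int pipe,
		      usb_complete_t done_cb, void *ctx)
{
	struct urb *urb = usbtmc_create_urb();

	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev, pipe, urb->transfer_buffer,
			  urb->transfer_buffer_length, done_cb, ctx);
	if (usb_submit_urb(urb, GFP_KERNEL)) {
		usb_free_urb(urb);	/* frees the buffer as well */
		return -EIO;
	}
	usb_free_urb(urb);	/* drop our ref; the USB core holds its own */
	return 0;
}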
+
+static void usbtmc_read_bulk_cb(struct urb *urb)
+{
+ struct usbtmc_file_data *file_data = urb->context;
+ int status = urb->status;
+ unsigned long flags;
+
+ /* sync/async unlink faults aren't errors */
+ if (status) {
+ if (!(/* status == -ENOENT || */
+ status == -ECONNRESET ||
+ status == -EREMOTEIO || /* Short packet */
+ status == -ESHUTDOWN))
+ dev_err(&file_data->data->intf->dev,
+ "%s - nonzero read bulk status received: %d\n",
+ __func__, status);
+
+ spin_lock_irqsave(&file_data->err_lock, flags);
+ if (!file_data->in_status)
+ file_data->in_status = status;
+ spin_unlock_irqrestore(&file_data->err_lock, flags);
+ }
+
+ spin_lock_irqsave(&file_data->err_lock, flags);
+ file_data->in_transfer_size += urb->actual_length;
+ dev_dbg(&file_data->data->intf->dev,
+ "%s - total size: %u current: %d status: %d\n",
+ __func__, file_data->in_transfer_size,
+ urb->actual_length, status);
+ spin_unlock_irqrestore(&file_data->err_lock, flags);
+ usb_anchor_urb(urb, &file_data->in_anchor);
+
+ wake_up_interruptible(&file_data->wait_bulk_in);
+ wake_up_interruptible(&file_data->data->waitq);
+}
+
+static inline bool usbtmc_do_transfer(struct usbtmc_file_data *file_data)
+{
+ bool data_or_error;
+
+ spin_lock_irq(&file_data->err_lock);
+ data_or_error = !usb_anchor_empty(&file_data->in_anchor)
+ || file_data->in_status;
+ spin_unlock_irq(&file_data->err_lock);
+ dev_dbg(&file_data->data->intf->dev, "%s: returns %d\n", __func__,
+ data_or_error);
+ return data_or_error;
+}
+
+static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
+ void __user *user_buffer,
+ u32 transfer_size,
+ u32 *transferred,
+ u32 flags)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ struct device *dev = &data->intf->dev;
+ u32 done = 0;
+ u32 remaining;
+ const u32 bufsize = USBTMC_BUFSIZE;
+ int retval = 0;
+ u32 max_transfer_size;
+ unsigned long expire;
+ int bufcount = 1;
+ int again = 0;
+
+ /* mutex already locked */
+
+ *transferred = done;
+
+ max_transfer_size = transfer_size;
+
+ if (flags & USBTMC_FLAG_IGNORE_TRAILER) {
+ /* The device may send extra alignment bytes (up to
+ * wMaxPacketSize - 1) to avoid sending a zero-length
+ * packet
+ */
+ remaining = transfer_size;
+ if ((max_transfer_size % data->wMaxPacketSize) == 0)
+ max_transfer_size += (data->wMaxPacketSize - 1);
+ } else {
+ /* round down to bufsize to avoid truncated data left */
+ if (max_transfer_size > bufsize) {
+ max_transfer_size =
+ roundup(max_transfer_size + 1 - bufsize,
+ bufsize);
+ }
+ remaining = max_transfer_size;
+ }
+
+ spin_lock_irq(&file_data->err_lock);
+
+ if (file_data->in_status) {
+ /* return the very first error */
+ retval = file_data->in_status;
+ spin_unlock_irq(&file_data->err_lock);
+ goto error;
+ }
+
+ if (flags & USBTMC_FLAG_ASYNC) {
+ if (usb_anchor_empty(&file_data->in_anchor))
+ again = 1;
+
+ if (file_data->in_urbs_used == 0) {
+ file_data->in_transfer_size = 0;
+ file_data->in_status = 0;
+ }
+ } else {
+ file_data->in_transfer_size = 0;
+ file_data->in_status = 0;
+ }
+
+ if (max_transfer_size == 0) {
+ bufcount = 0;
+ } else {
+ bufcount = roundup(max_transfer_size, bufsize) / bufsize;
+ if (bufcount > file_data->in_urbs_used)
+ bufcount -= file_data->in_urbs_used;
+ else
+ bufcount = 0;
+
+ if (bufcount + file_data->in_urbs_used > MAX_URBS_IN_FLIGHT) {
+ bufcount = MAX_URBS_IN_FLIGHT -
+ file_data->in_urbs_used;
+ }
+ }
+ spin_unlock_irq(&file_data->err_lock);
+
+ dev_dbg(dev, "%s: requested=%u flags=0x%X size=%u bufs=%d used=%d\n",
+ __func__, transfer_size, flags,
+ max_transfer_size, bufcount, file_data->in_urbs_used);
+
+ while (bufcount > 0) {
+ u8 *dmabuf = NULL;
+ struct urb *urb = usbtmc_create_urb();
+
+ if (!urb) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ dmabuf = urb->transfer_buffer;
+
+ usb_fill_bulk_urb(urb, data->usb_dev,
+ usb_rcvbulkpipe(data->usb_dev, data->bulk_in),
+ dmabuf, bufsize,
+ usbtmc_read_bulk_cb, file_data);
+
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ /* urb is anchored. We can release our reference. */
+ usb_free_urb(urb);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ goto error;
+ }
+ file_data->in_urbs_used++;
+ bufcount--;
+ }
+
+ if (again) {
+ dev_dbg(dev, "%s: ret=again\n", __func__);
+ return -EAGAIN;
+ }
+
+ if (user_buffer == NULL)
+ return -EINVAL;
+
+ expire = msecs_to_jiffies(file_data->timeout);
+
+ while (max_transfer_size > 0) {
+ u32 this_part;
+ struct urb *urb = NULL;
+
+ if (!(flags & USBTMC_FLAG_ASYNC)) {
+ dev_dbg(dev, "%s: before wait time %lu\n",
+ __func__, expire);
+ retval = wait_event_interruptible_timeout(
+ file_data->wait_bulk_in,
+ usbtmc_do_transfer(file_data),
+ expire);
+
+ dev_dbg(dev, "%s: wait returned %d\n",
+ __func__, retval);
+
+ if (retval <= 0) {
+ if (retval == 0)
+ retval = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ urb = usb_get_from_anchor(&file_data->in_anchor);
+ if (!urb) {
+ if (!(flags & USBTMC_FLAG_ASYNC)) {
+ /* synchronous case: must not happen */
+ retval = -EFAULT;
+ goto error;
+ }
+
+ /* asynchronous case: ready, do not block or wait */
+ *transferred = done;
+ dev_dbg(dev, "%s: (async) done=%u ret=0\n",
+ __func__, done);
+ return 0;
+ }
+
+ file_data->in_urbs_used--;
+
+ if (max_transfer_size > urb->actual_length)
+ max_transfer_size -= urb->actual_length;
+ else
+ max_transfer_size = 0;
+
+ if (remaining > urb->actual_length)
+ this_part = urb->actual_length;
+ else
+ this_part = remaining;
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE, 16, 1,
+ urb->transfer_buffer, urb->actual_length, true);
+
+ if (copy_to_user(user_buffer + done,
+ urb->transfer_buffer, this_part)) {
+ usb_free_urb(urb);
+ retval = -EFAULT;
+ goto error;
+ }
+
+ remaining -= this_part;
+ done += this_part;
+
+ spin_lock_irq(&file_data->err_lock);
+ if (urb->status) {
+ /* return the very first error */
+ retval = file_data->in_status;
+ spin_unlock_irq(&file_data->err_lock);
+ usb_free_urb(urb);
+ goto error;
+ }
+ spin_unlock_irq(&file_data->err_lock);
+
+ if (urb->actual_length < bufsize) {
+ /* short packet or ZLP received => ready */
+ usb_free_urb(urb);
+ retval = 1;
+ break;
+ }
+
+ if (!(flags & USBTMC_FLAG_ASYNC) &&
+ max_transfer_size > (bufsize * file_data->in_urbs_used)) {
+ /* resubmit, since other buffers still not enough */
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ goto error;
+ }
+ file_data->in_urbs_used++;
+ }
+ usb_free_urb(urb);
+ retval = 0;
+ }
+
+error:
+ *transferred = done;
+
+ dev_dbg(dev, "%s: before kill\n", __func__);
+ /* Attention: killing urbs can take long time (2 ms) */
+ usb_kill_anchored_urbs(&file_data->submitted);
+ dev_dbg(dev, "%s: after kill\n", __func__);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+ file_data->in_urbs_used = 0;
+ file_data->in_status = 0; /* no spinlock needed here */
+ dev_dbg(dev, "%s: done=%u ret=%d\n", __func__, done, retval);
+
+ return retval;
+}
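
The anchors are what keep this cancellable: every submitted urb is hung
on file_data->submitted, completed reads are re-anchored on in_anchor by
the callback until the data is consumed, and teardown is two calls. The
core of the pattern (hypothetical helper names):

static int submit_anchored(struct usb_anchor *anchor, struct urb *urb)
{
	int rv;

	usb_anchor_urb(urb, anchor);	/* track before submitting */
	rv = usb_submit_urb(urb, GFP_KERNEL);
	if (rv)
		usb_unanchor_urb(urb);	/* failed: untrack again */
	usb_free_urb(urb);		/* the anchor holds its own ref */
	return rv;
}

static void cancel_anchored(struct usb_anchor *anchor)
{
	usb_kill_anchored_urbs(anchor);		/* sync cancel of in-flight urbs */
	usb_scuttle_anchored_urbs(anchor);	/* drop completed, unread ones */
}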
+
+static ssize_t usbtmc_ioctl_generic_read(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ struct usbtmc_message msg;
+ ssize_t retval = 0;
+
+ /* mutex already locked */
+
+ if (copy_from_user(&msg, arg, sizeof(struct usbtmc_message)))
+ return -EFAULT;
+
+ retval = usbtmc_generic_read(file_data, msg.message,
+ msg.transfer_size, &msg.transferred,
+ msg.flags);
+
+ if (put_user(msg.transferred,
+ &((struct usbtmc_message __user *)arg)->transferred))
+ return -EFAULT;
+
+ return retval;
+}
+
+static void usbtmc_write_bulk_cb(struct urb *urb)
+{
+ struct usbtmc_file_data *file_data = urb->context;
+ int wakeup = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&file_data->err_lock, flags);
+ file_data->out_transfer_size += urb->actual_length;
+
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))
+ dev_err(&file_data->data->intf->dev,
+ "%s - nonzero write bulk status received: %d\n",
+ __func__, urb->status);
+
+ if (!file_data->out_status) {
+ file_data->out_status = urb->status;
+ wakeup = 1;
+ }
+ }
+ spin_unlock_irqrestore(&file_data->err_lock, flags);
+
+ dev_dbg(&file_data->data->intf->dev,
+ "%s - write bulk total size: %u\n",
+ __func__, file_data->out_transfer_size);
+
+ up(&file_data->limit_write_sem);
+ if (usb_anchor_empty(&file_data->submitted) || wakeup)
+ wake_up_interruptible(&file_data->data->waitq);
+}
+
+static ssize_t usbtmc_generic_write(struct usbtmc_file_data *file_data,
+ const void __user *user_buffer,
+ u32 transfer_size,
+ u32 *transferred,
+ u32 flags)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ struct device *dev;
+ u32 done = 0;
+ u32 remaining;
+ unsigned long expire;
+ const u32 bufsize = USBTMC_BUFSIZE;
+ struct urb *urb = NULL;
+ int retval = 0;
+ u32 timeout;
+
+ *transferred = 0;
+
+ /* Get pointer to private data structure */
+ dev = &data->intf->dev;
+
+ dev_dbg(dev, "%s: size=%u flags=0x%X sema=%u\n",
+ __func__, transfer_size, flags,
+ file_data->limit_write_sem.count);
+
+ if (flags & USBTMC_FLAG_APPEND) {
+ spin_lock_irq(&file_data->err_lock);
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+ if (retval < 0)
+ return retval;
+ } else {
+ spin_lock_irq(&file_data->err_lock);
+ file_data->out_transfer_size = 0;
+ file_data->out_status = 0;
+ spin_unlock_irq(&file_data->err_lock);
+ }
+
+ remaining = transfer_size;
+ if (remaining > INT_MAX)
+ remaining = INT_MAX;
+
+ timeout = file_data->timeout;
+ expire = msecs_to_jiffies(timeout);
+
+ while (remaining > 0) {
+ u32 this_part, aligned;
+ u8 *buffer = NULL;
+
+ if (flags & USBTMC_FLAG_ASYNC) {
+ if (down_trylock(&file_data->limit_write_sem)) {
+ retval = done ? 0 : -EAGAIN;
+ goto exit;
+ }
+ } else {
+ retval = down_timeout(&file_data->limit_write_sem,
+ expire);
+ if (retval < 0) {
+ retval = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ spin_lock_irq(&file_data->err_lock);
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+ if (retval < 0) {
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+
+ /* prepare next urb to send */
+ urb = usbtmc_create_urb();
+ if (!urb) {
+ retval = -ENOMEM;
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+ buffer = urb->transfer_buffer;
+
+ if (remaining > bufsize)
+ this_part = bufsize;
+ else
+ this_part = remaining;
+
+ if (copy_from_user(buffer, user_buffer + done, this_part)) {
+ retval = -EFAULT;
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, this_part, true);
+
+ /* fill bulk with 32 bit alignment to meet USBTMC specification
+ * (size + 3 & ~3) rounds up and simplifies user code
+ */
+ aligned = (this_part + 3) & ~3;
+ dev_dbg(dev, "write(size:%u align:%u done:%u)\n",
+ (unsigned int)this_part,
+ (unsigned int)aligned,
+ (unsigned int)done);
+
+ usb_fill_bulk_urb(urb, data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev, data->bulk_out),
+ urb->transfer_buffer, aligned,
+ usbtmc_write_bulk_cb, file_data);
+
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+
+ usb_free_urb(urb);
+ urb = NULL; /* urb will be finally released by usb driver */
+
+ remaining -= this_part;
+ done += this_part;
+ }
+
+ /* All urbs are on the fly */
+ if (!(flags & USBTMC_FLAG_ASYNC)) {
+ if (!usb_wait_anchor_empty_timeout(&file_data->submitted,
+ timeout)) {
+ retval = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ retval = 0;
+ goto exit;
+
+error:
+ usb_kill_anchored_urbs(&file_data->submitted);
+exit:
+ usb_free_urb(urb);
+
+ spin_lock_irq(&file_data->err_lock);
+ if (!(flags & USBTMC_FLAG_ASYNC))
+ done = file_data->out_transfer_size;
+ if (!retval && file_data->out_status)
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+
+ *transferred = done;
+
+ dev_dbg(dev, "%s: done=%u, retval=%d, urbstat=%d\n",
+ __func__, done, retval, file_data->out_status);
+
+ return retval;
+}
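
Two mechanisms bound this pipeline: limit_write_sem caps the urbs in
flight at MAX_URBS_IN_FLIGHT (down before each submit, up in the
completion callback), and each chunk is padded to a 32-bit boundary per
the (size + 3) & ~3 rule, so e.g. 5 payload bytes travel as 8 while 4
stay 4. The throttling half in isolation (throttled_submit is a
hypothetical helper reusing the file_data fields introduced above):

static int throttled_submit(struct usbtmc_file_data *fd, struct urb *urb,
			    bool async)
{
	int rv;

	if (async) {
		if (down_trylock(&fd->limit_write_sem))
			return -EAGAIN;		/* all slots busy, try later */
	} else if (down_timeout(&fd->limit_write_sem,
				msecs_to_jiffies(fd->timeout))) {
		return -ETIMEDOUT;
	}

	usb_anchor_urb(urb, &fd->submitted);
	rv = usb_submit_urb(urb, GFP_KERNEL);
	if (rv) {
		usb_unanchor_urb(urb);
		up(&fd->limit_write_sem);	/* callback will not run */
	}
	return rv;	/* on success the completion callback does up() */
}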
+
+static ssize_t usbtmc_ioctl_generic_write(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ struct usbtmc_message msg;
+ ssize_t retval = 0;
+
+ /* mutex already locked */
+
+ if (copy_from_user(&msg, arg, sizeof(struct usbtmc_message)))
+ return -EFAULT;
+
+ retval = usbtmc_generic_write(file_data, msg.message,
+ msg.transfer_size, &msg.transferred,
+ msg.flags);
+
+ if (put_user(msg.transferred,
+ &((struct usbtmc_message __user *)arg)->transferred))
+ return -EFAULT;
+
+ return retval;
+}
+
+/*
+ * Get the generic write result
+ */
+static ssize_t usbtmc_ioctl_write_result(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ u32 transferred;
+ int retval;
+
+ spin_lock_irq(&file_data->err_lock);
+ transferred = file_data->out_transfer_size;
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+
+ if (put_user(transferred, (__u32 __user *)arg))
+ return -EFAULT;
+
+ return retval;
+}
+
/*
* Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-OUT endpoint.
* @transfer_size: number of bytes to request from the device.
@@ -619,7 +1277,7 @@ static int usbtmc488_ioctl_trigger(struct usbtmc_file_data *file_data)
* Also updates bTag_last_write.
*/
static int send_request_dev_dep_msg_in(struct usbtmc_file_data *file_data,
- size_t transfer_size)
+ u32 transfer_size)
{
struct usbtmc_device_data *data = file_data->data;
int retval;
@@ -662,12 +1320,11 @@ static int send_request_dev_dep_msg_in(struct usbtmc_file_data *file_data,
data->bTag++;
kfree(buffer);
- if (retval < 0) {
- dev_err(&data->intf->dev, "usb_bulk_msg in send_request_dev_dep_msg_in() returned %d\n", retval);
- return retval;
- }
+ if (retval < 0)
+ dev_err(&data->intf->dev, "%s returned %d\n",
+ __func__, retval);
- return 0;
+ return retval;
}
static ssize_t usbtmc_read(struct file *filp, char __user *buf,
@@ -676,20 +1333,20 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
struct device *dev;
+ const u32 bufsize = USBTMC_BUFSIZE;
u32 n_characters;
u8 *buffer;
int actual;
- size_t done;
- size_t remaining;
+ u32 done = 0;
+ u32 remaining;
int retval;
- size_t this_part;
/* Get pointer to private data structure */
file_data = filp->private_data;
data = file_data->data;
dev = &data->intf->dev;
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+ buffer = kmalloc(bufsize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -699,124 +1356,116 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
goto exit;
}
- dev_dbg(dev, "usb_bulk_msg_in: count(%zu)\n", count);
+ if (count > INT_MAX)
+ count = INT_MAX;
+
+ dev_dbg(dev, "%s(count:%zu)\n", __func__, count);
retval = send_request_dev_dep_msg_in(file_data, count);
if (retval < 0) {
- if (data->auto_abort)
+ if (file_data->auto_abort)
usbtmc_ioctl_abort_bulk_out(data);
goto exit;
}
/* Loop until we have fetched everything we requested */
remaining = count;
- this_part = remaining;
- done = 0;
+ actual = 0;
- while (remaining > 0) {
- /* Send bulk URB */
- retval = usb_bulk_msg(data->usb_dev,
- usb_rcvbulkpipe(data->usb_dev,
- data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER, &actual,
- file_data->timeout);
-
- dev_dbg(dev, "usb_bulk_msg: retval(%u), done(%zu), remaining(%zu), actual(%d)\n", retval, done, remaining, actual);
+ /* Send bulk URB */
+ retval = usb_bulk_msg(data->usb_dev,
+ usb_rcvbulkpipe(data->usb_dev,
+ data->bulk_in),
+ buffer, bufsize, &actual,
+ file_data->timeout);
- /* Store bTag (in case we need to abort) */
- data->bTag_last_read = data->bTag;
+ dev_dbg(dev, "%s: bulk_msg retval(%u), actual(%d)\n",
+ __func__, retval, actual);
- if (retval < 0) {
- dev_dbg(dev, "Unable to read data, error %d\n", retval);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
-
- /* Parse header in first packet */
- if (done == 0) {
- /* Sanity checks for the header */
- if (actual < USBTMC_HEADER_SIZE) {
- dev_err(dev, "Device sent too small first packet: %u < %u\n", actual, USBTMC_HEADER_SIZE);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ /* Store bTag (in case we need to abort) */
+ data->bTag_last_read = data->bTag;
- if (buffer[0] != 2) {
- dev_err(dev, "Device sent reply with wrong MsgID: %u != 2\n", buffer[0]);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ if (retval < 0) {
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- if (buffer[1] != data->bTag_last_write) {
- dev_err(dev, "Device sent reply with wrong bTag: %u != %u\n", buffer[1], data->bTag_last_write);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ /* Sanity checks for the header */
+ if (actual < USBTMC_HEADER_SIZE) {
+ dev_err(dev, "Device sent too small first packet: %u < %u\n",
+ actual, USBTMC_HEADER_SIZE);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- /* How many characters did the instrument send? */
- n_characters = buffer[4] +
- (buffer[5] << 8) +
- (buffer[6] << 16) +
- (buffer[7] << 24);
+ if (buffer[0] != 2) {
+ dev_err(dev, "Device sent reply with wrong MsgID: %u != 2\n",
+ buffer[0]);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- if (n_characters > this_part) {
- dev_err(dev, "Device wants to return more data than requested: %u > %zu\n", n_characters, count);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ if (buffer[1] != data->bTag_last_write) {
+ dev_err(dev, "Device sent reply with wrong bTag: %u != %u\n",
+ buffer[1], data->bTag_last_write);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- /* Remove the USBTMC header */
- actual -= USBTMC_HEADER_SIZE;
+ /* How many characters did the instrument send? */
+ n_characters = buffer[4] +
+ (buffer[5] << 8) +
+ (buffer[6] << 16) +
+ (buffer[7] << 24);
- /* Check if the message is smaller than requested */
- if (remaining > n_characters)
- remaining = n_characters;
- /* Remove padding if it exists */
- if (actual > remaining)
- actual = remaining;
+ file_data->bmTransferAttributes = buffer[8];
- dev_dbg(dev, "Bulk-IN header: N_characters(%u), bTransAttr(%u)\n", n_characters, buffer[8]);
+ dev_dbg(dev, "Bulk-IN header: N_characters(%u), bTransAttr(%u)\n",
+ n_characters, buffer[8]);
- remaining -= actual;
+ if (n_characters > remaining) {
+ dev_err(dev, "Device wants to return more data than requested: %u > %zu\n",
+ n_characters, count);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- /* Terminate if end-of-message bit received from device */
- if ((buffer[8] & 0x01) && (actual >= n_characters))
- remaining = 0;
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, actual, true);
- dev_dbg(dev, "Bulk-IN header: remaining(%zu), buf(%p), buffer(%p) done(%zu)\n", remaining,buf,buffer,done);
+ remaining = n_characters;
+ /* Remove the USBTMC header */
+ actual -= USBTMC_HEADER_SIZE;
- /* Copy buffer to user space */
- if (copy_to_user(buf + done, &buffer[USBTMC_HEADER_SIZE], actual)) {
- /* There must have been an addressing problem */
- retval = -EFAULT;
- goto exit;
- }
- done += actual;
- }
- else {
- if (actual > remaining)
- actual = remaining;
+ /* Remove padding if it exists */
+ if (actual > remaining)
+ actual = remaining;
- remaining -= actual;
+ remaining -= actual;
- dev_dbg(dev, "Bulk-IN header cont: actual(%u), done(%zu), remaining(%zu), buf(%p), buffer(%p)\n", actual, done, remaining,buf,buffer);
+ /* Copy buffer to user space */
+ if (copy_to_user(buf, &buffer[USBTMC_HEADER_SIZE], actual)) {
+ /* There must have been an addressing problem */
+ retval = -EFAULT;
+ goto exit;
+ }
- /* Copy buffer to user space */
- if (copy_to_user(buf + done, buffer, actual)) {
- /* There must have been an addressing problem */
- retval = -EFAULT;
- goto exit;
- }
- done += actual;
- }
+ if ((actual + USBTMC_HEADER_SIZE) == bufsize) {
+ retval = usbtmc_generic_read(file_data, buf + actual,
+ remaining,
+ &done,
+ USBTMC_FLAG_IGNORE_TRAILER);
+ if (retval < 0)
+ goto exit;
}
+ done += actual;
/* Update file position value */
*f_pos = *f_pos + done;
@@ -833,113 +1482,152 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
{
struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
+ struct urb *urb = NULL;
+ ssize_t retval = 0;
u8 *buffer;
- int retval;
- int actual;
- unsigned long int n_bytes;
- int remaining;
- int done;
- int this_part;
+ u32 remaining, done;
+ u32 transfersize, aligned, buflen;
file_data = filp->private_data;
data = file_data->data;
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
mutex_lock(&data->io_mutex);
+
if (data->zombie) {
retval = -ENODEV;
goto exit;
}
- remaining = count;
done = 0;
- while (remaining > 0) {
- if (remaining > USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE) {
- this_part = USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE;
- buffer[8] = 0;
- } else {
- this_part = remaining;
- buffer[8] = file_data->eom_val;
- }
+ spin_lock_irq(&file_data->err_lock);
+ file_data->out_transfer_size = 0;
+ file_data->out_status = 0;
+ spin_unlock_irq(&file_data->err_lock);
- /* Setup IO buffer for DEV_DEP_MSG_OUT message */
- buffer[0] = 1;
- buffer[1] = data->bTag;
- buffer[2] = ~data->bTag;
- buffer[3] = 0; /* Reserved */
- buffer[4] = this_part >> 0;
- buffer[5] = this_part >> 8;
- buffer[6] = this_part >> 16;
- buffer[7] = this_part >> 24;
- /* buffer[8] is set above... */
- buffer[9] = 0; /* Reserved */
- buffer[10] = 0; /* Reserved */
- buffer[11] = 0; /* Reserved */
-
- if (copy_from_user(&buffer[USBTMC_HEADER_SIZE], buf + done, this_part)) {
- retval = -EFAULT;
- goto exit;
- }
+ if (!count)
+ goto exit;
- n_bytes = roundup(USBTMC_HEADER_SIZE + this_part, 4);
- memset(buffer + USBTMC_HEADER_SIZE + this_part, 0, n_bytes - (USBTMC_HEADER_SIZE + this_part));
+ if (down_trylock(&file_data->limit_write_sem)) {
+ /* previous calls were async */
+ retval = -EBUSY;
+ goto exit;
+ }
- do {
- retval = usb_bulk_msg(data->usb_dev,
- usb_sndbulkpipe(data->usb_dev,
- data->bulk_out),
- buffer, n_bytes,
- &actual, file_data->timeout);
- if (retval != 0)
- break;
- n_bytes -= actual;
- } while (n_bytes);
-
- data->bTag_last_write = data->bTag;
+ urb = usbtmc_create_urb();
+ if (!urb) {
+ retval = -ENOMEM;
+ up(&file_data->limit_write_sem);
+ goto exit;
+ }
+
+ buffer = urb->transfer_buffer;
+ buflen = urb->transfer_buffer_length;
+
+ if (count > INT_MAX) {
+ transfersize = INT_MAX;
+ buffer[8] = 0;
+ } else {
+ transfersize = count;
+ buffer[8] = file_data->eom_val;
+ }
+
+ /* Setup IO buffer for DEV_DEP_MSG_OUT message */
+ buffer[0] = 1;
+ buffer[1] = data->bTag;
+ buffer[2] = ~data->bTag;
+ buffer[3] = 0; /* Reserved */
+ buffer[4] = transfersize >> 0;
+ buffer[5] = transfersize >> 8;
+ buffer[6] = transfersize >> 16;
+ buffer[7] = transfersize >> 24;
+ /* buffer[8] is set above... */
+ buffer[9] = 0; /* Reserved */
+ buffer[10] = 0; /* Reserved */
+ buffer[11] = 0; /* Reserved */
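
The twelve bytes follow the USBTMC Bulk-OUT header layout; decoded from
the assignments above, for reference:

/*
 * DEV_DEP_MSG_OUT header:
 *   byte 0      MsgID           1 (the read path expects 2, DEV_DEP_MSG_IN)
 *   byte 1      bTag            wraps 1..255, never 0
 *   byte 2      bTagInverse     ~bTag, consistency check for the device
 *   byte 3      reserved        0
 *   bytes 4-7   TransferSize    payload length, little endian
 *   byte 8      bmTransferAttributes   bit 0 = EOM on the final message
 *   bytes 9-11  reserved        0
 */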
+
+ remaining = transfersize;
+
+ if (transfersize + USBTMC_HEADER_SIZE > buflen) {
+ transfersize = buflen - USBTMC_HEADER_SIZE;
+ aligned = buflen;
+ } else {
+ aligned = (transfersize + (USBTMC_HEADER_SIZE + 3)) & ~3;
+ }
+
+ if (copy_from_user(&buffer[USBTMC_HEADER_SIZE], buf, transfersize)) {
+ retval = -EFAULT;
+ up(&file_data->limit_write_sem);
+ goto exit;
+ }
+
+ dev_dbg(&data->intf->dev, "%s(size:%u align:%u)\n", __func__,
+ (unsigned int)transfersize, (unsigned int)aligned);
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, aligned, true);
+
+ usb_fill_bulk_urb(urb, data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev, data->bulk_out),
+ urb->transfer_buffer, aligned,
+ usbtmc_write_bulk_cb, file_data);
+
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ up(&file_data->limit_write_sem);
+ goto exit;
+ }
+
+ remaining -= transfersize;
+
+ data->bTag_last_write = data->bTag;
+ data->bTag++;
+
+ if (!data->bTag)
data->bTag++;
- if (!data->bTag)
- data->bTag++;
+ /* call generic_write even when remaining = 0 */
+ retval = usbtmc_generic_write(file_data, buf + transfersize, remaining,
+ &done, USBTMC_FLAG_APPEND);
+ /* truncate alignment bytes */
+ if (done > remaining)
+ done = remaining;
- if (retval < 0) {
- dev_err(&data->intf->dev,
- "Unable to send data, error %d\n", retval);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_out(data);
- goto exit;
- }
+ /* add size of first urb */
+ done += transfersize;
- remaining -= this_part;
- done += this_part;
+ if (retval < 0) {
+ usb_kill_anchored_urbs(&file_data->submitted);
+
+ dev_err(&data->intf->dev,
+ "Unable to send data, error %d\n", (int)retval);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_out(data);
+ goto exit;
}
- retval = count;
+ retval = done;
exit:
+ usb_free_urb(urb);
mutex_unlock(&data->io_mutex);
- kfree(buffer);
return retval;
}
static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
{
- struct usb_host_interface *current_setting;
- struct usb_endpoint_descriptor *desc;
struct device *dev;
u8 *buffer;
int rv;
int n;
int actual = 0;
- int max_size;
dev = &data->intf->dev;
dev_dbg(dev, "Sending INITIATE_CLEAR request\n");
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+ buffer = kmalloc(USBTMC_BUFSIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -947,7 +1635,7 @@ static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INITIATE_CLEAR,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 1, USBTMC_TIMEOUT);
+ 0, 0, buffer, 1, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
@@ -961,22 +1649,6 @@ static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
goto exit;
}
- max_size = 0;
- current_setting = data->intf->cur_altsetting;
- for (n = 0; n < current_setting->desc.bNumEndpoints; n++) {
- desc = &current_setting->endpoint[n].desc;
- if (desc->bEndpointAddress == data->bulk_in)
- max_size = usb_endpoint_maxp(desc);
- }
-
- if (max_size == 0) {
- dev_err(dev, "Couldn't get wMaxPacketSize\n");
- rv = -EPERM;
- goto exit;
- }
-
- dev_dbg(dev, "wMaxPacketSize is %d\n", max_size);
-
n = 0;
usbtmc_clear_check_status:
@@ -987,7 +1659,7 @@ usbtmc_clear_check_status:
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_CHECK_CLEAR_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 2, USBTMC_TIMEOUT);
+ 0, 0, buffer, 2, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
@@ -1004,15 +1676,20 @@ usbtmc_clear_check_status:
goto exit;
}
- if (buffer[1] == 1)
+ if ((buffer[1] & 1) != 0) {
do {
dev_dbg(dev, "Reading from bulk in EP\n");
+ actual = 0;
rv = usb_bulk_msg(data->usb_dev,
usb_rcvbulkpipe(data->usb_dev,
data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER,
- &actual, USBTMC_TIMEOUT);
+ buffer, USBTMC_BUFSIZE,
+ &actual, USB_CTRL_GET_TIMEOUT);
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, actual, true);
+
n++;
if (rv < 0) {
@@ -1020,10 +1697,15 @@ usbtmc_clear_check_status:
rv);
goto exit;
}
- } while ((actual == max_size) &&
+ } while ((actual == USBTMC_BUFSIZE) &&
(n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
+ } else {
+ /* do not stress device with subsequent requests */
+ msleep(50);
+ n++;
+ }
- if (actual == max_size) {
+ if (n >= USBTMC_MAX_READS_TO_CLEAR_BULK_IN) {
dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
rv = -EPERM;
@@ -1037,7 +1719,7 @@ usbtmc_clear_bulk_out_halt:
rv = usb_clear_halt(data->usb_dev,
usb_sndbulkpipe(data->usb_dev, data->bulk_out));
if (rv < 0) {
- dev_err(dev, "usb_control_msg returned %d\n", rv);
+ dev_err(dev, "usb_clear_halt returned %d\n", rv);
goto exit;
}
rv = 0;
@@ -1054,12 +1736,9 @@ static int usbtmc_ioctl_clear_out_halt(struct usbtmc_device_data *data)
rv = usb_clear_halt(data->usb_dev,
usb_sndbulkpipe(data->usb_dev, data->bulk_out));
- if (rv < 0) {
- dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
- rv);
- return rv;
- }
- return 0;
+ if (rv < 0)
+ dev_err(&data->usb_dev->dev, "%s returned %d\n", __func__, rv);
+ return rv;
}
static int usbtmc_ioctl_clear_in_halt(struct usbtmc_device_data *data)
@@ -1069,11 +1748,33 @@ static int usbtmc_ioctl_clear_in_halt(struct usbtmc_device_data *data)
rv = usb_clear_halt(data->usb_dev,
usb_rcvbulkpipe(data->usb_dev, data->bulk_in));
- if (rv < 0) {
- dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
- rv);
- return rv;
- }
+ if (rv < 0)
+ dev_err(&data->usb_dev->dev, "%s returned %d\n", __func__, rv);
+ return rv;
+}
+
+static int usbtmc_ioctl_cancel_io(struct usbtmc_file_data *file_data)
+{
+ spin_lock_irq(&file_data->err_lock);
+ file_data->in_status = -ECANCELED;
+ file_data->out_status = -ECANCELED;
+ spin_unlock_irq(&file_data->err_lock);
+ usb_kill_anchored_urbs(&file_data->submitted);
+ return 0;
+}
+
+static int usbtmc_ioctl_cleanup_io(struct usbtmc_file_data *file_data)
+{
+ usb_kill_anchored_urbs(&file_data->submitted);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+ spin_lock_irq(&file_data->err_lock);
+ file_data->in_status = 0;
+ file_data->in_transfer_size = 0;
+ file_data->out_status = 0;
+ file_data->out_transfer_size = 0;
+ spin_unlock_irq(&file_data->err_lock);
+
+ file_data->in_urbs_used = 0;
return 0;
}
@@ -1090,7 +1791,7 @@ static int get_capabilities(struct usbtmc_device_data *data)
rv = usb_control_msg(data->usb_dev, usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_GET_CAPABILITIES,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 0x18, USBTMC_TIMEOUT);
+ 0, 0, buffer, 0x18, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto err_out;
@@ -1147,72 +1848,6 @@ static const struct attribute_group capability_attr_grp = {
.attrs = capability_attrs,
};
-static ssize_t TermChar_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbtmc_device_data *data = usb_get_intfdata(intf);
-
- return sprintf(buf, "%c\n", data->TermChar);
-}
-
-static ssize_t TermChar_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbtmc_device_data *data = usb_get_intfdata(intf);
-
- if (count < 1)
- return -EINVAL;
- data->TermChar = buf[0];
- return count;
-}
-static DEVICE_ATTR_RW(TermChar);
-
-#define data_attribute(name) \
-static ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct usb_interface *intf = to_usb_interface(dev); \
- struct usbtmc_device_data *data = usb_get_intfdata(intf); \
- \
- return sprintf(buf, "%d\n", data->name); \
-} \
-static ssize_t name##_store(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct usb_interface *intf = to_usb_interface(dev); \
- struct usbtmc_device_data *data = usb_get_intfdata(intf); \
- ssize_t result; \
- unsigned val; \
- \
- result = sscanf(buf, "%u\n", &val); \
- if (result != 1) \
- result = -EINVAL; \
- data->name = val; \
- if (result < 0) \
- return result; \
- else \
- return count; \
-} \
-static DEVICE_ATTR_RW(name)
-
-data_attribute(TermCharEnabled);
-data_attribute(auto_abort);
-
-static struct attribute *data_attrs[] = {
- &dev_attr_TermChar.attr,
- &dev_attr_TermCharEnabled.attr,
- &dev_attr_auto_abort.attr,
- NULL,
-};
-
-static const struct attribute_group data_attr_grp = {
- .attrs = data_attrs,
-};
-
static int usbtmc_ioctl_indicator_pulse(struct usbtmc_device_data *data)
{
struct device *dev;
@@ -1229,7 +1864,7 @@ static int usbtmc_ioctl_indicator_pulse(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INDICATOR_PULSE,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 0x01, USBTMC_TIMEOUT);
+ 0, 0, buffer, 0x01, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -1250,6 +1885,63 @@ exit:
return rv;
}
+static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
+ void __user *arg)
+{
+ struct device *dev = &data->intf->dev;
+ struct usbtmc_ctrlrequest request;
+ u8 *buffer = NULL;
+ int rv;
+ unsigned long res;
+
+ res = copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest));
+ if (res)
+ return -EFAULT;
+
+ if (request.req.wLength > USBTMC_BUFSIZE)
+ return -EMSGSIZE;
+
+ if (request.req.wLength) {
+ buffer = kmalloc(request.req.wLength, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ if ((request.req.bRequestType & USB_DIR_IN) == 0) {
+ /* Send control data to device */
+ res = copy_from_user(buffer, request.data,
+ request.req.wLength);
+ if (res) {
+ rv = -EFAULT;
+ goto exit;
+ }
+ }
+ }
+
+ rv = usb_control_msg(data->usb_dev,
+ usb_rcvctrlpipe(data->usb_dev, 0),
+ request.req.bRequest,
+ request.req.bRequestType,
+ request.req.wValue,
+ request.req.wIndex,
+ buffer, request.req.wLength, USB_CTRL_GET_TIMEOUT);
+
+ if (rv < 0) {
+ dev_err(dev, "%s failed %d\n", __func__, rv);
+ goto exit;
+ }
+
+ if (rv && (request.req.bRequestType & USB_DIR_IN)) {
+ /* Read control data from device */
+ res = copy_to_user(request.data, buffer, rv);
+ if (res)
+ rv = -EFAULT;
+ }
+
+ exit:
+ kfree(buffer);
+ return rv;
+}
+
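For context, a minimal userspace sketch of driving the new USBTMC_IOCTL_CTRL_REQUEST pass-through (a hedged example: it assumes the usbtmc_ctrlrequest layout exported to uapi by this series in <linux/usb/tmc.h>; the device path and the 0x18-byte GET_CAPABILITIES reply size follow the driver code above):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/usb/tmc.h>

	int main(void)
	{
		unsigned char caps[0x18] = { 0 };
		struct usbtmc_ctrlrequest req = {
			.req.bRequestType = 0xa1,	/* IN | class | interface */
			.req.bRequest	  = 7,		/* USBTMC GET_CAPABILITIES */
			.req.wLength	  = sizeof(caps),
			.data		  = caps,
		};
		int fd = open("/dev/usbtmc0", O_RDWR);

		if (fd < 0 || ioctl(fd, USBTMC_IOCTL_CTRL_REQUEST, &req) < 0) {
			perror("usbtmc");
			return 1;
		}
		printf("USBTMC_status=0x%02x\n", caps[0]);
		return 0;
	}

Because bRequestType has USB_DIR_IN set, the driver copies the device's reply back through req.data, as in the copy_to_user() branch above.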
/*
* Get the usb timeout value
*/
@@ -1331,6 +2023,7 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
int retval = -EBADRQC;
+ __u8 tmp_byte;
file_data = file->private_data;
data = file_data->data;
@@ -1366,6 +2059,10 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
retval = usbtmc_ioctl_abort_bulk_in(data);
break;
+ case USBTMC_IOCTL_CTRL_REQUEST:
+ retval = usbtmc_ioctl_request(data, (void __user *)arg);
+ break;
+
case USBTMC_IOCTL_GET_TIMEOUT:
retval = usbtmc_ioctl_get_timeout(file_data,
(void __user *)arg);
@@ -1386,12 +2083,29 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
(void __user *)arg);
break;
+ case USBTMC_IOCTL_WRITE:
+ retval = usbtmc_ioctl_generic_write(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_READ:
+ retval = usbtmc_ioctl_generic_read(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_WRITE_RESULT:
+ retval = usbtmc_ioctl_write_result(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_API_VERSION:
+ retval = put_user(USBTMC_API_VERSION,
+ (__u32 __user *)arg);
+ break;
+
case USBTMC488_IOCTL_GET_CAPS:
- retval = copy_to_user((void __user *)arg,
- &data->usb488_caps,
- sizeof(data->usb488_caps));
- if (retval)
- retval = -EFAULT;
+ retval = put_user(data->usb488_caps,
+ (unsigned char __user *)arg);
break;
case USBTMC488_IOCTL_READ_STB:
@@ -1417,6 +2131,30 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case USBTMC488_IOCTL_TRIGGER:
retval = usbtmc488_ioctl_trigger(file_data);
break;
+
+ case USBTMC488_IOCTL_WAIT_SRQ:
+ retval = usbtmc488_ioctl_wait_srq(file_data,
+ (__u32 __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_MSG_IN_ATTR:
+ retval = put_user(file_data->bmTransferAttributes,
+ (__u8 __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_AUTO_ABORT:
+ retval = get_user(tmp_byte, (unsigned char __user *)arg);
+ if (retval == 0)
+ file_data->auto_abort = !!tmp_byte;
+ break;
+
+ case USBTMC_IOCTL_CANCEL_IO:
+ retval = usbtmc_ioctl_cancel_io(file_data);
+ break;
+
+ case USBTMC_IOCTL_CLEANUP_IO:
+ retval = usbtmc_ioctl_cleanup_io(file_data);
+ break;
}
skip_io_on_zombie:
@@ -1446,7 +2184,28 @@ static __poll_t usbtmc_poll(struct file *file, poll_table *wait)
poll_wait(file, &data->waitq, wait);
- mask = (atomic_read(&file_data->srq_asserted)) ? EPOLLPRI : 0;
+ /* Note that EPOLLPRI is now assigned to SRQ, and
+ * EPOLLIN|EPOLLRDNORM to normal read data.
+ */
+ mask = 0;
+ if (atomic_read(&file_data->srq_asserted))
+ mask |= EPOLLPRI;
+
+ /* Note that the 'submitted' anchor includes all URBs for BULK IN
+ * and OUT. So EPOLLOUT is signaled when BULK OUT is empty and
+ * all BULK IN URBs are completed and moved to in_anchor.
+ */
+ if (usb_anchor_empty(&file_data->submitted))
+ mask |= (EPOLLOUT | EPOLLWRNORM);
+ if (!usb_anchor_empty(&file_data->in_anchor))
+ mask |= (EPOLLIN | EPOLLRDNORM);
+
+ spin_lock_irq(&file_data->err_lock);
+ if (file_data->in_status || file_data->out_status)
+ mask |= EPOLLERR;
+ spin_unlock_irq(&file_data->err_lock);
+
+ dev_dbg(&data->intf->dev, "poll mask = %x\n", mask);
no_poll:
mutex_unlock(&data->io_mutex);
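A rough userspace sketch of the poll() contract established above (the helper names are hypothetical; only the event-bit mapping follows from the code):

	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT | POLLPRI };

	if (poll(&pfd, 1, timeout_ms) > 0) {
		if (pfd.revents & POLLPRI)	/* instrument asserted SRQ */
			handle_srq(fd);
		if (pfd.revents & POLLIN)	/* completed BULK IN data available */
			collect_read_data(fd);
		if (pfd.revents & POLLOUT)	/* all submitted URBs have completed */
			queue_next_transfer(fd);
		if (pfd.revents & POLLERR)	/* an async transfer failed */
			recover(fd);
	}

USBTMC_IOCTL_CANCEL_IO and USBTMC_IOCTL_CLEANUP_IO from earlier in this patch are the natural recovery path once POLLERR is seen.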
@@ -1459,6 +2218,7 @@ static const struct file_operations fops = {
.write = usbtmc_write,
.open = usbtmc_open,
.release = usbtmc_release,
+ .flush = usbtmc_flush,
.unlocked_ioctl = usbtmc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = usbtmc_ioctl,
@@ -1552,7 +2312,9 @@ static void usbtmc_free_int(struct usbtmc_device_data *data)
return;
usb_kill_urb(data->iin_urb);
kfree(data->iin_buffer);
+ data->iin_buffer = NULL;
usb_free_urb(data->iin_urb);
+ data->iin_urb = NULL;
kref_put(&data->kref, usbtmc_delete);
}
@@ -1585,8 +2347,6 @@ static int usbtmc_probe(struct usb_interface *intf,
/* Initialize USBTMC bTag and other fields */
data->bTag = 1;
- data->TermCharEnabled = 0;
- data->TermChar = '\n';
/* 2 <= bTag <= 127 USBTMC-USB488 subclass specification 4.3.1 */
data->iin_bTag = 2;
@@ -1603,6 +2363,7 @@ static int usbtmc_probe(struct usb_interface *intf,
}
data->bulk_in = bulk_in->bEndpointAddress;
+ data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);
data->bulk_out = bulk_out->bEndpointAddress;
@@ -1659,12 +2420,10 @@ static int usbtmc_probe(struct usb_interface *intf,
}
}
- retcode = sysfs_create_group(&intf->dev.kobj, &data_attr_grp);
-
retcode = usb_register_dev(intf, &usbtmc_class);
if (retcode) {
- dev_err(&intf->dev, "Not able to get a minor"
- " (base %u, slice default): %d\n", USBTMC_MINOR_BASE,
+ dev_err(&intf->dev, "Not able to get a minor (base %u, slice default): %d\n",
+ USBTMC_MINOR_BASE,
retcode);
goto error_register;
}
@@ -1674,7 +2433,6 @@ static int usbtmc_probe(struct usb_interface *intf,
error_register:
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
- sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
usbtmc_free_int(data);
err_put:
kref_put(&data->kref, usbtmc_delete);
@@ -1684,26 +2442,103 @@ err_put:
static void usbtmc_disconnect(struct usb_interface *intf)
{
struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ struct list_head *elem;
usb_deregister_dev(intf, &usbtmc_class);
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
- sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
mutex_lock(&data->io_mutex);
data->zombie = 1;
wake_up_interruptible_all(&data->waitq);
+ list_for_each(elem, &data->file_list) {
+ struct usbtmc_file_data *file_data;
+
+ file_data = list_entry(elem,
+ struct usbtmc_file_data,
+ file_elem);
+ usb_kill_anchored_urbs(&file_data->submitted);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+ }
mutex_unlock(&data->io_mutex);
usbtmc_free_int(data);
kref_put(&data->kref, usbtmc_delete);
}
+static void usbtmc_draw_down(struct usbtmc_file_data *file_data)
+{
+ int time;
+
+ time = usb_wait_anchor_empty_timeout(&file_data->submitted, 1000);
+ if (!time)
+ usb_kill_anchored_urbs(&file_data->submitted);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+}
+
static int usbtmc_suspend(struct usb_interface *intf, pm_message_t message)
{
- /* this driver does not have pending URBs */
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ struct list_head *elem;
+
+ if (!data)
+ return 0;
+
+ mutex_lock(&data->io_mutex);
+ list_for_each(elem, &data->file_list) {
+ struct usbtmc_file_data *file_data;
+
+ file_data = list_entry(elem,
+ struct usbtmc_file_data,
+ file_elem);
+ usbtmc_draw_down(file_data);
+ }
+
+ if (data->iin_ep_present && data->iin_urb)
+ usb_kill_urb(data->iin_urb);
+
+ mutex_unlock(&data->io_mutex);
return 0;
}
static int usbtmc_resume(struct usb_interface *intf)
{
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ int retcode = 0;
+
+ if (data->iin_ep_present && data->iin_urb)
+ retcode = usb_submit_urb(data->iin_urb, GFP_KERNEL);
+ if (retcode)
+ dev_err(&intf->dev, "Failed to submit iin_urb\n");
+
+ return retcode;
+}
+
+static int usbtmc_pre_reset(struct usb_interface *intf)
+{
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ struct list_head *elem;
+
+ if (!data)
+ return 0;
+
+ mutex_lock(&data->io_mutex);
+
+ list_for_each(elem, &data->file_list) {
+ struct usbtmc_file_data *file_data;
+
+ file_data = list_entry(elem,
+ struct usbtmc_file_data,
+ file_elem);
+ usbtmc_ioctl_cancel_io(file_data);
+ }
+
+ return 0;
+}
+
+static int usbtmc_post_reset(struct usb_interface *intf)
+{
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+
+ mutex_unlock(&data->io_mutex);
+
return 0;
}
@@ -1714,6 +2549,8 @@ static struct usb_driver usbtmc_driver = {
.disconnect = usbtmc_disconnect,
.suspend = usbtmc_suspend,
.resume = usbtmc_resume,
+ .pre_reset = usbtmc_pre_reset,
+ .post_reset = usbtmc_post_reset,
};
module_usb_driver(usbtmc_driver);
diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c
index 15cc76e22123..99116af07f1d 100644
--- a/drivers/usb/common/roles.c
+++ b/drivers/usb/common/roles.c
@@ -109,8 +109,15 @@ static void *usb_role_switch_match(struct device_connection *con, int ep,
*/
struct usb_role_switch *usb_role_switch_get(struct device *dev)
{
- return device_connection_find_match(dev, "usb-role-switch", NULL,
- usb_role_switch_match);
+ struct usb_role_switch *sw;
+
+ sw = device_connection_find_match(dev, "usb-role-switch", NULL,
+ usb_role_switch_match);
+
+ if (!IS_ERR_OR_NULL(sw))
+ WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+
+ return sw;
}
EXPORT_SYMBOL_GPL(usb_role_switch_get);
@@ -122,8 +129,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get);
*/
void usb_role_switch_put(struct usb_role_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
put_device(&sw->dev);
+ module_put(sw->dev.parent->driver->owner);
+ }
}
EXPORT_SYMBOL_GPL(usb_role_switch_put);
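With the module reference now taken in usb_role_switch_get(), a consumer can safely call into the switch driver for as long as it holds the handle. A minimal hypothetical consumer sketch:

	static int example_force_device_role(struct device *dev)
	{
		struct usb_role_switch *sw;
		int ret;

		/* also pins the switch parent's driver module now */
		sw = usb_role_switch_get(dev);
		if (IS_ERR_OR_NULL(sw))
			return PTR_ERR_OR_ZERO(sw);

		ret = usb_role_switch_set_role(sw, USB_ROLE_DEVICE);

		/* drops both the device and the module reference */
		usb_role_switch_put(sw);
		return ret;
	}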
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 77eef8acff94..f641342cdec0 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -101,12 +101,8 @@ void hcd_buffer_destroy(struct usb_hcd *hcd)
return;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
- struct dma_pool *pool = hcd->pool[i];
-
- if (pool) {
- dma_pool_destroy(pool);
- hcd->pool[i] = NULL;
- }
+ dma_pool_destroy(hcd->pool[i]);
+ hcd->pool[i] = NULL;
}
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6ce77b33da61..a75bc0b8a50f 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -582,7 +582,7 @@ static void async_completed(struct urb *urb)
{
struct async *as = urb->context;
struct usb_dev_state *ps = as->ps;
- struct siginfo sinfo;
+ struct kernel_siginfo sinfo;
struct pid *pid = NULL;
const struct cred *cred = NULL;
unsigned long flags;
@@ -1434,10 +1434,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
struct async *as = NULL;
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
- int i, ret, is_in, num_sgs = 0, ifnum = -1;
+ int i, ret, num_sgs = 0, ifnum = -1;
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
+ bool is_in;
+ bool allow_short = false;
+ bool allow_zero = false;
unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
@@ -1500,6 +1503,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
is_in = 0;
uurb->endpoint &= ~USB_DIR_IN;
}
+ if (is_in)
+ allow_short = true;
snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
@@ -1511,6 +1516,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
break;
case USBDEVFS_URB_TYPE_BULK:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_ISOC:
@@ -1531,6 +1540,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
if (!usb_endpoint_xfer_int(&ep->desc))
return -EINVAL;
interrupt_urb:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
break;
case USBDEVFS_URB_TYPE_ISO:
@@ -1676,14 +1689,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
u |= URB_ISO_ASAP;
- if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+ if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
u |= URB_SHORT_NOT_OK;
- if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
u |= URB_ZERO_PACKET;
if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
u |= URB_NO_INTERRUPT;
as->urb->transfer_flags = u;
+ if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+ if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
as->urb->transfer_buffer_length = uurb->buffer_length;
as->urb->setup_packet = (unsigned char *)dr;
dr = NULL;
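To illustrate the flag policy from userspace, a hedged sketch of a bulk OUT submission where USBDEVFS_URB_ZERO_PACKET is honoured (the flag is only meaningful for OUT transfers; on an IN endpoint the kernel now emits the "nonsensical" warning added above instead):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/usbdevice_fs.h>

	static int submit_bulk_out(int fd, unsigned char ep, void *buf, int len)
	{
		struct usbdevfs_urb urb;

		memset(&urb, 0, sizeof(urb));
		urb.type = USBDEVFS_URB_TYPE_BULK;
		urb.endpoint = ep;			/* OUT: USB_DIR_IN clear */
		urb.flags = USBDEVFS_URB_ZERO_PACKET;	/* terminate with a ZLP */
		urb.buffer = buf;
		urb.buffer_length = len;
		return ioctl(fd, USBDEVFS_SUBMITURB, &urb);
	}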
@@ -2599,7 +2617,7 @@ const struct file_operations usbdev_file_operations = {
static void usbdev_remove(struct usb_device *udev)
{
struct usb_dev_state *ps;
- struct siginfo sinfo;
+ struct kernel_siginfo sinfo;
while (!list_empty(&udev->filelist)) {
ps = list_entry(udev->filelist.next, struct usb_dev_state, list);
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e76e95f62f76..53564386ed57 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -510,9 +510,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
struct usb_interface *iface, void *priv)
{
struct device *dev;
- struct usb_device *udev;
int retval = 0;
- int lpm_disable_error = -ENODEV;
if (!iface)
return -ENODEV;
@@ -525,24 +523,12 @@ int usb_driver_claim_interface(struct usb_driver *driver,
if (!iface->authorized)
return -ENODEV;
- udev = interface_to_usbdev(iface);
-
dev->driver = &driver->drvwrap.driver;
usb_set_intfdata(iface, priv);
iface->needs_binding = 0;
iface->condition = USB_INTERFACE_BOUND;
- /* See the comment about disabling LPM in usb_probe_interface(). */
- if (driver->disable_hub_initiated_lpm) {
- lpm_disable_error = usb_unlocked_disable_lpm(udev);
- if (lpm_disable_error) {
- dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n",
- __func__, driver->name);
- return -ENOMEM;
- }
- }
-
/* Claimed interfaces are initially inactive (suspended) and
* runtime-PM-enabled, but only if the driver has autosuspend
* support. Otherwise they are marked active, to prevent the
@@ -561,9 +547,20 @@ int usb_driver_claim_interface(struct usb_driver *driver,
if (device_is_registered(dev))
retval = device_bind_driver(dev);
- /* Attempt to re-enable USB3 LPM, if the disable was successful. */
- if (!lpm_disable_error)
- usb_unlocked_enable_lpm(udev);
+ if (retval) {
+ dev->driver = NULL;
+ usb_set_intfdata(iface, NULL);
+ iface->needs_remote_wakeup = 0;
+ iface->condition = USB_INTERFACE_UNBOUND;
+
+ /*
+ * Unbound interfaces are always runtime-PM-disabled
+ * and runtime-PM-suspended
+ */
+ if (driver->supports_autosuspend)
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ }
return retval;
}
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index bc8242bc4564..356b05c82dbc 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -21,6 +21,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <uapi/linux/usb/audio.h>
#include "usb.h"
static inline const char *plural(int n)
@@ -42,6 +43,16 @@ static int is_activesync(struct usb_interface_descriptor *desc)
&& desc->bInterfaceProtocol == 1;
}
+static bool is_audio(struct usb_interface_descriptor *desc)
+{
+ return desc->bInterfaceClass == USB_CLASS_AUDIO;
+}
+
+static bool is_uac3_config(struct usb_interface_descriptor *desc)
+{
+ return desc->bInterfaceProtocol == UAC_VERSION_3;
+}
+
int usb_choose_configuration(struct usb_device *udev)
{
int i;
@@ -121,6 +132,22 @@ int usb_choose_configuration(struct usb_device *udev)
#endif
}
+ /*
+ * Select the first configuration as the default for audio devices,
+ * so that devices that don't comply with the UAC3 protocol are
+ * still supported. But keep iterating through the remaining
+ * configurations and select a UAC3-compliant config if one is
+ * present.
+ */
+ if (i == 0 && num_configs > 1 && desc && is_audio(desc)) {
+ best = c;
+ continue;
+ }
+
+ if (i > 0 && desc && is_audio(desc) && is_uac3_config(desc)) {
+ best = c;
+ break;
+ }
+
/* From the remaining configs, choose the first one whose
* first interface is for a non-vendor-specific class.
* Reason: Linux is more likely to have a class driver
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 1c21955fe7c0..487025d31d44 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1738,7 +1738,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
struct usb_anchor *anchor = urb->anchor;
int status = urb->unlinked;
- unsigned long flags;
urb->hcpriv = NULL;
if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
@@ -1755,20 +1754,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
-
- /*
- * We disable local IRQs here avoid possible deadlock because
- * drivers may call spin_lock() to hold lock which might be
- * acquired in one hard interrupt handler.
- *
- * The local_irq_save()/local_irq_restore() around complete()
- * will be removed if current USB drivers have been cleaned up
- * and no one may trigger the above deadlock situation when
- * running complete() in tasklet.
- */
- local_irq_save(flags);
urb->complete(urb);
- local_irq_restore(flags);
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 462ce49f683a..c6077d582d29 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -28,6 +28,7 @@
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/pm_qos.h>
+#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
@@ -2660,11 +2661,13 @@ static bool use_new_scheme(struct usb_device *udev, int retry,
{
int old_scheme_first_port =
port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME;
+ int quick_enumeration = (udev->speed == USB_SPEED_HIGH);
if (udev->speed >= USB_SPEED_SUPER)
return false;
- return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first);
+ return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first
+ || quick_enumeration);
}
/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
@@ -5147,6 +5150,42 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
usb_lock_port(port_dev);
}
+/* Handle notifying userspace about hub over-current events */
+static void port_over_current_notify(struct usb_port *port_dev)
+{
+ static char *envp[] = { NULL, NULL, NULL };
+ struct device *hub_dev;
+ char *port_dev_path;
+
+ sysfs_notify(&port_dev->dev.kobj, NULL, "over_current_count");
+
+ hub_dev = port_dev->dev.parent;
+
+ if (!hub_dev)
+ return;
+
+ port_dev_path = kobject_get_path(&port_dev->dev.kobj, GFP_KERNEL);
+ if (!port_dev_path)
+ return;
+
+ envp[0] = kasprintf(GFP_KERNEL, "OVER_CURRENT_PORT=%s", port_dev_path);
+ if (!envp[0])
+ goto exit_path;
+
+ envp[1] = kasprintf(GFP_KERNEL, "OVER_CURRENT_COUNT=%u",
+ port_dev->over_current_count);
+ if (!envp[1])
+ goto exit;
+
+ kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
+
+ kfree(envp[1]);
+exit:
+ kfree(envp[0]);
+exit_path:
+ kfree(port_dev_path);
+}
+
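Userspace can pick these events up from the hub device's KOBJ_CHANGE uevent. A hypothetical libudev monitor sketch (assumes libudev is available; link with -ludev, run with permission to open the kernel netlink socket):

	#include <libudev.h>
	#include <poll.h>
	#include <stdio.h>

	int main(void)
	{
		struct udev *udev = udev_new();
		struct udev_monitor *mon =
			udev_monitor_new_from_netlink(udev, "kernel");
		struct pollfd pfd;

		udev_monitor_filter_add_match_subsystem_devtype(mon, "usb", NULL);
		udev_monitor_enable_receiving(mon);
		pfd.fd = udev_monitor_get_fd(mon);
		pfd.events = POLLIN;

		for (;;) {
			struct udev_device *dev;
			const char *port, *count;

			if (poll(&pfd, 1, -1) <= 0)
				continue;
			dev = udev_monitor_receive_device(mon);
			if (!dev)
				continue;
			port = udev_device_get_property_value(dev, "OVER_CURRENT_PORT");
			count = udev_device_get_property_value(dev, "OVER_CURRENT_COUNT");
			if (port && count)
				printf("over-current #%s on %s\n", count, port);
			udev_device_unref(dev);
		}
	}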
static void port_event(struct usb_hub *hub, int port1)
__must_hold(&port_dev->status_lock)
{
@@ -5189,6 +5228,7 @@ static void port_event(struct usb_hub *hub, int port1)
if (portchange & USB_PORT_STAT_C_OVERCURRENT) {
u16 status = 0, unused;
port_dev->over_current_count++;
+ port_over_current_notify(port_dev);
dev_dbg(&port_dev->dev, "over-current change #%u\n",
port_dev->over_current_count);
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index 9879767452a2..38b2c776c4b4 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -23,10 +23,11 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index,
struct list_head *list)
{
struct usb_phy_roothub *roothub_entry;
- struct phy *phy = devm_of_phy_get_by_index(dev, dev->of_node, index);
+ struct phy *phy;
- if (IS_ERR_OR_NULL(phy)) {
- if (!phy || PTR_ERR(phy) == -ENODEV)
+ phy = devm_of_phy_get_by_index(dev, dev->of_node, index);
+ if (IS_ERR(phy)) {
+ if (PTR_ERR(phy) == -ENODEV)
return 0;
else
return PTR_ERR(phy);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 4a2143195395..1a06a4b5fbb1 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -16,6 +16,15 @@ static int usb_port_block_power_off;
static const struct attribute_group *port_dev_group[];
+static ssize_t location_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+
+ return sprintf(buf, "0x%08x\n", port_dev->location);
+}
+static DEVICE_ATTR_RO(location);
+
static ssize_t connect_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -140,6 +149,7 @@ static DEVICE_ATTR_RW(usb3_lpm_permit);
static struct attribute *port_dev_attrs[] = {
&dev_attr_connect_type.attr,
+ &dev_attr_location.attr,
&dev_attr_quirks.attr,
&dev_attr_over_current_count.attr,
NULL,
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index e77dfe5ed5ec..178d6c6063c0 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -58,6 +58,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry),
GFP_KERNEL);
if (!quirk_list) {
+ quirk_count = 0;
mutex_unlock(&quirk_mutex);
return -ENOMEM;
}
@@ -154,7 +155,7 @@ static struct kparam_string quirks_param_string = {
.string = quirks_param,
};
-module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
+device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks");
/* Lists of quirky USB devices, split in device quirks and interface quirks.
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 623be3174fb3..79d8bd7a612e 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting(
struct usb_interface_cache *intf_cache = NULL;
int i;
+ if (!config)
+ return NULL;
for (i = 0; i < config->desc.bNumInterfaces; i++) {
if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
== iface_num) {
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index cc9c93affa14..30bab8463c96 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -393,6 +393,20 @@ enum dwc2_ep0_state {
* 0 - No
* 1 - Yes
* @hird_threshold: Value of BESL or HIRD Threshold.
+ * @ref_clk_per: Period of ref_clk in picoseconds
+ * (period in ps = 10^12 / frequency in Hz).
+ * 62500 - 16MHz
+ * 58823 - 17MHz
+ * 52083 - 19.2MHz
+ * 50000 - 20MHz
+ * 41666 - 24MHz
+ * 33333 - 30MHz (default)
+ * 25000 - 40MHz
+ * @sof_cnt_wkup_alert: Number of SOFs after which the controller
+ * should generate an interrupt if the device
+ * has stayed in L1 state for that whole period.
+ * This is used by SW to initiate remote wakeup in the
+ * controller so as to sync to the uF number from the host.
* @activate_stm_fs_transceiver: Activate internal transceiver using GGPIO
* register.
* 0 - Deactivate the transceiver (default)
@@ -416,6 +430,9 @@ enum dwc2_ep0_state {
* back to DWC2_SPEED_PARAM_HIGH while device is gone.
* 0 - No (default)
* 1 - Yes
+ * @service_interval: Enable service interval based scheduling.
+ * 0 - No
+ * 1 - Yes
*
* The following parameters may be specified when starting the module. These
* parameters define how the DWC_otg controller should be configured. A
@@ -461,6 +478,7 @@ struct dwc2_core_params {
bool lpm_clock_gating;
bool besl;
bool hird_threshold_en;
+ bool service_interval;
u8 hird_threshold;
bool activate_stm_fs_transceiver;
bool ipg_isoc_en;
@@ -468,6 +486,10 @@ struct dwc2_core_params {
u32 max_transfer_size;
u32 ahbcfg;
+ /* GREFCLK parameters */
+ u32 ref_clk_per;
+ u16 sof_cnt_wkup_alert;
+
/* Host parameters */
bool host_dma;
bool dma_desc_enable;
@@ -605,6 +627,10 @@ struct dwc2_core_params {
* FIFO sizing is enabled 16 to 32768
* Actual maximum value is autodetected and also
* the default.
+ * @service_interval_mode: Indicates whether the controller supports
+ * service interval based scheduling.
+ * 0 - Not supported
+ * 1 - Supported
*/
struct dwc2_hw_params {
unsigned op_mode:3;
@@ -635,6 +661,7 @@ struct dwc2_hw_params {
unsigned utmi_phy_data_width:2;
unsigned lpm_mode:1;
unsigned ipg_isoc_en:1;
+ unsigned service_interval_mode:1;
u32 snpsid;
u32 dev_ep_dirs;
u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
@@ -1354,6 +1381,7 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg);
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg);
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg);
void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg);
+void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg);
#else
static inline int dwc2_hsotg_remove(struct dwc2_hsotg *dwc2)
{ return 0; }
@@ -1388,6 +1416,7 @@ static inline int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
static inline int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{ return 0; }
static inline void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg) {}
#endif
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 22d015b0424f..7f62f4cdc265 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -701,6 +701,7 @@ static int params_show(struct seq_file *seq, void *v)
print_param(seq, p, besl);
print_param(seq, p, hird_threshold_en);
print_param(seq, p, hird_threshold);
+ print_param(seq, p, service_interval);
print_param(seq, p, host_dma);
print_param(seq, p, g_dma);
print_param(seq, p, g_dma_desc);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 220c0f9b89b0..2d6d2c8244de 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -123,6 +123,24 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
}
/**
+ * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
+ * by one.
+ * @hs_ep: The endpoint.
+ *
+ * This function is used in the service interval based scheduling flow to
+ * calculate the descriptor frame number field value. In service interval
+ * mode the frame number in the descriptor should point to the last
+ * (u)frame in the interval.
+ *
+ */
+static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
+{
+ if (hs_ep->target_frame)
+ hs_ep->target_frame -= 1;
+ else
+ hs_ep->target_frame = DSTS_SOFFN_LIMIT;
+}
+
+/**
* dwc2_hsotg_en_gsint - enable one or more of the general interrupt
* @hsotg: The device state
* @ints: A bitmask of the interrupts to enable
@@ -228,6 +246,27 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ */
+static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts2;
+ u32 gintmsk2;
+
+ gintsts2 = dwc2_readl(hsotg, GINTSTS2);
+ gintmsk2 = dwc2_readl(hsotg, GINTMSK2);
+
+ if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
+ dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
+ dwc2_clear_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
+ dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
+ }
+}
+
+/**
* dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
* TX FIFOs
*
@@ -2812,6 +2851,23 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
if (using_desc_dma(hsotg)) {
hs_ep->target_frame = hsotg->frame_number;
dwc2_gadget_incr_frame_num(hs_ep);
+
+ /* In service interval mode target_frame must
+ * be set to last (u)frame of the service interval.
+ */
+ if (hsotg->params.service_interval) {
+ /* Round target_frame down to the first (u)frame of
+ * the service interval (~interval + 1 == -interval,
+ * which clears the low bits for a power-of-two
+ * interval)
+ */
+ hs_ep->target_frame &= ~hs_ep->interval + 1;
+
+ /* Set target_frame to the last (u)frame of
+ * the service interval
+ */
+ dwc2_gadget_incr_frame_num(hs_ep);
+ dwc2_gadget_dec_frame_num_by_one(hs_ep);
+ }
+
dwc2_gadget_start_isoc_ddma(hs_ep);
return;
}
@@ -3109,6 +3165,8 @@ static void kill_all_requests(struct dwc2_hsotg *hsotg,
dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
}
+static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
+
/**
* dwc2_hsotg_disconnect - disconnect service
* @hsotg: The device state.
@@ -3127,13 +3185,12 @@ void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
hsotg->connected = 0;
hsotg->test_mode = 0;
+ /* all endpoints should be shut down */
for (ep = 0; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
- kill_all_requests(hsotg, hsotg->eps_in[ep],
- -ESHUTDOWN);
+ dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
if (hsotg->eps_out[ep])
- kill_all_requests(hsotg, hsotg->eps_out[ep],
- -ESHUTDOWN);
+ dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
}
call_gadget(hsotg, disconnect);
@@ -3191,13 +3248,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
u32 val;
u32 usbcfg;
u32 dcfg = 0;
+ int ep;
/* Kill any ep0 requests as controller will be reinitialized */
kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
- if (!is_usb_reset)
+ if (!is_usb_reset) {
if (dwc2_core_reset(hsotg, true))
return;
+ } else {
+ /* all endpoints should be shut down */
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+ dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+ if (hsotg->eps_out[ep])
+ dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+ }
+ }
/*
* we must now enable ep0 ready for host detection and then
@@ -3312,6 +3379,10 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
}
+ /* Enable Service Interval mode if supported */
+ if (using_desc_dma(hsotg) && hsotg->params.service_interval)
+ dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED);
+
dwc2_writel(hsotg, 0, DAINTMSK);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
@@ -3368,6 +3439,10 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
/* configure the core to support LPM */
dwc2_gadget_init_lpm(hsotg);
+ /* program GREFCLK register if needed */
+ if (using_desc_dma(hsotg) && hsotg->params.service_interval)
+ dwc2_gadget_program_ref_clk(hsotg);
+
/* must be at-least 3ms to allow bus to see disconnect */
mdelay(3);
@@ -3676,6 +3751,10 @@ irq_retry:
if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
goto irq_retry;
+ /* Check WKUP_ALERT interrupt */
+ if (hsotg->params.service_interval)
+ dwc2_gadget_wkup_alert_handler(hsotg);
+
spin_unlock(&hsotg->lock);
return IRQ_HANDLED;
@@ -3993,6 +4072,7 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
unsigned long flags;
u32 epctrl_reg;
u32 ctrl;
+ int locked;
dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
@@ -4008,7 +4088,9 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
- spin_lock_irqsave(&hsotg->lock, flags);
+ locked = spin_is_locked(&hsotg->lock);
+ if (!locked)
+ spin_lock_irqsave(&hsotg->lock, flags);
ctrl = dwc2_readl(hsotg, epctrl_reg);
@@ -4032,7 +4114,9 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
hs_ep->fifo_index = 0;
hs_ep->fifo_size = 0;
- spin_unlock_irqrestore(&hsotg->lock, flags);
+ if (!locked)
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
return 0;
}
@@ -4944,6 +5028,29 @@ void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
dwc2_writel(hsotg, val, GLPMCFG);
dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
+
+ /* Unmask WKUP_ALERT Interrupt */
+ if (hsotg->params.service_interval)
+ dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK);
+}
+
+/**
+ * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ */
+void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
+{
+ u32 val = 0;
+
+ val |= GREFCLK_REF_CLK_MODE;
+ val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
+ val |= hsotg->params.sof_cnt_wkup_alert <<
+ GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT;
+
+ dwc2_writel(hsotg, val, GREFCLK);
+ dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
}
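The REFCLKPER field is simply the ref_clk period in picoseconds. A hypothetical helper showing how platform glue could derive it from a struct clk instead of hard-coding the table above (function name is an assumption, not part of this series):

	static u32 dwc2_ref_clk_period_ps(struct clk *ref_clk)
	{
		unsigned long rate = clk_get_rate(ref_clk);

		if (!rate)
			return 33333;	/* fall back to the 30 MHz default */

		/* period[ps] = 10^12 / rate[Hz] */
		return DIV_ROUND_CLOSEST_ULL(1000000000000ULL, rate);
	}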
/**
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 2bd6e6bfc241..dd82fa516f3f 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -358,16 +358,10 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
{
- int ret;
-
- hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
- if (IS_ERR(hsotg->vbus_supply)) {
- ret = PTR_ERR(hsotg->vbus_supply);
- hsotg->vbus_supply = NULL;
- return ret == -ENODEV ? 0 : ret;
- }
+ if (hsotg->vbus_supply)
+ return regulator_enable(hsotg->vbus_supply);
- return regulator_enable(hsotg->vbus_supply);
+ return 0;
}
static int dwc2_vbus_supply_exit(struct dwc2_hsotg *hsotg)
@@ -1328,14 +1322,11 @@ static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
u32 remaining_count;
u32 byte_count;
u32 dword_count;
- u32 __iomem *data_fifo;
u32 *data_buf = (u32 *)chan->xfer_buf;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
- data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
-
remaining_count = chan->xfer_len - chan->xfer_count;
if (remaining_count > chan->max_packet)
byte_count = chan->max_packet;
@@ -3564,6 +3555,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
u32 port_status;
u32 speed;
u32 pcgctl;
+ u32 pwr;
switch (typereq) {
case ClearHubFeature:
@@ -3612,8 +3604,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
hprt0 &= ~HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
+ if (pwr)
+ dwc2_vbus_supply_exit(hsotg);
break;
case USB_PORT_FEAT_INDICATOR:
@@ -3823,8 +3818,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"SetPortFeature - USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
hprt0 |= HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
+ if (!pwr)
+ dwc2_vbus_supply_init(hsotg);
break;
case USB_PORT_FEAT_RESET:
@@ -3841,6 +3839,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dwc2_writel(hsotg, 0, PCGCTL);
hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
/* Clear suspend bit if resetting from suspend state */
hprt0 &= ~HPRT0_SUSP;
@@ -3854,6 +3853,8 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"In host mode, hprt0=%08x\n", hprt0);
dwc2_writel(hsotg, hprt0, HPRT0);
+ if (!pwr)
+ dwc2_vbus_supply_init(hsotg);
}
/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
@@ -4393,6 +4394,8 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
struct usb_bus *bus = hcd_to_bus(hcd);
unsigned long flags;
+ u32 hprt0;
+ int ret;
dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
@@ -4408,6 +4411,17 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
dwc2_hcd_reinit(hsotg);
+ hprt0 = dwc2_read_hprt0(hsotg);
+ /* Has vbus power been turned on in dwc2_core_host_init? */
+ if (hprt0 & HPRT0_PWR) {
+ /* Enable external vbus supply before resuming root hub */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ ret = dwc2_vbus_supply_init(hsotg);
+ if (ret)
+ return ret;
+ spin_lock_irqsave(&hsotg->lock, flags);
+ }
+
/* Initialize and connect root hub if one is not already attached */
if (bus->root_hub) {
dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n");
@@ -4417,7 +4431,7 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
spin_unlock_irqrestore(&hsotg->lock, flags);
- return dwc2_vbus_supply_init(hsotg);
+ return 0;
}
/*
@@ -4428,6 +4442,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
unsigned long flags;
+ u32 hprt0;
/* Turn off all host-specific interrupts */
dwc2_disable_host_interrupts(hsotg);
@@ -4436,6 +4451,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
synchronize_irq(hcd->irq);
spin_lock_irqsave(&hsotg->lock, flags);
+ hprt0 = dwc2_read_hprt0(hsotg);
/* Ensure hcd is disconnected */
dwc2_hcd_disconnect(hsotg, true);
dwc2_hcd_stop(hsotg);
@@ -4444,7 +4460,9 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_unlock_irqrestore(&hsotg->lock, flags);
- dwc2_vbus_supply_exit(hsotg);
+ /* keep balanced supply init/exit by checking HPRT0_PWR */
+ if (hprt0 & HPRT0_PWR)
+ dwc2_vbus_supply_exit(hsotg);
usleep_range(1000, 3000);
}
@@ -4482,7 +4500,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
hprt0 |= HPRT0_SUSP;
hprt0 &= ~HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_vbus_supply_exit(hsotg);
+ spin_lock_irqsave(&hsotg->lock, flags);
}
/* Enter partial_power_down */
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 0ca8e7bc7aaf..2b1ea441b7d4 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -312,6 +312,7 @@
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14
#define GHWCFG4_ACG_SUPPORTED BIT(12)
#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11)
+#define GHWCFG4_SERVICE_INTERVAL_SUPPORTED BIT(10)
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2
@@ -404,6 +405,19 @@
#define ADPCTL_PRB_DSCHRG_MASK (0x3 << 0)
#define ADPCTL_PRB_DSCHRG_SHIFT 0
+#define GREFCLK HSOTG_REG(0x0064)
+#define GREFCLK_REFCLKPER_MASK (0x1ffff << 15)
+#define GREFCLK_REFCLKPER_SHIFT 15
+#define GREFCLK_REF_CLK_MODE BIT(14)
+#define GREFCLK_SOF_CNT_WKUP_ALERT_MASK (0x3ff)
+#define GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT 0
+
+#define GINTMSK2 HSOTG_REG(0x0068)
+#define GINTMSK2_WKUP_ALERT_INT_MSK BIT(0)
+
+#define GINTSTS2 HSOTG_REG(0x006c)
+#define GINTSTS2_WKUP_ALERT_INT BIT(0)
+
#define HPTXFSIZ HSOTG_REG(0x100)
/* Use FIFOSIZE_* constants to access this register */
@@ -443,6 +457,7 @@
#define DCFG_DEVSPD_FS48 3
#define DCTL HSOTG_REG(0x804)
+#define DCTL_SERVICE_INTERVAL_SUPPORTED BIT(19)
#define DCTL_PWRONPRGDONE BIT(11)
#define DCTL_CGOUTNAK BIT(10)
#define DCTL_SGOUTNAK BIT(9)
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index bf7052e037d6..7c1b6938f212 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -81,6 +81,7 @@ static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
p->host_perio_tx_fifo_size = 256;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
GAHBCFG_HBSTLEN_SHIFT;
+ p->power_down = 0;
}
static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg)
@@ -299,9 +300,12 @@ static void dwc2_set_default_params(struct dwc2_hsotg *hsotg)
p->hird_threshold_en = true;
p->hird_threshold = 4;
p->ipg_isoc_en = false;
+ p->service_interval = false;
p->max_packet_count = hw->max_packet_count;
p->max_transfer_size = hw->max_transfer_size;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR << GAHBCFG_HBSTLEN_SHIFT;
+ p->ref_clk_per = 33333;
+ p->sof_cnt_wkup_alert = 100;
if ((hsotg->dr_mode == USB_DR_MODE_HOST) ||
(hsotg->dr_mode == USB_DR_MODE_OTG)) {
@@ -592,6 +596,7 @@ static void dwc2_check_params(struct dwc2_hsotg *hsotg)
CHECK_BOOL(besl, (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a));
CHECK_BOOL(hird_threshold_en, hsotg->params.lpm);
CHECK_RANGE(hird_threshold, 0, hsotg->params.besl ? 12 : 7, 0);
+ CHECK_BOOL(service_interval, hw->service_interval_mode);
CHECK_RANGE(max_packet_count,
15, hw->max_packet_count,
hw->max_packet_count);
@@ -780,6 +785,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
hw->acg_enable = !!(hwcfg4 & GHWCFG4_ACG_SUPPORTED);
hw->ipg_isoc_en = !!(hwcfg4 & GHWCFG4_IPG_ISOC_SUPPORTED);
+ hw->service_interval_mode = !!(hwcfg4 &
+ GHWCFG4_SERVICE_INTERVAL_SUPPORTED);
/* fifo sizes */
hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 577642895b57..c0b64d483552 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -432,6 +432,14 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
return retval;
+ hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
+ if (IS_ERR(hsotg->vbus_supply)) {
+ retval = PTR_ERR(hsotg->vbus_supply);
+ hsotg->vbus_supply = NULL;
+ if (retval != -ENODEV)
+ return retval;
+ }
+
retval = dwc2_lowlevel_hw_enable(hsotg);
if (retval)
return retval;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 518ead12458d..1a0404fda596 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -113,7 +113,7 @@ config USB_DWC3_ST
config USB_DWC3_QCOM
tristate "Qualcomm Platform"
- depends on ARCH_QCOM || COMPILE_TEST
+ depends on EXTCON && (ARCH_QCOM || COMPILE_TEST)
depends on OF
default USB_DWC3
help
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 88c80fcc39f5..becfbb87f791 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -756,7 +756,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
/* check if current dwc3 is on simulation board */
if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
- dev_info(dwc->dev, "Running with FPGA optmizations\n");
+ dev_info(dwc->dev, "Running with FPGA optimizations\n");
dwc->is_fpga = true;
}
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index a94fb1ba8f2c..cb7fcd7c0ad8 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -13,80 +13,30 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/usb_phy_generic.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regulator/consumer.h>
+#define DWC3_EXYNOS_MAX_CLOCKS 4
+
+struct dwc3_exynos_driverdata {
+ const char *clk_names[DWC3_EXYNOS_MAX_CLOCKS];
+ int num_clks;
+ int suspend_clk_idx;
+};
+
struct dwc3_exynos {
- struct platform_device *usb2_phy;
- struct platform_device *usb3_phy;
struct device *dev;
- struct clk *clk;
- struct clk *susp_clk;
- struct clk *axius_clk;
+ const char **clk_names;
+ struct clk *clks[DWC3_EXYNOS_MAX_CLOCKS];
+ int num_clks;
+ int suspend_clk_idx;
struct regulator *vdd33;
struct regulator *vdd10;
};
-static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos)
-{
- struct usb_phy_generic_platform_data pdata;
- struct platform_device *pdev;
- int ret;
-
- memset(&pdata, 0x00, sizeof(pdata));
-
- pdev = platform_device_alloc("usb_phy_generic", PLATFORM_DEVID_AUTO);
- if (!pdev)
- return -ENOMEM;
-
- exynos->usb2_phy = pdev;
- pdata.type = USB_PHY_TYPE_USB2;
- pdata.gpio_reset = -1;
-
- ret = platform_device_add_data(exynos->usb2_phy, &pdata, sizeof(pdata));
- if (ret)
- goto err1;
-
- pdev = platform_device_alloc("usb_phy_generic", PLATFORM_DEVID_AUTO);
- if (!pdev) {
- ret = -ENOMEM;
- goto err1;
- }
-
- exynos->usb3_phy = pdev;
- pdata.type = USB_PHY_TYPE_USB3;
-
- ret = platform_device_add_data(exynos->usb3_phy, &pdata, sizeof(pdata));
- if (ret)
- goto err2;
-
- ret = platform_device_add(exynos->usb2_phy);
- if (ret)
- goto err2;
-
- ret = platform_device_add(exynos->usb3_phy);
- if (ret)
- goto err3;
-
- return 0;
-
-err3:
- platform_device_del(exynos->usb2_phy);
-
-err2:
- platform_device_put(exynos->usb3_phy);
-
-err1:
- platform_device_put(exynos->usb2_phy);
-
- return ret;
-}
-
static int dwc3_exynos_remove_child(struct device *dev, void *unused)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -101,47 +51,42 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
struct dwc3_exynos *exynos;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
-
- int ret;
+ const struct dwc3_exynos_driverdata *driver_data;
+ int i, ret;
exynos = devm_kzalloc(dev, sizeof(*exynos), GFP_KERNEL);
if (!exynos)
return -ENOMEM;
- platform_set_drvdata(pdev, exynos);
+ driver_data = of_device_get_match_data(dev);
+ exynos->dev = dev;
+ exynos->num_clks = driver_data->num_clks;
+ exynos->clk_names = (const char **)driver_data->clk_names;
+ exynos->suspend_clk_idx = driver_data->suspend_clk_idx;
- exynos->dev = dev;
+ platform_set_drvdata(pdev, exynos);
- exynos->clk = devm_clk_get(dev, "usbdrd30");
- if (IS_ERR(exynos->clk)) {
- dev_err(dev, "couldn't get clock\n");
- return -EINVAL;
+ for (i = 0; i < exynos->num_clks; i++) {
+ exynos->clks[i] = devm_clk_get(dev, exynos->clk_names[i]);
+ if (IS_ERR(exynos->clks[i])) {
+ dev_err(dev, "failed to get clock: %s\n",
+ exynos->clk_names[i]);
+ return PTR_ERR(exynos->clks[i]);
+ }
}
- ret = clk_prepare_enable(exynos->clk);
- if (ret)
- return ret;
- exynos->susp_clk = devm_clk_get(dev, "usbdrd30_susp_clk");
- if (IS_ERR(exynos->susp_clk))
- exynos->susp_clk = NULL;
- ret = clk_prepare_enable(exynos->susp_clk);
- if (ret)
- goto susp_clk_err;
-
- if (of_device_is_compatible(node, "samsung,exynos7-dwusb3")) {
- exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
- if (IS_ERR(exynos->axius_clk)) {
- dev_err(dev, "no AXI UpScaler clk specified\n");
- ret = -ENODEV;
- goto axius_clk_err;
+ for (i = 0; i < exynos->num_clks; i++) {
+ ret = clk_prepare_enable(exynos->clks[i]);
+ if (ret) {
+ while (--i >= 0)
+ clk_disable_unprepare(exynos->clks[i]);
+ return ret;
}
- ret = clk_prepare_enable(exynos->axius_clk);
- if (ret)
- goto axius_clk_err;
- } else {
- exynos->axius_clk = NULL;
}
+ if (exynos->suspend_clk_idx >= 0)
+ clk_prepare_enable(exynos->clks[exynos->suspend_clk_idx]);
+
exynos->vdd33 = devm_regulator_get(dev, "vdd33");
if (IS_ERR(exynos->vdd33)) {
ret = PTR_ERR(exynos->vdd33);
@@ -164,12 +109,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
goto vdd10_err;
}
- ret = dwc3_exynos_register_phys(exynos);
- if (ret) {
- dev_err(dev, "couldn't register PHYs\n");
- goto phys_err;
- }
-
if (node) {
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
@@ -185,32 +124,31 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
return 0;
populate_err:
- platform_device_unregister(exynos->usb2_phy);
- platform_device_unregister(exynos->usb3_phy);
-phys_err:
regulator_disable(exynos->vdd10);
vdd10_err:
regulator_disable(exynos->vdd33);
vdd33_err:
- clk_disable_unprepare(exynos->axius_clk);
-axius_clk_err:
- clk_disable_unprepare(exynos->susp_clk);
-susp_clk_err:
- clk_disable_unprepare(exynos->clk);
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
+
+ if (exynos->suspend_clk_idx >= 0)
+ clk_disable_unprepare(exynos->clks[exynos->suspend_clk_idx]);
+
return ret;
}
static int dwc3_exynos_remove(struct platform_device *pdev)
{
struct dwc3_exynos *exynos = platform_get_drvdata(pdev);
+ int i;
device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
- platform_device_unregister(exynos->usb2_phy);
- platform_device_unregister(exynos->usb3_phy);
- clk_disable_unprepare(exynos->axius_clk);
- clk_disable_unprepare(exynos->susp_clk);
- clk_disable_unprepare(exynos->clk);
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
+
+ if (exynos->suspend_clk_idx >= 0)
+ clk_disable_unprepare(exynos->clks[exynos->suspend_clk_idx]);
regulator_disable(exynos->vdd33);
regulator_disable(exynos->vdd10);
@@ -218,10 +156,36 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
return 0;
}
+static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
+ .clk_names = { "usbdrd30" },
+ .num_clks = 1,
+ .suspend_clk_idx = -1,
+};
+
+static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
+ .clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
+ .num_clks = 4,
+ .suspend_clk_idx = 1,
+};
+
+static const struct dwc3_exynos_driverdata exynos7_drvdata = {
+ .clk_names = { "usbdrd30", "usbdrd30_susp_clk", "usbdrd30_axius_clk" },
+ .num_clks = 3,
+ .suspend_clk_idx = 1,
+};
+
static const struct of_device_id exynos_dwc3_match[] = {
- { .compatible = "samsung,exynos5250-dwusb3" },
- { .compatible = "samsung,exynos7-dwusb3" },
- {},
+ {
+ .compatible = "samsung,exynos5250-dwusb3",
+ .data = &exynos5250_drvdata,
+ }, {
+ .compatible = "samsung,exynos5433-dwusb3",
+ .data = &exynos5433_drvdata,
+ }, {
+ .compatible = "samsung,exynos7-dwusb3",
+ .data = &exynos7_drvdata,
+ }, {
+ }
};
MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
@@ -229,9 +193,10 @@ MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
static int dwc3_exynos_suspend(struct device *dev)
{
struct dwc3_exynos *exynos = dev_get_drvdata(dev);
+ int i;
- clk_disable(exynos->axius_clk);
- clk_disable(exynos->clk);
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
regulator_disable(exynos->vdd33);
regulator_disable(exynos->vdd10);
@@ -242,7 +207,7 @@ static int dwc3_exynos_suspend(struct device *dev)
static int dwc3_exynos_resume(struct device *dev)
{
struct dwc3_exynos *exynos = dev_get_drvdata(dev);
- int ret;
+ int i, ret;
ret = regulator_enable(exynos->vdd33);
if (ret) {
@@ -255,13 +220,14 @@ static int dwc3_exynos_resume(struct device *dev)
return ret;
}
- clk_enable(exynos->clk);
- clk_enable(exynos->axius_clk);
-
- /* runtime set active to reflect active state. */
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ for (i = 0; i < exynos->num_clks; i++) {
+ ret = clk_prepare_enable(exynos->clks[i]);
+ if (ret) {
+ while (--i >= 0)
+ clk_disable_unprepare(exynos->clks[i]);
+ return ret;
+ }
+ }
return 0;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2b53194081ba..679c12e14522 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -270,27 +270,36 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
u32 timeout = 1000;
+ u32 saved_config = 0;
u32 reg;
int cmd_status = 0;
- int susphy = false;
int ret = -EINVAL;
/*
- * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
- * we're issuing an endpoint command, we must check if
- * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
+ * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
+ * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
+ * endpoint command.
*
- * We will also set SUSPHY bit to what it was before returning as stated
- * by the same section on Synopsys databook.
+ * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
+ * settings. Restore them after the command is completed.
+ *
+ * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
*/
if (dwc->gadget.speed <= USB_SPEED_HIGH) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
- susphy = true;
+ saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
+
+ if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
+ saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+ }
+
+ if (saved_config)
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
@@ -389,9 +398,9 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
}
}
- if (unlikely(susphy)) {
+ if (saved_config) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ reg |= saved_config;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index e15e896f356c..165653a5e45d 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -717,17 +717,14 @@ static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
{
- size_t remain_length;
u32 comp_code;
int ep_id;
comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
- remain_length = EVENT_TRB_LEN(le32_to_cpu(evt_trb->field[2]));
ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));
switch (comp_code) {
case COMP_SUCCESS:
- remain_length = 0;
case COMP_SHORT_PACKET:
break;
case COMP_TRB_ERROR:
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index ca8a4b53c59f..043f97ad8f22 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -221,6 +221,8 @@
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
+#include <linux/nospec.h>
+
#include "configfs.h"
@@ -403,7 +405,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
common->exception_req_tag = common->ep0_req_tag;
common->state = new_state;
if (common->thread_task)
- send_sig_info(SIGUSR1, SEND_SIG_FORCED,
+ send_sig_info(SIGUSR1, SEND_SIG_PRIV,
common->thread_task);
}
spin_unlock_irqrestore(&common->lock, flags);
@@ -2311,7 +2313,7 @@ static void handle_exception(struct fsg_common *common)
* into a high-priority EXIT exception.
*/
for (;;) {
- int sig = kernel_dequeue_signal(NULL);
+ int sig = kernel_dequeue_signal();
if (!sig)
break;
if (sig != SIGUSR1) {
@@ -3152,6 +3154,7 @@ static struct config_group *fsg_lun_make(struct config_group *group,
fsg_opts = to_fsg_opts(&group->cg_item);
if (num >= FSG_MAX_LUNS)
return ERR_PTR(-ERANGE);
+ num = array_index_nospec(num, FSG_MAX_LUNS);
mutex_lock(&fsg_opts->lock);
if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
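
array_index_nospec() (from <linux/nospec.h>, added above) clamps an index
that has already passed its bounds check, so that a mispredicted branch
cannot be used to read luns[] out of bounds under speculative execution.
A simplified userspace model of the idea — the kernel's real implementation
is per-architecture and considerably more careful than this sketch:

#include <stddef.h>
#include <stdio.h>

#define MAX_LUNS 8

static int luns[MAX_LUNS];

/* Branch-free clamp: all-ones mask when idx < size, zero otherwise. */
static size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)0 - (idx < size);

	return idx & mask;
}

int main(void)
{
	size_t num = 5;	/* pretend this index came from userspace */

	if (num >= MAX_LUNS)
		return 1;
	num = index_nospec(num, MAX_LUNS);	/* clamp after the check */
	printf("lun[%zu] = %d\n", num, luns[num]);
	return 0;
}
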
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index d582921f7257..db2d4980cb35 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -22,12 +22,8 @@
* controlled by two clock sources :
* CLK_5 := c_srate, and CLK_6 := p_srate
*/
-#define USB_OUT_IT_ID 1
-#define IO_IN_IT_ID 2
-#define IO_OUT_OT_ID 3
-#define USB_IN_OT_ID 4
-#define USB_OUT_CLK_ID 5
-#define USB_IN_CLK_ID 6
+#define USB_OUT_CLK_ID (out_clk_src_desc.bClockID)
+#define USB_IN_CLK_ID (in_clk_src_desc.bClockID)
#define CONTROL_ABSENT 0
#define CONTROL_RDONLY 1
@@ -43,6 +39,9 @@
#define UNFLW_CTRL 8
#define OVFLW_CTRL 10
+#define EPIN_EN(_opts) ((_opts)->p_chmask != 0)
+#define EPOUT_EN(_opts) ((_opts)->c_chmask != 0)
+
struct f_uac2 {
struct g_audio g_audio;
u8 ac_intf, as_in_intf, as_out_intf;
@@ -135,7 +134,7 @@ static struct uac_clock_source_descriptor in_clk_src_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC2_CLOCK_SOURCE,
- .bClockID = USB_IN_CLK_ID,
+ /* .bClockID = DYNAMIC */
.bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED,
.bmControls = (CONTROL_RDONLY << CLK_FREQ_CTRL),
.bAssocTerminal = 0,
@@ -147,7 +146,7 @@ static struct uac_clock_source_descriptor out_clk_src_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC2_CLOCK_SOURCE,
- .bClockID = USB_OUT_CLK_ID,
+ /* .bClockID = DYNAMIC */
.bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED,
.bmControls = (CONTROL_RDONLY << CLK_FREQ_CTRL),
.bAssocTerminal = 0,
@@ -159,10 +158,10 @@ static struct uac2_input_terminal_descriptor usb_out_it_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
- .bTerminalID = USB_OUT_IT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
- .bCSourceID = USB_OUT_CLK_ID,
+ /* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -173,10 +172,10 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
- .bTerminalID = IO_IN_IT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED),
.bAssocTerminal = 0,
- .bCSourceID = USB_IN_CLK_ID,
+ /* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -187,11 +186,11 @@ static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
- .bTerminalID = USB_IN_OT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
- .bSourceID = IO_IN_IT_ID,
- .bCSourceID = USB_IN_CLK_ID,
+ /* .bSourceID = DYNAMIC */
+ /* .bCSourceID = DYNAMIC */
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -201,11 +200,11 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
- .bTerminalID = IO_OUT_OT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED),
.bAssocTerminal = 0,
- .bSourceID = USB_OUT_IT_ID,
- .bCSourceID = USB_OUT_CLK_ID,
+ /* .bSourceID = DYNAMIC */
+ /* .bCSourceID = DYNAMIC */
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -253,7 +252,7 @@ static struct uac2_as_header_descriptor as_out_hdr_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
- .bTerminalLink = USB_OUT_IT_ID,
+ /* .bTerminalLink = DYNAMIC */
.bmControls = 0,
.bFormatType = UAC_FORMAT_TYPE_I,
.bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM),
@@ -330,7 +329,7 @@ static struct uac2_as_header_descriptor as_in_hdr_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
- .bTerminalLink = USB_IN_OT_ID,
+ /* .bTerminalLink = DYNAMIC */
.bmControls = 0,
.bFormatType = UAC_FORMAT_TYPE_I,
.bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM),
@@ -471,6 +470,125 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
le16_to_cpu(ep_desc->wMaxPacketSize)));
}
+/* Use a macro to keep the casts below within the line-length limit */
+#define USBDHDR(p) (struct usb_descriptor_header *)(p)
+
+static void setup_descriptor(struct f_uac2_opts *opts)
+{
+ /* Patch the descriptors: assign IDs and links for the enabled directions. */
+ int i = 1; /* IDs start at 1 */
+
+ if (EPOUT_EN(opts))
+ usb_out_it_desc.bTerminalID = i++;
+ if (EPIN_EN(opts))
+ io_in_it_desc.bTerminalID = i++;
+ if (EPOUT_EN(opts))
+ io_out_ot_desc.bTerminalID = i++;
+ if (EPIN_EN(opts))
+ usb_in_ot_desc.bTerminalID = i++;
+ if (EPOUT_EN(opts))
+ out_clk_src_desc.bClockID = i++;
+ if (EPIN_EN(opts))
+ in_clk_src_desc.bClockID = i++;
+
+ usb_out_it_desc.bCSourceID = out_clk_src_desc.bClockID;
+ usb_in_ot_desc.bSourceID = io_in_it_desc.bTerminalID;
+ usb_in_ot_desc.bCSourceID = in_clk_src_desc.bClockID;
+ io_in_it_desc.bCSourceID = in_clk_src_desc.bClockID;
+ io_out_ot_desc.bCSourceID = out_clk_src_desc.bClockID;
+ io_out_ot_desc.bSourceID = usb_out_it_desc.bTerminalID;
+ as_out_hdr_desc.bTerminalLink = usb_out_it_desc.bTerminalID;
+ as_in_hdr_desc.bTerminalLink = usb_in_ot_desc.bTerminalID;
+
+ iad_desc.bInterfaceCount = 1;
+ ac_hdr_desc.wTotalLength = 0;
+
+ if (EPIN_EN(opts)) {
+ u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength);
+
+ len += sizeof(in_clk_src_desc);
+ len += sizeof(usb_in_ot_desc);
+ len += sizeof(io_in_it_desc);
+ ac_hdr_desc.wTotalLength = cpu_to_le16(len);
+ iad_desc.bInterfaceCount++;
+ }
+ if (EPOUT_EN(opts)) {
+ u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength);
+
+ len += sizeof(out_clk_src_desc);
+ len += sizeof(usb_out_it_desc);
+ len += sizeof(io_out_ot_desc);
+ ac_hdr_desc.wTotalLength = cpu_to_le16(len);
+ iad_desc.bInterfaceCount++;
+ }
+
+ i = 0;
+ fs_audio_desc[i++] = USBDHDR(&iad_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_ac_if_desc);
+ fs_audio_desc[i++] = USBDHDR(&ac_hdr_desc);
+ if (EPIN_EN(opts))
+ fs_audio_desc[i++] = USBDHDR(&in_clk_src_desc);
+ if (EPOUT_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&out_clk_src_desc);
+ fs_audio_desc[i++] = USBDHDR(&usb_out_it_desc);
+ }
+ if (EPIN_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&io_in_it_desc);
+ fs_audio_desc[i++] = USBDHDR(&usb_in_ot_desc);
+ }
+ if (EPOUT_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&io_out_ot_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_as_out_if0_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_as_out_if1_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_out_hdr_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_out_fmt1_desc);
+ fs_audio_desc[i++] = USBDHDR(&fs_epout_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_iso_out_desc);
+ }
+ if (EPIN_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&std_as_in_if0_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_as_in_if1_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_in_hdr_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_in_fmt1_desc);
+ fs_audio_desc[i++] = USBDHDR(&fs_epin_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_iso_in_desc);
+ }
+ fs_audio_desc[i] = NULL;
+
+ i = 0;
+ hs_audio_desc[i++] = USBDHDR(&iad_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_ac_if_desc);
+ hs_audio_desc[i++] = USBDHDR(&ac_hdr_desc);
+ if (EPIN_EN(opts))
+ hs_audio_desc[i++] = USBDHDR(&in_clk_src_desc);
+ if (EPOUT_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&out_clk_src_desc);
+ hs_audio_desc[i++] = USBDHDR(&usb_out_it_desc);
+ }
+ if (EPIN_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&io_in_it_desc);
+ hs_audio_desc[i++] = USBDHDR(&usb_in_ot_desc);
+ }
+ if (EPOUT_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&io_out_ot_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_as_out_if0_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_as_out_if1_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_out_hdr_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_out_fmt1_desc);
+ hs_audio_desc[i++] = USBDHDR(&hs_epout_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_iso_out_desc);
+ }
+ if (EPIN_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&std_as_in_if0_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_as_in_if1_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_in_hdr_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_in_fmt1_desc);
+ hs_audio_desc[i++] = USBDHDR(&hs_epin_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_iso_in_desc);
+ }
+ hs_audio_desc[i] = NULL;
+}
+
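
setup_descriptor() assigns the entity IDs at bind time — sequentially,
starting at 1, skipping entities for directions that are disabled — and then
builds the NULL-terminated descriptor lists that usb_assign_descriptors()
consumes. A compilable userspace model of both halves (the structures here
are stand-ins, not the UAC2 descriptors):

#include <stdio.h>

struct entity { const char *label; unsigned char id; };

static struct entity usb_out_it = { "usb-out-it" };
static struct entity io_in_it = { "io-in-it" };
static struct entity *table[4];

/* Assign sequential IDs (starting at 1) only to enabled entities, then
 * build a NULL-terminated pointer table, mirroring setup_descriptor(). */
static void setup(int have_out, int have_in)
{
	int id = 1, i = 0;

	if (have_out)
		usb_out_it.id = id++;
	if (have_in)
		io_in_it.id = id++;

	if (have_out)
		table[i++] = &usb_out_it;
	if (have_in)
		table[i++] = &io_in_it;
	table[i] = NULL;	/* terminator expected by the consumer */
}

int main(void)
{
	setup(1, 0);	/* capture only: io_in_it gets no ID and no slot */
	for (struct entity **e = table; *e; e++)
		printf("%s -> id %u\n", (*e)->label, (*e)->id);
	return 0;
}
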
static int
afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
{
@@ -530,25 +648,29 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
uac2->ac_intf = ret;
uac2->ac_alt = 0;
- ret = usb_interface_id(cfg, fn);
- if (ret < 0) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return ret;
+ if (EPOUT_EN(uac2_opts)) {
+ ret = usb_interface_id(cfg, fn);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+ std_as_out_if0_desc.bInterfaceNumber = ret;
+ std_as_out_if1_desc.bInterfaceNumber = ret;
+ uac2->as_out_intf = ret;
+ uac2->as_out_alt = 0;
}
- std_as_out_if0_desc.bInterfaceNumber = ret;
- std_as_out_if1_desc.bInterfaceNumber = ret;
- uac2->as_out_intf = ret;
- uac2->as_out_alt = 0;
- ret = usb_interface_id(cfg, fn);
- if (ret < 0) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return ret;
+ if (EPIN_EN(uac2_opts)) {
+ ret = usb_interface_id(cfg, fn);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+ std_as_in_if0_desc.bInterfaceNumber = ret;
+ std_as_in_if1_desc.bInterfaceNumber = ret;
+ uac2->as_in_intf = ret;
+ uac2->as_in_alt = 0;
}
- std_as_in_if0_desc.bInterfaceNumber = ret;
- std_as_in_if1_desc.bInterfaceNumber = ret;
- uac2->as_in_intf = ret;
- uac2->as_in_alt = 0;
/* Calculate wMaxPacketSize according to audio bandwidth */
set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
@@ -556,16 +678,20 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
- agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
- if (!agdev->out_ep) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return -ENODEV;
+ if (EPOUT_EN(uac2_opts)) {
+ agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
+ if (!agdev->out_ep) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -ENODEV;
+ }
}
- agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
- if (!agdev->in_ep) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return -ENODEV;
+ if (EPIN_EN(uac2_opts)) {
+ agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
+ if (!agdev->in_ep) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -ENODEV;
+ }
}
agdev->in_ep_maxpsize = max_t(u16,
@@ -578,6 +704,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
+ setup_descriptor(uac2_opts);
+
ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL,
NULL);
if (ret)
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index d8ce7868fe22..8c99392df593 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -197,12 +197,6 @@ static const struct usb_descriptor_header * const uvc_ss_streaming[] = {
NULL,
};
-void uvc_set_trace_param(unsigned int trace)
-{
- uvc_gadget_trace_param = trace;
-}
-EXPORT_SYMBOL(uvc_set_trace_param);
-
/* --------------------------------------------------------------------------
* Control requests
*/
@@ -232,13 +226,8 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
struct v4l2_event v4l2_event;
struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
- /* printk(KERN_INFO "setup request %02x %02x value %04x index %04x %04x\n",
- * ctrl->bRequestType, ctrl->bRequest, le16_to_cpu(ctrl->wValue),
- * le16_to_cpu(ctrl->wIndex), le16_to_cpu(ctrl->wLength));
- */
-
if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) {
- INFO(f->config->cdev, "invalid request type\n");
+ uvcg_info(f, "invalid request type\n");
return -EINVAL;
}
@@ -272,7 +261,7 @@ uvc_function_get_alt(struct usb_function *f, unsigned interface)
{
struct uvc_device *uvc = to_uvc(f);
- INFO(f->config->cdev, "uvc_function_get_alt(%u)\n", interface);
+ uvcg_info(f, "%s(%u)\n", __func__, interface);
if (interface == uvc->control_intf)
return 0;
@@ -291,13 +280,13 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
int ret;
- INFO(cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt);
+ uvcg_info(f, "%s(%u, %u)\n", __func__, interface, alt);
if (interface == uvc->control_intf) {
if (alt)
return -EINVAL;
- INFO(cdev, "reset UVC Control\n");
+ uvcg_info(f, "reset UVC Control\n");
usb_ep_disable(uvc->control_ep);
if (!uvc->control_ep->desc)
@@ -348,7 +337,7 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
if (!uvc->video.ep)
return -EINVAL;
- INFO(cdev, "reset UVC\n");
+ uvcg_info(f, "reset UVC\n");
usb_ep_disable(uvc->video.ep);
ret = config_ep_by_speed(f->config->cdev->gadget,
@@ -373,7 +362,7 @@ uvc_function_disable(struct usb_function *f)
struct uvc_device *uvc = to_uvc(f);
struct v4l2_event v4l2_event;
- INFO(f->config->cdev, "uvc_function_disable\n");
+ uvcg_info(f, "%s()\n", __func__);
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_DISCONNECT;
@@ -392,21 +381,19 @@ uvc_function_disable(struct usb_function *f)
void
uvc_function_connect(struct uvc_device *uvc)
{
- struct usb_composite_dev *cdev = uvc->func.config->cdev;
int ret;
if ((ret = usb_function_activate(&uvc->func)) < 0)
- INFO(cdev, "UVC connect failed with %d\n", ret);
+ uvcg_info(&uvc->func, "UVC connect failed with %d\n", ret);
}
void
uvc_function_disconnect(struct uvc_device *uvc)
{
- struct usb_composite_dev *cdev = uvc->func.config->cdev;
int ret;
if ((ret = usb_function_deactivate(&uvc->func)) < 0)
- INFO(cdev, "UVC disconnect failed with %d\n", ret);
+ uvcg_info(&uvc->func, "UVC disconnect failed with %d\n", ret);
}
/* --------------------------------------------------------------------------
@@ -605,7 +592,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
struct f_uvc_opts *opts;
int ret = -EINVAL;
- INFO(cdev, "uvc_function_bind\n");
+ uvcg_info(f, "%s()\n", __func__);
opts = fi_to_f_uvc_opts(f->fi);
/* Sanity check the streaming endpoint module parameters.
@@ -618,8 +605,8 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
if (opts->streaming_maxburst &&
(opts->streaming_maxpacket % 1024) != 0) {
opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
- INFO(cdev, "overriding streaming_maxpacket to %d\n",
- opts->streaming_maxpacket);
+ uvcg_info(f, "overriding streaming_maxpacket to %d\n",
+ opts->streaming_maxpacket);
}
/* Fill in the FS/HS/SS Video Streaming specific descriptors from the
@@ -658,7 +645,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
/* Allocate endpoints. */
ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
if (!ep) {
- INFO(cdev, "Unable to allocate control EP\n");
+ uvcg_info(f, "Unable to allocate control EP\n");
goto error;
}
uvc->control_ep = ep;
@@ -672,7 +659,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
if (!ep) {
- INFO(cdev, "Unable to allocate streaming EP\n");
+ uvcg_info(f, "Unable to allocate streaming EP\n");
goto error;
}
uvc->video.ep = ep;
@@ -699,12 +686,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc_iad.bFirstInterface = ret;
uvc_control_intf.bInterfaceNumber = ret;
uvc->control_intf = ret;
+ opts->control_interface = ret;
if ((ret = usb_interface_id(c, f)) < 0)
goto error;
uvc_streaming_intf_alt0.bInterfaceNumber = ret;
uvc_streaming_intf_alt1.bInterfaceNumber = ret;
uvc->streaming_intf = ret;
+ opts->streaming_interface = ret;
/* Copy descriptors */
f->fs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
@@ -743,19 +732,19 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc->control_req->context = uvc;
if (v4l2_device_register(&cdev->gadget->dev, &uvc->v4l2_dev)) {
- printk(KERN_INFO "v4l2_device_register failed\n");
+ uvcg_err(f, "failed to register V4L2 device\n");
goto error;
}
/* Initialise video. */
- ret = uvcg_video_init(&uvc->video);
+ ret = uvcg_video_init(&uvc->video, uvc);
if (ret < 0)
goto error;
/* Register a V4L2 device. */
ret = uvc_register_video(uvc);
if (ret < 0) {
- printk(KERN_INFO "Unable to register video device\n");
+ uvcg_err(f, "failed to register video device\n");
goto error;
}
@@ -792,6 +781,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
struct uvc_output_terminal_descriptor *od;
struct uvc_color_matching_descriptor *md;
struct uvc_descriptor_header **ctl_cls;
+ int ret;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
@@ -868,7 +858,12 @@ static struct usb_function_instance *uvc_alloc_inst(void)
opts->streaming_interval = 1;
opts->streaming_maxpacket = 1024;
- uvcg_attach_configfs(opts);
+ ret = uvcg_attach_configfs(opts);
+ if (ret < 0) {
+ kfree(opts);
+ return ERR_PTR(ret);
+ }
+
return &opts->func_inst;
}
@@ -886,7 +881,7 @@ static void uvc_unbind(struct usb_configuration *c, struct usb_function *f)
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
- INFO(cdev, "%s\n", __func__);
+ uvcg_info(f, "%s\n", __func__);
device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
video_unregister_device(&uvc->vdev);
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 2ed292e94fbc..5242d489e20a 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -25,6 +25,9 @@ struct f_uvc_opts {
unsigned int streaming_maxpacket;
unsigned int streaming_maxburst;
+ unsigned int control_interface;
+ unsigned int streaming_interface;
+
/*
* Control descriptors array pointers for full-/high-speed and
* super-speed. They point by default to the uvc_fs_control_cls and
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 93cf78b420fe..099d650082e5 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -24,6 +24,7 @@
struct usb_ep;
struct usb_request;
struct uvc_descriptor_header;
+struct uvc_device;
/* ------------------------------------------------------------------------
* Debugging, printing and logging
@@ -51,14 +52,12 @@ extern unsigned int uvc_gadget_trace_param;
printk(KERN_DEBUG "uvcvideo: " msg); \
} while (0)
-#define uvc_warn_once(dev, warn, msg...) \
- do { \
- if (!test_and_set_bit(warn, &dev->warnings)) \
- printk(KERN_INFO "uvcvideo: " msg); \
- } while (0)
-
-#define uvc_printk(level, msg...) \
- printk(level "uvcvideo: " msg)
+#define uvcg_dbg(f, fmt, args...) \
+ dev_dbg(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
+#define uvcg_info(f, fmt, args...) \
+ dev_info(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
+#define uvcg_err(f, fmt, args...) \
+ dev_err(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
/* ------------------------------------------------------------------------
* Driver specific constants
@@ -73,6 +72,7 @@ extern unsigned int uvc_gadget_trace_param;
*/
struct uvc_video {
+ struct uvc_device *uvc;
struct usb_ep *ep;
/* Frame parameters */
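
The uvcg_dbg/uvcg_info/uvcg_err wrappers above replace the bare printk()
variants with the device-aware dev_*() helpers and prefix every message with
the function instance name; the GNU ##args extension swallows the trailing
comma when no varargs are given. A userspace model with fprintf() standing
in for dev_info()/dev_err():

#include <stdio.h>

struct usb_function { const char *name; };

#define f_info(f, fmt, args...) \
	fprintf(stdout, "%s: " fmt, (f)->name, ##args)
#define f_err(f, fmt, args...) \
	fprintf(stderr, "%s: " fmt, (f)->name, ##args)

int main(void)
{
	struct usb_function f = { .name = "uvc.0" };

	f_info(&f, "reset UVC\n");			/* no varargs */
	f_err(&f, "connect failed with %d\n", -19);	/* with varargs */
	return 0;
}
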
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index b51f0d278826..bc1e2af566c3 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -9,9 +9,16 @@
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
*/
+
+#include <linux/sort.h>
+
#include "u_uvc.h"
#include "uvc_configfs.h"
+/* -----------------------------------------------------------------------------
+ * Global Utility Structures and Macros
+ */
+
#define UVCG_STREAMING_CONTROL_SIZE 1
#define UVC_ATTR(prefix, cname, aname) \
@@ -31,13 +38,93 @@ static struct configfs_attribute prefix##attr_##cname = { \
.show = prefix##cname##_show, \
}
+#define le8_to_cpu(x) (x)
+#define cpu_to_le8(x) (x)
+
+static int uvcg_config_compare_u32(const void *l, const void *r)
+{
+ u32 li = *(const u32 *)l;
+ u32 ri = *(const u32 *)r;
+
+ return li < ri ? -1 : li == ri ? 0 : 1;
+}
+
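
The comparator above is written for the kernel's sort(); the same shape works
with qsort(). Note that the tempting one-liner "return li - ri;" would be
wrong here: the difference of two u32 values can wrap, and truncating it to
int can invert the sign. A runnable sketch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Explicit three-way compare; avoids overflow of "li - ri" on u32. */
static int compare_u32(const void *l, const void *r)
{
	uint32_t li = *(const uint32_t *)l;
	uint32_t ri = *(const uint32_t *)r;

	return li < ri ? -1 : li == ri ? 0 : 1;
}

int main(void)
{
	uint32_t intervals[] = { 666666, 333333, 1000000 };
	size_t n = sizeof(intervals) / sizeof(intervals[0]);

	qsort(intervals, n, sizeof(intervals[0]), compare_u32);
	for (size_t i = 0; i < n; i++)
		printf("%" PRIu32 "\n", intervals[i]);
	return 0;
}
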
static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_uvc_opts,
func_inst.group);
}
-/* control/header/<NAME> */
+struct uvcg_config_group_type {
+ struct config_item_type type;
+ const char *name;
+ const struct uvcg_config_group_type **children;
+ int (*create_children)(struct config_group *group);
+};
+
+static void uvcg_config_item_release(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ kfree(group);
+}
+
+static struct configfs_item_operations uvcg_config_item_ops = {
+ .release = uvcg_config_item_release,
+};
+
+static int uvcg_config_create_group(struct config_group *parent,
+ const struct uvcg_config_group_type *type);
+
+static int uvcg_config_create_children(struct config_group *group,
+ const struct uvcg_config_group_type *type)
+{
+ const struct uvcg_config_group_type **child;
+ int ret;
+
+ if (type->create_children)
+ return type->create_children(group);
+
+ for (child = type->children; child && *child; ++child) {
+ ret = uvcg_config_create_group(group, *child);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uvcg_config_create_group(struct config_group *parent,
+ const struct uvcg_config_group_type *type)
+{
+ struct config_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
+
+ config_group_init_type_name(group, type->name, &type->type);
+ configfs_add_default_group(group, parent);
+
+ return uvcg_config_create_children(group, type);
+}
+
+static void uvcg_config_remove_children(struct config_group *group)
+{
+ struct config_group *child, *n;
+
+ list_for_each_entry_safe(child, n, &group->default_groups, group_entry) {
+ list_del(&child->group_entry);
+ uvcg_config_remove_children(child);
+ config_item_put(&child->cg_item);
+ }
+}
+
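
uvcg_config_group_type replaces the hand-rolled singleton group structs with
a static, recursive description of the configfs tree: each node carries its
name and a NULL-terminated child list, and uvcg_config_create_group() walks
the description to instantiate it. A compilable model of the same idea
(names are illustrative):

#include <stdio.h>

struct group_type {
	const char *name;
	const struct group_type **children;	/* NULL-terminated */
};

static const struct group_type camera_default = { .name = "default" };
static const struct group_type camera = {
	.name = "camera",
	.children = (const struct group_type *[]){ &camera_default, NULL },
};
static const struct group_type terminal = {
	.name = "terminal",
	.children = (const struct group_type *[]){ &camera, NULL },
};

/* Recursive walk, as uvcg_config_create_group() does over the real tree. */
static void create(const struct group_type *t, int depth)
{
	printf("%*s%s\n", depth * 2, "", t->name);
	for (const struct group_type **c = t->children; c && *c; c++)
		create(*c, depth + 1);
}

int main(void)
{
	create(&terminal, 0);
	return 0;
}
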
+/* -----------------------------------------------------------------------------
+ * control/header/<NAME>
+ * control/header
+ */
+
DECLARE_UVC_HEADER_DESCRIPTOR(1);
struct uvcg_control_header {
@@ -51,9 +138,9 @@ static struct uvcg_control_header *to_uvcg_control_header(struct config_item *it
return container_of(item, struct uvcg_control_header, item);
}
-#define UVCG_CTRL_HDR_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \
+#define UVCG_CTRL_HDR_ATTR(cname, aname, bits, limit) \
static ssize_t uvcg_control_header_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
struct uvcg_control_header *ch = to_uvcg_control_header(item); \
struct f_uvc_opts *opts; \
@@ -67,7 +154,7 @@ static ssize_t uvcg_control_header_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(ch->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(ch->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -83,7 +170,7 @@ uvcg_control_header_##cname##_store(struct config_item *item, \
struct config_item *opts_item; \
struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;\
int ret; \
- uxx num; \
+ u##bits num; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
@@ -96,7 +183,7 @@ uvcg_control_header_##cname##_store(struct config_item *item, \
goto end; \
} \
\
- ret = str2u(page, 0, &num); \
+ ret = kstrtou##bits(page, 0, &num); \
if (ret) \
goto end; \
\
@@ -104,7 +191,7 @@ uvcg_control_header_##cname##_store(struct config_item *item, \
ret = -EINVAL; \
goto end; \
} \
- ch->desc.aname = vnoc(num); \
+ ch->desc.aname = cpu_to_le##bits(num); \
ret = len; \
end: \
mutex_unlock(&opts->lock); \
@@ -114,11 +201,9 @@ end: \
\
UVC_ATTR(uvcg_control_header_, cname, aname)
-UVCG_CTRL_HDR_ATTR(bcd_uvc, bcdUVC, le16_to_cpu, kstrtou16, u16, cpu_to_le16,
- 0xffff);
+UVCG_CTRL_HDR_ATTR(bcd_uvc, bcdUVC, 16, 0xffff);
-UVCG_CTRL_HDR_ATTR(dw_clock_frequency, dwClockFrequency, le32_to_cpu, kstrtou32,
- u32, cpu_to_le32, 0x7fffffff);
+UVCG_CTRL_HDR_ATTR(dw_clock_frequency, dwClockFrequency, 32, 0x7fffffff);
#undef UVCG_CTRL_HDR_ATTR
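
The reworked macro takes the integer width as a single "bits" token and
pastes it into the type (u##bits), the parser (kstrtou##bits) and the
endianness helpers (le##bits##_to_cpu/cpu_to_le##bits), replacing four
separate macro parameters; the le8_to_cpu/cpu_to_le8 identity macros defined
earlier make the 8-bit case uniform. A small userspace model of the
token-pasting technique, where strtou16/strtou32 are hypothetical stand-ins
for kstrtou16/kstrtou32:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for kstrtou16()/kstrtou32(). */
#define strtou16(s)	((uint16_t)strtoul((s), NULL, 0))
#define strtou32(s)	((uint32_t)strtoul((s), NULL, 0))

/* One "bits" token selects both the type and the parsing helper. */
#define DEFINE_PARSER(name, bits)			\
static uint##bits##_t parse_##name(const char *s)	\
{							\
	return strtou##bits(s);				\
}

DEFINE_PARSER(bcd_uvc, 16)
DEFINE_PARSER(clock_frequency, 32)

int main(void)
{
	printf("%" PRIu16 "\n", parse_bcd_uvc("0x0110"));
	printf("%" PRIu32 "\n", parse_clock_frequency("48000000"));
	return 0;
}
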
@@ -129,6 +214,7 @@ static struct configfs_attribute *uvcg_control_header_attrs[] = {
};
static const struct config_item_type uvcg_control_header_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_attrs = uvcg_control_header_attrs,
.ct_owner = THIS_MODULE,
};
@@ -153,60 +239,42 @@ static struct config_item *uvcg_control_header_make(struct config_group *group,
return &h->item;
}
-static void uvcg_control_header_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_control_header *h = to_uvcg_control_header(item);
-
- kfree(h);
-}
-
-/* control/header */
-static struct uvcg_control_header_grp {
- struct config_group group;
-} uvcg_control_header_grp;
-
static struct configfs_group_operations uvcg_control_header_grp_ops = {
.make_item = uvcg_control_header_make,
- .drop_item = uvcg_control_header_drop,
};
-static const struct config_item_type uvcg_control_header_grp_type = {
- .ct_group_ops = &uvcg_control_header_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_control_header_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_control_header_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "header",
};
-/* control/processing/default */
-static struct uvcg_default_processing {
- struct config_group group;
-} uvcg_default_processing;
-
-static inline struct uvcg_default_processing
-*to_uvcg_default_processing(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_processing, group);
-}
+/* -----------------------------------------------------------------------------
+ * control/processing/default
+ */
-#define UVCG_DEFAULT_PROCESSING_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_PROCESSING_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_processing_##cname##_show( \
struct config_item *item, char *page) \
{ \
- struct uvcg_default_processing *dp = to_uvcg_default_processing(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_processing_unit_descriptor *pd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent; \
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \
opts = to_f_uvc_opts(opts_item); \
pd = &opts->uvc_processing; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(pd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(pd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -215,37 +283,33 @@ static ssize_t uvcg_default_processing_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_processing_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_PROCESSING_ATTR(b_unit_id, bUnitID, identity_conv);
-UVCG_DEFAULT_PROCESSING_ATTR(b_source_id, bSourceID, identity_conv);
-UVCG_DEFAULT_PROCESSING_ATTR(w_max_multiplier, wMaxMultiplier, le16_to_cpu);
-UVCG_DEFAULT_PROCESSING_ATTR(i_processing, iProcessing, identity_conv);
-
-#undef identity_conv
+UVCG_DEFAULT_PROCESSING_ATTR(b_unit_id, bUnitID, 8);
+UVCG_DEFAULT_PROCESSING_ATTR(b_source_id, bSourceID, 8);
+UVCG_DEFAULT_PROCESSING_ATTR(w_max_multiplier, wMaxMultiplier, 16);
+UVCG_DEFAULT_PROCESSING_ATTR(i_processing, iProcessing, 8);
#undef UVCG_DEFAULT_PROCESSING_ATTR
static ssize_t uvcg_default_processing_bm_controls_show(
struct config_item *item, char *page)
{
- struct uvcg_default_processing *dp = to_uvcg_default_processing(item);
+ struct config_group *group = to_config_group(item);
struct f_uvc_opts *opts;
struct config_item *opts_item;
- struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex;
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
struct uvc_processing_unit_descriptor *pd;
int result, i;
char *pg = page;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
- opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent;
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
opts = to_f_uvc_opts(opts_item);
pd = &opts->uvc_processing;
mutex_lock(&opts->lock);
for (result = 0, i = 0; i < pd->bControlSize; ++i) {
- result += sprintf(pg, "%d\n", pd->bmControls[i]);
+ result += sprintf(pg, "%u\n", pd->bmControls[i]);
pg = page + result;
}
mutex_unlock(&opts->lock);
@@ -266,54 +330,55 @@ static struct configfs_attribute *uvcg_default_processing_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_processing_type = {
- .ct_attrs = uvcg_default_processing_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_processing_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_processing_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_processing {}; */
-
-/* control/processing */
-static struct uvcg_processing_grp {
- struct config_group group;
-} uvcg_processing_grp;
+/* -----------------------------------------------------------------------------
+ * control/processing
+ */
-static const struct config_item_type uvcg_processing_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_processing_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "processing",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_processing_type,
+ NULL,
+ },
};
-/* control/terminal/camera/default */
-static struct uvcg_default_camera {
- struct config_group group;
-} uvcg_default_camera;
-
-static inline struct uvcg_default_camera
-*to_uvcg_default_camera(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_camera, group);
-}
+/* -----------------------------------------------------------------------------
+ * control/terminal/camera/default
+ */
-#define UVCG_DEFAULT_CAMERA_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_CAMERA_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_camera_##cname##_show( \
struct config_item *item, char *page) \
{ \
- struct uvcg_default_camera *dc = to_uvcg_default_camera(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_camera_terminal_descriptor *cd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent-> \
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent-> \
ci_parent; \
opts = to_f_uvc_opts(opts_item); \
cd = &opts->uvc_camera_terminal; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(cd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -323,44 +388,40 @@ static ssize_t uvcg_default_camera_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_camera_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_CAMERA_ATTR(b_terminal_id, bTerminalID, identity_conv);
-UVCG_DEFAULT_CAMERA_ATTR(w_terminal_type, wTerminalType, le16_to_cpu);
-UVCG_DEFAULT_CAMERA_ATTR(b_assoc_terminal, bAssocTerminal, identity_conv);
-UVCG_DEFAULT_CAMERA_ATTR(i_terminal, iTerminal, identity_conv);
+UVCG_DEFAULT_CAMERA_ATTR(b_terminal_id, bTerminalID, 8);
+UVCG_DEFAULT_CAMERA_ATTR(w_terminal_type, wTerminalType, 16);
+UVCG_DEFAULT_CAMERA_ATTR(b_assoc_terminal, bAssocTerminal, 8);
+UVCG_DEFAULT_CAMERA_ATTR(i_terminal, iTerminal, 8);
UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_min, wObjectiveFocalLengthMin,
- le16_to_cpu);
+ 16);
UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_max, wObjectiveFocalLengthMax,
- le16_to_cpu);
+ 16);
UVCG_DEFAULT_CAMERA_ATTR(w_ocular_focal_length, wOcularFocalLength,
- le16_to_cpu);
-
-#undef identity_conv
+ 16);
#undef UVCG_DEFAULT_CAMERA_ATTR
static ssize_t uvcg_default_camera_bm_controls_show(
struct config_item *item, char *page)
{
- struct uvcg_default_camera *dc = to_uvcg_default_camera(item);
+ struct config_group *group = to_config_group(item);
struct f_uvc_opts *opts;
struct config_item *opts_item;
- struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex;
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
struct uvc_camera_terminal_descriptor *cd;
int result, i;
char *pg = page;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
- opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent->
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent->
ci_parent;
opts = to_f_uvc_opts(opts_item);
cd = &opts->uvc_camera_terminal;
mutex_lock(&opts->lock);
for (result = 0, i = 0; i < cd->bControlSize; ++i) {
- result += sprintf(pg, "%d\n", cd->bmControls[i]);
+ result += sprintf(pg, "%u\n", cd->bmControls[i]);
pg = page + result;
}
mutex_unlock(&opts->lock);
@@ -383,54 +444,55 @@ static struct configfs_attribute *uvcg_default_camera_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_camera_type = {
- .ct_attrs = uvcg_default_camera_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_camera_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_camera_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_camera {}; */
-
-/* control/terminal/camera */
-static struct uvcg_camera_grp {
- struct config_group group;
-} uvcg_camera_grp;
+/* -----------------------------------------------------------------------------
+ * control/terminal/camera
+ */
-static const struct config_item_type uvcg_camera_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_camera_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "camera",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_camera_type,
+ NULL,
+ },
};
-/* control/terminal/output/default */
-static struct uvcg_default_output {
- struct config_group group;
-} uvcg_default_output;
-
-static inline struct uvcg_default_output
-*to_uvcg_default_output(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_output, group);
-}
+/* -----------------------------------------------------------------------------
+ * control/terminal/output/default
+ */
-#define UVCG_DEFAULT_OUTPUT_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_OUTPUT_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_output_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
- struct uvcg_default_output *dout = to_uvcg_default_output(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dout->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_output_terminal_descriptor *cd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dout->group.cg_item.ci_parent->ci_parent-> \
+ opts_item = group->cg_item.ci_parent->ci_parent-> \
ci_parent->ci_parent; \
opts = to_f_uvc_opts(opts_item); \
cd = &opts->uvc_output_terminal; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(cd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -440,15 +502,11 @@ static ssize_t uvcg_default_output_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_output_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, identity_conv);
-UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, le16_to_cpu);
-UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, identity_conv);
-UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, identity_conv);
-UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, identity_conv);
-
-#undef identity_conv
+UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, 8);
+UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, 16);
+UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, 8);
+UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, 8);
+UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, 8);
#undef UVCG_DEFAULT_OUTPUT_ATTR
@@ -461,47 +519,68 @@ static struct configfs_attribute *uvcg_default_output_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_output_type = {
- .ct_attrs = uvcg_default_output_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_output_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_output_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_output {}; */
-
-/* control/terminal/output */
-static struct uvcg_output_grp {
- struct config_group group;
-} uvcg_output_grp;
+/* -----------------------------------------------------------------------------
+ * control/terminal/output
+ */
-static const struct config_item_type uvcg_output_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_output_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "output",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_output_type,
+ NULL,
+ },
};
-/* control/terminal */
-static struct uvcg_terminal_grp {
- struct config_group group;
-} uvcg_terminal_grp;
+/* -----------------------------------------------------------------------------
+ * control/terminal
+ */
-static const struct config_item_type uvcg_terminal_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_terminal_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "terminal",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_camera_grp_type,
+ &uvcg_output_grp_type,
+ NULL,
+ },
};
-/* control/class/{fs} */
-static struct uvcg_control_class {
- struct config_group group;
-} uvcg_control_class_fs, uvcg_control_class_ss;
+/* -----------------------------------------------------------------------------
+ * control/class/{fs|ss}
+ */
+struct uvcg_control_class_group {
+ struct config_group group;
+ const char *name;
+};
static inline struct uvc_descriptor_header
**uvcg_get_ctl_class_arr(struct config_item *i, struct f_uvc_opts *o)
{
- struct uvcg_control_class *cl = container_of(to_config_group(i),
- struct uvcg_control_class, group);
+ struct uvcg_control_class_group *group =
+ container_of(i, struct uvcg_control_class_group,
+ group.cg_item);
- if (cl == &uvcg_control_class_fs)
+ if (!strcmp(group->name, "fs"))
return o->uvc_fs_control_cls;
- if (cl == &uvcg_control_class_ss)
+ if (!strcmp(group->name, "ss"))
return o->uvc_ss_control_cls;
return NULL;
@@ -544,6 +623,7 @@ static int uvcg_control_class_allow_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
return ret;
}
@@ -579,10 +659,12 @@ static void uvcg_control_class_drop_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
}
static struct configfs_item_operations uvcg_control_class_item_ops = {
+ .release = uvcg_config_item_release,
.allow_link = uvcg_control_class_allow_link,
.drop_link = uvcg_control_class_drop_link,
};
@@ -592,37 +674,99 @@ static const struct config_item_type uvcg_control_class_type = {
.ct_owner = THIS_MODULE,
};
-/* control/class */
-static struct uvcg_control_class_grp {
- struct config_group group;
-} uvcg_control_class_grp;
+/* -----------------------------------------------------------------------------
+ * control/class
+ */
+
+static int uvcg_control_class_create_children(struct config_group *parent)
+{
+ static const char * const names[] = { "fs", "ss" };
+ unsigned int i;
-static const struct config_item_type uvcg_control_class_grp_type = {
- .ct_owner = THIS_MODULE,
+ for (i = 0; i < ARRAY_SIZE(names); ++i) {
+ struct uvcg_control_class_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
+
+ group->name = names[i];
+
+ config_group_init_type_name(&group->group, group->name,
+ &uvcg_control_class_type);
+ configfs_add_default_group(&group->group, parent);
+ }
+
+ return 0;
+}
+
+static const struct uvcg_config_group_type uvcg_control_class_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "class",
+ .create_children = uvcg_control_class_create_children,
};
-/* control */
-static struct uvcg_control_grp {
- struct config_group group;
-} uvcg_control_grp;
+/* -----------------------------------------------------------------------------
+ * control
+ */
+
+static ssize_t uvcg_default_control_b_interface_number_show(
+ struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int result = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = item->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result += sprintf(page, "%u\n", opts->control_interface);
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return result;
+}
+
+UVC_ATTR_RO(uvcg_default_control_, b_interface_number, bInterfaceNumber);
-static const struct config_item_type uvcg_control_grp_type = {
- .ct_owner = THIS_MODULE,
+static struct configfs_attribute *uvcg_default_control_attrs[] = {
+ &uvcg_default_control_attr_b_interface_number,
+ NULL,
};
-/* streaming/uncompressed */
-static struct uvcg_uncompressed_grp {
- struct config_group group;
-} uvcg_uncompressed_grp;
+static const struct uvcg_config_group_type uvcg_control_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_control_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "control",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_control_header_grp_type,
+ &uvcg_processing_grp_type,
+ &uvcg_terminal_grp_type,
+ &uvcg_control_class_grp_type,
+ NULL,
+ },
+};
-/* streaming/mjpeg */
-static struct uvcg_mjpeg_grp {
- struct config_group group;
-} uvcg_mjpeg_grp;
+/* -----------------------------------------------------------------------------
+ * streaming/uncompressed
+ * streaming/mjpeg
+ */
-static struct config_item *fmt_parent[] = {
- &uvcg_uncompressed_grp.group.cg_item,
- &uvcg_mjpeg_grp.group.cg_item,
+static const char * const uvcg_format_names[] = {
+ "uncompressed",
+ "mjpeg",
};
enum uvcg_format_type {
@@ -706,7 +850,11 @@ struct uvcg_format_ptr {
struct list_head entry;
};
-/* streaming/header/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/header/<NAME>
+ * streaming/header
+ */
+
struct uvcg_streaming_header {
struct config_item item;
struct uvc_input_header_descriptor desc;
@@ -720,6 +868,8 @@ static struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item
return container_of(item, struct uvcg_streaming_header, item);
}
+static void uvcg_format_set_indices(struct config_group *fmt);
+
static int uvcg_streaming_header_allow_link(struct config_item *src,
struct config_item *target)
{
@@ -744,10 +894,22 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
goto out;
}
- for (i = 0; i < ARRAY_SIZE(fmt_parent); ++i)
- if (target->ci_parent == fmt_parent[i])
+ /*
+ * Linking is only allowed to direct children of the format nodes
+ * (streaming/uncompressed or streaming/mjpeg nodes). First check that
+ * the grandparent of the target matches the grandparent of the source
+ * (the streaming node), and then verify that the target's parent is a
+ * format node.
+ */
+ if (src->ci_parent->ci_parent != target->ci_parent->ci_parent)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(uvcg_format_names); ++i) {
+ if (!strcmp(target->ci_parent->ci_name, uvcg_format_names[i]))
break;
- if (i == ARRAY_SIZE(fmt_parent))
+ }
+
+ if (i == ARRAY_SIZE(uvcg_format_names))
goto out;
target_fmt = container_of(to_config_group(target), struct uvcg_format,
@@ -755,6 +917,8 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
if (!target_fmt)
goto out;
+ uvcg_format_set_indices(to_config_group(target));
+
format_ptr = kzalloc(sizeof(*format_ptr), GFP_KERNEL);
if (!format_ptr) {
ret = -ENOMEM;
@@ -764,6 +928,7 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
format_ptr->fmt = target_fmt;
list_add_tail(&format_ptr->entry, &src_hdr->formats);
++src_hdr->num_fmt;
+ ++target_fmt->linked;
out:
mutex_unlock(&opts->lock);
@@ -801,19 +966,22 @@ static void uvcg_streaming_header_drop_link(struct config_item *src,
break;
}
+ --target_fmt->linked;
+
out:
mutex_unlock(&opts->lock);
mutex_unlock(su_mutex);
}
static struct configfs_item_operations uvcg_streaming_header_item_ops = {
- .allow_link = uvcg_streaming_header_allow_link,
- .drop_link = uvcg_streaming_header_drop_link,
+ .release = uvcg_config_item_release,
+ .allow_link = uvcg_streaming_header_allow_link,
+ .drop_link = uvcg_streaming_header_drop_link,
};
-#define UVCG_STREAMING_HEADER_ATTR(cname, aname, conv) \
+#define UVCG_STREAMING_HEADER_ATTR(cname, aname, bits) \
static ssize_t uvcg_streaming_header_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
struct uvcg_streaming_header *sh = to_uvcg_streaming_header(item); \
struct f_uvc_opts *opts; \
@@ -827,7 +995,7 @@ static ssize_t uvcg_streaming_header_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(sh->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(sh->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -836,16 +1004,11 @@ static ssize_t uvcg_streaming_header_##cname##_show( \
\
UVC_ATTR_RO(uvcg_streaming_header_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_STREAMING_HEADER_ATTR(bm_info, bmInfo, identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_terminal_link, bTerminalLink, identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_still_capture_method, bStillCaptureMethod,
- identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_trigger_support, bTriggerSupport, identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_trigger_usage, bTriggerUsage, identity_conv);
-
-#undef identity_conv
+UVCG_STREAMING_HEADER_ATTR(bm_info, bmInfo, 8);
+UVCG_STREAMING_HEADER_ATTR(b_terminal_link, bTerminalLink, 8);
+UVCG_STREAMING_HEADER_ATTR(b_still_capture_method, bStillCaptureMethod, 8);
+UVCG_STREAMING_HEADER_ATTR(b_trigger_support, bTriggerSupport, 8);
+UVCG_STREAMING_HEADER_ATTR(b_trigger_usage, bTriggerUsage, 8);
#undef UVCG_STREAMING_HEADER_ATTR
@@ -884,31 +1047,26 @@ static struct config_item
return &h->item;
}
-static void uvcg_streaming_header_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_streaming_header *h = to_uvcg_streaming_header(item);
-
- kfree(h);
-}
-
-/* streaming/header */
-static struct uvcg_streaming_header_grp {
- struct config_group group;
-} uvcg_streaming_header_grp;
-
static struct configfs_group_operations uvcg_streaming_header_grp_ops = {
.make_item = uvcg_streaming_header_make,
- .drop_item = uvcg_streaming_header_drop,
};
-static const struct config_item_type uvcg_streaming_header_grp_type = {
- .ct_group_ops = &uvcg_streaming_header_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_streaming_header_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_streaming_header_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "header",
};
-/* streaming/<mode>/<format>/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/<mode>/<format>/<NAME>
+ */
+
struct uvcg_frame {
+ struct config_item item;
+ enum uvcg_format_type fmt_type;
struct {
u8 b_length;
u8 b_descriptor_type;
@@ -924,8 +1082,6 @@ struct uvcg_frame {
u8 b_frame_interval_type;
} __attribute__((packed)) frame;
u32 *dw_frame_interval;
- enum uvcg_format_type fmt_type;
- struct config_item item;
};
static struct uvcg_frame *to_uvcg_frame(struct config_item *item)
@@ -933,7 +1089,7 @@ static struct uvcg_frame *to_uvcg_frame(struct config_item *item)
return container_of(item, struct uvcg_frame, item);
}
-#define UVCG_FRAME_ATTR(cname, aname, to_cpu_endian, to_little_endian, bits) \
+#define UVCG_FRAME_ATTR(cname, aname, bits) \
static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\
{ \
struct uvcg_frame *f = to_uvcg_frame(item); \
@@ -948,7 +1104,7 @@ static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", to_cpu_endian(f->frame.cname)); \
+ result = sprintf(page, "%u\n", f->frame.cname); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -963,8 +1119,8 @@ static ssize_t uvcg_frame_##cname##_store(struct config_item *item, \
struct config_item *opts_item; \
struct uvcg_format *fmt; \
struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;\
+ typeof(f->frame.cname) num; \
int ret; \
- u##bits num; \
\
ret = kstrtou##bits(page, 0, &num); \
if (ret) \
@@ -982,7 +1138,7 @@ static ssize_t uvcg_frame_##cname##_store(struct config_item *item, \
goto end; \
} \
\
- f->frame.cname = to_little_endian(num); \
+ f->frame.cname = num; \
ret = len; \
end: \
mutex_unlock(&opts->lock); \
@@ -992,20 +1148,48 @@ end: \
\
UVC_ATTR(uvcg_frame_, cname, aname);
-#define noop_conversion(x) (x)
+static ssize_t uvcg_frame_b_frame_index_show(struct config_item *item,
+ char *page)
+{
+ struct uvcg_frame *f = to_uvcg_frame(item);
+ struct uvcg_format *fmt;
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct config_item *fmt_item;
+ struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;
+ int result;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ fmt_item = f->item.ci_parent;
+ fmt = to_uvcg_format(fmt_item);
-UVCG_FRAME_ATTR(bm_capabilities, bmCapabilities, noop_conversion,
- noop_conversion, 8);
-UVCG_FRAME_ATTR(w_width, wWidth, le16_to_cpu, cpu_to_le16, 16);
-UVCG_FRAME_ATTR(w_height, wHeight, le16_to_cpu, cpu_to_le16, 16);
-UVCG_FRAME_ATTR(dw_min_bit_rate, dwMinBitRate, le32_to_cpu, cpu_to_le32, 32);
-UVCG_FRAME_ATTR(dw_max_bit_rate, dwMaxBitRate, le32_to_cpu, cpu_to_le32, 32);
-UVCG_FRAME_ATTR(dw_max_video_frame_buffer_size, dwMaxVideoFrameBufferSize,
- le32_to_cpu, cpu_to_le32, 32);
-UVCG_FRAME_ATTR(dw_default_frame_interval, dwDefaultFrameInterval,
- le32_to_cpu, cpu_to_le32, 32);
+ if (!fmt->linked) {
+ result = -EBUSY;
+ goto out;
+ }
-#undef noop_conversion
+ opts_item = fmt_item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%u\n", f->frame.b_frame_index);
+ mutex_unlock(&opts->lock);
+
+out:
+ mutex_unlock(su_mutex);
+ return result;
+}
+
+UVC_ATTR_RO(uvcg_frame_, b_frame_index, bFrameIndex);
+
+UVCG_FRAME_ATTR(bm_capabilities, bmCapabilities, 8);
+UVCG_FRAME_ATTR(w_width, wWidth, 16);
+UVCG_FRAME_ATTR(w_height, wHeight, 16);
+UVCG_FRAME_ATTR(dw_min_bit_rate, dwMinBitRate, 32);
+UVCG_FRAME_ATTR(dw_max_bit_rate, dwMaxBitRate, 32);
+UVCG_FRAME_ATTR(dw_max_video_frame_buffer_size, dwMaxVideoFrameBufferSize, 32);
+UVCG_FRAME_ATTR(dw_default_frame_interval, dwDefaultFrameInterval, 32);
#undef UVCG_FRAME_ATTR
@@ -1026,8 +1210,7 @@ static ssize_t uvcg_frame_dw_frame_interval_show(struct config_item *item,
mutex_lock(&opts->lock);
for (result = 0, i = 0; i < frm->frame.b_frame_interval_type; ++i) {
- result += sprintf(pg, "%d\n",
- le32_to_cpu(frm->dw_frame_interval[i]));
+ result += sprintf(pg, "%u\n", frm->dw_frame_interval[i]);
pg = page + result;
}
mutex_unlock(&opts->lock);
@@ -1052,7 +1235,7 @@ static inline int __uvcg_fill_frm_intrv(char *buf, void *priv)
return ret;
interv = priv;
- **interv = cpu_to_le32(num);
+ **interv = num;
++*interv;
return 0;
@@ -1129,6 +1312,8 @@ static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item,
kfree(ch->dw_frame_interval);
ch->dw_frame_interval = frm_intrv;
ch->frame.b_frame_interval_type = n;
+ sort(ch->dw_frame_interval, n, sizeof(*ch->dw_frame_interval),
+ uvcg_config_compare_u32, NULL);
ret = len;
end:
@@ -1140,6 +1325,7 @@ end:
UVC_ATTR(uvcg_frame_, dw_frame_interval, dwFrameInterval);
static struct configfs_attribute *uvcg_frame_attrs[] = {
+ &uvcg_frame_attr_b_frame_index,
&uvcg_frame_attr_bm_capabilities,
&uvcg_frame_attr_w_width,
&uvcg_frame_attr_w_height,
@@ -1152,6 +1338,7 @@ static struct configfs_attribute *uvcg_frame_attrs[] = {
};
static const struct config_item_type uvcg_frame_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_attrs = uvcg_frame_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1170,12 +1357,12 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
h->frame.b_descriptor_type = USB_DT_CS_INTERFACE;
h->frame.b_frame_index = 1;
- h->frame.w_width = cpu_to_le16(640);
- h->frame.w_height = cpu_to_le16(360);
- h->frame.dw_min_bit_rate = cpu_to_le32(18432000);
- h->frame.dw_max_bit_rate = cpu_to_le32(55296000);
- h->frame.dw_max_video_frame_buffer_size = cpu_to_le32(460800);
- h->frame.dw_default_frame_interval = cpu_to_le32(666666);
+ h->frame.w_width = 640;
+ h->frame.w_height = 360;
+ h->frame.dw_min_bit_rate = 18432000;
+ h->frame.dw_max_bit_rate = 55296000;
+ h->frame.dw_max_video_frame_buffer_size = 460800;
+ h->frame.dw_default_frame_interval = 666666;
opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
opts = to_f_uvc_opts(opts_item);
@@ -1203,7 +1390,6 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
static void uvcg_frame_drop(struct config_group *group, struct config_item *item)
{
- struct uvcg_frame *h = to_uvcg_frame(item);
struct uvcg_format *fmt;
struct f_uvc_opts *opts;
struct config_item *opts_item;
@@ -1214,11 +1400,31 @@ static void uvcg_frame_drop(struct config_group *group, struct config_item *item
mutex_lock(&opts->lock);
fmt = to_uvcg_format(&group->cg_item);
--fmt->num_frames;
- kfree(h);
mutex_unlock(&opts->lock);
+
+ config_item_put(item);
+}
+
+static void uvcg_format_set_indices(struct config_group *fmt)
+{
+ struct config_item *ci;
+ unsigned int i = 1;
+
+ list_for_each_entry(ci, &fmt->cg_children, ci_entry) {
+ struct uvcg_frame *frm;
+
+ if (ci->ci_type != &uvcg_frame_type)
+ continue;
+
+ frm = to_uvcg_frame(ci);
+ frm->frame.b_frame_index = i++;
+ }
}
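
uvcg_format_set_indices() renumbers the frames under a format so that bFrameIndex always equals the 1-based position in the children list, skipping any non-frame items in the same group. A sketch of the intended effect (the actual call sites are in hunks not shown here, presumably wherever a frame is added or removed):

	/* Sketch; caller assumed, under opts->lock: */
	uvcg_format_set_indices(&fmt->group);
	/* children of uvcg_frame_type now read back 1, 2, 3, ... */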
-/* streaming/uncompressed/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/uncompressed/<NAME>
+ */
+
struct uvcg_uncompressed {
struct uvcg_format fmt;
struct uvc_format_uncompressed desc;
@@ -1290,7 +1496,7 @@ end:
UVC_ATTR(uvcg_uncompressed_, guid_format, guidFormat);
-#define UVCG_UNCOMPRESSED_ATTR_RO(cname, aname, conv) \
+#define UVCG_UNCOMPRESSED_ATTR_RO(cname, aname, bits) \
static ssize_t uvcg_uncompressed_##cname##_show( \
struct config_item *item, char *page) \
{ \
@@ -1306,7 +1512,7 @@ static ssize_t uvcg_uncompressed_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1315,7 +1521,7 @@ static ssize_t uvcg_uncompressed_##cname##_show( \
\
UVC_ATTR_RO(uvcg_uncompressed_, cname, aname);
-#define UVCG_UNCOMPRESSED_ATTR(cname, aname, conv) \
+#define UVCG_UNCOMPRESSED_ATTR(cname, aname, bits) \
static ssize_t uvcg_uncompressed_##cname##_show( \
struct config_item *item, char *page) \
{ \
@@ -1331,7 +1537,7 @@ static ssize_t uvcg_uncompressed_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1378,16 +1584,12 @@ end: \
\
UVC_ATTR(uvcg_uncompressed_, cname, aname);
-#define identity_conv(x) (x)
-
-UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, identity_conv);
-UVCG_UNCOMPRESSED_ATTR(b_default_frame_index, bDefaultFrameIndex,
- identity_conv);
-UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, identity_conv);
-UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, identity_conv);
-UVCG_UNCOMPRESSED_ATTR_RO(bm_interface_flags, bmInterfaceFlags, identity_conv);
-
-#undef identity_conv
+UVCG_UNCOMPRESSED_ATTR_RO(b_format_index, bFormatIndex, 8);
+UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, 8);
+UVCG_UNCOMPRESSED_ATTR(b_default_frame_index, bDefaultFrameIndex, 8);
+UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8);
+UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8);
+UVCG_UNCOMPRESSED_ATTR_RO(bm_interface_flags, bmInterfaceFlags, 8);
#undef UVCG_UNCOMPRESSED_ATTR
#undef UVCG_UNCOMPRESSED_ATTR_RO
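
The new `bits` macro argument is token-pasted into the byte-order helper, so `UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, 8)` resolves to `le8_to_cpu()` — presumably an identity helper defined earlier in uvc_configfs.c (that hunk is not shown) — while 16- and 32-bit fields pick up the real `le16_to_cpu()`/`le32_to_cpu()`. A standalone sketch of the paste mechanism:

	#include <stdio.h>

	/* Stand-ins for the kernel helpers. le8_to_cpu is assumed to be
	 * an identity macro in uvc_configfs.c; le16_to_cpu is identity
	 * here only for the demo (the kernel version byte-swaps on
	 * big-endian hosts). */
	#define le8_to_cpu(x)	(x)
	#define le16_to_cpu(x)	(x)

	#define SHOW(field, bits) \
		printf(#field " = %u\n", (unsigned int)le##bits##_to_cpu(field))

	int main(void)
	{
		unsigned char bFormatIndex = 1;
		unsigned short wWidth = 640;

		SHOW(bFormatIndex, 8);	/* pastes to le8_to_cpu() */
		SHOW(wWidth, 16);	/* pastes to le16_to_cpu() */
		return 0;
	}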
@@ -1410,6 +1612,7 @@ uvcg_uncompressed_bma_controls_store(struct config_item *item,
UVC_ATTR(uvcg_uncompressed_, bma_controls, bmaControls);
static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
+ &uvcg_uncompressed_attr_b_format_index,
&uvcg_uncompressed_attr_guid_format,
&uvcg_uncompressed_attr_b_bits_per_pixel,
&uvcg_uncompressed_attr_b_default_frame_index,
@@ -1421,6 +1624,7 @@ static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
};
static const struct config_item_type uvcg_uncompressed_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_group_ops = &uvcg_uncompressed_group_ops,
.ct_attrs = uvcg_uncompressed_attrs,
.ct_owner = THIS_MODULE,
@@ -1457,25 +1661,23 @@ static struct config_group *uvcg_uncompressed_make(struct config_group *group,
return &h->fmt.group;
}
-static void uvcg_uncompressed_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_uncompressed *h = to_uvcg_uncompressed(item);
-
- kfree(h);
-}
-
static struct configfs_group_operations uvcg_uncompressed_grp_ops = {
.make_group = uvcg_uncompressed_make,
- .drop_item = uvcg_uncompressed_drop,
};
-static const struct config_item_type uvcg_uncompressed_grp_type = {
- .ct_group_ops = &uvcg_uncompressed_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_uncompressed_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_uncompressed_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "uncompressed",
};
-/* streaming/mjpeg/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/mjpeg/<NAME>
+ */
+
struct uvcg_mjpeg {
struct uvcg_format fmt;
struct uvc_format_mjpeg desc;
@@ -1493,7 +1695,7 @@ static struct configfs_group_operations uvcg_mjpeg_group_ops = {
.drop_item = uvcg_frame_drop,
};
-#define UVCG_MJPEG_ATTR_RO(cname, aname, conv) \
+#define UVCG_MJPEG_ATTR_RO(cname, aname, bits) \
static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
{ \
struct uvcg_mjpeg *u = to_uvcg_mjpeg(item); \
@@ -1508,7 +1710,7 @@ static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1517,7 +1719,7 @@ static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
\
UVC_ATTR_RO(uvcg_mjpeg_, cname, aname)
-#define UVCG_MJPEG_ATTR(cname, aname, conv) \
+#define UVCG_MJPEG_ATTR(cname, aname, bits) \
static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
{ \
struct uvcg_mjpeg *u = to_uvcg_mjpeg(item); \
@@ -1532,7 +1734,7 @@ static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1579,16 +1781,12 @@ end: \
\
UVC_ATTR(uvcg_mjpeg_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_MJPEG_ATTR(b_default_frame_index, bDefaultFrameIndex,
- identity_conv);
-UVCG_MJPEG_ATTR_RO(bm_flags, bmFlags, identity_conv);
-UVCG_MJPEG_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, identity_conv);
-UVCG_MJPEG_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, identity_conv);
-UVCG_MJPEG_ATTR_RO(bm_interface_flags, bmInterfaceFlags, identity_conv);
-
-#undef identity_conv
+UVCG_MJPEG_ATTR_RO(b_format_index, bFormatIndex, 8);
+UVCG_MJPEG_ATTR(b_default_frame_index, bDefaultFrameIndex, 8);
+UVCG_MJPEG_ATTR_RO(bm_flags, bmFlags, 8);
+UVCG_MJPEG_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8);
+UVCG_MJPEG_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8);
+UVCG_MJPEG_ATTR_RO(bm_interface_flags, bmInterfaceFlags, 8);
#undef UVCG_MJPEG_ATTR
#undef UVCG_MJPEG_ATTR_RO
@@ -1611,6 +1809,7 @@ uvcg_mjpeg_bma_controls_store(struct config_item *item,
UVC_ATTR(uvcg_mjpeg_, bma_controls, bmaControls);
static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
+ &uvcg_mjpeg_attr_b_format_index,
&uvcg_mjpeg_attr_b_default_frame_index,
&uvcg_mjpeg_attr_bm_flags,
&uvcg_mjpeg_attr_b_aspect_ratio_x,
@@ -1621,6 +1820,7 @@ static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
};
static const struct config_item_type uvcg_mjpeg_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_group_ops = &uvcg_mjpeg_group_ops,
.ct_attrs = uvcg_mjpeg_attrs,
.ct_owner = THIS_MODULE,
@@ -1651,56 +1851,42 @@ static struct config_group *uvcg_mjpeg_make(struct config_group *group,
return &h->fmt.group;
}
-static void uvcg_mjpeg_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_mjpeg *h = to_uvcg_mjpeg(item);
-
- kfree(h);
-}
-
static struct configfs_group_operations uvcg_mjpeg_grp_ops = {
.make_group = uvcg_mjpeg_make,
- .drop_item = uvcg_mjpeg_drop,
};
-static const struct config_item_type uvcg_mjpeg_grp_type = {
- .ct_group_ops = &uvcg_mjpeg_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_mjpeg_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_mjpeg_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "mjpeg",
};
-/* streaming/color_matching/default */
-static struct uvcg_default_color_matching {
- struct config_group group;
-} uvcg_default_color_matching;
-
-static inline struct uvcg_default_color_matching
-*to_uvcg_default_color_matching(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_color_matching, group);
-}
+/* -----------------------------------------------------------------------------
+ * streaming/color_matching/default
+ */
-#define UVCG_DEFAULT_COLOR_MATCHING_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_COLOR_MATCHING_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_color_matching_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
- struct uvcg_default_color_matching *dc = \
- to_uvcg_default_color_matching(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_color_matching_descriptor *cd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent; \
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \
opts = to_f_uvc_opts(opts_item); \
cd = &opts->uvc_color_matching; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(cd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1709,16 +1895,10 @@ static ssize_t uvcg_default_color_matching_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_color_matching_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries,
- identity_conv);
+UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries, 8);
UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_transfer_characteristics,
- bTransferCharacteristics, identity_conv);
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients,
- identity_conv);
-
-#undef identity_conv
+ bTransferCharacteristics, 8);
+UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients, 8);
#undef UVCG_DEFAULT_COLOR_MATCHING_ATTR
@@ -1729,41 +1909,54 @@ static struct configfs_attribute *uvcg_default_color_matching_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_color_matching_type = {
- .ct_attrs = uvcg_default_color_matching_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_color_matching_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_color_matching_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_color_matching {}; */
-
-/* streaming/color_matching */
-static struct uvcg_color_matching_grp {
- struct config_group group;
-} uvcg_color_matching_grp;
+/* -----------------------------------------------------------------------------
+ * streaming/color_matching
+ */
-static const struct config_item_type uvcg_color_matching_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_color_matching_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "color_matching",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_color_matching_type,
+ NULL,
+ },
};
-/* streaming/class/{fs|hs|ss} */
-static struct uvcg_streaming_class {
- struct config_group group;
-} uvcg_streaming_class_fs, uvcg_streaming_class_hs, uvcg_streaming_class_ss;
+/* -----------------------------------------------------------------------------
+ * streaming/class/{fs|hs|ss}
+ */
+struct uvcg_streaming_class_group {
+ struct config_group group;
+ const char *name;
+};
static inline struct uvc_descriptor_header
***__uvcg_get_stream_class_arr(struct config_item *i, struct f_uvc_opts *o)
{
- struct uvcg_streaming_class *cl = container_of(to_config_group(i),
- struct uvcg_streaming_class, group);
+ struct uvcg_streaming_class_group *group =
+ container_of(i, struct uvcg_streaming_class_group,
+ group.cg_item);
- if (cl == &uvcg_streaming_class_fs)
+ if (!strcmp(group->name, "fs"))
return &o->uvc_fs_streaming_cls;
- if (cl == &uvcg_streaming_class_hs)
+ if (!strcmp(group->name, "hs"))
return &o->uvc_hs_streaming_cls;
- if (cl == &uvcg_streaming_class_ss)
+ if (!strcmp(group->name, "ss"))
return &o->uvc_ss_streaming_cls;
return NULL;
@@ -1922,24 +2115,22 @@ static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n,
struct uvcg_format *fmt = priv1;
if (fmt->type == UVCG_UNCOMPRESSED) {
- struct uvc_format_uncompressed *unc = *dest;
struct uvcg_uncompressed *u =
container_of(fmt, struct uvcg_uncompressed,
fmt);
+ u->desc.bFormatIndex = n + 1;
+ u->desc.bNumFrameDescriptors = fmt->num_frames;
memcpy(*dest, &u->desc, sizeof(u->desc));
*dest += sizeof(u->desc);
- unc->bNumFrameDescriptors = fmt->num_frames;
- unc->bFormatIndex = n + 1;
} else if (fmt->type == UVCG_MJPEG) {
- struct uvc_format_mjpeg *mjp = *dest;
struct uvcg_mjpeg *m =
container_of(fmt, struct uvcg_mjpeg, fmt);
+ m->desc.bFormatIndex = n + 1;
+ m->desc.bNumFrameDescriptors = fmt->num_frames;
memcpy(*dest, &m->desc, sizeof(m->desc));
*dest += sizeof(m->desc);
- mjp->bNumFrameDescriptors = fmt->num_frames;
- mjp->bFormatIndex = n + 1;
} else {
return -EINVAL;
}
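
Assigning bFormatIndex and bNumFrameDescriptors into the stored descriptor before the memcpy — rather than patching the copied bytes afterwards, as the removed lines did — means the values persist in u->desc/m->desc, so the new read-only bFormatIndex configfs attributes read back exactly what was placed in the class-specific descriptors. A userspace sketch (gadget name and format directory are hypothetical, and the value is only meaningful after the function is bound):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/config/usb_gadget/g1/functions/"
				"uvc.0/streaming/uncompressed/u/bFormatIndex", "r");
		char buf[16];

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("bFormatIndex = %s", buf);
		fclose(f);
		return 0;
	}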
@@ -2038,6 +2229,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
return ret;
}
@@ -2078,10 +2270,12 @@ static void uvcg_streaming_class_drop_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
}
static struct configfs_item_operations uvcg_streaming_class_item_ops = {
+ .release = uvcg_config_item_release,
.allow_link = uvcg_streaming_class_allow_link,
.drop_link = uvcg_streaming_class_drop_link,
};
@@ -2091,36 +2285,109 @@ static const struct config_item_type uvcg_streaming_class_type = {
.ct_owner = THIS_MODULE,
};
-/* streaming/class */
-static struct uvcg_streaming_class_grp {
- struct config_group group;
-} uvcg_streaming_class_grp;
+/* -----------------------------------------------------------------------------
+ * streaming/class
+ */
+
+static int uvcg_streaming_class_create_children(struct config_group *parent)
+{
+ static const char * const names[] = { "fs", "hs", "ss" };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(names); ++i) {
+ struct uvcg_streaming_class_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
-static const struct config_item_type uvcg_streaming_class_grp_type = {
- .ct_owner = THIS_MODULE,
+ group->name = names[i];
+
+ config_group_init_type_name(&group->group, group->name,
+ &uvcg_streaming_class_type);
+ configfs_add_default_group(&group->group, parent);
+ }
+
+ return 0;
+}
+
+static const struct uvcg_config_group_type uvcg_streaming_class_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "class",
+ .create_children = uvcg_streaming_class_create_children,
};
-/* streaming */
-static struct uvcg_streaming_grp {
- struct config_group group;
-} uvcg_streaming_grp;
+/* -----------------------------------------------------------------------------
+ * streaming
+ */
-static const struct config_item_type uvcg_streaming_grp_type = {
- .ct_owner = THIS_MODULE,
+static ssize_t uvcg_default_streaming_b_interface_number_show(
+ struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int result = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = item->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result += sprintf(page, "%u\n", opts->streaming_interface);
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return result;
+}
+
+UVC_ATTR_RO(uvcg_default_streaming_, b_interface_number, bInterfaceNumber);
+
+static struct configfs_attribute *uvcg_default_streaming_attrs[] = {
+ &uvcg_default_streaming_attr_b_interface_number,
+ NULL,
+};
+
+static const struct uvcg_config_group_type uvcg_streaming_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_streaming_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "streaming",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_streaming_header_grp_type,
+ &uvcg_uncompressed_grp_type,
+ &uvcg_mjpeg_grp_type,
+ &uvcg_color_matching_grp_type,
+ &uvcg_streaming_class_grp_type,
+ NULL,
+ },
};
-static void uvc_attr_release(struct config_item *item)
+/* -----------------------------------------------------------------------------
+ * UVC function
+ */
+
+static void uvc_func_item_release(struct config_item *item)
{
struct f_uvc_opts *opts = to_f_uvc_opts(item);
+ uvcg_config_remove_children(to_config_group(item));
usb_put_function_instance(&opts->func_inst);
}
-static struct configfs_item_operations uvc_item_ops = {
- .release = uvc_attr_release,
+static struct configfs_item_operations uvc_func_item_ops = {
+ .release = uvc_func_item_release,
};
-#define UVCG_OPTS_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \
+#define UVCG_OPTS_ATTR(cname, aname, limit) \
static ssize_t f_uvc_opts_##cname##_show( \
struct config_item *item, char *page) \
{ \
@@ -2128,7 +2395,7 @@ static ssize_t f_uvc_opts_##cname##_show( \
int result; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(opts->cname)); \
+ result = sprintf(page, "%u\n", opts->cname); \
mutex_unlock(&opts->lock); \
\
return result; \
@@ -2139,8 +2406,8 @@ f_uvc_opts_##cname##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uvc_opts *opts = to_f_uvc_opts(item); \
+ unsigned int num; \
int ret; \
- uxx num; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
@@ -2148,7 +2415,7 @@ f_uvc_opts_##cname##_store(struct config_item *item, \
goto end; \
} \
\
- ret = str2u(page, 0, &num); \
+ ret = kstrtouint(page, 0, &num); \
if (ret) \
goto end; \
\
@@ -2156,7 +2423,7 @@ f_uvc_opts_##cname##_store(struct config_item *item, \
ret = -EINVAL; \
goto end; \
} \
- opts->cname = vnoc(num); \
+ opts->cname = num; \
ret = len; \
end: \
mutex_unlock(&opts->lock); \
@@ -2165,16 +2432,9 @@ end: \
\
UVC_ATTR(f_uvc_opts_, cname, cname)
-#define identity_conv(x) (x)
-
-UVCG_OPTS_ATTR(streaming_interval, streaming_interval, identity_conv,
- kstrtou8, u8, identity_conv, 16);
-UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, le16_to_cpu,
- kstrtou16, u16, le16_to_cpu, 3072);
-UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, identity_conv,
- kstrtou8, u8, identity_conv, 15);
-
-#undef identity_conv
+UVCG_OPTS_ATTR(streaming_interval, streaming_interval, 16);
+UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, 3072);
+UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, 15);
#undef UVCG_OPTS_ATTR
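
With all three options held as plain unsigned int, the old conv/str2u/uxx/vnoc plumbing collapses into kstrtouint() plus a range check. The limits are USB-derived: 16 is the maximum bInterval, 3072 bytes the high-bandwidth isochronous maxpacket ceiling, and 15 the SuperSpeed bMaxBurst maximum. A hedged sketch of the store body the macro now generates for streaming_maxburst (locking elided; the exact limit-check condition is hidden in the context above):

	ret = kstrtouint(page, 0, &num);
	if (ret)
		goto end;

	if (num > 15) {			/* the `limit` macro argument */
		ret = -EINVAL;
		goto end;
	}

	opts->streaming_maxburst = num;
	ret = len;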
@@ -2185,123 +2445,31 @@ static struct configfs_attribute *uvc_attrs[] = {
NULL,
};
-static const struct config_item_type uvc_func_type = {
- .ct_item_ops = &uvc_item_ops,
- .ct_attrs = uvc_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvc_func_type = {
+ .type = {
+ .ct_item_ops = &uvc_func_item_ops,
+ .ct_attrs = uvc_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_control_grp_type,
+ &uvcg_streaming_grp_type,
+ NULL,
+ },
};
int uvcg_attach_configfs(struct f_uvc_opts *opts)
{
- config_group_init_type_name(&uvcg_control_header_grp.group,
- "header",
- &uvcg_control_header_grp_type);
-
- config_group_init_type_name(&uvcg_default_processing.group,
- "default", &uvcg_default_processing_type);
- config_group_init_type_name(&uvcg_processing_grp.group,
- "processing", &uvcg_processing_grp_type);
- configfs_add_default_group(&uvcg_default_processing.group,
- &uvcg_processing_grp.group);
-
- config_group_init_type_name(&uvcg_default_camera.group,
- "default", &uvcg_default_camera_type);
- config_group_init_type_name(&uvcg_camera_grp.group,
- "camera", &uvcg_camera_grp_type);
- configfs_add_default_group(&uvcg_default_camera.group,
- &uvcg_camera_grp.group);
-
- config_group_init_type_name(&uvcg_default_output.group,
- "default", &uvcg_default_output_type);
- config_group_init_type_name(&uvcg_output_grp.group,
- "output", &uvcg_output_grp_type);
- configfs_add_default_group(&uvcg_default_output.group,
- &uvcg_output_grp.group);
-
- config_group_init_type_name(&uvcg_terminal_grp.group,
- "terminal", &uvcg_terminal_grp_type);
- configfs_add_default_group(&uvcg_camera_grp.group,
- &uvcg_terminal_grp.group);
- configfs_add_default_group(&uvcg_output_grp.group,
- &uvcg_terminal_grp.group);
-
- config_group_init_type_name(&uvcg_control_class_fs.group,
- "fs", &uvcg_control_class_type);
- config_group_init_type_name(&uvcg_control_class_ss.group,
- "ss", &uvcg_control_class_type);
- config_group_init_type_name(&uvcg_control_class_grp.group,
- "class",
- &uvcg_control_class_grp_type);
- configfs_add_default_group(&uvcg_control_class_fs.group,
- &uvcg_control_class_grp.group);
- configfs_add_default_group(&uvcg_control_class_ss.group,
- &uvcg_control_class_grp.group);
-
- config_group_init_type_name(&uvcg_control_grp.group,
- "control",
- &uvcg_control_grp_type);
- configfs_add_default_group(&uvcg_control_header_grp.group,
- &uvcg_control_grp.group);
- configfs_add_default_group(&uvcg_processing_grp.group,
- &uvcg_control_grp.group);
- configfs_add_default_group(&uvcg_terminal_grp.group,
- &uvcg_control_grp.group);
- configfs_add_default_group(&uvcg_control_class_grp.group,
- &uvcg_control_grp.group);
-
- config_group_init_type_name(&uvcg_streaming_header_grp.group,
- "header",
- &uvcg_streaming_header_grp_type);
- config_group_init_type_name(&uvcg_uncompressed_grp.group,
- "uncompressed",
- &uvcg_uncompressed_grp_type);
- config_group_init_type_name(&uvcg_mjpeg_grp.group,
- "mjpeg",
- &uvcg_mjpeg_grp_type);
- config_group_init_type_name(&uvcg_default_color_matching.group,
- "default",
- &uvcg_default_color_matching_type);
- config_group_init_type_name(&uvcg_color_matching_grp.group,
- "color_matching",
- &uvcg_color_matching_grp_type);
- configfs_add_default_group(&uvcg_default_color_matching.group,
- &uvcg_color_matching_grp.group);
-
- config_group_init_type_name(&uvcg_streaming_class_fs.group,
- "fs", &uvcg_streaming_class_type);
- config_group_init_type_name(&uvcg_streaming_class_hs.group,
- "hs", &uvcg_streaming_class_type);
- config_group_init_type_name(&uvcg_streaming_class_ss.group,
- "ss", &uvcg_streaming_class_type);
- config_group_init_type_name(&uvcg_streaming_class_grp.group,
- "class", &uvcg_streaming_class_grp_type);
- configfs_add_default_group(&uvcg_streaming_class_fs.group,
- &uvcg_streaming_class_grp.group);
- configfs_add_default_group(&uvcg_streaming_class_hs.group,
- &uvcg_streaming_class_grp.group);
- configfs_add_default_group(&uvcg_streaming_class_ss.group,
- &uvcg_streaming_class_grp.group);
-
- config_group_init_type_name(&uvcg_streaming_grp.group,
- "streaming", &uvcg_streaming_grp_type);
- configfs_add_default_group(&uvcg_streaming_header_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_uncompressed_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_mjpeg_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_color_matching_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_streaming_class_grp.group,
- &uvcg_streaming_grp.group);
-
- config_group_init_type_name(&opts->func_inst.group,
- "",
- &uvc_func_type);
- configfs_add_default_group(&uvcg_control_grp.group,
- &opts->func_inst.group);
- configfs_add_default_group(&uvcg_streaming_grp.group,
- &opts->func_inst.group);
+ int ret;
- return 0;
+ config_group_init_type_name(&opts->func_inst.group, uvc_func_type.name,
+ &uvc_func_type.type);
+
+ ret = uvcg_config_create_children(&opts->func_inst.group,
+ &uvc_func_type);
+ if (ret < 0)
+ config_group_put(&opts->func_inst.group);
+
+ return ret;
}
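
uvcg_attach_configfs() now just instantiates the root of the declarative uvcg_config_group_type tree; the long run of manual config_group_init_type_name()/configfs_add_default_group() calls above becomes data in the `.children` arrays. The helpers referenced here are added by hunks not shown; a plausible shape, as a sketch:

	static int uvcg_config_create_group(struct config_group *parent,
				const struct uvcg_config_group_type *type);

	static int uvcg_config_create_children(struct config_group *group,
				const struct uvcg_config_group_type *grp)
	{
		const struct uvcg_config_group_type **child;
		int ret;

		/* A group either builds its own children (e.g. the fs/hs/ss
		 * class groups) or has them instantiated from .children. */
		if (grp->create_children)
			return grp->create_children(group);

		for (child = grp->children; child && *child; ++child) {
			ret = uvcg_config_create_group(group, *child);
			if (ret < 0)
				return ret;
		}

		return 0;
	}

	static int uvcg_config_create_group(struct config_group *parent,
				const struct uvcg_config_group_type *type)
	{
		struct config_group *group;

		group = kzalloc(sizeof(*group), GFP_KERNEL);
		if (!group)
			return -ENOMEM;

		config_group_init_type_name(group, type->name, &type->type);
		configfs_add_default_group(group, parent);

		return uvcg_config_create_children(group, type);
	}

Allocating every group per function instance, rather than using file-scope singletons, is presumably what allows the new release/remove-children item ops to free them and lets several UVC functions coexist.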
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 7f1ca3b57823..a1183eccee22 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -115,8 +115,8 @@ uvc_v4l2_set_format(struct file *file, void *fh, struct v4l2_format *fmt)
}
if (i == ARRAY_SIZE(uvc_formats)) {
- printk(KERN_INFO "Unsupported format 0x%08x.\n",
- fmt->fmt.pix.pixelformat);
+ uvcg_info(&uvc->func, "Unsupported format 0x%08x.\n",
+ fmt->fmt.pix.pixelformat);
return -EINVAL;
}
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index d3567b90343a..5c042f380708 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -125,6 +125,23 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
* Request handling
*/
+static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
+{
+ int ret;
+
+ ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
+ if (ret < 0) {
+ uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
+ ret);
+
+ /* Isochronous endpoints can't be halted. */
+ if (usb_endpoint_xfer_bulk(video->ep->desc))
+ usb_ep_set_halt(video->ep);
+ }
+
+ return ret;
+}
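
uvcg_video_ep_queue() centralizes the failure policy: the endpoint is halted only when it is a bulk endpoint, since isochronous endpoints have no halt feature. The two hunks below convert both callers to the same shape — queue under queue->irqlock, drop the lock, then cancel outside it (sketch):

	spin_lock_irqsave(&queue->irqlock, flags);
	video->encode(req, video, buf);
	ret = uvcg_video_ep_queue(video, req);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	if (ret < 0)
		uvcg_queue_cancel(queue, 0);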
+
/*
* I somehow feel that synchronisation won't be easy to achieve here. We have
* three events that control USB requests submission:
@@ -169,13 +186,14 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
break;
case -ESHUTDOWN: /* disconnect from host. */
- printk(KERN_DEBUG "VS request cancelled.\n");
+ uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
uvcg_queue_cancel(queue, 1);
goto requeue;
default:
- printk(KERN_INFO "VS request completed with status %d.\n",
- req->status);
+ uvcg_info(&video->uvc->func,
+ "VS request completed with status %d.\n",
+ req->status);
uvcg_queue_cancel(queue, 0);
goto requeue;
}
@@ -189,14 +207,13 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
video->encode(req, video, buf);
- if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) {
- printk(KERN_INFO "Failed to queue request (%d).\n", ret);
- usb_ep_set_halt(ep);
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ ret = uvcg_video_ep_queue(video, req);
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+
+ if (ret < 0) {
uvcg_queue_cancel(queue, 0);
goto requeue;
}
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
return;
@@ -316,15 +333,13 @@ int uvcg_video_pump(struct uvc_video *video)
video->encode(req, video, buf);
/* Queue the USB request */
- ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
+ ret = uvcg_video_ep_queue(video, req);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
if (ret < 0) {
- printk(KERN_INFO "Failed to queue request (%d)\n", ret);
- usb_ep_set_halt(video->ep);
- spin_unlock_irqrestore(&queue->irqlock, flags);
uvcg_queue_cancel(queue, 0);
break;
}
- spin_unlock_irqrestore(&queue->irqlock, flags);
}
spin_lock_irqsave(&video->req_lock, flags);
@@ -342,8 +357,8 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
int ret;
if (video->ep == NULL) {
- printk(KERN_INFO "Video enable failed, device is "
- "uninitialized.\n");
+ uvcg_info(&video->uvc->func,
+ "Video enable failed, device is uninitialized.\n");
return -ENODEV;
}
@@ -375,11 +390,12 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
/*
* Initialize the UVC video stream.
*/
-int uvcg_video_init(struct uvc_video *video)
+int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
INIT_LIST_HEAD(&video->req_free);
spin_lock_init(&video->req_lock);
+ video->uvc = uvc;
video->fcc = V4L2_PIX_FMT_YUYV;
video->bpp = 16;
video->width = 320;
diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h
index 7d77122b0ff9..278dc52c7604 100644
--- a/drivers/usb/gadget/function/uvc_video.h
+++ b/drivers/usb/gadget/function/uvc_video.h
@@ -18,6 +18,6 @@ int uvcg_video_pump(struct uvc_video *video);
int uvcg_video_enable(struct uvc_video *video, int enable);
-int uvcg_video_init(struct uvc_video *video);
+int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc);
#endif /* __UVC_VIDEO_H__ */
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 5939eb1e97f2..4a28e3fbeb0b 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -353,7 +353,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
/* Endpoint enabled ? */
if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
!ep->dev->enabled || ep->dev->suspended) {
- EPDBG(ep,"Enqueing request on wrong or disabled EP\n");
+ EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
return -ESHUTDOWN;
}
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 17147b8c771e..11247322d587 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2004,7 +2004,6 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
struct usba_udc *udc)
{
u32 val;
- const char *name;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
struct device_node *pp;
@@ -2018,6 +2017,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
udc->errata = match->data;
udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
if (IS_ERR(udc->pmc))
+ udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc");
+ if (IS_ERR(udc->pmc))
udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
if (udc->errata && IS_ERR(udc->pmc))
return ERR_CAST(udc->pmc);
@@ -2094,11 +2095,6 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
ep->can_dma = of_property_read_bool(pp, "atmel,can-dma");
ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");
- ret = of_property_read_string(pp, "name", &name);
- if (ret) {
- dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
- goto err;
- }
sprintf(ep->name, "ep%d", ep->index);
ep->ep.name = ep->name;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index af88b48c1cea..87d6b12779f2 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -690,6 +690,9 @@ EXPORT_SYMBOL_GPL(usb_gadget_connect);
* as a disconnect (when a VBUS session is active). Not all systems
* support software pullup controls.
*
+ * Following a successful disconnect, invoke the ->disconnect() callback
+ * for the current gadget driver so that UDC drivers don't need to.
+ *
* Returns zero on success, else negative errno.
*/
int usb_gadget_disconnect(struct usb_gadget *gadget)
@@ -711,8 +714,10 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
}
ret = gadget->ops->pullup(gadget, 0);
- if (!ret)
+ if (!ret) {
gadget->connected = 0;
+ gadget->udc->driver->disconnect(gadget);
+ }
out:
trace_usb_gadget_disconnect(gadget, ret);
@@ -1281,7 +1286,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
usb_gadget_disconnect(udc->gadget);
- udc->driver->disconnect(udc->gadget);
udc->driver->unbind(udc->gadget);
usb_gadget_udc_stop(udc);
@@ -1471,7 +1475,6 @@ static ssize_t soft_connect_store(struct device *dev,
usb_gadget_connect(udc->gadget);
} else if (sysfs_streq(buf, "disconnect")) {
usb_gadget_disconnect(udc->gadget);
- udc->driver->disconnect(udc->gadget);
usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 587c5037ff07..bc6abaea907d 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -741,7 +741,7 @@ static void fotg210_get_status(struct fotg210_udc *fotg210,
fotg210->ep0_req->length = 2;
spin_unlock(&fotg210->lock);
- fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_KERNEL);
+ fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC);
spin_lock(&fotg210->lock);
}
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index be59309e848c..20141c3096f6 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2234,8 +2234,10 @@ static void fsl_udc_release(struct device *dev)
Internal structure setup functions
*******************************************************************/
/*------------------------------------------------------------------
- * init resource for globle controller
- * Return the udc handle on success or NULL on failure
+ * init resource for global controller called by fsl_udc_probe()
+ * On success the udc handle is initialized, on failure it is
+ * unchanged (reset).
+ * Return 0 on success and -1 on allocation failure
------------------------------------------------------------------*/
static int struct_udc_setup(struct fsl_udc *udc,
struct platform_device *pdev)
@@ -2247,8 +2249,10 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->phy_mode = pdata->phy_mode;
udc->eps = kcalloc(udc->max_ep, sizeof(struct fsl_ep), GFP_KERNEL);
- if (!udc->eps)
- return -1;
+ if (!udc->eps) {
+ ERR("kmalloc udc endpoint status failed\n");
+ goto eps_alloc_failed;
+ }
/* initialized QHs, take care of alignment */
size = udc->max_ep * sizeof(struct ep_queue_head);
@@ -2262,8 +2266,7 @@ static int struct_udc_setup(struct fsl_udc *udc,
&udc->ep_qh_dma, GFP_KERNEL);
if (!udc->ep_qh) {
ERR("malloc QHs for udc failed\n");
- kfree(udc->eps);
- return -1;
+ goto ep_queue_alloc_failed;
}
udc->ep_qh_size = size;
@@ -2272,8 +2275,17 @@ static int struct_udc_setup(struct fsl_udc *udc,
/* FIXME: fsl_alloc_request() ignores ep argument */
udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
struct fsl_req, req);
+ if (!udc->status_req) {
+ ERR("kzalloc for udc status request failed\n");
+ goto udc_status_alloc_failed;
+ }
+
/* allocate a small amount of memory to get valid address */
udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
+ if (!udc->status_req->req.buf) {
+ ERR("kzalloc for udc request buffer failed\n");
+ goto udc_req_buf_alloc_failed;
+ }
udc->resume_state = USB_STATE_NOTATTACHED;
udc->usb_state = USB_STATE_POWERED;
@@ -2281,6 +2293,18 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->remote_wakeup = 0; /* default to 0 on reset */
return 0;
+
+udc_req_buf_alloc_failed:
+ kfree(udc->status_req);
+udc_status_alloc_failed:
+ kfree(udc->ep_qh);
+ udc->ep_qh_size = 0;
+ep_queue_alloc_failed:
+ kfree(udc->eps);
+eps_alloc_failed:
+ udc->phy_mode = 0;
+ return -1;
+
}
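
The rewritten struct_udc_setup() is a standard goto-unwind ladder: resources are acquired in order and released in reverse through fall-through labels. The generic shape, as a standalone sketch with hypothetical names:

	struct two_bufs {
		void *a;
		void *b;
	};

	static int setup_two(struct two_bufs *t)
	{
		t->a = kmalloc(SZ_1K, GFP_KERNEL);
		if (!t->a)
			goto a_failed;

		t->b = kmalloc(SZ_4K, GFP_KERNEL);
		if (!t->b)
			goto b_failed;

		return 0;		/* success: nothing to unwind */

	b_failed:
		kfree(t->a);		/* undo step 1 */
	a_failed:
		return -ENOMEM;		/* each label skips what never happened */
	}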
/*----------------------------------------------------------------
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 95f52232493b..cafde053788b 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -185,7 +185,7 @@ static int process_ep_req(struct mv_udc *udc, int index,
else
bit_pos = 1 << (16 + curr_req->ep->ep_num);
- while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
+ while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
while (readl(&udc->op_regs->epstatus) & bit_pos)
udelay(1);
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index b02ab2a8d927..e7dae5379e04 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1550,9 +1550,6 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
spin_unlock_irqrestore(&dev->lock, flags);
- if (!is_on && dev->driver)
- dev->driver->disconnect(&dev->gadget);
-
return 0;
}
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index e1656f361e08..cdffbd1e0316 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2437,6 +2437,9 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
else
usb3->forced_b_device = false;
+ if (usb3->workaround_for_vbus)
+ usb3_disconnect(usb3);
+
/* Let this driver call usb3_connect() anyway */
usb3_check_id(usb3);
@@ -2600,6 +2603,13 @@ static const struct renesas_usb3_priv renesas_usb3_priv_gen3 = {
.ramsize_per_pipe = SZ_4K,
};
+static const struct renesas_usb3_priv renesas_usb3_priv_r8a77990 = {
+ .ramsize_per_ramif = SZ_16K,
+ .num_ramif = 4,
+ .ramsize_per_pipe = SZ_4K,
+ .workaround_for_vbus = true,
+};
+
static const struct of_device_id usb3_of_match[] = {
{
.compatible = "renesas,r8a7795-usb3-peri",
@@ -2618,6 +2628,10 @@ static const struct soc_device_attribute renesas_usb3_quirks_match[] = {
.soc_id = "r8a7795", .revision = "ES1.*",
.data = &renesas_usb3_priv_r8a7795_es1,
},
+ {
+ .soc_id = "r8a77990",
+ .data = &renesas_usb3_priv_r8a77990,
+ },
{ /* sentinel */ },
};
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 6407e433bc78..b1f4104d1283 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1078,7 +1078,7 @@ static int xudc_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
unsigned long flags;
if (!ep->desc) {
- dev_dbg(udc->dev, "%s:queing request to disabled %s\n",
+ dev_dbg(udc->dev, "%s: queuing request to disabled %s\n",
__func__, ep->name);
return -ESHUTDOWN;
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 1a4ea98cac2a..16758b12a5e9 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -276,7 +276,7 @@ config USB_EHCI_EXYNOS
Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
config USB_EHCI_MV
- bool "EHCI support for Marvell PXA/MMP USB controller"
+ tristate "EHCI support for Marvell PXA/MMP USB controller"
depends on (ARCH_PXA || ARCH_MMP)
select USB_EHCI_ROOT_HUB_TT
---help---
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index e6235269c151..84514f71ae44 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -87,6 +87,7 @@ obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
obj-$(CONFIG_USB_FSL_USB2) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_EHCI_FSL) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_EHCI_FSL) += ehci-fsl.o
+obj-$(CONFIG_USB_EHCI_MV) += ehci-mv.o
obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 8608ac513fb7..cdafa97f632d 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -730,9 +730,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
if (likely ((status & STS_ERR) == 0))
- COUNT (ehci->stats.normal);
+ INCR(ehci->stats.normal);
else
- COUNT (ehci->stats.error);
+ INCR(ehci->stats.error);
bh = 1;
}
@@ -756,7 +756,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
if (cmd & CMD_IAAD)
ehci_dbg(ehci, "IAA with IAAD still set?\n");
if (ehci->iaa_in_progress)
- COUNT(ehci->stats.iaa);
+ INCR(ehci->stats.iaa);
end_iaa_cycle(ehci);
}
@@ -1286,11 +1286,6 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_grlib_driver
#endif
-#ifdef CONFIG_USB_EHCI_MV
-#include "ehci-mv.c"
-#define PLATFORM_DRIVER ehci_mv_driver
-#endif
-
static int __init ehci_hcd_init(void)
{
int retval = 0;
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index de764459e05a..f26109eafdbf 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -12,24 +12,33 @@
#include <linux/err.h>
#include <linux/usb/otg.h>
#include <linux/platform_data/mv_usb.h>
+#include <linux/io.h>
+
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+/* registers */
+#define U2x_CAPREGS_OFFSET 0x100
#define CAPLENGTH_MASK (0xff)
-struct ehci_hcd_mv {
- struct usb_hcd *hcd;
+#define hcd_to_ehci_hcd_mv(h) ((struct ehci_hcd_mv *)hcd_to_ehci(h)->priv)
+struct ehci_hcd_mv {
/* Which mode is this EHCI controller running in: OTG or host? */
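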
int mode;
- void __iomem *phy_regs;
+ void __iomem *base;
void __iomem *cap_regs;
void __iomem *op_regs;
struct usb_phy *otg;
+ struct clk *clk;
- struct mv_usb_platform_data *pdata;
+ struct phy *phy;
- struct clk *clk;
+ int (*set_vbus)(unsigned int vbus);
};
static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
@@ -44,29 +53,20 @@ static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
{
- int retval;
-
ehci_clock_enable(ehci_mv);
- if (ehci_mv->pdata->phy_init) {
- retval = ehci_mv->pdata->phy_init(ehci_mv->phy_regs);
- if (retval)
- return retval;
- }
-
- return 0;
+ return phy_init(ehci_mv->phy);
}
static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
{
- if (ehci_mv->pdata->phy_deinit)
- ehci_mv->pdata->phy_deinit(ehci_mv->phy_regs);
+ phy_exit(ehci_mv->phy);
ehci_clock_disable(ehci_mv);
}
static int mv_ehci_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
- struct ehci_hcd_mv *ehci_mv = dev_get_drvdata(dev);
+ struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
int retval;
if (ehci_mv == NULL) {
@@ -83,46 +83,11 @@ static int mv_ehci_reset(struct usb_hcd *hcd)
return retval;
}
-static const struct hc_driver mv_ehci_hc_driver = {
- .description = hcd_name,
- .product_desc = "Marvell EHCI",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
-
- /*
- * basic lifecycle operations
- */
- .reset = mv_ehci_reset,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
+static struct hc_driver __read_mostly ehci_platform_hc_driver;
+
+static const struct ehci_driver_overrides platform_overrides __initconst = {
+ .reset = mv_ehci_reset,
+ .extra_priv_size = sizeof(struct ehci_hcd_mv),
};
static int mv_ehci_probe(struct platform_device *pdev)
@@ -135,27 +100,29 @@ static int mv_ehci_probe(struct platform_device *pdev)
int retval = -ENODEV;
u32 offset;
- if (!pdata) {
- dev_err(&pdev->dev, "missing platform_data\n");
- return -ENODEV;
- }
-
if (usb_disabled())
return -ENODEV;
- hcd = usb_create_hcd(&mv_ehci_hc_driver, &pdev->dev, "mv ehci");
+ hcd = usb_create_hcd(&ehci_platform_hc_driver, &pdev->dev, "mv ehci");
if (!hcd)
return -ENOMEM;
- ehci_mv = devm_kzalloc(&pdev->dev, sizeof(*ehci_mv), GFP_KERNEL);
- if (ehci_mv == NULL) {
- retval = -ENOMEM;
- goto err_put_hcd;
+ platform_set_drvdata(pdev, hcd);
+ ehci_mv = hcd_to_ehci_hcd_mv(hcd);
+
+ ehci_mv->mode = MV_USB_MODE_HOST;
+ if (pdata) {
+ ehci_mv->mode = pdata->mode;
+ ehci_mv->set_vbus = pdata->set_vbus;
}
- platform_set_drvdata(pdev, ehci_mv);
- ehci_mv->pdata = pdata;
- ehci_mv->hcd = hcd;
+ ehci_mv->phy = devm_phy_get(&pdev->dev, "usb");
+ if (IS_ERR(ehci_mv->phy)) {
+ retval = PTR_ERR(ehci_mv->phy);
+ if (retval != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get phy.\n");
+ goto err_put_hcd;
+ }
ehci_mv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ehci_mv->clk)) {
@@ -164,17 +131,12 @@ static int mv_ehci_probe(struct platform_device *pdev)
goto err_put_hcd;
}
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phyregs");
- ehci_mv->phy_regs = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(ehci_mv->phy_regs)) {
- retval = PTR_ERR(ehci_mv->phy_regs);
- goto err_put_hcd;
- }
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "capregs");
- ehci_mv->cap_regs = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(ehci_mv->cap_regs)) {
- retval = PTR_ERR(ehci_mv->cap_regs);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ehci_mv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(ehci_mv->base)) {
+ retval = PTR_ERR(ehci_mv->base);
goto err_put_hcd;
}
@@ -184,6 +146,8 @@ static int mv_ehci_probe(struct platform_device *pdev)
goto err_put_hcd;
}
+ ehci_mv->cap_regs =
+ (void __iomem *) ((unsigned long) ehci_mv->base + U2x_CAPREGS_OFFSET);
offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
ehci_mv->op_regs =
(void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);
@@ -202,7 +166,6 @@ static int mv_ehci_probe(struct platform_device *pdev)
ehci = hcd_to_ehci(hcd);
ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
- ehci_mv->mode = pdata->mode;
if (ehci_mv->mode == MV_USB_MODE_OTG) {
ehci_mv->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
if (IS_ERR(ehci_mv->otg)) {
@@ -227,8 +190,8 @@ static int mv_ehci_probe(struct platform_device *pdev)
/* otg will enable clock before use as host */
mv_ehci_disable(ehci_mv);
} else {
- if (pdata->set_vbus)
- pdata->set_vbus(1);
+ if (ehci_mv->set_vbus)
+ ehci_mv->set_vbus(1);
retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
if (retval) {
@@ -239,9 +202,6 @@ static int mv_ehci_probe(struct platform_device *pdev)
device_wakeup_enable(hcd->self.controller);
}
- if (pdata->private_init)
- pdata->private_init(ehci_mv->op_regs, ehci_mv->phy_regs);
-
dev_info(&pdev->dev,
"successful find EHCI device with regs 0x%p irq %d"
" working in %s mode\n", hcd->regs, hcd->irq,
@@ -250,8 +210,8 @@ static int mv_ehci_probe(struct platform_device *pdev)
return 0;
err_set_vbus:
- if (pdata->set_vbus)
- pdata->set_vbus(0);
+ if (ehci_mv->set_vbus)
+ ehci_mv->set_vbus(0);
err_disable_clk:
mv_ehci_disable(ehci_mv);
err_put_hcd:
@@ -262,8 +222,8 @@ err_put_hcd:
static int mv_ehci_remove(struct platform_device *pdev)
{
- struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_mv->hcd;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
if (hcd->rh_registered)
usb_remove_hcd(hcd);
@@ -272,8 +232,8 @@ static int mv_ehci_remove(struct platform_device *pdev)
otg_set_host(ehci_mv->otg->otg, NULL);
if (ehci_mv->mode == MV_USB_MODE_HOST) {
- if (ehci_mv->pdata->set_vbus)
- ehci_mv->pdata->set_vbus(0);
+ if (ehci_mv->set_vbus)
+ ehci_mv->set_vbus(0);
mv_ehci_disable(ehci_mv);
}
@@ -295,8 +255,7 @@ static const struct platform_device_id ehci_id_table[] = {
static void mv_ehci_shutdown(struct platform_device *pdev)
{
- struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_mv->hcd;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
if (!hcd->rh_registered)
return;
@@ -305,13 +264,41 @@ static void mv_ehci_shutdown(struct platform_device *pdev)
hcd->driver->shutdown(hcd);
}
+static const struct of_device_id ehci_mv_dt_ids[] = {
+ { .compatible = "marvell,pxau2o-ehci", },
+ {},
+};
+
static struct platform_driver ehci_mv_driver = {
.probe = mv_ehci_probe,
.remove = mv_ehci_remove,
.shutdown = mv_ehci_shutdown,
.driver = {
- .name = "mv-ehci",
- .bus = &platform_bus_type,
- },
+ .name = "mv-ehci",
+ .bus = &platform_bus_type,
+ .of_match_table = ehci_mv_dt_ids,
+ },
.id_table = ehci_id_table,
};
+
+static int __init ehci_platform_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
+ return platform_driver_register(&ehci_mv_driver);
+}
+module_init(ehci_platform_init);
+
+static void __exit ehci_platform_cleanup(void)
+{
+ platform_driver_unregister(&ehci_mv_driver);
+}
+module_exit(ehci_platform_cleanup);
+
+MODULE_DESCRIPTION("Marvell EHCI driver");
+MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
+MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>");
+MODULE_ALIAS("mv-ehci");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 327630405695..aa2f77f1506d 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -245,12 +245,12 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
}
if (unlikely(urb->unlinked)) {
- COUNT(ehci->stats.unlink);
+ INCR(ehci->stats.unlink);
} else {
/* report non-error and short read status as zero */
if (status == -EINPROGRESS || status == -EREMOTEIO)
status = 0;
- COUNT(ehci->stats.complete);
+ INCR(ehci->stats.complete);
}
#ifdef EHCI_URB_TRACE
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 4fcebda4b79d..a79c8ac0a55f 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -347,7 +347,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
*/
status = ehci_readl(ehci, &ehci->regs->status);
if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
- COUNT(ehci->stats.lost_iaa);
+ INCR(ehci->stats.lost_iaa);
ehci_writel(ehci, STS_IAA, &ehci->regs->status);
}
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index c8e9a48e1d51..ac5e967907d1 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -235,9 +235,9 @@ struct ehci_hcd { /* one per controller */
/* irq statistics */
#ifdef EHCI_STATS
struct ehci_stats stats;
-# define COUNT(x) ((x)++)
+# define INCR(x) ((x)++)
#else
-# define COUNT(x)
+# define INCR(x) do {} while (0)
#endif
/* debug files */
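
Beyond the rename (COUNT is a generic identifier prone to collisions), the no-op variant now expands to `do {} while (0)` instead of nothing, so INCR() remains a single statement: it demands its trailing semicolon, never leaves a bare `;` in an if body (which trips -Wempty-body), and cannot be misused in expression context. Sketch:

	#define INCR(x) do {} while (0)	/* statement-shaped no-op */

	static void irq_stats_demo(int ok, int *stats)
	{
		if (ok)
			INCR(stats[0]);	/* a real (empty) statement */
		else
			INCR(stats[1]);	/* an empty-expansion macro would
					 * leave a bare `;` here */
	}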
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index e64eb47770c8..0da68df259c8 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -31,6 +31,7 @@
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
@@ -1285,7 +1286,7 @@ static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
*/
status = fotg210_readl(fotg210, &fotg210->regs->status);
if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
- COUNT(fotg210->stats.lost_iaa);
+ INCR(fotg210->stats.lost_iaa);
fotg210_writel(fotg210, STS_IAA,
&fotg210->regs->status);
}
@@ -2204,12 +2205,12 @@ __acquires(fotg210->lock)
}
if (unlikely(urb->unlinked)) {
- COUNT(fotg210->stats.unlink);
+ INCR(fotg210->stats.unlink);
} else {
/* report non-error and short read status as zero */
if (status == -EINPROGRESS || status == -EREMOTEIO)
status = 0;
- COUNT(fotg210->stats.complete);
+ INCR(fotg210->stats.complete);
}
#ifdef FOTG210_URB_TRACE
@@ -5153,9 +5154,9 @@ static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely((status & (STS_INT|STS_ERR)) != 0)) {
if (likely((status & STS_ERR) == 0))
- COUNT(fotg210->stats.normal);
+ INCR(fotg210->stats.normal);
else
- COUNT(fotg210->stats.error);
+ INCR(fotg210->stats.error);
bh = 1;
}
@@ -5180,7 +5181,7 @@ static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
if (cmd & CMD_IAAD)
fotg210_dbg(fotg210, "IAA with IAAD still set?\n");
if (fotg210->async_iaa) {
- COUNT(fotg210->stats.iaa);
+ INCR(fotg210->stats.iaa);
end_unlink_async(fotg210);
} else
fotg210_dbg(fotg210, "IAA with nothing unlinked?\n");
@@ -5596,7 +5597,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
- goto failed;
+ goto failed_put_hcd;
}
hcd->rsrc_start = res->start;
@@ -5606,22 +5607,43 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
fotg210->caps = hcd->regs;
+ /* It's OK not to supply this clock */
+ fotg210->pclk = clk_get(dev, "PCLK");
+ if (!IS_ERR(fotg210->pclk)) {
+ retval = clk_prepare_enable(fotg210->pclk);
+ if (retval) {
+ dev_err(dev, "failed to enable PCLK\n");
+ goto failed_put_hcd;
+ }
+ } else if (PTR_ERR(fotg210->pclk) == -EPROBE_DEFER) {
+ /*
+ * Percolate deferrals, for anything else,
+ * just live without the clocking.
+ */
+ retval = PTR_ERR(fotg210->pclk);
+ goto failed_dis_clk;
+ }
+
retval = fotg210_setup(hcd);
if (retval)
- goto failed;
+ goto failed_dis_clk;
fotg210_init(fotg210);
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval) {
dev_err(dev, "failed to add hcd with err %d\n", retval);
- goto failed;
+ goto failed_dis_clk;
}
device_wakeup_enable(hcd->self.controller);
+ platform_set_drvdata(pdev, hcd);
return retval;
-failed:
+failed_dis_clk:
+ if (!IS_ERR(fotg210->pclk))
+ clk_disable_unprepare(fotg210->pclk);
+failed_put_hcd:
usb_put_hcd(hcd);
fail_create_hcd:
dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
@@ -5635,11 +5657,11 @@ fail_create_hcd:
*/
static int fotg210_hcd_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- if (!hcd)
- return 0;
+ if (!IS_ERR(fotg210->pclk))
+ clk_disable_unprepare(fotg210->pclk);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
index 7fcd785c7bc8..1b4db95e5c43 100644
--- a/drivers/usb/host/fotg210.h
+++ b/drivers/usb/host/fotg210.h
@@ -177,11 +177,14 @@ struct fotg210_hcd { /* one per controller */
/* irq statistics */
#ifdef FOTG210_STATS
struct fotg210_stats stats;
-# define COUNT(x) ((x)++)
+# define INCR(x) ((x)++)
#else
-# define COUNT(x)
+# define INCR(x) do {} while (0)
#endif
+ /* silicon clock */
+ struct clk *pclk;
+
/* debug files */
struct dentry *debug_dir;
};
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index e98673954020..ec6739ef3129 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -551,6 +551,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
pdata->overcurrent_pin[i] =
devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc",
i, GPIOD_IN);
+ if (!pdata->overcurrent_pin[i])
+ continue;
if (IS_ERR(pdata->overcurrent_pin[i])) {
err = PTR_ERR(pdata->overcurrent_pin[i]);
dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 3625a5c1a41b..3ce71cbfbb58 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -783,15 +783,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
/* disable interrupts */
writel((u32) ~0, base + OHCI_INTRDISABLE);
- /* Reset the USB bus, if the controller isn't already in RESET */
- if (control & OHCI_HCFS) {
- /* Go into RESET, preserving RWC (and possibly IR) */
- writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
- readl(base + OHCI_CONTROL);
-
- /* drive bus reset for at least 50 ms (7.1.7.5) */
- msleep(50);
- }
+ /* Go into the USB_RESET state, preserving RWC (and possibly IR) */
+ writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+ readl(base + OHCI_CONTROL);
/* software reset of the controller, preserving HcFmInterval */
if (!no_fminterval)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 7e2a531ba321..12eea73d9f20 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -900,6 +900,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
set_bit(wIndex, &bus_state->resuming_ports);
bus_state->resume_done[wIndex] = timeout;
mod_timer(&hcd->rh_timer, timeout);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
}
/* Has resume been signalled for USB_RESUME_TIME yet? */
} else if (time_after_eq(jiffies,
@@ -940,6 +941,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
clear_bit(wIndex, &bus_state->rexit_ports);
}
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
bus_state->port_c_suspend |= 1 << wIndex;
bus_state->suspended_ports &= ~(1 << wIndex);
} else {
@@ -962,6 +964,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
(raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
bus_state->resume_done[wIndex] = 0;
clear_bit(wIndex, &bus_state->resuming_ports);
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
}
@@ -1337,6 +1340,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
set_bit(wIndex, &bus_state->resuming_ports);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
xhci_set_link_state(xhci, ports[wIndex],
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1345,6 +1349,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, ports[wIndex],
XDEV_U0);
clear_bit(wIndex, &bus_state->resuming_ports);
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
}
bus_state->port_c_suspend |= 1 << wIndex;
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index fa33d6e5b1cb..fea555570ad4 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -13,14 +13,20 @@
#include "xhci.h"
#include "xhci-mtk.h"
+#define SSP_BW_BOUNDARY 130000
#define SS_BW_BOUNDARY 51000
/* table 5-5. High-speed Isoc Transaction Limits in usb_20 spec */
#define HS_BW_BOUNDARY 6144
/* usb2 spec section11.18.1: at most 188 FS bytes per microframe */
#define FS_PAYLOAD_MAX 188
+/*
+ * max number of microframes for split transfer,
+ * for fs isoc in : 1 ss + 1 idle + 7 cs
+ */
+#define TT_MICROFRAMES_MAX 9
/* mtk scheduler bitmasks */
-#define EP_BPKTS(p) ((p) & 0x3f)
+#define EP_BPKTS(p) ((p) & 0x7f)
#define EP_BCSCOUNT(p) (((p) & 0x7) << 8)
#define EP_BBM(p) ((p) << 11)
#define EP_BOFFSET(p) ((p) & 0x3fff)
@@ -51,7 +57,7 @@ static int get_bw_index(struct xhci_hcd *xhci, struct usb_device *udev,
virt_dev = xhci->devs[udev->slot_id];
- if (udev->speed == USB_SPEED_SUPER) {
+ if (udev->speed >= USB_SPEED_SUPER) {
if (usb_endpoint_dir_out(&ep->desc))
bw_index = (virt_dev->real_port - 1) * 2;
else
@@ -64,25 +70,167 @@ static int get_bw_index(struct xhci_hcd *xhci, struct usb_device *udev,
return bw_index;
}
+static u32 get_esit(struct xhci_ep_ctx *ep_ctx)
+{
+ u32 esit;
+
+ esit = 1 << CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info));
+ if (esit > XHCI_MTK_MAX_ESIT)
+ esit = XHCI_MTK_MAX_ESIT;
+
+ return esit;
+}
+
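
get_esit() decodes the xHCI interval code: the endpoint service interval is 2^interval microframes, clamped to the scheduler's 64-microframe window. A standalone model with two worked values:

#include <stdio.h>

#define XHCI_MTK_MAX_ESIT 64

/* standalone sketch of the decode: esit = 2^interval, clamped */
static unsigned int get_esit_model(unsigned int interval)
{
        unsigned int esit = 1u << interval;

        return esit > XHCI_MTK_MAX_ESIT ? XHCI_MTK_MAX_ESIT : esit;
}

int main(void)
{
        /* interval 3 -> 8 uframes (1 ms); interval 7 -> clamped to 64 */
        printf("%u %u\n", get_esit_model(3), get_esit_model(7));
        return 0;
}
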
+static struct mu3h_sch_tt *find_tt(struct usb_device *udev)
+{
+ struct usb_tt *utt = udev->tt;
+ struct mu3h_sch_tt *tt, **tt_index, **ptt;
+ unsigned int port;
+ bool allocated_index = false;
+
+ if (!utt)
+ return NULL; /* Not below a TT */
+
+ /*
+ * Find/create our data structure.
+ * For hubs with a single TT, we get it directly.
+ * For hubs with multiple TTs, there's an extra level of pointers.
+ */
+ tt_index = NULL;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ if (!tt_index) { /* Create the index array */
+ tt_index = kcalloc(utt->hub->maxchild,
+ sizeof(*tt_index), GFP_KERNEL);
+ if (!tt_index)
+ return ERR_PTR(-ENOMEM);
+ utt->hcpriv = tt_index;
+ allocated_index = true;
+ }
+ port = udev->ttport - 1;
+ ptt = &tt_index[port];
+ } else {
+ port = 0;
+ ptt = (struct mu3h_sch_tt **) &utt->hcpriv;
+ }
+
+ tt = *ptt;
+ if (!tt) { /* Create the mu3h_sch_tt */
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt) {
+ if (allocated_index) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
+ }
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&tt->ep_list);
+ tt->usb_tt = utt;
+ tt->tt_port = port;
+ *ptt = tt;
+ }
+
+ return tt;
+}
+
+/* Release the TT above udev, if it's not in use */
+static void drop_tt(struct usb_device *udev)
+{
+ struct usb_tt *utt = udev->tt;
+ struct mu3h_sch_tt *tt, **tt_index, **ptt;
+ int i, cnt;
+
+ if (!utt || !utt->hcpriv)
+ return; /* Not below a TT, or never allocated */
+
+ cnt = 0;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ ptt = &tt_index[udev->ttport - 1];
+ /* How many entries are left in tt_index? */
+ for (i = 0; i < utt->hub->maxchild; ++i)
+ cnt += !!tt_index[i];
+ } else {
+ tt_index = NULL;
+ ptt = (struct mu3h_sch_tt **)&utt->hcpriv;
+ }
+
+ tt = *ptt;
+ if (!tt || !list_empty(&tt->ep_list))
+ return; /* never allocated, or still in use */
+
+ *ptt = NULL;
+ kfree(tt);
+
+ if (cnt == 1) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
+ }
+}
+
+static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
+ struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx)
+{
+ struct mu3h_sch_ep_info *sch_ep;
+ struct mu3h_sch_tt *tt = NULL;
+ u32 len_bw_budget_table;
+ size_t mem_size;
+
+ if (is_fs_or_ls(udev->speed))
+ len_bw_budget_table = TT_MICROFRAMES_MAX;
+ else if ((udev->speed >= USB_SPEED_SUPER)
+ && usb_endpoint_xfer_isoc(&ep->desc))
+ len_bw_budget_table = get_esit(ep_ctx);
+ else
+ len_bw_budget_table = 1;
+
+ mem_size = sizeof(struct mu3h_sch_ep_info) +
+ len_bw_budget_table * sizeof(u32);
+ sch_ep = kzalloc(mem_size, GFP_KERNEL);
+ if (!sch_ep)
+ return ERR_PTR(-ENOMEM);
+
+ if (is_fs_or_ls(udev->speed)) {
+ tt = find_tt(udev);
+ if (IS_ERR(tt)) {
+ kfree(sch_ep);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ sch_ep->sch_tt = tt;
+ sch_ep->ep = ep;
+
+ return sch_ep;
+}
+
static void setup_sch_info(struct usb_device *udev,
struct xhci_ep_ctx *ep_ctx, struct mu3h_sch_ep_info *sch_ep)
{
u32 ep_type;
- u32 ep_interval;
- u32 max_packet_size;
+ u32 maxpkt;
u32 max_burst;
u32 mult;
u32 esit_pkts;
+ u32 max_esit_payload;
+ u32 *bwb_table = sch_ep->bw_budget_table;
+ int i;
ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
- ep_interval = CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info));
- max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
+ maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
max_burst = CTX_TO_MAX_BURST(le32_to_cpu(ep_ctx->ep_info2));
mult = CTX_TO_EP_MULT(le32_to_cpu(ep_ctx->ep_info));
-
- sch_ep->esit = 1 << ep_interval;
+ max_esit_payload =
+ (CTX_TO_MAX_ESIT_PAYLOAD_HI(
+ le32_to_cpu(ep_ctx->ep_info)) << 16) |
+ CTX_TO_MAX_ESIT_PAYLOAD(le32_to_cpu(ep_ctx->tx_info));
+
+ sch_ep->esit = get_esit(ep_ctx);
+ sch_ep->ep_type = ep_type;
+ sch_ep->maxpkt = maxpkt;
sch_ep->offset = 0;
sch_ep->burst_mode = 0;
+ sch_ep->repeat = 0;
if (udev->speed == USB_SPEED_HIGH) {
sch_ep->cs_count = 0;
@@ -93,7 +241,6 @@ static void setup_sch_info(struct usb_device *udev,
* in an interval
*/
sch_ep->num_budget_microframes = 1;
- sch_ep->repeat = 0;
/*
* xHCI spec section6.2.3.4
@@ -101,19 +248,33 @@ static void setup_sch_info(struct usb_device *udev,
* opportunities per microframe
*/
sch_ep->pkts = max_burst + 1;
- sch_ep->bw_cost_per_microframe = max_packet_size * sch_ep->pkts;
- } else if (udev->speed == USB_SPEED_SUPER) {
+ sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
+ bwb_table[0] = sch_ep->bw_cost_per_microframe;
+ } else if (udev->speed >= USB_SPEED_SUPER) {
/* usb3_r1 spec section4.4.7 & 4.4.8 */
sch_ep->cs_count = 0;
- esit_pkts = (mult + 1) * (max_burst + 1);
+ sch_ep->burst_mode = 1;
+ /*
+ * some device's (d)wBytesPerInterval is set as 0,
+ * then max_esit_payload is 0, so evaluate esit_pkts from
+ * mult and burst
+ */
+ esit_pkts = DIV_ROUND_UP(max_esit_payload, maxpkt);
+ if (esit_pkts == 0)
+ esit_pkts = (mult + 1) * (max_burst + 1);
+
if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
sch_ep->pkts = esit_pkts;
sch_ep->num_budget_microframes = 1;
- sch_ep->repeat = 0;
+ bwb_table[0] = maxpkt * sch_ep->pkts;
}
if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
- if (esit_pkts <= sch_ep->esit)
+ u32 remainder;
+
+ if (sch_ep->esit == 1)
+ sch_ep->pkts = esit_pkts;
+ else if (esit_pkts <= sch_ep->esit)
sch_ep->pkts = 1;
else
sch_ep->pkts = roundup_pow_of_two(esit_pkts)
@@ -122,43 +283,48 @@ static void setup_sch_info(struct usb_device *udev,
sch_ep->num_budget_microframes =
DIV_ROUND_UP(esit_pkts, sch_ep->pkts);
- if (sch_ep->num_budget_microframes > 1)
- sch_ep->repeat = 1;
- else
- sch_ep->repeat = 0;
+ sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
+ sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
+
+ remainder = sch_ep->bw_cost_per_microframe;
+ remainder *= sch_ep->num_budget_microframes;
+ remainder -= (maxpkt * esit_pkts);
+ for (i = 0; i < sch_ep->num_budget_microframes - 1; i++)
+ bwb_table[i] = sch_ep->bw_cost_per_microframe;
+
+ /* last one <= bw_cost_per_microframe */
+ bwb_table[i] = remainder;
}
- sch_ep->bw_cost_per_microframe = max_packet_size * sch_ep->pkts;
} else if (is_fs_or_ls(udev->speed)) {
+ sch_ep->pkts = 1; /* at most one packet for each microframe */
/*
- * usb_20 spec section11.18.4
- * assume worst cases
+ * num_budget_microframes and cs_count will be updated when
+ * checking the TT for INT_OUT_EP and ISOC/INT_IN_EP types
*/
- sch_ep->repeat = 0;
- sch_ep->pkts = 1; /* at most one packet for each microframe */
- if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
- sch_ep->cs_count = 3; /* at most need 3 CS*/
- /* one for SS and one for budgeted transaction */
- sch_ep->num_budget_microframes = sch_ep->cs_count + 2;
- sch_ep->bw_cost_per_microframe = max_packet_size;
- }
- if (ep_type == ISOC_OUT_EP) {
+ sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
+ sch_ep->num_budget_microframes = sch_ep->cs_count;
+ sch_ep->bw_cost_per_microframe =
+ (maxpkt < FS_PAYLOAD_MAX) ? maxpkt : FS_PAYLOAD_MAX;
+ /* init budget table */
+ if (ep_type == ISOC_OUT_EP) {
+ for (i = 0; i < sch_ep->num_budget_microframes; i++)
+ bwb_table[i] = sch_ep->bw_cost_per_microframe;
+ } else if (ep_type == INT_OUT_EP) {
+ /* only the first one consumes bandwidth, the others are zero */
+ bwb_table[0] = sch_ep->bw_cost_per_microframe;
+ } else { /* INT_IN_EP or ISOC_IN_EP */
+ bwb_table[0] = 0; /* start split */
+ bwb_table[1] = 0; /* idle */
/*
- * the best case FS budget assumes that 188 FS bytes
- * occur in each microframe
+ * because cs_count will be updated according to the CS
+ * position, assign all remaining budget array
+ * elements as @bw_cost_per_microframe; only the first
+ * @num_budget_microframes elements will be used later
*/
- sch_ep->num_budget_microframes = DIV_ROUND_UP(
- max_packet_size, FS_PAYLOAD_MAX);
- sch_ep->bw_cost_per_microframe = FS_PAYLOAD_MAX;
- sch_ep->cs_count = sch_ep->num_budget_microframes;
- }
- if (ep_type == ISOC_IN_EP) {
- /* at most need additional two CS. */
- sch_ep->cs_count = DIV_ROUND_UP(
- max_packet_size, FS_PAYLOAD_MAX) + 2;
- sch_ep->num_budget_microframes = sch_ep->cs_count + 2;
- sch_ep->bw_cost_per_microframe = FS_PAYLOAD_MAX;
+ for (i = 2; i < TT_MICROFRAMES_MAX; i++)
+ bwb_table[i] = sch_ep->bw_cost_per_microframe;
}
}
}
@@ -169,6 +335,7 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
{
u32 num_esit;
u32 max_bw = 0;
+ u32 bw;
int i;
int j;
@@ -177,15 +344,17 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
u32 base = offset + i * sch_ep->esit;
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
- if (sch_bw->bus_bw[base + j] > max_bw)
- max_bw = sch_bw->bus_bw[base + j];
+ bw = sch_bw->bus_bw[base + j] +
+ sch_ep->bw_budget_table[j];
+ if (bw > max_bw)
+ max_bw = bw;
}
}
return max_bw;
}
static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
- struct mu3h_sch_ep_info *sch_ep, int bw_cost)
+ struct mu3h_sch_ep_info *sch_ep, bool used)
{
u32 num_esit;
u32 base;
@@ -195,9 +364,105 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
for (i = 0; i < num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
+ for (j = 0; j < sch_ep->num_budget_microframes; j++) {
+ if (used)
+ sch_bw->bus_bw[base + j] +=
+ sch_ep->bw_budget_table[j];
+ else
+ sch_bw->bus_bw[base + j] -=
+ sch_ep->bw_budget_table[j];
+ }
+ }
+}
+
+static int check_sch_tt(struct usb_device *udev,
+ struct mu3h_sch_ep_info *sch_ep, u32 offset)
+{
+ struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ u32 extra_cs_count;
+ u32 fs_budget_start;
+ u32 start_ss, last_ss;
+ u32 start_cs, last_cs;
+ int i;
+
+ start_ss = offset % 8;
+ fs_budget_start = (start_ss + 1) % 8;
+
+ if (sch_ep->ep_type == ISOC_OUT_EP) {
+ last_ss = start_ss + sch_ep->cs_count - 1;
+
+ /*
+ * usb_20 spec section11.18:
+ * must never schedule Start-Split in Y6
+ */
+ if (!(start_ss == 7 || last_ss < 6))
+ return -ERANGE;
+
+ for (i = 0; i < sch_ep->cs_count; i++)
+ if (test_bit(offset + i, tt->split_bit_map))
+ return -ERANGE;
+
+ } else {
+ u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
+
+ /*
+ * usb_20 spec section11.18:
+ * must never schedule Start-Split in Y6
+ */
+ if (start_ss == 6)
+ return -ERANGE;
+
+ /* one uframe for ss + one uframe for idle */
+ start_cs = (start_ss + 2) % 8;
+ last_cs = start_cs + cs_count - 1;
+
+ if (last_cs > 7)
+ return -ERANGE;
+
+ if (sch_ep->ep_type == ISOC_IN_EP)
+ extra_cs_count = (last_cs == 7) ? 1 : 2;
+ else /* ep_type : INTR IN / INTR OUT */
+ extra_cs_count = (fs_budget_start == 6) ? 1 : 2;
+
+ cs_count += extra_cs_count;
+ if (cs_count > 7)
+ cs_count = 7; /* HW limit */
+
+ for (i = 0; i < cs_count + 2; i++) {
+ if (test_bit(offset + i, tt->split_bit_map))
+ return -ERANGE;
+ }
+
+ sch_ep->cs_count = cs_count;
+ /* one for ss, the other for idle */
+ sch_ep->num_budget_microframes = cs_count + 2;
+
+ /*
+ * if interval == 1 and maxp > 752, num_budget_microframes
+ * is larger than sch_ep->esit and would overstep the boundary
+ */
+ if (sch_ep->num_budget_microframes > sch_ep->esit)
+ sch_ep->num_budget_microframes = sch_ep->esit;
+ }
+
+ return 0;
+}
+
+static void update_sch_tt(struct usb_device *udev,
+ struct mu3h_sch_ep_info *sch_ep)
+{
+ struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ u32 base, num_esit;
+ int i, j;
+
+ num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
+ for (i = 0; i < num_esit; i++) {
+ base = sch_ep->offset + i * sch_ep->esit;
for (j = 0; j < sch_ep->num_budget_microframes; j++)
- sch_bw->bus_bw[base + j] += bw_cost;
+ set_bit(base + j, tt->split_bit_map);
}
+
+ list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
}
static int check_sch_bw(struct usb_device *udev,
@@ -205,17 +470,16 @@ static int check_sch_bw(struct usb_device *udev,
{
u32 offset;
u32 esit;
- u32 num_budget_microframes;
u32 min_bw;
u32 min_index;
u32 worst_bw;
u32 bw_boundary;
-
- if (sch_ep->esit > XHCI_MTK_MAX_ESIT)
- sch_ep->esit = XHCI_MTK_MAX_ESIT;
+ u32 min_num_budget;
+ u32 min_cs_count;
+ bool tt_offset_ok = false;
+ int ret;
esit = sch_ep->esit;
- num_budget_microframes = sch_ep->num_budget_microframes;
/*
* Search through all possible schedule microframes.
@@ -223,36 +487,56 @@ static int check_sch_bw(struct usb_device *udev,
*/
min_bw = ~0;
min_index = 0;
+ min_cs_count = sch_ep->cs_count;
+ min_num_budget = sch_ep->num_budget_microframes;
for (offset = 0; offset < esit; offset++) {
- if ((offset + num_budget_microframes) > sch_ep->esit)
- break;
+ if (is_fs_or_ls(udev->speed)) {
+ ret = check_sch_tt(udev, sch_ep, offset);
+ if (ret)
+ continue;
+ else
+ tt_offset_ok = true;
+ }
- /*
- * usb_20 spec section11.18:
- * must never schedule Start-Split in Y6
- */
- if (is_fs_or_ls(udev->speed) && (offset % 8 == 6))
- continue;
+ if ((offset + sch_ep->num_budget_microframes) > sch_ep->esit)
+ break;
worst_bw = get_max_bw(sch_bw, sch_ep, offset);
if (min_bw > worst_bw) {
min_bw = worst_bw;
min_index = offset;
+ min_cs_count = sch_ep->cs_count;
+ min_num_budget = sch_ep->num_budget_microframes;
}
if (min_bw == 0)
break;
}
- sch_ep->offset = min_index;
- bw_boundary = (udev->speed == USB_SPEED_SUPER)
- ? SS_BW_BOUNDARY : HS_BW_BOUNDARY;
+ if (udev->speed == USB_SPEED_SUPER_PLUS)
+ bw_boundary = SSP_BW_BOUNDARY;
+ else if (udev->speed == USB_SPEED_SUPER)
+ bw_boundary = SS_BW_BOUNDARY;
+ else
+ bw_boundary = HS_BW_BOUNDARY;
/* check bandwidth */
- if (min_bw + sch_ep->bw_cost_per_microframe > bw_boundary)
+ if (min_bw > bw_boundary)
return -ERANGE;
+ sch_ep->offset = min_index;
+ sch_ep->cs_count = min_cs_count;
+ sch_ep->num_budget_microframes = min_num_budget;
+
+ if (is_fs_or_ls(udev->speed)) {
+ /* no offset was usable for the TT */
+ if (!tt_offset_ok)
+ return -ERANGE;
+
+ update_sch_tt(udev, sch_ep);
+ }
+
/* update bus bandwidth info */
- update_bus_bw(sch_bw, sch_ep, sch_ep->bw_cost_per_microframe);
+ update_bus_bw(sch_bw, sch_ep, 1);
return 0;
}
@@ -347,8 +631,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
bw_index = get_bw_index(xhci, udev, ep);
sch_bw = &sch_array[bw_index];
- sch_ep = kzalloc(sizeof(struct mu3h_sch_ep_info), GFP_NOIO);
- if (!sch_ep)
+ sch_ep = create_sch_ep(udev, ep, ep_ctx);
+ if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
setup_sch_info(udev, ep_ctx, sch_ep);
@@ -356,12 +640,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
ret = check_sch_bw(udev, sch_bw, sch_ep);
if (ret) {
xhci_err(xhci, "Not enough bandwidth!\n");
+ if (is_fs_or_ls(udev->speed))
+ drop_tt(udev);
+
kfree(sch_ep);
return -ENOSPC;
}
list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
- sch_ep->ep = ep;
ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
| EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
@@ -406,9 +692,12 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
if (sch_ep->ep == ep) {
- update_bus_bw(sch_bw, sch_ep,
- -sch_ep->bw_cost_per_microframe);
+ update_bus_bw(sch_bw, sch_ep, 0);
list_del(&sch_ep->endpoint);
+ if (is_fs_or_ls(udev->speed)) {
+ list_del(&sch_ep->tt_endpoint);
+ drop_tt(udev);
+ }
kfree(sch_ep);
break;
}
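
Taken together, the rework makes the scheduler reason per microframe: setup_sch_info() fills a bw_budget_table describing what the endpoint consumes in each microframe of its interval, and check_sch_bw() tries every candidate offset, computes the worst-case bus load that placement would produce, and keeps the offset with the smallest worst case before testing it against the speed-dependent boundary. A simplified standalone sketch of that search (TT constraints omitted):

#include <stdio.h>

#define MAX_ESIT 64

/* sketch: choose the schedule offset minimizing the worst-case load */
static int best_offset(const unsigned int bus_bw[MAX_ESIT],
                       const unsigned int budget[], unsigned int n_budget,
                       unsigned int esit, unsigned int boundary)
{
        unsigned int offset, i, j, bw, worst, min_bw = ~0u;
        int min_index = -1;

        for (offset = 0; offset + n_budget <= esit; offset++) {
                worst = 0;
                /* the placement repeats every esit uframes in the window */
                for (i = 0; i < MAX_ESIT / esit; i++)
                        for (j = 0; j < n_budget; j++) {
                                bw = bus_bw[offset + i * esit + j] + budget[j];
                                if (bw > worst)
                                        worst = bw;
                        }
                if (worst < min_bw) {
                        min_bw = worst;
                        min_index = offset;
                }
        }
        return min_bw <= boundary ? min_index : -1;     /* -1: no bandwidth */
}

int main(void)
{
        unsigned int bus_bw[MAX_ESIT] = { [0] = 500 };  /* uframe 0 is busy */
        unsigned int budget[1] = { 188 };

        /* the search settles on offset 1, the first lightly loaded slot */
        printf("offset=%d\n", best_offset(bus_bw, budget, 1, 64, 6144));
        return 0;
}
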
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 7334da9e9779..71d0d33c3286 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -642,10 +642,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
xhci_mtk_host_enable(mtk);
xhci_dbg(xhci, "%s: restart port polling\n", __func__);
- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- usb_hcd_poll_rh_status(hcd);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
return 0;
}
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index cc59d80b663b..8be8c5f7ff62 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -20,6 +20,19 @@
#define XHCI_MTK_MAX_ESIT 64
/**
+ * @split_bit_map: used to avoid split microframes overlap
+ * @ep_list: Endpoints using this TT
+ * @usb_tt: usb TT related
+ * @tt_port: TT port number
+ */
+struct mu3h_sch_tt {
+ DECLARE_BITMAP(split_bit_map, XHCI_MTK_MAX_ESIT);
+ struct list_head ep_list;
+ struct usb_tt *usb_tt;
+ int tt_port;
+};
+
+/**
* struct mu3h_sch_bw_info: schedule information for bandwidth domain
*
* @bus_bw: array to keep track of bandwidth already used in each uframe
@@ -41,6 +54,10 @@ struct mu3h_sch_bw_info {
* (@repeat==1) scheduled within the interval
* @bw_cost_per_microframe: bandwidth cost per microframe
* @endpoint: linked into bandwidth domain which it belongs to
+ * @tt_endpoint: linked into mu3h_sch_tt's list which it belongs to
+ * @sch_tt: the mu3h_sch_tt this endpoint is linked into
+ * @ep_type: endpoint type
+ * @maxpkt: max packet size of endpoint
* @ep: address of usb_host_endpoint struct
* @offset: which uframe of the interval that transfer should be
* scheduled first time within the interval
@@ -57,12 +74,17 @@ struct mu3h_sch_bw_info {
* times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets
* according to @pkts and @repeat. normal mode is used by
* default
+ * @bw_budget_table: table to record bandwidth budget per microframe
*/
struct mu3h_sch_ep_info {
u32 esit;
u32 num_budget_microframes;
u32 bw_cost_per_microframe;
struct list_head endpoint;
+ struct list_head tt_endpoint;
+ struct mu3h_sch_tt *sch_tt;
+ u32 ep_type;
+ u32 maxpkt;
void *ep;
/*
* mtk xHCI scheduling information put into reserved DWs
@@ -73,6 +95,7 @@ struct mu3h_sch_ep_info {
u32 pkts;
u32 cs_count;
u32 burst_mode;
+ u32 bw_budget_table[0];
};
#define MU3C_U3_PORT_MAX 4
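
The new bw_budget_table[0] member is the older zero-length-array spelling of a trailing flexible array (modern kernel code writes []): create_sch_ep() sizes one allocation to cover the struct plus however many per-microframe entries the endpoint needs. A standalone sketch of the same layout:

#include <stdio.h>
#include <stdlib.h>

struct ep_info {
        unsigned int esit;
        unsigned int table[];   /* C99 flexible array member */
};

int main(void)
{
        unsigned int n = 9;     /* e.g. TT_MICROFRAMES_MAX */
        /* one allocation covers the header and the trailing table */
        struct ep_info *ep = calloc(1, sizeof(*ep) + n * sizeof(ep->table[0]));

        if (!ep)
                return 1;
        ep->esit = 8;
        ep->table[n - 1] = 188; /* last budget entry */
        printf("%u %u\n", ep->esit, ep->table[n - 1]);
        free(ep);
        return 0;
}
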
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 6372edf339d9..01c57055c0c5 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -41,6 +41,13 @@
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI 0x15b6
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI 0x15db
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI 0x15d4
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
@@ -179,16 +186,30 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)
xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
- }
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
xhci->quirks |= XHCI_MISSING_CAS;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI))
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_EJ168) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -332,6 +353,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);
+ if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ pm_runtime_allow(&dev->dev);
+
return 0;
put_usb3_hcd:
@@ -349,6 +373,10 @@ static void xhci_pci_remove(struct pci_dev *dev)
xhci = hcd_to_xhci(pci_get_drvdata(dev));
xhci->xhc_state |= XHCI_STATE_REMOVING;
+
+ if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ pm_runtime_forbid(&dev->dev);
+
if (xhci->shared_hcd) {
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
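
The new XHCI_DEFAULT_PM_RUNTIME_ALLOW quirk opts the listed Alpine and Titan Ridge controllers into runtime PM without user intervention: pm_runtime_allow() is the in-kernel equivalent of writing "auto" to the device's power/control sysfs attribute, and it must be rebalanced by pm_runtime_forbid() on remove, exactly as the probe/remove hunks do. A condensed kernel-context sketch of the pairing:

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "xhci.h"

/* sketch: default-allow runtime PM for quirky controllers, keeping the
 * allow/forbid calls balanced across probe and remove */
static void example_pm_setup(struct pci_dev *pdev, struct xhci_hcd *xhci)
{
        if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
                pm_runtime_allow(&pdev->dev);   /* like "auto" in power/control */
}

static void example_pm_teardown(struct pci_dev *pdev, struct xhci_hcd *xhci)
{
        if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
                pm_runtime_forbid(&pdev->dev);  /* back to the "on" default */
}
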
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 94e939249b2b..32b5574ad5c5 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -18,6 +18,7 @@
#include <linux/usb/phy.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/usb/of.h>
#include "xhci.h"
#include "xhci-plat.h"
@@ -305,6 +306,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd->skip_phy_initialization = 1;
}
+ hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
+ xhci->shared_hcd->tpl_support = hcd->tpl_support;
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto disable_usb_phy;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f0a99aa0ac58..a8d92c90fb58 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1155,6 +1155,10 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
/* Clear our internal halted state */
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
}
+
+ /* if this was a soft reset, then restart */
+ if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
@@ -1602,6 +1606,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
mod_timer(&hcd->rh_timer,
bus_state->resume_done[hcd_portnum]);
+ usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
bogus_port_status = true;
}
}
@@ -2132,10 +2137,16 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
+ struct xhci_slot_ctx *slot_ctx;
struct xhci_ring *ep_ring;
u32 trb_comp_code;
u32 remaining, requested, ep_trb_len;
+ unsigned int slot_id;
+ int ep_index;
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[slot_id]->out_ctx);
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
@@ -2144,6 +2155,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
switch (trb_comp_code) {
case COMP_SUCCESS:
+ ep_ring->err_count = 0;
/* handle success with untransferred data as short packet */
if (ep_trb != td->last_trb || remaining) {
xhci_warn(xhci, "WARN Successful completion on short TX\n");
@@ -2167,6 +2179,14 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
ep_trb_len = 0;
remaining = 0;
break;
+ case COMP_USB_TRANSACTION_ERROR:
+ if ((ep_ring->err_count++ > MAX_SOFT_RETRY) ||
+ le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
+ break;
+ *status = 0;
+ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
+ ep_ring->stream_id, td, EP_SOFT_RESET);
+ return 0;
default:
/* do nothing */
break;
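
This hunk implements xHCI soft retry: on a transaction error for a device not behind a TT, the endpoint is cleaned up with EP_SOFT_RESET (a Reset Endpoint command with TSP set, which preserves transfer state) and the same TD is retried in place; COMP_SUCCESS clears err_count, so only consecutive errors count against MAX_SOFT_RETRY. A standalone model of the counting policy:

#include <stdio.h>

#define MAX_SOFT_RETRY 3

/* standalone model of the err_count policy added above */
static int handle_completion(unsigned int *err_count, int success)
{
        if (success) {
                *err_count = 0;         /* COMP_SUCCESS clears the counter */
                return 0;               /* complete the TD */
        }
        if ((*err_count)++ > MAX_SOFT_RETRY)
                return -1;              /* retries exhausted: fail the TD */
        return 1;                       /* soft-reset the endpoint and retry */
}

int main(void)
{
        unsigned int err = 0;
        int i;

        /* five consecutive transaction errors: four retries, then failure */
        for (i = 0; i < 5; i++)
                printf("attempt %d -> %d\n", i, handle_completion(&err, 0));
        return 0;
}
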
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 4b463e5202a4..6b5db344de30 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -18,6 +18,7 @@
#include <linux/phy/tegra/xusb.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -107,35 +108,35 @@
#define IMEM_BLOCK_SIZE 256
struct tegra_xusb_fw_header {
- u32 boot_loadaddr_in_imem;
- u32 boot_codedfi_offset;
- u32 boot_codetag;
- u32 boot_codesize;
- u32 phys_memaddr;
- u16 reqphys_memsize;
- u16 alloc_phys_memsize;
- u32 rodata_img_offset;
- u32 rodata_section_start;
- u32 rodata_section_end;
- u32 main_fnaddr;
- u32 fwimg_cksum;
- u32 fwimg_created_time;
- u32 imem_resident_start;
- u32 imem_resident_end;
- u32 idirect_start;
- u32 idirect_end;
- u32 l2_imem_start;
- u32 l2_imem_end;
- u32 version_id;
+ __le32 boot_loadaddr_in_imem;
+ __le32 boot_codedfi_offset;
+ __le32 boot_codetag;
+ __le32 boot_codesize;
+ __le32 phys_memaddr;
+ __le16 reqphys_memsize;
+ __le16 alloc_phys_memsize;
+ __le32 rodata_img_offset;
+ __le32 rodata_section_start;
+ __le32 rodata_section_end;
+ __le32 main_fnaddr;
+ __le32 fwimg_cksum;
+ __le32 fwimg_created_time;
+ __le32 imem_resident_start;
+ __le32 imem_resident_end;
+ __le32 idirect_start;
+ __le32 idirect_end;
+ __le32 l2_imem_start;
+ __le32 l2_imem_end;
+ __le32 version_id;
u8 init_ddirect;
u8 reserved[3];
- u32 phys_addr_log_buffer;
- u32 total_log_entries;
- u32 dequeue_ptr;
- u32 dummy_var[2];
- u32 fwimg_len;
+ __le32 phys_addr_log_buffer;
+ __le32 total_log_entries;
+ __le32 dequeue_ptr;
+ __le32 dummy_var[2];
+ __le32 fwimg_len;
u8 magic[8];
- u32 ss_low_power_entry_timeout;
+ __le32 ss_low_power_entry_timeout;
u8 num_hsic_port;
u8 padding[139]; /* Pad to 256 bytes */
};
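
Switching the header fields to __le32/__le16 records that the firmware blob is little-endian on disk regardless of host CPU, and lets sparse flag any access that skips the byte-swap helpers. A kernel-context sketch of a correctly annotated read (the accessor name is illustrative):

/* sketch: every read of a __le32 field goes through le32_to_cpu() */
static u32 fw_code_size(const struct tegra_xusb_fw_header *header)
{
        return le32_to_cpu(header->boot_codesize);
}
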
@@ -194,6 +195,11 @@ struct tegra_xusb {
struct reset_control *host_rst;
struct reset_control *ss_rst;
+ struct device *genpd_dev_host;
+ struct device *genpd_dev_ss;
+ struct device_link *genpd_dl_host;
+ struct device_link *genpd_dl_ss;
+
struct phy **phys;
unsigned int num_phys;
@@ -928,6 +934,57 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
return 0;
}
+static void tegra_xusb_powerdomain_remove(struct device *dev,
+ struct tegra_xusb *tegra)
+{
+ if (tegra->genpd_dl_ss)
+ device_link_del(tegra->genpd_dl_ss);
+ if (tegra->genpd_dl_host)
+ device_link_del(tegra->genpd_dl_host);
+ if (tegra->genpd_dev_ss)
+ dev_pm_domain_detach(tegra->genpd_dev_ss, true);
+ if (tegra->genpd_dev_host)
+ dev_pm_domain_detach(tegra->genpd_dev_host, true);
+}
+
+static int tegra_xusb_powerdomain_init(struct device *dev,
+ struct tegra_xusb *tegra)
+{
+ int err;
+
+ tegra->genpd_dev_host = dev_pm_domain_attach_by_name(dev, "xusb_host");
+ if (IS_ERR(tegra->genpd_dev_host)) {
+ err = PTR_ERR(tegra->genpd_dev_host);
+ dev_err(dev, "failed to get host pm-domain: %d\n", err);
+ return err;
+ }
+
+ tegra->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "xusb_ss");
+ if (IS_ERR(tegra->genpd_dev_ss)) {
+ err = PTR_ERR(tegra->genpd_dev_ss);
+ dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
+ return err;
+ }
+
+ tegra->genpd_dl_host = device_link_add(dev, tegra->genpd_dev_host,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!tegra->genpd_dl_host) {
+ dev_err(dev, "adding host device link failed!\n");
+ return -ENODEV;
+ }
+
+ tegra->genpd_dl_ss = device_link_add(dev, tegra->genpd_dev_ss,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!tegra->genpd_dl_ss) {
+ dev_err(dev, "adding superspeed device link failed!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int tegra_xusb_probe(struct platform_device *pdev)
{
struct tegra_xusb_mbox_msg msg;
@@ -1038,7 +1095,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_padctl;
}
- if (!pdev->dev.pm_domain) {
+ if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
tegra->host_rst = devm_reset_control_get(&pdev->dev,
"xusb_host");
if (IS_ERR(tegra->host_rst)) {
@@ -1069,17 +1126,22 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra->host_clk,
tegra->host_rst);
if (err) {
+ tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
dev_err(&pdev->dev,
"failed to enable XUSBC domain: %d\n", err);
- goto disable_xusba;
+ goto put_padctl;
}
+ } else {
+ err = tegra_xusb_powerdomain_init(&pdev->dev, tegra);
+ if (err)
+ goto put_powerdomains;
}
tegra->supplies = devm_kcalloc(&pdev->dev, tegra->soc->num_supplies,
sizeof(*tegra->supplies), GFP_KERNEL);
if (!tegra->supplies) {
err = -ENOMEM;
- goto disable_xusbc;
+ goto put_powerdomains;
}
for (i = 0; i < tegra->soc->num_supplies; i++)
@@ -1089,7 +1151,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra->supplies);
if (err) {
dev_err(&pdev->dev, "failed to get regulators: %d\n", err);
- goto disable_xusbc;
+ goto put_powerdomains;
}
for (i = 0; i < tegra->soc->num_types; i++)
@@ -1099,7 +1161,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
sizeof(*tegra->phys), GFP_KERNEL);
if (!tegra->phys) {
err = -ENOMEM;
- goto disable_xusbc;
+ goto put_powerdomains;
}
for (i = 0, k = 0; i < tegra->soc->num_types; i++) {
@@ -1115,7 +1177,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
"failed to get PHY %s: %ld\n", prop,
PTR_ERR(phy));
err = PTR_ERR(phy);
- goto disable_xusbc;
+ goto put_powerdomains;
}
tegra->phys[k++] = phy;
@@ -1126,7 +1188,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
dev_name(&pdev->dev));
if (!tegra->hcd) {
err = -ENOMEM;
- goto disable_xusbc;
+ goto put_powerdomains;
}
/*
@@ -1222,12 +1284,13 @@ put_rpm:
disable_rpm:
pm_runtime_disable(&pdev->dev);
usb_put_hcd(tegra->hcd);
-disable_xusbc:
- if (!pdev->dev.pm_domain)
+put_powerdomains:
+ if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
-disable_xusba:
- if (!pdev->dev.pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
+ } else {
+ tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
+ }
put_padctl:
tegra_xusb_padctl_put(tegra->padctl);
return err;
@@ -1249,6 +1312,13 @@ static int tegra_xusb_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
+ tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
+ tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
+ } else {
+ tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
+ }
+
tegra_xusb_padctl_put(tegra->padctl);
return 0;
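
tegra_xusb_powerdomain_init() above shows the multi-domain runtime-PM pattern: a device can be attached to only one genpd directly, so dev_pm_domain_attach_by_name() creates a virtual consumer device per named domain ("xusb_host", "xusb_ss"), and device_link_add() with DL_FLAG_PM_RUNTIME propagates the driver's runtime-PM transitions to each of them. A hedged kernel-context sketch of attaching one such domain:

#include <linux/device.h>
#include <linux/pm_domain.h>

/* sketch: attach one named power domain and tie it to runtime PM */
static int attach_one_domain(struct device *dev, const char *name,
                             struct device **virt, struct device_link **link)
{
        *virt = dev_pm_domain_attach_by_name(dev, name);
        if (IS_ERR(*virt))
                return PTR_ERR(*virt);

        *link = device_link_add(dev, *virt,
                                DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
        if (!*link) {
                dev_pm_domain_detach(*virt, true);
                return -ENODEV;
        }
        return 0;
}
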
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6230a578324c..bf0b3692dc9a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1496,6 +1496,7 @@ static inline const char *xhci_trb_type_string(u8 type)
/* How much data is left before the 64KB boundary? */
#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \
(addr & (TRB_MAX_BUFF_SIZE - 1)))
+#define MAX_SOFT_RETRY 3
struct xhci_segment {
union xhci_trb *trbs;
@@ -1583,6 +1584,7 @@ struct xhci_ring {
* if we own the TRB (if we are the consumer). See section 4.9.1.
*/
u32 cycle_state;
+ unsigned int err_count;
unsigned int stream_id;
unsigned int num_segs;
unsigned int num_trbs_free;
@@ -1846,6 +1848,7 @@ struct xhci_hcd {
#define XHCI_SUSPEND_DELAY BIT_ULL(30)
#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
#define XHCI_ZERO_64B_REGS BIT_ULL(32)
+#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index d746c26a8055..bd539f3058bc 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -146,8 +146,11 @@ static int appledisplay_bl_update_status(struct backlight_device *bd)
pdata->msgdata, 2,
ACD_USB_TIMEOUT);
mutex_unlock(&pdata->sysfslock);
-
- return retval;
+
+ if (retval < 0)
+ return retval;
+ else
+ return 0;
}
static int appledisplay_bl_get_brightness(struct backlight_device *bd)
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c2991b8a65ce..ba05dd80a020 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -808,8 +808,8 @@ static int iowarrior_probe(struct usb_interface *interface,
dev->int_in_endpoint->bInterval);
/* create an internal buffer for interrupt data from the device */
dev->read_queue =
- kmalloc(((dev->report_size + 1) * MAX_INTERRUPT_BUFFER),
- GFP_KERNEL);
+ kmalloc_array(dev->report_size + 1, MAX_INTERRUPT_BUFFER,
+ GFP_KERNEL);
if (!dev->read_queue)
goto error;
/* Get the serial-number of the chip */
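
kmalloc_array() makes the read_queue sizing overflow-safe: an open-coded (n + 1) * size multiplication can wrap and silently return a short buffer, while kmalloc_array() fails with NULL if the product would overflow. A standalone model of that check:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* standalone model of kmalloc_array(): refuse n * size on overflow */
static void *xmalloc_array(size_t n, size_t size)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;    /* would wrap: reject rather than shrink */
        return malloc(n * size);
}

int main(void)
{
        void *ok = xmalloc_array(16 + 1, 64);   /* report_size-style use */
        void *bad = xmalloc_array(SIZE_MAX / 2, 4);

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
}
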
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index b3e1f553954a..ac357ce2d1a6 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -46,7 +46,9 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
struct trancevibrator *tv = usb_get_intfdata(intf);
int temp, retval, old;
- temp = simple_strtoul(buf, NULL, 10);
+ retval = kstrtoint(buf, 10, &temp);
+ if (retval)
+ return retval;
if (temp > 255)
temp = 255;
else if (temp < 0)
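
kstrtoint() is the strict replacement for simple_strtoul(): it rejects trailing garbage and out-of-range input with -EINVAL/-ERANGE instead of silently truncating, and the store callback can propagate that error to the writer. A kernel-context sketch of the sysfs idiom (the attribute is hypothetical):

#include <linux/device.h>
#include <linux/kernel.h>

/* sketch: strict integer parsing in a sysfs store callback */
static ssize_t example_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        int val, ret;

        ret = kstrtoint(buf, 10, &val); /* -EINVAL/-ERANGE on bad input */
        if (ret)
                return ret;

        val = clamp(val, 0, 255);       /* same range limiting as above */
        /* ... apply val ... */
        return count;
}
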
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index d045d8458f81..ae70b9bfd797 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -185,8 +185,8 @@ static void mtu3_intr_enable(struct mtu3 *mtu)
if (mtu->is_u3_ip) {
/* Enable U3 LTSSM interrupts */
- value = HOT_RST_INTR | WARM_RST_INTR | VBUS_RISE_INTR |
- VBUS_FALL_INTR | ENTER_U3_INTR | EXIT_U3_INTR;
+ value = HOT_RST_INTR | WARM_RST_INTR |
+ ENTER_U3_INTR | EXIT_U3_INTR;
mtu3_writel(mbase, U3D_LTSSM_INTR_ENABLE, value);
}
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index 5c60a8c5a0b5..bbcd3332471d 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -585,6 +585,17 @@ static const struct usb_gadget_ops mtu3_gadget_ops = {
.udc_stop = mtu3_gadget_stop,
};
+static void mtu3_state_reset(struct mtu3 *mtu)
+{
+ mtu->address = 0;
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+ mtu->may_wakeup = 0;
+ mtu->u1_enable = 0;
+ mtu->u2_enable = 0;
+ mtu->delayed_status = false;
+ mtu->test_mode = false;
+}
+
static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
u32 epnum, u32 is_in)
{
@@ -702,6 +713,7 @@ void mtu3_gadget_disconnect(struct mtu3 *mtu)
spin_lock(&mtu->lock);
}
+ mtu3_state_reset(mtu);
usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
}
@@ -712,12 +724,6 @@ void mtu3_gadget_reset(struct mtu3 *mtu)
/* report disconnect, if we didn't flush EP state */
if (mtu->g.speed != USB_SPEED_UNKNOWN)
mtu3_gadget_disconnect(mtu);
-
- mtu->address = 0;
- mtu->ep0_state = MU3D_EP0_STATE_SETUP;
- mtu->may_wakeup = 0;
- mtu->u1_enable = 0;
- mtu->u2_enable = 0;
- mtu->delayed_status = false;
- mtu->test_mode = false;
+ else
+ mtu3_state_reset(mtu);
}
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index df827ff57b0d..23a0df79ef21 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -658,16 +658,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base)
return controller;
}
-static void dsps_dma_controller_destroy(struct dma_controller *c)
-{
- struct musb *musb = c->musb;
- struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
- void __iomem *usbss_base = glue->usbss_base;
-
- musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP);
- cppi41_dma_controller_destroy(c);
-}
-
#ifdef CONFIG_PM_SLEEP
static void dsps_dma_controller_suspend(struct dsps_glue *glue)
{
@@ -697,7 +687,7 @@ static struct musb_platform_ops dsps_ops = {
#ifdef CONFIG_USB_TI_CPPI41_DMA
.dma_init = dsps_dma_controller_create,
- .dma_exit = dsps_dma_controller_destroy,
+ .dma_exit = cppi41_dma_controller_destroy,
#endif
.enable = dsps_musb_enable,
.disable = dsps_musb_disable,
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 66143ab8c043..aaf363f19714 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -505,15 +505,19 @@ static int abx500_usb_link_status_update(struct ab8500_usb *ab)
if (is_ab8500(ab->ab8500)) {
enum ab8500_usb_link_status lsts;
- abx500_get_register_interruptible(ab->dev,
+ ret = abx500_get_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_LINE_STAT_REG, &reg);
+ if (ret < 0)
+ return ret;
lsts = (reg >> 3) & 0x0F;
ret = ab8500_usb_link_status_update(ab, lsts);
} else if (is_ab8505(ab->ab8500)) {
enum ab8505_usb_link_status lsts;
- abx500_get_register_interruptible(ab->dev,
+ ret = abx500_get_register_interruptible(ab->dev,
AB8500_USB, AB8505_USB_LINE_STAT_REG, &reg);
+ if (ret < 0)
+ return ret;
lsts = (reg >> 3) & 0x1F;
ret = ab8505_usb_link_status_update(ab, lsts);
}
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index e5aa24c1e4fd..1b1bb0ad40c3 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -563,7 +563,7 @@ static enum usb_charger_type mxs_charger_primary_detection(struct mxs_phy *x)
regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
if (!(val & ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED)) {
chgr_type = SDP_TYPE;
- dev_dbg(x->phy.dev, "It is a stardard downstream port\n");
+ dev_dbg(x->phy.dev, "It is a standard downstream port\n");
}
/* Disable charger detector */
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 4310df46639d..a3e1290d682d 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -5,6 +5,7 @@
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/io.h>
@@ -12,6 +13,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "common.h"
@@ -290,6 +292,79 @@ static void usbhsc_set_buswait(struct usbhs_priv *priv)
usbhs_bset(priv, BUSWAIT, 0x000F, wait);
}
+static bool usbhsc_is_multi_clks(struct usbhs_priv *priv)
+{
+ if (priv->dparam.type == USBHS_TYPE_RCAR_GEN3 ||
+ priv->dparam.type == USBHS_TYPE_RCAR_GEN3_WITH_PLL)
+ return true;
+
+ return false;
+}
+
+static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv)
+{
+ if (!usbhsc_is_multi_clks(priv))
+ return 0;
+
+ /* The first clock should exist */
+ priv->clks[0] = of_clk_get(dev->of_node, 0);
+ if (IS_ERR(priv->clks[0]))
+ return PTR_ERR(priv->clks[0]);
+
+ /*
+ * For backward compatibility with old DTs, a second clock that
+ * returns -ENOENT is treated as absent rather than as an error.
+ */
+ priv->clks[1] = of_clk_get(dev->of_node, 1);
+ if (PTR_ERR(priv->clks[1]) == -ENOENT)
+ priv->clks[1] = NULL;
+ else if (IS_ERR(priv->clks[1]))
+ return PTR_ERR(priv->clks[1]);
+
+ return 0;
+}
+
+static void usbhsc_clk_put(struct usbhs_priv *priv)
+{
+ int i;
+
+ if (!usbhsc_is_multi_clks(priv))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(priv->clks); i++)
+ clk_put(priv->clks[i]);
+}
+
+static int usbhsc_clk_prepare_enable(struct usbhs_priv *priv)
+{
+ int i, ret;
+
+ if (!usbhsc_is_multi_clks(priv))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(priv->clks); i++) {
+ ret = clk_prepare_enable(priv->clks[i]);
+ if (ret) {
+ while (--i >= 0)
+ clk_disable_unprepare(priv->clks[i]);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void usbhsc_clk_disable_unprepare(struct usbhs_priv *priv)
+{
+ int i;
+
+ if (!usbhsc_is_multi_clks(priv))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(priv->clks); i++)
+ clk_disable_unprepare(priv->clks[i]);
+}
+
/*
* platform default param
*/
@@ -340,6 +415,10 @@ static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
/* enable PM */
pm_runtime_get_sync(dev);
+ /* enable clks */
+ if (usbhsc_clk_prepare_enable(priv))
+ return;
+
/* enable platform power */
usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
@@ -352,6 +431,9 @@ static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
/* disable platform power */
usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
+ /* disable clks */
+ usbhsc_clk_disable_unprepare(priv);
+
/* disable PM */
pm_runtime_put_sync(dev);
}
@@ -478,6 +560,10 @@ static const struct of_device_id usbhs_of_match[] = {
.data = (void *)USBHS_TYPE_RCAR_GEN3,
},
{
+ .compatible = "renesas,usbhs-r8a77990",
+ .data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
+ },
+ {
.compatible = "renesas,usbhs-r8a77995",
.data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
},
@@ -574,6 +660,10 @@ static int usbhs_probe(struct platform_device *pdev)
return PTR_ERR(priv->edev);
}
+ priv->rsts = devm_reset_control_array_get_optional_shared(&pdev->dev);
+ if (IS_ERR(priv->rsts))
+ return PTR_ERR(priv->rsts);
+
/*
* care platform info
*/
@@ -591,15 +681,6 @@ static int usbhs_probe(struct platform_device *pdev)
break;
case USBHS_TYPE_RCAR_GEN3_WITH_PLL:
priv->pfunc = usbhs_rcar3_with_pll_ops;
- if (!IS_ERR_OR_NULL(priv->edev)) {
- priv->nb.notifier_call = priv->pfunc.notifier;
- ret = devm_extcon_register_notifier(&pdev->dev,
- priv->edev,
- EXTCON_USB_HOST,
- &priv->nb);
- if (ret < 0)
- dev_err(&pdev->dev, "no notifier registered\n");
- }
break;
case USBHS_TYPE_RZA1:
priv->pfunc = usbhs_rza1_ops;
@@ -658,6 +739,14 @@ static int usbhs_probe(struct platform_device *pdev)
/* dev_set_drvdata should be called after usbhs_mod_init */
platform_set_drvdata(pdev, priv);
+ ret = reset_control_deassert(priv->rsts);
+ if (ret)
+ goto probe_fail_rst;
+
+ ret = usbhsc_clk_get(&pdev->dev, priv);
+ if (ret)
+ goto probe_fail_clks;
+
/*
* reset the device here because the
* USB device might have been used by the boot loader.
@@ -711,6 +800,10 @@ static int usbhs_probe(struct platform_device *pdev)
return ret;
probe_end_mod_exit:
+ usbhsc_clk_put(priv);
+probe_fail_clks:
+ reset_control_assert(priv->rsts);
+probe_fail_rst:
usbhs_mod_remove(priv);
probe_end_fifo_exit:
usbhs_fifo_remove(priv);
@@ -739,6 +832,8 @@ static int usbhs_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
usbhs_platform_call(priv, hardware_exit, pdev);
+ usbhsc_clk_put(priv);
+ reset_control_assert(priv->rsts);
usbhs_mod_remove(priv);
usbhs_fifo_remove(priv);
usbhs_pipe_remove(priv);
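
usbhsc_clk_prepare_enable() above uses the standard unwind idiom for enabling a clock array: if clock i fails, walk i back down and disable everything already enabled so the prepare/enable counts stay balanced. A standalone model of the loop:

#include <stdio.h>

#define NUM_CLKS 2

/* standalone model of the enable-or-unwind loop added above */
static int enable_all(int fail_at, int enabled[NUM_CLKS])
{
        int i;

        for (i = 0; i < NUM_CLKS; i++) {
                if (i == fail_at)       /* simulate clk_prepare_enable() error */
                        goto unwind;
                enabled[i] = 1;
        }
        return 0;

unwind:
        while (--i >= 0)
                enabled[i] = 0;         /* clk_disable_unprepare() in reverse */
        return -1;
}

int main(void)
{
        int clks[NUM_CLKS] = { 0, 0 };

        /* a failure on clk1 leaves clk0 disabled again */
        printf("ret=%d clk0=%d clk1=%d\n", enable_all(1, clks),
               clks[0], clks[1]);
        return 0;
}
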
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 6137f7942c05..3777af848a35 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -8,8 +8,10 @@
#ifndef RENESAS_USB_DRIVER_H
#define RENESAS_USB_DRIVER_H
+#include <linux/clk.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/usb/renesas_usbhs.h>
struct usbhs_priv;
@@ -255,7 +257,6 @@ struct usbhs_priv {
struct platform_device *pdev;
struct extcon_dev *edev;
- struct notifier_block nb;
spinlock_t lock;
@@ -277,6 +278,8 @@ struct usbhs_priv {
struct usbhs_fifo_info fifo_info;
struct phy *phy;
+ struct reset_control *rsts;
+ struct clk *clks[2];
};
/*
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index d0ea4ff89622..aa3820448286 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -27,7 +27,6 @@
* Remarks: bit[31:11] and bit[9:6] should be 0
*/
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
-#define UGCTRL2_USB0SEL_EHCI 0x00000010
#define UGCTRL2_USB0SEL_HSUSB 0x00000020
#define UGCTRL2_USB0SEL_OTG 0x00000030
#define UGCTRL2_VBUSSEL 0x00000400
@@ -50,14 +49,6 @@ static void usbhs_rcar3_set_ugctrl2(struct usbhs_priv *priv, u32 val)
usbhs_write32(priv, UGCTRL2, val | UGCTRL2_RESERVED_3);
}
-static void usbhs_rcar3_set_usbsel(struct usbhs_priv *priv, bool ehci)
-{
- if (ehci)
- usbhs_rcar3_set_ugctrl2(priv, UGCTRL2_USB0SEL_EHCI);
- else
- usbhs_rcar3_set_ugctrl2(priv, UGCTRL2_USB0SEL_HSUSB);
-}
-
static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
@@ -83,14 +74,11 @@ static int usbhs_rcar3_power_and_pll_ctrl(struct platform_device *pdev,
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
u32 val;
int timeout = 1000;
- bool is_host = false;
if (enable) {
usbhs_write32(priv, UGCTRL, 0); /* release PLLRESET */
- if (priv->edev)
- is_host = extcon_get_state(priv->edev, EXTCON_USB_HOST);
-
- usbhs_rcar3_set_usbsel(priv, is_host);
+ usbhs_rcar3_set_ugctrl2(priv,
+ UGCTRL2_USB0SEL_OTG | UGCTRL2_VBUSSEL);
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
do {
@@ -112,16 +100,6 @@ static int usbhs_rcar3_get_id(struct platform_device *pdev)
return USBHS_GADGET;
}
-static int usbhs_rcar3_notifier(struct notifier_block *nb, unsigned long event,
- void *data)
-{
- struct usbhs_priv *priv = container_of(nb, struct usbhs_priv, nb);
-
- usbhs_rcar3_set_usbsel(priv, !!event);
-
- return NOTIFY_DONE;
-}
-
const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
.power_ctrl = usbhs_rcar3_power_ctrl,
.get_id = usbhs_rcar3_get_id,
@@ -130,5 +108,4 @@ const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
const struct renesas_usbhs_platform_callback usbhs_rcar3_with_pll_ops = {
.power_ctrl = usbhs_rcar3_power_and_pll_ctrl,
.get_id = usbhs_rcar3_get_id,
- .notifier = usbhs_rcar3_notifier,
};
diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c
index 1fb3dd0f1dfa..277de96181f9 100644
--- a/drivers/usb/roles/intel-xhci-usb-role-switch.c
+++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c
@@ -161,6 +161,8 @@ static int intel_xhci_usb_remove(struct platform_device *pdev)
{
struct intel_xhci_usb_data *data = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+
usb_role_switch_unregister(data->role_sw);
return 0;
}
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 7796ad8e33c6..ff38aa8963cf 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -397,38 +397,16 @@ err_free:
return result;
}
-static int ark3116_get_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *retinfo)
-{
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
-
- tmp.type = PORT_16654;
- tmp.line = port->minor;
- tmp.port = port->port_number;
- tmp.baud_base = 460800;
-
- if (copy_to_user(retinfo, &tmp, sizeof(tmp)))
- return -EFAULT;
-
- return 0;
-}
-
-static int ark3116_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
+static int ark3116_get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
- void __user *user_arg = (void __user *)arg;
-
- switch (cmd) {
- case TIOCGSERIAL:
- return ark3116_get_serial_info(port, user_arg);
- default:
- break;
- }
- return -ENOIOCTLCMD;
+ ss->type = PORT_16654;
+ ss->line = port->minor;
+ ss->port = port->port_number;
+ ss->baud_base = 460800;
+ return 0;
}
static int ark3116_tiocmget(struct tty_struct *tty)
@@ -668,7 +646,7 @@ static struct usb_serial_driver ark3116_device = {
.port_remove = ark3116_port_remove,
.set_termios = ark3116_set_termios,
.init_termios = ark3116_init_termios,
- .ioctl = ark3116_ioctl,
+ .get_serial = ark3116_get_serial_info,
.tiocmget = ark3116_tiocmget,
.tiocmset = ark3116_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
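
The ark3116 hunk is the first of several conversions in this series (f81232, f81534, and ftdi_sio follow the same shape) to the tty core's get_serial/set_serial hooks: the core allocates and zeroes the struct serial_struct and performs the user-space copies, so a driver-side TIOCGSERIAL handler reduces to filling in fields. A condensed sketch of the new driver-side contract:

#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/usb/serial.h>

/* sketch: a get_serial hook fills a kernel-space struct; the tty core
 * zeroes it beforehand and handles the copy_to_user() afterwards */
static int example_get_serial(struct tty_struct *tty,
                              struct serial_struct *ss)
{
        struct usb_serial_port *port = tty->driver_data;

        ss->type = PORT_16654;
        ss->line = port->minor;
        ss->port = port->port_number;
        ss->baud_base = 460800;
        return 0;
}
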
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index e0035c023120..ed51bc48eea6 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -378,7 +378,7 @@ static int cypress_serial_control(struct tty_struct *tty,
retval = -ENOTTY;
goto out;
}
- dev_dbg(dev, "%s - retreiving serial line settings\n", __func__);
+ dev_dbg(dev, "%s - retrieving serial line settings\n", __func__);
do {
retval = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
@@ -769,7 +769,7 @@ send:
usb_fill_int_urb(port->interrupt_out_urb, port->serial->dev,
usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress),
- port->interrupt_out_buffer, port->interrupt_out_size,
+ port->interrupt_out_buffer, actual_size,
cypress_write_int_callback, port, priv->write_urb_interval);
result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
if (result) {
@@ -863,7 +863,7 @@ static void cypress_set_termios(struct tty_struct *tty,
struct cypress_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
int data_bits, stop_bits, parity_type, parity_enable;
- unsigned cflag, iflag;
+ unsigned int cflag;
unsigned long flags;
__u8 oldlines;
int linechange = 0;
@@ -899,7 +899,6 @@ static void cypress_set_termios(struct tty_struct *tty,
tty->termios.c_cflag &= ~(CMSPAR|CRTSCTS);
cflag = tty->termios.c_cflag;
- iflag = tty->termios.c_iflag;
/* check if there are new settings */
if (old_termios) {
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 96036f87b1de..0dcdcb4b2cde 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -583,36 +583,16 @@ static int f81232_carrier_raised(struct usb_serial_port *port)
return 0;
}
-static int f81232_get_serial_info(struct usb_serial_port *port,
- unsigned long arg)
-{
- struct serial_struct ser;
-
- memset(&ser, 0, sizeof(ser));
-
- ser.type = PORT_16550A;
- ser.line = port->minor;
- ser.port = port->port_number;
- ser.baud_base = F81232_MAX_BAUDRATE;
-
- if (copy_to_user((void __user *)arg, &ser, sizeof(ser)))
- return -EFAULT;
-
- return 0;
-}
-
-static int f81232_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
+static int f81232_get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
- switch (cmd) {
- case TIOCGSERIAL:
- return f81232_get_serial_info(port, arg);
- default:
- break;
- }
- return -ENOIOCTLCMD;
+ ss->type = PORT_16550A;
+ ss->line = port->minor;
+ ss->port = port->port_number;
+ ss->baud_base = F81232_MAX_BAUDRATE;
+ return 0;
}
static void f81232_interrupt_work(struct work_struct *work)
@@ -665,7 +645,7 @@ static struct usb_serial_driver f81232_device = {
.close = f81232_close,
.dtr_rts = f81232_dtr_rts,
.carrier_raised = f81232_carrier_raised,
- .ioctl = f81232_ioctl,
+ .get_serial = f81232_get_serial_info,
.break_ctl = f81232_break_ctl,
.set_termios = f81232_set_termios,
.tiocmget = f81232_tiocmget,
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index 4dfbff20bda4..380933db34dd 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -1139,43 +1139,21 @@ static void f81534_close(struct usb_serial_port *port)
mutex_unlock(&serial_priv->urb_mutex);
}
-static int f81534_get_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *retinfo)
+static int f81534_get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
+ struct usb_serial_port *port = tty->driver_data;
struct f81534_port_private *port_priv;
- struct serial_struct tmp;
port_priv = usb_get_serial_port_data(port);
- memset(&tmp, 0, sizeof(tmp));
-
- tmp.type = PORT_16550A;
- tmp.port = port->port_number;
- tmp.line = port->minor;
- tmp.baud_base = port_priv->baud_base;
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
-
+ ss->type = PORT_16550A;
+ ss->port = port->port_number;
+ ss->line = port->minor;
+ ss->baud_base = port_priv->baud_base;
return 0;
}
-static int f81534_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct serial_struct __user *buf = (struct serial_struct __user *)arg;
-
- switch (cmd) {
- case TIOCGSERIAL:
- return f81534_get_serial_info(port, buf);
- default:
- break;
- }
-
- return -ENOIOCTLCMD;
-}
-
static void f81534_process_per_serial_block(struct usb_serial_port *port,
u8 *data)
{
@@ -1581,7 +1559,7 @@ static struct usb_serial_driver f81534_device = {
.break_ctl = f81534_break_ctl,
.dtr_rts = f81534_dtr_rts,
.process_read_urb = f81534_process_read_urb,
- .ioctl = f81534_ioctl,
+ .get_serial = f81534_get_serial_info,
.tiocmget = f81534_tiocmget,
.tiocmset = f81534_tiocmset,
.write_bulk_callback = f81534_write_usb_callback,
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b5cef322826f..609198d9594c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -39,6 +39,7 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/serial.h>
+#include <linux/gpio/driver.h>
#include <linux/usb/serial.h>
#include "ftdi_sio.h"
#include "ftdi_sio_ids.h"
@@ -72,6 +73,15 @@ struct ftdi_private {
unsigned int latency; /* latency setting in use */
unsigned short max_packet_size;
struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() and change_speed() */
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gc;
+ struct mutex gpio_lock; /* protects GPIO state */
+ bool gpio_registered; /* is the gpiochip in kernel registered */
+ bool gpio_used; /* true if the user requested a gpio */
+ u8 gpio_altfunc; /* which pins are in gpio mode */
+ u8 gpio_output; /* pin directions cache */
+ u8 gpio_value; /* pin value for outputs */
+#endif
};
/* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -1055,6 +1065,10 @@ static int ftdi_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static int ftdi_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
+static int get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss);
+static int set_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss);
static void ftdi_break_ctl(struct tty_struct *tty, int break_state);
static bool ftdi_tx_empty(struct usb_serial_port *port);
static int ftdi_get_modem_status(struct usb_serial_port *port,
@@ -1091,6 +1105,8 @@ static struct usb_serial_driver ftdi_sio_device = {
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.ioctl = ftdi_ioctl,
+ .get_serial = get_serial_info,
+ .set_serial = set_serial_info,
.set_termios = ftdi_set_termios,
.break_ctl = ftdi_break_ctl,
.tx_empty = ftdi_tx_empty,
@@ -1443,48 +1459,42 @@ static int read_latency_timer(struct usb_serial_port *port)
return 0;
}
-static int get_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *retinfo)
+static int get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
+ struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- struct serial_struct tmp;
- memset(&tmp, 0, sizeof(tmp));
- tmp.flags = priv->flags;
- tmp.baud_base = priv->baud_base;
- tmp.custom_divisor = priv->custom_divisor;
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
+ ss->flags = priv->flags;
+ ss->baud_base = priv->baud_base;
+ ss->custom_divisor = priv->custom_divisor;
return 0;
}
static int set_serial_info(struct tty_struct *tty,
- struct usb_serial_port *port, struct serial_struct __user *newinfo)
+ struct serial_struct *ss)
{
+ struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- struct serial_struct new_serial;
struct ftdi_private old_priv;
- if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
- return -EFAULT;
-
mutex_lock(&priv->cfg_lock);
old_priv = *priv;
/* Do error checking and permission checking */
if (!capable(CAP_SYS_ADMIN)) {
- if ((new_serial.flags ^ priv->flags) & ~ASYNC_USR_MASK) {
+ if ((ss->flags ^ priv->flags) & ~ASYNC_USR_MASK) {
mutex_unlock(&priv->cfg_lock);
return -EPERM;
}
priv->flags = ((priv->flags & ~ASYNC_USR_MASK) |
- (new_serial.flags & ASYNC_USR_MASK));
- priv->custom_divisor = new_serial.custom_divisor;
+ (ss->flags & ASYNC_USR_MASK));
+ priv->custom_divisor = ss->custom_divisor;
goto check_and_exit;
}
- if (new_serial.baud_base != priv->baud_base) {
+ if (ss->baud_base != priv->baud_base) {
mutex_unlock(&priv->cfg_lock);
return -EINVAL;
}
@@ -1492,8 +1502,8 @@ static int set_serial_info(struct tty_struct *tty,
/* Make the changes - these are privileged changes! */
priv->flags = ((priv->flags & ~ASYNC_FLAGS) |
- (new_serial.flags & ASYNC_FLAGS));
- priv->custom_divisor = new_serial.custom_divisor;
+ (ss->flags & ASYNC_FLAGS));
+ priv->custom_divisor = ss->custom_divisor;
check_and_exit:
write_latency_timer(port);
@@ -1507,10 +1517,8 @@ check_and_exit:
dev_warn_ratelimited(&port->dev, "use of SPD flags is deprecated\n");
change_speed(tty, port);
- mutex_unlock(&priv->cfg_lock);
}
- else
- mutex_unlock(&priv->cfg_lock);
+ mutex_unlock(&priv->cfg_lock);
return 0;
}
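/*
 * Editor's note: the point of this conversion is that the tty core now does
 * the copy_to_user()/copy_from_user() for TIOCGSERIAL/TIOCSSERIAL, so the
 * driver only fills in (or reads) a kernel-space struct serial_struct. The
 * userspace interface is unchanged; a minimal sketch, assuming /dev/ttyUSB0
 * is bound to one of the drivers converted in this series:
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_struct ss;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

	if (fd < 0 || ioctl(fd, TIOCGSERIAL, &ss) < 0)
		return 1;
	printf("baud_base %d, flags %#x\n", ss.baud_base, ss.flags);

	ss.flags &= ~ASYNC_SPD_MASK;	/* drop the deprecated SPD bits */
	return ioctl(fd, TIOCSSERIAL, &ss) < 0;
}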
@@ -1766,6 +1774,375 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
}
+#ifdef CONFIG_GPIOLIB
+
+static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ int result;
+ u16 val;
+
+ val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
+ result = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ FTDI_SIO_SET_BITMODE_REQUEST,
+ FTDI_SIO_SET_BITMODE_REQUEST_TYPE, val,
+ priv->interface, NULL, 0, WDR_TIMEOUT);
+ if (result < 0) {
+ dev_err(&serial->interface->dev,
+ "bitmode request failed for value 0x%04x: %d\n",
+ val, result);
+ }
+
+ return result;
+}
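/*
 * Editor's note: a worked example of the wValue packing above, assuming the
 * layout from FTDI's bitbang application notes (bitmode in the high byte,
 * CBUS direction and level nibbles in the low byte). Driving CBUS0 high as
 * an output while leaving the other pins as inputs:
 *
 *	mode        = FTDI_SIO_BITMODE_CBUS (0x20)
 *	gpio_output = 0x1 (CBUS0 is an output)
 *	gpio_value  = 0x1 (CBUS0 level is high)
 *
 *	val = (0x20 << 8) | (0x1 << 4) | 0x1 = 0x2011
 */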
+
+static int ftdi_set_cbus_pins(struct usb_serial_port *port)
+{
+ return ftdi_set_bitmode(port, FTDI_SIO_BITMODE_CBUS);
+}
+
+static int ftdi_exit_cbus_mode(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ priv->gpio_output = 0;
+ priv->gpio_value = 0;
+ return ftdi_set_bitmode(port, FTDI_SIO_BITMODE_RESET);
+}
+
+static int ftdi_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ int result;
+
+ if (priv->gpio_altfunc & BIT(offset))
+ return -ENODEV;
+
+ mutex_lock(&priv->gpio_lock);
+ if (!priv->gpio_used) {
+ /* Set default pin states, as we cannot read them back from the device */
+ priv->gpio_output = 0x00;
+ priv->gpio_value = 0x00;
+ result = ftdi_set_cbus_pins(port);
+ if (result) {
+ mutex_unlock(&priv->gpio_lock);
+ return result;
+ }
+
+ priv->gpio_used = true;
+ }
+ mutex_unlock(&priv->gpio_lock);
+
+ return 0;
+}
+
+static int ftdi_read_cbus_pins(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ unsigned char *buf;
+ int result;
+
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ result = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ FTDI_SIO_READ_PINS_REQUEST,
+ FTDI_SIO_READ_PINS_REQUEST_TYPE, 0,
+ priv->interface, buf, 1, WDR_TIMEOUT);
+ if (result < 1) {
+ if (result >= 0)
+ result = -EIO;
+ } else {
+ result = buf[0];
+ }
+
+ kfree(buf);
+
+ return result;
+}
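/*
 * Editor's note: the single byte is deliberately heap-allocated --
 * usb_control_msg() may DMA into the buffer, and the USB core forbids DMA
 * to on-stack or static storage, so even a one-byte read goes through
 * kmalloc().
 */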
+
+static int ftdi_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ int result;
+
+ result = ftdi_read_cbus_pins(port);
+ if (result < 0)
+ return result;
+
+ return !!(result & BIT(gpio));
+}
+
+static void ftdi_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ mutex_lock(&priv->gpio_lock);
+
+ if (value)
+ priv->gpio_value |= BIT(gpio);
+ else
+ priv->gpio_value &= ~BIT(gpio);
+
+ ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+}
+
+static int ftdi_gpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ int result;
+
+ result = ftdi_read_cbus_pins(port);
+ if (result < 0)
+ return result;
+
+ *bits = result & *mask;
+
+ return 0;
+}
+
+static void ftdi_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ mutex_lock(&priv->gpio_lock);
+
+ priv->gpio_value &= ~(*mask);
+ priv->gpio_value |= *bits & *mask;
+ ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+}
+
+static int ftdi_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ return !(priv->gpio_output & BIT(gpio));
+}
+
+static int ftdi_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ int result;
+
+ mutex_lock(&priv->gpio_lock);
+
+ priv->gpio_output &= ~BIT(gpio);
+ result = ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+
+ return result;
+}
+
+static int ftdi_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
+ int value)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ int result;
+
+ mutex_lock(&priv->gpio_lock);
+
+ priv->gpio_output |= BIT(gpio);
+ if (value)
+ priv->gpio_value |= BIT(gpio);
+ else
+ priv->gpio_value &= ~BIT(gpio);
+
+ result = ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+
+ return result;
+}
+
+static int ftdi_read_eeprom(struct usb_serial *serial, void *dst, u16 addr,
+ u16 nbytes)
+{
+ int read = 0;
+
+ if (addr % 2 != 0)
+ return -EINVAL;
+ if (nbytes % 2 != 0)
+ return -EINVAL;
+
+ /* Read EEPROM two bytes at a time */
+ while (read < nbytes) {
+ int rv;
+
+ rv = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ FTDI_SIO_READ_EEPROM_REQUEST,
+ FTDI_SIO_READ_EEPROM_REQUEST_TYPE,
+ 0, (addr + read) / 2, dst + read, 2,
+ WDR_TIMEOUT);
+ if (rv < 2) {
+ if (rv >= 0)
+ return -EIO;
+ else
+ return rv;
+ }
+
+ read += rv;
+ }
+
+ return 0;
+}
+
+static int ftdi_gpio_init_ft232r(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ u16 cbus_config;
+ u8 *buf;
+ int ret;
+ int i;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ftdi_read_eeprom(port->serial, buf, 0x14, 2);
+ if (ret < 0)
+ goto out_free;
+
+ cbus_config = le16_to_cpup((__le16 *)buf);
+ dev_dbg(&port->dev, "cbus_config = 0x%04x\n", cbus_config);
+
+ priv->gc.ngpio = 4;
+
+ priv->gpio_altfunc = 0xff;
+ for (i = 0; i < priv->gc.ngpio; ++i) {
+ if ((cbus_config & 0xf) == FTDI_FT232R_CBUS_MUX_GPIO)
+ priv->gpio_altfunc &= ~BIT(i);
+ cbus_config >>= 4;
+ }
+out_free:
+ kfree(buf);
+
+ return ret;
+}
+
+static int ftdi_gpio_init_ftx(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ const u16 cbus_cfg_addr = 0x1a;
+ const u16 cbus_cfg_size = 4;
+ u8 *cbus_cfg_buf;
+ int result;
+ u8 i;
+
+ cbus_cfg_buf = kmalloc(cbus_cfg_size, GFP_KERNEL);
+ if (!cbus_cfg_buf)
+ return -ENOMEM;
+
+ result = ftdi_read_eeprom(serial, cbus_cfg_buf,
+ cbus_cfg_addr, cbus_cfg_size);
+ if (result < 0)
+ goto out_free;
+
+ /* FIXME: the FT234XD has only one GPIO, but how do we detect that variant? */
+ priv->gc.ngpio = 4;
+
+ /* Determine which pins are configured for CBUS bitbanging */
+ priv->gpio_altfunc = 0xff;
+ for (i = 0; i < priv->gc.ngpio; ++i) {
+ if (cbus_cfg_buf[i] == FTDI_FTX_CBUS_MUX_GPIO)
+ priv->gpio_altfunc &= ~BIT(i);
+ }
+
+out_free:
+ kfree(cbus_cfg_buf);
+
+ return result;
+}
+
+static int ftdi_gpio_init(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ int result;
+
+ switch (priv->chip_type) {
+ case FT232RL:
+ result = ftdi_gpio_init_ft232r(port);
+ break;
+ case FTX:
+ result = ftdi_gpio_init_ftx(port);
+ break;
+ default:
+ return 0;
+ }
+
+ if (result < 0)
+ return result;
+
+ mutex_init(&priv->gpio_lock);
+
+ priv->gc.label = "ftdi-cbus";
+ priv->gc.request = ftdi_gpio_request;
+ priv->gc.get_direction = ftdi_gpio_direction_get;
+ priv->gc.direction_input = ftdi_gpio_direction_input;
+ priv->gc.direction_output = ftdi_gpio_direction_output;
+ priv->gc.get = ftdi_gpio_get;
+ priv->gc.set = ftdi_gpio_set;
+ priv->gc.get_multiple = ftdi_gpio_get_multiple;
+ priv->gc.set_multiple = ftdi_gpio_set_multiple;
+ priv->gc.owner = THIS_MODULE;
+ priv->gc.parent = &serial->interface->dev;
+ priv->gc.base = -1;
+ priv->gc.can_sleep = true;
+
+ result = gpiochip_add_data(&priv->gc, port);
+ if (!result)
+ priv->gpio_registered = true;
+
+ return result;
+}
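/*
 * Editor's note: after registration the CBUS pins appear as a normal
 * gpiochip labelled "ftdi-cbus", so any generic GPIO consumer works. A
 * sketch against the libgpiod v1 C API (an assumption -- any GPIO uAPI
 * consumer would do); bulk line requests end up in the get_multiple()/
 * set_multiple() callbacks above, i.e. one USB transaction for several pins:
 */
#include <gpiod.h>

int pulse_cbus0(void)
{
	struct gpiod_chip *chip;
	struct gpiod_line *line;
	int ret;

	chip = gpiod_chip_open_by_label("ftdi-cbus");
	if (!chip)
		return -1;

	line = gpiod_chip_get_line(chip, 0);		/* CBUS0 */
	if (!line || gpiod_line_request_output(line, "pulse", 1) < 0) {
		gpiod_chip_close(chip);
		return -1;
	}

	ret = gpiod_line_set_value(line, 0);		/* back low */
	gpiod_line_release(line);
	gpiod_chip_close(chip);
	return ret;
}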
+
+static void ftdi_gpio_remove(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ if (priv->gpio_registered) {
+ gpiochip_remove(&priv->gc);
+ priv->gpio_registered = false;
+ }
+
+ if (priv->gpio_used) {
+ /* Exiting CBUS mode does not reset the pin states. */
+ ftdi_exit_cbus_mode(port);
+ priv->gpio_used = false;
+ }
+}
+
+#else
+
+static int ftdi_gpio_init(struct usb_serial_port *port)
+{
+ return 0;
+}
+
+static void ftdi_gpio_remove(struct usb_serial_port *port) { }
+
+#endif /* CONFIG_GPIOLIB */
+
/*
* ***************************************************************************
* FTDI driver specific functions
@@ -1794,7 +2171,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
{
struct ftdi_private *priv;
const struct ftdi_sio_quirk *quirk = usb_get_serial_data(port->serial);
-
+ int result;
priv = kzalloc(sizeof(struct ftdi_private), GFP_KERNEL);
if (!priv)
@@ -1813,6 +2190,14 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
priv->latency = 16;
write_latency_timer(port);
create_sysfs_attrs(port);
+
+ result = ftdi_gpio_init(port);
+ if (result < 0) {
+ dev_err(&port->serial->interface->dev,
+ "GPIO initialisation failed: %d\n",
+ result);
+ }
+
return 0;
}
@@ -1930,6 +2315,8 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
+ ftdi_gpio_remove(port);
+
remove_sysfs_attrs(port);
kfree(priv);
@@ -2452,10 +2839,6 @@ static int ftdi_ioctl(struct tty_struct *tty,
void __user *argp = (void __user *)arg;
switch (cmd) {
- case TIOCGSERIAL:
- return get_serial_info(port, argp);
- case TIOCSSERIAL:
- return set_serial_info(tty, port, argp);
case TIOCSERGETLSR:
return get_lsr_info(port, argp);
default:
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index dcd0b6e05baf..a79a1325b4d9 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -35,7 +35,10 @@
#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
-#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
+#define FTDI_SIO_GET_LATENCY_TIMER 0x0a /* Get the latency timer */
+#define FTDI_SIO_SET_BITMODE 0x0b /* Set bitbang mode */
+#define FTDI_SIO_READ_PINS 0x0c /* Read immediate value of pins */
+#define FTDI_SIO_READ_EEPROM 0x90 /* Read EEPROM */
/* Interface indices for FT2232, FT2232H and FT4232H devices */
#define INTERFACE_A 1
@@ -433,6 +436,29 @@ enum ftdi_sio_baudrate {
* 1 = active
*/
+/* FTDI_SIO_SET_BITMODE */
+#define FTDI_SIO_SET_BITMODE_REQUEST_TYPE 0x40
+#define FTDI_SIO_SET_BITMODE_REQUEST FTDI_SIO_SET_BITMODE
+
+/* Possible bitmodes for FTDI_SIO_SET_BITMODE_REQUEST */
+#define FTDI_SIO_BITMODE_RESET 0x00
+#define FTDI_SIO_BITMODE_CBUS 0x20
+
+/* FTDI_SIO_READ_PINS */
+#define FTDI_SIO_READ_PINS_REQUEST_TYPE 0xc0
+#define FTDI_SIO_READ_PINS_REQUEST FTDI_SIO_READ_PINS
+
+/*
+ * FTDI_SIO_READ_EEPROM
+ *
+ * EEPROM format found in FTDI AN_201, "FT-X MTP memory Configuration",
+ * http://www.ftdichip.com/Support/Documents/AppNotes/AN_201_FT-X%20MTP%20Memory%20Configuration.pdf
+ */
+#define FTDI_SIO_READ_EEPROM_REQUEST_TYPE 0xc0
+#define FTDI_SIO_READ_EEPROM_REQUEST FTDI_SIO_READ_EEPROM
+
+#define FTDI_FTX_CBUS_MUX_GPIO 0x8
+#define FTDI_FT232R_CBUS_MUX_GPIO 0xa
/* Descriptors returned by the device
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 97c69d373ca6..4ca31c0e4174 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1637,24 +1637,20 @@ static int edge_tiocmget(struct tty_struct *tty)
return result;
}
-static int get_serial_info(struct edgeport_port *edge_port,
- struct serial_struct __user *retinfo)
+static int get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
-
- tmp.type = PORT_16550A;
- tmp.line = edge_port->port->minor;
- tmp.port = edge_port->port->port_number;
- tmp.irq = 0;
- tmp.xmit_fifo_size = edge_port->maxTxCredits;
- tmp.baud_base = 9600;
- tmp.close_delay = 5*HZ;
- tmp.closing_wait = 30*HZ;
+ struct usb_serial_port *port = tty->driver_data;
+ struct edgeport_port *edge_port = usb_get_serial_port_data(port);
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
+ ss->type = PORT_16550A;
+ ss->line = edge_port->port->minor;
+ ss->port = edge_port->port->port_number;
+ ss->irq = 0;
+ ss->xmit_fifo_size = edge_port->maxTxCredits;
+ ss->baud_base = 9600;
+ ss->close_delay = 5*HZ;
+ ss->closing_wait = 30*HZ;
return 0;
}
@@ -1667,17 +1663,12 @@ static int edge_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
- DEFINE_WAIT(wait);
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
switch (cmd) {
case TIOCSERGETLSR:
dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__);
return get_lsr_info(edge_port, (unsigned int __user *) arg);
-
- case TIOCGSERIAL:
- dev_dbg(&port->dev, "%s TIOCGSERIAL\n", __func__);
- return get_serial_info(edge_port, (struct serial_struct __user *) arg);
}
return -ENOIOCTLCMD;
}
@@ -3126,6 +3117,7 @@ static struct usb_serial_driver edgeport_2port_device = {
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
+ .get_serial = get_serial_info,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.write = edge_write,
@@ -3161,6 +3153,7 @@ static struct usb_serial_driver edgeport_4port_device = {
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
+ .get_serial = get_serial_info,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.write = edge_write,
@@ -3196,6 +3189,7 @@ static struct usb_serial_driver edgeport_8port_device = {
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
+ .get_serial = get_serial_info,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.write = edge_write,
@@ -3231,6 +3225,7 @@ static struct usb_serial_driver epic_device = {
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
+ .get_serial = get_serial_info,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.write = edge_write,
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 6d1d6efa3055..c327d4cf7928 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2437,47 +2437,28 @@ static int edge_tiocmget(struct tty_struct *tty)
return result;
}
-static int get_serial_info(struct edgeport_port *edge_port,
- struct serial_struct __user *retinfo)
+static int get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct tmp;
+ struct usb_serial_port *port = tty->driver_data;
+ struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned cwait;
cwait = edge_port->port->port.closing_wait;
if (cwait != ASYNC_CLOSING_WAIT_NONE)
cwait = jiffies_to_msecs(cwait) / 10;
- memset(&tmp, 0, sizeof(tmp));
-
- tmp.type = PORT_16550A;
- tmp.line = edge_port->port->minor;
- tmp.port = edge_port->port->port_number;
- tmp.irq = 0;
- tmp.xmit_fifo_size = edge_port->port->bulk_out_size;
- tmp.baud_base = 9600;
- tmp.close_delay = 5*HZ;
- tmp.closing_wait = cwait;
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
+ ss->type = PORT_16550A;
+ ss->line = edge_port->port->minor;
+ ss->port = edge_port->port->port_number;
+ ss->irq = 0;
+ ss->xmit_fifo_size = edge_port->port->bulk_out_size;
+ ss->baud_base = 9600;
+ ss->close_delay = 5*HZ;
+ ss->closing_wait = cwait;
return 0;
}
-static int edge_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct edgeport_port *edge_port = usb_get_serial_port_data(port);
-
- switch (cmd) {
- case TIOCGSERIAL:
- dev_dbg(&port->dev, "%s - TIOCGSERIAL\n", __func__);
- return get_serial_info(edge_port,
- (struct serial_struct __user *) arg);
- }
- return -ENOIOCTLCMD;
-}
-
static void edge_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
@@ -2738,7 +2719,7 @@ static struct usb_serial_driver edgeport_1port_device = {
.release = edge_release,
.port_probe = edge_port_probe,
.port_remove = edge_port_remove,
- .ioctl = edge_ioctl,
+ .get_serial = get_serial_info,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
@@ -2777,7 +2758,7 @@ static struct usb_serial_driver edgeport_2port_device = {
.release = edge_release,
.port_probe = edge_port_probe,
.port_remove = edge_port_remove,
- .ioctl = edge_ioctl,
+ .get_serial = get_serial_info,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
.tiocmset = edge_tiocmset,
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 27109522fd8b..fc52ac75fbf6 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1786,69 +1786,20 @@ static int mos7720_tiocmset(struct tty_struct *tty,
return 0;
}
-static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
- unsigned int __user *value)
+static int get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- unsigned int mcr;
- unsigned int arg;
-
- struct usb_serial_port *port;
-
- if (mos7720_port == NULL)
- return -1;
-
- port = (struct usb_serial_port *)mos7720_port->port;
- mcr = mos7720_port->shadowMCR;
-
- if (copy_from_user(&arg, value, sizeof(int)))
- return -EFAULT;
-
- switch (cmd) {
- case TIOCMBIS:
- if (arg & TIOCM_RTS)
- mcr |= UART_MCR_RTS;
- if (arg & TIOCM_DTR)
- mcr |= UART_MCR_RTS;
- if (arg & TIOCM_LOOP)
- mcr |= UART_MCR_LOOP;
- break;
-
- case TIOCMBIC:
- if (arg & TIOCM_RTS)
- mcr &= ~UART_MCR_RTS;
- if (arg & TIOCM_DTR)
- mcr &= ~UART_MCR_RTS;
- if (arg & TIOCM_LOOP)
- mcr &= ~UART_MCR_LOOP;
- break;
-
- }
-
- mos7720_port->shadowMCR = mcr;
- write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
- mos7720_port->shadowMCR);
-
- return 0;
-}
-
-static int get_serial_info(struct moschip_port *mos7720_port,
- struct serial_struct __user *retinfo)
-{
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
-
- tmp.type = PORT_16550A;
- tmp.line = mos7720_port->port->minor;
- tmp.port = mos7720_port->port->port_number;
- tmp.irq = 0;
- tmp.xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE;
- tmp.baud_base = 9600;
- tmp.close_delay = 5*HZ;
- tmp.closing_wait = 30*HZ;
+ struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7720_port = usb_get_serial_port_data(port);
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
+ ss->type = PORT_16550A;
+ ss->line = mos7720_port->port->minor;
+ ss->port = mos7720_port->port->port_number;
+ ss->irq = 0;
+ ss->xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE;
+ ss->baud_base = 9600;
+ ss->close_delay = 5*HZ;
+ ss->closing_wait = 30*HZ;
return 0;
}
@@ -1867,18 +1818,6 @@ static int mos7720_ioctl(struct tty_struct *tty,
dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__);
return get_lsr_info(tty, mos7720_port,
(unsigned int __user *)arg);
-
- /* FIXME: These should be using the mode methods */
- case TIOCMBIS:
- case TIOCMBIC:
- dev_dbg(&port->dev, "%s TIOCMSET/TIOCMBIC/TIOCMSET\n", __func__);
- return set_modem_info(mos7720_port, cmd,
- (unsigned int __user *)arg);
-
- case TIOCGSERIAL:
- dev_dbg(&port->dev, "%s TIOCGSERIAL\n", __func__);
- return get_serial_info(mos7720_port,
- (struct serial_struct __user *)arg);
}
return -ENOIOCTLCMD;
@@ -2015,6 +1954,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
.ioctl = mos7720_ioctl,
.tiocmget = mos7720_tiocmget,
.tiocmset = mos7720_tiocmset,
+ .get_serial = get_serial_info,
.set_termios = mos7720_set_termios,
.write = mos7720_write,
.write_room = mos7720_write_room,
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index b42bad85097a..88828b4b8c44 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1931,27 +1931,20 @@ static int mos7840_get_lsr_info(struct tty_struct *tty,
* function to get information about serial port
*****************************************************************************/
-static int mos7840_get_serial_info(struct moschip_port *mos7840_port,
- struct serial_struct __user *retinfo)
+static int mos7840_get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct tmp;
-
- if (mos7840_port == NULL)
- return -1;
-
- memset(&tmp, 0, sizeof(tmp));
-
- tmp.type = PORT_16550A;
- tmp.line = mos7840_port->port->minor;
- tmp.port = mos7840_port->port->port_number;
- tmp.irq = 0;
- tmp.xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE;
- tmp.baud_base = 9600;
- tmp.close_delay = 5 * HZ;
- tmp.closing_wait = 30 * HZ;
+ struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = mos7840_get_port_private(port);
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
+ ss->type = PORT_16550A;
+ ss->line = mos7840_port->port->minor;
+ ss->port = mos7840_port->port->port_number;
+ ss->irq = 0;
+ ss->xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE;
+ ss->baud_base = 9600;
+ ss->close_delay = 5 * HZ;
+ ss->closing_wait = 30 * HZ;
return 0;
}
@@ -1982,13 +1975,6 @@ static int mos7840_ioctl(struct tty_struct *tty,
dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__);
return mos7840_get_lsr_info(tty, argp);
- case TIOCGSERIAL:
- dev_dbg(&port->dev, "%s TIOCGSERIAL\n", __func__);
- return mos7840_get_serial_info(mos7840_port, argp);
-
- case TIOCSSERIAL:
- dev_dbg(&port->dev, "%s TIOCSSERIAL\n", __func__);
- break;
default:
break;
}
@@ -2376,6 +2362,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
.calc_num_ports = mos7840_calc_num_ports,
.probe = mos7840_probe,
.ioctl = mos7840_ioctl,
+ .get_serial = mos7840_get_serial_info,
.set_termios = mos7840_set_termios,
.break_ctl = mos7840_break,
.tiocmget = mos7840_tiocmget,
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index caa0746326fd..cb7aac9cd9e7 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -328,42 +328,23 @@ static int opticon_tiocmset(struct tty_struct *tty,
return 0;
}
-static int get_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *serial)
+static int get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct tmp;
-
- memset(&tmp, 0x00, sizeof(tmp));
+ struct usb_serial_port *port = tty->driver_data;
/* pretend to be a 16550 UART to keep userspace code happy */
- tmp.type = PORT_16550A;
- tmp.line = port->minor;
- tmp.port = 0;
- tmp.irq = 0;
- tmp.xmit_fifo_size = 1024;
- tmp.baud_base = 9600;
- tmp.close_delay = 5*HZ;
- tmp.closing_wait = 30*HZ;
-
- if (copy_to_user(serial, &tmp, sizeof(*serial)))
- return -EFAULT;
+ ss->type = PORT_16550A;
+ ss->line = port->minor;
+ ss->port = 0;
+ ss->irq = 0;
+ ss->xmit_fifo_size = 1024;
+ ss->baud_base = 9600;
+ ss->close_delay = 5*HZ;
+ ss->closing_wait = 30*HZ;
return 0;
}
-static int opticon_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
-
- switch (cmd) {
- case TIOCGSERIAL:
- return get_serial_info(port,
- (struct serial_struct __user *)arg);
- }
-
- return -ENOIOCTLCMD;
-}
-
static int opticon_port_probe(struct usb_serial_port *port)
{
struct opticon_private *priv;
@@ -404,7 +385,7 @@ static struct usb_serial_driver opticon_device = {
.write_room = opticon_write_room,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
- .ioctl = opticon_ioctl,
+ .get_serial = get_serial_info,
.tiocmget = opticon_tiocmget,
.tiocmset = opticon_tiocmset,
.process_read_urb = opticon_process_read_urb,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 0215b70c4efc..e24ff16d4147 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -561,6 +561,9 @@ static void option_instat_callback(struct urb *urb);
/* Interface is reserved */
#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
+/* Interface must have two endpoints */
+#define NUMEP2 BIT(16)
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -1081,8 +1084,9 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
- .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1962,7 +1966,8 @@ static struct usb_serial_driver option_1port_device = {
.chars_in_buffer = usb_wwan_chars_in_buffer,
.tiocmget = usb_wwan_tiocmget,
.tiocmset = usb_wwan_tiocmset,
- .ioctl = usb_wwan_ioctl,
+ .get_serial = usb_wwan_get_serial_info,
+ .set_serial = usb_wwan_set_serial_info,
.attach = option_attach,
.release = option_release,
.port_probe = usb_wwan_port_probe,
@@ -1999,6 +2004,13 @@ static int option_probe(struct usb_serial *serial,
if (device_flags & RSVD(iface_desc->bInterfaceNumber))
return -ENODEV;
+ /*
+ * Allow matching on bNumEndpoints for devices whose interface numbers
+ * can change (e.g. Quectel EP06).
+ */
+ if (device_flags & NUMEP2 && iface_desc->bNumEndpoints != 2)
+ return -ENODEV;
+
/* Store the device flags so we can use them during attach. */
usb_set_serial_data(serial, (void *)device_flags);
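/*
 * Editor's note: driver_info now carries two kinds of matching hints in one
 * word: RSVD(ifnum) sets one of bits 0-7 to blacklist an interface number,
 * and NUMEP2 (bit 16) makes option_probe() reject any interface without
 * exactly two endpoints. For the EP06 entry above:
 *
 *	RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2
 *	  = BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(16)
 *	  = 0x0001001e
 */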
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index e41f725ac7aa..a4e0d13fc121 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -808,29 +808,16 @@ static int pl2303_carrier_raised(struct usb_serial_port *port)
return 0;
}
-static int pl2303_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
+static int pl2303_get_serial(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct ser;
struct usb_serial_port *port = tty->driver_data;
- switch (cmd) {
- case TIOCGSERIAL:
- memset(&ser, 0, sizeof ser);
- ser.type = PORT_16654;
- ser.line = port->minor;
- ser.port = port->port_number;
- ser.baud_base = 460800;
-
- if (copy_to_user((void __user *)arg, &ser, sizeof ser))
- return -EFAULT;
-
- return 0;
- default:
- break;
- }
-
- return -ENOIOCTLCMD;
+ ss->type = PORT_16654;
+ ss->line = port->minor;
+ ss->port = port->port_number;
+ ss->baud_base = 460800;
+ return 0;
}
static void pl2303_set_break(struct usb_serial_port *port, bool enable)
@@ -1016,7 +1003,7 @@ static struct usb_serial_driver pl2303_device = {
.close = pl2303_close,
.dtr_rts = pl2303_dtr_rts,
.carrier_raised = pl2303_carrier_raised,
- .ioctl = pl2303_ioctl,
+ .get_serial = pl2303_get_serial,
.break_ctl = pl2303_break_ctl,
.set_termios = pl2303_set_termios,
.tiocmget = pl2303_tiocmget,
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index b61c2a9b6b11..f2fbe1ec9701 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -453,39 +453,19 @@ static void qt2_disconnect(struct usb_serial *serial)
usb_kill_urb(serial_priv->read_urb);
}
-static int get_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *retinfo)
-{
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
- tmp.line = port->minor;
- tmp.port = 0;
- tmp.irq = 0;
- tmp.xmit_fifo_size = port->bulk_out_size;
- tmp.baud_base = 9600;
- tmp.close_delay = 5*HZ;
- tmp.closing_wait = 30*HZ;
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
- return 0;
-}
-
-static int qt2_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
+static int get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
- switch (cmd) {
- case TIOCGSERIAL:
- return get_serial_info(port,
- (struct serial_struct __user *)arg);
- default:
- break;
- }
-
- return -ENOIOCTLCMD;
+ ss->line = port->minor;
+ ss->port = 0;
+ ss->irq = 0;
+ ss->xmit_fifo_size = port->bulk_out_size;
+ ss->baud_base = 9600;
+ ss->close_delay = 5*HZ;
+ ss->closing_wait = 30*HZ;
+ return 0;
}
static void qt2_process_status(struct usb_serial_port *port, unsigned char *ch)
@@ -1013,7 +993,7 @@ static struct usb_serial_driver qt2_device = {
.tiocmset = qt2_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
- .ioctl = qt2_ioctl,
+ .get_serial = get_serial_info,
.set_termios = qt2_set_termios,
};
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 0900b47b5f57..f6aea9f1be1a 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -331,39 +331,19 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
return usb_serial_generic_open(tty, port);
}
-static int get_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *retinfo)
-{
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
- tmp.line = port->minor;
- tmp.port = 0;
- tmp.irq = 0;
- tmp.xmit_fifo_size = port->bulk_out_size;
- tmp.baud_base = 9600;
- tmp.close_delay = 5*HZ;
- tmp.closing_wait = 30*HZ;
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
- return 0;
-}
-
-static int ssu100_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
+static int get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
- switch (cmd) {
- case TIOCGSERIAL:
- return get_serial_info(port,
- (struct serial_struct __user *) arg);
- default:
- break;
- }
-
- return -ENOIOCTLCMD;
+ ss->line = port->minor;
+ ss->port = 0;
+ ss->irq = 0;
+ ss->xmit_fifo_size = port->bulk_out_size;
+ ss->baud_base = 9600;
+ ss->close_delay = 5*HZ;
+ ss->closing_wait = 30*HZ;
+ return 0;
}
static int ssu100_attach(struct usb_serial *serial)
@@ -566,7 +546,7 @@ static struct usb_serial_driver ssu100_device = {
.tiocmset = ssu100_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
- .ioctl = ssu100_ioctl,
+ .get_serial = get_serial_info,
.set_termios = ssu100_set_termios,
};
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e3c5832337e0..dd0ad67aa71e 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -313,8 +313,6 @@ static int ti_chars_in_buffer(struct tty_struct *tty);
static bool ti_tx_empty(struct usb_serial_port *port);
static void ti_throttle(struct tty_struct *tty);
static void ti_unthrottle(struct tty_struct *tty);
-static int ti_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
static void ti_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios);
static int ti_tiocmget(struct tty_struct *tty);
@@ -330,10 +328,10 @@ static void ti_recv(struct usb_serial_port *port, unsigned char *data,
static void ti_send(struct ti_port *tport);
static int ti_set_mcr(struct ti_port *tport, unsigned int mcr);
static int ti_get_lsr(struct ti_port *tport, u8 *lsr);
-static int ti_get_serial_info(struct ti_port *tport,
- struct serial_struct __user *ret_arg);
-static int ti_set_serial_info(struct tty_struct *tty, struct ti_port *tport,
- struct serial_struct __user *new_arg);
+static int ti_get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss);
+static int ti_set_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss);
static void ti_handle_new_msr(struct ti_port *tport, u8 msr);
static void ti_stop_read(struct ti_port *tport, struct tty_struct *tty);
@@ -436,7 +434,8 @@ static struct usb_serial_driver ti_1port_device = {
.tx_empty = ti_tx_empty,
.throttle = ti_throttle,
.unthrottle = ti_unthrottle,
- .ioctl = ti_ioctl,
+ .get_serial = ti_get_serial_info,
+ .set_serial = ti_set_serial_info,
.set_termios = ti_set_termios,
.tiocmget = ti_tiocmget,
.tiocmset = ti_tiocmset,
@@ -469,7 +468,8 @@ static struct usb_serial_driver ti_2port_device = {
.tx_empty = ti_tx_empty,
.throttle = ti_throttle,
.unthrottle = ti_unthrottle,
- .ioctl = ti_ioctl,
+ .get_serial = ti_get_serial_info,
+ .set_serial = ti_set_serial_info,
.set_termios = ti_set_termios,
.tiocmget = ti_tiocmget,
.tiocmset = ti_tiocmset,
@@ -902,24 +902,6 @@ static void ti_unthrottle(struct tty_struct *tty)
}
}
-static int ti_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct ti_port *tport = usb_get_serial_port_data(port);
-
- switch (cmd) {
- case TIOCGSERIAL:
- return ti_get_serial_info(tport,
- (struct serial_struct __user *)arg);
- case TIOCSSERIAL:
- return ti_set_serial_info(tty, tport,
- (struct serial_struct __user *)arg);
- }
- return -ENOIOCTLCMD;
-}
-
-
static void ti_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -1417,45 +1399,37 @@ free_data:
}
-static int ti_get_serial_info(struct ti_port *tport,
- struct serial_struct __user *ret_arg)
+static int ti_get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct usb_serial_port *port = tport->tp_port;
- struct serial_struct ret_serial;
+ struct usb_serial_port *port = tty->driver_data;
+ struct ti_port *tport = usb_get_serial_port_data(port);
unsigned cwait;
cwait = port->port.closing_wait;
if (cwait != ASYNC_CLOSING_WAIT_NONE)
cwait = jiffies_to_msecs(cwait) / 10;
- memset(&ret_serial, 0, sizeof(ret_serial));
-
- ret_serial.type = PORT_16550A;
- ret_serial.line = port->minor;
- ret_serial.port = port->port_number;
- ret_serial.xmit_fifo_size = kfifo_size(&port->write_fifo);
- ret_serial.baud_base = tport->tp_tdev->td_is_3410 ? 921600 : 460800;
- ret_serial.closing_wait = cwait;
-
- if (copy_to_user(ret_arg, &ret_serial, sizeof(*ret_arg)))
- return -EFAULT;
-
+ ss->type = PORT_16550A;
+ ss->line = port->minor;
+ ss->port = port->port_number;
+ ss->xmit_fifo_size = kfifo_size(&port->write_fifo);
+ ss->baud_base = tport->tp_tdev->td_is_3410 ? 921600 : 460800;
+ ss->closing_wait = cwait;
return 0;
}
-static int ti_set_serial_info(struct tty_struct *tty, struct ti_port *tport,
- struct serial_struct __user *new_arg)
+static int ti_set_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct new_serial;
+ struct usb_serial_port *port = tty->driver_data;
+ struct ti_port *tport = usb_get_serial_port_data(port);
unsigned cwait;
- if (copy_from_user(&new_serial, new_arg, sizeof(new_serial)))
- return -EFAULT;
-
- cwait = new_serial.closing_wait;
+ cwait = ss->closing_wait;
if (cwait != ASYNC_CLOSING_WAIT_NONE)
- cwait = msecs_to_jiffies(10 * new_serial.closing_wait);
+ cwait = msecs_to_jiffies(10 * ss->closing_wait);
tport->tp_port->port.closing_wait = cwait;
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 40864c2bd9dc..4d0273508043 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -84,7 +84,8 @@ DEVICE(moto_modem, MOTO_IDS);
/* Motorola Tetra driver */
#define MOTOROLA_TETRA_IDS() \
- { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
+ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
+ { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
/* Novatel Wireless GPS driver */
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index f7aaa7f079e1..7e89efbf2c28 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -396,6 +396,24 @@ static void serial_unthrottle(struct tty_struct *tty)
port->serial->type->unthrottle(tty);
}
+static int serial_get_serial(struct tty_struct *tty, struct serial_struct *ss)
+{
+ struct usb_serial_port *port = tty->driver_data;
+
+ if (port->serial->type->get_serial)
+ return port->serial->type->get_serial(tty, ss);
+ return -ENOTTY;
+}
+
+static int serial_set_serial(struct tty_struct *tty, struct serial_struct *ss)
+{
+ struct usb_serial_port *port = tty->driver_data;
+
+ if (port->serial->type->set_serial)
+ return port->serial->type->set_serial(tty, ss);
+ return -ENOTTY;
+}
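/*
 * Editor's note: these trampolines are what the tty core calls for
 * TIOCGSERIAL/TIOCSSERIAL once the per-driver ioctl cases are removed; a
 * sub-driver opts in simply by filling the new function pointers, e.g.:
 *
 *	static struct usb_serial_driver foo_device = {
 *		...
 *		.get_serial	= foo_get_serial,
 *		.set_serial	= foo_set_serial,
 *	};
 */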
+
static int serial_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
@@ -1177,6 +1195,8 @@ static const struct tty_operations serial_ops = {
.tiocmget = serial_tiocmget,
.tiocmset = serial_tiocmset,
.get_icount = serial_get_icount,
+ .set_serial = serial_set_serial,
+ .get_serial = serial_get_serial,
.cleanup = serial_cleanup,
.install = serial_install,
.proc_show = serial_proc_show,
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index d28dab4b9eff..1c120eaf4091 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -15,8 +15,10 @@ extern int usb_wwan_write_room(struct tty_struct *tty);
extern int usb_wwan_tiocmget(struct tty_struct *tty);
extern int usb_wwan_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
-extern int usb_wwan_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
+extern int usb_wwan_get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss);
+extern int usb_wwan_set_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss);
extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
extern int usb_wwan_chars_in_buffer(struct tty_struct *tty);
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 912472f26e4f..7e855c87e4f7 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -132,38 +132,32 @@ int usb_wwan_tiocmset(struct tty_struct *tty,
}
EXPORT_SYMBOL(usb_wwan_tiocmset);
-static int get_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *retinfo)
+int usb_wwan_get_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct tmp;
-
- memset(&tmp, 0, sizeof(tmp));
- tmp.line = port->minor;
- tmp.port = port->port_number;
- tmp.baud_base = tty_get_baud_rate(port->port.tty);
- tmp.close_delay = port->port.close_delay / 10;
- tmp.closing_wait = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ struct usb_serial_port *port = tty->driver_data;
+
+ ss->line = port->minor;
+ ss->port = port->port_number;
+ ss->baud_base = tty_get_baud_rate(port->port.tty);
+ ss->close_delay = port->port.close_delay / 10;
+ ss->closing_wait = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
port->port.closing_wait / 10;
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
return 0;
}
+EXPORT_SYMBOL(usb_wwan_get_serial_info);
-static int set_serial_info(struct usb_serial_port *port,
- struct serial_struct __user *newinfo)
+int usb_wwan_set_serial_info(struct tty_struct *tty,
+ struct serial_struct *ss)
{
- struct serial_struct new_serial;
+ struct usb_serial_port *port = tty->driver_data;
unsigned int closing_wait, close_delay;
int retval = 0;
- if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
- return -EFAULT;
-
- close_delay = new_serial.close_delay * 10;
- closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
- ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+ close_delay = ss->close_delay * 10;
+ closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
mutex_lock(&port->port.mutex);
@@ -181,30 +175,7 @@ static int set_serial_info(struct usb_serial_port *port,
mutex_unlock(&port->port.mutex);
return retval;
}
-
-int usb_wwan_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct usb_serial_port *port = tty->driver_data;
-
- dev_dbg(&port->dev, "%s cmd 0x%04x\n", __func__, cmd);
-
- switch (cmd) {
- case TIOCGSERIAL:
- return get_serial_info(port,
- (struct serial_struct __user *) arg);
- case TIOCSSERIAL:
- return set_serial_info(port,
- (struct serial_struct __user *) arg);
- default:
- break;
- }
-
- dev_dbg(&port->dev, "%s arg not supported\n", __func__);
-
- return -ENOIOCTLCMD;
-}
-EXPORT_SYMBOL(usb_wwan_ioctl);
+EXPORT_SYMBOL(usb_wwan_set_serial_info);
int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 1c7b46a8620c..aefd84f88b59 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -83,8 +83,8 @@ static int whiteheat_port_remove(struct usb_serial_port *port);
static int whiteheat_open(struct tty_struct *tty,
struct usb_serial_port *port);
static void whiteheat_close(struct usb_serial_port *port);
-static int whiteheat_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
+static int whiteheat_get_serial(struct tty_struct *tty,
+ struct serial_struct *ss);
static void whiteheat_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static int whiteheat_tiocmget(struct tty_struct *tty);
@@ -120,7 +120,7 @@ static struct usb_serial_driver whiteheat_device = {
.port_remove = whiteheat_port_remove,
.open = whiteheat_open,
.close = whiteheat_close,
- .ioctl = whiteheat_ioctl,
+ .get_serial = whiteheat_get_serial,
.set_termios = whiteheat_set_termios,
.break_ctl = whiteheat_break_ctl,
.tiocmget = whiteheat_tiocmget,
@@ -442,33 +442,21 @@ static int whiteheat_tiocmset(struct tty_struct *tty,
}
-static int whiteheat_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
+static int whiteheat_get_serial(struct tty_struct *tty,
+ struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
- struct serial_struct serstruct;
- void __user *user_arg = (void __user *)arg;
-
- switch (cmd) {
- case TIOCGSERIAL:
- memset(&serstruct, 0, sizeof(serstruct));
- serstruct.type = PORT_16654;
- serstruct.line = port->minor;
- serstruct.port = port->port_number;
- serstruct.xmit_fifo_size = kfifo_size(&port->write_fifo);
- serstruct.custom_divisor = 0;
- serstruct.baud_base = 460800;
- serstruct.close_delay = CLOSING_DELAY;
- serstruct.closing_wait = CLOSING_DELAY;
-
- if (copy_to_user(user_arg, &serstruct, sizeof(serstruct)))
- return -EFAULT;
- break;
- default:
- break;
- }
- return -ENOIOCTLCMD;
+ ss->type = PORT_16654;
+ ss->line = port->minor;
+ ss->port = port->port_number;
+ ss->xmit_fifo_size = kfifo_size(&port->write_fifo);
+ ss->custom_divisor = 0;
+ ss->baud_base = 460800;
+ ss->close_delay = CLOSING_DELAY;
+ ss->closing_wait = CLOSING_DELAY;
+
+ return 0;
}
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index ec84758f0e23..6fd427284b12 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -23,16 +23,16 @@ config USB_STORAGE
To compile this driver as a module, choose M here: the
module will be called usb-storage.
+if USB_STORAGE
+
config USB_STORAGE_DEBUG
bool "USB Mass Storage verbose debug"
- depends on USB_STORAGE
help
Say Y here in order to have the USB Mass Storage code generate
verbose debugging messages.
config USB_STORAGE_REALTEK
tristate "Realtek Card Reader support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the power-saving function
for Realtek RTS51xx USB card readers.
@@ -46,7 +46,6 @@ config REALTEK_AUTOPM
config USB_STORAGE_DATAFAB
tristate "Datafab Compact Flash Reader support"
- depends on USB_STORAGE
help
Support for certain Datafab CompactFlash readers.
Datafab has a web page at <http://www.datafab.com/>.
@@ -55,7 +54,6 @@ config USB_STORAGE_DATAFAB
config USB_STORAGE_FREECOM
tristate "Freecom USB/ATAPI Bridge support"
- depends on USB_STORAGE
help
Support for the Freecom USB to IDE/ATAPI adaptor.
Freecom has a web page at <http://www.freecom.de/>.
@@ -64,7 +62,6 @@ config USB_STORAGE_FREECOM
config USB_STORAGE_ISD200
tristate "ISD-200 USB/ATA Bridge support"
- depends on USB_STORAGE
---help---
Say Y here if you want to use USB Mass Store devices based
on the In-Systems Design ISD-200 USB/ATA bridge.
@@ -82,7 +79,6 @@ config USB_STORAGE_ISD200
config USB_STORAGE_USBAT
tristate "USBAT/USBAT02-based storage support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support storage devices
based on the SCM/Shuttle USBAT/USBAT02 processors.
@@ -105,7 +101,6 @@ config USB_STORAGE_USBAT
config USB_STORAGE_SDDR09
tristate "SanDisk SDDR-09 (and other SmartMedia, including DPCM) support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Sandisk SDDR-09
SmartMedia reader in the USB Mass Storage driver.
@@ -115,7 +110,6 @@ config USB_STORAGE_SDDR09
config USB_STORAGE_SDDR55
tristate "SanDisk SDDR-55 SmartMedia support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Sandisk SDDR-55
SmartMedia reader in the USB Mass Storage driver.
@@ -124,7 +118,6 @@ config USB_STORAGE_SDDR55
config USB_STORAGE_JUMPSHOT
tristate "Lexar Jumpshot Compact Flash Reader"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Lexar Jumpshot
USB CompactFlash reader.
@@ -133,7 +126,6 @@ config USB_STORAGE_JUMPSHOT
config USB_STORAGE_ALAUDA
tristate "Olympus MAUSB-10/Fuji DPC-R1 support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Olympus MAUSB-10
and Fujifilm DPC-R1 USB Card reader/writer devices.
@@ -145,7 +137,6 @@ config USB_STORAGE_ALAUDA
config USB_STORAGE_ONETOUCH
tristate "Support OneTouch Button on Maxtor Hard Drives"
- depends on USB_STORAGE
depends on INPUT=y || INPUT=USB_STORAGE
help
Say Y here to include additional code to support the Maxtor OneTouch
@@ -160,7 +151,6 @@ config USB_STORAGE_ONETOUCH
config USB_STORAGE_KARMA
tristate "Support for Rio Karma music player"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Rio Karma
USB interface.
@@ -174,7 +164,6 @@ config USB_STORAGE_KARMA
config USB_STORAGE_CYPRESS_ATACB
tristate "SAT emulation on Cypress USB/ATA Bridge with ATACB"
- depends on USB_STORAGE
---help---
Say Y here if you want to use SAT (ata pass through) on devices based
on the Cypress USB/ATA bridge supporting ATACB. This will allow you
@@ -187,19 +176,15 @@ config USB_STORAGE_CYPRESS_ATACB
config USB_STORAGE_ENE_UB6250
tristate "USB ENE card reader support"
- depends on SCSI
- depends on USB_STORAGE
---help---
Say Y here if you wish to control an ENE SD/MS Card reader.
Note that this driver does not support SM cards.
- This option depends on 'SCSI' support being enabled, but you
- probably also need 'SCSI device support: SCSI disk support'
- (BLK_DEV_SD) for most USB storage devices.
-
To compile this driver as a module, choose M here: the
module will be called ums-eneub6250.
+endif # USB_STORAGE
+
config USB_UAS
tristate "USB Attached SCSI"
depends on SCSI && USB_STORAGE
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index f5e4500d9970..2b474d60b4db 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1153,7 +1153,7 @@ static int isd200_get_inquiry_data( struct us_data *us )
/* Fill in vendor identification fields */
src = (__be16 *)&id[ATA_ID_PROD];
dest = (__u16*)info->InquiryData.VendorId;
- for (i=0;i<4;i++)
+ for (i = 0; i < 4; i++)
dest[i] = be16_to_cpu(src[i]);
src = (__be16 *)&id[ATA_ID_PROD + 8/2];
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 00878c386dd0..30a847c2089d 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -45,50 +45,7 @@ menuconfig TYPEC
if TYPEC
-config TYPEC_TCPM
- tristate "USB Type-C Port Controller Manager"
- depends on USB
- select USB_ROLE_SWITCH
- select POWER_SUPPLY
- help
- The Type-C Port Controller Manager provides a USB PD and USB Type-C
- state machine for use with Type-C Port Controllers.
-
-if TYPEC_TCPM
-
-config TYPEC_TCPCI
- tristate "Type-C Port Controller Interface driver"
- depends on I2C
- select REGMAP_I2C
- help
- Type-C Port Controller driver for TCPCI-compliant controller.
-
-config TYPEC_RT1711H
- tristate "Richtek RT1711H Type-C chip driver"
- depends on I2C
- select TYPEC_TCPCI
- help
- Richtek RT1711H Type-C chip driver that works with
- Type-C Port Controller Manager to provide USB PD and USB
- Type-C functionalities.
-
-source "drivers/usb/typec/fusb302/Kconfig"
-
-config TYPEC_WCOVE
- tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
- depends on ACPI
- depends on INTEL_SOC_PMIC
- depends on INTEL_PMC_IPC
- depends on BXT_WC_PMIC_OPREGION
- help
- This driver adds support for USB Type-C detection on Intel Broxton
- platforms that have Intel Whiskey Cove PMIC. The driver can detect the
- role and cable orientation.
-
- To compile this driver as module, choose M here: the module will be
- called typec_wcove
-
-endif # TYPEC_TCPM
+source "drivers/usb/typec/tcpm/Kconfig"
source "drivers/usb/typec/ucsi/Kconfig"
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index 45b0aef428a8..6696b7263d61 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -2,11 +2,7 @@
obj-$(CONFIG_TYPEC) += typec.o
typec-y := class.o mux.o bus.o
obj-$(CONFIG_TYPEC) += altmodes/
-obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
-obj-y += fusb302/
-obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
+obj-$(CONFIG_TYPEC_TCPM) += tcpm/
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o
obj-$(CONFIG_TYPEC) += mux/
-obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
-obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index e61dffb27a0c..5db0593ca0bd 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1322,7 +1322,7 @@ void typec_set_pwr_role(struct typec_port *port, enum typec_role role)
EXPORT_SYMBOL_GPL(typec_set_pwr_role);
/**
- * typec_set_pwr_role - Report VCONN source change
+ * typec_set_vconn_role - Report VCONN source change
* @port: The USB Type-C Port which VCONN role changed
* @role: Source when @port is sourcing VCONN, or Sink when it's not
*
@@ -1500,7 +1500,7 @@ typec_port_register_altmode(struct typec_port *port,
sprintf(id, "id%04xm%02x", desc->svid, desc->mode);
- mux = typec_mux_get(port->dev.parent, id);
+ mux = typec_mux_get(&port->dev, id);
if (IS_ERR(mux))
return ERR_CAST(mux);
@@ -1540,18 +1540,6 @@ struct typec_port *typec_register_port(struct device *parent,
return ERR_PTR(id);
}
- port->sw = typec_switch_get(cap->fwnode ? &port->dev : parent);
- if (IS_ERR(port->sw)) {
- ret = PTR_ERR(port->sw);
- goto err_switch;
- }
-
- port->mux = typec_mux_get(parent, "typec-mux");
- if (IS_ERR(port->mux)) {
- ret = PTR_ERR(port->mux);
- goto err_mux;
- }
-
switch (cap->type) {
case TYPEC_PORT_SRC:
port->pwr_role = TYPEC_SOURCE;
@@ -1592,13 +1580,26 @@ struct typec_port *typec_register_port(struct device *parent,
port->port_type = cap->type;
port->prefer_role = cap->prefer_role;
+ device_initialize(&port->dev);
port->dev.class = typec_class;
port->dev.parent = parent;
port->dev.fwnode = cap->fwnode;
port->dev.type = &typec_port_dev_type;
dev_set_name(&port->dev, "port%d", id);
- ret = device_register(&port->dev);
+ port->sw = typec_switch_get(&port->dev);
+ if (IS_ERR(port->sw)) {
+ put_device(&port->dev);
+ return ERR_CAST(port->sw);
+ }
+
+ port->mux = typec_mux_get(&port->dev, "typec-mux");
+ if (IS_ERR(port->mux)) {
+ put_device(&port->dev);
+ return ERR_CAST(port->mux);
+ }
+
+ ret = device_add(&port->dev);
if (ret) {
dev_err(parent, "failed to register port (%d)\n", ret);
put_device(&port->dev);
@@ -1606,15 +1607,6 @@ struct typec_port *typec_register_port(struct device *parent,
}
return port;
-
-err_mux:
- typec_switch_put(port->sw);
-
-err_switch:
- ida_simple_remove(&typec_index_ida, port->id);
- kfree(port);
-
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(typec_register_port);
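/*
 * Editor's note: the registration path above now uses the two-step
 * device_initialize()/device_add() pattern instead of device_register().
 * Once device_initialize() has run, every error path can unwind with a
 * single put_device(), leaving the cleanup to the class release callback.
 * A generic sketch of the pattern, with hypothetical foo_* names:
 */
struct foo { struct device dev; };		/* hypothetical wrapper */

struct foo *foo_register(struct device *parent)
{
	struct foo *foo;
	int ret;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return ERR_PTR(-ENOMEM);

	device_initialize(&foo->dev);		/* refcount is live from here */
	foo->dev.parent = parent;
	foo->dev.release = foo_release;		/* frees foo and its resources */

	ret = foo_acquire_resources(foo);	/* e.g. the switch/mux lookups */
	if (ret) {
		put_device(&foo->dev);		/* ->release() does the cleanup */
		return ERR_PTR(ret);
	}

	ret = device_add(&foo->dev);
	if (ret) {
		put_device(&foo->dev);
		return ERR_PTR(ret);
	}
	return foo;
}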
diff --git a/drivers/usb/typec/fusb302/Kconfig b/drivers/usb/typec/fusb302/Kconfig
deleted file mode 100644
index fce099ff39fe..000000000000
--- a/drivers/usb/typec/fusb302/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-config TYPEC_FUSB302
- tristate "Fairchild FUSB302 Type-C chip driver"
- depends on I2C
- help
- The Fairchild FUSB302 Type-C chip driver that works with
- Type-C Port Controller Manager to provide USB PD and USB
- Type-C functionalities.
diff --git a/drivers/usb/typec/fusb302/Makefile b/drivers/usb/typec/fusb302/Makefile
deleted file mode 100644
index 3b51b33631a0..000000000000
--- a/drivers/usb/typec/fusb302/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index ddaac63ecf12..d990aa510fab 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/usb/typec_mux.h>
@@ -49,8 +50,10 @@ struct typec_switch *typec_switch_get(struct device *dev)
mutex_lock(&switch_lock);
sw = device_connection_find_match(dev, "typec-switch", NULL,
typec_switch_match);
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
+ WARN_ON(!try_module_get(sw->dev->driver->owner));
get_device(sw->dev);
+ }
mutex_unlock(&switch_lock);
return sw;
@@ -65,8 +68,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get);
*/
void typec_switch_put(struct typec_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
+ module_put(sw->dev->driver->owner);
put_device(sw->dev);
+ }
}
EXPORT_SYMBOL_GPL(typec_switch_put);
@@ -136,8 +141,10 @@ struct typec_mux *typec_mux_get(struct device *dev, const char *name)
mutex_lock(&mux_lock);
mux = device_connection_find_match(dev, name, NULL, typec_mux_match);
- if (!IS_ERR_OR_NULL(mux))
+ if (!IS_ERR_OR_NULL(mux)) {
+ WARN_ON(!try_module_get(mux->dev->driver->owner));
get_device(mux->dev);
+ }
mutex_unlock(&mux_lock);
return mux;
@@ -152,8 +159,10 @@ EXPORT_SYMBOL_GPL(typec_mux_get);
*/
void typec_mux_put(struct typec_mux *mux)
{
- if (!IS_ERR_OR_NULL(mux))
+ if (!IS_ERR_OR_NULL(mux)) {
+ module_put(mux->dev->driver->owner);
put_device(mux->dev);
+ }
}
EXPORT_SYMBOL_GPL(typec_mux_put);
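/*
 * Editor's note: taking a module reference next to the device reference
 * keeps the switch/mux provider module loaded while a consumer holds a
 * handle -- get_device() alone pins the device, not the code behind its
 * ops. Consumer usage is unchanged; a sketch, assuming a device that has a
 * "typec-switch" device connection described:
 */
static int example_bind_switch(struct device *dev)
{
	struct typec_switch *sw = typec_switch_get(dev);

	if (IS_ERR(sw))
		return PTR_ERR(sw);
	if (!sw)
		return -ENODEV;	/* no connection described */

	/* both the provider module and its device are pinned here */

	typec_switch_put(sw);	/* drops the module and device references */
	return 0;
}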
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
new file mode 100644
index 000000000000..f03ea8a61768
--- /dev/null
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -0,0 +1,52 @@
+config TYPEC_TCPM
+ tristate "USB Type-C Port Controller Manager"
+ depends on USB
+ select USB_ROLE_SWITCH
+ select POWER_SUPPLY
+ help
+ The Type-C Port Controller Manager provides a USB PD and USB Type-C
+ state machine for use with Type-C Port Controllers.
+
+if TYPEC_TCPM
+
+config TYPEC_TCPCI
+ tristate "Type-C Port Controller Interface driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Type-C Port Controller driver for TCPCI-compliant controllers.
+
+if TYPEC_TCPCI
+
+config TYPEC_RT1711H
+ tristate "Richtek RT1711H Type-C chip driver"
+ help
+ Richtek RT1711H Type-C chip driver that works with
+ Type-C Port Controller Manager to provide USB PD and USB
+ Type-C functionalities.
+
+endif # TYPEC_TCPCI
+
+config TYPEC_FUSB302
+ tristate "Fairchild FUSB302 Type-C chip driver"
+ depends on I2C
+ help
+ The Fairchild FUSB302 Type-C chip driver that works with
+ Type-C Port Controller Manager to provide USB PD and USB
+ Type-C functionalities.
+
+config TYPEC_WCOVE
+ tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
+ depends on ACPI
+ depends on INTEL_SOC_PMIC
+ depends on INTEL_PMC_IPC
+ depends on BXT_WC_PMIC_OPREGION
+ help
+ This driver adds support for USB Type-C on Intel Broxton platforms
+ that have the Intel Whiskey Cove PMIC. The driver works with the USB
+ Type-C Port Controller Manager to provide USB PD and Type-C functionalities.
+
+ To compile this driver as a module, choose M here: the module will be
+ called typec_wcove.ko.
+
+endif # TYPEC_TCPM
diff --git a/drivers/usb/typec/tcpm/Makefile b/drivers/usb/typec/tcpm/Makefile
new file mode 100644
index 000000000000..a5ff6c8eb892
--- /dev/null
+++ b/drivers/usb/typec/tcpm/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
+obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o
+obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
+typec_wcove-y := wcove.o
+obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
+obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 82bed9810be6..43b64d9309d0 100644
--- a/drivers/usb/typec/fusb302/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -42,19 +42,12 @@
#define T_BC_LVL_DEBOUNCE_DELAY_MS 30
enum toggling_mode {
- TOGGLINE_MODE_OFF,
+ TOGGLING_MODE_OFF,
TOGGLING_MODE_DRP,
TOGGLING_MODE_SNK,
TOGGLING_MODE_SRC,
};
-static const char * const toggling_mode_name[] = {
- [TOGGLINE_MODE_OFF] = "toggling_OFF",
- [TOGGLING_MODE_DRP] = "toggling_DRP",
- [TOGGLING_MODE_SNK] = "toggling_SNK",
- [TOGGLING_MODE_SRC] = "toggling_SRC",
-};
-
enum src_current_status {
SRC_CURRENT_DEFAULT,
SRC_CURRENT_MEDIUM,
@@ -601,7 +594,7 @@ static int fusb302_set_toggling(struct fusb302_chip *chip,
chip->intr_comp_chng = false;
/* configure toggling mode: none/snk/src/drp */
switch (mode) {
- case TOGGLINE_MODE_OFF:
+ case TOGGLING_MODE_OFF:
ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL2,
FUSB_REG_CONTROL2_MODE_MASK,
FUSB_REG_CONTROL2_MODE_NONE);
@@ -633,7 +626,7 @@ static int fusb302_set_toggling(struct fusb302_chip *chip,
break;
}
- if (mode == TOGGLINE_MODE_OFF) {
+ if (mode == TOGGLING_MODE_OFF) {
/* mask TOGDONE interrupt */
ret = fusb302_i2c_set_bits(chip, FUSB_REG_MASKA,
FUSB_REG_MASKA_TOGDONE);
@@ -686,6 +679,7 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
int ret = 0;
bool pull_up, pull_down;
u8 rd_mda;
+ enum toggling_mode mode;
mutex_lock(&chip->lock);
switch (cc) {
@@ -709,7 +703,7 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
ret = -EINVAL;
goto done;
}
- ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
if (ret < 0) {
fusb302_log(chip, "cannot stop toggling, ret=%d", ret);
goto done;
@@ -771,6 +765,29 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
chip->intr_comp_chng = false;
}
fusb302_log(chip, "cc := %s", typec_cc_status_name[cc]);
+
+	/* Enable detection for fixed SNK-only or SRC-only roles */
+ switch (cc) {
+ case TYPEC_CC_RD:
+ mode = TOGGLING_MODE_SNK;
+ break;
+ case TYPEC_CC_RP_DEF:
+ case TYPEC_CC_RP_1_5:
+ case TYPEC_CC_RP_3_0:
+ mode = TOGGLING_MODE_SRC;
+ break;
+ default:
+ mode = TOGGLING_MODE_OFF;
+ break;
+ }
+
+ if (mode != TOGGLING_MODE_OFF) {
+ ret = fusb302_set_toggling(chip, mode);
+ if (ret < 0)
+ fusb302_log(chip,
+ "cannot set fixed role toggling mode, ret=%d",
+ ret);
+ }
done:
mutex_unlock(&chip->lock);
@@ -1178,10 +1195,6 @@ static const u32 src_pdo[] = {
PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
};
-static const u32 snk_pdo[] = {
- PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
-};
-
static const struct tcpc_config fusb302_tcpc_config = {
.src_pdo = src_pdo,
.nr_src_pdo = ARRAY_SIZE(src_pdo),
@@ -1303,7 +1316,7 @@ static int fusb302_handle_togdone_snk(struct fusb302_chip *chip,
tcpm_cc_change(chip->tcpm_port);
}
/* turn off toggling */
- ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
if (ret < 0) {
fusb302_log(chip,
"cannot set toggling mode off, ret=%d", ret);
@@ -1399,7 +1412,7 @@ static int fusb302_handle_togdone_src(struct fusb302_chip *chip,
tcpm_cc_change(chip->tcpm_port);
}
/* turn off toggling */
- ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
if (ret < 0) {
fusb302_log(chip,
"cannot set toggling mode off, ret=%d", ret);
@@ -1730,12 +1743,14 @@ static int fusb302_probe(struct i2c_client *client,
return -ENOMEM;
chip->i2c_client = client;
- i2c_set_clientdata(client, chip);
chip->dev = &client->dev;
chip->tcpc_config = fusb302_tcpc_config;
chip->tcpc_dev.config = &chip->tcpc_config;
mutex_init(&chip->lock);
+ chip->tcpc_dev.fwnode =
+ device_get_named_child_node(dev, "connector");
+
if (!device_property_read_u32(dev, "fcs,operating-sink-microwatt", &v))
chip->tcpc_config.operating_snk_mw = v / 1000;
@@ -1756,22 +1771,17 @@ static int fusb302_probe(struct i2c_client *client,
return -EPROBE_DEFER;
}
- fusb302_debugfs_init(chip);
+ chip->vbus = devm_regulator_get(chip->dev, "vbus");
+ if (IS_ERR(chip->vbus))
+ return PTR_ERR(chip->vbus);
chip->wq = create_singlethread_workqueue(dev_name(chip->dev));
- if (!chip->wq) {
- ret = -ENOMEM;
- goto clear_client_data;
- }
+ if (!chip->wq)
+ return -ENOMEM;
+
INIT_DELAYED_WORK(&chip->bc_lvl_handler, fusb302_bc_lvl_handler_work);
init_tcpc_dev(&chip->tcpc_dev);
- chip->vbus = devm_regulator_get(chip->dev, "vbus");
- if (IS_ERR(chip->vbus)) {
- ret = PTR_ERR(chip->vbus);
- goto destroy_workqueue;
- }
-
if (client->irq) {
chip->gpio_int_n_irq = client->irq;
} else {
@@ -1797,15 +1807,15 @@ static int fusb302_probe(struct i2c_client *client,
goto tcpm_unregister_port;
}
enable_irq_wake(chip->gpio_int_n_irq);
+ fusb302_debugfs_init(chip);
+ i2c_set_clientdata(client, chip);
+
return ret;
tcpm_unregister_port:
tcpm_unregister_port(chip->tcpm_port);
destroy_workqueue:
destroy_workqueue(chip->wq);
-clear_client_data:
- i2c_set_clientdata(client, NULL);
- fusb302_debugfs_exit(chip);
return ret;
}
@@ -1816,7 +1826,6 @@ static int fusb302_remove(struct i2c_client *client)
tcpm_unregister_port(chip->tcpm_port);
destroy_workqueue(chip->wq);
- i2c_set_clientdata(client, NULL);
fusb302_debugfs_exit(chip);
return 0;
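
The tcpm_set_cc() change above re-enables hardware toggling for fixed-role configurations: a pulled-down CC (Rd) means the port should keep detecting as a sink, and any Rp level means it should keep detecting as a source. A minimal sketch of that mapping, assuming enum typec_cc_status from <linux/usb/typec.h> (the helper name is illustrative; the enum mirrors the driver's):

    #include <linux/usb/typec.h>

    enum toggling_mode { TOGGLING_MODE_OFF, TOGGLING_MODE_DRP,
                         TOGGLING_MODE_SNK, TOGGLING_MODE_SRC };

    static enum toggling_mode cc_to_toggling_mode(enum typec_cc_status cc)
    {
            switch (cc) {
            case TYPEC_CC_RD:          /* fixed sink: present Rd, detect Rp */
                    return TOGGLING_MODE_SNK;
            case TYPEC_CC_RP_DEF:      /* fixed source at any Rp level */
            case TYPEC_CC_RP_1_5:
            case TYPEC_CC_RP_3_0:
                    return TOGGLING_MODE_SRC;
            default:                   /* open/DRP: leave toggling alone */
                    return TOGGLING_MODE_OFF;
            }
    }
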
diff --git a/drivers/usb/typec/fusb302/fusb302_reg.h b/drivers/usb/typec/tcpm/fusb302_reg.h
index 00b39d365478..00b39d365478 100644
--- a/drivers/usb/typec/fusb302/fusb302_reg.h
+++ b/drivers/usb/typec/tcpm/fusb302_reg.h
diff --git a/drivers/usb/typec/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index ac6b418b15f1..ac6b418b15f1 100644
--- a/drivers/usb/typec/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
diff --git a/drivers/usb/typec/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
index 303ebde26546..303ebde26546 100644
--- a/drivers/usb/typec/tcpci.h
+++ b/drivers/usb/typec/tcpm/tcpci.h
diff --git a/drivers/usb/typec/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 017389021b96..017389021b96 100644
--- a/drivers/usb/typec/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 4f1f4215f3d6..dbbd71f754d0 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1430,8 +1430,8 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
break;
- if (pdo_pps_apdo_max_current(pdo[i]) <
- pdo_pps_apdo_max_current(pdo[i - 1]))
+ if (pdo_pps_apdo_max_voltage(pdo[i]) <
+ pdo_pps_apdo_max_voltage(pdo[i - 1]))
return PDO_ERR_PPS_APDO_NOT_SORTED;
else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
@@ -2209,7 +2209,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
{
unsigned int i, j, max_mw = 0, max_mv = 0;
unsigned int min_src_mv, max_src_mv, src_ma, src_mw;
- unsigned int min_snk_mv, max_snk_mv, snk_ma;
+ unsigned int min_snk_mv, max_snk_mv;
u32 pdo;
unsigned int src_pdo = 0, snk_pdo = 0;
@@ -2253,8 +2253,6 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
pdo_pps_apdo_min_voltage(pdo);
max_snk_mv =
pdo_pps_apdo_max_voltage(pdo);
- snk_ma =
- pdo_pps_apdo_max_current(pdo);
break;
default:
tcpm_log(port,
@@ -2402,7 +2400,7 @@ static int tcpm_pd_send_request(struct tcpm_port *port)
static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
{
- unsigned int out_mv, op_ma, op_mw, min_mv, max_mv, max_ma, flags;
+ unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
enum pd_pdo_type type;
unsigned int src_pdo_index;
u32 pdo;
@@ -2420,7 +2418,6 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
tcpm_log(port, "Invalid APDO selected!");
return -EINVAL;
}
- min_mv = port->pps_data.min_volt;
max_mv = port->pps_data.max_volt;
max_ma = port->pps_data.max_curr;
out_mv = port->pps_data.out_volt;
@@ -4116,6 +4113,9 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
goto port_unlock;
}
+ /* Round down operating current to align with PPS valid steps */
+ op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
+
reinit_completion(&port->pps_complete);
port->pps_data.op_curr = op_curr;
port->pps_status = 0;
@@ -4169,6 +4169,9 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
goto port_unlock;
}
+ /* Round down output voltage to align with PPS valid steps */
+ out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
+
reinit_completion(&port->pps_complete);
port->pps_data.out_volt = out_volt;
port->pps_status = 0;
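
The two hunks above clamp user-requested PPS values onto the grid the RDO encoding can express. A standalone sketch of the round-down arithmetic, using the 50 mA and 20 mV step sizes that RDO_PROG_CURR_MA_STEP and RDO_PROG_VOLT_MV_STEP define:

    #include <stdio.h>

    /* Round a requested value down to the nearest valid PPS step. */
    static unsigned int round_down_to_step(unsigned int val, unsigned int step)
    {
            return val - (val % step);
    }

    int main(void)
    {
            /* 2375 mA with a 50 mA step rounds down to 2350 mA */
            printf("%u\n", round_down_to_step(2375, 50));
            /* 5013 mV with a 20 mV step rounds down to 5000 mV */
            printf("%u\n", round_down_to_step(5013, 20));
            return 0;
    }
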
diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 423208e19383..423208e19383 100644
--- a/drivers/usb/typec/typec_wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index d11f3f8dad40..1e592ec94ba4 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -318,8 +318,9 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
struct vhci_hcd *vhci_hcd;
struct vhci *vhci;
int retval = 0;
- int rhport;
+ int rhport = -1;
unsigned long flags;
+ bool invalid_rhport = false;
u32 prev_port_status[VHCI_HC_PORTS];
@@ -334,9 +335,19 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh("typeReq %x wValue %x wIndex %x\n", typeReq, wValue,
wIndex);
- if (wIndex > VHCI_HC_PORTS)
- pr_err("invalid port number %d\n", wIndex);
- rhport = wIndex - 1;
+ /*
+	 * wIndex can be 0 for some request types (typeReq). rhport is
+	 * in the valid range only when wIndex >= 1 and <= VHCI_HC_PORTS.
+	 *
+	 * Reference port_status[] only with a valid rhport, i.e. only
+	 * when invalid_rhport is false.
+ */
+ if (wIndex < 1 || wIndex > VHCI_HC_PORTS) {
+ invalid_rhport = true;
+ if (wIndex > VHCI_HC_PORTS)
+ pr_err("invalid port number %d\n", wIndex);
+ } else
+ rhport = wIndex - 1;
vhci_hcd = hcd_to_vhci_hcd(hcd);
vhci = vhci_hcd->vhci;
@@ -345,8 +356,9 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* store old status and compare now and old later */
if (usbip_dbg_flag_vhci_rh) {
- memcpy(prev_port_status, vhci_hcd->port_status,
- sizeof(prev_port_status));
+ if (!invalid_rhport)
+ memcpy(prev_port_status, vhci_hcd->port_status,
+ sizeof(prev_port_status));
}
switch (typeReq) {
@@ -354,8 +366,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(" ClearHubFeature\n");
break;
case ClearPortFeature:
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if (hcd->speed == HCD_USB3) {
@@ -415,9 +429,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case GetPortStatus:
usbip_dbg_vhci_rh(" GetPortStatus port %x\n", wIndex);
- if (wIndex < 1) {
+ if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
retval = -EPIPE;
+ goto error;
}
/* we do not care about resume. */
@@ -513,16 +528,20 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
break;
case USB_PORT_FEAT_POWER:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_POWER\n");
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
if (hcd->speed == HCD_USB3)
vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
else
@@ -531,8 +550,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_BH_PORT_RESET:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
/* Applicable only for USB3.0 hub */
if (hcd->speed != HCD_USB3) {
pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
@@ -543,8 +564,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_RESET:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_RESET\n");
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
/* if it's already enabled, disable */
if (hcd->speed == HCD_USB3) {
vhci_hcd->port_status[rhport] = 0;
@@ -565,8 +588,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
wValue);
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
if (hcd->speed == HCD_USB3) {
if ((vhci_hcd->port_status[rhport] &
USB_SS_PORT_STAT_POWER) != 0) {
@@ -608,7 +633,7 @@ error:
if (usbip_dbg_flag_vhci_rh) {
pr_debug("port %d\n", rhport);
/* Only dump valid port status */
- if (rhport >= 0) {
+ if (!invalid_rhport) {
dump_port_status_diff(prev_port_status[rhport],
vhci_hcd->port_status[rhport],
hcd->speed == HCD_USB3);
@@ -618,8 +643,10 @@ error:
spin_unlock_irqrestore(&vhci->lock, flags);
- if ((vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0)
+ if (!invalid_rhport &&
+ (vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0) {
usb_hcd_poll_rh_status(hcd);
+ }
return retval;
}
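
The vhci_hub_control() rework above centralizes the port-index check: wIndex may legitimately be 0 for hub-wide requests, so port_status[] must only be indexed once wIndex has been validated. A standalone sketch of the derivation (the VHCI_HC_PORTS value here is illustrative):

    #include <stdbool.h>

    #define VHCI_HC_PORTS 8  /* illustrative; the driver defines its own */

    /* Returns false when the caller must not touch port_status[]. */
    static bool rhport_from_windex(unsigned int wIndex, int *rhport)
    {
            if (wIndex < 1 || wIndex > VHCI_HC_PORTS)
                    return false;
            *rhport = wIndex - 1;   /* 1-based wIndex -> 0-based port */
            return true;
    }
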
diff --git a/drivers/usb/usbip/vudc_main.c b/drivers/usb/usbip/vudc_main.c
index 3fc22037a82f..390733e6937e 100644
--- a/drivers/usb/usbip/vudc_main.c
+++ b/drivers/usb/usbip/vudc_main.c
@@ -73,6 +73,10 @@ static int __init init(void)
cleanup:
list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
list_del(&udc_dev->dev_entry);
+ /*
+		 * Just do platform_device_del() here; put_vudc_device()
+		 * calls platform_device_put().
+ */
platform_device_del(udc_dev->pdev);
put_vudc_device(udc_dev);
}
@@ -89,7 +93,11 @@ static void __exit cleanup(void)
list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
list_del(&udc_dev->dev_entry);
- platform_device_unregister(udc_dev->pdev);
+ /*
+		 * Just do platform_device_del() here; put_vudc_device()
+		 * calls platform_device_put().
+ */
+ platform_device_del(udc_dev->pdev);
put_vudc_device(udc_dev);
}
platform_driver_unregister(&vudc_driver);
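
The vudc change works because platform_device_unregister() is simply platform_device_del() followed by platform_device_put(); since put_vudc_device() already performs the put, calling unregister here would drop the final reference twice. A sketch of the equivalence, with the helper name illustrative:

    #include <linux/platform_device.h>

    /* platform_device_unregister(pdev) is equivalent to this pair: */
    static void vudc_style_teardown(struct platform_device *pdev)
    {
            platform_device_del(pdev);   /* remove the device from the bus */
            platform_device_put(pdev);   /* drop the final reference; in
                                          * vudc this half is done inside
                                          * put_vudc_device() */
    }
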
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index aff50eb09ca9..68ddee86a886 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -189,7 +189,7 @@ struct wusb_mac_scratch {
* NOTE: blen is not aligned to a block size, we'll pad zeros, that's
* what sg[4] is for. Maybe there is a smarter way to do this.
*/
-static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
+static int wusb_ccm_mac(struct crypto_sync_skcipher *tfm_cbc,
struct crypto_cipher *tfm_aes,
struct wusb_mac_scratch *scratch,
void *mic,
@@ -198,7 +198,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
size_t blen)
{
int result = 0;
- SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
struct scatterlist sg[4], sg_dst;
void *dst_buf;
size_t dst_size;
@@ -224,7 +224,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
if (!dst_buf)
goto error_dst_buf;
- iv = kzalloc(crypto_skcipher_ivsize(tfm_cbc), GFP_KERNEL);
+ iv = kzalloc(crypto_sync_skcipher_ivsize(tfm_cbc), GFP_KERNEL);
if (!iv)
goto error_iv;
@@ -251,7 +251,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0);
sg_init_one(&sg_dst, dst_buf, dst_size);
- skcipher_request_set_tfm(req, tfm_cbc);
+ skcipher_request_set_sync_tfm(req, tfm_cbc);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, &sg_dst, dst_size, iv);
result = crypto_skcipher_encrypt(req);
@@ -298,19 +298,19 @@ ssize_t wusb_prf(void *out, size_t out_size,
{
ssize_t result, bytes = 0, bitr;
struct aes_ccm_nonce n = *_n;
- struct crypto_skcipher *tfm_cbc;
+ struct crypto_sync_skcipher *tfm_cbc;
struct crypto_cipher *tfm_aes;
struct wusb_mac_scratch *scratch;
u64 sfn = 0;
__le64 sfn_le;
- tfm_cbc = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm_cbc = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
if (IS_ERR(tfm_cbc)) {
result = PTR_ERR(tfm_cbc);
printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
goto error_alloc_cbc;
}
- result = crypto_skcipher_setkey(tfm_cbc, key, 16);
+ result = crypto_sync_skcipher_setkey(tfm_cbc, key, 16);
if (result < 0) {
printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
goto error_setkey_cbc;
@@ -351,7 +351,7 @@ error_setkey_aes:
crypto_free_cipher(tfm_aes);
error_alloc_aes:
error_setkey_cbc:
- crypto_free_skcipher(tfm_cbc);
+ crypto_free_sync_skcipher(tfm_cbc);
error_alloc_cbc:
return result;
}
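
The crypto.c conversion above moves from the generic skcipher API to the synchronous variant, which lets the request live on the stack without variable-length-array warnings. A hedged sketch of the allocate/setkey/encrypt pattern used above (the function name is illustrative; the algorithm string and 16-byte key length mirror the wusb code):

    #include <crypto/skcipher.h>
    #include <linux/scatterlist.h>

    static int cbc_aes_encrypt_once(struct scatterlist *src,
                                    struct scatterlist *dst,
                                    unsigned int len, u8 *key, u8 *iv)
    {
            struct crypto_sync_skcipher *tfm;
            int ret;

            tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_sync_skcipher_setkey(tfm, key, 16);
            if (!ret) {
                    /* The request lives on the stack; no allocation needed. */
                    SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

                    skcipher_request_set_sync_tfm(req, tfm);
                    skcipher_request_set_callback(req, 0, NULL, NULL);
                    skcipher_request_set_crypt(req, src, dst, len, iv);
                    ret = crypto_skcipher_encrypt(req);
                    skcipher_request_zero(req);
            }
            crypto_free_sync_skcipher(tfm);
            return ret;
    }
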
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index 38884aac862b..a5734cbcd5ad 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -470,9 +470,7 @@ error:
int wa_rpipes_create(struct wahc *wa)
{
wa->rpipes = le16_to_cpu(wa->wa_descr->wNumRPipes);
- wa->rpipe_bm = kcalloc(BITS_TO_LONGS(wa->rpipes),
- sizeof(unsigned long),
- GFP_KERNEL);
+ wa->rpipe_bm = bitmap_zalloc(wa->rpipes, GFP_KERNEL);
if (wa->rpipe_bm == NULL)
return -ENOMEM;
return 0;
@@ -487,7 +485,7 @@ void wa_rpipes_destroy(struct wahc *wa)
dev_err(dev, "BUG: pipes not released on exit: %*pb\n",
wa->rpipes, wa->rpipe_bm);
}
- kfree(wa->rpipe_bm);
+ bitmap_free(wa->rpipe_bm);
}
/*
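
The wa-rpipe.c change swaps an open-coded bitmap allocation for the dedicated helpers from <linux/bitmap.h>. A sketch of the before/after shapes (the wrapper names here are illustrative):

    #include <linux/bitmap.h>
    #include <linux/slab.h>

    static unsigned long *alloc_pipe_bitmap(unsigned int nbits)
    {
            /* Old open-coded form:
             *   kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long),
             *           GFP_KERNEL);
             * The helper allocates and zeroes the same storage: */
            return bitmap_zalloc(nbits, GFP_KERNEL);
    }

    static void free_pipe_bitmap(unsigned long *bm)
    {
            bitmap_free(bm);   /* pairs with bitmap_zalloc() */
    }
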
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 96721b154454..b30926e11d87 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -444,7 +444,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
struct mm_iommu_table_group_mem_t *mem = NULL;
int ret;
unsigned long hpa = 0;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
if (!pua)
return;
@@ -467,8 +467,27 @@ static int tce_iommu_clear(struct tce_container *container,
unsigned long oldhpa;
long ret;
enum dma_data_direction direction;
+ unsigned long lastentry = entry + pages;
+
+ for ( ; entry < lastentry; ++entry) {
+ if (tbl->it_indirect_levels && tbl->it_userspace) {
+ /*
+			 * For multilevel tables we can take a shortcut and
+			 * skip some TCEs: the userspace address cache mirrors
+			 * the real TCE table, so if an indirect level is
+			 * missing from the cache, the hardware table has not
+			 * allocated it either and therefore needs no update.
+ */
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
+ entry);
+ if (!pua) {
+ /* align to level_size which is power of two */
+ entry |= tbl->it_level_size - 1;
+ continue;
+ }
+ }
- for ( ; pages; --pages, ++entry) {
cond_resched();
direction = DMA_NONE;
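
The skip in the hunk above relies on it_level_size being a power of two: OR-ing in level_size - 1 jumps entry to the last slot of the current indirect block, so the loop's ++entry lands on the first slot of the next block. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long level_size = 512;   /* must be a power of two */
            unsigned long entry = 1000;

            /* Jump to the last entry of the current indirect block;
             * a subsequent ++entry moves to the start of the next one. */
            entry |= level_size - 1;
            printf("%lu\n", entry);           /* prints 1023 */
            return 0;
    }
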
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 4e656f89cb22..ab11b2bee273 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -116,6 +116,8 @@ struct vhost_net_virtqueue {
* For RX, number of batched heads
*/
int done_idx;
+ /* Number of XDP frames batched */
+ int batched_xdp;
/* an array of userspace buffers info */
struct ubuf_info *ubuf_info;
/* Reference counting for outstanding ubufs.
@@ -123,6 +125,8 @@ struct vhost_net_virtqueue {
struct vhost_net_ubuf_ref *ubufs;
struct ptr_ring *rx_ring;
struct vhost_net_buf rxq;
+ /* Batched XDP buffs */
+ struct xdp_buff *xdp;
};
struct vhost_net {
@@ -338,6 +342,11 @@ static bool vhost_sock_zcopy(struct socket *sock)
sock_flag(sock->sk, SOCK_ZEROCOPY);
}
+static bool vhost_sock_xdp(struct socket *sock)
+{
+ return sock_flag(sock->sk, SOCK_XDP);
+}
+
/* In case of DMA done not in order in lower device driver for some reason.
* upend_idx is used to track end of used idx, done_idx is used to track head
* of used idx. Once lower device DMA done contiguously, we will signal KVM
@@ -444,32 +453,120 @@ static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
nvq->done_idx = 0;
}
+static void vhost_tx_batch(struct vhost_net *net,
+ struct vhost_net_virtqueue *nvq,
+ struct socket *sock,
+ struct msghdr *msghdr)
+{
+ struct tun_msg_ctl ctl = {
+ .type = TUN_MSG_PTR,
+ .num = nvq->batched_xdp,
+ .ptr = nvq->xdp,
+ };
+ int err;
+
+ if (nvq->batched_xdp == 0)
+ goto signal_used;
+
+ msghdr->msg_control = &ctl;
+ err = sock->ops->sendmsg(sock, msghdr, 0);
+ if (unlikely(err < 0)) {
+		vq_err(&nvq->vq, "Failed to send batched packets\n");
+ return;
+ }
+
+signal_used:
+ vhost_net_signal_used(nvq);
+ nvq->batched_xdp = 0;
+}
+
+static int sock_has_rx_data(struct socket *sock)
+{
+ if (unlikely(!sock))
+ return 0;
+
+ if (sock->ops->peek_len)
+ return sock->ops->peek_len(sock);
+
+ return skb_queue_empty(&sock->sk->sk_receive_queue);
+}
+
+static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
+ struct vhost_virtqueue *vq)
+{
+ if (!vhost_vq_avail_empty(&net->dev, vq)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ vhost_disable_notify(&net->dev, vq);
+ vhost_poll_queue(&vq->poll);
+ }
+}
+
+static void vhost_net_busy_poll(struct vhost_net *net,
+ struct vhost_virtqueue *rvq,
+ struct vhost_virtqueue *tvq,
+ bool *busyloop_intr,
+ bool poll_rx)
+{
+ unsigned long busyloop_timeout;
+ unsigned long endtime;
+ struct socket *sock;
+ struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
+
+ mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX);
+ vhost_disable_notify(&net->dev, vq);
+ sock = rvq->private_data;
+
+ busyloop_timeout = poll_rx ? rvq->busyloop_timeout:
+ tvq->busyloop_timeout;
+
+ preempt_disable();
+ endtime = busy_clock() + busyloop_timeout;
+
+ while (vhost_can_busy_poll(endtime)) {
+ if (vhost_has_work(&net->dev)) {
+ *busyloop_intr = true;
+ break;
+ }
+
+ if ((sock_has_rx_data(sock) &&
+ !vhost_vq_avail_empty(&net->dev, rvq)) ||
+ !vhost_vq_avail_empty(&net->dev, tvq))
+ break;
+
+ cpu_relax();
+ }
+
+ preempt_enable();
+
+ if (poll_rx || sock_has_rx_data(sock))
+ vhost_net_busy_poll_try_queue(net, vq);
+ else if (!poll_rx) /* On tx here, sock has no rx data. */
+ vhost_enable_notify(&net->dev, rvq);
+
+ mutex_unlock(&vq->mutex);
+}
+
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
- struct vhost_net_virtqueue *nvq,
+ struct vhost_net_virtqueue *tnvq,
unsigned int *out_num, unsigned int *in_num,
- bool *busyloop_intr)
+ struct msghdr *msghdr, bool *busyloop_intr)
{
- struct vhost_virtqueue *vq = &nvq->vq;
- unsigned long uninitialized_var(endtime);
- int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
+ struct vhost_virtqueue *rvq = &rnvq->vq;
+ struct vhost_virtqueue *tvq = &tnvq->vq;
+
+ int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
out_num, in_num, NULL, NULL);
- if (r == vq->num && vq->busyloop_timeout) {
- if (!vhost_sock_zcopy(vq->private_data))
- vhost_net_signal_used(nvq);
- preempt_disable();
- endtime = busy_clock() + vq->busyloop_timeout;
- while (vhost_can_busy_poll(endtime)) {
- if (vhost_has_work(vq->dev)) {
- *busyloop_intr = true;
- break;
- }
- if (!vhost_vq_avail_empty(vq->dev, vq))
- break;
- cpu_relax();
- }
- preempt_enable();
- r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ if (r == tvq->num && tvq->busyloop_timeout) {
+ /* Flush batched packets first */
+ if (!vhost_sock_zcopy(tvq->private_data))
+ vhost_tx_batch(net, tnvq, tvq->private_data, msghdr);
+
+ vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
+
+ r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
out_num, in_num, NULL, NULL);
}
@@ -512,7 +609,7 @@ static int get_tx_bufs(struct vhost_net *net,
struct vhost_virtqueue *vq = &nvq->vq;
int ret;
- ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr);
+ ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);
if (ret < 0 || ret == vq->num)
return ret;
@@ -540,6 +637,80 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
!vhost_vq_avail_empty(vq->dev, vq);
}
+#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
+
+static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
+ struct iov_iter *from)
+{
+ struct vhost_virtqueue *vq = &nvq->vq;
+ struct socket *sock = vq->private_data;
+ struct page_frag *alloc_frag = &current->task_frag;
+ struct virtio_net_hdr *gso;
+ struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
+ struct tun_xdp_hdr *hdr;
+ size_t len = iov_iter_count(from);
+ int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
+ int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
+ int sock_hlen = nvq->sock_hlen;
+ void *buf;
+ int copied;
+
+ if (unlikely(len < nvq->sock_hlen))
+ return -EFAULT;
+
+ if (SKB_DATA_ALIGN(len + pad) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
+ return -ENOSPC;
+
+ buflen += SKB_DATA_ALIGN(len + pad);
+ alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
+ if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
+ return -ENOMEM;
+
+ buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+ copied = copy_page_from_iter(alloc_frag->page,
+ alloc_frag->offset +
+ offsetof(struct tun_xdp_hdr, gso),
+ sock_hlen, from);
+ if (copied != sock_hlen)
+ return -EFAULT;
+
+ hdr = buf;
+ gso = &hdr->gso;
+
+ if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+ vhost16_to_cpu(vq, gso->csum_start) +
+ vhost16_to_cpu(vq, gso->csum_offset) + 2 >
+ vhost16_to_cpu(vq, gso->hdr_len)) {
+ gso->hdr_len = cpu_to_vhost16(vq,
+ vhost16_to_cpu(vq, gso->csum_start) +
+ vhost16_to_cpu(vq, gso->csum_offset) + 2);
+
+ if (vhost16_to_cpu(vq, gso->hdr_len) > len)
+ return -EINVAL;
+ }
+
+ len -= sock_hlen;
+ copied = copy_page_from_iter(alloc_frag->page,
+ alloc_frag->offset + pad,
+ len, from);
+ if (copied != len)
+ return -EFAULT;
+
+ xdp->data_hard_start = buf;
+ xdp->data = buf + pad;
+ xdp->data_end = xdp->data + len;
+ hdr->buflen = buflen;
+
+ get_page(alloc_frag->page);
+ alloc_frag->offset += buflen;
+
+ ++nvq->batched_xdp;
+
+ return 0;
+}
+
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
@@ -556,10 +727,14 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
size_t len, total_len = 0;
int err;
int sent_pkts = 0;
+ bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
for (;;) {
bool busyloop_intr = false;
+ if (nvq->done_idx == VHOST_NET_BATCH)
+ vhost_tx_batch(net, nvq, sock, &msg);
+
head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
&busyloop_intr);
/* On error, stop handling until the next kick. */
@@ -577,14 +752,34 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
break;
}
- vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
- vq->heads[nvq->done_idx].len = 0;
-
total_len += len;
- if (tx_can_batch(vq, total_len))
- msg.msg_flags |= MSG_MORE;
- else
- msg.msg_flags &= ~MSG_MORE;
+
+ /* For simplicity, TX batching is only enabled if
+ * sndbuf is unlimited.
+ */
+ if (sock_can_batch) {
+ err = vhost_net_build_xdp(nvq, &msg.msg_iter);
+ if (!err) {
+ goto done;
+ } else if (unlikely(err != -ENOSPC)) {
+ vhost_tx_batch(net, nvq, sock, &msg);
+ vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
+ break;
+ }
+
+			/* We can't build an XDP buff, so fall back to
+			 * the single-packet path, but flush any batched
+			 * packets first.
+ */
+ vhost_tx_batch(net, nvq, sock, &msg);
+ msg.msg_control = NULL;
+ } else {
+ if (tx_can_batch(vq, total_len))
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags &= ~MSG_MORE;
+ }
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(sock, &msg, len);
@@ -596,15 +791,17 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
if (err != len)
pr_debug("Truncated TX packet: len %d != %zd\n",
err, len);
- if (++nvq->done_idx >= VHOST_NET_BATCH)
- vhost_net_signal_used(nvq);
+done:
+ vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
+ vq->heads[nvq->done_idx].len = 0;
+ ++nvq->done_idx;
if (vhost_exceeds_weight(++sent_pkts, total_len)) {
vhost_poll_queue(&vq->poll);
break;
}
}
- vhost_net_signal_used(nvq);
+ vhost_tx_batch(net, nvq, sock, &msg);
}
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
@@ -620,6 +817,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
.msg_controllen = 0,
.msg_flags = MSG_DONTWAIT,
};
+ struct tun_msg_ctl ctl;
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
@@ -664,8 +862,10 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
ubuf->ctx = nvq->ubufs;
ubuf->desc = nvq->upend_idx;
refcount_set(&ubuf->refcnt, 1);
- msg.msg_control = ubuf;
- msg.msg_controllen = sizeof(ubuf);
+ msg.msg_control = &ctl;
+ ctl.type = TUN_MSG_UBUF;
+ ctl.ptr = ubuf;
+ msg.msg_controllen = sizeof(ctl);
ubufs = nvq->ubufs;
atomic_inc(&ubufs->refcount);
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
@@ -716,7 +916,7 @@ static void handle_tx(struct vhost_net *net)
struct vhost_virtqueue *vq = &nvq->vq;
struct socket *sock;
- mutex_lock(&vq->mutex);
+ mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
sock = vq->private_data;
if (!sock)
goto out;
@@ -757,16 +957,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
return len;
}
-static int sk_has_rx_data(struct sock *sk)
-{
- struct socket *sock = sk->sk_socket;
-
- if (sock->ops->peek_len)
- return sock->ops->peek_len(sock);
-
- return skb_queue_empty(&sk->sk_receive_queue);
-}
-
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
bool *busyloop_intr)
{
@@ -774,41 +964,13 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *rvq = &rnvq->vq;
struct vhost_virtqueue *tvq = &tnvq->vq;
- unsigned long uninitialized_var(endtime);
int len = peek_head_len(rnvq, sk);
- if (!len && tvq->busyloop_timeout) {
+ if (!len && rvq->busyloop_timeout) {
/* Flush batched heads first */
vhost_net_signal_used(rnvq);
/* Both tx vq and rx socket were polled here */
- mutex_lock_nested(&tvq->mutex, 1);
- vhost_disable_notify(&net->dev, tvq);
-
- preempt_disable();
- endtime = busy_clock() + tvq->busyloop_timeout;
-
- while (vhost_can_busy_poll(endtime)) {
- if (vhost_has_work(&net->dev)) {
- *busyloop_intr = true;
- break;
- }
- if ((sk_has_rx_data(sk) &&
- !vhost_vq_avail_empty(&net->dev, rvq)) ||
- !vhost_vq_avail_empty(&net->dev, tvq))
- break;
- cpu_relax();
- }
-
- preempt_enable();
-
- if (!vhost_vq_avail_empty(&net->dev, tvq)) {
- vhost_poll_queue(&tvq->poll);
- } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
- vhost_disable_notify(&net->dev, tvq);
- vhost_poll_queue(&tvq->poll);
- }
-
- mutex_unlock(&tvq->mutex);
+ vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);
len = peek_head_len(rnvq, sk);
}
@@ -923,7 +1085,7 @@ static void handle_rx(struct vhost_net *net)
__virtio16 num_buffers;
int recv_pkts = 0;
- mutex_lock_nested(&vq->mutex, 0);
+ mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
sock = vq->private_data;
if (!sock)
goto out;
@@ -1078,6 +1240,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct vhost_dev *dev;
struct vhost_virtqueue **vqs;
void **queue;
+ struct xdp_buff *xdp;
int i;
n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
@@ -1098,6 +1261,15 @@ static int vhost_net_open(struct inode *inode, struct file *f)
}
n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
+ xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
+ if (!xdp) {
+ kfree(vqs);
+ kvfree(n);
+ kfree(queue);
+ return -ENOMEM;
+ }
+ n->vqs[VHOST_NET_VQ_TX].xdp = xdp;
+
dev = &n->dev;
vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
@@ -1108,6 +1280,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
n->vqs[i].ubuf_info = NULL;
n->vqs[i].upend_idx = 0;
n->vqs[i].done_idx = 0;
+ n->vqs[i].batched_xdp = 0;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
n->vqs[i].rx_ring = NULL;
@@ -1191,6 +1364,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
* since jobs can re-queue themselves. */
vhost_net_flush(n);
kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
+ kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
kfree(n->dev.vqs);
kvfree(n);
return 0;
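
The vhost/net.c changes above thread a typed control structure through sendmsg() so the tap/tun backend can tell zerocopy completions apart from batched XDP frames. A sketch of the two control shapes, assuming struct tun_msg_ctl and the TUN_MSG_* constants from <linux/if_tun.h> (the fill helpers are illustrative):

    #include <linux/if_tun.h>
    #include <linux/skbuff.h>   /* struct ubuf_info */
    #include <net/xdp.h>        /* struct xdp_buff */

    /* Batched XDP TX: ptr points at an array of n xdp_buff frames. */
    static void ctl_for_xdp(struct tun_msg_ctl *ctl, struct xdp_buff *xdp,
                            unsigned short n)
    {
            ctl->type = TUN_MSG_PTR;
            ctl->num  = n;
            ctl->ptr  = xdp;
    }

    /* Zerocopy TX: ptr carries the completion context, no count needed. */
    static void ctl_for_ubuf(struct tun_msg_ctl *ctl, struct ubuf_info *ubuf)
    {
            ctl->type = TUN_MSG_UBUF;
            ctl->ptr  = ubuf;
    }
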
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index b13c6b4b2c66..f52008bb8df7 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -294,8 +294,11 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
{
int i;
- for (i = 0; i < d->nvqs; ++i)
+ for (i = 0; i < d->nvqs; ++i) {
+ mutex_lock(&d->vqs[i]->mutex);
__vhost_vq_meta_reset(d->vqs[i]);
+ mutex_unlock(&d->vqs[i]->mutex);
+ }
}
static void vhost_vq_reset(struct vhost_dev *dev,
@@ -891,20 +894,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
#define vhost_get_used(vq, x, ptr) \
vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
-static void vhost_dev_lock_vqs(struct vhost_dev *d)
-{
- int i = 0;
- for (i = 0; i < d->nvqs; ++i)
- mutex_lock_nested(&d->vqs[i]->mutex, i);
-}
-
-static void vhost_dev_unlock_vqs(struct vhost_dev *d)
-{
- int i = 0;
- for (i = 0; i < d->nvqs; ++i)
- mutex_unlock(&d->vqs[i]->mutex);
-}
-
static int vhost_new_umem_range(struct vhost_umem *umem,
u64 start, u64 size, u64 end,
u64 userspace_addr, int perm)
@@ -954,7 +943,10 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
if (msg->iova <= vq_msg->iova &&
msg->iova + msg->size - 1 >= vq_msg->iova &&
vq_msg->type == VHOST_IOTLB_MISS) {
+ mutex_lock(&node->vq->mutex);
vhost_poll_queue(&node->vq->poll);
+ mutex_unlock(&node->vq->mutex);
+
list_del(&node->node);
kfree(node);
}
@@ -986,7 +978,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
int ret = 0;
mutex_lock(&dev->mutex);
- vhost_dev_lock_vqs(dev);
switch (msg->type) {
case VHOST_IOTLB_UPDATE:
if (!dev->iotlb) {
@@ -1020,7 +1011,6 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
break;
}
- vhost_dev_unlock_vqs(dev);
mutex_unlock(&dev->mutex);
return ret;
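
The vhost.c change above drops the coarse lock-all-vqs scheme: IOTLB message handling no longer holds every virtqueue mutex for the whole update, only the mutex of the vq actually being touched, and only briefly. A minimal sketch of the fine-grained pattern, with illustrative types and names:

    #include <linux/mutex.h>

    struct sketch_vq  { struct mutex mutex; /* per-virtqueue state */ };
    struct sketch_dev { int nvqs; struct sketch_vq **vqs; };

    static void touch_each_vq(struct sketch_dev *d,
                              void (*op)(struct sketch_vq *))
    {
            int i;

            for (i = 0; i < d->nvqs; ++i) {
                    mutex_lock(&d->vqs[i]->mutex);   /* one vq at a time */
                    op(d->vqs[i]);
                    mutex_unlock(&d->vqs[i]->mutex); /* released before the next */
            }
    }
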
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 2919e2334052..71ee978c848f 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -111,22 +111,6 @@ config LCD_HP700
If you have an HP Jornada 700 series handheld (710/720/728)
say Y to enable LCD control driver.
-config LCD_S6E63M0
- tristate "S6E63M0 AMOLED LCD Driver"
- depends on SPI && BACKLIGHT_CLASS_DEVICE
- default n
- help
- If you have an S6E63M0 LCD Panel, say Y to enable its
- LCD control driver.
-
-config LCD_LD9040
- tristate "LD9040 AMOLED LCD Driver"
- depends on SPI && BACKLIGHT_CLASS_DEVICE
- default n
- help
- If you have an LD9040 Panel, say Y to enable its
- control driver.
-
config LCD_AMS369FG06
tristate "AMS369FG06 AMOLED LCD Driver"
depends on SPI && BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 0dcc2c745c03..63c507c07437 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -9,13 +9,11 @@ obj-$(CONFIG_LCD_HX8357) += hx8357.o
obj-$(CONFIG_LCD_ILI922X) += ili922x.o
obj-$(CONFIG_LCD_ILI9320) += ili9320.o
obj-$(CONFIG_LCD_L4F00242T03) += l4f00242t03.o
-obj-$(CONFIG_LCD_LD9040) += ld9040.o
obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o
obj-$(CONFIG_LCD_LMS501KF03) += lms501kf03.o
obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
obj-$(CONFIG_LCD_OTM3225A) += otm3225a.o
obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
-obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 35373e2065b2..5e38353b4423 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -391,7 +391,7 @@ static struct platform_driver adp5520_bl_driver = {
module_platform_driver(adp5520_bl_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP5520(01) Backlight Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:adp5520-backlight");
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index f1dc41cf19e3..85318236da2f 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -822,5 +822,5 @@ static struct i2c_driver adp8860_driver = {
module_i2c_driver(adp8860_driver);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP8860 Backlight driver");
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 4fec9aa92d9b..8d50e0299578 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -992,5 +992,5 @@ static struct i2c_driver adp8870_driver = {
module_i2c_driver(adp8870_driver);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP8870 Backlight driver");
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
deleted file mode 100644
index 677f8abba27c..000000000000
--- a/drivers/video/backlight/ld9040.c
+++ /dev/null
@@ -1,811 +0,0 @@
-/*
- * ld9040 AMOLED LCD panel driver.
- *
- * Copyright (c) 2011 Samsung Electronics
- * Author: Donghwa Lee <dh09.lee@samsung.com>
- * Derived from drivers/video/backlight/s6e63m0.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/lcd.h>
-#include <linux/module.h>
-#include <linux/regulator/consumer.h>
-#include <linux/spi/spi.h>
-#include <linux/wait.h>
-
-#include "ld9040_gamma.h"
-
-#define SLEEPMSEC 0x1000
-#define ENDDEF 0x2000
-#define DEFMASK 0xFF00
-#define COMMAND_ONLY 0xFE
-#define DATA_ONLY 0xFF
-
-#define MIN_BRIGHTNESS 0
-#define MAX_BRIGHTNESS 24
-
-struct ld9040 {
- struct device *dev;
- struct spi_device *spi;
- unsigned int power;
- unsigned int current_brightness;
-
- struct lcd_device *ld;
- struct backlight_device *bd;
- struct lcd_platform_data *lcd_pd;
-
- struct mutex lock;
- bool enabled;
-};
-
-static struct regulator_bulk_data supplies[] = {
- { .supply = "vdd3", },
- { .supply = "vci", },
-};
-
-static void ld9040_regulator_enable(struct ld9040 *lcd)
-{
- int ret = 0;
- struct lcd_platform_data *pd = NULL;
-
- pd = lcd->lcd_pd;
- mutex_lock(&lcd->lock);
- if (!lcd->enabled) {
- ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
- if (ret)
- goto out;
-
- lcd->enabled = true;
- }
- msleep(pd->power_on_delay);
-out:
- mutex_unlock(&lcd->lock);
-}
-
-static void ld9040_regulator_disable(struct ld9040 *lcd)
-{
- int ret = 0;
-
- mutex_lock(&lcd->lock);
- if (lcd->enabled) {
- ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
- if (ret)
- goto out;
-
- lcd->enabled = false;
- }
-out:
- mutex_unlock(&lcd->lock);
-}
-
-static const unsigned short seq_swreset[] = {
- 0x01, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_user_setting[] = {
- 0xF0, 0x5A,
-
- DATA_ONLY, 0x5A,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_elvss_on[] = {
- 0xB1, 0x0D,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x16,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gtcon[] = {
- 0xF7, 0x09,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_panel_condition[] = {
- 0xF8, 0x05,
-
- DATA_ONLY, 0x65,
- DATA_ONLY, 0x96,
- DATA_ONLY, 0x71,
- DATA_ONLY, 0x7D,
- DATA_ONLY, 0x19,
- DATA_ONLY, 0x3B,
- DATA_ONLY, 0x0D,
- DATA_ONLY, 0x19,
- DATA_ONLY, 0x7E,
- DATA_ONLY, 0x0D,
- DATA_ONLY, 0xE2,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x7E,
- DATA_ONLY, 0x7D,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x02,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gamma_set1[] = {
- 0xF9, 0x00,
-
- DATA_ONLY, 0xA7,
- DATA_ONLY, 0xB4,
- DATA_ONLY, 0xAE,
- DATA_ONLY, 0xBF,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x91,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xB2,
- DATA_ONLY, 0xB4,
- DATA_ONLY, 0xAA,
- DATA_ONLY, 0xBB,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xAC,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xB3,
- DATA_ONLY, 0xB1,
- DATA_ONLY, 0xAA,
- DATA_ONLY, 0xBC,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xB3,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gamma_ctrl[] = {
- 0xFB, 0x02,
-
- DATA_ONLY, 0x5A,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gamma_start[] = {
- 0xF9, COMMAND_ONLY,
-
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_apon[] = {
- 0xF3, 0x00,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x0A,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_display_ctrl[] = {
- 0xF2, 0x02,
-
- DATA_ONLY, 0x08,
- DATA_ONLY, 0x08,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x10,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_manual_pwr[] = {
- 0xB0, 0x04,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_pwr_ctrl[] = {
- 0xF4, 0x0A,
-
- DATA_ONLY, 0x87,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x6A,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x02,
- DATA_ONLY, 0x88,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_sleep_out[] = {
- 0x11, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_sleep_in[] = {
- 0x10, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_display_on[] = {
- 0x29, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_display_off[] = {
- 0x28, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vci1_1st_en[] = {
- 0xF3, 0x10,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vl1_en[] = {
- 0xF3, 0x11,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vl2_en[] = {
- 0xF3, 0x13,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vci1_2nd_en[] = {
- 0xF3, 0x33,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vl3_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vreg1_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x01,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vgh_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vgl_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x31,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vmos_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xB1,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vint_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xF1,
- /* DATA_ONLY, 0x71, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vbh_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xF9,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vbl_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFD,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gam_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_sd_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x80,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gls_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x81,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_els_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x83,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_el_on[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x87,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static int ld9040_spi_write_byte(struct ld9040 *lcd, int addr, int data)
-{
- u16 buf[1];
- struct spi_message msg;
-
- struct spi_transfer xfer = {
- .len = 2,
- .tx_buf = buf,
- };
-
- buf[0] = (addr << 8) | data;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- return spi_sync(lcd->spi, &msg);
-}
-
-static int ld9040_spi_write(struct ld9040 *lcd, unsigned char address,
- unsigned char command)
-{
- int ret = 0;
-
- if (address != DATA_ONLY)
- ret = ld9040_spi_write_byte(lcd, 0x0, address);
- if (command != COMMAND_ONLY)
- ret = ld9040_spi_write_byte(lcd, 0x1, command);
-
- return ret;
-}
-
-static int ld9040_panel_send_sequence(struct ld9040 *lcd,
- const unsigned short *wbuf)
-{
- int ret = 0, i = 0;
-
- while ((wbuf[i] & DEFMASK) != ENDDEF) {
- if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
- ret = ld9040_spi_write(lcd, wbuf[i], wbuf[i+1]);
- if (ret)
- break;
- } else {
- msleep(wbuf[i+1]);
- }
- i += 2;
- }
-
- return ret;
-}
-
-static int _ld9040_gamma_ctl(struct ld9040 *lcd, const unsigned int *gamma)
-{
- unsigned int i = 0;
- int ret = 0;
-
- /* start gamma table updating. */
- ret = ld9040_panel_send_sequence(lcd, seq_gamma_start);
- if (ret) {
- dev_err(lcd->dev, "failed to disable gamma table updating.\n");
- goto gamma_err;
- }
-
- for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
- ret = ld9040_spi_write(lcd, DATA_ONLY, gamma[i]);
- if (ret) {
- dev_err(lcd->dev, "failed to set gamma table.\n");
- goto gamma_err;
- }
- }
-
- /* update gamma table. */
- ret = ld9040_panel_send_sequence(lcd, seq_gamma_ctrl);
- if (ret)
- dev_err(lcd->dev, "failed to update gamma table.\n");
-
-gamma_err:
- return ret;
-}
-
-static int ld9040_gamma_ctl(struct ld9040 *lcd, int gamma)
-{
- return _ld9040_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
-}
-
-static int ld9040_ldi_init(struct ld9040 *lcd)
-{
- int ret, i;
- static const unsigned short *init_seq[] = {
- seq_user_setting,
- seq_panel_condition,
- seq_display_ctrl,
- seq_manual_pwr,
- seq_elvss_on,
- seq_gtcon,
- seq_gamma_set1,
- seq_gamma_ctrl,
- seq_sleep_out,
- };
-
- for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
- ret = ld9040_panel_send_sequence(lcd, init_seq[i]);
- /* workaround: minimum delay time for transferring CMD */
- usleep_range(300, 310);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static int ld9040_ldi_enable(struct ld9040 *lcd)
-{
- return ld9040_panel_send_sequence(lcd, seq_display_on);
-}
-
-static int ld9040_ldi_disable(struct ld9040 *lcd)
-{
- int ret;
-
- ret = ld9040_panel_send_sequence(lcd, seq_display_off);
- ret = ld9040_panel_send_sequence(lcd, seq_sleep_in);
-
- return ret;
-}
-
-static int ld9040_power_is_on(int power)
-{
- return power <= FB_BLANK_NORMAL;
-}
-
-static int ld9040_power_on(struct ld9040 *lcd)
-{
- int ret = 0;
- struct lcd_platform_data *pd;
-
- pd = lcd->lcd_pd;
-
- /* lcd power on */
- ld9040_regulator_enable(lcd);
-
- if (!pd->reset) {
- dev_err(lcd->dev, "reset is NULL.\n");
- return -EINVAL;
- }
-
- pd->reset(lcd->ld);
- msleep(pd->reset_delay);
-
- ret = ld9040_ldi_init(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to initialize ldi.\n");
- return ret;
- }
-
- ret = ld9040_ldi_enable(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to enable ldi.\n");
- return ret;
- }
-
- return 0;
-}
-
-static int ld9040_power_off(struct ld9040 *lcd)
-{
- int ret;
- struct lcd_platform_data *pd;
-
- pd = lcd->lcd_pd;
-
- ret = ld9040_ldi_disable(lcd);
- if (ret) {
- dev_err(lcd->dev, "lcd setting failed.\n");
- return -EIO;
- }
-
- msleep(pd->power_off_delay);
-
- /* lcd power off */
- ld9040_regulator_disable(lcd);
-
- return 0;
-}
-
-static int ld9040_power(struct ld9040 *lcd, int power)
-{
- int ret = 0;
-
- if (ld9040_power_is_on(power) && !ld9040_power_is_on(lcd->power))
- ret = ld9040_power_on(lcd);
- else if (!ld9040_power_is_on(power) && ld9040_power_is_on(lcd->power))
- ret = ld9040_power_off(lcd);
-
- if (!ret)
- lcd->power = power;
-
- return ret;
-}
-
-static int ld9040_set_power(struct lcd_device *ld, int power)
-{
- struct ld9040 *lcd = lcd_get_data(ld);
-
- if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
- power != FB_BLANK_NORMAL) {
- dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
- return -EINVAL;
- }
-
- return ld9040_power(lcd, power);
-}
-
-static int ld9040_get_power(struct lcd_device *ld)
-{
- struct ld9040 *lcd = lcd_get_data(ld);
-
- return lcd->power;
-}
-
-static int ld9040_set_brightness(struct backlight_device *bd)
-{
- int ret = 0, brightness = bd->props.brightness;
- struct ld9040 *lcd = bl_get_data(bd);
-
- if (brightness < MIN_BRIGHTNESS ||
- brightness > bd->props.max_brightness) {
- dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
- MIN_BRIGHTNESS, MAX_BRIGHTNESS);
- return -EINVAL;
- }
-
- ret = ld9040_gamma_ctl(lcd, bd->props.brightness);
- if (ret) {
- dev_err(&bd->dev, "lcd brightness setting failed.\n");
- return -EIO;
- }
-
- return ret;
-}
-
-static struct lcd_ops ld9040_lcd_ops = {
- .set_power = ld9040_set_power,
- .get_power = ld9040_get_power,
-};
-
-static const struct backlight_ops ld9040_backlight_ops = {
- .update_status = ld9040_set_brightness,
-};
-
-static int ld9040_probe(struct spi_device *spi)
-{
- int ret = 0;
- struct ld9040 *lcd = NULL;
- struct lcd_device *ld = NULL;
- struct backlight_device *bd = NULL;
- struct backlight_properties props;
-
- lcd = devm_kzalloc(&spi->dev, sizeof(struct ld9040), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
-
- /* ld9040 lcd panel uses 3-wire 9bits SPI Mode. */
- spi->bits_per_word = 9;
-
- ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(&spi->dev, "spi setup failed.\n");
- return ret;
- }
-
- lcd->spi = spi;
- lcd->dev = &spi->dev;
-
- lcd->lcd_pd = dev_get_platdata(&spi->dev);
- if (!lcd->lcd_pd) {
- dev_err(&spi->dev, "platform data is NULL.\n");
- return -EINVAL;
- }
-
- mutex_init(&lcd->lock);
-
- ret = devm_regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
- if (ret) {
- dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
- return ret;
- }
-
- ld = devm_lcd_device_register(&spi->dev, "ld9040", &spi->dev, lcd,
- &ld9040_lcd_ops);
- if (IS_ERR(ld))
- return PTR_ERR(ld);
-
- lcd->ld = ld;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = MAX_BRIGHTNESS;
-
- bd = devm_backlight_device_register(&spi->dev, "ld9040-bl", &spi->dev,
- lcd, &ld9040_backlight_ops, &props);
- if (IS_ERR(bd))
- return PTR_ERR(bd);
-
- bd->props.brightness = MAX_BRIGHTNESS;
- lcd->bd = bd;
-
- /*
- * if lcd panel was on from bootloader like u-boot then
- * do not lcd on.
- */
- if (!lcd->lcd_pd->lcd_enabled) {
- /*
- * if lcd panel was off from bootloader then
- * current lcd status is powerdown and then
- * it enables lcd panel.
- */
- lcd->power = FB_BLANK_POWERDOWN;
-
- ld9040_power(lcd, FB_BLANK_UNBLANK);
- } else {
- lcd->power = FB_BLANK_UNBLANK;
- }
-
- spi_set_drvdata(spi, lcd);
-
- dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
- return 0;
-}
-
-static int ld9040_remove(struct spi_device *spi)
-{
- struct ld9040 *lcd = spi_get_drvdata(spi);
-
- ld9040_power(lcd, FB_BLANK_POWERDOWN);
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ld9040_suspend(struct device *dev)
-{
- struct ld9040 *lcd = dev_get_drvdata(dev);
-
- dev_dbg(dev, "lcd->power = %d\n", lcd->power);
-
- /*
- * when lcd panel is suspend, lcd panel becomes off
- * regardless of status.
- */
- return ld9040_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static int ld9040_resume(struct device *dev)
-{
- struct ld9040 *lcd = dev_get_drvdata(dev);
-
- lcd->power = FB_BLANK_POWERDOWN;
-
- return ld9040_power(lcd, FB_BLANK_UNBLANK);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ld9040_pm_ops, ld9040_suspend, ld9040_resume);
-
-/* Power down all displays on reboot, poweroff or halt. */
-static void ld9040_shutdown(struct spi_device *spi)
-{
- struct ld9040 *lcd = spi_get_drvdata(spi);
-
- ld9040_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static struct spi_driver ld9040_driver = {
- .driver = {
- .name = "ld9040",
- .pm = &ld9040_pm_ops,
- },
- .probe = ld9040_probe,
- .remove = ld9040_remove,
- .shutdown = ld9040_shutdown,
-};
-
-module_spi_driver(ld9040_driver);
-
-MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
-MODULE_DESCRIPTION("ld9040 LCD Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/ld9040_gamma.h b/drivers/video/backlight/ld9040_gamma.h
deleted file mode 100644
index c5e586d97385..000000000000
--- a/drivers/video/backlight/ld9040_gamma.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Gamma level definitions.
- *
- * Copyright (c) 2011 Samsung Electronics
- * InKi Dae <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _LD9040_BRIGHTNESS_H
-#define _LD9040_BRIGHTNESS_H
-
-#define MAX_GAMMA_LEVEL 25
-#define GAMMA_TABLE_COUNT 21
-
-/* gamma value: 2.2 */
-static const unsigned int ld9040_22_300[] = {
- 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91,
- 0x00, 0xb2, 0xb4, 0xaa, 0xbb, 0x00, 0xac,
- 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3
-};
-
-static const unsigned int ld9040_22_290[] = {
- 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89,
- 0x00, 0xb7, 0xb6, 0xa8, 0xba, 0x00, 0xa4,
- 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa
-};
-
-static const unsigned int ld9040_22_280[] = {
- 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86,
- 0x00, 0xb8, 0xb5, 0xa8, 0xbc, 0x00, 0xa0,
- 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7
-};
-
-static const unsigned int ld9040_22_270[] = {
- 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84,
- 0x00, 0xb9, 0xb7, 0xa8, 0xbc, 0x00, 0x9d,
- 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4
-
-};
-static const unsigned int ld9040_22_260[] = {
- 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80,
- 0x00, 0xb8, 0xb6, 0xaa, 0xbc, 0x00, 0x9a,
- 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0
-};
-
-static const unsigned int ld9040_22_250[] = {
- 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d,
- 0x00, 0xb9, 0xb6, 0xaa, 0xbb, 0x00, 0x97,
- 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d
-};
-
-static const unsigned int ld9040_22_240[] = {
- 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a,
- 0x00, 0xb9, 0xb7, 0xaa, 0xbd, 0x00, 0x94,
- 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a
-};
-
-static const unsigned int ld9040_22_230[] = {
- 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77,
- 0x00, 0xb9, 0xb7, 0xab, 0xbe, 0x00, 0x90,
- 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97
-};
-
-static const unsigned int ld9040_22_220[] = {
- 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75,
- 0x00, 0xb9, 0xb8, 0xab, 0xbe, 0x00, 0x8e,
- 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94
-};
-
-static const unsigned int ld9040_22_210[] = {
- 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72,
- 0x00, 0xb8, 0xb8, 0xac, 0xbf, 0x00, 0x8a,
- 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91
-};
-
-static const unsigned int ld9040_22_200[] = {
- 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f,
- 0x00, 0xb8, 0xb8, 0xad, 0xc0, 0x00, 0x86,
- 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d
-};
-
-static const unsigned int ld9040_22_190[] = {
- 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c,
- 0x00, 0xb8, 0xb8, 0xae, 0xc1, 0x00, 0x82,
- 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89
-};
-
-static const unsigned int ld9040_22_180[] = {
- 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69,
- 0x00, 0xb8, 0xb9, 0xae, 0xc1, 0x00, 0x7f,
- 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85
-};
-
-static const unsigned int ld9040_22_170[] = {
- 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65,
- 0x00, 0xb7, 0xb8, 0xaf, 0xc3, 0x00, 0x7a,
- 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81
-};
-
-static const unsigned int ld9040_22_160[] = {
- 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62,
- 0x00, 0xb6, 0xba, 0xaf, 0xc3, 0x00, 0x76,
- 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e
-};
-
-static const unsigned int ld9040_22_150[] = {
- 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f,
- 0x00, 0xb5, 0xba, 0xb0, 0xc3, 0x00, 0x72,
- 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a
-};
-
-static const unsigned int ld9040_22_140[] = {
- 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b,
- 0x00, 0xb5, 0xba, 0xb1, 0xc4, 0x00, 0x6e,
- 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75
-};
-
-static const unsigned int ld9040_22_130[] = {
- 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57,
- 0x00, 0xb5, 0xbb, 0xb0, 0xc5, 0x00, 0x6a,
- 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70
-};
-
-static const unsigned int ld9040_22_120[] = {
- 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53,
- 0x00, 0xb5, 0xbb, 0xb3, 0xc6, 0x00, 0x65,
- 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c
-};
-
-static const unsigned int ld9040_22_110[] = {
- 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f,
- 0x00, 0xb4, 0xbb, 0xb3, 0xc7, 0x00, 0x60,
- 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67
-};
-
-static const unsigned int ld9040_22_100[] = {
- 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b,
- 0x00, 0xb3, 0xbc, 0xb4, 0xc7, 0x00, 0x5c,
- 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62
-};
-
-static const unsigned int ld9040_22_90[] = {
- 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46,
- 0x00, 0xb1, 0xbc, 0xb5, 0xc8, 0x00, 0x56,
- 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d
-};
-
-static const unsigned int ld9040_22_80[] = {
- 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41,
- 0x00, 0xb0, 0xbe, 0xb5, 0xc9, 0x00, 0x51,
- 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57
-};
-
-static const unsigned int ld9040_22_70[] = {
- 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c,
- 0x00, 0xaf, 0xbf, 0xb6, 0xcb, 0x00, 0x4b,
- 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52
-};
-
-static const unsigned int ld9040_22_50[] = {
- 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30,
- 0x00, 0xaf, 0xc0, 0xb8, 0xcd, 0x00, 0x3d,
- 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44
-};
-
-struct ld9040_gamma {
- unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
-};
-
-static struct ld9040_gamma gamma_table = {
- .gamma_22_table[0] = (unsigned int *)&ld9040_22_50,
- .gamma_22_table[1] = (unsigned int *)&ld9040_22_70,
- .gamma_22_table[2] = (unsigned int *)&ld9040_22_80,
- .gamma_22_table[3] = (unsigned int *)&ld9040_22_90,
- .gamma_22_table[4] = (unsigned int *)&ld9040_22_100,
- .gamma_22_table[5] = (unsigned int *)&ld9040_22_110,
- .gamma_22_table[6] = (unsigned int *)&ld9040_22_120,
- .gamma_22_table[7] = (unsigned int *)&ld9040_22_130,
- .gamma_22_table[8] = (unsigned int *)&ld9040_22_140,
- .gamma_22_table[9] = (unsigned int *)&ld9040_22_150,
- .gamma_22_table[10] = (unsigned int *)&ld9040_22_160,
- .gamma_22_table[11] = (unsigned int *)&ld9040_22_170,
- .gamma_22_table[12] = (unsigned int *)&ld9040_22_180,
- .gamma_22_table[13] = (unsigned int *)&ld9040_22_190,
- .gamma_22_table[14] = (unsigned int *)&ld9040_22_200,
- .gamma_22_table[15] = (unsigned int *)&ld9040_22_210,
- .gamma_22_table[16] = (unsigned int *)&ld9040_22_220,
- .gamma_22_table[17] = (unsigned int *)&ld9040_22_230,
- .gamma_22_table[18] = (unsigned int *)&ld9040_22_240,
- .gamma_22_table[19] = (unsigned int *)&ld9040_22_250,
- .gamma_22_table[20] = (unsigned int *)&ld9040_22_260,
- .gamma_22_table[21] = (unsigned int *)&ld9040_22_270,
- .gamma_22_table[22] = (unsigned int *)&ld9040_22_280,
- .gamma_22_table[23] = (unsigned int *)&ld9040_22_290,
- .gamma_22_table[24] = (unsigned int *)&ld9040_22_300,
-};
-
-#endif
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index cd50df5807ea..086611c7bc03 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -400,10 +400,8 @@ static int lm3639_remove(struct i2c_client *client)
regmap_write(pchip->regmap, REG_ENABLE, 0x00);
- if (&pchip->cdev_torch)
- led_classdev_unregister(&pchip->cdev_torch);
- if (&pchip->cdev_flash)
- led_classdev_unregister(&pchip->cdev_flash);
+ led_classdev_unregister(&pchip->cdev_torch);
+ led_classdev_unregister(&pchip->cdev_flash);
if (pchip->bled)
device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
return 0;
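
The dropped guards tested the address of a structure member embedded in pchip, which can never be NULL for a valid pointer, so both branches were always taken; calling led_classdev_unregister() unconditionally is equivalent. A minimal sketch of the pitfall, with hypothetical type names:

#include <stddef.h>

/* Hypothetical types; the point is only the embedded member. */
struct led_classdev { int id; };
struct chip { struct led_classdev cdev_torch; };

static int torch_check(struct chip *pchip)
{
	/* &pchip->cdev_torch is pchip plus a constant offset, so for
	 * any valid pchip it is non-NULL and this test is a no-op. */
	return &pchip->cdev_torch != NULL;	/* always 1 */
}
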
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index bdfcc0a71db1..678b27063198 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -28,10 +28,8 @@
struct pwm_bl_data {
struct pwm_device *pwm;
struct device *dev;
- unsigned int period;
unsigned int lth_brightness;
unsigned int *levels;
- bool enabled;
struct regulator *power_supply;
struct gpio_desc *enable_gpio;
unsigned int scale;
@@ -46,31 +44,35 @@ struct pwm_bl_data {
void (*exit)(struct device *);
};
-static void pwm_backlight_power_on(struct pwm_bl_data *pb, int brightness)
+static void pwm_backlight_power_on(struct pwm_bl_data *pb)
{
+ struct pwm_state state;
int err;
- if (pb->enabled)
+ pwm_get_state(pb->pwm, &state);
+ if (state.enabled)
return;
err = regulator_enable(pb->power_supply);
if (err < 0)
dev_err(pb->dev, "failed to enable power supply\n");
- pwm_enable(pb->pwm);
+ state.enabled = true;
+ pwm_apply_state(pb->pwm, &state);
if (pb->post_pwm_on_delay)
msleep(pb->post_pwm_on_delay);
if (pb->enable_gpio)
gpiod_set_value_cansleep(pb->enable_gpio, 1);
-
- pb->enabled = true;
}
static void pwm_backlight_power_off(struct pwm_bl_data *pb)
{
- if (!pb->enabled)
+ struct pwm_state state;
+
+ pwm_get_state(pb->pwm, &state);
+ if (!state.enabled)
return;
if (pb->enable_gpio)
@@ -79,24 +81,27 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
if (pb->pwm_off_delay)
msleep(pb->pwm_off_delay);
- pwm_config(pb->pwm, 0, pb->period);
- pwm_disable(pb->pwm);
+ state.enabled = false;
+ state.duty_cycle = 0;
+ pwm_apply_state(pb->pwm, &state);
regulator_disable(pb->power_supply);
- pb->enabled = false;
}
static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
{
unsigned int lth = pb->lth_brightness;
+ struct pwm_state state;
u64 duty_cycle;
+ pwm_get_state(pb->pwm, &state);
+
if (pb->levels)
duty_cycle = pb->levels[brightness];
else
duty_cycle = brightness;
- duty_cycle *= pb->period - lth;
+ duty_cycle *= state.period - lth;
do_div(duty_cycle, pb->scale);
return duty_cycle + lth;
@@ -106,7 +111,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
{
struct pwm_bl_data *pb = bl_get_data(bl);
int brightness = bl->props.brightness;
- int duty_cycle;
+ struct pwm_state state;
if (bl->props.power != FB_BLANK_UNBLANK ||
bl->props.fb_blank != FB_BLANK_UNBLANK ||
@@ -117,9 +122,10 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
brightness = pb->notify(pb->dev, brightness);
if (brightness > 0) {
- duty_cycle = compute_duty_cycle(pb, brightness);
- pwm_config(pb->pwm, duty_cycle, pb->period);
- pwm_backlight_power_on(pb, brightness);
+ pwm_get_state(pb->pwm, &state);
+ state.duty_cycle = compute_duty_cycle(pb, brightness);
+ pwm_apply_state(pb->pwm, &state);
+ pwm_backlight_power_on(pb);
} else
pwm_backlight_power_off(pb);
@@ -447,7 +453,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct pwm_bl_data *pb;
struct pwm_state state;
- struct pwm_args pargs;
unsigned int i;
int ret;
@@ -478,7 +483,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->check_fb = data->check_fb;
pb->exit = data->exit;
pb->dev = &pdev->dev;
- pb->enabled = false;
pb->post_pwm_on_delay = data->post_pwm_on_delay;
pb->pwm_off_delay = data->pwm_off_delay;
@@ -539,10 +543,26 @@ static int pwm_backlight_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "got pwm for backlight\n");
- if (!data->levels) {
- /* Get the PWM period (in nanoseconds) */
- pwm_get_state(pb->pwm, &state);
+ /* Sync up PWM state. */
+ pwm_init_state(pb->pwm, &state);
+ /*
+ * The DT case will set the pwm_period_ns field to 0 and store the
+ * period, parsed from the DT, in the PWM device. For the non-DT case,
+ * set the period from platform data if it has not already been set
+ * via the PWM lookup table.
+ */
+ if (!state.period && (data->pwm_period_ns > 0))
+ state.period = data->pwm_period_ns;
+
+ ret = pwm_apply_state(pb->pwm, &state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
+ ret);
+ goto err_alloc;
+ }
+
+ if (!data->levels) {
ret = pwm_backlight_brightness_default(&pdev->dev, data,
state.period);
if (ret < 0) {
@@ -559,24 +579,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->levels = data->levels;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(pb->pwm);
-
- /*
- * The DT case will set the pwm_period_ns field to 0 and store the
- * period, parsed from the DT, in the PWM device. For the non-DT case,
- * set the period from platform data if it has not already been set
- * via the PWM lookup table.
- */
- pwm_get_args(pb->pwm, &pargs);
- pb->period = pargs.period;
- if (!pb->period && (data->pwm_period_ns > 0))
- pb->period = data->pwm_period_ns;
-
- pb->lth_brightness = data->lth_brightness * (pb->period / pb->scale);
+ pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
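
This file moves from the legacy pwm_config()/pwm_enable() calls plus cached period/enabled fields to the atomic PWM API, where the PWM core owns the canonical state and every change is a read-modify-apply cycle. A minimal sketch of that idiom, assuming a valid struct pwm_device (helper name hypothetical):

#include <linux/pwm.h>

static int backlight_set_duty(struct pwm_device *pwm, unsigned int duty_ns)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);	/* read the canonical state */
	state.duty_cycle = duty_ns;	/* change only what differs */
	state.enabled = duty_ns > 0;

	return pwm_apply_state(pwm, &state);	/* one atomic update */
}
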
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
deleted file mode 100644
index 3c4a22a3063a..000000000000
--- a/drivers/video/backlight/s6e63m0.c
+++ /dev/null
@@ -1,857 +0,0 @@
-/*
- * S6E63M0 AMOLED LCD panel driver.
- *
- * Author: InKi Dae <inki.dae@samsung.com>
- *
- * Derived from drivers/video/omap/lcd-apollon.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/lcd.h>
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-#include <linux/wait.h>
-
-#include "s6e63m0_gamma.h"
-
-#define SLEEPMSEC 0x1000
-#define ENDDEF 0x2000
-#define DEFMASK 0xFF00
-#define COMMAND_ONLY 0xFE
-#define DATA_ONLY 0xFF
-
-#define MIN_BRIGHTNESS 0
-#define MAX_BRIGHTNESS 10
-
-struct s6e63m0 {
- struct device *dev;
- struct spi_device *spi;
- unsigned int power;
- unsigned int current_brightness;
- unsigned int gamma_mode;
- unsigned int gamma_table_count;
- struct lcd_device *ld;
- struct backlight_device *bd;
- struct lcd_platform_data *lcd_pd;
-};
-
-static const unsigned short seq_panel_condition_set[] = {
- 0xF8, 0x01,
- DATA_ONLY, 0x27,
- DATA_ONLY, 0x27,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x54,
- DATA_ONLY, 0x9f,
- DATA_ONLY, 0x63,
- DATA_ONLY, 0x86,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x0d,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_display_condition_set[] = {
- 0xf2, 0x02,
- DATA_ONLY, 0x03,
- DATA_ONLY, 0x1c,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x10,
-
- 0xf7, 0x03,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_gamma_setting[] = {
- 0xfa, 0x00,
- DATA_ONLY, 0x18,
- DATA_ONLY, 0x08,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x64,
- DATA_ONLY, 0x56,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0xb6,
- DATA_ONLY, 0xba,
- DATA_ONLY, 0xa8,
- DATA_ONLY, 0xac,
- DATA_ONLY, 0xb1,
- DATA_ONLY, 0x9d,
- DATA_ONLY, 0xc1,
- DATA_ONLY, 0xc1,
- DATA_ONLY, 0xb7,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x9c,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x9f,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xd6,
-
- 0xfa, 0x01,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_etc_condition_set[] = {
- 0xf6, 0x00,
- DATA_ONLY, 0x8c,
- DATA_ONLY, 0x07,
-
- 0xb3, 0xc,
-
- 0xb5, 0x2c,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x0c,
- DATA_ONLY, 0x0a,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0e,
- DATA_ONLY, 0x17,
- DATA_ONLY, 0x13,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x2a,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1b,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x17,
-
- DATA_ONLY, 0x2b,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x3a,
- DATA_ONLY, 0x34,
- DATA_ONLY, 0x30,
- DATA_ONLY, 0x2c,
- DATA_ONLY, 0x29,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x23,
- DATA_ONLY, 0x21,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x1e,
- DATA_ONLY, 0x1e,
-
- 0xb6, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
-
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
-
- 0xb7, 0x2c,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x0c,
- DATA_ONLY, 0x0a,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0e,
- DATA_ONLY, 0x17,
- DATA_ONLY, 0x13,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x2a,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1b,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x17,
-
- DATA_ONLY, 0x2b,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x3a,
- DATA_ONLY, 0x34,
- DATA_ONLY, 0x30,
- DATA_ONLY, 0x2c,
- DATA_ONLY, 0x29,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x23,
- DATA_ONLY, 0x21,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x1e,
- DATA_ONLY, 0x1e,
-
- 0xb8, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
-
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
-
- 0xb9, 0x2c,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x0c,
- DATA_ONLY, 0x0a,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0e,
- DATA_ONLY, 0x17,
- DATA_ONLY, 0x13,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x2a,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1b,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x17,
-
- DATA_ONLY, 0x2b,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x3a,
- DATA_ONLY, 0x34,
- DATA_ONLY, 0x30,
- DATA_ONLY, 0x2c,
- DATA_ONLY, 0x29,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x23,
- DATA_ONLY, 0x21,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x1e,
- DATA_ONLY, 0x1e,
-
- 0xba, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
-
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
-
- 0xc1, 0x4d,
- DATA_ONLY, 0x96,
- DATA_ONLY, 0x1d,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x01,
- DATA_ONLY, 0xdf,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- DATA_ONLY, 0x06,
- DATA_ONLY, 0x09,
- DATA_ONLY, 0x0d,
- DATA_ONLY, 0x0f,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x15,
- DATA_ONLY, 0x18,
-
- 0xb2, 0x10,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0b,
- DATA_ONLY, 0x05,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_acl_on[] = {
- /* ACL on */
- 0xc0, 0x01,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_acl_off[] = {
- /* ACL off */
- 0xc0, 0x00,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_elvss_on[] = {
- /* ELVSS on */
- 0xb1, 0x0b,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_elvss_off[] = {
- /* ELVSS off */
- 0xb1, 0x0a,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_stand_by_off[] = {
- 0x11, COMMAND_ONLY,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_stand_by_on[] = {
- 0x10, COMMAND_ONLY,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_display_on[] = {
- 0x29, COMMAND_ONLY,
-
- ENDDEF, 0x0000
-};
-
-
-static int s6e63m0_spi_write_byte(struct s6e63m0 *lcd, int addr, int data)
-{
- u16 buf[1];
- struct spi_message msg;
-
- struct spi_transfer xfer = {
- .len = 2,
- .tx_buf = buf,
- };
-
- buf[0] = (addr << 8) | data;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- return spi_sync(lcd->spi, &msg);
-}
-
-static int s6e63m0_spi_write(struct s6e63m0 *lcd, unsigned char address,
- unsigned char command)
-{
- int ret = 0;
-
- if (address != DATA_ONLY)
- ret = s6e63m0_spi_write_byte(lcd, 0x0, address);
- if (command != COMMAND_ONLY)
- ret = s6e63m0_spi_write_byte(lcd, 0x1, command);
-
- return ret;
-}
-
-static int s6e63m0_panel_send_sequence(struct s6e63m0 *lcd,
- const unsigned short *wbuf)
-{
- int ret = 0, i = 0;
-
- while ((wbuf[i] & DEFMASK) != ENDDEF) {
- if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
- ret = s6e63m0_spi_write(lcd, wbuf[i], wbuf[i+1]);
- if (ret)
- break;
- } else {
- msleep(wbuf[i+1]);
- }
- i += 2;
- }
-
- return ret;
-}
-
-static int _s6e63m0_gamma_ctl(struct s6e63m0 *lcd, const unsigned int *gamma)
-{
- unsigned int i = 0;
- int ret = 0;
-
- /* disable gamma table updating. */
- ret = s6e63m0_spi_write(lcd, 0xfa, 0x00);
- if (ret) {
- dev_err(lcd->dev, "failed to disable gamma table updating.\n");
- goto gamma_err;
- }
-
- for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
- ret = s6e63m0_spi_write(lcd, DATA_ONLY, gamma[i]);
- if (ret) {
- dev_err(lcd->dev, "failed to set gamma table.\n");
- goto gamma_err;
- }
- }
-
- /* update gamma table. */
- ret = s6e63m0_spi_write(lcd, 0xfa, 0x01);
- if (ret)
- dev_err(lcd->dev, "failed to update gamma table.\n");
-
-gamma_err:
- return ret;
-}
-
-static int s6e63m0_gamma_ctl(struct s6e63m0 *lcd, int gamma)
-{
- int ret = 0;
-
- ret = _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
-
- return ret;
-}
-
-
-static int s6e63m0_ldi_init(struct s6e63m0 *lcd)
-{
- int ret, i;
- const unsigned short *init_seq[] = {
- seq_panel_condition_set,
- seq_display_condition_set,
- seq_gamma_setting,
- seq_etc_condition_set,
- seq_acl_on,
- seq_elvss_on,
- };
-
- for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
- ret = s6e63m0_panel_send_sequence(lcd, init_seq[i]);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static int s6e63m0_ldi_enable(struct s6e63m0 *lcd)
-{
- int ret = 0, i;
- const unsigned short *enable_seq[] = {
- seq_stand_by_off,
- seq_display_on,
- };
-
- for (i = 0; i < ARRAY_SIZE(enable_seq); i++) {
- ret = s6e63m0_panel_send_sequence(lcd, enable_seq[i]);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static int s6e63m0_ldi_disable(struct s6e63m0 *lcd)
-{
- int ret;
-
- ret = s6e63m0_panel_send_sequence(lcd, seq_stand_by_on);
-
- return ret;
-}
-
-static int s6e63m0_power_is_on(int power)
-{
- return power <= FB_BLANK_NORMAL;
-}
-
-static int s6e63m0_power_on(struct s6e63m0 *lcd)
-{
- int ret = 0;
- struct lcd_platform_data *pd;
- struct backlight_device *bd;
-
- pd = lcd->lcd_pd;
- bd = lcd->bd;
-
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EINVAL;
- }
-
- pd->power_on(lcd->ld, 1);
- msleep(pd->power_on_delay);
-
- if (!pd->reset) {
- dev_err(lcd->dev, "reset is NULL.\n");
- return -EINVAL;
- }
-
- pd->reset(lcd->ld);
- msleep(pd->reset_delay);
-
- ret = s6e63m0_ldi_init(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to initialize ldi.\n");
- return ret;
- }
-
- ret = s6e63m0_ldi_enable(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to enable ldi.\n");
- return ret;
- }
-
- /* set brightness to current value after power on or resume. */
- ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness);
- if (ret) {
- dev_err(lcd->dev, "lcd gamma setting failed.\n");
- return ret;
- }
-
- return 0;
-}
-
-static int s6e63m0_power_off(struct s6e63m0 *lcd)
-{
- int ret;
- struct lcd_platform_data *pd;
-
- pd = lcd->lcd_pd;
-
- ret = s6e63m0_ldi_disable(lcd);
- if (ret) {
- dev_err(lcd->dev, "lcd setting failed.\n");
- return -EIO;
- }
-
- msleep(pd->power_off_delay);
-
- pd->power_on(lcd->ld, 0);
-
- return 0;
-}
-
-static int s6e63m0_power(struct s6e63m0 *lcd, int power)
-{
- int ret = 0;
-
- if (s6e63m0_power_is_on(power) && !s6e63m0_power_is_on(lcd->power))
- ret = s6e63m0_power_on(lcd);
- else if (!s6e63m0_power_is_on(power) && s6e63m0_power_is_on(lcd->power))
- ret = s6e63m0_power_off(lcd);
-
- if (!ret)
- lcd->power = power;
-
- return ret;
-}
-
-static int s6e63m0_set_power(struct lcd_device *ld, int power)
-{
- struct s6e63m0 *lcd = lcd_get_data(ld);
-
- if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
- power != FB_BLANK_NORMAL) {
- dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
- return -EINVAL;
- }
-
- return s6e63m0_power(lcd, power);
-}
-
-static int s6e63m0_get_power(struct lcd_device *ld)
-{
- struct s6e63m0 *lcd = lcd_get_data(ld);
-
- return lcd->power;
-}
-
-static int s6e63m0_set_brightness(struct backlight_device *bd)
-{
- int ret = 0, brightness = bd->props.brightness;
- struct s6e63m0 *lcd = bl_get_data(bd);
-
- if (brightness < MIN_BRIGHTNESS ||
- brightness > bd->props.max_brightness) {
- dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
- MIN_BRIGHTNESS, MAX_BRIGHTNESS);
- return -EINVAL;
- }
-
- ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness);
- if (ret) {
- dev_err(&bd->dev, "lcd brightness setting failed.\n");
- return -EIO;
- }
-
- return ret;
-}
-
-static struct lcd_ops s6e63m0_lcd_ops = {
- .set_power = s6e63m0_set_power,
- .get_power = s6e63m0_get_power,
-};
-
-static const struct backlight_ops s6e63m0_backlight_ops = {
- .update_status = s6e63m0_set_brightness,
-};
-
-static ssize_t s6e63m0_sysfs_show_gamma_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
- char temp[10];
-
- switch (lcd->gamma_mode) {
- case 0:
- sprintf(temp, "2.2 mode\n");
- strcat(buf, temp);
- break;
- case 1:
- sprintf(temp, "1.9 mode\n");
- strcat(buf, temp);
- break;
- case 2:
- sprintf(temp, "1.7 mode\n");
- strcat(buf, temp);
- break;
- default:
- dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n");
- break;
- }
-
- return strlen(buf);
-}
-
-static ssize_t s6e63m0_sysfs_store_gamma_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
- struct backlight_device *bd = NULL;
- int brightness, rc;
-
- rc = kstrtouint(buf, 0, &lcd->gamma_mode);
- if (rc < 0)
- return rc;
-
- bd = lcd->bd;
-
- brightness = bd->props.brightness;
-
- switch (lcd->gamma_mode) {
- case 0:
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]);
- break;
- case 1:
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_19_table[brightness]);
- break;
- case 2:
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_17_table[brightness]);
- break;
- default:
- dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n");
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]);
- break;
- }
- return len;
-}
-
-static DEVICE_ATTR(gamma_mode, 0644,
- s6e63m0_sysfs_show_gamma_mode, s6e63m0_sysfs_store_gamma_mode);
-
-static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
- char temp[3];
-
- sprintf(temp, "%u\n", lcd->gamma_table_count);
- strcpy(buf, temp);
-
- return strlen(buf);
-}
-static DEVICE_ATTR(gamma_table, 0444,
- s6e63m0_sysfs_show_gamma_table, NULL);
-
-static int s6e63m0_probe(struct spi_device *spi)
-{
- int ret = 0;
- struct s6e63m0 *lcd = NULL;
- struct lcd_device *ld = NULL;
- struct backlight_device *bd = NULL;
- struct backlight_properties props;
-
- lcd = devm_kzalloc(&spi->dev, sizeof(struct s6e63m0), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
-
- /* The s6e63m0 LCD panel uses 3-wire, 9-bit SPI mode. */
- spi->bits_per_word = 9;
-
- ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(&spi->dev, "spi setup failed.\n");
- return ret;
- }
-
- lcd->spi = spi;
- lcd->dev = &spi->dev;
-
- lcd->lcd_pd = dev_get_platdata(&spi->dev);
- if (!lcd->lcd_pd) {
- dev_err(&spi->dev, "platform data is NULL.\n");
- return -EINVAL;
- }
-
- ld = devm_lcd_device_register(&spi->dev, "s6e63m0", &spi->dev, lcd,
- &s6e63m0_lcd_ops);
- if (IS_ERR(ld))
- return PTR_ERR(ld);
-
- lcd->ld = ld;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = MAX_BRIGHTNESS;
-
- bd = devm_backlight_device_register(&spi->dev, "s6e63m0bl-bl",
- &spi->dev, lcd, &s6e63m0_backlight_ops,
- &props);
- if (IS_ERR(bd))
- return PTR_ERR(bd);
-
- bd->props.brightness = MAX_BRIGHTNESS;
- lcd->bd = bd;
-
- /*
- * Export the number of available gamma tables so that
- * userspace can query it.
- */
- lcd->gamma_table_count =
- sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int *));
-
- ret = device_create_file(&(spi->dev), &dev_attr_gamma_mode);
- if (ret < 0)
- dev_err(&(spi->dev), "failed to add sysfs entries\n");
-
- ret = device_create_file(&(spi->dev), &dev_attr_gamma_table);
- if (ret < 0)
- dev_err(&(spi->dev), "failed to add sysfs entries\n");
-
- /*
- * If the LCD panel was already powered on by the bootloader
- * (e.g. u-boot), do not power it on again.
- */
- if (!lcd->lcd_pd->lcd_enabled) {
- /*
- * The panel was left off by the bootloader, so the current
- * status is powerdown; record that, then enable the panel.
- */
- lcd->power = FB_BLANK_POWERDOWN;
-
- s6e63m0_power(lcd, FB_BLANK_UNBLANK);
- } else {
- lcd->power = FB_BLANK_UNBLANK;
- }
-
- spi_set_drvdata(spi, lcd);
-
- dev_info(&spi->dev, "s6e63m0 panel driver has been probed.\n");
-
- return 0;
-}
-
-static int s6e63m0_remove(struct spi_device *spi)
-{
- struct s6e63m0 *lcd = spi_get_drvdata(spi);
-
- s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
- device_remove_file(&spi->dev, &dev_attr_gamma_table);
- device_remove_file(&spi->dev, &dev_attr_gamma_mode);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int s6e63m0_suspend(struct device *dev)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
-
- dev_dbg(dev, "lcd->power = %d\n", lcd->power);
-
- /*
- * When the LCD panel is suspended, it is powered off
- * regardless of its current status.
- */
- return s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static int s6e63m0_resume(struct device *dev)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
-
- lcd->power = FB_BLANK_POWERDOWN;
-
- return s6e63m0_power(lcd, FB_BLANK_UNBLANK);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(s6e63m0_pm_ops, s6e63m0_suspend, s6e63m0_resume);
-
-/* Power down all displays on reboot, poweroff or halt. */
-static void s6e63m0_shutdown(struct spi_device *spi)
-{
- struct s6e63m0 *lcd = spi_get_drvdata(spi);
-
- s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static struct spi_driver s6e63m0_driver = {
- .driver = {
- .name = "s6e63m0",
- .pm = &s6e63m0_pm_ops,
- },
- .probe = s6e63m0_probe,
- .remove = s6e63m0_remove,
- .shutdown = s6e63m0_shutdown,
-};
-
-module_spi_driver(s6e63m0_driver);
-
-MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("S6E63M0 LCD Driver");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/video/backlight/s6e63m0_gamma.h b/drivers/video/backlight/s6e63m0_gamma.h
deleted file mode 100644
index 2c44bdb0696b..000000000000
--- a/drivers/video/backlight/s6e63m0_gamma.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/* linux/drivers/video/samsung/s6e63m0_brightness.h
- *
- * Gamma level definitions.
- *
- * Copyright (c) 2009 Samsung Electronics
- * InKi Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _S6E63M0_BRIGHTNESS_H
-#define _S6E63M0_BRIGHTNESS_H
-
-#define MAX_GAMMA_LEVEL 11
-#define GAMMA_TABLE_COUNT 21
-
-/* gamma value: 2.2 */
-static const unsigned int s6e63m0_22_300[] = {
- 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6,
- 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0,
- 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb
-};
-
-static const unsigned int s6e63m0_22_280[] = {
- 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6,
- 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1,
- 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6
-};
-
-static const unsigned int s6e63m0_22_260[] = {
- 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6,
- 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2,
- 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1
-
-};
-
-static const unsigned int s6e63m0_22_240[] = {
- 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9,
- 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3,
- 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA
-
-};
-static const unsigned int s6e63m0_22_220[] = {
- 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8,
- 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4,
- 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2
-};
-
-static const unsigned int s6e63m0_22_200[] = {
- 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA,
- 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6,
- 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA
-};
-
-static const unsigned int s6e63m0_22_170[] = {
- 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB,
- 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8,
- 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB
-};
-
-static const unsigned int s6e63m0_22_140[] = {
- 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC,
- 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9,
- 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E
-};
-
-static const unsigned int s6e63m0_22_110[] = {
- 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF,
- 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC,
- 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D
-};
-
-static const unsigned int s6e63m0_22_90[] = {
- 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0,
- 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF,
- 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82
-};
-
-static const unsigned int s6e63m0_22_30[] = {
- 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8,
- 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7,
- 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51
-};
-
-/* gamma value: 1.9 */
-static const unsigned int s6e63m0_19_300[] = {
- 0x18, 0x08, 0x24, 0x61, 0x5F, 0x39, 0xBA,
- 0xBD, 0xAD, 0xB1, 0xB6, 0xA5, 0xC4, 0xC5,
- 0xBC, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB
-};
-
-static const unsigned int s6e63m0_19_280[] = {
- 0x18, 0x08, 0x24, 0x61, 0x60, 0x39, 0xBB,
- 0xBE, 0xAD, 0xB2, 0xB6, 0xA6, 0xC5, 0xC7,
- 0xBD, 0x00, 0x9B, 0x00, 0x9E, 0x00, 0xD5
-};
-
-static const unsigned int s6e63m0_19_260[] = {
- 0x18, 0x08, 0x24, 0x63, 0x61, 0x3B, 0xBA,
- 0xBE, 0xAC, 0xB3, 0xB8, 0xA7, 0xC6, 0xC8,
- 0xBD, 0x00, 0x96, 0x00, 0x98, 0x00, 0xCF
-};
-
-static const unsigned int s6e63m0_19_240[] = {
- 0x18, 0x08, 0x24, 0x67, 0x64, 0x3F, 0xBB,
- 0xBE, 0xAD, 0xB3, 0xB9, 0xA7, 0xC8, 0xC9,
- 0xBE, 0x00, 0x90, 0x00, 0x92, 0x00, 0xC8
-};
-
-static const unsigned int s6e63m0_19_220[] = {
- 0x18, 0x08, 0x24, 0x68, 0x64, 0x40, 0xBC,
- 0xBF, 0xAF, 0xB4, 0xBA, 0xA9, 0xC8, 0xCA,
- 0xBE, 0x00, 0x8B, 0x00, 0x8C, 0x00, 0xC0
-};
-
-static const unsigned int s6e63m0_19_200[] = {
- 0x18, 0x08, 0x24, 0x68, 0x64, 0x3F, 0xBE,
- 0xC0, 0xB0, 0xB6, 0xBB, 0xAB, 0xC8, 0xCB,
- 0xBF, 0x00, 0x85, 0x00, 0x86, 0x00, 0xB8
-};
-
-static const unsigned int s6e63m0_19_170[] = {
- 0x18, 0x08, 0x24, 0x69, 0x64, 0x40, 0xBF,
- 0xC1, 0xB0, 0xB9, 0xBE, 0xAD, 0xCB, 0xCD,
- 0xC2, 0x00, 0x7A, 0x00, 0x7B, 0x00, 0xAA
-};
-
-static const unsigned int s6e63m0_19_140[] = {
- 0x18, 0x08, 0x24, 0x6E, 0x65, 0x45, 0xC0,
- 0xC3, 0xB2, 0xBA, 0xBE, 0xAE, 0xCD, 0xD0,
- 0xC4, 0x00, 0x70, 0x00, 0x70, 0x00, 0x9C
-};
-
-static const unsigned int s6e63m0_19_110[] = {
- 0x18, 0x08, 0x24, 0x6F, 0x65, 0x46, 0xC2,
- 0xC4, 0xB3, 0xBF, 0xC2, 0xB2, 0xCF, 0xD1,
- 0xC6, 0x00, 0x64, 0x00, 0x64, 0x00, 0x8D
-};
-
-static const unsigned int s6e63m0_19_90[] = {
- 0x18, 0x08, 0x24, 0x74, 0x60, 0x4A, 0xC3,
- 0xC6, 0xB5, 0xBF, 0xC3, 0xB2, 0xD2, 0xD3,
- 0xC8, 0x00, 0x5B, 0x00, 0x5B, 0x00, 0x81
-};
-
-static const unsigned int s6e63m0_19_30[] = {
- 0x18, 0x08, 0x24, 0x84, 0x45, 0x4F, 0xCA,
- 0xCB, 0xBC, 0xC9, 0xCB, 0xBC, 0xDA, 0xDA,
- 0xD0, 0x00, 0x35, 0x00, 0x34, 0x00, 0x4E
-};
-
-/* gamma value: 1.7 */
-static const unsigned int s6e63m0_17_300[] = {
- 0x18, 0x08, 0x24, 0x70, 0x70, 0x4F, 0xBF,
- 0xC2, 0xB2, 0xB8, 0xBC, 0xAC, 0xCB, 0xCD,
- 0xC3, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB
-};
-
-static const unsigned int s6e63m0_17_280[] = {
- 0x18, 0x08, 0x24, 0x71, 0x71, 0x50, 0xBF,
- 0xC2, 0xB2, 0xBA, 0xBE, 0xAE, 0xCB, 0xCD,
- 0xC3, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6
-};
-
-static const unsigned int s6e63m0_17_260[] = {
- 0x18, 0x08, 0x24, 0x72, 0x72, 0x50, 0xC0,
- 0xC3, 0xB4, 0xB9, 0xBE, 0xAE, 0xCC, 0xCF,
- 0xC4, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1
-};
-
-static const unsigned int s6e63m0_17_240[] = {
- 0x18, 0x08, 0x24, 0x71, 0x72, 0x4F, 0xC2,
- 0xC4, 0xB5, 0xBB, 0xBF, 0xB0, 0xCC, 0xCF,
- 0xC3, 0x00, 0x91, 0x00, 0x95, 0x00, 0xCA
-};
-
-static const unsigned int s6e63m0_17_220[] = {
- 0x18, 0x08, 0x24, 0x71, 0x73, 0x4F, 0xC2,
- 0xC5, 0xB5, 0xBD, 0xC0, 0xB2, 0xCD, 0xD1,
- 0xC5, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2
-};
-
-static const unsigned int s6e63m0_17_200[] = {
- 0x18, 0x08, 0x24, 0x72, 0x75, 0x51, 0xC2,
- 0xC6, 0xB5, 0xBF, 0xC1, 0xB3, 0xCE, 0xD1,
- 0xC6, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA
-};
-
-static const unsigned int s6e63m0_17_170[] = {
- 0x18, 0x08, 0x24, 0x75, 0x77, 0x54, 0xC3,
- 0xC7, 0xB7, 0xC0, 0xC3, 0xB4, 0xD1, 0xD3,
- 0xC9, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB
-};
-
-static const unsigned int s6e63m0_17_140[] = {
- 0x18, 0x08, 0x24, 0x7B, 0x77, 0x58, 0xC3,
- 0xC8, 0xB8, 0xC2, 0xC6, 0xB6, 0xD3, 0xD4,
- 0xCA, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E
-};
-
-static const unsigned int s6e63m0_17_110[] = {
- 0x18, 0x08, 0x24, 0x81, 0x7B, 0x5D, 0xC6,
- 0xCA, 0xBB, 0xC3, 0xC7, 0xB8, 0xD6, 0xD8,
- 0xCD, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D
-};
-
-static const unsigned int s6e63m0_17_90[] = {
- 0x18, 0x08, 0x24, 0x82, 0x7A, 0x5B, 0xC8,
- 0xCB, 0xBD, 0xC5, 0xCA, 0xBA, 0xD6, 0xD8,
- 0xCE, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82
-};
-
-static const unsigned int s6e63m0_17_30[] = {
- 0x18, 0x08, 0x24, 0x8F, 0x73, 0x63, 0xD1,
- 0xD0, 0xC5, 0xCC, 0xD1, 0xC2, 0xDE, 0xE0,
- 0xD6, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51
-};
-
-struct s6e63m0_gamma {
- unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
- unsigned int *gamma_19_table[MAX_GAMMA_LEVEL];
- unsigned int *gamma_17_table[MAX_GAMMA_LEVEL];
-};
-
-static struct s6e63m0_gamma gamma_table = {
- .gamma_22_table[0] = (unsigned int *)&s6e63m0_22_30,
- .gamma_22_table[1] = (unsigned int *)&s6e63m0_22_90,
- .gamma_22_table[2] = (unsigned int *)&s6e63m0_22_110,
- .gamma_22_table[3] = (unsigned int *)&s6e63m0_22_140,
- .gamma_22_table[4] = (unsigned int *)&s6e63m0_22_170,
- .gamma_22_table[5] = (unsigned int *)&s6e63m0_22_200,
- .gamma_22_table[6] = (unsigned int *)&s6e63m0_22_220,
- .gamma_22_table[7] = (unsigned int *)&s6e63m0_22_240,
- .gamma_22_table[8] = (unsigned int *)&s6e63m0_22_260,
- .gamma_22_table[9] = (unsigned int *)&s6e63m0_22_280,
- .gamma_22_table[10] = (unsigned int *)&s6e63m0_22_300,
-
- .gamma_19_table[0] = (unsigned int *)&s6e63m0_19_30,
- .gamma_19_table[1] = (unsigned int *)&s6e63m0_19_90,
- .gamma_19_table[2] = (unsigned int *)&s6e63m0_19_110,
- .gamma_19_table[3] = (unsigned int *)&s6e63m0_19_140,
- .gamma_19_table[4] = (unsigned int *)&s6e63m0_19_170,
- .gamma_19_table[5] = (unsigned int *)&s6e63m0_19_200,
- .gamma_19_table[6] = (unsigned int *)&s6e63m0_19_220,
- .gamma_19_table[7] = (unsigned int *)&s6e63m0_19_240,
- .gamma_19_table[8] = (unsigned int *)&s6e63m0_19_260,
- .gamma_19_table[9] = (unsigned int *)&s6e63m0_19_280,
- .gamma_19_table[10] = (unsigned int *)&s6e63m0_19_300,
-
- .gamma_17_table[0] = (unsigned int *)&s6e63m0_17_30,
- .gamma_17_table[1] = (unsigned int *)&s6e63m0_17_90,
- .gamma_17_table[2] = (unsigned int *)&s6e63m0_17_110,
- .gamma_17_table[3] = (unsigned int *)&s6e63m0_17_140,
- .gamma_17_table[4] = (unsigned int *)&s6e63m0_17_170,
- .gamma_17_table[5] = (unsigned int *)&s6e63m0_17_200,
- .gamma_17_table[6] = (unsigned int *)&s6e63m0_17_220,
- .gamma_17_table[7] = (unsigned int *)&s6e63m0_17_240,
- .gamma_17_table[8] = (unsigned int *)&s6e63m0_17_260,
- .gamma_17_table[9] = (unsigned int *)&s6e63m0_17_280,
- .gamma_17_table[10] = (unsigned int *)&s6e63m0_17_300,
-};
-
-#endif
-
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index 8235b285dbb2..d09bab3bf224 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+extern const u8 aty_postdividers[8];
+
/*
* Hardware cursor support
@@ -359,7 +361,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
extern void aty_reset_engine(const struct atyfb_par *par);
extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index a9a8272f7a6e..05111e90f168 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
/*
* PLL Reference Divider M:
*/
- M = pll_regs[2];
+ M = pll_regs[PLL_REF_DIV];
/*
* PLL Feedback Divider N (Dependent on CLOCK_CNTL):
*/
- N = pll_regs[7 + (clock_cntl & 3)];
+ N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
/*
* PLL Post Divider P (Dependent on CLOCK_CNTL):
*/
- P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
+ P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
+ ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
/*
* PLL Divider Q:
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index 74a62aa193c0..f87cc81f4fa2 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
*/
#define Maximum_DSP_PRECISION 7
-static u8 postdividers[] = {1,2,4,8,3};
+const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
{
@@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
pll->vclk_post_div += (q < 64*8);
pll->vclk_post_div += (q < 32*8);
}
- pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
+ pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
// pll->vclk_post_div <<= 6;
pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
u8 mclk_fb_div, pll_ext_cntl;
pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
- pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
+ pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
mclk_fb_div <<= 1;
@@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
xpost_div += (q < 64*8);
xpost_div += (q < 32*8);
}
- pll->ct.xclk_post_div_real = postdividers[xpost_div];
+ pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
#ifdef CONFIG_PPC
@@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
mpost_div += (q < 64*8);
mpost_div += (q < 32*8);
}
- sclk_post_div_real = postdividers[mpost_div];
+ sclk_post_div_real = aty_postdividers[mpost_div];
pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
pll->ct.spll_cntl2 = mpost_div << 4;
#ifdef DEBUG
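
The old 5-entry postdividers[] table only covered divider codes 0-4, while the hardware encodes a 3-bit post divider split between VCLK_POST_DIV and PLL_EXT_CNTL; the shared 8-entry table plus the index assembly in the atyfb_base.c hunk above decode the full field. A standalone sketch of that decode, with hypothetical register snapshots as inputs:

static const unsigned char aty_postdividers[8] = {1, 2, 4, 8, 3, 5, 6, 12};

static unsigned int decode_post_div(unsigned char vclk_post_div,
				    unsigned char pll_ext_cntl,
				    unsigned int clock_cntl)
{
	unsigned int idx;

	/* low 2 bits: the per-clock field in VCLK_POST_DIV */
	idx = (vclk_post_div >> ((clock_cntl & 3) << 1)) & 3;
	/* bit 2: the per-clock extension bit in PLL_EXT_CNTL */
	idx |= (pll_ext_cntl >> (2 + (clock_cntl & 3))) & 4;

	return aty_postdividers[idx];
}
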
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 3946649b85c8..ba906876cc45 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -42,6 +42,7 @@ struct bmp_dib_header {
u32 colors_important;
} __packed;
+static bool use_bgrt = true;
static bool request_mem_succeeded = false;
static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
@@ -160,6 +161,9 @@ static void efifb_show_boot_graphics(struct fb_info *info)
void *bgrt_image = NULL;
u8 *dst = info->screen_base;
+ if (!use_bgrt)
+ return;
+
if (!bgrt_tab.image_address) {
pr_info("efifb: No BGRT, not showing boot graphics\n");
return;
@@ -290,6 +294,8 @@ static int efifb_setup(char *options)
screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
else if (!strcmp(this_opt, "nowc"))
mem_flags &= ~EFI_MEMORY_WC;
+ else if (!strcmp(this_opt, "nobgrt"))
+ use_bgrt = false;
}
}
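
With the use_bgrt flag, the new nobgrt option lets users suppress the BGRT boot logo; presumably it is passed the same way as the existing options parsed here, e.g. booting with video=efifb:nobgrt.
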
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index ef69273074ba..a3edb20ea4c3 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
return -EFAULT;
+ if (mr->w > 4096 || mr->h > 4096)
+ return -EINVAL;
+
if (mr->w * mr->h * 3 > mr->buffer_size)
return -EINVAL;
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
mr->x, mr->y, mr->w, mr->h);
if (r > 0) {
- if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+ if (copy_to_user(mr->buffer, buf, r))
r = -EFAULT;
}
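
The added bound also protects the size check that follows it: with user-controlled w and h, mr->w * mr->h * 3 can wrap and slip past the buffer_size comparison, and the fixed copy_to_user() now copies only the r bytes actually produced rather than the full user buffer. A userspace sketch of the wrap, assuming 32-bit unsigned fields:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t w = 65536, h = 65536;	/* user-controlled */
	uint32_t need = w * h * 3;	/* 2^32 * 3 wraps to 0 */

	/* need <= buffer_size now passes for any buffer, even though
	 * the read would actually require ~12 GiB. */
	printf("need = %u\n", need);
	return 0;
}
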
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index def3a501acd6..d059d04c63ac 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
/*
* enable controller clock
*/
- clk_enable(fbi->clk);
+ clk_prepare_enable(fbi->clk);
pxa168fb_set_par(info);
@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
failed_free_cmap:
fb_dealloc_cmap(&info->cmap);
failed_free_clk:
- clk_disable(fbi->clk);
+ clk_disable_unprepare(fbi->clk);
failed_free_fbmem:
dma_free_coherent(fbi->dev, info->fix.smem_len,
info->screen_base, fbi->fb_start_dma);
@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
info->screen_base, info->fix.smem_start);
- clk_disable(fbi->clk);
+ clk_disable_unprepare(fbi->clk);
framebuffer_release(info);
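
Under the common clock framework a clock must be prepared (a possibly sleeping step) before it is enabled (an atomic step); clk_prepare_enable() and clk_disable_unprepare() bundle the two for code running in process context, which is why the bare clk_enable()/clk_disable() calls here were insufficient. A minimal sketch, assuming a valid struct clk (helper name hypothetical):

#include <linux/clk.h>

static int fb_clock_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
	if (ret)
		return ret;

	/* ... program the controller ... */

	clk_disable_unprepare(clk);	/* matching teardown */
	return 0;
}
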
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 045e8afe398b..9e88e3f594c2 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
dev_name);
goto out_err0;
}
- /* fall though */
+ /* fall through */
case S9000_ID_ARTIST:
case S9000_ID_HCRX:
case S9000_ID_TIMBER:
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 55ed80c3a17c..f3fbb700f569 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bio.h>
-#include <linux/io.h>
#include <linux/export.h>
+#include <xen/xen.h>
#include <xen/page.h>
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
@@ -20,4 +20,3 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
return false;
#endif
}
-EXPORT_SYMBOL(xen_biovec_phys_mergeable);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7bafa703a992..84575baceebc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1040,18 +1040,33 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
return ret;
for (i = 0; i < count; i++) {
- /* Retry eagain maps */
- if (map_ops[i].status == GNTST_eagain)
- gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
- &map_ops[i].status, __func__);
-
- if (map_ops[i].status == GNTST_okay) {
+ switch (map_ops[i].status) {
+ case GNTST_okay:
+ {
struct xen_page_foreign *foreign;
SetPageForeign(pages[i]);
foreign = xen_page_foreign(pages[i]);
foreign->domid = map_ops[i].dom;
foreign->gref = map_ops[i].ref;
+ break;
+ }
+
+ case GNTST_no_device_space:
+ pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
+ break;
+
+ case GNTST_eagain:
+ /* Retry eagain maps */
+ gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
+ map_ops + i,
+ &map_ops[i].status, __func__);
+ /* Test status in next loop iteration. */
+ i--;
+ break;
+
+ default:
+ break;
}
}
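
Note the retry shape: gnttab_retry_eagain_gop() resubmits the map in place, and the i-- steps the loop back so the refreshed status of the same slot is re-examined by the normal switch on the next pass. A self-contained toy of that revisit pattern, with hypothetical statuses and a retry helper that succeeds on the second attempt:

#include <stdio.h>

enum { OKAY, EAGAIN_RETRY };		/* hypothetical status codes */
struct op { int status; int tries; };

static void resubmit(struct op *op)	/* hypothetical retry helper */
{
	op->status = (++op->tries >= 2) ? OKAY : EAGAIN_RETRY;
}

int main(void)
{
	struct op ops[3] = { {OKAY}, {EAGAIN_RETRY}, {OKAY} };
	int i;

	for (i = 0; i < 3; i++) {
		switch (ops[i].status) {
		case EAGAIN_RETRY:
			resubmit(&ops[i]);
			i--;	/* re-examine this slot next iteration */
			break;
		default:
			break;
		}
	}
	printf("ops[1].tries = %d\n", ops[1].tries);	/* prints 2 */
	return 0;
}
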
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index a6f9ba85dc4b..f5c1af4ce9ab 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -303,6 +303,9 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
*/
flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
+ /* Convert the size to actually allocated. */
+ size = 1UL << (order + XEN_PAGE_SHIFT);
+
/* On ARM this function returns an ioremap'ped virtual address for
* which virt_to_phys doesn't return the corresponding physical
* address. In fact on ARM virt_to_phys only works for kernel direct
@@ -351,6 +354,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
* physical address */
phys = xen_bus_to_phys(dev_addr);
+ /* Convert the size to actually allocated. */
+ size = 1UL << (order + XEN_PAGE_SHIFT);
+
if (((dev_addr + size - 1 <= dma_mask)) ||
range_straddles_page_boundary(phys, size))
xen_destroy_contiguous_region(phys, order);
@@ -662,7 +668,7 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
dma_addr, size, attrs);
#endif
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
/*
@@ -689,7 +695,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
handle, size, attrs);
}
#endif
- return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
}
static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
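
The coherent allocator rounds requests up to a power-of-two number of pages, so the free path and the range checks must use the size that was actually allocated rather than the caller's original size, or the contiguous-region bookkeeping goes out of sync. A userspace sketch of the rounding semantics of the kernel's get_order(), assuming 4 KiB Xen pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

/* Smallest order such that (1 << order) pages cover size. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size = 5000;	/* caller asked for 5000 bytes */
	unsigned int order = get_order(size);

	/* Actually allocated: 2^order pages, here 8192 bytes. */
	printf("order=%u, allocated=%lu\n", order,
	       1UL << (order + PAGE_SHIFT));
	return 0;
}
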
diff --git a/drivers/xen/time.c b/drivers/xen/time.c
index 3e741cd1409c..0968859c29d0 100644
--- a/drivers/xen/time.c
+++ b/drivers/xen/time.c
@@ -175,7 +175,7 @@ void __init xen_time_setup_guest(void)
xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable,
VMASST_TYPE_runstate_update_flag);
- pv_time_ops.steal_clock = xen_steal_clock;
+ pv_ops.time.steal_clock = xen_steal_clock;
static_key_slow_inc(&paravirt_steal_enabled);
if (xen_runstate_remote)
diff --git a/drivers/xen/xen-acpi-pad.c b/drivers/xen/xen-acpi-pad.c
index 23d1808fe027..e25ab76b9c99 100644
--- a/drivers/xen/xen-acpi-pad.c
+++ b/drivers/xen/xen-acpi-pad.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/acpi.h>
+#include <xen/xen.h>
#include <xen/interface/version.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypercall.h>
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
index 025a9a5e1c32..55a756c60746 100644
--- a/fs/afs/addr_list.c
+++ b/fs/afs/addr_list.c
@@ -17,11 +17,6 @@
#include "internal.h"
#include "afs_fs.h"
-//#define AFS_MAX_ADDRESSES
-// ((unsigned int)((PAGE_SIZE - sizeof(struct afs_addr_list)) /
-// sizeof(struct sockaddr_rxrpc)))
-#define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8))
-
/*
* Release an address list.
*/
@@ -43,11 +38,15 @@ struct afs_addr_list *afs_alloc_addrlist(unsigned int nr,
_enter("%u,%u,%u", nr, service, port);
+ if (nr > AFS_MAX_ADDRESSES)
+ nr = AFS_MAX_ADDRESSES;
+
alist = kzalloc(struct_size(alist, addrs, nr), GFP_KERNEL);
if (!alist)
return NULL;
refcount_set(&alist->usage, 1);
+ alist->max_addrs = nr;
for (i = 0; i < nr; i++) {
struct sockaddr_rxrpc *srx = &alist->addrs[i];
@@ -109,8 +108,6 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
} while (p < end);
_debug("%u/%u addresses", nr, AFS_MAX_ADDRESSES);
- if (nr > AFS_MAX_ADDRESSES)
- nr = AFS_MAX_ADDRESSES;
alist = afs_alloc_addrlist(nr, service, port);
if (!alist)
@@ -119,8 +116,10 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
/* Extract the addresses */
p = text;
do {
- struct sockaddr_rxrpc *srx = &alist->addrs[alist->nr_addrs];
const char *q, *stop;
+ unsigned int xport = port;
+ __be32 x[4];
+ int family;
if (*p == delim) {
p++;
@@ -136,19 +135,12 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
break;
}
- if (in4_pton(p, q - p,
- (u8 *)&srx->transport.sin6.sin6_addr.s6_addr32[3],
- -1, &stop)) {
- srx->transport.sin6.sin6_addr.s6_addr32[0] = 0;
- srx->transport.sin6.sin6_addr.s6_addr32[1] = 0;
- srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
- } else if (in6_pton(p, q - p,
- srx->transport.sin6.sin6_addr.s6_addr,
- -1, &stop)) {
- /* Nothing to do */
- } else {
+ if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop))
+ family = AF_INET;
+ else if (in6_pton(p, q - p, (u8 *)x, -1, &stop))
+ family = AF_INET6;
+ else
goto bad_address;
- }
if (stop != q)
goto bad_address;
@@ -160,7 +152,7 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
if (p < end) {
if (*p == '+') {
/* Port number specification "+1234" */
- unsigned int xport = 0;
+ xport = 0;
p++;
if (p >= end || !isdigit(*p))
goto bad_address;
@@ -171,7 +163,6 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
goto bad_address;
p++;
} while (p < end && isdigit(*p));
- srx->transport.sin6.sin6_port = htons(xport);
} else if (*p == delim) {
p++;
} else {
@@ -179,8 +170,12 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
}
}
- alist->nr_addrs++;
- } while (p < end && alist->nr_addrs < AFS_MAX_ADDRESSES);
+ if (family == AF_INET)
+ afs_merge_fs_addr4(alist, x[0], xport);
+ else
+ afs_merge_fs_addr6(alist, x, xport);
+
+ } while (p < end);
_leave(" = [nr %u]", alist->nr_addrs);
return alist;
@@ -237,19 +232,23 @@ struct afs_addr_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry)
*/
void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
{
- struct sockaddr_in6 *a;
- __be16 xport = htons(port);
+ struct sockaddr_rxrpc *srx;
+ u32 addr = ntohl(xdr);
int i;
+ if (alist->nr_addrs >= alist->max_addrs)
+ return;
+
for (i = 0; i < alist->nr_ipv4; i++) {
- a = &alist->addrs[i].transport.sin6;
- if (xdr == a->sin6_addr.s6_addr32[3] &&
- xport == a->sin6_port)
+ struct sockaddr_in *a = &alist->addrs[i].transport.sin;
+ u32 a_addr = ntohl(a->sin_addr.s_addr);
+ u16 a_port = ntohs(a->sin_port);
+
+ if (addr == a_addr && port == a_port)
return;
- if (xdr == a->sin6_addr.s6_addr32[3] &&
- (u16 __force)xport < (u16 __force)a->sin6_port)
+ if (addr == a_addr && port < a_port)
break;
- if ((u32 __force)xdr < (u32 __force)a->sin6_addr.s6_addr32[3])
+ if (addr < a_addr)
break;
}
@@ -258,12 +257,11 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
alist->addrs + i,
sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
- a = &alist->addrs[i].transport.sin6;
- a->sin6_port = xport;
- a->sin6_addr.s6_addr32[0] = 0;
- a->sin6_addr.s6_addr32[1] = 0;
- a->sin6_addr.s6_addr32[2] = htonl(0xffff);
- a->sin6_addr.s6_addr32[3] = xdr;
+ srx = &alist->addrs[i];
+ srx->transport_len = sizeof(srx->transport.sin);
+ srx->transport.sin.sin_family = AF_INET;
+ srx->transport.sin.sin_port = htons(port);
+ srx->transport.sin.sin_addr.s_addr = xdr;
alist->nr_ipv4++;
alist->nr_addrs++;
}
@@ -273,18 +271,20 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
*/
void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
{
- struct sockaddr_in6 *a;
- __be16 xport = htons(port);
+ struct sockaddr_rxrpc *srx;
int i, diff;
+ if (alist->nr_addrs >= alist->max_addrs)
+ return;
+
for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
- a = &alist->addrs[i].transport.sin6;
+ struct sockaddr_in6 *a = &alist->addrs[i].transport.sin6;
+ u16 a_port = ntohs(a->sin6_port);
+
diff = memcmp(xdr, &a->sin6_addr, 16);
- if (diff == 0 &&
- xport == a->sin6_port)
+ if (diff == 0 && port == a_port)
return;
- if (diff == 0 &&
- (u16 __force)xport < (u16 __force)a->sin6_port)
+ if (diff == 0 && port < a_port)
break;
if (diff < 0)
break;
@@ -295,12 +295,11 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
alist->addrs + i,
sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
- a = &alist->addrs[i].transport.sin6;
- a->sin6_port = xport;
- a->sin6_addr.s6_addr32[0] = xdr[0];
- a->sin6_addr.s6_addr32[1] = xdr[1];
- a->sin6_addr.s6_addr32[2] = xdr[2];
- a->sin6_addr.s6_addr32[3] = xdr[3];
+ srx = &alist->addrs[i];
+ srx->transport_len = sizeof(srx->transport.sin6);
+ srx->transport.sin6.sin6_family = AF_INET6;
+ srx->transport.sin6.sin6_port = htons(port);
+ memcpy(&srx->transport.sin6.sin6_addr, xdr, 16);
alist->nr_addrs++;
}
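
The parser now detects the family with in4_pton()/in6_pton() and defers storage to the merge helpers, which keep native sockaddr_in/sockaddr_in6 entries deduplicated and sorted (IPv4 first). Comparing with ntohl()/ntohs() instead of raw __be32/__be16 values makes the sort order independent of host endianness; a userspace illustration:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Network-order addresses converted to host order, as the
	 * merge helpers above do before comparing. */
	uint32_t a = ntohl(inet_addr("10.0.0.2"));
	uint32_t b = ntohl(inet_addr("9.255.255.255"));

	printf("%s sorts first\n", a < b ? "10.0.0.2" : "9.255.255.255");
	return 0;
}
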
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index f3d0bef16d78..6127f0fcd62c 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -514,6 +514,8 @@ static int afs_alloc_anon_key(struct afs_cell *cell)
*/
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
+ struct hlist_node **p;
+ struct afs_cell *pcell;
int ret;
if (!cell->anonymous_key) {
@@ -534,7 +536,18 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
return ret;
mutex_lock(&net->proc_cells_lock);
- list_add_tail(&cell->proc_link, &net->proc_cells);
+ for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
+ pcell = hlist_entry(*p, struct afs_cell, proc_link);
+ if (strcmp(cell->name, pcell->name) < 0)
+ break;
+ }
+
+ cell->proc_link.pprev = p;
+ cell->proc_link.next = *p;
+ rcu_assign_pointer(*p, &cell->proc_link.next);
+ if (cell->proc_link.next)
+ cell->proc_link.next->pprev = &cell->proc_link.next;
+
afs_dynroot_mkdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
return 0;
@@ -550,7 +563,7 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
afs_proc_cell_remove(cell);
mutex_lock(&net->proc_cells_lock);
- list_del_init(&cell->proc_link);
+ hlist_del_rcu(&cell->proc_link);
afs_dynroot_rmdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
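
proc_cells becomes an hlist kept sorted by cell name; the open-coded walk finds the first entry that sorts after the new cell and publishes the node with rcu_assign_pointer(), so /proc readers traversing under RCU see either the old or the new next pointer, never a torn list. The walk itself, reduced to a plain singly linked list (the kernel version additionally maintains pprev links and uses the RCU publishing store):

#include <stdio.h>
#include <string.h>

struct cell { const char *name; struct cell *next; };

static void insert_sorted(struct cell **head, struct cell *cell)
{
	struct cell **p;

	for (p = head; *p; p = &(*p)->next)
		if (strcmp(cell->name, (*p)->name) < 0)
			break;
	cell->next = *p;
	*p = cell;	/* rcu_assign_pointer(*p, cell) in the kernel */
}

int main(void)
{
	struct cell a = { "abc" }, c = { "cde" }, b = { "bcd" };
	struct cell *head = NULL, *i;

	insert_sorted(&head, &a);
	insert_sorted(&head, &c);
	insert_sorted(&head, &b);
	for (i = head; i; i = i->next)
		printf("%s\n", i->name);	/* abc bcd cde */
	return 0;
}
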
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 1cde710a8013..f29c6dade7f6 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -265,7 +265,7 @@ int afs_dynroot_populate(struct super_block *sb)
return -ERESTARTSYS;
net->dynroot_sb = sb;
- list_for_each_entry(cell, &net->proc_cells, proc_link) {
+ hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
ret = afs_dynroot_mkdir(net, cell);
if (ret < 0)
goto error;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 871a228d7f37..72de1f157d20 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -73,12 +73,14 @@ struct afs_addr_list {
struct rcu_head rcu; /* Must be first */
refcount_t usage;
u32 version; /* Version */
- unsigned short nr_addrs;
- unsigned short index; /* Address currently in use */
- unsigned short nr_ipv4; /* Number of IPv4 addresses */
+ unsigned char max_addrs;
+ unsigned char nr_addrs;
+ unsigned char index; /* Address currently in use */
+ unsigned char nr_ipv4; /* Number of IPv4 addresses */
unsigned long probed; /* Mask of servers that have been probed */
unsigned long yfs; /* Mask of servers that are YFS */
struct sockaddr_rxrpc addrs[];
+#define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8))
};
/*
@@ -242,7 +244,7 @@ struct afs_net {
seqlock_t cells_lock;
struct mutex proc_cells_lock;
- struct list_head proc_cells;
+ struct hlist_head proc_cells;
/* Known servers. Theoretically each fileserver can only be in one
* cell, but in practice, people create aliases and subsets and there's
@@ -320,7 +322,7 @@ struct afs_cell {
struct afs_net *net;
struct key *anonymous_key; /* anonymous user key for this cell */
struct work_struct manager; /* Manager for init/deinit/dns */
- struct list_head proc_link; /* /proc cell list link */
+ struct hlist_node proc_link; /* /proc cell list link */
#ifdef CONFIG_AFS_FSCACHE
struct fscache_cookie *cache; /* caching cookie */
#endif
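
The AFS_MAX_ADDRESSES cap of bits-per-unsigned-long lines up with the probed and yfs fields just above it, which track one bit of per-address state in a single unsigned long; the counters can shrink to unsigned char since they never exceed that cap. A userspace sketch of the per-slot bit bookkeeping:

#include <stdio.h>

#define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8))

int main(void)
{
	unsigned long probed = 0;
	unsigned int index = 5;		/* hypothetical address slot */

	probed |= 1UL << index;		/* mark slot 5 as probed */
	printf("max addresses: %u, slot 5 probed: %lu\n",
	       AFS_MAX_ADDRESSES, (probed >> index) & 1);
	return 0;
}
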
diff --git a/fs/afs/main.c b/fs/afs/main.c
index e84fe822a960..107427688edd 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -87,7 +87,7 @@ static int __net_init afs_net_init(struct net *net_ns)
timer_setup(&net->cells_timer, afs_cells_timer, 0);
mutex_init(&net->proc_cells_lock);
- INIT_LIST_HEAD(&net->proc_cells);
+ INIT_HLIST_HEAD(&net->proc_cells);
seqlock_init(&net->fs_lock);
net->fs_servers = RB_ROOT;
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 476dcbb79713..9101f62707af 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -33,9 +33,8 @@ static inline struct afs_net *afs_seq2net_single(struct seq_file *m)
static int afs_proc_cells_show(struct seq_file *m, void *v)
{
struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
- struct afs_net *net = afs_seq2net(m);
- if (v == &net->proc_cells) {
+ if (v == SEQ_START_TOKEN) {
/* display header on line 1 */
seq_puts(m, "USE NAME\n");
return 0;
@@ -50,12 +49,12 @@ static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
__acquires(rcu)
{
rcu_read_lock();
- return seq_list_start_head(&afs_seq2net(m)->proc_cells, *_pos);
+ return seq_hlist_start_head_rcu(&afs_seq2net(m)->proc_cells, *_pos);
}
static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos)
{
- return seq_list_next(v, &afs_seq2net(m)->proc_cells, pos);
+ return seq_hlist_next_rcu(v, &afs_seq2net(m)->proc_cells, pos);
}
static void afs_proc_cells_stop(struct seq_file *m, void *v)
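
The show routine above keeps using list_entry(), which still works on an hlist_node because both list_entry() and hlist_entry() are just container_of(). The header check changes because seq_list_start_head() returned the list head itself as the line-0 sentinel, while the RCU hlist helpers return the generic SEQ_START_TOKEN. A userspace sketch of the token pattern, with hypothetical start/next/show functions standing in for the seq_file ops:

#include <stdio.h>

#define SEQ_START_TOKEN ((void *)1)

static const char *cells[] = { "alpha", "mango", "zebra", NULL };

static void *start(long long *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;	/* position 0 means "print the header" */
	return cells[*pos - 1] ? (void *)&cells[*pos - 1] : NULL;
}

static void *next(long long *pos)
{
	++*pos;
	return cells[*pos - 1] ? (void *)&cells[*pos - 1] : NULL;
}

static void show(void *v)
{
	if (v == SEQ_START_TOKEN) {
		printf("USE NAME\n");	/* header on line 1 */
		return;
	}
	printf("  1 %s\n", *(const char **)v);
}

int main(void)
{
	long long pos = 0;

	for (void *v = start(&pos); v; v = next(&pos))
		show(v);
	return 0;
}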
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 35f2ae30f31f..77a83790a31f 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -690,8 +690,6 @@ static void afs_process_async_call(struct work_struct *work)
}
if (call->state == AFS_CALL_COMPLETE) {
- call->reply[0] = NULL;
-
/* We have two refs to release - one from the alloc and one
* queued with the work item - and we can't just deallocate the
* call because the work item may be queued again.
diff --git a/fs/aio.c b/fs/aio.c
index b9350f3360c6..301e6314183b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -2135,12 +2135,12 @@ COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
compat_long_t, min_nr,
compat_long_t, nr,
struct io_event __user *, events,
- struct compat_timespec __user *, timeout)
+ struct old_timespec32 __user *, timeout)
{
struct timespec64 t;
int ret;
- if (timeout && compat_get_timespec64(&t, timeout))
+ if (timeout && get_old_timespec32(&t, timeout))
return -EFAULT;
ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
@@ -2160,7 +2160,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
compat_long_t, min_nr,
compat_long_t, nr,
struct io_event __user *, events,
- struct compat_timespec __user *, timeout,
+ struct old_timespec32 __user *, timeout,
const struct __compat_aio_sigset __user *, usig)
{
struct __compat_aio_sigset ksig = { NULL, };
@@ -2168,7 +2168,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
struct timespec64 t;
int ret;
- if (timeout && compat_get_timespec64(&t, timeout))
+ if (timeout && get_old_timespec32(&t, timeout))
return -EFAULT;
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
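
The compat_timespec to old_timespec32 rename comes from the y2038 work: the 32-bit timespec layout is not only a compat-task concern, since 32-bit native kernels use it too, so the compat_ prefix was misleading. A simplified model of the widening step; the real get_old_timespec32() also copies the struct from user space and can fail with -EFAULT:

#include <stdio.h>
#include <stdint.h>

struct old_timespec32 { int32_t tv_sec; int32_t tv_nsec; };
struct timespec64 { int64_t tv_sec; long tv_nsec; };

/* Hypothetical stand-in for get_old_timespec32(): widen before any use. */
static int get_old_timespec32(struct timespec64 *ts,
			      const struct old_timespec32 *uts)
{
	ts->tv_sec = uts->tv_sec;	/* sign-extend 32 -> 64 bits */
	ts->tv_nsec = uts->tv_nsec;
	return 0;			/* kernel returns -EFAULT on a bad copy */
}

int main(void)
{
	struct old_timespec32 user_ts = { 5, 500 };
	struct timespec64 t;

	if (!get_old_timespec32(&t, &user_ts))
		printf("timeout: %lld.%09ld\n", (long long)t.tv_sec, t.tv_nsec);
	return 0;
}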
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index efae2fb0930a..54207327f98f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1580,7 +1580,7 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
}
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
- const siginfo_t *siginfo)
+ const kernel_siginfo_t *siginfo)
{
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
@@ -1782,7 +1782,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_note_info *info,
- const siginfo_t *siginfo, struct pt_regs *regs)
+ const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
struct task_struct *dump_task = current;
const struct user_regset_view *view = task_user_regset_view(dump_task);
@@ -2031,7 +2031,7 @@ static int elf_note_info_init(struct elf_note_info *info)
static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_note_info *info,
- const siginfo_t *siginfo, struct pt_regs *regs)
+ const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
struct list_head *t;
struct core_thread *ct;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index ae750b1574a2..68ebe188446a 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -112,11 +112,11 @@ static int find_extent_in_eb(const struct extent_buffer *eb,
}
struct preftree {
- struct rb_root root;
+ struct rb_root_cached root;
unsigned int count;
};
-#define PREFTREE_INIT { .root = RB_ROOT, .count = 0 }
+#define PREFTREE_INIT { .root = RB_ROOT_CACHED, .count = 0 }
struct preftrees {
struct preftree direct; /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
@@ -225,14 +225,15 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
struct prelim_ref *newref,
struct share_check *sc)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node **p;
struct rb_node *parent = NULL;
struct prelim_ref *ref;
int result;
+ bool leftmost = true;
root = &preftree->root;
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
while (*p) {
parent = *p;
@@ -242,6 +243,7 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
p = &(*p)->rb_left;
} else if (result > 0) {
p = &(*p)->rb_right;
+ leftmost = false;
} else {
/* Identical refs, merge them and free @newref */
struct extent_inode_elem *eie = ref->inode_list;
@@ -272,7 +274,7 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
preftree->count++;
trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
rb_link_node(&newref->rbnode, parent, p);
- rb_insert_color(&newref->rbnode, root);
+ rb_insert_color_cached(&newref->rbnode, root, leftmost);
}
/*
@@ -283,11 +285,11 @@ static void prelim_release(struct preftree *preftree)
{
struct prelim_ref *ref, *next_ref;
- rbtree_postorder_for_each_entry_safe(ref, next_ref, &preftree->root,
- rbnode)
+ rbtree_postorder_for_each_entry_safe(ref, next_ref,
+ &preftree->root.rb_root, rbnode)
free_pref(ref);
- preftree->root = RB_ROOT;
+ preftree->root = RB_ROOT_CACHED;
preftree->count = 0;
}
@@ -627,7 +629,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
* freeing the entire indirect tree when we're done. In some test
* cases, the tree can grow quite large (~200k objects).
*/
- while ((rnode = rb_first(&preftrees->indirect.root))) {
+ while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
struct prelim_ref *ref;
ref = rb_entry(rnode, struct prelim_ref, rbnode);
@@ -637,7 +639,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
goto out;
}
- rb_erase(&ref->rbnode, &preftrees->indirect.root);
+ rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
preftrees->indirect.count--;
if (ref->count == 0) {
@@ -717,9 +719,9 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
struct preftree *tree = &preftrees->indirect_missing_keys;
struct rb_node *node;
- while ((node = rb_first(&tree->root))) {
+ while ((node = rb_first_cached(&tree->root))) {
ref = rb_entry(node, struct prelim_ref, rbnode);
- rb_erase(node, &tree->root);
+ rb_erase_cached(node, &tree->root);
BUG_ON(ref->parent); /* should not be a direct ref */
BUG_ON(ref->key_for_search.type);
@@ -769,7 +771,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
spin_lock(&head->lock);
- for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
+ for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
node = rb_entry(n, struct btrfs_delayed_ref_node,
ref_node);
if (node->seq > seq)
@@ -1229,14 +1231,14 @@ again:
if (ret)
goto out;
- WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root));
+ WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
extent_item_pos, total_refs, sc, ignore_offset);
if (ret)
goto out;
- WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root));
+ WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
/*
* This walks the tree of merged and resolved refs. Tree blocks are
@@ -1245,7 +1247,7 @@ again:
*
* We release the entire tree in one go before returning.
*/
- node = rb_first(&preftrees.direct.root);
+ node = rb_first_cached(&preftrees.direct.root);
while (node) {
ref = rb_entry(node, struct prelim_ref, rbnode);
node = rb_next(&ref->rbnode);
@@ -1468,7 +1470,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
struct seq_list elem = SEQ_LIST_INIT(elem);
int ret = 0;
struct share_check shared = {
- .root_objectid = root->objectid,
+ .root_objectid = root->root_key.objectid,
.inum = inum,
.share_count = 0,
};
@@ -2031,7 +2033,8 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
/* path must be released before calling iterate()! */
btrfs_debug(fs_root->fs_info,
"following ref at offset %u for inode %llu in tree %llu",
- cur, found_key.objectid, fs_root->objectid);
+ cur, found_key.objectid,
+ fs_root->root_key.objectid);
ret = iterate(parent, name_len,
(unsigned long)(iref + 1), eb, ctx);
if (ret)
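
All of the rb_root to rb_root_cached conversions in this file follow one recipe: track during insertion whether the search ever descended right (if it did, the new node cannot be the new minimum), and switch rb_first()/rb_erase()/rb_insert_color() to their _cached variants so the leftmost lookup becomes O(1). A stripped-down model using an unbalanced binary search tree, since the kernel's red-black rebalancing is beside the point here:

#include <stdio.h>
#include <stdlib.h>

struct node { long key; struct node *left, *right; };
struct root_cached { struct node *root, *leftmost; };

static void insert_cached(struct root_cached *tree, struct node *n)
{
	struct node **p = &tree->root;
	int leftmost = 1;

	while (*p) {
		if (n->key < (*p)->key) {
			p = &(*p)->left;
		} else {
			p = &(*p)->right;
			leftmost = 0;	/* went right once: cannot be the minimum */
		}
	}
	*p = n;
	if (leftmost)
		tree->leftmost = n;	/* rb_insert_color_cached() does likewise */
}

int main(void)
{
	struct root_cached tree = { NULL, NULL };
	long keys[] = { 42, 7, 99, 3 };

	for (unsigned int i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		struct node *n = calloc(1, sizeof(*n));

		n->key = keys[i];
		insert_cached(&tree, n);
	}
	printf("min key: %ld\n", tree.leftmost->key);	/* 3, no left-spine walk */
	return 0;
}

resolve_indirect_refs() and add_missing_keys() pop the minimum in a loop, which is exactly the access pattern the cached leftmost pointer accelerates.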
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 1343ac57b438..97d91e55b70a 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -206,7 +206,7 @@ static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
static inline unsigned long btrfs_inode_hash(u64 objectid,
const struct btrfs_root *root)
{
- u64 h = objectid ^ (root->objectid * GOLDEN_RATIO_PRIME);
+ u64 h = objectid ^ (root->root_key.objectid * GOLDEN_RATIO_PRIME);
#if BITS_PER_LONG == 32
h = (h >> 32) ^ (h & 0xffffffff);
@@ -339,15 +339,15 @@ static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
struct btrfs_root *root = inode->root;
/* Output minus objectid, which is more meaningful */
- if (root->objectid >= BTRFS_LAST_FREE_OBJECTID)
+ if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID)
btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum 0x%08x expected csum 0x%08x mirror %d",
- root->objectid, btrfs_ino(inode),
+ root->root_key.objectid, btrfs_ino(inode),
logical_start, csum, csum_expected, mirror_num);
else
btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum 0x%08x expected csum 0x%08x mirror %d",
- root->objectid, btrfs_ino(inode),
+ root->root_key.objectid, btrfs_ino(inode),
logical_start, csum, csum_expected, mirror_num);
}
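
btrfs_inode_hash() scrambles the subvolume root id with a golden-ratio multiplier before XORing in the inode number, then folds the 64-bit result on 32-bit builds. A sketch with an illustrative multiplier; the exact GOLDEN_RATIO_PRIME value depends on kernel version and word size, so do not treat the constant below as authoritative:

#include <stdio.h>
#include <stdint.h>

/* Illustrative odd constant; not necessarily the kernel's exact value. */
#define DEMO_GOLDEN_RATIO 0x61c8864680b583ebULL

static unsigned long inode_hash(uint64_t objectid, uint64_t root_objectid)
{
	uint64_t h = objectid ^ (root_objectid * DEMO_GOLDEN_RATIO);

	if (sizeof(unsigned long) == 4)	/* fold on 32-bit, as in the hunk */
		h = (h >> 32) ^ (h & 0xffffffff);
	return (unsigned long)h;
}

int main(void)
{
	/* Same inode number hashes differently under different subvolume roots. */
	printf("%#lx\n", inode_hash(257, 5));
	printf("%#lx\n", inode_hash(257, 256));
	return 0;
}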
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 833cf3c35b4d..2e43fba44035 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1594,6 +1594,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
{
unsigned int num_pages;
unsigned int i;
+ size_t size;
u64 dev_bytenr;
int ret;
@@ -1608,9 +1609,8 @@ static int btrfsic_read_block(struct btrfsic_state *state,
num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
PAGE_SHIFT;
- block_ctx->mem_to_free = kcalloc(sizeof(*block_ctx->datav) +
- sizeof(*block_ctx->pagev),
- num_pages, GFP_NOFS);
+ size = sizeof(*block_ctx->datav) + sizeof(*block_ctx->pagev);
+ block_ctx->mem_to_free = kcalloc(num_pages, size, GFP_NOFS);
if (!block_ctx->mem_to_free)
return -ENOMEM;
block_ctx->datav = block_ctx->mem_to_free;
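
The old kcalloc() call passed the element size as the count and the count as the size. The byte total is the same, but kcalloc()'s documented contract (and the overflow check built on it) is kcalloc(n, size, flags), so the hunk swaps the arguments into their conventional order and gives the per-page size a name. A userspace model of the overflow check that makes the argument order meaningful:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* calloc-style allocators verify that n * size does not wrap around. */
static void *checked_calloc(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return calloc(n, size);
}

int main(void)
{
	/* Per-page cost: one data pointer plus one page pointer, as in the
	 * btrfsic_read_block() hunk above. */
	size_t per_page = sizeof(char *) + sizeof(void *);

	void *ok  = checked_calloc(4096, per_page);
	void *bad = checked_calloc(SIZE_MAX / 2, per_page);

	printf("ok=%p bad=%p\n", ok, bad);	/* bad is NULL, not a tiny buffer */
	free(ok);
	return 0;
}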
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 9bfa66592aa7..8703ce68fe9d 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -528,7 +528,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct extent_io_tree *tree;
struct extent_map_tree *em_tree;
struct compressed_bio *cb;
unsigned long compressed_len;
@@ -545,7 +544,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int faili = 0;
u32 *sums;
- tree = &BTRFS_I(inode)->io_tree;
em_tree = &BTRFS_I(inode)->extent_tree;
/* we need the actual starting offset of this extent in the file */
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d436fb4c002e..2ee43b6a4f09 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -52,42 +52,6 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p)
}
}
-/*
- * reset all the locked nodes in the path to spinning locks.
- *
- * held is used to keep lockdep happy, when lockdep is enabled
- * we set held to a blocking lock before we go around and
- * retake all the spinlocks in the path. You can safely use NULL
- * for held
- */
-noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
- struct extent_buffer *held, int held_rw)
-{
- int i;
-
- if (held) {
- btrfs_set_lock_blocking_rw(held, held_rw);
- if (held_rw == BTRFS_WRITE_LOCK)
- held_rw = BTRFS_WRITE_LOCK_BLOCKING;
- else if (held_rw == BTRFS_READ_LOCK)
- held_rw = BTRFS_READ_LOCK_BLOCKING;
- }
- btrfs_set_path_blocking(p);
-
- for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
- if (p->nodes[i] && p->locks[i]) {
- btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
- if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
- p->locks[i] = BTRFS_WRITE_LOCK;
- else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
- p->locks[i] = BTRFS_READ_LOCK;
- }
- }
-
- if (held)
- btrfs_clear_lock_blocking_rw(held, held_rw);
-}
-
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
@@ -207,7 +171,7 @@ static void add_root_to_dirty_list(struct btrfs_root *root)
spin_lock(&fs_info->trans_lock);
if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
/* Want the extent tree to be the last on the list */
- if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
+ if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
list_move_tail(&root->dirty_list,
&fs_info->dirty_cowonly_roots);
else
@@ -1306,7 +1270,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
}
}
- btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
@@ -1815,8 +1778,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
int orig_slot = path->slots[level];
u64 orig_ptr;
- if (level == 0)
- return 0;
+ ASSERT(level > 0);
mid = path->nodes[level];
@@ -2483,7 +2445,6 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
btrfs_set_path_blocking(p);
reada_for_balance(fs_info, p, level);
sret = split_node(trans, root, p, level);
- btrfs_clear_path_blocking(p, NULL, 0);
BUG_ON(sret > 0);
if (sret) {
@@ -2504,7 +2465,6 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
btrfs_set_path_blocking(p);
reada_for_balance(fs_info, p, level);
sret = balance_level(trans, root, p, level);
- btrfs_clear_path_blocking(p, NULL, 0);
if (sret) {
ret = sret;
@@ -2789,7 +2749,10 @@ again:
}
cow_done:
p->nodes[level] = b;
- btrfs_clear_path_blocking(p, NULL, 0);
+ /*
+ * Leave the path with blocking locks to avoid a massive
+ * number of lock context switches; this is done on purpose.
+ */
/*
* we have a lock on b and as long as we aren't changing
@@ -2871,8 +2834,6 @@ cow_done:
if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_lock(b);
- btrfs_clear_path_blocking(p, b,
- BTRFS_WRITE_LOCK);
}
p->locks[level] = BTRFS_WRITE_LOCK;
} else {
@@ -2880,8 +2841,6 @@ cow_done:
if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_read_lock(b);
- btrfs_clear_path_blocking(p, b,
- BTRFS_READ_LOCK);
}
p->locks[level] = BTRFS_READ_LOCK;
}
@@ -2900,7 +2859,6 @@ cow_done:
btrfs_set_path_blocking(p);
err = split_leaf(trans, root, key,
p, ins_len, ret == 0);
- btrfs_clear_path_blocking(p, NULL, 0);
BUG_ON(err > 0);
if (err) {
@@ -2910,7 +2868,7 @@ cow_done:
}
if (!p->search_for_split)
unlock_up(p, level, lowest_unlock,
- min_write_lock_level, &write_lock_level);
+ min_write_lock_level, NULL);
goto done;
}
}
@@ -2961,13 +2919,16 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
again:
b = get_old_root(root, time_seq);
+ if (!b) {
+ ret = -EIO;
+ goto done;
+ }
level = btrfs_header_level(b);
p->locks[level] = BTRFS_READ_LOCK;
while (b) {
level = btrfs_header_level(b);
p->nodes[level] = b;
- btrfs_clear_path_blocking(p, NULL, 0);
/*
* we have a lock on b and as long as we aren't changing
@@ -3013,8 +2974,6 @@ again:
if (!err) {
btrfs_set_path_blocking(p);
btrfs_tree_read_lock(b);
- btrfs_clear_path_blocking(p, b,
- BTRFS_READ_LOCK);
}
b = tree_mod_log_rewind(fs_info, p, b, time_seq);
if (!b) {
@@ -5198,7 +5157,6 @@ find_next_key:
path->locks[level - 1] = BTRFS_READ_LOCK;
path->nodes[level - 1] = cur;
unlock_up(path, level, 1, 0, NULL);
- btrfs_clear_path_blocking(path, NULL, 0);
}
out:
path->keep_locks = keep_locks;
@@ -5783,8 +5741,6 @@ again:
if (!ret) {
btrfs_set_path_blocking(path);
btrfs_tree_read_lock(next);
- btrfs_clear_path_blocking(path, next,
- BTRFS_READ_LOCK);
}
next_rw_lock = BTRFS_READ_LOCK;
}
@@ -5820,8 +5776,6 @@ again:
if (!ret) {
btrfs_set_path_blocking(path);
btrfs_tree_read_lock(next);
- btrfs_clear_path_blocking(path, next,
- BTRFS_READ_LOCK);
}
next_rw_lock = BTRFS_READ_LOCK;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2cddfe7806a4..68ca41dbbef3 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -41,12 +41,6 @@ extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_free_space_cachep;
struct btrfs_ordered_sum;
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-#define STATIC noinline
-#else
-#define STATIC static noinline
-#endif
-
#define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */
#define BTRFS_MAX_MIRRORS 3
@@ -367,11 +361,13 @@ struct btrfs_dev_replace {
struct mutex lock_finishing_cancel_unmount;
rwlock_t lock;
- atomic_t read_locks;
atomic_t blocking_readers;
wait_queue_head_t read_lock_wq;
struct btrfs_scrub_progress scrub_progress;
+
+ struct percpu_counter bio_counter;
+ wait_queue_head_t replace_wait;
};
/* For raid type sysfs entries */
@@ -1094,9 +1090,6 @@ struct btrfs_fs_info {
/* device replace state */
struct btrfs_dev_replace dev_replace;
- struct percpu_counter bio_counter;
- wait_queue_head_t replace_wait;
-
struct semaphore uuid_tree_rescan_sem;
/* Used to reclaim the metadata space in the background. */
@@ -1202,18 +1195,12 @@ struct btrfs_root {
int last_log_commit;
pid_t log_start_pid;
- u64 objectid;
u64 last_trans;
u32 type;
u64 highest_objectid;
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- /* only used with CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled */
- u64 alloc_bytenr;
-#endif
-
u64 defrag_trans_start;
struct btrfs_key defrag_progress;
struct btrfs_key defrag_max;
@@ -1286,6 +1273,10 @@ struct btrfs_root {
spinlock_t qgroup_meta_rsv_lock;
u64 qgroup_meta_rsv_pertrans;
u64 qgroup_meta_rsv_prealloc;
+
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ u64 alloc_bytenr;
+#endif
};
struct btrfs_file_private {
@@ -2607,10 +2598,8 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info);
-int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info);
+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
+int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
@@ -2771,7 +2760,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
- int update_size);
+ bool update_size);
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *dest, u64 num_bytes,
int min_factor);
@@ -2877,8 +2866,6 @@ void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
-void btrfs_clear_path_blocking(struct btrfs_path *p,
- struct extent_buffer *held, int held_rw);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -3021,8 +3008,7 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
/* dir-item.c */
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
const char *name, int name_len);
-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *name,
+int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
int name_len, struct btrfs_inode *dir,
struct btrfs_key *location, u8 type, u64 index);
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
@@ -3180,8 +3166,8 @@ void __cold btrfs_destroy_cachep(void);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *was_new);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
- struct page *page, size_t pg_offset,
- u64 start, u64 end, int create);
+ struct page *page, size_t pg_offset,
+ u64 start, u64 end, int create);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
@@ -3201,9 +3187,6 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
extern const struct dentry_operations btrfs_dentry_operations;
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-void btrfs_test_inode_set_ops(struct inode *inode);
-#endif
/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -3716,18 +3699,19 @@ static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
/* Sanity test specific functions */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+void btrfs_test_inode_set_ops(struct inode *inode);
void btrfs_test_destroy_inode(struct inode *inode);
-#endif
static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
- &fs_info->fs_state)))
- return 1;
-#endif
+ return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
+}
+#else
+static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
+{
return 0;
}
+#endif
static inline void cond_wake_up(struct wait_queue_head *wq)
{
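
The btrfs_is_testing() rework trades an #ifdef inside the function body for two complete definitions, so non-test builds see a constant-0 stub that the compiler folds away and callers need no guards of their own. The same pattern in miniature, with a made-up config symbol:

#include <stdio.h>

#define CONFIG_SANITY_TESTS 0	/* flip to 1 for a debug build */

#if CONFIG_SANITY_TESTS
static int is_testing(unsigned long state) { return state & 1; }
#else
static int is_testing(unsigned long state) { (void)state; return 0; }
#endif

int main(void)
{
	/* With the config off, the branch below is dead code after inlining. */
	if (is_testing(3))
		printf("dummy fs_info\n");
	else
		printf("production path\n");
	return 0;
}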
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index f51b509f2d9b..c669f250d4a0 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -42,8 +42,8 @@ static inline void btrfs_init_delayed_node(
delayed_node->root = root;
delayed_node->inode_id = inode_id;
refcount_set(&delayed_node->refs, 0);
- delayed_node->ins_root = RB_ROOT;
- delayed_node->del_root = RB_ROOT;
+ delayed_node->ins_root = RB_ROOT_CACHED;
+ delayed_node->del_root = RB_ROOT_CACHED;
mutex_init(&delayed_node->mutex);
INIT_LIST_HEAD(&delayed_node->n_list);
INIT_LIST_HEAD(&delayed_node->p_list);
@@ -390,7 +390,7 @@ static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
struct btrfs_delayed_node *delayed_node,
struct btrfs_key *key)
{
- return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+ return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
NULL, NULL);
}
@@ -400,9 +400,10 @@ static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
{
struct rb_node **p, *node;
struct rb_node *parent_node = NULL;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct btrfs_delayed_item *item;
int cmp;
+ bool leftmost = true;
if (action == BTRFS_DELAYED_INSERTION_ITEM)
root = &delayed_node->ins_root;
@@ -410,7 +411,7 @@ static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
root = &delayed_node->del_root;
else
BUG();
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
node = &ins->rb_node;
while (*p) {
@@ -419,16 +420,18 @@ static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
rb_node);
cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
- if (cmp < 0)
+ if (cmp < 0) {
p = &(*p)->rb_right;
- else if (cmp > 0)
+ leftmost = false;
+ } else if (cmp > 0) {
p = &(*p)->rb_left;
- else
+ } else {
return -EEXIST;
+ }
}
rb_link_node(node, parent_node, p);
- rb_insert_color(node, root);
+ rb_insert_color_cached(node, root, leftmost);
ins->delayed_node = delayed_node;
ins->ins_or_del = action;
@@ -468,7 +471,7 @@ static void finish_one_item(struct btrfs_delayed_root *delayed_root)
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct btrfs_delayed_root *delayed_root;
delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
@@ -482,7 +485,7 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
else
root = &delayed_item->delayed_node->del_root;
- rb_erase(&delayed_item->rb_node, root);
+ rb_erase_cached(&delayed_item->rb_node, root);
delayed_item->delayed_node->count--;
finish_one_item(delayed_root);
@@ -503,7 +506,7 @@ static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
struct rb_node *p;
struct btrfs_delayed_item *item = NULL;
- p = rb_first(&delayed_node->ins_root);
+ p = rb_first_cached(&delayed_node->ins_root);
if (p)
item = rb_entry(p, struct btrfs_delayed_item, rb_node);
@@ -516,7 +519,7 @@ static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
struct rb_node *p;
struct btrfs_delayed_item *item = NULL;
- p = rb_first(&delayed_node->del_root);
+ p = rb_first_cached(&delayed_node->del_root);
if (p)
item = rb_entry(p, struct btrfs_delayed_item, rb_node);
@@ -559,7 +562,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
* reserved space when starting a transaction. So no need to reserve
* qgroup space here.
*/
- ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
if (!ret) {
trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid,
@@ -647,7 +650,7 @@ static int btrfs_delayed_inode_reserve_metadata(
return ret;
}
- ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
if (!ret) {
trace_btrfs_space_reservation(fs_info, "delayed_inode",
btrfs_ino(inode), num_bytes, 1);
@@ -762,9 +765,6 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
i++;
}
- /* reset all the locked nodes in the path to spinning locks. */
- btrfs_clear_path_blocking(path, NULL, 0);
-
/* insert the keys of the items */
setup_items_for_insert(root, path, keys, data_size,
total_data_size, total_size, nitems);
@@ -1462,7 +1462,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
if (unlikely(ret)) {
btrfs_err(trans->fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
- name_len, name, delayed_node->root->objectid,
+ name_len, name, delayed_node->root->root_key.objectid,
delayed_node->inode_id, ret);
BUG();
}
@@ -1533,7 +1533,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
if (unlikely(ret)) {
btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
- index, node->root->objectid, node->inode_id, ret);
+ index, node->root->root_key.objectid,
+ node->inode_id, ret);
BUG();
}
mutex_unlock(&node->mutex);
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 33536cd681d4..74ae226ffaf0 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -50,8 +50,8 @@ struct btrfs_delayed_node {
* is waiting to be dealt with by the async worker.
*/
struct list_head p_list;
- struct rb_root ins_root;
- struct rb_root del_root;
+ struct rb_root_cached ins_root;
+ struct rb_root_cached del_root;
struct mutex mutex;
struct btrfs_inode_item inode_item;
refcount_t refs;
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 62ff545ba1f7..5149165b49a4 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -101,14 +101,15 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
}
/* insert a new ref to head ref rbtree */
-static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
+static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
struct rb_node *node)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent_node = NULL;
struct btrfs_delayed_ref_head *entry;
struct btrfs_delayed_ref_head *ins;
u64 bytenr;
+ bool leftmost = true;
ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
bytenr = ins->bytenr;
@@ -117,26 +118,29 @@ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
href_node);
- if (bytenr < entry->bytenr)
+ if (bytenr < entry->bytenr) {
p = &(*p)->rb_left;
- else if (bytenr > entry->bytenr)
+ } else if (bytenr > entry->bytenr) {
p = &(*p)->rb_right;
- else
+ leftmost = false;
+ } else {
return entry;
+ }
}
rb_link_node(node, parent_node, p);
- rb_insert_color(node, root);
+ rb_insert_color_cached(node, root, leftmost);
return NULL;
}
-static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
+static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
struct btrfs_delayed_ref_node *ins)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *node = &ins->ref_node;
struct rb_node *parent_node = NULL;
struct btrfs_delayed_ref_node *entry;
+ bool leftmost = true;
while (*p) {
int comp;
@@ -145,16 +149,18 @@ static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
ref_node);
comp = comp_refs(ins, entry, true);
- if (comp < 0)
+ if (comp < 0) {
p = &(*p)->rb_left;
- else if (comp > 0)
+ } else if (comp > 0) {
p = &(*p)->rb_right;
- else
+ leftmost = false;
+ } else {
return entry;
+ }
}
rb_link_node(node, parent_node, p);
- rb_insert_color(node, root);
+ rb_insert_color_cached(node, root, leftmost);
return NULL;
}
@@ -162,12 +168,14 @@ static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
* find a head entry based on bytenr. This returns the delayed ref
* head if it was able to find one, or NULL if nothing was in that spot.
* If return_bigger is given, the next bigger entry is returned if no exact
- * match is found.
+ * match is found. If there is no bigger entry, the first node of the
+ * ref head tree is returned instead.
*/
-static struct btrfs_delayed_ref_head *
-find_ref_head(struct rb_root *root, u64 bytenr,
- int return_bigger)
+static struct btrfs_delayed_ref_head* find_ref_head(
+ struct btrfs_delayed_ref_root *dr, u64 bytenr,
+ bool return_bigger)
{
+ struct rb_root *root = &dr->href_root.rb_root;
struct rb_node *n;
struct btrfs_delayed_ref_head *entry;
@@ -187,7 +195,7 @@ find_ref_head(struct rb_root *root, u64 bytenr,
if (bytenr > entry->bytenr) {
n = rb_next(&entry->href_node);
if (!n)
- n = rb_first(root);
+ n = rb_first_cached(&dr->href_root);
entry = rb_entry(n, struct btrfs_delayed_ref_head,
href_node);
return entry;
@@ -197,12 +205,9 @@ find_ref_head(struct rb_root *root, u64 bytenr,
return NULL;
}
-int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
+int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head)
{
- struct btrfs_delayed_ref_root *delayed_refs;
-
- delayed_refs = &trans->transaction->delayed_refs;
lockdep_assert_held(&delayed_refs->lock);
if (mutex_trylock(&head->mutex))
return 0;
@@ -227,7 +232,7 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *ref)
{
lockdep_assert_held(&head->lock);
- rb_erase(&ref->ref_node, &head->ref_tree);
+ rb_erase_cached(&ref->ref_node, &head->ref_tree);
RB_CLEAR_NODE(&ref->ref_node);
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
@@ -296,7 +301,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
lockdep_assert_held(&head->lock);
- if (RB_EMPTY_ROOT(&head->ref_tree))
+ if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
return;
/* We don't have too many refs to merge for data. */
@@ -314,7 +319,8 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
spin_unlock(&fs_info->tree_mod_seq_lock);
again:
- for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&head->ref_tree); node;
+ node = rb_next(node)) {
ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
if (seq && ref->seq >= seq)
continue;
@@ -345,24 +351,21 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
return ret;
}
-struct btrfs_delayed_ref_head *
-btrfs_select_ref_head(struct btrfs_trans_handle *trans)
+struct btrfs_delayed_ref_head *btrfs_select_ref_head(
+ struct btrfs_delayed_ref_root *delayed_refs)
{
- struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_head *head;
u64 start;
bool loop = false;
- delayed_refs = &trans->transaction->delayed_refs;
-
again:
start = delayed_refs->run_delayed_start;
- head = find_ref_head(&delayed_refs->href_root, start, 1);
+ head = find_ref_head(delayed_refs, start, true);
if (!head && !loop) {
delayed_refs->run_delayed_start = 0;
start = 0;
loop = true;
- head = find_ref_head(&delayed_refs->href_root, start, 1);
+ head = find_ref_head(delayed_refs, start, true);
if (!head)
return NULL;
} else if (!head && loop) {
@@ -569,7 +572,7 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
head_ref->is_system = is_system;
- head_ref->ref_tree = RB_ROOT;
+ head_ref->ref_tree = RB_ROOT_CACHED;
INIT_LIST_HEAD(&head_ref->ref_add_list);
RB_CLEAR_NODE(&head_ref->href_node);
head_ref->processing = 0;
@@ -903,7 +906,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
- return find_ref_head(&delayed_refs->href_root, bytenr, 0);
+ return find_ref_head(delayed_refs, bytenr, false);
}
void __cold btrfs_delayed_ref_exit(void)
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index d9f2a4ebd5db..8e20c5cb5404 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -79,7 +79,7 @@ struct btrfs_delayed_ref_head {
struct mutex mutex;
spinlock_t lock;
- struct rb_root ref_tree;
+ struct rb_root_cached ref_tree;
/* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
struct list_head ref_add_list;
@@ -148,7 +148,7 @@ struct btrfs_delayed_data_ref {
struct btrfs_delayed_ref_root {
/* head ref rbtree */
- struct rb_root href_root;
+ struct rb_root_cached href_root;
/* dirty extent records */
struct rb_root dirty_extent_root;
@@ -255,7 +255,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
u64 bytenr);
-int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
+int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
@@ -263,8 +263,8 @@ static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
}
-struct btrfs_delayed_ref_head *
-btrfs_select_ref_head(struct btrfs_trans_handle *trans);
+struct btrfs_delayed_ref_head *btrfs_select_ref_head(
+ struct btrfs_delayed_ref_root *delayed_refs);
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index dec01970d8c5..2aa48aecc52b 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -382,14 +382,6 @@ out:
return ret;
}
-void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
-
- dev_replace->committed_cursor_left =
- dev_replace->cursor_left_last_write_of_item;
-}
-
static char* btrfs_dev_name(struct btrfs_device *device)
{
if (!device || test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
@@ -408,11 +400,12 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
int ret;
struct btrfs_device *tgt_device = NULL;
struct btrfs_device *src_device = NULL;
+ bool need_unlock;
- ret = btrfs_find_device_by_devspec(fs_info, srcdevid,
- srcdev_name, &src_device);
- if (ret)
- return ret;
+ src_device = btrfs_find_device_by_devspec(fs_info, srcdevid,
+ srcdev_name);
+ if (IS_ERR(src_device))
+ return PTR_ERR(src_device);
ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
src_device, &tgt_device);
@@ -432,6 +425,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
return PTR_ERR(trans);
}
+ need_unlock = true;
btrfs_dev_replace_write_lock(dev_replace);
switch (dev_replace->replace_state) {
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
@@ -440,6 +434,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
break;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+ ASSERT(0);
ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
goto leave;
}
@@ -470,6 +465,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
atomic64_set(&dev_replace->num_write_errors, 0);
atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
btrfs_dev_replace_write_unlock(dev_replace);
+ need_unlock = false;
ret = btrfs_sysfs_add_device_link(tgt_device->fs_devices, tgt_device);
if (ret)
@@ -481,7 +477,12 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ need_unlock = true;
btrfs_dev_replace_write_lock(dev_replace);
+ dev_replace->replace_state =
+ BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
+ dev_replace->srcdev = NULL;
+ dev_replace->tgtdev = NULL;
goto leave;
}
@@ -503,9 +504,8 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
return ret;
leave:
- dev_replace->srcdev = NULL;
- dev_replace->tgtdev = NULL;
- btrfs_dev_replace_write_unlock(dev_replace);
+ if (need_unlock)
+ btrfs_dev_replace_write_unlock(dev_replace);
btrfs_destroy_dev_replace_tgtdev(tgt_device);
return ret;
}
@@ -545,8 +545,8 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
{
set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
- wait_event(fs_info->replace_wait, !percpu_counter_sum(
- &fs_info->bio_counter));
+ wait_event(fs_info->dev_replace.replace_wait, !percpu_counter_sum(
+ &fs_info->dev_replace.bio_counter));
}
/*
@@ -555,7 +555,7 @@ static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info)
{
clear_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
- wake_up(&fs_info->replace_wait);
+ wake_up(&fs_info->dev_replace.replace_wait);
}
static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
@@ -961,13 +961,10 @@ int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
void btrfs_dev_replace_read_lock(struct btrfs_dev_replace *dev_replace)
{
read_lock(&dev_replace->lock);
- atomic_inc(&dev_replace->read_locks);
}
void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace)
{
- ASSERT(atomic_read(&dev_replace->read_locks) > 0);
- atomic_dec(&dev_replace->read_locks);
read_unlock(&dev_replace->lock);
}
@@ -985,7 +982,6 @@ again:
void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace)
{
- ASSERT(atomic_read(&dev_replace->blocking_readers) == 0);
write_unlock(&dev_replace->lock);
}
@@ -994,45 +990,31 @@ void btrfs_dev_replace_set_lock_blocking(
struct btrfs_dev_replace *dev_replace)
{
/* only set blocking for read lock */
- ASSERT(atomic_read(&dev_replace->read_locks) > 0);
atomic_inc(&dev_replace->blocking_readers);
read_unlock(&dev_replace->lock);
}
-/* acquire read lock and dec blocking cnt */
-void btrfs_dev_replace_clear_lock_blocking(
- struct btrfs_dev_replace *dev_replace)
-{
- /* only set blocking for read lock */
- ASSERT(atomic_read(&dev_replace->read_locks) > 0);
- ASSERT(atomic_read(&dev_replace->blocking_readers) > 0);
- read_lock(&dev_replace->lock);
- /* Barrier implied by atomic_dec_and_test */
- if (atomic_dec_and_test(&dev_replace->blocking_readers))
- cond_wake_up_nomb(&dev_replace->read_lock_wq);
-}
-
void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
{
- percpu_counter_inc(&fs_info->bio_counter);
+ percpu_counter_inc(&fs_info->dev_replace.bio_counter);
}
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
{
- percpu_counter_sub(&fs_info->bio_counter, amount);
- cond_wake_up_nomb(&fs_info->replace_wait);
+ percpu_counter_sub(&fs_info->dev_replace.bio_counter, amount);
+ cond_wake_up_nomb(&fs_info->dev_replace.replace_wait);
}
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
{
while (1) {
- percpu_counter_inc(&fs_info->bio_counter);
+ percpu_counter_inc(&fs_info->dev_replace.bio_counter);
if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
&fs_info->fs_state)))
break;
btrfs_bio_counter_dec(fs_info);
- wait_event(fs_info->replace_wait,
+ wait_event(fs_info->dev_replace.replace_wait,
!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
&fs_info->fs_state));
}
diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
index b6d4206188bb..795c551f5b5e 100644
--- a/fs/btrfs/dev-replace.h
+++ b/fs/btrfs/dev-replace.h
@@ -11,7 +11,6 @@ struct btrfs_ioctl_dev_replace_args;
int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
-void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info);
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);
int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
@@ -28,12 +27,5 @@ void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace);
void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace);
void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace);
void btrfs_dev_replace_set_lock_blocking(struct btrfs_dev_replace *dev_replace);
-void btrfs_dev_replace_clear_lock_blocking(
- struct btrfs_dev_replace *dev_replace);
-
-static inline void btrfs_dev_replace_stats_inc(atomic64_t *stat_value)
-{
- atomic64_inc(stat_value);
-}
#endif
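
Moving bio_counter and replace_wait into struct btrfs_dev_replace groups them with their only user: the replace path bumps the counter around every in-flight bio, and btrfs_rm_dev_replace_blocked() sleeps until it drains. A minimal sketch of that drain barrier using plain C11 atomics; the kernel uses a percpu counter so the hot-path increment stays cheap, and a waitqueue instead of polling:

#include <stdio.h>
#include <stdatomic.h>

/* Plain atomics stand in for the kernel's percpu_counter + waitqueue. */
static atomic_long bio_counter = 0;

static void bio_inc(void) { atomic_fetch_add(&bio_counter, 1); }

static void bio_sub(long n)
{
	atomic_fetch_sub(&bio_counter, n);
	/* kernel: cond_wake_up_nomb(&dev_replace->replace_wait); */
}

static int drained(void)
{
	/* kernel: wait_event(replace_wait, !percpu_counter_sum(...)); */
	return atomic_load(&bio_counter) == 0;
}

int main(void)
{
	bio_inc();
	bio_inc();
	printf("drained: %d\n", drained());	/* 0: bios still in flight */
	bio_sub(2);
	printf("drained: %d\n", drained());	/* 1: replace may block I/O */
	return 0;
}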
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index a678b07fcf01..8de74d835dba 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -105,13 +105,13 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
* to use for the second index (if one is created).
* Will return 0 or -ENOMEM
*/
-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
- *root, const char *name, int name_len,
- struct btrfs_inode *dir, struct btrfs_key *location,
- u8 type, u64 index)
+int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
+ int name_len, struct btrfs_inode *dir,
+ struct btrfs_key *location, u8 type, u64 index)
{
int ret = 0;
int ret2 = 0;
+ struct btrfs_root *root = dir->root;
struct btrfs_path *path;
struct btrfs_dir_item *dir_item;
struct extent_buffer *leaf;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 05dc3c17cb62..b0ab41da91d1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -125,8 +125,8 @@ struct async_submit_bio {
* Different roots are used for different purposes and may nest inside each
* other and they require separate keysets. As lockdep keys should be
* static, assign keysets according to the purpose of the root as indicated
- * by btrfs_root->objectid. This ensures that all special purpose roots
- * have separate keysets.
+ * by btrfs_root->root_key.objectid. This ensures that all special purpose
+ * roots have separate keysets.
*
* Lock-nesting across peer nodes is always done with the immediate parent
* node locked thus preventing deadlock. As lockdep doesn't know this, use
@@ -1148,7 +1148,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
root->state = 0;
root->orphan_cleanup_state = 0;
- root->objectid = objectid;
root->last_trans = 0;
root->highest_objectid = 0;
root->nr_delalloc_inodes = 0;
@@ -2156,9 +2155,8 @@ static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
rwlock_init(&fs_info->dev_replace.lock);
- atomic_set(&fs_info->dev_replace.read_locks, 0);
atomic_set(&fs_info->dev_replace.blocking_readers, 0);
- init_waitqueue_head(&fs_info->replace_wait);
+ init_waitqueue_head(&fs_info->dev_replace.replace_wait);
init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
}
@@ -2648,7 +2646,8 @@ int open_ctree(struct super_block *sb,
goto fail_dirty_metadata_bytes;
}
- ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
+ ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
+ GFP_KERNEL);
if (ret) {
err = ret;
goto fail_delalloc_bytes;
@@ -3309,7 +3308,7 @@ fail_iput:
iput(fs_info->btree_inode);
fail_bio_counter:
- percpu_counter_destroy(&fs_info->bio_counter);
+ percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
fail_delalloc_bytes:
percpu_counter_destroy(&fs_info->delalloc_bytes);
fail_dirty_metadata_bytes:
@@ -3977,6 +3976,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
+ ASSERT(list_empty(&fs_info->delayed_iputs));
set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
btrfs_free_qgroup_config(fs_info);
@@ -4018,7 +4018,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
percpu_counter_destroy(&fs_info->delalloc_bytes);
- percpu_counter_destroy(&fs_info->bio_counter);
+ percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
cleanup_srcu_struct(&fs_info->subvol_srcu);
btrfs_free_stripe_hash_table(fs_info);
@@ -4204,7 +4204,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
return ret;
}
- while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
+ while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
struct btrfs_delayed_ref_head *head;
struct rb_node *n;
bool pin_bytes = false;
@@ -4222,11 +4222,11 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
continue;
}
spin_lock(&head->lock);
- while ((n = rb_first(&head->ref_tree)) != NULL) {
+ while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
ref = rb_entry(n, struct btrfs_delayed_ref_node,
ref_node);
ref->in_tree = 0;
- rb_erase(&ref->ref_node, &head->ref_tree);
+ rb_erase_cached(&ref->ref_node, &head->ref_tree);
RB_CLEAR_NODE(&ref->ref_node);
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
@@ -4240,7 +4240,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
if (head->processing == 0)
delayed_refs->num_heads_ready--;
atomic_dec(&delayed_refs->num_entries);
- rb_erase(&head->href_node, &delayed_refs->href_root);
+ rb_erase_cached(&head->href_node, &delayed_refs->href_root);
RB_CLEAR_NODE(&head->href_node);
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 1f3755b3a37a..ddf28ecf17f9 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -33,7 +33,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
type = FILEID_BTRFS_WITHOUT_PARENT;
fid->objectid = btrfs_ino(BTRFS_I(inode));
- fid->root_objectid = BTRFS_I(inode)->root->objectid;
+ fid->root_objectid = BTRFS_I(inode)->root->root_key.objectid;
fid->gen = inode->i_generation;
if (parent) {
@@ -41,7 +41,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
fid->parent_objectid = BTRFS_I(parent)->location.objectid;
fid->parent_gen = parent->i_generation;
- parent_root_id = BTRFS_I(parent)->root->objectid;
+ parent_root_id = BTRFS_I(parent)->root->root_key.objectid;
if (parent_root_id != fid->root_objectid) {
fid->parent_root_objectid = parent_root_id;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2d9074295d7f..a4cd0221bc8d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2374,7 +2374,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_ref_node *ref;
- if (RB_EMPTY_ROOT(&head->ref_tree))
+ if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
return NULL;
/*
@@ -2387,7 +2387,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
return list_first_entry(&head->ref_add_list,
struct btrfs_delayed_ref_node, add_list);
- ref = rb_entry(rb_first(&head->ref_tree),
+ ref = rb_entry(rb_first_cached(&head->ref_tree),
struct btrfs_delayed_ref_node, ref_node);
ASSERT(list_empty(&ref->add_list));
return ref;
@@ -2448,13 +2448,13 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
spin_unlock(&head->lock);
spin_lock(&delayed_refs->lock);
spin_lock(&head->lock);
- if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
+ if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
return 1;
}
delayed_refs->num_heads--;
- rb_erase(&head->href_node, &delayed_refs->href_root);
+ rb_erase_cached(&head->href_node, &delayed_refs->href_root);
RB_CLEAR_NODE(&head->href_node);
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
@@ -2502,102 +2502,66 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
return 0;
}
-/*
- * Returns 0 on success or if called with an already aborted transaction.
- * Returns -ENOMEM or -EIO on failure and will abort the transaction.
- */
-static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- unsigned long nr)
+static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
+ struct btrfs_trans_handle *trans)
+{
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+ struct btrfs_delayed_ref_head *head = NULL;
+ int ret;
+
+ spin_lock(&delayed_refs->lock);
+ head = btrfs_select_ref_head(delayed_refs);
+ if (!head) {
+ spin_unlock(&delayed_refs->lock);
+ return head;
+ }
+
+ /*
+ * Grab the lock that says we are going to process all the refs for
+ * this head
+ */
+ ret = btrfs_delayed_ref_lock(delayed_refs, head);
+ spin_unlock(&delayed_refs->lock);
+
+ /*
+ * We may have dropped the spin lock to get the head mutex lock, and
+ * that might have given someone else time to free the head. If that's
+ * true, it has been removed from our list and we can move on.
+ */
+ if (ret == -EAGAIN)
+ head = ERR_PTR(-EAGAIN);
+
+ return head;
+}
+
+static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *locked_ref,
+ unsigned long *run_refs)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_delayed_ref_node *ref;
- struct btrfs_delayed_ref_head *locked_ref = NULL;
struct btrfs_delayed_extent_op *extent_op;
- ktime_t start = ktime_get();
- int ret;
- unsigned long count = 0;
- unsigned long actual_count = 0;
+ struct btrfs_delayed_ref_node *ref;
int must_insert_reserved = 0;
+ int ret;
delayed_refs = &trans->transaction->delayed_refs;
- while (1) {
- if (!locked_ref) {
- if (count >= nr)
- break;
- spin_lock(&delayed_refs->lock);
- locked_ref = btrfs_select_ref_head(trans);
- if (!locked_ref) {
- spin_unlock(&delayed_refs->lock);
- break;
- }
+ lockdep_assert_held(&locked_ref->mutex);
+ lockdep_assert_held(&locked_ref->lock);
- /* grab the lock that says we are going to process
- * all the refs for this head */
- ret = btrfs_delayed_ref_lock(trans, locked_ref);
- spin_unlock(&delayed_refs->lock);
- /*
- * we may have dropped the spin lock to get the head
- * mutex lock, and that might have given someone else
- * time to free the head. If that's true, it has been
- * removed from our list and we can move on.
- */
- if (ret == -EAGAIN) {
- locked_ref = NULL;
- count++;
- continue;
- }
- }
-
- /*
- * We need to try and merge add/drops of the same ref since we
- * can run into issues with relocate dropping the implicit ref
- * and then it being added back again before the drop can
- * finish. If we merged anything we need to re-loop so we can
- * get a good ref.
- * Or we can get node references of the same type that weren't
- * merged when created due to bumps in the tree mod seq, and
- * we need to merge them to prevent adding an inline extent
- * backref before dropping it (triggering a BUG_ON at
- * insert_inline_extent_backref()).
- */
- spin_lock(&locked_ref->lock);
- btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
-
- ref = select_delayed_ref(locked_ref);
-
- if (ref && ref->seq &&
+ while ((ref = select_delayed_ref(locked_ref))) {
+ if (ref->seq &&
btrfs_check_delayed_seq(fs_info, ref->seq)) {
spin_unlock(&locked_ref->lock);
unselect_delayed_ref_head(delayed_refs, locked_ref);
- locked_ref = NULL;
- cond_resched();
- count++;
- continue;
- }
-
- /*
- * We're done processing refs in this ref_head, clean everything
- * up and move on to the next ref_head.
- */
- if (!ref) {
- ret = cleanup_ref_head(trans, locked_ref);
- if (ret > 0 ) {
- /* We dropped our lock, we need to loop. */
- ret = 0;
- continue;
- } else if (ret) {
- return ret;
- }
- locked_ref = NULL;
- count++;
- continue;
+ return -EAGAIN;
}
- actual_count++;
+ (*run_refs)++;
ref->in_tree = 0;
- rb_erase(&ref->ref_node, &locked_ref->ref_tree);
+ rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
RB_CLEAR_NODE(&ref->ref_node);
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
@@ -2619,8 +2583,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
atomic_dec(&delayed_refs->num_entries);
/*
- * Record the must-insert_reserved flag before we drop the spin
- * lock.
+ * Record the must_insert_reserved flag before we drop the
+ * spin lock.
*/
must_insert_reserved = locked_ref->must_insert_reserved;
locked_ref->must_insert_reserved = 0;
@@ -2642,10 +2606,90 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
}
btrfs_put_delayed_ref(ref);
- count++;
cond_resched();
+
+ spin_lock(&locked_ref->lock);
+ btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
}
+ return 0;
+}
+
+/*
+ * Returns 0 on success or if called with an already aborted transaction.
+ * Returns -ENOMEM or -EIO on failure and will abort the transaction.
+ */
+static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
+ unsigned long nr)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_delayed_ref_head *locked_ref = NULL;
+ ktime_t start = ktime_get();
+ int ret;
+ unsigned long count = 0;
+ unsigned long actual_count = 0;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+ do {
+ if (!locked_ref) {
+ locked_ref = btrfs_obtain_ref_head(trans);
+ if (IS_ERR_OR_NULL(locked_ref)) {
+ if (PTR_ERR(locked_ref) == -EAGAIN) {
+ continue;
+ } else {
+ break;
+ }
+ }
+ count++;
+ }
+ /*
+ * We need to try and merge add/drops of the same ref since we
+ * can run into issues with relocate dropping the implicit ref
+ * and then it being added back again before the drop can
+ * finish. If we merged anything we need to re-loop so we can
+ * get a good ref.
+ * Or we can get node references of the same type that weren't
+ * merged when created due to bumps in the tree mod seq, and
+ * we need to merge them to prevent adding an inline extent
+ * backref before dropping it (triggering a BUG_ON at
+ * insert_inline_extent_backref()).
+ */
+ spin_lock(&locked_ref->lock);
+ btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
+
+ ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
+ &actual_count);
+ if (ret < 0 && ret != -EAGAIN) {
+ /*
+ * Error, btrfs_run_delayed_refs_for_head already
+ * unlocked everything so just bail out
+ */
+ return ret;
+ } else if (!ret) {
+ /*
+ * Success, perform the usual cleanup of a processed
+ * head
+ */
+ ret = cleanup_ref_head(trans, locked_ref);
+ if (ret > 0) {
+ /* We dropped our lock, we need to loop. */
+ ret = 0;
+ continue;
+ } else if (ret) {
+ return ret;
+ }
+ }
+
+ /*
+ * Either we succeeded, or btrfs_run_delayed_refs_for_head
+ * returned -EAGAIN; in both cases select another head
+ */
+
+ locked_ref = NULL;
+ cond_resched();
+ } while ((nr != -1 && count < nr) || locked_ref);
+
/*
* We don't want to include ref heads since we can have empty ref heads
* and those will drastically skew our runtime down since we just do
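
btrfs_obtain_ref_head() folds three outcomes into one pointer: a locked head, NULL when nothing is queued, or ERR_PTR(-EAGAIN) when the head was freed while its mutex was being taken, which is why the loop above tests IS_ERR_OR_NULL() and then distinguishes the retry case. A self-contained sketch of that return convention, with simplified stand-ins for the kernel's ERR_PTR macros:

#include <stdio.h>

/* Simplified ERR_PTR convention: the top page of the address space
 * encodes negative errnos. */
#define MAX_ERRNO 4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))
#define EAGAIN 11

struct ref_head { int id; };

static struct ref_head *obtain_ref_head(int scenario)
{
	static struct ref_head head = { 1 };

	switch (scenario) {
	case 0:	return &head;			/* locked a head */
	case 1:	return NULL;			/* queue empty: stop */
	default: return ERR_PTR(-EAGAIN);	/* raced: try another head */
	}
}

int main(void)
{
	for (int s = 0; s < 3; s++) {
		struct ref_head *h = obtain_ref_head(s);

		if (IS_ERR_OR_NULL(h))
			printf("%d: %s\n", s, h && PTR_ERR(h) == -EAGAIN ?
			       "retry (-EAGAIN)" : "done (NULL)");
		else
			printf("%d: got head %d\n", s, h->id);
	}
	return 0;
}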
@@ -2745,9 +2789,9 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
return num_csums;
}
-int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_rsv *global_rsv;
u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
@@ -2782,8 +2826,7 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
return ret;
}
-int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info)
+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
u64 num_entries =
atomic_read(&trans->transaction->delayed_refs.num_entries);
@@ -2791,14 +2834,14 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
u64 val;
smp_mb();
- avg_runtime = fs_info->avg_delayed_ref_runtime;
+ avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
val = num_entries * avg_runtime;
if (val >= NSEC_PER_SEC)
return 1;
if (val >= NSEC_PER_SEC / 2)
return 2;
- return btrfs_check_space_for_delayed_refs(trans, fs_info);
+ return btrfs_check_space_for_delayed_refs(trans);
}
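For a sense of the thresholds above: the heuristic estimates how long flushing the queued delayed refs would take (entries times average per-ref runtime) and throttles once that estimate reaches half a second. A minimal userspace sketch of the same arithmetic, with made-up numbers (the average runtime and entry count are illustrative, not measured values):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Estimated flush time = queued entries * average per-ref runtime. */
static int should_throttle(unsigned long long num_entries,
			   unsigned long long avg_runtime_ns)
{
	unsigned long long val = num_entries * avg_runtime_ns;

	if (val >= NSEC_PER_SEC)	/* >= 1s of estimated work */
		return 1;
	if (val >= NSEC_PER_SEC / 2)	/* >= 0.5s of estimated work */
		return 2;
	return 0;	/* the kernel falls through to the space check here */
}

int main(void)
{
	/* 50,000 queued entries at ~20us each is ~1s of flushing work. */
	printf("%d\n", should_throttle(50000, 20000));	/* prints 1 */
	return 0;
}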
struct async_delayed_refs {
@@ -2940,7 +2983,7 @@ again:
btrfs_create_pending_block_groups(trans);
spin_lock(&delayed_refs->lock);
- node = rb_first(&delayed_refs->href_root);
+ node = rb_first_cached(&delayed_refs->href_root);
if (!node) {
spin_unlock(&delayed_refs->lock);
goto out;
@@ -3040,7 +3083,8 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
* XXX: We should replace this with a proper search function in the
* future.
*/
- for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&head->ref_tree); node;
+ node = rb_next(node)) {
ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
/* If it's a shared ref we know a cross reference exists */
if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
@@ -3139,7 +3183,6 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
{
struct btrfs_path *path;
int ret;
- int ret2;
path = btrfs_alloc_path();
if (!path)
@@ -3151,17 +3194,9 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
if (ret && ret != -ENOENT)
goto out;
- ret2 = check_delayed_ref(root, path, objectid,
- offset, bytenr);
- } while (ret2 == -EAGAIN);
-
- if (ret2 && ret2 != -ENOENT) {
- ret = ret2;
- goto out;
- }
+ ret = check_delayed_ref(root, path, objectid, offset, bytenr);
+ } while (ret == -EAGAIN);
- if (ret != -ENOENT || ret2 != -ENOENT)
- ret = 0;
out:
btrfs_free_path(path);
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
@@ -5284,7 +5319,7 @@ static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
}
static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
- u64 num_bytes, int update_size)
+ u64 num_bytes, bool update_size)
{
spin_lock(&block_rsv->lock);
block_rsv->reserved += num_bytes;
@@ -5316,7 +5351,7 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
global_rsv->full = 0;
spin_unlock(&global_rsv->lock);
- block_rsv_add_bytes(dest, num_bytes, 1);
+ block_rsv_add_bytes(dest, num_bytes, true);
return 0;
}
@@ -5479,7 +5514,7 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
struct btrfs_block_rsv *dst, u64 num_bytes,
- int update_size)
+ bool update_size)
{
int ret;
@@ -5539,10 +5574,8 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
return 0;
ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
- if (!ret) {
- block_rsv_add_bytes(block_rsv, num_bytes, 1);
- return 0;
- }
+ if (!ret)
+ block_rsv_add_bytes(block_rsv, num_bytes, true);
return ret;
}
@@ -5587,7 +5620,7 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
if (!ret) {
- block_rsv_add_bytes(block_rsv, num_bytes, 0);
+ block_rsv_add_bytes(block_rsv, num_bytes, false);
return 0;
}
@@ -5629,7 +5662,7 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
return ret;
ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
if (!ret) {
- block_rsv_add_bytes(block_rsv, num_bytes, 0);
+ block_rsv_add_bytes(block_rsv, num_bytes, false);
trace_btrfs_space_reservation(root->fs_info, "delalloc",
btrfs_ino(inode), num_bytes, 1);
@@ -5835,7 +5868,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
BTRFS_RESERVE_FLUSH_ALL);
if (ret == -ENOSPC && use_global_rsv)
- ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
+ ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, true);
if (ret && qgroup_num_bytes)
btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
@@ -6399,10 +6432,6 @@ static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
} else {
cache->reserved += num_bytes;
space_info->bytes_reserved += num_bytes;
-
- trace_btrfs_space_reservation(cache->fs_info,
- "space_info", space_info->flags,
- ram_bytes, 0);
space_info->bytes_may_use -= ram_bytes;
if (delalloc)
cache->delalloc_bytes += num_bytes;
@@ -6424,11 +6453,10 @@ static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
* reserve set to 0 in order to clear the reservation.
*/
-static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 num_bytes, int delalloc)
+static void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+ u64 num_bytes, int delalloc)
{
struct btrfs_space_info *space_info = cache->space_info;
- int ret = 0;
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
@@ -6441,7 +6469,6 @@ static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
cache->delalloc_bytes -= num_bytes;
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
- return ret;
}
void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
{
@@ -6925,7 +6952,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
goto out_delayed_unlock;
spin_lock(&head->lock);
- if (!RB_EMPTY_ROOT(&head->ref_tree))
+ if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
goto out;
if (head->extent_op) {
@@ -6946,7 +6973,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
* at this point we have a head with no other entries. Go
* ahead and process it.
*/
- rb_erase(&head->href_node, &delayed_refs->href_root);
+ rb_erase_cached(&head->href_node, &delayed_refs->href_root);
RB_CLEAR_NODE(&head->href_node);
atomic_dec(&delayed_refs->num_entries);
@@ -8119,6 +8146,19 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (IS_ERR(buf))
return buf;
+ /*
+ * Extra safety check in case the extent tree is corrupted and extent
+ * allocator chooses to use a tree block which is already used and
+ * locked.
+ */
+ if (buf->lock_owner == current->pid) {
+ btrfs_err_rl(fs_info,
+"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
+ buf->start, btrfs_header_owner(buf), current->pid);
+ free_extent_buffer(buf);
+ return ERR_PTR(-EUCLEAN);
+ }
+
btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_tree_lock(buf);
clean_tree_block(fs_info, buf);
@@ -8215,7 +8255,7 @@ try_reserve:
static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
- block_rsv_add_bytes(block_rsv, blocksize, 0);
+ block_rsv_add_bytes(block_rsv, blocksize, false);
block_rsv_release_bytes(fs_info, block_rsv, NULL, 0, NULL);
}
@@ -8642,7 +8682,13 @@ skip:
parent = 0;
}
- if (need_account) {
+ /*
+ * The reloc tree doesn't contribute to qgroup numbers, and we have
+ * already accounted for them at merge time (replace_path), so we
+ * can skip the expensive subtree trace here.
+ */
+ if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
+ need_account) {
ret = btrfs_qgroup_trace_subtree(trans, next,
generation, level - 1);
if (ret) {
@@ -8763,15 +8809,14 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (eb == root->node) {
if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = eb->start;
- else
- BUG_ON(root->root_key.objectid !=
- btrfs_header_owner(eb));
+ else if (root->root_key.objectid != btrfs_header_owner(eb))
+ goto owner_mismatch;
} else {
if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = path->nodes[level + 1]->start;
- else
- BUG_ON(root->root_key.objectid !=
- btrfs_header_owner(path->nodes[level + 1]));
+ else if (root->root_key.objectid !=
+ btrfs_header_owner(path->nodes[level + 1]))
+ goto owner_mismatch;
}
btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
@@ -8779,6 +8824,11 @@ out:
wc->refs[level] = 0;
wc->flags[level] = 0;
return 0;
+
+owner_mismatch:
+ btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
+ btrfs_header_owner(eb), root->root_key.objectid);
+ return -EUCLEAN;
}
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
@@ -8832,6 +8882,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
ret = walk_up_proc(trans, root, path, wc);
if (ret > 0)
return 0;
+ if (ret < 0)
+ return ret;
if (path->locks[level]) {
btrfs_tree_unlock_rw(path->nodes[level],
@@ -8875,7 +8927,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
int level;
bool root_dropped = false;
- btrfs_debug(fs_info, "Drop subvolume %llu", root->objectid);
+ btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);
path = btrfs_alloc_path();
if (!path) {
@@ -9613,6 +9665,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
block_group = btrfs_lookup_first_block_group(info, last);
while (block_group) {
+ wait_block_group_cache_done(block_group);
spin_lock(&block_group->lock);
if (block_group->iref)
break;
@@ -10074,7 +10127,7 @@ error:
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_group_cache *block_group, *tmp;
+ struct btrfs_block_group_cache *block_group;
struct btrfs_root *extent_root = fs_info->extent_root;
struct btrfs_block_group_item item;
struct btrfs_key key;
@@ -10082,7 +10135,10 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
trans->can_flush_pending_bgs = false;
- list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
+ while (!list_empty(&trans->new_bgs)) {
+ block_group = list_first_entry(&trans->new_bgs,
+ struct btrfs_block_group_cache,
+ bg_list);
if (ret)
goto next;
@@ -10753,14 +10809,16 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
* We don't want a transaction for this since the discard may take a
* substantial amount of time. We don't require that a transaction be
* running, but we do need to take a running transaction into account
- * to ensure that we're not discarding chunks that were released in
- * the current transaction.
+ * to ensure that we're not discarding chunks that were released or
+ * allocated in the current transaction.
*
* Holding the chunks lock will prevent other threads from allocating
* or releasing chunks, but it won't prevent a running transaction
* from committing and releasing the memory that the pending chunks
* list head uses. For that, we need to take a reference to the
- * transaction.
+ * transaction and hold the commit root sem. We only need to hold
+ * it while performing the free space search since we have already
+ * held back allocations.
*/
static int btrfs_trim_free_extents(struct btrfs_device *device,
u64 minlen, u64 *trimmed)
@@ -10770,6 +10828,10 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
*trimmed = 0;
+ /* Discard not supported = nothing to do. */
+ if (!blk_queue_discard(bdev_get_queue(device->bdev)))
+ return 0;
+
/* Not writeable = nothing to do. */
if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
return 0;
@@ -10787,9 +10849,13 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
if (ret)
- return ret;
+ break;
- down_read(&fs_info->commit_root_sem);
+ ret = down_read_killable(&fs_info->commit_root_sem);
+ if (ret) {
+ mutex_unlock(&fs_info->chunk_mutex);
+ break;
+ }
spin_lock(&fs_info->trans_lock);
trans = fs_info->running_transaction;
@@ -10797,13 +10863,17 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
refcount_inc(&trans->use_count);
spin_unlock(&fs_info->trans_lock);
+ if (!trans)
+ up_read(&fs_info->commit_root_sem);
+
ret = find_free_dev_extent_start(trans, device, minlen, start,
&start, &len);
- if (trans)
+ if (trans) {
+ up_read(&fs_info->commit_root_sem);
btrfs_put_transaction(trans);
+ }
if (ret) {
- up_read(&fs_info->commit_root_sem);
mutex_unlock(&fs_info->chunk_mutex);
if (ret == -ENOSPC)
ret = 0;
@@ -10811,7 +10881,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
}
ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
- up_read(&fs_info->commit_root_sem);
mutex_unlock(&fs_info->chunk_mutex);
if (ret)
@@ -10831,6 +10900,15 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
return ret;
}
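The lock handling above is easy to lose in the hunks: chunk_mutex is taken first, then commit_root_sem (now killable), each failure path unwinds only what is already held, and commit_root_sem is dropped as soon as the free-space search finishes so the slow discard runs under chunk_mutex alone. A minimal userspace model of that acquire/unwind ordering (pthread mutexes stand in for the kernel primitives; all names are invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t commit_root_sem = PTHREAD_MUTEX_INITIALIZER;

/* Take locks in a fixed order; on failure, release in reverse order. */
static int trim_step(void)
{
	if (pthread_mutex_lock(&chunk_mutex) != 0)
		return -1;			/* nothing held yet */

	if (pthread_mutex_lock(&commit_root_sem) != 0) {
		pthread_mutex_unlock(&chunk_mutex);	/* unwind outer lock */
		return -1;
	}

	/* ... search for a free range while both locks are held ... */

	pthread_mutex_unlock(&commit_root_sem);	/* drop before the discard */

	/* ... issue the (potentially slow) discard under chunk_mutex only ... */

	pthread_mutex_unlock(&chunk_mutex);
	return 0;
}

int main(void)
{
	printf("trim_step: %d\n", trim_step());
	return 0;
}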
+/*
+ * Trim the whole filesystem by:
+ * 1) trimming the free space in each block group
+ * 2) trimming the unallocated space on each device
+ *
+ * This will also continue trimming even if a block group or device encounters
+ * an error. The return value will be the last error, or 0 if nothing bad
+ * happens.
+ */
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
struct btrfs_block_group_cache *cache = NULL;
@@ -10840,18 +10918,14 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
u64 start;
u64 end;
u64 trimmed = 0;
- u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
+ u64 bg_failed = 0;
+ u64 dev_failed = 0;
+ int bg_ret = 0;
+ int dev_ret = 0;
int ret = 0;
- /*
- * try to trim all FS space, our block group may start from non-zero.
- */
- if (range->len == total_bytes)
- cache = btrfs_lookup_first_block_group(fs_info, range->start);
- else
- cache = btrfs_lookup_block_group(fs_info, range->start);
-
- while (cache) {
+ cache = btrfs_lookup_first_block_group(fs_info, range->start);
+ for (; cache; cache = next_block_group(fs_info, cache)) {
if (cache->key.objectid >= (range->start + range->len)) {
btrfs_put_block_group(cache);
break;
@@ -10865,13 +10939,15 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
if (!block_group_cache_done(cache)) {
ret = cache_block_group(cache, 0);
if (ret) {
- btrfs_put_block_group(cache);
- break;
+ bg_failed++;
+ bg_ret = ret;
+ continue;
}
ret = wait_block_group_cache_done(cache);
if (ret) {
- btrfs_put_block_group(cache);
- break;
+ bg_failed++;
+ bg_ret = ret;
+ continue;
}
}
ret = btrfs_trim_block_group(cache,
@@ -10882,28 +10958,40 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
trimmed += group_trimmed;
if (ret) {
- btrfs_put_block_group(cache);
- break;
+ bg_failed++;
+ bg_ret = ret;
+ continue;
}
}
-
- cache = next_block_group(fs_info, cache);
}
+ if (bg_failed)
+ btrfs_warn(fs_info,
+ "failed to trim %llu block group(s), last error %d",
+ bg_failed, bg_ret);
mutex_lock(&fs_info->fs_devices->device_list_mutex);
- devices = &fs_info->fs_devices->alloc_list;
- list_for_each_entry(device, devices, dev_alloc_list) {
+ devices = &fs_info->fs_devices->devices;
+ list_for_each_entry(device, devices, dev_list) {
ret = btrfs_trim_free_extents(device, range->minlen,
&group_trimmed);
- if (ret)
+ if (ret) {
+ dev_failed++;
+ dev_ret = ret;
break;
+ }
trimmed += group_trimmed;
}
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+ if (dev_failed)
+ btrfs_warn(fs_info,
+ "failed to trim %llu device(s), last error %d",
+ dev_failed, dev_ret);
range->len = trimmed;
- return ret;
+ if (bg_ret)
+ return bg_ret;
+ return dev_ret;
}
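As the comment above says, the rewritten btrfs_trim_fs() keeps going past per-item failures and reports the last error at the end, instead of bailing out on the first one. A minimal sketch of that keep-going pattern (the item list and error value are invented):

#include <stdio.h>

/* Pretend work item: returns 0 or a negative errno-style code. */
static int trim_one(int i)
{
	return (i == 2) ? -5 : 0;	/* item 2 fails, like -EIO */
}

int main(void)
{
	unsigned long long failed = 0;
	int last_err = 0;
	int i;

	for (i = 0; i < 4; i++) {
		int ret = trim_one(i);

		if (ret) {		/* remember the error, keep going */
			failed++;
			last_err = ret;
			continue;
		}
	}
	if (failed)
		printf("failed to trim %llu item(s), last error %d\n",
		       failed, last_err);
	return last_err ? 1 : 0;	/* report failure at the very end */
}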
/*
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4dd6faab02bb..6877a74c7469 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1424,20 +1424,15 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
struct extent_state **cached_state)
{
struct extent_state *state;
- struct rb_node *n;
int ret = 1;
spin_lock(&tree->lock);
if (cached_state && *cached_state) {
state = *cached_state;
if (state->end == start - 1 && extent_state_in_tree(state)) {
- n = rb_next(&state->rb_node);
- while (n) {
- state = rb_entry(n, struct extent_state,
- rb_node);
+ while ((state = next_state(state)) != NULL) {
if (state->state & bits)
goto got_it;
- n = rb_next(n);
}
free_extent_state(*cached_state);
*cached_state = NULL;
@@ -1568,7 +1563,7 @@ static noinline int lock_delalloc_pages(struct inode *inode,
*
* 1 is returned if we find something, 0 if nothing was in the tree
*/
-STATIC u64 find_lock_delalloc_range(struct inode *inode,
+static noinline_for_stack u64 find_lock_delalloc_range(struct inode *inode,
struct extent_io_tree *tree,
struct page *locked_page, u64 *start,
u64 *end, u64 max_bytes)
@@ -1648,6 +1643,17 @@ out_failed:
return found;
}
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+u64 btrfs_find_lock_delalloc_range(struct inode *inode,
+ struct extent_io_tree *tree,
+ struct page *locked_page, u64 *start,
+ u64 *end, u64 max_bytes)
+{
+ return find_lock_delalloc_range(inode, tree, locked_page, start, end,
+ max_bytes);
+}
+#endif
+
static int __process_pages_contig(struct address_space *mapping,
struct page *locked_page,
pgoff_t start_index, pgoff_t end_index,
@@ -5165,11 +5171,11 @@ void clear_extent_buffer_dirty(struct extent_buffer *eb)
WARN_ON(atomic_read(&eb->refs) == 0);
}
-int set_extent_buffer_dirty(struct extent_buffer *eb)
+bool set_extent_buffer_dirty(struct extent_buffer *eb)
{
int i;
int num_pages;
- int was_dirty = 0;
+ bool was_dirty;
check_buffer_tree_ref(eb);
@@ -5179,8 +5185,15 @@ int set_extent_buffer_dirty(struct extent_buffer *eb)
WARN_ON(atomic_read(&eb->refs) == 0);
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
+ if (!was_dirty)
+ for (i = 0; i < num_pages; i++)
+ set_page_dirty(eb->pages[i]);
+
+#ifdef CONFIG_BTRFS_DEBUG
for (i = 0; i < num_pages; i++)
- set_page_dirty(eb->pages[i]);
+ ASSERT(PageDirty(eb->pages[i]));
+#endif
+
return was_dirty;
}
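A minimal userspace model of the contract the hunk above establishes: the pages are dirtied only on a clean-to-dirty transition, and the previous dirty state is returned (names are invented; set_page_dirty() is modeled by a counter):

#include <stdbool.h>
#include <stdio.h>

static bool buffer_dirty;
static int page_dirty_calls;

/* Returns the previous state; dirties pages only on a transition. */
static bool set_buffer_dirty(void)
{
	bool was_dirty = buffer_dirty;

	buffer_dirty = true;
	if (!was_dirty)
		page_dirty_calls++;	/* stand-in for set_page_dirty() */
	return was_dirty;
}

int main(void)
{
	printf("%d %d\n", set_buffer_dirty(), page_dirty_calls); /* 0 1 */
	printf("%d %d\n", set_buffer_dirty(), page_dirty_calls); /* 1 1 */
	return 0;
}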
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index b4d03e677e1d..369daa5d4f73 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -479,7 +479,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
-int set_extent_buffer_dirty(struct extent_buffer *eb);
+bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
@@ -546,7 +546,7 @@ int free_io_failure(struct extent_io_tree *failure_tree,
struct extent_io_tree *io_tree,
struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-noinline u64 find_lock_delalloc_range(struct inode *inode,
+u64 btrfs_find_lock_delalloc_range(struct inode *inode,
struct extent_io_tree *tree,
struct page *locked_page, u64 *start,
u64 *end, u64 max_bytes);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 6648d55e5339..7eea8b6e2cd3 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -34,7 +34,7 @@ void __cold extent_map_exit(void)
*/
void extent_map_tree_init(struct extent_map_tree *tree)
{
- tree->map = RB_ROOT;
+ tree->map = RB_ROOT_CACHED;
INIT_LIST_HEAD(&tree->modified_extents);
rwlock_init(&tree->lock);
}
@@ -90,24 +90,27 @@ static u64 range_end(u64 start, u64 len)
return start + len;
}
-static int tree_insert(struct rb_root *root, struct extent_map *em)
+static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct extent_map *entry = NULL;
struct rb_node *orig_parent = NULL;
u64 end = range_end(em->start, em->len);
+ bool leftmost = true;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct extent_map, rb_node);
- if (em->start < entry->start)
+ if (em->start < entry->start) {
p = &(*p)->rb_left;
- else if (em->start >= extent_map_end(entry))
+ } else if (em->start >= extent_map_end(entry)) {
p = &(*p)->rb_right;
- else
+ leftmost = false;
+ } else {
return -EEXIST;
+ }
}
orig_parent = parent;
@@ -130,7 +133,7 @@ static int tree_insert(struct rb_root *root, struct extent_map *em)
return -EEXIST;
rb_link_node(&em->rb_node, orig_parent, p);
- rb_insert_color(&em->rb_node, root);
+ rb_insert_color_cached(&em->rb_node, root, leftmost);
return 0;
}
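extent_map.c now keeps the tree in an rb_root_cached, which caches the leftmost node so rb_first_cached() is O(1); the price is that the insert path must report whether the new node became the leftmost. A minimal kernel-side sketch of the idiom the hunks above follow (struct item and its fields are invented; this compiles in a kernel tree, not as a userspace program):

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	u64 start;
	struct rb_node node;
};

static struct rb_root_cached tree = RB_ROOT_CACHED;

/* Insert while tracking whether the new node becomes the leftmost one. */
static void item_insert(struct item *new)
{
	struct rb_node **p = &tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, node);

		parent = *p;
		if (new->start < cur->start) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}
	rb_link_node(&new->node, parent, p);
	rb_insert_color_cached(&new->node, &tree, leftmost);
}

/* O(1) access to the smallest key, instead of walking down the tree. */
static struct item *item_first(void)
{
	struct rb_node *n = rb_first_cached(&tree);

	return n ? rb_entry(n, struct item, node) : NULL;
}

/* Removal must use rb_erase_cached() so the cached pointer stays valid. */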
@@ -242,7 +245,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->mod_start = merge->mod_start;
em->generation = max(em->generation, merge->generation);
- rb_erase(&merge->rb_node, &tree->map);
+ rb_erase_cached(&merge->rb_node, &tree->map);
RB_CLEAR_NODE(&merge->rb_node);
free_extent_map(merge);
}
@@ -254,7 +257,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
if (rb && mergable_maps(em, merge)) {
em->len += merge->len;
em->block_len += merge->block_len;
- rb_erase(&merge->rb_node, &tree->map);
+ rb_erase_cached(&merge->rb_node, &tree->map);
RB_CLEAR_NODE(&merge->rb_node);
em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
em->generation = max(em->generation, merge->generation);
@@ -367,7 +370,7 @@ __lookup_extent_mapping(struct extent_map_tree *tree,
struct rb_node *next = NULL;
u64 end = range_end(start, len);
- rb_node = __tree_search(&tree->map, start, &prev, &next);
+ rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next);
if (!rb_node) {
if (prev)
rb_node = prev;
@@ -428,16 +431,13 @@ struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
* Removes @em from @tree. No reference counts are dropped, and no checks
* are done to see if the range is in use
*/
-int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
+void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
- int ret = 0;
-
WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
- rb_erase(&em->rb_node, &tree->map);
+ rb_erase_cached(&em->rb_node, &tree->map);
if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
list_del_init(&em->list);
RB_CLEAR_NODE(&em->rb_node);
- return ret;
}
void replace_extent_mapping(struct extent_map_tree *tree,
@@ -449,7 +449,7 @@ void replace_extent_mapping(struct extent_map_tree *tree,
ASSERT(extent_map_in_tree(cur));
if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
list_del_init(&cur->list);
- rb_replace_node(&cur->rb_node, &new->rb_node, &tree->map);
+ rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
RB_CLEAR_NODE(&cur->rb_node);
setup_extent_mapping(tree, new, modified);
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 25d985e7532a..31977ffd6190 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -49,7 +49,7 @@ struct extent_map {
};
struct extent_map_tree {
- struct rb_root map;
+ struct rb_root_cached map;
struct list_head modified_extents;
rwlock_t lock;
};
@@ -78,7 +78,7 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em, int modified);
-int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
+void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
void replace_extent_mapping(struct extent_map_tree *tree,
struct extent_map *cur,
struct extent_map *new,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 2be00e873e92..15b925142793 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -531,6 +531,14 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
end_of_last_block = start_pos + num_bytes - 1;
+ /*
+ * The pages may already be dirty; clear out the old accounting so
+ * we can set things up properly.
+ */
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, cached);
+
if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
if (start_pos >= isize &&
!(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
@@ -1500,18 +1508,27 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
}
if (ordered)
btrfs_put_ordered_extent(ordered);
- clear_extent_bit(&inode->io_tree, start_pos, last_pos,
- EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, cached_state);
+
*lockstart = start_pos;
*lockend = last_pos;
ret = 1;
}
+ /*
+ * It's possible the pages are dirty right now, but we don't want
+ * to clean them yet because copy_from_user may catch a page fault
+ * and we might have to fall back to one page at a time. If that
+ * happens, we'll unlock these pages and we'd have a window where
+ * reclaim could sneak in and drop the once-dirty page on the floor
+ * without writing it.
+ *
+ * We have the pages locked and the extent range locked, so there's
+ * no way someone can start IO on any dirty pages in this range.
+ *
+ * We'll call btrfs_dirty_pages() later on, and that will flip around
+ * delalloc bits and dirty the pages as required.
+ */
for (i = 0; i < num_pages; i++) {
- if (clear_page_dirty_for_io(pages[i]))
- account_page_redirty(pages[i]);
set_page_extent_mapped(pages[i]);
WARN_ON(!PageLocked(pages[i]));
}
@@ -2544,7 +2561,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
- min_size, 0);
+ min_size, false);
BUG_ON(ret);
trans->block_rsv = rsv;
@@ -2594,7 +2611,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
- rsv, min_size, 0);
+ rsv, min_size, false);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 0adf38b00fa0..67441219d6c9 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -10,6 +10,7 @@
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
+#include <linux/sched/mm.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
@@ -47,6 +48,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
struct inode *inode = NULL;
+ unsigned nofs_flag;
int ret;
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -68,7 +70,13 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
btrfs_disk_key_to_cpu(&location, &disk_key);
btrfs_release_path(path);
+ /*
+ * We are often under a trans handle at this point, so we need to make
+ * sure NOFS is set to keep us from deadlocking.
+ */
+ nofs_flag = memalloc_nofs_save();
inode = btrfs_iget(fs_info->sb, &location, root, NULL);
+ memalloc_nofs_restore(nofs_flag);
if (IS_ERR(inode))
return inode;
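The memalloc_nofs_save()/memalloc_nofs_restore() pair above is the scoped replacement for passing GFP_NOFS explicitly: every allocation inside the window behaves as NOFS, including ones made deep inside btrfs_iget(). A minimal kernel-side sketch of the idiom (the helper is hypothetical):

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Any allocation in this scope behaves as if GFP_NOFS were passed. */
static void *alloc_under_transaction(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();
	p = kmalloc(size, GFP_KERNEL);	/* implicitly NOFS here */
	memalloc_nofs_restore(nofs_flag);
	return p;
}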
@@ -1679,6 +1687,8 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
bitmap_clear(info->bitmap, start, count);
info->bytes -= bytes;
+ if (info->max_extent_size > ctl->unit)
+ info->max_extent_size = 0;
}
static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
@@ -2110,8 +2120,7 @@ new_bitmap:
out:
if (info) {
- if (info->bitmap)
- kfree(info->bitmap);
+ kfree(info->bitmap);
kmem_cache_free(btrfs_free_space_cachep, info);
}
@@ -3601,8 +3610,7 @@ again:
if (info)
kmem_cache_free(btrfs_free_space_cachep, info);
- if (map)
- kfree(map);
+ kfree(map);
return 0;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3ea5339603cf..181c58b23110 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -64,7 +64,6 @@ static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
-static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static const struct extent_io_ops btrfs_extent_io_ops;
@@ -2750,12 +2749,9 @@ static void relink_file_extents(struct new_sa_defrag_extent *new)
struct btrfs_path *path;
struct sa_defrag_extent_backref *backref;
struct sa_defrag_extent_backref *prev = NULL;
- struct inode *inode;
struct rb_node *node;
int ret;
- inode = new->inode;
-
path = btrfs_alloc_path();
if (!path)
return;
@@ -3471,8 +3467,6 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
/* this will do delete_inode and everything for us */
iput(inode);
- if (ret)
- goto out;
}
/* release the path since we're done with it */
btrfs_release_path(path);
@@ -3738,7 +3732,7 @@ cache_acl:
case S_IFLNK:
inode->i_op = &btrfs_symlink_inode_operations;
inode_nohighmem(inode);
- inode->i_mapping->a_ops = &btrfs_symlink_aops;
+ inode->i_mapping->a_ops = &btrfs_aops;
break;
default:
inode->i_op = &btrfs_special_inode_operations;
@@ -3910,12 +3904,8 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
path->leave_spinning = 1;
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
- goto err;
- }
- if (!di) {
- ret = -ENOENT;
+ if (IS_ERR_OR_NULL(di)) {
+ ret = di ? PTR_ERR(di) : -ENOENT;
goto err;
}
leaf = path->nodes[0];
@@ -4075,10 +4065,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
name, name_len, -1);
if (IS_ERR_OR_NULL(di)) {
- if (!di)
- ret = -ENOENT;
- else
- ret = PTR_ERR(di);
+ ret = di ? PTR_ERR(di) : -ENOENT;
goto out;
}
@@ -4270,18 +4257,17 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
* again is not run concurrently.
*/
spin_lock(&dest->root_item_lock);
- root_flags = btrfs_root_flags(&dest->root_item);
- if (dest->send_in_progress == 0) {
- btrfs_set_root_flags(&dest->root_item,
- root_flags | BTRFS_ROOT_SUBVOL_DEAD);
- spin_unlock(&dest->root_item_lock);
- } else {
+ if (dest->send_in_progress) {
spin_unlock(&dest->root_item_lock);
btrfs_warn(fs_info,
"attempt to delete subvolume %llu during send",
dest->root_key.objectid);
return -EPERM;
}
+ root_flags = btrfs_root_flags(&dest->root_item);
+ btrfs_set_root_flags(&dest->root_item,
+ root_flags | BTRFS_ROOT_SUBVOL_DEAD);
+ spin_unlock(&dest->root_item_lock);
down_write(&fs_info->subvol_sem);
@@ -4727,7 +4713,7 @@ delete:
btrfs_abort_transaction(trans, ret);
break;
}
- if (btrfs_should_throttle_delayed_refs(trans, fs_info))
+ if (btrfs_should_throttle_delayed_refs(trans))
btrfs_async_run_delayed_refs(fs_info,
trans->delayed_ref_updates * 2,
trans->transid, 0);
@@ -4736,8 +4722,7 @@ delete:
extent_num_bytes)) {
should_end = true;
}
- if (btrfs_should_throttle_delayed_refs(trans,
- fs_info))
+ if (btrfs_should_throttle_delayed_refs(trans))
should_throttle = true;
}
}
@@ -5235,10 +5220,10 @@ static void evict_inode_truncate_pages(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
write_lock(&map_tree->lock);
- while (!RB_EMPTY_ROOT(&map_tree->map)) {
+ while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) {
struct extent_map *em;
- node = rb_first(&map_tree->map);
+ node = rb_first_cached(&map_tree->map);
em = rb_entry(node, struct extent_map, rb_node);
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
@@ -5306,8 +5291,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
}
static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
- struct btrfs_block_rsv *rsv,
- u64 min_size)
+ struct btrfs_block_rsv *rsv)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
@@ -5317,7 +5301,7 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
struct btrfs_trans_handle *trans;
int ret;
- ret = btrfs_block_rsv_refill(root, rsv, min_size,
+ ret = btrfs_block_rsv_refill(root, rsv, rsv->size,
BTRFS_RESERVE_FLUSH_LIMIT);
if (ret && ++failures > 2) {
@@ -5334,8 +5318,8 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
* Try to steal from the global reserve if there is space for
* it.
*/
- if (!btrfs_check_space_for_delayed_refs(trans, fs_info) &&
- !btrfs_block_rsv_migrate(global_rsv, rsv, min_size, 0))
+ if (!btrfs_check_space_for_delayed_refs(trans) &&
+ !btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, false))
return trans;
/* If not, commit and try again. */
@@ -5351,7 +5335,6 @@ void btrfs_evict_inode(struct inode *inode)
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv;
- u64 min_size;
int ret;
trace_btrfs_inode_evict(inode);
@@ -5361,8 +5344,6 @@ void btrfs_evict_inode(struct inode *inode)
return;
}
- min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
-
evict_inode_truncate_pages(inode);
if (inode->i_nlink &&
@@ -5373,9 +5354,6 @@ void btrfs_evict_inode(struct inode *inode)
if (is_bad_inode(inode))
goto no_delete;
- /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
- if (!special_file(inode->i_mode))
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);
@@ -5395,13 +5373,13 @@ void btrfs_evict_inode(struct inode *inode)
rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv)
goto no_delete;
- rsv->size = min_size;
+ rsv->size = btrfs_calc_trunc_metadata_size(fs_info, 1);
rsv->failfast = 1;
btrfs_i_size_write(BTRFS_I(inode), 0);
while (1) {
- trans = evict_refill_and_join(root, rsv, min_size);
+ trans = evict_refill_and_join(root, rsv);
if (IS_ERR(trans))
goto free_rsv;
@@ -5426,7 +5404,7 @@ void btrfs_evict_inode(struct inode *inode)
* If it turns out that we are dropping too many of these, we might want
* to add a mechanism for retrying these after a commit.
*/
- trans = evict_refill_and_join(root, rsv, min_size);
+ trans = evict_refill_and_join(root, rsv);
if (!IS_ERR(trans)) {
trans->block_rsv = rsv;
btrfs_orphan_del(trans, BTRFS_I(inode));
@@ -5471,12 +5449,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
name, namelen, 0);
- if (!di) {
- ret = -ENOENT;
- goto out;
- }
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
+ if (IS_ERR_OR_NULL(di)) {
+ ret = di ? PTR_ERR(di) : -ENOENT;
goto out;
}
@@ -5790,16 +5764,10 @@ static int btrfs_dentry_delete(const struct dentry *dentry)
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
- struct inode *inode;
-
- inode = btrfs_lookup_dentry(dir, dentry);
- if (IS_ERR(inode)) {
- if (PTR_ERR(inode) == -ENOENT)
- inode = NULL;
- else
- return ERR_CAST(inode);
- }
+ struct inode *inode = btrfs_lookup_dentry(dir, dentry);
+ if (inode == ERR_PTR(-ENOENT))
+ inode = NULL;
return d_splice_alias(inode, dentry);
}
@@ -6390,8 +6358,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- ret = btrfs_insert_dir_item(trans, root, name, name_len,
- parent_inode, &key,
+ ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
btrfs_inode_type(&inode->vfs_inode), index);
if (ret == -EEXIST || ret == -EOVERFLOW)
goto fail_dir_item;
@@ -6584,7 +6551,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
int drop_inode = 0;
/* do not allow sys_link's with other subvols of the same device */
- if (root->objectid != BTRFS_I(inode)->root->objectid)
+ if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
return -EXDEV;
if (inode->i_nlink >= BTRFS_LINK_MAX)
@@ -6777,9 +6744,9 @@ static noinline int uncompress_inline(struct btrfs_path *path,
* This also copies inline extents directly into the page.
*/
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
- struct page *page,
- size_t pg_offset, u64 start, u64 len,
- int create)
+ struct page *page,
+ size_t pg_offset, u64 start, u64 len,
+ int create)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
int ret;
@@ -6823,19 +6790,21 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
em->len = (u64)-1;
em->block_len = (u64)-1;
+ path = btrfs_alloc_path();
if (!path) {
- path = btrfs_alloc_path();
- if (!path) {
- err = -ENOMEM;
- goto out;
- }
- /*
- * Chances are we'll be called again, so go ahead and do
- * readahead
- */
- path->reada = READA_FORWARD;
+ err = -ENOMEM;
+ goto out;
}
+ /* Chances are we'll be called again, so go ahead and do readahead */
+ path->reada = READA_FORWARD;
+
+ /*
+ * Unless we're going to uncompress the inline extent, no sleep would
+ * happen.
+ */
+ path->leave_spinning = 1;
+
ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
if (ret < 0) {
err = ret;
@@ -6938,6 +6907,8 @@ next:
em->orig_block_len = em->len;
em->orig_start = em->start;
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
+
+ btrfs_set_path_blocking(path);
if (!PageUptodate(page)) {
if (btrfs_file_extent_compression(leaf, item) !=
BTRFS_COMPRESS_NONE) {
@@ -6985,10 +6956,10 @@ insert:
err = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
write_unlock(&em_tree->lock);
out:
+ btrfs_free_path(path);
trace_btrfs_get_extent(root, inode, em);
- btrfs_free_path(path);
if (err) {
free_extent_map(em);
return ERR_PTR(err);
@@ -9021,7 +8992,7 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
/* Migrate the slack space for the truncate to our reserve */
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
- min_size, 0);
+ min_size, false);
BUG_ON(ret);
/*
@@ -9058,7 +9029,7 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
btrfs_block_rsv_release(fs_info, rsv, -1);
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
- rsv, min_size, 0);
+ rsv, min_size, false);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
}
@@ -10191,7 +10162,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
inode->i_op = &btrfs_symlink_inode_operations;
inode_nohighmem(inode);
- inode->i_mapping->a_ops = &btrfs_symlink_aops;
+ inode->i_mapping->a_ops = &btrfs_aops;
inode_set_bytes(inode, name_len);
btrfs_i_size_write(BTRFS_I(inode), name_len);
err = btrfs_update_inode(trans, root, inode);
@@ -10567,13 +10538,6 @@ static const struct address_space_operations btrfs_aops = {
.error_remove_page = generic_error_remove_page,
};
-static const struct address_space_operations btrfs_symlink_aops = {
- .readpage = btrfs_readpage,
- .writepage = btrfs_writepage,
- .invalidatepage = btrfs_invalidatepage,
- .releasepage = btrfs_releasepage,
-};
-
static const struct inode_operations btrfs_file_inode_operations = {
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d60b6caf09e8..a990a9045139 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -491,7 +491,6 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
struct fstrim_range range;
u64 minlen = ULLONG_MAX;
u64 num_devices = 0;
- u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
int ret;
if (!capable(CAP_SYS_ADMIN))
@@ -515,11 +514,15 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
return -EOPNOTSUPP;
if (copy_from_user(&range, arg, sizeof(range)))
return -EFAULT;
- if (range.start > total_bytes ||
- range.len < fs_info->sb->s_blocksize)
+
+ /*
+ * NOTE: Don't truncate the range using super->total_bytes. The bytenr
+ * of a block group is in the logical address space, which can be any
+ * sectorsize-aligned bytenr in the range [0, U64_MAX].
+ */
+ if (range.len < fs_info->sb->s_blocksize)
return -EINVAL;
- range.len = min(range.len, total_bytes - range.start);
range.minlen = max(range.minlen, minlen);
ret = btrfs_trim_fs(fs_info, &range);
if (ret < 0)
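The hunk above relaxes the range check because block group bytenrs live in the logical address space, so a caller can pass a huge len and let the kernel report back how much was trimmed (range->len is set to the trimmed byte count, as in the btrfs_trim_fs() hunk earlier). A minimal userspace sketch of issuing FITRIM (the mount point path is illustrative):

#include <fcntl.h>
#include <linux/fs.h>	/* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fstrim_range range;
	int fd = open("/mnt/btrfs", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(&range, 0, sizeof(range));
	range.start = 0;
	range.len = (unsigned long long)-1;	/* whole logical space */
	range.minlen = 0;
	if (ioctl(fd, FITRIM, &range) < 0)
		perror("FITRIM");
	else
		printf("trimmed %llu bytes\n",
		       (unsigned long long)range.len);
	close(fd);
	return 0;
}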
@@ -686,8 +689,7 @@ static noinline int create_subvol(struct inode *dir,
goto fail;
}
- ret = btrfs_insert_dir_item(trans, root,
- name, namelen, BTRFS_I(dir), &key,
+ ret = btrfs_insert_dir_item(trans, name, namelen, BTRFS_I(dir), &key,
BTRFS_FT_DIR, index);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -1324,7 +1326,7 @@ again:
if (i_done != page_cnt) {
spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents++;
+ btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
spin_unlock(&BTRFS_I(inode)->lock);
btrfs_delalloc_release_space(inode, data_reserved,
start_index << PAGE_SHIFT,
@@ -4393,7 +4395,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
ret = PTR_ERR(new_root);
goto out;
}
- if (!is_fstree(new_root->objectid)) {
+ if (!is_fstree(new_root->root_key.objectid)) {
ret = -ENOENT;
goto out;
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index d4917c0cddf5..45868fd76209 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1416,13 +1416,14 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
if (!qgroup) {
ret = -ENOENT;
goto out;
- } else {
- /* check if there are no children of this qgroup */
- if (!list_empty(&qgroup->members)) {
- ret = -EBUSY;
- goto out;
- }
}
+
+ /* Check if there are no children of this qgroup */
+ if (!list_empty(&qgroup->members)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
ret = del_qgroup_item(trans, qgroupid);
if (ret && ret != -ENOENT)
goto out;
@@ -1712,6 +1713,416 @@ static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
return 0;
}
+/*
+ * Helper function to trace a subtree tree block swap.
+ *
+ * The swap will happen at the highest tree block, but there may be a
+ * lot of tree blocks involved.
+ *
+ * For example:
+ * OO = Old tree blocks
+ * NN = New tree blocks allocated during balance
+ *
+ * File tree (257) Reloc tree for 257
+ * L2 OO NN
+ * / \ / \
+ * L1 OO OO (a) OO NN (a)
+ * / \ / \ / \ / \
+ * L0 OO OO OO OO OO OO NN NN
+ * (b) (c) (b) (c)
+ *
+ * When calling qgroup_trace_extent_swap(), we will pass:
+ * @src_eb = OO(a)
+ * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
+ * @dst_level = 0
+ * @root_level = 1
+ *
+ * In that case, qgroup_trace_extent_swap() will search from OO(a) to
+ * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
+ *
+ * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
+ *
+ * 1) Tree search from @src_eb
+ * It should act as a simplified btrfs_search_slot().
+ * The key for search can be extracted from @dst_path->nodes[dst_level]
+ * (first key).
+ *
+ * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
+ * NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty;
+ * they should have been marked during the previous (@dst_level = 1) iteration.
+ *
+ * 3) Mark file extents in leaves dirty
+ * We don't have a good way to pick out only the new file extents,
+ * so we still follow the old method of scanning all file extents in
+ * the leaf.
+ *
+ * This function frees us from keeping two paths; later we only need to
+ * care about how to iterate all new tree blocks in the reloc tree.
+ */
+static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
+ struct extent_buffer *src_eb,
+ struct btrfs_path *dst_path,
+ int dst_level, int root_level,
+ bool trace_leaf)
+{
+ struct btrfs_key key;
+ struct btrfs_path *src_path;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ u32 nodesize = fs_info->nodesize;
+ int cur_level = root_level;
+ int ret;
+
+ BUG_ON(dst_level > root_level);
+ /* Level mismatch */
+ if (btrfs_header_level(src_eb) != root_level)
+ return -EINVAL;
+
+ src_path = btrfs_alloc_path();
+ if (!src_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (dst_level)
+ btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
+ else
+ btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
+
+ /* For src_path */
+ extent_buffer_get(src_eb);
+ src_path->nodes[root_level] = src_eb;
+ src_path->slots[root_level] = dst_path->slots[root_level];
+ src_path->locks[root_level] = 0;
+
+ /* A simplified version of btrfs_search_slot() */
+ while (cur_level >= dst_level) {
+ struct btrfs_key src_key;
+ struct btrfs_key dst_key;
+
+ if (src_path->nodes[cur_level] == NULL) {
+ struct btrfs_key first_key;
+ struct extent_buffer *eb;
+ int parent_slot;
+ u64 child_gen;
+ u64 child_bytenr;
+
+ eb = src_path->nodes[cur_level + 1];
+ parent_slot = src_path->slots[cur_level + 1];
+ child_bytenr = btrfs_node_blockptr(eb, parent_slot);
+ child_gen = btrfs_node_ptr_generation(eb, parent_slot);
+ btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
+
+ eb = read_tree_block(fs_info, child_bytenr, child_gen,
+ cur_level, &first_key);
+ if (IS_ERR(eb)) {
+ ret = PTR_ERR(eb);
+ goto out;
+ } else if (!extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ ret = -EIO;
+ goto out;
+ }
+
+ src_path->nodes[cur_level] = eb;
+
+ btrfs_tree_read_lock(eb);
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+ }
+
+ src_path->slots[cur_level] = dst_path->slots[cur_level];
+ if (cur_level) {
+ btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
+ &dst_key, dst_path->slots[cur_level]);
+ btrfs_node_key_to_cpu(src_path->nodes[cur_level],
+ &src_key, src_path->slots[cur_level]);
+ } else {
+ btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
+ &dst_key, dst_path->slots[cur_level]);
+ btrfs_item_key_to_cpu(src_path->nodes[cur_level],
+ &src_key, src_path->slots[cur_level]);
+ }
+ /* Content mismatch, something went wrong */
+ if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
+ ret = -ENOENT;
+ goto out;
+ }
+ cur_level--;
+ }
+
+ /*
+ * Now both @dst_path and @src_path have been populated, record the tree
+ * blocks for qgroup accounting.
+ */
+ ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
+ nodesize, GFP_NOFS);
+ if (ret < 0)
+ goto out;
+ ret = btrfs_qgroup_trace_extent(trans,
+ dst_path->nodes[dst_level]->start,
+ nodesize, GFP_NOFS);
+ if (ret < 0)
+ goto out;
+
+ /* Record leaf file extents */
+ if (dst_level == 0 && trace_leaf) {
+ ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
+ if (ret < 0)
+ goto out;
+ ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
+ }
+out:
+ btrfs_free_path(src_path);
+ return ret;
+}
+
+/*
+ * Helper function to do a recursive, generation-aware depth-first search
+ * to locate all new tree blocks in a subtree of a reloc tree.
+ *
+ * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
+ * reloc tree
+ * L2 NN (a)
+ * / \
+ * L1 OO NN (b)
+ * / \ / \
+ * L0 OO OO OO NN
+ * (c) (d)
+ * If we pass:
+ * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
+ * @cur_level = 1
+ * @root_level = 1
+ *
+ * We will iterate through tree blocks NN(b) and NN(d) and inform qgroup
+ * to trace the above tree blocks along with their counterparts in the
+ * file tree. During the search, old tree blocks OO(c) will be skipped,
+ * as the tree block swap won't affect OO(c).
+ */
+static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
+ struct extent_buffer *src_eb,
+ struct btrfs_path *dst_path,
+ int cur_level, int root_level,
+ u64 last_snapshot, bool trace_leaf)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct extent_buffer *eb;
+ bool need_cleanup = false;
+ int ret = 0;
+ int i;
+
+ /* Level sanity check */
+ if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
+ root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
+ root_level < cur_level) {
+ btrfs_err_rl(fs_info,
+ "%s: bad levels, cur_level=%d root_level=%d",
+ __func__, cur_level, root_level);
+ return -EUCLEAN;
+ }
+
+ /* Read the tree block if needed */
+ if (dst_path->nodes[cur_level] == NULL) {
+ struct btrfs_key first_key;
+ int parent_slot;
+ u64 child_gen;
+ u64 child_bytenr;
+
+ /*
+ * dst_path->nodes[root_level] must be initialized before
+ * calling this function.
+ */
+ if (cur_level == root_level) {
+ btrfs_err_rl(fs_info,
+ "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
+ __func__, root_level, root_level, cur_level);
+ return -EUCLEAN;
+ }
+
+ /*
+ * We need to get child blockptr/gen from parent before we can
+ * read it.
+ */
+ eb = dst_path->nodes[cur_level + 1];
+ parent_slot = dst_path->slots[cur_level + 1];
+ child_bytenr = btrfs_node_blockptr(eb, parent_slot);
+ child_gen = btrfs_node_ptr_generation(eb, parent_slot);
+ btrfs_node_key_to_cpu(eb, &first_key, parent_slot);
+
+ /* This node is old, no need to trace */
+ if (child_gen < last_snapshot)
+ goto out;
+
+ eb = read_tree_block(fs_info, child_bytenr, child_gen,
+ cur_level, &first_key);
+ if (IS_ERR(eb)) {
+ ret = PTR_ERR(eb);
+ goto out;
+ } else if (!extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ ret = -EIO;
+ goto out;
+ }
+
+ dst_path->nodes[cur_level] = eb;
+ dst_path->slots[cur_level] = 0;
+
+ btrfs_tree_read_lock(eb);
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+ need_cleanup = true;
+ }
+
+ /* Now record this tree block and its counter part for qgroups */
+ ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
+ root_level, trace_leaf);
+ if (ret < 0)
+ goto cleanup;
+
+ eb = dst_path->nodes[cur_level];
+
+ if (cur_level > 0) {
+ /* Iterate all child tree blocks */
+ for (i = 0; i < btrfs_header_nritems(eb); i++) {
+ /* Skip old tree blocks as they won't be swapped */
+ if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
+ continue;
+ dst_path->slots[cur_level] = i;
+
+ /* Recursive call (at most 7 times) */
+ ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
+ dst_path, cur_level - 1, root_level,
+ last_snapshot, trace_leaf);
+ if (ret < 0)
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ if (need_cleanup) {
+ /* Clean up */
+ btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
+ dst_path->locks[cur_level]);
+ free_extent_buffer(dst_path->nodes[cur_level]);
+ dst_path->nodes[cur_level] = NULL;
+ dst_path->slots[cur_level] = 0;
+ dst_path->locks[cur_level] = 0;
+ }
+out:
+ return ret;
+}
+
+/*
+ * Inform qgroup to trace subtree swap used in balance.
+ *
+ * Unlike btrfs_qgroup_trace_subtree(), this function will only trace
+ * new tree blocks whose generation is equal to (or larger than) @last_snapshot.
+ *
+ * Will go down the tree block pointed to by @dst_eb (pointed to by
+ * @dst_parent and @dst_slot), find any tree blocks whose generation is at
+ * or above @last_snapshot, then go down @src_eb (pointed to by @src_parent
+ * and @src_slot) to find the counterpart of each tree block, mark both
+ * tree blocks as qgroup dirty, and skip all tree blocks whose generation
+ * is smaller than @last_snapshot.
+ *
+ * This skips tons of tree blocks compared to the original
+ * btrfs_qgroup_trace_subtree(), which could otherwise make balance very
+ * slow when the file tree is large.
+ *
+ * @src_parent, @src_slot: pointer to src (file tree) eb.
+ * @dst_parent, @dst_slot: pointer to dst (reloc tree) eb.
+ */
+int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *bg_cache,
+ struct extent_buffer *src_parent, int src_slot,
+ struct extent_buffer *dst_parent, int dst_slot,
+ u64 last_snapshot)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_path *dst_path = NULL;
+ struct btrfs_key first_key;
+ struct extent_buffer *src_eb = NULL;
+ struct extent_buffer *dst_eb = NULL;
+ bool trace_leaf = false;
+ u64 child_gen;
+ u64 child_bytenr;
+ int level;
+ int ret;
+
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return 0;
+
+ /* Check parameter order */
+ if (btrfs_node_ptr_generation(src_parent, src_slot) >
+ btrfs_node_ptr_generation(dst_parent, dst_slot)) {
+ btrfs_err_rl(fs_info,
+ "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
+ btrfs_node_ptr_generation(src_parent, src_slot),
+ btrfs_node_ptr_generation(dst_parent, dst_slot));
+ return -EUCLEAN;
+ }
+
+ /*
+ * Only trace leaves if we're relocating data block groups; this avoids
+ * tons of data extent tracing for meta/sys block group relocation.
+ */
+ if (bg_cache->flags & BTRFS_BLOCK_GROUP_DATA)
+ trace_leaf = true;
+ /* Read out real @src_eb, pointed by @src_parent and @src_slot */
+ child_bytenr = btrfs_node_blockptr(src_parent, src_slot);
+ child_gen = btrfs_node_ptr_generation(src_parent, src_slot);
+ btrfs_node_key_to_cpu(src_parent, &first_key, src_slot);
+
+ src_eb = read_tree_block(fs_info, child_bytenr, child_gen,
+ btrfs_header_level(src_parent) - 1, &first_key);
+ if (IS_ERR(src_eb)) {
+ ret = PTR_ERR(src_eb);
+ goto out;
+ }
+
+ /* Read out real @dst_eb, pointed to by @dst_parent and @dst_slot */
+ child_bytenr = btrfs_node_blockptr(dst_parent, dst_slot);
+ child_gen = btrfs_node_ptr_generation(dst_parent, dst_slot);
+ btrfs_node_key_to_cpu(dst_parent, &first_key, dst_slot);
+
+ dst_eb = read_tree_block(fs_info, child_bytenr, child_gen,
+ btrfs_header_level(dst_parent) - 1, &first_key);
+ if (IS_ERR(dst_eb)) {
+ ret = PTR_ERR(dst_eb);
+ goto out;
+ }
+
+ if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ level = btrfs_header_level(dst_eb);
+ dst_path = btrfs_alloc_path();
+ if (!dst_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* For dst_path */
+ extent_buffer_get(dst_eb);
+ dst_path->nodes[level] = dst_eb;
+ dst_path->slots[level] = 0;
+ dst_path->locks[level] = 0;
+
+ /* Do the generation-aware depth-first search */
+ ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
+ level, last_snapshot, trace_leaf);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+
+out:
+ free_extent_buffer(src_eb);
+ free_extent_buffer(dst_eb);
+ btrfs_free_path(dst_path);
+ if (ret < 0)
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ return ret;
+}
+
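For context, a sketch of how a caller in the relocation path might use the new helper; the surrounding type and the argument sources are assumptions based on the parameter documentation above, not a quote of the actual relocation code:

/* Hypothetical wrapper in the relocation path (names assumed). */
static int trace_before_swap(struct btrfs_trans_handle *trans,
			     struct reloc_control *rc,
			     struct extent_buffer *parent, int slot,
			     struct btrfs_path *path, int level,
			     u64 last_snapshot)
{
	/* Trace only the tree blocks the upcoming subtree swap can touch. */
	return btrfs_qgroup_trace_subtree_swap(trans, rc->block_group,
					       parent, slot,
					       path->nodes[level],
					       path->slots[level],
					       last_snapshot);
}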
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *root_eb,
u64 root_gen, int root_level)
@@ -2132,6 +2543,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
struct btrfs_delayed_ref_root *delayed_refs;
struct ulist *new_roots = NULL;
struct rb_node *node;
+ u64 num_dirty_extents = 0;
u64 qgroup_to_skip;
int ret = 0;
@@ -2141,6 +2553,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
record = rb_entry(node, struct btrfs_qgroup_extent_record,
node);
+ num_dirty_extents++;
trace_btrfs_qgroup_account_extents(fs_info, record);
if (!ret) {
@@ -2186,6 +2599,8 @@ cleanup:
kfree(record);
}
+ trace_qgroup_num_dirty_extents(fs_info, trans->transid,
+ num_dirty_extents);
return ret;
}
@@ -2897,6 +3312,7 @@ qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
qgroup->rfer_cmpr = 0;
qgroup->excl = 0;
qgroup->excl_cmpr = 0;
+ qgroup_dirty(fs_info, qgroup);
}
spin_unlock(&fs_info->qgroup_lock);
}
@@ -3004,7 +3420,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode,
int ret;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
- !is_fstree(root->objectid) || len == 0)
+ !is_fstree(root->root_key.objectid) || len == 0)
return 0;
/* @reserved parameter is mandatory for qgroup */
@@ -3090,7 +3506,7 @@ static int qgroup_free_reserved_data(struct inode *inode,
goto out;
freed += changeset.bytes_changed;
}
- btrfs_qgroup_free_refroot(root->fs_info, root->objectid, freed,
+ btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
BTRFS_QGROUP_RSV_DATA);
ret = freed;
out:
@@ -3106,6 +3522,10 @@ static int __btrfs_qgroup_release_data(struct inode *inode,
int trace_op = QGROUP_RELEASE;
int ret;
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED,
+ &BTRFS_I(inode)->root->fs_info->flags))
+ return 0;
+
/* In release case, we shouldn't have @reserved */
WARN_ON(!free && reserved);
if (free && reserved)
@@ -3122,7 +3542,7 @@ static int __btrfs_qgroup_release_data(struct inode *inode,
changeset.bytes_changed, trace_op);
if (free)
btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
- BTRFS_I(inode)->root->objectid,
+ BTRFS_I(inode)->root->root_key.objectid,
changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
ret = changeset.bytes_changed;
out:
@@ -3215,7 +3635,7 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
int ret;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->objectid) || num_bytes == 0)
+ !is_fstree(root->root_key.objectid) || num_bytes == 0)
return 0;
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
@@ -3240,13 +3660,13 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
struct btrfs_fs_info *fs_info = root->fs_info;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->objectid))
+ !is_fstree(root->root_key.objectid))
return;
/* TODO: Update trace point to handle such free */
trace_qgroup_meta_free_all_pertrans(root);
/* Special value -1 means to free all reserved space */
- btrfs_qgroup_free_refroot(fs_info, root->objectid, (u64)-1,
+ btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
BTRFS_QGROUP_RSV_META_PERTRANS);
}
@@ -3256,7 +3676,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
struct btrfs_fs_info *fs_info = root->fs_info;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->objectid))
+ !is_fstree(root->root_key.objectid))
return;
/*
@@ -3267,7 +3687,8 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
num_bytes = sub_root_meta_rsv(root, num_bytes, type);
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
- btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes, type);
+ btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
+ num_bytes, type);
}
static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
@@ -3321,13 +3742,13 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
struct btrfs_fs_info *fs_info = root->fs_info;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- !is_fstree(root->objectid))
+ !is_fstree(root->root_key.objectid))
return;
/* Same as btrfs_qgroup_free_meta_prealloc() */
num_bytes = sub_root_meta_rsv(root, num_bytes,
BTRFS_QGROUP_RSV_META_PREALLOC);
trace_qgroup_meta_convert(root, num_bytes);
- qgroup_convert_meta(fs_info, root->objectid, num_bytes);
+ qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
}
/*
@@ -3354,7 +3775,7 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
inode->i_ino, unode->val, unode->aux);
}
btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
- BTRFS_I(inode)->root->objectid,
+ BTRFS_I(inode)->root->root_key.objectid,
changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
}
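
The qgroup.c hunks above repeat two mechanical patterns worth calling out. First, every entry point now bails out early when quotas are disabled or the root is not an fs tree, so the accounting paths stay zero-cost in the common case. A minimal hedged sketch of that guard, using only names that appear in the diff (the function name is illustrative):

	static int qgroup_guard_sketch(struct btrfs_root *root, u64 num_bytes)
	{
		struct btrfs_fs_info *fs_info = root->fs_info;

		/* Quotas off, non-fs tree, or nothing to do: return at once */
		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
		    !is_fstree(root->root_key.objectid) || num_bytes == 0)
			return 0;
		/* ... the real reservation/accounting work would go here ... */
		return 0;
	}

Second, root->objectid is consistently replaced by root->root_key.objectid; the two carried the same value, and the root_key field is the canonical copy, which lets the duplicate member be dropped.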
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 54b8bb282c0e..d8f78f5ab854 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -236,6 +236,12 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *root_eb,
u64 root_gen, int root_level);
+
+int btrfs_qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group_cache *bg_cache,
+ struct extent_buffer *src_parent, int src_slot,
+ struct extent_buffer *dst_parent, int dst_slot,
+ u64 last_snapshot);
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
u64 num_bytes, struct ulist *old_roots,
struct ulist *new_roots);
@@ -249,6 +255,8 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes)
{
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return;
trace_btrfs_qgroup_free_delayed_ref(fs_info, ref_root, num_bytes);
btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes,
BTRFS_QGROUP_RSV_DATA);
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index e5b9e596bb92..d69fbfb30aa9 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -732,7 +732,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
INIT_LIST_HEAD(&ra->list);
ra->action = action;
- ra->root = root->objectid;
+ ra->root = root->root_key.objectid;
/*
* This is an allocation, preallocate the block_entry in case we haven't
@@ -787,8 +787,8 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
* one we want to lookup below when we modify the
* re->num_refs.
*/
- ref_root = root->objectid;
- re->root_objectid = root->objectid;
+ ref_root = root->root_key.objectid;
+ re->root_objectid = root->root_key.objectid;
re->num_refs = 0;
}
@@ -862,7 +862,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
 * didn't think of some other corner case.
*/
btrfs_err(fs_info, "failed to find root %llu for %llu",
- root->objectid, be->bytenr);
+ root->root_key.objectid, be->bytenr);
dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra);
kfree(ra);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 8783a1776540..924116f654a1 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -648,8 +648,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
int level, u64 bytenr)
{
struct backref_cache *cache = &rc->backref_cache;
- struct btrfs_path *path1;
- struct btrfs_path *path2;
+ struct btrfs_path *path1; /* For searching extent root */
+ struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
struct extent_buffer *eb;
struct btrfs_root *root;
struct backref_node *cur;
@@ -662,7 +662,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
struct btrfs_key key;
unsigned long end;
unsigned long ptr;
- LIST_HEAD(list);
+ LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
LIST_HEAD(useless);
int cowonly;
int ret;
@@ -778,6 +778,10 @@ again:
key.type != BTRFS_SHARED_BLOCK_REF_KEY);
}
+ /*
+ * Parent node found and matches current inline ref, no need to
+ * rebuild this node for this inline ref.
+ */
if (exist &&
((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
exist->owner == key.offset) ||
@@ -787,11 +791,12 @@ again:
goto next;
}
+ /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
if (key.objectid == key.offset) {
/*
- * only root blocks of reloc trees use
- * backref of this type.
+ * Only root blocks of reloc trees use backref
+ * pointing to itself.
*/
root = find_reloc_root(rc, cur->bytenr);
ASSERT(root);
@@ -840,7 +845,11 @@ again:
goto next;
}
- /* key.type == BTRFS_TREE_BLOCK_REF_KEY */
+ /*
+ * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
+ * means the root objectid. We need to search the tree to get
+ * its parent bytenr.
+ */
root = read_fs_root(rc->extent_root->fs_info, key.offset);
if (IS_ERR(root)) {
err = PTR_ERR(root);
@@ -863,10 +872,7 @@ again:
level = cur->level + 1;
- /*
- * searching the tree to find upper level blocks
- * reference the block.
- */
+ /* Search the tree to find parent blocks referring to the block. */
path2->search_commit_root = 1;
path2->skip_locking = 1;
path2->lowest_level = level;
@@ -884,7 +890,8 @@ again:
cur->bytenr) {
btrfs_err(root->fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
- cur->bytenr, level - 1, root->objectid,
+ cur->bytenr, level - 1,
+ root->root_key.objectid,
node_key->objectid, node_key->type,
node_key->offset);
err = -ENOENT;
@@ -892,6 +899,8 @@ again:
}
lower = cur;
need_check = true;
+
+ /* Add all nodes and edges in the path */
for (; level < BTRFS_MAX_LEVEL; level++) {
if (!path2->nodes[level]) {
ASSERT(btrfs_root_bytenr(&root->root_item) ==
@@ -1281,7 +1290,7 @@ static void __del_reloc_root(struct btrfs_root *root)
struct mapping_node *node = NULL;
struct reloc_control *rc = fs_info->reloc_ctl;
- if (rc) {
+ if (rc && root->node) {
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
root->node->start);
@@ -1735,7 +1744,7 @@ int memcmp_node_keys(struct extent_buffer *eb, int slot,
* errors, a negative error number is returned.
*/
static noinline_for_stack
-int replace_path(struct btrfs_trans_handle *trans,
+int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
struct btrfs_root *dest, struct btrfs_root *src,
struct btrfs_path *path, struct btrfs_key *next_key,
int lowest_level, int max_level)
@@ -1879,14 +1888,9 @@ again:
* and tree block numbers, if current trans doesn't free
* data reloc tree inode.
*/
- ret = btrfs_qgroup_trace_subtree(trans, parent,
- btrfs_header_generation(parent),
- btrfs_header_level(parent));
- if (ret < 0)
- break;
- ret = btrfs_qgroup_trace_subtree(trans, path->nodes[level],
- btrfs_header_generation(path->nodes[level]),
- btrfs_header_level(path->nodes[level]));
+ ret = btrfs_qgroup_trace_subtree_swap(trans, rc->block_group,
+ parent, slot, path->nodes[level],
+ path->slots[level], last_snapshot);
if (ret < 0)
break;
@@ -2205,7 +2209,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
ret = 0;
} else {
- ret = replace_path(trans, root, reloc_root, path,
+ ret = replace_path(trans, rc, root, reloc_root, path,
&next_key, level, max_level);
}
if (ret < 0) {
@@ -2911,7 +2915,6 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info,
free_extent_buffer(eb);
return -EIO;
}
- WARN_ON(btrfs_header_level(eb) != block->level);
if (block->level == 0)
btrfs_item_key_to_cpu(eb, &block->key, 0);
else
@@ -2987,7 +2990,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
struct backref_node *node;
struct btrfs_path *path;
struct tree_block *block;
- struct rb_node *rb_node;
+ struct tree_block *next;
int ret;
int err = 0;
@@ -2997,29 +3000,23 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
goto out_free_blocks;
}
- rb_node = rb_first(blocks);
- while (rb_node) {
- block = rb_entry(rb_node, struct tree_block, rb_node);
+ /* Kick in readahead for tree blocks with missing keys */
+ rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
if (!block->key_ready)
readahead_tree_block(fs_info, block->bytenr);
- rb_node = rb_next(rb_node);
}
- rb_node = rb_first(blocks);
- while (rb_node) {
- block = rb_entry(rb_node, struct tree_block, rb_node);
+ /* Get first keys */
+ rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
if (!block->key_ready) {
err = get_tree_block_key(fs_info, block);
if (err)
goto out_free_path;
}
- rb_node = rb_next(rb_node);
}
- rb_node = rb_first(blocks);
- while (rb_node) {
- block = rb_entry(rb_node, struct tree_block, rb_node);
-
+ /* Do tree relocation */
+ rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
node = build_backref_tree(rc, &block->key,
block->level, block->bytenr);
if (IS_ERR(node)) {
@@ -3030,11 +3027,10 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
ret = relocate_tree_block(trans, rc, node, &block->key,
path);
if (ret < 0) {
- if (ret != -EAGAIN || rb_node == rb_first(blocks))
+ if (ret != -EAGAIN || &block->rb_node == rb_first(blocks))
err = ret;
goto out;
}
- rb_node = rb_next(rb_node);
}
out:
err = finish_pending_nodes(trans, rc, path, err);
@@ -4669,7 +4665,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
if (rc->merge_reloc_tree) {
ret = btrfs_block_rsv_migrate(&pending->block_rsv,
rc->block_rsv,
- rc->nodes_relocated, 1);
+ rc->nodes_relocated, true);
if (ret)
return ret;
}
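
The relocate_tree_blocks() hunk replaces three open-coded rb_first()/rb_next() loops with rbtree_postorder_for_each_entry_safe() from <linux/rbtree.h>. A hedged sketch of the idiom, reusing the names from the hunk:

	struct tree_block *block, *next;

	/*
	 * Postorder traversal: the loop body may free the current entry,
	 * and no cursor variable is needed. Note it does not visit nodes
	 * in sorted order, which is why the -EAGAIN special case above now
	 * compares &block->rb_node against rb_first(blocks) instead of a
	 * cached iterator.
	 */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready)
			readahead_tree_block(fs_info, block->bytenr);
	}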
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 3be1456b5116..902819d3cf41 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1124,7 +1124,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
if (scrub_write_page_to_dev_replace(sblock_other,
page_num) != 0) {
- btrfs_dev_replace_stats_inc(
+ atomic64_inc(
&fs_info->dev_replace.num_write_errors);
success = 0;
}
@@ -1564,8 +1564,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
if (btrfsic_submit_bio_wait(bio)) {
btrfs_dev_stat_inc_and_print(page_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS);
- btrfs_dev_replace_stats_inc(
- &fs_info->dev_replace.num_write_errors);
+ atomic64_inc(&fs_info->dev_replace.num_write_errors);
bio_put(bio);
return -EIO;
}
@@ -1592,8 +1591,7 @@ static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
ret = scrub_write_page_to_dev_replace(sblock, page_num);
if (ret)
- btrfs_dev_replace_stats_inc(
- &fs_info->dev_replace.num_write_errors);
+ atomic64_inc(&fs_info->dev_replace.num_write_errors);
}
}
@@ -1726,8 +1724,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
struct scrub_page *spage = sbio->pagev[i];
spage->io_error = 1;
- btrfs_dev_replace_stats_inc(&dev_replace->
- num_write_errors);
+ atomic64_inc(&dev_replace->num_write_errors);
}
}
@@ -3022,8 +3019,7 @@ out:
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
struct map_lookup *map,
struct btrfs_device *scrub_dev,
- int num, u64 base, u64 length,
- int is_dev_replace)
+ int num, u64 base, u64 length)
{
struct btrfs_path *path, *ppath;
struct btrfs_fs_info *fs_info = sctx->fs_info;
@@ -3299,7 +3295,7 @@ again:
extent_physical = extent_logical - logical + physical;
extent_dev = scrub_dev;
extent_mirror_num = mirror_num;
- if (is_dev_replace)
+ if (sctx->is_dev_replace)
scrub_remap_extent(fs_info, extent_logical,
extent_len, &extent_physical,
&extent_dev,
@@ -3397,8 +3393,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
struct btrfs_device *scrub_dev,
u64 chunk_offset, u64 length,
u64 dev_offset,
- struct btrfs_block_group_cache *cache,
- int is_dev_replace)
+ struct btrfs_block_group_cache *cache)
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
@@ -3435,8 +3430,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
map->stripes[i].physical == dev_offset) {
ret = scrub_stripe(sctx, map, scrub_dev, i,
- chunk_offset, length,
- is_dev_replace);
+ chunk_offset, length);
if (ret)
goto out;
}
@@ -3449,8 +3443,7 @@ out:
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
- struct btrfs_device *scrub_dev, u64 start, u64 end,
- int is_dev_replace)
+ struct btrfs_device *scrub_dev, u64 start, u64 end)
{
struct btrfs_dev_extent *dev_extent = NULL;
struct btrfs_path *path;
@@ -3544,7 +3537,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
*/
scrub_pause_on(fs_info);
ret = btrfs_inc_block_group_ro(cache);
- if (!ret && is_dev_replace) {
+ if (!ret && sctx->is_dev_replace) {
/*
* If we are doing a device replace wait for any tasks
 * that started delalloc right before we set the block
@@ -3609,7 +3602,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
dev_replace->item_needs_writeback = 1;
btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
- found_key.offset, cache, is_dev_replace);
+ found_key.offset, cache);
/*
* flush, submit all pending read and write bios, afterwards
@@ -3670,7 +3663,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
btrfs_put_block_group(cache);
if (ret)
break;
- if (is_dev_replace &&
+ if (sctx->is_dev_replace &&
atomic64_read(&dev_replace->num_write_errors) > 0) {
ret = -EIO;
break;
@@ -3893,8 +3886,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
}
if (!ret)
- ret = scrub_enumerate_chunks(sctx, dev, start, end,
- is_dev_replace);
+ ret = scrub_enumerate_chunks(sctx, dev, start, end);
wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
atomic_dec(&fs_info->scrubs_running);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index ba8950bfd9c7..094cc1444a90 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1186,9 +1186,9 @@ static int __clone_root_cmp_bsearch(const void *key, const void *elt)
u64 root = (u64)(uintptr_t)key;
struct clone_root *cr = (struct clone_root *)elt;
- if (root < cr->root->objectid)
+ if (root < cr->root->root_key.objectid)
return -1;
- if (root > cr->root->objectid)
+ if (root > cr->root->root_key.objectid)
return 1;
return 0;
}
@@ -1198,9 +1198,9 @@ static int __clone_root_cmp_sort(const void *e1, const void *e2)
struct clone_root *cr1 = (struct clone_root *)e1;
struct clone_root *cr2 = (struct clone_root *)e2;
- if (cr1->root->objectid < cr2->root->objectid)
+ if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
return -1;
- if (cr1->root->objectid > cr2->root->objectid)
+ if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
return 1;
return 0;
}
@@ -1693,12 +1693,8 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
di = btrfs_lookup_dir_item(NULL, root, path,
dir, name, name_len, 0);
- if (!di) {
- ret = -ENOENT;
- goto out;
- }
- if (IS_ERR(di)) {
- ret = PTR_ERR(di);
+ if (IS_ERR_OR_NULL(di)) {
+ ret = di ? PTR_ERR(di) : -ENOENT;
goto out;
}
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
@@ -2346,7 +2342,7 @@ static int send_subvol_begin(struct send_ctx *sctx)
return -ENOMEM;
}
- key.objectid = send_root->objectid;
+ key.objectid = send_root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = 0;
@@ -2362,7 +2358,7 @@ static int send_subvol_begin(struct send_ctx *sctx)
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.type != BTRFS_ROOT_BACKREF_KEY ||
- key.objectid != send_root->objectid) {
+ key.objectid != send_root->root_key.objectid) {
ret = -ENOENT;
goto out;
}
@@ -4907,8 +4903,8 @@ static int send_clone(struct send_ctx *sctx,
btrfs_debug(sctx->send_root->fs_info,
"send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
- offset, len, clone_root->root->objectid, clone_root->ino,
- clone_root->offset);
+ offset, len, clone_root->root->root_key.objectid,
+ clone_root->ino, clone_root->offset);
p = fs_path_alloc();
if (!p)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 6601c9aa5e35..b362b45dd757 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2177,8 +2177,10 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
/* Mask in the root object ID too, to disambiguate subvols */
- buf->f_fsid.val[0] ^= BTRFS_I(d_inode(dentry))->root->objectid >> 32;
- buf->f_fsid.val[1] ^= BTRFS_I(d_inode(dentry))->root->objectid;
+ buf->f_fsid.val[0] ^=
+ BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32;
+ buf->f_fsid.val[1] ^=
+ BTRFS_I(d_inode(dentry))->root->root_key.objectid;
return 0;
}
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index d9269a531a4d..9e0f4a01be14 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -106,7 +106,7 @@ static int test_find_delalloc(u32 sectorsize)
set_extent_delalloc(&tmp, 0, sectorsize - 1, 0, NULL);
start = 0;
end = 0;
- found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_err("should have found at least one delalloc");
@@ -137,7 +137,7 @@ static int test_find_delalloc(u32 sectorsize)
set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, 0, NULL);
start = test_start;
end = 0;
- found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_err("couldn't find delalloc in our range");
@@ -171,7 +171,7 @@ static int test_find_delalloc(u32 sectorsize)
}
start = test_start;
end = 0;
- found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (found) {
test_err("found range when we shouldn't have");
@@ -192,7 +192,7 @@ static int test_find_delalloc(u32 sectorsize)
set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, 0, NULL);
start = test_start;
end = 0;
- found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_err("didn't find our range");
@@ -233,7 +233,7 @@ static int test_find_delalloc(u32 sectorsize)
 * this changes at any point in the future we will need to fix this
 * test's expected behavior.
*/
- found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_err("didn't find our range");
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index 385a5316e4bf..bf15d3a7f20e 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -12,8 +12,8 @@ static void free_extent_map_tree(struct extent_map_tree *em_tree)
struct extent_map *em;
struct rb_node *node;
- while (!RB_EMPTY_ROOT(&em_tree->map)) {
- node = rb_first(&em_tree->map);
+ while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
+ node = rb_first_cached(&em_tree->map);
em = rb_entry(node, struct extent_map, rb_node);
remove_extent_mapping(em_tree, em);
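
This test fixup, together with the matching hunks in transaction.c and volumes.c, tracks the extent map tree changing from struct rb_root to struct rb_root_cached, which keeps a pointer to the leftmost node. A small hedged sketch of the cached-rbtree API from <linux/rbtree.h>:

	struct rb_root_cached map = RB_ROOT_CACHED;
	struct rb_node *node;

	if (!RB_EMPTY_ROOT(&map.rb_root)) {	/* emptiness via the embedded rb_root */
		node = rb_first_cached(&map);	/* leftmost node in O(1) */
		node = rb_last(&map.rb_root);	/* rightmost still walks the tree */
	}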
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3b84f5015029..5686290a50e1 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -44,7 +44,8 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
WARN_ON(refcount_read(&transaction->use_count) == 0);
if (refcount_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
- WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
+ WARN_ON(!RB_EMPTY_ROOT(
+ &transaction->delayed_refs.href_root.rb_root));
if (transaction->delayed_refs.pending_csums)
btrfs_err(transaction->fs_info,
"pending csums is %llu",
@@ -118,7 +119,7 @@ static noinline void switch_commit_roots(struct btrfs_transaction *trans)
list_del_init(&root->dirty_list);
free_extent_buffer(root->commit_root);
root->commit_root = btrfs_root_node(root);
- if (is_fstree(root->objectid))
+ if (is_fstree(root->root_key.objectid))
btrfs_unpin_free_ino(root);
clear_btree_io_tree(&root->dirty_log_pages);
}
@@ -245,7 +246,7 @@ loop:
memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));
- cur_trans->delayed_refs.href_root = RB_ROOT;
+ cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
atomic_set(&cur_trans->delayed_refs.num_entries, 0);
@@ -759,7 +760,7 @@ static int should_end_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- if (btrfs_check_space_for_delayed_refs(trans, fs_info))
+ if (btrfs_check_space_for_delayed_refs(trans))
return 1;
return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
@@ -834,7 +835,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
trans->delayed_ref_updates = 0;
if (!trans->sync) {
must_run_delayed_refs =
- btrfs_should_throttle_delayed_refs(trans, info);
+ btrfs_should_throttle_delayed_refs(trans);
cur = max_t(unsigned long, cur, 32);
/*
@@ -1197,7 +1198,10 @@ again:
list_add_tail(&fs_info->extent_root->dirty_list,
&trans->transaction->switch_commits);
- btrfs_after_dev_replace_commit(fs_info);
+
+ /* Update dev-replace pointer once everything is committed */
+ fs_info->dev_replace.committed_cursor_left =
+ fs_info->dev_replace.cursor_left_last_write_of_item;
return 0;
}
@@ -1613,10 +1617,9 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
if (ret < 0)
goto fail;
- ret = btrfs_insert_dir_item(trans, parent_root,
- dentry->d_name.name, dentry->d_name.len,
- BTRFS_I(parent_inode), &key,
- BTRFS_FT_DIR, index);
+ ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
+ dentry->d_name.len, BTRFS_I(parent_inode),
+ &key, BTRFS_FT_DIR, index);
/* We have checked the name at the beginning, so it is impossible. */
BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
if (ret) {
@@ -1929,6 +1932,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
return ret;
}
+ btrfs_trans_release_metadata(trans);
+ trans->block_rsv = NULL;
+
/* make a pass through all the delayed refs we have so far
* any runnings procs may add more while we are here
*/
@@ -1938,9 +1944,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
return ret;
}
- btrfs_trans_release_metadata(trans);
- trans->block_rsv = NULL;
-
cur_trans = trans->transaction;
/*
@@ -2330,7 +2333,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
list_del_init(&root->root_list);
spin_unlock(&fs_info->trans_lock);
- btrfs_debug(fs_info, "cleaner removing %llu", root->objectid);
+ btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
btrfs_kill_all_delayed_nodes(root);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index db835635372f..cab0b1f1f741 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -487,6 +487,13 @@ static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
u32 nritems = btrfs_header_nritems(leaf);
int slot;
+ if (btrfs_header_level(leaf) != 0) {
+ generic_err(fs_info, leaf, 0,
+ "invalid level for leaf, have %d expect 0",
+ btrfs_header_level(leaf));
+ return -EUCLEAN;
+ }
+
/*
* Extent buffers from a relocation tree have a owner field that
* corresponds to the subvolume tree they are based on. So just from an
@@ -645,9 +652,16 @@ int btrfs_check_node(struct btrfs_fs_info *fs_info, struct extent_buffer *node)
unsigned long nr = btrfs_header_nritems(node);
struct btrfs_key key, next_key;
int slot;
+ int level = btrfs_header_level(node);
u64 bytenr;
int ret = 0;
+ if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
+ generic_err(fs_info, node, 0,
+ "invalid level for node, have %d expect [1, %d]",
+ level, BTRFS_MAX_LEVEL - 1);
+ return -EUCLEAN;
+ }
if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
btrfs_crit(fs_info,
"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 3c2ae0e4f25a..0dba09334a16 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -205,14 +205,11 @@ static int join_running_log_trans(struct btrfs_root *root)
* until you call btrfs_end_log_trans() or it makes any future
* log transactions wait until you call btrfs_end_log_trans()
*/
-int btrfs_pin_log_trans(struct btrfs_root *root)
+void btrfs_pin_log_trans(struct btrfs_root *root)
{
- int ret = -ENOENT;
-
mutex_lock(&root->log_mutex);
atomic_inc(&root->log_writers);
mutex_unlock(&root->log_mutex);
- return ret;
}
/*
@@ -258,6 +255,13 @@ struct walk_control {
/* what stage of the replay code we're currently in */
int stage;
+ /*
+ * Ignore any items from the inode currently being processed. Needs
+ * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
+ * the LOG_WALK_REPLAY_INODES stage.
+ */
+ bool ignore_cur_inode;
+
/* the root we are currently replaying */
struct btrfs_root *replay_dest;
@@ -2487,6 +2491,20 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
inode_item = btrfs_item_ptr(eb, i,
struct btrfs_inode_item);
+ /*
+ * If we have a tmpfile (O_TMPFILE) that got fsync'ed
+ * and never got linked before the fsync, skip it, as
+ * replaying it is pointless since it would be deleted
+ * later. We skip logging tmpfiles, but it's always
+ * possible we are replaying a log created with a kernel
+ * that used to log tmpfiles.
+ */
+ if (btrfs_inode_nlink(eb, inode_item) == 0) {
+ wc->ignore_cur_inode = true;
+ continue;
+ } else {
+ wc->ignore_cur_inode = false;
+ }
ret = replay_xattr_deletes(wc->trans, root, log,
path, key.objectid);
if (ret)
@@ -2524,16 +2542,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
root->fs_info->sectorsize);
ret = btrfs_drop_extents(wc->trans, root, inode,
from, (u64)-1, 1);
- /*
- * If the nlink count is zero here, the iput
- * will free the inode. We bump it to make
- * sure it doesn't get freed until the link
- * count fixup is done.
- */
if (!ret) {
- if (inode->i_nlink == 0)
- inc_nlink(inode);
- /* Update link count and nbytes. */
+ /* Update the inode's nbytes. */
ret = btrfs_update_inode(wc->trans,
root, inode);
}
@@ -2548,6 +2558,9 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
break;
}
+ if (wc->ignore_cur_inode)
+ continue;
+
if (key.type == BTRFS_DIR_INDEX_KEY &&
wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
ret = replay_one_dir_item(wc->trans, root, path,
@@ -3196,9 +3209,12 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
};
ret = walk_log_tree(trans, log, &wc);
- /* I don't think this can happen but just in case */
- if (ret)
- btrfs_abort_transaction(trans, ret);
+ if (ret) {
+ if (trans)
+ btrfs_abort_transaction(trans, ret);
+ else
+ btrfs_handle_fs_error(log->fs_info, ret, NULL);
+ }
while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
@@ -5564,9 +5580,33 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
dir_inode = btrfs_iget(fs_info->sb, &inode_key,
root, NULL);
- /* If parent inode was deleted, skip it. */
- if (IS_ERR(dir_inode))
- continue;
+ /*
+ * If the parent inode was deleted, return an error to
+ * fall back to a transaction commit. This prevents an
+ * inode that was moved from one parent A to a parent B,
+ * had its former parent A deleted, and was then
+ * fsync'ed, from existing at both parents after a log
+ * replay (with the old parent B still existing).
+ * Example:
+ *
+ * mkdir /mnt/A
+ * mkdir /mnt/B
+ * touch /mnt/B/bar
+ * sync
+ * mv /mnt/B/bar /mnt/A/bar
+ * mv -T /mnt/A /mnt/B
+ * fsync /mnt/B/bar
+ * <power fail>
+ *
+ * If we ignore the old parent B which got deleted,
+ * after a log replay we would have file bar linked
+ * at both parents and the old parent B would still
+ * exist.
+ */
+ if (IS_ERR(dir_inode)) {
+ ret = PTR_ERR(dir_inode);
+ goto out;
+ }
if (ctx)
ctx->log_new_dentries = false;
@@ -5640,7 +5680,13 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
if (ret)
goto end_no_trans;
- if (btrfs_inode_in_log(inode, trans->transid)) {
+ /*
+ * Skip already logged inodes or inodes corresponding to tmpfiles
+ * (since logging them is pointless, a link count of 0 means they
+ * will never be accessible).
+ */
+ if (btrfs_inode_in_log(inode, trans->transid) ||
+ inode->vfs_inode.i_nlink == 0) {
ret = BTRFS_NO_LOG_SYNC;
goto end_no_trans;
}
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 7ab9bb88a639..767765031e59 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -65,7 +65,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
const char *name, int name_len,
struct btrfs_inode *inode, u64 dirid);
void btrfs_end_log_trans(struct btrfs_root *root);
-int btrfs_pin_log_trans(struct btrfs_root *root);
+void btrfs_pin_log_trans(struct btrfs_root *root);
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
struct btrfs_inode *dir, struct btrfs_inode *inode,
int for_rename);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f4405e430da6..f435d397019e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1613,7 +1613,7 @@ static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
em_tree = &fs_info->mapping_tree.map_tree;
read_lock(&em_tree->lock);
- n = rb_last(&em_tree->map);
+ n = rb_last(&em_tree->map.rb_root);
if (n) {
em = rb_entry(n, struct extent_map, rb_node);
ret = em->start + em->len;
@@ -1854,6 +1854,24 @@ void btrfs_assign_next_active_device(struct btrfs_device *device,
fs_info->fs_devices->latest_bdev = next_device->bdev;
}
+/*
+ * Return btrfs_fs_devices::num_devices excluding the device that's being
+ * currently replaced.
+ */
+static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
+{
+ u64 num_devices = fs_info->fs_devices->num_devices;
+
+ btrfs_dev_replace_read_lock(&fs_info->dev_replace);
+ if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
+ ASSERT(num_devices > 1);
+ num_devices--;
+ }
+ btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+
+ return num_devices;
+}
+
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
u64 devid)
{
@@ -1865,22 +1883,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
mutex_lock(&uuid_mutex);
- num_devices = fs_devices->num_devices;
- btrfs_dev_replace_read_lock(&fs_info->dev_replace);
- if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
- WARN_ON(num_devices < 1);
- num_devices--;
- }
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+ num_devices = btrfs_num_devices(fs_info);
ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
if (ret)
goto out;
- ret = btrfs_find_device_by_devspec(fs_info, devid, device_path,
- &device);
- if (ret)
+ device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
+
+ if (IS_ERR(device)) {
+ if (PTR_ERR(device) == -ENOENT &&
+ strcmp(device_path, "missing") == 0)
+ ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
+ else
+ ret = PTR_ERR(device);
goto out;
+ }
if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = BTRFS_ERROR_DEV_TGT_REPLACE;
@@ -2096,9 +2114,8 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
call_rcu(&tgtdev->rcu, free_device_rcu);
}
-static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
- const char *device_path,
- struct btrfs_device **device)
+static struct btrfs_device *btrfs_find_device_by_path(
+ struct btrfs_fs_info *fs_info, const char *device_path)
{
int ret = 0;
struct btrfs_super_block *disk_super;
@@ -2106,28 +2123,27 @@ static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
u8 *dev_uuid;
struct block_device *bdev;
struct buffer_head *bh;
+ struct btrfs_device *device;
- *device = NULL;
ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
fs_info->bdev_holder, 0, &bdev, &bh);
if (ret)
- return ret;
+ return ERR_PTR(ret);
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
dev_uuid = disk_super->dev_item.uuid;
- *device = btrfs_find_device(fs_info, devid, dev_uuid, disk_super->fsid);
+ device = btrfs_find_device(fs_info, devid, dev_uuid, disk_super->fsid);
brelse(bh);
- if (!*device)
- ret = -ENOENT;
+ if (!device)
+ device = ERR_PTR(-ENOENT);
blkdev_put(bdev, FMODE_READ);
- return ret;
+ return device;
}
-int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
- const char *device_path,
- struct btrfs_device **device)
+static struct btrfs_device *btrfs_find_device_missing_or_by_path(
+ struct btrfs_fs_info *fs_info, const char *device_path)
{
- *device = NULL;
+ struct btrfs_device *device = NULL;
if (strcmp(device_path, "missing") == 0) {
struct list_head *devices;
struct btrfs_device *tmp;
@@ -2136,42 +2152,38 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
list_for_each_entry(tmp, devices, dev_list) {
if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
&tmp->dev_state) && !tmp->bdev) {
- *device = tmp;
+ device = tmp;
break;
}
}
- if (!*device)
- return BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
-
- return 0;
+ if (!device)
+ return ERR_PTR(-ENOENT);
} else {
- return btrfs_find_device_by_path(fs_info, device_path, device);
+ device = btrfs_find_device_by_path(fs_info, device_path);
}
+
+ return device;
}
/*
* Lookup a device given by device id, or the path if the id is 0.
*/
-int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
- const char *devpath,
- struct btrfs_device **device)
+struct btrfs_device *btrfs_find_device_by_devspec(
+ struct btrfs_fs_info *fs_info, u64 devid, const char *devpath)
{
- int ret;
+ struct btrfs_device *device;
if (devid) {
- ret = 0;
- *device = btrfs_find_device(fs_info, devid, NULL, NULL);
- if (!*device)
- ret = -ENOENT;
+ device = btrfs_find_device(fs_info, devid, NULL, NULL);
+ if (!device)
+ return ERR_PTR(-ENOENT);
} else {
if (!devpath || !devpath[0])
- return -EINVAL;
-
- ret = btrfs_find_device_missing_or_by_path(fs_info, devpath,
- device);
+ return ERR_PTR(-EINVAL);
+ device = btrfs_find_device_missing_or_by_path(fs_info, devpath);
}
- return ret;
+ return device;
}
/*
@@ -3679,7 +3691,7 @@ static int alloc_profile_is_valid(u64 flags, int extended)
return !extended; /* "0" is valid for usual profiles */
/* true if exactly one bit set */
- return (flags & (flags - 1)) == 0;
+ return is_power_of_2(flags);
}
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
@@ -3740,13 +3752,8 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
}
}
- num_devices = fs_info->fs_devices->num_devices;
- btrfs_dev_replace_read_lock(&fs_info->dev_replace);
- if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
- BUG_ON(num_devices < 1);
- num_devices--;
- }
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+ num_devices = btrfs_num_devices(fs_info);
+
allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
if (num_devices > 1)
allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
@@ -5897,7 +5904,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
}
out:
if (dev_replace_is_ongoing) {
- btrfs_dev_replace_clear_lock_blocking(dev_replace);
+ ASSERT(atomic_read(&dev_replace->blocking_readers) > 0);
+ btrfs_dev_replace_read_lock(dev_replace);
+ /* Barrier implied by atomic_dec_and_test */
+ if (atomic_dec_and_test(&dev_replace->blocking_readers))
+ cond_wake_up_nomb(&dev_replace->read_lock_wq);
btrfs_dev_replace_read_unlock(dev_replace);
}
free_extent_map(em);
@@ -7438,7 +7449,7 @@ static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
int ret = 0;
read_lock(&em_tree->lock);
- for (node = rb_first(&em_tree->map); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
em = rb_entry(node, struct extent_map, rb_node);
if (em->map_lookup->num_stripes !=
em->map_lookup->verified_stripes) {
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 23e9285d88de..aefce895e994 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -410,12 +410,9 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_device *device,
struct btrfs_device *this_dev);
-int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
- const char *device_path,
- struct btrfs_device **device);
-int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
- const char *devpath,
- struct btrfs_device **device);
+struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
+ u64 devid,
+ const char *devpath);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
const u64 *devid,
const u8 *uuid);
diff --git a/fs/buffer.c b/fs/buffer.c
index 6f1ae3ac9789..109f55196866 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3060,11 +3060,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
*/
bio = bio_alloc(GFP_NOIO, 1);
- if (wbc) {
- wbc_init_bio(wbc, bio);
- wbc_account_io(wbc, bh->b_page, bh->b_size);
- }
-
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_write_hint = write_hint;
@@ -3084,6 +3079,11 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
op_flags |= REQ_PRIO;
bio_set_op_attrs(bio, op, op_flags);
+ if (wbc) {
+ wbc_init_bio(wbc, bio);
+ wbc_account_io(wbc, bh->b_page, bh->b_size);
+ }
+
submit_bio(bio);
return 0;
}
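
A hedged note on the reordering, as a comment; the rationale is inferred from the cgroup-writeback requirements, not stated in this hunk:

	/*
	 * wbc_init_bio() associates the bio with a writeback block cgroup;
	 * that association needs the bio's device (and hence its request
	 * queue) already set, so the call must follow bio_set_dev() rather
	 * than come straight after bio_alloc(). wbc_account_io() only
	 * accumulates per-page byte counts and is indifferent to the move.
	 */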
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index af2b17b21b94..95983c744164 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -343,7 +343,7 @@ try_again:
trap = lock_rename(cache->graveyard, dir);
/* do some checks before getting the grave dentry */
- if (rep->d_parent != dir) {
+ if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
/* the entry was probably culled when we dropped the parent dir
* lock */
unlock_rename(cache->graveyard, dir);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 0c9ab62c3df4..9dcaed031843 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1553,6 +1553,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
/* Flags */
#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
+#define MID_DELETED 2 /* Mid has been dequeued/deleted */
/* Types of response buffer returned from SendReceive2 */
#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 7aa08dba4719..52d71b64c0c6 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -659,7 +659,15 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
mid->mid_state = MID_RESPONSE_RECEIVED;
else
mid->mid_state = MID_RESPONSE_MALFORMED;
- list_del_init(&mid->qhead);
+ /*
+ * Trying to handle/dequeue a mid after the send_recv()
+ * function has finished processing it is a bug.
+ */
+ if (mid->mid_flags & MID_DELETED)
+ printk_once(KERN_WARNING
+ "trying to dequeue a deleted mid\n");
+ else
+ list_del_init(&mid->qhead);
spin_unlock(&GlobalMid_Lock);
}
@@ -938,8 +946,7 @@ next_pdu:
} else {
mids[0] = server->ops->find_mid(server, buf);
bufs[0] = buf;
- if (mids[0])
- num_mids = 1;
+ num_mids = 1;
if (!mids[0] || !mids[0]->receive)
length = standard_receive3(server, mids[0]);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index d954ce36b473..89985a0a6819 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1477,7 +1477,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
}
srch_inf->entries_in_buffer = 0;
- srch_inf->index_of_last_entry = 0;
+ srch_inf->index_of_last_entry = 2;
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
fid->volatile_fid, 0, srch_inf);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 78f96fa3d7d9..b48f43963da6 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -142,7 +142,8 @@ void
cifs_delete_mid(struct mid_q_entry *mid)
{
spin_lock(&GlobalMid_Lock);
- list_del(&mid->qhead);
+ list_del_init(&mid->qhead);
+ mid->mid_flags |= MID_DELETED;
spin_unlock(&GlobalMid_Lock);
DeleteMidQEntry(mid);
@@ -772,6 +773,11 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
return mid;
}
+static void
+cifs_noop_callback(struct mid_q_entry *mid)
+{
+}
+
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
const int flags, const int num_rqst, struct smb_rqst *rqst,
@@ -826,8 +832,13 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
}
midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
+ /*
+ * We don't invoke the callback for compounded requests unless it
+ * is the last request.
+ */
+ if (i < num_rqst - 1)
+ midQ[i]->callback = cifs_noop_callback;
}
-
cifs_in_send_inc(ses->server);
rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
cifs_in_send_dec(ses->server);
@@ -908,6 +919,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
midQ[i]->resp_buf = NULL;
}
out:
+ /*
+ * This will dequeue all mids. After this it is important that the
+ * demultiplex_thread will not process any of these mids any further.
+ * This is prevented above by using a noop callback that will not
+ * wake this thread except for the very last PDU.
+ */
for (i = 0; i < num_rqst; i++)
cifs_delete_mid(midQ[i]);
add_credits(ses->server, credits, optype);
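
The compound-request flow above hinges on one wiring trick; a condensed hedged sketch using the names from the diff:

	/*
	 * Only the last mid of a compound should wake the sleeping caller,
	 * so every earlier mid gets the no-op callback:
	 */
	for (i = 0; i < num_rqst - 1; i++)
		midQ[i]->callback = cifs_noop_callback;

	/*
	 * On completion cifs_delete_mid() dequeues each mid under
	 * GlobalMid_Lock and sets MID_DELETED, which pairs with the new
	 * warning in dequeue_mid() to catch any late access from the
	 * demultiplex thread.
	 */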
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index 504b3c3539dc..15f6e96b3bd9 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -52,7 +52,7 @@
#define elf_prpsinfo compat_elf_prpsinfo
#undef ns_to_timeval
-#define ns_to_timeval ns_to_compat_timeval
+#define ns_to_timeval ns_to_old_timeval32
/*
* To use this file, asm/elf.h must define compat_elf_check_arch.
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index a9b00942e87d..ce2cc2169040 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -22,37 +22,21 @@
#include <linux/smp.h>
#include <linux/ioctl.h>
#include <linux/if.h>
-#include <linux/if_bridge.h>
#include <linux/raid/md_u.h>
-#include <linux/kd.h>
-#include <linux/route.h>
-#include <linux/in6.h>
-#include <linux/ipv6_route.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/vt.h>
#include <linux/falloc.h>
-#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/if_pppox.h>
#include <linux/mtio.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
-#include <linux/fb.h>
-#include <linux/videodev2.h>
-#include <linux/netdevice.h>
#include <linux/raw.h>
#include <linux/blkdev.h>
-#include <linux/elevator.h>
#include <linux/rtc.h>
#include <linux/pci.h>
#include <linux/serial.h>
-#include <linux/if_tun.h>
#include <linux/ctype.h>
#include <linux/syscalls.h>
-#include <linux/atalk.h>
#include <linux/gfp.h>
#include <linux/cec.h>
@@ -74,32 +58,9 @@
#endif
#include <linux/uaccess.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_bonding.h>
#include <linux/watchdog.h>
#include <linux/soundcard.h>
-#include <linux/lp.h>
-#include <linux/ppdev.h>
-
-#include <linux/atm.h>
-#include <linux/atmarp.h>
-#include <linux/atmclip.h>
-#include <linux/atmdev.h>
-#include <linux/atmioc.h>
-#include <linux/atmlec.h>
-#include <linux/atmmpc.h>
-#include <linux/atmsvc.h>
-#include <linux/atm_tcp.h>
-#include <linux/sonet.h>
-#include <linux/atm_suni.h>
-
-#include <linux/usb.h>
-#include <linux/usbdevice_fs.h>
-#include <linux/nbd.h>
-#include <linux/random.h>
-#include <linux/filter.h>
#include <linux/hiddev.h>
@@ -112,6 +73,7 @@
#include <linux/sort.h>
#ifdef CONFIG_SPARC
+#include <linux/fb.h>
#include <asm/fbio.h>
#endif
@@ -544,85 +506,6 @@ static int mt_ioctl_trans(struct file *file,
#define HCIUARTSETFLAGS _IOW('U', 203, int)
#define HCIUARTGETFLAGS _IOR('U', 204, int)
-#define BNEPCONNADD _IOW('B', 200, int)
-#define BNEPCONNDEL _IOW('B', 201, int)
-#define BNEPGETCONNLIST _IOR('B', 210, int)
-#define BNEPGETCONNINFO _IOR('B', 211, int)
-#define BNEPGETSUPPFEAT _IOR('B', 212, int)
-
-#define CMTPCONNADD _IOW('C', 200, int)
-#define CMTPCONNDEL _IOW('C', 201, int)
-#define CMTPGETCONNLIST _IOR('C', 210, int)
-#define CMTPGETCONNINFO _IOR('C', 211, int)
-
-#define HIDPCONNADD _IOW('H', 200, int)
-#define HIDPCONNDEL _IOW('H', 201, int)
-#define HIDPGETCONNLIST _IOR('H', 210, int)
-#define HIDPGETCONNINFO _IOR('H', 211, int)
-
-
-struct serial_struct32 {
- compat_int_t type;
- compat_int_t line;
- compat_uint_t port;
- compat_int_t irq;
- compat_int_t flags;
- compat_int_t xmit_fifo_size;
- compat_int_t custom_divisor;
- compat_int_t baud_base;
- unsigned short close_delay;
- char io_type;
- char reserved_char[1];
- compat_int_t hub6;
- unsigned short closing_wait; /* time to wait before closing */
- unsigned short closing_wait2; /* no longer used... */
- compat_uint_t iomem_base;
- unsigned short iomem_reg_shift;
- unsigned int port_high;
- /* compat_ulong_t iomap_base FIXME */
- compat_int_t reserved[1];
-};
-
-static int serial_struct_ioctl(struct file *file,
- unsigned cmd, struct serial_struct32 __user *ss32)
-{
- typedef struct serial_struct32 SS32;
- int err;
- struct serial_struct __user *ss = compat_alloc_user_space(sizeof(*ss));
- __u32 udata;
- unsigned int base;
- unsigned char *iomem_base;
-
- if (ss == NULL)
- return -EFAULT;
- if (cmd == TIOCSSERIAL) {
- if (copy_in_user(ss, ss32, offsetof(SS32, iomem_base)) ||
- get_user(udata, &ss32->iomem_base))
- return -EFAULT;
- iomem_base = compat_ptr(udata);
- if (put_user(iomem_base, &ss->iomem_base) ||
- convert_in_user(&ss32->iomem_reg_shift,
- &ss->iomem_reg_shift) ||
- convert_in_user(&ss32->port_high, &ss->port_high) ||
- put_user(0UL, &ss->iomap_base))
- return -EFAULT;
- }
- err = do_ioctl(file, cmd, (unsigned long)ss);
- if (cmd == TIOCGSERIAL && err >= 0) {
- if (copy_in_user(ss32, ss, offsetof(SS32, iomem_base)) ||
- get_user(iomem_base, &ss->iomem_base))
- return -EFAULT;
- base = (unsigned long)iomem_base >> 32 ?
- 0xffffffff : (unsigned)(unsigned long)iomem_base;
- if (put_user(base, &ss32->iomem_base) ||
- convert_in_user(&ss->iomem_reg_shift,
- &ss32->iomem_reg_shift) ||
- convert_in_user(&ss->port_high, &ss32->port_high))
- return -EFAULT;
- }
- return err;
-}
-
#define RTC_IRQP_READ32 _IOR('p', 0x0b, compat_ulong_t)
#define RTC_IRQP_SET32 _IOW('p', 0x0c, compat_ulong_t)
#define RTC_EPOCH_READ32 _IOR('p', 0x0d, compat_ulong_t)
@@ -707,60 +590,8 @@ static int compat_ioctl_preallocate(struct file *file,
static unsigned int ioctl_pointer[] = {
/* compatible ioctls first */
-COMPATIBLE_IOCTL(0x4B50) /* KDGHWCLK - not in the kernel, but don't complain */
-COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain */
-
-/* Big T */
-COMPATIBLE_IOCTL(TCGETA)
-COMPATIBLE_IOCTL(TCSETA)
-COMPATIBLE_IOCTL(TCSETAW)
-COMPATIBLE_IOCTL(TCSETAF)
-COMPATIBLE_IOCTL(TCSBRK)
-COMPATIBLE_IOCTL(TCXONC)
-COMPATIBLE_IOCTL(TCFLSH)
-COMPATIBLE_IOCTL(TCGETS)
-COMPATIBLE_IOCTL(TCSETS)
-COMPATIBLE_IOCTL(TCSETSW)
-COMPATIBLE_IOCTL(TCSETSF)
-COMPATIBLE_IOCTL(TIOCLINUX)
-COMPATIBLE_IOCTL(TIOCSBRK)
-COMPATIBLE_IOCTL(TIOCGDEV)
-COMPATIBLE_IOCTL(TIOCCBRK)
-COMPATIBLE_IOCTL(TIOCGSID)
-COMPATIBLE_IOCTL(TIOCGICOUNT)
-COMPATIBLE_IOCTL(TIOCGEXCL)
/* Little t */
-COMPATIBLE_IOCTL(TIOCGETD)
-COMPATIBLE_IOCTL(TIOCSETD)
-COMPATIBLE_IOCTL(TIOCEXCL)
-COMPATIBLE_IOCTL(TIOCNXCL)
-COMPATIBLE_IOCTL(TIOCCONS)
-COMPATIBLE_IOCTL(TIOCGSOFTCAR)
-COMPATIBLE_IOCTL(TIOCSSOFTCAR)
-COMPATIBLE_IOCTL(TIOCSWINSZ)
-COMPATIBLE_IOCTL(TIOCGWINSZ)
-COMPATIBLE_IOCTL(TIOCMGET)
-COMPATIBLE_IOCTL(TIOCMBIC)
-COMPATIBLE_IOCTL(TIOCMBIS)
-COMPATIBLE_IOCTL(TIOCMSET)
-COMPATIBLE_IOCTL(TIOCNOTTY)
-COMPATIBLE_IOCTL(TIOCSTI)
COMPATIBLE_IOCTL(TIOCOUTQ)
-COMPATIBLE_IOCTL(TIOCSPGRP)
-COMPATIBLE_IOCTL(TIOCGPGRP)
-COMPATIBLE_IOCTL(TIOCSERGETLSR)
-#ifdef TIOCSRS485
-COMPATIBLE_IOCTL(TIOCSRS485)
-#endif
-#ifdef TIOCGRS485
-COMPATIBLE_IOCTL(TIOCGRS485)
-#endif
-#ifdef TCGETS2
-COMPATIBLE_IOCTL(TCGETS2)
-COMPATIBLE_IOCTL(TCSETS2)
-COMPATIBLE_IOCTL(TCSETSW2)
-COMPATIBLE_IOCTL(TCSETSF2)
-#endif
/* Little f */
COMPATIBLE_IOCTL(FIOCLEX)
COMPATIBLE_IOCTL(FIONCLEX)
@@ -775,23 +606,6 @@ COMPATIBLE_IOCTL(FIGETBSZ)
COMPATIBLE_IOCTL(FIFREEZE)
COMPATIBLE_IOCTL(FITHAW)
COMPATIBLE_IOCTL(FITRIM)
-COMPATIBLE_IOCTL(KDGETKEYCODE)
-COMPATIBLE_IOCTL(KDSETKEYCODE)
-COMPATIBLE_IOCTL(KDGKBTYPE)
-COMPATIBLE_IOCTL(KDGETMODE)
-COMPATIBLE_IOCTL(KDGKBMODE)
-COMPATIBLE_IOCTL(KDGKBMETA)
-COMPATIBLE_IOCTL(KDGKBENT)
-COMPATIBLE_IOCTL(KDSKBENT)
-COMPATIBLE_IOCTL(KDGKBSENT)
-COMPATIBLE_IOCTL(KDSKBSENT)
-COMPATIBLE_IOCTL(KDGKBDIACR)
-COMPATIBLE_IOCTL(KDSKBDIACR)
-COMPATIBLE_IOCTL(KDGKBDIACRUC)
-COMPATIBLE_IOCTL(KDSKBDIACRUC)
-COMPATIBLE_IOCTL(KDKBDREP)
-COMPATIBLE_IOCTL(KDGKBLED)
-COMPATIBLE_IOCTL(KDGETLED)
#ifdef CONFIG_BLOCK
/* Big S */
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN)
@@ -1106,19 +920,6 @@ COMPATIBLE_IOCTL(RFCOMMRELEASEDEV)
COMPATIBLE_IOCTL(RFCOMMGETDEVLIST)
COMPATIBLE_IOCTL(RFCOMMGETDEVINFO)
COMPATIBLE_IOCTL(RFCOMMSTEALDLC)
-COMPATIBLE_IOCTL(BNEPCONNADD)
-COMPATIBLE_IOCTL(BNEPCONNDEL)
-COMPATIBLE_IOCTL(BNEPGETCONNLIST)
-COMPATIBLE_IOCTL(BNEPGETCONNINFO)
-COMPATIBLE_IOCTL(BNEPGETSUPPFEAT)
-COMPATIBLE_IOCTL(CMTPCONNADD)
-COMPATIBLE_IOCTL(CMTPCONNDEL)
-COMPATIBLE_IOCTL(CMTPGETCONNLIST)
-COMPATIBLE_IOCTL(CMTPGETCONNINFO)
-COMPATIBLE_IOCTL(HIDPCONNADD)
-COMPATIBLE_IOCTL(HIDPCONNDEL)
-COMPATIBLE_IOCTL(HIDPGETCONNLIST)
-COMPATIBLE_IOCTL(HIDPGETCONNINFO)
/* CAPI */
COMPATIBLE_IOCTL(CAPI_REGISTER)
COMPATIBLE_IOCTL(CAPI_GET_MANUFACTURER)
@@ -1133,11 +934,6 @@ COMPATIBLE_IOCTL(CAPI_SET_FLAGS)
COMPATIBLE_IOCTL(CAPI_CLR_FLAGS)
COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT)
COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT)
-/* Siemens Gigaset */
-COMPATIBLE_IOCTL(GIGASET_REDIR)
-COMPATIBLE_IOCTL(GIGASET_CONFIG)
-COMPATIBLE_IOCTL(GIGASET_BRKCHARS)
-COMPATIBLE_IOCTL(GIGASET_VERSION)
/* Misc. */
COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */
COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */
@@ -1223,21 +1019,6 @@ COMPATIBLE_IOCTL(JSIOCGAXES)
COMPATIBLE_IOCTL(JSIOCGBUTTONS)
COMPATIBLE_IOCTL(JSIOCGNAME(0))
-#ifdef TIOCGLTC
-COMPATIBLE_IOCTL(TIOCGLTC)
-COMPATIBLE_IOCTL(TIOCSLTC)
-#endif
-#ifdef TIOCSTART
-/*
- * For these two we have definitions in ioctls.h and/or termios.h on
- * some architectures but no actual implemention. Some applications
- * like bash call them if they are defined in the headers, so we provide
- * entries here to avoid syslog message spew.
- */
-COMPATIBLE_IOCTL(TIOCSTART)
-COMPATIBLE_IOCTL(TIOCSTOP)
-#endif
-
/* fat 'r' ioctls. These are handled by fat with ->compat_ioctl,
but we don't want warnings on other file systems. So declare
them as compatible here. */
@@ -1293,10 +1074,6 @@ static long do_ioctl_trans(unsigned int cmd,
case MTIOCPOS32:
return mt_ioctl_trans(file, cmd, argp);
#endif
- /* Serial */
- case TIOCGSERIAL:
- case TIOCSSERIAL:
- return serial_struct_ioctl(file, cmd, argp);
/* Not implemented in the native kernel */
case RTC_IRQP_READ32:
case RTC_IRQP_SET32:
@@ -1316,24 +1093,11 @@ static long do_ioctl_trans(unsigned int cmd,
* so we must not do a compat_ptr() translation.
*/
switch (cmd) {
- /* Big T */
- case TCSBRKP:
- case TIOCMIWAIT:
- case TIOCSCTTY:
/* RAID */
case HOT_REMOVE_DISK:
case HOT_ADD_DISK:
case SET_DISK_FAULTY:
case SET_BITMAP_FILE:
- /* Big K */
- case KDSIGACCEPT:
- case KIOCSOUND:
- case KDMKTONE:
- case KDSETMODE:
- case KDSKBMODE:
- case KDSKBMETA:
- case KDSKBLED:
- case KDSETLED:
return vfs_ioctl(file, cmd, arg);
}
diff --git a/fs/coredump.c b/fs/coredump.c
index 1e2c87acac9b..e42e17e55bfd 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -536,7 +536,7 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
return err;
}
-void do_coredump(const siginfo_t *siginfo)
+void do_coredump(const kernel_siginfo_t *siginfo)
{
struct core_state core_state;
struct core_name cn;
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 39c20ef26db4..79debfc9cef9 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -83,10 +83,6 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
return true;
- if (contents_mode == FS_ENCRYPTION_MODE_SPECK128_256_XTS &&
- filenames_mode == FS_ENCRYPTION_MODE_SPECK128_256_CTS)
- return true;
-
return false;
}
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index e997ca51192f..7874c9bb2fc5 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -174,16 +174,6 @@ static struct fscrypt_mode {
.cipher_str = "cts(cbc(aes))",
.keysize = 16,
},
- [FS_ENCRYPTION_MODE_SPECK128_256_XTS] = {
- .friendly_name = "Speck128/256-XTS",
- .cipher_str = "xts(speck128)",
- .keysize = 64,
- },
- [FS_ENCRYPTION_MODE_SPECK128_256_CTS] = {
- .friendly_name = "Speck128/256-CTS-CBC",
- .cipher_str = "cts(cbc(speck128))",
- .keysize = 32,
- },
};
static struct fscrypt_mode *
diff --git a/fs/dax.c b/fs/dax.c
index f32d7125ad0f..0fb270f0a0ef 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -447,6 +447,7 @@ bool dax_lock_mapping_entry(struct page *page)
xa_unlock_irq(&mapping->i_pages);
break;
} else if (IS_ERR(entry)) {
+ xa_unlock_irq(&mapping->i_pages);
WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
continue;
}
@@ -665,6 +666,8 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
indices)) {
+ pgoff_t nr_pages = 1;
+
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *pvec_ent = pvec.pages[i];
void *entry;
@@ -679,8 +682,15 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
xa_lock_irq(&mapping->i_pages);
entry = get_unlocked_mapping_entry(mapping, index, NULL);
- if (entry)
+ if (entry) {
page = dax_busy_page(entry);
+ /*
+ * Account for multi-order entries at
+ * the end of the pagevec.
+ */
+ if (i + 1 >= pagevec_count(&pvec))
+ nr_pages = 1UL << dax_radix_order(entry);
+ }
put_unlocked_mapping_entry(mapping, index, entry);
xa_unlock_irq(&mapping->i_pages);
if (page)
@@ -695,7 +705,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
*/
pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
- index++;
+ index += nr_pages;
if (page)
break;
@@ -1120,21 +1130,12 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
{
struct inode *inode = mapping->host;
unsigned long vaddr = vmf->address;
- vm_fault_t ret = VM_FAULT_NOPAGE;
- struct page *zero_page;
- pfn_t pfn;
-
- zero_page = ZERO_PAGE(0);
- if (unlikely(!zero_page)) {
- ret = VM_FAULT_OOM;
- goto out;
- }
+ pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
+ vm_fault_t ret;
- pfn = page_to_pfn_t(zero_page);
dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
false);
ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
-out:
trace_dax_load_hole(inode, vmf, ret);
return ret;
}
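
A worked instance of the nr_pages arithmetic in dax_layout_busy_page(), assuming x86-64 page geometry:

	/*
	 * For a PMD-sized DAX entry, dax_radix_order() yields
	 * PMD_SHIFT - PAGE_SHIFT = 21 - 12 = 9, so
	 *	nr_pages = 1UL << 9 = 512
	 * and the scan index jumps a whole 2 MiB hugepage forward instead
	 * of re-finding the same multi-order entry 511 more times.
	 */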
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 49121e5a8de2..5c36ceecb5c1 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -593,11 +593,16 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
lower_new_dir_dentry = dget_parent(lower_new_dentry);
target_inode = d_inode(new_dentry);
trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ rc = -EINVAL;
+ if (lower_old_dentry->d_parent != lower_old_dir_dentry)
+ goto out_lock;
+ if (lower_new_dentry->d_parent != lower_new_dir_dentry)
+ goto out_lock;
+ if (d_unhashed(lower_old_dentry) || d_unhashed(lower_new_dentry))
+ goto out_lock;
/* source should not be ancestor of target */
- if (trap == lower_old_dentry) {
- rc = -EINVAL;
+ if (trap == lower_old_dentry)
goto out_lock;
- }
/* target should not be ancestor of source */
if (trap == lower_new_dentry) {
rc = -ENOTEMPTY;
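
The added checks are a standard VFS revalidation idiom; a hedged sketch of its shape with the names from the hunk:

	/*
	 * Both dentries were looked up before lock_rename() took the
	 * parents' locks, so re-check, now under the locks, that each one
	 * still hangs off the expected parent and is still hashed:
	 */
	if (lower_old_dentry->d_parent != lower_old_dir_dentry ||
	    lower_new_dentry->d_parent != lower_new_dir_dentry ||
	    d_unhashed(lower_old_dentry) || d_unhashed(lower_new_dentry))
		goto out_lock;	/* raced with a concurrent rename or unlink */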
diff --git a/fs/exec.c b/fs/exec.c
index 1ebf6e5a521d..fc281b738a98 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -908,14 +908,14 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
goto out;
i_size = i_size_read(file_inode(file));
- if (max_size > 0 && i_size > max_size) {
- ret = -EFBIG;
- goto out;
- }
if (i_size <= 0) {
ret = -EINVAL;
goto out;
}
+ if (i_size > SIZE_MAX || (max_size > 0 && i_size > max_size)) {
+ ret = -EFBIG;
+ goto out;
+ }
if (id != READING_FIRMWARE_PREALLOC_BUFFER)
*buf = vmalloc(i_size);
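
The reordered checks in kernel_read_file() make the overflow case explicit: i_size is a 64-bit loff_t, so on 32-bit systems it can exceed what a size_t (and thus vmalloc()) can express. A minimal userspace sketch of the resulting check order (the function name is illustrative, not the kernel's):

#include <stdint.h>
#include <errno.h>

/* Sketch: validate a file size before allocating a buffer for it;
 * returns 0 or a negative errno, mirroring the order above. On 64-bit
 * builds the SIZE_MAX test can never fire; it guards 32-bit builds. */
static int validate_read_size(int64_t i_size, int64_t max_size)
{
	if (i_size <= 0)
		return -EINVAL;			/* empty or nonsensical size */
	if ((uint64_t)i_size > SIZE_MAX ||	/* wouldn't fit in a size_t */
	    (max_size > 0 && i_size > max_size))
		return -EFBIG;			/* caller-imposed limit */
	return 0;				/* safe to allocate i_size bytes */
}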
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 7f7ee18fe179..e4bb9386c045 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1448,6 +1448,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
}
inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+ ext2_set_inode_flags(inode);
ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
ei->i_frag_no = raw_inode->i_frag;
ei->i_frag_size = raw_inode->i_fsize;
@@ -1517,7 +1518,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
}
brelse (bh);
- ext2_set_inode_flags(inode);
unlock_new_inode(inode);
return inode;
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index fb50f9aa6ead..c1d570ee1d9f 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -284,12 +284,16 @@ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
error = __ext4_set_acl(handle, inode, ACL_TYPE_DEFAULT,
default_acl, XATTR_CREATE);
posix_acl_release(default_acl);
+ } else {
+ inode->i_default_acl = NULL;
}
if (acl) {
if (!error)
error = __ext4_set_acl(handle, inode, ACL_TYPE_ACCESS,
acl, XATTR_CREATE);
posix_acl_release(acl);
+ } else {
+ inode->i_acl = NULL;
}
return error;
}
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index e2902d394f1b..f93f9881ec18 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -76,7 +76,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
else if (unlikely(((char *) de - buf) + rlen > size))
- error_msg = "directory entry across range";
+ error_msg = "directory entry overrun";
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
@@ -85,18 +85,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
if (filp)
ext4_error_file(filp, function, line, bh->b_blocknr,
- "bad entry in directory: %s - offset=%u(%u), "
- "inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % size),
- offset, le32_to_cpu(de->inode),
- rlen, de->name_len);
+ "bad entry in directory: %s - offset=%u, "
+ "inode=%u, rec_len=%d, name_len=%d, size=%d",
+ error_msg, offset, le32_to_cpu(de->inode),
+ rlen, de->name_len, size);
else
ext4_error_inode(dir, function, line, bh->b_blocknr,
- "bad entry in directory: %s - offset=%u(%u), "
- "inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % size),
- offset, le32_to_cpu(de->inode),
- rlen, de->name_len);
+ "bad entry in directory: %s - offset=%u, "
+ "inode=%u, rec_len=%d, name_len=%d, size=%d",
+ error_msg, offset, le32_to_cpu(de->inode),
+ rlen, de->name_len, size);
return 1;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0f0edd1cd0cd..12f90d48ba61 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -43,6 +43,17 @@
#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION)
#include <linux/fscrypt.h>
+#include <linux/compiler.h>
+
+/* Until this gets included into linux/compiler-gcc.h */
+#ifndef __nonstring
+#if defined(GCC_VERSION) && (GCC_VERSION >= 80000)
+#define __nonstring __attribute__((nonstring))
+#else
+#define __nonstring
+#endif
+#endif
+
/*
* The fourth extended filesystem constants/structures
*/
@@ -617,6 +628,7 @@ enum {
#define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE 0x0008
#define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
+#define EXT4_FREE_BLOCKS_RERESERVE_CLUSTER 0x0040
/*
* ioctl commands
@@ -675,6 +687,9 @@ enum {
/* Max physical block we can address w/o extents */
#define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF
+/* Max logical block we can support */
+#define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFF
+
/*
* Structure of an inode on the disk
*/
@@ -1016,6 +1031,9 @@ struct ext4_inode_info {
ext4_lblk_t i_da_metadata_calc_last_lblock;
int i_da_metadata_calc_len;
+ /* pending cluster reservations for bigalloc file systems */
+ struct ext4_pending_tree i_pending_tree;
+
/* on-disk additional length */
__u16 i_extra_isize;
@@ -1226,7 +1244,7 @@ struct ext4_super_block {
__le32 s_feature_ro_compat; /* readonly-compatible feature set */
/*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
/*78*/ char s_volume_name[16]; /* volume name */
-/*88*/ char s_last_mounted[64]; /* directory where last mounted */
+/*88*/ char s_last_mounted[64] __nonstring; /* directory where last mounted */
/*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */
/*
* Performance hints. Directory preallocation should only
@@ -1277,13 +1295,13 @@ struct ext4_super_block {
__le32 s_first_error_time; /* first time an error happened */
__le32 s_first_error_ino; /* inode involved in first error */
__le64 s_first_error_block; /* block involved of first error */
- __u8 s_first_error_func[32]; /* function where the error happened */
+ __u8 s_first_error_func[32] __nonstring; /* function where the error happened */
__le32 s_first_error_line; /* line number where error happened */
__le32 s_last_error_time; /* most recent time of an error */
__le32 s_last_error_ino; /* inode involved in last error */
__le32 s_last_error_line; /* line number where error happened */
__le64 s_last_error_block; /* block involved of last error */
- __u8 s_last_error_func[32]; /* function where the error happened */
+ __u8 s_last_error_func[32] __nonstring; /* function where the error happened */
#define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts)
__u8 s_mount_opts[64];
__le32 s_usr_quota_inum; /* inode for tracking user quota */
@@ -1387,7 +1405,8 @@ struct ext4_sb_info {
u32 s_min_batch_time;
struct block_device *journal_bdev;
#ifdef CONFIG_QUOTA
- char *s_qf_names[EXT4_MAXQUOTAS]; /* Names of quota files with journalled quota */
+ /* Names of quota files with journalled quota */
+ char __rcu *s_qf_names[EXT4_MAXQUOTAS];
int s_jquota_fmt; /* Format of quota to use */
#endif
unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
@@ -2469,10 +2488,11 @@ extern int ext4_writepage_trans_blocks(struct inode *);
extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
loff_t lstart, loff_t lend);
-extern int ext4_page_mkwrite(struct vm_fault *vmf);
-extern int ext4_filemap_fault(struct vm_fault *vmf);
+extern vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf);
+extern vm_fault_t ext4_filemap_fault(struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern int ext4_get_projid(struct inode *inode, kprojid_t *projid);
+extern void ext4_da_release_space(struct inode *inode, int to_free);
extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
@@ -3128,10 +3148,6 @@ extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
int flags);
extern void ext4_ext_drop_refs(struct ext4_ext_path *);
extern int ext4_ext_check_inode(struct inode *inode);
-extern int ext4_find_delalloc_range(struct inode *inode,
- ext4_lblk_t lblk_start,
- ext4_lblk_t lblk_end);
-extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
extern ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path);
extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
@@ -3142,6 +3158,7 @@ extern int ext4_swap_extents(handle_t *handle, struct inode *inode1,
struct inode *inode2, ext4_lblk_t lblk1,
ext4_lblk_t lblk2, ext4_lblk_t count,
int mark_unwritten,int *err);
+extern int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu);
/* move_extent.c */
extern void ext4_double_down_write_data_sem(struct inode *first,
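
The __nonstring annotations added to ext4.h above exist so GCC 8's string-truncation diagnostics don't fire on fixed-width fields that are legitimately not NUL-terminated. A minimal userspace illustration of the same idea (the struct and function here are hypothetical):

#include <string.h>

#if defined(__GNUC__) && __GNUC__ >= 8
#define __nonstring __attribute__((nonstring))
#else
#define __nonstring
#endif

/* Fields like s_last_mounted hold a fixed-width, possibly unterminated
 * byte array; __nonstring tells the compiler not to treat them as
 * C strings when checking str* calls. */
struct sb_like {
	char last_mounted[64] __nonstring;
};

void record_mount_point(struct sb_like *sb, const char *path)
{
	/* may legitimately drop the NUL; readers must honor the width */
	strncpy(sb->last_mounted, path, sizeof(sb->last_mounted));
}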
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index adf6668b596f..98bd0e9ee7df 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -120,6 +120,19 @@ struct ext4_ext_path {
};
/*
+ * Used to record a portion of a cluster found at the beginning or end
+ * of an extent while traversing the extent tree during space removal.
+ * A partial cluster may be removed if it does not contain blocks shared
+ * with extents that aren't being deleted (tofree state). Otherwise,
+ * it cannot be removed (nofree state).
+ */
+struct partial_cluster {
+ ext4_fsblk_t pclu; /* physical cluster number */
+ ext4_lblk_t lblk; /* logical block number within logical cluster */
+ enum {initial, tofree, nofree} state;
+};
+
+/*
* structure for external API
*/
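
As a reading aid for the extents.c hunks that follow, the partial_cluster.state field drives a small state machine; a sketch of the transitions (commentary, not kernel code):

/*
 * initial: no partial cluster is being tracked (or one was just freed)
 * tofree:  a cluster split by the edge of a removed extent looks
 *          freeable; it is freed later unless a surviving extent is
 *          found to share it first
 * nofree:  the cluster is shared with an extent that is not being
 *          deleted and must not be freed
 *
 * Typical flow during a truncate or punch hole:
 *   initial -> tofree   (removed extent begins mid-cluster)
 *   initial -> nofree   (a surviving extent shares the edge cluster)
 *   tofree  -> initial  (cluster freed, or freeing moved past it)
 */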
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 72a361d5ef74..240b6dea5441 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2351,8 +2351,8 @@ ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
{
struct extent_status es;
- ext4_es_find_delayed_extent_range(inode, hole_start,
- hole_start + hole_len - 1, &es);
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
+ hole_start + hole_len - 1, &es);
if (es.es_len) {
/* There's delayed extent containing lblock? */
if (es.es_lblk <= hole_start)
@@ -2490,106 +2490,157 @@ static inline int get_default_free_blocks_flags(struct inode *inode)
return 0;
}
+/*
+ * ext4_rereserve_cluster - increment the reserved cluster count when
+ * freeing a cluster with a pending reservation
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in cluster to be reserved
+ *
+ * Increments the reserved cluster count and adjusts quota in a bigalloc
+ * file system when freeing a partial cluster containing at least one
+ * delayed and unwritten block. A partial cluster meeting that
+ * requirement has a pending reservation; in that case, callers pass
+ * the RERESERVE_CLUSTER flag to ext4_free_blocks() to defer the
+ * reserved and allocated space accounting to a subsequent call to
+ * this function.
+ */
+static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
+
+ spin_lock(&ei->i_block_reservation_lock);
+ ei->i_reserved_data_blocks++;
+ percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
+ spin_unlock(&ei->i_block_reservation_lock);
+
+ percpu_counter_add(&sbi->s_freeclusters_counter, 1);
+ ext4_remove_pending(inode, lblk);
+}
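
Every caller of ext4_rereserve_cluster() in this patch follows the same shape; a hypothetical helper distilling the repeated call-site pattern (names and flags as used in the surrounding hunks):

static void free_partial_cluster(handle_t *handle, struct inode *inode,
				 struct ext4_sb_info *sbi,
				 struct partial_cluster *partial)
{
	int flags = get_default_free_blocks_flags(inode);

	/* defer accounting if the cluster has a pending reservation */
	if (ext4_is_pending(inode, partial->lblk))
		flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
	ext4_free_blocks(handle, inode, NULL, EXT4_C2B(sbi, partial->pclu),
			 sbi->s_cluster_ratio, flags);
	if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
		ext4_rereserve_cluster(inode, partial->lblk);
}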
+
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent *ex,
- long long *partial_cluster,
+ struct partial_cluster *partial,
ext4_lblk_t from, ext4_lblk_t to)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned short ee_len = ext4_ext_get_actual_len(ex);
- ext4_fsblk_t pblk;
- int flags = get_default_free_blocks_flags(inode);
+ ext4_fsblk_t last_pblk, pblk;
+ ext4_lblk_t num;
+ int flags;
+
+ /* only extent tail removal is allowed */
+ if (from < le32_to_cpu(ex->ee_block) ||
+ to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
+ ext4_error(sbi->s_sb,
+ "strange request: removal(2) %u-%u from %u:%u",
+ from, to, le32_to_cpu(ex->ee_block), ee_len);
+ return 0;
+ }
+
+#ifdef EXTENTS_STATS
+ spin_lock(&sbi->s_ext_stats_lock);
+ sbi->s_ext_blocks += ee_len;
+ sbi->s_ext_extents++;
+ if (ee_len < sbi->s_ext_min)
+ sbi->s_ext_min = ee_len;
+ if (ee_len > sbi->s_ext_max)
+ sbi->s_ext_max = ee_len;
+ if (ext_depth(inode) > sbi->s_depth_max)
+ sbi->s_depth_max = ext_depth(inode);
+ spin_unlock(&sbi->s_ext_stats_lock);
+#endif
+
+ trace_ext4_remove_blocks(inode, ex, from, to, partial);
/*
- * For bigalloc file systems, we never free a partial cluster
- * at the beginning of the extent. Instead, we make a note
- * that we tried freeing the cluster, and check to see if we
- * need to free it on a subsequent call to ext4_remove_blocks,
- * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
+ * If we have a partial cluster and it's different from the
+ * cluster of the last block in the extent, free it.
*/
- flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
+ last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
+
+ if (partial->state != initial &&
+ partial->pclu != EXT4_B2C(sbi, last_pblk)) {
+ if (partial->state == tofree) {
+ flags = get_default_free_blocks_flags(inode);
+ if (ext4_is_pending(inode, partial->lblk))
+ flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
+ ext4_free_blocks(handle, inode, NULL,
+ EXT4_C2B(sbi, partial->pclu),
+ sbi->s_cluster_ratio, flags);
+ if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
+ ext4_rereserve_cluster(inode, partial->lblk);
+ }
+ partial->state = initial;
+ }
+
+ num = le32_to_cpu(ex->ee_block) + ee_len - from;
+ pblk = ext4_ext_pblock(ex) + ee_len - num;
- trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
/*
- * If we have a partial cluster, and it's different from the
- * cluster of the last block, we need to explicitly free the
- * partial cluster here.
+ * We free the partial cluster at the end of the extent (if any),
+ * unless the cluster is used by another extent (partial_cluster
+ * state is nofree). If a partial cluster exists here, it must be
+ * shared with the last block in the extent.
*/
- pblk = ext4_ext_pblock(ex) + ee_len - 1;
- if (*partial_cluster > 0 &&
- *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
+ flags = get_default_free_blocks_flags(inode);
+
+ /* partial, left end cluster aligned, right end unaligned */
+ if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
+ (EXT4_LBLK_CMASK(sbi, to) >= from) &&
+ (partial->state != nofree)) {
+ if (ext4_is_pending(inode, to))
+ flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
ext4_free_blocks(handle, inode, NULL,
- EXT4_C2B(sbi, *partial_cluster),
+ EXT4_PBLK_CMASK(sbi, last_pblk),
sbi->s_cluster_ratio, flags);
- *partial_cluster = 0;
+ if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
+ ext4_rereserve_cluster(inode, to);
+ partial->state = initial;
+ flags = get_default_free_blocks_flags(inode);
}
-#ifdef EXTENTS_STATS
- {
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- spin_lock(&sbi->s_ext_stats_lock);
- sbi->s_ext_blocks += ee_len;
- sbi->s_ext_extents++;
- if (ee_len < sbi->s_ext_min)
- sbi->s_ext_min = ee_len;
- if (ee_len > sbi->s_ext_max)
- sbi->s_ext_max = ee_len;
- if (ext_depth(inode) > sbi->s_depth_max)
- sbi->s_depth_max = ext_depth(inode);
- spin_unlock(&sbi->s_ext_stats_lock);
- }
-#endif
- if (from >= le32_to_cpu(ex->ee_block)
- && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
- /* tail removal */
- ext4_lblk_t num;
- long long first_cluster;
-
- num = le32_to_cpu(ex->ee_block) + ee_len - from;
- pblk = ext4_ext_pblock(ex) + ee_len - num;
- /*
- * Usually we want to free partial cluster at the end of the
- * extent, except for the situation when the cluster is still
- * used by any other extent (partial_cluster is negative).
- */
- if (*partial_cluster < 0 &&
- *partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1))
- flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
+ flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
- ext_debug("free last %u blocks starting %llu partial %lld\n",
- num, pblk, *partial_cluster);
- ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
- /*
- * If the block range to be freed didn't start at the
- * beginning of a cluster, and we removed the entire
- * extent and the cluster is not used by any other extent,
- * save the partial cluster here, since we might need to
- * delete if we determine that the truncate or punch hole
- * operation has removed all of the blocks in the cluster.
- * If that cluster is used by another extent, preserve its
- * negative value so it isn't freed later on.
- *
- * If the whole extent wasn't freed, we've reached the
- * start of the truncated/punched region and have finished
- * removing blocks. If there's a partial cluster here it's
- * shared with the remainder of the extent and is no longer
- * a candidate for removal.
- */
- if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) {
- first_cluster = (long long) EXT4_B2C(sbi, pblk);
- if (first_cluster != -*partial_cluster)
- *partial_cluster = first_cluster;
- } else {
- *partial_cluster = 0;
+ /*
+ * For bigalloc file systems, we never free a partial cluster
+ * at the beginning of the extent. Instead, we check to see if we
+ * need to free it on a subsequent call to ext4_remove_blocks,
+ * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
+ */
+ flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
+ ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
+
+ /* reset the partial cluster if we've freed past it */
+ if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
+ partial->state = initial;
+
+ /*
+ * If we've freed the entire extent but the beginning is not left
+ * cluster aligned and is not marked as ineligible for freeing we
+ * record the partial cluster at the beginning of the extent. It
+ * wasn't freed by the preceding ext4_free_blocks() call, and we
+ * need to look farther to the left to determine if it's to be freed
+ * (not shared with another extent). Else, reset the partial
+ * cluster - we're either done freeing or the beginning of the
+ * extent is left cluster aligned.
+ */
+ if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
+ if (partial->state == initial) {
+ partial->pclu = EXT4_B2C(sbi, pblk);
+ partial->lblk = from;
+ partial->state = tofree;
}
- } else
- ext4_error(sbi->s_sb, "strange request: removal(2) "
- "%u-%u from %u:%u",
- from, to, le32_to_cpu(ex->ee_block), ee_len);
+ } else {
+ partial->state = initial;
+ }
+
return 0;
}
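
To make the alignment tests in the rewritten ext4_remove_blocks() concrete, assume s_cluster_ratio = 4, so clusters are [0-3], [4-7], [8-11], and consider a tail removal with from = 6 and to = 9. EXT4_LBLK_COFF(sbi, 9) = 1 (not 3, so the extent ends mid-cluster) and EXT4_LBLK_CMASK(sbi, 9) = 8 >= 6 (the removed range reaches the start of that cluster), so cluster [8-11] contains no surviving blocks from this extent and is freed whole unless its state is nofree. And because EXT4_LBLK_COFF(sbi, 6) = 2 is nonzero, if the entire extent was removed, cluster [4-7] is recorded as a tofree partial cluster for later resolution.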
-
/*
* ext4_ext_rm_leaf() Removes the extents associated with the
* blocks appearing between "start" and "end". Both "start"
@@ -2608,7 +2659,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
- long long *partial_cluster,
+ struct partial_cluster *partial,
ext4_lblk_t start, ext4_lblk_t end)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -2640,7 +2691,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ex_ee_block = le32_to_cpu(ex->ee_block);
ex_ee_len = ext4_ext_get_actual_len(ex);
- trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
+ trace_ext4_ext_rm_leaf(inode, start, ex, partial);
while (ex >= EXT_FIRST_EXTENT(eh) &&
ex_ee_block + ex_ee_len > start) {
@@ -2671,8 +2722,8 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
*/
if (sbi->s_cluster_ratio > 1) {
pblk = ext4_ext_pblock(ex);
- *partial_cluster =
- -(long long) EXT4_B2C(sbi, pblk);
+ partial->pclu = EXT4_B2C(sbi, pblk);
+ partial->state = nofree;
}
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
@@ -2714,8 +2765,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
if (err)
goto out;
- err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
- a, b);
+ err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
if (err)
goto out;
@@ -2769,18 +2819,23 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
* If there's a partial cluster and at least one extent remains in
* the leaf, free the partial cluster if it isn't shared with the
* current extent. If it is shared with the current extent
- * we zero partial_cluster because we've reached the start of the
+ * we reset the partial cluster because we've reached the start of the
* truncated/punched region and we're done removing blocks.
*/
- if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) {
+ if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
- if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
+ if (partial->pclu != EXT4_B2C(sbi, pblk)) {
+ int flags = get_default_free_blocks_flags(inode);
+
+ if (ext4_is_pending(inode, partial->lblk))
+ flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
ext4_free_blocks(handle, inode, NULL,
- EXT4_C2B(sbi, *partial_cluster),
- sbi->s_cluster_ratio,
- get_default_free_blocks_flags(inode));
+ EXT4_C2B(sbi, partial->pclu),
+ sbi->s_cluster_ratio, flags);
+ if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
+ ext4_rereserve_cluster(inode, partial->lblk);
}
- *partial_cluster = 0;
+ partial->state = initial;
}
/* if this leaf is free, then we should
@@ -2819,10 +2874,14 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int depth = ext_depth(inode);
struct ext4_ext_path *path = NULL;
- long long partial_cluster = 0;
+ struct partial_cluster partial;
handle_t *handle;
int i = 0, err = 0;
+ partial.pclu = 0;
+ partial.lblk = 0;
+ partial.state = initial;
+
ext_debug("truncate since %u to %u\n", start, end);
/* probably first extent we're gonna free will be last in block */
@@ -2882,8 +2941,8 @@ again:
*/
if (sbi->s_cluster_ratio > 1) {
pblk = ext4_ext_pblock(ex) + end - ee_block + 2;
- partial_cluster =
- -(long long) EXT4_B2C(sbi, pblk);
+ partial.pclu = EXT4_B2C(sbi, pblk);
+ partial.state = nofree;
}
/*
@@ -2911,9 +2970,10 @@ again:
&ex);
if (err)
goto out;
- if (pblk)
- partial_cluster =
- -(long long) EXT4_B2C(sbi, pblk);
+ if (pblk) {
+ partial.pclu = EXT4_B2C(sbi, pblk);
+ partial.state = nofree;
+ }
}
}
/*
@@ -2948,8 +3008,7 @@ again:
if (i == depth) {
/* this is leaf block */
err = ext4_ext_rm_leaf(handle, inode, path,
- &partial_cluster, start,
- end);
+ &partial, start, end);
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
path[i].p_bh = NULL;
@@ -3021,21 +3080,24 @@ again:
}
}
- trace_ext4_ext_remove_space_done(inode, start, end, depth,
- partial_cluster, path->p_hdr->eh_entries);
+ trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
+ path->p_hdr->eh_entries);
/*
- * If we still have something in the partial cluster and we have removed
- * even the first extent, then we should free the blocks in the partial
- * cluster as well. (This code will only run when there are no leaves
- * to the immediate left of the truncated/punched region.)
+ * If there's a partial cluster and we have removed the first extent
+ * in the file, free that partial cluster as well.
*/
- if (partial_cluster > 0 && err == 0) {
- /* don't zero partial_cluster since it's not used afterwards */
+ if (partial.state == tofree && err == 0) {
+ int flags = get_default_free_blocks_flags(inode);
+
+ if (ext4_is_pending(inode, partial.lblk))
+ flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
ext4_free_blocks(handle, inode, NULL,
- EXT4_C2B(sbi, partial_cluster),
- sbi->s_cluster_ratio,
- get_default_free_blocks_flags(inode));
+ EXT4_C2B(sbi, partial.pclu),
+ sbi->s_cluster_ratio, flags);
+ if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
+ ext4_rereserve_cluster(inode, partial.lblk);
+ partial.state = initial;
}
/* TODO: flexible tree reduction should be here */
@@ -3819,114 +3881,6 @@ out:
return ext4_mark_inode_dirty(handle, inode);
}
-/**
- * ext4_find_delalloc_range: find delayed allocated block in the given range.
- *
- * Return 1 if there is a delalloc block in the range, otherwise 0.
- */
-int ext4_find_delalloc_range(struct inode *inode,
- ext4_lblk_t lblk_start,
- ext4_lblk_t lblk_end)
-{
- struct extent_status es;
-
- ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
- if (es.es_len == 0)
- return 0; /* there is no delay extent in this tree */
- else if (es.es_lblk <= lblk_start &&
- lblk_start < es.es_lblk + es.es_len)
- return 1;
- else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
- return 1;
- else
- return 0;
-}
-
-int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
-{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- ext4_lblk_t lblk_start, lblk_end;
- lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
- lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
-
- return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
-}
-
-/**
- * Determines how many complete clusters (out of those specified by the 'map')
- * are under delalloc and were reserved quota for.
- * This function is called when we are writing out the blocks that were
- * originally written with their allocation delayed, but then the space was
- * allocated using fallocate() before the delayed allocation could be resolved.
- * The cases to look for are:
- * ('=' indicated delayed allocated blocks
- * '-' indicates non-delayed allocated blocks)
- * (a) partial clusters towards beginning and/or end outside of allocated range
- * are not delalloc'ed.
- * Ex:
- * |----c---=|====c====|====c====|===-c----|
- * |++++++ allocated ++++++|
- * ==> 4 complete clusters in above example
- *
- * (b) partial cluster (outside of allocated range) towards either end is
- * marked for delayed allocation. In this case, we will exclude that
- * cluster.
- * Ex:
- * |----====c========|========c========|
- * |++++++ allocated ++++++|
- * ==> 1 complete clusters in above example
- *
- * Ex:
- * |================c================|
- * |++++++ allocated ++++++|
- * ==> 0 complete clusters in above example
- *
- * The ext4_da_update_reserve_space will be called only if we
- * determine here that there were some "entire" clusters that span
- * this 'allocated' range.
- * In the non-bigalloc case, this function will just end up returning num_blks
- * without ever calling ext4_find_delalloc_range.
- */
-static unsigned int
-get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
- unsigned int num_blks)
-{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
- ext4_lblk_t lblk_from, lblk_to, c_offset;
- unsigned int allocated_clusters = 0;
-
- alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
- alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
-
- /* max possible clusters for this allocation */
- allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
-
- trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
-
- /* Check towards left side */
- c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
- if (c_offset) {
- lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
- lblk_to = lblk_from + c_offset - 1;
-
- if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
- allocated_clusters--;
- }
-
- /* Now check towards right. */
- c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
- if (allocated_clusters && c_offset) {
- lblk_from = lblk_start + num_blks;
- lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
-
- if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
- allocated_clusters--;
- }
-
- return allocated_clusters;
-}
-
static int
convert_initialized_extent(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
@@ -4108,23 +4062,6 @@ out:
}
map->m_len = allocated;
- /*
- * If we have done fallocate with the offset that is already
- * delayed allocated, we would have block reservation
- * and quota reservation done in the delayed write path.
- * But fallocate would have already updated quota and block
- * count for this offset. So cancel these reservation
- */
- if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
- unsigned int reserved_clusters;
- reserved_clusters = get_reserved_cluster_alloc(inode,
- map->m_lblk, map->m_len);
- if (reserved_clusters)
- ext4_da_update_reserve_space(inode,
- reserved_clusters,
- 0);
- }
-
map_out:
map->m_flags |= EXT4_MAP_MAPPED;
if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
@@ -4513,77 +4450,39 @@ got_allocated_blocks:
map->m_flags |= EXT4_MAP_NEW;
/*
- * Update reserved blocks/metadata blocks after successful
- * block allocation which had been deferred till now.
+ * Reduce the reserved cluster count to reflect successful deferred
+ * allocation of delayed allocated clusters or direct allocation of
+ * clusters discovered to be delayed allocated. Once allocated, a
+ * cluster is not included in the reserved count.
*/
- if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
- unsigned int reserved_clusters;
- /*
- * Check how many clusters we had reserved this allocated range
- */
- reserved_clusters = get_reserved_cluster_alloc(inode,
- map->m_lblk, allocated);
- if (!map_from_cluster) {
- BUG_ON(allocated_clusters < reserved_clusters);
- if (reserved_clusters < allocated_clusters) {
- struct ext4_inode_info *ei = EXT4_I(inode);
- int reservation = allocated_clusters -
- reserved_clusters;
- /*
- * It seems we claimed few clusters outside of
- * the range of this allocation. We should give
- * it back to the reservation pool. This can
- * happen in the following case:
- *
- * * Suppose s_cluster_ratio is 4 (i.e., each
- * cluster has 4 blocks. Thus, the clusters
- * are [0-3],[4-7],[8-11]...
- * * First comes delayed allocation write for
- * logical blocks 10 & 11. Since there were no
- * previous delayed allocated blocks in the
- * range [8-11], we would reserve 1 cluster
- * for this write.
- * * Next comes write for logical blocks 3 to 8.
- * In this case, we will reserve 2 clusters
- * (for [0-3] and [4-7]; and not for [8-11] as
- * that range has a delayed allocated blocks.
- * Thus total reserved clusters now becomes 3.
- * * Now, during the delayed allocation writeout
- * time, we will first write blocks [3-8] and
- * allocate 3 clusters for writing these
- * blocks. Also, we would claim all these
- * three clusters above.
- * * Now when we come here to writeout the
- * blocks [10-11], we would expect to claim
- * the reservation of 1 cluster we had made
- * (and we would claim it since there are no
- * more delayed allocated blocks in the range
- * [8-11]. But our reserved cluster count had
- * already gone to 0.
- *
- * Thus, at the step 4 above when we determine
- * that there are still some unwritten delayed
- * allocated blocks outside of our current
- * block range, we should increment the
- * reserved clusters count so that when the
- * remaining blocks finally gets written, we
- * could claim them.
- */
- dquot_reserve_block(inode,
- EXT4_C2B(sbi, reservation));
- spin_lock(&ei->i_block_reservation_lock);
- ei->i_reserved_data_blocks += reservation;
- spin_unlock(&ei->i_block_reservation_lock);
- }
+ if (test_opt(inode->i_sb, DELALLOC) && !map_from_cluster) {
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
/*
- * We will claim quota for all newly allocated blocks.
- * We're updating the reserved space *after* the
- * correction above so we do not accidentally free
- * all the metadata reservation because we might
- * actually need it later on.
+ * When allocating delayed allocated clusters, simply
+ * reduce the reserved cluster count and claim quota.
*/
ext4_da_update_reserve_space(inode, allocated_clusters,
1);
+ } else {
+ ext4_lblk_t lblk, len;
+ unsigned int n;
+
+ /*
+ * When allocating non-delayed allocated clusters
+ * (from fallocate, filemap, DIO, or clusters
+ * allocated when delalloc has been disabled by
+ * ext4_nonda_switch), reduce the reserved cluster
+ * count by the number of allocated clusters that
+ * have previously been delayed allocated. Quota
+ * has been claimed by ext4_mb_new_blocks() above,
+ * so release the quota reservations made for any
+ * previously delayed allocated clusters.
+ */
+ lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
+ len = allocated_clusters << sbi->s_cluster_bits;
+ n = ext4_es_delayed_clu(inode, lblk, len);
+ if (n > 0)
+ ext4_da_update_reserve_space(inode, (int) n, 0);
}
}
@@ -5075,8 +4974,10 @@ static int ext4_find_delayed_extent(struct inode *inode,
ext4_lblk_t block, next_del;
if (newes->es_pblk == 0) {
- ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
- newes->es_lblk + newes->es_len - 1, &es);
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
+ newes->es_lblk,
+ newes->es_lblk + newes->es_len - 1,
+ &es);
/*
* No extent in extent-tree contains block @newes->es_pblk,
@@ -5097,7 +4998,8 @@ static int ext4_find_delayed_extent(struct inode *inode,
}
block = newes->es_lblk + newes->es_len;
- ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed, block,
+ EXT_MAX_BLOCKS, &es);
if (es.es_len == 0)
next_del = EXT_MAX_BLOCKS;
else
@@ -5958,3 +5860,82 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
}
return replaced_count;
}
+
+/*
+ * ext4_clu_mapped - determine whether any block in a logical cluster has
+ * been mapped to a physical cluster
+ *
+ * @inode - file containing the logical cluster
+ * @lclu - logical cluster of interest
+ *
+ * Returns 1 if any block in the logical cluster is mapped, signifying
+ * that a physical cluster has been allocated for it. Otherwise,
+ * returns 0. Can also return negative error codes. Derived from
+ * ext4_ext_map_blocks().
+ */
+int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_ext_path *path;
+ int depth, mapped = 0, err = 0;
+ struct ext4_extent *extent;
+ ext4_lblk_t first_lblk, first_lclu, last_lclu;
+
+ /* search for the extent closest to the first block in the cluster */
+ path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ path = NULL;
+ goto out;
+ }
+
+ depth = ext_depth(inode);
+
+ /*
+ * A consistent leaf must not be empty. This situation is possible,
+ * though, _during_ tree modification, and it's why an assert can't
+ * be put in ext4_find_extent().
+ */
+ if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
+ EXT4_ERROR_INODE(inode,
+ "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
+ (unsigned long) EXT4_C2B(sbi, lclu),
+ depth, path[depth].p_block);
+ err = -EFSCORRUPTED;
+ goto out;
+ }
+
+ extent = path[depth].p_ext;
+
+ /* can't be mapped if the extent tree is empty */
+ if (extent == NULL)
+ goto out;
+
+ first_lblk = le32_to_cpu(extent->ee_block);
+ first_lclu = EXT4_B2C(sbi, first_lblk);
+
+ /*
+ * Three possible outcomes at this point: the extent found spans the
+ * target cluster, lies to its left, or lies to its right. The first
+ * two cases are handled here. The last case means the target cluster
+ * is not mapped.
+ */
+ if (lclu >= first_lclu) {
+ last_lclu = EXT4_B2C(sbi, first_lblk +
+ ext4_ext_get_actual_len(extent) - 1);
+ if (lclu <= last_lclu) {
+ mapped = 1;
+ } else {
+ first_lblk = ext4_ext_next_allocated_block(path);
+ first_lclu = EXT4_B2C(sbi, first_lblk);
+ if (lclu == first_lclu)
+ mapped = 1;
+ }
+ }
+
+out:
+ ext4_ext_drop_refs(path);
+ kfree(path);
+
+ return err ? err : mapped;
+}
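
A sketch of how a caller might consume ext4_clu_mapped()'s three-way result (the wrapper below is hypothetical):

/* Hypothetical helper: does an allocation in logical cluster @lclu need
 * a new cluster reservation? */
static int needs_cluster_reservation(struct inode *inode, ext4_lblk_t lclu)
{
	int ret = ext4_clu_mapped(inode, lclu);

	if (ret < 0)
		return ret;	/* propagate -EFSCORRUPTED etc. */
	return ret == 0;	/* unmapped cluster => reservation needed */
}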
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index c4e6fb15101b..2b439afafe13 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -142,6 +142,7 @@
*/
static struct kmem_cache *ext4_es_cachep;
+static struct kmem_cache *ext4_pending_cachep;
static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
@@ -149,6 +150,8 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
struct ext4_inode_info *locked_ei);
+static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len);
int __init ext4_init_es(void)
{
@@ -233,30 +236,38 @@ static struct extent_status *__es_tree_search(struct rb_root *root,
}
/*
- * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
- * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
+ * ext4_es_find_extent_range - find extent with specified status within block
+ * range or next extent following block range in
+ * extents status tree
*
- * @inode: the inode which owns delayed extents
- * @lblk: the offset where we start to search
- * @end: the offset where we stop to search
- * @es: delayed extent that we found
+ * @inode - file containing the range
+ * @matching_fn - pointer to function that matches extents with desired status
+ * @lblk - logical block defining start of range
+ * @end - logical block defining end of range
+ * @es - extent found, if any
+ *
+ * Find the first extent within the block range specified by @lblk and @end
+ * in the extents status tree that satisfies @matching_fn. If a match
+ * is found, it's returned in @es. If not, and a matching extent is found
+ * beyond the block range, it's returned in @es. If no match is found, an
+ * extent is returned in @es whose es_lblk, es_len, and es_pblk components
+ * are 0.
*/
-void ext4_es_find_delayed_extent_range(struct inode *inode,
- ext4_lblk_t lblk, ext4_lblk_t end,
- struct extent_status *es)
+static void __es_find_extent_range(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end,
+ struct extent_status *es)
{
struct ext4_es_tree *tree = NULL;
struct extent_status *es1 = NULL;
struct rb_node *node;
- BUG_ON(es == NULL);
- BUG_ON(end < lblk);
- trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);
+ WARN_ON(es == NULL);
+ WARN_ON(end < lblk);
- read_lock(&EXT4_I(inode)->i_es_lock);
tree = &EXT4_I(inode)->i_es_tree;
- /* find extent in cache firstly */
+ /* see if the extent has been cached */
es->es_lblk = es->es_len = es->es_pblk = 0;
if (tree->cache_es) {
es1 = tree->cache_es;
@@ -271,28 +282,133 @@ void ext4_es_find_delayed_extent_range(struct inode *inode,
es1 = __es_tree_search(&tree->root, lblk);
out:
- if (es1 && !ext4_es_is_delayed(es1)) {
+ if (es1 && !matching_fn(es1)) {
while ((node = rb_next(&es1->rb_node)) != NULL) {
es1 = rb_entry(node, struct extent_status, rb_node);
if (es1->es_lblk > end) {
es1 = NULL;
break;
}
- if (ext4_es_is_delayed(es1))
+ if (matching_fn(es1))
break;
}
}
- if (es1 && ext4_es_is_delayed(es1)) {
+ if (es1 && matching_fn(es1)) {
tree->cache_es = es1;
es->es_lblk = es1->es_lblk;
es->es_len = es1->es_len;
es->es_pblk = es1->es_pblk;
}
+}
+
+/*
+ * Locking for __es_find_extent_range() for external use
+ */
+void ext4_es_find_extent_range(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end,
+ struct extent_status *es)
+{
+ trace_ext4_es_find_extent_range_enter(inode, lblk);
+
+ read_lock(&EXT4_I(inode)->i_es_lock);
+ __es_find_extent_range(inode, matching_fn, lblk, end, es);
+ read_unlock(&EXT4_I(inode)->i_es_lock);
+
+ trace_ext4_es_find_extent_range_exit(inode, es);
+}
+
+/*
+ * __es_scan_range - search block range for block with specified status
+ * in extents status tree
+ *
+ * @inode - file containing the range
+ * @matching_fn - pointer to function that matches extents with desired status
+ * @lblk - logical block defining start of range
+ * @end - logical block defining end of range
+ *
+ * Returns true if at least one block in the specified block range satisfies
+ * the criterion specified by @matching_fn, and false if not. If at least
+ * one extent in the range has the specified status, then there is at
+ * least one block in the range with that status. Should only be called
+ * by code that has taken i_es_lock.
+ */
+static bool __es_scan_range(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t start, ext4_lblk_t end)
+{
+ struct extent_status es;
+
+ __es_find_extent_range(inode, matching_fn, start, end, &es);
+ if (es.es_len == 0)
+ return false; /* no matching extent in the tree */
+ else if (es.es_lblk <= start &&
+ start < es.es_lblk + es.es_len)
+ return true;
+ else if (start <= es.es_lblk && es.es_lblk <= end)
+ return true;
+ else
+ return false;
+}
+
+/*
+ * Locking for __es_scan_range() for external use
+ */
+bool ext4_es_scan_range(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end)
+{
+ bool ret;
+
+ read_lock(&EXT4_I(inode)->i_es_lock);
+ ret = __es_scan_range(inode, matching_fn, lblk, end);
read_unlock(&EXT4_I(inode)->i_es_lock);
- trace_ext4_es_find_delayed_extent_range_exit(inode, es);
+ return ret;
+}
+
+/*
+ * __es_scan_clu - search cluster for block with specified status in
+ * extents status tree
+ *
+ * @inode - file containing the cluster
+ * @matching_fn - pointer to function that matches extents with desired status
+ * @lblk - logical block in cluster to be searched
+ *
+ * Returns true if at least one extent in the cluster containing @lblk
+ * satisfies the criterion specified by @matching_fn, and false if not. If at
+ * least one extent has the specified status, then there is at least one block
+ * in the cluster with that status. Should only be called by code that has
+ * taken i_es_lock.
+ */
+static bool __es_scan_clu(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t lblk_start, lblk_end;
+
+ lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
+ lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
+
+ return __es_scan_range(inode, matching_fn, lblk_start, lblk_end);
+}
+
+/*
+ * Locking for __es_scan_clu() for external use
+ */
+bool ext4_es_scan_clu(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk)
+{
+ bool ret;
+
+ read_lock(&EXT4_I(inode)->i_es_lock);
+ ret = __es_scan_clu(inode, matching_fn, lblk);
+ read_unlock(&EXT4_I(inode)->i_es_lock);
+
+ return ret;
}
static void ext4_es_list_add(struct inode *inode)
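
With the matching-function parameter, the old delayed-only helpers become one-liners over the new interfaces; a sketch using only the predicates and signatures declared in this patch:

/* Equivalent of the removed ext4_es_find_delayed_extent_range() */
static void find_delayed_range(struct inode *inode, ext4_lblk_t lblk,
			       ext4_lblk_t end, struct extent_status *es)
{
	ext4_es_find_extent_range(inode, &ext4_es_is_delayed, lblk, end, es);
}

/* True if any block in lblk's cluster is delayed and not unwritten */
static bool clu_has_delonly_block(struct inode *inode, ext4_lblk_t lblk)
{
	return ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk);
}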
@@ -694,6 +810,7 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status newes;
ext4_lblk_t end = lblk + len - 1;
int err = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
lblk, len, pblk, status, inode->i_ino);
@@ -730,6 +847,11 @@ retry:
if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
err = 0;
+ if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
+ (status & EXTENT_STATUS_WRITTEN ||
+ status & EXTENT_STATUS_UNWRITTEN))
+ __revise_pending(inode, lblk, len);
+
error:
write_unlock(&EXT4_I(inode)->i_es_lock);
@@ -1252,3 +1374,499 @@ static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
ei->i_es_tree.cache_es = NULL;
return nr_shrunk;
}
+
+#ifdef ES_DEBUG__
+static void ext4_print_pending_tree(struct inode *inode)
+{
+ struct ext4_pending_tree *tree;
+ struct rb_node *node;
+ struct pending_reservation *pr;
+
+ printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino);
+ tree = &EXT4_I(inode)->i_pending_tree;
+ node = rb_first(&tree->root);
+ while (node) {
+ pr = rb_entry(node, struct pending_reservation, rb_node);
+ printk(KERN_DEBUG " %u", pr->lclu);
+ node = rb_next(node);
+ }
+ printk(KERN_DEBUG "\n");
+}
+#else
+#define ext4_print_pending_tree(inode)
+#endif
+
+int __init ext4_init_pending(void)
+{
+ ext4_pending_cachep = kmem_cache_create("ext4_pending_reservation",
+ sizeof(struct pending_reservation),
+ 0, (SLAB_RECLAIM_ACCOUNT), NULL);
+ if (ext4_pending_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void ext4_exit_pending(void)
+{
+ kmem_cache_destroy(ext4_pending_cachep);
+}
+
+void ext4_init_pending_tree(struct ext4_pending_tree *tree)
+{
+ tree->root = RB_ROOT;
+}
+
+/*
+ * __get_pending - retrieve a pointer to a pending reservation
+ *
+ * @inode - file containing the pending cluster reservation
+ * @lclu - logical cluster of interest
+ *
+ * Returns a pointer to a pending reservation if it's a member of
+ * the set, and NULL if not. Must be called holding i_es_lock.
+ */
+static struct pending_reservation *__get_pending(struct inode *inode,
+ ext4_lblk_t lclu)
+{
+ struct ext4_pending_tree *tree;
+ struct rb_node *node;
+ struct pending_reservation *pr = NULL;
+
+ tree = &EXT4_I(inode)->i_pending_tree;
+ node = (&tree->root)->rb_node;
+
+ while (node) {
+ pr = rb_entry(node, struct pending_reservation, rb_node);
+ if (lclu < pr->lclu)
+ node = node->rb_left;
+ else if (lclu > pr->lclu)
+ node = node->rb_right;
+ else if (lclu == pr->lclu)
+ return pr;
+ }
+ return NULL;
+}
+
+/*
+ * __insert_pending - adds a pending cluster reservation to the set of
+ * pending reservations
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the cluster to be added
+ *
+ * Returns 0 on successful insertion and -ENOMEM on failure. If the
+ * pending reservation is already in the set, returns 0 without change.
+ */
+static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
+ struct rb_node **p = &tree->root.rb_node;
+ struct rb_node *parent = NULL;
+ struct pending_reservation *pr;
+ ext4_lblk_t lclu;
+ int ret = 0;
+
+ lclu = EXT4_B2C(sbi, lblk);
+ /* search to find parent for insertion */
+ while (*p) {
+ parent = *p;
+ pr = rb_entry(parent, struct pending_reservation, rb_node);
+
+ if (lclu < pr->lclu) {
+ p = &(*p)->rb_left;
+ } else if (lclu > pr->lclu) {
+ p = &(*p)->rb_right;
+ } else {
+ /* pending reservation already inserted */
+ goto out;
+ }
+ }
+
+ pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+ if (pr == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pr->lclu = lclu;
+
+ rb_link_node(&pr->rb_node, parent, p);
+ rb_insert_color(&pr->rb_node, &tree->root);
+
+out:
+ return ret;
+}
+
+/*
+ * __remove_pending - removes a pending cluster reservation from the set
+ * of pending reservations
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the pending cluster reservation to be removed
+ *
+ * Does nothing if the pending reservation is not a member of the set.
+ */
+static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct pending_reservation *pr;
+ struct ext4_pending_tree *tree;
+
+ pr = __get_pending(inode, EXT4_B2C(sbi, lblk));
+ if (pr != NULL) {
+ tree = &EXT4_I(inode)->i_pending_tree;
+ rb_erase(&pr->rb_node, &tree->root);
+ kmem_cache_free(ext4_pending_cachep, pr);
+ }
+}
+
+/*
+ * ext4_remove_pending - removes a pending cluster reservation from the set
+ * of pending reservations
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the pending cluster reservation to be removed
+ *
+ * Locking for external use of __remove_pending.
+ */
+void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+
+ write_lock(&ei->i_es_lock);
+ __remove_pending(inode, lblk);
+ write_unlock(&ei->i_es_lock);
+}
+
+/*
+ * ext4_is_pending - determine whether a cluster has a pending reservation
+ * on it
+ *
+ * @inode - file containing the cluster
+ * @lblk - logical block in the cluster
+ *
+ * Returns true if there's a pending reservation for the cluster in the
+ * set of pending reservations, and false if not.
+ */
+bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ bool ret;
+
+ read_lock(&ei->i_es_lock);
+ ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL);
+ read_unlock(&ei->i_es_lock);
+
+ return ret;
+}
+
+/*
+ * ext4_es_insert_delayed_block - adds a delayed block to the extents status
+ * tree, adding a pending reservation where
+ * needed
+ *
+ * @inode - file containing the newly added block
+ * @lblk - logical block to be added
+ * @allocated - indicates whether a physical cluster has been allocated for
+ * the logical cluster that contains the block
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ bool allocated)
+{
+ struct extent_status newes;
+ int err = 0;
+
+ es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
+ lblk, inode->i_ino);
+
+ newes.es_lblk = lblk;
+ newes.es_len = 1;
+ ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED);
+ trace_ext4_es_insert_delayed_block(inode, &newes, allocated);
+
+ ext4_es_insert_extent_check(inode, &newes);
+
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ err = __es_remove_extent(inode, lblk, lblk);
+ if (err != 0)
+ goto error;
+retry:
+ err = __es_insert_extent(inode, &newes);
+ if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
+ 128, EXT4_I(inode)))
+ goto retry;
+ if (err != 0)
+ goto error;
+
+ if (allocated)
+ __insert_pending(inode, lblk);
+
+error:
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+
+ ext4_es_print_tree(inode);
+ ext4_print_pending_tree(inode);
+
+ return err;
+}
+
+/*
+ * __es_delayed_clu - count number of clusters containing blocks that
+ * are delayed only
+ *
+ * @inode - file containing block range
+ * @start - logical block defining start of range
+ * @end - logical block defining end of range
+ *
+ * Returns the number of clusters in the range specified by @start and
+ * @end containing at least one delayed-only block (delayed and not also
+ * unwritten). A cluster only partially within the range is counted as a
+ * whole cluster if it contains such a block within the range.
+ */
+static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start,
+ ext4_lblk_t end)
+{
+ struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
+ struct extent_status *es;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct rb_node *node;
+ ext4_lblk_t first_lclu, last_lclu;
+ unsigned long long last_counted_lclu;
+ unsigned int n = 0;
+
+ /* guaranteed to be unequal to any ext4_lblk_t value */
+ last_counted_lclu = ~0ULL;
+
+ es = __es_tree_search(&tree->root, start);
+
+ while (es && (es->es_lblk <= end)) {
+ if (ext4_es_is_delonly(es)) {
+ if (es->es_lblk <= start)
+ first_lclu = EXT4_B2C(sbi, start);
+ else
+ first_lclu = EXT4_B2C(sbi, es->es_lblk);
+
+ if (ext4_es_end(es) >= end)
+ last_lclu = EXT4_B2C(sbi, end);
+ else
+ last_lclu = EXT4_B2C(sbi, ext4_es_end(es));
+
+ if (first_lclu == last_counted_lclu)
+ n += last_lclu - first_lclu;
+ else
+ n += last_lclu - first_lclu + 1;
+ last_counted_lclu = last_lclu;
+ }
+ node = rb_next(&es->rb_node);
+ if (!node)
+ break;
+ es = rb_entry(node, struct extent_status, rb_node);
+ }
+
+ return n;
+}
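
A worked example (illustrative numbers): with s_cluster_ratio = 4 and delayed-only extents covering blocks [2-5] and [9], __es_delayed_clu(inode, 0, 11) counts clusters 0-1 for the first extent (n = 2, last_counted_lclu = 1) and cluster 2 for the second (n = 3); the last_counted_lclu comparison is what keeps two extents sharing a cluster from counting it twice.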
+
+/*
+ * ext4_es_delayed_clu - count number of clusters containing blocks that
+ *                       are delayed only (delayed and not unwritten)
+ *
+ * @inode - file containing block range
+ * @lblk - logical block defining start of range
+ * @len - number of blocks in range
+ *
+ * Locking for external use of __es_delayed_clu().
+ */
+unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ ext4_lblk_t end;
+ unsigned int n;
+
+ if (len == 0)
+ return 0;
+
+ end = lblk + len - 1;
+ WARN_ON(end < lblk);
+
+ read_lock(&ei->i_es_lock);
+
+ n = __es_delayed_clu(inode, lblk, end);
+
+ read_unlock(&ei->i_es_lock);
+
+ return n;
+}
+
+/*
+ * __revise_pending - makes, cancels, or leaves unchanged pending cluster
+ * reservations for a specified block range depending
+ * upon the presence or absence of delayed blocks
+ * outside the range within clusters at the ends of the
+ * range
+ *
+ * @inode - file containing the range
+ * @lblk - logical block defining the start of range
+ * @len - length of range in blocks
+ *
+ * Used after a newly allocated extent is added to the extents status tree.
+ * Requires that the extents in the range have either written or unwritten
+ * status. Must be called while holding i_es_lock.
+ */
+static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t end = lblk + len - 1;
+ ext4_lblk_t first, last;
+ bool f_del = false, l_del = false;
+
+ if (len == 0)
+ return;
+
+ /*
+ * Two cases - block range within single cluster and block range
+ * spanning two or more clusters. Note that a cluster belonging
+ * to a range starting and/or ending on a cluster boundary is treated
+ * as if it does not contain a delayed extent. The new range may
+ * have allocated space for previously delayed blocks out to the
+ * cluster boundary, requiring that any pre-existing pending
+ * reservation be canceled. Because this code only looks at blocks
+ * outside the range, it should revise pending reservations
+ * correctly even if the extent represented by the range can't be
+ * inserted in the extents status tree due to ENOSPC.
+ */
+
+ if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) {
+ first = EXT4_LBLK_CMASK(sbi, lblk);
+ if (first != lblk)
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+ if (f_del) {
+ __insert_pending(inode, first);
+ } else {
+ last = EXT4_LBLK_CMASK(sbi, end) +
+ sbi->s_cluster_ratio - 1;
+ if (last != end)
+ l_del = __es_scan_range(inode,
+ &ext4_es_is_delonly,
+ end + 1, last);
+ if (l_del)
+ __insert_pending(inode, last);
+ else
+ __remove_pending(inode, last);
+ }
+ } else {
+ first = EXT4_LBLK_CMASK(sbi, lblk);
+ if (first != lblk)
+ f_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ first, lblk - 1);
+ if (f_del)
+ __insert_pending(inode, first);
+ else
+ __remove_pending(inode, first);
+
+ last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
+ if (last != end)
+ l_del = __es_scan_range(inode, &ext4_es_is_delonly,
+ end + 1, last);
+ if (l_del)
+ __insert_pending(inode, last);
+ else
+ __remove_pending(inode, last);
+ }
+}
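
A worked example (illustrative, s_cluster_ratio = 4): inserting a written extent over blocks [5-6] stays within cluster 1, so the left test scans block [4] for a delayed-only block; if one is found, a pending reservation is made on the cluster, and only otherwise is block [7] checked to decide between making and canceling the reservation. Inserting over blocks [5-10] spans clusters 1 and 2, so the two ends are revised independently: block [4] decides cluster 1 and block [11] decides cluster 2.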
+
+/*
+ * ext4_es_remove_blks - remove block range from extents status tree and
+ * reduce reservation count or cancel pending
+ * reservation as needed
+ *
+ * @inode - file containing range
+ * @lblk - first block in range
+ * @len - number of blocks to remove
+ */
+void ext4_es_remove_blks(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ unsigned int clu_size, reserved = 0;
+ ext4_lblk_t last_lclu, first, length, remainder, last;
+ bool delonly;
+ int err = 0;
+ struct pending_reservation *pr;
+ struct ext4_pending_tree *tree;
+
+ /*
+ * Process cluster by cluster for bigalloc - there may be up to
+ * two clusters in a 4k page with a 1k block size and two blocks
+ * per cluster. Also necessary for systems with larger page sizes
+ * and potentially larger block sizes.
+ */
+ clu_size = sbi->s_cluster_ratio;
+ last_lclu = EXT4_B2C(sbi, lblk + len - 1);
+
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ for (first = lblk, remainder = len;
+ remainder > 0;
+ first += length, remainder -= length) {
+
+ if (EXT4_B2C(sbi, first) == last_lclu)
+ length = remainder;
+ else
+ length = clu_size - EXT4_LBLK_COFF(sbi, first);
+
+ /*
+ * The BH_Delay flag, which triggers calls to this function,
+ * and the contents of the extents status tree can be
+ * inconsistent due to writepages activity. So, note whether
+ * the blocks to be removed actually belong to an extent with
+ * delayed only status.
+ */
+ delonly = __es_scan_clu(inode, &ext4_es_is_delonly, first);
+
+ /*
+ * because of the writepages effect, written and unwritten
+ * blocks could be removed here
+ */
+ last = first + length - 1;
+ err = __es_remove_extent(inode, first, last);
+ if (err)
+ ext4_warning(inode->i_sb,
+ "%s: couldn't remove page (err = %d)",
+ __func__, err);
+
+ /* non-bigalloc case: simply count the cluster for release */
+ if (sbi->s_cluster_ratio == 1 && delonly) {
+ reserved++;
+ continue;
+ }
+
+ /*
+ * bigalloc case: if all delayed allocated only blocks have
+ * just been removed from a cluster, either cancel a pending
+ * reservation if it exists or count a cluster for release
+ */
+ if (delonly &&
+ !__es_scan_clu(inode, &ext4_es_is_delonly, first)) {
+ pr = __get_pending(inode, EXT4_B2C(sbi, first));
+ if (pr != NULL) {
+ tree = &EXT4_I(inode)->i_pending_tree;
+ rb_erase(&pr->rb_node, &tree->root);
+ kmem_cache_free(ext4_pending_cachep, pr);
+ } else {
+ reserved++;
+ }
+ }
+ }
+
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+
+ ext4_da_release_space(inode, reserved);
+}
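
The loop above partitions [lblk, lblk + len) on cluster boundaries; a compilable model of just that arithmetic (ratio 2, matching the 4k page / 1k block case mentioned in the comment):

#include <stdio.h>

int main(void)
{
	unsigned int clu_size = 2;		/* blocks per cluster */
	unsigned long lblk = 3, len = 6;	/* remove blocks 3..8 */
	unsigned long last_lclu = (lblk + len - 1) / clu_size;
	unsigned long first, remainder, length;

	for (first = lblk, remainder = len;
	     remainder > 0;
	     first += length, remainder -= length) {
		if (first / clu_size == last_lclu)
			length = remainder;	/* tail of the range */
		else
			length = clu_size - (first % clu_size);
		printf("chunk [%lu..%lu]\n", first, first + length - 1);
	}
	return 0;	/* prints [3..3] [4..5] [6..7] [8..8] */
}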
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index 8efdeb903d6b..131a8b7df265 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -78,6 +78,51 @@ struct ext4_es_stats {
struct percpu_counter es_stats_shk_cnt;
};
+/*
+ * Pending cluster reservations for bigalloc file systems
+ *
+ * A cluster with a pending reservation is a logical cluster shared by at
+ * least one extent in the extents status tree with delayed and unwritten
+ * status and at least one other written or unwritten extent. The
+ * reservation is said to be pending because a cluster reservation would
+ * have to be taken in the event all blocks in the cluster shared with
+ * written or unwritten extents were deleted while the delayed and
+ * unwritten blocks remained.
+ *
+ * The set of pending cluster reservations is an auxiliary data structure
+ * used with the extents status tree to implement reserved cluster/block
+ * accounting for bigalloc file systems. The set is kept in memory and
+ * records all pending cluster reservations.
+ *
+ * Its primary function is to avoid the need to read extents from the
+ * disk when invalidating pages as a result of a truncate, punch hole, or
+ * collapse range operation. Page invalidation requires a decrease in the
+ * reserved cluster count if it results in the removal of all delayed
+ * and unwritten extents (blocks) from a cluster that is not shared with a
+ * written or unwritten extent, and no decrease otherwise. Determining
+ * whether the cluster is shared can be done by searching for a pending
+ * reservation on it.
+ *
+ * Secondarily, it provides a potentially faster method for determining
+ * whether the reserved cluster count should be increased when a physical
+ * cluster is deallocated as a result of a truncate, punch hole, or
+ * collapse range operation. The necessary information is also present
+ * in the extents status tree, but can often be accessed more quickly
+ * in the pending reservation set due to its smaller size.
+ *
+ * The pending cluster reservation set is implemented as a red-black tree
+ * with the goal of minimizing per-page search time overhead.
+ */
+
+struct pending_reservation {
+ struct rb_node rb_node;
+ ext4_lblk_t lclu;
+};
+
+struct ext4_pending_tree {
+ struct rb_root root;
+};
+
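As a point of reference, a minimal sketch of lookups and insertions on this tree using the standard <linux/rbtree.h> API; the helpers below are hypothetical stand-ins for the patch's __get_pending() (seen above) and its assumed insertion counterpart, with GFP_ATOMIC assumed because the tree is manipulated under i_es_lock:

#include <linux/rbtree.h>
#include <linux/slab.h>

static struct pending_reservation *
pending_lookup(struct ext4_pending_tree *tree, ext4_lblk_t lclu)
{
	struct rb_node *node = tree->root.rb_node;

	while (node) {
		struct pending_reservation *pr =
			rb_entry(node, struct pending_reservation, rb_node);

		if (lclu < pr->lclu)
			node = node->rb_left;
		else if (lclu > pr->lclu)
			node = node->rb_right;
		else
			return pr;	/* cluster already has a pending reservation */
	}
	return NULL;
}

static int pending_insert(struct ext4_pending_tree *tree, ext4_lblk_t lclu)
{
	struct rb_node **p = &tree->root.rb_node, *parent = NULL;
	struct pending_reservation *pr;

	while (*p) {
		parent = *p;
		pr = rb_entry(parent, struct pending_reservation, rb_node);
		if (lclu < pr->lclu)
			p = &(*p)->rb_left;
		else if (lclu > pr->lclu)
			p = &(*p)->rb_right;
		else
			return 0;	/* already pending: nothing to do */
	}

	pr = kmalloc(sizeof(*pr), GFP_ATOMIC);
	if (!pr)
		return -ENOMEM;
	pr->lclu = lclu;
	rb_link_node(&pr->rb_node, parent, p);
	rb_insert_color(&pr->rb_node, &tree->root);
	return 0;
}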
extern int __init ext4_init_es(void);
extern void ext4_exit_es(void);
extern void ext4_es_init_tree(struct ext4_es_tree *tree);
@@ -90,11 +135,18 @@ extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
unsigned int status);
extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len);
-extern void ext4_es_find_delayed_extent_range(struct inode *inode,
- ext4_lblk_t lblk, ext4_lblk_t end,
- struct extent_status *es);
+extern void ext4_es_find_extent_range(struct inode *inode,
+ int (*match_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end,
+ struct extent_status *es);
extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status *es);
+extern bool ext4_es_scan_range(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk, ext4_lblk_t end);
+extern bool ext4_es_scan_clu(struct inode *inode,
+ int (*matching_fn)(struct extent_status *es),
+ ext4_lblk_t lblk);
static inline unsigned int ext4_es_status(struct extent_status *es)
{
@@ -126,6 +178,16 @@ static inline int ext4_es_is_hole(struct extent_status *es)
return (ext4_es_type(es) & EXTENT_STATUS_HOLE) != 0;
}
+static inline int ext4_es_is_mapped(struct extent_status *es)
+{
+ return (ext4_es_is_written(es) || ext4_es_is_unwritten(es));
+}
+
+static inline int ext4_es_is_delonly(struct extent_status *es)
+{
+ return (ext4_es_is_delayed(es) && !ext4_es_is_unwritten(es));
+}
+
static inline void ext4_es_set_referenced(struct extent_status *es)
{
es->es_pblk |= ((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT;
@@ -175,4 +237,16 @@ extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
extern int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v);
+extern int __init ext4_init_pending(void);
+extern void ext4_exit_pending(void);
+extern void ext4_init_pending_tree(struct ext4_pending_tree *tree);
+extern void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk);
+extern bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk);
+extern int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
+ bool allocated);
+extern unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len);
+extern void ext4_es_remove_blks(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len);
+
#endif /* _EXT4_EXTENTS_STATUS_H */
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 3543fe80a3c4..9c4bac18cc6c 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -863,7 +863,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
handle_t *handle;
struct page *page;
struct ext4_iloc iloc;
- int retries;
+ int retries = 0;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
@@ -1753,6 +1753,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
{
int err, inline_size;
struct ext4_iloc iloc;
+ size_t inline_len;
void *inline_pos;
unsigned int offset;
struct ext4_dir_entry_2 *de;
@@ -1780,8 +1781,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
goto out;
}
+ inline_len = ext4_get_inline_size(dir);
offset = EXT4_INLINE_DOTDOT_SIZE;
- while (offset < dir->i_size) {
+ while (offset < inline_len) {
de = ext4_get_inline_entry(dir, &iloc, offset,
&inline_pos, &inline_size);
if (ext4_check_dir_entry(dir, NULL, de,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d0dd585add6a..c3d9a42c561e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -577,8 +577,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
- ext4_find_delalloc_range(inode, map->m_lblk,
- map->m_lblk + map->m_len - 1))
+ ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
+ map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk,
map->m_len, map->m_pblk, status);
@@ -701,8 +701,8 @@ found:
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
- ext4_find_delalloc_range(inode, map->m_lblk,
- map->m_lblk + map->m_len - 1))
+ ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
+ map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
@@ -1595,7 +1595,7 @@ static int ext4_da_reserve_space(struct inode *inode)
return 0; /* success */
}
-static void ext4_da_release_space(struct inode *inode, int to_free)
+void ext4_da_release_space(struct inode *inode, int to_free)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -1634,13 +1634,11 @@ static void ext4_da_page_release_reservation(struct page *page,
unsigned int offset,
unsigned int length)
{
- int to_release = 0, contiguous_blks = 0;
+ int contiguous_blks = 0;
struct buffer_head *head, *bh;
unsigned int curr_off = 0;
struct inode *inode = page->mapping->host;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned int stop = offset + length;
- int num_clusters;
ext4_fsblk_t lblk;
BUG_ON(stop > PAGE_SIZE || stop < length);
@@ -1654,7 +1652,6 @@ static void ext4_da_page_release_reservation(struct page *page,
break;
if ((offset <= curr_off) && (buffer_delay(bh))) {
- to_release++;
contiguous_blks++;
clear_buffer_delay(bh);
} else if (contiguous_blks) {
@@ -1662,7 +1659,7 @@ static void ext4_da_page_release_reservation(struct page *page,
(PAGE_SHIFT - inode->i_blkbits);
lblk += (curr_off >> inode->i_blkbits) -
contiguous_blks;
- ext4_es_remove_extent(inode, lblk, contiguous_blks);
+ ext4_es_remove_blks(inode, lblk, contiguous_blks);
contiguous_blks = 0;
}
curr_off = next_off;
@@ -1671,21 +1668,9 @@ static void ext4_da_page_release_reservation(struct page *page,
if (contiguous_blks) {
lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
- ext4_es_remove_extent(inode, lblk, contiguous_blks);
+ ext4_es_remove_blks(inode, lblk, contiguous_blks);
}
- /* If we have released all the blocks belonging to a cluster, then we
- * need to release the reserved space for that cluster. */
- num_clusters = EXT4_NUM_B2C(sbi, to_release);
- while (num_clusters > 0) {
- lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
- ((num_clusters - 1) << sbi->s_cluster_bits);
- if (sbi->s_cluster_ratio == 1 ||
- !ext4_find_delalloc_cluster(inode, lblk))
- ext4_da_release_space(inode, 1);
-
- num_clusters--;
- }
}
/*
@@ -1781,6 +1766,65 @@ static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
}
/*
+ * ext4_insert_delayed_block - adds a delayed block to the extents status
+ * tree, incrementing the reserved cluster/block
+ * count or making a pending reservation
+ * where needed
+ *
+ * @inode - file containing the newly added block
+ * @lblk - logical block to be added
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ int ret;
+ bool allocated = false;
+
+ /*
+ * If the cluster containing lblk is shared with a delayed,
+ * written, or unwritten extent in a bigalloc file system, it's
+ * already been accounted for and does not need to be reserved.
+ * A pending reservation must be made for the cluster if it's
+ * shared with a written or unwritten extent and doesn't already
+ * have one. Written and unwritten extents can be purged from the
+ * extents status tree if the system is under memory pressure, so
+ * it's necessary to examine the extent tree if a search of the
+ * extents status tree doesn't get a match.
+ */
+ if (sbi->s_cluster_ratio == 1) {
+ ret = ext4_da_reserve_space(inode);
+ if (ret != 0) /* ENOSPC */
+ goto errout;
+ } else { /* bigalloc */
+ if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
+ if (!ext4_es_scan_clu(inode,
+ &ext4_es_is_mapped, lblk)) {
+ ret = ext4_clu_mapped(inode,
+ EXT4_B2C(sbi, lblk));
+ if (ret < 0)
+ goto errout;
+ if (ret == 0) {
+ ret = ext4_da_reserve_space(inode);
+ if (ret != 0) /* ENOSPC */
+ goto errout;
+ } else {
+ allocated = true;
+ }
+ } else {
+ allocated = true;
+ }
+ }
+ }
+
+ ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
+
+errout:
+ return ret;
+}
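The bigalloc branch above reduces to a three-way classification of the cluster. The following helper is a hypothetical distillation, not patch code: delonly_in_es and mapped_in_es stand for the two ext4_es_scan_clu() results and mapped_on_disk for a positive ext4_clu_mapped() return.

enum clu_action {
	CLU_NOTHING,	/* delayed-only blocks already present: accounted for */
	CLU_RESERVE,	/* cluster entirely unallocated: take a reservation */
	CLU_PENDING,	/* shared with a written/unwritten extent: pass
			 * allocated == true so a pending reservation can
			 * be made */
};

static enum clu_action classify_cluster(bool delonly_in_es,
					bool mapped_in_es, bool mapped_on_disk)
{
	if (delonly_in_es)
		return CLU_NOTHING;
	if (mapped_in_es || mapped_on_disk)
		return CLU_PENDING;
	return CLU_RESERVE;
}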
+
+/*
* This function grabs code from the very beginning of
* ext4_map_blocks, but assumes that the caller is from delayed write
* time. This function looks up the requested blocks and sets the
@@ -1859,28 +1903,14 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
add_delayed:
if (retval == 0) {
int ret;
+
/*
* XXX: __block_prepare_write() unmaps passed block,
* is it OK?
*/
- /*
- * If the block was allocated from previously allocated cluster,
- * then we don't need to reserve it again. However we still need
- * to reserve metadata for every block we're going to write.
- */
- if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
- !ext4_find_delalloc_cluster(inode, map->m_lblk)) {
- ret = ext4_da_reserve_space(inode);
- if (ret) {
- /* not enough space to reserve */
- retval = ret;
- goto out_unlock;
- }
- }
- ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- ~0, EXTENT_STATUS_DELAYED);
- if (ret) {
+ ret = ext4_insert_delayed_block(inode, map->m_lblk);
+ if (ret != 0) {
retval = ret;
goto out_unlock;
}
@@ -3413,12 +3443,16 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned int blkbits = inode->i_blkbits;
- unsigned long first_block = offset >> blkbits;
- unsigned long last_block = (offset + length - 1) >> blkbits;
+ unsigned long first_block, last_block;
struct ext4_map_blocks map;
bool delalloc = false;
int ret;
+ if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
+ return -EINVAL;
+ first_block = offset >> blkbits;
+ last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
+ EXT4_MAX_LOGICAL_BLOCK);
if (flags & IOMAP_REPORT) {
if (ext4_has_inline_data(inode)) {
@@ -3446,7 +3480,8 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
ext4_lblk_t end = map.m_lblk + map.m_len - 1;
struct extent_status es;
- ext4_es_find_delayed_extent_range(inode, map.m_lblk, end, &es);
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
+ map.m_lblk, end, &es);
if (!es.es_len || es.es_lblk > end) {
/* entire range is a hole */
@@ -3948,6 +3983,7 @@ static const struct address_space_operations ext4_dax_aops = {
.writepages = ext4_dax_writepages,
.direct_IO = noop_direct_IO,
.set_page_dirty = noop_set_page_dirty,
+ .bmap = ext4_bmap,
.invalidatepage = noop_invalidatepage,
};
@@ -4192,9 +4228,8 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
return 0;
}
-static void ext4_wait_dax_page(struct ext4_inode_info *ei, bool *did_unlock)
+static void ext4_wait_dax_page(struct ext4_inode_info *ei)
{
- *did_unlock = true;
up_write(&ei->i_mmap_sem);
schedule();
down_write(&ei->i_mmap_sem);
@@ -4204,14 +4239,12 @@ int ext4_break_layouts(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct page *page;
- bool retry;
int error;
if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
return -EINVAL;
do {
- retry = false;
page = dax_layout_busy_page(inode->i_mapping);
if (!page)
return 0;
@@ -4219,8 +4252,8 @@ int ext4_break_layouts(struct inode *inode)
error = ___wait_var_event(&page->_refcount,
atomic_read(&page->_refcount) == 1,
TASK_INTERRUPTIBLE, 0, 0,
- ext4_wait_dax_page(ei, &retry));
- } while (error == 0 && retry);
+ ext4_wait_dax_page(ei));
+ } while (error == 0);
return error;
}
@@ -4895,6 +4928,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
* not initialized on a new filesystem. */
}
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+ ext4_set_inode_flags(inode);
inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
if (ext4_has_feature_64bit(sb))
@@ -5041,7 +5075,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
goto bad_inode;
}
brelse(iloc.bh);
- ext4_set_inode_flags(inode);
unlock_new_inode(inode);
return inode;
@@ -6151,13 +6184,14 @@ static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
return !buffer_mapped(bh);
}
-int ext4_page_mkwrite(struct vm_fault *vmf)
+vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page = vmf->page;
loff_t size;
unsigned long len;
- int ret;
+ int err;
+ vm_fault_t ret;
struct file *file = vma->vm_file;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
@@ -6170,8 +6204,8 @@ int ext4_page_mkwrite(struct vm_fault *vmf)
down_read(&EXT4_I(inode)->i_mmap_sem);
- ret = ext4_convert_inline_data(inode);
- if (ret)
+ err = ext4_convert_inline_data(inode);
+ if (err)
goto out_ret;
/* Delalloc case is easy... */
@@ -6179,9 +6213,9 @@ int ext4_page_mkwrite(struct vm_fault *vmf)
!ext4_should_journal_data(inode) &&
!ext4_nonda_switch(inode->i_sb)) {
do {
- ret = block_page_mkwrite(vma, vmf,
+ err = block_page_mkwrite(vma, vmf,
ext4_da_get_block_prep);
- } while (ret == -ENOSPC &&
+ } while (err == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries));
goto out_ret;
}
@@ -6226,8 +6260,8 @@ retry_alloc:
ret = VM_FAULT_SIGBUS;
goto out;
}
- ret = block_page_mkwrite(vma, vmf, get_block);
- if (!ret && ext4_should_journal_data(inode)) {
+ err = block_page_mkwrite(vma, vmf, get_block);
+ if (!err && ext4_should_journal_data(inode)) {
if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
PAGE_SIZE, NULL, do_journal_get_write_access)) {
unlock_page(page);
@@ -6238,24 +6272,24 @@ retry_alloc:
ext4_set_inode_state(inode, EXT4_STATE_JDATA);
}
ext4_journal_stop(handle);
- if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_alloc;
out_ret:
- ret = block_page_mkwrite_return(ret);
+ ret = block_page_mkwrite_return(err);
out:
up_read(&EXT4_I(inode)->i_mmap_sem);
sb_end_pagefault(inode->i_sb);
return ret;
}
-int ext4_filemap_fault(struct vm_fault *vmf)
+vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
- int err;
+ vm_fault_t ret;
down_read(&EXT4_I(inode)->i_mmap_sem);
- err = filemap_fault(vmf);
+ ret = filemap_fault(vmf);
up_read(&EXT4_I(inode)->i_mmap_sem);
- return err;
+ return ret;
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index a7074115d6f6..0edee31913d1 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -67,7 +67,6 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
ei1 = EXT4_I(inode1);
ei2 = EXT4_I(inode2);
- swap(inode1->i_flags, inode2->i_flags);
swap(inode1->i_version, inode2->i_version);
swap(inode1->i_blocks, inode2->i_blocks);
swap(inode1->i_bytes, inode2->i_bytes);
@@ -85,6 +84,21 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
i_size_write(inode2, isize);
}
+static void reset_inode_seed(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ __le32 inum = cpu_to_le32(inode->i_ino);
+ __le32 gen = cpu_to_le32(inode->i_generation);
+ __u32 csum;
+
+ if (!ext4_has_metadata_csum(inode->i_sb))
+ return;
+
+ csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
+ ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen));
+}
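Conceptually, reset_inode_seed() rebuilds the per-inode checksum seed by chaining the filesystem seed with the inode number and then the new generation; since i_generation changes across the swap, the seed must be recomputed for both inodes. A hypothetical restatement, assuming the crc32c that the metadata_csum feature is built on (ext4_chksum() wraps it):

#include <linux/crc32c.h>

static u32 inode_csum_seed(u32 fs_seed, __le32 inum, __le32 gen)
{
	u32 csum = crc32c(fs_seed, &inum, sizeof(inum));

	return crc32c(csum, &gen, sizeof(gen));
}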
+
/**
* Swap the information from the given @inode and the inode
* EXT4_BOOT_LOADER_INO. It will basically swap i_data and all other
@@ -102,10 +116,13 @@ static long swap_inode_boot_loader(struct super_block *sb,
struct inode *inode_bl;
struct ext4_inode_info *ei_bl;
- if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode))
+ if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
+ IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
+ ext4_has_inline_data(inode))
return -EINVAL;
- if (!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
+ if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
+ !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
return -EPERM;
inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
@@ -120,13 +137,13 @@ static long swap_inode_boot_loader(struct super_block *sb,
* that only 1 swap_inode_boot_loader is running. */
lock_two_nondirectories(inode, inode_bl);
- truncate_inode_pages(&inode->i_data, 0);
- truncate_inode_pages(&inode_bl->i_data, 0);
-
/* Wait for all existing dio workers */
inode_dio_wait(inode);
inode_dio_wait(inode_bl);
+ truncate_inode_pages(&inode->i_data, 0);
+ truncate_inode_pages(&inode_bl->i_data, 0);
+
handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
if (IS_ERR(handle)) {
err = -EINVAL;
@@ -159,6 +176,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
inode->i_generation = prandom_u32();
inode_bl->i_generation = prandom_u32();
+ reset_inode_seed(inode);
+ reset_inode_seed(inode_bl);
ext4_discard_preallocations(inode);
@@ -169,6 +188,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
inode->i_ino, err);
/* Revert all changes: */
swap_inode_data(inode, inode_bl);
+ ext4_mark_inode_dirty(handle, inode);
} else {
err = ext4_mark_inode_dirty(handle, inode_bl);
if (err < 0) {
@@ -178,6 +198,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
/* Revert all changes: */
swap_inode_data(inode, inode_bl);
ext4_mark_inode_dirty(handle, inode);
+ ext4_mark_inode_dirty(handle, inode_bl);
}
}
ext4_journal_stop(handle);
@@ -339,19 +360,14 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
if (projid_eq(kprojid, EXT4_I(inode)->i_projid))
return 0;
- err = mnt_want_write_file(filp);
- if (err)
- return err;
-
err = -EPERM;
- inode_lock(inode);
/* Is it quota file? Do not allow user to mess with it */
if (ext4_is_quota_file(inode))
- goto out_unlock;
+ return err;
err = ext4_get_inode_loc(inode, &iloc);
if (err)
- goto out_unlock;
+ return err;
raw_inode = ext4_raw_inode(&iloc);
if (!EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) {
@@ -359,20 +375,20 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
EXT4_SB(sb)->s_want_extra_isize,
&iloc);
if (err)
- goto out_unlock;
+ return err;
} else {
brelse(iloc.bh);
}
- dquot_initialize(inode);
+ err = dquot_initialize(inode);
+ if (err)
+ return err;
handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
EXT4_QUOTA_INIT_BLOCKS(sb) +
EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
- if (IS_ERR(handle)) {
- err = PTR_ERR(handle);
- goto out_unlock;
- }
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
@@ -400,9 +416,6 @@ out_dirty:
err = rc;
out_stop:
ext4_journal_stop(handle);
-out_unlock:
- inode_unlock(inode);
- mnt_drop_write_file(filp);
return err;
}
#else
@@ -626,6 +639,30 @@ group_add_out:
return err;
}
+static int ext4_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+{
+ /*
+ * Project Quota ID state is only allowed to change from within the init
+ * namespace. Enforce that restriction only if we are trying to change
+ * the quota ID state. Everything else is allowed in user namespaces.
+ */
+ if (current_user_ns() == &init_user_ns)
+ return 0;
+
+ if (__kprojid_val(EXT4_I(inode)->i_projid) != fa->fsx_projid)
+ return -EINVAL;
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_PROJINHERIT)) {
+ if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
+ return -EINVAL;
+ } else {
+ if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -1025,19 +1062,19 @@ resizefs_out:
return err;
inode_lock(inode);
+ err = ext4_ioctl_check_project(inode, &fa);
+ if (err)
+ goto out;
flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
(flags & EXT4_FL_XFLAG_VISIBLE);
err = ext4_ioctl_setflags(inode, flags);
- inode_unlock(inode);
- mnt_drop_write_file(filp);
if (err)
- return err;
-
+ goto out;
err = ext4_ioctl_setproject(filp, fa.fsx_projid);
- if (err)
- return err;
-
- return 0;
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return err;
}
case EXT4_IOC_SHUTDOWN:
return ext4_shutdown(sb, arg);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index e29fce2fbf25..e2248083cdca 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4915,9 +4915,17 @@ do_more:
&sbi->s_flex_groups[flex_group].free_clusters);
}
- if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
- dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
- percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
+ /*
+ * on a bigalloc file system, defer the s_freeclusters_counter
+ * update to the caller (ext4_remove_space and friends) so they
+ * can determine if a cluster freed here should be rereserved
+ */
+ if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
+ if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+ dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
+ percpu_counter_add(&sbi->s_freeclusters_counter,
+ count_clusters);
+ }
ext4_mb_unload_buddy(&e4b);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 39b07c2d3384..2305b4374fd3 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -49,7 +49,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
*/
sb_start_write(sb);
ext4_mmp_csum_set(sb, mmp);
- mark_buffer_dirty(bh);
lock_buffer(bh);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index a409ff70d67b..2f5be02fc6f6 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -516,9 +516,13 @@ mext_check_arguments(struct inode *orig_inode,
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
- if (orig_eof < orig_start + *len - 1)
+ if (orig_eof <= orig_start)
+ *len = 0;
+ else if (orig_eof < orig_start + *len - 1)
*len = orig_eof - orig_start;
- if (donor_eof < donor_start + *len - 1)
+ if (donor_eof <= donor_start)
+ *len = 0;
+ else if (donor_eof < donor_start + *len - 1)
*len = donor_eof - donor_start;
if (!*len) {
ext4_debug("ext4 move extent: len should not be 0 "
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 116ff68c5bd4..67a38532032a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2261,7 +2261,7 @@ again:
dxroot->info.indirect_levels += 1;
dxtrace(printk(KERN_DEBUG
"Creating %d level index...\n",
- info->indirect_levels));
+ dxroot->info.indirect_levels));
err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
if (err)
goto journal_error;
@@ -3478,6 +3478,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
int credits;
u8 old_file_type;
+ if (new.inode && new.inode->i_nlink == 0) {
+ EXT4_ERROR_INODE(new.inode,
+ "target of rename is already freed");
+ return -EFSCORRUPTED;
+ }
+
if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) &&
(!projid_eq(EXT4_I(new_dir)->i_projid,
EXT4_I(old_dentry->d_inode)->i_projid)))
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index db7590178dfc..2aa62d58d8dd 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -374,13 +374,13 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
if (!bio)
return -ENOMEM;
- wbc_init_bio(io->io_wbc, bio);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end);
io->io_bio = bio;
io->io_next_block = bh->b_blocknr;
+ wbc_init_bio(io->io_wbc, bio);
return 0;
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index e5fb38451a73..ebbc663d0798 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -19,6 +19,7 @@
int ext4_resize_begin(struct super_block *sb)
{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
int ret = 0;
if (!capable(CAP_SYS_RESOURCE))
@@ -29,7 +30,7 @@ int ext4_resize_begin(struct super_block *sb)
* because the user tools have no way of handling this. Probably a
* bad time to do it anyways.
*/
- if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+ if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
ext4_warning(sb, "won't resize using backup superblock at %llu",
(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
@@ -1986,6 +1987,26 @@ retry:
}
}
+ /*
+ * Make sure the last group is large enough to hold all the
+ * metadata blocks it might need. (We might not need to store
+ * the inode table blocks in the last block group, but there
+ * will be cases where this might be needed.)
+ */
+ if ((ext4_group_first_block_no(sb, n_group) +
+ ext4_group_overhead_blocks(sb, n_group) + 2 +
+ sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
+ n_blocks_count = ext4_group_first_block_no(sb, n_group);
+ n_group--;
+ n_blocks_count_retry = 0;
+ if (resize_inode) {
+ iput(resize_inode);
+ resize_inode = NULL;
+ }
+ goto retry;
+ }
+
/* extend the last group */
if (n_group == o_group)
add = n_blocks_count - o_blocks_count;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 5863fd22e90b..a221f1cdf704 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -914,6 +914,18 @@ static inline void ext4_quota_off_umount(struct super_block *sb)
for (type = 0; type < EXT4_MAXQUOTAS; type++)
ext4_quota_off(sb, type);
}
+
+/*
+ * Helper function used in the mount/remount codepaths (which hold
+ * s_umount) to fetch the quota file name.
+ */
+static inline char *get_qf_name(struct super_block *sb,
+ struct ext4_sb_info *sbi,
+ int type)
+{
+ return rcu_dereference_protected(sbi->s_qf_names[type],
+ lockdep_is_held(&sb->s_umount));
+}
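The s_qf_names conversion in this patch is the standard RCU publish/retire idiom: publish with rcu_assign_pointer(), wait out a grace period, then free the old string. A generic sketch of that pattern (the cfg type, lock, and helper names are illustrative, not ext4's):

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

static DEFINE_MUTEX(cfg_lock);		/* plays the role of s_umount here */

struct cfg {
	char __rcu *name;
};

/* writer side: publish the new string, retire the old one after a
 * grace period, as clear_qf_name() and ext4_remount() do */
static void cfg_set_name(struct cfg *c, char *new_name)
{
	char *old;

	mutex_lock(&cfg_lock);
	old = rcu_dereference_protected(c->name, lockdep_is_held(&cfg_lock));
	rcu_assign_pointer(c->name, new_name);
	mutex_unlock(&cfg_lock);

	synchronize_rcu();		/* no reader can still see 'old' */
	kfree(old);
}

/* reader side: valid only inside an RCU read-side critical section,
 * as in ext4_show_quota_options() */
static void cfg_show_name(struct cfg *c, struct seq_file *seq)
{
	char *name;

	rcu_read_lock();
	name = rcu_dereference(c->name);
	if (name)
		seq_show_option(seq, "name", name);
	rcu_read_unlock();
}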
#else
static inline void ext4_quota_off_umount(struct super_block *sb)
{
@@ -965,7 +977,7 @@ static void ext4_put_super(struct super_block *sb)
percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
#ifdef CONFIG_QUOTA
for (i = 0; i < EXT4_MAXQUOTAS; i++)
- kfree(sbi->s_qf_names[i]);
+ kfree(get_qf_name(sb, sbi, i));
#endif
/* Debugging code just in case the in-memory inode orphan list
@@ -1040,6 +1052,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_da_metadata_calc_len = 0;
ei->i_da_metadata_calc_last_lblock = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
+ ext4_init_pending_tree(&ei->i_pending_tree);
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
@@ -1530,11 +1543,10 @@ static const char deprecated_msg[] =
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- char *qname;
+ char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
int ret = -1;
- if (sb_any_quota_loaded(sb) &&
- !sbi->s_qf_names[qtype]) {
+ if (sb_any_quota_loaded(sb) && !old_qname) {
ext4_msg(sb, KERN_ERR,
"Cannot change journaled "
"quota options when quota turned on");
@@ -1551,8 +1563,8 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
"Not enough memory for storing quotafile name");
return -1;
}
- if (sbi->s_qf_names[qtype]) {
- if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
+ if (old_qname) {
+ if (strcmp(old_qname, qname) == 0)
ret = 1;
else
ext4_msg(sb, KERN_ERR,
@@ -1565,7 +1577,7 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
"quotafile must be on filesystem root");
goto errout;
}
- sbi->s_qf_names[qtype] = qname;
+ rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
set_opt(sb, QUOTA);
return 1;
errout:
@@ -1577,15 +1589,16 @@ static int clear_qf_name(struct super_block *sb, int qtype)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ char *old_qname = get_qf_name(sb, sbi, qtype);
- if (sb_any_quota_loaded(sb) &&
- sbi->s_qf_names[qtype]) {
+ if (sb_any_quota_loaded(sb) && old_qname) {
ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
" when quota turned on");
return -1;
}
- kfree(sbi->s_qf_names[qtype]);
- sbi->s_qf_names[qtype] = NULL;
+ rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
+ synchronize_rcu();
+ kfree(old_qname);
return 1;
}
#endif
@@ -1960,7 +1973,7 @@ static int parse_options(char *options, struct super_block *sb,
int is_remount)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- char *p;
+ char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
substring_t args[MAX_OPT_ARGS];
int token;
@@ -1991,11 +2004,13 @@ static int parse_options(char *options, struct super_block *sb,
"Cannot enable project quota enforcement.");
return 0;
}
- if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
- if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
+ usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
+ grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
+ if (usr_qf_name || grp_qf_name) {
+ if (test_opt(sb, USRQUOTA) && usr_qf_name)
clear_opt(sb, USRQUOTA);
- if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
+ if (test_opt(sb, GRPQUOTA) && grp_qf_name)
clear_opt(sb, GRPQUOTA);
if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
@@ -2029,6 +2044,7 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
{
#if defined(CONFIG_QUOTA)
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ char *usr_qf_name, *grp_qf_name;
if (sbi->s_jquota_fmt) {
char *fmtname = "";
@@ -2047,11 +2063,14 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
seq_printf(seq, ",jqfmt=%s", fmtname);
}
- if (sbi->s_qf_names[USRQUOTA])
- seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
-
- if (sbi->s_qf_names[GRPQUOTA])
- seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
+ rcu_read_lock();
+ usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
+ grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
+ if (usr_qf_name)
+ seq_show_option(seq, "usrjquota", usr_qf_name);
+ if (grp_qf_name)
+ seq_show_option(seq, "grpjquota", grp_qf_name);
+ rcu_read_unlock();
#endif
}
@@ -2145,6 +2164,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
if (test_opt(sb, DATA_ERR_ABORT))
SEQ_OPTS_PUTS("data_err=abort");
+ if (DUMMY_ENCRYPTION_ENABLED(sbi))
+ SEQ_OPTS_PUTS("test_dummy_encryption");
ext4_show_quota_options(seq, sb);
return 0;
@@ -4378,11 +4399,13 @@ no_journal:
block = ext4_count_free_clusters(sb);
ext4_free_blocks_count_set(sbi->s_es,
EXT4_C2B(sbi, block));
+ ext4_superblock_csum_set(sb);
err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
GFP_KERNEL);
if (!err) {
unsigned long freei = ext4_count_free_inodes(sb);
sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+ ext4_superblock_csum_set(sb);
err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
GFP_KERNEL);
}
@@ -5099,6 +5122,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
int err = 0;
#ifdef CONFIG_QUOTA
int i, j;
+ char *to_free[EXT4_MAXQUOTAS];
#endif
char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -5118,8 +5142,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
for (i = 0; i < EXT4_MAXQUOTAS; i++)
if (sbi->s_qf_names[i]) {
- old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
- GFP_KERNEL);
+ char *qf_name = get_qf_name(sb, sbi, i);
+
+ old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
if (!old_opts.s_qf_names[i]) {
for (j = 0; j < i; j++)
kfree(old_opts.s_qf_names[j]);
@@ -5348,9 +5373,12 @@ restore_opts:
#ifdef CONFIG_QUOTA
sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
- kfree(sbi->s_qf_names[i]);
- sbi->s_qf_names[i] = old_opts.s_qf_names[i];
+ to_free[i] = get_qf_name(sb, sbi, i);
+ rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
}
+ synchronize_rcu();
+ for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ kfree(to_free[i]);
#endif
kfree(orig_data);
return err;
@@ -5541,7 +5569,7 @@ static int ext4_write_info(struct super_block *sb, int type)
*/
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
- return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
+ return dquot_quota_on_mount(sb, get_qf_name(sb, EXT4_SB(sb), type),
EXT4_SB(sb)->s_jquota_fmt, type);
}
@@ -5950,6 +5978,10 @@ static int __init ext4_init_fs(void)
if (err)
return err;
+ err = ext4_init_pending();
+ if (err)
+ goto out6;
+
err = ext4_init_pageio();
if (err)
goto out5;
@@ -5988,6 +6020,8 @@ out3:
out4:
ext4_exit_pageio();
out5:
+ ext4_exit_pending();
+out6:
ext4_exit_es();
return err;
@@ -6005,6 +6039,7 @@ static void __exit ext4_exit_fs(void)
ext4_exit_system_zone();
ext4_exit_pageio();
ext4_exit_es();
+ ext4_exit_pending();
}
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 111824199a88..fa707cdd4120 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/acl.c
*
@@ -7,10 +8,6 @@
* Portions of this code from linux/fs/ext2/acl.c
*
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/f2fs_fs.h>
#include "f2fs.h"
@@ -53,6 +50,9 @@ static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size)
struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1);
const char *end = value + size;
+ if (size < sizeof(struct f2fs_acl_header))
+ return ERR_PTR(-EINVAL);
+
if (hdr->a_version != cpu_to_le32(F2FS_ACL_VERSION))
return ERR_PTR(-EINVAL);
@@ -394,12 +394,16 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl,
ipage);
posix_acl_release(default_acl);
+ } else {
+ inode->i_default_acl = NULL;
}
if (acl) {
if (!error)
error = __f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl,
ipage);
posix_acl_release(acl);
+ } else {
+ inode->i_acl = NULL;
}
return error;
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index 2c685185c24d..b96823c59b15 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/acl.h
*
@@ -7,10 +8,6 @@
* Portions of this code from linux/fs/ext2/acl.h
*
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __F2FS_ACL_H__
#define __F2FS_ACL_H__
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index e8b6b89bddb8..9c28ea439e0b 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/checkpoint.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/bio.h>
@@ -122,11 +119,8 @@ retry:
if (PTR_ERR(page) == -EIO &&
++count <= DEFAULT_RETRY_IO_COUNT)
goto retry;
-
f2fs_stop_checkpoint(sbi, false);
- f2fs_bug_on(sbi, 1);
}
-
return page;
}
@@ -282,8 +276,7 @@ static int __f2fs_write_meta_page(struct page *page,
dec_page_count(sbi, F2FS_DIRTY_META);
if (wbc->for_reclaim)
- f2fs_submit_merged_write_cond(sbi, page->mapping->host,
- 0, page->index, META);
+ f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);
unlock_page(page);
@@ -696,6 +689,8 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
/* clear Orphan Flag */
clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
out:
+ set_sbi_flag(sbi, SBI_IS_RECOVERED);
+
#ifdef CONFIG_QUOTA
/* Turn quotas off */
if (quota_enabled)
@@ -1084,6 +1079,21 @@ static void __prepare_cp_block(struct f2fs_sb_info *sbi)
ckpt->next_free_nid = cpu_to_le32(last_nid);
}
+static bool __need_flush_quota(struct f2fs_sb_info *sbi)
+{
+ if (!is_journalled_quota(sbi))
+ return false;
+ if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
+ return false;
+ if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
+ return false;
+ if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH))
+ return true;
+ if (get_pages(sbi, F2FS_DIRTY_QDATA))
+ return true;
+ return false;
+}
+
/*
* Freeze all the FS-operations for checkpoint.
*/
@@ -1095,12 +1105,36 @@ static int block_operations(struct f2fs_sb_info *sbi)
.for_reclaim = 0,
};
struct blk_plug plug;
- int err = 0;
+ int err = 0, cnt = 0;
blk_start_plug(&plug);
-retry_flush_dents:
+retry_flush_quotas:
+ if (__need_flush_quota(sbi)) {
+ int locked;
+
+ if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
+ set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
+ f2fs_lock_all(sbi);
+ goto retry_flush_dents;
+ }
+ clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
+
+ /* this can fail only during mount/umount/freeze/quotactl */
+ locked = down_read_trylock(&sbi->sb->s_umount);
+ f2fs_quota_sync(sbi->sb, -1);
+ if (locked)
+ up_read(&sbi->sb->s_umount);
+ }
+
f2fs_lock_all(sbi);
+ if (__need_flush_quota(sbi)) {
+ f2fs_unlock_all(sbi);
+ cond_resched();
+ goto retry_flush_quotas;
+ }
+
+retry_flush_dents:
/* write all the dirty dentry pages */
if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
f2fs_unlock_all(sbi);
@@ -1108,7 +1142,7 @@ retry_flush_dents:
if (err)
goto out;
cond_resched();
- goto retry_flush_dents;
+ goto retry_flush_quotas;
}
/*
@@ -1117,6 +1151,12 @@ retry_flush_dents:
*/
down_write(&sbi->node_change);
+ if (__need_flush_quota(sbi)) {
+ up_write(&sbi->node_change);
+ f2fs_unlock_all(sbi);
+ goto retry_flush_quotas;
+ }
+
if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
up_write(&sbi->node_change);
f2fs_unlock_all(sbi);
@@ -1124,7 +1164,7 @@ retry_flush_dents:
if (err)
goto out;
cond_resched();
- goto retry_flush_dents;
+ goto retry_flush_quotas;
}
retry_flush_nodes:
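The quota handling added to block_operations() above is a flush-outside-lock / verify-under-lock retry loop with a bounded budget. A schematic sketch of that shape with hypothetical stubs (none of these names are f2fs APIs):

#include <stdbool.h>

#define RETRY_BUDGET 8		/* role of DEFAULT_RETRY_QUOTA_FLUSH_COUNT */

extern bool quota_dirty(void);		/* ~ __need_flush_quota() */
extern void quota_sync(void);		/* may race with new dirtying */
extern void lock_all(void);		/* ~ f2fs_lock_all() */
extern void unlock_all(void);
extern void mark_skip_flush(void);	/* ~ SBI_QUOTA_SKIP_FLUSH */

static void freeze_quotas(void)
{
	int cnt = 0;

	for (;;) {
		if (quota_dirty()) {
			if (++cnt > RETRY_BUDGET) {
				/* give up; the flag makes the checkpoint
				 * record that the flush was skipped */
				mark_skip_flush();
				lock_all();
				return;
			}
			quota_sync();	/* flushed without the lock held */
		}
		lock_all();
		if (!quota_dirty())
			return;		/* quiesced while locked: done */
		unlock_all();		/* new dirtying slipped in: retry */
	}
}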
@@ -1215,6 +1255,19 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
__set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+ if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
+ __set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);
+
+ if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
+ __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
+
+ if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
+ __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
+
/* set this flag to activate crc|cp_ver for recovery */
__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
__clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);
@@ -1422,6 +1475,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
clear_sbi_flag(sbi, SBI_IS_DIRTY);
clear_sbi_flag(sbi, SBI_NEED_CP);
+ clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
+ sbi->unusable_block_count = 0;
__set_cp_next_pack(sbi);
/*
@@ -1446,6 +1501,12 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unsigned long long ckpt_ver;
int err = 0;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ if (cpc->reason != CP_PAUSE)
+ return 0;
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Start checkpoint disabled!");
+ }
mutex_lock(&sbi->cp_mutex);
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
@@ -1497,7 +1558,10 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
/* write cached NAT/SIT entries to NAT/SIT area */
- f2fs_flush_nat_entries(sbi, cpc);
+ err = f2fs_flush_nat_entries(sbi, cpc);
+ if (err)
+ goto stop;
+
f2fs_flush_sit_entries(sbi, cpc);
/* unlock all the fs_lock[] in do_checkpoint() */
@@ -1506,7 +1570,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_release_discard_addrs(sbi);
else
f2fs_clear_prefree_segments(sbi, cpc);
-
+stop:
unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 382c1ef9a9e4..106f116466bf 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/data.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -49,12 +46,29 @@ static bool __is_cp_guaranteed(struct page *page)
inode->i_ino == F2FS_NODE_INO(sbi) ||
S_ISDIR(inode->i_mode) ||
(S_ISREG(inode->i_mode) &&
- is_inode_flag_set(inode, FI_ATOMIC_FILE)) ||
+ (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
is_cold_data(page))
return true;
return false;
}
+static enum count_type __read_io_type(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+
+ if (mapping) {
+ struct inode *inode = mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (inode->i_ino == F2FS_META_INO(sbi))
+ return F2FS_RD_META;
+
+ if (inode->i_ino == F2FS_NODE_INO(sbi))
+ return F2FS_RD_NODE;
+ }
+ return F2FS_RD_DATA;
+}
+
/* postprocessing steps for read bios */
enum bio_post_read_step {
STEP_INITIAL = 0,
@@ -80,10 +94,12 @@ static void __read_end_io(struct bio *bio)
/* PG_error was set if any post_read step failed */
if (bio->bi_status || PageError(page)) {
ClearPageUptodate(page);
- SetPageError(page);
+ /* will be re-read later */
+ ClearPageError(page);
} else {
SetPageUptodate(page);
}
+ dec_page_count(F2FS_P_SB(page), __read_io_type(page));
unlock_page(page);
}
if (bio->bi_private)
@@ -126,8 +142,9 @@ static bool f2fs_bio_post_read_required(struct bio *bio)
static void f2fs_read_end_io(struct bio *bio)
{
- if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)), FAULT_IO)) {
- f2fs_show_injection_info(FAULT_IO);
+ if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
+ FAULT_READ_IO)) {
+ f2fs_show_injection_info(FAULT_READ_IO);
bio->bi_status = BLK_STS_IOERR;
}
@@ -148,6 +165,11 @@ static void f2fs_write_end_io(struct bio *bio)
struct bio_vec *bvec;
int i;
+ if (time_to_inject(sbi, FAULT_WRITE_IO)) {
+ f2fs_show_injection_info(FAULT_WRITE_IO);
+ bio->bi_status = BLK_STS_IOERR;
+ }
+
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
enum count_type type = WB_DATA_TYPE(page);
@@ -319,8 +341,8 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
io->bio = NULL;
}
-static bool __has_merged_page(struct f2fs_bio_info *io,
- struct inode *inode, nid_t ino, pgoff_t idx)
+static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
+ struct page *page, nid_t ino)
{
struct bio_vec *bvec;
struct page *target;
@@ -329,7 +351,7 @@ static bool __has_merged_page(struct f2fs_bio_info *io,
if (!io->bio)
return false;
- if (!inode && !ino)
+ if (!inode && !page && !ino)
return true;
bio_for_each_segment_all(bvec, io->bio, i) {
@@ -339,11 +361,10 @@ static bool __has_merged_page(struct f2fs_bio_info *io,
else
target = fscrypt_control_page(bvec->bv_page);
- if (idx != target->index)
- continue;
-
if (inode && inode == target->mapping->host)
return true;
+ if (page && page == target)
+ return true;
if (ino && ino == ino_of_node(target))
return true;
}
@@ -352,7 +373,8 @@ static bool __has_merged_page(struct f2fs_bio_info *io,
}
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
- nid_t ino, pgoff_t idx, enum page_type type)
+ struct page *page, nid_t ino,
+ enum page_type type)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
enum temp_type temp;
@@ -363,7 +385,7 @@ static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
io = sbi->write_io[btype] + temp;
down_read(&io->io_rwsem);
- ret = __has_merged_page(io, inode, ino, idx);
+ ret = __has_merged_page(io, inode, page, ino);
up_read(&io->io_rwsem);
/* TODO: use HOT temp only for meta pages now. */
@@ -394,12 +416,12 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
}
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, nid_t ino, pgoff_t idx,
- enum page_type type, bool force)
+ struct inode *inode, struct page *page,
+ nid_t ino, enum page_type type, bool force)
{
enum temp_type temp;
- if (!force && !has_merged_page(sbi, inode, ino, idx, type))
+ if (!force && !has_merged_page(sbi, inode, page, ino, type))
return;
for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
@@ -418,10 +440,10 @@ void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
}
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, nid_t ino, pgoff_t idx,
- enum page_type type)
+ struct inode *inode, struct page *page,
+ nid_t ino, enum page_type type)
{
- __submit_merged_write_cond(sbi, inode, ino, idx, type, false);
+ __submit_merged_write_cond(sbi, inode, page, ino, type, false);
}
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
@@ -456,12 +478,16 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
bio_put(bio);
return -EFAULT;
}
+
+ if (fio->io_wbc && !is_read_io(fio->op))
+ wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
+
bio_set_op_attrs(bio, fio->op, fio->op_flags);
- __submit_bio(fio->sbi, bio, fio->type);
+ inc_page_count(fio->sbi, is_read_io(fio->op) ?
+ __read_io_type(page): WB_DATA_TYPE(fio->page));
- if (!is_read_io(fio->op))
- inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
+ __submit_bio(fio->sbi, bio, fio->type);
return 0;
}
@@ -533,6 +559,9 @@ skip:
if (fio->in_list)
goto next;
out:
+ if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
+ f2fs_is_checkpoint_ready(sbi))
+ __submit_merged_bio(io);
up_write(&io->io_rwsem);
}
@@ -565,9 +594,6 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
ctx->bio = bio;
ctx->enabled_steps = post_read_steps;
bio->bi_private = ctx;
-
- /* wait the page to be moved by cleaning */
- f2fs_wait_on_block_writeback(sbi, blkaddr);
}
return bio;
@@ -582,10 +608,15 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
if (IS_ERR(bio))
return PTR_ERR(bio);
+ /* wait for GCed page writeback via META_MAPPING */
+ f2fs_wait_on_block_writeback(inode, blkaddr);
+
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
+ ClearPageError(page);
+ inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
__submit_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
@@ -876,7 +907,6 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
struct f2fs_summary sum;
struct node_info ni;
block_t old_blkaddr;
- pgoff_t fofs;
blkcnt_t count = 1;
int err;
@@ -889,7 +919,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
dn->data_blkaddr = datablock_addr(dn->inode,
dn->node_page, dn->ofs_in_node);
- if (dn->data_blkaddr == NEW_ADDR)
+ if (dn->data_blkaddr != NULL_ADDR)
goto alloc;
if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
@@ -905,12 +935,10 @@ alloc:
old_blkaddr, old_blkaddr);
f2fs_set_data_blkaddr(dn);
- /* update i_size */
- fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
- dn->ofs_in_node;
- if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
- f2fs_i_size_write(dn->inode,
- ((loff_t)(fofs + 1) << PAGE_SHIFT));
+ /*
+ * i_size will be updated by direct_IO. Otherwise, we'll get stale
+ * data from an unwritten block via dio_read.
+ */
return 0;
}
@@ -945,7 +973,7 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
if (direct_io) {
map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
- flag = f2fs_force_buffered_io(inode, WRITE) ?
+ flag = f2fs_force_buffered_io(inode, iocb, from) ?
F2FS_GET_BLOCK_PRE_AIO :
F2FS_GET_BLOCK_PRE_DIO;
goto map_blocks;
@@ -970,7 +998,7 @@ map_blocks:
return err;
}
-static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
+void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
if (flag == F2FS_GET_BLOCK_PRE_AIO) {
if (lock)
@@ -1025,6 +1053,11 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
map->m_flags = F2FS_MAP_MAPPED;
if (map->m_next_extent)
*map->m_next_extent = pgofs + map->m_len;
+
+ /* for hardware encryption, and to avoid a potential future issue */
+ if (flag == F2FS_GET_BLOCK_DIO)
+ f2fs_wait_on_block_writeback_range(inode,
+ map->m_pblk, map->m_len);
goto out;
}
@@ -1064,7 +1097,15 @@ next_block:
goto sync_out;
}
- if (!is_valid_data_blkaddr(sbi, blkaddr)) {
+ if (is_valid_data_blkaddr(sbi, blkaddr)) {
+ /* use out-of-place updates for direct IO under LFS mode */
+ if (test_opt(sbi, LFS) && create &&
+ flag == F2FS_GET_BLOCK_DIO) {
+ err = __allocate_data_block(&dn, map->m_seg_type);
+ if (!err)
+ set_inode_flag(inode, FI_APPEND_WRITE);
+ }
+ } else {
if (create) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
@@ -1076,6 +1117,8 @@ next_block:
last_ofs_in_node = dn.ofs_in_node;
}
} else {
+ WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
+ flag != F2FS_GET_BLOCK_DIO);
err = __allocate_data_block(&dn,
map->m_seg_type);
if (!err)
@@ -1173,6 +1216,12 @@ skip:
goto next_dnode;
sync_out:
+
+ /* for hardware encryption, and to avoid a potential future issue */
+ if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
+ f2fs_wait_on_block_writeback_range(inode,
+ map->m_pblk, map->m_len);
+
if (flag == F2FS_GET_BLOCK_PRECACHE) {
if (map->m_flags & F2FS_MAP_MAPPED) {
unsigned int ofs = start_pgofs - map->m_lblk;
@@ -1255,7 +1304,7 @@ static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DEFAULT, NULL,
+ F2FS_GET_BLOCK_DIO, NULL,
f2fs_rw_hint_to_seg_type(
inode->i_write_hint));
}
@@ -1558,9 +1607,17 @@ submit_and_realloc:
}
}
+ /*
+ * If the page is under writeback, we need to wait for
+ * its completion to see the correct decrypted data.
+ */
+ f2fs_wait_on_block_writeback(inode, block_nr);
+
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
goto submit_and_realloc;
+ inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
+ ClearPageError(page);
last_block_in_bio = block_nr;
goto next_page;
set_error_page:
@@ -1625,7 +1682,7 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
return 0;
/* wait for GCed page writeback via META_MAPPING */
- f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
+ f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
retry_encrypt:
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
@@ -1682,6 +1739,10 @@ static inline bool check_inplace_update_policy(struct inode *inode,
is_inode_flag_set(inode, FI_NEED_IPU))
return true;
+ if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
+ return true;
+
return false;
}
@@ -1705,6 +1766,8 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
return true;
if (S_ISDIR(inode->i_mode))
return true;
+ if (IS_NOQUOTA(inode))
+ return true;
if (f2fs_is_atomic_file(inode))
return true;
if (fio) {
@@ -1712,6 +1775,9 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
return true;
if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
return true;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
+ return true;
}
return false;
}
@@ -1763,6 +1829,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
ClearPageUptodate(page);
+ clear_cold_data(page);
goto out_writepage;
}
got_it:
@@ -1938,18 +2005,20 @@ done:
out:
inode_dec_dirty_pages(inode);
- if (err)
+ if (err) {
ClearPageUptodate(page);
+ clear_cold_data(page);
+ }
if (wbc->for_reclaim) {
- f2fs_submit_merged_write_cond(sbi, inode, 0, page->index, DATA);
+ f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
clear_inode_flag(inode, FI_HOT_DATA);
f2fs_remove_dirty_inode(inode);
submitted = NULL;
}
unlock_page(page);
- if (!S_ISDIR(inode->i_mode))
+ if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode))
f2fs_balance_fs(sbi, need_balance_fs);
if (unlikely(f2fs_cp_error(sbi))) {
@@ -2000,10 +2069,10 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
- pgoff_t last_idx = ULONG_MAX;
int cycled;
int range_whole = 0;
int tag;
+ int nwritten = 0;
pagevec_init(&pvec);
@@ -2106,7 +2175,7 @@ continue_unlock:
done = 1;
break;
} else if (submitted) {
- last_idx = page->index;
+ nwritten++;
}
if (--wbc->nr_to_write <= 0 &&
@@ -2128,9 +2197,9 @@ continue_unlock:
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
- if (last_idx != ULONG_MAX)
+ if (nwritten)
f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
- 0, last_idx, DATA);
+ NULL, 0, DATA);
return ret;
}
@@ -2140,6 +2209,8 @@ static inline bool __should_serialize_io(struct inode *inode,
{
if (!S_ISREG(inode->i_mode))
return false;
+ if (IS_NOQUOTA(inode))
+ return false;
if (wbc->sync_mode != WB_SYNC_ALL)
return true;
if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
@@ -2169,7 +2240,8 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto skip_write;
- if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
+ if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
+ wbc->sync_mode == WB_SYNC_NONE &&
get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
f2fs_available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
@@ -2234,7 +2306,7 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_pagecache(inode, i_size);
- f2fs_truncate_blocks(inode, i_size, true);
+ f2fs_truncate_blocks(inode, i_size, true, true);
up_write(&F2FS_I(inode)->i_mmap_sem);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -2332,6 +2404,10 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
trace_f2fs_write_begin(inode, pos, len, flags);
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ goto fail;
+
if ((f2fs_is_atomic_file(inode) &&
!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
@@ -2369,7 +2445,8 @@ repeat:
if (err)
goto fail;
- if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
+ if (need_balance && !IS_NOQUOTA(inode) &&
+ has_not_enough_free_secs(sbi, 0, 0)) {
unlock_page(page);
f2fs_balance_fs(sbi, true);
lock_page(page);
@@ -2382,10 +2459,6 @@ repeat:
f2fs_wait_on_page_writeback(page, DATA, false);
- /* wait for GCed page writeback via META_MAPPING */
- if (f2fs_post_read_required(inode))
- f2fs_wait_on_block_writeback(sbi, blkaddr);
-
if (len == PAGE_SIZE || PageUptodate(page))
return 0;
@@ -2480,36 +2553,53 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
size_t count = iov_iter_count(iter);
loff_t offset = iocb->ki_pos;
int rw = iov_iter_rw(iter);
int err;
enum rw_hint hint = iocb->ki_hint;
int whint_mode = F2FS_OPTION(sbi).whint_mode;
+ bool do_opu;
err = check_direct_IO(inode, iter, offset);
if (err)
return err < 0 ? err : 0;
- if (f2fs_force_buffered_io(inode, rw))
+ if (f2fs_force_buffered_io(inode, iocb, iter))
return 0;
+ do_opu = allow_outplace_dio(inode, iocb, iter);
+
trace_f2fs_direct_IO_enter(inode, offset, count, rw);
if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
iocb->ki_hint = WRITE_LIFE_NOT_SET;
- if (!down_read_trylock(&F2FS_I(inode)->i_gc_rwsem[rw])) {
- if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
iocb->ki_hint = hint;
err = -EAGAIN;
goto out;
}
- down_read(&F2FS_I(inode)->i_gc_rwsem[rw]);
+ if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
+ up_read(&fi->i_gc_rwsem[rw]);
+ iocb->ki_hint = hint;
+ err = -EAGAIN;
+ goto out;
+ }
+ } else {
+ down_read(&fi->i_gc_rwsem[rw]);
+ if (do_opu)
+ down_read(&fi->i_gc_rwsem[READ]);
}
err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
- up_read(&F2FS_I(inode)->i_gc_rwsem[rw]);
+
+ if (do_opu)
+ up_read(&fi->i_gc_rwsem[READ]);
+
+ up_read(&fi->i_gc_rwsem[rw]);
if (rw == WRITE) {
if (whint_mode == WHINT_MODE_OFF)
@@ -2517,7 +2607,8 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
if (err > 0) {
f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
err);
- set_inode_flag(inode, FI_UPDATE_WRITE);
+ if (!do_opu)
+ set_inode_flag(inode, FI_UPDATE_WRITE);
} else if (err < 0) {
f2fs_write_failed(mapping, offset + count);
}
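
The reworked locking above takes the second rwsem only for out-of-place direct writes, and under IOCB_NOWAIT it must not sleep: if the second trylock fails, the first lock is rolled back and -EAGAIN is returned. A hedged pthread sketch of that ordering (assumed names, userspace analogy only):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t gc_rwsem_rw = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t gc_rwsem_rd = PTHREAD_RWLOCK_INITIALIZER;

static int lock_for_dio(bool nowait, bool do_opu)
{
	if (nowait) {
		if (pthread_rwlock_tryrdlock(&gc_rwsem_rw))
			return -EAGAIN;		/* would block: bail out */
		if (do_opu && pthread_rwlock_tryrdlock(&gc_rwsem_rd)) {
			pthread_rwlock_unlock(&gc_rwsem_rw); /* roll back */
			return -EAGAIN;
		}
		return 0;
	}
	pthread_rwlock_rdlock(&gc_rwsem_rw);	/* blocking path */
	if (do_opu)
		pthread_rwlock_rdlock(&gc_rwsem_rd);
	return 0;
}

int main(void)
{
	if (!lock_for_dio(true, true)) {
		puts("both locks held without sleeping");
		pthread_rwlock_unlock(&gc_rwsem_rd);
		pthread_rwlock_unlock(&gc_rwsem_rw);
	}
	return 0;
}
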
@@ -2550,6 +2641,8 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
}
}
+ clear_cold_data(page);
+
/* This is atomic written page, keep Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
return f2fs_drop_inmem_page(inode, page);
@@ -2568,6 +2661,7 @@ int f2fs_release_page(struct page *page, gfp_t wait)
if (IS_ATOMIC_WRITTEN_PAGE(page))
return 0;
+ clear_cold_data(page);
set_page_private(page, 0);
ClearPagePrivate(page);
return 1;
@@ -2583,10 +2677,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
if (!PageUptodate(page))
SetPageUptodate(page);
- /* don't remain PG_checked flag which was set during GC */
- if (is_cold_data(page))
- clear_cold_data(page);
-
if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
f2fs_register_inmem_page(inode, page);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 214a968962a1..139b4d5c83d5 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs debugging statistics
*
@@ -5,10 +6,6 @@
* http://www.samsung.com/
* Copyright (c) 2012 Linux Foundation
* Copyright (c) 2012 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
@@ -58,6 +55,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
+ si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA);
+ si->nr_rd_node = get_pages(sbi, F2FS_RD_NODE);
+ si->nr_rd_meta = get_pages(sbi, F2FS_RD_META);
if (SM_I(sbi) && SM_I(sbi)->fcc_info) {
si->nr_flushed =
atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
@@ -104,6 +104,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->avail_nids = NM_I(sbi)->available_nids;
si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
si->bg_gc = sbi->bg_gc;
+ si->io_skip_bggc = sbi->io_skip_bggc;
+ si->other_skip_bggc = sbi->other_skip_bggc;
si->skipped_atomic_files[BG_GC] = sbi->skipped_atomic_files[BG_GC];
si->skipped_atomic_files[FG_GC] = sbi->skipped_atomic_files[FG_GC];
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
@@ -121,6 +123,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->curzone[i] = GET_ZONE_FROM_SEC(sbi, si->cursec[i]);
}
+ for (i = META_CP; i < META_MAX; i++)
+ si->meta_count[i] = atomic_read(&sbi->meta_count[i]);
+
for (i = 0; i < 2; i++) {
si->segment_count[i] = sbi->segment_count[i];
si->block_count[i] = sbi->block_count[i];
@@ -190,8 +195,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
- if (f2fs_discard_en(sbi))
- si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += SIT_VBLOCK_MAP_SIZE;
if (sbi->segs_per_sec > 1)
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
@@ -271,7 +275,8 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, "\n=====[ partition info(%pg). #%d, %s, CP: %s]=====\n",
si->sbi->sb->s_bdev, i++,
f2fs_readonly(si->sbi->sb) ? "RO": "RW",
- f2fs_cp_error(si->sbi) ? "Error": "Good");
+ is_set_ckpt_flags(si->sbi, CP_DISABLED_FLAG) ?
+ "Disabled": (f2fs_cp_error(si->sbi) ? "Error": "Good"));
seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
si->sit_area_segs, si->nat_area_segs);
seq_printf(s, "[SSA: %d] [MAIN: %d",
@@ -333,6 +338,13 @@ static int stat_show(struct seq_file *s, void *v)
si->prefree_count, si->free_segs, si->free_secs);
seq_printf(s, "CP calls: %d (BG: %d)\n",
si->cp_count, si->bg_cp_count);
+ seq_printf(s, " - cp blocks : %u\n", si->meta_count[META_CP]);
+ seq_printf(s, " - sit blocks : %u\n",
+ si->meta_count[META_SIT]);
+ seq_printf(s, " - nat blocks : %u\n",
+ si->meta_count[META_NAT]);
+ seq_printf(s, " - ssa blocks : %u\n",
+ si->meta_count[META_SSA]);
seq_printf(s, "GC calls: %d (BG: %d)\n",
si->call_count, si->bg_gc);
seq_printf(s, " - data segments : %d (%d)\n",
@@ -349,6 +361,8 @@ static int stat_show(struct seq_file *s, void *v)
si->skipped_atomic_files[BG_GC] +
si->skipped_atomic_files[FG_GC],
si->skipped_atomic_files[BG_GC]);
+ seq_printf(s, "BG skip : IO: %u, Other: %u\n",
+ si->io_skip_bggc, si->other_skip_bggc);
seq_puts(s, "\nExtent Cache:\n");
seq_printf(s, " - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
si->hit_largest, si->hit_cached,
@@ -360,7 +374,9 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
si->ext_tree, si->zombie_tree, si->ext_node);
seq_puts(s, "\nBalancing F2FS Async:\n");
- seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
+ seq_printf(s, " - IO_R (Data: %4d, Node: %4d, Meta: %4d\n",
+ si->nr_rd_data, si->nr_rd_node, si->nr_rd_meta);
+ seq_printf(s, " - IO_W (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
"Discard: (%4d %4d)) cmd: %4d undiscard:%4u\n",
si->nr_wb_cp_data, si->nr_wb_data,
si->nr_flushing, si->nr_flushed,
@@ -445,6 +461,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_stat_info *si;
+ int i;
si = f2fs_kzalloc(sbi, sizeof(struct f2fs_stat_info), GFP_KERNEL);
if (!si)
@@ -470,6 +487,8 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
atomic_set(&sbi->inline_inode, 0);
atomic_set(&sbi->inline_dir, 0);
atomic_set(&sbi->inplace_count, 0);
+ for (i = META_CP; i < META_MAX; i++)
+ atomic_set(&sbi->meta_count[i], 0);
atomic_set(&sbi->aw_cnt, 0);
atomic_set(&sbi->vw_cnt, 0);
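
The new meta_count[] statistics follow a common pattern: one atomic counter per category, indexed by the enum that names the categories, zeroed at stat setup and bumped on each read. A small C11 sketch of the idea (illustrative types, not the kernel's atomic_t):

#include <stdatomic.h>
#include <stdio.h>

enum meta_type { META_CP, META_NAT, META_SIT, META_SSA, META_MAX };

static atomic_int meta_count[META_MAX];

int main(void)
{
	for (int i = META_CP; i < META_MAX; i++)
		atomic_store(&meta_count[i], 0);	/* f2fs_build_stats() */

	atomic_fetch_add(&meta_count[META_NAT], 1);	/* per-read bump */
	printf("nat blocks: %d\n", atomic_load(&meta_count[META_NAT]));
	return 0;
}
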
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index ecc3a4e2be96..2ef84b4590ea 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/dir.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -658,9 +655,9 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
f2fs_put_page(page, 1);
clear_inode_flag(inode, FI_NEW_INODE);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
fail:
up_write(&F2FS_I(inode)->i_sem);
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return err;
}
@@ -733,6 +730,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
clear_page_dirty_for_io(page);
ClearPagePrivate(page);
ClearPageUptodate(page);
+ clear_cold_data(page);
inode_dec_dirty_pages(dir);
f2fs_remove_dirty_inode(dir);
}
@@ -784,9 +782,15 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
struct f2fs_dir_entry *de = NULL;
struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
struct f2fs_sb_info *sbi = F2FS_I_SB(d->inode);
+ struct blk_plug plug;
+ bool readdir_ra = sbi->readdir_ra == 1;
+ int err = 0;
bit_pos = ((unsigned long)ctx->pos % d->max);
+ if (readdir_ra)
+ blk_start_plug(&plug);
+
while (bit_pos < d->max) {
bit_pos = find_next_bit_le(d->bitmap, d->max, bit_pos);
if (bit_pos >= d->max)
@@ -806,29 +810,33 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
if (f2fs_encrypted_inode(d->inode)) {
int save_len = fstr->len;
- int err;
err = fscrypt_fname_disk_to_usr(d->inode,
(u32)de->hash_code, 0,
&de_name, fstr);
if (err)
- return err;
+ goto out;
de_name = *fstr;
fstr->len = save_len;
}
if (!dir_emit(ctx, de_name.name, de_name.len,
- le32_to_cpu(de->ino), d_type))
- return 1;
+ le32_to_cpu(de->ino), d_type)) {
+ err = 1;
+ goto out;
+ }
- if (sbi->readdir_ra == 1)
+ if (readdir_ra)
f2fs_ra_node_page(sbi, le32_to_cpu(de->ino));
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
ctx->pos = start_pos + bit_pos;
}
- return 0;
+out:
+ if (readdir_ra)
+ blk_finish_plug(&plug);
+ return err;
}
static int f2fs_readdir(struct file *file, struct dir_context *ctx)
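
blk_start_plug()/blk_finish_plug() let the readahead in the loop above accumulate block requests and submit them as one batch when the plug is finished. A rough userspace analogy of the batching effect (assumed names; the real API queues bios per task):

#include <stdio.h>

#define PLUG_CAP 16

struct plug { int nr; int reqs[PLUG_CAP]; };

static void plug_flush(struct plug *p)
{
	if (p->nr)
		printf("submit %d requests in one batch\n", p->nr);
	p->nr = 0;
}

static void plug_submit(struct plug *p, int req)
{
	if (p->nr == PLUG_CAP)		/* flush early when full */
		plug_flush(p);
	p->reqs[p->nr++] = req;
}

int main(void)
{
	struct plug p = { 0 };

	for (int ino = 1; ino <= 5; ino++)
		plug_submit(&p, ino);	/* readahead piles up in the plug */
	plug_flush(&p);			/* blk_finish_plug() analogue */
	return 0;
}
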
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 231b77ef5a53..1cb0fcc67d2d 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs extent cache support
*
@@ -5,10 +6,6 @@
* Copyright (c) 2015 Samsung Electronics
* Authors: Jaegeuk Kim <jaegeuk@kernel.org>
* Chao Yu <chao2.yu@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
@@ -30,10 +27,10 @@ static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
return NULL;
}
-static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
+static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
unsigned int ofs)
{
- struct rb_node *node = root->rb_node;
+ struct rb_node *node = root->rb_root.rb_node;
struct rb_entry *re;
while (node) {
@@ -49,7 +46,7 @@ static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
return NULL;
}
-struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
struct rb_entry *cached_re, unsigned int ofs)
{
struct rb_entry *re;
@@ -62,22 +59,25 @@ struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
}
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
- struct rb_root *root, struct rb_node **parent,
- unsigned int ofs)
+ struct rb_root_cached *root,
+ struct rb_node **parent,
+ unsigned int ofs, bool *leftmost)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_entry *re;
while (*p) {
*parent = *p;
re = rb_entry(*parent, struct rb_entry, rb_node);
- if (ofs < re->ofs)
+ if (ofs < re->ofs) {
p = &(*p)->rb_left;
- else if (ofs >= re->ofs + re->len)
+ } else if (ofs >= re->ofs + re->len) {
p = &(*p)->rb_right;
- else
+ *leftmost = false;
+ } else {
f2fs_bug_on(sbi, 1);
+ }
}
return p;
@@ -92,16 +92,16 @@ struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
* in order to simplify the insertion afterwards.
* tree must stay unchanged between lookup and insertion.
*/
-struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
struct rb_entry *cached_re,
unsigned int ofs,
struct rb_entry **prev_entry,
struct rb_entry **next_entry,
struct rb_node ***insert_p,
struct rb_node **insert_parent,
- bool force)
+ bool force, bool *leftmost)
{
- struct rb_node **pnode = &root->rb_node;
+ struct rb_node **pnode = &root->rb_root.rb_node;
struct rb_node *parent = NULL, *tmp_node;
struct rb_entry *re = cached_re;
@@ -110,7 +110,7 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
*prev_entry = NULL;
*next_entry = NULL;
- if (RB_EMPTY_ROOT(root))
+ if (RB_EMPTY_ROOT(&root->rb_root))
return NULL;
if (re) {
@@ -118,16 +118,22 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
goto lookup_neighbors;
}
+ if (leftmost)
+ *leftmost = true;
+
while (*pnode) {
parent = *pnode;
re = rb_entry(*pnode, struct rb_entry, rb_node);
- if (ofs < re->ofs)
+ if (ofs < re->ofs) {
pnode = &(*pnode)->rb_left;
- else if (ofs >= re->ofs + re->len)
+ } else if (ofs >= re->ofs + re->len) {
pnode = &(*pnode)->rb_right;
- else
+ if (leftmost)
+ *leftmost = false;
+ } else {
goto lookup_neighbors;
+ }
}
*insert_p = pnode;
@@ -160,10 +166,10 @@ lookup_neighbors:
}
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
- struct rb_root *root)
+ struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
- struct rb_node *cur = rb_first(root), *next;
+ struct rb_node *cur = rb_first_cached(root), *next;
struct rb_entry *cur_re, *next_re;
if (!cur)
@@ -196,7 +202,8 @@ static struct kmem_cache *extent_node_slab;
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_info *ei,
- struct rb_node *parent, struct rb_node **p)
+ struct rb_node *parent, struct rb_node **p,
+ bool leftmost)
{
struct extent_node *en;
@@ -209,7 +216,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
en->et = et;
rb_link_node(&en->rb_node, parent, p);
- rb_insert_color(&en->rb_node, &et->root);
+ rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
atomic_inc(&et->node_cnt);
atomic_inc(&sbi->total_ext_node);
return en;
@@ -218,7 +225,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
static void __detach_extent_node(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_node *en)
{
- rb_erase(&en->rb_node, &et->root);
+ rb_erase_cached(&en->rb_node, &et->root);
atomic_dec(&et->node_cnt);
atomic_dec(&sbi->total_ext_node);
@@ -257,7 +264,7 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
memset(et, 0, sizeof(struct extent_tree));
et->ino = ino;
- et->root = RB_ROOT;
+ et->root = RB_ROOT_CACHED;
et->cached_en = NULL;
rwlock_init(&et->lock);
INIT_LIST_HEAD(&et->list);
@@ -278,10 +285,10 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_info *ei)
{
- struct rb_node **p = &et->root.rb_node;
+ struct rb_node **p = &et->root.rb_root.rb_node;
struct extent_node *en;
- en = __attach_extent_node(sbi, et, ei, NULL, p);
+ en = __attach_extent_node(sbi, et, ei, NULL, p, true);
if (!en)
return NULL;
@@ -297,7 +304,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
struct extent_node *en;
unsigned int count = atomic_read(&et->node_cnt);
- node = rb_first(&et->root);
+ node = rb_first_cached(&et->root);
while (node) {
next = rb_next(node);
en = rb_entry(node, struct extent_node, rb_node);
@@ -308,14 +315,13 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
return count - atomic_read(&et->node_cnt);
}
-static void __drop_largest_extent(struct inode *inode,
+static void __drop_largest_extent(struct extent_tree *et,
pgoff_t fofs, unsigned int len)
{
- struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
-
- if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
- largest->len = 0;
- f2fs_mark_inode_dirty_sync(inode, true);
+ if (fofs < et->largest.fofs + et->largest.len &&
+ fofs + len > et->largest.fofs) {
+ et->largest.len = 0;
+ et->largest_updated = true;
}
}
@@ -416,12 +422,11 @@ out:
return ret;
}
-static struct extent_node *__try_merge_extent_node(struct inode *inode,
+static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_info *ei,
struct extent_node *prev_ex,
struct extent_node *next_ex)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_node *en = NULL;
if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
@@ -443,7 +448,7 @@ static struct extent_node *__try_merge_extent_node(struct inode *inode,
if (!en)
return NULL;
- __try_update_largest_extent(inode, et, en);
+ __try_update_largest_extent(et, en);
spin_lock(&sbi->extent_lock);
if (!list_empty(&en->list)) {
@@ -454,12 +459,12 @@ static struct extent_node *__try_merge_extent_node(struct inode *inode,
return en;
}
-static struct extent_node *__insert_extent_tree(struct inode *inode,
+static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_info *ei,
struct rb_node **insert_p,
- struct rb_node *insert_parent)
+ struct rb_node *insert_parent,
+ bool leftmost)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct rb_node **p;
struct rb_node *parent = NULL;
struct extent_node *en = NULL;
@@ -470,13 +475,16 @@ static struct extent_node *__insert_extent_tree(struct inode *inode,
goto do_insert;
}
- p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
+ leftmost = true;
+
+ p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
+ ei->fofs, &leftmost);
do_insert:
- en = __attach_extent_node(sbi, et, ei, parent, p);
+ en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
if (!en)
return NULL;
- __try_update_largest_extent(inode, et, en);
+ __try_update_largest_extent(et, en);
/* update in global extent list */
spin_lock(&sbi->extent_lock);
@@ -497,6 +505,8 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
struct rb_node **insert_p = NULL, *insert_parent = NULL;
unsigned int end = fofs + len;
unsigned int pos = (unsigned int)fofs;
+ bool updated = false;
+ bool leftmost;
if (!et)
return;
@@ -517,14 +527,15 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
* drop largest extent before lookup, in case it's already
* been shrunk from extent tree
*/
- __drop_largest_extent(inode, fofs, len);
+ __drop_largest_extent(et, fofs, len);
/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
(struct rb_entry *)et->cached_en, fofs,
(struct rb_entry **)&prev_en,
(struct rb_entry **)&next_en,
- &insert_p, &insert_parent, false);
+ &insert_p, &insert_parent, false,
+ &leftmost);
if (!en)
en = next_en;
@@ -550,8 +561,8 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
set_extent_info(&ei, end,
end - dei.fofs + dei.blk,
org_end - end);
- en1 = __insert_extent_tree(inode, et, &ei,
- NULL, NULL);
+ en1 = __insert_extent_tree(sbi, et, &ei,
+ NULL, NULL, true);
next_en = en1;
} else {
en->ei.fofs = end;
@@ -570,7 +581,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
}
if (parts)
- __try_update_largest_extent(inode, et, en);
+ __try_update_largest_extent(et, en);
else
__release_extent_node(sbi, et, en);
@@ -590,15 +601,16 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
if (blkaddr) {
set_extent_info(&ei, fofs, blkaddr, len);
- if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
- __insert_extent_tree(inode, et, &ei,
- insert_p, insert_parent);
+ if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+ __insert_extent_tree(sbi, et, &ei,
+ insert_p, insert_parent, leftmost);
/* give up extent_cache, if split and small updates happen */
if (dei.len >= 1 &&
prev.len < F2FS_MIN_EXTENT_LEN &&
et->largest.len < F2FS_MIN_EXTENT_LEN) {
- __drop_largest_extent(inode, 0, UINT_MAX);
+ et->largest.len = 0;
+ et->largest_updated = true;
set_inode_flag(inode, FI_NO_EXTENT);
}
}
@@ -606,7 +618,15 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
if (is_inode_flag_set(inode, FI_NO_EXTENT))
__free_extent_tree(sbi, et);
+ if (et->largest_updated) {
+ et->largest_updated = false;
+ updated = true;
+ }
+
write_unlock(&et->lock);
+
+ if (updated)
+ f2fs_mark_inode_dirty_sync(inode, true);
}
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -705,6 +725,7 @@ void f2fs_drop_extent_tree(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree *et = F2FS_I(inode)->extent_tree;
+ bool updated = false;
if (!f2fs_may_extent_tree(inode))
return;
@@ -713,8 +734,13 @@ void f2fs_drop_extent_tree(struct inode *inode)
write_lock(&et->lock);
__free_extent_tree(sbi, et);
- __drop_largest_extent(inode, 0, UINT_MAX);
+ if (et->largest.len) {
+ et->largest.len = 0;
+ updated = true;
+ }
write_unlock(&et->lock);
+ if (updated)
+ f2fs_mark_inode_dirty_sync(inode, true);
}
void f2fs_destroy_extent_tree(struct inode *inode)
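
The conversion above switches the extent and discard trees from rb_root to rb_root_cached, which caches the leftmost node so rb_first_cached() is O(1); the cost is that every insert must report whether the new node became the leftmost. A sketch of that insert shape against the kernel rbtree API (the payload struct is hypothetical):

struct sample_node {			/* hypothetical payload */
	unsigned int ofs;
	struct rb_node rb_node;
};

static void sample_insert(struct rb_root_cached *root,
			  struct sample_node *new)
{
	struct rb_node **p = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct sample_node *e = rb_entry(*p, struct sample_node,
						 rb_node);

		parent = *p;
		if (new->ofs < e->ofs) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* descended right once */
		}
	}
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color_cached(&new->rb_node, root, leftmost);
}
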
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index abf925664d9c..56204a8f8a12 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1,16 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/f2fs.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H
+#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
@@ -53,9 +51,10 @@ enum {
FAULT_DIR_DEPTH,
FAULT_EVICT_INODE,
FAULT_TRUNCATE,
- FAULT_IO,
+ FAULT_READ_IO,
FAULT_CHECKPOINT,
FAULT_DISCARD,
+ FAULT_WRITE_IO,
FAULT_MAX,
};
@@ -100,6 +99,7 @@ extern char *f2fs_fault_name[FAULT_MAX];
#define F2FS_MOUNT_QUOTA 0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000
#define F2FS_MOUNT_RESERVE_ROOT 0x01000000
+#define F2FS_MOUNT_DISABLE_CHECKPOINT 0x02000000
#define F2FS_OPTION(sbi) ((sbi)->mount_opt)
#define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -150,6 +150,7 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_INODE_CRTIME 0x0100
#define F2FS_FEATURE_LOST_FOUND 0x0200
#define F2FS_FEATURE_VERITY 0x0400 /* reserved */
+#define F2FS_FEATURE_SB_CHKSUM 0x0800
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -178,6 +179,7 @@ enum {
#define CP_RECOVERY 0x00000008
#define CP_DISCARD 0x00000010
#define CP_TRIMMED 0x00000020
+#define CP_PAUSE 0x00000040
#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
@@ -187,6 +189,7 @@ enum {
#define DEF_DISCARD_URGENT_UTIL 80 /* do more discard over 80% */
#define DEF_CP_INTERVAL 60 /* 60 secs */
#define DEF_IDLE_INTERVAL 5 /* 5 secs */
+#define DEF_DISABLE_INTERVAL 5 /* 5 secs */
struct cp_control {
int reason;
@@ -203,6 +206,7 @@ enum {
META_NAT,
META_SIT,
META_SSA,
+ META_MAX,
META_POR,
DATA_GENERIC,
META_GENERIC,
@@ -324,7 +328,7 @@ struct discard_cmd_control {
atomic_t issued_discard; /* # of issued discard */
atomic_t issing_discard; /* # of issuing discard */
atomic_t discard_cmd_cnt; /* # of cached cmd count */
- struct rb_root root; /* root of discard rb-tree */
+ struct rb_root_cached root; /* root of discard rb-tree */
bool rbtree_check; /* config for consistency check */
};
@@ -527,6 +531,9 @@ enum {
#define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO count */
+/* maximum retry quota flush count */
+#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8
+
#define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */
#define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */
@@ -566,12 +573,13 @@ struct extent_node {
struct extent_tree {
nid_t ino; /* inode number */
- struct rb_root root; /* root of extent info rb-tree */
+ struct rb_root_cached root; /* root of extent info rb-tree */
struct extent_node *cached_en; /* recently accessed extent node */
struct extent_info largest; /* largest extent info */
struct list_head list; /* to be used by sbi->zombie_list */
rwlock_t lock; /* protect extent info rb-tree */
atomic_t node_cnt; /* # of extent node in rb-tree*/
+ bool largest_updated; /* largest extent updated */
};
/*
@@ -600,6 +608,7 @@ enum {
F2FS_GET_BLOCK_DEFAULT,
F2FS_GET_BLOCK_FIEMAP,
F2FS_GET_BLOCK_BMAP,
+ F2FS_GET_BLOCK_DIO,
F2FS_GET_BLOCK_PRE_DIO,
F2FS_GET_BLOCK_PRE_AIO,
F2FS_GET_BLOCK_PRECACHE,
@@ -754,12 +763,12 @@ static inline bool __is_front_mergeable(struct extent_info *cur,
}
extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
-static inline void __try_update_largest_extent(struct inode *inode,
- struct extent_tree *et, struct extent_node *en)
+static inline void __try_update_largest_extent(struct extent_tree *et,
+ struct extent_node *en)
{
if (en->ei.len > et->largest.len) {
et->largest = en->ei;
- f2fs_mark_inode_dirty_sync(inode, true);
+ et->largest_updated = true;
}
}
@@ -944,6 +953,9 @@ enum count_type {
F2FS_DIRTY_IMETA,
F2FS_WB_CP_DATA,
F2FS_WB_DATA,
+ F2FS_RD_DATA,
+ F2FS_RD_NODE,
+ F2FS_RD_META,
NR_COUNT_TYPE,
};
@@ -1088,11 +1100,19 @@ enum {
SBI_NEED_SB_WRITE, /* need to recover superblock */
SBI_NEED_CP, /* need to checkpoint */
SBI_IS_SHUTDOWN, /* shutdown by ioctl */
+ SBI_IS_RECOVERED, /* recovered orphan/data */
+ SBI_CP_DISABLED, /* CP was disabled last mount */
+ SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */
+ SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */
+ SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */
};
enum {
CP_TIME,
REQ_TIME,
+ DISCARD_TIME,
+ GC_TIME,
+ DISABLE_TIME,
MAX_TIME,
};
@@ -1209,7 +1229,6 @@ struct f2fs_sb_info {
unsigned int total_valid_node_count; /* valid node block count */
loff_t max_file_blocks; /* max block index of file */
int dir_level; /* directory level */
- unsigned int trigger_ssr_threshold; /* threshold to trigger ssr */
int readdir_ra; /* readahead inode in readdir */
block_t user_block_count; /* # of user blocks */
@@ -1219,6 +1238,9 @@ struct f2fs_sb_info {
block_t reserved_blocks; /* configurable reserved blocks */
block_t current_reserved_blocks; /* current reserved blocks */
+ /* Additional tracking for no checkpoint mode */
+ block_t unusable_block_count; /* # of blocks saved by last cp */
+
unsigned int nquota_files; /* # of quota sysfile */
u32 s_next_generation; /* for NFS support */
@@ -1257,6 +1279,7 @@ struct f2fs_sb_info {
*/
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info *stat_info; /* FS status information */
+ atomic_t meta_count[META_MAX]; /* # of meta blocks */
unsigned int segment_count[2]; /* # of allocated segments */
unsigned int block_count[2]; /* # of allocated blocks */
atomic_t inplace_count; /* # of inplace update */
@@ -1272,6 +1295,8 @@ struct f2fs_sb_info {
atomic_t max_aw_cnt; /* max # of atomic writes */
atomic_t max_vw_cnt; /* max # of volatile writes */
int bg_gc; /* background gc calls */
+ unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
+ unsigned int other_skip_bggc; /* skip background gc for other reasons */
unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
#endif
spinlock_t stat_lock; /* lock for stat operations */
@@ -1306,9 +1331,9 @@ struct f2fs_sb_info {
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
-#define f2fs_show_injection_info(type) \
- printk("%sF2FS-fs : inject %s in %s of %pF\n", \
- KERN_INFO, f2fs_fault_name[type], \
+#define f2fs_show_injection_info(type) \
+ printk_ratelimited("%sF2FS-fs : inject %s in %s of %pF\n", \
+ KERN_INFO, f2fs_fault_name[type], \
__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
@@ -1344,7 +1369,15 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
- sbi->last_time[type] = jiffies;
+ unsigned long now = jiffies;
+
+ sbi->last_time[type] = now;
+
+ /* DISCARD_TIME and GC_TIME are based on REQ_TIME */
+ if (type == REQ_TIME) {
+ sbi->last_time[DISCARD_TIME] = now;
+ sbi->last_time[GC_TIME] = now;
+ }
}
static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
@@ -1354,16 +1387,18 @@ static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
return time_after(jiffies, sbi->last_time[type] + interval);
}
-static inline bool is_idle(struct f2fs_sb_info *sbi)
+static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
+ int type)
{
- struct block_device *bdev = sbi->sb->s_bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- struct request_list *rl = &q->root_rl;
+ unsigned long interval = sbi->interval_time[type] * HZ;
+ unsigned int wait_ms = 0;
+ long delta;
- if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
- return false;
+ delta = (sbi->last_time[type] + interval) - jiffies;
+ if (delta > 0)
+ wait_ms = jiffies_to_msecs(delta);
- return f2fs_time_over(sbi, REQ_TIME);
+ return wait_ms;
}
/*
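
f2fs_time_to_wait() inverts f2fs_time_over(): instead of a yes/no answer it returns how many milliseconds remain before the interval expires, clamped at zero. A runnable userspace sketch of the same arithmetic on a monotonic clock:

#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static unsigned int time_to_wait(long long last_ms, long long interval_ms)
{
	long long delta = (last_ms + interval_ms) - now_ms();

	return delta > 0 ? (unsigned int)delta : 0;	/* 0: already due */
}

int main(void)
{
	printf("wait ~%u ms\n", time_to_wait(now_ms(), 5000));
	return 0;
}
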
@@ -1704,7 +1739,8 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
if (!__allow_reserved_blocks(sbi, inode, true))
avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
-
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ avail_user_block_count -= sbi->unusable_block_count;
if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
diff = sbi->total_valid_block_count - avail_user_block_count;
if (diff > *count)
@@ -1755,7 +1791,9 @@ static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
atomic_inc(&sbi->nr_pages[count_type]);
if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
- count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA)
+ count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA ||
+ count_type == F2FS_RD_DATA || count_type == F2FS_RD_NODE ||
+ count_type == F2FS_RD_META)
return;
set_sbi_flag(sbi, SBI_IS_DIRTY);
@@ -1891,12 +1929,18 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
{
block_t valid_block_count;
unsigned int valid_node_count;
- bool quota = inode && !is_inode;
+ int err;
- if (quota) {
- int ret = dquot_reserve_block(inode, 1);
- if (ret)
- return ret;
+ if (is_inode) {
+ if (inode) {
+ err = dquot_alloc_inode(inode);
+ if (err)
+ return err;
+ }
+ } else {
+ err = dquot_reserve_block(inode, 1);
+ if (err)
+ return err;
}
if (time_to_inject(sbi, FAULT_BLOCK)) {
@@ -1911,6 +1955,8 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
if (!__allow_reserved_blocks(sbi, inode, false))
valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ valid_block_count += sbi->unusable_block_count;
if (unlikely(valid_block_count > sbi->user_block_count)) {
spin_unlock(&sbi->stat_lock);
@@ -1938,8 +1984,12 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
return 0;
enospc:
- if (quota)
+ if (is_inode) {
+ if (inode)
+ dquot_free_inode(inode);
+ } else {
dquot_release_reservation_block(inode, 1);
+ }
return -ENOSPC;
}
@@ -1960,7 +2010,9 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
spin_unlock(&sbi->stat_lock);
- if (!is_inode)
+ if (is_inode)
+ dquot_free_inode(inode);
+ else
f2fs_i_blocks_write(inode, 1, false, true);
}
@@ -2090,6 +2142,15 @@ static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
return bio_alloc(GFP_KERNEL, npages);
}
+static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
+{
+ if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
+ get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
+ get_pages(sbi, F2FS_WB_CP_DATA))
+ return false;
+ return f2fs_time_over(sbi, type);
+}
+
static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
unsigned long index, void *item)
{
@@ -2739,7 +2800,8 @@ static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
*/
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
+ bool buf_write);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
@@ -2749,6 +2811,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
int f2fs_pin_file_control(struct inode *inode, bool inc);
/*
@@ -2827,6 +2890,7 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
+int f2fs_quota_sync(struct super_block *sb, int type);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
@@ -2869,7 +2933,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
-void f2fs_move_node_page(struct page *node_page, int gc_type);
+int f2fs_move_node_page(struct page *node_page, int gc_type);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic,
unsigned int *seq_id);
@@ -2886,7 +2950,7 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum);
-void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
+int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
@@ -2914,6 +2978,8 @@ void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
struct cp_control *cpc);
+void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
+int f2fs_disable_cp_again(struct f2fs_sb_info *sbi);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
@@ -2942,7 +3008,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
struct f2fs_io_info *fio, bool add_list);
void f2fs_wait_on_page_writeback(struct page *page,
enum page_type type, bool ordered);
-void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr);
+void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
+void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
+ block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
@@ -3002,8 +3070,8 @@ int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
- struct inode *inode, nid_t ino, pgoff_t idx,
- enum page_type type);
+ struct inode *inode, struct page *page,
+ nid_t ino, enum page_type type);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
@@ -3025,6 +3093,7 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
struct page *f2fs_get_new_data_page(struct inode *inode,
struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
+void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -3077,6 +3146,8 @@ struct f2fs_stat_info {
int free_nids, avail_nids, alloc_nids;
int total_count, utilization;
int bg_gc, nr_wb_cp_data, nr_wb_data;
+ int nr_rd_data, nr_rd_node, nr_rd_meta;
+ unsigned int io_skip_bggc, other_skip_bggc;
int nr_flushing, nr_flushed, flush_list_empty;
int nr_discarding, nr_discarded;
int nr_discard_cmd;
@@ -3098,6 +3169,7 @@ struct f2fs_stat_info {
int cursec[NR_CURSEG_TYPE];
int curzone[NR_CURSEG_TYPE];
+ unsigned int meta_count[META_MAX];
unsigned int segment_count[2];
unsigned int block_count[2];
unsigned int inplace_count;
@@ -3113,6 +3185,8 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
#define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
#define stat_inc_call_count(si) ((si)->call_count++)
#define stat_inc_bggc_count(sbi) ((sbi)->bg_gc++)
+#define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
+#define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext))
@@ -3149,6 +3223,17 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
if (f2fs_has_inline_dentry(inode)) \
(atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \
} while (0)
+#define stat_inc_meta_count(sbi, blkaddr) \
+ do { \
+ if (blkaddr < SIT_I(sbi)->sit_base_addr) \
+ atomic_inc(&(sbi)->meta_count[META_CP]); \
+ else if (blkaddr < NM_I(sbi)->nat_blkaddr) \
+ atomic_inc(&(sbi)->meta_count[META_SIT]); \
+ else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \
+ atomic_inc(&(sbi)->meta_count[META_NAT]); \
+ else if (blkaddr < SM_I(sbi)->main_blkaddr) \
+ atomic_inc(&(sbi)->meta_count[META_SSA]); \
+ } while (0)
#define stat_inc_seg_type(sbi, curseg) \
((sbi)->segment_count[(curseg)->alloc_type]++)
#define stat_inc_block_count(sbi, curseg) \
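
stat_inc_meta_count() above buckets a meta block by comparing its address against the ascending base addresses of the on-disk areas (CP < SIT < NAT < SSA < MAIN). A runnable sketch with made-up layout values; the real bases come from the superblock:

#include <stdio.h>

typedef unsigned int block_t;

/* Illustrative area bases only, in ascending order. */
static const block_t sit_base = 512, nat_base = 1024,
		     ssa_base = 2048, main_base = 4096;

static const char *classify(block_t blkaddr)
{
	if (blkaddr < sit_base)
		return "cp";
	else if (blkaddr < nat_base)
		return "sit";
	else if (blkaddr < ssa_base)
		return "nat";
	else if (blkaddr < main_base)
		return "ssa";
	return "main";
}

int main(void)
{
	printf("%s %s\n", classify(700), classify(3000));	/* sit ssa */
	return 0;
}
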
@@ -3218,6 +3303,8 @@ void f2fs_destroy_root_stats(void);
#define stat_inc_bg_cp_count(si) do { } while (0)
#define stat_inc_call_count(si) do { } while (0)
#define stat_inc_bggc_count(si) do { } while (0)
+#define stat_io_skip_bggc_count(sbi) do { } while (0)
+#define stat_other_skip_bggc_count(sbi) do { } while (0)
#define stat_inc_dirty_inode(sbi, type) do { } while (0)
#define stat_dec_dirty_inode(sbi, type) do { } while (0)
#define stat_inc_total_hit(sb) do { } while (0)
@@ -3236,6 +3323,7 @@ void f2fs_destroy_root_stats(void);
#define stat_inc_volatile_write(inode) do { } while (0)
#define stat_dec_volatile_write(inode) do { } while (0)
#define stat_update_max_volatile_write(inode) do { } while (0)
+#define stat_inc_meta_count(sbi, blkaddr) do { } while (0)
#define stat_inc_seg_type(sbi, curseg) do { } while (0)
#define stat_inc_block_count(sbi, curseg) do { } while (0)
#define stat_inc_inplace_blocks(sbi) do { } while (0)
@@ -3305,18 +3393,19 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
* extent_cache.c
*/
-struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
- struct rb_root *root, struct rb_node **parent,
- unsigned int ofs);
-struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
+ struct rb_root_cached *root,
+ struct rb_node **parent,
+ unsigned int ofs, bool *leftmost);
+struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
struct rb_entry *cached_re, unsigned int ofs,
struct rb_entry **prev_entry, struct rb_entry **next_entry,
struct rb_node ***insert_p, struct rb_node **insert_parent,
- bool force);
+ bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
- struct rb_root *root);
+ struct rb_root_cached *root);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext);
void f2fs_drop_extent_tree(struct inode *inode);
@@ -3356,7 +3445,7 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
file_set_encrypt(inode);
- inode->i_flags |= S_ENCRYPTED;
+ f2fs_set_inode_flags(inode);
#endif
}
@@ -3384,6 +3473,7 @@ F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
+F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
#ifdef CONFIG_BLK_DEV_ZONED
static inline int get_blkz_type(struct f2fs_sb_info *sbi,
@@ -3399,11 +3489,20 @@ static inline int get_blkz_type(struct f2fs_sb_info *sbi,
}
#endif
-static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
+static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
- struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
+ return f2fs_sb_has_blkzoned(sbi->sb);
+}
- return blk_queue_discard(q) || f2fs_sb_has_blkzoned(sbi->sb);
+static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
+{
+ return blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev));
+}
+
+static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
+{
+ return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
+ f2fs_hw_should_discard(sbi);
}
static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
@@ -3432,11 +3531,50 @@ static inline bool f2fs_may_encrypt(struct inode *inode)
#endif
}
-static inline bool f2fs_force_buffered_io(struct inode *inode, int rw)
+static inline int block_unaligned_IO(struct inode *inode,
+ struct kiocb *iocb, struct iov_iter *iter)
{
- return (f2fs_post_read_required(inode) ||
- (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
- F2FS_I_SB(inode)->s_ndevs);
+ unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
+ unsigned int blocksize_mask = (1 << i_blkbits) - 1;
+ loff_t offset = iocb->ki_pos;
+ unsigned long align = offset | iov_iter_alignment(iter);
+
+ return align & blocksize_mask;
+}
+
+static inline int allow_outplace_dio(struct inode *inode,
+ struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int rw = iov_iter_rw(iter);
+
+ return (test_opt(sbi, LFS) && (rw == WRITE) &&
+ !block_unaligned_IO(inode, iocb, iter));
+}
+
+static inline bool f2fs_force_buffered_io(struct inode *inode,
+ struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int rw = iov_iter_rw(iter);
+
+ if (f2fs_post_read_required(inode))
+ return true;
+ if (sbi->s_ndevs)
+ return true;
+ /*
+ * for blkzoned device, fall back from direct IO to buffered IO, so
+ * all IOs can be serialized by log-structured write.
+ */
+ if (f2fs_sb_has_blkzoned(sbi->sb))
+ return true;
+ if (test_opt(sbi, LFS) && (rw == WRITE) &&
+ block_unaligned_IO(inode, iocb, iter))
+ return true;
+ if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED))
+ return true;
+
+ return false;
}
#ifdef CONFIG_F2FS_FAULT_INJECTION
@@ -3447,3 +3585,16 @@ extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
#endif
#endif
+
+static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
+{
+#ifdef CONFIG_QUOTA
+ if (f2fs_sb_has_quota_ino(sbi->sb))
+ return true;
+ if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
+ F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
+ F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
+ return true;
+#endif
+ return false;
+}
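
block_unaligned_IO() ORs the file offset with the iterator's address/length alignment and masks by the block size, so one test catches any misaligned component. A runnable userspace sketch of the same check:

#include <stdint.h>
#include <stdio.h>

static int block_unaligned_io(unsigned int blkbits, long long offset,
			      uintptr_t buf, unsigned long len)
{
	unsigned long mask = (1UL << blkbits) - 1;

	return (int)(((unsigned long long)offset | buf | len) & mask);
}

int main(void)
{
	/* 4 KiB blocks (blkbits = 12): 8192 is aligned, 8200 is not */
	printf("%d %d\n", !!block_unaligned_io(12, 8192, 0, 4096),
			  !!block_unaligned_io(12, 8200, 0, 4096));
	return 0;
}
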
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 5474aaa274b9..88b124677189 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/file.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -50,7 +47,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct dnode_of_data dn;
+ struct dnode_of_data dn = { .node_changed = false };
int err;
if (unlikely(f2fs_cp_error(sbi))) {
@@ -62,19 +59,6 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
- /* block allocation */
- f2fs_lock_op(sbi);
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = f2fs_reserve_block(&dn, page->index);
- if (err) {
- f2fs_unlock_op(sbi);
- goto out;
- }
- f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
-
- f2fs_balance_fs(sbi, dn.node_changed);
-
file_update_time(vmf->vma->vm_file);
down_read(&F2FS_I(inode)->i_mmap_sem);
lock_page(page);
@@ -86,11 +70,28 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
goto out_sem;
}
+ /* block allocation */
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = f2fs_get_block(&dn, page->index);
+ f2fs_put_dnode(&dn);
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ if (err) {
+ unlock_page(page);
+ goto out_sem;
+ }
+
+ /* fill the page */
+ f2fs_wait_on_page_writeback(page, DATA, false);
+
+ /* wait for GCed page writeback via META_MAPPING */
+ f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
+
/*
* check to see if the page is mapped already (no holes)
*/
if (PageMappedToDisk(page))
- goto mapped;
+ goto out_sem;
/* page is wholly or partially inside EOF */
if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
@@ -105,21 +106,15 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
SetPageUptodate(page);
f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
+ f2fs_update_time(sbi, REQ_TIME);
trace_f2fs_vm_page_mkwrite(page, DATA);
-mapped:
- /* fill the page */
- f2fs_wait_on_page_writeback(page, DATA, false);
-
- /* wait for GCed page writeback via META_MAPPING */
- if (f2fs_post_read_required(inode))
- f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);
-
out_sem:
up_read(&F2FS_I(inode)->i_mmap_sem);
-out:
+
+ f2fs_balance_fs(sbi, dn.node_changed);
+
sb_end_pagefault(inode->i_sb);
- f2fs_update_time(sbi, REQ_TIME);
err:
return block_page_mkwrite_return(err);
}
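
The mkwrite rework above moves block allocation under the mmap semaphore but pushes f2fs_balance_fs(), which may kick foreground GC and sleep, to after every lock is dropped. A hedged pthread sketch of that "heavyweight work outside the lock" ordering (names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static void balance_fs(bool need)
{
	if (need)
		puts("run foreground GC (may sleep)");
}

static void page_mkwrite(void)
{
	bool node_changed;

	pthread_rwlock_rdlock(&mmap_sem);
	/* ...reserve the block, dirty the page... */
	node_changed = true;
	pthread_rwlock_unlock(&mmap_sem);

	balance_fs(node_changed);	/* no locks held across this */
}

int main(void)
{
	page_mkwrite();
	return 0;
}
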
@@ -215,7 +210,8 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
};
unsigned int seq_id = 0;
- if (unlikely(f2fs_readonly(inode->i_sb)))
+ if (unlikely(f2fs_readonly(inode->i_sb) ||
+ is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
return 0;
trace_f2fs_sync_file_enter(inode);
@@ -590,7 +586,8 @@ truncate_out:
return 0;
}
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
+ bool buf_write)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
@@ -598,6 +595,7 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
int count = 0, err = 0;
struct page *ipage;
bool truncate_page = false;
+ int flag = buf_write ? F2FS_GET_BLOCK_PRE_AIO : F2FS_GET_BLOCK_PRE_DIO;
trace_f2fs_truncate_blocks_enter(inode, from);
@@ -607,7 +605,7 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
goto free_partial;
if (lock)
- f2fs_lock_op(sbi);
+ __do_map_lock(sbi, flag, true);
ipage = f2fs_get_node_page(sbi, inode->i_ino);
if (IS_ERR(ipage)) {
@@ -645,7 +643,7 @@ free_next:
err = f2fs_truncate_inode_blocks(inode, free_from);
out:
if (lock)
- f2fs_unlock_op(sbi);
+ __do_map_lock(sbi, flag, false);
free_partial:
/* lastly zero out the first data page */
if (!err)
@@ -680,7 +678,7 @@ int f2fs_truncate(struct inode *inode)
return err;
}
- err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+ err = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
if (err)
return err;
@@ -789,9 +787,24 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
!uid_eq(attr->ia_uid, inode->i_uid)) ||
(attr->ia_valid & ATTR_GID &&
!gid_eq(attr->ia_gid, inode->i_gid))) {
+ f2fs_lock_op(F2FS_I_SB(inode));
err = dquot_transfer(inode, attr);
- if (err)
+ if (err) {
+ set_sbi_flag(F2FS_I_SB(inode),
+ SBI_QUOTA_NEED_REPAIR);
+ f2fs_unlock_op(F2FS_I_SB(inode));
return err;
+ }
+ /*
+ * update uid/gid under lock_op(), so that dquot and inode can
+ * be updated atomically.
+ */
+ if (attr->ia_valid & ATTR_UID)
+ inode->i_uid = attr->ia_uid;
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = attr->ia_gid;
+ f2fs_mark_inode_dirty_sync(inode, true);
+ f2fs_unlock_op(F2FS_I_SB(inode));
}
if (attr->ia_valid & ATTR_SIZE) {
@@ -1246,7 +1259,7 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
new_size = i_size_read(inode) - len;
truncate_pagecache(inode, new_size);
- ret = f2fs_truncate_blocks(inode, new_size, true);
+ ret = f2fs_truncate_blocks(inode, new_size, true, false);
up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_i_size_write(inode, new_size);
@@ -1431,7 +1444,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
f2fs_balance_fs(sbi, true);
down_write(&F2FS_I(inode)->i_mmap_sem);
- ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+ ret = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
up_write(&F2FS_I(inode)->i_mmap_sem);
if (ret)
return ret;
@@ -1978,7 +1991,7 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!blk_queue_discard(q))
+ if (!f2fs_hw_support_discard(F2FS_SB(sb)))
return -EOPNOTSUPP;
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
@@ -2162,6 +2175,12 @@ static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
if (f2fs_readonly(sbi->sb))
return -EROFS;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Skipping Checkpoint. Checkpoints currently disabled.");
+ return -EINVAL;
+ }
+
ret = mnt_want_write_file(filp);
if (ret)
return ret;
@@ -2533,6 +2552,9 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
if (f2fs_readonly(sbi->sb))
return -EROFS;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return -EINVAL;
+
if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
sizeof(range)))
return -EFAULT;
@@ -2591,13 +2613,29 @@ static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
}
#ifdef CONFIG_QUOTA
+int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
+{
+ struct dquot *transfer_to[MAXQUOTAS] = {};
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct super_block *sb = sbi->sb;
+ int err = 0;
+
+ transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
+ if (!IS_ERR(transfer_to[PRJQUOTA])) {
+ err = __dquot_transfer(inode, transfer_to);
+ if (err)
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ dqput(transfer_to[PRJQUOTA]);
+ }
+ return err;
+}
+
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
- struct dquot *transfer_to[MAXQUOTAS] = {};
struct page *ipage;
kprojid_t kprojid;
int err;
@@ -2617,53 +2655,45 @@ static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
return 0;
- err = mnt_want_write_file(filp);
- if (err)
- return err;
-
err = -EPERM;
- inode_lock(inode);
-
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
- goto out_unlock;
+ return err;
ipage = f2fs_get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
- goto out_unlock;
- }
+ if (IS_ERR(ipage))
+ return PTR_ERR(ipage);
if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
i_projid)) {
err = -EOVERFLOW;
f2fs_put_page(ipage, 1);
- goto out_unlock;
+ return err;
}
f2fs_put_page(ipage, 1);
err = dquot_initialize(inode);
if (err)
- goto out_unlock;
+ return err;
- transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
- if (!IS_ERR(transfer_to[PRJQUOTA])) {
- err = __dquot_transfer(inode, transfer_to);
- dqput(transfer_to[PRJQUOTA]);
- if (err)
- goto out_dirty;
- }
+ f2fs_lock_op(sbi);
+ err = f2fs_transfer_project_quota(inode, kprojid);
+ if (err)
+ goto out_unlock;
F2FS_I(inode)->i_projid = kprojid;
inode->i_ctime = current_time(inode);
-out_dirty:
f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
- inode_unlock(inode);
- mnt_drop_write_file(filp);
+ f2fs_unlock_op(sbi);
return err;
}
#else
+int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
+{
+ return 0;
+}
+
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
if (projid != F2FS_DEF_PROJID)
@@ -2736,6 +2766,30 @@ static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
return 0;
}
+static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+{
+ /*
+ * Project Quota ID state is only allowed to change from within the init
+ * namespace. Enforce that restriction only if we are trying to change
+ * the quota ID state. Everything else is allowed in user namespaces.
+ */
+ if (current_user_ns() == &init_user_ns)
+ return 0;
+
+ if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
+ return -EINVAL;
+
+ if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
+ if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
+ return -EINVAL;
+ } else {
+ if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -2763,19 +2817,20 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
return err;
inode_lock(inode);
+ err = f2fs_ioctl_check_project(inode, &fa);
+ if (err)
+ goto out;
flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
(flags & F2FS_FL_XFLAG_VISIBLE);
err = __f2fs_ioc_setflags(inode, flags);
- inode_unlock(inode);
- mnt_drop_write_file(filp);
if (err)
- return err;
+ goto out;
err = f2fs_ioc_setproject(filp, fa.fsx_projid);
- if (err)
- return err;
-
- return 0;
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return err;
}
int f2fs_pin_file_control(struct inode *inode, bool inc)
@@ -2992,7 +3047,8 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!f2fs_overwrite_io(inode, iocb->ki_pos,
iov_iter_count(from)) ||
f2fs_has_inline_data(inode) ||
- f2fs_force_buffered_io(inode, WRITE)) {
+ f2fs_force_buffered_io(inode,
+ iocb, from)) {
clear_inode_flag(inode,
FI_NO_PREALLOC);
inode_unlock(inode);
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 5c8d00422237..a07241fb8537 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/gc.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/module.h>
@@ -43,13 +40,16 @@ static int gc_thread_func(void *data)
if (gc_th->gc_wake)
gc_th->gc_wake = 0;
- if (try_to_freeze())
+ if (try_to_freeze()) {
+ stat_other_skip_bggc_count(sbi);
continue;
+ }
if (kthread_should_stop())
break;
if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
increase_sleep_time(gc_th, &wait_ms);
+ stat_other_skip_bggc_count(sbi);
continue;
}
@@ -58,8 +58,10 @@ static int gc_thread_func(void *data)
f2fs_stop_checkpoint(sbi, false);
}
- if (!sb_start_write_trylock(sbi->sb))
+ if (!sb_start_write_trylock(sbi->sb)) {
+ stat_other_skip_bggc_count(sbi);
continue;
+ }
/*
* [GC triggering condition]
@@ -80,12 +82,15 @@ static int gc_thread_func(void *data)
goto do_gc;
}
- if (!mutex_trylock(&sbi->gc_mutex))
+ if (!mutex_trylock(&sbi->gc_mutex)) {
+ stat_other_skip_bggc_count(sbi);
goto next;
+ }
- if (!is_idle(sbi)) {
+ if (!is_idle(sbi, GC_TIME)) {
increase_sleep_time(gc_th, &wait_ms);
mutex_unlock(&sbi->gc_mutex);
+ stat_io_skip_bggc_count(sbi);
goto next;
}
@@ -365,6 +370,10 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
if (sec_usage_check(sbi, secno))
goto next;
+ /* Don't touch checkpointed data */
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+ get_ckpt_valid_blocks(sbi, segno)))
+ goto next;
if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
goto next;
@@ -464,7 +473,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
* If the node is valid, copy it with cold status; otherwise (an invalid
* node) ignore it.
*/
-static void gc_node_segment(struct f2fs_sb_info *sbi,
+static int gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
struct f2fs_summary *entry;
@@ -472,6 +481,7 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
int off;
int phase = 0;
bool fggc = (gc_type == FG_GC);
+ int submitted = 0;
start_addr = START_BLOCK(sbi, segno);
@@ -485,10 +495,11 @@ next_step:
nid_t nid = le32_to_cpu(entry->nid);
struct page *node_page;
struct node_info ni;
+ int err;
/* stop BG_GC if there is not enough free sections. */
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
- return;
+ return submitted;
if (check_valid_map(sbi, segno, off) == 0)
continue;
@@ -525,7 +536,9 @@ next_step:
continue;
}
- f2fs_move_node_page(node_page, gc_type);
+ err = f2fs_move_node_page(node_page, gc_type);
+ if (!err && gc_type == FG_GC)
+ submitted++;
stat_inc_node_blk_count(sbi, 1, gc_type);
}
@@ -534,6 +547,7 @@ next_step:
if (fggc)
atomic_dec(&sbi->wb_sync_req[NODE]);
+ return submitted;
}
/*
@@ -669,7 +683,7 @@ put_page:
* Move data block via META_MAPPING while keeping locked data page.
* This can be used to move blocks, aka LBAs, directly on disk.
*/
-static void move_data_block(struct inode *inode, block_t bidx,
+static int move_data_block(struct inode *inode, block_t bidx,
int gc_type, unsigned int segno, int off)
{
struct f2fs_io_info fio = {
@@ -688,25 +702,29 @@ static void move_data_block(struct inode *inode, block_t bidx,
struct node_info ni;
struct page *page, *mpage;
block_t newaddr;
- int err;
+ int err = 0;
bool lfs_mode = test_opt(fio.sbi, LFS);
/* do not read out */
page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
if (!page)
- return;
+ return -ENOMEM;
- if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
+ err = -ENOENT;
goto out;
+ }
if (f2fs_is_atomic_file(inode)) {
F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
+ err = -EAGAIN;
goto out;
}
if (f2fs_is_pinned_file(inode)) {
f2fs_pin_file_control(inode, true);
+ err = -EAGAIN;
goto out;
}
@@ -717,6 +735,7 @@ static void move_data_block(struct inode *inode, block_t bidx,
if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
ClearPageUptodate(page);
+ err = -ENOENT;
goto put_out;
}
@@ -799,6 +818,7 @@ write_page:
fio.new_blkaddr = newaddr;
f2fs_submit_page_write(&fio);
if (fio.retry) {
+ err = -EAGAIN;
if (PageWriteback(fio.encrypted_page))
end_page_writeback(fio.encrypted_page);
goto put_page_out;
@@ -822,34 +842,42 @@ put_out:
f2fs_put_dnode(&dn);
out:
f2fs_put_page(page, 1);
+ return err;
}
-static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
+static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
unsigned int segno, int off)
{
struct page *page;
+ int err = 0;
page = f2fs_get_lock_data_page(inode, bidx, true);
if (IS_ERR(page))
- return;
+ return PTR_ERR(page);
- if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
+ err = -ENOENT;
goto out;
+ }
if (f2fs_is_atomic_file(inode)) {
F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
+ err = -EAGAIN;
goto out;
}
if (f2fs_is_pinned_file(inode)) {
if (gc_type == FG_GC)
f2fs_pin_file_control(inode, true);
+ err = -EAGAIN;
goto out;
}
if (gc_type == BG_GC) {
- if (PageWriteback(page))
+ if (PageWriteback(page)) {
+ err = -EAGAIN;
goto out;
+ }
set_page_dirty(page);
set_cold_data(page);
} else {
@@ -867,7 +895,6 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
.io_type = FS_GC_DATA_IO,
};
bool is_dirty = PageDirty(page);
- int err;
retry:
set_page_dirty(page);
@@ -892,6 +919,7 @@ retry:
}
out:
f2fs_put_page(page, 1);
+ return err;
}
/*
@@ -901,7 +929,7 @@ out:
* If the parent node is not valid or the data block address is different,
* the victim data block is ignored.
*/
-static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
struct super_block *sb = sbi->sb;
@@ -909,6 +937,7 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
block_t start_addr;
int off;
int phase = 0;
+ int submitted = 0;
start_addr = START_BLOCK(sbi, segno);
@@ -925,7 +954,7 @@ next_step:
/* stop BG_GC if there is not enough free sections. */
if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
- return;
+ return submitted;
if (check_valid_map(sbi, segno, off) == 0)
continue;
@@ -997,6 +1026,7 @@ next_step:
if (inode) {
struct f2fs_inode_info *fi = F2FS_I(inode);
bool locked = false;
+ int err;
if (S_ISREG(inode->i_mode)) {
if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
@@ -1016,12 +1046,16 @@ next_step:
start_bidx = f2fs_start_bidx_of_node(nofs, inode)
+ ofs_in_node;
if (f2fs_post_read_required(inode))
- move_data_block(inode, start_bidx, gc_type,
- segno, off);
+ err = move_data_block(inode, start_bidx,
+ gc_type, segno, off);
else
- move_data_page(inode, start_bidx, gc_type,
+ err = move_data_page(inode, start_bidx, gc_type,
segno, off);
+ if (!err && (gc_type == FG_GC ||
+ f2fs_post_read_required(inode)))
+ submitted++;
+
if (locked) {
up_write(&fi->i_gc_rwsem[WRITE]);
up_write(&fi->i_gc_rwsem[READ]);
@@ -1033,6 +1067,8 @@ next_step:
if (++phase < 5)
goto next_step;
+
+ return submitted;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -1060,6 +1096,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
int seg_freed = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
+ int submitted = 0;
/* readahead multi ssa blocks those have contiguous address */
if (sbi->segs_per_sec > 1)
@@ -1069,6 +1106,18 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
/* reference all summary page */
while (segno < end_segno) {
sum_page = f2fs_get_sum_page(sbi, segno++);
+ if (IS_ERR(sum_page)) {
+ int err = PTR_ERR(sum_page);
+
+ end_segno = segno - 1;
+ for (segno = start_segno; segno < end_segno; segno++) {
+ sum_page = find_get_page(META_MAPPING(sbi),
+ GET_SUM_BLOCK(sbi, segno));
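+ /* put twice: once for find_get_page() here, once for the reference left by f2fs_get_sum_page() */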
+ f2fs_put_page(sum_page, 0);
+ f2fs_put_page(sum_page, 0);
+ }
+ return err;
+ }
unlock_page(sum_page);
}
@@ -1103,10 +1152,11 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
* - lock_page(sum_page)
*/
if (type == SUM_TYPE_NODE)
- gc_node_segment(sbi, sum->entries, segno, gc_type);
- else
- gc_data_segment(sbi, sum->entries, gc_list, segno,
+ submitted += gc_node_segment(sbi, sum->entries, segno,
gc_type);
+ else
+ submitted += gc_data_segment(sbi, sum->entries, gc_list,
+ segno, gc_type);
stat_inc_seg_count(sbi, type, gc_type);
@@ -1117,7 +1167,7 @@ next:
f2fs_put_page(sum_page, 0);
}
- if (gc_type == FG_GC)
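+ /* flush merged writes only when this round actually submitted pages */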
+ if (submitted)
f2fs_submit_merged_write(sbi,
(type == SUM_TYPE_NODE) ? NODE : DATA);
@@ -1172,7 +1222,8 @@ gc_more:
* threshold, we can make them free by checkpoint. Then, we
* secure free segments which doesn't need fggc any more.
*/
- if (prefree_segments(sbi)) {
+ if (prefree_segments(sbi) &&
+ !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
ret = f2fs_write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
@@ -1224,7 +1275,7 @@ gc_more:
segno = NULL_SEGNO;
goto gc_more;
}
- if (gc_type == FG_GC)
+ if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
ret = f2fs_write_checkpoint(sbi, &cpc);
}
stop:
@@ -1244,7 +1295,7 @@ stop:
put_gc_inode(&gc_list);
- if (sync)
+ if (sync && !ret)
ret = sec_freed ? 0 : -EAGAIN;
return ret;
}
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index c8619e408009..bbac9d3787bd 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/gc.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define GC_THREAD_MIN_WB_PAGES 1 /*
* a threshold to determine
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index eb2e031ea887..cc82f142f811 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/hash.c
*
@@ -7,10 +8,6 @@
* Portions of this code from linux/fs/ext3/hash.c
*
* Copyright (C) 2002 by Theodore Ts'o
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/fs.h>
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 115dc219344b..cb31a719b048 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/inline.c
* Copyright (c) 2013, Intel Corporation
* Authors: Huajun Li <huajun.li@intel.com>
* Haicheng Li <haicheng.li@intel.com>
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
@@ -300,7 +298,7 @@ process_inline:
clear_inode_flag(inode, FI_INLINE_DATA);
f2fs_put_page(ipage, 1);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
- if (f2fs_truncate_blocks(inode, 0, false))
+ if (f2fs_truncate_blocks(inode, 0, false, false))
return false;
goto process_inline;
}
@@ -472,7 +470,7 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
return 0;
punch_dentry_pages:
truncate_inode_pages(&dir->i_data, 0);
- f2fs_truncate_blocks(dir, 0, false);
+ f2fs_truncate_blocks(dir, 0, false, false);
f2fs_remove_dirty_inode(dir);
return err;
}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 959df2249875..91ceee0ed4c4 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/inode.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -368,6 +365,12 @@ static int do_read_inode(struct inode *inode)
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
__recover_inline_status(inode, node_page);
+ /* try to recover cold bit for non-dir inode */
+ if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
+ set_cold_node(node_page, false);
+ set_page_dirty(node_page);
+ }
+
/* get rdev by using inline_info */
__get_inode_rdev(inode, ri);
@@ -610,6 +613,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0;
+ if (f2fs_is_checkpoint_ready(sbi))
+ return -ENOSPC;
+
/*
* We need to balance fs here to prevent from producing dirty node pages
* during the urgent cleaning time when running out of free sections.
@@ -648,7 +654,11 @@ void f2fs_evict_inode(struct inode *inode)
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
- dquot_initialize(inode);
+ err = dquot_initialize(inode);
+ if (err) {
+ err = 0;
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ }
f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
@@ -680,9 +690,10 @@ retry:
goto retry;
}
- if (err)
+ if (err) {
f2fs_update_inode_page(inode);
- dquot_free_inode(inode);
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+ }
sb_end_intwrite(inode->i_sb);
no_delete:
dquot_drop(inode);
@@ -691,7 +702,8 @@ no_delete:
stat_dec_inline_dir(inode);
stat_dec_inline_inode(inode);
- if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG)))
+ if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG) &&
+ !is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
else
f2fs_inode_synced(inode);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 1f67e389169f..99299ede7429 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/namei.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -19,6 +16,7 @@
#include "f2fs.h"
#include "node.h"
+#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include <trace/events/f2fs.h>
@@ -74,10 +72,6 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (err)
goto fail_drop;
- err = dquot_alloc_inode(inode);
- if (err)
- goto fail_drop;
-
set_inode_flag(inode, FI_NEW_INODE);
/* If the directory encrypted, then we should encrypt the inode. */
@@ -124,6 +118,8 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
set_inode_flag(inode, FI_PROJ_INHERIT);
+ f2fs_set_inode_flags(inode);
+
trace_f2fs_new_inode(inode, 0);
return inode;
@@ -184,16 +180,19 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
hot_count = sbi->raw_super->hot_ext_count;
for (i = 0; i < cold_count + hot_count; i++) {
- if (!is_extension_exist(name, extlist[i]))
- continue;
- if (i < cold_count)
- file_set_cold(inode);
- else
- file_set_hot(inode);
- break;
+ if (is_extension_exist(name, extlist[i]))
+ break;
}
up_read(&sbi->sb_lock);
+
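+ /* extlist[] holds cold extensions first, then hot ones; i == total count means no match */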
+ if (i == cold_count + hot_count)
+ return;
+
+ if (i < cold_count)
+ file_set_cold(inode);
+ else
+ file_set_hot(inode);
}
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
@@ -272,6 +271,9 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
err = dquot_initialize(dir);
if (err)
@@ -318,6 +320,9 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
err = fscrypt_prepare_link(old_dentry, dir, dentry);
if (err)
@@ -564,6 +569,9 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize,
&disk_link);
@@ -693,6 +701,9 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
err = dquot_initialize(dir);
if (err)
@@ -823,10 +834,13 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct f2fs_dir_entry *old_entry;
struct f2fs_dir_entry *new_entry;
bool is_old_inline = f2fs_has_inline_dentry(old_dir);
- int err = -ENOENT;
+ int err;
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
(!projid_eq(F2FS_I(new_dir)->i_projid,
@@ -847,6 +861,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out;
}
+ err = -ENOENT;
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_entry) {
if (IS_ERR(old_page))
@@ -983,6 +998,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
f2fs_sync_fs(sbi->sb, 1);
+
+ f2fs_update_time(sbi, REQ_TIME);
return 0;
put_out_dir:
@@ -1012,10 +1029,13 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
struct f2fs_dir_entry *old_dir_entry = NULL, *new_dir_entry = NULL;
struct f2fs_dir_entry *old_entry, *new_entry;
int old_nlink = 0, new_nlink = 0;
- int err = -ENOENT;
+ int err;
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
+ err = f2fs_is_checkpoint_ready(sbi);
+ if (err)
+ return err;
if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
!projid_eq(F2FS_I(new_dir)->i_projid,
@@ -1033,6 +1053,7 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
goto out;
+ err = -ENOENT;
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_entry) {
if (IS_ERR(old_page))
@@ -1136,6 +1157,8 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
f2fs_sync_fs(sbi->sb, 1);
+
+ f2fs_update_time(sbi, REQ_TIME);
return 0;
out_new_dir:
if (new_dir_entry) {
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index dd2e45a661aa..2b34206486d8 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/node.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -129,6 +126,8 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
/* get current nat block page with lock */
src_page = get_current_nat_page(sbi, nid);
+ if (IS_ERR(src_page))
+ return src_page;
dst_page = f2fs_grab_meta_page(sbi, dst_off);
f2fs_bug_on(sbi, PageDirty(src_page));
@@ -1542,8 +1541,10 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
}
if (__is_valid_data_blkaddr(ni.blk_addr) &&
- !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
+ !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
+ up_read(&sbi->node_write);
goto redirty_out;
+ }
if (atomic && !test_opt(sbi, NOBARRIER))
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
@@ -1564,8 +1565,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
up_read(&sbi->node_write);
if (wbc->for_reclaim) {
- f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0,
- page->index, NODE);
+ f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
submitted = NULL;
}
@@ -1587,8 +1587,10 @@ redirty_out:
return AOP_WRITEPAGE_ACTIVATE;
}
-void f2fs_move_node_page(struct page *node_page, int gc_type)
+int f2fs_move_node_page(struct page *node_page, int gc_type)
{
+ int err = 0;
+
if (gc_type == FG_GC) {
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
@@ -1600,12 +1602,16 @@ void f2fs_move_node_page(struct page *node_page, int gc_type)
f2fs_wait_on_page_writeback(node_page, NODE, true);
f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
- if (!clear_page_dirty_for_io(node_page))
+ if (!clear_page_dirty_for_io(node_page)) {
+ err = -EAGAIN;
goto out_page;
+ }
if (__write_node_page(node_page, false, NULL,
- &wbc, false, FS_GC_NODE_IO, NULL))
+ &wbc, false, FS_GC_NODE_IO, NULL)) {
+ err = -EAGAIN;
unlock_page(node_page);
+ }
goto release_page;
} else {
/* set page dirty and write it */
@@ -1616,6 +1622,7 @@ out_page:
unlock_page(node_page);
release_page:
f2fs_put_page(node_page, 0);
+ return err;
}
static int f2fs_write_node_page(struct page *page,
@@ -1630,13 +1637,13 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
unsigned int *seq_id)
{
pgoff_t index;
- pgoff_t last_idx = ULONG_MAX;
struct pagevec pvec;
int ret = 0;
struct page *last_page = NULL;
bool marked = false;
nid_t ino = inode->i_ino;
int nr_pages;
+ int nwritten = 0;
if (atomic) {
last_page = last_fsync_dnode(sbi, ino);
@@ -1714,7 +1721,7 @@ continue_unlock:
f2fs_put_page(last_page, 0);
break;
} else if (submitted) {
- last_idx = page->index;
+ nwritten++;
}
if (page == last_page) {
@@ -1740,8 +1747,8 @@ continue_unlock:
goto retry;
}
out:
- if (last_idx != ULONG_MAX)
- f2fs_submit_merged_write_cond(sbi, NULL, ino, last_idx, NODE);
+ if (nwritten)
+ f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
return ret ? -EIO : 0;
}
@@ -2268,15 +2275,19 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
nm_i->nat_block_bitmap)) {
struct page *page = get_current_nat_page(sbi, nid);
- ret = scan_nat_page(sbi, page, nid);
- f2fs_put_page(page, 1);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ } else {
+ ret = scan_nat_page(sbi, page, nid);
+ f2fs_put_page(page, 1);
+ }
if (ret) {
up_read(&nm_i->nat_tree_lock);
f2fs_bug_on(sbi, !mount);
f2fs_msg(sbi->sb, KERN_ERR,
"NAT is corrupt, run fsck to fix it");
- return -EINVAL;
+ return ret;
}
}
@@ -2353,8 +2364,9 @@ retry:
spin_unlock(&nm_i->nid_list_lock);
/* Let's scan nat pages and its caches to get free nids */
- f2fs_build_free_nids(sbi, true, false);
- goto retry;
+ if (!f2fs_build_free_nids(sbi, true, false))
+ goto retry;
+ return false;
}
/*
@@ -2537,7 +2549,7 @@ retry:
if (!PageUptodate(ipage))
SetPageUptodate(ipage);
fill_node_footer(ipage, ino, ino, 0, true);
- set_cold_node(page, false);
+ set_cold_node(ipage, false);
src = F2FS_INODE(page);
dst = F2FS_INODE(ipage);
@@ -2560,6 +2572,13 @@ retry:
F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
i_projid))
dst->i_projid = src->i_projid;
+
+ if (f2fs_sb_has_inode_crtime(sbi->sb) &&
+ F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
+ i_crtime_nsec)) {
+ dst->i_crtime = src->i_crtime;
+ dst->i_crtime_nsec = src->i_crtime_nsec;
+ }
}
new_ni = old_ni;
@@ -2703,7 +2722,7 @@ static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
__clear_bit_le(nat_index, nm_i->full_nat_bits);
}
-static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
+static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
struct nat_entry_set *set, struct cp_control *cpc)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -2727,6 +2746,9 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
down_write(&curseg->journal_rwsem);
} else {
page = get_next_nat_page(sbi, start_nid);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
nat_blk = page_address(page);
f2fs_bug_on(sbi, !nat_blk);
}
@@ -2772,12 +2794,13 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
kmem_cache_free(nat_entry_set_slab, set);
}
+ return 0;
}
/*
* This function is called during the checkpointing process.
*/
-void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -2787,6 +2810,7 @@ void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unsigned int found;
nid_t set_idx = 0;
LIST_HEAD(sets);
+ int err = 0;
/* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
if (enabled_nat_bits(sbi, cpc)) {
@@ -2796,7 +2820,7 @@ void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
if (!nm_i->dirty_nat_cnt)
- return;
+ return 0;
down_write(&nm_i->nat_tree_lock);
@@ -2819,11 +2843,16 @@ void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
/* flush dirty nats in nat entry set */
- list_for_each_entry_safe(set, tmp, &sets, set_list)
- __flush_nat_entry_set(sbi, set, cpc);
+ list_for_each_entry_safe(set, tmp, &sets, set_list) {
+ err = __flush_nat_entry_set(sbi, set, cpc);
+ if (err)
+ break;
+ }
up_write(&nm_i->nat_tree_lock);
/* Allow dirty nats by node block allocation in write_begin */
+
+ return err;
}
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
@@ -2850,10 +2879,8 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
struct page *page;
page = f2fs_get_meta_page(sbi, nat_bits_addr++);
- if (IS_ERR(page)) {
- disable_nat_bits(sbi, true);
+ if (IS_ERR(page))
return PTR_ERR(page);
- }
memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
page_address(page), F2FS_BLKSIZE);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 0f4db7a61254..1c73d879a9bc 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/node.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/* start node id of a node block dedicated to the given node id */
#define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 95511ed11a22..1dfb17f9f9ff 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/recovery.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -99,8 +96,12 @@ err_out:
return ERR_PTR(err);
}
-static void del_fsync_inode(struct fsync_inode_entry *entry)
+static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
+ if (drop) {
+ /* inode should not be recovered, drop it */
+ f2fs_inode_synced(entry->inode);
+ }
iput(entry->inode);
list_del(&entry->list);
kmem_cache_free(fsync_entry_slab, entry);
@@ -194,6 +195,33 @@ out:
return err;
}
+static int recover_quota_data(struct inode *inode, struct page *page)
+{
+ struct f2fs_inode *raw = F2FS_INODE(page);
+ struct iattr attr;
+ uid_t i_uid = le32_to_cpu(raw->i_uid);
+ gid_t i_gid = le32_to_cpu(raw->i_gid);
+ int err;
+
+ memset(&attr, 0, sizeof(attr));
+
+ attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
+ attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);
+
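+ /* transfer quota charges to the uid/gid recorded in the node page before recover_inode() overwrites i_uid/i_gid */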
+ if (!uid_eq(attr.ia_uid, inode->i_uid))
+ attr.ia_valid |= ATTR_UID;
+ if (!gid_eq(attr.ia_gid, inode->i_gid))
+ attr.ia_valid |= ATTR_GID;
+
+ if (!attr.ia_valid)
+ return 0;
+
+ err = dquot_transfer(inode, &attr);
+ if (err)
+ set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
+ return err;
+}
+
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
if (ri->i_inline & F2FS_PIN_FILE)
@@ -206,12 +234,41 @@ static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
clear_inode_flag(inode, FI_DATA_EXIST);
}
-static void recover_inode(struct inode *inode, struct page *page)
+static int recover_inode(struct inode *inode, struct page *page)
{
struct f2fs_inode *raw = F2FS_INODE(page);
char *name;
+ int err;
inode->i_mode = le16_to_cpu(raw->i_mode);
+
+ err = recover_quota_data(inode, page);
+ if (err)
+ return err;
+
+ i_uid_write(inode, le32_to_cpu(raw->i_uid));
+ i_gid_write(inode, le32_to_cpu(raw->i_gid));
+
+ if (raw->i_inline & F2FS_EXTRA_ATTR) {
+ if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
+ F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
+ i_projid)) {
+ projid_t i_projid;
+ kprojid_t kprojid;
+
+ i_projid = (projid_t)le32_to_cpu(raw->i_projid);
+ kprojid = make_kprojid(&init_user_ns, i_projid);
+
+ if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
+ err = f2fs_transfer_project_quota(inode,
+ kprojid);
+ if (err)
+ return err;
+ F2FS_I(inode)->i_projid = kprojid;
+ }
+ }
+ }
+
f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
@@ -221,9 +278,15 @@ static void recover_inode(struct inode *inode, struct page *page)
inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
F2FS_I(inode)->i_advise = raw->i_advise;
+ F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
+ f2fs_set_inode_flags(inode);
+ F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
+ le16_to_cpu(raw->i_gc_failures);
recover_inline_flags(inode, raw);
+ f2fs_mark_inode_dirty_sync(inode, true);
+
if (file_enc_name(inode))
name = "<encrypted>";
else
@@ -232,6 +295,7 @@ static void recover_inode(struct inode *inode, struct page *page)
f2fs_msg(inode->i_sb, KERN_NOTICE,
"recover_inode: ino = %x, name = %s, inline = %x",
ino_of_node(page), name, raw->i_inline);
+ return 0;
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
@@ -320,12 +384,12 @@ next:
return err;
}
-static void destroy_fsync_dnodes(struct list_head *head)
+static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
struct fsync_inode_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, head, list)
- del_fsync_inode(entry);
+ del_fsync_inode(entry, drop);
}
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
@@ -358,6 +422,8 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
}
sum_page = f2fs_get_sum_page(sbi, segno);
+ if (IS_ERR(sum_page))
+ return PTR_ERR(sum_page);
sum_node = (struct f2fs_summary_block *)page_address(sum_page);
sum = sum_node->entries[blkoff];
f2fs_put_page(sum_page, 1);
@@ -560,7 +626,7 @@ out:
}
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
- struct list_head *dir_list)
+ struct list_head *tmp_inode_list, struct list_head *dir_list)
{
struct curseg_info *curseg;
struct page *page = NULL;
@@ -598,8 +664,11 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
* In this case, we can lose the latest inode(x).
* So, call recover_inode for the inode update.
*/
- if (IS_INODE(page))
- recover_inode(entry->inode, page);
+ if (IS_INODE(page)) {
+ err = recover_inode(entry->inode, page);
+ if (err)
+ break;
+ }
if (entry->last_dentry == blkaddr) {
err = recover_dentry(entry->inode, page, dir_list);
if (err) {
@@ -614,7 +683,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
}
if (entry->blkaddr == blkaddr)
- del_fsync_inode(entry);
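+ /* defer the final iput; on a later error these inodes are dropped without being written back */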
+ list_move_tail(&entry->list, tmp_inode_list);
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
@@ -627,7 +696,7 @@ next:
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
- struct list_head inode_list;
+ struct list_head inode_list, tmp_inode_list;
struct list_head dir_list;
int err;
int ret = 0;
@@ -658,6 +727,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
}
INIT_LIST_HEAD(&inode_list);
+ INIT_LIST_HEAD(&tmp_inode_list);
INIT_LIST_HEAD(&dir_list);
/* prevent checkpoint */
@@ -676,11 +746,16 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
need_writecp = true;
/* step #2: recover data */
- err = recover_data(sbi, &inode_list, &dir_list);
+ err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
if (!err)
f2fs_bug_on(sbi, !list_empty(&inode_list));
+ else {
+ /* restore s_flags to let iput() trash data */
+ sbi->sb->s_flags = s_flags;
+ }
skip:
- destroy_fsync_dnodes(&inode_list);
+ destroy_fsync_dnodes(&inode_list, err);
+ destroy_fsync_dnodes(&tmp_inode_list, err);
/* truncate meta pages to be used by the recovery */
truncate_inode_pages_range(META_MAPPING(sbi),
@@ -689,19 +764,23 @@ skip:
if (err) {
truncate_inode_pages_final(NODE_MAPPING(sbi));
truncate_inode_pages_final(META_MAPPING(sbi));
+ } else {
+ clear_sbi_flag(sbi, SBI_POR_DOING);
}
-
- clear_sbi_flag(sbi, SBI_POR_DOING);
mutex_unlock(&sbi->cp_mutex);
/* let's drop all the directory inodes for clean checkpoint */
- destroy_fsync_dnodes(&dir_list);
+ destroy_fsync_dnodes(&dir_list, err);
- if (!err && need_writecp) {
- struct cp_control cpc = {
- .reason = CP_RECOVERY,
- };
- err = f2fs_write_checkpoint(sbi, &cpc);
+ if (need_writecp) {
+ set_sbi_flag(sbi, SBI_IS_RECOVERED);
+
+ if (!err) {
+ struct cp_control cpc = {
+ .reason = CP_RECOVERY,
+ };
+ err = f2fs_write_checkpoint(sbi, &cpc);
+ }
}
kmem_cache_destroy(fsync_entry_slab);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 30779aaa9dba..6edcf8391dd3 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/segment.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
@@ -179,6 +176,8 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
return false;
if (sbi->gc_mode == GC_URGENT)
return true;
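+ /* with checkpointing off, lean on SSR: free segments cannot be reclaimed without a checkpoint */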
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return true;
return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
@@ -267,8 +266,10 @@ retry:
}
next:
/* we don't need to invalidate this in the successful status */
- if (drop || recover)
+ if (drop || recover) {
ClearPageUptodate(page);
+ clear_cold_data(page);
+ }
set_page_private(page, 0);
ClearPagePrivate(page);
f2fs_put_page(page, 1);
@@ -374,7 +375,7 @@ static int __f2fs_commit_inmem_pages(struct inode *inode)
.io_type = FS_DATA_IO,
};
struct list_head revoke_list;
- pgoff_t last_idx = ULONG_MAX;
+ bool submit_bio = false;
int err = 0;
INIT_LIST_HEAD(&revoke_list);
@@ -409,14 +410,14 @@ retry:
}
/* record old blkaddr for revoking */
cur->old_addr = fio.old_blkaddr;
- last_idx = page->index;
+ submit_bio = true;
}
unlock_page(page);
list_move_tail(&cur->list, &revoke_list);
}
- if (last_idx != ULONG_MAX)
- f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);
+ if (submit_bio)
+ f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);
if (err) {
/*
@@ -483,6 +484,9 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
if (need && excess_cached_nats(sbi))
f2fs_balance_fs_bg(sbi);
+ if (f2fs_is_checkpoint_ready(sbi))
+ return;
+
/*
* We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments.
@@ -511,7 +515,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
else
f2fs_build_free_nids(sbi, false, false);
- if (!is_idle(sbi) &&
+ if (!is_idle(sbi, REQ_TIME) &&
(!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
return;
@@ -799,7 +803,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- unsigned short valid_blocks;
+ unsigned short valid_blocks, ckpt_valid_blocks;
if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
return;
@@ -807,8 +811,10 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
mutex_lock(&dirty_i->seglist_lock);
valid_blocks = get_valid_blocks(sbi, segno, false);
+ ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);
- if (valid_blocks == 0) {
+ if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
+ ckpt_valid_blocks == sbi->blocks_per_seg)) {
__locate_dirty_segment(sbi, segno, PRE);
__remove_dirty_segment(sbi, segno, DIRTY);
} else if (valid_blocks < sbi->blocks_per_seg) {
@@ -821,6 +827,66 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
mutex_unlock(&dirty_i->seglist_lock);
}
+/* Move currently empty dirty segments to prefree; takes seglist_lock itself */
+void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned int segno;
+
+ mutex_lock(&dirty_i->seglist_lock);
+ for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
+ if (get_valid_blocks(sbi, segno, false))
+ continue;
+ if (IS_CURSEG(sbi, segno))
+ continue;
+ __locate_dirty_segment(sbi, segno, PRE);
+ __remove_dirty_segment(sbi, segno, DIRTY);
+ }
+ mutex_unlock(&dirty_i->seglist_lock);
+}
+
+int f2fs_disable_cp_again(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ block_t ovp = overprovision_segments(sbi) << sbi->log_blocks_per_seg;
+ block_t holes[2] = {0, 0}; /* DATA and NODE */
+ struct seg_entry *se;
+ unsigned int segno;
+
+ mutex_lock(&dirty_i->seglist_lock);
+ for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
+ se = get_seg_entry(sbi, segno);
+ if (IS_NODESEG(se->type))
+ holes[NODE] += sbi->blocks_per_seg - se->valid_blocks;
+ else
+ holes[DATA] += sbi->blocks_per_seg - se->valid_blocks;
+ }
+ mutex_unlock(&dirty_i->seglist_lock);
+
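+ /* holes in dirty segments are unusable while checkpointing is off; give up if they exceed the overprovisioned space */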
+ if (holes[DATA] > ovp || holes[NODE] > ovp)
+ return -EAGAIN;
+ return 0;
+}
+
+/* This is only used by SBI_CP_DISABLED */
+static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
+{
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ unsigned int segno = 0;
+
+ mutex_lock(&dirty_i->seglist_lock);
+ for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
+ if (get_valid_blocks(sbi, segno, false))
+ continue;
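+ /* blocks still valid in the last checkpoint must not be reused until a new checkpoint is written */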
+ if (get_ckpt_valid_blocks(sbi, segno))
+ continue;
+ mutex_unlock(&dirty_i->seglist_lock);
+ return segno;
+ }
+ mutex_unlock(&dirty_i->seglist_lock);
+ return NULL_SEGNO;
+}
+
static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
block_t start, block_t len)
@@ -856,7 +922,8 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
block_t start, block_t len,
- struct rb_node *parent, struct rb_node **p)
+ struct rb_node *parent, struct rb_node **p,
+ bool leftmost)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_cmd *dc;
@@ -864,7 +931,7 @@ static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
rb_link_node(&dc->rb_node, parent, p);
- rb_insert_color(&dc->rb_node, &dcc->root);
+ rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
return dc;
}
@@ -876,7 +943,7 @@ static void __detach_discard_cmd(struct discard_cmd_control *dcc,
atomic_sub(dc->issuing, &dcc->issing_discard);
list_del(&dc->list);
- rb_erase(&dc->rb_node, &dcc->root);
+ rb_erase_cached(&dc->rb_node, &dcc->root);
dcc->undiscard_blks -= dc->len;
kmem_cache_free(discard_cmd_slab, dc);
@@ -905,9 +972,9 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
dc->error = 0;
if (dc->error)
- f2fs_msg(sbi->sb, KERN_INFO,
- "Issue discard(%u, %u, %u) failed, ret: %d",
- dc->lstart, dc->start, dc->len, dc->error);
+ printk_ratelimited(
+ "%sF2FS-fs: Issue discard(%u, %u, %u) failed, ret: %d",
+ KERN_INFO, dc->lstart, dc->start, dc->len, dc->error);
__detach_discard_cmd(dcc, dc);
}
@@ -1113,6 +1180,7 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
struct rb_node **p;
struct rb_node *parent = NULL;
struct discard_cmd *dc = NULL;
+ bool leftmost = true;
if (insert_p && insert_parent) {
parent = insert_parent;
@@ -1120,9 +1188,11 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
goto do_insert;
}
- p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
+ p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
+ lstart, &leftmost);
do_insert:
- dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
+ dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
+ p, leftmost);
if (!dc)
return NULL;
@@ -1190,7 +1260,7 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
NULL, lstart,
(struct rb_entry **)&prev_dc,
(struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true);
+ &insert_p, &insert_parent, true, NULL);
if (dc)
prev_dc = dc;
@@ -1298,7 +1368,7 @@ static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
NULL, pos,
(struct rb_entry **)&prev_dc,
(struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true);
+ &insert_p, &insert_parent, true, NULL);
if (!dc)
dc = next_dc;
@@ -1311,7 +1381,7 @@ static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
if (dc->state != D_PREP)
goto next;
- if (dpolicy->io_aware && !is_idle(sbi)) {
+ if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
io_interrupted = true;
break;
}
@@ -1371,7 +1441,7 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, dc->state != D_PREP);
if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
- !is_idle(sbi)) {
+ !is_idle(sbi, DISCARD_TIME)) {
io_interrupted = true;
break;
}
@@ -1600,7 +1670,9 @@ static int issue_discard_thread(void *data)
__wait_all_discard_cmd(sbi, &dpolicy);
wait_ms = dpolicy.min_interval;
} else if (issued == -1) {
- wait_ms = dpolicy.mid_interval;
+ wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
+ if (!wait_ms)
+ wait_ms = dpolicy.mid_interval;
} else {
wait_ms = dpolicy.max_interval;
}
@@ -1725,11 +1797,11 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
int i;
- if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
+ if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
return false;
if (!force) {
- if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
+ if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
SM_I(sbi)->dcc_info->nr_discards >=
SM_I(sbi)->dcc_info->max_discards)
return false;
@@ -1835,7 +1907,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
dirty_i->nr_dirty[PRE]--;
}
- if (!test_opt(sbi, DISCARD))
+ if (!f2fs_realtime_discard_enable(sbi))
continue;
if (force && start >= cpc->trim_start &&
@@ -1928,7 +2000,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
dcc->undiscard_blks = 0;
dcc->next_pos = 0;
- dcc->root = RB_ROOT;
+ dcc->root = RB_ROOT_CACHED;
dcc->rbtree_check = false;
init_waitqueue_head(&dcc->discard_wait_queue);
@@ -2025,12 +2097,12 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
del = 0;
}
- if (f2fs_discard_en(sbi) &&
- !f2fs_test_and_set_bit(offset, se->discard_map))
+ if (!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
/* don't overwrite by SSR to keep node chain */
- if (IS_NODESEG(se->type)) {
+ if (IS_NODESEG(se->type) &&
+ !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
se->ckpt_valid_blocks++;
}
@@ -2052,10 +2124,18 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
f2fs_bug_on(sbi, 1);
se->valid_blocks++;
del = 0;
+ } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ /*
+ * If checkpoints are off, we must not reuse data that
+ * was used in the previous checkpoint. If it was used
+ * before, we must track that to know how much space we
+ * really have.
+ */
+ if (f2fs_test_bit(offset, se->ckpt_valid_map))
+ sbi->unusable_block_count++;
}
- if (f2fs_discard_en(sbi) &&
- f2fs_test_and_clear_bit(offset, se->discard_map))
+ if (f2fs_test_and_clear_bit(offset, se->discard_map))
sbi->discard_blks++;
}
if (!f2fs_test_bit(offset, se->ckpt_valid_map))
@@ -2335,6 +2415,9 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
if (sbi->segs_per_sec != 1)
return CURSEG_I(sbi, type)->segno;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return 0;
+
if (test_opt(sbi, NOHEAP) &&
(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
return 0;
@@ -2432,6 +2515,7 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type)
__next_free_blkoff(sbi, curseg, 0);
sum_page = f2fs_get_sum_page(sbi, new_segno);
+ f2fs_bug_on(sbi, IS_ERR(sum_page));
sum_node = (struct f2fs_summary_block *)page_address(sum_page);
memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
f2fs_put_page(sum_page, 1);
@@ -2478,6 +2562,15 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
return 1;
}
}
+
+ /* find valid_blocks=0 in dirty list */
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
+ segno = get_free_segment(sbi);
+ if (segno != NULL_SEGNO) {
+ curseg->next_segno = segno;
+ return 1;
+ }
+ }
return 0;
}
@@ -2495,7 +2588,8 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
type == CURSEG_WARM_NODE)
new_curseg(sbi, type, false);
- else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
+ else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type) &&
+ likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
new_curseg(sbi, type, false);
else if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
change_curseg(sbi, type);
@@ -2570,7 +2664,7 @@ next:
NULL, start,
(struct rb_entry **)&prev_dc,
(struct rb_entry **)&next_dc,
- &insert_p, &insert_parent, true);
+ &insert_p, &insert_parent, true, NULL);
if (!dc)
dc = next_dc;
@@ -2671,7 +2765,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
* discard option. The user configuration appears to rely on runtime
* discard or periodic fstrim instead.
*/
- if (test_opt(sbi, DISCARD))
+ if (f2fs_realtime_discard_enable(sbi))
goto out;
start_block = START_BLOCK(sbi, start_segno);
@@ -3020,6 +3114,7 @@ void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
ClearPageError(page);
f2fs_submit_page_write(&fio);
+ stat_inc_meta_count(sbi, page->index);
f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
}
@@ -3182,8 +3277,7 @@ void f2fs_wait_on_page_writeback(struct page *page,
if (PageWriteback(page)) {
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
- f2fs_submit_merged_write_cond(sbi, page->mapping->host,
- 0, page->index, type);
+ f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
if (ordered)
wait_on_page_writeback(page);
else
@@ -3191,10 +3285,14 @@ void f2fs_wait_on_page_writeback(struct page *page,
}
}
-void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
+void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *cpage;
+ if (!f2fs_post_read_required(inode))
+ return;
+
if (!is_valid_data_blkaddr(sbi, blkaddr))
return;
@@ -3205,6 +3303,15 @@ void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
}
}
+void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
+ block_t len)
+{
+ block_t i;
+
+ for (i = 0; i < len; i++)
+ f2fs_wait_on_block_writeback(inode, blkaddr + i);
+}
+
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -3762,13 +3869,11 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
return -ENOMEM;
#endif
- if (f2fs_discard_en(sbi)) {
- sit_i->sentries[start].discard_map
- = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
- GFP_KERNEL);
- if (!sit_i->sentries[start].discard_map)
- return -ENOMEM;
- }
+ sit_i->sentries[start].discard_map
+ = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
+ GFP_KERNEL);
+ if (!sit_i->sentries[start].discard_map)
+ return -ENOMEM;
}
sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
@@ -3904,6 +4009,8 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
se = &sit_i->sentries[start];
page = get_current_sit_page(sbi, start);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
sit_blk = (struct f2fs_sit_block *)page_address(page);
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
f2fs_put_page(page, 1);
@@ -3916,18 +4023,16 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
total_node_blocks += se->valid_blocks;
/* build discard map only one time */
- if (f2fs_discard_en(sbi)) {
- if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
- memset(se->discard_map, 0xff,
- SIT_VBLOCK_MAP_SIZE);
- } else {
- memcpy(se->discard_map,
- se->cur_valid_map,
- SIT_VBLOCK_MAP_SIZE);
- sbi->discard_blks +=
- sbi->blocks_per_seg -
- se->valid_blocks;
- }
+ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+ memset(se->discard_map, 0xff,
+ SIT_VBLOCK_MAP_SIZE);
+ } else {
+ memcpy(se->discard_map,
+ se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks +=
+ sbi->blocks_per_seg -
+ se->valid_blocks;
}
if (sbi->segs_per_sec > 1)
@@ -3965,16 +4070,13 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
if (IS_NODESEG(se->type))
total_node_blocks += se->valid_blocks;
- if (f2fs_discard_en(sbi)) {
- if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
- memset(se->discard_map, 0xff,
- SIT_VBLOCK_MAP_SIZE);
- } else {
- memcpy(se->discard_map, se->cur_valid_map,
- SIT_VBLOCK_MAP_SIZE);
- sbi->discard_blks += old_valid_blocks;
- sbi->discard_blks -= se->valid_blocks;
- }
+ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+ memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
+ } else {
+ memcpy(se->discard_map, se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks += old_valid_blocks;
+ sbi->discard_blks -= se->valid_blocks;
}
if (sbi->segs_per_sec > 1) {
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index b3d9e317ff0c..ab3465faddf1 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/segment.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
@@ -342,6 +339,12 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
return get_seg_entry(sbi, segno)->valid_blocks;
}
+static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
+ unsigned int segno)
+{
+ return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+}
+
static inline void seg_info_from_raw_sit(struct seg_entry *se,
struct f2fs_sit_entry *rs)
{
@@ -579,6 +582,15 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
reserved_sections(sbi) + needed);
}
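+/*
+ * With checkpoint=disable, prefree segments are never reclaimed, so new
+ * writers must be refused with -ENOSPC once free sections run out.
+ */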
+static inline int f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
+{
+ if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return 0;
+ if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
+ return 0;
+ return -ENOSPC;
+}
+
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 36cfd816c160..9e13db994fdf 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs shrinker support
* the basic infra was copied from fs/ubifs/shrinker.c
*
* Copyright (c) 2015 Motorola Mobility
* Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 896b885f504e..af58b2cc21b8 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/super.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -53,9 +50,10 @@ char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_DIR_DEPTH] = "too big dir depth",
[FAULT_EVICT_INODE] = "evict_inode fail",
[FAULT_TRUNCATE] = "truncate fail",
- [FAULT_IO] = "IO error",
+ [FAULT_READ_IO] = "read IO error",
[FAULT_CHECKPOINT] = "checkpoint error",
[FAULT_DISCARD] = "discard error",
+ [FAULT_WRITE_IO] = "write IO error",
};
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
@@ -138,6 +136,7 @@ enum {
Opt_alloc,
Opt_fsync,
Opt_test_dummy_encryption,
+ Opt_checkpoint,
Opt_err,
};
@@ -196,6 +195,7 @@ static match_table_t f2fs_tokens = {
{Opt_alloc, "alloc_mode=%s"},
{Opt_fsync, "fsync_mode=%s"},
{Opt_test_dummy_encryption, "test_dummy_encryption"},
+ {Opt_checkpoint, "checkpoint=%s"},
{Opt_err, NULL},
};
@@ -207,7 +207,7 @@ void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
+ printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
va_end(args);
}
@@ -360,7 +360,6 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
static int parse_options(struct super_block *sb, char *options)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
- struct request_queue *q;
substring_t args[MAX_OPT_ARGS];
char *p, *name;
int arg = 0;
@@ -415,14 +414,7 @@ static int parse_options(struct super_block *sb, char *options)
return -EINVAL;
break;
case Opt_discard:
- q = bdev_get_queue(sb->s_bdev);
- if (blk_queue_discard(q)) {
- set_opt(sbi, DISCARD);
- } else if (!f2fs_sb_has_blkzoned(sb)) {
- f2fs_msg(sb, KERN_WARNING,
- "mounting with \"discard\" option, but "
- "the device does not support discard");
- }
+ set_opt(sbi, DISCARD);
break;
case Opt_nodiscard:
if (f2fs_sb_has_blkzoned(sb)) {
@@ -602,28 +594,31 @@ static int parse_options(struct super_block *sb, char *options)
}
F2FS_OPTION(sbi).write_io_size_bits = arg;
break;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
case Opt_fault_injection:
if (args->from && match_int(args, &arg))
return -EINVAL;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
set_opt(sbi, FAULT_INJECTION);
-#else
- f2fs_msg(sb, KERN_INFO,
- "FAULT_INJECTION was not selected");
-#endif
break;
+
case Opt_fault_type:
if (args->from && match_int(args, &arg))
return -EINVAL;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
f2fs_build_fault_attr(sbi, 0, arg);
set_opt(sbi, FAULT_INJECTION);
+ break;
#else
+ case Opt_fault_injection:
f2fs_msg(sb, KERN_INFO,
- "FAULT_INJECTION was not selected");
-#endif
+ "fault_injection options not supported");
+ break;
+
+ case Opt_fault_type:
+ f2fs_msg(sb, KERN_INFO,
+ "fault_type options not supported");
break;
+#endif
case Opt_lazytime:
sb->s_flags |= SB_LAZYTIME;
break;
@@ -776,6 +771,23 @@ static int parse_options(struct super_block *sb, char *options)
"Test dummy encryption mount option ignored");
#endif
break;
+ case Opt_checkpoint:
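+ /* "checkpoint=enable" (default) or "checkpoint=disable", e.g. mount -o checkpoint=disable */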
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+
+ if (strlen(name) == 6 &&
+ !strncmp(name, "enable", 6)) {
+ clear_opt(sbi, DISABLE_CHECKPOINT);
+ } else if (strlen(name) == 7 &&
+ !strncmp(name, "disable", 7)) {
+ set_opt(sbi, DISABLE_CHECKPOINT);
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
default:
f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value",
@@ -834,6 +846,12 @@ static int parse_options(struct super_block *sb, char *options)
}
}
+ if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
+ f2fs_msg(sb, KERN_ERR,
+ "LFS not compatible with checkpoint=disable\n");
+ return -EINVAL;
+ }
+
/* Do not pass down write hints if the number of active logs is less
* than NR_CURSEG_TYPE.
*/
@@ -1021,8 +1039,8 @@ static void f2fs_put_super(struct super_block *sb)
* But, the previous checkpoint was not done by umount, it needs to do
* clean checkpoint again.
*/
- if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
- !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+ if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
@@ -1032,7 +1050,8 @@ static void f2fs_put_super(struct super_block *sb)
/* be sure to wait for any on-going discard commands */
dropped = f2fs_wait_discard_bios(sbi);
- if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
+ if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
+ !sbi->discard_blks && !dropped) {
struct cp_control cpc = {
.reason = CP_UMOUNT | CP_TRIMMED,
};
@@ -1093,6 +1112,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
if (unlikely(f2fs_cp_error(sbi)))
return 0;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+ return 0;
trace_f2fs_sync_fs(sb, sync);
@@ -1192,6 +1213,11 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_blocks = total_count - start_count;
buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
sbi->current_reserved_blocks;
+ if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
+ buf->f_bfree = 0;
+ else
+ buf->f_bfree -= sbi->unusable_block_count;
+
if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
buf->f_bavail = buf->f_bfree -
F2FS_OPTION(sbi).root_reserved_blocks;
@@ -1336,7 +1362,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
from_kgid_munged(&init_user_ns,
F2FS_OPTION(sbi).s_resgid));
if (F2FS_IO_SIZE_BITS(sbi))
- seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
+ seq_printf(seq, ",io_bits=%u",
+ F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (test_opt(sbi, FAULT_INJECTION)) {
seq_printf(seq, ",fault_injection=%u",
@@ -1370,6 +1397,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
seq_printf(seq, ",alloc_mode=%s", "reuse");
+ if (test_opt(sbi, DISABLE_CHECKPOINT))
+ seq_puts(seq, ",checkpoint=disable");
+
if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
seq_printf(seq, ",fsync_mode=%s", "posix");
else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
@@ -1397,10 +1427,10 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
set_opt(sbi, NOHEAP);
+ clear_opt(sbi, DISABLE_CHECKPOINT);
sbi->sb->s_flags |= SB_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
- if (blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev)))
- set_opt(sbi, DISCARD);
+ set_opt(sbi, DISCARD);
if (f2fs_sb_has_blkzoned(sbi->sb))
set_opt_mode(sbi, F2FS_MOUNT_LFS);
else
@@ -1419,6 +1449,57 @@ static void default_options(struct f2fs_sb_info *sbi)
#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif
+
+static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
+{
+ struct cp_control cpc;
+ int err;
+
+ sbi->sb->s_flags |= SB_ACTIVE;
+
+ mutex_lock(&sbi->gc_mutex);
+ f2fs_update_time(sbi, DISABLE_TIME);
+
+ while (!f2fs_time_over(sbi, DISABLE_TIME)) {
+ err = f2fs_gc(sbi, true, false, NULL_SEGNO);
+ if (err == -ENODATA)
+ break;
+ if (err && err != -EAGAIN) {
+ mutex_unlock(&sbi->gc_mutex);
+ return err;
+ }
+ }
+ mutex_unlock(&sbi->gc_mutex);
+
+ err = sync_filesystem(sbi->sb);
+ if (err)
+ return err;
+
+ if (f2fs_disable_cp_again(sbi))
+ return -EAGAIN;
+
+ mutex_lock(&sbi->gc_mutex);
+ cpc.reason = CP_PAUSE;
+ set_sbi_flag(sbi, SBI_CP_DISABLED);
+ f2fs_write_checkpoint(sbi, &cpc);
+
+ sbi->unusable_block_count = 0;
+ mutex_unlock(&sbi->gc_mutex);
+ return 0;
+}
+
+static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
+{
+ mutex_lock(&sbi->gc_mutex);
+ f2fs_dirty_to_prefree(sbi);
+
+ clear_sbi_flag(sbi, SBI_CP_DISABLED);
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
+ mutex_unlock(&sbi->gc_mutex);
+
+ f2fs_sync_fs(sbi->sb, 1);
+}
+
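f2fs_disable_checkpoint() above runs garbage collection under a time budget: it stamps a start time (DISABLE_TIME) and keeps calling f2fs_gc() until the budget expires, GC reports nothing left to move (-ENODATA), or a hard error occurs. A rough userspace sketch of that time-bounded retry loop; the helpers are stand-ins, not kernel functions:

```c
#include <stdio.h>
#include <time.h>
#include <errno.h>

/* Stand-in for one f2fs_gc() pass; returns -ENODATA when done. */
static int gc_pass(int *work_left)
{
	if (*work_left == 0)
		return -ENODATA;
	(*work_left)--;
	return 0;
}

/* Keep collecting until done or the time budget is spent. */
static int gc_until_deadline(int work_left, double budget_secs)
{
	time_t start = time(NULL);
	int err;

	while (difftime(time(NULL), start) < budget_secs) {
		err = gc_pass(&work_left);
		if (err == -ENODATA)
			return 0;	/* nothing left to migrate */
		if (err && err != -EAGAIN)
			return err;	/* hard failure */
	}
	return 0;	/* budget spent; stop and take the checkpoint */
}

int main(void)
{
	printf("result: %d\n", gc_until_deadline(1000, 1.0));
	return 0;
}
```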
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -1428,6 +1509,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
bool need_restart_gc = false;
bool need_stop_gc = false;
bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
+ bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
+ bool checkpoint_changed;
#ifdef CONFIG_QUOTA
int i, j;
#endif
@@ -1472,6 +1555,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
err = parse_options(sb, data);
if (err)
goto restore_opts;
+ checkpoint_changed =
+ disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);
/*
* Previous and new state of filesystem is RO,
@@ -1485,7 +1570,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
err = dquot_suspend(sb, -1);
if (err < 0)
goto restore_opts;
- } else if (f2fs_readonly(sb) && !(*flags & MS_RDONLY)) {
+ } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
/* dquot_resume needs RW */
sb->s_flags &= ~SB_RDONLY;
if (sb_any_quota_suspended(sb)) {
@@ -1505,6 +1590,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
+ if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
+ err = -EINVAL;
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "disabling checkpoint not compatible with read-only");
+ goto restore_opts;
+ }
+
/*
* We stop the GC thread if FS is mounted as RO
* or if background_gc = off is passed in mount
@@ -1533,6 +1625,16 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
clear_sbi_flag(sbi, SBI_IS_CLOSE);
}
+ if (checkpoint_changed) {
+ if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+ err = f2fs_disable_checkpoint(sbi);
+ if (err)
+ goto restore_gc;
+ } else {
+ f2fs_enable_checkpoint(sbi);
+ }
+ }
+
/*
* We stop issue flush thread if FS is mounted as RO
* or if flush_merge is not passed in mount option.
@@ -1556,6 +1658,7 @@ skip:
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
limit_reserve_root(sbi);
+ *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
return 0;
restore_gc:
if (need_restart_gc) {
@@ -1608,6 +1711,7 @@ repeat:
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto repeat;
}
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
return PTR_ERR(page);
}
@@ -1619,6 +1723,7 @@ repeat:
}
if (unlikely(!PageUptodate(page))) {
f2fs_put_page(page, 1);
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
return -EIO;
}
@@ -1660,6 +1765,7 @@ retry:
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
break;
}
@@ -1696,6 +1802,12 @@ static qsize_t *f2fs_get_reserved_space(struct inode *inode)
static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
+ if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "quota sysfile may be corrupted, skip loading it");
+ return 0;
+ }
+
return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
F2FS_OPTION(sbi).s_jquota_fmt, type);
}
@@ -1766,7 +1878,14 @@ static int f2fs_enable_quotas(struct super_block *sb)
test_opt(F2FS_SB(sb), PRJQUOTA),
};
- sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
+ if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
+ f2fs_msg(sb, KERN_ERR,
+ "quota file may be corrupted, skip loading it");
+ return 0;
+ }
+
+ sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+
for (type = 0; type < MAXQUOTAS; type++) {
qf_inum = f2fs_qf_ino(sb, type);
if (qf_inum) {
@@ -1780,6 +1899,8 @@ static int f2fs_enable_quotas(struct super_block *sb)
"fsck to fix.", type, err);
for (type--; type >= 0; type--)
dquot_quota_off(sb, type);
+ set_sbi_flag(F2FS_SB(sb),
+ SBI_QUOTA_NEED_REPAIR);
return err;
}
}
@@ -1787,35 +1908,51 @@ static int f2fs_enable_quotas(struct super_block *sb)
return 0;
}
-static int f2fs_quota_sync(struct super_block *sb, int type)
+int f2fs_quota_sync(struct super_block *sb, int type)
{
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
int ret;
ret = dquot_writeback_dquots(sb, type);
if (ret)
- return ret;
+ goto out;
/*
* Now when everything is written we can discard the pagecache so
* that userspace sees the changes.
*/
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ struct address_space *mapping;
+
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
- ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
+ mapping = dqopt->files[cnt]->i_mapping;
+
+ ret = filemap_fdatawrite(mapping);
+ if (ret)
+ goto out;
+
+ /* if we are using journalled quota */
+ if (is_journalled_quota(sbi))
+ continue;
+
+ ret = filemap_fdatawait(mapping);
if (ret)
- return ret;
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
inode_lock(dqopt->files[cnt]);
truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
inode_unlock(dqopt->files[cnt]);
}
- return 0;
+out:
+ if (ret)
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+ return ret;
}
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
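The reworked f2fs_quota_sync() above funnels every failure through the single out: label so one place can latch the needs-repair state; because the flag is sticky, a later successful sync cannot hide an earlier failure. A compact sketch of that latch-on-error idiom, with illustrative names:

```c
#include <stdio.h>

static int need_repair;	/* sticky flag, like SBI_QUOTA_NEED_REPAIR */

static int write_one(int id)
{
	return id == 2 ? -5 : 0;	/* simulate -EIO on one file */
}

static int sync_all(int nfiles)
{
	int ret = 0;
	int i;

	for (i = 0; i < nfiles; i++) {
		ret = write_one(i);
		if (ret)
			goto out;
	}
out:
	if (ret)
		need_repair = 1;	/* latched: never cleared on success */
	return ret;
}

int main(void)
{
	printf("ret=%d repair=%d\n", sync_all(4), need_repair);
	return 0;
}
```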
@@ -1836,8 +1973,7 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
inode_lock(inode);
F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
- inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
- S_NOATIME | S_IMMUTABLE);
+ f2fs_set_inode_flags(inode);
inode_unlock(inode);
f2fs_mark_inode_dirty_sync(inode, false);
@@ -1852,7 +1988,9 @@ static int f2fs_quota_off(struct super_block *sb, int type)
if (!inode || !igrab(inode))
return dquot_quota_off(sb, type);
- f2fs_quota_sync(sb, type);
+ err = f2fs_quota_sync(sb, type);
+ if (err)
+ goto out_put;
err = dquot_quota_off(sb, type);
if (err || f2fs_sb_has_quota_ino(sb))
@@ -1860,7 +1998,7 @@ static int f2fs_quota_off(struct super_block *sb, int type)
inode_lock(inode);
F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
- inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
+ f2fs_set_inode_flags(inode);
inode_unlock(inode);
f2fs_mark_inode_dirty_sync(inode, false);
out_put:
@@ -1871,9 +2009,88 @@ out_put:
void f2fs_quota_off_umount(struct super_block *sb)
{
int type;
+ int err;
+
+ for (type = 0; type < MAXQUOTAS; type++) {
+ err = f2fs_quota_off(sb, type);
+ if (err) {
+ int ret = dquot_quota_off(sb, type);
+
+ f2fs_msg(sb, KERN_ERR,
+ "Fail to turn off disk quota "
+ "(type: %d, err: %d, ret:%d), Please "
+ "run fsck to fix it.", type, err, ret);
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+ }
+ }
+}
+
+static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int type;
+
+ for (type = 0; type < MAXQUOTAS; type++) {
+ if (!dqopt->files[type])
+ continue;
+ f2fs_inode_synced(dqopt->files[type]);
+ }
+}
+
+static int f2fs_dquot_commit(struct dquot *dquot)
+{
+ int ret;
+
+ ret = dquot_commit(dquot);
+ if (ret < 0)
+ set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+ return ret;
+}
+
+static int f2fs_dquot_acquire(struct dquot *dquot)
+{
+ int ret;
+
+ ret = dquot_acquire(dquot);
+ if (ret < 0)
+ set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+
+ return ret;
+}
+
+static int f2fs_dquot_release(struct dquot *dquot)
+{
+ int ret;
+
+ ret = dquot_release(dquot);
+ if (ret < 0)
+ set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+ return ret;
+}
+
+static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
+{
+ struct super_block *sb = dquot->dq_sb;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int ret;
+
+ ret = dquot_mark_dquot_dirty(dquot);
+
+ /* if we are using journalled quota */
+ if (is_journalled_quota(sbi))
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
+
+ return ret;
+}
- for (type = 0; type < MAXQUOTAS; type++)
- f2fs_quota_off(sb, type);
+static int f2fs_dquot_commit_info(struct super_block *sb, int type)
+{
+ int ret;
+
+ ret = dquot_commit_info(sb, type);
+ if (ret < 0)
+ set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+ return ret;
}
static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
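The block of f2fs_dquot_*() helpers above is a thin decoration layer: each wrapper calls the generic dquot operation and, on failure, records that the quota file may need repair, and the ops table below is then pointed at the wrappers. The same wrap-and-flag pattern in a self-contained sketch (illustrative names, not the dquot API):

```c
#include <stdio.h>

static int need_repair;

/* The "generic" operation being wrapped. */
static int generic_commit(int id)
{
	return id < 0 ? -22 : 0;	/* -EINVAL for bad ids */
}

/* Wrapper: same signature, plus error bookkeeping. */
static int wrapped_commit(int id)
{
	int ret = generic_commit(id);

	if (ret < 0)
		need_repair = 1;
	return ret;
}

/* An ops table points at the wrappers instead of the generics. */
struct ops {
	int (*commit)(int id);
};

static const struct ops my_ops = { .commit = wrapped_commit };

int main(void)
{
	my_ops.commit(-1);
	printf("repair=%d\n", need_repair);
	return 0;
}
```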
@@ -1884,11 +2101,11 @@ static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
static const struct dquot_operations f2fs_quota_operations = {
.get_reserved_space = f2fs_get_reserved_space,
- .write_dquot = dquot_commit,
- .acquire_dquot = dquot_acquire,
- .release_dquot = dquot_release,
- .mark_dirty = dquot_mark_dquot_dirty,
- .write_info = dquot_commit_info,
+ .write_dquot = f2fs_dquot_commit,
+ .acquire_dquot = f2fs_dquot_acquire,
+ .release_dquot = f2fs_dquot_release,
+ .mark_dirty = f2fs_dquot_mark_dquot_dirty,
+ .write_info = f2fs_dquot_commit_info,
.alloc_dquot = dquot_alloc,
.destroy_dquot = dquot_destroy,
.get_projid = f2fs_get_projid,
@@ -1906,6 +2123,11 @@ static const struct quotactl_ops f2fs_quotactl_ops = {
.get_nextdqblk = dquot_get_next_dqblk,
};
#else
+int f2fs_quota_sync(struct super_block *sb, int type)
+{
+ return 0;
+}
+
void f2fs_quota_off_umount(struct super_block *sb)
{
}
@@ -2170,6 +2392,26 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
(bh->b_data + F2FS_SUPER_OFFSET);
struct super_block *sb = sbi->sb;
unsigned int blocksize;
+ size_t crc_offset = 0;
+ __u32 crc = 0;
+
+ /* Check checksum_offset and crc in superblock */
+ if (le32_to_cpu(raw_super->feature) & F2FS_FEATURE_SB_CHKSUM) {
+ crc_offset = le32_to_cpu(raw_super->checksum_offset);
+ if (crc_offset !=
+ offsetof(struct f2fs_super_block, crc)) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid SB checksum offset: %zu",
+ crc_offset);
+ return 1;
+ }
+ crc = le32_to_cpu(raw_super->crc);
+ if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid SB checksum value: %u", crc);
+ return 1;
+ }
+ }
if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
f2fs_msg(sb, KERN_INFO,
@@ -2320,7 +2562,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
unsigned int segment_count_main;
unsigned int cp_pack_start_sum, cp_payload;
block_t user_block_count;
- int i;
+ int i, j;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -2361,11 +2603,43 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
return 1;
+ for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
+ if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
+ le32_to_cpu(ckpt->cur_node_segno[j])) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Node segment (%u, %u) has the same "
+ "segno: %u", i, j,
+ le32_to_cpu(ckpt->cur_node_segno[i]));
+ return 1;
+ }
+ }
}
for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
return 1;
+ for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
+ if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
+ le32_to_cpu(ckpt->cur_data_segno[j])) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Data segment (%u, %u) has the same "
+ "segno: %u", i, j,
+ le32_to_cpu(ckpt->cur_data_segno[i]));
+ return 1;
+ }
+ }
+ }
+ for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
+ for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
+ if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
+ le32_to_cpu(ckpt->cur_data_segno[j])) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Data segment (%u) and Data segment (%u)"
+ " has the same segno: %u", i, j,
+ le32_to_cpu(ckpt->cur_node_segno[i]));
+ return 1;
+ }
+ }
}
sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
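These sanity checks reject a checkpoint in which two current segments claim the same segment number, since two active logs writing into one segment would corrupt each other; the three loops cover node-vs-node, data-vs-data, and node-vs-data pairs. A standalone sketch of the same pairwise scan (array sizes are illustrative, and the cross check here compares every pair rather than starting at j = i):

```c
#include <stdio.h>

#define NR_NODE 3
#define NR_DATA 3

/* Return 1 if any two current segments share a segment number. */
static int has_dup_segno(const unsigned node[], const unsigned data[])
{
	int i, j;

	for (i = 0; i < NR_NODE; i++)
		for (j = i + 1; j < NR_NODE; j++)
			if (node[i] == node[j])
				return 1;
	for (i = 0; i < NR_DATA; i++)
		for (j = i + 1; j < NR_DATA; j++)
			if (data[i] == data[j])
				return 1;
	for (i = 0; i < NR_NODE; i++)
		for (j = 0; j < NR_DATA; j++)
			if (node[i] == data[j])
				return 1;
	return 0;
}

int main(void)
{
	unsigned node[NR_NODE] = { 10, 11, 12 };
	unsigned data[NR_DATA] = { 20, 21, 12 };	/* clashes with node */

	printf("dup=%d\n", has_dup_segno(node, data));
	return 0;
}
```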
@@ -2423,6 +2697,9 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->dir_level = DEF_DIR_LEVEL;
sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
+ sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
+ sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
+ sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
clear_sbi_flag(sbi, SBI_NEED_FSCK);
for (i = 0; i < NR_COUNT_TYPE; i++)
@@ -2453,8 +2730,12 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
if (err)
return err;
- return percpu_counter_init(&sbi->total_valid_inode_count, 0,
+ err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
GFP_KERNEL);
+ if (err)
+ percpu_counter_destroy(&sbi->alloc_valid_block_count);
+
+ return err;
}
#ifdef CONFIG_BLK_DEV_ZONED
@@ -2589,6 +2870,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
struct buffer_head *bh;
+ __u32 crc = 0;
int err;
if ((recover && f2fs_readonly(sbi->sb)) ||
@@ -2597,6 +2879,13 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
return -EROFS;
}
+ /* we should update superblock crc here */
+ if (!recover && f2fs_sb_has_sb_chksum(sbi->sb)) {
+ crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
+ offsetof(struct f2fs_super_block, crc));
+ F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
+ }
+
/* write back-up superblock first */
bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
if (!bh)
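Both superblock-checksum hunks rely on the same layout trick: crc is the last member of struct f2fs_super_block, so the checksum is computed over everything up to offsetof(..., crc) and stored in place. A self-contained sketch of verify-and-update over such a trailing checksum; the CRC-32 polynomial and seed here are generic, not necessarily what f2fs_crc32() uses:

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct sb {
	uint32_t magic;
	char     label[16];
	uint32_t crc;	/* must stay the last field */
};

/* Plain reflected CRC-32 (poly 0xEDB88320); f2fs seeds its CRC
 * differently, so treat this as illustrative only. */
static uint32_t crc32_buf(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t crc = ~0u;
	size_t i;
	int b;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0 : 0xEDB88320u);
	}
	return ~crc;
}

int main(void)
{
	struct sb sb = { .magic = 0xF2F52010u };

	strcpy(sb.label, "demo");
	/* update: checksum everything before the crc field */
	sb.crc = crc32_buf(&sb, offsetof(struct sb, crc));
	/* verify: recompute and compare */
	printf("valid=%d\n",
	       sb.crc == crc32_buf(&sb, offsetof(struct sb, crc)));
	return 0;
}
```

Note the XOR step above applies the polynomial when the low bit is set; writing it as a ternary keeps the reduction explicit.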
@@ -2866,7 +3155,7 @@ try_onemore:
GFP_KERNEL);
if (!sbi->write_io[i]) {
err = -ENOMEM;
- goto free_options;
+ goto free_bio_info;
}
for (j = HOT; j < n; j++) {
@@ -2909,6 +3198,9 @@ try_onemore:
goto free_meta_inode;
}
+ if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
+ set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+
/* Initialize device list */
err = f2fs_scan_devices(sbi);
if (err) {
@@ -3007,11 +3299,9 @@ try_onemore:
/* Enable quota usage during mount */
if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
err = f2fs_enable_quotas(sb);
- if (err) {
+ if (err)
f2fs_msg(sb, KERN_ERR,
"Cannot turn on quotas: error %d", err);
- goto free_sysfs;
- }
}
#endif
/* if there are any orphan nodes, free them */
@@ -3019,6 +3309,9 @@ try_onemore:
if (err)
goto free_meta;
+ if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
+ goto skip_recovery;
+
/* recover fsynced data */
if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
/*
@@ -3058,6 +3351,14 @@ skip_recovery:
/* f2fs_recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING);
+ if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+ err = f2fs_disable_checkpoint(sbi);
+ if (err)
+ goto free_meta;
+ } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
+ f2fs_enable_checkpoint(sbi);
+ }
+
/*
* If filesystem is not mounted as read-only then
* do start the gc_thread.
@@ -3090,10 +3391,10 @@ skip_recovery:
free_meta:
#ifdef CONFIG_QUOTA
+ f2fs_truncate_quota_inode_pages(sb);
if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
f2fs_quota_off_umount(sbi->sb);
#endif
- f2fs_sync_inode_meta(sbi);
/*
* Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
* failing with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
@@ -3101,9 +3402,6 @@ free_meta:
* falls into an infinite loop in f2fs_sync_meta_pages().
*/
truncate_inode_pages_final(META_MAPPING(sbi));
-#ifdef CONFIG_QUOTA
-free_sysfs:
-#endif
f2fs_unregister_sysfs(sbi);
free_root_inode:
dput(sb->s_root);
@@ -3175,6 +3473,9 @@ static void kill_f2fs_super(struct super_block *sb)
};
f2fs_write_checkpoint(sbi, &cpc);
}
+
+ if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
+ sb->s_flags &= ~SB_RDONLY;
}
kill_block_super(sb);
}
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 81c0e5337443..b777cbdd796b 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs sysfs interface
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
* Copyright (c) 2017 Chao Yu <chao@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/compiler.h>
#include <linux/proc_fs.h>
@@ -120,6 +117,9 @@ static ssize_t features_show(struct f2fs_attr *a,
if (f2fs_sb_has_lost_found(sb))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "lost_found");
+ if (f2fs_sb_has_sb_chksum(sb))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "sb_checksum");
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
@@ -337,6 +337,7 @@ enum feat_id {
FEAT_QUOTA_INO,
FEAT_INODE_CRTIME,
FEAT_LOST_FOUND,
+ FEAT_SB_CHECKSUM,
};
static ssize_t f2fs_feature_show(struct f2fs_attr *a,
@@ -353,6 +354,7 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
case FEAT_QUOTA_INO:
case FEAT_INODE_CRTIME:
case FEAT_LOST_FOUND:
+ case FEAT_SB_CHECKSUM:
return snprintf(buf, PAGE_SIZE, "supported\n");
}
return 0;
@@ -407,6 +409,9 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, discard_idle_interval,
+ interval_time[DISCARD_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
@@ -434,6 +439,7 @@ F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO);
F2FS_FEATURE_RO_ATTR(inode_crtime, FEAT_INODE_CRTIME);
F2FS_FEATURE_RO_ATTR(lost_found, FEAT_LOST_FOUND);
+F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -460,6 +466,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(dirty_nats_ratio),
ATTR_LIST(cp_interval),
ATTR_LIST(idle_interval),
+ ATTR_LIST(discard_idle_interval),
+ ATTR_LIST(gc_idle_interval),
ATTR_LIST(iostat_enable),
ATTR_LIST(readdir_ra),
ATTR_LIST(gc_pin_file_thresh),
@@ -491,6 +499,7 @@ static struct attribute *f2fs_feat_attrs[] = {
ATTR_LIST(quota_ino),
ATTR_LIST(inode_crtime),
ATTR_LIST(lost_found),
+ ATTR_LIST(sb_checksum),
NULL,
};
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
index a1fcd00bbb2b..ce2a5eb210b6 100644
--- a/fs/f2fs/trace.c
+++ b/fs/f2fs/trace.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs IO tracer
*
* Copyright (c) 2014 Motorola Mobility
* Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h
index 67db24ac1e85..e8075fc5b228 100644
--- a/fs/f2fs/trace.h
+++ b/fs/f2fs/trace.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f2fs IO tracer
*
* Copyright (c) 2014 Motorola Mobility
* Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __F2FS_TRACE_H__
#define __F2FS_TRACE_H__
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 77a010e625f5..7261245c208d 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/xattr.c
*
@@ -13,10 +14,6 @@
* suggestion of Luka Renko <luka.renko@hermes.si>.
* xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
* Red Hat Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/rwsem.h>
#include <linux/f2fs_fs.h>
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index dbcd1d16e669..67db134da0f5 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/xattr.h
*
@@ -9,10 +10,6 @@
* On-disk format of extended attributes for the ext2 filesystem.
*
* (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __F2FS_XATTR_H__
#define __F2FS_XATTR_H__
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index defc2168de91..f58c0cacc531 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -682,6 +682,7 @@ int fat_count_free_clusters(struct super_block *sb)
if (ops->ent_get(&fatent) == FAT_ENT_FREE)
free++;
} while (fat_ent_next(sbi, &fatent));
+ cond_resched();
}
sbi->free_clusters = free;
sbi->free_clus_valid = 1;
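fat_count_free_clusters() can walk a very large FAT, and the added cond_resched() lets the scheduler run between blocks so a long scan does not monopolize the CPU or trip soft-lockup warnings. A loose userspace analogue is yielding periodically inside a long loop:

```c
#include <stdio.h>
#include <sched.h>

int main(void)
{
	long i, free_count = 0;

	for (i = 0; i < 100000000L; i++) {
		if ((i & 1) == 0)	/* pretend even entries are free */
			free_count++;
		if ((i & 0xfffff) == 0)
			sched_yield();	/* be polite on long scans */
	}
	printf("free=%ld\n", free_count);
	return 0;
}
```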
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 4137d96534a6..083185174c6d 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -735,7 +735,7 @@ static void send_sigio_to_task(struct task_struct *p,
return;
switch (signum) {
- siginfo_t si;
+ kernel_siginfo_t si;
default:
/* Queue a rt signal with the appropriate fd as its
value. We use SI_SIGIO as the source, not
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 83bfe04456b6..c550512ce335 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -70,20 +70,7 @@ void fscache_free_cookie(struct fscache_cookie *cookie)
}
/*
- * initialise an cookie jar slab element prior to any use
- */
-void fscache_cookie_init_once(void *_cookie)
-{
- struct fscache_cookie *cookie = _cookie;
-
- memset(cookie, 0, sizeof(*cookie));
- spin_lock_init(&cookie->lock);
- spin_lock_init(&cookie->stores_lock);
- INIT_HLIST_HEAD(&cookie->backing_objects);
-}
-
-/*
- * Set the index key in a cookie. The cookie struct has space for a 12-byte
+ * Set the index key in a cookie. The cookie struct has space for a 16-byte
* key plus length and hash, but if that's not big enough, it's instead a
* pointer to a buffer containing 3 bytes of hash, 1 byte of length and then
* the key data.
@@ -93,20 +80,18 @@ static int fscache_set_key(struct fscache_cookie *cookie,
{
unsigned long long h;
u32 *buf;
+ int bufs;
int i;
- cookie->key_len = index_key_len;
+ bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
if (index_key_len > sizeof(cookie->inline_key)) {
- buf = kzalloc(index_key_len, GFP_KERNEL);
+ buf = kcalloc(bufs, sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
cookie->key = buf;
} else {
buf = (u32 *)cookie->inline_key;
- buf[0] = 0;
- buf[1] = 0;
- buf[2] = 0;
}
memcpy(buf, index_key, index_key_len);
@@ -116,7 +101,8 @@ static int fscache_set_key(struct fscache_cookie *cookie,
*/
h = (unsigned long)cookie->parent;
h += index_key_len + cookie->type;
- for (i = 0; i < (index_key_len + sizeof(u32) - 1) / sizeof(u32); i++)
+
+ for (i = 0; i < bufs; i++)
h += buf[i];
cookie->key_hash = h ^ (h >> 32);
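The fscache change works because the hash walks the key as whole u32 words: the buffer is now sized with DIV_ROUND_UP and fully zeroed (kcalloc, or the zeroed inline key), so bytes past the real key length contribute a known zero instead of leftover garbage, and equal keys always hash equally. A sketch of the pad-then-hash idea, with the mixing simplified relative to fscache_set_key():

```c
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Hash a key by summing zero-padded u32 words, then folding. */
static uint32_t key_hash(const void *key, size_t len)
{
	size_t words = DIV_ROUND_UP(len, sizeof(uint32_t));
	uint32_t *buf = calloc(words, sizeof(uint32_t)); /* zeroed tail */
	uint64_t h = len;
	size_t i;

	if (!buf)
		return 0;
	memcpy(buf, key, len);	/* trailing pad bytes stay zero */
	for (i = 0; i < words; i++)
		h += buf[i];
	free(buf);
	return (uint32_t)(h ^ (h >> 32));
}

int main(void)
{
	/* the same 5-byte key now always hashes identically */
	printf("%08x\n", (unsigned)key_hash("hello", 5));
	printf("%08x\n", (unsigned)key_hash("hello", 5));
	return 0;
}
```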
@@ -161,7 +147,7 @@ struct fscache_cookie *fscache_alloc_cookie(
struct fscache_cookie *cookie;
/* allocate and initialise a cookie */
- cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
+ cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
if (!cookie)
return NULL;
@@ -192,6 +178,9 @@ struct fscache_cookie *fscache_alloc_cookie(
cookie->netfs_data = netfs_data;
cookie->flags = (1 << FSCACHE_COOKIE_NO_DATA_YET);
cookie->type = def->type;
+ spin_lock_init(&cookie->lock);
+ spin_lock_init(&cookie->stores_lock);
+ INIT_HLIST_HEAD(&cookie->backing_objects);
/* radix tree insertion won't use the preallocation pool unless it's
* told it may not wait */
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index f83328a7f048..d6209022e965 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -51,7 +51,6 @@ extern struct fscache_cache *fscache_select_cache_for_object(
extern struct kmem_cache *fscache_cookie_jar;
extern void fscache_free_cookie(struct fscache_cookie *);
-extern void fscache_cookie_init_once(void *);
extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *,
const struct fscache_cookie_def *,
const void *, size_t,
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 7dce110bf17d..30ad89db1efc 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -143,9 +143,7 @@ static int __init fscache_init(void)
fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
sizeof(struct fscache_cookie),
- 0,
- 0,
- fscache_cookie_init_once);
+ 0, 0, NULL);
if (!fscache_cookie_jar) {
pr_notice("Failed to allocate a cookie jar\n");
ret = -ENOMEM;
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 03128ed1f34e..a683d9b27d76 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1057,7 +1057,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
}
}
release_metapath(&mp);
- if (gfs2_is_jdata(ip))
+ if (!gfs2_is_stuffed(ip) && gfs2_is_jdata(ip))
iomap->page_done = gfs2_iomap_journaled_page_done;
return 0;
@@ -1566,7 +1566,7 @@ more_rgrps:
continue;
}
if (bstart) {
- __gfs2_free_blocks(ip, bstart, (u32)blen, meta);
+ __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
(*btotal) += blen;
gfs2_add_inode_blocks(&ip->i_inode, -blen);
}
@@ -1574,7 +1574,7 @@ more_rgrps:
blen = 1;
}
if (bstart) {
- __gfs2_free_blocks(ip, bstart, (u32)blen, meta);
+ __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
(*btotal) += blen;
gfs2_add_inode_blocks(&ip->i_inode, -blen);
}
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index e37002560c11..daa14ab4e31b 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -506,7 +506,8 @@ static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
* For now the most important thing is to check that the various sizes
* are correct.
*/
-static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
+static int gfs2_check_dirent(struct gfs2_sbd *sdp,
+ struct gfs2_dirent *dent, unsigned int offset,
unsigned int size, unsigned int len, int first)
{
const char *msg = "gfs2_dirent too small";
@@ -528,12 +529,12 @@ static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
goto error;
return 0;
error:
- pr_warn("%s: %s (%s)\n",
+ fs_warn(sdp, "%s: %s (%s)\n",
__func__, msg, first ? "first in block" : "not first in block");
return -EIO;
}
-static int gfs2_dirent_offset(const void *buf)
+static int gfs2_dirent_offset(struct gfs2_sbd *sdp, const void *buf)
{
const struct gfs2_meta_header *h = buf;
int offset;
@@ -552,7 +553,8 @@ static int gfs2_dirent_offset(const void *buf)
}
return offset;
wrong_type:
- pr_warn("%s: wrong block type %u\n", __func__, be32_to_cpu(h->mh_type));
+ fs_warn(sdp, "%s: wrong block type %u\n", __func__,
+ be32_to_cpu(h->mh_type));
return -1;
}
@@ -566,7 +568,7 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
unsigned size;
int ret = 0;
- ret = gfs2_dirent_offset(buf);
+ ret = gfs2_dirent_offset(GFS2_SB(inode), buf);
if (ret < 0)
goto consist_inode;
@@ -574,7 +576,7 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
prev = NULL;
dent = buf + offset;
size = be16_to_cpu(dent->de_rec_len);
- if (gfs2_check_dirent(dent, offset, size, len, 1))
+ if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1))
goto consist_inode;
do {
ret = scan(dent, name, opaque);
@@ -586,7 +588,8 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
prev = dent;
dent = buf + offset;
size = be16_to_cpu(dent->de_rec_len);
- if (gfs2_check_dirent(dent, offset, size, len, 0))
+ if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size,
+ len, 0))
goto consist_inode;
} while(1);
@@ -1043,7 +1046,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
len = BIT(dip->i_depth - be16_to_cpu(oleaf->lf_depth));
half_len = len >> 1;
if (!half_len) {
- pr_warn("i_depth %u lf_depth %u index %u\n",
+ fs_warn(GFS2_SB(inode), "i_depth %u lf_depth %u index %u\n",
dip->i_depth, be16_to_cpu(oleaf->lf_depth), index);
gfs2_consist_inode(dip);
error = -EIO;
@@ -1351,7 +1354,7 @@ static int gfs2_set_cookies(struct gfs2_sbd *sdp, struct buffer_head *bh,
if (!sdp->sd_args.ar_loccookie)
continue;
offset = (char *)(darr[i]) -
- (bh->b_data + gfs2_dirent_offset(bh->b_data));
+ (bh->b_data + gfs2_dirent_offset(sdp, bh->b_data));
offset /= GFS2_MIN_DIRENT_SIZE;
offset += leaf_nr * sdp->sd_max_dents_per_leaf;
if (offset >= GFS2_USE_HASH_FLAG ||
@@ -2018,7 +2021,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
l_blocks++;
}
- gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
+ gfs2_rlist_alloc(&rlist);
for (x = 0; x < rlist.rl_rgrps; x++) {
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
@@ -2039,6 +2042,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
bh = leaf_bh;
for (blk = leaf_no; blk; blk = nblk) {
+ struct gfs2_rgrpd *rgd;
+
if (blk != leaf_no) {
error = get_leaf(dip, blk, &bh);
if (error)
@@ -2049,7 +2054,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
if (blk != leaf_no)
brelse(bh);
- gfs2_free_meta(dip, blk, 1);
+ rgd = gfs2_blk2rgrpd(sdp, blk, true);
+ gfs2_free_meta(dip, rgd, blk, 1);
gfs2_add_inode_blocks(&dip->i_inode, -1);
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 08369c6cd127..45a17b770d97 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -314,6 +314,17 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
return do_gfs2_set_flags(filp, gfsflags, mask);
}
+static int gfs2_getlabel(struct file *filp, char __user *label)
+{
+ struct inode *inode = file_inode(filp);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+
+ if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
+ return -EFAULT;
+
+ return 0;
+}
+
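With this hook wired into gfs2_ioctl(), userspace can read the filesystem label through the generic FS_IOC_GETFSLABEL ioctl. A minimal caller might look like the following, assuming a kernel whose linux/fs.h exposes FS_IOC_GETFSLABEL; note GFS2 copies out at most GFS2_LOCKNAME_LEN bytes, while the generic buffer size is FSLABEL_MAX:

```c
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FS_IOC_GETFSLABEL, FSLABEL_MAX */

int main(int argc, char **argv)
{
	char label[FSLABEL_MAX] = "";
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <path-on-fs>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FS_IOC_GETFSLABEL, label) < 0) {
		perror("FS_IOC_GETFSLABEL");
		close(fd);
		return 1;
	}
	printf("label: %s\n", label);
	close(fd);
	return 0;
}
```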
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch(cmd) {
@@ -323,7 +334,10 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return gfs2_set_flags(filp, (u32 __user *)arg);
case FITRIM:
return gfs2_fitrim(filp, (void __user *)arg);
+ case FS_IOC_GETFSLABEL:
+ return gfs2_getlabel(filp, (char __user *)arg);
}
+
return -ENOTTY;
}
@@ -347,8 +361,8 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
int hint = min_t(size_t, INT_MAX, blks);
- if (hint > atomic_read(&ip->i_res.rs_sizehint))
- atomic_set(&ip->i_res.rs_sizehint, hint);
+ if (hint > atomic_read(&ip->i_sizehint))
+ atomic_set(&ip->i_sizehint, hint);
}
/**
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4614ee25f621..05431324b262 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -494,7 +494,8 @@ retry:
do_xmote(gl, gh, LM_ST_UNLOCKED);
break;
default: /* Everything else */
- pr_err("wanted %u got %u\n", gl->gl_target, state);
+ fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
+ gl->gl_target, state);
GLOCK_BUG_ON(gl, 1);
}
spin_unlock(&gl->gl_lockref.lock);
@@ -577,7 +578,7 @@ __acquires(&gl->gl_lockref.lock)
gfs2_glock_queue_work(gl, 0);
}
else if (ret) {
- pr_err("lm_lock ret %d\n", ret);
+ fs_err(sdp, "lm_lock ret %d\n", ret);
GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
&sdp->sd_flags));
}
@@ -1064,13 +1065,13 @@ do_cancel:
return;
trap_recursive:
- pr_err("original: %pSR\n", (void *)gh2->gh_ip);
- pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
- pr_err("lock type: %d req lock state : %d\n",
+ fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
+ fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
+ fs_err(sdp, "lock type: %d req lock state : %d\n",
gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
- pr_err("new: %pSR\n", (void *)gh->gh_ip);
- pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
- pr_err("lock type: %d req lock state : %d\n",
+ fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
+ fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
+ fs_err(sdp, "lock type: %d req lock state : %d\n",
gh->gh_gl->gl_name.ln_type, gh->gh_state);
gfs2_dump_glock(NULL, gl);
BUG();
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index b96d39c28e17..888b62cfd6d1 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -92,7 +92,7 @@ struct gfs2_bitmap {
unsigned long bi_flags;
u32 bi_offset;
u32 bi_start;
- u32 bi_len;
+ u32 bi_bytes;
u32 bi_blocks;
};
@@ -309,10 +309,6 @@ struct gfs2_qadata { /* quota allocation data */
*/
struct gfs2_blkreserv {
- /* components used during write (step 1): */
- atomic_t rs_sizehint; /* hint of the write size */
-
- struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
struct rb_node rs_node; /* link to other block reservations */
struct gfs2_rbm rs_rbm; /* Start of reservation */
u32 rs_free; /* how many blocks are still free */
@@ -417,8 +413,10 @@ struct gfs2_inode {
struct gfs2_holder i_iopen_gh;
struct gfs2_holder i_gh; /* for prepare/commit_write only */
struct gfs2_qadata *i_qadata; /* quota allocation data */
+ struct gfs2_holder i_rgd_gh;
struct gfs2_blkreserv i_res; /* rgrp multi-block reservation */
u64 i_goal; /* goal block for allocations */
+ atomic_t i_sizehint; /* hint of the write size */
struct rw_semaphore i_rw_mutex;
struct list_head i_ordered;
struct list_head i_trunc_list;
@@ -623,6 +621,7 @@ enum {
SDF_RORECOVERY = 7, /* read only recovery */
SDF_SKIP_DLM_UNLOCK = 8,
SDF_FORCE_AIL_FLUSH = 9,
+ SDF_AIL1_IO_ERROR = 10,
};
enum gfs2_freeze_state {
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index ac7caa267ed6..31df26ed7854 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -177,14 +177,14 @@ static void gdlm_bast(void *arg, int mode)
gfs2_glock_cb(gl, LM_ST_SHARED);
break;
default:
- pr_err("unknown bast mode %d\n", mode);
+ fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
BUG();
}
}
/* convert gfs lock-state to dlm lock-mode */
-static int make_mode(const unsigned int lmstate)
+static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
{
switch (lmstate) {
case LM_ST_UNLOCKED:
@@ -196,7 +196,7 @@ static int make_mode(const unsigned int lmstate)
case LM_ST_SHARED:
return DLM_LOCK_PR;
}
- pr_err("unknown LM state %d\n", lmstate);
+ fs_err(sdp, "unknown LM state %d\n", lmstate);
BUG();
return -1;
}
@@ -257,7 +257,7 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
u32 lkf;
char strname[GDLM_STRNAME_BYTES] = "";
- req = make_mode(req_state);
+ req = make_mode(gl->gl_name.ln_sbd, req_state);
lkf = make_flags(gl, flags, req);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
@@ -309,7 +309,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
NULL, gl);
if (error) {
- pr_err("gdlm_unlock %x,%llx err=%d\n",
+ fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number, error);
return;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index ee20ea42e7b5..99dd58694ba1 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -108,7 +108,9 @@ __acquires(&sdp->sd_ail_lock)
gfs2_assert(sdp, bd->bd_tr == tr);
if (!buffer_busy(bh)) {
- if (!buffer_uptodate(bh)) {
+ if (!buffer_uptodate(bh) &&
+ !test_and_set_bit(SDF_AIL1_IO_ERROR,
+ &sdp->sd_flags)) {
gfs2_io_error_bh(sdp, bh);
*withdraw = true;
}
@@ -206,7 +208,8 @@ static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
gfs2_assert(sdp, bd->bd_tr == tr);
if (buffer_busy(bh))
continue;
- if (!buffer_uptodate(bh)) {
+ if (!buffer_uptodate(bh) &&
+ !test_and_set_bit(SDF_AIL1_IO_ERROR, &sdp->sd_flags)) {
gfs2_io_error_bh(sdp, bh);
*withdraw = true;
}
@@ -618,7 +621,7 @@ void gfs2_write_revokes(struct gfs2_sbd *sdp)
gfs2_ail1_empty(sdp);
spin_lock(&sdp->sd_ail_lock);
- list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
+ list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
if (list_empty(&bd->bd_list)) {
have_revokes = 1;
@@ -642,7 +645,7 @@ done:
}
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_ail_lock);
- list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
+ list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
if (max_revokes == 0)
goto out_of_blocks;
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index f2567f958d00..4c7069b8f3c1 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -81,7 +81,7 @@ static void maybe_release_space(struct gfs2_bufdata *bd)
if (sdp->sd_args.ar_discard)
gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
memcpy(bi->bi_clone + bi->bi_offset,
- bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
+ bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
clear_bit(GBF_FULL, &bi->bi_flags);
rgd->rd_free_clone = rgd->rd_free;
rgd->rd_extfail_pt = rgd->rd_free;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 2d55e2c3333c..c7603063f861 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -39,9 +39,11 @@ static void gfs2_init_inode_once(void *foo)
struct gfs2_inode *ip = foo;
inode_init_once(&ip->i_inode);
+ atomic_set(&ip->i_sizehint, 0);
init_rwsem(&ip->i_rw_mutex);
INIT_LIST_HEAD(&ip->i_trunc_list);
ip->i_qadata = NULL;
+ gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
memset(&ip->i_res, 0, sizeof(ip->i_res));
RB_CLEAR_NODE(&ip->i_res.rs_node);
ip->i_hash_cache = NULL;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index c2469833b4fb..b041cb8ae383 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -72,13 +72,13 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
if (!sdp)
return NULL;
- sb->s_fs_info = sdp;
sdp->sd_vfs = sb;
sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
if (!sdp->sd_lkstats) {
kfree(sdp);
return NULL;
}
+ sb->s_fs_info = sdp;
set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
gfs2_tune_init(&sdp->sd_tune);
@@ -1333,6 +1333,9 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
struct path path;
int error;
+ if (!dev_name || !*dev_name)
+ return ERR_PTR(-EINVAL);
+
error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
if (error) {
pr_warn("path_lookup on %s returned error %d\n",
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 0efae7a0ee80..2ae5a109eea7 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1183,7 +1183,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
*
* Returns: 0 on success.
* min_req = ap->min_target ? ap->min_target : ap->target;
- * quota must allow atleast min_req blks for success and
+ * quota must allow at least min_req blks for success and
* ap->allowed is set to the number of blocks allowed
*
* -EDQUOT otherwise, quota violation. ap->allowed is set to number
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 1ad3256b9cbc..ffe3032b1043 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -90,7 +90,7 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
{
unsigned char *byte1, *byte2, *end, cur_state;
struct gfs2_bitmap *bi = rbm_bi(rbm);
- unsigned int buflen = bi->bi_len;
+ unsigned int buflen = bi->bi_bytes;
const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
@@ -101,12 +101,16 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
if (unlikely(!valid_change[new_state * 4 + cur_state])) {
- pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
+ struct gfs2_sbd *sdp = rbm->rgd->rd_sbd;
+
+ fs_warn(sdp, "buf_blk = 0x%x old_state=%d, new_state=%d\n",
rbm->offset, cur_state, new_state);
- pr_warn("rgrp=0x%llx bi_start=0x%x\n",
- (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
- pr_warn("bi_offset=0x%x bi_len=0x%x\n",
- bi->bi_offset, bi->bi_len);
+ fs_warn(sdp, "rgrp=0x%llx bi_start=0x%x biblk: 0x%llx\n",
+ (unsigned long long)rbm->rgd->rd_addr, bi->bi_start,
+ (unsigned long long)bi->bi_bh->b_blocknr);
+ fs_warn(sdp, "bi_offset=0x%x bi_bytes=0x%x block=0x%llx\n",
+ bi->bi_offset, bi->bi_bytes,
+ (unsigned long long)gfs2_rbm_to_block(rbm));
dump_stack();
gfs2_consist_rgrpd(rbm->rgd);
return;
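GFS2 packs four block states into each bitmap byte (GFS2_NBBY is 4, two bits per block, mask 3), which is why gfs2_setbit() computes bit = (offset % GFS2_NBBY) * GFS2_BIT_SIZE before masking. A standalone sketch of reading and writing such a packed two-bit state; the constant names mirror the kernel's but are redefined here:

```c
#include <stdio.h>
#include <stdint.h>

#define NBBY	 4	/* block states per byte */
#define BIT_SIZE 2	/* bits per state */
#define BIT_MASK 3

static unsigned get_state(const uint8_t *map, unsigned blk)
{
	unsigned bit = (blk % NBBY) * BIT_SIZE;

	return (map[blk / NBBY] >> bit) & BIT_MASK;
}

static void set_state(uint8_t *map, unsigned blk, unsigned state)
{
	unsigned bit = (blk % NBBY) * BIT_SIZE;
	uint8_t *byte = &map[blk / NBBY];

	*byte = (*byte & ~(BIT_MASK << bit)) | ((state & BIT_MASK) << bit);
}

int main(void)
{
	uint8_t map[4] = { 0 };	/* 16 blocks */

	set_state(map, 5, 2);	/* state 2 is unlinked in GFS2's scheme */
	printf("blk5=%u blk6=%u\n", get_state(map, 5), get_state(map, 6));
	return 0;
}
```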
@@ -269,15 +273,10 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
- u64 rblock = block - rbm->rgd->rd_data0;
-
- if (WARN_ON_ONCE(rblock > UINT_MAX))
- return -EINVAL;
- if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
+ if (!rgrp_contains_block(rbm->rgd, block))
return -E2BIG;
-
rbm->bii = 0;
- rbm->offset = (u32)(rblock);
+ rbm->offset = block - rbm->rgd->rd_data0;
/* Check if the block is within the first block */
if (rbm->offset < rbm_bi(rbm)->bi_blocks)
return 0;
@@ -382,7 +381,7 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
if (bi->bi_clone)
start = bi->bi_clone;
start += bi->bi_offset;
- end = start + bi->bi_len;
+ end = start + bi->bi_bytes;
BUG_ON(rbm.offset & 3);
start += (rbm.offset / GFS2_NBBY);
bytes = min_t(u32, len / GFS2_NBBY, (end - start));
@@ -467,7 +466,7 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
count[x] += gfs2_bitcount(rgd,
bi->bi_bh->b_data +
bi->bi_offset,
- bi->bi_len, x);
+ bi->bi_bytes, x);
}
if (count[0] != rgd->rd_free) {
@@ -642,7 +641,10 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
RB_CLEAR_NODE(&rs->rs_node);
if (rs->rs_free) {
- struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
+ u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
+ rs->rs_free - 1;
+ struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
+ struct gfs2_bitmap *start, *last;
/* return reserved blocks to the rgrp */
BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
@@ -653,7 +655,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
it will force the number to be recalculated later. */
rgd->rd_extfail_pt += rs->rs_free;
rs->rs_free = 0;
- clear_bit(GBF_FULL, &bi->bi_flags);
+ if (gfs2_rbm_from_block(&last_rbm, last_block))
+ return;
+ start = rbm_bi(&rs->rs_rbm);
+ last = rbm_bi(&last_rbm);
+ do
+ clear_bit(GBF_FULL, &start->bi_flags);
+ while (start++ != last);
}
}
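The __rs_deltree() fix is about extent span: a reservation can cross several struct gfs2_bitmap slots, so clearing GBF_FULL only on the first slot could leave later ones marked full and therefore skipped by the allocator. The replacement walks from the bitmap holding the first reserved block to the one holding the last. A sketch of that span computation over fixed-size bitmaps (sizes are uniform here for simplicity; real GFS2 bitmaps are not all equal):

```c
#include <stdio.h>

#define NBITMAPS	8
#define BLKS_PER_BITMAP	100	/* uniform here for simplicity */

static int full_flag[NBITMAPS];

/* Clear the "full" hint on every bitmap the extent touches. */
static void clear_full_span(unsigned long start, unsigned long count)
{
	unsigned long last = start + count - 1;
	unsigned long bi;

	for (bi = start / BLKS_PER_BITMAP;
	     bi <= last / BLKS_PER_BITMAP && bi < NBITMAPS; bi++)
		full_flag[bi] = 0;
}

int main(void)
{
	int i;

	for (i = 0; i < NBITMAPS; i++)
		full_flag[i] = 1;
	clear_full_span(150, 300);	/* blocks 150..449 span bitmaps 1..4 */
	for (i = 0; i < NBITMAPS; i++)
		printf("%d", full_flag[i]);
	printf("\n");
	return 0;
}
```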
@@ -738,11 +746,13 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
- pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
- pr_info("ri_length = %u\n", rgd->rd_length);
- pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
- pr_info("ri_data = %u\n", rgd->rd_data);
- pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
+ struct gfs2_sbd *sdp = rgd->rd_sbd;
+
+ fs_info(sdp, "ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
+ fs_info(sdp, "ri_length = %u\n", rgd->rd_length);
+ fs_info(sdp, "ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
+ fs_info(sdp, "ri_data = %u\n", rgd->rd_data);
+ fs_info(sdp, "ri_bitbytes = %u\n", rgd->rd_bitbytes);
}
/**
@@ -780,21 +790,21 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
bytes = bytes_left;
bi->bi_offset = sizeof(struct gfs2_rgrp);
bi->bi_start = 0;
- bi->bi_len = bytes;
+ bi->bi_bytes = bytes;
bi->bi_blocks = bytes * GFS2_NBBY;
/* header block */
} else if (x == 0) {
bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
bi->bi_offset = sizeof(struct gfs2_rgrp);
bi->bi_start = 0;
- bi->bi_len = bytes;
+ bi->bi_bytes = bytes;
bi->bi_blocks = bytes * GFS2_NBBY;
/* last block */
} else if (x + 1 == length) {
bytes = bytes_left;
bi->bi_offset = sizeof(struct gfs2_meta_header);
bi->bi_start = rgd->rd_bitbytes - bytes_left;
- bi->bi_len = bytes;
+ bi->bi_bytes = bytes;
bi->bi_blocks = bytes * GFS2_NBBY;
/* other blocks */
} else {
@@ -802,7 +812,7 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
sizeof(struct gfs2_meta_header);
bi->bi_offset = sizeof(struct gfs2_meta_header);
bi->bi_start = rgd->rd_bitbytes - bytes_left;
- bi->bi_len = bytes;
+ bi->bi_bytes = bytes;
bi->bi_blocks = bytes * GFS2_NBBY;
}
@@ -814,11 +824,11 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
return -EIO;
}
bi = rgd->rd_bits + (length - 1);
- if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
+ if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) {
if (gfs2_consist_rgrpd(rgd)) {
gfs2_rindex_print(rgd);
fs_err(sdp, "start=%u len=%u offset=%u\n",
- bi->bi_start, bi->bi_len, bi->bi_offset);
+ bi->bi_start, bi->bi_bytes, bi->bi_offset);
}
return -EIO;
}
@@ -1103,12 +1113,35 @@ static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
+ int valid = 1;
- if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
- rgl->rl_dinodes != str->rg_dinodes ||
- rgl->rl_igeneration != str->rg_igeneration)
- return 0;
- return 1;
+ if (rgl->rl_flags != str->rg_flags) {
+ printk(KERN_WARNING "GFS2: rgd: %llu lvb flag mismatch %u/%u",
+ (unsigned long long)rgd->rd_addr,
+ be32_to_cpu(rgl->rl_flags), be32_to_cpu(str->rg_flags));
+ valid = 0;
+ }
+ if (rgl->rl_free != str->rg_free) {
+ printk(KERN_WARNING "GFS2: rgd: %llu lvb free mismatch %u/%u",
+ (unsigned long long)rgd->rd_addr,
+ be32_to_cpu(rgl->rl_free), be32_to_cpu(str->rg_free));
+ valid = 0;
+ }
+ if (rgl->rl_dinodes != str->rg_dinodes) {
+ printk(KERN_WARNING "GFS2: rgd: %llu lvb dinode mismatch %u/%u",
+ (unsigned long long)rgd->rd_addr,
+ be32_to_cpu(rgl->rl_dinodes),
+ be32_to_cpu(str->rg_dinodes));
+ valid = 0;
+ }
+ if (rgl->rl_igeneration != str->rg_igeneration) {
+ printk(KERN_WARNING "GFS2: rgd: %llu lvb igen mismatch "
+ "%llu/%llu", (unsigned long long)rgd->rd_addr,
+ (unsigned long long)be64_to_cpu(rgl->rl_igeneration),
+ (unsigned long long)be64_to_cpu(str->rg_igeneration));
+ valid = 0;
+ }
+ return valid;
}
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
@@ -1122,8 +1155,8 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd)
goal = 0;
buffer = bi->bi_bh->b_data + bi->bi_offset;
WARN_ON(!buffer_uptodate(bi->bi_bh));
- while (goal < bi->bi_len * GFS2_NBBY) {
- goal = gfs2_bitfit(buffer, bi->bi_len, goal,
+ while (goal < bi->bi_blocks) {
+ goal = gfs2_bitfit(buffer, bi->bi_bytes, goal,
GFS2_BLKST_UNLINKED);
if (goal == BFITNOENT)
break;
@@ -1226,7 +1259,7 @@ static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
rl_flags &= ~GFS2_RDF_MASK;
rgd->rd_flags &= GFS2_RDF_MASK;
- rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
+ rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
if (rgd->rd_rgl->rl_unlinked == 0)
rgd->rd_flags &= ~GFS2_RDF_CHECK;
rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
@@ -1295,7 +1328,7 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
u32 trimmed = 0;
u8 diff;
- for (x = 0; x < bi->bi_len; x++) {
+ for (x = 0; x < bi->bi_bytes; x++) {
const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
clone += bi->bi_offset;
clone += x;
@@ -1541,8 +1574,8 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
if (S_ISDIR(inode->i_mode))
extlen = 1;
else {
- extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
- extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
+ extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);
+ extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks);
}
if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
return;
@@ -1728,7 +1761,7 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
buffer = bi->bi_clone + bi->bi_offset;
initial_offset = rbm->offset;
- offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
+ offset = gfs2_bitfit(buffer, bi->bi_bytes, rbm->offset, state);
if (offset == BFITNOENT)
goto bitmap_full;
rbm->offset = offset;
@@ -1999,7 +2032,7 @@ static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
* We try our best to find an rgrp that has at least ap->target blocks
* available. After a couple of passes (loops == 2), the prospects of finding
* such an rgrp diminish. At this stage, we return the first rgrp that has
- * atleast ap->min_target blocks available. Either way, we set ap->allowed to
+ * at least ap->min_target blocks available. Either way, we set ap->allowed to
* the number of blocks available in the chosen rgrp.
*
* Returns: 0 on success,
@@ -2053,7 +2086,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
}
error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
LM_ST_EXCLUSIVE, flags,
- &rs->rs_rgd_gh);
+ &ip->i_rgd_gh);
if (unlikely(error))
return error;
if (!gfs2_rs_active(rs) && (loops < 2) &&
@@ -2062,13 +2095,13 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
if (sdp->sd_args.ar_rgrplvb) {
error = update_rgrp_lvb(rs->rs_rbm.rgd);
if (unlikely(error)) {
- gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
+ gfs2_glock_dq_uninit(&ip->i_rgd_gh);
return error;
}
}
}
- /* Skip unuseable resource groups */
+ /* Skip unusable resource groups */
if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
GFS2_RDF_ERROR)) ||
(loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
@@ -2105,7 +2138,7 @@ skip_rgrp:
/* Unlock rgrp if required */
if (!rg_locked)
- gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
+ gfs2_glock_dq_uninit(&ip->i_rgd_gh);
next_rgrp:
/* Find the next rgrp, and continue looking */
if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
@@ -2142,10 +2175,8 @@ next_rgrp:
void gfs2_inplace_release(struct gfs2_inode *ip)
{
- struct gfs2_blkreserv *rs = &ip->i_res;
-
- if (gfs2_holder_initialized(&rs->rs_rgd_gh))
- gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
+ if (gfs2_holder_initialized(&ip->i_rgd_gh))
+ gfs2_glock_dq_uninit(&ip->i_rgd_gh);
}
/**
@@ -2184,27 +2215,21 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
/**
* rgblk_free - Change alloc state of given block(s)
* @sdp: the filesystem
+ * @rgd: the resource group the blocks are in
* @bstart: the start of a run of blocks to free
* @blen: the length of the block run (all must lie within ONE RG!)
* @new_state: GFS2_BLKST_XXX the after-allocation block state
- *
- * Returns: Resource group containing the block(s)
*/
-static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
- u32 blen, unsigned char new_state)
+static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen, unsigned char new_state)
{
struct gfs2_rbm rbm;
struct gfs2_bitmap *bi, *bi_prev = NULL;
- rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
- if (!rbm.rgd) {
- if (gfs2_consist(sdp))
- fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
- return NULL;
- }
-
- gfs2_rbm_from_block(&rbm, bstart);
+ rbm.rgd = rgd;
+ if (WARN_ON_ONCE(gfs2_rbm_from_block(&rbm, bstart)))
+ return;
while (blen--) {
bi = rbm_bi(&rbm);
if (bi != bi_prev) {
@@ -2213,7 +2238,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
GFP_NOFS | __GFP_NOFAIL);
memcpy(bi->bi_clone + bi->bi_offset,
bi->bi_bh->b_data + bi->bi_offset,
- bi->bi_len);
+ bi->bi_bytes);
}
gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
bi_prev = bi;
@@ -2221,8 +2246,6 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
gfs2_setbit(&rbm, false, new_state);
gfs2_rbm_incr(&rbm);
}
-
- return rbm.rgd;
}
/**
@@ -2244,6 +2267,14 @@ void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
(unsigned long long)rgd->rd_addr, rgd->rd_flags,
rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
rgd->rd_reserved, rgd->rd_extfail_pt);
+ if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
+ struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
+
+ gfs2_print_dbg(seq, " L: f:%02x b:%u i:%u\n",
+ be32_to_cpu(rgl->rl_flags),
+ be32_to_cpu(rgl->rl_free),
+ be32_to_cpu(rgl->rl_dinodes));
+ }
spin_lock(&rgd->rd_rsspin);
for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
@@ -2295,7 +2326,7 @@ static void gfs2_adjust_reservation(struct gfs2_inode *ip,
goto out;
/* We used up our block reservation, so we should
reserve more blocks next time. */
- atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
+ atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint);
}
__rs_deltree(rs);
}
@@ -2329,7 +2360,10 @@ static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
else
goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
- gfs2_rbm_from_block(rbm, goal);
+ if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) {
+ rbm->bii = 0;
+ rbm->offset = 0;
+ }
}
/**
@@ -2392,7 +2426,7 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
}
}
if (rbm.rgd->rd_free < *nblocks) {
- pr_warn("nblocks=%u\n", *nblocks);
+ fs_warn(sdp, "nblocks=%u\n", *nblocks);
goto rgrp_error;
}
@@ -2427,20 +2461,19 @@ rgrp_error:
/**
* __gfs2_free_blocks - free a contiguous run of block(s)
* @ip: the inode these blocks are being freed from
+ * @rgd: the resource group the blocks are in
* @bstart: first block of a run of contiguous blocks
* @blen: the length of the block run
* @meta: 1 if the blocks represent metadata
*
*/
-void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
+void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen, int meta)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_rgrpd *rgd;
- rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
- if (!rgd)
- return;
+ rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE);
trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
rgd->rd_free += blen;
rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
@@ -2455,16 +2488,18 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
/**
* gfs2_free_meta - free a contiguous run of data block(s)
* @ip: the inode these blocks are being freed from
+ * @rgd: the resource group the blocks are in
* @bstart: first block of a run of contiguous blocks
* @blen: the length of the block run
*
*/
-void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
+void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- __gfs2_free_blocks(ip, bstart, blen, 1);
+ __gfs2_free_blocks(ip, rgd, bstart, blen, 1);
gfs2_statfs_change(sdp, 0, +blen, 0);
gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
@@ -2476,9 +2511,10 @@ void gfs2_unlink_di(struct inode *inode)
struct gfs2_rgrpd *rgd;
u64 blkno = ip->i_no_addr;
- rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
+ rgd = gfs2_blk2rgrpd(sdp, blkno, true);
if (!rgd)
return;
+ rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
@@ -2488,13 +2524,8 @@ void gfs2_unlink_di(struct inode *inode)
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = rgd->rd_sbd;
- struct gfs2_rgrpd *tmp_rgd;
-
- tmp_rgd = rgblk_free(sdp, ip->i_no_addr, 1, GFS2_BLKST_FREE);
- if (!tmp_rgd)
- return;
- gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
+ rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
if (!rgd->rd_dinodes)
gfs2_consist_rgrpd(rgd);
rgd->rd_dinodes--;
@@ -2538,7 +2569,8 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
rbm.rgd = rgd;
error = gfs2_rbm_from_block(&rbm, no_addr);
- WARN_ON_ONCE(error != 0);
+ if (WARN_ON_ONCE(error))
+ goto fail;
if (gfs2_testbit(&rbm, false) != type)
error = -ESTALE;
@@ -2624,13 +2656,12 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
* gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
* and initialize an array of glock holders for them
* @rlist: the list of resource groups
- * @state: the lock state to acquire the RG lock in
*
* FIXME: Don't use NOFAIL
*
*/
-void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
+void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
{
unsigned int x;
@@ -2639,7 +2670,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
GFP_NOFS | __GFP_NOFAIL);
for (x = 0; x < rlist->rl_rgrps; x++)
gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
- state, 0,
+ LM_ST_EXCLUSIVE, 0,
&rlist->rl_ghs[x]);
}
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index e90478e2f545..b596c3d17988 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -18,8 +18,7 @@
* By reserving 32 blocks at a time, we can optimize / shortcut how we search
* through the bitmaps by looking a word at a time.
*/
-#define RGRP_RSRV_MINBYTES 8
-#define RGRP_RSRV_MINBLKS ((u32)(RGRP_RSRV_MINBYTES * GFS2_NBBY))
+#define RGRP_RSRV_MINBLKS 32
#define RGRP_RSRV_ADDBLKS 64
struct gfs2_rgrpd;
@@ -52,8 +51,10 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
extern int gfs2_rsqa_alloc(struct gfs2_inode *ip);
extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
extern void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount);
-extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
-extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
+extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen, int meta);
+extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+ u64 bstart, u32 blen);
extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
extern void gfs2_unlink_di(struct inode *inode);
extern int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
@@ -68,7 +69,7 @@ struct gfs2_rgrp_list {
extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
u64 block);
-extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state);
+extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist);
extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
extern void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl);
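The rgrp changes above thread a struct gfs2_rgrpd * through __gfs2_free_blocks() and gfs2_free_meta(), so each caller resolves the resource group once and passes it down instead of every helper repeating the block-to-rgrp lookup via rgblk_free(). Below is a minimal userspace sketch of the same "hoist the lookup to the call site" refactor; all names here (region, region_of, free_range) are hypothetical stand-ins, not GFS2 code.

/* Illustrative only: resolve a container once, reuse it in every helper. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct region { uint64_t start, len, free; };

static struct region regions[2] = {
	{ .start = 0,    .len = 1000, .free = 1000 },
	{ .start = 1000, .len = 1000, .free = 1000 },
};

/* The old style repeated this lookup inside every free helper. */
static struct region *region_of(uint64_t block)
{
	for (size_t i = 0; i < 2; i++)
		if (block >= regions[i].start &&
		    block < regions[i].start + regions[i].len)
			return &regions[i];
	return NULL;
}

/* New style: the caller passes the region it already holds. */
static void free_range(struct region *r, uint64_t start, uint32_t len)
{
	r->free += len;
	printf("freed %u blocks at %llu, region free=%llu\n",
	       (unsigned)len, (unsigned long long)start,
	       (unsigned long long)r->free);
}

int main(void)
{
	uint64_t block = 1234;
	struct region *r = region_of(block);	/* one lookup... */

	if (r) {
		free_range(r, block, 2);	/* ...reused by every helper */
		free_range(r, block + 2, 3);
	}
	return 0;
}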
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index c212893534ed..ca71163ff7cf 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -854,10 +854,10 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
return error;
+ flush_workqueue(gfs2_delete_workqueue);
kthread_stop(sdp->sd_quotad_process);
kthread_stop(sdp->sd_logd_process);
- flush_workqueue(gfs2_delete_workqueue);
gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_statfs_sync(sdp->sd_vfs, 0);
@@ -971,7 +971,7 @@ void gfs2_freeze_func(struct work_struct *work)
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
&freeze_gh);
if (error) {
- printk(KERN_INFO "GFS2: couln't get freeze lock : %d\n", error);
+ printk(KERN_INFO "GFS2: couldn't get freeze lock : %d\n", error);
gfs2_assert_withdraw(sdp, 0);
}
else {
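The super.c hunk moves flush_workqueue(gfs2_delete_workqueue) ahead of the kthread_stop() calls, so queued delete work drains while the quota and log daemons it may still depend on are running. The general ordering rule -- finish outstanding work before tearing down the services it uses -- in a minimal pthread sketch (not GFS2 code; the names are illustrative):

/* Shutdown-ordering sketch: drain pending work first, stop the service
 * it depends on second. Reversing the two calls in main() would let the
 * work item observe a dead service. Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>

static int service_up = 1;            /* stands in for quotad/logd */
static pthread_t work;

static void *work_fn(void *arg)
{
	(void)arg;
	printf("work ran, service_up=%d\n", service_up);  /* must be 1 */
	return NULL;
}

static void drain_work(void)          /* like flush_workqueue() */
{
	pthread_join(work, NULL);
}

static void stop_service(void)        /* like kthread_stop() */
{
	service_up = 0;
}

int main(void)
{
	pthread_create(&work, NULL, work_fn, NULL);

	drain_work();                 /* 1: wait for queued work */
	stop_service();               /* 2: only then stop the daemon */
	return 0;
}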
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 064c9a0ef046..423bc2d03dd8 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -74,13 +74,13 @@ fail:
return error;
}
-static void gfs2_print_trans(const struct gfs2_trans *tr)
+static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
{
- pr_warn("Transaction created at: %pSR\n", (void *)tr->tr_ip);
- pr_warn("blocks=%u revokes=%u reserved=%u touched=%u\n",
+ fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip);
+ fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
test_bit(TR_TOUCHED, &tr->tr_flags));
- pr_warn("Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
+ fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
tr->tr_num_buf_new, tr->tr_num_buf_rm,
tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
tr->tr_num_revoke, tr->tr_num_revoke_rm);
@@ -109,7 +109,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) &&
(tr->tr_num_revoke <= tr->tr_revokes)))
- gfs2_print_trans(tr);
+ gfs2_print_trans(sdp, tr);
gfs2_log_commit(sdp, tr);
if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags))
@@ -225,12 +225,13 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
- pr_err("Attempting to add uninitialised block to journal (inplace block=%lld)\n",
+ fs_err(sdp, "Attempting to add uninitialised block to "
+ "journal (inplace block=%lld)\n",
(unsigned long long)bd->bd_bh->b_blocknr);
BUG();
}
if (unlikely(state == SFS_FROZEN)) {
- printk(KERN_INFO "GFS2:adding buf while frozen\n");
+ fs_info(sdp, "GFS2:adding buf while frozen\n");
gfs2_assert_withdraw(sdp, 0);
}
gfs2_pin(sdp, bd->bd_bh);
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 59c811de0dc7..0a814ccac41d 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -19,6 +19,7 @@
#include "gfs2.h"
#include "incore.h"
#include "glock.h"
+#include "rgrp.h"
#include "util.h"
struct kmem_cache *gfs2_glock_cachep __read_mostly;
@@ -181,6 +182,8 @@ int gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, int cluster_wide,
{
struct gfs2_sbd *sdp = rgd->rd_sbd;
int rv;
+
+ gfs2_rgrp_dump(NULL, rgd->rd_gl);
rv = gfs2_lm_withdraw(sdp,
"fatal: filesystem consistency error\n"
" RG = %llu\n"
@@ -256,12 +259,13 @@ void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
const char *function, char *file, unsigned int line,
bool withdraw)
{
- fs_err(sdp,
- "fatal: I/O error\n"
- " block = %llu\n"
- " function = %s, file = %s, line = %u\n",
- (unsigned long long)bh->b_blocknr,
- function, file, line);
+ if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
+ fs_err(sdp,
+ "fatal: I/O error\n"
+ " block = %llu\n"
+ " function = %s, file = %s, line = %u\n",
+ (unsigned long long)bh->b_blocknr,
+ function, file, line);
if (withdraw)
gfs2_lm_withdraw(sdp, NULL);
}
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 96ac4aba4738..9278fecba632 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -86,7 +86,7 @@ static inline int gfs2_meta_check(struct gfs2_sbd *sdp,
struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
u32 magic = be32_to_cpu(mh->mh_magic);
if (unlikely(magic != GFS2_MAGIC)) {
- pr_err("Magic number missing at %llu\n",
+ fs_err(sdp, "Magic number missing at %llu\n",
(unsigned long long)bh->b_blocknr);
return -EIO;
}
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 38515988aaf7..996c915a9c97 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -283,7 +283,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
blen++;
else {
if (bstart)
- gfs2_free_meta(ip, bstart, blen);
+ gfs2_free_meta(ip, rgd, bstart, blen);
bstart = bn;
blen = 1;
}
@@ -292,7 +292,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
gfs2_add_inode_blocks(&ip->i_inode, -1);
}
if (bstart)
- gfs2_free_meta(ip, bstart, blen);
+ gfs2_free_meta(ip, rgd, bstart, blen);
if (prev && !leave) {
u32 len;
@@ -1250,6 +1250,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrp_list rlist;
+ struct gfs2_rgrpd *rgd;
struct buffer_head *indbh, *dibh;
__be64 *eablk, *end;
unsigned int rg_blocks = 0;
@@ -1299,11 +1300,10 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
else
goto out;
- gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
+ gfs2_rlist_alloc(&rlist);
for (x = 0; x < rlist.rl_rgrps; x++) {
- struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
-
+ rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
rg_blocks += rgd->rd_length;
}
@@ -1320,6 +1320,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
bstart = 0;
+ rgd = NULL;
blen = 0;
for (; eablk < end; eablk++) {
@@ -1333,8 +1334,9 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
blen++;
else {
if (bstart)
- gfs2_free_meta(ip, bstart, blen);
+ gfs2_free_meta(ip, rgd, bstart, blen);
bstart = bn;
+ rgd = gfs2_blk2rgrpd(sdp, bstart, true);
blen = 1;
}
@@ -1342,7 +1344,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
gfs2_add_inode_blocks(&ip->i_inode, -1);
}
if (bstart)
- gfs2_free_meta(ip, bstart, blen);
+ gfs2_free_meta(ip, rgd, bstart, blen);
ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;
@@ -1391,7 +1393,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
if (error)
goto out_gunlock;
- gfs2_free_meta(ip, ip->i_eattr, 1);
+ gfs2_free_meta(ip, rgd, ip->i_eattr, 1);
ip->i_eattr = 0;
gfs2_add_inode_blocks(&ip->i_inode, -1);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 3212c29235ce..2005529af560 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -230,7 +230,7 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
ret = -EXDEV;
if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
goto fdput;
- ret = do_clone_file_range(src_file.file, off, dst_file, destoff, olen);
+ ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen);
fdput:
fdput(src_file);
return ret;
diff --git a/fs/iomap.c b/fs/iomap.c
index 74762b1ec233..ec15cf2ec696 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1051,6 +1051,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
} else {
WARN_ON_ONCE(!PageUptodate(page));
iomap_page_create(inode, page);
+ set_page_dirty(page);
}
return length;
@@ -1090,7 +1091,6 @@ int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
length -= ret;
}
- set_page_dirty(page);
wait_for_stable_page(page);
return VM_FAULT_LOCKED;
out_unlock:
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index c125d662777c..26f8d7e46462 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -251,8 +251,8 @@ restart:
bh = jh2bh(jh);
if (buffer_locked(bh)) {
- spin_unlock(&journal->j_list_lock);
get_bh(bh);
+ spin_unlock(&journal->j_list_lock);
wait_on_buffer(bh);
/* the journal_head may have gone by now */
BUFFER_TRACE(bh, "brelse");
@@ -333,8 +333,8 @@ restart2:
jh = transaction->t_checkpoint_io_list;
bh = jh2bh(jh);
if (buffer_locked(bh)) {
- spin_unlock(&journal->j_list_lock);
get_bh(bh);
+ spin_unlock(&journal->j_list_lock);
wait_on_buffer(bh);
/* the journal_head may have gone by now */
BUFFER_TRACE(bh, "brelse");
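Both jbd2 hunks move get_bh(bh) to before spin_unlock(&journal->j_list_lock): the reference must be taken while the lock still pins the buffer, otherwise it could be freed in the window between unlock and get. A compact userspace analogue of the same rule, using a pthread mutex and a manual refcount (all names hypothetical; a real multi-threaded version would use atomics for the count):

/* "Take a reference under the lock, then drop the lock" -- not jbd2 code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; int data; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *shared;

static void obj_get(struct obj *o) { o->refs++; }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		free(o);
}

int main(void)
{
	shared = calloc(1, sizeof(*shared));
	shared->refs = 1;
	shared->data = 42;

	pthread_mutex_lock(&list_lock);
	struct obj *o = shared;
	obj_get(o);                       /* reference taken under the lock */
	pthread_mutex_unlock(&list_lock); /* now safe: o cannot be freed */

	printf("data=%d refs=%d\n", o->data, o->refs);
	obj_put(o);   /* drop our temporary reference */
	obj_put(o);   /* drop the original reference; frees the object */
	return 0;
}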
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 453a6a1fff34..2b4d5013dc5d 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -125,7 +125,7 @@ static int jffs2_garbage_collect_thread(void *_c)
if (try_to_freeze())
goto again;
- signr = kernel_dequeue_signal(NULL);
+ signr = kernel_dequeue_signal();
switch(signr) {
case SIGSTOP:
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 87bdf0f4cba1..902a7dd10e5c 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -285,10 +285,8 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_fs_info = c;
ret = jffs2_parse_options(c, data);
- if (ret) {
- kfree(c);
+ if (ret)
return -EINVAL;
- }
/* Initialize JFFS2 superblock locks, the further initialization will
* be done later */
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 2e71b6e7e646..8c06a6ea862d 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -146,12 +146,16 @@ int jfs_init_acl(tid_t tid, struct inode *inode, struct inode *dir)
if (default_acl) {
rc = __jfs_set_acl(tid, inode, ACL_TYPE_DEFAULT, default_acl);
posix_acl_release(default_acl);
+ } else {
+ inode->i_default_acl = NULL;
}
if (acl) {
if (!rc)
rc = __jfs_set_acl(tid, inode, ACL_TYPE_ACCESS, acl);
posix_acl_release(acl);
+ } else {
+ inode->i_acl = NULL;
}
JFS_IP(inode)->mode2 = (JFS_IP(inode)->mode2 & 0xffff0000) |
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 054cc761b426..805ae9e8944a 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -166,7 +166,6 @@ void jfs_evict_inode(struct inode *inode)
/*
* Free the inode from the quota allocation.
*/
- dquot_initialize(inode);
dquot_free_inode(inode);
}
} else {
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 09da5cf14e27..65d8fc87ab11 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -247,7 +247,7 @@ static const match_table_t tokens = {
{Opt_resize_nosize, "resize"},
{Opt_errors, "errors=%s"},
{Opt_ignore, "noquota"},
- {Opt_ignore, "quota"},
+ {Opt_quota, "quota"},
{Opt_usrquota, "usrquota"},
{Opt_grpquota, "grpquota"},
{Opt_uid, "uid=%u"},
diff --git a/fs/namespace.c b/fs/namespace.c
index 99186556f8d3..d86830c86ce8 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2642,6 +2642,7 @@ static long exact_copy_from_user(void *to, const void __user * from,
if (!access_ok(VERIFY_READ, from, n))
return n;
+ current->kernel_uaccess_faults_ok++;
while (n) {
if (__get_user(c, f)) {
memset(t, 0, n);
@@ -2651,6 +2652,7 @@ static long exact_copy_from_user(void *to, const void __user * from,
f++;
n--;
}
+ current->kernel_uaccess_faults_ok--;
return n;
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 55a099e47ba2..b53e76391e52 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -541,7 +541,8 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
u64 dst_pos, u64 count)
{
- return nfserrno(do_clone_file_range(src, src_pos, dst, dst_pos, count));
+ return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
+ count));
}
ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d9ebe11c8990..1d098c3c00e0 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
* for this bh as it's not marked locally
* uptodate. */
status = -EIO;
+ clear_buffer_needs_validate(bh);
put_bh(bh);
bhs[i] = NULL;
continue;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index aaca0949fe53..826f0567ec43 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -584,9 +584,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
res->last_used = 0;
- spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->track_lock);
list_add_tail(&res->tracking, &dlm->tracking_list);
- spin_unlock(&dlm->spinlock);
+ spin_unlock(&dlm->track_lock);
memset(res->lvb, 0, DLM_LVB_LEN);
memset(res->refmap, 0, sizeof(res->refmap));
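The dlmmaster fix switches the res->tracking insertion from the broad dlm->spinlock to dlm->track_lock, the lock the rest of the code already uses for tracking_list; a list guarded by two different locks is effectively unprotected. A small sketch of the "one lock per data structure, used everywhere" rule, with hypothetical names:

/* Every access to `tracking` must take track_lock -- mixing in another
 * lock (as the removed lines did) provides no mutual exclusion. */
#include <pthread.h>

struct node { struct node *next; };

static pthread_mutex_t track_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *tracking;          /* guarded by track_lock only */

static void track_add(struct node *n)
{
	pthread_mutex_lock(&track_lock);
	n->next = tracking;
	tracking = n;
	pthread_mutex_unlock(&track_lock);
}

int main(void)
{
	struct node a = { 0 }, b = { 0 };
	track_add(&a);
	track_add(&b);
	return 0;
}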
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 8e712b614e6e..933aac5da193 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -96,7 +96,9 @@ struct ocfs2_unblock_ctl {
};
/* Lockdep class keys */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
+#endif
static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
int new_level);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 7869622af22a..7a5ee145c733 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2946,6 +2946,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
if (map_end & (PAGE_SIZE - 1))
to = map_end & (PAGE_SIZE - 1);
+retry:
page = find_or_create_page(mapping, page_index, GFP_NOFS);
if (!page) {
ret = -ENOMEM;
@@ -2954,11 +2955,18 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
}
/*
- * In case PAGE_SIZE <= CLUSTER_SIZE, This page
- * can't be dirtied before we CoW it out.
+ * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
+ * page, so write it back.
*/
- if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
- BUG_ON(PageDirty(page));
+ if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
+ if (PageDirty(page)) {
+ /*
+ * write_one_page will unlock the page on return
+ */
+ ret = write_one_page(page);
+ goto retry;
+ }
+ }
if (!PageUptodate(page)) {
ret = block_read_full_page(page, ocfs2_get_block);
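The refcounttree change replaces BUG_ON(PageDirty(page)) with a recovery path: if the page is unexpectedly dirty, write it back (write_one_page() unlocks it) and jump back to retry: to look it up again. The general shape, as a hedged userspace sketch with hypothetical is_dirty()/flush() stand-ins:

/* "Recover and retry instead of BUG on an unexpected state." */
#include <stdbool.h>
#include <stdio.h>

static int dirty = 1;

static bool is_dirty(void) { return dirty; }
static void flush(void)    { dirty = 0; }  /* also releases, like write_one_page */

int main(void)
{
retry:
	/* acquire the resource here */
	if (is_dirty()) {
		flush();        /* put it into the expected state... */
		goto retry;     /* ...then start over from the lookup */
	}
	puts("resource clean, proceeding");
	return 0;
}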
diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
index 10587413b20e..72d2ff17d27b 100644
--- a/fs/orangefs/acl.c
+++ b/fs/orangefs/acl.c
@@ -167,12 +167,16 @@ int orangefs_init_acl(struct inode *inode, struct inode *dir)
error = __orangefs_set_acl(inode, default_acl,
ACL_TYPE_DEFAULT);
posix_acl_release(default_acl);
+ } else {
+ inode->i_default_acl = NULL;
}
if (acl) {
if (!error)
error = __orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
posix_acl_release(acl);
+ } else {
+ inode->i_acl = NULL;
}
/* If mode of the inode was changed, then do a forcible ->setattr */
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 31932879b716..5e65d818937b 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -405,7 +405,11 @@ struct inode *orangefs_iget(struct super_block *sb,
orangefs_test_inode,
orangefs_set_inode,
ref);
- if (!inode || !(inode->i_state & I_NEW))
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (!(inode->i_state & I_NEW))
return inode;
error = orangefs_inode_getattr(inode, 1, 1, STATX_ALL);
@@ -448,7 +452,7 @@ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
inode = new_inode(sb);
if (!inode)
- return NULL;
+ return ERR_PTR(-ENOMEM);
orangefs_set_inode(inode, ref);
inode->i_ino = hash; /* needed for stat etc */
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 625b0580f9be..c8676c996249 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -58,7 +58,6 @@ static int orangefs_create(struct inode *dir,
goto out;
ref = new_op->downcall.resp.create.refn;
- op_release(new_op);
inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0, &ref);
if (IS_ERR(inode)) {
@@ -92,6 +91,7 @@ static int orangefs_create(struct inode *dir,
mark_inode_dirty_sync(dir);
ret = 0;
out:
+ op_release(new_op);
gossip_debug(GOSSIP_NAME_DEBUG,
"%s: %pd: returning %d\n",
__func__,
@@ -157,7 +157,7 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
new_op->downcall.resp.lookup.refn.fs_id,
ret);
- if (ret >= 0) {
+ if (ret == 0) {
orangefs_set_timeout(dentry);
inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
} else if (ret == -ENOENT) {
@@ -269,7 +269,6 @@ static int orangefs_symlink(struct inode *dir,
}
ref = new_op->downcall.resp.sym.refn;
- op_release(new_op);
inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0, &ref);
if (IS_ERR(inode)) {
@@ -307,6 +306,7 @@ static int orangefs_symlink(struct inode *dir,
mark_inode_dirty_sync(dir);
ret = 0;
out:
+ op_release(new_op);
return ret;
}
@@ -346,7 +346,6 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
}
ref = new_op->downcall.resp.mkdir.refn;
- op_release(new_op);
inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0, &ref);
if (IS_ERR(inode)) {
@@ -379,6 +378,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
orangefs_inode_setattr(dir, &iattr);
mark_inode_dirty_sync(dir);
out:
+ op_release(new_op);
return ret;
}
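The three orangefs hunks above (create, symlink, mkdir) stop releasing new_op on the success path only and instead release it once at the shared out: label, so every return path -- success or failure -- frees the op. That is the classic single-exit cleanup shape in C, sketched below with illustrative names:

/* Single-exit cleanup: resources are released once, at the label every
 * path funnels through. */
#include <stdio.h>
#include <stdlib.h>

static int do_create(int fail)
{
	int ret = 0;
	char *op = malloc(64);     /* stands in for the request op */

	if (!op)
		return -1;

	if (fail) {                /* any mid-function failure... */
		ret = -1;
		goto out;          /* ...jumps to the common cleanup */
	}

	printf("created\n");
out:
	free(op);                  /* runs on success and on error */
	return ret;
}

int main(void)
{
	do_create(0);
	return do_create(1) ? 0 : 1;
}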
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
index dd28079f518c..19739aaee675 100644
--- a/fs/orangefs/orangefs-sysfs.c
+++ b/fs/orangefs/orangefs-sysfs.c
@@ -323,7 +323,7 @@ static ssize_t sysfs_service_op_show(struct kobject *kobj,
/* Can't do a service_operation if the client is not running... */
rc = is_daemon_in_service();
if (rc) {
- pr_info("%s: Client not running :%d:\n",
+ pr_info_ratelimited("%s: Client not running :%d:\n",
__func__,
is_daemon_in_service());
goto out;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 296037afecdb..1cc797a08a5b 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -141,7 +141,7 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
}
/* Try to use clone_file_range to clone up within the same fs */
- error = vfs_clone_file_range(old_file, 0, new_file, 0, len);
+ error = do_clone_file_range(old_file, 0, new_file, 0, len);
if (!error)
goto out;
/* Couldn't clone, so now we try to copy the data */
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index aeaefd2a551b..986313da0c88 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -240,8 +240,10 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
goto out_unlock;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ file_start_write(real.file);
ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
ovl_iocb_to_rwf(iocb));
+ file_end_write(real.file);
revert_creds(old_cred);
/* Update size */
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index b6ac545b5a32..3b7ed5d2279c 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -504,7 +504,7 @@ static const struct inode_operations ovl_special_inode_operations = {
.update_time = ovl_update_time,
};
-const struct address_space_operations ovl_aops = {
+static const struct address_space_operations ovl_aops = {
/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
.direct_IO = noop_direct_IO,
};
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index f28711846dd6..9c0ca6a7becf 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -686,7 +686,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
index = NULL;
goto out;
}
- pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
+ pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
"overlayfs: mount with '-o index=off' to disable inodes index.\n",
d_inode(origin)->i_ino, name.len, name.name,
err);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index f61839e1054c..a3c0d9584312 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -152,8 +152,8 @@ static inline int ovl_do_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
int err = vfs_setxattr(dentry, name, value, size, flags);
- pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n",
- dentry, name, (int) size, (char *) value, flags, err);
+ pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0x%x) = %i\n",
+ dentry, name, min((int)size, 48), value, size, flags, err);
return err;
}
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 8cfb62cc8672..ace4fe4c39a9 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -683,7 +683,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
struct dentry *upperdentry = ovl_dentry_upper(dentry);
struct dentry *index = NULL;
struct inode *inode;
- struct qstr name;
+ struct qstr name = { };
int err;
err = ovl_get_index_name(lowerdentry, &name);
@@ -726,6 +726,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
goto fail;
out:
+ kfree(name.name);
dput(index);
return;
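In ovl_cleanup_index() the fix is twofold: name is zero-initialized (struct qstr name = { };) and kfree(name.name) runs at out:. Because kfree(NULL) is a no-op, zero-initializing makes the unconditional free safe on paths where ovl_get_index_name() never filled name, and fixes the leak on paths where it did. The same idiom in portable C -- free(NULL) is likewise defined to do nothing:

/* Zero-init plus one unconditional free at the exit label. */
#include <stdlib.h>
#include <string.h>

static int cleanup_index(int fail_early)
{
	char *name = NULL;         /* like: struct qstr name = { }; */
	int err = 0;

	if (fail_early) {
		err = -1;
		goto out;          /* name is still NULL: free is safe */
	}

	name = malloc(16);
	if (!name) {
		err = -1;
		goto out;
	}
	strcpy(name, "index-key");
	/* ... use name ... */
out:
	free(name);                /* one release point, leak-free */
	return err;
}

int main(void)
{
	cleanup_index(1);
	return cleanup_index(0);
}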
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ccf86f16d9f0..7e9f07bf260d 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -407,6 +407,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
unsigned long *entries;
int err;
+ /*
+ * The ability to racily run the kernel stack unwinder on a running task
+ * and then observe the unwinder output is scary; while it is useful for
+ * debugging kernel issues, it can also allow an attacker to leak kernel
+ * stack contents.
+ * Doing this in a manner that is at least safe from races would require
+ * some work to ensure that the remote task can not be scheduled; and
+ * even then, this would still expose the unwinder as local attack
+ * surface.
+ * Therefore, this interface is restricted to root.
+ */
+ if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+ return -EACCES;
+
entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),
GFP_KERNEL);
if (!entries)
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index ad72261ee3fe..d297fe4472a9 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -464,6 +464,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
ret = -EFAULT;
goto out;
}
+ m = NULL; /* skip the list anchor */
} else if (m->type == KCORE_VMALLOC) {
vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index cbde728f8ac6..91ae16fbd7d5 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -24,6 +24,8 @@
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
+#include <linux/mem_encrypt.h>
+#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"
@@ -98,7 +100,8 @@ static int pfn_is_ram(unsigned long pfn)
/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf)
+ u64 *ppos, int userbuf,
+ bool encrypted)
{
unsigned long pfn, offset;
size_t nr_bytes;
@@ -120,8 +123,15 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
if (pfn_is_ram(pfn) == 0)
memset(buf, 0, nr_bytes);
else {
- tmp = copy_oldmem_page(pfn, buf, nr_bytes,
- offset, userbuf);
+ if (encrypted)
+ tmp = copy_oldmem_page_encrypted(pfn, buf,
+ nr_bytes,
+ offset,
+ userbuf);
+ else
+ tmp = copy_oldmem_page(pfn, buf, nr_bytes,
+ offset, userbuf);
+
if (tmp < 0)
return tmp;
}
@@ -155,7 +165,7 @@ void __weak elfcorehdr_free(unsigned long long addr)
*/
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
- return read_from_oldmem(buf, count, ppos, 0);
+ return read_from_oldmem(buf, count, ppos, 0, false);
}
/*
@@ -163,7 +173,7 @@ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
*/
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
- return read_from_oldmem(buf, count, ppos, 0);
+ return read_from_oldmem(buf, count, ppos, 0, sme_active());
}
/*
@@ -173,10 +183,21 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
unsigned long from, unsigned long pfn,
unsigned long size, pgprot_t prot)
{
+ prot = pgprot_encrypted(prot);
return remap_pfn_range(vma, from, pfn, size, prot);
}
/*
+ * Architectures which support memory encryption override this.
+ */
+ssize_t __weak
+copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
+ unsigned long offset, int userbuf)
+{
+ return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
+}
+
+/*
* Copy to either kernel or user space
*/
static int copy_to(void *target, void *src, size_t size, int userbuf)
@@ -351,7 +372,8 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
m->offset + m->size - *fpos,
buflen);
start = m->paddr + *fpos - m->offset;
- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
+ tmp = read_from_oldmem(buffer, tsz, &start,
+ userbuf, sme_active());
if (tmp < 0)
return tmp;
buflen -= tsz;
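copy_oldmem_page_encrypted() is added as a __weak default that simply forwards to copy_oldmem_page(); an architecture with memory encryption supplies a strong definition and the linker picks it automatically. The mechanism works the same way in userspace GCC/Clang, sketched below (assumes a toolchain supporting __attribute__((weak)); the function name is illustrative):

/* Weak-default sketch: if another translation unit defines
 * copy_page_encrypted() without the weak attribute, that strong
 * definition wins at link time; otherwise this fallback is used. */
#include <stdio.h>
#include <string.h>

__attribute__((weak))
int copy_page_encrypted(char *dst, const char *src, size_t n)
{
	memcpy(dst, src, n);       /* default: plain copy */
	return 0;
}

int main(void)
{
	char src[8] = "oldmem", dst[8];

	copy_page_encrypted(dst, src, sizeof(src));
	printf("%s\n", dst);
	return 0;
}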
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 5fcb845b9fec..8cf2218b46a7 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -482,12 +482,10 @@ static struct file_system_type pstore_fs_type = {
.kill_sb = pstore_kill_sb,
};
-static int __init init_pstore_fs(void)
+int __init pstore_init_fs(void)
{
int err;
- pstore_choose_compression();
-
/* Create a convenient mount point for people to access pstore */
err = sysfs_create_mount_point(fs_kobj, "pstore");
if (err)
@@ -500,14 +498,9 @@ static int __init init_pstore_fs(void)
out:
return err;
}
-module_init(init_pstore_fs)
-static void __exit exit_pstore_fs(void)
+void __exit pstore_exit_fs(void)
{
unregister_filesystem(&pstore_fs_type);
sysfs_remove_mount_point(fs_kobj, "pstore");
}
-module_exit(exit_pstore_fs)
-
-MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
-MODULE_LICENSE("GPL");
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index fb767e28aeb2..7062ea4bc57c 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -37,7 +37,8 @@ extern bool pstore_is_mounted(void);
extern void pstore_record_init(struct pstore_record *record,
struct pstore_info *psi);
-/* Called during module_init() */
-extern void __init pstore_choose_compression(void);
+/* Called during pstore init/exit. */
+int __init pstore_init_fs(void);
+void __exit pstore_exit_fs(void);
#endif
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 15e99d5a681d..b821054ca3ed 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -274,36 +274,56 @@ static int pstore_decompress(void *in, void *out,
static void allocate_buf_for_compression(void)
{
+ struct crypto_comp *ctx;
+ int size;
+ char *buf;
+
+ /* Skip if not built-in or compression backend not selected yet. */
if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
return;
+ /* Skip if no pstore backend yet or compression init already done. */
+ if (!psinfo || tfm)
+ return;
+
if (!crypto_has_comp(zbackend->name, 0, 0)) {
- pr_err("No %s compression\n", zbackend->name);
+ pr_err("Unknown compression: %s\n", zbackend->name);
return;
}
- big_oops_buf_sz = zbackend->zbufsize(psinfo->bufsize);
- if (big_oops_buf_sz <= 0)
+ size = zbackend->zbufsize(psinfo->bufsize);
+ if (size <= 0) {
+ pr_err("Invalid compression size for %s: %d\n",
+ zbackend->name, size);
return;
+ }
- big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
- if (!big_oops_buf) {
- pr_err("allocate compression buffer error!\n");
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ pr_err("Failed %d byte compression buffer allocation for: %s\n",
+ size, zbackend->name);
return;
}
- tfm = crypto_alloc_comp(zbackend->name, 0, 0);
- if (IS_ERR_OR_NULL(tfm)) {
- kfree(big_oops_buf);
- big_oops_buf = NULL;
- pr_err("crypto_alloc_comp() failed!\n");
+ ctx = crypto_alloc_comp(zbackend->name, 0, 0);
+ if (IS_ERR_OR_NULL(ctx)) {
+ kfree(buf);
+ pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
+ PTR_ERR(ctx));
return;
}
+
+ /* A non-NULL big_oops_buf indicates compression is available. */
+ tfm = ctx;
+ big_oops_buf_sz = size;
+ big_oops_buf = buf;
+
+ pr_info("Using compression: %s\n", zbackend->name);
}
static void free_buf_for_compression(void)
{
- if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && !IS_ERR_OR_NULL(tfm))
+ if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
crypto_free_comp(tfm);
kfree(big_oops_buf);
big_oops_buf = NULL;
@@ -774,14 +794,43 @@ void __init pstore_choose_compression(void)
for (step = zbackends; step->name; step++) {
if (!strcmp(compress, step->name)) {
zbackend = step;
- pr_info("using %s compression\n", zbackend->name);
return;
}
}
}
+static int __init pstore_init(void)
+{
+ int ret;
+
+ pstore_choose_compression();
+
+ /*
+ * Check if any pstore backends registered earlier but did not
+ * initialize compression because crypto was not ready. If so,
+ * initialize compression now.
+ */
+ allocate_buf_for_compression();
+
+ ret = pstore_init_fs();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+late_initcall(pstore_init);
+
+static void __exit pstore_exit(void)
+{
+ pstore_exit_fs();
+}
+module_exit(pstore_exit)
+
module_param(compress, charp, 0444);
MODULE_PARM_DESC(compress, "Pstore compression to use");
module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "Pstore backend to use");
+
+MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
+MODULE_LICENSE("GPL");
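The rewritten allocate_buf_for_compression() builds everything in locals (ctx, size, buf) and assigns the globals (tfm, big_oops_buf_sz, big_oops_buf) only after every step has succeeded, so a half-initialized state is never visible and each error path frees only what it created. A compressed sketch of the "stage in locals, publish on success" pattern, with illustrative names:

/* Globals change only when the whole initialization succeeded. */
#include <stdlib.h>

static char *g_buf;           /* like big_oops_buf */
static size_t g_size;         /* like big_oops_buf_sz */

static void setup(size_t want)
{
	char *buf;

	if (g_buf)                /* already initialized: skip */
		return;

	buf = malloc(want);
	if (!buf)
		return;           /* globals untouched on failure */

	/* ...any further steps; on error: free(buf) and return... */

	g_size = want;            /* publish only after full success */
	g_buf = buf;
}

int main(void)
{
	setup(4096);
	free(g_buf);
	return 0;
}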
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index bbd1e357c23d..ffcff6516e89 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -587,9 +587,16 @@ static int ramoops_init_przs(const char *name,
goto fail;
for (i = 0; i < *cnt; i++) {
+ char *label;
+
+ if (*cnt == 1)
+ label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
+ else
+ label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
+ name, i, *cnt - 1);
prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
- &cxt->ecc_info,
- cxt->memtype, flags);
+ &cxt->ecc_info,
+ cxt->memtype, flags, label);
if (IS_ERR(prz_ar[i])) {
err = PTR_ERR(prz_ar[i]);
dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
@@ -619,6 +626,8 @@ static int ramoops_init_prz(const char *name,
struct persistent_ram_zone **prz,
phys_addr_t *paddr, size_t sz, u32 sig)
{
+ char *label;
+
if (!sz)
return 0;
@@ -629,8 +638,9 @@ static int ramoops_init_prz(const char *name,
return -ENOMEM;
}
+ label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
- cxt->memtype, 0);
+ cxt->memtype, 0, label);
if (IS_ERR(*prz)) {
int err = PTR_ERR(*prz);
@@ -898,8 +908,22 @@ static struct platform_driver ramoops_driver = {
},
};
-static void ramoops_register_dummy(void)
+static inline void ramoops_unregister_dummy(void)
+{
+ platform_device_unregister(dummy);
+ dummy = NULL;
+
+ kfree(dummy_data);
+ dummy_data = NULL;
+}
+
+static void __init ramoops_register_dummy(void)
{
+ /*
+ * Prepare a dummy platform data structure to carry the module
+ * parameters. If mem_size isn't set, then there are no module
+ * parameters, and we can skip this.
+ */
if (!mem_size)
return;
@@ -932,21 +956,28 @@ static void ramoops_register_dummy(void)
if (IS_ERR(dummy)) {
pr_info("could not create platform device: %ld\n",
PTR_ERR(dummy));
+ dummy = NULL;
+ ramoops_unregister_dummy();
}
}
static int __init ramoops_init(void)
{
+ int ret;
+
ramoops_register_dummy();
- return platform_driver_register(&ramoops_driver);
+ ret = platform_driver_register(&ramoops_driver);
+ if (ret != 0)
+ ramoops_unregister_dummy();
+
+ return ret;
}
-late_initcall(ramoops_init);
+postcore_initcall(ramoops_init);
static void __exit ramoops_exit(void)
{
platform_driver_unregister(&ramoops_driver);
- platform_device_unregister(dummy);
- kfree(dummy_data);
+ ramoops_unregister_dummy();
}
module_exit(ramoops_exit);
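ramoops_unregister_dummy() frees the dummy device and its data and NULLs both pointers, which makes it safe to call from the probe-failure path, from module exit, or twice in a row, since kfree(NULL) is a no-op and the cleared pointers prevent double release. The userspace equivalent of an idempotent teardown helper:

/* Free, then NULL the pointers, so calling from any error path (or
 * twice) is harmless. free(NULL) is a no-op. */
#include <stdlib.h>

static void *dummy;
static void *dummy_data;

static void unregister_dummy(void)
{
	free(dummy);
	dummy = NULL;

	free(dummy_data);
	dummy_data = NULL;
}

int main(void)
{
	dummy = malloc(16);
	dummy_data = malloc(16);

	unregister_dummy();
	unregister_dummy();       /* second call: safe, does nothing */
	return 0;
}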
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 0792595ebcfb..12e21f789194 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -438,11 +438,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
}
static void *persistent_ram_iomap(phys_addr_t start, size_t size,
- unsigned int memtype)
+ unsigned int memtype, char *label)
{
void *va;
- if (!request_mem_region(start, size, "persistent_ram")) {
+ if (!request_mem_region(start, size, label ?: "ramoops")) {
pr_err("request mem region (0x%llx@0x%llx) failed\n",
(unsigned long long)size, (unsigned long long)start);
return NULL;
@@ -470,7 +470,8 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
if (pfn_valid(start >> PAGE_SHIFT))
prz->vaddr = persistent_ram_vmap(start, size, memtype);
else
- prz->vaddr = persistent_ram_iomap(start, size, memtype);
+ prz->vaddr = persistent_ram_iomap(start, size, memtype,
+ prz->label);
if (!prz->vaddr) {
pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
@@ -541,12 +542,13 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
prz->ecc_info.par = NULL;
persistent_ram_free_old(prz);
+ kfree(prz->label);
kfree(prz);
}
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype, u32 flags)
+ unsigned int memtype, u32 flags, char *label)
{
struct persistent_ram_zone *prz;
int ret = -ENOMEM;
@@ -560,6 +562,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
/* Initialize general buffer state. */
raw_spin_lock_init(&prz->buffer_lock);
prz->flags = flags;
+ prz->label = label;
ret = persistent_ram_buffer_map(start, size, prz, memtype);
if (ret)
diff --git a/fs/read_write.c b/fs/read_write.c
index 39b4a21dd933..603794b207eb 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -331,7 +331,7 @@ COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned i
}
#endif
-#ifdef __ARCH_WANT_SYS_LLSEEK
+#if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
unsigned long, offset_low, loff_t __user *, result,
unsigned int, whence)
@@ -1818,8 +1818,8 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
}
EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
-int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len)
+int do_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, u64 len)
{
struct inode *inode_in = file_inode(file_in);
struct inode *inode_out = file_inode(file_out);
@@ -1866,6 +1866,19 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
return ret;
}
+EXPORT_SYMBOL(do_clone_file_range);
+
+int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, u64 len)
+{
+ int ret;
+
+ file_start_write(file_out);
+ ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len);
+ file_end_write(file_out);
+
+ return ret;
+}
EXPORT_SYMBOL(vfs_clone_file_range);
/*
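read_write.c splits the clone entry point in two: do_clone_file_range() keeps the old body, and the exported vfs_clone_file_range() wraps it in file_start_write()/file_end_write(). Callers that already hold write access (such as the overlayfs copy-up path above) call the do_ variant; everyone else gets the wrapper. The layering, sketched with a userspace rwlock standing in for the freeze protection (names illustrative):

/* do_/vfs_ split: the inner helper assumes the caller holds write
 * access; the exported wrapper takes and drops it. Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t write_access = PTHREAD_RWLOCK_INITIALIZER;

/* Inner helper: caller must already hold write access. */
static int do_clone_range(long len)
{
	printf("cloning %ld bytes\n", len);
	return 0;
}

/* Public wrapper: brackets the helper with the lock. */
static int vfs_clone_range(long len)
{
	int ret;

	pthread_rwlock_rdlock(&write_access);   /* like file_start_write() */
	ret = do_clone_range(len);
	pthread_rwlock_unlock(&write_access);   /* like file_end_write() */
	return ret;
}

int main(void)
{
	return vfs_clone_range(4096);
}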
diff --git a/fs/select.c b/fs/select.c
index 4a6b6e4b21cb..22b3bf89f051 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -1120,7 +1120,7 @@ int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *
ts.tv_sec = ts.tv_nsec = 0;
if (timeval) {
- struct compat_timeval rtv;
+ struct old_timeval32 rtv;
rtv.tv_sec = ts.tv_sec;
rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
@@ -1128,7 +1128,7 @@ int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *
if (!copy_to_user(p, &rtv, sizeof(rtv)))
return ret;
} else {
- if (!compat_put_timespec64(&ts, p))
+ if (!put_old_timespec32(&ts, p))
return ret;
}
/*
@@ -1257,10 +1257,10 @@ out_nofds:
static int do_compat_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timeval __user *tvp)
+ struct old_timeval32 __user *tvp)
{
struct timespec64 end_time, *to = NULL;
- struct compat_timeval tv;
+ struct old_timeval32 tv;
int ret;
if (tvp) {
@@ -1282,7 +1282,7 @@ static int do_compat_select(int n, compat_ulong_t __user *inp,
COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
- struct compat_timeval __user *, tvp)
+ struct old_timeval32 __user *, tvp)
{
return do_compat_select(n, inp, outp, exp, tvp);
}
@@ -1307,7 +1307,7 @@ COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
static long do_compat_pselect(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
+ struct old_timespec32 __user *tsp, compat_sigset_t __user *sigmask,
compat_size_t sigsetsize)
{
sigset_t ksigmask, sigsaved;
@@ -1315,7 +1315,7 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
int ret;
if (tsp) {
- if (compat_get_timespec64(&ts, tsp))
+ if (get_old_timespec32(&ts, tsp))
return -EFAULT;
to = &end_time;
@@ -1355,7 +1355,7 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
- struct compat_timespec __user *, tsp, void __user *, sig)
+ struct old_timespec32 __user *, tsp, void __user *, sig)
{
compat_size_t sigsetsize = 0;
compat_uptr_t up = 0;
@@ -1373,7 +1373,7 @@ COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
}
COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
- unsigned int, nfds, struct compat_timespec __user *, tsp,
+ unsigned int, nfds, struct old_timespec32 __user *, tsp,
const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
sigset_t ksigmask, sigsaved;
@@ -1381,7 +1381,7 @@ COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
int ret;
if (tsp) {
- if (compat_get_timespec64(&ts, tsp))
+ if (get_old_timespec32(&ts, tsp))
return -EFAULT;
to = &end_time;
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 4fcd1498acf5..757afc7c5895 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -79,7 +79,7 @@ static __poll_t signalfd_poll(struct file *file, poll_table *wait)
* Copied from copy_siginfo_to_user() in kernel/signal.c
*/
static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
- siginfo_t const *kinfo)
+ kernel_siginfo_t const *kinfo)
{
struct signalfd_siginfo new;
@@ -163,7 +163,7 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
return sizeof(*uinfo);
}
-static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info,
+static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info,
int nonblock)
{
ssize_t ret;
@@ -215,7 +215,7 @@ static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
struct signalfd_siginfo __user *siginfo;
int nonblock = file->f_flags & O_NONBLOCK;
ssize_t ret, total = 0;
- siginfo_t info;
+ kernel_siginfo_t info;
count /= sizeof(struct signalfd_siginfo);
if (!count)
diff --git a/fs/stat.c b/fs/stat.c
index f8e6fb2c3657..adbfcd86c81b 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -280,6 +280,8 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
#endif /* __ARCH_WANT_OLD_STAT */
+#ifdef __ARCH_WANT_NEW_STAT
+
#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
@@ -378,6 +380,7 @@ SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
return error;
}
+#endif
static int do_readlinkat(int dfd, const char __user *pathname,
char __user *buf, int bufsiz)
diff --git a/fs/timerfd.c b/fs/timerfd.c
index d69ad801eb80..803ca070d42e 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -561,29 +561,29 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct __kernel_itimerspec __user *,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
- const struct compat_itimerspec __user *, utmr,
- struct compat_itimerspec __user *, otmr)
+ const struct old_itimerspec32 __user *, utmr,
+ struct old_itimerspec32 __user *, otmr)
{
struct itimerspec64 new, old;
int ret;
- if (get_compat_itimerspec64(&new, utmr))
+ if (get_old_itimerspec32(&new, utmr))
return -EFAULT;
ret = do_timerfd_settime(ufd, flags, &new, &old);
if (ret)
return ret;
- if (otmr && put_compat_itimerspec64(&old, otmr))
+ if (otmr && put_old_itimerspec32(&old, otmr))
return -EFAULT;
return ret;
}
COMPAT_SYSCALL_DEFINE2(timerfd_gettime, int, ufd,
- struct compat_itimerspec __user *, otmr)
+ struct old_itimerspec32 __user *, otmr)
{
struct itimerspec64 kotmr;
int ret = do_timerfd_gettime(ufd, &kotmr);
if (ret)
return ret;
- return put_compat_itimerspec64(&kotmr, otmr) ? -EFAULT : 0;
+ return put_old_itimerspec32(&kotmr, otmr) ? -EFAULT : 0;
}
#endif
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 23e7042666a7..fec62e9dfbe6 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1912,7 +1912,9 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
mutex_unlock(&c->bu_mutex);
}
- ubifs_assert(c, c->lst.taken_empty_lebs > 0);
+ if (!c->need_recovery)
+ ubifs_assert(c, c->lst.taken_empty_lebs > 0);
+
return 0;
}
@@ -1954,6 +1956,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
int dev, vol;
char *endptr;
+ if (!name || !*name)
+ return ERR_PTR(-EINVAL);
+
/* First, try to open using the device node path method */
ubi = ubi_open_volume_path(name, mode);
if (!IS_ERR(ubi))
@@ -2332,8 +2337,8 @@ late_initcall(ubifs_init);
static void __exit ubifs_exit(void)
{
- WARN_ON(list_empty(&ubifs_infos));
- WARN_ON(atomic_long_read(&ubifs_clean_zn_cnt) == 0);
+ WARN_ON(!list_empty(&ubifs_infos));
+ WARN_ON(atomic_long_read(&ubifs_clean_zn_cnt) != 0);
dbg_debugfs_exit();
ubifs_compressors_exit();
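The ubifs_exit() fix inverts both assertions: at module unload the list of mounted filesystems should be empty and the clean-znode count should be zero, so the warnings must fire on !list_empty(...) and != 0, not the reverse. A short illustration of getting assertion polarity right -- assert(cond) trips when cond is false, whereas WARN_ON(cond) trips when cond is true:

/* State the invariant, then warn when it is violated. The ubifs bug
 * wrote the WARN_ON conditions as if they were assertions. */
#include <assert.h>

int main(void)
{
	int mounted = 0;   /* invariant at exit: nothing mounted */
	int dirty   = 0;   /* invariant at exit: no dirty znodes */

	assert(mounted == 0);  /* assertion form of the invariant */
	/* kernel form would be: WARN_ON(mounted != 0); */
	assert(dirty == 0);
	return 0;
}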
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 61afdfee4b28..f5ad1ede7990 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -152,12 +152,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
ui->data_len = size;
mutex_lock(&host_ui->ui_mutex);
-
- if (!host->i_nlink) {
- err = -ENOENT;
- goto out_noent;
- }
-
host->i_ctime = current_time(host);
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
@@ -190,7 +184,6 @@ out_cancel:
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
host_ui->xattr_names -= fname_len(nm);
host_ui->flags &= ~UBIFS_CRYPT_FL;
-out_noent:
mutex_unlock(&host_ui->ui_mutex);
out_free:
make_bad_inode(inode);
@@ -242,12 +235,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
mutex_unlock(&ui->ui_mutex);
mutex_lock(&host_ui->ui_mutex);
-
- if (!host->i_nlink) {
- err = -ENOENT;
- goto out_noent;
- }
-
host->i_ctime = current_time(host);
host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -269,7 +256,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
out_cancel:
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
-out_noent:
mutex_unlock(&host_ui->ui_mutex);
make_bad_inode(inode);
out_free:
@@ -496,12 +482,6 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
return err;
mutex_lock(&host_ui->ui_mutex);
-
- if (!host->i_nlink) {
- err = -ENOENT;
- goto out_noent;
- }
-
host->i_ctime = current_time(host);
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
@@ -521,7 +501,6 @@ out_cancel:
host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
host_ui->xattr_names += fname_len(nm);
-out_noent:
mutex_unlock(&host_ui->ui_mutex);
ubifs_release_budget(c, &req);
make_bad_inode(inode);
@@ -561,9 +540,6 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
ubifs_assert(c, inode_is_locked(host));
- if (!host->i_nlink)
- return -ENOENT;
-
if (fname_len(&nm) > UBIFS_MAX_NLEN)
return -ENAMETOOLONG;
diff --git a/fs/utimes.c b/fs/utimes.c
index 69d4b6ba1bfb..bdcf2daf39c1 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -8,35 +8,6 @@
#include <linux/compat.h>
#include <asm/unistd.h>
-#ifdef __ARCH_WANT_SYS_UTIME
-
-/*
- * sys_utime() can be implemented in user-level using sys_utimes().
- * Is this for backwards compatibility? If so, why not move it
- * into the appropriate arch directory (for those architectures that
- * need it).
- */
-
-/* If times==NULL, set access and modification to current time,
- * must be owner or have write permission.
- * Else, update from *times, must be owner or super user.
- */
-SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times)
-{
- struct timespec64 tv[2];
-
- if (times) {
- if (get_user(tv[0].tv_sec, &times->actime) ||
- get_user(tv[1].tv_sec, &times->modtime))
- return -EFAULT;
- tv[0].tv_nsec = 0;
- tv[1].tv_nsec = 0;
- }
- return do_utimes(AT_FDCWD, filename, times ? tv : NULL, 0);
-}
-
-#endif
-
static bool nsec_valid(long nsec)
{
if (nsec == UTIME_OMIT || nsec == UTIME_NOW)
@@ -166,7 +137,7 @@ out:
}
SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename,
- struct timespec __user *, utimes, int, flags)
+ struct __kernel_timespec __user *, utimes, int, flags)
{
struct timespec64 tstimes[2];
@@ -184,6 +155,13 @@ SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename,
return do_utimes(dfd, filename, utimes ? tstimes : NULL, flags);
}
+#ifdef __ARCH_WANT_SYS_UTIME
+/*
+ * futimesat(), utimes() and utime() are older versions of utimensat()
+ * that are provided for compatibility with traditional C libraries.
+ * On modern architectures, we always use libc wrappers around
+ * utimensat() instead.
+ */
static long do_futimesat(int dfd, const char __user *filename,
struct timeval __user *utimes)
{
@@ -225,13 +203,29 @@ SYSCALL_DEFINE2(utimes, char __user *, filename,
return do_futimesat(AT_FDCWD, filename, utimes);
}
-#ifdef CONFIG_COMPAT
+SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times)
+{
+ struct timespec64 tv[2];
+
+ if (times) {
+ if (get_user(tv[0].tv_sec, &times->actime) ||
+ get_user(tv[1].tv_sec, &times->modtime))
+ return -EFAULT;
+ tv[0].tv_nsec = 0;
+ tv[1].tv_nsec = 0;
+ }
+ return do_utimes(AT_FDCWD, filename, times ? tv : NULL, 0);
+}
+#endif
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
/*
* Not all architectures have sys_utime, so implement this in terms
* of sys_utimes.
*/
+#ifdef __ARCH_WANT_SYS_UTIME32
COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename,
- struct compat_utimbuf __user *, t)
+ struct old_utimbuf32 __user *, t)
{
struct timespec64 tv[2];
@@ -244,14 +238,15 @@ COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename,
}
return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0);
}
+#endif
-COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct compat_timespec __user *, t, int, flags)
+COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct old_timespec32 __user *, t, int, flags)
{
struct timespec64 tv[2];
if (t) {
- if (compat_get_timespec64(&tv[0], &t[0]) ||
- compat_get_timespec64(&tv[1], &t[1]))
+ if (get_old_timespec32(&tv[0], &t[0]) ||
+ get_old_timespec32(&tv[1], &t[1]))
return -EFAULT;
if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT)
@@ -260,8 +255,9 @@ COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filena
return do_utimes(dfd, filename, t ? tv : NULL, flags);
}
+#ifdef __ARCH_WANT_SYS_UTIME32
static long do_compat_futimesat(unsigned int dfd, const char __user *filename,
- struct compat_timeval __user *t)
+ struct old_timeval32 __user *t)
{
struct timespec64 tv[2];
@@ -282,13 +278,14 @@ static long do_compat_futimesat(unsigned int dfd, const char __user *filename,
COMPAT_SYSCALL_DEFINE3(futimesat, unsigned int, dfd,
const char __user *, filename,
- struct compat_timeval __user *, t)
+ struct old_timeval32 __user *, t)
{
return do_compat_futimesat(dfd, filename, t);
}
-COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct compat_timeval __user *, t)
+COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct old_timeval32 __user *, t)
{
return do_compat_futimesat(AT_FDCWD, filename, t);
}
#endif
+#endif
diff --git a/fs/xattr.c b/fs/xattr.c
index daa732550088..0d6a6a4af861 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -948,17 +948,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
int err = 0;
#ifdef CONFIG_FS_POSIX_ACL
- if (inode->i_acl) {
- err = xattr_list_one(&buffer, &remaining_size,
- XATTR_NAME_POSIX_ACL_ACCESS);
- if (err)
- return err;
- }
- if (inode->i_default_acl) {
- err = xattr_list_one(&buffer, &remaining_size,
- XATTR_NAME_POSIX_ACL_DEFAULT);
- if (err)
- return err;
+ if (IS_POSIXACL(inode)) {
+ if (inode->i_acl) {
+ err = xattr_list_one(&buffer, &remaining_size,
+ XATTR_NAME_POSIX_ACL_ACCESS);
+ if (err)
+ return err;
+ }
+ if (inode->i_default_acl) {
+ err = xattr_list_one(&buffer, &remaining_size,
+ XATTR_NAME_POSIX_ACL_DEFAULT);
+ if (err)
+ return err;
+ }
}
#endif
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 1e671d4eb6fa..844ed87b1900 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -191,6 +191,128 @@ xfs_attr_calc_size(
return nblks;
}
+STATIC int
+xfs_attr_try_sf_addname(
+ struct xfs_inode *dp,
+ struct xfs_da_args *args)
+{
+
+ struct xfs_mount *mp = dp->i_mount;
+ int error, error2;
+
+ error = xfs_attr_shortform_addname(args);
+ if (error == -ENOSPC)
+ return error;
+
+ /*
+ * Commit the shortform mods, and we're done.
+ * NOTE: this is also the error path (EEXIST, etc).
+ */
+ if (!error && (args->flags & ATTR_KERNOTIME) == 0)
+ xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
+
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(args->trans);
+
+ error2 = xfs_trans_commit(args->trans);
+ args->trans = NULL;
+ return error ? error : error2;
+}
+
+/*
+ * Set the attribute specified in @args.
+ */
+int
+xfs_attr_set_args(
+ struct xfs_da_args *args,
+ struct xfs_buf **leaf_bp)
+{
+ struct xfs_inode *dp = args->dp;
+ int error;
+
+ /*
+ * If the attribute list is non-existent or a shortform list,
+ * upgrade it to a single-leaf-block attribute list.
+ */
+ if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
+ (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+ dp->i_d.di_anextents == 0)) {
+
+ /*
+ * Build initial attribute list (if required).
+ */
+ if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
+ xfs_attr_shortform_create(args);
+
+ /*
+ * Try to add the attr to the attribute list in the inode.
+ */
+ error = xfs_attr_try_sf_addname(dp, args);
+ if (error != -ENOSPC)
+ return error;
+
+ /*
+ * It won't fit in the shortform, transform to a leaf block.
+ * GROT: another possible req'mt for a double-split btree op.
+ */
+ error = xfs_attr_shortform_to_leaf(args, leaf_bp);
+ if (error)
+ return error;
+
+ /*
+ * Prevent the leaf buffer from being unlocked so that a
+ * concurrent AIL push cannot grab the half-baked leaf
+ * buffer and run into problems with the write verifier.
+ */
+ xfs_trans_bhold(args->trans, *leaf_bp);
+
+ error = xfs_defer_finish(&args->trans);
+ if (error)
+ return error;
+
+ /*
+ * Commit the leaf transformation. We'll need another
+ * (linked) transaction to add the new attribute to the
+ * leaf.
+ */
+ error = xfs_trans_roll_inode(&args->trans, dp);
+ if (error)
+ return error;
+ xfs_trans_bjoin(args->trans, *leaf_bp);
+ *leaf_bp = NULL;
+ }
+
+ if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
+ error = xfs_attr_leaf_addname(args);
+ else
+ error = xfs_attr_node_addname(args);
+ return error;
+}
+
+/*
+ * Remove the attribute specified in @args.
+ */
+int
+xfs_attr_remove_args(
+ struct xfs_da_args *args)
+{
+ struct xfs_inode *dp = args->dp;
+ int error;
+
+ if (!xfs_inode_hasattr(dp)) {
+ error = -ENOATTR;
+ } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+ ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
+ error = xfs_attr_shortform_remove(args);
+ } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+ error = xfs_attr_leaf_removename(args);
+ } else {
+ error = xfs_attr_node_removename(args);
+ }
+
+ return error;
+}
+
int
xfs_attr_set(
struct xfs_inode *dp,
@@ -204,7 +326,7 @@ xfs_attr_set(
struct xfs_da_args args;
struct xfs_trans_res tres;
int rsvd = (flags & ATTR_ROOT) != 0;
- int error, err2, local;
+ int error, local;
XFS_STATS_INC(mp, xs_attr_set);
@@ -255,93 +377,17 @@ xfs_attr_set(
error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
XFS_QMOPT_RES_REGBLKS);
- if (error) {
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
- xfs_trans_cancel(args.trans);
- return error;
- }
+ if (error)
+ goto out_trans_cancel;
xfs_trans_ijoin(args.trans, dp, 0);
-
- /*
- * If the attribute list is non-existent or a shortform list,
- * upgrade it to a single-leaf-block attribute list.
- */
- if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
- (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- dp->i_d.di_anextents == 0)) {
-
- /*
- * Build initial attribute list (if required).
- */
- if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
- xfs_attr_shortform_create(&args);
-
- /*
- * Try to add the attr to the attribute list in
- * the inode.
- */
- error = xfs_attr_shortform_addname(&args);
- if (error != -ENOSPC) {
- /*
- * Commit the shortform mods, and we're done.
- * NOTE: this is also the error path (EEXIST, etc).
- */
- ASSERT(args.trans != NULL);
-
- /*
- * If this is a synchronous mount, make sure that
- * the transaction goes to disk before returning
- * to the user.
- */
- if (mp->m_flags & XFS_MOUNT_WSYNC)
- xfs_trans_set_sync(args.trans);
-
- if (!error && (flags & ATTR_KERNOTIME) == 0) {
- xfs_trans_ichgtime(args.trans, dp,
- XFS_ICHGTIME_CHG);
- }
- err2 = xfs_trans_commit(args.trans);
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
- return error ? error : err2;
- }
-
- /*
- * It won't fit in the shortform, transform to a leaf block.
- * GROT: another possible req'mt for a double-split btree op.
- */
- error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
- if (error)
- goto out;
- /*
- * Prevent the leaf buffer from being unlocked so that a
- * concurrent AIL push cannot grab the half-baked leaf
- * buffer and run into problems with the write verifier.
- */
- xfs_trans_bhold(args.trans, leaf_bp);
- error = xfs_defer_finish(&args.trans);
- if (error)
- goto out;
-
- /*
- * Commit the leaf transformation. We'll need another (linked)
- * transaction to add the new attribute to the leaf, which
- * means that we have to hold & join the leaf buffer here too.
- */
- error = xfs_trans_roll_inode(&args.trans, dp);
- if (error)
- goto out;
- xfs_trans_bjoin(args.trans, leaf_bp);
- leaf_bp = NULL;
- }
-
- if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
- error = xfs_attr_leaf_addname(&args);
- else
- error = xfs_attr_node_addname(&args);
+ error = xfs_attr_set_args(&args, &leaf_bp);
if (error)
- goto out;
+ goto out_release_leaf;
+ if (!args.trans) {
+ /* shortform attribute has already been committed */
+ goto out_unlock;
+ }
/*
* If this is a synchronous mount, make sure that the
@@ -358,17 +404,17 @@ xfs_attr_set(
*/
xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
error = xfs_trans_commit(args.trans);
+out_unlock:
xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
return error;
-out:
+out_release_leaf:
if (leaf_bp)
xfs_trans_brelse(args.trans, leaf_bp);
+out_trans_cancel:
if (args.trans)
xfs_trans_cancel(args.trans);
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
- return error;
+ goto out_unlock;
}
/*
@@ -423,17 +469,7 @@ xfs_attr_remove(
*/
xfs_trans_ijoin(args.trans, dp, 0);
- if (!xfs_inode_hasattr(dp)) {
- error = -ENOATTR;
- } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
- ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
- error = xfs_attr_shortform_remove(&args);
- } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
- error = xfs_attr_leaf_removename(&args);
- } else {
- error = xfs_attr_node_removename(&args);
- }
-
+ error = xfs_attr_remove_args(&args);
if (error)
goto out;
@@ -587,7 +623,7 @@ xfs_attr_leaf_addname(
*/
error = xfs_attr3_leaf_to_node(args);
if (error)
- goto out_defer_cancel;
+ return error;
error = xfs_defer_finish(&args->trans);
if (error)
return error;
@@ -675,7 +711,7 @@ xfs_attr_leaf_addname(
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
- goto out_defer_cancel;
+ return error;
error = xfs_defer_finish(&args->trans);
if (error)
return error;
@@ -693,9 +729,6 @@ xfs_attr_leaf_addname(
error = xfs_attr3_leaf_clearflag(args);
}
return error;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- return error;
}
/*
@@ -738,15 +771,12 @@ xfs_attr_leaf_removename(
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
- goto out_defer_cancel;
+ return error;
error = xfs_defer_finish(&args->trans);
if (error)
return error;
}
return 0;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- return error;
}
/*
@@ -864,7 +894,7 @@ restart:
state = NULL;
error = xfs_attr3_leaf_to_node(args);
if (error)
- goto out_defer_cancel;
+ goto out;
error = xfs_defer_finish(&args->trans);
if (error)
goto out;
@@ -888,7 +918,7 @@ restart:
*/
error = xfs_da3_split(state);
if (error)
- goto out_defer_cancel;
+ goto out;
error = xfs_defer_finish(&args->trans);
if (error)
goto out;
@@ -984,7 +1014,7 @@ restart:
if (retval && (state->path.active > 1)) {
error = xfs_da3_join(state);
if (error)
- goto out_defer_cancel;
+ goto out;
error = xfs_defer_finish(&args->trans);
if (error)
goto out;
@@ -1013,9 +1043,6 @@ out:
if (error)
return error;
return retval;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- goto out;
}
/*
@@ -1107,7 +1134,7 @@ xfs_attr_node_removename(
if (retval && (state->path.active > 1)) {
error = xfs_da3_join(state);
if (error)
- goto out_defer_cancel;
+ goto out;
error = xfs_defer_finish(&args->trans);
if (error)
goto out;
@@ -1138,7 +1165,7 @@ xfs_attr_node_removename(
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
- goto out_defer_cancel;
+ goto out;
error = xfs_defer_finish(&args->trans);
if (error)
goto out;
@@ -1150,9 +1177,6 @@ xfs_attr_node_removename(
out:
xfs_da_state_free(state);
return error;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- goto out;
}
/*
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 033ff8c478e2..bdf52a333f3f 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -140,7 +140,9 @@ int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
unsigned char *value, int *valuelenp, int flags);
int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
unsigned char *value, int valuelen, int flags);
+int xfs_attr_set_args(struct xfs_da_args *args, struct xfs_buf **leaf_bp);
int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
+int xfs_attr_remove_args(struct xfs_da_args *args);
int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
int flags, struct attrlist_cursor_kern *cursor);
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index af094063e402..d89363c6b523 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -485,7 +485,7 @@ xfs_attr_rmtval_set(
blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
&nmap);
if (error)
- goto out_defer_cancel;
+ return error;
error = xfs_defer_finish(&args->trans);
if (error)
return error;
@@ -553,9 +553,6 @@ xfs_attr_rmtval_set(
}
ASSERT(valuelen == 0);
return 0;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- return error;
}
/*
@@ -625,7 +622,7 @@ xfs_attr_rmtval_remove(
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
XFS_BMAPI_ATTRFORK, 1, &done);
if (error)
- goto out_defer_cancel;
+ return error;
error = xfs_defer_finish(&args->trans);
if (error)
return error;
@@ -638,7 +635,4 @@ xfs_attr_rmtval_remove(
return error;
}
return 0;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- return error;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 2760314fdf7f..74d7228e755b 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -673,7 +673,8 @@ xfs_bmap_extents_to_btree(
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
/*
- * Make space in the inode incore.
+ * Make space in the inode incore. This needs to be undone if we fail
+ * to expand the root.
*/
xfs_iroot_realloc(ip, 1, whichfork);
ifp->if_flags |= XFS_IFBROOT;
@@ -711,16 +712,15 @@ xfs_bmap_extents_to_btree(
args.minlen = args.maxlen = args.prod = 1;
args.wasdel = wasdel;
*logflagsp = 0;
- if ((error = xfs_alloc_vextent(&args))) {
- ASSERT(ifp->if_broot == NULL);
- goto err1;
- }
+ error = xfs_alloc_vextent(&args);
+ if (error)
+ goto out_root_realloc;
if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
- ASSERT(ifp->if_broot == NULL);
error = -ENOSPC;
- goto err1;
+ goto out_root_realloc;
}
+
/*
* Allocation can't fail, the space was reserved.
*/
@@ -732,9 +732,10 @@ xfs_bmap_extents_to_btree(
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
if (!abp) {
- error = -ENOSPC;
- goto err2;
+ error = -EFSCORRUPTED;
+ goto out_unreserve_dquot;
}
+
/*
* Fill in the child block.
*/
@@ -775,11 +776,12 @@ xfs_bmap_extents_to_btree(
*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
return 0;
-err2:
+out_unreserve_dquot:
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
-err1:
+out_root_realloc:
xfs_iroot_realloc(ip, -1, whichfork);
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+ ASSERT(ifp->if_broot == NULL);
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
return error;
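
[Editorial sketch] The relabeled error path above follows the staged-unwind idiom: each failure site jumps to the label that releases exactly what has been set up so far, and the labels run in reverse setup order. A generic, runnable sketch of the idiom (the resources are hypothetical):

	#include <stdlib.h>

	/* Staged cleanup: later failures unwind through earlier labels. */
	static int do_work(void)
	{
		int error = 0;
		char *a = malloc(16);		/* stage 1 */
		if (!a)
			return -1;

		char *b = malloc(16);		/* stage 2 */
		if (!b) {
			error = -1;
			goto out_free_a;	/* undo stage 1 only */
		}

		/* ... real work would go here ... */
		free(b);
	out_free_a:
		free(a);
		return error;
	}

	int main(void) { return do_work() ? 1 : 0; }
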
@@ -1017,6 +1019,34 @@ xfs_bmap_add_attrfork_local(
return -EFSCORRUPTED;
}
+/* Set an inode attr fork off based on the format */
+int
+xfs_bmap_set_attrforkoff(
+ struct xfs_inode *ip,
+ int size,
+ int *version)
+{
+ switch (ip->i_d.di_format) {
+ case XFS_DINODE_FMT_DEV:
+ ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
+ break;
+ case XFS_DINODE_FMT_LOCAL:
+ case XFS_DINODE_FMT_EXTENTS:
+ case XFS_DINODE_FMT_BTREE:
+ ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
+ if (!ip->i_d.di_forkoff)
+ ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
+ else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
+ *version = 2;
+ break;
+ default:
+ ASSERT(0);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
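
[Editorial sketch] di_forkoff is stored in 8-byte units, which is why the device-inode case computes roundup(sizeof(xfs_dev_t), 8) >> 3. A quick userspace check of that arithmetic, assuming the on-disk xfs_dev_t is 32 bits wide:

	#include <stdint.h>
	#include <stdio.h>

	#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		size_t devsz = sizeof(uint32_t);	/* assumed xfs_dev_t width */
		unsigned forkoff = (unsigned)(ROUNDUP(devsz, 8) >> 3);

		/* prints "forkoff = 1 (8 bytes)" */
		printf("forkoff = %u (%u bytes)\n", forkoff, forkoff << 3);
		return 0;
	}
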
/*
* Convert inode from non-attributed to attributed.
* Must not be in a transaction, ip must not be locked.
@@ -1068,26 +1098,9 @@ xfs_bmap_add_attrfork(
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
- switch (ip->i_d.di_format) {
- case XFS_DINODE_FMT_DEV:
- ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
- break;
- case XFS_DINODE_FMT_LOCAL:
- case XFS_DINODE_FMT_EXTENTS:
- case XFS_DINODE_FMT_BTREE:
- ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
- if (!ip->i_d.di_forkoff)
- ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
- else if (mp->m_flags & XFS_MOUNT_ATTR2)
- version = 2;
- break;
- default:
- ASSERT(0);
- error = -EINVAL;
+ error = xfs_bmap_set_attrforkoff(ip, size, &version);
+ if (error)
goto trans_cancel;
- }
-
ASSERT(ip->i_afp == NULL);
ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
ip->i_afp->if_flags = XFS_IFEXTENTS;
@@ -4079,8 +4092,7 @@ xfs_bmapi_allocate(
* extents to real extents when we're about to write the data.
*/
if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
- (bma->flags & XFS_BMAPI_PREALLOC) &&
- xfs_sb_version_hasextflgbit(&mp->m_sb))
+ (bma->flags & XFS_BMAPI_PREALLOC))
bma->got.br_state = XFS_EXT_UNWRITTEN;
if (bma->wasdel)
@@ -5243,8 +5255,7 @@ __xfs_bunmapi(
* unmapping part of it. But we can't really
* get rid of part of a realtime extent.
*/
- if (del.br_state == XFS_EXT_UNWRITTEN ||
- !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
+ if (del.br_state == XFS_EXT_UNWRITTEN) {
/*
* This piece is unwritten, or we're not
* using unwritten extents. Skip over it.
@@ -5294,10 +5305,9 @@ __xfs_bunmapi(
del.br_blockcount -= mod;
del.br_startoff += mod;
del.br_startblock += mod;
- } else if ((del.br_startoff == start &&
- (del.br_state == XFS_EXT_UNWRITTEN ||
- tp->t_blk_res == 0)) ||
- !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
+ } else if (del.br_startoff == start &&
+ (del.br_state == XFS_EXT_UNWRITTEN ||
+ tp->t_blk_res == 0)) {
/*
* Can't make it unwritten. There isn't
* a full extent here so just skip it.
@@ -6112,11 +6122,7 @@ xfs_bmap_validate_extent(
XFS_FSB_TO_AGNO(mp, endfsb))
return __this_address;
}
- if (irec->br_state != XFS_EXT_NORM) {
- if (whichfork != XFS_DATA_FORK)
- return __this_address;
- if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
- return __this_address;
- }
+ if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
+ return __this_address;
return NULL;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index b6e9b639e731..488dc8860fd7 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -183,6 +183,7 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len);
void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
+int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
xfs_filblks_t len, struct xfs_owner_info *oinfo,
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 059bc44c27e8..9995d5ae380b 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -287,6 +287,8 @@ static inline bool xfs_sb_good_v4_features(struct xfs_sb *sbp)
{
if (!(sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT))
return false;
+ if (!(sbp->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT))
+ return false;
/* check for unknown features in the fs */
if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ||
@@ -357,12 +359,6 @@ static inline bool xfs_sb_version_haslogv2(struct xfs_sb *sbp)
(sbp->sb_versionnum & XFS_SB_VERSION_LOGV2BIT);
}
-static inline bool xfs_sb_version_hasextflgbit(struct xfs_sb *sbp)
-{
- return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
- (sbp->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT);
-}
-
static inline bool xfs_sb_version_hassector(struct xfs_sb *sbp)
{
return (sbp->sb_versionnum & XFS_SB_VERSION_SECTORBIT);
@@ -1016,6 +1012,8 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */
+/* Do not use bit 15, di_flags is legacy and unchanging now */
+
#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 30d1d60f1d46..09d9c8cfa4a0 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -415,6 +415,31 @@ xfs_dinode_verify_fork(
return NULL;
}
+static xfs_failaddr_t
+xfs_dinode_verify_forkoff(
+ struct xfs_dinode *dip,
+ struct xfs_mount *mp)
+{
+ if (!XFS_DFORK_Q(dip))
+ return NULL;
+
+ switch (dip->di_format) {
+ case XFS_DINODE_FMT_DEV:
+ if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
+ return __this_address;
+ break;
+ case XFS_DINODE_FMT_LOCAL: /* fall through ... */
+ case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
+ case XFS_DINODE_FMT_BTREE:
+ if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3))
+ return __this_address;
+ break;
+ default:
+ return __this_address;
+ }
+ return NULL;
+}
+
xfs_failaddr_t
xfs_dinode_verify(
struct xfs_mount *mp,
@@ -470,6 +495,11 @@ xfs_dinode_verify(
if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
return __this_address;
+ /* check for illegal values of forkoff */
+ fa = xfs_dinode_verify_forkoff(dip, mp);
+ if (fa)
+ return fa;
+
/* Do we have appropriate data fork formats for the mode? */
switch (mode & S_IFMT) {
case S_IFIFO:
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 081f46e30556..b5a82acd7dfe 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -1115,7 +1115,8 @@ xfs_fs_geometry(
geo->version = XFS_FSOP_GEOM_VERSION;
geo->flags = XFS_FSOP_GEOM_FLAGS_NLINK |
- XFS_FSOP_GEOM_FLAGS_DIRV2;
+ XFS_FSOP_GEOM_FLAGS_DIRV2 |
+ XFS_FSOP_GEOM_FLAGS_EXTFLG;
if (xfs_sb_version_hasattr(sbp))
geo->flags |= XFS_FSOP_GEOM_FLAGS_ATTR;
if (xfs_sb_version_hasquota(sbp))
@@ -1124,8 +1125,6 @@ xfs_fs_geometry(
geo->flags |= XFS_FSOP_GEOM_FLAGS_IALIGN;
if (xfs_sb_version_hasdalign(sbp))
geo->flags |= XFS_FSOP_GEOM_FLAGS_DALIGN;
- if (xfs_sb_version_hasextflgbit(sbp))
- geo->flags |= XFS_FSOP_GEOM_FLAGS_EXTFLG;
if (xfs_sb_version_hassector(sbp))
geo->flags |= XFS_FSOP_GEOM_FLAGS_SECTOR;
if (xfs_sb_version_hasasciici(sbp))
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 036b5c7021eb..376bcb585ae6 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -17,7 +17,6 @@
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
-#include "xfs_alloc.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index 5b3b177c0fc9..e386c9b0b4ab 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -126,6 +126,7 @@ xchk_inode_flags(
{
struct xfs_mount *mp = sc->mp;
+ /* di_flags are all taken, last bit cannot be used */
if (flags & ~XFS_DIFLAG_ANY)
goto bad;
@@ -172,8 +173,9 @@ xchk_inode_flags2(
{
struct xfs_mount *mp = sc->mp;
+ /* Unknown di_flags2 could be from a future kernel */
if (flags2 & ~XFS_DIFLAG2_ANY)
- goto bad;
+ xchk_ino_set_warning(sc, ino);
/* reflink flag requires reflink feature */
if ((flags2 & XFS_DIFLAG2_REFLINK) &&
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 9f08dd9bf1d5..4fc0a5ea7673 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -29,6 +29,8 @@
#include "xfs_ag_resv.h"
#include "xfs_trans_space.h"
#include "xfs_quota.h"
+#include "xfs_attr.h"
+#include "xfs_reflink.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -692,13 +694,14 @@ xrep_findroot_block(
struct xrep_find_ag_btree *fab,
uint64_t owner,
xfs_agblock_t agbno,
- bool *found_it)
+ bool *done_with_block)
{
struct xfs_mount *mp = ri->sc->mp;
struct xfs_buf *bp;
struct xfs_btree_block *btblock;
xfs_daddr_t daddr;
- int error;
+ int block_level;
+ int error = 0;
daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.agno, agbno);
@@ -717,36 +720,111 @@ xrep_findroot_block(
return error;
}
+ /*
+ * Read the buffer into memory so that we can see if it's a match for
+ * our btree type. We have no clue if it is beforehand, and we want to
+ * avoid xfs_trans_read_buf's behavior of dumping the DONE state (which
+ * will cause needless disk reads in subsequent calls to this function)
+ * and logging metadata verifier failures.
+ *
+ * Therefore, pass in NULL buffer ops. If the buffer was already in
+ * memory from some other caller it will already have b_ops assigned.
+ * If it was in memory from a previous unsuccessful findroot_block
+ * call, the buffer won't have b_ops but it should be clean and ready
+ * for us to try to verify if the read call succeeds. The same applies
+ * if the buffer wasn't in memory at all.
+ *
+ * Note: If we never match a btree type with this buffer, it will be
+ * left in memory with NULL b_ops. This shouldn't be a problem unless
+ * the buffer gets written.
+ */
error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
mp->m_bsize, 0, &bp, NULL);
if (error)
return error;
- /*
- * Does this look like a block matching our fs and higher than any
- * other block we've found so far? If so, reattach buffer verifiers
- * so the AIL won't complain if the buffer is also dirty.
- */
+ /* Ensure the block magic matches the btree type we're looking for. */
btblock = XFS_BUF_TO_BLOCK(bp);
if (be32_to_cpu(btblock->bb_magic) != fab->magic)
goto out;
- if (xfs_sb_version_hascrc(&mp->m_sb) &&
- !uuid_equal(&btblock->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
- goto out;
- bp->b_ops = fab->buf_ops;
- /* Ignore this block if it's lower in the tree than we've seen. */
- if (fab->root != NULLAGBLOCK &&
- xfs_btree_get_level(btblock) < fab->height)
- goto out;
+ /*
+ * If the buffer already has ops applied and they're not the ones for
+ * this btree type, we know this block doesn't match the btree and we
+ * can bail out.
+ *
+ * If the buffer ops match ours, someone else has already validated
+ * the block for us, so we can move on to checking if this is a root
+ * block candidate.
+ *
+ * If the buffer does not have ops, nobody has successfully validated
+ * the contents and the buffer cannot be dirty. If the magic, uuid,
+ * and structure match this btree type then we'll move on to checking
+ * if it's a root block candidate. If there is no match, bail out.
+ */
+ if (bp->b_ops) {
+ if (bp->b_ops != fab->buf_ops)
+ goto out;
+ } else {
+ ASSERT(!xfs_trans_buf_is_dirty(bp));
+ if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
+ &mp->m_sb.sb_meta_uuid))
+ goto out;
+ fab->buf_ops->verify_read(bp);
+ if (bp->b_error) {
+ bp->b_error = 0;
+ goto out;
+ }
- /* Make sure we pass the verifiers. */
- bp->b_ops->verify_read(bp);
- if (bp->b_error)
+ /*
+ * Some read verifiers will (re)set b_ops, so we must be
+ * careful not to blow away any such assignment.
+ */
+ if (!bp->b_ops)
+ bp->b_ops = fab->buf_ops;
+ }
+
+ /*
+ * This block passes the magic/uuid and verifier tests for this btree
+ * type. We don't need the caller to try the other tree types.
+ */
+ *done_with_block = true;
+
+ /*
+ * Compare this btree block's level to the height of the current
+ * candidate root block.
+ *
+ * If the level matches the root we found previously, throw away both
+ * blocks because there can't be two candidate roots.
+ *
+ * If level is lower in the tree than the root we found previously,
+ * ignore this block.
+ */
+ block_level = xfs_btree_get_level(btblock);
+ if (block_level + 1 == fab->height) {
+ fab->root = NULLAGBLOCK;
goto out;
- fab->root = agbno;
- fab->height = xfs_btree_get_level(btblock) + 1;
- *found_it = true;
+ } else if (block_level < fab->height) {
+ goto out;
+ }
+
+ /*
+ * This is the highest block in the tree that we've found so far.
+ * Update the btree height to reflect what we've learned from this
+ * block.
+ */
+ fab->height = block_level + 1;
+
+ /*
+ * If this block doesn't have sibling pointers, then it's the new root
+ * block candidate. Otherwise, the root will be found farther up the
+ * tree.
+ */
+ if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
+ btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
+ fab->root = agbno;
+ else
+ fab->root = NULLAGBLOCK;
trace_xrep_findroot_block(mp, ri->sc->sa.agno, agbno,
be32_to_cpu(btblock->bb_magic), fab->height - 1);
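
[Editorial sketch] The selection rule implemented above can be condensed: keep the highest-level verified block seen so far, treat it as the root candidate only when both sibling pointers are null, and discard the candidate entirely when a second block turns up at the same level. A compact model of that rule, with hypothetical types:

	#include <stdbool.h>
	#include <stdio.h>

	#define NULLAGBLOCK ((unsigned)-1)

	struct cand { unsigned root; int height; };

	/* Feed one verified btree block into the root search. */
	static void consider(struct cand *c, unsigned agbno, int level,
			     bool has_siblings)
	{
		if (level + 1 == c->height) {	/* duplicate at candidate level */
			c->root = NULLAGBLOCK;
			return;
		}
		if (level < c->height)		/* lower in the tree: ignore */
			return;
		c->height = level + 1;		/* new highest level seen */
		c->root = has_siblings ? NULLAGBLOCK : agbno;
	}

	int main(void)
	{
		struct cand c = { NULLAGBLOCK, 0 };
		consider(&c, 10, 0, true);	/* leaf with siblings: not a root */
		consider(&c, 42, 1, false);	/* lone level-1 block: candidate */
		printf("root=%u height=%d\n", c.root, c.height); /* root=42 */
		return 0;
	}
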
@@ -768,7 +846,7 @@ xrep_findroot_rmap(
struct xrep_findroot *ri = priv;
struct xrep_find_ag_btree *fab;
xfs_agblock_t b;
- bool found_it;
+ bool done;
int error = 0;
/* Ignore anything that isn't AG metadata. */
@@ -777,16 +855,16 @@ xrep_findroot_rmap(
/* Otherwise scan each block + btree type. */
for (b = 0; b < rec->rm_blockcount; b++) {
- found_it = false;
+ done = false;
for (fab = ri->btree_info; fab->buf_ops; fab++) {
if (rec->rm_owner != fab->rmap_owner)
continue;
error = xrep_findroot_block(ri, fab,
rec->rm_owner, rec->rm_startblock + b,
- &found_it);
+ &done);
if (error)
return error;
- if (found_it)
+ if (done)
break;
}
}
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 4bfae1e61d30..1b2344d00525 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -412,19 +412,6 @@ xchk_validate_inputs(
goto out;
}
- error = -EOPNOTSUPP;
- /*
- * We won't scrub any filesystem that doesn't have the ability
- * to record unwritten extents. The option was made default in
- * 2003, removed from mkfs in 2007, and cannot be disabled in
- * v5, so if we find a filesystem without this flag it's either
- * really old or totally unsupported. Avoid it either way.
- * We also don't support v1-v3 filesystems, which aren't
- * mountable.
- */
- if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
- goto out;
-
/*
* We only want to repair read-write v5+ filesystems. Defer the check
* for ops->repair until after our scrub confirms that we need to
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 49f5f5896a43..338b9d9984e0 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -917,7 +917,7 @@ xfs_vm_writepage(
struct writeback_control *wbc)
{
struct xfs_writepage_ctx wpc = {
- .io_type = XFS_IO_INVALID,
+ .io_type = XFS_IO_HOLE,
};
int ret;
@@ -933,7 +933,7 @@ xfs_vm_writepages(
struct writeback_control *wbc)
{
struct xfs_writepage_ctx wpc = {
- .io_type = XFS_IO_INVALID,
+ .io_type = XFS_IO_HOLE,
};
int ret;
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 9af867951a10..494b4338446e 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -12,21 +12,19 @@ extern struct bio_set xfs_ioend_bioset;
* Types of I/O for bmap clustering and I/O completion tracking.
*/
enum {
- XFS_IO_INVALID, /* initial state */
+ XFS_IO_HOLE, /* covers region without any block allocation */
XFS_IO_DELALLOC, /* covers delalloc region */
XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
XFS_IO_OVERWRITE, /* covers already allocated extent */
XFS_IO_COW, /* covers copy-on-write extent */
- XFS_IO_HOLE, /* covers region without any block allocation */
};
#define XFS_IO_TYPES \
- { XFS_IO_INVALID, "invalid" }, \
- { XFS_IO_DELALLOC, "delalloc" }, \
- { XFS_IO_UNWRITTEN, "unwritten" }, \
- { XFS_IO_OVERWRITE, "overwrite" }, \
- { XFS_IO_COW, "CoW" }, \
- { XFS_IO_HOLE, "hole" }
+ { XFS_IO_HOLE, "hole" }, \
+ { XFS_IO_DELALLOC, "delalloc" }, \
+ { XFS_IO_UNWRITTEN, "unwritten" }, \
+ { XFS_IO_OVERWRITE, "overwrite" }, \
+ { XFS_IO_COW, "CoW" }
/*
* Structure for buffered I/O completions.
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index addbd74ecd8e..5d263dfdb3bc 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -406,10 +406,10 @@ xfs_getbmap_report_one(
struct xfs_bmbt_irec *got)
{
struct kgetbmap *p = out + bmv->bmv_entries;
- bool shared = false, trimmed = false;
+ bool shared = false;
int error;
- error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
+ error = xfs_reflink_trim_around_shared(ip, got, &shared);
if (error)
return error;
@@ -702,13 +702,9 @@ xfs_bmap_punch_delalloc_range(
struct xfs_iext_cursor icur;
int error = 0;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (!(ifp->if_flags & XFS_IFEXTENTS)) {
- error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
- if (error)
- goto out_unlock;
- }
+ ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
goto out_unlock;
@@ -1047,44 +1043,6 @@ out_trans_cancel:
}
static int
-xfs_adjust_extent_unmap_boundaries(
- struct xfs_inode *ip,
- xfs_fileoff_t *startoffset_fsb,
- xfs_fileoff_t *endoffset_fsb)
-{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_bmbt_irec imap;
- int nimap, error;
- xfs_extlen_t mod = 0;
-
- nimap = 1;
- error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
- if (error)
- return error;
-
- if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
- ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
- div_u64_rem(imap.br_startblock, mp->m_sb.sb_rextsize, &mod);
- if (mod)
- *startoffset_fsb += mp->m_sb.sb_rextsize - mod;
- }
-
- nimap = 1;
- error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
- if (error)
- return error;
-
- if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
- ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
- mod++;
- if (mod && mod != mp->m_sb.sb_rextsize)
- *endoffset_fsb -= mod;
- }
-
- return 0;
-}
-
-static int
xfs_flush_unmap_range(
struct xfs_inode *ip,
xfs_off_t offset,
@@ -1137,19 +1095,8 @@ xfs_free_file_space(
endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
/*
- * Need to zero the stuff we're not freeing, on disk. If it's a RT file
- * and we can't use unwritten extents then we actually need to ensure
- * to zero the whole extent, otherwise we just need to take of block
- * boundaries, and xfs_bunmapi will handle the rest.
+ * Need to zero the stuff we're not freeing, on disk.
*/
- if (XFS_IS_REALTIME_INODE(ip) &&
- !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
- error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
- &endoffset_fsb);
- if (error)
- return error;
- }
-
if (endoffset_fsb > startoffset_fsb) {
while (!done) {
error = xfs_unmap_extent(ip, startoffset_fsb,
@@ -1584,7 +1531,7 @@ xfs_swap_extent_rmap(
tirec.br_blockcount, &irec,
&nimaps, 0);
if (error)
- goto out_defer;
+ goto out;
ASSERT(nimaps == 1);
ASSERT(tirec.br_startoff == irec.br_startoff);
trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
@@ -1599,22 +1546,22 @@ xfs_swap_extent_rmap(
/* Remove the mapping from the donor file. */
error = xfs_bmap_unmap_extent(tp, tip, &uirec);
if (error)
- goto out_defer;
+ goto out;
/* Remove the mapping from the source file. */
error = xfs_bmap_unmap_extent(tp, ip, &irec);
if (error)
- goto out_defer;
+ goto out;
/* Map the donor file's blocks into the source file. */
error = xfs_bmap_map_extent(tp, ip, &uirec);
if (error)
- goto out_defer;
+ goto out;
/* Map the source file's blocks into the donor file. */
error = xfs_bmap_map_extent(tp, tip, &irec);
if (error)
- goto out_defer;
+ goto out;
error = xfs_defer_finish(tpp);
tp = *tpp;
@@ -1636,8 +1583,6 @@ xfs_swap_extent_rmap(
tip->i_d.di_flags2 = tip_flags2;
return 0;
-out_defer:
- xfs_defer_cancel(tp);
out:
trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
tip->i_d.di_flags2 = tip_flags2;
@@ -1830,6 +1775,12 @@ xfs_swap_extents(
if (error)
goto out_unlock;
+ if (xfs_inode_has_cow_data(tip)) {
+ error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
+ if (error)
+ return error;
+ }
+
/*
* Extent "swapping" with rmap requires a permanent reservation and
* a block reservation because it's really just a remap operation
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index e839907e8492..b21ea2ba768d 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -37,6 +37,32 @@ static kmem_zone_t *xfs_buf_zone;
#define xb_to_gfp(flags) \
((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
+/*
+ * Locking orders
+ *
+ * xfs_buf_ioacct_inc:
+ * xfs_buf_ioacct_dec:
+ * b_sema (caller holds)
+ * b_lock
+ *
+ * xfs_buf_stale:
+ * b_sema (caller holds)
+ * b_lock
+ * lru_lock
+ *
+ * xfs_buf_rele:
+ * b_lock
+ * pag_buf_lock
+ * lru_lock
+ *
+ * xfs_buftarg_wait_rele
+ * lru_lock
+ * b_lock (trylock due to inversion)
+ *
+ * xfs_buftarg_isolate
+ * lru_lock
+ * b_lock (trylock due to inversion)
+ */
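
[Editorial sketch] The ordering comment also records why the two buftarg walkers use trylock: they acquire lru_lock first, the inverse of every other path, so they may only attempt b_lock and must skip the buffer on contention. A small pthreads sketch of that trylock-on-inversion rule (lock names borrowed purely for illustration):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t b_lock   = PTHREAD_MUTEX_INITIALIZER;

	/* Normal order: b_lock, then lru_lock (cf. xfs_buf_stale). */
	static void normal_path(void)
	{
		pthread_mutex_lock(&b_lock);
		pthread_mutex_lock(&lru_lock);
		/* ... */
		pthread_mutex_unlock(&lru_lock);
		pthread_mutex_unlock(&b_lock);
	}

	/* Inverted walker: holds lru_lock, so it may only *try* b_lock. */
	static int walker_path(void)
	{
		pthread_mutex_lock(&lru_lock);
		if (pthread_mutex_trylock(&b_lock) != 0) {
			pthread_mutex_unlock(&lru_lock);
			return -1;	/* skip; retry this buffer later */
		}
		/* ... */
		pthread_mutex_unlock(&b_lock);
		pthread_mutex_unlock(&lru_lock);
		return 0;
	}

	int main(void)
	{
		normal_path();
		printf("walker: %d\n", walker_path());
		return 0;
	}
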
static inline int
xfs_buf_is_vmapped(
@@ -749,6 +775,30 @@ _xfs_buf_read(
return xfs_buf_submit(bp);
}
+/*
+ * If the caller passed in an ops structure and the buffer doesn't have ops
+ * assigned, set the ops and use them to verify the contents. If the contents
+ * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no
+ * recorded errors and is already in XBF_DONE state.
+ */
+int
+xfs_buf_ensure_ops(
+ struct xfs_buf *bp,
+ const struct xfs_buf_ops *ops)
+{
+ ASSERT(bp->b_flags & XBF_DONE);
+ ASSERT(bp->b_error == 0);
+
+ if (!ops || bp->b_ops)
+ return 0;
+
+ bp->b_ops = ops;
+ bp->b_ops->verify_read(bp);
+ if (bp->b_error)
+ bp->b_flags &= ~XBF_DONE;
+ return bp->b_error;
+}
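
[Editorial sketch] A stripped-down model of the xfs_buf_ensure_ops() contract, using hypothetical types: ops are attached only if the buffer has none yet, and a verification failure demotes the buffer from its done state:

	#include <stdbool.h>
	#include <stdio.h>

	struct buf;
	struct ops { int (*verify)(struct buf *); };
	struct buf { const struct ops *ops; bool done; int error; };

	static int ensure_ops(struct buf *bp, const struct ops *ops)
	{
		if (!ops || bp->ops)		/* nothing to do, or already owned */
			return 0;
		bp->ops = ops;
		bp->error = ops->verify(bp);
		if (bp->error)
			bp->done = false;	/* contents no longer trusted */
		return bp->error;
	}

	static int verify_ok(struct buf *bp) { (void)bp; return 0; }
	static const struct ops demo_ops = { verify_ok };

	int main(void)
	{
		struct buf bp = { .ops = NULL, .done = true, .error = 0 };
		printf("err=%d done=%d\n", ensure_ops(&bp, &demo_ops), bp.done);
		return 0;
	}
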
+
xfs_buf_t *
xfs_buf_read_map(
struct xfs_buftarg *target,
@@ -762,26 +812,32 @@ xfs_buf_read_map(
flags |= XBF_READ;
bp = xfs_buf_get_map(target, map, nmaps, flags);
- if (bp) {
- trace_xfs_buf_read(bp, flags, _RET_IP_);
+ if (!bp)
+ return NULL;
- if (!(bp->b_flags & XBF_DONE)) {
- XFS_STATS_INC(target->bt_mount, xb_get_read);
- bp->b_ops = ops;
- _xfs_buf_read(bp, flags);
- } else if (flags & XBF_ASYNC) {
- /*
- * Read ahead call which is already satisfied,
- * drop the buffer
- */
- xfs_buf_relse(bp);
- return NULL;
- } else {
- /* We do not want read in the flags */
- bp->b_flags &= ~XBF_READ;
- }
+ trace_xfs_buf_read(bp, flags, _RET_IP_);
+
+ if (!(bp->b_flags & XBF_DONE)) {
+ XFS_STATS_INC(target->bt_mount, xb_get_read);
+ bp->b_ops = ops;
+ _xfs_buf_read(bp, flags);
+ return bp;
}
+ xfs_buf_ensure_ops(bp, ops);
+
+ if (flags & XBF_ASYNC) {
+ /*
+ * Read ahead call which is already satisfied,
+ * drop the buffer
+ */
+ xfs_buf_relse(bp);
+ return NULL;
+ }
+
+ /* We do not want read in the flags */
+ bp->b_flags &= ~XBF_READ;
+ ASSERT(bp->b_ops != NULL || ops == NULL);
return bp;
}
@@ -1006,8 +1062,18 @@ xfs_buf_rele(
ASSERT(atomic_read(&bp->b_hold) > 0);
- release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
+ /*
+ * We grab the b_lock here first to serialise racing xfs_buf_rele()
+ * calls. The pag_buf_lock being taken on the last reference only
+ * serialises against racing lookups in xfs_buf_find(). IOWs, the second
+ * to last reference we drop here is not serialised against the last
+ * reference until we take bp->b_lock. Hence if we don't grab b_lock
+ * first, the last "release" reference can win the race to the lock and
+ * free the buffer before the second-to-last reference is processed,
+ * leading to a use-after-free scenario.
+ */
spin_lock(&bp->b_lock);
+ release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
if (!release) {
/*
* Drop the in-flight state if the buffer is already on the LRU
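
[Editorial sketch] The reordering in this hunk (b_lock taken before the atomic_dec_and_lock) is the whole fix: serialising the decrement under the per-buffer lock keeps a racing final release from freeing the buffer while the second-to-last release is still inspecting it. A simplified single-object model of the rule:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		atomic_int hold;
		pthread_mutex_t lock;	/* models bp->b_lock */
	};

	/* Take the object lock *before* dropping the hold count. */
	static void put(struct obj *o)
	{
		pthread_mutex_lock(&o->lock);
		int release = (atomic_fetch_sub(&o->hold, 1) == 1);
		pthread_mutex_unlock(&o->lock);
		if (release)
			free(o);	/* last reference: safe to tear down */
	}

	int main(void)
	{
		struct obj *o = malloc(sizeof(*o));
		atomic_init(&o->hold, 2);
		pthread_mutex_init(&o->lock, NULL);
		put(o);			/* second-to-last reference */
		put(o);			/* last reference frees */
		puts("ok");
		return 0;
	}
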
@@ -1989,6 +2055,13 @@ xfs_buf_delwri_submit_buffers(
* is only safely useable for callers that can track I/O completion by higher
* level means, e.g. AIL pushing as the @buffer_list is consumed in this
* function.
+ *
+ * Note: this function will skip buffers it would block on, and in doing so
+ * leaves them on @buffer_list so they can be retried on a later pass. As such,
+ * it is up to the caller to ensure that the buffer list is fully submitted or
+ * cancelled appropriately when they are finished with the list. Failure to
+ * cancel or resubmit the list until it is empty will result in leaked buffers
+ * at unmount time.
*/
int
xfs_buf_delwri_submit_nowait(
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 4e3171acd0f8..b9f5511ea998 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -385,4 +385,6 @@ extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);
#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
+int xfs_buf_ensure_ops(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
+
#endif /* __XFS_BUF_H__ */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 1c9d1398980b..12d8455bfbb2 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -532,6 +532,49 @@ xfs_buf_item_push(
}
/*
+ * Drop the buffer log item refcount and take appropriate action. This helper
+ * determines whether the bli must be freed or not, since a decrement to zero
+ * does not necessarily mean the bli is unused.
+ *
+ * Return true if the bli is freed, false otherwise.
+ */
+bool
+xfs_buf_item_put(
+ struct xfs_buf_log_item *bip)
+{
+ struct xfs_log_item *lip = &bip->bli_item;
+ bool aborted;
+ bool dirty;
+
+ /* drop the bli ref and return if it wasn't the last one */
+ if (!atomic_dec_and_test(&bip->bli_refcount))
+ return false;
+
+ /*
+ * We dropped the last ref and must free the item if clean or aborted.
+ * If the bli is dirty and non-aborted, the buffer was clean in the
+ * transaction but still awaiting writeback from previous changes. In
+ * that case, the bli is freed on buffer writeback completion.
+ */
+ aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
+ XFS_FORCED_SHUTDOWN(lip->li_mountp);
+ dirty = bip->bli_flags & XFS_BLI_DIRTY;
+ if (dirty && !aborted)
+ return false;
+
+ /*
+ * The bli is aborted or clean. An aborted item may be in the AIL
+ * regardless of dirty state. For example, consider an aborted
+ * transaction that invalidated a dirty bli and cleared the dirty
+ * state.
+ */
+ if (aborted)
+ xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
+ xfs_buf_item_relse(bip->bli_buf);
+ return true;
+}
+
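
[Editorial sketch] The new helper's decision table is small enough to model directly: the last reference frees the item only when it is clean or aborted, while a dirty, non-aborted item survives until writeback completion. A runnable sketch (flags passed in rather than read from the item):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Returns true if the item would be freed, mirroring the bli rule. */
	static bool item_put(atomic_int *refs, bool dirty, bool aborted)
	{
		if (atomic_fetch_sub(refs, 1) != 1)
			return false;		/* not the last reference */
		if (dirty && !aborted)
			return false;		/* freed later, at writeback */
		/* free the item here */
		return true;
	}

	int main(void)
	{
		atomic_int refs;
		atomic_init(&refs, 1);
		printf("freed=%d\n", item_put(&refs, true, false)); /* freed=0 */
		return 0;
	}
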
+/*
* Release the buffer associated with the buf log item. If there is no dirty
* logged data associated with the buffer recorded in the buf log item, then
* free the buf log item and remove the reference to it in the buffer.
@@ -556,76 +599,42 @@ xfs_buf_item_unlock(
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
- bool aborted;
- bool hold = !!(bip->bli_flags & XFS_BLI_HOLD);
- bool dirty = !!(bip->bli_flags & XFS_BLI_DIRTY);
+ bool released;
+ bool hold = bip->bli_flags & XFS_BLI_HOLD;
+ bool stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
- bool ordered = !!(bip->bli_flags & XFS_BLI_ORDERED);
+ bool ordered = bip->bli_flags & XFS_BLI_ORDERED;
+ bool dirty = bip->bli_flags & XFS_BLI_DIRTY;
#endif
- aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags);
-
- /* Clear the buffer's association with this transaction. */
- bp->b_transp = NULL;
-
- /*
- * The per-transaction state has been copied above so clear it from the
- * bli.
- */
- bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
-
- /*
- * If the buf item is marked stale, then don't do anything. We'll
- * unlock the buffer and free the buf item when the buffer is unpinned
- * for the last time.
- */
- if (bip->bli_flags & XFS_BLI_STALE) {
- trace_xfs_buf_item_unlock_stale(bip);
- ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
- if (!aborted) {
- atomic_dec(&bip->bli_refcount);
- return;
- }
- }
-
trace_xfs_buf_item_unlock(bip);
/*
- * If the buf item isn't tracking any data, free it, otherwise drop the
- * reference we hold to it. If we are aborting the transaction, this may
- * be the only reference to the buf item, so we free it anyway
- * regardless of whether it is dirty or not. A dirty abort implies a
- * shutdown, anyway.
- *
* The bli dirty state should match whether the blf has logged segments
* except for ordered buffers, where only the bli should be dirty.
*/
ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
(ordered && dirty && !xfs_buf_item_dirty_format(bip)));
+ ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
/*
- * Clean buffers, by definition, cannot be in the AIL. However, aborted
- * buffers may be in the AIL regardless of dirty state. An aborted
- * transaction that invalidates a buffer already in the AIL may have
- * marked it stale and cleared the dirty state, for example.
- *
- * Therefore if we are aborting a buffer and we've just taken the last
- * reference away, we have to check if it is in the AIL before freeing
- * it. We need to free it in this case, because an aborted transaction
- * has already shut the filesystem down and this is the last chance we
- * will have to do so.
+ * Clear the buffer's association with this transaction and
+ * per-transaction state from the bli, which has been copied above.
*/
- if (atomic_dec_and_test(&bip->bli_refcount)) {
- if (aborted) {
- ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
- xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
- xfs_buf_item_relse(bp);
- } else if (!dirty)
- xfs_buf_item_relse(bp);
- }
+ bp->b_transp = NULL;
+ bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
- if (!hold)
- xfs_buf_relse(bp);
+ /*
+ * Unref the item and unlock the buffer unless held or stale. Stale
+ * buffers remain locked until final unpin unless the bli is freed by
+ * the unref call. The latter implies shutdown because buffer
+ * invalidation dirties the bli and transaction.
+ */
+ released = xfs_buf_item_put(bip);
+ if (hold || (stale && !released))
+ return;
+ ASSERT(!stale || test_bit(XFS_LI_ABORTED, &lip->li_flags));
+ xfs_buf_relse(bp);
}
/*
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 3f7d7b72e7e6..90f65f891fab 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -51,6 +51,7 @@ struct xfs_buf_log_item {
int xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
void xfs_buf_item_relse(struct xfs_buf *);
+bool xfs_buf_item_put(struct xfs_buf_log_item *);
void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint);
bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
void xfs_buf_attach_iodone(struct xfs_buf *,
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 7c00b8bedfe3..093c2b8d7e20 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -470,20 +470,13 @@ xfs_fs_goingdown(
*/
void
xfs_do_force_shutdown(
- xfs_mount_t *mp,
+ struct xfs_mount *mp,
int flags,
char *fname,
int lnnum)
{
- int logerror;
-
- logerror = flags & SHUTDOWN_LOG_IO_ERROR;
+ bool logerror = flags & SHUTDOWN_LOG_IO_ERROR;
- if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
- xfs_notice(mp,
- "%s(0x%x) called from line %d of file %s. Return address = "PTR_FMT,
- __func__, flags, lnnum, fname, __return_address);
- }
/*
* No need to duplicate efforts.
*/
@@ -499,27 +492,34 @@ xfs_do_force_shutdown(
if (xfs_log_force_umount(mp, logerror))
return;
+ if (flags & SHUTDOWN_FORCE_UMOUNT) {
+ xfs_alert(mp,
+"User initiated shutdown received. Shutting down filesystem");
+ return;
+ }
+
+ xfs_notice(mp,
+"%s(0x%x) called from line %d of file %s. Return address = "PTR_FMT,
+ __func__, flags, lnnum, fname, __return_address);
+
if (flags & SHUTDOWN_CORRUPT_INCORE) {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
- "Corruption of in-memory data detected. Shutting down filesystem");
+"Corruption of in-memory data detected. Shutting down filesystem");
if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
xfs_stack_trace();
- } else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
- if (logerror) {
- xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
- "Log I/O Error Detected. Shutting down filesystem");
- } else if (flags & SHUTDOWN_DEVICE_REQ) {
- xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
- "All device paths lost. Shutting down filesystem");
- } else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
- xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
- "I/O Error Detected. Shutting down filesystem");
- }
- }
- if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
- xfs_alert(mp,
- "Please umount the filesystem and rectify the problem(s)");
+ } else if (logerror) {
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
+ "Log I/O Error Detected. Shutting down filesystem");
+ } else if (flags & SHUTDOWN_DEVICE_REQ) {
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
+ "All device paths lost. Shutting down filesystem");
+ } else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
+ xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
+ "I/O Error Detected. Shutting down filesystem");
}
+
+ xfs_alert(mp,
+ "Please unmount the filesystem and rectify the problem(s)");
}
/*
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index d957a46dc1cb..05db9540e459 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1563,7 +1563,7 @@ xfs_itruncate_extents_flags(
error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
XFS_ITRUNC_MAX_EXTENTS, &done);
if (error)
- goto out_bmap_cancel;
+ goto out;
/*
* Duplicate the transaction that has the permanent
@@ -1599,14 +1599,6 @@ xfs_itruncate_extents_flags(
out:
*tpp = tp;
return error;
-out_bmap_cancel:
- /*
- * If the bunmapi call encounters an error, return to the caller where
- * the transaction can be properly aborted. We just need to make sure
- * we're not holding any resources that we were not when we came in.
- */
- xfs_defer_cancel(tp);
- goto out;
}
int
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 0ef5ece5634c..6e2c08f30f60 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -604,14 +604,6 @@ xfs_ioc_space(
uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
int error;
- /*
- * Only allow the sys admin to reserve space unless
- * unwritten extents are enabled.
- */
- if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) &&
- !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
return -EPERM;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 6320aca39f39..27c93b5f029d 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -62,6 +62,21 @@ xfs_bmbt_to_iomap(
iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
}
+static void
+xfs_hole_to_iomap(
+ struct xfs_inode *ip,
+ struct iomap *iomap,
+ xfs_fileoff_t offset_fsb,
+ xfs_fileoff_t end_fsb)
+{
+ iomap->addr = IOMAP_NULL_ADDR;
+ iomap->type = IOMAP_HOLE;
+ iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
+ iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
+ iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
+ iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
+}
+
xfs_extlen_t
xfs_eof_alignment(
struct xfs_inode *ip,
@@ -502,6 +517,7 @@ xfs_file_iomap_begin_delay(
struct inode *inode,
loff_t offset,
loff_t count,
+ unsigned flags,
struct iomap *iomap)
{
struct xfs_inode *ip = XFS_I(inode);
@@ -538,15 +554,23 @@ xfs_file_iomap_begin_delay(
goto out_unlock;
}
+ end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
+
eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got);
- if (!eof && got.br_startoff <= offset_fsb) {
- if (xfs_is_reflink_inode(ip)) {
- bool shared;
+ if (eof)
+ got.br_startoff = end_fsb; /* fake hole until the end */
- end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
- maxbytes_fsb);
+ if (got.br_startoff <= offset_fsb) {
+ /*
+ * For reflink files we may need a delalloc reservation when
+ * overwriting shared extents. This includes zeroing of
+ * existing extents that contain data.
+ */
+ if (xfs_is_reflink_inode(ip) &&
+ ((flags & IOMAP_WRITE) ||
+ got.br_state != XFS_EXT_UNWRITTEN)) {
xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
- error = xfs_reflink_reserve_cow(ip, &got, &shared);
+ error = xfs_reflink_reserve_cow(ip, &got);
if (error)
goto out_unlock;
}
@@ -555,6 +579,11 @@ xfs_file_iomap_begin_delay(
goto done;
}
+ if (flags & IOMAP_ZERO) {
+ xfs_hole_to_iomap(ip, iomap, offset_fsb, got.br_startoff);
+ goto out_unlock;
+ }
+
error = xfs_qm_dqattach_locked(ip, false);
if (error)
goto out_unlock;
@@ -1003,16 +1032,17 @@ xfs_file_iomap_begin(
struct xfs_bmbt_irec imap;
xfs_fileoff_t offset_fsb, end_fsb;
int nimaps = 1, error = 0;
- bool shared = false, trimmed = false;
+ bool shared = false;
unsigned lockmode;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
+ if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && !(flags & IOMAP_DIRECT) &&
!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
/* Reserve delalloc blocks for regular writeback. */
- return xfs_file_iomap_begin_delay(inode, offset, length, iomap);
+ return xfs_file_iomap_begin_delay(inode, offset, length, flags,
+ iomap);
}
/*
@@ -1038,8 +1068,7 @@ xfs_file_iomap_begin(
if (flags & IOMAP_REPORT) {
/* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
- &trimmed);
+ error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
if (error)
goto out_unlock;
}
@@ -1065,7 +1094,7 @@ xfs_file_iomap_begin(
if (error)
goto out_unlock;
} else {
- error = xfs_reflink_reserve_cow(ip, &imap, &shared);
+ error = xfs_reflink_reserve_cow(ip, &imap);
if (error)
goto out_unlock;
}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index c3e74f9128e8..f48ffd7a8d3e 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -471,8 +471,18 @@ xfs_vn_get_link_inline(
struct inode *inode,
struct delayed_call *done)
{
+ char *link;
+
ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
- return XFS_I(inode)->i_df.if_u1.if_data;
+
+ /*
+ * The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
+ * if_data is junk.
+ */
+ link = XFS_I(inode)->i_df.if_u1.if_data;
+ if (!link)
+ return ERR_PTR(-EFSCORRUPTED);
+ return link;
}
STATIC int
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index a21dc61ec09e..1fc9e9042e0e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1570,16 +1570,6 @@ xlog_find_zeroed(
if (last_cycle != 0) { /* log completely written to */
xlog_put_bp(bp);
return 0;
- } else if (first_cycle != 1) {
- /*
- * If the cycle of the last block is zero, the cycle of
- * the first block must be 1. If it's not, maybe we're
- * not looking at a log... Bail out.
- */
- xfs_warn(log->l_mp,
- "Log inconsistent or not a log (last==0, first!=1)");
- error = -EINVAL;
- goto bp_err;
}
/* we have a partially zeroed log */
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 38f405415b88..8eaeec9d58ed 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -182,8 +182,7 @@ int
xfs_reflink_trim_around_shared(
struct xfs_inode *ip,
struct xfs_bmbt_irec *irec,
- bool *shared,
- bool *trimmed)
+ bool *shared)
{
xfs_agnumber_t agno;
xfs_agblock_t agbno;
@@ -209,7 +208,7 @@ xfs_reflink_trim_around_shared(
if (error)
return error;
- *shared = *trimmed = false;
+ *shared = false;
if (fbno == NULLAGBLOCK) {
/* No shared blocks at all. */
return 0;
@@ -222,8 +221,6 @@ xfs_reflink_trim_around_shared(
*/
irec->br_blockcount = flen;
*shared = true;
- if (flen != aglen)
- *trimmed = true;
return 0;
} else {
/*
@@ -233,7 +230,6 @@ xfs_reflink_trim_around_shared(
* start of the shared region.
*/
irec->br_blockcount = fbno - agbno;
- *trimmed = true;
return 0;
}
}
@@ -241,7 +237,7 @@ xfs_reflink_trim_around_shared(
/*
* Trim the passed in imap to the next shared/unshared extent boundary, and
* if imap->br_startoff points to a shared extent reserve space for it in the
- * COW fork. In this case *shared is set to true, else to false.
+ * COW fork.
*
* Note that imap will always contain the block numbers for the existing blocks
* in the data fork, as the upper layers need them for read-modify-write
@@ -250,14 +246,14 @@ xfs_reflink_trim_around_shared(
int
xfs_reflink_reserve_cow(
struct xfs_inode *ip,
- struct xfs_bmbt_irec *imap,
- bool *shared)
+ struct xfs_bmbt_irec *imap)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
struct xfs_bmbt_irec got;
int error = 0;
- bool eof = false, trimmed;
+ bool eof = false;
struct xfs_iext_cursor icur;
+ bool shared;
/*
* Search the COW fork extent list first. This serves two purposes:
@@ -273,18 +269,16 @@ xfs_reflink_reserve_cow(
if (!eof && got.br_startoff <= imap->br_startoff) {
trace_xfs_reflink_cow_found(ip, imap);
xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
-
- *shared = true;
return 0;
}
/* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
+ error = xfs_reflink_trim_around_shared(ip, imap, &shared);
if (error)
return error;
/* Not shared? Just report the (potentially capped) extent. */
- if (!*shared)
+ if (!shared)
return 0;
/*
@@ -352,6 +346,50 @@ xfs_reflink_convert_cow(
return error;
}
+/*
+ * Find the extent that maps the given range in the COW fork. Even if the extent
+ * is not shared we might have a preallocation for it in the COW fork. If so we
+ * use that rather than trigger a new allocation.
+ */
+static int
+xfs_find_trim_cow_extent(
+ struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap,
+ bool *shared,
+ bool *found)
+{
+ xfs_fileoff_t offset_fsb = imap->br_startoff;
+ xfs_filblks_t count_fsb = imap->br_blockcount;
+ struct xfs_iext_cursor icur;
+ struct xfs_bmbt_irec got;
+
+ *found = false;
+
+ /*
+ * If we don't find an overlapping extent, trim the range we need to
+ * allocate to fit the hole we found.
+ */
+ if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
+ got.br_startoff = offset_fsb + count_fsb;
+ if (got.br_startoff > offset_fsb) {
+ xfs_trim_extent(imap, imap->br_startoff,
+ got.br_startoff - imap->br_startoff);
+ return xfs_reflink_trim_around_shared(ip, imap, shared);
+ }
+
+ *shared = true;
+ if (isnullstartblock(got.br_startblock)) {
+ xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
+ return 0;
+ }
+
+ /* real extent found - no need to allocate */
+ xfs_trim_extent(&got, offset_fsb, count_fsb);
+ *imap = got;
+ *found = true;
+ return 0;
+}
+
/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
@@ -363,78 +401,64 @@ xfs_reflink_allocate_cow(
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t offset_fsb = imap->br_startoff;
xfs_filblks_t count_fsb = imap->br_blockcount;
- struct xfs_bmbt_irec got;
- struct xfs_trans *tp = NULL;
+ struct xfs_trans *tp;
int nimaps, error = 0;
- bool trimmed;
+ bool found;
xfs_filblks_t resaligned;
xfs_extlen_t resblks = 0;
- struct xfs_iext_cursor icur;
-retry:
- ASSERT(xfs_is_reflink_inode(ip));
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ ASSERT(xfs_is_reflink_inode(ip));
- /*
- * Even if the extent is not shared we might have a preallocation for
- * it in the COW fork. If so use it.
- */
- if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) &&
- got.br_startoff <= offset_fsb) {
- *shared = true;
-
- /* If we have a real allocation in the COW fork we're done. */
- if (!isnullstartblock(got.br_startblock)) {
- xfs_trim_extent(&got, offset_fsb, count_fsb);
- *imap = got;
- goto convert;
- }
+ error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+ if (error || !*shared)
+ return error;
+ if (found)
+ goto convert;
- xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
- } else {
- error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
- if (error || !*shared)
- goto out;
- }
+ resaligned = xfs_aligned_fsb_count(imap->br_startoff,
+ imap->br_blockcount, xfs_get_cowextsz_hint(ip));
+ resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
- if (!tp) {
- resaligned = xfs_aligned_fsb_count(imap->br_startoff,
- imap->br_blockcount, xfs_get_cowextsz_hint(ip));
- resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
+ xfs_iunlock(ip, *lockmode);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
+ *lockmode = XFS_ILOCK_EXCL;
+ xfs_ilock(ip, *lockmode);
- xfs_iunlock(ip, *lockmode);
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
- *lockmode = XFS_ILOCK_EXCL;
- xfs_ilock(ip, *lockmode);
+ if (error)
+ return error;
- if (error)
- return error;
+ error = xfs_qm_dqattach_locked(ip, false);
+ if (error)
+ goto out_trans_cancel;
- error = xfs_qm_dqattach_locked(ip, false);
- if (error)
- goto out;
- goto retry;
+ /*
+ * Check for an overlapping extent again now that we dropped the ilock.
+ */
+ error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+ if (error || !*shared)
+ goto out_trans_cancel;
+ if (found) {
+ xfs_trans_cancel(tp);
+ goto convert;
}
error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
XFS_QMOPT_RES_REGBLKS);
if (error)
- goto out;
+ goto out_trans_cancel;
xfs_trans_ijoin(tp, ip, 0);
- nimaps = 1;
-
/* Allocate the entire reservation as unwritten blocks. */
+ nimaps = 1;
error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
resblks, imap, &nimaps);
if (error)
- goto out_trans_cancel;
+ goto out_unreserve;
xfs_inode_set_cowblocks_tag(ip);
-
- /* Finish up. */
error = xfs_trans_commit(tp);
if (error)
return error;
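
[Editorial sketch] The restructured flow replaces the old retry loop with a single optimistic re-check: the extent lookup runs once under the ilock, the lock is dropped to allocate the transaction, and the lookup is repeated after relocking in case another thread got there first. A generic pthreads sketch of that pattern (state and names hypothetical):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;
	static int have_extent;		/* state another thread may change */

	static int allocate_cow(void)
	{
		pthread_mutex_lock(&ilock);
		int found = have_extent;	/* first lookup */
		pthread_mutex_unlock(&ilock);
		if (found)
			return 0;		/* convert and done */

		/* ... allocate the transaction without the lock held ... */

		pthread_mutex_lock(&ilock);
		if (have_extent) {		/* re-check: lost the race */
			pthread_mutex_unlock(&ilock);
			/* cancel the transaction, then convert */
			return 0;
		}
		have_extent = 1;		/* do the allocation ourselves */
		pthread_mutex_unlock(&ilock);
		return 0;
	}

	int main(void) { return allocate_cow(); }
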
@@ -447,12 +471,12 @@ retry:
return -ENOSPC;
convert:
return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb);
-out_trans_cancel:
+
+out_unreserve:
xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
XFS_QMOPT_RES_REGBLKS);
-out:
- if (tp)
- xfs_trans_cancel(tp);
+out_trans_cancel:
+ xfs_trans_cancel(tp);
return error;
}
@@ -666,14 +690,12 @@ xfs_reflink_end_cow(
if (!del.br_blockcount)
goto prev_extent;
- ASSERT(!isnullstartblock(got.br_startblock));
-
/*
- * Don't remap unwritten extents; these are
- * speculatively preallocated CoW extents that have been
- * allocated but have not yet been involved in a write.
+ * Only remap real extents that contain data. With AIO,
+ * speculative preallocations can leak into the range we
+ * are called upon, and we need to skip them.
*/
- if (got.br_state == XFS_EXT_UNWRITTEN)
+ if (!xfs_bmap_is_real_extent(&got))
goto prev_extent;
/* Unmap the old blocks in the data fork. */
@@ -1195,35 +1217,92 @@ retry:
return 0;
}
+/* Unlock both inodes after they've been prepped for a range clone. */
+STATIC void
+xfs_reflink_remap_unlock(
+ struct file *file_in,
+ struct file *file_out)
+{
+ struct inode *inode_in = file_inode(file_in);
+ struct xfs_inode *src = XFS_I(inode_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
+ bool same_inode = (inode_in == inode_out);
+
+ xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
+ if (!same_inode)
+ xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+ inode_unlock(inode_out);
+ if (!same_inode)
+ inode_unlock_shared(inode_in);
+}
+
/*
- * Link a range of blocks from one file to another.
+ * If we're reflinking to a point past the destination file's EOF, we must
+ * zero any speculative post-EOF preallocations that sit between the old EOF
+ * and the destination file offset.
*/
-int
-xfs_reflink_remap_range(
+static int
+xfs_reflink_zero_posteof(
+ struct xfs_inode *ip,
+ loff_t pos)
+{
+ loff_t isize = i_size_read(VFS_I(ip));
+
+ if (pos <= isize)
+ return 0;
+
+ trace_xfs_zero_eof(ip, isize, pos - isize);
+ return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
+ &xfs_iomap_ops);
+}
+
+/*
+ * Prepare two files for range cloning. Upon a successful return both inodes
+ * will have the iolock and mmaplock held, the page cache of the out file will
+ * be truncated, and any leases on the out file will have been broken. This
+ * function borrows heavily from xfs_file_aio_write_checks.
+ *
+ * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't
+ * checked that the bytes beyond EOF physically match. Hence we cannot use the
+ * EOF block in the source dedupe range because it's not a complete block match,
+ * hence can introduce corruption into the file that has its block replaced.
+ *
+ * In similar fashion, the VFS file cloning also allows partial EOF blocks to be
+ * "block aligned" for the purposes of cloning entire files. However, if the
+ * source file range includes the EOF block and it lands within the existing EOF
+ * of the destination file, then we can expose stale data from beyond the source
+ * file EOF in the destination file.
+ *
+ * XFS doesn't support partial block sharing, so in both cases we have to
+ * check for these ourselves. For dedupe, we can simply round the length to
+ * dedupe down to the previous whole block and ignore the partial EOF block.
+ * While this means we can't dedupe the last block of a file, this is an
+ * acceptable tradeoff for simplicity of implementation.
+ *
+ * For cloning, we want to share the partial EOF block if it is also the new EOF
+ * block of the destination file. If the partial EOF block lies inside the
+ * existing destination EOF, then we have to abort the clone to avoid exposing
+ * stale data in the destination file. Hence we reject these clone attempts with
+ * -EINVAL in this case.
+ */
+STATIC int
+xfs_reflink_remap_prep(
struct file *file_in,
loff_t pos_in,
struct file *file_out,
loff_t pos_out,
- u64 len,
+ u64 *len,
bool is_dedupe)
{
struct inode *inode_in = file_inode(file_in);
struct xfs_inode *src = XFS_I(inode_in);
struct inode *inode_out = file_inode(file_out);
struct xfs_inode *dest = XFS_I(inode_out);
- struct xfs_mount *mp = src->i_mount;
bool same_inode = (inode_in == inode_out);
- xfs_fileoff_t sfsbno, dfsbno;
- xfs_filblks_t fsblen;
- xfs_extlen_t cowextsize;
+ u64 blkmask = i_blocksize(inode_in) - 1;
ssize_t ret;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return -EOPNOTSUPP;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
/* Lock both files against IO */
ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out);
if (ret)
@@ -1245,33 +1324,115 @@ xfs_reflink_remap_range(
goto out_unlock;
ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
- &len, is_dedupe);
+ len, is_dedupe);
if (ret <= 0)
goto out_unlock;
+ /*
+ * If the dedupe data matches, chop off the partial EOF block
+ * from the source file so we never try to dedupe a partial
+ * block.
+ */
+ if (is_dedupe) {
+ *len &= ~blkmask;
+ } else if (*len & blkmask) {
+ /*
+ * The user is attempting to share a partial EOF block;
+ * if it lands inside the destination EOF, reject it.
+ */
+ if (pos_out + *len < i_size_read(inode_out)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
/* Attach dquots to dest inode before changing block map */
ret = xfs_qm_dqattach(dest);
if (ret)
goto out_unlock;
- trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
-
/*
- * Clear out post-eof preallocations because we don't have page cache
- * backing the delayed allocations and they'll never get freed on
- * their own.
+ * Zero existing post-eof speculative preallocations in the destination
+ * file.
*/
- if (xfs_can_free_eofblocks(dest, true)) {
- ret = xfs_free_eofblocks(dest);
- if (ret)
- goto out_unlock;
- }
+ ret = xfs_reflink_zero_posteof(dest, pos_out);
+ if (ret)
+ goto out_unlock;
/* Set flags and remap blocks. */
ret = xfs_reflink_set_inode_flag(src, dest);
if (ret)
goto out_unlock;
+ /* Zap any page cache for the destination file's range. */
+ truncate_inode_pages_range(&inode_out->i_data, pos_out,
+ PAGE_ALIGN(pos_out + *len) - 1);
+
+ /* If we're altering the file contents... */
+ if (!is_dedupe) {
+ /*
+ * ...update the timestamps (which will grab the ilock again
+ * from xfs_fs_dirty_inode, so we have to call it before we
+ * take the ilock).
+ */
+ if (!(file_out->f_mode & FMODE_NOCMTIME)) {
+ ret = file_update_time(file_out);
+ if (ret)
+ goto out_unlock;
+ }
+
+ /*
+ * ...clear the security bits if the process is not being run
+ * by root. This keeps people from modifying setuid and setgid
+ * binaries.
+ */
+ ret = file_remove_privs(file_out);
+ if (ret)
+ goto out_unlock;
+ }
+
+ return 1;
+out_unlock:
+ xfs_reflink_remap_unlock(file_in, file_out);
+ return ret;
+}
+
+/*
+ * Link a range of blocks from one file to another.
+ */
+int
+xfs_reflink_remap_range(
+ struct file *file_in,
+ loff_t pos_in,
+ struct file *file_out,
+ loff_t pos_out,
+ u64 len,
+ bool is_dedupe)
+{
+ struct inode *inode_in = file_inode(file_in);
+ struct xfs_inode *src = XFS_I(inode_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
+ struct xfs_mount *mp = src->i_mount;
+ xfs_fileoff_t sfsbno, dfsbno;
+ xfs_filblks_t fsblen;
+ xfs_extlen_t cowextsize;
+ ssize_t ret;
+
+ if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ /* Prepare and then clone file data. */
+ ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
+ &len, is_dedupe);
+ if (ret <= 0)
+ return ret;
+
+ trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
dfsbno = XFS_B_TO_FSBT(mp, pos_out);
sfsbno = XFS_B_TO_FSBT(mp, pos_in);
fsblen = XFS_B_TO_FSB(mp, len);
@@ -1280,10 +1441,6 @@ xfs_reflink_remap_range(
if (ret)
goto out_unlock;
- /* Zap any page cache for the destination file's range. */
- truncate_inode_pages_range(&inode_out->i_data, pos_out,
- PAGE_ALIGN(pos_out + len) - 1);
-
/*
* Carry the cowextsize hint from src to dest if we're sharing the
* entire source file to the entire destination file, the source file
@@ -1300,12 +1457,7 @@ xfs_reflink_remap_range(
is_dedupe);
out_unlock:
- xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
- if (!same_inode)
- xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
- inode_unlock(inode_out);
- if (!same_inode)
- inode_unlock_shared(inode_in);
+ xfs_reflink_remap_unlock(file_in, file_out);
if (ret)
trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
return ret;
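The long comment in xfs_reflink_remap_prep() boils down to two cheap checks at prep time. As a rough, self-contained illustration of just that policy, a sketch in plain C (function and parameter names are hypothetical, not the kernel code):

/* Sketch of the partial-EOF policy described above; illustration only. */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

int check_partial_eof(uint64_t *len, uint64_t pos_out, uint64_t dest_isize,
		      uint64_t blocksize, bool is_dedupe)
{
	uint64_t blkmask = blocksize - 1;	/* blocksize is a power of two */

	if (is_dedupe) {
		/* Dedupe: silently drop the partial EOF block. */
		*len &= ~blkmask;
		return 0;
	}

	/*
	 * Clone: a partial block is only safe if it becomes the new EOF
	 * of the destination file; otherwise stale data from beyond the
	 * source EOF could land inside the destination.
	 */
	if ((*len & blkmask) && pos_out + *len < dest_isize)
		return -EINVAL;
	return 0;
}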
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index c585ad9552b2..7f47202b5639 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -10,10 +10,10 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, xfs_agblock_t agbno, xfs_extlen_t aglen,
xfs_agblock_t *fbno, xfs_extlen_t *flen, bool find_maximal);
extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
- struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed);
+ struct xfs_bmbt_irec *irec, bool *shared);
extern int xfs_reflink_reserve_cow(struct xfs_inode *ip,
- struct xfs_bmbt_irec *imap, bool *shared);
+ struct xfs_bmbt_irec *imap);
extern int xfs_reflink_allocate_cow(struct xfs_inode *ip,
struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode);
extern int xfs_reflink_convert_cow(struct xfs_inode *ip, xfs_off_t offset,
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index 4e4423153071..cc509743facd 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -29,30 +29,30 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
char *desc;
int endpoint;
} xstats[] = {
- { "extent_alloc", XFSSTAT_END_EXTENT_ALLOC },
- { "abt", XFSSTAT_END_ALLOC_BTREE },
- { "blk_map", XFSSTAT_END_BLOCK_MAPPING },
- { "bmbt", XFSSTAT_END_BLOCK_MAP_BTREE },
- { "dir", XFSSTAT_END_DIRECTORY_OPS },
- { "trans", XFSSTAT_END_TRANSACTIONS },
- { "ig", XFSSTAT_END_INODE_OPS },
- { "log", XFSSTAT_END_LOG_OPS },
- { "push_ail", XFSSTAT_END_TAIL_PUSHING },
- { "xstrat", XFSSTAT_END_WRITE_CONVERT },
- { "rw", XFSSTAT_END_READ_WRITE_OPS },
- { "attr", XFSSTAT_END_ATTRIBUTE_OPS },
- { "icluster", XFSSTAT_END_INODE_CLUSTER },
- { "vnodes", XFSSTAT_END_VNODE_OPS },
- { "buf", XFSSTAT_END_BUF },
- { "abtb2", XFSSTAT_END_ABTB_V2 },
- { "abtc2", XFSSTAT_END_ABTC_V2 },
- { "bmbt2", XFSSTAT_END_BMBT_V2 },
- { "ibt2", XFSSTAT_END_IBT_V2 },
- { "fibt2", XFSSTAT_END_FIBT_V2 },
- { "rmapbt", XFSSTAT_END_RMAP_V2 },
- { "refcntbt", XFSSTAT_END_REFCOUNT },
+ { "extent_alloc", xfsstats_offset(xs_abt_lookup) },
+ { "abt", xfsstats_offset(xs_blk_mapr) },
+ { "blk_map", xfsstats_offset(xs_bmbt_lookup) },
+ { "bmbt", xfsstats_offset(xs_dir_lookup) },
+ { "dir", xfsstats_offset(xs_trans_sync) },
+ { "trans", xfsstats_offset(xs_ig_attempts) },
+ { "ig", xfsstats_offset(xs_log_writes) },
+ { "log", xfsstats_offset(xs_try_logspace)},
+ { "push_ail", xfsstats_offset(xs_xstrat_quick)},
+ { "xstrat", xfsstats_offset(xs_write_calls) },
+ { "rw", xfsstats_offset(xs_attr_get) },
+ { "attr", xfsstats_offset(xs_iflush_count)},
+ { "icluster", xfsstats_offset(vn_active) },
+ { "vnodes", xfsstats_offset(xb_get) },
+ { "buf", xfsstats_offset(xs_abtb_2) },
+ { "abtb2", xfsstats_offset(xs_abtc_2) },
+ { "abtc2", xfsstats_offset(xs_bmbt_2) },
+ { "bmbt2", xfsstats_offset(xs_ibt_2) },
+ { "ibt2", xfsstats_offset(xs_fibt_2) },
+ { "fibt2", xfsstats_offset(xs_rmap_2) },
+ { "rmapbt", xfsstats_offset(xs_refcbt_2) },
+ { "refcntbt", xfsstats_offset(xs_qm_dqreclaims)},
/* we print both series of quota information together */
- { "qm", XFSSTAT_END_QM },
+ { "qm", xfsstats_offset(xs_xstrat_bytes)},
};
/* Loop over all stats groups */
@@ -104,6 +104,10 @@ void xfs_stats_clearall(struct xfsstats __percpu *stats)
#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA
+
+#define XFSSTAT_START_XQMSTAT xfsstats_offset(xs_qm_dqreclaims)
+#define XFSSTAT_END_XQMSTAT xfsstats_offset(xs_qm_dquot)
+
static int xqm_proc_show(struct seq_file *m, void *v)
{
/* maximum; incore; ratio free to inuse; freelist */
@@ -119,7 +123,7 @@ static int xqmstat_proc_show(struct seq_file *m, void *v)
int j;
seq_printf(m, "qm");
- for (j = XFSSTAT_END_IBT_V2; j < XFSSTAT_END_XQMSTAT; j++)
+ for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
seq_putc(m, '\n');
return 0;
diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h
index 130db070e4d8..34d704f703d2 100644
--- a/fs/xfs/xfs_stats.h
+++ b/fs/xfs/xfs_stats.h
@@ -41,17 +41,14 @@ enum {
* XFS global statistics
*/
struct __xfsstats {
-# define XFSSTAT_END_EXTENT_ALLOC 4
uint32_t xs_allocx;
uint32_t xs_allocb;
uint32_t xs_freex;
uint32_t xs_freeb;
-# define XFSSTAT_END_ALLOC_BTREE (XFSSTAT_END_EXTENT_ALLOC+4)
uint32_t xs_abt_lookup;
uint32_t xs_abt_compare;
uint32_t xs_abt_insrec;
uint32_t xs_abt_delrec;
-# define XFSSTAT_END_BLOCK_MAPPING (XFSSTAT_END_ALLOC_BTREE+7)
uint32_t xs_blk_mapr;
uint32_t xs_blk_mapw;
uint32_t xs_blk_unmap;
@@ -59,21 +56,17 @@ struct __xfsstats {
uint32_t xs_del_exlist;
uint32_t xs_look_exlist;
uint32_t xs_cmp_exlist;
-# define XFSSTAT_END_BLOCK_MAP_BTREE (XFSSTAT_END_BLOCK_MAPPING+4)
uint32_t xs_bmbt_lookup;
uint32_t xs_bmbt_compare;
uint32_t xs_bmbt_insrec;
uint32_t xs_bmbt_delrec;
-# define XFSSTAT_END_DIRECTORY_OPS (XFSSTAT_END_BLOCK_MAP_BTREE+4)
uint32_t xs_dir_lookup;
uint32_t xs_dir_create;
uint32_t xs_dir_remove;
uint32_t xs_dir_getdents;
-# define XFSSTAT_END_TRANSACTIONS (XFSSTAT_END_DIRECTORY_OPS+3)
uint32_t xs_trans_sync;
uint32_t xs_trans_async;
uint32_t xs_trans_empty;
-# define XFSSTAT_END_INODE_OPS (XFSSTAT_END_TRANSACTIONS+7)
uint32_t xs_ig_attempts;
uint32_t xs_ig_found;
uint32_t xs_ig_frecycle;
@@ -81,13 +74,11 @@ struct __xfsstats {
uint32_t xs_ig_dup;
uint32_t xs_ig_reclaims;
uint32_t xs_ig_attrchg;
-# define XFSSTAT_END_LOG_OPS (XFSSTAT_END_INODE_OPS+5)
uint32_t xs_log_writes;
uint32_t xs_log_blocks;
uint32_t xs_log_noiclogs;
uint32_t xs_log_force;
uint32_t xs_log_force_sleep;
-# define XFSSTAT_END_TAIL_PUSHING (XFSSTAT_END_LOG_OPS+10)
uint32_t xs_try_logspace;
uint32_t xs_sleep_logspace;
uint32_t xs_push_ail;
@@ -98,22 +89,17 @@ struct __xfsstats {
uint32_t xs_push_ail_flushing;
uint32_t xs_push_ail_restarts;
uint32_t xs_push_ail_flush;
-# define XFSSTAT_END_WRITE_CONVERT (XFSSTAT_END_TAIL_PUSHING+2)
uint32_t xs_xstrat_quick;
uint32_t xs_xstrat_split;
-# define XFSSTAT_END_READ_WRITE_OPS (XFSSTAT_END_WRITE_CONVERT+2)
uint32_t xs_write_calls;
uint32_t xs_read_calls;
-# define XFSSTAT_END_ATTRIBUTE_OPS (XFSSTAT_END_READ_WRITE_OPS+4)
uint32_t xs_attr_get;
uint32_t xs_attr_set;
uint32_t xs_attr_remove;
uint32_t xs_attr_list;
-# define XFSSTAT_END_INODE_CLUSTER (XFSSTAT_END_ATTRIBUTE_OPS+3)
uint32_t xs_iflush_count;
uint32_t xs_icluster_flushcnt;
uint32_t xs_icluster_flushinode;
-# define XFSSTAT_END_VNODE_OPS (XFSSTAT_END_INODE_CLUSTER+8)
uint32_t vn_active; /* # vnodes not on free lists */
uint32_t vn_alloc; /* # times vn_alloc called */
uint32_t vn_get; /* # times vn_get called */
@@ -122,7 +108,6 @@ struct __xfsstats {
uint32_t vn_reclaim; /* # times vn_reclaim called */
uint32_t vn_remove; /* # times vn_remove called */
uint32_t vn_free; /* # times vn_free called */
-#define XFSSTAT_END_BUF (XFSSTAT_END_VNODE_OPS+9)
uint32_t xb_get;
uint32_t xb_create;
uint32_t xb_get_locked;
@@ -133,28 +118,19 @@ struct __xfsstats {
uint32_t xb_page_found;
uint32_t xb_get_read;
/* Version 2 btree counters */
-#define XFSSTAT_END_ABTB_V2 (XFSSTAT_END_BUF + __XBTS_MAX)
uint32_t xs_abtb_2[__XBTS_MAX];
-#define XFSSTAT_END_ABTC_V2 (XFSSTAT_END_ABTB_V2 + __XBTS_MAX)
uint32_t xs_abtc_2[__XBTS_MAX];
-#define XFSSTAT_END_BMBT_V2 (XFSSTAT_END_ABTC_V2 + __XBTS_MAX)
uint32_t xs_bmbt_2[__XBTS_MAX];
-#define XFSSTAT_END_IBT_V2 (XFSSTAT_END_BMBT_V2 + __XBTS_MAX)
uint32_t xs_ibt_2[__XBTS_MAX];
-#define XFSSTAT_END_FIBT_V2 (XFSSTAT_END_IBT_V2 + __XBTS_MAX)
uint32_t xs_fibt_2[__XBTS_MAX];
-#define XFSSTAT_END_RMAP_V2 (XFSSTAT_END_FIBT_V2 + __XBTS_MAX)
uint32_t xs_rmap_2[__XBTS_MAX];
-#define XFSSTAT_END_REFCOUNT (XFSSTAT_END_RMAP_V2 + __XBTS_MAX)
uint32_t xs_refcbt_2[__XBTS_MAX];
-#define XFSSTAT_END_XQMSTAT (XFSSTAT_END_REFCOUNT + 6)
uint32_t xs_qm_dqreclaims;
uint32_t xs_qm_dqreclaim_misses;
uint32_t xs_qm_dquot_dups;
uint32_t xs_qm_dqcachemisses;
uint32_t xs_qm_dqcachehits;
uint32_t xs_qm_dqwants;
-#define XFSSTAT_END_QM (XFSSTAT_END_XQMSTAT+2)
uint32_t xs_qm_dquot;
uint32_t xs_qm_dquot_unused;
/* Extra precision counters */
@@ -163,10 +139,12 @@ struct __xfsstats {
uint64_t xs_read_bytes;
};
+#define xfsstats_offset(f) (offsetof(struct __xfsstats, f)/sizeof(uint32_t))
+
struct xfsstats {
union {
struct __xfsstats s;
- uint32_t a[XFSSTAT_END_XQMSTAT];
+ uint32_t a[xfsstats_offset(xs_qm_dquot)];
};
};
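The xfsstats_offset() macro at the bottom of this hunk is what replaces every deleted XFSSTAT_END_* constant: the compiler derives each group boundary from offsetof() instead of a hand-counted running total that breaks whenever a field is added. A minimal demonstration of the trick, using a hypothetical struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	uint32_t a;
	uint32_t b;
	uint32_t c[4];
	uint32_t d;
};

/* Index of field f when the struct is viewed as a u32 array. */
#define stats_offset(f) (offsetof(struct stats, f) / sizeof(uint32_t))

int main(void)
{
	/* Prints 0 2 6: group boundaries with no magic numbers. */
	printf("%zu %zu %zu\n", stats_offset(a), stats_offset(c),
	       stats_offset(d));
	return 0;
}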
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 207ee302b1bb..d3e6cd063688 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -43,6 +43,7 @@
#include <linux/dax.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/magic.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
@@ -933,6 +934,32 @@ xfs_fs_alloc_inode(
return NULL;
}
+#ifdef DEBUG
+static void
+xfs_check_delalloc(
+ struct xfs_inode *ip,
+ int whichfork)
+{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_bmbt_irec got;
+ struct xfs_iext_cursor icur;
+
+ if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
+ return;
+ do {
+ if (isnullstartblock(got.br_startblock)) {
+ xfs_warn(ip->i_mount,
+ "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
+ ip->i_ino,
+ whichfork == XFS_DATA_FORK ? "data" : "cow",
+ got.br_startoff, got.br_blockcount);
+ }
+ } while (xfs_iext_next_extent(ifp, &icur, &got));
+}
+#else
+#define xfs_check_delalloc(ip, whichfork) do { } while (0)
+#endif
+
/*
* Now that the generic code is guaranteed not to be accessing
* the linux inode, we can inactivate and reclaim the inode.
@@ -951,7 +978,12 @@ xfs_fs_destroy_inode(
xfs_inactive(ip);
- ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
+ if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
+ xfs_check_delalloc(ip, XFS_DATA_FORK);
+ xfs_check_delalloc(ip, XFS_COW_FORK);
+ ASSERT(0);
+ }
+
XFS_STATS_INC(ip->i_mount, vn_reclaim);
/*
@@ -1097,7 +1129,7 @@ xfs_fs_statfs(
xfs_extlen_t lsize;
int64_t ffree;
- statp->f_type = XFS_SB_MAGIC;
+ statp->f_type = XFS_SUPER_MAGIC;
statp->f_namelen = MAXNAMELEN - 1;
id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
@@ -1650,7 +1682,7 @@ xfs_fs_fill_super(
* we must configure the block size in the superblock before we run the
* full mount process as the mount process can lookup and cache inodes.
*/
- sb->s_magic = XFS_SB_MAGIC;
+ sb->s_magic = XFS_SUPER_MAGIC;
sb->s_blocksize = mp->m_sb.sb_blocksize;
sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index ad315e83bc02..3043e5ed6495 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -473,7 +473,6 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
-DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index bedc5a5133a5..912b42f5fe4a 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -259,6 +259,14 @@ xfs_trans_alloc(
struct xfs_trans *tp;
int error;
+ /*
+ * Allocate the handle before we do our freeze accounting and set up the
+ * GFP_NOFS allocation context, so that we avoid lockdep false positives
+ * by doing GFP_KERNEL allocations inside sb_start_intwrite().
+ */
+ tp = kmem_zone_zalloc(xfs_trans_zone,
+ (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
+
if (!(flags & XFS_TRANS_NO_WRITECOUNT))
sb_start_intwrite(mp->m_super);
@@ -270,8 +278,6 @@ xfs_trans_alloc(
mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
atomic_inc(&mp->m_active_trans);
- tp = kmem_zone_zalloc(xfs_trans_zone,
- (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
tp->t_magic = XFS_TRANS_HEADER_MAGIC;
tp->t_flags = flags;
tp->t_mountp = mp;
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index c3d278e96ad1..a0c5dbda18aa 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -220,6 +220,7 @@ void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *, uint);
void xfs_trans_log_buf(struct xfs_trans *, struct xfs_buf *, uint,
uint);
void xfs_trans_dirty_buf(struct xfs_trans *, struct xfs_buf *);
+bool xfs_trans_buf_is_dirty(struct xfs_buf *bp);
void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
void xfs_extent_free_init_defer_op(void);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 55326f971cb3..d3a4e89bf4a0 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -531,17 +531,33 @@ xfsaild(
set_current_state(TASK_INTERRUPTIBLE);
/*
- * Check kthread_should_stop() after we set the task state
- * to guarantee that we either see the stop bit and exit or
- * the task state is reset to runnable such that it's not
- * scheduled out indefinitely and detects the stop bit at
- * next iteration.
- *
+ * Check kthread_should_stop() after we set the task state to
+ * guarantee that we either see the stop bit and exit or the
+ * task state is reset to runnable such that it's not scheduled
+ * out indefinitely and detects the stop bit at next iteration.
+ * A memory barrier is included in the above task state set to
+ * serialize against kthread_stop().
*/
if (kthread_should_stop()) {
__set_current_state(TASK_RUNNING);
+
+ /*
+ * The caller forces out the AIL before stopping the
+ * thread in the common case, which means the delwri
+ * queue is drained. In the shutdown case, the queue may
+ * still hold relogged buffers that haven't been
+ * submitted because they were pinned since added to the
+ * queue.
+ *
+ * Log I/O error processing stales the underlying buffer
+ * and clears the delwri state, expecting the buf to be
+ * removed on the next submission attempt. That won't
+ * happen if we're shutting down, so this is the last
+ * opportunity to release such buffers from the queue.
+ */
+ ASSERT(list_empty(&ailp->ail_buf_list) ||
+ XFS_FORCED_SHUTDOWN(ailp->ail_mount));
+ xfs_buf_delwri_cancel(&ailp->ail_buf_list);
break;
}
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 15919f67a88f..629f1479c9d2 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -264,11 +264,39 @@ xfs_trans_read_buf_map(
return -EIO;
}
+ /*
+ * Check if the caller is trying to read a buffer that is
+ * already attached to the transaction yet has no buffer ops
+ * assigned. Ops are usually attached when the buffer is
+ * attached to the transaction, or by the read caller in
+ * special circumstances. That didn't happen here, which is
+ * not how this is supposed to go.
+ *
+ * If the buffer passes verification we'll let this go, but if
+ * not we have to shut down. Let the transaction cleanup code
+ * release this buffer when it kills the transaction.
+ */
+ ASSERT(bp->b_ops != NULL);
+ error = xfs_buf_ensure_ops(bp, ops);
+ if (error) {
+ xfs_buf_ioerror_alert(bp, __func__);
+
+ if (tp->t_flags & XFS_TRANS_DIRTY)
+ xfs_force_shutdown(tp->t_mountp,
+ SHUTDOWN_META_IO_ERROR);
+
+ /* bad CRC means corrupted metadata */
+ if (error == -EFSBADCRC)
+ error = -EFSCORRUPTED;
+ return error;
+ }
+
bip = bp->b_log_item;
bip->bli_recur++;
ASSERT(atomic_read(&bip->bli_refcount) > 0);
trace_xfs_trans_read_buf_recur(bip);
+ ASSERT(bp->b_ops != NULL || ops == NULL);
*bpp = bp;
return 0;
}
@@ -316,55 +344,58 @@ xfs_trans_read_buf_map(
_xfs_trans_bjoin(tp, bp, 1);
trace_xfs_trans_read_buf(bp->b_log_item);
}
+ ASSERT(bp->b_ops != NULL || ops == NULL);
*bpp = bp;
return 0;
}
+/* Has this buffer been dirtied by anyone? */
+bool
+xfs_trans_buf_is_dirty(
+ struct xfs_buf *bp)
+{
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+
+ if (!bip)
+ return false;
+ ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
+ return test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
+}
+
/*
- * Release the buffer bp which was previously acquired with one of the
- * xfs_trans_... buffer allocation routines if the buffer has not
- * been modified within this transaction. If the buffer is modified
- * within this transaction, do decrement the recursion count but do
- * not release the buffer even if the count goes to 0. If the buffer is not
- * modified within the transaction, decrement the recursion count and
- * release the buffer if the recursion count goes to 0.
+ * Release a buffer previously joined to the transaction. If the buffer is
+ * modified within this transaction, decrement the recursion count but do not
+ * release the buffer even if the count goes to 0. If the buffer is not modified
+ * within the transaction, decrement the recursion count and release the buffer
+ * if the recursion count goes to 0.
*
- * If the buffer is to be released and it was not modified before
- * this transaction began, then free the buf_log_item associated with it.
+ * If the buffer is to be released and it was not already dirty before this
+ * transaction began, then also free the buf_log_item associated with it.
*
- * If the transaction pointer is NULL, make this just a normal
- * brelse() call.
+ * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
*/
void
xfs_trans_brelse(
- xfs_trans_t *tp,
- xfs_buf_t *bp)
+ struct xfs_trans *tp,
+ struct xfs_buf *bp)
{
- struct xfs_buf_log_item *bip;
- int freed;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
- /*
- * Default to a normal brelse() call if the tp is NULL.
- */
- if (tp == NULL) {
- ASSERT(bp->b_transp == NULL);
+ ASSERT(bp->b_transp == tp);
+
+ if (!tp) {
xfs_buf_relse(bp);
return;
}
- ASSERT(bp->b_transp == tp);
- bip = bp->b_log_item;
+ trace_xfs_trans_brelse(bip);
ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
- ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
- trace_xfs_trans_brelse(bip);
-
/*
- * If the release is just for a recursive lock,
- * then decrement the count and return.
+ * If the release is for a recursive lookup, then decrement the count
+ * and return.
*/
if (bip->bli_recur > 0) {
bip->bli_recur--;
@@ -372,64 +403,24 @@ xfs_trans_brelse(
}
/*
- * If the buffer is dirty within this transaction, we can't
+ * If the buffer is invalidated or dirty in this transaction, we can't
* release it until we commit.
*/
if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
return;
-
- /*
- * If the buffer has been invalidated, then we can't release
- * it until the transaction commits to disk unless it is re-dirtied
- * as part of this transaction. This prevents us from pulling
- * the item from the AIL before we should.
- */
if (bip->bli_flags & XFS_BLI_STALE)
return;
- ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
-
/*
- * Free up the log item descriptor tracking the released item.
+ * Unlink the log item from the transaction and clear the hold flag, if
+ * set. We wouldn't want the next user of the buffer to get confused.
*/
+ ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
xfs_trans_del_item(&bip->bli_item);
+ bip->bli_flags &= ~XFS_BLI_HOLD;
- /*
- * Clear the hold flag in the buf log item if it is set.
- * We wouldn't want the next user of the buffer to
- * get confused.
- */
- if (bip->bli_flags & XFS_BLI_HOLD) {
- bip->bli_flags &= ~XFS_BLI_HOLD;
- }
-
- /*
- * Drop our reference to the buf log item.
- */
- freed = atomic_dec_and_test(&bip->bli_refcount);
-
- /*
- * If the buf item is not tracking data in the log, then we must free it
- * before releasing the buffer back to the free pool.
- *
- * If the fs has shutdown and we dropped the last reference, it may fall
- * on us to release a (possibly dirty) bli if it never made it to the
- * AIL (e.g., the aborted unpin already happened and didn't release it
- * due to our reference). Since we're already shutdown and need
- * ail_lock, just force remove from the AIL and release the bli here.
- */
- if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
- xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
- xfs_buf_item_relse(bp);
- } else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
-/***
- ASSERT(bp->b_pincount == 0);
-***/
- ASSERT(atomic_read(&bip->bli_refcount) == 0);
- ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
- ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
- xfs_buf_item_relse(bp);
- }
+ /* drop the reference to the bli */
+ xfs_buf_item_put(bip);
bp->b_transp = NULL;
xfs_buf_relse(bp);
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index e6964e97acdd..53c088247d36 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -173,11 +173,20 @@
#define ACPI_RSDP_CHECKSUM_LENGTH 20
#define ACPI_RSDP_XCHECKSUM_LENGTH 36
-/* SMBus, GSBus and IPMI bidirectional buffer size */
+/*
+ * SMBus, GSBus and IPMI buffer sizes. All have a 2-byte header,
+ * containing both Status and Length.
+ */
+#define ACPI_SERIAL_HEADER_SIZE 2 /* Common for below. Status and Length fields */
+
+#define ACPI_SMBUS_DATA_SIZE 32
+#define ACPI_SMBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_SMBUS_DATA_SIZE
+
+#define ACPI_IPMI_DATA_SIZE 64
+#define ACPI_IPMI_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_IPMI_DATA_SIZE
-#define ACPI_SMBUS_BUFFER_SIZE 34
-#define ACPI_GSBUS_BUFFER_SIZE 34
-#define ACPI_IPMI_BUFFER_SIZE 66
+#define ACPI_MAX_GSBUS_DATA_SIZE 255
+#define ACPI_MAX_GSBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE
/* _sx_d and _sx_w control methods */
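The rewritten constants make the 2-byte Status/Length header explicit instead of burying it in the totals. Note that the patch defines these sums without surrounding parentheses; the sanity-check sketch below adds them defensively:

#include <stdio.h>

#define ACPI_SERIAL_HEADER_SIZE    2
#define ACPI_SMBUS_DATA_SIZE       32
#define ACPI_SMBUS_BUFFER_SIZE     (ACPI_SERIAL_HEADER_SIZE + ACPI_SMBUS_DATA_SIZE)
#define ACPI_IPMI_DATA_SIZE        64
#define ACPI_IPMI_BUFFER_SIZE      (ACPI_SERIAL_HEADER_SIZE + ACPI_IPMI_DATA_SIZE)
#define ACPI_MAX_GSBUS_DATA_SIZE   255
#define ACPI_MAX_GSBUS_BUFFER_SIZE (ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE)

int main(void)
{
	/* 34 and 66 match the old hardcoded values; GSBus grows to 257. */
	printf("%d %d %d\n", ACPI_SMBUS_BUFFER_SIZE,
	       ACPI_IPMI_BUFFER_SIZE, ACPI_MAX_GSBUS_BUFFER_SIZE);
	return 0;
}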
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 856c56ef0143..09f46050961f 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -171,8 +171,10 @@ struct acpi_exception_info {
#define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021)
#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022)
#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
+#define AE_AML_PROTOCOL EXCEP_AML (0x0024)
+#define AE_AML_BUFFER_LENGTH EXCEP_AML (0x0025)
-#define AE_CODE_AML_MAX 0x0023
+#define AE_CODE_AML_MAX 0x0025
/*
* Internal exceptions used for control
@@ -347,7 +349,10 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
"A namespace node is uninitialized or unresolved"),
EXCEP_TXT("AE_AML_TARGET_TYPE",
- "A target operand of an incorrect type was encountered")
+ "A target operand of an incorrect type was encountered"),
+ EXCEP_TXT("AE_AML_PROTOCOL", "Violation of a fixed ACPI protocol"),
+ EXCEP_TXT("AE_AML_BUFFER_LENGTH",
+ "The length of the buffer is invalid/incorrect")
};
static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = {
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index ba4dd54f2c82..0300374101cd 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -346,10 +346,16 @@ struct acpi_device_physical_node {
bool put_online:1;
};
+struct acpi_device_properties {
+ const guid_t *guid;
+ const union acpi_object *properties;
+ struct list_head list;
+};
+
/* ACPI Device Specific Data (_DSD) */
struct acpi_device_data {
const union acpi_object *pointer;
- const union acpi_object *properties;
+ struct list_head properties;
const union acpi_object *of_compatible;
struct list_head subnodes;
};
@@ -595,7 +601,6 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
u64 *size);
int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
-void acpi_dma_deconfigure(struct device *dev);
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
u64 address, bool check_children);
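With _DSD data now a list of (GUID, package) pairs rather than a single properties pointer, lookups have to walk the list. A hedged sketch of the consuming side, with minimal stand-in types so it compiles on its own (the helper name is hypothetical; the real accessors live in drivers/acpi/property.c):

#include <stddef.h>
#include <string.h>

/* Minimal stand-ins for the kernel types; illustration only. */
struct list_head { struct list_head *next, *prev; };
typedef struct { unsigned char b[16]; } guid_t;

struct acpi_device_properties {
	const guid_t *guid;
	const void *properties;	/* const union acpi_object * in the kernel */
	struct list_head list;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Find the property package registered under a given GUID, if any. */
const void *find_properties(struct list_head *head, const guid_t *guid)
{
	struct list_head *pos;

	for (pos = head->next; pos != head; pos = pos->next) {
		struct acpi_device_properties *props =
			container_of(pos, struct acpi_device_properties, list);

		if (!memcmp(props->guid, guid, sizeof(*guid)))
			return props->properties;
	}
	return NULL;
}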
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 9566f99cc3c0..0c19b68bf060 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20180810
+#define ACPI_CA_VERSION 0x20181003
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -157,13 +157,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
/*
- * Optionally support group module level code.
- * NOTE, this is essentially obsolete and will be removed soon
- * (01/2018).
- */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE);
-
-/*
* Optionally support module level code by parsing an entire table as
* a method as it is loaded. Default is TRUE.
* NOTE, this is essentially obsolete and will be removed soon
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 8e0b8250a139..cf59e6210d27 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -104,6 +104,7 @@ enum cppc_regs {
* today.
*/
struct cppc_perf_caps {
+ u32 guaranteed_perf;
u32 highest_perf;
u32 nominal_perf;
u32 lowest_perf;
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 20561a60db9c..cdafa5edea49 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -17,10 +17,8 @@
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
-#ifdef CONFIG_BUG
-
-#ifdef CONFIG_GENERIC_BUG
struct bug_entry {
+#ifdef CONFIG_GENERIC_BUG
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
unsigned long bug_addr;
#else
@@ -35,8 +33,10 @@ struct bug_entry {
unsigned short line;
#endif
unsigned short flags;
-};
#endif /* CONFIG_GENERIC_BUG */
+};
+
+#ifdef CONFIG_BUG
/*
* Don't use BUG() or BUG_ON() unless there's really no way out; one
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index 28819451b6d1..a86f65bffab8 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -1,3 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_COMPAT_H
+#define __ASM_GENERIC_COMPAT_H
-/* This is an empty stub for 32-bit-only architectures */
+/* These types are common across all compat ABIs */
+typedef u32 compat_size_t;
+typedef s32 compat_ssize_t;
+typedef s32 compat_clock_t;
+typedef s32 compat_pid_t;
+typedef u32 compat_ino_t;
+typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
+typedef s32 compat_daddr_t;
+typedef s32 compat_timer_t;
+typedef s32 compat_key_t;
+typedef s16 compat_short_t;
+typedef s32 compat_int_t;
+typedef s32 compat_long_t;
+typedef u16 compat_ushort_t;
+typedef u32 compat_uint_t;
+typedef u32 compat_ulong_t;
+typedef u32 compat_uptr_t;
+typedef u32 compat_aio_context_t;
+
+#endif
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index ad2868263867..880a292d792f 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -4,16 +4,7 @@
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- /*
- * Use the non-coherent ops if available. If an architecture wants a
- * more fine-grained selection of operations it will have to implement
- * get_arch_dma_ops itself or use the per-device dma_ops.
- */
-#ifdef CONFIG_DMA_NONCOHERENT_OPS
- return &dma_noncoherent_ops;
-#else
return &dma_direct_ops;
-#endif
}
#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 0f7062bd55e5..36254d2da8e0 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -71,8 +71,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
if (unlikely(cnts))
return 0;
- return likely(atomic_cmpxchg_acquire(&lock->cnts,
- cnts, cnts | _QW_LOCKED) == cnts);
+ return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
+ _QW_LOCKED));
}
/**
* queued_read_lock - acquire read lock of a queue rwlock
@@ -96,8 +96,9 @@ static inline void queued_read_lock(struct qrwlock *lock)
*/
static inline void queued_write_lock(struct qrwlock *lock)
{
+ u32 cnts = 0;
/* Optimize for the unfair lock case where the fair flag is 0. */
- if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
+ if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
return;
queued_write_lock_slowpath(lock);
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 9cc457597ddf..7541fa707f5b 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -66,10 +66,12 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
*/
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
- if (!atomic_read(&lock->val) &&
- (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
- return 1;
- return 0;
+ u32 val = atomic_read(&lock->val);
+
+ if (unlikely(val))
+ return 0;
+
+ return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
@@ -80,11 +82,11 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
*/
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
- u32 val;
+ u32 val = 0;
- val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
- if (likely(val == 0))
+ if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
return;
+
queued_spin_lock_slowpath(lock, val);
}
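Both this hunk and the qrwlock one above switch from atomic_cmpxchg_acquire() to atomic_try_cmpxchg_acquire(). The try variant returns a boolean and, on failure, writes the value it observed back through the expected-value pointer, so the caller never reloads by hand and x86 can branch directly on the cmpxchg flags. C11 atomics offer the same contract; a small runnable sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* C11 equivalent of the kernel's try_cmpxchg locking pattern. */
static bool try_lock(atomic_uint *lock)
{
	unsigned int expected = 0;

	/*
	 * On failure, compare_exchange stores the current value into
	 * 'expected', the same contract atomic_try_cmpxchg() has.
	 */
	return atomic_compare_exchange_strong_explicit(lock, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

int main(void)
{
	atomic_uint lock = 0;

	printf("%d %d\n", try_lock(&lock), try_lock(&lock)); /* 1 0 */
	return 0;
}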
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b3353e21f3b3..6be86c1c5c58 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -20,6 +20,8 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#ifdef CONFIG_MMU
+
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
* Semi RCU freeing of the page directories.
@@ -97,12 +99,30 @@ struct mmu_gather {
#endif
unsigned long start;
unsigned long end;
- /* we are in the middle of an operation to clear
- * a full mm and can make some optimizations */
- unsigned int fullmm : 1,
- /* we have performed an operation which
- * requires a complete flush of the tlb */
- need_flush_all : 1;
+ /*
+ * we are in the middle of an operation to clear
+ * a full mm and can make some optimizations
+ */
+ unsigned int fullmm : 1;
+
+ /*
+ * we have performed an operation which
+ * requires a complete flush of the tlb
+ */
+ unsigned int need_flush_all : 1;
+
+ /*
+ * we have removed page directories
+ */
+ unsigned int freed_tables : 1;
+
+ /*
+ * at which levels have we cleared entries?
+ */
+ unsigned int cleared_ptes : 1;
+ unsigned int cleared_pmds : 1;
+ unsigned int cleared_puds : 1;
+ unsigned int cleared_p4ds : 1;
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
@@ -118,6 +138,7 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force);
+void tlb_flush_mmu_free(struct mmu_gather *tlb);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
@@ -137,6 +158,11 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
tlb->start = TASK_SIZE;
tlb->end = 0;
}
+ tlb->freed_tables = 0;
+ tlb->cleared_ptes = 0;
+ tlb->cleared_pmds = 0;
+ tlb->cleared_puds = 0;
+ tlb->cleared_p4ds = 0;
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -186,6 +212,25 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
}
#endif
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+ if (tlb->cleared_ptes)
+ return PAGE_SHIFT;
+ if (tlb->cleared_pmds)
+ return PMD_SHIFT;
+ if (tlb->cleared_puds)
+ return PUD_SHIFT;
+ if (tlb->cleared_p4ds)
+ return P4D_SHIFT;
+
+ return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+ return 1UL << tlb_get_unmap_shift(tlb);
+}
+
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
@@ -219,13 +264,19 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->cleared_ptes = 1; \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- do { \
- __tlb_adjust_range(tlb, address, huge_page_size(h)); \
- __tlb_remove_tlb_entry(tlb, ptep, address); \
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ do { \
+ unsigned long _sz = huge_page_size(h); \
+ __tlb_adjust_range(tlb, address, _sz); \
+ if (_sz == PMD_SIZE) \
+ tlb->cleared_pmds = 1; \
+ else if (_sz == PUD_SIZE) \
+ tlb->cleared_puds = 1; \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
/**
@@ -239,6 +290,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
do { \
__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
+ tlb->cleared_pmds = 1; \
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
@@ -253,6 +305,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
+ tlb->cleared_puds = 1; \
__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
} while (0)
@@ -278,6 +331,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define pte_free_tlb(tlb, ptep, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ tlb->cleared_pmds = 1; \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#endif
@@ -285,7 +340,9 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ tlb->cleared_puds = 1; \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
#endif
@@ -295,6 +352,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define pud_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ tlb->cleared_p4ds = 1; \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
@@ -304,12 +363,15 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
__p4d_free_tlb(tlb, pudp, address); \
} while (0)
#endif
#endif
+#endif /* CONFIG_MMU */
+
#define tlb_migrate_finish(mm) do {} while (0)
#endif /* _ASM_GENERIC__TLB_H */
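The new cleared_ptes/pmds/puds/p4ds bits exist so an architecture can flush at the coarsest page size that still covers everything unmapped, instead of walking the range in 4K steps. A freestanding sketch of a consumer of tlb_get_unmap_shift(); names suffixed _sketch are hypothetical, and the p4d case is omitted for brevity:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PUD_SHIFT  30

struct mmu_gather_sketch {
	unsigned long start, end;
	unsigned int cleared_ptes : 1;
	unsigned int cleared_pmds : 1;
	unsigned int cleared_puds : 1;
};

static unsigned long unmap_shift(const struct mmu_gather_sketch *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	return PAGE_SHIFT;
}

static void flush_range_sketch(const struct mmu_gather_sketch *tlb)
{
	unsigned long stride = 1UL << unmap_shift(tlb);
	unsigned long addr;

	/* One invalidate per stride instead of one per 4K page. */
	for (addr = tlb->start; addr < tlb->end; addr += stride)
		printf("invalidate %#lx\n", addr);
}

int main(void)
{
	struct mmu_gather_sketch tlb = {
		.start = 0x40000000, .end = 0x40400000, .cleared_pmds = 1,
	};

	flush_range_sketch(&tlb); /* two 2MiB invalidates, not 1024 4K ones */
	return 0;
}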
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
deleted file mode 100644
index cdf904265caf..000000000000
--- a/include/asm-generic/unistd.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <uapi/asm-generic/unistd.h>
-#include <linux/export.h>
-
-/*
- * These are required system calls, we should
- * invert the logic eventually and let them
- * be selected by default.
- */
-#if __BITS_PER_LONG == 32
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_LLSEEK
-#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 7b75ff6e2fce..3d7a6a9c2370 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -68,7 +68,7 @@
*/
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
@@ -203,6 +203,15 @@
#define EARLYCON_TABLE()
#endif
+#ifdef CONFIG_SECURITY
+#define LSM_TABLE() . = ALIGN(8); \
+ __start_lsm_info = .; \
+ KEEP(*(.lsm_info.init)) \
+ __end_lsm_info = .;
+#else
+#define LSM_TABLE()
+#endif
+
#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
@@ -253,10 +262,6 @@
STRUCT_ALIGN(); \
*(__tracepoints) \
/* implement dynamic printk debug */ \
- . = ALIGN(8); \
- __start___jump_table = .; \
- KEEP(*(__jump_table)) \
- __stop___jump_table = .; \
. = ALIGN(8); \
__start___verbose = .; \
KEEP(*(__verbose)) \
@@ -300,6 +305,12 @@
. = __start_init_task + THREAD_SIZE; \
__end_init_task = .;
+#define JUMP_TABLE_DATA \
+ . = ALIGN(8); \
+ __start___jump_table = .; \
+ KEEP(*(__jump_table)) \
+ __stop___jump_table = .;
+
/*
* Allow architectures to handle ro_after_init data on their
* own by defining an empty RO_AFTER_INIT_DATA.
@@ -308,6 +319,7 @@
#define RO_AFTER_INIT_DATA \
__start_ro_after_init = .; \
*(.data..ro_after_init) \
+ JUMP_TABLE_DATA \
__end_ro_after_init = .;
#endif
@@ -473,13 +485,6 @@
#define RODATA RO_DATA_SECTION(4096)
#define RO_DATA(align) RO_DATA_SECTION(align)
-#define SECURITY_INIT \
- .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
- __security_initcall_start = .; \
- KEEP(*(.security_initcall.init)) \
- __security_initcall_end = .; \
- }
-
/*
* .text section. Map to function alignment to avoid address changes
* during second ld run in second ld pass when generating System.map
@@ -604,7 +609,8 @@
IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(timer) \
- EARLYCON_TABLE()
+ EARLYCON_TABLE() \
+ LSM_TABLE()
#define INIT_TEXT \
*(.init.text .init.text.*) \
@@ -613,8 +619,8 @@
#define EXIT_DATA \
*(.exit.data .exit.data.*) \
- *(.fini_array) \
- *(.dtors) \
+ *(.fini_array .fini_array.*) \
+ *(.dtors .dtors.*) \
MEM_DISCARD(exit.data*) \
MEM_DISCARD(exit.rodata*)
@@ -793,11 +799,6 @@
KEEP(*(.con_initcall.init)) \
__con_initcall_end = .;
-#define SECURITY_INITCALL \
- __security_initcall_start = .; \
- KEEP(*(.security_initcall.init)) \
- __security_initcall_end = .;
-
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
@@ -964,7 +965,6 @@
INIT_SETUP(initsetup_align) \
INIT_CALLS \
CON_INITCALL \
- SECURITY_INITCALL \
INIT_RAM_FS \
}
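LSM_TABLE() is the usual boot-time table idiom: the linker gathers all .lsm_info.init entries between __start_lsm_info and __end_lsm_info, and init code walks them as an array. A runnable userspace approximation, relying on the GCC/Clang convention that named ELF sections get automatic __start_/__stop_ bounds (the struct layout here is a placeholder, not the real lsm_info):

#include <stdio.h>

struct lsm_info_sketch {
	const char *name;
	int (*init)(void);
};

/* Place entries in one section, as KEEP(*(.lsm_info.init)) does above. */
#define DEFINE_LSM_SKETCH(n, fn) \
	static struct lsm_info_sketch __lsm_##n \
	__attribute__((used, section("lsm_info"))) = { #n, fn }

static int demo_init(void) { puts("demo init"); return 0; }
DEFINE_LSM_SKETCH(demo, demo_init);

/* The toolchain synthesizes these bounds for identifier-named sections. */
extern struct lsm_info_sketch __start_lsm_info[], __stop_lsm_info[];

int main(void)
{
	struct lsm_info_sketch *lsm;

	for (lsm = __start_lsm_info; lsm < __stop_lsm_info; lsm++)
		lsm->init();
	return 0;
}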
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index e328b52425a8..22e6f412c595 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -234,6 +234,34 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}
+static inline void crypto_stat_compress(struct acomp_req *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->compress_cnt);
+ atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_decompress(struct acomp_req *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->decompress_cnt);
+ atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen);
+ }
+#endif
+}
+
/**
* crypto_acomp_compress() -- Invoke asynchronous compress operation
*
@@ -246,8 +274,11 @@ static inline void acomp_request_set_params(struct acomp_req *req,
static inline int crypto_acomp_compress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ int ret;
- return tfm->compress(req);
+ ret = tfm->compress(req);
+ crypto_stat_compress(req, ret);
+ return ret;
}
/**
@@ -262,8 +293,11 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ int ret;
- return tfm->decompress(req);
+ ret = tfm->decompress(req);
+ crypto_stat_decompress(req, ret);
+ return ret;
}
#endif
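This crypto_stat_*() shape repeats through the aead, akcipher, hash and kpp headers below: capture the return value, treat -EINPROGRESS and -EBUSY as "queued" rather than failure, and bump either an error counter or a success counter plus a byte total; with CONFIG_CRYPTO_STATS disabled the helpers compile to nothing. The convention reduced to a standalone skeleton (names hypothetical):

#include <errno.h>
#include <stdatomic.h>

/* Skeleton of the counting convention used by these stat wrappers. */
struct op_stats {
	atomic_ulong ok_cnt;
	atomic_ulong err_cnt;
	atomic_ullong tlen;	/* total bytes on the success path */
};

void stat_op(struct op_stats *st, unsigned long nbytes, int ret)
{
	/* -EINPROGRESS and -EBUSY mean the request was queued, not failed. */
	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
		atomic_fetch_add(&st->err_cnt, 1);
	} else {
		atomic_fetch_add(&st->ok_cnt, 1);
		atomic_fetch_add(&st->tlen, nbytes);
	}
}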
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 1e26f790b03f..0d765d7bfb82 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -306,6 +306,34 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
return __crypto_aead_cast(req->base.tfm);
}
+static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
+ atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
+ atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen);
+ }
+#endif
+}
+
/**
* crypto_aead_encrypt() - encrypt plaintext
* @req: reference to the aead_request handle that holds all information
@@ -328,11 +356,14 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
static inline int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ret;
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_aead_alg(aead)->encrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = crypto_aead_alg(aead)->encrypt(req);
+ crypto_stat_aead_encrypt(req, ret);
+ return ret;
}
/**
@@ -360,14 +391,16 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
static inline int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ret;
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- if (req->cryptlen < crypto_aead_authsize(aead))
- return -EINVAL;
-
- return crypto_aead_alg(aead)->decrypt(req);
+ ret = -ENOKEY;
+ else if (req->cryptlen < crypto_aead_authsize(aead))
+ ret = -EINVAL;
+ else
+ ret = crypto_aead_alg(aead)->decrypt(req);
+ crypto_stat_aead_decrypt(req, ret);
+ return ret;
}
/**
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index b5e11de4d497..afac71119396 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -271,6 +271,62 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
return alg->max_size(tfm);
}
+static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
+ atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
+ atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_akcipher_sign(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->sign_cnt);
+#endif
+}
+
+static inline void crypto_stat_akcipher_verify(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->verify_cnt);
+#endif
+}
+
/**
* crypto_akcipher_encrypt() - Invoke public key encrypt operation
*
@@ -285,8 +341,11 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->encrypt(req);
+ ret = alg->encrypt(req);
+ crypto_stat_akcipher_encrypt(req, ret);
+ return ret;
}
/**
@@ -303,8 +362,11 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->decrypt(req);
+ ret = alg->decrypt(req);
+ crypto_stat_akcipher_decrypt(req, ret);
+ return ret;
}
/**
@@ -321,8 +383,11 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->sign(req);
+ ret = alg->sign(req);
+ crypto_stat_akcipher_sign(req, ret);
+ return ret;
}
/**
@@ -339,8 +404,11 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->verify(req);
+ ret = alg->verify(req);
+ crypto_stat_akcipher_verify(req, ret);
+ return ret;
}
/**
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index bd5e8ccf1687..4a5ad10e75f0 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -20,8 +20,10 @@
/*
* Maximum values for blocksize and alignmask, used to allocate
* static buffers that are big enough for any combination of
- * ciphers and architectures.
+ * algs and architectures. Ciphers have a lower maximum size.
*/
+#define MAX_ALGAPI_BLOCKSIZE 160
+#define MAX_ALGAPI_ALIGNMASK 63
#define MAX_CIPHER_BLOCKSIZE 16
#define MAX_CIPHER_ALIGNMASK 15
@@ -425,4 +427,14 @@ static inline void crypto_yield(u32 flags)
#endif
}
+int crypto_register_notifier(struct notifier_block *nb);
+int crypto_unregister_notifier(struct notifier_block *nb);
+
+/* Crypto notification events. */
+enum {
+ CRYPTO_MSG_ALG_REQUEST,
+ CRYPTO_MSG_ALG_REGISTER,
+ CRYPTO_MSG_ALG_LOADED,
+};
+
#endif /* _CRYPTO_ALGAPI_H */
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
index f5b8bfc22e6d..3bf28beefa33 100644
--- a/include/crypto/cbc.h
+++ b/include/crypto/cbc.h
@@ -113,7 +113,7 @@ static inline int crypto_cbc_decrypt_inplace(
unsigned int bsize = crypto_skcipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
- u8 last_iv[bsize];
+ u8 last_iv[MAX_CIPHER_BLOCKSIZE];
/* Start of the last block. */
src += nbytes - (nbytes & (bsize - 1)) - bsize;
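The cbc.h change is one instance of the tree-wide VLA removal: a stack array sized by the runtime blocksize becomes a fixed buffer of the compile-time maximum, which is exactly what the MAX_CIPHER_BLOCKSIZE constant above is for. The pattern in isolation, as a hedged sketch:

#include <stdint.h>
#include <string.h>

#define MAX_CIPHER_BLOCKSIZE 16

/*
 * Before: u8 last_iv[bsize]; is a VLA whose stack footprint the compiler
 * cannot bound. After: a fixed buffer plus a runtime check. Sketch only.
 */
int save_last_iv(uint8_t *out, const uint8_t *src, unsigned int bsize)
{
	uint8_t last_iv[MAX_CIPHER_BLOCKSIZE];

	if (bsize > sizeof(last_iv))
		return -1;	/* caller passed an unsupported blocksize */

	memcpy(last_iv, src, bsize);
	memcpy(out, last_iv, bsize);
	return 0;
}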
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index b83d66073db0..f76302d99e2b 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -13,13 +13,12 @@
#define CHACHA20_IV_SIZE 16
#define CHACHA20_KEY_SIZE 32
#define CHACHA20_BLOCK_SIZE 64
-#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32))
struct chacha20_ctx {
u32 key[8];
};
-void chacha20_block(u32 *state, u32 *stream);
+void chacha20_block(u32 *state, u8 *stream);
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize);
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 76e432cab75d..bc7796600338 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -151,9 +151,13 @@ struct shash_desc {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+#define HASH_MAX_DIGESTSIZE 64
+#define HASH_MAX_DESCSIZE 360
+#define HASH_MAX_STATESIZE 512
+
#define SHASH_DESC_ON_STACK(shash, ctx) \
char __##shash##_desc[sizeof(struct shash_desc) + \
- crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
+ HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
/**
@@ -408,6 +412,32 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+static inline void crypto_stat_ahash_update(struct ahash_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
+ else
+ atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
+#endif
+}
+
+static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->hash_cnt);
+ atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
+ }
+#endif
+}
+
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
@@ -522,7 +552,11 @@ static inline int crypto_ahash_init(struct ahash_request *req)
*/
static inline int crypto_ahash_update(struct ahash_request *req)
{
- return crypto_ahash_reqtfm(req)->update(req);
+ int ret;
+
+ ret = crypto_ahash_reqtfm(req)->update(req);
+ crypto_stat_ahash_update(req, ret);
+ return ret;
}
/**
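HASH_MAX_DESCSIZE makes SHASH_DESC_ON_STACK a fixed-size stack object instead of a VLA, so any shash algorithm whose descsize exceeds that bound must be refused at registration time. Callers are unchanged; a hedged sketch of typical kernel-context usage (not compilable outside the kernel, error paths trimmed):

#include <crypto/hash.h>
#include <linux/err.h>

static int sha256_digest_sketch(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		/* Now a fixed HASH_MAX_DESCSIZE-byte buffer, not a VLA. */
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* field still present in this era's API */
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}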
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
new file mode 100644
index 000000000000..8db299c25566
--- /dev/null
+++ b/include/crypto/internal/cryptouser.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <net/netlink.h>
+
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
+
+int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb);
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
+int crypto_dump_reportstat_done(struct netlink_callback *cb);
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 2bcfb931bc5b..71be24cd59bd 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -20,7 +20,7 @@
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
- struct crypto_skcipher *sknull;
+ struct crypto_sync_skcipher *sknull;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 1bde0a6514fa..f517ba6d3a27 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -268,6 +268,42 @@ struct kpp_secret {
unsigned short len;
};
+static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret)
+ atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->setsecret_cnt);
+#endif
+}
+
+static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+
+ if (ret)
+ atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->generate_public_key_cnt);
+#endif
+}
+
+static inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+
+ if (ret)
+ atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt);
+#endif
+}
+
/**
* crypto_kpp_set_secret() - Invoke kpp operation
*
@@ -287,8 +323,11 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ int ret;
- return alg->set_secret(tfm, buffer, len);
+ ret = alg->set_secret(tfm, buffer, len);
+ crypto_stat_kpp_set_secret(tfm, ret);
+ return ret;
}
/**
@@ -308,8 +347,11 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ int ret;
- return alg->generate_public_key(req);
+ ret = alg->generate_public_key(req);
+ crypto_stat_kpp_generate_public_key(req, ret);
+ return ret;
}
/**
@@ -326,8 +368,11 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ int ret;
- return alg->compute_shared_secret(req);
+ ret = alg->compute_shared_secret(req);
+ crypto_stat_kpp_compute_shared_secret(req, ret);
+ return ret;
}
/**
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
deleted file mode 100644
index b67404fc4b34..000000000000
--- a/include/crypto/mcryptd.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Software async multibuffer crypto daemon headers
- *
- * Author:
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * Copyright (c) 2014, Intel Corporation.
- */
-
-#ifndef _CRYPTO_MCRYPT_H
-#define _CRYPTO_MCRYPT_H
-
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <crypto/hash.h>
-
-struct mcryptd_ahash {
- struct crypto_ahash base;
-};
-
-static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
- struct crypto_ahash *tfm)
-{
- return (struct mcryptd_ahash *)tfm;
-}
-
-struct mcryptd_cpu_queue {
- struct crypto_queue queue;
- spinlock_t q_lock;
- struct work_struct work;
-};
-
-struct mcryptd_queue {
- struct mcryptd_cpu_queue __percpu *cpu_queue;
-};
-
-struct mcryptd_instance_ctx {
- struct crypto_spawn spawn;
- struct mcryptd_queue *queue;
-};
-
-struct mcryptd_hash_ctx {
- struct crypto_ahash *child;
- struct mcryptd_alg_state *alg_state;
-};
-
-struct mcryptd_tag {
- /* seq number of request */
- unsigned seq_num;
- /* arrival time of request */
- unsigned long arrival;
- unsigned long expire;
- int cpu;
-};
-
-struct mcryptd_hash_request_ctx {
- struct list_head waiter;
- crypto_completion_t complete;
- struct mcryptd_tag tag;
- struct crypto_hash_walk walk;
- u8 *out;
- int flag;
- struct ahash_request areq;
-};
-
-struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
-struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req);
-void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
-void mcryptd_flusher(struct work_struct *work);
-
-enum mcryptd_req_type {
- MCRYPTD_NONE,
- MCRYPTD_UPDATE,
- MCRYPTD_FINUP,
- MCRYPTD_DIGEST,
- MCRYPTD_FINAL
-};
-
-struct mcryptd_alg_cstate {
- unsigned long next_flush;
- unsigned next_seq_num;
- bool flusher_engaged;
- struct delayed_work flush;
- int cpu;
- struct mcryptd_alg_state *alg_state;
- void *mgr;
- spinlock_t work_lock;
- struct list_head work_list;
- struct list_head flush_list;
-};
-
-struct mcryptd_alg_state {
- struct mcryptd_alg_cstate __percpu *alg_cstate;
- unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
-};
-
-/* return delay in jiffies from current time */
-static inline unsigned long get_delay(unsigned long t)
-{
- long delay;
-
- delay = (long) t - (long) jiffies;
- if (delay <= 0)
- return 0;
- else
- return (unsigned long) delay;
-}
-
-void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay);
-
-#endif
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
index b26dd70efd9a..ba782e10065e 100644
--- a/include/crypto/morus1280_glue.h
+++ b/include/crypto/morus1280_glue.h
@@ -82,7 +82,7 @@ void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- struct aead_alg crypto_morus1280_##id##_algs[] = {\
+ static struct aead_alg crypto_morus1280_##id##_algs[] = {\
{ \
.setkey = crypto_morus1280_glue_setkey, \
.setauthsize = crypto_morus1280_glue_setauthsize, \
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
index 90c8db07e740..27fa790a2362 100644
--- a/include/crypto/morus640_glue.h
+++ b/include/crypto/morus640_glue.h
@@ -82,7 +82,7 @@ void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- struct aead_alg crypto_morus640_##id##_algs[] = {\
+ static struct aead_alg crypto_morus640_##id##_algs[] = {\
{ \
.setkey = crypto_morus640_glue_setkey, \
.setauthsize = crypto_morus640_glue_setauthsize, \
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 15aeef6e30ef..0ef577cc00e3 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -9,7 +9,7 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
-struct crypto_skcipher *crypto_get_default_null_skcipher(void);
+struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void);
void crypto_put_default_null_skcipher(void);
#endif
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index b95ede354a66..6d258f5b68f1 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -122,6 +122,29 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
}
+static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->seed_cnt);
+#endif
+}
+
+static inline void crypto_stat_rng_generate(struct crypto_rng *tfm,
+ unsigned int dlen, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->generate_cnt);
+ atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen);
+ }
+#endif
+}
+
/**
* crypto_rng_generate() - get random number
* @tfm: cipher handle
@@ -140,7 +163,11 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
- return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+ int ret;
+
+ ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+ crypto_stat_rng_generate(tfm, dlen, ret);
+ return ret;
}
/**
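Usage sketch (illustrative, not from the patch): crypto_rng_get_bytes() goes through crypto_rng_generate(), so a successful call now also accounts the requested length into generate_tlen.

#include <crypto/rng.h>

static int example_rng_read(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* success: generate_cnt++ and generate_tlen += len */
	err = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return err;
}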
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 2f327f090c3e..925f547cdcfa 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -65,6 +65,10 @@ struct crypto_skcipher {
struct crypto_tfm base;
};
+struct crypto_sync_skcipher {
+ struct crypto_skcipher base;
+};
+
/**
* struct skcipher_alg - symmetric key cipher definition
* @min_keysize: Minimum key size supported by the transformation. This is the
@@ -139,9 +143,17 @@ struct skcipher_alg {
struct crypto_alg base;
};
-#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+#define MAX_SYNC_SKCIPHER_REQSIZE 384
+/*
+ * This performs a type-check against the "tfm" argument to make sure
+ * all users have the correct skcipher tfm for doing on-stack requests.
+ */
+#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \
char __##name##_desc[sizeof(struct skcipher_request) + \
- crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
+ MAX_SYNC_SKCIPHER_REQSIZE + \
+ (!(sizeof((struct crypto_sync_skcipher *)1 == \
+ (typeof(tfm))1))) \
+ ] CRYPTO_MINALIGN_ATTR; \
struct skcipher_request *name = (void *)__##name##_desc
/**
@@ -197,6 +209,9 @@ static inline struct crypto_skcipher *__crypto_skcipher_cast(
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
u32 type, u32 mask);
+struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
+ u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_skcipher_tfm(
struct crypto_skcipher *tfm)
{
@@ -212,6 +227,11 @@ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
}
+static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
+{
+ crypto_free_skcipher(&tfm->base);
+}
+
/**
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -280,6 +300,12 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
return tfm->ivsize;
}
+static inline unsigned int crypto_sync_skcipher_ivsize(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_ivsize(&tfm->base);
+}
+
static inline unsigned int crypto_skcipher_alg_chunksize(
struct skcipher_alg *alg)
{
@@ -356,6 +382,12 @@ static inline unsigned int crypto_skcipher_blocksize(
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
+static inline unsigned int crypto_sync_skcipher_blocksize(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_blocksize(&tfm->base);
+}
+
static inline unsigned int crypto_skcipher_alignmask(
struct crypto_skcipher *tfm)
{
@@ -379,6 +411,24 @@ static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
}
+static inline u32 crypto_sync_skcipher_get_flags(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_get_flags(&tfm->base);
+}
+
+static inline void crypto_sync_skcipher_set_flags(
+ struct crypto_sync_skcipher *tfm, u32 flags)
+{
+ crypto_skcipher_set_flags(&tfm->base, flags);
+}
+
+static inline void crypto_sync_skcipher_clear_flags(
+ struct crypto_sync_skcipher *tfm, u32 flags)
+{
+ crypto_skcipher_clear_flags(&tfm->base, flags);
+}
+
/**
* crypto_skcipher_setkey() - set key for cipher
* @tfm: cipher handle
@@ -401,6 +451,12 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
return tfm->setkey(tfm, key, keylen);
}
+static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_skcipher_setkey(&tfm->base, key, keylen);
+}
+
static inline unsigned int crypto_skcipher_default_keysize(
struct crypto_skcipher *tfm)
{
@@ -422,6 +478,40 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
return __crypto_skcipher_cast(req->base.tfm);
}
+static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm(
+ struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+ return container_of(tfm, struct crypto_sync_skcipher, base);
+}
+
+static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req,
+ int ret, struct crypto_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&alg->cipher_err_cnt);
+ } else {
+ atomic_inc(&alg->encrypt_cnt);
+ atomic64_add(req->cryptlen, &alg->encrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
+ int ret, struct crypto_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&alg->cipher_err_cnt);
+ } else {
+ atomic_inc(&alg->decrypt_cnt);
+ atomic64_add(req->cryptlen, &alg->decrypt_tlen);
+ }
+#endif
+}
+
/**
* crypto_skcipher_encrypt() - encrypt plaintext
* @req: reference to the skcipher_request handle that holds all information
@@ -436,11 +526,14 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ int ret;
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->encrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = tfm->encrypt(req);
+ crypto_stat_skcipher_encrypt(req, ret, tfm->base.__crt_alg);
+ return ret;
}
/**
@@ -457,11 +550,14 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ int ret;
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->decrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = tfm->decrypt(req);
+ crypto_stat_skcipher_decrypt(req, ret, tfm->base.__crt_alg);
+ return ret;
}
/**
@@ -500,6 +596,12 @@ static inline void skcipher_request_set_tfm(struct skcipher_request *req,
req->base.tfm = crypto_skcipher_tfm(tfm);
}
+static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req,
+ struct crypto_sync_skcipher *tfm)
+{
+ skcipher_request_set_tfm(req, &tfm->base);
+}
+
static inline struct skcipher_request *skcipher_request_cast(
struct crypto_async_request *req)
{
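A minimal sketch of the new sync skcipher API, modelled on the conversions elsewhere in this series; the "cbc(aes)" name and key handling are illustrative. The on-stack request relies on the MAX_SYNC_SKCIPHER_REQSIZE bound introduced above, and the type-checking macro rejects a plain crypto_skcipher here.

#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int example_sync_encrypt(struct scatterlist *sg, unsigned int len,
				u8 *iv, const u8 *key, unsigned int keylen)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, sg, sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}
	crypto_free_sync_skcipher(tfm);
	return err;
}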
diff --git a/include/crypto/speck.h b/include/crypto/speck.h
deleted file mode 100644
index 73cfc952d405..000000000000
--- a/include/crypto/speck.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Common values for the Speck algorithm
- */
-
-#ifndef _CRYPTO_SPECK_H
-#define _CRYPTO_SPECK_H
-
-#include <linux/types.h>
-
-/* Speck128 */
-
-#define SPECK128_BLOCK_SIZE 16
-
-#define SPECK128_128_KEY_SIZE 16
-#define SPECK128_128_NROUNDS 32
-
-#define SPECK128_192_KEY_SIZE 24
-#define SPECK128_192_NROUNDS 33
-
-#define SPECK128_256_KEY_SIZE 32
-#define SPECK128_256_NROUNDS 34
-
-struct speck128_tfm_ctx {
- u64 round_keys[SPECK128_256_NROUNDS];
- int nrounds;
-};
-
-void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
- unsigned int keysize);
-
-/* Speck64 */
-
-#define SPECK64_BLOCK_SIZE 8
-
-#define SPECK64_96_KEY_SIZE 12
-#define SPECK64_96_NROUNDS 26
-
-#define SPECK64_128_KEY_SIZE 16
-#define SPECK64_128_NROUNDS 27
-
-struct speck64_tfm_ctx {
- u32 round_keys[SPECK64_128_NROUNDS];
- int nrounds;
-};
-
-void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
- unsigned int keysize);
-
-#endif /* _CRYPTO_SPECK_H */
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index da9d95a19580..1e713154f00e 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -153,6 +153,17 @@ struct __drm_planes_state {
struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state, *old_state, *new_state;
+
+ /**
+ * @commit:
+ *
+ * A reference to the CRTC commit object that is kept for use by
+ * drm_atomic_helper_wait_for_flip_done() after
+ * drm_atomic_helper_commit_hw_done() is called. This ensures that a
+ * concurrent commit won't free a commit object that is still in use.
+ */
+ struct drm_crtc_commit *commit;
+
s32 __user *out_fence_ptr;
u64 last_vblank_count;
};
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 989f8e52864d..971bb7853776 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -87,9 +87,10 @@ struct drm_client_dev {
struct drm_file *file;
};
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
- const char *name, const struct drm_client_funcs *funcs);
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+ const char *name, const struct drm_client_funcs *funcs);
void drm_client_release(struct drm_client_dev *client);
+void drm_client_add(struct drm_client_dev *client);
void drm_client_dev_unregister(struct drm_device *dev);
void drm_client_dev_hotplug(struct drm_device *dev);
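Sketch of the split init/register flow this change enables (illustrative; the funcs table and intermediate setup are elided): the client only becomes visible to hotplug and unregister handling once it is fully set up.

static int example_client_setup(struct drm_device *dev,
				struct drm_client_dev *client,
				const struct drm_client_funcs *funcs)
{
	int ret;

	ret = drm_client_init(dev, client, "example", funcs);
	if (ret)
		return ret;

	/* setup that may fail goes here; on error call drm_client_release() */

	drm_client_add(client);	/* now visible to dev hotplug/unregister */
	return 0;
}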
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 46a8009784df..152b3055e9e1 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -675,7 +675,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
{
return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
- dev->mode_config.funcs->atomic_commit != NULL;
+ (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL);
}
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b25d12ef120a..e3c404833115 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -214,9 +214,9 @@ struct detailed_timing {
#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
/* YCBCR 420 deep color modes */
-#define DRM_EDID_YCBCR420_DC_48 (1 << 6)
-#define DRM_EDID_YCBCR420_DC_36 (1 << 5)
-#define DRM_EDID_YCBCR420_DC_30 (1 << 4)
+#define DRM_EDID_YCBCR420_DC_48 (1 << 2)
+#define DRM_EDID_YCBCR420_DC_36 (1 << 1)
+#define DRM_EDID_YCBCR420_DC_30 (1 << 0)
#define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \
DRM_EDID_YCBCR420_DC_36 | \
DRM_EDID_YCBCR420_DC_30)
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 582a0ec0aa70..777814755fa6 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -89,7 +89,6 @@ struct drm_panel {
struct drm_device *drm;
struct drm_connector *connector;
struct device *dev;
- struct device_link *link;
const struct drm_panel_funcs *funcs;
diff --git a/include/dt-bindings/gpio/meson-g12a-gpio.h b/include/dt-bindings/gpio/meson-g12a-gpio.h
new file mode 100644
index 000000000000..f7bd69350d18
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-g12a-gpio.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/*
+ * Copyright (c) 2018 Amlogic, Inc. All rights reserved.
+ * Author: Xingyu Chen <xingyu.chen@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_G12A_GPIO_H
+#define _DT_BINDINGS_MESON_G12A_GPIO_H
+
+/* First GPIO chip */
+#define GPIOAO_0 0
+#define GPIOAO_1 1
+#define GPIOAO_2 2
+#define GPIOAO_3 3
+#define GPIOAO_4 4
+#define GPIOAO_5 5
+#define GPIOAO_6 6
+#define GPIOAO_7 7
+#define GPIOAO_8 8
+#define GPIOAO_9 9
+#define GPIOAO_10 10
+#define GPIOAO_11 11
+#define GPIOE_0 12
+#define GPIOE_1 13
+#define GPIOE_2 14
+
+/* Second GPIO chip */
+#define GPIOZ_0 0
+#define GPIOZ_1 1
+#define GPIOZ_2 2
+#define GPIOZ_3 3
+#define GPIOZ_4 4
+#define GPIOZ_5 5
+#define GPIOZ_6 6
+#define GPIOZ_7 7
+#define GPIOZ_8 8
+#define GPIOZ_9 9
+#define GPIOZ_10 10
+#define GPIOZ_11 11
+#define GPIOZ_12 12
+#define GPIOZ_13 13
+#define GPIOZ_14 14
+#define GPIOZ_15 15
+#define GPIOH_0 16
+#define GPIOH_1 17
+#define GPIOH_2 18
+#define GPIOH_3 19
+#define GPIOH_4 20
+#define GPIOH_5 21
+#define GPIOH_6 22
+#define GPIOH_7 23
+#define GPIOH_8 24
+#define BOOT_0 25
+#define BOOT_1 26
+#define BOOT_2 27
+#define BOOT_3 28
+#define BOOT_4 29
+#define BOOT_5 30
+#define BOOT_6 31
+#define BOOT_7 32
+#define BOOT_8 33
+#define BOOT_9 34
+#define BOOT_10 35
+#define BOOT_11 36
+#define BOOT_12 37
+#define BOOT_13 38
+#define BOOT_14 39
+#define BOOT_15 40
+#define GPIOC_0 41
+#define GPIOC_1 42
+#define GPIOC_2 43
+#define GPIOC_3 44
+#define GPIOC_4 45
+#define GPIOC_5 46
+#define GPIOC_6 47
+#define GPIOC_7 48
+#define GPIOA_0 49
+#define GPIOA_1 50
+#define GPIOA_2 51
+#define GPIOA_3 52
+#define GPIOA_4 53
+#define GPIOA_5 54
+#define GPIOA_6 55
+#define GPIOA_7 56
+#define GPIOA_8 57
+#define GPIOA_9 58
+#define GPIOA_10 59
+#define GPIOA_11 60
+#define GPIOA_12 61
+#define GPIOA_13 62
+#define GPIOA_14 63
+#define GPIOA_15 64
+#define GPIOX_0 65
+#define GPIOX_1 66
+#define GPIOX_2 67
+#define GPIOX_3 68
+#define GPIOX_4 69
+#define GPIOX_5 70
+#define GPIOX_6 71
+#define GPIOX_7 72
+#define GPIOX_8 73
+#define GPIOX_9 74
+#define GPIOX_10 75
+#define GPIOX_11 76
+#define GPIOX_12 77
+#define GPIOX_13 78
+#define GPIOX_14 79
+#define GPIOX_15 80
+#define GPIOX_16 81
+#define GPIOX_17 82
+#define GPIOX_18 83
+#define GPIOX_19 84
+
+#endif /* _DT_BINDINGS_MESON_G12A_GPIO_H */
diff --git a/include/dt-bindings/mfd/at91-usart.h b/include/dt-bindings/mfd/at91-usart.h
new file mode 100644
index 000000000000..2de5bc312e1e
--- /dev/null
+++ b/include/dt-bindings/mfd/at91-usart.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides macros for AT91 USART DT bindings.
+ *
+ * Copyright (C) 2018 Microchip Technology
+ *
+ * Author: Radu Pirea <radu.pirea@microchip.com>
+ *
+ */
+
+#ifndef __DT_BINDINGS_AT91_USART_H__
+#define __DT_BINDINGS_AT91_USART_H__
+
+#define AT91_USART_MODE_SERIAL 0
+#define AT91_USART_MODE_SPI 1
+
+#endif /* __DT_BINDINGS_AT91_USART_H__ */
diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h
index 697161f80eb5..9eb2ec2b2ea9 100644
--- a/include/dt-bindings/net/mscc-phy-vsc8531.h
+++ b/include/dt-bindings/net/mscc-phy-vsc8531.h
@@ -18,9 +18,11 @@
#define VSC8531_LINK_100_1000_ACTIVITY 4
#define VSC8531_LINK_10_1000_ACTIVITY 5
#define VSC8531_LINK_10_100_ACTIVITY 6
+#define VSC8584_LINK_100FX_1000X_ACTIVITY 7
#define VSC8531_DUPLEX_COLLISION 8
#define VSC8531_COLLISION 9
#define VSC8531_ACTIVITY 10
+#define VSC8584_100FX_1000X_ACTIVITY 11
#define VSC8531_AUTONEG_FAULT 12
#define VSC8531_SERIAL_MODE 13
#define VSC8531_FORCE_LED_OFF 14
diff --git a/include/dt-bindings/phy/phy-ocelot-serdes.h b/include/dt-bindings/phy/phy-ocelot-serdes.h
new file mode 100644
index 000000000000..fe70adaca68f
--- /dev/null
+++ b/include/dt-bindings/phy/phy-ocelot-serdes.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Copyright (c) 2018 Microsemi Corporation */
+#ifndef __PHY_OCELOT_SERDES_H__
+#define __PHY_OCELOT_SERDES_H__
+
+#define SERDES1G(x) (x)
+#define SERDES1G_MAX SERDES1G(5)
+#define SERDES6G(x) (SERDES1G_MAX + 1 + (x))
+#define SERDES6G_MAX SERDES6G(2)
+#define SERDES_MAX (SERDES6G_MAX + 1)
+
+#endif
diff --git a/include/dt-bindings/pinctrl/rzn1-pinctrl.h b/include/dt-bindings/pinctrl/rzn1-pinctrl.h
new file mode 100644
index 000000000000..21d6cc4d59f5
--- /dev/null
+++ b/include/dt-bindings/pinctrl/rzn1-pinctrl.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Defines macros and constants for Renesas RZ/N1 pin controller pin
+ * muxing functions.
+ */
+#ifndef __DT_BINDINGS_RZN1_PINCTRL_H
+#define __DT_BINDINGS_RZN1_PINCTRL_H
+
+#define RZN1_PINMUX(_gpio, _func) \
+ (((_func) << 8) | (_gpio))
+
+/*
+ * Given the different levels of muxing on the SoC, it was decided to
+ * 'linearize' them into one numerical space. So mux level 1, 2 and the MDIO
+ * muxes are all represented by one single value.
+ *
+ * You can derive the hardware value pretty easily too, as
+ * 0...9 are Level 1
+ * 10...71 are Level 2. The Level 2 mux will be set to this
+ * value - RZN1_FUNC_L2_OFFSET, and the Level 1 mux will be
+ * set accordingly.
+ * 72...103 are for the 2 MDIO muxes.
+ */
+#define RZN1_FUNC_HIGHZ 0
+#define RZN1_FUNC_0L 1
+#define RZN1_FUNC_CLK_ETH_MII_RGMII_RMII 2
+#define RZN1_FUNC_CLK_ETH_NAND 3
+#define RZN1_FUNC_QSPI 4
+#define RZN1_FUNC_SDIO 5
+#define RZN1_FUNC_LCD 6
+#define RZN1_FUNC_LCD_E 7
+#define RZN1_FUNC_MSEBIM 8
+#define RZN1_FUNC_MSEBIS 9
+#define RZN1_FUNC_L2_OFFSET 10 /* I'm Special */
+
+#define RZN1_FUNC_HIGHZ1 (RZN1_FUNC_L2_OFFSET + 0)
+#define RZN1_FUNC_ETHERCAT (RZN1_FUNC_L2_OFFSET + 1)
+#define RZN1_FUNC_SERCOS3 (RZN1_FUNC_L2_OFFSET + 2)
+#define RZN1_FUNC_SDIO_E (RZN1_FUNC_L2_OFFSET + 3)
+#define RZN1_FUNC_ETH_MDIO (RZN1_FUNC_L2_OFFSET + 4)
+#define RZN1_FUNC_ETH_MDIO_E1 (RZN1_FUNC_L2_OFFSET + 5)
+#define RZN1_FUNC_USB (RZN1_FUNC_L2_OFFSET + 6)
+#define RZN1_FUNC_MSEBIM_E (RZN1_FUNC_L2_OFFSET + 7)
+#define RZN1_FUNC_MSEBIS_E (RZN1_FUNC_L2_OFFSET + 8)
+#define RZN1_FUNC_RSV (RZN1_FUNC_L2_OFFSET + 9)
+#define RZN1_FUNC_RSV_E (RZN1_FUNC_L2_OFFSET + 10)
+#define RZN1_FUNC_RSV_E1 (RZN1_FUNC_L2_OFFSET + 11)
+#define RZN1_FUNC_UART0_I (RZN1_FUNC_L2_OFFSET + 12)
+#define RZN1_FUNC_UART0_I_E (RZN1_FUNC_L2_OFFSET + 13)
+#define RZN1_FUNC_UART1_I (RZN1_FUNC_L2_OFFSET + 14)
+#define RZN1_FUNC_UART1_I_E (RZN1_FUNC_L2_OFFSET + 15)
+#define RZN1_FUNC_UART2_I (RZN1_FUNC_L2_OFFSET + 16)
+#define RZN1_FUNC_UART2_I_E (RZN1_FUNC_L2_OFFSET + 17)
+#define RZN1_FUNC_UART0 (RZN1_FUNC_L2_OFFSET + 18)
+#define RZN1_FUNC_UART0_E (RZN1_FUNC_L2_OFFSET + 19)
+#define RZN1_FUNC_UART1 (RZN1_FUNC_L2_OFFSET + 20)
+#define RZN1_FUNC_UART1_E (RZN1_FUNC_L2_OFFSET + 21)
+#define RZN1_FUNC_UART2 (RZN1_FUNC_L2_OFFSET + 22)
+#define RZN1_FUNC_UART2_E (RZN1_FUNC_L2_OFFSET + 23)
+#define RZN1_FUNC_UART3 (RZN1_FUNC_L2_OFFSET + 24)
+#define RZN1_FUNC_UART3_E (RZN1_FUNC_L2_OFFSET + 25)
+#define RZN1_FUNC_UART4 (RZN1_FUNC_L2_OFFSET + 26)
+#define RZN1_FUNC_UART4_E (RZN1_FUNC_L2_OFFSET + 27)
+#define RZN1_FUNC_UART5 (RZN1_FUNC_L2_OFFSET + 28)
+#define RZN1_FUNC_UART5_E (RZN1_FUNC_L2_OFFSET + 29)
+#define RZN1_FUNC_UART6 (RZN1_FUNC_L2_OFFSET + 30)
+#define RZN1_FUNC_UART6_E (RZN1_FUNC_L2_OFFSET + 31)
+#define RZN1_FUNC_UART7 (RZN1_FUNC_L2_OFFSET + 32)
+#define RZN1_FUNC_UART7_E (RZN1_FUNC_L2_OFFSET + 33)
+#define RZN1_FUNC_SPI0_M (RZN1_FUNC_L2_OFFSET + 34)
+#define RZN1_FUNC_SPI0_M_E (RZN1_FUNC_L2_OFFSET + 35)
+#define RZN1_FUNC_SPI1_M (RZN1_FUNC_L2_OFFSET + 36)
+#define RZN1_FUNC_SPI1_M_E (RZN1_FUNC_L2_OFFSET + 37)
+#define RZN1_FUNC_SPI2_M (RZN1_FUNC_L2_OFFSET + 38)
+#define RZN1_FUNC_SPI2_M_E (RZN1_FUNC_L2_OFFSET + 39)
+#define RZN1_FUNC_SPI3_M (RZN1_FUNC_L2_OFFSET + 40)
+#define RZN1_FUNC_SPI3_M_E (RZN1_FUNC_L2_OFFSET + 41)
+#define RZN1_FUNC_SPI4_S (RZN1_FUNC_L2_OFFSET + 42)
+#define RZN1_FUNC_SPI4_S_E (RZN1_FUNC_L2_OFFSET + 43)
+#define RZN1_FUNC_SPI5_S (RZN1_FUNC_L2_OFFSET + 44)
+#define RZN1_FUNC_SPI5_S_E (RZN1_FUNC_L2_OFFSET + 45)
+#define RZN1_FUNC_SGPIO0_M (RZN1_FUNC_L2_OFFSET + 46)
+#define RZN1_FUNC_SGPIO1_M (RZN1_FUNC_L2_OFFSET + 47)
+#define RZN1_FUNC_GPIO (RZN1_FUNC_L2_OFFSET + 48)
+#define RZN1_FUNC_CAN (RZN1_FUNC_L2_OFFSET + 49)
+#define RZN1_FUNC_I2C (RZN1_FUNC_L2_OFFSET + 50)
+#define RZN1_FUNC_SAFE (RZN1_FUNC_L2_OFFSET + 51)
+#define RZN1_FUNC_PTO_PWM (RZN1_FUNC_L2_OFFSET + 52)
+#define RZN1_FUNC_PTO_PWM1 (RZN1_FUNC_L2_OFFSET + 53)
+#define RZN1_FUNC_PTO_PWM2 (RZN1_FUNC_L2_OFFSET + 54)
+#define RZN1_FUNC_PTO_PWM3 (RZN1_FUNC_L2_OFFSET + 55)
+#define RZN1_FUNC_PTO_PWM4 (RZN1_FUNC_L2_OFFSET + 56)
+#define RZN1_FUNC_DELTA_SIGMA (RZN1_FUNC_L2_OFFSET + 57)
+#define RZN1_FUNC_SGPIO2_M (RZN1_FUNC_L2_OFFSET + 58)
+#define RZN1_FUNC_SGPIO3_M (RZN1_FUNC_L2_OFFSET + 59)
+#define RZN1_FUNC_SGPIO4_S (RZN1_FUNC_L2_OFFSET + 60)
+#define RZN1_FUNC_MAC_MTIP_SWITCH (RZN1_FUNC_L2_OFFSET + 61)
+
+#define RZN1_FUNC_MDIO_OFFSET (RZN1_FUNC_L2_OFFSET + 62)
+
+/* These are MDIO0 peripherals for the RZN1_FUNC_ETH_MDIO function */
+#define RZN1_FUNC_MDIO0_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 0)
+#define RZN1_FUNC_MDIO0_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 1)
+#define RZN1_FUNC_MDIO0_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 2)
+#define RZN1_FUNC_MDIO0_ECAT (RZN1_FUNC_MDIO_OFFSET + 3)
+#define RZN1_FUNC_MDIO0_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 4)
+#define RZN1_FUNC_MDIO0_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 5)
+#define RZN1_FUNC_MDIO0_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 6)
+#define RZN1_FUNC_MDIO0_SWITCH (RZN1_FUNC_MDIO_OFFSET + 7)
+/* These are MDIO0 peripherals for the RZN1_FUNC_ETH_MDIO_E1 function */
+#define RZN1_FUNC_MDIO0_E1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 8)
+#define RZN1_FUNC_MDIO0_E1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 9)
+#define RZN1_FUNC_MDIO0_E1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 10)
+#define RZN1_FUNC_MDIO0_E1_ECAT (RZN1_FUNC_MDIO_OFFSET + 11)
+#define RZN1_FUNC_MDIO0_E1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 12)
+#define RZN1_FUNC_MDIO0_E1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 13)
+#define RZN1_FUNC_MDIO0_E1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 14)
+#define RZN1_FUNC_MDIO0_E1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 15)
+
+/* These are MDIO1 peripherals for the RZN1_FUNC_ETH_MDIO function */
+#define RZN1_FUNC_MDIO1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 16)
+#define RZN1_FUNC_MDIO1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 17)
+#define RZN1_FUNC_MDIO1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 18)
+#define RZN1_FUNC_MDIO1_ECAT (RZN1_FUNC_MDIO_OFFSET + 19)
+#define RZN1_FUNC_MDIO1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 20)
+#define RZN1_FUNC_MDIO1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 21)
+#define RZN1_FUNC_MDIO1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 22)
+#define RZN1_FUNC_MDIO1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 23)
+/* These are MDIO1 peripherals for the RZN1_FUNC_ETH_MDIO_E1 function */
+#define RZN1_FUNC_MDIO1_E1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 24)
+#define RZN1_FUNC_MDIO1_E1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 25)
+#define RZN1_FUNC_MDIO1_E1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 26)
+#define RZN1_FUNC_MDIO1_E1_ECAT (RZN1_FUNC_MDIO_OFFSET + 27)
+#define RZN1_FUNC_MDIO1_E1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 28)
+#define RZN1_FUNC_MDIO1_E1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 29)
+#define RZN1_FUNC_MDIO1_E1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 30)
+#define RZN1_FUNC_MDIO1_E1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 31)
+
+#define RZN1_FUNC_MAX (RZN1_FUNC_MDIO_OFFSET + 32)
+
+#endif /* __DT_BINDINGS_RZN1_PINCTRL_H */
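A worked check of the linearization described above (illustrative, not part of the header): RZN1_FUNC_UART0 lands at 10 + 18 = 28, so the Level 2 mux would be programmed with 28 - RZN1_FUNC_L2_OFFSET = 18, and RZN1_PINMUX() packs the function into bits 15:8.

#include <dt-bindings/pinctrl/rzn1-pinctrl.h>

_Static_assert(RZN1_FUNC_UART0 == 28,
	       "Level 2 function 18, linearized past the 10 Level 1 values");
_Static_assert(RZN1_PINMUX(5, RZN1_FUNC_UART0) == 0x1c05,
	       "(28 << 8) | 5: function in bits 15:8, gpio in bits 7:0");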
diff --git a/include/dt-bindings/reset/imx7-reset.h b/include/dt-bindings/reset/imx7-reset.h
index 63948170c7b2..31b3f87dde9a 100644
--- a/include/dt-bindings/reset/imx7-reset.h
+++ b/include/dt-bindings/reset/imx7-reset.h
@@ -56,7 +56,9 @@
#define IMX7_RESET_DDRC_PRST 23
#define IMX7_RESET_DDRC_CORE_RST 24
-#define IMX7_RESET_NUM 25
+#define IMX7_RESET_PCIE_CTRL_APPS_TURNOFF 25
+
+#define IMX7_RESET_NUM 26
#endif
diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h
index 7b7a92fefa0a..985f2bbd4d24 100644
--- a/include/dt-bindings/usb/pd.h
+++ b/include/dt-bindings/usb/pd.h
@@ -59,4 +59,30 @@
(PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \
PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma))
+#define APDO_TYPE_PPS 0
+
+#define PDO_APDO_TYPE_SHIFT 28 /* Only valid value currently is 0x0 - PPS */
+#define PDO_APDO_TYPE_MASK 0x3
+
+#define PDO_APDO_TYPE(t) ((t) << PDO_APDO_TYPE_SHIFT)
+
+#define PDO_PPS_APDO_MAX_VOLT_SHIFT 17 /* 100mV units */
+#define PDO_PPS_APDO_MIN_VOLT_SHIFT 8 /* 100mV units */
+#define PDO_PPS_APDO_MAX_CURR_SHIFT 0 /* 50mA units */
+
+#define PDO_PPS_APDO_VOLT_MASK 0xff
+#define PDO_PPS_APDO_CURR_MASK 0x7f
+
+#define PDO_PPS_APDO_MIN_VOLT(mv) \
+ ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MIN_VOLT_SHIFT)
+#define PDO_PPS_APDO_MAX_VOLT(mv) \
+ ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MAX_VOLT_SHIFT)
+#define PDO_PPS_APDO_MAX_CURR(ma) \
+ ((((ma) / 50) & PDO_PPS_APDO_CURR_MASK) << PDO_PPS_APDO_MAX_CURR_SHIFT)
+
+#define PDO_PPS_APDO(min_mv, max_mv, max_ma) \
+ (PDO_TYPE(PDO_TYPE_APDO) | PDO_APDO_TYPE(APDO_TYPE_PPS) | \
+ PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \
+ PDO_PPS_APDO_MAX_CURR(max_ma))
+
#endif /* __DT_POWER_DELIVERY_H */
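Worked example (illustrative, not part of the binding): a 3.3V-11V, 3A programmable power supply APDO, showing how the 100mV and 50mA granularity falls out of the divides above.

/* PDO_PPS_APDO(3300, 11000, 3000) expands to:
 *   PDO_TYPE(PDO_TYPE_APDO) | PDO_APDO_TYPE(APDO_TYPE_PPS) |
 *   (3300 / 100 << 8) | (11000 / 100 << 17) | (3000 / 50 << 0)
 * i.e. min 33 x 100mV, max 110 x 100mV, max 60 x 50mA.
 */
#define EXAMPLE_PPS_APDO	PDO_PPS_APDO(3300, 11000, 3000)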
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index de8d3d3fa651..ed80f147bd50 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -831,8 +831,6 @@ static inline int acpi_dma_configure(struct device *dev,
return 0;
}
-static inline void acpi_dma_deconfigure(struct device *dev) { }
-
#define ACPI_PTR(_ptr) (NULL)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -1074,6 +1072,15 @@ static inline int acpi_node_get_property_reference(
NR_FWNODE_REFERENCE_ARGS, args);
}
+static inline bool acpi_dev_has_props(const struct acpi_device *adev)
+{
+ return !list_empty(&adev->data.properties);
+}
+
+struct acpi_device_properties *
+acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
+ const union acpi_object *properties);
+
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
int acpi_dev_prop_read_single(struct acpi_device *adev,
diff --git a/include/linux/adxl.h b/include/linux/adxl.h
new file mode 100644
index 000000000000..2a629acb4c3f
--- /dev/null
+++ b/include/linux/adxl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Address translation interface via ACPI DSM.
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef _LINUX_ADXL_H
+#define _LINUX_ADXL_H
+
+const char * const *adxl_get_component_names(void);
+int adxl_decode(u64 addr, u64 component_values[]);
+
+#endif /* _LINUX_ADXL_H */
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index da8357ba11bc..c92ebc39fc1f 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -18,20 +18,13 @@
* mask into a value to be binary (or set some other custom bits
* in MMCIPWR) or:ed and written into the MMCIPWR register of the
* block. May also control external power based on the power_mode.
- * @status: if no GPIO read function was given to the block in
- * gpio_wp (below) this function will be called to determine
- * whether a card is present in the MMC slot or not
- * @gpio_wp: read this GPIO pin to see if the card is write protected
- * @gpio_cd: read this GPIO pin to detect card insertion
- * @cd_invert: true if the gpio_cd pin value is active low
+ * @status: if no GPIO line was given to the block, this function will be
+ *	called to determine whether a card is present in the MMC slot or not
*/
struct mmci_platform_data {
unsigned int ocr_mask;
int (*ios_handler)(struct device *, struct mmc_ios *);
unsigned int (*status)(struct device *);
- int gpio_wp;
- int gpio_cd;
- bool cd_invert;
};
#endif
diff --git a/include/linux/amifd.h b/include/linux/amifd.h
deleted file mode 100644
index 202a77dbe46d..000000000000
--- a/include/linux/amifd.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _AMIFD_H
-#define _AMIFD_H
-
-/* Definitions for the Amiga floppy driver */
-
-#include <linux/fd.h>
-
-#define FD_MAX_UNITS 4 /* Max. Number of drives */
-#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
-
-#ifndef ASSEMBLER
-
-struct fd_data_type {
- char *name; /* description of data type */
- int sects; /* sectors per track */
-#ifdef __STDC__
- int (*read_fkt)(int);
- void (*write_fkt)(int);
-#else
- int (*read_fkt)(); /* read whole track */
- void (*write_fkt)(); /* write whole track */
-#endif
-};
-
-/*
-** Floppy type descriptions
-*/
-
-struct fd_drive_type {
- unsigned long code; /* code returned from drive */
- char *name; /* description of drive */
- unsigned int tracks; /* number of tracks */
- unsigned int heads; /* number of heads */
- unsigned int read_size; /* raw read size for one track */
- unsigned int write_size; /* raw write size for one track */
- unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
- unsigned int precomp1; /* start track for precomp 1 */
- unsigned int precomp2; /* start track for precomp 2 */
- unsigned int step_delay; /* time (in ms) for delay after step */
- unsigned int settle_time; /* time to settle after dir change */
- unsigned int side_time; /* time needed to change sides */
-};
-
-struct amiga_floppy_struct {
- struct fd_drive_type *type; /* type of floppy for this unit */
- struct fd_data_type *dtype; /* type of floppy for this unit */
- int track; /* current track (-1 == unknown) */
- unsigned char *trackbuf; /* current track (kmaloc()'d */
-
- int blocks; /* total # blocks on disk */
-
- int changed; /* true when not known */
- int disk; /* disk in drive (-1 == unknown) */
- int motor; /* true when motor is at speed */
- int busy; /* true when drive is active */
- int dirty; /* true when trackbuf is not on disk */
- int status; /* current error code for unit */
- struct gendisk *gendisk;
-};
-#endif
-
-#endif
diff --git a/include/linux/amifdreg.h b/include/linux/amifdreg.h
deleted file mode 100644
index 9b514d05ec70..000000000000
--- a/include/linux/amifdreg.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_AMIFDREG_H
-#define _LINUX_AMIFDREG_H
-
-/*
-** CIAAPRA bits (read only)
-*/
-
-#define DSKRDY (0x1<<5) /* disk ready when low */
-#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
-#define DSKPROT (0x1<<3) /* disk protected when low */
-#define DSKCHANGE (0x1<<2) /* low when disk removed */
-
-/*
-** CIAAPRB bits (read/write)
-*/
-
-#define DSKMOTOR (0x1<<7) /* motor on when low */
-#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
-#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
-#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
-#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
-#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
-#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
-#define DSKSTEP (0x1) /* pulse low to step head 1 track */
-
-/*
-** DSKBYTR bits (read only)
-*/
-
-#define DSKBYT (1<<15) /* register contains valid byte when set */
-#define DMAON (1<<14) /* disk DMA enabled */
-#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
-#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
-/* bits 7-0 are data */
-
-/*
-** ADKCON/ADKCONR bits
-*/
-
-#ifndef SETCLR
-#define ADK_SETCLR (1<<15) /* control bit */
-#endif
-#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
-#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
-#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
-#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
-#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
-#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
-
-/*
-** DSKLEN bits
-*/
-
-#define DSKLEN_DMAEN (1<<15)
-#define DSKLEN_WRITE (1<<14)
-
-/*
-** INTENA/INTREQ bits
-*/
-
-#define DSKINDEX (0x1<<4) /* DSKINDEX bit */
-
-/*
-** Misc
-*/
-
-#define MFM_SYNC 0x4489 /* standard MFM sync value */
-
-/* Values for FD_COMMAND */
-#define FD_RECALIBRATE 0x07 /* move to track 0 */
-#define FD_SEEK 0x0F /* seek track */
-#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
-#define FD_WRITE 0xC5 /* write with MT, MFM */
-#define FD_SENSEI 0x08 /* Sense Interrupt Status */
-#define FD_SPECIFY 0x03 /* specify HUT etc */
-#define FD_FORMAT 0x4D /* format one track */
-#define FD_VERSION 0x10 /* get version code */
-#define FD_CONFIGURE 0x13 /* configure FIFO operation */
-#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
-
-#endif /* _LINUX_AMIFDREG_H */
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 2b709416de05..d9bdc1a7f4e7 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -9,6 +9,7 @@
#include <linux/percpu.h>
void topology_normalize_cpu_scale(void);
+int topology_update_cpu_topology(void);
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 212b3822d180..2c9756bd9c4c 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -252,6 +252,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
+/* Define below the capability flags that are not offloads */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -573,7 +575,7 @@ struct virtchnl_filter {
enum virtchnl_flow_type flow_type;
enum virtchnl_action action;
u32 action_meta;
- __u8 field_flags;
+ u8 field_flags;
};
VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
@@ -596,10 +598,23 @@ enum virtchnl_event_codes {
struct virtchnl_pf_event {
enum virtchnl_event_codes event;
union {
+		/* If the PF driver does not support the new speed reporting
+		 * capabilities, use link_event; otherwise use link_event_adv
+		 * to get the speed and link information. Support for the new
+		 * speeds is indicated by the VIRTCHNL_VF_CAP_ADV_LINK_SPEED
+		 * flag in the vf_cap_flags field of struct
+		 * virtchnl_vf_resource, which determines which link event
+		 * struct to use below.
+		 */
struct {
enum virtchnl_link_speed link_speed;
bool link_status;
} link_event;
+ struct {
+ /* link_speed provided in Mbps */
+ u32 link_speed;
+ u8 link_status;
+ } link_event_adv;
} event_data;
int severity;
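Illustrative only: how a VF driver might select between the two event structs based on the capability flag; the helper name is hypothetical and the legacy translation is left as a stub.

static u32 example_link_speed_mbps(struct virtchnl_pf_event *e,
				   u32 vf_cap_flags)
{
	if (vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		return e->event_data.link_event_adv.link_speed; /* already Mbps */

	/* legacy path: enum virtchnl_link_speed must be translated to Mbps */
	return 0;
}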
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index c05f24fac4f6..e9f5fe69df31 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -78,7 +78,7 @@ struct linux_binprm {
/* Function parameter for binfmt->coredump */
struct coredump_params {
- const siginfo_t *siginfo;
+ const kernel_siginfo_t *siginfo;
struct pt_regs *regs;
struct file *file;
unsigned long limit;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 51371740d2a8..b47c7f716731 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -21,12 +21,8 @@
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
-#include <linux/bug.h>
#ifdef CONFIG_BLOCK
-
-#include <asm/io.h>
-
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
@@ -133,32 +129,6 @@ static inline bool bio_full(struct bio *bio)
}
/*
- * will die
- */
-#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
-
-/*
- * merge helpers etc
- */
-
-/* Default implementation of BIOVEC_PHYS_MERGEABLE */
-#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-/*
- * allow arch override, for eg virtualized architectures (put in asm/io.h)
- */
-#ifndef BIOVEC_PHYS_MERGEABLE
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
-#endif
-
-#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
- (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
-#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
- __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
-
-/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
@@ -170,27 +140,11 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
{
iter->bi_sector += bytes >> 9;
- if (bio_no_advance_iter(bio)) {
+ if (bio_no_advance_iter(bio))
iter->bi_size -= bytes;
- iter->bi_done += bytes;
- } else {
+ else
bvec_iter_advance(bio->bi_io_vec, iter, bytes);
/* TODO: It is reasonable to complete bio with error here. */
- }
-}
-
-static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter,
- unsigned int bytes)
-{
- iter->bi_sector -= bytes >> 9;
-
- if (bio_no_advance_iter(bio)) {
- iter->bi_size += bytes;
- iter->bi_done -= bytes;
- return true;
- }
-
- return bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
}
#define __bio_for_each_segment(bvl, bio, iter, start) \
@@ -353,6 +307,8 @@ struct bio_integrity_payload {
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned short bip_flags; /* control flags */
+ struct bvec_iter bio_iter; /* for rewinding parent bio */
+
struct work_struct bip_work; /* I/O completion */
struct bio_vec *bip_vec;
@@ -547,23 +503,31 @@ do { \
disk_devt((bio)->bi_disk)
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
+int bio_associate_blkg_from_page(struct bio *bio, struct page *page);
#else
-static inline int bio_associate_blkcg_from_page(struct bio *bio,
- struct page *page) { return 0; }
+static inline int bio_associate_blkg_from_page(struct bio *bio,
+ struct page *page) { return 0; }
#endif
#ifdef CONFIG_BLK_CGROUP
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
+int bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css);
+int bio_associate_create_blkg(struct request_queue *q, struct bio *bio);
+int bio_reassociate_blkg(struct request_queue *q, struct bio *bio);
void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
+void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
-static inline int bio_associate_blkcg(struct bio *bio,
- struct cgroup_subsys_state *blkcg_css) { return 0; }
+static inline int bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{ return 0; }
+static inline int bio_associate_create_blkg(struct request_queue *q,
+ struct bio *bio) { return 0; }
+static inline int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
+{ return 0; }
static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkcg_association(struct bio *dst,
- struct bio *src) { }
+static inline void bio_clone_blkg_association(struct bio *dst,
+ struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */
#ifdef CONFIG_HIGHMEM
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 6d766a19f2bb..1e76ceebeb5d 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -126,7 +126,7 @@ struct blkcg_gq {
struct request_list rl;
/* reference count */
- atomic_t refcnt;
+ struct percpu_ref refcnt;
/* is this blkg online? protected by both blkcg and q locks */
bool online;
@@ -184,6 +184,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
@@ -230,22 +232,59 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+/**
+ * blkcg_css - find the current css
+ *
+ * Find the css associated with either the kthread or the current task.
+ * This may return a dying css, so it is up to the caller to use tryget logic
+ * to confirm it is alive and well.
+ */
+static inline struct cgroup_subsys_state *blkcg_css(void)
+{
+ struct cgroup_subsys_state *css;
+
+ css = kthread_blkcg();
+ if (css)
+ return css;
+ return task_css(current, io_cgrp_id);
+}
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct blkcg, css) : NULL;
}
-static inline struct blkcg *bio_blkcg(struct bio *bio)
+/**
+ * __bio_blkcg - internal version of bio_blkcg for bfq and cfq
+ *
+ * DO NOT USE.
+ * This version of the function is flawed: it follows a broken paradigm in
+ * which association is performed on the returned css. The css returned by
+ * task_css() may, however, be in the process of dying due to migration of
+ * the current task, so it is not safe to assume that *_get() will succeed.
+ * Both BFQ and CFQ rely on this logic and will need additional work to
+ * handle it more gracefully.
+ */
+static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
- struct cgroup_subsys_state *css;
+ if (bio && bio->bi_blkg)
+ return bio->bi_blkg->blkcg;
+ return css_to_blkcg(blkcg_css());
+}
- if (bio && bio->bi_css)
- return css_to_blkcg(bio->bi_css);
- css = kthread_blkcg();
- if (css)
- return css_to_blkcg(css);
- return css_to_blkcg(task_css(current, io_cgrp_id));
+/**
+ * bio_blkcg - grab the blkcg associated with a bio
+ * @bio: target bio
+ *
+ * This returns the blkcg associated with a bio, or NULL if not associated.
+ * Callers are expected to either handle NULL or know association has been
+ * done prior to calling this.
+ */
+static inline struct blkcg *bio_blkcg(struct bio *bio)
+{
+ if (bio && bio->bi_blkg)
+ return bio->bi_blkg->blkcg;
+ return NULL;
}
static inline bool blk_cgroup_congested(void)
@@ -451,26 +490,35 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
*/
static inline void blkg_get(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- atomic_inc(&blkg->refcnt);
+ percpu_ref_get(&blkg->refcnt);
}
/**
- * blkg_try_get - try and get a blkg reference
+ * blkg_tryget - try and get a blkg reference
* @blkg: blkg to get
*
* This is for use when doing an RCU lookup of the blkg. We may be in the midst
* of freeing this blkg, so we can only use it if the refcnt is not zero.
*/
-static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
+static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
- if (atomic_inc_not_zero(&blkg->refcnt))
- return blkg;
- return NULL;
+ return percpu_ref_tryget(&blkg->refcnt);
}
+/**
+ * blkg_tryget_closest - try and get a blkg ref on the closest blkg
+ * @blkg: blkg to get
+ *
+ * This walks up the blkg tree to find the closest non-dying blkg and returns
+ * the blkg it took a reference on, which may not be the blkg passed in.
+ */
+static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
+{
+ while (!percpu_ref_tryget(&blkg->refcnt))
+ blkg = blkg->parent;
-void __blkg_release_rcu(struct rcu_head *rcu);
+ return blkg;
+}
/**
* blkg_put - put a blkg reference
@@ -478,9 +526,7 @@ void __blkg_release_rcu(struct rcu_head *rcu);
*/
static inline void blkg_put(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- if (atomic_dec_and_test(&blkg->refcnt))
- call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+ percpu_ref_put(&blkg->refcnt);
}
/**
@@ -533,25 +579,36 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
rcu_read_lock();
- blkcg = bio_blkcg(bio);
+ if (bio && bio->bi_blkg) {
+ blkcg = bio->bi_blkg->blkcg;
+ if (blkcg == &blkcg_root)
+ goto rl_use_root;
+
+ blkg_get(bio->bi_blkg);
+ rcu_read_unlock();
+ return &bio->bi_blkg->rl;
+ }
- /* bypass blkg lookup and use @q->root_rl directly for root */
+ blkcg = css_to_blkcg(blkcg_css());
if (blkcg == &blkcg_root)
- goto root_rl;
+ goto rl_use_root;
- /*
- * Try to use blkg->rl. blkg lookup may fail under memory pressure
- * or if either the blkcg or queue is going away. Fall back to
- * root_rl in such cases.
- */
blkg = blkg_lookup(blkcg, q);
if (unlikely(!blkg))
- goto root_rl;
+ blkg = __blkg_lookup_create(blkcg, q);
+
+ if (blkg->blkcg == &blkcg_root || !blkg_tryget(blkg))
+ goto rl_use_root;
- blkg_get(blkg);
rcu_read_unlock();
return &blkg->rl;
-root_rl:
+
+ /*
+	 * Each blkg has its own request_list; however, the root blkcg uses
+	 * the request_queue's root_rl to avoid most of the overhead for the
+	 * root blkcg.
+ */
+rl_use_root:
rcu_read_unlock();
return &q->root_rl;
}
@@ -797,32 +854,26 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
struct bio *bio) { return false; }
#endif
+
+static inline void blkcg_bio_issue_init(struct bio *bio)
+{
+ bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+}
+
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio)
{
- struct blkcg *blkcg;
struct blkcg_gq *blkg;
bool throtl = false;
rcu_read_lock();
- blkcg = bio_blkcg(bio);
-
- /* associate blkcg if bio hasn't attached one */
- bio_associate_blkcg(bio, &blkcg->css);
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg)) {
- spin_lock_irq(q->queue_lock);
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
- blkg = NULL;
- spin_unlock_irq(q->queue_lock);
- }
+ bio_associate_create_blkg(q, bio);
+ blkg = bio->bi_blkg;
throtl = blk_throtl_bio(q, blkg, bio);
if (!throtl) {
- blkg = blkg ?: q->root_blkg;
/*
* If the bio is flagged with BIO_QUEUE_ENTERED it means this
* is a split bio and we would have already accounted for the
@@ -834,6 +885,8 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
}
+ blkcg_bio_issue_init(bio);
+
rcu_read_unlock();
return !throtl;
}
@@ -930,6 +983,7 @@ static inline int blkcg_activate_policy(struct request_queue *q,
static inline void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol) { }
+static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
@@ -945,6 +999,7 @@ static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
+static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio) { return true; }
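A minimal sketch (not from the patch) of the percpu_ref lifecycle the blkg refcount now follows; blkg_example_release is a hypothetical stand-in for the real release path.

#include <linux/percpu-refcount.h>

static void blkg_example_release(struct percpu_ref *ref)
{
	/* last reference gone: free the blkg (via RCU in the real code) */
}

static int blkg_example_init(struct percpu_ref *ref)
{
	/* starts at one reference, counted per-cpu until killed */
	return percpu_ref_init(ref, blkg_example_release, 0, GFP_KERNEL);
}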
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 1da59c16f637..2286dc12c6bc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -203,6 +203,10 @@ enum {
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
+struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops,
+ unsigned int queue_depth,
+ unsigned int set_flags);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);
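Illustrative use of the new single-queue helper; example_mq_ops is hypothetical and would normally provide at least .queue_rq.

static const struct blk_mq_ops example_mq_ops; /* .queue_rq etc. elided */
static struct blk_mq_tag_set example_tag_set;

static struct request_queue *example_create_queue(void)
{
	/* sets up the tag set and allocates the queue in one call */
	return blk_mq_init_sq_queue(&example_tag_set, &example_mq_ops, 64,
				    BLK_MQ_F_SHOULD_MERGE);
}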
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
new file mode 100644
index 000000000000..b80c65aba249
--- /dev/null
+++ b/include/linux/blk-pm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BLK_PM_H_
+#define _BLK_PM_H_
+
+struct device;
+struct request_queue;
+
+/*
+ * block layer runtime pm functions
+ */
+#ifdef CONFIG_PM
+extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
+extern int blk_pre_runtime_suspend(struct request_queue *q);
+extern void blk_post_runtime_suspend(struct request_queue *q, int err);
+extern void blk_pre_runtime_resume(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q, int err);
+extern void blk_set_runtime_active(struct request_queue *q);
+#else
+static inline void blk_pm_runtime_init(struct request_queue *q,
+ struct device *dev) {}
+#endif
+
+#endif /* _BLK_PM_H_ */
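A hedged sketch of how a block driver would opt into runtime PM through the relocated helpers; the probe function and autosuspend policy are illustrative only.

#include <linux/blk-pm.h>
#include <linux/pm_runtime.h>

static void example_probe_pm(struct request_queue *q, struct device *dev)
{
	blk_pm_runtime_init(q, dev);	/* tie q's activity to dev's runtime PM */
	pm_runtime_set_autosuspend_delay(dev, 5000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);		/* actual policy is up to the driver */
}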
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index f6dfb30737d8..9578c7ab1eb6 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -178,7 +178,6 @@ struct bio {
* release. Read comment on top of bio_associate_current().
*/
struct io_context *bi_ioc;
- struct cgroup_subsys_state *bi_css;
struct blkcg_gq *bi_blkg;
struct bio_issue bi_issue;
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6980014357d4..7d423721b327 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -108,7 +108,7 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_QUIET ((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
-/* account I/O stat */
+/* account into disk and partition IO statistics */
#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
@@ -116,7 +116,7 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_PM ((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED ((__force req_flags_t)(1 << 16))
-/* IO stats tracking on */
+/* track IO completion time */
#define RQF_STATS ((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
bio chain. */
@@ -504,6 +504,12 @@ struct request_queue {
* various queue flags, see QUEUE_* below
*/
unsigned long queue_flags;
+ /*
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
+ * processed.
+ */
+ atomic_t pm_only;
/*
* ida allocated id for this queue. Used to index queues from
@@ -679,7 +685,7 @@ struct request_queue {
#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */
#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */
+#define QUEUE_FLAG_IO_STAT 10 /* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */
@@ -693,12 +699,12 @@ struct request_queue {
#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */
#define QUEUE_FLAG_DAX 23 /* device supports DAX */
-#define QUEUE_FLAG_STATS 24 /* track rq completion times */
+#define QUEUE_FLAG_STATS 24 /* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
-#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
+#define QUEUE_FLAG_PCI_P2PDMA 29 /* device supports PCI p2p requests */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
@@ -731,17 +737,18 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q) \
test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
+#define blk_queue_pci_p2pdma(q) \
+ test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
-#define blk_queue_preempt_only(q) \
- test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
-extern int blk_set_preempt_only(struct request_queue *q);
-extern void blk_clear_preempt_only(struct request_queue *q);
+extern void blk_set_pm_only(struct request_queue *q);
+extern void blk_clear_pm_only(struct request_queue *q);
static inline int queue_in_flight(struct request_queue *q)
{
@@ -1281,29 +1288,6 @@ extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
/*
- * block layer runtime pm functions
- */
-#ifdef CONFIG_PM
-extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
-extern int blk_pre_runtime_suspend(struct request_queue *q);
-extern void blk_post_runtime_suspend(struct request_queue *q, int err);
-extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
-extern void blk_set_runtime_active(struct request_queue *q);
-#else
-static inline void blk_pm_runtime_init(struct request_queue *q,
- struct device *dev) {}
-static inline int blk_pre_runtime_suspend(struct request_queue *q)
-{
- return -ENOSYS;
-}
-static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
-static inline void blk_pre_runtime_resume(struct request_queue *q) {}
-static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
-static inline void blk_set_runtime_active(struct request_queue *q) {}
-#endif
-
-/*
* blk_plug permits building a queue of related requests by holding the I/O
* fragments for a short period. This allows merging of sequential requests
* into single larger request. As the requests are moved from a per-task list to
@@ -1676,94 +1660,6 @@ static inline void put_dev_sector(Sector p)
put_page(p.v);
}
-static inline bool __bvec_gap_to_prev(struct request_queue *q,
- struct bio_vec *bprv, unsigned int offset)
-{
- return offset ||
- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
-}
-
-/*
- * Check if adding a bio_vec after bprv with offset would create a gap in
- * the SG list. Most drivers don't care about this, but some do.
- */
-static inline bool bvec_gap_to_prev(struct request_queue *q,
- struct bio_vec *bprv, unsigned int offset)
-{
- if (!queue_virt_boundary(q))
- return false;
- return __bvec_gap_to_prev(q, bprv, offset);
-}
-
-/*
- * Check if the two bvecs from two bios can be merged to one segment.
- * If yes, no need to check gap between the two bios since the 1st bio
- * and the 1st bvec in the 2nd bio can be handled in one segment.
- */
-static inline bool bios_segs_mergeable(struct request_queue *q,
- struct bio *prev, struct bio_vec *prev_last_bv,
- struct bio_vec *next_first_bv)
-{
- if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
- return false;
- if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
- return false;
- if (prev->bi_seg_back_size + next_first_bv->bv_len >
- queue_max_segment_size(q))
- return false;
- return true;
-}
-
-static inline bool bio_will_gap(struct request_queue *q,
- struct request *prev_rq,
- struct bio *prev,
- struct bio *next)
-{
- if (bio_has_data(prev) && queue_virt_boundary(q)) {
- struct bio_vec pb, nb;
-
- /*
- * don't merge if the 1st bio starts with non-zero
- * offset, otherwise it is quite difficult to respect
- * sg gap limit. We work hard to merge a huge number of small
- * single bios in case of mkfs.
- */
- if (prev_rq)
- bio_get_first_bvec(prev_rq->bio, &pb);
- else
- bio_get_first_bvec(prev, &pb);
- if (pb.bv_offset)
- return true;
-
- /*
- * We don't need to worry about the situation that the
- * merged segment ends in unaligned virt boundary:
- *
- * - if 'pb' ends aligned, the merged segment ends aligned
- * - if 'pb' ends unaligned, the next bio must include
- * one single bvec of 'nb', otherwise the 'nb' can't
- * merge with 'pb'
- */
- bio_get_last_bvec(prev, &pb);
- bio_get_first_bvec(next, &nb);
-
- if (!bios_segs_mergeable(q, prev, &pb, &nb))
- return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
- }
-
- return false;
-}
-
-static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
-{
- return bio_will_gap(req->q, req, req->biotail, bio);
-}
-
-static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
-{
- return bio_will_gap(req->q, NULL, bio, req->bio);
-}
-
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
@@ -1843,26 +1739,6 @@ queue_max_integrity_segments(struct request_queue *q)
return q->limits.max_integrity_segments;
}
-static inline bool integrity_req_gap_back_merge(struct request *req,
- struct bio *next)
-{
- struct bio_integrity_payload *bip = bio_integrity(req->bio);
- struct bio_integrity_payload *bip_next = bio_integrity(next);
-
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
- bip_next->bip_vec[0].bv_offset);
-}
-
-static inline bool integrity_req_gap_front_merge(struct request *req,
- struct bio *bio)
-{
- struct bio_integrity_payload *bip = bio_integrity(bio);
- struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
-
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
- bip_next->bip_vec[0].bv_offset);
-}
-
/**
* bio_integrity_intervals - Return number of integrity intervals for a bio
* @bi: blk_integrity profile for device
@@ -1947,17 +1823,6 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
return true;
}
-static inline bool integrity_req_gap_back_merge(struct request *req,
- struct bio *next)
-{
- return false;
-}
-static inline bool integrity_req_gap_front_merge(struct request *req,
- struct bio *bio)
-{
- return false;
-}
-
static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
unsigned int sectors)
{
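As a rough illustration (not part of the patch): the preempt-only flag is replaced by a counting pm_only gate, so nested callers no longer clobber each other. A minimal sketch of how a driver's runtime-PM path might use the new API; example_hw_suspend() is a hypothetical hardware callback.

static int example_runtime_suspend(struct request_queue *q)
{
	blk_set_pm_only(q);	/* from here, only RQF_PM/RQF_PREEMPT requests pass */

	if (example_hw_suspend() < 0) {		/* hypothetical HW callback */
		blk_clear_pm_only(q);		/* drop our count on failure */
		return -EBUSY;
	}
	return 0;
}

Because blk_queue_pm_only() reads an atomic counter rather than a single bit, two independent suspend paths can each set and clear the gate safely.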
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index f91b0f8ff3a9..588dd5f0bd85 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -2,6 +2,7 @@
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H
+#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
@@ -22,7 +23,11 @@ struct bpf_cgroup_storage;
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
-DECLARE_PER_CPU(void*, bpf_cgroup_storage);
+DECLARE_PER_CPU(struct bpf_cgroup_storage*,
+ bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+
+#define for_each_cgroup_storage_type(stype) \
+ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
struct bpf_cgroup_storage_map;
@@ -32,7 +37,10 @@ struct bpf_storage_buffer {
};
struct bpf_cgroup_storage {
- struct bpf_storage_buffer *buf;
+ union {
+ struct bpf_storage_buffer *buf;
+ void __percpu *percpu_buf;
+ };
struct bpf_cgroup_storage_map *map;
struct bpf_cgroup_storage_key key;
struct list_head list;
@@ -43,7 +51,7 @@ struct bpf_cgroup_storage {
struct bpf_prog_list {
struct list_head node;
struct bpf_prog *prog;
- struct bpf_cgroup_storage *storage;
+ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
struct bpf_prog_array;
@@ -101,18 +109,26 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
short access, enum bpf_attach_type type);
-static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage)
+static inline enum bpf_cgroup_storage_type cgroup_storage_type(
+ struct bpf_map *map)
{
- struct bpf_storage_buffer *buf;
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
+ return BPF_CGROUP_STORAGE_PERCPU;
+
+ return BPF_CGROUP_STORAGE_SHARED;
+}
- if (!storage)
- return;
+static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
+ *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
+{
+ enum bpf_cgroup_storage_type stype;
- buf = READ_ONCE(storage->buf);
- this_cpu_write(bpf_cgroup_storage, &buf->data[0]);
+ for_each_cgroup_storage_type(stype)
+ this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
-struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
+struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
+ enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
struct cgroup *cgroup,
@@ -121,6 +137,10 @@ void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
+ void *value, u64 flags);
+
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
@@ -265,15 +285,24 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
return -EINVAL;
}
-static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {}
+static inline void bpf_cgroup_storage_set(
+ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
- struct bpf_prog *prog) { return 0; }
+ struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; }
static inline void bpf_cgroup_storage_free(
struct bpf_cgroup_storage *storage) {}
+static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
+ void *value) {
+ return 0;
+}
+static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
+ void *key, void *value, u64 flags) {
+ return 0;
+}
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
@@ -293,6 +322,8 @@ static inline void bpf_cgroup_storage_free(
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
+#define for_each_cgroup_storage_type(stype) for (; false; )
+
#endif /* CONFIG_CGROUP_BPF */
#endif /* _BPF_CGROUP_H */
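With storage split into shared and per-cpu flavours, callers now handle an array indexed by the new type enum. A sketch of the allocate-and-publish pattern, assuming a valid prog pointer and eliding error handling:

	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);

	bpf_cgroup_storage_set(storage);	/* publish the per-cpu pointers */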
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 523481a3471b..33014ae73103 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -39,6 +39,9 @@ struct bpf_map_ops {
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_map *map, void *key);
+ int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
+ int (*map_pop_elem)(struct bpf_map *map, void *value);
+ int (*map_peek_elem)(struct bpf_map *map, void *value);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
@@ -138,6 +141,7 @@ enum bpf_arg_type {
ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
+ ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
/* the following constraints used to prototype bpf_memcmp() and other
* functions that access data on eBPF program stack
@@ -154,6 +158,7 @@ enum bpf_arg_type {
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
+ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */
};
/* type of values returned from helper functions */
@@ -162,6 +167,7 @@ enum bpf_return_type {
RET_VOID, /* function doesn't return anything */
RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
+ RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
};
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -212,6 +218,9 @@ enum bpf_reg_type {
PTR_TO_PACKET_META, /* skb->data - meta_len */
PTR_TO_PACKET, /* reg points to skb->data */
PTR_TO_PACKET_END, /* skb->data + headlen */
+ PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
+ PTR_TO_SOCKET, /* reg points to struct bpf_sock */
+ PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
};
/* The information passed from prog-specific *_is_valid_access
@@ -258,6 +267,7 @@ struct bpf_verifier_ops {
struct bpf_prog_offload_ops {
int (*insn_hook)(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
+ int (*finalize)(struct bpf_verifier_env *env);
};
struct bpf_prog_offload {
@@ -271,6 +281,14 @@ struct bpf_prog_offload {
u32 jited_len;
};
+enum bpf_cgroup_storage_type {
+ BPF_CGROUP_STORAGE_SHARED,
+ BPF_CGROUP_STORAGE_PERCPU,
+ __BPF_CGROUP_STORAGE_MAX
+};
+
+#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+
struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
@@ -288,7 +306,7 @@ struct bpf_prog_aux {
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
- struct bpf_map *cgroup_storage;
+ struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
@@ -334,6 +352,11 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
+typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
+ const struct bpf_insn *src,
+ struct bpf_insn *dst,
+ struct bpf_prog *prog,
+ u32 *target_size);
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
@@ -357,7 +380,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
*/
struct bpf_prog_array_item {
struct bpf_prog *prog;
- struct bpf_cgroup_storage *cgroup_storage;
+ struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
struct bpf_prog_array {
@@ -718,33 +741,18 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
-#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
-struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
-struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
-int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
-int sockmap_get_from_fd(const union bpf_attr *attr, int type,
- struct bpf_prog *prog);
+#if defined(CONFIG_BPF_STREAM_PARSER)
+int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
+int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
-static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
-{
- return NULL;
-}
-
-static inline struct sock *__sock_hash_lookup_elem(struct bpf_map *map,
- void *key)
-{
- return NULL;
-}
-
-static inline int sock_map_prog(struct bpf_map *map,
- struct bpf_prog *prog,
- u32 type)
+static inline int sock_map_prog_update(struct bpf_map *map,
+ struct bpf_prog *prog, u32 which)
{
return -EOPNOTSUPP;
}
-static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
- struct bpf_prog *prog)
+static inline int sock_map_get_from_fd(const union bpf_attr *attr,
+ struct bpf_prog *prog)
{
return -EINVAL;
}
@@ -806,6 +814,9 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_map_push_elem_proto;
+extern const struct bpf_func_proto bpf_map_pop_elem_proto;
+extern const struct bpf_func_proto bpf_map_peek_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
@@ -820,6 +831,10 @@ extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
+extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
+extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
+extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
@@ -827,4 +842,29 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto;
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+#if defined(CONFIG_NET)
+bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size);
+#else
+static inline bool bpf_sock_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ return 0;
+}
+#endif
+
#endif /* _LINUX_BPF_H */
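The three new map_ops callbacks give queue/stack-style maps a value-only interface with no key argument. A sketch of how a FIFO-style map might wire them up; the bodies are placeholders standing in for a real ring buffer:

static int example_push(struct bpf_map *map, void *value, u64 flags)
{
	return 0;	/* would copy *value into the tail slot */
}

static int example_pop(struct bpf_map *map, void *value)
{
	return -ENOENT;	/* would copy the head slot out and consume it */
}

static int example_peek(struct bpf_map *map, void *value)
{
	return -ENOENT;	/* like pop, but without consuming the element */
}

const struct bpf_map_ops example_queue_ops = {
	.map_push_elem	= example_push,
	.map_pop_elem	= example_pop,
	.map_peek_elem	= example_peek,
};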
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index cd26c090e7c0..44d9ab4809bd 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -16,6 +16,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)
+BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector)
#endif
#ifdef CONFIG_BPF_EVENTS
BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
@@ -42,6 +43,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
@@ -49,13 +51,13 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops)
#ifdef CONFIG_PERF_EVENTS
-BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
-#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET)
+#if defined(CONFIG_BPF_STREAM_PARSER)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
#endif
@@ -67,3 +69,5 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
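bpf_types.h is an x-macro table: each includer defines BPF_PROG_TYPE/BPF_MAP_TYPE to extract the column it needs. A sketch of the usual dispatch-table expansion (the array name here is illustrative; it assumes <linux/bpf.h> was included first):

#define BPF_PROG_TYPE(_id, _name)	/* not needed for this table */
#define BPF_MAP_TYPE(_id, _ops)	[_id] = &_ops,
static const struct bpf_map_ops *const example_map_ops[] = {
#include <linux/bpf_types.h>
};
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE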
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 38b04f559ad3..9e8056ec20fa 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -41,6 +41,7 @@ enum bpf_reg_liveness {
};
struct bpf_reg_state {
+ /* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
union {
/* valid when type == PTR_TO_PACKET */
@@ -57,9 +58,10 @@ struct bpf_reg_state {
* offset, so they can share range knowledge.
* For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
* came from, when one is tested for != NULL.
+ * For PTR_TO_SOCKET this is used to share which pointers retain the
+ * same reference to the socket, to determine proper reference freeing.
*/
u32 id;
- /* Ordering of fields matters. See states_equal() */
/* For scalar types (SCALAR_VALUE), this represents our knowledge of
* the actual value.
* For pointer types, this represents the variable part of the offset
@@ -76,15 +78,15 @@ struct bpf_reg_state {
s64 smax_value; /* maximum possible (s64)value */
u64 umin_value; /* minimum possible (u64)value */
u64 umax_value; /* maximum possible (u64)value */
+ /* parentage chain for liveness checking */
+ struct bpf_reg_state *parent;
/* Inside the callee two registers can be both PTR_TO_STACK like
* R1=fp-8 and R2=fp-8, but one of them points to this function stack
* while another to the caller's stack. To differentiate them 'frameno'
* is used which is an index in bpf_verifier_state->frame[] array
* pointing to bpf_func_state.
- * This field must be second to last, for states_equal() reasons.
*/
u32 frameno;
- /* This field must be last, for states_equal() reasons. */
enum bpf_reg_liveness live;
};
@@ -102,12 +104,22 @@ struct bpf_stack_state {
u8 slot_type[BPF_REG_SIZE];
};
+struct bpf_reference_state {
+ /* Track each reference created with a unique id, even if the same
+ * instruction creates the reference multiple times (eg, via CALL).
+ */
+ int id;
+ /* Instruction where the allocation of this reference occurred. This
+ * is used purely to inform the user of a reference leak.
+ */
+ int insn_idx;
+};
+
/* state of the program:
* type of all registers and stack info
*/
struct bpf_func_state {
struct bpf_reg_state regs[MAX_BPF_REG];
- struct bpf_verifier_state *parent;
/* index of call instruction that called into this func */
int callsite;
/* stack frame number of this function state from pov of
@@ -120,7 +132,9 @@ struct bpf_func_state {
*/
u32 subprogno;
- /* should be second to last. See copy_func_state() */
+ /* The following fields should be last. See copy_func_state() */
+ int acquired_refs;
+ struct bpf_reference_state *refs;
int allocated_stack;
struct bpf_stack_state *stack;
};
@@ -129,10 +143,20 @@ struct bpf_func_state {
struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
- struct bpf_verifier_state *parent;
u32 curframe;
};
+#define bpf_get_spilled_reg(slot, frame) \
+ (((slot < frame->allocated_stack / BPF_REG_SIZE) && \
+ (frame->stack[slot].slot_type[0] == STACK_SPILL)) \
+ ? &frame->stack[slot].spilled_ptr : NULL)
+
+/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
+#define bpf_for_each_spilled_reg(iter, frame, reg) \
+ for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \
+ iter < frame->allocated_stack / BPF_REG_SIZE; \
+ iter++, reg = bpf_get_spilled_reg(iter, frame))
+
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
@@ -206,15 +230,21 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...);
-static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
+static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *cur = env->cur_state;
- return cur->frame[cur->curframe]->regs;
+ return cur->frame[cur->curframe];
+}
+
+static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
+{
+ return cur_func(env)->regs;
}
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
+int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
#endif /* _LINUX_BPF_VERIFIER_H */
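The new iterator walks the stack slots of one frame and yields only spilled registers. A sketch of the intended pattern; what a caller does with each register is situation-specific (e.g. invalidating PTR_TO_SOCKET copies after a release):

	struct bpf_func_state *state = cur_func(env);
	struct bpf_reg_state *reg;
	int i;

	bpf_for_each_spilled_reg(i, state, reg) {
		if (!reg)
			continue;	/* slot holds data, not a spilled register */
		/* inspect or mutate *reg here */
	}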
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 96225a77c112..7b73ef7f902d 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -242,7 +242,7 @@ int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
-static inline int block_page_mkwrite_return(int err)
+static inline vm_fault_t block_page_mkwrite_return(int err)
{
if (err == 0)
return VM_FAULT_LOCKED;
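Retyping the helper to vm_fault_t lets ->page_mkwrite implementations return its result directly, with no cast. A minimal sketch; example_get_block is a hypothetical filesystem callback:

static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	int err = block_page_mkwrite(vmf->vma, vmf, example_get_block);

	return block_page_mkwrite_return(err);	/* errno -> VM_FAULT_* */
}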
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index fe7a22dd133b..02c73c6aa805 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -40,8 +40,6 @@ struct bvec_iter {
unsigned int bi_idx; /* current index into bvl_vec */
- unsigned int bi_done; /* number of bytes completed */
-
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
};
@@ -85,7 +83,6 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
bytes -= len;
iter->bi_size -= len;
iter->bi_bvec_done += len;
- iter->bi_done += len;
if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
iter->bi_bvec_done = 0;
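With bi_done removed, "bytes completed so far" is no longer tracked in the iterator itself; a caller that needs it can derive it from a saved starting iterator, since bi_size only ever shrinks as the iterator advances. A hypothetical helper sketching that:

static inline unsigned int example_iter_done(const struct bvec_iter *start,
					     const struct bvec_iter *cur)
{
	return start->bi_size - cur->bi_size;
}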
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ff20b677fb9f..22254c1fe1c5 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -412,6 +412,7 @@ struct cgroup {
* specific task are charged to the dom_cgrp.
*/
struct cgroup *dom_cgrp;
+ struct cgroup *old_dom_cgrp; /* used while enabling threaded */
/* per-cpu recursive resource statistics */
struct cgroup_rstat_cpu __percpu *rstat_cpu;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 32c553556bbd..b622d6608605 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -93,6 +93,8 @@ extern struct css_set init_css_set;
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
@@ -567,20 +569,11 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
int ancestor_level)
{
- struct cgroup *ptr;
-
if (cgrp->level < ancestor_level)
return NULL;
-
- for (ptr = cgrp;
- ptr && ptr->level > ancestor_level;
- ptr = cgroup_parent(ptr))
- ;
-
- if (ptr && ptr->level == ancestor_level)
- return ptr;
-
- return NULL;
+ while (cgrp && cgrp->level > ancestor_level)
+ cgrp = cgroup_parent(cgrp);
+ return cgrp;
}
/**
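The rewritten walk is behaviourally equivalent for callers, but its boundary cases are worth spelling out. Two illustrative calls, assuming a valid cgrp:

	struct cgroup *self = cgroup_ancestor(cgrp, cgrp->level);	/* returns cgrp itself */
	struct cgroup *none = cgroup_ancestor(cgrp, cgrp->level + 1);	/* returns NULL */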
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 308918928767..b21db536fd52 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -241,6 +241,11 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz
__clocksource_update_freq_scale(cs, 1000, khz);
}
+#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT
+extern void clocksource_arch_init(struct clocksource *cs);
+#else
+static inline void clocksource_arch_init(struct clocksource *cs) { }
+#endif
extern int timekeeping_notify(struct clocksource *clock);
@@ -257,9 +262,6 @@ extern int clocksource_i8253_init(void);
#define TIMER_OF_DECLARE(name, compat, fn) \
OF_DECLARE_1_RET(timer, name, compat, fn)
-#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
- TIMER_OF_DECLARE(name, compat, fn)
-
#ifdef CONFIG_TIMER_PROBE
extern void timer_probe(void);
#else
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 1a3c4f37e908..d30e4dbd4be2 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -7,7 +7,7 @@
*/
#include <linux/types.h>
-#include <linux/compat_time.h>
+#include <linux/time.h>
#include <linux/stat.h>
#include <linux/param.h> /* for HZ */
@@ -103,6 +103,9 @@ typedef struct compat_sigaltstack {
compat_size_t ss_size;
} compat_stack_t;
#endif
+#ifndef COMPAT_MINSIGSTKSZ
+#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ
+#endif
#define compat_jiffies_to_clock_t(x) \
(((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
@@ -110,19 +113,12 @@ typedef struct compat_sigaltstack {
typedef __compat_uid32_t compat_uid_t;
typedef __compat_gid32_t compat_gid_t;
-typedef compat_ulong_t compat_aio_context_t;
-
struct compat_sel_arg_struct;
struct rusage;
-struct compat_utimbuf {
- compat_time_t actime;
- compat_time_t modtime;
-};
-
struct compat_itimerval {
- struct compat_timeval it_interval;
- struct compat_timeval it_value;
+ struct old_timeval32 it_interval;
+ struct old_timeval32 it_value;
};
struct itimerval;
@@ -146,7 +142,7 @@ struct compat_timex {
compat_long_t constant;
compat_long_t precision;
compat_long_t tolerance;
- struct compat_timeval time;
+ struct old_timeval32 time;
compat_long_t tick;
compat_long_t ppsfreq;
compat_long_t jitter;
@@ -307,8 +303,8 @@ struct compat_rlimit {
};
struct compat_rusage {
- struct compat_timeval ru_utime;
- struct compat_timeval ru_stime;
+ struct old_timeval32 ru_utime;
+ struct old_timeval32 ru_stime;
compat_long_t ru_maxrss;
compat_long_t ru_ixrss;
compat_long_t ru_idrss;
@@ -452,13 +448,13 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
-int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from);
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
+int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from);
+int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
-static inline int compat_timeval_compare(struct compat_timeval *lhs,
- struct compat_timeval *rhs)
+static inline int old_timeval32_compare(struct old_timeval32 *lhs,
+ struct old_timeval32 *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
@@ -467,8 +463,8 @@ static inline int compat_timeval_compare(struct compat_timeval *lhs,
return lhs->tv_usec - rhs->tv_usec;
}
-static inline int compat_timespec_compare(struct compat_timespec *lhs,
- struct compat_timespec *rhs)
+static inline int old_timespec32_compare(struct old_timespec32 *lhs,
+ struct old_timespec32 *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
@@ -552,12 +548,12 @@ asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
struct io_event __user *events,
- struct compat_timespec __user *timeout);
+ struct old_timespec32 __user *timeout);
asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
struct io_event __user *events,
- struct compat_timespec __user *timeout,
+ struct old_timespec32 __user *timeout,
const struct __compat_aio_sigset __user *usig);
/* fs/cookies.c */
@@ -642,11 +638,11 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
- struct compat_timespec __user *tsp,
+ struct old_timespec32 __user *tsp,
void __user *sig);
asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
unsigned int nfds,
- struct compat_timespec __user *tsp,
+ struct old_timespec32 __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
@@ -671,15 +667,15 @@ asmlinkage long compat_sys_newfstat(unsigned int fd,
/* fs/timerfd.c */
asmlinkage long compat_sys_timerfd_gettime(int ufd,
- struct compat_itimerspec __user *otmr);
+ struct old_itimerspec32 __user *otmr);
asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
- const struct compat_itimerspec __user *utmr,
- struct compat_itimerspec __user *otmr);
+ const struct old_itimerspec32 __user *utmr,
+ struct old_itimerspec32 __user *otmr);
/* fs/utimes.c */
asmlinkage long compat_sys_utimensat(unsigned int dfd,
const char __user *filename,
- struct compat_timespec __user *t,
+ struct old_timespec32 __user *t,
int flags);
/* kernel/exit.c */
@@ -691,7 +687,7 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t,
/* kernel/futex.c */
asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
- struct compat_timespec __user *utime, u32 __user *uaddr2,
+ struct old_timespec32 __user *utime, u32 __user *uaddr2,
u32 val3);
asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
@@ -701,8 +697,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
compat_size_t __user *len_ptr);
/* kernel/hrtimer.c */
-asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp);
+asmlinkage long compat_sys_nanosleep(struct old_timespec32 __user *rqtp,
+ struct old_timespec32 __user *rmtp);
/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
@@ -722,19 +718,19 @@ asmlinkage long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
timer_t __user *created_timer_id);
asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
- struct compat_itimerspec __user *setting);
+ struct old_itimerspec32 __user *setting);
asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
- struct compat_itimerspec __user *new,
- struct compat_itimerspec __user *old);
+ struct old_itimerspec32 __user *new,
+ struct old_itimerspec32 __user *old);
asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
- struct compat_timespec __user *tp);
+ struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
- struct compat_timespec __user *tp);
+ struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
- struct compat_timespec __user *tp);
+ struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
- struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp);
+ struct old_timespec32 __user *rqtp,
+ struct old_timespec32 __user *rmtp);
/* kernel/ptrace.c */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
@@ -748,7 +744,7 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval);
+ struct old_timespec32 __user *interval);
/* kernel/signal.c */
asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
@@ -768,7 +764,7 @@ asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
struct compat_siginfo __user *uinfo,
- struct compat_timespec __user *uts, compat_size_t sigsetsize);
+ struct old_timespec32 __user *uts, compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
/* No generic prototype for rt_sigreturn */
@@ -782,9 +778,9 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource,
asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
/* kernel/time.c */
-asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
+asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
-asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
+asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
@@ -798,11 +794,11 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
const char __user *u_msg_ptr,
compat_size_t msg_len, unsigned int msg_prio,
- const struct compat_timespec __user *u_abs_timeout);
+ const struct old_timespec32 __user *u_abs_timeout);
asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
char __user *u_msg_ptr,
compat_size_t msg_len, unsigned int __user *u_msg_prio,
- const struct compat_timespec __user *u_abs_timeout);
+ const struct old_timespec32 __user *u_abs_timeout);
asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
const struct compat_sigevent __user *u_notification);
asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
@@ -819,7 +815,7 @@ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
/* ipc/sem.c */
asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
- unsigned nsems, const struct compat_timespec __user *timeout);
+ unsigned nsems, const struct old_timespec32 __user *timeout);
/* ipc/shm.c */
asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
@@ -876,7 +872,7 @@ asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
struct compat_siginfo __user *uinfo);
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags,
- struct compat_timespec __user *timeout);
+ struct old_timespec32 __user *timeout);
asmlinkage long compat_sys_wait4(compat_pid_t pid,
compat_uint_t __user *stat_addr, int options,
struct compat_rusage __user *ru);
@@ -928,7 +924,7 @@ asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
asmlinkage long compat_sys_open(const char __user *filename, int flags,
umode_t mode);
asmlinkage long compat_sys_utimes(const char __user *filename,
- struct compat_timeval __user *t);
+ struct old_timeval32 __user *t);
/* __ARCH_WANT_SYSCALL_NO_FLAGS */
asmlinkage long compat_sys_signalfd(int ufd,
@@ -942,15 +938,15 @@ asmlinkage long compat_sys_newlstat(const char __user *filename,
struct compat_stat __user *statbuf);
/* __ARCH_WANT_SYSCALL_DEPRECATED */
-asmlinkage long compat_sys_time(compat_time_t __user *tloc);
+asmlinkage long compat_sys_time(old_time32_t __user *tloc);
asmlinkage long compat_sys_utime(const char __user *filename,
- struct compat_utimbuf __user *t);
+ struct old_utimbuf32 __user *t);
asmlinkage long compat_sys_futimesat(unsigned int dfd,
const char __user *filename,
- struct compat_timeval __user *t);
+ struct old_timeval32 __user *t);
asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timeval __user *tvp);
+ struct old_timeval32 __user *tvp);
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
unsigned flags);
@@ -983,7 +979,7 @@ asmlinkage long compat_sys_sigaction(int sig,
#endif
/* obsolete: kernel/time/time.c */
-asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
+asmlinkage long compat_sys_stime(old_time32_t __user *tptr);
/* obsolete: net/socket.c */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
@@ -1002,15 +998,15 @@ static inline bool in_compat_syscall(void) { return is_compat_task(); }
#endif
/**
- * ns_to_compat_timeval - Compat version of ns_to_timeval
+ * ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
*
- * Returns the compat_timeval representation of the nsec parameter.
+ * Returns the old_timeval32 representation of the nsec parameter.
*/
-static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
+static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec)
{
struct timeval tv;
- struct compat_timeval ctv;
+ struct old_timeval32 ctv;
tv = ns_to_timeval(nsec);
ctv.tv_sec = tv.tv_sec;
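The renamed 32-bit time types keep their layout; only the naming changes. A small sketch using the renamed conversion and comparison helpers together:

	struct old_timeval32 a = ns_to_old_timeval32(1500000000LL);	/* 1.5 s */
	struct old_timeval32 b = ns_to_old_timeval32(2000000000LL);	/* 2.0 s */
	int earlier = old_timeval32_compare(&a, &b) < 0;	/* 1: a precedes b */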
diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h
deleted file mode 100644
index e70bfd1d2c3f..000000000000
--- a/include/linux/compat_time.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_COMPAT_TIME_H
-#define _LINUX_COMPAT_TIME_H
-
-#include <linux/types.h>
-#include <linux/time64.h>
-
-typedef s32 compat_time_t;
-
-struct compat_timespec {
- compat_time_t tv_sec;
- s32 tv_nsec;
-};
-
-struct compat_timeval {
- compat_time_t tv_sec;
- s32 tv_usec;
-};
-
-struct compat_itimerspec {
- struct compat_timespec it_interval;
- struct compat_timespec it_value;
-};
-
-extern int compat_get_timespec64(struct timespec64 *, const void __user *);
-extern int compat_put_timespec64(const struct timespec64 *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits);
-
-#endif /* _LINUX_COMPAT_TIME_H */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 763bbad1e258..90ddfefb6c2b 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -79,20 +79,6 @@
#define __noretpoline __attribute__((indirect_branch("keep")))
#endif
-/*
- * it doesn't make sense on ARM (currently the only user of __naked)
- * to trace naked functions because then mcount is called without
- * stack and frame pointer being set up and there is no chance to
- * restore the lr register to the value before mcount was called.
- *
- * The asm() bodies of naked functions often depend on standard calling
- * conventions, therefore they must be noinline and noclone.
- *
- * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
- * See GCC PR44290.
- */
-#define __naked __attribute__((naked)) noinline __noclone notrace
-
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
#define __optimize(level) __attribute__((__optimize__(level)))
@@ -208,6 +194,12 @@
* Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
*/
#define __no_sanitize_address __attribute__((no_sanitize_address))
+#ifdef CONFIG_KASAN
+#define __no_sanitize_address_or_inline \
+ __no_sanitize_address __maybe_unused notrace
+#else
+#define __no_sanitize_address_or_inline inline
+#endif
#endif
#if GCC_VERSION >= 50100
@@ -225,6 +217,7 @@
#if !defined(__no_sanitize_address)
#define __no_sanitize_address
+#define __no_sanitize_address_or_inline inline
#endif
/*
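The new macro resolves to "uninstrumented, kept out of line" under KASAN and to plain inline otherwise. A sketch of the intended use, for code that legitimately reads memory KASAN would consider poisoned:

static __no_sanitize_address_or_inline
unsigned long example_read_word(const unsigned long *p)
{
	return *p;	/* may deliberately touch a redzone */
}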
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 681d866efb1e..1921545c6351 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -99,22 +99,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
#define annotate_reachable() ({ \
- asm volatile("%c0:\n\t" \
- ".pushsection .discard.reachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ asm volatile("ANNOTATE_REACHABLE counter=%c0" \
+ : : "i" (__COUNTER__)); \
})
#define annotate_unreachable() ({ \
- asm volatile("%c0:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ asm volatile("ANNOTATE_UNREACHABLE counter=%c0" \
+ : : "i" (__COUNTER__)); \
})
-#define ASM_UNREACHABLE \
- "999:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long 999b - .\n\t" \
- ".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
@@ -299,6 +290,45 @@ static inline void *offset_to_ptr(const int *off)
return (void *)((unsigned long)off + *off);
}
+#else /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+#ifndef LINKER_SCRIPT
+
+#ifdef CONFIG_STACK_VALIDATION
+.macro ANNOTATE_UNREACHABLE counter:req
+\counter:
+ .pushsection .discard.unreachable
+ .long \counter\()b -.
+ .popsection
+.endm
+
+.macro ANNOTATE_REACHABLE counter:req
+\counter:
+ .pushsection .discard.reachable
+ .long \counter\()b -.
+ .popsection
+.endm
+
+.macro ASM_UNREACHABLE
+999:
+ .pushsection .discard.unreachable
+ .long 999b - .
+ .popsection
+.endm
+#else /* CONFIG_STACK_VALIDATION */
+.macro ANNOTATE_UNREACHABLE counter:req
+.endm
+
+.macro ANNOTATE_REACHABLE counter:req
+.endm
+
+.macro ASM_UNREACHABLE
+.endm
+#endif /* CONFIG_STACK_VALIDATION */
+
+#endif /* LINKER_SCRIPT */
+#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#ifndef __optimize
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 3525c179698c..97cfe29b3f0a 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -198,7 +198,6 @@ struct ftrace_likely_data {
*/
#define __pure __attribute__((pure))
#define __aligned(x) __attribute__((aligned(x)))
-#define __aligned_largest __attribute__((aligned))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
#define __maybe_unused __attribute__((unused))
@@ -226,6 +225,14 @@ struct ftrace_likely_data {
#define notrace __attribute__((no_instrument_function))
#endif
+/*
+ * It does not make sense on ARM (currently the only user of __naked)
+ * to trace naked functions, because mcount is then called without the
+ * stack and frame pointer set up, and there is no chance to restore
+ * the lr register to its value from before mcount was called.
+ */
+#define __naked __attribute__((naked)) notrace
+
#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
/*
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 207aed96a5b7..abf4b4e65dbb 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -17,9 +17,9 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
extern void dump_truncate(struct coredump_params *cprm);
#ifdef CONFIG_COREDUMP
-extern void do_coredump(const siginfo_t *siginfo);
+extern void do_coredump(const kernel_siginfo_t *siginfo);
#else
-static inline void do_coredump(const siginfo_t *siginfo) {}
+static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
#endif
#endif /* _LINUX_COREDUMP_H */
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index 986c06c88d81..84d3c81b5978 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -45,7 +45,7 @@
* 'asm/cpufeature.h' of your favorite architecture.
*/
#define module_cpu_feature_match(x, __initfunc) \
-static struct cpu_feature const cpu_feature_match_ ## x[] = \
+static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 4325d6fdde9b..faed7a8977e8 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -81,6 +81,7 @@ struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
unsigned int use_deepest_state:1;
+ unsigned int poll_time_limit:1;
unsigned int cpu;
int last_residency;
@@ -99,16 +100,6 @@ struct cpuidle_device {
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
-/**
- * cpuidle_get_last_residency - retrieves the last state's residency time
- * @dev: the target CPU
- */
-static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
-{
- return dev->last_residency;
-}
-
-
/****************************
* CPUIDLE DRIVER INTERFACE *
****************************/
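With the trivial accessor gone, governors read the field directly; the conversion is mechanical:

	int residency;

	residency = dev->last_residency;	/* was: cpuidle_get_last_residency(dev) */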
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 3e4ba9d753c8..f774c5eb9e3c 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -26,6 +26,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
unsigned long, int);
+extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset,
+ int userbuf);
+
void vmcore_cleanup(void);
/* Architecture code defines this if there are other possible ELF
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index 1fe0cfcdea30..6bb0c0bf357b 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -6,6 +6,7 @@
#define CRC_T10DIF_DIGEST_SIZE 2
#define CRC_T10DIF_BLOCK_SIZE 1
+#define CRC_T10DIF_STRING "crct10dif"
extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
size_t len);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index e8839d3a7559..3634ad6fe202 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -454,6 +454,33 @@ struct compress_alg {
* @cra_refcnt: internally used
* @cra_destroy: internally used
*
+ * All following statistics are for this crypto_alg
+ * @encrypt_cnt: number of encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @compress_cnt: number of compress requests
+ * @decompress_cnt: number of decompress requests
+ * @generate_cnt: number of RNG generate requests
+ * @seed_cnt: number of times the rng was seeded
+ * @hash_cnt: number of hash requests
+ * @sign_cnt: number of sign requests
+ * @setsecret_cnt: number of setsecret operations
+ * @generate_public_key_cnt: number of generate_public_key operations
+ * @verify_cnt: number of verify operations
+ * @compute_shared_secret_cnt: number of compute_shared_secret operations
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @compress_tlen: total data size handled by compress requests
+ * @decompress_tlen: total data size handled by decompress requests
+ * @generate_tlen: total size of data generated by the RNG
+ * @hash_tlen: total data size hashed
+ * @akcipher_err_cnt: number of errors for akcipher requests
+ * @cipher_err_cnt: number of errors for cipher requests
+ * @compress_err_cnt: number of errors for compress requests
+ * @aead_err_cnt: number of errors for aead requests
+ * @hash_err_cnt: number of errors for hash requests
+ * @rng_err_cnt: number of errors for rng requests
+ * @kpp_err_cnt: number of errors for kpp requests
+ *
* The struct crypto_alg describes a generic Crypto API algorithm and is common
* for all of the transformations. Any variable not documented here shall not
* be used by a cipher implementation as it is internal to the Crypto API.
@@ -487,6 +514,45 @@ struct crypto_alg {
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
+
+ union {
+ atomic_t encrypt_cnt;
+ atomic_t compress_cnt;
+ atomic_t generate_cnt;
+ atomic_t hash_cnt;
+ atomic_t setsecret_cnt;
+ };
+ union {
+ atomic64_t encrypt_tlen;
+ atomic64_t compress_tlen;
+ atomic64_t generate_tlen;
+ atomic64_t hash_tlen;
+ };
+ union {
+ atomic_t akcipher_err_cnt;
+ atomic_t cipher_err_cnt;
+ atomic_t compress_err_cnt;
+ atomic_t aead_err_cnt;
+ atomic_t hash_err_cnt;
+ atomic_t rng_err_cnt;
+ atomic_t kpp_err_cnt;
+ };
+ union {
+ atomic_t decrypt_cnt;
+ atomic_t decompress_cnt;
+ atomic_t seed_cnt;
+ atomic_t generate_public_key_cnt;
+ };
+ union {
+ atomic64_t decrypt_tlen;
+ atomic64_t decompress_tlen;
+ };
+ union {
+ atomic_t verify_cnt;
+ atomic_t compute_shared_secret_cnt;
+ };
+ atomic_t sign_cnt;
+
} CRYPTO_MINALIGN_ATTR;
/*
@@ -907,6 +973,38 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
return __crypto_ablkcipher_cast(req->base.tfm);
}
+static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct ablkcipher_tfm *crt =
+ crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
+ } else {
+ atomic_inc(&crt->base->base.__crt_alg->encrypt_cnt);
+ atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct ablkcipher_tfm *crt =
+ crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
+ } else {
+ atomic_inc(&crt->base->base.__crt_alg->decrypt_cnt);
+ atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen);
+ }
+#endif
+}
+
/**
* crypto_ablkcipher_encrypt() - encrypt plaintext
* @req: reference to the ablkcipher_request handle that holds all information
@@ -922,7 +1020,11 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- return crt->encrypt(req);
+ int ret;
+
+ ret = crt->encrypt(req);
+ crypto_stat_ablkcipher_encrypt(req, ret);
+ return ret;
}
/**
@@ -940,7 +1042,11 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- return crt->decrypt(req);
+ int ret;
+
+ ret = crt->decrypt(req);
+ crypto_stat_ablkcipher_decrypt(req, ret);
+ return ret;
}
/**
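Callers need no changes for the new statistics: the inline wrappers account on every call. An illustrative call site, assuming CONFIG_CRYPTO_STATS=y and a prepared request:

	int ret = crypto_ablkcipher_encrypt(req);
	/* on success, encrypt_cnt and encrypt_tlen were bumped; on real
	 * errors (anything but -EINPROGRESS/-EBUSY), cipher_err_cnt was */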
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 120225e9a366..257ab3c92cb8 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -8,8 +8,8 @@
struct task_struct;
-extern int debug_locks;
-extern int debug_locks_silent;
+extern int debug_locks __read_mostly;
+extern int debug_locks_silent __read_mostly;
static inline int __debug_locks_off(void)
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 3aae5b3af87c..e4963b0f45da 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -198,6 +198,14 @@ extern void devm_devfreq_remove_device(struct device *dev,
extern int devfreq_suspend_device(struct devfreq *devfreq);
extern int devfreq_resume_device(struct devfreq *devfreq);
+/**
+ * update_devfreq() - Reevaluate the device and configure frequency
+ * @devfreq: the devfreq device
+ *
+ * Note: devfreq->lock must be held
+ */
+extern int update_devfreq(struct devfreq *devfreq);
+
/* Helper functions for devfreq user device driver with OPP. */
extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags);
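Exporting update_devfreq() lets a governor trigger re-evaluation itself; per the kerneldoc above, it must run under devfreq->lock. A sketch, assuming the usual struct devfreq "lock" mutex:

	int err;

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);	/* re-evaluate and set the frequency */
	mutex_unlock(&devfreq->lock);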
diff --git a/include/linux/device.h b/include/linux/device.h
index fecd9722400e..1b25c7a43f4c 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -778,6 +778,30 @@ void device_connection_add(struct device_connection *con);
void device_connection_remove(struct device_connection *con);
/**
+ * device_connections_add - Add multiple device connections at once
+ * @cons: Zero terminated array of device connection descriptors
+ */
+static inline void device_connections_add(struct device_connection *cons)
+{
+ struct device_connection *c;
+
+ for (c = cons; c->endpoint[0]; c++)
+ device_connection_add(c);
+}
+
+/**
+ * device_connections_remove - Remove multiple device connections at once
+ * @cons: Zero terminated array of device connection descriptors
+ */
+static inline void device_connections_remove(struct device_connection *cons)
+{
+ struct device_connection *c;
+
+ for (c = cons; c->endpoint[0]; c++)
+ device_connection_remove(c);
+}
+
+/**
* enum device_link_state - Device link states.
* @DL_STATE_NONE: The presence of the drivers is not being tracked.
* @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
@@ -931,6 +955,8 @@ struct dev_links_info {
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
+ * @dma_coherent: this particular device is DMA coherent, even if the
+ * architecture as a whole supports non-coherent devices.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -1020,6 +1046,11 @@ struct device {
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+ bool dma_coherent:1;
+#endif
};
static inline struct device *kobj_to_dev(struct kobject *kobj)
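The new bulk helpers walk the table until the first entry with an empty endpoint[0], so the array must be sentinel-terminated. A sketch with illustrative endpoint and id strings:

static struct device_connection example_cons[] = {
	{
		.endpoint = { "example-phy", "example-mux" },
		.id = "typec-mux",	/* illustrative */
	},
	{ }	/* sentinel: stops the walk */
};

	device_connections_add(example_cons);
	/* ... on teardown ... */
	device_connections_remove(example_cons);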
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index a785f2507159..30213adbb6b9 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -32,6 +32,9 @@ extern void dma_debug_add_bus(struct bus_type *bus);
extern int dma_debug_resize_entries(u32 num_entries);
+extern void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len);
+
extern void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction, dma_addr_t dma_addr,
@@ -103,6 +106,11 @@ static inline int dma_debug_resize_entries(u32 num_entries)
return 0;
}
+static inline void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len)
+{
+}
+
static inline void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction, dma_addr_t dma_addr,
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 8d9f33febde5..fbca184ff5a0 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -27,7 +27,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return false;
- return addr + size - 1 <= *dev->dma_mask;
+ return addr + size - 1 <=
+ min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
@@ -55,10 +56,15 @@ static inline void dma_mark_clean(void *addr, size_t size)
}
#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
+u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
+void *dma_direct_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
+void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs);
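A worked example of the new check, assuming a device that advertises a 64-bit
DMA mask but sits behind a bus limited to 32 bits; min_not_zero() picks the
non-zero minimum, so the bus mask now caps what dma_capable() accepts:

	/* *dev->dma_mask    = DMA_BIT_MASK(64)  (device capability)
	 * dev->bus_dma_mask = DMA_BIT_MASK(32)  (bus/interconnect limit)
	 * min_not_zero(...) = DMA_BIT_MASK(32)
	 *
	 * => a buffer ending above 4 GiB is rejected even though the
	 *    device mask alone would allow it; if bus_dma_mask is 0
	 *    (unset), the device mask is used as before.
	 */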
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 1db6a6b46d0d..15bd41447025 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -130,13 +130,10 @@ struct dma_map_ops {
enum dma_data_direction direction);
int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
u64 (*get_required_mask)(struct device *dev);
-#endif
};
extern const struct dma_map_ops dma_direct_ops;
-extern const struct dma_map_ops dma_noncoherent_ops;
extern const struct dma_map_ops dma_virt_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -232,6 +229,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
+ debug_dma_map_single(dev, ptr, size);
addr = ops->map_page(dev, virt_to_page(ptr),
offset_in_page(ptr), size,
dir, attrs);
@@ -445,7 +443,8 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
}
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
void *dma_common_contiguous_remap(struct page *page, size_t size,
unsigned long vm_flags,
@@ -477,14 +476,14 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
BUG_ON(!ops);
if (ops->mmap)
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, unsigned long attrs);
static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
@@ -496,7 +495,8 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
if (ops->get_sgtable)
return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
attrs);
- return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
}
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
@@ -558,9 +558,11 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t gfp)
{
- return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
+
+ return dma_alloc_attrs(dev, size, dma_handle, gfp,
+ (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
@@ -753,18 +755,6 @@ dma_mark_declared_memory_occupied(struct device *dev,
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-#ifdef CONFIG_HAS_DMA
-int dma_configure(struct device *dev);
-void dma_deconfigure(struct device *dev);
-#else
-static inline int dma_configure(struct device *dev)
-{
- return 0;
-}
-
-static inline void dma_deconfigure(struct device *dev) {}
-#endif
-
/*
* Managed DMA API
*/
@@ -806,8 +796,12 @@ static inline void dmam_release_declared_memory(struct device *dev)
static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
- return dma_alloc_attrs(dev, size, dma_addr, gfp,
- DMA_ATTR_WRITE_COMBINE);
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
+
+ if (gfp & __GFP_NOWARN)
+ attrs |= DMA_ATTR_NO_WARN;
+
+ return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
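With this change a caller can suppress allocation-failure warnings purely via
the gfp flags; a sketch of the opportunistic-allocation pattern this enables:

	/* Try a large buffer quietly, fall back without spamming dmesg. */
	buf = dma_alloc_coherent(dev, SZ_1M, &dma, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		buf = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);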
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index a0aa00cc909d..9051b055beec 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -4,18 +4,35 @@
#include <linux/dma-mapping.h>
+#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
+#include <asm/dma-coherence.h>
+#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return dev->dma_coherent;
+}
+#else
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return true;
+}
+#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
+
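A hedged sketch of how an architecture's allocator is expected to key off the
new per-device coherence flag (the non-coherent remap path is elided):

	void *arch_dma_alloc(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
	{
		if (dev_is_dma_coherent(dev))
			return dma_direct_alloc_pages(dev, size, dma_handle,
						      gfp, attrs);

		/* ... otherwise allocate and remap as non-cacheable ... */
	}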
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr);
-#ifdef CONFIG_DMA_NONCOHERENT_MMAP
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
+#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs);
#else
-#define arch_dma_mmap NULL
-#endif /* CONFIG_DMA_NONCOHERENT_MMAP */
+# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot)
+#endif
#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
index b0115e340fbc..b42b80e52cc2 100644
--- a/include/linux/dma/sprd-dma.h
+++ b/include/linux/dma/sprd-dma.h
@@ -58,4 +58,73 @@ enum sprd_dma_int_type {
SPRD_DMA_CFGERR_INT,
};
+/*
+ * struct sprd_dma_linklist - DMA link-list address structure
+ * @virt_addr: link-list virtual address to configure link-list node
+ * @phy_addr: link-list physical address to link DMA transfer
+ *
+ * The Spreadtrum DMA controller supports link-list mode, which means slaves
+ * can supply several groups of configurations (each configuration represents
+ * one DMA transfer) saved in memory, and the DMA controller will link these
+ * configuration groups by writing the physical address of each configuration
+ * into the link-list register.
+ *
+ * As shown below, the link-list pointer register points to the physical
+ * address of 'configuration 1', the 'configuration 1' link-list pointer
+ * points to 'configuration 2', and so on.
+ * Once the DMA transfer is triggered, the DMA controller automatically loads
+ * 'configuration 1' into its registers; after the 'configuration 1'
+ * transaction is done, it automatically loads 'configuration 2', and so on
+ * until all DMA transactions are done.
+ *
+ * Note: the last link-list pointer should point back to the physical address
+ * of 'configuration 1', which prevents the DMA controller from loading an
+ * incorrect configuration once the last configuration's transaction is done.
+ *
+ * DMA controller linklist memory
+ * ====================== -----------------------
+ *| | | configuration 1 |<---
+ *| DMA controller | ------->| | |
+ *| | | | | |
+ *| | | | | |
+ *| | | | | |
+ *| linklist pointer reg |---- ----| linklist pointer | |
+ * ====================== | ----------------------- |
+ * | |
+ * | ----------------------- |
+ * | | configuration 2 | |
+ * --->| | |
+ * | | |
+ * | | |
+ * | | |
+ * ----| linklist pointer | |
+ * | ----------------------- |
+ * | |
+ * | ----------------------- |
+ * | | configuration 3 | |
+ * --->| | |
+ * | | |
+ * | . | |
+ * . |
+ * . |
+ * . |
+ * | . |
+ * | ----------------------- |
+ * | | configuration n | |
+ * --->| | |
+ * | | |
+ * | | |
+ * | | |
+ * | linklist pointer |----
+ * -----------------------
+ *
+ * To support link-list mode, DMA slaves should allocate one segment of memory
+ * from always-on IRAM or DMA coherent memory to store these groups of DMA
+ * configurations, and pass the virtual and physical addresses to the DMA
+ * controller.
+ */
+struct sprd_dma_linklist {
+ unsigned long virt_addr;
+ phys_addr_t phy_addr;
+};
+
#endif
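As a rough usage sketch (local names assumed), a slave carves the link-list
storage out of DMA coherent memory and hands both addresses to the driver
through this struct; the phy_addr assignment assumes a 1:1 DMA mapping:

	struct sprd_dma_linklist ll;
	dma_addr_t pa;
	void *va = dma_alloc_coherent(dev, ll_size, &pa, GFP_KERNEL);

	ll.virt_addr = (unsigned long)va;	/* CPU fills nodes here */
	ll.phy_addr = pa;			/* controller follows this */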
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
index 6ac3cad9aef1..34a744a1bafc 100644
--- a/include/linux/dns_resolver.h
+++ b/include/linux/dns_resolver.h
@@ -24,11 +24,9 @@
#ifndef _LINUX_DNS_RESOLVER_H
#define _LINUX_DNS_RESOLVER_H
-#ifdef __KERNEL__
+#include <uapi/linux/dns_resolver.h>
extern int dns_query(const char *type, const char *name, size_t namelen,
const char *options, char **_result, time64_t *_expiry);
-#endif /* KERNEL */
-
#endif /* _LINUX_DNS_RESOLVER_H */
diff --git a/include/linux/edac.h b/include/linux/edac.h
index bffb97828ed6..1d0c9ea8825d 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -17,6 +17,7 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
+#include <linux/numa.h>
#define EDAC_DEVICE_NAME_LEN 31
@@ -451,6 +452,8 @@ struct dimm_info {
u32 nr_pages; /* number of pages on this dimm */
unsigned csrow, cschannel; /* Points to the old API data */
+
+ u16 smbios_handle; /* Handle for SMBIOS type 17 */
};
/**
@@ -670,6 +673,6 @@ struct mem_ctl_info {
/*
* Maximum number of memory controllers in the coherent fabric.
*/
-#define EDAC_MAX_MCS 16
+#define EDAC_MAX_MCS (2 * MAX_NUMNODES)
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 401e4b254e30..845174e113ce 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -672,6 +672,7 @@ void efi_native_runtime_setup(void);
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
+#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
typedef struct {
efi_guid_t guid;
@@ -957,6 +958,7 @@ extern struct efi {
unsigned long mem_attr_table; /* memory attributes table */
unsigned long rng_seed; /* UEFI firmware random seed */
unsigned long tpm_log; /* TPM2 Event Log table */
+ unsigned long mem_reserve; /* Linux EFI memreserve table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -1041,6 +1043,7 @@ extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
+extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern void efi_reserve_boot_services(void);
@@ -1659,7 +1662,55 @@ struct linux_efi_tpm_eventlog {
extern int efi_tpm_eventlog_init(void);
+/*
+ * efi_runtime_service() function identifiers.
+ * "NONE" is used by efi_recover_from_page_fault() to check if the page
+ * fault happened while executing an efi runtime service.
+ */
+enum efi_rts_ids {
+ NONE,
+ GET_TIME,
+ SET_TIME,
+ GET_WAKEUP_TIME,
+ SET_WAKEUP_TIME,
+ GET_VARIABLE,
+ GET_NEXT_VARIABLE,
+ SET_VARIABLE,
+ QUERY_VARIABLE_INFO,
+ GET_NEXT_HIGH_MONO_COUNT,
+ RESET_SYSTEM,
+ UPDATE_CAPSULE,
+ QUERY_CAPSULE_CAPS,
+};
+
+/*
+ * efi_runtime_work: Details of EFI Runtime Service work
+ * @arg<1-5>: EFI Runtime Service function arguments
+ * @status: Status of executing EFI Runtime Service
+ * @efi_rts_id: EFI Runtime Service function identifier
+ * @efi_rts_comp: Struct used for handling completions
+ */
+struct efi_runtime_work {
+ void *arg1;
+ void *arg2;
+ void *arg3;
+ void *arg4;
+ void *arg5;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+};
+
+extern struct efi_runtime_work efi_rts_work;
+
/* Workqueue to queue EFI Runtime Services */
extern struct workqueue_struct *efi_rts_wq;
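Exporting efi_rts_work lets the page-fault handler identify the in-flight
service. Roughly, the dispatch pattern looks like the sketch below, where
efi_call_rts is an assumed worker that demultiplexes on efi_rts_id:

	efi_rts_work.efi_rts_id = GET_TIME;
	efi_rts_work.arg1 = tm;
	init_completion(&efi_rts_work.efi_rts_comp);
	INIT_WORK(&efi_rts_work.work, efi_call_rts);
	queue_work(efi_rts_wq, &efi_rts_work.work);
	wait_for_completion(&efi_rts_work.efi_rts_comp);
	status = efi_rts_work.status;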
+struct linux_efi_memreserve {
+ phys_addr_t next;
+ phys_addr_t base;
+ phys_addr_t size;
+};
+
#endif /* _LINUX_EFI_H */
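The memreserve table is a singly linked list of physical { next, base, size }
records rooted at efi.mem_reserve; a sketch of walking it, remapping each node
before touching it (validity checks elided):

	phys_addr_t prsv = efi.mem_reserve;

	while (prsv) {
		struct linux_efi_memreserve *rsv =
			memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		phys_addr_t next = rsv->next;

		/* rsv->base / rsv->size describe one persistent reservation */
		memunmap(rsv);
		prsv = next;
	}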
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a02deea30185..015bb59c0331 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -111,7 +111,7 @@ struct elevator_mq_ops {
void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
bool (*has_work)(struct blk_mq_hw_ctx *);
- void (*completed_request)(struct request *);
+ void (*completed_request)(struct request *, u64);
void (*started_request)(struct request *);
void (*requeue_request)(struct request *);
struct request *(*former_request)(struct request_queue *, struct request *);
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index b5f2efdd05e0..7a37f4ce9fd2 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -27,10 +27,10 @@ struct compat_elf_prstatus
compat_pid_t pr_ppid;
compat_pid_t pr_pgrp;
compat_pid_t pr_sid;
- struct compat_timeval pr_utime;
- struct compat_timeval pr_stime;
- struct compat_timeval pr_cutime;
- struct compat_timeval pr_cstime;
+ struct old_timeval32 pr_utime;
+ struct old_timeval32 pr_stime;
+ struct old_timeval32 pr_cutime;
+ struct old_timeval32 pr_cstime;
compat_elf_gregset_t pr_reg;
#ifdef CONFIG_BINFMT_ELF_FDPIC
compat_ulong_t pr_exec_fdpic_loadmap;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index f8a2245b70ac..afd9596ce636 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -183,14 +183,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
/**
* struct ethtool_ops - optional netdev operations
- * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
- * API. Get various device settings including Ethernet link
- * settings. The @cmd parameter is expected to have been cleared
- * before get_settings is called. Returns a negative error code
- * or zero.
- * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
- * API. Set various device settings including Ethernet link
- * settings. Returns a negative error code or zero.
* @get_drvinfo: Report driver/device information. Should only set the
* @driver, @version, @fw_version and @bus_info fields. If not
* implemented, the @driver and @bus_info fields will be filled in
@@ -297,19 +289,16 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
* queue has this number, ignore the inapplicable fields.
* Returns a negative error code or zero.
- * @get_link_ksettings: When defined, takes precedence over the
- * %get_settings method. Get various device settings
- * including Ethernet link settings. The %cmd and
- * %link_mode_masks_nwords fields should be ignored (use
- * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any
- * change to them will be overwritten by kernel. Returns a
- * negative error code or zero.
- * @set_link_ksettings: When defined, takes precedence over the
- * %set_settings method. Set various device settings including
- * Ethernet link settings. The %cmd and %link_mode_masks_nwords
- * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
- * instead of the latter), any change to them will be overwritten
- * by kernel. Returns a negative error code or zero.
+ * @get_link_ksettings: Get various device settings including Ethernet link
+ * settings. The %cmd and %link_mode_masks_nwords fields should be
+ * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
+ * any change to them will be overwritten by kernel. Returns a negative
+ * error code or zero.
+ * @set_link_ksettings: Set various device settings including Ethernet link
+ * settings. The %cmd and %link_mode_masks_nwords fields should be
+ * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
+ * any change to them will be overwritten by kernel. Returns a negative
+ * error code or zero.
* @get_fecparam: Get the network device Forward Error Correction parameters.
* @set_fecparam: Set the network device Forward Error Correction parameters.
* @get_ethtool_phy_stats: Return extended statistics about the PHY device.
@@ -329,8 +318,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* of the generic netdev features interface.
*/
struct ethtool_ops {
- int (*get_settings)(struct net_device *, struct ethtool_cmd *);
- int (*set_settings)(struct net_device *, struct ethtool_cmd *);
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index f70f8ac9c4f4..d7711048ef93 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* include/linux/f2fs_fs.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_F2FS_FS_H
#define _LINUX_F2FS_FS_H
@@ -112,12 +109,15 @@ struct f2fs_super_block {
struct f2fs_device devs[MAX_DEVICES]; /* device list */
__le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */
__u8 hot_ext_count; /* # of hot file extension */
- __u8 reserved[314]; /* valid reserved region */
+ __u8 reserved[310]; /* valid reserved region */
+ __le32 crc; /* checksum of superblock */
} __packed;
/*
* For checkpoint
*/
+#define CP_DISABLED_FLAG 0x00001000
+#define CP_QUOTA_NEED_FSCK_FLAG 0x00000800
#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400
#define CP_NOCRC_RECOVERY_FLAG 0x00000200
#define CP_TRIMMED_FLAG 0x00000100
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6791a0ac0139..91b4c934f02e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -520,24 +520,6 @@ struct bpf_skb_data_end {
void *data_end;
};
-struct sk_msg_buff {
- void *data;
- void *data_end;
- __u32 apply_bytes;
- __u32 cork_bytes;
- int sg_copybreak;
- int sg_start;
- int sg_curr;
- int sg_end;
- struct scatterlist sg_data[MAX_SKB_FRAGS];
- bool sg_copy[MAX_SKB_FRAGS];
- __u32 flags;
- struct sock *sk_redir;
- struct sock *sk;
- struct sk_buff *skb;
- struct list_head list;
-};
-
struct bpf_redirect_info {
u32 ifindex;
u32 flags;
@@ -566,6 +548,27 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
cb->data_end = skb->data + skb_headlen(skb);
}
+/* Similar to bpf_compute_data_pointers(), except that it saves the
+ * original data in cb->data and cb->meta_data for later restore.
+ */
+static inline void bpf_compute_and_save_data_end(
+ struct sk_buff *skb, void **saved_data_end)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ *saved_data_end = cb->data_end;
+ cb->data_end = skb->data + skb_headlen(skb);
+}
+
+/* Restore data saved by bpf_compute_data_pointers(). */
+static inline void bpf_restore_data_end(
+ struct sk_buff *skb, void *saved_data_end)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ cb->data_end = saved_data_end;
+}
+
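These helpers pair up around running a program that may observe a different
data_end; a minimal sketch of the intended call pattern:

	void *saved_data_end;

	bpf_compute_and_save_data_end(skb, &saved_data_end);
	ret = BPF_PROG_RUN(prog, skb);
	bpf_restore_data_end(skb, saved_data_end);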
static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
@@ -833,9 +836,6 @@ void xdp_do_flush_map(void);
void bpf_warn_invalid_xdp_action(u32 act);
-struct sock *do_sk_redirect_map(struct sk_buff *skb);
-struct sock *do_msg_redirect_map(struct sk_msg_buff *md);
-
#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 8942e61f0028..8ab5df769923 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -53,12 +53,20 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_OPERATING,
};
-/*
- * FPGA Manager flags
- * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
- * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
- * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
- * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
+/**
+ * DOC: FPGA Manager flags
+ *
+ * Flags used in the &fpga_image_info->flags field
+ *
+ * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ *
+ * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+ *
+ * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
+ *
+ * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
+ *
+ * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6c0b4a1c22ff..897eae8faee1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1828,8 +1828,10 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
struct inode *inode_out, loff_t pos_out,
u64 *len, bool is_dedupe);
+extern int do_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, u64 len);
extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len);
+ struct file *file_out, loff_t pos_out, u64 len);
extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
struct inode *dest, loff_t destoff,
loff_t len, bool *is_same);
@@ -2773,19 +2775,6 @@ static inline void file_end_write(struct file *file)
__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
-static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- u64 len)
-{
- int ret;
-
- file_start_write(file_out);
- ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
- file_end_write(file_out);
-
- return ret;
-}
-
/*
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index f27cb14088a4..5160f06ffbac 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -405,6 +405,7 @@ extern struct device_type fsl_mc_bus_dpcon_type;
extern struct device_type fsl_mc_bus_dpmcp_type;
extern struct device_type fsl_mc_bus_dpmac_type;
extern struct device_type fsl_mc_bus_dprtc_type;
+extern struct device_type fsl_mc_bus_dpseci_type;
static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
{
@@ -451,6 +452,11 @@ static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
}
+static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
+}
+
/*
* Data Path Buffer Pool (DPBP) API
* Contains initialization APIs and runtime control APIs for DPBP
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index 3fdfede2f0f3..5f343b796ad9 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -274,6 +274,8 @@
*/
/* Auto Boot Mode */
#define IFC_NAND_NCFGR_BOOT 0x80000000
+/* SRAM Initialization */
+#define IFC_NAND_NCFGR_SRAM_INIT_EN 0x20000000
/* Addressing Mode-ROW0+n/COL0 */
#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000
/* Addressing Mode-ROW0+n/COL0+n */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 57864422a2c8..70fc838e6773 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -83,10 +83,10 @@ struct partition {
} __attribute__((packed));
struct disk_stats {
+ u64 nsecs[NR_STAT_GROUPS];
unsigned long sectors[NR_STAT_GROUPS];
unsigned long ios[NR_STAT_GROUPS];
unsigned long merges[NR_STAT_GROUPS];
- unsigned long ticks[NR_STAT_GROUPS];
unsigned long io_ticks;
unsigned long time_in_queue;
};
@@ -354,6 +354,9 @@ static inline void free_part_stats(struct hd_struct *part)
#endif /* CONFIG_SMP */
+#define part_stat_read_msecs(part, which) \
+ div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC)
+
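Stats are now accumulated in nanoseconds and the new helper converts on read,
so sysfs/diskstats consumers keep seeing milliseconds. For example:

	/* total time spent on reads, in milliseconds */
	unsigned long rd_ms = part_stat_read_msecs(part, STAT_READ);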
#define part_stat_read_accum(part, field) \
(part_stat_read(part, field[STAT_READ]) + \
part_stat_read(part, field[STAT_WRITE]) + \
@@ -399,10 +402,11 @@ static inline void free_part_info(struct hd_struct *part)
extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part);
/* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk);
+extern void device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
static inline void add_disk(struct gendisk *disk)
{
- device_add_disk(NULL, disk);
+ device_add_disk(NULL, disk, NULL);
}
extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
static inline void add_disk_no_queue_reg(struct gendisk *disk)
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 21ddbe440030..f2f887795d43 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -18,10 +18,19 @@ struct device;
struct gpio_desc;
/**
+ * Opaque descriptor for a structure of GPIO array attributes. This structure
+ * is attached to struct gpio_descs obtained from gpiod_get_array() and can be
+ * passed back to get/set array functions in order to activate the fast
+ * processing path if applicable.
+ */
+struct gpio_array;
+
+/**
* Struct containing an array of descriptors that can be obtained using
* gpiod_get_array().
*/
struct gpio_descs {
+ struct gpio_array *info;
unsigned int ndescs;
struct gpio_desc *desc[];
};
@@ -30,6 +39,7 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3)
+#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4)
/**
* Optional flags that can be passed to one of gpiod_* to configure direction
@@ -104,36 +114,46 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
int gpiod_get_array_value(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_value(struct gpio_desc *desc, int value);
-void gpiod_set_array_value(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+int gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_get_raw_value(const struct gpio_desc *desc);
int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
int gpiod_set_raw_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+int gpiod_set_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
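The array APIs now take the values as a bitmap plus the opaque array_info
cookie from gpiod_get_array(); a sketch of driving a four-line bus with the
new signature (error handling elided):

	struct gpio_descs *bus = gpiod_get_array(dev, "bus", GPIOD_OUT_LOW);
	DECLARE_BITMAP(values, 4);

	bitmap_zero(values, 4);
	__set_bit(0, values);		/* first line high, rest low */
	gpiod_set_array_value(bus->ndescs, bus->desc, bus->info, values);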
@@ -330,7 +350,8 @@ static inline int gpiod_get_value(const struct gpio_desc *desc)
}
static inline int gpiod_get_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -341,12 +362,14 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline int gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
+ return 0;
}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
@@ -356,7 +379,8 @@ static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
}
static inline int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -368,8 +392,9 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
WARN_ON(1);
}
static inline int gpiod_set_raw_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -384,7 +409,8 @@ static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
}
static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -395,12 +421,14 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
+static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
+ return 0;
}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
@@ -410,7 +438,8 @@ static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
}
static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -424,7 +453,8 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
}
static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 0ea328e71ec9..2db62b550b95 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -66,9 +66,15 @@ struct gpio_irq_chip {
/**
* @lock_key:
*
- * Per GPIO IRQ chip lockdep classes.
+ * Per GPIO IRQ chip lockdep class for IRQ lock.
*/
struct lock_class_key *lock_key;
+
+ /**
+ * @request_key:
+ *
+ * Per GPIO IRQ chip lockdep class for IRQ request.
+ */
struct lock_class_key *request_key;
/**
@@ -95,6 +101,13 @@ struct gpio_irq_chip {
unsigned int num_parents;
/**
+ * @parent_irq:
+ *
+ * For use by gpiochip_set_cascaded_irqchip()
+ */
+ unsigned int parent_irq;
+
+ /**
* @parents:
*
* A list of interrupt parents of a GPIO chip. This is owned by the
@@ -138,6 +151,20 @@ struct gpio_irq_chip {
* will allocate and map all IRQs during initialization.
*/
unsigned int first;
+
+ /**
+ * @irq_enable:
+ *
+ * Store old irq_chip irq_enable callback
+ */
+ void (*irq_enable)(struct irq_data *data);
+
+ /**
+ * @irq_disable:
+ *
+ * Store old irq_chip irq_disable callback
+ */
+ void (*irq_disable)(struct irq_data *data);
};
static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
@@ -158,9 +185,13 @@ static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
* @free: optional hook for chip-specific deactivation, such as
* disabling module power and clock; may sleep
* @get_direction: returns direction for signal "offset", 0=out, 1=in,
- * (same as GPIOF_DIR_XXX), or negative error
+ * (same as GPIOF_DIR_XXX), or negative error.
+ * It is recommended to always implement this function, even on
+ * input-only or output-only gpio chips.
* @direction_input: configures signal "offset" as input, or returns error
+ * This can be omitted on input-only or output-only gpio chips.
* @direction_output: configures signal "offset" as output, or returns error
+ * This can be omitted on input-only or output-only gpio chips.
* @get: returns value for signal "offset", 0=low, 1=high, or negative error
* @get_multiple: reads values for multiple signals defined by "mask" and
* stores them in "bits", returns 0 on success or negative error
@@ -256,6 +287,9 @@ struct gpio_chip {
void (*dbg_show)(struct seq_file *s,
struct gpio_chip *chip);
+
+ int (*init_valid_mask)(struct gpio_chip *chip);
+
int base;
u16 ngpio;
const char *const *names;
@@ -294,7 +328,9 @@ struct gpio_chip {
/**
* @need_valid_mask:
*
- * If set core allocates @valid_mask with all bits set to one.
+ * If set, the core allocates @valid_mask with all bits initialized
+ * via init_valid_mask(), or set to one if init_valid_mask() is not
+ * defined.
*/
bool need_valid_mask;
@@ -395,6 +431,10 @@ extern struct gpio_chip *gpiochip_find(void *data,
int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
+int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset);
+void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset);
+void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset);
+void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset);
/* Line status inquiry for drivers */
bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset);
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d44a78362942..2827b87590d8 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1139,6 +1139,34 @@ static inline u32 hid_report_len(struct hid_report *report)
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
+
+/**
+ * struct hid_scroll_counter - Utility class for processing high-resolution
+ * scroll events.
+ * @dev: the input device for which events should be reported.
+ * @microns_per_hi_res_unit: the amount moved by the user's finger for each
+ * high-resolution unit reported by the mouse, in
+ * microns.
+ * @resolution_multiplier: the wheel's resolution in high-resolution mode as a
+ * multiple of its lower resolution. For example, if
+ * moving the wheel by one "notch" would result in a
+ * value of 1 in low-resolution mode but 8 in
+ * high-resolution, the multiplier is 8.
+ * @remainder: counts the number of high-resolution units moved since the last
+ * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
+ * only be used by class methods.
+ */
+struct hid_scroll_counter {
+ struct input_dev *dev;
+ int microns_per_hi_res_unit;
+ int resolution_multiplier;
+
+ int remainder;
+};
+
+void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
+ int hi_res_value);
+
/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
int hid_quirks_init(char **quirks_param, __u16 bus, int count);
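A hedged sketch of how a mouse driver would feed raw high-resolution wheel
deltas through the counter; the per-device constants here are made-up values:

	static struct hid_scroll_counter counter = {
		.microns_per_hi_res_unit = 131,	/* assumed hardware constant */
		.resolution_multiplier = 8,	/* 8 hi-res units per notch */
	};

	counter.dev = hidinput->input;
	/* then, from the raw event handler: */
	hid_scroll_counter_handle_scroll(&counter, hi_res_value);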
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 99c19b06d9a4..fdcb45999b26 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -43,7 +43,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
+ pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6b68e345f0ca..087fd5f48c91 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
@@ -170,6 +172,18 @@ static inline unsigned long hugetlb_total_pages(void)
return 0;
}
+static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
+ pte_t *ptep)
+{
+ return 0;
+}
+
+static inline void adjust_range_if_pmd_sharing_possible(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index bee0827766a3..c0b93e0ff0c0 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -33,7 +33,8 @@
* and max is a multiple of 4 and >= 32 bytes.
* @priv: Private data, for use by the RNG driver.
* @quality: Estimation of true entropy in RNG's bitstream
- * (per mill).
+ * (in bits of entropy per 1024 bits of input;
+ * valid values: 1 to 1024, or 0 for unknown).
*/
struct hwrng {
const char *name;
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 9493d4a388db..99e0c1b0b5fb 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -118,6 +118,7 @@ enum hwmon_in_attributes {
hwmon_in_max_alarm,
hwmon_in_lcrit_alarm,
hwmon_in_crit_alarm,
+ hwmon_in_enable,
};
#define HWMON_I_INPUT BIT(hwmon_in_input)
@@ -135,6 +136,7 @@ enum hwmon_in_attributes {
#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
+#define HWMON_I_ENABLE BIT(hwmon_in_enable)
enum hwmon_curr_attributes {
hwmon_curr_input,
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 9c03a7d5e400..0ef67f837ae1 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1460,13 +1460,16 @@ struct ieee80211_ht_operation {
* STA can receive. Rate expressed in units of 1 Mbps.
* If this field is 0 this value should not be used to
* consider the highest RX data rate supported.
- * The top 3 bits of this field are reserved.
+ * The top 3 bits of this field indicate the Maximum NSTS,total
+ * (a beamformee capability).
* @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
* @tx_highest: Indicates highest long GI VHT PPDU data rate
* STA can transmit. Rate expressed in units of 1 Mbps.
* If this field is 0 this value should not be used to
* consider the highest TX data rate supported.
- * The top 3 bits of this field are reserved.
+ * The top 2 bits of this field are reserved, the
+ * 3rd bit from the top indicates VHT Extended NSS BW
+ * Capability.
*/
struct ieee80211_vht_mcs_info {
__le16 rx_mcs_map;
@@ -1475,6 +1478,13 @@ struct ieee80211_vht_mcs_info {
__le16 tx_highest;
} __packed;
+/* for rx_highest */
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT)
+
+/* for tx_highest */
+#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13)
+
/**
* enum ieee80211_vht_mcs_support - VHT MCS support definitions
* @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
@@ -1545,11 +1555,11 @@ struct ieee80211_vht_operation {
* struct ieee80211_he_cap_elem - HE capabilities element
*
* This structure is the "HE capabilities element" fixed fields as
- * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3
+ * described in P802.11ax_D3.0 section 9.4.2.237.2 and 9.4.2.237.3
*/
struct ieee80211_he_cap_elem {
- u8 mac_cap_info[5];
- u8 phy_cap_info[9];
+ u8 mac_cap_info[6];
+ u8 phy_cap_info[11];
} __packed;
#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
@@ -1650,6 +1660,7 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2
#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
@@ -1659,6 +1670,7 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
+#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
@@ -1678,6 +1690,26 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000
+
+/**
+ * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS
+ * @cap: VHT capabilities of the peer
+ * @bw: bandwidth to use
+ * @mcs: MCS index to use
+ * @ext_nss_bw_capable: indicates whether or not the local transmitter
+ * (rate scaling algorithm) can deal with the new logic
+ * (dot11VHTExtendedNSSBWCapable)
+ *
+ * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
+ * vary for a given BW/MCS. This function parses the data.
+ *
+ * Note: This function is exported by cfg80211.
+ */
+int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
+ enum ieee80211_vht_chanwidth bw,
+ int mcs, bool ext_nss_bw_capable);
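For example, a rate-scaling implementation that understands the extended NSS
logic might query the peer's limit at 160 MHz, MCS 9 like this (cap points at
the peer's VHT capabilities element):

	static int peer_max_nss_160(struct ieee80211_vht_cap *cap)
	{
		/* local transmitter is ext-NSS-BW capable */
		return ieee80211_get_vht_max_nss(cap,
						 IEEE80211_VHT_CHANWIDTH_160MHZ,
						 9, true);
	}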
/* 802.11ax HE MAC capabilities */
#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
@@ -1707,15 +1739,15 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70
/* Link adaptation is split between byte HE_MAC_CAP1 and
* HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
@@ -1729,14 +1761,13 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
-#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04
+#define IEEE80211_HE_MAC_CAP2_TRS 0x04
#define IEEE80211_HE_MAC_CAP2_BSR 0x08
#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
-#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01
#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
@@ -1744,25 +1775,34 @@ struct ieee80211_mu_edca_param_set {
* A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the
* same field in the HE capabilities.
*/
-#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00
-#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08
-#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10
-#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18
-#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18
-#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT 0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1 0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2 0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED 0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
+#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
#define IEEE80211_HE_MAC_CAP4_QTP 0x02
#define IEEE80211_HE_MAC_CAP4_BQR 0x04
-#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08
+#define IEEE80211_HE_MAC_CAP4_SRP_RESP 0x08
#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
#define IEEE80211_HE_MAC_CAP4_OPS 0x20
#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40
+/* Multi TID agg TX is split between bytes #4 and #5
+ * The value is a combination of B39, B40 and B41
+ */
+#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80
+
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
+#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04
+#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
+#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
/* 802.11ax HE PHY capabilities */
-#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
@@ -1779,10 +1819,10 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
-/* Midamble RX Max NSTS is split between byte #2 and byte #3 */
-#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80
+/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */
+#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80
-#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01
+#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01
#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
@@ -1883,7 +1923,19 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
-#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20
+#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ 0x00
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ 0x40
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ 0x80
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ 0xc0
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01
+#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02
+#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04
+#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
/* 802.11ax HE TX/RX MCS NSS Support */
#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
@@ -1963,8 +2015,8 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10
-#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000
-#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x00100000
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00200000
#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000
#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 3d2996dc7d85..12e3eebf0ce6 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -16,9 +16,23 @@
#define __IF_TUN_H
#include <uapi/linux/if_tun.h>
+#include <uapi/linux/virtio_net.h>
#define TUN_XDP_FLAG 0x1UL
+#define TUN_MSG_UBUF 1
+#define TUN_MSG_PTR 2
+struct tun_msg_ctl {
+ unsigned short type;
+ unsigned short num;
+ void *ptr;
+};
+
+struct tun_xdp_hdr {
+ int buflen;
+ struct virtio_net_hdr gso;
+};
+
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
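A sketch of the producer side (e.g. vhost-net) batching XDP frames to the tun
socket with the new control block; the local variable names are illustrative:

	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num  = nframes,	/* entries in the xdp pointer array */
		.ptr  = xdp_frames,
	};
	struct msghdr msg = { .msg_control = &ctl };

	sock->ops->sendmsg(sock, &msg, 0);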
diff --git a/include/linux/init.h b/include/linux/init.h
index 2538d176dd1f..9c2aba1dbabf 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -133,7 +133,6 @@ static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
#endif
extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
-extern initcall_entry_t __security_initcall_start[], __security_initcall_end[];
/* Used for constructor calls. */
typedef void (*ctor_fn_t)(void);
@@ -236,7 +235,6 @@ extern bool initcall_debug;
static exitcall_t __exitcall_##fn __exit_call = fn
#define console_initcall(fn) ___define_initcall(fn,, .con_initcall)
-#define security_initcall(fn) ___define_initcall(fn,, .security_initcall)
struct obs_kernel_param {
const char *str;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index eeceac3376fc..1d6711c28271 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -45,7 +45,7 @@
* IRQF_PERCPU - Interrupt is per cpu
* IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- * registered first in an shared interrupt is considered for
+ * registered first in a shared interrupt is considered for
* performance reasons)
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 41f5c086f670..ef61676cfe05 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -27,7 +27,7 @@ struct device;
* Opaque type for a IPMI message user. One of these is needed to
* send and receive messages.
*/
-typedef struct ipmi_user *ipmi_user_t;
+struct ipmi_user;
/*
* Stuff coming from the receive interface comes as one of these.
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 7d5fd38d5282..8c4e2ab696c3 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -28,7 +28,7 @@ struct device;
*/
/* Structure for the low-level drivers. */
-typedef struct ipmi_smi *ipmi_smi_t;
+struct ipmi_smi;
/*
* Messages to/from the lower layer. The smi interface will take one
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 8415bf1a9776..495e834c1367 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -274,7 +274,8 @@ struct ipv6_pinfo {
*/
dontfrag:1,
autoflowlabel:1,
- autoflowlabel_set:1;
+ autoflowlabel_set:1,
+ mc_all:1;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 201de12a9957..c9bffda04a45 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+ unsigned int *mapped_cpu);
void irq_matrix_reserve(struct irq_matrix *m);
void irq_matrix_remove_reserved(struct irq_matrix *m);
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index 0a83b4379f34..9a1a479a2bf4 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -13,6 +13,12 @@
#include <linux/types.h>
#include <linux/ioport.h>
+#define GICD_INT_DEF_PRI 0xa0
+#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
+ (GICD_INT_DEF_PRI << 16) |\
+ (GICD_INT_DEF_PRI << 8) |\
+ GICD_INT_DEF_PRI)
+
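The X4 form simply replicates the one-byte default priority across the four
byte lanes of a priority register:

	/* (0xa0 << 24) | (0xa0 << 16) | (0xa0 << 8) | 0xa0 == 0xa0a0a0a0 */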
enum gic_type {
GIC_V2,
GIC_V3,
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8bdbb5f29494..071b4cbdf010 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -357,6 +357,8 @@
#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt)
#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb)
+#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12))
+
#define GITS_BASER_NR_REGS 8
#define GITS_BASER_VALID (1ULL << 63)
@@ -388,6 +390,9 @@
#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
#define GITS_BASER_PHYS_52_to_48(phys) \
(((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
+#define GITS_BASER_ADDR_48_to_52(baser) \
+ (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48)
+
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
@@ -585,8 +590,10 @@ struct rdists {
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
+ bool lpi_enabled;
} __percpu *rdist;
- struct page *prop_page;
+ phys_addr_t prop_table_pa;
+ void *prop_table_va;
u64 flags;
u32 gicd_typer;
bool has_vlpis;
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 6c4aaf04046c..626179077bb0 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -65,11 +65,6 @@
#define GICD_INT_EN_CLR_X32 0xffffffff
#define GICD_INT_EN_SET_SGI 0x0000ffff
#define GICD_INT_EN_CLR_PPI 0xffff0000
-#define GICD_INT_DEF_PRI 0xa0
-#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
- (GICD_INT_DEF_PRI << 16) |\
- (GICD_INT_DEF_PRI << 8) |\
- GICD_INT_DEF_PRI)
#define GICD_IIDR_IMPLEMENTER_SHIFT 0
#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index dccfa65aee96..068aa46f0d55 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -75,6 +75,7 @@ struct irq_fwspec {
enum irq_domain_bus_token {
DOMAIN_BUS_ANY = 0,
DOMAIN_BUS_WIRED,
+ DOMAIN_BUS_GENERIC_MSI,
DOMAIN_BUS_PCI_MSI,
DOMAIN_BUS_PLATFORM_MSI,
DOMAIN_BUS_NEXUS,
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 1a0b6f17a5d6..5df6a621e464 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -119,6 +119,68 @@ struct static_key {
#ifdef HAVE_JUMP_LABEL
#include <asm/jump_label.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
+
+struct jump_entry {
+ s32 code;
+ s32 target;
+ long key; // key may be far away from the core kernel under KASLR
+};
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+ return (unsigned long)&entry->code + entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+ return (unsigned long)&entry->target + entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+ long offset = entry->key & ~3L;
+
+ return (struct static_key *)((unsigned long)&entry->key + offset);
+}
+
+#else
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+ return entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+ return entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+ return (struct static_key *)((unsigned long)entry->key & ~3UL);
+}
+
+#endif
+
+static inline bool jump_entry_is_branch(const struct jump_entry *entry)
+{
+ return (unsigned long)entry->key & 1UL;
+}
+
+static inline bool jump_entry_is_init(const struct jump_entry *entry)
+{
+ return (unsigned long)entry->key & 2UL;
+}
+
+static inline void jump_entry_set_init(struct jump_entry *entry)
+{
+ entry->key |= 2;
+}
+
+#endif
#endif
#ifndef __ASSEMBLY__
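With CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, code, target and key are stored as offsets relative to the entry itself, which shrinks the table on 64-bit and keeps entries valid wherever KASLR places the kernel. In both encodings the low two bits of key carry the branch and init flags, which is why jump_entry_key() masks them off. A hypothetical table walker, sketched only to show that callers stay encoding-agnostic through the accessors:

static void walk_jump_table(struct jump_entry *start, struct jump_entry *stop)
{
	struct jump_entry *entry;

	for (entry = start; entry < stop; entry++) {
		struct static_key *key = jump_entry_key(entry);

		/* Entries in __init text are skipped once that memory is freed. */
		if (jump_entry_is_init(entry))
			continue;

		/*
		 * jump_entry_code() yields the patch site address in either
		 * encoding; an arch would patch it according to
		 * jump_entry_is_branch() and the state of @key.
		 */
		(void)key;
		(void)jump_entry_code(entry);
	}
}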
@@ -151,7 +213,6 @@ extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void);
-extern void jump_label_invalidate_initmem(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -199,8 +260,6 @@ static __always_inline void jump_label_init(void)
static_key_initialized = true;
}
-static inline void jump_label_invalidate_initmem(void) {}
-
static __always_inline bool static_key_false(struct static_key *key)
{
if (unlikely(static_key_count(key) > 0))
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0205aee44ded..c926698040e0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -733,8 +733,6 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 834683d603f9..7393a316d9fa 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -22,6 +22,7 @@
#include <linux/workqueue.h>
struct device;
+struct led_pattern;
/*
* LED Core
*/
@@ -88,6 +89,10 @@ struct led_classdev {
unsigned long *delay_on,
unsigned long *delay_off);
+ int (*pattern_set)(struct led_classdev *led_cdev,
+ struct led_pattern *pattern, u32 len, int repeat);
+ int (*pattern_clear)(struct led_classdev *led_cdev);
+
struct device *dev;
const struct attribute_group **groups;
@@ -472,4 +477,14 @@ static inline void led_classdev_notify_brightness_hw_changed(
struct led_classdev *led_cdev, enum led_brightness brightness) { }
#endif
+/**
+ * struct led_pattern - pattern interval settings
+ * @delta_t: pattern interval delay, in milliseconds
+ * @brightness: pattern interval brightness
+ */
+struct led_pattern {
+ u32 delta_t;
+ int brightness;
+};
+
#endif /* __LINUX_LEDS_H_INCLUDED */
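pattern_set hands the driver an array of (delta_t, brightness) steps plus a repeat count, and pattern_clear stops a running pattern; a driver points .pattern_set/.pattern_clear in its led_classdev at implementations like the skeleton below. The foo_* helpers, their register semantics, and the repeat convention are assumptions of this sketch:

/* Hypothetical hardware helpers, not part of this patch: */
static void foo_write_slot(struct led_classdev *cdev, u32 slot,
			   u32 delta_t, int brightness);
static int foo_start_engine(struct led_classdev *cdev, int repeat);
static int foo_stop_engine(struct led_classdev *cdev);

static int foo_pattern_set(struct led_classdev *cdev,
			   struct led_pattern *pattern, u32 len, int repeat)
{
	u32 i;

	for (i = 0; i < len; i++)
		foo_write_slot(cdev, i, pattern[i].delta_t,
			       pattern[i].brightness);

	/* Assumption in this sketch: repeat < 0 means "loop forever". */
	return foo_start_engine(cdev, repeat);
}

static int foo_pattern_clear(struct led_classdev *cdev)
{
	return foo_stop_engine(cdev);
}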
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index e9e0d1c7eaf5..2fdeac1a420d 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -86,8 +86,8 @@ struct nvm_chk_meta;
typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
-typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *,
- sector_t, int);
+typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
+ struct nvm_chk_meta *);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
@@ -305,6 +305,8 @@ struct nvm_rq {
u64 ppa_status; /* ppa media status */
int error;
+ int is_seq; /* Sequential hint flag. 1.2 only */
+
void *private;
};
@@ -318,6 +320,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
return rqdata + 1;
}
+static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
+{
+ return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+}
+
enum {
NVM_BLK_ST_FREE = 0x1, /* Free block */
NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
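rqd->ppa_list is only populated for vectored requests; a single-sector request keeps its one address in ppa_addr. nvm_rq_to_ppa_list() hides that split, so code walking a request's addresses can drop the usual nr_ppas == 1 special case, as in this sketch (kernel context assumed):

static void dump_rq_ppas(struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int i;

	for (i = 0; i < rqd->nr_ppas; i++)
		pr_debug("ppa[%d] = 0x%llx\n", i, ppa_list[i].ppa);
}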
@@ -485,6 +492,144 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
return l;
}
+static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
+ struct ppa_addr p)
+{
+ struct nvm_geo *geo = &dev->geo;
+ u64 caddr;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;
+
+ caddr = (u64)p.g.pg << ppaf->pg_offset;
+ caddr |= (u64)p.g.pl << ppaf->pln_offset;
+ caddr |= (u64)p.g.sec << ppaf->sec_offset;
+ } else {
+ caddr = p.m.sec;
+ }
+
+ return caddr;
+}
+
+static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
+ void *addrf, u32 ppa32)
+{
+ struct ppa_addr ppa64;
+
+ ppa64.ppa = 0;
+
+ if (ppa32 == -1) {
+ ppa64.ppa = ADDR_EMPTY;
+ } else if (ppa32 & (1U << 31)) {
+ ppa64.c.line = ppa32 & ((~0U) >> 1);
+ ppa64.c.is_cached = 1;
+ } else {
+ struct nvm_geo *geo = &dev->geo;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = addrf;
+
+ ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
+ ppaf->ch_offset;
+ ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
+ ppaf->lun_offset;
+ ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
+ ppaf->blk_offset;
+ ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
+ ppaf->pg_offset;
+ ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
+ ppaf->pln_offset;
+ ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
+ ppaf->sec_offset;
+ } else {
+ struct nvm_addrf *lbaf = addrf;
+
+ ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
+ lbaf->ch_offset;
+ ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
+ lbaf->lun_offset;
+ ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
+ lbaf->chk_offset;
+ ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
+ lbaf->sec_offset;
+ }
+ }
+
+ return ppa64;
+}
+
+static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
+ void *addrf, struct ppa_addr ppa64)
+{
+ u32 ppa32 = 0;
+
+ if (ppa64.ppa == ADDR_EMPTY) {
+ ppa32 = ~0U;
+ } else if (ppa64.c.is_cached) {
+ ppa32 |= ppa64.c.line;
+ ppa32 |= 1U << 31;
+ } else {
+ struct nvm_geo *geo = &dev->geo;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = addrf;
+
+ ppa32 |= ppa64.g.ch << ppaf->ch_offset;
+ ppa32 |= ppa64.g.lun << ppaf->lun_offset;
+ ppa32 |= ppa64.g.blk << ppaf->blk_offset;
+ ppa32 |= ppa64.g.pg << ppaf->pg_offset;
+ ppa32 |= ppa64.g.pl << ppaf->pln_offset;
+ ppa32 |= ppa64.g.sec << ppaf->sec_offset;
+ } else {
+ struct nvm_addrf *lbaf = addrf;
+
+ ppa32 |= ppa64.m.grp << lbaf->ch_offset;
+ ppa32 |= ppa64.m.pu << lbaf->lun_offset;
+ ppa32 |= ppa64.m.chk << lbaf->chk_offset;
+ ppa32 |= ppa64.m.sec << lbaf->sec_offset;
+ }
+ }
+
+ return ppa32;
+}
+
+static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
+ struct ppa_addr *ppa)
+{
+ struct nvm_geo *geo = &dev->geo;
+ int last = 0;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ int sec = ppa->g.sec;
+
+ sec++;
+ if (sec == geo->ws_min) {
+ int pg = ppa->g.pg;
+
+ sec = 0;
+ pg++;
+ if (pg == geo->num_pg) {
+ int pl = ppa->g.pl;
+
+ pg = 0;
+ pl++;
+ if (pl == geo->num_pln)
+ last = 1;
+
+ ppa->g.pl = pl;
+ }
+ ppa->g.pg = pg;
+ }
+ ppa->g.sec = sec;
+ } else {
+ ppa->m.sec++;
+ if (ppa->m.sec == geo->clba)
+ last = 1;
+ }
+
+ return last;
+}
+
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
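The 32-bit form packs either the cached-line encoding or the device's address-format fields into a u32, halving the in-memory footprint of translation tables. For any non-cached, non-empty address expressible in the device format, the two helpers are exact inverses; a sketch of that invariant, with WARN_ON() assumed from a kernel context:

static void ppa_roundtrip_check(struct nvm_dev *dev, void *addrf,
				struct ppa_addr ppa64)
{
	u32 ppa32 = nvm_ppa64_to_ppa32(dev, addrf, ppa64);

	WARN_ON(nvm_ppa32_to_ppa64(dev, addrf, ppa32).ppa != ppa64.ppa);
}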
@@ -493,9 +638,15 @@ typedef void (nvm_tgt_exit_fn)(void *, bool);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
+enum {
+ NVM_TGT_F_DEV_L2P = 0,
+ NVM_TGT_F_HOST_L2P = 1 << 0,
+};
+
struct nvm_tgt_type {
const char *name;
unsigned int version[3];
+ int flags;
/* target entry points */
nvm_tgt_make_rq_fn *make_rq;
@@ -524,18 +675,13 @@ extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);
-
-extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev,
- struct nvm_chk_meta *meta, struct ppa_addr ppa,
- int nchks);
-
-extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
+extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
+ int, struct nvm_chk_meta *);
+extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
int, int);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
extern void nvm_end_io(struct nvm_rq *);
-extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
-extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
#else /* CONFIG_NVM */
struct nvm_dev_ops;
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
new file mode 100644
index 000000000000..22443d7fb5cd
--- /dev/null
+++ b/include/linux/linkmode.h
@@ -0,0 +1,76 @@
+#ifndef __LINKMODE_H
+#define __LINKMODE_H
+
+#include <linux/bitmap.h>
+#include <linux/ethtool.h>
+#include <uapi/linux/ethtool.h>
+
+static inline void linkmode_zero(unsigned long *dst)
+{
+ bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
+{
+ bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_or(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline bool linkmode_empty(const unsigned long *src)
+{
+ return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
+{
+ __set_bit(nr, addr);
+}
+
+static inline void linkmode_set_bit_array(const int *array, int array_size,
+ unsigned long *addr)
+{
+ int i;
+
+ for (i = 0; i < array_size; i++)
+ linkmode_set_bit(array[i], addr);
+}
+
+static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
+{
+ __clear_bit(nr, addr);
+}
+
+static inline void linkmode_change_bit(int nr, volatile unsigned long *addr)
+{
+ __change_bit(nr, addr);
+}
+
+static inline int linkmode_test_bit(int nr, volatile unsigned long *addr)
+{
+ return test_bit(nr, addr);
+}
+
+static inline int linkmode_equal(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+#endif /* __LINKMODE_H */
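Each helper is simply the corresponding bitmap_*() call pinned to __ETHTOOL_LINK_MODE_MASK_NBITS, so PHY code stops repeating the mask width at every call site. A short usage sketch; __ETHTOOL_DECLARE_LINK_MODE_MASK() comes from the ethtool UAPI header:

__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);

static void linkmode_example(void)
{
	linkmode_zero(advertising);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
		pr_info("pause advertised\n");
}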
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b0d0b51c4d85..1fd82ff99c65 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -99,13 +99,8 @@ struct lock_class {
*/
unsigned int version;
- /*
- * Statistics counter:
- */
- unsigned long ops;
-
- const char *name;
int name_version;
+ const char *name;
#ifdef CONFIG_LOCK_STAT
unsigned long contention_point[LOCKSTAT_POINTS];
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 97a020c616ad..aaeb7fa24dc4 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -672,7 +672,7 @@
* Return 0 if permission is granted.
* @task_kill:
* Check permission before sending signal @sig to @p. @info can be NULL,
- * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
+ * the constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or
* SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
* from the kernel and should typically be permitted.
* SIGIO signals are handled separately by the send_sigiotask hook in
@@ -1606,7 +1606,7 @@ union security_list_options {
int (*task_setscheduler)(struct task_struct *p);
int (*task_getscheduler)(struct task_struct *p);
int (*task_movememory)(struct task_struct *p);
- int (*task_kill)(struct task_struct *p, struct siginfo *info,
+ int (*task_kill)(struct task_struct *p, struct kernel_siginfo *info,
int sig, const struct cred *cred);
int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
@@ -2039,6 +2039,18 @@ extern char *lsm_names;
extern void security_add_hooks(struct security_hook_list *hooks, int count,
char *lsm);
+struct lsm_info {
+ const char *name; /* Required. */
+ int (*init)(void); /* Required. */
+};
+
+extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
+
+#define DEFINE_LSM(lsm) \
+ static struct lsm_info __lsm_##lsm \
+ __used __section(.lsm_info.init) \
+ __aligned(sizeof(unsigned long))
+
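DEFINE_LSM() places an lsm_info record in the new .lsm_info.init section, so early boot can initialise every built-in module by walking __start_lsm_info..__end_lsm_info instead of each LSM hand-registering. A sketch of the intended usage; the "foo" module and its init body are hypothetical:

static int __init foo_lsm_init(void)
{
	pr_info("foo: LSM hooks would be registered here\n");
	return 0;
}

DEFINE_LSM(foo) = {
	.name = "foo",
	.init = foo_lsm_init,
};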
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
/*
* Assuring the safety of deleting a security module is up to
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index f91f9e763557..0ac69ddf5fc4 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -53,11 +53,16 @@ struct vmem_altmap {
* wakeup event whenever a page is unpinned and becomes idle. This
* wakeup is used to coordinate physical address space management (ex:
* fs truncate/hole punch) vs pinned pages (ex: device dma).
+ *
+ * MEMORY_DEVICE_PCI_P2PDMA:
+ * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
+ * transactions.
*/
enum memory_type {
MEMORY_DEVICE_PRIVATE = 1,
MEMORY_DEVICE_PUBLIC,
MEMORY_DEVICE_FS_DAX,
+ MEMORY_DEVICE_PCI_P2PDMA,
};
/*
@@ -120,6 +125,7 @@ struct dev_pagemap {
struct device *dev;
void *data;
enum memory_type type;
+ u64 pci_p2pdma_bus_offset;
};
#ifdef CONFIG_ZONE_DEVICE
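The new memory type tags ZONE_DEVICE pages that live in a PCI BAR, and pci_p2pdma_bus_offset records the distance between the BAR's bus address and its CPU physical address, which need not be equal. A sketch of the translation this enables; the field access path and the sign convention are assumptions of this example, not taken from this patch:

static u64 p2pdma_page_to_bus(struct page *page)
{
	struct dev_pagemap *pgmap = page->pgmap;

	/* Assumed convention: bus address = CPU physical + bus offset. */
	return page_to_phys(page) + pgmap->pci_p2pdma_bus_offset;
}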
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 6e1ab9bead28..5fd0e429f472 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -2132,6 +2132,7 @@ struct ec_response_get_next_event_v1 {
/* Switches */
#define EC_MKBP_LID_OPEN 0
#define EC_MKBP_TABLET_MODE 1
+#define EC_MKBP_BASE_ATTACHED 2
/*****************************************************************************/
/* Temperature sensor commands */
@@ -3102,6 +3103,16 @@ struct ec_params_usb_pd_info_request {
uint8_t port;
} __packed;
+/*
+ * This command returns the number of USB PD charge ports plus the
+ * number of dedicated ports present.
+ * EC_CMD_USB_PD_PORTS does NOT include the dedicated ports.
+ */
+#define EC_CMD_CHARGE_PORT_COUNT 0x0105
+struct ec_response_charge_port_count {
+ uint8_t port_count;
+} __packed;
+
/* Read USB-PD Device discovery info */
#define EC_CMD_USB_PD_DISCOVERY 0x0113
struct ec_params_usb_pd_discovery_entry {
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 8a125701ef7b..50bed4f89c1a 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -21,7 +21,7 @@
/*
* Regulator configuration
*/
-/* DA9063 regulator IDs */
+/* DA9063 and DA9063L regulator IDs */
enum {
/* BUCKs */
DA9063_ID_BCORE1,
@@ -37,18 +37,20 @@ enum {
DA9063_ID_BMEM_BIO_MERGED,
/* When two BUCKs are merged, they cannot be reused separately */
- /* LDOs */
+ /* LDOs on both DA9063 and DA9063L */
+ DA9063_ID_LDO3,
+ DA9063_ID_LDO7,
+ DA9063_ID_LDO8,
+ DA9063_ID_LDO9,
+ DA9063_ID_LDO11,
+
+ /* DA9063-only LDOs */
DA9063_ID_LDO1,
DA9063_ID_LDO2,
- DA9063_ID_LDO3,
DA9063_ID_LDO4,
DA9063_ID_LDO5,
DA9063_ID_LDO6,
- DA9063_ID_LDO7,
- DA9063_ID_LDO8,
- DA9063_ID_LDO9,
DA9063_ID_LDO10,
- DA9063_ID_LDO11,
};
/* Regulators platform data */
diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h
new file mode 100644
index 000000000000..ab16ad283def
--- /dev/null
+++ b/include/linux/mfd/ingenic-tcu.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for the Ingenic JZ47xx TCU driver
+ */
+#ifndef __LINUX_MFD_INGENIC_TCU_H_
+#define __LINUX_MFD_INGENIC_TCU_H_
+
+#include <linux/bitops.h>
+
+#define TCU_REG_WDT_TDR 0x00
+#define TCU_REG_WDT_TCER 0x04
+#define TCU_REG_WDT_TCNT 0x08
+#define TCU_REG_WDT_TCSR 0x0c
+#define TCU_REG_TER 0x10
+#define TCU_REG_TESR 0x14
+#define TCU_REG_TECR 0x18
+#define TCU_REG_TSR 0x1c
+#define TCU_REG_TFR 0x20
+#define TCU_REG_TFSR 0x24
+#define TCU_REG_TFCR 0x28
+#define TCU_REG_TSSR 0x2c
+#define TCU_REG_TMR 0x30
+#define TCU_REG_TMSR 0x34
+#define TCU_REG_TMCR 0x38
+#define TCU_REG_TSCR 0x3c
+#define TCU_REG_TDFR0 0x40
+#define TCU_REG_TDHR0 0x44
+#define TCU_REG_TCNT0 0x48
+#define TCU_REG_TCSR0 0x4c
+#define TCU_REG_OST_DR 0xe0
+#define TCU_REG_OST_CNTL 0xe4
+#define TCU_REG_OST_CNTH 0xe8
+#define TCU_REG_OST_TCSR 0xec
+#define TCU_REG_TSTR 0xf0
+#define TCU_REG_TSTSR 0xf4
+#define TCU_REG_TSTCR 0xf8
+#define TCU_REG_OST_CNTHBUF 0xfc
+
+#define TCU_TCSR_RESERVED_BITS 0x3f
+#define TCU_TCSR_PARENT_CLOCK_MASK 0x07
+#define TCU_TCSR_PRESCALE_LSB 3
+#define TCU_TCSR_PRESCALE_MASK 0x38
+
+#define TCU_TCSR_PWM_SD BIT(9) /* 0: shut down abruptly, 1: gracefully */
+#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */
+#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */
+
+#define TCU_WDT_TCER_TCEN BIT(0) /* Watchdog timer enable */
+
+#define TCU_CHANNEL_STRIDE 0x10
+#define TCU_REG_TDFRc(c) (TCU_REG_TDFR0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TDHRc(c) (TCU_REG_TDHR0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TCNTc(c) (TCU_REG_TCNT0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TCSRc(c) (TCU_REG_TCSR0 + ((c) * TCU_CHANNEL_STRIDE))
+
+#endif /* __LINUX_MFD_INGENIC_TCU_H_ */
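All TCU channels share one register layout at a fixed 0x10 stride, so the TCU_REG_*c() macros reduce per-channel addressing to base-plus-stride arithmetic; channel 2's TCSR, for instance, sits at 0x4c + 2 * 0x10 = 0x6c. A one-line accessor sketch, kernel context assumed:

static u32 tcu_read_tcsr(void __iomem *base, unsigned int channel)
{
	return readl(base + TCU_REG_TCSRc(channel));
}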
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
index 439a7a617bc9..317e8608cf41 100644
--- a/include/linux/mfd/intel_msic.h
+++ b/include/linux/mfd/intel_msic.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * include/linux/mfd/intel_msic.h - Core interface for Intel MSIC
+ * Core interface for Intel MSIC
*
* Copyright (C) 2011, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_INTEL_MSIC_H__
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index 5aacdb017a9f..ed1dfba5e5f9 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * intel_soc_pmic.h - Intel SoC PMIC Driver
+ * Intel SoC PMIC Driver
*
* Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
@@ -25,6 +17,7 @@ struct intel_soc_pmic {
int irq;
struct regmap *regmap;
struct regmap_irq_chip_data *irq_chip_data;
+ struct regmap_irq_chip_data *irq_chip_data_pwrbtn;
struct regmap_irq_chip_data *irq_chip_data_tmu;
struct regmap_irq_chip_data *irq_chip_data_bcu;
struct regmap_irq_chip_data *irq_chip_data_adc;
diff --git a/include/linux/mfd/intel_soc_pmic_bxtwc.h b/include/linux/mfd/intel_soc_pmic_bxtwc.h
index 0c351bc85d2d..9be566cc58c6 100644
--- a/include/linux/mfd/intel_soc_pmic_bxtwc.h
+++ b/include/linux/mfd/intel_soc_pmic_bxtwc.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header file for Intel Broxton Whiskey Cove PMIC
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __INTEL_BXTWC_H__
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
index c332681848ef..fe69c0f4398f 100644
--- a/include/linux/mfd/madera/core.h
+++ b/include/linux/mfd/madera/core.h
@@ -148,6 +148,7 @@ struct snd_soc_dapm_context;
* @internal_dcvdd: true if DCVDD is supplied from the internal LDO1
* @pdata: our pdata
* @irq_dev: the irqchip child driver device
+ * @irq_data: pointer to irqchip data for the child irqchip driver
* @irq: host irq number from SPI or I2C configuration
* @out_clamp: indicates output clamp state for each analogue output
* @out_shorted: indicates short circuit state for each analogue output
@@ -175,6 +176,7 @@ struct madera {
struct madera_pdata pdata;
struct device *irq_dev;
+ struct regmap_irq_chip_data *irq_data;
int irq;
unsigned int num_micbias;
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index 0b311f39c8f4..8dc852402dbb 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -24,7 +24,6 @@
struct gpio_desc;
struct pinctrl_map;
-struct madera_irqchip_pdata;
struct madera_codec_pdata;
/**
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index df75234f979d..a21374f8ad26 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip
*
* Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MAX14577_PRIVATE_H__
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index d81b52bb8bee..8b3ef891ba42 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max14577.h - Driver for the Maxim 14577/77836
*
@@ -5,16 +6,6 @@
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This driver is based on max8997.h
*
* MAX14577 has MUIC, Charger devices.
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 643dae777b43..833e578e051e 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77686-private.h - Voltage regulator driver for the Maxim 77686/802
*
* Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX77686_PRIV_H
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index d4b72d519115..d0fb510875e6 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77686.h - Driver for the Maxim 77686/802
*
* Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8997.h
*
* MAX77686 has PMIC, RTC devices.
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
index 095b121aa725..a5bce099f1ed 100644
--- a/include/linux/mfd/max77693-common.h
+++ b/include/linux/mfd/max77693-common.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Common data shared between Maxim 77693 and 77843 drivers
*
* Copyright (C) 2015 Samsung Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_MFD_MAX77693_COMMON_H
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 3c7a63b98ad6..e798c81aec31 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77693-private.h - Voltage regulator driver for the Maxim 77693
*
@@ -5,20 +6,6 @@
* SangYoung Son <hello.son@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX77693_PRIV_H
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index d450f687301b..c67c16ba8649 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77693.h - Driver for the Maxim 77693
*
@@ -6,20 +7,6 @@
*
* This program is not provided / owned by Maxim Integrated Products.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8997.h
*
* MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices.
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index b8908bf8d315..0bc7454c4dbe 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Common variables for the Maxim MAX77843 driver
*
* Copyright (C) 2015 Samsung Electronics
* Author: Jaewon Kim <jaewon02.kim@samsung.com>
* Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __MAX77843_PRIVATE_H_
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 78c76cd4d37b..a10cd6945232 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8997-private.h - Voltage regulator driver for the Maxim 8997
*
* Copyright (C) 2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8997_PRIV_H
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index cf815577bd68..e955e2f0a2cc 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8997.h - Driver for the Maxim 8997/8966
*
* Copyright (C) 2009-2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8998.h
*
* MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices.
@@ -178,7 +165,6 @@ struct max8997_led_platform_data {
struct max8997_platform_data {
/* IRQ */
int ono;
- int wakeup;
/* ---- PMIC ---- */
struct max8997_regulator_data *regulators;
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index d68ada502ff3..6deb5f577602 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8998-private.h - Voltage regulator driver for the Maxim 8998
*
* Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8998_PRIV_H
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index e3956a654cbc..061af220dcd3 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8998.h - Voltage regulator driver for the Maxim 8998
*
* Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8998_H
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 54a3cd808f9e..2ad9bdc0a5ec 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -249,6 +249,7 @@ struct mc13xxx_platform_data {
#define MC13XXX_ADC0_TSMOD0 (1 << 12)
#define MC13XXX_ADC0_TSMOD1 (1 << 13)
#define MC13XXX_ADC0_TSMOD2 (1 << 14)
+#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15)
#define MC13XXX_ADC0_ADINC1 (1 << 16)
#define MC13XXX_ADC0_ADINC2 (1 << 17)
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
index a528747f8aed..fd194bfc836f 100644
--- a/include/linux/mfd/rohm-bd718x7.h
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -1,112 +1,127 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright (C) 2018 ROHM Semiconductors */
-#ifndef __LINUX_MFD_BD71837_H__
-#define __LINUX_MFD_BD71837_H__
+#ifndef __LINUX_MFD_BD718XX_H__
+#define __LINUX_MFD_BD718XX_H__
#include <linux/regmap.h>
enum {
- BD71837_BUCK1 = 0,
- BD71837_BUCK2,
- BD71837_BUCK3,
- BD71837_BUCK4,
- BD71837_BUCK5,
- BD71837_BUCK6,
- BD71837_BUCK7,
- BD71837_BUCK8,
- BD71837_LDO1,
- BD71837_LDO2,
- BD71837_LDO3,
- BD71837_LDO4,
- BD71837_LDO5,
- BD71837_LDO6,
- BD71837_LDO7,
- BD71837_REGULATOR_CNT,
+ BD718XX_TYPE_BD71837 = 0,
+ BD718XX_TYPE_BD71847,
+ BD718XX_TYPE_AMOUNT
};
-#define BD71837_BUCK1_VOLTAGE_NUM 0x40
-#define BD71837_BUCK2_VOLTAGE_NUM 0x40
-#define BD71837_BUCK3_VOLTAGE_NUM 0x40
-#define BD71837_BUCK4_VOLTAGE_NUM 0x40
+enum {
+ BD718XX_BUCK1 = 0,
+ BD718XX_BUCK2,
+ BD718XX_BUCK3,
+ BD718XX_BUCK4,
+ BD718XX_BUCK5,
+ BD718XX_BUCK6,
+ BD718XX_BUCK7,
+ BD718XX_BUCK8,
+ BD718XX_LDO1,
+ BD718XX_LDO2,
+ BD718XX_LDO3,
+ BD718XX_LDO4,
+ BD718XX_LDO5,
+ BD718XX_LDO6,
+ BD718XX_LDO7,
+ BD718XX_REGULATOR_AMOUNT,
+};
+
+/* Common voltage configurations */
+#define BD718XX_DVS_BUCK_VOLTAGE_NUM 0x3D
+#define BD718XX_4TH_NODVS_BUCK_VOLTAGE_NUM 0x3D
+
+#define BD718XX_LDO1_VOLTAGE_NUM 0x08
+#define BD718XX_LDO2_VOLTAGE_NUM 0x02
+#define BD718XX_LDO3_VOLTAGE_NUM 0x10
+#define BD718XX_LDO4_VOLTAGE_NUM 0x0A
+#define BD718XX_LDO6_VOLTAGE_NUM 0x0A
-#define BD71837_BUCK5_VOLTAGE_NUM 0x08
+/* BD71837 specific voltage configurations */
+#define BD71837_BUCK5_VOLTAGE_NUM 0x10
#define BD71837_BUCK6_VOLTAGE_NUM 0x04
#define BD71837_BUCK7_VOLTAGE_NUM 0x08
-#define BD71837_BUCK8_VOLTAGE_NUM 0x40
-
-#define BD71837_LDO1_VOLTAGE_NUM 0x04
-#define BD71837_LDO2_VOLTAGE_NUM 0x02
-#define BD71837_LDO3_VOLTAGE_NUM 0x10
-#define BD71837_LDO4_VOLTAGE_NUM 0x10
#define BD71837_LDO5_VOLTAGE_NUM 0x10
-#define BD71837_LDO6_VOLTAGE_NUM 0x10
#define BD71837_LDO7_VOLTAGE_NUM 0x10
+/* BD71847 specific voltage configurations */
+#define BD71847_BUCK3_VOLTAGE_NUM 0x18
+#define BD71847_BUCK4_VOLTAGE_NUM 0x08
+#define BD71847_LDO5_VOLTAGE_NUM 0x20
+
+/* Registers specific to BD71837 */
+enum {
+ BD71837_REG_BUCK3_CTRL = 0x07,
+ BD71837_REG_BUCK4_CTRL = 0x08,
+ BD71837_REG_BUCK3_VOLT_RUN = 0x12,
+ BD71837_REG_BUCK4_VOLT_RUN = 0x13,
+ BD71837_REG_LDO7_VOLT = 0x1E,
+};
+
+/* Registers common for BD71837 and BD71847 */
enum {
- BD71837_REG_REV = 0x00,
- BD71837_REG_SWRESET = 0x01,
- BD71837_REG_I2C_DEV = 0x02,
- BD71837_REG_PWRCTRL0 = 0x03,
- BD71837_REG_PWRCTRL1 = 0x04,
- BD71837_REG_BUCK1_CTRL = 0x05,
- BD71837_REG_BUCK2_CTRL = 0x06,
- BD71837_REG_BUCK3_CTRL = 0x07,
- BD71837_REG_BUCK4_CTRL = 0x08,
- BD71837_REG_BUCK5_CTRL = 0x09,
- BD71837_REG_BUCK6_CTRL = 0x0A,
- BD71837_REG_BUCK7_CTRL = 0x0B,
- BD71837_REG_BUCK8_CTRL = 0x0C,
- BD71837_REG_BUCK1_VOLT_RUN = 0x0D,
- BD71837_REG_BUCK1_VOLT_IDLE = 0x0E,
- BD71837_REG_BUCK1_VOLT_SUSP = 0x0F,
- BD71837_REG_BUCK2_VOLT_RUN = 0x10,
- BD71837_REG_BUCK2_VOLT_IDLE = 0x11,
- BD71837_REG_BUCK3_VOLT_RUN = 0x12,
- BD71837_REG_BUCK4_VOLT_RUN = 0x13,
- BD71837_REG_BUCK5_VOLT = 0x14,
- BD71837_REG_BUCK6_VOLT = 0x15,
- BD71837_REG_BUCK7_VOLT = 0x16,
- BD71837_REG_BUCK8_VOLT = 0x17,
- BD71837_REG_LDO1_VOLT = 0x18,
- BD71837_REG_LDO2_VOLT = 0x19,
- BD71837_REG_LDO3_VOLT = 0x1A,
- BD71837_REG_LDO4_VOLT = 0x1B,
- BD71837_REG_LDO5_VOLT = 0x1C,
- BD71837_REG_LDO6_VOLT = 0x1D,
- BD71837_REG_LDO7_VOLT = 0x1E,
- BD71837_REG_TRANS_COND0 = 0x1F,
- BD71837_REG_TRANS_COND1 = 0x20,
- BD71837_REG_VRFAULTEN = 0x21,
- BD71837_REG_MVRFLTMASK0 = 0x22,
- BD71837_REG_MVRFLTMASK1 = 0x23,
- BD71837_REG_MVRFLTMASK2 = 0x24,
- BD71837_REG_RCVCFG = 0x25,
- BD71837_REG_RCVNUM = 0x26,
- BD71837_REG_PWRONCONFIG0 = 0x27,
- BD71837_REG_PWRONCONFIG1 = 0x28,
- BD71837_REG_RESETSRC = 0x29,
- BD71837_REG_MIRQ = 0x2A,
- BD71837_REG_IRQ = 0x2B,
- BD71837_REG_IN_MON = 0x2C,
- BD71837_REG_POW_STATE = 0x2D,
- BD71837_REG_OUT32K = 0x2E,
- BD71837_REG_REGLOCK = 0x2F,
- BD71837_REG_OTPVER = 0xFF,
- BD71837_MAX_REGISTER = 0x100,
+ BD718XX_REG_REV = 0x00,
+ BD718XX_REG_SWRESET = 0x01,
+ BD718XX_REG_I2C_DEV = 0x02,
+ BD718XX_REG_PWRCTRL0 = 0x03,
+ BD718XX_REG_PWRCTRL1 = 0x04,
+ BD718XX_REG_BUCK1_CTRL = 0x05,
+ BD718XX_REG_BUCK2_CTRL = 0x06,
+ BD718XX_REG_1ST_NODVS_BUCK_CTRL = 0x09,
+ BD718XX_REG_2ND_NODVS_BUCK_CTRL = 0x0A,
+ BD718XX_REG_3RD_NODVS_BUCK_CTRL = 0x0B,
+ BD718XX_REG_4TH_NODVS_BUCK_CTRL = 0x0C,
+ BD718XX_REG_BUCK1_VOLT_RUN = 0x0D,
+ BD718XX_REG_BUCK1_VOLT_IDLE = 0x0E,
+ BD718XX_REG_BUCK1_VOLT_SUSP = 0x0F,
+ BD718XX_REG_BUCK2_VOLT_RUN = 0x10,
+ BD718XX_REG_BUCK2_VOLT_IDLE = 0x11,
+ BD718XX_REG_1ST_NODVS_BUCK_VOLT = 0x14,
+ BD718XX_REG_2ND_NODVS_BUCK_VOLT = 0x15,
+ BD718XX_REG_3RD_NODVS_BUCK_VOLT = 0x16,
+ BD718XX_REG_4TH_NODVS_BUCK_VOLT = 0x17,
+ BD718XX_REG_LDO1_VOLT = 0x18,
+ BD718XX_REG_LDO2_VOLT = 0x19,
+ BD718XX_REG_LDO3_VOLT = 0x1A,
+ BD718XX_REG_LDO4_VOLT = 0x1B,
+ BD718XX_REG_LDO5_VOLT = 0x1C,
+ BD718XX_REG_LDO6_VOLT = 0x1D,
+ BD718XX_REG_TRANS_COND0 = 0x1F,
+ BD718XX_REG_TRANS_COND1 = 0x20,
+ BD718XX_REG_VRFAULTEN = 0x21,
+ BD718XX_REG_MVRFLTMASK0 = 0x22,
+ BD718XX_REG_MVRFLTMASK1 = 0x23,
+ BD718XX_REG_MVRFLTMASK2 = 0x24,
+ BD718XX_REG_RCVCFG = 0x25,
+ BD718XX_REG_RCVNUM = 0x26,
+ BD718XX_REG_PWRONCONFIG0 = 0x27,
+ BD718XX_REG_PWRONCONFIG1 = 0x28,
+ BD718XX_REG_RESETSRC = 0x29,
+ BD718XX_REG_MIRQ = 0x2A,
+ BD718XX_REG_IRQ = 0x2B,
+ BD718XX_REG_IN_MON = 0x2C,
+ BD718XX_REG_POW_STATE = 0x2D,
+ BD718XX_REG_OUT32K = 0x2E,
+ BD718XX_REG_REGLOCK = 0x2F,
+ BD718XX_REG_OTPVER = 0xFF,
+ BD718XX_MAX_REGISTER = 0x100,
};
#define REGLOCK_PWRSEQ 0x1
#define REGLOCK_VREG 0x10
/* Generic BUCK control masks */
-#define BD71837_BUCK_SEL 0x02
-#define BD71837_BUCK_EN 0x01
-#define BD71837_BUCK_RUN_ON 0x04
+#define BD718XX_BUCK_SEL 0x02
+#define BD718XX_BUCK_EN 0x01
+#define BD718XX_BUCK_RUN_ON 0x04
/* Generic LDO masks */
-#define BD71837_LDO_SEL 0x80
-#define BD71837_LDO_EN 0x40
+#define BD718XX_LDO_SEL 0x80
+#define BD718XX_LDO_EN 0x40
/* BD71837 BUCK ramp rate CTRL reg bits */
#define BUCK_RAMPRATE_MASK 0xC0
@@ -115,51 +130,64 @@ enum {
#define BUCK_RAMPRATE_2P50MV 0x2
#define BUCK_RAMPRATE_1P25MV 0x3
-/* BD71837_REG_BUCK1_VOLT_RUN bits */
-#define BUCK1_RUN_MASK 0x3F
-#define BUCK1_RUN_DEFAULT 0x14
-
-/* BD71837_REG_BUCK1_VOLT_SUSP bits */
-#define BUCK1_SUSP_MASK 0x3F
-#define BUCK1_SUSP_DEFAULT 0x14
-
-/* BD71837_REG_BUCK1_VOLT_IDLE bits */
-#define BUCK1_IDLE_MASK 0x3F
-#define BUCK1_IDLE_DEFAULT 0x14
-
-/* BD71837_REG_BUCK2_VOLT_RUN bits */
-#define BUCK2_RUN_MASK 0x3F
-#define BUCK2_RUN_DEFAULT 0x1E
-
-/* BD71837_REG_BUCK2_VOLT_IDLE bits */
-#define BUCK2_IDLE_MASK 0x3F
-#define BUCK2_IDLE_DEFAULT 0x14
-
-/* BD71837_REG_BUCK3_VOLT_RUN bits */
-#define BUCK3_RUN_MASK 0x3F
-#define BUCK3_RUN_DEFAULT 0x1E
-
-/* BD71837_REG_BUCK4_VOLT_RUN bits */
-#define BUCK4_RUN_MASK 0x3F
-#define BUCK4_RUN_DEFAULT 0x1E
-
-/* BD71837_REG_BUCK5_VOLT bits */
-#define BUCK5_MASK 0x07
-#define BUCK5_DEFAULT 0x02
-
-/* BD71837_REG_BUCK6_VOLT bits */
-#define BUCK6_MASK 0x03
-#define BUCK6_DEFAULT 0x03
-
-/* BD71837_REG_BUCK7_VOLT bits */
-#define BUCK7_MASK 0x07
-#define BUCK7_DEFAULT 0x03
-
-/* BD71837_REG_BUCK8_VOLT bits */
-#define BUCK8_MASK 0x3F
-#define BUCK8_DEFAULT 0x1E
-
-/* BD71837_REG_IRQ bits */
+#define DVS_BUCK_RUN_MASK 0x3F
+#define DVS_BUCK_SUSP_MASK 0x3F
+#define DVS_BUCK_IDLE_MASK 0x3F
+
+#define BD718XX_1ST_NODVS_BUCK_MASK 0x07
+#define BD718XX_3RD_NODVS_BUCK_MASK 0x07
+#define BD718XX_4TH_NODVS_BUCK_MASK 0x3F
+
+#define BD71847_BUCK3_MASK 0x07
+#define BD71847_BUCK3_RANGE_MASK 0xC0
+#define BD71847_BUCK4_MASK 0x03
+#define BD71847_BUCK4_RANGE_MASK 0x40
+
+#define BD71837_BUCK5_MASK 0x07
+#define BD71837_BUCK5_RANGE_MASK 0x80
+#define BD71837_BUCK6_MASK 0x03
+
+#define BD718XX_LDO1_MASK 0x03
+#define BD718XX_LDO1_RANGE_MASK 0x20
+#define BD718XX_LDO2_MASK 0x20
+#define BD718XX_LDO3_MASK 0x0F
+#define BD718XX_LDO4_MASK 0x0F
+#define BD718XX_LDO6_MASK 0x0F
+
+#define BD71837_LDO5_MASK 0x0F
+#define BD71847_LDO5_MASK 0x0F
+#define BD71847_LDO5_RANGE_MASK 0x20
+
+#define BD71837_LDO7_MASK 0x0F
+
+/* BD718XX Voltage monitoring masks */
+#define BD718XX_BUCK1_VRMON80 0x1
+#define BD718XX_BUCK1_VRMON130 0x2
+#define BD718XX_BUCK2_VRMON80 0x4
+#define BD718XX_BUCK2_VRMON130 0x8
+#define BD718XX_1ST_NODVS_BUCK_VRMON80 0x1
+#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2
+#define BD718XX_2ND_NODVS_BUCK_VRMON80 0x4
+#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8
+#define BD718XX_3RD_NODVS_BUCK_VRMON80 0x10
+#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20
+#define BD718XX_4TH_NODVS_BUCK_VRMON80 0x40
+#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80
+#define BD718XX_LDO1_VRMON80 0x1
+#define BD718XX_LDO2_VRMON80 0x2
+#define BD718XX_LDO3_VRMON80 0x4
+#define BD718XX_LDO4_VRMON80 0x8
+#define BD718XX_LDO5_VRMON80 0x10
+#define BD718XX_LDO6_VRMON80 0x20
+
+/* BD71837 specific voltage monitoring masks */
+#define BD71837_BUCK3_VRMON80 0x10
+#define BD71837_BUCK3_VRMON130 0x20
+#define BD71837_BUCK4_VRMON80 0x40
+#define BD71837_BUCK4_VRMON130 0x80
+#define BD71837_LDO7_VRMON80 0x40
+
+/* BD718XX_REG_IRQ bits */
#define IRQ_SWRST 0x40
#define IRQ_PWRON_S 0x20
#define IRQ_PWRON_L 0x10
@@ -168,52 +196,31 @@ enum {
#define IRQ_ON_REQ 0x02
#define IRQ_STBY_REQ 0x01
-/* BD71837_REG_OUT32K bits */
-#define BD71837_OUT32K_EN 0x01
+/* BD718XX_REG_OUT32K bits */
+#define BD718XX_OUT32K_EN 0x01
-/* BD71837 gated clock rate */
-#define BD71837_CLK_RATE 32768
+/* BD718XX gated clock rate */
+#define BD718XX_CLK_RATE 32768
-/* ROHM BD71837 irqs */
+/* ROHM BD718XX irqs */
enum {
- BD71837_INT_STBY_REQ,
- BD71837_INT_ON_REQ,
- BD71837_INT_WDOG,
- BD71837_INT_PWRBTN,
- BD71837_INT_PWRBTN_L,
- BD71837_INT_PWRBTN_S,
- BD71837_INT_SWRST
+ BD718XX_INT_STBY_REQ,
+ BD718XX_INT_ON_REQ,
+ BD718XX_INT_WDOG,
+ BD718XX_INT_PWRBTN,
+ BD718XX_INT_PWRBTN_L,
+ BD718XX_INT_PWRBTN_S,
+ BD718XX_INT_SWRST
};
-/* ROHM BD71837 interrupt masks */
-#define BD71837_INT_SWRST_MASK 0x40
-#define BD71837_INT_PWRBTN_S_MASK 0x20
-#define BD71837_INT_PWRBTN_L_MASK 0x10
-#define BD71837_INT_PWRBTN_MASK 0x8
-#define BD71837_INT_WDOG_MASK 0x4
-#define BD71837_INT_ON_REQ_MASK 0x2
-#define BD71837_INT_STBY_REQ_MASK 0x1
-
-/* BD71837_REG_LDO1_VOLT bits */
-#define LDO1_MASK 0x03
-
-/* BD71837_REG_LDO1_VOLT bits */
-#define LDO2_MASK 0x20
-
-/* BD71837_REG_LDO3_VOLT bits */
-#define LDO3_MASK 0x0F
-
-/* BD71837_REG_LDO4_VOLT bits */
-#define LDO4_MASK 0x0F
-
-/* BD71837_REG_LDO5_VOLT bits */
-#define LDO5_MASK 0x0F
-
-/* BD71837_REG_LDO6_VOLT bits */
-#define LDO6_MASK 0x0F
-
-/* BD71837_REG_LDO7_VOLT bits */
-#define LDO7_MASK 0x0F
+/* ROHM BD718XX interrupt masks */
+#define BD718XX_INT_SWRST_MASK 0x40
+#define BD718XX_INT_PWRBTN_S_MASK 0x20
+#define BD718XX_INT_PWRBTN_L_MASK 0x10
+#define BD718XX_INT_PWRBTN_MASK 0x8
+#define BD718XX_INT_WDOG_MASK 0x4
+#define BD718XX_INT_ON_REQ_MASK 0x2
+#define BD718XX_INT_STBY_REQ_MASK 0x1
/* Register write induced reset settings */
@@ -223,13 +230,13 @@ enum {
* write 1 to it we will trigger the action. So always write 0 to it when
* changing SWRESET action - no matter what we read from it.
*/
-#define BD71837_SWRESET_TYPE_MASK 7
-#define BD71837_SWRESET_TYPE_DISABLED 0
-#define BD71837_SWRESET_TYPE_COLD 4
-#define BD71837_SWRESET_TYPE_WARM 6
+#define BD718XX_SWRESET_TYPE_MASK 7
+#define BD718XX_SWRESET_TYPE_DISABLED 0
+#define BD718XX_SWRESET_TYPE_COLD 4
+#define BD718XX_SWRESET_TYPE_WARM 6
-#define BD71837_SWRESET_RESET_MASK 1
-#define BD71837_SWRESET_RESET 1
+#define BD718XX_SWRESET_RESET_MASK 1
+#define BD718XX_SWRESET_RESET 1
/* Poweroff state transition conditions */
@@ -314,10 +321,10 @@ enum {
BD718XX_PWRBTN_LONG_PRESS_15S
};
-struct bd71837_pmic;
-struct bd71837_clk;
+struct bd718xx_clk;
-struct bd71837 {
+struct bd718xx {
+ unsigned int chip_type;
struct device *dev;
struct regmap *regmap;
unsigned long int id;
@@ -325,8 +332,7 @@ struct bd71837 {
int chip_irq;
struct regmap_irq_chip_data *irq_data;
- struct bd71837_pmic *pmic;
- struct bd71837_clk *clk;
+ struct bd718xx_clk *clk;
};
-#endif /* __LINUX_MFD_BD71837_H__ */
+#endif /* __LINUX_MFD_BD718XX_H__ */
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 28f4ae76271d..3ca17eb89aa2 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * core.h
- *
- * copyright (c) 2011 Samsung Electronics Co., Ltd
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_SEC_CORE_H
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index 667aa40486dd..6cfe4201a106 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -1,13 +1,7 @@
-/* irq.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_SEC_IRQ_H
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 9ed2871ea335..0204decfc9aa 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -1,18 +1,7 @@
-/* rtc.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_SEC_RTC_H
diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h
index 2766108bca2f..0762e9de6f2f 100644
--- a/include/linux/mfd/samsung/s2mpa01.h
+++ b/include/linux/mfd/samsung/s2mpa01.h
@@ -1,12 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S2MPA01_H
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index 2c14eeca46f0..6e7668a389a1 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps11.h
- *
* Copyright (c) 2012 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S2MPS11_H
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
index 239e977ba45d..b96d8a11dcd3 100644
--- a/include/linux/mfd/samsung/s2mps13.h
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps13.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPS13_H
diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h
index c92f4782afb5..f4afa0cfc24f 100644
--- a/include/linux/mfd/samsung/s2mps14.h
+++ b/include/linux/mfd/samsung/s2mps14.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps14.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPS14_H
diff --git a/include/linux/mfd/samsung/s2mps15.h b/include/linux/mfd/samsung/s2mps15.h
index 36d35287c3c0..eac6bf74b72e 100644
--- a/include/linux/mfd/samsung/s2mps15.h
+++ b/include/linux/mfd/samsung/s2mps15.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_MFD_S2MPS15_H
diff --git a/include/linux/mfd/samsung/s2mpu02.h b/include/linux/mfd/samsung/s2mpu02.h
index 47ae9bc583a7..76cd5380cf0f 100644
--- a/include/linux/mfd/samsung/s2mpu02.h
+++ b/include/linux/mfd/samsung/s2mpu02.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mpu02.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPU02_H
diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h
index e025418e5589..c534f086ca16 100644
--- a/include/linux/mfd/samsung/s5m8763.h
+++ b/include/linux/mfd/samsung/s5m8763.h
@@ -1,13 +1,7 @@
-/* s5m8763.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S5M8763_H
diff --git a/include/linux/mfd/samsung/s5m8767.h b/include/linux/mfd/samsung/s5m8767.h
index 243b58fec33d..704f8d80e96e 100644
--- a/include/linux/mfd/samsung/s5m8767.h
+++ b/include/linux/mfd/samsung/s5m8767.h
@@ -1,13 +1,7 @@
-/* s5m8767.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S5M8767_H
diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h
index 09d5f30384e5..1ef51ed36be5 100644
--- a/include/linux/mfd/ti-lmu.h
+++ b/include/linux/mfd/ti-lmu.h
@@ -16,6 +16,7 @@
#include <linux/gpio.h>
#include <linux/notifier.h>
#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
/* Notifier event */
#define LMU_EVENT_MONITOR_DONE 0x01
@@ -81,7 +82,7 @@ enum lm363x_regulator_id {
struct ti_lmu {
struct device *dev;
struct regmap *regmap;
- int en_gpio;
+ struct gpio_desc *en_gpio;
struct blocking_notifier_head notifier;
};
#endif
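
With en_gpio now a struct gpio_desc * (and gpio/consumer.h pulled in above), the LMU core moves from integer GPIO numbers to the descriptor-based consumer API. A minimal sketch of the usual request-and-drive pattern, assuming a consumer name of "enable" (the actual lookup name is board/driver specific):

/* Sketch only: descriptor-based handling of the enable line.
 * The "enable" consumer name is an assumption for illustration. */
static int ti_lmu_enable_sketch(struct device *dev, struct ti_lmu *lmu)
{
	/* Request the line and drive it high immediately. */
	lmu->en_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(lmu->en_gpio))
		return PTR_ERR(lmu->en_gpio);

	/* Later transitions go through the descriptor as well. */
	gpiod_set_value_cansleep(lmu->en_gpio, 0); /* disable */
	gpiod_set_value_cansleep(lmu->en_gpio, 1); /* re-enable */
	return 0;
}

Unlike the old integer field, a descriptor also carries active-low polarity from the firmware description, so callers no longer need to track it by hand.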
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 77866214ab51..1e70060c92ce 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -62,13 +62,6 @@
#define TMIO_MMC_USE_GPIO_CD BIT(5)
/*
- * Some controllers doesn't have over 0x100 register.
- * it is used to checking accessibility of
- * CTL_SD_CARD_CLK_CTL / CTL_CLK_AND_WAIT_CTL
- */
-#define TMIO_MMC_HAVE_HIGH_REG BIT(6)
-
-/*
* Some controllers have CMD12 automatically
* issue/non-issue register
*/
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 472fa4d4ea62..7361cd3fddc1 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -31,6 +31,7 @@
#define PHY_ID_KSZ8081 0x00221560
#define PHY_ID_KSZ8061 0x00221570
#define PHY_ID_KSZ9031 0x00221620
+#define PHY_ID_KSZ9131 0x00221640
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 55000ee5c6ad..2da85b02e1c0 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -10,6 +10,7 @@
#include <linux/if.h>
+#include <linux/linkmode.h>
#include <uapi/linux/mii.h>
struct ethtool_cmd;
@@ -132,6 +133,34 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
}
/**
+ * linkmode_adv_to_mii_adv_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADVERTISE register.
+ */
+static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising))
+ result |= ADVERTISE_10HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising))
+ result |= ADVERTISE_10FULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising))
+ result |= ADVERTISE_100HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising))
+ result |= ADVERTISE_100FULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= ADVERTISE_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= ADVERTISE_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
* mii_adv_to_ethtool_adv_t
* @adv: value of the MII_ADVERTISE register
*
@@ -179,6 +208,28 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
}
/**
+ * linkmode_adv_to_mii_ctrl1000_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000T mode.
+ */
+static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising))
+ result |= ADVERTISE_1000HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising))
+ result |= ADVERTISE_1000FULL;
+
+ return result;
+}
+
+/**
* mii_ctrl1000_to_ethtool_adv_t
* @adv: value of the MII_CTRL1000 register
*
@@ -303,6 +354,56 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
}
/**
+ * mii_adv_to_linkmode_adv_t
+ * @advertising: pointer to destination link mode
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits
+ * to linkmode advertisement settings.
+ */
+static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising,
+ u32 adv)
+{
+ linkmode_zero(advertising);
+
+ if (adv & ADVERTISE_10HALF)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ advertising);
+ if (adv & ADVERTISE_10FULL)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising);
+ if (adv & ADVERTISE_100HALF)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ advertising);
+ if (adv & ADVERTISE_100FULL)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising);
+ if (adv & ADVERTISE_PAUSE_CAP)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
+ if (adv & ADVERTISE_PAUSE_ASYM)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
+}
+
+/**
+ * ethtool_adv_to_lcl_adv_t
+ * @advertising: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertising to the
+ * local pause advertisement bits for the MII_ADVERTISE register.
+ */
+static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising)
+{
+ u32 lcl_adv = 0;
+
+ if (advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ return lcl_adv;
+}
+
+/**
* mii_advertise_flowctrl - get flow control advertisement flags
* @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
*/
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 0ef6138eca49..31a750570c38 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -61,6 +61,7 @@ struct mlx5_core_cq {
int reset_notify_added;
struct list_head reset_notify;
struct mlx5_eq *eq;
+ u16 uid;
};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 11fa4e66afc5..b4c0457fbebd 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -504,6 +504,10 @@ struct health_buffer {
__be16 ext_synd;
};
+enum mlx5_cmd_addr_l_sz_offset {
+ MLX5_NIC_IFC_OFFSET = 8,
+};
+
struct mlx5_init_seg {
__be32 fw_rev;
__be32 cmdif_rev_fw_sub;
@@ -1120,6 +1124,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
+
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 66d94b4557cf..aa5963b5d38e 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -97,14 +97,15 @@ enum {
};
enum {
- MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
- MLX5_ATOMIC_MODE_CX = 2 << 16,
- MLX5_ATOMIC_MODE_8B = 3 << 16,
- MLX5_ATOMIC_MODE_16B = 4 << 16,
- MLX5_ATOMIC_MODE_32B = 5 << 16,
- MLX5_ATOMIC_MODE_64B = 6 << 16,
- MLX5_ATOMIC_MODE_128B = 7 << 16,
- MLX5_ATOMIC_MODE_256B = 8 << 16,
+ MLX5_ATOMIC_MODE_OFFSET = 16,
+ MLX5_ATOMIC_MODE_IB_COMP = 1,
+ MLX5_ATOMIC_MODE_CX = 2,
+ MLX5_ATOMIC_MODE_8B = 3,
+ MLX5_ATOMIC_MODE_16B = 4,
+ MLX5_ATOMIC_MODE_32B = 5,
+ MLX5_ATOMIC_MODE_64B = 6,
+ MLX5_ATOMIC_MODE_128B = 7,
+ MLX5_ATOMIC_MODE_256B = 8,
};
enum {
@@ -133,6 +134,7 @@ enum {
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
MLX5_REG_PMLP = 0x5002,
+ MLX5_REG_PPLM = 0x5023,
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -162,16 +164,11 @@ enum mlx5_dcbx_oper_mode {
MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
-enum mlx5_dct_atomic_mode {
- MLX5_ATOMIC_MODE_DCT_OFF = 20,
- MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
- MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
- MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
-};
-
enum {
MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
+ MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
+ MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};
enum mlx5_page_fault_resume_flags {
@@ -360,7 +357,7 @@ struct mlx5_frag_buf {
};
struct mlx5_frag_buf_ctrl {
- struct mlx5_frag_buf frag_buf;
+ struct mlx5_buf_list *frags;
u32 sz_m1;
u16 frag_sz_m1;
u16 strides_offset;
@@ -477,6 +474,7 @@ struct mlx5_core_srq {
atomic_t refcount;
struct completion free;
+ u16 uid;
};
struct mlx5_eq_table {
@@ -583,10 +581,11 @@ struct mlx5_irq_info {
};
struct mlx5_fc_stats {
- struct rb_root counters;
- struct list_head addlist;
- /* protect addlist add/splice operations */
- spinlock_t addlist_lock;
+ spinlock_t counters_idr_lock; /* protects counters_idr */
+ struct idr counters_idr;
+ struct list_head counters;
+ struct llist_head addlist;
+ struct llist_head dellist;
struct workqueue_struct *wq;
struct delayed_work work;
@@ -804,7 +803,7 @@ struct mlx5_pps {
};
struct mlx5_clock {
- rwlock_t lock;
+ seqlock_t lock;
struct cyclecounter cycles;
struct timecounter tc;
struct hwtstamp_config hwtstamp_config;
@@ -837,6 +836,7 @@ struct mlx5_core_dev {
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
} caps;
+ u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
enum mlx5_device_state state;
@@ -994,10 +994,12 @@ static inline u32 mlx5_base_mkey(const u32 key)
return key & 0xffffff00u;
}
-static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
+ u8 log_stride, u8 log_sz,
u16 strides_offset,
struct mlx5_frag_buf_ctrl *fbc)
{
+ fbc->frags = frags;
fbc->log_stride = log_stride;
fbc->log_sz = log_sz;
fbc->sz_m1 = (1 << fbc->log_sz) - 1;
@@ -1006,18 +1008,11 @@ static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
fbc->strides_offset = strides_offset;
}
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
+ u8 log_stride, u8 log_sz,
struct mlx5_frag_buf_ctrl *fbc)
{
- mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
-}
-
-static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
- void *cqc)
-{
- mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
- MLX5_GET(cqc, cqc, log_cq_size),
- fbc);
+ mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}
static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
@@ -1028,8 +1023,15 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
ix += fbc->strides_offset;
frag = ix >> fbc->log_frag_strides;
- return fbc->frag_buf.frags[frag].buf +
- ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+ return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+}
+
+static inline u32
+mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
+{
+ u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;
+
+ return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
@@ -1226,21 +1228,15 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
-#ifndef CONFIG_MLX5_CORE_IPOIB
-static inline
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *))
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-#else
+#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
struct ib_device *ibdev,
const char *name,
void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+ struct ib_device *device,
+ struct rdma_netdev_alloc_params *params);
struct mlx5_profile {
u64 mask;
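
After this change mlx5_frag_buf_ctrl carries only the frags array plus the size masks; mlx5_frag_buf_get_wqe() splits a stride index into a fragment number and an offset inside that fragment, and the new mlx5_frag_buf_get_idx_last_contig_stride() returns the last stride reachable without crossing a fragment boundary. A standalone sketch of the index arithmetic, with the masks set up the way mlx5_init_fbc() would derive them:

#include <stdint.h>
#include <stdio.h>

/* A buffer of 2^log_sz strides, split into fragments that each hold
 * 2^log_frag_strides strides. */
struct fbc {
	uint32_t sz_m1;        /* total strides - 1 */
	uint16_t frag_sz_m1;   /* strides per fragment - 1 */
	uint16_t strides_offset;
	uint8_t  log_frag_strides;
};

/* Mirror of mlx5_frag_buf_get_idx_last_contig_stride(). */
static uint32_t last_contig_stride(const struct fbc *f, uint32_t ix)
{
	uint32_t last = (ix + f->strides_offset) | f->frag_sz_m1;
	uint32_t v = last - f->strides_offset;

	return v < f->sz_m1 ? v : f->sz_m1; /* min_t(u32, ...) */
}

int main(void)
{
	/* 64 strides, 16 strides per fragment, no offset. */
	struct fbc f = { .sz_m1 = 63, .frag_sz_m1 = 15,
			 .strides_offset = 0, .log_frag_strides = 4 };
	uint32_t ix = 21;

	printf("frag %u, last contiguous stride %u\n",
	       ix >> f.log_frag_strides, last_contig_stride(&f, ix));
	/* -> frag 1, last contiguous stride 31 */
	return 0;
}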
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 804516e4f483..5660f07d3be0 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -45,7 +45,8 @@ enum {
};
enum {
- MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0),
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
};
#define LEFTOVERS_RULE_NUM 2
@@ -91,7 +92,7 @@ struct mlx5_flow_destination {
u32 tir_num;
u32 ft_num;
struct mlx5_flow_table *ft;
- struct mlx5_fc *counter;
+ u32 counter_id;
struct {
u16 num;
u16 vhca_id;
@@ -101,6 +102,8 @@ struct mlx5_flow_destination {
};
struct mlx5_flow_namespace *
+mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n);
+struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
struct mlx5_flow_namespace *
@@ -155,20 +158,28 @@ struct mlx5_fs_vlan {
#define MLX5_FS_VLAN_DEPTH 2
+enum {
+ FLOW_ACT_HAS_TAG = BIT(0),
+ FLOW_ACT_NO_APPEND = BIT(1),
+};
+
struct mlx5_flow_act {
u32 action;
- bool has_flow_tag;
u32 flow_tag;
- u32 encap_id;
+ u32 reformat_id;
u32 modify_id;
uintptr_t esp_id;
+ u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
- struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
- MLX5_FS_DEFAULT_FLOW_TAG, 0, 0}
+ struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, \
+ .reformat_id = 0, \
+ .modify_id = 0, \
+ .flags = 0, }
/* Single destination per rule.
* Group ID is implied by the match criteria.
@@ -185,15 +196,30 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_flow_destination *new_dest,
struct mlx5_flow_destination *old_dest);
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes);
+u32 mlx5_fc_id(struct mlx5_fc *counter);
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
+int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 namespace, u8 num_actions,
+ void *modify_actions, u32 *modify_header_id);
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
+ u32 modify_header_id);
+
+int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ u32 *packet_reformat_id);
+void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
+ u32 packet_reformat_id);
+
#endif
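
With the rename from encap to packet reformat, a caller of the new API allocates a reformat context, points mlx5_flow_act.reformat_id at it, and sets MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT in the action mask. A hedged usage sketch against the prototypes above (the header buffer, reformat type and namespace are illustrative placeholders):

/* Sketch: allocate a packet reformat context and attach it to a flow
 * action. "encap_hdr"/"encap_len" are placeholder inputs. */
static int attach_reformat(struct mlx5_core_dev *dev,
			   struct mlx5_flow_act *act,
			   void *encap_hdr, size_t encap_len)
{
	u32 reformat_id;
	int err;

	err = mlx5_packet_reformat_alloc(dev, MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
					 encap_len, encap_hdr,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &reformat_id);
	if (err)
		return err;

	act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	act->reformat_id = reformat_id;
	return 0;
}

The context is released with mlx5_packet_reformat_dealloc(dev, reformat_id) once no rule references it.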
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index f043d65b9bac..dbff9ff28f2c 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -243,8 +243,8 @@ enum {
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
- MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
- MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942,
@@ -336,7 +336,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1];
- u8 encap[0x1];
+ u8 reformat[0x1];
u8 decap[0x1];
u8 reserved_at_9[0x1];
u8 pop_vlan[0x1];
@@ -344,8 +344,12 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_c[0x1];
u8 pop_vlan_2[0x1];
u8 push_vlan_2[0x1];
- u8 reserved_at_f[0x11];
-
+ u8 reformat_and_vlan_action[0x1];
+ u8 reserved_at_10[0x2];
+ u8 reformat_l3_tunnel_to_l2[0x1];
+ u8 reformat_l2_to_l3_tunnel[0x1];
+ u8 reformat_and_modify_action[0x1];
+ u8 reserved_at_14[0xb];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
@@ -554,7 +558,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 nic_rx_multi_path_tirs[0x1];
u8 nic_rx_multi_path_tirs_fts[0x1];
u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
- u8 reserved_at_3[0x1fd];
+ u8 reserved_at_3[0x1d];
+ u8 encap_general_header[0x1];
+ u8 reserved_at_21[0xa];
+ u8 log_max_packet_reformat_context[0x5];
+ u8 reserved_at_30[0x6];
+ u8 max_encap_header_size[0xa];
+ u8 reserved_at_40[0x1c0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
@@ -574,7 +584,9 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_0[0x1c];
u8 fdb_multi_path_to_table[0x1];
- u8 reserved_at_1d[0x1e3];
+ u8 reserved_at_1d[0x1];
+ u8 multi_fdb_encap[0x1];
+ u8 reserved_at_1e[0x1e1];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
@@ -599,7 +611,7 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 vxlan_encap_decap[0x1];
u8 nvgre_encap_decap[0x1];
u8 reserved_at_22[0x9];
- u8 log_max_encap_headers[0x5];
+ u8 log_max_packet_reformat_context[0x5];
u8 reserved_2b[0x6];
u8 max_encap_header_size[0xa];
@@ -896,7 +908,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_mkey[0x6];
u8 reserved_at_f0[0x8];
u8 dump_fill_mkey[0x1];
- u8 reserved_at_f9[0x3];
+ u8 reserved_at_f9[0x2];
+ u8 fast_teardown[0x1];
u8 log_max_eq[0x4];
u8 max_indirection[0x8];
@@ -995,7 +1008,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 umr_modify_atomic_disabled[0x1];
u8 umr_indirect_mkey_disabled[0x1];
u8 umr_fence[0x2];
- u8 reserved_at_20c[0x3];
+ u8 dc_req_scat_data_cqe[0x1];
+ u8 reserved_at_20d[0x2];
u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
@@ -1280,7 +1294,9 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_118[0x3];
u8 log_wq_sz[0x5];
- u8 reserved_at_120[0x3];
+ u8 dbr_umem_valid[0x1];
+ u8 wq_umem_valid[0x1];
+ u8 reserved_at_122[0x1];
u8 log_hairpin_num_packets[0x5];
u8 reserved_at_128[0x3];
u8 log_hairpin_data_sz[0x5];
@@ -2354,7 +2370,10 @@ struct mlx5_ifc_qpc_bits {
u8 dc_access_key[0x40];
- u8 reserved_at_680[0xc0];
+ u8 reserved_at_680[0x3];
+ u8 dbr_umem_valid[0x1];
+
+ u8 reserved_at_684[0xbc];
};
struct mlx5_ifc_roce_addr_layout_bits {
@@ -2394,7 +2413,7 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
- MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10,
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
@@ -2427,7 +2446,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 modify_header_id[0x20];
@@ -2454,7 +2473,7 @@ struct mlx5_ifc_xrc_srqc_bits {
u8 wq_signature[0x1];
u8 cont_srq[0x1];
- u8 reserved_at_22[0x1];
+ u8 dbr_umem_valid[0x1];
u8 rlky[0x1];
u8 basic_cyclic_rcv_wqe[0x1];
u8 log_rq_stride[0x3];
@@ -2549,8 +2568,8 @@ enum {
};
enum {
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2,
};
struct mlx5_ifc_tirc_bits {
@@ -3118,7 +3137,9 @@ enum {
struct mlx5_ifc_cqc_bits {
u8 status[0x4];
- u8 reserved_at_4[0x4];
+ u8 reserved_at_4[0x2];
+ u8 dbr_umem_valid[0x1];
+ u8 reserved_at_7[0x1];
u8 cqe_sz[0x3];
u8 cc[0x1];
u8 reserved_at_c[0x1];
@@ -3352,12 +3373,13 @@ struct mlx5_ifc_teardown_hca_out_bits {
u8 reserved_at_40[0x3f];
- u8 force_state[0x1];
+ u8 state[0x1];
};
enum {
MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2,
};
struct mlx5_ifc_teardown_hca_in_bits {
@@ -3384,7 +3406,7 @@ struct mlx5_ifc_sqerr2rts_qp_out_bits {
struct mlx5_ifc_sqerr2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3414,7 +3436,7 @@ struct mlx5_ifc_sqd2rts_qp_out_bits {
struct mlx5_ifc_sqd2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3619,7 +3641,7 @@ struct mlx5_ifc_rts2rts_qp_out_bits {
struct mlx5_ifc_rts2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3649,7 +3671,7 @@ struct mlx5_ifc_rtr2rts_qp_out_bits {
struct mlx5_ifc_rtr2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3679,7 +3701,7 @@ struct mlx5_ifc_rst2init_qp_out_bits {
struct mlx5_ifc_rst2init_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -4802,19 +4824,19 @@ struct mlx5_ifc_query_eq_in_bits {
u8 reserved_at_60[0x20];
};
-struct mlx5_ifc_encap_header_in_bits {
+struct mlx5_ifc_packet_reformat_context_in_bits {
u8 reserved_at_0[0x5];
- u8 header_type[0x3];
+ u8 reformat_type[0x3];
u8 reserved_at_8[0xe];
- u8 encap_header_size[0xa];
+ u8 reformat_data_size[0xa];
u8 reserved_at_20[0x10];
- u8 encap_header[2][0x8];
+ u8 reformat_data[2][0x8];
- u8 more_encap_header[0][0x8];
+ u8 more_reformat_data[0][0x8];
};
-struct mlx5_ifc_query_encap_header_out_bits {
+struct mlx5_ifc_query_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4822,33 +4844,41 @@ struct mlx5_ifc_query_encap_header_out_bits {
u8 reserved_at_40[0xa0];
- struct mlx5_ifc_encap_header_in_bits encap_header[0];
+ struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[0];
};
-struct mlx5_ifc_query_encap_header_in_bits {
+struct mlx5_ifc_query_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_at_60[0xa0];
};
-struct mlx5_ifc_alloc_encap_header_out_bits {
+struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_at_60[0x20];
};
-struct mlx5_ifc_alloc_encap_header_in_bits {
+enum {
+ MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
+ MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
+ MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
+ MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
+ MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+};
+
+struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -4857,10 +4887,10 @@ struct mlx5_ifc_alloc_encap_header_in_bits {
u8 reserved_at_40[0xa0];
- struct mlx5_ifc_encap_header_in_bits encap_header;
+ struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context;
};
-struct mlx5_ifc_dealloc_encap_header_out_bits {
+struct mlx5_ifc_dealloc_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4869,14 +4899,14 @@ struct mlx5_ifc_dealloc_encap_header_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_dealloc_encap_header_in_bits {
+struct mlx5_ifc_dealloc_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_20[0x10];
u8 op_mod[0x10];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_60[0x20];
};
@@ -5174,7 +5204,7 @@ struct mlx5_ifc_qp_2rst_out_bits {
struct mlx5_ifc_qp_2rst_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5196,7 +5226,7 @@ struct mlx5_ifc_qp_2err_out_bits {
struct mlx5_ifc_qp_2err_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5296,7 +5326,7 @@ struct mlx5_ifc_modify_tis_bitmask_bits {
struct mlx5_ifc_modify_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5335,7 +5365,7 @@ struct mlx5_ifc_modify_tir_out_bits {
struct mlx5_ifc_modify_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5363,7 +5393,7 @@ struct mlx5_ifc_modify_sq_out_bits {
struct mlx5_ifc_modify_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5436,7 +5466,7 @@ struct mlx5_ifc_rqt_bitmask_bits {
struct mlx5_ifc_modify_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5470,7 +5500,7 @@ enum {
struct mlx5_ifc_modify_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5506,7 +5536,7 @@ struct mlx5_ifc_rmp_bitmask_bits {
struct mlx5_ifc_modify_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5611,7 +5641,7 @@ enum {
struct mlx5_ifc_modify_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5623,7 +5653,10 @@ struct mlx5_ifc_modify_cq_in_bits {
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x40];
+
+ u8 cq_umem_valid[0x1];
+ u8 reserved_at_2c1[0x5bf];
u8 pas[0][0x40];
};
@@ -5771,7 +5804,7 @@ struct mlx5_ifc_init2rtr_qp_out_bits {
struct mlx5_ifc_init2rtr_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5801,7 +5834,7 @@ struct mlx5_ifc_init2init_qp_out_bits {
struct mlx5_ifc_init2init_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5900,7 +5933,7 @@ struct mlx5_ifc_drain_dct_out_bits {
struct mlx5_ifc_drain_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5944,7 +5977,7 @@ struct mlx5_ifc_detach_from_mcg_out_bits {
struct mlx5_ifc_detach_from_mcg_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5968,7 +6001,7 @@ struct mlx5_ifc_destroy_xrq_out_bits {
struct mlx5_ifc_destroy_xrq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5990,7 +6023,7 @@ struct mlx5_ifc_destroy_xrc_srq_out_bits {
struct mlx5_ifc_destroy_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6012,7 +6045,7 @@ struct mlx5_ifc_destroy_tis_out_bits {
struct mlx5_ifc_destroy_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6034,7 +6067,7 @@ struct mlx5_ifc_destroy_tir_out_bits {
struct mlx5_ifc_destroy_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6056,7 +6089,7 @@ struct mlx5_ifc_destroy_srq_out_bits {
struct mlx5_ifc_destroy_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6078,7 +6111,7 @@ struct mlx5_ifc_destroy_sq_out_bits {
struct mlx5_ifc_destroy_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6124,7 +6157,7 @@ struct mlx5_ifc_destroy_rqt_out_bits {
struct mlx5_ifc_destroy_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6146,7 +6179,7 @@ struct mlx5_ifc_destroy_rq_out_bits {
struct mlx5_ifc_destroy_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6190,7 +6223,7 @@ struct mlx5_ifc_destroy_rmp_out_bits {
struct mlx5_ifc_destroy_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6212,7 +6245,7 @@ struct mlx5_ifc_destroy_qp_out_bits {
struct mlx5_ifc_destroy_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6364,7 +6397,7 @@ struct mlx5_ifc_destroy_dct_out_bits {
struct mlx5_ifc_destroy_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6386,7 +6419,7 @@ struct mlx5_ifc_destroy_cq_out_bits {
struct mlx5_ifc_destroy_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6489,7 +6522,7 @@ struct mlx5_ifc_dealloc_xrcd_out_bits {
struct mlx5_ifc_dealloc_xrcd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6577,7 +6610,7 @@ struct mlx5_ifc_dealloc_pd_out_bits {
struct mlx5_ifc_dealloc_pd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6623,7 +6656,7 @@ struct mlx5_ifc_create_xrq_out_bits {
struct mlx5_ifc_create_xrq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6647,7 +6680,7 @@ struct mlx5_ifc_create_xrc_srq_out_bits {
struct mlx5_ifc_create_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6656,7 +6689,9 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x40];
+ u8 xrc_srq_umem_valid[0x1];
+ u8 reserved_at_2c1[0x5bf];
u8 pas[0][0x40];
};
@@ -6675,7 +6710,7 @@ struct mlx5_ifc_create_tis_out_bits {
struct mlx5_ifc_create_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6699,7 +6734,7 @@ struct mlx5_ifc_create_tir_out_bits {
struct mlx5_ifc_create_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6723,7 +6758,7 @@ struct mlx5_ifc_create_srq_out_bits {
struct mlx5_ifc_create_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6751,7 +6786,7 @@ struct mlx5_ifc_create_sq_out_bits {
struct mlx5_ifc_create_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6805,7 +6840,7 @@ struct mlx5_ifc_create_rqt_out_bits {
struct mlx5_ifc_create_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6829,7 +6864,7 @@ struct mlx5_ifc_create_rq_out_bits {
struct mlx5_ifc_create_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6853,7 +6888,7 @@ struct mlx5_ifc_create_rmp_out_bits {
struct mlx5_ifc_create_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6877,7 +6912,7 @@ struct mlx5_ifc_create_qp_out_bits {
struct mlx5_ifc_create_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6890,7 +6925,10 @@ struct mlx5_ifc_create_qp_in_bits {
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_at_800[0x80];
+ u8 reserved_at_800[0x60];
+
+ u8 wq_umem_valid[0x1];
+ u8 reserved_at_861[0x1f];
u8 pas[0][0x40];
};
@@ -6952,7 +6990,8 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 reserved_at_40[0x20];
u8 pg_access[0x1];
- u8 reserved_at_61[0x1f];
+ u8 mkey_umem_valid[0x1];
+ u8 reserved_at_62[0x1e];
struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
@@ -6978,7 +7017,7 @@ struct mlx5_ifc_create_flow_table_out_bits {
};
struct mlx5_ifc_flow_table_context_bits {
- u8 encap_en[0x1];
+ u8 reformat_en[0x1];
u8 decap_en[0x1];
u8 reserved_at_2[0x2];
u8 table_miss_action[0x4];
@@ -7120,7 +7159,7 @@ struct mlx5_ifc_create_dct_out_bits {
struct mlx5_ifc_create_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7146,7 +7185,7 @@ struct mlx5_ifc_create_cq_out_bits {
struct mlx5_ifc_create_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7155,7 +7194,10 @@ struct mlx5_ifc_create_cq_in_bits {
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x60];
+
+ u8 cq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x59f];
u8 pas[0][0x40];
};
@@ -7203,7 +7245,7 @@ struct mlx5_ifc_attach_to_mcg_out_bits {
struct mlx5_ifc_attach_to_mcg_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7254,7 +7296,7 @@ enum {
struct mlx5_ifc_arm_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7282,7 +7324,7 @@ enum {
struct mlx5_ifc_arm_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7330,7 +7372,7 @@ struct mlx5_ifc_alloc_xrcd_out_bits {
struct mlx5_ifc_alloc_xrcd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7418,7 +7460,7 @@ struct mlx5_ifc_alloc_pd_out_bits {
struct mlx5_ifc_alloc_pd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7786,20 +7828,34 @@ struct mlx5_ifc_pplr_reg_bits {
struct mlx5_ifc_pplm_reg_bits {
u8 reserved_at_0[0x8];
- u8 local_port[0x8];
- u8 reserved_at_10[0x10];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x20];
+ u8 reserved_at_20[0x20];
- u8 port_profile_mode[0x8];
- u8 static_port_profile[0x8];
- u8 active_port_profile[0x8];
- u8 reserved_at_58[0x8];
+ u8 port_profile_mode[0x8];
+ u8 static_port_profile[0x8];
+ u8 active_port_profile[0x8];
+ u8 reserved_at_58[0x8];
- u8 retransmission_active[0x8];
- u8 fec_mode_active[0x18];
+ u8 retransmission_active[0x8];
+ u8 fec_mode_active[0x18];
- u8 reserved_at_80[0x20];
+ u8 rs_fec_correction_bypass_cap[0x4];
+ u8 reserved_at_84[0x8];
+ u8 fec_override_cap_56g[0x4];
+ u8 fec_override_cap_100g[0x4];
+ u8 fec_override_cap_50g[0x4];
+ u8 fec_override_cap_25g[0x4];
+ u8 fec_override_cap_10g_40g[0x4];
+
+ u8 rs_fec_correction_bypass_admin[0x4];
+ u8 reserved_at_a4[0x8];
+ u8 fec_override_admin_56g[0x4];
+ u8 fec_override_admin_100g[0x4];
+ u8 fec_override_admin_50g[0x4];
+ u8 fec_override_admin_25g[0x4];
+ u8 fec_override_admin_10g_40g[0x4];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -8084,7 +8140,8 @@ struct mlx5_ifc_pcam_enhanced_features_bits {
u8 rx_icrc_encapsulated_counter[0x1];
u8 reserved_at_6e[0x8];
u8 pfcc_mask[0x1];
- u8 reserved_at_77[0x4];
+ u8 reserved_at_77[0x3];
+ u8 per_lane_error_counters[0x1];
u8 rx_buffer_fullness_counters[0x1];
u8 ptys_connector_type[0x1];
u8 reserved_at_7d[0x1];
@@ -8095,7 +8152,10 @@ struct mlx5_ifc_pcam_enhanced_features_bits {
struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
u8 port_access_reg_cap_mask_127_to_96[0x20];
u8 port_access_reg_cap_mask_95_to_64[0x20];
- u8 port_access_reg_cap_mask_63_to_32[0x20];
+
+ u8 port_access_reg_cap_mask_63_to_36[0x1c];
+ u8 pplm[0x1];
+ u8 port_access_reg_cap_mask_34_to_32[0x3];
u8 port_access_reg_cap_mask_31_to_13[0x13];
u8 pbmc[0x1];
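
The recurring reserved_at_10[0x10] -> uid[0x10] change in this file gives nearly every verbs-object command a 16-bit uid directly after the opcode in the command's first dword, so the device can attribute the object to the issuing user context. The driver fills such fields with its MLX5_SET() macro; the sketch below packs the same opcode/uid word by hand to show where the bits land (a simplification of what MLX5_SET() plus cpu_to_be32() do on a little-endian host):

#include <stdint.h>
#include <stdio.h>

/* First dword of e.g. create_qp_in: opcode[0x10] then uid[0x10],
 * big-endian on the wire. */
static uint32_t pack_opcode_uid(uint16_t opcode, uint16_t uid)
{
	uint32_t host = ((uint32_t)opcode << 16) | uid;

	/* Byte-swap to big-endian, as a little-endian host would. */
	return ((host & 0x000000ffu) << 24) | ((host & 0x0000ff00u) << 8) |
	       ((host & 0x00ff0000u) >> 8)  | ((host & 0xff000000u) >> 24);
}

int main(void)
{
	/* MLX5_CMD_OP_CREATE_QP is 0x500 in mlx5_ifc.h. */
	printf("wire dword: 0x%08x\n", pack_opcode_uid(0x500, 7));
	return 0;
}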
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 4778d41085d4..fbe322c966bc 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -471,6 +471,7 @@ struct mlx5_core_qp {
int qpn;
struct mlx5_rsc_debug *dbg;
int pid;
+ u16 uid;
};
struct mlx5_core_dct {
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index 24ff23e27c8a..1b1f3c20c6a3 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -61,6 +61,7 @@ struct mlx5_srq_attr {
u32 tm_next_tag;
u32 tm_hw_phase_cnt;
u32 tm_sw_phase_cnt;
+ u16 uid;
};
struct mlx5_core_dev;
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 83a33a1873a6..7f5ca2cd3a32 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -90,6 +90,8 @@ struct mlx5_hairpin {
u32 *rqn;
u32 *sqn;
+
+ bool peer_gone;
};
struct mlx5_hairpin *
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 7e7c6dfcfb09..9c694808c212 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -121,4 +121,6 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
struct mlx5_core_dev *port_mdev);
int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
+
+u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a61ebe8ad4ca..daa2b8f1e9a8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -890,6 +890,19 @@ static inline bool is_device_public_page(const struct page *page)
page->pgmap->type == MEMORY_DEVICE_PUBLIC;
}
+#ifdef CONFIG_PCI_P2PDMA
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+}
+#else /* CONFIG_PCI_P2PDMA */
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return false;
+}
+#endif /* CONFIG_PCI_P2PDMA */
+
#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline void dev_pagemap_get_ops(void)
{
@@ -913,6 +926,11 @@ static inline bool is_device_public_page(const struct page *page)
{
return false;
}
+
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return false;
+}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline void get_page(struct page *page)
@@ -2455,6 +2473,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
+static inline bool range_in_vma(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ return (vma && vma->vm_start <= start && end <= vma->vm_end);
+}
+
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
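
range_in_vma() is a small NULL-safe containment check: the range [start, end] must sit entirely inside one VMA. A standalone sketch of the predicate with a stand-in vma type:

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; }; /* stand-in */

/* Same predicate as range_in_vma(): NULL-safe containment check. */
static bool range_in_vma(const struct vma *vma,
			 unsigned long start, unsigned long end)
{
	return vma && vma->vm_start <= start && end <= vma->vm_end;
}

int main(void)
{
	struct vma v = { 0x1000, 0x9000 };

	printf("%d %d\n", range_in_vma(&v, 0x2000, 0x3000),  /* 1 */
			  range_in_vma(&v, 0x8000, 0xa000)); /* 0 */
	return 0;
}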
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index beed7121c781..2a5fe75dd082 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -569,6 +569,11 @@ static inline bool mmc_can_retune(struct mmc_host *host)
return host->can_retune == 1;
}
+static inline bool mmc_doing_retune(struct mmc_host *host)
+{
+ return host->doing_retune == 1;
+}
+
static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
{
return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1e22d96734e0..d4b0c79d2924 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -668,16 +668,6 @@ typedef struct pglist_data {
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
#endif
-#ifdef CONFIG_NUMA_BALANCING
- /* Lock serializing the migrate rate limiting window */
- spinlock_t numabalancing_migrate_lock;
-
- /* Rate limiting time interval */
- unsigned long numabalancing_migrate_next_window;
-
- /* Number of pages migrated during the rate limiting time interval */
- unsigned long numabalancing_migrate_nr_pages;
-#endif
/*
* This is a per-node reserve of pages that are not available
* to userspace allocations.
diff --git a/include/linux/module.h b/include/linux/module.h
index f807f15bebbe..fce6b4335e36 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -20,6 +20,7 @@
#include <linux/export.h>
#include <linux/rbtree_latch.h>
#include <linux/error-injection.h>
+#include <linux/tracepoint-defs.h>
#include <linux/percpu.h>
#include <asm/module.h>
@@ -123,7 +124,6 @@ extern void cleanup_module(void);
#define late_initcall_sync(fn) module_init(fn)
#define console_initcall(fn) module_init(fn)
-#define security_initcall(fn) module_init(fn)
/* Each module must use one module_init(). */
#define module_init(initfn) \
@@ -430,7 +430,7 @@ struct module {
#ifdef CONFIG_TRACEPOINTS
unsigned int num_tracepoints;
- struct tracepoint * const *tracepoints_ptrs;
+ tracepoint_ptr_t *tracepoints_ptrs;
#endif
#ifdef HAVE_JUMP_LABEL
struct jump_entry *jump_entries;
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 6675b9f81979..34de06b426ef 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -7,6 +7,7 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_notifier.h>
+#include <net/ip_fib.h>
/**
* struct vif_device - interface representor for multicast routing
@@ -283,6 +284,12 @@ void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg);
int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mr_mfc *c, struct rtmsg *rtm);
+int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
+ u32 portid, u32 seq, struct mr_mfc *c,
+ int cmd, int flags),
+ spinlock_t *lock, struct fib_dump_filter *filter);
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct mr_table *(*iter)(struct net *net,
struct mr_table *mrt),
@@ -290,7 +297,7 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
- spinlock_t *lock);
+ spinlock_t *lock, struct fib_dump_filter *filter);
int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
int (*rules_dump)(struct net *net,
@@ -340,7 +347,7 @@ mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
- spinlock_t *lock)
+ spinlock_t *lock, struct fib_dump_filter *filter)
{
return -EINVAL;
}
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 5839d8062dfc..0e9c50052ff3 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -317,11 +317,18 @@ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
-platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data);
+__platform_msi_create_device_domain(struct device *dev,
+ unsigned int nvec,
+ bool is_tree,
+ irq_write_msi_msg_t write_msi_msg,
+ const struct irq_domain_ops *ops,
+ void *host_data);
+
+#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \
+ __platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
+#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
+ __platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
+
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
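
platform_msi_create_device_domain() keeps its original five-argument signature by becoming a macro over __platform_msi_create_device_domain(), which grows an is_tree flag; existing callers compile unchanged while tree-domain users opt in through the new wrapper. A generic, self-contained sketch of this extend-by-wrapper pattern (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* The extended implementation grows a new flag parameter... */
static int __create_domain(int nvec, bool is_tree)
{
	printf("nvec=%d is_tree=%d\n", nvec, is_tree);
	return 0;
}

/* ...while macros keep old call sites source-compatible. */
#define create_domain(nvec)      __create_domain(nvec, false)
#define create_tree_domain(nvec) __create_domain(nvec, true)

int main(void)
{
	create_domain(4);      /* old API, unchanged callers */
	create_tree_domain(4); /* new capability, explicit opt-in */
	return 0;
}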
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index e93837f647de..1d3ade69d39a 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -23,7 +23,6 @@
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
-#include <linux/workqueue.h>
struct hd_geometry;
struct mtd_info;
@@ -44,9 +43,9 @@ struct mtd_blktrans_dev {
struct kref ref;
struct gendisk *disk;
struct attribute_group *disk_attributes;
- struct workqueue_struct *wq;
- struct work_struct work;
struct request_queue *rq;
+ struct list_head rq_list;
+ struct blk_mq_tag_set *tag_set;
spinlock_t queue_lock;
void *priv;
fmode_t file_mode;
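
The per-device workqueue and work item are replaced by blk-mq plumbing: a tag set plus an rq_list the queue_rq handler can use to park requests for the translation layer. A hedged sketch of the corresponding setup, assuming the single-queue helper available in this kernel generation (the ops table and queue depth are illustrative):

/* Sketch: blk-mq setup matching the new fields (values illustrative). */
static int mtd_blktrans_init_mq(struct mtd_blktrans_dev *dev,
				const struct blk_mq_ops *mq_ops)
{
	INIT_LIST_HEAD(&dev->rq_list);

	dev->tag_set = kzalloc(sizeof(*dev->tag_set), GFP_KERNEL);
	if (!dev->tag_set)
		return -ENOMEM;

	dev->rq = blk_mq_init_sq_queue(dev->tag_set, mq_ops, 2,
				       BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (IS_ERR(dev->rq)) {
		kfree(dev->tag_set);
		return PTR_ERR(dev->rq);
	}
	dev->rq->queuedata = dev;
	return 0;
}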
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
new file mode 100644
index 000000000000..0b6b59f7cfbd
--- /dev/null
+++ b/include/linux/mtd/jedec.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all JEDEC related definitions
+ */
+
+#ifndef __LINUX_MTD_JEDEC_H
+#define __LINUX_MTD_JEDEC_H
+
+struct jedec_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+/* JEDEC features */
+#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+
+struct nand_jedec_params {
+ /* rev info and features block */
+ /* 'J' 'E' 'S' 'D' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ u8 opt_cmd[3];
+ __le16 sec_cmd;
+ u8 num_of_param_pages;
+ u8 reserved0[18];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id[6];
+ u8 reserved1[10];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ u8 reserved2[6];
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ u8 programs_per_page;
+ u8 multi_plane_addr;
+ u8 multi_plane_op_attr;
+ u8 reserved3[38];
+
+ /* electrical parameter block */
+ __le16 async_sdr_speed_grade;
+ __le16 toggle_ddr_speed_grade;
+ __le16 sync_ddr_speed_grade;
+ u8 async_sdr_features;
+ u8 toggle_ddr_features;
+ u8 sync_ddr_features;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_r_multi_plane;
+ __le16 t_ccs;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ __le16 clk_pin_capacitance_typ;
+ u8 driver_strength_support;
+ __le16 t_adl;
+ u8 reserved4[36];
+
+ /* ECC and endurance block */
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ struct jedec_ecc_info ecc_info[4];
+ u8 reserved5[29];
+
+ /* reserved */
+ u8 reserved6[148];
+
+ /* vendor */
+ __le16 vendor_rev_num;
+ u8 reserved7[88];
+
+ /* CRC for Parameter Page */
+ __le16 crc;
+} __packed;
+
+#endif /* __LINUX_MTD_JEDEC_H */
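
The parameter page ends in a CRC over the preceding bytes; both JEDEC and ONFI pages are checked with CRC-16, polynomial 0x8005, MSB first, seeded with 0x4F4E (ONFI_CRC_BASE in the companion onfi.h below). A standalone sketch of that checksum; the 510-byte length assumes a 512-byte page with the CRC in the final two bytes:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* CRC-16 (poly 0x8005, MSB-first) seeded with 0x4F4E, as used to
 * validate ONFI/JEDEC NAND parameter pages. */
static uint16_t param_page_crc16(uint16_t crc, const uint8_t *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= (uint16_t)(*p++) << 8;
		for (i = 0; i < 8; i++)
			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t page[510] = { 'J', 'E', 'S', 'D' }; /* rest zeroed */

	printf("crc = 0x%04x\n", param_page_crc16(0x4F4E, page, sizeof(page)));
	return 0;
}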
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
index 98f20ef05d60..b8106651f807 100644
--- a/include/linux/mtd/nand_bch.h
+++ b/include/linux/mtd/nand_bch.h
@@ -12,6 +12,7 @@
#define __MTD_NAND_BCH_H__
struct mtd_info;
+struct nand_chip;
struct nand_bch_control;
#if defined(CONFIG_MTD_NAND_ECC_BCH)
@@ -21,14 +22,14 @@ static inline int mtd_nand_has_bch(void) { return 1; }
/*
* Calculate BCH ecc code
*/
-int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+int nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat,
u_char *ecc_code);
/*
* Detect and correct bit errors
*/
-int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc,
- u_char *calc_ecc);
+int nand_bch_correct_data(struct nand_chip *chip, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc);
/*
* Initialize BCH encoder/decoder
*/
@@ -43,14 +44,14 @@ void nand_bch_free(struct nand_bch_control *nbc);
static inline int mtd_nand_has_bch(void) { return 0; }
static inline int
-nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat,
u_char *ecc_code)
{
return -1;
}
static inline int
-nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
+nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
return -ENOTSUPP;
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 8a2decf7462c..0b3bb156c344 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -13,28 +13,30 @@
#ifndef __MTD_NAND_ECC_H__
#define __MTD_NAND_ECC_H__
-struct mtd_info;
+struct nand_chip;
/*
* Calculate 3 byte ECC code for eccsize byte block
*/
void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
- u_char *ecc_code);
+ u_char *ecc_code, bool sm_order);
/*
* Calculate 3 byte ECC code for 256/512 byte block
*/
-int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
+int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat,
+ u_char *ecc_code);
/*
* Detect and correct a 1 bit error for eccsize byte block
*/
int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
- unsigned int eccsize);
+ unsigned int eccsize, bool sm_order);
/*
* Detect and correct a 1 bit error for 256/512 byte block
*/
-int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
+int nand_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc,
+ u_char *calc_ecc);
#endif /* __MTD_NAND_ECC_H__ */
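
The new sm_order flag only affects how the three computed ECC bytes are laid out: in SmartMedia ordering the two line-parity bytes are swapped relative to the ordering the core used before, while the column-parity byte stays last. A sketch of just that placement step (the parity values are made up; the underlying Hamming computation is unchanged):

#include <stdint.h>
#include <stdio.h>

/* Final byte placement of the 3 software-Hamming ECC bytes. */
static void place_ecc(uint8_t lp0, uint8_t lp1, uint8_t colp,
		      int sm_order, uint8_t ecc[3])
{
	if (sm_order) {
		ecc[0] = lp1;	/* SmartMedia: line-parity bytes swapped */
		ecc[1] = lp0;
	} else {
		ecc[0] = lp0;
		ecc[1] = lp1;
	}
	ecc[2] = colp;		/* column parity is always last */
}

int main(void)
{
	uint8_t ecc[3];

	place_ecc(0xaa, 0x55, 0x0f, 1, ecc);
	printf("%02x %02x %02x\n", ecc[0], ecc[1], ecc[2]);
	return 0;
}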
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
new file mode 100644
index 000000000000..339ac798568e
--- /dev/null
+++ b/include/linux/mtd/onfi.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all ONFI related definitions
+ */
+
+#ifndef __LINUX_MTD_ONFI_H
+#define __LINUX_MTD_ONFI_H
+
+#include <linux/types.h>
+
+/* ONFI version bits */
+#define ONFI_VERSION_1_0 BIT(1)
+#define ONFI_VERSION_2_0 BIT(2)
+#define ONFI_VERSION_2_1 BIT(3)
+#define ONFI_VERSION_2_2 BIT(4)
+#define ONFI_VERSION_2_3 BIT(5)
+#define ONFI_VERSION_3_0 BIT(6)
+#define ONFI_VERSION_3_1 BIT(7)
+#define ONFI_VERSION_3_2 BIT(8)
+#define ONFI_VERSION_4_0 BIT(9)
+
+/* ONFI features */
+#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
+#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
+
+/* ONFI timing mode, used in both asynchronous and synchronous mode */
+#define ONFI_TIMING_MODE_0 (1 << 0)
+#define ONFI_TIMING_MODE_1 (1 << 1)
+#define ONFI_TIMING_MODE_2 (1 << 2)
+#define ONFI_TIMING_MODE_3 (1 << 3)
+#define ONFI_TIMING_MODE_4 (1 << 4)
+#define ONFI_TIMING_MODE_5 (1 << 5)
+#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
+
+/* ONFI feature number/address */
+#define ONFI_FEATURE_NUMBER 256
+#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
+
+/* Vendor-specific feature address (Micron) */
+#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
+#define ONFI_FEATURE_ON_DIE_ECC 0x90
+#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
+
+/* ONFI subfeature parameters length */
+#define ONFI_SUBFEATURE_PARAM_LEN 4
+
+/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
+
+struct nand_onfi_params {
+ /* rev info and features block */
+ /* 'O' 'N' 'F' 'I' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ __le16 opt_cmd;
+ u8 reserved0[2];
+ __le16 ext_param_page_length; /* since ONFI 2.1 */
+ u8 num_of_param_pages; /* since ONFI 2.1 */
+ u8 reserved1[17];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id;
+ __le16 date_code;
+ u8 reserved2[13];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ __le32 data_bytes_per_ppage;
+ __le16 spare_bytes_per_ppage;
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ u8 programs_per_page;
+ u8 ppage_attr;
+ u8 ecc_bits;
+ u8 interleaved_bits;
+ u8 interleaved_ops;
+ u8 reserved3[13];
+
+ /* electrical parameter block */
+ u8 io_pin_capacitance_max;
+ __le16 async_timing_mode;
+ __le16 program_cache_timing_mode;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_ccs;
+ __le16 src_sync_timing_mode;
+ u8 src_ssync_features;
+ __le16 clk_pin_capacitance_typ;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ u8 input_pin_capacitance_max;
+ u8 driver_strength_support;
+ __le16 t_int_r;
+ __le16 t_adl;
+ u8 reserved4[8];
+
+ /* vendor */
+ __le16 vendor_revision;
+ u8 vendor[88];
+
+ __le16 crc;
+} __packed;
+
+#define ONFI_CRC_BASE 0x4F4E
+
+/* Extended ECC information Block Definition (since ONFI 2.1) */
+struct onfi_ext_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+#define ONFI_SECTION_TYPE_0 0 /* Unused section */
+#define ONFI_SECTION_TYPE_1 1 /* Additional sections */
+#define ONFI_SECTION_TYPE_2 2 /* ECC information */
+struct onfi_ext_section {
+ u8 type;
+ u8 length;
+} __packed;
+
+#define ONFI_EXT_SECTION_MAX 8
+
+/* Extended Parameter Page Definition (since ONFI 2.1) */
+struct onfi_ext_param_page {
+ __le16 crc;
+ u8 sig[4]; /* 'E' 'P' 'P' 'S' */
+ u8 reserved0[10];
+ struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
+
+ /*
+ * The actual size of the Extended Parameter Page is in
+ * @ext_param_page_length of nand_onfi_params{}.
+ * The following are the variable length sections.
+ * So we do not add any fields below. Please see the ONFI spec.
+ */
+} __packed;
+
+/**
+ * struct onfi_params - ONFI specific parameters that will be reused
+ * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
+ * @tPROG: Page program time
+ * @tBERS: Block erase time
+ * @tR: Page read time
+ * @tCCS: Change column setup time
+ * @async_timing_mode: Supported asynchronous timing mode
+ * @vendor_revision: Vendor specific revision number
+ * @vendor: Vendor specific data
+ */
+struct onfi_params {
+ int version;
+ u16 tPROG;
+ u16 tBERS;
+ u16 tR;
+ u16 tCCS;
+ u16 async_timing_mode;
+ u16 vendor_revision;
+ u8 vendor[88];
+};
+
+#endif /* __LINUX_MTD_ONFI_H */
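
async_timing_mode above is a bitmask with bit n set when timing mode n is supported, so the fastest usable mode is simply the highest set bit among modes 0..5. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Highest supported asynchronous ONFI timing mode, or -1 if none.
 * Bit n of the mask corresponds to ONFI_TIMING_MODE_n. */
static int onfi_best_async_mode(uint16_t async_timing_mode)
{
	int mode;

	for (mode = 5; mode >= 0; mode--)
		if (async_timing_mode & (1 << mode))
			return mode;
	return -1;
}

int main(void)
{
	/* Modes 0..4 supported -> pick 4. */
	printf("best mode: %d\n", onfi_best_async_mode(0x1f));
	return 0;
}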
diff --git a/include/linux/mtd/platnand.h b/include/linux/mtd/platnand.h
new file mode 100644
index 000000000000..bc11eb6b593b
--- /dev/null
+++ b/include/linux/mtd/platnand.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all platform NAND related definitions.
+ */
+
+#ifndef __LINUX_MTD_PLATNAND_H
+#define __LINUX_MTD_PLATNAND_H
+
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/platform_device.h>
+
+/**
+ * struct platform_nand_chip - chip level device structure
+ * @nr_chips: max. number of chips to scan for
+ * @chip_offset: chip number offset
+ * @nr_partitions: number of partitions pointed to by partitions (or zero)
+ * @partitions: mtd partition list
+ * @chip_delay: R/B delay value in us
+ * @options: Option flags, e.g. 16bit buswidth
+ * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
+ * @part_probe_types: NULL-terminated array of probe types
+ */
+struct platform_nand_chip {
+ int nr_chips;
+ int chip_offset;
+ int nr_partitions;
+ struct mtd_partition *partitions;
+ int chip_delay;
+ unsigned int options;
+ unsigned int bbt_options;
+ const char **part_probe_types;
+};
+
+/**
+ * struct platform_nand_ctrl - controller level device structure
+ * @probe: platform specific function to probe/setup hardware
+ * @remove: platform specific function to remove/teardown hardware
+ * @dev_ready: platform specific function to read ready/busy pin
+ * @select_chip: platform specific chip select function
+ * @cmd_ctrl: platform specific function for controlling
+ * ALE/CLE/nCE. Also used to write command and address
+ * @write_buf: platform specific function for write buffer
+ * @read_buf: platform specific function for read buffer
+ * @priv: private data to transport driver specific settings
+ *
+ * All fields are optional and depend on the hardware driver requirements
+ */
+struct platform_nand_ctrl {
+ int (*probe)(struct platform_device *pdev);
+ void (*remove)(struct platform_device *pdev);
+ int (*dev_ready)(struct nand_chip *chip);
+ void (*select_chip)(struct nand_chip *chip, int cs);
+ void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
+ void (*write_buf)(struct nand_chip *chip, const uint8_t *buf, int len);
+ void (*read_buf)(struct nand_chip *chip, uint8_t *buf, int len);
+ void *priv;
+};
+
+/**
+ * struct platform_nand_data - container structure for platform-specific data
+ * @chip: chip level chip structure
+ * @ctrl: controller level device structure
+ */
+struct platform_nand_data {
+ struct platform_nand_chip chip;
+ struct platform_nand_ctrl ctrl;
+};
+
+#endif /* __LINUX_MTD_PLATNAND_H */
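
A board file typically fills platform_nand_data with the chip geometry and controller callbacks and attaches it to a platform device as platform data, where the generic gen_nand platform driver picks it up. A hedged board-file sketch (the callback body, partition layout and delay value are hypothetical):

/* Sketch of board-file usage; names and values are hypothetical. */
static void board_nand_cmd_ctrl(struct nand_chip *chip, int dat,
				unsigned int ctrl)
{
	/* Toggle board-specific ALE/CLE/nCE lines, write command byte. */
}

static struct mtd_partition board_nand_parts[] = {
	{ .name = "boot", .offset = 0, .size = SZ_1M },
	{ .name = "fs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct platform_nand_data board_nand_data = {
	.chip = {
		.nr_chips = 1,
		.chip_delay = 20, /* R/B delay in us */
		.partitions = board_nand_parts,
		.nr_partitions = ARRAY_SIZE(board_nand_parts),
	},
	.ctrl = {
		.cmd_ctrl = board_nand_cmd_ctrl,
	},
};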
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index efb2345359bb..e10b126e148f 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -21,22 +21,12 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
+#include <linux/mtd/jedec.h>
+#include <linux/mtd/onfi.h>
#include <linux/of.h>
#include <linux/types.h>
-struct nand_flash_dev;
-
-/* Scan and identify a NAND device */
-int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
- struct nand_flash_dev *ids);
-
-static inline int nand_scan(struct mtd_info *mtd, int max_chips)
-{
- return nand_scan_with_ids(mtd, max_chips, NULL);
-}
-
-/* Internal helper for board drivers which need to override command function */
-void nand_wait_ready(struct mtd_info *mtd);
+struct nand_chip;
/* The maximum number of NAND chips in an array */
#define NAND_MAX_CHIPS 8
@@ -131,9 +121,11 @@ enum nand_ecc_algo {
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
#define NAND_ECC_MAXIMIZE BIT(1)
-/* Bit mask for flags passed to do_nand_read_ecc */
-#define NAND_GET_DEVICE 0x80
-
+/*
+ * When using software implementation of Hamming, we can specify which byte
+ * ordering should be used.
+ */
+#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
/*
 * Option constants for bizarre dysfunctionality and real
@@ -175,9 +167,7 @@ enum nand_ecc_algo {
#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
/* Macros to identify the above */
-#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
-#define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE)
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
@@ -198,10 +188,10 @@ enum nand_ecc_algo {
#define NAND_USE_BOUNCE_BUFFER 0x00100000
/*
- * In case your controller is implementing ->cmd_ctrl() and is relying on the
- * default ->cmdfunc() implementation, you may want to let the core handle the
- * tCCS delay which is required when a column change (RNDIN or RNDOUT) is
- * requested.
+ * In case your controller is implementing ->legacy.cmd_ctrl() and is relying
+ * on the default ->cmdfunc() implementation, you may want to let the core
+ * handle the tCCS delay which is required when a column change (RNDIN or
+ * RNDOUT) is requested.
* If your controller already takes care of this delay, you don't need to set
* this flag.
*/
@@ -222,250 +212,6 @@ enum nand_ecc_algo {
#define NAND_CI_CELLTYPE_MSK 0x0C
#define NAND_CI_CELLTYPE_SHIFT 2
-/* Keep gcc happy */
-struct nand_chip;
-
-/* ONFI version bits */
-#define ONFI_VERSION_1_0 BIT(1)
-#define ONFI_VERSION_2_0 BIT(2)
-#define ONFI_VERSION_2_1 BIT(3)
-#define ONFI_VERSION_2_2 BIT(4)
-#define ONFI_VERSION_2_3 BIT(5)
-#define ONFI_VERSION_3_0 BIT(6)
-#define ONFI_VERSION_3_1 BIT(7)
-#define ONFI_VERSION_3_2 BIT(8)
-#define ONFI_VERSION_4_0 BIT(9)
-
-/* ONFI features */
-#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
-#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
-
-/* ONFI timing mode, used in both asynchronous and synchronous mode */
-#define ONFI_TIMING_MODE_0 (1 << 0)
-#define ONFI_TIMING_MODE_1 (1 << 1)
-#define ONFI_TIMING_MODE_2 (1 << 2)
-#define ONFI_TIMING_MODE_3 (1 << 3)
-#define ONFI_TIMING_MODE_4 (1 << 4)
-#define ONFI_TIMING_MODE_5 (1 << 5)
-#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
-
-/* ONFI feature number/address */
-#define ONFI_FEATURE_NUMBER 256
-#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
-
-/* Vendor-specific feature address (Micron) */
-#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
-#define ONFI_FEATURE_ON_DIE_ECC 0x90
-#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
-
-/* ONFI subfeature parameters length */
-#define ONFI_SUBFEATURE_PARAM_LEN 4
-
-/* ONFI optional commands SET/GET FEATURES supported? */
-#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
-
-struct nand_onfi_params {
- /* rev info and features block */
- /* 'O' 'N' 'F' 'I' */
- u8 sig[4];
- __le16 revision;
- __le16 features;
- __le16 opt_cmd;
- u8 reserved0[2];
- __le16 ext_param_page_length; /* since ONFI 2.1 */
- u8 num_of_param_pages; /* since ONFI 2.1 */
- u8 reserved1[17];
-
- /* manufacturer information block */
- char manufacturer[12];
- char model[20];
- u8 jedec_id;
- __le16 date_code;
- u8 reserved2[13];
-
- /* memory organization block */
- __le32 byte_per_page;
- __le16 spare_bytes_per_page;
- __le32 data_bytes_per_ppage;
- __le16 spare_bytes_per_ppage;
- __le32 pages_per_block;
- __le32 blocks_per_lun;
- u8 lun_count;
- u8 addr_cycles;
- u8 bits_per_cell;
- __le16 bb_per_lun;
- __le16 block_endurance;
- u8 guaranteed_good_blocks;
- __le16 guaranteed_block_endurance;
- u8 programs_per_page;
- u8 ppage_attr;
- u8 ecc_bits;
- u8 interleaved_bits;
- u8 interleaved_ops;
- u8 reserved3[13];
-
- /* electrical parameter block */
- u8 io_pin_capacitance_max;
- __le16 async_timing_mode;
- __le16 program_cache_timing_mode;
- __le16 t_prog;
- __le16 t_bers;
- __le16 t_r;
- __le16 t_ccs;
- __le16 src_sync_timing_mode;
- u8 src_ssync_features;
- __le16 clk_pin_capacitance_typ;
- __le16 io_pin_capacitance_typ;
- __le16 input_pin_capacitance_typ;
- u8 input_pin_capacitance_max;
- u8 driver_strength_support;
- __le16 t_int_r;
- __le16 t_adl;
- u8 reserved4[8];
-
- /* vendor */
- __le16 vendor_revision;
- u8 vendor[88];
-
- __le16 crc;
-} __packed;
-
-#define ONFI_CRC_BASE 0x4F4E
-
-/* Extended ECC information Block Definition (since ONFI 2.1) */
-struct onfi_ext_ecc_info {
- u8 ecc_bits;
- u8 codeword_size;
- __le16 bb_per_lun;
- __le16 block_endurance;
- u8 reserved[2];
-} __packed;
-
-#define ONFI_SECTION_TYPE_0 0 /* Unused section. */
-#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */
-#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */
-struct onfi_ext_section {
- u8 type;
- u8 length;
-} __packed;
-
-#define ONFI_EXT_SECTION_MAX 8
-
-/* Extended Parameter Page Definition (since ONFI 2.1) */
-struct onfi_ext_param_page {
- __le16 crc;
- u8 sig[4]; /* 'E' 'P' 'P' 'S' */
- u8 reserved0[10];
- struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
-
- /*
- * The actual size of the Extended Parameter Page is in
- * @ext_param_page_length of nand_onfi_params{}.
- * The following are the variable length sections.
- * So we do not add any fields below. Please see the ONFI spec.
- */
-} __packed;
-
-struct jedec_ecc_info {
- u8 ecc_bits;
- u8 codeword_size;
- __le16 bb_per_lun;
- __le16 block_endurance;
- u8 reserved[2];
-} __packed;
-
-/* JEDEC features */
-#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
-
-struct nand_jedec_params {
- /* rev info and features block */
- /* 'J' 'E' 'S' 'D' */
- u8 sig[4];
- __le16 revision;
- __le16 features;
- u8 opt_cmd[3];
- __le16 sec_cmd;
- u8 num_of_param_pages;
- u8 reserved0[18];
-
- /* manufacturer information block */
- char manufacturer[12];
- char model[20];
- u8 jedec_id[6];
- u8 reserved1[10];
-
- /* memory organization block */
- __le32 byte_per_page;
- __le16 spare_bytes_per_page;
- u8 reserved2[6];
- __le32 pages_per_block;
- __le32 blocks_per_lun;
- u8 lun_count;
- u8 addr_cycles;
- u8 bits_per_cell;
- u8 programs_per_page;
- u8 multi_plane_addr;
- u8 multi_plane_op_attr;
- u8 reserved3[38];
-
- /* electrical parameter block */
- __le16 async_sdr_speed_grade;
- __le16 toggle_ddr_speed_grade;
- __le16 sync_ddr_speed_grade;
- u8 async_sdr_features;
- u8 toggle_ddr_features;
- u8 sync_ddr_features;
- __le16 t_prog;
- __le16 t_bers;
- __le16 t_r;
- __le16 t_r_multi_plane;
- __le16 t_ccs;
- __le16 io_pin_capacitance_typ;
- __le16 input_pin_capacitance_typ;
- __le16 clk_pin_capacitance_typ;
- u8 driver_strength_support;
- __le16 t_adl;
- u8 reserved4[36];
-
- /* ECC and endurance block */
- u8 guaranteed_good_blocks;
- __le16 guaranteed_block_endurance;
- struct jedec_ecc_info ecc_info[4];
- u8 reserved5[29];
-
- /* reserved */
- u8 reserved6[148];
-
- /* vendor */
- __le16 vendor_rev_num;
- u8 reserved7[88];
-
- /* CRC for Parameter Page */
- __le16 crc;
-} __packed;
-
-/**
- * struct onfi_params - ONFI specific parameters that will be reused
- * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
- * @tPROG: Page program time
- * @tBERS: Block erase time
- * @tR: Page read time
- * @tCCS: Change column setup time
- * @async_timing_mode: Supported asynchronous timing mode
- * @vendor_revision: Vendor specific revision number
- * @vendor: Vendor specific data
- */
-struct onfi_params {
- int version;
- u16 tPROG;
- u16 tBERS;
- u16 tR;
- u16 tCCS;
- u16 async_timing_mode;
- u16 vendor_revision;
- u8 vendor[88];
-};
-
/**
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
@@ -646,31 +392,28 @@ struct nand_ecc_ctrl {
void *priv;
u8 *calc_buf;
u8 *code_buf;
- void (*hwctl)(struct mtd_info *mtd, int mode);
- int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
- uint8_t *ecc_code);
- int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc,
- uint8_t *calc_ecc);
- int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
- int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page);
- int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
- int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offs, uint32_t len, uint8_t *buf, int page);
- int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, uint32_t data_len,
- const uint8_t *data_buf, int oob_required, int page);
- int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page);
- int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
- int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
- int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page);
- int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
+ void (*hwctl)(struct nand_chip *chip, int mode);
+ int (*calculate)(struct nand_chip *chip, const uint8_t *dat,
+ uint8_t *ecc_code);
+ int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc,
+ uint8_t *calc_ecc);
+ int (*read_page_raw)(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+ int (*write_page_raw)(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+ int (*read_page)(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+ int (*read_subpage)(struct nand_chip *chip, uint32_t offs,
+ uint32_t len, uint8_t *buf, int page);
+ int (*write_subpage)(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *data_buf,
+ int oob_required, int page);
+ int (*write_page)(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+ int (*write_oob_raw)(struct nand_chip *chip, int page);
+ int (*read_oob_raw)(struct nand_chip *chip, int page);
+ int (*read_oob)(struct nand_chip *chip, int page);
+ int (*write_oob)(struct nand_chip *chip, int page);
};
/**
@@ -800,24 +543,6 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
}
/**
- * struct nand_manufacturer_ops - NAND Manufacturer operations
- * @detect: detect the NAND memory organization and capabilities
- * @init: initialize all vendor specific fields (like the ->read_retry()
- * implementation) if any.
- * @cleanup: the ->init() function may have allocated resources, ->cleanup()
- * is here to let vendor specific code release those resources.
- * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
- * page. This is called after the checksum is verified.
- */
-struct nand_manufacturer_ops {
- void (*detect)(struct nand_chip *chip);
- int (*init)(struct nand_chip *chip);
- void (*cleanup)(struct nand_chip *chip);
- void (*fixup_onfi_param_page)(struct nand_chip *chip,
- struct nand_onfi_params *p);
-};
-
-/**
* struct nand_op_cmd_instr - Definition of a command instruction
* @opcode: the command to issue in one cycle
*/
@@ -1175,44 +900,72 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_operation *op, bool check_only);
/**
+ * struct nand_legacy - NAND chip legacy fields/hooks
+ * @IO_ADDR_R: address to read the 8 I/O lines of the flash device
+ * @IO_ADDR_W: address to write the 8 I/O lines of the flash device
+ * @read_byte: read one byte from the chip
+ * @write_byte: write a single byte to the chip on the low 8 I/O lines
+ * @write_buf: write data from the buffer to the chip
+ * @read_buf: read data from the chip into the buffer
+ * @cmd_ctrl: hardware specific function for controlling ALE/CLE/nCE. Also used
+ * to write command and address
+ * @cmdfunc: hardware specific function for writing commands to the chip.
+ * @dev_ready: hardware specific function for accessing device ready/busy line.
+ * If set to NULL no access to ready/busy is available and the
+ * ready/busy information is read from the chip status register.
+ * @waitfunc: hardware specific function for wait on ready.
+ * @block_bad: check if a block is bad, using OOB markers
+ * @block_markbad: mark a block bad
+ * @erase: erase function
+ * @set_features: set the NAND chip features
+ * @get_features: get the NAND chip features
+ * @chip_delay: chip dependent delay for transferring data from array to read
+ * regs (tR).
+ *
+ * If you look at this structure you're already wrong. These fields/hooks are
+ * all deprecated.
+ */
+struct nand_legacy {
+ void __iomem *IO_ADDR_R;
+ void __iomem *IO_ADDR_W;
+ u8 (*read_byte)(struct nand_chip *chip);
+ void (*write_byte)(struct nand_chip *chip, u8 byte);
+ void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
+ void (*read_buf)(struct nand_chip *chip, u8 *buf, int len);
+ void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
+ void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column,
+ int page_addr);
+ int (*dev_ready)(struct nand_chip *chip);
+ int (*waitfunc)(struct nand_chip *chip);
+ int (*block_bad)(struct nand_chip *chip, loff_t ofs);
+ int (*block_markbad)(struct nand_chip *chip, loff_t ofs);
+ int (*erase)(struct nand_chip *chip, int page);
+ int (*set_features)(struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para);
+ int (*get_features)(struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para);
+ int chip_delay;
+};
+
+/**
* struct nand_chip - NAND Private Flash Chip Data
* @mtd: MTD device registered to the MTD framework
- * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
- * flash device
- * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
- * flash device.
- * @read_byte: [REPLACEABLE] read one byte from the chip
- * @read_word: [REPLACEABLE] read one word from the chip
- * @write_byte: [REPLACEABLE] write a single byte to the chip on the
- * low 8 I/O lines
- * @write_buf: [REPLACEABLE] write data from the buffer to the chip
- * @read_buf: [REPLACEABLE] read data from the chip into the buffer
+ * @legacy: All legacy fields/hooks. If you develop a new driver,
+ * don't even try to use any of these fields/hooks, and if
+ * you're modifying an existing driver that is using those
+ * fields/hooks, you should consider reworking the driver
+ * to avoid using them.
* @select_chip: [REPLACEABLE] select chip nr
- * @block_bad: [REPLACEABLE] check if a block is bad, using OOB markers
- * @block_markbad: [REPLACEABLE] mark a block bad
- * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling
- * ALE/CLE/nCE. Also used to write command and address
- * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing
- * device ready/busy line. If set to NULL no access to
- * ready/busy is available and the ready/busy information
- * is read from the chip status register.
- * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing
- * commands to the chip.
- * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
- * ready.
* @exec_op: controller specific method to execute NAND operations.
* This method replaces ->cmdfunc(),
- * ->{read,write}_{buf,byte,word}(), ->dev_ready() and
- * ->waifunc().
+ * ->legacy.{read,write}_{buf,byte,word}(),
+ * ->legacy.dev_ready() and ->waitfunc().
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
* @buf_align: minimum buffer alignment required by a platform
* @dummy_controller: dummy controller implementation for drivers that can
* only control a single chip
- * @erase: [REPLACEABLE] erase function
- * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
- * data from array to read regs (tR).
* @state: [INTERN] the current state of the NAND device
* @oob_poi: "poison value buffer," used for laying out OOB data
* before writing
@@ -1260,8 +1013,6 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
* @blocks_per_die: [INTERN] The number of PEBs in a die
* @data_interface: [INTERN] NAND interface timing information
* @read_retries: [INTERN] the number of read retry modes supported
- * @set_features: [REPLACEABLE] set the NAND chip features
- * @get_features: [REPLACEABLE] get the NAND chip features
* @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
* chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
* means the configuration should not be applied but
@@ -1283,35 +1034,17 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
struct nand_chip {
struct mtd_info mtd;
- void __iomem *IO_ADDR_R;
- void __iomem *IO_ADDR_W;
- uint8_t (*read_byte)(struct mtd_info *mtd);
- u16 (*read_word)(struct mtd_info *mtd);
- void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
- void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
- void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- void (*select_chip)(struct mtd_info *mtd, int chip);
- int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
- int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
- void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
- int (*dev_ready)(struct mtd_info *mtd);
- void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
- int page_addr);
- int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
+ struct nand_legacy legacy;
+
+ void (*select_chip)(struct nand_chip *chip, int cs);
int (*exec_op)(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only);
- int (*erase)(struct mtd_info *mtd, int page);
- int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
- int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
- int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
- int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
+ int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
+ int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
const struct nand_data_interface *conf);
- int chip_delay;
unsigned int options;
unsigned int bbt_options;
@@ -1420,27 +1153,6 @@ static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
}
/*
- * NAND Flash Manufacturer ID Codes
- */
-#define NAND_MFR_TOSHIBA 0x98
-#define NAND_MFR_ESMT 0xc8
-#define NAND_MFR_SAMSUNG 0xec
-#define NAND_MFR_FUJITSU 0x04
-#define NAND_MFR_NATIONAL 0x8f
-#define NAND_MFR_RENESAS 0x07
-#define NAND_MFR_STMICRO 0x20
-#define NAND_MFR_HYNIX 0xad
-#define NAND_MFR_MICRON 0x2c
-#define NAND_MFR_AMD 0x01
-#define NAND_MFR_MACRONIX 0xc2
-#define NAND_MFR_EON 0x92
-#define NAND_MFR_SANDISK 0x45
-#define NAND_MFR_INTEL 0x89
-#define NAND_MFR_ATO 0x9b
-#define NAND_MFR_WINBOND 0xef
-
-
-/*
* A helper for defining older NAND chips where the second ID byte fully
* defined the chip, including the geometry (chip size, eraseblock size, page
* size). All these chips have 512 bytes NAND page size.
@@ -1519,114 +1231,7 @@ struct nand_flash_dev {
int onfi_timing_mode_default;
};
-/**
- * struct nand_manufacturer - NAND Flash Manufacturer structure
- * @name: Manufacturer name
- * @id: manufacturer ID code of device.
- * @ops: manufacturer operations
-*/
-struct nand_manufacturer {
- int id;
- char *name;
- const struct nand_manufacturer_ops *ops;
-};
-
-const struct nand_manufacturer *nand_get_manufacturer(u8 id);
-
-static inline const char *
-nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
-{
- return manufacturer ? manufacturer->name : "Unknown";
-}
-
-extern struct nand_flash_dev nand_flash_ids[];
-
-extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
-extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
-extern const struct nand_manufacturer_ops hynix_nand_manuf_ops;
-extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
-extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
-extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
-
int nand_create_bbt(struct nand_chip *chip);
-int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
-int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
-int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
-int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
- int allowbbt);
-
-/**
- * struct platform_nand_chip - chip level device structure
- * @nr_chips: max. number of chips to scan for
- * @chip_offset: chip number offset
- * @nr_partitions: number of partitions pointed to by partitions (or zero)
- * @partitions: mtd partition list
- * @chip_delay: R/B delay value in us
- * @options: Option flags, e.g. 16bit buswidth
- * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
- * @part_probe_types: NULL-terminated array of probe types
- */
-struct platform_nand_chip {
- int nr_chips;
- int chip_offset;
- int nr_partitions;
- struct mtd_partition *partitions;
- int chip_delay;
- unsigned int options;
- unsigned int bbt_options;
- const char **part_probe_types;
-};
-
-/* Keep gcc happy */
-struct platform_device;
-
-/**
- * struct platform_nand_ctrl - controller level device structure
- * @probe: platform specific function to probe/setup hardware
- * @remove: platform specific function to remove/teardown hardware
- * @dev_ready: platform specific function to read ready/busy pin
- * @select_chip: platform specific chip select function
- * @cmd_ctrl: platform specific function for controlling
- * ALE/CLE/nCE. Also used to write command and address
- * @write_buf: platform specific function for write buffer
- * @read_buf: platform specific function for read buffer
- * @priv: private data to transport driver specific settings
- *
- * All fields are optional and depend on the hardware driver requirements
- */
-struct platform_nand_ctrl {
- int (*probe)(struct platform_device *pdev);
- void (*remove)(struct platform_device *pdev);
- int (*dev_ready)(struct mtd_info *mtd);
- void (*select_chip)(struct mtd_info *mtd, int chip);
- void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
- void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
- void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- void *priv;
-};
-
-/**
- * struct platform_nand_data - container structure for platform-specific data
- * @chip: chip level chip structure
- * @ctrl: controller level device structure
- */
-struct platform_nand_data {
- struct platform_nand_chip chip;
- struct platform_nand_ctrl ctrl;
-};
-
-/* return the supported asynchronous timing mode. */
-static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
-{
- if (!chip->parameters.onfi)
- return ONFI_TIMING_MODE_UNKNOWN;
-
- return chip->parameters.onfi->async_timing_mode;
-}
-
-int onfi_fill_data_interface(struct nand_chip *chip,
- enum nand_data_interface_type type,
- int timing_mode);
/*
* Check if it is a SLC nand.
@@ -1658,9 +1263,6 @@ static inline int nand_opcode_8bits(unsigned int command)
return 0;
}
-/* get timing characteristics from ONFI timing mode. */
-const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
-
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
void *extraoob, int extraooblen,
@@ -1670,37 +1272,22 @@ int nand_ecc_choose_conf(struct nand_chip *chip,
const struct nand_ecc_caps *caps, int oobavail);
/* Default write_oob implementation */
-int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
-
-/* Default write_oob syndrome implementation */
-int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
+int nand_write_oob_std(struct nand_chip *chip, int page);
/* Default read_oob implementation */
-int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
+int nand_read_oob_std(struct nand_chip *chip, int page);
-/* Default read_oob syndrome implementation */
-int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
-
-/* Wrapper to use in order for controllers/vendors to GET/SET FEATURES */
-int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
-int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
/* Stub used by drivers that do not support GET/SET FEATURES operations */
-int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
- int addr, u8 *subfeature_param);
+int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
+ u8 *subfeature_param);
/* Default read_page_raw implementation */
-int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
-int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
- u8 *buf, int oob_required, int page);
+int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page);
/* Default write_page_raw implementation */
-int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page);
-int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
- const u8 *buf, int oob_required, int page);
+int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1710,7 +1297,6 @@ int nand_reset_op(struct nand_chip *chip);
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len);
int nand_status_op(struct nand_chip *chip, u8 *status);
-int nand_exit_status_op(struct nand_chip *chip);
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf, unsigned int len);
@@ -1734,16 +1320,25 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
+/* Scan and identify a NAND device */
+int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
+ struct nand_flash_dev *ids);
+
+static inline int nand_scan(struct nand_chip *chip, unsigned int max_chips)
+{
+ return nand_scan_with_ids(chip, max_chips, NULL);
+}
+
+/* Internal helper for board drivers which need to override command function */
+void nand_wait_ready(struct nand_chip *chip);
+
/*
* Free resources held by the NAND device, must be called on error after a
 * successful nand_scan().
*/
void nand_cleanup(struct nand_chip *chip);
/* Unregister the MTD device and calls nand_cleanup() */
-void nand_release(struct mtd_info *mtd);
-
-/* Default extended ID decoding function */
-void nand_decode_ext_id(struct nand_chip *chip);
+void nand_release(struct nand_chip *chip);
/*
* External helper for controller drivers that have to implement the WAITRDY
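[Editor's sketch of the probe/remove flow after this conversion; driver names hypothetical. The point is that nand_scan(), nand_wait_ready() and nand_release() now take the nand_chip directly instead of the mtd_info.]

static int mydrv_probe(struct platform_device *pdev)
{
	struct nand_chip *chip;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	/* ... fill in chip->legacy.* hooks or chip->exec_op, ECC setup ... */

	ret = nand_scan(chip, 1);	/* previously nand_scan(mtd, 1) */
	if (ret)
		return ret;

	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
	if (ret)
		nand_cleanup(chip);
	else
		platform_set_drvdata(pdev, chip);

	return ret;
}

static int mydrv_remove(struct platform_device *pdev)
{
	nand_release(platform_get_drvdata(pdev));	/* previously took the mtd */
	return 0;
}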
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index c922e97f205a..7f0c7303575e 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -239,6 +239,94 @@ enum spi_nor_option_flags {
};
/**
+ * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type.
+ * JEDEC JESD216B imposes erase sizes to be a power of 2.
+ * @size_shift: the shift corresponding to @size, @size being a
+ * power of 2.
+ * @size_mask: the size mask based on @size_shift.
+ * @opcode: the SPI command op code to erase the sector/block.
+ * @idx: Erase Type index as sorted in the Basic Flash Parameter
+ * Table. It will be used to synchronize the supported
+ * Erase Types with the ones identified in the SFDP
+ * optional tables.
+ */
+struct spi_nor_erase_type {
+ u32 size;
+ u32 size_shift;
+ u32 size_mask;
+ u8 opcode;
+ u8 idx;
+};
+
+/**
+ * struct spi_nor_erase_command - Used for non-uniform erases
+ * The structure is used to describe a list of erase commands to be executed
+ * once we validate that the erase can be performed. The elements in the list
+ * are run-length encoded.
+ * @list: for inclusion into the list of erase commands.
+ * @count: how many times the same erase command should be
+ * consecutively used.
+ * @size: the size of the sector/block erased by the command.
+ * @opcode: the SPI command op code to erase the sector/block.
+ */
+struct spi_nor_erase_command {
+ struct list_head list;
+ u32 count;
+ u32 size;
+ u8 opcode;
+};
+
+/**
+ * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
+ * @offset: the offset in the data array of erase region start.
+ * LSB bits are used as a bitmask encoding flags to
+ * determine if this region is overlaid, if this region is
+ * the last in the SPI NOR flash memory and to indicate
+ * all the supported erase commands inside this region.
+ * The erase types are sorted in ascending order with the
+ * smallest Erase Type size being at BIT(0).
+ * @size: the size of the region in bytes.
+ */
+struct spi_nor_erase_region {
+ u64 offset;
+ u64 size;
+};
+
+#define SNOR_ERASE_TYPE_MAX 4
+#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
+
+#define SNOR_LAST_REGION BIT(4)
+#define SNOR_OVERLAID_REGION BIT(5)
+
+#define SNOR_ERASE_FLAGS_MAX 6
+#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
+
+/**
+ * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
+ * @regions: array of erase regions. The regions are consecutive in
+ * address space. Walking through the regions is done
+ * incrementally.
+ * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform
+ * sector size (legacy implementation).
+ * @erase_type: an array of erase types shared by all the regions.
+ * The erase types are sorted in ascending order, with the
+ * smallest Erase Type size being the first member in the
+ * erase_type array.
+ * @uniform_erase_type: bitmask encoding erase types that can erase the
+ * entire memory. This member is completed at init by
+ * uniform and non-uniform SPI NOR flash memories if they
+ * support at least one erase type that can erase the
+ * entire memory.
+ */
+struct spi_nor_erase_map {
+ struct spi_nor_erase_region *regions;
+ struct spi_nor_erase_region uniform_region;
+ struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
+ u8 uniform_erase_type;
+};
+
+/**
* struct flash_info - Forward declaration of a structure used internally by
* spi_nor_scan()
*/
@@ -262,6 +350,7 @@ struct flash_info;
* @write_proto: the SPI protocol for write operations
 * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
* @cmd_buf: used by the write_reg
+ * @erase_map: the erase map of the SPI NOR
* @prepare: [OPTIONAL] do some preparations for the
* read/write/erase/lock/unlock operations
* @unprepare: [OPTIONAL] do some post work after the
@@ -297,6 +386,7 @@ struct spi_nor {
bool sst_write_second;
u32 flags;
u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+ struct spi_nor_erase_map erase_map;
int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
@@ -317,6 +407,35 @@ struct spi_nor {
void *priv;
};
+static u64 __maybe_unused
+spi_nor_region_is_last(const struct spi_nor_erase_region *region)
+{
+ return region->offset & SNOR_LAST_REGION;
+}
+
+static u64 __maybe_unused
+spi_nor_region_end(const struct spi_nor_erase_region *region)
+{
+ return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
+}
+
+static void __maybe_unused
+spi_nor_region_mark_end(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_LAST_REGION;
+}
+
+static void __maybe_unused
+spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_OVERLAID_REGION;
+}
+
+static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor)
+{
+ return !!nor->erase_map.uniform_erase_type;
+}
+
static inline void spi_nor_set_flash_node(struct spi_nor *nor,
struct device_node *np)
{
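[Editor's sketch to make the offset encoding concrete; helper name hypothetical. The low SNOR_ERASE_FLAGS_MAX bits of @offset carry the flags and erase-type mask, so they must be cleared to recover the byte address.]

static void mydrv_dump_erase_map(const struct spi_nor_erase_map *map)
{
	struct spi_nor_erase_region *region = map->regions;

	while (region) {
		u64 start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
		u64 end = spi_nor_region_end(region);

		pr_info("region [0x%llx-0x%llx], erase types 0x%llx\n",
			start, end - 1,
			region->offset & SNOR_ERASE_TYPE_MASK);

		region = spi_nor_region_is_last(region) ? NULL : region + 1;
	}
}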
diff --git a/include/linux/ndctl.h b/include/linux/ndctl.h
new file mode 100644
index 000000000000..cd5a293ce3ae
--- /dev/null
+++ b/include/linux/ndctl.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU Lesser General Public License,
+ * version 2.1, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+ * more details.
+ */
+#ifndef _LINUX_NDCTL_H
+#define _LINUX_NDCTL_H
+
+#include <uapi/linux/ndctl.h>
+
+enum {
+ ND_MIN_NAMESPACE_SIZE = PAGE_SIZE,
+};
+
+#endif /* _LINUX_NDCTL_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ca5ab98053c8..dc1d9ed33b31 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -535,6 +535,32 @@ static inline void napi_synchronize(const struct napi_struct *n)
barrier();
}
+/**
+ * napi_if_scheduled_mark_missed - if NAPI is running, set the
+ * NAPIF_STATE_MISSED flag
+ * @n: NAPI context
+ *
+ * If NAPI is running, set the NAPIF_STATE_MISSED flag and return true;
+ * return false if NAPI is not scheduled.
+ **/
+static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
+{
+ unsigned long val, new;
+
+ do {
+ val = READ_ONCE(n->state);
+ if (val & NAPIF_STATE_DISABLE)
+ return true;
+
+ if (!(val & NAPIF_STATE_SCHED))
+ return false;
+
+ new = val | NAPIF_STATE_MISSED;
+ } while (cmpxchg(&n->state, val, new) != val);
+
+ return true;
+}
+
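[Editor's usage sketch, driver structure hypothetical: a non-NAPI context that wants to hand work to an already-running poll loop instead of racing with it.]

static void mydrv_kick_tx(struct mydrv_ring *ring)
{
	/* If the poll loop is active it will see NAPIF_STATE_MISSED and
	 * re-run; only schedule NAPI ourselves when it was idle.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->napi))
		napi_schedule(&ring->napi);
}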
enum netdev_queue_state_t {
__QUEUE_STATE_DRV_XOFF,
__QUEUE_STATE_STACK_XOFF,
@@ -583,6 +609,9 @@ struct netdev_queue {
/* Subordinate device that the queue has been assigned to */
struct net_device *sb_dev;
+#ifdef CONFIG_XDP_SOCKETS
+ struct xdp_umem *umem;
+#endif
/*
* write-mostly part
*/
@@ -712,6 +741,9 @@ struct netdev_rx_queue {
struct kobject kobj;
struct net_device *dev;
struct xdp_rxq_info xdp_rxq;
+#ifdef CONFIG_XDP_SOCKETS
+ struct xdp_umem *umem;
+#endif
} ____cacheline_aligned_in_smp;
/*
@@ -1730,6 +1762,8 @@ enum netdev_priv_flags {
* switch driver and used to set the phys state of the
* switch port.
*
+ * @wol_enabled: Wake-on-LAN is enabled
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1974,7 +2008,6 @@ struct net_device {
struct pcpu_lstats __percpu *lstats;
struct pcpu_sw_netstats __percpu *tstats;
struct pcpu_dstats __percpu *dstats;
- struct pcpu_vstats __percpu *vstats;
};
#if IS_ENABLED(CONFIG_GARP)
@@ -2014,6 +2047,7 @@ struct net_device {
struct lock_class_key *qdisc_tx_busylock;
struct lock_class_key *qdisc_running_key;
bool proto_down;
+ unsigned wol_enabled:1;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2317,6 +2351,7 @@ static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
+ bool ignore_outgoing;
struct net_device *dev; /* NULL is wildcarded here */
int (*func) (struct sk_buff *,
struct net_device *,
@@ -2355,6 +2390,12 @@ struct pcpu_sw_netstats {
struct u64_stats_sync syncp;
};
+struct pcpu_lstats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+};
+
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
@@ -2455,6 +2496,13 @@ struct netdev_notifier_info {
struct netlink_ext_ack *extack;
};
+struct netdev_notifier_info_ext {
+ struct netdev_notifier_info info; /* must be first */
+ union {
+ u32 mtu;
+ } ext;
+};
+
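[Editor's sketch of the consuming side; handler name hypothetical, and which events actually carry the extension depends on the emitter. Because @info is the first member, a handler can upcast the pointer when it knows the extension is present.]

static int mydrv_netdev_event(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *ext = ptr;	/* info is first member */

	if (event == NETDEV_CHANGEMTU)	/* assuming the emitter filled ext.mtu */
		pr_info("%s: mtu notification, mtu=%u\n", dev->name, ext->ext.mtu);

	return NOTIFY_DONE;
}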
struct netdev_notifier_change_info {
struct netdev_notifier_info info; /* must be first */
unsigned int flags_changed;
@@ -3597,6 +3645,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
return 0;
}
+bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern int netdev_budget;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 07efffd0c759..bbe99d2b28b4 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
break;
case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
+ break;
hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
break;
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 03097fa70975..e142b2b5f1ea 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -19,7 +19,4 @@ struct ip_conntrack_stat {
unsigned int search_restart;
};
-/* call to create an explicit dependency on nf_conntrack. */
-void need_conntrack(void);
-
#endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/nfnetlink_osf.h b/include/linux/netfilter/nfnetlink_osf.h
index ecf7dab81e9e..c6000046c966 100644
--- a/include/linux/netfilter/nfnetlink_osf.h
+++ b/include/linux/netfilter/nfnetlink_osf.h
@@ -27,6 +27,7 @@ bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
const struct list_head *nf_osf_fingers);
const char *nf_osf_find(const struct sk_buff *skb,
- const struct list_head *nf_osf_fingers);
+ const struct list_head *nf_osf_fingers,
+ const int ttl_check);
#endif /* _NFOSF_H */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 71f121b66ca8..4da90a6ab536 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -176,8 +176,11 @@ struct netlink_callback {
void *data;
/* the module that dump function belong to */
struct module *module;
+ struct netlink_ext_ack *extack;
u16 family;
u16 min_dump_alloc;
+ bool strict_check;
+ u16 answer_flags;
unsigned int prev_seq, seq;
long args[6];
};
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 67662d01130a..676f1ff161a9 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -31,8 +31,6 @@ struct netpoll {
bool ipv6;
u16 local_port, remote_port;
u8 remote_mac[ETH_ALEN];
-
- struct work_struct cleanup_work;
};
struct netpoll_info {
@@ -49,8 +47,9 @@ struct netpoll_info {
};
#ifdef CONFIG_NETPOLL
-extern void netpoll_poll_disable(struct net_device *dev);
-extern void netpoll_poll_enable(struct net_device *dev);
+void netpoll_poll_dev(struct net_device *dev);
+void netpoll_poll_disable(struct net_device *dev);
+void netpoll_poll_enable(struct net_device *dev);
#else
static inline void netpoll_poll_disable(struct net_device *dev) { return; }
static inline void netpoll_poll_enable(struct net_device *dev) { return; }
@@ -62,7 +61,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
int netpoll_setup(struct netpoll *np);
void __netpoll_cleanup(struct netpoll *np);
-void __netpoll_free_async(struct netpoll *np);
+void __netpoll_free(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
struct net_device *dev);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 68e91ef5494c..818dbe9331be 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1241,6 +1241,7 @@ enum {
NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
NVME_SC_ANA_INACCESSIBLE = 0x302,
NVME_SC_ANA_TRANSITION = 0x303,
+ NVME_SC_HOST_PATH_ERROR = 0x370,
NVME_SC_DNR = 0x4000,
};
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 165fd302b442..8d31e39dd564 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -58,7 +58,6 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
int of_dma_configure(struct device *dev,
struct device_node *np,
bool force_dma);
-void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -113,8 +112,6 @@ static inline int of_dma_configure(struct device *dev,
{
return 0;
}
-static inline void of_dma_deconfigure(struct device *dev)
-{}
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index c3f1b44ade29..cb1adf0b78a9 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -119,29 +119,11 @@ static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
return dma_set_coherent_mask(&dev->dev, mask);
}
-
-static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
- unsigned int size)
-{
- return dma_set_max_seg_size(&dev->dev, size);
-}
-
-static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
- unsigned long mask)
-{
- return dma_set_seg_boundary(&dev->dev, mask);
-}
#else
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
-static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
- unsigned int size)
-{ return -EIO; }
-static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
- unsigned long mask)
-{ return -EIO; }
#endif
#endif
diff --git a/include/linux/pci-dma.h b/include/linux/pci-dma.h
deleted file mode 100644
index 0f7aa7353ca3..000000000000
--- a/include/linux/pci-dma.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PCI_DMA_H
-#define _LINUX_PCI_DMA_H
-
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) DEFINE_DMA_UNMAP_ADDR(ADDR_NAME);
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) DEFINE_DMA_UNMAP_LEN(LEN_NAME);
-#define pci_unmap_addr dma_unmap_addr
-#define pci_unmap_addr_set dma_unmap_addr_set
-#define pci_unmap_len dma_unmap_len
-#define pci_unmap_len_set dma_unmap_len_set
-
-#endif
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
new file mode 100644
index 000000000000..bca9bc3e5be7
--- /dev/null
+++ b/include/linux/pci-p2pdma.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCI Peer 2 Peer DMA support.
+ *
+ * Copyright (c) 2016-2018, Logan Gunthorpe
+ * Copyright (c) 2016-2017, Microsemi Corporation
+ * Copyright (c) 2017, Christoph Hellwig
+ * Copyright (c) 2018, Eideticom Inc.
+ */
+
+#ifndef _LINUX_PCI_P2PDMA_H
+#define _LINUX_PCI_P2PDMA_H
+
+#include <linux/pci.h>
+
+struct block_device;
+struct scatterlist;
+
+#ifdef CONFIG_PCI_P2PDMA
+int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
+ u64 offset);
+int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
+ int num_clients, bool verbose);
+bool pci_has_p2pmem(struct pci_dev *pdev);
+struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients);
+void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size);
+void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size);
+pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr);
+struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
+ unsigned int *nents, u32 length);
+void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
+void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
+int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
+ bool *use_p2pdma);
+ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
+ bool use_p2pdma);
+#else /* CONFIG_PCI_P2PDMA */
+static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar,
+ size_t size, u64 offset)
+{
+ return -EOPNOTSUPP;
+}
+static inline int pci_p2pdma_distance_many(struct pci_dev *provider,
+ struct device **clients, int num_clients, bool verbose)
+{
+ return -1;
+}
+static inline bool pci_has_p2pmem(struct pci_dev *pdev)
+{
+ return false;
+}
+static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients,
+ int num_clients)
+{
+ return NULL;
+}
+static inline void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
+{
+ return NULL;
+}
+static inline void pci_free_p2pmem(struct pci_dev *pdev, void *addr,
+ size_t size)
+{
+}
+static inline pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev,
+ void *addr)
+{
+ return 0;
+}
+static inline struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
+ unsigned int *nents, u32 length)
+{
+ return NULL;
+}
+static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
+ struct scatterlist *sgl)
+{
+}
+static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
+{
+}
+static inline int pci_p2pdma_map_sg(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir)
+{
+ return 0;
+}
+static inline int pci_p2pdma_enable_store(const char *page,
+ struct pci_dev **p2p_dev, bool *use_p2pdma)
+{
+ *use_p2pdma = false;
+ return 0;
+}
+static inline ssize_t pci_p2pdma_enable_show(char *page,
+ struct pci_dev *p2p_dev, bool use_p2pdma)
+{
+ return sprintf(page, "none\n");
+}
+#endif /* CONFIG_PCI_P2PDMA */
+
+
+static inline int pci_p2pdma_distance(struct pci_dev *provider,
+ struct device *client, bool verbose)
+{
+ return pci_p2pdma_distance_many(provider, &client, 1, verbose);
+}
+
+static inline struct pci_dev *pci_p2pmem_find(struct device *client)
+{
+ return pci_p2pmem_find_many(&client, 1);
+}
+
+#endif /* _LINUX_PCI_P2PDMA_H */
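[Editor's sketch of the provider/client flow; BAR index and sizes are hypothetical.]

static int mydrv_setup_p2p(struct pci_dev *pdev, struct device *client)
{
	void *buf;
	int ret;

	/* expose 64K of BAR 4 as p2p memory and advertise it */
	ret = pci_p2pdma_add_resource(pdev, 4, 0x10000, 0);
	if (ret)
		return ret;
	pci_p2pmem_publish(pdev, true);

	/* make sure the client can actually reach this provider */
	if (pci_p2pdma_distance(pdev, client, true) < 0)
		return -ENODEV;

	buf = pci_alloc_p2pmem(pdev, 4096);
	if (!buf)
		return -ENOMEM;

	/* ... build an sg list, map with pci_p2pdma_map_sg(), do the DMA ... */

	pci_free_p2pmem(pdev, buf, 4096);
	return 0;
}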
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 6925828f9f25..11c71c4ecf75 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -281,6 +281,7 @@ struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
struct pci_ats;
+struct pci_p2pdma;
/* The pci_dev structure describes PCI devices */
struct pci_dev {
@@ -325,6 +326,7 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI,
this is D0-D3, D0 being fully
functional, and D3 being off. */
+ unsigned int imm_ready:1; /* Supports Immediate Readiness */
u8 pm_cap; /* PM capability offset */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
@@ -402,6 +404,7 @@ struct pci_dev {
unsigned int has_secondary_link:1;
unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
unsigned int is_probed:1; /* Device probing in progress */
+ unsigned int link_active_reporting:1;/* Device capable of reporting link active */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -439,6 +442,9 @@ struct pci_dev {
#ifdef CONFIG_PCI_PASID
u16 pasid_features;
#endif
+#ifdef CONFIG_PCI_P2PDMA
+ struct pci_p2pdma *p2pdma;
+#endif
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
char *driver_override; /* Driver name to force a match */
@@ -1342,7 +1348,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
/* kmem_cache style wrapper around pci_alloc_consistent() */
-#include <linux/pci-dma.h>
#include <linux/dmapool.h>
#define pci_pool dma_pool
@@ -1705,6 +1710,10 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
unsigned long *out_hwirq,
unsigned int *out_type)
{ return -EINVAL; }
+
+static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
+ struct pci_dev *dev)
+{ return NULL; }
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index a6d6650a0490..7acc9f91e72b 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -16,8 +16,6 @@
/**
 * struct hotplug_slot_ops - the callbacks that the hotplug pci core can use
- * @owner: The module owner of this structure
- * @mod_name: The module name (KBUILD_MODNAME) of this structure
* @enable_slot: Called when the user wants to enable a specific pci slot
* @disable_slot: Called when the user wants to disable a specific pci slot
* @set_attention_status: Called to set the specific slot's attention LED to
@@ -25,17 +23,9 @@
* @hardware_test: Called to run a specified hardware test on the specified
* slot.
* @get_power_status: Called to get the current power status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_attention_status: Called to get the current attention status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_latch_status: Called to get the current latch status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
 * @get_adapter_status: Called to see whether an adapter is present in the slot or not.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @reset_slot: Optional interface to allow override of a bus reset for the
* slot for cases where a secondary bus reset can result in spurious
* hotplug events or where a slot can be reset independent of the bus.
@@ -46,8 +36,6 @@
* set an LED, enable / disable power, etc.)
*/
struct hotplug_slot_ops {
- struct module *owner;
- const char *mod_name;
int (*enable_slot) (struct hotplug_slot *slot);
int (*disable_slot) (struct hotplug_slot *slot);
int (*set_attention_status) (struct hotplug_slot *slot, u8 value);
@@ -60,37 +48,19 @@ struct hotplug_slot_ops {
};
/**
- * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
- * @power_status: if power is enabled or not (1/0)
- * @attention_status: if the attention light is enabled or not (1/0)
- * @latch_status: if the latch (if any) is open or closed (1/0)
- * @adapter_status: if there is a pci board present in the slot or not (1/0)
- *
- * Used to notify the hotplug pci core of the status of a specific slot.
- */
-struct hotplug_slot_info {
- u8 power_status;
- u8 attention_status;
- u8 latch_status;
- u8 adapter_status;
-};
-
-/**
* struct hotplug_slot - used to register a physical slot with the hotplug pci core
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
- * @info: pointer to the &struct hotplug_slot_info for the initial values for
- * this slot.
- * @private: used by the hotplug pci controller driver to store whatever it
- * needs.
+ * @owner: The module owner of this structure
+ * @mod_name: The module name (KBUILD_MODNAME) of this structure
*/
struct hotplug_slot {
- struct hotplug_slot_ops *ops;
- struct hotplug_slot_info *info;
- void *private;
+ const struct hotplug_slot_ops *ops;
/* Variables below this are for use only by the hotplug pci core. */
struct list_head slot_list;
struct pci_slot *pci_slot;
+ struct module *owner;
+ const char *mod_name;
};
static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
@@ -110,9 +80,6 @@ void pci_hp_del(struct hotplug_slot *slot);
void pci_hp_destroy(struct hotplug_slot *slot);
void pci_hp_deregister(struct hotplug_slot *slot);
-int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
- struct hotplug_slot_info *info);
-
/* use a define to avoid include chaining to get THIS_MODULE & friends */
#define pci_hp_register(slot, pbus, devnr, name) \
__pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME)
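[Editor's sketch of the driver-side shape after this conversion; names hypothetical. The ops table becomes const, and per-slot data moves from ->private to an embedding structure recovered with container_of().]

struct myhp_slot {
	struct hotplug_slot hotplug;	/* replaces ->private */
	void __iomem *regs;
};

static int myhp_enable_slot(struct hotplug_slot *slot)
{
	struct myhp_slot *myslot = container_of(slot, struct myhp_slot, hotplug);

	pr_info("%s: powering up\n", hotplug_slot_name(&myslot->hotplug));
	/* power the slot up via myslot->regs */
	return 0;
}

static const struct hotplug_slot_ops myhp_ops = {
	.enable_slot	= myhp_enable_slot,
};
/* registration: myslot->hotplug.ops = &myhp_ops;
 * pci_hp_register(&myslot->hotplug, bus, nr, name);
 */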
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d157983b84cf..69f0abe1ba1a 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -117,6 +117,10 @@
#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
+#define PCI_CLASS_SERIAL_IPMI 0x0c07
+#define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700
+#define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701
+#define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702
#define PCI_BASE_CLASS_WIRELESS 0x0d
#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10
@@ -2539,8 +2543,6 @@
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#define PCI_VENDOR_ID_NETRONOME 0x19ee
-#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
-#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000
#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
@@ -2561,6 +2563,8 @@
#define PCI_VENDOR_ID_AMAZON 0x1d0f
+#define PCI_VENDOR_ID_HYGON 0x1d94
+
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 009cdf3d65b6..b297cd1cd4f1 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -108,6 +108,7 @@ void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
+void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
/**
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 10f92e1d8e7b..bf309ff6f244 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -99,6 +99,7 @@ struct arm_pmu {
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
+ int (*filter_match)(struct perf_event *event);
int num_events;
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
diff --git a/include/linux/phy.h b/include/linux/phy.h
index cd6f637cbbfb..3ea87f774a76 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -19,6 +19,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
+#include <linux/linkmode.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/module.h>
@@ -41,13 +42,21 @@
#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \
SUPPORTED_1000baseT_Full)
-#define PHY_BASIC_FEATURES (PHY_10BT_FEATURES | \
- PHY_100BT_FEATURES | \
- PHY_DEFAULT_FEATURES)
-
-#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \
- PHY_1000BT_FEATURES)
-
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
+
+#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
+#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features)
+#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features)
+#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
+#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
+#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
+#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
@@ -509,7 +518,7 @@ struct phy_driver {
u32 phy_id;
char *name;
u32 phy_id_mask;
- u32 features;
+ const unsigned long * const features;
u32 flags;
const void *driver_data;
@@ -967,6 +976,12 @@ static inline void phy_device_reset(struct phy_device *phydev, int value)
#define phydev_err(_phydev, format, args...) \
dev_err(&_phydev->mdio.dev, format, ##args)
+#define phydev_info(_phydev, format, args...) \
+ dev_info(&_phydev->mdio.dev, format, ##args)
+
+#define phydev_warn(_phydev, format, args...) \
+ dev_warn(&_phydev->mdio.dev, format, ##args)
+
#define phydev_dbg(_phydev, format, args...) \
dev_dbg(&_phydev->mdio.dev, format, ##args)
@@ -1039,7 +1054,7 @@ void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
-void phy_trigger_machine(struct phy_device *phydev, bool sync);
+void phy_trigger_machine(struct phy_device *phydev);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd);
@@ -1049,6 +1064,14 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
+void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
+void phy_support_sym_pause(struct phy_device *phydev);
+void phy_support_asym_pause(struct phy_device *phydev);
+void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
+ bool autoneg);
+void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
+bool phy_validate_pause(struct phy_device *phydev,
+ struct ethtool_pauseparam *pp);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *));
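
With ->features now a pointer to an ethtool link-mode bitmap rather than a u32, PHY drivers assign one of the exported masks, and MAC drivers use the new pause helpers instead of editing phydev->supported by hand. A minimal sketch, with a hypothetical PHY ID and driver name:

#include <linux/phy.h>

static struct phy_driver example_phy_driver = {
	.phy_id		= 0x00112233,		/* hypothetical */
	.phy_id_mask	= 0xffffffff,
	.name		= "Example Gigabit PHY",
	.features	= PHY_GBIT_FEATURES,	/* bitmap pointer, not a u32 */
};

/* In a MAC driver, after connecting the PHY: */
static void example_mac_setup_pause(struct phy_device *phydev)
{
	/* Advertise symmetric pause instead of open-coding SUPPORTED_Pause. */
	phy_support_sym_pause(phydev);
}
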
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
deleted file mode 100644
index 0a2c18a9771d..000000000000
--- a/include/linux/phy/phy-qcom-ufs.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef PHY_QCOM_UFS_H_
-#define PHY_QCOM_UFS_H_
-
-#include "phy.h"
-
-/**
- * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
- * ref clock.
- * @phy: reference to a generic phy.
- */
-void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
-
-/**
- * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device
- * ref clock.
- * @phy: reference to a generic phy.
- */
-void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
-
-int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
-void ufs_qcom_phy_save_controller_version(struct phy *phy,
- u8 major, u16 minor, u16 step);
-
-#endif /* PHY_QCOM_UFS_H_ */
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 9713aebdd348..03b319f89a34 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -37,9 +37,11 @@ enum phy_mode {
PHY_MODE_USB_OTG,
PHY_MODE_SGMII,
PHY_MODE_2500SGMII,
+ PHY_MODE_QSGMII,
PHY_MODE_10GKR,
PHY_MODE_UFS_HS_A,
PHY_MODE_UFS_HS_B,
+ PHY_MODE_PCIE,
};
/**
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
index f8f1f6b952a6..eb9805bb3fe8 100644
--- a/include/linux/platform_data/dma-ep93xx.h
+++ b/include/linux/platform_data/dma-ep93xx.h
@@ -85,7 +85,7 @@ static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
if (!ep93xx_dma_chan_is_m2p(chan))
- return DMA_NONE;
+ return DMA_TRANS_NONE;
/* even channels are for TX, odd for RX */
return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
diff --git a/include/linux/platform_data/dma-mcf-edma.h b/include/linux/platform_data/dma-mcf-edma.h
new file mode 100644
index 000000000000..d718ccfa3421
--- /dev/null
+++ b/include/linux/platform_data/dma-mcf-edma.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale eDMA platform data, for the ColdFire SoC family.
+ *
+ * Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_MCF_EDMA_H__
+#define __LINUX_PLATFORM_DATA_MCF_EDMA_H__
+
+struct dma_slave_map;
+
+bool mcf_edma_filter_fn(struct dma_chan *chan, void *param);
+
+#define MCF_EDMA_FILTER_PARAM(ch) ((void *)ch)
+
+/**
+ * struct mcf_edma_platform_data - platform specific data for eDMA engine
+ *
+ * @dma_channels: The number of eDMA channels.
+ * @slave_map: The DMA slave map, routing peripheral requests to channels.
+ * @slavecnt: The number of entries in @slave_map.
+ */
+struct mcf_edma_platform_data {
+ int dma_channels;
+ const struct dma_slave_map *slave_map;
+ int slavecnt;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_MCF_EDMA_H__ */
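
As a usage sketch for the new header — the board name, channel numbers, and slave-map entries below are invented for illustration — a ColdFire board file would wire the eDMA controller up like this:

#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/platform_data/dma-mcf-edma.h>

/* Hypothetical routing of peripheral request lines to eDMA channels. */
static const struct dma_slave_map example_slave_map[] = {
	{ "mcf-uart.0", "rx", MCF_EDMA_FILTER_PARAM(0) },
	{ "mcf-uart.0", "tx", MCF_EDMA_FILTER_PARAM(1) },
};

static struct mcf_edma_platform_data example_edma_pdata = {
	.dma_channels	= 64,
	.slave_map	= example_slave_map,
	.slavecnt	= ARRAY_SIZE(example_slave_map),
};
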
diff --git a/include/linux/platform_data/ehci-sh.h b/include/linux/platform_data/ehci-sh.h
index 5c15a738e116..219bd79dabfc 100644
--- a/include/linux/platform_data/ehci-sh.h
+++ b/include/linux/platform_data/ehci-sh.h
@@ -1,21 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* EHCI SuperH driver platform data
*
* Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
* Copyright (C) 2012 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __USB_EHCI_SH_H
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index 57a5a35e0073..f92a47e18034 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -16,46 +16,12 @@
#ifndef __DAVINCI_GPIO_PLATFORM_H
#define __DAVINCI_GPIO_PLATFORM_H
-#include <linux/io.h>
-#include <linux/spinlock.h>
-
-#include <asm-generic/gpio.h>
-
-#define MAX_REGS_BANKS 5
-#define MAX_INT_PER_BANK 32
-
struct davinci_gpio_platform_data {
u32 ngpio;
u32 gpio_unbanked;
};
-struct davinci_gpio_irq_data {
- void __iomem *regs;
- struct davinci_gpio_controller *chip;
- int bank_num;
-};
-
-struct davinci_gpio_controller {
- struct gpio_chip chip;
- struct irq_domain *irq_domain;
- /* Serialize access to GPIO registers */
- spinlock_t lock;
- void __iomem *regs[MAX_REGS_BANKS];
- int gpio_unbanked;
- int irqs[MAX_INT_PER_BANK];
- unsigned int base;
-};
-
-/*
- * basic gpio routines
- */
-#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */
-
/* Convert GPIO signal to GPIO pin number */
#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
-static inline u32 __gpio_mask(unsigned gpio)
-{
- return 1 << (gpio % 32);
-}
#endif
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index 8612855691b2..8485c6a9a383 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -197,23 +197,12 @@ struct omap_gpio_platform_data {
bool is_mpuio; /* whether the bank is of type MPUIO */
u32 non_wakeup_gpios;
+ u32 quirks; /* Version specific quirks mask */
+
struct omap_gpio_reg_offs *regs;
/* Return context loss count due to PM states changing */
int (*get_context_loss_count)(struct device *dev);
};
-#if IS_BUILTIN(CONFIG_GPIO_OMAP)
-extern void omap2_gpio_prepare_for_idle(int off_mode);
-extern void omap2_gpio_resume_after_idle(void);
-#else
-static inline void omap2_gpio_prepare_for_idle(int off_mode)
-{
-}
-
-static inline void omap2_gpio_resume_after_idle(void)
-{
-}
-#endif
-
#endif
diff --git a/include/linux/platform_data/gpio-ts5500.h b/include/linux/platform_data/gpio-ts5500.h
deleted file mode 100644
index b10d11c9bb49..000000000000
--- a/include/linux/platform_data/gpio-ts5500.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * GPIO (DIO) header for Technologic Systems TS-5500
- *
- * Copyright (c) 2012 Savoir-faire Linux Inc.
- * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _PDATA_GPIO_TS5500_H
-#define _PDATA_GPIO_TS5500_H
-
-/**
- * struct ts5500_dio_platform_data - TS-5500 pin block configuration
- * @base: The GPIO base number to use.
- * @strap: The only pin connected to an interrupt in a block is input-only.
- * If you need a bidirectional line which can trigger an IRQ, you
- * may strap it with an in/out pin. This flag indicates this case.
- */
-struct ts5500_dio_platform_data {
- int base;
- bool strap;
-};
-
-#endif /* _PDATA_GPIO_TS5500_H */
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index 73d9098ada2d..85da11916bd5 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -70,9 +70,6 @@ struct omap_hsmmc_platform_data {
/* string specifying a particular variant of hardware */
char *version;
- int gpio_cd; /* gpio (card detect) */
- int gpio_cod; /* gpio (cover detect) */
- int gpio_wp; /* gpio (write protect) */
/* if we have special card, init it using this callback */
void (*init_card)(struct mmc_card *card);
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
index 98b7925f1a2d..c0f624aca81c 100644
--- a/include/linux/platform_data/mv_usb.h
+++ b/include/linux/platform_data/mv_usb.h
@@ -48,6 +48,5 @@ struct mv_usb_platform_data {
int (*phy_init)(void __iomem *regbase);
void (*phy_deinit)(void __iomem *regbase);
int (*set_vbus)(unsigned int vbus);
- int (*private_init)(void __iomem *opregs, void __iomem *phyregs);
};
#endif
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h
index 9e20c2fb4ffd..4977c06d8a86 100644
--- a/include/linux/platform_data/pxa_sdhci.h
+++ b/include/linux/platform_data/pxa_sdhci.h
@@ -33,8 +33,6 @@
* 1: choose feedback clk + delay value
* 2: choose internal clk
* @clk_delay_enable: enable clk_delay or not, used on pxa910
- * @ext_cd_gpio: gpio pin used for external CD line
- * @ext_cd_gpio_invert: invert values for external CD gpio line
* @max_speed: the maximum speed supported
* @host_caps: Standard MMC host capabilities bit field.
* @quirks: quirks of the platform
@@ -46,8 +44,6 @@ struct sdhci_pxa_platdata {
unsigned int clk_delay_cycles;
unsigned int clk_delay_sel;
bool clk_delay_enable;
- unsigned int ext_cd_gpio;
- bool ext_cd_gpio_invert;
unsigned int max_speed;
u32 host_caps;
u32 host_caps2;
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
index f4edcb03c40c..0638fb6353bc 100644
--- a/include/linux/platform_data/spi-davinci.h
+++ b/include/linux/platform_data/spi-davinci.h
@@ -36,9 +36,6 @@ enum {
* @num_chipselect: number of chipselects supported by this SPI master
* @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt
* controller within the SoC. Possible values are 0 and 1.
- * @chip_sel: list of GPIOs which can act as chip-selects for the SPI.
- * SPI_INTERN_CS denotes internal SPI chip-select. Not necessary
- * to populate if all chip-selects are internal.
* @cshold_bug: set this to true if the SPI controller on your chip requires
* a write to CSHOLD bit in between transfers (like in DM355).
* @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any
@@ -48,7 +45,6 @@ struct davinci_spi_platform_data {
u8 version;
u8 num_chipselect;
u8 intr_line;
- u8 *chip_sel;
u8 prescaler_limit;
bool cshold_bug;
enum dma_event_q dma_event_q;
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 1a9f38f27f65..c7c081dc6034 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -40,6 +40,7 @@ struct platform_device {
#define platform_get_device_id(pdev) ((pdev)->id_entry)
+#define dev_is_platform(dev) ((dev)->bus == &platform_bus_type)
#define to_platform_device(x) container_of((x), struct platform_device, dev)
extern int platform_device_register(struct platform_device *);
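
The new dev_is_platform() helper lets bus-agnostic code check that a struct device really sits on the platform bus before downcasting it; a minimal sketch (callback name hypothetical):

#include <linux/platform_device.h>

static int example_dev_cb(struct device *dev)
{
	struct platform_device *pdev;

	if (!dev_is_platform(dev))
		return 0;	/* some other bus, not for us */

	pdev = to_platform_device(dev);
	dev_info(dev, "platform device %s\n", pdev->name);
	return 0;
}
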
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 776c546d581a..3b5d7280e52e 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -17,11 +17,36 @@
#include <linux/notifier.h>
#include <linux/spinlock.h>
-/* Defines used for the flags field in the struct generic_pm_domain */
-#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
-#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */
-#define GENPD_FLAG_ALWAYS_ON (1U << 2) /* PM domain is always powered on */
-#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) /* Keep devices active if wakeup */
+/*
+ * Flags to control the behaviour of a genpd.
+ *
+ * These flags may be set in the struct generic_pm_domain's flags field by a
+ * genpd backend driver. The flags must be set before the driver calls
+ * pm_genpd_init(), which initializes the genpd.
+ *
+ * GENPD_FLAG_PM_CLK: Instructs genpd to use the PM clk framework,
+ * while powering on/off attached devices.
+ *
+ * GENPD_FLAG_IRQ_SAFE: This informs genpd that its backend callbacks,
+ * ->power_on|off(), don't sleep. Hence, they
+ * can be invoked from within atomic context, which
+ * enables genpd to power on/off the PM domain,
+ * even when pm_runtime_is_irq_safe() returns true
+ * for any of its attached devices. Note that a
+ * genpd having this flag set requires its master
+ * domains to also have it set.
+ *
+ * GENPD_FLAG_ALWAYS_ON: Instructs genpd to always keep the PM domain
+ * powered on.
+ *
+ * GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered
+ * on, in case any of its attached devices is used
+ * in the wakeup path to serve system wakeups.
+ */
+#define GENPD_FLAG_PM_CLK (1U << 0)
+#define GENPD_FLAG_IRQ_SAFE (1U << 1)
+#define GENPD_FLAG_ALWAYS_ON (1U << 2)
+#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
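
Per the new comment block, a genpd provider fills in the flags before handing the domain to pm_genpd_init(); a minimal sketch with a hypothetical domain:

#include <linux/pm_domain.h>

static struct generic_pm_domain example_pd = {
	.name	= "example-power-domain",
	/* Must be set before pm_genpd_init() is called. */
	.flags	= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP,
};

static int example_pd_setup(void)
{
	/* Default governor, domain initially powered on. */
	return pm_genpd_init(&example_pd, NULL, false);
}
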
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 099b31960dec..5d399eeef172 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -79,6 +79,7 @@ struct dev_pm_set_opp_data {
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
+struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
@@ -136,6 +137,11 @@ static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
return ERR_PTR(-ENOTSUPP);
}
+static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
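
Callers of the indexed variant follow the same get/put discipline as dev_pm_opp_get_opp_table(); a sketch for a device carrying more than one OPP table (the index value is illustrative):

#include <linux/err.h>
#include <linux/pm_opp.h>

static int example_use_second_opp_table(struct device *dev)
{
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_get_opp_table_indexed(dev, 1);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* ... operate on the table ... */

	dev_pm_opp_put_opp_table(opp_table);
	return 0;
}
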
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index ee7e987ea1b4..e96581ca7c9d 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -126,5 +126,5 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
-void posixtimer_rearm(struct siginfo *info);
+void posixtimer_rearm(struct kernel_siginfo *info);
#endif
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index d6355f49fbae..507c5e214c42 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -24,6 +24,7 @@ enum bq27xxx_chip {
BQ27546,
BQ27742,
BQ27545, /* bq27545 */
+ BQ27411,
BQ27421, /* bq27421, bq27441, bq27621 */
BQ27425,
BQ27426,
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index e6d226464838..602d64725222 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -46,6 +46,7 @@ struct persistent_ram_zone {
phys_addr_t paddr;
size_t size;
void *vaddr;
+ char *label;
struct persistent_ram_buffer *buffer;
size_t buffer_size;
u32 flags;
@@ -65,7 +66,7 @@ struct persistent_ram_zone {
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype, u32 flags);
+ unsigned int memtype, u32 flags, char *label);
void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 4f36431c380b..6c2ffed907f5 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -62,14 +62,17 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
-#define PTRACE_MODE_FSCREDS 0x08
-#define PTRACE_MODE_REALCREDS 0x10
+#define PTRACE_MODE_FSCREDS 0x08
+#define PTRACE_MODE_REALCREDS 0x10
+#define PTRACE_MODE_SCHED 0x20
+#define PTRACE_MODE_IBPB 0x40
/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
/**
* ptrace_may_access - check whether the caller is permitted to access
@@ -87,6 +90,20 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
*/
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
+/**
+ * ptrace_may_access - check whether the caller is permitted to access
+ * a target task.
+ * @task: target task
+ * @mode: selects type of access and caller credentials
+ *
+ * Returns true on success, false on denial.
+ *
+ * Similar to ptrace_may_access(). Only to be called from context switch
+ * code. Does not call into audit and the regular LSM hooks due to locking
+ * constraints.
+ */
+extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
+
static inline int ptrace_reparented(struct task_struct *child)
{
return !same_thread_group(child->real_parent, child->parent);
@@ -336,14 +353,19 @@ static inline void user_enable_block_step(struct task_struct *task)
extern void user_enable_block_step(struct task_struct *);
#endif /* arch_has_block_step */
-#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
-extern void user_single_step_siginfo(struct task_struct *tsk,
- struct pt_regs *regs, siginfo_t *info);
+#ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT
+extern void user_single_step_report(struct pt_regs *regs);
#else
-static inline void user_single_step_siginfo(struct task_struct *tsk,
- struct pt_regs *regs, siginfo_t *info)
+static inline void user_single_step_report(struct pt_regs *regs)
{
- info->si_signo = SIGTRAP;
+ kernel_siginfo_t info;
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = 0;
+ info.si_uid = 0;
+ force_sig_info(info.si_signo, &info, current);
}
#endif
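
A sketch of the intended caller, going by the kernel-doc above ("true on success, false on denial"): the context-switch path asks whether the incoming task may ptrace the outgoing context and, on denial, issues an IBPB to stop user-to-user branch-predictor attacks. The function name and the exact policy are assumptions here, not taken from this patch:

#include <linux/ptrace.h>

/* Called with scheduler locks held; must not sleep or enter audit/LSM. */
static bool example_ibpb_needed(struct task_struct *next)
{
	return !ptrace_may_access_sched(next, PTRACE_MODE_SPEC_IBPB);
}
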
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 13b4244d44c1..979087e021f3 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -196,6 +196,7 @@ enum pxa_ssp_type {
PXA27x_SSP,
PXA3xx_SSP,
PXA168_SSP,
+ MMP2_SSP,
PXA910_SSP,
CE4100_SSP,
QUARK_X1000_SSP,
@@ -217,7 +218,7 @@ struct ssp_device {
const char *label;
int port_id;
- int type;
+ enum pxa_ssp_type type;
int use_count;
int irq;
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 5d6144977828..3bcd67fd5548 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -225,19 +225,14 @@ struct geni_se {
#define HW_VER_MINOR_SHFT 16
#define HW_VER_STEP_MASK GENMASK(15, 0)
+#define GENI_SE_VERSION_MAJOR(ver) (((ver) & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT)
+#define GENI_SE_VERSION_MINOR(ver) (((ver) & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT)
+#define GENI_SE_VERSION_STEP(ver) ((ver) & HW_VER_STEP_MASK)
+
#if IS_ENABLED(CONFIG_QCOM_GENI_SE)
u32 geni_se_get_qup_hw_version(struct geni_se *se);
-#define geni_se_get_wrapper_version(se, major, minor, step) do { \
- u32 ver; \
-\
- ver = geni_se_get_qup_hw_version(se); \
- major = (ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT; \
- minor = (ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT; \
- step = version & HW_VER_STEP_MASK; \
-} while (0)
-
/**
* geni_se_read_proto() - Read the protocol configured for a serial engine
* @se: Pointer to the concerned serial engine.
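
The removed helper macro computed the step field from an undeclared 'version' variable; the replacement macros take the version word explicitly, so callers decode it in the obvious way:

#include <linux/kernel.h>
#include <linux/qcom-geni-se.h>

static void example_log_hw_version(struct geni_se *se)
{
	u32 ver = geni_se_get_qup_hw_version(se);

	pr_info("QUP HW version %u.%u.%u\n",
		GENI_SE_VERSION_MAJOR(ver),
		GENI_SE_VERSION_MINOR(ver),
		GENI_SE_VERSION_STEP(ver));
}
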
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 5d65521260b3..06996ad4f2bc 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015, 2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -33,6 +33,8 @@ struct qcom_scm_vmperm {
#define QCOM_SCM_VMID_HLOS 0x3
#define QCOM_SCM_VMID_MSS_MSA 0xF
+#define QCOM_SCM_VMID_WLAN 0x18
+#define QCOM_SCM_VMID_WLAN_CE 0x19
#define QCOM_SCM_PERM_READ 0x4
#define QCOM_SCM_PERM_WRITE 0x2
#define QCOM_SCM_PERM_EXEC 0x1
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 0081fa6d1268..03f59a28fefd 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -110,7 +110,7 @@
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 37
-#define FW_REVISION_VERSION 2
+#define FW_REVISION_VERSION 7
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -931,12 +931,12 @@ struct db_rdma_dpm_params {
#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT 28
#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 30
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
};
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index b34c573f2b30..66aba505ec56 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -896,7 +896,7 @@ struct e4_ustorm_iscsi_task_ag_ctx {
__le32 exp_cont_len;
__le32 total_data_acked;
__le32 exp_data_acked;
- u8 next_tid_valid;
+ u8 byte2;
u8 byte3;
__le16 word1;
__le16 next_tid;
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 8cd34645e892..a47321a0d572 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -667,14 +667,35 @@ enum qed_link_mode_bits {
QED_LM_Autoneg_BIT = BIT(1),
QED_LM_Asym_Pause_BIT = BIT(2),
QED_LM_Pause_BIT = BIT(3),
- QED_LM_1000baseT_Half_BIT = BIT(4),
- QED_LM_1000baseT_Full_BIT = BIT(5),
+ QED_LM_1000baseT_Full_BIT = BIT(4),
+ QED_LM_10000baseT_Full_BIT = BIT(5),
QED_LM_10000baseKR_Full_BIT = BIT(6),
- QED_LM_25000baseKR_Full_BIT = BIT(7),
- QED_LM_40000baseLR4_Full_BIT = BIT(8),
- QED_LM_50000baseKR2_Full_BIT = BIT(9),
- QED_LM_100000baseKR4_Full_BIT = BIT(10),
- QED_LM_COUNT = 11
+ QED_LM_20000baseKR2_Full_BIT = BIT(7),
+ QED_LM_25000baseKR_Full_BIT = BIT(8),
+ QED_LM_40000baseLR4_Full_BIT = BIT(9),
+ QED_LM_50000baseKR2_Full_BIT = BIT(10),
+ QED_LM_100000baseKR4_Full_BIT = BIT(11),
+ QED_LM_2500baseX_Full_BIT = BIT(12),
+ QED_LM_Backplane_BIT = BIT(13),
+ QED_LM_1000baseKX_Full_BIT = BIT(14),
+ QED_LM_10000baseKX4_Full_BIT = BIT(15),
+ QED_LM_10000baseR_FEC_BIT = BIT(16),
+ QED_LM_40000baseKR4_Full_BIT = BIT(17),
+ QED_LM_40000baseCR4_Full_BIT = BIT(18),
+ QED_LM_40000baseSR4_Full_BIT = BIT(19),
+ QED_LM_25000baseCR_Full_BIT = BIT(20),
+ QED_LM_25000baseSR_Full_BIT = BIT(21),
+ QED_LM_50000baseCR2_Full_BIT = BIT(22),
+ QED_LM_100000baseSR4_Full_BIT = BIT(23),
+ QED_LM_100000baseCR4_Full_BIT = BIT(24),
+ QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25),
+ QED_LM_50000baseSR2_Full_BIT = BIT(26),
+ QED_LM_1000baseX_Full_BIT = BIT(27),
+ QED_LM_10000baseCR_Full_BIT = BIT(28),
+ QED_LM_10000baseSR_Full_BIT = BIT(29),
+ QED_LM_10000baseLR_Full_BIT = BIT(30),
+ QED_LM_10000baseLRM_Full_BIT = BIT(31),
+ QED_LM_COUNT = 32
};
struct qed_link_params {
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index df4d13f7e191..d15f8e4815e3 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -39,15 +39,6 @@
#include <linux/qed/qed_ll2_if.h>
#include <linux/qed/rdma_common.h>
-enum qed_roce_ll2_tx_dest {
- /* Light L2 TX Destination to the Network */
- QED_ROCE_LL2_TX_DEST_NW,
-
- /* Light L2 TX Destination to the Loopback */
- QED_ROCE_LL2_TX_DEST_LB,
- QED_ROCE_LL2_TX_DEST_MAX
-};
-
#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
/* rdma interface */
@@ -581,7 +572,7 @@ struct qed_roce_ll2_packet {
int n_seg;
struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
int roce_mode;
- enum qed_roce_ll2_tx_dest tx_dest;
+ enum qed_ll2_tx_dest tx_dest;
};
enum qed_rdma_type {
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4786c2235b98..e91ec9ddcd30 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -182,7 +182,7 @@ static inline void list_replace_rcu(struct list_head *old,
* @list: the RCU-protected list to splice
* @prev: points to the last element of the existing list
* @next: points to the first element of the existing list
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*
* The list pointed to by @prev and @next can be RCU-read traversed
* concurrently with this function.
@@ -240,7 +240,7 @@ static inline void __list_splice_init_rcu(struct list_head *list,
* designed for stacks.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*/
static inline void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
@@ -255,7 +255,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list, designed for queues.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*/
static inline void list_splice_tail_init_rcu(struct list_head *list,
struct list_head *head,
@@ -359,13 +359,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding. One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends). Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding. One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked. Another example is when items are added to the list,
+ * but never deleted.
*/
#define list_entry_lockless(ptr, type, member) \
container_of((typeof(ptr))READ_ONCE(ptr), type, member)
@@ -376,13 +375,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding. One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends). Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding. One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked. Another example is when items are added to the list,
+ * but never deleted.
*/
#define list_for_each_entry_lockless(pos, head, member) \
for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 75e5b393cf44..4db8bcacc51a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -48,23 +48,14 @@
#define ulong2long(a) (*(long *)(&(a)))
/* Exported common interfaces */
-
-#ifdef CONFIG_PREEMPT_RCU
void call_rcu(struct rcu_head *head, rcu_callback_t func);
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-#define call_rcu call_rcu_sched
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
-void synchronize_sched(void);
void rcu_barrier_tasks(void);
+void synchronize_rcu(void);
#ifdef CONFIG_PREEMPT_RCU
void __rcu_read_lock(void);
void __rcu_read_unlock(void);
-void synchronize_rcu(void);
/*
* Defined as a macro as it is a very low level header included from
@@ -88,11 +79,6 @@ static inline void __rcu_read_unlock(void)
preempt_enable();
}
-static inline void synchronize_rcu(void)
-{
- synchronize_sched();
-}
-
static inline int rcu_preempt_depth(void)
{
return 0;
@@ -103,8 +89,6 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
extern int rcu_scheduler_active __read_mostly;
-void rcu_sched_qs(void);
-void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);
@@ -135,11 +119,10 @@ static inline void rcu_init_nohz(void) { }
* RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
* @a: Code that RCU needs to pay attention to.
*
- * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
- * in the inner idle loop, that is, between the rcu_idle_enter() and
- * the rcu_idle_exit() -- RCU will happily ignore any such read-side
- * critical sections. However, things like powertop need tracepoints
- * in the inner idle loop.
+ * RCU read-side critical sections are forbidden in the inner idle loop,
+ * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
+ * will happily ignore any such read-side critical sections. However,
+ * things like powertop need tracepoints in the inner idle loop.
*
* This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
* will tell RCU that it needs to pay attention, invoke its argument
@@ -167,20 +150,16 @@ static inline void rcu_init_nohz(void) { }
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
-#define rcu_note_voluntary_context_switch(t) \
- do { \
- rcu_all_qs(); \
- rcu_tasks_qs(t); \
- } while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
#define rcu_tasks_qs(t) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
-#define call_rcu_tasks call_rcu_sched
-#define synchronize_rcu_tasks synchronize_sched
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#define call_rcu_tasks call_rcu
+#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
@@ -325,9 +304,8 @@ static inline void rcu_preempt_sleep_check(void) { }
* Helper functions for rcu_dereference_check(), rcu_dereference_protected()
* and rcu_assign_pointer(). Some of these could be folded into their
* callers, but they are left separate in order to ease introduction of
- * multiple flavors of pointers to match the multiple flavors of RCU
- * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in
- * the future.
+ * multiple pointers markings to match different RCU implementations
+ * (e.g., __srcu), should this make sense in the future.
*/
#ifdef __CHECKER__
@@ -686,14 +664,9 @@ static inline void rcu_read_unlock(void)
/**
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
- * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
- * softirq handler to be a quiescent state, a process in RCU read-side
- * critical section must be protected by disabling softirqs. Read-side
- * critical sections in interrupt context can use just rcu_read_lock(),
- * though this should at least be commented to avoid confusing people
- * reading the code.
+ * This is the equivalent of rcu_read_lock(), but it also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as
+ * an RCU read-side critical section.
*
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
* must occur in the same context, for example, it is illegal to invoke
@@ -726,10 +699,9 @@ static inline void rcu_read_unlock_bh(void)
/**
* rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
*
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_sched() or synchronize_rcu_sched().
- * Read-side critical sections can also be introduced by anything that
- * disables preemption, including local_irq_disable() and friends.
+ * This is the equivalent of rcu_read_lock(), but it disables preemption.
+ * Read-side critical sections can also be introduced by anything else
+ * that disables preemption, including local_irq_disable() and friends.
*
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
* must occur in the same context, for example, it is illegal to invoke
@@ -885,4 +857,96 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
+/* Has the specified rcu_head structure been handed to call_rcu()? */
+
+/*
+ * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
+ * @rhp: The rcu_head structure to initialize.
+ *
+ * If you intend to invoke rcu_head_after_call_rcu() to test whether a
+ * given rcu_head structure has already been passed to call_rcu(), then
+ * you must also invoke this rcu_head_init() function on it just after
+ * allocating that structure. Calls to this function must not race with
+ * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
+ */
+static inline void rcu_head_init(struct rcu_head *rhp)
+{
+ rhp->func = (rcu_callback_t)~0L;
+}
+
+/*
+ * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()?
+ * @rhp: The rcu_head structure to test.
+ * @f: The function passed to call_rcu() along with @rhp.
+ *
+ * Returns @true if @rhp has been passed to call_rcu() with @f,
+ * and @false otherwise. Emits a warning in any other case, including
+ * the case where @rhp has already been invoked after a grace period.
+ * Calls to this function must not race with callback invocation. One way
+ * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
+ * in an RCU read-side critical section that includes a read-side fetch
+ * of the pointer to the structure containing @rhp.
+ */
+static inline bool
+rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
+{
+ if (READ_ONCE(rhp->func) == f)
+ return true;
+ WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L);
+ return false;
+}
+
+
+/* Transitional pre-consolidation compatibility definitions. */
+
+static inline void synchronize_rcu_bh(void)
+{
+ synchronize_rcu();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+ synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
+{
+ call_rcu(head, func);
+}
+
+static inline void rcu_barrier_bh(void)
+{
+ rcu_barrier();
+}
+
+static inline void synchronize_sched(void)
+{
+ synchronize_rcu();
+}
+
+static inline void synchronize_sched_expedited(void)
+{
+ synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
+{
+ call_rcu(head, func);
+}
+
+static inline void rcu_barrier_sched(void)
+{
+ rcu_barrier();
+}
+
+static inline unsigned long get_state_synchronize_sched(void)
+{
+ return get_state_synchronize_rcu();
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+ cond_synchronize_rcu(oldstate);
+}
+
#endif /* __LINUX_RCUPDATE_H */
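
The two rcu_head helpers added above pair up as follows; a minimal sketch, with a hypothetical object type, of queueing a callback exactly once even if the teardown path can be entered twice (callers must still keep this from racing with callback invocation, for example by running under an RCU read-side critical section):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	struct rcu_head rh;
	int payload;
};

static void example_obj_free(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_obj, rh));
}

static struct example_obj *example_obj_alloc(void)
{
	struct example_obj *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		rcu_head_init(&p->rh);	/* required for the check below */
	return p;
}

static void example_obj_put(struct example_obj *p)
{
	if (!rcu_head_after_call_rcu(&p->rh, example_obj_free))
		call_rcu(&p->rh, example_obj_free);
}
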
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 57f371344152..8a16c3eb3dd0 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -33,17 +33,17 @@ do { \
/**
* synchronize_rcu_mult - Wait concurrently for multiple grace periods
- * @...: List of call_rcu() functions for the flavors to wait on.
+ * @...: List of call_rcu() functions for different grace periods to wait on
*
- * This macro waits concurrently for multiple flavors of RCU grace periods.
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
- * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU
+ * This macro waits concurrently for multiple types of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
+ * on concurrent RCU and RCU-tasks grace periods. Waiting on a given SRCU
* domain requires you to write a wrapper function for that SRCU domain's
* call_srcu() function, supplying the corresponding srcu_struct.
*
- * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
- * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
- * is automatically a grace period.
+ * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU,
+ * given that anywhere synchronize_rcu_mult() can be called is automatically
+ * a grace period.
*/
#define synchronize_rcu_mult(...) \
_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
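
Concretely, after the flavor consolidation a caller that must wait out both normal-RCU and tasks-RCU readers writes:

#include <linux/rcupdate_wait.h>

static void example_wait_for_both(void)
{
	/* Waits for RCU and RCU-tasks grace periods, concurrently. */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
}
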
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 8d9a0ea8f0b5..af65d1f36ddb 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,12 +27,6 @@
#include <linux/ktime.h>
-struct rcu_dynticks;
-static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
-{
- return 0;
-}
-
/* Never flag non-existent other CPUs! */
static inline bool rcu_eqs_special_set(int cpu) { return false; }
@@ -46,53 +40,28 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
might_sleep();
}
-static inline unsigned long get_state_synchronize_sched(void)
-{
- return 0;
-}
-
-static inline void cond_synchronize_sched(unsigned long oldstate)
-{
- might_sleep();
-}
-
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
+extern void rcu_barrier(void);
static inline void synchronize_rcu_expedited(void)
{
- synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
+ synchronize_rcu();
}
-static inline void rcu_barrier(void)
+static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
- rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
-}
-
-static inline void synchronize_rcu_bh(void)
-{
- synchronize_sched();
-}
-
-static inline void synchronize_rcu_bh_expedited(void)
-{
- synchronize_sched();
+ call_rcu(head, func);
}
-static inline void synchronize_sched_expedited(void)
-{
- synchronize_sched();
-}
+void rcu_qs(void);
-static inline void kfree_call_rcu(struct rcu_head *head,
- rcu_callback_t func)
+static inline void rcu_softirq_qs(void)
{
- call_rcu(head, func);
+ rcu_qs();
}
#define rcu_note_context_switch(preempt) \
do { \
- rcu_sched_qs(); \
+ rcu_qs(); \
rcu_tasks_qs(current); \
} while (0)
@@ -108,6 +77,7 @@ static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
*/
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
+static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
@@ -115,6 +85,11 @@ static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void exit_rcu(void) { }
+static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
+{
+ return false;
+}
+static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifndef CONFIG_SRCU */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 914655848ef6..7f83179177d1 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,6 +30,7 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
+void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);
@@ -44,41 +45,13 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(false);
}
-void synchronize_rcu_bh(void);
-void synchronize_sched_expedited(void);
void synchronize_rcu_expedited(void);
-
void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
-/**
- * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
- *
- * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
- * approach to force the grace period to end quickly. This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code. In fact,
- * if you are using synchronize_rcu_bh_expedited() in a loop, please
- * restructure your code to batch your updates, and then use a single
- * synchronize_rcu_bh() instead.
- *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
- * to call this function from a CPU-hotplug notifier. Failing to observe
- * these restriction will result in deadlock.
- */
-static inline void synchronize_rcu_bh_expedited(void)
-{
- synchronize_sched_expedited();
-}
-
void rcu_barrier(void);
-void rcu_barrier_bh(void);
-void rcu_barrier_sched(void);
bool rcu_eqs_special_set(int cpu);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
-unsigned long get_state_synchronize_sched(void);
-void cond_synchronize_sched(unsigned long oldstate);
void rcu_idle_enter(void);
void rcu_idle_exit(void);
@@ -93,7 +66,9 @@ void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
void rcu_end_inkernel_boot(void);
bool rcu_is_watching(void);
+#ifndef CONFIG_PREEMPT
void rcu_all_qs(void);
+#endif
/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 379505a53722..a367d59c301d 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -268,6 +268,13 @@ typedef void (*regmap_unlock)(void *);
* field is NULL but precious_table (see below) is not, the
* check is performed on such table (a register is precious if
* it belongs to one of the ranges specified by precious_table).
+ * @writeable_noinc_reg: Optional callback returning true if the register
+ * supports multiple write operations without incrementing
+ * the register number. If this field is NULL but
+ * wr_noinc_table (see below) is not, the check is
+ * performed on such table (a register is no increment
+ * writeable if it belongs to one of the ranges specified
+ * by wr_noinc_table).
* @readable_noinc_reg: Optional callback returning true if the register
* supports multiple read operations without incrementing
* the register number. If this field is NULL but
@@ -302,6 +309,7 @@ typedef void (*regmap_unlock)(void *);
* @rd_table: As above, for read access.
* @volatile_table: As above, for volatile registers.
* @precious_table: As above, for precious registers.
+ * @wr_noinc_table: As above, for no increment writeable registers.
* @rd_noinc_table: As above, for no increment readable registers.
* @reg_defaults: Power on reset values for registers (for use with
* register cache support).
@@ -315,9 +323,12 @@ typedef void (*regmap_unlock)(void *);
* masks are used.
* @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
* if they are both empty.
- * @use_single_rw: If set, converts the bulk read and write operations into
- * a series of single read and write operations. This is useful
- * for device that does not support bulk read and write.
+ * @use_single_read: If set, converts the bulk read operation into a series of
+ * single read operations. This is useful for a device that
+ * does not support bulk read.
+ * @use_single_write: If set, converts the bulk write operation into a series of
+ * single write operations. This is useful for a device that
+ * does not support bulk write.
* @can_multi_write: If set, the device supports the multi write mode of bulk
* write operations; if clear, multi write requests will be
* split into individual write operations
@@ -352,6 +363,7 @@ struct regmap_config {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg);
bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
bool disable_locking;
@@ -369,6 +381,7 @@ struct regmap_config {
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *wr_noinc_table;
const struct regmap_access_table *rd_noinc_table;
const struct reg_default *reg_defaults;
unsigned int num_reg_defaults;
@@ -380,7 +393,8 @@ struct regmap_config {
unsigned long write_flag_mask;
bool zero_flag_mask;
- bool use_single_rw;
+ bool use_single_read;
+ bool use_single_write;
bool can_multi_write;
enum regmap_endian reg_format_endian;
@@ -979,6 +993,8 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
+int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_count);
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
@@ -1222,6 +1238,13 @@ static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_bulk_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_count)
{
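
A sketch of the new no-increment write path, mirroring the existing readable_noinc_reg pattern; the device layout and FIFO register below are invented:

#include <linux/regmap.h>

#define EXAMPLE_FIFO_REG	0x40	/* hypothetical FIFO data register */

static bool example_writeable_noinc(struct device *dev, unsigned int reg)
{
	return reg == EXAMPLE_FIFO_REG;
}

static const struct regmap_config example_regmap_config = {
	.reg_bits		= 8,
	.val_bits		= 8,
	.writeable_noinc_reg	= example_writeable_noinc,
};

/* Push a buffer into the FIFO without the register address advancing. */
static int example_fifo_fill(struct regmap *map, const u8 *buf, size_t len)
{
	return regmap_noinc_write(map, EXAMPLE_FIFO_REG, buf, len);
}
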
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 0fd8fbb74763..a9c030192147 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -271,9 +271,16 @@ enum regulator_type {
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
* @min_dropout_uV: The minimum dropout voltage this regulator can handle
* @linear_ranges: A constant table of possible voltage ranges.
- * @n_linear_ranges: Number of entries in the @linear_ranges table.
+ * @linear_range_selectors: A constant table of voltage range selectors.
+ * If pickable ranges are used, each range must
+ * have a corresponding selector here.
+ * @n_linear_ranges: Number of entries in the @linear_ranges (and in
+ * @linear_range_selectors, if used) table(s).
* @volt_table: Voltage mapping table (if table based mapping)
*
+ * @vsel_range_reg: Register for range selector when using pickable ranges
+ * and regulator_regmap_X_voltage_X_pickable functions.
+ * @vsel_range_mask: Mask for register bitfield used for range selector
* @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
* @vsel_mask: Mask for register bitfield used for selector
* @csel_reg: Register for TPS65218 LS3 current regulator
@@ -338,10 +345,14 @@ struct regulator_desc {
int min_dropout_uV;
const struct regulator_linear_range *linear_ranges;
+ const unsigned int *linear_range_selectors;
+
int n_linear_ranges;
const unsigned int *volt_table;
+ unsigned int vsel_range_reg;
+ unsigned int vsel_range_mask;
unsigned int vsel_reg;
unsigned int vsel_mask;
unsigned int csel_reg;
@@ -498,18 +509,25 @@ int regulator_mode_to_status(unsigned int);
int regulator_list_voltage_linear(struct regulator_dev *rdev,
unsigned int selector);
+int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev,
+ unsigned int selector);
int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
unsigned int selector);
int regulator_list_voltage_table(struct regulator_dev *rdev,
unsigned int selector);
int regulator_map_voltage_linear(struct regulator_dev *rdev,
int min_uV, int max_uV);
+int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
+ int min_uV, int max_uV);
int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_iterate(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_ascend(struct regulator_dev *rdev,
int min_uV, int max_uV);
+int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev);
+int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
+ unsigned int sel);
int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev);
int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel);
int regulator_is_enabled_regmap(struct regulator_dev *rdev);
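
Putting the pickable-range fields and helpers together, a regulator with two selectable linear ranges might be described as below; the register addresses, masks, and voltages are invented for illustration:

#include <linux/kernel.h>
#include <linux/regulator/driver.h>

/* Range 0: 0.8-1.6 V in 50 mV steps; range 1: 1.7-3.3 V in 100 mV steps. */
static const struct regulator_linear_range example_ranges[] = {
	REGULATOR_LINEAR_RANGE(800000, 0x0, 0x10, 50000),
	REGULATOR_LINEAR_RANGE(1700000, 0x0, 0x10, 100000),
};

/* Selector value written to vsel_range_reg for each range above. */
static const unsigned int example_range_sel[] = { 0x0, 0x80 };

static const struct regulator_ops example_ops = {
	.list_voltage		= regulator_list_voltage_pickable_linear_range,
	.map_voltage		= regulator_map_voltage_pickable_linear_range,
	.get_voltage_sel	= regulator_get_voltage_sel_pickable_regmap,
	.set_voltage_sel	= regulator_set_voltage_sel_pickable_regmap,
};

static const struct regulator_desc example_desc = {
	.name			= "example-buck",
	.ops			= &example_ops,
	.linear_ranges		= example_ranges,
	.linear_range_selectors	= example_range_sel,
	.n_linear_ranges	= ARRAY_SIZE(example_ranges),
	.vsel_reg		= 0x20,
	.vsel_mask		= 0x1f,
	.vsel_range_reg		= 0x20,
	.vsel_range_mask	= 0x80,
};
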
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 48918be649d4..1a4340ed8e2b 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -24,8 +24,6 @@ struct regulator_init_data;
* @supply_name: Name of the regulator supply
* @input_supply: Name of the input regulator supply
* @microvolts: Output voltage of regulator
- * @gpio: GPIO to use for enable control
- * set to -EINVAL if not used
* @startup_delay: Start-up time in microseconds
* @gpio_is_open_drain: Gpio pin is open drain or normal type.
* If it is open drain type then HIGH will be set
@@ -49,7 +47,6 @@ struct fixed_voltage_config {
const char *supply_name;
const char *input_supply;
int microvolts;
- int gpio;
unsigned startup_delay;
unsigned gpio_is_open_drain:1;
unsigned enable_high:1;
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 3468703d663a..a459a5e973a7 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -48,9 +48,9 @@ struct regulator;
* DISABLE_IN_SUSPEND - turn off regulator in suspend states
* ENABLE_IN_SUSPEND - keep regulator on in suspend states
*/
-#define DO_NOTHING_IN_SUSPEND (-1)
-#define DISABLE_IN_SUSPEND 0
-#define ENABLE_IN_SUSPEND 1
+#define DO_NOTHING_IN_SUSPEND 0
+#define DISABLE_IN_SUSPEND 1
+#define ENABLE_IN_SUSPEND 2
/* Regulator active discharge flags */
enum regulator_active_discharge {
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 5d83d0c1d06c..bba2920e9c05 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -10,7 +10,7 @@
#include <linux/time64.h>
struct timespec;
-struct compat_timespec;
+struct old_timespec32;
struct pollfd;
enum timespec_type {
@@ -40,7 +40,7 @@ struct restart_block {
enum timespec_type type;
union {
struct __kernel_timespec __user *rmtp;
- struct compat_timespec __user *compat_rmtp;
+ struct old_timespec32 __user *compat_rmtp;
};
u64 expires;
} nanosleep;
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 5225832bd6ff..bb9cb84114c1 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -6,6 +6,7 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
+#include <linux/refcount.h>
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -34,6 +35,7 @@ extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
extern int rtnl_lock_killable(void);
+extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
extern wait_queue_head_t netdev_unregistering_wq;
extern struct rw_semaphore pernet_ops_rwsem;
@@ -83,6 +85,11 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
return rtnl_dereference(dev->ingress_queue);
}
+static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev)
+{
+ return rcu_dereference(dev->ingress_queue);
+}
+
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
#ifdef CONFIG_NET_INGRESS
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index ab93b6eae696..67dbb57508b1 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -45,10 +45,10 @@ struct rw_semaphore {
};
/*
- * Setting bit 0 of the owner field with other non-zero bits will indicate
+ * Setting bit 1 of the owner field but not bit 0 will indicate
* that the rwsem is writer-owned with an unknown owner.
*/
-#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L)
+#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L)
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 977cb57d7bc9..adfb3f9a7597 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -571,12 +571,8 @@ union rcu_special {
struct {
u8 blocked;
u8 need_qs;
- u8 exp_need_qs;
-
- /* Otherwise the compiler can store garbage here: */
- u8 pad;
} b; /* Bits. */
- u32 s; /* Set of bits. */
+ u16 s; /* Set of bits. */
};
enum perf_event_task_context {
@@ -739,6 +735,12 @@ struct task_struct {
unsigned use_memdelay:1;
#endif
+ /*
+ * May usercopy functions fault on kernel addresses?
+ * This is not just a single bit because this can potentially nest.
+ */
+ unsigned int kernel_uaccess_faults_ok;
+
unsigned long atomic_flags; /* Flags requiring atomic access. */
struct restart_block restart_block;
@@ -960,7 +962,7 @@ struct task_struct {
/* Ptrace state: */
unsigned long ptrace_message;
- siginfo_t *last_siginfo;
+ kernel_siginfo_t *last_siginfo;
struct task_io_accounting ioac;
#ifdef CONFIG_TASK_XACCT
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 1be35729c2c5..13789d10a50e 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -270,16 +270,16 @@ static inline int signal_group_exit(const struct signal_struct *sig)
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
+extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info);
-static inline int kernel_dequeue_signal(siginfo_t *info)
+static inline int kernel_dequeue_signal(void)
{
struct task_struct *tsk = current;
- siginfo_t __info;
+ kernel_siginfo_t __info;
int ret;
spin_lock_irq(&tsk->sighand->siglock);
- ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
+ ret = dequeue_signal(tsk, &tsk->blocked, &__info);
spin_unlock_irq(&tsk->sighand->siglock);
return ret;
@@ -322,12 +322,12 @@ int force_sig_pkuerr(void __user *addr, u32 pkey);
int force_sig_ptrace_errno_trap(int errno, void __user *addr);
-extern int send_sig_info(int, struct siginfo *, struct task_struct *);
+extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig, struct task_struct *p);
-extern int force_sig_info(int, struct siginfo *, struct task_struct *);
-extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
-extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
-extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
+extern int force_sig_info(int, struct kernel_siginfo *, struct task_struct *);
+extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
+extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
+extern int kill_pid_info_as_cred(int, struct kernel_siginfo *, struct pid *,
const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
@@ -475,9 +475,8 @@ static inline int kill_cad_pid(int sig, int priv)
}
/* These can be the second arg to send_sig_info/send_group_sig_info. */
-#define SEND_SIG_NOINFO ((struct siginfo *) 0)
-#define SEND_SIG_PRIV ((struct siginfo *) 1)
-#define SEND_SIG_FORCED ((struct siginfo *) 2)
+#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
+#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1)
/*
* True if we are on the alternate signal stack.
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 26347741ba50..6b9976180c1e 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -23,10 +23,10 @@
#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
-#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */
-#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */
+#define SD_ASYM_CPUCAPACITY 0x0040 /* Domain members have different CPU capacities */
+#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share CPU capacity */
#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
-#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
+#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share CPU pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
diff --git a/include/linux/security.h b/include/linux/security.h
index 75f4156c84d7..d170a5b031f3 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -35,7 +35,7 @@
struct linux_binprm;
struct cred;
struct rlimit;
-struct siginfo;
+struct kernel_siginfo;
struct sembuf;
struct kern_ipc_perm;
struct audit_context;
@@ -361,7 +361,7 @@ int security_task_setrlimit(struct task_struct *p, unsigned int resource,
int security_task_setscheduler(struct task_struct *p);
int security_task_getscheduler(struct task_struct *p);
int security_task_movememory(struct task_struct *p);
-int security_task_kill(struct task_struct *p, struct siginfo *info,
+int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int sig, const struct cred *cred);
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
@@ -1020,7 +1020,7 @@ static inline int security_task_movememory(struct task_struct *p)
}
static inline int security_task_kill(struct task_struct *p,
- struct siginfo *info, int sig,
+ struct kernel_siginfo *info, int sig,
const struct cred *cred)
{
return 0;
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index c0e795d95477..1c89611e0e06 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -36,6 +36,7 @@ enum {
SCIx_SH4_SCIF_FIFODATA_REGTYPE,
SCIx_SH7705_SCIF_REGTYPE,
SCIx_HSCIF_REGTYPE,
+ SCIx_RZ_SCIFA_REGTYPE,
SCIx_NR_REGTYPES,
};
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 3d4cd5db30a9..200ed96a05af 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -11,17 +11,21 @@ struct task_struct;
/* for sysctl */
extern int print_fatal_signals;
-static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from)
+static inline void copy_siginfo(kernel_siginfo_t *to,
+ const kernel_siginfo_t *from)
{
memcpy(to, from, sizeof(*to));
}
-static inline void clear_siginfo(struct siginfo *info)
+static inline void clear_siginfo(kernel_siginfo_t *info)
{
memset(info, 0, sizeof(*info));
}
-int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
+#define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo))
+
+int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from);
+int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from);
enum siginfo_layout {
SIL_KILL,
@@ -36,7 +40,7 @@ enum siginfo_layout {
SIL_SYS,
};
-enum siginfo_layout siginfo_layout(int sig, int si_code);
+enum siginfo_layout siginfo_layout(unsigned sig, int si_code);
/*
* Define some primitives to manipulate sigset_t.
@@ -257,11 +261,11 @@ struct pt_regs;
enum pid_type;
extern int next_signal(struct sigpending *pending, sigset_t *mask);
-extern int do_send_sig_info(int sig, struct siginfo *info,
+extern int do_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
-extern int group_send_sig_info(int sig, struct siginfo *info,
+extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
-extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
+extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
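The kernel_siginfo conversion above implies that copying out to the larger userspace siginfo_t must zero the tail; a minimal sketch of what copy_siginfo_to_user() plausibly does with SI_EXPANSION_SIZE (illustrative, not the actual implementation):

    static int copy_siginfo_to_user_sketch(siginfo_t __user *to,
                                           const kernel_siginfo_t *from)
    {
            if (copy_to_user(to, from, sizeof(*from)))
                    return -EFAULT;
            /* zero the padding userspace still expects to exist */
            if (clear_user((char __user *)to + sizeof(*from),
                           SI_EXPANSION_SIZE))
                    return -EFAULT;
            return 0;
    }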
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index 222ae696000b..f8a90ae9c6ec 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -9,6 +9,10 @@
#include <linux/list.h>
#include <uapi/linux/signal.h>
+typedef struct kernel_siginfo {
+ __SIGINFO;
+} kernel_siginfo_t;
+
/*
* Real Time signals may be queued.
*/
@@ -16,7 +20,7 @@
struct sigqueue {
struct list_head list;
int flags;
- siginfo_t info;
+ kernel_siginfo_t info;
struct user_struct *user;
};
@@ -60,7 +64,7 @@ struct old_sigaction {
struct ksignal {
struct k_sigaction ka;
- siginfo_t info;
+ kernel_siginfo_t info;
int sig;
};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 17a13e4785fc..0ba687454267 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -243,6 +243,8 @@ struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
+struct bpf_prog;
+union bpf_attr;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
@@ -689,7 +691,7 @@ struct sk_buff {
union {
ktime_t tstamp;
- u64 skb_mstamp;
+ u64 skb_mstamp_ns; /* earliest departure time */
};
/*
* This is the control buffer. It is free to use for every
@@ -1080,11 +1082,6 @@ static inline int skb_pad(struct sk_buff *skb, int pad)
}
#define dev_kfree_skb(a) consume_skb(a)
-int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
- int getfrag(void *from, char *to, int offset,
- int len, int odd, struct sk_buff *skb),
- void *from, int length);
-
int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
int offset, size_t size);
@@ -1192,6 +1189,24 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
const struct flow_dissector_key *key,
unsigned int key_count);
+#ifdef CONFIG_NET
+int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog);
+
+int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
+#else
+static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
@@ -1339,6 +1354,17 @@ static inline void skb_zcopy_abort(struct sk_buff *skb)
}
}
+static inline void skb_mark_not_on_list(struct sk_buff *skb)
+{
+ skb->next = NULL;
+}
+
+static inline void skb_list_del_init(struct sk_buff *skb)
+{
+ __list_del_entry(&skb->list);
+ skb_mark_not_on_list(skb);
+}
+
/**
* skb_queue_empty - check if a queue is empty
* @list: queue head
@@ -1593,6 +1619,17 @@ static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
}
/**
+ * __skb_peek - peek at the head of a non-empty &sk_buff_head
+ * @list_: list to peek at
+ *
+ * Like skb_peek(), but the caller knows that the list is not empty.
+ */
+static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
+{
+ return list_->next;
+}
+
+/**
* skb_peek_next - peek skb following the given one from a queue
* @skb: skb to start from
* @list_: list to peek at
@@ -3468,13 +3505,19 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
case 32: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 24: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 16: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 8: diffs |= __it_diff(a, b, 64);
break;
case 28: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 20: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 12: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 4: diffs |= __it_diff(a, b, 32);
break;
}
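A usage sketch for the new skb_list_del_init() helper above; 'head' and 'process_one' are hypothetical caller state:

    struct sk_buff *skb, *next;

    list_for_each_entry_safe(skb, next, &head, list) {
            skb_list_del_init(skb);         /* unlink and clear skb->next */
            process_one(skb);               /* skb is off every list now */
    }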
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
new file mode 100644
index 000000000000..2a11e9d91dfa
--- /dev/null
+++ b/include/linux/skmsg.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
+
+#ifndef _LINUX_SKMSG_H
+#define _LINUX_SKMSG_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/strparser.h>
+
+#define MAX_MSG_FRAGS MAX_SKB_FRAGS
+
+enum __sk_action {
+ __SK_DROP = 0,
+ __SK_PASS,
+ __SK_REDIRECT,
+ __SK_NONE,
+};
+
+struct sk_msg_sg {
+ u32 start;
+ u32 curr;
+ u32 end;
+ u32 size;
+ u32 copybreak;
+ bool copy[MAX_MSG_FRAGS];
+ /* The extra element is used for chaining the front and back sections when
+ * the list becomes partitioned (e.g. end < start). The crypto APIs
+ * require the chaining.
+ */
+ struct scatterlist data[MAX_MSG_FRAGS + 1];
+};
+
+struct sk_msg {
+ struct sk_msg_sg sg;
+ void *data;
+ void *data_end;
+ u32 apply_bytes;
+ u32 cork_bytes;
+ u32 flags;
+ struct sk_buff *skb;
+ struct sock *sk_redir;
+ struct sock *sk;
+ struct list_head list;
+};
+
+struct sk_psock_progs {
+ struct bpf_prog *msg_parser;
+ struct bpf_prog *skb_parser;
+ struct bpf_prog *skb_verdict;
+};
+
+enum sk_psock_state_bits {
+ SK_PSOCK_TX_ENABLED,
+};
+
+struct sk_psock_link {
+ struct list_head list;
+ struct bpf_map *map;
+ void *link_raw;
+};
+
+struct sk_psock_parser {
+ struct strparser strp;
+ bool enabled;
+ void (*saved_data_ready)(struct sock *sk);
+};
+
+struct sk_psock_work_state {
+ struct sk_buff *skb;
+ u32 len;
+ u32 off;
+};
+
+struct sk_psock {
+ struct sock *sk;
+ struct sock *sk_redir;
+ u32 apply_bytes;
+ u32 cork_bytes;
+ u32 eval;
+ struct sk_msg *cork;
+ struct sk_psock_progs progs;
+ struct sk_psock_parser parser;
+ struct sk_buff_head ingress_skb;
+ struct list_head ingress_msg;
+ unsigned long state;
+ struct list_head link;
+ spinlock_t link_lock;
+ refcount_t refcnt;
+ void (*saved_unhash)(struct sock *sk);
+ void (*saved_close)(struct sock *sk, long timeout);
+ void (*saved_write_space)(struct sock *sk);
+ struct proto *sk_proto;
+ struct sk_psock_work_state work_state;
+ struct work_struct work;
+ union {
+ struct rcu_head rcu;
+ struct work_struct gc;
+ };
+};
+
+int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
+ int elem_first_coalesce);
+int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
+ u32 off, u32 len);
+void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
+int sk_msg_free(struct sock *sk, struct sk_msg *msg);
+int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
+void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
+void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
+ u32 bytes);
+
+void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
+void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);
+
+int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
+ struct sk_msg *msg, u32 bytes);
+int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
+ struct sk_msg *msg, u32 bytes);
+
+static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
+{
+ WARN_ON(i == msg->sg.end && bytes);
+}
+
+static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
+{
+ if (psock->apply_bytes) {
+ if (psock->apply_bytes < bytes)
+ psock->apply_bytes = 0;
+ else
+ psock->apply_bytes -= bytes;
+ }
+}
+
+#define sk_msg_iter_var_prev(var) \
+ do { \
+ if (var == 0) \
+ var = MAX_MSG_FRAGS - 1; \
+ else \
+ var--; \
+ } while (0)
+
+#define sk_msg_iter_var_next(var) \
+ do { \
+ var++; \
+ if (var == MAX_MSG_FRAGS) \
+ var = 0; \
+ } while (0)
+
+#define sk_msg_iter_prev(msg, which) \
+ sk_msg_iter_var_prev(msg->sg.which)
+
+#define sk_msg_iter_next(msg, which) \
+ sk_msg_iter_var_next(msg->sg.which)
+
+static inline void sk_msg_clear_meta(struct sk_msg *msg)
+{
+ memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
+}
+
+static inline void sk_msg_init(struct sk_msg *msg)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS);
+ memset(msg, 0, sizeof(*msg));
+ sg_init_marker(msg->sg.data, MAX_MSG_FRAGS);
+}
+
+static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
+ int which, u32 size)
+{
+ dst->sg.data[which] = src->sg.data[which];
+ dst->sg.data[which].length = size;
+ dst->sg.size += size;
+ src->sg.data[which].length -= size;
+ src->sg.data[which].offset += size;
+}
+
+static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
+{
+ memcpy(dst, src, sizeof(*src));
+ sk_msg_init(src);
+}
+
+static inline bool sk_msg_full(const struct sk_msg *msg)
+{
+ return (msg->sg.end == msg->sg.start) && msg->sg.size;
+}
+
+static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
+{
+ if (sk_msg_full(msg))
+ return MAX_MSG_FRAGS;
+
+ return msg->sg.end >= msg->sg.start ?
+ msg->sg.end - msg->sg.start :
+ msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start);
+}
+
+static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
+{
+ return &msg->sg.data[which];
+}
+
+static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
+{
+ return msg->sg.data[which];
+}
+
+static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
+{
+ return sg_page(sk_msg_elem(msg, which));
+}
+
+static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
+{
+ return msg->flags & BPF_F_INGRESS;
+}
+
+static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
+{
+ struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);
+
+ if (msg->sg.copy[msg->sg.start]) {
+ msg->data = NULL;
+ msg->data_end = NULL;
+ } else {
+ msg->data = sg_virt(sge);
+ msg->data_end = msg->data + sge->length;
+ }
+}
+
+static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
+ u32 len, u32 offset)
+{
+ struct scatterlist *sge;
+
+ get_page(page);
+ sge = sk_msg_elem(msg, msg->sg.end);
+ sg_set_page(sge, page, len, offset);
+ sg_unmark_end(sge);
+
+ msg->sg.copy[msg->sg.end] = true;
+ msg->sg.size += len;
+ sk_msg_iter_next(msg, end);
+}
+
+static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
+{
+ do {
+ msg->sg.copy[i] = copy_state;
+ sk_msg_iter_var_next(i);
+ if (i == msg->sg.end)
+ break;
+ } while (1);
+}
+
+static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
+{
+ sk_msg_sg_copy(msg, start, true);
+}
+
+static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
+{
+ sk_msg_sg_copy(msg, start, false);
+}
+
+static inline struct sk_psock *sk_psock(const struct sock *sk)
+{
+ return rcu_dereference_sk_user_data(sk);
+}
+
+static inline void sk_psock_queue_msg(struct sk_psock *psock,
+ struct sk_msg *msg)
+{
+ list_add_tail(&msg->list, &psock->ingress_msg);
+}
+
+static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
+{
+ return psock ? list_empty(&psock->ingress_msg) : true;
+}
+
+static inline void sk_psock_report_error(struct sk_psock *psock, int err)
+{
+ struct sock *sk = psock->sk;
+
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
+}
+
+struct sk_psock *sk_psock_init(struct sock *sk, int node);
+
+int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
+void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
+void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
+
+int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
+ struct sk_msg *msg);
+
+static inline struct sk_psock_link *sk_psock_init_link(void)
+{
+ return kzalloc(sizeof(struct sk_psock_link),
+ GFP_ATOMIC | __GFP_NOWARN);
+}
+
+static inline void sk_psock_free_link(struct sk_psock_link *link)
+{
+ kfree(link);
+}
+
+struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);
+#if defined(CONFIG_BPF_STREAM_PARSER)
+void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link);
+#else
+static inline void sk_psock_unlink(struct sock *sk,
+ struct sk_psock_link *link)
+{
+}
+#endif
+
+void __sk_psock_purge_ingress_msg(struct sk_psock *psock);
+
+static inline void sk_psock_cork_free(struct sk_psock *psock)
+{
+ if (psock->cork) {
+ sk_msg_free(psock->sk, psock->cork);
+ kfree(psock->cork);
+ psock->cork = NULL;
+ }
+}
+
+static inline void sk_psock_update_proto(struct sock *sk,
+ struct sk_psock *psock,
+ struct proto *ops)
+{
+ psock->saved_unhash = sk->sk_prot->unhash;
+ psock->saved_close = sk->sk_prot->close;
+ psock->saved_write_space = sk->sk_write_space;
+
+ psock->sk_proto = sk->sk_prot;
+ sk->sk_prot = ops;
+}
+
+static inline void sk_psock_restore_proto(struct sock *sk,
+ struct sk_psock *psock)
+{
+ if (psock->sk_proto) {
+ sk->sk_prot = psock->sk_proto;
+ psock->sk_proto = NULL;
+ }
+}
+
+static inline void sk_psock_set_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ set_bit(bit, &psock->state);
+}
+
+static inline void sk_psock_clear_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ clear_bit(bit, &psock->state);
+}
+
+static inline bool sk_psock_test_state(const struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ return test_bit(bit, &psock->state);
+}
+
+static inline struct sk_psock *sk_psock_get_checked(struct sock *sk)
+{
+ struct sk_psock *psock;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (psock) {
+ if (sk->sk_prot->recvmsg != tcp_bpf_recvmsg) {
+ psock = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
+ if (!refcount_inc_not_zero(&psock->refcnt))
+ psock = ERR_PTR(-EBUSY);
+ }
+out:
+ rcu_read_unlock();
+ return psock;
+}
+
+static inline struct sk_psock *sk_psock_get(struct sock *sk)
+{
+ struct sk_psock *psock;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (psock && !refcount_inc_not_zero(&psock->refcnt))
+ psock = NULL;
+ rcu_read_unlock();
+ return psock;
+}
+
+void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
+void sk_psock_destroy(struct rcu_head *rcu);
+void sk_psock_drop(struct sock *sk, struct sk_psock *psock);
+
+static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
+{
+ if (refcount_dec_and_test(&psock->refcnt))
+ sk_psock_drop(sk, psock);
+}
+
+static inline void psock_set_prog(struct bpf_prog **pprog,
+ struct bpf_prog *prog)
+{
+ prog = xchg(pprog, prog);
+ if (prog)
+ bpf_prog_put(prog);
+}
+
+static inline void psock_progs_drop(struct sk_psock_progs *progs)
+{
+ psock_set_prog(&progs->msg_parser, NULL);
+ psock_set_prog(&progs->skb_parser, NULL);
+ psock_set_prog(&progs->skb_verdict, NULL);
+}
+
+#endif /* _LINUX_SKMSG_H */
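The sg ring in this new header wraps modulo MAX_MSG_FRAGS, so sg.end may be numerically below sg.start; a worked sketch of the sk_msg_elem_used() arithmetic, assuming MAX_MSG_FRAGS == 17 purely for illustration:

    u32 start = 15, end = 3;                /* ring has wrapped */
    u32 used = end >= start ? end - start
                            : end + (MAX_MSG_FRAGS - start);
    /* used == 3 + (17 - 15) == 5 live elements */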
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9fb239e12b82..a56f08ff3097 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -53,6 +53,10 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
smp_call_func_t func, void *info, bool wait,
gfp_t gfp_flags);
+void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags, const struct cpumask *mask);
+
int smp_call_function_single_async(int cpu, call_single_data_t *csd);
#ifdef CONFIG_SMP
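A usage sketch for the new masked variant; flush_needed(), do_flush(), info and mm are hypothetical caller state:

    /* Run do_flush() only on CPUs in mm_cpumask(mm) for which
     * flush_needed() returns true, and wait for completion.
     */
    on_each_cpu_cond_mask(flush_needed, do_flush, info, true,
                          GFP_ATOMIC, mm_cpumask(mm));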
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 7ed4713d5337..8b571e9b9f76 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -348,7 +348,7 @@ struct ucred {
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
-struct timespec;
+struct timespec64;
/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff
* forbid_cmsg_compat==false
@@ -358,7 +358,7 @@ extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg,
extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg,
unsigned int flags, bool forbid_cmsg_compat);
extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
- unsigned int flags, struct timespec *timeout);
+ unsigned int flags, struct timespec64 *timeout);
extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
bool forbid_cmsg_compat);
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index b2bd4b4127c4..69ee30456864 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -81,8 +81,10 @@ enum spi_mem_data_dir {
* @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
* @data.buswidth: number of IO lanes used to send/receive the data
* @data.dir: direction of the transfer
- * @data.buf.in: input buffer
- * @data.buf.out: output buffer
+ * @data.nbytes: number of data bytes to send/receive. Can be zero if the
+ * operation does not involve transferring data
+ * @data.buf.in: input buffer (must be DMA-able)
+ * @data.buf.out: output buffer (must be DMA-able)
*/
struct spi_mem_op {
struct {
@@ -105,7 +107,6 @@ struct spi_mem_op {
u8 buswidth;
enum spi_mem_data_dir dir;
unsigned int nbytes;
- /* buf.{in,out} must be DMA-able. */
union {
void *in;
const void *out;
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a64235e05321..6be77fa5ab90 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -1,15 +1,6 @@
-/*
- * Copyright (C) 2005 David Brownell
+/* SPDX-License-Identifier: GPL-2.0-or-later
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2005 David Brownell
*/
#ifndef __LINUX_SPI_H
@@ -163,10 +154,12 @@ struct spi_device {
#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
+#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
int irq;
void *controller_state;
void *controller_data;
char modalias[SPI_NAME_SIZE];
+ const char *driver_override;
int cs_gpio; /* chip select gpio */
/* the statistics */
@@ -177,7 +170,6 @@ struct spi_device {
* the controller talks to each chip, like:
* - memory packing (12 bit samples into low bits, others zeroed)
* - priority
- * - drop chipselect after each word
* - chipselect delays
* - ...
*/
@@ -711,6 +703,8 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @delay_usecs: microseconds to delay after this transfer before
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this @spi_message.
+ * @word_delay: number of clock cycles to delay between words (word size
+ * set by bits_per_word) during transmission.
* @transfer_list: transfers are sequenced through @spi_message.transfers
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
@@ -793,6 +787,7 @@ struct spi_transfer {
u8 bits_per_word;
u16 delay_usecs;
u32 speed_hz;
+ u16 word_delay;
struct list_head transfer_list;
};
@@ -1277,7 +1272,6 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
{ return 0; }
#endif
-
/* If you're hotplugging an adapter with devices (parport, usb, etc)
* use spi_new_device() to describe each device. You can also call
* spi_unregister_device() to start making that device vanish, but
@@ -1309,6 +1303,22 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
}
+/* OF support code */
+#if IS_ENABLED(CONFIG_OF)
+
+/* must call put_device() when done with returned spi_device device */
+extern struct spi_device *
+of_find_spi_device_by_node(struct device_node *node);
+
+#else
+
+static inline struct spi_device *
+of_find_spi_device_by_node(struct device_node *node)
+{
+ return NULL;
+}
+
+#endif /* IS_ENABLED(CONFIG_OF) */
/* Compatibility layer */
#define spi_master spi_controller
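A client-side sketch of the two additions above, SPI_CS_WORD and the per-transfer word_delay; the device handle and values are illustrative:

    spi->mode |= SPI_CS_WORD;       /* toggle CS after every word */
    spi->bits_per_word = 16;
    ret = spi_setup(spi);
    if (ret)
            return ret;

    xfer.word_delay = 4;            /* clock cycles between words */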
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 745d4ca4dd50..0ae91b3a7406 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -105,12 +105,13 @@ struct srcu_struct {
#define SRCU_STATE_SCAN2 2
#define __SRCU_STRUCT_INIT(name, pcpu_name) \
- { \
- .sda = &pcpu_name, \
- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
- .srcu_gp_seq_needed = 0 - 1, \
- __SRCU_DEP_MAP_INIT(name) \
- }
+{ \
+ .sda = &pcpu_name, \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .srcu_gp_seq_needed = -1UL, \
+ .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
+ __SRCU_DEP_MAP_INIT(name) \
+}
/*
* Define and initialize a srcu struct at build time.
diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h
index 4b268d86a784..8b369a41c03c 100644
--- a/include/linux/start_kernel.h
+++ b/include/linux/start_kernel.h
@@ -9,5 +9,7 @@
up something else. */
extern asmlinkage void __init start_kernel(void);
+extern void __init arch_call_rest_init(void);
+extern void __ref rest_init(void);
#endif /* _LINUX_START_KERNEL_H */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c43e9a01b892..7ddfc65586b0 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -30,6 +30,7 @@
#define MTL_MAX_RX_QUEUES 8
#define MTL_MAX_TX_QUEUES 8
+#define STMMAC_CH_MAX 8
#define STMMAC_RX_COE_NONE 0
#define STMMAC_RX_COE_TYPE1 1
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 7df625d41e35..f6e8ceafafd8 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -71,10 +71,10 @@ struct gss_krb5_enctype {
const u32 keyed_cksum; /* is it a keyed cksum? */
const u32 keybytes; /* raw key len, in bytes */
const u32 keylength; /* final key len, in bytes */
- u32 (*encrypt) (struct crypto_skcipher *tfm,
+ u32 (*encrypt) (struct crypto_sync_skcipher *tfm,
void *iv, void *in, void *out,
int length); /* encryption function */
- u32 (*decrypt) (struct crypto_skcipher *tfm,
+ u32 (*decrypt) (struct crypto_sync_skcipher *tfm,
void *iv, void *in, void *out,
int length); /* decryption function */
u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
@@ -98,12 +98,12 @@ struct krb5_ctx {
u32 enctype;
u32 flags;
const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
- struct crypto_skcipher *enc;
- struct crypto_skcipher *seq;
- struct crypto_skcipher *acceptor_enc;
- struct crypto_skcipher *initiator_enc;
- struct crypto_skcipher *acceptor_enc_aux;
- struct crypto_skcipher *initiator_enc_aux;
+ struct crypto_sync_skcipher *enc;
+ struct crypto_sync_skcipher *seq;
+ struct crypto_sync_skcipher *acceptor_enc;
+ struct crypto_sync_skcipher *initiator_enc;
+ struct crypto_sync_skcipher *acceptor_enc_aux;
+ struct crypto_sync_skcipher *initiator_enc_aux;
u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
u8 cksum[GSS_KRB5_MAX_KEYLEN];
s32 endtime;
@@ -262,24 +262,24 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
u32
-krb5_encrypt(struct crypto_skcipher *key,
+krb5_encrypt(struct crypto_sync_skcipher *key,
void *iv, void *in, void *out, int length);
u32
-krb5_decrypt(struct crypto_skcipher *key,
+krb5_decrypt(struct crypto_sync_skcipher *key,
void *iv, void *in, void *out, int length);
int
-gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf,
+gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf,
int offset, struct page **pages);
int
-gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf,
+gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf,
int offset);
s32
krb5_make_seq_num(struct krb5_ctx *kctx,
- struct crypto_skcipher *key,
+ struct crypto_sync_skcipher *key,
int direction,
u32 seqnum, unsigned char *cksum, unsigned char *buf);
@@ -320,12 +320,12 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
- struct crypto_skcipher *cipher,
+ struct crypto_sync_skcipher *cipher,
unsigned char *cksum);
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
- struct crypto_skcipher *cipher,
+ struct crypto_sync_skcipher *cipher,
s32 seqnum);
void
gss_krb5_make_confounder(char *p, u32 conflen);
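The crypto_sync_skcipher conversion above permits on-stack requests; a minimal sketch of the pattern this series targets ("cbc(aes)", buf, len and iv are illustrative):

    struct crypto_sync_skcipher *tfm;
    struct scatterlist sg;
    int err;

    tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    {
            SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

            sg_init_one(&sg, buf, len);
            skcipher_request_set_sync_tfm(req, tfm);
            skcipher_request_set_callback(req, 0, NULL, NULL);
            skcipher_request_set_crypt(req, &sg, &sg, len, iv);
            err = crypto_skcipher_encrypt(req);
            skcipher_request_zero(req);
    }
    crypto_free_sync_skcipher(tfm);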
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5a28ac9284f0..3f529ad9a9d2 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -251,6 +251,7 @@ static inline bool idle_should_enter_s2idle(void)
return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
}
+extern bool pm_suspend_via_s2idle(void);
extern void __init pm_states_init(void);
extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
extern void s2idle_wake(void);
@@ -282,6 +283,7 @@ static inline void pm_set_suspend_via_firmware(void) {}
static inline void pm_set_resume_via_firmware(void) {}
static inline bool pm_suspend_via_firmware(void) { return false; }
static inline bool pm_resume_via_firmware(void) { return false; }
+static inline bool pm_suspend_via_s2idle(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 2ff814c92f7f..2ac3d13a915b 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -60,7 +60,7 @@ struct tms;
struct utimbuf;
struct mq_attr;
struct compat_stat;
-struct compat_timeval;
+struct old_timeval32;
struct robust_list_head;
struct getcpu_cache;
struct old_linux_dirent;
@@ -513,7 +513,8 @@ asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *
/* fs/utimes.c */
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
- struct timespec __user *utimes, int flags);
+ struct __kernel_timespec __user *utimes,
+ int flags);
/* kernel/acct.c */
asmlinkage long sys_acct(const char __user *name);
@@ -613,7 +614,7 @@ asmlinkage long sys_sched_yield(void);
asmlinkage long sys_sched_get_priority_max(int policy);
asmlinkage long sys_sched_get_priority_min(int policy);
asmlinkage long sys_sched_rr_get_interval(pid_t pid,
- struct timespec __user *interval);
+ struct __kernel_timespec __user *interval);
/* kernel/signal.c */
asmlinkage long sys_restart_syscall(void);
@@ -634,7 +635,7 @@ asmlinkage long sys_rt_sigprocmask(int how, sigset_t __user *set,
asmlinkage long sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize);
asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
siginfo_t __user *uinfo,
- const struct timespec __user *uts,
+ const struct __kernel_timespec __user *uts,
size_t sigsetsize);
asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo);
@@ -829,7 +830,7 @@ asmlinkage long sys_perf_event_open(
asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags,
- struct timespec __user *timeout);
+ struct __kernel_timespec __user *timeout);
asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
int options, struct rusage __user *ru);
@@ -954,8 +955,6 @@ asmlinkage long sys_access(const char __user *filename, int mode);
asmlinkage long sys_rename(const char __user *oldname,
const char __user *newname);
asmlinkage long sys_symlink(const char __user *old, const char __user *new);
-asmlinkage long sys_utimes(char __user *filename,
- struct timeval __user *utimes);
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
asmlinkage long sys_stat64(const char __user *filename,
struct stat64 __user *statbuf);
@@ -985,14 +984,18 @@ asmlinkage long sys_alarm(unsigned int seconds);
asmlinkage long sys_getpgrp(void);
asmlinkage long sys_pause(void);
asmlinkage long sys_time(time_t __user *tloc);
+#ifdef __ARCH_WANT_SYS_UTIME
asmlinkage long sys_utime(char __user *filename,
struct utimbuf __user *times);
+asmlinkage long sys_utimes(char __user *filename,
+ struct timeval __user *utimes);
+asmlinkage long sys_futimesat(int dfd, const char __user *filename,
+ struct timeval __user *utimes);
+#endif
asmlinkage long sys_creat(const char __user *pathname, umode_t mode);
asmlinkage long sys_getdents(unsigned int fd,
struct linux_dirent __user *dirent,
unsigned int count);
-asmlinkage long sys_futimesat(int dfd, const char __user *filename,
- struct timeval __user *utimes);
asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timeval __user *tvp);
asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 263e37271afd..8ed77bb4ed86 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -248,6 +248,9 @@ struct tcp_sock {
syn_smc:1; /* SYN includes SMC */
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
+ u64 tcp_wstamp_ns; /* departure time for next sent data packet */
+ u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
+
/* RTT measurement */
u64 tcp_mstamp; /* most recent packet received/sent */
u32 srtt_us; /* smoothed round trip time << 3 in usecs */
diff --git a/include/linux/time32.h b/include/linux/time32.h
index d1ae43c13e25..61904a6c098f 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -13,6 +13,36 @@
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
+typedef s32 old_time32_t;
+
+struct old_timespec32 {
+ old_time32_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct old_timeval32 {
+ old_time32_t tv_sec;
+ s32 tv_usec;
+};
+
+struct old_itimerspec32 {
+ struct old_timespec32 it_interval;
+ struct old_timespec32 it_value;
+};
+
+struct old_utimbuf32 {
+ old_time32_t actime;
+ old_time32_t modtime;
+};
+
+extern int get_old_timespec32(struct timespec64 *, const void __user *);
+extern int put_old_timespec32(const struct timespec64 *, void __user *);
+extern int get_old_itimerspec32(struct itimerspec64 *its,
+ const struct old_itimerspec32 __user *uits);
+extern int put_old_itimerspec32(const struct itimerspec64 *its,
+ struct old_itimerspec32 __user *uits);
+
+
#if __BITS_PER_LONG == 64
/* timespec64 is defined as timespec here */
@@ -105,16 +135,6 @@ static inline bool timespec_valid(const struct timespec *ts)
return true;
}
-static inline bool timespec_valid_strict(const struct timespec *ts)
-{
- if (!timespec_valid(ts))
- return false;
- /* Disallow values that could overflow ktime_t */
- if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
- return false;
- return true;
-}
-
/**
* timespec_to_ns - Convert timespec to nanoseconds
* @ts: pointer to the timespec variable to be converted
@@ -149,19 +169,6 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
a->tv_nsec = ns;
}
-/**
- * time_to_tm - converts the calendar time to local broken-down time
- *
- * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
- * Coordinated Universal Time (UTC).
- * @offset offset seconds adding to totalsecs.
- * @result pointer to struct tm variable to receive broken-down time
- */
-static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
-{
- time64_to_tm(totalsecs, offset, result);
-}
-
static inline unsigned long mktime(const unsigned int year,
const unsigned int mon, const unsigned int day,
const unsigned int hour, const unsigned int min,
@@ -183,8 +190,6 @@ static inline bool timeval_valid(const struct timeval *tv)
return true;
}
-extern struct timespec timespec_trunc(struct timespec t, unsigned int gran);
-
/**
* timeval_to_ns - Convert timeval to nanoseconds
* @ts: pointer to the timeval variable to be converted
@@ -208,18 +213,17 @@ extern struct timeval ns_to_timeval(const s64 nsec);
extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec);
/*
- * New aliases for compat time functions. These will be used to replace
- * the compat code so it can be shared between 32-bit and 64-bit builds
- * both of which provide compatibility with old 32-bit tasks.
+ * Old names for the 32-bit time_t interfaces, these will be removed
+ * when everything uses the new names.
*/
-#define old_time32_t compat_time_t
-#define old_timeval32 compat_timeval
-#define old_timespec32 compat_timespec
-#define old_itimerspec32 compat_itimerspec
-#define ns_to_old_timeval32 ns_to_compat_timeval
-#define get_old_itimerspec32 get_compat_itimerspec64
-#define put_old_itimerspec32 put_compat_itimerspec64
-#define get_old_timespec32 compat_get_timespec64
-#define put_old_timespec32 compat_put_timespec64
+#define compat_time_t old_time32_t
+#define compat_timeval old_timeval32
+#define compat_timespec old_timespec32
+#define compat_itimerspec old_itimerspec32
+#define ns_to_compat_timeval ns_to_old_timeval32
+#define get_compat_itimerspec64 get_old_itimerspec32
+#define put_compat_itimerspec64 put_old_itimerspec32
+#define compat_get_timespec64 get_old_timespec32
+#define compat_put_timespec64 put_old_timespec32
#endif
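After the copy from userspace, get_old_timespec32() reduces to a plain widening; a sketch (helper name is illustrative):

    static void old_ts32_to_ts64(const struct old_timespec32 *in,
                                 struct timespec64 *out)
    {
            out->tv_sec  = in->tv_sec;      /* s32 -> time64_t, sign-extends */
            out->tv_nsec = in->tv_nsec;
    }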
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index a5a3cfc3c2fa..29975e93fcb8 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -266,9 +266,6 @@ extern int update_persistent_clock64(struct timespec64 now);
* deprecated aliases, don't use in new code
*/
#define getnstimeofday64(ts) ktime_get_real_ts64(ts)
-#define get_monotonic_boottime64(ts) ktime_get_boottime_ts64(ts)
-#define getrawmonotonic64(ts) ktime_get_raw_ts64(ts)
-#define timekeeping_clocktai64(ts) ktime_get_clocktai_ts64(ts)
static inline struct timespec64 current_kernel_time64(void)
{
@@ -279,13 +276,4 @@ static inline struct timespec64 current_kernel_time64(void)
return ts;
}
-static inline struct timespec64 get_monotonic_coarse64(void)
-{
- struct timespec64 ts;
-
- ktime_get_coarse_ts64(&ts);
-
- return ts;
-}
-
#endif
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
index 8762c2f45f8b..a502616f7e1c 100644
--- a/include/linux/timekeeping32.h
+++ b/include/linux/timekeeping32.h
@@ -6,27 +6,18 @@
* over time so we can remove the file here.
*/
-extern void do_gettimeofday(struct timeval *tv);
-unsigned long get_seconds(void);
-
-static inline struct timespec current_kernel_time(void)
+static inline void do_gettimeofday(struct timeval *tv)
{
- struct timespec64 ts64;
+ struct timespec64 now;
- ktime_get_coarse_real_ts64(&ts64);
-
- return timespec64_to_timespec(ts64);
+ ktime_get_real_ts64(&now);
+ tv->tv_sec = now.tv_sec;
+ tv->tv_usec = now.tv_nsec/1000;
}
-/**
- * Deprecated. Use do_settimeofday64().
- */
-static inline int do_settimeofday(const struct timespec *ts)
+static inline unsigned long get_seconds(void)
{
- struct timespec64 ts64;
-
- ts64 = timespec_to_timespec64(*ts);
- return do_settimeofday64(&ts64);
+ return ktime_get_real_seconds();
}
static inline void getnstimeofday(struct timespec *ts)
@@ -45,14 +36,6 @@ static inline void ktime_get_ts(struct timespec *ts)
*ts = timespec64_to_timespec(ts64);
}
-static inline void ktime_get_real_ts(struct timespec *ts)
-{
- struct timespec64 ts64;
-
- ktime_get_real_ts64(&ts64);
- *ts = timespec64_to_timespec(ts64);
-}
-
static inline void getrawmonotonic(struct timespec *ts)
{
struct timespec64 ts64;
@@ -61,15 +44,6 @@ static inline void getrawmonotonic(struct timespec *ts)
*ts = timespec64_to_timespec(ts64);
}
-static inline struct timespec get_monotonic_coarse(void)
-{
- struct timespec64 ts64;
-
- ktime_get_coarse_ts64(&ts64);
-
- return timespec64_to_timespec(ts64);
-}
-
static inline void getboottime(struct timespec *ts)
{
struct timespec64 ts64;
@@ -79,19 +53,6 @@ static inline void getboottime(struct timespec *ts)
}
/*
- * Timespec interfaces utilizing the ktime based ones
- */
-static inline void get_monotonic_boottime(struct timespec *ts)
-{
- *ts = ktime_to_timespec(ktime_get_boottime());
-}
-
-static inline void timekeeping_clocktai(struct timespec *ts)
-{
- *ts = ktime_to_timespec(ktime_get_clocktai());
-}
-
-/*
* Persistent clock related interfaces
*/
extern void read_persistent_clock(struct timespec *ts);
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 61dfd93b6ee4..48fad21109fc 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -77,7 +77,7 @@ void torture_shutdown_absorb(const char *title);
int torture_shutdown_init(int ssecs, void (*cleanup)(void));
/* Task stuttering, which forces load/no-load transitions. */
-void stutter_wait(const char *title);
+bool stutter_wait(const char *title);
int torture_stutter_init(int s);
/* Initialization and cleanup. */
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 05589a3e37f4..40b0b4c1bf7b 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -123,15 +123,10 @@ static inline __must_check int tracehook_report_syscall_entry(
*/
static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
- if (step) {
- siginfo_t info;
- clear_siginfo(&info);
- user_single_step_siginfo(current, regs, &info);
- force_sig_info(SIGTRAP, &info, current);
- return;
- }
-
- ptrace_report_syscall(regs);
+ if (step)
+ user_single_step_report(regs);
+ else
+ ptrace_report_syscall(regs);
}
/**
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index 22c5a46e9693..49ba9cde7e4b 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -35,6 +35,12 @@ struct tracepoint {
struct tracepoint_func __rcu *funcs;
};
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+typedef const int tracepoint_ptr_t;
+#else
+typedef struct tracepoint * const tracepoint_ptr_t;
+#endif
+
struct bpf_raw_event_map {
struct tracepoint *tp;
void *bpf_func;
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 041f7e56a289..538ba1a58f5b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -99,6 +99,29 @@ extern void syscall_unregfunc(void);
#define TRACE_DEFINE_ENUM(x)
#define TRACE_DEFINE_SIZEOF(x)
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
+{
+ return offset_to_ptr(p);
+}
+
+#define __TRACEPOINT_ENTRY(name) \
+ asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \
+ " .balign 4 \n" \
+ " .long __tracepoint_" #name " - . \n" \
+ " .previous \n")
+#else
+static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
+{
+ return *p;
+}
+
+#define __TRACEPOINT_ENTRY(name) \
+ static tracepoint_ptr_t __tracepoint_ptr_##name __used \
+ __attribute__((section("__tracepoints_ptrs"))) = \
+ &__tracepoint_##name
+#endif
+
#endif /* _LINUX_TRACEPOINT_H */
/*
@@ -253,19 +276,6 @@ extern void syscall_unregfunc(void);
return static_key_false(&__tracepoint_##name.key); \
}
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define __TRACEPOINT_ENTRY(name) \
- asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \
- " .balign 4 \n" \
- " .long __tracepoint_" #name " - . \n" \
- " .previous \n")
-#else
-#define __TRACEPOINT_ENTRY(name) \
- static struct tracepoint * const __tracepoint_ptr_##name __used \
- __attribute__((section("__tracepoints_ptrs"))) = \
- &__tracepoint_##name
-#endif
-
/*
* We have no guarantee that gcc and the linker won't up-align the tracepoint
* structures, so we create an array of pointers that will be used for iteration
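For reference, the PREL32 entries above are resolved by adding the stored 32-bit offset back to its own address; offset_to_ptr() from <linux/compiler.h> is essentially the following (quoted from memory, so treat as a sketch):

    static inline void *offset_to_ptr(const int *off)
    {
            /* self-relative: the entry stores target - &entry */
            return (void *)((unsigned long)off + *off);
    }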
diff --git a/include/linux/tty.h b/include/linux/tty.h
index c56e3978b00f..414db2bce715 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -746,8 +746,6 @@ static inline int tty_audit_push(void)
/* tty_ioctl.c */
extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
-extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
/* vt.c */
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 71dbc891851a..358446247ccd 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -249,6 +249,7 @@
struct tty_struct;
struct tty_driver;
struct serial_icounter_struct;
+struct serial_struct;
struct tty_operations {
struct tty_struct * (*lookup)(struct tty_driver *driver,
@@ -287,6 +288,8 @@ struct tty_operations {
int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
int (*get_icount)(struct tty_struct *tty,
struct serial_icounter_struct *icount);
+ int (*get_serial)(struct tty_struct *tty, struct serial_struct *p);
+ int (*set_serial)(struct tty_struct *tty, struct serial_struct *p);
void (*show_fdinfo)(struct tty_struct *tty, struct seq_file *m);
#ifdef CONFIG_CONSOLE_POLL
int (*poll_init)(struct tty_driver *driver, int line, char *options);
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 840894ca3fc0..b1e6043e9917 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -54,11 +54,17 @@
* low-level driver can "grab" an ioctl request before the line
 * discipline has a chance to see it.
*
- * long (*compat_ioctl)(struct tty_struct * tty, struct file * file,
+ * int (*compat_ioctl)(struct tty_struct * tty, struct file * file,
* unsigned int cmd, unsigned long arg);
*
* Process ioctl calls from 32-bit process on 64-bit system
*
+ * NOTE: only ioctls that are neither "pointer to compatible
+ * structure" nor tty-generic. Something private that takes
+ * an integer or a pointer to a wordsize-sensitive structure
+ * belongs here, but most ldiscs will happily leave
+ * it NULL.
+ *
* void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
*
 * This function notifies the line discipline that a change has
@@ -184,7 +190,7 @@ struct tty_ldisc_ops {
const unsigned char *buf, size_t nr);
int (*ioctl)(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
- long (*compat_ioctl)(struct tty_struct *tty, struct file *file,
+ int (*compat_ioctl)(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
__poll_t (*poll)(struct tty_struct *, struct file *,
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 409c845d4cd3..422b1c01ee0d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -172,7 +172,7 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
+ if (unlikely(!check_copy_size(addr, bytes, true)))
return 0;
else
return _copy_to_iter_mcsafe(addr, bytes, i);
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 5c812acbb80a..235f51b62c71 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -44,6 +44,7 @@ struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *), void *data);
struct umh_info {
+ const char *cmdline;
struct file *pipe_to_umh;
struct file *pipe_from_umh;
pid_t pid;
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 07f99362bc90..63758c399e4e 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -77,6 +77,12 @@ struct ci_hdrc_platform_data {
struct ci_hdrc_cable vbus_extcon;
struct ci_hdrc_cable id_extcon;
u32 phy_clkgate_delay_us;
+
+ /* pins */
+ struct pinctrl *pctl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_host;
+ struct pinctrl_state *pins_device;
};
/* Default offset of capability registers */
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 106551a5616e..1c19f77ed541 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -285,6 +285,8 @@ struct usb_serial_driver {
int (*write_room)(struct tty_struct *tty);
int (*ioctl)(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
+ int (*get_serial)(struct tty_struct *tty, struct serial_struct *ss);
+ int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss);
void (*set_termios)(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
void (*break_ctl)(struct tty_struct *tty, int break_state);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index e2ec3582e549..d8860f2d0976 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -28,7 +28,7 @@ struct usbnet {
/* housekeeping */
struct usb_device *udev;
struct usb_interface *intf;
- struct driver_info *driver_info;
+ const struct driver_info *driver_info;
const char *driver_name;
void *driver_priv;
wait_queue_head_t wait;
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index a34539b7f750..7e6ac0114d55 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -133,15 +133,18 @@ struct vga_switcheroo_handler {
* @can_switch: check if the device is in a position to switch now.
* Mandatory. The client should return false if a user space process
* has one of its device files open
+ * @gpu_bound: notify the audio client of the client id when the GPU is bound.
*
* Client callbacks. A client can be either a GPU or an audio device on a GPU.
* The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
* set to NULL. For audio clients, the @reprobe member is bogus.
+ * On the other hand, @gpu_bound is only for audio clients, and not used for GPU clients.
*/
struct vga_switcheroo_client_ops {
void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
void (*reprobe)(struct pci_dev *dev);
bool (*can_switch)(struct pci_dev *dev);
+ void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id);
};
#if defined(CONFIG_VGA_SWITCHEROO)
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 9397628a1967..cb462f9ab7dd 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -5,6 +5,24 @@
#include <linux/if_vlan.h>
#include <uapi/linux/virtio_net.h>
+static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr)
+{
+ switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ case VIRTIO_NET_HDR_GSO_UDP:
+ skb->protocol = cpu_to_be16(ETH_P_IP);
+ break;
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ skb->protocol = cpu_to_be16(ETH_P_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
const struct virtio_net_hdr *hdr,
bool little_endian)
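A caller-side sketch for the new helper; the surrounding tap-style receive path and vnet_hdr are hypothetical:

    if (!skb->protocol &&
        virtio_net_hdr_set_proto(skb, &vnet_hdr) < 0) {
            kfree_skb(skb);
            return -EINVAL;         /* unknown GSO type */
    }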
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d9f131ecf708..ed7c122cb31f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1052,10 +1052,9 @@ do { \
__ret; \
})
-#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition, \
- lock, timeout) \
+#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
- TASK_INTERRUPTIBLE, 0, timeout, \
+ state, 0, timeout, \
spin_unlock_irq(&lock); \
__ret = schedule_timeout(__ret); \
spin_lock_irq(&lock));
@@ -1089,8 +1088,19 @@ do { \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
- __ret = __wait_event_interruptible_lock_irq_timeout( \
- wq_head, condition, lock, timeout); \
+ __ret = __wait_event_lock_irq_timeout( \
+ wq_head, condition, lock, timeout, \
+ TASK_INTERRUPTIBLE); \
+ __ret; \
+})
+
+#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_lock_irq_timeout( \
+ wq_head, condition, lock, timeout, \
+ TASK_UNINTERRUPTIBLE); \
__ret; \
})
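A usage sketch for the new uninterruptible variant; wq, lock, done and dev are hypothetical driver state (the macro drops and retakes the spinlock internally, so it must be entered with the lock held):

    spin_lock_irq(&lock);
    ret = wait_event_lock_irq_timeout(wq, done, lock,
                                      msecs_to_jiffies(100));
    if (!ret)
            dev_warn(dev, "timed out waiting for completion\n");
    spin_unlock_irq(&lock);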
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fdfd04e348f6..738a0c24874f 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -246,7 +246,8 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
*
* @bio is a part of the writeback in progress controlled by @wbc. Perform
* writeback specific initialization. This is used to apply the cgroup
- * writeback context.
+ * writeback context. Must be called after the bio has been associated with
+ * a device.
*/
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
@@ -257,7 +258,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
* regular writeback instead of writing things out itself.
*/
if (wbc->wb)
- bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+ bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}
#else /* CONFIG_CGROUP_WRITEBACK */
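The stricter wbc_init_bio() ordering documented above, as a sketch (inode and bio are hypothetical caller state):

    bio_set_dev(bio, inode->i_sb->s_bdev); /* associate with device first */
    wbc_init_bio(wbc, bio);                /* then apply the cgroup css */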
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index ea73fef8bdc0..8586cfb49828 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -38,10 +38,13 @@ struct v4l2_ctrl_handler;
* @prio: priority of the file handler, as defined by &enum v4l2_priority
*
 * @wait: event's wait queue
+ * @subscribe_lock: serialises changes to the subscribed list and guarantees
+ * that the add and del event callbacks are called in order
* @subscribed: list of subscribed events
* @available: list of events waiting to be dequeued
* @navailable: number of available events at @available list
* @sequence: event sequence number
+ *
* @m2m_ctx: pointer to &struct v4l2_m2m_ctx
*/
struct v4l2_fh {
@@ -52,6 +55,7 @@ struct v4l2_fh {
/* Events */
wait_queue_head_t wait;
+ struct mutex subscribe_lock;
struct list_head subscribed;
struct list_head available;
unsigned int navailable;
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 970303448c90..05c7df41d737 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -13,7 +13,7 @@
#include <net/netns/generic.h>
struct tcf_idrinfo {
- spinlock_t lock;
+ struct mutex lock;
struct idr action_idr;
};
@@ -31,10 +31,12 @@ struct tc_action {
int tcfa_action;
struct tcf_t tcfa_tm;
struct gnet_stats_basic_packed tcfa_bstats;
+ struct gnet_stats_basic_packed tcfa_bstats_hw;
struct gnet_stats_queue tcfa_qstats;
struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
struct tcf_chain *goto_chain;
@@ -85,8 +87,7 @@ struct tc_action_ops {
struct tcf_result *); /* called under RCU BH lock*/
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
void (*cleanup)(struct tc_action *);
- int (*lookup)(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack);
+ int (*lookup)(struct net *net, struct tc_action **a, u32 index);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
int bind, bool rtnl_held,
@@ -95,7 +96,7 @@ struct tc_action_ops {
struct netlink_callback *, int,
const struct tc_action_ops *,
struct netlink_ext_ack *);
- void (*stats_update)(struct tc_action *, u64, u32, u64);
+ void (*stats_update)(struct tc_action *, u64, u32, u64, bool);
size_t (*get_fill_size)(const struct tc_action *act);
struct net_device *(*get_dev)(const struct tc_action *a);
void (*put_dev)(struct net_device *dev);
@@ -116,7 +117,7 @@ int tc_action_net_init(struct tc_action_net *tn,
if (!tn->idrinfo)
return -ENOMEM;
tn->ops = ops;
- spin_lock_init(&tn->idrinfo->lock);
+ mutex_init(&tn->idrinfo->lock);
idr_init(&tn->idrinfo->action_idr);
return err;
}
@@ -183,13 +184,13 @@ int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
#endif /* CONFIG_NET_CLS_ACT */
static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
- u64 packets, u64 lastuse)
+ u64 packets, u64 lastuse, bool hw)
{
#ifdef CONFIG_NET_CLS_ACT
if (!a->ops->stats_update)
return;
- a->ops->stats_update(a, bytes, packets, lastuse);
+ a->ops->stats_update(a, bytes, packets, lastuse, hw);
#endif
}
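A hedged sketch of how the new bool might be used: an offload driver syncing counters read back from hardware could pass true so they are accounted in the new tcfa_bstats_hw/cpu_bstats_hw fields rather than the software counters (act, hw_bytes, and hw_packets are placeholders for values a real driver would read from its flow table):

static void my_sync_hw_stats(struct tc_action *act, u64 hw_bytes,
			     u64 hw_packets)
{
	/* hw=true routes the counters into the *_bstats_hw fields */
	tcf_action_stats_update(act, hw_bytes, hw_packets, jiffies, true);
}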
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 6def0351bcc3..14b789a123e7 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -265,6 +265,11 @@ extern const struct ipv6_stub *ipv6_stub __read_mostly;
struct ipv6_bpf_stub {
int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
bool force_bind_address_no_port, bool with_lock);
+ struct sock *(*udp6_lib_lookup)(struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, __be16 dport,
+ int dif, int sdif, struct udp_table *tbl,
+ struct sk_buff *skb);
};
extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index f53edb3754bc..de587948042a 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -13,6 +13,7 @@
#define _NET_RXRPC_H
#include <linux/rxrpc.h>
+#include <linux/ktime.h>
struct key;
struct sock;
@@ -77,5 +78,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
enum rxrpc_call_completion *, u32 *);
u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *);
+u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
+ ktime_t *);
#endif /* _NET_RXRPC_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index a5ba41b3b867..e2695c4bf358 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -52,7 +52,7 @@ struct unix_skb_parms {
struct unix_sock {
/* WARNING: sk has to be the first member */
struct sock sk;
- struct unix_address *addr;
+ struct unix_address *addr;
struct path path;
struct mutex iolock, bindlock;
struct sock *peer;
@@ -63,7 +63,7 @@ struct unix_sock {
#define UNIX_GC_CANDIDATE 0
#define UNIX_GC_MAYBE_CYCLE 1
struct socket_wq peer_wq;
- wait_queue_entry_t peer_wake;
+ wait_queue_entry_t peer_wake;
};
static inline struct unix_sock *unix_sk(const struct sock *sk)
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index cdd9f1fe7cfa..c36dc1e20556 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1517,6 +1517,20 @@ struct hci_cp_le_write_def_data_len {
__le16 tx_time;
} __packed;
+#define HCI_OP_LE_ADD_TO_RESOLV_LIST 0x2027
+struct hci_cp_le_add_to_resolv_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 peer_irk[16];
+ __u8 local_irk[16];
+} __packed;
+
+#define HCI_OP_LE_DEL_FROM_RESOLV_LIST 0x2028
+struct hci_cp_le_del_from_resolv_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+} __packed;
+
#define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029
#define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 0db1b9b428b7..e5ea633ea368 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -103,6 +103,14 @@ struct bdaddr_list {
u8 bdaddr_type;
};
+struct bdaddr_list_with_irk {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 peer_irk[16];
+ u8 local_irk[16];
+};
+
struct bt_uuid {
struct list_head list;
u8 uuid[16];
@@ -259,6 +267,8 @@ struct hci_dev {
__u16 le_max_tx_time;
__u16 le_max_rx_len;
__u16 le_max_rx_time;
+ __u8 le_max_key_size;
+ __u8 le_min_key_size;
__u16 discov_interleaved_timeout;
__u16 conn_info_min_age;
__u16 conn_info_max_age;
@@ -1058,8 +1068,15 @@ int hci_inquiry(void __user *arg);
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
bdaddr_t *bdaddr, u8 type);
+struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
+ struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type, u8 *peer_irk, u8 *local_irk);
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
void hci_bdaddr_list_clear(struct list_head *list);
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 0697fd413087..093aedebdf0c 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -277,12 +277,19 @@ struct l2cap_conn_rsp {
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
#define L2CAP_CR_BAD_AMP 0x0005
-#define L2CAP_CR_AUTHENTICATION 0x0005
-#define L2CAP_CR_AUTHORIZATION 0x0006
-#define L2CAP_CR_BAD_KEY_SIZE 0x0007
-#define L2CAP_CR_ENCRYPTION 0x0008
-#define L2CAP_CR_INVALID_SCID 0x0009
-#define L2CAP_CR_SCID_IN_USE 0x000A
+#define L2CAP_CR_INVALID_SCID 0x0006
+#define L2CAP_CR_SCID_IN_USE 0x0007
+
+/* credit based connect results */
+#define L2CAP_CR_LE_SUCCESS 0x0000
+#define L2CAP_CR_LE_BAD_PSM 0x0002
+#define L2CAP_CR_LE_NO_MEM 0x0004
+#define L2CAP_CR_LE_AUTHENTICATION 0x0005
+#define L2CAP_CR_LE_AUTHORIZATION 0x0006
+#define L2CAP_CR_LE_BAD_KEY_SIZE 0x0007
+#define L2CAP_CR_LE_ENCRYPTION 0x0008
+#define L2CAP_CR_LE_INVALID_SCID 0x0009
+#define L2CAP_CR_LE_SCID_IN_USE	0x000A
/* connect/create channel status */
#define L2CAP_CS_NO_INFO 0x0000
@@ -455,9 +462,6 @@ struct l2cap_conn_param_update_rsp {
#define L2CAP_CONN_PARAM_ACCEPTED 0x0000
#define L2CAP_CONN_PARAM_REJECTED 0x0001
-#define L2CAP_LE_MAX_CREDITS 10
-#define L2CAP_LE_DEFAULT_MPS 230
-
struct l2cap_le_conn_req {
__le16 psm;
__le16 scid;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index a2d058170ea3..b46d68acf701 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -139,12 +139,6 @@ struct bond_parm_tbl {
int mode;
};
-struct netdev_notify_work {
- struct delayed_work work;
- struct net_device *dev;
- struct netdev_bonding_info bonding_info;
-};
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@@ -172,6 +166,7 @@ struct slave {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
+ struct delayed_work notify_work;
struct kobject kobj;
struct rtnl_link_stats64 slave_stats;
};
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 8ebabc9873d1..1fa41b7a1be3 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -149,7 +149,7 @@ enum ieee80211_channel_flags {
*/
struct ieee80211_channel {
enum nl80211_band band;
- u16 center_freq;
+ u32 center_freq;
u16 hw_value;
u32 flags;
int max_antenna_gain;
@@ -775,6 +775,12 @@ struct cfg80211_crypto_settings {
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
+ * @ftm_responder: enable FTM responder functionality; -1 for no change
+ * (which also implies no change in LCI/civic location data)
+ * @lci: LCI subelement content
+ * @civicloc: Civic location subelement content
+ * @lci_len: LCI data length
+ * @civicloc_len: Civic location data length
*/
struct cfg80211_beacon_data {
const u8 *head, *tail;
@@ -782,12 +788,17 @@ struct cfg80211_beacon_data {
const u8 *proberesp_ies;
const u8 *assocresp_ies;
const u8 *probe_resp;
+ const u8 *lci;
+ const u8 *civicloc;
+ s8 ftm_responder;
size_t head_len, tail_len;
size_t beacon_ies_len;
size_t proberesp_ies_len;
size_t assocresp_ies_len;
size_t probe_resp_len;
+ size_t lci_len;
+ size_t civicloc_len;
};
struct mac_address {
@@ -849,6 +860,7 @@ struct cfg80211_bitrate_mask {
* @beacon_rate: bitrate to be used for beacons
* @ht_cap: HT capabilities (or %NULL if HT isn't enabled)
* @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled)
+ * @he_cap: HE capabilities (or %NULL if HE isn't enabled)
* @ht_required: stations must support HT
* @vht_required: stations must support VHT
*/
@@ -874,6 +886,7 @@ struct cfg80211_ap_settings {
const struct ieee80211_ht_cap *ht_cap;
const struct ieee80211_vht_cap *vht_cap;
+ const struct ieee80211_he_cap_elem *he_cap;
bool ht_required, vht_required;
};
@@ -1290,6 +1303,10 @@ struct cfg80211_tid_stats {
* @ack_signal: signal strength (in dBm) of the last ACK frame.
 * @avg_ack_signal: average RSSI value of the ACK packets for the number of
 * MSDUs sent.
+ * @rx_mpdu_count: number of MPDUs received from this station
+ * @fcs_err_count: number of packets (MPDUs) received from this station with
+ * an FCS error. This counter should be incremented only when TA of the
+ * received packet with an FCS error matches the peer MAC address.
*/
struct station_info {
u64 filled;
@@ -1336,6 +1353,9 @@ struct station_info {
struct cfg80211_tid_stats *pertid;
s8 ack_signal;
s8 avg_ack_signal;
+
+ u32 rx_mpdu_count;
+ u32 fcs_err_count;
};
#if IS_ENABLED(CONFIG_CFG80211)
@@ -2795,6 +2815,40 @@ struct cfg80211_external_auth_params {
};
/**
+ * cfg80211_ftm_responder_stats - FTM responder statistics
+ *
+ * @filled: bitmap of flags, using the bits of &enum nl80211_ftm_stats to
+ * indicate which values in this struct are valid
+ * @success_num: number of FTM sessions in which all frames were successfully
+ * answered
+ * @partial_num: number of FTM sessions in which only part of the frames were
+ * successfully answered
+ * @failed_num: number of failed FTM sessions
+ * @asap_num: number of ASAP FTM sessions
+ * @non_asap_num: number of non-ASAP FTM sessions
+ * @total_duration_ms: total session duration in milliseconds - gives an
+ * indication of how much time the responder was busy
+ * @unknown_triggers_num: number of unknown FTM triggers - triggers from
+ * initiators that didn't successfully finish the negotiation phase with
+ * the responder
+ * @reschedule_requests_num: number of FTM reschedule requests - the initiator
+ * asks for a new schedule although it already has a scheduled FTM slot
+ * @out_of_window_triggers_num: total FTM triggers out of scheduled window
+ */
+struct cfg80211_ftm_responder_stats {
+ u32 filled;
+ u32 success_num;
+ u32 partial_num;
+ u32 failed_num;
+ u32 asap_num;
+ u32 non_asap_num;
+ u64 total_duration_ms;
+ u32 unknown_triggers_num;
+ u32 reschedule_requests_num;
+ u32 out_of_window_triggers_num;
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -3126,6 +3180,9 @@ struct cfg80211_external_auth_params {
*
* @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter
* tells the driver that the frame should not be encrypted.
+ *
+ * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
+ * Statistics should be cumulative; currently no way to reset is provided.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3431,6 +3488,10 @@ struct cfg80211_ops {
const u8 *buf, size_t len,
const u8 *dest, const __be16 proto,
const bool noencrypt);
+
+ int (*get_ftm_responder_stats)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ftm_responder_stats *ftm_stats);
};
/*
@@ -3958,7 +4019,6 @@ struct wiphy_iftype_ext_capab {
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
*
- * @cookie_counter: unique generic cookie counter, used to identify objects.
* @nan_supported_bands: bands supported by the device in NAN mode, a
* bitmap of &enum nl80211_band values. For instance, for
* NL80211_BAND_2GHZ, bit 0 would be set
@@ -4097,8 +4157,6 @@ struct wiphy {
u32 bss_select_support;
- u64 cookie_counter;
-
u8 nan_supported_bands;
u32 txq_limit;
@@ -4733,6 +4791,17 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
const u8 *ies, int len);
/**
+ * cfg80211_send_layer2_update - send layer 2 update frame
+ *
+ * @dev: network device
+ * @addr: STA MAC address
+ *
+ * Wireless drivers can use this function to update forwarding tables in bridge
+ * devices upon STA association.
+ */
+void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr);
+
+/**
* DOC: Regulatory enforcement infrastructure
*
* TODO
@@ -4852,8 +4921,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
*
* @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
 * @freq: the frequency (in MHz) to be queried.
- * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
- * irrelevant). This can be used later for deduplication.
* @rule: pointer to store the wmm rule from the regulatory db.
*
* Self-managed wireless drivers can use this function to query
diff --git a/include/net/devlink.h b/include/net/devlink.h
index b9b89d6604d4..45db0c79462d 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -298,7 +298,7 @@ struct devlink_resource {
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
-#define DEVLINK_PARAM_MAX_STRING_VALUE 32
+#define __DEVLINK_PARAM_MAX_STRING_VALUE 32
enum devlink_param_type {
DEVLINK_PARAM_TYPE_U8,
DEVLINK_PARAM_TYPE_U16,
@@ -311,7 +311,7 @@ union devlink_param_value {
u8 vu8;
u16 vu16;
u32 vu32;
- const char *vstr;
+ char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE];
bool vbool;
};
@@ -362,6 +362,9 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
/* add new param generic ids above here */
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -380,6 +383,15 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable"
#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL
+#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME "ignore_ari"
+#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME "msix_vec_per_pf_max"
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME "msix_vec_per_pf_min"
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE DEVLINK_PARAM_TYPE_U32
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -451,11 +463,14 @@ struct devlink_ops {
u32 *p_cur, u32 *p_max);
int (*eswitch_mode_get)(struct devlink *devlink, u16 *p_mode);
- int (*eswitch_mode_set)(struct devlink *devlink, u16 mode);
+ int (*eswitch_mode_set)(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode);
- int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode);
+ int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode,
+ struct netlink_ext_ack *extack);
int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode);
- int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode);
+ int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode,
+ struct netlink_ext_ack *extack);
};
static inline void *devlink_priv(struct devlink *devlink)
@@ -553,6 +568,8 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
union devlink_param_value init_val);
void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+void devlink_param_value_str_fill(union devlink_param_value *dst_val,
+ const char *src);
struct devlink_region *devlink_region_create(struct devlink *devlink,
const char *region_name,
u32 region_max_snapshots,
@@ -789,6 +806,12 @@ devlink_param_value_changed(struct devlink *devlink, u32 param_id)
{
}
+static inline void
+devlink_param_value_str_fill(union devlink_param_value *dst_val,
+ const char *src)
+{
+}
+
static inline struct devlink_region *
devlink_region_create(struct devlink *devlink,
const char *region_name,
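For illustration, filling and committing a string param with the new bounded helper might look like this; MY_DRIVER_PARAM_ID and the string value are invented for the sketch:

union devlink_param_value val;
int err;

devlink_param_value_str_fill(&val, "fw.bundle");	/* bounded copy into val.vstr */
err = devlink_param_driverinit_value_set(devlink, MY_DRIVER_PARAM_ID, val);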
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 461e8a7661b7..23690c44e167 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -35,6 +35,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_BRCM_PREPEND,
DSA_TAG_PROTO_DSA,
DSA_TAG_PROTO_EDSA,
+ DSA_TAG_PROTO_GSWIP,
DSA_TAG_PROTO_KSZ,
DSA_TAG_PROTO_LAN9303,
DSA_TAG_PROTO_MTK,
diff --git a/include/net/dst.h b/include/net/dst.h
index 7f735e76ca73..6cf0870414c7 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -527,4 +527,14 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu);
}
+static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
+ struct dst_entry *encap_dst,
+ int headroom)
+{
+ u32 encap_mtu = dst_mtu(encap_dst);
+
+ if (skb->len > encap_mtu - headroom)
+ skb_dst_update_pmtu(skb, encap_mtu - headroom);
+}
+
#endif /* _NET_DST_H */
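A sketch of the intended call site, assuming a tunnel transmit path about to push encapsulation headers (the function and headroom value are illustrative):

static void my_tunnel_check(struct sk_buff *skb, struct dst_entry *encap_dst)
{
	/* bytes of encapsulation about to be added, e.g. outer IP + 8 */
	int headroom = sizeof(struct iphdr) + 8;

	/* updates the inner route's PMTU if skb->len won't fit */
	skb_tunnel_check_pmtu(skb, encap_dst, headroom);
}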
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 883bb9085f15..946bd53a9f81 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -44,6 +44,10 @@ void __gnet_stats_copy_basic(const seqcount_t *running,
struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_basic_hw(const seqcount_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index decf6012a401..aa2e5888f18d 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -112,7 +112,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
static inline int genl_err_attr(struct genl_info *info, int err,
- struct nlattr *attr)
+ const struct nlattr *attr)
{
info->extack->bad_attr = attr;
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index feef706e1158..8014153bdd49 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -75,6 +75,8 @@ enum ieee80211_radiotap_presence {
IEEE80211_RADIOTAP_TIMESTAMP = 22,
IEEE80211_RADIOTAP_HE = 23,
IEEE80211_RADIOTAP_HE_MU = 24,
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU = 26,
+ IEEE80211_RADIOTAP_LSIG = 27,
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -325,6 +327,25 @@ enum ieee80211_radiotap_he_mu_bits {
IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800,
};
+enum ieee80211_radiotap_lsig_data1 {
+ IEEE80211_RADIOTAP_LSIG_DATA1_RATE_KNOWN = 0x0001,
+ IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN = 0x0002,
+};
+
+enum ieee80211_radiotap_lsig_data2 {
+ IEEE80211_RADIOTAP_LSIG_DATA2_RATE = 0x000f,
+ IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH = 0xfff0,
+};
+
+struct ieee80211_radiotap_lsig {
+ __le16 data1, data2;
+};
+
+enum ieee80211_radiotap_zero_len_psdu_type {
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING = 0,
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR = 0xff,
+};
+
/**
* ieee80211_get_radiotap_len - get radiotap header length
*/
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 482a1b705362..c8e2bebd8d93 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -183,8 +183,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
* 1 if something is broken and should be logged (!!! above)
* 2 if packet should be dropped
*/
-static inline int INET_ECN_decapsulate(struct sk_buff *skb,
- __u8 outer, __u8 inner)
+static inline int __INET_ECN_decapsulate(__u8 outer, __u8 inner, bool *set_ce)
{
if (INET_ECN_is_not_ect(inner)) {
switch (outer & INET_ECN_MASK) {
@@ -198,10 +197,21 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb,
}
}
- if (INET_ECN_is_ce(outer))
+ *set_ce = INET_ECN_is_ce(outer);
+ return 0;
+}
+
+static inline int INET_ECN_decapsulate(struct sk_buff *skb,
+ __u8 outer, __u8 inner)
+{
+ bool set_ce = false;
+ int rc;
+
+ rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
+ if (!rc && set_ce)
INET_ECN_set_ce(skb);
- return 0;
+ return rc;
}
static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
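A hedged sketch of a caller using the split-out helper to obtain the CE decision before committing to modify the skb; outer_tos and inner_tos are placeholders for the outer and inner TOS/traffic-class bytes:

bool set_ce = false;
int rc = __INET_ECN_decapsulate(outer_tos, inner_tos, &set_ce);

if (rc == 2)
	goto drop;		/* invalid combination, drop the packet */
if (!rc && set_ce)
	INET_ECN_set_ce(skb);	/* propagate CE into the inner header */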
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index e03b93360f33..a80fd0ac4563 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -130,12 +130,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
return sk->sk_bound_dev_if;
}
-static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
-{
- return rcu_dereference_check(ireq->ireq_opt,
- refcount_read(&ireq->req.rsk_refcnt) > 0);
-}
-
struct inet_cork {
unsigned int flags;
__be32 addr;
diff --git a/include/net/ip.h b/include/net/ip.h
index e44b1a44f67a..72593e171d14 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -420,8 +420,35 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}
-int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
- u32 *metrics);
+struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+ int fc_mx_len);
+static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
+{
+ if (fib_metrics != &dst_default_metrics &&
+ refcount_dec_and_test(&fib_metrics->refcnt))
+ kfree(fib_metrics);
+}
+
+/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
+static inline
+void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
+{
+ dst_init_metrics(dst, fib_metrics->metrics, true);
+
+ if (fib_metrics != &dst_default_metrics) {
+ dst->_metrics |= DST_METRICS_REFCOUNTED;
+ refcount_inc(&fib_metrics->refcnt);
+ }
+}
+
+static inline
+void ip_dst_metrics_put(struct dst_entry *dst)
+{
+ struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
+
+ if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+ kfree(p);
+}
u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
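The refcounting contract implied by the new helpers, as a sketch (error handling elided; fc_mx/fc_mx_len are the usual netlink metric attributes):

struct dst_metrics *m = ip_fib_metrics_init(net, fc_mx, fc_mx_len);

ip_dst_init_metrics(dst, m);	/* takes an extra ref unless it is the default */

/* ... when the fib entry is torn down: */
ip_fib_metrics_put(m);

/* ... and when the dst itself is destroyed: */
ip_dst_metrics_put(dst);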
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3d4930528db0..84097010237c 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -159,6 +159,10 @@ struct fib6_info {
struct rt6_info * __percpu *rt6i_pcpu;
struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
+#ifdef CONFIG_IPV6_ROUTER_PREF
+ unsigned long last_probe;
+#endif
+
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
@@ -182,7 +186,6 @@ struct rt6_info {
struct in6_addr rt6i_gateway;
struct inet6_dev *rt6i_idev;
u32 rt6i_flags;
- struct rt6key rt6i_prefsrc;
struct list_head rt6i_uncached;
struct uncached_list *rt6i_uncached_list;
@@ -408,11 +411,33 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *arg),
void *arg);
+void fib6_clean_all_skip_notify(struct net *net,
+ int (*func)(struct fib6_info *, void *arg),
+ void *arg);
int fib6_add(struct fib6_node *root, struct fib6_info *rt,
struct nl_info *info, struct netlink_ext_ack *extack);
int fib6_del(struct fib6_info *rt, struct nl_info *info);
+static inline
+void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr)
+{
+ const struct fib6_info *from;
+
+ rcu_read_lock();
+
+ from = rcu_dereference(rt->from);
+ if (from) {
+ *addr = from->fib6_prefsrc.addr;
+ } else {
+ struct in6_addr in6_zero = {};
+
+ *addr = in6_zero;
+ }
+
+ rcu_read_unlock();
+}
+
static inline struct net_device *fib6_info_nh_dev(const struct fib6_info *f6i)
{
return f6i->fib6_nh.nh_dev;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 7b9c82de11cc..7ab119936e69 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -165,8 +165,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
kuid_t uid);
-void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
- u32 mark);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif);
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
struct netlink_callback;
@@ -175,6 +174,7 @@ struct rt6_rtnl_dump_arg {
struct sk_buff *skb;
struct netlink_callback *cb;
struct net *net;
+ struct fib_dump_filter filter;
};
int rt6_dump_route(struct fib6_info *f6i, void *p_arg);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 69c91d1934c1..e8d9456bf36e 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -222,6 +222,16 @@ struct fib_table {
unsigned long __data[0];
};
+struct fib_dump_filter {
+ u32 table_id;
+ /* filter_set is an optimization flag telling whether any filter field is set */
+ bool filter_set;
+ unsigned char protocol;
+ unsigned char rt_type;
+ unsigned int flags;
+ struct net_device *dev;
+};
+
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags);
int fib_table_insert(struct net *, struct fib_table *, struct fib_config *,
@@ -229,7 +239,7 @@ int fib_table_insert(struct net *, struct fib_table *, struct fib_config *,
int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
struct netlink_ext_ack *extack);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
- struct netlink_callback *cb);
+ struct netlink_callback *cb, struct fib_dump_filter *filter);
int fib_table_flush(struct net *net, struct fib_table *table);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
void fib_table_flush_external(struct fib_table *table);
@@ -373,6 +383,7 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
extern const struct nla_policy rtm_ipv4_policy[];
void ip_fib_init(void);
__be32 fib_compute_spec_dst(struct sk_buff *skb);
+bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev);
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
u8 tos, int oif, struct net_device *dev,
struct in_device *idev, u32 *itag);
@@ -394,6 +405,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
@@ -451,4 +463,7 @@ static inline void fib_proc_exit(struct net *net)
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr);
+int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
+ struct fib_dump_filter *filter,
+ struct netlink_callback *cb);
#endif /* _NET_FIB_H */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index ff33f498c137..829650540780 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1089,8 +1089,6 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#endif
#ifdef CONFIG_SYSCTL
-extern struct ctl_table ipv6_route_table_template[];
-
struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
int ipv6_sysctl_register(void);
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index f4c21b5a1242..14a490246be9 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -80,6 +80,11 @@ struct af_iucv_trans_hdr {
u8 pad; /* total 104 bytes */
} __packed;
+static inline struct af_iucv_trans_hdr *iucv_trans_hdr(struct sk_buff *skb)
+{
+ return (struct af_iucv_trans_hdr *)skb_network_header(skb);
+}
+
enum iucv_tx_notify {
/* transmission of skb is completed and was successful */
TX_NOTIFY_OK = 0,
diff --git a/include/net/llc.h b/include/net/llc.h
index 890a87318014..df282d9b4017 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -66,6 +66,7 @@ struct llc_sap {
int sk_count;
struct hlist_nulls_head sk_laddr_hash[LLC_SK_LADDR_HASH_ENTRIES];
struct hlist_head sk_dev_hash[LLC_SK_DEV_HASH_ENTRIES];
+ struct rcu_head rcu;
};
static inline
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 5790f55c241d..71985e95d2d9 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -101,8 +101,9 @@
* Drivers indicate that they use this model by implementing the .wake_tx_queue
* driver operation.
*
- * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with a
- * single per-vif queue for multicast data frames.
+ * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with
+ * another per-sta for non-data/non-mgmt and bufferable management frames, and
+ * a single per-vif queue for multicast data frames.
*
* The driver is expected to initialize its private per-queue data for stations
* and interfaces in the .add_interface and .sta_add ops.
@@ -308,6 +309,8 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected
* keep alive) changed.
* @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface
+ * @BSS_CHANGED_FTM_RESPONDER: fine timing measurement request responder
+ * functionality changed for this BSS (AP mode).
*
*/
enum ieee80211_bss_change {
@@ -337,6 +340,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_MU_GROUPS = 1<<23,
BSS_CHANGED_KEEP_ALIVE = 1<<24,
BSS_CHANGED_MCAST_RATE = 1<<25,
+ BSS_CHANGED_FTM_RESPONDER = 1<<26,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -463,6 +467,21 @@ struct ieee80211_mu_group_data {
};
/**
+ * ieee80211_ftm_responder_params - FTM responder parameters
+ *
+ * @lci: LCI subelement content
+ * @civicloc: CIVIC location subelement content
+ * @lci_len: LCI data length
+ * @civicloc_len: Civic data length
+ */
+struct ieee80211_ftm_responder_params {
+ const u8 *lci;
+ const u8 *civicloc;
+ size_t lci_len;
+ size_t civicloc_len;
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -561,6 +580,9 @@ struct ieee80211_mu_group_data {
* @protected_keep_alive: if set, indicates that the station should send an RSN
* protected frame to the AP to reset the idle timer at the AP for the
* station.
+ * @ftm_responder: whether to enable or disable fine timing measurement (FTM)
+ * responder functionality.
+ * @ftmr_params: configurable LCI/civic parameters used when enabling the FTM
+ * responder.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -611,6 +633,8 @@ struct ieee80211_bss_conf {
bool allow_p2p_go_ps;
u16 max_idle_period;
bool protected_keep_alive;
+ bool ftm_responder;
+ struct ieee80211_ftm_responder_params *ftmr_params;
};
/**
@@ -1140,6 +1164,11 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* from the RX info data, so leave those zeroed when building this data)
* @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present
* (&struct ieee80211_radiotap_he_mu)
+ * @RX_FLAG_RADIOTAP_LSIG: L-SIG radiotap data is present
+ * @RX_FLAG_NO_PSDU: use the frame only for radiotap reporting, with
+ * the "0-length PSDU" field included there. The value for it is
+ * in &struct ieee80211_rx_status. Note that if this value isn't
+ * known, the frame shouldn't be reported.
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1170,6 +1199,8 @@ enum mac80211_rx_flags {
RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25),
RX_FLAG_RADIOTAP_HE = BIT(26),
RX_FLAG_RADIOTAP_HE_MU = BIT(27),
+ RX_FLAG_RADIOTAP_LSIG = BIT(28),
+ RX_FLAG_NO_PSDU = BIT(29),
};
/**
@@ -1242,6 +1273,7 @@ enum mac80211_rx_encoding {
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
* @ampdu_delimiter_crc: A-MPDU delimiter CRC
+ * @zero_length_psdu_type: radiotap type of the 0-length PSDU
*/
struct ieee80211_rx_status {
u64 mactime;
@@ -1262,6 +1294,7 @@ struct ieee80211_rx_status {
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 ampdu_delimiter_crc;
+ u8 zero_length_psdu_type;
};
/**
@@ -1504,6 +1537,8 @@ enum ieee80211_vif_flags {
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void \*).
* @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
+ * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
+ * protected by fq->lock.
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1528,6 +1563,8 @@ struct ieee80211_vif {
unsigned int probe_req_reg;
+ bool txqs_stopped[IEEE80211_NUM_ACS];
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
@@ -1839,7 +1876,9 @@ struct ieee80211_sta_rates {
* unlimited.
* @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not.
* @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
- * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
+ * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
+ * @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that
+ * the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames
*/
struct ieee80211_sta {
u32 supp_rates[NUM_NL80211_BANDS];
@@ -1879,8 +1918,9 @@ struct ieee80211_sta {
u16 max_amsdu_len;
bool support_p2p_ps;
u16 max_rc_amsdu_len;
+ u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];
- struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
+ struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1];
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
@@ -1914,7 +1954,8 @@ struct ieee80211_tx_control {
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @sta: station table entry, %NULL for per-vif queue
- * @tid: the TID for this queue (unused for per-vif queue)
+ * @tid: the TID for this queue (unused for per-vif queue),
+ * %IEEE80211_NUM_TIDS for non-data (if enabled)
* @ac: the AC for this queue
* @drv_priv: driver private area, sized by hw->txq_data_size
*
@@ -2127,6 +2168,19 @@ struct ieee80211_txq {
* @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
* support QoS NDP for AP probing - that's most likely a driver bug.
*
+ * @IEEE80211_HW_BUFF_MMPDU_TXQ: use the TXQ for bufferable MMPDUs; this of
+ * course requires the driver to use TXQs to start with.
+ *
+ * @IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW: (Hardware) rate control supports VHT
+ * extended NSS BW (dot11VHTExtendedNSSBWCapable). This flag will be set if
+ * the selected rate control algorithm sets %RATE_CTRL_CAPA_VHT_EXT_NSS_BW
+ * but if the rate control is built-in then it must be set by the driver.
+ * See also the documentation for that flag.
+ *
+ * @IEEE80211_HW_STA_MMPDU_TXQ: use the extra non-TID per-station TXQ for all
+ * MMPDUs on station interfaces. This of course requires the driver to use
+ * TXQs to start with.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2172,6 +2226,9 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP,
IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
+ IEEE80211_HW_BUFF_MMPDU_TXQ,
+ IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW,
+ IEEE80211_HW_STA_MMPDU_TXQ,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2290,6 +2347,10 @@ enum ieee80211_hw_flags {
* supported by HW.
* @max_nan_de_entries: maximum number of NAN DE functions supported by the
* device.
+ *
+ * @tx_sk_pacing_shift: Pacing shift to set on TCP sockets when frames from
+ * them are encountered. The default should typically not be changed,
+ * unless the driver has good reasons for needing more buffers.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -2325,6 +2386,7 @@ struct ieee80211_hw {
u8 n_cipher_schemes;
const struct ieee80211_cipher_scheme *cipher_schemes;
u8 max_nan_de_entries;
+ u8 tx_sk_pacing_shift;
};
static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
@@ -2506,6 +2568,19 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* The set_default_unicast_key() call updates the default WEP key index
* configured to the hardware for WEP encryption type. This is required
* for devices that support offload of data packets (e.g. ARP responses).
+ *
+ * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag
+ * when they are able to replace in-use PTK keys according to the following
+ * requirements:
+ * 1) They do not hand over frames decrypted with the old key to
+ mac80211 once the call to set_key() with command %DISABLE_KEY has been
+ completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key,
+ 2) either drop or continue to use the old key for any outgoing frames queued
+ at the time of the key deletion (including re-transmits),
+ 3) never send out a frame queued prior to the set_key() %SET_KEY command
+ encrypted with the new key and
+ 4) never send out a frame unencrypted when it should be encrypted.
+ Mac80211 will not queue any new frames for a deleted key to the driver.
*/
/**
@@ -3542,6 +3617,12 @@ enum ieee80211_reconfig_type {
* @del_nan_func: Remove a NAN function. The driver must call
* ieee80211_nan_func_terminated() with
* NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST reason code upon removal.
+ * @can_aggregate_in_amsdu: Called in order to determine if HW supports
+ * aggregating two specific frames in the same A-MSDU. The relation
+ * between the skbs should be symmetric and transitive. Note that while
+ * skb is always a real frame, head may or may not be an A-MSDU.
+ * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
+ * Statistics should be cumulative; currently no way to reset is provided.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3824,6 +3905,12 @@ struct ieee80211_ops {
void (*del_nan_func)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u8 instance_id);
+ bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *hw,
+ struct sk_buff *head,
+ struct sk_buff *skb);
+ int (*get_ftm_responder_stats)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_ftm_responder_stats *ftm_stats);
};
/**
@@ -4293,6 +4380,21 @@ void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
u32 thr);
/**
+ * ieee80211_tx_rate_update - transmit rate update callback
+ *
+ * Drivers should call this function with a non-NULL pub sta.
+ * This function can be used by drivers that do not have provision
+ * for updating the tx rate in the data path.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @pubsta: the station to update the tx rate for.
+ * @info: tx status information
+ */
+void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta,
+ struct ieee80211_tx_info *info);
+
+/**
* ieee80211_tx_status - transmit status callback
*
* Call this function for all transmitted frames after they have been
@@ -5644,7 +5746,22 @@ struct ieee80211_tx_rate_control {
bool bss;
};
+/**
+ * enum rate_control_capabilities - rate control capabilities
+ */
+enum rate_control_capabilities {
+ /**
+ * @RATE_CTRL_CAPA_VHT_EXT_NSS_BW:
+ * Support for extended NSS BW (dot11VHTExtendedNSSBWCapable).
+ * Note that this is only looked at if the minimum number of chains
+ * that the AP uses is < the number of TX chains the hardware has,
+ * otherwise the NSS difference doesn't bother us.
+ */
+ RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0),
+};
+
struct rate_control_ops {
+ unsigned long capa;
const char *name;
void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
void (*free)(void *priv);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 6c1eecd56a4d..f58b384aa6c9 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -323,6 +323,7 @@ void __neigh_set_probe_once(struct neighbour *neigh);
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
@@ -544,4 +545,19 @@ static inline void neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
*notify = 1;
}
}
+
+static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags,
+ int *notify)
+{
+ u8 ndm_flags = 0;
+
+ ndm_flags |= (flags & NEIGH_UPDATE_F_ISROUTER) ? NTF_ROUTER : 0;
+ if ((neigh->flags ^ ndm_flags) & NTF_ROUTER) {
+ if (ndm_flags & NTF_ROUTER)
+ neigh->flags |= NTF_ROUTER;
+ else
+ neigh->flags &= ~NTF_ROUTER;
+ *notify = 1;
+ }
+}
#endif
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 9b5fdc50519a..99d4148e0f90 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -43,6 +43,7 @@ struct ctl_table_header;
struct net_generic;
struct uevent_sock;
struct netns_ipvs;
+struct bpf_prog;
#define NETDEV_HASHBITS 8
@@ -145,6 +146,8 @@ struct net {
#endif
struct net_generic __rcu *gen;
+ struct bpf_prog __rcu *flow_dissector_prog;
+
/* Note : following structs are cache line aligned */
#ifdef CONFIG_XFRM
struct netns_xfrm xfrm;
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index c84b51682f08..135ee702c7b0 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -10,20 +10,17 @@
#ifndef _NF_CONNTRACK_IPV4_H
#define _NF_CONNTRACK_IPV4_H
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
#ifdef CONFIG_NF_CT_PROTO_DCCP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp;
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp;
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite;
#endif
-int nf_conntrack_ipv4_compat_init(void);
-void nf_conntrack_ipv4_compat_fini(void);
-
#endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index effa8dfba68c..7b3c873f8839 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -2,20 +2,7 @@
#ifndef _NF_CONNTRACK_IPV6_H
#define _NF_CONNTRACK_IPV6_H
-extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
-
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
-#endif
-#ifdef CONFIG_NF_CT_PROTO_SCTP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
-#endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
-#endif
#include <linux/sysctl.h>
extern struct ctl_table nf_ct_ipv6_sysctl_table[];
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 2a3e0974a6af..afc9b3620473 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -20,8 +20,7 @@
/* This header is used to share core functionality between the
standalone connection tracking module, and the compatibility layer's use
of connection tracking. */
-unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
- struct sk_buff *skb);
+unsigned int nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state);
int nf_conntrack_init_net(struct net *net);
void nf_conntrack_cleanup_net(struct net *net);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 8465263b297d..eed04af9b75e 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -18,9 +18,6 @@
struct seq_file;
struct nf_conntrack_l4proto {
- /* L3 Protocol number. */
- u_int16_t l3proto;
-
/* L4 Protocol number. */
u_int8_t l4proto;
@@ -43,22 +40,14 @@ struct nf_conntrack_l4proto {
/* Returns verdict for packet, or -1 for invalid. */
int (*packet)(struct nf_conn *ct,
- const struct sk_buff *skb,
+ struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo);
-
- /* Called when a new connection for this protocol found;
- * returns TRUE if it's OK. If so, packet() called next. */
- bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff);
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
/* Called when a conntrack entry is destroyed */
void (*destroy)(struct nf_conn *ct);
- int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
- unsigned int dataoff,
- u_int8_t pf, unsigned int hooknum);
-
/* called by gc worker if table is full */
bool (*can_early_drop)(const struct nf_conn *ct);
@@ -92,7 +81,7 @@ struct nf_conntrack_l4proto {
#endif
unsigned int *net_id;
/* Init l4proto pernet data */
- int (*init_net)(struct net *net, u_int16_t proto);
+ int (*init_net)(struct net *net);
/* Return the per-net protocol part. */
struct nf_proto_net *(*get_net_proto)(struct net *net);
@@ -101,16 +90,23 @@ struct nf_conntrack_l4proto {
struct module *me;
};
+int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state);
+
+int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state);
/* Existing built-in generic protocol */
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
-#define MAX_NF_CT_PROTO 256
+#define MAX_NF_CT_PROTO IPPROTO_UDPLITE
-const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto,
- u_int8_t l4proto);
+const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto);
-const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto,
- u_int8_t l4proto);
+const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4proto);
void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p);
/* Protocol pernet registration. */
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 0e355f4a3d76..77e2761d4f2f 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -99,7 +99,7 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table,
void (*iter)(struct flow_offload *flow, void *data),
void *data);
-void nf_flow_table_cleanup(struct net *net, struct net_device *dev);
+void nf_flow_table_cleanup(struct net_device *dev);
int nf_flow_table_init(struct nf_flowtable *flow_table);
void nf_flow_table_free(struct nf_flowtable *flow_table);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 0f39ac487012..841835a387e1 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -470,6 +470,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
+void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding);
+void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
/**
* enum nft_set_extensions - set extension type IDs
@@ -724,7 +727,9 @@ struct nft_expr_type {
* @eval: Expression evaluation function
* @size: full expression size, including private data size
* @init: initialization function
- * @destroy: destruction function
+ * @activate: activate expression in the next generation
+ * @deactivate: deactivate expression in next generation
+ * @destroy: destruction function, called after synchronize_rcu
* @dump: function to dump parameters
* @type: expression type
* @validate: validate expression, called during loop detection
@@ -1293,12 +1298,14 @@ static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
*
* @list: used internally
* @msg_type: message type
+ * @put_net: ctx->net needs to be put
* @ctx: transaction context
* @data: internal information related to the transaction
*/
struct nft_trans {
struct list_head list;
int msg_type;
+ bool put_net;
struct nft_ctx ctx;
char data[0];
};
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 8da837d2aaf9..2046d104f323 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -16,6 +16,10 @@ extern struct nft_expr_type nft_meta_type;
extern struct nft_expr_type nft_rt_type;
extern struct nft_expr_type nft_exthdr_type;
+#ifdef CONFIG_NETWORK_SECMARK
+extern struct nft_object_type nft_secmark_obj_type;
+#endif
+
int nf_tables_core_module_init(void);
void nf_tables_core_module_exit(void);
diff --git a/include/net/netfilter/nfnetlink_log.h b/include/net/netfilter/nfnetlink_log.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/include/net/netfilter/nfnetlink_log.h
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 0c154f98e987..4c1e99303b5a 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -153,7 +153,7 @@
* nla_find() find attribute in stream of attributes
* nla_find_nested() find attribute in nested attributes
* nla_parse() parse and validate stream of attrs
- * nla_parse_nested() parse nested attribuets
+ * nla_parse_nested() parse nested attributes
* nla_for_each_attr() loop over all attributes
* nla_for_each_nested() loop over the nested attributes
*=========================================================================
@@ -172,7 +172,7 @@ enum {
NLA_FLAG,
NLA_MSECS,
NLA_NESTED,
- NLA_NESTED_COMPAT,
+ NLA_NESTED_ARRAY,
NLA_NUL_STRING,
NLA_BINARY,
NLA_S8,
@@ -180,14 +180,28 @@ enum {
NLA_S32,
NLA_S64,
NLA_BITFIELD32,
+ NLA_REJECT,
+ NLA_EXACT_LEN,
+ NLA_EXACT_LEN_WARN,
__NLA_TYPE_MAX,
};
#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
+enum nla_policy_validation {
+ NLA_VALIDATE_NONE,
+ NLA_VALIDATE_RANGE,
+ NLA_VALIDATE_MIN,
+ NLA_VALIDATE_MAX,
+ NLA_VALIDATE_FUNCTION,
+};
+
/**
* struct nla_policy - attribute validation policy
* @type: Type of attribute or NLA_UNSPEC
+ * @validation_type: type of attribute validation done in addition to
+ * type-specific validation (e.g. range, function call), see
+ * &enum nla_policy_validation
* @len: Type specific length of payload
*
* Policies are defined as arrays of this struct, the array must be
@@ -198,9 +212,11 @@ enum {
* NLA_NUL_STRING Maximum length of string (excluding NUL)
* NLA_FLAG Unused
* NLA_BINARY Maximum length of attribute payload
- * NLA_NESTED Don't use `len' field -- length verification is
- * done by checking len of nested header (or empty)
- * NLA_NESTED_COMPAT Minimum length of structure payload
+ * NLA_NESTED,
+ * NLA_NESTED_ARRAY Length verification is done by checking len of
+ * nested header (or empty); len field is used if
+ * validation_data is also used, for the max attr
+ * number in the nested policy.
* NLA_U8, NLA_U16,
* NLA_U32, NLA_U64,
* NLA_S8, NLA_S16,
@@ -208,9 +224,59 @@ enum {
* NLA_MSECS Leaving the length field zero will verify the
 * given type fits; using it verifies minimum length
* just like "All other"
- * NLA_BITFIELD32 A 32-bit bitmap/bitselector attribute
+ * NLA_BITFIELD32 Unused
+ * NLA_REJECT Unused
+ * NLA_EXACT_LEN Attribute must have exactly this length, otherwise
+ * it is rejected.
+ * NLA_EXACT_LEN_WARN Attribute should have exactly this length, a warning
+ * is logged if it is longer, shorter is rejected.
* All other Minimum length of attribute payload
*
+ * Meaning of `validation_data' field:
+ * NLA_BITFIELD32 This is a 32-bit bitmap/bitselector attribute and
+ * validation data must point to a u32 value of valid
+ * flags
+ * NLA_REJECT This attribute is always rejected and validation data
+ * may point to a string to report as the error instead
+ * of the generic one in extended ACK.
+ * NLA_NESTED Points to a nested policy to validate, must also set
+ * `len' to the max attribute number.
+ * Note that nla_parse() will validate, but of course not
+ * parse, the nested sub-policies.
+ * NLA_NESTED_ARRAY Points to a nested policy to validate, must also set
+ * `len' to the max attribute number. The difference to
+ * NLA_NESTED is the structure - NLA_NESTED has the
+ * nested attributes directly inside, while an array has
+ * the nested attributes at another level down and the
+ * attributes directly in the nesting don't matter.
+ * All other Unused - but note that it's a union
+ *
+ * Meaning of `min' and `max' fields, use via NLA_POLICY_MIN, NLA_POLICY_MAX
+ * and NLA_POLICY_RANGE:
+ * NLA_U8,
+ * NLA_U16,
+ * NLA_U32,
+ * NLA_U64,
+ * NLA_S8,
+ * NLA_S16,
+ * NLA_S32,
+ * NLA_S64 These are used depending on the validation_type
+ * field, if that is min/max/range then the minimum,
+ * maximum and both are used (respectively) to check
+ * the value of the integer attribute.
+ * Note that in the interest of code simplicity and
+ * struct size both limits are s16, so you cannot
+ * enforce a range that doesn't fall within the range
+ * of s16 - do that as usual in the code instead.
+ * All other Unused - but note that it's a union
+ *
+ * Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
+ * NLA_BINARY Validation function called for the attribute,
+ * not compatible with use of the validation_data
+ * as in NLA_BITFIELD32, NLA_REJECT, NLA_NESTED and
+ * NLA_NESTED_ARRAY.
+ * All other Unused - but note that it's a union
+ *
* Example:
* static const struct nla_policy my_policy[ATTR_MAX+1] = {
* [ATTR_FOO] = { .type = NLA_U16 },
@@ -220,11 +286,69 @@ enum {
* };
*/
struct nla_policy {
- u16 type;
+ u8 type;
+ u8 validation_type;
u16 len;
- void *validation_data;
+ union {
+ const void *validation_data;
+ struct {
+ s16 min, max;
+ };
+ int (*validate)(const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
+ };
};
+#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_EXACT_LEN, .len = _len }
+#define NLA_POLICY_EXACT_LEN_WARN(_len) { .type = NLA_EXACT_LEN_WARN, \
+ .len = _len }
+
+#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN)
+#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN)
+
+#define NLA_POLICY_NESTED(maxattr, policy) \
+ { .type = NLA_NESTED, .validation_data = policy, .len = maxattr }
+#define NLA_POLICY_NESTED_ARRAY(maxattr, policy) \
+ { .type = NLA_NESTED_ARRAY, .validation_data = policy, .len = maxattr }
+
+#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
+#define NLA_ENSURE_INT_TYPE(tp) \
+ (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 || \
+ tp == NLA_S16 || tp == NLA_U16 || \
+ tp == NLA_S32 || tp == NLA_U32 || \
+ tp == NLA_S64 || tp == NLA_U64) + tp)
+#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \
+ (__NLA_ENSURE(tp != NLA_BITFIELD32 && \
+ tp != NLA_REJECT && \
+ tp != NLA_NESTED && \
+ tp != NLA_NESTED_ARRAY) + tp)
+
+#define NLA_POLICY_RANGE(tp, _min, _max) { \
+ .type = NLA_ENSURE_INT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_RANGE, \
+ .min = _min, \
+ .max = _max \
+}
+
+#define NLA_POLICY_MIN(tp, _min) { \
+ .type = NLA_ENSURE_INT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MIN, \
+ .min = _min, \
+}
+
+#define NLA_POLICY_MAX(tp, _max) { \
+ .type = NLA_ENSURE_INT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MAX, \
+ .max = _max, \
+}
+
+#define NLA_POLICY_VALIDATE_FN(tp, fn, ...) { \
+ .type = NLA_ENSURE_NO_VALIDATION_PTR(tp), \
+ .validation_type = NLA_VALIDATE_FUNCTION, \
+ .validate = fn, \
+ .len = __VA_ARGS__ + 0, \
+}
+
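
The macros above are meant to be dropped straight into a policy table as initializers. A minimal sketch of a policy combining them (the ATTR_* names and the reject message are invented for illustration):

	enum {
		ATTR_UNSPEC,
		ATTR_PORT,	/* u16, must be at least 1024 */
		ATTR_MAC,	/* Ethernet address */
		ATTR_LEGACY,	/* no longer accepted */
		__ATTR_MAX,
	};
	#define ATTR_MAX (__ATTR_MAX - 1)

	static const struct nla_policy my_policy[ATTR_MAX + 1] = {
		[ATTR_PORT]	= NLA_POLICY_MIN(NLA_U16, 1024),
		[ATTR_MAC]	= NLA_POLICY_ETH_ADDR,
		[ATTR_LEGACY]	= { .type = NLA_REJECT,
				    .validation_data = "ATTR_LEGACY is obsolete" },
	};

Note that NLA_POLICY_MIN() also buys a compile-time check: NLA_ENSURE_INT_TYPE() expands to BUILD_BUG_ON_ZERO(), so applying it to a non-integer type breaks the build instead of failing at runtime.
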
/**
* struct nl_info - netlink source information
* @nlh: Netlink message header of original request
@@ -249,6 +373,9 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
int len, const struct nla_policy *policy,
struct netlink_ext_ack *extack);
+int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack);
int nla_policy_len(const struct nla_policy *, int);
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
@@ -392,13 +519,29 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
- if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) {
+ NL_SET_ERR_MSG(extack, "Invalid header length");
return -EINVAL;
+ }
return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
nlmsg_attrlen(nlh, hdrlen), policy, extack);
}
+static inline int nlmsg_parse_strict(const struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) {
+ NL_SET_ERR_MSG(extack, "Invalid header length");
+ return -EINVAL;
+ }
+
+ return nla_parse_strict(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), policy, extack);
+}
+
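
A sketch of a request handler using the strict variant, continuing the policy sketched earlier (struct my_hdr and my_set_port() are illustrative assumptions):

	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
	{
		struct nlattr *tb[ATTR_MAX + 1];
		int err = 0;

		/* Unlike nlmsg_parse(), the strict variant is expected to
		 * reject attributes unknown to the policy instead of
		 * silently skipping them.
		 */
		err = nlmsg_parse_strict(nlh, sizeof(struct my_hdr), tb,
					 ATTR_MAX, my_policy, extack);
		if (err < 0)
			return err;

		if (tb[ATTR_PORT])
			err = my_set_port(nla_get_u16(tb[ATTR_PORT]));
		return err;
	}
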
/**
* nlmsg_find_attr - find a specific attribute in a netlink message
* @nlh: netlink message header
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index f0e396ab9bec..ef1ed529f33c 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -45,6 +45,7 @@ struct netns_sysctl_ipv6 {
int max_dst_opts_len;
int max_hbh_opts_len;
int seg6_flowlabel;
+ bool skip_notify_on_dev_down;
};
struct netns_ipv6 {
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 316694dafa5b..008f466d1da7 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
* According to specification 102 622 chapter 4.4 Pipes,
* the pipe identifier is 7 bits long.
*/
-#define NFC_HCI_MAX_PIPES 127
+#define NFC_HCI_MAX_PIPES 128
struct nfc_hci_init_data {
u8 gate_count;
struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 75a3f3fdb359..72ffb3120ced 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -65,11 +65,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
return block->q;
}
-static inline struct net_device *tcf_block_dev(struct tcf_block *block)
-{
- return tcf_block_q(block)->dev_queue->dev;
-}
-
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident);
@@ -122,11 +117,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
return NULL;
}
-static inline struct net_device *tcf_block_dev(struct tcf_block *block)
-{
- return NULL;
-}
-
static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
void *cb_priv)
@@ -318,7 +308,7 @@ tcf_exts_stats_update(const struct tcf_exts *exts,
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
- tcf_action_stats_update(a, bytes, packets, lastuse);
+ tcf_action_stats_update(a, bytes, packets, lastuse, true);
}
preempt_enable();
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 7dc769e5452b..a16fbe9a2a67 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -102,6 +102,7 @@ int qdisc_set_default(const char *id);
void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
+struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
struct nlattr *tab,
struct netlink_ext_ack *extack);
diff --git a/include/net/route.h b/include/net/route.h
index bb53cdba38dc..9883dc82f723 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -201,10 +201,9 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
- u32 mark, u8 protocol, int flow_flags);
+ u8 protocol);
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
-void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
- u8 protocol, int flow_flags);
+void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u8 protocol);
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
void ip_rt_send_redirect(struct sk_buff *skb);
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 0bbaa5488423..cf26e5aacac4 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -165,6 +165,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
struct netlink_ext_ack *exterr);
+struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a6d00093f35e..4d736427a4cb 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -19,6 +19,7 @@ struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
+struct bpf_flow_keys;
typedef int tc_setup_cb_t(enum tc_setup_type type,
void *type_data, void *cb_priv);
@@ -105,6 +106,7 @@ struct Qdisc {
spinlock_t busylock ____cacheline_aligned_in_smp;
spinlock_t seqlock;
+ struct rcu_head rcu;
};
static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
@@ -114,6 +116,19 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
refcount_inc(&qdisc->refcnt);
}
+/* Intended to be used by unlocked users, when concurrent qdisc release is
+ * possible.
+ */
+
+static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_BUILTIN)
+ return qdisc;
+ if (refcount_inc_not_zero(&qdisc->refcnt))
+ return qdisc;
+ return NULL;
+}
+
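
A sketch of the lookup pattern this enables for users running without the RTNL lock, pairing it with qdisc_lookup_rcu() from the pkt_sched.h hunk above (that qdisc_put_unlocked() is the matching release here is an assumption based on its name):

	static int my_find_qdisc(struct net_device *dev, u32 handle,
				 struct Qdisc **out)
	{
		struct Qdisc *q;

		rcu_read_lock();
		q = qdisc_lookup_rcu(dev, handle);
		/* The qdisc may be released concurrently; only take a
		 * reference if the refcount has not already hit zero.
		 */
		if (q)
			q = qdisc_refcount_inc_nz(q);
		rcu_read_unlock();

		if (!q)
			return -ENOENT;
		*out = q;	/* caller releases with qdisc_put_unlocked() */
		return 0;
	}
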
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK)
@@ -307,9 +322,14 @@ struct tcf_proto {
};
struct qdisc_skb_cb {
- unsigned int pkt_len;
- u16 slave_dev_queue_mapping;
- u16 tc_classid;
+ union {
+ struct {
+ unsigned int pkt_len;
+ u16 slave_dev_queue_mapping;
+ u16 tc_classid;
+ };
+ struct bpf_flow_keys *flow_keys;
+ };
#define QDISC_CB_PRIV_LEN 20
unsigned char data[QDISC_CB_PRIV_LEN];
};
@@ -331,7 +351,7 @@ struct tcf_chain {
struct tcf_block {
struct list_head chain_list;
u32 index; /* block index for shared blocks */
- unsigned int refcnt;
+ refcount_t refcnt;
struct net *net;
struct Qdisc *q;
struct list_head cb_list;
@@ -343,6 +363,7 @@ struct tcf_block {
struct tcf_chain *chain;
struct list_head filter_chain_list;
} chain0;
+ struct rcu_head rcu;
};
static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
@@ -362,7 +383,7 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
}
static inline void
-tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
+tc_cls_offload_cnt_update(struct tcf_block *block, u32 *cnt,
u32 *flags, bool add)
{
if (add) {
@@ -554,7 +575,8 @@ void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
-void qdisc_destroy(struct Qdisc *qdisc);
+void qdisc_put(struct Qdisc *qdisc);
+void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
@@ -828,8 +850,8 @@ static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
qh->qlen = 0;
}
-static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
- struct qdisc_skb_head *qh)
+static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
+ struct qdisc_skb_head *qh)
{
struct sk_buff *last = qh->tail;
@@ -842,14 +864,24 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
qh->head = skb;
}
qh->qlen++;
- qdisc_qstats_backlog_inc(sch, skb);
+}
+static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
+{
+ __qdisc_enqueue_tail(skb, &sch->q);
+ qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
}
-static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
+static inline void __qdisc_enqueue_head(struct sk_buff *skb,
+ struct qdisc_skb_head *qh)
{
- return __qdisc_enqueue_tail(skb, sch, &sch->q);
+ skb->next = qh->head;
+
+ if (!qh->head)
+ qh->tail = skb;
+ qh->head = skb;
+ qh->qlen++;
}
static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 86f034b524d4..8dadc74c22e7 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -148,11 +148,6 @@ SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive)
#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \
a->chunk_hdr->type == SCTP_CID_I_DATA)
-/* Calculate the actual data size in a data chunk */
-#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) - \
- (unsigned long)(c->chunk_hdr) - \
- sctp_datachk_len(&c->asoc->stream)))
-
/* Internal error codes */
enum sctp_ierror {
SCTP_IERROR_NO_ERROR = 0,
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 5ef1bad81ef5..9e3d32746430 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -347,7 +347,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
__u16 size;
size = ntohs(chunk->chunk_hdr->length);
- size -= sctp_datahdr_len(&chunk->asoc->stream);
+ size -= sctp_datachk_len(&chunk->asoc->stream);
return size;
}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 28a7c8e44636..a11f93790476 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -876,6 +876,8 @@ struct sctp_transport {
unsigned long sackdelay;
__u32 sackfreq;
+ atomic_t mtu_info;
+
/* When was the last time that we heard from this transport? We use
* this to pick new active and retran paths.
*/
diff --git a/include/net/sock.h b/include/net/sock.h
index 433f45fc2d68..f665d74ae509 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -422,8 +422,8 @@ struct sock {
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
- u32 sk_pacing_rate; /* bytes per second */
- u32 sk_max_pacing_rate;
+ unsigned long sk_pacing_rate; /* bytes per second */
+ unsigned long sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
@@ -800,6 +800,7 @@ enum sock_flags {
SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
SOCK_TXTIME,
+ SOCK_XDP, /* XDP is attached */
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -1491,6 +1492,7 @@ static inline void lock_sock(struct sock *sk)
lock_sock_nested(sk, 0);
}
+void __release_sock(struct sock *sk);
void release_sock(struct sock *sk);
/* BH context may only use the following locking interface. */
@@ -2057,14 +2059,20 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
/**
* sock_poll_wait - place memory barrier behind the poll_wait call.
* @filp: file
+ * @sock: socket to wait on
* @p: poll_table
*
* See the comments in the wq_has_sleeper function.
+ *
+ * Do not derive sock from filp->private_data here. An SMC socket establishes
+ * an internal TCP socket that is used in the fallback case. All socket
+ * operations on the SMC socket are then forwarded to the TCP socket. In case of
+ * poll, the filp->private_data pointer references the SMC socket because the
+ * TCP socket has no file assigned.
*/
-static inline void sock_poll_wait(struct file *filp, poll_table *p)
+static inline void sock_poll_wait(struct file *filp, struct socket *sock,
+ poll_table *p)
{
- struct socket *sock = filp->private_data;
-
if (!poll_does_not_wait(p)) {
poll_wait(filp, &sock->wq->wait, p);
/* We need to be sure we are in sync with the
@@ -2212,10 +2220,6 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
-int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
- int sg_start, int *sg_curr, unsigned int *sg_size,
- int first_coalesce);
-
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d574ce63bf22..881ecb1555bf 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -145,6 +145,10 @@ enum switchdev_notifier_type {
SWITCHDEV_FDB_ADD_TO_DEVICE,
SWITCHDEV_FDB_DEL_TO_DEVICE,
SWITCHDEV_FDB_OFFLOADED,
+
+ SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
+ SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE,
+ SWITCHDEV_VXLAN_FDB_OFFLOADED,
};
struct switchdev_notifier_info {
@@ -155,7 +159,8 @@ struct switchdev_notifier_fdb_info {
struct switchdev_notifier_info info; /* must be first */
const unsigned char *addr;
u16 vid;
- bool added_by_user;
+ u8 added_by_user:1,
+ offloaded:1;
};
static inline struct net_device *
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 770917d0caa7..a18914d20486 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -732,7 +732,7 @@ void tcp_send_window_probe(struct sock *sk);
static inline u64 tcp_clock_ns(void)
{
- return local_clock();
+ return ktime_get_ns();
}
static inline u64 tcp_clock_us(void)
@@ -752,17 +752,7 @@ static inline u32 tcp_time_stamp_raw(void)
return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
}
-
-/* Refresh 1us clock of a TCP socket,
- * ensuring monotically increasing values.
- */
-static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
-{
- u64 val = tcp_clock_us();
-
- if (val > tp->tcp_mstamp)
- tp->tcp_mstamp = val;
-}
+void tcp_mstamp_refresh(struct tcp_sock *tp);
static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
@@ -771,7 +761,13 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
- return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* provide the departure time in us unit */
+static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
+{
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}
@@ -817,7 +813,7 @@ struct tcp_skb_cb {
#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
#define TCPCB_LOST 0x04 /* SKB is lost */
#define TCPCB_TAGBITS 0x07 /* All tag bits */
-#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp) */
+#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */
#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
TCPCB_REPAIRED)
@@ -862,6 +858,21 @@ static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}
+static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
+{
+ return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
+}
+
+static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
+{
+ return TCP_SKB_CB(skb)->bpf.sk_redir;
+}
+
+static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
+{
+ TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
* as TCP moves IP6CB into a different location in skb->cb[]
@@ -1234,8 +1245,31 @@ static inline bool tcp_needs_internal_pacing(const struct sock *sk)
return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}
+/* Return in jiffies the delay before one skb is sent.
+ * If @skb is NULL, we look at EDT for next packet being sent on the socket.
+ */
+static inline unsigned long tcp_pacing_delay(const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns;
+
+ pacing_delay -= tcp_sk(sk)->tcp_clock_cache;
+
+ return pacing_delay > 0 ? nsecs_to_jiffies(pacing_delay) : 0;
+}
+
+static inline void tcp_reset_xmit_timer(struct sock *sk,
+ const int what,
+ unsigned long when,
+ const unsigned long max_when,
+ const struct sk_buff *skb)
+{
+ inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb),
+ max_when);
+}
+
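
A worked example of the conversion (numbers invented): with skb->tstamp = 1,005,000,000 ns and tp->tcp_clock_cache = 1,000,000,000 ns, the earliest departure time lies 5 ms ahead, so tcp_pacing_delay() returns nsecs_to_jiffies(5000000), i.e. 5 jiffies at HZ=1000, and tcp_reset_xmit_timer() arms the timer that much later; a departure time already in the past yields 0 and the timer behaves as before.
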
/* Something is really bad, we could not queue an additional packet,
- * because qdisc is full or receiver sent a 0 window.
+ * because qdisc is full or receiver sent a 0 window, or we are paced.
* We do not want to add fuel to the fire, or abort too early,
* so make sure the timer we arm now is at least 200ms in the future,
* regardless of current icsk_rto value (as it could be ~2ms)
@@ -1257,8 +1291,9 @@ static inline unsigned long tcp_probe0_when(const struct sock *sk,
static inline void tcp_check_probe_timer(struct sock *sk)
{
if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_base(sk), TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ tcp_probe0_base(sk), TCP_RTO_MAX,
+ NULL);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
@@ -1940,7 +1975,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
const struct sk_buff *skb = tcp_rtx_queue_head(sk);
u32 rto = inet_csk(sk)->icsk_rto;
- u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+ u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
}
@@ -2040,11 +2075,6 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
#define TCP_ULP_MAX 128
#define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
-enum {
- TCP_ULP_TLS,
- TCP_ULP_BPF,
-};
-
struct tcp_ulp_ops {
struct list_head list;
@@ -2053,15 +2083,12 @@ struct tcp_ulp_ops {
/* cleanup ulp */
void (*release)(struct sock *sk);
- int uid;
char name[TCP_ULP_NAME_MAX];
- bool user_visible;
struct module *owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
-int tcp_set_ulp_id(struct sock *sk, const int ulp);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
@@ -2069,6 +2096,18 @@ void tcp_cleanup_ulp(struct sock *sk);
__MODULE_INFO(alias, alias_userspace, name); \
__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
+struct sk_msg;
+struct sk_psock;
+
+int tcp_bpf_init(struct sock *sk);
+void tcp_bpf_reinit(struct sock *sk);
+int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
+ int flags);
+int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len);
+int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
+ struct msghdr *msg, int len, int flags);
+
/* Call BPF_SOCK_OPS program that returns an int. If the return value
* is < 0, then the BPF op failed (for example if the loaded BPF
* program does not support the chosen operation or there is no BPF
diff --git a/include/net/tls.h b/include/net/tls.h
index d5c683e8bb22..bab5627ff5e3 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -39,9 +39,11 @@
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
+#include <linux/skmsg.h>
+
#include <net/tcp.h>
#include <net/strparser.h>
-
+#include <crypto/aead.h>
#include <uapi/linux/tls.h>
@@ -93,24 +95,45 @@ enum {
TLS_NUM_CONFIG,
};
-struct tls_sw_context_tx {
- struct crypto_aead *aead_send;
- struct crypto_wait async_wait;
-
- char aad_space[TLS_AAD_SPACE_SIZE];
-
- unsigned int sg_plaintext_size;
- int sg_plaintext_num_elem;
- struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS];
+/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
+ * allocated or mapped for each TLS record. After encryption, the records are
+ * stored in a linked list.
+ */
+struct tls_rec {
+ struct list_head list;
+ int tx_ready;
+ int tx_flags;
+ int inplace_crypto;
- unsigned int sg_encrypted_size;
- int sg_encrypted_num_elem;
- struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS];
+ struct sk_msg msg_plaintext;
+ struct sk_msg msg_encrypted;
- /* AAD | sg_plaintext_data | sg_tag */
+ /* AAD | msg_plaintext.sg.data | sg_tag */
struct scatterlist sg_aead_in[2];
- /* AAD | sg_encrypted_data (data contain overhead for hdr&iv&tag) */
+ /* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
struct scatterlist sg_aead_out[2];
+
+ char aad_space[TLS_AAD_SPACE_SIZE];
+ struct aead_request aead_req;
+ u8 aead_req_ctx[];
+};
+
+struct tx_work {
+ struct delayed_work work;
+ struct sock *sk;
+};
+
+struct tls_sw_context_tx {
+ struct crypto_aead *aead_send;
+ struct crypto_wait async_wait;
+ struct tx_work tx_work;
+ struct tls_rec *open_rec;
+ struct list_head tx_list;
+ atomic_t encrypt_pending;
+ int async_notify;
+
+#define BIT_TX_SCHEDULED 0
+ unsigned long tx_bitmask;
};
struct tls_sw_context_rx {
@@ -119,11 +142,12 @@ struct tls_sw_context_rx {
struct strparser strp;
void (*saved_data_ready)(struct sock *sk);
- unsigned int (*sk_poll)(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
+
struct sk_buff *recv_pkt;
u8 control;
bool decrypted;
+ atomic_t decrypt_pending;
+ bool async_notify;
};
struct tls_record_info {
@@ -171,15 +195,14 @@ struct cipher_context {
char *rec_seq;
};
+union tls_crypto_context {
+ struct tls_crypto_info info;
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+};
+
struct tls_context {
- union {
- struct tls_crypto_info crypto_send;
- struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128;
- };
- union {
- struct tls_crypto_info crypto_recv;
- struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
- };
+ union tls_crypto_context crypto_send;
+ union tls_crypto_context crypto_recv;
struct list_head list;
struct net_device *netdev;
@@ -196,10 +219,11 @@ struct tls_context {
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
+
unsigned long flags;
bool in_tcp_sendpages;
+ bool pending_open_record_frags;
- u16 pending_open_record_frags;
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);
@@ -247,8 +271,7 @@ void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
-unsigned int tls_sw_poll(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
+bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
@@ -260,6 +283,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
void tls_device_sk_destruct(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
+int tls_tx_records(struct sock *sk, int flags);
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn);
@@ -278,6 +302,9 @@ void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
int tls_push_sg(struct sock *sk, struct tls_context *ctx,
struct scatterlist *sg, u16 first_offset,
int flags);
+int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
+ int flags);
+
int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
int flags, long *timeo);
@@ -311,6 +338,17 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
return tls_ctx->pending_open_record_frags;
}
+static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
+{
+ struct tls_rec *rec;
+
+ rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
+ if (!rec)
+ return false;
+
+ return READ_ONCE(rec->tx_ready);
+}
+
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
@@ -367,8 +405,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx,
* size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
*/
buf[0] = record_type;
- buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version);
- buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version);
+ buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version);
+ buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version);
/* we can use IV for nonce explicit according to spec */
buf[3] = pkt_len >> 8;
buf[4] = pkt_len & 0xFF;
diff --git a/include/net/udp.h b/include/net/udp.h
index 8482a990b0bb..9e82cb391dea 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -443,8 +443,10 @@ int udpv4_offload_init(void);
void udp_init(void);
+DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
+DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index b99a02ae3934..03431c148e16 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -5,7 +5,8 @@
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/dst_metadata.h>
-#include <net/udp_tunnel.h>
+#include <net/rtnetlink.h>
+#include <net/switchdev.h>
/* VXLAN protocol (RFC 7348) header:
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -191,6 +192,7 @@ union vxlan_addr {
struct vxlan_rdst {
union vxlan_addr remote_ip;
__be16 remote_port;
+ u8 offloaded:1;
__be32 remote_vni;
u32 remote_ifindex;
struct list_head list;
@@ -371,4 +373,65 @@ static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
return vs->sock->sk->sk_family;
}
+#if IS_ENABLED(CONFIG_IPV6)
+
+static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
+{
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_any(&ipa->sin6.sin6_addr);
+ else
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+}
+
+static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
+{
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
+ else
+ return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+}
+
+#else /* !IS_ENABLED(CONFIG_IPV6) */
+
+static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
+{
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+}
+
+static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
+{
+ return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+}
+
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+static inline bool netif_is_vxlan(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "vxlan");
+}
+
+struct switchdev_notifier_vxlan_fdb_info {
+ struct switchdev_notifier_info info; /* must be first */
+ union vxlan_addr remote_ip;
+ __be16 remote_port;
+ __be32 remote_vni;
+ u32 remote_ifindex;
+ u8 eth_addr[ETH_ALEN];
+ __be32 vni;
+ bool offloaded;
+};
+
+#if IS_ENABLED(CONFIG_VXLAN)
+int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info);
+#else
+static inline int
+vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ return -ENOENT;
+}
+#endif
+
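
A sketch of how a switchdev driver might use these helpers when asked to offload an FDB entry (my_hw_program_tunnel() and the overall flow are invented for illustration):

	static int my_offload_vxlan_fdb(struct net_device *dev, const u8 *mac,
					__be32 vni)
	{
		struct switchdev_notifier_vxlan_fdb_info fdb_info;
		int err;

		if (!netif_is_vxlan(dev))
			return -EOPNOTSUPP;

		err = vxlan_fdb_find_uc(dev, mac, vni, &fdb_info);
		if (err)
			return err;	/* -ENOENT when VXLAN is not built in */

		/* Wildcard and multicast remotes have no single tunnel
		 * destination to program into hardware.
		 */
		if (vxlan_addr_any(&fdb_info.remote_ip) ||
		    vxlan_addr_multicast(&fdb_info.remote_ip))
			return -EOPNOTSUPP;

		return my_hw_program_tunnel(&fdb_info);
	}
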
#endif
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 76b95256c266..0f25b3675c5c 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -91,6 +91,8 @@ static inline void xdp_scrub_frame(struct xdp_frame *frame)
frame->dev_rx = NULL;
}
+struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
+
/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
@@ -99,9 +101,8 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
int metasize;
int headroom;
- /* TODO: implement clone, copy, use "native" MEM_TYPE */
if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY)
- return NULL;
+ return xdp_convert_zc_to_xdp_frame(xdp);
/* Assure headroom is available for storing info */
headroom = xdp->data - xdp->data_hard_start;
@@ -135,6 +136,7 @@ void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
enum xdp_mem_type type, void *allocator);
+void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
/* Drivers not supporting XDP metadata can use this helper, which
* rejects any room expansion for metadata as a result.
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 7161856bcf9c..13acb9803a6d 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -16,21 +16,23 @@
struct net_device;
struct xsk_queue;
-struct xdp_umem_props {
- u64 chunk_mask;
- u64 size;
-};
-
struct xdp_umem_page {
void *addr;
dma_addr_t dma;
};
+struct xdp_umem_fq_reuse {
+ u32 nentries;
+ u32 length;
+ u64 handles[];
+};
+
struct xdp_umem {
struct xsk_queue *fq;
struct xsk_queue *cq;
struct xdp_umem_page *pages;
- struct xdp_umem_props props;
+ u64 chunk_mask;
+ u64 size;
u32 headroom;
u32 chunk_size_nohr;
struct user_struct *user;
@@ -41,6 +43,7 @@ struct xdp_umem {
struct page **pgs;
u32 npgs;
struct net_device *dev;
+ struct xdp_umem_fq_reuse *fq_reuse;
u16 queue_id;
bool zc;
spinlock_t xsk_list_lock;
@@ -79,6 +82,50 @@ void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
+struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
+struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
+ struct xdp_umem_fq_reuse *newq);
+void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
+struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
+
+static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
+{
+ return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
+}
+
+static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
+{
+ return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
+}
+
+/* Reuse-queue aware version of FILL queue helpers */
+static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+ struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+ if (!rq->length)
+ return xsk_umem_peek_addr(umem, addr);
+
+ *addr = rq->handles[rq->length - 1];
+ return addr;
+}
+
+static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+{
+ struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+ if (!rq->length)
+ xsk_umem_discard_addr(umem);
+ else
+ rq->length--;
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+ struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+ rq->handles[rq->length++] = addr;
+}
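
A sketch of how a zero-copy driver's RX refill path might combine the reuse-queue helpers with the address translation helpers above (the function and its contract are invented; masking with chunk_mask follows the usual driver convention):

	static int my_rx_alloc(struct xdp_umem *umem, dma_addr_t *dma,
			       void **data)
	{
		u64 handle;

		/* Prefer recycled frames; fall back to the FILL queue. */
		if (!xsk_umem_peek_addr_rq(umem, &handle))
			return -ENOMEM;

		handle &= umem->chunk_mask;
		*dma = xdp_umem_get_dma(umem, handle);
		*data = xdp_umem_get_data(umem, handle);

		xsk_umem_discard_addr_rq(umem);
		return 0;
	}

On teardown, handles for buffers the ring still holds would be pushed back with xsk_umem_fq_reuse() so they are not leaked.
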
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
@@ -98,6 +145,74 @@ static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
return false;
}
+
+static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
+{
+ return NULL;
+}
+
+static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
+{
+}
+
+static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
+ u32 *len)
+{
+ return false;
+}
+
+static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
+{
+}
+
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+{
+ return NULL;
+}
+
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
+ struct xdp_umem *umem,
+ struct xdp_umem_fq_reuse *newq)
+{
+ return NULL;
+}
+static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+{
+}
+
+static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
+ u16 queue_id)
+{
+ return NULL;
+}
+
+static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
+{
+ return NULL;
+}
+
+static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
+{
+ return 0;
+}
+
+static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+ return NULL;
+}
+
+static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+}
+
#endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 77c7908b7d73..2734c895c1bf 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -46,7 +46,6 @@
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
-#include <net/ipv6.h>
#include <net/net_namespace.h>
/**
@@ -95,20 +94,18 @@ int rdma_translate_ip(const struct sockaddr *addr,
* @timeout_ms: Amount of time to wait for the address resolution to complete.
* @callback: Callback invoked once address resolution has completed, timed out,
* or been canceled. A status of 0 indicates success.
+ * @resolve_by_gid_attr: Resolve the ip based on the GID attribute from
+ * rdma_dev_addr.
* @context: User-specified context associated with the call.
*/
int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr, int timeout_ms,
+ struct rdma_dev_addr *addr, unsigned long timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
- void *context);
+ bool resolve_by_gid_attr, void *context);
void rdma_addr_cancel(struct rdma_dev_addr *addr);
-void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
- const struct net_device *dev,
- const unsigned char *dst_dev_addr);
-
int rdma_addr_size(const struct sockaddr *addr);
int rdma_addr_size_in6(struct sockaddr_in6 *addr);
int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index c10f4b5ea8ab..49f4f75499b3 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -583,7 +583,7 @@ struct ib_cm_sidr_req_param {
struct sa_path_rec *path;
const struct ib_gid_attr *sgid_attr;
__be64 service_id;
- int timeout_ms;
+ unsigned long timeout_ms;
const void *private_data;
u8 private_data_len;
u8 max_cm_retries;
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index b6ddf2a1b9d8..19520979b84c 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -449,28 +449,23 @@ struct ib_sa_query;
void ib_sa_cancel_query(int id, struct ib_sa_query *query);
-int ib_sa_path_rec_get(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
- void (*callback)(int status,
- struct sa_path_rec *resp,
+int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
+ u8 port_num, struct sa_path_rec *rec,
+ ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
+ gfp_t gfp_mask,
+ void (*callback)(int status, struct sa_path_rec *resp,
void *context),
- void *context,
- struct ib_sa_query **query);
+ void *context, struct ib_sa_query **query);
int ib_sa_service_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- u8 method,
- struct ib_sa_service_rec *rec,
- ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
- void (*callback)(int status,
- struct ib_sa_service_rec *resp,
- void *context),
- void *context,
- struct ib_sa_query **sa_query);
+ struct ib_device *device, u8 port_num, u8 method,
+ struct ib_sa_service_rec *rec,
+ ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
+ gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_sa_service_rec *resp,
+ void *context),
+ void *context, struct ib_sa_query **sa_query);
struct ib_sa_multicast {
struct ib_sa_mcmember_rec rec;
@@ -573,12 +568,11 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_guidinfo_rec *resp,
void *context),
- void *context,
- struct ib_sa_query **sa_query);
+ void *context, struct ib_sa_query **sa_query);
bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
struct ib_device *device,
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index a1fd63871d17..5d3755ec5afa 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -42,15 +42,14 @@ struct ib_umem_odp;
struct ib_umem {
struct ib_ucontext *context;
+ struct mm_struct *owning_mm;
size_t length;
unsigned long address;
int page_shift;
- int writable;
- int hugetlb;
+ u32 writable : 1;
+ u32 hugetlb : 1;
+ u32 is_odp : 1;
struct work_struct work;
- struct mm_struct *mm;
- unsigned long diff;
- struct ib_umem_odp *odp_data;
struct sg_table sg_head;
int nmap;
int npages;
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 381cdf5a9bd1..0b1446fe2fab 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -43,6 +43,9 @@ struct umem_odp_node {
};
struct ib_umem_odp {
+ struct ib_umem umem;
+ struct ib_ucontext_per_mm *per_mm;
+
/*
* An array of the pages included in the on-demand paging umem.
* Indices of pages that are currently not mapped into the device will
@@ -64,16 +67,9 @@ struct ib_umem_odp {
struct mutex umem_mutex;
void *private; /* for the HW driver to use. */
- /* When false, use the notifier counter in the ucontext struct. */
- bool mn_counters_active;
int notifiers_seq;
int notifiers_count;
- /* A linked list of umems that don't have private mmu notifier
- * counters yet. */
- struct list_head no_private_counters;
- struct ib_umem *umem;
-
/* Tree tracking */
struct umem_odp_node interval_tree;
@@ -82,15 +78,34 @@ struct ib_umem_odp {
struct work_struct work;
};
+static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
+{
+ return container_of(umem, struct ib_umem_odp, umem);
+}
+
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
- int access);
-struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
- unsigned long addr,
- size_t size);
+struct ib_ucontext_per_mm {
+ struct ib_ucontext *context;
+ struct mm_struct *mm;
+ struct pid *tgid;
+ bool active;
+
+ struct rb_root_cached umem_tree;
+ /* Protects umem_tree */
+ struct rw_semaphore umem_rwsem;
-void ib_umem_odp_release(struct ib_umem *umem);
+ struct mmu_notifier mn;
+ unsigned int odp_mrs_count;
+
+ struct list_head ucontext_list;
+ struct rcu_head rcu;
+};
+
+int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
+struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
+ unsigned long addr, size_t size);
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
/*
* The lower 2 bits of the DMA address signal the R/W permissions for
@@ -105,13 +120,14 @@ void ib_umem_odp_release(struct ib_umem *umem);
#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
-int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
- u64 access_mask, unsigned long current_seq);
+int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
+ u64 bcnt, u64 access_mask,
+ unsigned long current_seq);
-void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
+void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
u64 bound);
-typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
+typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
void *cookie);
/*
* Call the callback on each ib_umem in the range. Returns the logical or of
@@ -129,46 +145,37 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
u64 addr, u64 length);
-static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
+static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
unsigned long mmu_seq)
{
/*
* This code is strongly based on the KVM code from
* mmu_notifier_retry. Should be called with
- * the relevant locks taken (item->odp_data->umem_mutex
+ * the relevant locks taken (umem_odp->umem_mutex
* and the ucontext umem_mutex semaphore locked for read).
*/
- /* Do not allow page faults while the new ib_umem hasn't seen a state
- * with zero notifiers yet, and doesn't have its own valid set of
- * private counters. */
- if (!item->odp_data->mn_counters_active)
- return 1;
-
- if (unlikely(item->odp_data->notifiers_count))
+ if (unlikely(umem_odp->notifiers_count))
return 1;
- if (item->odp_data->notifiers_seq != mmu_seq)
+ if (umem_odp->notifiers_seq != mmu_seq)
return 1;
return 0;
}
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline int ib_umem_odp_get(struct ib_ucontext *context,
- struct ib_umem *umem,
- int access)
+static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
return -EINVAL;
}
-static inline struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
- unsigned long addr,
- size_t size)
+static inline struct ib_umem_odp *
+ib_alloc_odp_umem(struct ib_ucontext *context, unsigned long addr, size_t size)
{
return ERR_PTR(-EINVAL);
}
-static inline void ib_umem_odp_release(struct ib_umem *umem) {}
+static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e950c2a68f06..9c0c2132a2d6 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -69,8 +69,11 @@
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
+struct ib_umem_odp;
+
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
+extern struct workqueue_struct *ib_comp_unbound_wq;
union ib_gid {
u8 raw[16];
@@ -1137,7 +1140,9 @@ enum ib_qp_create_flags {
*/
struct ib_qp_init_attr {
+ /* Consumer's event_handler callback must not block */
void (*event_handler)(struct ib_event *, void *);
+
void *qp_context;
struct ib_cq *send_cq;
struct ib_cq *recv_cq;
@@ -1146,7 +1151,7 @@ struct ib_qp_init_attr {
struct ib_qp_cap cap;
enum ib_sig_type sq_sig_type;
enum ib_qp_type qp_type;
- enum ib_qp_create_flags create_flags;
+ u32 create_flags;
/*
* Only needed for special QP types, or when using the RW API.
@@ -1278,21 +1283,27 @@ struct ib_qp_attr {
};
enum ib_wr_opcode {
- IB_WR_RDMA_WRITE,
- IB_WR_RDMA_WRITE_WITH_IMM,
- IB_WR_SEND,
- IB_WR_SEND_WITH_IMM,
- IB_WR_RDMA_READ,
- IB_WR_ATOMIC_CMP_AND_SWP,
- IB_WR_ATOMIC_FETCH_AND_ADD,
- IB_WR_LSO,
- IB_WR_SEND_WITH_INV,
- IB_WR_RDMA_READ_WITH_INV,
- IB_WR_LOCAL_INV,
- IB_WR_REG_MR,
- IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
- IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
+ /* These are shared with userspace */
+ IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
+ IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
+ IB_WR_SEND = IB_UVERBS_WR_SEND,
+ IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
+ IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
+ IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
+ IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
+ IB_WR_LSO = IB_UVERBS_WR_TSO,
+ IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
+ IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
+ IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
+ IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
+ IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
+ IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
+ IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
+
+ /* These are kernel only and cannot be issued by userspace */
+ IB_WR_REG_MR = 0x20,
IB_WR_REG_SIG_MR,
+
/* reserve values for low level drivers' internal use.
* These values will not be used at all in the ib core layer.
*/
@@ -1485,26 +1496,15 @@ struct ib_ucontext {
* it is set when we are closing the file descriptor and indicates
* that mm_sem may be locked.
*/
- int closing;
+ bool closing;
bool cleanup_retryable;
- struct pid *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct rb_root_cached umem_tree;
- /*
- * Protects .umem_rbroot and tree, as well as odp_mrs_count and
- * mmu notifiers registration.
- */
- struct rw_semaphore umem_rwsem;
- void (*invalidate_range)(struct ib_umem *umem,
+ void (*invalidate_range)(struct ib_umem_odp *umem_odp,
unsigned long start, unsigned long end);
-
- struct mmu_notifier mn;
- atomic_t notifier_count;
- /* A list of umems that don't have private mmu notifier counters yet. */
- struct list_head no_private_counters;
- int odp_mrs_count;
+ struct mutex per_mm_list_lock;
+ struct list_head per_mm_list;
#endif
struct ib_rdmacg_object cg_obj;
@@ -1570,9 +1570,10 @@ struct ib_ah {
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
enum ib_poll_context {
- IB_POLL_DIRECT, /* caller context, no hw completions */
- IB_POLL_SOFTIRQ, /* poll from softirq context */
- IB_POLL_WORKQUEUE, /* poll from workqueue */
+ IB_POLL_DIRECT, /* caller context, no hw completions */
+ IB_POLL_SOFTIRQ, /* poll from softirq context */
+ IB_POLL_WORKQUEUE, /* poll from workqueue */
+ IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
};
struct ib_cq {
@@ -1589,6 +1590,7 @@ struct ib_cq {
struct irq_poll iop;
struct work_struct work;
};
+ struct workqueue_struct *comp_wq;
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -2223,6 +2225,16 @@ struct rdma_netdev {
union ib_gid *gid, u16 mlid);
};
+struct rdma_netdev_alloc_params {
+ size_t sizeof_priv;
+ unsigned int txqs;
+ unsigned int rxqs;
+ void *param;
+
+ int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
+ struct net_device *netdev, void *param);
+};
+
struct ib_port_pkey_list {
/* Lock to hold while modifying the list. */
spinlock_t list_lock;
@@ -2253,10 +2265,11 @@ struct ib_device {
struct list_head event_handler_list;
spinlock_t event_handler_lock;
- spinlock_t client_data_lock;
+ rwlock_t client_data_lock;
struct list_head core_list;
/* Access to the client_data_list is protected by the client_data_lock
- * spinlock and the lists_rwsem read-write semaphore */
+ * rwlock and the lists_rwsem read-write semaphore
+ */
struct list_head client_data_list;
struct ib_cache cache;
@@ -2523,8 +2536,8 @@ struct ib_device {
/**
* rdma netdev operation
*
- * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
- * doesn't support the specified rdma netdev type.
+ * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
+ * must return -EOPNOTSUPP if it doesn't support the specified type.
*/
struct net_device *(*alloc_rdma_netdev)(
struct ib_device *device,
@@ -2534,9 +2547,19 @@ struct ib_device {
unsigned char name_assign_type,
void (*setup)(struct net_device *));
+ int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params);
+
struct module *owner;
struct device dev;
- struct kobject *ports_parent;
+ /* First group is for device attributes,
+ * second group is for driver-provided attributes (optional);
+ * the array is NULL-terminated.
+ */
+ const struct attribute_group *groups[3];
+
+ struct kobject *ports_kobj;
struct list_head port_list;
enum {
@@ -2619,9 +2642,9 @@ void ib_dealloc_device(struct ib_device *device);
void ib_get_device_fw_str(struct ib_device *device, char *str);
-int ib_register_device(struct ib_device *device,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *));
+int ib_register_device(struct ib_device *device, const char *name,
+ int (*port_callback)(struct ib_device *, u8,
+ struct kobject *));
void ib_unregister_device(struct ib_device *device);
int ib_register_client (struct ib_client *client);
@@ -2631,6 +2654,28 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
void *data);
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot);
+int rdma_user_mmap_page(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma, struct page *page,
+ unsigned long size);
+#else
+static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size,
+ pgprot_t prot)
+{
+ return -EINVAL;
+}
+static inline int rdma_user_mmap_page(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma, struct page *page,
+ unsigned long size)
+{
+ return -EINVAL;
+}
+#endif
+
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
@@ -2714,7 +2759,6 @@ static inline int ib_destroy_usecnt(atomic_t *usecnt,
* @next_state: Next QP state
* @type: QP type
* @mask: Mask of supplied QP attributes
- * @ll : link layer of port
*
* This function is a helper function that a low-level driver's
* modify_qp method can use to validate the consumer's input. It
@@ -2723,8 +2767,7 @@ static inline int ib_destroy_usecnt(atomic_t *usecnt,
* and that the attribute mask supplied is allowed for the transition.
*/
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask,
- enum rdma_link_layer ll);
+ enum ib_qp_type type, enum ib_qp_attr_mask mask);
void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
@@ -4153,20 +4196,6 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
}
-static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
- struct ib_qp *qp, struct ib_device *device)
-{
- uobj->object = ibflow;
- ibflow->uobject = uobj;
-
- if (qp) {
- atomic_inc(&qp->usecnt);
- ibflow->qp = qp;
- }
-
- ibflow->device = device;
-}
-
/**
* rdma_roce_rescan_device - Rescan all of the network devices in the system
* and add their gids, as needed, to the relevant RoCE devices.
@@ -4179,4 +4208,38 @@ struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile);
int uverbs_destroy_def_handler(struct ib_uverbs_file *file,
struct uverbs_attr_bundle *attrs);
+
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *));
+
+int rdma_init_netdev(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *),
+ struct net_device *netdev);
+
+/**
+ * rdma_set_device_sysfs_group - Set device attributes group to have
+ * driver specific sysfs entries for
+ * the infiniband class.
+ *
+ * @device: device pointer for which attributes to be created
+ * @group: Pointer to group which should be added when device
+ * is registered with sysfs.
+ * rdma_set_device_sysfs_group() allows existing drivers to expose one
+ * group of sysfs attributes per device.
+ *
+ * NOTE: New drivers should not make use of this API; instead, new device
+ * parameters should be exposed via netlink commands. This API and mechanism
+ * exist only for existing drivers.
+ */
+static inline void
+rdma_set_device_sysfs_group(struct ib_device *dev,
+ const struct attribute_group *group)
+{
+ dev->groups[1] = group;
+}
+
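
A sketch of the expected use in an existing driver (attribute and function names invented; the registration call follows the new ib_register_device() signature from the hunk above):

	static ssize_t hw_rev_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 1);	/* placeholder value */
	}
	static DEVICE_ATTR_RO(hw_rev);

	static struct attribute *my_class_attrs[] = {
		&dev_attr_hw_rev.attr,
		NULL,
	};

	static const struct attribute_group my_attr_group = {
		.attrs = my_class_attrs,
	};

	static int my_register(struct ib_device *ibdev)
	{
		/* Set the group before registration so it is created
		 * together with the device's other sysfs entries.
		 */
		rdma_set_device_sysfs_group(ibdev, &my_attr_group);
		return ib_register_device(ibdev, "my%d", NULL);
	}
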
#endif /* IB_VERBS_H */
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 5d71a7f51a9f..60987a5903b7 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -152,7 +152,11 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
* @ps: RDMA port space.
* @qp_type: type of queue pair associated with the id.
*
- * The id holds a reference on the network namespace until it is destroyed.
+ * Returns a new rdma_cm_id. The id holds a reference on the network
+ * namespace until it is destroyed.
+ *
+ * The event handler callback serializes on the id's mutex and is
+ * allowed to sleep.
*/
#define rdma_create_id(net, event_handler, context, ps, qp_type) \
__rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \
@@ -192,7 +196,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr);
* @timeout_ms: Time to wait for resolution to complete.
*/
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- const struct sockaddr *dst_addr, int timeout_ms);
+ const struct sockaddr *dst_addr,
+ unsigned long timeout_ms);
/**
* rdma_resolve_route - Resolve the RDMA address bound to the RDMA identifier
@@ -202,7 +207,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
* Users must have first called rdma_resolve_addr to resolve a dst_addr
* into an RDMA address before calling this routine.
*/
-int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms);
+int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms);
/**
* rdma_create_qp - Allocate a QP and associate it with the specified RDMA
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index c369703fcd69..70218e6b5187 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -96,7 +96,7 @@ int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags);
/**
* Check if there are any listeners to the netlink group
* @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
+ * Returns true on success or false if no listeners.
*/
-int rdma_nl_chk_listeners(unsigned int group);
+bool rdma_nl_chk_listeners(unsigned int group);
#endif /* _RDMA_NETLINK_H */
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index e79229a0cf01..3584d0816fcd 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -149,6 +149,10 @@ struct rvt_ibport {
#define RVT_CQN_MAX 16 /* maximum length of cq name */
+#define RVT_SGE_COPY_MEMCPY 0
+#define RVT_SGE_COPY_CACHELESS 1
+#define RVT_SGE_COPY_ADAPTIVE 2
+
/*
* Things that are driver specific, module parameters in hfi1 and qib
*/
@@ -161,6 +165,9 @@ struct rvt_driver_params {
*/
unsigned int lkey_table_size;
unsigned int qp_table_size;
+ unsigned int sge_copy_mode;
+ unsigned int wss_threshold;
+ unsigned int wss_clean_period;
int qpn_start;
int qpn_inc;
int qpn_res_start;
@@ -193,6 +200,19 @@ struct rvt_ah {
u8 log_pmtu;
};
+/* memory working set size */
+struct rvt_wss {
+ unsigned long *entries;
+ atomic_t total_count;
+ atomic_t clean_counter;
+ atomic_t clean_entry;
+
+ int threshold;
+ int num_entries;
+ long pages_mask;
+ unsigned int clean_period;
+};
+
struct rvt_dev_info;
struct rvt_swqe;
struct rvt_driver_provided {
@@ -211,11 +231,18 @@ struct rvt_driver_provided {
* version requires the s_lock not to be held. The other assumes the
* s_lock is held.
*/
- void (*schedule_send)(struct rvt_qp *qp);
- void (*schedule_send_no_lock)(struct rvt_qp *qp);
+ bool (*schedule_send)(struct rvt_qp *qp);
+ bool (*schedule_send_no_lock)(struct rvt_qp *qp);
- /* Driver specific work request checking */
- int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);
+ /*
+ * Driver specific work request setup and checking.
+ * This function is allowed to perform any setup, checks, or
+ * adjustments required to the SWQE in order to be usable by
+ * underlying protocols. This includes private data structure
+ * allocations.
+ */
+ int (*setup_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ bool *call_send);
/*
* Sometimes rdmavt needs to kick the driver's send progress. That is
@@ -371,6 +398,9 @@ struct rvt_dev_info {
/* post send table */
const struct rvt_operation_params *post_parms;
+ /* opcode translation table */
+ const enum ib_wc_opcode *wc_opcode;
+
/* Driver specific helper functions */
struct rvt_driver_provided driver_f;
@@ -411,6 +441,8 @@ struct rvt_dev_info {
u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
spinlock_t n_mcast_grps_lock;
+ /* Memory Working Set Size */
+ struct rvt_wss *wss;
};
/**
@@ -423,7 +455,14 @@ static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
const char *fmt, const char *name,
const int unit)
{
- snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
+ /*
+ * FIXME: rvt and its users want to touch the ibdev before
+ * registration and have things like the name work. We don't have the
+ * infrastructure in the core to support this directly today, hack it
+ * to work by setting the name manually here.
+ */
+ dev_set_name(&rdi->ibdev.dev, fmt, name, unit);
+ strlcpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
}
/**
@@ -434,7 +473,7 @@ static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
*/
static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
{
- return rdi->ibdev.name;
+ return dev_name(&rdi->ibdev.dev);
}
static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 927f6d5b6d0f..cbafb1878669 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -678,6 +678,13 @@ void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer(struct rvt_qp *qp);
+void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
+ void *data, u32 length,
+ bool release, bool copy_last);
+void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ enum ib_wc_status status);
+void rvt_ruc_loopback(struct rvt_qp *qp);
+
/**
* struct rvt_qp_iter - the iterator for QPs
* @qp - the current QP
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 9654d33edd98..2638fa7cd702 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -173,16 +173,10 @@ int rdma_restrack_put(struct rdma_restrack_entry *res);
/**
* rdma_restrack_set_task() - set the task for this resource
* @res: resource entry
- * @task: task struct
+ * @caller: kernel entity name; the current task is used if @caller is NULL.
*/
-static inline void rdma_restrack_set_task(struct rdma_restrack_entry *res,
- struct task_struct *task)
-{
- if (res->task)
- put_task_struct(res->task);
- get_task_struct(task);
- res->task = task;
-}
+void rdma_restrack_set_task(struct rdma_restrack_entry *res,
+ const char *caller);
/*
* Helper functions for rdma drivers when filling out
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 9e997c3c2f04..84d3d15f1f38 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -52,6 +52,7 @@ enum uverbs_attr_type {
UVERBS_ATTR_TYPE_IDR,
UVERBS_ATTR_TYPE_FD,
UVERBS_ATTR_TYPE_ENUM_IN,
+ UVERBS_ATTR_TYPE_IDRS_ARRAY,
};
enum uverbs_obj_access {
@@ -101,7 +102,7 @@ struct uverbs_attr_spec {
} enum_def;
} u;
- /* This weird split of the enum lets us remove some padding */
+ /* This weird split lets us remove some padding */
union {
struct {
/*
@@ -111,6 +112,17 @@ struct uverbs_attr_spec {
*/
const struct uverbs_attr_spec *ids;
} enum_def;
+
+ struct {
+ /*
+ * The high bits hold the namespace and the low bits
+ * hold the type id within the namespace.
+ */
+ u16 obj_type;
+ u16 min_len;
+ u16 max_len;
+ u8 access;
+ } objs_arr;
} u2;
};
@@ -251,6 +263,11 @@ static inline __attribute_const__ u32 uapi_bkey_attr(u32 attr_key)
return attr_key - 1;
}
+static inline __attribute_const__ u32 uapi_bkey_to_key_attr(u32 attr_bkey)
+{
+ return attr_bkey + 1;
+}
+
/*
* =======================================
* Verbs definitions
@@ -323,6 +340,27 @@ struct uverbs_object_tree_def {
#define UA_MANDATORY .mandatory = 1
#define UA_OPTIONAL .mandatory = 0
+/*
+ * _min_len must be bigger than 0 and _max_len must be smaller than 4095. Only
+ * READ/WRITE accesses are supported.
+ */
+#define UVERBS_ATTR_IDRS_ARR(_attr_id, _idr_type, _access, _min_len, _max_len, \
+ ...) \
+ (&(const struct uverbs_attr_def){ \
+ .id = (_attr_id) + \
+ BUILD_BUG_ON_ZERO((_min_len) == 0 || \
+ (_max_len) > \
+ PAGE_SIZE / sizeof(void *) || \
+ (_min_len) > (_max_len) || \
+ (_access) == UVERBS_ACCESS_NEW || \
+ (_access) == UVERBS_ACCESS_DESTROY), \
+ .attr = { .type = UVERBS_ATTR_TYPE_IDRS_ARRAY, \
+ .u2.objs_arr.obj_type = _idr_type, \
+ .u2.objs_arr.access = _access, \
+ .u2.objs_arr.min_len = _min_len, \
+ .u2.objs_arr.max_len = _max_len, \
+ __VA_ARGS__ } })
+
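By way of example (the attribute id below is invented for illustration), a method could accept between 1 and 8 QP handles as:

    /* Illustrative only: HYPOTHETICAL_ATTR_QPS stands in for a real
     * method attribute id.
     */
    UVERBS_ATTR_IDRS_ARR(HYPOTHETICAL_ATTR_QPS, UVERBS_OBJECT_QP,
                         UVERBS_ACCESS_READ, 1, 8, UA_OPTIONAL)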
#define UVERBS_ATTR_IDR(_attr_id, _idr_type, _access, ...) \
(&(const struct uverbs_attr_def){ \
.id = _attr_id, \
@@ -365,6 +403,15 @@ struct uverbs_object_tree_def {
__VA_ARGS__ }, \
})
+/* An input value that is a member in the enum _enum_type. */
+#define UVERBS_ATTR_CONST_IN(_attr_id, _enum_type, ...) \
+ UVERBS_ATTR_PTR_IN( \
+ _attr_id, \
+ UVERBS_ATTR_SIZE( \
+ sizeof(u64) + BUILD_BUG_ON_ZERO(!sizeof(_enum_type)), \
+ sizeof(u64)), \
+ __VA_ARGS__)
+
/*
* An input value that is a bitwise combination of values of _enum_type.
* This permits the flag value to be passed as either a u32 or u64, it must
@@ -431,10 +478,16 @@ struct uverbs_obj_attr {
const struct uverbs_api_attr *attr_elm;
};
+struct uverbs_objs_arr_attr {
+ struct ib_uobject **uobjects;
+ u16 len;
+};
+
struct uverbs_attr {
union {
struct uverbs_ptr_attr ptr_attr;
struct uverbs_obj_attr obj_attr;
+ struct uverbs_objs_arr_attr objs_arr_attr;
};
};
@@ -507,6 +560,31 @@ uverbs_attr_get_len(const struct uverbs_attr_bundle *attrs_bundle, u16 idx)
return attr->ptr_attr.len;
}
+/**
+ * uverbs_attr_get_uobjs_arr() - Provides the array properties of a
+ * UVERBS_ATTR_TYPE_IDRS_ARRAY attribute.
+ * @arr: Returned pointer to array of pointers for uobjects or NULL if
+ * the attribute isn't provided.
+ *
+ * Return: The array length or 0 if no attribute was provided.
+ */
+static inline int uverbs_attr_get_uobjs_arr(
+ const struct uverbs_attr_bundle *attrs_bundle, u16 attr_idx,
+ struct ib_uobject ***arr)
+{
+ const struct uverbs_attr *attr =
+ uverbs_attr_get(attrs_bundle, attr_idx);
+
+ if (IS_ERR(attr)) {
+ *arr = NULL;
+ return 0;
+ }
+
+ *arr = attr->objs_arr_attr.uobjects;
+
+ return attr->objs_arr_attr.len;
+}
+
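A handler would consume the array as in this sketch (the attribute id is hypothetical; attrs is the method's bundle):

    struct ib_uobject **uobjs;
    int num, i;

    num = uverbs_attr_get_uobjs_arr(attrs, HYPOTHETICAL_ATTR_QPS, &uobjs);
    for (i = 0; i < num; i++) {
            /* uobjs[i]->object is the underlying verbs object */
    }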
static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr)
{
return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data);
@@ -603,6 +681,9 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
{
return _uverbs_alloc(bundle, size, GFP_KERNEL | __GFP_ZERO);
}
+int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val);
#else
static inline int
uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
@@ -631,6 +712,34 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
{
return ERR_PTR(-EINVAL);
}
+static inline int
+_uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val)
+{
+ return -EINVAL;
+}
#endif
+#define uverbs_get_const(_to, _attrs_bundle, _idx) \
+ ({ \
+ s64 _val; \
+ int _ret = _uverbs_get_const(&_val, _attrs_bundle, _idx, \
+ type_min(typeof(*_to)), \
+ type_max(typeof(*_to)), NULL); \
+ (*_to) = _val; \
+ _ret; \
+ })
+
+#define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \
+ ({ \
+ s64 _val; \
+ s64 _def_val = _default; \
+ int _ret = \
+ _uverbs_get_const(&_val, _attrs_bundle, _idx, \
+ type_min(typeof(*_to)), \
+ type_max(typeof(*_to)), &_def_val); \
+ (*_to) = _val; \
+ _ret; \
+ })
#endif
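The macro derives the permitted range from the destination's type, so a u8 target is checked against [0, U8_MAX]; a sketch with a hypothetical attribute id:

    u8 hw_action;
    int ret;

    ret = uverbs_get_const(&hw_action, attrs, HYPOTHETICAL_ATTR_ACTION);
    if (ret)
            return ret;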
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 3b00231cc084..3db2802fbc68 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -140,5 +140,56 @@ __uobj_alloc(const struct uverbs_api_object *obj, struct ib_uverbs_file *ufile,
#define uobj_alloc(_type, _ufile, _ib_dev) \
__uobj_alloc(uobj_get_type(_ufile, _type), _ufile, _ib_dev)
+static inline void uverbs_flow_action_fill_action(struct ib_flow_action *action,
+ struct ib_uobject *uobj,
+ struct ib_device *ib_dev,
+ enum ib_flow_action_type type)
+{
+ atomic_set(&action->usecnt, 0);
+ action->device = ib_dev;
+ action->type = type;
+ action->uobject = uobj;
+ uobj->object = action;
+}
+
+struct ib_uflow_resources {
+ size_t max;
+ size_t num;
+ size_t collection_num;
+ size_t counters_num;
+ struct ib_counters **counters;
+ struct ib_flow_action **collection;
+};
+
+struct ib_uflow_object {
+ struct ib_uobject uobject;
+ struct ib_uflow_resources *resources;
+};
+
+struct ib_uflow_resources *flow_resources_alloc(size_t num_specs);
+void flow_resources_add(struct ib_uflow_resources *uflow_res,
+ enum ib_flow_spec_type type,
+ void *ibobj);
+void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
+
+static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
+ struct ib_qp *qp, struct ib_device *device,
+ struct ib_uflow_resources *uflow_res)
+{
+ struct ib_uflow_object *uflow;
+
+ uobj->object = ibflow;
+ ibflow->uobject = uobj;
+
+ if (qp) {
+ atomic_inc(&qp->usecnt);
+ ibflow->qp = qp;
+ }
+
+ ibflow->device = device;
+ uflow = container_of(uobj, typeof(*uflow), uobject);
+ uflow->resources = uflow_res;
+}
+
#endif
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
index eaaf56df4086..5b99cb2ea5ef 100644
--- a/include/soc/fsl/bman.h
+++ b/include/soc/fsl/bman.h
@@ -126,4 +126,12 @@ int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
*/
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
+/**
+ * bman_is_probed - Check if bman is probed
+ *
+ * Returns 1 if the bman driver successfully probed, -1 if the bman driver
+ * failed to probe or 0 if the bman driver has not been probed yet.
+ */
+int bman_is_probed(void);
+
#endif /* __FSL_BMAN_H */
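The tri-state return is aimed at deferred probing; a dependent driver's probe might use it as in this sketch (function name hypothetical):

    static int my_dpaa_probe(struct platform_device *pdev)
    {
            int ret = bman_is_probed();

            if (!ret)
                    return -EPROBE_DEFER;   /* bman has not probed yet */
            if (ret < 0)
                    return -ENODEV;         /* bman probe failed */
            /* ... normal probe work ... */
            return 0;
    }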
diff --git a/include/soc/fsl/dpaa2-fd.h b/include/soc/fsl/dpaa2-fd.h
index 2576abaa7779..90ae8d191f1a 100644
--- a/include/soc/fsl/dpaa2-fd.h
+++ b/include/soc/fsl/dpaa2-fd.h
@@ -66,6 +66,15 @@ struct dpaa2_fd {
#define SG_BPID_MASK 0x3FFF
#define SG_FINAL_FLAG_MASK 0x1
#define SG_FINAL_FLAG_SHIFT 15
+#define FL_SHORT_LEN_FLAG_MASK 0x1
+#define FL_SHORT_LEN_FLAG_SHIFT 14
+#define FL_SHORT_LEN_MASK 0x3FFFF
+#define FL_OFFSET_MASK 0x0FFF
+#define FL_FORMAT_MASK 0x3
+#define FL_FORMAT_SHIFT 12
+#define FL_BPID_MASK 0x3FFF
+#define FL_FINAL_FLAG_MASK 0x1
+#define FL_FINAL_FLAG_SHIFT 15
/* Error bits in FD CTRL */
#define FD_CTRL_ERR_MASK 0x000000FF
@@ -435,4 +444,237 @@ static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT);
}
+/**
+ * struct dpaa2_fl_entry - structure for frame list entry.
+ * @addr: address in the FLE
+ * @len: length in the FLE
+ * @bpid: buffer pool ID
+ * @format_offset: format, offset, and short-length fields
+ * @frc: frame context
+ * @ctrl: control bits, including pta, pvt1, pvt2, err, etc.
+ * @flc: flow context address
+ */
+struct dpaa2_fl_entry {
+ __le64 addr;
+ __le32 len;
+ __le16 bpid;
+ __le16 format_offset;
+ __le32 frc;
+ __le32 ctrl;
+ __le64 flc;
+};
+
+enum dpaa2_fl_format {
+ dpaa2_fl_single = 0,
+ dpaa2_fl_res,
+ dpaa2_fl_sg
+};
+
+/**
+ * dpaa2_fl_get_addr() - get the addr field of FLE
+ * @fle: the given frame list entry
+ *
+ * Return the address in the frame list entry.
+ */
+static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle)
+{
+ return (dma_addr_t)le64_to_cpu(fle->addr);
+}
+
+/**
+ * dpaa2_fl_set_addr() - Set the addr field of FLE
+ * @fle: the given frame list entry
+ * @addr: the address to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle,
+ dma_addr_t addr)
+{
+ fle->addr = cpu_to_le64(addr);
+}
+
+/**
+ * dpaa2_fl_get_frc() - Get the frame context in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the frame context field in the frame list entry.
+ */
+static inline u32 dpaa2_fl_get_frc(const struct dpaa2_fl_entry *fle)
+{
+ return le32_to_cpu(fle->frc);
+}
+
+/**
+ * dpaa2_fl_set_frc() - Set the frame context in the FLE
+ * @fle: the given frame list entry
+ * @frc: the frame context to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_frc(struct dpaa2_fl_entry *fle, u32 frc)
+{
+ fle->frc = cpu_to_le32(frc);
+}
+
+/**
+ * dpaa2_fl_get_ctrl() - Get the control bits in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the control bits field in the frame list entry.
+ */
+static inline u32 dpaa2_fl_get_ctrl(const struct dpaa2_fl_entry *fle)
+{
+ return le32_to_cpu(fle->ctrl);
+}
+
+/**
+ * dpaa2_fl_set_ctrl() - Set the control bits in the FLE
+ * @fle: the given frame list entry
+ * @ctrl: the control bits to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_ctrl(struct dpaa2_fl_entry *fle, u32 ctrl)
+{
+ fle->ctrl = cpu_to_le32(ctrl);
+}
+
+/**
+ * dpaa2_fl_get_flc() - Get the flow context in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the flow context in the frame list entry.
+ */
+static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle)
+{
+ return (dma_addr_t)le64_to_cpu(fle->flc);
+}
+
+/**
+ * dpaa2_fl_set_flc() - Set the flow context field of FLE
+ * @fle: the given frame list entry
+ * @flc_addr: the flow context address to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle,
+ dma_addr_t flc_addr)
+{
+ fle->flc = cpu_to_le64(flc_addr);
+}
+
+static inline bool dpaa2_fl_short_len(const struct dpaa2_fl_entry *fle)
+{
+ return !!((le16_to_cpu(fle->format_offset) >>
+ FL_SHORT_LEN_FLAG_SHIFT) & FL_SHORT_LEN_FLAG_MASK);
+}
+
+/**
+ * dpaa2_fl_get_len() - Get the length in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the length field in the frame list entry.
+ */
+static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle)
+{
+ if (dpaa2_fl_short_len(fle))
+ return le32_to_cpu(fle->len) & FL_SHORT_LEN_MASK;
+
+ return le32_to_cpu(fle->len);
+}
+
+/**
+ * dpaa2_fl_set_len() - Set the length field of FLE
+ * @fle: the given frame list entry
+ * @len: the length to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len)
+{
+ fle->len = cpu_to_le32(len);
+}
+
+/**
+ * dpaa2_fl_get_offset() - Get the offset field in the frame list entry
+ * @fle: the given frame list entry
+ *
+ * Return the offset.
+ */
+static inline u16 dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle)
+{
+ return le16_to_cpu(fle->format_offset) & FL_OFFSET_MASK;
+}
+
+/**
+ * dpaa2_fl_set_offset() - Set the offset field of FLE
+ * @fle: the given frame list entry
+ * @offset: the offset to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, u16 offset)
+{
+ fle->format_offset &= cpu_to_le16(~FL_OFFSET_MASK);
+ fle->format_offset |= cpu_to_le16(offset);
+}
+
+/**
+ * dpaa2_fl_get_format() - Get the format field in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the format.
+ */
+static inline enum dpaa2_fl_format dpaa2_fl_get_format(const struct dpaa2_fl_entry *fle)
+{
+ return (enum dpaa2_fl_format)((le16_to_cpu(fle->format_offset) >>
+ FL_FORMAT_SHIFT) & FL_FORMAT_MASK);
+}
+
+/**
+ * dpaa2_fl_set_format() - Set the format field of FLE
+ * @fle: the given frame list entry
+ * @format: the format to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle,
+ enum dpaa2_fl_format format)
+{
+ fle->format_offset &= cpu_to_le16(~(FL_FORMAT_MASK << FL_FORMAT_SHIFT));
+ fle->format_offset |= cpu_to_le16(format << FL_FORMAT_SHIFT);
+}
+
+/**
+ * dpaa2_fl_get_bpid() - Get the bpid field in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the buffer pool id.
+ */
+static inline u16 dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle)
+{
+ return le16_to_cpu(fle->bpid) & FL_BPID_MASK;
+}
+
+/**
+ * dpaa2_fl_set_bpid() - Set the bpid field of FLE
+ * @fle: the given frame list entry
+ * @bpid: buffer pool id to be set
+ */
+static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, u16 bpid)
+{
+ fle->bpid &= cpu_to_le16(~(FL_BPID_MASK));
+ fle->bpid |= cpu_to_le16(bpid);
+}
+
+/**
+ * dpaa2_fl_is_final() - Check final bit in FLE
+ * @fle: the given frame list entry
+ *
+ * Return true if the final bit is set.
+ */
+static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle)
+{
+ return !!(le16_to_cpu(fle->format_offset) >> FL_FINAL_FLAG_SHIFT);
+}
+
+/**
+ * dpaa2_fl_set_final() - Set the final bit in FLE
+ * @fle: the given frame list entry
+ * @final: the final boolean to be set
+ */
+static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final)
+{
+ fle->format_offset &= cpu_to_le16((~(FL_FINAL_FLAG_MASK <<
+ FL_FINAL_FLAG_SHIFT)) & 0xFFFF);
+ fle->format_offset |= cpu_to_le16(final << FL_FINAL_FLAG_SHIFT);
+}
+
#endif /* __FSL_DPAA2_FD_H */
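Putting the new accessors together, a single-buffer frame-list entry could be filled as in this sketch (buf_dma_addr and buf_len are placeholders):

    struct dpaa2_fl_entry fle = {};

    dpaa2_fl_set_addr(&fle, buf_dma_addr);
    dpaa2_fl_set_len(&fle, buf_len);
    dpaa2_fl_set_format(&fle, dpaa2_fl_single);
    dpaa2_fl_set_final(&fle, true);         /* last entry in the list */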
diff --git a/include/soc/fsl/dpaa2-global.h b/include/soc/fsl/dpaa2-global.h
index 9bc0713346a8..2bfc379d3dc9 100644
--- a/include/soc/fsl/dpaa2-global.h
+++ b/include/soc/fsl/dpaa2-global.h
@@ -174,4 +174,19 @@ static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
return (const struct dpaa2_fd *)&dq->dq.fd[0];
}
+#define DPAA2_CSCN_SIZE sizeof(struct dpaa2_dq)
+#define DPAA2_CSCN_ALIGN 16
+#define DPAA2_CSCN_STATE_CG BIT(0)
+
+/**
+ * dpaa2_cscn_state_congested() - Check congestion state
+ * @cscn: congestion SCN (delivered to WQ or memory)
+ *
+ * Return true if congested.
+ */
+static inline bool dpaa2_cscn_state_congested(struct dpaa2_dq *cscn)
+{
+ return !!(cscn->scn.state & DPAA2_CSCN_STATE_CG);
+}
+
#endif /* __FSL_DPAA2_GLOBAL_H */
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
index ab51e40d11db..70997ab2146c 100644
--- a/include/soc/fsl/dpaa2-io.h
+++ b/include/soc/fsl/dpaa2-io.h
@@ -97,9 +97,13 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service,
int dpaa2_io_service_rearm(struct dpaa2_io *service,
struct dpaa2_io_notification_ctx *ctx);
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+ struct dpaa2_io_store *s);
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
struct dpaa2_io_store *s);
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
+ const struct dpaa2_fd *fd);
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
u16 qdbin, const struct dpaa2_fd *fd);
int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h
index 3ee9e7c1a7d7..dcd6b865b590 100644
--- a/include/soc/fsl/qe/ucc_fast.h
+++ b/include/soc/fsl/qe/ucc_fast.h
@@ -41,8 +41,12 @@
#define R_L_S 0x0800 /* last */
#define R_F_S 0x0400 /* first */
#define R_CM_S 0x0200 /* continuous mode */
+#define R_LG_S 0x0020 /* frame length */
+#define R_NO_S 0x0010 /* nonoctet */
+#define R_AB_S 0x0008 /* abort */
#define R_CR_S 0x0004 /* crc */
-#define R_OV_S 0x0002 /* crc */
+#define R_OV_S 0x0002 /* overrun */
+#define R_CD_S 0x0001 /* carrier detect */
/* transmit BD's status */
#define T_R_S 0x8000 /* ready bit */
@@ -51,6 +55,8 @@
#define T_L_S 0x0800 /* last */
#define T_TC_S 0x0400 /* crc */
#define T_TM_S 0x0200 /* continuous mode */
+#define T_UN_S 0x0002 /* hdlc underrun */
+#define T_CT_S 0x0001 /* hdlc carrier lost */
/* Rx Data buffer must be 4 bytes aligned in most cases */
#define UCC_FAST_RX_ALIGN 4
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index d4dfefdee6c1..597783b8a3a0 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -1186,4 +1186,12 @@ int qman_alloc_cgrid_range(u32 *result, u32 count);
*/
int qman_release_cgrid(u32 id);
+/**
+ * qman_is_probed - Check if qman is probed
+ *
+ * Returns 1 if the qman driver successfully probed, -1 if the qman driver
+ * failed to probe or 0 if the qman driver has not been probed yet.
+ */
+int qman_is_probed(void);
+
#endif /* __FSL_QMAN_H */
diff --git a/drivers/net/ethernet/mscc/ocelot_hsio.h b/include/soc/mscc/ocelot_hsio.h
index d93ddec3931b..43112dd7313a 100644
--- a/drivers/net/ethernet/mscc/ocelot_hsio.h
+++ b/include/soc/mscc/ocelot_hsio.h
@@ -8,6 +8,80 @@
#ifndef _MSCC_OCELOT_HSIO_H_
#define _MSCC_OCELOT_HSIO_H_
+#define HSIO_PLL5G_CFG0 0x0000
+#define HSIO_PLL5G_CFG1 0x0004
+#define HSIO_PLL5G_CFG2 0x0008
+#define HSIO_PLL5G_CFG3 0x000c
+#define HSIO_PLL5G_CFG4 0x0010
+#define HSIO_PLL5G_CFG5 0x0014
+#define HSIO_PLL5G_CFG6 0x0018
+#define HSIO_PLL5G_STATUS0 0x001c
+#define HSIO_PLL5G_STATUS1 0x0020
+#define HSIO_PLL5G_BIST_CFG0 0x0024
+#define HSIO_PLL5G_BIST_CFG1 0x0028
+#define HSIO_PLL5G_BIST_CFG2 0x002c
+#define HSIO_PLL5G_BIST_STAT0 0x0030
+#define HSIO_PLL5G_BIST_STAT1 0x0034
+#define HSIO_RCOMP_CFG0 0x0038
+#define HSIO_RCOMP_STATUS 0x003c
+#define HSIO_SYNC_ETH_CFG 0x0040
+#define HSIO_SYNC_ETH_PLL_CFG 0x0048
+#define HSIO_S1G_DES_CFG 0x004c
+#define HSIO_S1G_IB_CFG 0x0050
+#define HSIO_S1G_OB_CFG 0x0054
+#define HSIO_S1G_SER_CFG 0x0058
+#define HSIO_S1G_COMMON_CFG 0x005c
+#define HSIO_S1G_PLL_CFG 0x0060
+#define HSIO_S1G_PLL_STATUS 0x0064
+#define HSIO_S1G_DFT_CFG0 0x0068
+#define HSIO_S1G_DFT_CFG1 0x006c
+#define HSIO_S1G_DFT_CFG2 0x0070
+#define HSIO_S1G_TP_CFG 0x0074
+#define HSIO_S1G_RC_PLL_BIST_CFG 0x0078
+#define HSIO_S1G_MISC_CFG 0x007c
+#define HSIO_S1G_DFT_STATUS 0x0080
+#define HSIO_S1G_MISC_STATUS 0x0084
+#define HSIO_MCB_S1G_ADDR_CFG 0x0088
+#define HSIO_S6G_DIG_CFG 0x008c
+#define HSIO_S6G_DFT_CFG0 0x0090
+#define HSIO_S6G_DFT_CFG1 0x0094
+#define HSIO_S6G_DFT_CFG2 0x0098
+#define HSIO_S6G_TP_CFG0 0x009c
+#define HSIO_S6G_TP_CFG1 0x00a0
+#define HSIO_S6G_RC_PLL_BIST_CFG 0x00a4
+#define HSIO_S6G_MISC_CFG 0x00a8
+#define HSIO_S6G_OB_ANEG_CFG 0x00ac
+#define HSIO_S6G_DFT_STATUS 0x00b0
+#define HSIO_S6G_ERR_CNT 0x00b4
+#define HSIO_S6G_MISC_STATUS 0x00b8
+#define HSIO_S6G_DES_CFG 0x00bc
+#define HSIO_S6G_IB_CFG 0x00c0
+#define HSIO_S6G_IB_CFG1 0x00c4
+#define HSIO_S6G_IB_CFG2 0x00c8
+#define HSIO_S6G_IB_CFG3 0x00cc
+#define HSIO_S6G_IB_CFG4 0x00d0
+#define HSIO_S6G_IB_CFG5 0x00d4
+#define HSIO_S6G_OB_CFG 0x00d8
+#define HSIO_S6G_OB_CFG1 0x00dc
+#define HSIO_S6G_SER_CFG 0x00e0
+#define HSIO_S6G_COMMON_CFG 0x00e4
+#define HSIO_S6G_PLL_CFG 0x00e8
+#define HSIO_S6G_ACJTAG_CFG 0x00ec
+#define HSIO_S6G_GP_CFG 0x00f0
+#define HSIO_S6G_IB_STATUS0 0x00f4
+#define HSIO_S6G_IB_STATUS1 0x00f8
+#define HSIO_S6G_ACJTAG_STATUS 0x00fc
+#define HSIO_S6G_PLL_STATUS 0x0100
+#define HSIO_S6G_REVID 0x0104
+#define HSIO_MCB_S6G_ADDR_CFG 0x0108
+#define HSIO_HW_CFG 0x010c
+#define HSIO_HW_QSGMII_CFG 0x0110
+#define HSIO_HW_QSGMII_STAT 0x0114
+#define HSIO_CLK_CFG 0x0118
+#define HSIO_TEMP_SENSOR_CTRL 0x011c
+#define HSIO_TEMP_SENSOR_CFG 0x0120
+#define HSIO_TEMP_SENSOR_STAT 0x0124
+
#define HSIO_PLL5G_CFG0_ENA_ROT BIT(31)
#define HSIO_PLL5G_CFG0_ENA_LANE BIT(30)
#define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29)
diff --git a/sound/pci/hda/hda_codec.h b/include/sound/hda_codec.h
index 0d98bb9068b1..0d98bb9068b1 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/include/sound/hda_codec.h
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 6f1e1f3b3063..cd1773d0e08f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -412,6 +412,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
+int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 67561b997915..af3fa577fa06 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -47,10 +47,13 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
+#define SNDRV_DMA_TYPE_DEV_UC 5 /* continuous non-cached */
#ifdef CONFIG_SND_DMA_SGBUF
#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
+#define SNDRV_DMA_TYPE_DEV_UC_SG 6 /* SG non-cached */
#else
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+#define SNDRV_DMA_TYPE_DEV_UC_SG SNDRV_DMA_TYPE_DEV_UC
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 6665cb29e1a2..3b5a061132b6 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -171,6 +171,7 @@ int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count);
int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
int count);
+int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream);
/* main midi functions */
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 8bc5e2d8b13c..fb0318f9b10f 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -51,29 +51,35 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
#define asoc_simple_card_parse_clk_cpu(dev, node, dai_link, simple_dai) \
asoc_simple_card_parse_clk(dev, node, dai_link->cpu_of_node, simple_dai, \
- dai_link->cpu_dai_name)
+ dai_link->cpu_dai_name, NULL)
#define asoc_simple_card_parse_clk_codec(dev, node, dai_link, simple_dai) \
asoc_simple_card_parse_clk(dev, node, dai_link->codec_of_node, simple_dai,\
- dai_link->codec_dai_name)
+ dai_link->codec_dai_name, dai_link->codecs)
int asoc_simple_card_parse_clk(struct device *dev,
struct device_node *node,
struct device_node *dai_of_node,
struct asoc_simple_dai *simple_dai,
- const char *name);
+ const char *dai_name,
+ struct snd_soc_dai_link_component *dlc);
int asoc_simple_card_clk_enable(struct asoc_simple_dai *dai);
void asoc_simple_card_clk_disable(struct asoc_simple_dai *dai);
#define asoc_simple_card_parse_cpu(node, dai_link, \
list_name, cells_name, is_single_link) \
- asoc_simple_card_parse_dai(node, &dai_link->cpu_of_node, \
+ asoc_simple_card_parse_dai(node, NULL, \
+ &dai_link->cpu_of_node, \
&dai_link->cpu_dai_name, list_name, cells_name, is_single_link)
#define asoc_simple_card_parse_codec(node, dai_link, list_name, cells_name) \
- asoc_simple_card_parse_dai(node, &dai_link->codec_of_node, \
- &dai_link->codec_dai_name, list_name, cells_name, NULL)
+ asoc_simple_card_parse_dai(node, dai_link->codecs, \
+ &dai_link->codec_of_node, \
+ &dai_link->codec_dai_name, \
+ list_name, cells_name, NULL)
#define asoc_simple_card_parse_platform(node, dai_link, list_name, cells_name) \
- asoc_simple_card_parse_dai(node, &dai_link->platform_of_node, \
+ asoc_simple_card_parse_dai(node, dai_link->platform, \
+ &dai_link->platform_of_node, \
NULL, list_name, cells_name, NULL)
int asoc_simple_card_parse_dai(struct device_node *node,
+ struct snd_soc_dai_link_component *dlc,
struct device_node **endpoint_np,
const char **dai_name,
const char *list_name,
@@ -81,12 +87,15 @@ int asoc_simple_card_parse_dai(struct device_node *node,
int *is_single_links);
#define asoc_simple_card_parse_graph_cpu(ep, dai_link) \
- asoc_simple_card_parse_graph_dai(ep, &dai_link->cpu_of_node, \
+ asoc_simple_card_parse_graph_dai(ep, NULL, \
+ &dai_link->cpu_of_node, \
&dai_link->cpu_dai_name)
#define asoc_simple_card_parse_graph_codec(ep, dai_link) \
- asoc_simple_card_parse_graph_dai(ep, &dai_link->codec_of_node, \
+ asoc_simple_card_parse_graph_dai(ep, dai_link->codecs, \
+ &dai_link->codec_of_node, \
&dai_link->codec_dai_name)
int asoc_simple_card_parse_graph_dai(struct device_node *ep,
+ struct snd_soc_dai_link_component *dlc,
struct device_node **endpoint_np,
const char **dai_name);
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index bb1d24b703fb..f48f59e5b7b0 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -25,4 +25,10 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
+/*
+ * generic table used for HDA codec-based platforms, possibly with
+ * additional ACPI-enumerated codecs
+ */
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_hda_machines[];
+
#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index af9ef16cc34d..bd8163f151cb 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -406,11 +406,6 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
struct snd_soc_dai *dai);
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
-int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
- const struct snd_soc_pcm_stream *params,
- unsigned int num_params,
- struct snd_soc_dapm_widget *source,
- struct snd_soc_dapm_widget *sink);
/* dapm path setup */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
@@ -589,9 +584,6 @@ struct snd_soc_dapm_widget {
void *priv; /* widget specific data */
struct regulator *regulator; /* attached regulator */
struct pinctrl *pinctrl; /* attached pinctrl */
- const struct snd_soc_pcm_stream *params; /* params for dai links */
- unsigned int num_params; /* number of params for dai links */
- unsigned int params_select; /* currently selected param for dai link */
/* dapm control */
int reg; /* negative reg = no direct dapm */
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 9bb92f187af8..4be3a2b7c106 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -103,6 +103,16 @@ struct snd_soc_dpcm_runtime {
int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
};
+#define for_each_dpcm_fe(be, stream, dpcm) \
+ list_for_each_entry(dpcm, &(be)->dpcm[stream].fe_clients, list_fe)
+
+#define for_each_dpcm_be(fe, stream, dpcm) \
+ list_for_each_entry(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_safe(fe, stream, dpcm, _dpcm) \
+ list_for_each_entry_safe(dpcm, _dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_rollback(fe, stream, dpcm) \
+ list_for_each_entry_continue_reverse(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+
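These wrap list_for_each_entry over the DPCM client lists; a sketch of walking a front end's playback back ends:

    struct snd_soc_dpcm *dpcm;

    for_each_dpcm_be(fe, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
            struct snd_soc_pcm_runtime *be = dpcm->be;
            /* operate on each back end attached to this front end */
    }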
/* can this BE stop and free */
int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
struct snd_soc_pcm_runtime *be, int stream);
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 41cec42fb456..f1dab1f4b194 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -372,6 +372,11 @@
#define SND_SOC_COMP_ORDER_LATE 1
#define SND_SOC_COMP_ORDER_LAST 2
+#define for_each_comp_order(order) \
+ for (order = SND_SOC_COMP_ORDER_FIRST; \
+ order <= SND_SOC_COMP_ORDER_LAST; \
+ order++)
+
/*
* Bias levels
*
@@ -859,6 +864,11 @@ struct snd_soc_component {
#endif
};
+#define for_each_component_dais(component, dai)\
+ list_for_each_entry(dai, &(component)->dai_list, list)
+#define for_each_component_dais_safe(component, dai, _dai)\
+ list_for_each_entry_safe(dai, _dai, &(component)->dai_list, list)
+
struct snd_soc_rtdcom_list {
struct snd_soc_component *component;
struct list_head list; /* rtd::component_list */
@@ -915,6 +925,8 @@ struct snd_soc_dai_link {
*/
const char *platform_name;
struct device_node *platform_of_node;
+ struct snd_soc_dai_link_component *platform;
+
int id; /* optional ID for machine driver link identification */
const struct snd_soc_pcm_stream *params;
@@ -976,6 +988,10 @@ struct snd_soc_dai_link {
struct list_head list; /* DAI link list of the soc card */
struct snd_soc_dobj dobj; /* For topology */
};
+#define for_each_link_codecs(link, i, codec) \
+ for ((i) = 0; \
+ ((i) < link->num_codecs) && ((codec) = &link->codecs[i]); \
+ (i)++)
struct snd_soc_codec_conf {
/*
@@ -1054,7 +1070,6 @@ struct snd_soc_card {
struct snd_soc_dai_link *dai_link; /* predefined links only */
int num_links; /* predefined links only */
struct list_head dai_link_list; /* all links */
- int num_dai_links;
struct list_head rtd_list;
int num_rtd;
@@ -1092,6 +1107,7 @@ struct snd_soc_card {
/* lists of probed devices belonging to this card */
struct list_head component_dev_list;
+ struct list_head list;
struct list_head widgets;
struct list_head paths;
@@ -1114,6 +1130,23 @@ struct snd_soc_card {
void *drvdata;
};
+#define for_each_card_prelinks(card, i, link) \
+ for ((i) = 0; \
+ ((i) < (card)->num_links) && ((link) = &(card)->dai_link[i]); \
+ (i)++)
+
+#define for_each_card_links(card, link) \
+ list_for_each_entry(dai_link, &(card)->dai_link_list, list)
+#define for_each_card_links_safe(card, link, _link) \
+ list_for_each_entry_safe(link, _link, &(card)->dai_link_list, list)
+
+#define for_each_card_rtds(card, rtd) \
+ list_for_each_entry(rtd, &(card)->rtd_list, list)
+#define for_each_card_rtds_safe(card, rtd, _rtd) \
+ list_for_each_entry_safe(rtd, _rtd, &(card)->rtd_list, list)
+
+#define for_each_card_components(card, component) \
+ list_for_each_entry(component, &(card)->component_dev_list, card_list)
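The prelink iterator, for instance, replaces an open-coded index loop; used roughly as:

    struct snd_soc_dai_link *link;
    int i;

    for_each_card_prelinks(card, i, link) {
            /* inspect or patch each statically defined dai_link */
    }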
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
struct snd_soc_pcm_runtime {
@@ -1124,6 +1157,8 @@ struct snd_soc_pcm_runtime {
enum snd_soc_pcm_subclass pcm_subclass;
struct snd_pcm_ops ops;
+ unsigned int params_select; /* currently selected param for dai link */
+
/* Dynamic PCM BE runtime data */
struct snd_soc_dpcm_runtime dpcm[2];
int fe_compr;
@@ -1152,6 +1187,13 @@ struct snd_soc_pcm_runtime {
unsigned int dev_registered:1;
unsigned int pop_wait:1;
};
+#define for_each_rtd_codec_dai(rtd, i, dai)\
+ for ((i) = 0; \
+ ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
+ (i)++)
+#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
+ for (; ((i--) >= 0) && ((dai) = rtd->codec_dais[i]);)
+
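The rollback variant unwinds a partially completed forward walk; a hedged sketch of the intended pattern (the two dai ops are hypothetical):

    struct snd_soc_dai *codec_dai;
    int i, ret;

    for_each_rtd_codec_dai(rtd, i, codec_dai) {
            ret = some_dai_op(codec_dai);
            if (ret < 0)
                    goto rollback;
    }
    return 0;

    rollback:
    for_each_rtd_codec_dai_rollback(rtd, i, codec_dai)
            undo_dai_op(codec_dai);
    return ret;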
/* mixer control */
struct soc_mixer_control {
@@ -1359,6 +1401,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
INIT_LIST_HEAD(&card->dapm_list);
INIT_LIST_HEAD(&card->aux_comp_list);
INIT_LIST_HEAD(&card->component_dev_list);
+ INIT_LIST_HEAD(&card->list);
}
static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc)
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index f2e6abea8490..24c398f4a68f 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -25,6 +25,7 @@ struct sock;
#define ISCSIT_TCP_BACKLOG 256
#define ISCSI_RX_THREAD_NAME "iscsi_trx"
#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
+#define ISCSI_IQN_LEN 224
/* struct iscsi_node_attrib sanity values */
#define NA_DATAOUT_TIMEOUT 3
@@ -270,9 +271,9 @@ struct iscsi_conn_ops {
};
struct iscsi_sess_ops {
- char InitiatorName[224];
+ char InitiatorName[ISCSI_IQN_LEN];
char InitiatorAlias[256];
- char TargetName[224];
+ char TargetName[ISCSI_IQN_LEN];
char TargetAlias[256];
char TargetAddress[256];
u16 TargetPortalGroupTag; /* [0..65535] */
@@ -855,7 +856,6 @@ struct iscsi_wwn_stat_grps {
};
struct iscsi_tiqn {
-#define ISCSI_IQN_LEN 224
unsigned char tiqn[ISCSI_IQN_LEN];
enum tiqn_state_table tiqn_state;
int tiqn_access_count;
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index 4d75a2c426ca..ff6a47209313 100644
--- a/include/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -33,7 +33,7 @@ struct iscsi_sess_err_stats {
u32 cxn_timeout_errors;
u32 pdu_format_errors;
u32 last_sess_failure_type;
- char last_sess_fail_rem_name[224];
+ char last_sess_fail_rem_name[ISCSI_IQN_LEN];
} ____cacheline_aligned;
/* iSCSI login failure types (sub oids) */
@@ -56,7 +56,7 @@ struct iscsi_login_stats {
u32 last_fail_type;
int last_intr_fail_ip_family;
struct sockaddr_storage last_intr_fail_sockaddr;
- char last_intr_fail_name[224];
+ char last_intr_fail_name[ISCSI_IQN_LEN];
} ____cacheline_aligned;
/* iSCSI logout stats */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7a4ee7852ca4..e3bdb0550a59 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -138,7 +138,6 @@ enum se_cmd_flags_table {
SCF_ALUA_NON_OPTIMIZED = 0x00008000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
SCF_COMPARE_AND_WRITE = 0x00080000,
- SCF_COMPARE_AND_WRITE_POST = 0x00100000,
SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
SCF_ACK_KREF = 0x00400000,
SCF_USE_CPUID = 0x00800000,
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index b401c4e36394..8568946f491d 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -316,7 +316,7 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
),
TP_fast_assign_btrfs(bi->root->fs_info,
- __entry->root_obj = bi->root->objectid;
+ __entry->root_obj = bi->root->root_key.objectid;
__entry->ino = btrfs_ino(bi);
__entry->isize = bi->vfs_inode.i_size;
__entry->disk_isize = bi->disk_i_size;
@@ -367,7 +367,7 @@ DECLARE_EVENT_CLASS(
TP_fast_assign_btrfs(
bi->root->fs_info,
- __entry->root_obj = bi->root->objectid;
+ __entry->root_obj = bi->root->root_key.objectid;
__entry->ino = btrfs_ino(bi);
__entry->isize = bi->vfs_inode.i_size;
__entry->disk_isize = bi->disk_i_size;
@@ -1477,7 +1477,8 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
),
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
- __entry->rootid = BTRFS_I(inode)->root->objectid;
+ __entry->rootid =
+ BTRFS_I(inode)->root->root_key.objectid;
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->start = start;
__entry->len = len;
@@ -1575,6 +1576,27 @@ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
TP_ARGS(fs_info, rec)
);
+TRACE_EVENT(qgroup_num_dirty_extents,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 transid,
+ u64 num_dirty_extents),
+
+ TP_ARGS(fs_info, transid, num_dirty_extents),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, transid )
+ __field( u64, num_dirty_extents )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->transid = transid;
+ __entry->num_dirty_extents = num_dirty_extents;
+ ),
+
+ TP_printk_btrfs("transid=%llu num_dirty_extents=%llu",
+ __entry->transid, __entry->num_dirty_extents)
+);
+
TRACE_EVENT(btrfs_qgroup_account_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 transid, u64 bytenr,
@@ -1675,7 +1697,7 @@ TRACE_EVENT(qgroup_meta_reserve,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->objectid;
+ __entry->refroot = root->root_key.objectid;
__entry->diff = diff;
),
@@ -1697,7 +1719,7 @@ TRACE_EVENT(qgroup_meta_convert,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->objectid;
+ __entry->refroot = root->root_key.objectid;
__entry->diff = diff;
),
@@ -1721,7 +1743,7 @@ TRACE_EVENT(qgroup_meta_free_all_pertrans,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->objectid;
+ __entry->refroot = root->root_key.objectid;
spin_lock(&root->qgroup_meta_rsv_lock);
__entry->diff = -(s64)root->qgroup_meta_rsv_pertrans;
spin_unlock(&root->qgroup_meta_rsv_lock);
@@ -1802,7 +1824,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->objectid;
+ __entry->root_objectid = root->root_key.objectid;
__entry->ino = ino;
__entry->mod = mod;
),
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 0e31eb136c57..698e0d8a5ca4 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -17,6 +17,7 @@ struct mpage_da_data;
struct ext4_map_blocks;
struct extent_status;
struct ext4_fsmap;
+struct partial_cluster;
#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
@@ -2035,21 +2036,23 @@ TRACE_EVENT(ext4_ext_show_extent,
);
TRACE_EVENT(ext4_remove_blocks,
- TP_PROTO(struct inode *inode, struct ext4_extent *ex,
- ext4_lblk_t from, ext4_fsblk_t to,
- long long partial_cluster),
+ TP_PROTO(struct inode *inode, struct ext4_extent *ex,
+ ext4_lblk_t from, ext4_fsblk_t to,
+ struct partial_cluster *pc),
- TP_ARGS(inode, ex, from, to, partial_cluster),
+ TP_ARGS(inode, ex, from, to, pc),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( ext4_lblk_t, from )
__field( ext4_lblk_t, to )
- __field( long long, partial )
__field( ext4_fsblk_t, ee_pblk )
__field( ext4_lblk_t, ee_lblk )
__field( unsigned short, ee_len )
+ __field( ext4_fsblk_t, pc_pclu )
+ __field( ext4_lblk_t, pc_lblk )
+ __field( int, pc_state)
),
TP_fast_assign(
@@ -2057,14 +2060,16 @@ TRACE_EVENT(ext4_remove_blocks,
__entry->ino = inode->i_ino;
__entry->from = from;
__entry->to = to;
- __entry->partial = partial_cluster;
__entry->ee_pblk = ext4_ext_pblock(ex);
__entry->ee_lblk = le32_to_cpu(ex->ee_block);
__entry->ee_len = ext4_ext_get_actual_len(ex);
+ __entry->pc_pclu = pc->pclu;
+ __entry->pc_lblk = pc->lblk;
+ __entry->pc_state = pc->state;
),
TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]"
- "from %u to %u partial_cluster %lld",
+ "from %u to %u partial [pclu %lld lblk %u state %d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->ee_lblk,
@@ -2072,45 +2077,53 @@ TRACE_EVENT(ext4_remove_blocks,
(unsigned short) __entry->ee_len,
(unsigned) __entry->from,
(unsigned) __entry->to,
- (long long) __entry->partial)
+ (long long) __entry->pc_pclu,
+ (unsigned int) __entry->pc_lblk,
+ (int) __entry->pc_state)
);
TRACE_EVENT(ext4_ext_rm_leaf,
TP_PROTO(struct inode *inode, ext4_lblk_t start,
struct ext4_extent *ex,
- long long partial_cluster),
+ struct partial_cluster *pc),
- TP_ARGS(inode, start, ex, partial_cluster),
+ TP_ARGS(inode, start, ex, pc),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( long long, partial )
__field( ext4_lblk_t, start )
__field( ext4_lblk_t, ee_lblk )
__field( ext4_fsblk_t, ee_pblk )
__field( short, ee_len )
+ __field( ext4_fsblk_t, pc_pclu )
+ __field( ext4_lblk_t, pc_lblk )
+ __field( int, pc_state)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->partial = partial_cluster;
__entry->start = start;
__entry->ee_lblk = le32_to_cpu(ex->ee_block);
__entry->ee_pblk = ext4_ext_pblock(ex);
__entry->ee_len = ext4_ext_get_actual_len(ex);
+ __entry->pc_pclu = pc->pclu;
+ __entry->pc_lblk = pc->lblk;
+ __entry->pc_state = pc->state;
),
TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]"
- "partial_cluster %lld",
+ "partial [pclu %lld lblk %u state %d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->start,
(unsigned) __entry->ee_lblk,
(unsigned long long) __entry->ee_pblk,
(unsigned short) __entry->ee_len,
- (long long) __entry->partial)
+ (long long) __entry->pc_pclu,
+ (unsigned int) __entry->pc_lblk,
+ (int) __entry->pc_state)
);
TRACE_EVENT(ext4_ext_rm_idx,
@@ -2168,9 +2181,9 @@ TRACE_EVENT(ext4_ext_remove_space,
TRACE_EVENT(ext4_ext_remove_space_done,
TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end,
- int depth, long long partial, __le16 eh_entries),
+ int depth, struct partial_cluster *pc, __le16 eh_entries),
- TP_ARGS(inode, start, end, depth, partial, eh_entries),
+ TP_ARGS(inode, start, end, depth, pc, eh_entries),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -2178,7 +2191,9 @@ TRACE_EVENT(ext4_ext_remove_space_done,
__field( ext4_lblk_t, start )
__field( ext4_lblk_t, end )
__field( int, depth )
- __field( long long, partial )
+ __field( ext4_fsblk_t, pc_pclu )
+ __field( ext4_lblk_t, pc_lblk )
+ __field( int, pc_state )
__field( unsigned short, eh_entries )
),
@@ -2188,18 +2203,23 @@ TRACE_EVENT(ext4_ext_remove_space_done,
__entry->start = start;
__entry->end = end;
__entry->depth = depth;
- __entry->partial = partial;
+ __entry->pc_pclu = pc->pclu;
+ __entry->pc_lblk = pc->lblk;
+ __entry->pc_state = pc->state;
__entry->eh_entries = le16_to_cpu(eh_entries);
),
- TP_printk("dev %d,%d ino %lu since %u end %u depth %d partial %lld "
+ TP_printk("dev %d,%d ino %lu since %u end %u depth %d "
+ "partial [pclu %lld lblk %u state %d] "
"remaining_entries %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->start,
(unsigned) __entry->end,
__entry->depth,
- (long long) __entry->partial,
+ (long long) __entry->pc_pclu,
+ (unsigned int) __entry->pc_lblk,
+ (int) __entry->pc_state,
(unsigned short) __entry->eh_entries)
);
@@ -2270,7 +2290,7 @@ TRACE_EVENT(ext4_es_remove_extent,
__entry->lblk, __entry->len)
);
-TRACE_EVENT(ext4_es_find_delayed_extent_range_enter,
+TRACE_EVENT(ext4_es_find_extent_range_enter,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
TP_ARGS(inode, lblk),
@@ -2292,7 +2312,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_enter,
(unsigned long) __entry->ino, __entry->lblk)
);
-TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
+TRACE_EVENT(ext4_es_find_extent_range_exit,
TP_PROTO(struct inode *inode, struct extent_status *es),
TP_ARGS(inode, es),
@@ -2512,6 +2532,41 @@ TRACE_EVENT(ext4_es_shrink,
__entry->scan_time, __entry->nr_skipped, __entry->retried)
);
+TRACE_EVENT(ext4_es_insert_delayed_block,
+ TP_PROTO(struct inode *inode, struct extent_status *es,
+ bool allocated),
+
+ TP_ARGS(inode, es, allocated),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( ext4_lblk_t, lblk )
+ __field( ext4_lblk_t, len )
+ __field( ext4_fsblk_t, pblk )
+ __field( char, status )
+ __field( bool, allocated )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->lblk = es->es_lblk;
+ __entry->len = es->es_len;
+ __entry->pblk = ext4_es_pblock(es);
+ __entry->status = ext4_es_status(es);
+ __entry->allocated = allocated;
+ ),
+
+ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s "
+ "allocated %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->lblk, __entry->len,
+ __entry->pblk, show_extent_status(__entry->status),
+ __entry->allocated)
+);
+
/* fsmap traces */
DECLARE_EVENT_CLASS(ext4_fsmap_class,
TP_PROTO(struct super_block *sb, u32 keydev, u32 agno, u64 bno, u64 len,
diff --git a/include/trace/events/hwmon.h b/include/trace/events/hwmon.h
new file mode 100644
index 000000000000..d7a1d0ffb679
--- /dev/null
+++ b/include/trace/events/hwmon.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hwmon
+
+#if !defined(_TRACE_HWMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HWMON_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(hwmon_attr_class,
+
+ TP_PROTO(int index, const char *attr_name, long val),
+
+ TP_ARGS(index, attr_name, val),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __string(attr_name, attr_name)
+ __field(long, val)
+ ),
+
+ TP_fast_assign(
+ __entry->index = index;
+ __assign_str(attr_name, attr_name);
+ __entry->val = val;
+ ),
+
+ TP_printk("index=%d, attr_name=%s, val=%ld",
+ __entry->index, __get_str(attr_name), __entry->val)
+);
+
+DEFINE_EVENT(hwmon_attr_class, hwmon_attr_show,
+
+ TP_PROTO(int index, const char *attr_name, long val),
+
+ TP_ARGS(index, attr_name, val)
+);
+
+DEFINE_EVENT(hwmon_attr_class, hwmon_attr_store,
+
+ TP_PROTO(int index, const char *attr_name, long val),
+
+ TP_ARGS(index, attr_name, val)
+);
+
+TRACE_EVENT(hwmon_attr_show_string,
+
+ TP_PROTO(int index, const char *attr_name, const char *s),
+
+ TP_ARGS(index, attr_name, s),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __string(attr_name, attr_name)
+ __string(label, s)
+ ),
+
+ TP_fast_assign(
+ __entry->index = index;
+ __assign_str(attr_name, attr_name);
+ __assign_str(label, s);
+ ),
+
+ TP_printk("index=%d, attr_name=%s, val=%s",
+ __entry->index, __get_str(attr_name), __get_str(label))
+);
+
+#endif /* _TRACE_HWMON_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
new file mode 100644
index 000000000000..a9834c37ac40
--- /dev/null
+++ b/include/trace/events/kyber.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kyber
+
+#if !defined(_TRACE_KYBER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KYBER_H
+
+#include <linux/blkdev.h>
+#include <linux/tracepoint.h>
+
+#define DOMAIN_LEN 16
+#define LATENCY_TYPE_LEN 8
+
+TRACE_EVENT(kyber_latency,
+
+ TP_PROTO(struct request_queue *q, const char *domain, const char *type,
+ unsigned int percentile, unsigned int numerator,
+ unsigned int denominator, unsigned int samples),
+
+ TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __array( char, domain, DOMAIN_LEN )
+ __array( char, type, LATENCY_TYPE_LEN )
+ __field( u8, percentile )
+ __field( u8, numerator )
+ __field( u8, denominator )
+ __field( unsigned int, samples )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ strlcpy(__entry->type, type, LATENCY_TYPE_LEN);
+ __entry->percentile = percentile;
+ __entry->numerator = numerator;
+ __entry->denominator = denominator;
+ __entry->samples = samples;
+ ),
+
+ TP_printk("%d,%d %s %s p%u %u/%u samples=%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->domain,
+ __entry->type, __entry->percentile, __entry->numerator,
+ __entry->denominator, __entry->samples)
+);
+
+TRACE_EVENT(kyber_adjust,
+
+ TP_PROTO(struct request_queue *q, const char *domain,
+ unsigned int depth),
+
+ TP_ARGS(q, domain, depth),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __array( char, domain, DOMAIN_LEN )
+ __field( unsigned int, depth )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ __entry->depth = depth;
+ ),
+
+ TP_printk("%d,%d %s %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->domain,
+ __entry->depth)
+);
+
+TRACE_EVENT(kyber_throttled,
+
+ TP_PROTO(struct request_queue *q, const char *domain),
+
+ TP_ARGS(q, domain),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __array( char, domain, DOMAIN_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ ),
+
+ TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->domain)
+);
+
+#endif /* _TRACE_KYBER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 711372845945..705b33d1e395 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -70,33 +70,6 @@ TRACE_EVENT(mm_migrate_pages,
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
-
-TRACE_EVENT(mm_numa_migrate_ratelimit,
-
- TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
-
- TP_ARGS(p, dst_nid, nr_pages),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN)
- __field( pid_t, pid)
- __field( int, dst_nid)
- __field( unsigned long, nr_pages)
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->dst_nid = dst_nid;
- __entry->nr_pages = nr_pages;
- ),
-
- TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
- __entry->comm,
- __entry->pid,
- __entry->dst_nid,
- __entry->nr_pages)
-);
#endif /* _TRACE_MIGRATE_H */
/* This part must be outside protection */
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index a8d07feff6a0..f0c4d10e614b 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -393,9 +393,8 @@ TRACE_EVENT(rcu_quiescent_state_report,
* Tracepoint for quiescent states detected by force_quiescent_state().
* These trace events include the type of RCU, the grace-period number
* that was blocked by the CPU, the CPU itself, and the type of quiescent
- * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
- * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
- * CPU got a quiescent state via its rcu_qs_ctr.
+ * state, which can be "dti" for dyntick-idle mode or "kick" when kicking
+ * a CPU that has been in dyntick-idle mode for too long.
*/
TRACE_EVENT(rcu_fqs,
@@ -705,20 +704,20 @@ TRACE_EVENT(rcu_torture_read,
);
/*
- * Tracepoint for _rcu_barrier() execution. The string "s" describes
- * the _rcu_barrier phase:
- * "Begin": _rcu_barrier() started.
- * "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
- * "Inc1": _rcu_barrier() piggyback check counter incremented.
- * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
- * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU.
- * "OnlineQ": _rcu_barrier() found online CPU with callbacks.
- * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
+ * Tracepoint for rcu_barrier() execution. The string "s" describes
+ * the rcu_barrier phase:
+ * "Begin": rcu_barrier() started.
+ * "EarlyExit": rcu_barrier() piggybacked, thus early exit.
+ * "Inc1": rcu_barrier() piggyback check counter incremented.
+ * "OfflineNoCB": rcu_barrier() found callback on never-online CPU
+ * "OnlineNoCB": rcu_barrier() found online no-CBs CPU.
+ * "OnlineQ": rcu_barrier() found online CPU with callbacks.
+ * "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
* "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
* "IRQNQ": An rcu_barrier_callback() callback found no callbacks.
* "CB": An rcu_barrier_callback() invoked a callback, not the last.
* "LastCB": An rcu_barrier_callback() invoked the last callback.
- * "Inc2": _rcu_barrier() piggyback check counter incremented.
+ * "Inc2": rcu_barrier() piggyback check counter incremented.
* The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
* is the count of remaining callbacks, and "done" is the piggybacking count.
*/
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 196587b8f204..573d5b901fb1 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
rxrpc_peer_new,
rxrpc_peer_processing,
rxrpc_peer_put,
- rxrpc_peer_queued_error,
};
enum rxrpc_conn_trace {
@@ -257,8 +256,7 @@ enum rxrpc_tx_point {
EM(rxrpc_peer_got, "GOT") \
EM(rxrpc_peer_new, "NEW") \
EM(rxrpc_peer_processing, "PRO") \
- EM(rxrpc_peer_put, "PUT") \
- E_(rxrpc_peer_queued_error, "QER")
+ E_(rxrpc_peer_put, "PUT")
#define rxrpc_conn_traces \
EM(rxrpc_conn_got, "GOT") \
@@ -933,6 +931,7 @@ TRACE_EVENT(rxrpc_tx_packet,
TP_fast_assign(
__entry->call = call_id;
memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
+ __entry->where = where;
),
TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 0be866c91f62..f07b270d4fc4 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -159,9 +159,14 @@ TRACE_EVENT(sched_switch,
(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
__print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
- { 0x01, "S" }, { 0x02, "D" }, { 0x04, "T" },
- { 0x08, "t" }, { 0x10, "X" }, { 0x20, "Z" },
- { 0x40, "P" }, { 0x80, "I" }) :
+ { TASK_INTERRUPTIBLE, "S" },
+ { TASK_UNINTERRUPTIBLE, "D" },
+ { __TASK_STOPPED, "T" },
+ { __TASK_TRACED, "t" },
+ { EXIT_DEAD, "X" },
+ { EXIT_ZOMBIE, "Z" },
+ { TASK_PARKED, "P" },
+ { TASK_DEAD, "I" }) :
"R",
__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index 86582923d51c..1db7e4b07c01 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -11,8 +11,7 @@
#define TP_STORE_SIGINFO(__entry, info) \
do { \
- if (info == SEND_SIG_NOINFO || \
- info == SEND_SIG_FORCED) { \
+ if (info == SEND_SIG_NOINFO) { \
__entry->errno = 0; \
__entry->code = SI_USER; \
} else if (info == SEND_SIG_PRIV) { \
@@ -50,7 +49,7 @@ enum {
*/
TRACE_EVENT(signal_generate,
- TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
+ TP_PROTO(int sig, struct kernel_siginfo *info, struct task_struct *task,
int group, int result),
TP_ARGS(sig, info, task, group, result),
@@ -96,7 +95,7 @@ TRACE_EVENT(signal_generate,
*/
TRACE_EVENT(signal_deliver,
- TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka),
+ TP_PROTO(int sig, struct kernel_siginfo *info, struct k_sigaction *ka),
TP_ARGS(sig, info, ka),
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index ac55b328d61b..2bc9960a31aa 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -56,6 +56,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
TP_STRUCT__entry(
__field(const void *, skbaddr)
__field(const void *, skaddr)
+ __field(int, state)
__field(__u16, sport)
__field(__u16, dport)
__array(__u8, saddr, 4)
@@ -70,6 +71,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
__entry->skbaddr = skb;
__entry->skaddr = sk;
+ __entry->state = sk->sk_state;
__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
@@ -84,9 +86,10 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
+ TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s\n",
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
- __entry->saddr_v6, __entry->daddr_v6)
+ __entry->saddr_v6, __entry->daddr_v6,
+ show_tcp_state_name(__entry->state))
);
DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb,
diff --git a/include/uapi/asm-generic/hugetlb_encode.h b/include/uapi/asm-generic/hugetlb_encode.h
index e4732d3c2998..b0f8e87235bd 100644
--- a/include/uapi/asm-generic/hugetlb_encode.h
+++ b/include/uapi/asm-generic/hugetlb_encode.h
@@ -26,7 +26,9 @@
#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB (25 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB (29 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
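These flags encode log2(page size) in the bits above HUGETLB_FLAG_ENCODE_SHIFT, so 25 << SHIFT is 32MB and 29 << SHIFT is 512MB. A hedged usage sketch of the new value via memfd_create() (requires a kernel and architecture that actually support 32MB huge pages, e.g. some arm64 configurations):

    #include <linux/memfd.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        /* MFD_HUGE_32MB is HUGETLB_FLAG_ENCODE_32MB re-exported */
        int fd = syscall(SYS_memfd_create, "demo",
                         MFD_HUGETLB | MFD_HUGE_32MB);
        return fd < 0 ? 1 : 0;
    }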
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index 80e2a7227205..cb3d6c267181 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -10,18 +10,7 @@ typedef union sigval {
void __user *sival_ptr;
} sigval_t;
-/*
- * This is the size (including padding) of the part of the
- * struct siginfo that is before the union.
- */
-#ifndef __ARCH_SI_PREAMBLE_SIZE
-#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
-#endif
-
#define SI_MAX_SIZE 128
-#ifndef SI_PAD_SIZE
-#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
-#endif
/*
* The default "si_band" type is "long", as specified by POSIX.
@@ -40,96 +29,108 @@ typedef union sigval {
#define __ARCH_SI_ATTRIBUTES
#endif
-typedef struct siginfo {
- int si_signo;
-#ifndef __ARCH_HAS_SWAPPED_SIGINFO
- int si_errno;
- int si_code;
-#else
- int si_code;
- int si_errno;
-#endif
-
- union {
- int _pad[SI_PAD_SIZE];
-
- /* kill() */
- struct {
- __kernel_pid_t _pid; /* sender's pid */
- __kernel_uid32_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- __kernel_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- __kernel_pid_t _pid; /* sender's pid */
- __kernel_uid32_t _uid; /* sender's uid */
- sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- __kernel_pid_t _pid; /* which child */
- __kernel_uid32_t _uid; /* sender's uid */
- int _status; /* exit code */
- __ARCH_SI_CLOCK_T _utime;
- __ARCH_SI_CLOCK_T _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
- struct {
- void __user *_addr; /* faulting insn/memory ref. */
+union __sifields {
+ /* kill() */
+ struct {
+ __kernel_pid_t _pid; /* sender's pid */
+ __kernel_uid32_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ __kernel_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ sigval_t _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ __kernel_pid_t _pid; /* sender's pid */
+ __kernel_uid32_t _uid; /* sender's uid */
+ sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ __kernel_pid_t _pid; /* which child */
+ __kernel_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ __ARCH_SI_CLOCK_T _utime;
+ __ARCH_SI_CLOCK_T _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
+ struct {
+ void __user *_addr; /* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
+ int _trapno; /* TRAP # which caused the signal */
#endif
#ifdef __ia64__
- int _imm; /* immediate value for "break" */
- unsigned int _flags; /* see ia64 si_flags */
- unsigned long _isr; /* isr */
+ int _imm; /* immediate value for "break" */
+ unsigned int _flags; /* see ia64 si_flags */
+ unsigned long _isr; /* isr */
#endif
#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \
sizeof(short) : __alignof__(void *))
- union {
- /*
- * used when si_code=BUS_MCEERR_AR or
- * used when si_code=BUS_MCEERR_AO
- */
- short _addr_lsb; /* LSB of the reported address */
- /* used when si_code=SEGV_BNDERR */
- struct {
- char _dummy_bnd[__ADDR_BND_PKEY_PAD];
- void __user *_lower;
- void __user *_upper;
- } _addr_bnd;
- /* used when si_code=SEGV_PKUERR */
- struct {
- char _dummy_pkey[__ADDR_BND_PKEY_PAD];
- __u32 _pkey;
- } _addr_pkey;
- };
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
+ union {
+ /*
+ * used when si_code=BUS_MCEERR_AR or
+ * used when si_code=BUS_MCEERR_AO
+ */
+ short _addr_lsb; /* LSB of the reported address */
+ /* used when si_code=SEGV_BNDERR */
+ struct {
+ char _dummy_bnd[__ADDR_BND_PKEY_PAD];
+ void __user *_lower;
+ void __user *_upper;
+ } _addr_bnd;
+ /* used when si_code=SEGV_PKUERR */
+ struct {
+ char _dummy_pkey[__ADDR_BND_PKEY_PAD];
+ __u32 _pkey;
+ } _addr_pkey;
+ };
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+
+ /* SIGSYS */
+ struct {
+ void __user *_call_addr; /* calling user insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
+};
- /* SIGSYS */
- struct {
- void __user *_call_addr; /* calling user insn */
- int _syscall; /* triggering system call number */
- unsigned int _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
+#define __SIGINFO \
+struct { \
+ int si_signo; \
+ int si_errno; \
+ int si_code; \
+ union __sifields _sifields; \
+}
+#else
+#define __SIGINFO \
+struct { \
+ int si_signo; \
+ int si_code; \
+ int si_errno; \
+ union __sifields _sifields; \
+}
+#endif /* __ARCH_HAS_SWAPPED_SIGINFO */
+
+typedef struct siginfo {
+ union {
+ __SIGINFO;
+ int _si_pad[SI_MAX_SIZE/sizeof(int)];
+ };
} __ARCH_SI_ATTRIBUTES siginfo_t;
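The rewrite pads siginfo through the anonymous union with _si_pad instead of an explicit _pad member inside the payload union, so the ABI-visible size is pinned no matter which __sifields arm is largest. Illustrative invariant, assuming only this header's definitions:

    /* sizeof(siginfo_t) is fixed by the _si_pad arm of the union */
    _Static_assert(sizeof(siginfo_t) == SI_MAX_SIZE,
                   "siginfo_t must stay SI_MAX_SIZE bytes");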
/*
@@ -286,6 +287,12 @@ typedef struct siginfo {
#define NSIGSYS 1
/*
+ * SIGEMT si_codes
+ */
+#define EMT_TAGOVF 1 /* tag overflow */
+#define NSIGEMT 1
+
+/*
* sigevent definitions
*
* It seems likely that SIGEV_THREAD will have to be handled from
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index df4bedb9b01c..538546edbfbd 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -242,10 +242,12 @@ __SYSCALL(__NR_tee, sys_tee)
/* fs/stat.c */
#define __NR_readlinkat 78
__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR3264_fstatat 79
__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
#define __NR3264_fstat 80
__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
+#endif
/* fs/sync.c */
#define __NR_sync 81
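With this guard, an architecture that selects neither __ARCH_WANT_NEW_STAT nor __ARCH_WANT_STAT64 gets no fstat/fstatat numbers at all. __SC_3264 itself, defined earlier in this header, selects the 32-bit or 64-bit variant roughly as follows (a sketch, per asm-generic/unistd.h conventions):

    #if __BITS_PER_LONG == 64 && !defined(__SYSCALL_COMPAT)
    #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
    #else
    #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32)
    #endif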
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 66917a4eba27..852dc17ab47a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -103,6 +103,7 @@ enum bpf_cmd {
BPF_BTF_LOAD,
BPF_BTF_GET_FD_BY_ID,
BPF_TASK_FD_QUERY,
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM,
};
enum bpf_map_type {
@@ -127,6 +128,9 @@ enum bpf_map_type {
BPF_MAP_TYPE_SOCKHASH,
BPF_MAP_TYPE_CGROUP_STORAGE,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ BPF_MAP_TYPE_QUEUE,
+ BPF_MAP_TYPE_STACK,
};
enum bpf_prog_type {
@@ -152,6 +156,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LWT_SEG6LOCAL,
BPF_PROG_TYPE_LIRC_MODE2,
BPF_PROG_TYPE_SK_REUSEPORT,
+ BPF_PROG_TYPE_FLOW_DISSECTOR,
};
enum bpf_attach_type {
@@ -172,6 +177,7 @@ enum bpf_attach_type {
BPF_CGROUP_UDP4_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
BPF_LIRC_MODE2,
+ BPF_FLOW_DISSECTOR,
__MAX_BPF_ATTACH_TYPE
};
@@ -459,6 +465,28 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * Description
+ * Push an element *value* in *map*. *flags* is one of:
+ *
+ * **BPF_EXIST**
+ * If the queue/stack is full, the oldest element is removed to
+ * make room for this.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+ * Description
+ * Pop an element from *map*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+ * Description
+ * Get an element from *map* without removing it.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
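The push/pop/peek helpers above target the new BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK maps; from userspace the counterpart is the BPF_MAP_LOOKUP_AND_DELETE_ELEM command added earlier in this patch. A hedged pop sketch (error handling elided):

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Pop one u32 from a BPF_MAP_TYPE_QUEUE map fd; 0 on success. */
    static int queue_pop(int map_fd, __u32 *value)
    {
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_fd = map_fd;
        attr.value  = (__u64)(unsigned long)value; /* key is unused */

        return syscall(__NR_bpf, BPF_MAP_LOOKUP_AND_DELETE_ELEM,
                       &attr, sizeof(attr));
    }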
@@ -1430,7 +1458,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_adjust_room(struct sk_buff *skb, u32 len_diff, u32 mode, u64 flags)
+ * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
* Description
* Grow or shrink the room for data in the packet associated to
* *skb* by *len_diff*, and according to the selected *mode*.
@@ -2141,6 +2169,94 @@ union bpf_attr {
* request in the skb.
* Return
* 0 on success, or a negative error in case of failure.
+ *
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * Description
+ * Look for TCP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-NULL, released via **bpf_sk_release**\ ().
+ *
+ * The *ctx* should point to the context of the program, such as
+ * the skb or socket (depending on the hook in use). This is used
+ * to determine the base network namespace for the lookup.
+ *
+ * *tuple_size* must be one of:
+ *
+ * **sizeof**\ (*tuple*\ **->ipv4**)
+ * Look for an IPv4 socket.
+ * **sizeof**\ (*tuple*\ **->ipv6**)
+ * Look for an IPv6 socket.
+ *
+ * If the *netns* is zero, then the socket lookup table in the
+ * netns associated with the *ctx* will be used. For the TC hooks,
+ *		this is in the netns of the device in the skb. For socket hooks,
+ *		this is in the netns of the socket. If *netns* is non-zero, then
+ * it specifies the ID of the netns relative to the netns
+ * associated with the *ctx*.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ * Return
+ * Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * Description
+ * Look for UDP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-NULL, released via **bpf_sk_release**\ ().
+ *
+ * The *ctx* should point to the context of the program, such as
+ * the skb or socket (depending on the hook in use). This is used
+ * to determine the base network namespace for the lookup.
+ *
+ * *tuple_size* must be one of:
+ *
+ * **sizeof**\ (*tuple*\ **->ipv4**)
+ * Look for an IPv4 socket.
+ * **sizeof**\ (*tuple*\ **->ipv6**)
+ * Look for an IPv6 socket.
+ *
+ * If the *netns* is zero, then the socket lookup table in the
+ * netns associated with the *ctx* will be used. For the TC hooks,
+ *		this is in the netns of the device in the skb. For socket hooks,
+ *		this is in the netns of the socket. If *netns* is non-zero, then
+ * it specifies the ID of the netns relative to the netns
+ * associated with the *ctx*.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ * Return
+ * Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *
+ * int bpf_sk_release(struct bpf_sock *sk)
+ * Description
+ *		Release the reference held by *sk*. *sk* must be a non-NULL
+ *		pointer that was returned from bpf_sk_lookup_xxx\ ().
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * Description
+ * For socket policies, insert *len* bytes into msg at offset
+ * *start*.
+ *
+ * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
+ * *msg* it may want to insert metadata or options into the msg.
+ * This can later be read and used by any of the lower layer BPF
+ * hooks.
+ *
+ *		This helper may fail if it is under memory pressure (a malloc
+ *		fails); in these cases BPF programs will get an appropriate
+ *		error and will need to handle it.
+ *
+ * Return
+ * 0 on success, or a negative error in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
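The lookup helpers return a referenced socket that the verifier refuses to leak, hence the mandatory bpf_sk_release(). A minimal BPF-side sketch in the helper-pointer style of contemporary selftests (tuple construction from packet data elided; compiled with clang -target bpf):

    #include <linux/bpf.h>

    static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
            struct bpf_sock_tuple *tuple, __u32 tuple_size,
            __u32 netns, __u64 flags) = (void *) BPF_FUNC_sk_lookup_tcp;
    static int (*bpf_sk_release)(struct bpf_sock *sk) =
            (void *) BPF_FUNC_sk_release;

    /* Returns 1 if a TCP socket matches the IPv4 tuple, else 0. */
    static int tuple_has_socket(void *ctx, struct bpf_sock_tuple *tuple)
    {
        struct bpf_sock *sk;

        sk = bpf_sk_lookup_tcp(ctx, tuple, sizeof(tuple->ipv4), 0, 0);
        if (!sk)
            return 0;
        bpf_sk_release(sk);   /* drop the acquired reference */
        return 1;
    }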
@@ -2226,7 +2342,14 @@ union bpf_attr {
FN(get_current_cgroup_id), \
FN(get_local_storage), \
FN(sk_select_reuseport), \
- FN(skb_ancestor_cgroup_id),
+ FN(skb_ancestor_cgroup_id), \
+ FN(sk_lookup_tcp), \
+ FN(sk_lookup_udp), \
+ FN(sk_release), \
+ FN(map_push_elem), \
+ FN(map_pop_elem), \
+ FN(map_peek_elem), \
+ FN(msg_push_data),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2333,6 +2456,7 @@ struct __sk_buff {
/* ... here. */
__u32 data_meta;
+ struct bpf_flow_keys *flow_keys;
};
struct bpf_tunnel_key {
@@ -2395,6 +2519,23 @@ struct bpf_sock {
*/
};
+struct bpf_sock_tuple {
+ union {
+ struct {
+ __be32 saddr;
+ __be32 daddr;
+ __be16 sport;
+ __be16 dport;
+ } ipv4;
+ struct {
+ __be32 saddr[4];
+ __be32 daddr[4];
+ __be16 sport;
+ __be16 dport;
+ } ipv6;
+ };
+};
+
#define XDP_PACKET_HEADROOM 256
/* User return codes for XDP prog type.
@@ -2778,4 +2919,27 @@ enum bpf_task_fd_type {
BPF_FD_TYPE_URETPROBE, /* filename + offset */
};
+struct bpf_flow_keys {
+ __u16 nhoff;
+ __u16 thoff;
+ __u16 addr_proto; /* ETH_P_* of valid addrs */
+ __u8 is_frag;
+ __u8 is_first_frag;
+ __u8 is_encap;
+ __u8 ip_proto;
+ __be16 n_proto;
+ __be16 sport;
+ __be16 dport;
+ union {
+ struct {
+ __be32 ipv4_src;
+ __be32 ipv4_dst;
+ };
+ struct {
+ __u32 ipv6_src[4]; /* in6_addr; network order */
+ __u32 ipv6_dst[4]; /* in6_addr; network order */
+ };
+ };
+};
+
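Flow dissector programs (BPF_PROG_TYPE_FLOW_DISSECTOR above) see this struct through the new __sk_buff::flow_keys pointer and fill it in while parsing. A heavily hedged shape of such a program; real programs read headers with bpf_skb_load_bytes() and loop over encapsulations, and the return convention (0 accepts) is assumed here:

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/in.h>

    /* Illustrative only: pretends the packet is Ethernet/IPv4/TCP. */
    int dissect(struct __sk_buff *skb)
    {
        struct bpf_flow_keys *keys = skb->flow_keys;

        keys->nhoff      = 14;          /* network header offset */
        keys->thoff      = 34;          /* transport header offset */
        keys->addr_proto = ETH_P_IP;    /* ETH_P_* of the parsed addrs */
        keys->ip_proto   = IPPROTO_TCP;
        /* sport/dport/ipv4_src/ipv4_dst taken from the packet ... */
        return 0;                       /* assumed: 0 == accept */
    }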
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 19bf0ca6d635..6dafbc3e4414 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -29,6 +29,7 @@ enum {
CRYPTO_MSG_UPDATEALG,
CRYPTO_MSG_GETALG,
CRYPTO_MSG_DELRNG,
+ CRYPTO_MSG_GETSTAT,
__CRYPTO_MSG_MAX
};
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
@@ -50,6 +51,16 @@ enum crypto_attr_type_t {
CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
+ CRYPTOCFGA_STAT_LARVAL, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_HASH, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_BLKCIPHER, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_AEAD, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_COMPRESS, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_RNG, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_CIPHER, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_AKCIPHER, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_KPP, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_ACOMP, /* struct crypto_stat */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -65,6 +76,47 @@ struct crypto_user_alg {
__u32 cru_flags;
};
+struct crypto_stat {
+ char type[CRYPTO_MAX_NAME];
+ union {
+ __u32 stat_encrypt_cnt;
+ __u32 stat_compress_cnt;
+ __u32 stat_generate_cnt;
+ __u32 stat_hash_cnt;
+ __u32 stat_setsecret_cnt;
+ };
+ union {
+ __u64 stat_encrypt_tlen;
+ __u64 stat_compress_tlen;
+ __u64 stat_generate_tlen;
+ __u64 stat_hash_tlen;
+ };
+ union {
+ __u32 stat_akcipher_err_cnt;
+ __u32 stat_cipher_err_cnt;
+ __u32 stat_compress_err_cnt;
+ __u32 stat_aead_err_cnt;
+ __u32 stat_hash_err_cnt;
+ __u32 stat_rng_err_cnt;
+ __u32 stat_kpp_err_cnt;
+ };
+ union {
+ __u32 stat_decrypt_cnt;
+ __u32 stat_decompress_cnt;
+ __u32 stat_seed_cnt;
+ __u32 stat_generate_public_key_cnt;
+ };
+ union {
+ __u64 stat_decrypt_tlen;
+ __u64 stat_decompress_tlen;
+ };
+ union {
+ __u32 stat_verify_cnt;
+ __u32 stat_compute_shared_secret_cnt;
+ };
+ __u32 stat_sign_cnt;
+};
+
struct crypto_report_larval {
char type[CRYPTO_MAX_NAME];
};
diff --git a/include/uapi/linux/dns_resolver.h b/include/uapi/linux/dns_resolver.h
new file mode 100644
index 000000000000..129745f9c794
--- /dev/null
+++ b/include/uapi/linux/dns_resolver.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/* DNS resolver interface definitions.
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_DNS_RESOLVER_H
+#define _UAPI_LINUX_DNS_RESOLVER_H
+
+#include <linux/types.h>
+
+/*
+ * Type of payload.
+ */
+enum dns_payload_content_type {
+ DNS_PAYLOAD_IS_SERVER_LIST = 0, /* List of servers, requested by srv=1 */
+};
+
+/*
+ * Type of address that might be found in an address record.
+ */
+enum dns_payload_address_type {
+ DNS_ADDRESS_IS_IPV4 = 0, /* 4-byte AF_INET address */
+ DNS_ADDRESS_IS_IPV6 = 1, /* 16-byte AF_INET6 address */
+};
+
+/*
+ * Type of protocol used to access a server.
+ */
+enum dns_payload_protocol_type {
+ DNS_SERVER_PROTOCOL_UNSPECIFIED = 0,
+ DNS_SERVER_PROTOCOL_UDP = 1, /* Use UDP to talk to the server */
+ DNS_SERVER_PROTOCOL_TCP = 2, /* Use TCP to talk to the server */
+};
+
+/*
+ * Source of record included in DNS resolver payload.
+ */
+enum dns_record_source {
+ DNS_RECORD_UNAVAILABLE = 0, /* No source available (empty record) */
+ DNS_RECORD_FROM_CONFIG = 1, /* From local configuration data */
+ DNS_RECORD_FROM_DNS_A = 2, /* From DNS A or AAAA record */
+ DNS_RECORD_FROM_DNS_AFSDB = 3, /* From DNS AFSDB record */
+ DNS_RECORD_FROM_DNS_SRV = 4, /* From DNS SRV record */
+ DNS_RECORD_FROM_NSS = 5, /* From NSS */
+ NR__dns_record_source
+};
+
+/*
+ * Status of record included in DNS resolver payload.
+ */
+enum dns_lookup_status {
+ DNS_LOOKUP_NOT_DONE = 0, /* No lookup has been made */
+ DNS_LOOKUP_GOOD = 1, /* Good records obtained */
+ DNS_LOOKUP_GOOD_WITH_BAD = 2, /* Good records, some decoding errors */
+ DNS_LOOKUP_BAD = 3, /* Couldn't decode results */
+ DNS_LOOKUP_GOT_NOT_FOUND = 4, /* Got a "Not Found" result */
+ DNS_LOOKUP_GOT_LOCAL_FAILURE = 5, /* Local failure during lookup */
+ DNS_LOOKUP_GOT_TEMP_FAILURE = 6, /* Temporary failure during lookup */
+ DNS_LOOKUP_GOT_NS_FAILURE = 7, /* Name server failure */
+ NR__dns_lookup_status
+};
+
+/*
+ * Header at the beginning of binary format payload.
+ */
+struct dns_payload_header {
+ __u8 zero; /* Zero byte: marks this as not being text */
+ __u8 content; /* enum dns_payload_content_type */
+ __u8 version; /* Encoding version */
+} __packed;
+
+/*
+ * Header at the beginning of a V1 server list. This is followed directly by
+ * the server records. Each server records begins with a struct of type
+ * dns_server_list_v1_server.
+ */
+struct dns_server_list_v1_header {
+ struct dns_payload_header hdr;
+ __u8 source; /* enum dns_record_source */
+ __u8 status; /* enum dns_lookup_status */
+ __u8 nr_servers; /* Number of server records following this */
+} __packed;
+
+/*
+ * Header at the beginning of each V1 server record. This is followed by the
+ * characters of the name with no NUL-terminator, followed by the address
+ * records for that server.  Each address record begins with a struct of
+ * type dns_server_list_v1_address.
+ */
+struct dns_server_list_v1_server {
+ __u16 name_len; /* Length of name (LE) */
+ __u16 priority; /* Priority (as SRV record) (LE) */
+ __u16 weight; /* Weight (as SRV record) (LE) */
+ __u16 port; /* UDP/TCP port number (LE) */
+ __u8 source; /* enum dns_record_source */
+ __u8 status; /* enum dns_lookup_status */
+ __u8 protocol; /* enum dns_payload_protocol_type */
+ __u8 nr_addrs;
+} __packed;
+
+/*
+ * Header at the beginning of each V1 address record. This is followed by the
+ * bytes of the address, 4 for IPV4 and 16 for IPV6.
+ */
+struct dns_server_list_v1_address {
+ __u8 address_type; /* enum dns_payload_address_type */
+} __packed;
+
+#endif /* _UAPI_LINUX_DNS_RESOLVER_H */
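A hedged userspace walk of the V1 payload: each fixed header is followed back-to-back by its variable part (the name bytes, then the address records). Note that __packed is a kernel-side macro and the __u16 fields are little-endian, so a portable reader must supply both; bounds checks and endian conversion are elided here:

    #include <linux/dns_resolver.h>
    #include <stddef.h>

    static void walk_v1(const unsigned char *p)
    {
        const struct dns_server_list_v1_header *hdr = (const void *)p;
        p += sizeof(*hdr);

        for (int s = 0; s < hdr->nr_servers; s++) {
            const struct dns_server_list_v1_server *srv = (const void *)p;
            p += sizeof(*srv) + srv->name_len;   /* name, no NUL */

            for (int a = 0; a < srv->nr_addrs; a++) {
                const struct dns_server_list_v1_address *ad =
                        (const void *)p;
                p += sizeof(*ad);
                p += ad->address_type == DNS_ADDRESS_IS_IPV6 ? 16 : 4;
            }
        }
    }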
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index dc69391d2bba..c8f8e2455bf3 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -91,10 +91,6 @@
* %ETHTOOL_GSET to get the current values before making specific
* changes and then applying them with %ETHTOOL_SSET.
*
- * Drivers that implement set_settings() should validate all fields
- * other than @cmd that are not described as read-only or deprecated,
- * and must ignore all fields described as read-only.
- *
* Deprecated fields should be ignored by both users and drivers.
*/
struct ethtool_cmd {
@@ -1800,14 +1796,9 @@ enum ethtool_reset_flags {
* rejected.
*
* Deprecated %ethtool_cmd fields transceiver, maxtxpkt and maxrxpkt
- * are not available in %ethtool_link_settings. Until all drivers are
- * converted to ignore them or to the new %ethtool_link_settings API,
- * for both queries and changes, users should always try
- * %ETHTOOL_GLINKSETTINGS first, and if it fails with -ENOTSUPP stick
- * only to %ETHTOOL_GSET and %ETHTOOL_SSET consistently. If it
- * succeeds, then users should stick to %ETHTOOL_GLINKSETTINGS and
- * %ETHTOOL_SLINKSETTINGS (which would support drivers implementing
- * either %ethtool_cmd or %ethtool_link_settings).
+ * are not available in %ethtool_link_settings. These fields will
+ * always be set to zero in the %ETHTOOL_GSET reply, and %ETHTOOL_SSET
+ * will fail if any of them is set to a non-zero value.
*
* Users should assume that all fields not marked read-only are
* writable and subject to validation by the driver. They should use
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index 1db453e4b550..1acd2b179aef 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -47,11 +47,11 @@
#define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09
/**
- * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
+ * struct fw_cdev_event_common - Common part of all fw_cdev_event_* types
* @closure: For arbitrary use by userspace
- * @type: Discriminates the fw_cdev_event_ types
+ * @type: Discriminates the fw_cdev_event_* types
*
- * This struct may be used to access generic members of all fw_cdev_event_
+ * This struct may be used to access generic members of all fw_cdev_event_*
* types regardless of the specific type.
*
* Data passed in the @closure field for a request will be returned in the
@@ -123,7 +123,13 @@ struct fw_cdev_event_response {
/**
* struct fw_cdev_event_request - Old version of &fw_cdev_event_request2
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST
+ * @tcode: Transaction code of the incoming request
+ * @offset: The offset into the 48-bit per-node address space
+ * @handle: Reference to the kernel-side pending request
+ * @length: Data length, i.e. the request's payload size in bytes
+ * @data: Incoming data, if any
*
* This event is sent instead of &fw_cdev_event_request2 if the kernel or
* the client implements ABI version <= 3. &fw_cdev_event_request lacks
@@ -353,7 +359,7 @@ struct fw_cdev_event_phy_packet {
};
/**
- * union fw_cdev_event - Convenience union of fw_cdev_event_ types
+ * union fw_cdev_event - Convenience union of fw_cdev_event_* types
* @common: Valid for all types
* @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
* @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
@@ -735,7 +741,7 @@ struct fw_cdev_set_iso_channels {
* @header: Header and payload in case of a transmit context.
*
* &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
- * Use the FW_CDEV_ISO_ macros to fill in @control.
+ * Use the FW_CDEV_ISO_* macros to fill in @control.
* The @header array is empty in case of receive contexts.
*
* Context type %FW_CDEV_ISO_CONTEXT_TRANSMIT:
@@ -842,7 +848,7 @@ struct fw_cdev_queue_iso {
* the %FW_CDEV_ISO_SYNC bit set
* @tags: Tag filter bit mask. Only valid for isochronous reception.
* Determines the tag values for which packets will be accepted.
- * Use FW_CDEV_ISO_CONTEXT_MATCH_ macros to set @tags.
+ * Use FW_CDEV_ISO_CONTEXT_MATCH_* macros to set @tags.
* @handle: Isochronous context handle within which to transmit or receive
*/
struct fw_cdev_start_iso {
@@ -1009,8 +1015,8 @@ struct fw_cdev_send_stream_packet {
* on the same card as this device. After transmission, an
* %FW_CDEV_EVENT_PHY_PACKET_SENT event is generated.
*
- * The payload @data[] shall be specified in host byte order. Usually,
- * @data[1] needs to be the bitwise inverse of @data[0]. VersaPHY packets
+ * The payload @data\[\] shall be specified in host byte order. Usually,
+ * @data\[1\] needs to be the bitwise inverse of @data\[0\]. VersaPHY packets
* are an exception to this rule.
*
* The ioctl is only permitted on device files which represent a local node.
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 73e01918f996..a441ea1bfe6d 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -279,8 +279,8 @@ struct fsxattr {
#define FS_ENCRYPTION_MODE_AES_256_CTS 4
#define FS_ENCRYPTION_MODE_AES_128_CBC 5
#define FS_ENCRYPTION_MODE_AES_128_CTS 6
-#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7
-#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
struct fscrypt_policy {
__u8 version;
diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h
index 24a861c0d29d..065408e16a80 100644
--- a/include/uapi/linux/gen_stats.h
+++ b/include/uapi/linux/gen_stats.h
@@ -12,6 +12,7 @@ enum {
TCA_STATS_APP,
TCA_STATS_RATE_EST64,
TCA_STATS_PAD,
+ TCA_STATS_BASIC_HW,
__TCA_STATS_MAX,
};
#define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h
index 1bf6e6df084b..4ebfe0ac6c5b 100644
--- a/include/uapi/linux/gpio.h
+++ b/include/uapi/linux/gpio.h
@@ -65,7 +65,7 @@ struct gpioline_info {
/**
* struct gpiohandle_request - Information about a GPIO handle request
- * @lineoffsets: an array desired lines, specified by offset index for the
+ * @lineoffsets: an array of desired lines, specified by offset index for the
* associated GPIO device
* @flags: desired flags for the desired GPIO lines, such as
* GPIOHANDLE_REQUEST_OUTPUT, GPIOHANDLE_REQUEST_ACTIVE_LOW etc, OR:ed
diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h
index ebaf5701c9db..dfcf3ce0097f 100644
--- a/include/uapi/linux/if_addr.h
+++ b/include/uapi/linux/if_addr.h
@@ -34,6 +34,7 @@ enum {
IFA_MULTICAST,
IFA_FLAGS,
IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */
+ IFA_TARGET_NETNSID,
__IFA_MAX,
};
diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h
index 4605527ca41b..c3cc5a9e5eaf 100644
--- a/include/uapi/linux/if_arp.h
+++ b/include/uapi/linux/if_arp.h
@@ -114,18 +114,18 @@
/* ARP ioctl request. */
struct arpreq {
- struct sockaddr arp_pa; /* protocol address */
- struct sockaddr arp_ha; /* hardware address */
- int arp_flags; /* flags */
- struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
- char arp_dev[16];
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+ char arp_dev[IFNAMSIZ];
};
struct arpreq_old {
- struct sockaddr arp_pa; /* protocol address */
- struct sockaddr arp_ha; /* hardware address */
- int arp_flags; /* flags */
- struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
};
/* ARP Flag values. */
diff --git a/include/uapi/linux/if_fddi.h b/include/uapi/linux/if_fddi.h
index 75eed8b62823..7239aa9c0766 100644
--- a/include/uapi/linux/if_fddi.h
+++ b/include/uapi/linux/if_fddi.h
@@ -6,9 +6,10 @@
*
* Global definitions for the ANSI FDDI interface.
*
- * Version: @(#)if_fddi.h 1.0.2 Sep 29 2004
+ * Version: @(#)if_fddi.h 1.0.3 Oct 6 2018
*
- * Author: Lawrence V. Stefani, <stefani@lkg.dec.com>
+ * Author: Lawrence V. Stefani, <stefani@yahoo.com>
+ * Maintainer: Maciej W. Rozycki, <macro@linux-mips.org>
*
* if_fddi.h is based on previous if_ether.h and if_tr.h work by
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -45,7 +46,21 @@
#define FDDI_K_OUI_LEN 3 /* Octets in OUI in 802.2 SNAP
header */
-/* Define FDDI Frame Control (FC) Byte values */
+/* Define FDDI Frame Control (FC) Byte masks */
+#define FDDI_FC_K_CLASS_MASK 0x80 /* class bit */
+#define FDDI_FC_K_CLASS_SYNC 0x80
+#define FDDI_FC_K_CLASS_ASYNC 0x00
+#define FDDI_FC_K_ALEN_MASK 0x40 /* address length bit */
+#define FDDI_FC_K_ALEN_48 0x40
+#define FDDI_FC_K_ALEN_16 0x00
+#define FDDI_FC_K_FORMAT_MASK 0x30 /* format bits */
+#define FDDI_FC_K_FORMAT_FUTURE 0x30
+#define FDDI_FC_K_FORMAT_IMPLEMENTOR 0x20
+#define FDDI_FC_K_FORMAT_LLC 0x10
+#define FDDI_FC_K_FORMAT_MANAGEMENT 0x00
+#define FDDI_FC_K_CONTROL_MASK 0x0f /* control bits */
+
+/* Define FDDI Frame Control (FC) Byte specific values */
#define FDDI_FC_K_VOID 0x00
#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80
#define FDDI_FC_K_RESTRICTED_TOKEN 0xC0
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 43391e2d1153..1debfa42cba1 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -161,6 +161,7 @@ enum {
IFLA_EVENT,
IFLA_NEW_NETNSID,
IFLA_IF_NETNSID,
+ IFLA_TARGET_NETNSID = IFLA_IF_NETNSID, /* new alias */
IFLA_CARRIER_UP_COUNT,
IFLA_CARRIER_DOWN_COUNT,
IFLA_NEW_IFINDEX,
@@ -286,6 +287,7 @@ enum {
IFLA_BR_MCAST_STATS_ENABLED,
IFLA_BR_MCAST_IGMP_VERSION,
IFLA_BR_MCAST_MLD_VERSION,
+ IFLA_BR_VLAN_STATS_PER_PORT,
__IFLA_BR_MAX,
};
@@ -554,6 +556,7 @@ enum {
IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
IFLA_GENEVE_LABEL,
+ IFLA_GENEVE_TTL_INHERIT,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index 67b61d91d89b..467b654bd4c7 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -57,6 +57,7 @@ struct sockaddr_ll {
#define PACKET_QDISC_BYPASS 20
#define PACKET_ROLLOVER_STATS 21
#define PACKET_FANOUT_DATA 22
+#define PACKET_IGNORE_OUTGOING 23
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index ed291e55f024..71d82fe15b03 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -177,6 +177,7 @@ struct in6_flowlabel_req {
#define IPV6_V6ONLY 26
#define IPV6_JOIN_ANYCAST 27
#define IPV6_LEAVE_ANYCAST 28
+#define IPV6_MULTICAST_ALL 29
/* IPV6_MTU_DISCOVER values */
#define IPV6_PMTUDISC_DONT 0
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 53fbae27b280..6d180cc60a5d 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -708,6 +708,15 @@
#define REL_DIAL 0x07
#define REL_WHEEL 0x08
#define REL_MISC 0x09
+/*
+ * 0x0a is reserved and should not be used in input drivers.
+ * It was used by HID as REL_MISC+1, and userspace needs to detect
+ * whether the next REL_* event is genuine or merely REL_MISC + n.
+ * REL_RESERVED is defined here so userspace can rely on it to detect
+ * that situation.
+ */
+#define REL_RESERVED 0x0a
+#define REL_WHEEL_HI_RES 0x0b
#define REL_MAX 0x0f
#define REL_CNT (REL_MAX+1)
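Userspace evdev readers can now skip the hole explicitly instead of guessing. Minimal sketch:

    #include <linux/input.h>

    /* 1 if this relative event code is safe to interpret as REL_*. */
    static int rel_code_usable(const struct input_event *ev)
    {
        return ev->type == EV_REL && ev->code != REL_RESERVED;
    }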
@@ -744,6 +753,15 @@
#define ABS_MISC 0x28
+/*
+ * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6, and userspace needs to detect
+ * whether the next ABS_* event is genuine or merely ABS_MISC + n.
+ * ABS_RESERVED is defined here so userspace can rely on it to detect
+ * that situation.
+ */
+#define ABS_RESERVED 0x2e
+
#define ABS_MT_SLOT 0x2f /* MT slot being modified */
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 910cc4334b21..0f3cb13db8e9 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -65,7 +65,12 @@
/* keyctl structures */
struct keyctl_dh_params {
- __s32 dh_private;
+ union {
+#ifndef __cplusplus
+ __s32 private;
+#endif
+ __s32 priv;
+ };
__s32 prime;
__s32 base;
};
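The union exists because "private" is a reserved word in C++: C callers keep the old spelling, C++ callers must use "priv". A sketch assuming the libkeyutils wrapper keyctl_dh_compute() and its keyutils.h copy of this struct:

    #include <keyutils.h>

    /* .priv aliases the old .private member; C++ code must use .priv. */
    long dh_demo(key_serial_t priv_key, key_serial_t prime,
                 key_serial_t base, char *buf, size_t buflen)
    {
        struct keyctl_dh_params p = { .priv  = priv_key,
                                      .prime = prime,
                                      .base  = base };
        return keyctl_dh_compute(&p, buf, buflen);
    }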
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 07548de5c988..2b7a652c9fa4 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -420,13 +420,19 @@ struct kvm_run {
struct kvm_coalesced_mmio_zone {
__u64 addr;
__u32 size;
- __u32 pad;
+ union {
+ __u32 pad;
+ __u32 pio;
+ };
};
struct kvm_coalesced_mmio {
__u64 phys_addr;
__u32 len;
- __u32 pad;
+ union {
+ __u32 pad;
+ __u32 pio;
+ };
__u8 data[8];
};
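The new pio bit distinguishes coalesced port-I/O zones from MMIO ones in the otherwise shared structures. A sketch, assuming (as the shared layout suggests) that the existing KVM_REGISTER_COALESCED_MMIO ioctl is reused once KVM_CAP_COALESCED_PIO is present:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Register a coalesced port-I/O zone on a VM fd (sketch). */
    static int coalesce_pio(int vm_fd, __u64 port, __u32 size)
    {
        struct kvm_coalesced_mmio_zone zone = {
            .addr = port,  /* a port number when .pio == 1 */
            .size = size,
            .pio  = 1,     /* 0 keeps the old MMIO meaning */
        };
        return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
    }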
@@ -719,6 +725,7 @@ struct kvm_ppc_one_seg_page_size {
#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
#define KVM_PPC_1T_SEGMENTS 0x00000002
+#define KVM_PPC_NO_HASH 0x00000004
struct kvm_ppc_smmu_info {
__u64 flags;
@@ -751,6 +758,15 @@ struct kvm_ppc_resize_hpt {
#define KVM_S390_SIE_PAGE_OFFSET 1
/*
+ * On arm64, machine type can be used to request the physical
+ * address size for the VM. Bits[7-0] are reserved for the guest
+ * PA size shift (i.e., log2(PA_Size)). For backward compatibility,
+ * value 0 implies the default IPA size, 40 bits.
+ */
+#define KVM_VM_TYPE_ARM_IPA_SIZE_MASK 0xffULL
+#define KVM_VM_TYPE_ARM_IPA_SIZE(x) \
+ ((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+/*
* ioctls for /dev/kvm fds:
*/
#define KVM_GET_API_VERSION _IO(KVMIO, 0x00)
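Machine-type bits [7:0] now carry the requested IPA shift, with 0 keeping the 40-bit default. A sketch that probes the new capability (whose return value is taken here to be the maximum supported IPA bits) before asking for a 48-bit guest physical address space:

    #include <fcntl.h>
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int create_vm_ipa48(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        int max = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);

        if (max < 48)   /* cap absent (0) or too small: use the default */
            return ioctl(kvm, KVM_CREATE_VM, 0);
        return ioctl(kvm, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(48));
    }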
@@ -952,6 +968,13 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_HPAGE_1M 156
#define KVM_CAP_NESTED_STATE 157
#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
+#define KVM_CAP_MSR_PLATFORM_INFO 159
+#define KVM_CAP_PPC_NESTED_HV 160
+#define KVM_CAP_HYPERV_SEND_IPI 161
+#define KVM_CAP_COALESCED_PIO 162
+#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
+#define KVM_CAP_EXCEPTION_PAYLOAD 164
+#define KVM_CAP_ARM_VM_IPA_SIZE 165
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 1a6fee974116..96c24478d8ce 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -29,6 +29,7 @@
#define HPFS_SUPER_MAGIC 0xf995e849
#define ISOFS_SUPER_MAGIC 0x9660
#define JFFS2_SUPER_MAGIC 0x72b6
+#define XFS_SUPER_MAGIC 0x58465342 /* "XFSB" */
#define PSTOREFS_MAGIC 0x6165676C
#define EFIVARFS_MAGIC 0xde5e81e4
#define HOSTFS_SUPER_MAGIC 0x00c0ffee
diff --git a/include/uapi/linux/memfd.h b/include/uapi/linux/memfd.h
index 015a4c0bbb47..7a8a26751c23 100644
--- a/include/uapi/linux/memfd.h
+++ b/include/uapi/linux/memfd.h
@@ -25,7 +25,9 @@
#define MFD_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define MFD_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define MFD_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define MFD_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
#define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MFD_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
#define MFD_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define MFD_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MFD_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index bfd5938fede6..d0f515d53299 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -28,7 +28,9 @@
#define MAP_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define MAP_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define MAP_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define MAP_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
#define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MAP_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
#define MAP_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
diff --git a/include/uapi/linux/ncsi.h b/include/uapi/linux/ncsi.h
index 4c292ecbb748..0a26a5576645 100644
--- a/include/uapi/linux/ncsi.h
+++ b/include/uapi/linux/ncsi.h
@@ -23,6 +23,9 @@
* optionally the preferred NCSI_ATTR_CHANNEL_ID.
* @NCSI_CMD_CLEAR_INTERFACE: clear any preferred package/channel combination.
* Requires NCSI_ATTR_IFINDEX.
+ * @NCSI_CMD_SEND_CMD: send NC-SI command to network card.
+ * Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID
+ * and NCSI_ATTR_CHANNEL_ID.
* @NCSI_CMD_MAX: highest command number
*/
enum ncsi_nl_commands {
@@ -30,6 +33,7 @@ enum ncsi_nl_commands {
NCSI_CMD_PKG_INFO,
NCSI_CMD_SET_INTERFACE,
NCSI_CMD_CLEAR_INTERFACE,
+ NCSI_CMD_SEND_CMD,
__NCSI_CMD_AFTER_LAST,
NCSI_CMD_MAX = __NCSI_CMD_AFTER_LAST - 1
@@ -43,6 +47,7 @@ enum ncsi_nl_commands {
* @NCSI_ATTR_PACKAGE_LIST: nested array of NCSI_PKG_ATTR attributes
* @NCSI_ATTR_PACKAGE_ID: package ID
* @NCSI_ATTR_CHANNEL_ID: channel ID
+ * @NCSI_ATTR_DATA: command payload
* @NCSI_ATTR_MAX: highest attribute number
*/
enum ncsi_nl_attrs {
@@ -51,6 +56,7 @@ enum ncsi_nl_attrs {
NCSI_ATTR_PACKAGE_LIST,
NCSI_ATTR_PACKAGE_ID,
NCSI_ATTR_CHANNEL_ID,
+ NCSI_ATTR_DATA,
__NCSI_ATTR_AFTER_LAST,
NCSI_ATTR_MAX = __NCSI_ATTR_AFTER_LAST - 1
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 7e27070b9440..f57c9e434d2d 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -128,37 +128,31 @@ enum {
static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
{
- static const char * const names[] = {
- [ND_CMD_ARS_CAP] = "ars_cap",
- [ND_CMD_ARS_START] = "ars_start",
- [ND_CMD_ARS_STATUS] = "ars_status",
- [ND_CMD_CLEAR_ERROR] = "clear_error",
- [ND_CMD_CALL] = "cmd_call",
- };
-
- if (cmd < ARRAY_SIZE(names) && names[cmd])
- return names[cmd];
- return "unknown";
+ switch (cmd) {
+ case ND_CMD_ARS_CAP: return "ars_cap";
+ case ND_CMD_ARS_START: return "ars_start";
+ case ND_CMD_ARS_STATUS: return "ars_status";
+ case ND_CMD_CLEAR_ERROR: return "clear_error";
+ case ND_CMD_CALL: return "cmd_call";
+ default: return "unknown";
+ }
}
static inline const char *nvdimm_cmd_name(unsigned cmd)
{
- static const char * const names[] = {
- [ND_CMD_SMART] = "smart",
- [ND_CMD_SMART_THRESHOLD] = "smart_thresh",
- [ND_CMD_DIMM_FLAGS] = "flags",
- [ND_CMD_GET_CONFIG_SIZE] = "get_size",
- [ND_CMD_GET_CONFIG_DATA] = "get_data",
- [ND_CMD_SET_CONFIG_DATA] = "set_data",
- [ND_CMD_VENDOR_EFFECT_LOG_SIZE] = "effect_size",
- [ND_CMD_VENDOR_EFFECT_LOG] = "effect_log",
- [ND_CMD_VENDOR] = "vendor",
- [ND_CMD_CALL] = "cmd_call",
- };
-
- if (cmd < ARRAY_SIZE(names) && names[cmd])
- return names[cmd];
- return "unknown";
+ switch (cmd) {
+ case ND_CMD_SMART: return "smart";
+ case ND_CMD_SMART_THRESHOLD: return "smart_thresh";
+ case ND_CMD_DIMM_FLAGS: return "flags";
+ case ND_CMD_GET_CONFIG_SIZE: return "get_size";
+ case ND_CMD_GET_CONFIG_DATA: return "get_data";
+ case ND_CMD_SET_CONFIG_DATA: return "set_data";
+ case ND_CMD_VENDOR_EFFECT_LOG_SIZE: return "effect_size";
+ case ND_CMD_VENDOR_EFFECT_LOG: return "effect_log";
+ case ND_CMD_VENDOR: return "vendor";
+ case ND_CMD_CALL: return "cmd_call";
+ default: return "unknown";
+ }
}
#define ND_IOCTL 'N'
@@ -208,10 +202,6 @@ enum nd_driver_flags {
ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM,
};
-enum {
- ND_MIN_NAMESPACE_SIZE = PAGE_SIZE,
-};
-
enum ars_masks {
ARS_STATUS_MASK = 0x0000FFFF,
ARS_EXT_STATUS_SHIFT = 16,
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 904db6148476..998155444e0d 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -43,6 +43,7 @@ enum {
#define NTF_PROXY 0x08 /* == ATF_PUBL */
#define NTF_EXT_LEARNED 0x10
#define NTF_OFFLOADED 0x20
+#define NTF_STICKY 0x40
#define NTF_ROUTER 0x80
/*
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index e23290ffdc77..579974b0bf0d 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -826,12 +826,14 @@ enum nft_meta_keys {
* @NFT_RT_NEXTHOP4: routing nexthop for IPv4
* @NFT_RT_NEXTHOP6: routing nexthop for IPv6
* @NFT_RT_TCPMSS: fetch current path tcp mss
+ * @NFT_RT_XFRM: boolean, skb->dst->xfrm != NULL
*/
enum nft_rt_keys {
NFT_RT_CLASSID,
NFT_RT_NEXTHOP4,
NFT_RT_NEXTHOP6,
NFT_RT_TCPMSS,
+ NFT_RT_XFRM,
__NFT_RT_MAX
};
#define NFT_RT_MAX (__NFT_RT_MAX - 1)
@@ -1175,6 +1177,21 @@ enum nft_quota_attributes {
#define NFTA_QUOTA_MAX (__NFTA_QUOTA_MAX - 1)
/**
+ * enum nft_secmark_attributes - nf_tables secmark object netlink attributes
+ *
+ * @NFTA_SECMARK_CTX: security context (NLA_STRING)
+ */
+enum nft_secmark_attributes {
+ NFTA_SECMARK_UNSPEC,
+ NFTA_SECMARK_CTX,
+ __NFTA_SECMARK_MAX,
+};
+#define NFTA_SECMARK_MAX (__NFTA_SECMARK_MAX - 1)
+
+/* Max security context length */
+#define NFT_SECMARK_CTX_MAXLEN 256
+
+/**
* enum nft_reject_types - nf_tables reject expression reject types
*
* @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable
@@ -1430,7 +1447,8 @@ enum nft_ct_timeout_timeout_attributes {
#define NFT_OBJECT_CONNLIMIT 5
#define NFT_OBJECT_TUNNEL 6
#define NFT_OBJECT_CT_TIMEOUT 7
-#define __NFT_OBJECT_MAX 8
+#define NFT_OBJECT_SECMARK 8
+#define __NFT_OBJECT_MAX 9
#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
/**
@@ -1493,9 +1511,16 @@ enum nft_flowtable_hook_attributes {
};
#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1)
+/**
+ * enum nft_osf_attributes - nftables osf expression netlink attributes
+ *
+ * @NFTA_OSF_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_OSF_TTL: Value of the TTL osf option (NLA_U8)
+ */
enum nft_osf_attributes {
NFTA_OSF_UNSPEC,
NFTA_OSF_DREG,
+ NFTA_OSF_TTL,
__NFTA_OSF_MAX,
};
#define NFTA_OSF_MAX (__NFTA_OSF_MAX - 1)
@@ -1512,6 +1537,35 @@ enum nft_devices_attributes {
};
#define NFTA_DEVICE_MAX (__NFTA_DEVICE_MAX - 1)
+/*
+ * enum nft_xfrm_attributes - nf_tables xfrm expr netlink attributes
+ *
+ * @NFTA_XFRM_DREG: destination register (NLA_U32)
+ * @NFTA_XFRM_KEY: enum nft_xfrm_keys (NLA_U32)
+ * @NFTA_XFRM_DIR: direction (NLA_U8)
+ * @NFTA_XFRM_SPNUM: index in secpath array (NLA_U32)
+ */
+enum nft_xfrm_attributes {
+ NFTA_XFRM_UNSPEC,
+ NFTA_XFRM_DREG,
+ NFTA_XFRM_KEY,
+ NFTA_XFRM_DIR,
+ NFTA_XFRM_SPNUM,
+ __NFTA_XFRM_MAX
+};
+#define NFTA_XFRM_MAX (__NFTA_XFRM_MAX - 1)
+
+enum nft_xfrm_keys {
+ NFT_XFRM_KEY_UNSPEC,
+ NFT_XFRM_KEY_DADDR_IP4,
+ NFT_XFRM_KEY_DADDR_IP6,
+ NFT_XFRM_KEY_SADDR_IP4,
+ NFT_XFRM_KEY_SADDR_IP6,
+ NFT_XFRM_KEY_REQID,
+ NFT_XFRM_KEY_SPI,
+ __NFT_XFRM_KEY_MAX,
+};
+#define NFT_XFRM_KEY_MAX (__NFT_XFRM_KEY_MAX - 1)
/**
* enum nft_trace_attributes - nf_tables trace netlink attributes
diff --git a/include/uapi/linux/netfilter/xt_cgroup.h b/include/uapi/linux/netfilter/xt_cgroup.h
index e96dfa1b34f7..b74e370d6133 100644
--- a/include/uapi/linux/netfilter/xt_cgroup.h
+++ b/include/uapi/linux/netfilter/xt_cgroup.h
@@ -22,4 +22,20 @@ struct xt_cgroup_info_v1 {
void *priv __attribute__((aligned(8)));
};
+#define XT_CGROUP_PATH_MAX 512
+
+struct xt_cgroup_info_v2 {
+ __u8 has_path;
+ __u8 has_classid;
+ __u8 invert_path;
+ __u8 invert_classid;
+ union {
+ char path[XT_CGROUP_PATH_MAX];
+ __u32 classid;
+ };
+
+ /* kernel internal data */
+ void *priv __attribute__((aligned(8)));
+};
+
#endif /* _UAPI_XT_CGROUP_H */
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 776bc92e9118..486ed1f0c0bc 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -155,6 +155,7 @@ enum nlmsgerr_attrs {
#define NETLINK_LIST_MEMBERSHIPS 9
#define NETLINK_CAP_ACK 10
#define NETLINK_EXT_ACK 11
+#define NETLINK_DUMP_STRICT_CHK 12
struct nl_pktinfo {
__u32 group;
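The new option is a plain boolean toggled with setsockopt(); kernels that support it then reject malformed dump requests instead of silently ignoring stray attributes. Sketch:

    #include <linux/netlink.h>
    #include <sys/socket.h>

    #ifndef SOL_NETLINK
    #define SOL_NETLINK 270
    #endif

    /* Opt a netlink socket in to strict dump/attribute validation. */
    static int enable_strict_chk(int nlfd)
    {
        int one = 1;

        return setsockopt(nlfd, SOL_NETLINK, NETLINK_DUMP_STRICT_CHK,
                          &one, sizeof(one));
    }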
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 7acc16f34942..6d610bae30a9 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1033,6 +1033,9 @@
* %NL80211_ATTR_CHANNEL_WIDTH,%NL80211_ATTR_NSS attributes with its
* address(specified in %NL80211_ATTR_MAC).
*
+ * @NL80211_CMD_GET_FTM_RESPONDER_STATS: Retrieve FTM responder statistics, in
+ * the %NL80211_ATTR_FTM_RESPONDER_STATS attribute.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1245,6 +1248,8 @@ enum nl80211_commands {
NL80211_CMD_CONTROL_PORT_FRAME,
+ NL80211_CMD_GET_FTM_RESPONDER_STATS,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -2241,6 +2246,14 @@ enum nl80211_commands {
* association request when used with NL80211_CMD_NEW_STATION). Can be set
* only if %NL80211_STA_FLAG_WME is set.
*
+ * @NL80211_ATTR_FTM_RESPONDER: nested attribute which user-space can include
+ *	in %NL80211_CMD_START_AP or %NL80211_CMD_SET_BEACON to enable fine
+ *	timing measurement (FTM) responder functionality; it carries the
+ *	responder parameters, see &enum nl80211_ftm_responder_attributes
+ *
+ * @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder
+ * statistics, see &enum nl80211_ftm_responder_stats.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2682,6 +2695,10 @@ enum nl80211_attrs {
NL80211_ATTR_HE_CAPABILITY,
+ NL80211_ATTR_FTM_RESPONDER,
+
+ NL80211_ATTR_FTM_RESPONDER_STATS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3050,8 +3067,13 @@ enum nl80211_sta_bss_param {
* received from the station (u64, usec)
* @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment
* @NL80211_STA_INFO_ACK_SIGNAL: signal strength of the last ACK frame(u8, dBm)
- * @NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG: avg signal strength of (data)
- * ACK frame (s8, dBm)
+ * @NL80211_STA_INFO_ACK_SIGNAL_AVG: avg signal strength of ACK frames (s8, dBm)
+ * @NL80211_STA_INFO_RX_MPDUS: total number of received packets (MPDUs)
+ * (u32, from this station)
+ * @NL80211_STA_INFO_FCS_ERROR_COUNT: total number of packets (MPDUs) received
+ * with an FCS error (u32, from this station). This count may not include
+ * some packets with an FCS error due to TA corruption. Hence this counter
+ * might not be fully accurate.
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -3091,13 +3113,19 @@ enum nl80211_sta_info {
NL80211_STA_INFO_RX_DURATION,
NL80211_STA_INFO_PAD,
NL80211_STA_INFO_ACK_SIGNAL,
- NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG,
+ NL80211_STA_INFO_ACK_SIGNAL_AVG,
+ NL80211_STA_INFO_RX_MPDUS,
+ NL80211_STA_INFO_FCS_ERROR_COUNT,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
NL80211_STA_INFO_MAX = __NL80211_STA_INFO_AFTER_LAST - 1
};
+/* we renamed this - stay compatible */
+#define NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG NL80211_STA_INFO_ACK_SIGNAL_AVG
+
+
/**
* enum nl80211_tid_stats - per TID statistics attributes
* @__NL80211_TID_STATS_INVALID: attribute number 0 is reserved
@@ -4338,7 +4366,7 @@ enum nl80211_txrate_gi {
* enum nl80211_band - Frequency band
* @NL80211_BAND_2GHZ: 2.4 GHz ISM band
* @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
- * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
+ * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz)
* @NUM_NL80211_BANDS: number of bands, avoid using this in userspace
* since newer kernel versions may support more bands
*/
@@ -5213,9 +5241,8 @@ enum nl80211_feature_flags {
* "radar detected" event.
* @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211: Driver supports sending and
* receiving control port frames over nl80211 instead of the netdevice.
- * @NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT: This Driver support data ack
- * rssi if firmware support, this flag is to intimate about ack rssi
- * support to nl80211.
+ * @NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT: This driver/device supports
+ * (average) ACK signal strength reporting.
* @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate
* TXQs.
* @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the
@@ -5223,6 +5250,13 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data
* except for supported rates from the probe request content if requested
* by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag.
+ * @NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER: Driver supports enabling fine
+ * timing measurement responder role.
+ *
+ * @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0: Driver/device confirm that they are
+ * able to rekey an in-use key correctly. Userspace must not rekey PTK keys
+ * if this flag is not set. Ignoring this can leak clear text packets and/or
+ * freeze the connection.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -5255,10 +5289,14 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN,
NL80211_EXT_FEATURE_DFS_OFFLOAD,
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
- NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT,
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT,
+ /* we renamed this - stay compatible */
+ NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT,
NL80211_EXT_FEATURE_TXQS,
NL80211_EXT_FEATURE_SCAN_RANDOM_SN,
NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT,
+ NL80211_EXT_FEATURE_CAN_REPLACE_PTK0,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5798,4 +5836,74 @@ enum nl80211_external_auth_action {
NL80211_EXTERNAL_AUTH_ABORT,
};
+/**
+ * enum nl80211_ftm_responder_attributes - fine timing measurement
+ * responder attributes
+ * @__NL80211_FTM_RESP_ATTR_INVALID: Invalid
+ * @NL80211_FTM_RESP_ATTR_ENABLED: FTM responder is enabled
+ * @NL80211_FTM_RESP_ATTR_LCI: The content of Measurement Report Element
+ * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10)
+ * @NL80211_FTM_RESP_ATTR_CIVICLOC: The content of Measurement Report Element
+ * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13)
+ * @__NL80211_FTM_RESP_ATTR_LAST: Internal
+ * @NL80211_FTM_RESP_ATTR_MAX: highest FTM responder attribute.
+ */
+enum nl80211_ftm_responder_attributes {
+ __NL80211_FTM_RESP_ATTR_INVALID,
+
+ NL80211_FTM_RESP_ATTR_ENABLED,
+ NL80211_FTM_RESP_ATTR_LCI,
+ NL80211_FTM_RESP_ATTR_CIVICLOC,
+
+ /* keep last */
+ __NL80211_FTM_RESP_ATTR_LAST,
+ NL80211_FTM_RESP_ATTR_MAX = __NL80211_FTM_RESP_ATTR_LAST - 1,
+};
+
+/*
+ * enum nl80211_ftm_responder_stats - FTM responder statistics
+ *
+ * These attribute types are used with %NL80211_ATTR_FTM_RESPONDER_STATS
+ * when getting FTM responder statistics.
+ *
+ * @__NL80211_FTM_STATS_INVALID: attribute number 0 is reserved
+ * @NL80211_FTM_STATS_SUCCESS_NUM: number of FTM sessions in which all frames
+ *	were successfully answered (u32)
+ * @NL80211_FTM_STATS_PARTIAL_NUM: number of FTM sessions in which part of the
+ * frames were successfully answered (u32)
+ * @NL80211_FTM_STATS_FAILED_NUM: number of failed FTM sessions (u32)
+ * @NL80211_FTM_STATS_ASAP_NUM: number of ASAP sessions (u32)
+ * @NL80211_FTM_STATS_NON_ASAP_NUM: number of non-ASAP sessions (u32)
+ * @NL80211_FTM_STATS_TOTAL_DURATION_MSEC: total session durations - gives an
+ * indication of how much time the responder was busy (u64, msec)
+ * @NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM: number of unknown FTM triggers -
+ *	triggers from initiators that didn't successfully finish the negotiation
+ * phase with the responder (u32)
+ * @NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM: number of FTM reschedule requests
+ * - initiator asks for a new scheduling although it already has scheduled
+ * FTM slot (u32)
+ * @NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM: number of FTM triggers out of
+ * scheduled window (u32)
+ * @NL80211_FTM_STATS_PAD: used for padding, ignore
+ * @__NL80211_FTM_STATS_AFTER_LAST: Internal
+ * @NL80211_FTM_STATS_MAX: highest possible FTM responder stats attribute
+ */
+enum nl80211_ftm_responder_stats {
+ __NL80211_FTM_STATS_INVALID,
+ NL80211_FTM_STATS_SUCCESS_NUM,
+ NL80211_FTM_STATS_PARTIAL_NUM,
+ NL80211_FTM_STATS_FAILED_NUM,
+ NL80211_FTM_STATS_ASAP_NUM,
+ NL80211_FTM_STATS_NON_ASAP_NUM,
+ NL80211_FTM_STATS_TOTAL_DURATION_MSEC,
+ NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM,
+ NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM,
+ NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM,
+ NL80211_FTM_STATS_PAD,
+
+ /* keep last */
+ __NL80211_FTM_STATS_AFTER_LAST,
+ NL80211_FTM_STATS_MAX = __NL80211_FTM_STATS_AFTER_LAST - 1
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index ee556ccc93f4..e1e9888c85e6 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -52,6 +52,7 @@
#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_IMM_READY 0x01 /* Immediate Readiness */
#define PCI_STATUS_INTERRUPT 0x08 /* Interrupt status */
#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
#define PCI_STATUS_66MHZ 0x20 /* Support 66 MHz PCI 2.1 bus */
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index be382fb0592d..401d0c1e612d 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -483,6 +483,8 @@ enum {
TCA_FLOWER_KEY_ENC_OPTS,
TCA_FLOWER_KEY_ENC_OPTS_MASK,
+ TCA_FLOWER_IN_HW_COUNT,
+
__TCA_FLOWER_MAX,
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 8975fd1a1421..89ee47c2f17d 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -395,9 +395,9 @@ enum {
struct tc_htb_xstats {
__u32 lends;
__u32 borrows;
- __u32 giants; /* too big packets (rate will not be accurate) */
- __u32 tokens;
- __u32 ctokens;
+ __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */
+ __s32 tokens;
+ __s32 ctokens;
};
/* HFSC section */
@@ -1084,4 +1084,50 @@ enum {
CAKE_ATM_MAX
};
+
+/* TAPRIO */
+enum {
+ TC_TAPRIO_CMD_SET_GATES = 0x00,
+ TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
+ TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
+};
+
+enum {
+ TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
+ TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
+ TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
+ TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
+ TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
+ __TCA_TAPRIO_SCHED_ENTRY_MAX,
+};
+#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
+
+/* The format for schedule entry list is:
+ * [TCA_TAPRIO_SCHED_ENTRY_LIST]
+ * [TCA_TAPRIO_SCHED_ENTRY]
+ * [TCA_TAPRIO_SCHED_ENTRY_CMD]
+ * [TCA_TAPRIO_SCHED_ENTRY_GATES]
+ * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
+ */
+enum {
+ TCA_TAPRIO_SCHED_UNSPEC,
+ TCA_TAPRIO_SCHED_ENTRY,
+ __TCA_TAPRIO_SCHED_MAX,
+};
+
+#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
+
+enum {
+ TCA_TAPRIO_ATTR_UNSPEC,
+ TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
+ TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
+ TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
+ TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
+ TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
+ TCA_TAPRIO_PAD,
+ __TCA_TAPRIO_ATTR_MAX,
+};
+
+#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index b479db5c71d9..34dd3d497f2c 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -301,6 +301,7 @@ enum sctp_sinfo_flags {
SCTP_SACK_IMMEDIATELY = (1 << 3), /* SACK should be sent without delay. */
/* 2 bits here have been used by SCTP_PR_SCTP_MASK */
SCTP_SENDALL = (1 << 6),
+ SCTP_PR_SCTP_ALL = (1 << 7),
SCTP_NOTIFICATION = MSG_NOTIFICATION, /* Next message is not user msg but notification. */
SCTP_EOF = MSG_FIN, /* Initiate graceful shutdown process. */
};
diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h
index dde1344f047c..6507ad0afc81 100644
--- a/include/uapi/linux/shm.h
+++ b/include/uapi/linux/shm.h
@@ -65,7 +65,9 @@ struct shmid_ds {
#define SHM_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define SHM_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define SHM_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define SHM_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
#define SHM_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define SHM_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
#define SHM_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define SHM_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define SHM_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
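/*
 * Illustrative userspace sketch, not part of the diff above: requesting
 * one of the newly exposed huge page sizes for a SysV shm segment. The
 * constants are restated to avoid the glibc/UAPI shm.h header clash;
 * 512MB huge pages exist e.g. on arm64 with 64K base pages, and the pool
 * must be provisioned beforehand or shmget() fails.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_HUGETLB
#define SHM_HUGETLB	04000
#endif
#define SHM_HUGE_SHIFT	26
#define SHM_HUGE_512MB	(29 << SHM_HUGE_SHIFT)	/* log2(512MB) = 29 */

int main(void)
{
	int id = shmget(IPC_PRIVATE, 512UL << 20,
			IPC_CREAT | SHM_HUGETLB | SHM_HUGE_512MB | 0600);

	if (id < 0)
		perror("shmget");
	return 0;
}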
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index ac9e8c96d9bd..8cb3a6fef553 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -18,14 +18,17 @@ struct smc_diag_req {
* on the internal clcsock, and more SMC-related socket data
*/
struct smc_diag_msg {
- __u8 diag_family;
- __u8 diag_state;
- __u8 diag_mode;
- __u8 diag_shutdown;
+ __u8 diag_family;
+ __u8 diag_state;
+ union {
+ __u8 diag_mode;
+ __u8 diag_fallback; /* the old name of the field */
+ };
+ __u8 diag_shutdown;
struct inet_diag_sockid id;
- __u32 diag_uid;
- __u64 diag_inode;
+ __u32 diag_uid;
+ __aligned_u64 diag_inode;
};
/* Mode of a connection */
@@ -99,11 +102,11 @@ struct smc_diag_fallback {
};
struct smcd_diag_dmbinfo { /* SMC-D Socket internals */
- __u32 linkid; /* Link identifier */
- __u64 peer_gid; /* Peer GID */
- __u64 my_gid; /* My GID */
- __u64 token; /* Token of DMB */
- __u64 peer_token; /* Token of remote DMBE */
+ __u32 linkid; /* Link identifier */
+ __aligned_u64 peer_gid; /* Peer GID */
+ __aligned_u64 my_gid; /* My GID */
+ __aligned_u64 token; /* Token of DMB */
+ __aligned_u64 peer_token; /* Token of remote DMBE */
};
#endif /* _UAPI_SMC_DIAG_H_ */
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 09d00f8c442b..09502de447f5 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -40,5 +40,6 @@ struct udphdr {
#define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */
#define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */
#define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */
+#define UDP_ENCAP_RXRPC 6
#endif /* _UAPI_LINUX_UDP_H */
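/*
 * Illustrative sketch, not part of the diff above: UDP_ENCAP is a socket
 * option at the UDP level. For UDP_ENCAP_RXRPC the AF_RXRPC code sets the
 * mode on its transport socket from inside the kernel, so a plain
 * userspace setsockopt() with this value may well be rejected; the
 * snippet only shows the shape of the knob. Constants are restated from
 * <linux/udp.h> to keep the example self-contained.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_ENCAP
#define UDP_ENCAP	100
#endif
#define UDP_ENCAP_RXRPC	6

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int mode = UDP_ENCAP_RXRPC;

	if (setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &mode, sizeof(mode)))
		perror("setsockopt(UDP_ENCAP)");
	return 0;
}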
diff --git a/include/uapi/linux/usb/tmc.h b/include/uapi/linux/usb/tmc.h
index 729af2f861a4..fdd4d88a7b95 100644
--- a/include/uapi/linux/usb/tmc.h
+++ b/include/uapi/linux/usb/tmc.h
@@ -4,6 +4,7 @@
* Copyright (C) 2008 Novell, Inc.
* Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (C) 2015 Dave Penkler <dpenkler@gmail.com>
+ * Copyright (C) 2018 IVI Foundation, Inc.
*
* This file holds USB constants defined by the USB Device Class
* and USB488 Subclass Definitions for Test and Measurement devices
@@ -40,11 +41,38 @@
#define USBTMC488_REQUEST_GOTO_LOCAL 161
#define USBTMC488_REQUEST_LOCAL_LOCKOUT 162
+struct usbtmc_request {
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
+} __attribute__ ((packed));
+
+struct usbtmc_ctrlrequest {
+ struct usbtmc_request req;
+ void __user *data; /* pointer to user space */
+} __attribute__ ((packed));
+
struct usbtmc_termchar {
__u8 term_char;
__u8 term_char_enabled;
} __attribute__ ((packed));
+/*
+ * usbtmc_message->flags:
+ */
+#define USBTMC_FLAG_ASYNC 0x0001
+#define USBTMC_FLAG_APPEND 0x0002
+#define USBTMC_FLAG_IGNORE_TRAILER 0x0004
+
+struct usbtmc_message {
+ __u32 transfer_size; /* number of bytes to transfer */
+ __u32 transferred; /* number of bytes already received/written */
+ __u32 flags; /* bit 0: 0 = synchronous; 1 = asynchronous */
+ void __user *message; /* pointer to header and data in user space */
+} __attribute__ ((packed));
+
/* Request values for USBTMC driver's ioctl entry point */
#define USBTMC_IOC_NR 91
#define USBTMC_IOCTL_INDICATOR_PULSE _IO(USBTMC_IOC_NR, 1)
@@ -53,10 +81,15 @@ struct usbtmc_termchar {
#define USBTMC_IOCTL_ABORT_BULK_IN _IO(USBTMC_IOC_NR, 4)
#define USBTMC_IOCTL_CLEAR_OUT_HALT _IO(USBTMC_IOC_NR, 6)
#define USBTMC_IOCTL_CLEAR_IN_HALT _IO(USBTMC_IOC_NR, 7)
+#define USBTMC_IOCTL_CTRL_REQUEST _IOWR(USBTMC_IOC_NR, 8, struct usbtmc_ctrlrequest)
#define USBTMC_IOCTL_GET_TIMEOUT _IOR(USBTMC_IOC_NR, 9, __u32)
#define USBTMC_IOCTL_SET_TIMEOUT _IOW(USBTMC_IOC_NR, 10, __u32)
#define USBTMC_IOCTL_EOM_ENABLE _IOW(USBTMC_IOC_NR, 11, __u8)
#define USBTMC_IOCTL_CONFIG_TERMCHAR _IOW(USBTMC_IOC_NR, 12, struct usbtmc_termchar)
+#define USBTMC_IOCTL_WRITE _IOWR(USBTMC_IOC_NR, 13, struct usbtmc_message)
+#define USBTMC_IOCTL_READ _IOWR(USBTMC_IOC_NR, 14, struct usbtmc_message)
+#define USBTMC_IOCTL_WRITE_RESULT _IOWR(USBTMC_IOC_NR, 15, __u32)
+#define USBTMC_IOCTL_API_VERSION _IOR(USBTMC_IOC_NR, 16, __u32)
#define USBTMC488_IOCTL_GET_CAPS _IOR(USBTMC_IOC_NR, 17, unsigned char)
#define USBTMC488_IOCTL_READ_STB _IOR(USBTMC_IOC_NR, 18, unsigned char)
@@ -64,6 +97,14 @@ struct usbtmc_termchar {
#define USBTMC488_IOCTL_GOTO_LOCAL _IO(USBTMC_IOC_NR, 20)
#define USBTMC488_IOCTL_LOCAL_LOCKOUT _IO(USBTMC_IOC_NR, 21)
#define USBTMC488_IOCTL_TRIGGER _IO(USBTMC_IOC_NR, 22)
+#define USBTMC488_IOCTL_WAIT_SRQ _IOW(USBTMC_IOC_NR, 23, __u32)
+
+#define USBTMC_IOCTL_MSG_IN_ATTR _IOR(USBTMC_IOC_NR, 24, __u8)
+#define USBTMC_IOCTL_AUTO_ABORT _IOW(USBTMC_IOC_NR, 25, __u8)
+
+/* Cancel and cleanup asynchronous calls */
+#define USBTMC_IOCTL_CANCEL_IO _IO(USBTMC_IOC_NR, 35)
+#define USBTMC_IOCTL_CLEANUP_IO _IO(USBTMC_IOC_NR, 36)
/* Driver encoded usb488 capabilities */
#define USBTMC488_CAPABILITY_TRIGGER 1
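/*
 * Illustrative userspace sketch, not part of the diff above: issuing a
 * USBTMC GET_CAPABILITIES class request through the new
 * USBTMC_IOCTL_CTRL_REQUEST pass-through. bRequest 7 and the 0x18-byte
 * response length come from the USBTMC spec, and the device path is
 * hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/usb/tmc.h>

int main(void)
{
	unsigned char caps[0x18] = {0};
	struct usbtmc_ctrlrequest ctrl = {
		.req = {
			.bRequestType = 0xa1,	/* IN | class | interface */
			.bRequest     = 7,	/* USBTMC GET_CAPABILITIES */
			.wValue       = 0,
			.wIndex       = 0,	/* interface number */
			.wLength      = sizeof(caps),
		},
		.data = caps,
	};
	int fd = open("/dev/usbtmc0", O_RDWR);

	if (fd < 0 || ioctl(fd, USBTMC_IOCTL_CTRL_REQUEST, &ctrl) < 0)
		perror("usbtmc ctrl request");
	return 0;
}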
diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
index ff6cc6cb4227..d854cb19c42c 100644
--- a/include/uapi/linux/usb/video.h
+++ b/include/uapi/linux/usb/video.h
@@ -192,14 +192,14 @@ struct uvc_descriptor_header {
/* 3.7.2. Video Control Interface Header Descriptor */
struct uvc_header_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u16 bcdUVC;
- __u16 wTotalLength;
- __u32 dwClockFrequency;
- __u8 bInCollection;
- __u8 baInterfaceNr[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __le16 bcdUVC;
+ __le16 wTotalLength;
+ __le32 dwClockFrequency;
+ __u8 bInCollection;
+ __u8 baInterfaceNr[];
} __attribute__((__packed__));
#define UVC_DT_HEADER_SIZE(n) (12+(n))
@@ -209,57 +209,57 @@ struct uvc_header_descriptor {
#define DECLARE_UVC_HEADER_DESCRIPTOR(n) \
struct UVC_HEADER_DESCRIPTOR(n) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u16 bcdUVC; \
- __u16 wTotalLength; \
- __u32 dwClockFrequency; \
- __u8 bInCollection; \
- __u8 baInterfaceNr[n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __le16 bcdUVC; \
+ __le16 wTotalLength; \
+ __le32 dwClockFrequency; \
+ __u8 bInCollection; \
+ __u8 baInterfaceNr[n]; \
} __attribute__ ((packed))
/* 3.7.2.1. Input Terminal Descriptor */
struct uvc_input_terminal_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bTerminalID;
- __u16 wTerminalType;
- __u8 bAssocTerminal;
- __u8 iTerminal;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 iTerminal;
} __attribute__((__packed__));
#define UVC_DT_INPUT_TERMINAL_SIZE 8
/* 3.7.2.2. Output Terminal Descriptor */
struct uvc_output_terminal_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bTerminalID;
- __u16 wTerminalType;
- __u8 bAssocTerminal;
- __u8 bSourceID;
- __u8 iTerminal;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bSourceID;
+ __u8 iTerminal;
} __attribute__((__packed__));
#define UVC_DT_OUTPUT_TERMINAL_SIZE 9
/* 3.7.2.3. Camera Terminal Descriptor */
struct uvc_camera_terminal_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bTerminalID;
- __u16 wTerminalType;
- __u8 bAssocTerminal;
- __u8 iTerminal;
- __u16 wObjectiveFocalLengthMin;
- __u16 wObjectiveFocalLengthMax;
- __u16 wOcularFocalLength;
- __u8 bControlSize;
- __u8 bmControls[3];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 iTerminal;
+ __le16 wObjectiveFocalLengthMin;
+ __le16 wObjectiveFocalLengthMax;
+ __le16 wOcularFocalLength;
+ __u8 bControlSize;
+ __u8 bmControls[3];
} __attribute__((__packed__));
#define UVC_DT_CAMERA_TERMINAL_SIZE(n) (15+(n))
@@ -293,15 +293,15 @@ struct UVC_SELECTOR_UNIT_DESCRIPTOR(n) { \
/* 3.7.2.5. Processing Unit Descriptor */
struct uvc_processing_unit_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bUnitID;
- __u8 bSourceID;
- __u16 wMaxMultiplier;
- __u8 bControlSize;
- __u8 bmControls[2];
- __u8 iProcessing;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ __le16 wMaxMultiplier;
+ __u8 bControlSize;
+ __u8 bmControls[2];
+ __u8 iProcessing;
} __attribute__((__packed__));
#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n))
@@ -343,29 +343,29 @@ struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) { \
/* 3.8.2.2. Video Control Interrupt Endpoint Descriptor */
struct uvc_control_endpoint_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u16 wMaxTransferSize;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __le16 wMaxTransferSize;
} __attribute__((__packed__));
#define UVC_DT_CONTROL_ENDPOINT_SIZE 5
/* 3.9.2.1. Input Header Descriptor */
struct uvc_input_header_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bNumFormats;
- __u16 wTotalLength;
- __u8 bEndpointAddress;
- __u8 bmInfo;
- __u8 bTerminalLink;
- __u8 bStillCaptureMethod;
- __u8 bTriggerSupport;
- __u8 bTriggerUsage;
- __u8 bControlSize;
- __u8 bmaControls[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bNumFormats;
+ __le16 wTotalLength;
+ __u8 bEndpointAddress;
+ __u8 bmInfo;
+ __u8 bTerminalLink;
+ __u8 bStillCaptureMethod;
+ __u8 bTriggerSupport;
+ __u8 bTriggerUsage;
+ __u8 bControlSize;
+ __u8 bmaControls[];
} __attribute__((__packed__));
#define UVC_DT_INPUT_HEADER_SIZE(n, p) (13+(n*p))
@@ -375,32 +375,32 @@ struct uvc_input_header_descriptor {
#define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p) \
struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bNumFormats; \
- __u16 wTotalLength; \
- __u8 bEndpointAddress; \
- __u8 bmInfo; \
- __u8 bTerminalLink; \
- __u8 bStillCaptureMethod; \
- __u8 bTriggerSupport; \
- __u8 bTriggerUsage; \
- __u8 bControlSize; \
- __u8 bmaControls[p][n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bNumFormats; \
+ __le16 wTotalLength; \
+ __u8 bEndpointAddress; \
+ __u8 bmInfo; \
+ __u8 bTerminalLink; \
+ __u8 bStillCaptureMethod; \
+ __u8 bTriggerSupport; \
+ __u8 bTriggerUsage; \
+ __u8 bControlSize; \
+ __u8 bmaControls[p][n]; \
} __attribute__ ((packed))
/* 3.9.2.2. Output Header Descriptor */
struct uvc_output_header_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bNumFormats;
- __u16 wTotalLength;
- __u8 bEndpointAddress;
- __u8 bTerminalLink;
- __u8 bControlSize;
- __u8 bmaControls[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bNumFormats;
+ __le16 wTotalLength;
+ __u8 bEndpointAddress;
+ __u8 bTerminalLink;
+ __u8 bControlSize;
+ __u8 bmaControls[];
} __attribute__((__packed__));
#define UVC_DT_OUTPUT_HEADER_SIZE(n, p) (9+(n*p))
@@ -410,15 +410,15 @@ struct uvc_output_header_descriptor {
#define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \
struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bNumFormats; \
- __u16 wTotalLength; \
- __u8 bEndpointAddress; \
- __u8 bTerminalLink; \
- __u8 bControlSize; \
- __u8 bmaControls[p][n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bNumFormats; \
+ __le16 wTotalLength; \
+ __u8 bEndpointAddress; \
+ __u8 bTerminalLink; \
+ __u8 bControlSize; \
+ __u8 bmaControls[p][n]; \
} __attribute__ ((packed))
/* 3.9.2.6. Color matching descriptor */
@@ -473,19 +473,19 @@ struct uvc_format_uncompressed {
/* Uncompressed Payload - 3.1.2. Uncompressed Video Frame Descriptor */
struct uvc_frame_uncompressed {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bFrameIndex;
- __u8 bmCapabilities;
- __u16 wWidth;
- __u16 wHeight;
- __u32 dwMinBitRate;
- __u32 dwMaxBitRate;
- __u32 dwMaxVideoFrameBufferSize;
- __u32 dwDefaultFrameInterval;
- __u8 bFrameIntervalType;
- __u32 dwFrameInterval[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __le16 wWidth;
+ __le16 wHeight;
+ __le32 dwMinBitRate;
+ __le32 dwMaxBitRate;
+ __le32 dwMaxVideoFrameBufferSize;
+ __le32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __le32 dwFrameInterval[];
} __attribute__((__packed__));
#define UVC_DT_FRAME_UNCOMPRESSED_SIZE(n) (26+4*(n))
@@ -495,19 +495,19 @@ struct uvc_frame_uncompressed {
#define DECLARE_UVC_FRAME_UNCOMPRESSED(n) \
struct UVC_FRAME_UNCOMPRESSED(n) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bFrameIndex; \
- __u8 bmCapabilities; \
- __u16 wWidth; \
- __u16 wHeight; \
- __u32 dwMinBitRate; \
- __u32 dwMaxBitRate; \
- __u32 dwMaxVideoFrameBufferSize; \
- __u32 dwDefaultFrameInterval; \
- __u8 bFrameIntervalType; \
- __u32 dwFrameInterval[n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bFrameIndex; \
+ __u8 bmCapabilities; \
+ __le16 wWidth; \
+ __le16 wHeight; \
+ __le32 dwMinBitRate; \
+ __le32 dwMaxBitRate; \
+ __le32 dwMaxVideoFrameBufferSize; \
+ __le32 dwDefaultFrameInterval; \
+ __u8 bFrameIntervalType; \
+ __le32 dwFrameInterval[n]; \
} __attribute__ ((packed))
/* MJPEG Payload - 3.1.1. MJPEG Video Format Descriptor */
@@ -529,19 +529,19 @@ struct uvc_format_mjpeg {
/* MJPEG Payload - 3.1.2. MJPEG Video Frame Descriptor */
struct uvc_frame_mjpeg {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bFrameIndex;
- __u8 bmCapabilities;
- __u16 wWidth;
- __u16 wHeight;
- __u32 dwMinBitRate;
- __u32 dwMaxBitRate;
- __u32 dwMaxVideoFrameBufferSize;
- __u32 dwDefaultFrameInterval;
- __u8 bFrameIntervalType;
- __u32 dwFrameInterval[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __le16 wWidth;
+ __le16 wHeight;
+ __le32 dwMinBitRate;
+ __le32 dwMaxBitRate;
+ __le32 dwMaxVideoFrameBufferSize;
+ __le32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __le32 dwFrameInterval[];
} __attribute__((__packed__));
#define UVC_DT_FRAME_MJPEG_SIZE(n) (26+4*(n))
@@ -551,19 +551,19 @@ struct uvc_frame_mjpeg {
#define DECLARE_UVC_FRAME_MJPEG(n) \
struct UVC_FRAME_MJPEG(n) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bFrameIndex; \
- __u8 bmCapabilities; \
- __u16 wWidth; \
- __u16 wHeight; \
- __u32 dwMinBitRate; \
- __u32 dwMaxBitRate; \
- __u32 dwMaxVideoFrameBufferSize; \
- __u32 dwDefaultFrameInterval; \
- __u8 bFrameIntervalType; \
- __u32 dwFrameInterval[n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bFrameIndex; \
+ __u8 bmCapabilities; \
+ __le16 wWidth; \
+ __le16 wHeight; \
+ __le32 dwMinBitRate; \
+ __le32 dwMaxBitRate; \
+ __le32 dwMaxVideoFrameBufferSize; \
+ __le32 dwDefaultFrameInterval; \
+ __u8 bFrameIntervalType; \
+ __le32 dwFrameInterval[n]; \
} __attribute__ ((packed))
#endif /* __LINUX_USB_VIDEO_H */
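/*
 * Illustrative kernel-side sketch, not part of the diff above: the point
 * of retyping these descriptor fields as __le16/__le32 is that accesses
 * must now go through the byte-order helpers, which sparse can enforce.
 * The helper name is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/usb/video.h>

static u32 hypothetical_default_interval_ns(const struct uvc_frame_uncompressed *frm)
{
	/* dwDefaultFrameInterval is little-endian, in 100 ns units */
	return le32_to_cpu(frm->dwDefaultFrameInterval) * 100;
}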
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 1aa7b82e8169..f378b9802d8b 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -200,6 +200,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
+#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
};
@@ -215,6 +216,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING "vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING "vfio-ccw"
+#define VFIO_DEVICE_API_AP_STRING "vfio-ap"
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
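/*
 * Illustrative userspace sketch, not part of the diff above: probing
 * whether a VFIO device is exposed by the new vfio-ap driver. device_fd
 * is assumed to come from VFIO_GROUP_GET_DEVICE_FD; the helper name is
 * hypothetical.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int hypothetical_is_ap_device(int device_fd)
{
	struct vfio_device_info info;

	memset(&info, 0, sizeof(info));
	info.argsz = sizeof(info);
	if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info))
		return 0;
	return !!(info.flags & VFIO_DEVICE_FLAGS_AP);
}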
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 25a16760de2a..1254b51a551a 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -763,10 +763,28 @@ struct ib_uverbs_sge {
__u32 lkey;
};
+enum ib_uverbs_wr_opcode {
+ IB_UVERBS_WR_RDMA_WRITE = 0,
+ IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1,
+ IB_UVERBS_WR_SEND = 2,
+ IB_UVERBS_WR_SEND_WITH_IMM = 3,
+ IB_UVERBS_WR_RDMA_READ = 4,
+ IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5,
+ IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6,
+ IB_UVERBS_WR_LOCAL_INV = 7,
+ IB_UVERBS_WR_BIND_MW = 8,
+ IB_UVERBS_WR_SEND_WITH_INV = 9,
+ IB_UVERBS_WR_TSO = 10,
+ IB_UVERBS_WR_RDMA_READ_WITH_INV = 11,
+ IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12,
+ IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13,
+ /* Review enum ib_wr_opcode before modifying this */
+};
+
struct ib_uverbs_send_wr {
__aligned_u64 wr_id;
__u32 num_sge;
- __u32 opcode;
+ __u32 opcode; /* see enum ib_uverbs_wr_opcode */
__u32 send_flags;
union {
__be32 imm_data;
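/*
 * Illustrative sketch, not part of the diff above: the opcode field of
 * struct ib_uverbs_send_wr now has documented values. A provider library
 * might fill it like this when marshalling a post-send request; the
 * helper name is hypothetical and the rest of the wr setup is elided.
 */
#include <string.h>
#include <rdma/ib_user_verbs.h>

static void hypothetical_fill_send_wr(struct ib_uverbs_send_wr *wr,
				      __u64 wr_id, __u32 num_sge)
{
	memset(wr, 0, sizeof(*wr));
	wr->wr_id = wr_id;
	wr->num_sge = num_sge;
	wr->opcode = IB_UVERBS_WR_SEND;	/* from enum ib_uverbs_wr_opcode */
}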
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index addbb9c4529e..8fa9f90e2bb1 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -45,6 +45,9 @@ enum {
MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
};
enum {
@@ -349,9 +352,22 @@ struct mlx5_ib_create_qp_rss {
__u32 flags;
};
+enum mlx5_ib_create_qp_resp_mask {
+ MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
+ MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
+ MLX5_IB_CREATE_QP_RESP_MASK_RQN = 1UL << 2,
+ MLX5_IB_CREATE_QP_RESP_MASK_SQN = 1UL << 3,
+};
+
struct mlx5_ib_create_qp_resp {
__u32 bfreg_index;
__u32 reserved;
+ __u32 comp_mask;
+ __u32 tirn;
+ __u32 tisn;
+ __u32 rqn;
+ __u32 sqn;
+ __u32 reserved1;
};
struct mlx5_ib_alloc_mw {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 9c51801b9e64..408e220034de 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -125,6 +125,7 @@ enum mlx5_ib_flow_matcher_create_attrs {
MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
};
enum mlx5_ib_flow_matcher_destroy_attrs {
@@ -155,6 +156,8 @@ enum mlx5_ib_create_flow_attrs {
MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
+ MLX5_IB_ATTR_CREATE_FLOW_TAG,
};
enum mlx5_ib_destoy_flow_attrs {
@@ -166,4 +169,22 @@ enum mlx5_ib_flow_methods {
MLX5_IB_METHOD_DESTROY_FLOW,
};
+enum mlx5_ib_flow_action_methods {
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
+};
+
+enum mlx5_ib_create_flow_action_create_modify_header_attrs {
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
+};
+
+enum mlx5_ib_create_flow_action_create_packet_reformat_attrs {
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
+};
+
#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 8a2fb33f3ed4..4ef62c0e8452 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -39,5 +39,17 @@ enum mlx5_ib_uapi_flow_action_flags {
MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0,
};
+enum mlx5_ib_uapi_flow_table_type {
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
+};
+
+enum mlx5_ib_uapi_flow_action_packet_reformat_type {
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 = 0x0,
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x1,
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x2,
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x3,
+};
+
#endif
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index edba6351ac13..f9c41bf59efc 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -227,8 +227,9 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_UNSPEC,
RDMA_NLDEV_CMD_GET, /* can dump */
+ RDMA_NLDEV_CMD_SET,
- /* 2 - 4 are free to use */
+ /* 3 - 4 are free to use */
RDMA_NLDEV_CMD_PORT_GET = 5, /* can dump */
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
index 24800c6c1f32..06c34d99be85 100644
--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -53,7 +53,7 @@ enum {
struct ib_uverbs_attr {
__u16 attr_id; /* command specific type attribute */
- __u16 len; /* only for pointers */
+ __u16 len; /* only for pointers and IDRs array */
__u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
union {
struct {
@@ -63,7 +63,10 @@ struct ib_uverbs_attr {
__u16 reserved;
} attr_data;
union {
- /* Used by PTR_IN/OUT, ENUM_IN and IDR */
+ /*
+ * ptr to command, inline data, idr/fd or
+ * ptr to __u32 array of IDRs
+ */
__aligned_u64 data;
/* Used by FD_IN and FD_OUT */
__s64 data_s64;
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
new file mode 100644
index 000000000000..17c7abd0803a
--- /dev/null
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * UFS Transport SGIO v4 BSG Message Support
+ *
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#ifndef SCSI_BSG_UFS_H
+#define SCSI_BSG_UFS_H
+
+#include <linux/types.h>
+/*
+ * This file is intended to be included by both kernel and user space
+ */
+
+#define UFS_CDB_SIZE 16
+#define UPIU_TRANSACTION_UIC_CMD 0x1F
+/* uic commands are 4DW long, per UFSHCI V2.1 paragraph 5.6.1 */
+#define UIC_CMD_SIZE (sizeof(__u32) * 4)
+
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+ __be32 dword_0;
+ __be32 dword_1;
+ __be32 dword_2;
+};
+
+/**
+ * struct utp_upiu_query - upiu request buffer structure for
+ * query request.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @reserved_osf: spec reserved field B-4,5
+ * @length: number of descriptor bytes to read/write B-6,7
+ * @value: Attribute value to be written DW-5
+ * @reserved: spec reserved DW-6,7
+ */
+struct utp_upiu_query {
+ __u8 opcode;
+ __u8 idn;
+ __u8 index;
+ __u8 selector;
+ __be16 reserved_osf;
+ __be16 length;
+ __be32 value;
+ __be32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_cmd - Command UPIU structure
+ * @exp_data_transfer_len: Expected Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+struct utp_upiu_cmd {
+ __be32 exp_data_transfer_len;
+ __u8 cdb[UFS_CDB_SIZE];
+};
+
+/**
+ * struct utp_upiu_req - general upiu request structure
+ * @header:UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for scsi command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_req {
+ struct utp_upiu_header header;
+ union {
+ struct utp_upiu_cmd sc;
+ struct utp_upiu_query qr;
+ struct utp_upiu_query tr;
+ /* use utp_upiu_query to host the 4 dwords of uic command */
+ struct utp_upiu_query uc;
+ };
+};
+
+/* request (CDB) structure of the sg_io_v4 */
+struct ufs_bsg_request {
+ __u32 msgcode;
+ struct utp_upiu_req upiu_req;
+};
+
+/* response (request sense data) structure of the sg_io_v4 */
+struct ufs_bsg_reply {
+ /*
+ * The completion result. Result exists in two forms:
+ * if negative, it is an -Exxx system errno value. There will
+ * be no further reply information supplied.
+ * else, it's the 4-byte scsi error result, with driver, host,
+ * msg and status fields. The per-msgcode reply structure
+ * will contain valid data.
+ */
+ __u32 result;
+
+ /* If there was reply_payload, how much was received? */
+ __u32 reply_payload_rcv_len;
+
+ struct utp_upiu_req upiu_rsp;
+};
+#endif /* SCSI_BSG_UFS_H */
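/*
 * Illustrative userspace sketch, not part of the diff above: wrapping a
 * ufs_bsg_request in an sg_io_v4 and submitting it via SG_IO to the UFS
 * bsg node. The 0x16 transaction code (UPIU query request, defined in
 * the kernel's ufs.h, not in this header) and the bsg device path are
 * assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO */
#include <linux/bsg.h>
#include <scsi/scsi_bsg_ufs.h>

int main(void)
{
	struct ufs_bsg_request req;
	struct ufs_bsg_reply rsp;
	struct sg_io_v4 io;
	int fd = open("/dev/bsg/ufs-bsg", O_RDWR);	/* hypothetical node */

	memset(&req, 0, sizeof(req));
	req.msgcode = 0x16;	/* assumed UPIU_TRANSACTION_QUERY_REQ */
	/* transaction type lives in byte 0 of UPIU header DW-0 */
	req.upiu_req.header.dword_0 = htobe32(0x16 << 24);

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)&req;
	io.request_len = sizeof(req);
	io.response = (uintptr_t)&rsp;
	io.max_response_len = sizeof(rsp);

	if (fd < 0 || ioctl(fd, SG_IO, &io))
		perror("ufs bsg");
	return 0;
}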
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index ed0a120d4f08..404d4b9ffe76 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -752,7 +752,7 @@ struct snd_timer_info {
#define SNDRV_TIMER_PSFLG_EARLY_EVENT (1<<2) /* write early event to the poll queue */
struct snd_timer_params {
- unsigned int flags; /* flags - SNDRV_MIXER_PSFLG_* */
+ unsigned int flags; /* flags - SNDRV_TIMER_PSFLG_* */
unsigned int ticks; /* requested resolution in ticks */
unsigned int queue_size; /* total size of queue (32-1024) */
unsigned int reserved0; /* reserved, was: failure locations */
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
index f58cafa42f18..f39352cef382 100644
--- a/include/uapi/sound/skl-tplg-interface.h
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -10,6 +10,8 @@
#ifndef __HDA_TPLG_INTERFACE_H__
#define __HDA_TPLG_INTERFACE_H__
+#include <linux/types.h>
+
/*
* Default types range from 0~12. type can range from 0 to 0xff
* SST types start at higher to avoid any overlapping in future
@@ -143,10 +145,10 @@ enum skl_module_param_type {
};
struct skl_dfw_algo_data {
- u32 set_params:2;
- u32 rsvd:30;
- u32 param_id;
- u32 max;
+ __u32 set_params:2;
+ __u32 rsvd:30;
+ __u32 param_id;
+ __u32 max;
char params[0];
} __packed;
@@ -163,68 +165,68 @@ enum skl_tuple_type {
/* v4 configuration data */
struct skl_dfw_v4_module_pin {
- u16 module_id;
- u16 instance_id;
+ __u16 module_id;
+ __u16 instance_id;
} __packed;
struct skl_dfw_v4_module_fmt {
- u32 channels;
- u32 freq;
- u32 bit_depth;
- u32 valid_bit_depth;
- u32 ch_cfg;
- u32 interleaving_style;
- u32 sample_type;
- u32 ch_map;
+ __u32 channels;
+ __u32 freq;
+ __u32 bit_depth;
+ __u32 valid_bit_depth;
+ __u32 ch_cfg;
+ __u32 interleaving_style;
+ __u32 sample_type;
+ __u32 ch_map;
} __packed;
struct skl_dfw_v4_module_caps {
- u32 set_params:2;
- u32 rsvd:30;
- u32 param_id;
- u32 caps_size;
- u32 caps[HDA_SST_CFG_MAX];
+ __u32 set_params:2;
+ __u32 rsvd:30;
+ __u32 param_id;
+ __u32 caps_size;
+ __u32 caps[HDA_SST_CFG_MAX];
} __packed;
struct skl_dfw_v4_pipe {
- u8 pipe_id;
- u8 pipe_priority;
- u16 conn_type:4;
- u16 rsvd:4;
- u16 memory_pages:8;
+ __u8 pipe_id;
+ __u8 pipe_priority;
+ __u16 conn_type:4;
+ __u16 rsvd:4;
+ __u16 memory_pages:8;
} __packed;
struct skl_dfw_v4_module {
char uuid[SKL_UUID_STR_SZ];
- u16 module_id;
- u16 instance_id;
- u32 max_mcps;
- u32 mem_pages;
- u32 obs;
- u32 ibs;
- u32 vbus_id;
-
- u32 max_in_queue:8;
- u32 max_out_queue:8;
- u32 time_slot:8;
- u32 core_id:4;
- u32 rsvd1:4;
-
- u32 module_type:8;
- u32 conn_type:4;
- u32 dev_type:4;
- u32 hw_conn_type:4;
- u32 rsvd2:12;
-
- u32 params_fixup:8;
- u32 converter:8;
- u32 input_pin_type:1;
- u32 output_pin_type:1;
- u32 is_dynamic_in_pin:1;
- u32 is_dynamic_out_pin:1;
- u32 is_loadable:1;
- u32 rsvd3:11;
+ __u16 module_id;
+ __u16 instance_id;
+ __u32 max_mcps;
+ __u32 mem_pages;
+ __u32 obs;
+ __u32 ibs;
+ __u32 vbus_id;
+
+ __u32 max_in_queue:8;
+ __u32 max_out_queue:8;
+ __u32 time_slot:8;
+ __u32 core_id:4;
+ __u32 rsvd1:4;
+
+ __u32 module_type:8;
+ __u32 conn_type:4;
+ __u32 dev_type:4;
+ __u32 hw_conn_type:4;
+ __u32 rsvd2:12;
+
+ __u32 params_fixup:8;
+ __u32 converter:8;
+ __u32 input_pin_type:1;
+ __u32 output_pin_type:1;
+ __u32 is_dynamic_in_pin:1;
+ __u32 is_dynamic_out_pin:1;
+ __u32 is_loadable:1;
+ __u32 rsvd3:11;
struct skl_dfw_v4_pipe pipe;
struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE];
diff --git a/include/xen/events.h b/include/xen/events.h
index c3e6bc643a7b..a48897199975 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -89,11 +89,13 @@ unsigned irq_from_evtchn(unsigned int evtchn);
int irq_from_virq(unsigned int cpu, unsigned int virq);
unsigned int evtchn_from_irq(unsigned irq);
+#ifdef CONFIG_XEN_PVHVM
/* Xen HVM evtchn vector callback */
void xen_hvm_callback_vector(void);
#ifdef CONFIG_TRACING
#define trace_xen_hvm_callback_vector xen_hvm_callback_vector
#endif
+#endif
int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs);
void xen_hvm_evtchn_do_upcall(void);
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 4c5751c26f87..447004861f00 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -245,12 +245,6 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map);
/*
- * Prevent the balloon driver from changing the memory reservation
- * during a driver critical region.
- */
-extern spinlock_t xen_reservation_lock;
-
-/*
* Unmaps the page appearing at a particular GPFN from the specified guest's
* pseudophysical address space.
* arg == addr of xen_remove_from_physmap_t.
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index fd18c974a619..18803ff76e27 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -5,6 +5,7 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/efi.h>
+#include <xen/features.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>
@@ -47,6 +48,10 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
+
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+ unsigned int domid, bool no_translate, struct page **pages);
#else
static inline int xen_create_contiguous_region(phys_addr_t pstart,
unsigned int order,
@@ -58,10 +63,50 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart,
static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
unsigned int order) { }
+
+static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr,
+ pgprot_t prot, unsigned int domid,
+ bool no_translate, struct page **pages)
+{
+ BUG();
+ return 0;
+}
#endif
struct vm_area_struct;
+#ifdef CONFIG_XEN_AUTO_XLATE
+int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid,
+ struct page **pages);
+int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+ int nr, struct page **pages);
+#else
+/*
+ * These two functions are called from arch/x86/xen/mmu.c and so stubs
+ * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE.
+ */
+static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid,
+ struct page **pages)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+ int nr, struct page **pages)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
/*
* xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
* @vma: VMA to map the pages into
@@ -79,12 +124,25 @@ struct vm_area_struct;
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *gfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned domid,
- struct page **pages);
+static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid,
+ struct page **pages)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
+ prot, domid, pages);
+
+ /* We BUG_ON because it's a programmer error to pass a NULL err_ptr;
+ * otherwise, when the wrong memory ends up mapped in, it is very
+ * hard to work back to the actual cause.
+ */
+ BUG_ON(err_ptr == NULL);
+ return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
+ false, pages);
+}
/*
* xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
@@ -103,10 +161,18 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
- unsigned long addr, xen_pfn_t *mfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned int domid, struct page **pages);
+static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr, xen_pfn_t *mfn,
+ int nr, int *err_ptr,
+ pgprot_t prot, unsigned int domid,
+ struct page **pages)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return -EOPNOTSUPP;
+
+ return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
+ true, pages);
+}
/* xen_remap_domain_gfn_range() - map a range of foreign frames
* @vma: VMA to map the pages into
@@ -120,44 +186,21 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t gfn, int nr,
- pgprot_t prot, unsigned domid,
- struct page **pages);
-int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
- int numpgs, struct page **pages);
-
-#ifdef CONFIG_XEN_AUTO_XLATE
-int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *gfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned domid,
- struct page **pages);
-int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
- int nr, struct page **pages);
-#else
-/*
- * These two functions are called from arch/x86/xen/mmu.c and so stubs
- * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE.
- */
-static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *gfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned int domid,
- struct page **pages)
+static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t gfn, int nr,
+ pgprot_t prot, unsigned int domid,
+ struct page **pages)
{
- return -EOPNOTSUPP;
-}
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return -EOPNOTSUPP;
-static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
- int nr, struct page **pages)
-{
- return -EOPNOTSUPP;
+ return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
+ pages);
}
-#endif
+
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
+ int numpgs, struct page **pages);
int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
unsigned long nr_grant_frames);
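/*
 * Illustrative kernel-side sketch, not part of the diff above: after this
 * change xen_remap_domain_gfn_array() is a static inline that picks
 * between the xlate path and the new xen_remap_pfn(). A privcmd-style
 * caller still uses it the same way; the helper name is hypothetical, and
 * note that err_ptr must be non-NULL (the inline now BUG_ONs on it).
 */
#include <linux/mm.h>
#include <xen/xen-ops.h>

static int hypothetical_map_foreign(struct vm_area_struct *vma,
				    unsigned long addr, xen_pfn_t *gfns,
				    int nr, int *errs, unsigned int domid,
				    struct page **pages)
{
	return xen_remap_domain_gfn_array(vma, addr, gfns, nr, errs,
					  vma->vm_page_prot, domid, pages);
}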
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 1e1d9bd0bd37..d7a2678da77f 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -39,4 +39,8 @@ extern uint32_t xen_start_flags;
#define xen_initial_domain() (0)
#endif /* CONFIG_XEN_DOM0 */
+struct bio_vec;
+bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+
#endif /* _XEN_XEN_H */
diff --git a/init/Kconfig b/init/Kconfig
index 1e234e2f1cba..317d5ccb5191 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -415,6 +415,11 @@ config IRQ_TIME_ACCOUNTING
If in doubt, say N here.
+config HAVE_SCHED_AVG_IRQ
+ def_bool y
+ depends on IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING
+ depends on SMP
+
config BSD_PROCESS_ACCT
bool "BSD Process Accounting"
depends on MULTIUSER
diff --git a/init/main.c b/init/main.c
index 18f8f0140fa0..1c3f90264280 100644
--- a/init/main.c
+++ b/init/main.c
@@ -394,7 +394,7 @@ static void __init setup_command_line(char *command_line)
static __initdata DECLARE_COMPLETION(kthreadd_done);
-static noinline void __ref rest_init(void)
+noinline void __ref rest_init(void)
{
struct task_struct *tsk;
int pid;
@@ -528,6 +528,11 @@ static void __init mm_init(void)
pti_init();
}
+void __init __weak arch_call_rest_init(void)
+{
+ rest_init();
+}
+
asmlinkage __visible void __init start_kernel(void)
{
char *command_line;
@@ -736,7 +741,7 @@ asmlinkage __visible void __init start_kernel(void)
}
/* Do the rest non-__init'ed, we're now alive */
- rest_init();
+ arch_call_rest_init();
}
/* Call all constructor functions linked into the kernel. */
@@ -1064,7 +1069,6 @@ static int __ref kernel_init(void *unused)
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
ftrace_free_init_mem();
- jump_label_invalidate_initmem();
free_initmem();
mark_readonly();
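/*
 * Illustrative sketch, not part of the diff above: with rest_init()
 * un-static'ed and the __weak arch_call_rest_init() hook in place, an
 * architecture can interpose on the hand-off out of start_kernel(). A
 * hypothetical override would look like this in arch code (assuming
 * rest_init() is declared in a header visible there).
 */
#include <linux/init.h>

void rest_init(void);	/* assumed to be exposed alongside this change */

void __init arch_call_rest_init(void)
{
	/* arch-specific work that must run before the init task starts */
	rest_init();
}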
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index c0d58f390c3b..c595bed7bfcb 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -655,7 +655,7 @@ static void __do_notify(struct mqueue_inode_info *info)
* synchronously. */
if (info->notify_owner &&
info->attr.mq_curmsgs == 1) {
- struct siginfo sig_i;
+ struct kernel_siginfo sig_i;
switch (info->notify.sigev_notify) {
case SIGEV_NONE:
break;
@@ -1461,10 +1461,10 @@ COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
-static int compat_prepare_timeout(const struct compat_timespec __user *p,
+static int compat_prepare_timeout(const struct old_timespec32 __user *p,
struct timespec64 *ts)
{
- if (compat_get_timespec64(ts, p))
+ if (get_old_timespec32(ts, p))
return -EFAULT;
if (!timespec64_valid(ts))
return -EINVAL;
@@ -1474,7 +1474,7 @@ static int compat_prepare_timeout(const struct compat_timespec __user *p,
COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
const char __user *, u_msg_ptr,
compat_size_t, msg_len, unsigned int, msg_prio,
- const struct compat_timespec __user *, u_abs_timeout)
+ const struct old_timespec32 __user *, u_abs_timeout)
{
struct timespec64 ts, *p = NULL;
if (u_abs_timeout) {
@@ -1489,7 +1489,7 @@ COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
char __user *, u_msg_ptr,
compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
- const struct compat_timespec __user *, u_abs_timeout)
+ const struct old_timespec32 __user *, u_abs_timeout)
{
struct timespec64 ts, *p = NULL;
if (u_abs_timeout) {
diff --git a/ipc/msg.c b/ipc/msg.c
index 883642cf2b27..0833c6405915 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -622,9 +622,9 @@ struct compat_msqid_ds {
struct compat_ipc_perm msg_perm;
compat_uptr_t msg_first;
compat_uptr_t msg_last;
- compat_time_t msg_stime;
- compat_time_t msg_rtime;
- compat_time_t msg_ctime;
+ old_time32_t msg_stime;
+ old_time32_t msg_rtime;
+ old_time32_t msg_ctime;
compat_ulong_t msg_lcbytes;
compat_ulong_t msg_lqbytes;
unsigned short msg_cbytes;
diff --git a/ipc/sem.c b/ipc/sem.c
index 26f8e37fcdcb..745dc6187e84 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1698,8 +1698,8 @@ SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
struct compat_semid_ds {
struct compat_ipc_perm sem_perm;
- compat_time_t sem_otime;
- compat_time_t sem_ctime;
+ old_time32_t sem_otime;
+ old_time32_t sem_ctime;
compat_uptr_t sem_base;
compat_uptr_t sem_pending;
compat_uptr_t sem_pending_last;
@@ -2214,11 +2214,11 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
#ifdef CONFIG_COMPAT_32BIT_TIME
long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned int nsops,
- const struct compat_timespec __user *timeout)
+ const struct old_timespec32 __user *timeout)
{
if (timeout) {
struct timespec64 ts;
- if (compat_get_timespec64(&ts, timeout))
+ if (get_old_timespec32(&ts, timeout))
return -EFAULT;
return do_semtimedop(semid, tsems, nsops, &ts);
}
@@ -2227,7 +2227,7 @@ long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
unsigned int, nsops,
- const struct compat_timespec __user *, timeout)
+ const struct old_timespec32 __user *, timeout)
{
return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
}
diff --git a/ipc/shm.c b/ipc/shm.c
index 4cd402e4cfeb..0842411cb0e9 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -206,7 +206,7 @@ err:
* Callers of shm_lock() must validate the status of the returned ipc
* object pointer and error out as appropriate.
*/
- return (void *)ipcp;
+ return ERR_CAST(ipcp);
}
static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
@@ -1202,9 +1202,9 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
struct compat_shmid_ds {
struct compat_ipc_perm shm_perm;
int shm_segsz;
- compat_time_t shm_atime;
- compat_time_t shm_dtime;
- compat_time_t shm_ctime;
+ old_time32_t shm_atime;
+ old_time32_t shm_dtime;
+ old_time32_t shm_ctime;
compat_ipc_pid_t shm_cpid;
compat_ipc_pid_t shm_lpid;
unsigned short shm_nattch;
diff --git a/ipc/syscall.c b/ipc/syscall.c
index 65d405f1ba0c..1ac06e3983c0 100644
--- a/ipc/syscall.c
+++ b/ipc/syscall.c
@@ -35,7 +35,7 @@ SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
(const struct __kernel_timespec __user *)fifth);
else if (IS_ENABLED(CONFIG_COMPAT_32BIT_TIME))
return compat_ksys_semtimedop(first, ptr, second,
- (const struct compat_timespec __user *)fifth);
+ (const struct old_timespec32 __user *)fifth);
else
return -ENOSYS;
diff --git a/ipc/util.h b/ipc/util.h
index 0a159f69b3bb..1ee81bce25e9 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -266,7 +266,7 @@ long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
/* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */
long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned int nsops,
- const struct compat_timespec __user *timeout);
+ const struct old_timespec32 __user *timeout);
#ifdef CONFIG_COMPAT
long compat_ksys_semctl(int semid, int semnum, int cmd, int arg);
long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr);
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 0488b8258321..4c2fa3ac56f6 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -3,7 +3,7 @@ obj-y := core.o
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
-obj-$(CONFIG_BPF_SYSCALL) += local_storage.o
+obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o
ifeq ($(CONFIG_NET),y)
@@ -13,11 +13,6 @@ ifeq ($(CONFIG_XDP_SOCKETS),y)
obj-$(CONFIG_BPF_SYSCALL) += xskmap.o
endif
obj-$(CONFIG_BPF_SYSCALL) += offload.o
-ifeq ($(CONFIG_STREAM_PARSER),y)
-ifeq ($(CONFIG_INET),y)
-obj-$(CONFIG_BPF_SYSCALL) += sockmap.o
-endif
-endif
endif
ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 0c17aab3ce5f..24583da9ffd1 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -358,6 +358,29 @@ static void array_map_seq_show_elem(struct bpf_map *map, void *key,
rcu_read_unlock();
}
+static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
+ struct seq_file *m)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ u32 index = *(u32 *)key;
+ void __percpu *pptr;
+ int cpu;
+
+ rcu_read_lock();
+
+ seq_printf(m, "%u: {\n", *(u32 *)key);
+ pptr = array->pptrs[index & array->index_mask];
+ for_each_possible_cpu(cpu) {
+ seq_printf(m, "\tcpu%d: ", cpu);
+ btf_type_seq_show(map->btf, map->btf_value_type_id,
+ per_cpu_ptr(pptr, cpu), m);
+ seq_puts(m, "\n");
+ }
+ seq_puts(m, "}\n");
+
+ rcu_read_unlock();
+}
+
static int array_map_check_btf(const struct bpf_map *map,
const struct btf_type *key_type,
const struct btf_type *value_type)
@@ -398,6 +421,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
.map_lookup_elem = percpu_array_map_lookup_elem,
.map_update_elem = array_map_update_elem,
.map_delete_elem = array_map_delete_elem,
+ .map_seq_show_elem = percpu_array_map_seq_show_elem,
.map_check_btf = array_map_check_btf,
};
@@ -425,7 +449,7 @@ static void fd_array_map_free(struct bpf_map *map)
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
- return NULL;
+ return ERR_PTR(-EOPNOTSUPP);
}
/* only called from syscall */
@@ -529,6 +553,29 @@ static void bpf_fd_array_map_clear(struct bpf_map *map)
fd_array_map_delete_elem(map, &i);
}
+static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
+ struct seq_file *m)
+{
+ void **elem, *ptr;
+ u32 prog_id;
+
+ rcu_read_lock();
+
+ elem = array_map_lookup_elem(map, key);
+ if (elem) {
+ ptr = READ_ONCE(*elem);
+ if (ptr) {
+ seq_printf(m, "%u: ", *(u32 *)key);
+ prog_id = prog_fd_array_sys_lookup_elem(ptr);
+ btf_type_seq_show(map->btf, map->btf_value_type_id,
+ &prog_id, m);
+ seq_puts(m, "\n");
+ }
+ }
+
+ rcu_read_unlock();
+}
+
const struct bpf_map_ops prog_array_map_ops = {
.map_alloc_check = fd_array_map_alloc_check,
.map_alloc = array_map_alloc,
@@ -540,7 +587,7 @@ const struct bpf_map_ops prog_array_map_ops = {
.map_fd_put_ptr = prog_fd_array_put_ptr,
.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
.map_release_uref = bpf_fd_array_map_clear,
- .map_check_btf = map_check_no_btf,
+ .map_seq_show_elem = prog_array_map_seq_show_elem,
};
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 2590700237c1..378cef70341c 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -1844,7 +1844,7 @@ static int btf_check_all_metas(struct btf_verifier_env *env)
hdr = &btf->hdr;
cur = btf->nohdr_data + hdr->type_off;
- end = btf->nohdr_data + hdr->type_len;
+ end = cur + hdr->type_len;
env->log_type_id = 1;
while (cur < end) {
@@ -2114,6 +2114,9 @@ static int btf_parse_hdr(struct btf_verifier_env *env, void __user *btf_data,
hdr = &btf->hdr;
+ if (hdr->hdr_len != hdr_len)
+ return -EINVAL;
+
btf_verifier_log_hdr(env, btf_data_size);
if (hdr->magic != BTF_MAGIC) {
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 6a7d931bbc55..9425c2fb872f 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -25,6 +25,7 @@ EXPORT_SYMBOL(cgroup_bpf_enabled_key);
*/
void cgroup_bpf_put(struct cgroup *cgrp)
{
+ enum bpf_cgroup_storage_type stype;
unsigned int type;
for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
@@ -34,8 +35,10 @@ void cgroup_bpf_put(struct cgroup *cgrp)
list_for_each_entry_safe(pl, tmp, progs, node) {
list_del(&pl->node);
bpf_prog_put(pl->prog);
- bpf_cgroup_storage_unlink(pl->storage);
- bpf_cgroup_storage_free(pl->storage);
+ for_each_cgroup_storage_type(stype) {
+ bpf_cgroup_storage_unlink(pl->storage[stype]);
+ bpf_cgroup_storage_free(pl->storage[stype]);
+ }
kfree(pl);
static_branch_dec(&cgroup_bpf_enabled_key);
}
@@ -97,6 +100,7 @@ static int compute_effective_progs(struct cgroup *cgrp,
enum bpf_attach_type type,
struct bpf_prog_array __rcu **array)
{
+ enum bpf_cgroup_storage_type stype;
struct bpf_prog_array *progs;
struct bpf_prog_list *pl;
struct cgroup *p = cgrp;
@@ -125,7 +129,9 @@ static int compute_effective_progs(struct cgroup *cgrp,
continue;
progs->items[cnt].prog = pl->prog;
- progs->items[cnt].cgroup_storage = pl->storage;
+ for_each_cgroup_storage_type(stype)
+ progs->items[cnt].cgroup_storage[stype] =
+ pl->storage[stype];
cnt++;
}
} while ((p = cgroup_parent(p)));
@@ -232,7 +238,9 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
{
struct list_head *progs = &cgrp->bpf.progs[type];
struct bpf_prog *old_prog = NULL;
- struct bpf_cgroup_storage *storage, *old_storage = NULL;
+ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
+ *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
+ enum bpf_cgroup_storage_type stype;
struct bpf_prog_list *pl;
bool pl_was_allocated;
int err;
@@ -254,34 +262,44 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
return -E2BIG;
- storage = bpf_cgroup_storage_alloc(prog);
- if (IS_ERR(storage))
- return -ENOMEM;
+ for_each_cgroup_storage_type(stype) {
+ storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
+ if (IS_ERR(storage[stype])) {
+ storage[stype] = NULL;
+ for_each_cgroup_storage_type(stype)
+ bpf_cgroup_storage_free(storage[stype]);
+ return -ENOMEM;
+ }
+ }
if (flags & BPF_F_ALLOW_MULTI) {
list_for_each_entry(pl, progs, node) {
if (pl->prog == prog) {
/* disallow attaching the same prog twice */
- bpf_cgroup_storage_free(storage);
+ for_each_cgroup_storage_type(stype)
+ bpf_cgroup_storage_free(storage[stype]);
return -EINVAL;
}
}
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
if (!pl) {
- bpf_cgroup_storage_free(storage);
+ for_each_cgroup_storage_type(stype)
+ bpf_cgroup_storage_free(storage[stype]);
return -ENOMEM;
}
pl_was_allocated = true;
pl->prog = prog;
- pl->storage = storage;
+ for_each_cgroup_storage_type(stype)
+ pl->storage[stype] = storage[stype];
list_add_tail(&pl->node, progs);
} else {
if (list_empty(progs)) {
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
if (!pl) {
- bpf_cgroup_storage_free(storage);
+ for_each_cgroup_storage_type(stype)
+ bpf_cgroup_storage_free(storage[stype]);
return -ENOMEM;
}
pl_was_allocated = true;
@@ -289,12 +307,15 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
} else {
pl = list_first_entry(progs, typeof(*pl), node);
old_prog = pl->prog;
- old_storage = pl->storage;
- bpf_cgroup_storage_unlink(old_storage);
+ for_each_cgroup_storage_type(stype) {
+ old_storage[stype] = pl->storage[stype];
+ bpf_cgroup_storage_unlink(old_storage[stype]);
+ }
pl_was_allocated = false;
}
pl->prog = prog;
- pl->storage = storage;
+ for_each_cgroup_storage_type(stype)
+ pl->storage[stype] = storage[stype];
}
cgrp->bpf.flags[type] = flags;
@@ -304,21 +325,27 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
goto cleanup;
static_branch_inc(&cgroup_bpf_enabled_key);
- if (old_storage)
- bpf_cgroup_storage_free(old_storage);
+ for_each_cgroup_storage_type(stype) {
+ if (!old_storage[stype])
+ continue;
+ bpf_cgroup_storage_free(old_storage[stype]);
+ }
if (old_prog) {
bpf_prog_put(old_prog);
static_branch_dec(&cgroup_bpf_enabled_key);
}
- bpf_cgroup_storage_link(storage, cgrp, type);
+ for_each_cgroup_storage_type(stype)
+ bpf_cgroup_storage_link(storage[stype], cgrp, type);
return 0;
cleanup:
/* and cleanup the prog list */
pl->prog = old_prog;
- bpf_cgroup_storage_free(pl->storage);
- pl->storage = old_storage;
- bpf_cgroup_storage_link(old_storage, cgrp, type);
+ for_each_cgroup_storage_type(stype) {
+ bpf_cgroup_storage_free(pl->storage[stype]);
+ pl->storage[stype] = old_storage[stype];
+ bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
+ }
if (pl_was_allocated) {
list_del(&pl->node);
kfree(pl);
@@ -339,6 +366,7 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 unused_flags)
{
struct list_head *progs = &cgrp->bpf.progs[type];
+ enum bpf_cgroup_storage_type stype;
u32 flags = cgrp->bpf.flags[type];
struct bpf_prog *old_prog = NULL;
struct bpf_prog_list *pl;
@@ -385,8 +413,10 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
/* now can actually delete it from this cgroup list */
list_del(&pl->node);
- bpf_cgroup_storage_unlink(pl->storage);
- bpf_cgroup_storage_free(pl->storage);
+ for_each_cgroup_storage_type(stype) {
+ bpf_cgroup_storage_unlink(pl->storage[stype]);
+ bpf_cgroup_storage_free(pl->storage[stype]);
+ }
kfree(pl);
if (list_empty(progs))
/* last program was detached, reset flags to zero */
@@ -523,6 +553,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
{
unsigned int offset = skb->data - skb_network_header(skb);
struct sock *save_sk;
+ void *saved_data_end;
struct cgroup *cgrp;
int ret;
@@ -536,8 +567,13 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
save_sk = skb->sk;
skb->sk = sk;
__skb_push(skb, offset);
+
+ /* compute pointers for the bpf prog */
+ bpf_compute_and_save_data_end(skb, &saved_data_end);
+
ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
bpf_prog_run_save_cb);
+ bpf_restore_data_end(skb, saved_data_end);
__skb_pull(skb, offset);
skb->sk = save_sk;
return ret == 1 ? 0 : -EPERM;
@@ -677,6 +713,8 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_current_uid_gid_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
+ case BPF_FUNC_get_current_cgroup_id:
+ return &bpf_get_current_cgroup_id_proto;
case BPF_FUNC_trace_printk:
if (capable(CAP_SYS_ADMIN))
return bpf_get_trace_printk_proto();
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 3f5bf1af0826..7c7eeea8cffc 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1783,6 +1783,9 @@ BPF_CALL_0(bpf_user_rnd_u32)
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
+const struct bpf_func_proto bpf_map_push_elem_proto __weak;
+const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
+const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
@@ -1792,8 +1795,6 @@ const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
-const struct bpf_func_proto bpf_sock_map_update_proto __weak;
-const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 03cc59ee9c95..2c1790288138 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1285,6 +1285,35 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
return ret;
}
+static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
+ struct seq_file *m)
+{
+ struct htab_elem *l;
+ void __percpu *pptr;
+ int cpu;
+
+ rcu_read_lock();
+
+ l = __htab_map_lookup_elem(map, key);
+ if (!l) {
+ rcu_read_unlock();
+ return;
+ }
+
+ btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
+ seq_puts(m, ": {\n");
+ pptr = htab_elem_get_ptr(l, map->key_size);
+ for_each_possible_cpu(cpu) {
+ seq_printf(m, "\tcpu%d: ", cpu);
+ btf_type_seq_show(map->btf, map->btf_value_type_id,
+ per_cpu_ptr(pptr, cpu), m);
+ seq_puts(m, "\n");
+ }
+ seq_puts(m, "}\n");
+
+ rcu_read_unlock();
+}
+
const struct bpf_map_ops htab_percpu_map_ops = {
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
@@ -1293,6 +1322,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
.map_lookup_elem = htab_percpu_map_lookup_elem,
.map_update_elem = htab_percpu_map_update_elem,
.map_delete_elem = htab_map_delete_elem,
+ .map_seq_show_elem = htab_percpu_map_seq_show_elem,
};
const struct bpf_map_ops htab_lru_percpu_map_ops = {
@@ -1303,6 +1333,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
.map_update_elem = htab_lru_percpu_map_update_elem,
.map_delete_elem = htab_lru_map_delete_elem,
+ .map_seq_show_elem = htab_percpu_map_seq_show_elem,
};
static int fd_htab_map_alloc_check(union bpf_attr *attr)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1991466b8327..ab0d5e3f9892 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -76,6 +76,49 @@ const struct bpf_func_proto bpf_map_delete_elem_proto = {
.arg2_type = ARG_PTR_TO_MAP_KEY,
};
+BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
+{
+ return map->ops->map_push_elem(map, value, flags);
+}
+
+const struct bpf_func_proto bpf_map_push_elem_proto = {
+ .func = bpf_map_push_elem,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_MAP_VALUE,
+ .arg3_type = ARG_ANYTHING,
+};
+
+BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
+{
+ return map->ops->map_pop_elem(map, value);
+}
+
+const struct bpf_func_proto bpf_map_pop_elem_proto = {
+ .func = bpf_map_pop_elem,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE,
+};
+
+BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
+{
+ return map->ops->map_peek_elem(map, value);
+}
+
+const struct bpf_func_proto bpf_map_peek_elem_proto = {
+ .func = bpf_map_peek_elem,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE,
+};
+
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
.func = bpf_user_rnd_u32,
.gpl_only = false,
@@ -194,16 +237,28 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
.ret_type = RET_INTEGER,
};
-DECLARE_PER_CPU(void*, bpf_cgroup_storage);
+#ifdef CONFIG_CGROUP_BPF
+DECLARE_PER_CPU(struct bpf_cgroup_storage*,
+ bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
- /* map and flags arguments are not used now,
- * but provide an ability to extend the API
- * for other types of local storages.
- * verifier checks that their values are correct.
+ /* The flags argument is not used now,
+ * but provides the ability to extend the API.
+ * The verifier checks that its value is correct.
*/
- return (unsigned long) this_cpu_read(bpf_cgroup_storage);
+ enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
+ struct bpf_cgroup_storage *storage;
+ void *ptr;
+
+ storage = this_cpu_read(bpf_cgroup_storage[stype]);
+
+ if (stype == BPF_CGROUP_STORAGE_SHARED)
+ ptr = &READ_ONCE(storage->buf)->data[0];
+ else
+ ptr = this_cpu_ptr(storage->percpu_buf);
+
+ return (unsigned long)ptr;
}
const struct bpf_func_proto bpf_get_local_storage_proto = {
@@ -214,3 +269,4 @@ const struct bpf_func_proto bpf_get_local_storage_proto = {
.arg2_type = ARG_ANYTHING,
};
#endif
+#endif
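Together with the queue/stack map type added later in this patch, these
three protos give BPF programs a push/pop/peek API. A hedged sketch of how a
program might use it (map name, section and values are illustrative):

    struct bpf_map_def SEC("maps") my_queue = {
            .type        = BPF_MAP_TYPE_QUEUE,
            .key_size    = 0,              /* queue/stack maps take no key */
            .value_size  = sizeof(__u32),
            .max_entries = 128,
    };

    SEC("kprobe/example")
    int producer(struct pt_regs *ctx)
    {
            __u32 v = 42, out;

            /* BPF_ANY fails with -E2BIG when the map is full;
             * BPF_EXIST overwrites the oldest element instead
             */
            bpf_map_push_elem(&my_queue, &v, BPF_ANY);

            if (!bpf_map_peek_elem(&my_queue, &out))   /* read, keep */
                    bpf_map_pop_elem(&my_queue, &out); /* read, remove */
            return 0;
    }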
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 22ad967d1e5f..c97a8f968638 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -7,7 +7,8 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
-DEFINE_PER_CPU(void*, bpf_cgroup_storage);
+DEFINE_PER_CPU(struct bpf_cgroup_storage*,
+ bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
#ifdef CONFIG_CGROUP_BPF
@@ -129,7 +130,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
struct bpf_cgroup_storage *storage;
struct bpf_storage_buffer *new;
- if (flags & BPF_NOEXIST)
+ if (flags != BPF_ANY && flags != BPF_EXIST)
return -EINVAL;
storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
@@ -151,6 +152,71 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
return 0;
}
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key,
+ void *value)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+ struct bpf_cgroup_storage_key *key = _key;
+ struct bpf_cgroup_storage *storage;
+ int cpu, off = 0;
+ u32 size;
+
+ rcu_read_lock();
+ storage = cgroup_storage_lookup(map, key, false);
+ if (!storage) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ /* per_cpu areas are zero-filled and bpf programs can only
+ * access 'value_size' of them, so copying rounded areas
+ * will not leak any kernel data
+ */
+ size = round_up(_map->value_size, 8);
+ for_each_possible_cpu(cpu) {
+ bpf_long_memcpy(value + off,
+ per_cpu_ptr(storage->percpu_buf, cpu), size);
+ off += size;
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key,
+ void *value, u64 map_flags)
+{
+ struct bpf_cgroup_storage_map *map = map_to_storage(_map);
+ struct bpf_cgroup_storage_key *key = _key;
+ struct bpf_cgroup_storage *storage;
+ int cpu, off = 0;
+ u32 size;
+
+ if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
+ return -EINVAL;
+
+ rcu_read_lock();
+ storage = cgroup_storage_lookup(map, key, false);
+ if (!storage) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ /* User space provides round_up(value_size, 8) bytes that are
+ * copied into the per-cpu area. BPF programs can only access
+ * value_size of it. During lookup the same extra bytes are
+ * returned, or the zeros left by percpu_alloc, so no kernel
+ * data can leak.
+ */
+ size = round_up(_map->value_size, 8);
+ for_each_possible_cpu(cpu) {
+ bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
+ value + off, size);
+ off += size;
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key,
void *_next_key)
{
@@ -195,6 +261,9 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
return ERR_PTR(-EINVAL);
+ if (attr->value_size == 0)
+ return ERR_PTR(-EINVAL);
+
if (attr->value_size > PAGE_SIZE)
return ERR_PTR(-E2BIG);
@@ -251,6 +320,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
{
+ enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
int ret = -EBUSY;
@@ -258,11 +328,12 @@ int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
if (map->prog && map->prog != prog)
goto unlock;
- if (prog->aux->cgroup_storage && prog->aux->cgroup_storage != _map)
+ if (prog->aux->cgroup_storage[stype] &&
+ prog->aux->cgroup_storage[stype] != _map)
goto unlock;
map->prog = prog;
- prog->aux->cgroup_storage = _map;
+ prog->aux->cgroup_storage[stype] = _map;
ret = 0;
unlock:
spin_unlock_bh(&map->lock);
@@ -272,70 +343,117 @@ unlock:
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
{
+ enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
struct bpf_cgroup_storage_map *map = map_to_storage(_map);
spin_lock_bh(&map->lock);
if (map->prog == prog) {
- WARN_ON(prog->aux->cgroup_storage != _map);
+ WARN_ON(prog->aux->cgroup_storage[stype] != _map);
map->prog = NULL;
- prog->aux->cgroup_storage = NULL;
+ prog->aux->cgroup_storage[stype] = NULL;
}
spin_unlock_bh(&map->lock);
}
-struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog)
+static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
+{
+ size_t size;
+
+ if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
+ size = sizeof(struct bpf_storage_buffer) + map->value_size;
+ *pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
+ PAGE_SIZE) >> PAGE_SHIFT;
+ } else {
+ size = map->value_size;
+ *pages = round_up(round_up(size, 8) * num_possible_cpus(),
+ PAGE_SIZE) >> PAGE_SHIFT;
+ }
+
+ return size;
+}
+
+struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
+ enum bpf_cgroup_storage_type stype)
{
struct bpf_cgroup_storage *storage;
struct bpf_map *map;
+ gfp_t flags;
+ size_t size;
u32 pages;
- map = prog->aux->cgroup_storage;
+ map = prog->aux->cgroup_storage[stype];
if (!map)
return NULL;
- pages = round_up(sizeof(struct bpf_cgroup_storage) +
- sizeof(struct bpf_storage_buffer) +
- map->value_size, PAGE_SIZE) >> PAGE_SHIFT;
+ size = bpf_cgroup_storage_calculate_size(map, &pages);
+
if (bpf_map_charge_memlock(map, pages))
return ERR_PTR(-EPERM);
storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
__GFP_ZERO | GFP_USER, map->numa_node);
- if (!storage) {
- bpf_map_uncharge_memlock(map, pages);
- return ERR_PTR(-ENOMEM);
- }
+ if (!storage)
+ goto enomem;
- storage->buf = kmalloc_node(sizeof(struct bpf_storage_buffer) +
- map->value_size, __GFP_ZERO | GFP_USER,
- map->numa_node);
- if (!storage->buf) {
- bpf_map_uncharge_memlock(map, pages);
- kfree(storage);
- return ERR_PTR(-ENOMEM);
+ flags = __GFP_ZERO | GFP_USER;
+
+ if (stype == BPF_CGROUP_STORAGE_SHARED) {
+ storage->buf = kmalloc_node(size, flags, map->numa_node);
+ if (!storage->buf)
+ goto enomem;
+ } else {
+ storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
+ if (!storage->percpu_buf)
+ goto enomem;
}
storage->map = (struct bpf_cgroup_storage_map *)map;
return storage;
+
+enomem:
+ bpf_map_uncharge_memlock(map, pages);
+ kfree(storage);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
+{
+ struct bpf_cgroup_storage *storage =
+ container_of(rcu, struct bpf_cgroup_storage, rcu);
+
+ kfree(storage->buf);
+ kfree(storage);
+}
+
+static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
+{
+ struct bpf_cgroup_storage *storage =
+ container_of(rcu, struct bpf_cgroup_storage, rcu);
+
+ free_percpu(storage->percpu_buf);
+ kfree(storage);
}
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
- u32 pages;
+ enum bpf_cgroup_storage_type stype;
struct bpf_map *map;
+ u32 pages;
if (!storage)
return;
map = &storage->map->map;
- pages = round_up(sizeof(struct bpf_cgroup_storage) +
- sizeof(struct bpf_storage_buffer) +
- map->value_size, PAGE_SIZE) >> PAGE_SHIFT;
+
+ bpf_cgroup_storage_calculate_size(map, &pages);
bpf_map_uncharge_memlock(map, pages);
- kfree_rcu(storage->buf, rcu);
- kfree_rcu(storage, rcu);
+ stype = cgroup_storage_type(map);
+ if (stype == BPF_CGROUP_STORAGE_SHARED)
+ call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
+ else
+ call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
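The per-cpu flavor keeps bpf_get_local_storage() as the single entry point:
for BPF_CGROUP_STORAGE_SHARED it returns the shared buffer, while for the
per-cpu type it returns this CPU's slot, so programs can update it without
atomics. A hedged sketch of a cgroup program using the new type (names are
illustrative):

    struct bpf_map_def SEC("maps") percpu_cntr = {
            .type       = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
            .key_size   = sizeof(struct bpf_cgroup_storage_key),
            .value_size = sizeof(__u64),
    };

    SEC("cgroup/skb")
    int count_pkts(struct __sk_buff *skb)
    {
            __u64 *cnt = bpf_get_local_storage(&percpu_cntr, 0);

            (*cnt)++;       /* per-cpu slot: a plain increment is safe */
            return 1;
    }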
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 3bfbf4464416..99d243e1ad6e 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -24,7 +24,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
* in the verifier is not enough.
*/
if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
- inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE) {
+ inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
+ inner_map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
fdput(f);
return ERR_PTR(-ENOTSUPP);
}
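The new map type is rejected as an inner map for the same reason plain
cgroup storage already was. Illustratively, from user space (a libbpf-style
sketch, error handling elided):

    int inner = bpf_create_map(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
                               sizeof(struct bpf_cgroup_storage_key),
                               sizeof(__u64), 0 /* max_entries */, 0);
    int outer = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
                                      sizeof(__u32), inner, 1, 0);
    /* outer < 0 here: the kernel refuses this inner map type (ENOTSUPP) */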
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 177a52436394..8e93c47f0779 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -172,6 +172,24 @@ int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
return ret;
}
+int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
+{
+ struct bpf_prog_offload *offload;
+ int ret = -ENODEV;
+
+ down_read(&bpf_devs_lock);
+ offload = env->prog->aux->offload;
+ if (offload) {
+ if (offload->dev_ops->finalize)
+ ret = offload->dev_ops->finalize(env);
+ else
+ ret = 0;
+ }
+ up_read(&bpf_devs_lock);
+
+ return ret;
+}
+
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
struct bpf_prog_offload *offload = prog->aux->offload;
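The new hook mirrors the existing per-instruction callback: a driver that
needs a whole-program pass once main verification succeeds (for example to
start translation) supplies ->finalize() in its bpf_prog_offload_ops, and
leaving it NULL preserves the old behavior since the wrapper above then
returns 0. A hedged sketch of the driver side (names are illustrative):

    static int mydrv_bpf_finalize(struct bpf_verifier_env *env)
    {
            /* device-specific whole-program checks, start of translation */
            return 0;
    }

    static const struct bpf_prog_offload_ops mydrv_bpf_ops = {
            .insn_hook = mydrv_bpf_verify_insn,
            .finalize  = mydrv_bpf_finalize,
    };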
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
new file mode 100644
index 000000000000..12a93fb37449
--- /dev/null
+++ b/kernel/bpf/queue_stack_maps.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * queue_stack_maps.c: BPF queue and stack maps
+ *
+ * Copyright (c) 2018 Politecnico di Torino
+ */
+#include <linux/bpf.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "percpu_freelist.h"
+
+#define QUEUE_STACK_CREATE_FLAG_MASK \
+ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+
+struct bpf_queue_stack {
+ struct bpf_map map;
+ raw_spinlock_t lock;
+ u32 head, tail;
+ u32 size; /* max_entries + 1 */
+
+ char elements[0] __aligned(8);
+};
+
+static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
+{
+ return container_of(map, struct bpf_queue_stack, map);
+}
+
+static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
+{
+ return qs->head == qs->tail;
+}
+
+static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
+{
+ u32 head = qs->head + 1;
+
+ if (unlikely(head >= qs->size))
+ head = 0;
+
+ return head == qs->tail;
+}
+
+/* Called from syscall */
+static int queue_stack_map_alloc_check(union bpf_attr *attr)
+{
+ /* check sanity of attributes */
+ if (attr->max_entries == 0 || attr->key_size != 0 ||
+ attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
+ return -EINVAL;
+
+ if (attr->value_size > KMALLOC_MAX_SIZE)
+ /* if value_size is bigger than KMALLOC_MAX_SIZE, user space
+ * won't be able to access the elements.
+ */
+ return -E2BIG;
+
+ return 0;
+}
+
+static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
+{
+ int ret, numa_node = bpf_map_attr_numa_node(attr);
+ struct bpf_queue_stack *qs;
+ u64 size, queue_size, cost;
+ u32 value_size;
+
+ size = (u64) attr->max_entries + 1;
+ value_size = attr->value_size;
+
+ queue_size = sizeof(*qs) + (u64) value_size * size;
+
+ cost = queue_size;
+ if (cost >= U32_MAX - PAGE_SIZE)
+ return ERR_PTR(-E2BIG);
+
+ cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+ ret = bpf_map_precharge_memlock(cost);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ qs = bpf_map_area_alloc(queue_size, numa_node);
+ if (!qs)
+ return ERR_PTR(-ENOMEM);
+
+ memset(qs, 0, sizeof(*qs));
+
+ bpf_map_init_from_attr(&qs->map, attr);
+
+ qs->map.pages = cost;
+ qs->size = size;
+
+ raw_spin_lock_init(&qs->lock);
+
+ return &qs->map;
+}
+
+/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
+static void queue_stack_map_free(struct bpf_map *map)
+{
+ struct bpf_queue_stack *qs = bpf_queue_stack(map);
+
+ /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
+ * so the programs (there can be more than one that used this map)
+ * have been disconnected from events. Wait for outstanding critical
+ * sections in these programs to complete.
+ */
+ synchronize_rcu();
+
+ bpf_map_area_free(qs);
+}
+
+static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
+{
+ struct bpf_queue_stack *qs = bpf_queue_stack(map);
+ unsigned long flags;
+ int err = 0;
+ void *ptr;
+
+ raw_spin_lock_irqsave(&qs->lock, flags);
+
+ if (queue_stack_map_is_empty(qs)) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ ptr = &qs->elements[qs->tail * qs->map.value_size];
+ memcpy(value, ptr, qs->map.value_size);
+
+ if (delete) {
+ if (unlikely(++qs->tail >= qs->size))
+ qs->tail = 0;
+ }
+
+out:
+ raw_spin_unlock_irqrestore(&qs->lock, flags);
+ return err;
+}
+
+static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
+{
+ struct bpf_queue_stack *qs = bpf_queue_stack(map);
+ unsigned long flags;
+ int err = 0;
+ void *ptr;
+ u32 index;
+
+ raw_spin_lock_irqsave(&qs->lock, flags);
+
+ if (queue_stack_map_is_empty(qs)) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ index = qs->head - 1;
+ if (unlikely(index >= qs->size))
+ index = qs->size - 1;
+
+ ptr = &qs->elements[index * qs->map.value_size];
+ memcpy(value, ptr, qs->map.value_size);
+
+ if (delete)
+ qs->head = index;
+
+out:
+ raw_spin_unlock_irqrestore(&qs->lock, flags);
+ return err;
+}
+
+/* Called from syscall or from eBPF program */
+static int queue_map_peek_elem(struct bpf_map *map, void *value)
+{
+ return __queue_map_get(map, value, false);
+}
+
+/* Called from syscall or from eBPF program */
+static int stack_map_peek_elem(struct bpf_map *map, void *value)
+{
+ return __stack_map_get(map, value, false);
+}
+
+/* Called from syscall or from eBPF program */
+static int queue_map_pop_elem(struct bpf_map *map, void *value)
+{
+ return __queue_map_get(map, value, true);
+}
+
+/* Called from syscall or from eBPF program */
+static int stack_map_pop_elem(struct bpf_map *map, void *value)
+{
+ return __stack_map_get(map, value, true);
+}
+
+/* Called from syscall or from eBPF program */
+static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
+ u64 flags)
+{
+ struct bpf_queue_stack *qs = bpf_queue_stack(map);
+ unsigned long irq_flags;
+ int err = 0;
+ void *dst;
+
+ /* BPF_EXIST is used to force making room for a new element in case the
+ * map is full
+ */
+ bool replace = (flags & BPF_EXIST);
+
+ /* Check supported flags for queue and stack maps */
+ if (flags & BPF_NOEXIST || flags > BPF_EXIST)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&qs->lock, irq_flags);
+
+ if (queue_stack_map_is_full(qs)) {
+ if (!replace) {
+ err = -E2BIG;
+ goto out;
+ }
+ /* advance tail pointer to overwrite oldest element */
+ if (unlikely(++qs->tail >= qs->size))
+ qs->tail = 0;
+ }
+
+ dst = &qs->elements[qs->head * qs->map.value_size];
+ memcpy(dst, value, qs->map.value_size);
+
+ if (unlikely(++qs->head >= qs->size))
+ qs->head = 0;
+
+out:
+ raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
+ return err;
+}
+
+/* Called from syscall or from eBPF program */
+static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
+{
+ return NULL;
+}
+
+/* Called from syscall or from eBPF program */
+static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 flags)
+{
+ return -EINVAL;
+}
+
+/* Called from syscall or from eBPF program */
+static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
+{
+ return -EINVAL;
+}
+
+/* Called from syscall */
+static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
+ void *next_key)
+{
+ return -EINVAL;
+}
+
+const struct bpf_map_ops queue_map_ops = {
+ .map_alloc_check = queue_stack_map_alloc_check,
+ .map_alloc = queue_stack_map_alloc,
+ .map_free = queue_stack_map_free,
+ .map_lookup_elem = queue_stack_map_lookup_elem,
+ .map_update_elem = queue_stack_map_update_elem,
+ .map_delete_elem = queue_stack_map_delete_elem,
+ .map_push_elem = queue_stack_map_push_elem,
+ .map_pop_elem = queue_map_pop_elem,
+ .map_peek_elem = queue_map_peek_elem,
+ .map_get_next_key = queue_stack_map_get_next_key,
+};
+
+const struct bpf_map_ops stack_map_ops = {
+ .map_alloc_check = queue_stack_map_alloc_check,
+ .map_alloc = queue_stack_map_alloc,
+ .map_free = queue_stack_map_free,
+ .map_lookup_elem = queue_stack_map_lookup_elem,
+ .map_update_elem = queue_stack_map_update_elem,
+ .map_delete_elem = queue_stack_map_delete_elem,
+ .map_push_elem = queue_stack_map_push_elem,
+ .map_pop_elem = stack_map_pop_elem,
+ .map_peek_elem = stack_map_peek_elem,
+ .map_get_next_key = queue_stack_map_get_next_key,
+};
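A short worked example of the ring arithmetic above: with max_entries = 3,
qs->size = 4, and one slot is always kept free so that the full and empty
states remain distinguishable:

    head == tail              -> empty  (e.g. head = 2, tail = 2)
    (head + 1) % size == tail -> full   (e.g. head = 1, tail = 2,
                                         holding 3 elements)

Push writes at head and advances it. Queue pop reads at tail and advances it
(FIFO), while stack pop reads at head - 1 and steps head back (LIFO), which
is why both map types share the push path but differ only in pop/peek.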
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
deleted file mode 100644
index 488ef9663c01..000000000000
--- a/kernel/bpf/sockmap.c
+++ /dev/null
@@ -1,2580 +0,0 @@
-/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-/* A BPF sock_map is used to store sock objects. This is primarly used
- * for doing socket redirect with BPF helper routines.
- *
- * A sock map may have BPF programs attached to it, currently a program
- * used to parse packets and a program to provide a verdict and redirect
- * decision on the packet are supported. Any programs attached to a sock
- * map are inherited by sock objects when they are added to the map. If
- * no BPF programs are attached the sock object may only be used for sock
- * redirect.
- *
- * A sock object may be in multiple maps, but can only inherit a single
- * parse or verdict program. If adding a sock object to a map would result
- * in having multiple parsing programs the update will return an EBUSY error.
- *
- * For reference this program is similar to devmap used in XDP context
- * reviewing these together may be useful. For an example please review
- * ./samples/bpf/sockmap/.
- */
-#include <linux/bpf.h>
-#include <net/sock.h>
-#include <linux/filter.h>
-#include <linux/errno.h>
-#include <linux/file.h>
-#include <linux/kernel.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <net/strparser.h>
-#include <net/tcp.h>
-#include <linux/ptr_ring.h>
-#include <net/inet_common.h>
-#include <linux/sched/signal.h>
-
-#define SOCK_CREATE_FLAG_MASK \
- (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
-
-struct bpf_sock_progs {
- struct bpf_prog *bpf_tx_msg;
- struct bpf_prog *bpf_parse;
- struct bpf_prog *bpf_verdict;
-};
-
-struct bpf_stab {
- struct bpf_map map;
- struct sock **sock_map;
- struct bpf_sock_progs progs;
- raw_spinlock_t lock;
-};
-
-struct bucket {
- struct hlist_head head;
- raw_spinlock_t lock;
-};
-
-struct bpf_htab {
- struct bpf_map map;
- struct bucket *buckets;
- atomic_t count;
- u32 n_buckets;
- u32 elem_size;
- struct bpf_sock_progs progs;
- struct rcu_head rcu;
-};
-
-struct htab_elem {
- struct rcu_head rcu;
- struct hlist_node hash_node;
- u32 hash;
- struct sock *sk;
- char key[0];
-};
-
-enum smap_psock_state {
- SMAP_TX_RUNNING,
-};
-
-struct smap_psock_map_entry {
- struct list_head list;
- struct bpf_map *map;
- struct sock **entry;
- struct htab_elem __rcu *hash_link;
-};
-
-struct smap_psock {
- struct rcu_head rcu;
- refcount_t refcnt;
-
- /* datapath variables */
- struct sk_buff_head rxqueue;
- bool strp_enabled;
-
- /* datapath error path cache across tx work invocations */
- int save_rem;
- int save_off;
- struct sk_buff *save_skb;
-
- /* datapath variables for tx_msg ULP */
- struct sock *sk_redir;
- int apply_bytes;
- int cork_bytes;
- int sg_size;
- int eval;
- struct sk_msg_buff *cork;
- struct list_head ingress;
-
- struct strparser strp;
- struct bpf_prog *bpf_tx_msg;
- struct bpf_prog *bpf_parse;
- struct bpf_prog *bpf_verdict;
- struct list_head maps;
- spinlock_t maps_lock;
-
- /* Back reference used when sock callback trigger sockmap operations */
- struct sock *sock;
- unsigned long state;
-
- struct work_struct tx_work;
- struct work_struct gc_work;
-
- struct proto *sk_proto;
- void (*save_close)(struct sock *sk, long timeout);
- void (*save_data_ready)(struct sock *sk);
- void (*save_write_space)(struct sock *sk);
-};
-
-static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
-static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len);
-static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
-static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
-static void bpf_tcp_close(struct sock *sk, long timeout);
-
-static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
-{
- return rcu_dereference_sk_user_data(sk);
-}
-
-static bool bpf_tcp_stream_read(const struct sock *sk)
-{
- struct smap_psock *psock;
- bool empty = true;
-
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (unlikely(!psock))
- goto out;
- empty = list_empty(&psock->ingress);
-out:
- rcu_read_unlock();
- return !empty;
-}
-
-enum {
- SOCKMAP_IPV4,
- SOCKMAP_IPV6,
- SOCKMAP_NUM_PROTS,
-};
-
-enum {
- SOCKMAP_BASE,
- SOCKMAP_TX,
- SOCKMAP_NUM_CONFIGS,
-};
-
-static struct proto *saved_tcpv6_prot __read_mostly;
-static DEFINE_SPINLOCK(tcpv6_prot_lock);
-static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
-static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
- struct proto *base)
-{
- prot[SOCKMAP_BASE] = *base;
- prot[SOCKMAP_BASE].close = bpf_tcp_close;
- prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg;
- prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read;
-
- prot[SOCKMAP_TX] = prot[SOCKMAP_BASE];
- prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg;
- prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage;
-}
-
-static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
-{
- int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
- int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
-
- sk->sk_prot = &bpf_tcp_prots[family][conf];
-}
-
-static int bpf_tcp_init(struct sock *sk)
-{
- struct smap_psock *psock;
-
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (unlikely(!psock)) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
- if (unlikely(psock->sk_proto)) {
- rcu_read_unlock();
- return -EBUSY;
- }
-
- psock->save_close = sk->sk_prot->close;
- psock->sk_proto = sk->sk_prot;
-
- /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
- if (sk->sk_family == AF_INET6 &&
- unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
- spin_lock_bh(&tcpv6_prot_lock);
- if (likely(sk->sk_prot != saved_tcpv6_prot)) {
- build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
- smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
- }
- spin_unlock_bh(&tcpv6_prot_lock);
- }
- update_sk_prot(sk, psock);
- rcu_read_unlock();
- return 0;
-}
-
-static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge);
-
-static void bpf_tcp_release(struct sock *sk)
-{
- struct smap_psock *psock;
-
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (unlikely(!psock))
- goto out;
-
- if (psock->cork) {
- free_start_sg(psock->sock, psock->cork, true);
- kfree(psock->cork);
- psock->cork = NULL;
- }
-
- if (psock->sk_proto) {
- sk->sk_prot = psock->sk_proto;
- psock->sk_proto = NULL;
- }
-out:
- rcu_read_unlock();
-}
-
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
- u32 hash, void *key, u32 key_size)
-{
- struct htab_elem *l;
-
- hlist_for_each_entry_rcu(l, head, hash_node) {
- if (l->hash == hash && !memcmp(&l->key, key, key_size))
- return l;
- }
-
- return NULL;
-}
-
-static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
-{
- return &htab->buckets[hash & (htab->n_buckets - 1)];
-}
-
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
-{
- return &__select_bucket(htab, hash)->head;
-}
-
-static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
-{
- atomic_dec(&htab->count);
- kfree_rcu(l, rcu);
-}
-
-static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
- struct smap_psock *psock)
-{
- struct smap_psock_map_entry *e;
-
- spin_lock_bh(&psock->maps_lock);
- e = list_first_entry_or_null(&psock->maps,
- struct smap_psock_map_entry,
- list);
- if (e)
- list_del(&e->list);
- spin_unlock_bh(&psock->maps_lock);
- return e;
-}
-
-static void bpf_tcp_close(struct sock *sk, long timeout)
-{
- void (*close_fun)(struct sock *sk, long timeout);
- struct smap_psock_map_entry *e;
- struct sk_msg_buff *md, *mtmp;
- struct smap_psock *psock;
- struct sock *osk;
-
- lock_sock(sk);
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (unlikely(!psock)) {
- rcu_read_unlock();
- release_sock(sk);
- return sk->sk_prot->close(sk, timeout);
- }
-
- /* The psock may be destroyed anytime after exiting the RCU critial
- * section so by the time we use close_fun the psock may no longer
- * be valid. However, bpf_tcp_close is called with the sock lock
- * held so the close hook and sk are still valid.
- */
- close_fun = psock->save_close;
-
- if (psock->cork) {
- free_start_sg(psock->sock, psock->cork, true);
- kfree(psock->cork);
- psock->cork = NULL;
- }
-
- list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
- list_del(&md->list);
- free_start_sg(psock->sock, md, true);
- kfree(md);
- }
-
- e = psock_map_pop(sk, psock);
- while (e) {
- if (e->entry) {
- struct bpf_stab *stab = container_of(e->map, struct bpf_stab, map);
-
- raw_spin_lock_bh(&stab->lock);
- osk = *e->entry;
- if (osk == sk) {
- *e->entry = NULL;
- smap_release_sock(psock, sk);
- }
- raw_spin_unlock_bh(&stab->lock);
- } else {
- struct htab_elem *link = rcu_dereference(e->hash_link);
- struct bpf_htab *htab = container_of(e->map, struct bpf_htab, map);
- struct hlist_head *head;
- struct htab_elem *l;
- struct bucket *b;
-
- b = __select_bucket(htab, link->hash);
- head = &b->head;
- raw_spin_lock_bh(&b->lock);
- l = lookup_elem_raw(head,
- link->hash, link->key,
- htab->map.key_size);
- /* If another thread deleted this object skip deletion.
- * The refcnt on psock may or may not be zero.
- */
- if (l && l == link) {
- hlist_del_rcu(&link->hash_node);
- smap_release_sock(psock, link->sk);
- free_htab_elem(htab, link);
- }
- raw_spin_unlock_bh(&b->lock);
- }
- kfree(e);
- e = psock_map_pop(sk, psock);
- }
- rcu_read_unlock();
- release_sock(sk);
- close_fun(sk, timeout);
-}
-
-enum __sk_action {
- __SK_DROP = 0,
- __SK_PASS,
- __SK_REDIRECT,
- __SK_NONE,
-};
-
-static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
- .name = "bpf_tcp",
- .uid = TCP_ULP_BPF,
- .user_visible = false,
- .owner = NULL,
- .init = bpf_tcp_init,
- .release = bpf_tcp_release,
-};
-
-static int memcopy_from_iter(struct sock *sk,
- struct sk_msg_buff *md,
- struct iov_iter *from, int bytes)
-{
- struct scatterlist *sg = md->sg_data;
- int i = md->sg_curr, rc = -ENOSPC;
-
- do {
- int copy;
- char *to;
-
- if (md->sg_copybreak >= sg[i].length) {
- md->sg_copybreak = 0;
-
- if (++i == MAX_SKB_FRAGS)
- i = 0;
-
- if (i == md->sg_end)
- break;
- }
-
- copy = sg[i].length - md->sg_copybreak;
- to = sg_virt(&sg[i]) + md->sg_copybreak;
- md->sg_copybreak += copy;
-
- if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
- rc = copy_from_iter_nocache(to, copy, from);
- else
- rc = copy_from_iter(to, copy, from);
-
- if (rc != copy) {
- rc = -EFAULT;
- goto out;
- }
-
- bytes -= copy;
- if (!bytes)
- break;
-
- md->sg_copybreak = 0;
- if (++i == MAX_SKB_FRAGS)
- i = 0;
- } while (i != md->sg_end);
-out:
- md->sg_curr = i;
- return rc;
-}
-
-static int bpf_tcp_push(struct sock *sk, int apply_bytes,
- struct sk_msg_buff *md,
- int flags, bool uncharge)
-{
- bool apply = apply_bytes;
- struct scatterlist *sg;
- int offset, ret = 0;
- struct page *p;
- size_t size;
-
- while (1) {
- sg = md->sg_data + md->sg_start;
- size = (apply && apply_bytes < sg->length) ?
- apply_bytes : sg->length;
- offset = sg->offset;
-
- tcp_rate_check_app_limited(sk);
- p = sg_page(sg);
-retry:
- ret = do_tcp_sendpages(sk, p, offset, size, flags);
- if (ret != size) {
- if (ret > 0) {
- if (apply)
- apply_bytes -= ret;
-
- sg->offset += ret;
- sg->length -= ret;
- size -= ret;
- offset += ret;
- if (uncharge)
- sk_mem_uncharge(sk, ret);
- goto retry;
- }
-
- return ret;
- }
-
- if (apply)
- apply_bytes -= ret;
- sg->offset += ret;
- sg->length -= ret;
- if (uncharge)
- sk_mem_uncharge(sk, ret);
-
- if (!sg->length) {
- put_page(p);
- md->sg_start++;
- if (md->sg_start == MAX_SKB_FRAGS)
- md->sg_start = 0;
- sg_init_table(sg, 1);
-
- if (md->sg_start == md->sg_end)
- break;
- }
-
- if (apply && !apply_bytes)
- break;
- }
- return 0;
-}
-
-static inline void bpf_compute_data_pointers_sg(struct sk_msg_buff *md)
-{
- struct scatterlist *sg = md->sg_data + md->sg_start;
-
- if (md->sg_copy[md->sg_start]) {
- md->data = md->data_end = 0;
- } else {
- md->data = sg_virt(sg);
- md->data_end = md->data + sg->length;
- }
-}
-
-static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
-{
- struct scatterlist *sg = md->sg_data;
- int i = md->sg_start;
-
- do {
- int uncharge = (bytes < sg[i].length) ? bytes : sg[i].length;
-
- sk_mem_uncharge(sk, uncharge);
- bytes -= uncharge;
- if (!bytes)
- break;
- i++;
- if (i == MAX_SKB_FRAGS)
- i = 0;
- } while (i != md->sg_end);
-}
-
-static void free_bytes_sg(struct sock *sk, int bytes,
- struct sk_msg_buff *md, bool charge)
-{
- struct scatterlist *sg = md->sg_data;
- int i = md->sg_start, free;
-
- while (bytes && sg[i].length) {
- free = sg[i].length;
- if (bytes < free) {
- sg[i].length -= bytes;
- sg[i].offset += bytes;
- if (charge)
- sk_mem_uncharge(sk, bytes);
- break;
- }
-
- if (charge)
- sk_mem_uncharge(sk, sg[i].length);
- put_page(sg_page(&sg[i]));
- bytes -= sg[i].length;
- sg[i].length = 0;
- sg[i].page_link = 0;
- sg[i].offset = 0;
- i++;
-
- if (i == MAX_SKB_FRAGS)
- i = 0;
- }
- md->sg_start = i;
-}
-
-static int free_sg(struct sock *sk, int start,
- struct sk_msg_buff *md, bool charge)
-{
- struct scatterlist *sg = md->sg_data;
- int i = start, free = 0;
-
- while (sg[i].length) {
- free += sg[i].length;
- if (charge)
- sk_mem_uncharge(sk, sg[i].length);
- if (!md->skb)
- put_page(sg_page(&sg[i]));
- sg[i].length = 0;
- sg[i].page_link = 0;
- sg[i].offset = 0;
- i++;
-
- if (i == MAX_SKB_FRAGS)
- i = 0;
- }
- if (md->skb)
- consume_skb(md->skb);
-
- return free;
-}
-
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge)
-{
- int free = free_sg(sk, md->sg_start, md, charge);
-
- md->sg_start = md->sg_end;
- return free;
-}
-
-static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
-{
- return free_sg(sk, md->sg_curr, md, true);
-}
-
-static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
-{
- return ((_rc == SK_PASS) ?
- (md->sk_redir ? __SK_REDIRECT : __SK_PASS) :
- __SK_DROP);
-}
-
-static unsigned int smap_do_tx_msg(struct sock *sk,
- struct smap_psock *psock,
- struct sk_msg_buff *md)
-{
- struct bpf_prog *prog;
- unsigned int rc, _rc;
-
- preempt_disable();
- rcu_read_lock();
-
- /* If the policy was removed mid-send then default to 'accept' */
- prog = READ_ONCE(psock->bpf_tx_msg);
- if (unlikely(!prog)) {
- _rc = SK_PASS;
- goto verdict;
- }
-
- bpf_compute_data_pointers_sg(md);
- md->sk = sk;
- rc = (*prog->bpf_func)(md, prog->insnsi);
- psock->apply_bytes = md->apply_bytes;
-
- /* Moving return codes from UAPI namespace into internal namespace */
- _rc = bpf_map_msg_verdict(rc, md);
-
- /* The psock has a refcount on the sock but not on the map and because
- * we need to drop rcu read lock here its possible the map could be
- * removed between here and when we need it to execute the sock
- * redirect. So do the map lookup now for future use.
- */
- if (_rc == __SK_REDIRECT) {
- if (psock->sk_redir)
- sock_put(psock->sk_redir);
- psock->sk_redir = do_msg_redirect_map(md);
- if (!psock->sk_redir) {
- _rc = __SK_DROP;
- goto verdict;
- }
- sock_hold(psock->sk_redir);
- }
-verdict:
- rcu_read_unlock();
- preempt_enable();
-
- return _rc;
-}
-
-static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
- struct smap_psock *psock,
- struct sk_msg_buff *md, int flags)
-{
- bool apply = apply_bytes;
- size_t size, copied = 0;
- struct sk_msg_buff *r;
- int err = 0, i;
-
- r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL);
- if (unlikely(!r))
- return -ENOMEM;
-
- lock_sock(sk);
- r->sg_start = md->sg_start;
- i = md->sg_start;
-
- do {
- size = (apply && apply_bytes < md->sg_data[i].length) ?
- apply_bytes : md->sg_data[i].length;
-
- if (!sk_wmem_schedule(sk, size)) {
- if (!copied)
- err = -ENOMEM;
- break;
- }
-
- sk_mem_charge(sk, size);
- r->sg_data[i] = md->sg_data[i];
- r->sg_data[i].length = size;
- md->sg_data[i].length -= size;
- md->sg_data[i].offset += size;
- copied += size;
-
- if (md->sg_data[i].length) {
- get_page(sg_page(&r->sg_data[i]));
- r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 0 : i + 1;
- } else {
- i++;
- if (i == MAX_SKB_FRAGS)
- i = 0;
- r->sg_end = i;
- }
-
- if (apply) {
- apply_bytes -= size;
- if (!apply_bytes)
- break;
- }
- } while (i != md->sg_end);
-
- md->sg_start = i;
-
- if (!err) {
- list_add_tail(&r->list, &psock->ingress);
- sk->sk_data_ready(sk);
- } else {
- free_start_sg(sk, r, true);
- kfree(r);
- }
-
- release_sock(sk);
- return err;
-}
-
-static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
- struct sk_msg_buff *md,
- int flags)
-{
- bool ingress = !!(md->flags & BPF_F_INGRESS);
- struct smap_psock *psock;
- int err = 0;
-
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (unlikely(!psock))
- goto out_rcu;
-
- if (!refcount_inc_not_zero(&psock->refcnt))
- goto out_rcu;
-
- rcu_read_unlock();
-
- if (ingress) {
- err = bpf_tcp_ingress(sk, send, psock, md, flags);
- } else {
- lock_sock(sk);
- err = bpf_tcp_push(sk, send, md, flags, false);
- release_sock(sk);
- }
- smap_release_sock(psock, sk);
- return err;
-out_rcu:
- rcu_read_unlock();
- return 0;
-}
-
-static inline void bpf_md_init(struct smap_psock *psock)
-{
- if (!psock->apply_bytes) {
- psock->eval = __SK_NONE;
- if (psock->sk_redir) {
- sock_put(psock->sk_redir);
- psock->sk_redir = NULL;
- }
- }
-}
-
-static void apply_bytes_dec(struct smap_psock *psock, int i)
-{
- if (psock->apply_bytes) {
- if (psock->apply_bytes < i)
- psock->apply_bytes = 0;
- else
- psock->apply_bytes -= i;
- }
-}
-
-static int bpf_exec_tx_verdict(struct smap_psock *psock,
- struct sk_msg_buff *m,
- struct sock *sk,
- int *copied, int flags)
-{
- bool cork = false, enospc = (m->sg_start == m->sg_end);
- struct sock *redir;
- int err = 0;
- int send;
-
-more_data:
- if (psock->eval == __SK_NONE)
- psock->eval = smap_do_tx_msg(sk, psock, m);
-
- if (m->cork_bytes &&
- m->cork_bytes > psock->sg_size && !enospc) {
- psock->cork_bytes = m->cork_bytes - psock->sg_size;
- if (!psock->cork) {
- psock->cork = kcalloc(1,
- sizeof(struct sk_msg_buff),
- GFP_ATOMIC | __GFP_NOWARN);
-
- if (!psock->cork) {
- err = -ENOMEM;
- goto out_err;
- }
- }
- memcpy(psock->cork, m, sizeof(*m));
- goto out_err;
- }
-
- send = psock->sg_size;
- if (psock->apply_bytes && psock->apply_bytes < send)
- send = psock->apply_bytes;
-
- switch (psock->eval) {
- case __SK_PASS:
- err = bpf_tcp_push(sk, send, m, flags, true);
- if (unlikely(err)) {
- *copied -= free_start_sg(sk, m, true);
- break;
- }
-
- apply_bytes_dec(psock, send);
- psock->sg_size -= send;
- break;
- case __SK_REDIRECT:
- redir = psock->sk_redir;
- apply_bytes_dec(psock, send);
-
- if (psock->cork) {
- cork = true;
- psock->cork = NULL;
- }
-
- return_mem_sg(sk, send, m);
- release_sock(sk);
-
- err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
- lock_sock(sk);
-
- if (unlikely(err < 0)) {
- int free = free_start_sg(sk, m, false);
-
- psock->sg_size = 0;
- if (!cork)
- *copied -= free;
- } else {
- psock->sg_size -= send;
- }
-
- if (cork) {
- free_start_sg(sk, m, true);
- psock->sg_size = 0;
- kfree(m);
- m = NULL;
- err = 0;
- }
- break;
- case __SK_DROP:
- default:
- free_bytes_sg(sk, send, m, true);
- apply_bytes_dec(psock, send);
- *copied -= send;
- psock->sg_size -= send;
- err = -EACCES;
- break;
- }
-
- if (likely(!err)) {
- bpf_md_init(psock);
- if (m &&
- m->sg_data[m->sg_start].page_link &&
- m->sg_data[m->sg_start].length)
- goto more_data;
- }
-
-out_err:
- return err;
-}
-
-static int bpf_wait_data(struct sock *sk,
- struct smap_psock *psk, int flags,
- long timeo, int *err)
-{
- int rc;
-
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
-
- add_wait_queue(sk_sleep(sk), &wait);
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- rc = sk_wait_event(sk, &timeo,
- !list_empty(&psk->ingress) ||
- !skb_queue_empty(&sk->sk_receive_queue),
- &wait);
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- remove_wait_queue(sk_sleep(sk), &wait);
-
- return rc;
-}
-
-static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
-{
- struct iov_iter *iter = &msg->msg_iter;
- struct smap_psock *psock;
- int copied = 0;
-
- if (unlikely(flags & MSG_ERRQUEUE))
- return inet_recv_error(sk, msg, len, addr_len);
- if (!skb_queue_empty(&sk->sk_receive_queue))
- return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
-
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (unlikely(!psock))
- goto out;
-
- if (unlikely(!refcount_inc_not_zero(&psock->refcnt)))
- goto out;
- rcu_read_unlock();
-
- lock_sock(sk);
-bytes_ready:
- while (copied != len) {
- struct scatterlist *sg;
- struct sk_msg_buff *md;
- int i;
-
- md = list_first_entry_or_null(&psock->ingress,
- struct sk_msg_buff, list);
- if (unlikely(!md))
- break;
- i = md->sg_start;
- do {
- struct page *page;
- int n, copy;
-
- sg = &md->sg_data[i];
- copy = sg->length;
- page = sg_page(sg);
-
- if (copied + copy > len)
- copy = len - copied;
-
- n = copy_page_to_iter(page, sg->offset, copy, iter);
- if (n != copy) {
- md->sg_start = i;
- release_sock(sk);
- smap_release_sock(psock, sk);
- return -EFAULT;
- }
-
- copied += copy;
- sg->offset += copy;
- sg->length -= copy;
- sk_mem_uncharge(sk, copy);
-
- if (!sg->length) {
- i++;
- if (i == MAX_SKB_FRAGS)
- i = 0;
- if (!md->skb)
- put_page(page);
- }
- if (copied == len)
- break;
- } while (i != md->sg_end);
- md->sg_start = i;
-
- if (!sg->length && md->sg_start == md->sg_end) {
- list_del(&md->list);
- if (md->skb)
- consume_skb(md->skb);
- kfree(md);
- }
- }
-
- if (!copied) {
- long timeo;
- int data;
- int err = 0;
-
- timeo = sock_rcvtimeo(sk, nonblock);
- data = bpf_wait_data(sk, psock, flags, timeo, &err);
-
- if (data) {
- if (!skb_queue_empty(&sk->sk_receive_queue)) {
- release_sock(sk);
- smap_release_sock(psock, sk);
- copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
- return copied;
- }
- goto bytes_ready;
- }
-
- if (err)
- copied = err;
- }
-
- release_sock(sk);
- smap_release_sock(psock, sk);
- return copied;
-out:
- rcu_read_unlock();
- return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
-}
-
-
-static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
-{
- int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
- struct sk_msg_buff md = {0};
- unsigned int sg_copy = 0;
- struct smap_psock *psock;
- int copied = 0, err = 0;
- struct scatterlist *sg;
- long timeo;
-
- /* Its possible a sock event or user removed the psock _but_ the ops
- * have not been reprogrammed yet so we get here. In this case fallback
- * to tcp_sendmsg. Note this only works because we _only_ ever allow
- * a single ULP there is no hierarchy here.
- */
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (unlikely(!psock)) {
- rcu_read_unlock();
- return tcp_sendmsg(sk, msg, size);
- }
-
- /* Increment the psock refcnt to ensure its not released while sending a
- * message. Required because sk lookup and bpf programs are used in
- * separate rcu critical sections. Its OK if we lose the map entry
- * but we can't lose the sock reference.
- */
- if (!refcount_inc_not_zero(&psock->refcnt)) {
- rcu_read_unlock();
- return tcp_sendmsg(sk, msg, size);
- }
-
- sg = md.sg_data;
- sg_init_marker(sg, MAX_SKB_FRAGS);
- rcu_read_unlock();
-
- lock_sock(sk);
- timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
-
- while (msg_data_left(msg)) {
- struct sk_msg_buff *m = NULL;
- bool enospc = false;
- int copy;
-
- if (sk->sk_err) {
- err = -sk->sk_err;
- goto out_err;
- }
-
- copy = msg_data_left(msg);
- if (!sk_stream_memory_free(sk))
- goto wait_for_sndbuf;
-
- m = psock->cork_bytes ? psock->cork : &md;
- m->sg_curr = m->sg_copybreak ? m->sg_curr : m->sg_end;
- err = sk_alloc_sg(sk, copy, m->sg_data,
- m->sg_start, &m->sg_end, &sg_copy,
- m->sg_end - 1);
- if (err) {
- if (err != -ENOSPC)
- goto wait_for_memory;
- enospc = true;
- copy = sg_copy;
- }
-
- err = memcopy_from_iter(sk, m, &msg->msg_iter, copy);
- if (err < 0) {
- free_curr_sg(sk, m);
- goto out_err;
- }
-
- psock->sg_size += copy;
- copied += copy;
- sg_copy = 0;
-
- /* When bytes are being corked skip running BPF program and
- * applying verdict unless there is no more buffer space. In
- * the ENOSPC case simply run BPF prorgram with currently
- * accumulated data. We don't have much choice at this point
- * we could try extending the page frags or chaining complex
- * frags but even in these cases _eventually_ we will hit an
- * OOM scenario. More complex recovery schemes may be
- * implemented in the future, but BPF programs must handle
- * the case where apply_cork requests are not honored. The
- * canonical method to verify this is to check data length.
- */
- if (psock->cork_bytes) {
- if (copy > psock->cork_bytes)
- psock->cork_bytes = 0;
- else
- psock->cork_bytes -= copy;
-
- if (psock->cork_bytes && !enospc)
- goto out_cork;
-
- /* All cork bytes accounted for re-run filter */
- psock->eval = __SK_NONE;
- psock->cork_bytes = 0;
- }
-
- err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
- if (unlikely(err < 0))
- goto out_err;
- continue;
-wait_for_sndbuf:
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
- err = sk_stream_wait_memory(sk, &timeo);
- if (err) {
- if (m && m != psock->cork)
- free_start_sg(sk, m, true);
- goto out_err;
- }
- }
-out_err:
- if (err < 0)
- err = sk_stream_error(sk, msg->msg_flags, err);
-out_cork:
- release_sock(sk);
- smap_release_sock(psock, sk);
- return copied ? copied : err;
-}
-
-static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
-{
- struct sk_msg_buff md = {0}, *m = NULL;
- int err = 0, copied = 0;
- struct smap_psock *psock;
- struct scatterlist *sg;
- bool enospc = false;
-
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (unlikely(!psock))
- goto accept;
-
- if (!refcount_inc_not_zero(&psock->refcnt))
- goto accept;
- rcu_read_unlock();
-
- lock_sock(sk);
-
- if (psock->cork_bytes) {
- m = psock->cork;
- sg = &m->sg_data[m->sg_end];
- } else {
- m = &md;
- sg = m->sg_data;
- sg_init_marker(sg, MAX_SKB_FRAGS);
- }
-
- /* Catch case where ring is full and sendpage is stalled. */
- if (unlikely(m->sg_end == m->sg_start &&
- m->sg_data[m->sg_end].length))
- goto out_err;
-
- psock->sg_size += size;
- sg_set_page(sg, page, size, offset);
- get_page(page);
- m->sg_copy[m->sg_end] = true;
- sk_mem_charge(sk, size);
- m->sg_end++;
- copied = size;
-
- if (m->sg_end == MAX_SKB_FRAGS)
- m->sg_end = 0;
-
- if (m->sg_end == m->sg_start)
- enospc = true;
-
- if (psock->cork_bytes) {
- if (size > psock->cork_bytes)
- psock->cork_bytes = 0;
- else
- psock->cork_bytes -= size;
-
- if (psock->cork_bytes && !enospc)
- goto out_err;
-
- /* All cork bytes accounted for re-run filter */
- psock->eval = __SK_NONE;
- psock->cork_bytes = 0;
- }
-
- err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
-out_err:
- release_sock(sk);
- smap_release_sock(psock, sk);
- return copied ? copied : err;
-accept:
- rcu_read_unlock();
- return tcp_sendpage(sk, page, offset, size, flags);
-}
-
-static void bpf_tcp_msg_add(struct smap_psock *psock,
- struct sock *sk,
- struct bpf_prog *tx_msg)
-{
- struct bpf_prog *orig_tx_msg;
-
- orig_tx_msg = xchg(&psock->bpf_tx_msg, tx_msg);
- if (orig_tx_msg)
- bpf_prog_put(orig_tx_msg);
-}
-
-static int bpf_tcp_ulp_register(void)
-{
- build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
- /* Once BPF TX ULP is registered it is never unregistered. It
- * will be in the ULP list for the lifetime of the system. Doing
- * duplicate registers is not a problem.
- */
- return tcp_register_ulp(&bpf_tcp_ulp_ops);
-}
-
-static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
-{
- struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
- int rc;
-
- if (unlikely(!prog))
- return __SK_DROP;
-
- skb_orphan(skb);
- /* We need to ensure that BPF metadata for maps is also cleared
- * when we orphan the skb so that we don't have the possibility
- * to reference a stale map.
- */
- TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
- skb->sk = psock->sock;
- bpf_compute_data_end_sk_skb(skb);
- preempt_disable();
- rc = (*prog->bpf_func)(skb, prog->insnsi);
- preempt_enable();
- skb->sk = NULL;
-
- /* Moving return codes from UAPI namespace into internal namespace */
- return rc == SK_PASS ?
- (TCP_SKB_CB(skb)->bpf.sk_redir ? __SK_REDIRECT : __SK_PASS) :
- __SK_DROP;
-}
-
-static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb)
-{
- struct sock *sk = psock->sock;
- int copied = 0, num_sg;
- struct sk_msg_buff *r;
-
- r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC);
- if (unlikely(!r))
- return -EAGAIN;
-
- if (!sk_rmem_schedule(sk, skb, skb->len)) {
- kfree(r);
- return -EAGAIN;
- }
-
- sg_init_table(r->sg_data, MAX_SKB_FRAGS);
- num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len);
- if (unlikely(num_sg < 0)) {
- kfree(r);
- return num_sg;
- }
- sk_mem_charge(sk, skb->len);
- copied = skb->len;
- r->sg_start = 0;
- r->sg_end = num_sg == MAX_SKB_FRAGS ? 0 : num_sg;
- r->skb = skb;
- list_add_tail(&r->list, &psock->ingress);
- sk->sk_data_ready(sk);
- return copied;
-}
-
-static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
-{
- struct smap_psock *peer;
- struct sock *sk;
- __u32 in;
- int rc;
-
- rc = smap_verdict_func(psock, skb);
- switch (rc) {
- case __SK_REDIRECT:
- sk = do_sk_redirect_map(skb);
- if (!sk) {
- kfree_skb(skb);
- break;
- }
-
- peer = smap_psock_sk(sk);
- in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
-
- if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) ||
- !test_bit(SMAP_TX_RUNNING, &peer->state))) {
- kfree_skb(skb);
- break;
- }
-
- if (!in && sock_writeable(sk)) {
- skb_set_owner_w(skb, sk);
- skb_queue_tail(&peer->rxqueue, skb);
- schedule_work(&peer->tx_work);
- break;
- } else if (in &&
- atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
- skb_queue_tail(&peer->rxqueue, skb);
- schedule_work(&peer->tx_work);
- break;
- }
- /* Fall through and free skb otherwise */
- case __SK_DROP:
- default:
- kfree_skb(skb);
- }
-}
-
-static void smap_report_sk_error(struct smap_psock *psock, int err)
-{
- struct sock *sk = psock->sock;
-
- sk->sk_err = err;
- sk->sk_error_report(sk);
-}
-
-static void smap_read_sock_strparser(struct strparser *strp,
- struct sk_buff *skb)
-{
- struct smap_psock *psock;
-
- rcu_read_lock();
- psock = container_of(strp, struct smap_psock, strp);
- smap_do_verdict(psock, skb);
- rcu_read_unlock();
-}
-
-/* Called with lock held on socket */
-static void smap_data_ready(struct sock *sk)
-{
- struct smap_psock *psock;
-
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (likely(psock)) {
- write_lock_bh(&sk->sk_callback_lock);
- strp_data_ready(&psock->strp);
- write_unlock_bh(&sk->sk_callback_lock);
- }
- rcu_read_unlock();
-}
-
-static void smap_tx_work(struct work_struct *w)
-{
- struct smap_psock *psock;
- struct sk_buff *skb;
- int rem, off, n;
-
- psock = container_of(w, struct smap_psock, tx_work);
-
- /* lock sock to avoid losing sk_socket at some point during loop */
- lock_sock(psock->sock);
- if (psock->save_skb) {
- skb = psock->save_skb;
- rem = psock->save_rem;
- off = psock->save_off;
- psock->save_skb = NULL;
- goto start;
- }
-
- while ((skb = skb_dequeue(&psock->rxqueue))) {
- __u32 flags;
-
- rem = skb->len;
- off = 0;
-start:
- flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
- do {
- if (likely(psock->sock->sk_socket)) {
- if (flags)
- n = smap_do_ingress(psock, skb);
- else
- n = skb_send_sock_locked(psock->sock,
- skb, off, rem);
- } else {
- n = -EINVAL;
- }
-
- if (n <= 0) {
- if (n == -EAGAIN) {
- /* Retry when space is available */
- psock->save_skb = skb;
- psock->save_rem = rem;
- psock->save_off = off;
- goto out;
- }
- /* Hard errors break pipe and stop xmit */
- smap_report_sk_error(psock, n ? -n : EPIPE);
- clear_bit(SMAP_TX_RUNNING, &psock->state);
- kfree_skb(skb);
- goto out;
- }
- rem -= n;
- off += n;
- } while (rem);
-
- if (!flags)
- kfree_skb(skb);
- }
-out:
- release_sock(psock->sock);
-}
-
-static void smap_write_space(struct sock *sk)
-{
- struct smap_psock *psock;
- void (*write_space)(struct sock *sk);
-
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
- schedule_work(&psock->tx_work);
- write_space = psock->save_write_space;
- rcu_read_unlock();
- write_space(sk);
-}
-
-static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
-{
- if (!psock->strp_enabled)
- return;
- sk->sk_data_ready = psock->save_data_ready;
- sk->sk_write_space = psock->save_write_space;
- psock->save_data_ready = NULL;
- psock->save_write_space = NULL;
- strp_stop(&psock->strp);
- psock->strp_enabled = false;
-}
-
-static void smap_destroy_psock(struct rcu_head *rcu)
-{
- struct smap_psock *psock = container_of(rcu,
- struct smap_psock, rcu);
-
- /* Now that a grace period has passed there is no longer
- * any reference to this sock in the sockmap so we can
- * destroy the psock, strparser, and bpf programs. But,
- * because we use workqueue sync operations we can not
- * do it in rcu context
- */
- schedule_work(&psock->gc_work);
-}
-
-static bool psock_is_smap_sk(struct sock *sk)
-{
- return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops;
-}
-
-static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
-{
- if (refcount_dec_and_test(&psock->refcnt)) {
- if (psock_is_smap_sk(sock))
- tcp_cleanup_ulp(sock);
- write_lock_bh(&sock->sk_callback_lock);
- smap_stop_sock(psock, sock);
- write_unlock_bh(&sock->sk_callback_lock);
- clear_bit(SMAP_TX_RUNNING, &psock->state);
- rcu_assign_sk_user_data(sock, NULL);
- call_rcu_sched(&psock->rcu, smap_destroy_psock);
- }
-}
-
-static int smap_parse_func_strparser(struct strparser *strp,
- struct sk_buff *skb)
-{
- struct smap_psock *psock;
- struct bpf_prog *prog;
- int rc;
-
- rcu_read_lock();
- psock = container_of(strp, struct smap_psock, strp);
- prog = READ_ONCE(psock->bpf_parse);
-
- if (unlikely(!prog)) {
- rcu_read_unlock();
- return skb->len;
- }
-
- /* Attach socket for bpf program to use if needed we can do this
- * because strparser clones the skb before handing it to a upper
- * layer, meaning skb_orphan has been called. We NULL sk on the
- * way out to ensure we don't trigger a BUG_ON in skb/sk operations
- * later and because we are not charging the memory of this skb to
- * any socket yet.
- */
- skb->sk = psock->sock;
- bpf_compute_data_end_sk_skb(skb);
- rc = (*prog->bpf_func)(skb, prog->insnsi);
- skb->sk = NULL;
- rcu_read_unlock();
- return rc;
-}
-
-static int smap_read_sock_done(struct strparser *strp, int err)
-{
- return err;
-}
-
-static int smap_init_sock(struct smap_psock *psock,
- struct sock *sk)
-{
- static const struct strp_callbacks cb = {
- .rcv_msg = smap_read_sock_strparser,
- .parse_msg = smap_parse_func_strparser,
- .read_sock_done = smap_read_sock_done,
- };
-
- return strp_init(&psock->strp, sk, &cb);
-}
-
-static void smap_init_progs(struct smap_psock *psock,
- struct bpf_prog *verdict,
- struct bpf_prog *parse)
-{
- struct bpf_prog *orig_parse, *orig_verdict;
-
- orig_parse = xchg(&psock->bpf_parse, parse);
- orig_verdict = xchg(&psock->bpf_verdict, verdict);
-
- if (orig_verdict)
- bpf_prog_put(orig_verdict);
- if (orig_parse)
- bpf_prog_put(orig_parse);
-}
-
-static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
-{
- if (sk->sk_data_ready == smap_data_ready)
- return;
- psock->save_data_ready = sk->sk_data_ready;
- psock->save_write_space = sk->sk_write_space;
- sk->sk_data_ready = smap_data_ready;
- sk->sk_write_space = smap_write_space;
- psock->strp_enabled = true;
-}
-
-static void sock_map_remove_complete(struct bpf_stab *stab)
-{
- bpf_map_area_free(stab->sock_map);
- kfree(stab);
-}
-
-static void smap_gc_work(struct work_struct *w)
-{
- struct smap_psock_map_entry *e, *tmp;
- struct sk_msg_buff *md, *mtmp;
- struct smap_psock *psock;
-
- psock = container_of(w, struct smap_psock, gc_work);
-
- /* no callback lock needed because we already detached sockmap ops */
- if (psock->strp_enabled)
- strp_done(&psock->strp);
-
- cancel_work_sync(&psock->tx_work);
- __skb_queue_purge(&psock->rxqueue);
-
- /* At this point all strparser and xmit work must be complete */
- if (psock->bpf_parse)
- bpf_prog_put(psock->bpf_parse);
- if (psock->bpf_verdict)
- bpf_prog_put(psock->bpf_verdict);
- if (psock->bpf_tx_msg)
- bpf_prog_put(psock->bpf_tx_msg);
-
- if (psock->cork) {
- free_start_sg(psock->sock, psock->cork, true);
- kfree(psock->cork);
- }
-
- list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
- list_del(&md->list);
- free_start_sg(psock->sock, md, true);
- kfree(md);
- }
-
- list_for_each_entry_safe(e, tmp, &psock->maps, list) {
- list_del(&e->list);
- kfree(e);
- }
-
- if (psock->sk_redir)
- sock_put(psock->sk_redir);
-
- sock_put(psock->sock);
- kfree(psock);
-}
-
-static struct smap_psock *smap_init_psock(struct sock *sock, int node)
-{
- struct smap_psock *psock;
-
- psock = kzalloc_node(sizeof(struct smap_psock),
- GFP_ATOMIC | __GFP_NOWARN,
- node);
- if (!psock)
- return ERR_PTR(-ENOMEM);
-
- psock->eval = __SK_NONE;
- psock->sock = sock;
- skb_queue_head_init(&psock->rxqueue);
- INIT_WORK(&psock->tx_work, smap_tx_work);
- INIT_WORK(&psock->gc_work, smap_gc_work);
- INIT_LIST_HEAD(&psock->maps);
- INIT_LIST_HEAD(&psock->ingress);
- refcount_set(&psock->refcnt, 1);
- spin_lock_init(&psock->maps_lock);
-
- rcu_assign_sk_user_data(sock, psock);
- sock_hold(sock);
- return psock;
-}
-
-static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
-{
- struct bpf_stab *stab;
- u64 cost;
- int err;
-
- if (!capable(CAP_NET_ADMIN))
- return ERR_PTR(-EPERM);
-
- /* check sanity of attributes */
- if (attr->max_entries == 0 || attr->key_size != 4 ||
- attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
- return ERR_PTR(-EINVAL);
-
- err = bpf_tcp_ulp_register();
- if (err && err != -EEXIST)
- return ERR_PTR(err);
-
- stab = kzalloc(sizeof(*stab), GFP_USER);
- if (!stab)
- return ERR_PTR(-ENOMEM);
-
- bpf_map_init_from_attr(&stab->map, attr);
- raw_spin_lock_init(&stab->lock);
-
- /* make sure page count doesn't overflow */
- cost = (u64) stab->map.max_entries * sizeof(struct sock *);
- err = -EINVAL;
- if (cost >= U32_MAX - PAGE_SIZE)
- goto free_stab;
-
- stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
- /* if map size is larger than memlock limit, reject it early */
- err = bpf_map_precharge_memlock(stab->map.pages);
- if (err)
- goto free_stab;
-
- err = -ENOMEM;
- stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
- sizeof(struct sock *),
- stab->map.numa_node);
- if (!stab->sock_map)
- goto free_stab;
-
- return &stab->map;
-free_stab:
- kfree(stab);
- return ERR_PTR(err);
-}
-
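To make the sizing logic above concrete, a worked example (assuming 64-bit pointers and 4 KiB pages): with max_entries = 1024, cost = 1024 * sizeof(struct sock *) = 8192 bytes, so stab->map.pages = round_up(8192, 4096) >> PAGE_SHIFT = 2 pages are precharged against the memlock limit before the array is allocated.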
-static void smap_list_map_remove(struct smap_psock *psock,
- struct sock **entry)
-{
- struct smap_psock_map_entry *e, *tmp;
-
- spin_lock_bh(&psock->maps_lock);
- list_for_each_entry_safe(e, tmp, &psock->maps, list) {
- if (e->entry == entry) {
- list_del(&e->list);
- kfree(e);
- }
- }
- spin_unlock_bh(&psock->maps_lock);
-}
-
-static void smap_list_hash_remove(struct smap_psock *psock,
- struct htab_elem *hash_link)
-{
- struct smap_psock_map_entry *e, *tmp;
-
- spin_lock_bh(&psock->maps_lock);
- list_for_each_entry_safe(e, tmp, &psock->maps, list) {
- struct htab_elem *c = rcu_dereference(e->hash_link);
-
- if (c == hash_link) {
- list_del(&e->list);
- kfree(e);
- }
- }
- spin_unlock_bh(&psock->maps_lock);
-}
-
-static void sock_map_free(struct bpf_map *map)
-{
- struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
- int i;
-
- synchronize_rcu();
-
- /* At this point no update, lookup or delete operations can happen.
- * However, be aware we can still get socket state event updates and
- * data ready callbacks that reference the psock from sk_user_data.
- * Also, psock worker threads are still in-flight. So smap_release_sock
- * will only free the psock after cancel_sync on the worker threads
- * and a grace period expires, to ensure the psock is really safe to remove.
- */
- rcu_read_lock();
- raw_spin_lock_bh(&stab->lock);
- for (i = 0; i < stab->map.max_entries; i++) {
- struct smap_psock *psock;
- struct sock *sock;
-
- sock = stab->sock_map[i];
- if (!sock)
- continue;
- stab->sock_map[i] = NULL;
- psock = smap_psock_sk(sock);
- /* This check handles a racing sock event that can grab the
- * sk_callback_lock before this case but after the xchg happens,
- * causing the refcnt to hit zero and the sock user data (psock)
- * to be NULL and queued for garbage collection.
- */
- if (likely(psock)) {
- smap_list_map_remove(psock, &stab->sock_map[i]);
- smap_release_sock(psock, sock);
- }
- }
- raw_spin_unlock_bh(&stab->lock);
- rcu_read_unlock();
-
- sock_map_remove_complete(stab);
-}
-
-static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
-{
- struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
- u32 i = key ? *(u32 *)key : U32_MAX;
- u32 *next = (u32 *)next_key;
-
- if (i >= stab->map.max_entries) {
- *next = 0;
- return 0;
- }
-
- if (i == stab->map.max_entries - 1)
- return -ENOENT;
-
- *next = i + 1;
- return 0;
-}
-
-struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
-{
- struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
-
- if (key >= map->max_entries)
- return NULL;
-
- return READ_ONCE(stab->sock_map[key]);
-}
-
-static int sock_map_delete_elem(struct bpf_map *map, void *key)
-{
- struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
- struct smap_psock *psock;
- int k = *(u32 *)key;
- struct sock *sock;
-
- if (k >= map->max_entries)
- return -EINVAL;
-
- raw_spin_lock_bh(&stab->lock);
- sock = stab->sock_map[k];
- stab->sock_map[k] = NULL;
- raw_spin_unlock_bh(&stab->lock);
- if (!sock)
- return -EINVAL;
-
- psock = smap_psock_sk(sock);
- if (!psock)
- return 0;
- if (psock->bpf_parse) {
- write_lock_bh(&sock->sk_callback_lock);
- smap_stop_sock(psock, sock);
- write_unlock_bh(&sock->sk_callback_lock);
- }
- smap_list_map_remove(psock, &stab->sock_map[k]);
- smap_release_sock(psock, sock);
- return 0;
-}
-
-/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
- * done inside rcu critical sections. This ensures on updates that the psock
- * will not be released via smap_release_sock() until concurrent updates/deletes
- * complete. All operations on sock_map use cmpxchg and xchg to ensure
- * we do not get stale references. Any reads into the map must be done
- * with READ_ONCE() because of this.
- *
- * A psock is destroyed via call_rcu and after any worker threads are cancelled
- * and synced, so we are certain all references from the update/lookup/delete
- * operations as well as references in the data path are no longer in use.
- *
- * Psocks may exist in multiple maps, but only a single set of parse/verdict
- * programs may be inherited from the maps it belongs to. A reference count
- * is kept with the total number of references to the psock from all maps. The
- * psock will not be released until this reaches zero. The psock and sock
- * user data use the sk_callback_lock to protect critical data structures
- * from concurrent access. This allows us to avoid two updates modifying
- * the user data in sock concurrently; the lock is required anyway for
- * modifying callbacks, and we simply increase its scope slightly.
- *
- * Rules to follow:
- * - psock must always be read inside RCU critical section
- * - sk_user_data must only be modified inside sk_callback_lock and read
- * inside RCU critical section.
- * - psock->maps list must only be read & modified inside sk_callback_lock
- * - sock_map must use READ_ONCE and (cmp)xchg operations
- * - BPF verdict/parse programs must use READ_ONCE and xchg operations
- */
-
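A minimal sketch of the first two rules in practice, assuming a hypothetical caller that already holds a struct sock *sk; use_psock() is a stand-in consumer, not a function in this file:

struct smap_psock *psock;

rcu_read_lock();
psock = smap_psock_sk(sk);	/* psock read inside RCU critical section */
if (psock && refcount_inc_not_zero(&psock->refcnt))
	use_psock(psock);	/* hypothetical consumer */
rcu_read_unlock();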
-static int __sock_map_ctx_update_elem(struct bpf_map *map,
- struct bpf_sock_progs *progs,
- struct sock *sock,
- void *key)
-{
- struct bpf_prog *verdict, *parse, *tx_msg;
- struct smap_psock *psock;
- bool new = false;
- int err = 0;
-
- /* 1. If the sock map has BPF programs, those will be inherited by
- * the sock being added. If the sock is already attached to BPF
- * programs, this results in an error.
- */
- verdict = READ_ONCE(progs->bpf_verdict);
- parse = READ_ONCE(progs->bpf_parse);
- tx_msg = READ_ONCE(progs->bpf_tx_msg);
-
- if (parse && verdict) {
- /* bpf prog refcnt may be zero if a concurrent attach operation
- * removes the program after the above READ_ONCE() but before
- * we increment the refcnt. If this is the case, abort with an
- * error.
- */
- verdict = bpf_prog_inc_not_zero(verdict);
- if (IS_ERR(verdict))
- return PTR_ERR(verdict);
-
- parse = bpf_prog_inc_not_zero(parse);
- if (IS_ERR(parse)) {
- bpf_prog_put(verdict);
- return PTR_ERR(parse);
- }
- }
-
- if (tx_msg) {
- tx_msg = bpf_prog_inc_not_zero(tx_msg);
- if (IS_ERR(tx_msg)) {
- if (parse && verdict) {
- bpf_prog_put(parse);
- bpf_prog_put(verdict);
- }
- return PTR_ERR(tx_msg);
- }
- }
-
- psock = smap_psock_sk(sock);
-
- /* 2. Do not allow inheriting programs if a psock exists and has
- * already inherited programs. This would create confusion about
- * which parser/verdict program is running. If no psock exists,
- * create one. This is done inside sk_callback_lock to ensure a
- * concurrent create doesn't update user data.
- */
- if (psock) {
- if (!psock_is_smap_sk(sock)) {
- err = -EBUSY;
- goto out_progs;
- }
- if (READ_ONCE(psock->bpf_parse) && parse) {
- err = -EBUSY;
- goto out_progs;
- }
- if (READ_ONCE(psock->bpf_tx_msg) && tx_msg) {
- err = -EBUSY;
- goto out_progs;
- }
- if (!refcount_inc_not_zero(&psock->refcnt)) {
- err = -EAGAIN;
- goto out_progs;
- }
- } else {
- psock = smap_init_psock(sock, map->numa_node);
- if (IS_ERR(psock)) {
- err = PTR_ERR(psock);
- goto out_progs;
- }
-
- set_bit(SMAP_TX_RUNNING, &psock->state);
- new = true;
- }
-
- /* 3. At this point we have a reference to a valid psock that is
- * running. Attach any BPF programs needed.
- */
- if (tx_msg)
- bpf_tcp_msg_add(psock, sock, tx_msg);
- if (new) {
- err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
- if (err)
- goto out_free;
- }
-
- if (parse && verdict && !psock->strp_enabled) {
- err = smap_init_sock(psock, sock);
- if (err)
- goto out_free;
- smap_init_progs(psock, verdict, parse);
- write_lock_bh(&sock->sk_callback_lock);
- smap_start_sock(psock, sock);
- write_unlock_bh(&sock->sk_callback_lock);
- }
-
- return err;
-out_free:
- smap_release_sock(psock, sock);
-out_progs:
- if (parse && verdict) {
- bpf_prog_put(parse);
- bpf_prog_put(verdict);
- }
- if (tx_msg)
- bpf_prog_put(tx_msg);
- return err;
-}
-
-static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
- struct bpf_map *map,
- void *key, u64 flags)
-{
- struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
- struct bpf_sock_progs *progs = &stab->progs;
- struct sock *osock, *sock = skops->sk;
- struct smap_psock_map_entry *e;
- struct smap_psock *psock;
- u32 i = *(u32 *)key;
- int err;
-
- if (unlikely(flags > BPF_EXIST))
- return -EINVAL;
- if (unlikely(i >= stab->map.max_entries))
- return -E2BIG;
-
- e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
- if (!e)
- return -ENOMEM;
-
- err = __sock_map_ctx_update_elem(map, progs, sock, key);
- if (err)
- goto out;
-
- /* psock guaranteed to be present. */
- psock = smap_psock_sk(sock);
- raw_spin_lock_bh(&stab->lock);
- osock = stab->sock_map[i];
- if (osock && flags == BPF_NOEXIST) {
- err = -EEXIST;
- goto out_unlock;
- }
- if (!osock && flags == BPF_EXIST) {
- err = -ENOENT;
- goto out_unlock;
- }
-
- e->entry = &stab->sock_map[i];
- e->map = map;
- spin_lock_bh(&psock->maps_lock);
- list_add_tail(&e->list, &psock->maps);
- spin_unlock_bh(&psock->maps_lock);
-
- stab->sock_map[i] = sock;
- if (osock) {
- psock = smap_psock_sk(osock);
- smap_list_map_remove(psock, &stab->sock_map[i]);
- smap_release_sock(psock, osock);
- }
- raw_spin_unlock_bh(&stab->lock);
- return 0;
-out_unlock:
- smap_release_sock(psock, sock);
- raw_spin_unlock_bh(&stab->lock);
-out:
- kfree(e);
- return err;
-}
-
-int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
-{
- struct bpf_sock_progs *progs;
- struct bpf_prog *orig;
-
- if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
- struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
-
- progs = &stab->progs;
- } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH) {
- struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-
- progs = &htab->progs;
- } else {
- return -EINVAL;
- }
-
- switch (type) {
- case BPF_SK_MSG_VERDICT:
- orig = xchg(&progs->bpf_tx_msg, prog);
- break;
- case BPF_SK_SKB_STREAM_PARSER:
- orig = xchg(&progs->bpf_parse, prog);
- break;
- case BPF_SK_SKB_STREAM_VERDICT:
- orig = xchg(&progs->bpf_verdict, prog);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- if (orig)
- bpf_prog_put(orig);
-
- return 0;
-}
-
-int sockmap_get_from_fd(const union bpf_attr *attr, int type,
- struct bpf_prog *prog)
-{
- int ufd = attr->target_fd;
- struct bpf_map *map;
- struct fd f;
- int err;
-
- f = fdget(ufd);
- map = __bpf_map_get(f);
- if (IS_ERR(map))
- return PTR_ERR(map);
-
- err = sock_map_prog(map, prog, attr->attach_type);
- fdput(f);
- return err;
-}
-
-static void *sock_map_lookup(struct bpf_map *map, void *key)
-{
- return NULL;
-}
-
-static int sock_map_update_elem(struct bpf_map *map,
- void *key, void *value, u64 flags)
-{
- struct bpf_sock_ops_kern skops;
- u32 fd = *(u32 *)value;
- struct socket *socket;
- int err;
-
- socket = sockfd_lookup(fd, &err);
- if (!socket)
- return err;
-
- skops.sk = socket->sk;
- if (!skops.sk) {
- fput(socket->file);
- return -EINVAL;
- }
-
- if (skops.sk->sk_type != SOCK_STREAM ||
- skops.sk->sk_protocol != IPPROTO_TCP) {
- fput(socket->file);
- return -EOPNOTSUPP;
- }
-
- lock_sock(skops.sk);
- preempt_disable();
- rcu_read_lock();
- err = sock_map_ctx_update_elem(&skops, map, key, flags);
- rcu_read_unlock();
- preempt_enable();
- release_sock(skops.sk);
- fput(socket->file);
- return err;
-}
-
-static void sock_map_release(struct bpf_map *map)
-{
- struct bpf_sock_progs *progs;
- struct bpf_prog *orig;
-
- if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
- struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
-
- progs = &stab->progs;
- } else {
- struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-
- progs = &htab->progs;
- }
-
- orig = xchg(&progs->bpf_parse, NULL);
- if (orig)
- bpf_prog_put(orig);
- orig = xchg(&progs->bpf_verdict, NULL);
- if (orig)
- bpf_prog_put(orig);
-
- orig = xchg(&progs->bpf_tx_msg, NULL);
- if (orig)
- bpf_prog_put(orig);
-}
-
-static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
-{
- struct bpf_htab *htab;
- int i, err;
- u64 cost;
-
- if (!capable(CAP_NET_ADMIN))
- return ERR_PTR(-EPERM);
-
- /* check sanity of attributes */
- if (attr->max_entries == 0 ||
- attr->key_size == 0 ||
- attr->value_size != 4 ||
- attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
- return ERR_PTR(-EINVAL);
-
- if (attr->key_size > MAX_BPF_STACK)
- /* eBPF programs initialize keys on the stack, so they cannot be
- * larger than the max stack size
- */
- return ERR_PTR(-E2BIG);
-
- err = bpf_tcp_ulp_register();
- if (err && err != -EEXIST)
- return ERR_PTR(err);
-
- htab = kzalloc(sizeof(*htab), GFP_USER);
- if (!htab)
- return ERR_PTR(-ENOMEM);
-
- bpf_map_init_from_attr(&htab->map, attr);
-
- htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
- htab->elem_size = sizeof(struct htab_elem) +
- round_up(htab->map.key_size, 8);
- err = -EINVAL;
- if (htab->n_buckets == 0 ||
- htab->n_buckets > U32_MAX / sizeof(struct bucket))
- goto free_htab;
-
- cost = (u64) htab->n_buckets * sizeof(struct bucket) +
- (u64) htab->elem_size * htab->map.max_entries;
-
- if (cost >= U32_MAX - PAGE_SIZE)
- goto free_htab;
-
- htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
- err = bpf_map_precharge_memlock(htab->map.pages);
- if (err)
- goto free_htab;
-
- err = -ENOMEM;
- htab->buckets = bpf_map_area_alloc(
- htab->n_buckets * sizeof(struct bucket),
- htab->map.numa_node);
- if (!htab->buckets)
- goto free_htab;
-
- for (i = 0; i < htab->n_buckets; i++) {
- INIT_HLIST_HEAD(&htab->buckets[i].head);
- raw_spin_lock_init(&htab->buckets[i].lock);
- }
-
- return &htab->map;
-free_htab:
- kfree(htab);
- return ERR_PTR(err);
-}
-
-static void __bpf_htab_free(struct rcu_head *rcu)
-{
- struct bpf_htab *htab;
-
- htab = container_of(rcu, struct bpf_htab, rcu);
- bpf_map_area_free(htab->buckets);
- kfree(htab);
-}
-
-static void sock_hash_free(struct bpf_map *map)
-{
- struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- int i;
-
- synchronize_rcu();
-
- /* At this point no update, lookup or delete operations can happen.
- * However, be aware we can still get socket state event updates and
- * data ready callbacks that reference the psock from sk_user_data.
- * Also, psock worker threads are still in-flight. So smap_release_sock
- * will only free the psock after cancel_sync on the worker threads
- * and a grace period expires, to ensure the psock is really safe to remove.
- */
- rcu_read_lock();
- for (i = 0; i < htab->n_buckets; i++) {
- struct bucket *b = __select_bucket(htab, i);
- struct hlist_head *head;
- struct hlist_node *n;
- struct htab_elem *l;
-
- raw_spin_lock_bh(&b->lock);
- head = &b->head;
- hlist_for_each_entry_safe(l, n, head, hash_node) {
- struct sock *sock = l->sk;
- struct smap_psock *psock;
-
- hlist_del_rcu(&l->hash_node);
- psock = smap_psock_sk(sock);
- /* This check handles a racing sock event that can grab
- * the sk_callback_lock before this case but after the xchg,
- * causing the refcnt to hit zero and the sock user data
- * (psock) to be NULL and queued for garbage collection.
- */
- if (likely(psock)) {
- smap_list_hash_remove(psock, l);
- smap_release_sock(psock, sock);
- }
- free_htab_elem(htab, l);
- }
- raw_spin_unlock_bh(&b->lock);
- }
- rcu_read_unlock();
- call_rcu(&htab->rcu, __bpf_htab_free);
-}
-
-static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
- void *key, u32 key_size, u32 hash,
- struct sock *sk,
- struct htab_elem *old_elem)
-{
- struct htab_elem *l_new;
-
- if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
- if (!old_elem) {
- atomic_dec(&htab->count);
- return ERR_PTR(-E2BIG);
- }
- }
- l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
- htab->map.numa_node);
- if (!l_new) {
- atomic_dec(&htab->count);
- return ERR_PTR(-ENOMEM);
- }
-
- memcpy(l_new->key, key, key_size);
- l_new->sk = sk;
- l_new->hash = hash;
- return l_new;
-}
-
-static inline u32 htab_map_hash(const void *key, u32 key_len)
-{
- return jhash(key, key_len, 0);
-}
-
-static int sock_hash_get_next_key(struct bpf_map *map,
- void *key, void *next_key)
-{
- struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct htab_elem *l, *next_l;
- struct hlist_head *h;
- u32 hash, key_size;
- int i = 0;
-
- WARN_ON_ONCE(!rcu_read_lock_held());
-
- key_size = map->key_size;
- if (!key)
- goto find_first_elem;
- hash = htab_map_hash(key, key_size);
- h = select_bucket(htab, hash);
-
- l = lookup_elem_raw(h, hash, key, key_size);
- if (!l)
- goto find_first_elem;
- next_l = hlist_entry_safe(
- rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
- struct htab_elem, hash_node);
- if (next_l) {
- memcpy(next_key, next_l->key, key_size);
- return 0;
- }
-
- /* no more elements in this hash list, go to the next bucket */
- i = hash & (htab->n_buckets - 1);
- i++;
-
-find_first_elem:
- /* iterate over buckets */
- for (; i < htab->n_buckets; i++) {
- h = select_bucket(htab, i);
-
- /* pick first element in the bucket */
- next_l = hlist_entry_safe(
- rcu_dereference_raw(hlist_first_rcu(h)),
- struct htab_elem, hash_node);
- if (next_l) {
- /* if it's not empty, just return it */
- memcpy(next_key, next_l->key, key_size);
- return 0;
- }
- }
-
- /* iterated over all buckets and all elements */
- return -ENOENT;
-}
-
-static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
- struct bpf_map *map,
- void *key, u64 map_flags)
-{
- struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct bpf_sock_progs *progs = &htab->progs;
- struct htab_elem *l_new = NULL, *l_old;
- struct smap_psock_map_entry *e = NULL;
- struct hlist_head *head;
- struct smap_psock *psock;
- u32 key_size, hash;
- struct sock *sock;
- struct bucket *b;
- int err;
-
- sock = skops->sk;
-
- if (sock->sk_type != SOCK_STREAM ||
- sock->sk_protocol != IPPROTO_TCP)
- return -EOPNOTSUPP;
-
- if (unlikely(map_flags > BPF_EXIST))
- return -EINVAL;
-
- e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
- if (!e)
- return -ENOMEM;
-
- WARN_ON_ONCE(!rcu_read_lock_held());
- key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
- b = __select_bucket(htab, hash);
- head = &b->head;
-
- err = __sock_map_ctx_update_elem(map, progs, sock, key);
- if (err)
- goto err;
-
- /* psock is valid here because otherwise the above *ctx_update_elem
- * would have returned an error. It is safe to skip the error check.
- */
- psock = smap_psock_sk(sock);
- raw_spin_lock_bh(&b->lock);
- l_old = lookup_elem_raw(head, hash, key, key_size);
- if (l_old && map_flags == BPF_NOEXIST) {
- err = -EEXIST;
- goto bucket_err;
- }
- if (!l_old && map_flags == BPF_EXIST) {
- err = -ENOENT;
- goto bucket_err;
- }
-
- l_new = alloc_sock_hash_elem(htab, key, key_size, hash, sock, l_old);
- if (IS_ERR(l_new)) {
- err = PTR_ERR(l_new);
- goto bucket_err;
- }
-
- rcu_assign_pointer(e->hash_link, l_new);
- e->map = map;
- spin_lock_bh(&psock->maps_lock);
- list_add_tail(&e->list, &psock->maps);
- spin_unlock_bh(&psock->maps_lock);
-
- /* add the new element to the head of the list, so that a
- * concurrent search will find it before the old elem
- */
- hlist_add_head_rcu(&l_new->hash_node, head);
- if (l_old) {
- psock = smap_psock_sk(l_old->sk);
-
- hlist_del_rcu(&l_old->hash_node);
- smap_list_hash_remove(psock, l_old);
- smap_release_sock(psock, l_old->sk);
- free_htab_elem(htab, l_old);
- }
- raw_spin_unlock_bh(&b->lock);
- return 0;
-bucket_err:
- smap_release_sock(psock, sock);
- raw_spin_unlock_bh(&b->lock);
-err:
- kfree(e);
- return err;
-}
-
-static int sock_hash_update_elem(struct bpf_map *map,
- void *key, void *value, u64 flags)
-{
- struct bpf_sock_ops_kern skops;
- u32 fd = *(u32 *)value;
- struct socket *socket;
- int err;
-
- socket = sockfd_lookup(fd, &err);
- if (!socket)
- return err;
-
- skops.sk = socket->sk;
- if (!skops.sk) {
- fput(socket->file);
- return -EINVAL;
- }
-
- lock_sock(skops.sk);
- preempt_disable();
- rcu_read_lock();
- err = sock_hash_ctx_update_elem(&skops, map, key, flags);
- rcu_read_unlock();
- preempt_enable();
- release_sock(skops.sk);
- fput(socket->file);
- return err;
-}
-
-static int sock_hash_delete_elem(struct bpf_map *map, void *key)
-{
- struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
- struct bucket *b;
- struct htab_elem *l;
- u32 hash, key_size;
- int ret = -ENOENT;
-
- key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
- b = __select_bucket(htab, hash);
- head = &b->head;
-
- raw_spin_lock_bh(&b->lock);
- l = lookup_elem_raw(head, hash, key, key_size);
- if (l) {
- struct sock *sock = l->sk;
- struct smap_psock *psock;
-
- hlist_del_rcu(&l->hash_node);
- psock = smap_psock_sk(sock);
- /* This check handles a racing sock event that can grab the
- * sk_callback_lock before this case but after the xchg happens,
- * causing the refcnt to hit zero and the sock user data (psock)
- * to be NULL and queued for garbage collection.
- */
- if (likely(psock)) {
- smap_list_hash_remove(psock, l);
- smap_release_sock(psock, sock);
- }
- free_htab_elem(htab, l);
- ret = 0;
- }
- raw_spin_unlock_bh(&b->lock);
- return ret;
-}
-
-struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
-{
- struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
- struct htab_elem *l;
- u32 key_size, hash;
- struct bucket *b;
- struct sock *sk;
-
- key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
- b = __select_bucket(htab, hash);
- head = &b->head;
-
- l = lookup_elem_raw(head, hash, key, key_size);
- sk = l ? l->sk : NULL;
- return sk;
-}
-
-const struct bpf_map_ops sock_map_ops = {
- .map_alloc = sock_map_alloc,
- .map_free = sock_map_free,
- .map_lookup_elem = sock_map_lookup,
- .map_get_next_key = sock_map_get_next_key,
- .map_update_elem = sock_map_update_elem,
- .map_delete_elem = sock_map_delete_elem,
- .map_release_uref = sock_map_release,
- .map_check_btf = map_check_no_btf,
-};
-
-const struct bpf_map_ops sock_hash_ops = {
- .map_alloc = sock_hash_alloc,
- .map_free = sock_hash_free,
- .map_lookup_elem = sock_map_lookup,
- .map_get_next_key = sock_hash_get_next_key,
- .map_update_elem = sock_hash_update_elem,
- .map_delete_elem = sock_hash_delete_elem,
- .map_release_uref = sock_map_release,
- .map_check_btf = map_check_no_btf,
-};
-
-BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
- struct bpf_map *, map, void *, key, u64, flags)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
-}
-
-const struct bpf_func_proto bpf_sock_map_update_proto = {
- .func = bpf_sock_map_update,
- .gpl_only = false,
- .pkt_access = true,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_CONST_MAP_PTR,
- .arg3_type = ARG_PTR_TO_MAP_KEY,
- .arg4_type = ARG_ANYTHING,
-};
-
-BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
- struct bpf_map *, map, void *, key, u64, flags)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
-}
-
-const struct bpf_func_proto bpf_sock_hash_update_proto = {
- .func = bpf_sock_hash_update,
- .gpl_only = false,
- .pkt_access = true,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_CONST_MAP_PTR,
- .arg3_type = ARG_PTR_TO_MAP_KEY,
- .arg4_type = ARG_ANYTHING,
-};
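A minimal sketch of how a sock_ops BPF program would call the bpf_sock_map_update() helper defined above (assumes libbpf's SEC() macro and a sockmap named sock_map declared elsewhere; illustrative only):

SEC("sockops")
int add_established(struct bpf_sock_ops *skops)
{
	__u32 key = 0;

	/* insert the newly established socket at index 0 */
	if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
		bpf_sock_map_update(skops, &sock_map, &key, BPF_ANY);
	return 1;
}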
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 8061a439ef18..90daf285de03 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -505,7 +505,7 @@ const struct bpf_func_proto bpf_get_stack_proto = {
/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
- return NULL;
+ return ERR_PTR(-EOPNOTSUPP);
}
/* Called from syscall */
@@ -600,7 +600,7 @@ static void stack_map_free(struct bpf_map *map)
put_callchain_buffers();
}
-const struct bpf_map_ops stack_map_ops = {
+const struct bpf_map_ops stack_trace_map_ops = {
.map_alloc = stack_map_alloc,
.map_free = stack_map_free,
.map_get_next_key = stack_map_get_next_key,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 8339d81cba1d..ccb93277aae2 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -30,7 +30,6 @@
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
-#include <linux/btf.h>
#include <linux/nospec.h>
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
@@ -652,6 +651,17 @@ int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
return -ENOTSUPP;
}
+static void *__bpf_copy_key(void __user *ukey, u64 key_size)
+{
+ if (key_size)
+ return memdup_user(ukey, key_size);
+
+ if (ukey)
+ return ERR_PTR(-EINVAL);
+
+ return NULL;
+}
+
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
@@ -679,7 +689,7 @@ static int map_lookup_elem(union bpf_attr *attr)
goto err_put;
}
- key = memdup_user(ukey, map->key_size);
+ key = __bpf_copy_key(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
@@ -687,7 +697,8 @@ static int map_lookup_elem(union bpf_attr *attr)
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
- map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+ map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+ map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
value_size = round_up(map->value_size, 8) * num_possible_cpus();
else if (IS_FD_MAP(map))
value_size = sizeof(u32);
@@ -706,6 +717,8 @@ static int map_lookup_elem(union bpf_attr *attr)
err = bpf_percpu_hash_copy(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_copy(map, key, value);
+ } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
+ err = bpf_percpu_cgroup_storage_copy(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
err = bpf_stackmap_copy(map, key, value);
} else if (IS_FD_ARRAY(map)) {
@@ -714,13 +727,21 @@ static int map_lookup_elem(union bpf_attr *attr)
err = bpf_fd_htab_map_lookup_elem(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
+ } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
+ map->map_type == BPF_MAP_TYPE_STACK) {
+ err = map->ops->map_peek_elem(map, value);
} else {
rcu_read_lock();
ptr = map->ops->map_lookup_elem(map, key);
- if (ptr)
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ } else if (!ptr) {
+ err = -ENOENT;
+ } else {
+ err = 0;
memcpy(value, ptr, value_size);
+ }
rcu_read_unlock();
- err = ptr ? 0 : -ENOENT;
}
if (err)
@@ -741,6 +762,17 @@ err_put:
return err;
}
+static void maybe_wait_bpf_programs(struct bpf_map *map)
+{
+ /* Wait for any running BPF programs to complete so that
+ * userspace, when we return to it, knows that all programs
+ * that could be running use the new map value.
+ */
+ if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
+ map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
+ synchronize_rcu();
+}
+
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
static int map_update_elem(union bpf_attr *attr)
@@ -767,7 +799,7 @@ static int map_update_elem(union bpf_attr *attr)
goto err_put;
}
- key = memdup_user(ukey, map->key_size);
+ key = __bpf_copy_key(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
@@ -775,7 +807,8 @@ static int map_update_elem(union bpf_attr *attr)
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
- map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+ map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+ map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
value_size = round_up(map->value_size, 8) * num_possible_cpus();
else
value_size = map->value_size;
@@ -810,6 +843,9 @@ static int map_update_elem(union bpf_attr *attr)
err = bpf_percpu_hash_update(map, key, value, attr->flags);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_update(map, key, value, attr->flags);
+ } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
+ err = bpf_percpu_cgroup_storage_update(map, key, value,
+ attr->flags);
} else if (IS_FD_ARRAY(map)) {
rcu_read_lock();
err = bpf_fd_array_map_update_elem(map, f.file, key, value,
@@ -824,6 +860,9 @@ static int map_update_elem(union bpf_attr *attr)
/* rcu_read_lock() is not needed */
err = bpf_fd_reuseport_array_update_elem(map, key, value,
attr->flags);
+ } else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
+ map->map_type == BPF_MAP_TYPE_STACK) {
+ err = map->ops->map_push_elem(map, value, attr->flags);
} else {
rcu_read_lock();
err = map->ops->map_update_elem(map, key, value, attr->flags);
@@ -831,6 +870,7 @@ static int map_update_elem(union bpf_attr *attr)
}
__this_cpu_dec(bpf_prog_active);
preempt_enable();
+ maybe_wait_bpf_programs(map);
out:
free_value:
kfree(value);
@@ -865,7 +905,7 @@ static int map_delete_elem(union bpf_attr *attr)
goto err_put;
}
- key = memdup_user(ukey, map->key_size);
+ key = __bpf_copy_key(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
@@ -883,6 +923,7 @@ static int map_delete_elem(union bpf_attr *attr)
rcu_read_unlock();
__this_cpu_dec(bpf_prog_active);
preempt_enable();
+ maybe_wait_bpf_programs(map);
out:
kfree(key);
err_put:
@@ -917,7 +958,7 @@ static int map_get_next_key(union bpf_attr *attr)
}
if (ukey) {
- key = memdup_user(ukey, map->key_size);
+ key = __bpf_copy_key(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
@@ -958,6 +999,69 @@ err_put:
return err;
}
+#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
+
+static int map_lookup_and_delete_elem(union bpf_attr *attr)
+{
+ void __user *ukey = u64_to_user_ptr(attr->key);
+ void __user *uvalue = u64_to_user_ptr(attr->value);
+ int ufd = attr->map_fd;
+ struct bpf_map *map;
+ void *key, *value;
+ u32 value_size;
+ struct fd f;
+ int err;
+
+ if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
+ return -EINVAL;
+
+ f = fdget(ufd);
+ map = __bpf_map_get(f);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
+ err = -EPERM;
+ goto err_put;
+ }
+
+ key = __bpf_copy_key(ukey, map->key_size);
+ if (IS_ERR(key)) {
+ err = PTR_ERR(key);
+ goto err_put;
+ }
+
+ value_size = map->value_size;
+
+ err = -ENOMEM;
+ value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
+ if (!value)
+ goto free_key;
+
+ if (map->map_type == BPF_MAP_TYPE_QUEUE ||
+ map->map_type == BPF_MAP_TYPE_STACK) {
+ err = map->ops->map_pop_elem(map, value);
+ } else {
+ err = -ENOTSUPP;
+ }
+
+ if (err)
+ goto free_value;
+
+ if (copy_to_user(uvalue, value, value_size) != 0)
+ goto free_value;
+
+ err = 0;
+
+free_value:
+ kfree(value);
+free_key:
+ kfree(key);
+err_put:
+ fdput(f);
+ return err;
+}
+
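A hedged userspace sketch of driving the new BPF_MAP_LOOKUP_AND_DELETE_ELEM command (map_fd is assumed to be a BPF_MAP_TYPE_QUEUE fd with an 8-byte value_size; error handling elided):

union bpf_attr attr = {};
__u64 val;
int err;

attr.map_fd = map_fd;
attr.key    = 0;	/* queue/stack maps have key_size == 0 */
attr.value  = (__u64)(unsigned long)&val;
err = syscall(__NR_bpf, BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));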
static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name) \
[_id] = & _name ## _prog_ops,
@@ -989,10 +1093,15 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
+ enum bpf_cgroup_storage_type stype;
int i;
- if (aux->cgroup_storage)
- bpf_cgroup_storage_release(aux->prog, aux->cgroup_storage);
+ for_each_cgroup_storage_type(stype) {
+ if (!aux->cgroup_storage[stype])
+ continue;
+ bpf_cgroup_storage_release(aux->prog,
+ aux->cgroup_storage[stype]);
+ }
for (i = 0; i < aux->used_map_cnt; i++)
bpf_map_put(aux->used_maps[i]);
@@ -1616,6 +1725,9 @@ static int bpf_prog_attach(const union bpf_attr *attr)
case BPF_LIRC_MODE2:
ptype = BPF_PROG_TYPE_LIRC_MODE2;
break;
+ case BPF_FLOW_DISSECTOR:
+ ptype = BPF_PROG_TYPE_FLOW_DISSECTOR;
+ break;
default:
return -EINVAL;
}
@@ -1632,11 +1744,14 @@ static int bpf_prog_attach(const union bpf_attr *attr)
switch (ptype) {
case BPF_PROG_TYPE_SK_SKB:
case BPF_PROG_TYPE_SK_MSG:
- ret = sockmap_get_from_fd(attr, ptype, prog);
+ ret = sock_map_get_from_fd(attr, prog);
break;
case BPF_PROG_TYPE_LIRC_MODE2:
ret = lirc_prog_attach(attr, prog);
break;
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
+ ret = skb_flow_dissector_bpf_prog_attach(attr, prog);
+ break;
default:
ret = cgroup_bpf_prog_attach(attr, ptype, prog);
}
@@ -1683,12 +1798,14 @@ static int bpf_prog_detach(const union bpf_attr *attr)
ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
break;
case BPF_SK_MSG_VERDICT:
- return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL);
+ return sock_map_get_from_fd(attr, NULL);
case BPF_SK_SKB_STREAM_PARSER:
case BPF_SK_SKB_STREAM_VERDICT:
- return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL);
+ return sock_map_get_from_fd(attr, NULL);
case BPF_LIRC_MODE2:
return lirc_prog_detach(attr);
+ case BPF_FLOW_DISSECTOR:
+ return skb_flow_dissector_bpf_prog_detach(attr);
default:
return -EINVAL;
}
@@ -2418,6 +2535,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
case BPF_TASK_FD_QUERY:
err = bpf_task_fd_query(&attr, uattr);
break;
+ case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
+ err = map_lookup_and_delete_elem(&attr);
+ break;
default:
err = -EINVAL;
break;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 92246117d2b0..98fa0be35370 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
+ * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -80,8 +81,8 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
* (like pointer plus pointer becomes SCALAR_VALUE type)
*
* When verifier sees load or store instructions the type of base register
- * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
- * types recognized by check_mem_access() function.
+ * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
+ * four pointer types recognized by check_mem_access() function.
*
* PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
* and the range of [ptr, ptr + map's value_size) is accessible.
@@ -140,6 +141,24 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
*
* After the call R0 is set to return type of the function and registers R1-R5
* are set to NOT_INIT to indicate that they are no longer readable.
+ *
+ * The following reference types represent a potential reference to a kernel
+ * resource which, after first being allocated, must be checked and freed by
+ * the BPF program:
+ * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
+ *
+ * When the verifier sees a helper call return a reference type, it allocates a
+ * pointer id for the reference and stores it in the current function state.
+ * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
+ * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
+ * passes through a NULL-check conditional. For the branch wherein the state is
+ * changed to CONST_IMM, the verifier releases the reference.
+ *
+ * For each helper function that allocates a reference, such as
+ * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
+ * bpf_sk_release(). When a reference type passes into the release function,
+ * the verifier also releases the reference. If any unchecked or unreleased
+ * reference remains at the end of the program, the verifier rejects it.
*/
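A minimal BPF-side sketch of the pattern these rules enforce, mirroring the bpf_sk_lookup_tcp()/bpf_sk_release() pairing named above (tuple setup elided; illustrative only):

struct bpf_sock *sk;

sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
if (sk)
	/* NULL check passed: PTR_TO_SOCKET_OR_NULL became PTR_TO_SOCKET */
	bpf_sk_release(sk);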
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
@@ -189,6 +208,7 @@ struct bpf_call_arg_meta {
int access_size;
s64 msize_smax_value;
u64 msize_umax_value;
+ int ptr_id;
};
static DEFINE_MUTEX(bpf_verifier_lock);
@@ -249,6 +269,46 @@ static bool type_is_pkt_pointer(enum bpf_reg_type type)
type == PTR_TO_PACKET_META;
}
+static bool reg_type_may_be_null(enum bpf_reg_type type)
+{
+ return type == PTR_TO_MAP_VALUE_OR_NULL ||
+ type == PTR_TO_SOCKET_OR_NULL;
+}
+
+static bool type_is_refcounted(enum bpf_reg_type type)
+{
+ return type == PTR_TO_SOCKET;
+}
+
+static bool type_is_refcounted_or_null(enum bpf_reg_type type)
+{
+ return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
+}
+
+static bool reg_is_refcounted(const struct bpf_reg_state *reg)
+{
+ return type_is_refcounted(reg->type);
+}
+
+static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
+{
+ return type_is_refcounted_or_null(reg->type);
+}
+
+static bool arg_type_is_refcounted(enum bpf_arg_type type)
+{
+ return type == ARG_PTR_TO_SOCKET;
+}
+
+/* Determine whether the function releases some resources allocated by another
+ * function call. The first reference type argument will be assumed to be
+ * released by release_reference().
+ */
+static bool is_release_function(enum bpf_func_id func_id)
+{
+ return func_id == BPF_FUNC_sk_release;
+}
+
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
[NOT_INIT] = "?",
@@ -261,6 +321,16 @@ static const char * const reg_type_str[] = {
[PTR_TO_PACKET] = "pkt",
[PTR_TO_PACKET_META] = "pkt_meta",
[PTR_TO_PACKET_END] = "pkt_end",
+ [PTR_TO_FLOW_KEYS] = "flow_keys",
+ [PTR_TO_SOCKET] = "sock",
+ [PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
+};
+
+static char slot_type_char[] = {
+ [STACK_INVALID] = '?',
+ [STACK_SPILL] = 'r',
+ [STACK_MISC] = 'm',
+ [STACK_ZERO] = '0',
};
static void print_liveness(struct bpf_verifier_env *env,
@@ -349,72 +419,179 @@ static void print_verifier_state(struct bpf_verifier_env *env,
}
}
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] == STACK_SPILL) {
- verbose(env, " fp%d",
- (-i - 1) * BPF_REG_SIZE);
- print_liveness(env, state->stack[i].spilled_ptr.live);
+ char types_buf[BPF_REG_SIZE + 1];
+ bool valid = false;
+ int j;
+
+ for (j = 0; j < BPF_REG_SIZE; j++) {
+ if (state->stack[i].slot_type[j] != STACK_INVALID)
+ valid = true;
+ types_buf[j] = slot_type_char[
+ state->stack[i].slot_type[j]];
+ }
+ types_buf[BPF_REG_SIZE] = 0;
+ if (!valid)
+ continue;
+ verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
+ print_liveness(env, state->stack[i].spilled_ptr.live);
+ if (state->stack[i].slot_type[0] == STACK_SPILL)
verbose(env, "=%s",
reg_type_str[state->stack[i].spilled_ptr.type]);
- }
- if (state->stack[i].slot_type[0] == STACK_ZERO)
- verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
+ else
+ verbose(env, "=%s", types_buf);
+ }
+ if (state->acquired_refs && state->refs[0].id) {
+ verbose(env, " refs=%d", state->refs[0].id);
+ for (i = 1; i < state->acquired_refs; i++)
+ if (state->refs[i].id)
+ verbose(env, ",%d", state->refs[i].id);
}
verbose(env, "\n");
}
-static int copy_stack_state(struct bpf_func_state *dst,
- const struct bpf_func_state *src)
-{
- if (!src->stack)
- return 0;
- if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
- /* internal bug, make state invalid to reject the program */
- memset(dst, 0, sizeof(*dst));
- return -EFAULT;
- }
- memcpy(dst->stack, src->stack,
- sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
- return 0;
-}
+#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \
+static int copy_##NAME##_state(struct bpf_func_state *dst, \
+ const struct bpf_func_state *src) \
+{ \
+ if (!src->FIELD) \
+ return 0; \
+ if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \
+ /* internal bug, make state invalid to reject the program */ \
+ memset(dst, 0, sizeof(*dst)); \
+ return -EFAULT; \
+ } \
+ memcpy(dst->FIELD, src->FIELD, \
+ sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
+ return 0; \
+}
+/* copy_reference_state() */
+COPY_STATE_FN(reference, acquired_refs, refs, 1)
+/* copy_stack_state() */
+COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
+#undef COPY_STATE_FN
+
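For readability, the first instantiation above expands to the following equivalent C (shown only for illustration):

static int copy_reference_state(struct bpf_func_state *dst,
				const struct bpf_func_state *src)
{
	if (!src->refs)
		return 0;
	if (WARN_ON_ONCE(dst->acquired_refs < src->acquired_refs)) {
		/* internal bug, make state invalid to reject the program */
		memset(dst, 0, sizeof(*dst));
		return -EFAULT;
	}
	memcpy(dst->refs, src->refs,
	       sizeof(*src->refs) * (src->acquired_refs / 1));
	return 0;
}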
+#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \
+static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
+ bool copy_old) \
+{ \
+ u32 old_size = state->COUNT; \
+ struct bpf_##NAME##_state *new_##FIELD; \
+ int slot = size / SIZE; \
+ \
+ if (size <= old_size || !size) { \
+ if (copy_old) \
+ return 0; \
+ state->COUNT = slot * SIZE; \
+ if (!size && old_size) { \
+ kfree(state->FIELD); \
+ state->FIELD = NULL; \
+ } \
+ return 0; \
+ } \
+ new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
+ GFP_KERNEL); \
+ if (!new_##FIELD) \
+ return -ENOMEM; \
+ if (copy_old) { \
+ if (state->FIELD) \
+ memcpy(new_##FIELD, state->FIELD, \
+ sizeof(*new_##FIELD) * (old_size / SIZE)); \
+ memset(new_##FIELD + old_size / SIZE, 0, \
+ sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
+ } \
+ state->COUNT = slot * SIZE; \
+ kfree(state->FIELD); \
+ state->FIELD = new_##FIELD; \
+ return 0; \
+}
+/* realloc_reference_state() */
+REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
+/* realloc_stack_state() */
+REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
+#undef REALLOC_STATE_FN
/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
* make it consume a minimal amount of memory. A check_stack_write() access from
* the program calls into realloc_func_state() to grow the stack size.
* Note there is a non-zero 'parent' pointer inside bpf_verifier_state
- * which this function copies over. It points to previous bpf_verifier_state
- * which is never reallocated
+ * which realloc_stack_state() copies over. It points to previous
+ * bpf_verifier_state which is never reallocated.
*/
-static int realloc_func_state(struct bpf_func_state *state, int size,
- bool copy_old)
+static int realloc_func_state(struct bpf_func_state *state, int stack_size,
+ int refs_size, bool copy_old)
{
- u32 old_size = state->allocated_stack;
- struct bpf_stack_state *new_stack;
- int slot = size / BPF_REG_SIZE;
+ int err = realloc_reference_state(state, refs_size, copy_old);
+ if (err)
+ return err;
+ return realloc_stack_state(state, stack_size, copy_old);
+}
+
+/* Acquire a pointer id from the env and update the state->refs to include
+ * this new pointer reference.
+ * On success, returns a valid pointer id to associate with the register.
+ * On failure, returns a negative errno.
+ */
+static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
+{
+ struct bpf_func_state *state = cur_func(env);
+ int new_ofs = state->acquired_refs;
+ int id, err;
- if (size <= old_size || !size) {
- if (copy_old)
+ err = realloc_reference_state(state, state->acquired_refs + 1, true);
+ if (err)
+ return err;
+ id = ++env->id_gen;
+ state->refs[new_ofs].id = id;
+ state->refs[new_ofs].insn_idx = insn_idx;
+
+ return id;
+}
+
+/* release function corresponding to acquire_reference_state(). Idempotent. */
+static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
+{
+ int i, last_idx;
+
+ if (!ptr_id)
+ return -EFAULT;
+
+ last_idx = state->acquired_refs - 1;
+ for (i = 0; i < state->acquired_refs; i++) {
+ if (state->refs[i].id == ptr_id) {
+ if (last_idx && i != last_idx)
+ memcpy(&state->refs[i], &state->refs[last_idx],
+ sizeof(*state->refs));
+ memset(&state->refs[last_idx], 0, sizeof(*state->refs));
+ state->acquired_refs--;
return 0;
- state->allocated_stack = slot * BPF_REG_SIZE;
- if (!size && old_size) {
- kfree(state->stack);
- state->stack = NULL;
}
- return 0;
}
- new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
- GFP_KERNEL);
- if (!new_stack)
- return -ENOMEM;
- if (copy_old) {
- if (state->stack)
- memcpy(new_stack, state->stack,
- sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
- memset(new_stack + old_size / BPF_REG_SIZE, 0,
- sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
- }
- state->allocated_stack = slot * BPF_REG_SIZE;
- kfree(state->stack);
- state->stack = new_stack;
+ return -EFAULT;
+}
+
+/* variation on the above for cases where we expect there to be an
+ * outstanding reference for the specified ptr_id.
+ */
+static int release_reference_state(struct bpf_verifier_env *env, int ptr_id)
+{
+ struct bpf_func_state *state = cur_func(env);
+ int err;
+
+ err = __release_reference_state(state, ptr_id);
+ if (WARN_ON_ONCE(err != 0))
+ verbose(env, "verifier internal error: can't release reference\n");
+ return err;
+}
+
+static int transfer_reference_state(struct bpf_func_state *dst,
+ struct bpf_func_state *src)
+{
+ int err = realloc_reference_state(dst, src->acquired_refs, false);
+ if (err)
+ return err;
+ err = copy_reference_state(dst, src);
+ if (err)
+ return err;
return 0;
}
@@ -422,6 +599,7 @@ static void free_func_state(struct bpf_func_state *state)
{
if (!state)
return;
+ kfree(state->refs);
kfree(state->stack);
kfree(state);
}
@@ -447,10 +625,14 @@ static int copy_func_state(struct bpf_func_state *dst,
{
int err;
- err = realloc_func_state(dst, src->allocated_stack, false);
+ err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
+ false);
+ if (err)
+ return err;
+ memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
+ err = copy_reference_state(dst, src);
if (err)
return err;
- memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
return copy_stack_state(dst, src);
}
@@ -466,7 +648,6 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
dst_state->frame[i] = NULL;
}
dst_state->curframe = src->curframe;
- dst_state->parent = src->parent;
for (i = 0; i <= src->curframe; i++) {
dst = dst_state->frame[i];
if (!dst) {
@@ -553,7 +734,9 @@ static void __mark_reg_not_init(struct bpf_reg_state *reg);
*/
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
- reg->id = 0;
+ /* Clear id, off, and union(map_ptr, range) */
+ memset(((u8 *)reg) + sizeof(reg->type), 0,
+ offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
reg->var_off = tnum_const(imm);
reg->smin_value = (s64)imm;
reg->smax_value = (s64)imm;
@@ -572,7 +755,6 @@ static void __mark_reg_known_zero(struct bpf_reg_state *reg)
static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
__mark_reg_known(reg, 0);
- reg->off = 0;
reg->type = SCALAR_VALUE;
}
@@ -683,9 +865,12 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(struct bpf_reg_state *reg)
{
+ /*
+ * Clear type, id, off, and union(map_ptr, range) and
+ * padding between 'type' and union
+ */
+ memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
reg->type = SCALAR_VALUE;
- reg->id = 0;
- reg->off = 0;
reg->var_off = tnum_unknown;
reg->frameno = 0;
__mark_reg_unbounded(reg);
@@ -732,6 +917,7 @@ static void init_reg_state(struct bpf_verifier_env *env,
for (i = 0; i < MAX_BPF_REG; i++) {
mark_reg_not_init(env, regs, i);
regs[i].live = REG_LIVE_NONE;
+ regs[i].parent = NULL;
}
/* frame pointer */
@@ -823,10 +1009,6 @@ static int check_subprogs(struct bpf_verifier_env *env)
verbose(env, "function calls to other bpf functions are allowed for root only\n");
return -EPERM;
}
- if (bpf_prog_is_dev_bound(env->prog->aux)) {
- verbose(env, "function calls in offloaded programs are not supported yet\n");
- return -EINVAL;
- }
ret = add_subprog(env, i + insn[i].imm + 1);
if (ret < 0)
return ret;
@@ -876,74 +1058,21 @@ next:
return 0;
}
-static
-struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
- const struct bpf_verifier_state *state,
- struct bpf_verifier_state *parent,
- u32 regno)
-{
- struct bpf_verifier_state *tmp = NULL;
-
- /* 'parent' could be a state of caller and
- * 'state' could be a state of callee. In such case
- * parent->curframe < state->curframe
- * and it's ok for r1 - r5 registers
- *
- * 'parent' could be a callee's state after it bpf_exit-ed.
- * In such case parent->curframe > state->curframe
- * and it's ok for r0 only
- */
- if (parent->curframe == state->curframe ||
- (parent->curframe < state->curframe &&
- regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
- (parent->curframe > state->curframe &&
- regno == BPF_REG_0))
- return parent;
-
- if (parent->curframe > state->curframe &&
- regno >= BPF_REG_6) {
- /* for callee saved regs we have to skip the whole chain
- * of states that belong to callee and mark as LIVE_READ
- * the registers before the call
- */
- tmp = parent;
- while (tmp && tmp->curframe != state->curframe) {
- tmp = tmp->parent;
- }
- if (!tmp)
- goto bug;
- parent = tmp;
- } else {
- goto bug;
- }
- return parent;
-bug:
- verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
- verbose(env, "regno %d parent frame %d current frame %d\n",
- regno, parent->curframe, state->curframe);
- return NULL;
-}
-
+/* Parentage chain of this register (or stack slot) should take care of all
+ * issues like callee-saved registers, stack slot allocation time, etc.
+ */
static int mark_reg_read(struct bpf_verifier_env *env,
- const struct bpf_verifier_state *state,
- struct bpf_verifier_state *parent,
- u32 regno)
+ const struct bpf_reg_state *state,
+ struct bpf_reg_state *parent)
{
bool writes = parent == state->parent; /* Observe write marks */
- if (regno == BPF_REG_FP)
- /* We don't need to worry about FP liveness because it's read-only */
- return 0;
-
while (parent) {
/* if read wasn't screened by an earlier write ... */
- if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
+ if (writes && state->live & REG_LIVE_WRITTEN)
break;
- parent = skip_callee(env, state, parent, regno);
- if (!parent)
- return -EFAULT;
/* ... then we depend on parent's value */
- parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
+ parent->live |= REG_LIVE_READ;
state = parent;
parent = state->parent;
writes = true;
@@ -969,7 +1098,10 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
verbose(env, "R%d !read_ok\n", regno);
return -EACCES;
}
- return mark_reg_read(env, vstate, vstate->parent, regno);
+ /* We don't need to worry about FP liveness because it's read-only */
+ if (regno != BPF_REG_FP)
+ return mark_reg_read(env, &regs[regno],
+ regs[regno].parent);
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
@@ -993,7 +1125,10 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
case PTR_TO_PACKET_END:
+ case PTR_TO_FLOW_KEYS:
case CONST_PTR_TO_MAP:
+ case PTR_TO_SOCKET:
+ case PTR_TO_SOCKET_OR_NULL:
return true;
default:
return false;
@@ -1018,7 +1153,7 @@ static int check_stack_write(struct bpf_verifier_env *env,
enum bpf_reg_type type;
err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
- true);
+ state->acquired_refs, true);
if (err)
return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
@@ -1080,8 +1215,8 @@ static int check_stack_write(struct bpf_verifier_env *env,
} else {
u8 type = STACK_MISC;
- /* regular write of data into stack */
- state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
+ /* regular write of data into stack destroys any spilled ptr */
+ state->stack[spi].spilled_ptr.type = NOT_INIT;
/* only mark the slot as written if all 8 bytes were written
* otherwise read propagation may incorrectly stop too soon
@@ -1106,61 +1241,6 @@ static int check_stack_write(struct bpf_verifier_env *env,
return 0;
}
-/* registers of every function are unique and mark_reg_read() propagates
- * the liveness in the following cases:
- * - from callee into caller for R1 - R5 that were used as arguments
- * - from caller into callee for R0 that used as result of the call
- * - from caller to the same caller skipping states of the callee for R6 - R9,
- * since R6 - R9 are callee saved by implicit function prologue and
- * caller's R6 != callee's R6, so when we propagate liveness up to
- * parent states we need to skip callee states for R6 - R9.
- *
- * stack slot marking is different, since stacks of caller and callee are
- * accessible in both (since caller can pass a pointer to caller's stack to
- * callee which can pass it to another function), hence mark_stack_slot_read()
- * has to propagate the stack liveness to all parent states at given frame number.
- * Consider code:
- * f1() {
- * ptr = fp - 8;
- * *ptr = ctx;
- * call f2 {
- * .. = *ptr;
- * }
- * .. = *ptr;
- * }
- * First *ptr is reading from f1's stack and mark_stack_slot_read() has
- * to mark liveness at the f1's frame and not f2's frame.
- * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
- * to propagate liveness to f2 states at f1's frame level and further into
- * f1 states at f1's frame level until write into that stack slot
- */
-static void mark_stack_slot_read(struct bpf_verifier_env *env,
- const struct bpf_verifier_state *state,
- struct bpf_verifier_state *parent,
- int slot, int frameno)
-{
- bool writes = parent == state->parent; /* Observe write marks */
-
- while (parent) {
- if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
- /* since LIVE_WRITTEN mark is only done for full 8-byte
- * write the read marks are conservative and parent
- * state may not even have the stack allocated. In such case
- * end the propagation, since the loop reached beginning
- * of the function
- */
- break;
- /* if read wasn't screened by an earlier write ... */
- if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
- break;
- /* ... then we depend on parent's value */
- parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
- state = parent;
- parent = state->parent;
- writes = true;
- }
-}
-
static int check_stack_read(struct bpf_verifier_env *env,
struct bpf_func_state *reg_state /* func where register points to */,
int off, int size, int value_regno)
@@ -1198,8 +1278,8 @@ static int check_stack_read(struct bpf_verifier_env *env,
*/
state->regs[value_regno].live |= REG_LIVE_WRITTEN;
}
- mark_stack_slot_read(env, vstate, vstate->parent, spi,
- reg_state->frameno);
+ mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
+ reg_state->stack[spi].spilled_ptr.parent);
return 0;
} else {
int zeros = 0;
@@ -1215,8 +1295,8 @@ static int check_stack_read(struct bpf_verifier_env *env,
off, i, size);
return -EACCES;
}
- mark_stack_slot_read(env, vstate, vstate->parent, spi,
- reg_state->frameno);
+ mark_reg_read(env, &reg_state->stack[spi].spilled_ptr,
+ reg_state->stack[spi].spilled_ptr.parent);
if (value_regno >= 0) {
if (zeros == size) {
/* any size read into register is zero extended,
@@ -1321,6 +1401,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
case BPF_PROG_TYPE_LWT_XMIT:
case BPF_PROG_TYPE_SK_SKB:
case BPF_PROG_TYPE_SK_MSG:
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
if (meta)
return meta->pkt_access;
@@ -1404,6 +1485,40 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
return -EACCES;
}
+static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
+ int size)
+{
+ if (size < 0 || off < 0 ||
+ (u64)off + size > sizeof(struct bpf_flow_keys)) {
+ verbose(env, "invalid access to flow keys off=%d size=%d\n",
+ off, size);
+ return -EACCES;
+ }
+ return 0;
+}
+
+static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
+ int size, enum bpf_access_type t)
+{
+ struct bpf_reg_state *regs = cur_regs(env);
+ struct bpf_reg_state *reg = &regs[regno];
+ struct bpf_insn_access_aux info;
+
+ if (reg->smin_value < 0) {
+ verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
+ regno);
+ return -EACCES;
+ }
+
+ if (!bpf_sock_is_valid_access(off, size, t, &info)) {
+ verbose(env, "invalid bpf_sock access off=%d size=%d\n",
+ off, size);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{
@@ -1413,25 +1528,39 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
return reg->type != SCALAR_VALUE;
}
+static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
+{
+ return cur_regs(env) + regno;
+}
+
static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
- return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
+ return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
}
static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
- const struct bpf_reg_state *reg = cur_regs(env) + regno;
+ const struct bpf_reg_state *reg = reg_state(env, regno);
- return reg->type == PTR_TO_CTX;
+ return reg->type == PTR_TO_CTX ||
+ reg->type == PTR_TO_SOCKET;
}
static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{
- const struct bpf_reg_state *reg = cur_regs(env) + regno;
+ const struct bpf_reg_state *reg = reg_state(env, regno);
return type_is_pkt_pointer(reg->type);
}
+static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
+{
+ const struct bpf_reg_state *reg = reg_state(env, regno);
+
+ /* Kept separate from is_ctx_reg() since we still want to allow BPF_ST here. */
+ return reg->type == PTR_TO_FLOW_KEYS;
+}
+
static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size, bool strict)
@@ -1505,6 +1634,9 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
* right in front, treat it the very same way.
*/
return check_pkt_ptr_alignment(env, reg, off, size, strict);
+ case PTR_TO_FLOW_KEYS:
+ pointer_desc = "flow keys ";
+ break;
case PTR_TO_MAP_VALUE:
pointer_desc = "value ";
break;
@@ -1519,6 +1651,9 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
*/
strict = true;
break;
+ case PTR_TO_SOCKET:
+ pointer_desc = "sock ";
+ break;
default:
break;
}
@@ -1727,9 +1862,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
else
mark_reg_known_zero(env, regs,
value_regno);
- regs[value_regno].id = 0;
- regs[value_regno].off = 0;
- regs[value_regno].range = 0;
regs[value_regno].type = reg_type;
}
@@ -1778,6 +1910,25 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
err = check_packet_access(env, regno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
+ } else if (reg->type == PTR_TO_FLOW_KEYS) {
+ if (t == BPF_WRITE && value_regno >= 0 &&
+ is_pointer_value(env, value_regno)) {
+ verbose(env, "R%d leaks addr into flow keys\n",
+ value_regno);
+ return -EACCES;
+ }
+
+ err = check_flow_keys_access(env, off, size);
+ if (!err && t == BPF_READ && value_regno >= 0)
+ mark_reg_unknown(env, regs, value_regno);
+ } else if (reg->type == PTR_TO_SOCKET) {
+ if (t == BPF_WRITE) {
+ verbose(env, "cannot write into socket\n");
+ return -EACCES;
+ }
+ err = check_sock_access(env, regno, off, size, t);
+ if (!err && value_regno >= 0)
+ mark_reg_unknown(env, regs, value_regno);
} else {
verbose(env, "R%d invalid mem access '%s'\n", regno,
reg_type_str[reg->type]);
@@ -1818,10 +1969,11 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
}
if (is_ctx_reg(env, insn->dst_reg) ||
- is_pkt_reg(env, insn->dst_reg)) {
+ is_pkt_reg(env, insn->dst_reg) ||
+ is_flow_key_reg(env, insn->dst_reg)) {
verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
- insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
- "context" : "packet");
+ insn->dst_reg,
+ reg_type_str[reg_state(env, insn->dst_reg)->type]);
return -EACCES;
}
@@ -1846,7 +1998,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
- struct bpf_reg_state *reg = cur_regs(env) + regno;
+ struct bpf_reg_state *reg = reg_state(env, regno);
struct bpf_func_state *state = func(env, reg);
int off, i, slot, spi;
@@ -1908,8 +2060,8 @@ mark:
/* reading any byte out of 8-byte 'spill_slot' will cause
* the whole slot to be marked as 'read'
*/
- mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
- spi, state->frameno);
+ mark_reg_read(env, &state->stack[spi].spilled_ptr,
+ state->stack[spi].spilled_ptr.parent);
}
return update_stack_depth(env, state, off);
}
@@ -1978,7 +2130,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
}
if (arg_type == ARG_PTR_TO_MAP_KEY ||
- arg_type == ARG_PTR_TO_MAP_VALUE) {
+ arg_type == ARG_PTR_TO_MAP_VALUE ||
+ arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
expected_type = PTR_TO_STACK;
if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
type != expected_type)
@@ -1999,6 +2152,16 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
err = check_ctx_reg(env, reg, regno);
if (err < 0)
return err;
+ } else if (arg_type == ARG_PTR_TO_SOCKET) {
+ expected_type = PTR_TO_SOCKET;
+ if (type != expected_type)
+ goto err_type;
+ if (meta->ptr_id || !reg->id) {
+ verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
+ meta->ptr_id, reg->id);
+ return -EFAULT;
+ }
+ meta->ptr_id = reg->id;
} else if (arg_type_is_mem_ptr(arg_type)) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
@@ -2038,7 +2201,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
err = check_helper_mem_access(env, regno,
meta->map_ptr->key_size, false,
NULL);
- } else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
+ } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
+ arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
/* bpf_map_xxx(..., map_ptr, ..., value) call:
* check [value, value + map->value_size) validity
*/
@@ -2047,9 +2211,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
verbose(env, "invalid map_ptr to access map->value\n");
return -EACCES;
}
+ meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
err = check_helper_mem_access(env, regno,
meta->map_ptr->value_size, false,
- NULL);
+ meta);
} else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
@@ -2129,6 +2294,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
goto error;
break;
case BPF_MAP_TYPE_CGROUP_STORAGE:
+ case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
if (func_id != BPF_FUNC_get_local_storage)
goto error;
break;
@@ -2171,6 +2337,13 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
if (func_id != BPF_FUNC_sk_select_reuseport)
goto error;
break;
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
+ if (func_id != BPF_FUNC_map_peek_elem &&
+ func_id != BPF_FUNC_map_pop_elem &&
+ func_id != BPF_FUNC_map_push_elem)
+ goto error;
+ break;
default:
break;
}
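
From userspace, the queue/stack pairing enforced above maps onto the
existing map syscall wrappers; a hypothetical sketch (fd and val are
placeholders, and error handling is omitted):

	__u32 val = 42;

	bpf_map_update_elem(fd, NULL, &val, BPF_ANY);		/* push */
	bpf_map_lookup_elem(fd, NULL, &val);			/* peek */
	bpf_map_lookup_and_delete_elem(fd, NULL, &val);		/* pop */

Queue and stack maps take no key, hence the NULL key arguments.
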
@@ -2219,13 +2392,21 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
goto error;
break;
case BPF_FUNC_get_local_storage:
- if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE)
+ if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
+ map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
goto error;
break;
case BPF_FUNC_sk_select_reuseport:
if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
goto error;
break;
+ case BPF_FUNC_map_peek_elem:
+ case BPF_FUNC_map_pop_elem:
+ case BPF_FUNC_map_push_elem:
+ if (map->map_type != BPF_MAP_TYPE_QUEUE &&
+ map->map_type != BPF_MAP_TYPE_STACK)
+ goto error;
+ break;
default:
break;
}
@@ -2286,10 +2467,32 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
return true;
}
+static bool check_refcount_ok(const struct bpf_func_proto *fn)
+{
+ int count = 0;
+
+ if (arg_type_is_refcounted(fn->arg1_type))
+ count++;
+ if (arg_type_is_refcounted(fn->arg2_type))
+ count++;
+ if (arg_type_is_refcounted(fn->arg3_type))
+ count++;
+ if (arg_type_is_refcounted(fn->arg4_type))
+ count++;
+ if (arg_type_is_refcounted(fn->arg5_type))
+ count++;
+
+ /* We only support one arg being unreferenced at the moment,
+ * which is sufficient for the helper functions we have right now.
+ */
+ return count <= 1;
+}
+
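
The same count can be written as a loop over an argument array (sketch
only; the real proto keeps arg1..arg5 in separate fields). The <= 1
limit exists because struct bpf_call_arg_meta carries a single ptr_id
slot, as the check_func_arg() hunk above shows:

	static bool refcount_ok(const enum bpf_arg_type *args, int nargs)
	{
		int i, count = 0;

		for (i = 0; i < nargs; i++)
			count += arg_type_is_refcounted(args[i]);
		return count <= 1;
	}
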
static int check_func_proto(const struct bpf_func_proto *fn)
{
return check_raw_mode_ok(fn) &&
- check_arg_pair_ok(fn) ? 0 : -EINVAL;
+ check_arg_pair_ok(fn) &&
+ check_refcount_ok(fn) ? 0 : -EINVAL;
}
/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@@ -2305,10 +2508,9 @@ static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
if (reg_is_pkt_pointer_any(&regs[i]))
mark_reg_unknown(env, regs, i);
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] != STACK_SPILL)
+ bpf_for_each_spilled_reg(i, state, reg) {
+ if (!reg)
continue;
- reg = &state->stack[i].spilled_ptr;
if (reg_is_pkt_pointer_any(reg))
__mark_reg_unknown(reg);
}
@@ -2323,12 +2525,45 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
__clear_all_pkt_pointers(env, vstate->frame[i]);
}
+static void release_reg_references(struct bpf_verifier_env *env,
+ struct bpf_func_state *state, int id)
+{
+ struct bpf_reg_state *regs = state->regs, *reg;
+ int i;
+
+ for (i = 0; i < MAX_BPF_REG; i++)
+ if (regs[i].id == id)
+ mark_reg_unknown(env, regs, i);
+
+ bpf_for_each_spilled_reg(i, state, reg) {
+ if (!reg)
+ continue;
+ if (reg_is_refcounted(reg) && reg->id == id)
+ __mark_reg_unknown(reg);
+ }
+}
+
+/* The pointer with the specified id has released its reference to kernel
+ * resources. Identify all copies of the same pointer and clear the reference.
+ */
+static int release_reference(struct bpf_verifier_env *env,
+ struct bpf_call_arg_meta *meta)
+{
+ struct bpf_verifier_state *vstate = env->cur_state;
+ int i;
+
+ for (i = 0; i <= vstate->curframe; i++)
+ release_reg_references(env, vstate->frame[i], meta->ptr_id);
+
+ return release_reference_state(env, meta->ptr_id);
+}
+
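
A toy model of the id-based invalidation (standalone C, illustrative
only): every copy of an acquired pointer shares one id, so releasing
that id clears all copies in one pass:

	#include <assert.h>

	struct toy_reg { int id; int valid; };

	static void toy_release(struct toy_reg *regs, int n, int id)
	{
		int i;

		for (i = 0; i < n; i++)
			if (regs[i].id == id)
				regs[i].valid = 0; /* mark_reg_unknown analogue */
	}

	int main(void)
	{
		struct toy_reg regs[3] = { { 1, 1 }, { 2, 1 }, { 1, 1 } };

		toy_release(regs, 3, 1);
		assert(!regs[0].valid && regs[1].valid && !regs[2].valid);
		return 0;
	}
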
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_func_state *caller, *callee;
- int i, subprog, target_insn;
+ int i, err, subprog, target_insn;
if (state->curframe + 1 >= MAX_CALL_FRAMES) {
verbose(env, "the call stack of %d frames is too deep\n",
@@ -2366,11 +2601,18 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
state->curframe + 1 /* frameno within this callchain */,
subprog /* subprog number within this prog */);
- /* copy r1 - r5 args that callee can access */
+ /* Transfer references to the callee */
+ err = transfer_reference_state(callee, caller);
+ if (err)
+ return err;
+
+ /* copy r1 - r5 args that callee can access. The copy includes parent
+ * pointers, which connect us up to the liveness chain
+ */
for (i = BPF_REG_1; i <= BPF_REG_5; i++)
callee->regs[i] = caller->regs[i];
- /* after the call regsiters r0 - r5 were scratched */
+ /* after the call registers r0 - r5 were scratched */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(env, caller->regs, caller_saved[i]);
check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
@@ -2396,6 +2638,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
struct bpf_verifier_state *state = env->cur_state;
struct bpf_func_state *caller, *callee;
struct bpf_reg_state *r0;
+ int err;
callee = state->frame[state->curframe];
r0 = &callee->regs[BPF_REG_0];
@@ -2415,6 +2658,11 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
/* return to the caller whatever r0 had in the callee */
caller->regs[BPF_REG_0] = *r0;
+ /* Transfer references to the caller */
+ err = transfer_reference_state(caller, callee);
+ if (err)
+ return err;
+
*insn_idx = callee->callsite + 1;
if (env->log.level) {
verbose(env, "returning from callee:\n");
@@ -2454,7 +2702,10 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
if (func_id != BPF_FUNC_tail_call &&
func_id != BPF_FUNC_map_lookup_elem &&
func_id != BPF_FUNC_map_update_elem &&
- func_id != BPF_FUNC_map_delete_elem)
+ func_id != BPF_FUNC_map_delete_elem &&
+ func_id != BPF_FUNC_map_push_elem &&
+ func_id != BPF_FUNC_map_pop_elem &&
+ func_id != BPF_FUNC_map_peek_elem)
return 0;
if (meta->map_ptr == NULL) {
@@ -2471,6 +2722,18 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
return 0;
}
+static int check_reference_leak(struct bpf_verifier_env *env)
+{
+ struct bpf_func_state *state = cur_func(env);
+ int i;
+
+ for (i = 0; i < state->acquired_refs; i++) {
+ verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
+ state->refs[i].id, state->refs[i].insn_idx);
+ }
+ return state->acquired_refs ? -EINVAL : 0;
+}
+
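
A hypothetical BPF-C fragment that this check rejects (the tuple setup
is a placeholder; helper arguments are abbreviated):

	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
	if (!sk)
		return TC_ACT_OK;
	return TC_ACT_OK;	/* leak: missing bpf_sk_release(sk) */

The verifier reports "Unreleased reference id=N alloc_insn=M" against
the lookup instruction.
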
static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
{
const struct bpf_func_proto *fn = NULL;
@@ -2549,6 +2812,18 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
return err;
}
+ if (func_id == BPF_FUNC_tail_call) {
+ err = check_reference_leak(env);
+ if (err) {
+ verbose(env, "tail_call would lead to reference leak\n");
+ return err;
+ }
+ } else if (is_release_function(func_id)) {
+ err = release_reference(env, &meta);
+ if (err)
+ return err;
+ }
+
regs = cur_regs(env);
/* check that flags argument in get_local_storage(map, flags) is 0,
@@ -2580,7 +2855,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
/* There is no offset yet applied, variable or fixed */
mark_reg_known_zero(env, regs, BPF_REG_0);
- regs[BPF_REG_0].off = 0;
/* remember map_ptr, so that check_map_access()
* can check 'value_size' boundary of memory access
* to map element returned from bpf_map_lookup_elem()
@@ -2592,6 +2866,13 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
}
regs[BPF_REG_0].map_ptr = meta.map_ptr;
regs[BPF_REG_0].id = ++env->id_gen;
+ } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
+ int id = acquire_reference_state(env, insn_idx);
+ if (id < 0)
+ return id;
+ mark_reg_known_zero(env, regs, BPF_REG_0);
+ regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
+ regs[BPF_REG_0].id = id;
} else {
verbose(env, "unknown return type %d of func %s#%d\n",
fn->ret_type, func_id_name(func_id), func_id);
@@ -2722,20 +3003,20 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
return -EACCES;
}
- if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
- verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
- dst);
- return -EACCES;
- }
- if (ptr_reg->type == CONST_PTR_TO_MAP) {
- verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
- dst);
+ switch (ptr_reg->type) {
+ case PTR_TO_MAP_VALUE_OR_NULL:
+ verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
+ dst, reg_type_str[ptr_reg->type]);
return -EACCES;
- }
- if (ptr_reg->type == PTR_TO_PACKET_END) {
- verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
- dst);
+ case CONST_PTR_TO_MAP:
+ case PTR_TO_PACKET_END:
+ case PTR_TO_SOCKET:
+ case PTR_TO_SOCKET_OR_NULL:
+ verbose(env, "R%d pointer arithmetic on %s prohibited\n",
+ dst, reg_type_str[ptr_reg->type]);
return -EACCES;
+ default:
+ break;
}
/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
@@ -2896,6 +3177,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+ if (insn_bitness == 32) {
+ /* Relevant for 32-bit RSH: Information can propagate towards
+ * LSB, so it isn't sufficient to only truncate the output to
+ * 32 bits.
+ */
+ coerce_reg_to_size(dst_reg, 4);
+ coerce_reg_to_size(&src_reg, 4);
+ }
+
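
A standalone demonstration of the comment above (illustrative): for a
32-bit right shift it is not enough to truncate the result, because
high input bits shift down into the low 32 bits:

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		uint64_t dst = 0x100000001ULL;

		/* truncate only the output: bit 32 leaks into bit 31 */
		uint64_t wrong = (uint32_t)(dst >> 1);
		/* truncate the inputs first, as the verifier now does */
		uint64_t right = (uint64_t)((uint32_t)dst >> 1);

		assert(wrong == 0x80000000ULL && right == 0);
		return 0;
	}
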
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
@@ -3131,7 +3421,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->32 */
coerce_reg_to_size(dst_reg, 4);
- coerce_reg_to_size(&src_reg, 4);
}
__reg_deduce_bounds(dst_reg);
@@ -3163,7 +3452,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
* an arbitrary scalar. Disallow all math except
* pointer subtraction
*/
- if (opcode == BPF_SUB){
+ if (opcode == BPF_SUB && env->allow_ptr_leaks) {
mark_reg_unknown(env, regs, insn->dst_reg);
return 0;
}
@@ -3447,10 +3736,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
for (j = 0; j <= vstate->curframe; j++) {
state = vstate->frame[j];
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] != STACK_SPILL)
+ bpf_for_each_spilled_reg(i, state, reg) {
+ if (!reg)
continue;
- reg = &state->stack[i].spilled_ptr;
if (reg->type == type && reg->id == dst_reg->id)
reg->range = max(reg->range, new_range);
}
@@ -3656,12 +3944,11 @@ static void reg_combine_min_max(struct bpf_reg_state *true_src,
}
}
-static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
- bool is_null)
+static void mark_ptr_or_null_reg(struct bpf_func_state *state,
+ struct bpf_reg_state *reg, u32 id,
+ bool is_null)
{
- struct bpf_reg_state *reg = &regs[regno];
-
- if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
+ if (reg_type_may_be_null(reg->type) && reg->id == id) {
/* Old offset (both fixed and variable parts) should
* have been known-zero, because we don't allow pointer
* arithmetic on pointers that might be NULL.
@@ -3674,40 +3961,49 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
}
if (is_null) {
reg->type = SCALAR_VALUE;
- } else if (reg->map_ptr->inner_map_meta) {
- reg->type = CONST_PTR_TO_MAP;
- reg->map_ptr = reg->map_ptr->inner_map_meta;
- } else {
- reg->type = PTR_TO_MAP_VALUE;
+ } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
+ if (reg->map_ptr->inner_map_meta) {
+ reg->type = CONST_PTR_TO_MAP;
+ reg->map_ptr = reg->map_ptr->inner_map_meta;
+ } else {
+ reg->type = PTR_TO_MAP_VALUE;
+ }
+ } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
+ reg->type = PTR_TO_SOCKET;
+ }
+ if (is_null || !reg_is_refcounted(reg)) {
+ /* We don't need the id from this point onwards, so we
+ * reset it to give state pruning a chance to take
+ * effect.
+ */
+ reg->id = 0;
}
- /* We don't need id from this point onwards anymore, thus we
- * should better reset it, so that state pruning has chances
- * to take effect.
- */
- reg->id = 0;
}
}
/* The logic is similar to find_good_pkt_pointers(), both could eventually
* be folded together at some point.
*/
-static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno,
- bool is_null)
+static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
+ bool is_null)
{
struct bpf_func_state *state = vstate->frame[vstate->curframe];
- struct bpf_reg_state *regs = state->regs;
+ struct bpf_reg_state *reg, *regs = state->regs;
u32 id = regs[regno].id;
int i, j;
+ if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
+ __release_reference_state(state, id);
+
for (i = 0; i < MAX_BPF_REG; i++)
- mark_map_reg(regs, i, id, is_null);
+ mark_ptr_or_null_reg(state, &regs[i], id, is_null);
for (j = 0; j <= vstate->curframe; j++) {
state = vstate->frame[j];
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- if (state->stack[i].slot_type[0] != STACK_SPILL)
+ bpf_for_each_spilled_reg(i, state, reg) {
+ if (!reg)
continue;
- mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
+ mark_ptr_or_null_reg(state, reg, id, is_null);
}
}
}
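
In BPF-C terms the branch marking looks like this (hypothetical
fragment; my_map and key are placeholders, and the new
PTR_TO_SOCKET_OR_NULL case follows the same pattern):

	long *val = bpf_map_lookup_elem(&my_map, &key);

	if (val) {
		/* here: PTR_TO_MAP_VALUE, dereference allowed */
		*val += 1;
	} else {
		/* here: SCALAR_VALUE, dereference rejected */
	}
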
@@ -3909,12 +4205,14 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
if (BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
- dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
- /* Mark all identical map registers in each branch as either
+ reg_type_may_be_null(dst_reg->type)) {
+ /* Mark all identical registers in each branch as either
 * safe or unknown, depending on the R == 0 or R != 0 conditional.
*/
- mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
- mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
+ mark_ptr_or_null_regs(this_branch, insn->dst_reg,
+ opcode == BPF_JNE);
+ mark_ptr_or_null_regs(other_branch, insn->dst_reg,
+ opcode == BPF_JEQ);
} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
this_branch, other_branch) &&
is_pointer_value(env, insn->dst_reg)) {
@@ -4037,6 +4335,16 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (err)
return err;
+ /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
+ * gen_ld_abs() may terminate the program at runtime, leading to a
+ * reference leak.
+ */
+ err = check_reference_leak(env);
+ if (err) {
+ verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
+ return err;
+ }
+
if (regs[BPF_REG_6].type != PTR_TO_CTX) {
verbose(env,
"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
@@ -4370,7 +4678,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
/* explored state didn't use this */
return true;
- equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0;
+ equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
if (rold->type == PTR_TO_STACK)
/* two stack pointers are equal only if they're pointing to
@@ -4451,6 +4759,9 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
case PTR_TO_CTX:
case CONST_PTR_TO_MAP:
case PTR_TO_PACKET_END:
+ case PTR_TO_FLOW_KEYS:
+ case PTR_TO_SOCKET:
+ case PTR_TO_SOCKET_OR_NULL:
/* Only valid matches are exact, which memcmp() above
* would have accepted
*/
@@ -4526,6 +4837,14 @@ static bool stacksafe(struct bpf_func_state *old,
return true;
}
+static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
+{
+ if (old->acquired_refs != cur->acquired_refs)
+ return false;
+ return !memcmp(old->refs, cur->refs,
+ sizeof(*old->refs) * old->acquired_refs);
+}
+
/* compare two verifier states
*
* all states stored in state_list are known to be valid, since
@@ -4571,6 +4890,9 @@ static bool func_states_equal(struct bpf_func_state *old,
if (!stacksafe(old, cur, idmap))
goto out_free;
+
+ if (!refsafe(old, cur))
+ goto out_free;
ret = true;
out_free:
kfree(idmap);
@@ -4603,7 +4925,7 @@ static bool states_equal(struct bpf_verifier_env *env,
* equivalent state (jump target or such) we didn't arrive by the straight-line
* code, so read marks in the state must propagate to the parent regardless
* of the state's write marks. That's what 'parent == state->parent' comparison
- * in mark_reg_read() and mark_stack_slot_read() is for.
+ * in mark_reg_read() is for.
*/
static int propagate_liveness(struct bpf_verifier_env *env,
const struct bpf_verifier_state *vstate,
@@ -4624,7 +4946,8 @@ static int propagate_liveness(struct bpf_verifier_env *env,
if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
continue;
if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
- err = mark_reg_read(env, vstate, vparent, i);
+ err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
+ &vparent->frame[vstate->curframe]->regs[i]);
if (err)
return err;
}
@@ -4639,7 +4962,8 @@ static int propagate_liveness(struct bpf_verifier_env *env,
if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
continue;
if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
- mark_stack_slot_read(env, vstate, vparent, i, frame);
+ mark_reg_read(env, &state->stack[i].spilled_ptr,
+ &parent->stack[i].spilled_ptr);
}
}
return err;
@@ -4649,7 +4973,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
- struct bpf_verifier_state *cur = env->cur_state;
+ struct bpf_verifier_state *cur = env->cur_state, *new;
int i, j, err;
sl = env->explored_states[insn_idx];
@@ -4691,16 +5015,18 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
return -ENOMEM;
/* add new state to the head of linked list */
- err = copy_verifier_state(&new_sl->state, cur);
+ new = &new_sl->state;
+ err = copy_verifier_state(new, cur);
if (err) {
- free_verifier_state(&new_sl->state, false);
+ free_verifier_state(new, false);
kfree(new_sl);
return err;
}
new_sl->next = env->explored_states[insn_idx];
env->explored_states[insn_idx] = new_sl;
/* connect new state to parentage chain */
- cur->parent = &new_sl->state;
+ for (i = 0; i < BPF_REG_FP; i++)
+ cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i];
/* clear write marks in current state: the writes we did are not writes
* our child did, so they don't screen off its reads from us.
* (There are no read marks in current state, because reads always mark
@@ -4713,13 +5039,48 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
/* all stack frames are accessible from callee, clear them all */
for (j = 0; j <= cur->curframe; j++) {
struct bpf_func_state *frame = cur->frame[j];
+ struct bpf_func_state *newframe = new->frame[j];
- for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
+ for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
+ frame->stack[i].spilled_ptr.parent =
+ &newframe->stack[i].spilled_ptr;
+ }
}
return 0;
}
+/* Return true if it's OK to have the same insn return a different type. */
+static bool reg_type_mismatch_ok(enum bpf_reg_type type)
+{
+ switch (type) {
+ case PTR_TO_CTX:
+ case PTR_TO_SOCKET:
+ case PTR_TO_SOCKET_OR_NULL:
+ return false;
+ default:
+ return true;
+ }
+}
+
+/* If an instruction was previously used with particular pointer types, then we
+ * need to be careful to avoid cases such as the one below, where it may be
+ * OK for one branch to access the pointer but not OK for the other branch:
+ *
+ * R1 = sock_ptr
+ * goto X;
+ * ...
+ * R1 = some_other_valid_ptr;
+ * goto X;
+ * ...
+ * R2 = *(u32 *)(R1 + 0);
+ */
+static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
+{
+ return src != prev && (!reg_type_mismatch_ok(src) ||
+ !reg_type_mismatch_ok(prev));
+}
+
static int do_check(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *state;
@@ -4734,7 +5095,6 @@ static int do_check(struct bpf_verifier_env *env)
if (!state)
return -ENOMEM;
state->curframe = 0;
- state->parent = NULL;
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
if (!state->frame[0]) {
kfree(state);
@@ -4814,6 +5174,7 @@ static int do_check(struct bpf_verifier_env *env)
regs = cur_regs(env);
env->insn_aux_data[insn_idx].seen = true;
+
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
@@ -4853,9 +5214,7 @@ static int do_check(struct bpf_verifier_env *env)
*/
*prev_src_type = src_reg_type;
- } else if (src_reg_type != *prev_src_type &&
- (src_reg_type == PTR_TO_CTX ||
- *prev_src_type == PTR_TO_CTX)) {
+ } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
 /* An abusive program is trying to use the same insn
* dst_reg = *(u32*) (src_reg + off)
* with different pointer types:
@@ -4900,9 +5259,7 @@ static int do_check(struct bpf_verifier_env *env)
if (*prev_dst_type == NOT_INIT) {
*prev_dst_type = dst_reg_type;
- } else if (dst_reg_type != *prev_dst_type &&
- (dst_reg_type == PTR_TO_CTX ||
- *prev_dst_type == PTR_TO_CTX)) {
+ } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
verbose(env, "same insn cannot be used with different pointers\n");
return -EINVAL;
}
@@ -4919,8 +5276,9 @@ static int do_check(struct bpf_verifier_env *env)
return err;
if (is_ctx_reg(env, insn->dst_reg)) {
- verbose(env, "BPF_ST stores into R%d context is not allowed\n",
- insn->dst_reg);
+ verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
+ insn->dst_reg,
+ reg_type_str[reg_state(env, insn->dst_reg)->type]);
return -EACCES;
}
@@ -4982,6 +5340,10 @@ static int do_check(struct bpf_verifier_env *env)
continue;
}
+ err = check_reference_leak(env);
+ if (err)
+ return err;
+
 /* eBPF calling convention is such that R0 is used
 * to return the value from the eBPF program.
* Make sure that it's readable at this time
@@ -5095,6 +5457,12 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
return 0;
}
+static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
+{
+ return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
+ map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
+}
+
/* look for pseudo eBPF instructions that access map FDs and
* replace them with actual map pointers
*/
@@ -5185,10 +5553,9 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
}
env->used_maps[env->used_map_cnt++] = map;
- if (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE &&
+ if (bpf_map_is_cgroup_storage(map) &&
bpf_cgroup_storage_assign(env->prog, map)) {
- verbose(env,
- "only one cgroup storage is allowed\n");
+ verbose(env, "only one cgroup storage of each type is allowed\n");
fdput(f);
return -EBUSY;
}
@@ -5217,11 +5584,15 @@ next_insn:
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct bpf_verifier_env *env)
{
+ enum bpf_cgroup_storage_type stype;
int i;
- if (env->prog->aux->cgroup_storage)
+ for_each_cgroup_storage_type(stype) {
+ if (!env->prog->aux->cgroup_storage[stype])
+ continue;
bpf_cgroup_storage_release(env->prog,
- env->prog->aux->cgroup_storage);
+ env->prog->aux->cgroup_storage[stype]);
+ }
for (i = 0; i < env->used_map_cnt; i++)
bpf_map_put(env->used_maps[i]);
@@ -5319,8 +5690,10 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
}
}
-/* convert load instructions that access fields of 'struct __sk_buff'
- * into sequence of instructions that access fields of 'struct sk_buff'
+/* convert load instructions that access fields of a context type into a
+ * sequence of instructions that access fields of the underlying structure:
+ * struct __sk_buff -> struct sk_buff
+ * struct bpf_sock_ops -> struct sock
*/
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
@@ -5349,12 +5722,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
}
- if (!ops->convert_ctx_access || bpf_prog_is_dev_bound(env->prog->aux))
+ if (bpf_prog_is_dev_bound(env->prog->aux))
return 0;
insn = env->prog->insnsi + delta;
for (i = 0; i < insn_cnt; i++, insn++) {
+ bpf_convert_ctx_access_t convert_ctx_access;
+
if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
@@ -5396,8 +5771,18 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
continue;
}
- if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
+ switch (env->insn_aux_data[i + delta].ptr_type) {
+ case PTR_TO_CTX:
+ if (!ops->convert_ctx_access)
+ continue;
+ convert_ctx_access = ops->convert_ctx_access;
+ break;
+ case PTR_TO_SOCKET:
+ convert_ctx_access = bpf_sock_convert_ctx_access;
+ break;
+ default:
continue;
+ }
ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
size = BPF_LDST_BYTES(insn);
@@ -5429,8 +5814,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
target_size = 0;
- cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
- &target_size);
+ cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
+ &target_size);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
(ctx_field_size && !target_size)) {
verbose(env, "bpf verifier is misconfigured\n");
@@ -5621,10 +6006,10 @@ static int fixup_call_args(struct bpf_verifier_env *env)
struct bpf_insn *insn = prog->insnsi;
int i, depth;
#endif
- int err;
+ int err = 0;
- err = 0;
- if (env->prog->jit_requested) {
+ if (env->prog->jit_requested &&
+ !bpf_prog_is_dev_bound(env->prog->aux)) {
err = jit_subprogs(env);
if (err == 0)
return 0;
@@ -5793,7 +6178,10 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
if (prog->jit_requested && BITS_PER_LONG == 64 &&
(insn->imm == BPF_FUNC_map_lookup_elem ||
insn->imm == BPF_FUNC_map_update_elem ||
- insn->imm == BPF_FUNC_map_delete_elem)) {
+ insn->imm == BPF_FUNC_map_delete_elem ||
+ insn->imm == BPF_FUNC_map_push_elem ||
+ insn->imm == BPF_FUNC_map_pop_elem ||
+ insn->imm == BPF_FUNC_map_peek_elem)) {
aux = &env->insn_aux_data[i + delta];
if (bpf_map_ptr_poisoned(aux))
goto patch_call_imm;
@@ -5826,6 +6214,14 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
BUILD_BUG_ON(!__same_type(ops->map_update_elem,
(int (*)(struct bpf_map *map, void *key, void *value,
u64 flags))NULL));
+ BUILD_BUG_ON(!__same_type(ops->map_push_elem,
+ (int (*)(struct bpf_map *map, void *value,
+ u64 flags))NULL));
+ BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
+ (int (*)(struct bpf_map *map, void *value))NULL));
+ BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
+ (int (*)(struct bpf_map *map, void *value))NULL));
+
switch (insn->imm) {
case BPF_FUNC_map_lookup_elem:
insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
@@ -5839,6 +6235,18 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
__bpf_call_base;
continue;
+ case BPF_FUNC_map_push_elem:
+ insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
+ __bpf_call_base;
+ continue;
+ case BPF_FUNC_map_pop_elem:
+ insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
+ __bpf_call_base;
+ continue;
+ case BPF_FUNC_map_peek_elem:
+ insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
+ __bpf_call_base;
+ continue;
}
goto patch_call_imm;
@@ -5962,6 +6370,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
env->cur_state = NULL;
}
+ if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
+ ret = bpf_prog_offload_finalize(env);
+
skip_full_check:
while (!pop_stack(env, NULL, NULL));
free_states(env);
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 9f8463afda9c..686d244e798d 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -154,7 +154,7 @@ void __xsk_map_flush(struct bpf_map *map)
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
- return NULL;
+ return ERR_PTR(-EOPNOTSUPP);
}
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
@@ -192,11 +192,8 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
sock_hold(sock->sk);
old_xs = xchg(&m->xsk_map[i], xs);
- if (old_xs) {
- /* Make sure we've flushed everything. */
- synchronize_net();
+ if (old_xs)
sock_put((struct sock *)old_xs);
- }
sockfd_put(sock);
return 0;
@@ -212,11 +209,8 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
return -EINVAL;
old_xs = xchg(&m->xsk_map[k], NULL);
- if (old_xs) {
- /* Make sure we've flushed everything. */
- synchronize_net();
+ if (old_xs)
sock_put((struct sock *)old_xs);
- }
return 0;
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index aae10baf1902..4c1cf0969a80 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -492,7 +492,7 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
}
/**
- * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
+ * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest (%NULL returns @cgrp->self)
*
@@ -501,8 +501,8 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
* enabled. If @ss is associated with the hierarchy @cgrp is on, this
* function is guaranteed to return non-NULL css.
*/
-static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
- struct cgroup_subsys *ss)
+static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
{
lockdep_assert_held(&cgroup_mutex);
@@ -523,6 +523,35 @@ static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
}
/**
+ * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest
+ *
+ * Find and get the effective css of @cgrp for @ss. The effective css is
+ * defined as the matching css of the nearest ancestor including self which
+ * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
+ * the root css is returned, so this function always returns a valid css.
+ *
+ * The returned css is not guaranteed to be online, and therefore it is the
+ * caller's responsibility to tryget a reference for it.
+ */
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
+{
+ struct cgroup_subsys_state *css;
+
+ do {
+ css = cgroup_css(cgrp, ss);
+
+ if (css)
+ return css;
+ cgrp = cgroup_parent(cgrp);
+ } while (cgrp);
+
+ return init_css_set.subsys[ss->id];
+}
+
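
A sketch of the intended calling pattern (illustrative; the returned
css may be offline, hence the tryget):

	struct cgroup_subsys_state *css;

	css = cgroup_e_css(cgrp, ss);	/* never NULL: falls back to root */
	if (css_tryget_online(css)) {
		/* css is pinned and known online here */
		css_put(css);
	}
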
+/**
* cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest
@@ -604,10 +633,11 @@ EXPORT_SYMBOL_GPL(of_css);
*
* Should be called under cgroup_[tree_]mutex.
*/
-#define for_each_e_css(css, ssid, cgrp) \
- for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
- if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
- ; \
+#define for_each_e_css(css, ssid, cgrp) \
+ for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
+ if (!((css) = cgroup_e_css_by_mask(cgrp, \
+ cgroup_subsys[(ssid)]))) \
+ ; \
else
/**
@@ -1006,7 +1036,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
* @ss is in this hierarchy, so we want the
* effective css from @cgrp.
*/
- template[i] = cgroup_e_css(cgrp, ss);
+ template[i] = cgroup_e_css_by_mask(cgrp, ss);
} else {
/*
* @ss is not in this hierarchy, so we don't want
@@ -2836,11 +2866,12 @@ restart:
}
/**
- * cgroup_save_control - save control masks of a subtree
+ * cgroup_save_control - save control masks and dom_cgrp of a subtree
* @cgrp: root of the target subtree
*
- * Save ->subtree_control and ->subtree_ss_mask to the respective old_
- * prefixed fields for @cgrp's subtree including @cgrp itself.
+ * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
+ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
+ * itself.
*/
static void cgroup_save_control(struct cgroup *cgrp)
{
@@ -2850,6 +2881,7 @@ static void cgroup_save_control(struct cgroup *cgrp)
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
dsct->old_subtree_control = dsct->subtree_control;
dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
+ dsct->old_dom_cgrp = dsct->dom_cgrp;
}
}
@@ -2875,11 +2907,12 @@ static void cgroup_propagate_control(struct cgroup *cgrp)
}
/**
- * cgroup_restore_control - restore control masks of a subtree
+ * cgroup_restore_control - restore control masks and dom_cgrp of a subtree
* @cgrp: root of the target subtree
*
- * Restore ->subtree_control and ->subtree_ss_mask from the respective old_
- * prefixed fields for @cgrp's subtree including @cgrp itself.
+ * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
+ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
+ * itself.
*/
static void cgroup_restore_control(struct cgroup *cgrp)
{
@@ -2889,6 +2922,7 @@ static void cgroup_restore_control(struct cgroup *cgrp)
cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
dsct->subtree_control = dsct->old_subtree_control;
dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
+ dsct->dom_cgrp = dsct->old_dom_cgrp;
}
}
@@ -3019,7 +3053,7 @@ static int cgroup_apply_control(struct cgroup *cgrp)
return ret;
/*
- * At this point, cgroup_e_css() results reflect the new csses
+ * At this point, cgroup_e_css_by_mask() results reflect the new csses
* making the following cgroup_update_dfl_csses() properly update
* css associations of all tasks in the subtree.
*/
@@ -3196,6 +3230,8 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
{
struct cgroup *parent = cgroup_parent(cgrp);
struct cgroup *dom_cgrp = parent->dom_cgrp;
+ struct cgroup *dsct;
+ struct cgroup_subsys_state *d_css;
int ret;
lockdep_assert_held(&cgroup_mutex);
@@ -3225,12 +3261,13 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
*/
cgroup_save_control(cgrp);
- cgrp->dom_cgrp = dom_cgrp;
+ cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
+ if (dsct == cgrp || cgroup_is_threaded(dsct))
+ dsct->dom_cgrp = dom_cgrp;
+
ret = cgroup_apply_control(cgrp);
if (!ret)
parent->nr_threaded_children++;
- else
- cgrp->dom_cgrp = cgrp;
cgroup_finalize_control(cgrp, ret);
return ret;
diff --git a/kernel/compat.c b/kernel/compat.c
index 8e40efc2928a..089d00d0da9c 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -93,28 +93,28 @@ int compat_put_timex(struct compat_timex __user *utp, const struct timex *txc)
return 0;
}
-static int __compat_get_timeval(struct timeval *tv, const struct compat_timeval __user *ctv)
+static int __compat_get_timeval(struct timeval *tv, const struct old_timeval32 __user *ctv)
{
return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
__get_user(tv->tv_sec, &ctv->tv_sec) ||
__get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
}
-static int __compat_put_timeval(const struct timeval *tv, struct compat_timeval __user *ctv)
+static int __compat_put_timeval(const struct timeval *tv, struct old_timeval32 __user *ctv)
{
return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) ||
__put_user(tv->tv_sec, &ctv->tv_sec) ||
__put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
}
-static int __compat_get_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
+static int __compat_get_timespec(struct timespec *ts, const struct old_timespec32 __user *cts)
{
return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
__get_user(ts->tv_sec, &cts->tv_sec) ||
__get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
-static int __compat_put_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
+static int __compat_put_timespec(const struct timespec *ts, struct old_timespec32 __user *cts)
{
return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
__put_user(ts->tv_sec, &cts->tv_sec) ||
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0097acec1c71..3c7f3b4c453c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -315,6 +315,16 @@ void lockdep_assert_cpus_held(void)
percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
+static void lockdep_acquire_cpus_lock(void)
+{
+ rwsem_acquire(&cpu_hotplug_lock.rw_sem.dep_map, 0, 0, _THIS_IP_);
+}
+
+static void lockdep_release_cpus_lock(void)
+{
+ rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, 1, _THIS_IP_);
+}
+
/*
* Wait for currently running CPU hotplug operations to complete (if any) and
* disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
@@ -344,6 +354,17 @@ void cpu_hotplug_enable(void)
cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+
+#else
+
+static void lockdep_acquire_cpus_lock(void)
+{
+}
+
+static void lockdep_release_cpus_lock(void)
+{
+}
+
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_HOTPLUG_SMT
@@ -362,6 +383,7 @@ void __init cpu_smt_disable(bool force)
pr_info("SMT: Force disabled\n");
cpu_smt_control = CPU_SMT_FORCE_DISABLED;
} else {
+ pr_info("SMT: disabled\n");
cpu_smt_control = CPU_SMT_DISABLED;
}
}
@@ -616,6 +638,12 @@ static void cpuhp_thread_fun(unsigned int cpu)
*/
smp_mb();
+ /*
+ * The BP holds the hotplug lock, but we're now running on the AP;
+ * ensure that anybody asserting that the lock is held will actually
+ * find it so.
+ */
+ lockdep_acquire_cpus_lock();
cpuhp_lock_acquire(bringup);
if (st->single) {
@@ -661,6 +689,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
}
cpuhp_lock_release(bringup);
+ lockdep_release_cpus_lock();
if (!st->should_run)
complete_ap_thread(st, bringup);
@@ -2026,6 +2055,12 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
+/*
+ * Architectures that need SMT-specific errata handling during SMT hotplug
+ * should override this.
+ */
+void __weak arch_smt_update(void) { }
+
static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
int cpu, ret = 0;
@@ -2052,8 +2087,10 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
*/
cpuhp_offline_cpu_device(cpu);
}
- if (!ret)
+ if (!ret) {
cpu_smt_control = ctrlval;
+ arch_smt_update();
+ }
cpu_maps_update_done();
return ret;
}
@@ -2064,6 +2101,7 @@ static int cpuhp_smt_enable(void)
cpu_maps_update_begin();
cpu_smt_control = CPU_SMT_ENABLED;
+ arch_smt_update();
for_each_present_cpu(cpu) {
/* Skip online CPUs and CPUs on offline nodes */
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 9bd54304446f..645c7a2ecde8 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -13,6 +13,9 @@ config NEED_DMA_MAP_STATE
config ARCH_DMA_ADDR_T_64BIT
def_bool 64BIT || PHYS_ADDR_T_64BIT
+config ARCH_HAS_DMA_COHERENCE_H
+ bool
+
config HAVE_GENERIC_DMA_COHERENT
bool
@@ -23,22 +26,22 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU
bool
select NEED_DMA_MAP_STATE
-config DMA_DIRECT_OPS
+config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
bool
- depends on HAS_DMA
-config DMA_NONCOHERENT_OPS
+config ARCH_HAS_DMA_COHERENT_TO_PFN
bool
- depends on HAS_DMA
- select DMA_DIRECT_OPS
-config DMA_NONCOHERENT_MMAP
+config ARCH_HAS_DMA_MMAP_PGPROT
bool
- depends on DMA_NONCOHERENT_OPS
+
+config DMA_DIRECT_OPS
+ bool
+ depends on HAS_DMA
config DMA_NONCOHERENT_CACHE_SYNC
bool
- depends on DMA_NONCOHERENT_OPS
+ depends on DMA_DIRECT_OPS
config DMA_VIRT_OPS
bool
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
index 6de44e4eb454..7d581e4eea4a 100644
--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -4,7 +4,6 @@ obj-$(CONFIG_HAS_DMA) += mapping.o
obj-$(CONFIG_DMA_CMA) += contiguous.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
obj-$(CONFIG_DMA_DIRECT_OPS) += direct.o
-obj-$(CONFIG_DMA_NONCOHERENT_OPS) += noncoherent.o
obj-$(CONFIG_DMA_VIRT_OPS) += virt.o
obj-$(CONFIG_DMA_API_DEBUG) += debug.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 286d82329eb0..b2a87905846d 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -49,7 +49,11 @@ static phys_addr_t limit_cmdline;
static int __init early_cma(char *p)
{
- pr_debug("%s(%s)\n", __func__, p);
+ if (!p) {
+ pr_err("Config string not provided\n");
+ return -EINVAL;
+ }
+
size_cmdline = memparse(p, &p);
if (*p != '@')
return 0;
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index c007d25bee09..231ca4628062 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -1312,6 +1312,22 @@ static void check_sg_segment(struct device *dev, struct scatterlist *sg)
#endif
}
+void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len)
+{
+ if (unlikely(dma_debug_disabled()))
+ return;
+
+ if (!virt_addr_valid(addr))
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
+ addr, len);
+
+ if (is_vmalloc_addr(addr))
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
+ addr, len);
+}
+EXPORT_SYMBOL(debug_dma_map_single);
+
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
size_t size, int direction, dma_addr_t dma_addr,
bool map_single)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index de87b0282e74..87a6bc2a96c0 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -1,13 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * DMA operations that map physical memory directly without using an IOMMU or
- * flushing caches.
+ * Copyright (C) 2018 Christoph Hellwig.
+ *
+ * DMA operations that map physical memory directly without using an IOMMU.
*/
+#include <linux/bootmem.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
+#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
@@ -41,40 +44,83 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
return false;
}
- if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+ if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
dev_err(dev,
- "%s: overflow %pad+%zu of device mask %llx\n",
- caller, &dma_addr, size, *dev->dma_mask);
+ "%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
+ caller, &dma_addr, size,
+ *dev->dma_mask, dev->bus_dma_mask);
}
return false;
}
return true;
}
+static inline dma_addr_t phys_to_dma_direct(struct device *dev,
+ phys_addr_t phys)
+{
+ if (force_dma_unencrypted())
+ return __phys_to_dma(dev, phys);
+ return phys_to_dma(dev, phys);
+}
+
+u64 dma_direct_get_required_mask(struct device *dev)
+{
+ u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
+
+ if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
+ max_dma = dev->bus_dma_mask;
+
+ return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
+}
+
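
A worked example of the mask computation (standalone sketch, using a
compiler builtin in place of the kernel's fls64()):

	#include <stdint.h>
	#include <assert.h>

	static uint64_t required_mask(uint64_t max_dma)
	{
		int fls64 = 64 - __builtin_clzll(max_dma); /* max_dma != 0 */

		return (1ULL << (fls64 - 1)) * 2 - 1;
	}

	int main(void)
	{
		/* highest populated bit is bit 32, so the mask is 33 ones */
		assert(required_mask(0x123456789ULL) == 0x1ffffffffULL);
		return 0;
	}
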
+static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+ u64 *phys_mask)
+{
+ if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
+ dma_mask = dev->bus_dma_mask;
+
+ if (force_dma_unencrypted())
+ *phys_mask = __dma_to_phys(dev, dma_mask);
+ else
+ *phys_mask = dma_to_phys(dev, dma_mask);
+
+ /*
+ * Optimistically try the zone that the physical address mask falls
+ * into first. If that returns memory that isn't actually addressable
+ * we will fallback to the next lower zone and try again.
+ *
+ * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
+ * zones.
+ */
+ if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+ return GFP_DMA;
+ if (*phys_mask <= DMA_BIT_MASK(32))
+ return GFP_DMA32;
+ return 0;
+}
+
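
The zone selection reduces to two threshold comparisons; a standalone
sketch (assuming ARCH_ZONE_DMA_BITS == 24, the common default):

	#include <stdint.h>

	#define MASK(n)	(((n) >= 64) ? ~0ULL : ((1ULL << (n)) - 1))

	/* 1 = GFP_DMA, 2 = GFP_DMA32, 0 = no zone restriction */
	static int pick_zone(uint64_t phys_mask)
	{
		if (phys_mask <= MASK(24))
			return 1;
		if (phys_mask <= MASK(32))
			return 2;
		return 0;
	}
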
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
- dma_addr_t addr = force_dma_unencrypted() ?
- __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
- return addr + size - 1 <= dev->coherent_dma_mask;
+ return phys_to_dma_direct(dev, phys) + size - 1 <=
+ min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}
-void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs)
+void *dma_direct_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
int page_order = get_order(size);
struct page *page = NULL;
+ u64 phys_mask;
void *ret;
+ if (attrs & DMA_ATTR_NO_WARN)
+ gfp |= __GFP_NOWARN;
+
/* we always manually zero the memory once we are done: */
gfp &= ~__GFP_ZERO;
-
- /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
- if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
- gfp |= GFP_DMA;
- if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
- gfp |= GFP_DMA32;
-
+ gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+ &phys_mask);
again:
 /* CMA can be used only in a context that permits sleeping */
if (gfpflags_allow_blocking(gfp)) {
@@ -93,15 +139,14 @@ again:
page = NULL;
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
- dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
+ phys_mask < DMA_BIT_MASK(64) &&
!(gfp & (GFP_DMA32 | GFP_DMA))) {
gfp |= GFP_DMA32;
goto again;
}
if (IS_ENABLED(CONFIG_ZONE_DMA) &&
- dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
- !(gfp & GFP_DMA)) {
+ phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) {
gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
goto again;
}
@@ -124,7 +169,7 @@ again:
* NOTE: this function must never look at the dma_addr argument, because we want
* to be able to use it as a helper for iommu implementations as well.
*/
-void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -136,14 +181,96 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
free_pages((unsigned long)cpu_addr, page_order);
}
+void *dma_direct_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ if (!dev_is_dma_coherent(dev))
+ return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
+ return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+}
+
+void dma_direct_free(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
+{
+ if (!dev_is_dma_coherent(dev))
+ arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
+ else
+ dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
+}
+
+static void dma_direct_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ if (dev_is_dma_coherent(dev))
+ return;
+ arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_direct_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ if (dev_is_dma_coherent(dev))
+ return;
+
+ for_each_sg(sgl, sg, nents, i)
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+static void dma_direct_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ if (dev_is_dma_coherent(dev))
+ return;
+ arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+ arch_sync_dma_for_cpu_all(dev);
+}
+
+static void dma_direct_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ if (dev_is_dma_coherent(dev))
+ return;
+
+ for_each_sg(sgl, sg, nents, i)
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_cpu_all(dev);
+}
+
+static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+#endif
+
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
+ phys_addr_t phys = page_to_phys(page) + offset;
+ dma_addr_t dma_addr = phys_to_dma(dev, phys);
if (!check_addr(dev, dma_addr, size, __func__))
return DIRECT_MAPPING_ERROR;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
return dma_addr;
}
@@ -162,31 +289,29 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
sg_dma_len(sg) = sg->length;
}
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
return nents;
}
+/*
+ * Because 32-bit DMA masks are so common we expect every architecture to be
+ * able to satisfy them - either by not supporting more physical memory, or by
+ * providing a ZONE_DMA32. If neither is the case, the architecture needs to
+ * use an IOMMU instead of the direct mapping.
+ */
int dma_direct_supported(struct device *dev, u64 mask)
{
-#ifdef CONFIG_ZONE_DMA
- if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
- return 0;
-#else
- /*
- * Because 32-bit DMA masks are so common we expect every architecture
- * to be able to satisfy them - either by not supporting more physical
- * memory, or by providing a ZONE_DMA32. If neither is the case, the
- * architecture needs to use an IOMMU instead of the direct mapping.
- */
- if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
- return 0;
-#endif
- /*
- * Upstream PCI/PCIe bridges or SoC interconnects may not carry
- * as many DMA address bits as the device itself supports.
- */
- if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
- return 0;
- return 1;
+ u64 min_mask;
+
+ if (IS_ENABLED(CONFIG_ZONE_DMA))
+ min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
+ else
+ min_mask = DMA_BIT_MASK(32);
+
+ min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
+
+ return mask >= phys_to_dma(dev, min_mask);
}
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -199,7 +324,20 @@ const struct dma_map_ops dma_direct_ops = {
.free = dma_direct_free,
.map_page = dma_direct_map_page,
.map_sg = dma_direct_map_sg,
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
+ .sync_single_for_device = dma_direct_sync_single_for_device,
+ .sync_sg_for_device = dma_direct_sync_sg_for_device,
+#endif
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+ .sync_single_for_cpu = dma_direct_sync_single_for_cpu,
+ .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
+ .unmap_page = dma_direct_unmap_page,
+ .unmap_sg = dma_direct_unmap_sg,
+#endif
+ .get_required_mask = dma_direct_get_required_mask,
.dma_supported = dma_direct_supported,
.mapping_error = dma_direct_mapping_error,
+ .cache_sync = arch_dma_cache_sync,
};
EXPORT_SYMBOL(dma_direct_ops);
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index d2a92ddaac4d..58dec7a92b7b 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -7,7 +7,7 @@
*/
#include <linux/acpi.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
@@ -202,17 +202,26 @@ EXPORT_SYMBOL(dmam_release_declared_memory);
* Create scatter-list for the already allocated DMA buffer.
*/
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size)
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
{
- struct page *page = virt_to_page(cpu_addr);
+ struct page *page;
int ret;
- ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
- if (unlikely(ret))
- return ret;
+ if (!dev_is_dma_coherent(dev)) {
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
+ return -ENXIO;
- sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
- return 0;
+ page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
+ dma_addr));
+ } else {
+ page = virt_to_page(cpu_addr);
+ }
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (!ret)
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return ret;
}
EXPORT_SYMBOL(dma_common_get_sgtable);
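
A hedged usage sketch of the caller-facing wrapper: exporting a coherent allocation as a single-entry scatterlist, e.g. for dma-buf (error handling trimmed):

	struct sg_table sgt;
	int ret;

	ret = dma_get_sgtable_attrs(dev, &sgt, cpu_addr, dma_addr, size, 0);
	if (ret)
		return ret;	/* -ENXIO on non-coherent w/o the pfn helper */
	/* ... hand sgt.sgl to the importer, then ... */
	sg_free_table(&sgt);
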
@@ -220,27 +229,37 @@ EXPORT_SYMBOL(dma_common_get_sgtable);
* Create userspace mapping for the DMA-coherent memory.
*/
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
{
- int ret = -ENXIO;
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
+ unsigned long pfn;
+ int ret = -ENXIO;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
- if (off < count && user_count <= (count - off))
- ret = remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(virt_to_page(cpu_addr)) + off,
- user_count << PAGE_SHIFT,
- vma->vm_page_prot);
-#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
+ if (off >= count || user_count > count - off)
+ return -ENXIO;
- return ret;
+ if (!dev_is_dma_coherent(dev)) {
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
+ return -ENXIO;
+ pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
+ } else {
+ pfn = page_to_pfn(virt_to_page(cpu_addr));
+ }
+
+ return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ user_count << PAGE_SHIFT, vma->vm_page_prot);
+#else
+ return -ENXIO;
+#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}
EXPORT_SYMBOL(dma_common_mmap);
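
And the mmap counterpart, sketched as a hypothetical driver file operation built on top of this helper:

	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct foo_dev *fd = file->private_data;	/* hypothetical */

		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
					 fd->dma_addr, fd->size);
	}
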
@@ -327,19 +346,3 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
vunmap(cpu_addr);
}
#endif
-
-/*
- * enables DMA API use for a device
- */
-int dma_configure(struct device *dev)
-{
- if (dev->bus->dma_configure)
- return dev->bus->dma_configure(dev);
- return 0;
-}
-
-void dma_deconfigure(struct device *dev)
-{
- of_dma_deconfigure(dev);
- acpi_dma_deconfigure(dev);
-}
diff --git a/kernel/dma/noncoherent.c b/kernel/dma/noncoherent.c
deleted file mode 100644
index 031fe235d958..000000000000
--- a/kernel/dma/noncoherent.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018 Christoph Hellwig.
- *
- * DMA operations that map physical memory directly without providing cache
- * coherence.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/scatterlist.h>
-
-static void dma_noncoherent_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
- arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
-}
-
-static void dma_noncoherent_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
-}
-
-static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- dma_addr_t addr;
-
- addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
- if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
- size, dir);
- return addr;
-}
-
-static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
- if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
- return nents;
-}
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
-static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
- arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
- arch_sync_dma_for_cpu_all(dev);
-}
-
-static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
- arch_sync_dma_for_cpu_all(dev);
-}
-
-static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir, unsigned long attrs)
-{
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-#endif
-
-const struct dma_map_ops dma_noncoherent_ops = {
- .alloc = arch_dma_alloc,
- .free = arch_dma_free,
- .mmap = arch_dma_mmap,
- .sync_single_for_device = dma_noncoherent_sync_single_for_device,
- .sync_sg_for_device = dma_noncoherent_sync_sg_for_device,
- .map_page = dma_noncoherent_map_page,
- .map_sg = dma_noncoherent_map_sg,
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
- .sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu,
- .sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu,
- .unmap_page = dma_noncoherent_unmap_page,
- .unmap_sg = dma_noncoherent_unmap_sg,
-#endif
- .dma_supported = dma_direct_supported,
- .mapping_error = dma_direct_mapping_error,
- .cache_sync = arch_dma_cache_sync,
-};
-EXPORT_SYMBOL(dma_noncoherent_ops);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c80549bf82c6..5a97f34bc14c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3935,6 +3935,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
goto out;
}
+ /* If this is a pinned event it must be running on this CPU */
+ if (event->attr.pinned && event->oncpu != smp_processor_id()) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/*
* If the event is currently on this CPU, its either a per-task event,
* or local to this CPU. Furthermore it means its ACTIVE (otherwise
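
Caller-side consequence, sketched: reading a pinned event from a CPU it is not running on now fails fast with -EBUSY instead of returning a stale count (the surrounding event setup is assumed):

	u64 value;
	int err;

	err = perf_event_read_local(event, &value, NULL, NULL);
	if (err == -EBUSY) {
		/* pinned event is active on another CPU; read it there */
	}
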
@@ -8308,6 +8314,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
goto unlock;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+ if (event->cpu != smp_processor_id())
+ continue;
if (event->attr.type != PERF_TYPE_TRACEPOINT)
continue;
if (event->attr.config != entry->type)
@@ -9425,9 +9433,7 @@ static void free_pmu_context(struct pmu *pmu)
if (pmu->task_ctx_nr > perf_invalid_context)
return;
- mutex_lock(&pmus_lock);
free_percpu(pmu->pmu_cpu_context);
- mutex_unlock(&pmus_lock);
}
/*
@@ -9683,12 +9689,8 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
void perf_pmu_unregister(struct pmu *pmu)
{
- int remove_device;
-
mutex_lock(&pmus_lock);
- remove_device = pmu_bus_running;
list_del_rcu(&pmu->entry);
- mutex_unlock(&pmus_lock);
/*
* We dereference the pmu list under both SRCU and regular RCU, so
@@ -9700,13 +9702,14 @@ void perf_pmu_unregister(struct pmu *pmu)
free_percpu(pmu->pmu_disable_count);
if (pmu->type >= PERF_TYPE_MAX)
idr_remove(&pmu_idr, pmu->type);
- if (remove_device) {
+ if (pmu_bus_running) {
if (pmu->nr_addr_filters)
device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
device_del(pmu->dev);
put_device(pmu->dev);
}
free_pmu_context(pmu);
+ mutex_unlock(&pmus_lock);
}
EXPORT_SYMBOL_GPL(perf_pmu_unregister);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 5d3cf407e374..4a9937076331 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -459,10 +459,20 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
if (size || handle->aux_flags) {
/*
* Only send RECORD_AUX if we have something useful to communicate
+ *
+ * Note: the OVERWRITE records by themselves are not considered
+ * useful, as they don't communicate any *new* information,
+ * aside from the short-lived offset, which becomes history at
+ * the next event sched-in and therefore isn't useful.
+ * Userspace that needs to copy out AUX data in overwrite
+ * mode should use user_page::aux_head for the actual
+ * offset. So, from now on we don't output AUX records that
+ * have *only* the OVERWRITE flag set.
*/
- perf_event_aux_event(handle->event, aux_head, size,
- handle->aux_flags);
+ if (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE)
+ perf_event_aux_event(handle->event, aux_head, size,
+ handle->aux_flags);
}
rb->user_page->aux_head = rb->aux_head;
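
For the userspace side referenced above, a hedged reader sketch for overwrite mode, where the offset comes from the mmap'ed control page rather than from RECORD_AUX records:

	struct perf_event_mmap_page *pc = mmap_base;	/* mmap_base: the first mmap'ed page (assumed) */
	__u64 head;

	/* acquire-load pairs with the kernel's publishing of aux_head */
	head = __atomic_load_n(&pc->aux_head, __ATOMIC_ACQUIRE);
	/* copy out the AUX area up to head, then re-enable the event */
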
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 3207a4d26849..2bf792d22087 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1858,7 +1858,7 @@ static void handle_trampoline(struct pt_regs *regs)
sigill:
uprobe_warn(current, "handle uretprobe, sending SIGILL.");
- force_sig_info(SIGILL, SEND_SIG_FORCED, current);
+ force_sig(SIGILL, current);
}
@@ -1966,7 +1966,7 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
if (unlikely(err)) {
uprobe_warn(current, "execute the probed insn, sending SIGILL.");
- force_sig_info(SIGILL, SEND_SIG_FORCED, current);
+ force_sig(SIGILL, current);
}
}
diff --git a/kernel/futex.c b/kernel/futex.c
index 11fc3bb456d6..3e2de8fc1891 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1365,9 +1365,9 @@ static void __unqueue_futex(struct futex_q *q)
{
struct futex_hash_bucket *hb;
- if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
- || WARN_ON(plist_node_empty(&q->list)))
+ if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
return;
+ lockdep_assert_held(q->lock_ptr);
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
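
The general pattern behind this hunk, sketched with a hypothetical structure: spin_is_locked() only proves that *somebody* holds the lock, while lockdep_assert_held() verifies the current context holds it and compiles away without CONFIG_LOCKDEP:

	static void foo_update(struct foo *f)	/* foo is hypothetical */
	{
		lockdep_assert_held(&f->lock);	/* caller must hold f->lock */
		f->counter++;
	}
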
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 83f830acbb5f..410a77a8f6e2 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -173,7 +173,7 @@ err_unlock:
}
COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
- struct compat_timespec __user *, utime, u32 __user *, uaddr2,
+ struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
u32, val3)
{
struct timespec ts;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 5d9fc01b60a6..3366d11c3e02 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -183,7 +183,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
* unhappy about. Replace them with ':', which does
* the trick and is not as offensive as '\'...
*/
- name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "%pOF", of_node);
if (!name) {
kfree(domain);
return NULL;
@@ -867,7 +867,7 @@ void irq_dispose_mapping(unsigned int virq)
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
/**
- * irq_find_mapping() - Find a linux irq from an hw irq number.
+ * irq_find_mapping() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
*/
@@ -1741,6 +1741,7 @@ static void debugfs_add_domain_dir(struct irq_domain *d)
static void debugfs_remove_domain_dir(struct irq_domain *d)
{
debugfs_remove(d->debugfs_file);
+ d->debugfs_file = NULL;
}
void __init irq_domain_debugfs_init(struct dentry *root)
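
The same defensive idiom in isolation (hypothetical structure): debugfs_remove(NULL) is a no-op, so clearing the pointer makes teardown idempotent and keeps later calls from operating on a stale dentry:

	static void foo_debugfs_teardown(struct foo *f)
	{
		debugfs_remove(f->debugfs_file);	/* NULL-safe */
		f->debugfs_file = NULL;
	}
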
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index fb86146037a7..9dbdccab3b6a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -927,6 +927,9 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
+ if (ret == IRQ_HANDLED)
+ atomic_inc(&desc->threads_handled);
+
irq_finalize_oneshot(desc, action);
local_bh_enable();
return ret;
@@ -943,6 +946,9 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
irqreturn_t ret;
ret = action->thread_fn(action->irq, action->dev_id);
+ if (ret == IRQ_HANDLED)
+ atomic_inc(&desc->threads_handled);
+
irq_finalize_oneshot(desc, action);
return ret;
}
@@ -1020,8 +1026,6 @@ static int irq_thread(void *data)
irq_thread_check_affinity(desc, action);
action_ret = handler_fn(desc, action);
- if (action_ret == IRQ_HANDLED)
- atomic_inc(&desc->threads_handled);
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5092494bf261..6e6d467f3dec 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
return area;
}
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+ const struct cpumask *msk)
+{
+ unsigned int cpu, best_cpu, maxavl = 0;
+ struct cpumap *cm;
+
+ best_cpu = UINT_MAX;
+
+ for_each_cpu(cpu, msk) {
+ cm = per_cpu_ptr(m->maps, cpu);
+
+ if (!cm->online || cm->available <= maxavl)
+ continue;
+
+ best_cpu = cpu;
+ maxavl = cm->available;
+ }
+ return best_cpu;
+}
+
/**
* irq_matrix_assign_system - Assign system wide entry in the matrix
* @m: Matrix pointer
@@ -239,11 +260,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
* @m: Matrix pointer
* @cpu: On which CPU the interrupt should be allocated
*/
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+ unsigned int *mapped_cpu)
{
- struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
- unsigned int bit, end = m->alloc_end;
+ unsigned int bit, cpu, end = m->alloc_end;
+ struct cpumap *cm;
+
+ if (cpumask_empty(msk))
+ return -EINVAL;
+
+ cpu = matrix_find_best_cpu(m, msk);
+ if (cpu == UINT_MAX)
+ return -ENOSPC;
+ cm = per_cpu_ptr(m->maps, cpu);
+ end = m->alloc_end;
/* Get managed bit which are not allocated */
bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
bit = find_first_bit(m->scratch_map, end);
@@ -252,6 +283,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
set_bit(bit, cm->alloc_map);
cm->allocated++;
m->total_allocated++;
+ *mapped_cpu = cpu;
trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
return bit;
}
@@ -322,37 +354,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
bool reserved, unsigned int *mapped_cpu)
{
- unsigned int cpu, best_cpu, maxavl = 0;
+ unsigned int cpu, bit;
struct cpumap *cm;
- unsigned int bit;
-
- best_cpu = UINT_MAX;
- for_each_cpu(cpu, msk) {
- cm = per_cpu_ptr(m->maps, cpu);
- if (!cm->online || cm->available <= maxavl)
- continue;
+ cpu = matrix_find_best_cpu(m, msk);
+ if (cpu == UINT_MAX)
+ return -ENOSPC;
- best_cpu = cpu;
- maxavl = cm->available;
- }
+ cm = per_cpu_ptr(m->maps, cpu);
+ bit = matrix_alloc_area(m, cm, 1, false);
+ if (bit >= m->alloc_end)
+ return -ENOSPC;
+ cm->allocated++;
+ cm->available--;
+ m->total_allocated++;
+ m->global_available--;
+ if (reserved)
+ m->global_reserved--;
+ *mapped_cpu = cpu;
+ trace_irq_matrix_alloc(bit, cpu, m, cm);
+ return bit;
- if (maxavl) {
- cm = per_cpu_ptr(m->maps, best_cpu);
- bit = matrix_alloc_area(m, cm, 1, false);
- if (bit < m->alloc_end) {
- cm->allocated++;
- cm->available--;
- m->total_allocated++;
- m->global_available--;
- if (reserved)
- m->global_reserved--;
- *mapped_cpu = best_cpu;
- trace_irq_matrix_alloc(bit, best_cpu, m, cm);
- return bit;
- }
- }
- return -ENOSPC;
}
/**
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 2e62503bea0d..b28028b08d44 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -38,23 +38,43 @@ static int jump_label_cmp(const void *a, const void *b)
const struct jump_entry *jea = a;
const struct jump_entry *jeb = b;
- if (jea->key < jeb->key)
+ if (jump_entry_key(jea) < jump_entry_key(jeb))
return -1;
- if (jea->key > jeb->key)
+ if (jump_entry_key(jea) > jump_entry_key(jeb))
return 1;
return 0;
}
+static void jump_label_swap(void *a, void *b, int size)
+{
+ long delta = (unsigned long)a - (unsigned long)b;
+ struct jump_entry *jea = a;
+ struct jump_entry *jeb = b;
+ struct jump_entry tmp = *jea;
+
+ jea->code = jeb->code - delta;
+ jea->target = jeb->target - delta;
+ jea->key = jeb->key - delta;
+
+ jeb->code = tmp.code + delta;
+ jeb->target = tmp.target + delta;
+ jeb->key = tmp.key + delta;
+}
+
static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
unsigned long size;
+ void *swapfn = NULL;
+
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
+ swapfn = jump_label_swap;
size = (((unsigned long)stop - (unsigned long)start)
/ sizeof(struct jump_entry));
- sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
+ sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}
static void jump_label_update(struct static_key *key);
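
A compact userspace model of why a custom swap function is needed with CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE (a sketch, not kernel code): each field stores an offset relative to its own address, so physically moving an entry by `delta` bytes must rebase every stored offset by the same delta, which is what jump_label_swap() above does for code, target and key:

	struct rel_entry { long code; };	/* offset from &code to the target */

	static long rel_decode(const struct rel_entry *e)
	{
		return (long)e + e->code;	/* recover the absolute target */
	}

	static void rel_swap(struct rel_entry *a, struct rel_entry *b)
	{
		long delta = (long)a - (long)b;
		long tmp = a->code;

		a->code = b->code - delta;	/* b's target, rebased onto a */
		b->code = tmp + delta;		/* a's target, rebased onto b */
	}

After the swap, rel_decode(a) returns what rel_decode(b) returned before, so the table can be sorted without corrupting the relative references.
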
@@ -85,6 +105,7 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
int v, v1;
STATIC_KEY_CHECK_USE(key);
+ lockdep_assert_cpus_held();
/*
* Careful if we get concurrent static_key_slow_inc() calls;
@@ -130,6 +151,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
void static_key_enable_cpuslocked(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
+ lockdep_assert_cpus_held();
if (atomic_read(&key->enabled) > 0) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
@@ -160,6 +182,7 @@ EXPORT_SYMBOL_GPL(static_key_enable);
void static_key_disable_cpuslocked(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
+ lockdep_assert_cpus_held();
if (atomic_read(&key->enabled) != 1) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
@@ -185,6 +208,8 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
unsigned long rate_limit,
struct delayed_work *work)
{
+ lockdep_assert_cpus_held();
+
/*
* The negative count check is valid even when a negative
* key->enabled is in use by static_key_slow_inc(); a
@@ -261,8 +286,8 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit);
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
- if (entry->code <= (unsigned long)end &&
- entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
+ if (jump_entry_code(entry) <= (unsigned long)end &&
+ jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
return 1;
return 0;
@@ -321,16 +346,6 @@ static inline void static_key_set_linked(struct static_key *key)
key->type |= JUMP_TYPE_LINKED;
}
-static inline struct static_key *jump_entry_key(struct jump_entry *entry)
-{
- return (struct static_key *)((unsigned long)entry->key & ~1UL);
-}
-
-static bool jump_entry_branch(struct jump_entry *entry)
-{
- return (unsigned long)entry->key & 1UL;
-}
-
/***
* A 'struct static_key' uses a union such that it either points directly
* to a table of 'struct jump_entry' or to a linked list of modules which in
@@ -355,7 +370,7 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
struct static_key *key = jump_entry_key(entry);
bool enabled = static_key_enabled(key);
- bool branch = jump_entry_branch(entry);
+ bool branch = jump_entry_is_branch(entry);
/* See the comment in linux/jump_label.h */
return enabled ^ branch;
@@ -363,19 +378,20 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry)
static void __jump_label_update(struct static_key *key,
struct jump_entry *entry,
- struct jump_entry *stop)
+ struct jump_entry *stop,
+ bool init)
{
for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
/*
* An entry->code of 0 indicates an entry which has been
* disabled because it was in an init text area.
*/
- if (entry->code) {
- if (kernel_text_address(entry->code))
+ if (init || !jump_entry_is_init(entry)) {
+ if (kernel_text_address(jump_entry_code(entry)))
arch_jump_label_transform(entry, jump_label_type(entry));
else
WARN_ONCE(1, "can't patch jump_label at %pS",
- (void *)(unsigned long)entry->code);
+ (void *)jump_entry_code(entry));
}
}
}
@@ -410,6 +426,9 @@ void __init jump_label_init(void)
if (jump_label_type(iter) == JUMP_LABEL_NOP)
arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
+ if (init_section_contains((void *)jump_entry_code(iter), 1))
+ jump_entry_set_init(iter);
+
iterk = jump_entry_key(iter);
if (iterk == key)
continue;
@@ -422,26 +441,13 @@ void __init jump_label_init(void)
cpus_read_unlock();
}
-/* Disable any jump label entries in __init/__exit code */
-void __init jump_label_invalidate_initmem(void)
-{
- struct jump_entry *iter_start = __start___jump_table;
- struct jump_entry *iter_stop = __stop___jump_table;
- struct jump_entry *iter;
-
- for (iter = iter_start; iter < iter_stop; iter++) {
- if (init_section_contains((void *)(unsigned long)iter->code, 1))
- iter->code = 0;
- }
-}
-
#ifdef CONFIG_MODULES
static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
struct static_key *key = jump_entry_key(entry);
bool type = static_key_type(key);
- bool branch = jump_entry_branch(entry);
+ bool branch = jump_entry_is_branch(entry);
/* See the comment in linux/jump_label.h */
return type ^ branch;
@@ -455,7 +461,7 @@ struct static_key_mod {
static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
- WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
+ WARN_ON_ONCE(!static_key_linked(key));
return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}
@@ -514,7 +520,8 @@ static void __jump_label_mod_update(struct static_key *key)
stop = __stop___jump_table;
else
stop = m->jump_entries + m->num_jump_entries;
- __jump_label_update(key, mod->entries, stop);
+ __jump_label_update(key, mod->entries, stop,
+ m && m->state == MODULE_STATE_COMING);
}
}
@@ -560,12 +567,15 @@ static int jump_label_add_module(struct module *mod)
for (iter = iter_start; iter < iter_stop; iter++) {
struct static_key *iterk;
+ if (within_module_init(jump_entry_code(iter), mod))
+ jump_entry_set_init(iter);
+
iterk = jump_entry_key(iter);
if (iterk == key)
continue;
key = iterk;
- if (within_module(iter->key, mod)) {
+ if (within_module((unsigned long)key, mod)) {
static_key_set_entries(key, iter);
continue;
}
@@ -595,7 +605,7 @@ static int jump_label_add_module(struct module *mod)
/* Only update if we've changed from our initial state */
if (jump_label_type(iter) != jump_label_init_type(iter))
- __jump_label_update(key, iter, iter_stop);
+ __jump_label_update(key, iter, iter_stop, true);
}
return 0;
@@ -615,7 +625,7 @@ static void jump_label_del_module(struct module *mod)
key = jump_entry_key(iter);
- if (within_module(iter->key, mod))
+ if (within_module((unsigned long)key, mod))
continue;
/* No memory during module load */
@@ -651,19 +661,6 @@ static void jump_label_del_module(struct module *mod)
}
}
-/* Disable any jump label entries in module init code */
-static void jump_label_invalidate_module_init(struct module *mod)
-{
- struct jump_entry *iter_start = mod->jump_entries;
- struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
- struct jump_entry *iter;
-
- for (iter = iter_start; iter < iter_stop; iter++) {
- if (within_module_init(iter->code, mod))
- iter->code = 0;
- }
-}
-
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
void *data)
@@ -685,9 +682,6 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
case MODULE_STATE_GOING:
jump_label_del_module(mod);
break;
- case MODULE_STATE_LIVE:
- jump_label_invalidate_module_init(mod);
- break;
}
jump_label_unlock();
@@ -757,7 +751,8 @@ static void jump_label_update(struct static_key *key)
entry = static_key_entries(key);
/* if there are no users, entry can be NULL */
if (entry)
- __jump_label_update(key, entry, stop);
+ __jump_label_update(key, entry, stop,
+ system_state < SYSTEM_RUNNING);
}
#ifdef CONFIG_STATIC_KEYS_SELFTEST
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 23a83a4da38a..86ef06d3dbe3 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -471,6 +471,10 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
}
}
+ /* Ensure that these pages are decrypted if SME is enabled. */
+ if (pages)
+ arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);
+
return pages;
}
@@ -867,6 +871,7 @@ static int kimage_load_crash_segment(struct kimage *image,
result = -ENOMEM;
goto out;
}
+ arch_kexec_post_alloc_pages(page_address(page), 1, 0);
ptr = kmap(page);
ptr += maddr & ~PAGE_MASK;
mchunk = min_t(size_t, mbytes,
@@ -884,6 +889,7 @@ static int kimage_load_crash_segment(struct kimage *image,
result = copy_from_user(ptr, buf, uchunk);
kexec_flush_icache_page(page);
kunmap(page);
+ arch_kexec_pre_free_pages(page_address(page), 1);
if (result) {
result = -EFAULT;
goto out;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ab257be4d924..90e98e233647 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -546,8 +546,14 @@ static void do_free_cleaned_kprobes(void)
struct optimized_kprobe *op, *tmp;
list_for_each_entry_safe(op, tmp, &freeing_list, list) {
- BUG_ON(!kprobe_unused(&op->kp));
list_del_init(&op->list);
+ if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
+ /*
+ * This must not happen, but if there is a kprobe
+ * still in use, keep it on kprobes hash list.
+ */
+ continue;
+ }
free_aggr_kprobe(&op->kp);
}
}
@@ -700,11 +706,11 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
}
/* Cancel unoptimizing for reusing */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
{
struct optimized_kprobe *op;
+ int ret;
- BUG_ON(!kprobe_unused(ap));
/*
* Unused kprobe MUST be on the way of delayed unoptimizing (means
* there is still a relative jump) and disabled.
@@ -714,8 +720,12 @@ static void reuse_unused_kprobe(struct kprobe *ap)
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
/* Optimize it again (remove from op->list) */
- BUG_ON(!kprobe_optready(ap));
+ ret = kprobe_optready(ap);
+ if (ret)
+ return ret;
+
optimize_kprobe(ap);
+ return 0;
}
/* Remove optimized instructions */
@@ -940,11 +950,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
#define kprobe_disarmed(p) kprobe_disabled(p)
#define wait_for_kprobe_optimizer() do {} while (0)
-/* There should be no unused kprobes can be reused without optimization */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
{
+ /*
+ * If the optimized kprobe is NOT supported, the aggr kprobe is
+ * released at the same time that the last aggregated kprobe is
+ * unregistered.
+ * Thus there should be no chance to reuse an unused kprobe.
+ */
printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
- BUG_ON(kprobe_unused(ap));
+ return -EINVAL;
}
static void free_aggr_kprobe(struct kprobe *p)
@@ -1259,8 +1274,6 @@ NOKPROBE_SYMBOL(cleanup_rp_inst);
/* Add the new probe to ap->list */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
- BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
-
if (p->post_handler)
unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
@@ -1318,9 +1331,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
goto out;
}
init_aggr_kprobe(ap, orig_p);
- } else if (kprobe_unused(ap))
+ } else if (kprobe_unused(ap)) {
/* This probe is going to die. Rescue it */
- reuse_unused_kprobe(ap);
+ ret = reuse_unused_kprobe(ap);
+ if (ret)
+ goto out;
+ }
if (kprobe_gone(ap)) {
/*
@@ -1704,7 +1720,6 @@ noclean:
return 0;
disarmed:
- BUG_ON(!kprobe_disarmed(ap));
hlist_del_rcu(&ap->hlist);
return 0;
}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index dd13f865ad40..1efada2dd9dd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -138,7 +138,7 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
* get freed - this significantly simplifies the debugging code.
*/
unsigned long nr_lock_classes;
-static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
@@ -1391,7 +1391,9 @@ static void print_lock_class_header(struct lock_class *class, int depth)
printk("%*s->", depth, "");
print_lock_name(class);
- printk(KERN_CONT " ops: %lu", class->ops);
+#ifdef CONFIG_DEBUG_LOCKDEP
+ printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
+#endif
printk(KERN_CONT " {\n");
for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
@@ -2148,76 +2150,6 @@ static int check_no_collision(struct task_struct *curr,
}
/*
- * This is for building a chain between just two different classes,
- * instead of adding a new hlock upon current, which is done by
- * add_chain_cache().
- *
- * This can be called in any context with two classes, while
- * add_chain_cache() must be done within the lock owener's context
- * since it uses hlock which might be racy in another context.
- */
-static inline int add_chain_cache_classes(unsigned int prev,
- unsigned int next,
- unsigned int irq_context,
- u64 chain_key)
-{
- struct hlist_head *hash_head = chainhashentry(chain_key);
- struct lock_chain *chain;
-
- /*
- * Allocate a new chain entry from the static array, and add
- * it to the hash:
- */
-
- /*
- * We might need to take the graph lock, ensure we've got IRQs
- * disabled to make this an IRQ-safe lock.. for recursion reasons
- * lockdep won't complain about its own locking errors.
- */
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
- return 0;
-
- if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
- if (!debug_locks_off_graph_unlock())
- return 0;
-
- print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
- dump_stack();
- return 0;
- }
-
- chain = lock_chains + nr_lock_chains++;
- chain->chain_key = chain_key;
- chain->irq_context = irq_context;
- chain->depth = 2;
- if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
- chain->base = nr_chain_hlocks;
- nr_chain_hlocks += chain->depth;
- chain_hlocks[chain->base] = prev - 1;
- chain_hlocks[chain->base + 1] = next -1;
- }
-#ifdef CONFIG_DEBUG_LOCKDEP
- /*
- * Important for check_no_collision().
- */
- else {
- if (!debug_locks_off_graph_unlock())
- return 0;
-
- print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
- dump_stack();
- return 0;
- }
-#endif
-
- hlist_add_head_rcu(&chain->entry, hash_head);
- debug_atomic_inc(chain_lookup_misses);
- inc_chains();
-
- return 1;
-}
-
-/*
* Adds a dependency chain into chain hashtable. And must be called with
* graph_lock held.
*
@@ -3262,6 +3194,10 @@ static int __lock_is_held(const struct lockdep_map *lock, int read);
/*
* This gets called for every mutex_lock*()/spin_lock*() operation.
* We maintain the dependency maps and validate the locking attempt:
+ *
+ * The callers must make sure that IRQs are disabled before calling it,
+ * otherwise we could get an interrupt which would want to take locks,
+ * which would end up in lockdep again.
*/
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, int hardirqs_off,
@@ -3279,14 +3215,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (unlikely(!debug_locks))
return 0;
- /*
- * Lockdep should run with IRQs disabled, otherwise we could
- * get an interrupt which would want to take locks, which would
- * end up in lockdep and have you got a head-ache already?
- */
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
- return 0;
-
if (!prove_locking || lock->key == &__lockdep_no_validate__)
check = 0;
@@ -3300,7 +3228,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (!class)
return 0;
}
- atomic_inc((atomic_t *)&class->ops);
+
+ debug_class_ops_inc(class);
+
if (very_verbose(class)) {
printk("\nacquire class [%px] %s", class->key, class->name);
if (class->name_version > 1)
@@ -3543,6 +3473,9 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
{
struct held_lock *hlock;
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ return 0;
+
for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
if (!__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass,
@@ -3696,6 +3629,13 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
+ /*
+ * The most likely case is when the unlock is on the innermost
+ * lock. In this case, we are done!
+ */
+ if (i == depth-1)
+ return 1;
+
if (reacquire_held_locks(curr, depth, i + 1))
return 0;
@@ -3703,10 +3643,14 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
* We had N bottles of beer on the wall, we drank one, but now
* there's not N-1 bottles of beer left on the wall...
*/
- if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
- return 0;
+ DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
- return 1;
+ /*
+ * Since reacquire_held_locks() would have called check_chain_key()
+ * indirectly via __lock_acquire(), we don't need to do it again
+ * on return.
+ */
+ return 0;
}
static int __lock_is_held(const struct lockdep_map *lock, int read)
@@ -4122,7 +4066,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
- if (unlikely(!lock_stat))
+ if (unlikely(!lock_stat || !debug_locks))
return;
if (unlikely(current->lockdep_recursion))
@@ -4142,7 +4086,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
- if (unlikely(!lock_stat))
+ if (unlikely(!lock_stat || !debug_locks))
return;
if (unlikely(current->lockdep_recursion))
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index d459d624ba2a..88c847a41c8a 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -152,9 +152,15 @@ struct lockdep_stats {
int nr_find_usage_forwards_recursions;
int nr_find_usage_backwards_checks;
int nr_find_usage_backwards_recursions;
+
+ /*
+ * Per lock class locking operation stat counts
+ */
+ unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};
DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
+extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
#define __debug_atomic_inc(ptr) \
this_cpu_inc(lockdep_stats.ptr);
@@ -179,9 +185,30 @@ DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
} \
__total; \
})
+
+static inline void debug_class_ops_inc(struct lock_class *class)
+{
+ int idx;
+
+ idx = class - lock_classes;
+ __debug_atomic_inc(lock_class_ops[idx]);
+}
+
+static inline unsigned long debug_class_ops_read(struct lock_class *class)
+{
+ int idx, cpu;
+ unsigned long ops = 0;
+
+ idx = class - lock_classes;
+ for_each_possible_cpu(cpu)
+ ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
+ return ops;
+}
+
#else
# define __debug_atomic_inc(ptr) do { } while (0)
# define debug_atomic_inc(ptr) do { } while (0)
# define debug_atomic_dec(ptr) do { } while (0)
# define debug_atomic_read(ptr) 0
+# define debug_class_ops_inc(ptr) do { } while (0)
#endif
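
The counter added here follows the standard per-CPU statistics idiom; in isolation, with a hypothetical counter, it looks like this: updates are lock-free and CPU-local, while reads sum over all possible CPUs and are only approximate while updates are in flight:

	DEFINE_PER_CPU(unsigned long, foo_events);

	static inline void foo_event_inc(void)
	{
		this_cpu_inc(foo_events);	/* no shared cacheline bouncing */
	}

	static unsigned long foo_event_read(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu(foo_events, cpu);
		return sum;			/* approximate snapshot */
	}
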
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 3dd980dfba2d..3d31f9b0059e 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -68,7 +68,7 @@ static int l_show(struct seq_file *m, void *v)
seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
- seq_printf(m, " OPS:%8ld", class->ops);
+ seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
#endif
#ifdef CONFIG_PROVE_LOCKING
seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index bfaeb05123ff..8a8c3c208c5e 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -74,12 +74,24 @@
*/
#include "mcs_spinlock.h"
+#define MAX_NODES 4
+/*
+ * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in
+ * size and four of them will fit nicely in one 64-byte cacheline. For
+ * pvqspinlock, however, we need more space for extra data. To accommodate
+ * that, we insert two more long words to pad it up to 32 bytes. IOW, only
+ * two of them can fit in a cacheline in this case. That is OK as it is rare
+ * to have more than 2 levels of slowpath nesting in actual use. We don't
+ * want to penalize pvqspinlocks to optimize for a rare case in native
+ * qspinlocks.
+ */
+struct qnode {
+ struct mcs_spinlock mcs;
#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define MAX_NODES 8
-#else
-#define MAX_NODES 4
+ long reserved[2];
#endif
+};
/*
* The pending bit spinning loop count.
@@ -101,7 +113,7 @@
*
* PV doubles the storage and uses the second cacheline for PV state.
*/
-static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
+static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]);
/*
* We must be able to distinguish between no-tail and the tail at 0:0,
@@ -126,7 +138,13 @@ static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
- return per_cpu_ptr(&mcs_nodes[idx], cpu);
+ return per_cpu_ptr(&qnodes[idx].mcs, cpu);
+}
+
+static inline __pure
+struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx)
+{
+ return &((struct qnode *)base + idx)->mcs;
}
#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
@@ -232,6 +250,20 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
#endif /* _Q_PENDING_BITS == 8 */
/**
+ * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
+ * @lock : Pointer to queued spinlock structure
+ * Return: The previous lock value
+ *
+ * *,*,* -> *,1,*
+ */
+#ifndef queued_fetch_set_pending_acquire
+static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
+{
+ return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
+}
+#endif
+
+/**
* set_locked - Set the lock bit and own the lock
* @lock: Pointer to queued spinlock structure
*
@@ -326,43 +358,48 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
/*
* trylock || pending
*
- * 0,0,0 -> 0,0,1 ; trylock
- * 0,0,1 -> 0,1,1 ; pending
+ * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
*/
- val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
- if (!(val & ~_Q_LOCKED_MASK)) {
- /*
- * We're pending, wait for the owner to go away.
- *
- * *,1,1 -> *,1,0
- *
- * this wait loop must be a load-acquire such that we match the
- * store-release that clears the locked bit and create lock
- * sequentiality; this is because not all
- * clear_pending_set_locked() implementations imply full
- * barriers.
- */
- if (val & _Q_LOCKED_MASK) {
- atomic_cond_read_acquire(&lock->val,
- !(VAL & _Q_LOCKED_MASK));
- }
+ val = queued_fetch_set_pending_acquire(lock);
- /*
- * take ownership and clear the pending bit.
- *
- * *,1,0 -> *,0,1
- */
- clear_pending_set_locked(lock);
- qstat_inc(qstat_lock_pending, true);
- return;
+ /*
+ * If we observe contention, there is a concurrent locker.
+ *
+ * Undo and queue; our setting of PENDING might have made the
+ * n,0,0 -> 0,0,0 transition fail and it will now be waiting
+ * on @next to become !NULL.
+ */
+ if (unlikely(val & ~_Q_LOCKED_MASK)) {
+
+ /* Undo PENDING if we set it. */
+ if (!(val & _Q_PENDING_MASK))
+ clear_pending(lock);
+
+ goto queue;
}
/*
- * If pending was clear but there are waiters in the queue, then
- * we need to undo our setting of pending before we queue ourselves.
+ * We're pending, wait for the owner to go away.
+ *
+ * 0,1,1 -> 0,1,0
+ *
+ * this wait loop must be a load-acquire such that we match the
+ * store-release that clears the locked bit and create lock
+ * sequentiality; this is because not all
+ * clear_pending_set_locked() implementations imply full
+ * barriers.
+ */
+ if (val & _Q_LOCKED_MASK)
+ atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));
+
+ /*
+ * take ownership and clear the pending bit.
+ *
+ * 0,1,0 -> 0,0,1
*/
- if (!(val & _Q_PENDING_MASK))
- clear_pending(lock);
+ clear_pending_set_locked(lock);
+ qstat_inc(qstat_lock_pending, true);
+ return;
/*
* End of pending bit optimistic spinning and beginning of MCS
@@ -371,11 +408,16 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
queue:
qstat_inc(qstat_lock_slowpath, true);
pv_queue:
- node = this_cpu_ptr(&mcs_nodes[0]);
+ node = this_cpu_ptr(&qnodes[0].mcs);
idx = node->count++;
tail = encode_tail(smp_processor_id(), idx);
- node += idx;
+ node = grab_mcs_node(node, idx);
+
+ /*
+ * Keep counts of non-zero index values:
+ */
+ qstat_inc(qstat_lock_idx1 + idx - 1, idx);
/*
* Ensure that we increment the head node->count before initialising
@@ -476,16 +518,25 @@ locked:
*/
/*
- * In the PV case we might already have _Q_LOCKED_VAL set.
+ * In the PV case we might already have _Q_LOCKED_VAL set, because
+ * of lock stealing; therefore we must also allow:
+ *
+ * n,0,1 -> 0,0,1
*
- * The atomic_cond_read_acquire() call above has provided the
- * necessary acquire semantics required for locking.
+ * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
+ * above wait condition, therefore any concurrent setting of
+ * PENDING will make the uncontended transition fail.
*/
- if (((val & _Q_TAIL_MASK) == tail) &&
- atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
- goto release; /* No contention */
+ if ((val & _Q_TAIL_MASK) == tail) {
+ if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
+ goto release; /* No contention */
+ }
- /* Either somebody is queued behind us or _Q_PENDING_VAL is set */
+ /*
+ * Either somebody is queued behind us or _Q_PENDING_VAL got set
+ * which will then detect the remaining tail and queue behind us
+ * ensuring we'll see a @next.
+ */
set_locked(lock);
/*
@@ -501,7 +552,7 @@ release:
/*
* release the node
*/
- __this_cpu_dec(mcs_nodes[0].count);
+ __this_cpu_dec(qnodes[0].mcs.count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
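
For readers following the (tail, pending, locked) triples in the comments, a hedged decoder of the lock word; this layout assumes a configuration with fewer than 16K CPUs, where the pending field is a full byte:

	static inline void q_decode(u32 val, u32 *tail, u32 *pend, u32 *lck)
	{
		*lck  = val & 0xff;		/* bits  0-7:  _Q_LOCKED_MASK  */
		*pend = (val >> 8) & 0xff;	/* bits  8-15: _Q_PENDING_MASK */
		*tail = val >> 16;		/* bits 16-31: _Q_TAIL_MASK    */
	}
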
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 5a0cf5f9008c..0130e488ebfe 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -49,8 +49,6 @@ enum vcpu_state {
struct pv_node {
struct mcs_spinlock mcs;
- struct mcs_spinlock __res[3];
-
int cpu;
u8 state;
};
@@ -281,7 +279,7 @@ static void pv_init_node(struct mcs_spinlock *node)
{
struct pv_node *pn = (struct pv_node *)node;
- BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));
+ BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));
pn->cpu = smp_processor_id();
pn->state = vcpu_running;
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index 6bd78c0740fc..42d3d8dc8f49 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -55,6 +55,9 @@ enum qlock_stats {
qstat_pv_wait_node,
qstat_lock_pending,
qstat_lock_slowpath,
+ qstat_lock_idx1,
+ qstat_lock_idx2,
+ qstat_lock_idx3,
qstat_num, /* Total number of statistical counters */
qstat_reset_cnts = qstat_num,
};
@@ -82,6 +85,9 @@ static const char * const qstat_names[qstat_num + 1] = {
[qstat_pv_wait_node] = "pv_wait_node",
[qstat_lock_pending] = "lock_pending",
[qstat_lock_slowpath] = "lock_slowpath",
+ [qstat_lock_idx1] = "lock_index1",
+ [qstat_lock_idx2] = "lock_index2",
+ [qstat_lock_idx3] = "lock_index3",
[qstat_reset_cnts] = "reset_counters",
};
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2823d4163a37..581edcc63c26 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1485,9 +1485,9 @@ void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
__rt_mutex_lock(lock, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
-#endif
-#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#else /* !CONFIG_DEBUG_LOCK_ALLOC */
+
/**
* rt_mutex_lock - lock a rt_mutex
*
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 3064c50e181e..09b180063ee1 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -180,7 +180,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
* but it gives the spinners an early indication that the
* readers now have the lock.
*/
- rwsem_set_reader_owned(sem);
+ __rwsem_set_reader_owned(sem, waiter->task);
}
/*
@@ -233,8 +233,19 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
waiter.type = RWSEM_WAITING_FOR_READ;
raw_spin_lock_irq(&sem->wait_lock);
- if (list_empty(&sem->wait_list))
+ if (list_empty(&sem->wait_list)) {
+ /*
+ * In case the wait queue is empty and the lock isn't owned
+ * by a writer, this reader can exit the slowpath and return
+ * immediately as its RWSEM_ACTIVE_READ_BIAS has already
+ * been set in the count.
+ */
+ if (atomic_long_read(&sem->count) >= 0) {
+ raw_spin_unlock_irq(&sem->wait_lock);
+ return sem;
+ }
adjustment += RWSEM_WAITING_BIAS;
+ }
list_add_tail(&waiter.list, &sem->wait_list);
/* we're now waiting on the lock, but no longer actively locking */
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 776308d2fa9e..e586f0d03ad3 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -117,8 +117,9 @@ EXPORT_SYMBOL(down_write_trylock);
void up_read(struct rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, 1, _RET_IP_);
- DEBUG_RWSEMS_WARN_ON(sem->owner != RWSEM_READER_OWNED);
+ DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
+ rwsem_clear_reader_owned(sem);
__up_read(sem);
}
@@ -181,7 +182,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
might_sleep();
__down_read(sem);
- rwsem_set_reader_owned(sem);
+ __rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);
@@ -215,7 +216,7 @@ EXPORT_SYMBOL(down_write_killable_nested);
void up_read_non_owner(struct rw_semaphore *sem)
{
- DEBUG_RWSEMS_WARN_ON(sem->owner != RWSEM_READER_OWNED);
+ DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
__up_read(sem);
}
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
index b9d0e72aa80f..bad2bca0268b 100644
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -1,24 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * The owner field of the rw_semaphore structure will be set to
- * RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear
- * the owner field when it unlocks. A reader, on the other hand, will
- * not touch the owner field when it unlocks.
+ * The least significant 2 bits of the owner value have the following
+ * meanings when set.
+ * - RWSEM_READER_OWNED (bit 0): The rwsem is owned by readers
+ * - RWSEM_ANONYMOUSLY_OWNED (bit 1): The rwsem is anonymously owned,
+ * i.e. the owner(s) cannot be readily determined. It can be
+ * reader-owned, or the owning writer is indeterminate.
*
- * In essence, the owner field now has the following 4 states:
- * 1) 0
- * - lock is free or the owner hasn't set the field yet
- * 2) RWSEM_READER_OWNED
- * - lock is currently or previously owned by readers (lock is free
- * or not set by owner yet)
- * 3) RWSEM_ANONYMOUSLY_OWNED bit set with some other bits set as well
- * - lock is owned by an anonymous writer, so spinning on the lock
- * owner should be disabled.
- * 4) Other non-zero value
- * - a writer owns the lock and other writers can spin on the lock owner.
+ * When a writer acquires a rwsem, it puts its task_struct pointer
+ * into the owner field. It is cleared after an unlock.
+ *
+ * When a reader acquires a rwsem, it will also put its task_struct
+ * pointer into the owner field with both the RWSEM_READER_OWNED and
+ * RWSEM_ANONYMOUSLY_OWNED bits set. On unlock, the owner field will
+ * largely be left untouched. So for a free or reader-owned rwsem,
+ * the owner value may contain information about the last reader that
+ * acquired the rwsem. The anonymous bit is set because that particular
+ * reader may or may not still own the lock.
+ *
+ * That information may be helpful in debugging cases where the system
+ * seems to hang on a reader owned rwsem especially if only one reader
+ * is involved. Ideally we would like to track all the readers that own
+ * a rwsem, but the overhead is simply too big.
*/
-#define RWSEM_ANONYMOUSLY_OWNED (1UL << 0)
-#define RWSEM_READER_OWNED ((struct task_struct *)RWSEM_ANONYMOUSLY_OWNED)
+#define RWSEM_READER_OWNED (1UL << 0)
+#define RWSEM_ANONYMOUSLY_OWNED (1UL << 1)
#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c)
@@ -44,15 +50,26 @@ static inline void rwsem_clear_owner(struct rw_semaphore *sem)
WRITE_ONCE(sem->owner, NULL);
}
+/*
+ * The task_struct pointer of the last owning reader will be left in
+ * the owner field.
+ *
+ * Note that the owner value just indicates the task has owned the rwsem
+ * previously, it may not be the real owner or one of the real owners
+ * anymore when that field is examined, so take it with a grain of salt.
+ */
+static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
+ struct task_struct *owner)
+{
+ unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED
+ | RWSEM_ANONYMOUSLY_OWNED;
+
+ WRITE_ONCE(sem->owner, (struct task_struct *)val);
+}
+
static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
- /*
- * We check the owner value first to make sure that we will only
- * do a write to the rwsem cacheline when it is really necessary
- * to minimize cacheline contention.
- */
- if (READ_ONCE(sem->owner) != RWSEM_READER_OWNED)
- WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
+ __rwsem_set_reader_owned(sem, current);
}
/*
@@ -72,6 +89,25 @@ static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
{
return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
}
+
+#ifdef CONFIG_DEBUG_RWSEMS
+/*
+ * With CONFIG_DEBUG_RWSEMS configured, this makes sure that a task
+ * pointer left in the owner field of a reader-owned rwsem is the
+ * real owner or one of the real owners. The only exception is when the
+ * unlock is done by up_read_non_owner().
+ */
+#define rwsem_clear_reader_owned rwsem_clear_reader_owned
+static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
+{
+ unsigned long val = (unsigned long)current | RWSEM_READER_OWNED
+ | RWSEM_ANONYMOUSLY_OWNED;
+ if (READ_ONCE(sem->owner) == (struct task_struct *)val)
+ cmpxchg_relaxed((unsigned long *)&sem->owner, val,
+ RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED);
+}
+#endif
+
#else
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
@@ -81,7 +117,18 @@ static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
}
+static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
+ struct task_struct *owner)
+{
+}
+
static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
}
#endif
+
+#ifndef rwsem_clear_reader_owned
+static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
+{
+}
+#endif
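
Since task_struct pointers are at least word-aligned, the two flag bits never collide with the pointer itself. A hypothetical decoder for the packed owner word described above:

	static inline struct task_struct *rwsem_owner_task(struct rw_semaphore *sem)
	{
		unsigned long val = (unsigned long)READ_ONCE(sem->owner);

		return (struct task_struct *)
			(val & ~(RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED));
	}
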
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 0be047dbd897..65a3b7e55b9f 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -260,7 +260,7 @@ static void test_cycle_work(struct work_struct *work)
{
struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
struct ww_acquire_ctx ctx;
- int err;
+ int err, erra = 0;
ww_acquire_init(&ctx, &ww_class);
ww_mutex_lock(&cycle->a_mutex, &ctx);
@@ -270,17 +270,19 @@ static void test_cycle_work(struct work_struct *work)
err = ww_mutex_lock(cycle->b_mutex, &ctx);
if (err == -EDEADLK) {
+ err = 0;
ww_mutex_unlock(&cycle->a_mutex);
ww_mutex_lock_slow(cycle->b_mutex, &ctx);
- err = ww_mutex_lock(&cycle->a_mutex, &ctx);
+ erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
}
if (!err)
ww_mutex_unlock(cycle->b_mutex);
- ww_mutex_unlock(&cycle->a_mutex);
+ if (!erra)
+ ww_mutex_unlock(&cycle->a_mutex);
ww_acquire_fini(&ctx);
- cycle->result = err;
+ cycle->result = err ?: erra;
}
static int __test_cycle(unsigned int nthreads)
diff --git a/kernel/module.c b/kernel/module.c
index 6746c85511fe..49a405891587 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3317,6 +3317,15 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
ndx = find_sec(info, ".data..ro_after_init");
if (ndx)
info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
+ /*
+ * Mark the __jump_table section as ro_after_init as well: these data
+ * structures are never modified, with the exception of entries that
+ * refer to code in the __init section, which are annotated as such
+ * at module load time.
+ */
+ ndx = find_sec(info, "__jump_table");
+ if (ndx)
+ info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
/* Determine total sizes, and put offsets in sh_entsize. For now
this is done generically; there doesn't appear to be any
diff --git a/kernel/pid.c b/kernel/pid.c
index de1cfc4f75a2..cdf63e53a014 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
idr_preload_end();
if (nr < 0) {
- retval = nr;
+ retval = (nr == -ENOSPC) ? -EAGAIN : nr;
goto out_free;
}
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 2a2ac53d8b8b..aa6e72fb7c08 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -216,7 +216,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
idr_for_each_entry_continue(&pid_ns->idr, pid, nr) {
task = pid_task(pid, PIDTYPE_PID);
if (task && !__fatal_signal_pending(task))
- send_sig_info(SIGKILL, SEND_SIG_FORCED, task);
+ group_send_sig_info(SIGKILL, SEND_SIG_PRIV, task, PIDTYPE_MAX);
}
read_unlock(&tasklist_lock);
rcu_read_unlock();
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 7381d49a44db..4b6a54da7e65 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -96,7 +96,7 @@ static int try_to_freeze_tasks(bool user_only)
if (wq_busy)
show_workqueue_state();
- if (!wakeup) {
+ if (!wakeup || pm_debug_messages_on) {
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
if (p != current && !freezer_should_skip(p)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 5342f6fc022e..0bd595a0b610 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -63,6 +63,12 @@ static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head);
enum s2idle_states __read_mostly s2idle_state;
static DEFINE_RAW_SPINLOCK(s2idle_lock);
+bool pm_suspend_via_s2idle(void)
+{
+ return mem_sleep_current == PM_SUSPEND_TO_IDLE;
+}
+EXPORT_SYMBOL_GPL(pm_suspend_via_s2idle);
+
void s2idle_set_ops(const struct platform_s2idle_ops *ops)
{
lock_system_sleep();
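
A hedged consumer sketch for the new helper: a driver picking a lighter suspend path when the system suspends via suspend-to-idle (the foo_* callbacks are hypothetical):

	static int foo_suspend(struct device *dev)
	{
		if (pm_suspend_via_s2idle())
			return foo_enter_low_power(dev);
		return foo_full_suspend(dev);
	}
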
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 9bf5404397e0..b77150ad1965 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -16,6 +16,8 @@
* 01Mar01 Andrew Morton
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
@@ -192,16 +194,7 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
return 0;
}
-/*
- * Number of registered extended console drivers.
- *
- * If extended consoles are present, in-kernel cont reassembly is disabled
- * and each fragment is stored as a separate log entry with proper
- * continuation flag so that every emitted message has full metadata. This
- * doesn't change the result for regular consoles or /proc/kmsg. For
- * /dev/kmsg, as long as the reader concatenates messages according to
- * consecutive continuation flags, the end result should be the same too.
- */
+/* Number of registered extended console drivers. */
static int nr_ext_console_drivers;
/*
@@ -423,6 +416,7 @@ static u32 log_next_idx;
/* the next printk record to write to the console */
static u64 console_seq;
static u32 console_idx;
+static u64 exclusive_console_stop_seq;
/* the next printk record to read after the last 'clear' command */
static u64 clear_seq;
@@ -437,6 +431,7 @@ static u32 clear_idx;
/* record buffer */
#define LOG_ALIGN __alignof__(struct printk_log)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+#define LOG_BUF_LEN_MAX (u32)(1 << 31)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
@@ -1037,18 +1032,28 @@ void log_buf_vmcoreinfo_setup(void)
static unsigned long __initdata new_log_buf_len;
/* we practice scaling the ring buffer by powers of 2 */
-static void __init log_buf_len_update(unsigned size)
+static void __init log_buf_len_update(u64 size)
{
+ if (size > (u64)LOG_BUF_LEN_MAX) {
+ size = (u64)LOG_BUF_LEN_MAX;
+ pr_err("log_buf over 2G is not supported.\n");
+ }
+
if (size)
size = roundup_pow_of_two(size);
if (size > log_buf_len)
- new_log_buf_len = size;
+ new_log_buf_len = (unsigned long)size;
}
/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
- unsigned size = memparse(str, &str);
+ u64 size;
+
+ if (!str)
+ return -EINVAL;
+
+ size = memparse(str, &str);
log_buf_len_update(size);
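log_buf_len= is now parsed into a u64 and clamped, so an over-large request such as log_buf_len=3G degrades to the 2 GiB ceiling instead of truncating inside a 32-bit variable. A standalone recreation of the clamp:

	#include <stdint.h>
	#include <stdio.h>

	#define LOG_BUF_LEN_MAX ((uint32_t)1 << 31)

	static uint64_t clamp_log_buf_len(uint64_t size)
	{
		if (size > (uint64_t)LOG_BUF_LEN_MAX)
			size = LOG_BUF_LEN_MAX;	/* the "log_buf over 2G" case */
		return size;
	}

	int main(void)
	{
		/* A 3 GiB request clamps to 2147483648 bytes. */
		printf("%llu\n",
		       (unsigned long long)clamp_log_buf_len(3ULL << 30));
		return 0;
	}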
@@ -1093,7 +1098,7 @@ void __init setup_log_buf(int early)
{
unsigned long flags;
char *new_log_buf;
- int free;
+ unsigned int free;
if (log_buf != __log_buf)
return;
@@ -1113,7 +1118,7 @@ void __init setup_log_buf(int early)
}
if (unlikely(!new_log_buf)) {
- pr_err("log_buf_len: %ld bytes not available\n",
+ pr_err("log_buf_len: %lu bytes not available\n",
new_log_buf_len);
return;
}
@@ -1126,8 +1131,8 @@ void __init setup_log_buf(int early)
memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
logbuf_unlock_irqrestore(flags);
- pr_info("log_buf_len: %d bytes\n", log_buf_len);
- pr_info("early log buf free: %d(%d%%)\n",
+ pr_info("log_buf_len: %u bytes\n", log_buf_len);
+ pr_info("early log buf free: %u(%u%%)\n",
free, (free * 100) / __LOG_BUF_LEN);
}
@@ -1767,12 +1772,8 @@ static void cont_flush(void)
static bool cont_add(int facility, int level, enum log_flags flags, const char *text, size_t len)
{
- /*
- * If ext consoles are present, flush and skip in-kernel
- * continuation. See nr_ext_console_drivers definition. Also, if
- * the line gets too long, split it up in separate records.
- */
- if (nr_ext_console_drivers || cont.len + len > sizeof(cont.buf)) {
+ /* If the line gets too long, split it up in separate records. */
+ if (cont.len + len > sizeof(cont.buf)) {
cont_flush();
return false;
}
@@ -1795,9 +1796,6 @@ static bool cont_add(int facility, int level, enum log_flags flags, const char *
cont_flush();
}
- if (cont.len > (sizeof(cont.buf) * 80) / 100)
- cont_flush();
-
return true;
}
@@ -1889,8 +1887,9 @@ asmlinkage int vprintk_emit(int facility, int level,
const char *fmt, va_list args)
{
int printed_len;
- bool in_sched = false;
+ bool in_sched = false, pending_output;
unsigned long flags;
+ u64 curr_log_seq;
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
@@ -1902,11 +1901,13 @@ asmlinkage int vprintk_emit(int facility, int level,
/* This stops the holder of console_sem just where we want him */
logbuf_lock_irqsave(flags);
+ curr_log_seq = log_next_seq;
printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
+ pending_output = (curr_log_seq != log_next_seq);
logbuf_unlock_irqrestore(flags);
/* If called from the scheduler, we can not call up(). */
- if (!in_sched) {
+ if (!in_sched && pending_output) {
/*
* Disable preemption to avoid being preempted while holding
* console_sem which would prevent anyone from printing to
@@ -1923,7 +1924,8 @@ asmlinkage int vprintk_emit(int facility, int level,
preempt_enable();
}
- wake_up_klogd();
+ if (pending_output)
+ wake_up_klogd();
return printed_len;
}
EXPORT_SYMBOL(vprintk_emit);
@@ -2009,6 +2011,7 @@ static u64 syslog_seq;
static u32 syslog_idx;
static u64 console_seq;
static u32 console_idx;
+static u64 exclusive_console_stop_seq;
static u64 log_first_seq;
static u32 log_first_idx;
static u64 log_next_seq;
@@ -2351,8 +2354,9 @@ again:
printk_safe_enter_irqsave(flags);
raw_spin_lock(&logbuf_lock);
if (console_seq < log_first_seq) {
- len = sprintf(text, "** %u printk messages dropped **\n",
- (unsigned)(log_first_seq - console_seq));
+ len = sprintf(text,
+ "** %llu printk messages dropped **\n",
+ log_first_seq - console_seq);
/* messages are gone, move to first one */
console_seq = log_first_seq;
@@ -2376,6 +2380,12 @@ skip:
goto skip;
}
+ /* Output to all consoles once old messages replayed. */
+ if (unlikely(exclusive_console &&
+ console_seq >= exclusive_console_stop_seq)) {
+ exclusive_console = NULL;
+ }
+
len += msg_print_text(msg,
console_msg_format & MSG_FORMAT_SYSLOG,
text + len,
@@ -2418,10 +2428,6 @@ skip:
console_locked = 0;
- /* Release the exclusive_console once it is used */
- if (unlikely(exclusive_console))
- exclusive_console = NULL;
-
raw_spin_unlock(&logbuf_lock);
up_console_sem();
@@ -2688,8 +2694,7 @@ void register_console(struct console *newcon)
}
if (newcon->flags & CON_EXTENDED)
- if (!nr_ext_console_drivers++)
- pr_info("printk: continuation disabled due to ext consoles, expect more fragments in /dev/kmsg\n");
+ nr_ext_console_drivers++;
if (newcon->flags & CON_PRINTBUFFER) {
/*
@@ -2699,13 +2704,18 @@ void register_console(struct console *newcon)
logbuf_lock_irqsave(flags);
console_seq = syslog_seq;
console_idx = syslog_idx;
- logbuf_unlock_irqrestore(flags);
/*
* We're about to replay the log buffer. Only do this to the
* just-registered console to avoid excessive message spam to
* the already-registered consoles.
+ *
+ * Set exclusive_console with disabled interrupts to reduce
+ * race window with eventual console_flush_on_panic() that
+ * ignores console_lock.
*/
exclusive_console = newcon;
+ exclusive_console_stop_seq = console_seq;
+ logbuf_unlock_irqrestore(flags);
}
console_unlock();
console_sysfs_notify();
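The register_console() changes matter for consoles carrying CON_PRINTBUFFER: the backlog replay to the new console now stops exactly at the sequence number captured at registration (exclusive_console_stop_seq), and the exclusive window is armed under logbuf_lock to narrow the race with console_flush_on_panic(). A hedged sketch of such a console (all names hypothetical):

	#include <linux/console.h>
	#include <linux/init.h>

	static void mycon_write(struct console *con, const char *s,
				unsigned int count)
	{
		/* ship the bytes to the hypothetical device */
	}

	static struct console mycon = {
		.name	= "mycon",
		.write	= mycon_write,
		.flags	= CON_PRINTBUFFER | CON_ENABLED,
		.index	= -1,
	};

	static int __init mycon_init(void)
	{
		register_console(&mycon);	/* old log replayed here once */
		return 0;
	}
	console_initcall(mycon_init);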
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 21fec73d45d4..80b34dffdfb9 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -261,6 +261,9 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
+ if (mode & PTRACE_MODE_SCHED)
+ return false;
+
if (mode & PTRACE_MODE_NOAUDIT)
return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
else
@@ -328,9 +331,16 @@ ok:
!ptrace_has_cap(mm->user_ns, mode)))
return -EPERM;
+ if (mode & PTRACE_MODE_SCHED)
+ return 0;
return security_ptrace_access_check(task, mode);
}
+bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
+{
+ return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
+}
+
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
int err;
@@ -396,7 +406,7 @@ static int ptrace_attach(struct task_struct *task, long request,
/* SEIZE doesn't trap tracee on attach */
if (!seize)
- send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
+ send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);
spin_lock(&task->sighand->siglock);
@@ -563,7 +573,7 @@ void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
if (unlikely(p->ptrace & PT_EXITKILL))
- send_sig_info(SIGKILL, SEND_SIG_FORCED, p);
+ send_sig_info(SIGKILL, SEND_SIG_PRIV, p);
if (__ptrace_detach(tracer, p))
list_add(&p->ptrace_entry, dead);
@@ -651,7 +661,7 @@ static int ptrace_setoptions(struct task_struct *child, unsigned long data)
return 0;
}
-static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
+static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
unsigned long flags;
int error = -ESRCH;
@@ -667,7 +677,7 @@ static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
return error;
}
-static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
+static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
unsigned long flags;
int error = -ESRCH;
@@ -709,7 +719,7 @@ static int ptrace_peek_siginfo(struct task_struct *child,
pending = &child->pending;
for (i = 0; i < arg.nr; ) {
- siginfo_t info;
+ kernel_siginfo_t info;
s32 off = arg.off + i;
spin_lock_irq(&child->sighand->siglock);
@@ -885,7 +895,7 @@ int ptrace_request(struct task_struct *child, long request,
{
bool seized = child->ptrace & PT_SEIZED;
int ret = -EIO;
- siginfo_t siginfo, *si;
+ kernel_siginfo_t siginfo, *si;
void __user *datavp = (void __user *) data;
unsigned long __user *datalp = datavp;
unsigned long flags;
@@ -919,9 +929,8 @@ int ptrace_request(struct task_struct *child, long request,
break;
case PTRACE_SETSIGINFO:
- if (copy_from_user(&siginfo, datavp, sizeof siginfo))
- ret = -EFAULT;
- else
+ ret = copy_siginfo_from_user(&siginfo, datavp);
+ if (!ret)
ret = ptrace_setsiginfo(child, &siginfo);
break;
@@ -1181,7 +1190,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
{
compat_ulong_t __user *datap = compat_ptr(data);
compat_ulong_t word;
- siginfo_t siginfo;
+ kernel_siginfo_t siginfo;
int ret;
switch (request) {
@@ -1215,10 +1224,9 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
break;
case PTRACE_SETSIGINFO:
- if (copy_siginfo_from_user32(
- &siginfo, (struct compat_siginfo __user *) datap))
- ret = -EFAULT;
- else
+ ret = copy_siginfo_from_user32(
+ &siginfo, (struct compat_siginfo __user *) datap);
+ if (!ret)
ret = ptrace_setsiginfo(child, &siginfo);
break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
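From userspace the PTRACE_SETSIGINFO ABI is unchanged; the difference is that the kernel now imports the structure through copy_siginfo_from_user(), which validates the layout rather than doing a raw copy. A minimal caller, assuming the tracee is already ptrace-stopped:

	#include <signal.h>
	#include <string.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	static long set_pending_siginfo(pid_t tracee)
	{
		siginfo_t si;

		memset(&si, 0, sizeof(si));
		si.si_signo = SIGUSR1;
		si.si_code = SI_USER;
		/* Returns -1 with errno set if the siginfo is rejected. */
		return ptrace(PTRACE_SETSIGINFO, tracee, 0, &si);
	}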
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index 9210379c0353..939a2056c87a 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -196,7 +196,7 @@ config RCU_BOOST
This option boosts the priority of preempted RCU readers that
block the current preemptible RCU grace period for too long.
This option also prevents heavy loads from blocking RCU
- callback invocation for all flavors of RCU.
+ callback invocation.
Say Y here if you are working with real-time apps or heavy loads.
Say N here if you are unsure.
@@ -225,12 +225,12 @@ config RCU_NOCB_CPU
callback invocation to energy-efficient CPUs in battery-powered
asymmetric multiprocessors.
- This option offloads callback invocation from the set of
- CPUs specified at boot time by the rcu_nocbs parameter.
- For each such CPU, a kthread ("rcuox/N") will be created to
- invoke callbacks, where the "N" is the CPU being offloaded,
- and where the "x" is "b" for RCU-bh, "p" for RCU-preempt, and
- "s" for RCU-sched. Nothing prevents this kthread from running
+ This option offloads callback invocation from the set of CPUs
+ specified at boot time by the rcu_nocbs parameter. For each
+ such CPU, a kthread ("rcuox/N") will be created to invoke
+ callbacks, where the "N" is the CPU being offloaded, and where
+ the "p" for RCU-preempt (PREEMPT kernels) and "s" for RCU-sched
+ (!PREEMPT kernels). Nothing prevents this kthread from running
on the specified CPUs, but (1) the kthreads may be preempted
between each callback, and (2) affinity or cgroups can be used
to force the kthreads to run on whatever set of CPUs is desired.
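For example (hypothetical command line), booting a PREEMPT kernel with

	rcu_nocbs=2-3

creates rcuop/2 and rcuop/3 to invoke those CPUs' callbacks; the same boot on a !PREEMPT kernel creates rcuos/2 and rcuos/3.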
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 4d04683c31b2..2866166863f0 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -176,8 +176,9 @@ static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
/*
* debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
- * by call_rcu() and rcu callback execution, and are therefore not part of the
- * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
+ * by call_rcu() and rcu callback execution, and are therefore not part
+ * of the RCU API. These are in rcupdate.h because they are used by all
+ * RCU implementations.
*/
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
@@ -223,6 +224,7 @@ void kfree(const void *);
*/
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
+ rcu_callback_t f;
unsigned long offset = (unsigned long)head->func;
rcu_lock_acquire(&rcu_callback_map);
@@ -233,7 +235,9 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
return true;
} else {
RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
- head->func(head);
+ f = head->func;
+ WRITE_ONCE(head->func, (rcu_callback_t)0L);
+ f(head);
rcu_lock_release(&rcu_callback_map);
return false;
}
@@ -328,40 +332,35 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
}
}
-/* Returns first leaf rcu_node of the specified RCU flavor. */
-#define rcu_first_leaf_node(rsp) ((rsp)->level[rcu_num_lvls - 1])
+/* Returns a pointer to the first leaf rcu_node structure. */
+#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])
/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
/* Is this rcu_node the last leaf? */
-#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])
+#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])
/*
- * Do a full breadth-first scan of the rcu_node structures for the
- * specified rcu_state structure.
+ * Do a full breadth-first scan of the {s,}rcu_node structures for the
+ * specified state structure (for SRCU) or the only rcu_state structure
+ * (for RCU).
*/
-#define rcu_for_each_node_breadth_first(rsp, rnp) \
- for ((rnp) = &(rsp)->node[0]; \
- (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
+#define srcu_for_each_node_breadth_first(sp, rnp) \
+ for ((rnp) = &(sp)->node[0]; \
+ (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
+#define rcu_for_each_node_breadth_first(rnp) \
+ srcu_for_each_node_breadth_first(&rcu_state, rnp)
/*
- * Do a breadth-first scan of the non-leaf rcu_node structures for the
- * specified rcu_state structure. Note that if there is a singleton
- * rcu_node tree with but one rcu_node structure, this loop is a no-op.
+ * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
+ * Note that if there is a singleton rcu_node tree with but one rcu_node
+ * structure, this loop -will- visit the rcu_node structure. It is still
+ * a leaf node, even if it is also the root node.
*/
-#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
- for ((rnp) = &(rsp)->node[0]; !rcu_is_leaf_node(rsp, rnp); (rnp)++)
-
-/*
- * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
- * structure. Note that if there is a singleton rcu_node tree with but
- * one rcu_node structure, this loop -will- visit the rcu_node structure.
- * It is still a leaf node, even if it is also the root node.
- */
-#define rcu_for_each_leaf_node(rsp, rnp) \
- for ((rnp) = rcu_first_leaf_node(rsp); \
- (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
+#define rcu_for_each_leaf_node(rnp) \
+ for ((rnp) = rcu_first_leaf_node(); \
+ (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)
/*
* Iterate over all possible CPUs in a leaf RCU node.
@@ -435,6 +434,12 @@ do { \
#endif /* #if defined(SRCU) || !defined(TINY_RCU) */
+#ifdef CONFIG_SRCU
+void srcu_init(void);
+#else /* #ifdef CONFIG_SRCU */
+static inline void srcu_init(void) { }
+#endif /* #else #ifdef CONFIG_SRCU */
+
#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
@@ -515,29 +520,19 @@ void srcutorture_get_gp_data(enum rcutorture_type test_type,
#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
-static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
-static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
-static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
-static inline void rcu_bh_force_quiescent_state(void) { }
-static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
-unsigned long rcu_bh_get_gp_seq(void);
-unsigned long rcu_sched_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
-unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_force_quiescent_state(void);
-void rcu_bh_force_quiescent_state(void);
-void rcu_sched_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */
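A hedged, debug-only sketch of the new argument-free iterators against the kernel-internal rcu_node fields; grplo/grphi bound the CPUs each leaf covers:

	static void show_leaves(void)	/* hypothetical helper */
	{
		struct rcu_node *rnp;

		rcu_for_each_leaf_node(rnp)
			pr_info("leaf rcu_node covers CPUs %d-%d\n",
				rnp->grplo, rnp->grphi);
	}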
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 34244523550e..b459da70b4fc 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -190,36 +190,6 @@ static struct rcu_perf_ops rcu_ops = {
};
/*
- * Definitions for rcu_bh perf testing.
- */
-
-static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH)
-{
- rcu_read_lock_bh();
- return 0;
-}
-
-static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH)
-{
- rcu_read_unlock_bh();
-}
-
-static struct rcu_perf_ops rcu_bh_ops = {
- .ptype = RCU_BH_FLAVOR,
- .init = rcu_sync_perf_init,
- .readlock = rcu_bh_perf_read_lock,
- .readunlock = rcu_bh_perf_read_unlock,
- .get_gp_seq = rcu_bh_get_gp_seq,
- .gp_diff = rcu_seq_diff,
- .exp_completed = rcu_exp_batches_completed_sched,
- .async = call_rcu_bh,
- .gp_barrier = rcu_barrier_bh,
- .sync = synchronize_rcu_bh,
- .exp_sync = synchronize_rcu_bh_expedited,
- .name = "rcu_bh"
-};
-
-/*
* Definitions for srcu perf testing.
*/
@@ -306,36 +276,6 @@ static struct rcu_perf_ops srcud_ops = {
};
/*
- * Definitions for sched perf testing.
- */
-
-static int sched_perf_read_lock(void)
-{
- preempt_disable();
- return 0;
-}
-
-static void sched_perf_read_unlock(int idx)
-{
- preempt_enable();
-}
-
-static struct rcu_perf_ops sched_ops = {
- .ptype = RCU_SCHED_FLAVOR,
- .init = rcu_sync_perf_init,
- .readlock = sched_perf_read_lock,
- .readunlock = sched_perf_read_unlock,
- .get_gp_seq = rcu_sched_get_gp_seq,
- .gp_diff = rcu_seq_diff,
- .exp_completed = rcu_exp_batches_completed_sched,
- .async = call_rcu_sched,
- .gp_barrier = rcu_barrier_sched,
- .sync = synchronize_sched,
- .exp_sync = synchronize_sched_expedited,
- .name = "sched"
-};
-
-/*
* Definitions for RCU-tasks perf testing.
*/
@@ -611,7 +551,7 @@ rcu_perf_cleanup(void)
kfree(writer_n_durations);
}
- /* Do flavor-specific cleanup operations. */
+ /* Do torture-type-specific cleanup operations. */
if (cur_ops->cleanup != NULL)
cur_ops->cleanup();
@@ -661,8 +601,7 @@ rcu_perf_init(void)
long i;
int firsterr = 0;
static struct rcu_perf_ops *perf_ops[] = {
- &rcu_ops, &rcu_bh_ops, &srcu_ops, &srcud_ops, &sched_ops,
- &tasks_ops,
+ &rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
};
if (!torture_init_begin(perf_type, verbose))
@@ -680,6 +619,7 @@ rcu_perf_init(void)
for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
pr_cont(" %s", perf_ops[i]->name);
pr_cont("\n");
+ WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
firsterr = -EINVAL;
goto unwind;
}
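With the rcu_bh and sched ops removed, the surviving perf_type values are rcu, srcu, srcud, and tasks; the added WARN_ON() covers the built-in case, where the -EINVAL above cannot propagate back to anyone. A hypothetical invocation of one remaining test:

	modprobe rcuperf perf_type=srcu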
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index c596c6f1e457..210c77460365 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -66,15 +66,19 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@jos
/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1)
-#define RCUTORTURE_RDR_BH 0x1 /* Extend readers by disabling bh. */
-#define RCUTORTURE_RDR_IRQ 0x2 /* ... disabling interrupts. */
-#define RCUTORTURE_RDR_PREEMPT 0x4 /* ... disabling preemption. */
-#define RCUTORTURE_RDR_RCU 0x8 /* ... entering another RCU reader. */
-#define RCUTORTURE_RDR_NBITS 4 /* Number of bits defined above. */
-#define RCUTORTURE_MAX_EXTEND (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
- RCUTORTURE_RDR_PREEMPT)
+#define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */
+#define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */
+#define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */
+#define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */
+#define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */
+#define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */
+#define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */
+#define RCUTORTURE_MAX_EXTEND \
+ (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
+ RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
/* Must be power of two minus one. */
+#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
torture_param(int, cbflood_inter_holdoff, HZ,
"Holdoff between floods (jiffies)");
@@ -89,6 +93,12 @@ torture_param(int, fqs_duration, 0,
"Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
+torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
+torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
+torture_param(int, fwd_progress_holdoff, 60,
+ "Time between forward-progress tests (s)");
+torture_param(bool, fwd_progress_need_resched, 1,
+ "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
@@ -125,7 +135,7 @@ torture_param(int, verbose, 1,
static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
-MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
+MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
static int nrealreaders;
static int ncbflooders;
@@ -137,6 +147,7 @@ static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
+static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
@@ -197,6 +208,18 @@ static const char * const rcu_torture_writer_state_names[] = {
"RTWS_STOPPING",
};
+/* Record reader segment types and duration for first failing read. */
+struct rt_read_seg {
+ int rt_readstate;
+ unsigned long rt_delay_jiffies;
+ unsigned long rt_delay_ms;
+ unsigned long rt_delay_us;
+ bool rt_preempted;
+};
+static int err_segs_recorded;
+static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
+static int rt_read_nsegs;
+
static const char *rcu_torture_writer_state_getname(void)
{
unsigned int i = READ_ONCE(rcu_torture_writer_state);
@@ -278,7 +301,8 @@ struct rcu_torture_ops {
void (*init)(void);
void (*cleanup)(void);
int (*readlock)(void);
- void (*read_delay)(struct torture_random_state *rrsp);
+ void (*read_delay)(struct torture_random_state *rrsp,
+ struct rt_read_seg *rtrsp);
void (*readunlock)(int idx);
unsigned long (*get_gp_seq)(void);
unsigned long (*gp_diff)(unsigned long new, unsigned long old);
@@ -291,6 +315,7 @@ struct rcu_torture_ops {
void (*cb_barrier)(void);
void (*fqs)(void);
void (*stats)(void);
+ int (*stall_dur)(void);
int irq_capable;
int can_boost;
int extendables;
@@ -310,12 +335,13 @@ static int rcu_torture_read_lock(void) __acquires(RCU)
return 0;
}
-static void rcu_read_delay(struct torture_random_state *rrsp)
+static void
+rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
unsigned long started;
unsigned long completed;
const unsigned long shortdelay_us = 200;
- const unsigned long longdelay_ms = 50;
+ unsigned long longdelay_ms = 300;
unsigned long long ts;
/* We want a short delay sometimes to make a reader delay the grace
@@ -325,16 +351,23 @@ static void rcu_read_delay(struct torture_random_state *rrsp)
if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local();
+ if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
+ longdelay_ms = 5; /* Avoid triggering BH limits. */
mdelay(longdelay_ms);
+ rtrsp->rt_delay_ms = longdelay_ms;
completed = cur_ops->get_gp_seq();
do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
started, completed);
}
- if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
+ if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
udelay(shortdelay_us);
+ rtrsp->rt_delay_us = shortdelay_us;
+ }
if (!preempt_count() &&
- !(torture_random(rrsp) % (nrealreaders * 500)))
+ !(torture_random(rrsp) % (nrealreaders * 500))) {
torture_preempt_schedule(); /* QS only if preemptible. */
+ rtrsp->rt_preempted = true;
+ }
}
static void rcu_torture_read_unlock(int idx) __releases(RCU)
@@ -429,53 +462,14 @@ static struct rcu_torture_ops rcu_ops = {
.cb_barrier = rcu_barrier,
.fqs = rcu_force_quiescent_state,
.stats = NULL,
+ .stall_dur = rcu_jiffies_till_stall_check,
.irq_capable = 1,
.can_boost = rcu_can_boost(),
+ .extendables = RCUTORTURE_MAX_EXTEND,
.name = "rcu"
};
/*
- * Definitions for rcu_bh torture testing.
- */
-
-static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
-{
- rcu_read_lock_bh();
- return 0;
-}
-
-static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
-{
- rcu_read_unlock_bh();
-}
-
-static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
-{
- call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
-}
-
-static struct rcu_torture_ops rcu_bh_ops = {
- .ttype = RCU_BH_FLAVOR,
- .init = rcu_sync_torture_init,
- .readlock = rcu_bh_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .get_gp_seq = rcu_bh_get_gp_seq,
- .gp_diff = rcu_seq_diff,
- .deferred_free = rcu_bh_torture_deferred_free,
- .sync = synchronize_rcu_bh,
- .exp_sync = synchronize_rcu_bh_expedited,
- .call = call_rcu_bh,
- .cb_barrier = rcu_barrier_bh,
- .fqs = rcu_bh_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .extendables = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
- .ext_irq_conflict = RCUTORTURE_RDR_RCU,
- .name = "rcu_bh"
-};
-
-/*
* Don't even think about trying any of these in real life!!!
* The names includes "busted", and they really means it!
* The only purpose of these functions is to provide a buggy RCU
@@ -531,7 +525,8 @@ static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
return srcu_read_lock(srcu_ctlp);
}
-static void srcu_read_delay(struct torture_random_state *rrsp)
+static void
+srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
long delay;
const long uspertick = 1000000 / HZ;
@@ -541,10 +536,12 @@ static void srcu_read_delay(struct torture_random_state *rrsp)
delay = torture_random(rrsp) %
(nrealreaders * 2 * longdelay * uspertick);
- if (!delay && in_task())
+ if (!delay && in_task()) {
schedule_timeout_interruptible(longdelay);
- else
- rcu_read_delay(rrsp);
+ rtrsp->rt_delay_jiffies = longdelay;
+ } else {
+ rcu_read_delay(rrsp, rtrsp);
+ }
}
static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
@@ -663,48 +660,6 @@ static struct rcu_torture_ops busted_srcud_ops = {
};
/*
- * Definitions for sched torture testing.
- */
-
-static int sched_torture_read_lock(void)
-{
- preempt_disable();
- return 0;
-}
-
-static void sched_torture_read_unlock(int idx)
-{
- preempt_enable();
-}
-
-static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
-{
- call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
-}
-
-static struct rcu_torture_ops sched_ops = {
- .ttype = RCU_SCHED_FLAVOR,
- .init = rcu_sync_torture_init,
- .readlock = sched_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .get_gp_seq = rcu_sched_get_gp_seq,
- .gp_diff = rcu_seq_diff,
- .deferred_free = rcu_sched_torture_deferred_free,
- .sync = synchronize_sched,
- .exp_sync = synchronize_sched_expedited,
- .get_state = get_state_synchronize_sched,
- .cond_sync = cond_synchronize_sched,
- .call = call_rcu_sched,
- .cb_barrier = rcu_barrier_sched,
- .fqs = rcu_sched_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .extendables = RCUTORTURE_MAX_EXTEND,
- .name = "sched"
-};
-
-/*
* Definitions for RCU-tasks torture testing.
*/
@@ -1116,7 +1071,8 @@ rcu_torture_writer(void *arg)
break;
}
}
- rcu_torture_current_version++;
+ WRITE_ONCE(rcu_torture_current_version,
+ rcu_torture_current_version + 1);
/* Cycle through nesting levels of rcu_expedite_gp() calls. */
if (can_expedite &&
!(torture_random(&rand) & 0xff & (!!expediting - 1))) {
@@ -1132,7 +1088,10 @@ rcu_torture_writer(void *arg)
!rcu_gp_is_normal();
}
rcu_torture_writer_state = RTWS_STUTTER;
- stutter_wait("rcu_torture_writer");
+ if (stutter_wait("rcu_torture_writer"))
+ for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
+ if (list_empty(&rcu_tortures[i].rtort_free))
+ WARN_ON_ONCE(1);
} while (!torture_must_stop());
/* Reset expediting back to unexpedited. */
if (expediting > 0)
@@ -1199,7 +1158,8 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp)
* change, do a ->read_delay().
*/
static void rcutorture_one_extend(int *readstate, int newstate,
- struct torture_random_state *trsp)
+ struct torture_random_state *trsp,
+ struct rt_read_seg *rtrsp)
{
int idxnew = -1;
int idxold = *readstate;
@@ -1208,6 +1168,7 @@ static void rcutorture_one_extend(int *readstate, int newstate,
WARN_ON_ONCE(idxold < 0);
WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
+ rtrsp->rt_readstate = newstate;
/* First, put new protection in place to avoid critical-section gap. */
if (statesnew & RCUTORTURE_RDR_BH)
@@ -1216,6 +1177,10 @@ static void rcutorture_one_extend(int *readstate, int newstate,
local_irq_disable();
if (statesnew & RCUTORTURE_RDR_PREEMPT)
preempt_disable();
+ if (statesnew & RCUTORTURE_RDR_RBH)
+ rcu_read_lock_bh();
+ if (statesnew & RCUTORTURE_RDR_SCHED)
+ rcu_read_lock_sched();
if (statesnew & RCUTORTURE_RDR_RCU)
idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
@@ -1226,12 +1191,16 @@ static void rcutorture_one_extend(int *readstate, int newstate,
local_bh_enable();
if (statesold & RCUTORTURE_RDR_PREEMPT)
preempt_enable();
+ if (statesold & RCUTORTURE_RDR_RBH)
+ rcu_read_unlock_bh();
+ if (statesold & RCUTORTURE_RDR_SCHED)
+ rcu_read_unlock_sched();
if (statesold & RCUTORTURE_RDR_RCU)
cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
/* Delay if neither beginning nor end and there was a change. */
if ((statesnew || statesold) && *readstate && newstate)
- cur_ops->read_delay(trsp);
+ cur_ops->read_delay(trsp, rtrsp);
/* Update the reader state. */
if (idxnew == -1)
@@ -1260,18 +1229,19 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
int mask = rcutorture_extend_mask_max();
unsigned long randmask1 = torture_random(trsp) >> 8;
- unsigned long randmask2 = randmask1 >> 1;
+ unsigned long randmask2 = randmask1 >> 3;
WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
- /* Half the time lots of bits, half the time only one bit. */
- if (randmask1 & 0x1)
+ /* Occasionally lots of bits, but most of the time only one bit. */
+ if (!(randmask1 & 0x7))
mask = mask & randmask2;
else
mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
+ /* Can't enable bh w/irq disabled. */
if ((mask & RCUTORTURE_RDR_IRQ) &&
- !(mask & RCUTORTURE_RDR_BH) &&
- (oldmask & RCUTORTURE_RDR_BH))
- mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
+ ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
+ (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
+ mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
if ((mask & RCUTORTURE_RDR_IRQ) &&
!(mask & cur_ops->ext_irq_conflict) &&
(oldmask & cur_ops->ext_irq_conflict))
@@ -1283,20 +1253,25 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
* Do a randomly selected number of extensions of an existing RCU read-side
* critical section.
*/
-static void rcutorture_loop_extend(int *readstate,
- struct torture_random_state *trsp)
+static struct rt_read_seg *
+rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
+ struct rt_read_seg *rtrsp)
{
int i;
+ int j;
int mask = rcutorture_extend_mask_max();
WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
if (!((mask - 1) & mask))
- return; /* Current RCU flavor not extendable. */
- i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
- while (i--) {
+ return rtrsp; /* Current RCU reader not extendable. */
+ /* Bias towards larger numbers of loops. */
+ i = (torture_random(trsp) >> 3);
+ i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
+ for (j = 0; j < i; j++) {
mask = rcutorture_extend_mask(*readstate, trsp);
- rcutorture_one_extend(readstate, mask, trsp);
+ rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
}
+ return &rtrsp[j];
}
/*
@@ -1306,16 +1281,20 @@ static void rcutorture_loop_extend(int *readstate,
*/
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
+ int i;
unsigned long started;
unsigned long completed;
int newstate;
struct rcu_torture *p;
int pipe_count;
int readstate = 0;
+ struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
+ struct rt_read_seg *rtrsp = &rtseg[0];
+ struct rt_read_seg *rtrsp1;
unsigned long long ts;
newstate = rcutorture_extend_mask(readstate, trsp);
- rcutorture_one_extend(&readstate, newstate, trsp);
+ rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local();
p = rcu_dereference_check(rcu_torture_current,
@@ -1325,12 +1304,12 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp)
torturing_tasks());
if (p == NULL) {
/* Wait for rcu_torture_writer to get underway */
- rcutorture_one_extend(&readstate, 0, trsp);
+ rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
return false;
}
if (p->rtort_mbtest == 0)
atomic_inc(&n_rcu_torture_mberror);
- rcutorture_loop_extend(&readstate, trsp);
+ rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
preempt_disable();
pipe_count = p->rtort_pipe_count;
if (pipe_count > RCU_TORTURE_PIPE_LEN) {
@@ -1351,8 +1330,17 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp)
}
__this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
- rcutorture_one_extend(&readstate, 0, trsp);
+ rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
+
+ /* If error or close call, record the sequence of reader protections. */
+ if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
+ i = 0;
+ for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
+ err_segs[i++] = *rtrsp1;
+ rt_read_nsegs = i;
+ }
+
return true;
}
@@ -1387,6 +1375,9 @@ static void rcu_torture_timer(struct timer_list *unused)
static int
rcu_torture_reader(void *arg)
{
+ unsigned long lastsleep = jiffies;
+ long myid = (long)arg;
+ int mynumonline = myid;
DEFINE_TORTURE_RANDOM(rand);
struct timer_list t;
@@ -1402,6 +1393,12 @@ rcu_torture_reader(void *arg)
}
if (!rcu_torture_one_read(&rand))
schedule_timeout_interruptible(HZ);
+ if (time_after(jiffies, lastsleep)) {
+ schedule_timeout_interruptible(1);
+ lastsleep = jiffies + 10;
+ }
+ while (num_online_cpus() < mynumonline && !torture_must_stop())
+ schedule_timeout_interruptible(HZ / 5);
stutter_wait("rcu_torture_reader");
} while (!torture_must_stop());
if (irqreader && cur_ops->irq_capable) {
@@ -1655,6 +1652,121 @@ static int __init rcu_torture_stall_init(void)
return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}
+/* State structure for forward-progress self-propagating RCU callback. */
+struct fwd_cb_state {
+ struct rcu_head rh;
+ int stop;
+};
+
+/*
+ * Forward-progress self-propagating RCU callback function. Because
+ * callbacks run from softirq, this function is an implicit RCU read-side
+ * critical section.
+ */
+static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
+{
+ struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
+
+ if (READ_ONCE(fcsp->stop)) {
+ WRITE_ONCE(fcsp->stop, 2);
+ return;
+ }
+ cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
+}
+
+/* Carry out grace-period forward-progress testing. */
+static int rcu_torture_fwd_prog(void *args)
+{
+ unsigned long cver;
+ unsigned long dur;
+ struct fwd_cb_state fcs;
+ unsigned long gps;
+ int idx;
+ int sd;
+ int sd4;
+ bool selfpropcb = false;
+ unsigned long stopat;
+ int tested = 0;
+ int tested_tries = 0;
+ static DEFINE_TORTURE_RANDOM(trs);
+
+ VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
+ if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
+ set_user_nice(current, MAX_NICE);
+ if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
+ init_rcu_head_on_stack(&fcs.rh);
+ selfpropcb = true;
+ }
+ do {
+ schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
+ if (selfpropcb) {
+ WRITE_ONCE(fcs.stop, 0);
+ cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
+ }
+ cver = READ_ONCE(rcu_torture_current_version);
+ gps = cur_ops->get_gp_seq();
+ sd = cur_ops->stall_dur() + 1;
+ sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
+ dur = sd4 + torture_random(&trs) % (sd - sd4);
+ stopat = jiffies + dur;
+ while (time_before(jiffies, stopat) && !torture_must_stop()) {
+ idx = cur_ops->readlock();
+ udelay(10);
+ cur_ops->readunlock(idx);
+ if (!fwd_progress_need_resched || need_resched())
+ cond_resched();
+ }
+ tested_tries++;
+ if (!time_before(jiffies, stopat) && !torture_must_stop()) {
+ tested++;
+ cver = READ_ONCE(rcu_torture_current_version) - cver;
+ gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
+ WARN_ON(!cver && gps < 2);
+ pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
+ }
+ if (selfpropcb) {
+ WRITE_ONCE(fcs.stop, 1);
+ cur_ops->sync(); /* Wait for running CB to complete. */
+ cur_ops->cb_barrier(); /* Wait for queued callbacks. */
+ }
+ /* Avoid slow periods, better to test when busy. */
+ stutter_wait("rcu_torture_fwd_prog");
+ } while (!torture_must_stop());
+ if (selfpropcb) {
+ WARN_ON(READ_ONCE(fcs.stop) != 2);
+ destroy_rcu_head_on_stack(&fcs.rh);
+ }
+ /* Short runs might not contain a valid forward-progress attempt. */
+ WARN_ON(!tested && tested_tries >= 5);
+ pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
+ torture_kthread_stopping("rcu_torture_fwd_prog");
+ return 0;
+}
+
+/* If forward-progress checking is requested and feasible, spawn the thread. */
+static int __init rcu_torture_fwd_prog_init(void)
+{
+ if (!fwd_progress)
+ return 0; /* Not requested, so don't do it. */
+ if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0) {
+ VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
+ return 0;
+ }
+ if (stall_cpu > 0) {
+ VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
+ if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
+ return -EINVAL; /* In module, can fail back to user. */
+ WARN_ON(1); /* Make sure rcutorture notices conflict. */
+ return 0;
+ }
+ if (fwd_progress_holdoff <= 0)
+ fwd_progress_holdoff = 1;
+ if (fwd_progress_div <= 0)
+ fwd_progress_div = 4;
+ return torture_create_kthread(rcu_torture_fwd_prog,
+ NULL, fwd_prog_task);
+}
+
/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
@@ -1817,6 +1929,7 @@ static enum cpuhp_state rcutor_hp;
static void
rcu_torture_cleanup(void)
{
+ int firsttime;
int flags = 0;
unsigned long gp_seq = 0;
int i;
@@ -1828,6 +1941,7 @@ rcu_torture_cleanup(void)
}
rcu_torture_barrier_cleanup();
+ torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
torture_stop_kthread(rcu_torture_stall, stall_task);
torture_stop_kthread(rcu_torture_writer, writer_task);
@@ -1860,7 +1974,7 @@ rcu_torture_cleanup(void)
cpuhp_remove_state(rcutor_hp);
/*
- * Wait for all RCU callbacks to fire, then do flavor-specific
+ * Wait for all RCU callbacks to fire, then do torture-type-specific
* cleanup operations.
*/
if (cur_ops->cb_barrier != NULL)
@@ -1870,6 +1984,33 @@ rcu_torture_cleanup(void)
rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
+ if (err_segs_recorded) {
+ pr_alert("Failure/close-call rcutorture reader segments:\n");
+ if (rt_read_nsegs == 0)
+ pr_alert("\t: No segments recorded!!!\n");
+ firsttime = 1;
+ for (i = 0; i < rt_read_nsegs; i++) {
+ pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
+ if (err_segs[i].rt_delay_jiffies != 0) {
+ pr_cont("%s%ldjiffies", firsttime ? "" : "+",
+ err_segs[i].rt_delay_jiffies);
+ firsttime = 0;
+ }
+ if (err_segs[i].rt_delay_ms != 0) {
+ pr_cont("%s%ldms", firsttime ? "" : "+",
+ err_segs[i].rt_delay_ms);
+ firsttime = 0;
+ }
+ if (err_segs[i].rt_delay_us != 0) {
+ pr_cont("%s%ldus", firsttime ? "" : "+",
+ err_segs[i].rt_delay_us);
+ firsttime = 0;
+ }
+ pr_cont("%s\n",
+ err_segs[i].rt_preempted ? "preempted" : "");
+
+ }
+ }
if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
else if (torture_onoff_failures())
@@ -1939,12 +2080,12 @@ static void rcu_test_debug_objects(void)
static int __init
rcu_torture_init(void)
{
- int i;
+ long i;
int cpu;
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] = {
- &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
- &busted_srcud_ops, &sched_ops, &tasks_ops,
+ &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
+ &busted_srcud_ops, &tasks_ops,
};
if (!torture_init_begin(torture_type, verbose))
@@ -1963,6 +2104,7 @@ rcu_torture_init(void)
for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
pr_cont(" %s", torture_ops[i]->name);
pr_cont("\n");
+ WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
firsterr = -EINVAL;
goto unwind;
}
@@ -2013,6 +2155,8 @@ rcu_torture_init(void)
per_cpu(rcu_torture_batch, cpu)[i] = 0;
}
}
+ err_segs_recorded = 0;
+ rt_read_nsegs = 0;
/* Start up the kthreads. */
@@ -2044,7 +2188,7 @@ rcu_torture_init(void)
goto unwind;
}
for (i = 0; i < nrealreaders; i++) {
- firsterr = torture_create_kthread(rcu_torture_reader, NULL,
+ firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
reader_tasks[i]);
if (firsterr)
goto unwind;
@@ -2100,6 +2244,9 @@ rcu_torture_init(void)
firsterr = rcu_torture_stall_init();
if (firsterr)
goto unwind;
+ firsterr = rcu_torture_fwd_prog_init();
+ if (firsterr)
+ goto unwind;
firsterr = rcu_torture_barrier_init();
if (firsterr)
goto unwind;
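One subtlety above: rcutorture_extend_mask() refuses to re-enable either bh form while interrupts stay disabled, since local_bh_enable() with IRQs off is illegal. A standalone recreation of just that constraint, using the new bit values:

	#include <stdio.h>

	#define RDR_BH  0x01	/* local_bh_disable() */
	#define RDR_IRQ 0x02	/* local_irq_disable() */
	#define RDR_RBH 0x08	/* rcu_read_lock_bh() */

	static int sanitize(int mask, int oldmask)
	{
		/* Can't enable bh w/irq disabled: keep both bh forms held. */
		if ((mask & RDR_IRQ) &&
		    ((!(mask & RDR_BH) && (oldmask & RDR_BH)) ||
		     (!(mask & RDR_RBH) && (oldmask & RDR_RBH))))
			mask |= RDR_BH | RDR_RBH;
		return mask;
	}

	int main(void)
	{
		/* IRQ-only request while bh was held -> 0xb, bh bits kept. */
		printf("%#x\n", sanitize(RDR_IRQ, RDR_BH));
		return 0;
	}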
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 04fc2ed71af8..b46e6683f8c9 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -34,6 +34,8 @@
#include "rcu.h"
int rcu_scheduler_active __read_mostly;
+static LIST_HEAD(srcu_boot_list);
+static bool srcu_init_done;
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
@@ -46,6 +48,7 @@ static int init_srcu_struct_fields(struct srcu_struct *sp)
sp->srcu_gp_waiting = false;
sp->srcu_idx = 0;
INIT_WORK(&sp->srcu_work, srcu_drive_gp);
+ INIT_LIST_HEAD(&sp->srcu_work.entry);
return 0;
}
@@ -179,8 +182,12 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
*sp->srcu_cb_tail = rhp;
sp->srcu_cb_tail = &rhp->next;
local_irq_restore(flags);
- if (!READ_ONCE(sp->srcu_gp_running))
- schedule_work(&sp->srcu_work);
+ if (!READ_ONCE(sp->srcu_gp_running)) {
+ if (likely(srcu_init_done))
+ schedule_work(&sp->srcu_work);
+ else if (list_empty(&sp->srcu_work.entry))
+ list_add(&sp->srcu_work.entry, &srcu_boot_list);
+ }
}
EXPORT_SYMBOL_GPL(call_srcu);
@@ -204,3 +211,21 @@ void __init rcu_scheduler_starting(void)
{
rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}
+
+/*
+ * Queue work for srcu_struct structures with early boot callbacks.
+ * The work won't actually execute until the workqueue initialization
+ * phase that takes place after the scheduler starts.
+ */
+void __init srcu_init(void)
+{
+ struct srcu_struct *sp;
+
+ srcu_init_done = true;
+ while (!list_empty(&srcu_boot_list)) {
+ sp = list_first_entry(&srcu_boot_list,
+ struct srcu_struct, srcu_work.entry);
+ list_del_init(&sp->srcu_work.entry);
+ schedule_work(&sp->srcu_work);
+ }
+}
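The boot-list mechanism means Tiny SRCU callbacks may now be posted before workqueues exist; srcu_init() releases any parked work once the scheduler is up. A hedged sketch of an early caller (names hypothetical):

	#include <linux/printk.h>
	#include <linux/srcu.h>

	DEFINE_STATIC_SRCU(my_srcu);

	static struct rcu_head boot_rh;

	static void boot_cb(struct rcu_head *rhp)
	{
		pr_info("early SRCU callback finally ran\n");
	}

	static void __init post_early_cb(void)
	{
		/* Pre-workqueue: parks on srcu_boot_list until srcu_init(). */
		call_srcu(&my_srcu, &boot_rh, boot_cb);
	}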
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 6c9866a854b1..a8846ed7f352 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -51,6 +51,10 @@ module_param(exp_holdoff, ulong, 0444);
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);
+/* Early-boot callback-management, so early that no lock is required! */
+static LIST_HEAD(srcu_boot_list);
+static bool __read_mostly srcu_init_done;
+
static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
static void process_srcu(struct work_struct *work);
@@ -105,7 +109,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
rcu_init_levelspread(levelspread, num_rcu_lvl);
/* Each pass through this loop initializes one srcu_node structure. */
- rcu_for_each_node_breadth_first(sp, snp) {
+ srcu_for_each_node_breadth_first(sp, snp) {
spin_lock_init(&ACCESS_PRIVATE(snp, lock));
WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
ARRAY_SIZE(snp->srcu_data_have_cbs));
@@ -235,7 +239,6 @@ static void check_init_srcu_struct(struct srcu_struct *sp)
{
unsigned long flags;
- WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
/* The smp_load_acquire() pairs with the smp_store_release(). */
if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
return; /* Already initialized. */
@@ -561,7 +564,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
/* Initiate callback invocation as needed. */
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
- rcu_for_each_node_breadth_first(sp, snp) {
+ srcu_for_each_node_breadth_first(sp, snp) {
spin_lock_irq_rcu_node(snp);
cbs = false;
last_lvl = snp >= sp->level[rcu_num_lvls - 1];
@@ -701,7 +704,11 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
srcu_gp_start(sp);
- queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp));
+ if (likely(srcu_init_done))
+ queue_delayed_work(rcu_gp_wq, &sp->work,
+ srcu_get_delay(sp));
+ else if (list_empty(&sp->work.work.entry))
+ list_add(&sp->work.work.entry, &srcu_boot_list);
}
spin_unlock_irqrestore_rcu_node(sp, flags);
}
@@ -980,7 +987,7 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
* There are memory-ordering constraints implied by synchronize_srcu().
* On systems with more than one CPU, when synchronize_srcu() returns,
* each CPU is guaranteed to have executed a full memory barrier since
- * the end of its last corresponding SRCU-sched read-side critical section
+ * the end of its last corresponding SRCU read-side critical section
* whose beginning preceded the call to synchronize_srcu(). In addition,
* each CPU having an SRCU read-side critical section that extends beyond
* the return from synchronize_srcu() is guaranteed to have executed a
@@ -1308,3 +1315,17 @@ static int __init srcu_bootup_announce(void)
return 0;
}
early_initcall(srcu_bootup_announce);
+
+void __init srcu_init(void)
+{
+ struct srcu_struct *sp;
+
+ srcu_init_done = true;
+ while (!list_empty(&srcu_boot_list)) {
+ sp = list_first_entry(&srcu_boot_list, struct srcu_struct,
+ work.work.entry);
+ check_init_srcu_struct(sp);
+ list_del_init(&sp->work.work.entry);
+ queue_work(rcu_gp_wq, &sp->work.work);
+ }
+}
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index befc9321a89c..5f5963ba313e 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -46,69 +46,27 @@ struct rcu_ctrlblk {
};
/* Definition for rcupdate control block. */
-static struct rcu_ctrlblk rcu_sched_ctrlblk = {
- .donetail = &rcu_sched_ctrlblk.rcucblist,
- .curtail = &rcu_sched_ctrlblk.rcucblist,
+static struct rcu_ctrlblk rcu_ctrlblk = {
+ .donetail = &rcu_ctrlblk.rcucblist,
+ .curtail = &rcu_ctrlblk.rcucblist,
};
-static struct rcu_ctrlblk rcu_bh_ctrlblk = {
- .donetail = &rcu_bh_ctrlblk.rcucblist,
- .curtail = &rcu_bh_ctrlblk.rcucblist,
-};
-
-void rcu_barrier_bh(void)
-{
- wait_rcu_gp(call_rcu_bh);
-}
-EXPORT_SYMBOL(rcu_barrier_bh);
-
-void rcu_barrier_sched(void)
-{
- wait_rcu_gp(call_rcu_sched);
-}
-EXPORT_SYMBOL(rcu_barrier_sched);
-
-/*
- * Helper function for rcu_sched_qs() and rcu_bh_qs().
- * Also irqs are disabled to avoid confusion due to interrupt handlers
- * invoking call_rcu().
- */
-static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
-{
- if (rcp->donetail != rcp->curtail) {
- rcp->donetail = rcp->curtail;
- return 1;
- }
-
- return 0;
-}
-
-/*
- * Record an rcu quiescent state. And an rcu_bh quiescent state while we
- * are at it, given that any rcu quiescent state is also an rcu_bh
- * quiescent state. Use "+" instead of "||" to defeat short circuiting.
- */
-void rcu_sched_qs(void)
+void rcu_barrier(void)
{
- unsigned long flags;
-
- local_irq_save(flags);
- if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
- rcu_qsctr_help(&rcu_bh_ctrlblk))
- raise_softirq(RCU_SOFTIRQ);
- local_irq_restore(flags);
+ wait_rcu_gp(call_rcu);
}
+EXPORT_SYMBOL(rcu_barrier);
-/*
- * Record an rcu_bh quiescent state.
- */
-void rcu_bh_qs(void)
+/* Record an rcu quiescent state. */
+void rcu_qs(void)
{
unsigned long flags;
local_irq_save(flags);
- if (rcu_qsctr_help(&rcu_bh_ctrlblk))
+ if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
+ rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
raise_softirq(RCU_SOFTIRQ);
+ }
local_irq_restore(flags);
}
@@ -120,34 +78,33 @@ void rcu_bh_qs(void)
*/
void rcu_check_callbacks(int user)
{
- if (user)
- rcu_sched_qs();
- if (user || !in_softirq())
- rcu_bh_qs();
+ if (user) {
+ rcu_qs();
+ } else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+ }
}
-/*
- * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
- * whose grace period has elapsed.
- */
-static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+/* Invoke the RCU callbacks whose grace period has elapsed. */
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
struct rcu_head *next, *list;
unsigned long flags;
/* Move the ready-to-invoke callbacks to a local list. */
local_irq_save(flags);
- if (rcp->donetail == &rcp->rcucblist) {
+ if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
/* No callbacks ready, so just leave. */
local_irq_restore(flags);
return;
}
- list = rcp->rcucblist;
- rcp->rcucblist = *rcp->donetail;
- *rcp->donetail = NULL;
- if (rcp->curtail == rcp->donetail)
- rcp->curtail = &rcp->rcucblist;
- rcp->donetail = &rcp->rcucblist;
+ list = rcu_ctrlblk.rcucblist;
+ rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
+ *rcu_ctrlblk.donetail = NULL;
+ if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
+ rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
+ rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
local_irq_restore(flags);
/* Invoke the callbacks on the local list. */
@@ -162,37 +119,31 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
}
}
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
-{
- __rcu_process_callbacks(&rcu_sched_ctrlblk);
- __rcu_process_callbacks(&rcu_bh_ctrlblk);
-}
-
/*
* Wait for a grace period to elapse. But it is illegal to invoke
- * synchronize_sched() from within an RCU read-side critical section.
- * Therefore, any legal call to synchronize_sched() is a quiescent
- * state, and so on a UP system, synchronize_sched() need do nothing.
- * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the
- * benefits of doing might_sleep() to reduce latency.)
+ * synchronize_rcu() from within an RCU read-side critical section.
+ * Therefore, any legal call to synchronize_rcu() is a quiescent
+ * state, and so on a UP system, synchronize_rcu() need do nothing.
+ * (But Lai Jiangshan points out the benefits of doing might_sleep()
+ * to reduce latency.)
*
* Cool, huh? (Due to Josh Triplett.)
*/
-void synchronize_sched(void)
+void synchronize_rcu(void)
{
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_sched() in RCU read-side critical section");
+ "Illegal synchronize_rcu() in RCU read-side critical section");
}
-EXPORT_SYMBOL_GPL(synchronize_sched);
+EXPORT_SYMBOL_GPL(synchronize_rcu);
/*
- * Helper function for call_rcu() and call_rcu_bh().
+ * Post an RCU callback to be invoked after the end of an RCU grace
+ * period. But since we have but one CPU, that would be after any
+ * quiescent state.
*/
-static void __call_rcu(struct rcu_head *head,
- rcu_callback_t func,
- struct rcu_ctrlblk *rcp)
+void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
unsigned long flags;
@@ -201,39 +152,20 @@ static void __call_rcu(struct rcu_head *head,
head->next = NULL;
local_irq_save(flags);
- *rcp->curtail = head;
- rcp->curtail = &head->next;
+ *rcu_ctrlblk.curtail = head;
+ rcu_ctrlblk.curtail = &head->next;
local_irq_restore(flags);
if (unlikely(is_idle_task(current))) {
- /* force scheduling for rcu_sched_qs() */
+ /* force scheduling for rcu_qs() */
resched_cpu(0);
}
}
-
-/*
- * Post an RCU callback to be invoked after the end of an RCU-sched grace
- * period. But since we have but one CPU, that would be after any
- * quiescent state.
- */
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
-{
- __call_rcu(head, func, &rcu_sched_ctrlblk);
-}
-EXPORT_SYMBOL_GPL(call_rcu_sched);
-
-/*
- * Post an RCU bottom-half callback to be invoked after any subsequent
- * quiescent state.
- */
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
-{
- __call_rcu(head, func, &rcu_bh_ctrlblk);
-}
-EXPORT_SYMBOL_GPL(call_rcu_bh);
+EXPORT_SYMBOL_GPL(call_rcu);
void __init rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
rcu_early_boot_tests();
+ srcu_init();
}
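For Tiny RCU users the consolidation is a mechanical rename; a hedged migration sketch:

	#include <linux/rcupdate.h>

	static struct rcu_head rh;			/* hypothetical object */
	static void reclaim(struct rcu_head *rhp) { }

	static void example(void)
	{
		call_rcu(&rh, reclaim);	/* was call_rcu_bh()/call_rcu_sched() */
		synchronize_rcu();	/* was synchronize_sched() */
		rcu_barrier();		/* was rcu_barrier_bh()/rcu_barrier_sched() */
	}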
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0b760c1369f7..121f833acd04 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -61,6 +61,7 @@
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
+#include <linux/tick.h>
#include "tree.h"
#include "rcu.h"
@@ -73,45 +74,31 @@
/* Data structures. */
/*
- * In order to export the rcu_state name to the tracing tools, it
- * needs to be added in the __tracepoint_string section.
- * This requires defining a separate variable tp_<sname>_varname
- * that points to the string being used, and this will allow
- * the tracing userspace tools to be able to decipher the string
- * address to the matching string.
+ * Steal a bit from the bottom of ->dynticks for idle entry/exit
+ * control. Initially this is for TLB flushing.
*/
-#ifdef CONFIG_TRACING
-# define DEFINE_RCU_TPS(sname) \
-static char sname##_varname[] = #sname; \
-static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
-# define RCU_STATE_NAME(sname) sname##_varname
-#else
-# define DEFINE_RCU_TPS(sname)
-# define RCU_STATE_NAME(sname) __stringify(sname)
+#define RCU_DYNTICK_CTRL_MASK 0x1
+#define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1)
+#ifndef rcu_eqs_special_exit
+#define rcu_eqs_special_exit() do { } while (0)
#endif
-#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
-DEFINE_RCU_TPS(sname) \
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
-struct rcu_state sname##_state = { \
- .level = { &sname##_state.node[0] }, \
- .rda = &sname##_data, \
- .call = cr, \
- .gp_state = RCU_GP_IDLE, \
- .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, \
- .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
- .name = RCU_STATE_NAME(sname), \
- .abbr = sabbr, \
- .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
- .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
- .ofl_lock = __SPIN_LOCK_UNLOCKED(sname##_state.ofl_lock), \
-}
-
-RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
-RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
-
-static struct rcu_state *const rcu_state_p;
-LIST_HEAD(rcu_struct_flavors);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
+ .dynticks_nesting = 1,
+ .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
+ .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
+};
+struct rcu_state rcu_state = {
+ .level = { &rcu_state.node[0] },
+ .gp_state = RCU_GP_IDLE,
+ .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
+ .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
+ .name = RCU_NAME,
+ .abbr = RCU_ABBR,
+ .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
+ .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
+ .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
+};
/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
@@ -158,16 +145,14 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
*/
static int rcu_scheduler_fully_active __read_mostly;
-static void
-rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
- struct rcu_node *rnp, unsigned long gps, unsigned long flags);
+static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
+ unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
-static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
-static void rcu_report_exp_rdp(struct rcu_state *rsp,
- struct rcu_data *rdp, bool wake);
+static void invoke_rcu_callbacks(struct rcu_data *rdp);
+static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
/* rcuc/rcub kthread realtime priority */
@@ -183,7 +168,7 @@ module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
-/* Retreive RCU kthreads priority for rcutorture */
+/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
return kthread_prio;
@@ -217,67 +202,24 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
* permit this function to be invoked without holding the root rcu_node
* structure's ->lock, but of course results can be subject to change.
*/
-static int rcu_gp_in_progress(struct rcu_state *rsp)
+static int rcu_gp_in_progress(void)
{
- return rcu_seq_state(rcu_seq_current(&rsp->gp_seq));
-}
-
-/*
- * Note a quiescent state. Because we do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period, this just sets a flag.
- * The caller must have disabled preemption.
- */
-void rcu_sched_qs(void)
-{
- RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!");
- if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
- return;
- trace_rcu_grace_period(TPS("rcu_sched"),
- __this_cpu_read(rcu_sched_data.gp_seq),
- TPS("cpuqs"));
- __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
- if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
- return;
- __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
- rcu_report_exp_rdp(&rcu_sched_state,
- this_cpu_ptr(&rcu_sched_data), true);
+ return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}
-void rcu_bh_qs(void)
+void rcu_softirq_qs(void)
{
- RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
- if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
- trace_rcu_grace_period(TPS("rcu_bh"),
- __this_cpu_read(rcu_bh_data.gp_seq),
- TPS("cpuqs"));
- __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
- }
+ rcu_qs();
+ rcu_preempt_deferred_qs(current);
}
/*
- * Steal a bit from the bottom of ->dynticks for idle entry/exit
- * control. Initially this is for TLB flushing.
- */
-#define RCU_DYNTICK_CTRL_MASK 0x1
-#define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1)
-#ifndef rcu_eqs_special_exit
-#define rcu_eqs_special_exit() do { } while (0)
-#endif
-
-static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
- .dynticks_nesting = 1,
- .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
- .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
-};
-
-/*
* Record entry into an extended quiescent state. This is only to be
* called when not already in an extended quiescent state.
*/
static void rcu_dynticks_eqs_enter(void)
{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
int seq;
/*
@@ -285,7 +227,7 @@ static void rcu_dynticks_eqs_enter(void)
* critical sections, and we also must force ordering with the
* next idle sojourn.
*/
- seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+ seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
/* Better be in an extended quiescent state! */
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
(seq & RCU_DYNTICK_CTRL_CTR));
@@ -300,7 +242,7 @@ static void rcu_dynticks_eqs_enter(void)
*/
static void rcu_dynticks_eqs_exit(void)
{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
int seq;
/*
@@ -308,11 +250,11 @@ static void rcu_dynticks_eqs_exit(void)
* and we also must force ordering with the next RCU read-side
* critical section.
*/
- seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+ seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
!(seq & RCU_DYNTICK_CTRL_CTR));
if (seq & RCU_DYNTICK_CTRL_MASK) {
- atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
+ atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
smp_mb__after_atomic(); /* _exit after clearing mask. */
/* Prefer duplicate flushes to losing a flush. */
rcu_eqs_special_exit();
@@ -331,11 +273,11 @@ static void rcu_dynticks_eqs_exit(void)
*/
static void rcu_dynticks_eqs_online(void)
{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
- if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
+ if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
return;
- atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+ atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
}
/*
@@ -345,18 +287,18 @@ static void rcu_dynticks_eqs_online(void)
*/
bool rcu_dynticks_curr_cpu_in_eqs(void)
{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
- return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
+ return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}
/*
* Snapshot the ->dynticks counter with full ordering so as to allow
* stable comparison of this counter with past and future snapshots.
*/
-int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
+int rcu_dynticks_snap(struct rcu_data *rdp)
{
- int snap = atomic_add_return(0, &rdtp->dynticks);
+ int snap = atomic_add_return(0, &rdp->dynticks);
return snap & ~RCU_DYNTICK_CTRL_MASK;
}
@@ -371,13 +313,13 @@ static bool rcu_dynticks_in_eqs(int snap)
}
/*
- * Return true if the CPU corresponding to the specified rcu_dynticks
+ * Return true if the CPU corresponding to the specified rcu_data
* structure has spent some time in an extended quiescent state since
* rcu_dynticks_snap() returned the specified snapshot.
*/
-static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
+static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
- return snap != rcu_dynticks_snap(rdtp);
+ return snap != rcu_dynticks_snap(rdp);
}
/*
@@ -391,14 +333,14 @@ bool rcu_eqs_special_set(int cpu)
{
int old;
int new;
- struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
do {
- old = atomic_read(&rdtp->dynticks);
+ old = atomic_read(&rdp->dynticks);
if (old & RCU_DYNTICK_CTRL_CTR)
return false;
new = old | RCU_DYNTICK_CTRL_MASK;
- } while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
+ } while (atomic_cmpxchg(&rdp->dynticks, old, new) != old);
return true;
}
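
rcu_eqs_special_set() is the remote half of that protocol: it may set the flag bit only while the target CPU's counter shows it is in an extended quiescent state, otherwise it returns false and the caller must fall back to an IPI or to waiting. A user-space sketch of the same compare-exchange pattern, using C11 atomics rather than the kernel API:

#include <stdatomic.h>
#include <stdbool.h>

static bool set_flag_if_idle(atomic_int *v)
{
	int old = atomic_load(v);
	int new;

	do {
		if (old & 0x2)		/* counter bit set: CPU is not in EQS */
			return false;
		new = old | 0x1;	/* request special work on EQS exit */
	} while (!atomic_compare_exchange_weak(v, &old, new));
	return true;
}
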
@@ -413,82 +355,30 @@ bool rcu_eqs_special_set(int cpu)
*
* The caller must have disabled interrupts and must not be idle.
*/
-static void rcu_momentary_dyntick_idle(void)
+static void __maybe_unused rcu_momentary_dyntick_idle(void)
{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
int special;
- raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
- special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+ raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
+ special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
+ &this_cpu_ptr(&rcu_data)->dynticks);
/* It is illegal to call this from idle state. */
WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
+ rcu_preempt_deferred_qs(current);
}
-/*
- * Note a context switch. This is a quiescent state for RCU-sched,
- * and requires special handling for preemptible RCU.
- * The caller must have disabled interrupts.
- */
-void rcu_note_context_switch(bool preempt)
-{
- barrier(); /* Avoid RCU read-side critical sections leaking down. */
- trace_rcu_utilization(TPS("Start context switch"));
- rcu_sched_qs();
- rcu_preempt_note_context_switch(preempt);
- /* Load rcu_urgent_qs before other flags. */
- if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
- goto out;
- this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
- if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
- rcu_momentary_dyntick_idle();
- this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
- if (!preempt)
- rcu_tasks_qs(current);
-out:
- trace_rcu_utilization(TPS("End context switch"));
- barrier(); /* Avoid RCU read-side critical sections leaking up. */
-}
-EXPORT_SYMBOL_GPL(rcu_note_context_switch);
-
-/*
- * Register a quiescent state for all RCU flavors. If there is an
- * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
- * dyntick-idle quiescent state visible to other CPUs (but only for those
- * RCU flavors in desperate need of a quiescent state, which will normally
- * be none of them). Either way, do a lightweight quiescent state for
- * all RCU flavors.
- *
- * The barrier() calls are redundant in the common case when this is
- * called externally, but just in case this is called from within this
- * file.
+/**
+ * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
*
+ * If the current CPU is idle or running at a first-level (not nested)
+ * interrupt from idle, return true. The caller must have at least
+ * disabled preemption.
*/
-void rcu_all_qs(void)
+static int rcu_is_cpu_rrupt_from_idle(void)
{
- unsigned long flags;
-
- if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
- return;
- preempt_disable();
- /* Load rcu_urgent_qs before other flags. */
- if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
- preempt_enable();
- return;
- }
- this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
- barrier(); /* Avoid RCU read-side critical sections leaking down. */
- if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
- local_irq_save(flags);
- rcu_momentary_dyntick_idle();
- local_irq_restore(flags);
- }
- if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
- rcu_sched_qs();
- this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
- barrier(); /* Avoid RCU read-side critical sections leaking up. */
- preempt_enable();
+ return __this_cpu_read(rcu_data.dynticks_nesting) <= 0 &&
+ __this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 1;
}
-EXPORT_SYMBOL_GPL(rcu_all_qs);
#define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch. */
static long blimit = DEFAULT_RCU_BLIMIT;
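
The two counters read by rcu_is_cpu_rrupt_from_idle() above split the bookkeeping: ->dynticks_nesting counts process-level non-idle sections, while ->dynticks_nmi_nesting counts irq/NMI levels stacked on top. A sketch with illustrative values (DYNTICK_IRQ_NONIDLE is a large positive sentinel defined in tree.h):

/*
 *   dynticks_nesting  dynticks_nmi_nesting  state
 *   1                 DYNTICK_IRQ_NONIDLE   task running, not idle
 *   0                 0                     idle loop (EQS)
 *   0                 1                     first-level irq from idle -> true
 *   0                 3                     nested irq/NMI from idle  -> false
 */
static int sketch_rrupt_from_idle(long nesting, long nmi_nesting)
{
	return nesting <= 0 && nmi_nesting <= 1;
}
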
@@ -505,13 +395,47 @@ static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
+/*
+ * How long the grace period must be before we start recruiting
+ * quiescent-state help from rcu_note_context_switch().
+ */
+static ulong jiffies_till_sched_qs = ULONG_MAX;
+module_param(jiffies_till_sched_qs, ulong, 0444);
+static ulong jiffies_to_sched_qs; /* Adjusted version of above if not default */
+module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
+
+/*
+ * Make sure that we give the grace-period kthread time to detect any
+ * idle CPUs before taking active measures to force quiescent states.
+ * However, don't go below 100 milliseconds, adjusted upwards for really
+ * large systems.
+ */
+static void adjust_jiffies_till_sched_qs(void)
+{
+ unsigned long j;
+
+ /* If jiffies_till_sched_qs was specified, respect the request. */
+ if (jiffies_till_sched_qs != ULONG_MAX) {
+ WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
+ return;
+ }
+ j = READ_ONCE(jiffies_till_first_fqs) +
+ 2 * READ_ONCE(jiffies_till_next_fqs);
+ if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
+ j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
+ pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
+ WRITE_ONCE(jiffies_to_sched_qs, j);
+}
+
static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
ulong j;
int ret = kstrtoul(val, 0, &j);
- if (!ret)
+ if (!ret) {
WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
+ adjust_jiffies_till_sched_qs();
+ }
return ret;
}
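
A worked example for adjust_jiffies_till_sched_qs(), under assumed values: HZ=1000, nr_cpu_ids=8, RCU_JIFFIES_FQS_DIV=256 (the rcu.h value of this era), and both fqs intervals already resolved to 3 jiffies, a common HZ=1000 default:

	j     = 3 + 2 * 3;		/* = 9 jiffies from the fqs knobs */
	floor = 1000 / 10 + 8 / 256;	/* = 100 + 0 = 100 jiffies */
	/* j < floor, so jiffies_to_sched_qs becomes 100, i.e. 100 ms. */
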
@@ -520,8 +444,10 @@ static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param
ulong j;
int ret = kstrtoul(val, 0, &j);
- if (!ret)
+ if (!ret) {
WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
+ adjust_jiffies_till_sched_qs();
+ }
return ret;
}
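
Both setters clamp identically at the top end (at most HZ), and the next-fqs setter additionally floors at 1 via the GNU `j ?: 1` shorthand, so quiescent-state forcing can never be configured to run every 0 jiffies. A sketch of the stored value, assuming HZ=1000 (the helper name is hypothetical):

static ulong sketch_clamp_next_fqs(ulong j)
{
	return (j > HZ) ? HZ : (j ?: 1);	/* 0 -> 1, 5000 -> 1000 */
}
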
@@ -539,15 +465,8 @@ module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_fi
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
-/*
- * How long the grace period must be before we start recruiting
- * quiescent-state help from rcu_note_context_switch().
- */
-static ulong jiffies_till_sched_qs = HZ / 10;
-module_param(jiffies_till_sched_qs, ulong, 0444);
-
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
-static void force_quiescent_state(struct rcu_state *rsp);
+static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
+static void force_quiescent_state(void);
static int rcu_pending(void);
/*
@@ -555,29 +474,11 @@ static int rcu_pending(void);
*/
unsigned long rcu_get_gp_seq(void)
{
- return READ_ONCE(rcu_state_p->gp_seq);
+ return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
/*
- * Return the number of RCU-sched GPs completed thus far for debug & stats.
- */
-unsigned long rcu_sched_get_gp_seq(void)
-{
- return READ_ONCE(rcu_sched_state.gp_seq);
-}
-EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
-
-/*
- * Return the number of RCU-bh GPs completed thus far for debug & stats.
- */
-unsigned long rcu_bh_get_gp_seq(void)
-{
- return READ_ONCE(rcu_bh_state.gp_seq);
-}
-EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
-
-/*
* Return the number of RCU expedited batches completed thus far for
* debug & stats. Odd numbers mean that a batch is in progress, even
* numbers mean idle. The value returned will thus be roughly double
@@ -585,48 +486,20 @@ EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
*/
unsigned long rcu_exp_batches_completed(void)
{
- return rcu_state_p->expedited_sequence;
+ return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
/*
- * Return the number of RCU-sched expedited batches completed thus far
- * for debug & stats. Similar to rcu_exp_batches_completed().
- */
-unsigned long rcu_exp_batches_completed_sched(void)
-{
- return rcu_sched_state.expedited_sequence;
-}
-EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
-
-/*
* Force a quiescent state.
*/
void rcu_force_quiescent_state(void)
{
- force_quiescent_state(rcu_state_p);
+ force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
- * Force a quiescent state for RCU BH.
- */
-void rcu_bh_force_quiescent_state(void)
-{
- force_quiescent_state(&rcu_bh_state);
-}
-EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
-
-/*
- * Force a quiescent state for RCU-sched.
- */
-void rcu_sched_force_quiescent_state(void)
-{
- force_quiescent_state(&rcu_sched_state);
-}
-EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
-
-/*
* Show the state of the grace-period kthreads.
*/
void show_rcu_gp_kthreads(void)
@@ -634,31 +507,28 @@ void show_rcu_gp_kthreads(void)
int cpu;
struct rcu_data *rdp;
struct rcu_node *rnp;
- struct rcu_state *rsp;
- for_each_rcu_flavor(rsp) {
- pr_info("%s: wait state: %d ->state: %#lx\n",
- rsp->name, rsp->gp_state, rsp->gp_kthread->state);
- rcu_for_each_node_breadth_first(rsp, rnp) {
- if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
- continue;
- pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
- rnp->grplo, rnp->grphi, rnp->gp_seq,
- rnp->gp_seq_needed);
- if (!rcu_is_leaf_node(rnp))
+ pr_info("%s: wait state: %d ->state: %#lx\n", rcu_state.name,
+ rcu_state.gp_state, rcu_state.gp_kthread->state);
+ rcu_for_each_node_breadth_first(rnp) {
+ if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
+ continue;
+ pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
+ rnp->grplo, rnp->grphi, rnp->gp_seq,
+ rnp->gp_seq_needed);
+ if (!rcu_is_leaf_node(rnp))
+ continue;
+ for_each_leaf_node_possible_cpu(rnp, cpu) {
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+ if (rdp->gpwrap ||
+ ULONG_CMP_GE(rcu_state.gp_seq,
+ rdp->gp_seq_needed))
continue;
- for_each_leaf_node_possible_cpu(rnp, cpu) {
- rdp = per_cpu_ptr(rsp->rda, cpu);
- if (rdp->gpwrap ||
- ULONG_CMP_GE(rsp->gp_seq,
- rdp->gp_seq_needed))
- continue;
- pr_info("\tcpu %d ->gp_seq_needed %lu\n",
- cpu, rdp->gp_seq_needed);
- }
+ pr_info("\tcpu %d ->gp_seq_needed %lu\n",
+ cpu, rdp->gp_seq_needed);
}
- /* sched_show_task(rsp->gp_kthread); */
}
+ /* sched_show_task(rcu_state.gp_kthread); */
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
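
With a single flavor, show_rcu_gp_kthreads() flattens from a per-flavor loop into one pass over the combining tree. Output assembled from the format strings above, with made-up values and assuming a preemptible kernel where RCU_NAME is "rcu_preempt":

	rcu_preempt: wait state: 1 ->state: 0x402
		rcu_node 0:15 ->gp_seq 5304 ->gp_seq_needed 5308
		cpu 3 ->gp_seq_needed 5308
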
@@ -668,34 +538,25 @@ EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
unsigned long *gp_seq)
{
- struct rcu_state *rsp = NULL;
-
switch (test_type) {
case RCU_FLAVOR:
- rsp = rcu_state_p;
- break;
case RCU_BH_FLAVOR:
- rsp = &rcu_bh_state;
- break;
case RCU_SCHED_FLAVOR:
- rsp = &rcu_sched_state;
+ *flags = READ_ONCE(rcu_state.gp_flags);
+ *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
break;
default:
break;
}
- if (rsp == NULL)
- return;
- *flags = READ_ONCE(rsp->gp_flags);
- *gp_seq = rcu_seq_current(&rsp->gp_seq);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
/*
- * Return the root node of the specified rcu_state structure.
+ * Return the root node of the rcu_state structure.
*/
-static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+static struct rcu_node *rcu_get_root(void)
{
- return &rsp->node[0];
+ return &rcu_state.node[0];
}
/*
@@ -708,28 +569,25 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
*/
static void rcu_eqs_enter(bool user)
{
- struct rcu_state *rsp;
- struct rcu_data *rdp;
- struct rcu_dynticks *rdtp;
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
- rdtp = this_cpu_ptr(&rcu_dynticks);
- WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
+ WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
+ WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- rdtp->dynticks_nesting == 0);
- if (rdtp->dynticks_nesting != 1) {
- rdtp->dynticks_nesting--;
+ rdp->dynticks_nesting == 0);
+ if (rdp->dynticks_nesting != 1) {
+ rdp->dynticks_nesting--;
return;
}
lockdep_assert_irqs_disabled();
- trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
+ trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, rdp->dynticks);
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
- for_each_rcu_flavor(rsp) {
- rdp = this_cpu_ptr(rsp->rda);
- do_nocb_deferred_wakeup(rdp);
- }
+ rdp = this_cpu_ptr(&rcu_data);
+ do_nocb_deferred_wakeup(rdp);
rcu_prepare_for_idle();
- WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
+ rcu_preempt_deferred_qs(current);
+ WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
rcu_dynticks_eqs_enter();
rcu_dynticks_task_enter();
}
@@ -770,44 +628,61 @@ void rcu_user_enter(void)
}
#endif /* CONFIG_NO_HZ_FULL */
-/**
- * rcu_nmi_exit - inform RCU of exit from NMI context
- *
+/*
* If we are returning from the outermost NMI handler that interrupted an
- * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
+ * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
* to let the RCU grace-period handling know that the CPU is back to
* being RCU-idle.
*
- * If you add or remove a call to rcu_nmi_exit(), be sure to test
+ * If you add or remove a call to rcu_nmi_exit_common(), be sure to test
* with CONFIG_RCU_EQS_DEBUG=y.
*/
-void rcu_nmi_exit(void)
+static __always_inline void rcu_nmi_exit_common(bool irq)
{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
/*
* Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
* (We are exiting an NMI handler, so RCU better be paying attention
* to us!)
*/
- WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
+ WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
/*
* If the nesting level is not 1, the CPU wasn't RCU-idle, so
* leave it in non-RCU-idle state.
*/
- if (rdtp->dynticks_nmi_nesting != 1) {
- trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
- WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
- rdtp->dynticks_nmi_nesting - 2);
+ if (rdp->dynticks_nmi_nesting != 1) {
+ trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks);
+ WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
+ rdp->dynticks_nmi_nesting - 2);
return;
}
/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
- trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
- WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
+ trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, rdp->dynticks);
+ WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
+
+ if (irq)
+ rcu_prepare_for_idle();
+
rcu_dynticks_eqs_enter();
+
+ if (irq)
+ rcu_dynticks_task_enter();
+}
+
+/**
+ * rcu_nmi_exit - inform RCU of exit from NMI context
+ * @irq: Is this call from rcu_irq_exit?

+ *
+ * If you add or remove a call to rcu_nmi_exit(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
+void rcu_nmi_exit(void)
+{
+ rcu_nmi_exit_common(false);
}
/**
@@ -831,14 +706,8 @@ void rcu_nmi_exit(void)
*/
void rcu_irq_exit(void)
{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-
lockdep_assert_irqs_disabled();
- if (rdtp->dynticks_nmi_nesting == 1)
- rcu_prepare_for_idle();
- rcu_nmi_exit();
- if (rdtp->dynticks_nmi_nesting == 0)
- rcu_dynticks_task_enter();
+ rcu_nmi_exit_common(true);
}
/*
@@ -866,24 +735,25 @@ void rcu_irq_exit_irqson(void)
*/
static void rcu_eqs_exit(bool user)
{
- struct rcu_dynticks *rdtp;
+ struct rcu_data *rdp;
long oldval;
lockdep_assert_irqs_disabled();
- rdtp = this_cpu_ptr(&rcu_dynticks);
- oldval = rdtp->dynticks_nesting;
+ rdp = this_cpu_ptr(&rcu_data);
+ oldval = rdp->dynticks_nesting;
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
if (oldval) {
- rdtp->dynticks_nesting++;
+ rdp->dynticks_nesting++;
return;
}
rcu_dynticks_task_exit();
rcu_dynticks_eqs_exit();
rcu_cleanup_after_idle();
- trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, 1, rdtp->dynticks);
+ trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, rdp->dynticks);
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
- WRITE_ONCE(rdtp->dynticks_nesting, 1);
- WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
+ WRITE_ONCE(rdp->dynticks_nesting, 1);
+ WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
+ WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
}
/**
@@ -921,24 +791,25 @@ void rcu_user_exit(void)
#endif /* CONFIG_NO_HZ_FULL */
/**
- * rcu_nmi_enter - inform RCU of entry to NMI context
+ * rcu_nmi_enter_common - inform RCU of entry to NMI context
+ * @irq: Is this call from rcu_irq_enter?
*
- * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
- * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
+ * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
+ * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
* that the CPU is active. This implementation permits nested NMIs, as
* long as the nesting level does not overflow an int. (You will probably
* run out of stack space first.)
*
- * If you add or remove a call to rcu_nmi_enter(), be sure to test
+ * If you add or remove a call to rcu_nmi_enter_common(), be sure to test
* with CONFIG_RCU_EQS_DEBUG=y.
*/
-void rcu_nmi_enter(void)
+static __always_inline void rcu_nmi_enter_common(bool irq)
{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
long incby = 2;
/* Complain about underflow. */
- WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
+ WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
/*
* If idle from RCU viewpoint, atomically increment ->dynticks
@@ -949,18 +820,34 @@ void rcu_nmi_enter(void)
* period (observation due to Andy Lutomirski).
*/
if (rcu_dynticks_curr_cpu_in_eqs()) {
+
+ if (irq)
+ rcu_dynticks_task_exit();
+
rcu_dynticks_eqs_exit();
+
+ if (irq)
+ rcu_cleanup_after_idle();
+
incby = 1;
}
trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
- rdtp->dynticks_nmi_nesting,
- rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
- WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
- rdtp->dynticks_nmi_nesting + incby);
+ rdp->dynticks_nmi_nesting,
+ rdp->dynticks_nmi_nesting + incby, rdp->dynticks);
+ WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
+ rdp->dynticks_nmi_nesting + incby);
barrier();
}
/**
+ * rcu_nmi_enter - inform RCU of entry to NMI context
+ */
+void rcu_nmi_enter(void)
+{
+ rcu_nmi_enter_common(false);
+}
+
+/**
* rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
*
* Enter an interrupt handler, which might possibly result in exiting
@@ -984,14 +871,8 @@ void rcu_nmi_enter(void)
*/
void rcu_irq_enter(void)
{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-
lockdep_assert_irqs_disabled();
- if (rdtp->dynticks_nmi_nesting == 0)
- rcu_dynticks_task_exit();
- rcu_nmi_enter();
- if (rdtp->dynticks_nmi_nesting == 1)
- rcu_cleanup_after_idle();
+ rcu_nmi_enter_common(true);
}
/*
@@ -1043,7 +924,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
cpu = task_cpu(t);
if (!task_curr(t))
return; /* This task is not running on that CPU. */
- smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
+ smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
@@ -1054,11 +935,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
* Disable preemption to avoid false positives that could otherwise
* happen due to the current CPU number being sampled, this task being
* preempted, its old CPU being taken offline, resuming on some other CPU,
- * then determining that its old CPU is now offline. Because there are
- * multiple flavors of RCU, and because this function can be called in the
- * midst of updating the flavors while a given CPU coming online or going
- * offline, it is necessary to check all flavors. If any of the flavors
- * believe that given CPU is online, it is considered to be online.
+ * then determining that its old CPU is now offline.
*
* Disable checking if in an NMI handler because we cannot safely
* report errors from NMI handlers anyway. In addition, it is OK to use
@@ -1069,39 +946,22 @@ bool rcu_lockdep_current_cpu_online(void)
{
struct rcu_data *rdp;
struct rcu_node *rnp;
- struct rcu_state *rsp;
+ bool ret = false;
if (in_nmi() || !rcu_scheduler_fully_active)
return true;
preempt_disable();
- for_each_rcu_flavor(rsp) {
- rdp = this_cpu_ptr(rsp->rda);
- rnp = rdp->mynode;
- if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) {
- preempt_enable();
- return true;
- }
- }
+ rdp = this_cpu_ptr(&rcu_data);
+ rnp = rdp->mynode;
+ if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
+ ret = true;
preempt_enable();
- return false;
+ return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
-/**
- * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
- *
- * If the current CPU is idle or running at a first-level (not nested)
- * interrupt from idle, return true. The caller must have at least
- * disabled preemption.
- */
-static int rcu_is_cpu_rrupt_from_idle(void)
-{
- return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
- __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
-}
-
/*
* We are reporting a quiescent state on behalf of some other CPU, so
* it is our responsibility to check for and handle potential overflow
@@ -1126,9 +986,9 @@ static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
*/
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
- rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
+ rdp->dynticks_snap = rcu_dynticks_snap(rdp);
if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
+ trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rcu_gpnum_ovf(rdp->mynode, rdp);
return 1;
}
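
dyntick_save_progress_counter() is the first of the two force-quiescent-state passes: it snapshots ->dynticks, and if the snapshot itself shows an EQS the CPU is quiescent on the spot. Otherwise rcu_implicit_dynticks_qs() below compares later snapshots against it; any movement means the CPU passed through an EQS in the interim. A sketch of that comparison, assuming the counter moves in steps of 2 as above:

static bool sketch_passed_through_eqs(int old_snap, int new_snap)
{
	/* Equal snapshots: the CPU never left the kernel.  Any change
	 * implies at least one EQS entry or exit since the first pass. */
	return new_snap != old_snap;
}
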
@@ -1177,35 +1037,15 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
* read-side critical section that started before the beginning
* of the current RCU grace period.
*/
- if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
- rdp->dynticks_fqs++;
- rcu_gpnum_ovf(rnp, rdp);
- return 1;
- }
-
- /*
- * Has this CPU encountered a cond_resched() since the beginning
- * of the grace period? For this to be the case, the CPU has to
- * have noticed the current grace period. This might not be the
- * case for nohz_full CPUs looping in the kernel.
- */
- jtsq = jiffies_till_sched_qs;
- ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
- if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
- READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
- rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
+ if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
+ trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rcu_gpnum_ovf(rnp, rdp);
return 1;
- } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
- /* Load rcu_qs_ctr before store to rcu_urgent_qs. */
- smp_store_release(ruqp, true);
}
/* If waiting too long on an offline CPU, complain. */
if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
- time_after(jiffies, rdp->rsp->gp_start + HZ)) {
+ time_after(jiffies, rcu_state.gp_start + HZ)) {
bool onl;
struct rcu_node *rnp1;
@@ -1226,39 +1066,56 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
/*
* A CPU running for an extended time within the kernel can
- * delay RCU grace periods. When the CPU is in NO_HZ_FULL mode,
- * even context-switching back and forth between a pair of
- * in-kernel CPU-bound tasks cannot advance grace periods.
- * So if the grace period is old enough, make the CPU pay attention.
- * Note that the unsynchronized assignments to the per-CPU
- * rcu_need_heavy_qs variable are safe. Yes, setting of
- * bits can be lost, but they will be set again on the next
- * force-quiescent-state pass. So lost bit sets do not result
- * in incorrect behavior, merely in a grace period lasting
- * a few jiffies longer than it might otherwise. Because
- * there are at most four threads involved, and because the
- * updates are only once every few jiffies, the probability of
- * lossage (and thus of slight grace-period extension) is
- * quite low.
+ * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
+ * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
+ * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
+ * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
+ * variable are safe because the assignments are repeated if this
+ * CPU failed to pass through a quiescent state. This code
+ * also checks .jiffies_resched in case jiffies_to_sched_qs
+ * is set way high.
*/
- rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
+ jtsq = READ_ONCE(jiffies_to_sched_qs);
+ ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
+ rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
if (!READ_ONCE(*rnhqp) &&
- (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
- time_after(jiffies, rdp->rsp->jiffies_resched))) {
+ (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
+ time_after(jiffies, rcu_state.jiffies_resched))) {
WRITE_ONCE(*rnhqp, true);
/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
smp_store_release(ruqp, true);
- rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
+ } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
+ WRITE_ONCE(*ruqp, true);
}
/*
- * If more than halfway to RCU CPU stall-warning time, do a
- * resched_cpu() to try to loosen things up a bit. Also check to
- * see if the CPU is getting hammered with interrupts, but only
- * once per grace period, just to keep the IPIs down to a dull roar.
+ * NO_HZ_FULL CPUs can run in-kernel without rcu_check_callbacks!
+ * The above code handles this, but only for straight cond_resched().
+ * And some in-kernel loops check need_resched() before calling
+ * cond_resched(), which defeats the above code for CPUs that are
+ * running in-kernel with scheduling-clock interrupts disabled.
+ * So hit them over the head with the resched_cpu() hammer!
*/
- if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
+ if (tick_nohz_full_cpu(rdp->cpu) &&
+ time_after(jiffies,
+ READ_ONCE(rdp->last_fqs_resched) + jtsq * 3)) {
resched_cpu(rdp->cpu);
+ WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+ }
+
+ /*
+ * If more than halfway to RCU CPU stall-warning time, invoke
+ * resched_cpu() more frequently to try to loosen things up a bit.
+ * Also check to see if the CPU is getting hammered with interrupts,
+ * but only once per grace period, just to keep the IPIs down to
+ * a dull roar.
+ */
+ if (time_after(jiffies, rcu_state.jiffies_resched)) {
+ if (time_after(jiffies,
+ READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
+ resched_cpu(rdp->cpu);
+ WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+ }
if (IS_ENABLED(CONFIG_IRQ_WORK) &&
!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
(rnp->ffmask & rdp->grpmask)) {
@@ -1272,17 +1129,17 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
return 0;
}
-static void record_gp_stall_check_time(struct rcu_state *rsp)
+static void record_gp_stall_check_time(void)
{
unsigned long j = jiffies;
unsigned long j1;
- rsp->gp_start = j;
+ rcu_state.gp_start = j;
j1 = rcu_jiffies_till_stall_check();
/* Record ->gp_start before ->jiffies_stall. */
- smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */
- rsp->jiffies_resched = j + j1 / 2;
- rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
+ smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
+ rcu_state.jiffies_resched = j + j1 / 2;
+ rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}
/*
@@ -1298,25 +1155,23 @@ static const char *gp_state_getname(short gs)
/*
* Complain about starvation of grace-period kthread.
*/
-static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
+static void rcu_check_gp_kthread_starvation(void)
{
- unsigned long gpa;
+ struct task_struct *gpk = rcu_state.gp_kthread;
unsigned long j;
- j = jiffies;
- gpa = READ_ONCE(rsp->gp_activity);
- if (j - gpa > 2 * HZ) {
+ j = jiffies - READ_ONCE(rcu_state.gp_activity);
+ if (j > 2 * HZ) {
pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
- rsp->name, j - gpa,
- (long)rcu_seq_current(&rsp->gp_seq),
- rsp->gp_flags,
- gp_state_getname(rsp->gp_state), rsp->gp_state,
- rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
- rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1);
- if (rsp->gp_kthread) {
+ rcu_state.name, j,
+ (long)rcu_seq_current(&rcu_state.gp_seq),
+ rcu_state.gp_flags,
+ gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
+ gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
+ if (gpk) {
pr_err("RCU grace-period kthread stack dump:\n");
- sched_show_task(rsp->gp_kthread);
- wake_up_process(rsp->gp_kthread);
+ sched_show_task(gpk);
+ wake_up_process(gpk);
}
}
}
@@ -1327,13 +1182,13 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
* that don't support NMI-based stack dumps. The NMI-triggered stack
* traces are more accurate because they are printed by the target CPU.
*/
-static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
+static void rcu_dump_cpu_stacks(void)
{
int cpu;
unsigned long flags;
struct rcu_node *rnp;
- rcu_for_each_leaf_node(rsp, rnp) {
+ rcu_for_each_leaf_node(rnp) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
for_each_leaf_node_possible_cpu(rnp, cpu)
if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
@@ -1347,19 +1202,20 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
* If too much time has passed in the current grace period, and if
* so configured, go kick the relevant kthreads.
*/
-static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
+static void rcu_stall_kick_kthreads(void)
{
unsigned long j;
if (!rcu_kick_kthreads)
return;
- j = READ_ONCE(rsp->jiffies_kick_kthreads);
- if (time_after(jiffies, j) && rsp->gp_kthread &&
- (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
- WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
+ j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
+ if (time_after(jiffies, j) && rcu_state.gp_kthread &&
+ (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
+ WARN_ONCE(1, "Kicking %s grace-period kthread\n",
+ rcu_state.name);
rcu_ftrace_dump(DUMP_ALL);
- wake_up_process(rsp->gp_kthread);
- WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
+ wake_up_process(rcu_state.gp_kthread);
+ WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
}
}
@@ -1369,18 +1225,18 @@ static void panic_on_rcu_stall(void)
panic("RCU Stall\n");
}
-static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
+static void print_other_cpu_stall(unsigned long gp_seq)
{
int cpu;
unsigned long flags;
unsigned long gpa;
unsigned long j;
int ndetected = 0;
- struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_node *rnp = rcu_get_root();
long totqlen = 0;
/* Kick and suppress, if so configured. */
- rcu_stall_kick_kthreads(rsp);
+ rcu_stall_kick_kthreads();
if (rcu_cpu_stall_suppress)
return;
@@ -1389,15 +1245,15 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
* See Documentation/RCU/stallwarn.txt for info on how to debug
* RCU CPU stall warnings.
*/
- pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
+ pr_err("INFO: %s detected stalls on CPUs/tasks:", rcu_state.name);
print_cpu_stall_info_begin();
- rcu_for_each_leaf_node(rsp, rnp) {
+ rcu_for_each_leaf_node(rnp) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
ndetected += rcu_print_task_stall(rnp);
if (rnp->qsmask != 0) {
for_each_leaf_node_possible_cpu(rnp, cpu)
if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
- print_cpu_stall_info(rsp, cpu);
+ print_cpu_stall_info(cpu);
ndetected++;
}
}
@@ -1406,52 +1262,52 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
print_cpu_stall_info_end();
for_each_possible_cpu(cpu)
- totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
+ totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
cpu)->cblist);
pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
- smp_processor_id(), (long)(jiffies - rsp->gp_start),
- (long)rcu_seq_current(&rsp->gp_seq), totqlen);
+ smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
+ (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
if (ndetected) {
- rcu_dump_cpu_stacks(rsp);
+ rcu_dump_cpu_stacks();
/* Complain about tasks blocking the grace period. */
- rcu_print_detail_task_stall(rsp);
+ rcu_print_detail_task_stall();
} else {
- if (rcu_seq_current(&rsp->gp_seq) != gp_seq) {
+ if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
pr_err("INFO: Stall ended before state dump start\n");
} else {
j = jiffies;
- gpa = READ_ONCE(rsp->gp_activity);
+ gpa = READ_ONCE(rcu_state.gp_activity);
pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
- rsp->name, j - gpa, j, gpa,
- jiffies_till_next_fqs,
- rcu_get_root(rsp)->qsmask);
+ rcu_state.name, j - gpa, j, gpa,
+ READ_ONCE(jiffies_till_next_fqs),
+ rcu_get_root()->qsmask);
/* In this case, the current CPU might be at fault. */
sched_show_task(current);
}
}
/* Rewrite if needed in case of slow consoles. */
- if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
- WRITE_ONCE(rsp->jiffies_stall,
+ if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
+ WRITE_ONCE(rcu_state.jiffies_stall,
jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
- rcu_check_gp_kthread_starvation(rsp);
+ rcu_check_gp_kthread_starvation();
panic_on_rcu_stall();
- force_quiescent_state(rsp); /* Kick them all. */
+ force_quiescent_state(); /* Kick them all. */
}
-static void print_cpu_stall(struct rcu_state *rsp)
+static void print_cpu_stall(void)
{
int cpu;
unsigned long flags;
- struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
- struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+ struct rcu_node *rnp = rcu_get_root();
long totqlen = 0;
/* Kick and suppress, if so configured. */
- rcu_stall_kick_kthreads(rsp);
+ rcu_stall_kick_kthreads();
if (rcu_cpu_stall_suppress)
return;
@@ -1460,27 +1316,27 @@ static void print_cpu_stall(struct rcu_state *rsp)
* See Documentation/RCU/stallwarn.txt for info on how to debug
* RCU CPU stall warnings.
*/
- pr_err("INFO: %s self-detected stall on CPU", rsp->name);
+ pr_err("INFO: %s self-detected stall on CPU", rcu_state.name);
print_cpu_stall_info_begin();
raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
- print_cpu_stall_info(rsp, smp_processor_id());
+ print_cpu_stall_info(smp_processor_id());
raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
print_cpu_stall_info_end();
for_each_possible_cpu(cpu)
- totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
+ totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
cpu)->cblist);
pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
- jiffies - rsp->gp_start,
- (long)rcu_seq_current(&rsp->gp_seq), totqlen);
+ jiffies - rcu_state.gp_start,
+ (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
- rcu_check_gp_kthread_starvation(rsp);
+ rcu_check_gp_kthread_starvation();
- rcu_dump_cpu_stacks(rsp);
+ rcu_dump_cpu_stacks();
raw_spin_lock_irqsave_rcu_node(rnp, flags);
/* Rewrite if needed in case of slow consoles. */
- if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
- WRITE_ONCE(rsp->jiffies_stall,
+ if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
+ WRITE_ONCE(rcu_state.jiffies_stall,
jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -1493,10 +1349,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
* progress and it could be we're stuck in kernel space without context
* switches for an entirely unreasonable amount of time.
*/
- resched_cpu(smp_processor_id());
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
}
-static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+static void check_cpu_stall(struct rcu_data *rdp)
{
unsigned long gs1;
unsigned long gs2;
@@ -1507,54 +1364,55 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
struct rcu_node *rnp;
if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
- !rcu_gp_in_progress(rsp))
+ !rcu_gp_in_progress())
return;
- rcu_stall_kick_kthreads(rsp);
+ rcu_stall_kick_kthreads();
j = jiffies;
/*
* Lots of memory barriers to reject false positives.
*
- * The idea is to pick up rsp->gp_seq, then rsp->jiffies_stall,
- * then rsp->gp_start, and finally another copy of rsp->gp_seq.
- * These values are updated in the opposite order with memory
- * barriers (or equivalent) during grace-period initialization
- * and cleanup. Now, a false positive can occur if we get an new
- * value of rsp->gp_start and a old value of rsp->jiffies_stall.
- * But given the memory barriers, the only way that this can happen
- * is if one grace period ends and another starts between these
- * two fetches. This is detected by comparing the second fetch
- * of rsp->gp_seq with the previous fetch from rsp->gp_seq.
+ * The idea is to pick up rcu_state.gp_seq, then
+ * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
+ * another copy of rcu_state.gp_seq. These values are updated in
+ * the opposite order with memory barriers (or equivalent) during
+ * grace-period initialization and cleanup. Now, a false positive
+ * can occur if we get a new value of rcu_state.gp_start and an old
+ * value of rcu_state.jiffies_stall. But given the memory barriers,
+ * the only way that this can happen is if one grace period ends
+ * and another starts between these two fetches. This is detected
+ * by comparing the second fetch of rcu_state.gp_seq with the
+ * previous fetch from rcu_state.gp_seq.
*
- * Given this check, comparisons of jiffies, rsp->jiffies_stall,
- * and rsp->gp_start suffice to forestall false positives.
+ * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
+ * and rcu_state.gp_start suffice to forestall false positives.
*/
- gs1 = READ_ONCE(rsp->gp_seq);
+ gs1 = READ_ONCE(rcu_state.gp_seq);
smp_rmb(); /* Pick up ->gp_seq first... */
- js = READ_ONCE(rsp->jiffies_stall);
+ js = READ_ONCE(rcu_state.jiffies_stall);
smp_rmb(); /* ...then ->jiffies_stall before the rest... */
- gps = READ_ONCE(rsp->gp_start);
+ gps = READ_ONCE(rcu_state.gp_start);
smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
- gs2 = READ_ONCE(rsp->gp_seq);
+ gs2 = READ_ONCE(rcu_state.gp_seq);
if (gs1 != gs2 ||
ULONG_CMP_LT(j, js) ||
ULONG_CMP_GE(gps, js))
return; /* No stall or GP completed since entering function. */
rnp = rdp->mynode;
jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
- if (rcu_gp_in_progress(rsp) &&
+ if (rcu_gp_in_progress() &&
(READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
- cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
+ cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
/* We haven't checked in, so go dump stack. */
- print_cpu_stall(rsp);
+ print_cpu_stall();
- } else if (rcu_gp_in_progress(rsp) &&
+ } else if (rcu_gp_in_progress() &&
ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
- cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
+ cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
/* They had a few time units to dump stack, so complain. */
- print_other_cpu_stall(rsp, gs2);
+ print_other_cpu_stall(gs2);
}
}
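
The fetch ordering in check_cpu_stall() mirrors, in reverse, the order in which grace-period initialization updates these fields, and the gs1 == gs2 comparison is what rejects the race. Schematically (a sketch of the pairing, not patch code):

	/* reader (above)             updater (record_gp_stall_check_time
	 *                             followed by rcu_gp_init)
	 *   gs1 = gp_seq;              gp_start = jiffies;
	 *   smp_rmb();                 smp_store_release(&jiffies_stall, ...);
	 *   js  = jiffies_stall;       rcu_seq_start(&gp_seq);
	 *   smp_rmb();
	 *   gps = gp_start;
	 *   smp_rmb();
	 *   gs2 = gp_seq;
	 * If gs1 != gs2, a grace-period boundary raced with the fetches,
	 * so bail out rather than risk a false stall warning.
	 */
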
@@ -1569,17 +1427,14 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
*/
void rcu_cpu_stall_reset(void)
{
- struct rcu_state *rsp;
-
- for_each_rcu_flavor(rsp)
- WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
+ WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}
/* Trace-event wrapper function for trace_rcu_future_grace_period. */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
unsigned long gp_seq_req, const char *s)
{
- trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req,
+ trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req,
rnp->level, rnp->grplo, rnp->grphi, s);
}
@@ -1603,7 +1458,6 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
unsigned long gp_seq_req)
{
bool ret = false;
- struct rcu_state *rsp = rdp->rsp;
struct rcu_node *rnp;
/*
@@ -1647,18 +1501,18 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
}
/* If GP already in progress, just leave, otherwise start one. */
- if (rcu_gp_in_progress(rsp)) {
+ if (rcu_gp_in_progress()) {
trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
goto unlock_out;
}
trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
- WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
- rsp->gp_req_activity = jiffies;
- if (!rsp->gp_kthread) {
+ WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
+ rcu_state.gp_req_activity = jiffies;
+ if (!rcu_state.gp_kthread) {
trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
goto unlock_out;
}
- trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq"));
+ trace_rcu_grace_period(rcu_state.name, READ_ONCE(rcu_state.gp_seq), TPS("newreq"));
ret = true; /* Caller must wake GP kthread. */
unlock_out:
/* Push furthest requested GP to leaf node and rcu_data structure. */
@@ -1675,10 +1529,10 @@ unlock_out:
* Clean up any old requests for the just-ended grace period. Also return
* whether any additional grace periods have been requested.
*/
-static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
bool needmore;
- struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
if (!needmore)
@@ -1689,19 +1543,18 @@ static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
}
/*
- * Awaken the grace-period kthread for the specified flavor of RCU.
- * Don't do a self-awaken, and don't bother awakening when there is
- * nothing for the grace-period kthread to do (as in several CPUs
- * raced to awaken, and we lost), and finally don't try to awaken
- * a kthread that has not yet been created.
+ * Awaken the grace-period kthread. Don't do a self-awaken, and don't
+ * bother awakening when there is nothing for the grace-period kthread
+ * to do (as in several CPUs raced to awaken, and we lost), and finally
+ * don't try to awaken a kthread that has not yet been created.
*/
-static void rcu_gp_kthread_wake(struct rcu_state *rsp)
+static void rcu_gp_kthread_wake(void)
{
- if (current == rsp->gp_kthread ||
- !READ_ONCE(rsp->gp_flags) ||
- !rsp->gp_kthread)
+ if (current == rcu_state.gp_kthread ||
+ !READ_ONCE(rcu_state.gp_flags) ||
+ !rcu_state.gp_kthread)
return;
- swake_up_one(&rsp->gp_wq);
+ swake_up_one(&rcu_state.gp_wq);
}
/*
@@ -1716,8 +1569,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
*
* The caller must hold rnp->lock with interrupts disabled.
*/
-static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
- struct rcu_data *rdp)
+static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
unsigned long gp_seq_req;
bool ret = false;
@@ -1738,15 +1590,15 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
* accelerating callback invocation to an earlier grace-period
* number.
*/
- gp_seq_req = rcu_seq_snap(&rsp->gp_seq);
+ gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
/* Trace depending on how much we were able to accelerate. */
if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
- trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccWaitCB"));
+ trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB"));
else
- trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccReadyCB"));
+ trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB"));
return ret;
}
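
rcu_accelerate_cbs() tags not-yet-assigned callbacks with the gp_seq value at which a full grace period is guaranteed, obtained from rcu_seq_snap(). With the low two bits of gp_seq holding grace-period state, as in the rcu_seq_* helpers of this era, illustrative values:

	/* gp_seq = 0x100 (no GP running)  -> rcu_seq_snap() = 0x104:
	 *          the very next GP suffices for these callbacks.
	 * gp_seq = 0x101 (GP in progress) -> rcu_seq_snap() = 0x108:
	 *          must wait for the GP after the current one.
	 * If acceleration assigned a new number, rcu_start_this_gp()
	 * makes sure such a grace period is actually requested.
	 */
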
@@ -1757,25 +1609,24 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
* that a new grace-period request be made, invokes rcu_accelerate_cbs()
* while holding the leaf rcu_node structure's ->lock.
*/
-static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
- struct rcu_node *rnp,
+static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
struct rcu_data *rdp)
{
unsigned long c;
bool needwake;
lockdep_assert_irqs_disabled();
- c = rcu_seq_snap(&rsp->gp_seq);
+ c = rcu_seq_snap(&rcu_state.gp_seq);
if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
/* Old request still live, so mark recent callbacks. */
(void)rcu_segcblist_accelerate(&rdp->cblist, c);
return;
}
raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
- needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+ needwake = rcu_accelerate_cbs(rnp, rdp);
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
if (needwake)
- rcu_gp_kthread_wake(rsp);
+ rcu_gp_kthread_wake();
}
/*
@@ -1788,8 +1639,7 @@ static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
*
* The caller must hold rnp->lock with interrupts disabled.
*/
-static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
- struct rcu_data *rdp)
+static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
raw_lockdep_assert_held_rcu_node(rnp);
@@ -1804,7 +1654,7 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
/* Classify any remaining callbacks. */
- return rcu_accelerate_cbs(rsp, rnp, rdp);
+ return rcu_accelerate_cbs(rnp, rdp);
}
/*
@@ -1813,8 +1663,7 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
* structure corresponding to the current CPU, and must have irqs disabled.
* Returns true if the grace-period kthread needs to be awakened.
*/
-static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
- struct rcu_data *rdp)
+static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
{
bool ret;
bool need_gp;
@@ -1827,10 +1676,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
/* Handle the ends of any preceding grace periods first. */
if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
unlikely(READ_ONCE(rdp->gpwrap))) {
- ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
- trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
+ ret = rcu_advance_cbs(rnp, rdp); /* Advance callbacks. */
+ trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
} else {
- ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
+ ret = rcu_accelerate_cbs(rnp, rdp); /* Recent callbacks. */
}
/* Now handle the beginnings of any new-to-this-CPU grace periods. */
@@ -1841,10 +1690,9 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
* set up to detect a quiescent state, otherwise don't
* go looking for one.
*/
- trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart"));
+ trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
need_gp = !!(rnp->qsmask & rdp->grpmask);
rdp->cpu_no_qs.b.norm = need_gp;
- rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
rdp->core_needs_qs = need_gp;
zero_cpu_stall_ticks(rdp);
}
@@ -1856,7 +1704,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
return ret;
}
-static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
+static void note_gp_changes(struct rcu_data *rdp)
{
unsigned long flags;
bool needwake;
@@ -1870,16 +1718,16 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
local_irq_restore(flags);
return;
}
- needwake = __note_gp_changes(rsp, rnp, rdp);
+ needwake = __note_gp_changes(rnp, rdp);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
if (needwake)
- rcu_gp_kthread_wake(rsp);
+ rcu_gp_kthread_wake();
}
-static void rcu_gp_slow(struct rcu_state *rsp, int delay)
+static void rcu_gp_slow(int delay)
{
if (delay > 0 &&
- !(rcu_seq_ctr(rsp->gp_seq) %
+ !(rcu_seq_ctr(rcu_state.gp_seq) %
(rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
schedule_timeout_uninterruptible(delay);
}
@@ -1887,24 +1735,24 @@ static void rcu_gp_slow(struct rcu_state *rsp, int delay)
/*
* Initialize a new grace period. Return false if no grace period required.
*/
-static bool rcu_gp_init(struct rcu_state *rsp)
+static bool rcu_gp_init(void)
{
unsigned long flags;
unsigned long oldmask;
unsigned long mask;
struct rcu_data *rdp;
- struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_node *rnp = rcu_get_root();
- WRITE_ONCE(rsp->gp_activity, jiffies);
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
raw_spin_lock_irq_rcu_node(rnp);
- if (!READ_ONCE(rsp->gp_flags)) {
+ if (!READ_ONCE(rcu_state.gp_flags)) {
/* Spurious wakeup, tell caller to go back to sleep. */
raw_spin_unlock_irq_rcu_node(rnp);
return false;
}
- WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
+ WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
- if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
+ if (WARN_ON_ONCE(rcu_gp_in_progress())) {
/*
* Grace period already in progress, don't start another.
* Not supposed to be able to happen.
@@ -1914,10 +1762,10 @@ static bool rcu_gp_init(struct rcu_state *rsp)
}
/* Advance to a new grace period and initialize state. */
- record_gp_stall_check_time(rsp);
+ record_gp_stall_check_time();
/* Record GP times before starting GP, hence rcu_seq_start(). */
- rcu_seq_start(&rsp->gp_seq);
- trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
+ rcu_seq_start(&rcu_state.gp_seq);
+ trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
raw_spin_unlock_irq_rcu_node(rnp);
/*
@@ -1926,15 +1774,15 @@ static bool rcu_gp_init(struct rcu_state *rsp)
* for subsequent online CPUs, and that quiescent-state forcing
* will handle subsequent offline CPUs.
*/
- rsp->gp_state = RCU_GP_ONOFF;
- rcu_for_each_leaf_node(rsp, rnp) {
- spin_lock(&rsp->ofl_lock);
+ rcu_state.gp_state = RCU_GP_ONOFF;
+ rcu_for_each_leaf_node(rnp) {
+ raw_spin_lock(&rcu_state.ofl_lock);
raw_spin_lock_irq_rcu_node(rnp);
if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
!rnp->wait_blkd_tasks) {
/* Nothing to do on this leaf rcu_node structure. */
raw_spin_unlock_irq_rcu_node(rnp);
- spin_unlock(&rsp->ofl_lock);
+ raw_spin_unlock(&rcu_state.ofl_lock);
continue;
}
@@ -1970,45 +1818,45 @@ static bool rcu_gp_init(struct rcu_state *rsp)
}
raw_spin_unlock_irq_rcu_node(rnp);
- spin_unlock(&rsp->ofl_lock);
+ raw_spin_unlock(&rcu_state.ofl_lock);
}
- rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */
+ rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
/*
* Set the quiescent-state-needed bits in all the rcu_node
- * structures for all currently online CPUs in breadth-first order,
- * starting from the root rcu_node structure, relying on the layout
- * of the tree within the rsp->node[] array. Note that other CPUs
- * will access only the leaves of the hierarchy, thus seeing that no
- * grace period is in progress, at least until the corresponding
- * leaf node has been initialized.
+ * structures for all currently online CPUs in breadth-first
+ * order, starting from the root rcu_node structure, relying on the
+ * layout of the tree within the rcu_state.node[] array. Note that
+ * other CPUs will access only the leaves of the hierarchy, thus
+ * seeing that no grace period is in progress, at least until the
+ * corresponding leaf node has been initialized.
*
* The grace period cannot complete until the initialization
* process finishes, because this kthread handles both.
*/
- rsp->gp_state = RCU_GP_INIT;
- rcu_for_each_node_breadth_first(rsp, rnp) {
- rcu_gp_slow(rsp, gp_init_delay);
+ rcu_state.gp_state = RCU_GP_INIT;
+ rcu_for_each_node_breadth_first(rnp) {
+ rcu_gp_slow(gp_init_delay);
raw_spin_lock_irqsave_rcu_node(rnp, flags);
- rdp = this_cpu_ptr(rsp->rda);
- rcu_preempt_check_blocked_tasks(rsp, rnp);
+ rdp = this_cpu_ptr(&rcu_data);
+ rcu_preempt_check_blocked_tasks(rnp);
rnp->qsmask = rnp->qsmaskinit;
- WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
+ WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
if (rnp == rdp->mynode)
- (void)__note_gp_changes(rsp, rnp, rdp);
+ (void)__note_gp_changes(rnp, rdp);
rcu_preempt_boost_start_gp(rnp);
- trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
+ trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
rnp->level, rnp->grplo,
rnp->grphi, rnp->qsmask);
/* Quiescent states for tasks on any now-offline CPUs. */
mask = rnp->qsmask & ~rnp->qsmaskinitnext;
rnp->rcu_gp_init_mask = mask;
if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
else
raw_spin_unlock_irq_rcu_node(rnp);
cond_resched_tasks_rcu_qs();
- WRITE_ONCE(rsp->gp_activity, jiffies);
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
}
return true;
@@ -2018,12 +1866,12 @@ static bool rcu_gp_init(struct rcu_state *rsp)
* Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
* time.
*/
-static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
+static bool rcu_gp_fqs_check_wake(int *gfp)
{
- struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_node *rnp = rcu_get_root();
/* Someone like call_rcu() requested a force-quiescent-state scan. */
- *gfp = READ_ONCE(rsp->gp_flags);
+ *gfp = READ_ONCE(rcu_state.gp_flags);
if (*gfp & RCU_GP_FLAG_FQS)
return true;
@@ -2037,45 +1885,110 @@ static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
/*
* Do one round of quiescent-state forcing.
*/
-static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
+static void rcu_gp_fqs(bool first_time)
{
- struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_node *rnp = rcu_get_root();
- WRITE_ONCE(rsp->gp_activity, jiffies);
- rsp->n_force_qs++;
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
+ rcu_state.n_force_qs++;
if (first_time) {
/* Collect dyntick-idle snapshots. */
- force_qs_rnp(rsp, dyntick_save_progress_counter);
+ force_qs_rnp(dyntick_save_progress_counter);
} else {
/* Handle dyntick-idle and offline CPUs. */
- force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
+ force_qs_rnp(rcu_implicit_dynticks_qs);
}
/* Clear flag to prevent immediate re-entry. */
- if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+ if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
raw_spin_lock_irq_rcu_node(rnp);
- WRITE_ONCE(rsp->gp_flags,
- READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
+ WRITE_ONCE(rcu_state.gp_flags,
+ READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
raw_spin_unlock_irq_rcu_node(rnp);
}
}
/*
+ * Loop doing repeated quiescent-state forcing until the grace period ends.
+ */
+static void rcu_gp_fqs_loop(void)
+{
+ bool first_gp_fqs;
+ int gf;
+ unsigned long j;
+ int ret;
+ struct rcu_node *rnp = rcu_get_root();
+
+ first_gp_fqs = true;
+ j = READ_ONCE(jiffies_till_first_fqs);
+ ret = 0;
+ for (;;) {
+ if (!ret) {
+ rcu_state.jiffies_force_qs = jiffies + j;
+ WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
+ jiffies + 3 * j);
+ }
+ trace_rcu_grace_period(rcu_state.name,
+ READ_ONCE(rcu_state.gp_seq),
+ TPS("fqswait"));
+ rcu_state.gp_state = RCU_GP_WAIT_FQS;
+ ret = swait_event_idle_timeout_exclusive(
+ rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
+ rcu_state.gp_state = RCU_GP_DOING_FQS;
+ /* Locking provides needed memory barriers. */
+ /* If grace period done, leave loop. */
+ if (!READ_ONCE(rnp->qsmask) &&
+ !rcu_preempt_blocked_readers_cgp(rnp))
+ break;
+ /* If time for quiescent-state forcing, do it. */
+ if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) ||
+ (gf & RCU_GP_FLAG_FQS)) {
+ trace_rcu_grace_period(rcu_state.name,
+ READ_ONCE(rcu_state.gp_seq),
+ TPS("fqsstart"));
+ rcu_gp_fqs(first_gp_fqs);
+ first_gp_fqs = false;
+ trace_rcu_grace_period(rcu_state.name,
+ READ_ONCE(rcu_state.gp_seq),
+ TPS("fqsend"));
+ cond_resched_tasks_rcu_qs();
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
+ ret = 0; /* Force full wait till next FQS. */
+ j = READ_ONCE(jiffies_till_next_fqs);
+ } else {
+ /* Deal with stray signal. */
+ cond_resched_tasks_rcu_qs();
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
+ WARN_ON(signal_pending(current));
+ trace_rcu_grace_period(rcu_state.name,
+ READ_ONCE(rcu_state.gp_seq),
+ TPS("fqswaitsig"));
+ ret = 1; /* Keep old FQS timing. */
+ j = jiffies;
+ if (time_after(jiffies, rcu_state.jiffies_force_qs))
+ j = 1;
+ else
+ j = rcu_state.jiffies_force_qs - j;
+ }
+ }
+}
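
The ret/j bookkeeping in this loop distinguishes a completed FQS pass, which re-arms a full interval, from a stray wakeup, which keeps the old deadline and waits only for the remainder. A standalone sketch of just that timing decision; "jiffies" here is a plain counter and all names are illustrative:

#include <stdio.h>

static unsigned long jiffies;           /* plain counter standing in for the kernel's */

struct fqs_timing {
        unsigned long deadline;         /* like rcu_state.jiffies_force_qs */
        int keep_old_deadline;          /* like "ret" in rcu_gp_fqs_loop() */
};

/* Compute how long the next wait should be, in "jiffies". */
static unsigned long next_wait(struct fqs_timing *t, unsigned long interval)
{
        if (!t->keep_old_deadline)
                t->deadline = jiffies + interval;   /* arm a fresh interval */
        if (jiffies >= t->deadline)
                return 1;                           /* deadline passed: poll soon */
        return t->deadline - jiffies;               /* wait out the remainder */
}

int main(void)
{
        struct fqs_timing t = { .keep_old_deadline = 0 };

        printf("initial wait: %lu\n", next_wait(&t, 3));  /* full interval: 3 */
        jiffies = 2;                    /* woken early by a stray signal */
        t.keep_old_deadline = 1;
        printf("resumed wait: %lu\n", next_wait(&t, 3));  /* remainder only: 1 */
        return 0;
}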
+
+/*
* Clean up after the old grace period.
*/
-static void rcu_gp_cleanup(struct rcu_state *rsp)
+static void rcu_gp_cleanup(void)
{
unsigned long gp_duration;
bool needgp = false;
unsigned long new_gp_seq;
struct rcu_data *rdp;
- struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_node *rnp = rcu_get_root();
struct swait_queue_head *sq;
- WRITE_ONCE(rsp->gp_activity, jiffies);
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
raw_spin_lock_irq_rcu_node(rnp);
- gp_duration = jiffies - rsp->gp_start;
- if (gp_duration > rsp->gp_max)
- rsp->gp_max = gp_duration;
+ gp_duration = jiffies - rcu_state.gp_start;
+ if (gp_duration > rcu_state.gp_max)
+ rcu_state.gp_max = gp_duration;
/*
* We know the grace period is complete, but to everyone else
@@ -2096,48 +2009,50 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
* the rcu_node structures before the beginning of the next grace
* period is recorded in any of the rcu_node structures.
*/
- new_gp_seq = rsp->gp_seq;
+ new_gp_seq = rcu_state.gp_seq;
rcu_seq_end(&new_gp_seq);
- rcu_for_each_node_breadth_first(rsp, rnp) {
+ rcu_for_each_node_breadth_first(rnp) {
raw_spin_lock_irq_rcu_node(rnp);
if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
- dump_blkd_tasks(rsp, rnp, 10);
+ dump_blkd_tasks(rnp, 10);
WARN_ON_ONCE(rnp->qsmask);
WRITE_ONCE(rnp->gp_seq, new_gp_seq);
- rdp = this_cpu_ptr(rsp->rda);
+ rdp = this_cpu_ptr(&rcu_data);
if (rnp == rdp->mynode)
- needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
+ needgp = __note_gp_changes(rnp, rdp) || needgp;
/* smp_mb() provided by prior unlock-lock pair. */
- needgp = rcu_future_gp_cleanup(rsp, rnp) || needgp;
+ needgp = rcu_future_gp_cleanup(rnp) || needgp;
sq = rcu_nocb_gp_get(rnp);
raw_spin_unlock_irq_rcu_node(rnp);
rcu_nocb_gp_cleanup(sq);
cond_resched_tasks_rcu_qs();
- WRITE_ONCE(rsp->gp_activity, jiffies);
- rcu_gp_slow(rsp, gp_cleanup_delay);
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
+ rcu_gp_slow(gp_cleanup_delay);
}
- rnp = rcu_get_root(rsp);
- raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
+ rnp = rcu_get_root();
+ raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
/* Declare grace period done. */
- rcu_seq_end(&rsp->gp_seq);
- trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end"));
- rsp->gp_state = RCU_GP_IDLE;
+ rcu_seq_end(&rcu_state.gp_seq);
+ trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
+ rcu_state.gp_state = RCU_GP_IDLE;
/* Check for GP requests since above loop. */
- rdp = this_cpu_ptr(rsp->rda);
+ rdp = this_cpu_ptr(&rcu_data);
if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
TPS("CleanupMore"));
needgp = true;
}
/* Advance CBs to reduce false positives below. */
- if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
- WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
- rsp->gp_req_activity = jiffies;
- trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
+ if (!rcu_accelerate_cbs(rnp, rdp) && needgp) {
+ WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
+ rcu_state.gp_req_activity = jiffies;
+ trace_rcu_grace_period(rcu_state.name,
+ READ_ONCE(rcu_state.gp_seq),
TPS("newreq"));
} else {
- WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
+ WRITE_ONCE(rcu_state.gp_flags,
+ rcu_state.gp_flags & RCU_GP_FLAG_INIT);
}
raw_spin_unlock_irq_rcu_node(rnp);
}
@@ -2145,116 +2060,60 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
/*
* Body of kthread that handles grace periods.
*/
-static int __noreturn rcu_gp_kthread(void *arg)
+static int __noreturn rcu_gp_kthread(void *unused)
{
- bool first_gp_fqs;
- int gf;
- unsigned long j;
- int ret;
- struct rcu_state *rsp = arg;
- struct rcu_node *rnp = rcu_get_root(rsp);
-
rcu_bind_gp_kthread();
for (;;) {
/* Handle grace-period start. */
for (;;) {
- trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gp_seq),
+ trace_rcu_grace_period(rcu_state.name,
+ READ_ONCE(rcu_state.gp_seq),
TPS("reqwait"));
- rsp->gp_state = RCU_GP_WAIT_GPS;
- swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
- RCU_GP_FLAG_INIT);
- rsp->gp_state = RCU_GP_DONE_GPS;
+ rcu_state.gp_state = RCU_GP_WAIT_GPS;
+ swait_event_idle_exclusive(rcu_state.gp_wq,
+ READ_ONCE(rcu_state.gp_flags) &
+ RCU_GP_FLAG_INIT);
+ rcu_state.gp_state = RCU_GP_DONE_GPS;
/* Locking provides needed memory barrier. */
- if (rcu_gp_init(rsp))
+ if (rcu_gp_init())
break;
cond_resched_tasks_rcu_qs();
- WRITE_ONCE(rsp->gp_activity, jiffies);
+ WRITE_ONCE(rcu_state.gp_activity, jiffies);
WARN_ON(signal_pending(current));
- trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gp_seq),
+ trace_rcu_grace_period(rcu_state.name,
+ READ_ONCE(rcu_state.gp_seq),
TPS("reqwaitsig"));
}
/* Handle quiescent-state forcing. */
- first_gp_fqs = true;
- j = jiffies_till_first_fqs;
- ret = 0;
- for (;;) {
- if (!ret) {
- rsp->jiffies_force_qs = jiffies + j;
- WRITE_ONCE(rsp->jiffies_kick_kthreads,
- jiffies + 3 * j);
- }
- trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gp_seq),
- TPS("fqswait"));
- rsp->gp_state = RCU_GP_WAIT_FQS;
- ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
- rcu_gp_fqs_check_wake(rsp, &gf), j);
- rsp->gp_state = RCU_GP_DOING_FQS;
- /* Locking provides needed memory barriers. */
- /* If grace period done, leave loop. */
- if (!READ_ONCE(rnp->qsmask) &&
- !rcu_preempt_blocked_readers_cgp(rnp))
- break;
- /* If time for quiescent-state forcing, do it. */
- if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
- (gf & RCU_GP_FLAG_FQS)) {
- trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gp_seq),
- TPS("fqsstart"));
- rcu_gp_fqs(rsp, first_gp_fqs);
- first_gp_fqs = false;
- trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gp_seq),
- TPS("fqsend"));
- cond_resched_tasks_rcu_qs();
- WRITE_ONCE(rsp->gp_activity, jiffies);
- ret = 0; /* Force full wait till next FQS. */
- j = jiffies_till_next_fqs;
- } else {
- /* Deal with stray signal. */
- cond_resched_tasks_rcu_qs();
- WRITE_ONCE(rsp->gp_activity, jiffies);
- WARN_ON(signal_pending(current));
- trace_rcu_grace_period(rsp->name,
- READ_ONCE(rsp->gp_seq),
- TPS("fqswaitsig"));
- ret = 1; /* Keep old FQS timing. */
- j = jiffies;
- if (time_after(jiffies, rsp->jiffies_force_qs))
- j = 1;
- else
- j = rsp->jiffies_force_qs - j;
- }
- }
+ rcu_gp_fqs_loop();
/* Handle grace-period end. */
- rsp->gp_state = RCU_GP_CLEANUP;
- rcu_gp_cleanup(rsp);
- rsp->gp_state = RCU_GP_CLEANED;
+ rcu_state.gp_state = RCU_GP_CLEANUP;
+ rcu_gp_cleanup();
+ rcu_state.gp_state = RCU_GP_CLEANED;
}
}
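
Both swait_event_idle_exclusive() calls re-check their condition after every wakeup, so a spurious wakeup simply loops back to sleep; that is why the kthread can WARN on stray signals instead of mishandling them. A hedged pthread rendering of the guarded-wait pattern, with the flag name mirroring RCU_GP_FLAG_INIT purely for flavor:

#include <pthread.h>

#define GP_FLAG_INIT    0x1     /* mirrors RCU_GP_FLAG_INIT in spirit only */

static pthread_mutex_t gp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gp_cv = PTHREAD_COND_INITIALIZER;
static unsigned int gp_flags;

/* Grace-period thread side: sleep until a new GP is requested. */
static void *gp_thread(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&gp_lock);
        while (!(gp_flags & GP_FLAG_INIT))      /* tolerate spurious wakeups */
                pthread_cond_wait(&gp_cv, &gp_lock);
        gp_flags = 0;                           /* clear all flags: new GP */
        pthread_mutex_unlock(&gp_lock);
        return NULL;
}

/* Requester side: set the flag, then wake the thread. */
static void request_gp(void)
{
        pthread_mutex_lock(&gp_lock);
        gp_flags |= GP_FLAG_INIT;
        pthread_cond_signal(&gp_cv);
        pthread_mutex_unlock(&gp_lock);
}

int main(void)
{
        pthread_t gp;

        pthread_create(&gp, NULL, gp_thread, NULL);
        request_gp();
        pthread_join(gp, NULL);
        return 0;
}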
/*
- * Report a full set of quiescent states to the specified rcu_state data
- * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period
- * kthread if another grace period is required. Whether we wake
- * the grace-period kthread or it awakens itself for the next round
- * of quiescent-state forcing, that kthread will clean up after the
- * just-completed grace period. Note that the caller must hold rnp->lock,
- * which is released before return.
+ * Report a full set of quiescent states to the rcu_state data structure.
+ * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
+ * another grace period is required. Whether we wake the grace-period
+ * kthread or it awakens itself for the next round of quiescent-state
+ * forcing, that kthread will clean up after the just-completed grace
+ * period. Note that the caller must hold rnp->lock, which is released
+ * before return.
*/
-static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
- __releases(rcu_get_root(rsp)->lock)
+static void rcu_report_qs_rsp(unsigned long flags)
+ __releases(rcu_get_root()->lock)
{
- raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
- WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
- WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
- raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
- rcu_gp_kthread_wake(rsp);
+ raw_lockdep_assert_held_rcu_node(rcu_get_root());
+ WARN_ON_ONCE(!rcu_gp_in_progress());
+ WRITE_ONCE(rcu_state.gp_flags,
+ READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
+ raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
+ rcu_gp_kthread_wake();
}
/*
@@ -2271,9 +2130,8 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
* disabled. This allows propagating quiescent state due to resumed tasks
* during grace-period initialization.
*/
-static void
-rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
- struct rcu_node *rnp, unsigned long gps, unsigned long flags)
+static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
+ unsigned long gps, unsigned long flags)
__releases(rnp->lock)
{
unsigned long oldmask = 0;
@@ -2296,7 +2154,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
rcu_preempt_blocked_readers_cgp(rnp));
rnp->qsmask &= ~mask;
- trace_rcu_quiescent_state_report(rsp->name, rnp->gp_seq,
+ trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
mask, rnp->qsmask, rnp->level,
rnp->grplo, rnp->grphi,
!!rnp->gp_tasks);
@@ -2326,19 +2184,18 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
* state for this grace period. Invoke rcu_report_qs_rsp()
* to clean up and start the next grace period if one is needed.
*/
- rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
+ rcu_report_qs_rsp(flags); /* releases rnp->lock. */
}
/*
* Record a quiescent state for all tasks that were previously queued
* on the specified rcu_node structure and that were blocking the current
- * RCU grace period. The caller must hold the specified rnp->lock with
+ * RCU grace period. The caller must hold the corresponding rnp->lock with
* irqs disabled, and this lock is released upon return, but irqs remain
* disabled.
*/
static void __maybe_unused
-rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
- struct rcu_node *rnp, unsigned long flags)
+rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
__releases(rnp->lock)
{
unsigned long gps;
@@ -2346,8 +2203,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
struct rcu_node *rnp_p;
raw_lockdep_assert_held_rcu_node(rnp);
- if (WARN_ON_ONCE(rcu_state_p == &rcu_sched_state) ||
- WARN_ON_ONCE(rsp != rcu_state_p) ||
+ if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)) ||
WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
rnp->qsmask != 0) {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2361,7 +2217,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
* Only one rcu_node structure in the tree, so don't
* try to report up to its nonexistent parent!
*/
- rcu_report_qs_rsp(rsp, flags);
+ rcu_report_qs_rsp(flags);
return;
}
@@ -2370,7 +2226,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
mask = rnp->grpmask;
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
- rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
+ rcu_report_qs_rnp(mask, rnp_p, gps, flags);
}
/*
@@ -2378,7 +2234,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
* structure. This must be called from the specified CPU.
*/
static void
-rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
+rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
{
unsigned long flags;
unsigned long mask;
@@ -2397,7 +2253,6 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
* within the current grace period.
*/
rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
- rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return;
}
@@ -2411,12 +2266,12 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
* This GP can't end until cpu checks in, so all of our
* callbacks can be processed during the next GP.
*/
- needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+ needwake = rcu_accelerate_cbs(rnp, rdp);
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
/* ^^^ Released rnp->lock */
if (needwake)
- rcu_gp_kthread_wake(rsp);
+ rcu_gp_kthread_wake();
}
}
@@ -2427,10 +2282,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
* quiescent state for this grace period, and record that fact if so.
*/
static void
-rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
+rcu_check_quiescent_state(struct rcu_data *rdp)
{
/* Check for grace-period ends and beginnings. */
- note_gp_changes(rsp, rdp);
+ note_gp_changes(rdp);
/*
* Does this CPU still need to do its part for current grace period?
@@ -2450,24 +2305,26 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
* Tell RCU we are done (but rcu_report_qs_rdp() will be the
* judge of that).
*/
- rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
+ rcu_report_qs_rdp(rdp->cpu, rdp);
}
/*
- * Trace the fact that this CPU is going offline.
+ * Invoked near the end of the offline process. Trace the fact that
+ * this CPU is going offline.
*/
-static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
+int rcutree_dying_cpu(unsigned int cpu)
{
RCU_TRACE(bool blkd;)
- RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
+ RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);)
RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
- return;
+ return 0;
RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
- trace_rcu_grace_period(rsp->name, rnp->gp_seq,
+ trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
+ return 0;
}
/*
@@ -2521,23 +2378,26 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
* There can only be one CPU hotplug operation at a time, so no need for
* explicit locking.
*/
-static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
+int rcutree_dead_cpu(unsigned int cpu)
{
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
- return;
+ return 0;
/* Adjust any no-longer-needed kthreads. */
rcu_boost_kthread_setaffinity(rnp, -1);
+ /* Do any needed no-CB deferred wakeups from this CPU. */
+ do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
+ return 0;
}
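
rcutree_dying_cpu() and rcutree_dead_cpu() now carry the int (*)(unsigned int) shape of CPU-hotplug state callbacks. RCU itself is wired into the hotplug core statically, but for orientation, an ordinary client would register a comparable prepare/dead pair along these lines (a sketch; the callback bodies and the dynamic state choice are assumptions):

/* Illustrative registration of prepare/dead callbacks; myprep()/mydead()
 * are made up and are not RCU's own wiring. */
static int myprep(unsigned int cpu)
{
        /* Allocate per-CPU state for the incoming CPU. */
        return 0;
}

static int mydead(unsigned int cpu)
{
        /* Release per-CPU state now that the CPU is gone. */
        return 0;
}

static int __init my_hotplug_init(void)
{
        int ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "subsys:prepare",
                                    myprep, mydead);

        return ret < 0 ? ret : 0;       /* DYN states return the slot number */
}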
/*
* Invoke any RCU callbacks that have made it to the end of their grace
* period. Throttle as specified by rdp->blimit.
*/
-static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_data *rdp)
{
unsigned long flags;
struct rcu_head *rhp;
@@ -2546,10 +2406,10 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
/* If no callbacks are ready, just return. */
if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
- trace_rcu_batch_start(rsp->name,
+ trace_rcu_batch_start(rcu_state.name,
rcu_segcblist_n_lazy_cbs(&rdp->cblist),
rcu_segcblist_n_cbs(&rdp->cblist), 0);
- trace_rcu_batch_end(rsp->name, 0,
+ trace_rcu_batch_end(rcu_state.name, 0,
!rcu_segcblist_empty(&rdp->cblist),
need_resched(), is_idle_task(current),
rcu_is_callbacks_kthread());
@@ -2564,7 +2424,8 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
local_irq_save(flags);
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
bl = rdp->blimit;
- trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
+ trace_rcu_batch_start(rcu_state.name,
+ rcu_segcblist_n_lazy_cbs(&rdp->cblist),
rcu_segcblist_n_cbs(&rdp->cblist), bl);
rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
local_irq_restore(flags);
@@ -2573,7 +2434,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
rhp = rcu_cblist_dequeue(&rcl);
for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
debug_rcu_head_unqueue(rhp);
- if (__rcu_reclaim(rsp->name, rhp))
+ if (__rcu_reclaim(rcu_state.name, rhp))
rcu_cblist_dequeued_lazy(&rcl);
/*
* Stop only if limit reached and CPU has something to do.
@@ -2587,7 +2448,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
local_irq_save(flags);
count = -rcl.len;
- trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
+ trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
is_idle_task(current), rcu_is_callbacks_kthread());
/* Update counts and requeue any remaining callbacks. */
@@ -2603,7 +2464,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
if (count == 0 && rdp->qlen_last_fqs_check != 0) {
rdp->qlen_last_fqs_check = 0;
- rdp->n_force_qs_snap = rsp->n_force_qs;
+ rdp->n_force_qs_snap = rcu_state.n_force_qs;
} else if (count < rdp->qlen_last_fqs_check - qhimark)
rdp->qlen_last_fqs_check = count;
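
rcu_do_batch() drains at most rdp->blimit ready callbacks per pass and requeues whatever remains, which is what the "Throttle as specified by rdp->blimit" comment promises. Stripped of the need_resched()/idle escape hatches and the statistics, the core pattern looks roughly like this (illustrative sketch):

#include <stddef.h>
#include <stdio.h>

struct cb {
        struct cb *next;
        void (*func)(struct cb *);
};

/* Invoke at most "blimit" ready callbacks and hand back the rest. */
static struct cb *do_batch(struct cb *ready, long blimit)
{
        long invoked = 0;

        while (ready && invoked < blimit) {
                struct cb *cbp = ready;

                ready = cbp->next;      /* unlink before invoking */
                cbp->func(cbp);         /* callback may free cbp */
                invoked++;
        }
        return ready;                   /* leftovers are requeued */
}

static void say(struct cb *cbp)
{
        (void)cbp;
        printf("callback\n");
}

int main(void)
{
        struct cb c2 = { NULL, say };
        struct cb c1 = { &c2, say };
        struct cb *rest = do_batch(&c1, 1);     /* runs c1 only */

        return rest == &c2 ? 0 : 1;
}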
@@ -2631,37 +2492,17 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
void rcu_check_callbacks(int user)
{
trace_rcu_utilization(TPS("Start scheduler-tick"));
- increment_cpu_stall_ticks();
- if (user || rcu_is_cpu_rrupt_from_idle()) {
-
- /*
- * Get here if this CPU took its interrupt from user
- * mode or from the idle loop, and if this is not a
- * nested interrupt. In this case, the CPU is in
- * a quiescent state, so note it.
- *
- * No memory barrier is required here because both
- * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
- * variables that other CPUs neither access nor modify,
- * at least not while the corresponding CPU is online.
- */
-
- rcu_sched_qs();
- rcu_bh_qs();
- rcu_note_voluntary_context_switch(current);
-
- } else if (!in_softirq()) {
-
- /*
- * Get here if this CPU did not take its interrupt from
- * softirq, in other words, if it is not interrupting
- * a rcu_bh read-side critical section. This is an _bh
- * critical section, so note it.
- */
-
- rcu_bh_qs();
+ raw_cpu_inc(rcu_data.ticks_this_gp);
+ /* The load-acquire pairs with the store-release setting to true. */
+ if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
+ /* Idle and userspace execution are already quiescent states. */
+ if (!rcu_is_cpu_rrupt_from_idle() && !user) {
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+ }
+ __this_cpu_write(rcu_data.rcu_urgent_qs, false);
}
- rcu_preempt_check_callbacks();
+ rcu_flavor_check_callbacks(user);
if (rcu_pending())
invoke_rcu_core();
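
The smp_load_acquire() above pairs with a store-release that sets ->rcu_urgent_qs, so a reader that observes the flag also observes everything published before it was set. A C11-atomics sketch of that pairing, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool urgent_qs;
static unsigned long published_data;    /* ordinary data guarded by the flag */

/* Writer: publish the data, then set the flag with release semantics. */
static void set_urgent(unsigned long val)
{
        published_data = val;
        atomic_store_explicit(&urgent_qs, true, memory_order_release);
}

/* Reader: an acquire load of the flag makes the data visible too. */
static bool check_urgent(unsigned long *val)
{
        if (!atomic_load_explicit(&urgent_qs, memory_order_acquire))
                return false;
        *val = published_data;          /* ordered after the acquire load */
        atomic_store_explicit(&urgent_qs, false, memory_order_relaxed);
        return true;
}

int main(void)
{
        unsigned long v = 0;

        set_urgent(42);
        return check_urgent(&v) && v == 42 ? 0 : 1;
}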
@@ -2675,20 +2516,19 @@ void rcu_check_callbacks(int user)
*
* The caller must have suppressed start of new grace periods.
*/
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
+static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
{
int cpu;
unsigned long flags;
unsigned long mask;
struct rcu_node *rnp;
- rcu_for_each_leaf_node(rsp, rnp) {
+ rcu_for_each_leaf_node(rnp) {
cond_resched_tasks_rcu_qs();
mask = 0;
raw_spin_lock_irqsave_rcu_node(rnp, flags);
if (rnp->qsmask == 0) {
- if (rcu_state_p == &rcu_sched_state ||
- rsp != rcu_state_p ||
+ if (!IS_ENABLED(CONFIG_PREEMPT) ||
rcu_preempt_blocked_readers_cgp(rnp)) {
/*
* No point in scanning bits because they
@@ -2705,13 +2545,13 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
for_each_leaf_node_possible_cpu(rnp, cpu) {
unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
if ((rnp->qsmask & bit) != 0) {
- if (f(per_cpu_ptr(rsp->rda, cpu)))
+ if (f(per_cpu_ptr(&rcu_data, cpu)))
mask |= bit;
}
}
if (mask != 0) {
/* Idle/offline CPUs, report (releases rnp->lock). */
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
} else {
/* Nothing to do here, so just drop the lock. */
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2723,7 +2563,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
* Force quiescent states on reluctant CPUs, and also detect which
* CPUs are in dyntick-idle mode.
*/
-static void force_quiescent_state(struct rcu_state *rsp)
+static void force_quiescent_state(void)
{
unsigned long flags;
bool ret;
@@ -2731,9 +2571,9 @@ static void force_quiescent_state(struct rcu_state *rsp)
struct rcu_node *rnp_old = NULL;
/* Funnel through hierarchy to reduce memory contention. */
- rnp = __this_cpu_read(rsp->rda->mynode);
+ rnp = __this_cpu_read(rcu_data.mynode);
for (; rnp != NULL; rnp = rnp->parent) {
- ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
+ ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
!raw_spin_trylock(&rnp->fqslock);
if (rnp_old != NULL)
raw_spin_unlock(&rnp_old->fqslock);
@@ -2741,18 +2581,19 @@ static void force_quiescent_state(struct rcu_state *rsp)
return;
rnp_old = rnp;
}
- /* rnp_old == rcu_get_root(rsp), rnp == NULL. */
+ /* rnp_old == rcu_get_root(), rnp == NULL. */
/* Reached the root of the rcu_node tree, acquire lock. */
raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
raw_spin_unlock(&rnp_old->fqslock);
- if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+ if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
return; /* Someone beat us to it. */
}
- WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
+ WRITE_ONCE(rcu_state.gp_flags,
+ READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
- rcu_gp_kthread_wake(rsp);
+ rcu_gp_kthread_wake();
}
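
The funnel above lets many CPUs race rootward while each holds at most one ->fqslock at a time; losing any trylock means another caller is already climbing with the same intent, so the loser can simply return. A toy rendering of funnel locking over a two-level tree (pthread trylocks; the structure and names are assumptions):

#include <pthread.h>
#include <stdbool.h>

struct node {
        pthread_mutex_t fqslock;
        struct node *parent;
};

/*
 * Climb from a leaf toward the root, holding at most one per-level
 * trylock at a time.  Losing a trylock means another caller is already
 * on its way up, so this caller can give up: the work will get done.
 */
static bool funnel_to_root(struct node *leaf, struct node **root_out)
{
        struct node *held = NULL;

        for (struct node *n = leaf; n; n = n->parent) {
                bool lost = pthread_mutex_trylock(&n->fqslock) != 0;

                if (held)
                        pthread_mutex_unlock(&held->fqslock);
                if (lost)
                        return false;   /* someone else owns this level */
                held = n;
        }
        *root_out = held;               /* root reached, its lock held */
        return true;
}

int main(void)
{
        struct node root = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct node leaf = { PTHREAD_MUTEX_INITIALIZER, &root };
        struct node *got;

        if (funnel_to_root(&leaf, &got))
                pthread_mutex_unlock(&got->fqslock);    /* do work, release */
        return 0;
}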
/*
@@ -2760,30 +2601,29 @@ static void force_quiescent_state(struct rcu_state *rsp)
* RCU to come out of its idle mode.
*/
static void
-rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
- struct rcu_data *rdp)
+rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
{
const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
unsigned long flags;
unsigned long j;
- struct rcu_node *rnp_root = rcu_get_root(rsp);
+ struct rcu_node *rnp_root = rcu_get_root();
static atomic_t warned = ATOMIC_INIT(0);
- if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress(rsp) ||
+ if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
return;
j = jiffies; /* Expensive access, and in common case don't get here. */
- if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
- time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+ if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
+ time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
atomic_read(&warned))
return;
raw_spin_lock_irqsave_rcu_node(rnp, flags);
j = jiffies;
- if (rcu_gp_in_progress(rsp) ||
+ if (rcu_gp_in_progress() ||
ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
- time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
- time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+ time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
+ time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
atomic_read(&warned)) {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return;
@@ -2793,21 +2633,21 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
if (rnp_root != rnp)
raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
j = jiffies;
- if (rcu_gp_in_progress(rsp) ||
+ if (rcu_gp_in_progress() ||
ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
- time_before(j, rsp->gp_req_activity + gpssdelay) ||
- time_before(j, rsp->gp_activity + gpssdelay) ||
+ time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
+ time_before(j, rcu_state.gp_activity + gpssdelay) ||
atomic_xchg(&warned, 1)) {
raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return;
}
pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
- __func__, (long)READ_ONCE(rsp->gp_seq),
+ __func__, (long)READ_ONCE(rcu_state.gp_seq),
(long)READ_ONCE(rnp_root->gp_seq_needed),
- j - rsp->gp_req_activity, j - rsp->gp_activity,
- rsp->gp_flags, rsp->gp_state, rsp->name,
- rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
+ j - rcu_state.gp_req_activity, j - rcu_state.gp_activity,
+ rcu_state.gp_flags, rcu_state.gp_state, rcu_state.name,
+ rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL);
WARN_ON(1);
if (rnp_root != rnp)
raw_spin_unlock_rcu_node(rnp_root);
@@ -2815,69 +2655,65 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
}
/*
- * This does the RCU core processing work for the specified rcu_state
- * and rcu_data structures. This may be called only from the CPU to
- * whom the rdp belongs.
+ * This does the RCU core processing work for the specified rcu_data
+ * structure. This may be called only from the CPU to which the rdp
+ * belongs.
*/
-static void
-__rcu_process_callbacks(struct rcu_state *rsp)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
unsigned long flags;
- struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
+ struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
struct rcu_node *rnp = rdp->mynode;
+ if (cpu_is_offline(smp_processor_id()))
+ return;
+ trace_rcu_utilization(TPS("Start RCU core"));
WARN_ON_ONCE(!rdp->beenonline);
+ /* Report any deferred quiescent states if preemption enabled. */
+ if (!(preempt_count() & PREEMPT_MASK)) {
+ rcu_preempt_deferred_qs(current);
+ } else if (rcu_preempt_need_deferred_qs(current)) {
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+ }
+
/* Update RCU state based on any recent quiescent states. */
- rcu_check_quiescent_state(rsp, rdp);
+ rcu_check_quiescent_state(rdp);
/* No grace period and unregistered callbacks? */
- if (!rcu_gp_in_progress(rsp) &&
+ if (!rcu_gp_in_progress() &&
rcu_segcblist_is_enabled(&rdp->cblist)) {
local_irq_save(flags);
if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
- rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
+ rcu_accelerate_cbs_unlocked(rnp, rdp);
local_irq_restore(flags);
}
- rcu_check_gp_start_stall(rsp, rnp, rdp);
+ rcu_check_gp_start_stall(rnp, rdp);
/* If there are callbacks ready, invoke them. */
if (rcu_segcblist_ready_cbs(&rdp->cblist))
- invoke_rcu_callbacks(rsp, rdp);
+ invoke_rcu_callbacks(rdp);
/* Do any needed deferred wakeups of rcuo kthreads. */
do_nocb_deferred_wakeup(rdp);
-}
-
-/*
- * Do RCU core processing for the current CPU.
- */
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
-{
- struct rcu_state *rsp;
-
- if (cpu_is_offline(smp_processor_id()))
- return;
- trace_rcu_utilization(TPS("Start RCU core"));
- for_each_rcu_flavor(rsp)
- __rcu_process_callbacks(rsp);
trace_rcu_utilization(TPS("End RCU core"));
}
/*
- * Schedule RCU callback invocation. If the specified type of RCU
- * does not support RCU priority boosting, just do a direct call,
- * otherwise wake up the per-CPU kernel kthread. Note that because we
- * are running on the current CPU with softirqs disabled, the
- * rcu_cpu_kthread_task cannot disappear out from under us.
+ * Schedule RCU callback invocation. If the running implementation of RCU
+ * does not support RCU priority boosting, just do a direct call, otherwise
+ * wake up the per-CPU kernel kthread. Note that because we are running
+ * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task
+ * cannot disappear out from under us.
*/
-static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+static void invoke_rcu_callbacks(struct rcu_data *rdp)
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
- if (likely(!rsp->boost)) {
- rcu_do_batch(rsp, rdp);
+ if (likely(!rcu_state.boost)) {
+ rcu_do_batch(rdp);
return;
}
invoke_rcu_callbacks_kthread();
@@ -2892,8 +2728,8 @@ static void invoke_rcu_core(void)
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
*/
-static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
- struct rcu_head *head, unsigned long flags)
+static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
+ unsigned long flags)
{
/*
* If called from an extended quiescent state, invoke the RCU
@@ -2917,18 +2753,18 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
rdp->qlen_last_fqs_check + qhimark)) {
/* Are we ignoring a completed grace period? */
- note_gp_changes(rsp, rdp);
+ note_gp_changes(rdp);
/* Start a new grace period if one not already started. */
- if (!rcu_gp_in_progress(rsp)) {
- rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
+ if (!rcu_gp_in_progress()) {
+ rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
} else {
/* Give the grace period a kick. */
rdp->blimit = LONG_MAX;
- if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+ if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
- force_quiescent_state(rsp);
- rdp->n_force_qs_snap = rsp->n_force_qs;
+ force_quiescent_state();
+ rdp->n_force_qs_snap = rcu_state.n_force_qs;
rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
}
}
@@ -2944,12 +2780,11 @@ static void rcu_leak_callback(struct rcu_head *rhp)
/*
* Helper function for call_rcu() and friends. The cpu argument will
* normally be -1, indicating "currently running CPU". It may specify
- * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
+ * a CPU only if that CPU is a no-CBs CPU. Currently, only rcu_barrier()
* is expected to specify a CPU.
*/
static void
-__call_rcu(struct rcu_head *head, rcu_callback_t func,
- struct rcu_state *rsp, int cpu, bool lazy)
+__call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
{
unsigned long flags;
struct rcu_data *rdp;
@@ -2971,14 +2806,14 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
head->func = func;
head->next = NULL;
local_irq_save(flags);
- rdp = this_cpu_ptr(rsp->rda);
+ rdp = this_cpu_ptr(&rcu_data);
/* Add the callback to our list. */
if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
int offline;
if (cpu != -1)
- rdp = per_cpu_ptr(rsp->rda, cpu);
+ rdp = per_cpu_ptr(&rcu_data, cpu);
if (likely(rdp->mynode)) {
/* Post-boot, so this should be for a no-CBs CPU. */
offline = !__call_rcu_nocb(rdp, head, lazy, flags);
@@ -3001,72 +2836,60 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
rcu_idle_count_callbacks_posted();
if (__is_kfree_rcu_offset((unsigned long)func))
- trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
+ trace_rcu_kfree_callback(rcu_state.name, head,
+ (unsigned long)func,
rcu_segcblist_n_lazy_cbs(&rdp->cblist),
rcu_segcblist_n_cbs(&rdp->cblist));
else
- trace_rcu_callback(rsp->name, head,
+ trace_rcu_callback(rcu_state.name, head,
rcu_segcblist_n_lazy_cbs(&rdp->cblist),
rcu_segcblist_n_cbs(&rdp->cblist));
/* Go handle any RCU core processing required. */
- __call_rcu_core(rsp, rdp, head, flags);
+ __call_rcu_core(rdp, head, flags);
local_irq_restore(flags);
}
/**
- * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_sched() assumes
- * that the read-side critical sections end on enabling of preemption
- * or on voluntary preemption.
- * RCU read-side critical sections are delimited by:
- *
- * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
- * - anything that disables preemption.
- *
- * These may be nested.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
-{
- __call_rcu(head, func, &rcu_sched_state, -1, 0);
-}
-EXPORT_SYMBOL_GPL(call_rcu_sched);
-
-/**
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
* @head: structure to be used for queueing the RCU updates.
* @func: actual callback function to be invoked after the grace period
*
* The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by:
- *
- * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
- * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- *
- * These may be nested.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
-{
- __call_rcu(head, func, &rcu_bh_state, -1, 0);
-}
-EXPORT_SYMBOL_GPL(call_rcu_bh);
+ * period elapses, in other words after all pre-existing RCU read-side
+ * critical sections have completed. However, the callback function
+ * might well execute concurrently with RCU read-side critical sections
+ * that started after call_rcu() was invoked. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
+ * may be nested. In addition, regions of code across which interrupts,
+ * preemption, or softirqs have been disabled also serve as RCU read-side
+ * critical sections. This includes hardware interrupt handlers, softirq
+ * handlers, and NMI handlers.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical sections. On systems with more
+ * than one CPU, this means that when "func()" is invoked, each CPU is
+ * guaranteed to have executed a full memory barrier since the end of its
+ * last RCU read-side critical section whose beginning preceded the call
+ * to call_rcu(). It also means that each CPU executing an RCU read-side
+ * critical section that continues beyond the start of "func()" must have
+ * executed a memory barrier after the call_rcu() but before the beginning
+ * of that RCU read-side critical section. Note that these guarantees
+ * include CPUs that are offline, idle, or executing in user mode, as
+ * well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
+ * guaranteed to execute a full memory barrier during the time interval
+ * between the call to call_rcu() and the invocation of "func()" -- even
+ * if CPU A and CPU B are the same CPU (but again only if the system has
+ * more than one CPU).
+ */
+void call_rcu(struct rcu_head *head, rcu_callback_t func)
+{
+ __call_rcu(head, func, -1, 0);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
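
The consolidated call_rcu() keeps the usual usage pattern: embed an rcu_head in the protected object and recover the object with container_of() in the callback. A typical caller might look like the following sketch, where struct foo and its helpers are made up for illustration:

struct foo {
        int data;
        struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rhp)
{
        struct foo *fp = container_of(rhp, struct foo, rcu);

        kfree(fp);      /* safe: all pre-existing readers have finished */
}

static void foo_retire(struct foo *fp)
{
        /* Unpublish fp first, e.g. rcu_assign_pointer() of its slot to NULL. */
        call_rcu(&fp->rcu, foo_reclaim);
}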
/*
* Queue an RCU callback for lazy invocation after a grace period.
@@ -3075,110 +2898,12 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
* callbacks in the list of pending callbacks. Until then, this
* function may only be called from __kfree_rcu().
*/
-void kfree_call_rcu(struct rcu_head *head,
- rcu_callback_t func)
+void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
- __call_rcu(head, func, rcu_state_p, -1, 1);
+ __call_rcu(head, func, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
-/*
- * Because a context switch is a grace period for RCU-sched and RCU-bh,
- * any blocking grace-period wait automatically implies a grace period
- * if there is only one CPU online at any point time during execution
- * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to
- * occasionally incorrectly indicate that there are multiple CPUs online
- * when there was in fact only one the whole time, as this just adds
- * some overhead: RCU still operates correctly.
- */
-static int rcu_blocking_is_gp(void)
-{
- int ret;
-
- might_sleep(); /* Check for RCU read-side critical section. */
- preempt_disable();
- ret = num_online_cpus() <= 1;
- preempt_enable();
- return ret;
-}
-
-/**
- * synchronize_sched - wait until an rcu-sched grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu-sched
- * grace period has elapsed, in other words after all currently executing
- * rcu-sched read-side critical sections have completed. These read-side
- * critical sections are delimited by rcu_read_lock_sched() and
- * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
- * local_irq_disable(), and so on may be used in place of
- * rcu_read_lock_sched().
- *
- * This means that all preempt_disable code sequences, including NMI and
- * non-threaded hardware-interrupt handlers, in progress on entry will
- * have completed before this primitive returns. However, this does not
- * guarantee that softirq handlers will have completed, since in some
- * kernels, these handlers can run in process context, and can block.
- *
- * Note that this guarantee implies further memory-ordering guarantees.
- * On systems with more than one CPU, when synchronize_sched() returns,
- * each CPU is guaranteed to have executed a full memory barrier since the
- * end of its last RCU-sched read-side critical section whose beginning
- * preceded the call to synchronize_sched(). In addition, each CPU having
- * an RCU read-side critical section that extends beyond the return from
- * synchronize_sched() is guaranteed to have executed a full memory barrier
- * after the beginning of synchronize_sched() and before the beginning of
- * that RCU read-side critical section. Note that these guarantees include
- * CPUs that are offline, idle, or executing in user mode, as well as CPUs
- * that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked synchronize_sched(), which returned
- * to its caller on CPU B, then both CPU A and CPU B are guaranteed
- * to have executed a full memory barrier during the execution of
- * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
- * again only if the system has more than one CPU).
- */
-void synchronize_sched(void)
-{
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
- lock_is_held(&rcu_lock_map) ||
- lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_sched() in RCU-sched read-side critical section");
- if (rcu_blocking_is_gp())
- return;
- if (rcu_gp_is_expedited())
- synchronize_sched_expedited();
- else
- wait_rcu_gp(call_rcu_sched);
-}
-EXPORT_SYMBOL_GPL(synchronize_sched);
-
-/**
- * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu_bh grace
- * period has elapsed, in other words after all currently executing rcu_bh
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
- * and may be nested.
- *
- * See the description of synchronize_sched() for more detailed information
- * on memory ordering guarantees.
- */
-void synchronize_rcu_bh(void)
-{
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
- lock_is_held(&rcu_lock_map) ||
- lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
- if (rcu_blocking_is_gp())
- return;
- if (rcu_gp_is_expedited())
- synchronize_rcu_bh_expedited();
- else
- wait_rcu_gp(call_rcu_bh);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-
/**
* get_state_synchronize_rcu - Snapshot current RCU state
*
@@ -3193,7 +2918,7 @@ unsigned long get_state_synchronize_rcu(void)
* before the load from ->gp_seq.
*/
smp_mb(); /* ^^^ */
- return rcu_seq_snap(&rcu_state_p->gp_seq);
+ return rcu_seq_snap(&rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
@@ -3213,70 +2938,30 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
*/
void cond_synchronize_rcu(unsigned long oldstate)
{
- if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate))
+ if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
synchronize_rcu();
else
smp_mb(); /* Ensure GP ends before subsequent accesses. */
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
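
Taken together, get_state_synchronize_rcu() and cond_synchronize_rcu() let a caller pay for a grace period only when one has not already elapsed on its own. A hedged usage sketch, with the function and its bookkeeping invented for illustration:

/* Illustrative only: snapshot first, do other work, then wait if needed. */
static void retire_old_version(void)
{
        unsigned long cookie = get_state_synchronize_rcu();

        /* ... unpublish the old version, do unrelated work ... */

        cond_synchronize_rcu(cookie);   /* no-op if a full GP already passed */
        /* The old version now has no readers; it is safe to free. */
}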
-/**
- * get_state_synchronize_sched - Snapshot current RCU-sched state
- *
- * Returns a cookie that is used by a later call to cond_synchronize_sched()
- * to determine whether or not a full grace period has elapsed in the
- * meantime.
- */
-unsigned long get_state_synchronize_sched(void)
-{
- /*
- * Any prior manipulation of RCU-protected data must happen
- * before the load from ->gp_seq.
- */
- smp_mb(); /* ^^^ */
- return rcu_seq_snap(&rcu_sched_state.gp_seq);
-}
-EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
-
-/**
- * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
- *
- * @oldstate: return value from earlier call to get_state_synchronize_sched()
- *
- * If a full RCU-sched grace period has elapsed since the earlier call to
- * get_state_synchronize_sched(), just return. Otherwise, invoke
- * synchronize_sched() to wait for a full grace period.
- *
- * Yes, this function does not take counter wrap into account. But
- * counter wrap is harmless. If the counter wraps, we have waited for
- * more than 2 billion grace periods (and way more on a 64-bit system!),
- * so waiting for one additional grace period should be just fine.
- */
-void cond_synchronize_sched(unsigned long oldstate)
-{
- if (!rcu_seq_done(&rcu_sched_state.gp_seq, oldstate))
- synchronize_sched();
- else
- smp_mb(); /* Ensure GP ends before subsequent accesses. */
-}
-EXPORT_SYMBOL_GPL(cond_synchronize_sched);
-
/*
- * Check to see if there is any immediate RCU-related work to be done
- * by the current CPU, for the specified type of RCU, returning 1 if so.
- * The checks are in order of increasing expense: checks that can be
- * carried out against CPU-local state are performed first. However,
- * we must check for CPU stalls first, else we might not get a chance.
+ * Check to see if there is any immediate RCU-related work to be done by
+ * the current CPU, returning 1 if so and zero otherwise. The checks are
+ * in order of increasing expense: checks that can be carried out against
+ * CPU-local state are performed first. However, we must check for CPU
+ * stalls first, else we might not get a chance.
*/
-static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
+static int rcu_pending(void)
{
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp = rdp->mynode;
/* Check for CPU stalls, if enabled. */
- check_cpu_stall(rsp, rdp);
+ check_cpu_stall(rdp);
/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
- if (rcu_nohz_full_cpu(rsp))
+ if (rcu_nohz_full_cpu())
return 0;
/* Is the RCU core waiting for a quiescent state from this CPU? */
@@ -3288,7 +2973,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
return 1;
/* Has RCU gone idle with this CPU needing another grace period? */
- if (!rcu_gp_in_progress(rsp) &&
+ if (!rcu_gp_in_progress() &&
rcu_segcblist_is_enabled(&rdp->cblist) &&
!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
return 1;
@@ -3307,21 +2992,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
}
/*
- * Check to see if there is any immediate RCU-related work to be done
- * by the current CPU, returning 1 if so. This function is part of the
- * RCU implementation; it is -not- an exported member of the RCU API.
- */
-static int rcu_pending(void)
-{
- struct rcu_state *rsp;
-
- for_each_rcu_flavor(rsp)
- if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
- return 1;
- return 0;
-}
-
-/*
* Return true if the specified CPU has any callback. If all_lazy is
* non-NULL, store an indication of whether all callbacks are lazy.
* (If there are no callbacks, all of them are deemed to be lazy.)
@@ -3331,17 +3001,12 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy)
bool al = true;
bool hc = false;
struct rcu_data *rdp;
- struct rcu_state *rsp;
- for_each_rcu_flavor(rsp) {
- rdp = this_cpu_ptr(rsp->rda);
- if (rcu_segcblist_empty(&rdp->cblist))
- continue;
+ rdp = this_cpu_ptr(&rcu_data);
+ if (!rcu_segcblist_empty(&rdp->cblist)) {
hc = true;
- if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
+ if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist))
al = false;
- break;
- }
}
if (all_lazy)
*all_lazy = al;
@@ -3349,81 +3014,80 @@ static bool rcu_cpu_has_callbacks(bool *all_lazy)
}
/*
- * Helper function for _rcu_barrier() tracing. If tracing is disabled,
+ * Helper function for rcu_barrier() tracing. If tracing is disabled,
* the compiler is expected to optimize this away.
*/
-static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
- int cpu, unsigned long done)
+static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
{
- trace_rcu_barrier(rsp->name, s, cpu,
- atomic_read(&rsp->barrier_cpu_count), done);
+ trace_rcu_barrier(rcu_state.name, s, cpu,
+ atomic_read(&rcu_state.barrier_cpu_count), done);
}
/*
- * RCU callback function for _rcu_barrier(). If we are last, wake
- * up the task executing _rcu_barrier().
+ * RCU callback function for rcu_barrier(). If we are last, wake
+ * up the task executing rcu_barrier().
*/
static void rcu_barrier_callback(struct rcu_head *rhp)
{
- struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
- struct rcu_state *rsp = rdp->rsp;
-
- if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
- _rcu_barrier_trace(rsp, TPS("LastCB"), -1,
- rsp->barrier_sequence);
- complete(&rsp->barrier_completion);
+ if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
+ rcu_barrier_trace(TPS("LastCB"), -1,
+ rcu_state.barrier_sequence);
+ complete(&rcu_state.barrier_completion);
} else {
- _rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
+ rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
}
}
/*
* Called with preemption disabled, and from cross-cpu IRQ context.
*/
-static void rcu_barrier_func(void *type)
+static void rcu_barrier_func(void *unused)
{
- struct rcu_state *rsp = type;
- struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
+ struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
- _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
+ rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
rdp->barrier_head.func = rcu_barrier_callback;
debug_rcu_head_queue(&rdp->barrier_head);
if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
- atomic_inc(&rsp->barrier_cpu_count);
+ atomic_inc(&rcu_state.barrier_cpu_count);
} else {
debug_rcu_head_unqueue(&rdp->barrier_head);
- _rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
- rsp->barrier_sequence);
+ rcu_barrier_trace(TPS("IRQNQ"), -1,
+ rcu_state.barrier_sequence);
}
}
-/*
- * Orchestrate the specified type of RCU barrier, waiting for all
- * RCU callbacks of the specified type to complete.
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ *
+ * Note that this primitive does not necessarily wait for an RCU grace period
+ * to complete. For example, if there are no RCU callbacks queued anywhere
+ * in the system, then rcu_barrier() is within its rights to return
+ * immediately, without waiting for anything, much less an RCU grace period.
*/
-static void _rcu_barrier(struct rcu_state *rsp)
+void rcu_barrier(void)
{
int cpu;
struct rcu_data *rdp;
- unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
+ unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
- _rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
+ rcu_barrier_trace(TPS("Begin"), -1, s);
/* Take mutex to serialize concurrent rcu_barrier() requests. */
- mutex_lock(&rsp->barrier_mutex);
+ mutex_lock(&rcu_state.barrier_mutex);
/* Did someone else do our work for us? */
- if (rcu_seq_done(&rsp->barrier_sequence, s)) {
- _rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
- rsp->barrier_sequence);
+ if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
+ rcu_barrier_trace(TPS("EarlyExit"), -1,
+ rcu_state.barrier_sequence);
smp_mb(); /* caller's subsequent code after above check. */
- mutex_unlock(&rsp->barrier_mutex);
+ mutex_unlock(&rcu_state.barrier_mutex);
return;
}
/* Mark the start of the barrier operation. */
- rcu_seq_start(&rsp->barrier_sequence);
- _rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
+ rcu_seq_start(&rcu_state.barrier_sequence);
+ rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
/*
* Initialize the count to one rather than to zero in order to
@@ -3431,8 +3095,8 @@ static void _rcu_barrier(struct rcu_state *rsp)
* (or preemption of this task). Exclude CPU-hotplug operations
* to ensure that no offline CPU has callbacks queued.
*/
- init_completion(&rsp->barrier_completion);
- atomic_set(&rsp->barrier_cpu_count, 1);
+ init_completion(&rcu_state.barrier_completion);
+ atomic_set(&rcu_state.barrier_cpu_count, 1);
get_online_cpus();
/*
@@ -3443,26 +3107,26 @@ static void _rcu_barrier(struct rcu_state *rsp)
for_each_possible_cpu(cpu) {
if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
continue;
- rdp = per_cpu_ptr(rsp->rda, cpu);
+ rdp = per_cpu_ptr(&rcu_data, cpu);
if (rcu_is_nocb_cpu(cpu)) {
- if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
- _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
- rsp->barrier_sequence);
+ if (!rcu_nocb_cpu_needs_barrier(cpu)) {
+ rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
+ rcu_state.barrier_sequence);
} else {
- _rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
- rsp->barrier_sequence);
+ rcu_barrier_trace(TPS("OnlineNoCB"), cpu,
+ rcu_state.barrier_sequence);
smp_mb__before_atomic();
- atomic_inc(&rsp->barrier_cpu_count);
+ atomic_inc(&rcu_state.barrier_cpu_count);
__call_rcu(&rdp->barrier_head,
- rcu_barrier_callback, rsp, cpu, 0);
+ rcu_barrier_callback, cpu, 0);
}
} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
- _rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
- rsp->barrier_sequence);
- smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
+ rcu_barrier_trace(TPS("OnlineQ"), cpu,
+ rcu_state.barrier_sequence);
+ smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
} else {
- _rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
- rsp->barrier_sequence);
+ rcu_barrier_trace(TPS("OnlineNQ"), cpu,
+ rcu_state.barrier_sequence);
}
}
put_online_cpus();
@@ -3471,37 +3135,20 @@ static void _rcu_barrier(struct rcu_state *rsp)
* Now that we have an rcu_barrier_callback() callback on each
* CPU, and thus each counted, remove the initial count.
*/
- if (atomic_dec_and_test(&rsp->barrier_cpu_count))
- complete(&rsp->barrier_completion);
+ if (atomic_dec_and_test(&rcu_state.barrier_cpu_count))
+ complete(&rcu_state.barrier_completion);
/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
- wait_for_completion(&rsp->barrier_completion);
+ wait_for_completion(&rcu_state.barrier_completion);
/* Mark the end of the barrier operation. */
- _rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
- rcu_seq_end(&rsp->barrier_sequence);
+ rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
+ rcu_seq_end(&rcu_state.barrier_sequence);
/* Other rcu_barrier() invocations can now safely proceed. */
- mutex_unlock(&rsp->barrier_mutex);
-}
-
-/**
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
- */
-void rcu_barrier_bh(void)
-{
- _rcu_barrier(&rcu_bh_state);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-
-/**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
- */
-void rcu_barrier_sched(void)
-{
- _rcu_barrier(&rcu_sched_state);
+ mutex_unlock(&rcu_state.barrier_mutex);
}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+EXPORT_SYMBOL_GPL(rcu_barrier);
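
The count-to-one idiom above is worth spelling out: barrier_cpu_count starts at one, each posted callback adds one, and the initial count is dropped only after every CPU has been visited, so an early-firing callback can never drive the count to zero prematurely. A minimal user-space model of the idiom (a sketch in C11 atomics; the names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int barrier_cpu_count;
static int barrier_done;

/* Models rcu_barrier_callback(): whoever drops the count to zero completes the barrier. */
static void barrier_callback(void)
{
	if (atomic_fetch_sub(&barrier_cpu_count, 1) == 1)
		barrier_done = 1;	/* models complete(&barrier_completion) */
}

int main(void)
{
	atomic_init(&barrier_cpu_count, 1);	/* start at one: no premature completion */
	for (int cpu = 0; cpu < 4; cpu++) {
		atomic_fetch_add(&barrier_cpu_count, 1);	/* count the posted callback */
		barrier_callback();			/* may fire any time after posting */
	}
	if (atomic_fetch_sub(&barrier_cpu_count, 1) == 1)	/* drop the initial count */
		barrier_done = 1;
	printf("barrier done: %d\n", barrier_done);	/* prints 1, and only now */
	return 0;
}
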
/*
* Propagate ->qsinitmask bits up the rcu_node tree to account for the
@@ -3535,46 +3182,46 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
* Do boot-time initialization of a CPU's per-CPU RCU data.
*/
static void __init
-rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
+rcu_boot_init_percpu_data(int cpu)
{
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
/* Set up local state, ensuring consistent view of global state. */
rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
- rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
- WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
- WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
- rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+ WARN_ON_ONCE(rdp->dynticks_nesting != 1);
+ WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
+ rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
- rdp->rcu_onl_gp_seq = rsp->gp_seq;
+ rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
rdp->cpu = cpu;
- rdp->rsp = rsp;
rcu_boot_init_nocb_percpu_data(rdp);
}
/*
- * Initialize a CPU's per-CPU RCU data. Note that only one online or
+ * Invoked early in the CPU-online process, when pretty much all services
+ * are available. The incoming CPU is not present.
+ *
+ * Initializes a CPU's per-CPU RCU data. Note that only one online or
* offline event can be happening at a given time. Note also that we can
 * accept some slop in the ->gp_seq access due to the fact that this
* CPU cannot possibly have any RCU callbacks in flight yet.
*/
-static void
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
+int rcutree_prepare_cpu(unsigned int cpu)
{
unsigned long flags;
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
- struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ struct rcu_node *rnp = rcu_get_root();
/* Set up local state, ensuring consistent view of global state. */
raw_spin_lock_irqsave_rcu_node(rnp, flags);
rdp->qlen_last_fqs_check = 0;
- rdp->n_force_qs_snap = rsp->n_force_qs;
+ rdp->n_force_qs_snap = rcu_state.n_force_qs;
rdp->blimit = blimit;
if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
!init_nocb_callback_list(rdp))
rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
- rdp->dynticks->dynticks_nesting = 1; /* CPU not up, no tearing. */
+ rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */
rcu_dynticks_eqs_online();
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
@@ -3589,25 +3236,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->gp_seq = rnp->gp_seq;
rdp->gp_seq_needed = rnp->gp_seq;
rdp->cpu_no_qs.b.norm = true;
- rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
rdp->core_needs_qs = false;
rdp->rcu_iw_pending = false;
rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
- trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl"));
+ trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-}
-
-/*
- * Invoked early in the CPU-online process, when pretty much all
- * services are available. The incoming CPU is not present.
- */
-int rcutree_prepare_cpu(unsigned int cpu)
-{
- struct rcu_state *rsp;
-
- for_each_rcu_flavor(rsp)
- rcu_init_percpu_data(cpu, rsp);
-
rcu_prepare_kthreads(cpu);
rcu_spawn_all_nocb_kthreads(cpu);
@@ -3619,7 +3252,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
*/
static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
{
- struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
}
@@ -3633,15 +3266,12 @@ int rcutree_online_cpu(unsigned int cpu)
unsigned long flags;
struct rcu_data *rdp;
struct rcu_node *rnp;
- struct rcu_state *rsp;
- for_each_rcu_flavor(rsp) {
- rdp = per_cpu_ptr(rsp->rda, cpu);
- rnp = rdp->mynode;
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- rnp->ffmask |= rdp->grpmask;
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- }
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+ rnp = rdp->mynode;
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ rnp->ffmask |= rdp->grpmask;
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
if (IS_ENABLED(CONFIG_TREE_SRCU))
srcu_online_cpu(cpu);
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
@@ -3660,15 +3290,12 @@ int rcutree_offline_cpu(unsigned int cpu)
unsigned long flags;
struct rcu_data *rdp;
struct rcu_node *rnp;
- struct rcu_state *rsp;
- for_each_rcu_flavor(rsp) {
- rdp = per_cpu_ptr(rsp->rda, cpu);
- rnp = rdp->mynode;
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- rnp->ffmask &= ~rdp->grpmask;
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- }
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+ rnp = rdp->mynode;
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ rnp->ffmask &= ~rdp->grpmask;
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rcutree_affinity_setting(cpu, cpu);
if (IS_ENABLED(CONFIG_TREE_SRCU))
@@ -3676,32 +3303,6 @@ int rcutree_offline_cpu(unsigned int cpu)
return 0;
}
-/*
- * Near the end of the offline process. We do only tracing here.
- */
-int rcutree_dying_cpu(unsigned int cpu)
-{
- struct rcu_state *rsp;
-
- for_each_rcu_flavor(rsp)
- rcu_cleanup_dying_cpu(rsp);
- return 0;
-}
-
-/*
- * The outgoing CPU is gone and we are running elsewhere.
- */
-int rcutree_dead_cpu(unsigned int cpu)
-{
- struct rcu_state *rsp;
-
- for_each_rcu_flavor(rsp) {
- rcu_cleanup_dead_cpu(cpu, rsp);
- do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
- }
- return 0;
-}
-
static DEFINE_PER_CPU(int, rcu_cpu_started);
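
rcu_cpu_started is a per-CPU latch: rcu_cpu_starting() below can be reached from more than one bring-up path, and the early return on a set flag makes any second invocation a no-op. A user-space sketch of the latch idiom (illustrative only; a plain array stands in for per-CPU storage):

#include <stdio.h>

#define NR_CPUS 4
static int cpu_started[NR_CPUS];	/* models DEFINE_PER_CPU(int, rcu_cpu_started) */

static void cpu_starting(unsigned int cpu)
{
	if (cpu_started[cpu])
		return;			/* already initialized: idempotent */
	cpu_started[cpu] = 1;
	printf("bringing up CPU %u\n", cpu);
}

int main(void)
{
	cpu_starting(1);
	cpu_starting(1);		/* second call prints nothing */
	return 0;
}
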
/*
@@ -3723,137 +3324,113 @@ void rcu_cpu_starting(unsigned int cpu)
unsigned long oldmask;
struct rcu_data *rdp;
struct rcu_node *rnp;
- struct rcu_state *rsp;
if (per_cpu(rcu_cpu_started, cpu))
return;
per_cpu(rcu_cpu_started, cpu) = 1;
- for_each_rcu_flavor(rsp) {
- rdp = per_cpu_ptr(rsp->rda, cpu);
- rnp = rdp->mynode;
- mask = rdp->grpmask;
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- rnp->qsmaskinitnext |= mask;
- oldmask = rnp->expmaskinitnext;
- rnp->expmaskinitnext |= mask;
- oldmask ^= rnp->expmaskinitnext;
- nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
- /* Allow lockless access for expedited grace periods. */
- smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
- rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
- rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
- rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
- if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
- /* Report QS -after- changing ->qsmaskinitnext! */
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
- } else {
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- }
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+ rnp = rdp->mynode;
+ mask = rdp->grpmask;
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ rnp->qsmaskinitnext |= mask;
+ oldmask = rnp->expmaskinitnext;
+ rnp->expmaskinitnext |= mask;
+ oldmask ^= rnp->expmaskinitnext;
+ nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
+ /* Allow lockless access for expedited grace periods. */
+ smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */
+ rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
+ rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
+ rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
+ if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+ /* Report QS -after- changing ->qsmaskinitnext! */
+ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
+ } else {
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
}
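
In the hunk above, XORing the old expmaskinitnext back into the updated value isolates exactly the bits that just turned on, so bitmap_weight() counts only the newly arrived CPUs when advancing rcu_state.ncpus. A standalone demonstration (illustrative values; __builtin_popcountl stands in for bitmap_weight()):

#include <stdio.h>

int main(void)
{
	unsigned long expmaskinitnext = 0x5;	/* CPUs 0 and 2 already online */
	unsigned long mask = 0x2;		/* CPU 1 coming online */
	unsigned long oldmask = expmaskinitnext;

	expmaskinitnext |= mask;
	oldmask ^= expmaskinitnext;		/* only the just-set bits survive */
	printf("newly onlined CPUs: %d\n", __builtin_popcountl(oldmask));	/* 1 */
	return 0;
}
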
#ifdef CONFIG_HOTPLUG_CPU
/*
- * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function. We now remove it from the rcu_node tree's ->qsmaskinitnext
- * bit masks.
+ * The outgoing CPU has no further need of RCU, so remove it from
+ * the rcu_node tree's ->qsmaskinitnext bit masks.
+ *
+ * Note that this function is special in that it is invoked directly
+ * from the outgoing CPU rather than from the cpuhp_step mechanism.
+ * This is because this function must be invoked at a precise location.
*/
-static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
+void rcu_report_dead(unsigned int cpu)
{
unsigned long flags;
unsigned long mask;
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
+ /* QS for any half-done expedited grace period. */
+ preempt_disable();
+ rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
+ preempt_enable();
+ rcu_preempt_deferred_qs(current);
+
/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
mask = rdp->grpmask;
- spin_lock(&rsp->ofl_lock);
+ raw_spin_lock(&rcu_state.ofl_lock);
raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
- rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
- rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
+ rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
+ rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
/* Report quiescent state -before- changing ->qsmaskinitnext! */
- rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+ rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
raw_spin_lock_irqsave_rcu_node(rnp, flags);
}
rnp->qsmaskinitnext &= ~mask;
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- spin_unlock(&rsp->ofl_lock);
-}
-
-/*
- * The outgoing function has no further need of RCU, so remove it from
- * the list of CPUs that RCU must track.
- *
- * Note that this function is special in that it is invoked directly
- * from the outgoing CPU rather than from the cpuhp_step mechanism.
- * This is because this function must be invoked at a precise location.
- */
-void rcu_report_dead(unsigned int cpu)
-{
- struct rcu_state *rsp;
-
- /* QS for any half-done expedited RCU-sched GP. */
- preempt_disable();
- rcu_report_exp_rdp(&rcu_sched_state,
- this_cpu_ptr(rcu_sched_state.rda), true);
- preempt_enable();
- for_each_rcu_flavor(rsp)
- rcu_cleanup_dying_idle_cpu(cpu, rsp);
+ raw_spin_unlock(&rcu_state.ofl_lock);
per_cpu(rcu_cpu_started, cpu) = 0;
}
-/* Migrate the dead CPU's callbacks to the current CPU. */
-static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
+/*
+ * The outgoing CPU has just passed through the dying-idle state, and we
+ * are being invoked from the CPU that was IPIed to continue the offline
+ * operation. Migrate the outgoing CPU's callbacks to the current CPU.
+ */
+void rcutree_migrate_callbacks(int cpu)
{
unsigned long flags;
struct rcu_data *my_rdp;
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
- struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ struct rcu_node *rnp_root = rcu_get_root();
bool needwake;
if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
return; /* No callbacks to migrate. */
local_irq_save(flags);
- my_rdp = this_cpu_ptr(rsp->rda);
+ my_rdp = this_cpu_ptr(&rcu_data);
if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
local_irq_restore(flags);
return;
}
raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
/* Leverage recent GPs and set GP for new callbacks. */
- needwake = rcu_advance_cbs(rsp, rnp_root, rdp) ||
- rcu_advance_cbs(rsp, rnp_root, my_rdp);
+ needwake = rcu_advance_cbs(rnp_root, rdp) ||
+ rcu_advance_cbs(rnp_root, my_rdp);
rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
!rcu_segcblist_n_cbs(&my_rdp->cblist));
raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
if (needwake)
- rcu_gp_kthread_wake(rsp);
+ rcu_gp_kthread_wake();
WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
!rcu_segcblist_empty(&rdp->cblist),
"rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
cpu, rcu_segcblist_n_cbs(&rdp->cblist),
rcu_segcblist_first_cb(&rdp->cblist));
}
-
-/*
- * The outgoing CPU has just passed through the dying-idle state,
- * and we are being invoked from the CPU that was IPIed to continue the
- * offline operation. We need to migrate the outgoing CPU's callbacks.
- */
-void rcutree_migrate_callbacks(int cpu)
-{
- struct rcu_state *rsp;
-
- for_each_rcu_flavor(rsp)
- rcu_migrate_callbacks(cpu, rsp);
-}
#endif
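
The core of rcutree_migrate_callbacks() above is rcu_segcblist_merge(), which splices the dead CPU's entire callback list onto the survivor's and leaves the source empty. A simplified model using a head/tail-pointer list (a sketch; the kernel's rcu_segcblist additionally tracks per-grace-period segments):

#include <stddef.h>
#include <stdio.h>

struct cb { struct cb *next; };

struct cblist {
	struct cb *head;
	struct cb **tail;	/* points at the final ->next slot */
	long len;
};

static void cblist_init(struct cblist *l)
{
	l->head = NULL;
	l->tail = &l->head;
	l->len = 0;
}

static void cblist_push(struct cblist *l, struct cb *c)
{
	c->next = NULL;
	*l->tail = c;
	l->tail = &c->next;
	l->len++;
}

/* Move everything from src onto dst, emptying src (models rcu_segcblist_merge()). */
static void cblist_merge(struct cblist *dst, struct cblist *src)
{
	*dst->tail = src->head;
	if (src->head)
		dst->tail = src->tail;
	dst->len += src->len;
	cblist_init(src);
}

int main(void)
{
	struct cblist mine, dead;
	struct cb a, b;

	cblist_init(&mine);
	cblist_init(&dead);
	cblist_push(&mine, &a);	/* surviving CPU already has one callback */
	cblist_push(&dead, &b);	/* dead CPU's orphaned callback */
	cblist_merge(&mine, &dead);
	printf("merged len=%ld, source empty=%d\n", mine.len, dead.head == NULL);
	return 0;
}
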
/*
@@ -3881,14 +3458,13 @@ static int rcu_pm_notify(struct notifier_block *self,
}
/*
- * Spawn the kthreads that handle each RCU flavor's grace periods.
+ * Spawn the kthreads that handle RCU's grace periods.
*/
static int __init rcu_spawn_gp_kthread(void)
{
unsigned long flags;
int kthread_prio_in = kthread_prio;
struct rcu_node *rnp;
- struct rcu_state *rsp;
struct sched_param sp;
struct task_struct *t;
@@ -3908,19 +3484,17 @@ static int __init rcu_spawn_gp_kthread(void)
kthread_prio, kthread_prio_in);
rcu_scheduler_fully_active = 1;
- for_each_rcu_flavor(rsp) {
- t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
- BUG_ON(IS_ERR(t));
- rnp = rcu_get_root(rsp);
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
- rsp->gp_kthread = t;
- if (kthread_prio) {
- sp.sched_priority = kthread_prio;
- sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
- }
- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- wake_up_process(t);
+ t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
+ BUG_ON(IS_ERR(t));
+ rnp = rcu_get_root();
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ rcu_state.gp_kthread = t;
+ if (kthread_prio) {
+ sp.sched_priority = kthread_prio;
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
}
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ wake_up_process(t);
rcu_spawn_nocb_kthreads();
rcu_spawn_boost_kthreads();
return 0;
@@ -3947,9 +3521,9 @@ void rcu_scheduler_starting(void)
}
/*
- * Helper function for rcu_init() that initializes one rcu_state structure.
+ * Helper function for rcu_init() that initializes the rcu_state structure.
*/
-static void __init rcu_init_one(struct rcu_state *rsp)
+static void __init rcu_init_one(void)
{
static const char * const buf[] = RCU_NODE_NAME_INIT;
static const char * const fqs[] = RCU_FQS_NAME_INIT;
@@ -3971,14 +3545,15 @@ static void __init rcu_init_one(struct rcu_state *rsp)
/* Initialize the level-tracking arrays. */
for (i = 1; i < rcu_num_lvls; i++)
- rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
+ rcu_state.level[i] =
+ rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
rcu_init_levelspread(levelspread, num_rcu_lvl);
/* Initialize the elements themselves, starting from the leaves. */
for (i = rcu_num_lvls - 1; i >= 0; i--) {
cpustride *= levelspread[i];
- rnp = rsp->level[i];
+ rnp = rcu_state.level[i];
for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
@@ -3986,9 +3561,9 @@ static void __init rcu_init_one(struct rcu_state *rsp)
raw_spin_lock_init(&rnp->fqslock);
lockdep_set_class_and_name(&rnp->fqslock,
&rcu_fqs_class[i], fqs[i]);
- rnp->gp_seq = rsp->gp_seq;
- rnp->gp_seq_needed = rsp->gp_seq;
- rnp->completedqs = rsp->gp_seq;
+ rnp->gp_seq = rcu_state.gp_seq;
+ rnp->gp_seq_needed = rcu_state.gp_seq;
+ rnp->completedqs = rcu_state.gp_seq;
rnp->qsmask = 0;
rnp->qsmaskinit = 0;
rnp->grplo = j * cpustride;
@@ -4001,8 +3576,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
rnp->parent = NULL;
} else {
rnp->grpnum = j % levelspread[i - 1];
- rnp->grpmask = 1UL << rnp->grpnum;
- rnp->parent = rsp->level[i - 1] +
+ rnp->grpmask = BIT(rnp->grpnum);
+ rnp->parent = rcu_state.level[i - 1] +
j / levelspread[i - 1];
}
rnp->level = i;
@@ -4016,16 +3591,15 @@ static void __init rcu_init_one(struct rcu_state *rsp)
}
}
- init_swait_queue_head(&rsp->gp_wq);
- init_swait_queue_head(&rsp->expedited_wq);
- rnp = rcu_first_leaf_node(rsp);
+ init_swait_queue_head(&rcu_state.gp_wq);
+ init_swait_queue_head(&rcu_state.expedited_wq);
+ rnp = rcu_first_leaf_node();
for_each_possible_cpu(i) {
while (i > rnp->grphi)
rnp++;
- per_cpu_ptr(rsp->rda, i)->mynode = rnp;
- rcu_boot_init_percpu_data(i, rsp);
+ per_cpu_ptr(&rcu_data, i)->mynode = rnp;
+ rcu_boot_init_percpu_data(i);
}
- list_add(&rsp->flavors, &rcu_struct_flavors);
}
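
For intuition about the arrays just initialized: leaves each cover up to RCU_FANOUT_LEAF CPUs and interior levels fan out by RCU_FANOUT, so node counts shrink by the fanout at each step toward the root. A quick standalone computation of per-level node counts (hypothetical parameter values):

#include <stdio.h>

int main(void)
{
	int nr_cpus = 4096, fanout_leaf = 16, fanout = 64;
	int levels[8], n = 0;

	/* Build counts from the leaves up, ceil-dividing until one root remains. */
	int nodes = (nr_cpus + fanout_leaf - 1) / fanout_leaf;
	levels[n++] = nodes;
	while (nodes > 1) {
		nodes = (nodes + fanout - 1) / fanout;
		levels[n++] = nodes;
	}
	for (int i = n - 1; i >= 0; i--)	/* root level first */
		printf("level %d: %d nodes\n", n - 1 - i, levels[i]);
	return 0;
}

With these values the tree has one root, 4 mid-level nodes, and 256 leaves.
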
/*
@@ -4051,6 +3625,8 @@ static void __init rcu_init_geometry(void)
jiffies_till_first_fqs = d;
if (jiffies_till_next_fqs == ULONG_MAX)
jiffies_till_next_fqs = d;
+ if (jiffies_till_sched_qs == ULONG_MAX)
+ adjust_jiffies_till_sched_qs();
/* If the compile-time values are accurate, just leave. */
if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
@@ -4109,16 +3685,16 @@ static void __init rcu_init_geometry(void)
/*
* Dump out the structure of the rcu_node combining tree associated
- * with the rcu_state structure referenced by rsp.
+ * with the rcu_state structure.
*/
-static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
+static void __init rcu_dump_rcu_node_tree(void)
{
int level = 0;
struct rcu_node *rnp;
pr_info("rcu_node tree layout dump\n");
pr_info(" ");
- rcu_for_each_node_breadth_first(rsp, rnp) {
+ rcu_for_each_node_breadth_first(rnp) {
if (rnp->level != level) {
pr_cont("\n");
pr_info(" ");
@@ -4140,11 +3716,9 @@ void __init rcu_init(void)
rcu_bootup_announce();
rcu_init_geometry();
- rcu_init_one(&rcu_bh_state);
- rcu_init_one(&rcu_sched_state);
+ rcu_init_one();
if (dump_tree)
- rcu_dump_rcu_node_tree(&rcu_sched_state);
- __rcu_init_preempt();
+ rcu_dump_rcu_node_tree();
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
/*
@@ -4164,6 +3738,7 @@ void __init rcu_init(void)
WARN_ON(!rcu_gp_wq);
rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
WARN_ON(!rcu_par_gp_wq);
+ srcu_init();
}
#include "tree_exp.h"
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 4e74df768c57..703e19ff532d 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -34,34 +34,9 @@
#include "rcu_segcblist.h"
-/*
- * Dynticks per-CPU state.
- */
-struct rcu_dynticks {
- long dynticks_nesting; /* Track process nesting level. */
- long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
- atomic_t dynticks; /* Even value for idle, else odd. */
- bool rcu_need_heavy_qs; /* GP old, need heavy quiescent state. */
- unsigned long rcu_qs_ctr; /* Light universal quiescent state ctr. */
- bool rcu_urgent_qs; /* GP old need light quiescent state. */
-#ifdef CONFIG_RCU_FAST_NO_HZ
- bool all_lazy; /* Are all CPU's CBs lazy? */
- unsigned long nonlazy_posted;
- /* # times non-lazy CBs posted to CPU. */
- unsigned long nonlazy_posted_snap;
- /* idle-period nonlazy_posted snapshot. */
- unsigned long last_accelerate;
- /* Last jiffy CBs were accelerated. */
- unsigned long last_advance_all;
- /* Last jiffy CBs were all advanced. */
- int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
-#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
-};
-
/* Communicate arguments to a workqueue handler. */
struct rcu_exp_work {
smp_call_func_t rew_func;
- struct rcu_state *rew_rsp;
unsigned long rew_s;
struct work_struct rew_work;
};
@@ -170,7 +145,7 @@ struct rcu_node {
* are indexed relative to this interval rather than the global CPU ID space.
* This generates the bit for a CPU in node-local masks.
*/
-#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))
+#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
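
BIT(n) expands to 1UL << (n), so the rewrite is behaviorally identical; the macro maps a global CPU number to a bit position local to the leaf's [grplo, grphi] range. A quick check (a sketch; grplo is passed directly instead of an rcu_node pointer):

#include <stdio.h>

#define BIT(n) (1UL << (n))
#define leaf_cpu_bit(grplo, cpu) (BIT((cpu) - (grplo)))

int main(void)
{
	/* A leaf covering CPUs 16..31: CPU 19 becomes local bit 3. */
	printf("0x%lx\n", leaf_cpu_bit(16, 19));	/* prints 0x8 */
	return 0;
}
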
/*
* Union to allow "aggregate OR" operation on the need for a quiescent
@@ -189,12 +164,11 @@ struct rcu_data {
/* 1) quiescent-state and grace-period handling : */
unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */
unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed ctr. */
- unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
- /* for rcu_all_qs() invocations. */
union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
bool core_needs_qs; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */
bool gpwrap; /* Possible ->gp_seq wrap. */
+ bool deferred_qs; /* This CPU awaiting a deferred QS? */
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
unsigned long grpmask; /* Mask to apply to leaf qsmask. */
unsigned long ticks_this_gp; /* The number of scheduling-clock */
@@ -213,23 +187,27 @@ struct rcu_data {
long blimit; /* Upper limit on a processed batch */
/* 3) dynticks interface. */
- struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
int dynticks_snap; /* Per-GP tracking for dynticks. */
-
- /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
- unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
- unsigned long cond_resched_completed;
- /* Grace period that needs help */
- /* from cond_resched(). */
-
- /* 5) _rcu_barrier(), OOM callbacks, and expediting. */
- struct rcu_head barrier_head;
+ long dynticks_nesting; /* Track process nesting level. */
+ long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
+ atomic_t dynticks; /* Even value for idle, else odd. */
+ bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
+ bool rcu_urgent_qs; /* GP old need light quiescent state. */
#ifdef CONFIG_RCU_FAST_NO_HZ
- struct rcu_head oom_head;
+ bool all_lazy; /* Are all this CPU's CBs lazy? */
+ unsigned long nonlazy_posted; /* # times non-lazy CB posted to CPU. */
+ unsigned long nonlazy_posted_snap;
+ /* Nonlazy_posted snapshot. */
+ unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */
+ unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */
+ int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+
+ /* 4) rcu_barrier(), OOM callbacks, and expediting. */
+ struct rcu_head barrier_head;
int exp_dynticks_snap; /* Double-check need for IPI. */
- /* 6) Callback offloading. */
+ /* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
struct rcu_head *nocb_head; /* CBs waiting for kthread. */
struct rcu_head **nocb_tail;
@@ -256,7 +234,7 @@ struct rcu_data {
/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
- /* 7) Diagnostic data, including RCU CPU stall warnings. */
+ /* 6) Diagnostic data, including RCU CPU stall warnings. */
unsigned int softirq_snap; /* Snapshot of softirq activity. */
/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
struct irq_work rcu_iw; /* Check for non-irq activity. */
@@ -266,9 +244,9 @@ struct rcu_data {
short rcu_ofl_gp_flags; /* ->gp_flags at last offline. */
unsigned long rcu_onl_gp_seq; /* ->gp_seq at last online. */
short rcu_onl_gp_flags; /* ->gp_flags at last online. */
+ unsigned long last_fqs_resched; /* Time of last rcu_resched(). */
int cpu;
- struct rcu_state *rsp;
};
/* Values for nocb_defer_wakeup field in struct rcu_data. */
@@ -314,8 +292,6 @@ struct rcu_state {
struct rcu_node *level[RCU_NUM_LVLS + 1];
/* Hierarchy levels (+1 to */
/* shut bogus gcc warning) */
- struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */
- call_rcu_func_t call; /* call_rcu() flavor. */
int ncpus; /* # CPUs seen so far. */
/* The following fields are guarded by the root rcu_node's lock. */
@@ -334,7 +310,7 @@ struct rcu_state {
atomic_t barrier_cpu_count; /* # CPUs waiting on. */
struct completion barrier_completion; /* Wake at barrier end. */
unsigned long barrier_sequence; /* ++ at start and end of */
- /* _rcu_barrier(). */
+ /* rcu_barrier(). */
/* End of fields guarded by barrier_mutex. */
struct mutex exp_mutex; /* Serialize expedited GP. */
@@ -366,9 +342,8 @@ struct rcu_state {
/* jiffies. */
const char *name; /* Name of structure. */
char abbr; /* Abbreviated name. */
- struct list_head flavors; /* List of RCU flavors. */
- spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
+ raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
/* Synchronize offline with */
/* GP pre-initialization. */
};
@@ -388,7 +363,6 @@ struct rcu_state {
#define RCU_GP_CLEANUP 7 /* Grace-period cleanup started. */
#define RCU_GP_CLEANED 8 /* Grace-period cleanup complete. */
-#ifndef RCU_TREE_NONCORE
static const char * const gp_state_names[] = {
"RCU_GP_IDLE",
"RCU_GP_WAIT_GPS",
@@ -400,13 +374,29 @@ static const char * const gp_state_names[] = {
"RCU_GP_CLEANUP",
"RCU_GP_CLEANED",
};
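
Because the array is indexed directly by the RCU_GP_* values above, diagnostics can turn a numeric ->gp_state into its symbolic name with a bounds-checked lookup along these lines (a sketch; the kernel carries a similar helper):

static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}
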
-#endif /* #ifndef RCU_TREE_NONCORE */
-
-extern struct list_head rcu_struct_flavors;
-/* Sequence through rcu_state structures for each RCU flavor. */
-#define for_each_rcu_flavor(rsp) \
- list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
+/*
+ * In order to export the rcu_state name to the tracing tools, it
+ * needs to be added in the __tracepoint_string section.
+ * This requires defining a separate variable tp_<sname>_varname
+ * that points to the string being used, which allows the userspace
+ * tracing tools to translate the string address back to the
+ * matching string.
+ */
+#ifdef CONFIG_PREEMPT_RCU
+#define RCU_ABBR 'p'
+#define RCU_NAME_RAW "rcu_preempt"
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+#define RCU_ABBR 's'
+#define RCU_NAME_RAW "rcu_sched"
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+#ifndef CONFIG_TRACING
+#define RCU_NAME RCU_NAME_RAW
+#else /* #ifdef CONFIG_TRACING */
+static char rcu_name[] = RCU_NAME_RAW;
+static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
+#define RCU_NAME rcu_name
+#endif /* #else #ifdef CONFIG_TRACING */
/*
* RCU implementation internal declarations:
@@ -419,7 +409,7 @@ extern struct rcu_state rcu_bh_state;
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
-int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
+int rcu_dynticks_snap(struct rcu_data *rdp);
#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
@@ -428,45 +418,37 @@ DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */
-#ifndef RCU_TREE_NONCORE
-
/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
-static void rcu_preempt_note_context_switch(bool preempt);
+static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void rcu_print_detail_task_stall(struct rcu_state *rsp);
+static void rcu_print_detail_task_stall(void);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
-static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
- struct rcu_node *rnp);
-static void rcu_preempt_check_callbacks(void);
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
+static void rcu_flavor_check_callbacks(int user);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
-static void __init __rcu_init_preempt(void);
-static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
- int ncheck);
+static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
-#ifdef CONFIG_RCU_BOOST
-static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
- struct rcu_node *rnp);
-#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
+static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
+static void rcu_preempt_deferred_qs(struct task_struct *t);
static void print_cpu_stall_info_begin(void);
-static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
+static void print_cpu_stall_info(int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
-static void increment_cpu_stall_ticks(void);
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
+static bool rcu_nocb_cpu_needs_barrier(int cpu);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
@@ -481,11 +463,11 @@ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
-static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
+static void __init rcu_organize_nocb_kthreads(void);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_bind_gp_kthread(void);
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
+static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);
@@ -496,5 +478,3 @@ void srcu_offline_cpu(unsigned int cpu);
void srcu_online_cpu(unsigned int cpu) { }
void srcu_offline_cpu(unsigned int cpu) { }
#endif /* #else #ifdef CONFIG_SRCU */
-
-#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 0b2c2ad69629..8d18c1014e2b 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -25,39 +25,39 @@
/*
* Record the start of an expedited grace period.
*/
-static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
+static void rcu_exp_gp_seq_start(void)
{
- rcu_seq_start(&rsp->expedited_sequence);
+ rcu_seq_start(&rcu_state.expedited_sequence);
}
/*
 * Return the value that the expedited-grace-period counter will have
* at the end of the current grace period.
*/
-static __maybe_unused unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp)
+static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
- return rcu_seq_endval(&rsp->expedited_sequence);
+ return rcu_seq_endval(&rcu_state.expedited_sequence);
}
/*
* Record the end of an expedited grace period.
*/
-static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
+static void rcu_exp_gp_seq_end(void)
{
- rcu_seq_end(&rsp->expedited_sequence);
+ rcu_seq_end(&rcu_state.expedited_sequence);
smp_mb(); /* Ensure that consecutive grace periods serialize. */
}
/*
* Take a snapshot of the expedited-grace-period counter.
*/
-static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
+static unsigned long rcu_exp_gp_seq_snap(void)
{
unsigned long s;
smp_mb(); /* Caller's modifications seen first by other CPUs. */
- s = rcu_seq_snap(&rsp->expedited_sequence);
- trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
+ s = rcu_seq_snap(&rcu_state.expedited_sequence);
+ trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
return s;
}
@@ -66,9 +66,9 @@ static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
* if a full expedited grace period has elapsed since that snapshot
* was taken.
*/
-static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
+static bool rcu_exp_gp_seq_done(unsigned long s)
{
- return rcu_seq_done(&rsp->expedited_sequence, s);
+ return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
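
These four helpers wrap the rcu_seq protocol: the counter's two low bits are nonzero while an expedited grace period is in flight, a snapshot rounds up to the value the counter will reach once one full fresh period has completed, and the done test is a simple comparison. A compact user-space model (a sketch; the kernel compares with ULONG_CMP_GE so that counter wrap is tolerated):

#include <stdio.h>

#define SEQ_STATE_MASK 3UL	/* low bits nonzero while a period runs */

static unsigned long seq;

static void seq_start(void) { seq++; }			/* enter in-progress state */
static void seq_end(void)   { seq = (seq | SEQ_STATE_MASK) + 1; }

/* Smallest counter value at which one full new period has elapsed. */
static unsigned long seq_snap(void)
{
	return (seq + 2 * SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

static int seq_done(unsigned long s) { return seq >= s; }

int main(void)
{
	unsigned long s = seq_snap();
	printf("done before any period? %d\n", seq_done(s));	/* 0 */
	seq_start();
	seq_end();
	printf("done after one full period? %d\n", seq_done(s));	/* 1 */
	return 0;
}
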
/*
@@ -78,26 +78,26 @@ static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
* ever been online. This means that this function normally takes its
* no-work-to-do fastpath.
*/
-static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
+static void sync_exp_reset_tree_hotplug(void)
{
bool done;
unsigned long flags;
unsigned long mask;
unsigned long oldmask;
- int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */
+ int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
struct rcu_node *rnp;
struct rcu_node *rnp_up;
/* If no new CPUs onlined since last time, nothing to do. */
- if (likely(ncpus == rsp->ncpus_snap))
+ if (likely(ncpus == rcu_state.ncpus_snap))
return;
- rsp->ncpus_snap = ncpus;
+ rcu_state.ncpus_snap = ncpus;
/*
* Each pass through the following loop propagates newly onlined
* CPUs for the current rcu_node structure up the rcu_node tree.
*/
- rcu_for_each_leaf_node(rsp, rnp) {
+ rcu_for_each_leaf_node(rnp) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
if (rnp->expmaskinit == rnp->expmaskinitnext) {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -135,13 +135,13 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
* Reset the ->expmask values in the rcu_node tree in preparation for
* a new expedited grace period.
*/
-static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
+static void __maybe_unused sync_exp_reset_tree(void)
{
unsigned long flags;
struct rcu_node *rnp;
- sync_exp_reset_tree_hotplug(rsp);
- rcu_for_each_node_breadth_first(rsp, rnp) {
+ sync_exp_reset_tree_hotplug();
+ rcu_for_each_node_breadth_first(rnp) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
WARN_ON_ONCE(rnp->expmask);
rnp->expmask = rnp->expmaskinit;
@@ -194,7 +194,7 @@ static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
*
* Caller must hold the specified rcu_node structure's ->lock.
*/
-static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+static void __rcu_report_exp_rnp(struct rcu_node *rnp,
bool wake, unsigned long flags)
__releases(rnp->lock)
{
@@ -212,7 +212,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
if (wake) {
smp_mb(); /* EGP done before wake_up(). */
- swake_up_one(&rsp->expedited_wq);
+ swake_up_one(&rcu_state.expedited_wq);
}
break;
}
@@ -229,20 +229,19 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
* Report expedited quiescent state for specified node. This is a
* lock-acquisition wrapper function for __rcu_report_exp_rnp().
*/
-static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
- struct rcu_node *rnp, bool wake)
+static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
unsigned long flags;
raw_spin_lock_irqsave_rcu_node(rnp, flags);
- __rcu_report_exp_rnp(rsp, rnp, wake, flags);
+ __rcu_report_exp_rnp(rnp, wake, flags);
}
/*
* Report expedited quiescent state for multiple CPUs, all covered by the
* specified leaf rcu_node structure.
*/
-static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
+static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
unsigned long mask, bool wake)
{
unsigned long flags;
@@ -253,23 +252,23 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
return;
}
rnp->expmask &= ~mask;
- __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
+ __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}
/*
* Report expedited quiescent state for specified rcu_data (CPU).
*/
-static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
- bool wake)
+static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
- rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
+ WRITE_ONCE(rdp->deferred_qs, false);
+ rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}
-/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
-static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
+/* Common code for work-done checking. */
+static bool sync_exp_work_done(unsigned long s)
{
- if (rcu_exp_gp_seq_done(rsp, s)) {
- trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
+ if (rcu_exp_gp_seq_done(s)) {
+ trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
/* Ensure test happens before caller kfree(). */
smp_mb__before_atomic(); /* ^^^ */
return true;
@@ -284,28 +283,28 @@ static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
* with the mutex held, indicating that the caller must actually do the
* expedited grace period.
*/
-static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
+static bool exp_funnel_lock(unsigned long s)
{
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
struct rcu_node *rnp = rdp->mynode;
- struct rcu_node *rnp_root = rcu_get_root(rsp);
+ struct rcu_node *rnp_root = rcu_get_root();
/* Low-contention fastpath. */
if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
(rnp == rnp_root ||
ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
- mutex_trylock(&rsp->exp_mutex))
+ mutex_trylock(&rcu_state.exp_mutex))
goto fastpath;
/*
* Each pass through the following loop works its way up
* the rcu_node tree, returning if others have done the work or
- * otherwise falls through to acquire rsp->exp_mutex. The mapping
+ * otherwise falling through to acquire ->exp_mutex. The mapping
* from CPU to rcu_node structure can be inexact, as it is just
* promoting locality and is not strictly needed for correctness.
*/
for (; rnp != NULL; rnp = rnp->parent) {
- if (sync_exp_work_done(rsp, s))
+ if (sync_exp_work_done(s))
return true;
/* Work not done, either wait here or go up. */
@@ -314,68 +313,29 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
/* Someone else doing GP, so wait for them. */
spin_unlock(&rnp->exp_lock);
- trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
+ trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
rnp->grplo, rnp->grphi,
TPS("wait"));
wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
- sync_exp_work_done(rsp, s));
+ sync_exp_work_done(s));
return true;
}
rnp->exp_seq_rq = s; /* Followers can wait on us. */
spin_unlock(&rnp->exp_lock);
- trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
- rnp->grphi, TPS("nxtlvl"));
+ trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
+ rnp->grplo, rnp->grphi, TPS("nxtlvl"));
}
- mutex_lock(&rsp->exp_mutex);
+ mutex_lock(&rcu_state.exp_mutex);
fastpath:
- if (sync_exp_work_done(rsp, s)) {
- mutex_unlock(&rsp->exp_mutex);
+ if (sync_exp_work_done(s)) {
+ mutex_unlock(&rcu_state.exp_mutex);
return true;
}
- rcu_exp_gp_seq_start(rsp);
- trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
+ rcu_exp_gp_seq_start();
+ trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
return false;
}
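
The funnel reduces to a per-level decision: stop if the requested period is already done, stop and wait if an earlier requester has claimed this level for at least our period, otherwise claim the level and climb toward the root. A single-threaded sketch of that decision (illustrative only; the per-node exp_lock serialization is omitted):

#include <stdio.h>

struct node { unsigned long exp_seq_rq; };

static unsigned long expedited_sequence;	/* models rcu_state.expedited_sequence */

/* One funnel step: returns 1 to stop (done or covered), 0 to climb. */
static int funnel_step(struct node *n, unsigned long s)
{
	if (expedited_sequence >= s)
		return 1;		/* period s already completed */
	if (n->exp_seq_rq >= s)
		return 1;		/* an earlier requester covers us: wait on it */
	n->exp_seq_rq = s;		/* claim; later requesters park here */
	return 0;
}

int main(void)
{
	struct node leaf = { 0 }, root = { 0 };
	unsigned long s = 4;
	int a = funnel_step(&leaf, s);	/* 0: claimed, climb */
	int b = funnel_step(&root, s);	/* 0: claimed, run the GP ourselves */
	int c = funnel_step(&leaf, s);	/* 1: a second requester waits at the leaf */

	printf("first: leaf=%d root=%d; second: leaf=%d\n", a, b, c);
	return 0;
}
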
-/* Invoked on each online non-idle CPU for expedited quiescent state. */
-static void sync_sched_exp_handler(void *data)
-{
- struct rcu_data *rdp;
- struct rcu_node *rnp;
- struct rcu_state *rsp = data;
-
- rdp = this_cpu_ptr(rsp->rda);
- rnp = rdp->mynode;
- if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
- __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
- return;
- if (rcu_is_cpu_rrupt_from_idle()) {
- rcu_report_exp_rdp(&rcu_sched_state,
- this_cpu_ptr(&rcu_sched_data), true);
- return;
- }
- __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
- /* Store .exp before .rcu_urgent_qs. */
- smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
- resched_cpu(smp_processor_id());
-}
-
-/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
-static void sync_sched_exp_online_cleanup(int cpu)
-{
- struct rcu_data *rdp;
- int ret;
- struct rcu_node *rnp;
- struct rcu_state *rsp = &rcu_sched_state;
-
- rdp = per_cpu_ptr(rsp->rda, cpu);
- rnp = rdp->mynode;
- if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
- return;
- ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
- WARN_ON_ONCE(ret);
-}
-
/*
* Select the CPUs within the specified rcu_node that the upcoming
* expedited grace period needs to wait for.
@@ -391,7 +351,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
struct rcu_exp_work *rewp =
container_of(wp, struct rcu_exp_work, rew_work);
struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
- struct rcu_state *rsp = rewp->rew_rsp;
func = rewp->rew_func;
raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -400,15 +359,14 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
mask_ofl_test = 0;
for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
- struct rcu_dynticks *rdtp = per_cpu_ptr(&rcu_dynticks, cpu);
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
int snap;
if (raw_smp_processor_id() == cpu ||
!(rnp->qsmaskinitnext & mask)) {
mask_ofl_test |= mask;
} else {
- snap = rcu_dynticks_snap(rdtp);
+ snap = rcu_dynticks_snap(rdp);
if (rcu_dynticks_in_eqs(snap))
mask_ofl_test |= mask;
else
@@ -429,17 +387,16 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
/* IPI the remaining CPUs for expedited quiescent state. */
for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
if (!(mask_ofl_ipi & mask))
continue;
retry_ipi:
- if (rcu_dynticks_in_eqs_since(rdp->dynticks,
- rdp->exp_dynticks_snap)) {
+ if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
mask_ofl_test |= mask;
continue;
}
- ret = smp_call_function_single(cpu, func, rsp, 0);
+ ret = smp_call_function_single(cpu, func, NULL, 0);
if (!ret) {
mask_ofl_ipi &= ~mask;
continue;
@@ -450,7 +407,7 @@ retry_ipi:
(rnp->expmask & mask)) {
/* Online, so delay for a bit and try again. */
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
- trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("selectofl"));
+ trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
schedule_timeout_uninterruptible(1);
goto retry_ipi;
}
@@ -462,33 +419,31 @@ retry_ipi:
/* Report quiescent states for those that went offline. */
mask_ofl_test |= mask_ofl_ipi;
if (mask_ofl_test)
- rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
+ rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}
/*
* Select the nodes that the upcoming expedited grace period needs
* to wait for.
*/
-static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
- smp_call_func_t func)
+static void sync_rcu_exp_select_cpus(smp_call_func_t func)
{
int cpu;
struct rcu_node *rnp;
- trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
- sync_exp_reset_tree(rsp);
- trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select"));
+ trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
+ sync_exp_reset_tree();
+ trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
/* Schedule work for each leaf rcu_node structure. */
- rcu_for_each_leaf_node(rsp, rnp) {
+ rcu_for_each_leaf_node(rnp) {
rnp->exp_need_flush = false;
if (!READ_ONCE(rnp->expmask))
continue; /* Avoid early boot non-existent wq. */
rnp->rew.rew_func = func;
- rnp->rew.rew_rsp = rsp;
if (!READ_ONCE(rcu_par_gp_wq) ||
rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
- rcu_is_last_leaf_node(rsp, rnp)) {
+ rcu_is_last_leaf_node(rnp)) {
/* No workqueues yet or last leaf, do direct call. */
sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
continue;
@@ -505,12 +460,12 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
}
/* Wait for workqueue jobs (if any) to complete. */
- rcu_for_each_leaf_node(rsp, rnp)
+ rcu_for_each_leaf_node(rnp)
if (rnp->exp_need_flush)
flush_work(&rnp->rew.rew_work);
}
-static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
+static void synchronize_sched_expedited_wait(void)
{
int cpu;
unsigned long jiffies_stall;
@@ -518,16 +473,16 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
unsigned long mask;
int ndetected;
struct rcu_node *rnp;
- struct rcu_node *rnp_root = rcu_get_root(rsp);
+ struct rcu_node *rnp_root = rcu_get_root();
int ret;
- trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait"));
+ trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
jiffies_stall = rcu_jiffies_till_stall_check();
jiffies_start = jiffies;
for (;;) {
ret = swait_event_timeout_exclusive(
- rsp->expedited_wq,
+ rcu_state.expedited_wq,
sync_rcu_preempt_exp_done_unlocked(rnp_root),
jiffies_stall);
if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
@@ -537,9 +492,9 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
continue;
panic_on_rcu_stall();
pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
- rsp->name);
+ rcu_state.name);
ndetected = 0;
- rcu_for_each_leaf_node(rsp, rnp) {
+ rcu_for_each_leaf_node(rnp) {
ndetected += rcu_print_task_exp_stall(rnp);
for_each_leaf_node_possible_cpu(rnp, cpu) {
struct rcu_data *rdp;
@@ -548,7 +503,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
if (!(rnp->expmask & mask))
continue;
ndetected++;
- rdp = per_cpu_ptr(rsp->rda, cpu);
+ rdp = per_cpu_ptr(&rcu_data, cpu);
pr_cont(" %d-%c%c%c", cpu,
"O."[!!cpu_online(cpu)],
"o."[!!(rdp->grpmask & rnp->expmaskinit)],
@@ -556,11 +511,11 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
}
}
pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
- jiffies - jiffies_start, rsp->expedited_sequence,
+ jiffies - jiffies_start, rcu_state.expedited_sequence,
rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
if (ndetected) {
pr_err("blocking rcu_node structures:");
- rcu_for_each_node_breadth_first(rsp, rnp) {
+ rcu_for_each_node_breadth_first(rnp) {
if (rnp == rnp_root)
continue; /* printed unconditionally */
if (sync_rcu_preempt_exp_done_unlocked(rnp))
@@ -572,7 +527,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
}
pr_cont("\n");
}
- rcu_for_each_leaf_node(rsp, rnp) {
+ rcu_for_each_leaf_node(rnp) {
for_each_leaf_node_possible_cpu(rnp, cpu) {
mask = leaf_node_cpu_bit(rnp, cpu);
if (!(rnp->expmask & mask))
@@ -590,21 +545,21 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
* grace period. Also update all the ->exp_seq_rq counters as needed
* in order to avoid counter-wrap problems.
*/
-static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
+static void rcu_exp_wait_wake(unsigned long s)
{
struct rcu_node *rnp;
- synchronize_sched_expedited_wait(rsp);
- rcu_exp_gp_seq_end(rsp);
- trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
+ synchronize_sched_expedited_wait();
+ rcu_exp_gp_seq_end();
+ trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
/*
* Switch over to wakeup mode, allowing the next GP, but -only- the
* next GP, to proceed.
*/
- mutex_lock(&rsp->exp_wake_mutex);
+ mutex_lock(&rcu_state.exp_wake_mutex);
- rcu_for_each_node_breadth_first(rsp, rnp) {
+ rcu_for_each_node_breadth_first(rnp) {
if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
spin_lock(&rnp->exp_lock);
/* Recheck, avoid hang in case someone just arrived. */
@@ -613,24 +568,23 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
spin_unlock(&rnp->exp_lock);
}
smp_mb(); /* All above changes before wakeup. */
- wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
+ wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
}
- trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
- mutex_unlock(&rsp->exp_wake_mutex);
+ trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
+ mutex_unlock(&rcu_state.exp_wake_mutex);
}
/*
* Common code to drive an expedited grace period forward, used by
* workqueues and mid-boot-time tasks.
*/
-static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
- smp_call_func_t func, unsigned long s)
+static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s)
{
/* Initialize the rcu_node tree in preparation for the wait. */
- sync_rcu_exp_select_cpus(rsp, func);
+ sync_rcu_exp_select_cpus(func);
/* Wait and clean up, including waking everyone. */
- rcu_exp_wait_wake(rsp, s);
+ rcu_exp_wait_wake(s);
}
/*
@@ -641,15 +595,14 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
struct rcu_exp_work *rewp;
rewp = container_of(wp, struct rcu_exp_work, rew_work);
- rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
+ rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s);
}
/*
- * Given an rcu_state pointer and a smp_call_function() handler, kick
- * off the specified flavor of expedited grace period.
+ * Given an smp_call_function() handler, kick off the specified
+ * implementation of the expedited grace period.
*/
-static void _synchronize_rcu_expedited(struct rcu_state *rsp,
- smp_call_func_t func)
+static void _synchronize_rcu_expedited(smp_call_func_t func)
{
struct rcu_data *rdp;
struct rcu_exp_work rew;
@@ -658,71 +611,37 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
/* If expedited grace periods are prohibited, fall back to normal. */
if (rcu_gp_is_normal()) {
- wait_rcu_gp(rsp->call);
+ wait_rcu_gp(call_rcu);
return;
}
/* Take a snapshot of the sequence number. */
- s = rcu_exp_gp_seq_snap(rsp);
- if (exp_funnel_lock(rsp, s))
+ s = rcu_exp_gp_seq_snap();
+ if (exp_funnel_lock(s))
return; /* Someone else did our work for us. */
/* Ensure that load happens before action based on it. */
if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
/* Direct call during scheduler init and early_initcalls(). */
- rcu_exp_sel_wait_wake(rsp, func, s);
+ rcu_exp_sel_wait_wake(func, s);
} else {
/* Marshall arguments & schedule the expedited grace period. */
rew.rew_func = func;
- rew.rew_rsp = rsp;
rew.rew_s = s;
INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
queue_work(rcu_gp_wq, &rew.rew_work);
}
/* Wait for expedited grace period to complete. */
- rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
- rnp = rcu_get_root(rsp);
+ rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
+ rnp = rcu_get_root();
wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
- sync_exp_work_done(rsp, s));
+ sync_exp_work_done(s));
smp_mb(); /* Workqueue actions happen before return. */
/* Let the next expedited grace period start. */
- mutex_unlock(&rsp->exp_mutex);
-}
-
-/**
- * synchronize_sched_expedited - Brute-force RCU-sched grace period
- *
- * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
- * approach to force the grace period to end quickly. This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code. In fact,
- * if you are using synchronize_sched_expedited() in a loop, please
- * restructure your code to batch your updates, and then use a single
- * synchronize_sched() instead.
- *
- * This implementation can be thought of as an application of sequence
- * locking to expedited grace periods, but using the sequence counter to
- * determine when someone else has already done the work instead of for
- * retrying readers.
- */
-void synchronize_sched_expedited(void)
-{
- struct rcu_state *rsp = &rcu_sched_state;
-
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
- lock_is_held(&rcu_lock_map) ||
- lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_sched_expedited() in RCU read-side critical section");
-
- /* If only one CPU, this is automatically a grace period. */
- if (rcu_blocking_is_gp())
- return;
-
- _synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
+ mutex_unlock(&rcu_state.exp_mutex);
}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
#ifdef CONFIG_PREEMPT_RCU
@@ -733,34 +652,78 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
* ->expmask fields in the rcu_node tree. Otherwise, immediately
* report the quiescent state.
*/
-static void sync_rcu_exp_handler(void *info)
+static void sync_rcu_exp_handler(void *unused)
{
- struct rcu_data *rdp;
- struct rcu_state *rsp = info;
+ unsigned long flags;
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+ struct rcu_node *rnp = rdp->mynode;
struct task_struct *t = current;
/*
- * Within an RCU read-side critical section, request that the next
- * rcu_read_unlock() report. Unless this RCU read-side critical
- * section has already blocked, in which case it is already set
- * up for the expedited grace period to wait on it.
+ * First, the common case of not being in an RCU read-side
+ * critical section. If preemption and softirqs are enabled, or the
+ * CPU is idle, immediately report the quiescent state, otherwise
+ * defer it.
*/
- if (t->rcu_read_lock_nesting > 0 &&
- !t->rcu_read_unlock_special.b.blocked) {
- t->rcu_read_unlock_special.b.exp_need_qs = true;
+ if (!t->rcu_read_lock_nesting) {
+ if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
+ rcu_dynticks_curr_cpu_in_eqs()) {
+ rcu_report_exp_rdp(rdp);
+ } else {
+ rdp->deferred_qs = true;
+ set_tsk_need_resched(t);
+ set_preempt_need_resched();
+ }
return;
}
/*
- * We are either exiting an RCU read-side critical section (negative
- * values of t->rcu_read_lock_nesting) or are not in one at all
- * (zero value of t->rcu_read_lock_nesting). Or we are in an RCU
- * read-side critical section that blocked before this expedited
- * grace period started. Either way, we can immediately report
- * the quiescent state.
+ * Second, the less-common case of being in an RCU read-side
+ * critical section. In this case we can count on a future
+ * rcu_read_unlock(). However, this rcu_read_unlock() might
+ * execute on some other CPU, but in that case there will be
+ * a future context switch. Either way, if the expedited
+ * grace period is still waiting on this CPU, set ->deferred_qs
+ * so that the eventual quiescent state will be reported.
+ * Note that there is a large group of race conditions that
+ * can have caused this quiescent state to already have been
+ * reported, so we really do need to check ->expmask.
+ */
+ if (t->rcu_read_lock_nesting > 0) {
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ if (rnp->expmask & rdp->grpmask)
+ rdp->deferred_qs = true;
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
+
+ /*
+ * The final and least likely case is where the interrupted
+ * code was just about to or just finished exiting the RCU-preempt
+ * read-side critical section, and no, we can't tell which.
+ * So either way, set ->deferred_qs to flag later code that
+ * a quiescent state is required.
+ *
+ * If the CPU is fully enabled (or if some buggy RCU-preempt
+ * read-side critical section is being used from idle), just
+ * invoke rcu_preempt_deferred_qs() to immediately report the
+ * quiescent state. We cannot use rcu_read_unlock_special()
+ * because we are in an interrupt handler, which will cause that
+ * function to take an early exit without doing anything.
+ *
+ * Otherwise, force a context switch once the CPU re-enables
+ * preemption and softirqs.
*/
- rdp = this_cpu_ptr(rsp->rda);
- rcu_report_exp_rdp(rsp, rdp, true);
+ rdp->deferred_qs = true;
+ if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
+ WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
+ rcu_preempt_deferred_qs(t);
+ } else {
+ set_tsk_need_resched(t);
+ set_preempt_need_resched();
+ }
+}
+
+/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
+static void sync_sched_exp_online_cleanup(int cpu)
+{
}
/**
@@ -780,11 +743,11 @@ static void sync_rcu_exp_handler(void *info)
* you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
* instead.
+ *
+ * This has the same semantics as (but is more brutal than) synchronize_rcu().
*/
void synchronize_rcu_expedited(void)
{
- struct rcu_state *rsp = rcu_state_p;
-
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
@@ -792,19 +755,82 @@ void synchronize_rcu_expedited(void)
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
return;
- _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
+ _synchronize_rcu_expedited(sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
#else /* #ifdef CONFIG_PREEMPT_RCU */
+/* Invoked on each online non-idle CPU for expedited quiescent state. */
+static void sync_sched_exp_handler(void *unused)
+{
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
+
+ rdp = this_cpu_ptr(&rcu_data);
+ rnp = rdp->mynode;
+ if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
+ __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
+ return;
+ if (rcu_is_cpu_rrupt_from_idle()) {
+ rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
+ return;
+ }
+ __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
+ /* Store .exp before .rcu_urgent_qs. */
+ smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+}
+
+/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
+static void sync_sched_exp_online_cleanup(int cpu)
+{
+ struct rcu_data *rdp;
+ int ret;
+ struct rcu_node *rnp;
+
+ rdp = per_cpu_ptr(&rcu_data, cpu);
+ rnp = rdp->mynode;
+ if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
+ return;
+ ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0);
+ WARN_ON_ONCE(ret);
+}
+
/*
- * Wait for an rcu-preempt grace period, but make it happen quickly.
- * But because preemptible RCU does not exist, map to rcu-sched.
+ * Because a context switch is a grace period for !PREEMPT, any
+ * blocking grace-period wait automatically implies a grace period if
+ * there is only one CPU online at any point in time during execution of
+ * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
+ * occasionally incorrectly indicate that there are multiple CPUs online
+ * when there was in fact only one the whole time, as this just adds some
+ * overhead: RCU still operates correctly.
*/
+static int rcu_blocking_is_gp(void)
+{
+ int ret;
+
+ might_sleep(); /* Check for RCU read-side critical section. */
+ preempt_disable();
+ ret = num_online_cpus() <= 1;
+ preempt_enable();
+ return ret;
+}
+
+/* PREEMPT=n implementation of synchronize_rcu_expedited(). */
void synchronize_rcu_expedited(void)
{
- synchronize_sched_expedited();
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+ lock_is_held(&rcu_lock_map) ||
+ lock_is_held(&rcu_sched_lock_map),
+ "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
+
+ /* If only one CPU, this is automatically a grace period. */
+ if (rcu_blocking_is_gp())
+ return;
+
+ _synchronize_rcu_expedited(sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
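
/*
 * Usage sketch for either build of synchronize_rcu_expedited(), not
 * part of this patch: "struct foo", "foo_lock" and the list membership
 * are illustrative names only.
 */
struct foo {
	struct list_head list;
	/* ... payload ... */
};
static DEFINE_SPINLOCK(foo_lock);

void remove_foo(struct foo *p)
{
	spin_lock(&foo_lock);
	list_del_rcu(&p->list);		/* Unpublish the element. */
	spin_unlock(&foo_lock);
	synchronize_rcu_expedited();	/* Wait out pre-existing readers. */
	kfree(p);			/* No reader can still hold a reference. */
}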
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index a97c20ea9bce..05915e536336 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -38,8 +38,7 @@
#include "../locking/rtmutex_common.h"
/*
- * Control variables for per-CPU and per-rcu_node kthreads. These
- * handle all flavors of RCU.
+ * Control variables for per-CPU and per-rcu_node kthreads.
*/
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
@@ -106,6 +105,8 @@ static void __init rcu_bootup_announce_oddness(void)
pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
if (jiffies_till_next_fqs != ULONG_MAX)
pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
+ if (jiffies_till_sched_qs != ULONG_MAX)
+ pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
if (rcu_kick_kthreads)
pr_info("\tKick kthreads if too-long grace period.\n");
if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
@@ -123,12 +124,7 @@ static void __init rcu_bootup_announce_oddness(void)
#ifdef CONFIG_PREEMPT_RCU
-RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
-static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
-static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
-
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
- bool wake);
+static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
static void rcu_read_unlock_special(struct task_struct *t);
/*
@@ -284,13 +280,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
* no need to check for a subsequent expedited GP. (Though we are
* still in a quiescent state in any case.)
*/
- if (blkd_state & RCU_EXP_BLKD &&
- t->rcu_read_unlock_special.b.exp_need_qs) {
- t->rcu_read_unlock_special.b.exp_need_qs = false;
- rcu_report_exp_rdp(rdp->rsp, rdp, true);
- } else {
- WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
- }
+ if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
+ rcu_report_exp_rdp(rdp);
+ else
+ WARN_ON_ONCE(rdp->deferred_qs);
}
/*
@@ -306,15 +299,15 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
*
* Callers to this function must disable preemption.
*/
-static void rcu_preempt_qs(void)
+static void rcu_qs(void)
{
- RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
- if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
+ RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
+ if (__this_cpu_read(rcu_data.cpu_no_qs.s)) {
trace_rcu_grace_period(TPS("rcu_preempt"),
- __this_cpu_read(rcu_data_p->gp_seq),
+ __this_cpu_read(rcu_data.gp_seq),
TPS("cpuqs"));
- __this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
- barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
+ __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
+ barrier(); /* Coordinate with rcu_flavor_check_callbacks(). */
current->rcu_read_unlock_special.b.need_qs = false;
}
}
@@ -332,19 +325,20 @@ static void rcu_preempt_qs(void)
*
* Caller must disable interrupts.
*/
-static void rcu_preempt_note_context_switch(bool preempt)
+void rcu_note_context_switch(bool preempt)
{
struct task_struct *t = current;
- struct rcu_data *rdp;
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp;
+ barrier(); /* Avoid RCU read-side critical sections leaking down. */
+ trace_rcu_utilization(TPS("Start context switch"));
lockdep_assert_irqs_disabled();
WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
if (t->rcu_read_lock_nesting > 0 &&
!t->rcu_read_unlock_special.b.blocked) {
/* Possibly blocking in an RCU read-side critical section. */
- rdp = this_cpu_ptr(rcu_state_p->rda);
rnp = rdp->mynode;
raw_spin_lock_rcu_node(rnp);
t->rcu_read_unlock_special.b.blocked = true;
@@ -357,7 +351,7 @@ static void rcu_preempt_note_context_switch(bool preempt)
*/
WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
- trace_rcu_preempt_task(rdp->rsp->name,
+ trace_rcu_preempt_task(rcu_state.name,
t->pid,
(rnp->qsmask & rdp->grpmask)
? rnp->gp_seq
@@ -371,6 +365,9 @@ static void rcu_preempt_note_context_switch(bool preempt)
* behalf of preempted instance of __rcu_read_unlock().
*/
rcu_read_unlock_special(t);
+ rcu_preempt_deferred_qs(t);
+ } else {
+ rcu_preempt_deferred_qs(t);
}
/*
@@ -382,8 +379,13 @@ static void rcu_preempt_note_context_switch(bool preempt)
* grace period, then the fact that the task has been enqueued
* means that we continue to block the current grace period.
*/
- rcu_preempt_qs();
+ rcu_qs();
+ if (rdp->deferred_qs)
+ rcu_report_exp_rdp(rdp);
+ trace_rcu_utilization(TPS("End context switch"));
+ barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
+EXPORT_SYMBOL_GPL(rcu_note_context_switch);
/*
* Check for preempted RCU readers blocking the current grace period
@@ -464,74 +466,56 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
}
/*
- * Handle special cases during rcu_read_unlock(), such as needing to
- * notify RCU core processing or task having blocked during the RCU
- * read-side critical section.
+ * Report deferred quiescent states. The deferral time can
+ * be quite short, for example, in the case of the call from
+ * rcu_read_unlock_special().
*/
-static void rcu_read_unlock_special(struct task_struct *t)
+static void
+rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
{
bool empty_exp;
bool empty_norm;
bool empty_exp_now;
- unsigned long flags;
struct list_head *np;
bool drop_boost_mutex = false;
struct rcu_data *rdp;
struct rcu_node *rnp;
union rcu_special special;
- /* NMI handlers cannot block and cannot safely manipulate state. */
- if (in_nmi())
- return;
-
- local_irq_save(flags);
-
/*
* If RCU core is waiting for this CPU to exit its critical section,
* report the fact that it has exited. Because irqs are disabled,
* t->rcu_read_unlock_special cannot change.
*/
special = t->rcu_read_unlock_special;
+ rdp = this_cpu_ptr(&rcu_data);
+ if (!special.s && !rdp->deferred_qs) {
+ local_irq_restore(flags);
+ return;
+ }
if (special.b.need_qs) {
- rcu_preempt_qs();
+ rcu_qs();
t->rcu_read_unlock_special.b.need_qs = false;
- if (!t->rcu_read_unlock_special.s) {
+ if (!t->rcu_read_unlock_special.s && !rdp->deferred_qs) {
local_irq_restore(flags);
return;
}
}
/*
- * Respond to a request for an expedited grace period, but only if
- * we were not preempted, meaning that we were running on the same
- * CPU throughout. If we were preempted, the exp_need_qs flag
- * would have been cleared at the time of the first preemption,
- * and the quiescent state would be reported when we were dequeued.
+ * Respond to a request by an expedited grace period for a
+ * quiescent state from this CPU. Note that requests from
+ * tasks are handled when removing the task from the
+ * blocked-tasks list below.
*/
- if (special.b.exp_need_qs) {
- WARN_ON_ONCE(special.b.blocked);
- t->rcu_read_unlock_special.b.exp_need_qs = false;
- rdp = this_cpu_ptr(rcu_state_p->rda);
- rcu_report_exp_rdp(rcu_state_p, rdp, true);
+ if (rdp->deferred_qs) {
+ rcu_report_exp_rdp(rdp);
if (!t->rcu_read_unlock_special.s) {
local_irq_restore(flags);
return;
}
}
- /* Hardware IRQ handlers cannot block, complain if they get here. */
- if (in_irq() || in_serving_softirq()) {
- lockdep_rcu_suspicious(__FILE__, __LINE__,
- "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
- pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
- t->rcu_read_unlock_special.s,
- t->rcu_read_unlock_special.b.blocked,
- t->rcu_read_unlock_special.b.exp_need_qs,
- t->rcu_read_unlock_special.b.need_qs);
- local_irq_restore(flags);
- return;
- }
-
/* Clean up if blocked during RCU read-side critical section. */
if (special.b.blocked) {
t->rcu_read_unlock_special.b.blocked = false;
@@ -582,7 +566,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
rnp->grplo,
rnp->grphi,
!!rnp->gp_tasks);
- rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
+ rcu_report_unblock_qs_rnp(rnp, flags);
} else {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
@@ -596,13 +580,79 @@ static void rcu_read_unlock_special(struct task_struct *t)
* then we need to report up the rcu_node hierarchy.
*/
if (!empty_exp && empty_exp_now)
- rcu_report_exp_rnp(rcu_state_p, rnp, true);
+ rcu_report_exp_rnp(rnp, true);
} else {
local_irq_restore(flags);
}
}
/*
+ * Is a deferred quiescent-state pending, and are we also not in
+ * an RCU read-side critical section? It is the caller's responsibility
+ * to ensure it is otherwise safe to report any deferred quiescent
+ * states. The reason for this is that it is safe to report a
+ * quiescent state during context switch even though preemption
+ * is disabled. This function cannot be expected to understand these
+ * nuances, so the caller must handle them.
+ */
+static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
+{
+ return (this_cpu_ptr(&rcu_data)->deferred_qs ||
+ READ_ONCE(t->rcu_read_unlock_special.s)) &&
+ t->rcu_read_lock_nesting <= 0;
+}
+
+/*
+ * Report a deferred quiescent state if needed and safe to do so.
+ * As with rcu_preempt_need_deferred_qs(), "safe" involves only
+ * not being in an RCU read-side critical section. The caller must
+ * evaluate safety in terms of interrupt, softirq, and preemption
+ * disabling.
+ */
+static void rcu_preempt_deferred_qs(struct task_struct *t)
+{
+ unsigned long flags;
+ bool couldrecurse = t->rcu_read_lock_nesting >= 0;
+
+ if (!rcu_preempt_need_deferred_qs(t))
+ return;
+ if (couldrecurse)
+ t->rcu_read_lock_nesting -= INT_MIN;
+ local_irq_save(flags);
+ rcu_preempt_deferred_qs_irqrestore(t, flags);
+ if (couldrecurse)
+ t->rcu_read_lock_nesting += INT_MIN;
+}
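
/*
 * Note on the INT_MIN bias above (an observation about the code, which
 * relies on the kernel's -fno-strict-overflow two's-complement
 * wraparound): subtracting INT_MIN drives ->rcu_read_lock_nesting
 * deeply negative, so any rcu_read_unlock() reached from the
 * irqrestore path sees a negative nesting count and cannot recurse
 * back into this function.  With 32-bit int:
 *
 *	1 - INT_MIN		 wraps to  INT_MIN + 1	(large negative)
 *	(INT_MIN + 1) + INT_MIN	 wraps back to  1
 *
 * so the final "+= INT_MIN" restores the original count exactly.
 */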
+
+/*
+ * Handle special cases during rcu_read_unlock(), such as needing to
+ * notify RCU core processing or task having blocked during the RCU
+ * read-side critical section.
+ */
+static void rcu_read_unlock_special(struct task_struct *t)
+{
+ unsigned long flags;
+ bool preempt_bh_were_disabled =
+ !!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
+ bool irqs_were_disabled;
+
+ /* NMI handlers cannot block and cannot safely manipulate state. */
+ if (in_nmi())
+ return;
+
+ local_irq_save(flags);
+ irqs_were_disabled = irqs_disabled_flags(flags);
+ if ((preempt_bh_were_disabled || irqs_were_disabled) &&
+ t->rcu_read_unlock_special.b.blocked) {
+ /* Need to defer quiescent state until everything is enabled. */
+ raise_softirq_irqoff(RCU_SOFTIRQ);
+ local_irq_restore(flags);
+ return;
+ }
+ rcu_preempt_deferred_qs_irqrestore(t, flags);
+}
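
/*
 * One plausible reading of the deferral above (an assumption, not
 * stated in the patch): raising RCU_SOFTIRQ acts as a trampoline.
 * The softirq handler later runs in a context where the deferred
 * quiescent state can be reported safely, rather than from this
 * context in which preemption, BH, or interrupts are disabled.
 */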
+
+/*
* Dump detailed information for all tasks blocking the current RCU
* grace period on the specified rcu_node structure.
*/
@@ -633,12 +683,12 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
* Dump detailed information for all tasks blocking the current RCU
* grace period.
*/
-static void rcu_print_detail_task_stall(struct rcu_state *rsp)
+static void rcu_print_detail_task_stall(void)
{
- struct rcu_node *rnp = rcu_get_root(rsp);
+ struct rcu_node *rnp = rcu_get_root();
rcu_print_detail_task_stall_rnp(rnp);
- rcu_for_each_leaf_node(rsp, rnp)
+ rcu_for_each_leaf_node(rnp)
rcu_print_detail_task_stall_rnp(rnp);
}
@@ -706,14 +756,13 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
* Also, if there are blocked tasks on the list, they automatically
* block the newly created grace period, so set up ->gp_tasks accordingly.
*/
-static void
-rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
struct task_struct *t;
RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
- dump_blkd_tasks(rsp, rnp, 10);
+ dump_blkd_tasks(rnp, 10);
if (rcu_preempt_has_tasks(rnp) &&
(rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
rnp->gp_tasks = rnp->blkd_tasks.next;
@@ -732,62 +781,38 @@ rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
*
* Caller must disable hard irqs.
*/
-static void rcu_preempt_check_callbacks(void)
+static void rcu_flavor_check_callbacks(int user)
{
- struct rcu_state *rsp = &rcu_preempt_state;
struct task_struct *t = current;
- if (t->rcu_read_lock_nesting == 0) {
- rcu_preempt_qs();
+ if (user || rcu_is_cpu_rrupt_from_idle()) {
+ rcu_note_voluntary_context_switch(current);
+ }
+ if (t->rcu_read_lock_nesting > 0 ||
+ (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
+ /* No QS, force context switch if deferred. */
+ if (rcu_preempt_need_deferred_qs(t)) {
+ set_tsk_need_resched(t);
+ set_preempt_need_resched();
+ }
+ } else if (rcu_preempt_need_deferred_qs(t)) {
+ rcu_preempt_deferred_qs(t); /* Report deferred QS. */
+ return;
+ } else if (!t->rcu_read_lock_nesting) {
+ rcu_qs(); /* Report immediate QS. */
return;
}
+
+ /* If GP is oldish, ask for help from rcu_read_unlock_special(). */
if (t->rcu_read_lock_nesting > 0 &&
- __this_cpu_read(rcu_data_p->core_needs_qs) &&
- __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm) &&
+ __this_cpu_read(rcu_data.core_needs_qs) &&
+ __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
!t->rcu_read_unlock_special.b.need_qs &&
- time_after(jiffies, rsp->gp_start + HZ))
+ time_after(jiffies, rcu_state.gp_start + HZ))
t->rcu_read_unlock_special.b.need_qs = true;
}
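
/*
 * Summary of the decision tree above (illustrative restatement only):
 *
 *	user/idle interrupt		-> note voluntary context switch
 *	in reader, or BH/preempt off	-> if QS deferred, force resched
 *	QS deferred and now safe	-> report the deferred QS
 *	outside any reader		-> report an immediate QS
 *	GP is oldish			-> set ->need_qs for unlock path
 */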
/**
- * call_rcu() - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all pre-existing RCU read-side
- * critical sections have completed. However, the callback function
- * might well execute concurrently with RCU read-side critical sections
- * that started after call_rcu() was invoked. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- *
- * Note that all CPUs must agree that the grace period extended beyond
- * all pre-existing RCU read-side critical section. On systems with more
- * than one CPU, this means that when "func()" is invoked, each CPU is
- * guaranteed to have executed a full memory barrier since the end of its
- * last RCU read-side critical section whose beginning preceded the call
- * to call_rcu(). It also means that each CPU executing an RCU read-side
- * critical section that continues beyond the start of "func()" must have
- * executed a memory barrier after the call_rcu() but before the beginning
- * of that RCU read-side critical section. Note that these guarantees
- * include CPUs that are offline, idle, or executing in user mode, as
- * well as CPUs that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
- * resulting RCU callback function "func()", then both CPU A and CPU B are
- * guaranteed to execute a full memory barrier during the time interval
- * between the call to call_rcu() and the invocation of "func()" -- even
- * if CPU A and CPU B are the same CPU (but again only if the system has
- * more than one CPU).
- */
-void call_rcu(struct rcu_head *head, rcu_callback_t func)
-{
- __call_rcu(head, func, rcu_state_p, -1, 0);
-}
-EXPORT_SYMBOL_GPL(call_rcu);
-
-/**
* synchronize_rcu - wait until a grace period has elapsed.
*
* Control will return to the caller some time after a full grace
@@ -797,14 +822,28 @@ EXPORT_SYMBOL_GPL(call_rcu);
* concurrently with new RCU read-side critical sections that began while
* synchronize_rcu() was waiting. RCU read-side critical sections are
* delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
+ * In addition, regions of code across which interrupts, preemption, or
+ * softirqs have been disabled also serve as RCU read-side critical
+ * sections. This includes hardware interrupt handlers, softirq handlers,
+ * and NMI handlers.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_rcu() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since
+ * the end of its last RCU read-side critical section whose beginning
+ * preceded the call to synchronize_rcu(). In addition, each CPU having
+ * an RCU read-side critical section that extends beyond the return from
+ * synchronize_rcu() is guaranteed to have executed a full memory barrier
+ * after the beginning of synchronize_rcu() and before the beginning of
+ * that RCU read-side critical section. Note that these guarantees include
+ * CPUs that are offline, idle, or executing in user mode, as well as CPUs
+ * that are executing in the kernel.
*
- * See the description of synchronize_sched() for more detailed
- * information on memory-ordering guarantees. However, please note
- * that -only- the memory-ordering guarantees apply. For example,
- * synchronize_rcu() is -not- guaranteed to wait on things like code
- * protected by preempt_disable(), instead, synchronize_rcu() is -only-
- * guaranteed to wait on RCU read-side critical sections, that is, sections
- * of code protected by rcu_read_lock().
+ * Furthermore, if CPU A invoked synchronize_rcu(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
+ * again only if the system has more than one CPU).
*/
void synchronize_rcu(void)
{
@@ -821,28 +860,6 @@ void synchronize_rcu(void)
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
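
/*
 * Example of the guarantee documented above; "gp", "gp_lock" and
 * "struct foo" are illustrative names, not part of this patch.
 */
struct foo { int a; };
static struct foo __rcu *gp;
static DEFINE_SPINLOCK(gp_lock);

int reader(void)
{
	struct foo *p;
	int ret = 0;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		ret = p->a;	/* Guaranteed valid until rcu_read_unlock(). */
	rcu_read_unlock();
	return ret;
}

void updater(struct foo *newp)
{
	struct foo *oldp;

	spin_lock(&gp_lock);
	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	rcu_assign_pointer(gp, newp);
	spin_unlock(&gp_lock);
	synchronize_rcu();	/* All pre-existing readers are done. */
	kfree(oldp);
}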
-/**
- * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
- *
- * Note that this primitive does not necessarily wait for an RCU grace period
- * to complete. For example, if there are no RCU callbacks queued anywhere
- * in the system, then rcu_barrier() is within its rights to return
- * immediately, without waiting for anything, much less an RCU grace period.
- */
-void rcu_barrier(void)
-{
- _rcu_barrier(rcu_state_p);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-
-/*
- * Initialize preemptible RCU's state structures.
- */
-static void __init __rcu_init_preempt(void)
-{
- rcu_init_one(rcu_state_p);
-}
-
/*
* Check for a task exiting while in a preemptible-RCU read-side
* critical section, clean up if so. No need to issue warnings,
@@ -859,6 +876,7 @@ void exit_rcu(void)
barrier();
t->rcu_read_unlock_special.b.blocked = true;
__rcu_read_unlock();
+ rcu_preempt_deferred_qs(current);
}
/*
@@ -866,7 +884,7 @@ void exit_rcu(void)
* specified number of elements.
*/
static void
-dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
int cpu;
int i;
@@ -893,7 +911,7 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
}
pr_cont("\n");
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
- rdp = per_cpu_ptr(rsp->rda, cpu);
+ rdp = per_cpu_ptr(&rcu_data, cpu);
onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
cpu, ".o"[onl],
@@ -904,8 +922,6 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
#else /* #ifdef CONFIG_PREEMPT_RCU */
-static struct rcu_state *const rcu_state_p = &rcu_sched_state;
-
/*
* Tell them what RCU they are running.
*/
@@ -916,14 +932,85 @@ static void __init rcu_bootup_announce(void)
}
/*
- * Because preemptible RCU does not exist, we never have to check for
- * CPUs being in quiescent states.
+ * Note a quiescent state for PREEMPT=n. Because we do not need to know
+ * how many quiescent states passed, only whether there was at least one
+ * since the start of the grace period, this just sets a flag. The caller
+ * must have disabled preemption.
*/
-static void rcu_preempt_note_context_switch(bool preempt)
+static void rcu_qs(void)
{
+ RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
+ if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
+ return;
+ trace_rcu_grace_period(TPS("rcu_sched"),
+ __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
+ __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
+ if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
+ return;
+ __this_cpu_write(rcu_data.cpu_no_qs.b.exp, false);
+ rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
}
/*
+ * Register an urgently needed quiescent state. If there is an
+ * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
+ * dyntick-idle quiescent state visible to other CPUs, which will in
+ * some cases serve for expedited as well as normal grace periods.
+ * Either way, register a lightweight quiescent state.
+ *
+ * The barrier() calls are redundant in the common case when this is
+ * called externally, but just in case this is called from within this
+ * file.
+ *
+ */
+void rcu_all_qs(void)
+{
+ unsigned long flags;
+
+ if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
+ return;
+ preempt_disable();
+ /* Load rcu_urgent_qs before other flags. */
+ if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
+ preempt_enable();
+ return;
+ }
+ this_cpu_write(rcu_data.rcu_urgent_qs, false);
+ barrier(); /* Avoid RCU read-side critical sections leaking down. */
+ if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
+ local_irq_save(flags);
+ rcu_momentary_dyntick_idle();
+ local_irq_restore(flags);
+ }
+ rcu_qs();
+ barrier(); /* Avoid RCU read-side critical sections leaking up. */
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(rcu_all_qs);
+
+/*
+ * Note a PREEMPT=n context switch. The caller must have disabled interrupts.
+ */
+void rcu_note_context_switch(bool preempt)
+{
+ barrier(); /* Avoid RCU read-side critical sections leaking down. */
+ trace_rcu_utilization(TPS("Start context switch"));
+ rcu_qs();
+ /* Load rcu_urgent_qs before other flags. */
+ if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
+ goto out;
+ this_cpu_write(rcu_data.rcu_urgent_qs, false);
+ if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
+ rcu_momentary_dyntick_idle();
+ if (!preempt)
+ rcu_tasks_qs(current);
+out:
+ trace_rcu_utilization(TPS("End context switch"));
+ barrier(); /* Avoid RCU read-side critical sections leaking up. */
+}
+EXPORT_SYMBOL_GPL(rcu_note_context_switch);
+
+/*
* Because preemptible RCU does not exist, there are never any preempted
* RCU readers.
*/
@@ -941,10 +1028,20 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
}
/*
+ * Because there is no preemptible RCU, there can be no deferred quiescent
+ * states.
+ */
+static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
+{
+ return false;
+}
+static void rcu_preempt_deferred_qs(struct task_struct *t) { }
+
+/*
* Because preemptible RCU does not exist, we never have to check for
* tasks blocked within RCU read-side critical sections.
*/
-static void rcu_print_detail_task_stall(struct rcu_state *rsp)
+static void rcu_print_detail_task_stall(void)
{
}
@@ -972,36 +1069,54 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
* so there is no need to check for blocked tasks. So check only for
* bogus qsmask values.
*/
-static void
-rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
WARN_ON_ONCE(rnp->qsmask);
}
/*
- * Because preemptible RCU does not exist, it never has any callbacks
- * to check.
+ * Check to see if this CPU is in a non-context-switch quiescent state
+ * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
+ * Also schedule RCU core processing.
+ *
+ * This function must be called from hardirq context. It is normally
+ * invoked from the scheduling-clock interrupt.
*/
-static void rcu_preempt_check_callbacks(void)
+static void rcu_flavor_check_callbacks(int user)
{
-}
+ if (user || rcu_is_cpu_rrupt_from_idle()) {
-/*
- * Because preemptible RCU does not exist, rcu_barrier() is just
- * another name for rcu_barrier_sched().
- */
-void rcu_barrier(void)
-{
- rcu_barrier_sched();
+ /*
+ * Get here if this CPU took its interrupt from user
+ * mode or from the idle loop, and if this is not a
+ * nested interrupt. In this case, the CPU is in
+ * a quiescent state, so note it.
+ *
+ * No memory barrier is required here because rcu_qs()
+ * references only CPU-local variables that other CPUs
+ * neither access nor modify, at least not while the
+ * corresponding CPU is online.
+ */
+
+ rcu_qs();
+ }
}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-/*
- * Because preemptible RCU does not exist, it need not be initialized.
- */
-static void __init __rcu_init_preempt(void)
+/* PREEMPT=n implementation of synchronize_rcu(). */
+void synchronize_rcu(void)
{
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+ lock_is_held(&rcu_lock_map) ||
+ lock_is_held(&rcu_sched_lock_map),
+ "Illegal synchronize_rcu() in RCU read-side critical section");
+ if (rcu_blocking_is_gp())
+ return;
+ if (rcu_gp_is_expedited())
+ synchronize_rcu_expedited();
+ else
+ wait_rcu_gp(call_rcu);
}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
/*
* Because preemptible RCU does not exist, tasks cannot possibly exit
@@ -1015,7 +1130,7 @@ void exit_rcu(void)
* Dump the guaranteed-empty blocked-tasks state. Trust but verify.
*/
static void
-dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
}
@@ -1212,21 +1327,20 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
* already exist. We only create this kthread for preemptible RCU.
* Returns zero if all is well, a negated errno otherwise.
*/
-static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
- struct rcu_node *rnp)
+static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
- int rnp_index = rnp - &rsp->node[0];
+ int rnp_index = rnp - rcu_get_root();
unsigned long flags;
struct sched_param sp;
struct task_struct *t;
- if (rcu_state_p != rsp)
+ if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
return 0;
if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
return 0;
- rsp->boost = 1;
+ rcu_state.boost = 1;
if (rnp->boost_kthread_task != NULL)
return 0;
t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1244,9 +1358,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
static void rcu_kthread_do_work(void)
{
- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
- rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
+ rcu_do_batch(this_cpu_ptr(&rcu_data));
}
static void rcu_cpu_kthread_setup(unsigned int cpu)
@@ -1268,9 +1380,9 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu)
}
/*
- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- * RCU softirq used in flavors and configurations of RCU that do not
- * support RCU priority boosting.
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces
+ * the RCU softirq used in configurations of RCU that do not support RCU
+ * priority boosting.
*/
static void rcu_cpu_kthread(unsigned int cpu)
{
@@ -1353,18 +1465,18 @@ static void __init rcu_spawn_boost_kthreads(void)
for_each_possible_cpu(cpu)
per_cpu(rcu_cpu_has_work, cpu) = 0;
BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
- rcu_for_each_leaf_node(rcu_state_p, rnp)
- (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
+ rcu_for_each_leaf_node(rnp)
+ (void)rcu_spawn_one_boost_kthread(rnp);
}
static void rcu_prepare_kthreads(int cpu)
{
- struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
struct rcu_node *rnp = rdp->mynode;
/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
if (rcu_scheduler_fully_active)
- (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
+ (void)rcu_spawn_one_boost_kthread(rnp);
}
#else /* #ifdef CONFIG_RCU_BOOST */
@@ -1411,8 +1523,8 @@ static void rcu_prepare_kthreads(int cpu)
* 1 if so. This function is part of the RCU implementation; it is -not-
* an exported member of the RCU API.
*
- * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs
- * any flavor of RCU.
+ * Because we do not have RCU_FAST_NO_HZ, just check whether or not this
+ * CPU has RCU callbacks queued.
*/
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
@@ -1478,41 +1590,36 @@ static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
module_param(rcu_idle_lazy_gp_delay, int, 0644);
/*
- * Try to advance callbacks for all flavors of RCU on the current CPU, but
- * only if it has been awhile since the last time we did so. Afterwards,
- * if there are any callbacks ready for immediate invocation, return true.
+ * Try to advance callbacks on the current CPU, but only if it has been
+ * a while since the last time we did so. Afterwards, if there are any
+ * callbacks ready for immediate invocation, return true.
*/
static bool __maybe_unused rcu_try_advance_all_cbs(void)
{
bool cbs_ready = false;
- struct rcu_data *rdp;
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp;
- struct rcu_state *rsp;
/* Exit early if we advanced recently. */
- if (jiffies == rdtp->last_advance_all)
+ if (jiffies == rdp->last_advance_all)
return false;
- rdtp->last_advance_all = jiffies;
+ rdp->last_advance_all = jiffies;
- for_each_rcu_flavor(rsp) {
- rdp = this_cpu_ptr(rsp->rda);
- rnp = rdp->mynode;
+ rnp = rdp->mynode;
- /*
- * Don't bother checking unless a grace period has
- * completed since we last checked and there are
- * callbacks not yet ready to invoke.
- */
- if ((rcu_seq_completed_gp(rdp->gp_seq,
- rcu_seq_current(&rnp->gp_seq)) ||
- unlikely(READ_ONCE(rdp->gpwrap))) &&
- rcu_segcblist_pend_cbs(&rdp->cblist))
- note_gp_changes(rsp, rdp);
-
- if (rcu_segcblist_ready_cbs(&rdp->cblist))
- cbs_ready = true;
- }
+ /*
+ * Don't bother checking unless a grace period has
+ * completed since we last checked and there are
+ * callbacks not yet ready to invoke.
+ */
+ if ((rcu_seq_completed_gp(rdp->gp_seq,
+ rcu_seq_current(&rnp->gp_seq)) ||
+ unlikely(READ_ONCE(rdp->gpwrap))) &&
+ rcu_segcblist_pend_cbs(&rdp->cblist))
+ note_gp_changes(rdp);
+
+ if (rcu_segcblist_ready_cbs(&rdp->cblist))
+ cbs_ready = true;
return cbs_ready;
}
@@ -1526,16 +1633,16 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
*/
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
unsigned long dj;
lockdep_assert_irqs_disabled();
/* Snapshot to detect later posting of non-lazy callback. */
- rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
+ rdp->nonlazy_posted_snap = rdp->nonlazy_posted;
/* If no callbacks, RCU doesn't need the CPU. */
- if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
+ if (!rcu_cpu_has_callbacks(&rdp->all_lazy)) {
*nextevt = KTIME_MAX;
return 0;
}
@@ -1546,10 +1653,10 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
invoke_rcu_core();
return 1;
}
- rdtp->last_accelerate = jiffies;
+ rdp->last_accelerate = jiffies;
/* Request timer delay depending on laziness, and round. */
- if (!rdtp->all_lazy) {
+ if (!rdp->all_lazy) {
dj = round_up(rcu_idle_gp_delay + jiffies,
rcu_idle_gp_delay) - jiffies;
} else {
@@ -1572,10 +1679,8 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
static void rcu_prepare_for_idle(void)
{
bool needwake;
- struct rcu_data *rdp;
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp;
- struct rcu_state *rsp;
int tne;
lockdep_assert_irqs_disabled();
@@ -1584,10 +1689,10 @@ static void rcu_prepare_for_idle(void)
/* Handle nohz enablement switches conservatively. */
tne = READ_ONCE(tick_nohz_active);
- if (tne != rdtp->tick_nohz_enabled_snap) {
+ if (tne != rdp->tick_nohz_enabled_snap) {
if (rcu_cpu_has_callbacks(NULL))
invoke_rcu_core(); /* force nohz to see update. */
- rdtp->tick_nohz_enabled_snap = tne;
+ rdp->tick_nohz_enabled_snap = tne;
return;
}
if (!tne)
@@ -1598,10 +1703,10 @@ static void rcu_prepare_for_idle(void)
* callbacks, invoke RCU core for the side-effect of recalculating
* idle duration on re-entry to idle.
*/
- if (rdtp->all_lazy &&
- rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
- rdtp->all_lazy = false;
- rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
+ if (rdp->all_lazy &&
+ rdp->nonlazy_posted != rdp->nonlazy_posted_snap) {
+ rdp->all_lazy = false;
+ rdp->nonlazy_posted_snap = rdp->nonlazy_posted;
invoke_rcu_core();
return;
}
@@ -1610,19 +1715,16 @@ static void rcu_prepare_for_idle(void)
* If we have not yet accelerated this jiffy, accelerate all
* callbacks on this CPU.
*/
- if (rdtp->last_accelerate == jiffies)
+ if (rdp->last_accelerate == jiffies)
return;
- rdtp->last_accelerate = jiffies;
- for_each_rcu_flavor(rsp) {
- rdp = this_cpu_ptr(rsp->rda);
- if (!rcu_segcblist_pend_cbs(&rdp->cblist))
- continue;
+ rdp->last_accelerate = jiffies;
+ if (rcu_segcblist_pend_cbs(&rdp->cblist)) {
rnp = rdp->mynode;
raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
- needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+ needwake = rcu_accelerate_cbs(rnp, rdp);
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
if (needwake)
- rcu_gp_kthread_wake(rsp);
+ rcu_gp_kthread_wake();
}
}
@@ -1650,104 +1752,23 @@ static void rcu_cleanup_after_idle(void)
*/
static void rcu_idle_count_callbacks_posted(void)
{
- __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
-}
-
-/*
- * Data for flushing lazy RCU callbacks at OOM time.
- */
-static atomic_t oom_callback_count;
-static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
-
-/*
- * RCU OOM callback -- decrement the outstanding count and deliver the
- * wake-up if we are the last one.
- */
-static void rcu_oom_callback(struct rcu_head *rhp)
-{
- if (atomic_dec_and_test(&oom_callback_count))
- wake_up(&oom_callback_wq);
-}
-
-/*
- * Post an rcu_oom_notify callback on the current CPU if it has at
- * least one lazy callback. This will unnecessarily post callbacks
- * to CPUs that already have a non-lazy callback at the end of their
- * callback list, but this is an infrequent operation, so accept some
- * extra overhead to keep things simple.
- */
-static void rcu_oom_notify_cpu(void *unused)
-{
- struct rcu_state *rsp;
- struct rcu_data *rdp;
-
- for_each_rcu_flavor(rsp) {
- rdp = raw_cpu_ptr(rsp->rda);
- if (rcu_segcblist_n_lazy_cbs(&rdp->cblist)) {
- atomic_inc(&oom_callback_count);
- rsp->call(&rdp->oom_head, rcu_oom_callback);
- }
- }
-}
-
-/*
- * If low on memory, ensure that each CPU has a non-lazy callback.
- * This will wake up CPUs that have only lazy callbacks, in turn
- * ensuring that they free up the corresponding memory in a timely manner.
- * Because an uncertain amount of memory will be freed in some uncertain
- * timeframe, we do not claim to have freed anything.
- */
-static int rcu_oom_notify(struct notifier_block *self,
- unsigned long notused, void *nfreed)
-{
- int cpu;
-
- /* Wait for callbacks from earlier instance to complete. */
- wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
- smp_mb(); /* Ensure callback reuse happens after callback invocation. */
-
- /*
- * Prevent premature wakeup: ensure that all increments happen
- * before there is a chance of the counter reaching zero.
- */
- atomic_set(&oom_callback_count, 1);
-
- for_each_online_cpu(cpu) {
- smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
- cond_resched_tasks_rcu_qs();
- }
-
- /* Unconditionally decrement: no need to wake ourselves up. */
- atomic_dec(&oom_callback_count);
-
- return NOTIFY_OK;
+ __this_cpu_add(rcu_data.nonlazy_posted, 1);
}
-static struct notifier_block rcu_oom_nb = {
- .notifier_call = rcu_oom_notify
-};
-
-static int __init rcu_register_oom_notifier(void)
-{
- register_oom_notifier(&rcu_oom_nb);
- return 0;
-}
-early_initcall(rcu_register_oom_notifier);
-
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
#ifdef CONFIG_RCU_FAST_NO_HZ
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
- struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
- unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ unsigned long nlpd = rdp->nonlazy_posted - rdp->nonlazy_posted_snap;
sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
- rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
+ rdp->last_accelerate & 0xffff, jiffies & 0xffff,
ulong2long(nlpd),
- rdtp->all_lazy ? 'L' : '.',
- rdtp->tick_nohz_enabled_snap ? '.' : 'D');
+ rdp->all_lazy ? 'L' : '.',
+ rdp->tick_nohz_enabled_snap ? '.' : 'D');
}
#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
@@ -1768,21 +1789,19 @@ static void print_cpu_stall_info_begin(void)
/*
* Print out diagnostic information for the specified stalled CPU.
*
- * If the specified CPU is aware of the current RCU grace period
- * (flavor specified by rsp), then print the number of scheduling
- * clock interrupts the CPU has taken during the time that it has
- * been aware. Otherwise, print the number of RCU grace periods
- * that this CPU is ignorant of, for example, "1" if the CPU was
- * aware of the previous grace period.
+ * If the specified CPU is aware of the current RCU grace period, then
+ * print the number of scheduling clock interrupts the CPU has taken
+ * during the time that it has been aware. Otherwise, print the number
+ * of RCU grace periods that this CPU is ignorant of, for example, "1"
+ * if the CPU was aware of the previous grace period.
*
* Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
*/
-static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
+static void print_cpu_stall_info(int cpu)
{
unsigned long delta;
char fast_no_hz[72];
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
- struct rcu_dynticks *rdtp = rdp->dynticks;
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
char *ticks_title;
unsigned long ticks_value;
@@ -1792,7 +1811,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
*/
touch_nmi_watchdog();
- ticks_value = rcu_seq_ctr(rsp->gp_seq - rdp->gp_seq);
+ ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
if (ticks_value) {
ticks_title = "GPs behind";
} else {
@@ -1810,10 +1829,10 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
"!."[!delta],
ticks_value, ticks_title,
- rcu_dynticks_snap(rdtp) & 0xfff,
- rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
+ rcu_dynticks_snap(rdp) & 0xfff,
+ rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
- READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
+ READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
fast_no_hz);
}
@@ -1823,20 +1842,12 @@ static void print_cpu_stall_info_end(void)
pr_err("\t");
}
-/* Zero ->ticks_this_gp for all flavors of RCU. */
+/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
rdp->ticks_this_gp = 0;
rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
-}
-
-/* Increment ->ticks_this_gp for all flavors of RCU. */
-static void increment_cpu_stall_ticks(void)
-{
- struct rcu_state *rsp;
-
- for_each_rcu_flavor(rsp)
- raw_cpu_inc(rsp->rda->ticks_this_gp);
+ WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}
#ifdef CONFIG_RCU_NOCB_CPU
@@ -1958,17 +1969,17 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
mod_timer(&rdp->nocb_timer, jiffies + 1);
WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, reason);
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
}
/*
- * Does the specified CPU need an RCU callback for the specified flavor
+ * Does the specified CPU need an RCU callback for this invocation
* of rcu_barrier()?
*/
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+static bool rcu_nocb_cpu_needs_barrier(int cpu)
{
- struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
unsigned long ret;
#ifdef CONFIG_PROVE_RCU
struct rcu_head *rhp;
@@ -1979,7 +1990,7 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
* There needs to be a barrier before this function is called,
* but associated with a prior determination that no more
* callbacks would be posted. In the worst case, the first
- * barrier in _rcu_barrier() suffices (but the caller cannot
+ * barrier in rcu_barrier() suffices (but the caller cannot
* necessarily rely on this, not a substitute for the caller
* getting the concurrency design right!). There must also be
* a barrier between the following load and posting of a callback
@@ -2037,7 +2048,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
/* If we are not being polled and there is a kthread, awaken it ... */
t = READ_ONCE(rdp->nocb_kthread);
if (rcu_nocb_poll || !t) {
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("WakeNotPoll"));
return;
}
@@ -2046,7 +2057,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
if (!irqs_disabled_flags(flags)) {
/* ... if queue was empty ... */
wake_nocb_leader(rdp, false);
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("WakeEmpty"));
} else {
wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
@@ -2057,7 +2068,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
/* ... or if many callbacks queued. */
if (!irqs_disabled_flags(flags)) {
wake_nocb_leader(rdp, true);
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("WakeOvf"));
} else {
wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE_FORCE,
@@ -2065,7 +2076,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
}
rdp->qlen_last_fqs_check = LONG_MAX / 2;
} else {
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
}
return;
}
@@ -2087,12 +2098,12 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
return false;
__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
if (__is_kfree_rcu_offset((unsigned long)rhp->func))
- trace_rcu_kfree_callback(rdp->rsp->name, rhp,
+ trace_rcu_kfree_callback(rcu_state.name, rhp,
(unsigned long)rhp->func,
-atomic_long_read(&rdp->nocb_q_count_lazy),
-atomic_long_read(&rdp->nocb_q_count));
else
- trace_rcu_callback(rdp->rsp->name, rhp,
+ trace_rcu_callback(rcu_state.name, rhp,
-atomic_long_read(&rdp->nocb_q_count_lazy),
-atomic_long_read(&rdp->nocb_q_count));
@@ -2142,7 +2153,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
struct rcu_node *rnp = rdp->mynode;
local_irq_save(flags);
- c = rcu_seq_snap(&rdp->rsp->gp_seq);
+ c = rcu_seq_snap(&rcu_state.gp_seq);
if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
local_irq_restore(flags);
} else {
@@ -2150,7 +2161,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
needwake = rcu_start_this_gp(rnp, rdp, c);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
if (needwake)
- rcu_gp_kthread_wake(rdp->rsp);
+ rcu_gp_kthread_wake();
}
/*
@@ -2187,7 +2198,7 @@ wait_again:
/* Wait for callbacks to appear. */
if (!rcu_nocb_poll) {
- trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
+ trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Sleep"));
swait_event_interruptible_exclusive(my_rdp->nocb_wq,
!READ_ONCE(my_rdp->nocb_leader_sleep));
raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
@@ -2197,7 +2208,7 @@ wait_again:
raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
} else if (firsttime) {
firsttime = false; /* Don't drown trace log with "Poll"! */
- trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Poll"));
+ trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu, TPS("Poll"));
}
/*
@@ -2224,7 +2235,7 @@ wait_again:
if (rcu_nocb_poll) {
schedule_timeout_interruptible(1);
} else {
- trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
+ trace_rcu_nocb_wake(rcu_state.name, my_rdp->cpu,
TPS("WokeEmpty"));
}
goto wait_again;
@@ -2269,7 +2280,7 @@ wait_again:
static void nocb_follower_wait(struct rcu_data *rdp)
{
for (;;) {
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FollowerSleep"));
swait_event_interruptible_exclusive(rdp->nocb_wq,
READ_ONCE(rdp->nocb_follower_head));
if (smp_load_acquire(&rdp->nocb_follower_head)) {
@@ -2277,7 +2288,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
return;
}
WARN_ON(signal_pending(current));
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeEmpty"));
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
}
}
@@ -2312,10 +2323,10 @@ static int rcu_nocb_kthread(void *arg)
rdp->nocb_follower_tail = &rdp->nocb_follower_head;
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
BUG_ON(!list);
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeNonEmpty"));
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty"));
/* Each pass through the following loop invokes a callback. */
- trace_rcu_batch_start(rdp->rsp->name,
+ trace_rcu_batch_start(rcu_state.name,
atomic_long_read(&rdp->nocb_q_count_lazy),
atomic_long_read(&rdp->nocb_q_count), -1);
c = cl = 0;
@@ -2323,23 +2334,23 @@ static int rcu_nocb_kthread(void *arg)
next = list->next;
/* Wait for enqueuing to complete, if needed. */
while (next == NULL && &list->next != tail) {
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("WaitQueue"));
schedule_timeout_interruptible(1);
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("WokeQueue"));
next = list->next;
}
debug_rcu_head_unqueue(list);
local_bh_disable();
- if (__rcu_reclaim(rdp->rsp->name, list))
+ if (__rcu_reclaim(rcu_state.name, list))
cl++;
c++;
local_bh_enable();
cond_resched_tasks_rcu_qs();
list = next;
}
- trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+ trace_rcu_batch_end(rcu_state.name, c, !!list, 0, 0, 1);
smp_mb__before_atomic(); /* _add after CB invocation. */
atomic_long_add(-c, &rdp->nocb_q_count);
atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
@@ -2367,7 +2378,7 @@ static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
ndw = READ_ONCE(rdp->nocb_defer_wakeup);
WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
__wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
- trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
}
/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
@@ -2393,7 +2404,6 @@ void __init rcu_init_nohz(void)
{
int cpu;
bool need_rcu_nocb_mask = false;
- struct rcu_state *rsp;
#if defined(CONFIG_NO_HZ_FULL)
if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
@@ -2427,11 +2437,9 @@ void __init rcu_init_nohz(void)
if (rcu_nocb_poll)
pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
- for_each_rcu_flavor(rsp) {
- for_each_cpu(cpu, rcu_nocb_mask)
- init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
- rcu_organize_nocb_kthreads(rsp);
- }
+ for_each_cpu(cpu, rcu_nocb_mask)
+ init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu));
+ rcu_organize_nocb_kthreads();
}
/* Initialize per-rcu_data variables for no-CBs CPUs. */
@@ -2446,16 +2454,15 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
/*
* If the specified CPU is a no-CBs CPU that does not already have its
- * rcuo kthread for the specified RCU flavor, spawn it. If the CPUs are
- * brought online out of order, this can require re-organizing the
- * leader-follower relationships.
+ * rcuo kthread, spawn it. If the CPUs are brought online out of order,
+ * this can require re-organizing the leader-follower relationships.
*/
-static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
+static void rcu_spawn_one_nocb_kthread(int cpu)
{
struct rcu_data *rdp;
struct rcu_data *rdp_last;
struct rcu_data *rdp_old_leader;
- struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
+ struct rcu_data *rdp_spawn = per_cpu_ptr(&rcu_data, cpu);
struct task_struct *t;
/*
@@ -2485,9 +2492,9 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
rdp_spawn->nocb_next_follower = rdp_old_leader;
}
- /* Spawn the kthread for this CPU and RCU flavor. */
+ /* Spawn the kthread for this CPU. */
t = kthread_run(rcu_nocb_kthread, rdp_spawn,
- "rcuo%c/%d", rsp->abbr, cpu);
+ "rcuo%c/%d", rcu_state.abbr, cpu);
BUG_ON(IS_ERR(t));
WRITE_ONCE(rdp_spawn->nocb_kthread, t);
}
@@ -2498,11 +2505,8 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
*/
static void rcu_spawn_all_nocb_kthreads(int cpu)
{
- struct rcu_state *rsp;
-
if (rcu_scheduler_fully_active)
- for_each_rcu_flavor(rsp)
- rcu_spawn_one_nocb_kthread(rsp, cpu);
+ rcu_spawn_one_nocb_kthread(cpu);
}
/*
@@ -2526,7 +2530,7 @@ module_param(rcu_nocb_leader_stride, int, 0444);
/*
* Initialize leader-follower relationships for all no-CBs CPU.
*/
-static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
+static void __init rcu_organize_nocb_kthreads(void)
{
int cpu;
int ls = rcu_nocb_leader_stride;
@@ -2548,7 +2552,7 @@ static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
* we will spawn the needed set of rcu_nocb_kthread() kthreads.
*/
for_each_cpu(cpu, rcu_nocb_mask) {
- rdp = per_cpu_ptr(rsp->rda, cpu);
+ rdp = per_cpu_ptr(&rcu_data, cpu);
if (rdp->cpu >= nl) {
/* New leader, set up for followers & next leader. */
nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
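
/*
 * Worked example with illustrative values, not from this patch: with
 * rcu_nocb_leader_stride ls == 4 and CPUs 0..15 all no-CBs,
 * DIV_ROUND_UP(cpu + 1, 4) * 4 advances nl through 4, 8, 12 and 16,
 * so CPUs 0, 4, 8 and 12 become leaders, each leading the three CPUs
 * that follow it.
 */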
@@ -2585,7 +2589,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
+static bool rcu_nocb_cpu_needs_barrier(int cpu)
{
WARN_ON_ONCE(1); /* Should be dead code. */
return false;
@@ -2654,12 +2658,12 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
* This code relies on the fact that all NO_HZ_FULL CPUs are also
* CONFIG_RCU_NOCB_CPU CPUs.
*/
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
+static bool rcu_nohz_full_cpu(void)
{
#ifdef CONFIG_NO_HZ_FULL
if (tick_nohz_full_cpu(smp_processor_id()) &&
- (!rcu_gp_in_progress(rsp) ||
- ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
+ (!rcu_gp_in_progress() ||
+ ULONG_CMP_LT(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
return false;
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 39cb23d22109..f203b94f6b5b 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -203,11 +203,7 @@ void rcu_test_sync_prims(void)
if (!IS_ENABLED(CONFIG_PROVE_RCU))
return;
synchronize_rcu();
- synchronize_rcu_bh();
- synchronize_sched();
synchronize_rcu_expedited();
- synchronize_rcu_bh_expedited();
- synchronize_sched_expedited();
}
#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
@@ -298,7 +294,7 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_held);
*
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
*
- * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
* offline from an RCU perspective, so check for those as well.
*/
int rcu_read_lock_bh_held(void)
@@ -336,7 +332,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
int i;
int j;
- /* Initialize and register callbacks for each flavor specified. */
+ /* Initialize and register callbacks for each crcu_array element. */
for (i = 0; i < n; i++) {
if (checktiny &&
(crcu_array[i] == call_rcu ||
@@ -472,6 +468,7 @@ int rcu_jiffies_till_stall_check(void)
}
return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
+EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
void rcu_sysrq_start(void)
{
@@ -701,19 +698,19 @@ static int __noreturn rcu_tasks_kthread(void *arg)
/*
* Wait for all pre-existing t->on_rq and t->nvcsw
- * transitions to complete. Invoking synchronize_sched()
+ * transitions to complete. Invoking synchronize_rcu()
* suffices because all these transitions occur with
- * interrupts disabled. Without this synchronize_sched(),
+ * interrupts disabled. Without this synchronize_rcu(),
* a read-side critical section that started before the
* grace period might be incorrectly seen as having started
* after the grace period.
*
- * This synchronize_sched() also dispenses with the
+ * This synchronize_rcu() also dispenses with the
* need for a memory barrier on the first store to
* ->rcu_tasks_holdout, as it forces the store to happen
* after the beginning of the grace period.
*/
- synchronize_sched();
+ synchronize_rcu();
/*
* There were callbacks, so we need to wait for an
@@ -740,7 +737,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
* This does only part of the job, ensuring that all
* tasks that were previously exiting reach the point
* where they have disabled preemption, allowing the
- * later synchronize_sched() to finish the job.
+ * later synchronize_rcu() to finish the job.
*/
synchronize_srcu(&tasks_rcu_exit_srcu);
@@ -790,20 +787,20 @@ static int __noreturn rcu_tasks_kthread(void *arg)
* cause their RCU-tasks read-side critical sections to
* extend past the end of the grace period. However,
* because these ->nvcsw updates are carried out with
- * interrupts disabled, we can use synchronize_sched()
+ * interrupts disabled, we can use synchronize_rcu()
* to force the needed ordering on all such CPUs.
*
- * This synchronize_sched() also confines all
+ * This synchronize_rcu() also confines all
* ->rcu_tasks_holdout accesses to be within the grace
* period, avoiding the need for memory barriers for
* ->rcu_tasks_holdout accesses.
*
- * In addition, this synchronize_sched() waits for exiting
+ * In addition, this synchronize_rcu() waits for exiting
* tasks to complete their final preempt_disable() region
* of execution, cleaning up after the synchronize_srcu()
* above.
*/
- synchronize_sched();
+ synchronize_rcu();
/* Invoke the callbacks. */
while (list) {
@@ -870,15 +867,10 @@ static void __init rcu_tasks_bootup_oddness(void)
#ifdef CONFIG_PROVE_RCU
/*
- * Early boot self test parameters, one for each flavor
+ * Early boot self test parameters.
*/
static bool rcu_self_test;
-static bool rcu_self_test_bh;
-static bool rcu_self_test_sched;
-
module_param(rcu_self_test, bool, 0444);
-module_param(rcu_self_test_bh, bool, 0444);
-module_param(rcu_self_test_sched, bool, 0444);
static int rcu_self_test_counter;
@@ -888,25 +880,16 @@ static void test_callback(struct rcu_head *r)
pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}
+DEFINE_STATIC_SRCU(early_srcu);
+
static void early_boot_test_call_rcu(void)
{
static struct rcu_head head;
+ static struct rcu_head shead;
call_rcu(&head, test_callback);
-}
-
-static void early_boot_test_call_rcu_bh(void)
-{
- static struct rcu_head head;
-
- call_rcu_bh(&head, test_callback);
-}
-
-static void early_boot_test_call_rcu_sched(void)
-{
- static struct rcu_head head;
-
- call_rcu_sched(&head, test_callback);
+ if (IS_ENABLED(CONFIG_SRCU))
+ call_srcu(&early_srcu, &shead, test_callback);
}
void rcu_early_boot_tests(void)
@@ -915,10 +898,6 @@ void rcu_early_boot_tests(void)
if (rcu_self_test)
early_boot_test_call_rcu();
- if (rcu_self_test_bh)
- early_boot_test_call_rcu_bh();
- if (rcu_self_test_sched)
- early_boot_test_call_rcu_sched();
rcu_test_sync_prims();
}
@@ -930,16 +909,11 @@ static int rcu_verify_early_boot_tests(void)
if (rcu_self_test) {
early_boot_test_counter++;
rcu_barrier();
+ if (IS_ENABLED(CONFIG_SRCU)) {
+ early_boot_test_counter++;
+ srcu_barrier(&early_srcu);
+ }
}
- if (rcu_self_test_bh) {
- early_boot_test_counter++;
- rcu_barrier_bh();
- }
- if (rcu_self_test_sched) {
- early_boot_test_counter++;
- rcu_barrier_sched();
- }
-
if (rcu_self_test_counter != early_boot_test_counter) {
WARN_ON(1);
ret = -1;
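
A compressed sketch of the remaining self-test flow, for orientation only (it reuses names from the patch but is not the literal code): booting with rcupdate.rcu_self_test=1 posts one callback per tested API, and the verification step flushes each with the matching barrier before comparing counters.

static int expected;

static void run_tests(void)
{
	early_boot_test_call_rcu();     /* posts call_rcu() + call_srcu() */
	expected = IS_ENABLED(CONFIG_SRCU) ? 2 : 1;
}

static int verify(void)
{
	rcu_barrier();                  /* wait for the call_rcu() callback */
	if (IS_ENABLED(CONFIG_SRCU))
		srcu_barrier(&early_srcu);
	return rcu_self_test_counter == expected ? 0 : -1;
}
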
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 8fb44dec9ad7..e1b79b6a2735 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -49,6 +49,7 @@ int reboot_force;
*/
void (*pm_power_off_prepare)(void);
+EXPORT_SYMBOL_GPL(pm_power_off_prepare);
/**
* emergency_restart - reboot the system
diff --git a/kernel/resource.c b/kernel/resource.c
index 30e1bc68503b..b3a3a1fc499e 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -318,33 +318,34 @@ int release_resource(struct resource *old)
EXPORT_SYMBOL(release_resource);
-/*
- * Finds the lowest iomem resource existing within [res->start.res->end).
- * The caller must specify res->start, res->end, res->flags, and optionally
- * desc. If found, returns 0, res is overwritten, if not found, returns -1.
- * This function walks the whole tree and not just first level children until
- * and unless first_level_children_only is true.
+/**
+ * Finds the lowest iomem resource that covers part of [start..end]. The
+ * caller must specify start, end, flags, and desc (which may be
+ * IORES_DESC_NONE).
+ *
+ * If a resource is found, returns 0 and *res is overwritten with the part
+ * of the resource that's within [start..end]; if none is found, returns
+ * -1.
+ *
+ * This function walks the whole tree, not just the first-level children,
+ * unless @first_lvl is true.
*/
-static int find_next_iomem_res(struct resource *res, unsigned long desc,
- bool first_level_children_only)
+static int find_next_iomem_res(resource_size_t start, resource_size_t end,
+ unsigned long flags, unsigned long desc,
+ bool first_lvl, struct resource *res)
{
- resource_size_t start, end;
struct resource *p;
- bool sibling_only = false;
- BUG_ON(!res);
-
- start = res->start;
- end = res->end;
- BUG_ON(start >= end);
+ if (!res)
+ return -EINVAL;
- if (first_level_children_only)
- sibling_only = true;
+ if (start >= end)
+ return -EINVAL;
read_lock(&resource_lock);
- for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
- if ((p->flags & res->flags) != res->flags)
+ for (p = iomem_resource.child; p; p = next_resource(p, first_lvl)) {
+ if ((p->flags & flags) != flags)
continue;
if ((desc != IORES_DESC_NONE) && (desc != p->desc))
continue;
@@ -352,45 +353,43 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
p = NULL;
break;
}
- if ((p->end >= start) && (p->start < end))
+ if ((p->end >= start) && (p->start <= end))
break;
}
read_unlock(&resource_lock);
if (!p)
return -1;
+
/* copy data */
- if (res->start < p->start)
- res->start = p->start;
- if (res->end > p->end)
- res->end = p->end;
+ res->start = max(start, p->start);
+ res->end = min(end, p->end);
res->flags = p->flags;
res->desc = p->desc;
return 0;
}
-static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
- bool first_level_children_only,
- void *arg,
+static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
+ unsigned long flags, unsigned long desc,
+ bool first_lvl, void *arg,
int (*func)(struct resource *, void *))
{
- u64 orig_end = res->end;
+ struct resource res;
int ret = -1;
- while ((res->start < res->end) &&
- !find_next_iomem_res(res, desc, first_level_children_only)) {
- ret = (*func)(res, arg);
+ while (start < end &&
+ !find_next_iomem_res(start, end, flags, desc, first_lvl, &res)) {
+ ret = (*func)(&res, arg);
if (ret)
break;
- res->start = res->end + 1;
- res->end = orig_end;
+ start = res.end + 1;
}
return ret;
}
-/*
+/**
* Walks through iomem resources and calls func() with matching resource
 * ranges. This walks through the whole tree, not just first-level children.
* All the memory ranges which overlap start,end and also match flags and
@@ -407,13 +406,7 @@ static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
u64 end, void *arg, int (*func)(struct resource *, void *))
{
- struct resource res;
-
- res.start = start;
- res.end = end;
- res.flags = flags;
-
- return __walk_iomem_res_desc(&res, desc, false, arg, func);
+ return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
@@ -425,15 +418,11 @@ EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
* ranges.
*/
int walk_system_ram_res(u64 start, u64 end, void *arg,
- int (*func)(struct resource *, void *))
+ int (*func)(struct resource *, void *))
{
- struct resource res;
+ unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
- res.start = start;
- res.end = end;
- res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
-
- return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+ return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
arg, func);
}
@@ -444,13 +433,9 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
int walk_mem_res(u64 start, u64 end, void *arg,
int (*func)(struct resource *, void *))
{
- struct resource res;
-
- res.start = start;
- res.end = end;
- res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+ return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
arg, func);
}
@@ -462,27 +447,27 @@ int walk_mem_res(u64 start, u64 end, void *arg,
* It is to be used only for System RAM.
*/
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
- void *arg, int (*func)(unsigned long, unsigned long, void *))
+ void *arg, int (*func)(unsigned long, unsigned long, void *))
{
+ resource_size_t start, end;
+ unsigned long flags;
struct resource res;
unsigned long pfn, end_pfn;
- u64 orig_end;
int ret = -1;
- res.start = (u64) start_pfn << PAGE_SHIFT;
- res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
- res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
- orig_end = res.end;
- while ((res.start < res.end) &&
- (find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
+ start = (u64) start_pfn << PAGE_SHIFT;
+ end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+ flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ while (start < end &&
+ !find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
+ true, &res)) {
pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
end_pfn = (res.end + 1) >> PAGE_SHIFT;
if (end_pfn > pfn)
ret = (*func)(pfn, end_pfn - pfn, arg);
if (ret)
break;
- res.start = res.end + 1;
- res.end = orig_end;
+ start = res.end + 1;
}
return ret;
}
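
With the reworked iterator taking an inclusive [start..end] range plus explicit flags, callers just supply a range and a callback. A hedged sketch of a hypothetical caller (not from this patch) that counts System RAM pages below 4 GiB:

static int count_pages(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	*(u64 *)arg += nr_pages;
	return 0;               /* non-zero would stop the walk early */
}

static u64 ram_pages_below_4g(void)
{
	u64 total = 0;

	walk_system_ram_range(0, 0x100000000ULL >> PAGE_SHIFT,
			      &total, count_pages);
	return total;
}
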
@@ -658,8 +643,8 @@ static int find_resource(struct resource *root, struct resource *new,
* @constraint: the size and alignment constraints to be met.
*/
static int reallocate_resource(struct resource *root, struct resource *old,
- resource_size_t newsize,
- struct resource_constraint *constraint)
+ resource_size_t newsize,
+ struct resource_constraint *constraint)
{
int err=0;
struct resource new = *old;
@@ -972,7 +957,7 @@ skip:
* Existing children of the resource are assumed to be immutable.
*/
int adjust_resource(struct resource *res, resource_size_t start,
- resource_size_t size)
+ resource_size_t size)
{
int result;
@@ -983,9 +968,9 @@ int adjust_resource(struct resource *res, resource_size_t start,
}
EXPORT_SYMBOL(adjust_resource);
-static void __init __reserve_region_with_split(struct resource *root,
- resource_size_t start, resource_size_t end,
- const char *name)
+static void __init
+__reserve_region_with_split(struct resource *root, resource_size_t start,
+ resource_size_t end, const char *name)
{
struct resource *parent = root;
struct resource *conflict;
@@ -1044,9 +1029,9 @@ static void __init __reserve_region_with_split(struct resource *root,
}
-void __init reserve_region_with_split(struct resource *root,
- resource_size_t start, resource_size_t end,
- const char *name)
+void __init
+reserve_region_with_split(struct resource *root, resource_size_t start,
+ resource_size_t end, const char *name)
{
int abort = 0;
@@ -1172,7 +1157,7 @@ EXPORT_SYMBOL(__request_region);
* The described resource region must match a currently busy region.
*/
void __release_region(struct resource *parent, resource_size_t start,
- resource_size_t n)
+ resource_size_t n)
{
struct resource **p;
resource_size_t end;
@@ -1234,7 +1219,7 @@ EXPORT_SYMBOL(__release_region);
* simplicity. Enhance this logic when necessary.
*/
int release_mem_region_adjustable(struct resource *parent,
- resource_size_t start, resource_size_t size)
+ resource_size_t start, resource_size_t size)
{
struct resource **p;
struct resource *res;
@@ -1410,9 +1395,9 @@ static int devm_region_match(struct device *dev, void *res, void *match_data)
this->start == match->start && this->n == match->n;
}
-struct resource * __devm_request_region(struct device *dev,
- struct resource *parent, resource_size_t start,
- resource_size_t n, const char *name)
+struct resource *
+__devm_request_region(struct device *dev, struct resource *parent,
+ resource_size_t start, resource_size_t n, const char *name)
{
struct region_devres *dr = NULL;
struct resource *res;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 625bc9897f62..2e696b03e99d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 * In theory, the compiler should just see 0 here, and optimize out the call
* to sched_rt_avg_update. But I don't trust it...
*/
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
- s64 steal = 0, irq_delta = 0;
-#endif
+ s64 __maybe_unused steal = 0, irq_delta = 0;
+
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
@@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
rq->clock_task += delta;
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
update_irq_load_avg(rq, irq_delta + steal);
#endif
@@ -701,6 +700,7 @@ static void set_load_weight(struct task_struct *p, bool update_load)
if (idle_policy(p->policy)) {
load->weight = scale_load(WEIGHT_IDLEPRIO);
load->inv_weight = WMULT_IDLEPRIO;
+ p->se.runnable_weight = load->weight;
return;
}
@@ -713,6 +713,7 @@ static void set_load_weight(struct task_struct *p, bool update_load)
} else {
load->weight = scale_load(sched_prio_to_weight[prio]);
load->inv_weight = sched_prio_to_wmult[prio];
+ p->se.runnable_weight = load->weight;
}
}
@@ -1167,7 +1168,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
if (task_cpu(p) != new_cpu) {
if (p->sched_class->migrate_task_rq)
- p->sched_class->migrate_task_rq(p);
+ p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
rseq_migrate(p);
perf_event_task_migrate(p);
@@ -2915,10 +2916,10 @@ unsigned long nr_iowait(void)
}
/*
- * Consumers of these two interfaces, like for example the cpufreq menu
- * governor are using nonsensical data. Boosting frequency for a CPU that has
- * IO-wait which might not even end up running the task when it does become
- * runnable.
+ * Consumers of these two interfaces, like for example the cpuidle menu
+ * governor, are using nonsensical data: they prefer a shallow idle state for
+ * a CPU with pending IO-wait even though the waiting task might not end up
+ * running on that CPU when it does become runnable.
*/
unsigned long nr_iowait_cpu(int cpu)
@@ -5243,7 +5244,7 @@ out_unlock:
* an error code.
*/
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
- struct timespec __user *, interval)
+ struct __kernel_timespec __user *, interval)
{
struct timespec64 t;
int retval = sched_rr_get_interval(pid, &t);
@@ -5254,16 +5255,16 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
return retval;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
compat_pid_t, pid,
- struct compat_timespec __user *, interval)
+ struct old_timespec32 __user *, interval)
{
struct timespec64 t;
int retval = sched_rr_get_interval(pid, &t);
if (retval == 0)
- retval = compat_put_timespec64(&t, interval);
+ retval = put_old_timespec32(&t, interval);
return retval;
}
#endif
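
From userspace, the libc wrapper is unchanged by the __kernel_timespec/old_timespec32 split; only the kernel-side ABI plumbing moves. A runnable sketch querying the round-robin quantum for the calling thread:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 means the calling thread; non-RR tasks report 0s */
	if (sched_rr_get_interval(0, &ts) == 0)
		printf("RR quantum: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	else
		perror("sched_rr_get_interval");
	return 0;
}
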
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 997ea7b839fa..91e4202b0634 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1607,7 +1607,7 @@ out:
return cpu;
}
-static void migrate_task_rq_dl(struct task_struct *p)
+static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
{
struct rq *rq;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f808ddf2a868..ee271bb661cc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -693,6 +693,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);
+static unsigned long capacity_of(int cpu);
/* Give a new sched_entity initial runnable values that weight its load heavily during its infancy */
void init_entity_runnable_average(struct sched_entity *se)
@@ -1392,6 +1393,17 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
int last_cpupid, this_cpupid;
this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
+ last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+
+ /*
+ * Allow first faults or private faults to migrate immediately early in
+ * the lifetime of a task. The magic number 4 is based on waiting for
+ * two full passes of the "multi-stage node selection" test that is
+ * executed below.
+ */
+ if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
+ (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
+ return true;
/*
* Multi-stage node selection is used in conjunction with a periodic
@@ -1410,7 +1422,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 * This quadratic squishes small probabilities, making it less likely we
* act on an unlikely task<->page relation.
*/
- last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
if (!cpupid_pid_unset(last_cpupid) &&
cpupid_to_nid(last_cpupid) != dst_nid)
return false;
@@ -1446,7 +1457,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
static unsigned long weighted_cpuload(struct rq *rq);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
-static unsigned long capacity_of(int cpu);
/* Cached statistics for all CPUs within a node */
struct numa_stats {
@@ -1454,8 +1464,6 @@ struct numa_stats {
/* Total compute capacity of CPUs on a node */
unsigned long compute_capacity;
-
- unsigned int nr_running;
};
/*
@@ -1463,36 +1471,16 @@ struct numa_stats {
*/
static void update_numa_stats(struct numa_stats *ns, int nid)
{
- int smt, cpu, cpus = 0;
- unsigned long capacity;
+ int cpu;
memset(ns, 0, sizeof(*ns));
for_each_cpu(cpu, cpumask_of_node(nid)) {
struct rq *rq = cpu_rq(cpu);
- ns->nr_running += rq->nr_running;
ns->load += weighted_cpuload(rq);
ns->compute_capacity += capacity_of(cpu);
-
- cpus++;
}
- /*
- * If we raced with hotplug and there are no CPUs left in our mask
- * the @ns structure is NULL'ed and task_numa_compare() will
- * not find this node attractive.
- *
- * We'll detect a huge imbalance and bail there.
- */
- if (!cpus)
- return;
-
- /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
- smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
- capacity = cpus / smt; /* cores */
-
- capacity = min_t(unsigned, capacity,
- DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
}
struct task_numa_env {
@@ -1514,6 +1502,21 @@ struct task_numa_env {
static void task_numa_assign(struct task_numa_env *env,
struct task_struct *p, long imp)
{
+ struct rq *rq = cpu_rq(env->dst_cpu);
+
+	/* Bail out if the run-queue is part of an active NUMA balance. */
+ if (xchg(&rq->numa_migrate_on, 1))
+ return;
+
+ /*
+	 * Clear the previous best_cpu/rq numa-migrate flag, since the task
+	 * has now found a better CPU to move to or swap with.
+ */
+ if (env->best_cpu != -1) {
+ rq = cpu_rq(env->best_cpu);
+ WRITE_ONCE(rq->numa_migrate_on, 0);
+ }
+
if (env->best_task)
put_task_struct(env->best_task);
if (p)
@@ -1553,6 +1556,13 @@ static bool load_too_imbalanced(long src_load, long dst_load,
}
/*
+ * Maximum NUMA importance can be 1998 (2*999);
+ * SMALLIMP @ 30 would be close to 1998/64.
+ * Used to deter task migration.
+ */
+#define SMALLIMP 30
+
+/*
* This checks if the overall compute and NUMA accesses of the system would
 * be improved if the source task were migrated to the target dst_cpu, taking
 * into account that it might be best if the task running on the dst_cpu should
@@ -1569,6 +1579,9 @@ static void task_numa_compare(struct task_numa_env *env,
long moveimp = imp;
int dist = env->dist;
+ if (READ_ONCE(dst_rq->numa_migrate_on))
+ return;
+
rcu_read_lock();
cur = task_rcu_dereference(&dst_rq->curr);
if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
@@ -1582,7 +1595,7 @@ static void task_numa_compare(struct task_numa_env *env,
goto unlock;
if (!cur) {
- if (maymove || imp > env->best_imp)
+ if (maymove && moveimp >= env->best_imp)
goto assign;
else
goto unlock;
@@ -1625,16 +1638,22 @@ static void task_numa_compare(struct task_numa_env *env,
task_weight(cur, env->dst_nid, dist);
}
- if (imp <= env->best_imp)
- goto unlock;
-
if (maymove && moveimp > imp && moveimp > env->best_imp) {
- imp = moveimp - 1;
+ imp = moveimp;
cur = NULL;
goto assign;
}
/*
+ * If the NUMA importance is less than SMALLIMP,
+ * task migration might only result in ping pong
+ * of tasks and also hurt performance due to cache
+ * misses.
+ */
+ if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
+ goto unlock;
+
+ /*
* In the overloaded case, try and keep the load balanced.
*/
load = task_h_load(env->p) - task_h_load(cur);
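
For scale, a worked check of the gate above (example numbers, not from the patch): the maximum importance is 1998, SMALLIMP = 30 is about 1998/64, and a candidate must beat the current best by more than SMALLIMP/2 = 15 before a swap is considered.

#define SMALLIMP 30
static bool worth_migrating(long imp, long best_imp)
{
	/* imp = 25, best_imp = 20  -> false: below SMALLIMP          */
	/* imp = 40, best_imp = 35  -> false: gain of 5 is too small  */
	/* imp = 60, best_imp = 35  -> true:  60 > 35 + 15            */
	return imp >= SMALLIMP && imp > best_imp + SMALLIMP / 2;
}
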
@@ -1710,6 +1729,7 @@ static int task_numa_migrate(struct task_struct *p)
.best_cpu = -1,
};
struct sched_domain *sd;
+ struct rq *best_rq;
unsigned long taskweight, groupweight;
int nid, ret, dist;
long taskimp, groupimp;
@@ -1805,20 +1825,17 @@ static int task_numa_migrate(struct task_struct *p)
if (env.best_cpu == -1)
return -EAGAIN;
- /*
- * Reset the scan period if the task is being rescheduled on an
- * alternative node to recheck if the tasks is now properly placed.
- */
- p->numa_scan_period = task_scan_start(p);
-
+ best_rq = cpu_rq(env.best_cpu);
if (env.best_task == NULL) {
ret = migrate_task_to(p, env.best_cpu);
+ WRITE_ONCE(best_rq->numa_migrate_on, 0);
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
return ret;
}
ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
+ WRITE_ONCE(best_rq->numa_migrate_on, 0);
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
@@ -2596,6 +2613,39 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
}
}
+static void update_scan_period(struct task_struct *p, int new_cpu)
+{
+ int src_nid = cpu_to_node(task_cpu(p));
+ int dst_nid = cpu_to_node(new_cpu);
+
+ if (!static_branch_likely(&sched_numa_balancing))
+ return;
+
+ if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
+ return;
+
+ if (src_nid == dst_nid)
+ return;
+
+ /*
+ * Allow resets if faults have been trapped before one scan
+ * has completed. This is most likely due to a new task that
+ * is pulled cross-node due to wakeups or load balancing.
+ */
+ if (p->numa_scan_seq) {
+ /*
+ * Avoid scan adjustments if moving to the preferred
+ * node or if the task was not previously running on
+ * the preferred node.
+ */
+ if (dst_nid == p->numa_preferred_nid ||
+ (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid))
+ return;
+ }
+
+ p->numa_scan_period = task_scan_start(p);
+}
+
#else
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
@@ -2609,6 +2659,10 @@ static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
}
+static inline void update_scan_period(struct task_struct *p, int new_cpu)
+{
+}
+
#endif /* CONFIG_NUMA_BALANCING */
static void
@@ -3647,6 +3701,29 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
WRITE_ONCE(p->se.avg.util_est, ue);
}
+static inline int task_fits_capacity(struct task_struct *p, long capacity)
+{
+ return capacity * 1024 > task_util_est(p) * capacity_margin;
+}
+
+static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+{
+ if (!static_branch_unlikely(&sched_asym_cpucapacity))
+ return;
+
+ if (!p) {
+ rq->misfit_task_load = 0;
+ return;
+ }
+
+ if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
+ rq->misfit_task_load = 0;
+ return;
+ }
+
+ rq->misfit_task_load = task_h_load(p);
+}
+
#else /* CONFIG_SMP */
#define UPDATE_TG 0x0
@@ -3676,6 +3753,7 @@ util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
static inline void
util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
bool task_sleep) {}
+static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
#endif /* CONFIG_SMP */
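
Worked numbers for task_fits_capacity(): with capacity_margin at its default of 1280, "capacity * 1024 > util * 1280" means a task fits only while its utilization stays under 1024/1280, i.e. 80% of the CPU's capacity. A runnable userspace mirror of the check (illustrative values):

#include <stdbool.h>
#include <stdio.h>

static const long capacity_margin = 1280;   /* default, ~20% headroom */

static bool task_fits_capacity(long util, long capacity)
{
	return capacity * 1024 > util * capacity_margin;
}

int main(void)
{
	/* LITTLE CPU, capacity 446: fits up to util ~356 */
	printf("%d\n", task_fits_capacity(300, 446));   /* 1: fits   */
	printf("%d\n", task_fits_capacity(400, 446));   /* 0: misfit */
	/* big CPU, capacity 1024: the same task fits again */
	printf("%d\n", task_fits_capacity(400, 1024));  /* 1         */
	return 0;
}
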
@@ -3925,7 +4003,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* put back on, and if we advance min_vruntime, we'll be placed back
* further than we started -- ie. we'll be penalized.
*/
- if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
+ if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
}
@@ -4400,9 +4478,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
/*
* Add to the _head_ of the list, so that an already-started
- * distribute_cfs_runtime will not see us
+	 * distribute_cfs_runtime will not see us. If distribute_cfs_runtime is
+	 * not running, add to the tail so that later runqueues don't get starved.
*/
- list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+ if (cfs_b->distribute_running)
+ list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+ else
+ list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
/*
* If we're the first throttled task, make sure the bandwidth
@@ -4546,14 +4628,16 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
* in us over-using our runtime if it is all used during this loop, but
* only by limited amounts in that extreme case.
*/
- while (throttled && cfs_b->runtime > 0) {
+ while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
runtime = cfs_b->runtime;
+ cfs_b->distribute_running = 1;
raw_spin_unlock(&cfs_b->lock);
/* we can't nest cfs_b->lock while distributing bandwidth */
runtime = distribute_cfs_runtime(cfs_b, runtime,
runtime_expires);
raw_spin_lock(&cfs_b->lock);
+ cfs_b->distribute_running = 0;
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
cfs_b->runtime -= min(runtime, cfs_b->runtime);
@@ -4664,6 +4748,11 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
/* confirm we're still not at a refresh boundary */
raw_spin_lock(&cfs_b->lock);
+ if (cfs_b->distribute_running) {
+ raw_spin_unlock(&cfs_b->lock);
+ return;
+ }
+
if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
raw_spin_unlock(&cfs_b->lock);
return;
@@ -4673,6 +4762,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
runtime = cfs_b->runtime;
expires = cfs_b->runtime_expires;
+ if (runtime)
+ cfs_b->distribute_running = 1;
+
raw_spin_unlock(&cfs_b->lock);
if (!runtime)
@@ -4683,6 +4775,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
raw_spin_lock(&cfs_b->lock);
if (expires == cfs_b->runtime_expires)
cfs_b->runtime -= min(runtime, cfs_b->runtime);
+ cfs_b->distribute_running = 0;
raw_spin_unlock(&cfs_b->lock);
}
@@ -4791,6 +4884,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
cfs_b->period_timer.function = sched_cfs_period_timer;
hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cfs_b->slack_timer.function = sched_cfs_slack_timer;
+ cfs_b->distribute_running = 0;
}
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
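
The distribute_running flag marks the window in which cfs_b->lock is dropped while runtime is being handed out, so the slack timer (and the head/tail list choice above) can tell whether a distributor is mid-flight. A hedged userspace analogue of the handshake, with names mirroring the patch but otherwise illustrative:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool distribute_running;

static void distribute(void)
{
	/* redistribute runtime to throttled queues; must not hold 'lock' */
}

static void period_timer(void)
{
	pthread_mutex_lock(&lock);
	if (!distribute_running) {
		distribute_running = true;
		pthread_mutex_unlock(&lock);
		distribute();           /* lock dropped, flag still set */
		pthread_mutex_lock(&lock);
		distribute_running = false;
	}
	pthread_mutex_unlock(&lock);
}

static void slack_timer(void)
{
	pthread_mutex_lock(&lock);
	if (distribute_running) {       /* someone is mid-distribution */
		pthread_mutex_unlock(&lock);
		return;
	}
	/* ... safe to distribute slack runtime here ... */
	pthread_mutex_unlock(&lock);
}
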
@@ -6188,6 +6282,9 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
{
long min_cap, max_cap;
+ if (!static_branch_unlikely(&sched_asym_cpucapacity))
+ return 0;
+
min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
@@ -6198,7 +6295,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
/* Bring task utilization in sync with prev_cpu */
sync_entity_load_avg(&p->se);
- return min_cap * 1024 < task_util(p) * capacity_margin;
+ return !task_fits_capacity(p, min_cap);
}
/*
@@ -6275,7 +6372,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
* cfs_rq_of(p) references at time of call are still valid and identify the
* previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
*/
-static void migrate_task_rq_fair(struct task_struct *p)
+static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
/*
* As blocked tasks retain absolute vruntime the migration needs to
@@ -6328,6 +6425,8 @@ static void migrate_task_rq_fair(struct task_struct *p)
/* We have migrated, no longer consider this task hot */
p->se.exec_start = 0;
+
+ update_scan_period(p, new_cpu);
}
static void task_dead_fair(struct task_struct *p)
@@ -6615,9 +6714,12 @@ done: __maybe_unused;
if (hrtick_enabled(rq))
hrtick_start_fair(rq, p);
+ update_misfit_status(p, rq);
+
return p;
idle:
+ update_misfit_status(NULL, rq);
new_tasks = idle_balance(rq, rf);
/*
@@ -6823,6 +6925,13 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10;
enum fbq_type { regular, remote, all };
+enum group_type {
+ group_other = 0,
+ group_misfit_task,
+ group_imbalanced,
+ group_overloaded,
+};
+
#define LBF_ALL_PINNED 0x01
#define LBF_NEED_BREAK 0x02
#define LBF_DST_PINNED 0x04
@@ -6853,6 +6962,7 @@ struct lb_env {
unsigned int loop_max;
enum fbq_type fbq_type;
+ enum group_type src_grp_type;
struct list_head tasks;
};
@@ -7233,7 +7343,7 @@ static inline bool others_have_blocked(struct rq *rq)
if (READ_ONCE(rq->avg_dl.util_avg))
return true;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
if (READ_ONCE(rq->avg_irq.util_avg))
return true;
#endif
@@ -7396,12 +7506,6 @@ static unsigned long task_h_load(struct task_struct *p)
/********** Helpers for find_busiest_group ************************/
-enum group_type {
- group_other = 0,
- group_imbalanced,
- group_overloaded,
-};
-
/*
* sg_lb_stats - stats of a sched_group required for load_balancing
*/
@@ -7417,6 +7521,7 @@ struct sg_lb_stats {
unsigned int group_weight;
enum group_type group_type;
int group_no_capacity;
+ unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
unsigned int nr_preferred_running;
@@ -7525,13 +7630,14 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
cpu_rq(cpu)->cpu_capacity = capacity;
sdg->sgc->capacity = capacity;
sdg->sgc->min_capacity = capacity;
+ sdg->sgc->max_capacity = capacity;
}
void update_group_capacity(struct sched_domain *sd, int cpu)
{
struct sched_domain *child = sd->child;
struct sched_group *group, *sdg = sd->groups;
- unsigned long capacity, min_capacity;
+ unsigned long capacity, min_capacity, max_capacity;
unsigned long interval;
interval = msecs_to_jiffies(sd->balance_interval);
@@ -7545,6 +7651,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
capacity = 0;
min_capacity = ULONG_MAX;
+ max_capacity = 0;
if (child->flags & SD_OVERLAP) {
/*
@@ -7575,6 +7682,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
}
min_capacity = min(capacity, min_capacity);
+ max_capacity = max(capacity, max_capacity);
}
} else {
/*
@@ -7588,12 +7696,14 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
capacity += sgc->capacity;
min_capacity = min(sgc->min_capacity, min_capacity);
+ max_capacity = max(sgc->max_capacity, max_capacity);
group = group->next;
} while (group != child->groups);
}
sdg->sgc->capacity = capacity;
sdg->sgc->min_capacity = min_capacity;
+ sdg->sgc->max_capacity = max_capacity;
}
/*
@@ -7689,16 +7799,27 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
}
/*
- * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
+ * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
* per-CPU capacity than sched_group ref.
*/
static inline bool
-group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
{
return sg->sgc->min_capacity * capacity_margin <
ref->sgc->min_capacity * 1024;
}
+/*
+ * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller
+ * per-CPU capacity_orig than sched_group ref.
+ */
+static inline bool
+group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+{
+ return sg->sgc->max_capacity * capacity_margin <
+ ref->sgc->max_capacity * 1024;
+}
+
static inline enum
group_type group_classify(struct sched_group *group,
struct sg_lb_stats *sgs)
@@ -7709,6 +7830,9 @@ group_type group_classify(struct sched_group *group,
if (sg_imbalanced(group))
return group_imbalanced;
+ if (sgs->group_misfit_task_load)
+ return group_misfit_task;
+
return group_other;
}
@@ -7741,7 +7865,7 @@ static bool update_nohz_stats(struct rq *rq, bool force)
* @load_idx: Load index of sched_domain of this_cpu for load calc.
* @local_group: Does group contain this_cpu.
* @sgs: variable to hold the statistics for this group.
- * @overload: Indicate more than one runnable task for any CPU.
+ * @overload: Indicate pullable load (e.g. >1 runnable task).
*/
static inline void update_sg_lb_stats(struct lb_env *env,
struct sched_group *group, int load_idx,
@@ -7783,6 +7907,12 @@ static inline void update_sg_lb_stats(struct lb_env *env,
*/
if (!nr_running && idle_cpu(i))
sgs->idle_cpus++;
+
+ if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
+ sgs->group_misfit_task_load < rq->misfit_task_load) {
+ sgs->group_misfit_task_load = rq->misfit_task_load;
+ *overload = 1;
+ }
}
/* Adjust by relative CPU capacity of the group */
@@ -7818,6 +7948,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
{
struct sg_lb_stats *busiest = &sds->busiest_stat;
+ /*
+ * Don't try to pull misfit tasks we can't help.
+	 * We can use max_capacity here since a reduction in capacity on some
+	 * CPUs in the group should either be possible to resolve
+	 * internally or be covered by avg_load imbalance (eventually).
+ */
+ if (sgs->group_type == group_misfit_task &&
+ (!group_smaller_max_cpu_capacity(sg, sds->local) ||
+ !group_has_capacity(env, &sds->local_stat)))
+ return false;
+
if (sgs->group_type > busiest->group_type)
return true;
@@ -7837,7 +7978,14 @@ static bool update_sd_pick_busiest(struct lb_env *env,
* power/energy consequences are not considered.
*/
if (sgs->sum_nr_running <= sgs->group_weight &&
- group_smaller_cpu_capacity(sds->local, sg))
+ group_smaller_min_cpu_capacity(sds->local, sg))
+ return false;
+
+ /*
+	 * If we have more than one misfit sg, go with the biggest misfit.
+ */
+ if (sgs->group_type == group_misfit_task &&
+ sgs->group_misfit_task_load < busiest->group_misfit_task_load)
return false;
asym_packing:
@@ -7908,11 +8056,9 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
struct sched_group *sg = env->sd->groups;
struct sg_lb_stats *local = &sds->local_stat;
struct sg_lb_stats tmp_sgs;
- int load_idx, prefer_sibling = 0;
+ int load_idx;
bool overload = false;
-
- if (child && child->flags & SD_PREFER_SIBLING)
- prefer_sibling = 1;
+ bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
#ifdef CONFIG_NO_HZ_COMMON
if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
@@ -7986,8 +8132,8 @@ next_group:
if (!env->sd->parent) {
/* update overload indicator if we are at root domain */
- if (env->dst_rq->rd->overload != overload)
- env->dst_rq->rd->overload = overload;
+ if (READ_ONCE(env->dst_rq->rd->overload) != overload)
+ WRITE_ONCE(env->dst_rq->rd->overload, overload);
}
}
@@ -8137,8 +8283,9 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* factors in sg capacity and sgs with smaller group_type are
* skipped when updating the busiest sg:
*/
- if (busiest->avg_load <= sds->avg_load ||
- local->avg_load >= sds->avg_load) {
+ if (busiest->group_type != group_misfit_task &&
+ (busiest->avg_load <= sds->avg_load ||
+ local->avg_load >= sds->avg_load)) {
env->imbalance = 0;
return fix_small_imbalance(env, sds);
}
@@ -8172,6 +8319,12 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
(sds->avg_load - local->avg_load) * local->group_capacity
) / SCHED_CAPACITY_SCALE;
+ /* Boost imbalance to allow misfit task to be balanced. */
+ if (busiest->group_type == group_misfit_task) {
+ env->imbalance = max_t(long, env->imbalance,
+ busiest->group_misfit_task_load);
+ }
+
/*
* if *imbalance is less than the average load per runnable task
* there is no guarantee that any tasks will be moved so we'll have
@@ -8238,6 +8391,10 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
busiest->group_no_capacity)
goto force_balance;
+ /* Misfit tasks should be dealt with regardless of the avg load */
+ if (busiest->group_type == group_misfit_task)
+ goto force_balance;
+
/*
* If the local group is busier than the selected busiest group
* don't try and pull any tasks.
@@ -8275,6 +8432,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
force_balance:
/* Looks like there is an imbalance. Compute it */
+ env->src_grp_type = busiest->group_type;
calculate_imbalance(env, &sds);
return env->imbalance ? sds.busiest : NULL;
@@ -8322,8 +8480,32 @@ static struct rq *find_busiest_queue(struct lb_env *env,
if (rt > env->fbq_type)
continue;
+ /*
+ * For ASYM_CPUCAPACITY domains with misfit tasks we simply
+ * seek the "biggest" misfit task.
+ */
+ if (env->src_grp_type == group_misfit_task) {
+ if (rq->misfit_task_load > busiest_load) {
+ busiest_load = rq->misfit_task_load;
+ busiest = rq;
+ }
+
+ continue;
+ }
+
capacity = capacity_of(i);
+ /*
+ * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
+		 * eventually lead to active balancing from high- to low-capacity CPUs.
+ * Higher per-CPU capacity is considered better than balancing
+ * average load.
+ */
+ if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
+ capacity_of(env->dst_cpu) < capacity &&
+ rq->nr_running == 1)
+ continue;
+
wl = weighted_cpuload(rq);
/*
@@ -8391,6 +8573,9 @@ static int need_active_balance(struct lb_env *env)
return 1;
}
+ if (env->src_grp_type == group_misfit_task)
+ return 1;
+
return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
}
@@ -9033,7 +9218,7 @@ static void nohz_balancer_kick(struct rq *rq)
if (time_before(now, nohz.next_balance))
goto out;
- if (rq->nr_running >= 2) {
+ if (rq->nr_running >= 2 || rq->misfit_task_load) {
flags = NOHZ_KICK_MASK;
goto out;
}
@@ -9402,7 +9587,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
rq_unpin_lock(this_rq, rf);
if (this_rq->avg_idle < sysctl_sched_migration_cost ||
- !this_rq->rd->overload) {
+ !READ_ONCE(this_rq->rd->overload)) {
rcu_read_lock();
sd = rcu_dereference_check_sched_domain(this_rq->sd);
@@ -9564,6 +9749,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
if (static_branch_unlikely(&sched_numa_balancing))
task_tick_numa(rq, curr);
+
+ update_misfit_status(curr, rq);
}
/*
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 85ae8488039c..858589b83377 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -39,7 +39,7 @@ SCHED_FEAT(WAKEUP_PREEMPTION, true)
SCHED_FEAT(HRTICK, false)
SCHED_FEAT(DOUBLE_TICK, false)
-SCHED_FEAT(LB_BIAS, true)
+SCHED_FEAT(LB_BIAS, false)
/*
* Decrement CPU capacity based on time not spent running tasks
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 16f84142f2f4..f5516bae0c1b 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -347,21 +347,6 @@ EXPORT_SYMBOL_GPL(play_idle);
void cpu_startup_entry(enum cpuhp_state state)
{
- /*
- * This #ifdef needs to die, but it's too late in the cycle to
- * make this generic (ARM and SH have never invoked the canary
- * init for the non boot CPUs!). Will be fixed in 3.11
- */
-#ifdef CONFIG_X86
- /*
- * If we're the non-boot CPU, nothing set the stack canary up
- * for us. The boot CPU already has it initialized but no harm
- * in doing it again. This is a good place for updating it, as
- * we wont ever return from this function (so the invalid
- * canaries already on the stack wont ever trigger).
- */
- boot_init_stack_canary();
-#endif
arch_cpu_idle_prepare();
cpuhp_online_idle(state);
while (1)
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 35475c0c5419..90fb5bc12ad4 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -269,9 +269,6 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna
int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
{
- if (entity_is_task(se))
- se->runnable_weight = se->load.weight;
-
if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0)) {
___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
return 1;
@@ -282,9 +279,6 @@ int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- if (entity_is_task(se))
- se->runnable_weight = se->load.weight;
-
if (___update_load_sum(now, cpu, &se->avg, !!se->on_rq, !!se->on_rq,
cfs_rq->curr == se)) {
@@ -358,7 +352,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
return 0;
}
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
* irq:
*
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index d2894db28955..7e56b489ff32 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -6,7 +6,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4a2e8cae63c4..b8c007713b3b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -56,7 +56,6 @@
#include <linux/profile.h>
#include <linux/rcupdate_wait.h>
#include <linux/security.h>
-#include <linux/stackprotector.h>
#include <linux/stop_machine.h>
#include <linux/suspend.h>
#include <linux/swait.h>
@@ -346,6 +345,8 @@ struct cfs_bandwidth {
int nr_periods;
int nr_throttled;
u64 throttled_time;
+
+ bool distribute_running;
#endif
};
@@ -715,8 +716,12 @@ struct root_domain {
cpumask_var_t span;
cpumask_var_t online;
- /* Indicate more than one runnable task for any CPU */
- bool overload;
+ /*
+	 * Indicate pullable load on at least one CPU, e.g.:
+ * - More than one runnable task
+ * - Running task is misfit
+ */
+ int overload;
/*
* The bit corresponding to a CPU gets set here if such CPU has more
@@ -783,6 +788,7 @@ struct rq {
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
unsigned int nr_preferred_running;
+ unsigned int numa_migrate_on;
#endif
#define CPU_LOAD_IDX_MAX 5
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
@@ -842,6 +848,8 @@ struct rq {
unsigned char idle_balance;
+ unsigned long misfit_task_load;
+
/* For active balancing */
int active_balance;
int push_cpu;
@@ -855,8 +863,7 @@ struct rq {
struct sched_avg avg_rt;
struct sched_avg avg_dl;
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-#define HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
struct sched_avg avg_irq;
#endif
u64 idle_stamp;
@@ -1185,6 +1192,7 @@ DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+extern struct static_key_false sched_asym_cpucapacity;
struct sched_group_capacity {
atomic_t ref;
@@ -1194,6 +1202,7 @@ struct sched_group_capacity {
*/
unsigned long capacity;
unsigned long min_capacity; /* Min per-CPU capacity in group */
+ unsigned long max_capacity; /* Max per-CPU capacity in group */
unsigned long next_update;
int imbalance; /* XXX unrelated to capacity but shared group state */
@@ -1393,7 +1402,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
0;
#undef SCHED_FEAT
-#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
@@ -1523,7 +1532,7 @@ struct sched_class {
#ifdef CONFIG_SMP
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
- void (*migrate_task_rq)(struct task_struct *p);
+ void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
void (*task_woken)(struct rq *this_rq, struct task_struct *task);
@@ -1693,8 +1702,8 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
- if (!rq->rd->overload)
- rq->rd->overload = true;
+ if (!READ_ONCE(rq->rd->overload))
+ WRITE_ONCE(rq->rd->overload, 1);
#endif
}
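
Since rd->overload is now an int read and written locklessly, READ_ONCE()/WRITE_ONCE() keep the compiler from tearing or caching the flag. A hedged C11 analogue of the same discipline (illustrative, not kernel code):

#include <stdatomic.h>

/* A plain 'int overload' could legally be kept in a register across the
 * idle-balance check; relaxed atomics are the portable C11 counterpart
 * of READ_ONCE()/WRITE_ONCE().
 */
static _Atomic int overload;

static void add_nr_running_slowpath(void)
{
	if (!atomic_load_explicit(&overload, memory_order_relaxed))
		atomic_store_explicit(&overload, 1, memory_order_relaxed);
}

static int idle_balance_should_bail(void)
{
	return !atomic_load_explicit(&overload, memory_order_relaxed);
}
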
@@ -2214,7 +2223,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
}
#endif
-#ifdef HAVE_SCHED_AVG_IRQ
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
static inline unsigned long cpu_util_irq(struct rq *rq)
{
return rq->avg_irq.util_avg;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 505a41c42b96..9d74371e4aad 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -7,8 +7,8 @@
DEFINE_MUTEX(sched_domains_mutex);
/* Protected by sched_domains_mutex: */
-cpumask_var_t sched_domains_tmpmask;
-cpumask_var_t sched_domains_tmpmask2;
+static cpumask_var_t sched_domains_tmpmask;
+static cpumask_var_t sched_domains_tmpmask2;
#ifdef CONFIG_SCHED_DEBUG
@@ -398,6 +398,7 @@ DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);
+DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
static void update_top_cache_domain(int cpu)
{
@@ -692,6 +693,7 @@ static void init_overlap_sched_group(struct sched_domain *sd,
sg_span = sched_group_span(sg);
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
+ sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
}
static int
@@ -851,6 +853,7 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
+ sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
return sg;
}
@@ -1061,7 +1064,6 @@ static struct cpumask ***sched_domains_numa_masks;
* SD_SHARE_PKG_RESOURCES - describes shared caches
* SD_NUMA - describes NUMA topologies
* SD_SHARE_POWERDOMAIN - describes shared power domain
- * SD_ASYM_CPUCAPACITY - describes mixed capacity topologies
*
* Odd one out, which beside describing the topology has a quirk also
* prescribes the desired behaviour that goes along with it:
@@ -1073,13 +1075,12 @@ static struct cpumask ***sched_domains_numa_masks;
SD_SHARE_PKG_RESOURCES | \
SD_NUMA | \
SD_ASYM_PACKING | \
- SD_ASYM_CPUCAPACITY | \
SD_SHARE_POWERDOMAIN)
static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl,
const struct cpumask *cpu_map,
- struct sched_domain *child, int cpu)
+ struct sched_domain *child, int dflags, int cpu)
{
struct sd_data *sdd = &tl->data;
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -1100,6 +1101,9 @@ sd_init(struct sched_domain_topology_level *tl,
"wrong sd_flags in topology description\n"))
sd_flags &= ~TOPOLOGY_SD_FLAGS;
+ /* Apply detected topology flags */
+ sd_flags |= dflags;
+
*sd = (struct sched_domain){
.min_interval = sd_weight,
.max_interval = 2*sd_weight,
@@ -1122,7 +1126,7 @@ sd_init(struct sched_domain_topology_level *tl,
| 0*SD_SHARE_CPUCAPACITY
| 0*SD_SHARE_PKG_RESOURCES
| 0*SD_SERIALIZE
- | 0*SD_PREFER_SIBLING
+ | 1*SD_PREFER_SIBLING
| 0*SD_NUMA
| sd_flags
,
@@ -1148,17 +1152,21 @@ sd_init(struct sched_domain_topology_level *tl,
if (sd->flags & SD_ASYM_CPUCAPACITY) {
struct sched_domain *t = sd;
+ /*
+ * Don't attempt to spread across CPUs of different capacities.
+ */
+ if (sd->child)
+ sd->child->flags &= ~SD_PREFER_SIBLING;
+
for_each_lower_domain(t)
t->flags |= SD_BALANCE_WAKE;
}
if (sd->flags & SD_SHARE_CPUCAPACITY) {
- sd->flags |= SD_PREFER_SIBLING;
sd->imbalance_pct = 110;
sd->smt_gain = 1178; /* ~15% */
} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
- sd->flags |= SD_PREFER_SIBLING;
sd->imbalance_pct = 117;
sd->cache_nice_tries = 1;
sd->busy_idx = 2;
@@ -1169,6 +1177,7 @@ sd_init(struct sched_domain_topology_level *tl,
sd->busy_idx = 3;
sd->idle_idx = 2;
+ sd->flags &= ~SD_PREFER_SIBLING;
sd->flags |= SD_SERIALIZE;
if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
sd->flags &= ~(SD_BALANCE_EXEC |
@@ -1178,7 +1187,6 @@ sd_init(struct sched_domain_topology_level *tl,
#endif
} else {
- sd->flags |= SD_PREFER_SIBLING;
sd->cache_nice_tries = 1;
sd->busy_idx = 2;
sd->idle_idx = 1;
@@ -1604,9 +1612,9 @@ static void __sdt_free(const struct cpumask *cpu_map)
static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
const struct cpumask *cpu_map, struct sched_domain_attr *attr,
- struct sched_domain *child, int cpu)
+ struct sched_domain *child, int dflags, int cpu)
{
- struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
+ struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);
if (child) {
sd->level = child->level + 1;
@@ -1633,6 +1641,65 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
}
/*
+ * Find the sched_domain_topology_level where all CPU capacities are visible
+ * to all CPUs.
+ */
+static struct sched_domain_topology_level
+*asym_cpu_capacity_level(const struct cpumask *cpu_map)
+{
+ int i, j, asym_level = 0;
+ bool asym = false;
+ struct sched_domain_topology_level *tl, *asym_tl = NULL;
+ unsigned long cap;
+
+ /* Is there any asymmetry? */
+ cap = arch_scale_cpu_capacity(NULL, cpumask_first(cpu_map));
+
+ for_each_cpu(i, cpu_map) {
+ if (arch_scale_cpu_capacity(NULL, i) != cap) {
+ asym = true;
+ break;
+ }
+ }
+
+ if (!asym)
+ return NULL;
+
+ /*
+	 * Examine the topology from each CPU's point of view to detect the
+	 * lowest sched_domain_topology_level where a highest-capacity CPU is
+ * to everyone.
+ */
+ for_each_cpu(i, cpu_map) {
+ unsigned long max_capacity = arch_scale_cpu_capacity(NULL, i);
+ int tl_id = 0;
+
+ for_each_sd_topology(tl) {
+ if (tl_id < asym_level)
+ goto next_level;
+
+ for_each_cpu_and(j, tl->mask(i), cpu_map) {
+ unsigned long capacity;
+
+ capacity = arch_scale_cpu_capacity(NULL, j);
+
+ if (capacity <= max_capacity)
+ continue;
+
+ max_capacity = capacity;
+ asym_level = tl_id;
+ asym_tl = tl;
+ }
+next_level:
+ tl_id++;
+ }
+ }
+
+ return asym_tl;
+}
+
+/*
* Build sched domains for a given set of CPUs and attach the sched domains
* to the individual CPUs
*/
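
To make the detection above concrete, consider a hypothetical 4+4 big.LITTLE part: CPUs 0-3 have capacity 512, CPUs 4-7 have 1024, the level-0 (MC) mask covers only a CPU's own cluster, and level 1 (DIE) spans all eight. A little CPU first sees a higher capacity at DIE, so DIE becomes the asymmetric level. A runnable toy simulation of that walk (illustrative only):

#include <stdio.h>

static const unsigned long cap[8] = { 512, 512, 512, 512,
				      1024, 1024, 1024, 1024 };

/* mask(level, cpu) -> bitmask of CPUs visible at that topology level */
static unsigned mask(int level, int cpu)
{
	if (level == 0)
		return cpu < 4 ? 0x0fu : 0xf0u;    /* own cluster only */
	return 0xffu;                              /* whole die        */
}

int main(void)
{
	int asym_level = 0, i, j, tl;

	for (i = 0; i < 8; i++) {
		unsigned long max_cap = cap[i];

		/* levels below the current asym_level are skipped */
		for (tl = asym_level; tl < 2; tl++) {
			for (j = 0; j < 8; j++) {
				if (((mask(tl, i) >> j) & 1) &&
				    cap[j] > max_cap) {
					max_cap = cap[j];
					asym_level = tl;
				}
			}
		}
	}
	printf("asym level = %d\n", asym_level);   /* prints 1: the DIE level */
	return 0;
}
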
@@ -1644,18 +1711,30 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
struct s_data d;
struct rq *rq = NULL;
int i, ret = -ENOMEM;
+ struct sched_domain_topology_level *tl_asym;
+ bool has_asym = false;
alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
if (alloc_state != sa_rootdomain)
goto error;
+ tl_asym = asym_cpu_capacity_level(cpu_map);
+
/* Set up domains for CPUs specified by the cpu_map: */
for_each_cpu(i, cpu_map) {
struct sched_domain_topology_level *tl;
sd = NULL;
for_each_sd_topology(tl) {
- sd = build_sched_domain(tl, cpu_map, attr, sd, i);
+ int dflags = 0;
+
+ if (tl == tl_asym) {
+ dflags |= SD_ASYM_CPUCAPACITY;
+ has_asym = true;
+ }
+
+ sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);
+
if (tl == sched_domain_topology)
*per_cpu_ptr(d.sd, i) = sd;
if (tl->flags & SDTL_OVERLAP)
@@ -1704,6 +1783,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
}
rcu_read_unlock();
+ if (has_asym)
+ static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
+
if (rq && sched_debug_enabled) {
pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index fd023ac24e10..f2ae2324c232 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -195,7 +195,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
READ_ONCE(current->seccomp.filter);
/* Ensure unexpected behavior doesn't result in failing open. */
- if (unlikely(WARN_ON(f == NULL)))
+ if (WARN_ON(f == NULL))
return SECCOMP_RET_KILL_PROCESS;
if (!sd) {
@@ -297,7 +297,7 @@ static inline pid_t seccomp_can_sync_threads(void)
/* Return the first thread that cannot be synchronized. */
failed = task_pid_vnr(thread);
/* If the pid cannot be resolved, then return -ESRCH */
- if (unlikely(WARN_ON(failed == 0)))
+ if (WARN_ON(failed == 0))
failed = -ESRCH;
return failed;
}
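
The unlikely() wrappers are dropped because WARN_ON() already annotates its condition. An abridged sketch of the generic definition's shape (not the verbatim kernel macro) showing why the outer wrapper was redundant:

#define unlikely(x)	__builtin_expect(!!(x), 0)

#define WARN_ON_SKETCH(condition) ({			\
	int __ret_warn_on = !!(condition);		\
	if (unlikely(__ret_warn_on))			\
		/* printk + stack dump here */;		\
	unlikely(__ret_warn_on);			\
})
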
@@ -522,7 +522,7 @@ void put_seccomp_filter(struct task_struct *tsk)
__put_seccomp_filter(tsk->seccomp.filter);
}
-static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
+static void seccomp_init_siginfo(kernel_siginfo_t *info, int syscall, int reason)
{
clear_siginfo(info);
info->si_signo = SIGSYS;
@@ -542,7 +542,7 @@ static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
*/
static void seccomp_send_sigsys(int syscall, int reason)
{
- struct siginfo info;
+ struct kernel_siginfo info;
seccomp_init_siginfo(&info, syscall, reason);
force_sig_info(SIGSYS, &info, current);
}
@@ -747,7 +747,7 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
/* Dump core only if this is the last remaining thread. */
if (action == SECCOMP_RET_KILL_PROCESS ||
get_nr_threads(current) == 1) {
- siginfo_t info;
+ kernel_siginfo_t info;
/* Show the original registers in the dump. */
syscall_rollback(current, task_pt_regs(current));
diff --git a/kernel/signal.c b/kernel/signal.c
index 5843c541fda9..17565240b1c6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -78,6 +78,10 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
handler = sig_handler(t, sig);
+ /* SIGKILL and SIGSTOP may not be sent to the global init */
+ if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
+ return true;
+
if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
handler == SIG_DFL && !(force && sig_kernel_only(sig)))
return true;
@@ -172,6 +176,7 @@ void recalc_sigpending(void)
clear_thread_flag(TIF_SIGPENDING);
}
+EXPORT_SYMBOL(recalc_sigpending);
void calculate_sigpending(void)
{
@@ -462,6 +467,7 @@ void flush_signals(struct task_struct *t)
flush_sigqueue(&t->signal->shared_pending);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
+EXPORT_SYMBOL(flush_signals);
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
@@ -543,7 +549,7 @@ bool unhandled_signal(struct task_struct *tsk, int sig)
return !tsk->ptrace;
}
-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
+static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
bool *resched_timer)
{
struct sigqueue *q, *first = NULL;
@@ -589,7 +595,7 @@ still_pending:
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
- siginfo_t *info, bool *resched_timer)
+ kernel_siginfo_t *info, bool *resched_timer)
{
int sig = next_signal(pending, mask);
@@ -604,7 +610,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
*
* All callers have to hold the siglock.
*/
-int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
bool resched_timer = false;
int signr;
@@ -680,6 +686,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
#endif
return signr;
}
+EXPORT_SYMBOL_GPL(dequeue_signal);
/*
* Tell a process that it has a new active signal..
@@ -730,12 +737,12 @@ static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
}
}
-static inline int is_si_special(const struct siginfo *info)
+static inline int is_si_special(const struct kernel_siginfo *info)
{
- return info <= SEND_SIG_FORCED;
+ return info <= SEND_SIG_PRIV;
}
-static inline bool si_fromuser(const struct siginfo *info)
+static inline bool si_fromuser(const struct kernel_siginfo *info)
{
return info == SEND_SIG_NOINFO ||
(!is_si_special(info) && SI_FROMUSER(info));
@@ -760,7 +767,7 @@ static bool kill_ok_by_cred(struct task_struct *t)
* Bad permissions for sending the signal
* - the caller must hold the RCU read lock
*/
-static int check_kill_permission(int sig, struct siginfo *info,
+static int check_kill_permission(int sig, struct kernel_siginfo *info,
struct task_struct *t)
{
struct pid *sid;
@@ -1003,7 +1010,7 @@ static inline bool legacy_queue(struct sigpending *signals, int sig)
}
#ifdef CONFIG_USER_NS
-static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
+static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
if (current_user_ns() == task_cred_xxx(t, user_ns))
return;
@@ -1017,13 +1024,13 @@ static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_str
rcu_read_unlock();
}
#else
-static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
+static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
{
return;
}
#endif
-static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
+static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
enum pid_type type, int from_ancestor_ns)
{
struct sigpending *pending;
@@ -1035,7 +1042,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
result = TRACE_SIGNAL_IGNORED;
if (!prepare_signal(sig, t,
- from_ancestor_ns || (info == SEND_SIG_FORCED)))
+ from_ancestor_ns || (info == SEND_SIG_PRIV)))
goto ret;
pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
@@ -1050,10 +1057,10 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
result = TRACE_SIGNAL_DELIVERED;
/*
- * fast-pathed signals for kernel-internal things like SIGSTOP
- * or SIGKILL.
+	 * Skip useless siginfo allocation for SIGKILL, SIGSTOP,
+ * and kernel threads.
*/
- if (info == SEND_SIG_FORCED)
+ if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD))
goto out_set;
/*
@@ -1143,7 +1150,7 @@ ret:
return ret;
}
-static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
enum pid_type type)
{
int from_ancestor_ns = 0;
@@ -1190,18 +1197,12 @@ static int __init setup_print_fatal_signals(char *str)
__setup("print-fatal-signals=", setup_print_fatal_signals);
int
-__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
return send_signal(sig, info, p, PIDTYPE_TGID);
}
-static int
-specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
-{
- return send_signal(sig, info, t, PIDTYPE_PID);
-}
-
-int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
+int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
enum pid_type type)
{
unsigned long flags;
@@ -1227,7 +1228,7 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
int
-force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+force_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *t)
{
unsigned long int flags;
int ret, blocked, ignored;
@@ -1250,7 +1251,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
*/
if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
t->signal->flags &= ~SIGNAL_UNKILLABLE;
- ret = specific_send_sig_info(sig, info, t);
+ ret = send_signal(sig, info, t, PIDTYPE_PID);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
return ret;
@@ -1315,8 +1316,8 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
/*
* send signal info to all the members of a group
*/
-int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
- enum pid_type type)
+int group_send_sig_info(int sig, struct kernel_siginfo *info,
+ struct task_struct *p, enum pid_type type)
{
int ret;
@@ -1335,7 +1336,7 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
* control characters do (^C, ^Z etc)
* - the caller must hold at least a readlock on tasklist_lock
*/
-int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
+int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
struct task_struct *p = NULL;
int retval, success;
@@ -1350,7 +1351,7 @@ int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
return success ? 0 : retval;
}
-int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
+int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
int error = -ESRCH;
struct task_struct *p;
@@ -1372,7 +1373,7 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
}
}
-static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
int error;
rcu_read_lock();
@@ -1393,7 +1394,7 @@ static inline bool kill_as_cred_perm(const struct cred *cred,
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
-int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
+int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
const struct cred *cred)
{
int ret = -EINVAL;
@@ -1437,7 +1438,7 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
* is probably wrong. Should make it like BSD or SYSV.
*/
-static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
+static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
int ret;
@@ -1481,7 +1482,7 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
* These are for backward compatibility with the rest of the kernel source.
*/
-int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
/*
* Make sure legacy kernel users don't send in bad values
@@ -1492,6 +1493,7 @@ int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
+EXPORT_SYMBOL(send_sig_info);
#define __si_special(priv) \
((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
@@ -1501,11 +1503,13 @@ send_sig(int sig, struct task_struct *p, int priv)
{
return send_sig_info(sig, __si_special(priv), p);
}
+EXPORT_SYMBOL(send_sig);
void force_sig(int sig, struct task_struct *p)
{
force_sig_info(sig, SEND_SIG_PRIV, p);
}
+EXPORT_SYMBOL(force_sig);
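The exports above move next to their definitions, so modules can keep calling these helpers with the shrunken siginfo. A hedged sketch, not from this patch, of a driver delivering a signal to a task it holds a reference on, using the re-exported send_sig_info() and the new struct kernel_siginfo:

#include <linux/sched/signal.h>

/*
 * Illustrative helper: the task pointer and signal choice are
 * assumptions, only the send_sig_info()/clear_siginfo() calls and the
 * kernel_siginfo type come from this series.
 */
static int notify_task(struct task_struct *task)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGUSR1;
	info.si_code = SI_KERNEL;

	return send_sig_info(SIGUSR1, &info, task);
}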
/*
* When things go south during signal handling, we
@@ -1529,7 +1533,7 @@ int force_sig_fault(int sig, int code, void __user *addr
___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
, struct task_struct *t)
{
- struct siginfo info;
+ struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = sig;
@@ -1552,7 +1556,7 @@ int send_sig_fault(int sig, int code, void __user *addr
___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
, struct task_struct *t)
{
- struct siginfo info;
+ struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = sig;
@@ -1572,7 +1576,7 @@ int send_sig_fault(int sig, int code, void __user *addr
int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
- struct siginfo info;
+ struct kernel_siginfo info;
WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
clear_siginfo(&info);
@@ -1586,7 +1590,7 @@ int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct
int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
- struct siginfo info;
+ struct kernel_siginfo info;
WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
clear_siginfo(&info);
@@ -1601,7 +1605,7 @@ EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
- struct siginfo info;
+ struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = SIGSEGV;
@@ -1616,7 +1620,7 @@ int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
- struct siginfo info;
+ struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = SIGSEGV;
@@ -1633,7 +1637,7 @@ int force_sig_pkuerr(void __user *addr, u32 pkey)
*/
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
- struct siginfo info;
+ struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = SIGTRAP;
@@ -1762,7 +1766,7 @@ ret:
*/
bool do_notify_parent(struct task_struct *tsk, int sig)
{
- struct siginfo info;
+ struct kernel_siginfo info;
unsigned long flags;
struct sighand_struct *psig;
bool autoreap = false;
@@ -1867,7 +1871,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
static void do_notify_parent_cldstop(struct task_struct *tsk,
bool for_ptracer, int why)
{
- struct siginfo info;
+ struct kernel_siginfo info;
unsigned long flags;
struct task_struct *parent;
struct sighand_struct *sighand;
@@ -1967,7 +1971,7 @@ static bool sigkill_pending(struct task_struct *tsk)
* If we actually decide not to stop at all because the tracer
* is gone, we keep current->exit_code unless clear_code.
*/
-static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
+static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
__releases(&current->sighand->siglock)
__acquires(&current->sighand->siglock)
{
@@ -2104,7 +2108,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
static void ptrace_do_notify(int signr, int exit_code, int why)
{
- siginfo_t info;
+ kernel_siginfo_t info;
clear_siginfo(&info);
info.si_signo = signr;
@@ -2285,7 +2289,7 @@ static void do_jobctl_trap(void)
}
}
-static int ptrace_signal(int signr, siginfo_t *info)
+static int ptrace_signal(int signr, kernel_siginfo_t *info)
{
/*
* We do not check sig_kernel_stop(signr) but set this marker
@@ -2326,7 +2330,7 @@ static int ptrace_signal(int signr, siginfo_t *info)
/* If the (new) signal is now blocked, requeue it. */
if (sigismember(&current->blocked, signr)) {
- specific_send_sig_info(signr, info, current);
+ send_signal(signr, info, current, PIDTYPE_PID);
signr = 0;
}
@@ -2636,14 +2640,6 @@ out:
}
}
-EXPORT_SYMBOL(recalc_sigpending);
-EXPORT_SYMBOL_GPL(dequeue_signal);
-EXPORT_SYMBOL(flush_signals);
-EXPORT_SYMBOL(force_sig);
-EXPORT_SYMBOL(send_sig);
-EXPORT_SYMBOL(send_sig_info);
-EXPORT_SYMBOL(sigprocmask);
-
/*
* System call entry points.
*/
@@ -2737,6 +2733,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
__set_current_blocked(&newset);
return 0;
}
+EXPORT_SYMBOL(sigprocmask);
/**
* sys_rt_sigprocmask - change the list of currently blocked signals
@@ -2847,27 +2844,48 @@ COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
}
#endif
-enum siginfo_layout siginfo_layout(int sig, int si_code)
+static const struct {
+ unsigned char limit, layout;
+} sig_sicodes[] = {
+ [SIGILL] = { NSIGILL, SIL_FAULT },
+ [SIGFPE] = { NSIGFPE, SIL_FAULT },
+ [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
+ [SIGBUS] = { NSIGBUS, SIL_FAULT },
+ [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
+#if defined(SIGEMT)
+ [SIGEMT] = { NSIGEMT, SIL_FAULT },
+#endif
+ [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
+ [SIGPOLL] = { NSIGPOLL, SIL_POLL },
+ [SIGSYS] = { NSIGSYS, SIL_SYS },
+};
+
+static bool known_siginfo_layout(unsigned sig, int si_code)
+{
+ if (si_code == SI_KERNEL)
+ return true;
+	else if (si_code > SI_USER) {
+ if (sig_specific_sicodes(sig)) {
+ if (si_code <= sig_sicodes[sig].limit)
+ return true;
+ }
+ else if (si_code <= NSIGPOLL)
+ return true;
+ }
+ else if (si_code >= SI_DETHREAD)
+ return true;
+ else if (si_code == SI_ASYNCNL)
+ return true;
+ return false;
+}
+
+enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
{
enum siginfo_layout layout = SIL_KILL;
if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
- static const struct {
- unsigned char limit, layout;
- } filter[] = {
- [SIGILL] = { NSIGILL, SIL_FAULT },
- [SIGFPE] = { NSIGFPE, SIL_FAULT },
- [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
- [SIGBUS] = { NSIGBUS, SIL_FAULT },
- [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
-#if defined(SIGEMT) && defined(NSIGEMT)
- [SIGEMT] = { NSIGEMT, SIL_FAULT },
-#endif
- [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
- [SIGPOLL] = { NSIGPOLL, SIL_POLL },
- [SIGSYS] = { NSIGSYS, SIL_SYS },
- };
- if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
- layout = filter[sig].layout;
+ if ((sig < ARRAY_SIZE(sig_sicodes)) &&
+ (si_code <= sig_sicodes[sig].limit)) {
+ layout = sig_sicodes[sig].layout;
/* Handle the exceptions */
if ((sig == SIGBUS) &&
(si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
@@ -2892,22 +2910,69 @@ enum siginfo_layout siginfo_layout(int sig, int si_code)
return layout;
}
-int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
+static inline char __user *si_expansion(const siginfo_t __user *info)
+{
+ return ((char __user *)info) + sizeof(struct kernel_siginfo);
+}
+
+int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
{
- if (copy_to_user(to, from , sizeof(struct siginfo)))
+ char __user *expansion = si_expansion(to);
+ if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
+ return -EFAULT;
+ if (clear_user(expansion, SI_EXPANSION_SIZE))
return -EFAULT;
return 0;
}
+static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
+ const siginfo_t __user *from)
+{
+ if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
+ char __user *expansion = si_expansion(from);
+ char buf[SI_EXPANSION_SIZE];
+ int i;
+ /*
+ * An unknown si_code might need more than
+ * sizeof(struct kernel_siginfo) bytes. Verify all of the
+ * extra bytes are 0. This guarantees copy_siginfo_to_user
+ * will return this data to userspace exactly.
+ */
+ if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
+ return -EFAULT;
+ for (i = 0; i < SI_EXPANSION_SIZE; i++) {
+ if (buf[i] != 0)
+ return -E2BIG;
+ }
+ }
+ return 0;
+}
+
+static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
+ const siginfo_t __user *from)
+{
+ if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
+ return -EFAULT;
+ to->si_signo = signo;
+ return post_copy_siginfo_from_user(to, from);
+}
+
+int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
+{
+ if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
+ return -EFAULT;
+ return post_copy_siginfo_from_user(to, from);
+}
+
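The new copy-in path changes what userspace can push through rt_sigqueueinfo(): known si_code layouts are copied as before, but an unrecognized code must have an all-zero "expansion" area (the bytes between sizeof(struct kernel_siginfo) and sizeof(siginfo_t)). A hedged userspace probe of that rule; the signal and payload values are illustrative:

#define _GNU_SOURCE
#include <signal.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	siginfo_t info;

	signal(SIGUSR1, SIG_IGN);

	memset(&info, 0, sizeof(info));
	info.si_code = SI_QUEUE;	/* known layout: accepted as before */
	info.si_pid = getpid();
	info.si_uid = getuid();
	info.si_value.sival_int = 42;

	/*
	 * With an unrecognized si_code, any nonzero byte past
	 * sizeof(struct kernel_siginfo) now fails with E2BIG instead of
	 * being silently dropped on the next copy back to userspace.
	 */
	return syscall(SYS_rt_sigqueueinfo, getpid(), SIGUSR1, &info);
}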
#ifdef CONFIG_COMPAT
int copy_siginfo_to_user32(struct compat_siginfo __user *to,
- const struct siginfo *from)
+ const struct kernel_siginfo *from)
#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
{
return __copy_siginfo_to_user32(to, from, in_x32_syscall());
}
int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
- const struct siginfo *from, bool x32_ABI)
+ const struct kernel_siginfo *from, bool x32_ABI)
#endif
{
struct compat_siginfo new;
@@ -2991,88 +3056,106 @@ int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
return 0;
}
-int copy_siginfo_from_user32(struct siginfo *to,
- const struct compat_siginfo __user *ufrom)
+static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
+ const struct compat_siginfo *from)
{
- struct compat_siginfo from;
-
- if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
- return -EFAULT;
-
clear_siginfo(to);
- to->si_signo = from.si_signo;
- to->si_errno = from.si_errno;
- to->si_code = from.si_code;
- switch(siginfo_layout(from.si_signo, from.si_code)) {
+ to->si_signo = from->si_signo;
+ to->si_errno = from->si_errno;
+ to->si_code = from->si_code;
+ switch(siginfo_layout(from->si_signo, from->si_code)) {
case SIL_KILL:
- to->si_pid = from.si_pid;
- to->si_uid = from.si_uid;
+ to->si_pid = from->si_pid;
+ to->si_uid = from->si_uid;
break;
case SIL_TIMER:
- to->si_tid = from.si_tid;
- to->si_overrun = from.si_overrun;
- to->si_int = from.si_int;
+ to->si_tid = from->si_tid;
+ to->si_overrun = from->si_overrun;
+ to->si_int = from->si_int;
break;
case SIL_POLL:
- to->si_band = from.si_band;
- to->si_fd = from.si_fd;
+ to->si_band = from->si_band;
+ to->si_fd = from->si_fd;
break;
case SIL_FAULT:
- to->si_addr = compat_ptr(from.si_addr);
+ to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from.si_trapno;
+ to->si_trapno = from->si_trapno;
#endif
break;
case SIL_FAULT_MCEERR:
- to->si_addr = compat_ptr(from.si_addr);
+ to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from.si_trapno;
+ to->si_trapno = from->si_trapno;
#endif
- to->si_addr_lsb = from.si_addr_lsb;
+ to->si_addr_lsb = from->si_addr_lsb;
break;
case SIL_FAULT_BNDERR:
- to->si_addr = compat_ptr(from.si_addr);
+ to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from.si_trapno;
+ to->si_trapno = from->si_trapno;
#endif
- to->si_lower = compat_ptr(from.si_lower);
- to->si_upper = compat_ptr(from.si_upper);
+ to->si_lower = compat_ptr(from->si_lower);
+ to->si_upper = compat_ptr(from->si_upper);
break;
case SIL_FAULT_PKUERR:
- to->si_addr = compat_ptr(from.si_addr);
+ to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
- to->si_trapno = from.si_trapno;
+ to->si_trapno = from->si_trapno;
#endif
- to->si_pkey = from.si_pkey;
+ to->si_pkey = from->si_pkey;
break;
case SIL_CHLD:
- to->si_pid = from.si_pid;
- to->si_uid = from.si_uid;
- to->si_status = from.si_status;
+ to->si_pid = from->si_pid;
+ to->si_uid = from->si_uid;
+ to->si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
if (in_x32_syscall()) {
- to->si_utime = from._sifields._sigchld_x32._utime;
- to->si_stime = from._sifields._sigchld_x32._stime;
+ to->si_utime = from->_sifields._sigchld_x32._utime;
+ to->si_stime = from->_sifields._sigchld_x32._stime;
} else
#endif
{
- to->si_utime = from.si_utime;
- to->si_stime = from.si_stime;
+ to->si_utime = from->si_utime;
+ to->si_stime = from->si_stime;
}
break;
case SIL_RT:
- to->si_pid = from.si_pid;
- to->si_uid = from.si_uid;
- to->si_int = from.si_int;
+ to->si_pid = from->si_pid;
+ to->si_uid = from->si_uid;
+ to->si_int = from->si_int;
break;
case SIL_SYS:
- to->si_call_addr = compat_ptr(from.si_call_addr);
- to->si_syscall = from.si_syscall;
- to->si_arch = from.si_arch;
+ to->si_call_addr = compat_ptr(from->si_call_addr);
+ to->si_syscall = from->si_syscall;
+ to->si_arch = from->si_arch;
break;
}
return 0;
}
+
+static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
+ const struct compat_siginfo __user *ufrom)
+{
+ struct compat_siginfo from;
+
+ if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
+ return -EFAULT;
+
+ from.si_signo = signo;
+ return post_copy_siginfo_from_user32(to, &from);
+}
+
+int copy_siginfo_from_user32(struct kernel_siginfo *to,
+ const struct compat_siginfo __user *ufrom)
+{
+ struct compat_siginfo from;
+
+ if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
+ return -EFAULT;
+
+ return post_copy_siginfo_from_user32(to, &from);
+}
#endif /* CONFIG_COMPAT */
/**
@@ -3081,8 +3164,8 @@ int copy_siginfo_from_user32(struct siginfo *to,
* @info: if non-null, the signal's siginfo is returned here
* @ts: upper bound on process time suspension
*/
-static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
- const struct timespec *ts)
+static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
+ const struct timespec64 *ts)
{
ktime_t *to = NULL, timeout = KTIME_MAX;
struct task_struct *tsk = current;
@@ -3090,9 +3173,9 @@ static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
int sig, ret = 0;
if (ts) {
- if (!timespec_valid(ts))
+ if (!timespec64_valid(ts))
return -EINVAL;
- timeout = timespec_to_ktime(*ts);
+ timeout = timespec64_to_ktime(*ts);
to = &timeout;
}
@@ -3140,12 +3223,13 @@ static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
* @sigsetsize: size of sigset_t type
*/
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
- siginfo_t __user *, uinfo, const struct timespec __user *, uts,
+ siginfo_t __user *, uinfo,
+ const struct __kernel_timespec __user *, uts,
size_t, sigsetsize)
{
sigset_t these;
- struct timespec ts;
- siginfo_t info;
+ struct timespec64 ts;
+ kernel_siginfo_t info;
int ret;
/* XXX: Don't preclude handling different sized sigset_t's. */
@@ -3156,7 +3240,7 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
return -EFAULT;
if (uts) {
- if (copy_from_user(&ts, uts, sizeof(ts)))
+ if (get_timespec64(&ts, uts))
return -EFAULT;
}
@@ -3173,11 +3257,11 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
struct compat_siginfo __user *, uinfo,
- struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
+ struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
sigset_t s;
- struct timespec t;
- siginfo_t info;
+ struct timespec64 t;
+ kernel_siginfo_t info;
long ret;
if (sigsetsize != sizeof(sigset_t))
@@ -3187,7 +3271,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
return -EFAULT;
if (uts) {
- if (compat_get_timespec(&t, uts))
+ if (get_old_timespec32(&t, uts))
return -EFAULT;
}
@@ -3209,7 +3293,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
*/
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
- struct siginfo info;
+ struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = sig;
@@ -3222,7 +3306,7 @@ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
}
static int
-do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
struct task_struct *p;
int error = -ESRCH;
@@ -3253,7 +3337,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
- struct siginfo info;
+ struct kernel_siginfo info;
clear_siginfo(&info);
info.si_signo = sig;
@@ -3300,7 +3384,7 @@ SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
return do_tkill(0, pid, sig);
}
-static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
+static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
@@ -3309,8 +3393,6 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
(task_pid_vnr(current) != pid))
return -EPERM;
- info->si_signo = sig;
-
/* POSIX.1b doesn't mention process groups. */
return kill_proc_info(sig, info, pid);
}
@@ -3324,9 +3406,10 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
siginfo_t __user *, uinfo)
{
- siginfo_t info;
- if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
- return -EFAULT;
+ kernel_siginfo_t info;
+ int ret = __copy_siginfo_from_user(sig, &info, uinfo);
+ if (unlikely(ret))
+ return ret;
return do_rt_sigqueueinfo(pid, sig, &info);
}
@@ -3336,15 +3419,15 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
int, sig,
struct compat_siginfo __user *, uinfo)
{
- siginfo_t info;
- int ret = copy_siginfo_from_user32(&info, uinfo);
+ kernel_siginfo_t info;
+ int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
if (unlikely(ret))
return ret;
return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
-static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
+static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
/* This is only valid for single tasks */
if (pid <= 0 || tgid <= 0)
@@ -3357,19 +3440,16 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
(task_pid_vnr(current) != pid))
return -EPERM;
- info->si_signo = sig;
-
return do_send_specific(tgid, pid, sig, info);
}
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
siginfo_t __user *, uinfo)
{
- siginfo_t info;
-
- if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
- return -EFAULT;
-
+ kernel_siginfo_t info;
+ int ret = __copy_siginfo_from_user(sig, &info, uinfo);
+ if (unlikely(ret))
+ return ret;
return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
@@ -3380,10 +3460,10 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
int, sig,
struct compat_siginfo __user *, uinfo)
{
- siginfo_t info;
-
- if (copy_siginfo_from_user32(&info, uinfo))
- return -EFAULT;
+ kernel_siginfo_t info;
+ int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
+ if (unlikely(ret))
+ return ret;
return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
@@ -3460,7 +3540,8 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
}
static int
-do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
+do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
+ size_t min_ss_size)
{
struct task_struct *t = current;
@@ -3490,7 +3571,7 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
ss_size = 0;
ss_sp = NULL;
} else {
- if (unlikely(ss_size < MINSIGSTKSZ))
+ if (unlikely(ss_size < min_ss_size))
return -ENOMEM;
}
@@ -3508,7 +3589,8 @@ SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
return -EFAULT;
err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
- current_user_stack_pointer());
+ current_user_stack_pointer(),
+ MINSIGSTKSZ);
if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
err = -EFAULT;
return err;
@@ -3519,7 +3601,8 @@ int restore_altstack(const stack_t __user *uss)
stack_t new;
if (copy_from_user(&new, uss, sizeof(stack_t)))
return -EFAULT;
- (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
+ (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
+ MINSIGSTKSZ);
/* squash all but EFAULT for now */
return 0;
}
@@ -3553,7 +3636,8 @@ static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
uss.ss_size = uss32.ss_size;
}
ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
- compat_user_stack_pointer());
+ compat_user_stack_pointer(),
+ COMPAT_MINSIGSTKSZ);
if (ret >= 0 && uoss_ptr) {
compat_stack_t old;
memset(&old, 0, sizeof(old));
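do_sigaltstack() now takes the minimum stack size from the caller, so a 32-bit compat task is checked against COMPAT_MINSIGSTKSZ rather than the native, possibly larger, MINSIGSTKSZ. A minimal native-ABI sketch of the invariant being enforced:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	stack_t ss = { 0 };

	ss.ss_sp = malloc(MINSIGSTKSZ);
	ss.ss_size = MINSIGSTKSZ;	/* any smaller size fails with ENOMEM */

	if (!ss.ss_sp || sigaltstack(&ss, NULL) != 0)
		perror("sigaltstack");
	return 0;
}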
@@ -3962,13 +4046,57 @@ __weak const char *arch_vma_name(struct vm_area_struct *vma)
return NULL;
}
-void __init signals_init(void)
+static inline void siginfo_buildtime_checks(void)
{
- /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
- BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
- != offsetof(struct siginfo, _sifields._pad));
BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
+ /* Verify the offsets in the two siginfos match */
+#define CHECK_OFFSET(field) \
+ BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
+
+ /* kill */
+ CHECK_OFFSET(si_pid);
+ CHECK_OFFSET(si_uid);
+
+ /* timer */
+ CHECK_OFFSET(si_tid);
+ CHECK_OFFSET(si_overrun);
+ CHECK_OFFSET(si_value);
+
+ /* rt */
+ CHECK_OFFSET(si_pid);
+ CHECK_OFFSET(si_uid);
+ CHECK_OFFSET(si_value);
+
+ /* sigchld */
+ CHECK_OFFSET(si_pid);
+ CHECK_OFFSET(si_uid);
+ CHECK_OFFSET(si_status);
+ CHECK_OFFSET(si_utime);
+ CHECK_OFFSET(si_stime);
+
+ /* sigfault */
+ CHECK_OFFSET(si_addr);
+ CHECK_OFFSET(si_addr_lsb);
+ CHECK_OFFSET(si_lower);
+ CHECK_OFFSET(si_upper);
+ CHECK_OFFSET(si_pkey);
+
+ /* sigpoll */
+ CHECK_OFFSET(si_band);
+ CHECK_OFFSET(si_fd);
+
+ /* sigsys */
+ CHECK_OFFSET(si_call_addr);
+ CHECK_OFFSET(si_syscall);
+ CHECK_OFFSET(si_arch);
+#undef CHECK_OFFSET
+}
+
+void __init signals_init(void)
+{
+ siginfo_buildtime_checks();
+
sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
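siginfo_buildtime_checks() pins every user-visible field of the old siginfo_t to the same offset in the new kernel_siginfo_t, so the shrink cannot silently move anything. Outside the kernel the same offset-pinning idiom can be written with C11 _Static_assert; a toy sketch with made-up structs standing in for the two siginfo types:

#include <stddef.h>

struct big   { int a; int b; char expansion[64]; };
struct small { int a; int b; };

#define CHECK_OFFSET(field) \
	_Static_assert(offsetof(struct big, field) == \
		       offsetof(struct small, field), \
		       "offset of " #field " diverged")

CHECK_OFFSET(a);
CHECK_OFFSET(b);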
diff --git a/kernel/smp.c b/kernel/smp.c
index d86eec5f51c1..163c451af42e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -669,9 +669,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
* You must not call this function with disabled interrupts or
* from a hardware interrupt handler or from a bottom half handler.
*/
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
smp_call_func_t func, void *info, bool wait,
- gfp_t gfp_flags)
+ gfp_t gfp_flags, const struct cpumask *mask)
{
cpumask_var_t cpus;
int cpu, ret;
@@ -680,9 +680,9 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
preempt_disable();
- for_each_online_cpu(cpu)
+ for_each_cpu(cpu, mask)
if (cond_func(cpu, info))
- cpumask_set_cpu(cpu, cpus);
+ __cpumask_set_cpu(cpu, cpus);
on_each_cpu_mask(cpus, func, info, wait);
preempt_enable();
free_cpumask_var(cpus);
@@ -692,7 +692,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
* just have to IPI them one by one.
*/
preempt_disable();
- for_each_online_cpu(cpu)
+ for_each_cpu(cpu, mask)
if (cond_func(cpu, info)) {
ret = smp_call_function_single(cpu, func,
info, wait);
@@ -701,6 +701,15 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
preempt_enable();
}
}
+EXPORT_SYMBOL(on_each_cpu_cond_mask);
+
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags)
+{
+ on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
+ cpu_online_mask);
+}
EXPORT_SYMBOL(on_each_cpu_cond);
static void do_nothing(void *unused)
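on_each_cpu_cond_mask() is the old conditional-IPI helper with the CPU walk made caller-selectable; on_each_cpu_cond() becomes a wrapper passing cpu_online_mask. A hedged sketch of a caller — the per-CPU flag and the work done in the IPI are illustrative, only the function signature comes from this patch:

#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, pending_work);

static bool cond_has_work(int cpu, void *info)
{
	return per_cpu(pending_work, cpu) != 0;
}

static void do_flush(void *info)
{
	/* Runs on each selected CPU, in IPI context. */
	this_cpu_write(pending_work, 0);
}

static void flush_cpus(const struct cpumask *mask)
{
	on_each_cpu_cond_mask(cond_has_work, do_flush, NULL, true,
			      GFP_KERNEL, mask);
}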
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 6f584861d329..d28813306b2c 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -257,9 +257,9 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
int softirq_bit;
/*
- * Mask out PF_MEMALLOC s current task context is borrowed for the
- * softirq. A softirq handled such as network RX might set PF_MEMALLOC
- * again if the socket is related to swap
+ * Mask out PF_MEMALLOC as the current task context is borrowed for the
+ * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
+ * again if the socket is related to swapping.
*/
current->flags &= ~PF_MEMALLOC;
@@ -301,7 +301,8 @@ restart:
pending >>= softirq_bit;
}
- rcu_bh_qs();
+ if (__this_cpu_read(ksoftirqd) == current)
+ rcu_softirq_qs();
local_irq_disable();
pending = local_softirq_pending();
diff --git a/kernel/sys.c b/kernel/sys.c
index cf5c67533ff1..123bd73046ec 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -71,9 +71,6 @@
#include <asm/io.h>
#include <asm/unistd.h>
-/* Hardening for Spectre-v1 */
-#include <linux/nospec.h>
-
#include "uid16.h"
#ifndef SET_UNALIGN_CTL
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 78eabc41eaa6..58b981f4bb5d 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -12,6 +12,10 @@ config CLOCKSOURCE_WATCHDOG
config ARCH_CLOCKSOURCE_DATA
bool
+# Architecture has extra clocksource init called from registration
+config ARCH_CLOCKSOURCE_INIT
+ bool
+
# Clocksources require validation of the clocksource against the last
# cycle update - x86/TSC misfeature
config CLOCKSOURCE_VALIDATE_LAST_CYCLE
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 0e6e97a01942..ffe081623aec 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -937,6 +937,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
unsigned long flags;
+ clocksource_arch_init(cs);
+
/* Initialize mult/shift and max_idle_ns */
__clocksource_update_freq_scale(cs, scale, freq);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e1a549c9e399..9cdd74bd2d27 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1660,7 +1660,7 @@ int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
switch(restart->nanosleep.type) {
#ifdef CONFIG_COMPAT_32BIT_TIME
case TT_COMPAT:
- if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
+ if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
return -EFAULT;
break;
#endif
@@ -1780,12 +1780,12 @@ SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
- struct compat_timespec __user *, rmtp)
+COMPAT_SYSCALL_DEFINE2(nanosleep, struct old_timespec32 __user *, rqtp,
+ struct old_timespec32 __user *, rmtp)
{
struct timespec64 tu;
- if (compat_get_timespec64(&tu, rqtp))
+ if (get_old_timespec32(&tu, rqtp))
return -EFAULT;
if (!timespec64_valid(&tu))
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index 2c6847d5d69b..989ccf028bde 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -162,20 +162,20 @@ COMPAT_SYS_NI(setitimer);
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
struct timespec64 new_tp;
if (which_clock != CLOCK_REALTIME)
return -EINVAL;
- if (compat_get_timespec64(&new_tp, tp))
+ if (get_old_timespec32(&new_tp, tp))
return -EFAULT;
return do_sys_settimeofday64(&new_tp, NULL);
}
COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
int ret;
struct timespec64 kernel_tp;
@@ -184,13 +184,13 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
if (ret)
return ret;
- if (compat_put_timespec64(&kernel_tp, tp))
+ if (put_old_timespec32(&kernel_tp, tp))
return -EFAULT;
return 0;
}
COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
struct timespec64 rtn_tp = {
.tv_sec = 0,
@@ -201,7 +201,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
case CLOCK_REALTIME:
case CLOCK_MONOTONIC:
case CLOCK_BOOTTIME:
- if (compat_put_timespec64(&rtn_tp, tp))
+ if (put_old_timespec32(&rtn_tp, tp))
return -EFAULT;
return 0;
default:
@@ -210,8 +210,8 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
}
COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
- struct compat_timespec __user *, rqtp,
- struct compat_timespec __user *, rmtp)
+ struct old_timespec32 __user *, rqtp,
+ struct old_timespec32 __user *, rmtp)
{
struct timespec64 t;
@@ -224,7 +224,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
return -EINVAL;
}
- if (compat_get_timespec64(&t, rqtp))
+ if (get_old_timespec32(&t, rqtp))
return -EFAULT;
if (!timespec64_valid(&t))
return -EINVAL;
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 4b9127e95430..bd62b5eeb5a0 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -308,7 +308,7 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
* To protect against the timer going away while the interrupt is queued,
* we require that the it_requeue_pending flag be set.
*/
-void posixtimer_rearm(struct siginfo *info)
+void posixtimer_rearm(struct kernel_siginfo *info)
{
struct k_itimer *timr;
unsigned long flags;
@@ -755,13 +755,13 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
- struct compat_itimerspec __user *, setting)
+ struct old_itimerspec32 __user *, setting)
{
struct itimerspec64 cur_setting;
int ret = do_timer_gettime(timer_id, &cur_setting);
if (!ret) {
- if (put_compat_itimerspec64(&cur_setting, setting))
+ if (put_old_itimerspec32(&cur_setting, setting))
ret = -EFAULT;
}
return ret;
@@ -928,8 +928,8 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
- struct compat_itimerspec __user *, new,
- struct compat_itimerspec __user *, old)
+ struct old_itimerspec32 __user *, new,
+ struct old_itimerspec32 __user *, old)
{
struct itimerspec64 new_spec, old_spec;
struct itimerspec64 *rtn = old ? &old_spec : NULL;
@@ -937,12 +937,12 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
if (!new)
return -EINVAL;
- if (get_compat_itimerspec64(&new_spec, new))
+ if (get_old_itimerspec32(&new_spec, new))
return -EFAULT;
error = do_timer_settime(timer_id, flags, &new_spec, rtn);
if (!error && old) {
- if (put_compat_itimerspec64(&old_spec, old))
+ if (put_old_itimerspec32(&old_spec, old))
error = -EFAULT;
}
return error;
@@ -1115,7 +1115,7 @@ SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
@@ -1123,14 +1123,14 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
if (!kc || !kc->clock_set)
return -EINVAL;
- if (compat_get_timespec64(&ts, tp))
+ if (get_old_timespec32(&ts, tp))
return -EFAULT;
return kc->clock_set(which_clock, &ts);
}
COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
@@ -1141,7 +1141,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
err = kc->clock_get(which_clock, &ts);
- if (!err && compat_put_timespec64(&ts, tp))
+ if (!err && put_old_timespec32(&ts, tp))
err = -EFAULT;
return err;
@@ -1180,7 +1180,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
@@ -1190,7 +1190,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
return -EINVAL;
err = kc->clock_getres(which_clock, &ts);
- if (!err && tp && compat_put_timespec64(&ts, tp))
+ if (!err && tp && put_old_timespec32(&ts, tp))
return -EFAULT;
return err;
@@ -1237,8 +1237,8 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
- struct compat_timespec __user *, rqtp,
- struct compat_timespec __user *, rmtp)
+ struct old_timespec32 __user *, rqtp,
+ struct old_timespec32 __user *, rmtp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 t;
@@ -1248,7 +1248,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
if (!kc->nsleep)
return -EOPNOTSUPP;
- if (compat_get_timespec64(&t, rqtp))
+ if (get_old_timespec32(&t, rqtp))
return -EFAULT;
if (!timespec64_valid(&t))
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index aa2094d5dd27..be0aac2b4300 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -400,8 +400,6 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
if (tick_broadcast_forced)
break;
cpumask_clear_cpu(cpu, tick_broadcast_on);
- if (!tick_device_is_functional(dev))
- break;
if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
if (tick_broadcast_device.mode ==
TICKDEV_MODE_PERIODIC)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5b33e2f5c0ed..69e673b88474 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -885,7 +885,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
if (need_resched())
return false;
- if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+ if (unlikely(local_softirq_pending())) {
static int ratelimit;
if (ratelimit < 10 &&
diff --git a/kernel/time/time.c b/kernel/time/time.c
index ccdb351277ee..e3a7f7fd3abc 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -104,12 +104,12 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr)
#ifdef CONFIG_COMPAT
#ifdef __ARCH_WANT_COMPAT_SYS_TIME
-/* compat_time_t is a 32 bit "long" and needs to get converted. */
-COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
+/* old_time32_t is a 32 bit "long" and needs to get converted. */
+COMPAT_SYSCALL_DEFINE1(time, old_time32_t __user *, tloc)
{
- compat_time_t i;
+ old_time32_t i;
- i = (compat_time_t)ktime_get_real_seconds();
+ i = (old_time32_t)ktime_get_real_seconds();
if (tloc) {
if (put_user(i,tloc))
@@ -119,7 +119,7 @@ COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
return i;
}
-COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
+COMPAT_SYSCALL_DEFINE1(stime, old_time32_t __user *, tptr)
{
struct timespec64 tv;
int err;
@@ -144,9 +144,11 @@ SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
struct timezone __user *, tz)
{
if (likely(tv != NULL)) {
- struct timeval ktv;
- do_gettimeofday(&ktv);
- if (copy_to_user(tv, &ktv, sizeof(ktv)))
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ if (put_user(ts.tv_sec, &tv->tv_sec) ||
+ put_user(ts.tv_nsec / 1000, &tv->tv_usec))
return -EFAULT;
}
if (unlikely(tz != NULL)) {
@@ -223,14 +225,15 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
}
#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
+COMPAT_SYSCALL_DEFINE2(gettimeofday, struct old_timeval32 __user *, tv,
struct timezone __user *, tz)
{
if (tv) {
- struct timeval ktv;
+ struct timespec64 ts;
- do_gettimeofday(&ktv);
- if (compat_put_timeval(&ktv, tv))
+ ktime_get_real_ts64(&ts);
+ if (put_user(ts.tv_sec, &tv->tv_sec) ||
+ put_user(ts.tv_nsec / 1000, &tv->tv_usec))
return -EFAULT;
}
if (tz) {
@@ -241,7 +244,7 @@ COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
return 0;
}
-COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
+COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv,
struct timezone __user *, tz)
{
struct timespec64 new_ts;
@@ -342,30 +345,6 @@ unsigned int jiffies_to_usecs(const unsigned long j)
}
EXPORT_SYMBOL(jiffies_to_usecs);
-/**
- * timespec_trunc - Truncate timespec to a granularity
- * @t: Timespec
- * @gran: Granularity in ns.
- *
- * Truncate a timespec to a granularity. Always rounds down. gran must
- * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
- */
-struct timespec timespec_trunc(struct timespec t, unsigned gran)
-{
- /* Avoid division in the common cases 1 ns and 1 s. */
- if (gran == 1) {
- /* nothing */
- } else if (gran == NSEC_PER_SEC) {
- t.tv_nsec = 0;
- } else if (gran > 1 && gran < NSEC_PER_SEC) {
- t.tv_nsec -= t.tv_nsec % gran;
- } else {
- WARN(1, "illegal file time granularity: %u", gran);
- }
- return t;
-}
-EXPORT_SYMBOL(timespec_trunc);
-
/*
* mktime64 - Converts date to seconds.
* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
@@ -884,10 +863,10 @@ int put_timespec64(const struct timespec64 *ts,
}
EXPORT_SYMBOL_GPL(put_timespec64);
-int __compat_get_timespec64(struct timespec64 *ts64,
- const struct compat_timespec __user *cts)
+static int __get_old_timespec32(struct timespec64 *ts64,
+ const struct old_timespec32 __user *cts)
{
- struct compat_timespec ts;
+ struct old_timespec32 ts;
int ret;
ret = copy_from_user(&ts, cts, sizeof(ts));
@@ -900,33 +879,33 @@ int __compat_get_timespec64(struct timespec64 *ts64,
return 0;
}
-int __compat_put_timespec64(const struct timespec64 *ts64,
- struct compat_timespec __user *cts)
+static int __put_old_timespec32(const struct timespec64 *ts64,
+ struct old_timespec32 __user *cts)
{
- struct compat_timespec ts = {
+ struct old_timespec32 ts = {
.tv_sec = ts64->tv_sec,
.tv_nsec = ts64->tv_nsec
};
return copy_to_user(cts, &ts, sizeof(ts)) ? -EFAULT : 0;
}
-int compat_get_timespec64(struct timespec64 *ts, const void __user *uts)
+int get_old_timespec32(struct timespec64 *ts, const void __user *uts)
{
if (COMPAT_USE_64BIT_TIME)
return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
else
- return __compat_get_timespec64(ts, uts);
+ return __get_old_timespec32(ts, uts);
}
-EXPORT_SYMBOL_GPL(compat_get_timespec64);
+EXPORT_SYMBOL_GPL(get_old_timespec32);
-int compat_put_timespec64(const struct timespec64 *ts, void __user *uts)
+int put_old_timespec32(const struct timespec64 *ts, void __user *uts)
{
if (COMPAT_USE_64BIT_TIME)
return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
else
- return __compat_put_timespec64(ts, uts);
+ return __put_old_timespec32(ts, uts);
}
-EXPORT_SYMBOL_GPL(compat_put_timespec64);
+EXPORT_SYMBOL_GPL(put_old_timespec32);
int get_itimerspec64(struct itimerspec64 *it,
const struct __kernel_itimerspec __user *uit)
@@ -958,23 +937,23 @@ int put_itimerspec64(const struct itimerspec64 *it,
}
EXPORT_SYMBOL_GPL(put_itimerspec64);
-int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits)
+int get_old_itimerspec32(struct itimerspec64 *its,
+ const struct old_itimerspec32 __user *uits)
{
- if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
- __compat_get_timespec64(&its->it_value, &uits->it_value))
+ if (__get_old_timespec32(&its->it_interval, &uits->it_interval) ||
+ __get_old_timespec32(&its->it_value, &uits->it_value))
return -EFAULT;
return 0;
}
-EXPORT_SYMBOL_GPL(get_compat_itimerspec64);
+EXPORT_SYMBOL_GPL(get_old_itimerspec32);
-int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits)
+int put_old_itimerspec32(const struct itimerspec64 *its,
+ struct old_itimerspec32 __user *uits)
{
- if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
- __compat_put_timespec64(&its->it_value, &uits->it_value))
+ if (__put_old_timespec32(&its->it_interval, &uits->it_interval) ||
+ __put_old_timespec32(&its->it_value, &uits->it_value))
return -EFAULT;
return 0;
}
-EXPORT_SYMBOL_GPL(put_compat_itimerspec64);
+EXPORT_SYMBOL_GPL(put_old_itimerspec32);
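get_old_timespec32()/put_old_timespec32() are the renamed compat helpers; on a 32-bit-time ABI they only change the width of tv_sec (COMPAT_USE_64BIT_TIME short-circuits to a plain copy). A userspace model of the widening direction, with the two structs redeclared locally for illustration:

#include <stdint.h>

struct old_timespec32 { int32_t tv_sec; int32_t tv_nsec; };
struct timespec64     { int64_t tv_sec; long    tv_nsec; };

/*
 * Model of __get_old_timespec32(): tv_sec is sign-extended to 64 bits,
 * tv_nsec is copied as-is and range-checked later by timespec64_valid().
 */
static struct timespec64 widen(struct old_timespec32 ts)
{
	struct timespec64 ts64 = {
		.tv_sec  = ts.tv_sec,
		.tv_nsec = ts.tv_nsec,
	};
	return ts64;
}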
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f3b22f456fac..2d110c948805 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1212,22 +1212,6 @@ int get_device_system_crosststamp(int (*get_time_fn)
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
/**
- * do_gettimeofday - Returns the time of day in a timeval
- * @tv: pointer to the timeval to be set
- *
- * NOTE: Users should be converted to using getnstimeofday()
- */
-void do_gettimeofday(struct timeval *tv)
-{
- struct timespec64 now;
-
- getnstimeofday64(&now);
- tv->tv_sec = now.tv_sec;
- tv->tv_usec = now.tv_nsec/1000;
-}
-EXPORT_SYMBOL(do_gettimeofday);
-
-/**
* do_settimeofday64 - Sets the time of day.
* @ts: pointer to the timespec64 variable containing the new time
*
@@ -2174,14 +2158,6 @@ void getboottime64(struct timespec64 *ts)
}
EXPORT_SYMBOL_GPL(getboottime64);
-unsigned long get_seconds(void)
-{
- struct timekeeper *tk = &tk_core.timekeeper;
-
- return tk->xtime_sec;
-}
-EXPORT_SYMBOL(get_seconds);
-
void ktime_get_coarse_real_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
diff --git a/kernel/torture.c b/kernel/torture.c
index 1ac24a826589..17d91f5fba2a 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -573,7 +573,7 @@ static int stutter;
* Block until the stutter interval ends. This must be called periodically
* by all running kthreads that need to be subject to stuttering.
*/
-void stutter_wait(const char *title)
+bool stutter_wait(const char *title)
{
int spt;
@@ -590,6 +590,7 @@ void stutter_wait(const char *title)
}
torture_shutdown_absorb(title);
}
+ return !!spt;
}
EXPORT_SYMBOL_GPL(stutter_wait);
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 2868d85f1fb1..fac0ddf8a8e2 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -764,9 +764,9 @@ blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
return NULL;
- if (!bio->bi_css)
+ if (!bio->bi_blkg)
return NULL;
- return cgroup_get_kernfs_id(bio->bi_css->cgroup);
+ return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
}
#else
static union kernfs_node_id *
diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
index f704390db9fc..d8765c952fab 100644
--- a/kernel/trace/preemptirq_delay_test.c
+++ b/kernel/trace/preemptirq_delay_test.c
@@ -5,12 +5,12 @@
* Copyright (C) 2018 Joel Fernandes (Google) <joel@joelfernandes.org>
*/
+#include <linux/trace_clock.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
-#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/string.h>
@@ -25,13 +25,13 @@ MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt or irq (default ir
static void busy_wait(ulong time)
{
- ktime_t start, end;
- start = ktime_get();
+ u64 start, end;
+ start = trace_clock_local();
do {
- end = ktime_get();
+ end = trace_clock_local();
if (kthread_should_stop())
break;
- } while (ktime_to_ns(ktime_sub(end, start)) < (time * 1000));
+ } while ((end - start) < (time * 1000));
}
static int preemptirq_delay_run(void *data)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1d92d4a982fd..65bd4616220d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1546,6 +1546,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
tmp_iter_page = first_page;
do {
+ cond_resched();
+
to_remove_page = tmp_iter_page;
rb_inc_page(cpu_buffer, &tmp_iter_page);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 85f6b01431c7..d239004aaf29 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -738,16 +738,30 @@ static void free_synth_field(struct synth_field *field)
kfree(field);
}
-static struct synth_field *parse_synth_field(char *field_type,
- char *field_name)
+static struct synth_field *parse_synth_field(int argc, char **argv,
+ int *consumed)
{
struct synth_field *field;
+ const char *prefix = NULL;
+ char *field_type = argv[0], *field_name;
int len, ret = 0;
char *array;
if (field_type[0] == ';')
field_type++;
+ if (!strcmp(field_type, "unsigned")) {
+ if (argc < 3)
+ return ERR_PTR(-EINVAL);
+ prefix = "unsigned ";
+ field_type = argv[1];
+ field_name = argv[2];
+ *consumed = 3;
+ } else {
+ field_name = argv[1];
+ *consumed = 2;
+ }
+
len = strlen(field_name);
if (field_name[len - 1] == ';')
field_name[len - 1] = '\0';
@@ -760,11 +774,15 @@ static struct synth_field *parse_synth_field(char *field_type,
array = strchr(field_name, '[');
if (array)
len += strlen(array);
+ if (prefix)
+ len += strlen(prefix);
field->type = kzalloc(len, GFP_KERNEL);
if (!field->type) {
ret = -ENOMEM;
goto free;
}
+ if (prefix)
+ strcat(field->type, prefix);
strcat(field->type, field_type);
if (array) {
strcat(field->type, array);
@@ -1009,7 +1027,7 @@ static int create_synth_event(int argc, char **argv)
struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
struct synth_event *event = NULL;
bool delete_event = false;
- int i, n_fields = 0, ret = 0;
+ int i, consumed = 0, n_fields = 0, ret = 0;
char *name;
mutex_lock(&synth_event_mutex);
@@ -1061,16 +1079,16 @@ static int create_synth_event(int argc, char **argv)
goto err;
}
- field = parse_synth_field(argv[i], argv[i + 1]);
+ field = parse_synth_field(argc - i, &argv[i], &consumed);
if (IS_ERR(field)) {
ret = PTR_ERR(field);
goto err;
}
- fields[n_fields] = field;
- i++; n_fields++;
+ fields[n_fields++] = field;
+ i += consumed - 1;
}
- if (i < argc) {
+ if (i < argc && strcmp(argv[i], ";") != 0) {
ret = -EINVAL;
goto err;
}
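parse_synth_field() previously consumed exactly two tokens (type, name), which made "unsigned int" unparseable; it now reports how many argv slots it ate so create_synth_event() can advance correctly. A hedged sketch of creating such an event from C — the tracefs path and the event definition are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "wakeup_lat unsigned int pid; u64 delta\n";
	int fd = open("/sys/kernel/debug/tracing/synthetic_events",
		      O_WRONLY | O_APPEND);

	if (fd < 0 || write(fd, cmd, strlen(cmd)) < 0)
		perror("synthetic_events");
	if (fd >= 0)
		close(fd);
	return 0;
}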
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index bf2c06ef9afc..a3be42304485 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -28,8 +28,8 @@
#include <linux/sched/task.h>
#include <linux/static_key.h>
-extern struct tracepoint * const __start___tracepoints_ptrs[];
-extern struct tracepoint * const __stop___tracepoints_ptrs[];
+extern tracepoint_ptr_t __start___tracepoints_ptrs[];
+extern tracepoint_ptr_t __stop___tracepoints_ptrs[];
DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);
@@ -371,25 +371,17 @@ int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
-static void for_each_tracepoint_range(struct tracepoint * const *begin,
- struct tracepoint * const *end,
+static void for_each_tracepoint_range(
+ tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
void (*fct)(struct tracepoint *tp, void *priv),
void *priv)
{
+ tracepoint_ptr_t *iter;
+
if (!begin)
return;
-
- if (IS_ENABLED(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)) {
- const int *iter;
-
- for (iter = (const int *)begin; iter < (const int *)end; iter++)
- fct(offset_to_ptr(iter), priv);
- } else {
- struct tracepoint * const *iter;
-
- for (iter = begin; iter < end; iter++)
- fct(*iter, priv);
- }
+ for (iter = begin; iter < end; iter++)
+ fct(tracepoint_ptr_deref(iter), priv);
}
#ifdef CONFIG_MODULES
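tracepoint_ptr_t and tracepoint_ptr_deref() fold the two iteration variants into one: with CONFIG_HAVE_ARCH_PREL32_RELOCATIONS the section stores 32-bit self-relative offsets instead of full pointers. A freestanding model of the decode step, mirroring the kernel's offset_to_ptr():

#include <stdint.h>

/*
 * Each table entry holds the signed 32-bit distance from the entry
 * itself to its target, so the table needs no load-time relocations
 * and is half the size on 64-bit.
 */
static inline void *offset_to_ptr_model(const int32_t *off)
{
	return (char *)(uintptr_t)off + *off;
}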
diff --git a/kernel/umh.c b/kernel/umh.c
index c449858946af..0baa672e023c 100644
--- a/kernel/umh.c
+++ b/kernel/umh.c
@@ -405,11 +405,19 @@ struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
void (*cleanup)(struct subprocess_info *info), void *data)
{
struct subprocess_info *sub_info;
+ struct umh_info *info = data;
+ const char *cmdline = (info->cmdline) ? info->cmdline : "usermodehelper";
sub_info = kzalloc(sizeof(struct subprocess_info), GFP_KERNEL);
if (!sub_info)
return NULL;
+ sub_info->argv = argv_split(GFP_KERNEL, cmdline, NULL);
+ if (!sub_info->argv) {
+ kfree(sub_info);
+ return NULL;
+ }
+
INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
sub_info->path = "none";
sub_info->file = file;
@@ -458,10 +466,11 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
return 0;
}
-static void umh_save_pid(struct subprocess_info *info)
+static void umh_clean_and_save_pid(struct subprocess_info *info)
{
struct umh_info *umh_info = info->data;
+ argv_free(info->argv);
umh_info->pid = info->pid;
}
@@ -471,6 +480,9 @@ static void umh_save_pid(struct subprocess_info *info)
* @len: length of the blob
* @info: information about usermode process (shouldn't be NULL)
*
+ * If info->cmdline is set it will be used as the command line for the
+ * user process, else "usermodehelper" is used.
+ *
* Returns either negative error or zero which indicates success
* in executing a blob of bytes as a usermode process. In such
* case 'struct umh_info *info' is populated with two pipes
@@ -500,7 +512,7 @@ int fork_usermode_blob(void *data, size_t len, struct umh_info *info)
err = -ENOMEM;
sub_info = call_usermodehelper_setup_file(file, umh_pipe_setup,
- umh_save_pid, info);
+ umh_clean_and_save_pid, info);
if (!sub_info)
goto out;
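call_usermodehelper_setup_file() now builds argv from info->cmdline, so a blob started via fork_usermode_blob() gets a meaningful argv[0] instead of none. A hedged caller sketch, assuming the matching umh_info.cmdline field added elsewhere in this series; where the blob comes from is out of scope:

#include <linux/types.h>
#include <linux/umh.h>

static struct umh_info my_umh_info = {
	.cmdline = "my_umh_helper",	/* becomes the helper's argv */
};

static int launch_blob(void *blob, size_t len)
{
	/* Leaving .cmdline NULL falls back to "usermodehelper". */
	return fork_usermode_blob(blob, len, &my_umh_info);
}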
diff --git a/kernel/up.c b/kernel/up.c
index 42c46bf3e0a5..ff536f9cc8a2 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -68,9 +68,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
* Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
*/
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
- smp_call_func_t func, void *info, bool wait,
- gfp_t gfp_flags)
+void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags, const struct cpumask *mask)
{
unsigned long flags;
@@ -82,6 +82,14 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
}
preempt_enable();
}
+EXPORT_SYMBOL(on_each_cpu_cond_mask);
+
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags)
+{
+ on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+}
EXPORT_SYMBOL(on_each_cpu_cond);
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
diff --git a/lib/Kconfig b/lib/Kconfig
index a3928d4438b5..d82f20609939 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -621,3 +621,6 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
+
+config GENERIC_LIB_UMODDI3
+ bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4966c4fbe7f7..8d24f4ed66fd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1179,7 +1179,7 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !X86
+ select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
select KALLSYMS
select KALLSYMS_ALL
@@ -1590,7 +1590,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
help
Provide stacktrace filter for fault-injection capabilities
@@ -1599,7 +1599,7 @@ config LATENCYTOP
depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
depends on PROC_FS
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index befb127507c0..d0bad1bd9a2b 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -57,6 +57,15 @@ config KASAN_INLINE
endchoice
+config KASAN_S390_4_LEVEL_PAGING
+ bool "KASan: use 4-level paging"
+ depends on KASAN && S390
+ help
+ Compiling the kernel with KASan disables automatic 3-level vs
+ 4-level paging selection. 3-level paging is used by default (up
+	  to 3TB of RAM with KASan enabled). This option allows forcing
+ 4-level paging instead.
+
config TEST_KASAN
tristate "Module for testing kasan for bug detection"
depends on m && KASAN
diff --git a/lib/Makefile b/lib/Makefile
index ca3f7ebb900d..56a8d9c23ef3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -119,7 +119,6 @@ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
obj-$(CONFIG_BCH) += bch.o
-CFLAGS_bch.o := $(call cc-option,-Wframe-larger-than=4500)
obj-$(CONFIG_LZO_COMPRESS) += lzo/
obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
obj-$(CONFIG_LZ4_COMPRESS) += lz4/
@@ -271,3 +270,4 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_GENERIC_LIB_UMODDI3) += umoddi3.o udivmoddi4.o
diff --git a/lib/bch.c b/lib/bch.c
index 7b0f2006698b..5db6d3a4c8a6 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -79,20 +79,19 @@
#define GF_T(_p) (CONFIG_BCH_CONST_T)
#define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1)
#define BCH_MAX_M (CONFIG_BCH_CONST_M)
+#define BCH_MAX_T (CONFIG_BCH_CONST_T)
#else
#define GF_M(_p) ((_p)->m)
#define GF_T(_p) ((_p)->t)
#define GF_N(_p) ((_p)->n)
-#define BCH_MAX_M 15
+#define BCH_MAX_M 15 /* 2KB */
+#define BCH_MAX_T 64 /* 64 bit correction */
#endif
-#define BCH_MAX_T (((1 << BCH_MAX_M) - 1) / BCH_MAX_M)
-
#define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32)
#define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8)
#define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32)
-#define BCH_ECC_MAX_BYTES DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 8)
#ifndef dbg
#define dbg(_fmt, args...) do {} while (0)
@@ -202,6 +201,9 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
const uint32_t * const tab3 = tab2 + 256*(l+1);
const uint32_t *pdata, *p0, *p1, *p2, *p3;
+ if (WARN_ON(r_bytes > sizeof(r)))
+ return;
+
if (ecc) {
/* load ecc parity bytes into internal 32-bit buffer */
load_ecc8(bch, bch->ecc_buf, ecc);
@@ -1285,6 +1287,13 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
*/
goto fail;
+ if (t > BCH_MAX_T)
+ /*
+ * we could support t values larger than 64 if necessary, at
+ * the cost of higher stack usage.
+ */
+ goto fail;
+
/* sanity checks */
if ((t < 1) || (m*t >= ((1 << m)-1)))
/* invalid t value */
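With BCH_MAX_T now capped at 64, init_bch() rejects configurations that would previously have blown the on-stack ECC buffers. A minimal caller sketch; the function name example_bch_setup() and the chosen (m, t) parameters are illustrative only:

#include <linux/bch.h>

static struct bch_control *example_bch_setup(void)
{
	/* m = 14 -> GF(2^14), t = 4 correctable bits: well under BCH_MAX_T */
	struct bch_control *bch = init_bch(14, 4, 0);

	if (!bch)	/* t > 64, invalid (m, t) pair, or allocation failure */
		return NULL;
	return bch;
}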
diff --git a/lib/chacha20.c b/lib/chacha20.c
index c1cc50fb68c9..d907fec6a9ed 100644
--- a/lib/chacha20.c
+++ b/lib/chacha20.c
@@ -16,9 +16,9 @@
#include <asm/unaligned.h>
#include <crypto/chacha20.h>
-void chacha20_block(u32 *state, u32 *stream)
+void chacha20_block(u32 *state, u8 *stream)
{
- u32 x[16], *out = stream;
+ u32 x[16];
int i;
for (i = 0; i < ARRAY_SIZE(x); i++)
@@ -67,7 +67,7 @@ void chacha20_block(u32 *state, u32 *stream)
}
for (i = 0; i < ARRAY_SIZE(x); i++)
- out[i] = cpu_to_le32(x[i] + state[i]);
+ put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
state[12]++;
}
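Since chacha20_block() now emits bytes through put_unaligned_le32(), callers can hand it any u8 buffer without worrying about word alignment. A hedged sketch; example_keystream() is illustrative:

#include <crypto/chacha20.h>

static void example_keystream(u32 state[16], u8 out[CHACHA20_BLOCK_SIZE])
{
	/* fills 64 bytes of keystream and advances the block counter */
	chacha20_block(state, out);
}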
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 1ad33e555805..4d0d47c1ffbd 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -14,10 +14,47 @@
#include <linux/err.h>
#include <linux/init.h>
#include <crypto/hash.h>
+#include <crypto/algapi.h>
#include <linux/static_key.h>
+#include <linux/notifier.h>
-static struct crypto_shash *crct10dif_tfm;
+static struct crypto_shash __rcu *crct10dif_tfm;
static struct static_key crct10dif_fallback __read_mostly;
+static DEFINE_MUTEX(crc_t10dif_mutex);
+
+static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct crypto_alg *alg = data;
+ struct crypto_shash *new, *old;
+
+ if (val != CRYPTO_MSG_ALG_LOADED ||
+ static_key_false(&crct10dif_fallback) ||
+ strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
+ return 0;
+
+ mutex_lock(&crc_t10dif_mutex);
+ old = rcu_dereference_protected(crct10dif_tfm,
+ lockdep_is_held(&crc_t10dif_mutex));
+ if (!old) {
+ mutex_unlock(&crc_t10dif_mutex);
+ return 0;
+ }
+ new = crypto_alloc_shash("crct10dif", 0, 0);
+ if (IS_ERR(new)) {
+ mutex_unlock(&crc_t10dif_mutex);
+ return 0;
+ }
+ rcu_assign_pointer(crct10dif_tfm, new);
+ mutex_unlock(&crc_t10dif_mutex);
+
+ synchronize_rcu();
+ crypto_free_shash(old);
+ return 0;
+}
+
+static struct notifier_block crc_t10dif_nb = {
+ .notifier_call = crc_t10dif_rehash,
+};
__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
{
@@ -30,11 +67,14 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
if (static_key_false(&crct10dif_fallback))
return crc_t10dif_generic(crc, buffer, len);
- desc.shash.tfm = crct10dif_tfm;
+ rcu_read_lock();
+ desc.shash.tfm = rcu_dereference(crct10dif_tfm);
desc.shash.flags = 0;
*(__u16 *)desc.ctx = crc;
err = crypto_shash_update(&desc.shash, buffer, len);
+ rcu_read_unlock();
+
BUG_ON(err);
return *(__u16 *)desc.ctx;
@@ -49,6 +89,7 @@ EXPORT_SYMBOL(crc_t10dif);
static int __init crc_t10dif_mod_init(void)
{
+ crypto_register_notifier(&crc_t10dif_nb);
crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
if (IS_ERR(crct10dif_tfm)) {
static_key_slow_inc(&crct10dif_fallback);
@@ -59,12 +100,24 @@ static int __init crc_t10dif_mod_init(void)
static void __exit crc_t10dif_mod_fini(void)
{
+ crypto_unregister_notifier(&crc_t10dif_nb);
crypto_free_shash(crct10dif_tfm);
}
module_init(crc_t10dif_mod_init);
module_exit(crc_t10dif_mod_fini);
+static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
+{
+ if (static_key_false(&crct10dif_fallback))
+ return sprintf(buffer, "fallback\n");
+
+ return sprintf(buffer, "%s\n",
+ crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
+}
+
+module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
+
MODULE_DESCRIPTION("T10 DIF CRC calculation");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crct10dif");
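The rehash notifier follows the classic RCU publish/retire pattern: swap the pointer under a mutex, publish with rcu_assign_pointer(), then synchronize_rcu() before freeing the old transform, while readers only hold rcu_read_lock(). A generic sketch of that pattern, with a hypothetical struct thing standing in for the crypto_shash:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct thing { int data; };		/* stand-in for crypto_shash */

static struct thing __rcu *cur;
static DEFINE_MUTEX(update_lock);

static int reader(void)
{
	struct thing *t;
	int val;

	rcu_read_lock();
	t = rcu_dereference(cur);	/* stable until rcu_read_unlock() */
	val = t ? t->data : -1;
	rcu_read_unlock();
	return val;
}

static void updater(struct thing *new)
{
	struct thing *old;

	mutex_lock(&update_lock);
	old = rcu_dereference_protected(cur, lockdep_is_held(&update_lock));
	rcu_assign_pointer(cur, new);
	mutex_unlock(&update_lock);

	synchronize_rcu();		/* wait for pre-existing readers */
	kfree(old);
}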
diff --git a/lib/crc32.c b/lib/crc32.c
index a6c9afafc8c8..45b1d67a1767 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -183,21 +183,21 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
}
#if CRC_LE_BITS == 1
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
}
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
}
#else
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len,
(const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
}
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len,
(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
@@ -206,6 +206,9 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(__crc32c_le);
+u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
+u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+
/*
* This multiplies the polynomials x and y modulo the given modulus.
* This follows the "little-endian" CRC convention that the lsbit
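Marking the generic functions __weak, with crc32_le_base aliased to them, lets an architecture supply an accelerated crc32_le() and still reach the table-driven code as a fallback. A sketch of such an override; arch_crc32_le_vector() is a hypothetical accelerated routine:

#include <linux/crc32.h>

extern u32 arch_crc32_le_vector(u32 crc, unsigned char const *p, size_t len);

/* a non-weak definition overrides the __weak generic one at link time */
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
	if (len < 16)	/* too short to amortize the vector unit setup */
		return crc32_le_base(crc, p, len);
	return arch_crc32_le_vector(crc, p, len);
}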
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 96c4c633d95e..ce51749cc145 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -21,7 +21,7 @@
* that would just muddy the log. So we report the first one and
* shut up after that.
*/
-int debug_locks = 1;
+int debug_locks __read_mostly = 1;
EXPORT_SYMBOL_GPL(debug_locks);
/*
@@ -29,7 +29,7 @@ EXPORT_SYMBOL_GPL(debug_locks);
* 'silent failure': nothing is printed to the console when
* a locking bug is detected.
*/
-int debug_locks_silent;
+int debug_locks_silent __read_mostly;
EXPORT_SYMBOL_GPL(debug_locks_silent);
/*
@@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
*/
int debug_locks_off(void)
{
- if (__debug_locks_off()) {
+ if (debug_locks && __debug_locks_off()) {
if (!debug_locks_silent) {
console_verbose();
return 1;
diff --git a/lib/nlattr.c b/lib/nlattr.c
index e335bcafa9e4..d26de6156b97 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -45,12 +45,11 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
};
static int validate_nla_bitfield32(const struct nlattr *nla,
- u32 *valid_flags_allowed)
+ const u32 *valid_flags_mask)
{
const struct nla_bitfield32 *bf = nla_data(nla);
- u32 *valid_flags_mask = valid_flags_allowed;
- if (!valid_flags_allowed)
+ if (!valid_flags_mask)
return -EINVAL;
/* disallow invalid bit selector */
@@ -68,11 +67,99 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
return 0;
}
+static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ const struct nlattr *entry;
+ int rem;
+
+ nla_for_each_attr(entry, head, len, rem) {
+ int ret;
+
+ if (nla_len(entry) == 0)
+ continue;
+
+ if (nla_len(entry) < NLA_HDRLEN) {
+ NL_SET_ERR_MSG_ATTR(extack, entry,
+ "Array element too short");
+ return -ERANGE;
+ }
+
+ ret = nla_validate(nla_data(entry), nla_len(entry),
+ maxtype, policy, extack);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nla_validate_int_range(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ bool validate_min, validate_max;
+ s64 value;
+
+ validate_min = pt->validation_type == NLA_VALIDATE_RANGE ||
+ pt->validation_type == NLA_VALIDATE_MIN;
+ validate_max = pt->validation_type == NLA_VALIDATE_RANGE ||
+ pt->validation_type == NLA_VALIDATE_MAX;
+
+ switch (pt->type) {
+ case NLA_U8:
+ value = nla_get_u8(nla);
+ break;
+ case NLA_U16:
+ value = nla_get_u16(nla);
+ break;
+ case NLA_U32:
+ value = nla_get_u32(nla);
+ break;
+ case NLA_S8:
+ value = nla_get_s8(nla);
+ break;
+ case NLA_S16:
+ value = nla_get_s16(nla);
+ break;
+ case NLA_S32:
+ value = nla_get_s32(nla);
+ break;
+ case NLA_S64:
+ value = nla_get_s64(nla);
+ break;
+ case NLA_U64:
+ /* treat this one specially, since it may not fit into s64 */
+ if ((validate_min && nla_get_u64(nla) < pt->min) ||
+ (validate_max && nla_get_u64(nla) > pt->max)) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "integer out of range");
+ return -ERANGE;
+ }
+ return 0;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if ((validate_min && value < pt->min) ||
+ (validate_max && value > pt->max)) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "integer out of range");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
static int validate_nla(const struct nlattr *nla, int maxtype,
- const struct nla_policy *policy)
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
const struct nla_policy *pt;
int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
+ int err = -ERANGE;
if (type <= 0 || type > maxtype)
return 0;
@@ -81,22 +168,40 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
BUG_ON(pt->type > NLA_TYPE_MAX);
- if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
+ if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) ||
+ (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, type);
}
switch (pt->type) {
+ case NLA_EXACT_LEN:
+ if (attrlen != pt->len)
+ goto out_err;
+ break;
+
+ case NLA_REJECT:
+ if (extack && pt->validation_data) {
+ NL_SET_BAD_ATTR(extack, nla);
+ extack->_msg = pt->validation_data;
+ return -EINVAL;
+ }
+ err = -EINVAL;
+ goto out_err;
+
case NLA_FLAG:
if (attrlen > 0)
- return -ERANGE;
+ goto out_err;
break;
case NLA_BITFIELD32:
if (attrlen != sizeof(struct nla_bitfield32))
- return -ERANGE;
+ goto out_err;
- return validate_nla_bitfield32(nla, pt->validation_data);
+ err = validate_nla_bitfield32(nla, pt->validation_data);
+ if (err)
+ goto out_err;
+ break;
case NLA_NUL_STRING:
if (pt->len)
@@ -104,13 +209,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
else
minlen = attrlen;
- if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
- return -EINVAL;
+ if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) {
+ err = -EINVAL;
+ goto out_err;
+ }
/* fall through */
case NLA_STRING:
if (attrlen < 1)
- return -ERANGE;
+ goto out_err;
if (pt->len) {
char *buf = nla_data(nla);
@@ -119,32 +226,58 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
attrlen--;
if (attrlen > pt->len)
- return -ERANGE;
+ goto out_err;
}
break;
case NLA_BINARY:
if (pt->len && attrlen > pt->len)
- return -ERANGE;
+ goto out_err;
break;
- case NLA_NESTED_COMPAT:
- if (attrlen < pt->len)
- return -ERANGE;
- if (attrlen < NLA_ALIGN(pt->len))
- break;
- if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
- return -ERANGE;
- nla = nla_data(nla) + NLA_ALIGN(pt->len);
- if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
- return -ERANGE;
- break;
case NLA_NESTED:
/* a nested attribute is allowed to be empty; if it's not,
* it must have a size of at least NLA_HDRLEN.
*/
if (attrlen == 0)
break;
+ if (attrlen < NLA_HDRLEN)
+ goto out_err;
+ if (pt->validation_data) {
+ err = nla_validate(nla_data(nla), nla_len(nla), pt->len,
+ pt->validation_data, extack);
+ if (err < 0) {
+ /*
+ * return directly to preserve the inner
+ * error message/attribute pointer
+ */
+ return err;
+ }
+ }
+ break;
+ case NLA_NESTED_ARRAY:
+ /* a nested array attribute is allowed to be empty; if it's not,
+ * it must have a size of at least NLA_HDRLEN.
+ */
+ if (attrlen == 0)
+ break;
+ if (attrlen < NLA_HDRLEN)
+ goto out_err;
+ if (pt->validation_data) {
+ int err;
+
+ err = nla_validate_array(nla_data(nla), nla_len(nla),
+ pt->len, pt->validation_data,
+ extack);
+ if (err < 0) {
+ /*
+ * return directly to preserve the inner
+ * error message/attribute pointer
+ */
+ return err;
+ }
+ }
+ break;
default:
if (pt->len)
minlen = pt->len;
@@ -152,10 +285,34 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
minlen = nla_attr_minlen[pt->type];
if (attrlen < minlen)
- return -ERANGE;
+ goto out_err;
+ }
+
+ /* further validation */
+ switch (pt->validation_type) {
+ case NLA_VALIDATE_NONE:
+ /* nothing to do */
+ break;
+ case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_MIN:
+ case NLA_VALIDATE_MAX:
+ err = nla_validate_int_range(pt, nla, extack);
+ if (err)
+ return err;
+ break;
+ case NLA_VALIDATE_FUNCTION:
+ if (pt->validate) {
+ err = pt->validate(nla, extack);
+ if (err)
+ return err;
+ }
+ break;
}
return 0;
+out_err:
+ NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation");
+ return err;
}
/**
@@ -180,13 +337,10 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
int rem;
nla_for_each_attr(nla, head, len, rem) {
- int err = validate_nla(nla, maxtype, policy);
+ int err = validate_nla(nla, maxtype, policy, extack);
- if (err < 0) {
- if (extack)
- extack->bad_attr = nla;
+ if (err < 0)
return err;
- }
}
return 0;
@@ -237,42 +391,63 @@ EXPORT_SYMBOL(nla_policy_len);
*
* Returns 0 on success or a negative error code.
*/
-int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
- int len, const struct nla_policy *policy,
- struct netlink_ext_ack *extack)
+static int __nla_parse(struct nlattr **tb, int maxtype,
+ const struct nlattr *head, int len,
+ bool strict, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
const struct nlattr *nla;
- int rem, err;
+ int rem;
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
nla_for_each_attr(nla, head, len, rem) {
u16 type = nla_type(nla);
- if (type > 0 && type <= maxtype) {
- if (policy) {
- err = validate_nla(nla, maxtype, policy);
- if (err < 0) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "Attribute failed policy validation");
- goto errout;
- }
+ if (type == 0 || type > maxtype) {
+ if (strict) {
+ NL_SET_ERR_MSG(extack, "Unknown attribute type");
+ return -EINVAL;
}
+ continue;
+ }
+ if (policy) {
+ int err = validate_nla(nla, maxtype, policy, extack);
- tb[type] = (struct nlattr *)nla;
+ if (err < 0)
+ return err;
}
+
+ tb[type] = (struct nlattr *)nla;
}
- if (unlikely(rem > 0))
+ if (unlikely(rem > 0)) {
pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
rem, current->comm);
+ NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
+ if (strict)
+ return -EINVAL;
+ }
- err = 0;
-errout:
- return err;
+ return 0;
+}
+
+int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, false, policy, extack);
}
EXPORT_SYMBOL(nla_parse);
+int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, true, policy, extack);
+}
+EXPORT_SYMBOL(nla_parse_strict);
+
/**
* nla_find - Find a specific attribute in a stream of attributes
* @head: head of attribute stream
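With validation_type in the policy, range checks that used to live in per-family code become declarative, and nla_parse_strict() additionally rejects unknown attribute types and trailing bytes. A hedged example policy, assuming the NLA_POLICY_RANGE() helper added alongside this code; the attribute names are made up:

#include <net/netlink.h>

enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_LEVEL,		/* u8, must be 1..10 */
	EXAMPLE_ATTR_NAME,		/* NUL-terminated, at most 15 chars */
	__EXAMPLE_ATTR_MAX,
};
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	[EXAMPLE_ATTR_LEVEL] = NLA_POLICY_RANGE(NLA_U8, 1, 10),
	[EXAMPLE_ATTR_NAME]  = { .type = NLA_NUL_STRING, .len = 15 },
};

/* out-of-range values now fail in validate_nla() with a populated
 * extack ("integer out of range") instead of needing a manual check
 */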
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 9f96fa7bc000..de10b8c0bff6 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -356,11 +356,35 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
*/
void percpu_ref_reinit(struct percpu_ref *ref)
{
+ WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+
+ percpu_ref_resurrect(ref);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_resurrect - modify a percpu refcount from dead to live
+ * @ref: percpu_ref to resurrect
+ *
+ * Modify @ref so that it's in the same state as before percpu_ref_kill() was
+ * called. @ref must be dead but must not yet have exited.
+ *
+ * If @ref->release() frees @ref then the caller is responsible for
+ * guaranteeing that @ref->release() does not get called while this
+ * function is in progress.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
+ */
+void percpu_ref_resurrect(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
unsigned long flags;
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+ WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+ WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
percpu_ref_get(ref);
@@ -368,4 +392,4 @@ void percpu_ref_reinit(struct percpu_ref *ref)
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
-EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
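percpu_ref_resurrect() relaxes the old reinit contract: the ref must be dead, but it need not have dropped all the way to zero. A hedged freeze/unfreeze sketch; the function names are illustrative:

#include <linux/percpu-refcount.h>

static void example_freeze(struct percpu_ref *ref)
{
	/* switches to atomic mode; percpu_ref_tryget_live() now fails */
	percpu_ref_kill(ref);
}

static void example_unfreeze(struct percpu_ref *ref)
{
	/* legal even though the count never reached zero while frozen */
	percpu_ref_resurrect(ref);
}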
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 08d3d59dca17..aa22bcaec1dc 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6494,6 +6494,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
skb->queue_mapping = SKB_QUEUE_MAP;
skb->vlan_tci = SKB_VLAN_TCI;
skb->vlan_proto = htons(ETH_P_IP);
+ dev_net_set(&dev, &init_net);
skb->dev = &dev;
skb->dev->ifindex = SKB_DEV_IFINDEX;
skb->dev->type = SKB_DEV_TYPE;
diff --git a/lib/test_ida.c b/lib/test_ida.c
index 2d1637d8136b..b06880625961 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -150,10 +150,10 @@ static void ida_check_conv(struct ida *ida)
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
+static DEFINE_IDA(ida);
+
static int ida_checks(void)
{
- DEFINE_IDA(ida);
-
IDA_BUG_ON(&ida, !ida_is_empty(&ida));
ida_check_alloc(&ida);
ida_check_destroy(&ida);
diff --git a/lib/udivmoddi4.c b/lib/udivmoddi4.c
new file mode 100644
index 000000000000..c08bc8a5f1cf
--- /dev/null
+++ b/lib/udivmoddi4.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#include <linux/libgcc.h>
+
+#define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz(X))
+
+#define W_TYPE_SIZE 32
+
+#define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2))
+
+/* If we still don't have umul_ppmm, define it using plain C. */
+#if !defined(umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ unsigned long __x0, __x1, __x2, __x3; \
+ unsigned short __ul, __vl, __uh, __vh; \
+ \
+ __ul = __ll_lowpart(u); \
+ __uh = __ll_highpart(u); \
+ __vl = __ll_lowpart(v); \
+ __vh = __ll_highpart(v); \
+ \
+ __x0 = (unsigned long) __ul * __vl; \
+ __x1 = (unsigned long) __ul * __vh; \
+ __x2 = (unsigned long) __uh * __vl; \
+ __x3 = (unsigned long) __uh * __vh; \
+ \
+ __x1 += __ll_highpart(__x0); \
+ __x1 += __x2; \
+ if (__x1 < __x2) \
+ __x3 += __ll_B; \
+ \
+ (w1) = __x3 + __ll_highpart(__x1); \
+ (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
+ } while (0)
+#endif
+
+#if !defined(sub_ddmmss)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ unsigned long __x; \
+ __x = (al) - (bl); \
+ (sh) = (ah) - (bh) - (__x > (al)); \
+ (sl) = __x; \
+ } while (0)
+#endif
+
+/* Define this unconditionally, so it can be used for debugging. */
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+ do { \
+ unsigned long __d1, __d0, __q1, __q0; \
+ unsigned long __r1, __r0, __m; \
+ __d1 = __ll_highpart(d); \
+ __d0 = __ll_lowpart(d); \
+ \
+ __r1 = (n1) % __d1; \
+ __q1 = (n1) / __d1; \
+ __m = (unsigned long) __q1 * __d0; \
+ __r1 = __r1 * __ll_B | __ll_highpart(n0); \
+ if (__r1 < __m) { \
+ __q1--, __r1 += (d); \
+ if (__r1 >= (d)) \
+ if (__r1 < __m) \
+ __q1--, __r1 += (d); \
+ } \
+ __r1 -= __m; \
+ \
+ __r0 = __r1 % __d1; \
+ __q0 = __r1 / __d1; \
+ __m = (unsigned long) __q0 * __d0; \
+ __r0 = __r0 * __ll_B | __ll_lowpart(n0); \
+ if (__r0 < __m) { \
+ __q0--, __r0 += (d); \
+ if (__r0 >= (d)) \
+ if (__r0 < __m) \
+ __q0--, __r0 += (d); \
+ } \
+ __r0 -= __m; \
+ \
+ (q) = (unsigned long) __q1 * __ll_B | __q0; \
+ (r) = __r0; \
+ } while (0)
+
+/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
+#if !defined(udiv_qrnnd)
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+#endif
+
+unsigned long long __udivmoddi4(unsigned long long u, unsigned long long v,
+ unsigned long long *rp)
+{
+ const DWunion nn = {.ll = u };
+ const DWunion dd = {.ll = v };
+ DWunion rr, ww;
+ unsigned long d0, d1, n0, n1, n2;
+ unsigned long q0 = 0, q1 = 0;
+ unsigned long b, bm;
+
+ d0 = dd.s.low;
+ d1 = dd.s.high;
+ n0 = nn.s.low;
+ n1 = nn.s.high;
+
+#if !UDIV_NEEDS_NORMALIZATION
+
+ if (d1 == 0) {
+ if (d0 > n1) {
+ /* 0q = nn / 0D */
+
+ udiv_qrnnd(q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0. */
+ } else {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ /* Divide intentionally by zero. */
+ d0 = 1 / d0;
+
+ udiv_qrnnd(q1, n1, 0, n1, d0);
+ udiv_qrnnd(q0, n0, n1, n0, d0);
+
+ /* Remainder in n0. */
+ }
+
+ if (rp != 0) {
+ rr.s.low = n0;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+
+#else /* UDIV_NEEDS_NORMALIZATION */
+
+ if (d1 == 0) {
+ if (d0 > n1) {
+ /* 0q = nn / 0D */
+
+ count_leading_zeros(bm, d0);
+
+ if (bm != 0) {
+ /*
+ * Normalize, i.e. make the most significant bit
+ * of the denominator set.
+ */
+
+ d0 = d0 << bm;
+ n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
+ n0 = n0 << bm;
+ }
+
+ udiv_qrnnd(q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0 >> bm. */
+ } else {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ /* Divide intentionally by zero. */
+ d0 = 1 / d0;
+
+ count_leading_zeros(bm, d0);
+
+ if (bm == 0) {
+ /*
+ * From (n1 >= d0) /\ (the most significant bit
+ * of d0 is set), conclude (the most significant
+ * bit of n1 is set) /\ (the leading quotient
+ * digit q1 = 1).
+ *
+ * This special case is necessary, not an
+ * optimization. (Shift counts of W_TYPE_SIZE
+ * are undefined.)
+ */
+
+ n1 -= d0;
+ q1 = 1;
+ } else {
+ /* Normalize. */
+
+ b = W_TYPE_SIZE - bm;
+
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd(q1, n1, n2, n1, d0);
+ }
+
+ /* n1 != d0... */
+
+ udiv_qrnnd(q0, n0, n1, n0, d0);
+
+ /* Remainder in n0 >> bm. */
+ }
+
+ if (rp != 0) {
+ rr.s.low = n0 >> bm;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+
+#endif /* UDIV_NEEDS_NORMALIZATION */
+
+ } else {
+ if (d1 > n1) {
+ /* 00 = nn / DD */
+
+ q0 = 0;
+ q1 = 0;
+
+ /* Remainder in n1n0. */
+ if (rp != 0) {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ } else {
+ /* 0q = NN / dd */
+
+ count_leading_zeros(bm, d1);
+ if (bm == 0) {
+ /*
+ * From (n1 >= d1) /\ (the most significant bit
+ * of d1 is set), conclude (the most significant
+ * bit of n1 is set) /\ (the quotient digit q0 =
+ * 0 or 1).
+ *
+ * This special case is necessary, not an
+ * optimization.
+ */
+
+ /*
+ * The condition on the next line takes
+ * advantage of the fact that n1 >= d1 (true due to
+ * program flow).
+ */
+ if (n1 > d1 || n0 >= d0) {
+ q0 = 1;
+ sub_ddmmss(n1, n0, n1, n0, d1, d0);
+ } else {
+ q0 = 0;
+ }
+
+ q1 = 0;
+
+ if (rp != 0) {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ } else {
+ unsigned long m1, m0;
+ /* Normalize. */
+
+ b = W_TYPE_SIZE - bm;
+
+ d1 = (d1 << bm) | (d0 >> b);
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd(q0, n1, n2, n1, d1);
+ umul_ppmm(m1, m0, q0, d0);
+
+ if (m1 > n1 || (m1 == n1 && m0 > n0)) {
+ q0--;
+ sub_ddmmss(m1, m0, m1, m0, d1, d0);
+ }
+
+ q1 = 0;
+
+ /* Remainder in (n1n0 - m1m0) >> bm. */
+ if (rp != 0) {
+ sub_ddmmss(n1, n0, n1, n0, m1, m0);
+ rr.s.low = (n1 << b) | (n0 >> bm);
+ rr.s.high = n1 >> bm;
+ *rp = rr.ll;
+ }
+ }
+ }
+ }
+
+ ww.s.low = q0;
+ ww.s.high = q1;
+
+ return ww.ll;
+}
diff --git a/lib/umoddi3.c b/lib/umoddi3.c
new file mode 100644
index 000000000000..d7bbf0f85197
--- /dev/null
+++ b/lib/umoddi3.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/libgcc.h>
+
+extern unsigned long long __udivmoddi4(unsigned long long u,
+ unsigned long long v,
+ unsigned long long *rp);
+
+unsigned long long __umoddi3(unsigned long long u, unsigned long long v)
+{
+ unsigned long long w;
+ (void)__udivmoddi4(u, v, &w);
+ return w;
+}
+EXPORT_SYMBOL(__umoddi3);
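These helpers satisfy gcc's libgcc calls for 64-bit '/' and '%' on 32-bit targets. A small userspace check of the division identity they must uphold, assuming the helper objects are linked in:

#include <assert.h>

extern unsigned long long __udivmoddi4(unsigned long long u,
				       unsigned long long v,
				       unsigned long long *rp);

int main(void)
{
	unsigned long long u = 0x0123456789abcdefULL;
	unsigned long long v = 0x87654321ULL;
	unsigned long long r, q;

	q = __udivmoddi4(u, v, &r);
	assert(q * v + r == u);		/* quotient/remainder identity */
	assert(r < v);
	return 0;
}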
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d5b3a3f95c01..ad4fbe5bc730 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -613,6 +613,109 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
}
static noinline_for_stack
+char *pointer_string(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ spec.base = 16;
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = 2 * sizeof(ptr);
+ spec.flags |= ZEROPAD;
+ }
+
+ return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+/* Make pointers available for printing early in the boot sequence. */
+static int debug_boot_weak_hash __ro_after_init;
+
+static int __init debug_boot_weak_hash_enable(char *str)
+{
+ debug_boot_weak_hash = 1;
+ pr_info("debug_boot_weak_hash enabled\n");
+ return 0;
+}
+early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
+
+static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
+static siphash_key_t ptr_key __read_mostly;
+
+static void enable_ptr_key_workfn(struct work_struct *work)
+{
+ get_random_bytes(&ptr_key, sizeof(ptr_key));
+ /* Needs to run from preemptible context */
+ static_branch_disable(&not_filled_random_ptr_key);
+}
+
+static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+ /* This may be in an interrupt handler. */
+ queue_work(system_unbound_wq, &enable_ptr_key_work);
+}
+
+static struct random_ready_callback random_ready = {
+ .func = fill_random_ptr_key
+};
+
+static int __init initialize_ptr_random(void)
+{
+ int key_size = sizeof(ptr_key);
+ int ret;
+
+ /* Use hw RNG if available. */
+ if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
+ static_branch_disable(&not_filled_random_ptr_key);
+ return 0;
+ }
+
+ ret = add_random_ready_callback(&random_ready);
+ if (!ret) {
+ return 0;
+ } else if (ret == -EALREADY) {
+ /* This is in preemptible context */
+ enable_ptr_key_workfn(&enable_ptr_key_work);
+ return 0;
+ }
+
+ return ret;
+}
+early_initcall(initialize_ptr_random);
+
+/* Maps a pointer to a 32 bit unique identifier. */
+static char *ptr_to_id(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
+ unsigned long hashval;
+
+ /* When debugging early boot use non-cryptographically secure hash. */
+ if (unlikely(debug_boot_weak_hash)) {
+ hashval = hash_long((unsigned long)ptr, 32);
+ return pointer_string(buf, end, (const void *)hashval, spec);
+ }
+
+ if (static_branch_unlikely(&not_filled_random_ptr_key)) {
+ spec.field_width = 2 * sizeof(ptr);
+ /* string length must be less than default_width */
+ return string(buf, end, str, spec);
+ }
+
+#ifdef CONFIG_64BIT
+ hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+ /*
+ * Mask off the first 32 bits; this makes it explicit that we have
+ * modified the address (and 32 bits is plenty for a unique ID).
+ */
+ hashval = hashval & 0xffffffff;
+#else
+ hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+ return pointer_string(buf, end, (const void *)hashval, spec);
+}
+
+static noinline_for_stack
char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
const char *fmt)
{
@@ -1357,20 +1460,6 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
return string(buf, end, uuid, spec);
}
-static noinline_for_stack
-char *pointer_string(char *buf, char *end, const void *ptr,
- struct printf_spec spec)
-{
- spec.base = 16;
- spec.flags |= SMALL;
- if (spec.field_width == -1) {
- spec.field_width = 2 * sizeof(ptr);
- spec.flags |= ZEROPAD;
- }
-
- return number(buf, end, (unsigned long int)ptr, spec);
-}
-
int kptr_restrict __read_mostly;
static noinline_for_stack
@@ -1421,7 +1510,8 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
}
static noinline_for_stack
-char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
+char *netdev_bits(char *buf, char *end, const void *addr,
+ struct printf_spec spec, const char *fmt)
{
unsigned long long num;
int size;
@@ -1432,9 +1522,7 @@ char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
size = sizeof(netdev_features_t);
break;
default:
- num = (unsigned long)addr;
- size = sizeof(unsigned long);
- break;
+ return ptr_to_id(buf, end, addr, spec);
}
return special_hex_number(buf, end, num, size);
@@ -1474,7 +1562,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
#ifdef CONFIG_COMMON_CLK
return string(buf, end, __clk_get_name(clk), spec);
#else
- return special_hex_number(buf, end, (unsigned long)clk, sizeof(unsigned long));
+ return ptr_to_id(buf, end, clk, spec);
#endif
}
}
@@ -1651,94 +1739,6 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
-/* Make pointers available for printing early in the boot sequence. */
-static int debug_boot_weak_hash __ro_after_init;
-
-static int __init debug_boot_weak_hash_enable(char *str)
-{
- debug_boot_weak_hash = 1;
- pr_info("debug_boot_weak_hash enabled\n");
- return 0;
-}
-early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
-
-static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
-static siphash_key_t ptr_key __read_mostly;
-
-static void enable_ptr_key_workfn(struct work_struct *work)
-{
- get_random_bytes(&ptr_key, sizeof(ptr_key));
- /* Needs to run from preemptible context */
- static_branch_disable(&not_filled_random_ptr_key);
-}
-
-static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
-
-static void fill_random_ptr_key(struct random_ready_callback *unused)
-{
- /* This may be in an interrupt handler. */
- queue_work(system_unbound_wq, &enable_ptr_key_work);
-}
-
-static struct random_ready_callback random_ready = {
- .func = fill_random_ptr_key
-};
-
-static int __init initialize_ptr_random(void)
-{
- int key_size = sizeof(ptr_key);
- int ret;
-
- /* Use hw RNG if available. */
- if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
- static_branch_disable(&not_filled_random_ptr_key);
- return 0;
- }
-
- ret = add_random_ready_callback(&random_ready);
- if (!ret) {
- return 0;
- } else if (ret == -EALREADY) {
- /* This is in preemptible context */
- enable_ptr_key_workfn(&enable_ptr_key_work);
- return 0;
- }
-
- return ret;
-}
-early_initcall(initialize_ptr_random);
-
-/* Maps a pointer to a 32 bit unique identifier. */
-static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
-{
- const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
- unsigned long hashval;
-
- /* When debugging early boot use non-cryptographically secure hash. */
- if (unlikely(debug_boot_weak_hash)) {
- hashval = hash_long((unsigned long)ptr, 32);
- return pointer_string(buf, end, (const void *)hashval, spec);
- }
-
- if (static_branch_unlikely(&not_filled_random_ptr_key)) {
- spec.field_width = 2 * sizeof(ptr);
- /* string length must be less than default_width */
- return string(buf, end, str, spec);
- }
-
-#ifdef CONFIG_64BIT
- hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
- /*
- * Mask off the first 32 bits, this makes explicit that we have
- * modified the address (and 32 bits is plenty for a unique ID).
- */
- hashval = hashval & 0xffffffff;
-#else
- hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
-#endif
- return pointer_string(buf, end, (const void *)hashval, spec);
-}
-
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -1833,17 +1833,15 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
* p page flags (see struct page) given as pointer to unsigned long
* g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
* v vma flags (VM_*) given as pointer to unsigned long
- * - 'O' For a kobject based struct. Must be one of the following:
- * - 'OF[fnpPcCF]' For a device tree object
- * Without any optional arguments prints the full_name
- * f device node full_name
- * n device node name
- * p device node phandle
- * P device node path spec (name + @unit)
- * F device node flags
- * c major compatible string
- * C full compatible string
- *
+ * - 'OF[fnpPcCF]' For a device tree object
+ * Without any optional arguments prints the full_name
+ * f device node full_name
+ * n device node name
+ * p device node phandle
+ * P device node path spec (name + @unit)
+ * F device node flags
+ * c major compatible string
+ * C full compatible string
* - 'x' For printing the address. Equivalent to "%lx".
*
* ** When making changes please also update:
@@ -1944,7 +1942,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
break;
return restricted_pointer(buf, end, ptr, spec);
case 'N':
- return netdev_bits(buf, end, ptr, fmt);
+ return netdev_bits(buf, end, ptr, spec, fmt);
case 'a':
return address_val(buf, end, ptr, fmt);
case 'd':
@@ -2794,7 +2792,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
copy = end - str;
memcpy(str, args, copy);
str += len;
- args += len;
+ args += len + 1;
}
}
if (process)
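Moving ptr_to_id() above its new callers lets unrecognized %pN variants and %pC without CONFIG_COMMON_CLK fall back to a hashed identifier rather than printing the raw address via special_hex_number(). Illustrative only; the printed value is a made-up hash:

#include <linux/printk.h>

struct clk;

static void example_report_clk(struct clk *clk)
{
#ifndef CONFIG_COMMON_CLK
	/* now prints e.g. "clk id 00000000497e001d": a per-boot siphash
	 * of the pointer, not the raw kernel address
	 */
	pr_info("clk id %pC\n", clk);
#endif
}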
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 25a5d87e2e4c..912aae5fa09e 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -15,7 +15,6 @@
* but they are bigger and use more memory for the lookup table.
*/
-#include <linux/crc32poly.h>
#include "xz_private.h"
/*
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index 482b90f363fe..09360ebb510e 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -102,6 +102,10 @@
# endif
#endif
+#ifndef CRC32_POLY_LE
+#define CRC32_POLY_LE 0xedb88320
+#endif
+
/*
* Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
* before calling xz_dec_lzma2_run().
diff --git a/mm/Kconfig b/mm/Kconfig
index a550635ea5c3..de64ea658716 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT
depends on NO_BOOTMEM
depends on SPARSEMEM
depends on !NEED_PER_CPU_KM
+ depends on 64BIT
help
Ordinarily all struct pages are initialised during early boot in a
single thread. On very large machines this can take a considerable
diff --git a/mm/Makefile b/mm/Makefile
index 26ef77a3883b..6485d5745dd7 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -23,9 +23,9 @@ KCOV_INSTRUMENT_vmstat.o := n
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
- mlock.o mmap.o mprotect.o mremap.o msync.o \
- page_vma_mapped.o pagewalk.o pgtable-generic.o \
- rmap.o vmalloc.o
+ mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
+ msync.o page_vma_mapped.o pagewalk.o \
+ pgtable-generic.o rmap.o vmalloc.o
ifdef CONFIG_CROSS_MEMORY_ATTACH
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index 6a473709e9b6..7405c9d89d65 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -19,7 +19,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
struct gup_benchmark *gup)
{
ktime_t start_time, end_time;
- unsigned long i, nr, nr_pages, addr, next;
+ unsigned long i, nr_pages, addr, next;
+ int nr;
struct page **pages;
nr_pages = gup->size / PAGE_SIZE;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 533f9b00147d..deed97fba979 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1780,7 +1780,7 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
+ pmd_t *old_pmd, pmd_t *new_pmd)
{
spinlock_t *old_ptl, *new_ptl;
pmd_t pmd;
@@ -1811,7 +1811,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
- if (pmd_present(pmd) && pmd_dirty(pmd))
+ if (pmd_present(pmd))
force_flush = true;
VM_BUG_ON(!pmd_none(*new_pmd));
@@ -1822,12 +1822,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
}
pmd = move_soft_dirty_pmd(pmd);
set_pmd_at(mm, new_addr, new_pmd, pmd);
- if (new_ptl != old_ptl)
- spin_unlock(new_ptl);
if (force_flush)
flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
- else
- *need_flush = true;
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
spin_unlock(old_ptl);
return true;
}
@@ -2885,9 +2883,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
if (!(pvmw->pmd && !pvmw->pte))
return;
- mmu_notifier_invalidate_range_start(mm, address,
- address + HPAGE_PMD_SIZE);
-
flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
pmdval = *pvmw->pmd;
pmdp_invalidate(vma, address, pvmw->pmd);
@@ -2900,9 +2895,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
set_pmd_at(mm, address, pvmw->pmd, pmdswp);
page_remove_rmap(page, true);
put_page(page);
-
- mmu_notifier_invalidate_range_end(mm, address,
- address + HPAGE_PMD_SIZE);
}
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
@@ -2931,7 +2923,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
else
page_add_file_rmap(new, true);
set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
- if (vma->vm_flags & VM_LOCKED)
+ if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
mlock_vma_page(new);
update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
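The reordering matters: the TLB flush for the old address must complete before the page-table locks are dropped, as sketched below (illustrative ordering, not new code):

/*
 *   pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
 *   set_pmd_at(mm, new_addr, new_pmd, pmd);
 *   flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);  <- while locked
 *   spin_unlock(new_ptl);
 *   spin_unlock(old_ptl);
 *
 * If the unlocks came first, another thread could start using the
 * mapping at new_addr while a stale TLB entry still translated
 * old_addr, briefly exposing the page through two addresses.
 */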
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3c21775f196b..5c390f5a5207 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3326,8 +3326,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct page *page;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
- const unsigned long mmun_start = start; /* For mmu_notifiers */
- const unsigned long mmun_end = end; /* For mmu_notifiers */
+ unsigned long mmun_start = start; /* For mmu_notifiers */
+ unsigned long mmun_end = end; /* For mmu_notifiers */
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
@@ -3339,6 +3339,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
*/
tlb_remove_check_page_size_change(tlb, sz);
tlb_start_vma(tlb, vma);
+
+ /*
+ * If sharing is possible, alert mmu notifiers of the worst case.
+ */
+ adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
address = start;
for (; address < end; address += sz) {
@@ -3349,6 +3354,10 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
ptl = huge_pte_lock(h, mm, ptep);
if (huge_pmd_unshare(mm, &address, ptep)) {
spin_unlock(ptl);
+ /*
+ * We just unmapped a page of PMDs by clearing a PUD.
+ * The caller's TLB flush range should cover this area.
+ */
continue;
}
@@ -3431,12 +3440,23 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
{
struct mm_struct *mm;
struct mmu_gather tlb;
+ unsigned long tlb_start = start;
+ unsigned long tlb_end = end;
+
+ /*
+ * If shared PMDs were possibly used within this vma range, adjust
+ * start/end for worst case tlb flushing.
+ * Note that we cannot be sure if PMDs are shared until we try to
+ * unmap pages. However, we want to make sure TLB flushing covers
+ * the largest possible range.
+ */
+ adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
mm = vma->vm_mm;
- tlb_gather_mmu(&tlb, mm, start, end);
+ tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
- tlb_finish_mmu(&tlb, start, end);
+ tlb_finish_mmu(&tlb, tlb_start, tlb_end);
}
/*
@@ -4298,11 +4318,21 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
pte_t pte;
struct hstate *h = hstate_vma(vma);
unsigned long pages = 0;
+ unsigned long f_start = start;
+ unsigned long f_end = end;
+ bool shared_pmd = false;
+
+ /*
+ * In the case of shared PMDs, the area to flush could be beyond
+ * start/end. Set f_start/f_end to cover the maximum possible
+ * range if PMD sharing is possible.
+ */
+ adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
BUG_ON(address >= end);
- flush_cache_range(vma, address, end);
+ flush_cache_range(vma, f_start, f_end);
- mmu_notifier_invalidate_range_start(mm, start, end);
+ mmu_notifier_invalidate_range_start(mm, f_start, f_end);
i_mmap_lock_write(vma->vm_file->f_mapping);
for (; address < end; address += huge_page_size(h)) {
spinlock_t *ptl;
@@ -4313,6 +4343,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
if (huge_pmd_unshare(mm, &address, ptep)) {
pages++;
spin_unlock(ptl);
+ shared_pmd = true;
continue;
}
pte = huge_ptep_get(ptep);
@@ -4348,9 +4379,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
* may have cleared our pud entry and done put_page on the page table:
* once we release i_mmap_rwsem, another task can do the final put_page
- * and that page table be reused and filled with junk.
+ * and that page table be reused and filled with junk. If we actually
+ * did unshare a page of pmds, flush the range corresponding to the pud.
*/
- flush_hugetlb_tlb_range(vma, start, end);
+ if (shared_pmd)
+ flush_hugetlb_tlb_range(vma, f_start, f_end);
+ else
+ flush_hugetlb_tlb_range(vma, start, end);
/*
* No need to call mmu_notifier_invalidate_range() we are downgrading
* page table protection not changing it to point to a new page.
@@ -4358,7 +4393,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* See Documentation/vm/mmu_notifier.rst
*/
i_mmap_unlock_write(vma->vm_file->f_mapping);
- mmu_notifier_invalidate_range_end(mm, start, end);
+ mmu_notifier_invalidate_range_end(mm, f_start, f_end);
return pages << h->order;
}
@@ -4545,13 +4580,41 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
/*
* check on proper vm_flags and page table alignment
*/
- if (vma->vm_flags & VM_MAYSHARE &&
- vma->vm_start <= base && end <= vma->vm_end)
+ if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
return true;
return false;
}
/*
+ * Determine if the start,end range within vma could be mapped by a
+ * shared pmd.  If yes, adjust start and end to cover the range
+ * associated with possible shared pmd mappings.
+ */
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ unsigned long check_addr = *start;
+
+ if (!(vma->vm_flags & VM_MAYSHARE))
+ return;
+
+ for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
+ unsigned long a_start = check_addr & PUD_MASK;
+ unsigned long a_end = a_start + PUD_SIZE;
+
+ /*
+ * If sharing is possible, adjust start/end if necessary.
+ */
+ if (range_in_vma(vma, a_start, a_end)) {
+ if (a_start < *start)
+ *start = a_start;
+ if (a_end > *end)
+ *end = a_end;
+ }
+ }
+}
+
+/*
* Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
* and returns the corresponding pte. While this is not necessary for the
* !shared pmd case because we can allocate the pmd later as well, it makes the
@@ -4648,6 +4711,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
return 0;
}
+
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
#define want_pmd_share() (0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
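A worked example of the adjustment, assuming x86-64 geometry (PUD_SIZE = 1GB) and a VM_MAYSHARE vma spanning [2GB, 5GB); the concrete addresses are illustrative:

	unsigned long start = 0xa0000000UL;	/* 2.50 GB */
	unsigned long end   = 0xb0000000UL;	/* 2.75 GB */

	adjust_range_if_pmd_sharing_possible(vma, &start, &end);
	/*
	 * start == 0x80000000 (2 GB), end == 0xc0000000 (3 GB): the range
	 * is widened to the enclosing PUD-aligned region, since a shared
	 * PMD page there maps the whole 1 GB and the TLB flush must cover
	 * all of it.
	 */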
diff --git a/mm/maccess.c b/mm/maccess.c
index ec00be51a24f..f3416632e5a4 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -30,8 +30,10 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
set_fs(KERNEL_DS);
pagefault_disable();
+ current->kernel_uaccess_faults_ok++;
ret = __copy_from_user_inatomic(dst,
(__force const void __user *)src, size);
+ current->kernel_uaccess_faults_ok--;
pagefault_enable();
set_fs(old_fs);
@@ -58,7 +60,9 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
set_fs(KERNEL_DS);
pagefault_disable();
+ current->kernel_uaccess_faults_ok++;
ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
+ current->kernel_uaccess_faults_ok--;
pagefault_enable();
set_fs(old_fs);
@@ -94,11 +98,13 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
set_fs(KERNEL_DS);
pagefault_disable();
+ current->kernel_uaccess_faults_ok++;
do {
ret = __get_user(*dst++, (const char __user __force *)src++);
} while (dst[-1] && ret == 0 && src - unsafe_addr < count);
+ current->kernel_uaccess_faults_ok--;
dst[-1] = '\0';
pagefault_enable();
set_fs(old_fs);
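The kernel_uaccess_faults_ok counter tells arch fault handlers that a fault here is expected, so probing an arbitrary address stays quiet. A hedged caller sketch; example_peek() is illustrative:

#include <linux/uaccess.h>

static int example_peek(unsigned long addr, unsigned long *val)
{
	/* returns -EFAULT for a bad address: no oops, no warning */
	return probe_kernel_read(val, (const void *)addr, sizeof(*val));
}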
diff --git a/mm/madvise.c b/mm/madvise.c
index 972a9eaa898b..71d21df2a3f3 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -96,7 +96,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
new_flags |= VM_DONTDUMP;
break;
case MADV_DODUMP:
- if (new_flags & VM_SPECIAL) {
+ if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
error = -EINVAL;
goto out;
}
diff --git a/mm/memory.c b/mm/memory.c
index c467102a5cbc..21a5e6e4758b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -186,253 +186,6 @@ static void check_sync_rss_stat(struct task_struct *task)
#endif /* SPLIT_RSS_COUNTING */
-#ifdef HAVE_GENERIC_MMU_GATHER
-
-static bool tlb_next_batch(struct mmu_gather *tlb)
-{
- struct mmu_gather_batch *batch;
-
- batch = tlb->active;
- if (batch->next) {
- tlb->active = batch->next;
- return true;
- }
-
- if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
- return false;
-
- batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
- if (!batch)
- return false;
-
- tlb->batch_count++;
- batch->next = NULL;
- batch->nr = 0;
- batch->max = MAX_GATHER_BATCH;
-
- tlb->active->next = batch;
- tlb->active = batch;
-
- return true;
-}
-
-void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
-
- /* Is it from 0 to ~0? */
- tlb->fullmm = !(start | (end+1));
- tlb->need_flush_all = 0;
- tlb->local.next = NULL;
- tlb->local.nr = 0;
- tlb->local.max = ARRAY_SIZE(tlb->__pages);
- tlb->active = &tlb->local;
- tlb->batch_count = 0;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb->batch = NULL;
-#endif
- tlb->page_size = 0;
-
- __tlb_reset_range(tlb);
-}
-
-static void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- struct mmu_gather_batch *batch;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb_table_flush(tlb);
-#endif
- for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
- free_pages_and_swap_cache(batch->pages, batch->nr);
- batch->nr = 0;
- }
- tlb->active = &tlb->local;
-}
-
-void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
-
-/* tlb_finish_mmu
- * Called at the end of the shootdown operation to free up any resources
- * that were required.
- */
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- struct mmu_gather_batch *batch, *next;
-
- if (force)
- __tlb_adjust_range(tlb, start, end - start);
-
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-
- for (batch = tlb->local.next; batch; batch = next) {
- next = batch->next;
- free_pages((unsigned long)batch, 0);
- }
- tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- * handling the additional races in SMP caused by other CPUs caching valid
- * mappings in their TLBs. Returns the number of free page slots left.
- * When out of page slots we must call tlb_flush_mmu().
- *returns true if the caller should flush.
- */
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
-{
- struct mmu_gather_batch *batch;
-
- VM_BUG_ON(!tlb->end);
- VM_WARN_ON(tlb->page_size != page_size);
-
- batch = tlb->active;
- /*
- * Add the page and check if we are full. If so
- * force a flush.
- */
- batch->pages[batch->nr++] = page;
- if (batch->nr == batch->max) {
- if (!tlb_next_batch(tlb))
- return true;
- batch = tlb->active;
- }
- VM_BUG_ON_PAGE(batch->nr > batch->max, page);
-
- return false;
-}
-
-#endif /* HAVE_GENERIC_MMU_GATHER */
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
-/*
- * See the comment near struct mmu_table_batch.
- */
-
-/*
- * If we want tlb_remove_table() to imply TLB invalidates.
- */
-static inline void tlb_table_invalidate(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
- /*
- * Invalidate page-table caches used by hardware walkers. Then we still
- * need to RCU-sched wait while freeing the pages because software
- * walkers can still be in-flight.
- */
- tlb_flush_mmu_tlbonly(tlb);
-#endif
-}
-
-static void tlb_remove_table_smp_sync(void *arg)
-{
- /* Simply deliver the interrupt */
-}
-
-static void tlb_remove_table_one(void *table)
-{
- /*
- * This isn't an RCU grace period and hence the page-tables cannot be
- * assumed to be actually RCU-freed.
- *
- * It is however sufficient for software page-table walkers that rely on
- * IRQ disabling. See the comment near struct mmu_table_batch.
- */
- smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
- __tlb_remove_table(table);
-}
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
- struct mmu_table_batch *batch;
- int i;
-
- batch = container_of(head, struct mmu_table_batch, rcu);
-
- for (i = 0; i < batch->nr; i++)
- __tlb_remove_table(batch->tables[i]);
-
- free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
- struct mmu_table_batch **batch = &tlb->batch;
-
- if (*batch) {
- tlb_table_invalidate(tlb);
- call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
- *batch = NULL;
- }
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
- struct mmu_table_batch **batch = &tlb->batch;
-
- if (*batch == NULL) {
- *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
- if (*batch == NULL) {
- tlb_table_invalidate(tlb);
- tlb_remove_table_one(table);
- return;
- }
- (*batch)->nr = 0;
- }
-
- (*batch)->tables[(*batch)->nr++] = table;
- if ((*batch)->nr == MAX_TABLE_BATCH)
- tlb_table_flush(tlb);
-}
-
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-
-/**
- * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
- * @tlb: the mmu_gather structure to initialize
- * @mm: the mm_struct of the target address space
- * @start: start of the region that will be removed from the page-table
- * @end: end of the region that will be removed from the page-table
- *
- * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm. The @start and @end are set to 0 and -1
- * respectively when @mm is without users and we're going to destroy
- * the full address space (exit/execve).
- */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- arch_tlb_gather_mmu(tlb, mm, start, end);
- inc_tlb_flush_pending(tlb->mm);
-}
-
-void tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end)
-{
- /*
- * If there are parallel threads are doing PTE changes on same range
- * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
- * flush by batching, a thread has stable TLB entry can fail to flush
- * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
- * forcefully if we detect parallel PTE batching threads.
- */
- bool force = mm_tlb_flush_nested(tlb->mm);
-
- arch_tlb_finish_mmu(tlb, start, end, force);
- dec_tlb_flush_pending(tlb->mm);
-}
-
/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
diff --git a/mm/migrate.c b/mm/migrate.c
index d6a2e89b086a..84381b55b2bd 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -275,6 +275,9 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
mlock_vma_page(new);
+ if (PageTransHuge(page) && PageMlocked(page))
+ clear_page_mlock(page);
+
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, pvmw.address, pvmw.pte);
}
@@ -1411,7 +1414,7 @@ retry:
* we encounter them after the rest of the list
* is processed.
*/
- if (PageTransHuge(page)) {
+ if (PageTransHuge(page) && !PageHuge(page)) {
lock_page(page);
rc = split_huge_page_to_list(page, from);
unlock_page(page);
@@ -1855,46 +1858,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
return newpage;
}
-/*
- * page migration rate limiting control.
- * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
- * window of time. Default here says do not migrate more than 1280M per second.
- */
-static unsigned int migrate_interval_millisecs __read_mostly = 100;
-static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
-
-/* Returns true if the node is migrate rate-limited after the update */
-static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
- unsigned long nr_pages)
-{
- /*
- * Rate-limit the amount of data that is being migrated to a node.
- * Optimal placement is no good if the memory bus is saturated and
- * all the time is being spent migrating!
- */
- if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
- spin_lock(&pgdat->numabalancing_migrate_lock);
- pgdat->numabalancing_migrate_nr_pages = 0;
- pgdat->numabalancing_migrate_next_window = jiffies +
- msecs_to_jiffies(migrate_interval_millisecs);
- spin_unlock(&pgdat->numabalancing_migrate_lock);
- }
- if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
- trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
- nr_pages);
- return true;
- }
-
- /*
- * This is an unlocked non-atomic update so errors are possible.
- * The consequences are failing to migrate when we potentiall should
- * have which is not severe enough to warrant locking. If it is ever
- * a problem, it can be converted to a per-cpu counter.
- */
- pgdat->numabalancing_migrate_nr_pages += nr_pages;
- return false;
-}
-
static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
int page_lru;
@@ -1967,14 +1930,6 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
if (page_is_file_cache(page) && PageDirty(page))
goto out;
- /*
- * Rate-limit the amount of data that is being migrated to a node.
- * Optimal placement is no good if the memory bus is saturated and
- * all the time is being spent migrating!
- */
- if (numamigrate_update_ratelimit(pgdat, 1))
- goto out;
-
isolated = numamigrate_isolate_page(pgdat, page);
if (!isolated)
goto out;
@@ -2021,14 +1976,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
unsigned long mmun_start = address & HPAGE_PMD_MASK;
unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
- /*
- * Rate-limit the amount of data that is being migrated to a node.
- * Optimal placement is no good if the memory bus is saturated and
- * all the time is being spent migrating!
- */
- if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
- goto out_dropref;
-
new_page = alloc_pages_node(node,
(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
HPAGE_PMD_ORDER);
@@ -2125,7 +2072,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
out_fail:
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-out_dropref:
ptl = pmd_lock(mm, pmd);
if (pmd_same(*pmd, entry)) {
entry = pmd_modify(entry, vma->vm_page_prot);
diff --git a/mm/mmap.c b/mm/mmap.c
index 5f2b2b184c60..f7cd9cb966c0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1410,7 +1410,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
if (flags & MAP_FIXED_NOREPLACE) {
struct vm_area_struct *vma = find_vma(mm, addr);
- if (vma && vma->vm_start <= addr)
+ if (vma && vma->vm_start < addr + len)
return -EEXIST;
}
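
For illustration, a minimal userspace sketch (not part of the patch; the layout is ours) of the case the corrected check catches: find_vma() returns the first VMA with vm_end > addr, so the old `vm_start <= addr` test missed a MAP_FIXED_NOREPLACE request that starts below an existing VMA but overlaps its tail.

	#define _GNU_SOURCE
	#include <errno.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);

		/* Map two pages, then punch a hole in the first one. */
		char *base = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (base == MAP_FAILED)
			return 1;
		munmap(base, page);

		/* Starts in the hole but overlaps the surviving second page:
		 * with the fix this fails with EEXIST instead of silently
		 * clobbering the existing mapping.
		 */
		void *p = mmap(base, 2 * page, PROT_READ,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
			       -1, 0);
		if (p == MAP_FAILED && errno == EEXIST)
			puts("overlap correctly rejected");
		return 0;
	}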
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
new file mode 100644
index 000000000000..2a9fbc4a37d5
--- /dev/null
+++ b/mm/mmu_gather.c
@@ -0,0 +1,261 @@
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mmdebug.h>
+#include <linux/mm_types.h>
+#include <linux/pagemap.h>
+#include <linux/rcupdate.h>
+#include <linux/smp.h>
+#include <linux/swap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+
+static bool tlb_next_batch(struct mmu_gather *tlb)
+{
+ struct mmu_gather_batch *batch;
+
+ batch = tlb->active;
+ if (batch->next) {
+ tlb->active = batch->next;
+ return true;
+ }
+
+ if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
+ return false;
+
+ batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+ if (!batch)
+ return false;
+
+ tlb->batch_count++;
+ batch->next = NULL;
+ batch->nr = 0;
+ batch->max = MAX_GATHER_BATCH;
+
+ tlb->active->next = batch;
+ tlb->active = batch;
+
+ return true;
+}
+
+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ tlb->mm = mm;
+
+ /* Is it from 0 to ~0? */
+ tlb->fullmm = !(start | (end+1));
+ tlb->need_flush_all = 0;
+ tlb->local.next = NULL;
+ tlb->local.nr = 0;
+ tlb->local.max = ARRAY_SIZE(tlb->__pages);
+ tlb->active = &tlb->local;
+ tlb->batch_count = 0;
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ tlb->batch = NULL;
+#endif
+ tlb->page_size = 0;
+
+ __tlb_reset_range(tlb);
+}
+
+void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+ struct mmu_gather_batch *batch;
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ tlb_table_flush(tlb);
+#endif
+ for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
+ free_pages_and_swap_cache(batch->pages, batch->nr);
+ batch->nr = 0;
+ }
+ tlb->active = &tlb->local;
+}
+
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+ tlb_flush_mmu_tlbonly(tlb);
+ tlb_flush_mmu_free(tlb);
+}
+
+/* tlb_finish_mmu
+ * Called at the end of the shootdown operation to free up any resources
+ * that were required.
+ */
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
+{
+ struct mmu_gather_batch *batch, *next;
+
+ if (force) {
+ __tlb_reset_range(tlb);
+ __tlb_adjust_range(tlb, start, end - start);
+ }
+
+ tlb_flush_mmu(tlb);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+
+ for (batch = tlb->local.next; batch; batch = next) {
+ next = batch->next;
+ free_pages((unsigned long)batch, 0);
+ }
+ tlb->local.next = NULL;
+}
+
+/* __tlb_remove_page
+ * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
+ * handling the additional races in SMP caused by other CPUs caching valid
+ * mappings in their TLBs. Returns true if the caller should flush;
+ * when the batch runs out of page slots, tlb_flush_mmu() must be
+ * called.
+ */
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
+{
+ struct mmu_gather_batch *batch;
+
+ VM_BUG_ON(!tlb->end);
+ VM_WARN_ON(tlb->page_size != page_size);
+
+ batch = tlb->active;
+ /*
+ * Add the page and check if we are full. If so
+ * force a flush.
+ */
+ batch->pages[batch->nr++] = page;
+ if (batch->nr == batch->max) {
+ if (!tlb_next_batch(tlb))
+ return true;
+ batch = tlb->active;
+ }
+ VM_BUG_ON_PAGE(batch->nr > batch->max, page);
+
+ return false;
+}
+
+#endif /* HAVE_GENERIC_MMU_GATHER */
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+/*
+ * See the comment near struct mmu_table_batch.
+ */
+
+/*
+ * If we want tlb_remove_table() to imply TLB invalidates.
+ */
+static inline void tlb_table_invalidate(struct mmu_gather *tlb)
+{
+#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
+ /*
+ * Invalidate page-table caches used by hardware walkers. Then we still
+ * need to RCU-sched wait while freeing the pages because software
+ * walkers can still be in-flight.
+ */
+ tlb_flush_mmu_tlbonly(tlb);
+#endif
+}
+
+static void tlb_remove_table_smp_sync(void *arg)
+{
+ /* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+ /*
+ * This isn't an RCU grace period and hence the page-tables cannot be
+ * assumed to be actually RCU-freed.
+ *
+ * It is however sufficient for software page-table walkers that rely on
+ * IRQ disabling. See the comment near struct mmu_table_batch.
+ */
+ smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+ __tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+ struct mmu_table_batch *batch;
+ int i;
+
+ batch = container_of(head, struct mmu_table_batch, rcu);
+
+ for (i = 0; i < batch->nr; i++)
+ __tlb_remove_table(batch->tables[i]);
+
+ free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+ struct mmu_table_batch **batch = &tlb->batch;
+
+ if (*batch) {
+ tlb_table_invalidate(tlb);
+ call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+ *batch = NULL;
+ }
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ struct mmu_table_batch **batch = &tlb->batch;
+
+ if (*batch == NULL) {
+ *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+ if (*batch == NULL) {
+ tlb_table_invalidate(tlb);
+ tlb_remove_table_one(table);
+ return;
+ }
+ (*batch)->nr = 0;
+ }
+
+ (*batch)->tables[(*batch)->nr++] = table;
+ if ((*batch)->nr == MAX_TABLE_BATCH)
+ tlb_table_flush(tlb);
+}
+
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
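
As a usage sketch (the helper name my_pte_free_tlb is hypothetical; real architectures wire this up through their pgalloc hooks), an architecture that frees page-table pages through the RCU batch would do something like:

	/* Queue a page-table page for deferred freeing. tlb_remove_table()
	 * batches it and frees it after an RCU-sched grace period (or via
	 * the IPI fallback above when the batch cannot be allocated), so
	 * IRQ-disabled software walkers never see the page recycled.
	 */
	static inline void my_pte_free_tlb(struct mmu_gather *tlb, void *table)
	{
		tlb_remove_table(tlb, table);
	}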
+
+/**
+ * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ * @start: start of the region that will be removed from the page-table
+ * @end: end of the region that will be removed from the page-table
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm. The @start and @end are set to 0 and -1
+ * respectively when @mm is without users and we're going to destroy
+ * the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ arch_tlb_gather_mmu(tlb, mm, start, end);
+ inc_tlb_flush_pending(tlb->mm);
+}
+
+void tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end)
+{
+ /*
+ * If parallel threads are doing PTE changes on the same range under a
+ * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB
+ * flush by batching, a thread that observes pte_none or !pte_dirty
+ * can skip the flush even though another CPU still holds a stale TLB
+ * entry, so flush the TLB forcefully if we detect parallel PTE
+ * batching threads.
+ */
+ bool force = mm_tlb_flush_nested(tlb->mm);
+
+ arch_tlb_finish_mmu(tlb, start, end, force);
+ dec_tlb_flush_pending(tlb->mm);
+}
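
For orientation, a sketch of the caller-side lifecycle these functions implement (condensed from how the unmap paths drive the API; the loop body is schematic):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* raises mm's pending-flush count */
	/* For each present PTE torn down in [start, end):
	 *	pte = ptep_get_and_clear(mm, addr, ptep);
	 *	if (__tlb_remove_page_size(&tlb, page, PAGE_SIZE))
	 *		tlb_flush_mmu(&tlb);	 batch full: flush TLB, free pages
	 */
	tlb_finish_mmu(&tlb, start, end);	/* final flush and batch teardown */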
diff --git a/mm/mremap.c b/mm/mremap.c
index 5c2e18505f75..a9617e72e6b7 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -115,7 +115,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
unsigned long old_addr, unsigned long old_end,
struct vm_area_struct *new_vma, pmd_t *new_pmd,
- unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
+ unsigned long new_addr, bool need_rmap_locks)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *old_pte, *new_pte, pte;
@@ -163,15 +163,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
pte = ptep_get_and_clear(mm, old_addr, old_pte);
/*
- * If we are remapping a dirty PTE, make sure
+ * If we are remapping a valid PTE, make sure
* to flush TLB before we drop the PTL for the
- * old PTE or we may race with page_mkclean().
+ * PTE.
*
- * This check has to be done after we removed the
- * old PTE from page tables or another thread may
- * dirty it after the check and before the removal.
+ * NOTE! Both old and new PTL matter: the old one
+ * for racing with page_mkclean(), the new one to
+ * make sure the physical page stays valid until
+ * the TLB entry for the old mapping has been
+ * flushed.
*/
- if (pte_present(pte) && pte_dirty(pte))
+ if (pte_present(pte))
force_flush = true;
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
pte = move_soft_dirty_pte(pte);
@@ -179,13 +181,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
}
arch_leave_lazy_mmu_mode();
+ if (force_flush)
+ flush_tlb_range(vma, old_end - len, old_end);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
pte_unmap(new_pte - 1);
- if (force_flush)
- flush_tlb_range(vma, old_end - len, old_end);
- else
- *need_flush = true;
pte_unmap_unlock(old_pte - 1, old_ptl);
if (need_rmap_locks)
drop_rmap_locks(vma);
@@ -198,7 +198,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
{
unsigned long extent, next, old_end;
pmd_t *old_pmd, *new_pmd;
- bool need_flush = false;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
@@ -229,8 +228,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (need_rmap_locks)
take_rmap_locks(vma);
moved = move_huge_pmd(vma, old_addr, new_addr,
- old_end, old_pmd, new_pmd,
- &need_flush);
+ old_end, old_pmd, new_pmd);
if (need_rmap_locks)
drop_rmap_locks(vma);
if (moved)
@@ -246,10 +244,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (extent > next - new_addr)
extent = next - new_addr;
move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
- new_pmd, new_addr, need_rmap_locks, &need_flush);
+ new_pmd, new_addr, need_rmap_locks);
}
- if (need_flush)
- flush_tlb_range(vma, old_end-len, old_addr);
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f10aa5360616..6589f60d5018 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -859,7 +859,7 @@ static void __oom_kill_process(struct task_struct *victim)
* in order to prevent the OOM victim from depleting the memory
* reserves from the user space under its control.
*/
- do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, PIDTYPE_TGID);
+ do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
mark_oom_victim(victim);
pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
@@ -897,7 +897,7 @@ static void __oom_kill_process(struct task_struct *victim)
*/
if (unlikely(p->flags & PF_KTHREAD))
continue;
- do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, PIDTYPE_TGID);
+ do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
}
rcu_read_unlock();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 89d2a2ab3fe6..e2ef1c17942f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6193,17 +6193,6 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
-#ifdef CONFIG_NUMA_BALANCING
-static void pgdat_init_numabalancing(struct pglist_data *pgdat)
-{
- spin_lock_init(&pgdat->numabalancing_migrate_lock);
- pgdat->numabalancing_migrate_nr_pages = 0;
- pgdat->numabalancing_migrate_next_window = jiffies;
-}
-#else
-static void pgdat_init_numabalancing(struct pglist_data *pgdat) {}
-#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
@@ -6228,7 +6217,6 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{
pgdat_resize_init(pgdat);
- pgdat_init_numabalancing(pgdat);
pgdat_init_split_queue(pgdat);
pgdat_init_kcompactd(pgdat);
diff --git a/mm/page_io.c b/mm/page_io.c
index aafd19ec1db4..573d3663d846 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -339,7 +339,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
goto out;
}
bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
- bio_associate_blkcg_from_page(bio, page);
+ bio_associate_blkg_from_page(bio, page);
count_swpout_vm_event(page);
set_page_writeback(page);
unlock_page(page);
diff --git a/mm/percpu.c b/mm/percpu.c
index a749d4d96e3e..4b90682623e9 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1212,6 +1212,7 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
if (!chunk)
return;
+ pcpu_mem_free(chunk->md_blocks);
pcpu_mem_free(chunk->bound_map);
pcpu_mem_free(chunk->alloc_map);
pcpu_mem_free(chunk);
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index cf2af04b34b9..532c29276fce 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -8,6 +8,7 @@
*/
#include <linux/pagemap.h>
+#include <linux/hugetlb.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>
diff --git a/mm/rmap.c b/mm/rmap.c
index eb477809a5c0..1e79fac3186b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1362,11 +1362,21 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
/*
- * We have to assume the worse case ie pmd for invalidation. Note that
- * the page can not be free in this function as call of try_to_unmap()
- * must hold a reference on the page.
+ * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
+ * For hugetlb, it could be much worse if we need to do pud
+ * invalidation in the case of pmd sharing.
+ *
+ * Note that the page cannot be freed in this function, as the caller
+ * of try_to_unmap() must hold a reference on the page.
*/
end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+ if (PageHuge(page)) {
+ /*
+ * If sharing is possible, start and end will be adjusted
+ * accordingly.
+ */
+ adjust_range_if_pmd_sharing_possible(vma, &start, &end);
+ }
mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
while (page_vma_mapped_walk(&pvmw)) {
@@ -1409,6 +1419,32 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
address = pvmw.address;
+ if (PageHuge(page)) {
+ if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
+ /*
+ * huge_pmd_unshare unmapped an entire PMD
+ * page. There is no way of knowing exactly
+ * which PMDs may be cached for this mm, so
+ * we must flush them all. start/end were
+ * already adjusted above to cover this range.
+ */
+ flush_cache_range(vma, start, end);
+ flush_tlb_range(vma, start, end);
+ mmu_notifier_invalidate_range(mm, start, end);
+
+ /*
+ * The ref count of the PMD page was dropped
+ * which is part of the way map counting
+ * is done for shared PMDs. Return 'true'
+ * here. When there is no other sharing,
+ * huge_pmd_unshare returns false and we will
+ * unmap the actual page and drop map count
+ * to zero.
+ */
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+ }
if (IS_ENABLED(CONFIG_MIGRATION) &&
(flags & TTU_MIGRATION) &&
diff --git a/mm/shmem.c b/mm/shmem.c
index 0376c124b043..446942677cd4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2227,6 +2227,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
mpol_shared_policy_init(&info->policy, NULL);
break;
}
+
+ lockdep_annotate_inode_mutex_key(inode);
} else
shmem_free_inode(sb);
return inode;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7e7d25504651..c5ef7240cbcb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -476,6 +476,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
delta = freeable >> priority;
delta *= 4;
do_div(delta, shrinker->seeks);
+
+ /*
+ * Make sure we apply some minimal pressure on default priority
+ * even on small cgroups. Stale objects are not only consuming memory
+ * by themselves, but can also hold a reference to a dying cgroup,
+ * preventing it from being reclaimed. A dying cgroup with all
+ * corresponding structures like per-cpu stats and kmem caches
+ * can be really big, so it may lead to a significant waste of memory.
+ */
+ delta = max_t(unsigned long long, delta, min(freeable, batch_size));
+
total_scan += delta;
if (total_scan < 0) {
pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
@@ -569,8 +580,8 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
struct mem_cgroup *memcg, int priority)
{
struct memcg_shrinker_map *map;
- unsigned long freed = 0;
- int ret, i;
+ unsigned long ret, freed = 0;
+ int i;
if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
return 0;
@@ -666,9 +677,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
struct mem_cgroup *memcg,
int priority)
{
+ unsigned long ret, freed = 0;
struct shrinker *shrinker;
- unsigned long freed = 0;
- int ret;
if (!mem_cgroup_is_root(memcg))
return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8ba0870ecddd..7878da76abf2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1275,6 +1275,9 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_SMP
"nr_tlb_remote_flush",
"nr_tlb_remote_flush_received",
+#else
+ "", /* nr_tlb_remote_flush */
+ "", /* nr_tlb_remote_flush_received */
#endif /* CONFIG_SMP */
"nr_tlb_local_flush_all",
"nr_tlb_local_flush_one",
@@ -1283,7 +1286,6 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_DEBUG_VM_VMACACHE
"vmacache_find_calls",
"vmacache_find_hits",
- "vmacache_full_flushes",
#endif
#ifdef CONFIG_SWAP
"swap_ra",
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 546af0e73ac3..ff720f1ebf73 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -756,8 +756,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
return;
vlan->netpoll = NULL;
-
- __netpoll_free_async(netpoll);
+ __netpoll_free(netpoll);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
diff --git a/net/Kconfig b/net/Kconfig
index 228dfa382eec..f235edb593ba 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -300,8 +300,11 @@ config BPF_JIT
config BPF_STREAM_PARSER
bool "enable BPF STREAM_PARSER"
+ depends on INET
depends on BPF_SYSCALL
+ depends on CGROUP_BPF
select STREAM_PARSER
+ select NET_SOCK_MSG
---help---
Enabling this allows a stream parser to be used with
BPF_MAP_TYPE_SOCKMAP.
@@ -413,6 +416,14 @@ config GRO_CELLS
config SOCK_VALIDATE_XMIT
bool
+config NET_SOCK_MSG
+ bool
+ default n
+ help
+ NET_SOCK_MSG provides a framework for plain sockets (e.g. TCP) or
+ ULPs (upper layer modules, e.g. TLS) to process L7 application data
+ with the help of BPF programs.
+
config NET_DEVLINK
tristate "Network physical/parent device Netlink interface"
help
diff --git a/net/atm/common.c b/net/atm/common.c
index 9f8cb0d2e71e..a38c174fc766 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -653,7 +653,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
struct atm_vcc *vcc;
__poll_t mask;
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
mask = 0;
vcc = ATM_SD(sock);
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 361116f77cb9..f75816f58107 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -106,3 +106,14 @@ config BATMAN_ADV_DEBUG
say N here. This enables compilation of support for
outputting debugging information to the kernel log. The
output is controlled via the module parameter debug.
+
+config BATMAN_ADV_TRACING
+ bool "B.A.T.M.A.N. tracing support"
+ depends on BATMAN_ADV
+ depends on EVENT_TRACING
+ help
+ This is an option for use by developers; most people should
+ say N here. Select this option to gather traces like the debug
+ messages using the generic tracing infrastructure of the kernel.
+ BATMAN_ADV_DEBUG must also be selected to get trace events for
+ batadv_dbg.
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index b97ba6fb8353..9b58160fe485 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -42,6 +42,9 @@ batman-adv-y += routing.o
batman-adv-y += send.o
batman-adv-y += soft-interface.o
batman-adv-y += sysfs.o
+batman-adv-$(CONFIG_BATMAN_ADV_TRACING) += trace.o
batman-adv-y += tp_meter.o
batman-adv-y += translation-table.o
batman-adv-y += tvlv.o
+
+CFLAGS_trace.o := -I$(src)
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 73bf6a93a3cf..d2227091029f 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -138,169 +138,6 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
}
/**
- * batadv_iv_ogm_orig_free() - free the private resources allocated for this
- * orig_node
- * @orig_node: the orig_node for which the resources have to be free'd
- */
-static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
-{
- kfree(orig_node->bat_iv.bcast_own);
- kfree(orig_node->bat_iv.bcast_own_sum);
-}
-
-/**
- * batadv_iv_ogm_orig_add_if() - change the private structures of the orig_node
- * to include the new hard-interface
- * @orig_node: the orig_node that has to be changed
- * @max_if_num: the current amount of interfaces
- *
- * Return: 0 on success, a negative error code otherwise.
- */
-static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
- unsigned int max_if_num)
-{
- void *data_ptr;
- size_t old_size;
- int ret = -ENOMEM;
-
- spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
-
- old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
- data_ptr = kmalloc_array(max_if_num,
- BATADV_NUM_WORDS * sizeof(unsigned long),
- GFP_ATOMIC);
- if (!data_ptr)
- goto unlock;
-
- memcpy(data_ptr, orig_node->bat_iv.bcast_own, old_size);
- kfree(orig_node->bat_iv.bcast_own);
- orig_node->bat_iv.bcast_own = data_ptr;
-
- data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
- if (!data_ptr)
- goto unlock;
-
- memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
- (max_if_num - 1) * sizeof(u8));
- kfree(orig_node->bat_iv.bcast_own_sum);
- orig_node->bat_iv.bcast_own_sum = data_ptr;
-
- ret = 0;
-
-unlock:
- spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
-
- return ret;
-}
-
-/**
- * batadv_iv_ogm_drop_bcast_own_entry() - drop section of bcast_own
- * @orig_node: the orig_node that has to be changed
- * @max_if_num: the current amount of interfaces
- * @del_if_num: the index of the interface being removed
- */
-static void
-batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
- unsigned int max_if_num,
- unsigned int del_if_num)
-{
- size_t chunk_size;
- size_t if_offset;
- void *data_ptr;
-
- lockdep_assert_held(&orig_node->bat_iv.ogm_cnt_lock);
-
- chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
- data_ptr = kmalloc_array(max_if_num, chunk_size, GFP_ATOMIC);
- if (!data_ptr)
- /* use old buffer when new one could not be allocated */
- data_ptr = orig_node->bat_iv.bcast_own;
-
- /* copy first part */
- memmove(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
-
- /* copy second part */
- if_offset = (del_if_num + 1) * chunk_size;
- memmove((char *)data_ptr + del_if_num * chunk_size,
- (uint8_t *)orig_node->bat_iv.bcast_own + if_offset,
- (max_if_num - del_if_num) * chunk_size);
-
- /* bcast_own was shrunk down in new buffer; free old one */
- if (orig_node->bat_iv.bcast_own != data_ptr) {
- kfree(orig_node->bat_iv.bcast_own);
- orig_node->bat_iv.bcast_own = data_ptr;
- }
-}
-
-/**
- * batadv_iv_ogm_drop_bcast_own_sum_entry() - drop section of bcast_own_sum
- * @orig_node: the orig_node that has to be changed
- * @max_if_num: the current amount of interfaces
- * @del_if_num: the index of the interface being removed
- */
-static void
-batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
- unsigned int max_if_num,
- unsigned int del_if_num)
-{
- size_t if_offset;
- void *data_ptr;
-
- lockdep_assert_held(&orig_node->bat_iv.ogm_cnt_lock);
-
- data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
- if (!data_ptr)
- /* use old buffer when new one could not be allocated */
- data_ptr = orig_node->bat_iv.bcast_own_sum;
-
- memmove(data_ptr, orig_node->bat_iv.bcast_own_sum,
- del_if_num * sizeof(u8));
-
- if_offset = (del_if_num + 1) * sizeof(u8);
- memmove((char *)data_ptr + del_if_num * sizeof(u8),
- orig_node->bat_iv.bcast_own_sum + if_offset,
- (max_if_num - del_if_num) * sizeof(u8));
-
- /* bcast_own_sum was shrunk down in new buffer; free old one */
- if (orig_node->bat_iv.bcast_own_sum != data_ptr) {
- kfree(orig_node->bat_iv.bcast_own_sum);
- orig_node->bat_iv.bcast_own_sum = data_ptr;
- }
-}
-
-/**
- * batadv_iv_ogm_orig_del_if() - change the private structures of the orig_node
- * to exclude the removed interface
- * @orig_node: the orig_node that has to be changed
- * @max_if_num: the current amount of interfaces
- * @del_if_num: the index of the interface being removed
- *
- * Return: 0 on success, a negative error code otherwise.
- */
-static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
- unsigned int max_if_num,
- unsigned int del_if_num)
-{
- spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
-
- if (max_if_num == 0) {
- kfree(orig_node->bat_iv.bcast_own);
- kfree(orig_node->bat_iv.bcast_own_sum);
- orig_node->bat_iv.bcast_own = NULL;
- orig_node->bat_iv.bcast_own_sum = NULL;
- } else {
- batadv_iv_ogm_drop_bcast_own_entry(orig_node, max_if_num,
- del_if_num);
- batadv_iv_ogm_drop_bcast_own_sum_entry(orig_node, max_if_num,
- del_if_num);
- }
-
- spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
-
- return 0;
-}
-
-/**
* batadv_iv_ogm_orig_get() - retrieve or create (if does not exist) an
* originator
* @bat_priv: the bat priv with all the soft interface information
@@ -315,7 +152,6 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
{
struct batadv_orig_node *orig_node;
int hash_added;
- size_t size;
orig_node = batadv_orig_hash_find(bat_priv, addr);
if (orig_node)
@@ -327,16 +163,6 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
spin_lock_init(&orig_node->bat_iv.ogm_cnt_lock);
- size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
- orig_node->bat_iv.bcast_own = kzalloc(size, GFP_ATOMIC);
- if (!orig_node->bat_iv.bcast_own)
- goto free_orig_node;
-
- size = bat_priv->num_ifaces * sizeof(u8);
- orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
- if (!orig_node->bat_iv.bcast_own_sum)
- goto free_orig_node;
-
kref_get(&orig_node->refcount);
hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
batadv_choose_orig, orig_node,
@@ -347,8 +173,9 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
return orig_node;
free_orig_node_hash:
+ /* reference for batadv_hash_add */
batadv_orig_node_put(orig_node);
-free_orig_node:
+ /* reference from batadv_orig_node_new */
batadv_orig_node_put(orig_node);
return NULL;
@@ -893,26 +720,30 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
struct batadv_orig_node *orig_node;
+ struct batadv_orig_ifinfo *orig_ifinfo;
unsigned long *word;
u32 i;
- size_t word_index;
u8 *w;
- unsigned int if_num;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
- word_index = hard_iface->if_num * BATADV_NUM_WORDS;
- word = &orig_node->bat_iv.bcast_own[word_index];
-
- batadv_bit_get_packet(bat_priv, word, 1, 0);
- if_num = hard_iface->if_num;
- w = &orig_node->bat_iv.bcast_own_sum[if_num];
- *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
- spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ hlist_for_each_entry_rcu(orig_ifinfo,
+ &orig_node->ifinfo_list,
+ list) {
+ if (orig_ifinfo->if_outgoing != hard_iface)
+ continue;
+
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ word = orig_ifinfo->bat_iv.bcast_own;
+ batadv_bit_get_packet(bat_priv, word, 1, 0);
+ w = &orig_ifinfo->bat_iv.bcast_own_sum;
+ *w = bitmap_weight(word,
+ BATADV_TQ_LOCAL_WINDOW_SIZE);
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ }
}
rcu_read_unlock();
}
@@ -1000,6 +831,35 @@ out:
}
/**
+ * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface
+ * @orig_node: originator which rebroadcasted the OGMs directly
+ * @if_outgoing: interface which transmitted the original OGM and received the
+ * direct rebroadcast
+ *
+ * Return: Number of replied (rebroadcasted) OGMs which were transmitted by
+ * an originator and directly (without intermediate hop) received by a specific
+ * interface
+ */
+static u8 batadv_iv_orig_ifinfo_sum(struct batadv_orig_node *orig_node,
+ struct batadv_hard_iface *if_outgoing)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ u8 sum;
+
+ orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
+ if (!orig_ifinfo)
+ return 0;
+
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ sum = orig_ifinfo->bat_iv.bcast_own_sum;
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+ batadv_orig_ifinfo_put(orig_ifinfo);
+
+ return sum;
+}
+
+/**
* batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an
* originator
* @bat_priv: the bat priv with all the soft interface information
@@ -1026,8 +886,6 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
struct batadv_neigh_node *neigh_node = NULL;
struct batadv_neigh_node *tmp_neigh_node = NULL;
struct batadv_neigh_node *router = NULL;
- struct batadv_orig_node *orig_node_tmp;
- unsigned int if_num;
u8 sum_orig, sum_neigh;
u8 *neigh_addr;
u8 tq_avg;
@@ -1132,18 +990,10 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
*/
if (router_ifinfo &&
neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) {
- orig_node_tmp = router->orig_node;
- spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
- if_num = router->if_incoming->if_num;
- sum_orig = orig_node_tmp->bat_iv.bcast_own_sum[if_num];
- spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
-
- orig_node_tmp = neigh_node->orig_node;
- spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
- if_num = neigh_node->if_incoming->if_num;
- sum_neigh = orig_node_tmp->bat_iv.bcast_own_sum[if_num];
- spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
-
+ sum_orig = batadv_iv_orig_ifinfo_sum(router->orig_node,
+ router->if_incoming);
+ sum_neigh = batadv_iv_orig_ifinfo_sum(neigh_node->orig_node,
+ neigh_node->if_incoming);
if (sum_orig >= sum_neigh)
goto out;
}
@@ -1186,7 +1036,6 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
u8 total_count;
u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
- unsigned int if_num;
unsigned int tq_asym_penalty, inv_asym_penalty;
unsigned int combined_tq;
unsigned int tq_iface_penalty;
@@ -1227,9 +1076,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
orig_node->last_seen = jiffies;
/* find packet count of corresponding one hop neighbor */
- spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
- if_num = if_incoming->if_num;
- orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
+ orig_eq_count = batadv_iv_orig_ifinfo_sum(orig_neigh_node, if_incoming);
neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
if (neigh_ifinfo) {
neigh_rq_count = neigh_ifinfo->bat_iv.real_packet_count;
@@ -1237,7 +1084,6 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
} else {
neigh_rq_count = 0;
}
- spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
/* pay attention to not get a value bigger than 100 % */
if (orig_eq_count > neigh_rq_count)
@@ -1622,6 +1468,49 @@ out:
}
/**
+ * batadv_iv_ogm_process_reply() - Check OGM for direct reply and process it
+ * @ogm_packet: rebroadcast OGM packet to process
+ * @if_incoming: the interface where this packet was received
+ * @orig_node: originator which rebroadcasted the OGMs
+ * @if_incoming_seqno: OGM sequence number when rebroadcast was received
+ */
+static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet,
+ struct batadv_hard_iface *if_incoming,
+ struct batadv_orig_node *orig_node,
+ u32 if_incoming_seqno)
+{
+ struct batadv_orig_ifinfo *orig_ifinfo;
+ s32 bit_pos;
+ u8 *weight;
+
+ /* neighbor has to indicate direct link and it has to
+ * come via the corresponding interface
+ */
+ if (!(ogm_packet->flags & BATADV_DIRECTLINK))
+ return;
+
+ if (!batadv_compare_eth(if_incoming->net_dev->dev_addr,
+ ogm_packet->orig))
+ return;
+
+ orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_incoming);
+ if (!orig_ifinfo)
+ return;
+
+ /* save packet seqno for bidirectional check */
+ spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ bit_pos = if_incoming_seqno - 2;
+ bit_pos -= ntohl(ogm_packet->seqno);
+ batadv_set_bit(orig_ifinfo->bat_iv.bcast_own, bit_pos);
+ weight = &orig_ifinfo->bat_iv.bcast_own_sum;
+ *weight = bitmap_weight(orig_ifinfo->bat_iv.bcast_own,
+ BATADV_TQ_LOCAL_WINDOW_SIZE);
+ spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+ batadv_orig_ifinfo_put(orig_ifinfo);
+}
+
+/**
* batadv_iv_ogm_process() - process an incoming batman iv OGM
* @skb: the skb containing the OGM
* @ogm_offset: offset to the OGM which should be processed (for aggregates)
@@ -1705,37 +1594,13 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
}
if (is_my_orig) {
- unsigned long *word;
- size_t offset;
- s32 bit_pos;
- unsigned int if_num;
- u8 *weight;
-
orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
ethhdr->h_source);
if (!orig_neigh_node)
return;
- /* neighbor has to indicate direct link and it has to
- * come via the corresponding interface
- * save packet seqno for bidirectional check
- */
- if (has_directlink_flag &&
- batadv_compare_eth(if_incoming->net_dev->dev_addr,
- ogm_packet->orig)) {
- if_num = if_incoming->if_num;
- offset = if_num * BATADV_NUM_WORDS;
-
- spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
- word = &orig_neigh_node->bat_iv.bcast_own[offset];
- bit_pos = if_incoming_seqno - 2;
- bit_pos -= ntohl(ogm_packet->seqno);
- batadv_set_bit(word, bit_pos);
- weight = &orig_neigh_node->bat_iv.bcast_own_sum[if_num];
- *weight = bitmap_weight(word,
- BATADV_TQ_LOCAL_WINDOW_SIZE);
- spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
- }
+ batadv_iv_ogm_process_reply(ogm_packet, if_incoming,
+ orig_neigh_node, if_incoming_seqno);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: originator packet from myself (via neighbor)\n");
@@ -2844,9 +2709,6 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.print = batadv_iv_ogm_orig_print,
#endif
.dump = batadv_iv_ogm_orig_dump,
- .free = batadv_iv_ogm_orig_free,
- .add_if = batadv_iv_ogm_orig_add_if,
- .del_if = batadv_iv_ogm_orig_del_if,
},
.gw = {
.init_sel_class = batadv_iv_init_sel_class,
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 71c20c1d4002..9f481cfdf77d 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -241,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
* the packet to be exactly of that size to make the link
* throughput estimation effective.
*/
- skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
+ skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Sending unicast (probe) ELP packet on interface %s to %pM\n",
@@ -268,6 +268,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
struct batadv_priv *bat_priv;
struct sk_buff *skb;
u32 elp_interval;
+ bool ret;
bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
@@ -329,8 +330,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
* may sleep and that is not allowed in an rcu protected
* context. Therefore schedule a task for that.
*/
- queue_work(batadv_event_workqueue,
- &hardif_neigh->bat_v.metric_work);
+ ret = queue_work(batadv_event_workqueue,
+ &hardif_neigh->bat_v.metric_work);
+
+ if (!ret)
+ batadv_hardif_neigh_put(hardif_neigh);
}
rcu_read_unlock();
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ff9659af6b91..5f1aeeded0e3 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
{
struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
+ bool ret;
ethhdr = eth_hdr(skb);
@@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
if (unlikely(!backbone_gw))
return true;
- queue_work(batadv_event_workqueue, &backbone_gw->report_work);
- /* backbone_gw is unreferenced in the report work function function */
+ ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+
+ /* backbone_gw is unreferenced in the report work function
+ * if the queue_work() call was successful
+ */
+ if (!ret)
+ batadv_backbone_gw_put(backbone_gw);
return true;
}
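
The underlying pattern, sketched with placeholder names (obj, obj_release): queue_work() returns false when the work item is already pending, in which case the earlier instance owns the reference taken for the work function and the caller must drop the one it just prepared.

	kref_get(&obj->refcount);	/* reference for the work function */
	if (!queue_work(wq, &obj->work))
		kref_put(&obj->refcount, obj_release);	/* already queued */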
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 3cb82378300b..8b608a2e2653 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -47,8 +47,24 @@
static struct dentry *batadv_debugfs;
+/**
+ * batadv_debugfs_deprecated() - Log use of deprecated batadv debugfs access
+ * @file: file which was accessed
+ * @alt: explanation what can be used as alternative
+ */
+void batadv_debugfs_deprecated(struct file *file, const char *alt)
+{
+ struct dentry *dentry = file_dentry(file);
+ const char *name = dentry->d_name.name;
+
+ pr_warn_ratelimited(DEPRECATED "%s (pid %d) Use of debugfs file \"%s\".\n%s",
+ current->comm, task_pid_nr(current), name, alt);
+}
+
static int batadv_algorithms_open(struct inode *inode, struct file *file)
{
+ batadv_debugfs_deprecated(file,
+ "Use genl command BATADV_CMD_GET_ROUTING_ALGOS instead\n");
return single_open(file, batadv_algo_seq_print_text, NULL);
}
@@ -56,6 +72,8 @@ static int neighbors_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
+ batadv_debugfs_deprecated(file,
+ "Use genl command BATADV_CMD_GET_NEIGHBORS instead\n");
return single_open(file, batadv_hardif_neigh_seq_print_text, net_dev);
}
@@ -63,6 +81,8 @@ static int batadv_originators_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
+ batadv_debugfs_deprecated(file,
+ "Use genl command BATADV_CMD_GET_ORIGINATORS instead\n");
return single_open(file, batadv_orig_seq_print_text, net_dev);
}
@@ -79,6 +99,8 @@ static int batadv_originators_hardif_open(struct inode *inode,
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
+ batadv_debugfs_deprecated(file,
+ "Use genl command BATADV_CMD_GET_HARDIFS instead\n");
return single_open(file, batadv_orig_hardif_seq_print_text, net_dev);
}
@@ -86,6 +108,8 @@ static int batadv_gateways_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
+ batadv_debugfs_deprecated(file,
+ "Use genl command BATADV_CMD_GET_GATEWAYS instead\n");
return single_open(file, batadv_gw_client_seq_print_text, net_dev);
}
@@ -93,6 +117,8 @@ static int batadv_transtable_global_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
+ batadv_debugfs_deprecated(file,
+ "Use genl command BATADV_CMD_GET_TRANSTABLE_GLOBAL instead\n");
return single_open(file, batadv_tt_global_seq_print_text, net_dev);
}
@@ -101,6 +127,8 @@ static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
+ batadv_debugfs_deprecated(file,
+ "Use genl command BATADV_CMD_GET_BLA_CLAIM instead\n");
return single_open(file, batadv_bla_claim_table_seq_print_text,
net_dev);
}
@@ -110,6 +138,8 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
+ batadv_debugfs_deprecated(file,
+ "Use genl command BATADV_CMD_GET_BLA_BACKBONE instead\n");
return single_open(file, batadv_bla_backbone_table_seq_print_text,
net_dev);
}
@@ -128,6 +158,8 @@ static int batadv_dat_cache_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
+ batadv_debugfs_deprecated(file,
+ "Use genl command BATADV_CMD_GET_DAT_CACHE instead\n");
return single_open(file, batadv_dat_cache_seq_print_text, net_dev);
}
#endif
@@ -136,6 +168,8 @@ static int batadv_transtable_local_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
+ batadv_debugfs_deprecated(file,
+ "Use genl command BATADV_CMD_GET_TRANSTABLE_LOCAL instead\n");
return single_open(file, batadv_tt_local_seq_print_text, net_dev);
}
@@ -149,6 +183,7 @@ static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
+ batadv_debugfs_deprecated(file, "");
return single_open(file, batadv_nc_nodes_seq_print_text, net_dev);
}
#endif
@@ -165,6 +200,8 @@ static int batadv_mcast_flags_open(struct inode *inode, struct file *file)
{
struct net_device *net_dev = (struct net_device *)inode->i_private;
+ batadv_debugfs_deprecated(file,
+ "Use genl command BATADV_CMD_GET_MCAST_FLAGS instead\n");
return single_open(file, batadv_mcast_flags_seq_print_text, net_dev);
}
#endif
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 08a592ffbee5..8de018e5c577 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -21,12 +21,14 @@
#include "main.h"
+struct file;
struct net_device;
#define BATADV_DEBUGFS_SUBDIR "batman_adv"
#if IS_ENABLED(CONFIG_BATMAN_ADV_DEBUGFS)
+void batadv_debugfs_deprecated(struct file *file, const char *alt);
void batadv_debugfs_init(void);
void batadv_debugfs_destroy(void);
int batadv_debugfs_add_meshif(struct net_device *dev);
@@ -38,6 +40,10 @@ void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
#else
+static inline void batadv_debugfs_deprecated(struct file *file, const char *alt)
+{
+}
+
static inline void batadv_debugfs_init(void)
{
}
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 8b198ee798c9..140c61a3f1ec 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
@@ -348,6 +349,9 @@ out:
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator announcing gateway capabilities
* @gateway: announced bandwidth information
+ *
+ * Has to be called with the appropriate lock held (gw.list_lock).
*/
static void batadv_gw_node_add(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
@@ -355,6 +359,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *gw_node;
+ lockdep_assert_held(&bat_priv->gw.list_lock);
+
if (gateway->bandwidth_down == 0)
return;
@@ -369,10 +375,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
- spin_lock_bh(&bat_priv->gw.list_lock);
kref_get(&gw_node->refcount);
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
- spin_unlock_bh(&bat_priv->gw.list_lock);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@@ -428,11 +432,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *gw_node, *curr_gw = NULL;
+ spin_lock_bh(&bat_priv->gw.list_lock);
gw_node = batadv_gw_node_get(bat_priv, orig_node);
if (!gw_node) {
batadv_gw_node_add(bat_priv, orig_node, gateway);
+ spin_unlock_bh(&bat_priv->gw.list_lock);
goto out;
}
+ spin_unlock_bh(&bat_priv->gw.list_lock);
if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) &&
gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 2f0d42f2f913..781c5b6e6e8e 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -763,11 +763,6 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
hard_iface->soft_iface = soft_iface;
bat_priv = netdev_priv(hard_iface->soft_iface);
- if (bat_priv->num_ifaces >= UINT_MAX) {
- ret = -ENOSPC;
- goto err_dev;
- }
-
ret = netdev_master_upper_dev_link(hard_iface->net_dev,
soft_iface, NULL, NULL, NULL);
if (ret)
@@ -777,16 +772,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
if (ret < 0)
goto err_upper;
- hard_iface->if_num = bat_priv->num_ifaces;
- bat_priv->num_ifaces++;
hard_iface->if_status = BATADV_IF_INACTIVE;
- ret = batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
- if (ret < 0) {
- bat_priv->algo_ops->iface.disable(hard_iface);
- bat_priv->num_ifaces--;
- hard_iface->if_status = BATADV_IF_NOT_IN_USE;
- goto err_upper;
- }
kref_get(&hard_iface->refcount);
hard_iface->batman_adv_ptype.type = ethertype;
@@ -834,6 +820,33 @@ err:
}
/**
+ * batadv_hardif_cnt() - get number of interfaces enslaved to soft interface
+ * @soft_iface: soft interface to check
+ *
+ * This function only uses RCU for locking - the result can therefore
+ * be off when another function is modifying the list at the same time. The
+ * caller can use the rtnl_lock to make sure that the count is accurate.
+ *
+ * Return: number of connected/enslaved hard interfaces
+ */
+static size_t batadv_hardif_cnt(const struct net_device *soft_iface)
+{
+ struct batadv_hard_iface *hard_iface;
+ size_t count = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->soft_iface != soft_iface)
+ continue;
+
+ count++;
+ }
+ rcu_read_unlock();
+
+ return count;
+}
+
+/**
* batadv_hardif_disable_interface() - Remove hard interface from soft interface
* @hard_iface: hard interface to be removed
* @autodel: whether to delete soft interface when it doesn't contain any other
@@ -855,9 +868,6 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
dev_remove_pack(&hard_iface->batman_adv_ptype);
batadv_hardif_put(hard_iface);
- bat_priv->num_ifaces--;
- batadv_orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
-
primary_if = batadv_primary_if_get_selected(bat_priv);
if (hard_iface == primary_if) {
struct batadv_hard_iface *new_if;
@@ -881,7 +891,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
/* nobody uses this interface anymore */
- if (bat_priv->num_ifaces == 0) {
+ if (batadv_hardif_cnt(hard_iface->soft_iface) <= 1) {
batadv_gw_check_client_stop(bat_priv);
if (autodel == BATADV_IF_CLEANUP_AUTO)
@@ -917,7 +927,6 @@ batadv_hardif_add_interface(struct net_device *net_dev)
if (ret)
goto free_if;
- hard_iface->if_num = 0;
hard_iface->net_dev = net_dev;
hard_iface->soft_iface = NULL;
hard_iface->if_status = BATADV_IF_NOT_IN_USE;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 55c358ad3331..d70f363c52ae 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -47,6 +47,7 @@
#include <linux/wait.h>
#include <uapi/linux/batadv_packet.h>
+#include "debugfs.h"
#include "hard-interface.h"
#include "log.h"
#include "originator.h"
@@ -74,6 +75,8 @@ static int batadv_socket_open(struct inode *inode, struct file *file)
if (!try_module_get(THIS_MODULE))
return -EBUSY;
+ batadv_debugfs_deprecated(file, "");
+
nonseekable_open(inode, file);
socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL);
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 853773e45f79..6beb5f067810 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -40,6 +40,9 @@
#include <linux/wait.h>
#include <stdarg.h>
+#include "debugfs.h"
+#include "trace.h"
+
#define BATADV_LOG_BUFF_MASK (batadv_log_buff_len - 1)
static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
@@ -98,13 +101,19 @@ static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
*/
int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
- char tmp_log_buf[256];
va_start(args, fmt);
- vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
- batadv_fdebug_log(bat_priv->debug_log, "[%10u] %s",
- jiffies_to_msecs(jiffies), tmp_log_buf);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ batadv_fdebug_log(bat_priv->debug_log, "[%10u] %pV",
+ jiffies_to_msecs(jiffies), &vaf);
+
+ trace_batadv_dbg(bat_priv, &vaf);
+
va_end(args);
return 0;
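
The %pV mechanism used here, as a self-contained sketch (my_dbg is a placeholder): instead of rendering into a fixed 256-byte stack buffer, which truncated long messages and cost an extra formatting pass, a struct va_format carries the format string and argument list so each consumer - the debug log and the tracepoint - expands it once, in full.

	#include <linux/jiffies.h>
	#include <linux/printk.h>
	#include <stdarg.h>

	static void my_dbg(const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* %pV expands the deferred format/args pair in one pass */
		printk(KERN_DEBUG "[%10u] %pV", jiffies_to_msecs(jiffies), &vaf);
		va_end(args);
	}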
@@ -115,6 +124,9 @@ static int batadv_log_open(struct inode *inode, struct file *file)
if (!try_module_get(THIS_MODULE))
return -EBUSY;
+ batadv_debugfs_deprecated(file,
+ "Use tracepoint batadv:batadv_dbg instead\n");
+
nonseekable_open(inode, file);
file->private_data = inode->i_private;
return 0;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 8da3c9336111..2002b70e18db 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -25,7 +25,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2018.2"
+#define BATADV_SOURCE_VERSION "2018.4"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index c3578444f3cb..34caf129a9bf 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -854,16 +854,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
struct list_head *list;
+ /* Select ingoing or outgoing coding node */
+ if (in_coding) {
+ lock = &orig_neigh_node->in_coding_list_lock;
+ list = &orig_neigh_node->in_coding_list;
+ } else {
+ lock = &orig_neigh_node->out_coding_list_lock;
+ list = &orig_neigh_node->out_coding_list;
+ }
+
+ spin_lock_bh(lock);
+
/* Check if nc_node is already added */
nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
/* Node found */
if (nc_node)
- return nc_node;
+ goto unlock;
nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
if (!nc_node)
- return NULL;
+ goto unlock;
/* Initialize nc_node */
INIT_LIST_HEAD(&nc_node->list);
@@ -872,22 +883,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
kref_get(&orig_neigh_node->refcount);
nc_node->orig_node = orig_neigh_node;
- /* Select ingoing or outgoing coding node */
- if (in_coding) {
- lock = &orig_neigh_node->in_coding_list_lock;
- list = &orig_neigh_node->in_coding_list;
- } else {
- lock = &orig_neigh_node->out_coding_list_lock;
- list = &orig_neigh_node->out_coding_list;
- }
-
batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
nc_node->addr, nc_node->orig_node->orig);
/* Add nc_node to orig_node */
- spin_lock_bh(lock);
kref_get(&nc_node->refcount);
list_add_tail_rcu(&nc_node->list, list);
+
+unlock:
spin_unlock_bh(lock);
return nc_node;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 1d295da3e342..56a981af5c92 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -904,9 +904,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
batadv_frag_purge_orig(orig_node, NULL);
- if (orig_node->bat_priv->algo_ops->orig.free)
- orig_node->bat_priv->algo_ops->orig.free(orig_node);
-
kfree(orig_node->tt_buff);
kfree(orig_node);
}
@@ -1555,107 +1552,3 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
return ret;
}
-
-/**
- * batadv_orig_hash_add_if() - Add interface to originators in orig_hash
- * @hard_iface: hard interface to add (already slave of the soft interface)
- * @max_if_num: new number of interfaces
- *
- * Return: 0 on success or negative error number in case of failure
- */
-int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
- unsigned int max_if_num)
-{
- struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct batadv_algo_ops *bao = bat_priv->algo_ops;
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_head *head;
- struct batadv_orig_node *orig_node;
- u32 i;
- int ret;
-
- /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
- * if_num
- */
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- ret = 0;
- if (bao->orig.add_if)
- ret = bao->orig.add_if(orig_node, max_if_num);
- if (ret == -ENOMEM)
- goto err;
- }
- rcu_read_unlock();
- }
-
- return 0;
-
-err:
- rcu_read_unlock();
- return -ENOMEM;
-}
-
-/**
- * batadv_orig_hash_del_if() - Remove interface from originators in orig_hash
- * @hard_iface: hard interface to remove (still slave of the soft interface)
- * @max_if_num: new number of interfaces
- *
- * Return: 0 on success or negative error number in case of failure
- */
-int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
- unsigned int max_if_num)
-{
- struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_head *head;
- struct batadv_hard_iface *hard_iface_tmp;
- struct batadv_orig_node *orig_node;
- struct batadv_algo_ops *bao = bat_priv->algo_ops;
- u32 i;
- int ret;
-
- /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
- * if_num
- */
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
- ret = 0;
- if (bao->orig.del_if)
- ret = bao->orig.del_if(orig_node, max_if_num,
- hard_iface->if_num);
- if (ret == -ENOMEM)
- goto err;
- }
- rcu_read_unlock();
- }
-
- /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
- rcu_read_lock();
- list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
- if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
- continue;
-
- if (hard_iface == hard_iface_tmp)
- continue;
-
- if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
- continue;
-
- if (hard_iface_tmp->if_num > hard_iface->if_num)
- hard_iface_tmp->if_num--;
- }
- rcu_read_unlock();
-
- hard_iface->if_num = -1;
- return 0;
-
-err:
- rcu_read_unlock();
- return -ENOMEM;
-}
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 3b3f59b881e1..a8b4c7b667ec 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -72,10 +72,6 @@ void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo);
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
-int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
- unsigned int max_if_num);
-int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
- unsigned int max_if_num);
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
unsigned short vid);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 1485263a348b..5db5a0a4c959 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -574,15 +574,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
struct batadv_softif_vlan *vlan;
int err;
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (vlan) {
batadv_softif_vlan_put(vlan);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -EEXIST;
}
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
- if (!vlan)
+ if (!vlan) {
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -ENOMEM;
+ }
vlan->bat_priv = bat_priv;
vlan->vid = vid;
@@ -590,17 +595,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
atomic_set(&vlan->ap_isolation, 0);
+ kref_get(&vlan->refcount);
+ hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+ /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
+ * sleeping behavior of the sysfs functions and the fs_reclaim lock
+ */
err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
if (err) {
- kfree(vlan);
+ /* ref for the function */
+ batadv_softif_vlan_put(vlan);
+
+ /* ref for the list */
+ batadv_softif_vlan_put(vlan);
return err;
}
- spin_lock_bh(&bat_priv->softif_vlan_list_lock);
- kref_get(&vlan->refcount);
- hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
- spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
/* add a new TT local entry. This one will be marked with the NOPURGE
* flag
*/
@@ -833,7 +844,6 @@ static int batadv_softif_init_late(struct net_device *dev)
atomic_set(&bat_priv->frag_seqno, random_seqno);
bat_priv->primary_if = NULL;
- bat_priv->num_ifaces = 0;
batadv_nc_init_bat_priv(bat_priv);
@@ -1051,6 +1061,7 @@ static void batadv_softif_init_early(struct net_device *dev)
dev->needs_free_netdev = true;
dev->priv_destructor = batadv_softif_free;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_LLTX;
dev->priv_flags |= IFF_NO_QUEUE;
/* can't call min_mtu, because the needed variables
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index f2eef43bd2ec..09427fc6494a 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -188,7 +188,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
\
return __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
- &bat_priv->_var, net_dev); \
+ &bat_priv->_var, net_dev, \
+ NULL); \
}
#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
@@ -262,7 +263,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
\
length = __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
- &hard_iface->_var, net_dev); \
+ &hard_iface->_var, \
+ hard_iface->soft_iface, \
+ net_dev); \
\
batadv_hardif_put(hard_iface); \
return length; \
@@ -356,10 +359,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
static int batadv_store_uint_attr(const char *buff, size_t count,
struct net_device *net_dev,
+ struct net_device *slave_dev,
const char *attr_name,
unsigned int min, unsigned int max,
atomic_t *attr)
{
+ char ifname[IFNAMSIZ + 3] = "";
unsigned long uint_val;
int ret;
@@ -385,8 +390,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
if (atomic_read(attr) == uint_val)
return count;
- batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
- attr_name, atomic_read(attr), uint_val);
+ if (slave_dev)
+ snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
+
+ batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
+ attr_name, ifname, atomic_read(attr), uint_val);
atomic_set(attr, uint_val);
return count;
@@ -397,12 +405,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
void (*post_func)(struct net_device *),
const struct attribute *attr,
atomic_t *attr_store,
- struct net_device *net_dev)
+ struct net_device *net_dev,
+ struct net_device *slave_dev)
{
int ret;
- ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
- attr_store);
+ ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
+ attr->name, min, max, attr_store);
if (post_func && ret)
post_func(net_dev);
@@ -571,7 +580,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
batadv_post_gw_reselect, attr,
&bat_priv->gw.sel_class,
- bat_priv->soft_iface);
+ bat_priv->soft_iface, NULL);
}
static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
@@ -1090,8 +1099,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
if (old_tp_override == tp_override)
goto out;
- batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
- "throughput_override",
+ batadv_info(hard_iface->soft_iface,
+ "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
+ "throughput_override", net_dev->name,
old_tp_override / 10, old_tp_override % 10,
tp_override / 10, tp_override % 10);
diff --git a/net/batman-adv/trace.c b/net/batman-adv/trace.c
new file mode 100644
index 000000000000..3d57f9981f25
--- /dev/null
+++ b/net/batman-adv/trace.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2010-2018 B.A.T.M.A.N. contributors:
+ *
+ * Sven Eckelmann
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/net/batman-adv/trace.h b/net/batman-adv/trace.h
new file mode 100644
index 000000000000..3acda26a30ca
--- /dev/null
+++ b/net/batman-adv/trace.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2010-2018 B.A.T.M.A.N. contributors:
+ *
+ * Sven Eckelmann
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#if !defined(_NET_BATMAN_ADV_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _NET_BATMAN_ADV_TRACE_H_
+
+#include "main.h"
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM batadv
+
+/* provide dummy function when tracing is disabled */
+#if !defined(CONFIG_BATMAN_ADV_TRACING)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+ static inline void trace_ ## name(proto) {}
+
+#endif /* CONFIG_BATMAN_ADV_TRACING */
+
+#define BATADV_MAX_MSG_LEN 256
+
+TRACE_EVENT(batadv_dbg,
+
+ TP_PROTO(struct batadv_priv *bat_priv,
+ struct va_format *vaf),
+
+ TP_ARGS(bat_priv, vaf),
+
+ TP_STRUCT__entry(
+ __string(device, bat_priv->soft_iface->name)
+ __string(driver, KBUILD_MODNAME)
+ __dynamic_array(char, msg, BATADV_MAX_MSG_LEN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, bat_priv->soft_iface->name);
+ __assign_str(driver, KBUILD_MODNAME);
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ BATADV_MAX_MSG_LEN,
+ vaf->fmt,
+ *vaf->va) >= BATADV_MAX_MSG_LEN);
+ ),
+
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+#endif /* _NET_BATMAN_ADV_TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
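
For context on how this event is driven: the caller packages a printf-style format and its arguments into a struct va_format, so the message is expanded only once, inside TP_fast_assign(). A sketch of such a wrapper (the real hook-up lives in the batman-adv logging code, which this patch does not touch):

    #include <linux/kernel.h>

    /* illustrative wrapper, not the actual batman-adv helper */
    static void example_dbg(struct batadv_priv *bat_priv, const char *fmt, ...)
    {
            struct va_format vaf;
            va_list args;

            va_start(args, fmt);

            vaf.fmt = fmt;
            vaf.va = &args;

            trace_batadv_dbg(bat_priv, &vaf);

            va_end(args);
    }

When CONFIG_BATMAN_ADV_TRACING is off, the TRACE_EVENT redefinition above turns trace_batadv_dbg() into an empty inline, so callers need no ifdefs.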
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 12a2b7d21376..d21624c44665 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1613,6 +1613,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
{
struct batadv_tt_orig_list_entry *orig_entry;
+ spin_lock_bh(&tt_global->list_lock);
+
orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
if (orig_entry) {
/* refresh the ttvn: the current value could be a bogus one that
@@ -1635,11 +1637,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
orig_entry->flags = flags;
kref_init(&orig_entry->refcount);
- spin_lock_bh(&tt_global->list_lock);
kref_get(&orig_entry->refcount);
hlist_add_head_rcu(&orig_entry->list,
&tt_global->orig_list);
- spin_unlock_bh(&tt_global->list_lock);
atomic_inc(&tt_global->orig_list_count);
sync_flags:
@@ -1647,6 +1647,8 @@ sync_flags:
out:
if (orig_entry)
batadv_tt_orig_list_entry_put(orig_entry);
+
+ spin_unlock_bh(&tt_global->list_lock);
}
/**
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index a637458205d1..40e69c9346d2 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -529,15 +529,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
{
struct batadv_tvlv_handler *tvlv_handler;
+ spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+
tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
if (tvlv_handler) {
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
batadv_tvlv_handler_put(tvlv_handler);
return;
}
tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
- if (!tvlv_handler)
+ if (!tvlv_handler) {
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
return;
+ }
tvlv_handler->ogm_handler = optr;
tvlv_handler->unicast_handler = uptr;
@@ -547,7 +552,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
kref_init(&tvlv_handler->refcount);
INIT_HLIST_NODE(&tvlv_handler->list);
- spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
kref_get(&tvlv_handler->refcount);
hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 343d304851a5..45b5592de816 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -167,9 +167,6 @@ struct batadv_hard_iface {
/** @list: list node for batadv_hardif_list */
struct list_head list;
- /** @if_num: identificator of the interface */
- unsigned int if_num;
-
/** @if_status: status of the interface for batman-adv */
char if_status;
@@ -233,6 +230,20 @@ struct batadv_hard_iface {
};
/**
+ * struct batadv_orig_ifinfo_bat_iv - B.A.T.M.A.N. IV private orig_ifinfo members
+ */
+struct batadv_orig_ifinfo_bat_iv {
+ /**
+ * @bcast_own: bitfield which counts the number of our OGMs this
+ * orig_node rebroadcasted "back" to us (relative to last_real_seqno)
+ */
+ DECLARE_BITMAP(bcast_own, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+ /** @bcast_own_sum: sum of bcast_own */
+ u8 bcast_own_sum;
+};
+
+/**
* struct batadv_orig_ifinfo - originator info per outgoing interface
*/
struct batadv_orig_ifinfo {
@@ -257,6 +268,9 @@ struct batadv_orig_ifinfo {
/** @batman_seqno_reset: time when the batman seqno window was reset */
unsigned long batman_seqno_reset;
+ /** @bat_iv: B.A.T.M.A.N. IV private structure */
+ struct batadv_orig_ifinfo_bat_iv bat_iv;
+
/** @refcount: number of contexts the object is used */
struct kref refcount;
@@ -339,19 +353,10 @@ struct batadv_orig_node_vlan {
*/
struct batadv_orig_bat_iv {
/**
- * @bcast_own: set of bitfields (one per hard-interface) where each one
- * counts the number of our OGMs this orig_node rebroadcasted "back" to
- * us (relative to last_real_seqno). Every bitfield is
- * BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
- */
- unsigned long *bcast_own;
-
- /** @bcast_own_sum: sum of bcast_own */
- u8 *bcast_own_sum;
-
- /**
- * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
- * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
+ * @ogm_cnt_lock: lock protecting &batadv_orig_ifinfo_bat_iv.bcast_own,
+ * &batadv_orig_ifinfo_bat_iv.bcast_own_sum,
+ * &batadv_neigh_ifinfo_bat_iv.real_bits and
+ * &batadv_neigh_ifinfo_bat_iv.real_packet_count
*/
spinlock_t ogm_cnt_lock;
};
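
Moving the window into struct batadv_orig_ifinfo is what makes the add_if/del_if resizing machinery removed earlier unnecessary: the bitmap now has a fixed size per outgoing interface, so nothing needs reallocating or renumbering when interfaces come and go. A hedged sketch of how the new fields would be updated under ogm_cnt_lock (the helper name and the exact call site are illustrative):

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    static void example_mark_own_ogm(struct batadv_orig_node *orig_node,
                                     struct batadv_orig_ifinfo *ifinfo,
                                     unsigned int seq_bit)
    {
            spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);

            /* record the echoed OGM and refresh the cached sum */
            __set_bit(seq_bit, ifinfo->bat_iv.bcast_own);
            ifinfo->bat_iv.bcast_own_sum =
                    bitmap_weight(ifinfo->bat_iv.bcast_own,
                                  BATADV_TQ_LOCAL_WINDOW_SIZE);

            spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
    }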
@@ -1597,9 +1602,6 @@ struct batadv_priv {
/** @batman_queue_left: number of remaining OGM packet slots */
atomic_t batman_queue_left;
- /** @num_ifaces: number of interfaces assigned to this mesh interface */
- unsigned int num_ifaces;
-
/** @mesh_obj: kobject for sysfs mesh subdirectory */
struct kobject *mesh_obj;
@@ -2179,28 +2181,6 @@ struct batadv_algo_neigh_ops {
* struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific)
*/
struct batadv_algo_orig_ops {
- /**
- * @free: free the resources allocated by the routing algorithm for an
- * orig_node object (optional)
- */
- void (*free)(struct batadv_orig_node *orig_node);
-
- /**
- * @add_if: ask the routing algorithm to apply the needed changes to the
- * orig_node due to a new hard-interface being added into the mesh
- * (optional)
- */
- int (*add_if)(struct batadv_orig_node *orig_node,
- unsigned int max_if_num);
-
- /**
- * @del_if: ask the routing algorithm to apply the needed changes to the
- * orig_node due to an hard-interface being removed from the mesh
- * (optional)
- */
- int (*del_if)(struct batadv_orig_node *orig_node,
- unsigned int max_if_num, unsigned int del_if_num);
-
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/** @print: print the originator table (optional) */
void (*print)(struct batadv_priv *priv, struct seq_file *seq,
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 7b3965861013..43c284158f63 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -489,9 +489,6 @@ static int bnep_session(void *arg)
add_wait_queue(sk_sleep(sk), &wait);
while (1) {
- /* Ensure session->terminate is updated */
- smp_mb__before_atomic();
-
if (atomic_read(&s->terminate))
break;
/* RX */
@@ -512,6 +509,10 @@ static int bnep_session(void *arg)
break;
netif_wake_queue(dev);
+ /*
+ * wait_woken() performs the necessary memory barriers
+ * for us; see the header comment for this primitive.
+ */
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
remove_wait_queue(sk_sleep(sk), &wait);
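
The barrier removals here, and the matching ones in cmtp and hidp below, rely on the documented guarantee that wait_woken() and woken_wake_function() already order the waker's writes (the terminate flag) against the sleeper's re-check. The canonical loop, sketched with a generic flag:

    #include <linux/sched.h>
    #include <linux/wait.h>

    static void example_session_loop(wait_queue_head_t *wq, atomic_t *terminate)
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);

            add_wait_queue(wq, &wait);
            while (!atomic_read(terminate)) {
                    /* ... RX/TX work for one pass goes here ... */

                    /* no explicit smp_mb(): wait_woken() pairs with
                     * woken_wake_function() on the waker side
                     */
                    wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
            }
            remove_wait_queue(wq, &wait);
    }

The waker simply sets the flag and calls wake_up_interruptible() on the same queue, as cmtp_del_connection() and hidp_session_terminate() do below.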
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 00deacdcb51c..cfd83c5521ae 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -49,18 +49,17 @@ static int bnep_sock_release(struct socket *sock)
return 0;
}
-static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int do_bnep_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp)
{
struct bnep_connlist_req cl;
struct bnep_connadd_req ca;
struct bnep_conndel_req cd;
struct bnep_conninfo ci;
struct socket *nsock;
- void __user *argp = (void __user *)arg;
__u32 supp_feat = BIT(BNEP_SETUP_RESPONSE);
int err;
- BT_DBG("cmd %x arg %lx", cmd, arg);
+ BT_DBG("cmd %x arg %p", cmd, argp);
switch (cmd) {
case BNEPCONNADD:
@@ -134,16 +133,22 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
return 0;
}
+static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ return do_bnep_sock_ioctl(sock, cmd, (void __user *)arg);
+}
+
#ifdef CONFIG_COMPAT
static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
+ void __user *argp = compat_ptr(arg);
if (cmd == BNEPGETCONNLIST) {
struct bnep_connlist_req cl;
+ unsigned __user *p = argp;
u32 uci;
int err;
- if (get_user(cl.cnum, (u32 __user *) arg) ||
- get_user(uci, (u32 __user *) (arg + 4)))
+ if (get_user(cl.cnum, p) || get_user(uci, p + 1))
return -EFAULT;
cl.ci = compat_ptr(uci);
@@ -153,13 +158,13 @@ static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
err = bnep_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (u32 __user *) arg))
+ if (!err && put_user(cl.cnum, p))
err = -EFAULT;
return err;
}
- return bnep_sock_ioctl(sock, cmd, arg);
+ return do_bnep_sock_ioctl(sock, cmd, argp);
}
#endif
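
This conversion, repeated for cmtp and hidp below, reduces the native and compat entry points to thin wrappers that differ only in how the user pointer is formed; the shared body never sees the raw unsigned long again. The general shape, with illustrative names:

    #include <linux/compat.h>
    #include <linux/net.h>

    static int do_example_ioctl(struct socket *sock, unsigned int cmd,
                                void __user *argp)
    {
            /* every copy_from_user()/copy_to_user() goes through argp */
            return 0;
    }

    static int example_ioctl(struct socket *sock, unsigned int cmd,
                             unsigned long arg)
    {
            return do_example_ioctl(sock, cmd, (void __user *)arg);
    }

    #ifdef CONFIG_COMPAT
    static int example_compat_ioctl(struct socket *sock, unsigned int cmd,
                                    unsigned long arg)
    {
            /* compat_ptr() normalizes 32-bit pointers (notably on s390) */
            return do_example_ioctl(sock, cmd, compat_ptr(arg));
    }
    #endif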
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 7f26a5a19ff6..07cfa3249f83 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -288,9 +288,6 @@ static int cmtp_session(void *arg)
add_wait_queue(sk_sleep(sk), &wait);
while (1) {
- /* Ensure session->terminate is updated */
- smp_mb__before_atomic();
-
if (atomic_read(&session->terminate))
break;
if (sk->sk_state != BT_CONNECTED)
@@ -306,6 +303,10 @@ static int cmtp_session(void *arg)
cmtp_process_transmit(session);
+ /*
+ * wait_woken() performs the necessary memory barriers
+ * for us; see the header comment for this primitive.
+ */
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
remove_wait_queue(sk_sleep(sk), &wait);
@@ -431,9 +432,10 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
/* Stop session thread */
atomic_inc(&session->terminate);
- /* Ensure session->terminate is updated */
- smp_mb__after_atomic();
-
+ /*
+ * See the comment preceding the call to wait_woken()
+ * in cmtp_session().
+ */
wake_up_interruptible(sk_sleep(session->sock->sk));
} else
err = -ENOENT;
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index e08f28fadd65..defdd4871919 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -63,17 +63,16 @@ static int cmtp_sock_release(struct socket *sock)
return 0;
}
-static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int do_cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp)
{
struct cmtp_connadd_req ca;
struct cmtp_conndel_req cd;
struct cmtp_connlist_req cl;
struct cmtp_conninfo ci;
struct socket *nsock;
- void __user *argp = (void __user *)arg;
int err;
- BT_DBG("cmd %x arg %lx", cmd, arg);
+ BT_DBG("cmd %x arg %p", cmd, argp);
switch (cmd) {
case CMTPCONNADD:
@@ -137,16 +136,22 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
return -EINVAL;
}
+static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ return do_cmtp_sock_ioctl(sock, cmd, (void __user *)arg);
+}
+
#ifdef CONFIG_COMPAT
static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
+ void __user *argp = compat_ptr(arg);
if (cmd == CMTPGETCONNLIST) {
struct cmtp_connlist_req cl;
+ u32 __user *p = argp;
u32 uci;
int err;
- if (get_user(cl.cnum, (u32 __user *) arg) ||
- get_user(uci, (u32 __user *) (arg + 4)))
+ if (get_user(cl.cnum, p) || get_user(uci, p + 1))
return -EFAULT;
cl.ci = compat_ptr(uci);
@@ -156,13 +161,13 @@ static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
err = cmtp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (u32 __user *) arg))
+ if (!err && put_user(cl.cnum, p))
err = -EFAULT;
return err;
}
- return cmtp_sock_ioctl(sock, cmd, arg);
+ return do_cmtp_sock_ioctl(sock, cmd, argp);
}
#endif
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 74b29c7d841c..7352fe85674b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2839,6 +2839,20 @@ struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
return NULL;
}
+struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
+ struct list_head *bdaddr_list, bdaddr_t *bdaddr,
+ u8 type)
+{
+ struct bdaddr_list_with_irk *b;
+
+ list_for_each_entry(b, bdaddr_list, list) {
+ if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
+ return b;
+ }
+
+ return NULL;
+}
+
void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
struct bdaddr_list *b, *n;
@@ -2871,6 +2885,35 @@ int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
return 0;
}
+int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type, u8 *peer_irk, u8 *local_irk)
+{
+ struct bdaddr_list_with_irk *entry;
+
+ if (!bacmp(bdaddr, BDADDR_ANY))
+ return -EBADF;
+
+ if (hci_bdaddr_list_lookup(list, bdaddr, type))
+ return -EEXIST;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ bacpy(&entry->bdaddr, bdaddr);
+ entry->bdaddr_type = type;
+
+ if (peer_irk)
+ memcpy(entry->peer_irk, peer_irk, 16);
+
+ if (local_irk)
+ memcpy(entry->local_irk, local_irk, 16);
+
+ list_add(&entry->list, list);
+
+ return 0;
+}
+
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
struct bdaddr_list *entry;
@@ -2890,6 +2933,26 @@ int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
return 0;
}
+int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type)
+{
+ struct bdaddr_list_with_irk *entry;
+
+ if (!bacmp(bdaddr, BDADDR_ANY)) {
+ hci_bdaddr_list_clear(list);
+ return 0;
+ }
+
+ entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
+ if (!entry)
+ return -ENOENT;
+
+ list_del(&entry->list);
+ kfree(entry);
+
+ return 0;
+}
+
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type)
@@ -3084,6 +3147,8 @@ struct hci_dev *hci_alloc_dev(void)
hdev->le_max_tx_time = 0x0148;
hdev->le_max_rx_len = 0x001b;
hdev->le_max_rx_time = 0x0148;
+ hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
+ hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index f12555f23a49..ef9928d7b4fb 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1454,6 +1454,45 @@ static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}
+static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_le_add_to_resolv_list *sent;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
+ if (!sent)
+ return;
+
+ hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
+ sent->bdaddr_type, sent->peer_irk,
+ sent->local_irk);
+}
+
+static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_cp_le_del_from_resolv_list *sent;
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
+ if (!sent)
+ return;
+
+ hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
+ sent->bdaddr_type);
+}
+
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
struct sk_buff *skb)
{
@@ -3279,6 +3318,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_le_write_def_data_len(hdev, skb);
break;
+ case HCI_OP_LE_ADD_TO_RESOLV_LIST:
+ hci_cc_le_add_to_resolv_list(hdev, skb);
+ break;
+
+ case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
+ hci_cc_le_del_from_resolv_list(hdev, skb);
+ break;
+
case HCI_OP_LE_CLEAR_RESOLV_LIST:
hci_cc_le_clear_resolv_list(hdev, skb);
break;
@@ -4890,31 +4937,27 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
- if (!status) {
- /* The remote features procedure is defined for master
- * role only. So only in case of an initiated connection
- * request the remote features.
- *
- * If the local controller supports slave-initiated features
- * exchange, then requesting the remote features in slave
- * role is possible. Otherwise just transition into the
- * connected state without requesting the remote features.
- */
- if (conn->out ||
- (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
- struct hci_cp_le_read_remote_features cp;
+ /* The remote features procedure is defined for master
+ * role only. So only in case of an initiated connection
+ * request the remote features.
+ *
+ * If the local controller supports slave-initiated features
+ * exchange, then requesting the remote features in slave
+ * role is possible. Otherwise just transition into the
+ * connected state without requesting the remote features.
+ */
+ if (conn->out ||
+ (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
+ struct hci_cp_le_read_remote_features cp;
- cp.handle = __cpu_to_le16(conn->handle);
+ cp.handle = __cpu_to_le16(conn->handle);
- hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
- sizeof(cp), &cp);
+ hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
+ sizeof(cp), &cp);
- hci_conn_hold(conn);
- } else {
- conn->state = BT_CONNECTED;
- hci_connect_cfm(conn, status);
- }
+ hci_conn_hold(conn);
} else {
+ conn->state = BT_CONNECTED;
hci_connect_cfm(conn, status);
}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 253975cce943..a442e21f3894 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -649,7 +649,7 @@ static void hidp_process_transmit(struct hidp_session *session,
}
static int hidp_setup_input(struct hidp_session *session,
- struct hidp_connadd_req *req)
+ const struct hidp_connadd_req *req)
{
struct input_dev *input;
int i;
@@ -748,7 +748,7 @@ EXPORT_SYMBOL_GPL(hidp_hid_driver);
/* This function sets up the hid device. It does not add it
to the HID system. That is done in hidp_add_connection(). */
static int hidp_setup_hid(struct hidp_session *session,
- struct hidp_connadd_req *req)
+ const struct hidp_connadd_req *req)
{
struct hid_device *hid;
int err;
@@ -807,7 +807,7 @@ fault:
/* initialize session devices */
static int hidp_session_dev_init(struct hidp_session *session,
- struct hidp_connadd_req *req)
+ const struct hidp_connadd_req *req)
{
int ret;
@@ -906,7 +906,7 @@ static void hidp_session_dev_work(struct work_struct *work)
static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
struct socket *ctrl_sock,
struct socket *intr_sock,
- struct hidp_connadd_req *req,
+ const struct hidp_connadd_req *req,
struct l2cap_conn *conn)
{
struct hidp_session *session;
@@ -1074,6 +1074,10 @@ static int hidp_session_start_sync(struct hidp_session *session)
static void hidp_session_terminate(struct hidp_session *session)
{
atomic_inc(&session->terminate);
+ /*
+ * See the comment preceding the call to wait_woken()
+ * in hidp_session_run().
+ */
wake_up_interruptible(&hidp_session_wq);
}
@@ -1193,8 +1197,6 @@ static void hidp_session_run(struct hidp_session *session)
* thread is woken up by ->sk_state_changed().
*/
- /* Ensure session->terminate is updated */
- smp_mb__before_atomic();
if (atomic_read(&session->terminate))
break;
@@ -1228,14 +1230,15 @@ static void hidp_session_run(struct hidp_session *session)
hidp_process_transmit(session, &session->ctrl_transmit,
session->ctrl_sock);
+ /*
+ * wait_woken() performs the necessary memory barriers
+ * for us; see the header comment for this primitive.
+ */
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
remove_wait_queue(&hidp_session_wq, &wait);
atomic_inc(&session->terminate);
-
- /* Ensure session->terminate is updated */
- smp_mb__after_atomic();
}
static int hidp_session_wake_function(wait_queue_entry_t *wait,
@@ -1335,7 +1338,7 @@ static int hidp_verify_sockets(struct socket *ctrl_sock,
return 0;
}
-int hidp_connection_add(struct hidp_connadd_req *req,
+int hidp_connection_add(const struct hidp_connadd_req *req,
struct socket *ctrl_sock,
struct socket *intr_sock)
{
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 8798492a6e99..6ef88d0a1919 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -122,7 +122,7 @@ struct hidp_connlist_req {
struct hidp_conninfo __user *ci;
};
-int hidp_connection_add(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
+int hidp_connection_add(const struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
int hidp_connection_del(struct hidp_conndel_req *req);
int hidp_get_connlist(struct hidp_connlist_req *req);
int hidp_get_conninfo(struct hidp_conninfo *ci);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 1eaac01f85de..9f85a1943be9 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -46,9 +46,8 @@ static int hidp_sock_release(struct socket *sock)
return 0;
}
-static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int do_hidp_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp)
{
- void __user *argp = (void __user *) arg;
struct hidp_connadd_req ca;
struct hidp_conndel_req cd;
struct hidp_connlist_req cl;
@@ -57,7 +56,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
struct socket *isock;
int err;
- BT_DBG("cmd %x arg %lx", cmd, arg);
+ BT_DBG("cmd %x arg %p", cmd, argp);
switch (cmd) {
case HIDPCONNADD:
@@ -122,6 +121,11 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
return -EINVAL;
}
+static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ return do_hidp_sock_ioctl(sock, cmd, (void __user *)arg);
+}
+
#ifdef CONFIG_COMPAT
struct compat_hidp_connadd_req {
int ctrl_sock; /* Connected control socket */
@@ -141,13 +145,15 @@ struct compat_hidp_connadd_req {
static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
+ void __user *argp = compat_ptr(arg);
+ int err;
+
if (cmd == HIDPGETCONNLIST) {
struct hidp_connlist_req cl;
+ u32 __user *p = argp;
u32 uci;
- int err;
- if (get_user(cl.cnum, (u32 __user *) arg) ||
- get_user(uci, (u32 __user *) (arg + 4)))
+ if (get_user(cl.cnum, p) || get_user(uci, p + 1))
return -EFAULT;
cl.ci = compat_ptr(uci);
@@ -157,39 +163,54 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
err = hidp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (u32 __user *) arg))
+ if (!err && put_user(cl.cnum, p))
err = -EFAULT;
return err;
} else if (cmd == HIDPCONNADD) {
- struct compat_hidp_connadd_req ca;
- struct hidp_connadd_req __user *uca;
+ struct compat_hidp_connadd_req ca32;
+ struct hidp_connadd_req ca;
+ struct socket *csock;
+ struct socket *isock;
- uca = compat_alloc_user_space(sizeof(*uca));
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
- if (copy_from_user(&ca, (void __user *) arg, sizeof(ca)))
+ if (copy_from_user(&ca32, (void __user *) arg, sizeof(ca32)))
return -EFAULT;
- if (put_user(ca.ctrl_sock, &uca->ctrl_sock) ||
- put_user(ca.intr_sock, &uca->intr_sock) ||
- put_user(ca.parser, &uca->parser) ||
- put_user(ca.rd_size, &uca->rd_size) ||
- put_user(compat_ptr(ca.rd_data), &uca->rd_data) ||
- put_user(ca.country, &uca->country) ||
- put_user(ca.subclass, &uca->subclass) ||
- put_user(ca.vendor, &uca->vendor) ||
- put_user(ca.product, &uca->product) ||
- put_user(ca.version, &uca->version) ||
- put_user(ca.flags, &uca->flags) ||
- put_user(ca.idle_to, &uca->idle_to) ||
- copy_to_user(&uca->name[0], &ca.name[0], 128))
- return -EFAULT;
+ ca.ctrl_sock = ca32.ctrl_sock;
+ ca.intr_sock = ca32.intr_sock;
+ ca.parser = ca32.parser;
+ ca.rd_size = ca32.rd_size;
+ ca.rd_data = compat_ptr(ca32.rd_data);
+ ca.country = ca32.country;
+ ca.subclass = ca32.subclass;
+ ca.vendor = ca32.vendor;
+ ca.product = ca32.product;
+ ca.version = ca32.version;
+ ca.flags = ca32.flags;
+ ca.idle_to = ca32.idle_to;
+ memcpy(ca.name, ca32.name, 128);
+
+ csock = sockfd_lookup(ca.ctrl_sock, &err);
+ if (!csock)
+ return err;
- arg = (unsigned long) uca;
+ isock = sockfd_lookup(ca.intr_sock, &err);
+ if (!isock) {
+ sockfd_put(csock);
+ return err;
+ }
- /* Fall through. We don't actually write back any _changes_
- to the structure anyway, so there's no need to copy back
- into the original compat version */
+ err = hidp_connection_add(&ca, csock, isock);
+ if (!err && copy_to_user(argp, &ca32, sizeof(ca32)))
+ err = -EFAULT;
+
+ sockfd_put(csock);
+ sockfd_put(isock);
+
+ return err;
}
return hidp_sock_ioctl(sock, cmd, arg);
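
Instead of building a native request in userspace via compat_alloc_user_space() and re-entering the ioctl path, the handler now converts the 32-bit layout inside the kernel and calls hidp_connection_add() directly. The conversion idiom, sketched with reduced hypothetical structs:

    #include <linux/compat.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct compat_example_req {
            compat_uptr_t data;     /* 32-bit user pointer */
            u32 len;
    };

    struct example_req {
            void __user *data;
            u32 len;
    };

    /* copy the compat layout, then widen it field by field */
    static int example_fetch_req(struct example_req *out, void __user *argp)
    {
            struct compat_example_req c32;

            if (copy_from_user(&c32, argp, sizeof(c32)))
                    return -EFAULT;

            out->data = compat_ptr(c32.data);
            out->len = c32.len;

            return 0;
    }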
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index d17a4736e47c..2146e0f3b6f8 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -51,9 +51,6 @@ static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);
-static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
-static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
-
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
@@ -519,8 +516,10 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
chan->sdu_last_frag = NULL;
chan->sdu_len = 0;
chan->tx_credits = 0;
- chan->rx_credits = le_max_credits;
- chan->mps = min_t(u16, chan->imtu, le_default_mps);
+ /* Derive MPS from connection MTU to stop HCI fragmentation */
+ chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
+ /* Give enough credits for a full packet */
+ chan->rx_credits = (chan->imtu / chan->mps) + 1;
skb_queue_head_init(&chan->tx_q);
}
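
Worked numbers make the new sizing concrete. Assuming, hypothetically, a local L2CAP MTU of 672 bytes on a connection whose HCI MTU is 247: the MPS becomes min(672, 247 - 4) = 243, so the initial window of 672/243 + 1 = 3 credits is exactly enough for one complete SDU, and the reworked l2cap_chan_le_send_credits() below tops the window back up to this same value after each SDU is consumed.

    #include <linux/kernel.h>
    #include <linux/types.h>

    #define L2CAP_HDR_SIZE 4    /* as defined in net/bluetooth/l2cap.h */

    static void example_le_flowctl_sizing(void)
    {
            /* hypothetical values: local MTU 672, HCI connection MTU 247 */
            u16 imtu = 672, conn_mtu = 247;
            u16 mps, rx_credits;

            mps = min_t(u16, imtu, conn_mtu - L2CAP_HDR_SIZE);  /* 243 */
            rx_credits = (imtu / mps) + 1;                      /* 2 + 1 = 3 */

            pr_info("mps=%u rx_credits=%u\n", mps, rx_credits);
    }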
@@ -681,9 +680,9 @@ static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
u16 result;
if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
- result = L2CAP_CR_AUTHORIZATION;
+ result = L2CAP_CR_LE_AUTHORIZATION;
else
- result = L2CAP_CR_BAD_PSM;
+ result = L2CAP_CR_LE_BAD_PSM;
l2cap_state_change(chan, BT_DISCONN);
@@ -1282,6 +1281,8 @@ static void l2cap_le_connect(struct l2cap_chan *chan)
if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
return;
+ l2cap_le_flowctl_init(chan);
+
req.psm = chan->psm;
req.scid = cpu_to_le16(chan->scid);
req.mtu = cpu_to_le16(chan->imtu);
@@ -3669,7 +3670,7 @@ void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
rsp.mtu = cpu_to_le16(chan->imtu);
rsp.mps = cpu_to_le16(chan->mps);
rsp.credits = cpu_to_le16(chan->rx_credits);
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
&rsp);
@@ -3815,9 +3816,17 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
result = L2CAP_CR_NO_MEM;
+ /* Check for valid dynamic CID range (as per Erratum 3253) */
+ if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
+ result = L2CAP_CR_INVALID_SCID;
+ goto response;
+ }
+
/* Check if we already have channel with that dcid */
- if (__l2cap_get_chan_by_dcid(conn, scid))
+ if (__l2cap_get_chan_by_dcid(conn, scid)) {
+ result = L2CAP_CR_SCID_IN_USE;
goto response;
+ }
chan = pchan->ops->new_connection(pchan);
if (!chan)
@@ -5279,7 +5288,7 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
credits = __le16_to_cpu(rsp->credits);
result = __le16_to_cpu(rsp->result);
- if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
+ if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
dcid < L2CAP_CID_DYN_START ||
dcid > L2CAP_CID_LE_DYN_END))
return -EPROTO;
@@ -5300,7 +5309,7 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
l2cap_chan_lock(chan);
switch (result) {
- case L2CAP_CR_SUCCESS:
+ case L2CAP_CR_LE_SUCCESS:
if (__l2cap_get_chan_by_dcid(conn, dcid)) {
err = -EBADSLT;
break;
@@ -5314,8 +5323,8 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
l2cap_chan_ready(chan);
break;
- case L2CAP_CR_AUTHENTICATION:
- case L2CAP_CR_ENCRYPTION:
+ case L2CAP_CR_LE_AUTHENTICATION:
+ case L2CAP_CR_LE_ENCRYPTION:
/* If we already have MITM protection we can't do
* anything.
*/
@@ -5458,7 +5467,7 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
&conn->hcon->dst, LE_LINK);
if (!pchan) {
- result = L2CAP_CR_BAD_PSM;
+ result = L2CAP_CR_LE_BAD_PSM;
chan = NULL;
goto response;
}
@@ -5468,33 +5477,31 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
SMP_ALLOW_STK)) {
- result = L2CAP_CR_AUTHENTICATION;
+ result = L2CAP_CR_LE_AUTHENTICATION;
chan = NULL;
goto response_unlock;
}
/* Check for valid dynamic CID range */
if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
- result = L2CAP_CR_INVALID_SCID;
+ result = L2CAP_CR_LE_INVALID_SCID;
chan = NULL;
goto response_unlock;
}
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(conn, scid)) {
- result = L2CAP_CR_SCID_IN_USE;
+ result = L2CAP_CR_LE_SCID_IN_USE;
chan = NULL;
goto response_unlock;
}
chan = pchan->ops->new_connection(pchan);
if (!chan) {
- result = L2CAP_CR_NO_MEM;
+ result = L2CAP_CR_LE_NO_MEM;
goto response_unlock;
}
- l2cap_le_flowctl_init(chan);
-
bacpy(&chan->src, &conn->hcon->src);
bacpy(&chan->dst, &conn->hcon->dst);
chan->src_type = bdaddr_src_type(conn->hcon);
@@ -5506,6 +5513,9 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
chan->tx_credits = __le16_to_cpu(req->credits);
__l2cap_chan_add(conn, chan);
+
+ l2cap_le_flowctl_init(chan);
+
dcid = chan->scid;
credits = chan->rx_credits;
@@ -5524,7 +5534,7 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
chan->ops->defer(chan);
} else {
l2cap_chan_ready(chan);
- result = L2CAP_CR_SUCCESS;
+ result = L2CAP_CR_LE_SUCCESS;
}
response_unlock:
@@ -6699,13 +6709,10 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
struct l2cap_le_credits pkt;
u16 return_credits;
- /* We return more credits to the sender only after the amount of
- * credits falls below half of the initial amount.
- */
- if (chan->rx_credits >= (le_max_credits + 1) / 2)
- return;
+ return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;
- return_credits = le_max_credits - chan->rx_credits;
+ if (!return_credits)
+ return;
BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
@@ -6719,6 +6726,21 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
+static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ int err;
+
+ BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
+
+ /* Wait for recv to confirm reception before updating the credits */
+ err = chan->ops->recv(chan, skb);
+
+ /* Update credits whenever an SDU is received */
+ l2cap_chan_le_send_credits(chan);
+
+ return err;
+}
+
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
int err;
@@ -6737,7 +6759,11 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
chan->rx_credits--;
BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
- l2cap_chan_le_send_credits(chan);
+ /* Update if remote had run out of credits, which should only happen
+ * if the remote is not using the entire MPS.
+ */
+ if (!chan->rx_credits)
+ l2cap_chan_le_send_credits(chan);
err = 0;
@@ -6763,12 +6789,22 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
}
if (skb->len == sdu_len)
- return chan->ops->recv(chan, skb);
+ return l2cap_le_recv(chan, skb);
chan->sdu = skb;
chan->sdu_len = sdu_len;
chan->sdu_last_frag = skb;
+ /* Detect if remote is not able to use the selected MPS */
+ if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
+ u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
+
+ /* Adjust the number of credits */
+ BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
+ chan->mps = mps_len;
+ l2cap_chan_le_send_credits(chan);
+ }
+
return 0;
}
@@ -6785,7 +6821,7 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
skb = NULL;
if (chan->sdu->len == chan->sdu_len) {
- err = chan->ops->recv(chan, chan->sdu);
+ err = l2cap_le_recv(chan, chan->sdu);
if (!err) {
chan->sdu = NULL;
chan->sdu_last_frag = NULL;
@@ -7102,7 +7138,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
case L2CAP_MODE_BASIC:
break;
case L2CAP_MODE_LE_FLOWCTL:
- l2cap_le_flowctl_init(chan);
break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
@@ -7645,11 +7680,6 @@ int __init l2cap_init(void)
l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
NULL, &l2cap_debugfs_fops);
- debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
- &le_max_credits);
- debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
- &le_default_mps);
-
return 0;
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3bdc8f3ca259..ccce954f8146 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2434,9 +2434,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
/* LE address type */
addr_type = le_addr_type(cp->addr.type);
- hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
-
- err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
+ /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
+ err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
if (err < 0) {
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
MGMT_STATUS_NOT_PAIRED, &rp,
@@ -2450,8 +2449,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto done;
}
- /* Abort any ongoing SMP pairing */
- smp_cancel_pairing(conn);
/* Defer clearing up the connection parameters until closing to
* give a chance of keeping them if a repairing happens.
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 5e44d842cc5d..0c7d31c6c18c 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -839,18 +839,6 @@ static int rfcomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned l
BT_DBG("TIOCMIWAIT");
break;
- case TIOCGSERIAL:
- BT_ERR("TIOCGSERIAL is not supported");
- return -ENOIOCTLCMD;
-
- case TIOCSSERIAL:
- BT_ERR("TIOCSSERIAL is not supported");
- return -ENOIOCTLCMD;
-
- case TIOCSERGSTRUCT:
- BT_ERR("TIOCSERGSTRUCT is not supported");
- return -ENOIOCTLCMD;
-
case TIOCSERGETLSR:
BT_ERR("TIOCSERGETLSR is not supported");
return -ENOIOCTLCMD;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index ae91e2d40056..a1c1b7e8a45c 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -83,13 +83,11 @@ enum {
struct smp_dev {
/* Secure Connections OOB data */
+ bool local_oob;
u8 local_pk[64];
u8 local_rand[16];
bool debug_key;
- u8 min_key_size;
- u8 max_key_size;
-
struct crypto_cipher *tfm_aes;
struct crypto_shash *tfm_cmac;
struct crypto_kpp *tfm_ecdh;
@@ -599,6 +597,8 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16])
memcpy(rand, smp->local_rand, 16);
+ smp->local_oob = true;
+
return 0;
}
@@ -717,7 +717,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
if (rsp == NULL) {
req->io_capability = conn->hcon->io_capability;
req->oob_flag = oob_flag;
- req->max_key_size = SMP_DEV(hdev)->max_key_size;
+ req->max_key_size = hdev->le_max_key_size;
req->init_key_dist = local_dist;
req->resp_key_dist = remote_dist;
req->auth_req = (authreq & AUTH_REQ_MASK(hdev));
@@ -728,7 +728,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
rsp->io_capability = conn->hcon->io_capability;
rsp->oob_flag = oob_flag;
- rsp->max_key_size = SMP_DEV(hdev)->max_key_size;
+ rsp->max_key_size = hdev->le_max_key_size;
rsp->init_key_dist = req->init_key_dist & remote_dist;
rsp->resp_key_dist = req->resp_key_dist & local_dist;
rsp->auth_req = (authreq & AUTH_REQ_MASK(hdev));
@@ -742,7 +742,7 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
struct hci_dev *hdev = conn->hcon->hdev;
struct smp_chan *smp = chan->data;
- if (max_key_size > SMP_DEV(hdev)->max_key_size ||
+ if (max_key_size > hdev->le_max_key_size ||
max_key_size < SMP_MIN_ENC_KEY_SIZE)
return SMP_ENC_KEY_SIZE;
@@ -1785,7 +1785,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
* successfully received our local OOB data - therefore set the
* flag to indicate that local OOB is in use.
*/
- if (req->oob_flag == SMP_OOB_PRESENT)
+ if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob)
set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
/* SMP over BR/EDR requires special treatment */
@@ -1967,7 +1967,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
* successfully received our local OOB data - therefore set the
* flag to indicate that local OOB is in use.
*/
- if (rsp->oob_flag == SMP_OOB_PRESENT)
+ if (rsp->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob)
set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -2419,30 +2419,51 @@ unlock:
return ret;
}
-void smp_cancel_pairing(struct hci_conn *hcon)
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type)
{
- struct l2cap_conn *conn = hcon->l2cap_data;
+ struct hci_conn *hcon;
+ struct l2cap_conn *conn;
struct l2cap_chan *chan;
struct smp_chan *smp;
+ int err;
+
+ err = hci_remove_ltk(hdev, bdaddr, addr_type);
+ hci_remove_irk(hdev, bdaddr, addr_type);
+ hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
+ if (!hcon)
+ goto done;
+
+ conn = hcon->l2cap_data;
if (!conn)
- return;
+ goto done;
chan = conn->smp;
if (!chan)
- return;
+ goto done;
l2cap_chan_lock(chan);
smp = chan->data;
if (smp) {
+ /* Set keys to NULL to make sure smp_failure() does not try to
+ * remove and free already invalidated rcu list entries. */
+ smp->ltk = NULL;
+ smp->slave_ltk = NULL;
+ smp->remote_irk = NULL;
+
if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
smp_failure(conn, 0);
else
smp_failure(conn, SMP_UNSPECIFIED);
+ err = 0;
}
l2cap_chan_unlock(chan);
+
+done:
+ return err;
}
static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -2697,7 +2718,13 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
* key was set/generated.
*/
if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) {
- struct smp_dev *smp_dev = chan->data;
+ struct l2cap_chan *hchan = hdev->smp_data;
+ struct smp_dev *smp_dev;
+
+ if (!hchan || !hchan->data)
+ return SMP_UNSPECIFIED;
+
+ smp_dev = hchan->data;
tfm_ecdh = smp_dev->tfm_ecdh;
} else {
@@ -3230,11 +3257,10 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
return ERR_CAST(tfm_ecdh);
}
+ smp->local_oob = false;
smp->tfm_aes = tfm_aes;
smp->tfm_cmac = tfm_cmac;
smp->tfm_ecdh = tfm_ecdh;
- smp->min_key_size = SMP_MIN_ENC_KEY_SIZE;
- smp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
create_chan:
chan = l2cap_chan_create();
@@ -3360,7 +3386,7 @@ static ssize_t le_min_key_size_read(struct file *file,
struct hci_dev *hdev = file->private_data;
char buf[4];
- snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->min_key_size);
+ snprintf(buf, sizeof(buf), "%2u\n", hdev->le_min_key_size);
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
@@ -3381,11 +3407,11 @@ static ssize_t le_min_key_size_write(struct file *file,
sscanf(buf, "%hhu", &key_size);
- if (key_size > SMP_DEV(hdev)->max_key_size ||
+ if (key_size > hdev->le_max_key_size ||
key_size < SMP_MIN_ENC_KEY_SIZE)
return -EINVAL;
- SMP_DEV(hdev)->min_key_size = key_size;
+ hdev->le_min_key_size = key_size;
return count;
}
@@ -3404,7 +3430,7 @@ static ssize_t le_max_key_size_read(struct file *file,
struct hci_dev *hdev = file->private_data;
char buf[4];
- snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->max_key_size);
+ snprintf(buf, sizeof(buf), "%2u\n", hdev->le_max_key_size);
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
@@ -3426,10 +3452,10 @@ static ssize_t le_max_key_size_write(struct file *file,
sscanf(buf, "%hhu", &key_size);
if (key_size > SMP_MAX_ENC_KEY_SIZE ||
- key_size < SMP_DEV(hdev)->min_key_size)
+ key_size < hdev->le_min_key_size)
return -EINVAL;
- SMP_DEV(hdev)->max_key_size = key_size;
+ hdev->le_max_key_size = key_size;
return count;
}
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 0ff6247eaa6c..121edadd5f8d 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -181,7 +181,8 @@ enum smp_key_pref {
};
/* SMP Commands */
-void smp_cancel_pairing(struct hci_conn *hcon);
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type);
bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
enum smp_key_pref key_pref);
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index f4078830ea50..c89c22c49015 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -10,9 +10,11 @@
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
+#include <net/sock.h>
+#include <net/tcp.h>
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
- struct bpf_cgroup_storage *storage)
+ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
u32 ret;
@@ -28,13 +30,20 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
- struct bpf_cgroup_storage *storage = NULL;
+ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
+ enum bpf_cgroup_storage_type stype;
u64 time_start, time_spent = 0;
u32 ret = 0, i;
- storage = bpf_cgroup_storage_alloc(prog);
- if (IS_ERR(storage))
- return PTR_ERR(storage);
+ for_each_cgroup_storage_type(stype) {
+ storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
+ if (IS_ERR(storage[stype])) {
+ storage[stype] = NULL;
+ for_each_cgroup_storage_type(stype)
+ bpf_cgroup_storage_free(storage[stype]);
+ return -ENOMEM;
+ }
+ }
if (!repeat)
repeat = 1;
@@ -53,7 +62,8 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
do_div(time_spent, repeat);
*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
- bpf_cgroup_storage_free(storage);
+ for_each_cgroup_storage_type(stype)
+ bpf_cgroup_storage_free(storage[stype]);
return ret;
}
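
The error path above leans on bpf_cgroup_storage_free() tolerating a NULL pointer: the failing slot is cleared and the loop then frees every slot, allocated or not, which only works because the array started zero-initialized. A generic sketch of the same alloc-all-or-free-all idiom (illustrative names, plain kzalloc/kfree standing in for the storage API):

    #include <linux/slab.h>

    #define EXAMPLE_NR_RES 4

    /* res[] must arrive zero-initialized, mirroring the { 0 } above;
     * kfree(NULL) is a no-op, which keeps the unwind unconditional.
     */
    static int example_alloc_all(void *res[EXAMPLE_NR_RES])
    {
            int i, j;

            for (i = 0; i < EXAMPLE_NR_RES; i++) {
                    res[i] = kzalloc(64, GFP_KERNEL);
                    if (!res[i]) {
                            for (j = 0; j < EXAMPLE_NR_RES; j++)
                                    kfree(res[j]);
                            return -ENOMEM;
                    }
            }

            return 0;
    }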
@@ -107,6 +117,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
u32 retval, duration;
int hh_len = ETH_HLEN;
struct sk_buff *skb;
+ struct sock *sk;
void *data;
int ret;
@@ -129,11 +140,21 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
break;
}
+ sk = kzalloc(sizeof(struct sock), GFP_USER);
+ if (!sk) {
+ kfree(data);
+ return -ENOMEM;
+ }
+ sock_net_set(sk, current->nsproxy->net_ns);
+ sock_init_data(NULL, sk);
+
skb = build_skb(data, 0);
if (!skb) {
kfree(data);
+ kfree(sk);
return -ENOMEM;
}
+ skb->sk = sk;
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
__skb_put(skb, size);
@@ -151,6 +172,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
kfree_skb(skb);
+ kfree(sk);
return -ENOMEM;
}
}
@@ -163,6 +185,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
size = skb_headlen(skb);
ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
kfree_skb(skb);
+ kfree(sk);
return ret;
}
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index f0fc182d3db7..7acfc83087d5 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -23,9 +23,11 @@ static void shutdown_umh(struct umh_info *info)
if (!info->pid)
return;
- tsk = pid_task(find_vpid(info->pid), PIDTYPE_PID);
- if (tsk)
+ tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);
+ if (tsk) {
force_sig(SIGKILL, tsk);
+ put_task_struct(tsk);
+ }
fput(info->pipe_to_umh);
fput(info->pipe_from_umh);
info->pid = 0;
@@ -59,7 +61,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
req.is_set = is_set;
req.pid = current->pid;
req.cmd = optname;
- req.addr = (long)optval;
+ req.addr = (long __force __user)optval;
req.len = optlen;
mutex_lock(&bpfilter_lock);
if (!info.pid)
@@ -90,6 +92,7 @@ static int __init load_umh(void)
int err;
/* fork usermode process */
+ info.cmdline = "bpfilter_umh";
err = fork_usermode_blob(&bpfilter_umh_start,
&bpfilter_umh_end - &bpfilter_umh_start,
&info);
@@ -98,7 +101,7 @@ static int __init load_umh(void)
pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
/* health check that usermode process started correctly */
- if (__bpfilter_process_sockopt(NULL, 0, 0, 0, 0) != 0) {
+ if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) {
stop_umh();
return -EFAULT;
}
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index aa0d3b2f1bb7..3625d6ade45c 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -17,7 +17,7 @@ config BRIDGE
other third party bridge products.
In order to use the Ethernet bridge, you'll need the bridge
- configuration tools; see <file:Documentation/networking/bridge.txt>
+ configuration tools; see <file:Documentation/networking/bridge.rst>
for location. Please read the Bridge mini-HOWTO for more
information.
diff --git a/net/bridge/br.c b/net/bridge/br.c
index b0a0b82e2d91..360ad66c21e9 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -151,7 +151,7 @@ static int br_switchdev_event(struct notifier_block *unused,
break;
}
br_fdb_offloaded_set(br, p, fdb_info->addr,
- fdb_info->vid);
+ fdb_info->vid, true);
break;
case SWITCHDEV_FDB_DEL_TO_BRIDGE:
fdb_info = ptr;
@@ -163,7 +163,7 @@ static int br_switchdev_event(struct notifier_block *unused,
case SWITCHDEV_FDB_OFFLOADED:
fdb_info = ptr;
br_fdb_offloaded_set(br, p, fdb_info->addr,
- fdb_info->vid);
+ fdb_info->vid, fdb_info->offloaded);
break;
}
@@ -175,6 +175,22 @@ static struct notifier_block br_switchdev_notifier = {
.notifier_call = br_switchdev_event,
};
+void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on)
+{
+ bool cur = !!br_opt_get(br, opt);
+
+ br_debug(br, "toggle option: %d state: %d -> %d\n",
+ opt, cur, on);
+
+ if (cur == on)
+ return;
+
+ if (on)
+ set_bit(opt, &br->options);
+ else
+ clear_bit(opt, &br->options);
+}
+
static void __net_exit br_net_exit(struct net *net)
{
struct net_device *dev;
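
The new helper replaces a crop of standalone bool members (neigh_suppress_enabled, mtu_set_by_user, ...) with bits in br->options, as the conversions in the following files show. The read side goes through br_opt_get(), which this hunk does not include but is presumably a !!test_bit(opt, &br->options) wrapper in br_private.h; typical usage then reads:

    /* illustrative only; mirrors the call sites converted below */
    static void example_option_use(struct net_bridge *br)
    {
            br_opt_toggle(br, BROPT_NEIGH_SUPPRESS_ENABLED, true);

            if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED))
                    br_debug(br, "neigh suppress active\n");
    }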
diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
index 2cf7716254be..6b78e6351719 100644
--- a/net/bridge/br_arp_nd_proxy.c
+++ b/net/bridge/br_arp_nd_proxy.c
@@ -39,7 +39,7 @@ void br_recalculate_neigh_suppress_enabled(struct net_bridge *br)
}
}
- br->neigh_suppress_enabled = neigh_suppress;
+ br_opt_toggle(br, BROPT_NEIGH_SUPPRESS_ENABLED, neigh_suppress);
}
#if IS_ENABLED(CONFIG_INET)
@@ -155,7 +155,7 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
ipv4_is_multicast(tip))
return;
- if (br->neigh_suppress_enabled) {
+ if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
if (p && (p->flags & BR_NEIGH_SUPPRESS))
return;
if (ipv4_is_zeronet(sip) || sip == tip) {
@@ -175,7 +175,8 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
return;
}
- if (br->neigh_suppress_enabled && br_is_local_ip(vlandev, tip)) {
+ if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ br_is_local_ip(vlandev, tip)) {
/* its our local ip, so don't proxy reply
* and don't forward to neigh suppress ports
*/
@@ -213,7 +214,8 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
/* If we have replied or as long as we know the
* mac, indicate to arp replied
*/
- if (replied || br->neigh_suppress_enabled)
+ if (replied ||
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED))
BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
}
@@ -311,7 +313,7 @@ static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p,
/* Neighbor Advertisement */
memset(na, 0, sizeof(*na) + na_olen);
na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
- na->icmph.icmp6_router = 0; /* XXX: should be 1 ? */
+ na->icmph.icmp6_router = (n->flags & NTF_ROUTER) ? 1 : 0;
na->icmph.icmp6_override = 1;
na->icmph.icmp6_solicited = 1;
na->target = ns->target;
@@ -460,7 +462,8 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
* mac, indicate to NEIGH_SUPPRESS ports that we
* have replied
*/
- if (replied || br->neigh_suppress_enabled)
+ if (replied ||
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED))
BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
}
neigh_release(n);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e682a668ce57..c6abf927f0c9 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -67,11 +67,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
if (IS_ENABLED(CONFIG_INET) &&
(eth->h_proto == htons(ETH_P_ARP) ||
eth->h_proto == htons(ETH_P_RARP)) &&
- br->neigh_suppress_enabled) {
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
br_do_proxy_suppress_arp(skb, br, vid, NULL);
} else if (IS_ENABLED(CONFIG_IPV6) &&
skb->protocol == htons(ETH_P_IPV6) &&
- br->neigh_suppress_enabled &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
pskb_may_pull(skb, sizeof(struct ipv6hdr) +
sizeof(struct nd_msg)) &&
ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
@@ -228,7 +228,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
/* this flag will be cleared if the MTU was automatically adjusted */
- br->mtu_set_by_user = true;
+ br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* remember the MTU in the rtable for PMTU */
dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
@@ -344,7 +344,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
p->np = NULL;
- __netpoll_free_async(np);
+ __netpoll_free(np);
}
#endif
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 502f66349530..e56ba3912a90 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -504,6 +504,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
fdb->added_by_user = 0;
fdb->added_by_external_learn = 0;
fdb->offloaded = 0;
+ fdb->is_sticky = 0;
fdb->updated = fdb->used = jiffies;
if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
&fdb->rhnode,
@@ -584,7 +585,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
unsigned long now = jiffies;
/* fastpath: update of existing entry */
- if (unlikely(source != fdb->dst)) {
+ if (unlikely(source != fdb->dst && !fdb->is_sticky)) {
fdb->dst = source;
fdb_modified = true;
/* Take over HW learned entry */
@@ -656,6 +657,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
ndm->ndm_flags |= NTF_OFFLOADED;
if (fdb->added_by_external_learn)
ndm->ndm_flags |= NTF_EXT_LEARNED;
+ if (fdb->is_sticky)
+ ndm->ndm_flags |= NTF_STICKY;
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
goto nla_put_failure;
@@ -772,8 +775,10 @@ skip:
/* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
- const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
+ const u8 *addr, u16 state, u16 flags, u16 vid,
+ u8 ndm_flags)
{
+ u8 is_sticky = !!(ndm_flags & NTF_STICKY);
struct net_bridge_fdb_entry *fdb;
bool modified = false;
@@ -789,6 +794,9 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
return -EINVAL;
}
+ if (is_sticky && (state & NUD_PERMANENT))
+ return -EINVAL;
+
fdb = br_fdb_find(br, addr, vid);
if (fdb == NULL) {
if (!(flags & NLM_F_CREATE))
@@ -832,6 +840,12 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
modified = true;
}
+
+ if (is_sticky != fdb->is_sticky) {
+ fdb->is_sticky = is_sticky;
+ modified = true;
+ }
+
fdb->added_by_user = 1;
fdb->used = jiffies;
@@ -865,7 +879,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
} else {
spin_lock_bh(&br->hash_lock);
err = fdb_add_entry(br, p, addr, ndm->ndm_state,
- nlh_flags, vid);
+ nlh_flags, vid, ndm->ndm_flags);
spin_unlock_bh(&br->hash_lock);
}
@@ -1138,7 +1152,7 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
}
void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid, bool offloaded)
{
struct net_bridge_fdb_entry *fdb;
@@ -1146,7 +1160,7 @@ void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
fdb = br_fdb_find(br, addr, vid);
if (fdb)
- fdb->offloaded = 1;
+ fdb->offloaded = offloaded;
spin_unlock_bh(&br->hash_lock);
}
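
The br_fdb.c changes wire up NTF_STICKY: a sticky entry keeps its port even when the
address shows up on another port (note the !fdb->is_sticky test in the learning fast
path), and combining sticky with NUD_PERMANENT is rejected since permanent entries never
roam to begin with. A hedged usage sketch, assuming an iproute2 release that understands
the sticky keyword (the MAC and port name are placeholders):

	bridge fdb add 52:54:00:12:34:56 dev swp1 master sticky
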
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 0363f1bdc401..9b46d2dc4c22 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -394,8 +394,7 @@ static int find_portno(struct net_bridge *br)
struct net_bridge_port *p;
unsigned long *inuse;
- inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
- GFP_KERNEL);
+ inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
if (!inuse)
return -ENOMEM;
@@ -404,7 +403,7 @@ static int find_portno(struct net_bridge *br)
set_bit(p->port_no, inuse);
}
index = find_first_zero_bit(inuse, BR_MAX_PORTS);
- kfree(inuse);
+ bitmap_free(inuse);
return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}
@@ -509,14 +508,14 @@ void br_mtu_auto_adjust(struct net_bridge *br)
ASSERT_RTNL();
/* if the bridge MTU was manually configured don't mess with it */
- if (br->mtu_set_by_user)
+ if (br_opt_get(br, BROPT_MTU_SET_BY_USER))
return;
/* change to the minimum MTU and clear the flag which was set by
* the bridge ndo_change_mtu callback
*/
dev_set_mtu(br->dev, br_mtu_min(br));
- br->mtu_set_by_user = false;
+ br_opt_toggle(br, BROPT_MTU_SET_BY_USER, false);
}
static void br_set_gso_limits(struct net_bridge *br)
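
bitmap_zalloc()/bitmap_free() as used in find_portno() above are thin wrappers over the
open-coded pattern they replace; a sketch of the equivalence:

	unsigned long *inuse;

	/* before: size computed by hand */
	inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			GFP_KERNEL);
	/* after: same zeroed bitmap, intent made explicit */
	inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	/* ... use the bitmap ... */
	bitmap_free(inuse);	/* pairs with bitmap_zalloc() */
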
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 72074276c088..3ddca11f44c2 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -122,7 +122,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
br_do_proxy_suppress_arp(skb, br, vid, p);
} else if (IS_ENABLED(CONFIG_IPV6) &&
skb->protocol == htons(ETH_P_IPV6) &&
- br->neigh_suppress_enabled &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
pskb_may_pull(skb, sizeof(struct ipv6hdr) +
sizeof(struct nd_msg)) &&
ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 6d9f48bd374a..a7ea2d431714 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -84,7 +84,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
int i, err = 0;
int idx = 0, s_idx = cb->args[1];
- if (br->multicast_disabled)
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return 0;
mdb = rcu_dereference(br->mdb);
@@ -162,6 +162,29 @@ out:
return err;
}
+static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct br_port_msg *bpm;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
+ return -EINVAL;
+ }
+
+ bpm = nlmsg_data(nlh);
+ if (bpm->ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
+ return -EINVAL;
+ }
+ if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
+ NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net_device *dev;
@@ -169,6 +192,13 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct nlmsghdr *nlh = NULL;
int idx = 0, s_idx;
+ if (cb->strict_check) {
+ int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);
+
+ if (err < 0)
+ return err;
+ }
+
s_idx = cb->args[0];
rcu_read_lock();
@@ -598,7 +628,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
struct net_bridge_port *p;
int ret;
- if (!netif_running(br->dev) || br->multicast_disabled)
+ if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
return -EINVAL;
dev = __dev_get_by_index(net, entry->ifindex);
@@ -673,7 +703,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
struct br_ip ip;
int err = -EINVAL;
- if (!netif_running(br->dev) || br->multicast_disabled)
+ if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
return -EINVAL;
__mdb_entry_to_br_ip(entry, &ip);
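
br_mdb_valid_dump_req() above is the strict-checking pattern for netlink dump requests:
when userspace opted in (cb->strict_check), the fixed header must be present in full,
filtering fields the dump does not support (here ifindex) must be zero, and trailing
attributes are rejected. The same guard shape, as a sketch for any dump handler:

	static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		if (cb->strict_check) {
			int err = foo_valid_dump_req(cb->nlh, cb->extack);

			if (err < 0)
				return err;
		}
		/* ... walk the table and fill skb ... */
		return skb->len;
	}
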
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 20ed7adcf1cc..024139b51d3a 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -158,7 +158,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
struct br_ip ip;
- if (br->multicast_disabled)
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return NULL;
if (BR_INPUT_SKB_CB(skb)->igmp)
@@ -411,7 +411,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
iph->frag_off = htons(IP_DF);
iph->ttl = 1;
iph->protocol = IPPROTO_IGMP;
- iph->saddr = br->multicast_query_use_ifaddr ?
+ iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
((u8 *)&iph[1])[0] = IPOPT_RA;
@@ -503,11 +503,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
&ip6h->saddr)) {
kfree_skb(skb);
- br->has_ipv6_addr = 0;
+ br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
return NULL;
}
- br->has_ipv6_addr = 1;
+ br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
hopopt = (u8 *)(ip6h + 1);
@@ -628,7 +628,7 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
port ? port->dev->name : br->dev->name);
err = -E2BIG;
disable:
- br->multicast_disabled = 1;
+ br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
goto err;
}
}
@@ -894,7 +894,7 @@ static void br_multicast_querier_expired(struct net_bridge *br,
struct bridge_mcast_own_query *query)
{
spin_lock(&br->multicast_lock);
- if (!netif_running(br->dev) || br->multicast_disabled)
+ if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
goto out;
br_multicast_start_querier(br, query);
@@ -965,8 +965,9 @@ static void br_multicast_send_query(struct net_bridge *br,
struct br_ip br_group;
unsigned long time;
- if (!netif_running(br->dev) || br->multicast_disabled ||
- !br->multicast_querier)
+ if (!netif_running(br->dev) ||
+ !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
+ !br_opt_get(br, BROPT_MULTICAST_QUERIER))
return;
memset(&br_group.u, 0, sizeof(br_group.u));
@@ -1036,7 +1037,7 @@ static void br_mc_disabled_update(struct net_device *dev, bool value)
.orig_dev = dev,
.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
.flags = SWITCHDEV_F_DEFER,
- .u.mc_disabled = value,
+ .u.mc_disabled = !value,
};
switchdev_port_attr_set(dev, &attr);
@@ -1054,7 +1055,8 @@ int br_multicast_add_port(struct net_bridge_port *port)
timer_setup(&port->ip6_own_query.timer,
br_ip6_multicast_port_query_expired, 0);
#endif
- br_mc_disabled_update(port->dev, port->br->multicast_disabled);
+ br_mc_disabled_update(port->dev,
+ br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
if (!port->mcast_stats)
@@ -1091,7 +1093,7 @@ static void __br_multicast_enable_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
- if (br->multicast_disabled || !netif_running(br->dev))
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
return;
br_multicast_enable(&port->ip4_own_query);
@@ -1634,7 +1636,7 @@ br_multicast_leave_group(struct net_bridge *br,
if (timer_pending(&other_query->timer))
goto out;
- if (br->multicast_querier) {
+ if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
__br_multicast_send_query(br, port, &mp->addr);
time = jiffies + br->multicast_last_member_count *
@@ -1746,7 +1748,7 @@ static void br_multicast_err_count(const struct net_bridge *br,
struct bridge_mcast_stats __percpu *stats;
struct bridge_mcast_stats *pstats;
- if (!br->multicast_stats_enabled)
+ if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
return;
if (p)
@@ -1904,7 +1906,7 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
BR_INPUT_SKB_CB(skb)->igmp = 0;
BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
- if (br->multicast_disabled)
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return 0;
switch (skb->protocol) {
@@ -1956,8 +1958,6 @@ void br_multicast_init(struct net_bridge *br)
br->hash_max = 512;
br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
- br->multicast_querier = 0;
- br->multicast_query_use_ifaddr = 0;
br->multicast_last_member_count = 2;
br->multicast_startup_query_count = 2;
@@ -1976,7 +1976,8 @@ void br_multicast_init(struct net_bridge *br)
br->ip6_other_query.delay_time = 0;
br->ip6_querier.port = NULL;
#endif
- br->has_ipv6_addr = 1;
+ br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
+ br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
spin_lock_init(&br->multicast_lock);
timer_setup(&br->multicast_router_timer,
@@ -1998,7 +1999,7 @@ static void __br_multicast_open(struct net_bridge *br,
{
query->startup_sent = 0;
- if (br->multicast_disabled)
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return;
mod_timer(&query->timer, jiffies);
@@ -2173,12 +2174,12 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
int err = 0;
spin_lock_bh(&br->multicast_lock);
- if (br->multicast_disabled == !val)
+ if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
goto unlock;
- br_mc_disabled_update(br->dev, !val);
- br->multicast_disabled = !val;
- if (br->multicast_disabled)
+ br_mc_disabled_update(br->dev, val);
+ br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
goto unlock;
if (!netif_running(br->dev))
@@ -2189,7 +2190,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
if (mdb->old) {
err = -EEXIST;
rollback:
- br->multicast_disabled = !!val;
+ br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
goto unlock;
}
@@ -2213,7 +2214,7 @@ bool br_multicast_enabled(const struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- return !br->multicast_disabled;
+ return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);
@@ -2236,10 +2237,10 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
val = !!val;
spin_lock_bh(&br->multicast_lock);
- if (br->multicast_querier == val)
+ if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
goto unlock;
- br->multicast_querier = val;
+ br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
if (!val)
goto unlock;
@@ -2560,7 +2561,7 @@ void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
struct bridge_mcast_stats __percpu *stats;
/* if multicast_disabled is true then igmp type can't be set */
- if (!type || !br->multicast_stats_enabled)
+ if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
return;
if (p)
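
Note the polarity flip running through br_multicast.c: the stored state changes sense
from br->multicast_disabled to BROPT_MULTICAST_ENABLED, while the switchdev attribute
keeps its historical "disabled" meaning. That is why br_mc_disabled_update() now takes
the enabled value and stores its complement; as a two-line sketch:

	bool snooping_on = br_opt_get(br, BROPT_MULTICAST_ENABLED);

	attr.u.mc_disabled = !snooping_on;	/* attribute keeps the "disabled" sense */
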
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 6e0dc6bcd32a..b1b5e8516724 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -487,14 +487,15 @@ static unsigned int br_nf_pre_routing(void *priv,
br = p->br;
if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
- if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
+ if (!brnf_call_ip6tables &&
+ !br_opt_get(br, BROPT_NF_CALL_IP6TABLES))
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
return br_nf_pre_routing_ipv6(priv, skb, state);
}
- if (!brnf_call_iptables && !br->nf_call_iptables)
+ if (!brnf_call_iptables && !br_opt_get(br, BROPT_NF_CALL_IPTABLES))
return NF_ACCEPT;
if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
@@ -636,7 +637,7 @@ static unsigned int br_nf_forward_arp(void *priv,
return NF_ACCEPT;
br = p->br;
- if (!brnf_call_arptables && !br->nf_call_arptables)
+ if (!brnf_call_arptables && !br_opt_get(br, BROPT_NF_CALL_ARPTABLES))
return NF_ACCEPT;
if (!IS_ARP(skb)) {
@@ -835,7 +836,8 @@ static unsigned int ip_sabotage_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
+ if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
+ !netif_is_l3_master(skb->dev)) {
state->okfn(state->net, state->sk, skb);
return NF_STOLEN;
}
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index ec2b58a09f76..3345f1984542 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1034,6 +1034,7 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
+ [IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
};
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -1114,6 +1115,14 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (err)
return err;
}
+
+ if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
+ __u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);
+
+ err = br_vlan_set_stats_per_port(br, per_port);
+ if (err)
+ return err;
+ }
#endif
if (data[IFLA_BR_GROUP_FWD_MASK]) {
@@ -1139,7 +1148,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
spin_lock_bh(&br->lock);
memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
spin_unlock_bh(&br->lock);
- br->group_addr_set = true;
+ br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
br_recalculate_fwd_mask(br);
}
@@ -1167,7 +1176,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
u8 val;
val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
- br->multicast_query_use_ifaddr = !!val;
+ br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
}
if (data[IFLA_BR_MCAST_QUERIER]) {
@@ -1244,7 +1253,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
__u8 mcast_stats;
mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
- br->multicast_stats_enabled = !!mcast_stats;
+ br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
}
if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
@@ -1271,19 +1280,19 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (data[IFLA_BR_NF_CALL_IPTABLES]) {
u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);
- br->nf_call_iptables = val ? true : false;
+ br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
}
if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);
- br->nf_call_ip6tables = val ? true : false;
+ br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
}
if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);
- br->nf_call_arptables = val ? true : false;
+ br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
}
#endif
@@ -1327,6 +1336,7 @@ static size_t br_get_size(const struct net_device *brdev)
nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */
nla_total_size(sizeof(u16)) + /* IFLA_BR_VLAN_DEFAULT_PVID */
nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_ENABLED */
+ nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_PER_PORT */
#endif
nla_total_size(sizeof(u16)) + /* IFLA_BR_GROUP_FWD_MASK */
nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_ROOT_ID */
@@ -1416,17 +1426,22 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
- nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
+ nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
+ br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
+ nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
+		       br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
- nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
+ nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
+ br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
- br->multicast_query_use_ifaddr) ||
- nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
+ br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
+ nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
+ br_opt_get(br, BROPT_MULTICAST_QUERIER)) ||
nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
- br->multicast_stats_enabled) ||
+ br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
br->hash_elasticity) ||
nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
@@ -1469,11 +1484,11 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
- br->nf_call_iptables ? 1 : 0) ||
+ br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
- br->nf_call_ip6tables ? 1 : 0) ||
+ br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
- br->nf_call_arptables ? 1 : 0))
+ br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
return -EMSGSIZE;
#endif
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 11ed2029985f..2920e06a5403 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -54,14 +54,12 @@ typedef struct bridge_id bridge_id;
typedef struct mac_addr mac_addr;
typedef __u16 port_id;
-struct bridge_id
-{
+struct bridge_id {
unsigned char prio[2];
unsigned char addr[ETH_ALEN];
};
-struct mac_addr
-{
+struct mac_addr {
unsigned char addr[ETH_ALEN];
};
@@ -181,6 +179,7 @@ struct net_bridge_fdb_entry {
struct hlist_node fdb_node;
unsigned char is_local:1,
is_static:1,
+ is_sticky:1,
added_by_user:1,
added_by_external_learn:1,
offloaded:1;
@@ -206,8 +205,7 @@ struct net_bridge_port_group {
unsigned char eth_addr[ETH_ALEN];
};
-struct net_bridge_mdb_entry
-{
+struct net_bridge_mdb_entry {
struct hlist_node hlist[2];
struct net_bridge *br;
struct net_bridge_port_group __rcu *ports;
@@ -217,8 +215,7 @@ struct net_bridge_mdb_entry
bool host_joined;
};
-struct net_bridge_mdb_htable
-{
+struct net_bridge_mdb_htable {
struct hlist_head *mhash;
struct rcu_head rcu;
struct net_bridge_mdb_htable *old;
@@ -309,16 +306,32 @@ static inline struct net_bridge_port *br_port_get_rtnl_rcu(const struct net_devi
rcu_dereference_rtnl(dev->rx_handler_data) : NULL;
}
+enum net_bridge_opts {
+ BROPT_VLAN_ENABLED,
+ BROPT_VLAN_STATS_ENABLED,
+ BROPT_NF_CALL_IPTABLES,
+ BROPT_NF_CALL_IP6TABLES,
+ BROPT_NF_CALL_ARPTABLES,
+ BROPT_GROUP_ADDR_SET,
+ BROPT_MULTICAST_ENABLED,
+ BROPT_MULTICAST_QUERIER,
+ BROPT_MULTICAST_QUERY_USE_IFADDR,
+ BROPT_MULTICAST_STATS_ENABLED,
+ BROPT_HAS_IPV6_ADDR,
+ BROPT_NEIGH_SUPPRESS_ENABLED,
+ BROPT_MTU_SET_BY_USER,
+ BROPT_VLAN_STATS_PER_PORT,
+};
+
struct net_bridge {
spinlock_t lock;
spinlock_t hash_lock;
struct list_head port_list;
struct net_device *dev;
struct pcpu_sw_netstats __percpu *stats;
+ unsigned long options;
/* These fields are accessed on each packet */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
- u8 vlan_enabled;
- u8 vlan_stats_enabled;
__be16 vlan_proto;
u16 default_pvid;
struct net_bridge_vlan_group __rcu *vlgrp;
@@ -330,9 +343,6 @@ struct net_bridge {
struct rtable fake_rtable;
struct rt6_info fake_rt6_info;
};
- bool nf_call_iptables;
- bool nf_call_ip6tables;
- bool nf_call_arptables;
#endif
u16 group_fwd_mask;
u16 group_fwd_mask_required;
@@ -340,7 +350,6 @@ struct net_bridge {
/* STP */
bridge_id designated_root;
bridge_id bridge_id;
- u32 root_path_cost;
unsigned char topology_change;
unsigned char topology_change_detected;
u16 root_port;
@@ -352,9 +361,9 @@ struct net_bridge {
unsigned long bridge_hello_time;
unsigned long bridge_forward_delay;
unsigned long bridge_ageing_time;
+ u32 root_path_cost;
u8 group_addr[ETH_ALEN];
- bool group_addr_set;
enum {
BR_NO_STP, /* no spanning tree */
@@ -363,13 +372,6 @@ struct net_bridge {
} stp_enabled;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
- unsigned char multicast_router;
-
- u8 multicast_disabled:1;
- u8 multicast_querier:1;
- u8 multicast_query_use_ifaddr:1;
- u8 has_ipv6_addr:1;
- u8 multicast_stats_enabled:1;
u32 hash_elasticity;
u32 hash_max;
@@ -378,7 +380,11 @@ struct net_bridge {
u32 multicast_startup_query_count;
u8 multicast_igmp_version;
-
+ u8 multicast_router;
+#if IS_ENABLED(CONFIG_IPV6)
+ u8 multicast_mld_version;
+#endif
+ spinlock_t multicast_lock;
unsigned long multicast_last_member_interval;
unsigned long multicast_membership_interval;
unsigned long multicast_querier_interval;
@@ -386,7 +392,6 @@ struct net_bridge {
unsigned long multicast_query_response_interval;
unsigned long multicast_startup_query_interval;
- spinlock_t multicast_lock;
struct net_bridge_mdb_htable __rcu *mdb;
struct hlist_head router_list;
@@ -399,7 +404,6 @@ struct net_bridge {
struct bridge_mcast_other_query ip6_other_query;
struct bridge_mcast_own_query ip6_own_query;
struct bridge_mcast_querier ip6_querier;
- u8 multicast_mld_version;
#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif
@@ -413,8 +417,6 @@ struct net_bridge {
#ifdef CONFIG_NET_SWITCHDEV
int offload_fwd_mark;
#endif
- bool neigh_suppress_enabled;
- bool mtu_set_by_user;
struct hlist_head fdb_list;
};
@@ -492,6 +494,14 @@ static inline bool br_vlan_should_use(const struct net_bridge_vlan *v)
return true;
}
+static inline int br_opt_get(const struct net_bridge *br,
+ enum net_bridge_opts opt)
+{
+ return test_bit(opt, &br->options);
+}
+
+void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on);
+
/* br_device.c */
void br_dev_setup(struct net_device *dev);
void br_dev_delete(struct net_device *dev, struct list_head *list);
@@ -564,7 +574,7 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid,
bool swdev_notify);
void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid, bool offloaded);
/* br_forward.c */
enum br_pkt_type {
@@ -698,8 +708,8 @@ __br_multicast_querier_exists(struct net_bridge *br,
{
bool own_querier_enabled;
- if (br->multicast_querier) {
- if (is_ipv6 && !br->has_ipv6_addr)
+ if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
+ if (is_ipv6 && !br_opt_get(br, BROPT_HAS_IPV6_ADDR))
own_querier_enabled = false;
else
own_querier_enabled = true;
@@ -850,6 +860,7 @@ int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto);
int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
int br_vlan_set_stats(struct net_bridge *br, unsigned long val);
+int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val);
int br_vlan_init(struct net_bridge *br);
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid);
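
br_opt_get() is defined inline above; the matching br_opt_toggle() body is only declared
here. A plausible definition, given the single unsigned long options word indexed by
enum net_bridge_opts (a sketch, not necessarily the exact upstream body):

	void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on)
	{
		bool cur = !!br_opt_get(br, opt);

		if (cur == on)
			return;

		if (on)
			set_bit(opt, &br->options);
		else
			clear_bit(opt, &br->options);
	}
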
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index d77f807420c4..b993df770675 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -103,7 +103,7 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
static void
br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
u16 vid, struct net_device *dev,
- bool added_by_user)
+ bool added_by_user, bool offloaded)
{
struct switchdev_notifier_fdb_info info;
unsigned long notifier_type;
@@ -111,6 +111,7 @@ br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
info.addr = mac;
info.vid = vid;
info.added_by_user = added_by_user;
+ info.offloaded = offloaded;
notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE;
call_switchdev_notifiers(notifier_type, dev, &info.info);
}
@@ -126,13 +127,15 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
br_switchdev_fdb_call_notifiers(false, fdb->key.addr.addr,
fdb->key.vlan_id,
fdb->dst->dev,
- fdb->added_by_user);
+ fdb->added_by_user,
+ fdb->offloaded);
break;
case RTM_NEWNEIGH:
br_switchdev_fdb_call_notifiers(true, fdb->key.addr.addr,
fdb->key.vlan_id,
fdb->dst->dev,
- fdb->added_by_user);
+ fdb->added_by_user,
+ fdb->offloaded);
break;
}
}
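
With the extra offloaded argument, SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE listeners can tell
whether an entry is already programmed in hardware. A sketch of a driver-side consumer
("foo" is a placeholder, not a real driver):

	static int foo_switchdev_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
	{
		struct switchdev_notifier_fdb_info *fdb_info;

		switch (event) {
		case SWITCHDEV_FDB_ADD_TO_DEVICE:
			fdb_info = container_of(ptr,
						struct switchdev_notifier_fdb_info,
						info);
			if (fdb_info->offloaded)
				return NOTIFY_DONE;	/* already in hardware */
			/* ... schedule programming of the entry ... */
			break;
		}
		return NOTIFY_DONE;
	}
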
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 0318a69888d4..60182bef6341 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -303,7 +303,7 @@ static ssize_t group_addr_store(struct device *d,
ether_addr_copy(br->group_addr, new_addr);
spin_unlock_bh(&br->lock);
- br->group_addr_set = true;
+ br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
br_recalculate_fwd_mask(br);
netdev_state_change(br->dev);
@@ -349,7 +349,7 @@ static ssize_t multicast_snooping_show(struct device *d,
char *buf)
{
struct net_bridge *br = to_bridge(d);
- return sprintf(buf, "%d\n", !br->multicast_disabled);
+ return sprintf(buf, "%d\n", br_opt_get(br, BROPT_MULTICAST_ENABLED));
}
static ssize_t multicast_snooping_store(struct device *d,
@@ -365,12 +365,13 @@ static ssize_t multicast_query_use_ifaddr_show(struct device *d,
char *buf)
{
struct net_bridge *br = to_bridge(d);
- return sprintf(buf, "%d\n", br->multicast_query_use_ifaddr);
+ return sprintf(buf, "%d\n",
+ br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR));
}
static int set_query_use_ifaddr(struct net_bridge *br, unsigned long val)
{
- br->multicast_query_use_ifaddr = !!val;
+ br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
return 0;
}
@@ -388,7 +389,7 @@ static ssize_t multicast_querier_show(struct device *d,
char *buf)
{
struct net_bridge *br = to_bridge(d);
- return sprintf(buf, "%d\n", br->multicast_querier);
+ return sprintf(buf, "%d\n", br_opt_get(br, BROPT_MULTICAST_QUERIER));
}
static ssize_t multicast_querier_store(struct device *d,
@@ -636,12 +637,13 @@ static ssize_t multicast_stats_enabled_show(struct device *d,
{
struct net_bridge *br = to_bridge(d);
- return sprintf(buf, "%u\n", br->multicast_stats_enabled);
+ return sprintf(buf, "%d\n",
+ br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED));
}
static int set_stats_enabled(struct net_bridge *br, unsigned long val)
{
- br->multicast_stats_enabled = !!val;
+ br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!val);
return 0;
}
@@ -678,12 +680,12 @@ static ssize_t nf_call_iptables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
- return sprintf(buf, "%u\n", br->nf_call_iptables);
+ return sprintf(buf, "%u\n", br_opt_get(br, BROPT_NF_CALL_IPTABLES));
}
static int set_nf_call_iptables(struct net_bridge *br, unsigned long val)
{
- br->nf_call_iptables = val ? true : false;
+ br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
return 0;
}
@@ -699,12 +701,12 @@ static ssize_t nf_call_ip6tables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
- return sprintf(buf, "%u\n", br->nf_call_ip6tables);
+ return sprintf(buf, "%u\n", br_opt_get(br, BROPT_NF_CALL_IP6TABLES));
}
static int set_nf_call_ip6tables(struct net_bridge *br, unsigned long val)
{
- br->nf_call_ip6tables = val ? true : false;
+ br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
return 0;
}
@@ -720,12 +722,12 @@ static ssize_t nf_call_arptables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
- return sprintf(buf, "%u\n", br->nf_call_arptables);
+ return sprintf(buf, "%u\n", br_opt_get(br, BROPT_NF_CALL_ARPTABLES));
}
static int set_nf_call_arptables(struct net_bridge *br, unsigned long val)
{
- br->nf_call_arptables = val ? true : false;
+ br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
return 0;
}
@@ -743,7 +745,7 @@ static ssize_t vlan_filtering_show(struct device *d,
char *buf)
{
struct net_bridge *br = to_bridge(d);
- return sprintf(buf, "%d\n", br->vlan_enabled);
+ return sprintf(buf, "%d\n", br_opt_get(br, BROPT_VLAN_ENABLED));
}
static ssize_t vlan_filtering_store(struct device *d,
@@ -791,7 +793,7 @@ static ssize_t vlan_stats_enabled_show(struct device *d,
char *buf)
{
struct net_bridge *br = to_bridge(d);
- return sprintf(buf, "%u\n", br->vlan_stats_enabled);
+ return sprintf(buf, "%u\n", br_opt_get(br, BROPT_VLAN_STATS_ENABLED));
}
static ssize_t vlan_stats_enabled_store(struct device *d,
@@ -801,6 +803,22 @@ static ssize_t vlan_stats_enabled_store(struct device *d,
return store_bridge_parm(d, buf, len, br_vlan_set_stats);
}
static DEVICE_ATTR_RW(vlan_stats_enabled);
+
+static ssize_t vlan_stats_per_port_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%u\n", br_opt_get(br, BROPT_VLAN_STATS_PER_PORT));
+}
+
+static ssize_t vlan_stats_per_port_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, br_vlan_set_stats_per_port);
+}
+static DEVICE_ATTR_RW(vlan_stats_per_port);
#endif
static struct attribute *bridge_attrs[] = {
@@ -854,6 +872,7 @@ static struct attribute *bridge_attrs[] = {
&dev_attr_vlan_protocol.attr,
&dev_attr_default_pvid.attr,
&dev_attr_vlan_stats_enabled.attr,
+ &dev_attr_vlan_stats_per_port.attr,
#endif
NULL
};
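
The new knob is reachable both through sysfs and netlink; toggling fails with -EBUSY once
any port vlans exist (see br_vlan_set_stats_per_port below). Hedged usage, assuming a
bridge named br0 and an iproute2 built with IFLA_BR_VLAN_STATS_PER_PORT support:

	echo 1 > /sys/class/net/br0/bridge/vlan_stats_per_port
	ip link set dev br0 type bridge vlan_stats_per_port 1
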
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 7df269092103..8c9297a01947 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -190,6 +190,19 @@ static void br_vlan_put_master(struct net_bridge_vlan *masterv)
}
}
+static void nbp_vlan_rcu_free(struct rcu_head *rcu)
+{
+ struct net_bridge_vlan *v;
+
+ v = container_of(rcu, struct net_bridge_vlan, rcu);
+ WARN_ON(br_vlan_is_master(v));
+ /* if we had per-port stats configured then free them here */
+ if (v->brvlan->stats != v->stats)
+ free_percpu(v->stats);
+ v->stats = NULL;
+ kfree(v);
+}
+
/* This is the shared VLAN add function which works for both ports and bridge
* devices. There are four possible calls to this function in terms of the
* vlan entry type:
@@ -245,7 +258,15 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
if (!masterv)
goto out_filt;
v->brvlan = masterv;
- v->stats = masterv->stats;
+ if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
+ v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
+ if (!v->stats) {
+ err = -ENOMEM;
+ goto out_filt;
+ }
+ } else {
+ v->stats = masterv->stats;
+ }
} else {
err = br_switchdev_port_vlan_add(dev, v->vid, flags);
if (err && err != -EOPNOTSUPP)
@@ -282,6 +303,10 @@ out_filt:
if (p) {
__vlan_vid_del(dev, br, v->vid);
if (masterv) {
+ if (v->stats && masterv->stats != v->stats)
+ free_percpu(v->stats);
+ v->stats = NULL;
+
br_vlan_put_master(masterv);
v->brvlan = NULL;
}
@@ -329,7 +354,7 @@ static int __vlan_del(struct net_bridge_vlan *v)
rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
br_vlan_rht_params);
__vlan_del_list(v);
- kfree_rcu(v, rcu);
+ call_rcu(&v->rcu, nbp_vlan_rcu_free);
}
br_vlan_put_master(masterv);
@@ -386,7 +411,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
return NULL;
}
}
- if (br->vlan_stats_enabled) {
+ if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
stats = this_cpu_ptr(v->stats);
u64_stats_update_begin(&stats->syncp);
stats->tx_bytes += skb->len;
@@ -475,14 +500,14 @@ static bool __allowed_ingress(const struct net_bridge *br,
skb->vlan_tci |= pvid;
/* if stats are disabled we can avoid the lookup */
- if (!br->vlan_stats_enabled)
+ if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
return true;
}
v = br_vlan_find(vg, *vid);
if (!v || !br_vlan_should_use(v))
goto drop;
- if (br->vlan_stats_enabled) {
+ if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
stats = this_cpu_ptr(v->stats);
u64_stats_update_begin(&stats->syncp);
stats->rx_bytes += skb->len;
@@ -504,7 +529,7 @@ bool br_allowed_ingress(const struct net_bridge *br,
/* If VLAN filtering is disabled on the bridge, all packets are
* permitted.
*/
- if (!br->vlan_enabled) {
+ if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
return true;
}
@@ -538,7 +563,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
struct net_bridge *br = p->br;
/* If filtering was disabled at input, let it pass. */
- if (!br->vlan_enabled)
+ if (!br_opt_get(br, BROPT_VLAN_ENABLED))
return true;
vg = nbp_vlan_group_rcu(p);
@@ -695,11 +720,12 @@ struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
- if (br->group_addr_set)
+ if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
return;
spin_lock_bh(&br->lock);
- if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
+ if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
+ br->vlan_proto == htons(ETH_P_8021Q)) {
/* Bridge Group Address */
br->group_addr[5] = 0x00;
} else { /* vlan_enabled && ETH_P_8021AD */
@@ -712,7 +738,8 @@ static void recalculate_group_addr(struct net_bridge *br)
/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
- if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
+ if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
+ br->vlan_proto == htons(ETH_P_8021Q))
br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
else /* vlan_enabled && ETH_P_8021AD */
br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
@@ -729,14 +756,14 @@ int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
};
int err;
- if (br->vlan_enabled == val)
+ if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
return 0;
err = switchdev_port_attr_set(br->dev, &attr);
if (err && err != -EOPNOTSUPP)
return err;
- br->vlan_enabled = val;
+ br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
br_manage_promisc(br);
recalculate_group_addr(br);
br_recalculate_fwd_mask(br);
@@ -753,7 +780,7 @@ bool br_vlan_enabled(const struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- return !!br->vlan_enabled;
+ return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);
@@ -819,7 +846,31 @@ int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
switch (val) {
case 0:
case 1:
- br->vlan_stats_enabled = val;
+ br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
+{
+ struct net_bridge_port *p;
+
+	/* only allow changing the option if there are no port vlans configured */
+ list_for_each_entry(p, &br->port_list, list) {
+ struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
+
+ if (vg->num_vlans)
+ return -EBUSY;
+ }
+
+ switch (val) {
+ case 0:
+ case 1:
+ br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
break;
default:
return -EINVAL;
@@ -877,8 +928,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
return 0;
}
- changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
- GFP_KERNEL);
+ changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
if (!changed)
return -ENOMEM;
@@ -925,7 +975,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
br->default_pvid = pvid;
out:
- kfree(changed);
+ bitmap_free(changed);
return err;
err_port:
@@ -965,7 +1015,7 @@ int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
goto out;
/* Only allow default pvid change when filtering is disabled */
- if (br->vlan_enabled) {
+ if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
pr_info_once("Please disable vlan filtering to change default_pvid\n");
err = -EPERM;
goto out;
@@ -1019,7 +1069,7 @@ int nbp_vlan_init(struct net_bridge_port *p)
.orig_dev = p->br->dev,
.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
- .u.vlan_filtering = p->br->vlan_enabled,
+ .u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
};
struct net_bridge_vlan_group *vg;
int ret = -ENOMEM;
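
The ownership rule for the per-port counters in br_vlan.c: with BROPT_VLAN_STATS_PER_PORT
set, each port vlan allocates its own percpu stats instead of aliasing masterv->stats, so
teardown must only free what the entry owns. That is why __vlan_del() moves from
kfree_rcu() to call_rcu() with nbp_vlan_rcu_free(); the callback needs the comparison:

	if (v->brvlan->stats != v->stats)	/* per-port: the entry owns its counters */
		free_percpu(v->stats);
	/* otherwise the pointer aliases the master vlan's stats, freed with the master */
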
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index d18965f3291f..416717c57cd1 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -941,7 +941,7 @@ static __poll_t caif_poll(struct file *file,
__poll_t mask;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
mask = 0;
/* exceptional events? */
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index b82440e1fcb4..a931a71ef6df 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -264,9 +264,6 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
frontpkt = rearpkt;
rearpkt = NULL;
- err = -ENOMEM;
- if (frontpkt == NULL)
- goto out;
err = -EPROTO;
if (cfpkt_add_head(frontpkt, head, 6) < 0)
goto out;
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 02172c408ff2..5d6724cee38f 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -46,9 +46,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
goto fail;
}
- /* crypto_alloc_skcipher() allocates with GFP_KERNEL */
+ /* crypto_alloc_sync_skcipher() allocates with GFP_KERNEL */
noio_flag = memalloc_noio_save();
- key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ key->tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
memalloc_noio_restore(noio_flag);
if (IS_ERR(key->tfm)) {
ret = PTR_ERR(key->tfm);
@@ -56,7 +56,7 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
goto fail;
}
- ret = crypto_skcipher_setkey(key->tfm, key->key, key->len);
+ ret = crypto_sync_skcipher_setkey(key->tfm, key->key, key->len);
if (ret)
goto fail;
@@ -136,7 +136,7 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
if (key) {
kfree(key->key);
key->key = NULL;
- crypto_free_skcipher(key->tfm);
+ crypto_free_sync_skcipher(key->tfm);
key->tfm = NULL;
}
}
@@ -216,7 +216,7 @@ static void teardown_sgtable(struct sg_table *sgt)
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
void *buf, int buf_len, int in_len, int *pout_len)
{
- SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
struct sg_table sgt;
struct scatterlist prealloc_sg;
char iv[AES_BLOCK_SIZE] __aligned(8);
@@ -232,7 +232,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
return ret;
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
- skcipher_request_set_tfm(req, key->tfm);
+ skcipher_request_set_sync_tfm(req, key->tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index bb45c7d43739..96ef4d860bc9 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -13,7 +13,7 @@ struct ceph_crypto_key {
struct ceph_timespec created;
int len;
void *key;
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
};
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
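
The ceph conversion follows the crypto API's sync-skcipher split: a crypto_sync_skcipher
is guaranteed synchronous, so its on-stack request has a bounded size (the point of
SYNC_SKCIPHER_REQUEST_ON_STACK, part of the kernel's VLA removal). The overall shape, as
a sketch with error handling elided:

	struct crypto_sync_skcipher *tfm;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	crypto_sync_skcipher_setkey(tfm, key, keylen);
	{
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, sgl, sgl, len, iv);
		crypto_skcipher_encrypt(req);	/* never returns -EINPROGRESS here */
		skcipher_request_zero(req);
	}
	crypto_free_sync_skcipher(tfm);
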
diff --git a/net/compat.c b/net/compat.c
index 3b2105f6549d..47a614b370cd 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -812,21 +812,21 @@ COMPAT_SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, buf, compat_size_t, len
static int __compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
- struct compat_timespec __user *timeout)
+ struct old_timespec32 __user *timeout)
{
int datagrams;
- struct timespec ktspec;
+ struct timespec64 ktspec;
if (timeout == NULL)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, NULL);
- if (compat_get_timespec(&ktspec, timeout))
+ if (compat_get_timespec64(&ktspec, timeout))
return -EFAULT;
datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, &ktspec);
- if (datagrams > 0 && compat_put_timespec(&ktspec, timeout))
+ if (datagrams > 0 && compat_put_timespec64(&ktspec, timeout))
datagrams = -EFAULT;
return datagrams;
@@ -834,7 +834,7 @@ static int __compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags,
- struct compat_timespec __user *, timeout)
+ struct old_timespec32 __user *, timeout)
{
return __compat_sys_recvmmsg(fd, mmsg, vlen, flags, timeout);
}
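
The recvmmsg compat path is part of the y2038 work: compat_timespec is renamed to
old_timespec32 and widened into a struct timespec64 inside the kernel. The two layouts,
sketched from their definitions:

	struct old_timespec32 {		/* what 32-bit userspace passes */
		old_time32_t	tv_sec;	/* 32-bit seconds, overflows in 2038 */
		s32		tv_nsec;
	};

	struct timespec64 {		/* what the kernel computes with */
		time64_t	tv_sec;	/* 64-bit seconds */
		long		tv_nsec;
	};
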
diff --git a/net/core/Makefile b/net/core/Makefile
index 80175e6a2eb8..fccd31e0e7f7 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -16,6 +16,7 @@ obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
obj-y += net-sysfs.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
+obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
obj-$(CONFIG_NETPOLL) += netpoll.o
obj-$(CONFIG_FIB_RULES) += fib_rules.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
obj-$(CONFIG_LWTUNNEL_BPF) += lwt_bpf.o
+obj-$(CONFIG_BPF_STREAM_PARSER) += sock_map.o
obj-$(CONFIG_DST_CACHE) += dst_cache.o
obj-$(CONFIG_HWBM) += hwbm.o
obj-$(CONFIG_NET_DEVLINK) += devlink.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 9aac0d63d53e..6a034eb538a1 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -837,7 +837,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
__poll_t mask;
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
mask = 0;
/* exceptional events? */
diff --git a/net/core/dev.c b/net/core/dev.c
index 82114e1111e6..022ad73d6253 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1752,6 +1752,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
}
EXPORT_SYMBOL(call_netdevice_notifiers);
+/**
+ * call_netdevice_notifiers_mtu - call all network notifier blocks
+ * @val: value passed unmodified to notifier function
+ * @dev: net_device pointer passed unmodified to notifier function
+ * @arg: additional u32 argument passed to the notifier function
+ *
+ * Call all network notifier blocks. Parameters and return value
+ * are as for raw_notifier_call_chain().
+ */
+static int call_netdevice_notifiers_mtu(unsigned long val,
+ struct net_device *dev, u32 arg)
+{
+ struct netdev_notifier_info_ext info = {
+ .info.dev = dev,
+ .ext.mtu = arg,
+ };
+
+ BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
+
+ return call_netdevice_notifiers_info(val, &info.info);
+}
+
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
@@ -1954,6 +1976,17 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
return false;
}
+/**
+ * dev_nit_active - return true if any network interface taps are in use
+ *
+ * @dev: network device to check for the presence of taps
+ */
+bool dev_nit_active(struct net_device *dev)
+{
+ return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
+}
+EXPORT_SYMBOL_GPL(dev_nit_active);
+
/*
* Support routine. Sends outgoing frames to any network
* taps currently in use.
@@ -1969,6 +2002,9 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
again:
list_for_each_entry_rcu(ptype, ptype_list, list) {
+ if (ptype->ignore_outgoing)
+ continue;
+
/* Never send packets back to the socket
* they originated from - MvS (miquels@drinkel.ow.org)
*/
@@ -3208,7 +3244,7 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
unsigned int len;
int rc;
- if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
+ if (dev_nit_active(dev))
dev_queue_xmit_nit(skb, dev);
len = skb->len;
@@ -3228,7 +3264,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
while (skb) {
struct sk_buff *next = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
rc = xmit_one(skb, dev, txq, next != NULL);
if (unlikely(!dev_xmit_complete(rc))) {
skb->next = next;
@@ -3328,7 +3364,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
for (; skb != NULL; skb = next) {
next = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
/* in case skb won't be segmented, point to itself */
skb->prev = skb;
@@ -4255,6 +4291,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
struct netdev_rx_queue *rxqueue;
void *orig_data, *orig_data_end;
u32 metalen, act = XDP_DROP;
+ __be16 orig_eth_type;
+ struct ethhdr *eth;
+ bool orig_bcast;
int hlen, off;
u32 mac_len;
@@ -4295,6 +4334,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
xdp->data_hard_start = skb->data - skb_headroom(skb);
orig_data_end = xdp->data_end;
orig_data = xdp->data;
+ eth = (struct ethhdr *)xdp->data;
+ orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
+ orig_eth_type = eth->h_proto;
rxqueue = netif_get_rxqueue(skb);
xdp->rxq = &rxqueue->xdp_rxq;
@@ -4318,6 +4360,14 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
}
+	/* check if XDP changed the eth hdr, in which case the skb needs an update */
+ eth = (struct ethhdr *)xdp->data;
+ if ((orig_eth_type != eth->h_proto) ||
+ (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
+ __skb_push(skb, ETH_HLEN);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ }
+
switch (act) {
case XDP_REDIRECT:
case XDP_TX:
@@ -5292,8 +5342,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
list_for_each_entry_safe_reverse(skb, p, head, list) {
if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
return;
- list_del(&skb->list);
- skb->next = NULL;
+ skb_list_del_init(skb);
napi_gro_complete(skb);
napi->gro_hash[index].count--;
}
@@ -5478,8 +5527,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
if (pp) {
- list_del(&pp->list);
- pp->next = NULL;
+ skb_list_del_init(pp);
napi_gro_complete(pp);
napi->gro_hash[hash].count--;
}
@@ -7574,14 +7622,16 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
err = __dev_set_mtu(dev, new_mtu);
if (!err) {
- err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ orig_mtu);
err = notifier_to_errno(err);
if (err) {
/* setting mtu back and notifying everyone again,
* so that they have a chance to revert changes.
*/
__dev_set_mtu(dev, orig_mtu);
- call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ new_mtu);
}
}
return err;
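
call_netdevice_notifiers_mtu() lets NETDEV_CHANGEMTU listeners learn the previous MTU
alongside the event; the BUILD_BUG_ON pins info as the first member, so notifiers unaware
of the extension still see a plain netdev_notifier_info. A sketch of a consumer ("foo" is
a placeholder):

	static int foo_device_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		if (event == NETDEV_CHANGEMTU) {
			struct netdev_notifier_info_ext *ext =
				container_of(ptr,
					     struct netdev_notifier_info_ext,
					     info);

			netdev_dbg(dev, "mtu %u -> %u\n", ext->ext.mtu, dev->mtu);
		}
		return NOTIFY_DONE;
	}
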
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 65fc366a78a4..3a4b29a13d31 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1626,7 +1626,7 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
if (!ops->eswitch_mode_set)
return -EOPNOTSUPP;
mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
- err = ops->eswitch_mode_set(devlink, mode);
+ err = ops->eswitch_mode_set(devlink, mode, info->extack);
if (err)
return err;
}
@@ -1636,7 +1636,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
return -EOPNOTSUPP;
inline_mode = nla_get_u8(
info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]);
- err = ops->eswitch_inline_mode_set(devlink, inline_mode);
+ err = ops->eswitch_inline_mode_set(devlink, inline_mode,
+ info->extack);
if (err)
return err;
}
@@ -1645,7 +1646,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
if (!ops->eswitch_encap_mode_set)
return -EOPNOTSUPP;
encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]);
- err = ops->eswitch_encap_mode_set(devlink, encap_mode);
+ err = ops->eswitch_encap_mode_set(devlink, encap_mode,
+ info->extack);
if (err)
return err;
}
@@ -2592,7 +2594,7 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
return genlmsg_reply(skb, info);
@@ -2600,7 +2602,6 @@ send_done:
nla_put_failure:
err = -EMSGSIZE;
err_resource_put:
-err_skb_send_alloc:
nlmsg_free(skb);
return err;
}
@@ -2676,6 +2677,21 @@ static const struct devlink_param devlink_param_generic[] = {
.name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME,
.type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE,
},
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
+ .name = DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME,
+ .type = DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME,
+ .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME,
+ .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE,
+ },
};
static int devlink_param_generic_verify(const struct devlink_param *param)
@@ -2996,6 +3012,8 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
struct genl_info *info,
union devlink_param_value *value)
{
+ int len;
+
if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
!info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
return -EINVAL;
@@ -3011,10 +3029,13 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
break;
case DEVLINK_PARAM_TYPE_STRING:
- if (nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) >
- DEVLINK_PARAM_MAX_STRING_VALUE)
+ len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]),
+ nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
+ if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) ||
+ len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
return -EINVAL;
- value->vstr = nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+ strcpy(value->vstr,
+ nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
break;
case DEVLINK_PARAM_TYPE_BOOL:
value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
@@ -3101,7 +3122,10 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
return -EOPNOTSUPP;
if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
- param_item->driverinit_value = value;
+ if (param->type == DEVLINK_PARAM_TYPE_STRING)
+ strcpy(param_item->driverinit_value.vstr, value.vstr);
+ else
+ param_item->driverinit_value = value;
param_item->driverinit_value_valid = true;
} else {
if (!param->set)
@@ -3488,7 +3512,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
start_offset = *((u64 *)&cb->args[0]);
err = nlmsg_parse(cb->nlh, GENL_HDRLEN + devlink_nl_family.hdrsize,
- attrs, DEVLINK_ATTR_MAX, ops->policy, NULL);
+ attrs, DEVLINK_ATTR_MAX, ops->policy, cb->extack);
if (err)
goto out;
@@ -4541,7 +4565,10 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
DEVLINK_PARAM_CMODE_DRIVERINIT))
return -EOPNOTSUPP;
- *init_val = param_item->driverinit_value;
+ if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+ strcpy(init_val->vstr, param_item->driverinit_value.vstr);
+ else
+ *init_val = param_item->driverinit_value;
return 0;
}
@@ -4572,7 +4599,10 @@ int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
DEVLINK_PARAM_CMODE_DRIVERINIT))
return -EOPNOTSUPP;
- param_item->driverinit_value = init_val;
+ if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+ strcpy(param_item->driverinit_value.vstr, init_val.vstr);
+ else
+ param_item->driverinit_value = init_val;
param_item->driverinit_value_valid = true;
devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
@@ -4605,6 +4635,23 @@ void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
EXPORT_SYMBOL_GPL(devlink_param_value_changed);
/**
+ * devlink_param_value_str_fill - Safely fill the string, preventing
+ * overflow of the preallocated buffer
+ *
+ * @dst_val: destination devlink_param_value
+ * @src: source buffer
+ */
+void devlink_param_value_str_fill(union devlink_param_value *dst_val,
+ const char *src)
+{
+ size_t len;
+
+ len = strlcpy(dst_val->vstr, src, __DEVLINK_PARAM_MAX_STRING_VALUE);
+ WARN_ON(len >= __DEVLINK_PARAM_MAX_STRING_VALUE);
+}
+EXPORT_SYMBOL_GPL(devlink_param_value_str_fill);
+
+/**
* devlink_region_create - create a new address region
*
* @devlink: devlink
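
String params are now stored by value in a fixed __DEVLINK_PARAM_MAX_STRING_VALUE buffer
rather than as a pointer into netlink attribute memory, and devlink_param_value_str_fill()
is the bounds-checked setter for drivers. Driver-side usage, sketched (the param id and
file name are hypothetical):

	union devlink_param_value value;

	devlink_param_value_str_fill(&value, "fw_bundle.bin");
	err = devlink_param_driverinit_value_set(devlink,
						 FOO_DEVLINK_PARAM_ID_FW_NAME,
						 value);
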
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index c9993c6c2fd4..d05402868575 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -27,6 +27,7 @@
#include <linux/rtnetlink.h>
#include <linux/sched/signal.h>
#include <linux/net.h>
+#include <net/xdp_sock.h>
/*
* Some useful ethtool_ops methods that're device independent.
@@ -539,47 +540,17 @@ struct ethtool_link_usettings {
} link_modes;
};
-/* Internal kernel helper to query a device ethtool_link_settings.
- *
- * Backward compatibility note: for compatibility with legacy drivers
- * that implement only the ethtool_cmd API, this has to work with both
- * drivers implementing get_link_ksettings API and drivers
- * implementing get_settings API. When drivers implement get_settings
- * and report ethtool_cmd deprecated fields
- * (transceiver/maxrxpkt/maxtxpkt), these fields are silently ignored
- * because the resulting struct ethtool_link_settings does not report them.
- */
+/* Internal kernel helper to query a device ethtool_link_settings. */
int __ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- int err;
- struct ethtool_cmd cmd;
-
ASSERT_RTNL();
- if (dev->ethtool_ops->get_link_ksettings) {
- memset(link_ksettings, 0, sizeof(*link_ksettings));
- return dev->ethtool_ops->get_link_ksettings(dev,
- link_ksettings);
- }
-
- /* driver doesn't support %ethtool_link_ksettings API. revert to
- * legacy %ethtool_cmd API, unless it's not supported either.
- * TODO: remove when ethtool_ops::get_settings disappears internally
- */
- if (!dev->ethtool_ops->get_settings)
+ if (!dev->ethtool_ops->get_link_ksettings)
return -EOPNOTSUPP;
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = ETHTOOL_GSET;
- err = dev->ethtool_ops->get_settings(dev, &cmd);
- if (err < 0)
- return err;
-
- /* we ignore deprecated fields transceiver/maxrxpkt/maxtxpkt
- */
- convert_legacy_settings_to_link_ksettings(link_ksettings, &cmd);
- return err;
+ memset(link_ksettings, 0, sizeof(*link_ksettings));
+ return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
}
EXPORT_SYMBOL(__ethtool_get_link_ksettings);
@@ -635,16 +606,7 @@ store_link_ksettings_for_user(void __user *to,
return 0;
}
-/* Query device for its ethtool_link_settings.
- *
- * Backward compatibility note: this function must fail when driver
- * does not implement ethtool::get_link_ksettings, even if legacy
- * ethtool_ops::get_settings is implemented. This tells new versions
- * of ethtool that they should use the legacy API %ETHTOOL_GSET for
- * this driver, so that they can correctly access the ethtool_cmd
- * deprecated fields (transceiver/maxrxpkt/maxtxpkt), until no driver
- * implements ethtool_ops::get_settings anymore.
- */
+/* Query device for its ethtool_link_settings. */
static int ethtool_get_link_ksettings(struct net_device *dev,
void __user *useraddr)
{
@@ -652,7 +614,6 @@ static int ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings link_ksettings;
ASSERT_RTNL();
-
if (!dev->ethtool_ops->get_link_ksettings)
return -EOPNOTSUPP;
@@ -699,16 +660,7 @@ static int ethtool_get_link_ksettings(struct net_device *dev,
return store_link_ksettings_for_user(useraddr, &link_ksettings);
}
-/* Update device ethtool_link_settings.
- *
- * Backward compatibility note: this function must fail when driver
- * does not implement ethtool::set_link_ksettings, even if legacy
- * ethtool_ops::set_settings is implemented. This tells new versions
- * of ethtool that they should use the legacy API %ETHTOOL_SSET for
- * this driver, so that they can correctly update the ethtool_cmd
- * deprecated fields (transceiver/maxrxpkt/maxtxpkt), until no driver
- * implements ethtool_ops::get_settings anymore.
- */
+/* Update device ethtool_link_settings. */
static int ethtool_set_link_ksettings(struct net_device *dev,
void __user *useraddr)
{
@@ -746,51 +698,31 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
/* Query device for its ethtool_cmd settings.
*
- * Backward compatibility note: for compatibility with legacy ethtool,
- * this has to work with both drivers implementing get_link_ksettings
- * API and drivers implementing get_settings API. When drivers
- * implement get_link_ksettings and report higher link mode bits, a
- * kernel warning is logged once (with name of 1st driver/device) to
- * recommend user to upgrade ethtool, but the command is successful
- * (only the lower link mode bits reported back to user).
+ * Backward compatibility note: for compatibility with legacy ethtool, this is
+ * now implemented via get_link_ksettings. When the driver reports higher link
+ * mode bits, a kernel warning is logged once (with the name of the first
+ * driver/device) recommending that the user upgrade ethtool, but the command
+ * succeeds (only the lower link mode bits are reported back). Deprecated
+ * ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt) are always set to zero.
*/
static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
{
+ struct ethtool_link_ksettings link_ksettings;
struct ethtool_cmd cmd;
+ int err;
ASSERT_RTNL();
+ if (!dev->ethtool_ops->get_link_ksettings)
+ return -EOPNOTSUPP;
- if (dev->ethtool_ops->get_link_ksettings) {
- /* First, use link_ksettings API if it is supported */
- int err;
- struct ethtool_link_ksettings link_ksettings;
-
- memset(&link_ksettings, 0, sizeof(link_ksettings));
- err = dev->ethtool_ops->get_link_ksettings(dev,
- &link_ksettings);
- if (err < 0)
- return err;
- convert_link_ksettings_to_legacy_settings(&cmd,
- &link_ksettings);
-
- /* send a sensible cmd tag back to user */
- cmd.cmd = ETHTOOL_GSET;
- } else {
- /* driver doesn't support %ethtool_link_ksettings
- * API. revert to legacy %ethtool_cmd API, unless it's
- * not supported either.
- */
- int err;
-
- if (!dev->ethtool_ops->get_settings)
- return -EOPNOTSUPP;
+ memset(&link_ksettings, 0, sizeof(link_ksettings));
+ err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
+ if (err < 0)
+ return err;
+ convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings);
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = ETHTOOL_GSET;
- err = dev->ethtool_ops->get_settings(dev, &cmd);
- if (err < 0)
- return err;
- }
+ /* send a sensible cmd tag back to user */
+ cmd.cmd = ETHTOOL_GSET;
if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
return -EFAULT;
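
For reference, a minimal sketch of the legacy caller this shim keeps working (the interface-name parameter and trimmed error handling are assumptions): old ethtool binaries issue ETHTOOL_GSET through the SIOCETHTOOL ioctl, and the kernel now services it by calling get_link_ksettings and converting the result, so the deprecated fields read back as zero.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    /* Legacy ETHTOOL_GSET query against an AF_INET socket fd; after
     * this change the kernel fills it from get_link_ksettings, so
     * transceiver/maxrxpkt/maxtxpkt always read as zero.
     */
    static int legacy_gset(int fd, const char *ifname, struct ethtool_cmd *ecmd)
    {
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        memset(ecmd, 0, sizeof(*ecmd));
        ecmd->cmd = ETHTOOL_GSET;
        ifr.ifr_data = (void *)ecmd;
        return ioctl(fd, SIOCETHTOOL, &ifr);
    }
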
@@ -800,48 +732,29 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
/* Update device link settings with given ethtool_cmd.
*
- * Backward compatibility note: for compatibility with legacy ethtool,
- * this has to work with both drivers implementing set_link_ksettings
- * API and drivers implementing set_settings API. When drivers
- * implement set_link_ksettings and user's request updates deprecated
- * ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel
- * warning is logged once (with name of 1st driver/device) to
- * recommend user to upgrade ethtool, and the request is rejected.
+ * Backward compatibility note: for compatibility with legacy ethtool, this is
+ * now always implemented via set_link_ksettings. When the user's request
+ * updates deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a
+ * kernel warning is logged once (with the name of the first driver/device)
+ * recommending that the user upgrade ethtool, and the request is rejected.
*/
static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
{
+ struct ethtool_link_ksettings link_ksettings;
struct ethtool_cmd cmd;
ASSERT_RTNL();
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT;
-
- /* first, try new %ethtool_link_ksettings API. */
- if (dev->ethtool_ops->set_link_ksettings) {
- struct ethtool_link_ksettings link_ksettings;
-
- if (!convert_legacy_settings_to_link_ksettings(&link_ksettings,
- &cmd))
- return -EINVAL;
-
- link_ksettings.base.cmd = ETHTOOL_SLINKSETTINGS;
- link_ksettings.base.link_mode_masks_nwords
- = __ETHTOOL_LINK_MODE_MASK_NU32;
- return dev->ethtool_ops->set_link_ksettings(dev,
- &link_ksettings);
- }
-
- /* legacy %ethtool_cmd API */
-
- /* TODO: return -EOPNOTSUPP when ethtool_ops::get_settings
- * disappears internally
- */
-
- if (!dev->ethtool_ops->set_settings)
+ if (!dev->ethtool_ops->set_link_ksettings)
return -EOPNOTSUPP;
- return dev->ethtool_ops->set_settings(dev, &cmd);
+ if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd))
+ return -EINVAL;
+ link_ksettings.base.link_mode_masks_nwords =
+ __ETHTOOL_LINK_MODE_MASK_NU32;
+ return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
}
static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
@@ -1015,6 +928,9 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
return -EINVAL;
}
+ if (info.cmd != cmd)
+ return -EINVAL;
+
if (info.cmd == ETHTOOL_GRXCLSRLALL) {
if (info.rule_cnt > 0) {
if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
@@ -1483,6 +1399,7 @@ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
{
struct ethtool_wolinfo wol;
+ int ret;
if (!dev->ethtool_ops->set_wol)
return -EOPNOTSUPP;
@@ -1490,7 +1407,13 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
if (copy_from_user(&wol, useraddr, sizeof(wol)))
return -EFAULT;
- return dev->ethtool_ops->set_wol(dev, &wol);
+ ret = dev->ethtool_ops->set_wol(dev, &wol);
+ if (ret)
+ return ret;
+
+ dev->wol_enabled = !!wol.wolopts;
+
+ return 0;
}
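
The new wol_enabled flag caches the outcome of the last successful set_wol request on the netdev itself. A sketch of a consumer, under assumption (the helper below is hypothetical; in this timeframe the interested party is the PHY suspend path):

    /* Hypothetical consumer: keep a PHY powered while its netdev has
     * Wake-on-LAN armed, as recorded by ethtool_set_wol() above.
     */
    static bool may_power_down_phy(const struct net_device *ndev)
    {
        return !ndev || !ndev->wol_enabled;
    }
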
static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
@@ -1743,8 +1666,10 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
void __user *useraddr)
{
- struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
+ struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS };
+ u16 from_channel, to_channel;
u32 max_rx_in_use = 0;
+ unsigned int i;
if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
return -EOPNOTSUPP;
@@ -1752,13 +1677,13 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
if (copy_from_user(&channels, useraddr, sizeof(channels)))
return -EFAULT;
- dev->ethtool_ops->get_channels(dev, &max);
+ dev->ethtool_ops->get_channels(dev, &curr);
/* ensure new counts are within the maximums */
- if ((channels.rx_count > max.max_rx) ||
- (channels.tx_count > max.max_tx) ||
- (channels.combined_count > max.max_combined) ||
- (channels.other_count > max.max_other))
+ if (channels.rx_count > curr.max_rx ||
+ channels.tx_count > curr.max_tx ||
+ channels.combined_count > curr.max_combined ||
+ channels.other_count > curr.max_other)
return -EINVAL;
/* ensure the new Rx count fits within the configured Rx flow
@@ -1768,6 +1693,14 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
(channels.combined_count + channels.rx_count) <= max_rx_in_use)
return -EINVAL;
+ /* When disabling channels, check the removed queues for zero-copy AF_XDP sockets */
+ from_channel = channels.combined_count +
+ min(channels.rx_count, channels.tx_count);
+ to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count);
+ for (i = from_channel; i < to_channel; i++)
+ if (xdp_get_umem_from_qid(dev, i))
+ return -EINVAL;
+
return dev->ethtool_ops->set_channels(dev, &channels);
}
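
A worked example of the range being probed, with hypothetical counts:

    /* Example: the device currently runs curr.combined_count = 8 with
     * curr.rx_count = curr.tx_count = 0, and the request asks for
     * combined_count = 4:
     *
     *     from_channel = 4 + min(0, 0) = 4
     *     to_channel   = 8 + max(0, 0) = 8
     *
     * Queues 4..7 are the ones being removed, so each is checked with
     * xdp_get_umem_from_qid(); a zero-copy AF_XDP umem bound to any of
     * them fails the request with -EINVAL.
     */
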
@@ -2462,13 +2395,17 @@ roll_back:
return ret;
}
-static int ethtool_set_per_queue(struct net_device *dev, void __user *useraddr)
+static int ethtool_set_per_queue(struct net_device *dev,
+ void __user *useraddr, u32 sub_cmd)
{
struct ethtool_per_queue_op per_queue_opt;
if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt)))
return -EFAULT;
+ if (per_queue_opt.sub_command != sub_cmd)
+ return -EINVAL;
+
switch (per_queue_opt.sub_command) {
case ETHTOOL_GCOALESCE:
return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt);
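
The added comparison pins the inner sub_command to the sub_cmd that dev_ethtool() already copied from userspace, closing the window in which a racing writer could swap the sub-command between the two reads. A conforming request from userspace looks like this (a sketch; queue-mask setup omitted):

    #include <string.h>
    #include <linux/ethtool.h>

    /* Both copies of the sub-command now have to agree, or the kernel
     * returns -EINVAL instead of trusting the second read.
     */
    static void init_per_queue_gcoalesce(struct ethtool_per_queue_op *op)
    {
        memset(op, 0, sizeof(*op));
        op->cmd = ETHTOOL_PERQUEUE;
        op->sub_command = ETHTOOL_GCOALESCE;
    }
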
@@ -2624,6 +2561,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GPHYSTATS:
case ETHTOOL_GTSO:
case ETHTOOL_GPERMADDR:
+ case ETHTOOL_GUFO:
case ETHTOOL_GGSO:
case ETHTOOL_GGRO:
case ETHTOOL_GFLAGS:
@@ -2838,7 +2776,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
rc = ethtool_get_phy_stats(dev, useraddr);
break;
case ETHTOOL_PERQUEUE:
- rc = ethtool_set_per_queue(dev, useraddr);
+ rc = ethtool_set_per_queue(dev, useraddr, sub_cmd);
break;
case ETHTOOL_GLINKSETTINGS:
rc = ethtool_get_link_ksettings(dev, useraddr);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 0ff3953f64aa..ffbb827723a2 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -1063,13 +1063,47 @@ skip:
return err;
}
+static int fib_valid_dumprule_req(const struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct fib_rule_hdr *frh;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for fib rule dump request");
+ return -EINVAL;
+ }
+
+ frh = nlmsg_data(nlh);
+ if (frh->dst_len || frh->src_len || frh->tos || frh->table ||
+ frh->res1 || frh->res2 || frh->action || frh->flags) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid values in header for fib rule dump request");
+ return -EINVAL;
+ }
+
+ if (nlmsg_attrlen(nlh, sizeof(*frh))) {
+ NL_SET_ERR_MSG(extack, "Invalid data after header in fib rule dump request");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
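
With strict checking on, a rule dump must carry a fib_rule_hdr whose fields (other than the family) are zero, and no trailing attributes. A raw-netlink sketch of a conforming request (assumes an open NETLINK_ROUTE socket; no library API implied):

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <linux/fib_rules.h>

    /* Dump all FIB rules; any non-zero header field besides the family
     * now fails with -EINVAL and an extack message.
     */
    static int request_rule_dump(int nlfd)
    {
        struct {
            struct nlmsghdr nlh;
            struct fib_rule_hdr frh;
        } req;

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.frh));
        req.nlh.nlmsg_type = RTM_GETRULE;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        req.frh.family = AF_UNSPEC;
        return write(nlfd, &req, req.nlh.nlmsg_len) < 0 ? -1 : 0;
    }
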
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct fib_rules_ops *ops;
int idx = 0, family;
- family = rtnl_msg_family(cb->nlh);
+ if (cb->strict_check) {
+ int err = fib_valid_dumprule_req(nlh, cb->extack);
+
+ if (err < 0)
+ return err;
+ }
+
+ family = rtnl_msg_family(nlh);
if (family != AF_UNSPEC) {
/* Protocol specific dump request */
ops = lookup_rules_ops(net, family);
diff --git a/net/core/filter.c b/net/core/filter.c
index aecdeba052d3..35c6933c2622 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -38,6 +38,7 @@
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
+#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
@@ -58,13 +59,17 @@
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
+#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
+#include <net/inet_hashtables.h>
+#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
+#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
@@ -2138,123 +2143,7 @@ static const struct bpf_func_proto bpf_redirect_proto = {
.arg2_type = ARG_ANYTHING,
};
-BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
- struct bpf_map *, map, void *, key, u64, flags)
-{
- struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
-
- /* If user passes invalid input drop the packet. */
- if (unlikely(flags & ~(BPF_F_INGRESS)))
- return SK_DROP;
-
- tcb->bpf.flags = flags;
- tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key);
- if (!tcb->bpf.sk_redir)
- return SK_DROP;
-
- return SK_PASS;
-}
-
-static const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
- .func = bpf_sk_redirect_hash,
- .gpl_only = false,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_CONST_MAP_PTR,
- .arg3_type = ARG_PTR_TO_MAP_KEY,
- .arg4_type = ARG_ANYTHING,
-};
-
-BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
- struct bpf_map *, map, u32, key, u64, flags)
-{
- struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
-
- /* If user passes invalid input drop the packet. */
- if (unlikely(flags & ~(BPF_F_INGRESS)))
- return SK_DROP;
-
- tcb->bpf.flags = flags;
- tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key);
- if (!tcb->bpf.sk_redir)
- return SK_DROP;
-
- return SK_PASS;
-}
-
-struct sock *do_sk_redirect_map(struct sk_buff *skb)
-{
- struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
-
- return tcb->bpf.sk_redir;
-}
-
-static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
- .func = bpf_sk_redirect_map,
- .gpl_only = false,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_CONST_MAP_PTR,
- .arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_ANYTHING,
-};
-
-BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg_buff *, msg,
- struct bpf_map *, map, void *, key, u64, flags)
-{
- /* If user passes invalid input drop the packet. */
- if (unlikely(flags & ~(BPF_F_INGRESS)))
- return SK_DROP;
-
- msg->flags = flags;
- msg->sk_redir = __sock_hash_lookup_elem(map, key);
- if (!msg->sk_redir)
- return SK_DROP;
-
- return SK_PASS;
-}
-
-static const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
- .func = bpf_msg_redirect_hash,
- .gpl_only = false,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_CONST_MAP_PTR,
- .arg3_type = ARG_PTR_TO_MAP_KEY,
- .arg4_type = ARG_ANYTHING,
-};
-
-BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg_buff *, msg,
- struct bpf_map *, map, u32, key, u64, flags)
-{
- /* If user passes invalid input drop the packet. */
- if (unlikely(flags & ~(BPF_F_INGRESS)))
- return SK_DROP;
-
- msg->flags = flags;
- msg->sk_redir = __sock_map_lookup_elem(map, key);
- if (!msg->sk_redir)
- return SK_DROP;
-
- return SK_PASS;
-}
-
-struct sock *do_msg_redirect_map(struct sk_msg_buff *msg)
-{
- return msg->sk_redir;
-}
-
-static const struct bpf_func_proto bpf_msg_redirect_map_proto = {
- .func = bpf_msg_redirect_map,
- .gpl_only = false,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_CONST_MAP_PTR,
- .arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_ANYTHING,
-};
-
-BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg_buff *, msg, u32, bytes)
+BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
{
msg->apply_bytes = bytes;
return 0;
@@ -2268,7 +2157,7 @@ static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
.arg2_type = ARG_ANYTHING,
};
-BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg_buff *, msg, u32, bytes)
+BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
{
msg->cork_bytes = bytes;
return 0;
@@ -2282,45 +2171,37 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
.arg2_type = ARG_ANYTHING,
};
-#define sk_msg_iter_var(var) \
- do { \
- var++; \
- if (var == MAX_SKB_FRAGS) \
- var = 0; \
- } while (0)
-
-BPF_CALL_4(bpf_msg_pull_data,
- struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
+BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
+ u32, end, u64, flags)
{
- unsigned int len = 0, offset = 0, copy = 0, poffset = 0;
- int bytes = end - start, bytes_sg_total;
- struct scatterlist *sg = msg->sg_data;
- int first_sg, last_sg, i, shift;
- unsigned char *p, *to, *from;
+ u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
+ u32 first_sge, last_sge, i, shift, bytes_sg_total;
+ struct scatterlist *sge;
+ u8 *raw, *to, *from;
struct page *page;
if (unlikely(flags || end <= start))
return -EINVAL;
/* First find the starting scatterlist element */
- i = msg->sg_start;
+ i = msg->sg.start;
do {
- len = sg[i].length;
+ len = sk_msg_elem(msg, i)->length;
if (start < offset + len)
break;
offset += len;
- sk_msg_iter_var(i);
- } while (i != msg->sg_end);
+ sk_msg_iter_var_next(i);
+ } while (i != msg->sg.end);
if (unlikely(start >= offset + len))
return -EINVAL;
- first_sg = i;
+ first_sge = i;
/* The start may point into the sg element so we need to also
* account for the headroom.
*/
bytes_sg_total = start - offset + bytes;
- if (!msg->sg_copy[i] && bytes_sg_total <= len)
+ if (!msg->sg.copy[i] && bytes_sg_total <= len)
goto out;
/* At this point we need to linearize multiple scatterlist
@@ -2334,76 +2215,75 @@ BPF_CALL_4(bpf_msg_pull_data,
* will copy the entire sg entry.
*/
do {
- copy += sg[i].length;
- sk_msg_iter_var(i);
+ copy += sk_msg_elem(msg, i)->length;
+ sk_msg_iter_var_next(i);
if (bytes_sg_total <= copy)
break;
- } while (i != msg->sg_end);
- last_sg = i;
+ } while (i != msg->sg.end);
+ last_sge = i;
if (unlikely(bytes_sg_total > copy))
return -EINVAL;
- page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
+ page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
+ get_order(copy));
if (unlikely(!page))
return -ENOMEM;
- p = page_address(page);
- i = first_sg;
+ raw = page_address(page);
+ i = first_sge;
do {
- from = sg_virt(&sg[i]);
- len = sg[i].length;
- to = p + poffset;
+ sge = sk_msg_elem(msg, i);
+ from = sg_virt(sge);
+ len = sge->length;
+ to = raw + poffset;
memcpy(to, from, len);
poffset += len;
- sg[i].length = 0;
- put_page(sg_page(&sg[i]));
+ sge->length = 0;
+ put_page(sg_page(sge));
- sk_msg_iter_var(i);
- } while (i != last_sg);
+ sk_msg_iter_var_next(i);
+ } while (i != last_sge);
- sg[first_sg].length = copy;
- sg_set_page(&sg[first_sg], page, copy, 0);
+ sg_set_page(&msg->sg.data[first_sge], page, copy, 0);
/* To repair sg ring we need to shift entries. If we only
* had a single entry though we can just replace it and
* be done. Otherwise walk the ring and shift the entries.
*/
- WARN_ON_ONCE(last_sg == first_sg);
- shift = last_sg > first_sg ?
- last_sg - first_sg - 1 :
- MAX_SKB_FRAGS - first_sg + last_sg - 1;
+ WARN_ON_ONCE(last_sge == first_sge);
+ shift = last_sge > first_sge ?
+ last_sge - first_sge - 1 :
+ MAX_SKB_FRAGS - first_sge + last_sge - 1;
if (!shift)
goto out;
- i = first_sg;
- sk_msg_iter_var(i);
+ i = first_sge;
+ sk_msg_iter_var_next(i);
do {
- int move_from;
+ u32 move_from;
- if (i + shift >= MAX_SKB_FRAGS)
- move_from = i + shift - MAX_SKB_FRAGS;
+ if (i + shift >= MAX_MSG_FRAGS)
+ move_from = i + shift - MAX_MSG_FRAGS;
else
move_from = i + shift;
-
- if (move_from == msg->sg_end)
+ if (move_from == msg->sg.end)
break;
- sg[i] = sg[move_from];
- sg[move_from].length = 0;
- sg[move_from].page_link = 0;
- sg[move_from].offset = 0;
-
- sk_msg_iter_var(i);
+ msg->sg.data[i] = msg->sg.data[move_from];
+ msg->sg.data[move_from].length = 0;
+ msg->sg.data[move_from].page_link = 0;
+ msg->sg.data[move_from].offset = 0;
+ sk_msg_iter_var_next(i);
} while (1);
- msg->sg_end -= shift;
- if (msg->sg_end < 0)
- msg->sg_end += MAX_SKB_FRAGS;
+
+ msg->sg.end = msg->sg.end - shift > msg->sg.end ?
+ msg->sg.end - shift + MAX_MSG_FRAGS :
+ msg->sg.end - shift;
out:
- msg->data = sg_virt(&sg[first_sg]) + start - offset;
+ msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
msg->data_end = msg->data + bytes;
-
return 0;
}
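
The helper's contract from the BPF side is unchanged by the conversion to struct sk_msg: pull [start, end) into a linear region reachable through data/data_end. A minimal SK_MSG program sketch (SEC() and the helper declarations are assumed to come from the usual libbpf helper headers):

    SEC("sk_msg")
    int pull_first_bytes(struct sk_msg_md *msg)
    {
        void *data, *data_end;

        /* Linearize bytes [0, 16) of the message. */
        if (bpf_msg_pull_data(msg, 0, 16, 0))
            return SK_DROP;

        data = msg->data;
        data_end = msg->data_end;
        if (data + 16 > data_end)
            return SK_DROP;
        return SK_PASS;
    }
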
@@ -2417,6 +2297,137 @@ static const struct bpf_func_proto bpf_msg_pull_data_proto = {
.arg4_type = ARG_ANYTHING,
};
+BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
+ u32, len, u64, flags)
+{
+ struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
+ u32 new, i = 0, l, space, copy = 0, offset = 0;
+ u8 *raw, *to, *from;
+ struct page *page;
+
+ if (unlikely(flags))
+ return -EINVAL;
+
+ /* First find the starting scatterlist element */
+ i = msg->sg.start;
+ do {
+ l = sk_msg_elem(msg, i)->length;
+
+ if (start < offset + l)
+ break;
+ offset += l;
+ sk_msg_iter_var_next(i);
+ } while (i != msg->sg.end);
+
+ if (start >= offset + l)
+ return -EINVAL;
+
+ space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
+
+ /* If no space is available we fall back to a copy; we need at
+ * least one scatterlist elem available to push data into when
+ * start aligns to the beginning of an element, or two when it
+ * falls inside an element. We handle the start-equals-offset
+ * case specially because it's the common case for inserting a
+ * header.
+ */
+ if (!space || (space == 1 && start != offset))
+ copy = msg->sg.data[i].length;
+
+ page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
+ get_order(copy + len));
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ if (copy) {
+ int front, back;
+
+ raw = page_address(page);
+
+ psge = sk_msg_elem(msg, i);
+ front = start - offset;
+ back = psge->length - front;
+ from = sg_virt(psge);
+
+ if (front)
+ memcpy(raw, from, front);
+
+ if (back) {
+ from += front;
+ to = raw + front + len;
+
+ memcpy(to, from, back);
+ }
+
+ put_page(sg_page(psge));
+ } else if (start - offset) {
+ psge = sk_msg_elem(msg, i);
+ rsge = sk_msg_elem_cpy(msg, i);
+
+ psge->length = start - offset;
+ rsge.length -= psge->length;
+ rsge.offset += start;
+
+ sk_msg_iter_var_next(i);
+ sg_unmark_end(psge);
+ sk_msg_iter_next(msg, end);
+ }
+
+ /* Slot(s) to place newly allocated data */
+ new = i;
+
+ /* Shift one or two slots as needed */
+ if (!copy) {
+ sge = sk_msg_elem_cpy(msg, i);
+
+ sk_msg_iter_var_next(i);
+ sg_unmark_end(&sge);
+ sk_msg_iter_next(msg, end);
+
+ nsge = sk_msg_elem_cpy(msg, i);
+ if (rsge.length) {
+ sk_msg_iter_var_next(i);
+ nnsge = sk_msg_elem_cpy(msg, i);
+ }
+
+ while (i != msg->sg.end) {
+ msg->sg.data[i] = sge;
+ sge = nsge;
+ sk_msg_iter_var_next(i);
+ if (rsge.length) {
+ nsge = nnsge;
+ nnsge = sk_msg_elem_cpy(msg, i);
+ } else {
+ nsge = sk_msg_elem_cpy(msg, i);
+ }
+ }
+ }
+
+ /* Place newly allocated data buffer */
+ sk_mem_charge(msg->sk, len);
+ msg->sg.size += len;
+ msg->sg.copy[new] = false;
+ sg_set_page(&msg->sg.data[new], page, len + copy, 0);
+ if (rsge.length) {
+ get_page(sg_page(&rsge));
+ sk_msg_iter_var_next(new);
+ msg->sg.data[new] = rsge;
+ }
+
+ sk_msg_compute_data_pointers(msg);
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_msg_push_data_proto = {
+ .func = bpf_msg_push_data,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_ANYTHING,
+};
+
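
From a program's point of view, the new helper opens a hole of len uninitialized bytes at offset start; combined with bpf_msg_pull_data() the hole can then be written through data. A sketch under the same header assumptions as above:

    SEC("sk_msg")
    int push_room_for_header(struct sk_msg_md *msg)
    {
        void *data, *data_end;

        /* Insert an 8-byte hole at the front of the message... */
        if (bpf_msg_push_data(msg, 0, 8, 0))
            return SK_DROP;
        /* ...and linearize it so it is writable through msg->data. */
        if (bpf_msg_pull_data(msg, 0, 8, 0))
            return SK_DROP;

        data = msg->data;
        data_end = msg->data_end;
        if (data + 8 > data_end)
            return SK_DROP;
        __builtin_memset(data, 0, 8);
        return SK_PASS;
    }
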
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
return task_get_classid(skb);
@@ -3175,6 +3186,32 @@ static int __bpf_tx_xdp(struct net_device *dev,
return 0;
}
+static noinline int
+xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
+{
+ struct net_device *fwd;
+ u32 index = ri->ifindex;
+ int err;
+
+ fwd = dev_get_by_index_rcu(dev_net(dev), index);
+ ri->ifindex = 0;
+ if (unlikely(!fwd)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
+ if (unlikely(err))
+ goto err;
+
+ _trace_xdp_redirect(dev, xdp_prog, index);
+ return 0;
+err:
+ _trace_xdp_redirect_err(dev, xdp_prog, index, err);
+ return err;
+}
+
static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
struct bpf_map *map,
struct xdp_buff *xdp,
@@ -3187,7 +3224,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
struct bpf_dtab_netdev *dst = fwd;
err = dev_map_enqueue(dst, xdp, dev_rx);
- if (err)
+ if (unlikely(err))
return err;
__dev_map_insert_ctx(map, index);
break;
@@ -3196,7 +3233,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
struct bpf_cpu_map_entry *rcpu = fwd;
err = cpu_map_enqueue(rcpu, xdp, dev_rx);
- if (err)
+ if (unlikely(err))
return err;
__cpu_map_insert_ctx(map, index);
break;
@@ -3237,7 +3274,7 @@ void xdp_do_flush_map(void)
}
EXPORT_SYMBOL_GPL(xdp_do_flush_map);
-static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
+static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
{
switch (map->map_type) {
case BPF_MAP_TYPE_DEVMAP:
@@ -3269,9 +3306,9 @@ void bpf_clear_redirect_map(struct bpf_map *map)
}
static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog, struct bpf_map *map)
+ struct bpf_prog *xdp_prog, struct bpf_map *map,
+ struct bpf_redirect_info *ri)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
u32 index = ri->ifindex;
void *fwd = NULL;
int err;
@@ -3280,11 +3317,11 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
WRITE_ONCE(ri->map, NULL);
fwd = __xdp_map_lookup_elem(map, index);
- if (!fwd) {
+ if (unlikely(!fwd)) {
err = -EINVAL;
goto err;
}
- if (ri->map_to_flush && ri->map_to_flush != map)
+ if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
xdp_do_flush_map();
err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
@@ -3304,29 +3341,11 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct bpf_map *map = READ_ONCE(ri->map);
- struct net_device *fwd;
- u32 index = ri->ifindex;
- int err;
- if (map)
- return xdp_do_redirect_map(dev, xdp, xdp_prog, map);
-
- fwd = dev_get_by_index_rcu(dev_net(dev), index);
- ri->ifindex = 0;
- if (unlikely(!fwd)) {
- err = -EINVAL;
- goto err;
- }
+ if (likely(map))
+ return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
- err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
- if (unlikely(err))
- goto err;
-
- _trace_xdp_redirect(dev, xdp_prog, index);
- return 0;
-err:
- _trace_xdp_redirect_err(dev, xdp_prog, index, err);
- return err;
+ return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);
@@ -3914,8 +3933,8 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
break;
- case SO_MAX_PACING_RATE:
- sk->sk_max_pacing_rate = val;
+ case SO_MAX_PACING_RATE: /* 32bit version */
+ sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
break;
@@ -4012,6 +4031,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
tp->snd_ssthresh = val;
}
break;
+ case TCP_SAVE_SYN:
+ if (val < 0 || val > 1)
+ ret = -EINVAL;
+ else
+ tp->save_syn = val;
+ break;
default:
ret = -EINVAL;
}
@@ -4041,17 +4066,29 @@ BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
if (!sk_fullsock(sk))
goto err_clear;
-
#ifdef CONFIG_INET
if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
- if (optname == TCP_CONGESTION) {
- struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_connection_sock *icsk;
+ struct tcp_sock *tp;
+
+ switch (optname) {
+ case TCP_CONGESTION:
+ icsk = inet_csk(sk);
if (!icsk->icsk_ca_ops || optlen <= 1)
goto err_clear;
strncpy(optval, icsk->icsk_ca_ops->name, optlen);
optval[optlen - 1] = 0;
- } else {
+ break;
+ case TCP_SAVED_SYN:
+ tp = tcp_sk(sk);
+
+ if (optlen <= 0 || !tp->saved_syn ||
+ optlen > tp->saved_syn[0])
+ goto err_clear;
+ memcpy(optval, tp->saved_syn + 1, optlen);
+ break;
+ default:
goto err_clear;
}
} else if (level == SOL_IP) {
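
Together with the TCP_SAVE_SYN setsockopt branch added above, this gives sockops programs a save-then-read flow along these lines (a sketch; the 64-byte buffer and the header includes are assumptions):

    SEC("sockops")
    int save_and_read_syn(struct bpf_sock_ops *skops)
    {
        int one = 1;
        char syn[64];

        switch (skops->op) {
        case BPF_SOCK_OPS_TCP_LISTEN_CB:
            /* Ask the stack to retain incoming SYN headers. */
            bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
                           &one, sizeof(one));
            break;
        case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
            /* Read back (part of) the saved SYN; per the check above,
             * asking for more bytes than were saved zeroes the buffer
             * and fails.
             */
            bpf_getsockopt(skops, SOL_TCP, TCP_SAVED_SYN,
                           syn, sizeof(syn));
            break;
        }
        return 1;
    }
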
@@ -4786,6 +4823,149 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
};
#endif /* CONFIG_IPV6_SEG6_BPF */
+#ifdef CONFIG_INET
+static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
+ struct sk_buff *skb, u8 family, u8 proto)
+{
+ bool refcounted = false;
+ struct sock *sk = NULL;
+ int dif = 0;
+
+ if (skb->dev)
+ dif = skb->dev->ifindex;
+
+ if (family == AF_INET) {
+ __be32 src4 = tuple->ipv4.saddr;
+ __be32 dst4 = tuple->ipv4.daddr;
+ int sdif = inet_sdif(skb);
+
+ if (proto == IPPROTO_TCP)
+ sk = __inet_lookup(net, &tcp_hashinfo, skb, 0,
+ src4, tuple->ipv4.sport,
+ dst4, tuple->ipv4.dport,
+ dif, sdif, &refcounted);
+ else
+ sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
+ dst4, tuple->ipv4.dport,
+ dif, sdif, &udp_table, skb);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
+ struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
+ u16 hnum = ntohs(tuple->ipv6.dport);
+ int sdif = inet6_sdif(skb);
+
+ if (proto == IPPROTO_TCP)
+ sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
+ src6, tuple->ipv6.sport,
+ dst6, hnum,
+ dif, sdif, &refcounted);
+ else if (likely(ipv6_bpf_stub))
+ sk = ipv6_bpf_stub->udp6_lib_lookup(net,
+ src6, tuple->ipv6.sport,
+ dst6, hnum,
+ dif, sdif,
+ &udp_table, skb);
+#endif
+ }
+
+ if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
+ WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
+ sk = NULL;
+ }
+ return sk;
+}
+
+/* bpf_sk_lookup performs the core lookup for different types of sockets,
+ * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
+ * Returns the socket as an 'unsigned long' so that the casts in the
+ * callers satisfy the BPF_CALL declarations.
+ */
+static unsigned long
+bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ u8 proto, u64 netns_id, u64 flags)
+{
+ struct net *caller_net;
+ struct sock *sk = NULL;
+ u8 family = AF_UNSPEC;
+ struct net *net;
+
+ family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
+ if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
+ goto out;
+
+ if (skb->dev)
+ caller_net = dev_net(skb->dev);
+ else
+ caller_net = sock_net(skb->sk);
+ if (netns_id) {
+ net = get_net_ns_by_id(caller_net, netns_id);
+ if (unlikely(!net))
+ goto out;
+ sk = sk_lookup(net, tuple, skb, family, proto);
+ put_net(net);
+ } else {
+ net = caller_net;
+ sk = sk_lookup(net, tuple, skb, family, proto);
+ }
+
+ if (sk)
+ sk = sk_to_full_sk(sk);
+out:
+ return (unsigned long) sk;
+}
+
+BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
+ struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+ return bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
+ .func = bpf_sk_lookup_tcp,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
+ struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+ return bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
+ .func = bpf_sk_lookup_udp,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+BPF_CALL_1(bpf_sk_release, struct sock *, sk)
+{
+ if (!sock_flag(sk, SOCK_RCU_FREE))
+ sock_gen_put(sk);
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_sk_release_proto = {
+ .func = bpf_sk_release,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_SOCKET,
+};
+#endif /* CONFIG_INET */
+
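
Both lookup helpers hand back a referenced socket unless it is SOCK_RCU_FREE, so every non-NULL result must be passed to bpf_sk_release() before the program returns; the verifier enforces this pairing. A minimal tc classifier sketch (addresses and section name are assumptions):

    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>

    SEC("classifier")
    int tcp_sk_present(struct __sk_buff *skb)
    {
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *sk;

        tuple.ipv4.daddr = __builtin_bswap32(0x7f000001); /* 127.0.0.1 */
        tuple.ipv4.dport = __builtin_bswap16(80);

        /* netns_id 0 means the caller's own netns; flags must be 0. */
        sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
        if (!sk)
            return TC_ACT_SHOT;
        bpf_sk_release(sk);
        return TC_ACT_OK;
    }
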
bool bpf_helper_changes_pkt_data(void *func)
{
if (func == bpf_skb_vlan_push ||
@@ -4805,6 +4985,7 @@ bool bpf_helper_changes_pkt_data(void *func)
func == bpf_xdp_adjust_head ||
func == bpf_xdp_adjust_meta ||
func == bpf_msg_pull_data ||
+ func == bpf_msg_push_data ||
func == bpf_xdp_adjust_tail ||
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
func == bpf_lwt_seg6_store_bytes ||
@@ -4827,6 +5008,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_map_update_elem_proto;
case BPF_FUNC_map_delete_elem:
return &bpf_map_delete_elem_proto;
+ case BPF_FUNC_map_push_elem:
+ return &bpf_map_push_elem_proto;
+ case BPF_FUNC_map_pop_elem:
+ return &bpf_map_pop_elem_proto;
+ case BPF_FUNC_map_peek_elem:
+ return &bpf_map_peek_elem_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_get_smp_processor_id:
@@ -4992,6 +5179,14 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_ancestor_cgroup_id:
return &bpf_skb_ancestor_cgroup_id_proto;
#endif
+#ifdef CONFIG_INET
+ case BPF_FUNC_sk_lookup_tcp:
+ return &bpf_sk_lookup_tcp_proto;
+ case BPF_FUNC_sk_lookup_udp:
+ return &bpf_sk_lookup_udp_proto;
+ case BPF_FUNC_sk_release:
+ return &bpf_sk_release_proto;
+#endif
default:
return bpf_base_func_proto(func_id);
}
@@ -5024,6 +5219,9 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
}
}
+const struct bpf_func_proto bpf_sock_map_update_proto __weak;
+const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
+
static const struct bpf_func_proto *
sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -5047,6 +5245,9 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
}
}
+const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
+const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;
+
static const struct bpf_func_proto *
sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -5061,6 +5262,8 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_msg_cork_bytes_proto;
case BPF_FUNC_msg_pull_data:
return &bpf_msg_pull_data_proto;
+ case BPF_FUNC_msg_push_data:
+ return &bpf_msg_push_data_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
default:
@@ -5068,6 +5271,9 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
}
}
+const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
+const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;
+
static const struct bpf_func_proto *
sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -5092,6 +5298,25 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_redirect_hash_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
+#ifdef CONFIG_INET
+ case BPF_FUNC_sk_lookup_tcp:
+ return &bpf_sk_lookup_tcp_proto;
+ case BPF_FUNC_sk_lookup_udp:
+ return &bpf_sk_lookup_udp_proto;
+ case BPF_FUNC_sk_release:
+ return &bpf_sk_release_proto;
+#endif
+ default:
+ return bpf_base_func_proto(func_id);
+ }
+}
+
+static const struct bpf_func_proto *
+flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+ case BPF_FUNC_skb_load_bytes:
+ return &bpf_skb_load_bytes_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -5215,6 +5440,10 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
if (size != size_default)
return false;
break;
+ case bpf_ctx_range(struct __sk_buff, flow_keys):
+ if (size != sizeof(struct bpf_flow_keys *))
+ return false;
+ break;
default:
/* Only narrow read access allowed for now. */
if (type == BPF_WRITE) {
@@ -5240,6 +5469,7 @@ static bool sk_filter_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range(struct __sk_buff, data_end):
+ case bpf_ctx_range(struct __sk_buff, flow_keys):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
return false;
}
@@ -5256,6 +5486,40 @@ static bool sk_filter_is_valid_access(int off, int size,
return bpf_skb_is_valid_access(off, size, type, prog, info);
}
+static bool cg_skb_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ switch (off) {
+ case bpf_ctx_range(struct __sk_buff, tc_classid):
+ case bpf_ctx_range(struct __sk_buff, data_meta):
+ case bpf_ctx_range(struct __sk_buff, flow_keys):
+ return false;
+ }
+ if (type == BPF_WRITE) {
+ switch (off) {
+ case bpf_ctx_range(struct __sk_buff, mark):
+ case bpf_ctx_range(struct __sk_buff, priority):
+ case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
+ break;
+ default:
+ return false;
+ }
+ }
+
+ switch (off) {
+ case bpf_ctx_range(struct __sk_buff, data):
+ info->reg_type = PTR_TO_PACKET;
+ break;
+ case bpf_ctx_range(struct __sk_buff, data_end):
+ info->reg_type = PTR_TO_PACKET_END;
+ break;
+ }
+
+ return bpf_skb_is_valid_access(off, size, type, prog, info);
+}
+
static bool lwt_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
@@ -5265,6 +5529,7 @@ static bool lwt_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
case bpf_ctx_range(struct __sk_buff, data_meta):
+ case bpf_ctx_range(struct __sk_buff, flow_keys):
return false;
}
@@ -5350,23 +5615,29 @@ static bool __sock_filter_check_size(int off, int size,
return size == size_default;
}
-static bool sock_filter_is_valid_access(int off, int size,
- enum bpf_access_type type,
- const struct bpf_prog *prog,
- struct bpf_insn_access_aux *info)
+bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
{
if (off < 0 || off >= sizeof(struct bpf_sock))
return false;
if (off % size != 0)
return false;
- if (!__sock_filter_check_attach_type(off, type,
- prog->expected_attach_type))
- return false;
if (!__sock_filter_check_size(off, size, info))
return false;
return true;
}
+static bool sock_filter_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (!bpf_sock_is_valid_access(off, size, type, info))
+ return false;
+ return __sock_filter_check_attach_type(off, type,
+ prog->expected_attach_type);
+}
+
static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
const struct bpf_prog *prog, int drop_verdict)
{
@@ -5475,6 +5746,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_end):
info->reg_type = PTR_TO_PACKET_END;
break;
+ case bpf_ctx_range(struct __sk_buff, flow_keys):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
return false;
}
@@ -5676,6 +5948,7 @@ static bool sk_skb_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
+ case bpf_ctx_range(struct __sk_buff, flow_keys):
return false;
}
@@ -5735,6 +6008,39 @@ static bool sk_msg_is_valid_access(int off, int size,
return true;
}
+static bool flow_dissector_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (type == BPF_WRITE) {
+ switch (off) {
+ case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
+ break;
+ default:
+ return false;
+ }
+ }
+
+ switch (off) {
+ case bpf_ctx_range(struct __sk_buff, data):
+ info->reg_type = PTR_TO_PACKET;
+ break;
+ case bpf_ctx_range(struct __sk_buff, data_end):
+ info->reg_type = PTR_TO_PACKET_END;
+ break;
+ case bpf_ctx_range(struct __sk_buff, flow_keys):
+ info->reg_type = PTR_TO_FLOW_KEYS;
+ break;
+ case bpf_ctx_range(struct __sk_buff, tc_classid):
+ case bpf_ctx_range(struct __sk_buff, data_meta):
+ case bpf_ctx_range_till(struct __sk_buff, family, local_port):
+ return false;
+ }
+
+ return bpf_skb_is_valid_access(off, size, type, prog, info);
+}
+
static u32 bpf_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
@@ -6029,15 +6335,24 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
bpf_target_off(struct sock_common,
skc_num, 2, target_size));
break;
+
+ case offsetof(struct __sk_buff, flow_keys):
+ off = si->off;
+ off -= offsetof(struct __sk_buff, flow_keys);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct qdisc_skb_cb, flow_keys);
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
+ si->src_reg, off);
+ break;
}
return insn - insn_buf;
}
-static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
- const struct bpf_insn *si,
- struct bpf_insn *insn_buf,
- struct bpf_prog *prog, u32 *target_size)
+u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog, u32 *target_size)
{
struct bpf_insn *insn = insn_buf;
int off;
@@ -6747,22 +7062,22 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
switch (si->off) {
case offsetof(struct sk_msg_md, data):
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
si->dst_reg, si->src_reg,
- offsetof(struct sk_msg_buff, data));
+ offsetof(struct sk_msg, data));
break;
case offsetof(struct sk_msg_md, data_end):
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data_end),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end),
si->dst_reg, si->src_reg,
- offsetof(struct sk_msg_buff, data_end));
+ offsetof(struct sk_msg, data_end));
break;
case offsetof(struct sk_msg_md, family):
BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct sk_msg_buff, sk),
+ struct sk_msg, sk),
si->dst_reg, si->src_reg,
- offsetof(struct sk_msg_buff, sk));
+ offsetof(struct sk_msg, sk));
*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
offsetof(struct sock_common, skc_family));
break;
@@ -6771,9 +7086,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct sk_msg_buff, sk),
+ struct sk_msg, sk),
si->dst_reg, si->src_reg,
- offsetof(struct sk_msg_buff, sk));
+ offsetof(struct sk_msg, sk));
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
offsetof(struct sock_common, skc_daddr));
break;
@@ -6783,9 +7098,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
skc_rcv_saddr) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct sk_msg_buff, sk),
+ struct sk_msg, sk),
si->dst_reg, si->src_reg,
- offsetof(struct sk_msg_buff, sk));
+ offsetof(struct sk_msg, sk));
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
offsetof(struct sock_common,
skc_rcv_saddr));
@@ -6800,9 +7115,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
off = si->off;
off -= offsetof(struct sk_msg_md, remote_ip6[0]);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct sk_msg_buff, sk),
+ struct sk_msg, sk),
si->dst_reg, si->src_reg,
- offsetof(struct sk_msg_buff, sk));
+ offsetof(struct sk_msg, sk));
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
offsetof(struct sock_common,
skc_v6_daddr.s6_addr32[0]) +
@@ -6821,9 +7136,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
off = si->off;
off -= offsetof(struct sk_msg_md, local_ip6[0]);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct sk_msg_buff, sk),
+ struct sk_msg, sk),
si->dst_reg, si->src_reg,
- offsetof(struct sk_msg_buff, sk));
+ offsetof(struct sk_msg, sk));
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
offsetof(struct sock_common,
skc_v6_rcv_saddr.s6_addr32[0]) +
@@ -6837,9 +7152,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct sk_msg_buff, sk),
+ struct sk_msg, sk),
si->dst_reg, si->src_reg,
- offsetof(struct sk_msg_buff, sk));
+ offsetof(struct sk_msg, sk));
*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
@@ -6851,9 +7166,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
- struct sk_msg_buff, sk),
+ struct sk_msg, sk),
si->dst_reg, si->src_reg,
- offsetof(struct sk_msg_buff, sk));
+ offsetof(struct sk_msg, sk));
*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
offsetof(struct sock_common, skc_num));
break;
@@ -6897,7 +7212,7 @@ const struct bpf_prog_ops xdp_prog_ops = {
const struct bpf_verifier_ops cg_skb_verifier_ops = {
.get_func_proto = cg_skb_func_proto,
- .is_valid_access = sk_filter_is_valid_access,
+ .is_valid_access = cg_skb_is_valid_access,
.convert_ctx_access = bpf_convert_ctx_access,
};
@@ -6949,7 +7264,7 @@ const struct bpf_prog_ops lwt_seg6local_prog_ops = {
const struct bpf_verifier_ops cg_sock_verifier_ops = {
.get_func_proto = sock_filter_func_proto,
.is_valid_access = sock_filter_is_valid_access,
- .convert_ctx_access = sock_filter_convert_ctx_access,
+ .convert_ctx_access = bpf_sock_convert_ctx_access,
};
const struct bpf_prog_ops cg_sock_prog_ops = {
@@ -6992,6 +7307,15 @@ const struct bpf_verifier_ops sk_msg_verifier_ops = {
const struct bpf_prog_ops sk_msg_prog_ops = {
};
+const struct bpf_verifier_ops flow_dissector_verifier_ops = {
+ .get_func_proto = flow_dissector_func_proto,
+ .is_valid_access = flow_dissector_is_valid_access,
+ .convert_ctx_access = bpf_convert_ctx_access,
+};
+
+const struct bpf_prog_ops flow_dissector_prog_ops = {
+};
+
int sk_detach_filter(struct sock *sk)
{
int ret = -ENOENT;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index ce9eeeb7c024..676f3ad629f9 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -25,6 +25,9 @@
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
#include <uapi/linux/batadv_packet.h>
+#include <linux/bpf.h>
+
+static DEFINE_MUTEX(flow_dissector_mutex);
static void dissector_set_key(struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id)
@@ -62,6 +65,44 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
}
EXPORT_SYMBOL(skb_flow_dissector_init);
+int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ struct bpf_prog *attached;
+ struct net *net;
+
+ net = current->nsproxy->net_ns;
+ mutex_lock(&flow_dissector_mutex);
+ attached = rcu_dereference_protected(net->flow_dissector_prog,
+ lockdep_is_held(&flow_dissector_mutex));
+ if (attached) {
+ /* Only one BPF program can be attached at a time */
+ mutex_unlock(&flow_dissector_mutex);
+ return -EEXIST;
+ }
+ rcu_assign_pointer(net->flow_dissector_prog, prog);
+ mutex_unlock(&flow_dissector_mutex);
+ return 0;
+}
+
+int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
+{
+ struct bpf_prog *attached;
+ struct net *net;
+
+ net = current->nsproxy->net_ns;
+ mutex_lock(&flow_dissector_mutex);
+ attached = rcu_dereference_protected(net->flow_dissector_prog,
+ lockdep_is_held(&flow_dissector_mutex));
+ if (!attached) {
+ mutex_unlock(&flow_dissector_mutex);
+ return -ENOENT;
+ }
+ bpf_prog_put(attached);
+ RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
+ mutex_unlock(&flow_dissector_mutex);
+ return 0;
+}
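
Attachment is per network namespace and exclusive, which is why a second attach fails with -EEXIST above. A userspace sketch using the raw bpf(2) attach command (assumes a loaded flow-dissector program fd):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    /* Install prog_fd as the flow dissector for the current netns. */
    static int attach_flow_dissector(int prog_fd)
    {
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.attach_bpf_fd = prog_fd;
        attr.attach_type = BPF_FLOW_DISSECTOR;
        return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
    }
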
/**
* skb_flow_get_be16 - extract be16 entity
* @skb: sk_buff to extract from
@@ -382,8 +423,8 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
offset += sizeof(struct gre_base_hdr);
if (hdr->flags & GRE_CSUM)
- offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
- sizeof(((struct gre_full_hdr *) 0)->reserved1);
+ offset += FIELD_SIZEOF(struct gre_full_hdr, csum) +
+ FIELD_SIZEOF(struct gre_full_hdr, reserved1);
if (hdr->flags & GRE_KEY) {
const __be32 *keyid;
@@ -405,11 +446,11 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
else
key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
}
- offset += sizeof(((struct gre_full_hdr *) 0)->key);
+ offset += FIELD_SIZEOF(struct gre_full_hdr, key);
}
if (hdr->flags & GRE_SEQ)
- offset += sizeof(((struct pptp_gre_header *) 0)->seq);
+ offset += FIELD_SIZEOF(struct pptp_gre_header, seq);
if (gre_ver == 0) {
if (*p_proto == htons(ETH_P_TEB)) {
@@ -436,7 +477,7 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
u8 *ppp_hdr;
if (hdr->flags & GRE_ACK)
- offset += sizeof(((struct pptp_gre_header *) 0)->ack);
+ offset += FIELD_SIZEOF(struct pptp_gre_header, ack);
ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
sizeof(_ppp_hdr),
@@ -588,6 +629,60 @@ static bool skb_flow_dissect_allowed(int *num_hdrs)
return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
}
+static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
+ struct flow_dissector *flow_dissector,
+ void *target_container)
+{
+ struct flow_dissector_key_control *key_control;
+ struct flow_dissector_key_basic *key_basic;
+ struct flow_dissector_key_addrs *key_addrs;
+ struct flow_dissector_key_ports *key_ports;
+
+ key_control = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ target_container);
+ key_control->thoff = flow_keys->thoff;
+ if (flow_keys->is_frag)
+ key_control->flags |= FLOW_DIS_IS_FRAGMENT;
+ if (flow_keys->is_first_frag)
+ key_control->flags |= FLOW_DIS_FIRST_FRAG;
+ if (flow_keys->is_encap)
+ key_control->flags |= FLOW_DIS_ENCAPSULATION;
+
+ key_basic = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target_container);
+ key_basic->n_proto = flow_keys->n_proto;
+ key_basic->ip_proto = flow_keys->ip_proto;
+
+ if (flow_keys->addr_proto == ETH_P_IP &&
+ dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ key_addrs = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ target_container);
+ key_addrs->v4addrs.src = flow_keys->ipv4_src;
+ key_addrs->v4addrs.dst = flow_keys->ipv4_dst;
+ key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ } else if (flow_keys->addr_proto == ETH_P_IPV6 &&
+ dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ key_addrs = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ target_container);
+ memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src,
+ sizeof(key_addrs->v6addrs));
+ key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ }
+
+ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ key_ports = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ target_container);
+ key_ports->src = flow_keys->sport;
+ key_ports->dst = flow_keys->dport;
+ }
+}
+
/**
* __skb_flow_dissect - extract the flow_keys struct and return it
* @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@@ -619,6 +714,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_vlan *key_vlan;
enum flow_dissect_ret fdret;
enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
+ struct bpf_prog *attached = NULL;
int num_hdrs = 0;
u8 ip_proto = 0;
bool ret;
@@ -658,6 +754,50 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
FLOW_DISSECTOR_KEY_BASIC,
target_container);
+ rcu_read_lock();
+ if (skb) {
+ if (skb->dev)
+ attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog);
+ else if (skb->sk)
+ attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog);
+ else
+ WARN_ON_ONCE(1);
+ }
+ if (attached) {
+ /* Note that even though the const qualifier is discarded
+ * throughout the execution of the BPF program, all changes (to the
+ * control block) are reverted after the BPF program returns.
+ * Therefore, __skb_flow_dissect does not alter the skb.
+ */
+ struct bpf_flow_keys flow_keys = {};
+ struct bpf_skb_data_end cb_saved;
+ struct bpf_skb_data_end *cb;
+ u32 result;
+
+ cb = (struct bpf_skb_data_end *)skb->cb;
+
+ /* Save Control Block */
+ memcpy(&cb_saved, cb, sizeof(cb_saved));
+ memset(cb, 0, sizeof(cb_saved));
+
+ /* Pass parameters to the BPF program */
+ cb->qdisc_cb.flow_keys = &flow_keys;
+ flow_keys.nhoff = nhoff;
+
+ bpf_compute_data_pointers((struct sk_buff *)skb);
+ result = BPF_PROG_RUN(attached, skb);
+
+ /* Restore state */
+ memcpy(cb, &cb_saved, sizeof(cb_saved));
+
+ __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
+ target_container);
+ key_control->thoff = min_t(u16, key_control->thoff, skb->len);
+ rcu_read_unlock();
+ return result == BPF_OK;
+ }
+ rcu_read_unlock();
+
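
The contract set up by the block above: the program sees flow_keys through the skb context, nhoff arrives pre-seeded with the network header offset, and returning BPF_OK marks the dissection as successful. A minimal IPv4/TCP-only sketch (endian helpers and includes assumed from the usual BPF headers):

    SEC("flow_dissector")
    int dissect_ipv4_tcp(struct __sk_buff *skb)
    {
        struct bpf_flow_keys *keys = skb->flow_keys;
        struct iphdr iph;

        /* nhoff was seeded by __skb_flow_dissect() before the run. */
        if (bpf_skb_load_bytes(skb, keys->nhoff, &iph, sizeof(iph)))
            return BPF_DROP;
        if (iph.protocol != IPPROTO_TCP)
            return BPF_DROP;

        keys->addr_proto = ETH_P_IP;
        keys->n_proto = bpf_htons(ETH_P_IP);
        keys->ip_proto = iph.protocol;
        keys->ipv4_src = iph.saddr;
        keys->ipv4_dst = iph.daddr;
        keys->thoff = keys->nhoff + iph.ihl * 4;
        return BPF_OK;
    }
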
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct ethhdr *eth = eth_hdr(skb);
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 188d693cb251..9bf1b9ad1780 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -162,6 +162,34 @@ __gnet_stats_copy_basic(const seqcount_t *running,
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);
+static int
+___gnet_stats_copy_basic(const seqcount_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b,
+ int type)
+{
+ struct gnet_stats_basic_packed bstats = {0};
+
+ __gnet_stats_copy_basic(running, &bstats, cpu, b);
+
+ if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
+ d->tc_stats.bytes = bstats.bytes;
+ d->tc_stats.packets = bstats.packets;
+ }
+
+ if (d->tail) {
+ struct gnet_stats_basic sb;
+
+ memset(&sb, 0, sizeof(sb));
+ sb.bytes = bstats.bytes;
+ sb.packets = bstats.packets;
+ return gnet_stats_copy(d, type, &sb, sizeof(sb),
+ TCA_STATS_PAD);
+ }
+ return 0;
+}
+
/**
* gnet_stats_copy_basic - copy basic statistics into statistic TLV
* @running: seqcount_t pointer
@@ -181,29 +209,36 @@ gnet_stats_copy_basic(const seqcount_t *running,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
{
- struct gnet_stats_basic_packed bstats = {0};
-
- __gnet_stats_copy_basic(running, &bstats, cpu, b);
-
- if (d->compat_tc_stats) {
- d->tc_stats.bytes = bstats.bytes;
- d->tc_stats.packets = bstats.packets;
- }
-
- if (d->tail) {
- struct gnet_stats_basic sb;
-
- memset(&sb, 0, sizeof(sb));
- sb.bytes = bstats.bytes;
- sb.packets = bstats.packets;
- return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb),
- TCA_STATS_PAD);
- }
- return 0;
+ return ___gnet_stats_copy_basic(running, d, cpu, b,
+ TCA_STATS_BASIC);
}
EXPORT_SYMBOL(gnet_stats_copy_basic);
/**
+ * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV
+ * @running: seqcount_t pointer
+ * @d: dumping handle
+ * @cpu: copy statistic per cpu
+ * @b: basic statistics
+ *
+ * Appends the basic statistics to the top level TLV created by
+ * gnet_stats_start_copy().
+ *
+ * Returns 0 on success or -1 with the statistic lock released
+ * if the room in the socket buffer was not sufficient.
+ */
+int
+gnet_stats_copy_basic_hw(const seqcount_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b)
+{
+ return ___gnet_stats_copy_basic(running, d, cpu, b,
+ TCA_STATS_BASIC_HW);
+}
+EXPORT_SYMBOL(gnet_stats_copy_basic_hw);
+
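
The split lets hardware-offloaded byte/packet counters travel in their own TCA_STATS_BASIC_HW TLV next to the software TCA_STATS_BASIC one. A sketch of a dump-side caller (hypothetical function and parameters; the real callers live in the tc dump paths):

    /* Hypothetical dump path emitting software and hardware counters
     * as two TLVs under the same gnet_dump handle.
     */
    static int dump_basic_stats(struct gnet_dump *d,
                                struct gnet_stats_basic_packed *sw,
                                struct gnet_stats_basic_packed *hw)
    {
        if (gnet_stats_copy_basic(NULL, d, NULL, sw) < 0)
            return -1;
        return gnet_stats_copy_basic_hw(NULL, d, NULL, hw);
    }
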
+/**
* gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
* @d: dumping handle
* @rate_est: rate estimator
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index e38e641e98d5..7f51efb2b3ab 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -155,7 +155,7 @@ static void linkwatch_do_dev(struct net_device *dev)
clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
rfc2863_policy(dev);
- if (dev->flags & IFF_UP) {
+ if (dev->flags & IFF_UP && netif_device_present(dev)) {
if (netif_carrier_ok(dev))
dev_activate(dev);
else
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index aa19d86937af..ee605d9d8bd4 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -232,7 +232,8 @@ static void pneigh_queue_purge(struct sk_buff_head *list)
}
}
-static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
+static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
+ bool skip_perm)
{
int i;
struct neigh_hash_table *nht;
@@ -250,6 +251,10 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
np = &n->next;
continue;
}
+ if (skip_perm && n->nud_state & NUD_PERMANENT) {
+ np = &n->next;
+ continue;
+ }
rcu_assign_pointer(*np,
rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock)));
@@ -285,21 +290,35 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
write_lock_bh(&tbl->lock);
- neigh_flush_dev(tbl, dev);
+ neigh_flush_dev(tbl, dev, false);
write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
-int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
+ bool skip_perm)
{
write_lock_bh(&tbl->lock);
- neigh_flush_dev(tbl, dev);
+ neigh_flush_dev(tbl, dev, skip_perm);
pneigh_ifdown_and_unlock(tbl, dev);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
return 0;
}
+
+int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
+{
+ __neigh_ifdown(tbl, dev, true);
+ return 0;
+}
+EXPORT_SYMBOL(neigh_carrier_down);
+
+int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+{
+ __neigh_ifdown(tbl, dev, false);
+ return 0;
+}
EXPORT_SYMBOL(neigh_ifdown);
static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
@@ -1148,8 +1167,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
neigh->nud_state = new;
err = 0;
notify = old & NUD_VALID;
- if (((old & (NUD_INCOMPLETE | NUD_PROBE)) ||
- (flags & NEIGH_UPDATE_F_ADMIN)) &&
+ if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
(new & NUD_FAILED)) {
neigh_invalidate(neigh);
notify = 1;
@@ -1180,6 +1198,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
lladdr = neigh->ha;
}
+ /* Update the confirmed timestamp for the neighbour entry after we
+ * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
+ */
+ if (new & NUD_CONNECTED)
+ neigh->confirmed = jiffies;
+
/* If entry was valid and address is not changed,
do not change entry state, if new one is STALE.
*/
@@ -1201,15 +1225,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
}
}
- /* Update timestamps only once we know we will make a change to the
+ /* Update timestamp only once we know we will make a change to the
 * neighbour entry. Otherwise we risk moving the locktime window with
* noop updates and ignore relevant ARP updates.
*/
- if (new != old || lladdr != neigh->ha) {
- if (new & NUD_CONNECTED)
- neigh->confirmed = jiffies;
+ if (new != old || lladdr != neigh->ha)
neigh->updated = jiffies;
- }
if (new != old) {
neigh_del_timer(neigh);
@@ -1277,11 +1298,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
neigh->arp_queue_len_bytes = 0;
}
out:
- if (update_isrouter) {
- neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
- (neigh->flags | NTF_ROUTER) :
- (neigh->flags & ~NTF_ROUTER);
- }
+ if (update_isrouter)
+ neigh_update_is_router(neigh, flags, &notify);
write_unlock_bh(&neigh->lock);
if (notify)
@@ -1709,7 +1727,8 @@ out:
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
- int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
+ int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
+ NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX+1];
@@ -1784,12 +1803,16 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
}
if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
- flags &= ~NEIGH_UPDATE_F_OVERRIDE;
+ flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
+ NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
}
if (ndm->ndm_flags & NTF_EXT_LEARNED)
flags |= NEIGH_UPDATE_F_EXT_LEARNED;
+ if (ndm->ndm_flags & NTF_ROUTER)
+ flags |= NEIGH_UPDATE_F_ISROUTER;
+
if (ndm->ndm_flags & NTF_USE) {
neigh_event_send(neigh, NULL);
err = 0;
@@ -2159,15 +2182,47 @@ errout:
return err;
}
+static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct ndtmsg *ndtm;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
+ return -EINVAL;
+ }
+
+ ndtm = nlmsg_data(nlh);
+ if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
+ return -EINVAL;
+ }
+
+ if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
+ NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
int family, tidx, nidx = 0;
int tbl_skip = cb->args[0];
int neigh_skip = cb->args[1];
struct neigh_table *tbl;
- family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+ if (cb->strict_check) {
+ int err = neightbl_valid_dump_info(nlh, cb->extack);
+
+ if (err < 0)
+ return err;
+ }
+
+ family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
struct neigh_parms *p;
@@ -2180,7 +2235,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
continue;
if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
+ nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
NLM_F_MULTI) < 0)
break;
@@ -2195,7 +2250,7 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
if (neightbl_fill_param_info(skb, tbl, p,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
+ nlh->nlmsg_seq,
RTM_NEWNEIGHTBL,
NLM_F_MULTI) < 0)
goto out;
@@ -2324,35 +2379,24 @@ static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
return false;
}
+struct neigh_dump_filter {
+ int master_idx;
+ int dev_idx;
+};
+
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct netlink_callback *cb,
+ struct neigh_dump_filter *filter)
{
struct net *net = sock_net(skb->sk);
- const struct nlmsghdr *nlh = cb->nlh;
- struct nlattr *tb[NDA_MAX + 1];
struct neighbour *n;
int rc, h, s_h = cb->args[1];
int idx, s_idx = idx = cb->args[2];
struct neigh_hash_table *nht;
- int filter_master_idx = 0, filter_idx = 0;
unsigned int flags = NLM_F_MULTI;
- int err;
- err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
- if (!err) {
- if (tb[NDA_IFINDEX]) {
- if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
- return -EINVAL;
- filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
- }
- if (tb[NDA_MASTER]) {
- if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
- return -EINVAL;
- filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
- }
- if (filter_idx || filter_master_idx)
- flags |= NLM_F_DUMP_FILTERED;
- }
+ if (filter->dev_idx || filter->master_idx)
+ flags |= NLM_F_DUMP_FILTERED;
rcu_read_lock_bh();
nht = rcu_dereference_bh(tbl->nht);
@@ -2365,8 +2409,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
n = rcu_dereference_bh(n->next)) {
if (idx < s_idx || !net_eq(dev_net(n->dev), net))
goto next;
- if (neigh_ifindex_filtered(n->dev, filter_idx) ||
- neigh_master_filtered(n->dev, filter_master_idx))
+ if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
+ neigh_master_filtered(n->dev, filter->master_idx))
goto next;
if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
@@ -2388,12 +2432,17 @@ out:
}
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct netlink_callback *cb,
+ struct neigh_dump_filter *filter)
{
struct pneigh_entry *n;
struct net *net = sock_net(skb->sk);
int rc, h, s_h = cb->args[3];
int idx, s_idx = idx = cb->args[4];
+ unsigned int flags = NLM_F_MULTI;
+
+ if (filter->dev_idx || filter->master_idx)
+ flags |= NLM_F_DUMP_FILTERED;
read_lock_bh(&tbl->lock);
@@ -2403,10 +2452,12 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
if (idx < s_idx || pneigh_net(n) != net)
goto next;
+ if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
+ neigh_master_filtered(n->dev, filter->master_idx))
+ goto next;
if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI, tbl) < 0) {
+ RTM_NEWNEIGH, flags, tbl) < 0) {
read_unlock_bh(&tbl->lock);
rc = -1;
goto out;
@@ -2425,22 +2476,91 @@ out:
}
+static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
+ bool strict_check,
+ struct neigh_dump_filter *filter,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[NDA_MAX + 1];
+ int err, i;
+
+ if (strict_check) {
+ struct ndmsg *ndm;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
+ return -EINVAL;
+ }
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
+ ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
+ NULL, extack);
+ } else {
+ err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
+ NULL, extack);
+ }
+ if (err < 0)
+ return err;
+
+ for (i = 0; i <= NDA_MAX; ++i) {
+ if (!tb[i])
+ continue;
+
+ /* all new attributes should require strict_check */
+ switch (i) {
+ case NDA_IFINDEX:
+ if (nla_len(tb[i]) != sizeof(u32)) {
+ NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in neighbor dump request");
+ return -EINVAL;
+ }
+ filter->dev_idx = nla_get_u32(tb[i]);
+ break;
+ case NDA_MASTER:
+ if (nla_len(tb[i]) != sizeof(u32)) {
+ NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in neighbor dump request");
+ return -EINVAL;
+ }
+ filter->master_idx = nla_get_u32(tb[i]);
+ break;
+ default:
+ if (strict_check) {
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
+ struct neigh_dump_filter filter = {};
struct neigh_table *tbl;
int t, family, s_t;
int proxy = 0;
int err;
- family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+ family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
/* check for full ndmsg structure presence, family member is
* the same for both structures
*/
- if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
- ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
+ if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
+ ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
proxy = 1;
+ err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
+ if (err < 0 && cb->strict_check)
+ return err;
+
s_t = cb->args[0];
for (t = 0; t < NEIGH_NR_TABLES; t++) {
@@ -2454,9 +2574,9 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
memset(&cb->args[1], 0, sizeof(cb->args) -
sizeof(cb->args[0]));
if (proxy)
- err = pneigh_dump_table(tbl, skb, cb);
+ err = pneigh_dump_table(tbl, skb, cb, &filter);
else
- err = neigh_dump_table(tbl, skb, cb);
+ err = neigh_dump_table(tbl, skb, cb, &filter);
if (err < 0)
break;
}
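The split between neigh_ifdown() and the new neigh_carrier_down() lets callers drop learned entries on carrier loss while sparing NUD_PERMANENT ones via the skip_perm path above. A hedged sketch of a possible caller; the notifier and its name are illustrative, not part of this patch:

	#include <linux/netdevice.h>
	#include <net/arp.h>

	/* Illustrative notifier: on carrier loss, flush dynamic ARP
	 * entries but keep permanent ones. */
	static int my_netdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		if (event == NETDEV_CHANGE && !netif_carrier_ok(dev))
			neigh_carrier_down(&arp_tbl, dev);
		return NOTIFY_DONE;
	}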
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 670c84b1bfc2..fefe72774aeb 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -853,6 +853,12 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
.s_idx = cb->args[0],
};
+ if (cb->strict_check &&
+ nlmsg_attrlen(cb->nlh, sizeof(struct rtgenmsg))) {
+ NL_SET_ERR_MSG(cb->extack, "Unknown data in network namespace id dump request");
+ return -EINVAL;
+ }
+
spin_lock_bh(&net->nsid_lock);
idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
spin_unlock_bh(&net->nsid_lock);
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 5e4f04004a49..7bf833598615 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -106,6 +106,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
iterate_fd(p->files, 0, update_classid_sock,
(void *)(unsigned long)cs->classid);
task_unlock(p);
+ cond_resched();
}
css_task_iter_end(&it);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 57557a6a950c..5da9552b186b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -57,7 +57,6 @@ DEFINE_STATIC_SRCU(netpoll_srcu);
MAX_UDP_CHUNK)
static void zap_completion_queue(void);
-static void netpoll_async_cleanup(struct work_struct *work);
static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
@@ -135,27 +134,9 @@ static void queue_process(struct work_struct *work)
}
}
-/*
- * Check whether delayed processing was scheduled for our NIC. If so,
- * we attempt to grab the poll lock and use ->poll() to pump the card.
- * If this fails, either we've recursed in ->poll() or it's already
- * running on another CPU.
- *
- * Note: we don't mask interrupts with this lock because we're using
- * trylock here and interrupts are already disabled in the softirq
- * case. Further, we test the poll_owner to avoid recursion on UP
- * systems where the lock doesn't exist.
- */
static void poll_one_napi(struct napi_struct *napi)
{
- int work = 0;
-
- /* net_rx_action's ->poll() invocations and our's are
- * synchronized by this test which is only made while
- * holding the napi->poll_lock.
- */
- if (!test_bit(NAPI_STATE_SCHED, &napi->state))
- return;
+ int work;
/* If we set this bit but see that it has already been set,
* that indicates that napi has been disabled and we need
@@ -187,16 +168,16 @@ static void poll_napi(struct net_device *dev)
}
}
-static void netpoll_poll_dev(struct net_device *dev)
+void netpoll_poll_dev(struct net_device *dev)
{
- const struct net_device_ops *ops;
struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
+ const struct net_device_ops *ops;
/* Don't do any rx activity if the dev_lock mutex is held
* the dev_open/close paths use this to block netpoll activity
* while changing device state
*/
- if (down_trylock(&ni->dev_lock))
+ if (!ni || down_trylock(&ni->dev_lock))
return;
if (!netif_running(dev)) {
@@ -205,13 +186,8 @@ static void netpoll_poll_dev(struct net_device *dev)
}
ops = dev->netdev_ops;
- if (!ops->ndo_poll_controller) {
- up(&ni->dev_lock);
- return;
- }
-
- /* Process pending work on NIC */
- ops->ndo_poll_controller(dev);
+ if (ops->ndo_poll_controller)
+ ops->ndo_poll_controller(dev);
poll_napi(dev);
@@ -219,6 +195,7 @@ static void netpoll_poll_dev(struct net_device *dev)
zap_completion_queue();
}
+EXPORT_SYMBOL(netpoll_poll_dev);
void netpoll_poll_disable(struct net_device *dev)
{
@@ -611,10 +588,8 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
np->dev = ndev;
strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
- INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
- if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
- !ndev->netdev_ops->ndo_poll_controller) {
+ if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
np_err(np, "%s doesn't support polling, aborting\n",
np->dev_name);
err = -ENOTSUPP;
@@ -811,10 +786,6 @@ void __netpoll_cleanup(struct netpoll *np)
{
struct netpoll_info *npinfo;
- /* rtnl_dereference would be preferable here but
- * rcu_cleanup_netpoll path can put us in here safely without
- * holding the rtnl, so plain rcu_dereference it is
- */
npinfo = rtnl_dereference(np->dev->npinfo);
if (!npinfo)
return;
@@ -835,21 +806,16 @@ void __netpoll_cleanup(struct netpoll *np)
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
-static void netpoll_async_cleanup(struct work_struct *work)
+void __netpoll_free(struct netpoll *np)
{
- struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
+ ASSERT_RTNL();
- rtnl_lock();
+ /* Wait for transmitting packets to finish before freeing. */
+ synchronize_rcu_bh();
__netpoll_cleanup(np);
- rtnl_unlock();
kfree(np);
}
-
-void __netpoll_free_async(struct netpoll *np)
-{
- schedule_work(&np->cleanup_work);
-}
-EXPORT_SYMBOL_GPL(__netpoll_free_async);
+EXPORT_SYMBOL_GPL(__netpoll_free);
void netpoll_cleanup(struct netpoll *np)
{
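__netpoll_free() replaces the old __netpoll_free_async(): cleanup is now synchronous and must run under RTNL. A minimal sketch of a caller under those assumptions; my_slave and its np field are invented for illustration:

	/* Illustrative detach path: the caller already holds RTNL, so it
	 * can free the netpoll state directly instead of scheduling work. */
	static void my_slave_detach(struct my_slave *slave)
	{
		struct netpoll *np = slave->np;

		ASSERT_RTNL();
		slave->np = NULL;
		if (np)
			__netpoll_free(np); /* waits for in-flight tx, then kfree()s */
	}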
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 7f6938405fa1..6ac919847ce6 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3426,7 +3426,7 @@ xmit_more:
net_info_ratelimited("%s xmit error: %d\n",
pkt_dev->odevname, ret);
pkt_dev->errors++;
- /* fallthru */
+ /* fall through */
case NETDEV_TX_BUSY:
/* Retry it next time */
refcount_dec(&(pkt_dev->skb->users));
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 60c928894a78..0958c7be2c22 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -59,7 +59,7 @@
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
-#define RTNL_MAX_TYPE 48
+#define RTNL_MAX_TYPE 49
#define RTNL_SLAVE_MAX_TYPE 36
struct rtnl_link {
@@ -130,6 +130,12 @@ int rtnl_is_locked(void)
}
EXPORT_SYMBOL(rtnl_is_locked);
+bool refcount_dec_and_rtnl_lock(refcount_t *r)
+{
+ return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
+}
+EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
+
#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
@@ -1016,7 +1022,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_NEW_NETNSID */
+ nla_total_size(4) /* IFLA_NEW_IFINDEX */
+ nla_total_size(1) /* IFLA_PROTO_DOWN */
- + nla_total_size(4) /* IFLA_IF_NETNSID */
+ + nla_total_size(4) /* IFLA_TARGET_NETNSID */
+ nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
+ nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
+ nla_total_size(4) /* IFLA_MIN_MTU */
@@ -1598,7 +1604,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
ifm->ifi_flags = dev_get_flags(dev);
ifm->ifi_change = change;
- if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_IF_NETNSID, tgt_netnsid))
+ if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
goto nla_put_failure;
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
@@ -1737,7 +1743,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_XDP] = { .type = NLA_NESTED },
[IFLA_EVENT] = { .type = NLA_U32 },
[IFLA_GROUP] = { .type = NLA_U32 },
- [IFLA_IF_NETNSID] = { .type = NLA_S32 },
+ [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
[IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
[IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
[IFLA_MIN_MTU] = { .type = NLA_U32 },
@@ -1845,7 +1851,15 @@ static bool link_dump_filtered(struct net_device *dev,
return false;
}
-static struct net *get_target_net(struct sock *sk, int netnsid)
+/**
+ * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
+ * @sk: netlink socket
+ * @netnsid: network namespace identifier
+ *
+ * Returns the network namespace identified by netnsid on success or an error
+ * pointer on failure.
+ */
+struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
{
struct net *net;
@@ -1862,9 +1876,54 @@ static struct net *get_target_net(struct sock *sk, int netnsid)
}
return net;
}
+EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
+
+static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
+ bool strict_check, struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ int hdrlen;
+
+ if (strict_check) {
+ struct ifinfomsg *ifm;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for link dump");
+ return -EINVAL;
+ }
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
+ ifm->ifi_change) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
+ return -EINVAL;
+ }
+ if (ifm->ifi_index) {
+ NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
+ return -EINVAL;
+ }
+
+ return nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
+ ifla_policy, extack);
+ }
+
+ /* A hack to preserve kernel<->userspace interface.
+ * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
+ * However, before Linux v3.9 the code here assumed rtgenmsg and that's
+ * what iproute2 < v3.9.0 used.
+ * We can detect the old iproute2. Even including the IFLA_EXT_MASK
+ * attribute, its netlink message is shorter than struct ifinfomsg.
+ */
+ hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
+ sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
+
+ return nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, extack);
+}
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct netlink_ext_ack *extack = cb->extack;
+ const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct net *tgt_net = net;
int h, s_h;
@@ -1877,46 +1936,54 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int flags = NLM_F_MULTI;
int master_idx = 0;
int netnsid = -1;
- int err;
- int hdrlen;
+ int err, i;
s_h = cb->args[0];
s_idx = cb->args[1];
- /* A hack to preserve kernel<->userspace interface.
- * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
- * However, before Linux v3.9 the code here assumed rtgenmsg and that's
- * what iproute2 < v3.9.0 used.
- * We can detect the old iproute2. Even including the IFLA_EXT_MASK
- * attribute, its netlink message is shorter than struct ifinfomsg.
- */
- hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
- sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
+ err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
+ if (err < 0) {
+ if (cb->strict_check)
+ return err;
+
+ goto walk_entries;
+ }
+
+ for (i = 0; i <= IFLA_MAX; ++i) {
+ if (!tb[i])
+ continue;
- if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
- ifla_policy, NULL) >= 0) {
- if (tb[IFLA_IF_NETNSID]) {
- netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
- tgt_net = get_target_net(skb->sk, netnsid);
+ /* new attributes should only be added with strict checking */
+ switch (i) {
+ case IFLA_TARGET_NETNSID:
+ netnsid = nla_get_s32(tb[i]);
+ tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
if (IS_ERR(tgt_net)) {
- tgt_net = net;
- netnsid = -1;
+ NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
+ return PTR_ERR(tgt_net);
+ }
+ break;
+ case IFLA_EXT_MASK:
+ ext_filter_mask = nla_get_u32(tb[i]);
+ break;
+ case IFLA_MASTER:
+ master_idx = nla_get_u32(tb[i]);
+ break;
+ case IFLA_LINKINFO:
+ kind_ops = linkinfo_to_kind_ops(tb[i]);
+ break;
+ default:
+ if (cb->strict_check) {
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
+ return -EINVAL;
}
}
-
- if (tb[IFLA_EXT_MASK])
- ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
-
- if (tb[IFLA_MASTER])
- master_idx = nla_get_u32(tb[IFLA_MASTER]);
-
- if (tb[IFLA_LINKINFO])
- kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);
-
- if (master_idx || kind_ops)
- flags |= NLM_F_DUMP_FILTERED;
}
+ if (master_idx || kind_ops)
+ flags |= NLM_F_DUMP_FILTERED;
+
+walk_entries:
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &tgt_net->dev_index_head[h];
@@ -1928,8 +1995,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
err = rtnl_fill_ifinfo(skb, dev, net,
RTM_NEWLINK,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, 0,
- flags,
+ nlh->nlmsg_seq, 0, flags,
ext_filter_mask, 0, NULL, 0,
netnsid);
@@ -1984,7 +2050,7 @@ EXPORT_SYMBOL(rtnl_link_get_net);
*
* 1. IFLA_NET_NS_PID
* 2. IFLA_NET_NS_FD
- * 3. IFLA_IF_NETNSID
+ * 3. IFLA_TARGET_NETNSID
*/
static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
struct nlattr *tb[])
@@ -1994,10 +2060,10 @@ static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
return rtnl_link_get_net(src_net, tb);
- if (!tb[IFLA_IF_NETNSID])
+ if (!tb[IFLA_TARGET_NETNSID])
return get_net(src_net);
- net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_IF_NETNSID]));
+ net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
if (!net)
return ERR_PTR(-EINVAL);
@@ -2038,13 +2104,13 @@ static int rtnl_ensure_unique_netns(struct nlattr *tb[],
return -EOPNOTSUPP;
}
- if (tb[IFLA_IF_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
+ if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
goto invalid_attr;
- if (tb[IFLA_NET_NS_PID] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_FD]))
+ if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
goto invalid_attr;
- if (tb[IFLA_NET_NS_FD] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_PID]))
+ if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
goto invalid_attr;
return 0;
@@ -2320,7 +2386,7 @@ static int do_setlink(const struct sk_buff *skb,
if (err < 0)
return err;
- if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_IF_NETNSID]) {
+ if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
tb, CAP_NET_ADMIN);
if (IS_ERR(net)) {
@@ -2763,9 +2829,9 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
- if (tb[IFLA_IF_NETNSID]) {
- netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
- tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
+ if (tb[IFLA_TARGET_NETNSID]) {
+ netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
+ tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
if (IS_ERR(tgt_net))
return PTR_ERR(tgt_net);
}
@@ -2810,7 +2876,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
}
if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
- __dev_notify_flags(dev, old_flags, 0U);
+ __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
} else {
dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
__dev_notify_flags(dev, old_flags, ~0U);
@@ -2837,6 +2903,12 @@ struct net_device *rtnl_create_link(struct net *net,
else if (ops->get_num_rx_queues)
num_rx_queues = ops->get_num_rx_queues();
+ if (num_tx_queues < 1 || num_tx_queues > 4096)
+ return ERR_PTR(-EINVAL);
+
+ if (num_rx_queues < 1 || num_rx_queues > 4096)
+ return ERR_PTR(-EINVAL);
+
dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
ops->setup, num_tx_queues, num_rx_queues);
if (!dev)
@@ -3173,9 +3245,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0)
return err;
- if (tb[IFLA_IF_NETNSID]) {
- netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
- tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
+ if (tb[IFLA_TARGET_NETNSID]) {
+ netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
+ tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
if (IS_ERR(tgt_net))
return PTR_ERR(tgt_net);
}
@@ -3260,13 +3332,13 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
int idx;
int s_idx = cb->family;
+ int type = cb->nlh->nlmsg_type - RTM_BASE;
if (s_idx == 0)
s_idx = 1;
for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
struct rtnl_link **tab;
- int type = cb->nlh->nlmsg_type-RTM_BASE;
struct rtnl_link *link;
rtnl_dumpit_func dumpit;
@@ -3727,14 +3799,100 @@ out:
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);
+static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
+ int *br_idx, int *brport_idx,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[NDA_MAX + 1];
+ struct ndmsg *ndm;
+ int err, i;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
+ return -EINVAL;
+ }
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
+ ndm->ndm_flags || ndm->ndm_type) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for fbd dump request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
+ NULL, extack);
+ if (err < 0)
+ return err;
+
+ *brport_idx = ndm->ndm_ifindex;
+ for (i = 0; i <= NDA_MAX; ++i) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NDA_IFINDEX:
+ if (nla_len(tb[i]) != sizeof(u32)) {
+ NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
+ return -EINVAL;
+ }
+ *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
+ break;
+ case NDA_MASTER:
+ if (nla_len(tb[i]) != sizeof(u32)) {
+ NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
+ return -EINVAL;
+ }
+ *br_idx = nla_get_u32(tb[NDA_MASTER]);
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
+ int *br_idx, int *brport_idx,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[IFLA_MAX+1];
+ int err;
+
+ /* A hack to preserve kernel<->userspace interface.
+ * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
+ * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
+ * So, check for ndmsg with an optional u32 attribute (not used here).
+ * Fortunately these sizes don't conflict with the size of ifinfomsg
+ * with an optional attribute.
+ */
+ if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
+ (nlmsg_len(nlh) != sizeof(struct ndmsg) +
+ nla_attr_size(sizeof(u32)))) {
+ struct ifinfomsg *ifm;
+
+ err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ ifla_policy, extack);
+ if (err < 0) {
+ return -EINVAL;
+ } else if (err == 0) {
+ if (tb[IFLA_MASTER])
+ *br_idx = nla_get_u32(tb[IFLA_MASTER]);
+ }
+
+ ifm = nlmsg_data(nlh);
+ *brport_idx = ifm->ifi_index;
+ }
+ return 0;
+}
+
static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net_device *dev;
- struct nlattr *tb[IFLA_MAX+1];
struct net_device *br_dev = NULL;
const struct net_device_ops *ops = NULL;
const struct net_device_ops *cops = NULL;
- struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
struct net *net = sock_net(skb->sk);
struct hlist_head *head;
int brport_idx = 0;
@@ -3744,16 +3902,14 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
int err = 0;
int fidx = 0;
- err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
- IFLA_MAX, ifla_policy, NULL);
- if (err < 0) {
- return -EINVAL;
- } else if (err == 0) {
- if (tb[IFLA_MASTER])
- br_idx = nla_get_u32(tb[IFLA_MASTER]);
- }
-
- brport_idx = ifm->ifi_index;
+ if (cb->strict_check)
+ err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
+ cb->extack);
+ else
+ err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
+ cb->extack);
+ if (err < 0)
+ return err;
if (br_idx) {
br_dev = __dev_get_by_index(net, br_idx);
@@ -3938,28 +4094,72 @@ nla_put_failure:
}
EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
+static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
+ bool strict_check, u32 *filter_mask,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[IFLA_MAX+1];
+ int err, i;
+
+ if (strict_check) {
+ struct ifinfomsg *ifm;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
+ return -EINVAL;
+ }
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
+ ifm->ifi_change || ifm->ifi_index) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct ifinfomsg), tb,
+ IFLA_MAX, ifla_policy, extack);
+ } else {
+ err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb,
+ IFLA_MAX, ifla_policy, extack);
+ }
+ if (err < 0)
+ return err;
+
+ /* new attributes should only be added with strict checking */
+ for (i = 0; i <= IFLA_MAX; ++i) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case IFLA_EXT_MASK:
+ *filter_mask = nla_get_u32(tb[i]);
+ break;
+ default:
+ if (strict_check) {
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct net_device *dev;
int idx = 0;
u32 portid = NETLINK_CB(cb->skb).portid;
- u32 seq = cb->nlh->nlmsg_seq;
+ u32 seq = nlh->nlmsg_seq;
u32 filter_mask = 0;
int err;
- if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
- struct nlattr *extfilt;
-
- extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
- IFLA_EXT_MASK);
- if (extfilt) {
- if (nla_len(extfilt) < sizeof(filter_mask))
- return -EINVAL;
-
- filter_mask = nla_get_u32(extfilt);
- }
- }
+ err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
+ cb->extack);
+ if (err < 0 && cb->strict_check)
+ return err;
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
@@ -4553,6 +4753,7 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct netlink_ext_ack *extack = cb->extack;
int h, s_h, err, s_idx, s_idxattr, s_prividx;
struct net *net = sock_net(skb->sk);
unsigned int flags = NLM_F_MULTI;
@@ -4569,13 +4770,32 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = net->dev_base_seq;
- if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
+ if (nlmsg_len(cb->nlh) < sizeof(*ifsm)) {
+ NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
return -EINVAL;
+ }
ifsm = nlmsg_data(cb->nlh);
+
+ /* only requests using strict checks can pass data to influence
+ * the dump. The legacy exception is filter_mask.
+ */
+ if (cb->strict_check) {
+ if (ifsm->pad1 || ifsm->pad2 || ifsm->ifindex) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
+ return -EINVAL;
+ }
+ if (nlmsg_attrlen(cb->nlh, sizeof(*ifsm))) {
+ NL_SET_ERR_MSG(extack, "Invalid attributes after stats header");
+ return -EINVAL;
+ }
+ }
+
filter_mask = ifsm->filter_mask;
- if (!filter_mask)
+ if (!filter_mask) {
+ NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
return -EINVAL;
+ }
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
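These validation helpers only reject malformed requests when cb->strict_check is set, which userspace opts into per socket. A hedged userspace sketch, assuming headers that define NETLINK_GET_STRICT_CHK (added in this same cycle):

	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
		int one = 1;
		struct {
			struct nlmsghdr nlh;
			struct ifinfomsg ifm;	/* must be zeroed under strict mode */
		} req;

		/* Opt in: the kernel now rejects non-zero header fields and
		 * unknown attributes in dump requests instead of ignoring them. */
		setsockopt(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, &one, sizeof(one));

		memset(&req, 0, sizeof(req));
		req.nlh.nlmsg_len = sizeof(req);
		req.nlh.nlmsg_type = RTM_GETLINK;
		req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
		sendto(fd, &req, sizeof(req), 0, (struct sockaddr *)&sa, sizeof(sa));
		close(fd);
		return 0;
	}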
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b2c807f67aba..946de0e24c87 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1846,8 +1846,9 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
if (skb->ip_summed == CHECKSUM_COMPLETE) {
int delta = skb->len - len;
- skb->csum = csum_sub(skb->csum,
- skb_checksum(skb, len, delta, 0));
+ skb->csum = csum_block_sub(skb->csum,
+ skb_checksum(skb, len, delta, 0),
+ len);
}
return __pskb_trim(skb, len);
}
@@ -3381,64 +3382,6 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
}
EXPORT_SYMBOL(skb_find_text);
-/**
- * skb_append_datato_frags - append the user data to a skb
- * @sk: sock structure
- * @skb: skb structure to be appended with user data.
- * @getfrag: call back function to be used for getting the user data
- * @from: pointer to user message iov
- * @length: length of the iov message
- *
- * Description: This procedure append the user data in the fragment part
- * of the skb if any page alloc fails user this procedure returns -ENOMEM
- */
-int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
- int (*getfrag)(void *from, char *to, int offset,
- int len, int odd, struct sk_buff *skb),
- void *from, int length)
-{
- int frg_cnt = skb_shinfo(skb)->nr_frags;
- int copy;
- int offset = 0;
- int ret;
- struct page_frag *pfrag = &current->task_frag;
-
- do {
- /* Return error if we don't have space for new frag */
- if (frg_cnt >= MAX_SKB_FRAGS)
- return -EMSGSIZE;
-
- if (!sk_page_frag_refill(sk, pfrag))
- return -ENOMEM;
-
- /* copy the user data to page */
- copy = min_t(int, length, pfrag->size - pfrag->offset);
-
- ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
- offset, copy, 0, skb);
- if (ret < 0)
- return -EFAULT;
-
- /* copy was successful so update the size parameters */
- skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
- copy);
- frg_cnt++;
- pfrag->offset += copy;
- get_page(pfrag->page);
-
- skb->truesize += copy;
- refcount_add(copy, &sk->sk_wmem_alloc);
- skb->len += copy;
- skb->data_len += copy;
- offset += copy;
- length -= copy;
-
- } while (length > 0);
-
- return 0;
-}
-EXPORT_SYMBOL(skb_append_datato_frags);
-
int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
int offset, size_t size)
{
@@ -4452,14 +4395,16 @@ EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
*/
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
- if (unlikely(start > skb_headlen(skb)) ||
- unlikely((int)start + off > skb_headlen(skb) - 2)) {
- net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
- start, off, skb_headlen(skb));
+ u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
+ u32 csum_start = skb_headroom(skb) + (u32)start;
+
+ if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
+ net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
+ start, off, skb_headroom(skb), skb_headlen(skb));
return false;
}
skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_headroom(skb) + start;
+ skb->csum_start = csum_start;
skb->csum_offset = off;
skb_set_transport_header(skb, start);
return true;
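The widened u32 arithmetic guards against csum_start overflowing the 16-bit skb field. A standalone illustration of the truncation the new U16_MAX check rejects; the values are hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t headroom = 200, start = 65500;	/* hypothetical inputs */
		uint32_t csum_start = headroom + start;	/* 65700, fits in u32 */
		uint16_t stored = (uint16_t)csum_start;	/* silently truncates to 164 */

		printf("wanted %u, would store %u\n", csum_start, stored);
		return csum_start > UINT16_MAX;		/* new code rejects this */
	}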
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
new file mode 100644
index 000000000000..56a99d0c9aa0
--- /dev/null
+++ b/net/core/skmsg.c
@@ -0,0 +1,802 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
+
+#include <linux/skmsg.h>
+#include <linux/skbuff.h>
+#include <linux/scatterlist.h>
+
+#include <net/sock.h>
+#include <net/tcp.h>
+
+static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
+{
+ if (msg->sg.end > msg->sg.start &&
+ elem_first_coalesce < msg->sg.end)
+ return true;
+
+ if (msg->sg.end < msg->sg.start &&
+ (elem_first_coalesce > msg->sg.start ||
+ elem_first_coalesce < msg->sg.end))
+ return true;
+
+ return false;
+}
+
+int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
+ int elem_first_coalesce)
+{
+ struct page_frag *pfrag = sk_page_frag(sk);
+ int ret = 0;
+
+ len -= msg->sg.size;
+ while (len > 0) {
+ struct scatterlist *sge;
+ u32 orig_offset;
+ int use, i;
+
+ if (!sk_page_frag_refill(sk, pfrag))
+ return -ENOMEM;
+
+ orig_offset = pfrag->offset;
+ use = min_t(int, len, pfrag->size - orig_offset);
+ if (!sk_wmem_schedule(sk, use))
+ return -ENOMEM;
+
+ i = msg->sg.end;
+ sk_msg_iter_var_prev(i);
+ sge = &msg->sg.data[i];
+
+ if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
+ sg_page(sge) == pfrag->page &&
+ sge->offset + sge->length == orig_offset) {
+ sge->length += use;
+ } else {
+ if (sk_msg_full(msg)) {
+ ret = -ENOSPC;
+ break;
+ }
+
+ sge = &msg->sg.data[msg->sg.end];
+ sg_unmark_end(sge);
+ sg_set_page(sge, pfrag->page, use, orig_offset);
+ get_page(pfrag->page);
+ sk_msg_iter_next(msg, end);
+ }
+
+ sk_mem_charge(sk, use);
+ msg->sg.size += use;
+ pfrag->offset += use;
+ len -= use;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sk_msg_alloc);
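sk_msg tracks its scatterlist as a ring: sg.start/sg.end delimit the live region, and end < start means the region wraps. A minimal standalone model of sk_msg_try_coalesce_ok()'s two cases; the function and parameter names are assumptions:

	#include <stdbool.h>

	/* Model: live elements are [start, end) when end > start, or
	 * [start, N) plus [0, end) when the ring has wrapped (end < start).
	 * Coalescing into element `first` is safe only if `first` is live. */
	static bool coalesce_ok(int start, int end, int first)
	{
		if (end > start)
			return first < end;
		if (end < start)
			return first > start || first < end;
		return false;	/* start == end: empty, nothing to coalesce */
	}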
+
+int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
+ u32 off, u32 len)
+{
+ int i = src->sg.start;
+ struct scatterlist *sge = sk_msg_elem(src, i);
+ u32 sge_len, sge_off;
+
+ if (sk_msg_full(dst))
+ return -ENOSPC;
+
+ while (off) {
+ if (sge->length > off)
+ break;
+ off -= sge->length;
+ sk_msg_iter_var_next(i);
+ if (i == src->sg.end && off)
+ return -ENOSPC;
+ sge = sk_msg_elem(src, i);
+ }
+
+ while (len) {
+ sge_len = sge->length - off;
+ sge_off = sge->offset + off;
+ if (sge_len > len)
+ sge_len = len;
+ off = 0;
+ len -= sge_len;
+ sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
+ sk_mem_charge(sk, sge_len);
+ sk_msg_iter_var_next(i);
+ if (i == src->sg.end && len)
+ return -ENOSPC;
+ sge = sk_msg_elem(src, i);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sk_msg_clone);
+
+void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
+{
+ int i = msg->sg.start;
+
+ do {
+ struct scatterlist *sge = sk_msg_elem(msg, i);
+
+ if (bytes < sge->length) {
+ sge->length -= bytes;
+ sge->offset += bytes;
+ sk_mem_uncharge(sk, bytes);
+ break;
+ }
+
+ sk_mem_uncharge(sk, sge->length);
+ bytes -= sge->length;
+ sge->length = 0;
+ sge->offset = 0;
+ sk_msg_iter_var_next(i);
+ } while (bytes && i != msg->sg.end);
+ msg->sg.start = i;
+}
+EXPORT_SYMBOL_GPL(sk_msg_return_zero);
+
+void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
+{
+ int i = msg->sg.start;
+
+ do {
+ struct scatterlist *sge = &msg->sg.data[i];
+ int uncharge = (bytes < sge->length) ? bytes : sge->length;
+
+ sk_mem_uncharge(sk, uncharge);
+ bytes -= uncharge;
+ sk_msg_iter_var_next(i);
+ } while (i != msg->sg.end);
+}
+EXPORT_SYMBOL_GPL(sk_msg_return);
+
+static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
+ bool charge)
+{
+ struct scatterlist *sge = sk_msg_elem(msg, i);
+ u32 len = sge->length;
+
+ if (charge)
+ sk_mem_uncharge(sk, len);
+ if (!msg->skb)
+ put_page(sg_page(sge));
+ memset(sge, 0, sizeof(*sge));
+ return len;
+}
+
+static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
+ bool charge)
+{
+ struct scatterlist *sge = sk_msg_elem(msg, i);
+ int freed = 0;
+
+ while (msg->sg.size) {
+ msg->sg.size -= sge->length;
+ freed += sk_msg_free_elem(sk, msg, i, charge);
+ sk_msg_iter_var_next(i);
+ sk_msg_check_to_free(msg, i, msg->sg.size);
+ sge = sk_msg_elem(msg, i);
+ }
+ if (msg->skb)
+ consume_skb(msg->skb);
+ sk_msg_init(msg);
+ return freed;
+}
+
+int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
+{
+ return __sk_msg_free(sk, msg, msg->sg.start, false);
+}
+EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
+
+int sk_msg_free(struct sock *sk, struct sk_msg *msg)
+{
+ return __sk_msg_free(sk, msg, msg->sg.start, true);
+}
+EXPORT_SYMBOL_GPL(sk_msg_free);
+
+static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
+ u32 bytes, bool charge)
+{
+ struct scatterlist *sge;
+ u32 i = msg->sg.start;
+
+ while (bytes) {
+ sge = sk_msg_elem(msg, i);
+ if (!sge->length)
+ break;
+ if (bytes < sge->length) {
+ if (charge)
+ sk_mem_uncharge(sk, bytes);
+ sge->length -= bytes;
+ sge->offset += bytes;
+ msg->sg.size -= bytes;
+ break;
+ }
+
+ msg->sg.size -= sge->length;
+ bytes -= sge->length;
+ sk_msg_free_elem(sk, msg, i, charge);
+ sk_msg_iter_var_next(i);
+ sk_msg_check_to_free(msg, i, bytes);
+ }
+ msg->sg.start = i;
+}
+
+void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
+{
+ __sk_msg_free_partial(sk, msg, bytes, true);
+}
+EXPORT_SYMBOL_GPL(sk_msg_free_partial);
+
+void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
+ u32 bytes)
+{
+ __sk_msg_free_partial(sk, msg, bytes, false);
+}
+
+void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
+{
+ int trim = msg->sg.size - len;
+ u32 i = msg->sg.end;
+
+ if (trim <= 0) {
+ WARN_ON(trim < 0);
+ return;
+ }
+
+ sk_msg_iter_var_prev(i);
+ msg->sg.size = len;
+ while (msg->sg.data[i].length &&
+ trim >= msg->sg.data[i].length) {
+ trim -= msg->sg.data[i].length;
+ sk_msg_free_elem(sk, msg, i, true);
+ sk_msg_iter_var_prev(i);
+ if (!trim)
+ goto out;
+ }
+
+ msg->sg.data[i].length -= trim;
+ sk_mem_uncharge(sk, trim);
+out:
+ /* If we trim data before the curr pointer, update copybreak and curr
+ * so that any future copy operations start at the new copy location.
+ * However, trimmed data that has not yet been used in a copy op
+ * does not require an update.
+ */
+ if (msg->sg.curr >= i) {
+ msg->sg.curr = i;
+ msg->sg.copybreak = msg->sg.data[i].length;
+ }
+ sk_msg_iter_var_next(i);
+ msg->sg.end = i;
+}
+EXPORT_SYMBOL_GPL(sk_msg_trim);
+
+int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
+ struct sk_msg *msg, u32 bytes)
+{
+ int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
+ const int to_max_pages = MAX_MSG_FRAGS;
+ struct page *pages[MAX_MSG_FRAGS];
+ ssize_t orig, copied, use, offset;
+
+ orig = msg->sg.size;
+ while (bytes > 0) {
+ i = 0;
+ maxpages = to_max_pages - num_elems;
+ if (maxpages == 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ copied = iov_iter_get_pages(from, pages, bytes, maxpages,
+ &offset);
+ if (copied <= 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ iov_iter_advance(from, copied);
+ bytes -= copied;
+ msg->sg.size += copied;
+
+ while (copied) {
+ use = min_t(int, copied, PAGE_SIZE - offset);
+ sg_set_page(&msg->sg.data[msg->sg.end],
+ pages[i], use, offset);
+ sg_unmark_end(&msg->sg.data[msg->sg.end]);
+ sk_mem_charge(sk, use);
+
+ offset = 0;
+ copied -= use;
+ sk_msg_iter_next(msg, end);
+ num_elems++;
+ i++;
+ }
+ /* When zerocopy is mixed with sk_msg_*copy* operations we
+ * may have a copybreak set; in this case clear it and prefer
+ * the zerocopy remainder when possible.
+ */
+ msg->sg.copybreak = 0;
+ msg->sg.curr = msg->sg.end;
+ }
+out:
+ /* Revert the iov_iter updates; if msg also needs to be cleared, the
+ * caller will need to use 'trim' later.
+ */
+ if (ret)
+ iov_iter_revert(from, msg->sg.size - orig);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
+
+int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
+ struct sk_msg *msg, u32 bytes)
+{
+ int ret = -ENOSPC, i = msg->sg.curr;
+ struct scatterlist *sge;
+ u32 copy, buf_size;
+ void *to;
+
+ do {
+ sge = sk_msg_elem(msg, i);
+ /* This is possible if a trim operation shrunk the buffer */
+ if (msg->sg.copybreak >= sge->length) {
+ msg->sg.copybreak = 0;
+ sk_msg_iter_var_next(i);
+ if (i == msg->sg.end)
+ break;
+ sge = sk_msg_elem(msg, i);
+ }
+
+ buf_size = sge->length - msg->sg.copybreak;
+ copy = (buf_size > bytes) ? bytes : buf_size;
+ to = sg_virt(sge) + msg->sg.copybreak;
+ msg->sg.copybreak += copy;
+ if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
+ ret = copy_from_iter_nocache(to, copy, from);
+ else
+ ret = copy_from_iter(to, copy, from);
+ if (ret != copy) {
+ ret = -EFAULT;
+ goto out;
+ }
+ bytes -= copy;
+ if (!bytes)
+ break;
+ msg->sg.copybreak = 0;
+ sk_msg_iter_var_next(i);
+ } while (i != msg->sg.end);
+out:
+ msg->sg.curr = i;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
+
+static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
+{
+ struct sock *sk = psock->sk;
+ int copied = 0, num_sge;
+ struct sk_msg *msg;
+
+ msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
+ if (unlikely(!msg))
+ return -EAGAIN;
+ if (!sk_rmem_schedule(sk, skb, skb->len)) {
+ kfree(msg);
+ return -EAGAIN;
+ }
+
+ sk_msg_init(msg);
+ num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
+ if (unlikely(num_sge < 0)) {
+ kfree(msg);
+ return num_sge;
+ }
+
+ sk_mem_charge(sk, skb->len);
+ copied = skb->len;
+ msg->sg.start = 0;
+ msg->sg.end = num_sge == MAX_MSG_FRAGS ? 0 : num_sge;
+ msg->skb = skb;
+
+ sk_psock_queue_msg(psock, msg);
+ sk->sk_data_ready(sk);
+ return copied;
+}
+
+static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
+ u32 off, u32 len, bool ingress)
+{
+ if (ingress)
+ return sk_psock_skb_ingress(psock, skb);
+ else
+ return skb_send_sock_locked(psock->sk, skb, off, len);
+}
+
+static void sk_psock_backlog(struct work_struct *work)
+{
+ struct sk_psock *psock = container_of(work, struct sk_psock, work);
+ struct sk_psock_work_state *state = &psock->work_state;
+ struct sk_buff *skb;
+ bool ingress;
+ u32 len, off;
+ int ret;
+
+ /* Lock sock to avoid losing sk_socket during loop. */
+ lock_sock(psock->sk);
+ if (state->skb) {
+ skb = state->skb;
+ len = state->len;
+ off = state->off;
+ state->skb = NULL;
+ goto start;
+ }
+
+ while ((skb = skb_dequeue(&psock->ingress_skb))) {
+ len = skb->len;
+ off = 0;
+start:
+ ingress = tcp_skb_bpf_ingress(skb);
+ do {
+ ret = -EIO;
+ if (likely(psock->sk->sk_socket))
+ ret = sk_psock_handle_skb(psock, skb, off,
+ len, ingress);
+ if (ret <= 0) {
+ if (ret == -EAGAIN) {
+ state->skb = skb;
+ state->len = len;
+ state->off = off;
+ goto end;
+ }
+ /* Hard errors break pipe and stop xmit. */
+ sk_psock_report_error(psock, ret ? -ret : EPIPE);
+ sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
+ kfree_skb(skb);
+ goto end;
+ }
+ off += ret;
+ len -= ret;
+ } while (len);
+
+ if (!ingress)
+ kfree_skb(skb);
+ }
+end:
+ release_sock(psock->sk);
+}
+
+struct sk_psock *sk_psock_init(struct sock *sk, int node)
+{
+ struct sk_psock *psock = kzalloc_node(sizeof(*psock),
+ GFP_ATOMIC | __GFP_NOWARN,
+ node);
+ if (!psock)
+ return NULL;
+
+ psock->sk = sk;
+ psock->eval = __SK_NONE;
+
+ INIT_LIST_HEAD(&psock->link);
+ spin_lock_init(&psock->link_lock);
+
+ INIT_WORK(&psock->work, sk_psock_backlog);
+ INIT_LIST_HEAD(&psock->ingress_msg);
+ skb_queue_head_init(&psock->ingress_skb);
+
+ sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
+ refcount_set(&psock->refcnt, 1);
+
+ rcu_assign_sk_user_data(sk, psock);
+ sock_hold(sk);
+
+ return psock;
+}
+EXPORT_SYMBOL_GPL(sk_psock_init);
+
+struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
+{
+ struct sk_psock_link *link;
+
+ spin_lock_bh(&psock->link_lock);
+ link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
+ list);
+ if (link)
+ list_del(&link->list);
+ spin_unlock_bh(&psock->link_lock);
+ return link;
+}
+
+void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
+{
+ struct sk_msg *msg, *tmp;
+
+ list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
+ list_del(&msg->list);
+ sk_msg_free(psock->sk, msg);
+ kfree(msg);
+ }
+}
+
+static void sk_psock_zap_ingress(struct sk_psock *psock)
+{
+ __skb_queue_purge(&psock->ingress_skb);
+ __sk_psock_purge_ingress_msg(psock);
+}
+
+static void sk_psock_link_destroy(struct sk_psock *psock)
+{
+ struct sk_psock_link *link, *tmp;
+
+ list_for_each_entry_safe(link, tmp, &psock->link, list) {
+ list_del(&link->list);
+ sk_psock_free_link(link);
+ }
+}
+
+static void sk_psock_destroy_deferred(struct work_struct *gc)
+{
+ struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
+
+ /* No sk_callback_lock since already detached. */
+ if (psock->parser.enabled)
+ strp_done(&psock->parser.strp);
+
+ cancel_work_sync(&psock->work);
+
+ psock_progs_drop(&psock->progs);
+
+ sk_psock_link_destroy(psock);
+ sk_psock_cork_free(psock);
+ sk_psock_zap_ingress(psock);
+
+ if (psock->sk_redir)
+ sock_put(psock->sk_redir);
+ sock_put(psock->sk);
+ kfree(psock);
+}
+
+void sk_psock_destroy(struct rcu_head *rcu)
+{
+ struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);
+
+ INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
+ schedule_work(&psock->gc);
+}
+EXPORT_SYMBOL_GPL(sk_psock_destroy);
+
+void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
+{
+ rcu_assign_sk_user_data(sk, NULL);
+ sk_psock_cork_free(psock);
+ sk_psock_restore_proto(sk, psock);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (psock->progs.skb_parser)
+ sk_psock_stop_strp(sk, psock);
+ write_unlock_bh(&sk->sk_callback_lock);
+ sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
+
+ call_rcu_sched(&psock->rcu, sk_psock_destroy);
+}
+EXPORT_SYMBOL_GPL(sk_psock_drop);
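sk_psock_drop() clears sk_user_data and defers the free through RCU, so lookups can run locklessly. A sketch of the reader-side pattern this lifetime scheme supports; the helper name is assumed and mirrors the sk_psock_get() style lookup:

	static struct sk_psock *my_psock_get(struct sock *sk)
	{
		struct sk_psock *psock;

		rcu_read_lock();
		psock = sk_psock(sk);	/* reads RCU-assigned sk_user_data */
		if (psock && !refcount_inc_not_zero(&psock->refcnt))
			psock = NULL;	/* raced with the final put */
		rcu_read_unlock();
		return psock;
	}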
+
+static int sk_psock_map_verd(int verdict, bool redir)
+{
+ switch (verdict) {
+ case SK_PASS:
+ return redir ? __SK_REDIRECT : __SK_PASS;
+ case SK_DROP:
+ default:
+ break;
+ }
+
+ return __SK_DROP;
+}
+
+int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
+ struct sk_msg *msg)
+{
+ struct bpf_prog *prog;
+ int ret;
+
+ preempt_disable();
+ rcu_read_lock();
+ prog = READ_ONCE(psock->progs.msg_parser);
+ if (unlikely(!prog)) {
+ ret = __SK_PASS;
+ goto out;
+ }
+
+ sk_msg_compute_data_pointers(msg);
+ msg->sk = sk;
+ ret = BPF_PROG_RUN(prog, msg);
+ ret = sk_psock_map_verd(ret, msg->sk_redir);
+ psock->apply_bytes = msg->apply_bytes;
+ if (ret == __SK_REDIRECT) {
+ if (psock->sk_redir)
+ sock_put(psock->sk_redir);
+ psock->sk_redir = msg->sk_redir;
+ if (!psock->sk_redir) {
+ ret = __SK_DROP;
+ goto out;
+ }
+ sock_hold(psock->sk_redir);
+ }
+out:
+ rcu_read_unlock();
+ preempt_enable();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
+
+static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ skb->sk = psock->sk;
+ bpf_compute_data_end_sk_skb(skb);
+ preempt_disable();
+ ret = BPF_PROG_RUN(prog, skb);
+ preempt_enable();
+ /* strparser clones the skb before handing it to an upper layer,
+ * meaning skb_orphan has been called. We NULL sk on the way out
+ * to ensure we don't trigger a BUG_ON() in skb/sk operations
+ * later and because we are not charging the memory of this skb
+ * to any socket yet.
+ */
+ skb->sk = NULL;
+ return ret;
+}
+
+static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
+{
+ struct sk_psock_parser *parser;
+
+ parser = container_of(strp, struct sk_psock_parser, strp);
+ return container_of(parser, struct sk_psock, parser);
+}
+
+static void sk_psock_verdict_apply(struct sk_psock *psock,
+ struct sk_buff *skb, int verdict)
+{
+ struct sk_psock *psock_other;
+ struct sock *sk_other;
+ bool ingress;
+
+ switch (verdict) {
+ case __SK_REDIRECT:
+ sk_other = tcp_skb_bpf_redirect_fetch(skb);
+ if (unlikely(!sk_other))
+ goto out_free;
+ psock_other = sk_psock(sk_other);
+ if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
+ !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED))
+ goto out_free;
+ ingress = tcp_skb_bpf_ingress(skb);
+ if ((!ingress && sock_writeable(sk_other)) ||
+ (ingress &&
+ atomic_read(&sk_other->sk_rmem_alloc) <=
+ sk_other->sk_rcvbuf)) {
+ if (!ingress)
+ skb_set_owner_w(skb, sk_other);
+ skb_queue_tail(&psock_other->ingress_skb, skb);
+ schedule_work(&psock_other->work);
+ break;
+ }
+ /* fall-through */
+ case __SK_DROP:
+ /* fall-through */
+ default:
+out_free:
+ kfree_skb(skb);
+ }
+}
+
+static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
+{
+ struct sk_psock *psock = sk_psock_from_strp(strp);
+ struct bpf_prog *prog;
+ int ret = __SK_DROP;
+
+ rcu_read_lock();
+ prog = READ_ONCE(psock->progs.skb_verdict);
+ if (likely(prog)) {
+ skb_orphan(skb);
+ tcp_skb_bpf_redirect_clear(skb);
+ ret = sk_psock_bpf_run(psock, prog, skb);
+ ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+ }
+ rcu_read_unlock();
+ sk_psock_verdict_apply(psock, skb, ret);
+}
+
+static int sk_psock_strp_read_done(struct strparser *strp, int err)
+{
+ return err;
+}
+
+static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
+{
+ struct sk_psock *psock = sk_psock_from_strp(strp);
+ struct bpf_prog *prog;
+ int ret = skb->len;
+
+ rcu_read_lock();
+ prog = READ_ONCE(psock->progs.skb_parser);
+ if (likely(prog))
+ ret = sk_psock_bpf_run(psock, prog, skb);
+ rcu_read_unlock();
+ return ret;
+}
+
+/* Called with socket lock held. */
+static void sk_psock_data_ready(struct sock *sk)
+{
+ struct sk_psock *psock;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (likely(psock)) {
+ write_lock_bh(&sk->sk_callback_lock);
+ strp_data_ready(&psock->parser.strp);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ rcu_read_unlock();
+}
+
+static void sk_psock_write_space(struct sock *sk)
+{
+ struct sk_psock *psock;
+ void (*write_space)(struct sock *sk);
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (likely(psock && sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)))
+ schedule_work(&psock->work);
+ write_space = psock->saved_write_space;
+ rcu_read_unlock();
+ write_space(sk);
+}
+
+int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
+{
+ static const struct strp_callbacks cb = {
+ .rcv_msg = sk_psock_strp_read,
+ .read_sock_done = sk_psock_strp_read_done,
+ .parse_msg = sk_psock_strp_parse,
+ };
+
+ psock->parser.enabled = false;
+ return strp_init(&psock->parser.strp, sk, &cb);
+}
+
+void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
+{
+ struct sk_psock_parser *parser = &psock->parser;
+
+ if (parser->enabled)
+ return;
+
+ parser->saved_data_ready = sk->sk_data_ready;
+ sk->sk_data_ready = sk_psock_data_ready;
+ sk->sk_write_space = sk_psock_write_space;
+ parser->enabled = true;
+}
+
+void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
+{
+ struct sk_psock_parser *parser = &psock->parser;
+
+ if (!parser->enabled)
+ return;
+
+ sk->sk_data_ready = parser->saved_data_ready;
+ parser->saved_data_ready = NULL;
+ strp_stop(&parser->strp);
+ parser->enabled = false;
+}
diff --git a/net/core/sock.c b/net/core/sock.c
index 3730eb855095..6fcc4bc07d19 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -998,7 +998,7 @@ set_rcvbuf:
cmpxchg(&sk->sk_pacing_status,
SK_PACING_NONE,
SK_PACING_NEEDED);
- sk->sk_max_pacing_rate = val;
+ sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
break;
@@ -1336,7 +1336,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
#endif
case SO_MAX_PACING_RATE:
- v.val = sk->sk_max_pacing_rate;
+ /* 32bit version */
+ v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
break;
case SO_INCOMING_CPU:
@@ -2238,67 +2239,6 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
}
EXPORT_SYMBOL(sk_page_frag_refill);
-int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
- int sg_start, int *sg_curr_index, unsigned int *sg_curr_size,
- int first_coalesce)
-{
- int sg_curr = *sg_curr_index, use = 0, rc = 0;
- unsigned int size = *sg_curr_size;
- struct page_frag *pfrag;
- struct scatterlist *sge;
-
- len -= size;
- pfrag = sk_page_frag(sk);
-
- while (len > 0) {
- unsigned int orig_offset;
-
- if (!sk_page_frag_refill(sk, pfrag)) {
- rc = -ENOMEM;
- goto out;
- }
-
- use = min_t(int, len, pfrag->size - pfrag->offset);
-
- if (!sk_wmem_schedule(sk, use)) {
- rc = -ENOMEM;
- goto out;
- }
-
- sk_mem_charge(sk, use);
- size += use;
- orig_offset = pfrag->offset;
- pfrag->offset += use;
-
- sge = sg + sg_curr - 1;
- if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
- sge->offset + sge->length == orig_offset) {
- sge->length += use;
- } else {
- sge = sg + sg_curr;
- sg_unmark_end(sge);
- sg_set_page(sge, pfrag->page, use, orig_offset);
- get_page(pfrag->page);
- sg_curr++;
-
- if (sg_curr == MAX_SKB_FRAGS)
- sg_curr = 0;
-
- if (sg_curr == sg_start) {
- rc = -ENOSPC;
- break;
- }
- }
-
- len -= use;
- }
-out:
- *sg_curr_size = size;
- *sg_curr_index = sg_curr;
- return rc;
-}
-EXPORT_SYMBOL(sk_alloc_sg);
-
static void __lock_sock(struct sock *sk)
__releases(&sk->sk_lock.slock)
__acquires(&sk->sk_lock.slock)
@@ -2317,7 +2257,7 @@ static void __lock_sock(struct sock *sk)
finish_wait(&sk->sk_lock.wq, &wait);
}
-static void __release_sock(struct sock *sk)
+void __release_sock(struct sock *sk)
__releases(&sk->sk_lock.slock)
__acquires(&sk->sk_lock.slock)
{
@@ -2332,7 +2272,7 @@ static void __release_sock(struct sock *sk)
next = skb->next;
prefetch(next);
WARN_ON_ONCE(skb_dst_is_noref(skb));
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
sk_backlog_rcv(sk, skb);
cond_resched();
@@ -2810,8 +2750,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_ll_usec = sysctl_net_busy_read;
#endif
- sk->sk_max_pacing_rate = ~0U;
- sk->sk_pacing_rate = ~0U;
+ sk->sk_max_pacing_rate = ~0UL;
+ sk->sk_pacing_rate = ~0UL;
sk->sk_pacing_shift = 10;
sk->sk_incoming_cpu = -1;
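sk_max_pacing_rate is now an unsigned long: ~0U from the legacy 32-bit sockopt maps to unlimited (~0UL), and getsockopt() clamps the value back to 32 bits. A hedged userspace sketch of the compat behaviour:

	#include <stdio.h>
	#include <sys/socket.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		unsigned int rate = ~0U;	/* request unlimited pacing */
		socklen_t len = sizeof(rate);

		setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE, &rate, sizeof(rate));
		getsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE, &rate, &len);
		printf("max pacing rate: %u\n", rate);	/* clamped: 4294967295 */
		return 0;
	}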
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
new file mode 100644
index 000000000000..be6092ac69f8
--- /dev/null
+++ b/net/core/sock_map.c
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/net.h>
+#include <linux/workqueue.h>
+#include <linux/skmsg.h>
+#include <linux/list.h>
+#include <linux/jhash.h>
+
+struct bpf_stab {
+ struct bpf_map map;
+ struct sock **sks;
+ struct sk_psock_progs progs;
+ raw_spinlock_t lock;
+};
+
+#define SOCK_CREATE_FLAG_MASK \
+ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+
+static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
+{
+ struct bpf_stab *stab;
+ u64 cost;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return ERR_PTR(-EPERM);
+ if (attr->max_entries == 0 ||
+ attr->key_size != 4 ||
+ attr->value_size != 4 ||
+ attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
+ return ERR_PTR(-EINVAL);
+
+ stab = kzalloc(sizeof(*stab), GFP_USER);
+ if (!stab)
+ return ERR_PTR(-ENOMEM);
+
+ bpf_map_init_from_attr(&stab->map, attr);
+ raw_spin_lock_init(&stab->lock);
+
+ /* Make sure page count doesn't overflow. */
+ cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+ if (cost >= U32_MAX - PAGE_SIZE) {
+ err = -EINVAL;
+ goto free_stab;
+ }
+
+ stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+ err = bpf_map_precharge_memlock(stab->map.pages);
+ if (err)
+ goto free_stab;
+
+ stab->sks = bpf_map_area_alloc(stab->map.max_entries *
+ sizeof(struct sock *),
+ stab->map.numa_node);
+ if (stab->sks)
+ return &stab->map;
+ err = -ENOMEM;
+free_stab:
+ kfree(stab);
+ return ERR_PTR(err);
+}
+
+int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ u32 ufd = attr->target_fd;
+ struct bpf_map *map;
+ struct fd f;
+ int ret;
+
+ f = fdget(ufd);
+ map = __bpf_map_get(f);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+ ret = sock_map_prog_update(map, prog, attr->attach_type);
+ fdput(f);
+ return ret;
+}
+
+static void sock_map_sk_acquire(struct sock *sk)
+ __acquires(&sk->sk_lock.slock)
+{
+ lock_sock(sk);
+ preempt_disable();
+ rcu_read_lock();
+}
+
+static void sock_map_sk_release(struct sock *sk)
+ __releases(&sk->sk_lock.slock)
+{
+ rcu_read_unlock();
+ preempt_enable();
+ release_sock(sk);
+}
+
+static void sock_map_add_link(struct sk_psock *psock,
+ struct sk_psock_link *link,
+ struct bpf_map *map, void *link_raw)
+{
+ link->link_raw = link_raw;
+ link->map = map;
+ spin_lock_bh(&psock->link_lock);
+ list_add_tail(&link->list, &psock->link);
+ spin_unlock_bh(&psock->link_lock);
+}
+
+static void sock_map_del_link(struct sock *sk,
+ struct sk_psock *psock, void *link_raw)
+{
+ struct sk_psock_link *link, *tmp;
+ bool strp_stop = false;
+
+ spin_lock_bh(&psock->link_lock);
+ list_for_each_entry_safe(link, tmp, &psock->link, list) {
+ if (link->link_raw == link_raw) {
+ struct bpf_map *map = link->map;
+ struct bpf_stab *stab = container_of(map, struct bpf_stab,
+ map);
+ if (psock->parser.enabled && stab->progs.skb_parser)
+ strp_stop = true;
+ list_del(&link->list);
+ sk_psock_free_link(link);
+ }
+ }
+ spin_unlock_bh(&psock->link_lock);
+ if (strp_stop) {
+ write_lock_bh(&sk->sk_callback_lock);
+ sk_psock_stop_strp(sk, psock);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+}
+
+static void sock_map_unref(struct sock *sk, void *link_raw)
+{
+ struct sk_psock *psock = sk_psock(sk);
+
+ if (likely(psock)) {
+ sock_map_del_link(sk, psock, link_raw);
+ sk_psock_put(sk, psock);
+ }
+}
+
+static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
+ struct sock *sk)
+{
+ struct bpf_prog *msg_parser, *skb_parser, *skb_verdict;
+ bool skb_progs, sk_psock_is_new = false;
+ struct sk_psock *psock;
+ int ret;
+
+ skb_verdict = READ_ONCE(progs->skb_verdict);
+ skb_parser = READ_ONCE(progs->skb_parser);
+ skb_progs = skb_parser && skb_verdict;
+ if (skb_progs) {
+ skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
+ if (IS_ERR(skb_verdict))
+ return PTR_ERR(skb_verdict);
+ skb_parser = bpf_prog_inc_not_zero(skb_parser);
+ if (IS_ERR(skb_parser)) {
+ bpf_prog_put(skb_verdict);
+ return PTR_ERR(skb_parser);
+ }
+ }
+
+ msg_parser = READ_ONCE(progs->msg_parser);
+ if (msg_parser) {
+ msg_parser = bpf_prog_inc_not_zero(msg_parser);
+ if (IS_ERR(msg_parser)) {
+ ret = PTR_ERR(msg_parser);
+ goto out;
+ }
+ }
+
+ psock = sk_psock_get_checked(sk);
+ if (IS_ERR(psock)) {
+ ret = PTR_ERR(psock);
+ goto out_progs;
+ }
+
+ if (psock) {
+ if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
+ (skb_progs && READ_ONCE(psock->progs.skb_parser))) {
+ sk_psock_put(sk, psock);
+ ret = -EBUSY;
+ goto out_progs;
+ }
+ } else {
+ psock = sk_psock_init(sk, map->numa_node);
+ if (!psock) {
+ ret = -ENOMEM;
+ goto out_progs;
+ }
+ sk_psock_is_new = true;
+ }
+
+ if (msg_parser)
+ psock_set_prog(&psock->progs.msg_parser, msg_parser);
+ if (sk_psock_is_new) {
+ ret = tcp_bpf_init(sk);
+ if (ret < 0)
+ goto out_drop;
+ } else {
+ tcp_bpf_reinit(sk);
+ }
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (skb_progs && !psock->parser.enabled) {
+ ret = sk_psock_init_strp(sk, psock);
+ if (ret) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ goto out_drop;
+ }
+ psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
+ psock_set_prog(&psock->progs.skb_parser, skb_parser);
+ sk_psock_start_strp(sk, psock);
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+ return 0;
+out_drop:
+ sk_psock_put(sk, psock);
+out_progs:
+ if (msg_parser)
+ bpf_prog_put(msg_parser);
+out:
+ if (skb_progs) {
+ bpf_prog_put(skb_verdict);
+ bpf_prog_put(skb_parser);
+ }
+ return ret;
+}
+
+static void sock_map_free(struct bpf_map *map)
+{
+ struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ int i;
+
+ synchronize_rcu();
+ rcu_read_lock();
+ raw_spin_lock_bh(&stab->lock);
+ for (i = 0; i < stab->map.max_entries; i++) {
+ struct sock **psk = &stab->sks[i];
+ struct sock *sk;
+
+ sk = xchg(psk, NULL);
+ if (sk)
+ sock_map_unref(sk, psk);
+ }
+ raw_spin_unlock_bh(&stab->lock);
+ rcu_read_unlock();
+
+ bpf_map_area_free(stab->sks);
+ kfree(stab);
+}
+
+static void sock_map_release_progs(struct bpf_map *map)
+{
+ psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs);
+}
+
+static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
+{
+ struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (unlikely(key >= map->max_entries))
+ return NULL;
+ return READ_ONCE(stab->sks[key]);
+}
+
+static void *sock_map_lookup(struct bpf_map *map, void *key)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
+ struct sock **psk)
+{
+ struct sock *sk;
+
+ raw_spin_lock_bh(&stab->lock);
+ sk = *psk;
+ if (!sk_test || sk_test == sk)
+ *psk = NULL;
+ raw_spin_unlock_bh(&stab->lock);
+ if (unlikely(!sk))
+ return -EINVAL;
+ sock_map_unref(sk, psk);
+ return 0;
+}
+
+static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
+ void *link_raw)
+{
+ struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+
+ __sock_map_delete(stab, sk, link_raw);
+}
+
+static int sock_map_delete_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ u32 i = *(u32 *)key;
+ struct sock **psk;
+
+ if (unlikely(i >= map->max_entries))
+ return -EINVAL;
+
+ psk = &stab->sks[i];
+ return __sock_map_delete(stab, NULL, psk);
+}
+
+static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
+{
+ struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ u32 i = key ? *(u32 *)key : U32_MAX;
+ u32 *key_next = next;
+
+ if (i == stab->map.max_entries - 1)
+ return -ENOENT;
+ if (i >= stab->map.max_entries)
+ *key_next = 0;
+ else
+ *key_next = i + 1;
+ return 0;
+}
+
+static int sock_map_update_common(struct bpf_map *map, u32 idx,
+ struct sock *sk, u64 flags)
+{
+ struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ struct sk_psock_link *link;
+ struct sk_psock *psock;
+ struct sock *osk;
+ int ret;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ if (unlikely(flags > BPF_EXIST))
+ return -EINVAL;
+ if (unlikely(idx >= map->max_entries))
+ return -E2BIG;
+
+ link = sk_psock_init_link();
+ if (!link)
+ return -ENOMEM;
+
+ ret = sock_map_link(map, &stab->progs, sk);
+ if (ret < 0)
+ goto out_free;
+
+ psock = sk_psock(sk);
+ WARN_ON_ONCE(!psock);
+
+ raw_spin_lock_bh(&stab->lock);
+ osk = stab->sks[idx];
+ if (osk && flags == BPF_NOEXIST) {
+ ret = -EEXIST;
+ goto out_unlock;
+ } else if (!osk && flags == BPF_EXIST) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ sock_map_add_link(psock, link, map, &stab->sks[idx]);
+ stab->sks[idx] = sk;
+ if (osk)
+ sock_map_unref(osk, &stab->sks[idx]);
+ raw_spin_unlock_bh(&stab->lock);
+ return 0;
+out_unlock:
+ raw_spin_unlock_bh(&stab->lock);
+ if (psock)
+ sk_psock_put(sk, psock);
+out_free:
+ sk_psock_free_link(link);
+ return ret;
+}
+
+static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
+{
+ return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
+ ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB;
+}
+
+static bool sock_map_sk_is_suitable(const struct sock *sk)
+{
+ return sk->sk_type == SOCK_STREAM &&
+ sk->sk_protocol == IPPROTO_TCP;
+}
+
+static int sock_map_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 flags)
+{
+ u32 ufd = *(u32 *)value;
+ u32 idx = *(u32 *)key;
+ struct socket *sock;
+ struct sock *sk;
+ int ret;
+
+ sock = sockfd_lookup(ufd, &ret);
+ if (!sock)
+ return ret;
+ sk = sock->sk;
+ if (!sk) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!sock_map_sk_is_suitable(sk) ||
+ sk->sk_state != TCP_ESTABLISHED) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ sock_map_sk_acquire(sk);
+ ret = sock_map_update_common(map, idx, sk, flags);
+ sock_map_sk_release(sk);
+out:
+ fput(sock->file);
+ return ret;
+}
+
+BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
+ struct bpf_map *, map, void *, key, u64, flags)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (likely(sock_map_sk_is_suitable(sops->sk) &&
+ sock_map_op_okay(sops)))
+ return sock_map_update_common(map, *(u32 *)key, sops->sk,
+ flags);
+ return -EOPNOTSUPP;
+}
+
+const struct bpf_func_proto bpf_sock_map_update_proto = {
+ .func = bpf_sock_map_update,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_PTR_TO_MAP_KEY,
+ .arg4_type = ARG_ANYTHING,
+};
+
+BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
+ struct bpf_map *, map, u32, key, u64, flags)
+{
+ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+
+ if (unlikely(flags & ~(BPF_F_INGRESS)))
+ return SK_DROP;
+ tcb->bpf.flags = flags;
+ tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key);
+ if (!tcb->bpf.sk_redir)
+ return SK_DROP;
+ return SK_PASS;
+}
+
+const struct bpf_func_proto bpf_sk_redirect_map_proto = {
+ .func = bpf_sk_redirect_map,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_ANYTHING,
+};
+
+BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
+ struct bpf_map *, map, u32, key, u64, flags)
+{
+ if (unlikely(flags & ~(BPF_F_INGRESS)))
+ return SK_DROP;
+ msg->flags = flags;
+ msg->sk_redir = __sock_map_lookup_elem(map, key);
+ if (!msg->sk_redir)
+ return SK_DROP;
+ return SK_PASS;
+}
+
+const struct bpf_func_proto bpf_msg_redirect_map_proto = {
+ .func = bpf_msg_redirect_map,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_ANYTHING,
+};
+
+const struct bpf_map_ops sock_map_ops = {
+ .map_alloc = sock_map_alloc,
+ .map_free = sock_map_free,
+ .map_get_next_key = sock_map_get_next_key,
+ .map_update_elem = sock_map_update_elem,
+ .map_delete_elem = sock_map_delete_elem,
+ .map_lookup_elem = sock_map_lookup,
+ .map_release_uref = sock_map_release_progs,
+ .map_check_btf = map_check_no_btf,
+};
+
+struct bpf_htab_elem {
+ struct rcu_head rcu;
+ u32 hash;
+ struct sock *sk;
+ struct hlist_node node;
+ u8 key[0];
+};
+
+struct bpf_htab_bucket {
+ struct hlist_head head;
+ raw_spinlock_t lock;
+};
+
+struct bpf_htab {
+ struct bpf_map map;
+ struct bpf_htab_bucket *buckets;
+ u32 buckets_num;
+ u32 elem_size;
+ struct sk_psock_progs progs;
+ atomic_t count;
+};
+
+static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
+{
+ return jhash(key, len, 0);
+}
+
+static struct bpf_htab_bucket *sock_hash_select_bucket(struct bpf_htab *htab,
+ u32 hash)
+{
+ return &htab->buckets[hash & (htab->buckets_num - 1)];
+}
+
+static struct bpf_htab_elem *
+sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
+ u32 key_size)
+{
+ struct bpf_htab_elem *elem;
+
+ hlist_for_each_entry_rcu(elem, head, node) {
+ if (elem->hash == hash &&
+ !memcmp(&elem->key, key, key_size))
+ return elem;
+ }
+
+ return NULL;
+}
+
+static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ u32 key_size = map->key_size, hash;
+ struct bpf_htab_bucket *bucket;
+ struct bpf_htab_elem *elem;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ hash = sock_hash_bucket_hash(key, key_size);
+ bucket = sock_hash_select_bucket(htab, hash);
+ elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
+
+ return elem ? elem->sk : NULL;
+}
+
+static void sock_hash_free_elem(struct bpf_htab *htab,
+ struct bpf_htab_elem *elem)
+{
+ atomic_dec(&htab->count);
+ kfree_rcu(elem, rcu);
+}
+
+static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
+ void *link_raw)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct bpf_htab_elem *elem_probe, *elem = link_raw;
+ struct bpf_htab_bucket *bucket;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ bucket = sock_hash_select_bucket(htab, elem->hash);
+
+ /* elem may be deleted in parallel from the map, but access here
+ * is okay since it's going away only after an RCU grace period.
+ * However, we need to check whether it's still present.
+ */
+ raw_spin_lock_bh(&bucket->lock);
+ elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
+ elem->key, map->key_size);
+ if (elem_probe && elem_probe == elem) {
+ hlist_del_rcu(&elem->node);
+ sock_map_unref(elem->sk, elem);
+ sock_hash_free_elem(htab, elem);
+ }
+ raw_spin_unlock_bh(&bucket->lock);
+}
+
+static int sock_hash_delete_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ u32 hash, key_size = map->key_size;
+ struct bpf_htab_bucket *bucket;
+ struct bpf_htab_elem *elem;
+ int ret = -ENOENT;
+
+ hash = sock_hash_bucket_hash(key, key_size);
+ bucket = sock_hash_select_bucket(htab, hash);
+
+ raw_spin_lock_bh(&bucket->lock);
+ elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
+ if (elem) {
+ hlist_del_rcu(&elem->node);
+ sock_map_unref(elem->sk, elem);
+ sock_hash_free_elem(htab, elem);
+ ret = 0;
+ }
+ raw_spin_unlock_bh(&bucket->lock);
+ return ret;
+}
+
+static struct bpf_htab_elem *sock_hash_alloc_elem(struct bpf_htab *htab,
+ void *key, u32 key_size,
+ u32 hash, struct sock *sk,
+ struct bpf_htab_elem *old)
+{
+ struct bpf_htab_elem *new;
+
+ if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
+ if (!old) {
+ atomic_dec(&htab->count);
+ return ERR_PTR(-E2BIG);
+ }
+ }
+
+ new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
+ htab->map.numa_node);
+ if (!new) {
+ atomic_dec(&htab->count);
+ return ERR_PTR(-ENOMEM);
+ }
+ memcpy(new->key, key, key_size);
+ new->sk = sk;
+ new->hash = hash;
+ return new;
+}
+
+static int sock_hash_update_common(struct bpf_map *map, void *key,
+ struct sock *sk, u64 flags)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ u32 key_size = map->key_size, hash;
+ struct bpf_htab_elem *elem, *elem_new;
+ struct bpf_htab_bucket *bucket;
+ struct sk_psock_link *link;
+ struct sk_psock *psock;
+ int ret;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ if (unlikely(flags > BPF_EXIST))
+ return -EINVAL;
+
+ link = sk_psock_init_link();
+ if (!link)
+ return -ENOMEM;
+
+ ret = sock_map_link(map, &htab->progs, sk);
+ if (ret < 0)
+ goto out_free;
+
+ psock = sk_psock(sk);
+ WARN_ON_ONCE(!psock);
+
+ hash = sock_hash_bucket_hash(key, key_size);
+ bucket = sock_hash_select_bucket(htab, hash);
+
+ raw_spin_lock_bh(&bucket->lock);
+ elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
+ if (elem && flags == BPF_NOEXIST) {
+ ret = -EEXIST;
+ goto out_unlock;
+ } else if (!elem && flags == BPF_EXIST) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
+ if (IS_ERR(elem_new)) {
+ ret = PTR_ERR(elem_new);
+ goto out_unlock;
+ }
+
+ sock_map_add_link(psock, link, map, elem_new);
+ /* Add new element to the head of the list, so that
+ * concurrent search will find it before old elem.
+ */
+ hlist_add_head_rcu(&elem_new->node, &bucket->head);
+ if (elem) {
+ hlist_del_rcu(&elem->node);
+ sock_map_unref(elem->sk, elem);
+ sock_hash_free_elem(htab, elem);
+ }
+ raw_spin_unlock_bh(&bucket->lock);
+ return 0;
+out_unlock:
+ raw_spin_unlock_bh(&bucket->lock);
+ sk_psock_put(sk, psock);
+out_free:
+ sk_psock_free_link(link);
+ return ret;
+}
+
+static int sock_hash_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 flags)
+{
+ u32 ufd = *(u32 *)value;
+ struct socket *sock;
+ struct sock *sk;
+ int ret;
+
+ sock = sockfd_lookup(ufd, &ret);
+ if (!sock)
+ return ret;
+ sk = sock->sk;
+ if (!sk) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!sock_map_sk_is_suitable(sk) ||
+ sk->sk_state != TCP_ESTABLISHED) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ sock_map_sk_acquire(sk);
+ ret = sock_hash_update_common(map, key, sk, flags);
+ sock_map_sk_release(sk);
+out:
+ fput(sock->file);
+ return ret;
+}
+
+static int sock_hash_get_next_key(struct bpf_map *map, void *key,
+ void *key_next)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct bpf_htab_elem *elem, *elem_next;
+ u32 hash, key_size = map->key_size;
+ struct hlist_head *head;
+ int i = 0;
+
+ if (!key)
+ goto find_first_elem;
+ hash = sock_hash_bucket_hash(key, key_size);
+ head = &sock_hash_select_bucket(htab, hash)->head;
+ elem = sock_hash_lookup_elem_raw(head, hash, key, key_size);
+ if (!elem)
+ goto find_first_elem;
+
+ elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)),
+ struct bpf_htab_elem, node);
+ if (elem_next) {
+ memcpy(key_next, elem_next->key, key_size);
+ return 0;
+ }
+
+ i = hash & (htab->buckets_num - 1);
+ i++;
+find_first_elem:
+ for (; i < htab->buckets_num; i++) {
+ head = &sock_hash_select_bucket(htab, i)->head;
+ elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+ struct bpf_htab_elem, node);
+ if (elem_next) {
+ memcpy(key_next, elem_next->key, key_size);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
+{
+ struct bpf_htab *htab;
+ int i, err;
+ u64 cost;
+
+ if (!capable(CAP_NET_ADMIN))
+ return ERR_PTR(-EPERM);
+ if (attr->max_entries == 0 ||
+ attr->key_size == 0 ||
+ attr->value_size != 4 ||
+ attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
+ return ERR_PTR(-EINVAL);
+ if (attr->key_size > MAX_BPF_STACK)
+ return ERR_PTR(-E2BIG);
+
+ htab = kzalloc(sizeof(*htab), GFP_USER);
+ if (!htab)
+ return ERR_PTR(-ENOMEM);
+
+ bpf_map_init_from_attr(&htab->map, attr);
+
+ htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
+ htab->elem_size = sizeof(struct bpf_htab_elem) +
+ round_up(htab->map.key_size, 8);
+ if (htab->buckets_num == 0 ||
+ htab->buckets_num > U32_MAX / sizeof(struct bpf_htab_bucket)) {
+ err = -EINVAL;
+ goto free_htab;
+ }
+
+ cost = (u64) htab->buckets_num * sizeof(struct bpf_htab_bucket) +
+ (u64) htab->elem_size * htab->map.max_entries;
+ if (cost >= U32_MAX - PAGE_SIZE) {
+ err = -EINVAL;
+ goto free_htab;
+ }
+
+ htab->buckets = bpf_map_area_alloc(htab->buckets_num *
+ sizeof(struct bpf_htab_bucket),
+ htab->map.numa_node);
+ if (!htab->buckets) {
+ err = -ENOMEM;
+ goto free_htab;
+ }
+
+ for (i = 0; i < htab->buckets_num; i++) {
+ INIT_HLIST_HEAD(&htab->buckets[i].head);
+ raw_spin_lock_init(&htab->buckets[i].lock);
+ }
+
+ return &htab->map;
+free_htab:
+ kfree(htab);
+ return ERR_PTR(err);
+}
+
+static void sock_hash_free(struct bpf_map *map)
+{
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ struct bpf_htab_bucket *bucket;
+ struct bpf_htab_elem *elem;
+ struct hlist_node *node;
+ int i;
+
+ synchronize_rcu();
+ rcu_read_lock();
+ for (i = 0; i < htab->buckets_num; i++) {
+ bucket = sock_hash_select_bucket(htab, i);
+ raw_spin_lock_bh(&bucket->lock);
+ hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
+ hlist_del_rcu(&elem->node);
+ sock_map_unref(elem->sk, elem);
+ }
+ raw_spin_unlock_bh(&bucket->lock);
+ }
+ rcu_read_unlock();
+
+ bpf_map_area_free(htab->buckets);
+ kfree(htab);
+}
+
+static void sock_hash_release_progs(struct bpf_map *map)
+{
+ psock_progs_drop(&container_of(map, struct bpf_htab, map)->progs);
+}
+
+BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
+ struct bpf_map *, map, void *, key, u64, flags)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (likely(sock_map_sk_is_suitable(sops->sk) &&
+ sock_map_op_okay(sops)))
+ return sock_hash_update_common(map, key, sops->sk, flags);
+ return -EOPNOTSUPP;
+}
+
+const struct bpf_func_proto bpf_sock_hash_update_proto = {
+ .func = bpf_sock_hash_update,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_PTR_TO_MAP_KEY,
+ .arg4_type = ARG_ANYTHING,
+};
+
+BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
+ struct bpf_map *, map, void *, key, u64, flags)
+{
+ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+
+ if (unlikely(flags & ~(BPF_F_INGRESS)))
+ return SK_DROP;
+ tcb->bpf.flags = flags;
+ tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key);
+ if (!tcb->bpf.sk_redir)
+ return SK_DROP;
+ return SK_PASS;
+}
+
+const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
+ .func = bpf_sk_redirect_hash,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_PTR_TO_MAP_KEY,
+ .arg4_type = ARG_ANYTHING,
+};
+
+BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
+ struct bpf_map *, map, void *, key, u64, flags)
+{
+ if (unlikely(flags & ~(BPF_F_INGRESS)))
+ return SK_DROP;
+ msg->flags = flags;
+ msg->sk_redir = __sock_hash_lookup_elem(map, key);
+ if (!msg->sk_redir)
+ return SK_DROP;
+ return SK_PASS;
+}
+
+const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
+ .func = bpf_msg_redirect_hash,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_PTR_TO_MAP_KEY,
+ .arg4_type = ARG_ANYTHING,
+};
+
+const struct bpf_map_ops sock_hash_ops = {
+ .map_alloc = sock_hash_alloc,
+ .map_free = sock_hash_free,
+ .map_get_next_key = sock_hash_get_next_key,
+ .map_update_elem = sock_hash_update_elem,
+ .map_delete_elem = sock_hash_delete_elem,
+ .map_lookup_elem = sock_map_lookup,
+ .map_release_uref = sock_hash_release_progs,
+ .map_check_btf = map_check_no_btf,
+};
+
+static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
+{
+ switch (map->map_type) {
+ case BPF_MAP_TYPE_SOCKMAP:
+ return &container_of(map, struct bpf_stab, map)->progs;
+ case BPF_MAP_TYPE_SOCKHASH:
+ return &container_of(map, struct bpf_htab, map)->progs;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
+ u32 which)
+{
+ struct sk_psock_progs *progs = sock_map_progs(map);
+
+ if (!progs)
+ return -EOPNOTSUPP;
+
+ switch (which) {
+ case BPF_SK_MSG_VERDICT:
+ psock_set_prog(&progs->msg_parser, prog);
+ break;
+ case BPF_SK_SKB_STREAM_PARSER:
+ psock_set_prog(&progs->skb_parser, prog);
+ break;
+ case BPF_SK_SKB_STREAM_VERDICT:
+ psock_set_prog(&progs->skb_verdict, prog);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link)
+{
+ switch (link->map->map_type) {
+ case BPF_MAP_TYPE_SOCKMAP:
+ return sock_map_delete_from_link(link->map, sk,
+ link->link_raw);
+ case BPF_MAP_TYPE_SOCKHASH:
+ return sock_hash_delete_from_link(link->map, sk,
+ link->link_raw);
+ default:
+ break;
+ }
+}
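
For orientation, a hedged userspace sketch of exercising the update path above through the libbpf helpers of this era (bpf_create_map() and bpf_map_update_elem() from tools/lib/bpf; error handling trimmed). The map value carries the socket's fd, and the update is refused unless the socket is an established TCP socket:

#include <linux/bpf.h>
#include <bpf/bpf.h>

int sock_map_insert(int sock_fd)
{
        __u32 key = 0, value = sock_fd;        /* value is the socket fd */
        int map_fd;

        map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP,
                                sizeof(key), sizeof(value),
                                1 /* max_entries */, 0 /* flags */);
        if (map_fd < 0)
                return map_fd;

        /* Fails with EOPNOTSUPP unless sock_fd is SOCK_STREAM/TCP in
         * TCP_ESTABLISHED, per sock_map_update_elem() above. */
        return bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
}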
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 89b6785cef2a..4b2b194f4f1f 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -94,11 +94,21 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
kfree(xa);
}
-static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
+void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
struct xdp_mem_allocator *xa;
int id = xdp_rxq->mem.id;
+ if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
+ WARN(1, "Missing register, driver bug");
+ return;
+ }
+
+ if (xdp_rxq->mem.type != MEM_TYPE_PAGE_POOL &&
+ xdp_rxq->mem.type != MEM_TYPE_ZERO_COPY) {
+ return;
+ }
+
if (id == 0)
return;
@@ -110,6 +120,7 @@ static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
mutex_unlock(&mem_id_lock);
}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
@@ -119,7 +130,7 @@ void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");
- __xdp_rxq_info_unreg_mem_model(xdp_rxq);
+ xdp_rxq_info_unreg_mem_model(xdp_rxq);
xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
xdp_rxq->dev = NULL;
@@ -398,3 +409,41 @@ void xdp_attachment_setup(struct xdp_attachment_info *info,
info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);
+
+struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
+{
+ unsigned int metasize, totsize;
+ void *addr, *data_to_copy;
+ struct xdp_frame *xdpf;
+ struct page *page;
+
+ /* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
+ metasize = xdp_data_meta_unsupported(xdp) ? 0 :
+ xdp->data - xdp->data_meta;
+ totsize = xdp->data_end - xdp->data + metasize;
+
+ if (sizeof(*xdpf) + totsize > PAGE_SIZE)
+ return NULL;
+
+ page = dev_alloc_page();
+ if (!page)
+ return NULL;
+
+ addr = page_to_virt(page);
+ xdpf = addr;
+ memset(xdpf, 0, sizeof(*xdpf));
+
+ addr += sizeof(*xdpf);
+ data_to_copy = metasize ? xdp->data_meta : xdp->data;
+ memcpy(addr, data_to_copy, totsize);
+
+ xdpf->data = addr + metasize;
+ xdpf->len = totsize - metasize;
+ xdpf->headroom = 0;
+ xdpf->metasize = metasize;
+ xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+
+ xdp_return_buff(xdp);
+ return xdpf;
+}
+EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
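
xdp_convert_zc_to_xdp_frame() lets a driver detach a zero-copy buffer from its UMEM by copying it into a plain order-0 page before redirecting or queueing it. A hedged driver-side sketch; the queueing function is hypothetical:

static int zc_redirect_one(struct xdp_buff *xdp)
{
        struct xdp_frame *xdpf;

        /* Copies data plus metadata into a fresh page and returns the
         * zero-copy buff via xdp_return_buff(); NULL means the payload
         * would not fit in a single page. */
        xdpf = xdp_convert_zc_to_xdp_frame(xdp);
        if (!xdpf)
                return -ENOMEM;

        return my_driver_queue_frame(xdpf);        /* hypothetical */
}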
diff --git a/net/dccp/input.c b/net/dccp/input.c
index d28d46bff6ab..85d6c879383d 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -606,11 +606,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (sk->sk_state == DCCP_LISTEN) {
if (dh->dccph_type == DCCP_PKT_REQUEST) {
/* It is possible that we process SYN packets from backlog,
- * so we need to make sure to disable BH right there.
+ * so we need to make sure to disable BH and RCU right there.
*/
+ rcu_read_lock();
local_bh_disable();
acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
local_bh_enable();
+ rcu_read_unlock();
if (!acceptable)
return 1;
consume_skb(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index b08feb219b44..8e08cea6f178 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -493,9 +493,11 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
ireq->ir_rmt_addr);
+ rcu_read_lock();
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- ireq_opt_deref(ireq));
+ rcu_dereference(ireq->ireq_opt));
+ rcu_read_unlock();
err = net_xmit_eval(err);
}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 875858c8b059..43733accf58e 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -325,7 +325,7 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
__poll_t mask;
struct sock *sk = sock->sk;
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
if (sk->sk_state == DCCP_LISTEN)
return inet_csk_listen_poll(sk);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index bfd43e8f2c06..d0b3e69c6b39 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1363,7 +1363,7 @@ static int dn_dev_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu"
" %04hu %03d %02x %-10s %-7s %-7s\n",
- dev->name ? dev->name : "???",
+ dev->name,
dn_type2asc(dn_db->parms.mode),
0, 0,
dn_db->t3, dn_db->parms.t3,
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 7f4534828f6c..a65d553e730d 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -29,6 +29,7 @@
#include <linux/keyctl.h>
#include <linux/err.h>
#include <linux/seq_file.h>
+#include <linux/dns_resolver.h>
#include <keys/dns_resolver-type.h>
#include <keys/user-type.h>
#include "internal.h"
@@ -48,27 +49,86 @@ const struct cred *dns_resolver_cache;
/*
* Preparse instantiation data for a dns_resolver key.
*
- * The data must be a NUL-terminated string, with the NUL char accounted in
- * datalen.
+ * For normal hostname lookups, the data must be a NUL-terminated string, with
+ * the NUL char accounted in datalen.
*
* If the data contains a '#' characters, then we take the clause after each
* one to be an option of the form 'key=value'. The actual data of interest is
* the string leading up to the first '#'. For instance:
*
* "ip1,ip2,...#foo=bar"
+ *
+ * For server list requests, the data must begin with a NUL char and be
+ * followed by a byte indicating the version of the data format. Version 1
+ * looks something like (note this is packed):
+ *
+ * u8 Non-string marker (i.e. 0)
+ * u8 Content (DNS_PAYLOAD_IS_*)
+ * u8 Version (e.g. 1)
+ * u8 Source of server list
+ * u8 Lookup status of server list
+ * u8 Number of servers
+ * foreach-server {
+ * __le16 Name length
+ * __le16 Priority (as per SRV record, low first)
+ * __le16 Weight (as per SRV record, higher first)
+ * __le16 Port
+ * u8 Source of address list
+ * u8 Lookup status of address list
+ * u8 Protocol (DNS_SERVER_PROTOCOL_*)
+ * u8 Number of addresses
+ * char[] Name (not NUL-terminated)
+ * foreach-address {
+ * u8 Family (DNS_ADDRESS_IS_*)
+ * union {
+ * u8[4] ipv4_addr
+ * u8[16] ipv6_addr
+ * }
+ * }
+ * }
+ *
*/
static int
dns_resolver_preparse(struct key_preparsed_payload *prep)
{
+ const struct dns_payload_header *bin;
struct user_key_payload *upayload;
unsigned long derrno;
int ret;
int datalen = prep->datalen, result_len = 0;
const char *data = prep->data, *end, *opt;
+ if (datalen <= 1 || !data)
+ return -EINVAL;
+
+ if (data[0] == 0) {
+ /* It may be a server list. */
+ if (datalen <= sizeof(*bin))
+ return -EINVAL;
+
+ bin = (const struct dns_payload_header *)data;
+ kenter("[%u,%u],%u", bin->content, bin->version, datalen);
+ if (bin->content != DNS_PAYLOAD_IS_SERVER_LIST) {
+ pr_warn_ratelimited(
+ "dns_resolver: Unsupported content type (%u)\n",
+ bin->content);
+ return -EINVAL;
+ }
+
+ if (bin->version != 1) {
+ pr_warn_ratelimited(
+ "dns_resolver: Unsupported server list version (%u)\n",
+ bin->version);
+ return -EINVAL;
+ }
+
+ result_len = datalen;
+ goto store_result;
+ }
+
kenter("'%*.*s',%u", datalen, datalen, data, datalen);
- if (datalen <= 1 || !data || data[datalen - 1] != '\0')
+ if (!data || data[datalen - 1] != '\0')
return -EINVAL;
datalen--;
@@ -144,6 +204,7 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
return 0;
}
+store_result:
kdebug("store result");
prep->quotalen = result_len;
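
The packed version-1 layout documented in the comment above maps onto the structures below. struct dns_payload_header is the header type the code dereferences (its fields match the first three bytes); the remaining struct and field names are illustrative only, in a kernel-style sketch:

/* The leading three bytes, as dereferenced via bin-> above. */
struct dns_payload_header {
        __u8        zero;                /* non-string marker, always 0 */
        __u8        content;        /* DNS_PAYLOAD_IS_SERVER_LIST */
        __u8        version;        /* 1 */
} __packed;

/* Illustrative: the rest of the v1 preamble. */
struct v1_server_list {
        struct dns_payload_header hdr;
        __u8        source;                /* source of server list */
        __u8        status;                /* lookup status of server list */
        __u8        nr_servers;        /* records that follow */
} __packed;

/* Illustrative: one record, repeated nr_servers times. */
struct v1_server {
        __le16        name_len;
        __le16        priority;        /* as per SRV record, low first */
        __le16        weight;                /* as per SRV record, higher first */
        __le16        port;
        __u8        source;                /* source of address list */
        __u8        status;                /* lookup status of address list */
        __u8        protocol;        /* DNS_SERVER_PROTOCOL_* */
        __u8        nr_addrs;
        /* then name[name_len] (not NUL-terminated), then nr_addrs
         * addresses: a family byte plus 4 or 16 address bytes each */
} __packed;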
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 49da67034f29..76338c38738a 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -148,12 +148,9 @@ int dns_query(const char *type, const char *name, size_t namelen,
if (_result) {
ret = -ENOMEM;
- *_result = kmalloc(len + 1, GFP_KERNEL);
+ *_result = kmemdup_nul(upayload->data, len, GFP_KERNEL);
if (!*_result)
goto put;
-
- memcpy(*_result, upayload->data, len);
- (*_result)[len] = '\0';
}
if (_expiry)
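
kmemdup_nul() collapses the open-coded allocate/copy/terminate sequence removed here; its implementation in mm/util.c is essentially:

char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}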
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 4183e4ba27a5..48c41918fb35 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -38,6 +38,9 @@ config NET_DSA_TAG_DSA
config NET_DSA_TAG_EDSA
bool
+config NET_DSA_TAG_GSWIP
+ bool
+
config NET_DSA_TAG_KSZ
bool
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 9e4d3536f977..6e721f7a2947 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -9,6 +9,7 @@ dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
dsa_core-$(CONFIG_NET_DSA_TAG_BRCM_PREPEND) += tag_brcm.o
dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
+dsa_core-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
dsa_core-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
dsa_core-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
dsa_core-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 9f3209ff7ffd..a69c1790bbfc 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -52,6 +52,9 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
#ifdef CONFIG_NET_DSA_TAG_EDSA
[DSA_TAG_PROTO_EDSA] = &edsa_netdev_ops,
#endif
+#ifdef CONFIG_NET_DSA_TAG_GSWIP
+ [DSA_TAG_PROTO_GSWIP] = &gswip_netdev_ops,
+#endif
#ifdef CONFIG_NET_DSA_TAG_KSZ
[DSA_TAG_PROTO_KSZ] = &ksz_netdev_ops,
#endif
@@ -70,6 +73,52 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
[DSA_TAG_PROTO_NONE] = &none_ops,
};
+const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
+{
+ const char *protocol_name[DSA_TAG_LAST] = {
+#ifdef CONFIG_NET_DSA_TAG_BRCM
+ [DSA_TAG_PROTO_BRCM] = "brcm",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_BRCM_PREPEND
+ [DSA_TAG_PROTO_BRCM_PREPEND] = "brcm-prepend",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_DSA
+ [DSA_TAG_PROTO_DSA] = "dsa",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+ [DSA_TAG_PROTO_EDSA] = "edsa",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_GSWIP
+ [DSA_TAG_PROTO_GSWIP] = "gswip",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_KSZ
+ [DSA_TAG_PROTO_KSZ] = "ksz",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_LAN9303
+ [DSA_TAG_PROTO_LAN9303] = "lan9303",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_MTK
+ [DSA_TAG_PROTO_MTK] = "mtk",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_QCA
+ [DSA_TAG_PROTO_QCA] = "qca",
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+ [DSA_TAG_PROTO_TRAILER] = "trailer",
+#endif
+ [DSA_TAG_PROTO_NONE] = "none",
+ };
+ unsigned int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(protocol_name) != DSA_TAG_LAST);
+
+ for (i = 0; i < ARRAY_SIZE(dsa_device_ops); i++)
+ if (ops == dsa_device_ops[i])
+ return protocol_name[i];
+
+ return protocol_name[DSA_TAG_PROTO_NONE];
+};
+
const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol)
{
const struct dsa_device_ops *ops;
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 3964c6f7a7c0..9e4fd04ab53c 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -86,6 +86,7 @@ struct dsa_slave_priv {
/* dsa.c */
const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol);
bool dsa_schedule_work(struct work_struct *work);
+const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
/* legacy.c */
#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
@@ -205,6 +206,9 @@ extern const struct dsa_device_ops dsa_netdev_ops;
/* tag_edsa.c */
extern const struct dsa_device_ops edsa_netdev_ops;
+/* tag_gswip.c */
+extern const struct dsa_device_ops gswip_netdev_ops;
+
/* tag_ksz.c */
extern const struct dsa_device_ops ksz_netdev_ops;
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index 42a7b85b84e1..cb42939db776 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -392,8 +392,7 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
}
/* Drop our reference to the MDIO bus device */
- if (pd->chip[i].host_dev)
- put_device(pd->chip[i].host_dev);
+ put_device(pd->chip[i].host_dev);
}
kfree(pd->chip);
}
@@ -687,8 +686,7 @@ static void dsa_shutdown(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int dsa_suspend(struct device *d)
{
- struct platform_device *pdev = to_platform_device(d);
- struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
+ struct dsa_switch_tree *dst = dev_get_drvdata(d);
int i, ret = 0;
for (i = 0; i < dst->pd->nr_chips; i++) {
@@ -703,8 +701,7 @@ static int dsa_suspend(struct device *d)
static int dsa_resume(struct device *d)
{
- struct platform_device *pdev = to_platform_device(d);
- struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
+ struct dsa_switch_tree *dst = dev_get_drvdata(d);
int i, ret = 0;
for (i = 0; i < dst->pd->nr_chips; i++) {
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 1c45c1d6d241..7d0c19e7edcf 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -722,7 +722,7 @@ static void dsa_slave_netpoll_cleanup(struct net_device *dev)
p->netpoll = NULL;
- __netpoll_free_async(netpoll);
+ __netpoll_free(netpoll);
}
static void dsa_slave_poll_controller(struct net_device *dev)
@@ -1058,6 +1058,27 @@ static struct device_type dsa_type = {
.name = "dsa",
};
+static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *dev = to_net_dev(d);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+
+ return sprintf(buf, "%s\n",
+ dsa_tag_protocol_to_str(dp->cpu_dp->tag_ops));
+}
+static DEVICE_ATTR_RO(tagging);
+
+static struct attribute *dsa_slave_attrs[] = {
+ &dev_attr_tagging.attr,
+ NULL
+};
+
+static const struct attribute_group dsa_group = {
+ .name = "dsa",
+ .attrs = dsa_slave_attrs,
+};
+
static void dsa_slave_phylink_validate(struct net_device *dev,
unsigned long *supported,
struct phylink_link_state *state)
@@ -1353,8 +1374,14 @@ int dsa_slave_create(struct dsa_port *port)
goto out_phy;
}
+ ret = sysfs_create_group(&slave_dev->dev.kobj, &dsa_group);
+ if (ret)
+ goto out_unreg;
+
return 0;
+out_unreg:
+ unregister_netdev(slave_dev);
out_phy:
rtnl_lock();
phylink_disconnect_phy(p->dp->pl);
@@ -1378,6 +1405,7 @@ void dsa_slave_destroy(struct net_device *slave_dev)
rtnl_unlock();
dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
+ sysfs_remove_group(&slave_dev->dev.kobj, &dsa_group);
unregister_netdev(slave_dev);
phylink_destroy(dp->pl);
free_percpu(p->stats64);
@@ -1450,6 +1478,7 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
netdev_dbg(dev, "fdb add failed err=%d\n", err);
break;
}
+ fdb_info->offloaded = true;
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
&fdb_info->info);
break;
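
With the attribute group registered, each slave netdev grows a dsa/tagging file exposing the CPU port's tag protocol. A hedged userspace sketch; "lan0" is a hypothetical slave interface name:

#include <stdio.h>

int main(void)
{
        char proto[32];
        FILE *f = fopen("/sys/class/net/lan0/dsa/tagging", "r");

        if (!f)
                return 1;
        if (fgets(proto, sizeof(proto), f))
                printf("tag protocol: %s", proto);        /* e.g. "gswip\n" */
        fclose(f);
        return 0;
}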
diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c
new file mode 100644
index 000000000000..49e9b73f1be3
--- /dev/null
+++ b/net/dsa/tag_gswip.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel / Lantiq GSWIP V2.0 PMAC tag support
+ *
+ * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/dsa.h>
+
+#include "dsa_priv.h"
+
+#define GSWIP_TX_HEADER_LEN 4
+
+/* special tag in TX path header */
+/* Byte 0 */
+#define GSWIP_TX_SLPID_SHIFT 0 /* source port ID */
+#define GSWIP_TX_SLPID_CPU 2
+#define GSWIP_TX_SLPID_APP1 3
+#define GSWIP_TX_SLPID_APP2 4
+#define GSWIP_TX_SLPID_APP3 5
+#define GSWIP_TX_SLPID_APP4 6
+#define GSWIP_TX_SLPID_APP5 7
+
+/* Byte 1 */
+#define GSWIP_TX_CRCGEN_DIS BIT(7)
+#define GSWIP_TX_DPID_SHIFT 0 /* destination group ID */
+#define GSWIP_TX_DPID_ELAN 0
+#define GSWIP_TX_DPID_EWAN 1
+#define GSWIP_TX_DPID_CPU 2
+#define GSWIP_TX_DPID_APP1 3
+#define GSWIP_TX_DPID_APP2 4
+#define GSWIP_TX_DPID_APP3 5
+#define GSWIP_TX_DPID_APP4 6
+#define GSWIP_TX_DPID_APP5 7
+
+/* Byte 2 */
+#define GSWIP_TX_PORT_MAP_EN BIT(7)
+#define GSWIP_TX_PORT_MAP_SEL BIT(6)
+#define GSWIP_TX_LRN_DIS BIT(5)
+#define GSWIP_TX_CLASS_EN BIT(4)
+#define GSWIP_TX_CLASS_SHIFT 0
+#define GSWIP_TX_CLASS_MASK GENMASK(3, 0)
+
+/* Byte 3 */
+#define GSWIP_TX_DPID_EN BIT(0)
+#define GSWIP_TX_PORT_MAP_SHIFT 1
+#define GSWIP_TX_PORT_MAP_MASK GENMASK(6, 1)
+
+#define GSWIP_RX_HEADER_LEN 8
+
+/* special tag in RX path header */
+/* Byte 7 */
+#define GSWIP_RX_SPPID_SHIFT 4
+#define GSWIP_RX_SPPID_MASK GENMASK(6, 4)
+
+static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ int err;
+ u8 *gswip_tag;
+
+ err = skb_cow_head(skb, GSWIP_TX_HEADER_LEN);
+ if (err)
+ return NULL;
+
+ skb_push(skb, GSWIP_TX_HEADER_LEN);
+
+ gswip_tag = skb->data;
+ gswip_tag[0] = GSWIP_TX_SLPID_CPU;
+ gswip_tag[1] = GSWIP_TX_DPID_ELAN;
+ gswip_tag[2] = GSWIP_TX_PORT_MAP_EN | GSWIP_TX_PORT_MAP_SEL;
+ gswip_tag[3] = BIT(dp->index + GSWIP_TX_PORT_MAP_SHIFT) & GSWIP_TX_PORT_MAP_MASK;
+ gswip_tag[3] |= GSWIP_TX_DPID_EN;
+
+ return skb;
+}
+
+static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *pt)
+{
+ int port;
+ u8 *gswip_tag;
+
+ if (unlikely(!pskb_may_pull(skb, GSWIP_RX_HEADER_LEN)))
+ return NULL;
+
+ gswip_tag = skb->data - ETH_HLEN;
+
+ /* Get source port information */
+ port = (gswip_tag[7] & GSWIP_RX_SPPID_MASK) >> GSWIP_RX_SPPID_SHIFT;
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev)
+ return NULL;
+
+ /* remove GSWIP tag */
+ skb_pull_rcsum(skb, GSWIP_RX_HEADER_LEN);
+
+ return skb;
+}
+
+const struct dsa_device_ops gswip_netdev_ops = {
+ .xmit = gswip_tag_xmit,
+ .rcv = gswip_tag_rcv,
+};
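
In byte 3 of the TX header, the destination port map sits in bits 6..1 (GENMASK(6, 1) == 0x7e) with DPID_EN in bit 0, so gswip_tag_xmit() sets exactly one port bit per frame. A self-contained demo of that encoding:

#include <stdio.h>

#define BIT(n)                  (1u << (n))
#define TX_PORT_MAP_SHIFT       1
#define TX_PORT_MAP_MASK        0x7eu        /* GENMASK(6, 1) */
#define TX_DPID_EN              BIT(0)

int main(void)
{
        unsigned int port;

        for (port = 0; port < 6; port++) {
                unsigned int byte3 = BIT(port + TX_PORT_MAP_SHIFT) &
                                     TX_PORT_MAP_MASK;

                byte3 |= TX_DPID_EN;
                printf("port %u -> byte3 0x%02x\n", port, byte3);
        }
        return 0;        /* port 0 -> 0x03 ... port 5 -> 0x41 */
}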
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index e7857a8ac86d..d14226ecfde4 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -260,7 +260,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
}
sub_frag_mem_limit(fq->q.net, sum_truesize);
- head->next = NULL;
+ skb_mark_not_on_list(head);
head->dev = ldev;
head->tstamp = fq->q.stamp;
@@ -463,7 +463,6 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
table[0].data = &ieee802154_lowpan->frags.high_thresh;
table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
- table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
table[1].data = &ieee802154_lowpan->frags.low_thresh;
table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
table[2].data = &ieee802154_lowpan->frags.timeout;
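
skb_mark_not_on_list() replaces the open-coded skb->next = NULL store here, as it does in __release_sock() further up; the helper is presumably the trivial:

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
        skb->next = NULL;
}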
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 7446b98661d8..58629314eae9 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
+obj-$(CONFIG_NET_SOCK_MSG) += tcp_bpf.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 20fda8fb8ffd..1fbe2f815474 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1377,6 +1377,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
if (encap)
skb_reset_inner_headers(skb);
skb->network_header = (u8 *)iph - skb->head;
+ skb_reset_mac_len(skb);
} while ((skb = skb->next));
out:
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 4dd95cdd8070..c01fa791260d 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -461,9 +461,9 @@ static int ah4_err(struct sk_buff *skb, u32 info)
return 0;
if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
- ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
+ ipv4_update_pmtu(skb, net, info, 0, IPPROTO_AH);
else
- ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
+ ipv4_redirect(skb, net, 0, IPPROTO_AH);
xfrm_state_put(x);
return 0;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index e90c89ef8c08..850a6f13a082 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1255,6 +1255,8 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event,
change_info = ptr;
if (change_info->flags_changed & IFF_NOARP)
neigh_changeaddr(&arp_tbl, dev);
+ if (!netif_carrier_ok(dev))
+ neigh_carrier_down(&arp_tbl, dev);
break;
default:
break;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 82178cc69c96..777fa3b7fb13 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1512,7 +1512,7 @@ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
*
* Description:
* Parse the packet's IP header looking for a CIPSO option. Returns a pointer
- * to the start of the CIPSO option on success, NULL if one if not found.
+ * to the start of the CIPSO option on success, NULL if one is not found.
*
*/
unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
@@ -1522,10 +1522,8 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
int optlen;
int taglen;
- for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
+ for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 1; ) {
switch (optptr[0]) {
- case IPOPT_CIPSO:
- return optptr;
case IPOPT_END:
return NULL;
case IPOPT_NOOP:
@@ -1534,6 +1532,11 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
default:
taglen = optptr[1];
}
+ if (!taglen || taglen > optlen)
+ return NULL;
+ if (optptr[0] == IPOPT_CIPSO)
+ return optptr;
+
optlen -= taglen;
optptr += taglen;
}
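
The reordered walk above validates taglen before matching IPOPT_CIPSO, so a malformed option (a zero length byte, or a length running past the option area) ends the scan instead of being returned or overrun. A self-contained model of the fixed loop:

#include <stddef.h>

#define IPOPT_END       0
#define IPOPT_NOOP      1
#define IPOPT_CIPSO     134

static const unsigned char *find_cipso(const unsigned char *optptr, int optlen)
{
        int taglen;

        while (optlen > 1) {        /* need a length byte to trust */
                switch (optptr[0]) {
                case IPOPT_END:
                        return NULL;
                case IPOPT_NOOP:
                        taglen = 1;
                        break;
                default:
                        taglen = optptr[1];
                }
                if (!taglen || taglen > optlen)
                        return NULL;        /* malformed: stop the walk */
                if (optptr[0] == IPOPT_CIPSO)
                        return optptr;
                optlen -= taglen;
                optptr += taglen;
        }
        return NULL;
}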
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index f915abff1350..300921417f89 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -42,7 +42,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
oif = sk->sk_bound_dev_if;
saddr = inet->inet_saddr;
if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
- if (!oif)
+ if (!oif || netif_index_is_l3_master(sock_net(sk), oif))
oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ea4bd8a52422..63d5b58fbfdb 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -100,6 +100,16 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
[IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
[IFA_FLAGS] = { .type = NLA_U32 },
[IFA_RT_PRIORITY] = { .type = NLA_U32 },
+ [IFA_TARGET_NETNSID] = { .type = NLA_S32 },
+};
+
+struct inet_fill_args {
+ u32 portid;
+ u32 seq;
+ int event;
+ unsigned int flags;
+ int netnsid;
+ int ifindex;
};
#define IN4_ADDR_HSIZE_SHIFT 8
@@ -773,7 +783,8 @@ static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
}
static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
- __u32 *pvalid_lft, __u32 *pprefered_lft)
+ __u32 *pvalid_lft, __u32 *pprefered_lft,
+ struct netlink_ext_ack *extack)
{
struct nlattr *tb[IFA_MAX+1];
struct in_ifaddr *ifa;
@@ -783,7 +794,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
int err;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy,
- NULL);
+ extack);
if (err < 0)
goto errout;
@@ -888,7 +899,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
ASSERT_RTNL();
- ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
+ ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft, extack);
if (IS_ERR(ifa))
return PTR_ERR(ifa);
@@ -1584,13 +1595,14 @@ static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
}
static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
- u32 portid, u32 seq, int event, unsigned int flags)
+ struct inet_fill_args *args)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
u32 preferred, valid;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
+ nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
+ args->flags);
if (!nlh)
return -EMSGSIZE;
@@ -1601,6 +1613,10 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
+ if (args->netnsid >= 0 &&
+ nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
+ goto nla_put_failure;
+
if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
preferred = ifa->ifa_preferred_lft;
valid = ifa->ifa_valid_lft;
@@ -1645,27 +1661,138 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
+ struct inet_fill_args *fillargs,
+ struct net **tgt_net, struct sock *sk,
+ struct netlink_callback *cb)
+{
+ struct netlink_ext_ack *extack = cb->extack;
+ struct nlattr *tb[IFA_MAX+1];
+ struct ifaddrmsg *ifm;
+ int err, i;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid header for address dump request");
+ return -EINVAL;
+ }
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for address dump request");
+ return -EINVAL;
+ }
+
+ fillargs->ifindex = ifm->ifa_index;
+ if (fillargs->ifindex) {
+ cb->answer_flags |= NLM_F_DUMP_FILTERED;
+ fillargs->flags |= NLM_F_DUMP_FILTERED;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
+ ifa_ipv4_policy, extack);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i <= IFA_MAX; ++i) {
+ if (!tb[i])
+ continue;
+
+ if (i == IFA_TARGET_NETNSID) {
+ struct net *net;
+
+ fillargs->netnsid = nla_get_s32(tb[i]);
+
+ net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
+ if (IS_ERR(net)) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid target network namespace id");
+ return PTR_ERR(net);
+ }
+ *tgt_net = net;
+ } else {
+ NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in dump request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
+ struct netlink_callback *cb, int s_ip_idx,
+ struct inet_fill_args *fillargs)
+{
+ struct in_ifaddr *ifa;
+ int ip_idx = 0;
+ int err;
+
+ for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next, ip_idx++) {
+ if (ip_idx < s_ip_idx)
+ continue;
+
+ err = inet_fill_ifaddr(skb, ifa, fillargs);
+ if (err < 0)
+ goto done;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ }
+ err = 0;
+
+done:
+ cb->args[2] = ip_idx;
+
+ return err;
+}
+
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
+ struct inet_fill_args fillargs = {
+ .portid = NETLINK_CB(cb->skb).portid,
+ .seq = nlh->nlmsg_seq,
+ .event = RTM_NEWADDR,
+ .flags = NLM_F_MULTI,
+ .netnsid = -1,
+ };
struct net *net = sock_net(skb->sk);
+ struct net *tgt_net = net;
int h, s_h;
int idx, s_idx;
- int ip_idx, s_ip_idx;
+ int s_ip_idx;
struct net_device *dev;
struct in_device *in_dev;
- struct in_ifaddr *ifa;
struct hlist_head *head;
+ int err;
s_h = cb->args[0];
s_idx = idx = cb->args[1];
- s_ip_idx = ip_idx = cb->args[2];
+ s_ip_idx = cb->args[2];
+
+ if (cb->strict_check) {
+ err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
+ skb->sk, cb);
+ if (err < 0)
+ return err;
+
+ if (fillargs.ifindex) {
+ dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
+ if (!dev)
+ return -ENODEV;
+
+ in_dev = __in_dev_get_rtnl(dev);
+ if (in_dev) {
+ err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
+ &fillargs);
+ }
+ goto put_tgt_net;
+ }
+ }
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
- head = &net->dev_index_head[h];
+ head = &tgt_net->dev_index_head[h];
rcu_read_lock();
- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
- net->dev_base_seq;
+ cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
+ tgt_net->dev_base_seq;
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
@@ -1675,18 +1802,11 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
if (!in_dev)
goto cont;
- for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
- ifa = ifa->ifa_next, ip_idx++) {
- if (ip_idx < s_ip_idx)
- continue;
- if (inet_fill_ifaddr(skb, ifa,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWADDR, NLM_F_MULTI) < 0) {
- rcu_read_unlock();
- goto done;
- }
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
+ &fillargs);
+ if (err < 0) {
+ rcu_read_unlock();
+ goto done;
}
cont:
idx++;
@@ -1697,7 +1817,9 @@ cont:
done:
cb->args[0] = h;
cb->args[1] = idx;
- cb->args[2] = ip_idx;
+put_tgt_net:
+ if (fillargs.netnsid >= 0)
+ put_net(tgt_net);
return skb->len;
}
@@ -1705,8 +1827,14 @@ done:
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
u32 portid)
{
+ struct inet_fill_args fillargs = {
+ .portid = portid,
+ .seq = nlh ? nlh->nlmsg_seq : 0,
+ .event = event,
+ .flags = 0,
+ .netnsid = -1,
+ };
struct sk_buff *skb;
- u32 seq = nlh ? nlh->nlmsg_seq : 0;
int err = -ENOBUFS;
struct net *net;
@@ -1715,7 +1843,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
if (!skb)
goto errout;
- err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
+ err = inet_fill_ifaddr(skb, ifa, &fillargs);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -1995,6 +2123,7 @@ errout:
static int inet_netconf_dump_devconf(struct sk_buff *skb,
struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
int h, s_h;
int idx, s_idx;
@@ -2002,6 +2131,21 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
struct in_device *in_dev;
struct hlist_head *head;
+ if (cb->strict_check) {
+ struct netlink_ext_ack *extack = cb->extack;
+ struct netconfmsg *ncm;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid header for netconf dump request");
+ return -EINVAL;
+ }
+
+ if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid data after header in netconf dump request");
+ return -EINVAL;
+ }
+ }
+
s_h = cb->args[0];
s_idx = idx = cb->args[1];
@@ -2021,7 +2165,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
if (inet_netconf_fill_devconf(skb, dev->ifindex,
&in_dev->cnf,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
+ nlh->nlmsg_seq,
RTM_NEWNETCONF,
NLM_F_MULTI,
NETCONFA_ALL) < 0) {
@@ -2038,7 +2182,7 @@ cont:
if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
net->ipv4.devconf_all,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
+ nlh->nlmsg_seq,
RTM_NEWNETCONF, NLM_F_MULTI,
NETCONFA_ALL) < 0)
goto done;
@@ -2049,7 +2193,7 @@ cont:
if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
net->ipv4.devconf_dflt,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
+ nlh->nlmsg_seq,
RTM_NEWNETCONF, NLM_F_MULTI,
NETCONFA_ALL) < 0)
goto done;
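
The strict-check path accepts exactly one attribute, IFA_TARGET_NETNSID, so a dumper can aim RTM_GETADDR at another network namespace. A hedged sketch of the raw request framing (socket plumbing omitted; assumes uapi headers new enough to define IFA_TARGET_NETNSID):

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_addr.h>

struct addr_dump_req {
        struct nlmsghdr nlh;
        struct ifaddrmsg ifm;        /* all-zero except family */
        struct nlattr nla;
        __s32 netnsid;
};

static void build_req(struct addr_dump_req *req, __s32 netnsid)
{
        memset(req, 0, sizeof(*req));
        req->nlh.nlmsg_len = sizeof(*req);
        req->nlh.nlmsg_type = RTM_GETADDR;
        req->nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        req->ifm.ifa_family = AF_INET;
        req->nla.nla_type = IFA_TARGET_NETNSID;
        req->nla.nla_len = NLA_HDRLEN + sizeof(__s32);
        req->netnsid = netnsid;
}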
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 97689012b357..9e1c840596c5 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -683,12 +683,11 @@ static void esp_input_done_esn(struct crypto_async_request *base, int err)
*/
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
- struct ip_esp_hdr *esph;
struct crypto_aead *aead = x->data;
struct aead_request *req;
struct sk_buff *trailer;
int ivlen = crypto_aead_ivsize(aead);
- int elen = skb->len - sizeof(*esph) - ivlen;
+ int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
int nfrags;
int assoclen;
int seqhilen;
@@ -698,13 +697,13 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
struct scatterlist *sg;
int err = -EINVAL;
- if (!pskb_may_pull(skb, sizeof(*esph) + ivlen))
+ if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
goto out;
if (elen <= 0)
goto out;
- assoclen = sizeof(*esph);
+ assoclen = sizeof(struct ip_esp_hdr);
seqhilen = 0;
if (x->props.flags & XFRM_STATE_ESN) {
@@ -820,9 +819,9 @@ static int esp4_err(struct sk_buff *skb, u32 info)
return 0;
if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
- ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
+ ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
else
- ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
+ ipv4_redirect(skb, net, 0, IPPROTO_ESP);
xfrm_state_put(x);
return 0;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 2998b0e47d4b..5bf653f36911 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -315,6 +315,32 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
}
+bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev)
+{
+ bool dev_match = false;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ int ret;
+
+ for (ret = 0; ret < fi->fib_nhs; ret++) {
+ struct fib_nh *nh = &fi->fib_nh[ret];
+
+ if (nh->nh_dev == dev) {
+ dev_match = true;
+ break;
+ } else if (l3mdev_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
+ dev_match = true;
+ break;
+ }
+ }
+#else
+ if (fi->fib_nh[0].nh_dev == dev)
+ dev_match = true;
+#endif
+
+ return dev_match;
+}
+EXPORT_SYMBOL_GPL(fib_info_nh_uses_dev);
+
/* Given (packet source, input interface) and optional (dst, oif, tos):
* - (main) check, that source is valid i.e. not broadcast or our local
* address.
@@ -361,24 +387,8 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
(res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
goto e_inval;
fib_combine_itag(itag, &res);
- dev_match = false;
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
- for (ret = 0; ret < res.fi->fib_nhs; ret++) {
- struct fib_nh *nh = &res.fi->fib_nh[ret];
-
- if (nh->nh_dev == dev) {
- dev_match = true;
- break;
- } else if (l3mdev_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
- dev_match = true;
- break;
- }
- }
-#else
- if (FIB_RES_DEV(res) == dev)
- dev_match = true;
-#endif
+ dev_match = fib_info_nh_uses_dev(res.fi, dev);
if (dev_match) {
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
return ret;
@@ -792,19 +802,111 @@ errout:
return err;
}
+int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
+ struct fib_dump_filter *filter,
+ struct netlink_callback *cb)
+{
+ struct netlink_ext_ack *extack = cb->extack;
+ struct nlattr *tb[RTA_MAX + 1];
+ struct rtmsg *rtm;
+ int err, i;
+
+ ASSERT_RTNL();
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for FIB dump request");
+ return -EINVAL;
+ }
+
+ rtm = nlmsg_data(nlh);
+ if (rtm->rtm_dst_len || rtm->rtm_src_len || rtm->rtm_tos ||
+ rtm->rtm_scope) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for FIB dump request");
+ return -EINVAL;
+ }
+ if (rtm->rtm_flags & ~(RTM_F_CLONED | RTM_F_PREFIX)) {
+ NL_SET_ERR_MSG(extack, "Invalid flags for FIB dump request");
+ return -EINVAL;
+ }
+
+ filter->flags = rtm->rtm_flags;
+ filter->protocol = rtm->rtm_protocol;
+ filter->rt_type = rtm->rtm_type;
+ filter->table_id = rtm->rtm_table;
+
+ err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_ipv4_policy, extack);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i <= RTA_MAX; ++i) {
+ int ifindex;
+
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case RTA_TABLE:
+ filter->table_id = nla_get_u32(tb[i]);
+ break;
+ case RTA_OIF:
+ ifindex = nla_get_u32(tb[i]);
+ filter->dev = __dev_get_by_index(net, ifindex);
+ if (!filter->dev)
+ return -ENODEV;
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in dump request");
+ return -EINVAL;
+ }
+ }
+
+ if (filter->flags || filter->protocol || filter->rt_type ||
+ filter->table_id || filter->dev) {
+ filter->filter_set = 1;
+ cb->answer_flags = NLM_F_DUMP_FILTERED;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ip_valid_fib_dump_req);
+
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
+ struct fib_dump_filter filter = {};
unsigned int h, s_h;
unsigned int e = 0, s_e;
struct fib_table *tb;
struct hlist_head *head;
int dumped = 0, err;
- if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
- ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
+ if (cb->strict_check) {
+ err = ip_valid_fib_dump_req(net, nlh, &filter, cb);
+ if (err < 0)
+ return err;
+ } else if (nlmsg_len(nlh) >= sizeof(struct rtmsg)) {
+ struct rtmsg *rtm = nlmsg_data(nlh);
+
+ filter.flags = rtm->rtm_flags & (RTM_F_PREFIX | RTM_F_CLONED);
+ }
+
+ /* fib entries are never clones and ipv4 does not use prefix flag */
+ if (filter.flags & (RTM_F_PREFIX | RTM_F_CLONED))
return skb->len;
+ if (filter.table_id) {
+ tb = fib_get_table(net, filter.table_id);
+ if (!tb) {
+ NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
+ return -ENOENT;
+ }
+
+ err = fib_table_dump(tb, skb, cb, &filter);
+ return skb->len ? : err;
+ }
+
s_h = cb->args[0];
s_e = cb->args[1];
@@ -819,7 +921,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
if (dumped)
memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0]));
- err = fib_table_dump(tb, skb, cb);
+ err = fib_table_dump(tb, skb, cb, &filter);
if (err < 0) {
if (likely(skb->len))
goto out;
@@ -1243,7 +1345,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *upper_info = ptr;
+ struct netdev_notifier_info_ext *info_ext = ptr;
struct in_device *in_dev;
struct net *net = dev_net(dev);
unsigned int flags;
@@ -1278,16 +1381,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
fib_sync_up(dev, RTNH_F_LINKDOWN);
else
fib_sync_down_dev(dev, event, false);
- /* fall through */
+ rt_cache_flush(net);
+ break;
case NETDEV_CHANGEMTU:
+ fib_sync_mtu(dev, info_ext->ext.mtu);
rt_cache_flush(net);
break;
case NETDEV_CHANGEUPPER:
- info = ptr;
+ upper_info = ptr;
/* flush all routes if dev is linked to or unlinked from
* an L3 master device (e.g., VRF)
*/
- if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+ if (upper_info->upper_dev &&
+ netif_is_l3_master(upper_info->upper_dev))
fib_disable_ip(dev, NETDEV_DOWN, true);
break;
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f3c89ccf14c5..b5c3937ca6ec 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -208,7 +208,6 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
- struct dst_metrics *m;
change_nexthops(fi) {
if (nexthop_nh->nh_dev)
@@ -219,9 +218,8 @@ static void free_fib_info_rcu(struct rcu_head *head)
rt_fibinfo_free(&nexthop_nh->nh_rth_input);
} endfor_nexthops(fi);
- m = fi->fib_metrics;
- if (m != &dst_default_metrics && refcount_dec_and_test(&m->refcnt))
- kfree(m);
+ ip_fib_metrics_put(fi->fib_metrics);
+
kfree(fi);
}
@@ -797,8 +795,10 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_nh *nh,
return -EINVAL;
}
dev = __dev_get_by_index(net, nh->nh_oif);
- if (!dev)
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "Nexthop device required for onlink");
return -ENODEV;
+ }
if (!(dev->flags & IFF_UP)) {
NL_SET_ERR_MSG(extack,
"Nexthop device is not up");
@@ -1018,13 +1018,6 @@ static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
return true;
}
-static int
-fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
-{
- return ip_metrics_convert(fi->fib_net, cfg->fc_mx, cfg->fc_mx_len,
- fi->fib_metrics->metrics);
-}
-
struct fib_info *fib_create_info(struct fib_config *cfg,
struct netlink_ext_ack *extack)
{
@@ -1082,16 +1075,14 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
if (!fi)
goto failure;
- if (cfg->fc_mx) {
- fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
- if (unlikely(!fi->fib_metrics)) {
- kfree(fi);
- return ERR_PTR(err);
- }
- refcount_set(&fi->fib_metrics->refcnt, 1);
- } else {
- fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
+ fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
+ cfg->fc_mx_len);
+ if (unlikely(IS_ERR(fi->fib_metrics))) {
+ err = PTR_ERR(fi->fib_metrics);
+ kfree(fi);
+ return ERR_PTR(err);
}
+
fib_info_cnt++;
fi->fib_net = net;
fi->fib_protocol = cfg->fc_protocol;
@@ -1110,10 +1101,6 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
goto failure;
} endfor_nexthops(fi)
- err = fib_convert_metrics(fi, cfg);
- if (err)
- goto failure;
-
if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg, extack);
@@ -1470,6 +1457,56 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
return NOTIFY_DONE;
}
+/* Update the PMTU of exceptions when:
+ * - the new MTU of the first hop becomes smaller than the PMTU
+ * - the old MTU was the same as the PMTU, and it limited discovery of
+ * larger MTUs on the path. With that limit raised, we can now
+ * discover larger MTUs
+ * A special case is locked exceptions, for which the PMTU is smaller
+ * than the minimal accepted PMTU:
+ * - if the new MTU is greater than the PMTU, don't make any change
+ * - otherwise, unlock and set PMTU
+ */
+static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
+{
+ struct fnhe_hash_bucket *bucket;
+ int i;
+
+ bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
+ if (!bucket)
+ return;
+
+ for (i = 0; i < FNHE_HASH_SIZE; i++) {
+ struct fib_nh_exception *fnhe;
+
+ for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
+ fnhe;
+ fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
+ if (fnhe->fnhe_mtu_locked) {
+ if (new <= fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ fnhe->fnhe_mtu_locked = false;
+ }
+ } else if (new < fnhe->fnhe_pmtu ||
+ orig == fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ }
+ }
+ }
+}
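
For instance, with an unlocked exception holding fnhe_pmtu = 1500: lowering the first-hop MTU to 1280 rewrites the exception to 1280, and raising the MTU from 1500 to 9000 also rewrites it (orig == fnhe_pmtu), so a larger path MTU can be rediscovered. A locked exception with fnhe_pmtu = 552 is untouched by an MTU increase to 1500, while a drop to 552 or below both lowers fnhe_pmtu and clears the lock.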
+
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
+{
+ unsigned int hash = fib_devindex_hashfn(dev->ifindex);
+ struct hlist_head *head = &fib_info_devhash[hash];
+ struct fib_nh *nh;
+
+ hlist_for_each_entry(nh, head, nh_hash) {
+ if (nh->nh_dev == dev)
+ nh_update_mtu(nh, dev->mtu, orig_mtu);
+ }
+}
+
/* Event force Flags Description
* NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host
* NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5bc0c89e81e4..237c9f72b265 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2003,12 +2003,17 @@ void fib_free_table(struct fib_table *tb)
}
static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
- struct sk_buff *skb, struct netlink_callback *cb)
+ struct sk_buff *skb, struct netlink_callback *cb,
+ struct fib_dump_filter *filter)
{
+ unsigned int flags = NLM_F_MULTI;
__be32 xkey = htonl(l->key);
struct fib_alias *fa;
int i, s_i;
+ if (filter->filter_set)
+ flags |= NLM_F_DUMP_FILTERED;
+
s_i = cb->args[4];
i = 0;
@@ -2016,25 +2021,35 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
int err;
- if (i < s_i) {
- i++;
- continue;
- }
+ if (i < s_i)
+ goto next;
- if (tb->tb_id != fa->tb_id) {
- i++;
- continue;
+ if (tb->tb_id != fa->tb_id)
+ goto next;
+
+ if (filter->filter_set) {
+ if (filter->rt_type && fa->fa_type != filter->rt_type)
+ goto next;
+
+ if ((filter->protocol &&
+ fa->fa_info->fib_protocol != filter->protocol))
+ goto next;
+
+ if (filter->dev &&
+ !fib_info_nh_uses_dev(fa->fa_info, filter->dev))
+ goto next;
}
err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
tb->tb_id, fa->fa_type,
xkey, KEYLENGTH - fa->fa_slen,
- fa->fa_tos, fa->fa_info, NLM_F_MULTI);
+ fa->fa_tos, fa->fa_info, flags);
if (err < 0) {
cb->args[4] = i;
return err;
}
+next:
i++;
}
@@ -2044,7 +2059,7 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
/* rcu_read_lock needs to be held by the caller from the read side */
int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct netlink_callback *cb, struct fib_dump_filter *filter)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *l, *tp = t->kv;
@@ -2057,7 +2072,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
int err;
- err = fn_trie_dump_leaf(l, tb, skb, cb);
+ err = fn_trie_dump_leaf(l, tb, skb, cb, filter);
if (err < 0) {
cb->args[3] = key;
cb->args[2] = count;
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index b798862b6be5..7efe740c06eb 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -86,13 +86,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
options = (__be32 *)(greh + 1);
if (greh->flags & GRE_CSUM) {
- if (skb_checksum_simple_validate(skb)) {
+ if (!skb_checksum_simple_validate(skb)) {
+ skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+ null_compute_pseudo);
+ } else if (csum_err) {
*csum_err = true;
return -EINVAL;
}
- skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
- null_compute_pseudo);
options++;
}
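
Note: with this change a NULL csum_err argument tells gre_parse_header() to tolerate a bad GRE checksum silently rather than reporting it; the gre_err() path in ip_gre.c below relies on this by passing NULL, since ICMP error handling has no use for the flag. On a good checksum the skb's checksum state may still be converted via skb_checksum_try_convert().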
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 695979b7ef6d..d832beed6e3a 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1098,9 +1098,9 @@ void icmp_err(struct sk_buff *skb, u32 info)
}
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
- ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ICMP, 0);
+ ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP);
else if (type == ICMP_REDIRECT)
- ipv4_redirect(skb, net, 0, 0, IPPROTO_ICMP, 0);
+ ipv4_redirect(skb, net, 0, IPPROTO_ICMP);
}
/*
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index dfd5009f96ef..15e7f7915a21 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -544,7 +544,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
struct ip_options_rcu *opt;
struct rtable *rt;
- opt = ireq_opt_deref(ireq);
+ rcu_read_lock();
+ opt = rcu_dereference(ireq->ireq_opt);
flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
@@ -558,11 +559,13 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
goto no_route;
if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
goto route_err;
+ rcu_read_unlock();
return &rt->dst;
route_err:
ip_rt_put(rt);
no_route:
+ rcu_read_unlock();
__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
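
Note: inet_csk_route_req() now takes rcu_read_lock() itself and holds it across the route lookup, so the ireq->ireq_opt options blob cannot be freed while the strict-route check dereferences it; the previous ireq_opt_deref() call assumed the caller supplied that protection.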
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e7227128df2c..9b0158fa431f 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -260,8 +260,7 @@ out:
spin_unlock(&qp->q.lock);
out_rcu_unlock:
rcu_read_unlock();
- if (head)
- kfree_skb(head);
+ kfree_skb(head);
ipq_put(qp);
}
@@ -382,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
*/
if (end < qp->q.len ||
((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
- goto err;
+ goto discard_qp;
qp->q.flags |= INET_FRAG_LAST_IN;
qp->q.len = end;
} else {
@@ -394,20 +393,20 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
if (end > qp->q.len) {
/* Some bits beyond end -> corruption. */
if (qp->q.flags & INET_FRAG_LAST_IN)
- goto err;
+ goto discard_qp;
qp->q.len = end;
}
}
if (end == offset)
- goto err;
+ goto discard_qp;
err = -ENOMEM;
if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
- goto err;
+ goto discard_qp;
err = pskb_trim_rcsum(skb, end - offset);
if (err)
- goto err;
+ goto discard_qp;
/* Note : skb->rbnode and skb->dev share the same location. */
dev = skb->dev;
@@ -423,6 +422,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
* We do the same here for IPv4 (and increment an snmp counter).
*/
+ err = -EINVAL;
/* Find out where to put this fragment. */
prev_tail = qp->q.fragments_tail;
if (!prev_tail)
@@ -431,7 +431,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
/* This is the common case: skb goes to the end. */
/* Detect and discard overlaps. */
if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
- goto discard_qp;
+ goto overlap;
if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
ip4_frag_append_to_last_run(&qp->q, skb);
else
@@ -450,7 +450,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
FRAG_CB(skb1)->frag_run_len)
rbn = &parent->rb_right;
else /* Found an overlap with skb1. */
- goto discard_qp;
+ goto overlap;
} while (*rbn);
/* Here we have parent properly set, and rbn pointing to
* one of its NULL left/right children. Insert skb.
@@ -487,16 +487,18 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
skb->_skb_refdst = 0UL;
err = ip_frag_reasm(qp, skb, prev_tail, dev);
skb->_skb_refdst = orefdst;
+ if (err)
+ inet_frag_kill(&qp->q);
return err;
}
skb_dst_drop(skb);
return -EINPROGRESS;
+overlap:
+ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
inet_frag_kill(&qp->q);
- err = -EINVAL;
- __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
err:
kfree_skb(skb);
return err;
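
Note: after this rework the IPSTATS_MIB_REASM_OVERLAPS counter is bumped only for genuine fragment overlaps (the new overlap label); other failures, such as invalid lengths, pull/trim errors, or a failed final reassembly, kill the queue or free the skb with the appropriate errno without inflating the overlap statistic.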
@@ -621,7 +623,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
sub_frag_mem_limit(qp->q.net, head->truesize);
*nextp = NULL;
- head->next = NULL;
+ skb_mark_not_on_list(head);
head->prev = NULL;
head->dev = dev;
head->tstamp = qp->q.stamp;
@@ -820,7 +822,6 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
table[0].data = &net->ipv4.frags.high_thresh;
table[0].extra1 = &net->ipv4.frags.low_thresh;
- table[0].extra2 = &init_net.ipv4.frags.high_thresh;
table[1].data = &net->ipv4.frags.low_thresh;
table[1].extra2 = &net->ipv4.frags.high_thresh;
table[2].data = &net->ipv4.frags.timeout;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 8cce0e9ea08c..38befe829caf 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -232,22 +232,19 @@ static void gre_err(struct sk_buff *skb, u32 info)
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct tnl_ptk_info tpi;
- bool csum_err = false;
- if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
- iph->ihl * 4) < 0) {
- if (!csum_err) /* ignore csum errors. */
- return;
- }
+ if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
+ iph->ihl * 4) < 0)
+ return;
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, dev_net(skb->dev), info,
- skb->dev->ifindex, 0, IPPROTO_GRE, 0);
+ skb->dev->ifindex, IPPROTO_GRE);
return;
}
if (type == ICMP_REDIRECT) {
- ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
- IPPROTO_GRE, 0);
+ ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
+ IPPROTO_GRE);
return;
}
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 3196cf58f418..35a786c0aaa0 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -531,11 +531,7 @@ static void ip_sublist_rcv_finish(struct list_head *head)
struct sk_buff *skb, *next;
list_for_each_entry_safe(skb, next, head, list) {
- list_del(&skb->list);
- /* Handle ip{6}_forward case, as sch_direct_xmit have
- * another kind of SKB-list usage (see validate_xmit_skb_list)
- */
- skb->next = NULL;
+ skb_list_del_init(skb);
dst_input(skb);
}
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9c4e72e9c60a..c09219e7f230 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -278,7 +278,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
struct sk_buff *nskb = segs->next;
int err;
- segs->next = NULL;
+ skb_mark_not_on_list(segs);
err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
if (err && ret == 0)
@@ -684,7 +684,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb = frag;
frag = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
}
if (err == 0) {
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c0fe5ad996f2..26c36cccabdc 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -149,7 +149,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
struct sockaddr_in sin;
- const struct iphdr *iph = ip_hdr(skb);
__be16 *ports;
int end;
@@ -164,7 +163,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
ports = (__be16 *)skb_transport_header(skb);
sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = iph->daddr;
+ sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
sin.sin_port = ports[1];
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c4f5602308ed..284a22154b4e 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -627,6 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, u8 protocol)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ unsigned int inner_nhdr_len = 0;
const struct iphdr *inner_iph;
struct flowi4 fl4;
u8 tos, ttl;
@@ -636,6 +637,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
__be32 dst;
bool connected;
+ /* ensure we can access the inner net header, for several users below */
+ if (skb->protocol == htons(ETH_P_IP))
+ inner_nhdr_len = sizeof(struct iphdr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ inner_nhdr_len = sizeof(struct ipv6hdr);
+ if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+ goto tx_error;
+
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index f38cb21d773d..de31b302d69c 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -318,9 +318,9 @@ static int vti4_err(struct sk_buff *skb, u32 info)
return 0;
if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
- ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0);
+ ipv4_update_pmtu(skb, net, info, 0, protocol);
else
- ipv4_redirect(skb, net, 0, 0, protocol, 0);
+ ipv4_redirect(skb, net, 0, protocol);
xfrm_state_put(x);
return 0;
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index d97f4f2787f5..9119d012ba46 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -48,9 +48,9 @@ static int ipcomp4_err(struct sk_buff *skb, u32 info)
return 0;
if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
- ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
+ ipv4_update_pmtu(skb, net, info, 0, IPPROTO_COMP);
else
- ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
+ ipv4_redirect(skb, net, 0, IPPROTO_COMP);
xfrm_state_put(x);
return 0;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index c891235b4966..e65287c27e3d 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -175,13 +175,12 @@ static int ipip_err(struct sk_buff *skb, u32 info)
}
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
- ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
- iph->protocol, 0);
+ ipv4_update_pmtu(skb, net, info, t->parms.link, iph->protocol);
goto out;
}
if (type == ICMP_REDIRECT) {
- ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
+ ipv4_redirect(skb, net, t->parms.link, iph->protocol);
goto out;
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 5660adcf7a04..7a3e2acda94c 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2527,8 +2527,31 @@ errout_free:
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct fib_dump_filter filter = {};
+ int err;
+
+ if (cb->strict_check) {
+ err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh,
+ &filter, cb);
+ if (err < 0)
+ return err;
+ }
+
+ if (filter.table_id) {
+ struct mr_table *mrt;
+
+ mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
+ if (!mrt) {
+ NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
+ return -ENOENT;
+ }
+ err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
+ &mfc_unres_lock, &filter);
+ return skb->len ? : err;
+ }
+
return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
- _ipmr_fill_mroute, &mfc_unres_lock);
+ _ipmr_fill_mroute, &mfc_unres_lock, &filter);
}
static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
@@ -2710,6 +2733,31 @@ static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
return true;
}
+static int ipmr_valid_dumplink(const struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct ifinfomsg *ifm;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+ NL_SET_ERR_MSG(extack, "ipv4: Invalid header for ipmr link dump");
+ return -EINVAL;
+ }
+
+ if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
+ NL_SET_ERR_MSG(extack, "Invalid data after header in ipmr link dump");
+ return -EINVAL;
+ }
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
+ ifm->ifi_change || ifm->ifi_index) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for ipmr link dump request");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
@@ -2718,6 +2766,13 @@ static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int e = 0, s_e;
struct mr_table *mrt;
+ if (cb->strict_check) {
+ int err = ipmr_valid_dumplink(cb->nlh, cb->extack);
+
+ if (err < 0)
+ return err;
+ }
+
s_t = cb->args[0];
s_e = cb->args[1];
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index 1ad9aa62a97b..3e614cc824f7 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -268,6 +268,81 @@ int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
}
EXPORT_SYMBOL(mr_fill_mroute);
+static bool mr_mfc_uses_dev(const struct mr_table *mrt,
+ const struct mr_mfc *c,
+ const struct net_device *dev)
+{
+ int ct;
+
+ for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
+ if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
+ const struct vif_device *vif;
+
+ vif = &mrt->vif_table[ct];
+ if (vif->dev == dev)
+ return true;
+ }
+ }
+ return false;
+}
+
+int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
+ u32 portid, u32 seq, struct mr_mfc *c,
+ int cmd, int flags),
+ spinlock_t *lock, struct fib_dump_filter *filter)
+{
+ unsigned int e = 0, s_e = cb->args[1];
+ unsigned int flags = NLM_F_MULTI;
+ struct mr_mfc *mfc;
+ int err;
+
+ if (filter->filter_set)
+ flags |= NLM_F_DUMP_FILTERED;
+
+ list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
+ if (e < s_e)
+ goto next_entry;
+ if (filter->dev &&
+ !mr_mfc_uses_dev(mrt, mfc, filter->dev))
+ goto next_entry;
+
+ err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
+ if (err < 0)
+ goto out;
+next_entry:
+ e++;
+ }
+
+ spin_lock_bh(lock);
+ list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
+ if (e < s_e)
+ goto next_entry2;
+ if (filter->dev &&
+ !mr_mfc_uses_dev(mrt, mfc, filter->dev))
+ goto next_entry2;
+
+ err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
+ if (err < 0) {
+ spin_unlock_bh(lock);
+ goto out;
+ }
+next_entry2:
+ e++;
+ }
+ spin_unlock_bh(lock);
+ err = 0;
+ e = 0;
+
+out:
+ cb->args[1] = e;
+ return err;
+}
+EXPORT_SYMBOL(mr_table_dump);
+
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct mr_table *(*iter)(struct net *net,
struct mr_table *mrt),
@@ -275,53 +350,35 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
- spinlock_t *lock)
+ spinlock_t *lock, struct fib_dump_filter *filter)
{
- unsigned int t = 0, e = 0, s_t = cb->args[0], s_e = cb->args[1];
+ unsigned int t = 0, s_t = cb->args[0];
struct net *net = sock_net(skb->sk);
struct mr_table *mrt;
- struct mr_mfc *mfc;
+ int err;
+
+ /* multicast entries do not track protocol and have no route type
+ * other than RTN_MULTICAST
+ */
+ if (filter->filter_set) {
+ if (filter->protocol || filter->flags ||
+ (filter->rt_type && filter->rt_type != RTN_MULTICAST))
+ return skb->len;
+ }
rcu_read_lock();
for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
if (t < s_t)
goto next_table;
- list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
- if (e < s_e)
- goto next_entry;
- if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, mfc,
- RTM_NEWROUTE, NLM_F_MULTI) < 0)
- goto done;
-next_entry:
- e++;
- }
- e = 0;
- s_e = 0;
-
- spin_lock_bh(lock);
- list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
- if (e < s_e)
- goto next_entry2;
- if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, mfc,
- RTM_NEWROUTE, NLM_F_MULTI) < 0) {
- spin_unlock_bh(lock);
- goto done;
- }
-next_entry2:
- e++;
- }
- spin_unlock_bh(lock);
- e = 0;
- s_e = 0;
+
+ err = mr_table_dump(mrt, skb, cb, fill, lock, filter);
+ if (err < 0)
+ break;
next_table:
t++;
}
-done:
rcu_read_unlock();
- cb->args[1] = e;
cb->args[0] = t;
return skb->len;
diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
index 04311f7067e2..6d218f5a2e71 100644
--- a/net/ipv4/metrics.c
+++ b/net/ipv4/metrics.c
@@ -5,8 +5,8 @@
#include <net/net_namespace.h>
#include <net/tcp.h>
-int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
- u32 *metrics)
+static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
+ int fc_mx_len, u32 *metrics)
{
bool ecn_ca = false;
struct nlattr *nla;
@@ -52,4 +52,28 @@ int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
return 0;
}
-EXPORT_SYMBOL_GPL(ip_metrics_convert);
+
+struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+ int fc_mx_len)
+{
+ struct dst_metrics *fib_metrics;
+ int err;
+
+ if (!fc_mx)
+ return (struct dst_metrics *)&dst_default_metrics;
+
+ fib_metrics = kzalloc(sizeof(*fib_metrics), GFP_KERNEL);
+ if (unlikely(!fib_metrics))
+ return ERR_PTR(-ENOMEM);
+
+ err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics);
+ if (!err) {
+ refcount_set(&fib_metrics->refcnt, 1);
+ } else {
+ kfree(fib_metrics);
+ fib_metrics = ERR_PTR(err);
+ }
+
+ return fib_metrics;
+}
+EXPORT_SYMBOL_GPL(ip_fib_metrics_init);
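
Note: ip_fib_metrics_init() is paired with the ip_fib_metrics_put() call seen in free_fib_info_rcu() above. A sketch of the put side, mirroring the refcount logic this series removes from fib_semantics.c (the actual helper may live in a header; shown only to make the pairing explicit):

static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	/* the shared default metrics are static and must never be freed */
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}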
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 12843c9ef142..0b10d8812828 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -36,7 +36,6 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
const struct net_device *dev, u8 flags)
{
struct fib_result res;
- bool dev_match;
int ret __maybe_unused;
if (fib_lookup(net, fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE))
@@ -46,21 +45,7 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
if (res.type != RTN_LOCAL || !(flags & XT_RPFILTER_ACCEPT_LOCAL))
return false;
}
- dev_match = false;
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
- for (ret = 0; ret < res.fi->fib_nhs; ret++) {
- struct fib_nh *nh = &res.fi->fib_nh[ret];
-
- if (nh->nh_dev == dev) {
- dev_match = true;
- break;
- }
- }
-#else
- if (FIB_RES_DEV(res) == dev)
- dev_match = true;
-#endif
- return dev_match || flags & XT_RPFILTER_LOOSE;
+ return fib_info_nh_uses_dev(res.fi, dev) || flags & XT_RPFILTER_LOOSE;
}
static bool
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 6115bf1ff6f0..78a67f961d86 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -264,7 +264,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
return nf_nat_inet_fn(priv, skb, state);
}
-EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn);
static unsigned int
nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
index ad3aeff152ed..a9d5e013e555 100644
--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
@@ -104,12 +104,26 @@ static int masq_device_event(struct notifier_block *this,
return NOTIFY_DONE;
}
+static int inet_cmp(struct nf_conn *ct, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct nf_conntrack_tuple *tuple;
+
+ if (!device_cmp(ct, (void *)(long)dev->ifindex))
+ return 0;
+
+ tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ return ifa->ifa_address == tuple->dst.u3.ip;
+}
+
static int masq_inet_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
- struct netdev_notifier_info info;
+ struct net *net = dev_net(idev->dev);
/* The masq_dev_notifier will catch the case of the device going
* down. So if the inetdev is dead and being destroyed we have
@@ -119,8 +133,10 @@ static int masq_inet_event(struct notifier_block *this,
if (idev->dead)
return NOTIFY_DONE;
- netdev_notifier_info_init(&info, idev->dev);
- return masq_device_event(this, event, &info);
+ if (event == NETDEV_DOWN)
+ nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
+
+ return NOTIFY_DONE;
}
static struct notifier_block masq_dev_notifier = {
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
index ac110c1d55b5..a0aa13bcabda 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
@@ -60,6 +60,7 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway");
MODULE_ALIAS("ip_nat_snmp_basic");
+MODULE_ALIAS_NFCT_HELPER("snmp_trap");
#define SNMP_PORT 161
#define SNMP_TRAP_PORT 162
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index e50976e3c213..94eb25bc8d7e 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -76,10 +76,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
.flowi4_iif = LOOPBACK_IFINDEX,
};
const struct net_device *oif;
- struct net_device *found;
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
- int i;
-#endif
+ const struct net_device *found;
/*
* Do not set flowi4_oif, it restricts results (for example, asking
@@ -146,25 +143,13 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
if (!oif) {
found = FIB_RES_DEV(res);
- goto ok;
- }
-
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
- for (i = 0; i < res.fi->fib_nhs; i++) {
- struct fib_nh *nh = &res.fi->fib_nh[i];
+ } else {
+ if (!fib_info_nh_uses_dev(res.fi, oif))
+ return;
- if (nh->nh_dev == oif) {
- found = nh->nh_dev;
- goto ok;
- }
+ found = oif;
}
- return;
-#else
- found = FIB_RES_DEV(res);
- if (found != oif)
- return;
-#endif
-ok:
+
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
*dest = found->ifindex;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 8d7aaf118a30..7ccb5f87f70b 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -779,7 +779,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
if (ipv4_is_multicast(daddr)) {
- if (!ipc.oif)
+ if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 33df4d76db2d..8ca3eb06ba04 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -608,7 +608,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
tos |= RTO_ONLINK;
if (ipv4_is_multicast(daddr)) {
- if (!ipc.oif)
+ if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b678466da451..c0a9d26c06ce 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1001,21 +1001,22 @@ out: kfree_skb(skb);
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
+ u32 old_mtu = ipv4_mtu(dst);
struct fib_result res;
bool lock = false;
if (ip_mtu_locked(dst))
return;
- if (ipv4_mtu(dst) < mtu)
+ if (old_mtu < mtu)
return;
if (mtu < ip_rt_min_pmtu) {
lock = true;
- mtu = ip_rt_min_pmtu;
+ mtu = min(old_mtu, ip_rt_min_pmtu);
}
- if (rt->rt_pmtu == mtu &&
+ if (rt->rt_pmtu == mtu && !lock &&
time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
return;
@@ -1040,17 +1041,15 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
- int oif, u32 mark, u8 protocol, int flow_flags)
+ int oif, u8 protocol)
{
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
struct rtable *rt;
-
- if (!mark)
- mark = IP4_REPLY_MARK(net, skb->mark);
+ u32 mark = IP4_REPLY_MARK(net, skb->mark);
__build_flow_key(net, &fl4, NULL, iph, oif,
- RT_TOS(iph->tos), protocol, mark, flow_flags);
+ RT_TOS(iph->tos), protocol, mark, 0);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_rt_update_pmtu(rt, &fl4, mtu);
@@ -1132,14 +1131,14 @@ out:
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
- int oif, u32 mark, u8 protocol, int flow_flags)
+ int oif, u8 protocol)
{
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
struct rtable *rt;
__build_flow_key(net, &fl4, NULL, iph, oif,
- RT_TOS(iph->tos), protocol, mark, flow_flags);
+ RT_TOS(iph->tos), protocol, 0, 0);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_do_redirect(rt, skb, &fl4, false);
@@ -1219,18 +1218,15 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
src = ip_hdr(skb)->saddr;
else {
struct fib_result res;
- struct flowi4 fl4;
- struct iphdr *iph;
-
- iph = ip_hdr(skb);
-
- memset(&fl4, 0, sizeof(fl4));
- fl4.daddr = iph->daddr;
- fl4.saddr = iph->saddr;
- fl4.flowi4_tos = RT_TOS(iph->tos);
- fl4.flowi4_oif = rt->dst.dev->ifindex;
- fl4.flowi4_iif = skb->dev->ifindex;
- fl4.flowi4_mark = skb->mark;
+ struct iphdr *iph = ip_hdr(skb);
+ struct flowi4 fl4 = {
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .flowi4_tos = RT_TOS(iph->tos),
+ .flowi4_oif = rt->dst.dev->ifindex,
+ .flowi4_iif = skb->dev->ifindex,
+ .flowi4_mark = skb->mark,
+ };
rcu_read_lock();
if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
@@ -1481,12 +1477,9 @@ void rt_del_uncached_list(struct rtable *rt)
static void ipv4_dst_destroy(struct dst_entry *dst)
{
- struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
struct rtable *rt = (struct rtable *)dst;
- if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
- kfree(p);
-
+ ip_dst_metrics_put(dst);
rt_del_uncached_list(rt);
}
@@ -1533,11 +1526,8 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
rt->rt_gateway = nh->nh_gw;
rt->rt_uses_gateway = 1;
}
- dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
- if (fi->fib_metrics != &dst_default_metrics) {
- rt->dst._metrics |= DST_METRICS_REFCOUNTED;
- refcount_inc(&fi->fib_metrics->refcnt);
- }
+ ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
+
#ifdef CONFIG_IP_ROUTE_CLASSID
rt->dst.tclassid = nh->nh_tclassid;
#endif
@@ -2785,7 +2775,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct rtable *rt = NULL;
struct sk_buff *skb;
struct rtmsg *rtm;
- struct flowi4 fl4;
+ struct flowi4 fl4 = {};
__be32 dst = 0;
__be32 src = 0;
kuid_t uid;
@@ -2825,7 +2815,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (!skb)
return -ENOBUFS;
- memset(&fl4, 0, sizeof(fl4));
fl4.daddr = dst;
fl4.saddr = src;
fl4.flowi4_tos = rtm->rtm_tos;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index c3387dfd725b..606f868d9f3f 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -88,7 +88,7 @@ u64 cookie_init_timestamp(struct request_sock *req)
ts <<= TSBITS;
ts |= options;
}
- return (u64)ts * (USEC_PER_SEC / TCP_TS_HZ);
+ return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b92f422f2fa8..891ed2f91467 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -48,6 +48,7 @@ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
static int comp_sack_nr_max = 255;
+static u32 u32_max_div_HZ = UINT_MAX / HZ;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@@ -745,9 +746,10 @@ static struct ctl_table ipv4_net_table[] = {
{
.procname = "tcp_probe_interval",
.data = &init_net.ipv4.sysctl_tcp_probe_interval,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(u32),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_douintvec_minmax,
+ .extra2 = &u32_max_div_HZ,
},
{
.procname = "igmp_link_local_mcast_reports",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 10c6246396cc..1834818ed07b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -507,7 +507,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
const struct tcp_sock *tp = tcp_sk(sk);
int state;
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
state = inet_sk_state_load(sk);
if (state == TCP_LISTEN)
@@ -1295,7 +1295,7 @@ new_segment:
copy = size_goal;
/* All packets are restored as if they have
- * already been sent. skb_mstamp isn't set to
+ * already been sent. skb_mstamp_ns isn't set to
* avoid wrong rtt estimation.
*/
if (tp->repair)
@@ -1753,6 +1753,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
struct vm_area_struct *vma;
struct sk_buff *skb = NULL;
struct tcp_sock *tp;
+ int inq;
int ret;
if (address & (PAGE_SIZE - 1) || address != zc->address)
@@ -1773,12 +1774,15 @@ static int tcp_zerocopy_receive(struct sock *sk,
tp = tcp_sk(sk);
seq = tp->copied_seq;
- zc->length = min_t(u32, zc->length, tcp_inq(sk));
+ inq = tcp_inq(sk);
+ zc->length = min_t(u32, zc->length, inq);
zc->length &= ~(PAGE_SIZE - 1);
-
- zap_page_range(vma, address, zc->length);
-
- zc->recv_skip_hint = 0;
+ if (zc->length) {
+ zap_page_range(vma, address, zc->length);
+ zc->recv_skip_hint = 0;
+ } else {
+ zc->recv_skip_hint = inq;
+ }
ret = 0;
while (length + PAGE_SIZE <= zc->length) {
if (zc->recv_skip_hint < PAGE_SIZE) {
@@ -1801,8 +1805,17 @@ static int tcp_zerocopy_receive(struct sock *sk,
frags++;
}
}
- if (frags->size != PAGE_SIZE || frags->page_offset)
+ if (frags->size != PAGE_SIZE || frags->page_offset) {
+ int remaining = zc->recv_skip_hint;
+
+ while (remaining && (frags->size != PAGE_SIZE ||
+ frags->page_offset)) {
+ remaining -= frags->size;
+ frags++;
+ }
+ zc->recv_skip_hint -= remaining;
break;
+ }
ret = vm_insert_page(vma, address + length,
skb_frag_page(frags));
if (ret)
@@ -2403,16 +2416,10 @@ adjudge_to_death:
sock_hold(sk);
sock_orphan(sk);
- /* It is the last release_sock in its life. It will remove backlog. */
- release_sock(sk);
-
-
- /* Now socket is owned by kernel and we acquire BH lock
- * to finish close. No need to check for user refs.
- */
local_bh_disable();
bh_lock_sock(sk);
- WARN_ON(sock_owned_by_user(sk));
+ /* remove backlog if any, without releasing ownership. */
+ __release_sock(sk);
percpu_counter_inc(sk->sk_prot->orphan_count);
@@ -2481,6 +2488,7 @@ adjudge_to_death:
out:
bh_unlock_sock(sk);
local_bh_enable();
+ release_sock(sk);
sock_put(sk);
}
EXPORT_SYMBOL(tcp_close);
@@ -2595,6 +2603,8 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->compressed_ack = 0;
tp->bytes_sent = 0;
tp->bytes_retrans = 0;
+ tp->duplicate_sack[0].start_seq = 0;
+ tp->duplicate_sack[0].end_seq = 0;
tp->dsack_dups = 0;
tp->reord_seen = 0;
@@ -3101,10 +3111,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
const struct inet_connection_sock *icsk = inet_csk(sk);
+ unsigned long rate;
u32 now;
u64 rate64;
bool slow;
- u32 rate;
memset(info, 0, sizeof(*info));
if (sk->sk_type != SOCK_STREAM)
@@ -3114,11 +3124,11 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
/* Report meaningful fields for all TCP states, including listeners */
rate = READ_ONCE(sk->sk_pacing_rate);
- rate64 = rate != ~0U ? rate : ~0ULL;
+ rate64 = (rate != ~0UL) ? rate : ~0ULL;
info->tcpi_pacing_rate = rate64;
rate = READ_ONCE(sk->sk_max_pacing_rate);
- rate64 = rate != ~0U ? rate : ~0ULL;
+ rate64 = (rate != ~0UL) ? rate : ~0ULL;
info->tcpi_max_pacing_rate = rate64;
info->tcpi_reordering = tp->reordering;
@@ -3244,8 +3254,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *stats;
struct tcp_info info;
+ unsigned long rate;
u64 rate64;
- u32 rate;
stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);
if (!stats)
@@ -3264,7 +3274,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
tp->total_retrans, TCP_NLA_PAD);
rate = READ_ONCE(sk->sk_pacing_rate);
- rate64 = rate != ~0U ? rate : ~0ULL;
+ rate64 = (rate != ~0UL) ? rate : ~0ULL;
nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);
rate64 = tcp_compute_delivery_rate(tp);
@@ -3894,8 +3904,8 @@ void __init tcp_init(void)
init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
- init_net.ipv4.sysctl_tcp_rmem[1] = 87380;
- init_net.ipv4.sysctl_tcp_rmem[2] = max(87380, max_rshare);
+ init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
+ init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);
pr_info("Hash tables configured (established %u bind %u)\n",
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 02ff2dde9609..9277abdd822a 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -128,6 +128,9 @@ static const u32 bbr_probe_rtt_mode_ms = 200;
/* Skip TSO below the following bandwidth (bits/sec): */
static const int bbr_min_tso_rate = 1200000;
+/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck. */
+static const int bbr_pacing_margin_percent = 1;
+
/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
* that will allow a smoothly increasing pacing rate that will double each RTT
* and send the same number of packets per RTT that an un-paced, slow-starting
@@ -208,17 +211,15 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
{
unsigned int mss = tcp_sk(sk)->mss_cache;
- if (!tcp_needs_internal_pacing(sk))
- mss = tcp_mss_to_mtu(sk, mss);
rate *= mss;
rate *= gain;
rate >>= BBR_SCALE;
- rate *= USEC_PER_SEC;
+ rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_margin_percent);
return rate >> BW_SCALE;
}
/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
-static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
+static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
{
u64 rate = bw;
@@ -257,7 +258,7 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
- u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
+ unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);
if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
bbr_init_pacing_rate_from_rtt(sk);
@@ -279,7 +280,7 @@ static u32 bbr_tso_segs_goal(struct sock *sk)
/* Sort of tcp_tso_autosize() but ignoring
* driver provided sk_gso_max_size.
*/
- bytes = min_t(u32, sk->sk_pacing_rate >> sk->sk_pacing_shift,
+ bytes = min_t(unsigned long, sk->sk_pacing_rate >> sk->sk_pacing_shift,
GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
@@ -368,6 +369,39 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
return cwnd;
}
+/* With pacing at lower layers, there's often less data "in the network" than
+ * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
+ * we often have several skbs queued in the pacing layer with a pre-scheduled
+ * earliest departure time (EDT). BBR adapts its pacing rate based on the
+ * inflight level that it estimates has already been "baked in" by previous
+ * departure time decisions. We calculate a rough estimate of the number of our
+ * packets that might be in the network at the earliest departure time for the
+ * next skb scheduled:
+ * in_network_at_edt = inflight_at_edt - (EDT - now) * bw
+ * If we're increasing inflight, then we want to know if the transmit of the
+ * EDT skb will push inflight above the target, so inflight_at_edt includes
+ * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
+ * then estimate if inflight will sink too low just before the EDT transmit.
+ */
+static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u64 now_ns, edt_ns, interval_us;
+ u32 interval_delivered, inflight_at_edt;
+
+ now_ns = tp->tcp_clock_cache;
+ edt_ns = max(tp->tcp_wstamp_ns, now_ns);
+ interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
+ interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
+ inflight_at_edt = inflight_now;
+ if (bbr->pacing_gain > BBR_UNIT) /* increasing inflight */
+ inflight_at_edt += bbr_tso_segs_goal(sk); /* include EDT skb */
+ if (interval_delivered >= inflight_at_edt)
+ return 0;
+ return inflight_at_edt - interval_delivered;
+}
+
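As a rough worked example of the formula above: if bbr_bw() corresponds to 100 packets per ms and the next skb's earliest departure time is 2 ms away, interval_delivered is about 200 packets. With inflight_now = 300 and a pacing_gain above BBR_UNIT adding a TSO goal of, say, 10 segments, in_network_at_edt = 310 - 200 = 110; when interval_delivered exceeds inflight_at_edt the estimate bottoms out at 0.
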
/* An optimization in BBR to reduce losses: On the first round of recovery, we
* follow the packet conservation principle: send P packets per P packets acked.
* After that, we slow-start and send at most 2*P packets per P packets acked.
@@ -459,7 +493,7 @@ static bool bbr_is_next_cycle_phase(struct sock *sk,
if (bbr->pacing_gain == BBR_UNIT)
return is_full_length; /* just use wall clock time */
- inflight = rs->prior_in_flight; /* what was in-flight before ACK? */
+ inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
bw = bbr_max_bw(sk);
/* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
@@ -487,8 +521,6 @@ static void bbr_advance_cycle_phase(struct sock *sk)
bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
bbr->cycle_mstamp = tp->delivered_mstamp;
- bbr->pacing_gain = bbr->lt_use_bw ? BBR_UNIT :
- bbr_pacing_gain[bbr->cycle_idx];
}
/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
@@ -506,8 +538,6 @@ static void bbr_reset_startup_mode(struct sock *sk)
struct bbr *bbr = inet_csk_ca(sk);
bbr->mode = BBR_STARTUP;
- bbr->pacing_gain = bbr_high_gain;
- bbr->cwnd_gain = bbr_high_gain;
}
static void bbr_reset_probe_bw_mode(struct sock *sk)
@@ -515,8 +545,6 @@ static void bbr_reset_probe_bw_mode(struct sock *sk)
struct bbr *bbr = inet_csk_ca(sk);
bbr->mode = BBR_PROBE_BW;
- bbr->pacing_gain = BBR_UNIT;
- bbr->cwnd_gain = bbr_cwnd_gain;
bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
bbr_advance_cycle_phase(sk); /* flip to next phase of gain cycle */
}
@@ -734,13 +762,11 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
bbr->mode = BBR_DRAIN; /* drain queue we created */
- bbr->pacing_gain = bbr_drain_gain; /* pace slow to drain */
- bbr->cwnd_gain = bbr_high_gain; /* maintain cwnd */
tcp_sk(sk)->snd_ssthresh =
bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT);
} /* fall through to check if in-flight is already small: */
if (bbr->mode == BBR_DRAIN &&
- tcp_packets_in_flight(tcp_sk(sk)) <=
+ bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
}
@@ -797,8 +823,6 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
!bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
bbr->mode = BBR_PROBE_RTT; /* dip, drain queue */
- bbr->pacing_gain = BBR_UNIT;
- bbr->cwnd_gain = BBR_UNIT;
bbr_save_cwnd(sk); /* note cwnd so we can restore it */
bbr->probe_rtt_done_stamp = 0;
}
@@ -826,6 +850,35 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
bbr->idle_restart = 0;
}
+static void bbr_update_gains(struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ switch (bbr->mode) {
+ case BBR_STARTUP:
+ bbr->pacing_gain = bbr_high_gain;
+ bbr->cwnd_gain = bbr_high_gain;
+ break;
+ case BBR_DRAIN:
+ bbr->pacing_gain = bbr_drain_gain; /* slow, to drain */
+ bbr->cwnd_gain = bbr_high_gain; /* keep cwnd */
+ break;
+ case BBR_PROBE_BW:
+ bbr->pacing_gain = (bbr->lt_use_bw ?
+ BBR_UNIT :
+ bbr_pacing_gain[bbr->cycle_idx]);
+ bbr->cwnd_gain = bbr_cwnd_gain;
+ break;
+ case BBR_PROBE_RTT:
+ bbr->pacing_gain = BBR_UNIT;
+ bbr->cwnd_gain = BBR_UNIT;
+ break;
+ default:
+ WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
+ break;
+ }
+}
+
static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
bbr_update_bw(sk, rs);
@@ -833,6 +886,7 @@ static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
bbr_check_full_bw_reached(sk, rs);
bbr_check_drain(sk, rs);
bbr_update_min_rtt(sk, rs);
+ bbr_update_gains(sk);
}
static void bbr_main(struct sock *sk, const struct rate_sample *rs)
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
new file mode 100644
index 000000000000..b7918d4caa30
--- /dev/null
+++ b/net/ipv4/tcp_bpf.c
@@ -0,0 +1,668 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
+
+#include <linux/skmsg.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+
+#include <net/inet_common.h>
+
+static bool tcp_bpf_stream_read(const struct sock *sk)
+{
+ struct sk_psock *psock;
+ bool empty = true;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (likely(psock))
+ empty = list_empty(&psock->ingress_msg);
+ rcu_read_unlock();
+ return !empty;
+}
+
+static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
+ int flags, long timeo, int *err)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret;
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ ret = sk_wait_event(sk, &timeo,
+ !list_empty(&psock->ingress_msg) ||
+ !skb_queue_empty(&sk->sk_receive_queue), &wait);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return ret;
+}
+
+int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
+ struct msghdr *msg, int len, int flags)
+{
+ struct iov_iter *iter = &msg->msg_iter;
+ int peek = flags & MSG_PEEK;
+ int i, ret, copied = 0;
+ struct sk_msg *msg_rx;
+
+ msg_rx = list_first_entry_or_null(&psock->ingress_msg,
+ struct sk_msg, list);
+
+ while (copied != len) {
+ struct scatterlist *sge;
+
+ if (unlikely(!msg_rx))
+ break;
+
+ i = msg_rx->sg.start;
+ do {
+ struct page *page;
+ int copy;
+
+ sge = sk_msg_elem(msg_rx, i);
+ copy = sge->length;
+ page = sg_page(sge);
+ if (copied + copy > len)
+ copy = len - copied;
+ ret = copy_page_to_iter(page, sge->offset, copy, iter);
+ if (ret != copy) {
+ msg_rx->sg.start = i;
+ return -EFAULT;
+ }
+
+ copied += copy;
+ if (likely(!peek)) {
+ sge->offset += copy;
+ sge->length -= copy;
+ sk_mem_uncharge(sk, copy);
+ msg_rx->sg.size -= copy;
+
+ if (!sge->length) {
+ sk_msg_iter_var_next(i);
+ if (!msg_rx->skb)
+ put_page(page);
+ }
+ } else {
+ sk_msg_iter_var_next(i);
+ }
+
+ if (copied == len)
+ break;
+ } while (i != msg_rx->sg.end);
+
+ if (unlikely(peek)) {
+ msg_rx = list_next_entry(msg_rx, list);
+ continue;
+ }
+
+ msg_rx->sg.start = i;
+ if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
+ list_del(&msg_rx->list);
+ if (msg_rx->skb)
+ consume_skb(msg_rx->skb);
+ kfree(msg_rx);
+ }
+ msg_rx = list_first_entry_or_null(&psock->ingress_msg,
+ struct sk_msg, list);
+ }
+
+ return copied;
+}
+EXPORT_SYMBOL_GPL(__tcp_bpf_recvmsg);
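
Note: with MSG_PEEK, __tcp_bpf_recvmsg() walks the ingress list without consuming it: sg.start is not advanced, socket memory is not uncharged, and fully copied messages are not unlinked, so a subsequent non-peek read sees the same bytes.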
+
+int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len)
+{
+ struct sk_psock *psock;
+ int copied, ret;
+
+ if (unlikely(flags & MSG_ERRQUEUE))
+ return inet_recv_error(sk, msg, len, addr_len);
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+
+ psock = sk_psock_get(sk);
+ if (unlikely(!psock))
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ lock_sock(sk);
+msg_bytes_ready:
+ copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
+ if (!copied) {
+ int data, err = 0;
+ long timeo;
+
+ timeo = sock_rcvtimeo(sk, nonblock);
+ data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
+ if (data) {
+ if (skb_queue_empty(&sk->sk_receive_queue))
+ goto msg_bytes_ready;
+ release_sock(sk);
+ sk_psock_put(sk, psock);
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ }
+ if (err) {
+ ret = err;
+ goto out;
+ }
+ }
+ ret = copied;
+out:
+ release_sock(sk);
+ sk_psock_put(sk, psock);
+ return ret;
+}
+
+static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
+ struct sk_msg *msg, u32 apply_bytes, int flags)
+{
+ bool apply = apply_bytes;
+ struct scatterlist *sge;
+ u32 size, copied = 0;
+ struct sk_msg *tmp;
+ int i, ret = 0;
+
+ tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
+ if (unlikely(!tmp))
+ return -ENOMEM;
+
+ lock_sock(sk);
+ tmp->sg.start = msg->sg.start;
+ i = msg->sg.start;
+ do {
+ sge = sk_msg_elem(msg, i);
+ size = (apply && apply_bytes < sge->length) ?
+ apply_bytes : sge->length;
+ if (!sk_wmem_schedule(sk, size)) {
+ if (!copied)
+ ret = -ENOMEM;
+ break;
+ }
+
+ sk_mem_charge(sk, size);
+ sk_msg_xfer(tmp, msg, i, size);
+ copied += size;
+ if (sge->length)
+ get_page(sk_msg_page(tmp, i));
+ sk_msg_iter_var_next(i);
+ tmp->sg.end = i;
+ if (apply) {
+ apply_bytes -= size;
+ if (!apply_bytes)
+ break;
+ }
+ } while (i != msg->sg.end);
+
+ if (!ret) {
+ msg->sg.start = i;
+ msg->sg.size -= apply_bytes;
+ sk_psock_queue_msg(psock, tmp);
+ sk->sk_data_ready(sk);
+ } else {
+ sk_msg_free(sk, tmp);
+ kfree(tmp);
+ }
+
+ release_sock(sk);
+ return ret;
+}
+
+static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
+ int flags, bool uncharge)
+{
+ bool apply = apply_bytes;
+ struct scatterlist *sge;
+ struct page *page;
+ int size, ret = 0;
+ u32 off;
+
+ while (1) {
+ sge = sk_msg_elem(msg, msg->sg.start);
+ size = (apply && apply_bytes < sge->length) ?
+ apply_bytes : sge->length;
+ off = sge->offset;
+ page = sg_page(sge);
+
+ tcp_rate_check_app_limited(sk);
+retry:
+ ret = do_tcp_sendpages(sk, page, off, size, flags);
+ if (ret <= 0)
+ return ret;
+ if (apply)
+ apply_bytes -= ret;
+ msg->sg.size -= ret;
+ sge->offset += ret;
+ sge->length -= ret;
+ if (uncharge)
+ sk_mem_uncharge(sk, ret);
+ if (ret != size) {
+ size -= ret;
+ off += ret;
+ goto retry;
+ }
+ if (!sge->length) {
+ put_page(page);
+ sk_msg_iter_next(msg, start);
+ sg_init_table(sge, 1);
+ if (msg->sg.start == msg->sg.end)
+ break;
+ }
+ if (apply && !apply_bytes)
+ break;
+ }
+
+ return 0;
+}
+
+static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
+ u32 apply_bytes, int flags, bool uncharge)
+{
+ int ret;
+
+ lock_sock(sk);
+ ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
+ release_sock(sk);
+ return ret;
+}
+
+int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
+ u32 bytes, int flags)
+{
+ bool ingress = sk_msg_to_ingress(msg);
+ struct sk_psock *psock = sk_psock_get(sk);
+ int ret;
+
+ if (unlikely(!psock)) {
+ sk_msg_free(sk, msg);
+ return 0;
+ }
+ ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
+ tcp_bpf_push_locked(sk, msg, bytes, flags, false);
+ sk_psock_put(sk, psock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);
+
+static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
+ struct sk_msg *msg, int *copied, int flags)
+{
+ bool cork = false, enospc = msg->sg.start == msg->sg.end;
+ struct sock *sk_redir;
+ u32 tosend;
+ int ret;
+
+more_data:
+ if (psock->eval == __SK_NONE)
+ psock->eval = sk_psock_msg_verdict(sk, psock, msg);
+
+ if (msg->cork_bytes &&
+ msg->cork_bytes > msg->sg.size && !enospc) {
+ psock->cork_bytes = msg->cork_bytes - msg->sg.size;
+ if (!psock->cork) {
+ psock->cork = kzalloc(sizeof(*psock->cork),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!psock->cork)
+ return -ENOMEM;
+ }
+ memcpy(psock->cork, msg, sizeof(*msg));
+ return 0;
+ }
+
+ tosend = msg->sg.size;
+ if (psock->apply_bytes && psock->apply_bytes < tosend)
+ tosend = psock->apply_bytes;
+
+ switch (psock->eval) {
+ case __SK_PASS:
+ ret = tcp_bpf_push(sk, msg, tosend, flags, true);
+ if (unlikely(ret)) {
+ *copied -= sk_msg_free(sk, msg);
+ break;
+ }
+ sk_msg_apply_bytes(psock, tosend);
+ break;
+ case __SK_REDIRECT:
+ sk_redir = psock->sk_redir;
+ sk_msg_apply_bytes(psock, tosend);
+ if (psock->cork) {
+ cork = true;
+ psock->cork = NULL;
+ }
+ sk_msg_return(sk, msg, tosend);
+ release_sock(sk);
+ ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
+ lock_sock(sk);
+ if (unlikely(ret < 0)) {
+ int free = sk_msg_free_nocharge(sk, msg);
+
+ if (!cork)
+ *copied -= free;
+ }
+ if (cork) {
+ sk_msg_free(sk, msg);
+ kfree(msg);
+ msg = NULL;
+ ret = 0;
+ }
+ break;
+ case __SK_DROP:
+ default:
+ sk_msg_free_partial(sk, msg, tosend);
+ sk_msg_apply_bytes(psock, tosend);
+ *copied -= tosend;
+ return -EACCES;
+ }
+
+ if (likely(!ret)) {
+ if (!psock->apply_bytes) {
+ psock->eval = __SK_NONE;
+ if (psock->sk_redir) {
+ sock_put(psock->sk_redir);
+ psock->sk_redir = NULL;
+ }
+ }
+ if (msg &&
+ msg->sg.data[msg->sg.start].page_link &&
+ msg->sg.data[msg->sg.start].length)
+ goto more_data;
+ }
+ return ret;
+}
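
tcp_bpf_send_verdict() dispatches on the verdict the attached BPF program returned: __SK_PASS pushes the data out the local socket, __SK_REDIRECT releases the socket lock and pushes via the redirect socket, and __SK_DROP frees the data and surfaces -EACCES. A hedged user-space sketch of just that dispatch follows; the push functions and verdict names are placeholders, not the kernel implementations.

    #include <stdio.h>

    enum verdict { SK_PASS, SK_REDIRECT, SK_DROP };

    /* Illustrative stand-ins for the three outcomes above. */
    static int push_local(int bytes)    { printf("pass: push %d\n", bytes); return 0; }
    static int push_redirect(int bytes) { printf("redir: push %d\n", bytes); return 0; }

    static int apply_verdict(enum verdict v, int bytes, int *copied)
    {
        switch (v) {
        case SK_PASS:
            return push_local(bytes);
        case SK_REDIRECT:
            /* the kernel drops the socket lock around this call */
            return push_redirect(bytes);
        case SK_DROP:
        default:
            *copied -= bytes;   /* data freed, not sent */
            return -13;         /* -EACCES */
        }
    }

    int main(void)
    {
        int copied = 100;

        apply_verdict(SK_PASS, 40, &copied);
        apply_verdict(SK_DROP, 60, &copied);
        printf("copied after drop: %d\n", copied);
        return 0;
    }
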
+
+static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ struct sk_msg tmp, *msg_tx = NULL;
+ int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
+ int copied = 0, err = 0;
+ struct sk_psock *psock;
+ long timeo;
+
+ psock = sk_psock_get(sk);
+ if (unlikely(!psock))
+ return tcp_sendmsg(sk, msg, size);
+
+ lock_sock(sk);
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ while (msg_data_left(msg)) {
+ bool enospc = false;
+ u32 copy, osize;
+
+ if (sk->sk_err) {
+ err = -sk->sk_err;
+ goto out_err;
+ }
+
+ copy = msg_data_left(msg);
+ if (!sk_stream_memory_free(sk))
+ goto wait_for_sndbuf;
+ if (psock->cork) {
+ msg_tx = psock->cork;
+ } else {
+ msg_tx = &tmp;
+ sk_msg_init(msg_tx);
+ }
+
+ osize = msg_tx->sg.size;
+ err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
+ if (err) {
+ if (err != -ENOSPC)
+ goto wait_for_memory;
+ enospc = true;
+ copy = msg_tx->sg.size - osize;
+ }
+
+ err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
+ copy);
+ if (err < 0) {
+ sk_msg_trim(sk, msg_tx, osize);
+ goto out_err;
+ }
+
+ copied += copy;
+ if (psock->cork_bytes) {
+ if (size > psock->cork_bytes)
+ psock->cork_bytes = 0;
+ else
+ psock->cork_bytes -= size;
+ if (psock->cork_bytes && !enospc)
+ goto out_err;
+ /* All cork bytes are accounted, rerun the prog. */
+ psock->eval = __SK_NONE;
+ psock->cork_bytes = 0;
+ }
+
+ err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
+ if (unlikely(err < 0))
+ goto out_err;
+ continue;
+wait_for_sndbuf:
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+ err = sk_stream_wait_memory(sk, &timeo);
+ if (err) {
+ if (msg_tx && msg_tx != psock->cork)
+ sk_msg_free(sk, msg_tx);
+ goto out_err;
+ }
+ }
+out_err:
+ if (err < 0)
+ err = sk_stream_error(sk, msg->msg_flags, err);
+ release_sock(sk);
+ sk_psock_put(sk, psock);
+ return copied ? copied : err;
+}
+
+static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags)
+{
+ struct sk_msg tmp, *msg = NULL;
+ int err = 0, copied = 0;
+ struct sk_psock *psock;
+ bool enospc = false;
+
+ psock = sk_psock_get(sk);
+ if (unlikely(!psock))
+ return tcp_sendpage(sk, page, offset, size, flags);
+
+ lock_sock(sk);
+ if (psock->cork) {
+ msg = psock->cork;
+ } else {
+ msg = &tmp;
+ sk_msg_init(msg);
+ }
+
+ /* Catch case where ring is full and sendpage is stalled. */
+ if (unlikely(sk_msg_full(msg)))
+ goto out_err;
+
+ sk_msg_page_add(msg, page, size, offset);
+ sk_mem_charge(sk, size);
+ copied = size;
+ if (sk_msg_full(msg))
+ enospc = true;
+ if (psock->cork_bytes) {
+ if (size > psock->cork_bytes)
+ psock->cork_bytes = 0;
+ else
+ psock->cork_bytes -= size;
+ if (psock->cork_bytes && !enospc)
+ goto out_err;
+ /* All cork bytes are accounted, rerun the prog. */
+ psock->eval = __SK_NONE;
+ psock->cork_bytes = 0;
+ }
+
+ err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
+out_err:
+ release_sock(sk);
+ sk_psock_put(sk, psock);
+ return copied ? copied : err;
+}
+
+static void tcp_bpf_remove(struct sock *sk, struct sk_psock *psock)
+{
+ struct sk_psock_link *link;
+
+ sk_psock_cork_free(psock);
+ __sk_psock_purge_ingress_msg(psock);
+ while ((link = sk_psock_link_pop(psock))) {
+ sk_psock_unlink(sk, link);
+ sk_psock_free_link(link);
+ }
+}
+
+static void tcp_bpf_unhash(struct sock *sk)
+{
+ void (*saved_unhash)(struct sock *sk);
+ struct sk_psock *psock;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ if (sk->sk_prot->unhash)
+ sk->sk_prot->unhash(sk);
+ return;
+ }
+
+ saved_unhash = psock->saved_unhash;
+ tcp_bpf_remove(sk, psock);
+ rcu_read_unlock();
+ saved_unhash(sk);
+}
+
+static void tcp_bpf_close(struct sock *sk, long timeout)
+{
+ void (*saved_close)(struct sock *sk, long timeout);
+ struct sk_psock *psock;
+
+ lock_sock(sk);
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ release_sock(sk);
+ return sk->sk_prot->close(sk, timeout);
+ }
+
+ saved_close = psock->saved_close;
+ tcp_bpf_remove(sk, psock);
+ rcu_read_unlock();
+ release_sock(sk);
+ saved_close(sk, timeout);
+}
+
+enum {
+ TCP_BPF_IPV4,
+ TCP_BPF_IPV6,
+ TCP_BPF_NUM_PROTS,
+};
+
+enum {
+ TCP_BPF_BASE,
+ TCP_BPF_TX,
+ TCP_BPF_NUM_CFGS,
+};
+
+static struct proto *tcpv6_prot_saved __read_mostly;
+static DEFINE_SPINLOCK(tcpv6_prot_lock);
+static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];
+
+static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
+ struct proto *base)
+{
+ prot[TCP_BPF_BASE] = *base;
+ prot[TCP_BPF_BASE].unhash = tcp_bpf_unhash;
+ prot[TCP_BPF_BASE].close = tcp_bpf_close;
+ prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
+ prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read;
+
+ prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
+ prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
+ prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage;
+}
+
+static void tcp_bpf_check_v6_needs_rebuild(struct sock *sk, struct proto *ops)
+{
+ if (sk->sk_family == AF_INET6 &&
+ unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
+ spin_lock_bh(&tcpv6_prot_lock);
+ if (likely(ops != tcpv6_prot_saved)) {
+ tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
+ smp_store_release(&tcpv6_prot_saved, ops);
+ }
+ spin_unlock_bh(&tcpv6_prot_lock);
+ }
+}
+
+static int __init tcp_bpf_v4_build_proto(void)
+{
+ tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
+ return 0;
+}
+core_initcall(tcp_bpf_v4_build_proto);
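
tcp_bpf_rebuild_protos() clones the base struct proto and overrides only the hooks sockmap needs, with the TX variant layering sendmsg/sendpage on top of the BASE copy. A self-contained model of this copy-and-override ops-table pattern, with illustrative names throughout:

    #include <stdio.h>

    struct proto_ops {
        const char *(*recvmsg)(void);
        const char *(*sendmsg)(void);
    };

    static const char *base_recv(void) { return "tcp_recvmsg"; }
    static const char *base_send(void) { return "tcp_sendmsg"; }
    static const char *bpf_recv(void)  { return "tcp_bpf_recvmsg"; }
    static const char *bpf_send(void)  { return "tcp_bpf_sendmsg"; }

    enum { CFG_BASE, CFG_TX, NUM_CFGS };

    /* Clone the template, then override selected hooks per configuration. */
    static void rebuild(struct proto_ops prot[NUM_CFGS], const struct proto_ops *base)
    {
        prot[CFG_BASE] = *base;
        prot[CFG_BASE].recvmsg = bpf_recv;

        prot[CFG_TX] = prot[CFG_BASE];  /* TX inherits the BASE overrides */
        prot[CFG_TX].sendmsg = bpf_send;
    }

    int main(void)
    {
        struct proto_ops base = { base_recv, base_send }, prot[NUM_CFGS];

        rebuild(prot, &base);
        printf("BASE: %s / %s\n", prot[CFG_BASE].recvmsg(), prot[CFG_BASE].sendmsg());
        printf("TX:   %s / %s\n", prot[CFG_TX].recvmsg(), prot[CFG_TX].sendmsg());
        return 0;
    }

The same template is rebuilt lazily for IPv6 in tcp_bpf_check_v6_needs_rebuild() because tcpv6_prot lives in a module and is not available at core_initcall time.
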
+
+static void tcp_bpf_update_sk_prot(struct sock *sk, struct sk_psock *psock)
+{
+ int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
+ int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;
+
+ sk_psock_update_proto(sk, psock, &tcp_bpf_prots[family][config]);
+}
+
+static void tcp_bpf_reinit_sk_prot(struct sock *sk, struct sk_psock *psock)
+{
+ int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
+ int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;
+
+	/* Reinit occurs when program types change, e.g. TCP_BPF_TX is removed
+	 * or added, requiring sk_prot hook updates. We keep the original saved
+	 * hooks in this case.
+ */
+ sk->sk_prot = &tcp_bpf_prots[family][config];
+}
+
+static int tcp_bpf_assert_proto_ops(struct proto *ops)
+{
+ /* In order to avoid retpoline, we make assumptions when we call
+ * into ops if e.g. a psock is not present. Make sure they are
+ * indeed valid assumptions.
+ */
+ return ops->recvmsg == tcp_recvmsg &&
+ ops->sendmsg == tcp_sendmsg &&
+ ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
+}
+
+void tcp_bpf_reinit(struct sock *sk)
+{
+ struct sk_psock *psock;
+
+ sock_owned_by_me(sk);
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ tcp_bpf_reinit_sk_prot(sk, psock);
+ rcu_read_unlock();
+}
+
+int tcp_bpf_init(struct sock *sk)
+{
+ struct proto *ops = READ_ONCE(sk->sk_prot);
+ struct sk_psock *psock;
+
+ sock_owned_by_me(sk);
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (unlikely(!psock || psock->sk_proto ||
+ tcp_bpf_assert_proto_ops(ops))) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ tcp_bpf_check_v6_needs_rebuild(sk, ops);
+ tcp_bpf_update_sk_prot(sk, psock);
+ rcu_read_unlock();
+ return 0;
+}
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 06fbe102a425..37eebd910396 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -146,7 +146,7 @@ static void tcp_cdg_hystart_update(struct sock *sk)
return;
if (hystart_detect & HYSTART_ACK_TRAIN) {
- u32 now_us = div_u64(local_clock(), NSEC_PER_USEC);
+ u32 now_us = tp->tcp_mstamp;
if (ca->last_ack == 0 || !tcp_is_cwnd_limited(sk)) {
ca->last_ack = now_us;
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index ca61e2a659e7..cd4814f7e962 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -44,6 +44,7 @@
#include <linux/mm.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
+#include "tcp_dctcp.h"
#define DCTCP_MAX_ALPHA 1024U
@@ -118,54 +119,6 @@ static u32 dctcp_ssthresh(struct sock *sk)
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}
-/* Minimal DCTP CE state machine:
- *
- * S: 0 <- last pkt was non-CE
- * 1 <- last pkt was CE
- */
-
-static void dctcp_ce_state_0_to_1(struct sock *sk)
-{
- struct dctcp *ca = inet_csk_ca(sk);
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (!ca->ce_state) {
- /* State has changed from CE=0 to CE=1, force an immediate
- * ACK to reflect the new CE state. If an ACK was delayed,
- * send that first to reflect the prior CE state.
- */
- if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
- __tcp_send_ack(sk, ca->prior_rcv_nxt);
- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
- }
-
- ca->prior_rcv_nxt = tp->rcv_nxt;
- ca->ce_state = 1;
-
- tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
-}
-
-static void dctcp_ce_state_1_to_0(struct sock *sk)
-{
- struct dctcp *ca = inet_csk_ca(sk);
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (ca->ce_state) {
- /* State has changed from CE=1 to CE=0, force an immediate
- * ACK to reflect the new CE state. If an ACK was delayed,
- * send that first to reflect the prior CE state.
- */
- if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
- __tcp_send_ack(sk, ca->prior_rcv_nxt);
- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
- }
-
- ca->prior_rcv_nxt = tp->rcv_nxt;
- ca->ce_state = 0;
-
- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-}
-
static void dctcp_update_alpha(struct sock *sk, u32 flags)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -230,12 +183,12 @@ static void dctcp_state(struct sock *sk, u8 new_state)
static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
+ struct dctcp *ca = inet_csk_ca(sk);
+
switch (ev) {
case CA_EVENT_ECN_IS_CE:
- dctcp_ce_state_0_to_1(sk);
- break;
case CA_EVENT_ECN_NO_CE:
- dctcp_ce_state_1_to_0(sk);
+ dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
break;
default:
/* Don't care for the rest. */
diff --git a/net/ipv4/tcp_dctcp.h b/net/ipv4/tcp_dctcp.h
new file mode 100644
index 000000000000..d69a77cbd0c7
--- /dev/null
+++ b/net/ipv4/tcp_dctcp.h
@@ -0,0 +1,40 @@
+#ifndef _TCP_DCTCP_H
+#define _TCP_DCTCP_H
+
+static inline void dctcp_ece_ack_cwr(struct sock *sk, u32 ce_state)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (ce_state == 1)
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ else
+ tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+}
+
+/* Minimal DCTCP CE state machine:
+ *
+ * S: 0 <- last pkt was non-CE
+ * 1 <- last pkt was CE
+ */
+static inline void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
+ u32 *prior_rcv_nxt, u32 *ce_state)
+{
+ u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;
+
+ if (*ce_state != new_ce_state) {
+ /* CE state has changed, force an immediate ACK to
+ * reflect the new CE state. If an ACK was delayed,
+ * send that first to reflect the prior CE state.
+ */
+ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
+ dctcp_ece_ack_cwr(sk, *ce_state);
+ __tcp_send_ack(sk, *prior_rcv_nxt);
+ }
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ }
+ *prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
+ *ce_state = new_ce_state;
+ dctcp_ece_ack_cwr(sk, new_ce_state);
+}
+
+#endif
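
The helper is a two-state machine keyed on whether the last packet carried CE. On a transition it first flushes any delayed ACK so that ACK still reflects the old CE state, then demands an immediate ACK for the new one. A standalone user-space model of the transition logic, where send_ack() is only a placeholder for the kernel's __tcp_send_ack():

    #include <stdio.h>

    static unsigned int ce_state, prior_rcv_nxt;

    static void send_ack(unsigned int rcv_nxt)
    {
        printf("  ACK reflecting rcv_nxt=%u, CE=%u\n", rcv_nxt, ce_state);
    }

    /* Model of dctcp_ece_ack_update: force an ACK only on CE transitions. */
    static void ce_update(unsigned int new_ce, unsigned int rcv_nxt, int ack_pending)
    {
        if (ce_state != new_ce) {
            if (ack_pending)
                send_ack(prior_rcv_nxt);  /* flush the old CE state first */
            printf("  demand immediate ACK for CE=%u\n", new_ce);
        }
        prior_rcv_nxt = rcv_nxt;
        ce_state = new_ce;
    }

    int main(void)
    {
        printf("pkt 1, CE=1:\n"); ce_update(1, 1000, 1);
        printf("pkt 2, CE=1:\n"); ce_update(1, 2000, 0);  /* no transition */
        printf("pkt 3, CE=0:\n"); ce_update(0, 3000, 1);
        return 0;
    }
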
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4cf2f7bb2802..2868ef28ce52 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -426,26 +426,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
}
}
-/* 3. Tuning rcvbuf, when connection enters established state. */
-static void tcp_fixup_rcvbuf(struct sock *sk)
-{
- u32 mss = tcp_sk(sk)->advmss;
- int rcvmem;
-
- rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
- tcp_default_init_rwnd(mss);
-
- /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency
- * Allow enough cushion so that sender is not limited by our window
- */
- if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)
- rcvmem <<= 2;
-
- if (sk->sk_rcvbuf < rcvmem)
- sk->sk_rcvbuf = min(rcvmem, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
-}
-
-/* 4. Try to fixup all. It is made immediately after connection enters
+/* 3. Try to fixup all. It is made immediately after connection enters
* established state.
*/
void tcp_init_buffer_space(struct sock *sk)
@@ -454,12 +435,10 @@ void tcp_init_buffer_space(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
int maxwin;
- if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
- tcp_fixup_rcvbuf(sk);
if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
tcp_sndbuf_expand(sk);
- tp->rcvq_space.space = tp->rcv_wnd;
+ tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss);
tcp_mstamp_refresh(tp);
tp->rcvq_space.time = tp->tcp_mstamp;
tp->rcvq_space.seq = tp->copied_seq;
@@ -485,7 +464,7 @@ void tcp_init_buffer_space(struct sock *sk)
tp->snd_cwnd_stamp = tcp_jiffies32;
}
-/* 5. Recalculate window clamp after socket hit its memory bounds. */
+/* 4. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -1305,7 +1284,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
*/
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
start_seq, end_seq, dup_sack, pcount,
- skb->skb_mstamp);
+ tcp_skb_timestamp_us(skb));
tcp_rate_skb_delivered(sk, skb, state->rate);
if (skb == tp->lost_skb_hint)
@@ -1580,7 +1559,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
TCP_SKB_CB(skb)->end_seq,
dup_sack,
tcp_skb_pcount(skb),
- skb->skb_mstamp);
+ tcp_skb_timestamp_us(skb));
tcp_rate_skb_delivered(sk, skb, state->rate);
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
list_del_init(&skb->tcp_tsorted_anchor);
@@ -3000,8 +2979,8 @@ void tcp_rearm_rto(struct sock *sk)
*/
rto = usecs_to_jiffies(max_t(int, delta_us, 1));
}
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
- TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
+ TCP_RTO_MAX, tcp_rtx_queue_head(sk));
}
}
@@ -3103,7 +3082,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
tp->retrans_out -= acked_pcount;
flag |= FLAG_RETRANS_DATA_ACKED;
} else if (!(sacked & TCPCB_SACKED_ACKED)) {
- last_ackt = skb->skb_mstamp;
+ last_ackt = tcp_skb_timestamp_us(skb);
WARN_ON_ONCE(last_ackt == 0);
if (!first_ackt)
first_ackt = last_ackt;
@@ -3121,7 +3100,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
tp->delivered += acked_pcount;
if (!tcp_skb_spurious_retrans(tp, skb))
tcp_rack_advance(tp, sacked, scb->end_seq,
- skb->skb_mstamp);
+ tcp_skb_timestamp_us(skb));
}
if (sacked & TCPCB_LOST)
tp->lost_out -= acked_pcount;
@@ -3215,7 +3194,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
}
} else if (skb && rtt_update && sack_rtt_us >= 0 &&
- sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
+ sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
+ tcp_skb_timestamp_us(skb))) {
/* Do not re-arm RTO if the sack RTT is measured from data sent
* after when the head was last (re)transmitted. Otherwise the
* timeout may continue to extend in loss recovery.
@@ -3275,8 +3255,8 @@ static void tcp_ack_probe(struct sock *sk)
} else {
unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- when, TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ when, TCP_RTO_MAX, NULL);
}
}
@@ -4199,6 +4179,17 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
}
+static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
+{
+ /* When the ACK path fails or drops most ACKs, the sender would
+ * timeout and spuriously retransmit the same segment repeatedly.
+ * The receiver remembers and reflects via DSACKs. Leverage the
+ * DSACK state and change the txhash to re-route speculatively.
+ */
+ if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq)
+ sk_rethink_txhash(sk);
+}
+
static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -4211,6 +4202,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
+ tcp_rcv_spurious_retrans(sk, skb);
if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
end_seq = tp->rcv_nxt;
tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
@@ -4755,6 +4747,7 @@ queue_and_out:
}
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
+ tcp_rcv_spurious_retrans(sk, skb);
/* A retransmit, 2nd most common case. Force an immediate ack. */
NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
@@ -6009,11 +6002,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (th->fin)
goto discard;
/* It is possible that we process SYN packets from backlog,
- * so we need to make sure to disable BH right there.
+ * so we need to make sure to disable BH and hold the RCU read lock right there.
*/
+ rcu_read_lock();
local_bh_disable();
acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
local_bh_enable();
+ rcu_read_unlock();
if (!acceptable)
return 1;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 44c09eddbb78..de47038afdf0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -544,7 +544,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
BUG_ON(!skb);
tcp_mstamp_refresh(tp);
- delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
+ delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
remaining = icsk->icsk_rto -
usecs_to_jiffies(delta_us);
@@ -943,9 +943,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
if (skb) {
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
+ rcu_read_lock();
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- ireq_opt_deref(ireq));
+ rcu_dereference(ireq->ireq_opt));
+ rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -2549,7 +2551,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_tw_reuse = 2;
cnt = tcp_hashinfo.ehash_mask + 1;
- net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
+ net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 597dbd749f05..9c34b97d365d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -45,6 +45,21 @@
#include <trace/events/tcp.h>
+/* Refresh clocks of a TCP socket,
+ * ensuring monotonically increasing values.
+ */
+void tcp_mstamp_refresh(struct tcp_sock *tp)
+{
+ u64 val = tcp_clock_ns();
+
+ if (val > tp->tcp_clock_cache)
+ tp->tcp_clock_cache = val;
+
+ val = div_u64(val, NSEC_PER_USEC);
+ if (val > tp->tcp_mstamp)
+ tp->tcp_mstamp = val;
+}
+
static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
int push_one, gfp_t gfp);
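
tcp_mstamp_refresh() caches both a nanosecond clock value and a derived microsecond stamp and never lets either go backwards. A minimal user-space model of that monotonic caching, assuming now_ns is read from some clock source:

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_USEC 1000ULL

    static uint64_t clock_cache_ns, mstamp_us;

    /* Model of tcp_mstamp_refresh: both cached values stay monotonic even
     * if the underlying clock read were to jitter backwards. */
    static void mstamp_refresh(uint64_t now_ns)
    {
        uint64_t now_us;

        if (now_ns > clock_cache_ns)
            clock_cache_ns = now_ns;
        now_us = now_ns / NSEC_PER_USEC;
        if (now_us > mstamp_us)
            mstamp_us = now_us;
    }

    int main(void)
    {
        mstamp_refresh(5000);
        mstamp_refresh(3000);   /* stale read: the caches must not regress */
        printf("cache=%llu ns, mstamp=%llu us\n",
               (unsigned long long)clock_cache_ns,
               (unsigned long long)mstamp_us);
        return 0;
    }
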
@@ -179,21 +194,6 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
-
-u32 tcp_default_init_rwnd(u32 mss)
-{
- /* Initial receive window should be twice of TCP_INIT_CWND to
- * enable proper sending of new unsent data during fast recovery
- * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
- * limit when mss is larger than 1460.
- */
- u32 init_rwnd = TCP_INIT_CWND * 2;
-
- if (mss > 1460)
- init_rwnd = max((1460 * init_rwnd) / mss, 2U);
- return init_rwnd;
-}
-
/* Determine a window scaling and initial window to offer.
* Based on the assumption that the given amount of space
* will be offered. Store the results in the tp structure.
@@ -228,7 +228,10 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
else
- (*rcv_wnd) = space;
+ (*rcv_wnd) = min_t(u32, space, U16_MAX);
+
+ if (init_rcv_wnd)
+ *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
(*rcv_wscale) = 0;
if (wscale_ok) {
@@ -241,11 +244,6 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
(*rcv_wscale)++;
}
}
-
- if (!init_rcv_wnd) /* Use default unless specified otherwise */
- init_rcv_wnd = tcp_default_init_rwnd(mss);
- *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
-
/* Set the clamp no higher than max representable value */
(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
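
The rewritten window selection caps the pre-scaling advertised window at U16_MAX and, when the route supplies an initial window, clamps it to init_rcv_wnd * mss; the old tcp_default_init_rwnd() fallback is gone. A small model of the computation (the sysctl workaround path is omitted for brevity):

    #include <stdio.h>
    #include <stdint.h>

    #define U16_MAX 65535u

    /* Model of the rewritten initial-window selection. */
    static uint32_t initial_rcv_wnd(uint32_t space, uint32_t init_rcv_wnd,
                                    uint32_t mss)
    {
        uint32_t wnd = space < U16_MAX ? space : U16_MAX;

        if (init_rcv_wnd && init_rcv_wnd * mss < wnd)
            wnd = init_rcv_wnd * mss;   /* route-supplied cap, in segments */
        return wnd;
    }

    int main(void)
    {
        printf("%u\n", initial_rcv_wnd(1 << 20, 0, 1460));  /* capped at 65535 */
        printf("%u\n", initial_rcv_wnd(1 << 20, 10, 1460)); /* 10 * 1460 = 14600 */
        return 0;
    }
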
@@ -977,28 +975,28 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
+static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
+ u64 prior_wstamp)
{
- u64 len_ns;
- u32 rate;
+ struct tcp_sock *tp = tcp_sk(sk);
- if (!tcp_needs_internal_pacing(sk))
- return;
- rate = sk->sk_pacing_rate;
- if (!rate || rate == ~0U)
- return;
+ skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
+ if (sk->sk_pacing_status != SK_PACING_NONE) {
+ unsigned long rate = sk->sk_pacing_rate;
- len_ns = (u64)skb->len * NSEC_PER_SEC;
- do_div(len_ns, rate);
- hrtimer_start(&tcp_sk(sk)->pacing_timer,
- ktime_add_ns(ktime_get(), len_ns),
- HRTIMER_MODE_ABS_PINNED_SOFT);
- sock_hold(sk);
-}
+ /* Original sch_fq does not pace the first 10 MSS.
+ * Note that tp->data_segs_out overflows after 2^32 packets;
+ * this is a minor annoyance.
+ */
+ if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
+ u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
+ u64 credit = tp->tcp_wstamp_ns - prior_wstamp;
-static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
-{
- skb->skb_mstamp = tp->tcp_mstamp;
+ /* take into account OS jitter */
+ len_ns -= min_t(u64, len_ns / 2, credit);
+ tp->tcp_wstamp_ns += len_ns;
+ }
+ }
list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
}
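
This replaces per-skb hrtimer pacing with an earliest-departure-time (EDT) scheme: each transmitted skb advances tcp_wstamp_ns by len/rate, minus a bounded credit for wall time that already elapsed. A worked user-space model of the EDT bump, with an assumed 1 Gbit/s pacing rate:

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    static uint64_t wstamp_ns;  /* earliest departure time of the next skb */

    /* Model of tcp_update_skb_after_send's EDT advance. */
    static void edt_advance(uint64_t skb_len, uint64_t rate_bytes_per_sec,
                            uint64_t prior_wstamp)
    {
        uint64_t len_ns = skb_len * NSEC_PER_SEC / rate_bytes_per_sec;
        uint64_t credit = wstamp_ns - prior_wstamp; /* time already consumed */

        /* take into account OS jitter: forgive at most half the slot */
        len_ns -= credit < len_ns / 2 ? credit : len_ns / 2;
        wstamp_ns += len_ns;
    }

    int main(void)
    {
        uint64_t prior = wstamp_ns = 1000000; /* 1 ms, arbitrary start */

        edt_advance(1500, 125000000, prior);  /* 1500 B at 1 Gbit/s = 12 us */
        printf("next departure: %llu ns\n", (unsigned long long)wstamp_ns);
        return 0;
    }
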
@@ -1025,6 +1023,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
struct sk_buff *oskb = NULL;
struct tcp_md5sig_key *md5;
struct tcphdr *th;
+ u64 prior_wstamp;
int err;
BUG_ON(!skb || !tcp_skb_pcount(skb));
@@ -1045,7 +1044,11 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
if (unlikely(!skb))
return -ENOBUFS;
}
- skb->skb_mstamp = tp->tcp_mstamp;
+
+ prior_wstamp = tp->tcp_wstamp_ns;
+ tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
+
+ skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
inet = inet_sk(sk);
tcb = TCP_SKB_CB(skb);
@@ -1137,7 +1140,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
tcp_event_data_sent(tp, sk);
tp->data_segs_out += tcp_skb_pcount(skb);
tp->bytes_sent += skb->len - tcp_header_size;
- tcp_internal_pacing(sk, skb);
}
if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
@@ -1149,8 +1151,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
- /* Our usage of tstamp should remain private */
- skb->tstamp = 0;
+ /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
/* Cleanup our debris for IP stacks */
memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
@@ -1163,7 +1164,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
err = net_xmit_eval(err);
}
if (!err && oskb) {
- tcp_update_skb_after_send(tp, oskb);
+ tcp_update_skb_after_send(sk, oskb, prior_wstamp);
tcp_rate_skb_sent(sk, oskb);
}
return err;
@@ -1698,8 +1699,9 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
{
u32 bytes, segs;
- bytes = min(sk->sk_pacing_rate >> sk->sk_pacing_shift,
- sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
+ bytes = min_t(unsigned long,
+ sk->sk_pacing_rate >> sk->sk_pacing_shift,
+ sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
/* Goal is to send at least one packet per ms,
* not one big TSO packet every 100 ms.
@@ -1966,7 +1968,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
head = tcp_rtx_queue_head(sk);
if (!head)
goto send_now;
- age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
+ age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
/* If next ACK is likely to come too late (half srtt), do not defer */
if (age < (tp->srtt_us >> 4))
goto send_now;
@@ -2172,10 +2174,23 @@ static int tcp_mtu_probe(struct sock *sk)
return -1;
}
-static bool tcp_pacing_check(const struct sock *sk)
+static bool tcp_pacing_check(struct sock *sk)
{
- return tcp_needs_internal_pacing(sk) &&
- hrtimer_is_queued(&tcp_sk(sk)->pacing_timer);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!tcp_needs_internal_pacing(sk))
+ return false;
+
+ if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
+ return false;
+
+ if (!hrtimer_is_queued(&tp->pacing_timer)) {
+ hrtimer_start(&tp->pacing_timer,
+ ns_to_ktime(tp->tcp_wstamp_ns),
+ HRTIMER_MODE_ABS_PINNED_SOFT);
+ sock_hold(sk);
+ }
+ return true;
}
/* TCP Small Queues :
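
With EDT in place, tcp_pacing_check() above only throttles while the socket's departure stamp is still in the future, arming a single hrtimer for that instant instead of one per transmitted skb. A sketch of the decision, where timer_armed stands in for hrtimer_is_queued()/hrtimer_start():

    #include <stdio.h>
    #include <stdint.h>

    static int timer_armed;

    /* Model of tcp_pacing_check: throttle only while the earliest departure
     * time is ahead of the cached clock, arming one timer for it. */
    static int pacing_check(uint64_t wstamp_ns, uint64_t clock_cache_ns)
    {
        if (wstamp_ns <= clock_cache_ns)
            return 0;           /* nothing to wait for: send now */
        if (!timer_armed) {
            timer_armed = 1;    /* hrtimer_start(..., wstamp_ns) */
            printf("arm timer for %llu\n", (unsigned long long)wstamp_ns);
        }
        return 1;
    }

    int main(void)
    {
        printf("%d\n", pacing_check(2000, 3000));   /* in the past: transmit */
        printf("%d\n", pacing_check(5000, 3000));   /* in the future: throttle */
        return 0;
    }
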
@@ -2192,10 +2207,12 @@ static bool tcp_pacing_check(const struct sock *sk)
static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
unsigned int factor)
{
- unsigned int limit;
+ unsigned long limit;
- limit = max(2 * skb->truesize, sk->sk_pacing_rate >> sk->sk_pacing_shift);
- limit = min_t(u32, limit,
+ limit = max_t(unsigned long,
+ 2 * skb->truesize,
+ sk->sk_pacing_rate >> sk->sk_pacing_shift);
+ limit = min_t(unsigned long, limit,
sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
limit <<= factor;
@@ -2304,18 +2321,19 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
+ if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
+ /* "skb_mstamp_ns" is used as a start point for the retransmit timer */
+ skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
+ list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
+ goto repair; /* Skip network transmission */
+ }
+
if (tcp_pacing_check(sk))
break;
tso_segs = tcp_init_tso_segs(skb, mss_now);
BUG_ON(!tso_segs);
- if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
- /* "skb_mstamp" is used as a start point for the retransmit timer */
- tcp_update_skb_after_send(tp, skb);
- goto repair; /* Skip network transmission */
- }
-
cwnd_quota = tcp_cwnd_test(tp, skb);
if (!cwnd_quota) {
if (push_one == 2)
@@ -2437,8 +2455,8 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
if (rto_delta_us > 0)
timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
- TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
+ TCP_RTO_MAX, NULL);
return true;
}
@@ -2887,7 +2905,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
} tcp_skb_tsorted_restore(skb);
if (!err) {
- tcp_update_skb_after_send(tp, skb);
+ tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
tcp_rate_skb_sent(sk, skb);
}
} else {
@@ -3002,9 +3020,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
if (skb == rtx_head &&
icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ inet_csk(sk)->icsk_rto,
+ TCP_RTO_MAX,
+ skb);
}
}
@@ -3205,10 +3224,10 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
memset(&opts, 0, sizeof(opts));
#ifdef CONFIG_SYN_COOKIES
if (unlikely(req->cookie_ts))
- skb->skb_mstamp = cookie_init_timestamp(req);
+ skb->skb_mstamp_ns = cookie_init_timestamp(req);
else
#endif
- skb->skb_mstamp = tcp_clock_us();
+ skb->skb_mstamp_ns = tcp_clock_ns();
#ifdef CONFIG_TCP_MD5SIG
rcu_read_lock();
@@ -3424,7 +3443,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
- syn->skb_mstamp = syn_data->skb_mstamp;
+ syn->skb_mstamp_ns = syn_data->skb_mstamp_ns;
/* Now full SYN+DATA was cloned and sent (or not),
* remove the SYN from the original skb (syn_data)
@@ -3734,9 +3753,10 @@ void tcp_send_probe0(struct sock *sk)
icsk->icsk_probes_out = 1;
probe_max = TCP_RESOURCE_PROBE_INTERVAL;
}
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_when(sk, probe_max),
- TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ tcp_probe0_when(sk, probe_max),
+ TCP_RTO_MAX,
+ NULL);
}
int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index 4dff40dad4dc..baed2186c7c6 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -55,8 +55,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
* bandwidth estimate.
*/
if (!tp->packets_out) {
- tp->first_tx_mstamp = skb->skb_mstamp;
- tp->delivered_mstamp = skb->skb_mstamp;
+ u64 tstamp_us = tcp_skb_timestamp_us(skb);
+
+ tp->first_tx_mstamp = tstamp_us;
+ tp->delivered_mstamp = tstamp_us;
}
TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
@@ -88,13 +90,12 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
rs->is_app_limited = scb->tx.is_app_limited;
rs->is_retrans = scb->sacked & TCPCB_RETRANS;
+ /* Record send time of most recently ACKed packet: */
+ tp->first_tx_mstamp = tcp_skb_timestamp_us(skb);
/* Find the duration of the "send phase" of this window: */
- rs->interval_us = tcp_stamp_us_delta(
- skb->skb_mstamp,
- scb->tx.first_tx_mstamp);
+ rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
+ scb->tx.first_tx_mstamp);
- /* Record send time of most recently ACKed packet: */
- tp->first_tx_mstamp = skb->skb_mstamp;
}
/* Mark off the skb delivered once it's sacked to avoid being
* used again when it's cumulatively acked. For acked packets
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index c81aadff769b..fdb715bdd2d1 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -50,7 +50,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
return tp->rack.rtt_us + reo_wnd -
- tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
+ tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -91,7 +91,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
!(scb->sacked & TCPCB_SACKED_RETRANS))
continue;
- if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
+ if (!tcp_rack_sent_after(tp->rack.mstamp,
+ tcp_skb_timestamp_us(skb),
tp->rack.end_seq, scb->end_seq))
break;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 7fdf222a0bdf..676020663ce8 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -360,7 +360,7 @@ static void tcp_probe_timer(struct sock *sk)
*/
start_ts = tcp_skb_timestamp(skb);
if (!start_ts)
- skb->skb_mstamp = tp->tcp_mstamp;
+ skb->skb_mstamp_ns = tp->tcp_clock_cache;
else if (icsk->icsk_user_timeout &&
(s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
goto abort;
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index a5995bb2eaca..95df7f7f6328 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -6,7 +6,7 @@
*
*/
-#include<linux/module.h>
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
@@ -29,18 +29,6 @@ static struct tcp_ulp_ops *tcp_ulp_find(const char *name)
return NULL;
}
-static struct tcp_ulp_ops *tcp_ulp_find_id(const int ulp)
-{
- struct tcp_ulp_ops *e;
-
- list_for_each_entry_rcu(e, &tcp_ulp_list, list) {
- if (e->uid == ulp)
- return e;
- }
-
- return NULL;
-}
-
static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
{
const struct tcp_ulp_ops *ulp = NULL;
@@ -63,18 +51,6 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
return ulp;
}
-static const struct tcp_ulp_ops *__tcp_ulp_lookup(const int uid)
-{
- const struct tcp_ulp_ops *ulp;
-
- rcu_read_lock();
- ulp = tcp_ulp_find_id(uid);
- if (!ulp || !try_module_get(ulp->owner))
- ulp = NULL;
- rcu_read_unlock();
- return ulp;
-}
-
/* Attach new upper layer protocol to the list
* of available protocols.
*/
@@ -123,6 +99,10 @@ void tcp_cleanup_ulp(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
+ /* No sock_owned_by_me() check here as at the time the
+ * stack calls this function, the socket is dead and
+ * about to be destroyed.
+ */
if (!icsk->icsk_ulp_ops)
return;
@@ -133,54 +113,35 @@ void tcp_cleanup_ulp(struct sock *sk)
icsk->icsk_ulp_ops = NULL;
}
-/* Change upper layer protocol for socket */
-int tcp_set_ulp(struct sock *sk, const char *name)
+static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- const struct tcp_ulp_ops *ulp_ops;
- int err = 0;
+ int err;
+ err = -EEXIST;
if (icsk->icsk_ulp_ops)
- return -EEXIST;
-
- ulp_ops = __tcp_ulp_find_autoload(name);
- if (!ulp_ops)
- return -ENOENT;
-
- if (!ulp_ops->user_visible) {
- module_put(ulp_ops->owner);
- return -ENOENT;
- }
+ goto out_err;
err = ulp_ops->init(sk);
- if (err) {
- module_put(ulp_ops->owner);
- return err;
- }
+ if (err)
+ goto out_err;
icsk->icsk_ulp_ops = ulp_ops;
return 0;
+out_err:
+ module_put(ulp_ops->owner);
+ return err;
}
-int tcp_set_ulp_id(struct sock *sk, int ulp)
+int tcp_set_ulp(struct sock *sk, const char *name)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_ulp_ops *ulp_ops;
- int err;
- if (icsk->icsk_ulp_ops)
- return -EEXIST;
+ sock_owned_by_me(sk);
- ulp_ops = __tcp_ulp_lookup(ulp);
+ ulp_ops = __tcp_ulp_find_autoload(name);
if (!ulp_ops)
return -ENOENT;
- err = ulp_ops->init(sk);
- if (err) {
- module_put(ulp_ops->owner);
- return err;
- }
-
- icsk->icsk_ulp_ops = ulp_ops;
- return 0;
+ return __tcp_set_ulp(sk, ulp_ops);
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f4e35b2ff8b8..cf8252d05a01 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1042,7 +1042,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
if (ipv4_is_multicast(daddr)) {
- if (!ipc.oif)
+ if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
@@ -1627,7 +1627,7 @@ busy_check:
*err = error;
return NULL;
}
-EXPORT_SYMBOL_GPL(__skb_recv_udp);
+EXPORT_SYMBOL(__skb_recv_udp);
/*
* This should be easy, if there is something there we
@@ -1889,7 +1889,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return 0;
}
-static DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
+DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
static_branch_enable(&udp_encap_needed_key);
@@ -2124,6 +2124,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
inet_compute_pseudo);
}
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
+ * return code conversion for ip layer consumption
+ */
+static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+ struct udphdr *uh)
+{
+ int ret;
+
+ if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+ skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+ inet_compute_pseudo);
+
+ ret = udp_queue_rcv_skb(sk, skb);
+
+ /* a return value > 0 means to resubmit the input, but
+ * it wants the return to be -protocol, or 0
+ */
+ if (ret > 0)
+ return -ret;
+ return 0;
+}
+
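
The wrapper folds the long-standing "a positive return asks for resubmission as -protocol" convention into one place instead of open-coding it at each call site. A trivial model of that conversion, using IPPROTO_UDP's value (17) purely for illustration:

    #include <stdio.h>

    /* Model of the return-code conversion: the IP layer wants either 0 or
     * -protocol when a positive value requests resubmission. */
    static int rcv_to_ip_ret(int ret)
    {
        return ret > 0 ? -ret : 0;
    }

    int main(void)
    {
        printf("%d\n", rcv_to_ip_ret(17));  /* resubmit as -IPPROTO_UDP */
        printf("%d\n", rcv_to_ip_ret(0));   /* consumed */
        return 0;
    }
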
/*
* All we need to do is get the socket, and then do a checksum.
*/
@@ -2170,14 +2192,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (unlikely(sk->sk_rx_dst != dst))
udp_sk_rx_dst_set(sk, dst);
- ret = udp_queue_rcv_skb(sk, skb);
+ ret = udp_unicast_rcv_skb(sk, skb, uh);
sock_put(sk);
- /* a return value > 0 means to resubmit the input, but
- * it wants the return to be -protocol, or 0
- */
- if (ret > 0)
- return -ret;
- return 0;
+ return ret;
}
if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
@@ -2185,22 +2202,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
saddr, daddr, udptable, proto);
sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
- if (sk) {
- int ret;
-
- if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
- skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
- inet_compute_pseudo);
-
- ret = udp_queue_rcv_skb(sk, skb);
-
- /* a return value > 0 means to resubmit the input, but
- * it wants the return to be -protocol, or 0
- */
- if (ret > 0)
- return -ret;
- return 0;
- }
+ if (sk)
+ return udp_unicast_rcv_skb(sk, skb, uh);
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 0c0522b79b43..802f2bc00d69 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -405,7 +405,7 @@ static struct sk_buff *udp4_gro_receive(struct list_head *head,
{
struct udphdr *uh = udp_gro_udphdr(skb);
- if (unlikely(!uh))
+ if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key))
goto flush;
/* Don't bother verifying checksum if we're going to flush anyway. */
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index bcfc00e88756..f8de2482a529 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
if (xo && (xo->flags & XFRM_GRO)) {
skb_mac_header_rebuild(skb);
+ skb_reset_transport_header(skb);
return 0;
}
diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c
index 3d36644890bb..1ad2c2c4e250 100644
--- a/net/ipv4/xfrm4_mode_transport.c
+++ b/net/ipv4/xfrm4_mode_transport.c
@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ihl = skb->data - skb_transport_header(skb);
- struct xfrm_offload *xo = xfrm_offload(skb);
if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),
@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
skb->network_header = skb->transport_header;
}
ip_hdr(skb)->tot_len = htons(skb->len + ihl);
- if (!xo || !(xo->flags & XFRM_GRO))
- skb_reset_transport_header(skb);
+ skb_reset_transport_header(skb);
return 0;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d51a8c0b3372..45b84dd5c4eb 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -666,6 +666,7 @@ errout:
static int inet6_netconf_dump_devconf(struct sk_buff *skb,
struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
int h, s_h;
int idx, s_idx;
@@ -673,6 +674,21 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
struct inet6_dev *idev;
struct hlist_head *head;
+ if (cb->strict_check) {
+ struct netlink_ext_ack *extack = cb->extack;
+ struct netconfmsg *ncm;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
+ return -EINVAL;
+ }
+
+ if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
+ return -EINVAL;
+ }
+ }
+
s_h = cb->args[0];
s_idx = idx = cb->args[1];
@@ -692,7 +708,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
if (inet6_netconf_fill_devconf(skb, dev->ifindex,
&idev->cnf,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
+ nlh->nlmsg_seq,
RTM_NEWNETCONF,
NLM_F_MULTI,
NETCONFA_ALL) < 0) {
@@ -709,7 +725,7 @@ cont:
if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
net->ipv6.devconf_all,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
+ nlh->nlmsg_seq,
RTM_NEWNETCONF, NLM_F_MULTI,
NETCONFA_ALL) < 0)
goto done;
@@ -720,7 +736,7 @@ cont:
if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
net->ipv6.devconf_dflt,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
+ nlh->nlmsg_seq,
RTM_NEWNETCONF, NLM_F_MULTI,
NETCONFA_ALL) < 0)
goto done;
@@ -997,6 +1013,7 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
if (addr_type == IPV6_ADDR_ANY ||
addr_type & IPV6_ADDR_MULTICAST ||
(!(idev->dev->flags & IFF_LOOPBACK) &&
+ !netif_is_l3_master(idev->dev) &&
addr_type & IPV6_ADDR_LOOPBACK))
return ERR_PTR(-EADDRNOTAVAIL);
@@ -4201,7 +4218,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
p++;
continue;
}
- state->offset++;
return ifa;
}
@@ -4225,13 +4241,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
return ifa;
}
+ state->offset = 0;
while (++state->bucket < IN6_ADDR_HSIZE) {
- state->offset = 0;
hlist_for_each_entry_rcu(ifa,
&inet6_addr_lst[state->bucket], addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
- state->offset++;
return ifa;
}
}
@@ -4491,6 +4506,7 @@ static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
[IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
[IFA_FLAGS] = { .len = sizeof(u32) },
[IFA_RT_PRIORITY] = { .len = sizeof(u32) },
+ [IFA_TARGET_NETNSID] = { .type = NLA_S32 },
};
static int
@@ -4793,19 +4809,40 @@ static inline int inet6_ifaddr_msgsize(void)
+ nla_total_size(4) /* IFA_RT_PRIORITY */;
}
+enum addr_type_t {
+ UNICAST_ADDR,
+ MULTICAST_ADDR,
+ ANYCAST_ADDR,
+};
+
+struct inet6_fill_args {
+ u32 portid;
+ u32 seq;
+ int event;
+ unsigned int flags;
+ int netnsid;
+ int ifindex;
+ enum addr_type_t type;
+};
+
static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
- u32 portid, u32 seq, int event, unsigned int flags)
+ struct inet6_fill_args *args)
{
struct nlmsghdr *nlh;
u32 preferred, valid;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
+ nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
+ sizeof(struct ifaddrmsg), args->flags);
if (!nlh)
return -EMSGSIZE;
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
+ if (args->netnsid >= 0 &&
+ nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
+ goto error;
+
if (!((ifa->flags&IFA_F_PERMANENT) &&
(ifa->prefered_lft == INFINITY_LIFE_TIME))) {
preferred = ifa->prefered_lft;
@@ -4855,7 +4892,7 @@ error:
}
static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
- u32 portid, u32 seq, int event, u16 flags)
+ struct inet6_fill_args *args)
{
struct nlmsghdr *nlh;
u8 scope = RT_SCOPE_UNIVERSE;
@@ -4864,10 +4901,15 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
+ nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
+ sizeof(struct ifaddrmsg), args->flags);
if (!nlh)
return -EMSGSIZE;
+ if (args->netnsid >= 0 &&
+ nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
+ return -EMSGSIZE;
+
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
@@ -4881,7 +4923,7 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
}
static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
- u32 portid, u32 seq, int event, unsigned int flags)
+ struct inet6_fill_args *args)
{
struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
int ifindex = dev ? dev->ifindex : 1;
@@ -4891,10 +4933,15 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
scope = RT_SCOPE_SITE;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
+ nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
+ sizeof(struct ifaddrmsg), args->flags);
if (!nlh)
return -EMSGSIZE;
+ if (args->netnsid >= 0 &&
+ nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
+ return -EMSGSIZE;
+
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
@@ -4907,68 +4954,56 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
return 0;
}
-enum addr_type_t {
- UNICAST_ADDR,
- MULTICAST_ADDR,
- ANYCAST_ADDR,
-};
-
/* called with rcu_read_lock() */
static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
- struct netlink_callback *cb, enum addr_type_t type,
- int s_ip_idx, int *p_ip_idx)
+ struct netlink_callback *cb, int s_ip_idx,
+ struct inet6_fill_args *fillargs)
{
struct ifmcaddr6 *ifmca;
struct ifacaddr6 *ifaca;
+ int ip_idx = 0;
int err = 1;
- int ip_idx = *p_ip_idx;
read_lock_bh(&idev->lock);
- switch (type) {
+ switch (fillargs->type) {
case UNICAST_ADDR: {
struct inet6_ifaddr *ifa;
+ fillargs->event = RTM_NEWADDR;
/* unicast address incl. temp addr */
list_for_each_entry(ifa, &idev->addr_list, if_list) {
- if (++ip_idx < s_ip_idx)
- continue;
- err = inet6_fill_ifaddr(skb, ifa,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWADDR,
- NLM_F_MULTI);
+ if (ip_idx < s_ip_idx)
+ goto next;
+ err = inet6_fill_ifaddr(skb, ifa, fillargs);
if (err < 0)
break;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+next:
+ ip_idx++;
}
break;
}
case MULTICAST_ADDR:
+ fillargs->event = RTM_GETMULTICAST;
+
/* multicast address */
for (ifmca = idev->mc_list; ifmca;
ifmca = ifmca->next, ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
- err = inet6_fill_ifmcaddr(skb, ifmca,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_GETMULTICAST,
- NLM_F_MULTI);
+ err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
if (err < 0)
break;
}
break;
case ANYCAST_ADDR:
+ fillargs->event = RTM_GETANYCAST;
/* anycast address */
for (ifaca = idev->ac_list; ifaca;
ifaca = ifaca->aca_next, ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
- err = inet6_fill_ifacaddr(skb, ifaca,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_GETANYCAST,
- NLM_F_MULTI);
+ err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
if (err < 0)
break;
}
@@ -4977,42 +5012,125 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
break;
}
read_unlock_bh(&idev->lock);
- *p_ip_idx = ip_idx;
+ cb->args[2] = ip_idx;
return err;
}
+static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
+ struct inet6_fill_args *fillargs,
+ struct net **tgt_net, struct sock *sk,
+ struct netlink_callback *cb)
+{
+ struct netlink_ext_ack *extack = cb->extack;
+ struct nlattr *tb[IFA_MAX+1];
+ struct ifaddrmsg *ifm;
+ int err, i;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
+ return -EINVAL;
+ }
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
+ return -EINVAL;
+ }
+
+ fillargs->ifindex = ifm->ifa_index;
+ if (fillargs->ifindex) {
+ cb->answer_flags |= NLM_F_DUMP_FILTERED;
+ fillargs->flags |= NLM_F_DUMP_FILTERED;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
+ ifa_ipv6_policy, extack);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i <= IFA_MAX; ++i) {
+ if (!tb[i])
+ continue;
+
+ if (i == IFA_TARGET_NETNSID) {
+ struct net *net;
+
+ fillargs->netnsid = nla_get_s32(tb[i]);
+ net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
+ if (IS_ERR(net)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
+ return PTR_ERR(net);
+ }
+ *tgt_net = net;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
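
Under strict checking, any header field or trailing attribute the kernel does not explicitly support is rejected rather than silently ignored, so userspace cannot mistake an unfiltered dump for a filtered one. A minimal user-space model of the header validation; the struct and error string mirror, but are not, the kernel's:

    #include <stdio.h>

    struct ifaddrmsg_model {
        unsigned char family, prefixlen, flags, scope;
        unsigned int index;
    };

    /* Model of the strict-check pattern: anything not explicitly understood
     * in the request header is an error instead of being silently ignored. */
    static int valid_dump_req(const struct ifaddrmsg_model *ifm, const char **errmsg)
    {
        if (ifm->prefixlen || ifm->flags || ifm->scope) {
            *errmsg = "Invalid values in header for address dump request";
            return -22;     /* -EINVAL; ifa_index is the one supported filter */
        }
        return 0;
    }

    int main(void)
    {
        struct ifaddrmsg_model req = { 0 };
        const char *err = NULL;

        req.scope = 1;
        if (valid_dump_req(&req, &err))
            printf("rejected: %s\n", err);
        return 0;
    }
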
static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
enum addr_type_t type)
{
+ const struct nlmsghdr *nlh = cb->nlh;
+ struct inet6_fill_args fillargs = {
+ .portid = NETLINK_CB(cb->skb).portid,
+ .seq = cb->nlh->nlmsg_seq,
+ .flags = NLM_F_MULTI,
+ .netnsid = -1,
+ .type = type,
+ };
struct net *net = sock_net(skb->sk);
+ struct net *tgt_net = net;
+ int idx, s_idx, s_ip_idx;
int h, s_h;
- int idx, ip_idx;
- int s_idx, s_ip_idx;
struct net_device *dev;
struct inet6_dev *idev;
struct hlist_head *head;
s_h = cb->args[0];
s_idx = idx = cb->args[1];
- s_ip_idx = ip_idx = cb->args[2];
+ s_ip_idx = cb->args[2];
+
+ if (cb->strict_check) {
+ int err;
+
+ err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
+ skb->sk, cb);
+ if (err < 0)
+ return err;
+
+ if (fillargs.ifindex) {
+ dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
+ if (!dev)
+ return -ENODEV;
+ idev = __in6_dev_get(dev);
+ if (idev) {
+ err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
+ &fillargs);
+ }
+ goto put_tgt_net;
+ }
+ }
rcu_read_lock();
- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
+ cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
- head = &net->dev_index_head[h];
+ head = &tgt_net->dev_index_head[h];
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
if (h > s_h || idx > s_idx)
s_ip_idx = 0;
- ip_idx = 0;
idev = __in6_dev_get(dev);
if (!idev)
goto cont;
- if (in6_dump_addrs(idev, skb, cb, type,
- s_ip_idx, &ip_idx) < 0)
+ if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
+ &fillargs) < 0)
goto done;
cont:
idx++;
@@ -5022,7 +5140,9 @@ done:
rcu_read_unlock();
cb->args[0] = h;
cb->args[1] = idx;
- cb->args[2] = ip_idx;
+put_tgt_net:
+ if (fillargs.netnsid >= 0)
+ put_net(tgt_net);
return skb->len;
}
@@ -5053,6 +5173,14 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
+ struct inet6_fill_args fillargs = {
+ .portid = NETLINK_CB(in_skb).portid,
+ .seq = nlh->nlmsg_seq,
+ .event = RTM_NEWADDR,
+ .flags = 0,
+ .netnsid = -1,
+ };
+ struct net *tgt_net = net;
struct ifaddrmsg *ifm;
struct nlattr *tb[IFA_MAX+1];
struct in6_addr *addr = NULL, *peer;
@@ -5066,15 +5194,24 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (err < 0)
return err;
+ if (tb[IFA_TARGET_NETNSID]) {
+ fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
+
+ tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
+ fillargs.netnsid);
+ if (IS_ERR(tgt_net))
+ return PTR_ERR(tgt_net);
+ }
+
addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
if (!addr)
return -EINVAL;
ifm = nlmsg_data(nlh);
if (ifm->ifa_index)
- dev = dev_get_by_index(net, ifm->ifa_index);
+ dev = dev_get_by_index(tgt_net, ifm->ifa_index);
- ifa = ipv6_get_ifaddr(net, addr, dev, 1);
+ ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
if (!ifa) {
err = -EADDRNOTAVAIL;
goto errout;
@@ -5086,20 +5223,22 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
goto errout_ifa;
}
- err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, RTM_NEWADDR, 0);
+ err = inet6_fill_ifaddr(skb, ifa, &fillargs);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout_ifa;
}
- err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
+ err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
errout_ifa:
in6_ifa_put(ifa);
errout:
if (dev)
dev_put(dev);
+ if (fillargs.netnsid >= 0)
+ put_net(tgt_net);
+
return err;
}
@@ -5107,13 +5246,20 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
{
struct sk_buff *skb;
struct net *net = dev_net(ifa->idev->dev);
+ struct inet6_fill_args fillargs = {
+ .portid = 0,
+ .seq = 0,
+ .event = event,
+ .flags = 0,
+ .netnsid = -1,
+ };
int err = -ENOBUFS;
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
if (!skb)
goto errout;
- err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
+ err = inet6_fill_ifaddr(skb, ifa, &fillargs);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
WARN_ON(err == -EMSGSIZE);
@@ -5529,6 +5675,31 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct ifinfomsg *ifm;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
+ return -EINVAL;
+ }
+
+ if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
+ return -EINVAL;
+ }
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
+ ifm->ifi_change || ifm->ifi_index) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
@@ -5538,6 +5709,16 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
struct inet6_dev *idev;
struct hlist_head *head;
+ /* only requests using strict checking can pass data to
+ * influence the dump
+ */
+ if (cb->strict_check) {
+ int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
+
+ if (err < 0)
+ return err;
+ }
+
s_h = cb->args[0];
s_idx = cb->args[1];
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 1d6ced37ad71..0d1ee82ee55b 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -458,20 +458,52 @@ static int ip6addrlbl_fill(struct sk_buff *skb,
return 0;
}
+static int ip6addrlbl_valid_dump_req(const struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct ifaddrlblmsg *ifal;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifal))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for address label dump request");
+ return -EINVAL;
+ }
+
+ ifal = nlmsg_data(nlh);
+ if (ifal->__ifal_reserved || ifal->ifal_prefixlen ||
+ ifal->ifal_flags || ifal->ifal_index || ifal->ifal_seq) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address label dump request");
+ return -EINVAL;
+ }
+
+ if (nlmsg_attrlen(nlh, sizeof(*ifal))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump request");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct ip6addrlbl_entry *p;
int idx = 0, s_idx = cb->args[0];
int err;
+ if (cb->strict_check) {
+ err = ip6addrlbl_valid_dump_req(nlh, cb->extack);
+ if (err < 0)
+ return err;
+ }
+
rcu_read_lock();
hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) {
if (idx >= s_idx) {
err = ip6addrlbl_fill(skb, p,
net->ipv6.ip6addrlbl_table.seq,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
+ nlh->nlmsg_seq,
RTM_NEWADDRLABEL,
NLM_F_MULTI);
if (err < 0)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 9a4261e50272..3f4d61017a69 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -209,6 +209,7 @@ lookup_protocol:
np->hop_limit = -1;
np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
np->mc_loop = 1;
+ np->mc_all = 1;
np->pmtudisc = IPV6_PMTUDISC_WANT;
np->repflow = net->ipv6.sysctl.flowlabel_reflect;
sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
@@ -467,12 +468,10 @@ void inet6_destroy_sock(struct sock *sk)
/* Release rx options */
skb = xchg(&np->pktoptions, NULL);
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
skb = xchg(&np->rxpmtu, NULL);
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
/* Free flowlabels */
fl6_free_socklist(sk);
@@ -902,6 +901,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = {
.inet6_bind = __inet6_bind,
+ .udp6_lib_lookup = __udp6_lib_lookup,
};
static int __init inet6_init(void)
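The inet6_destroy_sock() hunk above drops the NULL guards because kfree_skb() already returns early for a NULL pointer; the xchg-and-free idiom, reduced to a sketch with a hypothetical holder struct:

#include <linux/skbuff.h>

struct pkt_holder {			/* hypothetical stand-in for ipv6_pinfo */
	struct sk_buff *opt_skb;
};

static void pkt_holder_flush(struct pkt_holder *h)
{
	/* xchg() detaches the pointer atomically; kfree_skb(NULL) is a
	 * no-op, so no separate NULL test is needed.
	 */
	kfree_skb(xchg(&h->opt_skb, NULL));
}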
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 88a7579c23bd..63b2b66f9dfa 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -601,12 +601,11 @@ static void esp_input_done_esn(struct crypto_async_request *base, int err)
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
- struct ip_esp_hdr *esph;
struct crypto_aead *aead = x->data;
struct aead_request *req;
struct sk_buff *trailer;
int ivlen = crypto_aead_ivsize(aead);
- int elen = skb->len - sizeof(*esph) - ivlen;
+ int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
int nfrags;
int assoclen;
int seqhilen;
@@ -616,7 +615,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
u8 *iv;
struct scatterlist *sg;
- if (!pskb_may_pull(skb, sizeof(*esph) + ivlen)) {
+ if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
ret = -EINVAL;
goto out;
}
@@ -626,7 +625,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
}
- assoclen = sizeof(*esph);
+ assoclen = sizeof(struct ip_esp_hdr);
seqhilen = 0;
if (x->props.flags & XFRM_STATE_ESN) {
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5516f55e214b..2a058b408a6a 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -29,6 +29,7 @@
#include <linux/list.h>
#include <linux/slab.h>
+#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
@@ -46,6 +47,7 @@ struct fib6_cleaner {
int (*func)(struct fib6_info *, void *arg);
int sernum;
void *arg;
+ bool skip_notify;
};
#ifdef CONFIG_IPV6_SUBTREES
@@ -160,8 +162,6 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags)
}
INIT_LIST_HEAD(&f6i->fib6_siblings);
- f6i->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
-
atomic_inc(&f6i->fib6_ref);
return f6i;
@@ -171,7 +171,6 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
{
struct fib6_info *f6i = container_of(head, struct fib6_info, rcu);
struct rt6_exception_bucket *bucket;
- struct dst_metrics *m;
WARN_ON(f6i->fib6_node);
@@ -196,6 +195,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
*ppcpu_rt = NULL;
}
}
+
+ free_percpu(f6i->rt6i_pcpu);
}
lwtstate_put(f6i->fib6_nh.nh_lwtstate);
@@ -203,9 +204,7 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
if (f6i->fib6_nh.nh_dev)
dev_put(f6i->fib6_nh.nh_dev);
- m = f6i->fib6_metrics;
- if (m != &dst_default_metrics && refcount_dec_and_test(&m->refcnt))
- kfree(m);
+ ip_fib_metrics_put(f6i->fib6_metrics);
kfree(f6i);
}
@@ -568,17 +567,31 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
+ struct rt6_rtnl_dump_arg arg = {};
unsigned int h, s_h;
unsigned int e = 0, s_e;
- struct rt6_rtnl_dump_arg arg;
struct fib6_walker *w;
struct fib6_table *tb;
struct hlist_head *head;
int res = 0;
- s_h = cb->args[0];
- s_e = cb->args[1];
+ if (cb->strict_check) {
+ int err;
+
+ err = ip_valid_fib_dump_req(net, nlh, &arg.filter, cb);
+ if (err < 0)
+ return err;
+ } else if (nlmsg_len(nlh) >= sizeof(struct rtmsg)) {
+ struct rtmsg *rtm = nlmsg_data(nlh);
+
+ arg.filter.flags = rtm->rtm_flags & (RTM_F_PREFIX|RTM_F_CLONED);
+ }
+
+ /* fib entries are never clones */
+ if (arg.filter.flags & RTM_F_CLONED)
+ return skb->len;
w = (void *)cb->args[2];
if (!w) {
@@ -604,6 +617,20 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
arg.net = net;
w->args = &arg;
+ if (arg.filter.table_id) {
+ tb = fib6_get_table(net, arg.filter.table_id);
+ if (!tb) {
+ NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
+ return -ENOENT;
+ }
+
+ res = fib6_dump_table(tb, skb, cb);
+ goto out;
+ }
+
+ s_h = cb->args[0];
+ s_e = cb->args[1];
+
rcu_read_lock();
for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
@@ -613,16 +640,16 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
goto next;
res = fib6_dump_table(tb, skb, cb);
if (res != 0)
- goto out;
+ goto out_unlock;
next:
e++;
}
}
-out:
+out_unlock:
rcu_read_unlock();
cb->args[1] = e;
cb->args[0] = h;
-
+out:
res = res < 0 ? res : skb->len;
if (res <= 0)
fib6_dump_end(cb);
@@ -1952,6 +1979,7 @@ static int fib6_clean_node(struct fib6_walker *w)
struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w);
struct nl_info info = {
.nl_net = c->net,
+ .skip_notify = c->skip_notify,
};
if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
@@ -2003,7 +2031,7 @@ static int fib6_clean_node(struct fib6_walker *w)
static void fib6_clean_tree(struct net *net, struct fib6_node *root,
int (*func)(struct fib6_info *, void *arg),
- int sernum, void *arg)
+ int sernum, void *arg, bool skip_notify)
{
struct fib6_cleaner c;
@@ -2015,13 +2043,14 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
c.sernum = sernum;
c.arg = arg;
c.net = net;
+ c.skip_notify = skip_notify;
fib6_walk(net, &c.w);
}
static void __fib6_clean_all(struct net *net,
int (*func)(struct fib6_info *, void *),
- int sernum, void *arg)
+ int sernum, void *arg, bool skip_notify)
{
struct fib6_table *table;
struct hlist_head *head;
@@ -2033,7 +2062,7 @@ static void __fib6_clean_all(struct net *net,
hlist_for_each_entry_rcu(table, head, tb6_hlist) {
spin_lock_bh(&table->tb6_lock);
fib6_clean_tree(net, &table->tb6_root,
- func, sernum, arg);
+ func, sernum, arg, skip_notify);
spin_unlock_bh(&table->tb6_lock);
}
}
@@ -2043,14 +2072,21 @@ static void __fib6_clean_all(struct net *net,
void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *),
void *arg)
{
- __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg);
+ __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg, false);
+}
+
+void fib6_clean_all_skip_notify(struct net *net,
+ int (*func)(struct fib6_info *, void *),
+ void *arg)
+{
+ __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg, true);
}
static void fib6_flush_trees(struct net *net)
{
int new_sernum = fib6_new_sernum(net);
- __fib6_clean_all(net, NULL, new_sernum, NULL);
+ __fib6_clean_all(net, NULL, new_sernum, NULL, false);
}
/*
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index e493b041d4ac..515adbdba1d2 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -427,35 +427,17 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct net *net = dev_net(skb->dev);
- const struct gre_base_hdr *greh;
const struct ipv6hdr *ipv6h;
- int grehlen = sizeof(*greh);
+ struct tnl_ptk_info tpi;
struct ip6_tnl *t;
- int key_off = 0;
- __be16 flags;
- __be32 key;
- if (!pskb_may_pull(skb, offset + grehlen))
- return;
- greh = (const struct gre_base_hdr *)(skb->data + offset);
- flags = greh->flags;
- if (flags & (GRE_VERSION | GRE_ROUTING))
+ if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
+ offset) < 0)
return;
- if (flags & GRE_CSUM)
- grehlen += 4;
- if (flags & GRE_KEY) {
- key_off = grehlen + offset;
- grehlen += 4;
- }
- if (!pskb_may_pull(skb, offset + grehlen))
- return;
ipv6h = (const struct ipv6hdr *)skb->data;
- greh = (const struct gre_base_hdr *)(skb->data + offset);
- key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
-
t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
- key, greh->protocol);
+ tpi.key, tpi.proto);
if (!t)
return;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 6242682be876..96577e742afd 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -178,7 +178,8 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
*/
if ((ipv6_addr_loopback(&hdr->saddr) ||
ipv6_addr_loopback(&hdr->daddr)) &&
- !(dev->flags & IFF_LOOPBACK))
+ !(dev->flags & IFF_LOOPBACK) &&
+ !netif_is_l3_master(dev))
goto err;
/* RFC4291 Errata ID: 3480
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 37ff4805b20c..c7e495f12011 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -115,6 +115,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
payload_len = skb->len - nhoff - sizeof(*ipv6h);
ipv6h->payload_len = htons(payload_len);
skb->network_header = (u8 *)ipv6h - skb->head;
+ skb_reset_mac_len(skb);
if (udpfrag) {
int err = ip6_find_1stfragopt(skb, &prevhdr);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 16f200f06500..89e0d5118afe 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
kfree_skb(skb);
return -ENOBUFS;
}
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
consume_skb(skb);
skb = skb2;
- /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
- * it is safe to call in our context (socket lock not held)
- */
- skb_set_owner_w(skb, (struct sock *)sk);
}
if (opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
@@ -727,7 +725,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
skb = frag;
frag = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
}
kfree(tmp_hdr);
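skb_mark_not_on_list(), used here and again in the reassembly hunks below, is at this point in the series nothing more than a named wrapper for clearing skb->next:

/* As defined in include/linux/skbuff.h; naming the operation lets a
 * later series change how skbs are chained without revisiting every
 * "skb->next = NULL" call site.
 */
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}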
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 419960b0ba16..a9d06d4dd057 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1184,11 +1184,6 @@ route_lookup:
}
skb_dst_set(skb, dst);
- if (encap_limit >= 0) {
- init_tel_txopt(&opt, encap_limit);
- ipv6_push_frag_opts(skb, &opt.ops, &proto);
- }
-
if (hop_limit == 0) {
if (skb->protocol == htons(ETH_P_IP))
hop_limit = ip_hdr(skb)->ttl;
@@ -1210,6 +1205,11 @@ route_lookup:
if (err)
return err;
+ if (encap_limit >= 0) {
+ init_tel_txopt(&opt, encap_limit);
+ ipv6_push_frag_opts(skb, &opt.ops, &proto);
+ }
+
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
@@ -1234,7 +1234,7 @@ static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- const struct iphdr *iph = ip_hdr(skb);
+ const struct iphdr *iph;
int encap_limit = -1;
struct flowi6 fl6;
__u8 dsfield;
@@ -1242,6 +1242,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
+ /* ensure we can access the full inner ip header */
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return -1;
+
+ iph = ip_hdr(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
tproto = READ_ONCE(t->parms.proto);
@@ -1306,7 +1311,7 @@ static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ipv6hdr *ipv6h;
int encap_limit = -1;
__u16 offset;
struct flowi6 fl6;
@@ -1315,6 +1320,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+ return -1;
+
+ ipv6h = ipv6_hdr(skb);
tproto = READ_ONCE(t->parms.proto);
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
ip6_tnl_addr_conflict(t, ipv6h))
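Both tunnel-xmit fixes follow the same rule: pskb_may_pull() may reallocate the skb head, so header pointers must be taken after the pull, never cached across it. Condensed into a sketch with a hypothetical name:

/* Validate linear access to the inner IPv4 header before touching it.
 * Taking ip_hdr(skb) before the pull, as the old code effectively did,
 * risks reading through a stale pointer if the head is reallocated.
 */
static int example_tnl_pull_check(struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return -1;

	iph = ip_hdr(skb);		/* safe only after the pull */
	return iph->version == 4 ? 0 : -1;
}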
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index d0b7e0249c13..c3317ffb09eb 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -85,7 +85,8 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr_table *mrt);
static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, struct mfc6_cache *cache);
+ struct net_device *dev, struct sk_buff *skb,
+ struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
mifi_t mifi, int assert);
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
@@ -138,6 +139,9 @@ static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
.flags = FIB_LOOKUP_NOREF,
};
+ /* update flow if oif or iif point to device enslaved to l3mdev */
+ l3mdev_update_flow(net, flowi6_to_flowi(flp6));
+
err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
flowi6_to_flowi(flp6), 0, &arg);
if (err < 0)
@@ -164,7 +168,9 @@ static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
return -EINVAL;
}
- mrt = ip6mr_get_table(rule->fr_net, rule->table);
+ arg->table = fib_rule_get_table(rule, arg);
+
+ mrt = ip6mr_get_table(rule->fr_net, arg->table);
if (!mrt)
return -EAGAIN;
res->mrt = mrt;
@@ -1014,7 +1020,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
}
rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
} else
- ip6_mr_forward(net, mrt, skb, c);
+ ip6_mr_forward(net, mrt, skb->dev, skb, c);
}
}
@@ -1120,7 +1126,7 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
/* Queue a packet for resolution. It gets locked cache entry! */
static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
- struct sk_buff *skb)
+ struct sk_buff *skb, struct net_device *dev)
{
struct mfc6_cache *c;
bool found = false;
@@ -1180,6 +1186,10 @@ static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
kfree_skb(skb);
err = -ENOBUFS;
} else {
+ if (dev) {
+ skb->dev = dev;
+ skb->skb_iif = dev->ifindex;
+ }
skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
err = 0;
}
@@ -2043,11 +2053,12 @@ static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
}
static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, struct mfc6_cache *c)
+ struct net_device *dev, struct sk_buff *skb,
+ struct mfc6_cache *c)
{
int psend = -1;
int vif, ct;
- int true_vifi = ip6mr_find_vif(mrt, skb->dev);
+ int true_vifi = ip6mr_find_vif(mrt, dev);
vif = c->_c.mfc_parent;
c->_c.mfc_un.res.pkt++;
@@ -2073,7 +2084,7 @@ static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
/*
* Wrong interface: drop packet and (maybe) send PIM assert.
*/
- if (mrt->vif_table[vif].dev != skb->dev) {
+ if (mrt->vif_table[vif].dev != dev) {
c->_c.mfc_un.res.wrong_if++;
if (true_vifi >= 0 && mrt->mroute_do_assert &&
@@ -2154,6 +2165,19 @@ int ip6_mr_input(struct sk_buff *skb)
.flowi6_mark = skb->mark,
};
int err;
+ struct net_device *dev;
+
+ /* skb->dev passed in is the master dev for vrfs.
+ * Get the proper interface that does have a vif associated with it.
+ */
+ dev = skb->dev;
+ if (netif_is_l3_master(skb->dev)) {
+ dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
+ if (!dev) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+ }
err = ip6mr_fib_lookup(net, &fl6, &mrt);
if (err < 0) {
@@ -2165,7 +2189,7 @@ int ip6_mr_input(struct sk_buff *skb)
cache = ip6mr_cache_find(mrt,
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
if (!cache) {
- int vif = ip6mr_find_vif(mrt, skb->dev);
+ int vif = ip6mr_find_vif(mrt, dev);
if (vif >= 0)
cache = ip6mr_cache_find_any(mrt,
@@ -2179,9 +2203,9 @@ int ip6_mr_input(struct sk_buff *skb)
if (!cache) {
int vif;
- vif = ip6mr_find_vif(mrt, skb->dev);
+ vif = ip6mr_find_vif(mrt, dev);
if (vif >= 0) {
- int err = ip6mr_cache_unresolved(mrt, vif, skb);
+ int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
read_unlock(&mrt_lock);
return err;
@@ -2191,7 +2215,7 @@ int ip6_mr_input(struct sk_buff *skb)
return -ENODEV;
}
- ip6_mr_forward(net, mrt, skb, cache);
+ ip6_mr_forward(net, mrt, dev, skb, cache);
read_unlock(&mrt_lock);
@@ -2257,7 +2281,7 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
iph->saddr = rt->rt6i_src.addr;
iph->daddr = rt->rt6i_dst.addr;
- err = ip6mr_cache_unresolved(mrt, vif, skb2);
+ err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
read_unlock(&mrt_lock);
return err;
@@ -2433,6 +2457,30 @@ errout:
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
+ struct fib_dump_filter filter = {};
+ int err;
+
+ if (cb->strict_check) {
+ err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
+ &filter, cb);
+ if (err < 0)
+ return err;
+ }
+
+ if (filter.table_id) {
+ struct mr_table *mrt;
+
+ mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
+ if (!mrt) {
+ NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
+ return -ENOENT;
+ }
+ err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
+ &mfc_unres_lock, &filter);
+ return skb->len ? : err;
+ }
+
return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
- _ip6mr_fill_mroute, &mfc_unres_lock);
+ _ip6mr_fill_mroute, &mfc_unres_lock, &filter);
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index c0cac9cc3a28..381ce38940ae 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -674,6 +674,13 @@ done:
retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
break;
}
+ case IPV6_MULTICAST_ALL:
+ if (optlen < sizeof(int))
+ goto e_inval;
+ np->mc_all = valbool;
+ retv = 0;
+ break;
+
case MCAST_JOIN_GROUP:
case MCAST_LEAVE_GROUP:
{
@@ -1266,6 +1273,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val = np->mcast_oif;
break;
+ case IPV6_MULTICAST_ALL:
+ val = np->mc_all;
+ break;
+
case IPV6_UNICAST_IF:
val = (__force int)htonl((__u32) np->ucast_oif);
break;
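Userspace usage of the new option mirrors the long-standing IPv4 IP_MULTICAST_ALL; a sketch, guarding the constant for pre-4.20 headers:

#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPV6_MULTICAST_ALL
#define IPV6_MULTICAST_ALL 29		/* uapi value added with this change */
#endif

/* Restrict the socket to multicast groups it has explicitly joined,
 * instead of receiving traffic for any group another socket on the
 * same port is bound to (mc_all defaults to 1, i.e. the old behavior).
 */
static int disable_mc_all(int fd)
{
	int zero = 0;

	return setsockopt(fd, IPPROTO_IPV6, IPV6_MULTICAST_ALL,
			  &zero, sizeof(zero));
}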
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 4ae54aaca373..21f6deb2aec9 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -636,7 +636,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
}
if (!mc) {
rcu_read_unlock();
- return true;
+ return np->mc_all;
}
read_lock(&mc->sflock);
psl = mc->sflist;
@@ -2436,17 +2436,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
{
int err;
- /* callers have the socket lock and rtnl lock
- * so no other readers or writers of iml or its sflist
- */
+ write_lock_bh(&iml->sflock);
if (!iml->sflist) {
/* any-source empty exclude case */
- return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
+ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
+ } else {
+ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
+ iml->sflist->sl_count, iml->sflist->sl_addr, 0);
+ sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
+ iml->sflist = NULL;
}
- err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
- iml->sflist->sl_count, iml->sflist->sl_addr, 0);
- sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
- iml->sflist = NULL;
+ write_unlock_bh(&iml->sflock);
return err;
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0ec273997d1d..a25cfdd47c89 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1533,7 +1533,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
if (!ndopts.nd_opts_rh) {
ip6_redirect_no_header(skb, dev_net(skb->dev),
- skb->dev->ifindex, 0);
+ skb->dev->ifindex);
return;
}
@@ -1784,6 +1784,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
change_info = ptr;
if (change_info->flags_changed & IFF_NOARP)
neigh_changeaddr(&nd_tbl, dev);
+ if (!netif_carrier_ok(dev))
+ neigh_carrier_down(&nd_tbl, dev);
break;
case NETDEV_DOWN:
neigh_ifdown(&nd_tbl, dev);
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 8b147440fbdc..af737b47b9b5 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -65,7 +65,10 @@ ipv6header_mt6(const struct sk_buff *skb, struct xt_action_param *par)
}
hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
- BUG_ON(hp == NULL);
+ if (!hp) {
+ par->hotdrop = true;
+ return false;
+ }
/* Calculate the header length */
if (nexthdr == NEXTHDR_FRAGMENT)
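The identical BUG_ON-to-hotdrop conversion follows in ip6t_rt.c; the shape of the safe pattern, as a hypothetical match stub:

/* A failed skb_header_pointer() means the packet is too short for the
 * requested header, which warrants dropping that one packet (hotdrop),
 * not crashing the machine with BUG_ON().
 */
static bool example_mt6(const struct sk_buff *skb, struct xt_action_param *par,
			unsigned int ptr)
{
	struct ipv6_opt_hdr _hdr;
	const struct ipv6_opt_hdr *hp;

	hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
	if (!hp) {
		par->hotdrop = true;
		return false;
	}

	return hp->hdrlen == 0;	/* placeholder match condition */
}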
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index 2c99b94eeca3..21bf6bf04323 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -137,7 +137,10 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
sizeof(_addr),
&_addr);
- BUG_ON(ap == NULL);
+ if (ap == NULL) {
+ par->hotdrop = true;
+ return false;
+ }
if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
pr_debug("i=%d temp=%d;\n", i, temp);
@@ -166,7 +169,10 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
+ temp * sizeof(_addr),
sizeof(_addr),
&_addr);
- BUG_ON(ap == NULL);
+ if (ap == NULL) {
+ par->hotdrop = true;
+ return false;
+ }
if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
break;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 8f68a518d9db..b8ac369f98ad 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -450,7 +450,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
sub_frag_mem_limit(fq->q.net, head->truesize);
head->ignore_df = 1;
- head->next = NULL;
+ skb_mark_not_on_list(head);
head->dev = dev;
head->tstamp = fq->q.stamp;
ipv6_hdr(head)->payload_len = htons(payload_len);
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index e6eb7cf9b54f..3e4bf2286abe 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -87,18 +87,30 @@ static struct notifier_block masq_dev_notifier = {
struct masq_dev_work {
struct work_struct work;
struct net *net;
+ struct in6_addr addr;
int ifindex;
};
+static int inet_cmp(struct nf_conn *ct, void *work)
+{
+ struct masq_dev_work *w = (struct masq_dev_work *)work;
+ struct nf_conntrack_tuple *tuple;
+
+ if (!device_cmp(ct, (void *)(long)w->ifindex))
+ return 0;
+
+ tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
+}
+
static void iterate_cleanup_work(struct work_struct *work)
{
struct masq_dev_work *w;
- long index;
w = container_of(work, struct masq_dev_work, work);
- index = w->ifindex;
- nf_ct_iterate_cleanup_net(w->net, device_cmp, (void *)index, 0, 0);
+ nf_ct_iterate_cleanup_net(w->net, inet_cmp, (void *)w, 0, 0);
put_net(w->net);
kfree(w);
@@ -147,6 +159,7 @@ static int masq_inet_event(struct notifier_block *this,
INIT_WORK(&w->work, iterate_cleanup_work);
w->ifindex = dev->ifindex;
w->net = net;
+ w->addr = ifa->addr;
schedule_work(&w->work);
return NOTIFY_DONE;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 413d98bf24f4..5e0efd3954e9 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -651,8 +651,6 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb->tstamp = sockc->transmit_time;
- skb_dst_set(skb, &rt->dst);
- *dstp = NULL;
skb_put(skb, length);
skb_reset_network_header(skb);
@@ -665,8 +663,14 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->transport_header = skb->network_header;
err = memcpy_from_msg(iph, msg, length);
- if (err)
- goto error_fault;
+ if (err) {
+ err = -EFAULT;
+ kfree_skb(skb);
+ goto error;
+ }
+
+ skb_dst_set(skb, &rt->dst);
+ *dstp = NULL;
/* if egress device is enslaved to an L3 master device pass the
* skb to its handler for processing
@@ -675,21 +679,28 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
if (unlikely(!skb))
return 0;
+ /* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
+ * in the error path. Since skb has been freed, the dst could
+ * have been queued for deletion.
+ */
+ rcu_read_lock();
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
NULL, rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
- if (err)
- goto error;
+ if (err) {
+ IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+ rcu_read_unlock();
+ goto error_check;
+ }
+ rcu_read_unlock();
out:
return 0;
-error_fault:
- err = -EFAULT;
- kfree_skb(skb);
error:
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+error_check:
if (err == -ENOBUFS && !np->recverr)
err = 0;
return err;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 5c5b4f79296e..5c3c92713096 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -145,7 +145,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
*/
if (end < fq->q.len ||
((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
- goto err;
+ goto discard_fq;
fq->q.flags |= INET_FRAG_LAST_IN;
fq->q.len = end;
} else {
@@ -162,20 +162,20 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
if (end > fq->q.len) {
/* Some bits beyond end -> corruption. */
if (fq->q.flags & INET_FRAG_LAST_IN)
- goto err;
+ goto discard_fq;
fq->q.len = end;
}
}
if (end == offset)
- goto err;
+ goto discard_fq;
/* Point into the IP datagram 'data' part. */
if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
- goto err;
+ goto discard_fq;
if (pskb_trim_rcsum(skb, end - offset))
- goto err;
+ goto discard_fq;
/* Find out which fragments are in front and at the back of us
* in the chain of fragments so far. We must know where to put
@@ -388,7 +388,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
}
sub_frag_mem_limit(fq->q.net, sum_truesize);
- head->next = NULL;
+ skb_mark_not_on_list(head);
head->dev = dev;
head->tstamp = fq->q.stamp;
ipv6_hdr(head)->payload_len = htons(payload_len);
@@ -418,6 +418,7 @@ out_fail:
rcu_read_lock();
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
rcu_read_unlock();
+ inet_frag_kill(&fq->q);
return -1;
}
@@ -553,7 +554,6 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
table[0].data = &net->ipv6.frags.high_thresh;
table[0].extra1 = &net->ipv6.frags.low_thresh;
- table[0].extra2 = &init_net.ipv6.frags.high_thresh;
table[1].data = &net->ipv6.frags.low_thresh;
table[1].extra2 = &net->ipv6.frags.high_thresh;
table[2].data = &net->ipv6.frags.timeout;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 18e00ce1719a..e3226284e480 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -368,7 +368,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
struct fib6_info *from;
struct inet6_dev *idev;
- dst_destroy_metrics_generic(dst);
+ ip_dst_metrics_put(dst);
rt6_uncached_list_del(rt);
idev = rt->rt6i_idev;
@@ -517,10 +517,11 @@ static void rt6_probe_deferred(struct work_struct *w)
static void rt6_probe(struct fib6_info *rt)
{
- struct __rt6_probe_work *work;
+ struct __rt6_probe_work *work = NULL;
const struct in6_addr *nh_gw;
struct neighbour *neigh;
struct net_device *dev;
+ struct inet6_dev *idev;
/*
* Okay, this does not seem to be appropriate
@@ -536,15 +537,12 @@ static void rt6_probe(struct fib6_info *rt)
nh_gw = &rt->fib6_nh.nh_gw;
dev = rt->fib6_nh.nh_dev;
rcu_read_lock_bh();
+ idev = __in6_dev_get(dev);
neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
if (neigh) {
- struct inet6_dev *idev;
-
if (neigh->nud_state & NUD_VALID)
goto out;
- idev = __in6_dev_get(dev);
- work = NULL;
write_lock(&neigh->lock);
if (!(neigh->nud_state & NUD_VALID) &&
time_after(jiffies,
@@ -554,11 +552,13 @@ static void rt6_probe(struct fib6_info *rt)
__neigh_set_probe_once(neigh);
}
write_unlock(&neigh->lock);
- } else {
+ } else if (time_after(jiffies, rt->last_probe +
+ idev->cnf.rtr_probe_interval)) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
}
if (work) {
+ rt->last_probe = jiffies;
INIT_WORK(&work->work, rt6_probe_deferred);
work->target = *nh_gw;
dev_hold(dev);
@@ -946,8 +946,6 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
{
- rt->dst.flags |= fib6_info_dst_flags(ort);
-
if (ort->fib6_flags & RTF_REJECT) {
ip6_rt_init_dst_reject(rt, ort);
return;
@@ -977,7 +975,7 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
rt->rt6i_flags &= ~RTF_EXPIRES;
rcu_assign_pointer(rt->from, from);
- dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
+ ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}
/* Caller must already hold reference to @ort */
@@ -995,7 +993,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
#ifdef CONFIG_IPV6_SUBTREES
rt->rt6i_src = ort->fib6_src;
#endif
- rt->rt6i_prefsrc = ort->fib6_prefsrc;
}
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
@@ -1449,11 +1446,6 @@ static int rt6_insert_exception(struct rt6_info *nrt,
if (ort->fib6_src.plen)
src_key = &nrt->rt6i_src.addr;
#endif
-
- /* Update rt6i_prefsrc as it could be changed
- * in rt6_remove_prefsrc()
- */
- nrt->rt6i_prefsrc = ort->fib6_prefsrc;
/* rt6_mtu_change() might lower mtu on ort.
* Only insert this exception route if its mtu
* is less than ort's mtu value.
@@ -1635,25 +1627,6 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
rcu_read_unlock();
}
-static void rt6_exceptions_remove_prefsrc(struct fib6_info *rt)
-{
- struct rt6_exception_bucket *bucket;
- struct rt6_exception *rt6_ex;
- int i;
-
- bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
- lockdep_is_held(&rt6_exception_lock));
-
- if (bucket) {
- for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
- hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
- rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
- }
- bucket++;
- }
- }
-}
-
static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
struct rt6_info *rt, int mtu)
{
@@ -2098,7 +2071,8 @@ struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
{
bool any_src;
- if (rt6_need_strict(&fl6->daddr)) {
+ if (ipv6_addr_type(&fl6->daddr) &
+ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
struct dst_entry *dst;
dst = l3mdev_link_scope_lookup(net, fl6);
@@ -2368,15 +2342,14 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
{
const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
struct dst_entry *dst;
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_oif = oif;
- fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
- fl6.daddr = iph->daddr;
- fl6.saddr = iph->saddr;
- fl6.flowlabel = ip6_flowinfo(iph);
- fl6.flowi6_uid = uid;
+ struct flowi6 fl6 = {
+ .flowi6_oif = oif,
+ .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .flowlabel = ip6_flowinfo(iph),
+ .flowi6_uid = uid,
+ };
dst = ip6_route_output(net, NULL, &fl6);
if (!dst->error)
@@ -2527,16 +2500,15 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
{
const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
struct dst_entry *dst;
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_iif = LOOPBACK_IFINDEX;
- fl6.flowi6_oif = oif;
- fl6.flowi6_mark = mark;
- fl6.daddr = iph->daddr;
- fl6.saddr = iph->saddr;
- fl6.flowlabel = ip6_flowinfo(iph);
- fl6.flowi6_uid = uid;
+ struct flowi6 fl6 = {
+ .flowi6_iif = LOOPBACK_IFINDEX,
+ .flowi6_oif = oif,
+ .flowi6_mark = mark,
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .flowlabel = ip6_flowinfo(iph),
+ .flowi6_uid = uid,
+ };
dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
rt6_do_redirect(dst, NULL, skb);
@@ -2544,21 +2516,18 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
}
EXPORT_SYMBOL_GPL(ip6_redirect);
-void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
- u32 mark)
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
struct dst_entry *dst;
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_iif = LOOPBACK_IFINDEX;
- fl6.flowi6_oif = oif;
- fl6.flowi6_mark = mark;
- fl6.daddr = msg->dest;
- fl6.saddr = iph->daddr;
- fl6.flowi6_uid = sock_net_uid(net, NULL);
+ struct flowi6 fl6 = {
+ .flowi6_iif = LOOPBACK_IFINDEX,
+ .flowi6_oif = oif,
+ .daddr = msg->dest,
+ .saddr = iph->daddr,
+ .flowi6_uid = sock_net_uid(net, NULL),
+ };
dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
rt6_do_redirect(dst, NULL, skb);
@@ -2729,24 +2698,6 @@ out:
return entries > rt_max_size;
}
-static int ip6_convert_metrics(struct net *net, struct fib6_info *rt,
- struct fib6_config *cfg)
-{
- struct dst_metrics *p;
-
- if (!cfg->fc_mx)
- return 0;
-
- p = kzalloc(sizeof(*rt->fib6_metrics), GFP_KERNEL);
- if (unlikely(!p))
- return -ENOMEM;
-
- refcount_set(&p->refcnt, 1);
- rt->fib6_metrics = p;
-
- return ip_metrics_convert(net, cfg->fc_mx, cfg->fc_mx_len, p->metrics);
-}
-
static struct rt6_info *ip6_nh_lookup_table(struct net *net,
struct fib6_config *cfg,
const struct in6_addr *gw_addr,
@@ -3022,13 +2973,17 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (!rt)
goto out;
+ rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len);
+ if (IS_ERR(rt->fib6_metrics)) {
+ err = PTR_ERR(rt->fib6_metrics);
+ /* Do not leave garbage there. */
+ rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
+ goto out;
+ }
+
if (cfg->fc_flags & RTF_ADDRCONF)
rt->dst_nocount = true;
- err = ip6_convert_metrics(net, rt, cfg);
- if (err < 0)
- goto out;
-
if (cfg->fc_flags & RTF_EXPIRES)
fib6_set_expires(rt, jiffies +
clock_t_to_jiffies(cfg->fc_expires));
@@ -3137,8 +3092,6 @@ install_route:
rt->fib6_nh.nh_dev = dev;
rt->fib6_table = table;
- cfg->fc_nlinfo.nl_net = dev_net(dev);
-
if (idev)
in6_dev_put(idev);
@@ -3630,23 +3583,23 @@ static void rtmsg_to_fib6_config(struct net *net,
struct in6_rtmsg *rtmsg,
struct fib6_config *cfg)
{
- memset(cfg, 0, sizeof(*cfg));
-
- cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
- : RT6_TABLE_MAIN;
- cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
- cfg->fc_metric = rtmsg->rtmsg_metric;
- cfg->fc_expires = rtmsg->rtmsg_info;
- cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
- cfg->fc_src_len = rtmsg->rtmsg_src_len;
- cfg->fc_flags = rtmsg->rtmsg_flags;
- cfg->fc_type = rtmsg->rtmsg_type;
+ *cfg = (struct fib6_config){
+ .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
+ : RT6_TABLE_MAIN,
+ .fc_ifindex = rtmsg->rtmsg_ifindex,
+ .fc_metric = rtmsg->rtmsg_metric,
+ .fc_expires = rtmsg->rtmsg_info,
+ .fc_dst_len = rtmsg->rtmsg_dst_len,
+ .fc_src_len = rtmsg->rtmsg_src_len,
+ .fc_flags = rtmsg->rtmsg_flags,
+ .fc_type = rtmsg->rtmsg_type,
- cfg->fc_nlinfo.nl_net = net;
+ .fc_nlinfo.nl_net = net,
- cfg->fc_dst = rtmsg->rtmsg_dst;
- cfg->fc_src = rtmsg->rtmsg_src;
- cfg->fc_gateway = rtmsg->rtmsg_gateway;
+ .fc_dst = rtmsg->rtmsg_dst,
+ .fc_src = rtmsg->rtmsg_src,
+ .fc_gateway = rtmsg->rtmsg_gateway,
+ };
}
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
@@ -3753,6 +3706,7 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
if (!f6i)
return ERR_PTR(-ENOMEM);
+ f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0);
f6i->dst_nocount = true;
f6i->dst_host = true;
f6i->fib6_protocol = RTPROT_KERNEL;
@@ -3795,8 +3749,6 @@ static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
spin_lock_bh(&rt6_exception_lock);
/* remove prefsrc entry */
rt->fib6_prefsrc.plen = 0;
- /* need to update cache as well */
- rt6_exceptions_remove_prefsrc(rt);
spin_unlock_bh(&rt6_exception_lock);
}
return 0;
@@ -4074,8 +4026,12 @@ void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
.event = event,
},
};
+ struct net *net = dev_net(dev);
- fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
+ if (net->ipv6.sysctl.skip_notify_on_dev_down)
+ fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
+ else
+ fib6_clean_all(net, fib6_ifdown, &arg);
}
void rt6_disable_ip(struct net_device *dev, unsigned long event)
@@ -4165,20 +4121,25 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
- NULL);
+ extack);
if (err < 0)
goto errout;
err = -EINVAL;
rtm = nlmsg_data(nlh);
- memset(cfg, 0, sizeof(*cfg));
- cfg->fc_table = rtm->rtm_table;
- cfg->fc_dst_len = rtm->rtm_dst_len;
- cfg->fc_src_len = rtm->rtm_src_len;
- cfg->fc_flags = RTF_UP;
- cfg->fc_protocol = rtm->rtm_protocol;
- cfg->fc_type = rtm->rtm_type;
+ *cfg = (struct fib6_config){
+ .fc_table = rtm->rtm_table,
+ .fc_dst_len = rtm->rtm_dst_len,
+ .fc_src_len = rtm->rtm_src_len,
+ .fc_flags = RTF_UP,
+ .fc_protocol = rtm->rtm_protocol,
+ .fc_type = rtm->rtm_type,
+
+ .fc_nlinfo.portid = NETLINK_CB(skb).portid,
+ .fc_nlinfo.nlh = nlh,
+ .fc_nlinfo.nl_net = sock_net(skb->sk),
+ };
if (rtm->rtm_type == RTN_UNREACHABLE ||
rtm->rtm_type == RTN_BLACKHOLE ||
@@ -4194,10 +4155,6 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
- cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
- cfg->fc_nlinfo.nlh = nlh;
- cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
-
if (tb[RTA_GATEWAY]) {
cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
cfg->fc_flags |= RTF_GATEWAY;
@@ -4316,11 +4273,6 @@ static int ip6_route_info_append(struct net *net,
if (!nh)
return -ENOMEM;
nh->fib6_info = rt;
- err = ip6_convert_metrics(net, rt, r_cfg);
- if (err) {
- kfree(nh);
- return err;
- }
memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
list_add_tail(&nh->next, rt6_nh_list);
@@ -4670,20 +4622,31 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
int iif, int type, u32 portid, u32 seq,
unsigned int flags)
{
- struct rtmsg *rtm;
+ struct rt6_info *rt6 = (struct rt6_info *)dst;
+ struct rt6key *rt6_dst, *rt6_src;
+ u32 *pmetrics, table, rt6_flags;
struct nlmsghdr *nlh;
+ struct rtmsg *rtm;
long expires = 0;
- u32 *pmetrics;
- u32 table;
nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
if (!nlh)
return -EMSGSIZE;
+ if (rt6) {
+ rt6_dst = &rt6->rt6i_dst;
+ rt6_src = &rt6->rt6i_src;
+ rt6_flags = rt6->rt6i_flags;
+ } else {
+ rt6_dst = &rt->fib6_dst;
+ rt6_src = &rt->fib6_src;
+ rt6_flags = rt->fib6_flags;
+ }
+
rtm = nlmsg_data(nlh);
rtm->rtm_family = AF_INET6;
- rtm->rtm_dst_len = rt->fib6_dst.plen;
- rtm->rtm_src_len = rt->fib6_src.plen;
+ rtm->rtm_dst_len = rt6_dst->plen;
+ rtm->rtm_src_len = rt6_src->plen;
rtm->rtm_tos = 0;
if (rt->fib6_table)
table = rt->fib6_table->tb6_id;
@@ -4698,7 +4661,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_protocol = rt->fib6_protocol;
- if (rt->fib6_flags & RTF_CACHE)
+ if (rt6_flags & RTF_CACHE)
rtm->rtm_flags |= RTM_F_CLONED;
if (dest) {
@@ -4706,7 +4669,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
rtm->rtm_dst_len = 128;
} else if (rtm->rtm_dst_len)
- if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr))
+ if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
if (src) {
@@ -4714,12 +4677,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
rtm->rtm_src_len = 128;
} else if (rtm->rtm_src_len &&
- nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr))
+ nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
goto nla_put_failure;
#endif
if (iif) {
#ifdef CONFIG_IPV6_MROUTE
- if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) {
+ if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
int err = ip6mr_get_route(net, skb, rtm, portid);
if (err == 0)
@@ -4754,7 +4717,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
/* For multipath routes, walk the siblings list and add
* each as a nexthop within RTA_MULTIPATH.
*/
- if (rt->fib6_nsiblings) {
+ if (rt6) {
+ if (rt6_flags & RTF_GATEWAY &&
+ nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
+ goto nla_put_failure;
+
+ if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
+ goto nla_put_failure;
+ } else if (rt->fib6_nsiblings) {
struct fib6_info *sibling, *next_sibling;
struct nlattr *mp;
@@ -4777,7 +4747,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
}
- if (rt->fib6_flags & RTF_EXPIRES) {
+ if (rt6_flags & RTF_EXPIRES) {
expires = dst ? dst->expires : rt->expires;
expires -= jiffies;
}
@@ -4785,7 +4755,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
goto nla_put_failure;
- if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags)))
+ if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
goto nla_put_failure;
@@ -4797,28 +4767,52 @@ nla_put_failure:
return -EMSGSIZE;
}
+static bool fib6_info_uses_dev(const struct fib6_info *f6i,
+ const struct net_device *dev)
+{
+ if (f6i->fib6_nh.nh_dev == dev)
+ return true;
+
+ if (f6i->fib6_nsiblings) {
+ struct fib6_info *sibling, *next_sibling;
+
+ list_for_each_entry_safe(sibling, next_sibling,
+ &f6i->fib6_siblings, fib6_siblings) {
+ if (sibling->fib6_nh.nh_dev == dev)
+ return true;
+ }
+ }
+
+ return false;
+}
+
int rt6_dump_route(struct fib6_info *rt, void *p_arg)
{
struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
+ struct fib_dump_filter *filter = &arg->filter;
+ unsigned int flags = NLM_F_MULTI;
struct net *net = arg->net;
if (rt == net->ipv6.fib6_null_entry)
return 0;
- if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
- struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
-
- /* user wants prefix routes only */
- if (rtm->rtm_flags & RTM_F_PREFIX &&
- !(rt->fib6_flags & RTF_PREFIX_RT)) {
- /* success since this is not a prefix route */
+ if ((filter->flags & RTM_F_PREFIX) &&
+ !(rt->fib6_flags & RTF_PREFIX_RT)) {
+ /* success since this is not a prefix route */
+ return 1;
+ }
+ if (filter->filter_set) {
+ if ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
+ (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
+ (filter->protocol && rt->fib6_protocol != filter->protocol)) {
return 1;
}
+ flags |= NLM_F_DUMP_FILTERED;
}
return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
- arg->cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ arg->cb->nlh->nlmsg_seq, flags);
}
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
@@ -4832,7 +4826,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct rt6_info *rt;
struct sk_buff *skb;
struct rtmsg *rtm;
- struct flowi6 fl6;
+ struct flowi6 fl6 = {};
bool fibmatch;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
@@ -4841,7 +4835,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
goto errout;
err = -EINVAL;
- memset(&fl6, 0, sizeof(fl6));
rtm = nlmsg_data(nlh);
fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
@@ -5066,7 +5059,10 @@ int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
return 0;
}
-struct ctl_table ipv6_route_table_template[] = {
+static int zero;
+static int one = 1;
+
+static struct ctl_table ipv6_route_table_template[] = {
{
.procname = "flush",
.data = &init_net.ipv6.sysctl.flush_delay,
@@ -5137,6 +5133,15 @@ struct ctl_table ipv6_route_table_template[] = {
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
+ {
+ .procname = "skip_notify_on_dev_down",
+ .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
{ }
};
@@ -5160,6 +5165,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
+ table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)
@@ -5224,6 +5230,7 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
+ net->ipv6.sysctl.skip_notify_on_dev_down = 0;
net->ipv6.ip6_rt_gc_expire = 30*HZ;
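The knob surfaces as net.ipv6.route.skip_notify_on_dev_down, default 0; the zero/one pair in the template documents its intended boolean range. A hypothetical userspace toggle via procfs:

#include <fcntl.h>
#include <unistd.h>

/* Equivalent to: sysctl -w net.ipv6.route.skip_notify_on_dev_down=1
 * With the knob set, a device going down flushes its routes without a
 * per-route RTM_DELROUTE storm; listeners infer the flush from the
 * link-down notification instead.
 */
static int skip_route_notify_on_dev_down(void)
{
	int fd = open("/proc/sys/net/ipv6/route/skip_notify_on_dev_down",
		      O_WRONLY);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) == 1)
		ret = 0;
	close(fd);
	return ret;
}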
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e9400ffa7875..51c9f75f34b9 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -534,13 +534,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, dev_net(skb->dev), info,
- t->parms.link, 0, iph->protocol, 0);
+ t->parms.link, iph->protocol);
err = 0;
goto out;
}
if (type == ICMP_REDIRECT) {
- ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
- iph->protocol, 0);
+ ipv4_redirect(skb, dev_net(skb->dev), t->parms.link,
+ iph->protocol);
err = 0;
goto out;
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 83f4c77c79d8..06d17ff3562f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -548,7 +548,7 @@ static __inline__ void udpv6_err(struct sk_buff *skb,
__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
-static DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
static_branch_enable(&udpv6_encap_needed_key);
@@ -752,6 +752,26 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
}
}
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
+ * return code conversion for ip layer consumption
+ */
+static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+ struct udphdr *uh)
+{
+ int ret;
+
+ if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+ skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+ ip6_compute_pseudo);
+
+ ret = udpv6_queue_rcv_skb(sk, skb);
+
+ /* a return value > 0 means to resubmit the input */
+ if (ret > 0)
+ return ret;
+ return 0;
+}
+
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
@@ -803,13 +823,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (unlikely(sk->sk_rx_dst != dst))
udp6_sk_rx_dst_set(sk, dst);
- ret = udpv6_queue_rcv_skb(sk, skb);
- sock_put(sk);
+ if (!uh->check && !udp_sk(sk)->no_check6_rx) {
+ sock_put(sk);
+ goto report_csum_error;
+ }
- /* a return value > 0 means to resubmit the input */
- if (ret > 0)
- return ret;
- return 0;
+ ret = udp6_unicast_rcv_skb(sk, skb, uh);
+ sock_put(sk);
+ return ret;
}
/*
@@ -822,30 +843,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
/* Unicast */
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk) {
- int ret;
-
- if (!uh->check && !udp_sk(sk)->no_check6_rx) {
- udp6_csum_zero_error(skb);
- goto csum_error;
- }
-
- if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
- skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
- ip6_compute_pseudo);
-
- ret = udpv6_queue_rcv_skb(sk, skb);
-
- /* a return value > 0 means to resubmit the input */
- if (ret > 0)
- return ret;
-
- return 0;
+ if (!uh->check && !udp_sk(sk)->no_check6_rx)
+ goto report_csum_error;
+ return udp6_unicast_rcv_skb(sk, skb, uh);
}
- if (!uh->check) {
- udp6_csum_zero_error(skb);
- goto csum_error;
- }
+ if (!uh->check)
+ goto report_csum_error;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
@@ -866,6 +870,9 @@ short_packet:
ulen, skb->len,
daddr, ntohs(uh->dest));
goto discard;
+
+report_csum_error:
+ udp6_csum_zero_error(skb);
csum_error:
__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
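The reworked unicast path above rejects a zero UDP checksum unless the receiving socket opted in, as RFC 6936 permits for tunnel-style consumers; the opt-in is the pre-existing UDP_NO_CHECK6_RX socket option:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udp.h>

#ifndef UDP_NO_CHECK6_RX
#define UDP_NO_CHECK6_RX 102	/* uapi value, present since Linux 3.16 */
#endif

/* Allow this IPv6 UDP socket to accept datagrams carrying a zero
 * checksum (e.g. a VXLAN-style tunnel endpoint); without it, such
 * packets now land in report_csum_error above.
 */
static int allow_zero_csum_rx(int fd)
{
	int one = 1;

	return setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_RX,
			  &one, sizeof(one));
}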
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 95dee9ca8d22..1b8e161ac527 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -119,7 +119,7 @@ static struct sk_buff *udp6_gro_receive(struct list_head *head,
{
struct udphdr *uh = udp_gro_udphdr(skb);
- if (unlikely(!uh))
+ if (unlikely(!uh) || !static_branch_unlikely(&udpv6_encap_needed_key))
goto flush;
/* Don't bother verifying checksum if we're going to flush anyway. */
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 841f4a07438e..9ef490dddcea 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
if (xo && (xo->flags & XFRM_GRO)) {
skb_mac_header_rebuild(skb);
+ skb_reset_transport_header(skb);
return -1;
}
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 9ad07a91708e..3c29da5defe6 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ihl = skb->data - skb_transport_header(skb);
- struct xfrm_offload *xo = xfrm_offload(skb);
if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),
@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
}
ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
sizeof(struct ipv6hdr));
- if (!xo || !(xo->flags & XFRM_GRO))
- skb_reset_transport_header(skb);
+ skb_reset_transport_header(skb);
return 0;
}
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 5959ce9620eb..6a74080005cf 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
if (toobig && xfrm6_local_dontfrag(skb)) {
xfrm6_local_rxpmtu(skb, mtu);
+ kfree_skb(skb);
return -EMSGSIZE;
} else if (!skb->ignore_df && toobig && skb->sk) {
xfrm_local_error(skb, mtu);
+ kfree_skb(skb);
return -EMSGSIZE;
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ef3defaf43b9..d35bcf92969c 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -146,8 +146,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
- while (nh + offset + 1 < skb->data ||
- pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
+ while (nh + offset + sizeof(*exthdr) < skb->data ||
+ pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
nh = skb_network_header(skb);
exthdr = (struct ipv6_opt_hdr *)(nh + offset);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index e2f16a0173a9..0bed4cc20603 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -48,7 +48,7 @@ static struct iucv_interface *pr_iucv;
static const u8 iprm_shutdown[8] =
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
-#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
+#define TRGCLS_SIZE FIELD_SIZEOF(struct iucv_message, class)
#define __iucv_sock_wait(sk, condition, timeo, ret) \
do { \
@@ -320,13 +320,9 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
struct sk_buff *nskb;
int err, confirm_recv = 0;
- memset(skb->head, 0, ETH_HLEN);
- phs_hdr = skb_push(skb, sizeof(struct af_iucv_trans_hdr));
- skb_reset_mac_header(skb);
+ phs_hdr = skb_push(skb, sizeof(*phs_hdr));
+ memset(phs_hdr, 0, sizeof(*phs_hdr));
skb_reset_network_header(skb);
- skb_push(skb, ETH_HLEN);
- skb_reset_mac_header(skb);
- memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
phs_hdr->magic = ETH_P_AF_IUCV;
phs_hdr->version = 1;
@@ -350,6 +346,9 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
if (imsg)
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
+ skb_push(skb, ETH_HLEN);
+ memset(skb->data, 0, ETH_HLEN);
+
skb->dev = iucv->hs_dev;
if (!skb->dev) {
err = -ENODEV;
@@ -1505,7 +1504,7 @@ __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
__poll_t mask = 0;
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
if (sk->sk_state == IUCV_LISTEN)
return iucv_accept_poll(sk);
@@ -1943,8 +1942,7 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
- struct af_iucv_trans_hdr *trans_hdr =
- (struct af_iucv_trans_hdr *)skb->data;
+ struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
char tmpID[8];
char tmpName[8];
@@ -1967,13 +1965,12 @@ static void afiucv_swap_src_dest(struct sk_buff *skb)
**/
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
+ struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
struct sock *nsk;
struct iucv_sock *iucv, *niucv;
- struct af_iucv_trans_hdr *trans_hdr;
int err;
iucv = iucv_sk(sk);
- trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
if (!iucv) {
/* no sock - connection refused */
afiucv_swap_src_dest(skb);
@@ -2034,15 +2031,13 @@ out:
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
- struct af_iucv_trans_hdr *trans_hdr =
- (struct af_iucv_trans_hdr *)skb->data;
if (!iucv)
goto out;
if (sk->sk_state != IUCV_BOUND)
goto out;
bh_lock_sock(sk);
- iucv->msglimit_peer = trans_hdr->window;
+ iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
sk->sk_state = IUCV_CONNECTED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
@@ -2098,8 +2093,6 @@ out:
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
- struct af_iucv_trans_hdr *trans_hdr =
- (struct af_iucv_trans_hdr *)skb->data;
if (!iucv)
return NET_RX_SUCCESS;
@@ -2107,7 +2100,7 @@ static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state != IUCV_CONNECTED)
return NET_RX_SUCCESS;
- atomic_sub(trans_hdr->window, &iucv->msg_sent);
+ atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
iucv_sock_wake_msglim(sk);
return NET_RX_SUCCESS;
}
@@ -2170,22 +2163,13 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
int err = NET_RX_SUCCESS;
char nullstring[8];
- if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
- WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
- (int)skb->len,
- (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr)));
+ if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
+ WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
kfree_skb(skb);
return NET_RX_SUCCESS;
}
- if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr)))
- if (skb_linearize(skb)) {
- WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d",
- (int)skb->len);
- kfree_skb(skb);
- return NET_RX_SUCCESS;
- }
- skb_pull(skb, ETH_HLEN);
- trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
+
+ trans_hdr = iucv_trans_hdr(skb);
EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
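The TRGCLS_SIZE change earlier in this file replaces the open-coded null-pointer idiom with the kernel.h macro, which expands to the same expression:

/* From include/linux/kernel.h of this era. sizeof() never evaluates its
 * operand, so the (t *)0 member access is purely a type-level device
 * for naming a field's size.
 */
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

/* e.g. FIELD_SIZEOF(struct iucv_message, class) == sizeof(u32) */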
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 1beeea9549fa..b99e73a7e7e0 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -730,7 +730,6 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct sk_buff *skb = NULL;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
- unsigned long cpu_flags;
size_t copied = 0;
u32 peek_seq = 0;
u32 *seq, skb_len;
@@ -855,9 +854,8 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
goto copy_uaddr;
if (!(flags & MSG_PEEK)) {
- spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
- sk_eat_skb(sk, skb);
- spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
+ skb_unlink(skb, &sk->sk_receive_queue);
+ kfree_skb(skb);
*seq = 0;
}
@@ -878,9 +876,8 @@ copy_uaddr:
llc_cmsg_rcv(msg, skb);
if (!(flags & MSG_PEEK)) {
- spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
- sk_eat_skb(sk, skb);
- spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
+ skb_unlink(skb, &sk->sk_receive_queue);
+ kfree_skb(skb);
*seq = 0;
}
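
The two llc_ui_recvmsg() hunks above replace the open-coded spin_lock_irqsave() section around sk_eat_skb() with skb_unlink() plus kfree_skb(); skb_unlink() takes the queue's lock (IRQ-safe) internally, so the explicit locking and the cpu_flags local were redundant. The resulting idiom, as a minimal sketch:

        static void sock_eat_one(struct sock *sk, struct sk_buff *skb)
        {
                /* skb_unlink() grabs sk_receive_queue.lock itself,
                 * with IRQs disabled, before unlinking the skb */
                skb_unlink(skb, &sk->sk_receive_queue);
                kfree_skb(skb);
        }
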
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c0ac522b48a1..4ff89cb7c86f 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -734,6 +734,7 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
llc_sk(sk)->sap = sap;
spin_lock_bh(&sap->sk_lock);
+ sock_set_flag(sk, SOCK_RCU_FREE);
sap->sk_count++;
sk_nulls_add_node_rcu(sk, laddr_hb);
hlist_add_head(&llc->dev_hash_node, dev_hb);
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 260b3dc1b4a2..64d4bef04e73 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -127,9 +127,7 @@ void llc_sap_close(struct llc_sap *sap)
list_del_rcu(&sap->node);
spin_unlock_bh(&llc_sap_list_lock);
- synchronize_rcu();
-
- kfree(sap);
+ kfree_rcu(sap, rcu);
}
static struct packet_type llc_packet_type __read_mostly = {
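
Together with the SOCK_RCU_FREE flag set in llc_sap_add_socket() above, this converts LLC SAP teardown to deferred RCU freeing: kfree_rcu() schedules the kfree() for after the current grace period instead of blocking the caller in synchronize_rcu(). It requires a struct rcu_head member in the object (assumed here to be named rcu and added to struct llc_sap elsewhere in the patch). The generic pattern, sketched on a hypothetical object:

        struct obj {
                struct list_head node;
                struct rcu_head rcu;    /* consumed by kfree_rcu() */
        };

        static void obj_close(struct obj *o)
        {
                list_del_rcu(&o->node); /* readers may still hold o */
                /* free once all pre-existing RCU readers are done,
                 * without blocking like synchronize_rcu() + kfree() */
                kfree_rcu(o, rcu);
        }
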
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 76e30f4797fb..f869e35d0974 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -27,20 +27,6 @@ config MAC80211_RC_MINSTREL
---help---
This option enables the 'minstrel' TX rate control algorithm
-config MAC80211_RC_MINSTREL_HT
- bool "Minstrel 802.11n support" if EXPERT
- depends on MAC80211_RC_MINSTREL
- default y
- ---help---
- This option enables the 'minstrel_ht' TX rate control algorithm
-
-config MAC80211_RC_MINSTREL_VHT
- bool "Minstrel 802.11ac support" if EXPERT
- depends on MAC80211_RC_MINSTREL_HT
- default n
- ---help---
- This option enables VHT in the 'minstrel_ht' TX rate control algorithm
-
choice
prompt "Default rate control algorithm"
depends on MAC80211_HAS_RC
@@ -62,8 +48,7 @@ endchoice
config MAC80211_RC_DEFAULT
string
- default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL && MAC80211_RC_MINSTREL_HT
- default "minstrel" if MAC80211_RC_DEFAULT_MINSTREL
+ default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL
default ""
endif
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index bb707789ef2b..4f03ebe732fa 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -53,13 +53,14 @@ mac80211-$(CONFIG_PM) += pm.o
CFLAGS_trace.o := -I$(src)
-rc80211_minstrel-y := rc80211_minstrel.o
-rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o
+rc80211_minstrel-y := \
+ rc80211_minstrel.o \
+ rc80211_minstrel_ht.o
-rc80211_minstrel_ht-y := rc80211_minstrel_ht.o
-rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o
+rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += \
+ rc80211_minstrel_debugfs.o \
+ rc80211_minstrel_ht_debugfs.o
mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
-mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
ccflags-y += -DDEBUG
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d25da0e66da1..51622333d460 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -158,12 +158,10 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
if (ret)
return ret;
- if (type == NL80211_IFTYPE_AP_VLAN &&
- params && params->use_4addr == 0) {
+ if (type == NL80211_IFTYPE_AP_VLAN && params->use_4addr == 0) {
RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
ieee80211_check_fast_rx_iface(sdata);
- } else if (type == NL80211_IFTYPE_STATION &&
- params && params->use_4addr >= 0) {
+ } else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) {
sdata->u.mgd.use_4addr = params->use_4addr;
}
@@ -427,7 +425,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
/* Keys without a station are used for TX only */
- if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+ if (sta && test_sta_flag(sta, WLAN_STA_MFP))
key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
break;
case NL80211_IFTYPE_ADHOC:
@@ -792,6 +790,48 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static int ieee80211_set_ftm_responder_params(
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *lci, size_t lci_len,
+ const u8 *civicloc, size_t civicloc_len)
+{
+ struct ieee80211_ftm_responder_params *new, *old;
+ struct ieee80211_bss_conf *bss_conf;
+ u8 *pos;
+ int len;
+
+ if ((!lci || !lci_len) && (!civicloc || !civicloc_len))
+ return 1;
+
+ bss_conf = &sdata->vif.bss_conf;
+ old = bss_conf->ftmr_params;
+ len = lci_len + civicloc_len;
+
+ new = kzalloc(sizeof(*new) + len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ pos = (u8 *)(new + 1);
+ if (lci_len) {
+ new->lci_len = lci_len;
+ new->lci = pos;
+ memcpy(pos, lci, lci_len);
+ pos += lci_len;
+ }
+
+ if (civicloc_len) {
+ new->civicloc_len = civicloc_len;
+ new->civicloc = pos;
+ memcpy(pos, civicloc, civicloc_len);
+ pos += civicloc_len;
+ }
+
+ bss_conf->ftmr_params = new;
+ kfree(old);
+
+ return 0;
+}
+
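
ieee80211_set_ftm_responder_params() above uses the single-allocation idiom: one kzalloc() sized for the struct plus its variable-length payload, with lci/civicloc pointed into the trailing storage so the whole object is later released with one kfree() (as the stop_ap hunk below does). Note it returns 1, not 0, when neither buffer is supplied; the caller only treats negative values as errors. A standalone sketch of the idiom, with hypothetical field names:

        struct two_blobs {
                const u8 *a, *b;        /* point into trailing bytes */
                size_t a_len, b_len;
        };

        static struct two_blobs *two_blobs_alloc(const u8 *a, size_t a_len,
                                                 const u8 *b, size_t b_len)
        {
                struct two_blobs *p;
                u8 *pos;

                p = kzalloc(sizeof(*p) + a_len + b_len, GFP_KERNEL);
                if (!p)
                        return NULL;
                pos = (u8 *)(p + 1);    /* first byte after the struct */
                p->a = memcpy(pos, a, a_len);
                p->a_len = a_len;
                pos += a_len;
                p->b = memcpy(pos, b, b_len);
                p->b_len = b_len;
                return p;       /* a single kfree(p) frees everything */
        }
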
static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
struct cfg80211_beacon_data *params,
const struct ieee80211_csa_settings *csa)
@@ -865,6 +905,20 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
if (err == 0)
changed |= BSS_CHANGED_AP_PROBE_RESP;
+ if (params->ftm_responder != -1) {
+ sdata->vif.bss_conf.ftm_responder = params->ftm_responder;
+ err = ieee80211_set_ftm_responder_params(sdata,
+ params->lci,
+ params->lci_len,
+ params->civicloc,
+ params->civicloc_len);
+
+ if (err < 0)
+ return err;
+
+ changed |= BSS_CHANGED_FTM_RESPONDER;
+ }
+
rcu_assign_pointer(sdata->u.ap.beacon, new);
if (old)
@@ -911,6 +965,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
+ if (params->he_cap)
+ sdata->vif.bss_conf.he_support = true;
+
mutex_lock(&local->mtx);
err = ieee80211_vif_use_channel(sdata, &params->chandef,
IEEE80211_CHANCTX_SHARED);
@@ -1062,6 +1119,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
kfree_rcu(old_probe_resp, rcu_head);
sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF;
+ kfree(sdata->vif.bss_conf.ftmr_params);
+ sdata->vif.bss_conf.ftmr_params = NULL;
+
__sta_info_flush(sdata, true);
ieee80211_free_keys(sdata, true);
@@ -1092,50 +1152,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
return 0;
}
-/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
-struct iapp_layer2_update {
- u8 da[ETH_ALEN]; /* broadcast */
- u8 sa[ETH_ALEN]; /* STA addr */
- __be16 len; /* 6 */
- u8 dsap; /* 0 */
- u8 ssap; /* 0 */
- u8 control;
- u8 xid_info[3];
-} __packed;
-
-static void ieee80211_send_layer2_update(struct sta_info *sta)
-{
- struct iapp_layer2_update *msg;
- struct sk_buff *skb;
-
- /* Send Level 2 Update Frame to update forwarding tables in layer 2
- * bridge devices */
-
- skb = dev_alloc_skb(sizeof(*msg));
- if (!skb)
- return;
- msg = skb_put(skb, sizeof(*msg));
-
- /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
- * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
-
- eth_broadcast_addr(msg->da);
- memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
- msg->len = htons(6);
- msg->dsap = 0;
- msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */
- msg->control = 0xaf; /* XID response lsb.1111F101.
- * F=0 (no poll command; unsolicited frame) */
- msg->xid_info[0] = 0x81; /* XID format identifier */
- msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */
- msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */
-
- skb->dev = sta->sdata->dev;
- skb->protocol = eth_type_trans(skb, sta->sdata->dev);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx_ni(skb);
-}
-
static int sta_apply_auth_flags(struct ieee80211_local *local,
struct sta_info *sta,
u32 mask, u32 set)
@@ -1499,7 +1515,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
}
if (layer2_update)
- ieee80211_send_layer2_update(sta);
+ cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
rcu_read_unlock();
@@ -1601,7 +1617,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
ieee80211_vif_inc_num_mcast(sta->sdata);
- ieee80211_send_layer2_update(sta);
+ cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
}
err = sta_apply_parameters(local, sta, params);
@@ -2918,6 +2934,20 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
pos += beacon->probe_resp_len;
}
+ if (beacon->ftm_responder)
+ new_beacon->ftm_responder = beacon->ftm_responder;
+ if (beacon->lci) {
+ new_beacon->lci_len = beacon->lci_len;
+ new_beacon->lci = pos;
+ memcpy(pos, beacon->lci, beacon->lci_len);
+ pos += beacon->lci_len;
+ }
+ if (beacon->civicloc) {
+ new_beacon->civicloc_len = beacon->civicloc_len;
+ new_beacon->civicloc = pos;
+ memcpy(pos, beacon->civicloc, beacon->civicloc_len);
+ pos += beacon->civicloc_len;
+ }
return new_beacon;
}
@@ -3808,6 +3838,17 @@ out:
return ret;
}
+static int
+ieee80211_get_ftm_responder_stats(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ return drv_get_ftm_responder_stats(local, sdata, ftm_stats);
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -3902,4 +3943,5 @@ const struct cfg80211_ops mac80211_config_ops = {
.set_multicast_to_unicast = ieee80211_set_multicast_to_unicast,
.tx_control_port = ieee80211_tx_control_port,
.get_txq_stats = ieee80211_get_txq_stats,
+ .get_ftm_responder_stats = ieee80211_get_ftm_responder_stats,
};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index b5adf3625d16..3fe541e358f3 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -3,6 +3,7 @@
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* GPLv2
*
@@ -214,6 +215,9 @@ static const char *hw_flag_names[] = {
FLAG(SUPPORTS_TDLS_BUFFER_STA),
FLAG(DEAUTH_NEED_MGD_TX_PREP),
FLAG(DOESNT_SUPPORT_QOS_NDP),
+ FLAG(BUFF_MMPDU_TXQ),
+ FLAG(SUPPORTS_VHT_EXT_NSS_BW),
+ FLAG(STA_MMPDU_TXQ),
#undef FLAG
};
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 4105081dc1df..af5185a836e5 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -4,6 +4,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -140,7 +141,7 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
{
struct sta_info *sta = file->private_data;
struct ieee80211_local *local = sta->local;
- size_t bufsz = AQM_TXQ_ENTRY_LEN*(IEEE80211_NUM_TIDS+1);
+ size_t bufsz = AQM_TXQ_ENTRY_LEN * (IEEE80211_NUM_TIDS + 2);
char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
struct txq_info *txqi;
ssize_t rv;
@@ -162,7 +163,9 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
bufsz+buf-p,
"tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets flags\n");
- for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+ for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+ if (!sta->sta.txq[i])
+ continue;
txqi = to_txq_info(sta->sta.txq[i]);
p += scnprintf(p, bufsz+buf-p,
"%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n",
@@ -487,12 +490,368 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
p += scnprintf(p, sizeof(buf)+buf-p,
"MCS TX highest: %d Mbps\n",
le16_to_cpu(vhtc->vht_mcs.tx_highest));
+#undef PFLAG
}
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
STA_OPS(vht_capa);
+static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf, *p;
+ size_t buf_sz = PAGE_SIZE;
+ struct sta_info *sta = file->private_data;
+ struct ieee80211_sta_he_cap *hec = &sta->sta.he_cap;
+ struct ieee80211_he_mcs_nss_supp *nss = &hec->he_mcs_nss_supp;
+ u8 ppe_size;
+ u8 *cap;
+ int i;
+ ssize_t ret;
+
+ buf = kmalloc(buf_sz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ p = buf;
+
+ p += scnprintf(p, buf_sz + buf - p, "HE %ssupported\n",
+ hec->has_he ? "" : "not ");
+ if (!hec->has_he)
+ goto out;
+
+ cap = hec->he_cap_elem.mac_cap_info;
+ p += scnprintf(p, buf_sz + buf - p,
+ "MAC-CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n",
+ cap[0], cap[1], cap[2], cap[3], cap[4], cap[5]);
+
+#define PRINT(fmt, ...) \
+ p += scnprintf(p, buf_sz + buf - p, "\t\t" fmt "\n", \
+ ##__VA_ARGS__)
+
+#define PFLAG(t, n, a, b) \
+ do { \
+ if (cap[n] & IEEE80211_HE_##t##_CAP##n##_##a) \
+ PRINT("%s", b); \
+ } while (0)
+
+#define PFLAG_RANGE(t, i, n, s, m, off, fmt) \
+ do { \
+ u8 msk = IEEE80211_HE_##t##_CAP##i##_##n##_MASK; \
+ u8 idx = ((cap[i] & msk) >> (ffs(msk) - 1)) + off; \
+ PRINT(fmt, (s << idx) + (m * idx)); \
+ } while (0)
+
+#define PFLAG_RANGE_DEFAULT(t, i, n, s, m, off, fmt, a, b) \
+ do { \
+ if (cap[i] == IEEE80211_HE_##t ##_CAP##i##_##n##_##a) { \
+ PRINT("%s", b); \
+ break; \
+ } \
+ PFLAG_RANGE(t, i, n, s, m, off, fmt); \
+ } while (0)
+
+ PFLAG(MAC, 0, HTC_HE, "HTC-HE");
+ PFLAG(MAC, 0, TWT_REQ, "TWT-REQ");
+ PFLAG(MAC, 0, TWT_RES, "TWT-RES");
+ PFLAG_RANGE_DEFAULT(MAC, 0, DYNAMIC_FRAG, 0, 1, 0,
+ "DYNAMIC-FRAG-LEVEL-%d", NOT_SUPP, "NOT-SUPP");
+ PFLAG_RANGE_DEFAULT(MAC, 0, MAX_NUM_FRAG_MSDU, 1, 0, 0,
+ "MAX-NUM-FRAG-MSDU-%d", UNLIMITED, "UNLIMITED");
+
+ PFLAG_RANGE_DEFAULT(MAC, 1, MIN_FRAG_SIZE, 128, 0, -1,
+ "MIN-FRAG-SIZE-%d", UNLIMITED, "UNLIMITED");
+ PFLAG_RANGE_DEFAULT(MAC, 1, TF_MAC_PAD_DUR, 0, 8, 0,
+ "TF-MAC-PAD-DUR-%dUS", MASK, "UNKNOWN");
+ PFLAG_RANGE(MAC, 1, MULTI_TID_AGG_RX_QOS, 0, 1, 1,
+ "MULTI-TID-AGG-RX-QOS-%d");
+
+ if (cap[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) {
+ switch (((cap[2] << 1) | (cap[1] >> 7)) & 0x3) {
+ case 0:
+ PRINT("LINK-ADAPTATION-NO-FEEDBACK");
+ break;
+ case 1:
+ PRINT("LINK-ADAPTATION-RESERVED");
+ break;
+ case 2:
+ PRINT("LINK-ADAPTATION-UNSOLICITED-FEEDBACK");
+ break;
+ case 3:
+ PRINT("LINK-ADAPTATION-BOTH");
+ break;
+ }
+ }
+
+ PFLAG(MAC, 2, ALL_ACK, "ALL-ACK");
+ PFLAG(MAC, 2, TRS, "TRS");
+ PFLAG(MAC, 2, BSR, "BSR");
+ PFLAG(MAC, 2, BCAST_TWT, "BCAST-TWT");
+ PFLAG(MAC, 2, 32BIT_BA_BITMAP, "32BIT-BA-BITMAP");
+ PFLAG(MAC, 2, MU_CASCADING, "MU-CASCADING");
+ PFLAG(MAC, 2, ACK_EN, "ACK-EN");
+
+ PFLAG(MAC, 3, OMI_CONTROL, "OMI-CONTROL");
+ PFLAG(MAC, 3, OFDMA_RA, "OFDMA-RA");
+
+ switch (cap[3] & IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) {
+ case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT:
+ PRINT("MAX-AMPDU-LEN-EXP-USE-VHT");
+ break;
+ case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1:
+ PRINT("MAX-AMPDU-LEN-EXP-VHT-1");
+ break;
+ case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2:
+ PRINT("MAX-AMPDU-LEN-EXP-VHT-2");
+ break;
+ case IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED:
+ PRINT("MAX-AMPDU-LEN-EXP-RESERVED");
+ break;
+ }
+
+ PFLAG(MAC, 3, AMSDU_FRAG, "AMSDU-FRAG");
+ PFLAG(MAC, 3, FLEX_TWT_SCHED, "FLEX-TWT-SCHED");
+ PFLAG(MAC, 3, RX_CTRL_FRAME_TO_MULTIBSS, "RX-CTRL-FRAME-TO-MULTIBSS");
+
+ PFLAG(MAC, 4, BSRP_BQRP_A_MPDU_AGG, "BSRP-BQRP-A-MPDU-AGG");
+ PFLAG(MAC, 4, QTP, "QTP");
+ PFLAG(MAC, 4, BQR, "BQR");
+ PFLAG(MAC, 4, SRP_RESP, "SRP-RESP");
+ PFLAG(MAC, 4, NDP_FB_REP, "NDP-FB-REP");
+ PFLAG(MAC, 4, OPS, "OPS");
+ PFLAG(MAC, 4, AMDSU_IN_AMPDU, "AMSDU-IN-AMPDU");
+
+ PRINT("MULTI-TID-AGG-TX-QOS-%d", ((cap[5] << 1) | (cap[4] >> 7)) & 0x7);
+
+ PFLAG(MAC, 5, SUBCHAN_SELECVITE_TRANSMISSION,
+ "SUBCHAN-SELECVITE-TRANSMISSION");
+ PFLAG(MAC, 5, UL_2x996_TONE_RU, "UL-2x996-TONE-RU");
+ PFLAG(MAC, 5, OM_CTRL_UL_MU_DATA_DIS_RX, "OM-CTRL-UL-MU-DATA-DIS-RX");
+
+ cap = hec->he_cap_elem.phy_cap_info;
+ p += scnprintf(p, buf_sz + buf - p,
+ "PHY CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n",
+ cap[0], cap[1], cap[2], cap[3], cap[4], cap[5], cap[6],
+ cap[7], cap[8], cap[9], cap[10]);
+
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_40MHZ_IN_2G,
+ "CHANNEL-WIDTH-SET-40MHZ-IN-2G");
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G,
+ "CHANNEL-WIDTH-SET-40MHZ-80MHZ-IN-5G");
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+ "CHANNEL-WIDTH-SET-160MHZ-IN-5G");
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ "CHANNEL-WIDTH-SET-80PLUS80-MHZ-IN-5G");
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G,
+ "CHANNEL-WIDTH-SET-RU-MAPPING-IN-2G");
+ PFLAG(PHY, 0, CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G,
+ "CHANNEL-WIDTH-SET-RU-MAPPING-IN-5G");
+
+ switch (cap[1] & IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK) {
+ case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ:
+ PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-20MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ:
+ PRINT("PREAMBLE-PUNC-RX-80MHZ-ONLY-SECOND-40MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ:
+ PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-20MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ:
+ PRINT("PREAMBLE-PUNC-RX-160MHZ-ONLY-SECOND-40MHZ");
+ break;
+ }
+
+ PFLAG(PHY, 1, DEVICE_CLASS_A,
+ "IEEE80211-HE-PHY-CAP1-DEVICE-CLASS-A");
+ PFLAG(PHY, 1, LDPC_CODING_IN_PAYLOAD,
+ "LDPC-CODING-IN-PAYLOAD");
+ PFLAG(PHY, 1, HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US,
+ "HY-CAP1-HE-LTF-AND-GI-FOR-HE-PPDUS-0-8US");
+ PRINT("MIDAMBLE-RX-MAX-NSTS-%d", ((cap[2] << 1) | (cap[1] >> 7)) & 0x3);
+
+ PFLAG(PHY, 2, NDP_4x_LTF_AND_3_2US, "NDP-4X-LTF-AND-3-2US");
+ PFLAG(PHY, 2, STBC_TX_UNDER_80MHZ, "STBC-TX-UNDER-80MHZ");
+ PFLAG(PHY, 2, STBC_RX_UNDER_80MHZ, "STBC-RX-UNDER-80MHZ");
+ PFLAG(PHY, 2, DOPPLER_TX, "DOPPLER-TX");
+ PFLAG(PHY, 2, DOPPLER_RX, "DOPPLER-RX");
+ PFLAG(PHY, 2, UL_MU_FULL_MU_MIMO, "UL-MU-FULL-MU-MIMO");
+ PFLAG(PHY, 2, UL_MU_PARTIAL_MU_MIMO, "UL-MU-PARTIAL-MU-MIMO");
+
+ switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK) {
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM:
+ PRINT("DCM-MAX-CONST-TX-NO-DCM");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK:
+ PRINT("DCM-MAX-CONST-TX-BPSK");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK:
+ PRINT("DCM-MAX-CONST-TX-QPSK");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM:
+ PRINT("DCM-MAX-CONST-TX-16-QAM");
+ break;
+ }
+
+ PFLAG(PHY, 3, DCM_MAX_TX_NSS_1, "DCM-MAX-TX-NSS-1");
+ PFLAG(PHY, 3, DCM_MAX_TX_NSS_2, "DCM-MAX-TX-NSS-2");
+
+ switch (cap[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK) {
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM:
+ PRINT("DCM-MAX-CONST-RX-NO-DCM");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK:
+ PRINT("DCM-MAX-CONST-RX-BPSK");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK:
+ PRINT("DCM-MAX-CONST-RX-QPSK");
+ break;
+ case IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM:
+ PRINT("DCM-MAX-CONST-RX-16-QAM");
+ break;
+ }
+
+ PFLAG(PHY, 3, DCM_MAX_RX_NSS_1, "DCM-MAX-RX-NSS-1");
+ PFLAG(PHY, 3, DCM_MAX_RX_NSS_2, "DCM-MAX-RX-NSS-2");
+ PFLAG(PHY, 3, RX_HE_MU_PPDU_FROM_NON_AP_STA,
+ "RX-HE-MU-PPDU-FROM-NON-AP-STA");
+ PFLAG(PHY, 3, SU_BEAMFORMER, "SU-BEAMFORMER");
+
+ PFLAG(PHY, 4, SU_BEAMFORMEE, "SU-BEAMFORMEE");
+ PFLAG(PHY, 4, MU_BEAMFORMER, "MU-BEAMFORMER");
+
+ PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_UNDER_80MHZ, 0, 1, 4,
+ "BEAMFORMEE-MAX-STS-UNDER-%d");
+ PFLAG_RANGE(PHY, 4, BEAMFORMEE_MAX_STS_ABOVE_80MHZ, 0, 1, 4,
+ "BEAMFORMEE-MAX-STS-ABOVE-%d");
+
+ PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ, 0, 1, 1,
+ "NUM-SND-DIM-UNDER-80MHZ-%d");
+ PFLAG_RANGE(PHY, 5, BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ, 0, 1, 1,
+ "NUM-SND-DIM-ABOVE-80MHZ-%d");
+ PFLAG(PHY, 5, NG16_SU_FEEDBACK, "NG16-SU-FEEDBACK");
+ PFLAG(PHY, 5, NG16_MU_FEEDBACK, "NG16-MU-FEEDBACK");
+
+ PFLAG(PHY, 6, CODEBOOK_SIZE_42_SU, "CODEBOOK-SIZE-42-SU");
+ PFLAG(PHY, 6, CODEBOOK_SIZE_75_MU, "CODEBOOK-SIZE-75-MU");
+ PFLAG(PHY, 6, TRIG_SU_BEAMFORMER_FB, "TRIG-SU-BEAMFORMER-FB");
+ PFLAG(PHY, 6, TRIG_MU_BEAMFORMER_FB, "TRIG-MU-BEAMFORMER-FB");
+ PFLAG(PHY, 6, TRIG_CQI_FB, "TRIG-CQI-FB");
+ PFLAG(PHY, 6, PARTIAL_BW_EXT_RANGE, "PARTIAL-BW-EXT-RANGE");
+ PFLAG(PHY, 6, PARTIAL_BANDWIDTH_DL_MUMIMO,
+ "PARTIAL-BANDWIDTH-DL-MUMIMO");
+ PFLAG(PHY, 6, PPE_THRESHOLD_PRESENT, "PPE-THRESHOLD-PRESENT");
+
+ PFLAG(PHY, 7, SRP_BASED_SR, "SRP-BASED-SR");
+ PFLAG(PHY, 7, POWER_BOOST_FACTOR_AR, "POWER-BOOST-FACTOR-AR");
+ PFLAG(PHY, 7, HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
+ "HE-SU-MU-PPDU-4XLTF-AND-08-US-GI");
+ PFLAG_RANGE(PHY, 7, MAX_NC, 0, 1, 1, "MAX-NC-%d");
+ PFLAG(PHY, 7, STBC_TX_ABOVE_80MHZ, "STBC-TX-ABOVE-80MHZ");
+ PFLAG(PHY, 7, STBC_RX_ABOVE_80MHZ, "STBC-RX-ABOVE-80MHZ");
+
+ PFLAG(PHY, 8, HE_ER_SU_PPDU_4XLTF_AND_08_US_GI,
+ "HE-ER-SU-PPDU-4XLTF-AND-08-US-GI");
+ PFLAG(PHY, 8, 20MHZ_IN_40MHZ_HE_PPDU_IN_2G,
+ "20MHZ-IN-40MHZ-HE-PPDU-IN-2G");
+ PFLAG(PHY, 8, 20MHZ_IN_160MHZ_HE_PPDU, "20MHZ-IN-160MHZ-HE-PPDU");
+ PFLAG(PHY, 8, 80MHZ_IN_160MHZ_HE_PPDU, "80MHZ-IN-160MHZ-HE-PPDU");
+ PFLAG(PHY, 8, HE_ER_SU_1XLTF_AND_08_US_GI,
+ "HE-ER-SU-1XLTF-AND-08-US-GI");
+ PFLAG(PHY, 8, MIDAMBLE_RX_TX_2X_AND_1XLTF,
+ "MIDAMBLE-RX-TX-2X-AND-1XLTF");
+
+ switch (cap[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK) {
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ:
+ PRINT("DDCM-MAX-BW-20MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ:
+ PRINT("DCM-MAX-BW-40MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ:
+ PRINT("DCM-MAX-BW-80MHZ");
+ break;
+ case IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ:
+ PRINT("DCM-MAX-BW-160-OR-80P80-MHZ");
+ break;
+ }
+
+ PFLAG(PHY, 9, LONGER_THAN_16_SIGB_OFDM_SYM,
+ "LONGER-THAN-16-SIGB-OFDM-SYM");
+ PFLAG(PHY, 9, NON_TRIGGERED_CQI_FEEDBACK,
+ "NON-TRIGGERED-CQI-FEEDBACK");
+ PFLAG(PHY, 9, TX_1024_QAM_LESS_THAN_242_TONE_RU,
+ "TX-1024-QAM-LESS-THAN-242-TONE-RU");
+ PFLAG(PHY, 9, RX_1024_QAM_LESS_THAN_242_TONE_RU,
+ "RX-1024-QAM-LESS-THAN-242-TONE-RU");
+ PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB,
+ "RX-FULL-BW-SU-USING-MU-WITH-COMP-SIGB");
+ PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB,
+ "RX-FULL-BW-SU-USING-MU-WITH-NON-COMP-SIGB");
+
+#undef PFLAG_RANGE_DEFAULT
+#undef PFLAG_RANGE
+#undef PFLAG
+
+#define PRINT_NSS_SUPP(f, n) \
+ do { \
+ int i; \
+ u16 v = le16_to_cpu(nss->f); \
+ p += scnprintf(p, buf_sz + buf - p, n ": %#.4x\n", v); \
+ for (i = 0; i < 8; i += 2) { \
+ switch ((v >> i) & 0x3) { \
+ case 0: \
+ PRINT(n "-%d-SUPPORT-0-7", i / 2); \
+ break; \
+ case 1: \
+ PRINT(n "-%d-SUPPORT-0-9", i / 2); \
+ break; \
+ case 2: \
+ PRINT(n "-%d-SUPPORT-0-11", i / 2); \
+ break; \
+ case 3: \
+ PRINT(n "-%d-NOT-SUPPORTED", i / 2); \
+ break; \
+ } \
+ } \
+ } while (0)
+
+ PRINT_NSS_SUPP(rx_mcs_80, "RX-MCS-80");
+ PRINT_NSS_SUPP(tx_mcs_80, "TX-MCS-80");
+
+ if (cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) {
+ PRINT_NSS_SUPP(rx_mcs_160, "RX-MCS-160");
+ PRINT_NSS_SUPP(tx_mcs_160, "TX-MCS-160");
+ }
+
+ if (cap[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ PRINT_NSS_SUPP(rx_mcs_80p80, "RX-MCS-80P80");
+ PRINT_NSS_SUPP(tx_mcs_80p80, "TX-MCS-80P80");
+ }
+
+#undef PRINT_NSS_SUPP
+#undef PRINT
+
+ if (!(cap[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT))
+ goto out;
+
+ p += scnprintf(p, buf_sz + buf - p, "PPE-THRESHOLDS: %#.2x",
+ hec->ppe_thres[0]);
+
+ ppe_size = ieee80211_he_ppe_size(hec->ppe_thres[0], cap);
+ for (i = 1; i < ppe_size; i++) {
+ p += scnprintf(p, buf_sz + buf - p, " %#.2x",
+ hec->ppe_thres[i]);
+ }
+ p += scnprintf(p, buf_sz + buf - p, "\n");
+
+out:
+ ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ kfree(buf);
+ return ret;
+}
+STA_OPS(he_capa);
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, \
@@ -538,6 +897,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(agg_status);
DEBUGFS_ADD(ht_capa);
DEBUGFS_ADD(vht_capa);
+ DEBUGFS_ADD(he_capa);
DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates);
DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
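
A worked example for the PFLAG_RANGE()/PFLAG_RANGE_DEFAULT() helpers in sta_he_capa_read() above, which decode a masked capability field into a printable value as (s << idx) + m * idx, where idx is the raw field (shifted down by ffs(mask) - 1) plus off. Taking the MIN_FRAG_SIZE use with s = 128, m = 0, off = -1:

        /*
         * raw field = 2 (after shifting down by ffs(mask) - 1)
         * idx       = 2 + (-1)           = 1
         * value     = (128 << 1) + 0 * 1 = 256
         * printed:  "MIN-FRAG-SIZE-256"
         */
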
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 8f6998091d26..0b1747a2313d 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1173,6 +1173,32 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
local->ops->wake_tx_queue(&local->hw, &txq->txq);
}
+static inline int drv_can_aggregate_in_amsdu(struct ieee80211_local *local,
+ struct sk_buff *head,
+ struct sk_buff *skb)
+{
+ if (!local->ops->can_aggregate_in_amsdu)
+ return true;
+
+ return local->ops->can_aggregate_in_amsdu(&local->hw, head, skb);
+}
+
+static inline int
+drv_get_ftm_responder_stats(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+{
+ u32 ret = -EOPNOTSUPP;
+
+ if (local->ops->get_ftm_responder_stats)
+ ret = local->ops->get_ftm_responder_stats(&local->hw,
+ &sdata->vif,
+ ftm_stats);
+ trace_drv_get_ftm_responder_stats(local, sdata, ftm_stats);
+
+ return ret;
+}
+
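
Both new wrappers above follow the usual optional-driver-callback shape: check the ops pointer, fall back to a default (true for the A-MSDU aggregation check, -EOPNOTSUPP for the FTM statistics), and trace the call. drv_get_ftm_responder_stats() keeps the errno in a u32 before returning it as int; the value survives on the two's-complement targets the kernel supports, though plain int would be the more conventional type. A minimal sketch of the shape, with a hypothetical do_foo hook:

        static inline int drv_do_foo(struct ieee80211_local *local, int arg)
        {
                int ret = -EOPNOTSUPP; /* default if the hook is absent */

                if (local->ops->do_foo)
                        ret = local->ops->do_foo(&local->hw, arg);
                return ret;
        }
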
static inline int drv_start_nan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct cfg80211_nan_conf *conf)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index f0f5fedb8caa..0d704e8d7078 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1070,7 +1070,9 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
struct ieee80211_vht_cap cap_ie;
struct ieee80211_sta_vht_cap cap = sta->sta.vht_cap;
- ieee80211_chandef_vht_oper(elems->vht_operation,
+ ieee80211_chandef_vht_oper(&local->hw,
+ elems->vht_operation,
+ elems->ht_operation,
&chandef);
memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 172aeae21ae9..10a05062e4a0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -377,6 +377,7 @@ struct ieee80211_mgd_auth_data {
u8 key[WLAN_KEY_LEN_WEP104];
u8 key_len, key_idx;
bool done;
+ bool peer_confirmed;
bool timeout_started;
u16 sae_trans, sae_status;
@@ -818,6 +819,7 @@ enum txq_info_flags {
IEEE80211_TXQ_STOP,
IEEE80211_TXQ_AMPDU,
IEEE80211_TXQ_NO_AMSDU,
+ IEEE80211_TXQ_STOP_NETIF_TX,
};
/**
@@ -1198,6 +1200,9 @@ struct ieee80211_local {
/* number of RX chains the hardware has */
u8 rx_chains;
+ /* bitmap of which sbands were copied */
+ u8 sband_allocated;
+
int tx_headroom; /* required headroom for hardware/radiotap */
/* Tasklet and skb queue to process calls from IRQ mode. All frames
@@ -1226,6 +1231,7 @@ struct ieee80211_local {
struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
struct tasklet_struct tx_pending_tasklet;
+ struct tasklet_struct wake_txqs_tasklet;
atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES];
@@ -2038,6 +2044,7 @@ void ieee80211_txq_remove_vlan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
void ieee80211_fill_txq_stats(struct cfg80211_txq_stats *txqstats,
struct txq_info *txqi);
+void ieee80211_wake_txqs(unsigned long data);
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg, u16 status,
const u8 *extra, size_t extra_len, const u8 *bssid,
@@ -2106,7 +2113,9 @@ u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
/* channel management */
bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
struct cfg80211_chan_def *chandef);
-bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
+bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
+ const struct ieee80211_vht_operation *oper,
+ const struct ieee80211_ht_operation *htop,
struct cfg80211_chan_def *chandef);
u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5e6cf2cee965..5836ddeac9e3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1756,7 +1756,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
if (local->ops->wake_tx_queue &&
type != NL80211_IFTYPE_AP_VLAN &&
- type != NL80211_IFTYPE_MONITOR)
+ (type != NL80211_IFTYPE_MONITOR ||
+ (params->flags & MONITOR_FLAG_ACTIVE)))
txq_size += sizeof(struct txq_info) +
local->hw.txq_data_size;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index c054ac85793c..4700718e010f 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -248,6 +248,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
(key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
increment_tailroom_need_count(sdata);
+ key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
ret = drv_set_key(key->local, DISABLE_KEY, sdata,
sta ? &sta->sta : NULL, &key->conf);
@@ -256,8 +257,65 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
"failed to remove key (%d, %pM) from hardware (%d)\n",
key->conf.keyidx,
sta ? sta->sta.addr : bcast_addr, ret);
+}
- key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
+static int ieee80211_hw_key_replace(struct ieee80211_key *old_key,
+ struct ieee80211_key *new_key,
+ bool ptk0rekey)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_local *local;
+ struct sta_info *sta;
+ int ret;
+
+ /* Aggregation sessions are OK when running on SW crypto.
+ * A broken remote STA may cause issues not observed with HW
+ * crypto, though.
+ */
+ if (!(old_key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
+ return 0;
+
+ assert_key_lock(old_key->local);
+ sta = old_key->sta;
+
+ /* PTK only using key ID 0 needs special handling on rekey */
+ if (new_key && sta && ptk0rekey) {
+ local = old_key->local;
+ sdata = old_key->sdata;
+
+ /* Stop TX till we are on the new key */
+ old_key->flags |= KEY_FLAG_TAINTED;
+ ieee80211_clear_fast_xmit(sta);
+
+ /* Aggregation sessions during rekey are complicated due to the
+ * reorder buffer and retransmits. Side step that by blocking
+ * aggregation during rekey and tear down running sessions.
+ */
+ if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) {
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
+ ieee80211_sta_tear_down_BA_sessions(sta,
+ AGG_STOP_LOCAL_REQUEST);
+ }
+
+ if (!wiphy_ext_feature_isset(local->hw.wiphy,
+ NL80211_EXT_FEATURE_CAN_REPLACE_PTK0)) {
+ pr_warn_ratelimited("Rekeying PTK for STA %pM but driver can't safely do that.",
+ sta->sta.addr);
+ /* Flushing the driver queues *may* help prevent
+ * the clear text leaks and freezes.
+ */
+ ieee80211_flush_queues(local, sdata, false);
+ }
+ }
+
+ ieee80211_key_disable_hw_accel(old_key);
+
+ if (new_key)
+ ret = ieee80211_key_enable_hw_accel(new_key);
+ else
+ ret = 0;
+
+ return ret;
}
static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
@@ -316,38 +374,57 @@ void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
}
-static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
+static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
bool pairwise,
struct ieee80211_key *old,
struct ieee80211_key *new)
{
int idx;
+ int ret;
bool defunikey, defmultikey, defmgmtkey;
/* caller must provide at least one old/new */
if (WARN_ON(!new && !old))
- return;
+ return 0;
if (new)
list_add_tail_rcu(&new->list, &sdata->key_list);
WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx);
- if (old)
+ if (old) {
idx = old->conf.keyidx;
- else
+ /* TODO: properly implement and test "Extended Key ID for
+ * Individually Addressed Frames" from IEEE 802.11-2016.
+ * Until then, always assume that only key ID 0 is used for
+ * pairwise keys. */
+ ret = ieee80211_hw_key_replace(old, new, pairwise);
+ } else {
+ /* new must be provided in case old is not */
idx = new->conf.keyidx;
+ if (!new->local->wowlan)
+ ret = ieee80211_key_enable_hw_accel(new);
+ else
+ ret = 0;
+ }
+
+ if (ret)
+ return ret;
if (sta) {
if (pairwise) {
rcu_assign_pointer(sta->ptk[idx], new);
sta->ptk_idx = idx;
- ieee80211_check_fast_xmit(sta);
+ if (new) {
+ clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
+ ieee80211_check_fast_xmit(sta);
+ }
} else {
rcu_assign_pointer(sta->gtk[idx], new);
}
- ieee80211_check_fast_rx(sta);
+ if (new)
+ ieee80211_check_fast_rx(sta);
} else {
defunikey = old &&
old == key_mtx_dereference(sdata->local,
@@ -380,6 +457,8 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
if (old)
list_del_rcu(&old->list);
+
+ return 0;
}
struct ieee80211_key *
@@ -575,9 +654,6 @@ static void ieee80211_key_free_common(struct ieee80211_key *key)
static void __ieee80211_key_destroy(struct ieee80211_key *key,
bool delay_tailroom)
{
- if (key->local)
- ieee80211_key_disable_hw_accel(key);
-
if (key->local) {
struct ieee80211_sub_if_data *sdata = key->sdata;
@@ -654,7 +730,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
- struct ieee80211_local *local = sdata->local;
struct ieee80211_key *old_key;
int idx = key->conf.keyidx;
bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
@@ -691,17 +766,13 @@ int ieee80211_key_link(struct ieee80211_key *key,
increment_tailroom_need_count(sdata);
- ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
- ieee80211_key_destroy(old_key, delay_tailroom);
-
- ieee80211_debugfs_key_add(key);
+ ret = ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
- if (!local->wowlan) {
- ret = ieee80211_key_enable_hw_accel(key);
- if (ret)
- ieee80211_key_free(key, delay_tailroom);
+ if (!ret) {
+ ieee80211_debugfs_key_add(key);
+ ieee80211_key_destroy(old_key, delay_tailroom);
} else {
- ret = 0;
+ ieee80211_key_free(key, delay_tailroom);
}
out:
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 513627896204..83e71e6b2ebe 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -4,6 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -610,6 +611,18 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
local->ops = ops;
local->use_chanctx = use_chanctx;
+ /*
+ * We need a bit of data queued to build aggregates properly, so
+ * instruct the TCP stack to allow more than a single ms of data
+ * to be queued in the stack. The value is a bit-shift of 1
+ * second, so 8 is ~4ms of queued data. Only affects local TCP
+ * sockets.
+ * This is the default, anyhow - drivers may need to override it
+ * for local reasons (longer buffers, longer completion time, or
+ * similar).
+ */
+ local->hw.tx_sk_pacing_shift = 8;
+
/* set up some defaults */
local->hw.queues = 1;
local->hw.max_rates = 1;
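
For concreteness: TCP bounds not-yet-sent data per socket to roughly sk_pacing_rate >> sk_pacing_shift bytes, i.e. 2^-shift seconds of traffic, so the default shift of 10 allows ~0.98 ms while the 8 set here allows ~3.9 ms (the "~4ms" in the comment above). Worked numbers at an assumed 100 Mbit/s:

        /*
         * rate  = 100 Mbit/s = 12,500,000 bytes/s
         * limit = 12500000 >> 8 ~= 48828 bytes (~48 KiB) per socket,
         *         i.e. 1/256 s ~= 3.9 ms of airtime at that rate
         */
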
@@ -684,6 +697,10 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
(unsigned long)local);
+ if (ops->wake_tx_queue)
+ tasklet_init(&local->wake_txqs_tasklet, ieee80211_wake_txqs,
+ (unsigned long)local);
+
tasklet_init(&local->tasklet,
ieee80211_tasklet_handler,
(unsigned long) local);
@@ -1154,6 +1171,53 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
goto fail_rate;
}
+ if (local->rate_ctrl) {
+ clear_bit(IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, hw->flags);
+ if (local->rate_ctrl->ops->capa & RATE_CTRL_CAPA_VHT_EXT_NSS_BW)
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ }
+
+ /*
+ * If the VHT capabilities don't have IEEE80211_VHT_EXT_NSS_BW_CAPABLE,
+ * or have it when we don't, copy the sband structure and set/clear it.
+ * This is necessary because rate scaling algorithms could be switched
+ * and have different support values.
+ * Print a message so that driver authors notice the mismatch and can
+ * avoid the reallocation in the common case.
+ */
+ BUILD_BUG_ON(NUM_NL80211_BANDS > 8 * sizeof(local->sband_allocated));
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband;
+ bool local_cap, ie_cap;
+
+ local_cap = ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW);
+
+ sband = local->hw.wiphy->bands[band];
+ if (!sband || !sband->vht_cap.vht_supported)
+ continue;
+
+ ie_cap = !!(sband->vht_cap.vht_mcs.tx_highest &
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE));
+
+ if (local_cap == ie_cap)
+ continue;
+
+ sband = kmemdup(sband, sizeof(*sband), GFP_KERNEL);
+ if (!sband) {
+ result = -ENOMEM;
+ goto fail_rate;
+ }
+
+ wiphy_dbg(hw->wiphy, "copying sband (band %d) due to VHT EXT NSS BW flag\n",
+ band);
+
+ sband->vht_cap.vht_mcs.tx_highest ^=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+
+ local->hw.wiphy->bands[band] = sband;
+ local->sband_allocated |= BIT(band);
+ }
+
/* add one default STA interface if supported */
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
!ieee80211_hw_check(hw, NO_AUTO_VIF)) {
@@ -1272,6 +1336,7 @@ static int ieee80211_free_ack_frame(int id, void *p, void *data)
void ieee80211_free_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
+ enum nl80211_band band;
mutex_destroy(&local->iflist_mtx);
mutex_destroy(&local->mtx);
@@ -1287,6 +1352,12 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
ieee80211_free_led_names(local);
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!(local->sband_allocated & BIT(band)))
+ continue;
+ kfree(local->hw.wiphy->bands[band]);
+ }
+
wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
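
The register/free hunks above form a copy-on-mismatch pair: an sband is kmemdup()ed only when its VHT_EXT_NSS_BW_CAPABLE bit must differ from the driver-provided table, ownership is recorded per band in the sband_allocated bitmap, and ieee80211_free_hw() frees exactly the copies it owns. The same pattern in miniature, on a hypothetical table type:

        struct table { u32 flags; };

        struct ctx {
                struct table *tbl[8];
                u8 tbl_allocated; /* BIT(i) set when tbl[i] is our copy */
        };

        static int ctx_privatize(struct ctx *c, int i)
        {
                struct table *copy;

                copy = kmemdup(c->tbl[i], sizeof(*copy), GFP_KERNEL);
                if (!copy)
                        return -ENOMEM;
                c->tbl[i] = copy;
                c->tbl_allocated |= BIT(i);
                return 0;
        }

        static void ctx_free(struct ctx *c)
        {
                int i;

                for (i = 0; i < ARRAY_SIZE(c->tbl); i++)
                        if (c->tbl_allocated & BIT(i))
                                kfree(c->tbl[i]);
        }
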
@@ -1304,18 +1375,12 @@ static int __init ieee80211_init(void)
if (ret)
return ret;
- ret = rc80211_minstrel_ht_init();
- if (ret)
- goto err_minstrel;
-
ret = ieee80211_iface_init();
if (ret)
goto err_netdev;
return 0;
err_netdev:
- rc80211_minstrel_ht_exit();
- err_minstrel:
rc80211_minstrel_exit();
return ret;
@@ -1323,7 +1388,6 @@ static int __init ieee80211_init(void)
static void __exit ieee80211_exit(void)
{
- rc80211_minstrel_ht_exit();
rc80211_minstrel_exit();
ieee80211s_stop();
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d51da26e9c18..8bad414c52ad 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
+ * Copyright (C) 2018 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*
@@ -98,7 +99,9 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan,
NL80211_CHAN_NO_HT);
ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def);
- ieee80211_chandef_vht_oper(ie->vht_operation, &sta_chan_def);
+ ieee80211_chandef_vht_oper(&sdata->local->hw,
+ ie->vht_operation, ie->ht_operation,
+ &sta_chan_def);
if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
&sta_chan_def))
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index ee56f18cad3f..21526630bf65 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
void ieee80211s_update_metric(struct ieee80211_local *local,
- struct sta_info *sta, struct sk_buff *skb);
+ struct sta_info *sta,
+ struct ieee80211_tx_status *st);
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index daf9db3c8f24..6950cd0bf594 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
}
void ieee80211s_update_metric(struct ieee80211_local *local,
- struct sta_info *sta, struct sk_buff *skb)
+ struct sta_info *sta,
+ struct ieee80211_tx_status *st)
{
- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_tx_info *txinfo = st->info;
int failed;
- if (!ieee80211_is_data(hdr->frame_control))
- return;
-
failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
/* moving average, scaled to 100.
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 3dbecae4be73..d2bc8d57c87e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -220,7 +220,8 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
memcpy(&he_oper_vht_cap, he_oper->optional, 3);
he_oper_vht_cap.basic_mcs_set = cpu_to_le16(0);
- if (!ieee80211_chandef_vht_oper(&he_oper_vht_cap,
+ if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
+ &he_oper_vht_cap, ht_oper,
&vht_chandef)) {
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
sdata_info(sdata,
@@ -228,7 +229,8 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
ret = IEEE80211_STA_DISABLE_HE;
goto out;
}
- } else if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) {
+ } else if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_oper,
+ ht_oper, &vht_chandef)) {
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information is invalid, disable VHT\n");
@@ -2759,13 +2761,40 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
auth_data->key_idx, tx_flags);
}
+static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct sta_info *sta;
+
+ sdata_info(sdata, "authenticated\n");
+ ifmgd->auth_data->done = true;
+ ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
+ ifmgd->auth_data->timeout_started = true;
+ run_again(sdata, ifmgd->auth_data->timeout);
+
+ /* move station state to auth */
+ mutex_lock(&sdata->local->sta_mtx);
+ sta = sta_info_get(sdata, bssid);
+ if (!sta) {
+ WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
+ goto out_unlock;
+ }
+ if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
+ sdata_info(sdata, "failed moving %pM to auth\n", bssid);
+ goto out_unlock;
+ }
+ mutex_unlock(&sdata->local->sta_mtx);
+
+ return true;
+
+out_unlock:
+ mutex_unlock(&sdata->local->sta_mtx);
+ return false;
+}
+
static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 bssid[ETH_ALEN];
u16 auth_alg, auth_transaction, status_code;
- struct sta_info *sta;
struct ieee80211_event event = {
.type = MLME_EVENT,
.u.mlme.data = AUTH_EVENT,
@@ -2789,7 +2818,11 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
status_code = le16_to_cpu(mgmt->u.auth.status_code);
if (auth_alg != ifmgd->auth_data->algorithm ||
- auth_transaction != ifmgd->auth_data->expected_transaction) {
+ (auth_alg != WLAN_AUTH_SAE &&
+ auth_transaction != ifmgd->auth_data->expected_transaction) ||
+ (auth_alg == WLAN_AUTH_SAE &&
+ (auth_transaction < ifmgd->auth_data->expected_transaction ||
+ auth_transaction > 2))) {
sdata_info(sdata, "%pM unexpected authentication state: alg %d (expected %d) transact %d (expected %d)\n",
mgmt->sa, auth_alg, ifmgd->auth_data->algorithm,
auth_transaction,
@@ -2832,35 +2865,17 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
event.u.mlme.status = MLME_SUCCESS;
drv_event_callback(sdata->local, sdata, &event);
- sdata_info(sdata, "authenticated\n");
- ifmgd->auth_data->done = true;
- ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
- ifmgd->auth_data->timeout_started = true;
- run_again(sdata, ifmgd->auth_data->timeout);
-
- if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
- ifmgd->auth_data->expected_transaction != 2) {
- /*
- * Report auth frame to user space for processing since another
- * round of Authentication frames is still needed.
- */
- cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
- return;
+ if (ifmgd->auth_data->algorithm != WLAN_AUTH_SAE ||
+ (auth_transaction == 2 &&
+ ifmgd->auth_data->expected_transaction == 2)) {
+ if (!ieee80211_mark_sta_auth(sdata, bssid))
+ goto out_err;
+ } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
+ auth_transaction == 2) {
+ sdata_info(sdata, "SAE peer confirmed\n");
+ ifmgd->auth_data->peer_confirmed = true;
}
- /* move station state to auth */
- mutex_lock(&sdata->local->sta_mtx);
- sta = sta_info_get(sdata, bssid);
- if (!sta) {
- WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
- goto out_err;
- }
- if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
- sdata_info(sdata, "failed moving %pM to auth\n", bssid);
- goto out_err;
- }
- mutex_unlock(&sdata->local->sta_mtx);
-
cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
return;
out_err:
@@ -3237,19 +3252,16 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
if (bss_conf->he_support) {
- u32 he_oper_params =
- le32_to_cpu(elems.he_operation->he_oper_params);
+ bss_conf->bss_color =
+ le32_get_bits(elems.he_operation->he_oper_params,
+ IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
- bss_conf->bss_color = he_oper_params &
- IEEE80211_HE_OPERATION_BSS_COLOR_MASK;
bss_conf->htc_trig_based_pkt_ext =
- (he_oper_params &
- IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK) <<
- IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET;
+ le32_get_bits(elems.he_operation->he_oper_params,
+ IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK);
bss_conf->frame_time_rts_th =
- (he_oper_params &
- IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK) <<
- IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET;
+ le32_get_bits(elems.he_operation->he_oper_params,
+ IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK);
bss_conf->multi_sta_back_32bit =
sta->sta.he_cap.he_cap_elem.mac_cap_info[2] &
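
The conversion above swaps open-coded le32_to_cpu()/mask/shift sequences for le32_get_bits() from <linux/bitfield.h>, which byte-swaps, masks and right-aligns the field in one call; the shift is derived from the mask's lowest set bit, so the separate _OFFSET constants become unnecessary. A minimal sketch with a hypothetical field mask:

        #include <linux/bitfield.h>

        #define FOO_COLOR_MASK  GENMASK(9, 4)   /* hypothetical field */

        static u8 foo_color(__le32 params)
        {
                /* same as (le32_to_cpu(params) & FOO_COLOR_MASK) >> 4 */
                return le32_get_bits(params, FOO_COLOR_MASK);
        }
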
@@ -4879,6 +4891,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_auth_data *auth_data;
u16 auth_alg;
int err;
+ bool cont_auth;
/* prepare auth data structure */
@@ -4913,6 +4926,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return -EOPNOTSUPP;
}
+ if (ifmgd->assoc_data)
+ return -EBUSY;
+
auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
req->ie_len, GFP_KERNEL);
if (!auth_data)
@@ -4932,6 +4948,13 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
auth_data->data_len += req->auth_data_len - 4;
}
+ /* Check whether we are continuing authentication with, or trying to
+ * re-authenticate to, the same BSS that we were already in the process
+ * of authenticating with, and in that case avoid the removal and
+ * re-addition of the STA entry in ieee80211_prep_connection().
+ */
+ cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss;
+
if (req->ie && req->ie_len) {
memcpy(&auth_data->data[auth_data->data_len],
req->ie, req->ie_len);
@@ -4948,18 +4971,26 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
/* try to authenticate/probe */
- if ((ifmgd->auth_data && !ifmgd->auth_data->done) ||
- ifmgd->assoc_data) {
- err = -EBUSY;
- goto err_free;
+ if (ifmgd->auth_data) {
+ if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE) {
+ auth_data->peer_confirmed =
+ ifmgd->auth_data->peer_confirmed;
+ }
+ ieee80211_destroy_auth_data(sdata, cont_auth);
}
- if (ifmgd->auth_data)
- ieee80211_destroy_auth_data(sdata, false);
-
/* prep auth_data so we don't go into idle on disassoc */
ifmgd->auth_data = auth_data;
+ /* If this is a continuation of an ongoing SAE authentication exchange
+ * (i.e., request to send SAE Confirm) and the peer has already
+ * confirmed, mark authentication completed since we are about to send
+ * out SAE Confirm.
+ */
+ if (cont_auth && req->auth_type == NL80211_AUTHTYPE_SAE &&
+ auth_data->peer_confirmed && auth_data->sae_trans == 2)
+ ieee80211_mark_sta_auth(sdata, req->bss->bssid);
+
if (ifmgd->associated) {
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
@@ -4977,7 +5008,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
- err = ieee80211_prep_connection(sdata, req->bss, false, false);
+ err = ieee80211_prep_connection(sdata, req->bss, cont_auth, false);
if (err)
goto err_clear;
@@ -4998,7 +5029,6 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
mutex_lock(&sdata->local->mtx);
ieee80211_vif_release_channel(sdata);
mutex_unlock(&sdata->local->mtx);
- err_free:
kfree(auth_data);
return err;
}
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 8212bfeb71d6..d59198191a79 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -95,18 +95,5 @@ static inline void rc80211_minstrel_exit(void)
}
#endif
-#ifdef CONFIG_MAC80211_RC_MINSTREL_HT
-int rc80211_minstrel_ht_init(void);
-void rc80211_minstrel_ht_exit(void);
-#else
-static inline int rc80211_minstrel_ht_init(void)
-{
- return 0;
-}
-static inline void rc80211_minstrel_ht_exit(void)
-{
-}
-#endif
-
#endif /* IEEE80211_RATE_H */
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 07fb219327d6..a34e9c2ca626 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -167,12 +167,6 @@ minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs)
if (unlikely(!mrs->att_hist)) {
mrs->prob_ewma = cur_prob;
} else {
- /* update exponential weighted moving variance */
- mrs->prob_ewmv = minstrel_ewmv(mrs->prob_ewmv,
- cur_prob,
- mrs->prob_ewma,
- EWMA_LEVEL);
-
/* update exponential weighted moving average */
mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma,
cur_prob,
@@ -572,141 +566,6 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
minstrel_update_rates(mp, mi);
}
-static void *
-minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
- struct ieee80211_supported_band *sband;
- struct minstrel_sta_info *mi;
- struct minstrel_priv *mp = priv;
- struct ieee80211_hw *hw = mp->hw;
- int max_rates = 0;
- int i;
-
- mi = kzalloc(sizeof(struct minstrel_sta_info), gfp);
- if (!mi)
- return NULL;
-
- for (i = 0; i < NUM_NL80211_BANDS; i++) {
- sband = hw->wiphy->bands[i];
- if (sband && sband->n_bitrates > max_rates)
- max_rates = sband->n_bitrates;
- }
-
- mi->r = kcalloc(max_rates, sizeof(struct minstrel_rate), gfp);
- if (!mi->r)
- goto error;
-
- mi->sample_table = kmalloc_array(max_rates, SAMPLE_COLUMNS, gfp);
- if (!mi->sample_table)
- goto error1;
-
- mi->last_stats_update = jiffies;
- return mi;
-
-error1:
- kfree(mi->r);
-error:
- kfree(mi);
- return NULL;
-}
-
-static void
-minstrel_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
-{
- struct minstrel_sta_info *mi = priv_sta;
-
- kfree(mi->sample_table);
- kfree(mi->r);
- kfree(mi);
-}
-
-static void
-minstrel_init_cck_rates(struct minstrel_priv *mp)
-{
- static const int bitrates[4] = { 10, 20, 55, 110 };
- struct ieee80211_supported_band *sband;
- u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
- int i, j;
-
- sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ];
- if (!sband)
- return;
-
- for (i = 0, j = 0; i < sband->n_bitrates; i++) {
- struct ieee80211_rate *rate = &sband->bitrates[i];
-
- if (rate->flags & IEEE80211_RATE_ERP_G)
- continue;
-
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- continue;
-
- for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
- if (rate->bitrate != bitrates[j])
- continue;
-
- mp->cck_rates[j] = i;
- break;
- }
- }
-}
-
-static void *
-minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- struct minstrel_priv *mp;
-
- mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
- if (!mp)
- return NULL;
-
- /* contention window settings
- * Just an approximation. Using the per-queue values would complicate
- * the calculations and is probably unnecessary */
- mp->cw_min = 15;
- mp->cw_max = 1023;
-
- /* number of packets (in %) to use for sampling other rates
- * sample less often for non-mrr packets, because the overhead
- * is much higher than with mrr */
- mp->lookaround_rate = 5;
- mp->lookaround_rate_mrr = 10;
-
- /* maximum time that the hw is allowed to stay in one MRR segment */
- mp->segment_size = 6000;
-
- if (hw->max_rate_tries > 0)
- mp->max_retry = hw->max_rate_tries;
- else
- /* safe default, does not necessarily have to match hw properties */
- mp->max_retry = 7;
-
- if (hw->max_rates >= 4)
- mp->has_mrr = true;
-
- mp->hw = hw;
- mp->update_interval = 100;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- mp->fixed_rate_idx = (u32) -1;
- mp->dbg_fixed_rate = debugfs_create_u32("fixed_rate_idx",
- 0666, debugfsdir, &mp->fixed_rate_idx);
-#endif
-
- minstrel_init_cck_rates(mp);
-
- return mp;
-}
-
-static void
-minstrel_free(void *priv)
-{
-#ifdef CONFIG_MAC80211_DEBUGFS
- debugfs_remove(((struct minstrel_priv *)priv)->dbg_fixed_rate);
-#endif
- kfree(priv);
-}
-
static u32 minstrel_get_expected_throughput(void *priv_sta)
{
struct minstrel_sta_info *mi = priv_sta;
@@ -725,29 +584,8 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
}
const struct rate_control_ops mac80211_minstrel = {
- .name = "minstrel",
.tx_status_ext = minstrel_tx_status,
.get_rate = minstrel_get_rate,
.rate_init = minstrel_rate_init,
- .alloc = minstrel_alloc,
- .free = minstrel_free,
- .alloc_sta = minstrel_alloc_sta,
- .free_sta = minstrel_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = minstrel_add_sta_debugfs,
- .remove_sta_debugfs = minstrel_remove_sta_debugfs,
-#endif
.get_expected_throughput = minstrel_get_expected_throughput,
};
-
-int __init
-rc80211_minstrel_init(void)
-{
- return ieee80211_rate_control_register(&mac80211_minstrel);
-}
-
-void
-rc80211_minstrel_exit(void)
-{
- ieee80211_rate_control_unregister(&mac80211_minstrel);
-}
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index be6c3f35f48b..23ec953e3a24 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -35,19 +35,6 @@ minstrel_ewma(int old, int new, int weight)
return old + incr;
}
-/*
- * Perform EWMV (Exponentially Weighted Moving Variance) calculation
- */
-static inline int
-minstrel_ewmv(int old_ewmv, int cur_prob, int prob_ewma, int weight)
-{
- int diff, incr;
-
- diff = cur_prob - prob_ewma;
- incr = (EWMA_DIV - weight) * diff / EWMA_DIV;
- return weight * (old_ewmv + MINSTREL_TRUNC(diff * incr)) / EWMA_DIV;
-}
-
struct minstrel_rate_stats {
/* current / last sampling period attempts/success counters */
u16 attempts, last_attempts;
@@ -56,11 +43,8 @@ struct minstrel_rate_stats {
/* total attempts/success counters */
u32 att_hist, succ_hist;
- /* statistis of packet delivery probability
- * prob_ewma - exponential weighted moving average of prob
- * prob_ewmsd - exp. weighted moving standard deviation of prob */
+ /* prob_ewma - exponential weighted moving average of prob */
u16 prob_ewma;
- u16 prob_ewmv;
/* maximum retry counts */
u8 retry_count;
@@ -109,11 +93,6 @@ struct minstrel_sta_info {
/* sampling table */
u8 *sample_table;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *dbg_stats;
- struct dentry *dbg_stats_csv;
-#endif
};
struct minstrel_priv {
@@ -137,7 +116,6 @@ struct minstrel_priv {
* - setting will be applied on next update
*/
u32 fixed_rate_idx;
- struct dentry *dbg_fixed_rate;
#endif
};
@@ -146,17 +124,8 @@ struct minstrel_debugfs_info {
char buf[];
};
-/* Get EWMSD (Exponentially Weighted Moving Standard Deviation) * 10 */
-static inline int
-minstrel_get_ewmsd10(struct minstrel_rate_stats *mrs)
-{
- unsigned int ewmv = mrs->prob_ewmv;
- return int_sqrt(MINSTREL_TRUNC(ewmv * 1000 * 1000));
-}
-
extern const struct rate_control_ops mac80211_minstrel;
void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
-void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
/* Recalculate success probabilities and counters for a given rate using EWMA */
void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs);
@@ -165,7 +134,5 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);
/* debugfs */
int minstrel_stats_open(struct inode *inode, struct file *file);
int minstrel_stats_csv_open(struct inode *inode, struct file *file);
-ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos);
-int minstrel_stats_release(struct inode *inode, struct file *file);
#endif
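
minstrel_ewma(), whose tail shows in the context above, moves the running average toward each new sample by a fixed fraction: incr = (EWMA_DIV - weight) * (new - old) / EWMA_DIV. Assuming the file's existing constants EWMA_LEVEL = 96 and EWMA_DIV = 128, each new sample carries (128 - 96) / 128 = 25% weight:

        /*
         * old = 500, new = 1000, weight = EWMA_LEVEL = 96
         * incr   = (128 - 96) * (1000 - 500) / 128 = 125
         * result = 500 + 125 = 625   (old moved 25% toward new)
         */
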
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 9ad7d63d3e5b..c8afd85b51a0 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -54,22 +54,6 @@
#include <net/mac80211.h>
#include "rc80211_minstrel.h"
-ssize_t
-minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
-{
- struct minstrel_debugfs_info *ms;
-
- ms = file->private_data;
- return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
-}
-
-int
-minstrel_stats_release(struct inode *inode, struct file *file)
-{
- kfree(file->private_data);
- return 0;
-}
-
int
minstrel_stats_open(struct inode *inode, struct file *file)
{
@@ -86,14 +70,13 @@ minstrel_stats_open(struct inode *inode, struct file *file)
p = ms->buf;
p += sprintf(p, "\n");
p += sprintf(p,
- "best __________rate_________ ________statistics________ ____last_____ ______sum-of________\n");
+ "best __________rate_________ ____statistics___ ____last_____ ______sum-of________\n");
p += sprintf(p,
- "rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [retry|suc|att] [#success | #attempts]\n");
+ "rate [name idx airtime max_tp] [avg(tp) avg(prob)] [retry|suc|att] [#success | #attempts]\n");
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
struct minstrel_rate_stats *mrs = &mi->r[i].stats;
- unsigned int prob_ewmsd;
*(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' ';
*(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' ';
@@ -109,15 +92,13 @@ minstrel_stats_open(struct inode *inode, struct file *file)
tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
- prob_ewmsd = minstrel_get_ewmsd10(mrs);
- p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u"
+ p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
" %3u %3u %-3u "
"%9llu %-9llu\n",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
@@ -135,14 +116,6 @@ minstrel_stats_open(struct inode *inode, struct file *file)
return 0;
}
-static const struct file_operations minstrel_stat_fops = {
- .owner = THIS_MODULE,
- .open = minstrel_stats_open,
- .read = minstrel_stats_read,
- .release = minstrel_stats_release,
- .llseek = default_llseek,
-};
-
int
minstrel_stats_csv_open(struct inode *inode, struct file *file)
{
@@ -161,7 +134,6 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
struct minstrel_rate_stats *mrs = &mi->r[i].stats;
- unsigned int prob_ewmsd;
p += sprintf(p, "%s" ,((i == mi->max_tp_rate[0]) ? "A" : ""));
p += sprintf(p, "%s" ,((i == mi->max_tp_rate[1]) ? "B" : ""));
@@ -177,14 +149,12 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
- prob_ewmsd = minstrel_get_ewmsd10(mrs);
- p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u,"
+ p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,%u,"
"%llu,%llu,%d,%d\n",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
@@ -200,33 +170,3 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
return 0;
}
-
-static const struct file_operations minstrel_stat_csv_fops = {
- .owner = THIS_MODULE,
- .open = minstrel_stats_csv_open,
- .read = minstrel_stats_read,
- .release = minstrel_stats_release,
- .llseek = default_llseek,
-};
-
-void
-minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
-{
- struct minstrel_sta_info *mi = priv_sta;
-
- mi->dbg_stats = debugfs_create_file("rc_stats", 0444, dir, mi,
- &minstrel_stat_fops);
-
- mi->dbg_stats_csv = debugfs_create_file("rc_stats_csv", 0444, dir, mi,
- &minstrel_stat_csv_fops);
-}
-
-void
-minstrel_remove_sta_debugfs(void *priv, void *priv_sta)
-{
- struct minstrel_sta_info *mi = priv_sta;
-
- debugfs_remove(mi->dbg_stats);
-
- debugfs_remove(mi->dbg_stats_csv);
-}
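
With the per-station dentries gone, the rc_stats files are created in
the station's debugfs directory and torn down together with it when the
directory is removed recursively, so nothing needs to be remembered and
the whole remove callback can go. A kernel-style sketch of the resulting
pattern (not standalone code; minstrel_stat_fops as in the hunks above):

    static void example_add_sta_debugfs(void *priv_sta, struct dentry *dir)
    {
        debugfs_create_file("rc_stats", 0444, dir, priv_sta,
                            &minstrel_stat_fops);
        /* no saved dentry and no matching remove function needed */
    }
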
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 67ebdeaffbbc..f466ec37d161 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -52,22 +52,23 @@
_streams - 1
/* MCS rate information for an MCS group */
-#define MCS_GROUP(_streams, _sgi, _ht40) \
+#define MCS_GROUP(_streams, _sgi, _ht40, _s) \
[GROUP_IDX(_streams, _sgi, _ht40)] = { \
.streams = _streams, \
+ .shift = _s, \
.flags = \
IEEE80211_TX_RC_MCS | \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
(_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
.duration = { \
- MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \
- MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \
+ MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s \
} \
}
@@ -80,9 +81,10 @@
#define BW2VBPS(_bw, r3, r2, r1) \
(_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
-#define VHT_GROUP(_streams, _sgi, _bw) \
+#define VHT_GROUP(_streams, _sgi, _bw, _s) \
[VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
.streams = _streams, \
+ .shift = _s, \
.flags = \
IEEE80211_TX_RC_VHT_MCS | \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
@@ -90,25 +92,25 @@
_bw == BW_40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
.duration = { \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 117, 54, 26)), \
+ BW2VBPS(_bw, 117, 54, 26)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 234, 108, 52)), \
+ BW2VBPS(_bw, 234, 108, 52)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 351, 162, 78)), \
+ BW2VBPS(_bw, 351, 162, 78)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 468, 216, 104)), \
+ BW2VBPS(_bw, 468, 216, 104)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 702, 324, 156)), \
+ BW2VBPS(_bw, 702, 324, 156)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 936, 432, 208)), \
+ BW2VBPS(_bw, 936, 432, 208)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1053, 486, 234)), \
+ BW2VBPS(_bw, 1053, 486, 234)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1170, 540, 260)), \
+ BW2VBPS(_bw, 1170, 540, 260)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1404, 648, 312)), \
+ BW2VBPS(_bw, 1404, 648, 312)) >> _s, \
MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1560, 720, 346)) \
+ BW2VBPS(_bw, 1560, 720, 346)) >> _s \
} \
}
@@ -121,28 +123,27 @@
(CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) + \
CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE))
-#define CCK_DURATION_LIST(_short) \
- CCK_ACK_DURATION(10, _short), \
- CCK_ACK_DURATION(20, _short), \
- CCK_ACK_DURATION(55, _short), \
- CCK_ACK_DURATION(110, _short)
+#define CCK_DURATION_LIST(_short, _s) \
+ CCK_ACK_DURATION(10, _short) >> _s, \
+ CCK_ACK_DURATION(20, _short) >> _s, \
+ CCK_ACK_DURATION(55, _short) >> _s, \
+ CCK_ACK_DURATION(110, _short) >> _s
-#define CCK_GROUP \
+#define CCK_GROUP(_s) \
[MINSTREL_CCK_GROUP] = { \
- .streams = 0, \
+ .streams = 1, \
.flags = 0, \
+ .shift = _s, \
.duration = { \
- CCK_DURATION_LIST(false), \
- CCK_DURATION_LIST(true) \
+ CCK_DURATION_LIST(false, _s), \
+ CCK_DURATION_LIST(true, _s) \
} \
}
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
static bool minstrel_vht_only = true;
module_param(minstrel_vht_only, bool, 0644);
MODULE_PARM_DESC(minstrel_vht_only,
"Use only VHT rates when VHT is supported by sta.");
-#endif
/*
* To enable sufficiently targeted rate sampling, MCS rates are divided into
@@ -153,49 +154,47 @@ MODULE_PARM_DESC(minstrel_vht_only,
* BW -> SGI -> #streams
*/
const struct mcs_group minstrel_mcs_groups[] = {
- MCS_GROUP(1, 0, BW_20),
- MCS_GROUP(2, 0, BW_20),
- MCS_GROUP(3, 0, BW_20),
+ MCS_GROUP(1, 0, BW_20, 5),
+ MCS_GROUP(2, 0, BW_20, 4),
+ MCS_GROUP(3, 0, BW_20, 4),
- MCS_GROUP(1, 1, BW_20),
- MCS_GROUP(2, 1, BW_20),
- MCS_GROUP(3, 1, BW_20),
+ MCS_GROUP(1, 1, BW_20, 5),
+ MCS_GROUP(2, 1, BW_20, 4),
+ MCS_GROUP(3, 1, BW_20, 4),
- MCS_GROUP(1, 0, BW_40),
- MCS_GROUP(2, 0, BW_40),
- MCS_GROUP(3, 0, BW_40),
+ MCS_GROUP(1, 0, BW_40, 4),
+ MCS_GROUP(2, 0, BW_40, 4),
+ MCS_GROUP(3, 0, BW_40, 4),
- MCS_GROUP(1, 1, BW_40),
- MCS_GROUP(2, 1, BW_40),
- MCS_GROUP(3, 1, BW_40),
+ MCS_GROUP(1, 1, BW_40, 4),
+ MCS_GROUP(2, 1, BW_40, 4),
+ MCS_GROUP(3, 1, BW_40, 4),
- CCK_GROUP,
+ CCK_GROUP(8),
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
- VHT_GROUP(1, 0, BW_20),
- VHT_GROUP(2, 0, BW_20),
- VHT_GROUP(3, 0, BW_20),
+ VHT_GROUP(1, 0, BW_20, 5),
+ VHT_GROUP(2, 0, BW_20, 4),
+ VHT_GROUP(3, 0, BW_20, 4),
- VHT_GROUP(1, 1, BW_20),
- VHT_GROUP(2, 1, BW_20),
- VHT_GROUP(3, 1, BW_20),
+ VHT_GROUP(1, 1, BW_20, 5),
+ VHT_GROUP(2, 1, BW_20, 4),
+ VHT_GROUP(3, 1, BW_20, 4),
- VHT_GROUP(1, 0, BW_40),
- VHT_GROUP(2, 0, BW_40),
- VHT_GROUP(3, 0, BW_40),
+ VHT_GROUP(1, 0, BW_40, 4),
+ VHT_GROUP(2, 0, BW_40, 4),
+ VHT_GROUP(3, 0, BW_40, 4),
- VHT_GROUP(1, 1, BW_40),
- VHT_GROUP(2, 1, BW_40),
- VHT_GROUP(3, 1, BW_40),
+ VHT_GROUP(1, 1, BW_40, 4),
+ VHT_GROUP(2, 1, BW_40, 4),
+ VHT_GROUP(3, 1, BW_40, 4),
- VHT_GROUP(1, 0, BW_80),
- VHT_GROUP(2, 0, BW_80),
- VHT_GROUP(3, 0, BW_80),
+ VHT_GROUP(1, 0, BW_80, 4),
+ VHT_GROUP(2, 0, BW_80, 4),
+ VHT_GROUP(3, 0, BW_80, 4),
- VHT_GROUP(1, 1, BW_80),
- VHT_GROUP(2, 1, BW_80),
- VHT_GROUP(3, 1, BW_80),
-#endif
+ VHT_GROUP(1, 1, BW_80, 4),
+ VHT_GROUP(2, 1, BW_80, 4),
+ VHT_GROUP(3, 1, BW_80, 4),
};
static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
@@ -282,7 +281,8 @@ minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
break;
/* short preamble */
- if (!(mi->supported[group] & BIT(idx)))
+ if ((mi->supported[group] & BIT(idx + 4)) &&
+ (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE))
idx += 4;
}
return &mi->groups[group].rates[idx];
@@ -311,7 +311,8 @@ minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
if (group != MINSTREL_CCK_GROUP)
nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
- nsecs += minstrel_mcs_groups[group].duration[rate];
+ nsecs += minstrel_mcs_groups[group].duration[rate] <<
+ minstrel_mcs_groups[group].shift;
/*
* For the throughput calculation, limit the probability value to 90% to
@@ -759,12 +760,19 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
minstrel_ht_update_rates(mp, mi);
}
+static inline int
+minstrel_get_duration(int index)
+{
+ const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
+ unsigned int duration = group->duration[index % MCS_GROUP_RATES];
+ return duration << group->shift;
+}
+
static void
minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
int index)
{
struct minstrel_rate_stats *mrs;
- const struct mcs_group *group;
unsigned int tx_time, tx_time_rtscts, tx_time_data;
unsigned int cw = mp->cw_min;
unsigned int ctime = 0;
@@ -783,8 +791,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
mrs->retry_count_rtscts = 2;
mrs->retry_updated = true;
- group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
- tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000;
+ tx_time_data = minstrel_get_duration(index) * ampdu_len / 1000;
/* Contention time for first 2 tries */
ctime = (t_slot * cw) >> 1;
@@ -878,20 +885,24 @@ minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
int group = mi->max_prob_rate / MCS_GROUP_RATES;
const struct mcs_group *g = &minstrel_mcs_groups[group];
int rate = mi->max_prob_rate % MCS_GROUP_RATES;
+ unsigned int duration;
/* Disable A-MSDU if max_prob_rate is bad */
if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100))
return 1;
+ duration = g->duration[rate];
+ duration <<= g->shift;
+
/* If the rate is slower than single-stream MCS1, make A-MSDU limit small */
- if (g->duration[rate] > MCS_DURATION(1, 0, 52))
+ if (duration > MCS_DURATION(1, 0, 52))
return 500;
/*
* If the rate is slower than single-stream MCS4, limit A-MSDU to usual
* data packet size
*/
- if (g->duration[rate] > MCS_DURATION(1, 0, 104))
+ if (duration > MCS_DURATION(1, 0, 104))
return 1600;
/*
@@ -899,7 +910,7 @@ minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
* rate success probability is less than 75%, limit A-MSDU to twice the usual
* data packet size
*/
- if (g->duration[rate] > MCS_DURATION(1, 0, 260) ||
+ if (duration > MCS_DURATION(1, 0, 260) ||
(minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) <
MINSTREL_FRAC(75, 100)))
return 3200;
@@ -946,13 +957,6 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
rate_control_set_rates(mp->hw, mi->sta, rates);
}
-static inline int
-minstrel_get_duration(int index)
-{
- const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
- return group->duration[index % MCS_GROUP_RATES];
-}
-
static int
minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
@@ -1000,10 +1004,13 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
return -1;
/*
- * Do not sample if the probability is already higher than 95%
- * to avoid wasting airtime.
+ * Do not sample if the probability is already higher than 95%,
+ * or if the rate is 3 times slower than the current max probability
+ * rate, to avoid wasting airtime.
*/
- if (mrs->prob_ewma > MINSTREL_FRAC(95, 100))
+ sample_dur = minstrel_get_duration(sample_idx);
+ if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
+ minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur)
return -1;
/*
@@ -1013,7 +1020,6 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
cur_max_tp_streams = minstrel_mcs_groups[tp_rate1 /
MCS_GROUP_RATES].streams;
- sample_dur = minstrel_get_duration(sample_idx);
if (sample_dur >= minstrel_get_duration(tp_rate2) &&
(cur_max_tp_streams - 1 <
minstrel_mcs_groups[sample_group].streams ||
@@ -1077,18 +1083,23 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
return;
sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
+ sample_idx %= MCS_GROUP_RATES;
+
+ if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP] &&
+ (sample_idx >= 4) != txrc->short_preamble)
+ return;
+
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
rate->count = 1;
- if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
+ if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP]) {
int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
rate->idx = mp->cck_rates[idx];
} else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) {
ieee80211_rate_set_vht(rate, sample_idx % MCS_GROUP_RATES,
sample_group->streams);
} else {
- rate->idx = sample_idx % MCS_GROUP_RATES +
- (sample_group->streams - 1) * 8;
+ rate->idx = sample_idx + (sample_group->streams - 1) * 8;
}
rate->flags = sample_group->flags;
@@ -1130,14 +1141,14 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
struct minstrel_ht_sta_priv *msp = priv_sta;
struct minstrel_ht_sta *mi = &msp->ht;
struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
- u16 sta_cap = sta->ht_cap.cap;
+ u16 ht_cap = sta->ht_cap.cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
- struct sta_info *sinfo = container_of(sta, struct sta_info, sta);
int use_vht;
int n_supported = 0;
int ack_dur;
int stbc;
int i;
+ bool ldpc;
/* fall back to the old minstrel for legacy stations */
if (!sta->ht_cap.ht_supported)
@@ -1145,12 +1156,10 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB);
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
if (vht_cap->vht_supported)
use_vht = vht_cap->vht_mcs.tx_mcs_map != cpu_to_le16(~0);
else
-#endif
- use_vht = 0;
+ use_vht = 0;
msp->is_ht = true;
memset(mi, 0, sizeof(*mi));
@@ -1175,16 +1184,22 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
}
mi->sample_tries = 4;
- /* TODO tx_flags for vht - ATM the RC API is not fine-grained enough */
if (!use_vht) {
- stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >>
+ stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >>
IEEE80211_HT_CAP_RX_STBC_SHIFT;
- mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
- if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
- mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
+ ldpc = ht_cap & IEEE80211_HT_CAP_LDPC_CODING;
+ } else {
+ stbc = (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) >>
+ IEEE80211_VHT_CAP_RXSTBC_SHIFT;
+
+ ldpc = vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC;
}
+ mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
+ if (ldpc)
+ mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
+
for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
u32 gflags = minstrel_mcs_groups[i].flags;
int bw, nss;
@@ -1197,10 +1212,10 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
if (gflags & IEEE80211_TX_RC_SHORT_GI) {
if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
- if (!(sta_cap & IEEE80211_HT_CAP_SGI_40))
+ if (!(ht_cap & IEEE80211_HT_CAP_SGI_40))
continue;
} else {
- if (!(sta_cap & IEEE80211_HT_CAP_SGI_20))
+ if (!(ht_cap & IEEE80211_HT_CAP_SGI_20))
continue;
}
}
@@ -1217,10 +1232,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
/* HT rate */
if (gflags & IEEE80211_TX_RC_MCS) {
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
if (use_vht && minstrel_vht_only)
continue;
-#endif
+
mi->supported[i] = mcs->rx_mask[nss - 1];
if (mi->supported[i])
n_supported++;
@@ -1258,8 +1272,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
if (!n_supported)
goto use_legacy;
- if (test_sta_flag(sinfo, WLAN_STA_SHORT_PREAMBLE))
- mi->cck_supported_short |= mi->cck_supported_short << 4;
+ mi->supported[MINSTREL_CCK_GROUP] |= mi->cck_supported_short << 4;
/* create an initial rate table with the lowest supported rates */
minstrel_ht_update_stats(mp, mi);
@@ -1340,16 +1353,88 @@ minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
kfree(msp);
}
+static void
+minstrel_ht_init_cck_rates(struct minstrel_priv *mp)
+{
+ static const int bitrates[4] = { 10, 20, 55, 110 };
+ struct ieee80211_supported_band *sband;
+ u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
+ int i, j;
+
+ sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ struct ieee80211_rate *rate = &sband->bitrates[i];
+
+ if (rate->flags & IEEE80211_RATE_ERP_G)
+ continue;
+
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
+ if (rate->bitrate != bitrates[j])
+ continue;
+
+ mp->cck_rates[j] = i;
+ break;
+ }
+ }
+}
+
static void *
minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
- return mac80211_minstrel.alloc(hw, debugfsdir);
+ struct minstrel_priv *mp;
+
+ mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
+ if (!mp)
+ return NULL;
+
+ /* contention window settings
+ * Just an approximation. Using the per-queue values would complicate
+ * the calculations and is probably unnecessary */
+ mp->cw_min = 15;
+ mp->cw_max = 1023;
+
+ /* number of packets (in %) to use for sampling other rates
+ * sample less often for non-mrr packets, because the overhead
+ * is much higher than with mrr */
+ mp->lookaround_rate = 5;
+ mp->lookaround_rate_mrr = 10;
+
+ /* maximum time that the hw is allowed to stay in one MRR segment */
+ mp->segment_size = 6000;
+
+ if (hw->max_rate_tries > 0)
+ mp->max_retry = hw->max_rate_tries;
+ else
+ /* safe default, does not necessarily have to match hw properties */
+ mp->max_retry = 7;
+
+ if (hw->max_rates >= 4)
+ mp->has_mrr = true;
+
+ mp->hw = hw;
+ mp->update_interval = 100;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ mp->fixed_rate_idx = (u32) -1;
+ debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir,
+ &mp->fixed_rate_idx);
+#endif
+
+ minstrel_ht_init_cck_rates(mp);
+
+ return mp;
}
static void
minstrel_ht_free(void *priv)
{
- mac80211_minstrel.free(priv);
+ kfree(priv);
}
static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
@@ -1384,7 +1469,6 @@ static const struct rate_control_ops mac80211_minstrel_ht = {
.free = minstrel_ht_free,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
- .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
#endif
.get_expected_throughput = minstrel_ht_get_expected_throughput,
};
@@ -1409,14 +1493,14 @@ static void __init init_sample_table(void)
}
int __init
-rc80211_minstrel_ht_init(void)
+rc80211_minstrel_init(void)
{
init_sample_table();
return ieee80211_rate_control_register(&mac80211_minstrel_ht);
}
void
-rc80211_minstrel_ht_exit(void)
+rc80211_minstrel_exit(void)
{
ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
}
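
The recurring ">> _s" in the tables above compresses each group's
airtime entries: durations are stored pre-shifted so they fit into a
u16 and are shifted back up at the point of use, as in
minstrel_get_duration(). A self-contained sketch of the round trip,
with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    struct grp {
        uint8_t shift;
        uint16_t duration[8]; /* real duration >> shift */
    };

    static unsigned int grp_duration(const struct grp *g, int rate)
    {
        return (unsigned int)g->duration[rate] << g->shift;
    }

    int main(void)
    {
        struct grp g = { .shift = 4, .duration = { 59000 >> 4 } };

        /* the low bits are lost to the shift: 59000 comes back as 58992 */
        printf("stored %u, expanded %u\n",
               (unsigned int)g.duration[0], grp_duration(&g, 0));
        return 0;
    }
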
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index de1646c42e82..26b7a3244b47 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -15,11 +15,7 @@
*/
#define MINSTREL_MAX_STREAMS 3
#define MINSTREL_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
#define MINSTREL_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */
-#else
-#define MINSTREL_VHT_STREAM_GROUPS 0
-#endif
#define MINSTREL_HT_GROUPS_NB (MINSTREL_MAX_STREAMS * \
MINSTREL_HT_STREAM_GROUPS)
@@ -34,16 +30,13 @@
#define MINSTREL_CCK_GROUP (MINSTREL_HT_GROUP_0 + MINSTREL_HT_GROUPS_NB)
#define MINSTREL_VHT_GROUP_0 (MINSTREL_CCK_GROUP + 1)
-#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
#define MCS_GROUP_RATES 10
-#else
-#define MCS_GROUP_RATES 8
-#endif
struct mcs_group {
- u32 flags;
- unsigned int streams;
- unsigned int duration[MCS_GROUP_RATES];
+ u16 flags;
+ u8 streams;
+ u8 shift;
+ u16 duration[MCS_GROUP_RATES];
};
extern const struct mcs_group minstrel_mcs_groups[];
@@ -110,17 +103,12 @@ struct minstrel_ht_sta_priv {
struct minstrel_ht_sta ht;
struct minstrel_sta_info legacy;
};
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *dbg_stats;
- struct dentry *dbg_stats_csv;
-#endif
void *ratelist;
void *sample_table;
bool is_ht;
};
void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
-void minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta);
int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
int prob_ewma);
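
Shrinking struct mcs_group is what the shift encoding pays for. A rough
userspace comparison of the two layouts, taking the VHT-enabled
MCS_GROUP_RATES value of 10 and assuming a typical LP64 ABI:

    #include <stdint.h>
    #include <stdio.h>

    struct old_mcs_group {
        uint32_t flags;
        unsigned int streams;
        unsigned int duration[10];
    };

    struct new_mcs_group {
        uint16_t flags;
        uint8_t streams;
        uint8_t shift;
        uint16_t duration[10];
    };

    int main(void)
    {
        /* typically prints "old 48 bytes, new 24 bytes" */
        printf("old %zu bytes, new %zu bytes\n",
               sizeof(struct old_mcs_group), sizeof(struct new_mcs_group));
        return 0;
    }
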
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index bfcc03152dc6..57820a5f2c16 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -15,6 +15,22 @@
#include "rc80211_minstrel.h"
#include "rc80211_minstrel_ht.h"
+static ssize_t
+minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
+{
+ struct minstrel_debugfs_info *ms;
+
+ ms = file->private_data;
+ return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
+}
+
+static int
+minstrel_stats_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
static char *
minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
{
@@ -41,7 +57,7 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
static const int bitrates[4] = { 10, 20, 55, 110 };
int idx = i * MCS_GROUP_RATES + j;
- unsigned int prob_ewmsd;
+ unsigned int duration;
if (!(mi->supported[i] & BIT(j)))
continue;
@@ -79,21 +95,21 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
p += sprintf(p, " %3u ", idx);
/* tx_time[rate(i)] in usec */
- tx_time = DIV_ROUND_CLOSEST(mg->duration[j], 1000);
+ duration = mg->duration[j];
+ duration <<= mg->shift;
+ tx_time = DIV_ROUND_CLOSEST(duration, 1000);
p += sprintf(p, "%6u ", tx_time);
tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
- prob_ewmsd = minstrel_get_ewmsd10(mrs);
- p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u"
+ p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u"
" %3u %3u %-3u "
"%9llu %-9llu\n",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
@@ -130,9 +146,9 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
p += sprintf(p, "\n");
p += sprintf(p,
- " best ____________rate__________ ________statistics________ _____last____ ______sum-of________\n");
+ " best ____________rate__________ ____statistics___ _____last____ ______sum-of________\n");
p += sprintf(p,
- "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [retry|suc|att] [#success | #attempts]\n");
+ "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob)] [retry|suc|att] [#success | #attempts]\n");
p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p);
for (i = 0; i < MINSTREL_CCK_GROUP; i++)
@@ -187,7 +203,7 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
static const int bitrates[4] = { 10, 20, 55, 110 };
int idx = i * MCS_GROUP_RATES + j;
- unsigned int prob_ewmsd;
+ unsigned int duration;
if (!(mi->supported[i] & BIT(j)))
continue;
@@ -222,20 +238,21 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
}
p += sprintf(p, "%u,", idx);
- tx_time = DIV_ROUND_CLOSEST(mg->duration[j], 1000);
+
+ duration = mg->duration[j];
+ duration <<= mg->shift;
+ tx_time = DIV_ROUND_CLOSEST(duration, 1000);
p += sprintf(p, "%u,", tx_time);
tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
- prob_ewmsd = minstrel_get_ewmsd10(mrs);
- p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,"
+ p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u,%u,"
"%u,%llu,%llu,",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
@@ -303,17 +320,8 @@ minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
{
struct minstrel_ht_sta_priv *msp = priv_sta;
- msp->dbg_stats = debugfs_create_file("rc_stats", 0444, dir, msp,
- &minstrel_ht_stat_fops);
- msp->dbg_stats_csv = debugfs_create_file("rc_stats_csv", 0444, dir, msp,
- &minstrel_ht_stat_csv_fops);
-}
-
-void
-minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta)
-{
- struct minstrel_ht_sta_priv *msp = priv_sta;
-
- debugfs_remove(msp->dbg_stats);
- debugfs_remove(msp->dbg_stats_csv);
+ debugfs_create_file("rc_stats", 0444, dir, msp,
+ &minstrel_ht_stat_fops);
+ debugfs_create_file("rc_stats_csv", 0444, dir, msp,
+ &minstrel_ht_stat_csv_fops);
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 96611d5dfadb..3bd3b5769797 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -115,7 +115,8 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
RX_FLAG_FAILED_PLCP_CRC |
- RX_FLAG_ONLY_MONITOR))
+ RX_FLAG_ONLY_MONITOR |
+ RX_FLAG_NO_PSDU))
return true;
if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
@@ -189,6 +190,15 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
}
+ if (status->flag & RX_FLAG_NO_PSDU)
+ len += 1;
+
+ if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
+ len = ALIGN(len, 2);
+ len += 4;
+ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
+ }
+
if (status->chains) {
/* antenna and antenna signal fields */
len += 2 * hweight8(status->chains);
@@ -279,6 +289,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
struct ieee80211_vendor_radiotap rtap = {};
struct ieee80211_radiotap_he he = {};
struct ieee80211_radiotap_he_mu he_mu = {};
+ struct ieee80211_radiotap_lsig lsig = {};
if (status->flag & RX_FLAG_RADIOTAP_HE) {
he = *(struct ieee80211_radiotap_he *)skb->data;
@@ -291,6 +302,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
skb_pull(skb, sizeof(he_mu));
}
+ if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
+ lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
+ skb_pull(skb, sizeof(lsig));
+ }
+
if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
/* rtap.len and rtap.pad are undone immediately */
@@ -549,7 +565,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
if (status->encoding == RX_ENC_HE &&
status->flag & RX_FLAG_RADIOTAP_HE) {
-#define HE_PREP(f, val) cpu_to_le16(FIELD_PREP(IEEE80211_RADIOTAP_HE_##f, val))
+#define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)
if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
he.data6 |= HE_PREP(DATA6_NSTS,
@@ -630,6 +646,21 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos += sizeof(he_mu);
}
+ if (status->flag & RX_FLAG_NO_PSDU) {
+ rthdr->it_present |=
+ cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU);
+ *pos++ = status->zero_length_psdu_type;
+ }
+
+ if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
+ /* ensure 2 byte alignment */
+ while ((pos - (u8 *)rthdr) & 1)
+ pos++;
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG);
+ memcpy(pos, &lsig, sizeof(lsig));
+ pos += sizeof(lsig);
+ }
+
for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
*pos++ = status->chain_signal[chain];
*pos++ = chain;
@@ -1505,7 +1536,7 @@ static void sta_ps_start(struct sta_info *sta)
if (!sta->sta.txq[0])
return;
- for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
+ for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
if (txq_has_queue(sta->sta.txq[tid]))
set_bit(tid, &sta->txq_buffered_tids);
else
@@ -2046,6 +2077,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
idx = sdata->fragment_next;
for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
struct ieee80211_hdr *f_hdr;
+ struct sk_buff *f_skb;
idx--;
if (idx < 0)
@@ -2057,7 +2089,8 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
entry->last_frag + 1 != frag)
continue;
- f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
+ f_skb = __skb_peek(&entry->skb_list);
+ f_hdr = (struct ieee80211_hdr *) f_skb->data;
/*
* Check ftype and addresses are equal, else check next fragment
@@ -2314,7 +2347,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
if (!sdata->u.mgd.use_4addr)
return -1;
- else
+ else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
check_port_control = true;
}
@@ -2425,8 +2458,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
if (!xmit_skb)
net_info_ratelimited("%s: failed to clone multicast frame\n",
dev->name);
- } else if (!is_multicast_ether_addr(ehdr->h_dest)) {
- dsta = sta_info_get(sdata, skb->data);
+ } else if (!is_multicast_ether_addr(ehdr->h_dest) &&
+ !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
+ dsta = sta_info_get(sdata, ehdr->h_dest);
if (dsta) {
/*
* The destination station is associated to
@@ -4207,11 +4241,10 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
if (fast_rx->internal_forward) {
struct sk_buff *xmit_skb = NULL;
- bool multicast = is_multicast_ether_addr(skb->data);
-
- if (multicast) {
+ if (is_multicast_ether_addr(addrs.da)) {
xmit_skb = skb_copy(skb, GFP_ATOMIC);
- } else if (sta_info_get(rx->sdata, skb->data)) {
+ } else if (!ether_addr_equal(addrs.da, addrs.sa) &&
+ sta_info_get(rx->sdata, addrs.da)) {
xmit_skb = skb;
skb = NULL;
}
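
The L-SIG radiotap field added above has 2-byte alignment, hence the
padding loop before the memcpy. A self-contained sketch of that
alignment step:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint8_t rthdr[32] = { 0 };
        uint8_t *pos = rthdr + 13; /* pretend earlier fields ended here */
        uint32_t lsig = 0;         /* stand-in for the 4-byte L-SIG field */

        /* pad to a 2-byte boundary relative to the radiotap header */
        while ((pos - rthdr) & 1)
            pos++;

        memcpy(pos, &lsig, sizeof(lsig));
        printf("L-SIG written at offset %td\n", pos - rthdr); /* 14 */
        return 0;
    }
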
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 029334835747..4e4902bdbef8 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -144,6 +144,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
wide_bw_chansw_ie->new_center_freq_seg1,
/* .basic_mcs_set doesn't matter */
};
+ struct ieee80211_ht_operation ht_oper = {};
/* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
* to the previously parsed chandef
@@ -151,7 +152,9 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
new_vht_chandef = csa_ie->chandef;
/* ignore if parsing fails */
- if (!ieee80211_chandef_vht_oper(&vht_oper, &new_vht_chandef))
+ if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
+ &vht_oper, &ht_oper,
+ &new_vht_chandef))
new_vht_chandef.chan = NULL;
if (sta_flags & IEEE80211_STA_DISABLE_80P80MHZ &&
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f34202242d24..fb8c2252ac0e 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -113,7 +113,12 @@ static void __cleanup_single_sta(struct sta_info *sta)
if (sta->sta.txq[0]) {
for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
- struct txq_info *txqi = to_txq_info(sta->sta.txq[i]);
+ struct txq_info *txqi;
+
+ if (!sta->sta.txq[i])
+ continue;
+
+ txqi = to_txq_info(sta->sta.txq[i]);
spin_lock_bh(&fq->lock);
ieee80211_txq_purge(local, txqi);
@@ -374,6 +379,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
struct txq_info *txq = txq_data + i * size;
+ /* might not do anything for the bufferable MMPDU TXQ */
ieee80211_txq_init(sdata, sta, txq, i);
}
}
@@ -1239,13 +1245,11 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
- if (sta->sta.txq[0]) {
- for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
- if (!txq_has_queue(sta->sta.txq[i]))
- continue;
+ for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+ if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i]))
+ continue;
- drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i]));
- }
+ drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i]));
}
skb_queue_head_init(&pending);
@@ -1683,7 +1687,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
return;
for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
- if (!(driver_release_tids & BIT(tid)) ||
+ if (!sta->sta.txq[tid] ||
+ !(driver_release_tids & BIT(tid)) ||
txq_has_queue(sta->sta.txq[tid]))
continue;
@@ -2323,13 +2328,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
}
- if (ieee80211_hw_check(&sta->local->hw, REPORTS_TX_ACK_STATUS) &&
- !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG))) {
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) &&
+ sta->status_stats.ack_signal_filled) {
sinfo->avg_ack_signal =
-(s8)ewma_avg_signal_read(
&sta->status_stats.avg_ack_signal);
sinfo->filled |=
- BIT_ULL(NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG);
+ BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
}
}
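
Several loops above gain a NULL check because sta->sta.txq[] may now
contain unset slots, for example the bufferable-MMPDU queue when the
driver did not opt in. A toy sketch of the guard pattern, with 17
standing in for IEEE80211_NUM_TIDS plus one management queue:

    #include <stdio.h>

    #define NUM_TXQS 17

    struct txq { int queued; };

    static void wake_queued(struct txq *txqs[NUM_TXQS])
    {
        for (int i = 0; i < NUM_TXQS; i++) {
            /* slots may legitimately be NULL, so guard before use */
            if (!txqs[i] || !txqs[i]->queued)
                continue;
            printf("waking txq %d\n", i);
        }
    }

    int main(void)
    {
        struct txq t = { .queued = 1 };
        struct txq *txqs[NUM_TXQS] = { [3] = &t };

        wake_queued(txqs);
        return 0;
    }
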
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 9a6d7208bf4f..aa4afbf0abaf 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -479,11 +479,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
if (!skb)
return;
- if (dropped) {
- dev_kfree_skb_any(skb);
- return;
- }
-
if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
struct ieee80211_sub_if_data *sdata;
@@ -507,6 +502,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
rcu_read_unlock();
dev_kfree_skb_any(skb);
+ } else if (dropped) {
+ dev_kfree_skb_any(skb);
} else {
/* consumes skb */
skb_complete_wifi_ack(skb, acked);
@@ -811,7 +808,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
rate_control_tx_status(local, sband, status);
if (ieee80211_vif_is_mesh(&sta->sdata->vif))
- ieee80211s_update_metric(local, sta, skb);
+ ieee80211s_update_metric(local, sta, status);
if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
ieee80211_frame_acked(sta, skb);
@@ -972,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
}
rate_control_tx_status(local, sband, status);
+ if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+ ieee80211s_update_metric(local, sta, status);
}
if (acked || noack_success) {
@@ -988,6 +987,25 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_tx_status_ext);
+void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_supported_band *sband = hw->wiphy->bands[info->band];
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ struct ieee80211_tx_status status = {
+ .info = info,
+ .sta = pubsta,
+ };
+
+ rate_control_tx_status(local, sband, &status);
+
+ if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
+ sta->tx_stats.last_rate = info->status.rates[0];
+}
+EXPORT_SYMBOL(ieee80211_tx_rate_update);
+
void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 5cd5e6e5834e..6c647f425e05 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -16,6 +16,7 @@
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
+#include "wme.h"
/* give usermode some time for retries in setting up the TDLS session */
#define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)
@@ -1010,14 +1011,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
case WLAN_TDLS_SETUP_RESPONSE:
- skb_set_queue_mapping(skb, IEEE80211_AC_BK);
- skb->priority = 2;
+ skb->priority = 256 + 2;
break;
default:
- skb_set_queue_mapping(skb, IEEE80211_AC_VI);
- skb->priority = 5;
+ skb->priority = 256 + 5;
break;
}
+ skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
/*
* Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
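
Rather than pinning the queue mapping by hand, the TDLS path now encodes
the desired TID into skb->priority and lets ieee80211_select_queue() map
it; cfg80211's classifier treats priorities 256-263 as a direct 802.1d
priority rather than reclassifying the frame. A sketch of that mapping
(an illustration of the convention, not the kernel function itself):

    #include <stdio.h>

    /* priorities of 256 + TID carry an explicit TID through queue
     * selection; lower values are plain QoS priorities that later
     * classification may overwrite */
    static int tid_from_priority(unsigned int priority)
    {
        if (priority >= 256 && priority <= 263)
            return (int)(priority - 256);
        return (int)(priority & 7);
    }

    int main(void)
    {
        /* both resolve to TID 2, but only the first survives reclassification */
        printf("%d %d\n", tid_from_priority(256 + 2), tid_from_priority(2));
        return 0;
    }
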
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 0ab69a1964f8..588c51a67c89 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2600,6 +2600,29 @@ TRACE_EVENT(drv_wake_tx_queue,
)
);
+TRACE_EVENT(drv_get_ftm_responder_stats,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats),
+
+ TP_ARGS(local, sdata, ftm_stats),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG
+ )
+);
+
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f353d9db54bc..e0ccee23fbcd 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -214,6 +214,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
{
struct ieee80211_local *local = tx->local;
struct ieee80211_if_managed *ifmgd;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
/* driver doesn't support power save */
if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
@@ -242,6 +243,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
return TX_CONTINUE;
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
+ return TX_CONTINUE;
+
ifmgd = &tx->sdata->u.mgd;
/*
@@ -1249,10 +1253,18 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
return NULL;
- if (!ieee80211_is_data_present(hdr->frame_control))
- return NULL;
-
- if (sta) {
+ if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
+ if ((!ieee80211_is_mgmt(hdr->frame_control) ||
+ ieee80211_is_bufferable_mmpdu(hdr->frame_control) ||
+ vif->type == NL80211_IFTYPE_STATION) &&
+ sta && sta->uploaded) {
+ /*
+ * This will be NULL if the driver didn't set the
+ * opt-in hardware flag.
+ */
+ txq = sta->sta.txq[IEEE80211_NUM_TIDS];
+ }
+ } else if (sta) {
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
if (!sta->uploaded)
@@ -1440,16 +1452,33 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
txqi->txq.vif = &sdata->vif;
- if (sta) {
- txqi->txq.sta = &sta->sta;
- sta->sta.txq[tid] = &txqi->txq;
- txqi->txq.tid = tid;
- txqi->txq.ac = ieee80211_ac_from_tid(tid);
- } else {
+ if (!sta) {
sdata->vif.txq = &txqi->txq;
txqi->txq.tid = 0;
txqi->txq.ac = IEEE80211_AC_BE;
+
+ return;
}
+
+ if (tid == IEEE80211_NUM_TIDS) {
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ /* Drivers need to opt in to the management MPDU TXQ */
+ if (!ieee80211_hw_check(&sdata->local->hw,
+ STA_MMPDU_TXQ))
+ return;
+ } else if (!ieee80211_hw_check(&sdata->local->hw,
+ BUFF_MMPDU_TXQ)) {
+ /* Drivers need to opt in to the bufferable MMPDU TXQ */
+ return;
+ }
+ txqi->txq.ac = IEEE80211_AC_VO;
+ } else {
+ txqi->txq.ac = ieee80211_ac_from_tid(tid);
+ }
+
+ txqi->txq.sta = &sta->sta;
+ txqi->txq.tid = tid;
+ sta->sta.txq[tid] = &txqi->txq;
}
void ieee80211_txq_purge(struct ieee80211_local *local,
@@ -1890,7 +1919,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
if (invoke_tx_handlers_early(&tx))
- return false;
+ return true;
if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
return true;
@@ -2951,6 +2980,10 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
goto out;
+ /* Key is being removed */
+ if (build.key->flags & KEY_FLAG_TAINTED)
+ goto out;
+
switch (build.key->conf.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
@@ -3196,6 +3229,10 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
max_amsdu_len = min_t(int, max_amsdu_len,
sta->sta.max_rc_amsdu_len);
+ if (sta->sta.max_tid_amsdu_len[tid])
+ max_amsdu_len = min_t(int, max_amsdu_len,
+ sta->sta.max_tid_amsdu_len[tid]);
+
spin_lock_bh(&fq->lock);
/* TODO: Ideally aggregation should be done on dequeue to remain
@@ -3228,6 +3265,9 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (max_frags && nfrags > max_frags)
goto out;
+ if (!drv_can_aggregate_in_amsdu(local, head, skb))
+ goto out;
+
if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
goto out;
@@ -3472,12 +3512,18 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
struct ieee80211_tx_data tx;
ieee80211_tx_result r;
- struct ieee80211_vif *vif;
+ struct ieee80211_vif *vif = txq->vif;
spin_lock_bh(&fq->lock);
- if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
+ if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
+ test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
+ goto out;
+
+ if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) {
+ set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
goto out;
+ }
/* Make sure fragments stay together. */
skb = __skb_dequeue(&txqi->frags);
@@ -3573,6 +3619,7 @@ begin:
}
IEEE80211_SKB_CB(skb)->control.vif = vif;
+
out:
spin_unlock_bh(&fq->lock);
@@ -3601,13 +3648,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
if (!IS_ERR_OR_NULL(sta)) {
struct ieee80211_fast_tx *fast_tx;
- /* We need a bit of data queued to build aggregates properly, so
- * instruct the TCP stack to allow more than a single ms of data
- * to be queued in the stack. The value is a bit-shift of 1
- * second, so 8 is ~4ms of queued data. Only affects local TCP
- * sockets.
- */
- sk_pacing_shift_update(skb->sk, 8);
+ sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
fast_tx = rcu_dereference(sta->fast_tx);
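
The hard-coded pacing shift of 8 becomes a per-hardware field. The
removed comment gives the scale: TCP may queue roughly one second
right-shifted by the pacing shift, so 8 is about 4 ms. A quick sketch of
that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* TCP small-queues budget is roughly (1 second >> shift) */
        for (int shift = 6; shift <= 10; shift++)
            printf("shift %2d -> ~%.2f ms of queued data\n",
                   shift, 1000.0 / (1 << shift));
        return 0;
    }
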
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 716cd6442d86..bec424316ea4 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -240,6 +240,102 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_ctstoself_duration);
+static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_vif *vif = &sdata->vif;
+ struct fq *fq = &local->fq;
+ struct ps_data *ps = NULL;
+ struct txq_info *txqi;
+ struct sta_info *sta;
+ int i;
+
+ spin_lock_bh(&fq->lock);
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP)
+ ps = &sdata->bss->ps;
+
+ sdata->vif.txqs_stopped[ac] = false;
+
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (sdata != sta->sdata)
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+ struct ieee80211_txq *txq = sta->sta.txq[i];
+
+ if (!txq)
+ continue;
+
+ txqi = to_txq_info(txq);
+
+ if (ac != txq->ac)
+ continue;
+
+ if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX,
+ &txqi->flags))
+ continue;
+
+ spin_unlock_bh(&fq->lock);
+ drv_wake_tx_queue(local, txqi);
+ spin_lock_bh(&fq->lock);
+ }
+ }
+
+ if (!vif->txq)
+ goto out;
+
+ txqi = to_txq_info(vif->txq);
+
+ if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) ||
+ (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
+ goto out;
+
+ spin_unlock_bh(&fq->lock);
+
+ drv_wake_tx_queue(local, txqi);
+ return;
+out:
+ spin_unlock_bh(&fq->lock);
+}
+
+void ieee80211_wake_txqs(unsigned long data)
+{
+ struct ieee80211_local *local = (struct ieee80211_local *)data;
+ struct ieee80211_sub_if_data *sdata;
+ int n_acs = IEEE80211_NUM_ACS;
+ unsigned long flags;
+ int i;
+
+ rcu_read_lock();
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ n_acs = 1;
+
+ for (i = 0; i < local->hw.queues; i++) {
+ if (local->queue_stop_reasons[i])
+ continue;
+
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ int ac;
+
+ for (ac = 0; ac < n_acs; ac++) {
+ int ac_queue = sdata->vif.hw_queue[ac];
+
+ if (ac_queue == i ||
+ sdata->vif.cab_queue == i)
+ __ieee80211_wake_txqs(sdata, ac);
+ }
+ }
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ rcu_read_unlock();
+}
+
void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
{
struct ieee80211_sub_if_data *sdata;
@@ -308,6 +404,9 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
rcu_read_unlock();
} else
tasklet_schedule(&local->tx_pending_tasklet);
+
+ if (local->ops->wake_tx_queue)
+ tasklet_schedule(&local->wake_txqs_tasklet);
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -351,9 +450,6 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue]))
return;
- if (local->ops->wake_tx_queue)
- return;
-
if (local->hw.queues < IEEE80211_NUM_ACS)
n_acs = 1;
@@ -366,8 +462,15 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
for (ac = 0; ac < n_acs; ac++) {
if (sdata->vif.hw_queue[ac] == queue ||
- sdata->vif.cab_queue == queue)
- netif_stop_subqueue(sdata->dev, ac);
+ sdata->vif.cab_queue == queue) {
+ if (!local->ops->wake_tx_queue) {
+ netif_stop_subqueue(sdata->dev, ac);
+ continue;
+ }
+ spin_lock(&local->fq.lock);
+ sdata->vif.txqs_stopped[ac] = true;
+ spin_unlock(&local->fq.lock);
+ }
}
}
rcu_read_unlock();
@@ -2075,6 +2178,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
case NL80211_IFTYPE_AP:
changed |= BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS;
+ if (sdata->vif.bss_conf.ftm_responder == 1 &&
+ wiphy_ext_feature_isset(sdata->local->hw.wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER))
+ changed |= BSS_CHANGED_FTM_RESPONDER;
+
if (sdata->vif.type == NL80211_IFTYPE_AP) {
changed |= BSS_CHANGED_AP_PROBE_RESP;
@@ -2657,49 +2765,65 @@ bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
return true;
}
-bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
+bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
+ const struct ieee80211_vht_operation *oper,
+ const struct ieee80211_ht_operation *htop,
struct cfg80211_chan_def *chandef)
{
struct cfg80211_chan_def new = *chandef;
- int cf1, cf2;
+ int cf0, cf1;
+ int ccfs0, ccfs1, ccfs2;
+ int ccf0, ccf1;
- if (!oper)
+ if (!oper || !htop)
return false;
- cf1 = ieee80211_channel_to_frequency(oper->center_freq_seg0_idx,
- chandef->chan->band);
- cf2 = ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
- chandef->chan->band);
+ ccfs0 = oper->center_freq_seg0_idx;
+ ccfs1 = oper->center_freq_seg1_idx;
+ ccfs2 = (le16_to_cpu(htop->operation_mode) &
+ IEEE80211_HT_OP_MODE_CCFS2_MASK)
+ >> IEEE80211_HT_OP_MODE_CCFS2_SHIFT;
+
+ /* when parsing (and we know how to) CCFS1 and CCFS2 are equivalent */
+ ccf0 = ccfs0;
+ ccf1 = ccfs1;
+ if (!ccfs1 && ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW))
+ ccf1 = ccfs2;
+
+ cf0 = ieee80211_channel_to_frequency(ccf0, chandef->chan->band);
+ cf1 = ieee80211_channel_to_frequency(ccf1, chandef->chan->band);
switch (oper->chan_width) {
case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ /* just use HT information directly */
break;
case IEEE80211_VHT_CHANWIDTH_80MHZ:
new.width = NL80211_CHAN_WIDTH_80;
- new.center_freq1 = cf1;
+ new.center_freq1 = cf0;
/* If needed, adjust based on the newer interop workaround. */
- if (oper->center_freq_seg1_idx) {
+ if (ccf1) {
unsigned int diff;
- diff = abs(oper->center_freq_seg1_idx -
- oper->center_freq_seg0_idx);
+ diff = abs(ccf1 - ccf0);
if (diff == 8) {
new.width = NL80211_CHAN_WIDTH_160;
- new.center_freq1 = cf2;
+ new.center_freq1 = cf1;
} else if (diff > 8) {
new.width = NL80211_CHAN_WIDTH_80P80;
- new.center_freq2 = cf2;
+ new.center_freq2 = cf1;
}
}
break;
case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ /* deprecated encoding */
new.width = NL80211_CHAN_WIDTH_160;
- new.center_freq1 = cf1;
+ new.center_freq1 = cf0;
break;
case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ /* deprecated encoding */
new.width = NL80211_CHAN_WIDTH_80P80;
- new.center_freq1 = cf1;
- new.center_freq2 = cf2;
+ new.center_freq1 = cf0;
+ new.center_freq2 = cf1;
break;
default:
return false;
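
The reworked parser folds CCFS2 from the HT operation element into the
VHT calculation: CCFS1 wins when non-zero, otherwise CCFS2 substitutes
for it if the hardware supports EXT_NSS_BW. A self-contained sketch of
that selection (channel numbers are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    static int pick_ccf1(int ccfs1, int ccfs2, bool supports_ext_nss_bw)
    {
        if (!ccfs1 && supports_ext_nss_bw)
            return ccfs2; /* CCFS2 stands in for an absent CCFS1 */
        return ccfs1;
    }

    int main(void)
    {
        printf("%d\n", pick_ccf1(0, 114, true));  /* falls back to CCFS2 */
        printf("%d\n", pick_ccf1(42, 114, true)); /* CCFS1 takes precedence */
        return 0;
    }
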
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 259325cbcc31..006d82e4a397 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -3,6 +3,7 @@
*
* Portions of this file
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -231,6 +232,13 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs,
sizeof(struct ieee80211_vht_mcs_info));
+ /* copy EXT_NSS_BW Support value or remove the capability */
+ if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_VHT_EXT_NSS_BW))
+ vht_cap->cap |= (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK);
+ else
+ vht_cap->vht_mcs.tx_highest &=
+ ~cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+
/* but also restrict MCSes */
for (i = 0; i < 8; i++) {
u16 own_rx, own_tx, peer_rx, peer_tx;
@@ -294,6 +302,18 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
break;
default:
sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80;
+
+ if (!(vht_cap->vht_mcs.tx_highest &
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)))
+ break;
+
+ /*
+ * If this is non-zero, then it does support 160 MHz after all,
+ * in one form or the other. We don't distinguish here (or even
+ * above) between 160 and 80+80 yet.
+ */
+ if (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)
+ sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160;
}
sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 2fb703d70803..7e29f88dbf6a 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -146,18 +146,18 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
goto err_tfm;
}
- key->tfm0 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
+ key->tfm0 = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
if (IS_ERR(key->tfm0))
goto err_tfm;
- if (crypto_skcipher_setkey(key->tfm0, template->key,
+ if (crypto_sync_skcipher_setkey(key->tfm0, template->key,
IEEE802154_LLSEC_KEY_SIZE))
goto err_tfm0;
return key;
err_tfm0:
- crypto_free_skcipher(key->tfm0);
+ crypto_free_sync_skcipher(key->tfm0);
err_tfm:
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
if (key->tfm[i])
@@ -177,7 +177,7 @@ static void llsec_key_release(struct kref *ref)
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
crypto_free_aead(key->tfm[i]);
- crypto_free_skcipher(key->tfm0);
+ crypto_free_sync_skcipher(key->tfm0);
kzfree(key);
}
@@ -622,7 +622,7 @@ llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
{
u8 iv[16];
struct scatterlist src;
- SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
int err, datalen;
unsigned char *data;
@@ -632,7 +632,7 @@ llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
datalen = skb_tail_pointer(skb) - data;
sg_init_one(&src, data, datalen);
- skcipher_request_set_tfm(req, key->tfm0);
+ skcipher_request_set_sync_tfm(req, key->tfm0);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &src, &src, datalen, iv);
err = crypto_skcipher_encrypt(req);
@@ -840,7 +840,7 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
unsigned char *data;
int datalen;
struct scatterlist src;
- SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
int err;
llsec_geniv(iv, dev_addr, &hdr->sec);
@@ -849,7 +849,7 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
sg_init_one(&src, data, datalen);
- skcipher_request_set_tfm(req, key->tfm0);
+ skcipher_request_set_sync_tfm(req, key->tfm0);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &src, &src, datalen, iv);
diff --git a/net/mac802154/llsec.h b/net/mac802154/llsec.h
index 6f3b658e3279..8be46d74dc39 100644
--- a/net/mac802154/llsec.h
+++ b/net/mac802154/llsec.h
@@ -29,7 +29,7 @@ struct mac802154_llsec_key {
/* one tfm for each authsize (4/8/16) */
struct crypto_aead *tfm[3];
- struct crypto_skcipher *tfm0;
+ struct crypto_sync_skcipher *tfm0;
struct kref ref;
};
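
The llsec conversion moves tfm0 to the sync skcipher API, which
guarantees the transform never completes asynchronously and so makes the
on-stack request safe. A kernel-style sketch of the resulting call
pattern (not standalone code; error handling trimmed):

    static int ctr_crypt_one(struct crypto_sync_skcipher *tfm,
                             struct scatterlist *sg, unsigned int len,
                             u8 *iv, bool encrypt)
    {
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

        skcipher_request_set_sync_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, len, iv);

        /* a sync tfm cannot outlive this frame's on-stack request */
        return encrypt ? crypto_skcipher_encrypt(req)
                       : crypto_skcipher_decrypt(req);
    }
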
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 7a4de6d618b1..7d55d4c04088 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1223,7 +1223,7 @@ static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
int err;
err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
- devconf_mpls_policy, NULL);
+ devconf_mpls_policy, extack);
if (err < 0)
goto errout;
@@ -1263,6 +1263,7 @@ errout:
static int mpls_netconf_dump_devconf(struct sk_buff *skb,
struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct hlist_head *head;
struct net_device *dev;
@@ -1270,6 +1271,21 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb,
int idx, s_idx;
int h, s_h;
+ if (cb->strict_check) {
+ struct netlink_ext_ack *extack = cb->extack;
+ struct netconfmsg *ncm;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
+ return -EINVAL;
+ }
+
+ if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
+ return -EINVAL;
+ }
+ }
+
s_h = cb->args[0];
s_idx = idx = cb->args[1];
@@ -1286,7 +1302,7 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb,
goto cont;
if (mpls_netconf_fill_devconf(skb, mdev,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
+ nlh->nlmsg_seq,
RTM_NEWNETCONF,
NLM_F_MULTI,
NETCONFA_ALL) < 0) {
@@ -1533,10 +1549,14 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
unsigned int flags;
if (event == NETDEV_REGISTER) {
- /* For now just support Ethernet, IPGRE, SIT and IPIP devices */
+
+ /* For now just support Ethernet, IPGRE, IP6GRE, SIT and
+ * IPIP devices
+ */
if (dev->type == ARPHRD_ETHER ||
dev->type == ARPHRD_LOOPBACK ||
dev->type == ARPHRD_IPGRE ||
+ dev->type == ARPHRD_IP6GRE ||
dev->type == ARPHRD_SIT ||
dev->type == ARPHRD_TUNNEL) {
mdev = mpls_add_dev(dev);
@@ -2011,30 +2031,140 @@ nla_put_failure:
return -EMSGSIZE;
}
+#if IS_ENABLED(CONFIG_INET)
+static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
+ struct fib_dump_filter *filter,
+ struct netlink_callback *cb)
+{
+ return ip_valid_fib_dump_req(net, nlh, filter, cb);
+}
+#else
+static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
+ struct fib_dump_filter *filter,
+ struct netlink_callback *cb)
+{
+ struct netlink_ext_ack *extack = cb->extack;
+ struct nlattr *tb[RTA_MAX + 1];
+ struct rtmsg *rtm;
+ int err, i;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid header for FIB dump request");
+ return -EINVAL;
+ }
+
+ rtm = nlmsg_data(nlh);
+ if (rtm->rtm_dst_len || rtm->rtm_src_len || rtm->rtm_tos ||
+ rtm->rtm_table || rtm->rtm_scope || rtm->rtm_type ||
+ rtm->rtm_flags) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for FIB dump request");
+ return -EINVAL;
+ }
+
+ if (rtm->rtm_protocol) {
+ filter->protocol = rtm->rtm_protocol;
+ filter->filter_set = 1;
+ cb->answer_flags = NLM_F_DUMP_FILTERED;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_mpls_policy, extack);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i <= RTA_MAX; ++i) {
+ int ifindex;
+
+ if (i == RTA_OIF) {
+ ifindex = nla_get_u32(tb[i]);
+ filter->dev = __dev_get_by_index(net, ifindex);
+ if (!filter->dev)
+ return -ENODEV;
+ filter->filter_set = 1;
+ } else if (tb[i]) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static bool mpls_rt_uses_dev(struct mpls_route *rt,
+ const struct net_device *dev)
+{
+ struct net_device *nh_dev;
+
+ if (rt->rt_nhn == 1) {
+ struct mpls_nh *nh = rt->rt_nh;
+
+ nh_dev = rtnl_dereference(nh->nh_dev);
+ if (dev == nh_dev)
+ return true;
+ } else {
+ for_nexthops(rt) {
+ nh_dev = rtnl_dereference(nh->nh_dev);
+ if (nh_dev == dev)
+ return true;
+ } endfor_nexthops(rt);
+ }
+
+ return false;
+}
+
static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct mpls_route __rcu **platform_label;
+ struct fib_dump_filter filter = {};
+ unsigned int flags = NLM_F_MULTI;
size_t platform_labels;
unsigned int index;
ASSERT_RTNL();
+ if (cb->strict_check) {
+ int err;
+
+ err = mpls_valid_fib_dump_req(net, nlh, &filter, cb);
+ if (err < 0)
+ return err;
+
+ /* for MPLS, there is only 1 table with fixed type and flags.
+ * If either are set in the filter then return nothing.
+ */
+ if ((filter.table_id && filter.table_id != RT_TABLE_MAIN) ||
+ (filter.rt_type && filter.rt_type != RTN_UNICAST) ||
+ filter.flags)
+ return skb->len;
+ }
+
index = cb->args[0];
if (index < MPLS_LABEL_FIRST_UNRESERVED)
index = MPLS_LABEL_FIRST_UNRESERVED;
platform_label = rtnl_dereference(net->mpls.platform_label);
platform_labels = net->mpls.platform_labels;
+
+ if (filter.filter_set)
+ flags |= NLM_F_DUMP_FILTERED;
+
for (; index < platform_labels; index++) {
struct mpls_route *rt;
+
rt = rtnl_dereference(platform_label[index]);
if (!rt)
continue;
+ if ((filter.dev && !mpls_rt_uses_dev(rt, filter.dev)) ||
+ (filter.protocol && rt->rt_protocol != filter.protocol))
+ continue;
+
if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
- index, rt, NLM_F_MULTI) < 0)
+ index, rt, flags) < 0)
break;
}
cb->args[0] = index;
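Under the strict-check path added above, only rtm_protocol may be non-zero in the dump request header and RTA_OIF is the only attribute accepted; anything else is rejected with an extack message. A hedged userspace sketch of a conforming filtered dump (not part of the patch; single recv() and no error handling, for brevity):

#include <linux/rtnetlink.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;
	char buf[8192];
	ssize_t n;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family = AF_MPLS;
	/* only rtm_protocol may be non-zero under strict checking */
	req.rtm.rtm_protocol = RTPROT_STATIC;

	send(fd, &req, req.nlh.nlmsg_len, 0);

	n = recv(fd, buf, sizeof(buf), 0);
	for (struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	     NLMSG_OK(nlh, n); nlh = NLMSG_NEXT(nlh, n)) {
		if (nlh->nlmsg_type == NLMSG_DONE)
			break;
		/* the kernel sets NLM_F_DUMP_FILTERED when a filter applied */
		printf("route, filtered=%d\n",
		       !!(nlh->nlmsg_flags & NLM_F_DUMP_FILTERED));
	}
	close(fd);
	return 0;
}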
diff --git a/net/ncsi/Kconfig b/net/ncsi/Kconfig
index 08a8a6031fd7..7f2b46108a24 100644
--- a/net/ncsi/Kconfig
+++ b/net/ncsi/Kconfig
@@ -10,3 +10,9 @@ config NET_NCSI
support. Enable this only if your system connects to a network
device via NCSI and the ethernet driver you're using supports
the protocol explicitly.
+config NCSI_OEM_CMD_GET_MAC
+ bool "Get NCSI OEM MAC Address"
+ depends on NET_NCSI
+ ---help---
+ This allows the MAC address to be queried from NCSI firmware and set
+ back on the controller.
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 8055e3965cef..1dae77c54009 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -68,6 +68,17 @@ enum {
NCSI_MODE_MAX
};
+/* OEM Vendor Manufacturer ID */
+#define NCSI_OEM_MFR_MLX_ID 0x8119
+#define NCSI_OEM_MFR_BCM_ID 0x113d
+/* Broadcom specific OEM Command */
+#define NCSI_OEM_BCM_CMD_GMA 0x01 /* CMD ID for Get MAC */
+/* OEM Command payload lengths */
+#define NCSI_OEM_BCM_CMD_GMA_LEN 12
+/* MAC address offset in OEM response */
+#define BCM_MAC_ADDR_OFFSET 28
+
struct ncsi_channel_version {
u32 version; /* Supported BCD encoded NCSI version */
u32 alpha2; /* Supported BCD encoded NCSI version */
@@ -171,6 +182,8 @@ struct ncsi_package;
#define NCSI_RESERVED_CHANNEL 0x1f
#define NCSI_CHANNEL_INDEX(c) ((c) & ((1 << NCSI_PACKAGE_SHIFT) - 1))
#define NCSI_TO_CHANNEL(p, c) (((p) << NCSI_PACKAGE_SHIFT) | (c))
+#define NCSI_MAX_PACKAGE 8
+#define NCSI_MAX_CHANNEL 32
struct ncsi_channel {
unsigned char id;
@@ -216,11 +229,15 @@ struct ncsi_request {
bool used; /* Request that has been assigned */
unsigned int flags; /* NCSI request property */
#define NCSI_REQ_FLAG_EVENT_DRIVEN 1
+#define NCSI_REQ_FLAG_NETLINK_DRIVEN 2
struct ncsi_dev_priv *ndp; /* Associated NCSI device */
struct sk_buff *cmd; /* Associated NCSI command packet */
struct sk_buff *rsp; /* Associated NCSI response packet */
struct timer_list timer; /* Timer on waiting for response */
bool enabled; /* Time has been enabled or not */
+ u32 snd_seq; /* netlink sending sequence number */
+ u32 snd_portid; /* netlink portid of sender */
+ struct nlmsghdr nlhdr; /* netlink message header */
};
enum {
@@ -236,6 +253,7 @@ enum {
ncsi_dev_state_probe_dp,
ncsi_dev_state_config_sp = 0x0301,
ncsi_dev_state_config_cis,
+ ncsi_dev_state_config_oem_gma,
ncsi_dev_state_config_clear_vids,
ncsi_dev_state_config_svf,
ncsi_dev_state_config_ev,
@@ -269,6 +287,7 @@ struct ncsi_dev_priv {
#define NCSI_DEV_PROBED 1 /* Finalized NCSI topology */
#define NCSI_DEV_HWA 2 /* Enabled HW arbitration */
#define NCSI_DEV_RESHUFFLE 4
+ unsigned int gma_flag; /* OEM GMA flag */
spinlock_t lock; /* Protect the NCSI device */
#if IS_ENABLED(CONFIG_IPV6)
unsigned int inet6_addr_num; /* Number of IPv6 addresses */
@@ -305,6 +324,8 @@ struct ncsi_cmd_arg {
unsigned short words[8];
unsigned int dwords[4];
};
+ unsigned char *data; /* NCSI OEM data */
+ struct genl_info *info; /* Netlink information */
};
extern struct list_head ncsi_dev_list;
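The new NCSI_MAX_PACKAGE/NCSI_MAX_CHANNEL bounds correspond to the 3-bit package / 5-bit channel split of the hardware identifier (8 packages, 32 channels), which the netlink range checks later in this series rely on. A small hedged illustration of how the macros above compose:

/* Sketch only: NCSI_PACKAGE_SHIFT and NCSI_PACKAGE_INDEX() are defined
 * outside this hunk; the shift is assumed to be 5, matching the 0x1f
 * channel-index mask and NCSI_RESERVED_CHANNEL above.
 */
unsigned char id  = NCSI_TO_CHANNEL(2, 3);	/* (2 << 5) | 3 == 0x43 */
unsigned char pkg = NCSI_PACKAGE_INDEX(id);	/* 2, < NCSI_MAX_PACKAGE */
unsigned char ch  = NCSI_CHANNEL_INDEX(id);	/* 3, < NCSI_MAX_CHANNEL */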
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index 7567ca63aae2..356af474e43c 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -17,6 +17,7 @@
#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
+#include <net/genetlink.h>
#include "internal.h"
#include "ncsi-pkt.h"
@@ -211,6 +212,25 @@ static int ncsi_cmd_handler_snfc(struct sk_buff *skb,
return 0;
}
+static int ncsi_cmd_handler_oem(struct sk_buff *skb,
+ struct ncsi_cmd_arg *nca)
+{
+ struct ncsi_cmd_oem_pkt *cmd;
+ unsigned int len;
+
+ len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
+ if (nca->payload < 26)
+ len += 26;
+ else
+ len += nca->payload;
+
+ cmd = skb_put_zero(skb, len);
+ memcpy(&cmd->mfr_id, nca->data, nca->payload);
+ ncsi_cmd_build_header(&cmd->cmd.common, nca);
+
+ return 0;
+}
+
static struct ncsi_cmd_handler {
unsigned char type;
int payload;
@@ -244,7 +264,7 @@ static struct ncsi_cmd_handler {
{ NCSI_PKT_CMD_GNS, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_GNPTS, 0, ncsi_cmd_handler_default },
{ NCSI_PKT_CMD_GPS, 0, ncsi_cmd_handler_default },
- { NCSI_PKT_CMD_OEM, 0, NULL },
+ { NCSI_PKT_CMD_OEM, -1, ncsi_cmd_handler_oem },
{ NCSI_PKT_CMD_PLDM, 0, NULL },
{ NCSI_PKT_CMD_GPUUID, 0, ncsi_cmd_handler_default }
};
@@ -316,12 +336,24 @@ int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
return -ENOENT;
}
- /* Get packet payload length and allocate the request */
- nca->payload = nch->payload;
+ /* Get packet payload length and allocate the request.
+ * A negative length in the handler structure means the
+ * caller initializes nca->payload itself before calling
+ * this function.
+ */
+ if (nch->payload >= 0)
+ nca->payload = nch->payload;
nr = ncsi_alloc_command(nca);
if (!nr)
return -ENOMEM;
+ /* track netlink information */
+ if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
+ nr->snd_seq = nca->info->snd_seq;
+ nr->snd_portid = nca->info->snd_portid;
+ nr->nlhdr = *nca->info->nlhdr;
+ }
+
/* Prepare the packet */
nca->id = nr->id;
ret = nch->handler(nr->cmd, nca);
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 091284760d21..bfc43b28c7a6 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -19,6 +19,7 @@
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
+#include <net/genetlink.h>
#include "internal.h"
#include "ncsi-pkt.h"
@@ -406,6 +407,9 @@ static void ncsi_request_timeout(struct timer_list *t)
{
struct ncsi_request *nr = from_timer(nr, t, timer);
struct ncsi_dev_priv *ndp = nr->ndp;
+ struct ncsi_cmd_pkt *cmd;
+ struct ncsi_package *np;
+ struct ncsi_channel *nc;
unsigned long flags;
/* If the request already had associated response,
@@ -419,6 +423,18 @@ static void ncsi_request_timeout(struct timer_list *t)
}
spin_unlock_irqrestore(&ndp->lock, flags);
+ if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
+ if (nr->cmd) {
+ /* Find the package */
+ cmd = (struct ncsi_cmd_pkt *)
+ skb_network_header(nr->cmd);
+ ncsi_find_package_and_channel(ndp,
+ cmd->cmd.common.channel,
+ &np, &nc);
+ ncsi_send_netlink_timeout(nr, np, nc);
+ }
+ }
+
/* Release the request */
ncsi_free_request(nr);
}
@@ -635,6 +651,72 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
return 0;
}
+#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
+
+/* NCSI OEM Command APIs */
+static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
+{
+ unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
+ int ret = 0;
+
+ nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
+
+ memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
+ *(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
+ data[5] = NCSI_OEM_BCM_CMD_GMA;
+
+ nca->data = data;
+
+ ret = ncsi_xmit_cmd(nca);
+ if (ret)
+ netdev_err(nca->ndp->ndev.dev,
+ "NCSI: Failed to transmit cmd 0x%x during configure\n",
+ nca->type);
+ return ret;
+}
+
+/* OEM Command handlers initialization */
+static struct ncsi_oem_gma_handler {
+ unsigned int mfr_id;
+ int (*handler)(struct ncsi_cmd_arg *nca);
+} ncsi_oem_gma_handlers[] = {
+ { NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm }
+};
+
+static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
+{
+ struct ncsi_oem_gma_handler *nch = NULL;
+ int i;
+
+ /* This function should only be called once, return if flag set */
+ if (nca->ndp->gma_flag == 1)
+ return -1;
+
+ /* Find gma handler for given manufacturer id */
+ for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
+ if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
+ if (ncsi_oem_gma_handlers[i].handler)
+ nch = &ncsi_oem_gma_handlers[i];
+ break;
+ }
+ }
+
+ if (!nch) {
+ netdev_err(nca->ndp->ndev.dev,
+ "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
+ mf_id);
+ return -1;
+ }
+
+ /* Set the flag for GMA command which should only be called once */
+ nca->ndp->gma_flag = 1;
+
+ /* Get MAC address from NCSI device */
+ return nch->handler(nca);
+}
+
+#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_dev *nd = &ndp->ndev;
@@ -685,7 +767,23 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
goto error;
}
+ nd->state = ncsi_dev_state_config_oem_gma;
+ break;
+ case ncsi_dev_state_config_oem_gma:
nd->state = ncsi_dev_state_config_clear_vids;
+ ret = -1;
+
+#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
+ nca.type = NCSI_PKT_CMD_OEM;
+ nca.package = np->id;
+ nca.channel = nc->id;
+ ndp->pending_req_num = 1;
+ ret = ncsi_gma_handler(&nca, nc->version.mf_id);
+#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+
+ if (ret < 0)
+ schedule_work(&ndp->work);
+
break;
case ncsi_dev_state_config_clear_vids:
case ncsi_dev_state_config_svf:
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 45f33d6dedf7..33314381b4f5 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -12,7 +12,6 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
-#include <linux/module.h>
#include <net/genetlink.h>
#include <net/ncsi.h>
#include <linux/skbuff.h>
@@ -20,6 +19,7 @@
#include <uapi/linux/ncsi.h>
#include "internal.h"
+#include "ncsi-pkt.h"
#include "ncsi-netlink.h"
static struct genl_family ncsi_genl_family;
@@ -29,6 +29,7 @@ static const struct nla_policy ncsi_genl_policy[NCSI_ATTR_MAX + 1] = {
[NCSI_ATTR_PACKAGE_LIST] = { .type = NLA_NESTED },
[NCSI_ATTR_PACKAGE_ID] = { .type = NLA_U32 },
[NCSI_ATTR_CHANNEL_ID] = { .type = NLA_U32 },
+ [NCSI_ATTR_DATA] = { .type = NLA_BINARY, .len = 2048 },
};
static struct ncsi_dev_priv *ndp_from_ifindex(struct net *net, u32 ifindex)
@@ -366,6 +367,202 @@ static int ncsi_clear_interface_nl(struct sk_buff *msg, struct genl_info *info)
return 0;
}
+static int ncsi_send_cmd_nl(struct sk_buff *msg, struct genl_info *info)
+{
+ struct ncsi_dev_priv *ndp;
+ struct ncsi_pkt_hdr *hdr;
+ struct ncsi_cmd_arg nca;
+ unsigned char *data;
+ u32 package_id;
+ u32 channel_id;
+ int len, ret;
+
+ if (!info || !info->attrs) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!info->attrs[NCSI_ATTR_IFINDEX]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!info->attrs[NCSI_ATTR_PACKAGE_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!info->attrs[NCSI_ATTR_CHANNEL_ID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!info->attrs[NCSI_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
+ nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
+ if (!ndp) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ package_id = nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_ID]);
+ channel_id = nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_ID]);
+
+ if (package_id >= NCSI_MAX_PACKAGE || channel_id >= NCSI_MAX_CHANNEL) {
+ ret = -ERANGE;
+ goto out_netlink;
+ }
+
+ len = nla_len(info->attrs[NCSI_ATTR_DATA]);
+ if (len < sizeof(struct ncsi_pkt_hdr)) {
+ netdev_info(ndp->ndev.dev, "NCSI: no command to send %u\n",
+ package_id);
+ ret = -EINVAL;
+ goto out_netlink;
+ } else {
+ data = (unsigned char *)nla_data(info->attrs[NCSI_ATTR_DATA]);
+ }
+
+ hdr = (struct ncsi_pkt_hdr *)data;
+
+ nca.ndp = ndp;
+ nca.package = (unsigned char)package_id;
+ nca.channel = (unsigned char)channel_id;
+ nca.type = hdr->type;
+ nca.req_flags = NCSI_REQ_FLAG_NETLINK_DRIVEN;
+ nca.info = info;
+ nca.payload = ntohs(hdr->length);
+ nca.data = data + sizeof(*hdr);
+
+ ret = ncsi_xmit_cmd(&nca);
+out_netlink:
+ if (ret != 0) {
+ netdev_err(ndp->ndev.dev,
+ "NCSI: Error %d sending command\n",
+ ret);
+ ncsi_send_netlink_err(ndp->ndev.dev,
+ info->snd_seq,
+ info->snd_portid,
+ info->nlhdr,
+ ret);
+ }
+out:
+ return ret;
+}
+
+int ncsi_send_netlink_rsp(struct ncsi_request *nr,
+ struct ncsi_package *np,
+ struct ncsi_channel *nc)
+{
+ struct sk_buff *skb;
+ struct net *net;
+ void *hdr;
+ int rc;
+
+ net = dev_net(nr->rsp->dev);
+
+ skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(skb, nr->snd_portid, nr->snd_seq,
+ &ncsi_genl_family, 0, NCSI_CMD_SEND_CMD);
+ if (!hdr) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+
+ nla_put_u32(skb, NCSI_ATTR_IFINDEX, nr->rsp->dev->ifindex);
+ if (np)
+ nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID, np->id);
+ if (nc)
+ nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, nc->id);
+ else
+ nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, NCSI_RESERVED_CHANNEL);
+
+ rc = nla_put(skb, NCSI_ATTR_DATA, nr->rsp->len, (void *)nr->rsp->data);
+ if (rc)
+ goto err;
+
+ genlmsg_end(skb, hdr);
+ return genlmsg_unicast(net, skb, nr->snd_portid);
+
+err:
+ kfree_skb(skb);
+ return rc;
+}
+
+int ncsi_send_netlink_timeout(struct ncsi_request *nr,
+ struct ncsi_package *np,
+ struct ncsi_channel *nc)
+{
+ struct sk_buff *skb;
+ struct net *net;
+ void *hdr;
+
+ skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(skb, nr->snd_portid, nr->snd_seq,
+ &ncsi_genl_family, 0, NCSI_CMD_SEND_CMD);
+ if (!hdr) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+
+ net = dev_net(nr->cmd->dev);
+
+ nla_put_u32(skb, NCSI_ATTR_IFINDEX, nr->cmd->dev->ifindex);
+
+ if (np)
+ nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID, np->id);
+ else
+ nla_put_u32(skb, NCSI_ATTR_PACKAGE_ID,
+ NCSI_PACKAGE_INDEX((((struct ncsi_pkt_hdr *)
+ nr->cmd->data)->channel)));
+
+ if (nc)
+ nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, nc->id);
+ else
+ nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, NCSI_RESERVED_CHANNEL);
+
+ genlmsg_end(skb, hdr);
+ return genlmsg_unicast(net, skb, nr->snd_portid);
+}
+
+int ncsi_send_netlink_err(struct net_device *dev,
+ u32 snd_seq,
+ u32 snd_portid,
+ struct nlmsghdr *nlhdr,
+ int err)
+{
+ struct nlmsghdr *nlh;
+ struct nlmsgerr *nle;
+ struct sk_buff *skb;
+ struct net *net;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ net = dev_net(dev);
+
+ nlh = nlmsg_put(skb, snd_portid, snd_seq,
+ NLMSG_ERROR, sizeof(*nle), 0);
+ nle = (struct nlmsgerr *)nlmsg_data(nlh);
+ nle->error = err;
+ memcpy(&nle->msg, nlhdr, sizeof(*nlh));
+
+ nlmsg_end(skb, nlh);
+
+ return nlmsg_unicast(net->genl_sock, skb, snd_portid);
+}
+
static const struct genl_ops ncsi_ops[] = {
{
.cmd = NCSI_CMD_PKG_INFO,
@@ -386,6 +583,12 @@ static const struct genl_ops ncsi_ops[] = {
.doit = ncsi_clear_interface_nl,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = NCSI_CMD_SEND_CMD,
+ .policy = ncsi_genl_policy,
+ .doit = ncsi_send_cmd_nl,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family ncsi_genl_family __ro_after_init = {
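For userspace, a hedged sketch of driving the new NCSI_CMD_SEND_CMD op with libnl-3. Assumptions not confirmed by this hunk: the generic netlink family resolves under the name "NCSI", and the NCSI_ATTR_*/NCSI_CMD_* values come from the uapi/linux/ncsi.h of this series. Per ncsi_send_cmd_nl(), the NCSI_ATTR_DATA payload must begin with a valid struct ncsi_pkt_hdr and the ids must be below NCSI_MAX_PACKAGE/NCSI_MAX_CHANNEL:

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/ncsi.h>
#include <net/if.h>

/* cmd/len: a raw NCSI command starting with struct ncsi_pkt_hdr */
static int ncsi_send_raw(const char *ifname, unsigned int package,
			 unsigned int channel, void *cmd, int len)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "NCSI");	/* assumed family name */

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    0, NCSI_CMD_SEND_CMD, 0);
	nla_put_u32(msg, NCSI_ATTR_IFINDEX, if_nametoindex(ifname));
	nla_put_u32(msg, NCSI_ATTR_PACKAGE_ID, package);	/* < 8 */
	nla_put_u32(msg, NCSI_ATTR_CHANNEL_ID, channel);	/* < 32 */
	nla_put(msg, NCSI_ATTR_DATA, len, cmd);

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}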
diff --git a/net/ncsi/ncsi-netlink.h b/net/ncsi/ncsi-netlink.h
index 91a5c256f8c4..c4a46887a932 100644
--- a/net/ncsi/ncsi-netlink.h
+++ b/net/ncsi/ncsi-netlink.h
@@ -14,6 +14,18 @@
#include "internal.h"
+int ncsi_send_netlink_rsp(struct ncsi_request *nr,
+ struct ncsi_package *np,
+ struct ncsi_channel *nc);
+int ncsi_send_netlink_timeout(struct ncsi_request *nr,
+ struct ncsi_package *np,
+ struct ncsi_channel *nc);
+int ncsi_send_netlink_err(struct net_device *dev,
+ u32 snd_seq,
+ u32 snd_portid,
+ struct nlmsghdr *nlhdr,
+ int err);
+
int ncsi_init_netlink(struct net_device *dev);
int ncsi_unregister_netlink(struct net_device *dev);
diff --git a/net/ncsi/ncsi-pkt.h b/net/ncsi/ncsi-pkt.h
index 91b4b66438df..4d3f06be38bd 100644
--- a/net/ncsi/ncsi-pkt.h
+++ b/net/ncsi/ncsi-pkt.h
@@ -151,6 +151,28 @@ struct ncsi_cmd_snfc_pkt {
unsigned char pad[22];
};
+/* OEM Request Command as per NCSI Specification */
+struct ncsi_cmd_oem_pkt {
+ struct ncsi_cmd_pkt_hdr cmd; /* Command header */
+ __be32 mfr_id; /* Manufacturer ID */
+ unsigned char data[]; /* OEM Payload Data */
+};
+
+/* OEM Response Packet as per NCSI Specification */
+struct ncsi_rsp_oem_pkt {
+ struct ncsi_rsp_pkt_hdr rsp; /* Response header */
+ __be32 mfr_id; /* Manufacturer ID */
+ unsigned char data[]; /* Payload data */
+};
+
+/* Broadcom Response Data */
+struct ncsi_rsp_oem_bcm_pkt {
+ unsigned char ver; /* Payload Version */
+ unsigned char type; /* OEM Command type */
+ __be16 len; /* Payload Length */
+ unsigned char data[]; /* Cmd specific Data */
+};
+
/* Get Link Status */
struct ncsi_rsp_gls_pkt {
struct ncsi_rsp_pkt_hdr rsp; /* Response header */
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 930c1d3796f0..77e07ba3f493 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -16,9 +16,11 @@
#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
+#include <net/genetlink.h>
#include "internal.h"
#include "ncsi-pkt.h"
+#include "ncsi-netlink.h"
static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
unsigned short payload)
@@ -32,15 +34,25 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
* before calling this function.
*/
h = (struct ncsi_rsp_pkt_hdr *)skb_network_header(nr->rsp);
- if (h->common.revision != NCSI_PKT_REVISION)
+
+ if (h->common.revision != NCSI_PKT_REVISION) {
+ netdev_dbg(nr->ndp->ndev.dev,
+ "NCSI: unsupported header revision\n");
return -EINVAL;
- if (ntohs(h->common.length) != payload)
+ }
+ if (ntohs(h->common.length) != payload) {
+ netdev_dbg(nr->ndp->ndev.dev,
+ "NCSI: payload length mismatched\n");
return -EINVAL;
+ }
/* Check on code and reason */
if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED ||
- ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR)
- return -EINVAL;
+ ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) {
+ netdev_dbg(nr->ndp->ndev.dev,
+ "NCSI: non zero response/reason code\n");
+ return -EPERM;
+ }
/* Validate checksum, which might be zeroes if the
* sender doesn't support checksum according to NCSI
@@ -52,8 +64,11 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
checksum = ncsi_calculate_checksum((unsigned char *)h,
sizeof(*h) + payload - 4);
- if (*pchecksum != htonl(checksum))
+
+ if (*pchecksum != htonl(checksum)) {
+ netdev_dbg(nr->ndp->ndev.dev, "NCSI: checksum mismatched\n");
return -EINVAL;
+ }
return 0;
}
@@ -596,6 +611,87 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr)
return 0;
}
+/* Response handler for Broadcom command Get MAC Address */
+static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
+{
+ struct ncsi_dev_priv *ndp = nr->ndp;
+ struct net_device *ndev = ndp->ndev.dev;
+ const struct net_device_ops *ops = ndev->netdev_ops;
+ struct ncsi_rsp_oem_pkt *rsp;
+ struct sockaddr saddr;
+ int ret = 0;
+
+ /* Get the response header */
+ rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
+
+ saddr.sa_family = ndev->type;
+ ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN);
+ /* Increase MAC address by 1 for BMC's address */
+ saddr.sa_data[ETH_ALEN - 1]++;
+ ret = ops->ndo_set_mac_address(ndev, &saddr);
+ if (ret < 0)
+ netdev_warn(ndev, "NCSI: Writing MAC address to device failed\n");
+
+ return ret;
+}
+
+/* Response handler for Broadcom card */
+static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr)
+{
+ struct ncsi_rsp_oem_bcm_pkt *bcm;
+ struct ncsi_rsp_oem_pkt *rsp;
+
+ /* Get the response header */
+ rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
+ bcm = (struct ncsi_rsp_oem_bcm_pkt *)(rsp->data);
+
+ if (bcm->type == NCSI_OEM_BCM_CMD_GMA)
+ return ncsi_rsp_handler_oem_bcm_gma(nr);
+ return 0;
+}
+
+static struct ncsi_rsp_oem_handler {
+ unsigned int mfr_id;
+ int (*handler)(struct ncsi_request *nr);
+} ncsi_rsp_oem_handlers[] = {
+ { NCSI_OEM_MFR_MLX_ID, NULL },
+ { NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm }
+};
+
+/* Response handler for OEM command */
+static int ncsi_rsp_handler_oem(struct ncsi_request *nr)
+{
+ struct ncsi_rsp_oem_handler *nrh = NULL;
+ struct ncsi_rsp_oem_pkt *rsp;
+ unsigned int mfr_id, i;
+
+ /* Get the response header */
+ rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
+ mfr_id = ntohl(rsp->mfr_id);
+
+ /* Check the manufacturer ID and find the handler */
+ for (i = 0; i < ARRAY_SIZE(ncsi_rsp_oem_handlers); i++) {
+ if (ncsi_rsp_oem_handlers[i].mfr_id == mfr_id) {
+ if (ncsi_rsp_oem_handlers[i].handler)
+ nrh = &ncsi_rsp_oem_handlers[i];
+ else
+ nrh = NULL;
+
+ break;
+ }
+ }
+
+ if (!nrh) {
+ netdev_err(nr->ndp->ndev.dev, "Received unrecognized OEM packet with MFR-ID (0x%x)\n",
+ mfr_id);
+ return -ENOENT;
+ }
+
+ /* Process the packet */
+ return nrh->handler(nr);
+}
+
static int ncsi_rsp_handler_gvi(struct ncsi_request *nr)
{
struct ncsi_rsp_gvi_pkt *rsp;
@@ -900,6 +996,26 @@ static int ncsi_rsp_handler_gpuuid(struct ncsi_request *nr)
return 0;
}
+static int ncsi_rsp_handler_netlink(struct ncsi_request *nr)
+{
+ struct ncsi_dev_priv *ndp = nr->ndp;
+ struct ncsi_rsp_pkt *rsp;
+ struct ncsi_package *np;
+ struct ncsi_channel *nc;
+ int ret;
+
+ /* Find the package */
+ rsp = (struct ncsi_rsp_pkt *)skb_network_header(nr->rsp);
+ ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
+ &np, &nc);
+ if (!np)
+ return -ENODEV;
+
+ ret = ncsi_send_netlink_rsp(nr, np, nc);
+
+ return ret;
+}
+
static struct ncsi_rsp_handler {
unsigned char type;
int payload;
@@ -932,7 +1048,7 @@ static struct ncsi_rsp_handler {
{ NCSI_PKT_RSP_GNS, 172, ncsi_rsp_handler_gns },
{ NCSI_PKT_RSP_GNPTS, 172, ncsi_rsp_handler_gnpts },
{ NCSI_PKT_RSP_GPS, 8, ncsi_rsp_handler_gps },
- { NCSI_PKT_RSP_OEM, 0, NULL },
+ { NCSI_PKT_RSP_OEM, -1, ncsi_rsp_handler_oem },
{ NCSI_PKT_RSP_PLDM, 0, NULL },
{ NCSI_PKT_RSP_GPUUID, 20, ncsi_rsp_handler_gpuuid }
};
@@ -1002,6 +1118,17 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
netdev_warn(ndp->ndev.dev,
"NCSI: 'bad' packet ignored for type 0x%x\n",
hdr->type);
+
+ if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
+ if (ret == -EPERM)
+ goto out_netlink;
+ else
+ ncsi_send_netlink_err(ndp->ndev.dev,
+ nr->snd_seq,
+ nr->snd_portid,
+ &nr->nlhdr,
+ ret);
+ }
goto out;
}
@@ -1011,6 +1138,17 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
netdev_err(ndp->ndev.dev,
"NCSI: Handler for packet type 0x%x returned %d\n",
hdr->type, ret);
+
+out_netlink:
+ if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
+ ret = ncsi_rsp_handler_netlink(nr);
+ if (ret) {
+ netdev_err(ndp->ndev.dev,
+ "NCSI: Netlink handler for packet type 0x%x returned %d\n",
+ hdr->type, ret);
+ }
+ }
+
out:
ncsi_free_request(nr);
return ret;
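On the receive side, a successful netlink-driven command produces a unicast NCSI_CMD_SEND_CMD message carrying the raw response in NCSI_ATTR_DATA (ncsi_send_netlink_rsp()), a timeout produces the same message without the data attribute (ncsi_send_netlink_timeout()), and a send failure arrives as a classic NLMSG_ERROR (ncsi_send_netlink_err()). A hedged libnl-3 callback sketch, pairing with the send sketch earlier:

/* Same includes as the send sketch; register with
 * nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, ncsi_rsp_cb, NULL)
 * and run nl_recvmsgs_default(sk). Attribute ids are assumed to come
 * from the uapi/linux/ncsi.h of this series.
 */
static int ncsi_rsp_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *tb[NCSI_ATTR_MAX + 1];

	genlmsg_parse(nlmsg_hdr(msg), 0, tb, NCSI_ATTR_MAX, NULL);
	if (tb[NCSI_ATTR_DATA]) {
		/* full raw NCSI response, starting with the rsp header */
		void *rsp = nla_data(tb[NCSI_ATTR_DATA]);
		int len = nla_len(tb[NCSI_ATTR_DATA]);

		/* ... decode rsp[0..len) here ... */
	} else {
		/* no NCSI_ATTR_DATA: the command timed out */
	}
	return NL_OK;
}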
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index f61c306de1d0..2ab870ef233a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -625,6 +625,13 @@ config NFT_FIB_INET
The lookup will be delegated to the IPv4 or IPv6 FIB depending
on the protocol of the packet.
+config NFT_XFRM
+ tristate "Netfilter nf_tables xfrm/IPSec security association matching"
+ depends on XFRM
+ help
+ This option adds an expression that you can use to extract properties
+ of a packet's security association.
+
config NFT_SOCKET
tristate "Netfilter nf_tables socket match support"
depends on IPV6 || IPV6=n
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 16895e045b66..4ddf3ef51ece 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -113,6 +113,7 @@ obj-$(CONFIG_NFT_FIB_NETDEV) += nft_fib_netdev.o
obj-$(CONFIG_NFT_SOCKET) += nft_socket.o
obj-$(CONFIG_NFT_OSF) += nft_osf.o
obj-$(CONFIG_NFT_TPROXY) += nft_tproxy.o
+obj-$(CONFIG_NFT_XFRM) += nft_xfrm.o
# nf_tables netdev
obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 8a33dac4e805..e287da68d5fa 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -15,7 +15,7 @@
#define __ipset_dereference_protected(p, c) rcu_dereference_protected(p, c)
#define ipset_dereference_protected(p, set) \
- __ipset_dereference_protected(p, spin_is_locked(&(set)->lock))
+ __ipset_dereference_protected(p, lockdep_is_held(&(set)->lock))
#define rcu_dereference_bh_nfnl(p) rcu_dereference_bh_check(p, 1)
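The spin_is_locked() to lockdep_is_held() switch matters because spin_is_locked() only reports that the lock is held by *someone*, proving nothing about the caller, while lockdep_is_held() checks that the current context holds it. A generic hedged sketch of the idiom (types are illustrative, not ipset's):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct cfg {
	int value;
};

struct obj {
	spinlock_t lock;
	struct cfg __rcu *cfg;
};

static int obj_get_value(struct obj *o)
{
	struct cfg *c;
	int v;

	spin_lock(&o->lock);
	/* safe: this context holds o->lock, and lockdep can verify it */
	c = rcu_dereference_protected(o->cfg, lockdep_is_held(&o->lock));
	v = c->value;
	spin_unlock(&o->lock);
	return v;
}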
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 7ca926a03b81..fe9abf3cc10a 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1686,8 +1686,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
skb_reset_network_header(skb);
IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
- ipv4_update_pmtu(skb, ipvs->net,
- mtu, 0, 0, 0, 0);
+ ipv4_update_pmtu(skb, ipvs->net, mtu, 0, 0);
/* Client uses PMTUD? */
if (!(frag_off & htons(IP_DF)))
goto ignore_ipip;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 62eefea48973..83395bf6dc35 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3234,7 +3234,7 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
/* Try to find the service for which to dump destinations */
if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, IPVS_CMD_ATTR_MAX,
- ip_vs_cmd_policy, NULL))
+ ip_vs_cmd_policy, cb->extack))
goto out_err;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a676d5f76bdc..ca1168d67fac 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -379,7 +379,7 @@ bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
return false;
}
- l4proto = __nf_ct_l4proto_find(l3num, protonum);
+ l4proto = __nf_ct_l4proto_find(protonum);
ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
l4proto);
@@ -539,7 +539,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
nf_ct_tmpl_free(ct);
return;
}
- l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+ l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
if (l4proto->destroy)
l4proto->destroy(ct);
@@ -840,7 +840,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
enum ip_conntrack_info oldinfo;
struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
- l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+ l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
if (l4proto->allow_clash &&
!nf_ct_is_dying(ct) &&
atomic_inc_not_zero(&ct->ct_general.use)) {
@@ -1109,7 +1109,7 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
if (!test_bit(IPS_ASSURED_BIT, &ct->status))
return true;
- l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+ l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
return true;
@@ -1370,12 +1370,6 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
- if (!l4proto->new(ct, skb, dataoff)) {
- nf_conntrack_free(ct);
- pr_debug("can't track with proto module\n");
- return NULL;
- }
-
if (timeout_ext)
nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
GFP_ATOMIC);
@@ -1436,12 +1430,12 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
/* On success, returns 0, sets skb->_nfct | ctinfo */
static int
-resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
+resolve_normal_ct(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
- u_int16_t l3num,
u_int8_t protonum,
- const struct nf_conntrack_l4proto *l4proto)
+ const struct nf_conntrack_l4proto *l4proto,
+ const struct nf_hook_state *state)
{
const struct nf_conntrack_zone *zone;
struct nf_conntrack_tuple tuple;
@@ -1452,17 +1446,18 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
u32 hash;
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
- dataoff, l3num, protonum, net, &tuple, l4proto)) {
+ dataoff, state->pf, protonum, state->net,
+ &tuple, l4proto)) {
pr_debug("Can't get tuple\n");
return 0;
}
/* look for tuple match */
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
- hash = hash_conntrack_raw(&tuple, net);
- h = __nf_conntrack_find_get(net, zone, &tuple, hash);
+ hash = hash_conntrack_raw(&tuple, state->net);
+ h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
if (!h) {
- h = init_conntrack(net, tmpl, &tuple, l4proto,
+ h = init_conntrack(state->net, tmpl, &tuple, l4proto,
skb, dataoff, hash);
if (!h)
return 0;
@@ -1491,13 +1486,45 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
return 0;
}
+/*
+ * icmp packets need special treatment to handle error messages that are
+ * related to a connection.
+ *
+ * Callers need to check if skb has a conntrack assigned when this
+ * helper returns; in such case skb belongs to an already known connection.
+ */
+static unsigned int __cold
+nf_conntrack_handle_icmp(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ u8 protonum,
+ const struct nf_hook_state *state)
+{
+ int ret;
+
+ if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
+ ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
+ ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
+#endif
+ else
+ return NF_ACCEPT;
+
+ if (ret <= 0) {
+ NF_CT_STAT_INC_ATOMIC(state->net, error);
+ NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+ }
+
+ return ret;
+}
+
unsigned int
-nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
- struct sk_buff *skb)
+nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
{
const struct nf_conntrack_l4proto *l4proto;
- struct nf_conn *ct, *tmpl;
enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct, *tmpl;
u_int8_t protonum;
int dataoff, ret;
@@ -1506,32 +1533,28 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
/* Previously seen (loopback or untracked)? Ignore. */
if ((tmpl && !nf_ct_is_template(tmpl)) ||
ctinfo == IP_CT_UNTRACKED) {
- NF_CT_STAT_INC_ATOMIC(net, ignore);
+ NF_CT_STAT_INC_ATOMIC(state->net, ignore);
return NF_ACCEPT;
}
skb->_nfct = 0;
}
/* rcu_read_lock()ed by nf_hook_thresh */
- dataoff = get_l4proto(skb, skb_network_offset(skb), pf, &protonum);
+ dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
if (dataoff <= 0) {
pr_debug("not prepared to track yet or error occurred\n");
- NF_CT_STAT_INC_ATOMIC(net, error);
- NF_CT_STAT_INC_ATOMIC(net, invalid);
+ NF_CT_STAT_INC_ATOMIC(state->net, error);
+ NF_CT_STAT_INC_ATOMIC(state->net, invalid);
ret = NF_ACCEPT;
goto out;
}
- l4proto = __nf_ct_l4proto_find(pf, protonum);
+ l4proto = __nf_ct_l4proto_find(protonum);
- /* It may be an special packet, error, unclean...
- * inverse of the return code tells to the netfilter
- * core what to do with the packet. */
- if (l4proto->error != NULL) {
- ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum);
+ if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
+ ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
+ protonum, state);
if (ret <= 0) {
- NF_CT_STAT_INC_ATOMIC(net, error);
- NF_CT_STAT_INC_ATOMIC(net, invalid);
ret = -ret;
goto out;
}
@@ -1540,10 +1563,11 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
goto out;
}
repeat:
- ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, l4proto);
+ ret = resolve_normal_ct(tmpl, skb, dataoff,
+ protonum, l4proto, state);
if (ret < 0) {
/* Too stressed to deal. */
- NF_CT_STAT_INC_ATOMIC(net, drop);
+ NF_CT_STAT_INC_ATOMIC(state->net, drop);
ret = NF_DROP;
goto out;
}
@@ -1551,21 +1575,21 @@ repeat:
ct = nf_ct_get(skb, &ctinfo);
if (!ct) {
/* Not valid part of a connection */
- NF_CT_STAT_INC_ATOMIC(net, invalid);
+ NF_CT_STAT_INC_ATOMIC(state->net, invalid);
ret = NF_ACCEPT;
goto out;
}
- ret = l4proto->packet(ct, skb, dataoff, ctinfo);
+ ret = l4proto->packet(ct, skb, dataoff, ctinfo, state);
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
pr_debug("nf_conntrack_in: Can't track with proto module\n");
nf_conntrack_put(&ct->ct_general);
skb->_nfct = 0;
- NF_CT_STAT_INC_ATOMIC(net, invalid);
+ NF_CT_STAT_INC_ATOMIC(state->net, invalid);
if (ret == -NF_DROP)
- NF_CT_STAT_INC_ATOMIC(net, drop);
+ NF_CT_STAT_INC_ATOMIC(state->net, drop);
/* Special case: TCP tracker reports an attempt to reopen a
* closed/aborted connection. We have to go back and create a
* fresh conntrack.
@@ -1594,8 +1618,7 @@ bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
rcu_read_lock();
ret = nf_ct_invert_tuple(inverse, orig,
- __nf_ct_l4proto_find(orig->src.l3num,
- orig->dst.protonum));
+ __nf_ct_l4proto_find(orig->dst.protonum));
rcu_read_unlock();
return ret;
}
@@ -1752,7 +1775,7 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
if (dataoff <= 0)
return -1;
- l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+ l4proto = nf_ct_l4proto_find_get(l4num);
if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
l4num, net, &tuple, l4proto))
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 27b84231db10..3034038bfdf0 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -610,8 +610,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
expect->tuple.src.l3num,
expect->tuple.dst.protonum);
print_tuple(s, &expect->tuple,
- __nf_ct_l4proto_find(expect->tuple.src.l3num,
- expect->tuple.dst.protonum));
+ __nf_ct_l4proto_find(expect->tuple.dst.protonum));
if (expect->flags & NF_CT_EXPECT_PERMANENT) {
seq_puts(s, "PERMANENT");
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 036207ecaf16..4ae8e528943a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -135,8 +135,7 @@ static int ctnetlink_dump_tuples(struct sk_buff *skb,
ret = ctnetlink_dump_tuples_ip(skb, tuple);
if (ret >= 0) {
- l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
- tuple->dst.protonum);
+ l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
}
rcu_read_unlock();
@@ -184,7 +183,7 @@ static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
struct nlattr *nest_proto;
int ret;
- l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+ l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
if (!l4proto->to_nlattr)
return 0;
@@ -592,7 +591,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
len *= 3u; /* ORIG, REPLY, MASTER */
- l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+ l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
len += l4proto->nlattr_size;
if (l4proto->nlattr_tuple_size) {
len4 = l4proto->nlattr_tuple_size();
@@ -821,6 +820,7 @@ static int ctnetlink_done(struct netlink_callback *cb)
}
struct ctnetlink_filter {
+ u8 family;
struct {
u_int32_t val;
u_int32_t mask;
@@ -828,31 +828,39 @@ struct ctnetlink_filter {
};
static struct ctnetlink_filter *
-ctnetlink_alloc_filter(const struct nlattr * const cda[])
+ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
{
-#ifdef CONFIG_NF_CONNTRACK_MARK
struct ctnetlink_filter *filter;
+#ifndef CONFIG_NF_CONNTRACK_MARK
+ if (cda[CTA_MARK] && cda[CTA_MARK_MASK])
+ return ERR_PTR(-EOPNOTSUPP);
+#endif
+
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (filter == NULL)
return ERR_PTR(-ENOMEM);
- filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
- filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
+ filter->family = family;
- return filter;
-#else
- return ERR_PTR(-EOPNOTSUPP);
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
+ filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
+ filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
+ }
#endif
+ return filter;
}
static int ctnetlink_start(struct netlink_callback *cb)
{
const struct nlattr * const *cda = cb->data;
struct ctnetlink_filter *filter = NULL;
+ struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+ u8 family = nfmsg->nfgen_family;
- if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
- filter = ctnetlink_alloc_filter(cda);
+ if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) {
+ filter = ctnetlink_alloc_filter(cda, family);
if (IS_ERR(filter))
return PTR_ERR(filter);
}
@@ -866,13 +874,24 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
struct ctnetlink_filter *filter = data;
if (filter == NULL)
- return 1;
+ goto out;
+
+ /* Match entries of a given L3 protocol number.
+ * If it is not specified, i.e. l3proto == 0,
+ * then match everything.
+ */
+ if (filter->family && nf_ct_l3num(ct) != filter->family)
+ goto ignore_entry;
#ifdef CONFIG_NF_CONNTRACK_MARK
- if ((ct->mark & filter->mark.mask) == filter->mark.val)
- return 1;
+ if ((ct->mark & filter->mark.mask) != filter->mark.val)
+ goto ignore_entry;
#endif
+out:
+ return 1;
+
+ignore_entry:
return 0;
}
@@ -883,8 +902,6 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
struct nf_conn *ct, *last;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
- u_int8_t l3proto = nfmsg->nfgen_family;
struct nf_conn *nf_ct_evict[8];
int res, i;
spinlock_t *lockp;
@@ -923,11 +940,6 @@ restart:
if (!net_eq(net, nf_ct_net(ct)))
continue;
- /* Dump entries of a given L3 protocol number.
- * If it is not specified, ie. l3proto == 0,
- * then dump everything. */
- if (l3proto && nf_ct_l3num(ct) != l3proto)
- continue;
if (cb->args[1]) {
if (ct != last)
continue;
@@ -1048,7 +1060,7 @@ static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
rcu_read_lock();
- l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
+ l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
if (likely(l4proto->nlattr_to_tuple)) {
ret = nla_validate_nested(attr, CTA_PROTO_MAX,
@@ -1213,12 +1225,12 @@ static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
static int ctnetlink_flush_conntrack(struct net *net,
const struct nlattr * const cda[],
- u32 portid, int report)
+ u32 portid, int report, u8 family)
{
struct ctnetlink_filter *filter = NULL;
- if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
- filter = ctnetlink_alloc_filter(cda);
+ if (family || (cda[CTA_MARK] && cda[CTA_MARK_MASK])) {
+ filter = ctnetlink_alloc_filter(cda, family);
if (IS_ERR(filter))
return PTR_ERR(filter);
}
@@ -1257,7 +1269,7 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
else {
return ctnetlink_flush_conntrack(net, cda,
NETLINK_CB(skb).portid,
- nlmsg_report(nlh));
+ nlmsg_report(nlh), u3);
}
if (err < 0)
@@ -1696,7 +1708,7 @@ static int ctnetlink_change_protoinfo(struct nf_conn *ct,
return err;
rcu_read_lock();
- l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+ l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
if (l4proto->from_nlattr)
err = l4proto->from_nlattr(tb, ct);
rcu_read_unlock();
@@ -2656,8 +2668,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
rcu_read_lock();
ret = ctnetlink_dump_tuples_ip(skb, &m);
if (ret >= 0) {
- l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
- tuple->dst.protonum);
+ l4proto = __nf_ct_l4proto_find(tuple->dst.protonum);
ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
}
rcu_read_unlock();
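With the family check moved from the dump loop into the shared ctnetlink filter, both dumps and flushes now honor nfgen_family. A hedged userspace sketch of a dump restricted to IPv6 entries (error handling and the receive loop trimmed; constants from the standard nfnetlink uapi headers):

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/netlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct nfgenmsg nfh;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_NETFILTER);

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct nfgenmsg));
	req.nlh.nlmsg_type = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_GET;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.nfh.nfgen_family = AF_INET6;	/* the per-family filter */
	req.nfh.version = NFNETLINK_V0;

	send(fd, &req, req.nlh.nlmsg_len, 0);
	/* ... recv dump replies as in any netlink dump loop ... */
	close(fd);
	return 0;
}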
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 51c5d7eec0a3..40643af7137e 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -43,7 +43,7 @@
extern unsigned int nf_conntrack_net_id;
-static struct nf_conntrack_l4proto __rcu **nf_ct_protos[NFPROTO_NUMPROTO] __read_mostly;
+static struct nf_conntrack_l4proto __rcu *nf_ct_protos[MAX_NF_CT_PROTO + 1] __read_mostly;
static DEFINE_MUTEX(nf_ct_proto_mutex);
@@ -124,23 +124,21 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
EXPORT_SYMBOL_GPL(nf_ct_l4proto_log_invalid);
#endif
-const struct nf_conntrack_l4proto *
-__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto)
+const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto)
{
- if (unlikely(l3proto >= NFPROTO_NUMPROTO || nf_ct_protos[l3proto] == NULL))
+ if (unlikely(l4proto >= ARRAY_SIZE(nf_ct_protos)))
return &nf_conntrack_l4proto_generic;
- return rcu_dereference(nf_ct_protos[l3proto][l4proto]);
+ return rcu_dereference(nf_ct_protos[l4proto]);
}
EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find);
-const struct nf_conntrack_l4proto *
-nf_ct_l4proto_find_get(u_int16_t l3num, u_int8_t l4num)
+const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4num)
{
const struct nf_conntrack_l4proto *p;
rcu_read_lock();
- p = __nf_ct_l4proto_find(l3num, l4num);
+ p = __nf_ct_l4proto_find(l4num);
if (!try_module_get(p->me))
p = &nf_conntrack_l4proto_generic;
rcu_read_unlock();
@@ -159,8 +157,7 @@ static int kill_l4proto(struct nf_conn *i, void *data)
{
const struct nf_conntrack_l4proto *l4proto;
l4proto = data;
- return nf_ct_protonum(i) == l4proto->l4proto &&
- nf_ct_l3num(i) == l4proto->l3proto;
+ return nf_ct_protonum(i) == l4proto->l4proto;
}
static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
@@ -219,48 +216,20 @@ int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *l4proto)
{
int ret = 0;
- if (l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos))
- return -EBUSY;
-
if ((l4proto->to_nlattr && l4proto->nlattr_size == 0) ||
(l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size))
return -EINVAL;
mutex_lock(&nf_ct_proto_mutex);
- if (!nf_ct_protos[l4proto->l3proto]) {
- /* l3proto may be loaded latter. */
- struct nf_conntrack_l4proto __rcu **proto_array;
- int i;
-
- proto_array =
- kmalloc_array(MAX_NF_CT_PROTO,
- sizeof(struct nf_conntrack_l4proto *),
- GFP_KERNEL);
- if (proto_array == NULL) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- for (i = 0; i < MAX_NF_CT_PROTO; i++)
- RCU_INIT_POINTER(proto_array[i],
- &nf_conntrack_l4proto_generic);
-
- /* Before making proto_array visible to lockless readers,
- * we must make sure its content is committed to memory.
- */
- smp_wmb();
-
- nf_ct_protos[l4proto->l3proto] = proto_array;
- } else if (rcu_dereference_protected(
- nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+ if (rcu_dereference_protected(
+ nf_ct_protos[l4proto->l4proto],
lockdep_is_held(&nf_ct_proto_mutex)
) != &nf_conntrack_l4proto_generic) {
ret = -EBUSY;
goto out_unlock;
}
- rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
- l4proto);
+ rcu_assign_pointer(nf_ct_protos[l4proto->l4proto], l4proto);
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
return ret;
@@ -274,7 +243,7 @@ int nf_ct_l4proto_pernet_register_one(struct net *net,
struct nf_proto_net *pn = NULL;
if (l4proto->init_net) {
- ret = l4proto->init_net(net, l4proto->l3proto);
+ ret = l4proto->init_net(net);
if (ret < 0)
goto out;
}
@@ -296,13 +265,13 @@ EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register_one);
static void __nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto)
{
- BUG_ON(l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos));
+ BUG_ON(l4proto->l4proto >= ARRAY_SIZE(nf_ct_protos));
BUG_ON(rcu_dereference_protected(
- nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+ nf_ct_protos[l4proto->l4proto],
lockdep_is_held(&nf_ct_proto_mutex)
) != l4proto);
- rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+ rcu_assign_pointer(nf_ct_protos[l4proto->l4proto],
&nf_conntrack_l4proto_generic);
}
@@ -352,7 +321,7 @@ static int
nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
unsigned int num_proto)
{
- int ret = -EINVAL, ver;
+ int ret = -EINVAL;
unsigned int i;
for (i = 0; i < num_proto; i++) {
@@ -361,9 +330,8 @@ nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
break;
}
if (i != num_proto) {
- ver = l4proto[i]->l3proto == PF_INET6 ? 6 : 4;
- pr_err("nf_conntrack_ipv%d: can't register l4 %d proto.\n",
- ver, l4proto[i]->l4proto);
+ pr_err("nf_conntrack: can't register l4 %d proto.\n",
+ l4proto[i]->l4proto);
nf_ct_l4proto_unregister(l4proto, i);
}
return ret;
@@ -382,9 +350,8 @@ int nf_ct_l4proto_pernet_register(struct net *net,
break;
}
if (i != num_proto) {
- pr_err("nf_conntrack_proto_%d %d: pernet registration failed\n",
- l4proto[i]->l4proto,
- l4proto[i]->l3proto == PF_INET6 ? 6 : 4);
+ pr_err("nf_conntrack %d: pernet registration failed\n",
+ l4proto[i]->l4proto);
nf_ct_l4proto_pernet_unregister(net, l4proto, i);
}
return ret;
@@ -455,7 +422,7 @@ static unsigned int ipv4_conntrack_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
+ return nf_conntrack_in(skb, state);
}
static unsigned int ipv4_conntrack_local(void *priv,
@@ -477,7 +444,7 @@ static unsigned int ipv4_conntrack_local(void *priv,
return NF_ACCEPT;
}
- return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
+ return nf_conntrack_in(skb, state);
}
/* Connection tracking may drop packets, but never alters them, so
@@ -690,14 +657,14 @@ static unsigned int ipv6_conntrack_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
+ return nf_conntrack_in(skb, state);
}
static unsigned int ipv6_conntrack_local(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
+ return nf_conntrack_in(skb, state);
}
static unsigned int ipv6_helper(void *priv,
@@ -911,37 +878,26 @@ void nf_ct_netns_put(struct net *net, uint8_t nfproto)
EXPORT_SYMBOL_GPL(nf_ct_netns_put);
static const struct nf_conntrack_l4proto * const builtin_l4proto[] = {
- &nf_conntrack_l4proto_tcp4,
- &nf_conntrack_l4proto_udp4,
+ &nf_conntrack_l4proto_tcp,
+ &nf_conntrack_l4proto_udp,
&nf_conntrack_l4proto_icmp,
#ifdef CONFIG_NF_CT_PROTO_DCCP
- &nf_conntrack_l4proto_dccp4,
+ &nf_conntrack_l4proto_dccp,
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
- &nf_conntrack_l4proto_sctp4,
+ &nf_conntrack_l4proto_sctp,
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
- &nf_conntrack_l4proto_udplite4,
+ &nf_conntrack_l4proto_udplite,
#endif
#if IS_ENABLED(CONFIG_IPV6)
- &nf_conntrack_l4proto_tcp6,
- &nf_conntrack_l4proto_udp6,
&nf_conntrack_l4proto_icmpv6,
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- &nf_conntrack_l4proto_dccp6,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_SCTP
- &nf_conntrack_l4proto_sctp6,
-#endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
- &nf_conntrack_l4proto_udplite6,
-#endif
#endif /* CONFIG_IPV6 */
};
int nf_conntrack_proto_init(void)
{
- int ret = 0;
+ int ret = 0, i;
ret = nf_register_sockopt(&so_getorigdst);
if (ret < 0)
@@ -952,6 +908,11 @@ int nf_conntrack_proto_init(void)
if (ret < 0)
goto cleanup_sockopt;
#endif
+
+ for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++)
+ RCU_INIT_POINTER(nf_ct_protos[i],
+ &nf_conntrack_l4proto_generic);
+
ret = nf_ct_l4proto_register(builtin_l4proto,
ARRAY_SIZE(builtin_l4proto));
if (ret < 0)
@@ -969,17 +930,10 @@ cleanup_sockopt:
void nf_conntrack_proto_fini(void)
{
- unsigned int i;
-
nf_unregister_sockopt(&so_getorigdst);
#if IS_ENABLED(CONFIG_IPV6)
nf_unregister_sockopt(&so_getorigdst6);
#endif
- /* No need to call nf_ct_l4proto_unregister(), the register
- * tables are free'd here anyway.
- */
- for (i = 0; i < ARRAY_SIZE(nf_ct_protos); i++)
- kfree(nf_ct_protos[i]);
}
int nf_conntrack_proto_pernet_init(struct net *net)
@@ -988,8 +942,7 @@ int nf_conntrack_proto_pernet_init(struct net *net)
struct nf_proto_net *pn = nf_ct_l4proto_net(net,
&nf_conntrack_l4proto_generic);
- err = nf_conntrack_l4proto_generic.init_net(net,
- nf_conntrack_l4proto_generic.l3proto);
+ err = nf_conntrack_l4proto_generic.init_net(net);
if (err < 0)
return err;
err = nf_ct_l4proto_register_sysctl(net,
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index f3f91ed2c21a..171e9e122e5f 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -389,18 +389,15 @@ static inline struct nf_dccp_net *dccp_pernet(struct net *net)
return &net->ct.nf_ct_proto.dccp;
}
-static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+static noinline bool
+dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
+ const struct dccp_hdr *dh)
{
struct net *net = nf_ct_net(ct);
struct nf_dccp_net *dn;
- struct dccp_hdr _dh, *dh;
const char *msg;
u_int8_t state;
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
- BUG_ON(dh == NULL);
-
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
@@ -438,8 +435,51 @@ static u64 dccp_ack_seq(const struct dccp_hdr *dh)
ntohl(dhack->dccph_ack_nr_low);
}
-static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, enum ip_conntrack_info ctinfo)
+static bool dccp_error(const struct dccp_hdr *dh,
+ struct sk_buff *skb, unsigned int dataoff,
+ const struct nf_hook_state *state)
+{
+ unsigned int dccp_len = skb->len - dataoff;
+ unsigned int cscov;
+ const char *msg;
+
+ if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
+ dh->dccph_doff * 4 > dccp_len) {
+ msg = "nf_ct_dccp: truncated/malformed packet ";
+ goto out_invalid;
+ }
+
+ cscov = dccp_len;
+ if (dh->dccph_cscov) {
+ cscov = (dh->dccph_cscov - 1) * 4;
+ if (cscov > dccp_len) {
+ msg = "nf_ct_dccp: bad checksum coverage ";
+ goto out_invalid;
+ }
+ }
+
+ if (state->hook == NF_INET_PRE_ROUTING &&
+ state->net->ct.sysctl_checksum &&
+ nf_checksum_partial(skb, state->hook, dataoff, cscov,
+ IPPROTO_DCCP, state->pf)) {
+ msg = "nf_ct_dccp: bad checksum ";
+ goto out_invalid;
+ }
+
+ if (dh->dccph_type >= DCCP_PKT_INVALID) {
+ msg = "nf_ct_dccp: reserved packet type ";
+ goto out_invalid;
+ }
+ return false;
+out_invalid:
+ nf_l4proto_log_invalid(skb, state->net, state->pf,
+ IPPROTO_DCCP, "%s", msg);
+ return true;
+}
+
+static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
+ unsigned int dataoff, enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct dccp_hdr _dh, *dh;
@@ -448,8 +488,15 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int *timeouts;
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
- BUG_ON(dh == NULL);
+ if (!dh)
+ return NF_DROP;
+
+ if (dccp_error(dh, skb, dataoff, state))
+ return -NF_ACCEPT;
+
type = dh->dccph_type;
+ if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh))
+ return -NF_ACCEPT;
if (type == DCCP_PKT_RESET &&
!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
@@ -527,55 +574,6 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
return NF_ACCEPT;
}
-static int dccp_error(struct net *net, struct nf_conn *tmpl,
- struct sk_buff *skb, unsigned int dataoff,
- u_int8_t pf, unsigned int hooknum)
-{
- struct dccp_hdr _dh, *dh;
- unsigned int dccp_len = skb->len - dataoff;
- unsigned int cscov;
- const char *msg;
-
- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
- if (dh == NULL) {
- msg = "nf_ct_dccp: short packet ";
- goto out_invalid;
- }
-
- if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
- dh->dccph_doff * 4 > dccp_len) {
- msg = "nf_ct_dccp: truncated/malformed packet ";
- goto out_invalid;
- }
-
- cscov = dccp_len;
- if (dh->dccph_cscov) {
- cscov = (dh->dccph_cscov - 1) * 4;
- if (cscov > dccp_len) {
- msg = "nf_ct_dccp: bad checksum coverage ";
- goto out_invalid;
- }
- }
-
- if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
- nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_DCCP,
- pf)) {
- msg = "nf_ct_dccp: bad checksum ";
- goto out_invalid;
- }
-
- if (dh->dccph_type >= DCCP_PKT_INVALID) {
- msg = "nf_ct_dccp: reserved packet type ";
- goto out_invalid;
- }
-
- return NF_ACCEPT;
-
-out_invalid:
- nf_l4proto_log_invalid(skb, net, pf, IPPROTO_DCCP, "%s", msg);
- return -NF_ACCEPT;
-}
-
static bool dccp_can_early_drop(const struct nf_conn *ct)
{
switch (ct->proto.dccp.state) {
@@ -814,7 +812,7 @@ static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
return 0;
}
-static int dccp_init_net(struct net *net, u_int16_t proto)
+static int dccp_init_net(struct net *net)
{
struct nf_dccp_net *dn = dccp_pernet(net);
struct nf_proto_net *pn = &dn->pn;
@@ -844,45 +842,9 @@ static struct nf_proto_net *dccp_get_net_proto(struct net *net)
return &net->ct.nf_ct_proto.dccp.pn;
}
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
- .l3proto = AF_INET,
- .l4proto = IPPROTO_DCCP,
- .new = dccp_new,
- .packet = dccp_packet,
- .error = dccp_error,
- .can_early_drop = dccp_can_early_drop,
-#ifdef CONFIG_NF_CONNTRACK_PROCFS
- .print_conntrack = dccp_print_conntrack,
-#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_size = DCCP_NLATTR_SIZE,
- .to_nlattr = dccp_to_nlattr,
- .from_nlattr = nlattr_to_dccp,
- .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
- .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
- .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
- .nla_policy = nf_ct_port_nla_policy,
-#endif
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
- .ctnl_timeout = {
- .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
- .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
- .nlattr_max = CTA_TIMEOUT_DCCP_MAX,
- .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
- .nla_policy = dccp_timeout_nla_policy,
- },
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = dccp_init_net,
- .get_net_proto = dccp_get_net_proto,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp4);
-
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
- .l3proto = AF_INET6,
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp = {
.l4proto = IPPROTO_DCCP,
- .new = dccp_new,
.packet = dccp_packet,
- .error = dccp_error,
.can_early_drop = dccp_can_early_drop,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = dccp_print_conntrack,
@@ -908,4 +870,3 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
.init_net = dccp_init_net,
.get_net_proto = dccp_get_net_proto,
};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp6);
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 1df3244ecd07..e10e867e0b55 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -44,12 +44,19 @@ static bool generic_pkt_to_tuple(const struct sk_buff *skb,
/* Returns verdict for packet, or -1 for invalid. */
static int generic_packet(struct nf_conn *ct,
- const struct sk_buff *skb,
+ struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo)
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
const unsigned int *timeout = nf_ct_timeout_lookup(ct);
+ if (!nf_generic_should_process(nf_ct_protonum(ct))) {
+ pr_warn_once("conntrack: generic helper won't handle protocol %d. Please consider loading the specific helper module.\n",
+ nf_ct_protonum(ct));
+ return -NF_ACCEPT;
+ }
+
if (!timeout)
timeout = &generic_pernet(nf_ct_net(ct))->timeout;
@@ -57,19 +64,6 @@ static int generic_packet(struct nf_conn *ct,
return NF_ACCEPT;
}
-/* Called when a new connection for this protocol found. */
-static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
-{
- bool ret;
-
- ret = nf_generic_should_process(nf_ct_protonum(ct));
- if (!ret)
- pr_warn_once("conntrack: generic helper won't handle protocol %d. Please consider loading the specific helper module.\n",
- nf_ct_protonum(ct));
- return ret;
-}
-
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
@@ -142,7 +136,7 @@ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
return 0;
}
-static int generic_init_net(struct net *net, u_int16_t proto)
+static int generic_init_net(struct net *net)
{
struct nf_generic_net *gn = generic_pernet(net);
struct nf_proto_net *pn = &gn->pn;
@@ -159,11 +153,9 @@ static struct nf_proto_net *generic_get_net_proto(struct net *net)
const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
{
- .l3proto = PF_UNSPEC,
.l4proto = 255,
.pkt_to_tuple = generic_pkt_to_tuple,
.packet = generic_packet,
- .new = generic_new,
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = generic_timeout_nlattr_to_obj,
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 650eb4fba2c5..9b48dc8b4b88 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -233,10 +233,26 @@ static unsigned int *gre_get_timeouts(struct net *net)
/* Returns verdict for packet, and may modify conntrack */
static int gre_packet(struct nf_conn *ct,
- const struct sk_buff *skb,
+ struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo)
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
+ if (state->pf != NFPROTO_IPV4)
+ return -NF_ACCEPT;
+
+ if (!nf_ct_is_confirmed(ct)) {
+ unsigned int *timeouts = nf_ct_timeout_lookup(ct);
+
+ if (!timeouts)
+ timeouts = gre_get_timeouts(nf_ct_net(ct));
+
+ /* initialize to a sane value. Ideally a conntrack helper
+ * (e.g. in case of pptp) will increase them */
+ ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
+ ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
+ }
+
/* If we've seen traffic both ways, this is a GRE connection.
* Extend timeout. */
if (ct->status & IPS_SEEN_REPLY) {
@@ -252,26 +268,6 @@ static int gre_packet(struct nf_conn *ct,
return NF_ACCEPT;
}
-/* Called when a new connection for this protocol found. */
-static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
-{
- unsigned int *timeouts = nf_ct_timeout_lookup(ct);
-
- if (!timeouts)
- timeouts = gre_get_timeouts(nf_ct_net(ct));
-
- pr_debug(": ");
- nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-
- /* initialize to sane value. Ideally a conntrack helper
- * (e.g. in case of pptp) is increasing them */
- ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
- ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
-
- return true;
-}
-
/* Called when a conntrack entry has already been removed from the hashes
* and is about to be deleted from memory */
static void gre_destroy(struct nf_conn *ct)
@@ -336,7 +332,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-static int gre_init_net(struct net *net, u_int16_t proto)
+static int gre_init_net(struct net *net)
{
struct netns_proto_gre *net_gre = gre_pernet(net);
int i;
@@ -351,14 +347,12 @@ static int gre_init_net(struct net *net, u_int16_t proto)
/* protocol helper struct */
static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
- .l3proto = AF_INET,
.l4proto = IPPROTO_GRE,
.pkt_to_tuple = gre_pkt_to_tuple,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = gre_print_conntrack,
#endif
.packet = gre_packet,
- .new = gre_new,
.destroy = gre_destroy,
.me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
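With ->new gone, GRE now initializes its per-direction timeouts on the first packet of a still-unconfirmed entry, and, since .l3proto no longer exists, it rejects non-IPv4 traffic at run time. nf_ct_is_confirmed() is what makes the lockless writes safe: an unconfirmed conntrack is not yet in the hash table, so no other CPU can observe it. Condensed from the hunks above, with the normal timeout refresh elided:

	static int gre_packet(struct nf_conn *ct, struct sk_buff *skb,
			      unsigned int dataoff,
			      enum ip_conntrack_info ctinfo,
			      const struct nf_hook_state *state)
	{
		/* tracker no longer carries .l3proto; filter families here */
		if (state->pf != NFPROTO_IPV4)
			return -NF_ACCEPT;

		if (!nf_ct_is_confirmed(ct)) {
			unsigned int *timeouts = nf_ct_timeout_lookup(ct);

			if (!timeouts)
				timeouts = gre_get_timeouts(nf_ct_net(ct));

			/* no lock: unconfirmed conntracks are CPU-private */
			ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
			ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
		}

		return NF_ACCEPT;	/* timeout refresh elided in this sketch */
	}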
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 43c7e1a217b9..3598520bd19b 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -72,34 +72,17 @@ static bool icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
return true;
}
-static unsigned int *icmp_get_timeouts(struct net *net)
-{
- return &icmp_pernet(net)->timeout;
-}
-
/* Returns verdict for packet, or -1 for invalid. */
static int icmp_packet(struct nf_conn *ct,
- const struct sk_buff *skb,
+ struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo)
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
/* Do not immediately delete the connection after the first
successful reply to avoid excessive conntrackd traffic
and also to handle correctly ICMP echo reply duplicates. */
unsigned int *timeout = nf_ct_timeout_lookup(ct);
-
- if (!timeout)
- timeout = icmp_get_timeouts(nf_ct_net(ct));
-
- nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
-
- return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol found. */
-static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
-{
static const u_int8_t valid_new[] = {
[ICMP_ECHO] = 1,
[ICMP_TIMESTAMP] = 1,
@@ -107,21 +90,29 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
[ICMP_ADDRESS] = 1
};
+ if (state->pf != NFPROTO_IPV4)
+ return -NF_ACCEPT;
+
if (ct->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) ||
!valid_new[ct->tuplehash[0].tuple.dst.u.icmp.type]) {
/* Can't create a new ICMP `conn' with this. */
pr_debug("icmp: can't create new conn with type %u\n",
ct->tuplehash[0].tuple.dst.u.icmp.type);
nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple);
- return false;
+ return -NF_ACCEPT;
}
- return true;
+
+ if (!timeout)
+ timeout = &icmp_pernet(nf_ct_net(ct))->timeout;
+
+ nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
+ return NF_ACCEPT;
}
/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
static int
-icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
- unsigned int hooknum)
+icmp_error_message(struct nf_conn *tmpl, struct sk_buff *skb,
+ const struct nf_hook_state *state)
{
struct nf_conntrack_tuple innertuple, origtuple;
const struct nf_conntrack_l4proto *innerproto;
@@ -137,13 +128,13 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
if (!nf_ct_get_tuplepr(skb,
skb_network_offset(skb) + ip_hdrlen(skb)
+ sizeof(struct icmphdr),
- PF_INET, net, &origtuple)) {
+ PF_INET, state->net, &origtuple)) {
pr_debug("icmp_error_message: failed to get tuple\n");
return -NF_ACCEPT;
}
/* rcu_read_lock()ed by nf_hook_thresh */
- innerproto = __nf_ct_l4proto_find(PF_INET, origtuple.dst.protonum);
+ innerproto = __nf_ct_l4proto_find(origtuple.dst.protonum);
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
@@ -154,7 +145,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
ctinfo = IP_CT_RELATED;
- h = nf_conntrack_find_get(net, zone, &innertuple);
+ h = nf_conntrack_find_get(state->net, zone, &innertuple);
if (!h) {
pr_debug("icmp_error_message: no match\n");
return -NF_ACCEPT;
@@ -168,17 +159,18 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
return NF_ACCEPT;
}
-static void icmp_error_log(const struct sk_buff *skb, struct net *net,
- u8 pf, const char *msg)
+static void icmp_error_log(const struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ const char *msg)
{
- nf_l4proto_log_invalid(skb, net, pf, IPPROTO_ICMP, "%s", msg);
+ nf_l4proto_log_invalid(skb, state->net, state->pf,
+ IPPROTO_ICMP, "%s", msg);
}
/* Small and modified version of icmp_rcv */
-static int
-icmp_error(struct net *net, struct nf_conn *tmpl,
- struct sk_buff *skb, unsigned int dataoff,
- u8 pf, unsigned int hooknum)
+int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
+ struct sk_buff *skb, unsigned int dataoff,
+ const struct nf_hook_state *state)
{
const struct icmphdr *icmph;
struct icmphdr _ih;
@@ -186,14 +178,15 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
/* Not enough header? */
icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih);
if (icmph == NULL) {
- icmp_error_log(skb, net, pf, "short packet");
+ icmp_error_log(skb, state, "short packet");
return -NF_ACCEPT;
}
/* See ip_conntrack_proto_tcp.c */
- if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
- nf_ip_checksum(skb, hooknum, dataoff, 0)) {
- icmp_error_log(skb, net, pf, "bad hw icmp checksum");
+ if (state->net->ct.sysctl_checksum &&
+ state->hook == NF_INET_PRE_ROUTING &&
+ nf_ip_checksum(skb, state->hook, dataoff, 0)) {
+ icmp_error_log(skb, state, "bad hw icmp checksum");
return -NF_ACCEPT;
}
@@ -204,7 +197,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
* discarded.
*/
if (icmph->type > NR_ICMP_TYPES) {
- icmp_error_log(skb, net, pf, "invalid icmp type");
+ icmp_error_log(skb, state, "invalid icmp type");
return -NF_ACCEPT;
}
@@ -216,7 +209,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
icmph->type != ICMP_REDIRECT)
return NF_ACCEPT;
- return icmp_error_message(net, tmpl, skb, hooknum);
+ return icmp_error_message(tmpl, skb, state);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -342,7 +335,7 @@ static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
return 0;
}
-static int icmp_init_net(struct net *net, u_int16_t proto)
+static int icmp_init_net(struct net *net)
{
struct nf_icmp_net *in = icmp_pernet(net);
struct nf_proto_net *pn = &in->pn;
@@ -359,13 +352,10 @@ static struct nf_proto_net *icmp_get_net_proto(struct net *net)
const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
{
- .l3proto = PF_INET,
.l4proto = IPPROTO_ICMP,
.pkt_to_tuple = icmp_pkt_to_tuple,
.invert_tuple = icmp_invert_tuple,
.packet = icmp_packet,
- .new = icmp_new,
- .error = icmp_error,
.destroy = NULL,
.me = NULL,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
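icmp_error() is promoted from a static ->error callback to the exported nf_conntrack_icmpv4_error(), and the loose (net, pf, hooknum) triple it used to take collapses into one const struct nf_hook_state *. The caller below is illustrative only, assuming the conntrack core now dispatches ICMP validation directly rather than through the removed ->error pointer; the real call site would live in nf_conntrack_core.c, outside this section:

	static int handle_icmp_error(struct nf_conn *tmpl, struct sk_buff *skb,
				     unsigned int dataoff, u8 protonum,
				     const struct nf_hook_state *state)
	{
		if (protonum == IPPROTO_ICMP)
			return nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
	#if IS_ENABLED(CONFIG_IPV6)
		if (protonum == IPPROTO_ICMPV6)
			return nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
	#endif
		return NF_ACCEPT;
	}

nf_conntrack_icmpv6_error() is the matching export added in the next file.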
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index 97e40f77d678..378618feed5d 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -92,11 +92,31 @@ static unsigned int *icmpv6_get_timeouts(struct net *net)
/* Returns verdict for packet, or -1 for invalid. */
static int icmpv6_packet(struct nf_conn *ct,
- const struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo)
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
unsigned int *timeout = nf_ct_timeout_lookup(ct);
+ static const u8 valid_new[] = {
+ [ICMPV6_ECHO_REQUEST - 128] = 1,
+ [ICMPV6_NI_QUERY - 128] = 1
+ };
+
+ if (state->pf != NFPROTO_IPV6)
+ return -NF_ACCEPT;
+
+ if (!nf_ct_is_confirmed(ct)) {
+ int type = ct->tuplehash[0].tuple.dst.u.icmp.type - 128;
+
+ if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) {
+ /* Can't create a new ICMPv6 `conn' with this. */
+ pr_debug("icmpv6: can't create new conn with type %u\n",
+ type + 128);
+ nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple);
+ return -NF_ACCEPT;
+ }
+ }
if (!timeout)
timeout = icmpv6_get_timeouts(nf_ct_net(ct));
@@ -109,26 +129,6 @@ static int icmpv6_packet(struct nf_conn *ct,
return NF_ACCEPT;
}
-/* Called when a new connection for this protocol found. */
-static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
-{
- static const u_int8_t valid_new[] = {
- [ICMPV6_ECHO_REQUEST - 128] = 1,
- [ICMPV6_NI_QUERY - 128] = 1
- };
- int type = ct->tuplehash[0].tuple.dst.u.icmp.type - 128;
-
- if (type < 0 || type >= sizeof(valid_new) || !valid_new[type]) {
- /* Can't create a new ICMPv6 `conn' with this. */
- pr_debug("icmpv6: can't create new conn with type %u\n",
- type + 128);
- nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple);
- return false;
- }
- return true;
-}
-
static int
icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
@@ -153,7 +153,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
}
/* rcu_read_lock()ed by nf_hook_thresh */
- inproto = __nf_ct_l4proto_find(PF_INET6, origtuple.dst.protonum);
+ inproto = __nf_ct_l4proto_find(origtuple.dst.protonum);
/* Ordinarily, we'd expect the inverted tupleproto, but it's
been preserved inside the ICMP. */
@@ -179,16 +179,18 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
return NF_ACCEPT;
}
-static void icmpv6_error_log(const struct sk_buff *skb, struct net *net,
- u8 pf, const char *msg)
+static void icmpv6_error_log(const struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ const char *msg)
{
- nf_l4proto_log_invalid(skb, net, pf, IPPROTO_ICMPV6, "%s", msg);
+ nf_l4proto_log_invalid(skb, state->net, state->pf,
+ IPPROTO_ICMPV6, "%s", msg);
}
-static int
-icmpv6_error(struct net *net, struct nf_conn *tmpl,
- struct sk_buff *skb, unsigned int dataoff,
- u8 pf, unsigned int hooknum)
+int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state)
{
const struct icmp6hdr *icmp6h;
struct icmp6hdr _ih;
@@ -196,13 +198,14 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
if (icmp6h == NULL) {
- icmpv6_error_log(skb, net, pf, "short packet");
+ icmpv6_error_log(skb, state, "short packet");
return -NF_ACCEPT;
}
- if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
- nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) {
- icmpv6_error_log(skb, net, pf, "ICMPv6 checksum failed");
+ if (state->hook == NF_INET_PRE_ROUTING &&
+ state->net->ct.sysctl_checksum &&
+ nf_ip6_checksum(skb, state->hook, dataoff, IPPROTO_ICMPV6)) {
+ icmpv6_error_log(skb, state, "ICMPv6 checksum failed");
return -NF_ACCEPT;
}
@@ -217,7 +220,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
if (icmp6h->icmp6_type >= 128)
return NF_ACCEPT;
- return icmpv6_error_message(net, tmpl, skb, dataoff);
+ return icmpv6_error_message(state->net, tmpl, skb, dataoff);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -343,7 +346,7 @@ static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
return 0;
}
-static int icmpv6_init_net(struct net *net, u_int16_t proto)
+static int icmpv6_init_net(struct net *net)
{
struct nf_icmp_net *in = icmpv6_pernet(net);
struct nf_proto_net *pn = &in->pn;
@@ -360,13 +363,10 @@ static struct nf_proto_net *icmpv6_get_net_proto(struct net *net)
const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
{
- .l3proto = PF_INET6,
.l4proto = IPPROTO_ICMPV6,
.pkt_to_tuple = icmpv6_pkt_to_tuple,
.invert_tuple = icmpv6_invert_tuple,
.packet = icmpv6_packet,
- .new = icmpv6_new,
- .error = icmpv6_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = icmpv6_tuple_to_nlattr,
.nlattr_tuple_size = icmpv6_nlattr_tuple_size,
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index e4d738d34cd0..3d719d3eb9a3 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -273,11 +273,100 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
return sctp_conntracks[dir][i][cur_state];
}
+/* Don't need lock here: this conntrack not in circulation yet */
+static noinline bool
+sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
+ const struct sctphdr *sh, unsigned int dataoff)
+{
+ enum sctp_conntrack new_state;
+ const struct sctp_chunkhdr *sch;
+ struct sctp_chunkhdr _sch;
+ u32 offset, count;
+
+ memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));
+ new_state = SCTP_CONNTRACK_MAX;
+ for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) {
+ new_state = sctp_new_state(IP_CT_DIR_ORIGINAL,
+ SCTP_CONNTRACK_NONE, sch->type);
+
+ /* Invalid: delete conntrack */
+ if (new_state == SCTP_CONNTRACK_NONE ||
+ new_state == SCTP_CONNTRACK_MAX) {
+ pr_debug("nf_conntrack_sctp: invalid new deleting.\n");
+ return false;
+ }
+
+ /* Copy the vtag into the state info */
+ if (sch->type == SCTP_CID_INIT) {
+ struct sctp_inithdr _inithdr, *ih;
+ /* Sec 8.5.1 (A) */
+ if (sh->vtag)
+ return false;
+
+ ih = skb_header_pointer(skb, offset + sizeof(_sch),
+ sizeof(_inithdr), &_inithdr);
+ if (!ih)
+ return false;
+
+ pr_debug("Setting vtag %x for new conn\n",
+ ih->init_tag);
+
+ ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag;
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
+ pr_debug("Setting vtag %x for secondary conntrack\n",
+ sh->vtag);
+ ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
+ } else {
+ /* If it is a SHUTDOWN ACK OOTB packet, we expect a SHUTDOWN
+ COMPLETE in return; otherwise an ABORT, Sec 8.4 (5) and (8) */

+ pr_debug("Setting vtag %x for new conn OOTB\n",
+ sh->vtag);
+ ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
+ }
+
+ ct->proto.sctp.state = new_state;
+ }
+
+ return true;
+}
+
+static bool sctp_error(struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state)
+{
+ const struct sctphdr *sh;
+ const char *logmsg;
+
+ if (skb->len < dataoff + sizeof(struct sctphdr)) {
+ logmsg = "nf_ct_sctp: short packet ";
+ goto out_invalid;
+ }
+ if (state->hook == NF_INET_PRE_ROUTING &&
+ state->net->ct.sysctl_checksum &&
+ skb->ip_summed == CHECKSUM_NONE) {
+ if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
+ logmsg = "nf_ct_sctp: failed to read header ";
+ goto out_invalid;
+ }
+ sh = (const struct sctphdr *)(skb->data + dataoff);
+ if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
+ logmsg = "nf_ct_sctp: bad CRC ";
+ goto out_invalid;
+ }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ return false;
+out_invalid:
+ nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_SCTP, "%s", logmsg);
+ return true;
+}
+
/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
static int sctp_packet(struct nf_conn *ct,
- const struct sk_buff *skb,
+ struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo)
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
enum sctp_conntrack new_state, old_state;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -289,6 +378,9 @@ static int sctp_packet(struct nf_conn *ct,
unsigned int *timeouts;
unsigned long map[256 / sizeof(unsigned long)] = { 0 };
+ if (sctp_error(skb, dataoff, state))
+ return -NF_ACCEPT;
+
sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
if (sh == NULL)
goto out;
@@ -296,6 +388,17 @@ static int sctp_packet(struct nf_conn *ct,
if (do_basic_checks(ct, skb, dataoff, map) != 0)
goto out;
+ if (!nf_ct_is_confirmed(ct)) {
+ /* If an OOTB packet has any of these chunks, discard it (Sec 8.4) */
+ if (test_bit(SCTP_CID_ABORT, map) ||
+ test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) ||
+ test_bit(SCTP_CID_COOKIE_ACK, map))
+ return -NF_ACCEPT;
+
+ if (!sctp_new(ct, skb, sh, dataoff))
+ return -NF_ACCEPT;
+ }
+
/* Check the verification tag (Sec 8.5) */
if (!test_bit(SCTP_CID_INIT, map) &&
!test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
@@ -397,110 +500,6 @@ out:
return -NF_ACCEPT;
}
-/* Called when a new connection for this protocol found. */
-static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
-{
- enum sctp_conntrack new_state;
- const struct sctphdr *sh;
- struct sctphdr _sctph;
- const struct sctp_chunkhdr *sch;
- struct sctp_chunkhdr _sch;
- u_int32_t offset, count;
- unsigned long map[256 / sizeof(unsigned long)] = { 0 };
-
- sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
- if (sh == NULL)
- return false;
-
- if (do_basic_checks(ct, skb, dataoff, map) != 0)
- return false;
-
- /* If an OOTB packet has any of these chunks discard (Sec 8.4) */
- if (test_bit(SCTP_CID_ABORT, map) ||
- test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) ||
- test_bit(SCTP_CID_COOKIE_ACK, map))
- return false;
-
- memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));
- new_state = SCTP_CONNTRACK_MAX;
- for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
- /* Don't need lock here: this conntrack not in circulation yet */
- new_state = sctp_new_state(IP_CT_DIR_ORIGINAL,
- SCTP_CONNTRACK_NONE, sch->type);
-
- /* Invalid: delete conntrack */
- if (new_state == SCTP_CONNTRACK_NONE ||
- new_state == SCTP_CONNTRACK_MAX) {
- pr_debug("nf_conntrack_sctp: invalid new deleting.\n");
- return false;
- }
-
- /* Copy the vtag into the state info */
- if (sch->type == SCTP_CID_INIT) {
- struct sctp_inithdr _inithdr, *ih;
- /* Sec 8.5.1 (A) */
- if (sh->vtag)
- return false;
-
- ih = skb_header_pointer(skb, offset + sizeof(_sch),
- sizeof(_inithdr), &_inithdr);
- if (!ih)
- return false;
-
- pr_debug("Setting vtag %x for new conn\n",
- ih->init_tag);
-
- ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag;
- } else if (sch->type == SCTP_CID_HEARTBEAT) {
- pr_debug("Setting vtag %x for secondary conntrack\n",
- sh->vtag);
- ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
- }
- /* If it is a shutdown ack OOTB packet, we expect a return
- shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
- else {
- pr_debug("Setting vtag %x for new conn OOTB\n",
- sh->vtag);
- ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
- }
-
- ct->proto.sctp.state = new_state;
- }
-
- return true;
-}
-
-static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
- unsigned int dataoff,
- u8 pf, unsigned int hooknum)
-{
- const struct sctphdr *sh;
- const char *logmsg;
-
- if (skb->len < dataoff + sizeof(struct sctphdr)) {
- logmsg = "nf_ct_sctp: short packet ";
- goto out_invalid;
- }
- if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
- skb->ip_summed == CHECKSUM_NONE) {
- if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
- logmsg = "nf_ct_sctp: failed to read header ";
- goto out_invalid;
- }
- sh = (const struct sctphdr *)(skb->data + dataoff);
- if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
- logmsg = "nf_ct_sctp: bad CRC ";
- goto out_invalid;
- }
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- return NF_ACCEPT;
-out_invalid:
- nf_l4proto_log_invalid(skb, net, pf, IPPROTO_SCTP, "%s", logmsg);
- return -NF_ACCEPT;
-}
-
static bool sctp_can_early_drop(const struct nf_conn *ct)
{
switch (ct->proto.sctp.state) {
@@ -735,7 +734,7 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
return 0;
}
-static int sctp_init_net(struct net *net, u_int16_t proto)
+static int sctp_init_net(struct net *net)
{
struct nf_sctp_net *sn = sctp_pernet(net);
struct nf_proto_net *pn = &sn->pn;
@@ -760,49 +759,12 @@ static struct nf_proto_net *sctp_get_net_proto(struct net *net)
return &net->ct.nf_ct_proto.sctp.pn;
}
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
- .l3proto = PF_INET,
- .l4proto = IPPROTO_SCTP,
-#ifdef CONFIG_NF_CONNTRACK_PROCFS
- .print_conntrack = sctp_print_conntrack,
-#endif
- .packet = sctp_packet,
- .new = sctp_new,
- .error = sctp_error,
- .can_early_drop = sctp_can_early_drop,
- .me = THIS_MODULE,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_size = SCTP_NLATTR_SIZE,
- .to_nlattr = sctp_to_nlattr,
- .from_nlattr = nlattr_to_sctp,
- .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
- .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
- .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
- .nla_policy = nf_ct_port_nla_policy,
-#endif
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
- .ctnl_timeout = {
- .nlattr_to_obj = sctp_timeout_nlattr_to_obj,
- .obj_to_nlattr = sctp_timeout_obj_to_nlattr,
- .nlattr_max = CTA_TIMEOUT_SCTP_MAX,
- .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
- .nla_policy = sctp_timeout_nla_policy,
- },
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = sctp_init_net,
- .get_net_proto = sctp_get_net_proto,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp4);
-
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
- .l3proto = PF_INET6,
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp = {
.l4proto = IPPROTO_SCTP,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = sctp_print_conntrack,
#endif
.packet = sctp_packet,
- .new = sctp_new,
- .error = sctp_error,
.can_early_drop = sctp_can_early_drop,
.me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -826,4 +788,3 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
.init_net = sctp_init_net,
.get_net_proto = sctp_get_net_proto,
};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp6);
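Two conventions recur in the SCTP rewrite. First, the validation helper now returns bool, true meaning "invalid, already logged", so the verdict mapping stays in exactly one place. Second, the one-shot setup path is split out as a noinline function, keeping first-packet-only stack usage off the hot per-packet path. The resulting skeleton of sctp_packet(), reduced from the hunks above (vtag checks and the state machine elided):

	static int sctp_packet(struct nf_conn *ct, struct sk_buff *skb,
			       unsigned int dataoff,
			       enum ip_conntrack_info ctinfo,
			       const struct nf_hook_state *state)
	{
		const struct sctphdr *sh;
		struct sctphdr _sctph;

		if (sctp_error(skb, dataoff, state))	/* bool: true == invalid */
			return -NF_ACCEPT;

		sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
		if (!sh)
			return -NF_ACCEPT;

		/* one-shot setup, formerly ->new; noinline keeps its
		 * locals off the common path's stack */
		if (!nf_ct_is_confirmed(ct) && !sctp_new(ct, skb, sh, dataoff))
			return -NF_ACCEPT;

		return NF_ACCEPT;
	}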
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index b4bdf9eda7b7..1bcf9984d45e 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -717,35 +717,26 @@ static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
[TCPHDR_ACK|TCPHDR_URG] = 1,
};
-static void tcp_error_log(const struct sk_buff *skb, struct net *net,
- u8 pf, const char *msg)
+static void tcp_error_log(const struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ const char *msg)
{
- nf_l4proto_log_invalid(skb, net, pf, IPPROTO_TCP, "%s", msg);
+ nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_TCP, "%s", msg);
}
/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
-static int tcp_error(struct net *net, struct nf_conn *tmpl,
- struct sk_buff *skb,
- unsigned int dataoff,
- u_int8_t pf,
- unsigned int hooknum)
+static bool tcp_error(const struct tcphdr *th,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state)
{
- const struct tcphdr *th;
- struct tcphdr _tcph;
unsigned int tcplen = skb->len - dataoff;
- u_int8_t tcpflags;
-
- /* Smaller that minimal TCP header? */
- th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
- if (th == NULL) {
- tcp_error_log(skb, net, pf, "short packet");
- return -NF_ACCEPT;
- }
+ u8 tcpflags;
/* Not whole TCP header or malformed packet */
if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
- tcp_error_log(skb, net, pf, "truncated packet");
- return -NF_ACCEPT;
+ tcp_error_log(skb, state, "truncated packet");
+ return true;
}
/* Checksum invalid? Ignore.
@@ -753,27 +744,101 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
* because the checksum is assumed to be correct.
*/
/* FIXME: Source route IP option packets --RR */
- if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
- nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) {
- tcp_error_log(skb, net, pf, "bad checksum");
- return -NF_ACCEPT;
+ if (state->net->ct.sysctl_checksum &&
+ state->hook == NF_INET_PRE_ROUTING &&
+ nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
+ tcp_error_log(skb, state, "bad checksum");
+ return true;
}
/* Check TCP flags. */
tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
if (!tcp_valid_flags[tcpflags]) {
- tcp_error_log(skb, net, pf, "invalid tcp flag combination");
- return -NF_ACCEPT;
+ tcp_error_log(skb, state, "invalid tcp flag combination");
+ return true;
}
- return NF_ACCEPT;
+ return false;
+}
+
+static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct tcphdr *th)
+{
+ enum tcp_conntrack new_state;
+ struct net *net = nf_ct_net(ct);
+ const struct nf_tcp_net *tn = tcp_pernet(net);
+ const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
+ const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
+
+ /* Don't need lock here: this conntrack not in circulation yet */
+ new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
+
+ /* Invalid: delete conntrack */
+ if (new_state >= TCP_CONNTRACK_MAX) {
+ pr_debug("nf_ct_tcp: invalid new deleting.\n");
+ return false;
+ }
+
+ if (new_state == TCP_CONNTRACK_SYN_SENT) {
+ memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
+ /* SYN packet */
+ ct->proto.tcp.seen[0].td_end =
+ segment_seq_plus_len(ntohl(th->seq), skb->len,
+ dataoff, th);
+ ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
+ if (ct->proto.tcp.seen[0].td_maxwin == 0)
+ ct->proto.tcp.seen[0].td_maxwin = 1;
+ ct->proto.tcp.seen[0].td_maxend =
+ ct->proto.tcp.seen[0].td_end;
+
+ tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
+ } else if (tn->tcp_loose == 0) {
+ /* Don't try to pick up connections. */
+ return false;
+ } else {
+ memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
+ /*
+ * We are in the middle of a connection,
+ * its history is lost for us.
+ * Let's try to use the data from the packet.
+ */
+ ct->proto.tcp.seen[0].td_end =
+ segment_seq_plus_len(ntohl(th->seq), skb->len,
+ dataoff, th);
+ ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
+ if (ct->proto.tcp.seen[0].td_maxwin == 0)
+ ct->proto.tcp.seen[0].td_maxwin = 1;
+ ct->proto.tcp.seen[0].td_maxend =
+ ct->proto.tcp.seen[0].td_end +
+ ct->proto.tcp.seen[0].td_maxwin;
+
+ /* We assume SACK and liberal window checking to handle
+ * window scaling */
+ ct->proto.tcp.seen[0].flags =
+ ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
+ IP_CT_TCP_FLAG_BE_LIBERAL;
+ }
+
+ /* tcp_packet will set them */
+ ct->proto.tcp.last_index = TCP_NONE_SET;
+
+ pr_debug("%s: sender end=%u maxend=%u maxwin=%u scale=%i "
+ "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
+ __func__,
+ sender->td_end, sender->td_maxend, sender->td_maxwin,
+ sender->td_scale,
+ receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
+ receiver->td_scale);
+ return true;
}
/* Returns verdict for packet, or -1 for invalid. */
static int tcp_packet(struct nf_conn *ct,
- const struct sk_buff *skb,
+ struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo)
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = tcp_pernet(net);
@@ -786,7 +851,14 @@ static int tcp_packet(struct nf_conn *ct,
unsigned long timeout;
th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
- BUG_ON(th == NULL);
+ if (th == NULL)
+ return -NF_ACCEPT;
+
+ if (tcp_error(th, skb, dataoff, state))
+ return -NF_ACCEPT;
+
+ if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th))
+ return -NF_ACCEPT;
spin_lock_bh(&ct->lock);
old_state = ct->proto.tcp.state;
@@ -1067,82 +1139,6 @@ static int tcp_packet(struct nf_conn *ct,
return NF_ACCEPT;
}
-/* Called when a new connection for this protocol found. */
-static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
-{
- enum tcp_conntrack new_state;
- const struct tcphdr *th;
- struct tcphdr _tcph;
- struct net *net = nf_ct_net(ct);
- struct nf_tcp_net *tn = tcp_pernet(net);
- const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
- const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
-
- th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
- BUG_ON(th == NULL);
-
- /* Don't need lock here: this conntrack not in circulation yet */
- new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
-
- /* Invalid: delete conntrack */
- if (new_state >= TCP_CONNTRACK_MAX) {
- pr_debug("nf_ct_tcp: invalid new deleting.\n");
- return false;
- }
-
- if (new_state == TCP_CONNTRACK_SYN_SENT) {
- memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
- /* SYN packet */
- ct->proto.tcp.seen[0].td_end =
- segment_seq_plus_len(ntohl(th->seq), skb->len,
- dataoff, th);
- ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
- if (ct->proto.tcp.seen[0].td_maxwin == 0)
- ct->proto.tcp.seen[0].td_maxwin = 1;
- ct->proto.tcp.seen[0].td_maxend =
- ct->proto.tcp.seen[0].td_end;
-
- tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
- } else if (tn->tcp_loose == 0) {
- /* Don't try to pick up connections. */
- return false;
- } else {
- memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
- /*
- * We are in the middle of a connection,
- * its history is lost for us.
- * Let's try to use the data from the packet.
- */
- ct->proto.tcp.seen[0].td_end =
- segment_seq_plus_len(ntohl(th->seq), skb->len,
- dataoff, th);
- ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
- if (ct->proto.tcp.seen[0].td_maxwin == 0)
- ct->proto.tcp.seen[0].td_maxwin = 1;
- ct->proto.tcp.seen[0].td_maxend =
- ct->proto.tcp.seen[0].td_end +
- ct->proto.tcp.seen[0].td_maxwin;
-
- /* We assume SACK and liberal window checking to handle
- * window scaling */
- ct->proto.tcp.seen[0].flags =
- ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
- IP_CT_TCP_FLAG_BE_LIBERAL;
- }
-
- /* tcp_packet will set them */
- ct->proto.tcp.last_index = TCP_NONE_SET;
-
- pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
- "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
- sender->td_end, sender->td_maxend, sender->td_maxwin,
- sender->td_scale,
- receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
- receiver->td_scale);
- return true;
-}
-
static bool tcp_can_early_drop(const struct nf_conn *ct)
{
switch (ct->proto.tcp.state) {
@@ -1213,8 +1209,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
#define TCP_NLATTR_SIZE ( \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + 1) + \
- NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
- NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
+ NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+ NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
{
@@ -1510,7 +1506,7 @@ static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
return 0;
}
-static int tcp_init_net(struct net *net, u_int16_t proto)
+static int tcp_init_net(struct net *net)
{
struct nf_tcp_net *tn = tcp_pernet(net);
struct nf_proto_net *pn = &tn->pn;
@@ -1538,16 +1534,13 @@ static struct nf_proto_net *tcp_get_net_proto(struct net *net)
return &net->ct.nf_ct_proto.tcp.pn;
}
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
{
- .l3proto = PF_INET,
.l4proto = IPPROTO_TCP,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = tcp_print_conntrack,
#endif
.packet = tcp_packet,
- .new = tcp_new,
- .error = tcp_error,
.can_early_drop = tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = tcp_to_nlattr,
@@ -1571,39 +1564,3 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
.init_net = tcp_init_net,
.get_net_proto = tcp_get_net_proto,
};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
-
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
-{
- .l3proto = PF_INET6,
- .l4proto = IPPROTO_TCP,
-#ifdef CONFIG_NF_CONNTRACK_PROCFS
- .print_conntrack = tcp_print_conntrack,
-#endif
- .packet = tcp_packet,
- .new = tcp_new,
- .error = tcp_error,
- .can_early_drop = tcp_can_early_drop,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_size = TCP_NLATTR_SIZE,
- .to_nlattr = tcp_to_nlattr,
- .from_nlattr = nlattr_to_tcp,
- .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
- .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
- .nlattr_tuple_size = tcp_nlattr_tuple_size,
- .nla_policy = nf_ct_port_nla_policy,
-#endif
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
- .ctnl_timeout = {
- .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
- .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
- .nlattr_max = CTA_TIMEOUT_TCP_MAX,
- .obj_size = sizeof(unsigned int) *
- TCP_CONNTRACK_TIMEOUT_MAX,
- .nla_policy = tcp_timeout_nla_policy,
- },
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = tcp_init_net,
- .get_net_proto = tcp_get_net_proto,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);
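Besides folding tcp_error() and tcp_new() into tcp_packet(), the TCP conversion removes a BUG_ON(): a header that cannot be pulled is now simply treated as invalid. Because tcp_packet() fetches the header itself and passes the const struct tcphdr * down, the skb is parsed once per packet instead of once in ->error and again in ->packet. The opening of the converted tcp_packet(), condensed from the hunks above:

	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return -NF_ACCEPT;		/* was: BUG_ON(th == NULL) */

	if (tcp_error(th, skb, dataoff, state))	/* header parsed only once */
		return -NF_ACCEPT;

	if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th))
		return -NF_ACCEPT;

The same range also fixes a long-standing sizeof(sizeof(struct nf_ct_tcp_flags)) slip in TCP_NLATTR_SIZE, which computed sizeof(size_t) instead of the structure's actual size.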
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 3065fb8ef91b..a7aa70370913 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -42,14 +42,65 @@ static unsigned int *udp_get_timeouts(struct net *net)
return udp_pernet(net)->timeouts;
}
+static void udp_error_log(const struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ const char *msg)
+{
+ nf_l4proto_log_invalid(skb, state->net, state->pf,
+ IPPROTO_UDP, "%s", msg);
+}
+
+static bool udp_error(struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state)
+{
+ unsigned int udplen = skb->len - dataoff;
+ const struct udphdr *hdr;
+ struct udphdr _hdr;
+
+ /* Header is too small? */
+ hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+ if (!hdr) {
+ udp_error_log(skb, state, "short packet");
+ return true;
+ }
+
+ /* Truncated/malformed packets */
+ if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
+ udp_error_log(skb, state, "truncated/malformed packet");
+ return true;
+ }
+
+ /* Packet with no checksum */
+ if (!hdr->check)
+ return false;
+
+ /* Checksum invalid? Ignore.
+ * We skip checking packets on the outgoing path
+ * because the checksum is assumed to be correct.
+ * FIXME: Source route IP option packets --RR */
+ if (state->hook == NF_INET_PRE_ROUTING &&
+ state->net->ct.sysctl_checksum &&
+ nf_checksum(skb, state->hook, dataoff, IPPROTO_UDP, state->pf)) {
+ udp_error_log(skb, state, "bad checksum");
+ return true;
+ }
+
+ return false;
+}
+
/* Returns verdict for packet, and may modify conntrack type */
static int udp_packet(struct nf_conn *ct,
- const struct sk_buff *skb,
+ struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo)
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
unsigned int *timeouts;
+ if (udp_error(skb, dataoff, state))
+ return -NF_ACCEPT;
+
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = udp_get_timeouts(nf_ct_net(ct));
@@ -69,24 +120,18 @@ static int udp_packet(struct nf_conn *ct,
return NF_ACCEPT;
}
-/* Called when a new connection for this protocol found. */
-static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
-{
- return true;
-}
-
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-static void udplite_error_log(const struct sk_buff *skb, struct net *net,
- u8 pf, const char *msg)
+static void udplite_error_log(const struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ const char *msg)
{
- nf_l4proto_log_invalid(skb, net, pf, IPPROTO_UDPLITE, "%s", msg);
+ nf_l4proto_log_invalid(skb, state->net, state->pf,
+ IPPROTO_UDPLITE, "%s", msg);
}
-static int udplite_error(struct net *net, struct nf_conn *tmpl,
- struct sk_buff *skb,
- unsigned int dataoff,
- u8 pf, unsigned int hooknum)
+static bool udplite_error(struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state)
{
unsigned int udplen = skb->len - dataoff;
const struct udphdr *hdr;
@@ -96,80 +141,67 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
/* Header is too small? */
hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
if (!hdr) {
- udplite_error_log(skb, net, pf, "short packet");
- return -NF_ACCEPT;
+ udplite_error_log(skb, state, "short packet");
+ return true;
}
cscov = ntohs(hdr->len);
if (cscov == 0) {
cscov = udplen;
} else if (cscov < sizeof(*hdr) || cscov > udplen) {
- udplite_error_log(skb, net, pf, "invalid checksum coverage");
- return -NF_ACCEPT;
+ udplite_error_log(skb, state, "invalid checksum coverage");
+ return true;
}
/* UDPLITE mandates checksums */
if (!hdr->check) {
- udplite_error_log(skb, net, pf, "checksum missing");
- return -NF_ACCEPT;
+ udplite_error_log(skb, state, "checksum missing");
+ return true;
}
/* Checksum invalid? Ignore. */
- if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
- nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP,
- pf)) {
- udplite_error_log(skb, net, pf, "bad checksum");
- return -NF_ACCEPT;
+ if (state->hook == NF_INET_PRE_ROUTING &&
+ state->net->ct.sysctl_checksum &&
+ nf_checksum_partial(skb, state->hook, dataoff, cscov, IPPROTO_UDP,
+ state->pf)) {
+ udplite_error_log(skb, state, "bad checksum");
+ return true;
}
- return NF_ACCEPT;
-}
-#endif
-
-static void udp_error_log(const struct sk_buff *skb, struct net *net,
- u8 pf, const char *msg)
-{
- nf_l4proto_log_invalid(skb, net, pf, IPPROTO_UDP, "%s", msg);
+ return false;
}
-static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
- unsigned int dataoff,
- u_int8_t pf,
- unsigned int hooknum)
+/* Returns verdict for packet, and may modify conntrack type */
+static int udplite_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
{
- unsigned int udplen = skb->len - dataoff;
- const struct udphdr *hdr;
- struct udphdr _hdr;
-
- /* Header is too small? */
- hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
- if (hdr == NULL) {
- udp_error_log(skb, net, pf, "short packet");
- return -NF_ACCEPT;
- }
+ unsigned int *timeouts;
- /* Truncated/malformed packets */
- if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
- udp_error_log(skb, net, pf, "truncated/malformed packet");
+ if (udplite_error(skb, dataoff, state))
return -NF_ACCEPT;
- }
- /* Packet with no checksum */
- if (!hdr->check)
- return NF_ACCEPT;
+ timeouts = nf_ct_timeout_lookup(ct);
+ if (!timeouts)
+ timeouts = udp_get_timeouts(nf_ct_net(ct));
- /* Checksum invalid? Ignore.
- * We skip checking packets on the outgoing path
- * because the checksum is assumed to be correct.
- * FIXME: Source route IP option packets --RR */
- if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
- nf_checksum(skb, hooknum, dataoff, IPPROTO_UDP, pf)) {
- udp_error_log(skb, net, pf, "bad checksum");
- return -NF_ACCEPT;
+ /* If we've seen traffic both ways, this is some kind of UDP
+ stream. Extend timeout. */
+ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+ nf_ct_refresh_acct(ct, ctinfo, skb,
+ timeouts[UDP_CT_REPLIED]);
+ /* Also, more likely to be important, and not a probe */
+ if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
+ nf_conntrack_event_cache(IPCT_ASSURED, ct);
+ } else {
+ nf_ct_refresh_acct(ct, ctinfo, skb,
+ timeouts[UDP_CT_UNREPLIED]);
}
-
return NF_ACCEPT;
}
+#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
@@ -258,7 +290,7 @@ static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
return 0;
}
-static int udp_init_net(struct net *net, u_int16_t proto)
+static int udp_init_net(struct net *net)
{
struct nf_udp_net *un = udp_pernet(net);
struct nf_proto_net *pn = &un->pn;
@@ -278,72 +310,11 @@ static struct nf_proto_net *udp_get_net_proto(struct net *net)
return &net->ct.nf_ct_proto.udp.pn;
}
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
-{
- .l3proto = PF_INET,
- .l4proto = IPPROTO_UDP,
- .allow_clash = true,
- .packet = udp_packet,
- .new = udp_new,
- .error = udp_error,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
- .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
- .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
- .nla_policy = nf_ct_port_nla_policy,
-#endif
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
- .ctnl_timeout = {
- .nlattr_to_obj = udp_timeout_nlattr_to_obj,
- .obj_to_nlattr = udp_timeout_obj_to_nlattr,
- .nlattr_max = CTA_TIMEOUT_UDP_MAX,
- .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
- .nla_policy = udp_timeout_nla_policy,
- },
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = udp_init_net,
- .get_net_proto = udp_get_net_proto,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
-
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
-{
- .l3proto = PF_INET,
- .l4proto = IPPROTO_UDPLITE,
- .allow_clash = true,
- .packet = udp_packet,
- .new = udp_new,
- .error = udplite_error,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
- .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
- .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
- .nla_policy = nf_ct_port_nla_policy,
-#endif
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
- .ctnl_timeout = {
- .nlattr_to_obj = udp_timeout_nlattr_to_obj,
- .obj_to_nlattr = udp_timeout_obj_to_nlattr,
- .nlattr_max = CTA_TIMEOUT_UDP_MAX,
- .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
- .nla_policy = udp_timeout_nla_policy,
- },
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
- .init_net = udp_init_net,
- .get_net_proto = udp_get_net_proto,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4);
-#endif
-
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp =
{
- .l3proto = PF_INET6,
.l4proto = IPPROTO_UDP,
.allow_clash = true,
.packet = udp_packet,
- .new = udp_new,
- .error = udp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
@@ -362,17 +333,13 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite =
{
- .l3proto = PF_INET6,
.l4proto = IPPROTO_UDPLITE,
.allow_clash = true,
- .packet = udp_packet,
- .new = udp_new,
- .error = udplite_error,
+ .packet = udplite_packet,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
@@ -391,5 +358,4 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
#endif
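UDP-Lite previously shared udp_packet() and differed only in its ->error callback; with ->error gone it needs its own entry point, so the patch gives it udplite_packet(), which is udp_packet() with udplite_error() (checksum-coverage and mandatory-checksum validation) at the top. As everywhere else in the series, the _udp4/_udp6 and _udplite4/_udplite6 pairs merge into single trackers. The shared body of both handlers, in sketch form matching the hunks above:

	if (udp_error(skb, dataoff, state))	/* or udplite_error() */
		return -NF_ACCEPT;

	timeouts = nf_ct_timeout_lookup(ct);
	if (!timeouts)
		timeouts = udp_get_timeouts(nf_ct_net(ct));

	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
		/* two-way traffic: use the longer stream timeout and
		 * mark the flow assured exactly once */
		nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_REPLIED]);
		if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
			nf_conntrack_event_cache(IPCT_ASSURED, ct);
	} else {
		nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
	}
	return NF_ACCEPT;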
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 13279f683da9..463d17d349c1 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -292,7 +292,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
if (!net_eq(nf_ct_net(ct), net))
goto release;
- l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
+ l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
WARN_ON(!l4proto);
ret = -ENOSPC;
@@ -720,10 +720,3 @@ static void __exit nf_conntrack_standalone_fini(void)
module_init(nf_conntrack_standalone_init);
module_exit(nf_conntrack_standalone_fini);
-
-/* Some modules need us, but don't depend directly on any symbol.
- They should call this. */
-void need_conntrack(void)
-{
-}
-EXPORT_SYMBOL_GPL(need_conntrack);
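With one tracker per L4 protocol, __nf_ct_l4proto_find() loses its l3num argument at every call site, and the long-vestigial need_conntrack() stub (modules used to call it purely to create a symbol dependency) is deleted outright. The lookup as callers now write it, per the ct_seq_show() hunk above (sketch; RCU protection comes from the surrounding seq_file iteration in the real code):

	const struct nf_conntrack_l4proto *l4proto;

	/* one-dimensional lookup, keyed on IPPROTO_* only */
	l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
	WARN_ON(!l4proto);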
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index d8125616edc7..b7a4816add76 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -120,7 +120,7 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
if (l4num == IPPROTO_TCP)
flow_offload_fixup_tcp(&ct->proto.tcp);
- l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), l4num);
+ l4proto = __nf_ct_l4proto_find(l4num);
if (!l4proto)
return;
@@ -233,8 +233,8 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload *flow;
int dir;
- tuplehash = rhashtable_lookup_fast(&flow_table->rhashtable, tuple,
- nf_flow_offload_rhash_params);
+ tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
+ nf_flow_offload_rhash_params);
if (!tuplehash)
return NULL;
@@ -254,20 +254,17 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table,
struct flow_offload_tuple_rhash *tuplehash;
struct rhashtable_iter hti;
struct flow_offload *flow;
- int err;
-
- err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
- if (err)
- return err;
+ int err = 0;
+ rhashtable_walk_enter(&flow_table->rhashtable, &hti);
rhashtable_walk_start(&hti);
while ((tuplehash = rhashtable_walk_next(&hti))) {
if (IS_ERR(tuplehash)) {
- err = PTR_ERR(tuplehash);
- if (err != -EAGAIN)
- goto out;
-
+ if (PTR_ERR(tuplehash) != -EAGAIN) {
+ err = PTR_ERR(tuplehash);
+ break;
+ }
continue;
}
if (tuplehash->tuple.dir)
@@ -277,7 +274,6 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table,
iter(flow, data);
}
-out:
rhashtable_walk_stop(&hti);
rhashtable_walk_exit(&hti);
@@ -290,25 +286,19 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
return (__s32)(flow->timeout - (u32)jiffies) <= 0;
}
-static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
+static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
{
struct flow_offload_tuple_rhash *tuplehash;
struct rhashtable_iter hti;
struct flow_offload *flow;
- int err;
-
- err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
- if (err)
- return 0;
+ rhashtable_walk_enter(&flow_table->rhashtable, &hti);
rhashtable_walk_start(&hti);
while ((tuplehash = rhashtable_walk_next(&hti))) {
if (IS_ERR(tuplehash)) {
- err = PTR_ERR(tuplehash);
- if (err != -EAGAIN)
- goto out;
-
+ if (PTR_ERR(tuplehash) != -EAGAIN)
+ break;
continue;
}
if (tuplehash->tuple.dir)
@@ -321,11 +311,8 @@ static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
FLOW_OFFLOAD_TEARDOWN)))
flow_offload_del(flow_table, flow);
}
-out:
rhashtable_walk_stop(&hti);
rhashtable_walk_exit(&hti);
-
- return 1;
}
static void nf_flow_offload_work_gc(struct work_struct *work)
@@ -478,14 +465,17 @@ EXPORT_SYMBOL_GPL(nf_flow_table_init);
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
struct net_device *dev = data;
+ struct flow_offload_entry *e;
+
+ e = container_of(flow, struct flow_offload_entry, flow);
if (!dev) {
flow_offload_teardown(flow);
return;
}
-
- if (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
- flow->tuplehash[1].tuple.iifidx == dev->ifindex)
+ if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
+ (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
+ flow->tuplehash[1].tuple.iifidx == dev->ifindex))
flow_offload_dead(flow);
}
@@ -496,7 +486,7 @@ static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
flush_delayed_work(&flowtable->gc_work);
}
-void nf_flow_table_cleanup(struct net *net, struct net_device *dev)
+void nf_flow_table_cleanup(struct net_device *dev)
{
struct nf_flowtable *flowtable;
@@ -514,7 +504,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
mutex_unlock(&flowtable_lock);
cancel_delayed_work_sync(&flow_table->gc_work);
nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
- WARN_ON(!nf_flow_offload_gc_step(flow_table));
+ nf_flow_offload_gc_step(flow_table);
rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
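The flowtable walkers switch from rhashtable_walk_init(), which performed a GFP_KERNEL allocation and could fail, to rhashtable_walk_enter(), which cannot fail; that is what lets nf_flow_offload_gc_step() become void and removes the WARN_ON() in nf_flow_table_free(). The canonical walk now reads as below (sketch, matching the hunks above; -EAGAIN from rhashtable_walk_next() signals a concurrent resize, not an error):

	struct rhashtable_iter hti;
	void *obj;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti); /* cannot fail */
	rhashtable_walk_start(&hti);

	while ((obj = rhashtable_walk_next(&hti))) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) != -EAGAIN)
				break;		/* real error, stop */
			continue;		/* table resized, keep walking */
		}
		/* process the entry */
	}

	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);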
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 15ed91309992..1d291a51cd45 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -254,8 +254,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
return NF_ACCEPT;
- if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
- nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
+ if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
return NF_DROP;
flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
@@ -471,8 +470,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
if (skb_try_make_writable(skb, sizeof(*ip6h)))
return NF_DROP;
- if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
- nf_flow_nat_ipv6(flow, skb, dir) < 0)
+ if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
return NF_DROP;
flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c
index 99606baedda4..38793b95d9bc 100644
--- a/net/netfilter/nf_nat_helper.c
+++ b/net/netfilter/nf_nat_helper.c
@@ -37,7 +37,7 @@ static void mangle_contents(struct sk_buff *skb,
{
unsigned char *data;
- BUG_ON(skb_is_nonlinear(skb));
+ SKB_LINEAR_ASSERT(skb);
data = skb_network_header(skb) + dataoff;
/* move post-replacement */
@@ -110,8 +110,6 @@ bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
!enlarge_skb(skb, rep_len - match_len))
return false;
- SKB_LINEAR_ASSERT(skb);
-
tcph = (void *)skb->data + protoff;
oldlen = skb->len - protoff;
diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
index adee04af8d43..78a9e6454ff3 100644
--- a/net/netfilter/nf_nat_redirect.c
+++ b/net/netfilter/nf_nat_redirect.c
@@ -52,13 +52,11 @@ nf_nat_redirect_ipv4(struct sk_buff *skb,
newdst = 0;
- rcu_read_lock();
indev = __in_dev_get_rcu(skb->dev);
if (indev && indev->ifa_list) {
ifa = indev->ifa_list;
newdst = ifa->ifa_local;
}
- rcu_read_unlock();
if (!newdst)
return NF_DROP;
@@ -97,7 +95,6 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
struct inet6_ifaddr *ifa;
bool addr = false;
- rcu_read_lock();
idev = __in6_dev_get(skb->dev);
if (idev != NULL) {
read_lock_bh(&idev->lock);
@@ -108,7 +105,6 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
}
read_unlock_bh(&idev->lock);
}
- rcu_read_unlock();
if (!addr)
return NF_DROP;
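The rcu_read_lock()/rcu_read_unlock() pairs removed here were nested: NAT targets execute from netfilter hooks, which the core already runs inside an RCU read-side critical section, so __in_dev_get_rcu() and __in6_dev_get() remain safe without a second lock. In outline (the outer section is provided by the hook infrastructure, not by this file; the target entry point shown is illustrative):

	/* hook core, conceptually: */
	rcu_read_lock();
	verdict = redirect_tg4(skb, par);	/* illustrative target entry */
	rcu_read_unlock();

	/* inside the target, a bare RCU-protected dereference suffices: */
	indev = __in_dev_get_rcu(skb->dev);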
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2cfb173cd0b2..42487d01a3ed 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -27,6 +27,8 @@
static LIST_HEAD(nf_tables_expressions);
static LIST_HEAD(nf_tables_objects);
static LIST_HEAD(nf_tables_flowtables);
+static LIST_HEAD(nf_tables_destroy_list);
+static DEFINE_SPINLOCK(nf_tables_destroy_list_lock);
static u64 table_handle;
enum {
@@ -64,6 +66,8 @@ static void nft_validate_state_update(struct net *net, u8 new_validate_state)
net->nft.validate_state = new_validate_state;
}
+static void nf_tables_trans_destroy_work(struct work_struct *w);
+static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work);
static void nft_ctx_init(struct nft_ctx *ctx,
struct net *net,
@@ -207,6 +211,18 @@ static int nft_delchain(struct nft_ctx *ctx)
return err;
}
+/* either expr ops provide both activate/deactivate, or neither */
+static bool nft_expr_check_ops(const struct nft_expr_ops *ops)
+{
+ if (!ops)
+ return true;
+
+ if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate)))
+ return false;
+
+ return true;
+}
+
static void nft_rule_expr_activate(const struct nft_ctx *ctx,
struct nft_rule *rule)
{
@@ -298,7 +314,7 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
return 0;
}
-static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
+static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
struct nft_set *set)
{
struct nft_trans *trans;
@@ -318,7 +334,7 @@ static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
return 0;
}
-static int nft_delset(struct nft_ctx *ctx, struct nft_set *set)
+static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
{
int err;
@@ -1005,7 +1021,8 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk,
static void nf_tables_table_destroy(struct nft_ctx *ctx)
{
- BUG_ON(ctx->table->use > 0);
+ if (WARN_ON(ctx->table->use > 0))
+ return;
rhltable_destroy(&ctx->table->chains_ht);
kfree(ctx->table->name);
@@ -1412,7 +1429,8 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx)
{
struct nft_chain *chain = ctx->chain;
- BUG_ON(chain->use > 0);
+ if (WARN_ON(chain->use > 0))
+ return;
/* no concurrent access possible anymore */
nf_tables_chain_free_chain_rules(chain);
@@ -1907,6 +1925,9 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
*/
int nft_register_expr(struct nft_expr_type *type)
{
+ if (!nft_expr_check_ops(type->ops))
+ return -EINVAL;
+
nfnl_lock(NFNL_SUBSYS_NFTABLES);
if (type->family == NFPROTO_UNSPEC)
list_add_tail_rcu(&type->list, &nf_tables_expressions);
@@ -2054,6 +2075,10 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
err = PTR_ERR(ops);
goto err1;
}
+ if (!nft_expr_check_ops(ops)) {
+ err = -EINVAL;
+ goto err1;
+ }
} else
ops = type->ops;
@@ -2434,7 +2459,6 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
{
struct nft_expr *expr;
- lockdep_assert_held(&ctx->net->nft.commit_mutex);
/*
* Careful: some expressions might not be initialized in case this
* is called on error from nf_tables_newrule().
@@ -3567,13 +3591,6 @@ static void nft_set_destroy(struct nft_set *set)
kvfree(set);
}
-static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
-{
- list_del_rcu(&set->list);
- nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
- nft_set_destroy(set);
-}
-
static int nf_tables_delset(struct net *net, struct sock *nlsk,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const nla[],
@@ -3668,17 +3685,38 @@ bind:
}
EXPORT_SYMBOL_GPL(nf_tables_bind_set);
-void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding)
{
+ if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
+ nft_is_active(ctx->net, set))
+ list_add_tail_rcu(&set->list, &ctx->table->sets);
+
+ list_add_tail_rcu(&binding->list, &set->bindings);
+}
+EXPORT_SYMBOL_GPL(nf_tables_rebind_set);
+
+void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding)
+{
list_del_rcu(&binding->list);
if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
nft_is_active(ctx->net, set))
- nf_tables_set_destroy(ctx, set);
+ list_del_rcu(&set->list);
}
EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
+void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
+{
+ if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
+ nft_is_active(ctx->net, set)) {
+ nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
+ nft_set_destroy(set);
+ }
+}
+EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
+
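The removed nf_tables_set_destroy() coupled detach and free; the split above lets an aborted transaction re-attach an anonymous set via nf_tables_rebind_set() instead of losing it, while memory is released only by nf_tables_destroy_set() on the commit path. A hedged userspace model of that lifecycle (a counter and a flag stand in for the kernel's binding and table lists; names are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

struct demo_set {
    int bindings;   /* stands in for set->bindings */
    bool listed;    /* stands in for membership in table->sets */
};

/* rule activated (or transaction aborted): reattach if needed */
static void demo_rebind(struct demo_set *s)
{
    if (s->bindings == 0)
        s->listed = true;
    s->bindings++;
}

/* rule deactivated: detach from the table, but do not free */
static void demo_unbind(struct demo_set *s)
{
    s->bindings--;
    if (s->bindings == 0)
        s->listed = false;
}

/* commit path: the only place memory is actually released */
static struct demo_set *demo_destroy(struct demo_set *s)
{
    if (s->bindings == 0) {
        free(s);
        return NULL;
    }
    return s;   /* still bound elsewhere, keep it */
}

int main(void)
{
    struct demo_set *s = calloc(1, sizeof(*s));

    if (!s)
        return 1;
    demo_rebind(s);             /* initial bind */
    demo_unbind(s);             /* rule removed in a transaction */
    demo_rebind(s);             /* abort: the set is attached again */
    assert(s->listed);
    demo_unbind(s);             /* removed again, this time committed */
    s = demo_destroy(s);
    assert(s == NULL);
    return 0;
}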
const struct nft_set_ext_type nft_set_ext_types[] = {
[NFT_SET_EXT_KEY] = {
.align = __alignof__(u32),
@@ -6191,19 +6229,28 @@ static void nft_commit_release(struct nft_trans *trans)
nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
break;
}
+
+ if (trans->put_net)
+ put_net(trans->ctx.net);
+
kfree(trans);
}
-static void nf_tables_commit_release(struct net *net)
+static void nf_tables_trans_destroy_work(struct work_struct *w)
{
struct nft_trans *trans, *next;
+ LIST_HEAD(head);
- if (list_empty(&net->nft.commit_list))
+ spin_lock(&nf_tables_destroy_list_lock);
+ list_splice_init(&nf_tables_destroy_list, &head);
+ spin_unlock(&nf_tables_destroy_list_lock);
+
+ if (list_empty(&head))
return;
synchronize_rcu();
- list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+ list_for_each_entry_safe(trans, next, &head, list) {
list_del(&trans->list);
nft_commit_release(trans);
}
@@ -6334,6 +6381,37 @@ static void nft_chain_del(struct nft_chain *chain)
list_del_rcu(&chain->list);
}
+static void nf_tables_commit_release(struct net *net)
+{
+ struct nft_trans *trans;
+
+ /* all side effects have to be made visible.
+ * For example, if a chain named 'foo' has been deleted, a
+ * new transaction must not find it anymore.
+ *
+ * Memory reclaim happens asynchronously from work queue
+ * to prevent expensive synchronize_rcu() in commit phase.
+ */
+ if (list_empty(&net->nft.commit_list)) {
+ mutex_unlock(&net->nft.commit_mutex);
+ return;
+ }
+
+ trans = list_last_entry(&net->nft.commit_list,
+ struct nft_trans, list);
+ get_net(trans->ctx.net);
+ WARN_ON_ONCE(trans->put_net);
+
+ trans->put_net = true;
+ spin_lock(&nf_tables_destroy_list_lock);
+ list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
+ spin_unlock(&nf_tables_destroy_list_lock);
+
+ mutex_unlock(&net->nft.commit_mutex);
+
+ schedule_work(&trans_destroy_work);
+}
+
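nf_tables_commit_release() above pins the net namespace on the last transaction, splices the whole commit list onto a global destroy list under a spinlock, and lets a work item pay for synchronize_rcu() once per batch instead of once per commit. A hedged userspace sketch of the splice-and-defer shape (a mutex and a thread stand in for the spinlock and workqueue; names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct trans {
    struct trans *next;
    int id;
};

static struct trans *commit_list;   /* per-commit transaction log */
static struct trans *destroy_list;  /* global deferred-release list */
static pthread_mutex_t destroy_lock = PTHREAD_MUTEX_INITIALIZER;

/* commit path: a cheap splice under a short lock, no freeing here */
static void commit_release(void)
{
    struct trans **tail;

    pthread_mutex_lock(&destroy_lock);
    tail = &destroy_list;
    while (*tail)
        tail = &(*tail)->next;
    *tail = commit_list;            /* splice the whole batch */
    commit_list = NULL;
    pthread_mutex_unlock(&destroy_lock);
    /* kernel equivalent: schedule_work(&trans_destroy_work) */
}

/* work item: one expensive quiescence wait per batch, then free */
static void *destroy_work(void *arg)
{
    struct trans *head, *next;

    pthread_mutex_lock(&destroy_lock);
    head = destroy_list;            /* take the list privately */
    destroy_list = NULL;
    pthread_mutex_unlock(&destroy_lock);

    /* kernel equivalent: synchronize_rcu() happens here, off the
     * commit path, once for everything spliced so far */
    for (; head; head = next) {
        next = head->next;
        printf("releasing trans %d\n", head->id);
        free(head);
    }
    return arg;
}

int main(void)
{
    pthread_t worker;

    for (int i = 0; i < 3; i++) {
        struct trans *t = malloc(sizeof(*t));
        if (!t)
            return 1;
        t->id = i;
        t->next = commit_list;
        commit_list = t;
    }
    commit_release();
    pthread_create(&worker, NULL, destroy_work, NULL);
    pthread_join(worker, NULL);
    return 0;
}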
static int nf_tables_commit(struct net *net, struct sk_buff *skb)
{
struct nft_trans *trans, *next;
@@ -6495,9 +6573,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
}
}
- nf_tables_commit_release(net);
nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
- mutex_unlock(&net->nft.commit_mutex);
+ nf_tables_commit_release(net);
return 0;
}
@@ -7168,7 +7245,8 @@ int __nft_release_basechain(struct nft_ctx *ctx)
{
struct nft_rule *rule, *nr;
- BUG_ON(!nft_is_base_chain(ctx->chain));
+ if (WARN_ON(!nft_is_base_chain(ctx->chain)))
+ return 0;
nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
@@ -7202,9 +7280,6 @@ static void __nft_release_tables(struct net *net)
list_for_each_entry(chain, &table->chains, list)
nf_tables_unregister_hook(net, table, chain);
- list_for_each_entry(flowtable, &table->flowtables, list)
- nf_unregister_net_hooks(net, flowtable->ops,
- flowtable->ops_len);
/* No packets are walking on these chains anymore. */
ctx.table = table;
list_for_each_entry(chain, &table->chains, list) {
@@ -7271,6 +7346,7 @@ static int __init nf_tables_module_init(void)
{
int err;
+ spin_lock_init(&nf_tables_destroy_list_lock);
err = register_pernet_subsys(&nf_tables_net_ops);
if (err < 0)
return err;
@@ -7310,6 +7386,7 @@ static void __exit nf_tables_module_exit(void)
unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
nft_chain_filter_fini();
unregister_pernet_subsys(&nf_tables_net_ops);
+ cancel_work_sync(&trans_destroy_work);
rcu_barrier();
nf_tables_core_module_exit();
}
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index ffd5c0f9412b..3fbce3b9c5ec 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -249,12 +249,24 @@ static struct nft_expr_type *nft_basic_types[] = {
&nft_exthdr_type,
};
+static struct nft_object_type *nft_basic_objects[] = {
+#ifdef CONFIG_NETWORK_SECMARK
+ &nft_secmark_obj_type,
+#endif
+};
+
int __init nf_tables_core_module_init(void)
{
- int err, i;
+ int err, i, j = 0;
+
+ for (i = 0; i < ARRAY_SIZE(nft_basic_objects); i++) {
+ err = nft_register_obj(nft_basic_objects[i]);
+ if (err)
+ goto err;
+ }
- for (i = 0; i < ARRAY_SIZE(nft_basic_types); i++) {
- err = nft_register_expr(nft_basic_types[i]);
+ for (j = 0; j < ARRAY_SIZE(nft_basic_types); j++) {
+ err = nft_register_expr(nft_basic_types[j]);
if (err)
goto err;
}
@@ -262,8 +274,12 @@ int __init nf_tables_core_module_init(void)
return 0;
err:
+ while (j-- > 0)
+ nft_unregister_expr(nft_basic_types[j]);
+
while (i-- > 0)
- nft_unregister_expr(nft_basic_types[i]);
+ nft_unregister_obj(nft_basic_objects[i]);
+
return err;
}
@@ -274,4 +290,8 @@ void nf_tables_core_module_exit(void)
i = ARRAY_SIZE(nft_basic_types);
while (i-- > 0)
nft_unregister_expr(nft_basic_types[i]);
+
+ i = ARRAY_SIZE(nft_basic_objects);
+ while (i-- > 0)
+ nft_unregister_obj(nft_basic_objects[i]);
}
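The init path above registers the object-type array first and the expression-type array second, so the error label must unwind the partially registered second array before fully unwinding the first; when the expression loop fails, `i` has already run to the end of the object array, so `while (i-- > 0)` drops every object. A standalone sketch of that two-counter rollback (register/unregister reduced to prints; names are illustrative):

#include <stdio.h>

#define N_OBJS  2
#define N_EXPRS 3

static int register_item(const char *kind, int idx, int fail_at)
{
    if (idx == fail_at)
        return -1;                  /* simulated registration failure */
    printf("registered %s %d\n", kind, idx);
    return 0;
}

static void unregister_item(const char *kind, int idx)
{
    printf("unregistered %s %d\n", kind, idx);
}

static int demo_module_init(int fail_expr_at)
{
    int err, i, j = 0;

    for (i = 0; i < N_OBJS; i++) {
        err = register_item("obj", i, -1);
        if (err)
            goto err;
    }
    for (j = 0; j < N_EXPRS; j++) {
        err = register_item("expr", j, fail_expr_at);
        if (err)
            goto err;
    }
    return 0;
err:
    /* drop the exprs registered so far, newest first */
    while (j-- > 0)
        unregister_item("expr", j);
    /* then every obj; i == N_OBJS when the expr loop failed */
    while (i-- > 0)
        unregister_item("obj", i);
    return err;
}

int main(void)
{
    printf("init returned %d\n", demo_module_init(1));
    return 0;
}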
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index a30f8ba4b89a..e7a50af1b3d6 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -53,9 +53,6 @@ ctnl_timeout_parse_policy(void *timeout,
struct nlattr **tb;
int ret = 0;
- if (!l4proto->ctnl_timeout.nlattr_to_obj)
- return 0;
-
tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb),
GFP_KERNEL);
@@ -125,7 +122,7 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
return -EBUSY;
}
- l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+ l4proto = nf_ct_l4proto_find_get(l4num);
 /* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
@@ -167,6 +164,8 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0;
const struct nf_conntrack_l4proto *l4proto = timeout->timeout.l4proto;
+ struct nlattr *nest_parms;
+ int ret;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event);
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
@@ -186,22 +185,15 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
htonl(refcount_read(&timeout->refcnt))))
goto nla_put_failure;
- if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
- struct nlattr *nest_parms;
- int ret;
-
- nest_parms = nla_nest_start(skb,
- CTA_TIMEOUT_DATA | NLA_F_NESTED);
- if (!nest_parms)
- goto nla_put_failure;
+ nest_parms = nla_nest_start(skb, CTA_TIMEOUT_DATA | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
- ret = l4proto->ctnl_timeout.obj_to_nlattr(skb,
- &timeout->timeout.data);
- if (ret < 0)
- goto nla_put_failure;
+ ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->timeout.data);
+ if (ret < 0)
+ goto nla_put_failure;
- nla_nest_end(skb, nest_parms);
- }
+ nla_nest_end(skb, nest_parms);
nlmsg_end(skb, nlh);
return skb->len;
@@ -358,7 +350,6 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl,
struct netlink_ext_ack *extack)
{
const struct nf_conntrack_l4proto *l4proto;
- __u16 l3num;
__u8 l4num;
int ret;
@@ -367,9 +358,8 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl,
!cda[CTA_TIMEOUT_DATA])
return -EINVAL;
- l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
- l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+ l4proto = nf_ct_l4proto_find_get(l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
@@ -391,12 +381,14 @@ err:
static int
cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
- u32 seq, u32 type, int event,
+ u32 seq, u32 type, int event, u16 l3num,
const struct nf_conntrack_l4proto *l4proto)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0;
+ struct nlattr *nest_parms;
+ int ret;
event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_TIMEOUT, event);
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
@@ -408,25 +400,19 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
- if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l4proto->l3proto)) ||
+ if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l3num)) ||
nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto))
goto nla_put_failure;
- if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
- struct nlattr *nest_parms;
- int ret;
-
- nest_parms = nla_nest_start(skb,
- CTA_TIMEOUT_DATA | NLA_F_NESTED);
- if (!nest_parms)
- goto nla_put_failure;
+ nest_parms = nla_nest_start(skb, CTA_TIMEOUT_DATA | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
- ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
- if (ret < 0)
- goto nla_put_failure;
+ ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
+ if (ret < 0)
+ goto nla_put_failure;
- nla_nest_end(skb, nest_parms);
- }
+ nla_nest_end(skb, nest_parms);
nlmsg_end(skb, nlh);
return skb->len;
@@ -454,7 +440,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
- l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+ l4proto = nf_ct_l4proto_find_get(l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
@@ -472,6 +458,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
nlh->nlmsg_seq,
NFNL_MSG_TYPE(nlh->nlmsg_type),
IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
+ l3num,
l4proto);
if (ret <= 0) {
kfree_skb(skb2);
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 00db27dfd2ff..6f41dd74729d 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -30,32 +30,27 @@ EXPORT_SYMBOL_GPL(nf_osf_fingers);
static inline int nf_osf_ttl(const struct sk_buff *skb,
int ttl_check, unsigned char f_ttl)
{
+ struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
const struct iphdr *ip = ip_hdr(skb);
-
- if (ttl_check != -1) {
- if (ttl_check == NF_OSF_TTL_TRUE)
- return ip->ttl == f_ttl;
- if (ttl_check == NF_OSF_TTL_NOCHECK)
- return 1;
- else if (ip->ttl <= f_ttl)
- return 1;
- else {
- struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
- int ret = 0;
-
- for_ifa(in_dev) {
- if (inet_ifa_match(ip->saddr, ifa)) {
- ret = (ip->ttl == f_ttl);
- break;
- }
- }
- endfor_ifa(in_dev);
-
- return ret;
+ int ret = 0;
+
+ if (ttl_check == NF_OSF_TTL_TRUE)
+ return ip->ttl == f_ttl;
+ if (ttl_check == NF_OSF_TTL_NOCHECK)
+ return 1;
+ else if (ip->ttl <= f_ttl)
+ return 1;
+
+ for_ifa(in_dev) {
+ if (inet_ifa_match(ip->saddr, ifa)) {
+ ret = (ip->ttl == f_ttl);
+ break;
}
}
- return ip->ttl == f_ttl;
+ endfor_ifa(in_dev);
+
+ return ret;
}
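The rewrite above flattens the nested TTL logic into three modes: exact match, no check at all, or less-or-equal with a last-resort equality test for locally attached sources. A hedged standalone model (the constants mirror NF_OSF_TTL_TRUE/LESS/NOCHECK; the kernel's interface-address walk is reduced to a flag):

#include <assert.h>
#include <stdbool.h>

enum { TTL_TRUE = 0, TTL_LESS = 1, TTL_NOCHECK = 2 };

static bool ttl_matches(int mode, unsigned char pkt_ttl,
                        unsigned char fp_ttl, bool saddr_is_local)
{
    if (mode == TTL_TRUE)
        return pkt_ttl == fp_ttl;   /* exact fingerprint TTL required */
    if (mode == TTL_NOCHECK)
        return true;                /* TTL ignored entirely */
    if (pkt_ttl <= fp_ttl)
        return true;                /* decremented by routers en route */
    /* higher-than-fingerprint TTL: the kernel walks the interface
     * addresses and still insists on equality for local sources */
    return saddr_is_local && pkt_ttl == fp_ttl;
}

int main(void)
{
    assert(ttl_matches(TTL_TRUE, 64, 64, false));
    assert(!ttl_matches(TTL_TRUE, 63, 64, false));
    assert(ttl_matches(TTL_NOCHECK, 1, 64, false));
    assert(ttl_matches(TTL_LESS, 60, 64, false));  /* a few hops away */
    assert(!ttl_matches(TTL_LESS, 65, 64, false)); /* too high, remote */
    return 0;
}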
struct nf_osf_hdr_ctx {
@@ -213,7 +208,7 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family,
if (!tcp)
return false;
- ttl_check = (info->flags & NF_OSF_TTL) ? info->ttl : -1;
+ ttl_check = (info->flags & NF_OSF_TTL) ? info->ttl : 0;
list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) {
@@ -257,7 +252,8 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family,
EXPORT_SYMBOL_GPL(nf_osf_match);
const char *nf_osf_find(const struct sk_buff *skb,
- const struct list_head *nf_osf_fingers)
+ const struct list_head *nf_osf_fingers,
+ const int ttl_check)
{
const struct iphdr *ip = ip_hdr(skb);
const struct nf_osf_user_finger *f;
@@ -275,7 +271,7 @@ const char *nf_osf_find(const struct sk_buff *skb,
list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) {
f = &kf->finger;
- if (!nf_osf_match_one(skb, f, -1, &ctx))
+ if (!nf_osf_match_one(skb, f, ttl_check, &ctx))
continue;
genre = f->genre;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index d33094f4ec41..43041f087eb3 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -765,7 +765,7 @@ __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
return ret;
}
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
entry_seg = nf_queue_entry_dup(entry);
if (entry_seg) {
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index fa90a8402845..79d48c1d06f4 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -79,7 +79,8 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
tb[NFTA_CMP_DATA]);
- BUG_ON(err < 0);
+ if (err < 0)
+ return err;
priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
err = nft_validate_register_load(priv->sreg, desc.len);
@@ -129,7 +130,8 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
err = nft_data_init(NULL, &data, sizeof(data), &desc,
tb[NFTA_CMP_DATA]);
- BUG_ON(err < 0);
+ if (err < 0)
+ return err;
priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);
err = nft_validate_register_load(priv->sreg, desc.len);
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 32535eea51b2..768292eac2a4 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -290,6 +290,24 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
module_put(target->me);
}
+static int nft_extension_dump_info(struct sk_buff *skb, int attr,
+ const void *info,
+ unsigned int size, unsigned int user_size)
+{
+ unsigned int info_size, aligned_size = XT_ALIGN(size);
+ struct nlattr *nla;
+
+ nla = nla_reserve(skb, attr, aligned_size);
+ if (!nla)
+ return -1;
+
+ info_size = user_size ? : size;
+ memcpy(nla_data(nla), info, info_size);
+ memset(nla_data(nla) + info_size, 0, aligned_size - info_size);
+
+ return 0;
+}
+
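nft_extension_dump_info() above copies only the user-visible prefix (usersize) of a target/match info blob and zeroes the aligned tail, so kernel-only trailing fields such as private pointers never reach userspace dumps. A standalone sketch of that scrubbing (XT_ALIGN approximated as 8-byte alignment; names are illustrative):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define DEMO_ALIGN(s) (((s) + 7u) & ~7u)   /* stand-in for XT_ALIGN */

struct demo_info {
    uint32_t classid;   /* user-visible configuration */
    void *priv;         /* kernel-only, must never be dumped */
};

static void dump_info(unsigned char *out, const struct demo_info *info,
                      unsigned int size, unsigned int user_size)
{
    unsigned int info_size = user_size ? user_size : size;

    memcpy(out, info, info_size);                   /* visible prefix */
    memset(out + info_size, 0,
           DEMO_ALIGN(size) - info_size);           /* scrub the tail */
}

int main(void)
{
    struct demo_info info = { 42, (void *)&info };
    unsigned char buf[DEMO_ALIGN(sizeof(info))];
    unsigned int i;

    dump_info(buf, &info, sizeof(info),
              offsetof(struct demo_info, priv));
    for (i = offsetof(struct demo_info, priv); i < sizeof(buf); i++)
        assert(buf[i] == 0);    /* the pointer never left the "kernel" */
    return 0;
}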
static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct xt_target *target = expr->ops->data;
@@ -297,7 +315,8 @@ static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) ||
nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) ||
- nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(target->targetsize), info))
+ nft_extension_dump_info(skb, NFTA_TARGET_INFO, info,
+ target->targetsize, target->usersize))
goto nla_put_failure;
return 0;
@@ -532,7 +551,8 @@ static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr,
if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) ||
- nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(match->matchsize), info))
+ nft_extension_dump_info(skb, NFTA_MATCH_INFO, info,
+ match->matchsize, match->usersize))
goto nla_put_failure;
return 0;
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 5dd87748afa8..586627c361df 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -279,7 +279,7 @@ static void nft_ct_set_eval(const struct nft_expr *expr,
{
const struct nft_ct *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
-#ifdef CONFIG_NF_CONNTRACK_MARK
+#if defined(CONFIG_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_SECMARK)
u32 value = regs->data[priv->sreg];
#endif
enum ip_conntrack_info ctinfo;
@@ -298,6 +298,14 @@ static void nft_ct_set_eval(const struct nft_expr *expr,
}
break;
#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ case NFT_CT_SECMARK:
+ if (ct->secmark != value) {
+ ct->secmark = value;
+ nf_conntrack_event_cache(IPCT_SECMARK, ct);
+ }
+ break;
+#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
case NFT_CT_LABELS:
nf_connlabels_replace(ct,
@@ -565,6 +573,13 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
len = sizeof(u32);
break;
#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ case NFT_CT_SECMARK:
+ if (tb[NFTA_CT_DIRECTION])
+ return -EINVAL;
+ len = sizeof(u32);
+ break;
+#endif
default:
return -EOPNOTSUPP;
}
@@ -776,9 +791,6 @@ nft_ct_timeout_parse_policy(void *timeouts,
struct nlattr **tb;
int ret = 0;
- if (!l4proto->ctnl_timeout.nlattr_to_obj)
- return 0;
-
tb = kcalloc(l4proto->ctnl_timeout.nlattr_max + 1, sizeof(*tb),
GFP_KERNEL);
@@ -858,7 +870,7 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
l4num = nla_get_u8(tb[NFTA_CT_TIMEOUT_L4PROTO]);
priv->l4proto = l4num;
- l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+ l4proto = nf_ct_l4proto_find_get(l4num);
if (l4proto->l4proto != l4num) {
ret = -EOPNOTSUPP;
diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c
index 2cc1e0ef56e8..15cc62b293d6 100644
--- a/net/netfilter/nft_dup_netdev.c
+++ b/net/netfilter/nft_dup_netdev.c
@@ -46,8 +46,6 @@ static int nft_dup_netdev_init(const struct nft_ctx *ctx,
return nft_validate_register_load(priv->sreg_dev, sizeof(int));
}
-static const struct nft_expr_ops nft_dup_netdev_ingress_ops;
-
static int nft_dup_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
struct nft_dup_netdev *priv = nft_expr_priv(expr);
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 6e91a37d57f2..07d4efd3d851 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -235,14 +235,31 @@ err1:
return err;
}
+static void nft_dynset_activate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_dynset *priv = nft_expr_priv(expr);
+
+ nf_tables_rebind_set(ctx, priv->set, &priv->binding);
+}
+
+static void nft_dynset_deactivate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_dynset *priv = nft_expr_priv(expr);
+
+ nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+}
+
static void nft_dynset_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_dynset *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
if (priv->expr != NULL)
nft_expr_destroy(ctx, priv->expr);
+
+ nf_tables_destroy_set(ctx, priv->set);
}
static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -279,6 +296,8 @@ static const struct nft_expr_ops nft_dynset_ops = {
.eval = nft_dynset_eval,
.init = nft_dynset_init,
.destroy = nft_dynset_destroy,
+ .activate = nft_dynset_activate,
+ .deactivate = nft_dynset_deactivate,
.dump = nft_dynset_dump,
};
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index d6bab8c3cbb0..e82d9a966c45 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -201,7 +201,7 @@ static int flow_offload_netdev_event(struct notifier_block *this,
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
- nf_flow_table_cleanup(dev_net(dev), dev);
+ nf_flow_table_cleanup(dev);
return NOTIFY_DONE;
}
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
index 8abb9891cdf2..d7694e7255a0 100644
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -53,8 +53,6 @@ static int nft_fwd_netdev_init(const struct nft_ctx *ctx,
return nft_validate_register_load(priv->sreg_dev, sizeof(int));
}
-static const struct nft_expr_ops nft_fwd_netdev_ingress_ops;
-
static int nft_fwd_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
struct nft_fwd_netdev *priv = nft_expr_priv(expr);
@@ -169,8 +167,6 @@ static int nft_fwd_neigh_init(const struct nft_ctx *ctx,
return nft_validate_register_load(priv->sreg_addr, addr_len);
}
-static const struct nft_expr_ops nft_fwd_netdev_ingress_ops;
-
static int nft_fwd_neigh_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
struct nft_fwd_neigh *priv = nft_expr_priv(expr);
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index ad13e8643599..227b2b15a19c 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -121,12 +121,28 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
return 0;
}
+static void nft_lookup_activate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_lookup *priv = nft_expr_priv(expr);
+
+ nf_tables_rebind_set(ctx, priv->set, &priv->binding);
+}
+
+static void nft_lookup_deactivate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_lookup *priv = nft_expr_priv(expr);
+
+ nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+}
+
static void nft_lookup_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_lookup *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+ nf_tables_destroy_set(ctx, priv->set);
}
static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -209,6 +225,8 @@ static const struct nft_expr_ops nft_lookup_ops = {
.size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
.eval = nft_lookup_eval,
.init = nft_lookup_init,
+ .activate = nft_lookup_activate,
+ .deactivate = nft_lookup_deactivate,
.destroy = nft_lookup_destroy,
.dump = nft_lookup_dump,
.validate = nft_lookup_validate,
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 297fe7d97c18..6180626c3f80 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -284,6 +284,11 @@ static void nft_meta_set_eval(const struct nft_expr *expr,
skb->nf_trace = !!value8;
break;
+#ifdef CONFIG_NETWORK_SECMARK
+ case NFT_META_SECMARK:
+ skb->secmark = value;
+ break;
+#endif
default:
WARN_ON(1);
}
@@ -436,6 +441,9 @@ static int nft_meta_set_init(const struct nft_ctx *ctx,
switch (priv->key) {
case NFT_META_MARK:
case NFT_META_PRIORITY:
+#ifdef CONFIG_NETWORK_SECMARK
+ case NFT_META_SECMARK:
+#endif
len = sizeof(u32);
break;
case NFT_META_NFTRACE:
@@ -543,3 +551,111 @@ struct nft_expr_type nft_meta_type __read_mostly = {
.maxattr = NFTA_META_MAX,
.owner = THIS_MODULE,
};
+
+#ifdef CONFIG_NETWORK_SECMARK
+struct nft_secmark {
+ u32 secid;
+ char *ctx;
+};
+
+static const struct nla_policy nft_secmark_policy[NFTA_SECMARK_MAX + 1] = {
+ [NFTA_SECMARK_CTX] = { .type = NLA_STRING, .len = NFT_SECMARK_CTX_MAXLEN },
+};
+
+static int nft_secmark_compute_secid(struct nft_secmark *priv)
+{
+ u32 tmp_secid = 0;
+ int err;
+
+ err = security_secctx_to_secid(priv->ctx, strlen(priv->ctx), &tmp_secid);
+ if (err)
+ return err;
+
+ if (!tmp_secid)
+ return -ENOENT;
+
+ err = security_secmark_relabel_packet(tmp_secid);
+ if (err)
+ return err;
+
+ priv->secid = tmp_secid;
+ return 0;
+}
+
+static void nft_secmark_obj_eval(struct nft_object *obj, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_secmark *priv = nft_obj_data(obj);
+ struct sk_buff *skb = pkt->skb;
+
+ skb->secmark = priv->secid;
+}
+
+static int nft_secmark_obj_init(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[],
+ struct nft_object *obj)
+{
+ struct nft_secmark *priv = nft_obj_data(obj);
+ int err;
+
+ if (tb[NFTA_SECMARK_CTX] == NULL)
+ return -EINVAL;
+
+ priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL);
+ if (!priv->ctx)
+ return -ENOMEM;
+
+ err = nft_secmark_compute_secid(priv);
+ if (err) {
+ kfree(priv->ctx);
+ return err;
+ }
+
+ security_secmark_refcount_inc();
+
+ return 0;
+}
+
+static int nft_secmark_obj_dump(struct sk_buff *skb, struct nft_object *obj,
+ bool reset)
+{
+ struct nft_secmark *priv = nft_obj_data(obj);
+ int err;
+
+ if (nla_put_string(skb, NFTA_SECMARK_CTX, priv->ctx))
+ return -1;
+
+ if (reset) {
+ err = nft_secmark_compute_secid(priv);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void nft_secmark_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
+{
+ struct nft_secmark *priv = nft_obj_data(obj);
+
+ security_secmark_refcount_dec();
+
+ kfree(priv->ctx);
+}
+
+static const struct nft_object_ops nft_secmark_obj_ops = {
+ .type = &nft_secmark_obj_type,
+ .size = sizeof(struct nft_secmark),
+ .init = nft_secmark_obj_init,
+ .eval = nft_secmark_obj_eval,
+ .dump = nft_secmark_obj_dump,
+ .destroy = nft_secmark_obj_destroy,
+};
+
+struct nft_object_type nft_secmark_obj_type __read_mostly = {
+ .type = NFT_OBJECT_SECMARK,
+ .ops = &nft_secmark_obj_ops,
+ .maxattr = NFTA_SECMARK_MAX,
+ .policy = nft_secmark_policy,
+ .owner = THIS_MODULE,
+};
+#endif /* CONFIG_NETWORK_SECMARK */
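The secmark object above resolves its context string to a secid once at init, re-resolves on a dump with the reset flag so LSM policy reloads are picked up, and drops its reference on destroy. A hedged userspace model of that init/destroy shape (the stub below stands in for security_secctx_to_secid(); everything here is illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_secmark {
    uint32_t secid;
    char *ctx;
};

/* stub standing in for security_secctx_to_secid() */
static int resolve_ctx(const char *ctx, uint32_t *secid)
{
    if (strcmp(ctx, "system_u:object_r:demo_t:s0") == 0) {
        *secid = 1001;
        return 0;
    }
    return -ENOENT;     /* unknown context: refuse at init time */
}

static int demo_init(struct demo_secmark *p, const char *ctx)
{
    int err;

    p->ctx = strdup(ctx);               /* keep the string for dumps */
    if (!p->ctx)
        return -ENOMEM;
    err = resolve_ctx(p->ctx, &p->secid);
    if (err) {
        free(p->ctx);                   /* undo on failure, as above */
        return err;
    }
    return 0;
}

static void demo_destroy(struct demo_secmark *p)
{
    free(p->ctx);       /* kernel also drops the LSM refcount here */
}

int main(void)
{
    struct demo_secmark p;

    if (demo_init(&p, "system_u:object_r:demo_t:s0"))
        return 1;
    printf("ctx %s -> secid %u\n", p.ctx, p.secid);
    demo_destroy(&p);
    return 0;
}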
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index cdf348f751ec..a3185ca2a3a9 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -155,12 +155,28 @@ nla_put_failure:
return -1;
}
+static void nft_objref_map_activate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_objref_map *priv = nft_expr_priv(expr);
+
+ nf_tables_rebind_set(ctx, priv->set, &priv->binding);
+}
+
+static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_objref_map *priv = nft_expr_priv(expr);
+
+ nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+}
+
static void nft_objref_map_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_objref_map *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+ nf_tables_destroy_set(ctx, priv->set);
}
static struct nft_expr_type nft_objref_type;
@@ -169,6 +185,8 @@ static const struct nft_expr_ops nft_objref_map_ops = {
.size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
.eval = nft_objref_map_eval,
.init = nft_objref_map_init,
+ .activate = nft_objref_map_activate,
+ .deactivate = nft_objref_map_deactivate,
.destroy = nft_objref_map_destroy,
.dump = nft_objref_map_dump,
};
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 5af74b37f423..ca5e5d8c5ef8 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -6,10 +6,12 @@
struct nft_osf {
enum nft_registers dreg:8;
+ u8 ttl;
};
static const struct nla_policy nft_osf_policy[NFTA_OSF_MAX + 1] = {
[NFTA_OSF_DREG] = { .type = NLA_U32 },
+ [NFTA_OSF_TTL] = { .type = NLA_U8 },
};
static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
@@ -33,7 +35,7 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
return;
}
- os_name = nf_osf_find(skb, nf_osf_fingers);
+ os_name = nf_osf_find(skb, nf_osf_fingers, priv->ttl);
if (!os_name)
strncpy((char *)dest, "unknown", NFT_OSF_MAXGENRELEN);
else
@@ -46,10 +48,18 @@ static int nft_osf_init(const struct nft_ctx *ctx,
{
struct nft_osf *priv = nft_expr_priv(expr);
int err;
+ u8 ttl;
+
+ if (tb[NFTA_OSF_TTL]) {
+ ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
+ if (ttl > 2)
+ return -EINVAL;
+ priv->ttl = ttl;
+ }
priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
err = nft_validate_register_store(ctx, priv->dreg, NULL,
- NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN);
+ NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
if (err < 0)
return err;
@@ -60,6 +70,9 @@ static int nft_osf_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_osf *priv = nft_expr_priv(expr);
+ if (nla_put_u8(skb, NFTA_OSF_TTL, priv->ttl))
+ goto nla_put_failure;
+
if (nft_dump_register(skb, NFTA_OSF_DREG, priv->dreg))
goto nla_put_failure;
@@ -69,6 +82,15 @@ nla_put_failure:
return -1;
}
+static int nft_osf_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_FORWARD));
+}
+
static struct nft_expr_type nft_osf_type;
static const struct nft_expr_ops nft_osf_op = {
.eval = nft_osf_eval,
@@ -76,6 +98,7 @@ static const struct nft_expr_ops nft_osf_op = {
.init = nft_osf_init,
.dump = nft_osf_dump,
.type = &nft_osf_type,
+ .validate = nft_osf_validate,
};
static struct nft_expr_type nft_osf_type __read_mostly = {
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 29f5bd2377b0..b48e58cceeb7 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -94,7 +94,8 @@ static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX + 1] = {
int nft_reject_icmp_code(u8 code)
{
- BUG_ON(code > NFT_REJECT_ICMPX_MAX);
+ if (WARN_ON_ONCE(code > NFT_REJECT_ICMPX_MAX))
+ return ICMP_NET_UNREACH;
return icmp_code_v4[code];
}
@@ -111,7 +112,8 @@ static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX + 1] = {
int nft_reject_icmpv6_code(u8 code)
{
- BUG_ON(code > NFT_REJECT_ICMPX_MAX);
+ if (WARN_ON_ONCE(code > NFT_REJECT_ICMPX_MAX))
+ return ICMPV6_NOROUTE;
return icmp_code_v6[code];
}
diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
index 76dba9f6b6f6..f35fa33913ae 100644
--- a/net/netfilter/nft_rt.c
+++ b/net/netfilter/nft_rt.c
@@ -90,6 +90,11 @@ static void nft_rt_get_eval(const struct nft_expr *expr,
case NFT_RT_TCPMSS:
nft_reg_store16(dest, get_tcpmss(pkt, dst));
break;
+#ifdef CONFIG_XFRM
+ case NFT_RT_XFRM:
+ nft_reg_store8(dest, !!dst->xfrm);
+ break;
+#endif
default:
WARN_ON(1);
goto err;
@@ -130,6 +135,11 @@ static int nft_rt_get_init(const struct nft_ctx *ctx,
case NFT_RT_TCPMSS:
len = sizeof(u16);
break;
+#ifdef CONFIG_XFRM
+ case NFT_RT_XFRM:
+ len = sizeof(u8);
+ break;
+#endif
default:
return -EOPNOTSUPP;
}
@@ -164,6 +174,7 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp
case NFT_RT_NEXTHOP4:
case NFT_RT_NEXTHOP6:
case NFT_RT_CLASSID:
+ case NFT_RT_XFRM:
return 0;
case NFT_RT_TCPMSS:
hooks = (1 << NF_INET_FORWARD) |
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 015124e649cb..339a9dd1c832 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -88,7 +88,7 @@ static bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
.key = key,
};
- he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
+ he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
if (he != NULL)
*ext = &he->ext;
@@ -106,7 +106,7 @@ static void *nft_rhash_get(const struct net *net, const struct nft_set *set,
.key = elem->key.val.data,
};
- he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
+ he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
if (he != NULL)
return he;
@@ -129,7 +129,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
.key = key,
};
- he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
+ he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
if (he != NULL)
goto out;
@@ -217,7 +217,7 @@ static void *nft_rhash_deactivate(const struct net *net,
};
rcu_read_lock();
- he = rhashtable_lookup_fast(&priv->ht, &arg, nft_rhash_params);
+ he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
if (he != NULL &&
!nft_rhash_flush(net, set, he))
he = NULL;
@@ -244,21 +244,15 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_rhash_elem *he;
struct rhashtable_iter hti;
struct nft_set_elem elem;
- int err;
-
- err = rhashtable_walk_init(&priv->ht, &hti, GFP_ATOMIC);
- iter->err = err;
- if (err)
- return;
+ rhashtable_walk_enter(&priv->ht, &hti);
rhashtable_walk_start(&hti);
while ((he = rhashtable_walk_next(&hti))) {
if (IS_ERR(he)) {
- err = PTR_ERR(he);
- if (err != -EAGAIN) {
- iter->err = err;
- goto out;
+ if (PTR_ERR(he) != -EAGAIN) {
+ iter->err = PTR_ERR(he);
+ break;
}
continue;
@@ -275,13 +269,11 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
iter->err = iter->fn(ctx, set, iter, &elem);
if (iter->err < 0)
- goto out;
+ break;
cont:
iter->count++;
}
-
-out:
rhashtable_walk_stop(&hti);
rhashtable_walk_exit(&hti);
}
@@ -293,21 +285,17 @@ static void nft_rhash_gc(struct work_struct *work)
struct nft_rhash *priv;
struct nft_set_gc_batch *gcb = NULL;
struct rhashtable_iter hti;
- int err;
priv = container_of(work, struct nft_rhash, gc_work.work);
set = nft_set_container_of(priv);
- err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
- if (err)
- goto schedule;
-
+ rhashtable_walk_enter(&priv->ht, &hti);
rhashtable_walk_start(&hti);
while ((he = rhashtable_walk_next(&hti))) {
if (IS_ERR(he)) {
if (PTR_ERR(he) != -EAGAIN)
- goto out;
+ break;
continue;
}
@@ -326,17 +314,15 @@ gc:
gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
if (gcb == NULL)
- goto out;
+ break;
rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
atomic_dec(&set->nelems);
nft_set_gc_batch_add(gcb, he);
}
-out:
rhashtable_walk_stop(&hti);
rhashtable_walk_exit(&hti);
nft_set_gc_batch_complete(gcb);
-schedule:
queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
nft_set_gc_interval(set));
}
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 55e2d9215c0d..fa61208371f8 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -135,9 +135,12 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
d = memcmp(this, key, set->klen);
if (d < 0) {
parent = rcu_dereference_raw(parent->rb_left);
- interval = rbe;
+ if (!(flags & NFT_SET_ELEM_INTERVAL_END))
+ interval = rbe;
} else if (d > 0) {
parent = rcu_dereference_raw(parent->rb_right);
+ if (flags & NFT_SET_ELEM_INTERVAL_END)
+ interval = rbe;
} else {
if (!nft_set_elem_active(&rbe->ext, genmask))
parent = rcu_dereference_raw(parent->rb_left);
@@ -154,7 +157,10 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
nft_set_elem_active(&interval->ext, genmask) &&
- !nft_rbtree_interval_end(interval)) {
+ ((!nft_rbtree_interval_end(interval) &&
+ !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
+ (nft_rbtree_interval_end(interval) &&
+ (flags & NFT_SET_ELEM_INTERVAL_END)))) {
*elem = interval;
return true;
}
@@ -355,12 +361,11 @@ cont:
static void nft_rbtree_gc(struct work_struct *work)
{
+ struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
struct nft_set_gc_batch *gcb = NULL;
- struct rb_node *node, *prev = NULL;
- struct nft_rbtree_elem *rbe;
struct nft_rbtree *priv;
+ struct rb_node *node;
struct nft_set *set;
- int i;
priv = container_of(work, struct nft_rbtree, gc_work.work);
set = nft_set_container_of(priv);
@@ -371,7 +376,7 @@ static void nft_rbtree_gc(struct work_struct *work)
rbe = rb_entry(node, struct nft_rbtree_elem, node);
if (nft_rbtree_interval_end(rbe)) {
- prev = node;
+ rbe_end = rbe;
continue;
}
if (!nft_set_elem_expired(&rbe->ext))
@@ -379,29 +384,30 @@ static void nft_rbtree_gc(struct work_struct *work)
if (nft_set_elem_mark_busy(&rbe->ext))
continue;
+ if (rbe_prev) {
+ rb_erase(&rbe_prev->node, &priv->root);
+ rbe_prev = NULL;
+ }
gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
if (!gcb)
break;
atomic_dec(&set->nelems);
nft_set_gc_batch_add(gcb, rbe);
+ rbe_prev = rbe;
- if (prev) {
- rbe = rb_entry(prev, struct nft_rbtree_elem, node);
+ if (rbe_end) {
atomic_dec(&set->nelems);
- nft_set_gc_batch_add(gcb, rbe);
- prev = NULL;
+ nft_set_gc_batch_add(gcb, rbe_end);
+ rb_erase(&rbe_end->node, &priv->root);
+ rbe_end = NULL;
}
node = rb_next(node);
if (!node)
break;
}
- if (gcb) {
- for (i = 0; i < gcb->head.cnt; i++) {
- rbe = gcb->elems[i];
- rb_erase(&rbe->node, &priv->root);
- }
- }
+ if (rbe_prev)
+ rb_erase(&rbe_prev->node, &priv->root);
write_seqcount_end(&priv->count);
write_unlock_bh(&priv->lock);
diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
new file mode 100644
index 000000000000..5322609f7662
--- /dev/null
+++ b/net/netfilter/nft_xfrm.c
@@ -0,0 +1,294 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Generic part shared by ipv4 and ipv6 backends.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <linux/in.h>
+#include <net/xfrm.h>
+
+static const struct nla_policy nft_xfrm_policy[NFTA_XFRM_MAX + 1] = {
+ [NFTA_XFRM_KEY] = { .type = NLA_U32 },
+ [NFTA_XFRM_DIR] = { .type = NLA_U8 },
+ [NFTA_XFRM_SPNUM] = { .type = NLA_U32 },
+ [NFTA_XFRM_DREG] = { .type = NLA_U32 },
+};
+
+struct nft_xfrm {
+ enum nft_xfrm_keys key:8;
+ enum nft_registers dreg:8;
+ u8 dir;
+ u8 spnum;
+};
+
+static int nft_xfrm_get_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_xfrm *priv = nft_expr_priv(expr);
+ unsigned int len = 0;
+ u32 spnum = 0;
+ u8 dir;
+
+ if (!tb[NFTA_XFRM_KEY] || !tb[NFTA_XFRM_DIR] || !tb[NFTA_XFRM_DREG])
+ return -EINVAL;
+
+ switch (ctx->family) {
+ case NFPROTO_IPV4:
+ case NFPROTO_IPV6:
+ case NFPROTO_INET:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ priv->key = ntohl(nla_get_u32(tb[NFTA_XFRM_KEY]));
+ switch (priv->key) {
+ case NFT_XFRM_KEY_REQID:
+ case NFT_XFRM_KEY_SPI:
+ len = sizeof(u32);
+ break;
+ case NFT_XFRM_KEY_DADDR_IP4:
+ case NFT_XFRM_KEY_SADDR_IP4:
+ len = sizeof(struct in_addr);
+ break;
+ case NFT_XFRM_KEY_DADDR_IP6:
+ case NFT_XFRM_KEY_SADDR_IP6:
+ len = sizeof(struct in6_addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dir = nla_get_u8(tb[NFTA_XFRM_DIR]);
+ switch (dir) {
+ case XFRM_POLICY_IN:
+ case XFRM_POLICY_OUT:
+ priv->dir = dir;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (tb[NFTA_XFRM_SPNUM])
+ spnum = ntohl(nla_get_be32(tb[NFTA_XFRM_SPNUM]));
+
+ if (spnum >= XFRM_MAX_DEPTH)
+ return -ERANGE;
+
+ priv->spnum = spnum;
+
+ priv->dreg = nft_parse_register(tb[NFTA_XFRM_DREG]);
+ return nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, len);
+}
+
+/* Return true if key asks for daddr/saddr and current
+ * state does have a valid address (BEET, TUNNEL).
+ */
+static bool xfrm_state_addr_ok(enum nft_xfrm_keys k, u8 family, u8 mode)
+{
+ switch (k) {
+ case NFT_XFRM_KEY_DADDR_IP4:
+ case NFT_XFRM_KEY_SADDR_IP4:
+ if (family == NFPROTO_IPV4)
+ break;
+ return false;
+ case NFT_XFRM_KEY_DADDR_IP6:
+ case NFT_XFRM_KEY_SADDR_IP6:
+ if (family == NFPROTO_IPV6)
+ break;
+ return false;
+ default:
+ return true;
+ }
+
+ return mode == XFRM_MODE_BEET || mode == XFRM_MODE_TUNNEL;
+}
+
+static void nft_xfrm_state_get_key(const struct nft_xfrm *priv,
+ struct nft_regs *regs,
+ const struct xfrm_state *state)
+{
+ u32 *dest = &regs->data[priv->dreg];
+
+ if (!xfrm_state_addr_ok(priv->key,
+ state->props.family,
+ state->props.mode)) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+
+ switch (priv->key) {
+ case NFT_XFRM_KEY_UNSPEC:
+ case __NFT_XFRM_KEY_MAX:
+ WARN_ON_ONCE(1);
+ break;
+ case NFT_XFRM_KEY_DADDR_IP4:
+ *dest = state->id.daddr.a4;
+ return;
+ case NFT_XFRM_KEY_DADDR_IP6:
+ memcpy(dest, &state->id.daddr.in6, sizeof(struct in6_addr));
+ return;
+ case NFT_XFRM_KEY_SADDR_IP4:
+ *dest = state->props.saddr.a4;
+ return;
+ case NFT_XFRM_KEY_SADDR_IP6:
+ memcpy(dest, &state->props.saddr.in6, sizeof(struct in6_addr));
+ return;
+ case NFT_XFRM_KEY_REQID:
+ *dest = state->props.reqid;
+ return;
+ case NFT_XFRM_KEY_SPI:
+ *dest = state->id.spi;
+ return;
+ }
+
+ regs->verdict.code = NFT_BREAK;
+}
+
+static void nft_xfrm_get_eval_in(const struct nft_xfrm *priv,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct sec_path *sp = pkt->skb->sp;
+ const struct xfrm_state *state;
+
+ if (sp == NULL || sp->len <= priv->spnum) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+
+ state = sp->xvec[priv->spnum];
+ nft_xfrm_state_get_key(priv, regs, state);
+}
+
+static void nft_xfrm_get_eval_out(const struct nft_xfrm *priv,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct dst_entry *dst = skb_dst(pkt->skb);
+ int i;
+
+ for (i = 0; dst && dst->xfrm;
+ dst = ((const struct xfrm_dst *)dst)->child, i++) {
+ if (i < priv->spnum)
+ continue;
+
+ nft_xfrm_state_get_key(priv, regs, dst->xfrm);
+ return;
+ }
+
+ regs->verdict.code = NFT_BREAK;
+}
+
+static void nft_xfrm_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_xfrm *priv = nft_expr_priv(expr);
+
+ switch (priv->dir) {
+ case XFRM_POLICY_IN:
+ nft_xfrm_get_eval_in(priv, regs, pkt);
+ break;
+ case XFRM_POLICY_OUT:
+ nft_xfrm_get_eval_out(priv, regs, pkt);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ regs->verdict.code = NFT_BREAK;
+ break;
+ }
+}
+
+static int nft_xfrm_get_dump(struct sk_buff *skb,
+ const struct nft_expr *expr)
+{
+ const struct nft_xfrm *priv = nft_expr_priv(expr);
+
+ if (nft_dump_register(skb, NFTA_XFRM_DREG, priv->dreg))
+ return -1;
+
+ if (nla_put_be32(skb, NFTA_XFRM_KEY, htonl(priv->key)))
+ return -1;
+ if (nla_put_u8(skb, NFTA_XFRM_DIR, priv->dir))
+ return -1;
+ if (nla_put_be32(skb, NFTA_XFRM_SPNUM, htonl(priv->spnum)))
+ return -1;
+
+ return 0;
+}
+
+static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ const struct nft_xfrm *priv = nft_expr_priv(expr);
+ unsigned int hooks;
+
+ switch (priv->dir) {
+ case XFRM_POLICY_IN:
+ hooks = (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_PRE_ROUTING);
+ break;
+ case XFRM_POLICY_OUT:
+ hooks = (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return nft_chain_validate_hooks(ctx->chain, hooks);
+}
+
+
+static struct nft_expr_type nft_xfrm_type;
+static const struct nft_expr_ops nft_xfrm_get_ops = {
+ .type = &nft_xfrm_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_xfrm)),
+ .eval = nft_xfrm_get_eval,
+ .init = nft_xfrm_get_init,
+ .dump = nft_xfrm_get_dump,
+ .validate = nft_xfrm_validate,
+};
+
+static struct nft_expr_type nft_xfrm_type __read_mostly = {
+ .name = "xfrm",
+ .ops = &nft_xfrm_get_ops,
+ .policy = nft_xfrm_policy,
+ .maxattr = NFTA_XFRM_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_xfrm_module_init(void)
+{
+ return nft_register_expr(&nft_xfrm_type);
+}
+
+static void __exit nft_xfrm_module_exit(void)
+{
+ nft_unregister_expr(&nft_xfrm_type);
+}
+
+module_init(nft_xfrm_module_init);
+module_exit(nft_xfrm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("nf_tables: xfrm/IPSec matching");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_AUTHOR("Máté Eckl <ecklm94@gmail.com>");
+MODULE_ALIAS_NFT_EXPR("xfrm");
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 89457efd2e00..2c7a4b80206f 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -159,7 +159,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
/* Make sure the timeout policy matches any existing protocol tracker,
* otherwise default to generic.
*/
- l4proto = __nf_ct_l4proto_find(par->family, proto);
+ l4proto = __nf_ct_l4proto_find(proto);
if (timeout->l4proto->l4proto != l4proto->l4proto) {
ret = -EINVAL;
pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 5ee859193783..c6acfc2d9c84 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -68,8 +68,6 @@ struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
{
struct idletimer_tg *entry;
- BUG_ON(!label);
-
list_for_each_entry(entry, &idletimer_tg_list, entry) {
if (!strcmp(label, entry->attr.attr.name))
return entry;
@@ -172,8 +170,6 @@ static unsigned int idletimer_tg_target(struct sk_buff *skb,
pr_debug("resetting timer %s, timeout period %u\n",
info->label, info->timeout);
- BUG_ON(!info->timer);
-
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 4ad5fe27e08b..f16202d26c20 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -35,8 +35,6 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
u32 secmark = 0;
const struct xt_secmark_target_info *info = par->targinfo;
- BUG_ON(info->mode != mode);
-
switch (mode) {
case SECMARK_MODE_SEL:
secmark = info->secid;
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 0d0d68c989df..1dae02a97ee3 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -14,6 +14,8 @@
#include <linux/skbuff.h>
#include <linux/route.h>
#include <linux/netfilter/x_tables.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <net/route.h>
#include <net/netfilter/ipv4/nf_dup_ipv4.h>
#include <net/netfilter/ipv6/nf_dup_ipv6.h>
@@ -25,8 +27,15 @@ struct xt_tee_priv {
int oif;
};
+static unsigned int tee_net_id __read_mostly;
static const union nf_inet_addr tee_zero_address;
+struct tee_net {
+ struct list_head priv_list;
+ /* lock protects the priv_list */
+ struct mutex lock;
+};
+
static unsigned int
tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
@@ -51,17 +60,16 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
}
#endif
-static DEFINE_MUTEX(priv_list_mutex);
-static LIST_HEAD(priv_list);
-
static int tee_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net *net = dev_net(dev);
+ struct tee_net *tn = net_generic(net, tee_net_id);
struct xt_tee_priv *priv;
- mutex_lock(&priv_list_mutex);
- list_for_each_entry(priv, &priv_list, list) {
+ mutex_lock(&tn->lock);
+ list_for_each_entry(priv, &tn->priv_list, list) {
switch (event) {
case NETDEV_REGISTER:
if (!strcmp(dev->name, priv->tginfo->oif))
@@ -79,13 +87,14 @@ static int tee_netdev_event(struct notifier_block *this, unsigned long event,
break;
}
}
- mutex_unlock(&priv_list_mutex);
+ mutex_unlock(&tn->lock);
return NOTIFY_DONE;
}
static int tee_tg_check(const struct xt_tgchk_param *par)
{
+ struct tee_net *tn = net_generic(par->net, tee_net_id);
struct xt_tee_tginfo *info = par->targinfo;
struct xt_tee_priv *priv;
@@ -95,6 +104,8 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
return -EINVAL;
if (info->oif[0]) {
+ struct net_device *dev;
+
if (info->oif[sizeof(info->oif)-1] != '\0')
return -EINVAL;
@@ -106,9 +117,14 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
priv->oif = -1;
info->priv = priv;
- mutex_lock(&priv_list_mutex);
- list_add(&priv->list, &priv_list);
- mutex_unlock(&priv_list_mutex);
+ dev = dev_get_by_name(par->net, info->oif);
+ if (dev) {
+ priv->oif = dev->ifindex;
+ dev_put(dev);
+ }
+ mutex_lock(&tn->lock);
+ list_add(&priv->list, &tn->priv_list);
+ mutex_unlock(&tn->lock);
} else
info->priv = NULL;
@@ -118,12 +134,13 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
static void tee_tg_destroy(const struct xt_tgdtor_param *par)
{
+ struct tee_net *tn = net_generic(par->net, tee_net_id);
struct xt_tee_tginfo *info = par->targinfo;
if (info->priv) {
- mutex_lock(&priv_list_mutex);
+ mutex_lock(&tn->lock);
list_del(&info->priv->list);
- mutex_unlock(&priv_list_mutex);
+ mutex_unlock(&tn->lock);
kfree(info->priv);
}
static_key_slow_dec(&xt_tee_enabled);
@@ -156,6 +173,21 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
#endif
};
+static int __net_init tee_net_init(struct net *net)
+{
+ struct tee_net *tn = net_generic(net, tee_net_id);
+
+ INIT_LIST_HEAD(&tn->priv_list);
+ mutex_init(&tn->lock);
+ return 0;
+}
+
+static struct pernet_operations tee_net_ops = {
+ .init = tee_net_init,
+ .id = &tee_net_id,
+ .size = sizeof(struct tee_net),
+};
+
static struct notifier_block tee_netdev_notifier = {
.notifier_call = tee_netdev_event,
};
@@ -164,22 +196,32 @@ static int __init tee_tg_init(void)
{
int ret;
- ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
- if (ret)
+ ret = register_pernet_subsys(&tee_net_ops);
+ if (ret < 0)
return ret;
+
+ ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+ if (ret < 0)
+ goto cleanup_subsys;
+
ret = register_netdevice_notifier(&tee_netdev_notifier);
- if (ret) {
- xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
- return ret;
- }
+ if (ret < 0)
+ goto unregister_targets;
return 0;
+
+unregister_targets:
+ xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+cleanup_subsys:
+ unregister_pernet_subsys(&tee_net_ops);
+ return ret;
}
static void __exit tee_tg_exit(void)
{
unregister_netdevice_notifier(&tee_netdev_notifier);
xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+ unregister_pernet_subsys(&tee_net_ops);
}
module_init(tee_tg_init);
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
index 5d92e1781980..5cb1ecb29ea4 100644
--- a/net/netfilter/xt_cgroup.c
+++ b/net/netfilter/xt_cgroup.c
@@ -68,6 +68,38 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
return 0;
}
+static int cgroup_mt_check_v2(const struct xt_mtchk_param *par)
+{
+ struct xt_cgroup_info_v2 *info = par->matchinfo;
+ struct cgroup *cgrp;
+
+ if ((info->invert_path & ~1) || (info->invert_classid & ~1))
+ return -EINVAL;
+
+ if (!info->has_path && !info->has_classid) {
+ pr_info("xt_cgroup: no path or classid specified\n");
+ return -EINVAL;
+ }
+
+ if (info->has_path && info->has_classid) {
+ pr_info_ratelimited("path and classid specified\n");
+ return -EINVAL;
+ }
+
+ info->priv = NULL;
+ if (info->has_path) {
+ cgrp = cgroup_get_from_path(info->path);
+ if (IS_ERR(cgrp)) {
+ pr_info_ratelimited("invalid path, errno=%ld\n",
+ PTR_ERR(cgrp));
+ return -EINVAL;
+ }
+ info->priv = cgrp;
+ }
+
+ return 0;
+}
+
static bool
cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
@@ -99,6 +131,24 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
info->invert_classid;
}
+static bool cgroup_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_cgroup_info_v2 *info = par->matchinfo;
+ struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
+ struct cgroup *ancestor = info->priv;
+ struct sock *sk = skb->sk;
+
+ if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
+ return false;
+
+ if (ancestor)
+ return cgroup_is_descendant(sock_cgroup_ptr(skcd), ancestor) ^
+ info->invert_path;
+ else
+ return (info->classid == sock_cgroup_classid(skcd)) ^
+ info->invert_classid;
+}
+
static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par)
{
struct xt_cgroup_info_v1 *info = par->matchinfo;
@@ -107,6 +157,14 @@ static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par)
cgroup_put(info->priv);
}
+static void cgroup_mt_destroy_v2(const struct xt_mtdtor_param *par)
+{
+ struct xt_cgroup_info_v2 *info = par->matchinfo;
+
+ if (info->priv)
+ cgroup_put(info->priv);
+}
+
static struct xt_match cgroup_mt_reg[] __read_mostly = {
{
.name = "cgroup",
@@ -134,6 +192,20 @@ static struct xt_match cgroup_mt_reg[] __read_mostly = {
(1 << NF_INET_POST_ROUTING) |
(1 << NF_INET_LOCAL_IN),
},
+ {
+ .name = "cgroup",
+ .revision = 2,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = cgroup_mt_check_v2,
+ .match = cgroup_mt_v2,
+ .matchsize = sizeof(struct xt_cgroup_info_v2),
+ .usersize = offsetof(struct xt_cgroup_info_v2, priv),
+ .destroy = cgroup_mt_destroy_v2,
+ .me = THIS_MODULE,
+ .hooks = (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING) |
+ (1 << NF_INET_LOCAL_IN),
+ },
};
static int __init cgroup_mt_init(void)
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
index 8af9707f8789..ac91170fc8c8 100644
--- a/net/netfilter/xt_nat.c
+++ b/net/netfilter/xt_nat.c
@@ -216,6 +216,8 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
{
.name = "DNAT",
.revision = 2,
+ .checkentry = xt_nat_checkentry,
+ .destroy = xt_nat_destroy,
.target = xt_dnat_target_v2,
.targetsize = sizeof(struct nf_nat_range2),
.table = "nat",
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index bf7bba80e24c..7a103553d10d 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -40,14 +40,8 @@
static bool
xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
{
- const struct xt_osf_info *info = p->matchinfo;
- struct net *net = xt_net(p);
-
- if (!info)
- return false;
-
return nf_osf_match(skb, xt_family(p), xt_hooknum(p), xt_in(p),
- xt_out(p), info, net, nf_osf_fingers);
+ xt_out(p), p->matchinfo, xt_net(p), nf_osf_fingers);
}
static struct xt_match xt_osf_match = {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 0472f3472842..ada144e5645b 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -56,7 +56,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
struct sk_buff *pskb = (struct sk_buff *)skb;
struct sock *sk = skb->sk;
- if (!net_eq(xt_net(par), sock_net(sk)))
+ if (sk && !net_eq(xt_net(par), sock_net(sk)))
sk = NULL;
if (!sk)
@@ -117,7 +117,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
struct sk_buff *pskb = (struct sk_buff *)skb;
struct sock *sk = skb->sk;
- if (!net_eq(xt_net(par), sock_net(sk)))
+ if (sk && !net_eq(xt_net(par), sock_net(sk)))
sk = NULL;
if (!sk)
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index c070dfc0190a..c92894c3e40a 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
{
u32 addr_len;
- if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
+ if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
+ info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
if (addr_len != sizeof(struct in_addr) &&
addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 930d17fa906c..6bb9f3cde0b0 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -574,11 +574,6 @@ static int netlink_insert(struct sock *sk, u32 portid)
if (nlk_sk(sk)->bound)
goto err;
- err = -ENOMEM;
- if (BITS_PER_LONG > 32 &&
- unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
- goto err;
-
nlk_sk(sk)->portid = portid;
sock_hold(sk);
@@ -993,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
struct netlink_sock *nlk = nlk_sk(sk);
struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
int err = 0;
- long unsigned int groups = nladdr->nl_groups;
+ unsigned long groups = nladdr->nl_groups;
bool bound;
if (addr_len < sizeof(struct sockaddr_nl))
@@ -1011,9 +1006,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
return err;
}
- if (nlk->ngroups == 0)
- groups = 0;
- else if (nlk->ngroups < 8*sizeof(groups))
+ if (nlk->ngroups < BITS_PER_LONG)
groups &= (1UL << nlk->ngroups) - 1;
bound = nlk->bound;
@@ -1713,6 +1706,13 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
nlk->flags &= ~NETLINK_F_EXT_ACK;
err = 0;
break;
+ case NETLINK_DUMP_STRICT_CHK:
+ if (val)
+ nlk->flags |= NETLINK_F_STRICT_CHK;
+ else
+ nlk->flags &= ~NETLINK_F_STRICT_CHK;
+ err = 0;
+ break;
default:
err = -ENOPROTOOPT;
}
@@ -1806,6 +1806,15 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
return -EFAULT;
err = 0;
break;
+ case NETLINK_DUMP_STRICT_CHK:
+ if (len < sizeof(int))
+ return -EINVAL;
+ len = sizeof(int);
+ val = nlk->flags & NETLINK_F_STRICT_CHK ? 1 : 0;
+ if (put_user(len, optlen) || put_user(val, optval))
+ return -EFAULT;
+ err = 0;
+ break;
default:
err = -ENOPROTOOPT;
}
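From userspace, a dump requester opts in to the stricter validation with a setsockopt on the netlink socket. A hedged usage sketch: the uapi value 12 is an assumption from this series (the option was later exposed as NETLINK_GET_STRICT_CHK), so it is defined locally if the headers lack it:

#include <linux/netlink.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef NETLINK_DUMP_STRICT_CHK
#define NETLINK_DUMP_STRICT_CHK 12     /* assumed uapi value, see note */
#endif

int main(void)
{
    int one = 1;
    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    /* opt in to strict dump-request validation; older kernels answer
     * ENOPROTOOPT and the requester falls back to lax behaviour */
    if (setsockopt(fd, SOL_NETLINK, NETLINK_DUMP_STRICT_CHK,
                   &one, sizeof(one)) < 0)
        perror("setsockopt");
    close(fd);
    return 0;
}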
@@ -2178,6 +2187,7 @@ EXPORT_SYMBOL(__nlmsg_put);
static int netlink_dump(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
+ struct netlink_ext_ack extack = {};
struct netlink_callback *cb;
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
@@ -2229,8 +2239,11 @@ static int netlink_dump(struct sock *sk)
skb_reserve(skb, skb_tailroom(skb) - alloc_size);
netlink_skb_set_owner_r(skb, sk);
- if (nlk->dump_done_errno > 0)
+ if (nlk->dump_done_errno > 0) {
+ cb->extack = &extack;
nlk->dump_done_errno = cb->dump(skb, cb);
+ cb->extack = NULL;
+ }
if (nlk->dump_done_errno > 0 ||
skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
@@ -2244,7 +2257,8 @@ static int netlink_dump(struct sock *sk)
}
nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
- sizeof(nlk->dump_done_errno), NLM_F_MULTI);
+ sizeof(nlk->dump_done_errno),
+ NLM_F_MULTI | cb->answer_flags);
if (WARN_ON(!nlh))
goto errout_skb;
@@ -2253,6 +2267,12 @@ static int netlink_dump(struct sock *sk)
memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
sizeof(nlk->dump_done_errno));
+ if (extack._msg && nlk->flags & NETLINK_F_EXT_ACK) {
+ nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
+ if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack._msg))
+ nlmsg_end(skb, nlh);
+ }
+
if (sk_filter(sk, skb))
kfree_skb(skb);
else
@@ -2279,9 +2299,9 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *control)
{
+ struct netlink_sock *nlk, *nlk2;
struct netlink_callback *cb;
struct sock *sk;
- struct netlink_sock *nlk;
int ret;
refcount_inc(&skb->users);
@@ -2315,6 +2335,9 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb->min_dump_alloc = control->min_dump_alloc;
cb->skb = skb;
+ nlk2 = nlk_sk(NETLINK_CB(skb).sk);
+ cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK);
+
if (control->start) {
ret = control->start(cb);
if (ret)
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 962de7b3c023..5f454c8de6a4 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -15,6 +15,7 @@
#define NETLINK_F_LISTEN_ALL_NSID 0x10
#define NETLINK_F_CAP_ACK 0x20
#define NETLINK_F_EXT_ACK 0x40
+#define NETLINK_F_STRICT_CHK 0x80
#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long))
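
The NETLINK_F_STRICT_CHK flag above backs a new per-socket option that lets userspace opt in to strict kernel-side validation of dump requests. A minimal userspace sketch, assuming the matching uapi change exports NETLINK_DUMP_STRICT_CHK through <linux/netlink.h> (that hunk is not shown here) and that libc exposes SOL_NETLINK:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	int main(void)
	{
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
		int one = 1;

		if (fd < 0) {
			perror("socket");
			return 1;
		}

		/* Opt in: dump requests with malformed headers or unknown
		 * attributes/flags are now rejected, and netlink_dump()
		 * above reports extack strings back on NLMSG_DONE.
		 */
		if (setsockopt(fd, SOL_NETLINK, NETLINK_DUMP_STRICT_CHK,
			       &one, sizeof(one)) < 0) {
			perror("setsockopt(NETLINK_DUMP_STRICT_CHK)");
			return 1;
		}
		return 0;
	}

The option can be read back with getsockopt(), as the NETLINK_DUMP_STRICT_CHK case added to netlink_getsockopt() shows.
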
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index ac8030c4bcf8..19cb2e473ea6 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
create_info = (struct hci_create_pipe_resp *)skb->data;
+ if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
	/* Save the newly created pipe and bind with local gate,
* the description for skb->data[3] is destination gate id
* but since we received this cmd from host controller, we
@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
delete_info = (struct hci_delete_pipe_noti *)skb->data;
+ if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
break;
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index dd4adf8b1167..ae296273ce3d 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -556,7 +556,7 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
pr_debug("%p\n", sk);
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
if (sk->sk_state == LLCP_LISTEN)
return llcp_accept_poll(sk);
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
index a66f102c6c01..78fe622eba65 100644
--- a/net/nfc/nci/uart.c
+++ b/net/nfc/nci/uart.c
@@ -192,10 +192,8 @@ static void nci_uart_tty_close(struct tty_struct *tty)
if (!nu)
return;
- if (nu->tx_skb)
- kfree_skb(nu->tx_skb);
- if (nu->rx_skb)
- kfree_skb(nu->rx_skb);
+ kfree_skb(nu->tx_skb);
+ kfree_skb(nu->rx_skb);
skb_queue_purge(&nu->tx_q);
@@ -465,6 +463,7 @@ static struct tty_ldisc_ops nci_uart_ldisc = {
.receive_buf = nci_uart_tty_receive,
.write_wakeup = nci_uart_tty_wakeup,
.ioctl = nci_uart_tty_ioctl,
+ .compat_ioctl = nci_uart_tty_ioctl,
};
static int __init nci_uart_init(void)
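
The nci_uart_tty_close() cleanup above works because kfree_skb() is itself NULL-safe, so the removed guards were redundant. Sketch of the equivalence (illustrative only):

	struct sk_buff *skb = NULL;

	kfree_skb(skb);		/* no-op on NULL; same effect as the old
				 * "if (skb) kfree_skb(skb);" form */
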
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 86a75105af1a..6bec37ab4472 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -933,6 +933,11 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
struct nf_conn *ct;
if (!cached) {
+ struct nf_hook_state state = {
+ .hook = NF_INET_PRE_ROUTING,
+ .pf = info->family,
+ .net = net,
+ };
struct nf_conn *tmpl = info->ct;
int err;
@@ -944,8 +949,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
nf_ct_set(skb, tmpl, IP_CT_NEW);
}
- err = nf_conntrack_in(net, info->family,
- NF_INET_PRE_ROUTING, skb);
+ err = nf_conntrack_in(skb, &state);
if (err != NF_ACCEPT)
return -ENOENT;
@@ -1312,6 +1316,10 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
rcu_assign_pointer(help->helper, helper);
info->helper = helper;
+
+ if (info->nat)
+ request_module("ip_nat_%s", name);
+
return 0;
}
@@ -1624,10 +1632,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
OVS_NLERR(log, "Failed to allocate conntrack template");
return -ENOMEM;
}
-
- __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
- nf_conntrack_get(&ct_info.ct->ct_general);
-
if (helper) {
err = ovs_ct_add_helper(&ct_info, helper, key, log);
if (err)
@@ -1639,6 +1643,8 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
if (err)
goto err_free_ct;
+ __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
+ nf_conntrack_get(&ct_info.ct->ct_general);
return 0;
err_free_ct:
__ovs_ct_free_action(&ct_info);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0f5ce77460d4..6679e96ab1dc 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1182,14 +1182,14 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
ovs_header->dp_ifindex,
reply, info->snd_portid,
info->snd_seq, 0,
- OVS_FLOW_CMD_NEW,
+ OVS_FLOW_CMD_SET,
ufid_flags);
BUG_ON(error < 0);
}
} else {
/* Could not alloc without acts before locking. */
reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
- info, OVS_FLOW_CMD_NEW, false,
+ info, OVS_FLOW_CMD_SET, false,
ufid_flags);
if (IS_ERR(reply)) {
@@ -1265,7 +1265,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
}
reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
- OVS_FLOW_CMD_NEW, true, ufid_flags);
+ OVS_FLOW_CMD_GET, true, ufid_flags);
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
goto unlock;
@@ -1389,7 +1389,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- OVS_FLOW_CMD_NEW, ufid_flags) < 0)
+ OVS_FLOW_CMD_GET, ufid_flags) < 0)
break;
cb->args[0] = bucket;
@@ -1730,7 +1730,7 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
ovs_dp_change(dp, info->attrs);
err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
- info->snd_seq, 0, OVS_DP_CMD_NEW);
+ info->snd_seq, 0, OVS_DP_CMD_SET);
BUG_ON(err < 0);
ovs_unlock();
@@ -1761,7 +1761,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_free;
}
err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
- info->snd_seq, 0, OVS_DP_CMD_NEW);
+ info->snd_seq, 0, OVS_DP_CMD_GET);
BUG_ON(err < 0);
ovs_unlock();
@@ -1785,7 +1785,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (i >= skip &&
ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- OVS_DP_CMD_NEW) < 0)
+ OVS_DP_CMD_GET) < 0)
break;
i++;
}
@@ -2101,7 +2101,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
info->snd_portid, info->snd_seq, 0,
- OVS_VPORT_CMD_NEW);
+ OVS_VPORT_CMD_SET);
BUG_ON(err < 0);
ovs_unlock();
@@ -2182,7 +2182,7 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
goto exit_unlock_free;
err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
info->snd_portid, info->snd_seq, 0,
- OVS_VPORT_CMD_NEW);
+ OVS_VPORT_CMD_GET);
BUG_ON(err < 0);
rcu_read_unlock();
@@ -2218,7 +2218,7 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI,
- OVS_VPORT_CMD_NEW) < 0)
+ OVS_VPORT_CMD_GET) < 0)
goto out;
j++;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 56b8e7167790..35966da84769 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -254,21 +254,18 @@ static bool icmphdr_ok(struct sk_buff *skb)
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
+ unsigned short frag_off;
+ unsigned int payload_ofs = 0;
unsigned int nh_ofs = skb_network_offset(skb);
unsigned int nh_len;
- int payload_ofs;
struct ipv6hdr *nh;
- uint8_t nexthdr;
- __be16 frag_off;
- int err;
+ int err, nexthdr, flags = 0;
err = check_header(skb, nh_ofs + sizeof(*nh));
if (unlikely(err))
return err;
nh = ipv6_hdr(skb);
- nexthdr = nh->nexthdr;
- payload_ofs = (u8 *)(nh + 1) - skb->data;
key->ip.proto = NEXTHDR_NONE;
key->ip.tos = ipv6_get_dsfield(nh);
@@ -277,10 +274,9 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
key->ipv6.addr.src = nh->saddr;
key->ipv6.addr.dst = nh->daddr;
- payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
-
- if (frag_off) {
- if (frag_off & htons(~0x7))
+ nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
+ if (flags & IP6_FH_F_FRAG) {
+ if (frag_off)
key->ip.frag = OVS_FRAG_TYPE_LATER;
else
key->ip.frag = OVS_FRAG_TYPE_FIRST;
@@ -288,11 +284,11 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
key->ip.frag = OVS_FRAG_TYPE_NONE;
}
- /* Delayed handling of error in ipv6_skip_exthdr() as it
- * always sets frag_off to a valid value which may be
+ /* Delayed handling of error in ipv6_find_hdr() as it
+ * always sets flags and frag_off to valid values which may be
* used to set key->ip.frag above.
*/
- if (unlikely(payload_ofs < 0))
+ if (unlikely(nexthdr < 0))
return -EPROTO;
nh_len = payload_ofs - nh_ofs;
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index bb95c43aae76..26f71cbf7527 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -43,7 +43,8 @@ static struct internal_dev *internal_dev_priv(struct net_device *netdev)
}
/* Called with rcu_read_lock_bh. */
-static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t
+internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
int len, err;
@@ -62,7 +63,7 @@ static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
} else {
netdev->stats.tx_errors++;
}
- return 0;
+ return NETDEV_TX_OK;
}
static int internal_dev_open(struct net_device *netdev)
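
The return-type change above matches the ndo_start_xmit prototype in struct net_device_ops, so the compiler can type-check the handler. The expected member signature (per netdevice.h conventions):

	netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
				      struct net_device *dev);
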
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 75c92a87e7b2..ec3095f13aae 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2715,10 +2715,12 @@ tpacket_error:
}
}
- if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
- vio_le())) {
- tp_len = -EINVAL;
- goto tpacket_error;
+ if (po->has_vnet_hdr) {
+ if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
+ tp_len = -EINVAL;
+ goto tpacket_error;
+ }
+ virtio_net_hdr_set_proto(skb, vnet_hdr);
}
skb->destructor = tpacket_destruct_skb;
@@ -2915,6 +2917,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (err)
goto out_free;
len += sizeof(vnet_hdr);
+ virtio_net_hdr_set_proto(skb, &vnet_hdr);
}
skb_probe_transport_header(skb, reserve);
@@ -3805,6 +3808,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
return fanout_set_data(po, optval, optlen);
}
+ case PACKET_IGNORE_OUTGOING:
+ {
+ int val;
+
+ if (optlen != sizeof(val))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ po->prot_hook.ignore_outgoing = !!val;
+ return 0;
+ }
case PACKET_TX_HAS_OFF:
{
unsigned int val;
@@ -3928,6 +3945,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
((u32)po->fanout->flags << 24)) :
0);
break;
+ case PACKET_IGNORE_OUTGOING:
+ val = po->prot_hook.ignore_outgoing;
+ break;
case PACKET_ROLLOVER_STATS:
if (!po->rollover)
return -EINVAL;
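
PACKET_IGNORE_OUTGOING gives packet sockets a kernel-side filter for looped-back transmit traffic. A minimal userspace sketch, assuming the matching uapi change exports the constant in <linux/if_packet.h> (that hunk is not shown here); as with any AF_PACKET socket, CAP_NET_RAW is required:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <arpa/inet.h>
	#include <linux/if_ether.h>
	#include <linux/if_packet.h>

	int main(void)
	{
		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		int one = 1;

		if (fd < 0) {
			perror("socket");
			return 1;
		}

		/* Receive only incoming frames; outgoing copies are dropped
		 * in the prot_hook before they reach this socket.
		 */
		if (setsockopt(fd, SOL_PACKET, PACKET_IGNORE_OUTGOING,
			       &one, sizeof(one)) < 0) {
			perror("setsockopt(PACKET_IGNORE_OUTGOING)");
			return 1;
		}
		return 0;
	}
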
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 73427ff439f9..71ff356ee702 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -443,7 +443,7 @@ int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
/* ib_stats.c */
-DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
rds_stats_add_which(rds_ib_stats, member, count)
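
A DECLARE_PER_CPU variant must match the alignment of its definition; this hunk presumably brings the declaration in line with a definition of the form (sketch, assuming ib_stats.c defines it this way):

	DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
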
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c4dcf654d8fe..6bfaf05b63b2 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -278,7 +278,7 @@ struct rds_incoming {
struct in6_addr i_saddr;
rds_rdma_cookie_t i_rdma_cookie;
- struct timeval i_rx_tstamp;
+ ktime_t i_rx_tstamp;
u64 i_rx_lat_trace[RDS_RX_MAX_TRACES];
};
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 504cd6bcc54c..727639dac8a7 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -43,18 +43,14 @@
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
struct in6_addr *saddr)
{
- int i;
-
refcount_set(&inc->i_refcount, 1);
INIT_LIST_HEAD(&inc->i_item);
inc->i_conn = conn;
inc->i_saddr = *saddr;
inc->i_rdma_cookie = 0;
- inc->i_rx_tstamp.tv_sec = 0;
- inc->i_rx_tstamp.tv_usec = 0;
+ inc->i_rx_tstamp = ktime_set(0, 0);
- for (i = 0; i < RDS_RX_MAX_TRACES; i++)
- inc->i_rx_lat_trace[i] = 0;
+ memset(inc->i_rx_lat_trace, 0, sizeof(inc->i_rx_lat_trace));
}
EXPORT_SYMBOL_GPL(rds_inc_init);
@@ -67,8 +63,7 @@ void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
inc->i_conn_path = cp;
inc->i_saddr = *saddr;
inc->i_rdma_cookie = 0;
- inc->i_rx_tstamp.tv_sec = 0;
- inc->i_rx_tstamp.tv_usec = 0;
+ inc->i_rx_tstamp = ktime_set(0, 0);
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);
@@ -385,7 +380,7 @@ void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
be32_to_cpu(inc->i_hdr.h_len),
inc->i_hdr.h_dport);
if (sock_flag(sk, SOCK_RCVTSTAMP))
- do_gettimeofday(&inc->i_rx_tstamp);
+ inc->i_rx_tstamp = ktime_get_real();
rds_inc_addref(inc);
inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
list_add_tail(&inc->i_item, &rs->rs_recv_queue);
@@ -552,11 +547,11 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
goto out;
}
- if ((inc->i_rx_tstamp.tv_sec != 0) &&
+ if ((inc->i_rx_tstamp != 0) &&
sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
+ struct timeval tv = ktime_to_timeval(inc->i_rx_tstamp);
ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
- sizeof(struct timeval),
- &inc->i_rx_tstamp);
+ sizeof(tv), &tv);
if (ret)
goto out;
}
diff --git a/net/rds/send.c b/net/rds/send.c
index 57b3d5a8b2db..fe785ee819dd 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1007,7 +1007,8 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
return ret;
}
-static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
+static int rds_send_mprds_hash(struct rds_sock *rs,
+ struct rds_connection *conn, int nonblock)
{
int hash;
@@ -1023,10 +1024,16 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
* used. But if we are interrupted, we have to use the zero
* c_path in case the connection ends up being non-MP capable.
*/
- if (conn->c_npaths == 0)
+ if (conn->c_npaths == 0) {
+ /* Cannot wait for the connection to be made, so just use
+ * the base c_path.
+ */
+ if (nonblock)
+ return 0;
if (wait_event_interruptible(conn->c_hs_waitq,
conn->c_npaths != 0))
hash = 0;
+ }
if (conn->c_npaths == 1)
hash = 0;
}
@@ -1256,7 +1263,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
}
if (conn->c_trans->t_mp_capable)
- cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
+ cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
else
cpath = &conn->c_path[0];
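
The nonblock flag threaded into rds_send_mprds_hash() is derived from the caller's sendmsg flags; a sketch of the line in rds_sendmsg() that this hunk's context does not show:

	int nonblock = msg->msg_flags & MSG_DONTWAIT;

With MSG_DONTWAIT set, the sender may not sleep in wait_event_interruptible() for the MP-capability handshake, so path 0, which is always valid, is used instead.
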
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 1355f5ca8d22..abca57040f37 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -510,8 +510,8 @@ void rfkill_remove_epo_lock(void)
/**
* rfkill_is_epo_lock_active - returns true if EPO is active
*
- * Returns 0 (false) if there is NOT an active EPO contidion,
- * and 1 (true) if there is an active EPO contition, which
+ * Returns 0 (false) if there is NOT an active EPO condition,
+ * and 1 (true) if there is an active EPO condition, which
* locks all radios in one of the BLOCKED states.
*
* Can be called in atomic context.
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index ac44d8afffb1..64362d078da8 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -97,7 +97,8 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx,
srx->transport_len > len)
return -EINVAL;
- if (srx->transport.family != rx->family)
+ if (srx->transport.family != rx->family &&
+ srx->transport.family == AF_INET && rx->family != AF_INET6)
return -EAFNOSUPPORT;
switch (srx->transport.family) {
@@ -385,6 +386,20 @@ u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call)
EXPORT_SYMBOL(rxrpc_kernel_check_life);
/**
+ * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
+ * @sock: The socket the call is on
+ * @call: The call to query
+ *
+ * Allow a kernel service to retrieve the epoch value from a service call to
+ * see if the client at the other end rebooted.
+ */
+u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
+{
+ return call->conn->proto.epoch;
+}
+EXPORT_SYMBOL(rxrpc_kernel_get_epoch);
+
+/**
* rxrpc_kernel_check_call - Check a call's state
* @sock: The socket the call is on
* @call: The call to check
@@ -741,7 +756,7 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
struct rxrpc_sock *rx = rxrpc_sk(sk);
__poll_t mask;
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
mask = 0;
/* the socket is readable if there are any messages waiting on the Rx
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index c97558710421..382196e57a26 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -40,17 +40,12 @@ struct rxrpc_crypt {
struct rxrpc_connection;
/*
- * Mark applied to socket buffers.
+ * Mark applied to socket buffers in skb->mark. skb->priority is used
+ * to pass supplementary information.
*/
enum rxrpc_skb_mark {
- RXRPC_SKB_MARK_DATA, /* data message */
- RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
- RXRPC_SKB_MARK_BUSY, /* server busy message */
- RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
- RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
- RXRPC_SKB_MARK_NET_ERROR, /* network error message */
- RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
- RXRPC_SKB_MARK_NEW_CALL, /* local error message */
+ RXRPC_SKB_MARK_REJECT_BUSY, /* Reject with BUSY */
+ RXRPC_SKB_MARK_REJECT_ABORT, /* Reject with ABORT (code in skb->priority) */
};
/*
@@ -293,7 +288,6 @@ struct rxrpc_peer {
struct hlist_node hash_link;
struct rxrpc_local *local;
struct hlist_head error_targets; /* targets for net error distribution */
- struct work_struct error_distributor;
struct rb_root service_conns; /* Service connections */
struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
time64_t last_tx_at; /* Last time packet sent here */
@@ -304,12 +298,11 @@ struct rxrpc_peer {
unsigned int maxdata; /* data size (MTU - hdrsize) */
unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
int debug_id; /* debug ID for printks */
- int error_report; /* Net (+0) or local (+1000000) to distribute */
-#define RXRPC_LOCAL_ERROR_OFFSET 1000000
struct sockaddr_rxrpc srx; /* remote address */
/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
+ spinlock_t rtt_input_lock; /* RTT lock for input routine */
ktime_t rtt_last_req; /* Time of last RTT request */
u64 rtt; /* Current RTT estimate (in nS) */
u64 rtt_sum; /* Sum of cache contents */
@@ -442,7 +435,7 @@ struct rxrpc_connection {
struct sk_buff_head rx_queue; /* received conn-level packets */
const struct rxrpc_security *security; /* applied security module */
struct key *server_key; /* security for this service */
- struct crypto_skcipher *cipher; /* encryption handle */
+ struct crypto_sync_skcipher *cipher; /* encryption handle */
struct rxrpc_crypt csum_iv; /* packet checksum base */
unsigned long flags;
unsigned long events;
@@ -450,19 +443,29 @@ struct rxrpc_connection {
spinlock_t state_lock; /* state-change lock */
enum rxrpc_conn_cache_state cache_state;
enum rxrpc_conn_proto_state state; /* current state of connection */
- u32 local_abort; /* local abort code */
- u32 remote_abort; /* remote abort code */
+ u32 abort_code; /* Abort code of connection abort */
int debug_id; /* debug ID for printks */
atomic_t serial; /* packet serial number counter */
unsigned int hi_serial; /* highest serial number received */
u32 security_nonce; /* response re-use preventer */
- u16 service_id; /* Service ID, possibly upgraded */
+ u32 service_id; /* Service ID, possibly upgraded */
u8 size_align; /* data size alignment (for security) */
u8 security_size; /* security header size */
u8 security_ix; /* security type */
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
+ short error; /* Local error code */
};
+static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
+{
+ return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
+}
+
+static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
+{
+ return !rxrpc_to_server(sp);
+}
+
/*
* Flags in call->flags.
*/
@@ -633,6 +636,8 @@ struct rxrpc_call {
bool tx_phase; /* T if transmission phase, F if receive phase */
u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */
+ spinlock_t input_lock; /* Lock for packet input to this call */
+
/* receive-phase ACK management */
u8 ackr_reason; /* reason to ACK */
u16 ackr_skew; /* skew on packet being ACK'd */
@@ -717,7 +722,7 @@ extern struct workqueue_struct *rxrpc_workqueue;
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
- struct rxrpc_connection *,
+ struct rxrpc_sock *,
struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
@@ -887,8 +892,9 @@ extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;
void rxrpc_destroy_client_conn_ids(void);
-int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
- struct sockaddr_rxrpc *, gfp_t);
+int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
+ struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
+ gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
@@ -908,7 +914,8 @@ extern unsigned int rxrpc_closed_conn_expiry;
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
- struct sk_buff *);
+ struct sk_buff *,
+ struct rxrpc_peer **);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
@@ -960,7 +967,7 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
/*
* input.c
*/
-void rxrpc_data_ready(struct sock *);
+int rxrpc_input_packet(struct sock *, struct sk_buff *);
/*
* insecure.c
@@ -1031,7 +1038,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
* peer_event.c
*/
void rxrpc_error_report(struct sock *);
-void rxrpc_peer_error_distributor(struct work_struct *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
void rxrpc_peer_keepalive_worker(struct work_struct *);
@@ -1041,22 +1047,22 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
*/
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
const struct sockaddr_rxrpc *);
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
- struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
+ struct rxrpc_peer *);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
void rxrpc_put_peer(struct rxrpc_peer *);
-void __rxrpc_queue_peer_error(struct rxrpc_peer *);
/*
* proc.c
*/
extern const struct seq_operations rxrpc_call_seq_ops;
extern const struct seq_operations rxrpc_connection_seq_ops;
+extern const struct seq_operations rxrpc_peer_seq_ops;
/*
* recvmsg.c
@@ -1093,7 +1099,6 @@ void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
-void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);
/*
@@ -1110,8 +1115,7 @@ static inline void rxrpc_sysctl_exit(void) {}
/*
* utils.c
*/
-int rxrpc_extract_addr_from_skb(struct rxrpc_local *, struct sockaddr_rxrpc *,
- struct sk_buff *);
+int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
static inline bool before(u32 seq1, u32 seq2)
{
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 9d1e298b784c..44860505246d 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -249,11 +249,11 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
*/
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
struct rxrpc_local *local,
+ struct rxrpc_peer *peer,
struct rxrpc_connection *conn,
struct sk_buff *skb)
{
struct rxrpc_backlog *b = rx->backlog;
- struct rxrpc_peer *peer, *xpeer;
struct rxrpc_call *call;
unsigned short call_head, conn_head, peer_head;
unsigned short call_tail, conn_tail, peer_tail;
@@ -276,21 +276,18 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
return NULL;
if (!conn) {
- /* No connection. We're going to need a peer to start off
- * with. If one doesn't yet exist, use a spare from the
- * preallocation set. We dump the address into the spare in
- * anticipation - and to save on stack space.
- */
- xpeer = b->peer_backlog[peer_tail];
- if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
- return NULL;
-
- peer = rxrpc_lookup_incoming_peer(local, xpeer);
- if (peer == xpeer) {
+ if (peer && !rxrpc_get_peer_maybe(peer))
+ peer = NULL;
+ if (!peer) {
+ peer = b->peer_backlog[peer_tail];
+ if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
+ return NULL;
b->peer_backlog[peer_tail] = NULL;
smp_store_release(&b->peer_backlog_tail,
(peer_tail + 1) &
(RXRPC_BACKLOG_MAX - 1));
+
+ rxrpc_new_incoming_peer(rx, local, peer);
}
/* Now allocate and set up the connection */
@@ -335,45 +332,38 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
* The call is returned with the user access mutex held.
*/
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
- struct rxrpc_connection *conn,
+ struct rxrpc_sock *rx,
struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_sock *rx;
+ struct rxrpc_connection *conn;
+ struct rxrpc_peer *peer = NULL;
struct rxrpc_call *call;
- u16 service_id = sp->hdr.serviceId;
_enter("");
- /* Get the socket providing the service */
- rx = rcu_dereference(local->service);
- if (rx && (service_id == rx->srx.srx_service ||
- service_id == rx->second_service))
- goto found_service;
-
- trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- RX_INVALID_OPERATION, EOPNOTSUPP);
- skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
- skb->priority = RX_INVALID_OPERATION;
- _leave(" = NULL [service]");
- return NULL;
-
-found_service:
spin_lock(&rx->incoming_lock);
if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
rx->sk.sk_state == RXRPC_CLOSE) {
trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
- skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb->priority = RX_INVALID_OPERATION;
_leave(" = NULL [close]");
call = NULL;
goto out;
}
- call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
+ /* The peer, connection and call may all have sprung into existence due
+ * to a duplicate packet being handled on another CPU in parallel, so
+ * we have to recheck the routing. However, we're now holding
+ * rx->incoming_lock, so the values should remain stable.
+ */
+ conn = rxrpc_find_connection_rcu(local, skb, &peer);
+
+ call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
if (!call) {
- skb->mark = RXRPC_SKB_MARK_BUSY;
+ skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
_leave(" = NULL [busy]");
call = NULL;
goto out;
@@ -413,20 +403,22 @@ found_service:
case RXRPC_CONN_SERVICE:
write_lock(&call->state_lock);
- if (rx->discard_new_call)
- call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
- else
- call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ if (rx->discard_new_call)
+ call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+ else
+ call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ }
write_unlock(&call->state_lock);
break;
case RXRPC_CONN_REMOTELY_ABORTED:
rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
- conn->remote_abort, -ECONNABORTED);
+ conn->abort_code, conn->error);
break;
case RXRPC_CONN_LOCALLY_ABORTED:
rxrpc_abort_call("CON", call, sp->hdr.seq,
- conn->local_abort, -ECONNABORTED);
+ conn->abort_code, conn->error);
break;
default:
BUG();
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 9486293fef5c..8f1a8f85b1f9 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -138,6 +138,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->lock);
spin_lock_init(&call->notify_lock);
+ spin_lock_init(&call->input_lock);
rwlock_init(&call->state_lock);
atomic_set(&call->usage, 1);
call->debug_id = debug_id;
@@ -287,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
/* Set up or get a connection record and set the protocol parameters,
* including channel number and call ID.
*/
- ret = rxrpc_connect_call(call, cp, srx, gfp);
+ ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
if (ret < 0)
goto error;
@@ -339,7 +340,7 @@ int rxrpc_retry_client_call(struct rxrpc_sock *rx,
/* Set up or get a connection record and set the protocol parameters,
* including channel number and call ID.
*/
- ret = rxrpc_connect_call(call, cp, srx, gfp);
+ ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
if (ret < 0)
goto error;
@@ -400,7 +401,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
rcu_assign_pointer(conn->channels[chan].call, call);
spin_lock(&conn->params.peer->lock);
- hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
+ hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
spin_unlock(&conn->params.peer->lock);
_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index f8f37188a932..521189f4b666 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -276,7 +276,8 @@ dont_reuse:
* If we return with a connection, the call will be on its waiting list. It's
* left to the caller to assign a channel and wake up the call.
*/
-static int rxrpc_get_client_conn(struct rxrpc_call *call,
+static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
gfp_t gfp)
@@ -289,7 +290,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call,
_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
- cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
+ cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
if (!cp->peer)
goto error;
@@ -683,7 +684,8 @@ out:
* find a connection for a call
* - called in process context with IRQs enabled
*/
-int rxrpc_connect_call(struct rxrpc_call *call,
+int rxrpc_connect_call(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
gfp_t gfp)
@@ -696,7 +698,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
rxrpc_cull_active_client_conns(rxnet);
- ret = rxrpc_get_client_conn(call, cp, srx, gfp);
+ ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
if (ret < 0)
goto out;
@@ -710,8 +712,8 @@ int rxrpc_connect_call(struct rxrpc_call *call,
}
spin_lock_bh(&call->conn->params.peer->lock);
- hlist_add_head(&call->error_link,
- &call->conn->params.peer->error_targets);
+ hlist_add_head_rcu(&call->error_link,
+ &call->conn->params.peer->error_targets);
spin_unlock_bh(&call->conn->params.peer->lock);
out:
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 6df56ce68861..b6fca8ebb117 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -126,7 +126,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
switch (chan->last_type) {
case RXRPC_PACKET_TYPE_ABORT:
- _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort);
+ _proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
break;
case RXRPC_PACKET_TYPE_ACK:
trace_rxrpc_tx_ack(chan->call_debug_id, serial,
@@ -153,13 +153,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
* pass a connection-level abort onto all calls on that connection
*/
static void rxrpc_abort_calls(struct rxrpc_connection *conn,
- enum rxrpc_call_completion compl,
- u32 abort_code, int error)
+ enum rxrpc_call_completion compl)
{
struct rxrpc_call *call;
int i;
- _enter("{%d},%x", conn->debug_id, abort_code);
+ _enter("{%d},%x", conn->debug_id, conn->abort_code);
spin_lock(&conn->channel_lock);
@@ -172,9 +171,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
trace_rxrpc_abort(call->debug_id,
"CON", call->cid,
call->call_id, 0,
- abort_code, error);
+ conn->abort_code,
+ conn->error);
if (rxrpc_set_call_completion(call, compl,
- abort_code, error))
+ conn->abort_code,
+ conn->error))
rxrpc_notify_socket(call);
}
}
@@ -207,10 +208,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
return 0;
}
+ conn->error = error;
+ conn->abort_code = abort_code;
conn->state = RXRPC_CONN_LOCALLY_ABORTED;
spin_unlock_bh(&conn->state_lock);
- rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error);
+ rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
msg.msg_name = &conn->params.peer->srx.transport;
msg.msg_namelen = conn->params.peer->srx.transport_len;
@@ -229,7 +232,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
whdr._rsvd = 0;
whdr.serviceId = htons(conn->service_id);
- word = htonl(conn->local_abort);
+ word = htonl(conn->abort_code);
iov[0].iov_base = &whdr;
iov[0].iov_len = sizeof(whdr);
@@ -240,7 +243,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
serial = atomic_inc_return(&conn->serial);
whdr.serial = htonl(serial);
- _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
+ _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
if (ret < 0) {
@@ -315,9 +318,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
abort_code = ntohl(wtmp);
_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
+ conn->error = -ECONNABORTED;
+ conn->abort_code = abort_code;
conn->state = RXRPC_CONN_REMOTELY_ABORTED;
- rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
- abort_code, -ECONNABORTED);
+ rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
return -ECONNABORTED;
case RXRPC_PACKET_TYPE_CHALLENGE:
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 77440a356b14..c332722820c2 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -69,10 +69,14 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
* If successful, a pointer to the connection is returned, but no ref is taken.
* NULL is returned if there is no match.
*
+ * When searching for a service call, if we find a peer but no connection, we
+ * return that through *_peer in case we need to create a new service call.
+ *
* The caller must be holding the RCU read lock.
*/
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct rxrpc_peer **_peer)
{
struct rxrpc_connection *conn;
struct rxrpc_conn_proto k;
@@ -82,14 +86,12 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);
- if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
+ if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
goto not_found;
- k.epoch = sp->hdr.epoch;
- k.cid = sp->hdr.cid & RXRPC_CIDMASK;
-
- /* We may have to handle mixing IPv4 and IPv6 */
- if (srx.transport.family != local->srx.transport.family) {
+ if (srx.transport.family != local->srx.transport.family &&
+ (srx.transport.family == AF_INET &&
+ local->srx.transport.family != AF_INET6)) {
pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
srx.transport.family,
local->srx.transport.family);
@@ -99,7 +101,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
k.epoch = sp->hdr.epoch;
k.cid = sp->hdr.cid & RXRPC_CIDMASK;
- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
+ if (rxrpc_to_server(sp)) {
/* We need to look up service connections by the full protocol
* parameter set. We look up the peer first as an intermediate
* step and then the connection from the peer's tree.
@@ -107,6 +109,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
peer = rxrpc_lookup_peer_rcu(local, &srx);
if (!peer)
goto not_found;
+ *_peer = peer;
conn = rxrpc_find_service_conn_rcu(peer, skb);
if (!conn || atomic_read(&conn->usage) == 0)
goto not_found;
@@ -214,7 +217,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
call->peer->cong_cwnd = call->cong_cwnd;
spin_lock_bh(&conn->params.peer->lock);
- hlist_del_init(&call->error_link);
+ hlist_del_rcu(&call->error_link);
spin_unlock_bh(&conn->params.peer->lock);
if (rxrpc_is_client_call(call))
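
The switch to _rcu list ops allows peer->error_targets to be walked without taking peer->lock on the error-distribution path; the usual reader pattern (illustrative, not part of this hunk):

	rcu_read_lock();
	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		/* deliver the error to each call without peer->lock */
	}
	rcu_read_unlock();
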
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index cfdc199c6351..9128aa0e40aa 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
/*
* Apply a hard ACK by advancing the Tx window.
*/
-static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
struct rxrpc_ack_summary *summary)
{
struct sk_buff *skb, *list = NULL;
+ bool rot_last = false;
int ix;
u8 annotation;
@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
skb->next = list;
list = skb;
- if (annotation & RXRPC_TX_ANNO_LAST)
+ if (annotation & RXRPC_TX_ANNO_LAST) {
set_bit(RXRPC_CALL_TX_LAST, &call->flags);
+ rot_last = true;
+ }
if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
summary->nr_rot_new_acks++;
}
spin_unlock(&call->lock);
- trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
+ trace_rxrpc_transmit(call, (rot_last ?
rxrpc_transmit_rotate_last :
rxrpc_transmit_rotate));
wake_up(&call->waitq);
@@ -259,9 +262,11 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
while (list) {
skb = list;
list = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
}
+
+ return rot_last;
}
/*
@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
const char *abort_why)
{
+ unsigned int state;
ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
write_lock(&call->state_lock);
- switch (call->state) {
+ state = call->state;
+ switch (state) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
if (reply_begun)
- call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
+ call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
else
- call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+ call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
break;
case RXRPC_CALL_SERVER_AWAIT_ACK:
__rxrpc_call_completed(call);
rxrpc_notify_socket(call);
+ state = call->state;
break;
default:
@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
}
write_unlock(&call->state_lock);
- if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
+ if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
- } else {
+ else
trace_rxrpc_transmit(call, rxrpc_transmit_end);
- }
_leave(" = ok");
return true;
@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
}
- if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
- rxrpc_rotate_tx_window(call, top, &summary);
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
- rxrpc_proto_abort("TXL", call, top);
- return false;
+ if (!rxrpc_rotate_tx_window(call, top, &summary)) {
+ rxrpc_proto_abort("TXL", call, top);
+ return false;
+ }
}
if (!rxrpc_end_tx_phase(call, true, "ETD"))
return false;
@@ -452,13 +459,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
}
}
+ spin_lock(&call->input_lock);
+
/* Received data implicitly ACKs all of the request packets we sent
* when we're acting as a client.
*/
if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
!rxrpc_receiving_reply(call))
- return;
+ goto unlock;
call->ackr_prev_seq = seq;
@@ -488,12 +497,16 @@ next_subpacket:
if (flags & RXRPC_LAST_PACKET) {
if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
- seq != call->rx_top)
- return rxrpc_proto_abort("LSN", call, seq);
+ seq != call->rx_top) {
+ rxrpc_proto_abort("LSN", call, seq);
+ goto unlock;
+ }
} else {
if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
- after_eq(seq, call->rx_top))
- return rxrpc_proto_abort("LSA", call, seq);
+ after_eq(seq, call->rx_top)) {
+ rxrpc_proto_abort("LSA", call, seq);
+ goto unlock;
+ }
}
trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
@@ -560,8 +573,10 @@ next_subpacket:
skip:
offset += len;
if (flags & RXRPC_JUMBO_PACKET) {
- if (skb_copy_bits(skb, offset, &flags, 1) < 0)
- return rxrpc_proto_abort("XJF", call, seq);
+ if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
+ rxrpc_proto_abort("XJF", call, seq);
+ goto unlock;
+ }
offset += sizeof(struct rxrpc_jumbo_header);
seq++;
serial++;
@@ -601,6 +616,9 @@ ack:
trace_rxrpc_notify_socket(call->debug_id, serial);
rxrpc_notify_socket(call);
}
+
+unlock:
+ spin_unlock(&call->input_lock);
_leave(" [queued]");
}
@@ -622,13 +640,14 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
if (!skb)
continue;
+ sent_at = skb->tstamp;
+ smp_rmb(); /* Read timestamp before serial. */
sp = rxrpc_skb(skb);
if (sp->hdr.serial != orig_serial)
continue;
- smp_rmb();
- sent_at = skb->tstamp;
goto found;
}
+
return;
found:
@@ -686,15 +705,14 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
ping_time = call->ping_time;
smp_rmb();
- ping_serial = call->ping_serial;
+ ping_serial = READ_ONCE(call->ping_serial);
if (orig_serial == call->acks_lost_ping)
rxrpc_input_check_for_lost_ack(call);
- if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
- before(orig_serial, ping_serial))
+ if (before(orig_serial, ping_serial) ||
+ !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
return;
- clear_bit(RXRPC_CALL_PINGING, &call->flags);
if (after(orig_serial, ping_serial))
return;
@@ -860,15 +878,32 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
rxrpc_propose_ack_respond_to_ack);
}
+ /* Discard any out-of-order or duplicate ACKs. */
+ if (before_eq(sp->hdr.serial, call->acks_latest))
+ return;
+
+ buf.info.rxMTU = 0;
ioffset = offset + nr_acks + 3;
- if (skb->len >= ioffset + sizeof(buf.info)) {
- if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
- return rxrpc_proto_abort("XAI", call, 0);
+ if (skb->len >= ioffset + sizeof(buf.info) &&
+ skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
+ return rxrpc_proto_abort("XAI", call, 0);
+
+ spin_lock(&call->input_lock);
+
+ /* Discard any out-of-order or duplicate ACKs. */
+ if (before_eq(sp->hdr.serial, call->acks_latest))
+ goto out;
+ call->acks_latest_ts = skb->tstamp;
+ call->acks_latest = sp->hdr.serial;
+
+ /* Parse rwind and mtu sizes if provided. */
+ if (buf.info.rxMTU)
rxrpc_input_ackinfo(call, skb, &buf.info);
- }
- if (first_soft_ack == 0)
- return rxrpc_proto_abort("AK0", call, 0);
+ if (first_soft_ack == 0) {
+ rxrpc_proto_abort("AK0", call, 0);
+ goto out;
+ }
/* Ignore ACKs unless we are or have just been transmitting. */
switch (READ_ONCE(call->state)) {
@@ -878,39 +913,35 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
case RXRPC_CALL_SERVER_AWAIT_ACK:
break;
default:
- return;
- }
-
- /* Discard any out-of-order or duplicate ACKs. */
- if (before_eq(sp->hdr.serial, call->acks_latest)) {
- _debug("discard ACK %d <= %d",
- sp->hdr.serial, call->acks_latest);
- return;
+ goto out;
}
- call->acks_latest_ts = skb->tstamp;
- call->acks_latest = sp->hdr.serial;
if (before(hard_ack, call->tx_hard_ack) ||
- after(hard_ack, call->tx_top))
- return rxrpc_proto_abort("AKW", call, 0);
- if (nr_acks > call->tx_top - hard_ack)
- return rxrpc_proto_abort("AKN", call, 0);
+ after(hard_ack, call->tx_top)) {
+ rxrpc_proto_abort("AKW", call, 0);
+ goto out;
+ }
+ if (nr_acks > call->tx_top - hard_ack) {
+ rxrpc_proto_abort("AKN", call, 0);
+ goto out;
+ }
- if (after(hard_ack, call->tx_hard_ack))
- rxrpc_rotate_tx_window(call, hard_ack, &summary);
+ if (after(hard_ack, call->tx_hard_ack)) {
+ if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
+ rxrpc_end_tx_phase(call, false, "ETA");
+ goto out;
+ }
+ }
if (nr_acks > 0) {
- if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
- return rxrpc_proto_abort("XSA", call, 0);
+ if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) {
+ rxrpc_proto_abort("XSA", call, 0);
+ goto out;
+ }
rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
&summary);
}
- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
- rxrpc_end_tx_phase(call, false, "ETA");
- return;
- }
-
if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack &&
@@ -919,7 +950,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
false, true,
rxrpc_propose_ack_ping_for_lost_reply);
- return rxrpc_congestion_management(call, skb, &summary, acked_serial);
+ rxrpc_congestion_management(call, skb, &summary, acked_serial);
+out:
+ spin_unlock(&call->input_lock);
}
/*
@@ -932,9 +965,12 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
_proto("Rx ACKALL %%%u", sp->hdr.serial);
- rxrpc_rotate_tx_window(call, call->tx_top, &summary);
- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+ spin_lock(&call->input_lock);
+
+ if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
rxrpc_end_tx_phase(call, false, "ETL");
+
+ spin_unlock(&call->input_lock);
}
/*
@@ -1017,18 +1053,19 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
}
/*
- * Handle a new call on a channel implicitly completing the preceding call on
- * that channel.
+ * Handle a new service call on a channel implicitly completing the preceding
+ * call on that channel. This does not apply to client conns.
*
* TODO: If callNumber > call_id + 1, renegotiate security.
*/
-static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
+static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
+ struct rxrpc_connection *conn,
struct rxrpc_call *call)
{
switch (READ_ONCE(call->state)) {
case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call);
- break;
+ /* Fall through */
case RXRPC_CALL_COMPLETE:
break;
default:
@@ -1036,11 +1073,13 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
+ trace_rxrpc_improper_term(call);
break;
}
- trace_rxrpc_improper_term(call);
+ spin_lock(&rx->incoming_lock);
__rxrpc_disconnect_call(conn, call);
+ spin_unlock(&rx->incoming_lock);
rxrpc_notify_socket(call);
}
@@ -1119,43 +1158,29 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
* The socket is locked by the caller and this prevents the socket from being
* shut down and the local endpoint from going away, thus sk_user_data will not
* be cleared until this function returns.
+ *
+ * Called with the RCU read lock held from the IP layer via UDP.
*/
-void rxrpc_data_ready(struct sock *udp_sk)
+int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
{
struct rxrpc_connection *conn;
struct rxrpc_channel *chan;
- struct rxrpc_call *call;
+ struct rxrpc_call *call = NULL;
struct rxrpc_skb_priv *sp;
struct rxrpc_local *local = udp_sk->sk_user_data;
- struct sk_buff *skb;
+ struct rxrpc_peer *peer = NULL;
+ struct rxrpc_sock *rx = NULL;
unsigned int channel;
- int ret, skew;
+ int skew = 0;
_enter("%p", udp_sk);
- ASSERT(!irqs_disabled());
-
- skb = skb_recv_udp(udp_sk, 0, 1, &ret);
- if (!skb) {
- if (ret == -EAGAIN)
- return;
- _debug("UDP socket error %d", ret);
- return;
- }
+ if (skb->tstamp == 0)
+ skb->tstamp = ktime_get_real();
rxrpc_new_skb(skb, rxrpc_skb_rx_received);
- _net("recv skb %p", skb);
-
- /* we'll probably need to checksum it (didn't call sock_recvmsg) */
- if (skb_checksum_complete(skb)) {
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
- __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
- _leave(" [CSUM failed]");
- return;
- }
-
- __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);
+ skb_pull(skb, sizeof(struct udphdr));
/* The UDP protocol already released all skb resources;
* we are free to add our own data there.
@@ -1170,69 +1195,104 @@ void rxrpc_data_ready(struct sock *udp_sk)
static int lose;
if ((lose++ & 7) == 7) {
trace_rxrpc_rx_lose(sp);
- rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
- return;
+ rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
+ return 0;
}
}
+ if (skb->tstamp == 0)
+ skb->tstamp = ktime_get_real();
trace_rxrpc_rx_packet(sp);
- _net("Rx RxRPC %s ep=%x call=%x:%x",
- sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
- sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
-
- if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
- !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
- _proto("Rx Bad Packet Type %u", sp->hdr.type);
- goto bad_message;
- }
-
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_VERSION:
- if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
+ if (rxrpc_to_client(sp))
goto discard;
rxrpc_post_packet_to_local(local, skb);
goto out;
case RXRPC_PACKET_TYPE_BUSY:
- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
+ if (rxrpc_to_server(sp))
goto discard;
/* Fall through */
+ case RXRPC_PACKET_TYPE_ACK:
+ case RXRPC_PACKET_TYPE_ACKALL:
+ if (sp->hdr.callNumber == 0)
+ goto bad_message;
+ /* Fall through */
+ case RXRPC_PACKET_TYPE_ABORT:
+ break;
case RXRPC_PACKET_TYPE_DATA:
- if (sp->hdr.callNumber == 0)
+ if (sp->hdr.callNumber == 0 ||
+ sp->hdr.seq == 0)
goto bad_message;
if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
!rxrpc_validate_jumbo(skb))
goto bad_message;
break;
+ case RXRPC_PACKET_TYPE_CHALLENGE:
+ if (rxrpc_to_server(sp))
+ goto discard;
+ break;
+ case RXRPC_PACKET_TYPE_RESPONSE:
+ if (rxrpc_to_client(sp))
+ goto discard;
+ break;
+
/* Packet types 9-11 should just be ignored. */
case RXRPC_PACKET_TYPE_PARAMS:
case RXRPC_PACKET_TYPE_10:
case RXRPC_PACKET_TYPE_11:
goto discard;
+
+ default:
+ _proto("Rx Bad Packet Type %u", sp->hdr.type);
+ goto bad_message;
}
- rcu_read_lock();
+ if (sp->hdr.serviceId == 0)
+ goto bad_message;
+
+ if (rxrpc_to_server(sp)) {
+ /* Weed out packets to services we're not offering. Packets
+ * that would begin a call are explicitly rejected and the rest
+ * are just discarded.
+ */
+ rx = rcu_dereference(local->service);
+ if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
+ sp->hdr.serviceId != rx->second_service)) {
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+ sp->hdr.seq == 1)
+ goto unsupported_service;
+ goto discard;
+ }
+ }
- conn = rxrpc_find_connection_rcu(local, skb);
+ conn = rxrpc_find_connection_rcu(local, skb, &peer);
if (conn) {
if (sp->hdr.securityIndex != conn->security_ix)
goto wrong_security;
if (sp->hdr.serviceId != conn->service_id) {
- if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) ||
- conn->service_id != conn->params.service_id)
+ int old_id;
+
+ if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
+ goto reupgrade;
+ old_id = cmpxchg(&conn->service_id, conn->params.service_id,
+ sp->hdr.serviceId);
+
+ if (old_id != conn->params.service_id &&
+ old_id != sp->hdr.serviceId)
goto reupgrade;
- conn->service_id = sp->hdr.serviceId;
}
if (sp->hdr.callNumber == 0) {
/* Connection-level packet */
_debug("CONN %p {%d}", conn, conn->debug_id);
rxrpc_post_packet_to_conn(conn, skb);
- goto out_unlock;
+ goto out;
}
/* Note the serial number skew here */
@@ -1251,19 +1311,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
/* Ignore really old calls */
if (sp->hdr.callNumber < chan->last_call)
- goto discard_unlock;
+ goto discard;
if (sp->hdr.callNumber == chan->last_call) {
if (chan->call ||
sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
- goto discard_unlock;
+ goto discard;
/* For the previous service call, if completed
* successfully, we discard all further packets.
*/
if (rxrpc_conn_is_service(conn) &&
chan->last_type == RXRPC_PACKET_TYPE_ACK)
- goto discard_unlock;
+ goto discard;
/* But otherwise we need to retransmit the final packet
* from data cached in the connection record.
@@ -1274,18 +1334,16 @@ void rxrpc_data_ready(struct sock *udp_sk)
sp->hdr.serial,
sp->hdr.flags, 0);
rxrpc_post_packet_to_conn(conn, skb);
- goto out_unlock;
+ goto out;
}
call = rcu_dereference(chan->call);
if (sp->hdr.callNumber > chan->call_id) {
- if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
- rcu_read_unlock();
+ if (rxrpc_to_client(sp))
goto reject_packet;
- }
if (call)
- rxrpc_input_implicit_end_call(conn, call);
+ rxrpc_input_implicit_end_call(rx, conn, call);
call = NULL;
}
@@ -1297,66 +1355,57 @@ void rxrpc_data_ready(struct sock *udp_sk)
if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
}
- } else {
- skew = 0;
- call = NULL;
}
if (!call || atomic_read(&call->usage) == 0) {
- if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
- sp->hdr.callNumber == 0 ||
+ if (rxrpc_to_client(sp) ||
sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
- goto bad_message_unlock;
+ goto bad_message;
if (sp->hdr.seq != 1)
- goto discard_unlock;
- call = rxrpc_new_incoming_call(local, conn, skb);
- if (!call) {
- rcu_read_unlock();
+ goto discard;
+ call = rxrpc_new_incoming_call(local, rx, skb);
+ if (!call)
goto reject_packet;
- }
rxrpc_send_ping(call, skb, skew);
mutex_unlock(&call->user_mutex);
}
rxrpc_input_call_packet(call, skb, skew);
- goto discard_unlock;
+ goto discard;
-discard_unlock:
- rcu_read_unlock();
discard:
rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
trace_rxrpc_rx_done(0, 0);
- return;
-
-out_unlock:
- rcu_read_unlock();
- goto out;
+ return 0;
wrong_security:
- rcu_read_unlock();
trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RXKADINCONSISTENCY, EBADMSG);
skb->priority = RXKADINCONSISTENCY;
goto post_abort;
+unsupported_service:
+ trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RX_INVALID_OPERATION, EOPNOTSUPP);
+ skb->priority = RX_INVALID_OPERATION;
+ goto post_abort;
+
reupgrade:
- rcu_read_unlock();
trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG);
goto protocol_error;
-bad_message_unlock:
- rcu_read_unlock();
bad_message:
trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG);
protocol_error:
skb->priority = RX_PROTOCOL_ERROR;
post_abort:
- skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
reject_packet:
trace_rxrpc_rx_done(skb->mark, skb->priority);
rxrpc_reject_packet(local, skb);
_leave(" [badmsg]");
+ return 0;
}
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c
index 13bd8a4dfac7..927ead43df42 100644
--- a/net/rxrpc/local_event.c
+++ b/net/rxrpc/local_event.c
@@ -39,7 +39,7 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
_enter("");
- if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
+ if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
return;
msg.msg_name = &srx.transport;
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 777c3ed4cfc0..0906e51d3cfb 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -19,6 +19,7 @@
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
+#include <net/udp.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
@@ -108,7 +109,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
*/
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
- struct sock *sock;
+ struct sock *usk;
int ret, opt;
_enter("%p{%d,%d}",
@@ -122,6 +123,28 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
return ret;
}
+ /* set the socket up */
+ usk = local->socket->sk;
+ inet_sk(usk)->mc_loop = 0;
+
+ /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
+ inet_inc_convert_csum(usk);
+
+ rcu_assign_sk_user_data(usk, local);
+
+ udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
+ udp_sk(usk)->encap_rcv = rxrpc_input_packet;
+ udp_sk(usk)->encap_destroy = NULL;
+ udp_sk(usk)->gro_receive = NULL;
+ udp_sk(usk)->gro_complete = NULL;
+
+ udp_encap_enable();
+#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
+ if (local->srx.transport.family == AF_INET6)
+ udpv6_encap_enable();
+#endif
+ usk->sk_error_report = rxrpc_error_report;
+
/* if a local address was supplied then bind it */
if (local->srx.transport_len > sizeof(sa_family_t)) {
_debug("bind");
@@ -135,10 +158,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
}
switch (local->srx.transport.family) {
- case AF_INET:
- /* we want to receive ICMP errors */
+ case AF_INET6:
+ /* we want to receive ICMPv6 errors */
opt = 1;
- ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
+ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
@@ -146,19 +169,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
}
/* we want to set the don't fragment bit */
- opt = IP_PMTUDISC_DO;
- ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
+ opt = IPV6_PMTUDISC_DO;
+ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
goto error;
}
- break;
- case AF_INET6:
+ /* Fall through and set IPv4 options too, otherwise we don't get
+ * errors from IPv4 packets sent through the IPv6 socket.
+ */
+
+ case AF_INET:
/* we want to receive ICMP errors */
opt = 1;
- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
+ ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
@@ -166,24 +192,28 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
}
/* we want to set the don't fragment bit */
- opt = IPV6_PMTUDISC_DO;
- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
+ opt = IP_PMTUDISC_DO;
+ ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
goto error;
}
+
+ /* We want receive timestamps. */
+ opt = 1;
+ ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
+ (char *)&opt, sizeof(opt));
+ if (ret < 0) {
+ _debug("setsockopt failed");
+ goto error;
+ }
break;
default:
BUG();
}
- /* set the socket up */
- sock = local->socket->sk;
- sock->sk_user_data = local;
- sock->sk_data_ready = rxrpc_data_ready;
- sock->sk_error_report = rxrpc_error_report;
_leave(" = 0");
return 0;
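
[annotation] Two things worth noting in the hunks above: the AF_INET6 case now deliberately falls through into AF_INET so that a v6 transport socket also gets the IPv4 error-reporting and PMTU-discovery options, and SO_TIMESTAMPNS makes the stack timestamp every received skb, which rxrpc_kernel_get_reply_time() (recvmsg.c below) later reads back via skb_get_ktime(). The same timestamp option works on an ordinary userspace datagram socket; a runnable sketch:

    #include <sys/socket.h>

    /* Enable kernel receive timestamps on any Linux datagram socket fd. */
    int enable_rx_timestamps(int fd)
    {
            int on = 1;

            return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS,
                              &on, sizeof(on));
    }
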
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index 417d80867c4f..fd7eba8467fa 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -102,6 +102,9 @@ static __net_init int rxrpc_init_net(struct net *net)
proc_create_net("conns", 0444, rxnet->proc_net,
&rxrpc_connection_seq_ops,
sizeof(struct seq_net_private));
+ proc_create_net("peers", 0444, rxnet->proc_net,
+ &rxrpc_peer_seq_ops,
+ sizeof(struct seq_net_private));
return 0;
err_proc:
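
[annotation] proc_create_net() binds a seq_operations table to a per-netns proc entry; the final argument sizes the per-open iterator state, whose first member must be a struct seq_net_private so that seq_file_net() can recover the namespace (as rxrpc_peer_seq_start() in proc.c below relies on). The general shape, with hypothetical example_* names:

    static const struct seq_operations example_seq_ops = {
            .start = example_start,
            .next  = example_next,
            .stop  = example_stop,
            .show  = example_show,
    };

    /* In the per-netns init function: */
    proc_create_net("example", 0444, parent_dir, &example_seq_ops,
                    sizeof(struct seq_net_private));
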
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index ccf5de160444..189418888839 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -124,7 +124,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
struct kvec iov[2];
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top;
- ktime_t now;
size_t len, n;
int ret;
u8 reason;
@@ -196,9 +195,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
/* We need to stick a time in before we send the packet in case
* the reply gets back before kernel_sendmsg() completes - but
* asking UDP to send the packet can take a relatively long
- * time, so we update the time after, on the assumption that
- * the packet transmission is more likely to happen towards the
- * end of the kernel_sendmsg() call.
+ * time.
*/
call->ping_time = ktime_get_real();
set_bit(RXRPC_CALL_PINGING, &call->flags);
@@ -206,9 +203,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
}
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
- now = ktime_get_real();
- if (ping)
- call->ping_time = now;
conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
@@ -363,8 +357,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
/* If our RTT cache needs working on, request an ACK. Also request
* ACKs if a DATA packet appears to have been lost.
+ *
+ * However, we mustn't request an ACK on the last reply packet of a
+ * service call, lest OpenAFS incorrectly send us an ACK with some
+ * soft-ACKs in it and then never follow up with a proper hard ACK.
*/
- if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
+ if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
+ rxrpc_to_server(sp)
+ ) &&
(test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
retrans ||
call->cong_mode == RXRPC_CALL_SLOW_START ||
@@ -378,11 +378,13 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
if ((lose++ & 7) == 7) {
ret = 0;
lost = true;
- goto done;
}
}
- _proto("Tx DATA %%%u { #%u }", serial, sp->hdr.seq);
+ trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
+ retrans, lost);
+ if (lost)
+ goto done;
/* send the packet with the don't fragment bit set if we currently
* think it's small enough */
@@ -390,6 +392,11 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
goto send_fragmentable;
down_read(&conn->params.local->defrag_sem);
+
+ sp->hdr.serial = serial;
+ smp_wmb(); /* Set serial before timestamp */
+ skb->tstamp = ktime_get_real();
+
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
* to go out of the interface
@@ -410,15 +417,9 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
goto send_fragmentable;
done:
- trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
- retrans, lost);
if (ret >= 0) {
- ktime_t now = ktime_get_real();
- skb->tstamp = now;
- smp_wmb();
- sp->hdr.serial = serial;
if (whdr.flags & RXRPC_REQUEST_ACK) {
- call->peer->rtt_last_req = now;
+ call->peer->rtt_last_req = skb->tstamp;
trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
if (call->peer->rtt_usage > 1) {
unsigned long nowj = jiffies, ack_lost_at;
@@ -457,6 +458,10 @@ send_fragmentable:
down_write(&conn->params.local->defrag_sem);
+ sp->hdr.serial = serial;
+ smp_wmb(); /* Set serial before timestamp */
+ skb->tstamp = ktime_get_real();
+
switch (conn->params.local->srx.transport.family) {
case AF_INET:
opt = IP_PMTUDISC_DONT;
@@ -519,7 +524,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
struct kvec iov[2];
size_t size;
__be32 code;
- int ret;
+ int ret, ioc;
_enter("%d", local->debug_id);
@@ -527,7 +532,6 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
iov[0].iov_len = sizeof(whdr);
iov[1].iov_base = &code;
iov[1].iov_len = sizeof(code);
- size = sizeof(whdr) + sizeof(code);
msg.msg_name = &srx.transport;
msg.msg_control = NULL;
@@ -535,16 +539,30 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
msg.msg_flags = 0;
memset(&whdr, 0, sizeof(whdr));
- whdr.type = RXRPC_PACKET_TYPE_ABORT;
while ((skb = skb_dequeue(&local->reject_queue))) {
rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
sp = rxrpc_skb(skb);
- if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) {
- msg.msg_namelen = srx.transport_len;
-
+ switch (skb->mark) {
+ case RXRPC_SKB_MARK_REJECT_BUSY:
+ whdr.type = RXRPC_PACKET_TYPE_BUSY;
+ size = sizeof(whdr);
+ ioc = 1;
+ break;
+ case RXRPC_SKB_MARK_REJECT_ABORT:
+ whdr.type = RXRPC_PACKET_TYPE_ABORT;
code = htonl(skb->priority);
+ size = sizeof(whdr) + sizeof(code);
+ ioc = 2;
+ break;
+ default:
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ continue;
+ }
+
+ if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
+ msg.msg_namelen = srx.transport_len;
whdr.epoch = htonl(sp->hdr.epoch);
whdr.cid = htonl(sp->hdr.cid);
@@ -554,7 +572,8 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
whdr.flags ^= RXRPC_CLIENT_INITIATED;
whdr.flags &= RXRPC_CLIENT_INITIATED;
- ret = kernel_sendmsg(local->socket, &msg, iov, 2, size);
+ ret = kernel_sendmsg(local->socket, &msg,
+ iov, ioc, size);
if (ret < 0)
trace_rxrpc_tx_fail(local->debug_id, 0, ret,
rxrpc_tx_point_reject);
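
[annotation] Both transmit paths above now publish sp->hdr.serial before skb->tstamp, separated by smp_wmb(), so any reader that observes a timestamp is guaranteed to see the matching serial. A runnable userspace C11 model of the same store-release / load-acquire pairing (the kernel uses explicit barriers instead; the field names here are illustrative):

    #include <stdatomic.h>

    struct pkt {
            unsigned int serial;    /* plain field, written first   */
            _Atomic long tstamp;    /* publication flag + timestamp */
    };

    void publish(struct pkt *p, unsigned int serial, long now)
    {
            p->serial = serial;
            atomic_store_explicit(&p->tstamp, now, memory_order_release);
    }

    int consume(struct pkt *p, unsigned int *serial, long *ts)
    {
            long t = atomic_load_explicit(&p->tstamp, memory_order_acquire);

            if (!t)
                    return 0;
            *ts = t;
            *serial = p->serial;    /* ordered after the tstamp load */
            return 1;
    }
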
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 4f9da2f51c69..bc05af89fc38 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -23,6 +23,8 @@
#include "ar-internal.h"
static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
+static void rxrpc_distribute_error(struct rxrpc_peer *, int,
+ enum rxrpc_call_completion);
/*
* Find the peer associated with an ICMP packet.
@@ -45,6 +47,8 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
*/
switch (srx->transport.family) {
case AF_INET:
+ srx->transport_len = sizeof(srx->transport.sin);
+ srx->transport.family = AF_INET;
srx->transport.sin.sin_port = serr->port;
switch (serr->ee.ee_origin) {
case SO_EE_ORIGIN_ICMP:
@@ -68,20 +72,20 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
- srx->transport.sin6.sin6_port = serr->port;
switch (serr->ee.ee_origin) {
case SO_EE_ORIGIN_ICMP6:
_net("Rx ICMP6");
+ srx->transport.sin6.sin6_port = serr->port;
memcpy(&srx->transport.sin6.sin6_addr,
skb_network_header(skb) + serr->addr_offset,
sizeof(struct in6_addr));
break;
case SO_EE_ORIGIN_ICMP:
_net("Rx ICMP on v6 sock");
- srx->transport.sin6.sin6_addr.s6_addr32[0] = 0;
- srx->transport.sin6.sin6_addr.s6_addr32[1] = 0;
- srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
- memcpy(srx->transport.sin6.sin6_addr.s6_addr + 12,
+ srx->transport_len = sizeof(srx->transport.sin);
+ srx->transport.family = AF_INET;
+ srx->transport.sin.sin_port = serr->port;
+ memcpy(&srx->transport.sin.sin_addr,
skb_network_header(skb) + serr->addr_offset,
sizeof(struct in_addr));
break;
@@ -193,9 +197,8 @@ void rxrpc_error_report(struct sock *sk)
rxrpc_store_error(peer, serr);
rcu_read_unlock();
rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_put_peer(peer);
- /* The ref we obtained is passed off to the work item */
- __rxrpc_queue_peer_error(peer);
_leave("");
}
@@ -205,6 +208,7 @@ void rxrpc_error_report(struct sock *sk)
static void rxrpc_store_error(struct rxrpc_peer *peer,
struct sock_exterr_skb *serr)
{
+ enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
struct sock_extended_err *ee;
int err;
@@ -255,7 +259,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
case SO_EE_ORIGIN_NONE:
case SO_EE_ORIGIN_LOCAL:
_proto("Rx Received local error { error=%d }", err);
- err += RXRPC_LOCAL_ERROR_OFFSET;
+ compl = RXRPC_CALL_LOCAL_ERROR;
break;
case SO_EE_ORIGIN_ICMP6:
@@ -264,48 +268,23 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
break;
}
- peer->error_report = err;
+ rxrpc_distribute_error(peer, err, compl);
}
/*
- * Distribute an error that occurred on a peer
+ * Distribute an error that occurred on a peer.
*/
-void rxrpc_peer_error_distributor(struct work_struct *work)
+static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
+ enum rxrpc_call_completion compl)
{
- struct rxrpc_peer *peer =
- container_of(work, struct rxrpc_peer, error_distributor);
struct rxrpc_call *call;
- enum rxrpc_call_completion compl;
- int error;
-
- _enter("");
-
- error = READ_ONCE(peer->error_report);
- if (error < RXRPC_LOCAL_ERROR_OFFSET) {
- compl = RXRPC_CALL_NETWORK_ERROR;
- } else {
- compl = RXRPC_CALL_LOCAL_ERROR;
- error -= RXRPC_LOCAL_ERROR_OFFSET;
- }
- _debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);
-
- spin_lock_bh(&peer->lock);
-
- while (!hlist_empty(&peer->error_targets)) {
- call = hlist_entry(peer->error_targets.first,
- struct rxrpc_call, error_link);
- hlist_del_init(&call->error_link);
+ hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
rxrpc_see_call(call);
-
- if (rxrpc_set_call_completion(call, compl, 0, -error))
+ if (call->state < RXRPC_CALL_COMPLETE &&
+ rxrpc_set_call_completion(call, compl, 0, -error))
rxrpc_notify_socket(call);
}
-
- spin_unlock_bh(&peer->lock);
-
- rxrpc_put_peer(peer);
- _leave("");
}
/*
@@ -325,6 +304,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
if (rtt < 0)
return;
+ spin_lock(&peer->rtt_input_lock);
+
/* Replace the oldest datum in the RTT buffer */
sum -= peer->rtt_cache[cursor];
sum += rtt;
@@ -336,6 +317,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
peer->rtt_usage = usage;
}
+ spin_unlock(&peer->rtt_input_lock);
+
/* Now recalculate the average */
if (usage == RXRPC_RTT_CACHE_SIZE) {
avg = sum / RXRPC_RTT_CACHE_SIZE;
@@ -344,6 +327,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
do_div(avg, usage);
}
+ /* Don't need to update this under lock */
peer->rtt = avg;
trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
usage, avg);
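
[annotation] The hunk above serialises writers with the new rtt_input_lock while leaving the final averaged peer->rtt readable without it. The arithmetic is a fixed-size circular buffer with a running sum; a runnable model (RTT_CACHE_SIZE stands in for RXRPC_RTT_CACHE_SIZE, and the kernel would use do_div() for the 64-bit division on 32-bit hosts):

    #include <stdint.h>

    #define RTT_CACHE_SIZE 32       /* assumed size, for illustration */

    struct rtt_model {
            uint64_t cache[RTT_CACHE_SIZE];
            uint64_t sum;
            unsigned int cursor, usage;
    };

    /* Insert one sample; returns the new average. */
    static uint64_t rtt_add(struct rtt_model *m, uint64_t rtt)
    {
            m->sum -= m->cache[m->cursor];  /* drop the oldest datum */
            m->sum += rtt;
            m->cache[m->cursor] = rtt;
            m->cursor = (m->cursor + 1) % RTT_CACHE_SIZE;
            if (m->usage < RTT_CACHE_SIZE)
                    m->usage++;
            return m->sum / m->usage;
    }
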
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 1dc7648e3eff..5691b7d266ca 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -124,11 +124,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
struct rxrpc_net *rxnet = local->rxnet;
hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
- if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
- if (atomic_read(&peer->usage) == 0)
- return NULL;
+ if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
+ atomic_read(&peer->usage) > 0)
return peer;
- }
}
return NULL;
@@ -155,8 +153,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
* assess the MTU size for the network interface through which this peer is
* reached
*/
-static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
+static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
+ struct rxrpc_peer *peer)
{
+ struct net *net = sock_net(&rx->sk);
struct dst_entry *dst;
struct rtable *rt;
struct flowi fl;
@@ -171,7 +171,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
switch (peer->srx.transport.family) {
case AF_INET:
rt = ip_route_output_ports(
- &init_net, fl4, NULL,
+ net, fl4, NULL,
peer->srx.transport.sin.sin_addr.s_addr, 0,
htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
if (IS_ERR(rt)) {
@@ -190,7 +190,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
sizeof(struct in6_addr));
fl6->fl6_dport = htons(7001);
fl6->fl6_sport = htons(7000);
- dst = ip6_route_output(&init_net, NULL, fl6);
+ dst = ip6_route_output(net, NULL, fl6);
if (dst->error) {
_leave(" [route err %d]", dst->error);
return;
@@ -222,11 +222,10 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
atomic_set(&peer->usage, 1);
peer->local = local;
INIT_HLIST_HEAD(&peer->error_targets);
- INIT_WORK(&peer->error_distributor,
- &rxrpc_peer_error_distributor);
peer->service_conns = RB_ROOT;
seqlock_init(&peer->service_conn_lock);
spin_lock_init(&peer->lock);
+ spin_lock_init(&peer->rtt_input_lock);
peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
if (RXRPC_TX_SMSS > 2190)
@@ -244,10 +243,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
/*
* Initialise peer record.
*/
-static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
+static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
+ unsigned long hash_key)
{
peer->hash_key = hash_key;
- rxrpc_assess_MTU_size(peer);
+ rxrpc_assess_MTU_size(rx, peer);
peer->mtu = peer->if_mtu;
peer->rtt_last_req = ktime_get_real();
@@ -279,7 +279,8 @@ static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
/*
* Set up a new peer.
*/
-static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
+static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
+ struct rxrpc_local *local,
struct sockaddr_rxrpc *srx,
unsigned long hash_key,
gfp_t gfp)
@@ -291,7 +292,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
peer = rxrpc_alloc_peer(local, gfp);
if (peer) {
memcpy(&peer->srx, srx, sizeof(*srx));
- rxrpc_init_peer(peer, hash_key);
+ rxrpc_init_peer(rx, peer, hash_key);
}
_leave(" = %p", peer);
@@ -299,40 +300,31 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
}
/*
- * Set up a new incoming peer. The address is prestored in the preallocated
- * peer.
+ * Set up a new incoming peer. There shouldn't be any other matching peers
+ * since we've already done a search in the list from the non-reentrant context
+ * (the data_ready handler), which is the only place we can add new peers.
*/
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
- struct rxrpc_peer *prealloc)
+void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
+ struct rxrpc_peer *peer)
{
- struct rxrpc_peer *peer;
struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key;
- hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
- prealloc->local = local;
- rxrpc_init_peer(prealloc, hash_key);
+ hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+ peer->local = local;
+ rxrpc_init_peer(rx, peer, hash_key);
spin_lock(&rxnet->peer_hash_lock);
-
- /* Need to check that we aren't racing with someone else */
- peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
- if (peer && !rxrpc_get_peer_maybe(peer))
- peer = NULL;
- if (!peer) {
- peer = prealloc;
- hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
- list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
- }
-
+ hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+ list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
spin_unlock(&rxnet->peer_hash_lock);
- return peer;
}
/*
* obtain a remote transport endpoint for the specified address
*/
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
+ struct rxrpc_local *local,
struct sockaddr_rxrpc *srx, gfp_t gfp)
{
struct rxrpc_peer *peer, *candidate;
@@ -352,7 +344,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
/* The peer is not yet present in hash - create a candidate
* for a new record and then redo the search.
*/
- candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
+ candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
if (!candidate) {
_leave(" = NULL [nomem]");
return NULL;
@@ -416,21 +408,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
}
/*
- * Queue a peer record. This passes the caller's ref to the workqueue.
- */
-void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
-{
- const void *here = __builtin_return_address(0);
- int n;
-
- n = atomic_read(&peer->usage);
- if (rxrpc_queue_work(&peer->error_distributor))
- trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
- else
- rxrpc_put_peer(peer);
-}
-
-/*
* Discard a peer record.
*/
static void __rxrpc_put_peer(struct rxrpc_peer *peer)
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 9805e3b85c36..c7d976859d40 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -212,3 +212,129 @@ const struct seq_operations rxrpc_connection_seq_ops = {
.stop = rxrpc_connection_seq_stop,
.show = rxrpc_connection_seq_show,
};
+
+/*
+ * generate a list of extant virtual peers in /proc/net/rxrpc/peers
+ */
+static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
+{
+ struct rxrpc_peer *peer;
+ time64_t now;
+ char lbuff[50], rbuff[50];
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq,
+ "Proto Local "
+ " Remote "
+ " Use CW MTU LastUse RTT Rc\n"
+ );
+ return 0;
+ }
+
+ peer = list_entry(v, struct rxrpc_peer, hash_link);
+
+ sprintf(lbuff, "%pISpc", &peer->local->srx.transport);
+
+ sprintf(rbuff, "%pISpc", &peer->srx.transport);
+
+ now = ktime_get_seconds();
+ seq_printf(seq,
+ "UDP %-47.47s %-47.47s %3u"
+ " %3u %5u %6llus %12llu %2u\n",
+ lbuff,
+ rbuff,
+ atomic_read(&peer->usage),
+ peer->cong_cwnd,
+ peer->mtu,
+ now - peer->last_tx_at,
+ peer->rtt,
+ peer->rtt_cursor);
+
+ return 0;
+}
+
+static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
+ __acquires(rcu)
+{
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+ unsigned int bucket, n;
+ unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
+ void *p;
+
+ rcu_read_lock();
+
+ if (*_pos >= UINT_MAX)
+ return NULL;
+
+ n = *_pos & ((1U << shift) - 1);
+ bucket = *_pos >> shift;
+ for (;;) {
+ if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
+ *_pos = UINT_MAX;
+ return NULL;
+ }
+ if (n == 0) {
+ if (bucket == 0)
+ return SEQ_START_TOKEN;
+ *_pos += 1;
+ n++;
+ }
+
+ p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
+ if (p)
+ return p;
+ bucket++;
+ n = 1;
+ *_pos = (bucket << shift) | n;
+ }
+}
+
+static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
+{
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+ unsigned int bucket, n;
+ unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
+ void *p;
+
+ if (*_pos >= UINT_MAX)
+ return NULL;
+
+ bucket = *_pos >> shift;
+
+ p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
+ if (p)
+ return p;
+
+ for (;;) {
+ bucket++;
+ n = 1;
+ *_pos = (bucket << shift) | n;
+
+ if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
+ *_pos = UINT_MAX;
+ return NULL;
+ }
+ if (n == 0) {
+ *_pos += 1;
+ n++;
+ }
+
+ p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
+ if (p)
+ return p;
+ }
+}
+
+static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
+ __releases(rcu)
+{
+ rcu_read_unlock();
+}
+
+
+const struct seq_operations rxrpc_peer_seq_ops = {
+ .start = rxrpc_peer_seq_start,
+ .next = rxrpc_peer_seq_next,
+ .stop = rxrpc_peer_seq_stop,
+ .show = rxrpc_peer_seq_show,
+};
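
[annotation] The iterator above packs its position into the single loff_t that the seq_file core hands back: the high bits select a hash bucket and the low (32 - HASH_BITS) bits index into that bucket's chain, with index 0 reserved for the SEQ_START_TOKEN header row. A runnable illustration of the encoding (the bucket count here is an assumption):

    #include <stdio.h>

    #define EXAMPLE_HASH_BITS 10            /* 1024 buckets, assumed */
    #define SHIFT (32 - EXAMPLE_HASH_BITS)

    static unsigned long long encode(unsigned int bucket, unsigned int n)
    {
            return ((unsigned long long)bucket << SHIFT) | n;
    }

    int main(void)
    {
            unsigned long long pos = encode(3, 7);

            printf("bucket=%llu n=%llu\n",
                   pos >> SHIFT, pos & ((1ULL << SHIFT) - 1));
            return 0;
    }
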
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index 93da73bf7098..f9cb83c938f3 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -50,7 +50,6 @@ struct rxrpc_wire_header {
#define RXRPC_PACKET_TYPE_10 10 /* Ignored */
#define RXRPC_PACKET_TYPE_11 11 /* Ignored */
#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */
-#define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */
uint8_t flags; /* packet flags */
#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */
@@ -72,20 +71,6 @@ struct rxrpc_wire_header {
} __packed;
-#define RXRPC_SUPPORTED_PACKET_TYPES ( \
- (1 << RXRPC_PACKET_TYPE_DATA) | \
- (1 << RXRPC_PACKET_TYPE_ACK) | \
- (1 << RXRPC_PACKET_TYPE_BUSY) | \
- (1 << RXRPC_PACKET_TYPE_ABORT) | \
- (1 << RXRPC_PACKET_TYPE_ACKALL) | \
- (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \
- (1 << RXRPC_PACKET_TYPE_RESPONSE) | \
- /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \
- (1 << RXRPC_PACKET_TYPE_PARAMS) | \
- (1 << RXRPC_PACKET_TYPE_10) | \
- (1 << RXRPC_PACKET_TYPE_11) | \
- (1 << RXRPC_PACKET_TYPE_VERSION))
-
/*****************************************************************************/
/*
* jumbo packet secondary header
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 816b19a78809..eaf19ebaa964 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -715,3 +715,46 @@ call_complete:
goto out;
}
EXPORT_SYMBOL(rxrpc_kernel_recv_data);
+
+/**
+ * rxrpc_kernel_get_reply_time - Get timestamp on first reply packet
+ * @sock: The socket that the call exists on
+ * @call: The call to query
+ * @_ts: Where to put the timestamp
+ *
+ * Retrieve the timestamp from the first DATA packet of the reply if it is
+ * in the ring. Returns true if successful, false if not.
+ */
+bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call,
+ ktime_t *_ts)
+{
+ struct sk_buff *skb;
+ rxrpc_seq_t hard_ack, top, seq;
+ bool success = false;
+
+ mutex_lock(&call->user_mutex);
+
+ if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY)
+ goto out;
+
+ hard_ack = call->rx_hard_ack;
+ if (hard_ack != 0)
+ goto out;
+
+ seq = hard_ack + 1;
+ top = smp_load_acquire(&call->rx_top);
+ if (after(seq, top))
+ goto out;
+
+ skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
+ if (!skb)
+ goto out;
+
+ *_ts = skb_get_ktime(skb);
+ success = true;
+
+out:
+ mutex_unlock(&call->user_mutex);
+ return success;
+}
+EXPORT_SYMBOL(rxrpc_kernel_get_reply_time);
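
[annotation] A sketch of how an in-kernel caller (kAFS is the intended consumer) might use the new export; record_first_reply_time() is a hypothetical helper, not a real function:

    ktime_t reply_ts;

    if (rxrpc_kernel_get_reply_time(sock, call, &reply_ts))
            record_first_reply_time(reply_ts);      /* hypothetical */
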
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index cea16838d588..cbef9ea43dec 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -46,7 +46,7 @@ struct rxkad_level2_hdr {
* alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
* packets
*/
-static struct crypto_skcipher *rxkad_ci;
+static struct crypto_sync_skcipher *rxkad_ci;
static DEFINE_MUTEX(rxkad_ci_mutex);
/*
@@ -54,7 +54,7 @@ static DEFINE_MUTEX(rxkad_ci_mutex);
*/
static int rxkad_init_connection_security(struct rxrpc_connection *conn)
{
- struct crypto_skcipher *ci;
+ struct crypto_sync_skcipher *ci;
struct rxrpc_key_token *token;
int ret;
@@ -63,14 +63,14 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
token = conn->params.key->payload.data[0];
conn->security_ix = token->security_index;
- ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+ ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
if (IS_ERR(ci)) {
_debug("no cipher");
ret = PTR_ERR(ci);
goto error;
}
- if (crypto_skcipher_setkey(ci, token->kad->session_key,
+ if (crypto_sync_skcipher_setkey(ci, token->kad->session_key,
sizeof(token->kad->session_key)) < 0)
BUG();
@@ -104,7 +104,7 @@ error:
static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
{
struct rxrpc_key_token *token;
- SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
struct scatterlist sg;
struct rxrpc_crypt iv;
__be32 *tmpbuf;
@@ -128,7 +128,7 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
tmpbuf[3] = htonl(conn->security_ix);
sg_init_one(&sg, tmpbuf, tmpsize);
- skcipher_request_set_tfm(req, conn->cipher);
+ skcipher_request_set_sync_tfm(req, conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x);
crypto_skcipher_encrypt(req);
@@ -167,7 +167,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
memset(&iv, 0, sizeof(iv));
sg_init_one(&sg, sechdr, 8);
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
crypto_skcipher_encrypt(req);
@@ -212,7 +212,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
memcpy(&iv, token->kad->session_key, sizeof(iv));
sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg[0], &sg[0], sizeof(rxkhdr), iv.x);
crypto_skcipher_encrypt(req);
@@ -250,7 +250,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
void *sechdr)
{
struct rxrpc_skb_priv *sp;
- SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist sg;
u32 x, y;
@@ -279,7 +279,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
call->crypto_buf[1] = htonl(x);
sg_init_one(&sg, call->crypto_buf, 8);
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
crypto_skcipher_encrypt(req);
@@ -352,7 +352,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
/* start the decryption afresh */
memset(&iv, 0, sizeof(iv));
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, 8, iv.x);
crypto_skcipher_decrypt(req);
@@ -450,7 +450,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
token = call->conn->params.key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, len, iv.x);
crypto_skcipher_decrypt(req);
@@ -506,7 +506,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
unsigned int offset, unsigned int len,
rxrpc_seq_t seq, u16 expected_cksum)
{
- SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist sg;
bool aborted;
@@ -529,7 +529,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
call->crypto_buf[1] = htonl(x);
sg_init_one(&sg, call->crypto_buf, 8);
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
crypto_skcipher_encrypt(req);
@@ -755,7 +755,7 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
struct rxkad_response *resp,
const struct rxkad_key *s2)
{
- SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist sg[1];
@@ -764,7 +764,7 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
sg_init_table(sg, 1);
sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted));
- skcipher_request_set_tfm(req, conn->cipher);
+ skcipher_request_set_sync_tfm(req, conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
crypto_skcipher_encrypt(req);
@@ -1021,7 +1021,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
struct rxkad_response *resp,
const struct rxrpc_crypt *session_key)
{
- SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci);
struct scatterlist sg[1];
struct rxrpc_crypt iv;
@@ -1031,7 +1031,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
ASSERT(rxkad_ci != NULL);
mutex_lock(&rxkad_ci_mutex);
- if (crypto_skcipher_setkey(rxkad_ci, session_key->x,
+ if (crypto_sync_skcipher_setkey(rxkad_ci, session_key->x,
sizeof(*session_key)) < 0)
BUG();
@@ -1039,7 +1039,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
sg_init_table(sg, 1);
sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted));
- skcipher_request_set_tfm(req, rxkad_ci);
+ skcipher_request_set_sync_tfm(req, rxkad_ci);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
crypto_skcipher_decrypt(req);
@@ -1218,7 +1218,7 @@ static void rxkad_clear(struct rxrpc_connection *conn)
_enter("");
if (conn->cipher)
- crypto_free_skcipher(conn->cipher);
+ crypto_free_sync_skcipher(conn->cipher);
}
/*
@@ -1228,7 +1228,7 @@ static int rxkad_init(void)
{
/* pin the cipher we need so that the crypto layer doesn't invoke
* keventd to go get it */
- rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+ rxkad_ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
return PTR_ERR_OR_ZERO(rxkad_ci);
}
@@ -1238,7 +1238,7 @@ static int rxkad_init(void)
static void rxkad_exit(void)
{
if (rxkad_ci)
- crypto_free_skcipher(rxkad_ci);
+ crypto_free_sync_skcipher(rxkad_ci);
}
/*
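
[annotation] The rxkad conversion above swaps every crypto_skcipher for the sync-only variant, which guarantees a software implementation with a bounded request size, making SYNC_SKCIPHER_REQUEST_ON_STACK safe. The new API shape, condensed into one hedged sketch (key, keylen, sg, len and iv are placeholders and error handling is trimmed):

    struct crypto_sync_skcipher *tfm;

    tfm = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
    if (!IS_ERR(tfm)) {
            SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

            crypto_sync_skcipher_setkey(tfm, key, keylen);
            skcipher_request_set_sync_tfm(req, tfm);
            skcipher_request_set_callback(req, 0, NULL, NULL);
            skcipher_request_set_crypt(req, sg, sg, len, iv);
            crypto_skcipher_encrypt(req);
            crypto_free_sync_skcipher(tfm);
    }
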
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index b8985d01876a..913dca65cc65 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -69,21 +69,6 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
}
/*
- * Note the injected loss of a socket buffer.
- */
-void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
-{
- const void *here = __builtin_return_address(0);
- if (skb) {
- int n;
- CHECK_SLAB_OKAY(&skb->users);
- n = atomic_dec_return(select_skb_count(op));
- trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
- kfree_skb(skb);
- }
-}
-
-/*
* Clear a queue of socket buffers.
*/
void rxrpc_purge_queue(struct sk_buff_head *list)
diff --git a/net/rxrpc/utils.c b/net/rxrpc/utils.c
index e801171fa351..ff7af71c4b49 100644
--- a/net/rxrpc/utils.c
+++ b/net/rxrpc/utils.c
@@ -17,28 +17,17 @@
/*
* Fill out a peer address from a socket buffer containing a packet.
*/
-int rxrpc_extract_addr_from_skb(struct rxrpc_local *local,
- struct sockaddr_rxrpc *srx,
- struct sk_buff *skb)
+int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
{
memset(srx, 0, sizeof(*srx));
switch (ntohs(skb->protocol)) {
case ETH_P_IP:
- if (local->srx.transport.family == AF_INET6) {
- srx->transport_type = SOCK_DGRAM;
- srx->transport_len = sizeof(srx->transport.sin6);
- srx->transport.sin6.sin6_family = AF_INET6;
- srx->transport.sin6.sin6_port = udp_hdr(skb)->source;
- srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
- srx->transport.sin6.sin6_addr.s6_addr32[3] = ip_hdr(skb)->saddr;
- } else {
- srx->transport_type = SOCK_DGRAM;
- srx->transport_len = sizeof(srx->transport.sin);
- srx->transport.sin.sin_family = AF_INET;
- srx->transport.sin.sin_port = udp_hdr(skb)->source;
- srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
- }
+ srx->transport_type = SOCK_DGRAM;
+ srx->transport_len = sizeof(srx->transport.sin);
+ srx->transport.sin.sin_family = AF_INET;
+ srx->transport.sin.sin_port = udp_hdr(skb)->source;
+ srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
return 0;
#ifdef CONFIG_AF_RXRPC_IPV6
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index e95741388311..1b9afdee5ba9 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -194,6 +194,17 @@ config NET_SCH_ETF
To compile this code as a module, choose M here: the
module will be called sch_etf.
+config NET_SCH_TAPRIO
+ tristate "Time Aware Priority (taprio) Scheduler"
+ help
+ Say Y here if you want to use the Time Aware Priority (taprio) packet
+ scheduling algorithm.
+
+ See the top of <file:net/sched/sch_taprio.c> for more details.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_taprio.
+
config NET_SCH_GRED
tristate "Generic Random Early Detection (GRED)"
---help---
diff --git a/net/sched/Makefile b/net/sched/Makefile
index f0403f49edcb..8a40431d7b5c 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_NET_SCH_HHF) += sch_hhf.o
obj-$(CONFIG_NET_SCH_PIE) += sch_pie.o
obj-$(CONFIG_NET_SCH_CBS) += sch_cbs.o
obj-$(CONFIG_NET_SCH_ETF) += sch_etf.o
+obj-$(CONFIG_NET_SCH_TAPRIO) += sch_taprio.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index e12f8ef7baa4..9c1b0729aebf 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -81,6 +81,7 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
static void free_tcf(struct tc_action *p)
{
free_percpu(p->cpu_bstats);
+ free_percpu(p->cpu_bstats_hw);
free_percpu(p->cpu_qstats);
tcf_set_action_cookie(&p->act_cookie, NULL);
@@ -103,11 +104,11 @@ static int __tcf_action_put(struct tc_action *p, bool bind)
{
struct tcf_idrinfo *idrinfo = p->idrinfo;
- if (refcount_dec_and_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
+ if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
if (bind)
atomic_dec(&p->tcfa_bindcnt);
idr_remove(&idrinfo->action_idr, p->tcfa_index);
- spin_unlock(&idrinfo->lock);
+ mutex_unlock(&idrinfo->lock);
tcf_action_cleanup(p);
return 1;
@@ -199,7 +200,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
struct tc_action *p;
unsigned long id = 1;
- spin_lock(&idrinfo->lock);
+ mutex_lock(&idrinfo->lock);
s_i = cb->args[0];
@@ -234,7 +235,7 @@ done:
if (index >= 0)
cb->args[0] = index + 1;
- spin_unlock(&idrinfo->lock);
+ mutex_unlock(&idrinfo->lock);
if (n_i) {
if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
cb->args[1] = n_i;
@@ -246,6 +247,20 @@ nla_put_failure:
goto done;
}
+static int tcf_idr_release_unsafe(struct tc_action *p)
+{
+ if (atomic_read(&p->tcfa_bindcnt) > 0)
+ return -EPERM;
+
+ if (refcount_dec_and_test(&p->tcfa_refcnt)) {
+ idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
+ tcf_action_cleanup(p);
+ return ACT_P_DELETED;
+ }
+
+ return 0;
+}
+
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
const struct tc_action_ops *ops)
{
@@ -262,15 +277,19 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
if (nla_put_string(skb, TCA_KIND, ops->kind))
goto nla_put_failure;
+ mutex_lock(&idrinfo->lock);
idr_for_each_entry_ul(idr, p, id) {
- ret = __tcf_idr_release(p, false, true);
+ ret = tcf_idr_release_unsafe(p);
if (ret == ACT_P_DELETED) {
module_put(ops->owner);
n_i++;
} else if (ret < 0) {
+ mutex_unlock(&idrinfo->lock);
goto nla_put_failure;
}
}
+ mutex_unlock(&idrinfo->lock);
+
if (nla_put_u32(skb, TCA_FCNT, n_i))
goto nla_put_failure;
nla_nest_end(skb, nest);
@@ -305,13 +324,13 @@ int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
struct tcf_idrinfo *idrinfo = tn->idrinfo;
struct tc_action *p;
- spin_lock(&idrinfo->lock);
+ mutex_lock(&idrinfo->lock);
p = idr_find(&idrinfo->action_idr, index);
if (IS_ERR(p))
p = NULL;
else if (p)
refcount_inc(&p->tcfa_refcnt);
- spin_unlock(&idrinfo->lock);
+ mutex_unlock(&idrinfo->lock);
if (p) {
*a = p;
@@ -326,10 +345,10 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
struct tc_action *p;
int ret = 0;
- spin_lock(&idrinfo->lock);
+ mutex_lock(&idrinfo->lock);
p = idr_find(&idrinfo->action_idr, index);
if (!p) {
- spin_unlock(&idrinfo->lock);
+ mutex_unlock(&idrinfo->lock);
return -ENOENT;
}
@@ -339,7 +358,7 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
WARN_ON(p != idr_remove(&idrinfo->action_idr,
p->tcfa_index));
- spin_unlock(&idrinfo->lock);
+ mutex_unlock(&idrinfo->lock);
tcf_action_cleanup(p);
module_put(owner);
@@ -350,7 +369,7 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
ret = -EPERM;
}
- spin_unlock(&idrinfo->lock);
+ mutex_unlock(&idrinfo->lock);
return ret;
}
@@ -372,9 +391,12 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
if (!p->cpu_bstats)
goto err1;
+ p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+ if (!p->cpu_bstats_hw)
+ goto err2;
p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
if (!p->cpu_qstats)
- goto err2;
+ goto err3;
}
spin_lock_init(&p->tcfa_lock);
p->tcfa_index = index;
@@ -386,15 +408,17 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
&p->tcfa_rate_est,
&p->tcfa_lock, NULL, est);
if (err)
- goto err3;
+ goto err4;
}
p->idrinfo = idrinfo;
p->ops = ops;
*a = p;
return 0;
-err3:
+err4:
free_percpu(p->cpu_qstats);
+err3:
+ free_percpu(p->cpu_bstats_hw);
err2:
free_percpu(p->cpu_bstats);
err1:
@@ -407,10 +431,10 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
{
struct tcf_idrinfo *idrinfo = tn->idrinfo;
- spin_lock(&idrinfo->lock);
+ mutex_lock(&idrinfo->lock);
/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
WARN_ON(!IS_ERR(idr_replace(&idrinfo->action_idr, a, a->tcfa_index)));
- spin_unlock(&idrinfo->lock);
+ mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);
@@ -420,10 +444,10 @@ void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
struct tcf_idrinfo *idrinfo = tn->idrinfo;
- spin_lock(&idrinfo->lock);
+ mutex_lock(&idrinfo->lock);
/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
- spin_unlock(&idrinfo->lock);
+ mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);
@@ -441,14 +465,14 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
int ret;
again:
- spin_lock(&idrinfo->lock);
+ mutex_lock(&idrinfo->lock);
if (*index) {
p = idr_find(&idrinfo->action_idr, *index);
if (IS_ERR(p)) {
/* This means that another process allocated
* index but did not assign the pointer yet.
*/
- spin_unlock(&idrinfo->lock);
+ mutex_unlock(&idrinfo->lock);
goto again;
}
@@ -461,7 +485,7 @@ again:
} else {
*a = NULL;
ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
- *index, GFP_ATOMIC);
+ *index, GFP_KERNEL);
if (!ret)
idr_replace(&idrinfo->action_idr,
ERR_PTR(-EBUSY), *index);
@@ -470,12 +494,12 @@ again:
*index = 1;
*a = NULL;
ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
- UINT_MAX, GFP_ATOMIC);
+ UINT_MAX, GFP_KERNEL);
if (!ret)
idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
*index);
}
- spin_unlock(&idrinfo->lock);
+ mutex_unlock(&idrinfo->lock);
return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);
@@ -979,6 +1003,8 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
goto errout;
if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
+ gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
+ &p->tcfa_bstats_hw) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
gnet_stats_copy_queue(&d, p->cpu_qstats,
&p->tcfa_qstats,
@@ -1073,12 +1099,14 @@ static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
err = -EINVAL;
ops = tc_lookup_action(tb[TCA_ACT_KIND]);
if (!ops) { /* could happen in batch of actions */
- NL_SET_ERR_MSG(extack, "Specified TC action not found");
+ NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
goto err_out;
}
err = -ENOENT;
- if (ops->lookup(net, &a, index, extack) == 0)
+ if (ops->lookup(net, &a, index) == 0) {
+ NL_SET_ERR_MSG(extack, "TC action with specified index not found");
goto err_mod;
+ }
module_put(ops->owner);
return a;
@@ -1424,7 +1452,7 @@ static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
u32 act_count = 0;
ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX,
- tcaa_policy, NULL);
+ tcaa_policy, cb->extack);
if (ret < 0)
return ret;
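
[annotation] The idrinfo lock changes from a spinlock to a mutex throughout this file so that tcf_idr_check_alloc() can allocate IDR slots with GFP_KERNEL, which may sleep, instead of GFP_ATOMIC; sleeping is forbidden under a spinlock but fine under a mutex. It is also why tcf_del_walker() can now hold the lock across the whole walk and needs the unlocked-release helper tcf_idr_release_unsafe(). The resulting pattern, condensed from the hunks above:

    mutex_lock(&idrinfo->lock);                     /* sleepable context */
    ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
                        UINT_MAX, GFP_KERNEL);      /* may sleep: now OK */
    mutex_unlock(&idrinfo->lock);
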
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 0c68bc9cf0b4..c7633843e223 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -387,8 +387,7 @@ static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, bpf_net_id);
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 6f0f273f1139..8475913f2070 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -143,8 +143,10 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
return -EEXIST;
}
/* replacing action and zone */
+ spin_lock_bh(&ci->tcf_lock);
ci->tcf_action = parm->action;
ci->zone = parm->zone;
+ spin_unlock_bh(&ci->tcf_lock);
ret = 0;
}
@@ -156,16 +158,16 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_connmark_info *ci = to_connmark(a);
-
struct tc_connmark opt = {
.index = ci->tcf_index,
.refcnt = refcount_read(&ci->tcf_refcnt) - ref,
.bindcnt = atomic_read(&ci->tcf_bindcnt) - bind,
- .action = ci->tcf_action,
- .zone = ci->zone,
};
struct tcf_t t;
+ spin_lock_bh(&ci->tcf_lock);
+ opt.action = ci->tcf_action;
+ opt.zone = ci->zone;
if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -173,9 +175,12 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t,
TCA_CONNMARK_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&ci->tcf_lock);
return skb->len;
+
nla_put_failure:
+ spin_unlock_bh(&ci->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -190,8 +195,7 @@ static int tcf_connmark_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, connmark_net_id);
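
[annotation] act_connmark (and act_nat below) previously copied mutable action fields into the netlink dump struct with no protection against a concurrent init/replace; both now snapshot those fields under tcf_lock, leaving only immutable values in the struct initialiser. The pattern, condensed (ci names follow the connmark hunk):

    spin_lock_bh(&ci->tcf_lock);
    opt.action = ci->tcf_action;    /* mutable fields read under the lock */
    opt.zone   = ci->zone;
    if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
            goto nla_put_failure;   /* failure path drops the lock too    */
    spin_unlock_bh(&ci->tcf_lock);
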
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index b8a67ae3105a..3dc25b7806d7 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -646,8 +646,7 @@ static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, csum_net_id);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index cd1d9bd32ef9..b61c20ebb314 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -88,6 +88,11 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
p_parm = nla_data(tb[TCA_GACT_PROB]);
if (p_parm->ptype >= MAX_RAND)
return -EINVAL;
+ if (TC_ACT_EXT_CMP(p_parm->paction, TC_ACT_GOTO_CHAIN)) {
+ NL_SET_ERR_MSG(extack,
+ "goto chain not allowed on fallback");
+ return -EINVAL;
+ }
}
#endif
@@ -157,7 +162,7 @@ static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
}
static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
- u64 lastuse)
+ u64 lastuse, bool hw)
{
struct tcf_gact *gact = to_gact(a);
int action = READ_ONCE(gact->tcf_action);
@@ -168,6 +173,10 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
if (action == TC_ACT_SHOT)
this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
+ if (hw)
+ _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats_hw),
+ bytes, packets);
+
tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
@@ -222,8 +231,7 @@ static int tcf_gact_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_gact_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_gact_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, gact_net_id);
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 06a3d4801878..30b63fa23ee2 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -855,8 +855,7 @@ static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, ife_net_id);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 23273b5303fd..8af6c11d2482 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -135,7 +135,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
}
td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
- if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+ if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
if (exists)
tcf_idr_release(*a, bind);
else
@@ -329,8 +329,7 @@ static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, ipt_net_id);
@@ -379,8 +378,7 @@ static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, xt_net_id);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 8bf66d0a6800..1dae5f2b358f 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -283,12 +283,15 @@ out:
}
static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
- u64 lastuse)
+ u64 lastuse, bool hw)
{
struct tcf_mirred *m = to_mirred(a);
struct tcf_t *tm = &m->tcf_tm;
_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+ if (hw)
+ _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
+ bytes, packets);
tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
@@ -338,8 +341,7 @@ static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, mirred_net_id);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 4313aa102440..c5c1e23add77 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -256,28 +256,31 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
unsigned char *b = skb_tail_pointer(skb);
struct tcf_nat *p = to_tcf_nat(a);
struct tc_nat opt = {
- .old_addr = p->old_addr,
- .new_addr = p->new_addr,
- .mask = p->mask,
- .flags = p->flags,
-
.index = p->tcf_index,
- .action = p->tcf_action,
.refcnt = refcount_read(&p->tcf_refcnt) - ref,
.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
};
struct tcf_t t;
+ spin_lock_bh(&p->tcf_lock);
+ opt.old_addr = p->old_addr;
+ opt.new_addr = p->new_addr;
+ opt.mask = p->mask;
+ opt.flags = p->flags;
+ opt.action = p->tcf_action;
+
if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
tcf_tm_dump(&t, &p->tcf_tm);
if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&p->tcf_lock);
return skb->len;
nla_put_failure:
+ spin_unlock_bh(&p->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -292,8 +295,7 @@ static int tcf_nat_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, nat_net_id);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index ad99a99f11f6..da3dd0f68cc2 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -460,8 +460,7 @@ static int tcf_pedit_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, pedit_net_id);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 5d8bfa878477..052855d47354 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,8 +22,7 @@
#include <net/act_api.h>
#include <net/netlink.h>
-struct tcf_police {
- struct tc_action common;
+struct tcf_police_params {
int tcfp_result;
u32 tcfp_ewma_rate;
s64 tcfp_burst;
@@ -36,6 +35,12 @@ struct tcf_police {
bool rate_present;
struct psched_ratecfg peak;
bool peak_present;
+ struct rcu_head rcu;
+};
+
+struct tcf_police {
+ struct tc_action common;
+ struct tcf_police_params __rcu *params;
};
#define to_police(pc) ((struct tcf_police *)pc)
@@ -84,6 +89,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
struct tcf_police *police;
struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
struct tc_action_net *tn = net_generic(net, police_net_id);
+ struct tcf_police_params *new;
bool exists = false;
int size;
@@ -110,7 +116,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, parm->index, NULL, a,
- &act_police_ops, bind, false);
+ &act_police_ops, bind, true);
if (ret) {
tcf_idr_cleanup(tn, parm->index);
return ret;
@@ -137,7 +143,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
if (est) {
- err = gen_replace_estimator(&police->tcf_bstats, NULL,
+ err = gen_replace_estimator(&police->tcf_bstats,
+ police->common.cpu_bstats,
&police->tcf_rate_est,
&police->tcf_lock,
NULL, est);
@@ -150,50 +157,68 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
goto failure;
}
- spin_lock_bh(&police->tcf_lock);
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (unlikely(!new)) {
+ err = -ENOMEM;
+ goto failure;
+ }
+
/* No failure allowed after this point */
- police->tcfp_mtu = parm->mtu;
- if (police->tcfp_mtu == 0) {
- police->tcfp_mtu = ~0;
+ new->tcfp_mtu = parm->mtu;
+ if (!new->tcfp_mtu) {
+ new->tcfp_mtu = ~0;
if (R_tab)
- police->tcfp_mtu = 255 << R_tab->rate.cell_log;
+ new->tcfp_mtu = 255 << R_tab->rate.cell_log;
}
if (R_tab) {
- police->rate_present = true;
- psched_ratecfg_precompute(&police->rate, &R_tab->rate, 0);
+ new->rate_present = true;
+ psched_ratecfg_precompute(&new->rate, &R_tab->rate, 0);
qdisc_put_rtab(R_tab);
} else {
- police->rate_present = false;
+ new->rate_present = false;
}
if (P_tab) {
- police->peak_present = true;
- psched_ratecfg_precompute(&police->peak, &P_tab->rate, 0);
+ new->peak_present = true;
+ psched_ratecfg_precompute(&new->peak, &P_tab->rate, 0);
qdisc_put_rtab(P_tab);
} else {
- police->peak_present = false;
+ new->peak_present = false;
}
- if (tb[TCA_POLICE_RESULT])
- police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
- police->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
- police->tcfp_toks = police->tcfp_burst;
- if (police->peak_present) {
- police->tcfp_mtu_ptoks = (s64) psched_l2t_ns(&police->peak,
- police->tcfp_mtu);
- police->tcfp_ptoks = police->tcfp_mtu_ptoks;
+ new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
+ new->tcfp_toks = new->tcfp_burst;
+ if (new->peak_present) {
+ new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
+ new->tcfp_mtu);
+ new->tcfp_ptoks = new->tcfp_mtu_ptoks;
}
- police->tcf_action = parm->action;
if (tb[TCA_POLICE_AVRATE])
- police->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
+ new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
+
+ if (tb[TCA_POLICE_RESULT]) {
+ new->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
+ if (TC_ACT_EXT_CMP(new->tcfp_result, TC_ACT_GOTO_CHAIN)) {
+ NL_SET_ERR_MSG(extack,
+ "goto chain not allowed on fallback");
+ err = -EINVAL;
+ goto failure;
+ }
+ }
+ spin_lock_bh(&police->tcf_lock);
+ new->tcfp_t_c = ktime_get_ns();
+ police->tcf_action = parm->action;
+ rcu_swap_protected(police->params,
+ new,
+ lockdep_is_held(&police->tcf_lock));
spin_unlock_bh(&police->tcf_lock);
- if (ret != ACT_P_CREATED)
- return ret;
- police->tcfp_t_c = ktime_get_ns();
- tcf_idr_insert(tn, *a);
+ if (new)
+ kfree_rcu(new, rcu);
+ if (ret == ACT_P_CREATED)
+ tcf_idr_insert(tn, *a);
return ret;
failure:
@@ -207,64 +232,69 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_police *police = to_police(a);
- s64 now;
- s64 toks;
- s64 ptoks = 0;
+ struct tcf_police_params *p;
+ s64 now, toks, ptoks = 0;
+ int ret;
- spin_lock(&police->tcf_lock);
-
- bstats_update(&police->tcf_bstats, skb);
tcf_lastuse_update(&police->tcf_tm);
+ bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);
+
+ ret = READ_ONCE(police->tcf_action);
+ p = rcu_dereference_bh(police->params);
- if (police->tcfp_ewma_rate) {
+ if (p->tcfp_ewma_rate) {
struct gnet_stats_rate_est64 sample;
if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
- sample.bps >= police->tcfp_ewma_rate) {
- police->tcf_qstats.overlimits++;
- if (police->tcf_action == TC_ACT_SHOT)
- police->tcf_qstats.drops++;
- spin_unlock(&police->tcf_lock);
- return police->tcf_action;
- }
+ sample.bps >= p->tcfp_ewma_rate)
+ goto inc_overlimits;
}
- if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
- if (!police->rate_present) {
- spin_unlock(&police->tcf_lock);
- return police->tcfp_result;
+ if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
+ if (!p->rate_present) {
+ ret = p->tcfp_result;
+ goto end;
}
now = ktime_get_ns();
- toks = min_t(s64, now - police->tcfp_t_c,
- police->tcfp_burst);
- if (police->peak_present) {
- ptoks = toks + police->tcfp_ptoks;
- if (ptoks > police->tcfp_mtu_ptoks)
- ptoks = police->tcfp_mtu_ptoks;
- ptoks -= (s64) psched_l2t_ns(&police->peak,
- qdisc_pkt_len(skb));
+ toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst);
+ if (p->peak_present) {
+ ptoks = toks + p->tcfp_ptoks;
+ if (ptoks > p->tcfp_mtu_ptoks)
+ ptoks = p->tcfp_mtu_ptoks;
+ ptoks -= (s64)psched_l2t_ns(&p->peak,
+ qdisc_pkt_len(skb));
}
- toks += police->tcfp_toks;
- if (toks > police->tcfp_burst)
- toks = police->tcfp_burst;
- toks -= (s64) psched_l2t_ns(&police->rate, qdisc_pkt_len(skb));
+ toks += p->tcfp_toks;
+ if (toks > p->tcfp_burst)
+ toks = p->tcfp_burst;
+ toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
if ((toks|ptoks) >= 0) {
- police->tcfp_t_c = now;
- police->tcfp_toks = toks;
- police->tcfp_ptoks = ptoks;
- if (police->tcfp_result == TC_ACT_SHOT)
- police->tcf_qstats.drops++;
- spin_unlock(&police->tcf_lock);
- return police->tcfp_result;
+ p->tcfp_t_c = now;
+ p->tcfp_toks = toks;
+ p->tcfp_ptoks = ptoks;
+ ret = p->tcfp_result;
+ goto inc_drops;
}
}
- police->tcf_qstats.overlimits++;
- if (police->tcf_action == TC_ACT_SHOT)
- police->tcf_qstats.drops++;
- spin_unlock(&police->tcf_lock);
- return police->tcf_action;
+inc_overlimits:
+ qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
+inc_drops:
+ if (ret == TC_ACT_SHOT)
+ qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
+end:
+ return ret;
+}
+
+static void tcf_police_cleanup(struct tc_action *a)
+{
+ struct tcf_police *police = to_police(a);
+ struct tcf_police_params *p;
+
+ p = rcu_dereference_protected(police->params, 1);
+ if (p)
+ kfree_rcu(p, rcu);
}
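
On the packet path above, tcf_police_act() no longer takes tcf_lock at all: the parameter block is fetched with rcu_dereference_bh() and counters go to per-CPU stats. A hedged sketch of that read side, reusing the hypothetical types from the previous sketch:

static u32 read_mtu(struct obj *o)
{
	struct params *p;
	u32 mtu;

	/* Explicit here; in the qdisc fast path the softirq context
	 * already provides RCU-bh protection, which is why the act()
	 * path can call rcu_dereference_bh() directly.
	 */
	rcu_read_lock_bh();
	p = rcu_dereference_bh(o->params);
	mtu = p->mtu;
	rcu_read_unlock_bh();

	return mtu;
}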
static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
@@ -272,6 +302,7 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_police *police = to_police(a);
+ struct tcf_police_params *p;
struct tc_police opt = {
.index = police->tcf_index,
.refcnt = refcount_read(&police->tcf_refcnt) - ref,
@@ -281,19 +312,21 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
spin_lock_bh(&police->tcf_lock);
opt.action = police->tcf_action;
- opt.mtu = police->tcfp_mtu;
- opt.burst = PSCHED_NS2TICKS(police->tcfp_burst);
- if (police->rate_present)
- psched_ratecfg_getrate(&opt.rate, &police->rate);
- if (police->peak_present)
- psched_ratecfg_getrate(&opt.peakrate, &police->peak);
+ p = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ opt.mtu = p->tcfp_mtu;
+ opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
+ if (p->rate_present)
+ psched_ratecfg_getrate(&opt.rate, &p->rate);
+ if (p->peak_present)
+ psched_ratecfg_getrate(&opt.peakrate, &p->peak);
if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
goto nla_put_failure;
- if (police->tcfp_result &&
- nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result))
+ if (p->tcfp_result &&
+ nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
goto nla_put_failure;
- if (police->tcfp_ewma_rate &&
- nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
+ if (p->tcfp_ewma_rate &&
+ nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
goto nla_put_failure;
t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
@@ -312,8 +345,7 @@ nla_put_failure:
return -1;
}
-static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, police_net_id);
@@ -333,6 +365,7 @@ static struct tc_action_ops act_police_ops = {
.init = tcf_police_init,
.walk = tcf_police_walker,
.lookup = tcf_police_search,
+ .cleanup = tcf_police_cleanup,
.size = sizeof(struct tcf_police),
};
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 44e9c00657bc..1a0c682fd734 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, parm->index, est, a,
- &act_sample_ops, bind, false);
+ &act_sample_ops, bind, true);
if (ret) {
tcf_idr_cleanup(tn, parm->index);
return ret;
@@ -224,8 +224,7 @@ static int tcf_sample_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, sample_net_id);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 52400d49f81f..902957beceb3 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -188,8 +188,7 @@ static int tcf_simp_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, simp_net_id);
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 73e44ce2a883..64dba3708fce 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -99,7 +99,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, skbedit_net_id);
- struct tcf_skbedit_params *params_old, *params_new;
+ struct tcf_skbedit_params *params_new;
struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
struct tc_skbedit *parm;
struct tcf_skbedit *d;
@@ -187,8 +187,6 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
}
}
- ASSERT_RTNL();
-
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
if (ret == ACT_P_CREATED)
@@ -210,11 +208,13 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
if (flags & SKBEDIT_F_MASK)
params_new->mask = *mask;
+ spin_lock_bh(&d->tcf_lock);
d->tcf_action = parm->action;
- params_old = rtnl_dereference(d->params);
- rcu_assign_pointer(d->params, params_new);
- if (params_old)
- kfree_rcu(params_old, rcu);
+ rcu_swap_protected(d->params, params_new,
+ lockdep_is_held(&d->tcf_lock));
+ spin_unlock_bh(&d->tcf_lock);
+ if (params_new)
+ kfree_rcu(params_new, rcu);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
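
act_skbedit above follows the same publish-and-swap scheme as act_police. Both rely on rcu_swap_protected(); for reference, the macro is roughly the following (per rcupdate.h of this era, exact wording may differ):

/* Swap an RCU-protected pointer with a plain one while holding the
 * update-side lock named by c; afterwards ptr holds the old value,
 * which the caller hands to kfree_rcu() once off the lock.
 */
#define rcu_swap_protected(rcu_ptr, ptr, c) do {			\
	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	(ptr) = __tmp;							\
} while (0)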
@@ -231,12 +231,14 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
.index = d->tcf_index,
.refcnt = refcount_read(&d->tcf_refcnt) - ref,
.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
- .action = d->tcf_action,
};
u64 pure_flags = 0;
struct tcf_t t;
- params = rtnl_dereference(d->params);
+ spin_lock_bh(&d->tcf_lock);
+ params = rcu_dereference_protected(d->params,
+ lockdep_is_held(&d->tcf_lock));
+ opt.action = d->tcf_action;
if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -264,9 +266,12 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &d->tcf_tm);
if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
goto nla_put_failure;
+ spin_unlock_bh(&d->tcf_lock);
+
return skb->len;
nla_put_failure:
+ spin_unlock_bh(&d->tcf_lock);
nlmsg_trim(skb, b);
return -1;
}
@@ -291,8 +296,7 @@ static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, skbedit_net_id);
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 588077fafd6c..59710a183bd3 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -251,8 +251,7 @@ static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, skbmod_net_id);
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 681f6f04e7da..4cca8f274662 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -548,8 +548,7 @@ static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 033d273afe50..ba677d54a7af 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -288,8 +288,7 @@ static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
-static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack)
+static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
{
struct tc_action_net *tn = net_generic(net, vlan_net_id);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 1a67af8a6e8c..f427a1e00e7e 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -31,6 +31,8 @@
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
+extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+
/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);
@@ -240,8 +242,8 @@ static void tcf_chain_destroy(struct tcf_chain *chain)
if (!chain->index)
block->chain0.chain = NULL;
kfree(chain);
- if (list_empty(&block->chain_list) && block->refcnt == 0)
- kfree(block);
+ if (list_empty(&block->chain_list) && !refcount_read(&block->refcnt))
+ kfree_rcu(block, rcu);
}
static void tcf_chain_hold(struct tcf_chain *chain)
@@ -473,6 +475,7 @@ tcf_chain0_head_change_cb_del(struct tcf_block *block,
}
struct tcf_net {
+ spinlock_t idr_lock; /* Protects idr */
struct idr idr;
};
@@ -482,16 +485,25 @@ static int tcf_block_insert(struct tcf_block *block, struct net *net,
struct netlink_ext_ack *extack)
{
struct tcf_net *tn = net_generic(net, tcf_net_id);
+ int err;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&tn->idr_lock);
+ err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
+ GFP_NOWAIT);
+ spin_unlock(&tn->idr_lock);
+ idr_preload_end();
- return idr_alloc_u32(&tn->idr, block, &block->index, block->index,
- GFP_KERNEL);
+ return err;
}
static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
struct tcf_net *tn = net_generic(net, tcf_net_id);
+ spin_lock(&tn->idr_lock);
idr_remove(&tn->idr, block->index);
+ spin_unlock(&tn->idr_lock);
}
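
tcf_block_insert() now allocates its IDR slot inside a spinlock, so it must not sleep there; idr_preload() stocks per-CPU free nodes with GFP_KERNEL beforehand, letting the locked allocation use GFP_NOWAIT. The general shape of the idiom, sketched with generic names:

static int insert_locked(struct idr *idr, spinlock_t *lock,
			 void *ptr, u32 *index)
{
	int err;

	idr_preload(GFP_KERNEL);	/* may sleep: preallocate nodes */
	spin_lock(lock);
	err = idr_alloc_u32(idr, ptr, index, *index, GFP_NOWAIT);
	spin_unlock(lock);
	idr_preload_end();

	return err;
}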
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
@@ -510,7 +522,7 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
INIT_LIST_HEAD(&block->owner_list);
INIT_LIST_HEAD(&block->chain0.filter_chain_list);
- block->refcnt = 1;
+ refcount_set(&block->refcnt, 1);
block->net = net;
block->index = block_index;
@@ -527,6 +539,78 @@ static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
return idr_find(&tn->idr, block_index);
}
+static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
+{
+ struct tcf_block *block;
+
+ rcu_read_lock();
+ block = tcf_block_lookup(net, block_index);
+ if (block && !refcount_inc_not_zero(&block->refcnt))
+ block = NULL;
+ rcu_read_unlock();
+
+ return block;
+}
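
tcf_block_refcnt_get() is the classic RCU lookup idiom: the object found in the IDR may be concurrently dying, so the reference is taken with refcount_inc_not_zero(), which fails once the count has already dropped to zero. A generic sketch, with a hypothetical struct thing:

struct thing {
	refcount_t refcnt;
	u32 index;
	struct rcu_head rcu;
};

static struct thing *thing_get(struct idr *idr, u32 index)
{
	struct thing *t;

	rcu_read_lock();
	t = idr_find(idr, index);
	if (t && !refcount_inc_not_zero(&t->refcnt))
		t = NULL;	/* lost the race with the final put */
	rcu_read_unlock();

	return t;
}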
+
+static void tcf_block_flush_all_chains(struct tcf_block *block)
+{
+ struct tcf_chain *chain;
+
+ /* Hold a refcnt for all chains, so that they don't disappear
+ * while we are iterating.
+ */
+ list_for_each_entry(chain, &block->chain_list, list)
+ tcf_chain_hold(chain);
+
+ list_for_each_entry(chain, &block->chain_list, list)
+ tcf_chain_flush(chain);
+}
+
+static void tcf_block_put_all_chains(struct tcf_block *block)
+{
+ struct tcf_chain *chain, *tmp;
+
+ /* At this point, all the chains should have refcnt >= 1. */
+ list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
+ tcf_chain_put_explicitly_created(chain);
+ tcf_chain_put(chain);
+ }
+}
+
+static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
+ struct tcf_block_ext_info *ei)
+{
+ if (refcount_dec_and_test(&block->refcnt)) {
+ /* Flushing/putting all chains will cause the block to be
+ * deallocated when the last chain is freed. However, if chain_list
+ * is empty, the block has to be deallocated manually. Once the
+ * block reference counter has reached 0, it is no longer possible
+ * to increment it or to add new chains to the block.
+ */
+ bool free_block = list_empty(&block->chain_list);
+
+ if (tcf_block_shared(block))
+ tcf_block_remove(block, block->net);
+ if (!free_block)
+ tcf_block_flush_all_chains(block);
+
+ if (q)
+ tcf_block_offload_unbind(block, q, ei);
+
+ if (free_block)
+ kfree_rcu(block, rcu);
+ else
+ tcf_block_put_all_chains(block);
+ } else if (q) {
+ tcf_block_offload_unbind(block, q, ei);
+ }
+}
+
+static void tcf_block_refcnt_put(struct tcf_block *block)
+{
+ __tcf_block_put(block, NULL, NULL);
+}
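
The release side pairs with that lookup: refcount_dec_and_test() lets exactly one caller observe the drop to zero, the object is unpublished so no new lookups can find it, and the memory goes through kfree_rcu() so readers still inside their RCU sections stay safe. Matching sketch for the hypothetical struct thing above:

static void thing_put(struct idr *idr, spinlock_t *lock, struct thing *t)
{
	if (!refcount_dec_and_test(&t->refcnt))
		return;

	spin_lock(lock);
	idr_remove(idr, t->index);	/* unpublish first */
	spin_unlock(lock);

	kfree_rcu(t, rcu);		/* free after the grace period */
}

__tcf_block_put() above is the same shape, with the extra twist that a block whose chain_list is non-empty is freed indirectly by flushing and putting its chains.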
+
/* Find tcf block.
* Set q, parent, cl when appropriate.
*/
@@ -537,9 +621,10 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
struct netlink_ext_ack *extack)
{
struct tcf_block *block;
+ int err = 0;
if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
- block = tcf_block_lookup(net, block_index);
+ block = tcf_block_refcnt_get(net, block_index);
if (!block) {
NL_SET_ERR_MSG(extack, "Block of given index was not found");
return ERR_PTR(-EINVAL);
@@ -548,55 +633,106 @@ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
const struct Qdisc_class_ops *cops;
struct net_device *dev;
+ rcu_read_lock();
+
/* Find link */
- dev = __dev_get_by_index(net, ifindex);
- if (!dev)
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (!dev) {
+ rcu_read_unlock();
return ERR_PTR(-ENODEV);
+ }
/* Find qdisc */
if (!*parent) {
*q = dev->qdisc;
*parent = (*q)->handle;
} else {
- *q = qdisc_lookup(dev, TC_H_MAJ(*parent));
+ *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
if (!*q) {
NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
- return ERR_PTR(-EINVAL);
+ err = -EINVAL;
+ goto errout_rcu;
}
}
+ *q = qdisc_refcount_inc_nz(*q);
+ if (!*q) {
+ NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
+ err = -EINVAL;
+ goto errout_rcu;
+ }
+
/* Is it classful? */
cops = (*q)->ops->cl_ops;
if (!cops) {
NL_SET_ERR_MSG(extack, "Qdisc not classful");
- return ERR_PTR(-EINVAL);
+ err = -EINVAL;
+ goto errout_rcu;
}
if (!cops->tcf_block) {
NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
- return ERR_PTR(-EOPNOTSUPP);
+ err = -EOPNOTSUPP;
+ goto errout_rcu;
}
+ /* At this point we know that the qdisc is not noop_qdisc,
+ * which means that the qdisc holds a reference to the net_device
+ * and we hold a reference to the qdisc, so it is safe to
+ * release the rcu read lock.
+ */
+ rcu_read_unlock();
+
/* Do we search for filter, attached to class? */
if (TC_H_MIN(*parent)) {
*cl = cops->find(*q, *parent);
if (*cl == 0) {
NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
- return ERR_PTR(-ENOENT);
+ err = -ENOENT;
+ goto errout_qdisc;
}
}
/* And the last stroke */
block = cops->tcf_block(*q, *cl, extack);
- if (!block)
- return ERR_PTR(-EINVAL);
+ if (!block) {
+ err = -EINVAL;
+ goto errout_qdisc;
+ }
if (tcf_block_shared(block)) {
NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
- return ERR_PTR(-EOPNOTSUPP);
+ err = -EOPNOTSUPP;
+ goto errout_qdisc;
}
+
+ /* Always take a reference to the block so that the rules update
+ * path of the cls API can run without the rtnl lock. The caller
+ * must release the block when it is finished using it. The 'if'
+ * branch of this conditional obtains its reference by calling
+ * tcf_block_refcnt_get().
+ */
+ refcount_inc(&block->refcnt);
}
return block;
+
+errout_rcu:
+ rcu_read_unlock();
+errout_qdisc:
+ if (*q) {
+ qdisc_put(*q);
+ *q = NULL;
+ }
+ return ERR_PTR(err);
+}
+
+static void tcf_block_release(struct Qdisc *q, struct tcf_block *block)
+{
+ if (!IS_ERR_OR_NULL(block))
+ tcf_block_refcnt_put(block);
+
+ if (q)
+ qdisc_put(q);
}
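
tcf_block_find() now returns with references held on both the block and (for the qdisc case) the qdisc, and tcf_block_release() is the single drop point; the errout paths added to tc_new_tfilter(), tc_del_tfilter() and tc_get_tfilter() below all funnel through it. A sketch of the caller contract, with arguments abbreviated:

static int example_caller(struct net *net, int ifindex, u32 block_index)
{
	struct tcf_block *block;
	struct Qdisc *q = NULL;
	unsigned long cl = 0;
	u32 parent = 0;
	int err = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       ifindex, block_index, NULL);
	if (IS_ERR(block))
		return PTR_ERR(block);

	/* ... operate on the block's filter chains ... */

	tcf_block_release(q, block);	/* drops qdisc and block refs */
	return err;
}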
struct tcf_block_owner_item {
@@ -664,21 +800,16 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
{
struct net *net = qdisc_net(q);
struct tcf_block *block = NULL;
- bool created = false;
int err;
- if (ei->block_index) {
+ if (ei->block_index)
/* block_index not 0 means the shared block is requested */
- block = tcf_block_lookup(net, ei->block_index);
- if (block)
- block->refcnt++;
- }
+ block = tcf_block_refcnt_get(net, ei->block_index);
if (!block) {
block = tcf_block_create(net, q, ei->block_index, extack);
if (IS_ERR(block))
return PTR_ERR(block);
- created = true;
if (tcf_block_shared(block)) {
err = tcf_block_insert(block, net, extack);
if (err)
@@ -708,14 +839,8 @@ err_block_offload_bind:
err_chain0_head_change_cb_add:
tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
- if (created) {
- if (tcf_block_shared(block))
- tcf_block_remove(block, net);
err_block_insert:
- kfree(block);
- } else {
- block->refcnt--;
- }
+ tcf_block_refcnt_put(block);
return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);
@@ -747,42 +872,12 @@ EXPORT_SYMBOL(tcf_block_get);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
struct tcf_block_ext_info *ei)
{
- struct tcf_chain *chain, *tmp;
-
if (!block)
return;
tcf_chain0_head_change_cb_del(block, ei);
tcf_block_owner_del(block, q, ei->binder_type);
- if (block->refcnt == 1) {
- if (tcf_block_shared(block))
- tcf_block_remove(block, block->net);
-
- /* Hold a refcnt for all chains, so that they don't disappear
- * while we are iterating.
- */
- list_for_each_entry(chain, &block->chain_list, list)
- tcf_chain_hold(chain);
-
- list_for_each_entry(chain, &block->chain_list, list)
- tcf_chain_flush(chain);
- }
-
- tcf_block_offload_unbind(block, q, ei);
-
- if (block->refcnt == 1) {
- /* At this point, all the chains should have refcnt >= 1. */
- list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
- tcf_chain_put_explicitly_created(chain);
- tcf_chain_put(chain);
- }
-
- block->refcnt--;
- if (list_empty(&block->chain_list))
- kfree(block);
- } else {
- block->refcnt--;
- }
+ __tcf_block_put(block, q, ei);
}
EXPORT_SYMBOL(tcf_block_put_ext);
@@ -1211,7 +1306,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
replay:
tp_created = 0;
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1332,6 +1427,7 @@ replay:
errout:
if (chain)
tcf_chain_put(chain);
+ tcf_block_release(q, block);
if (err == -EAGAIN)
/* Replay the request. */
goto replay;
@@ -1360,7 +1456,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1453,6 +1549,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
errout:
if (chain)
tcf_chain_put(chain);
+ tcf_block_release(q, block);
return err;
}
@@ -1475,7 +1572,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
void *fh = NULL;
int err;
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1538,6 +1635,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
errout:
if (chain)
tcf_chain_put(chain);
+ tcf_block_release(q, block);
return err;
}
@@ -1631,12 +1729,13 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if (nlmsg_len(cb->nlh) < sizeof(*tcm))
return skb->len;
- err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
+ err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL,
+ cb->extack);
if (err)
return err;
if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
- block = tcf_block_lookup(net, tcm->tcm_block_index);
+ block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
if (!block)
goto out;
/* If we work with block index, q is NULL and parent value
@@ -1695,6 +1794,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
}
}
+ if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
+ tcf_block_refcnt_put(block);
cb->args[0] = index;
out:
@@ -1838,7 +1939,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
return -EPERM;
replay:
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1854,7 +1955,8 @@ replay:
chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
- return -EINVAL;
+ err = -EINVAL;
+ goto errout_block;
}
chain = tcf_chain_lookup(block, chain_index);
if (n->nlmsg_type == RTM_NEWCHAIN) {
@@ -1866,23 +1968,27 @@ replay:
tcf_chain_hold(chain);
} else {
NL_SET_ERR_MSG(extack, "Filter chain already exists");
- return -EEXIST;
+ err = -EEXIST;
+ goto errout_block;
}
} else {
if (!(n->nlmsg_flags & NLM_F_CREATE)) {
NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
- return -ENOENT;
+ err = -ENOENT;
+ goto errout_block;
}
chain = tcf_chain_create(block, chain_index);
if (!chain) {
NL_SET_ERR_MSG(extack, "Failed to create filter chain");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto errout_block;
}
}
} else {
if (!chain || tcf_chain_held_by_acts_only(chain)) {
NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
- return -EINVAL;
+ err = -EINVAL;
+ goto errout_block;
}
tcf_chain_hold(chain);
}
@@ -1902,6 +2008,8 @@ replay:
RTM_NEWCHAIN, false);
break;
case RTM_DELCHAIN:
+ tfilter_notify_chain(net, skb, block, q, parent, n,
+ chain, RTM_DELTFILTER);
/* Flush the chain first as the user requested chain removal. */
tcf_chain_flush(chain);
/* In case the chain was successfully deleted, put a reference
@@ -1924,6 +2032,8 @@ replay:
errout:
tcf_chain_put(chain);
+errout_block:
+ tcf_block_release(q, block);
if (err == -EAGAIN)
/* Replay the request. */
goto replay;
@@ -1947,12 +2057,13 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
if (nlmsg_len(cb->nlh) < sizeof(*tcm))
return skb->len;
- err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
+ err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+ cb->extack);
if (err)
return err;
if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
- block = tcf_block_lookup(net, tcm->tcm_block_index);
+ block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
if (!block)
goto out;
/* If we work with block index, q is NULL and parent value
@@ -2019,6 +2130,8 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
index++;
}
+ if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
+ tcf_block_refcnt_put(block);
cb->args[0] = index;
out:
@@ -2211,6 +2324,7 @@ static __net_init int tcf_net_init(struct net *net)
{
struct tcf_net *tn = net_generic(net, tcf_net_id);
+ spin_lock_init(&tn->idr_lock);
idr_init(&tn->idr);
return 0;
}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 6fd9bdd93796..9aada2d0ef06 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -98,7 +98,7 @@ struct cls_fl_filter {
struct list_head list;
u32 handle;
u32 flags;
- unsigned int in_hw_count;
+ u32 in_hw_count;
struct rcu_work rwork;
struct net_device *hw_dev;
};
@@ -993,7 +993,7 @@ static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
}
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
-#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
+#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
#define FL_KEY_IS_MASKED(mask, member) \
memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
@@ -1880,6 +1880,9 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
+ goto nla_put_failure;
+
if (tcf_exts_dump(skb, &f->exts))
goto nla_put_failure;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index f218ccf1e2d9..4b28fd44576d 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -68,7 +68,6 @@ struct tc_u_knode {
u32 mask;
u32 __percpu *pcpu_success;
#endif
- struct tcf_proto *tp;
struct rcu_work rwork;
/* The 'sel' field MUST be the last field in structure to allow for
* tc_u32_keys allocated at end of structure.
@@ -80,10 +79,10 @@ struct tc_u_hnode {
struct tc_u_hnode __rcu *next;
u32 handle;
u32 prio;
- struct tc_u_common *tp_c;
int refcnt;
unsigned int divisor;
struct idr handle_idr;
+ bool is_root;
struct rcu_head rcu;
u32 flags;
/* The 'ht' field MUST be the last field in structure to allow for
@@ -98,7 +97,7 @@ struct tc_u_common {
int refcnt;
struct idr handle_idr;
struct hlist_node hnode;
- struct rcu_head rcu;
+ long knodes;
};
static inline unsigned int u32_hash_fold(__be32 key,
@@ -344,19 +343,16 @@ static void *tc_u_common_ptr(const struct tcf_proto *tp)
return block->q;
}
-static unsigned int tc_u_hash(const struct tcf_proto *tp)
+static struct hlist_head *tc_u_hash(void *key)
{
- return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
+ return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}
-static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
+static struct tc_u_common *tc_u_common_find(void *key)
{
struct tc_u_common *tc;
- unsigned int h;
-
- h = tc_u_hash(tp);
- hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
- if (tc->ptr == tc_u_common_ptr(tp))
+ hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
+ if (tc->ptr == key)
return tc;
}
return NULL;
@@ -365,10 +361,8 @@ static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
static int u32_init(struct tcf_proto *tp)
{
struct tc_u_hnode *root_ht;
- struct tc_u_common *tp_c;
- unsigned int h;
-
- tp_c = tc_u_common_find(tp);
+ void *key = tc_u_common_ptr(tp);
+ struct tc_u_common *tp_c = tc_u_common_find(key);
root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
if (root_ht == NULL)
@@ -377,6 +371,7 @@ static int u32_init(struct tcf_proto *tp)
root_ht->refcnt++;
root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
root_ht->prio = tp->prio;
+ root_ht->is_root = true;
idr_init(&root_ht->handle_idr);
if (tp_c == NULL) {
@@ -385,26 +380,24 @@ static int u32_init(struct tcf_proto *tp)
kfree(root_ht);
return -ENOBUFS;
}
- tp_c->ptr = tc_u_common_ptr(tp);
+ tp_c->ptr = key;
INIT_HLIST_NODE(&tp_c->hnode);
idr_init(&tp_c->handle_idr);
- h = tc_u_hash(tp);
- hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
+ hlist_add_head(&tp_c->hnode, tc_u_hash(key));
}
tp_c->refcnt++;
RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
rcu_assign_pointer(tp_c->hlist, root_ht);
- root_ht->tp_c = tp_c;
+ root_ht->refcnt++;
rcu_assign_pointer(tp->root, root_ht);
tp->data = tp_c;
return 0;
}
-static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
- bool free_pf)
+static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
@@ -438,7 +431,7 @@ static void u32_delete_key_work(struct work_struct *work)
struct tc_u_knode,
rwork);
rtnl_lock();
- u32_destroy_key(key->tp, key, false);
+ u32_destroy_key(key, false);
rtnl_unlock();
}
@@ -455,12 +448,13 @@ static void u32_delete_key_freepf_work(struct work_struct *work)
struct tc_u_knode,
rwork);
rtnl_lock();
- u32_destroy_key(key->tp, key, true);
+ u32_destroy_key(key, true);
rtnl_unlock();
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
+ struct tc_u_common *tp_c = tp->data;
struct tc_u_knode __rcu **kp;
struct tc_u_knode *pkp;
struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);
@@ -471,6 +465,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
if (pkp == key) {
RCU_INIT_POINTER(*kp, key->next);
+ tp_c->knodes--;
tcf_unbind_filter(tp, &key->res);
idr_remove(&ht->handle_idr, key->handle);
@@ -585,6 +580,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
struct netlink_ext_ack *extack)
{
+ struct tc_u_common *tp_c = tp->data;
struct tc_u_knode *n;
unsigned int h;
@@ -592,13 +588,14 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
RCU_INIT_POINTER(ht->ht[h],
rtnl_dereference(n->next));
+ tp_c->knodes--;
tcf_unbind_filter(tp, &n->res);
u32_remove_hw_knode(tp, n, extack);
idr_remove(&ht->handle_idr, n->handle);
if (tcf_exts_get_net(&n->exts))
tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
else
- u32_destroy_key(n->tp, n, true);
+ u32_destroy_key(n, true);
}
}
}
@@ -610,7 +607,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
struct tc_u_hnode __rcu **hn;
struct tc_u_hnode *phn;
- WARN_ON(ht->refcnt);
+ WARN_ON(--ht->refcnt);
u32_clear_hnode(tp, ht, extack);
@@ -631,17 +628,6 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
return -ENOENT;
}
-static bool ht_empty(struct tc_u_hnode *ht)
-{
- unsigned int h;
-
- for (h = 0; h <= ht->divisor; h++)
- if (rcu_access_pointer(ht->ht[h]))
- return false;
-
- return true;
-}
-
static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
struct tc_u_common *tp_c = tp->data;
@@ -649,7 +635,7 @@ static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
WARN_ON(root_ht == NULL);
- if (root_ht && --root_ht->refcnt == 0)
+ if (root_ht && --root_ht->refcnt == 1)
u32_destroy_hnode(tp, root_ht, extack);
if (--tp_c->refcnt == 0) {
@@ -679,26 +665,21 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
struct netlink_ext_ack *extack)
{
struct tc_u_hnode *ht = arg;
- struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
struct tc_u_common *tp_c = tp->data;
int ret = 0;
- if (ht == NULL)
- goto out;
-
if (TC_U32_KEY(ht->handle)) {
u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
goto out;
}
- if (root_ht == ht) {
+ if (ht->is_root) {
NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
return -EINVAL;
}
if (ht->refcnt == 1) {
- ht->refcnt--;
u32_destroy_hnode(tp, ht, extack);
} else {
NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
@@ -706,38 +687,7 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
}
out:
- *last = true;
- if (root_ht) {
- if (root_ht->refcnt > 1) {
- *last = false;
- goto ret;
- }
- if (root_ht->refcnt == 1) {
- if (!ht_empty(root_ht)) {
- *last = false;
- goto ret;
- }
- }
- }
-
- if (tp_c->refcnt > 1) {
- *last = false;
- goto ret;
- }
-
- if (tp_c->refcnt == 1) {
- struct tc_u_hnode *ht;
-
- for (ht = rtnl_dereference(tp_c->hlist);
- ht;
- ht = rtnl_dereference(ht->next))
- if (!ht_empty(ht)) {
- *last = false;
- break;
- }
- }
-
-ret:
+ *last = tp_c->refcnt == 1 && tp_c->knodes == 0;
return ret;
}
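
With the per-common knodes counter, u32_delete() above decides *last in O(1) instead of walking every hash table looking for a surviving knode. The counter discipline, condensed from the hunks in this file (all updates run under RTNL):

/*   insert (u32_change):      rcu_assign_pointer(*ins, n); tp_c->knodes++;
 *   unlink (u32_delete_key):  RCU_INIT_POINTER(*kp, key->next); tp_c->knodes--;
 *   flush  (u32_clear_hnode): tp_c->knodes-- for each removed knode;
 */
static bool u32_is_last(const struct tc_u_common *tp_c)
{
	/* only this tp references the common, and no knodes remain */
	return tp_c->refcnt == 1 && tp_c->knodes == 0;
}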
@@ -768,7 +718,7 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
};
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
- unsigned long base, struct tc_u_hnode *ht,
+ unsigned long base,
struct tc_u_knode *n, struct nlattr **tb,
struct nlattr *est, bool ovr,
struct netlink_ext_ack *extack)
@@ -789,12 +739,16 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
}
if (handle) {
- ht_down = u32_lookup_ht(ht->tp_c, handle);
+ ht_down = u32_lookup_ht(tp->data, handle);
if (!ht_down) {
NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
return -EINVAL;
}
+ if (ht_down->is_root) {
+ NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
+ return -EINVAL;
+ }
ht_down->refcnt++;
}
@@ -891,7 +845,6 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
/* Similarly success statistics must be moved as pointers */
new->pcpu_success = n->pcpu_success;
#endif
- new->tp = tp;
memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
@@ -960,18 +913,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (!new)
return -ENOMEM;
- err = u32_set_parms(net, tp, base,
- rtnl_dereference(n->ht_up), new, tb,
+ err = u32_set_parms(net, tp, base, new, tb,
tca[TCA_RATE], ovr, extack);
if (err) {
- u32_destroy_key(tp, new, false);
+ u32_destroy_key(new, false);
return err;
}
err = u32_replace_hw_knode(tp, new, flags, extack);
if (err) {
- u32_destroy_key(tp, new, false);
+ u32_destroy_key(new, false);
return err;
}
@@ -988,7 +940,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (tb[TCA_U32_DIVISOR]) {
unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
- if (--divisor > 0x100) {
+ if (!is_power_of_2(divisor)) {
+ NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
+ return -EINVAL;
+ }
+ if (divisor-- > 0x100) {
NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
return -EINVAL;
}
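
is_power_of_2() is the linux/log2.h helper built on the usual bit trick; requiring a power-of-2 divisor keeps the bucket count consistent with the masked hashing the classifier performs. For reference:

/* A power of two has exactly one bit set, so n & (n - 1) clears it. */
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
	return (n != 0 && ((n & (n - 1)) == 0));
}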
@@ -1013,7 +969,6 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return err;
}
}
- ht->tp_c = tp_c;
ht->refcnt = 1;
ht->divisor = divisor;
ht->handle = handle;
@@ -1103,7 +1058,6 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
n->handle = handle;
n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
n->flags = flags;
- n->tp = tp;
err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
if (err < 0)
@@ -1125,7 +1079,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
}
#endif
- err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr,
+ err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], ovr,
extack);
if (err == 0) {
struct tc_u_knode __rcu **ins;
@@ -1146,6 +1100,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
RCU_INIT_POINTER(n->next, pins);
rcu_assign_pointer(*ins, n);
+ tp_c->knodes++;
*arg = n;
return 0;
}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 98541c6399db..022bca98bde6 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -27,7 +27,6 @@
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
-#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
@@ -315,6 +314,24 @@ out:
return q;
}
+struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
+{
+ struct netdev_queue *nq;
+ struct Qdisc *q;
+
+ if (!handle)
+ return NULL;
+ q = qdisc_match_from_root(dev->qdisc, handle);
+ if (q)
+ goto out;
+
+ nq = dev_ingress_queue_rcu(dev);
+ if (nq)
+ q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
+out:
+ return q;
+}
+
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
unsigned long cl;
@@ -921,7 +938,7 @@ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
qdisc_notify(net, skb, n, clid, old, new);
if (old)
- qdisc_destroy(old);
+ qdisc_put(old);
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
@@ -974,7 +991,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
qdisc_refcount_inc(new);
if (!ingress)
- qdisc_destroy(old);
+ qdisc_put(old);
}
skip:
@@ -1053,10 +1070,6 @@ static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
return 0;
}
-/* lockdep annotation is needed for ingress; egress gets it only for name */
-static struct lock_class_key qdisc_tx_lock;
-static struct lock_class_key qdisc_rx_lock;
-
/*
Allocate and initialize new qdisc.
@@ -1121,7 +1134,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
if (handle == TC_H_INGRESS) {
sch->flags |= TCQ_F_INGRESS;
handle = TC_H_MAKE(TC_H_INGRESS, 0);
- lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
} else {
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
@@ -1129,7 +1141,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
if (handle == 0)
goto err_out3;
}
- lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
if (!netif_is_multiqueue(dev))
sch->flags |= TCQ_F_ONETXQUEUE;
}
@@ -1307,6 +1318,18 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
return 0;
}
+const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
+ [TCA_KIND] = { .type = NLA_STRING },
+ [TCA_OPTIONS] = { .type = NLA_NESTED },
+ [TCA_RATE] = { .type = NLA_BINARY,
+ .len = sizeof(struct tc_estimator) },
+ [TCA_STAB] = { .type = NLA_NESTED },
+ [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
+ [TCA_CHAIN] = { .type = NLA_U32 },
+ [TCA_INGRESS_BLOCK] = { .type = NLA_U32 },
+ [TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
+};
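
With rtm_tca_policy passed to nlmsg_parse(), the netlink core validates each attribute before any handler runs: NLA_U32 demands an exact 4-byte payload, NLA_NESTED a well-formed nested stream, and for NLA_BINARY the .len field is the maximum accepted payload. A minimal sketch of the mechanism with an illustrative (not TCA_*) attribute set:

enum { EX_UNSPEC, EX_NAME, EX_COUNT, __EX_MAX };
#define EX_MAX (__EX_MAX - 1)

static const struct nla_policy ex_policy[EX_MAX + 1] = {
	[EX_NAME]  = { .type = NLA_STRING },
	[EX_COUNT] = { .type = NLA_U32 },
};

static int ex_parse(const struct nlmsghdr *nlh,
		    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[EX_MAX + 1];
	int err;

	/* hdrlen 0: attributes start right after the nlmsghdr here */
	err = nlmsg_parse(nlh, 0, tb, EX_MAX, ex_policy, extack);
	if (err < 0)
		return err;	/* malformed attribute rejected here */

	if (tb[EX_COUNT])
		pr_info("count=%u\n", nla_get_u32(tb[EX_COUNT]));
	return 0;
}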
+
/*
* Delete/get qdisc.
*/
@@ -1327,7 +1350,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+ extack);
if (err < 0)
return err;
@@ -1411,7 +1435,8 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
replay:
/* Reinit, just in case something touches this. */
- err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+ extack);
if (err < 0)
return err;
@@ -1568,7 +1593,7 @@ graft:
err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
if (err) {
if (q)
- qdisc_destroy(q);
+ qdisc_put(q);
return err;
}
@@ -1645,7 +1670,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
idx = 0;
ASSERT_RTNL();
- err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
+ err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
+ rtm_tca_policy, cb->extack);
if (err < 0)
return err;
@@ -1864,7 +1890,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+ extack);
if (err < 0)
return err;
@@ -2043,7 +2070,8 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
if (tcm->tcm_parent) {
q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
- if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
+ if (q && q != root &&
+ tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
return -1;
return 0;
}
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index cd49afca9617..d714d3747bcb 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -150,7 +150,7 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
pr_debug("atm_tc_put: destroying\n");
list_del_init(&flow->list);
pr_debug("atm_tc_put: qdisc %p\n", flow->q);
- qdisc_destroy(flow->q);
+ qdisc_put(flow->q);
tcf_block_put(flow->block);
if (flow->sock) {
pr_debug("atm_tc_put: f_count %ld\n",
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index c07c30b916d5..b910cd5c56f7 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -812,7 +812,7 @@ static struct sk_buff *dequeue_head(struct cake_flow *flow)
if (skb) {
flow->head = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
}
return skb;
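
skb_mark_not_on_list(), substituted throughout this series for open-coded skb->next = NULL, is a trivial skbuff.h helper; naming the operation lets later SKB list rework touch one place instead of every call site. Its definition is essentially:

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;	/* detach from an ad-hoc ->next list */
}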
@@ -1252,7 +1252,7 @@ found:
else
flow->head = elig_ack->next;
- elig_ack->next = NULL;
+ skb_mark_not_on_list(elig_ack);
return elig_ack;
}
@@ -1675,7 +1675,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
while (segs) {
nskb = segs->next;
- segs->next = NULL;
+ skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
cobalt_set_enqueue_time(segs, now);
get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
@@ -2644,7 +2644,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
for (i = 1; i <= CAKE_QUEUES; i++)
quantum_div[i] = 65535 / i;
- q->tins = kvzalloc(CAKE_MAX_TINS * sizeof(struct cake_tin_data),
+ q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
GFP_KERNEL);
if (!q->tins)
goto nomem;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index f42025d53cfe..4dc05409e3fb 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1418,7 +1418,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
WARN_ON(cl->filters);
tcf_block_put(cl->block);
- qdisc_destroy(cl->q);
+ qdisc_put(cl->q);
qdisc_put_rtab(cl->R_tab);
gen_kill_estimator(&cl->rate_est);
if (cl != &q->link)
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index e26a24017faa..e689e11b6d0f 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -379,7 +379,7 @@ static void cbs_destroy(struct Qdisc *sch)
cbs_disable_offload(dev, q);
if (q->qdisc)
- qdisc_destroy(q->qdisc);
+ qdisc_put(q->qdisc);
}
static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index e0b0cf8a9939..cdebaed0f8cf 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -134,7 +134,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
tca[TCA_RATE]);
if (err) {
NL_SET_ERR_MSG(extack, "Failed to replace estimator");
- qdisc_destroy(cl->qdisc);
+ qdisc_put(cl->qdisc);
kfree(cl);
return err;
}
@@ -153,7 +153,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
gen_kill_estimator(&cl->rate_est);
- qdisc_destroy(cl->qdisc);
+ qdisc_put(cl->qdisc);
kfree(cl);
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 049714c57075..f6f480784bc6 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -412,7 +412,7 @@ static void dsmark_destroy(struct Qdisc *sch)
pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
tcf_block_put(p->block);
- qdisc_destroy(p->q);
+ qdisc_put(p->q);
if (p->mv != p->embedded)
kfree(p->mv);
}
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 24893d3b5d22..3809c9bf8896 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -177,7 +177,7 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
if (q) {
err = fifo_set_limit(q, limit);
if (err < 0) {
- qdisc_destroy(q);
+ qdisc_put(q);
q = NULL;
}
}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 4808713c73b9..4b1af706896c 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -92,8 +92,8 @@ struct fq_sched_data {
u32 quantum;
u32 initial_quantum;
u32 flow_refill_delay;
- u32 flow_max_rate; /* optional max rate per flow */
u32 flow_plimit; /* max packets per flow */
+ unsigned long flow_max_rate; /* optional max rate per flow */
u32 orphan_mask; /* mask for orphaned skb */
u32 low_rate_threshold;
struct rb_root *fq_root;
@@ -106,7 +106,6 @@ struct fq_sched_data {
u64 stat_gc_flows;
u64 stat_internal_packets;
- u64 stat_tcp_retrans;
u64 stat_throttled;
u64 stat_flows_plimit;
u64 stat_pkts_too_long;
@@ -319,7 +318,7 @@ static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
if (skb) {
flow->head = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
flow->qlen--;
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
@@ -327,62 +326,17 @@ static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
return skb;
}
-/* We might add in the future detection of retransmits
- * For the time being, just return false
- */
-static bool skb_is_retransmit(struct sk_buff *skb)
-{
- return false;
-}
-
-/* add skb to flow queue
- * flow queue is a linked list, kind of FIFO, except for TCP retransmits
- * We special case tcp retransmits to be transmitted before other packets.
- * We rely on fact that TCP retransmits are unlikely, so we do not waste
- * a separate queue or a pointer.
- * head-> [retrans pkt 1]
- * [retrans pkt 2]
- * [ normal pkt 1]
- * [ normal pkt 2]
- * [ normal pkt 3]
- * tail-> [ normal pkt 4]
- */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
- struct sk_buff *prev, *head = flow->head;
+ struct sk_buff *head = flow->head;
skb->next = NULL;
- if (!head) {
+ if (!head)
flow->head = skb;
- flow->tail = skb;
- return;
- }
- if (likely(!skb_is_retransmit(skb))) {
+ else
flow->tail->next = skb;
- flow->tail = skb;
- return;
- }
- /* This skb is a tcp retransmit,
- * find the last retrans packet in the queue
- */
- prev = NULL;
- while (skb_is_retransmit(head)) {
- prev = head;
- head = head->next;
- if (!head)
- break;
- }
- if (!prev) { /* no rtx packet in queue, become the new head */
- skb->next = flow->head;
- flow->head = skb;
- } else {
- if (prev == flow->tail)
- flow->tail = skb;
- else
- skb->next = prev->next;
- prev->next = skb;
- }
+ flow->tail = skb;
}
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -401,8 +355,6 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
f->qlen++;
- if (skb_is_retransmit(skb))
- q->stat_tcp_retrans++;
qdisc_qstats_backlog_inc(sch, skb);
if (fq_flow_is_detached(f)) {
struct sock *sk = skb->sk;
@@ -464,7 +416,8 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
struct fq_flow_head *head;
struct sk_buff *skb;
struct fq_flow *f;
- u32 rate, plen;
+ unsigned long rate;
+ u32 plen;
skb = fq_dequeue_head(sch, &q->internal);
if (skb)
@@ -491,11 +444,16 @@ begin:
}
skb = f->head;
- if (unlikely(skb && now < f->time_next_packet &&
- !skb_is_tcp_pure_ack(skb))) {
- head->first = f->next;
- fq_flow_set_throttled(q, f);
- goto begin;
+ if (skb) {
+ u64 time_next_packet = max_t(u64, ktime_to_ns(skb->tstamp),
+ f->time_next_packet);
+
+ if (now < time_next_packet) {
+ head->first = f->next;
+ f->time_next_packet = time_next_packet;
+ fq_flow_set_throttled(q, f);
+ goto begin;
+ }
}
skb = fq_dequeue_head(sch, f);
@@ -513,11 +471,7 @@ begin:
prefetch(&skb->end);
f->credit -= qdisc_pkt_len(skb);
- if (!q->rate_enable)
- goto out;
-
- /* Do not pace locally generated ack packets */
- if (skb_is_tcp_pure_ack(skb))
+ if (ktime_to_ns(skb->tstamp) || !q->rate_enable)
goto out;
rate = q->flow_max_rate;
@@ -532,11 +486,11 @@ begin:
if (f->credit > 0)
goto out;
}
- if (rate != ~0U) {
+ if (rate != ~0UL) {
u64 len = (u64)plen * NSEC_PER_SEC;
if (likely(rate))
- do_div(len, rate);
+ len = div64_ul(len, rate);
/* Since socket rate can change later,
* clamp the delay to 1 second.
* Really, providers of too big packets should be fixed !
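
The pacing delay being clamped above is just packet length over rate in nanoseconds; with flow_max_rate widened to unsigned long, the 64-bit division needs div64_ul() to stay correct on 32-bit hosts. Sketch of the computation:

static u64 pacing_delay_ns(u32 plen, unsigned long rate)
{
	u64 len = (u64)plen * NSEC_PER_SEC;

	if (likely(rate))
		len = div64_ul(len, rate);
	if (unlikely(len > NSEC_PER_SEC))	/* clamp as above */
		len = NSEC_PER_SEC;

	return len;
}

For example, a 1500-byte packet at 125000 B/s (1 Mbit/s) serializes in 1500 * 1e9 / 125000 = 12,000,000 ns, i.e. 12 ms.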
@@ -748,9 +702,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));
- if (tb[TCA_FQ_FLOW_MAX_RATE])
- q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
+ if (tb[TCA_FQ_FLOW_MAX_RATE]) {
+ u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
+ q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
+ }
if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
q->low_rate_threshold =
nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);
@@ -813,7 +769,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
q->quantum = 2 * psched_mtu(qdisc_dev(sch));
q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
q->flow_refill_delay = msecs_to_jiffies(40);
- q->flow_max_rate = ~0U;
+ q->flow_max_rate = ~0UL;
q->time_next_delayed_flow = ~0ULL;
q->rate_enable = 1;
q->new_flows.first = NULL;
@@ -823,7 +779,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
q->fq_trees_log = ilog2(1024);
q->orphan_mask = 1024 - 1;
q->low_rate_threshold = 550000 / 8;
- qdisc_watchdog_init(&q->watchdog, sch);
+ qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
if (opt)
err = fq_change(sch, opt, extack);
@@ -849,7 +805,8 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
- nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
+ min_t(unsigned long, q->flow_max_rate, ~0U)) ||
nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
jiffies_to_usecs(q->flow_refill_delay)) ||
nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
@@ -873,7 +830,7 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
st.gc_flows = q->stat_gc_flows;
st.highprio_packets = q->stat_internal_packets;
- st.tcp_retrans = q->stat_tcp_retrans;
+ st.tcp_retrans = 0;
st.throttled = q->stat_throttled;
st.flows_plimit = q->stat_flows_plimit;
st.pkts_too_long = q->stat_pkts_too_long;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 6c0a9d5dbf94..cd04d40c30b6 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -124,7 +124,7 @@ static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
struct sk_buff *skb = flow->head;
flow->head = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
return skb;
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 69078c82963e..de1663f7d3ad 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -184,7 +184,7 @@ static void try_bulk_dequeue_skb(struct Qdisc *q,
skb = nskb;
(*packets)++; /* GSO counts as one pkt */
}
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
}
/* This variant of try_bulk_dequeue_skb() makes sure
@@ -210,7 +210,7 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
skb = nskb;
} while (++cnt < 8);
(*packets) += cnt;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
}
/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
@@ -572,6 +572,18 @@ struct Qdisc noop_qdisc = {
.dev_queue = &noop_netdev_queue,
.running = SEQCNT_ZERO(noop_qdisc.running),
.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
+ .gso_skb = {
+ .next = (struct sk_buff *)&noop_qdisc.gso_skb,
+ .prev = (struct sk_buff *)&noop_qdisc.gso_skb,
+ .qlen = 0,
+ .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
+ },
+ .skb_bad_txq = {
+ .next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
+ .prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
+ .qlen = 0,
+ .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
+ },
};
EXPORT_SYMBOL(noop_qdisc);
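
Because dev_init_scheduler_queue() no longer runs __skb_queue_head_init() on every qdisc (see the removal further down in this file), the static noop_qdisc must be born with gso_skb and skb_bad_txq already in the valid empty state. The designated initializers above are the static equivalent of the runtime helper:

static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	/* an empty sk_buff list points back at its own head */
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}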
@@ -901,7 +913,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
if (!ops->init || ops->init(sch, NULL, extack) == 0)
return sch;
- qdisc_destroy(sch);
+ qdisc_put(sch);
return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);
@@ -941,15 +953,18 @@ void qdisc_free(struct Qdisc *qdisc)
kfree((char *) qdisc - qdisc->padded);
}
-void qdisc_destroy(struct Qdisc *qdisc)
+static void qdisc_free_cb(struct rcu_head *head)
+{
+ struct Qdisc *q = container_of(head, struct Qdisc, rcu);
+
+ qdisc_free(q);
+}
+
+static void qdisc_destroy(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
struct sk_buff *skb, *tmp;
- if (qdisc->flags & TCQ_F_BUILTIN ||
- !refcount_dec_and_test(&qdisc->refcnt))
- return;
-
#ifdef CONFIG_NET_SCHED
qdisc_hash_del(qdisc);
@@ -974,9 +989,34 @@ void qdisc_destroy(struct Qdisc *qdisc)
kfree_skb_list(skb);
}
- qdisc_free(qdisc);
+ call_rcu(&qdisc->rcu, qdisc_free_cb);
+}
+
+void qdisc_put(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_BUILTIN ||
+ !refcount_dec_and_test(&qdisc->refcnt))
+ return;
+
+ qdisc_destroy(qdisc);
+}
+EXPORT_SYMBOL(qdisc_put);
+
+/* Version of qdisc_put() that is called with the rtnl mutex unlocked.
+ * Intended as an optimization: this function only takes the rtnl lock
+ * if the qdisc reference counter reaches zero.
+ */
+
+void qdisc_put_unlocked(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_BUILTIN ||
+ !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
+ return;
+
+ qdisc_destroy(qdisc);
+ rtnl_unlock();
}
-EXPORT_SYMBOL(qdisc_destroy);
+EXPORT_SYMBOL(qdisc_put_unlocked);
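
qdisc_put_unlocked() leans on refcount_dec_and_rtnl_lock(), added alongside this series by analogy with refcount_dec_and_lock(): RTNL is acquired only when the counter actually reaches zero, keeping the common decrement lock-free. A sketch of its contract (the real implementation lives elsewhere in the networking core):

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	if (refcount_dec_not_one(r))
		return false;		/* fast path: no lock taken */

	rtnl_lock();
	if (!refcount_dec_and_test(r)) {
		rtnl_unlock();		/* raced with a new reference */
		return false;
	}
	return true;			/* zero reached, RTNL held */
}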
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
@@ -1245,8 +1285,6 @@ static void dev_init_scheduler_queue(struct net_device *dev,
rcu_assign_pointer(dev_queue->qdisc, qdisc);
dev_queue->qdisc_sleeping = qdisc;
- __skb_queue_head_init(&qdisc->gso_skb);
- __skb_queue_head_init(&qdisc->skb_bad_txq);
}
void dev_init_scheduler(struct net_device *dev)
@@ -1270,7 +1308,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
dev_queue->qdisc_sleeping = qdisc_default;
- qdisc_destroy(qdisc);
+ qdisc_put(qdisc);
}
}
@@ -1279,7 +1317,7 @@ void dev_shutdown(struct net_device *dev)
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
- qdisc_destroy(dev->qdisc);
+ qdisc_put(dev->qdisc);
dev->qdisc = &noop_qdisc;
WARN_ON(timer_pending(&dev->watchdog_timer));
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 3278a76f6861..b18ec1f6de60 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1092,7 +1092,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
struct hfsc_sched *q = qdisc_priv(sch);
tcf_block_put(cl->block);
- qdisc_destroy(cl->qdisc);
+ qdisc_put(cl->qdisc);
gen_kill_estimator(&cl->rate_est);
if (cl != &q->root)
kfree(cl);
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index c3a8388dcdf6..9d6a47697406 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -330,7 +330,7 @@ static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
struct sk_buff *skb = bucket->head;
bucket->head = skb->next;
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
return skb;
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 43c4bfe625a9..58b449490757 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -132,7 +132,7 @@ struct htb_class {
struct htb_class_inner {
struct htb_prio clprio[TC_HTB_NUMPRIO];
} inner;
- } un;
+ };
s64 pq_key;
int prio_activity; /* for which prios are we active */
@@ -411,13 +411,13 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
int prio = ffz(~m);
m &= ~(1 << prio);
- if (p->un.inner.clprio[prio].feed.rb_node)
+ if (p->inner.clprio[prio].feed.rb_node)
/* parent already has its feed in use, so reset the
* bit in mask as the parent is already ok
*/
mask &= ~(1 << prio);
- htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
+ htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
}
p->prio_activity |= mask;
cl = p;
@@ -447,19 +447,19 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
int prio = ffz(~m);
m &= ~(1 << prio);
- if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
+ if (p->inner.clprio[prio].ptr == cl->node + prio) {
/* we are removing child which is pointed to from
* parent feed - forget the pointer but remember
* classid
*/
- p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
- p->un.inner.clprio[prio].ptr = NULL;
+ p->inner.clprio[prio].last_ptr_id = cl->common.classid;
+ p->inner.clprio[prio].ptr = NULL;
}
htb_safe_rb_erase(cl->node + prio,
- &p->un.inner.clprio[prio].feed);
+ &p->inner.clprio[prio].feed);
- if (!p->un.inner.clprio[prio].feed.rb_node)
+ if (!p->inner.clprio[prio].feed.rb_node)
mask |= 1 << prio;
}
@@ -555,7 +555,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
*/
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
- WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
+ WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
if (!cl->prio_activity) {
cl->prio_activity = 1 << cl->prio;
@@ -577,22 +577,6 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
cl->prio_activity = 0;
}
-static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
- struct qdisc_skb_head *qh)
-{
- struct sk_buff *last = qh->tail;
-
- if (last) {
- skb->next = NULL;
- last->next = skb;
- qh->tail = skb;
- } else {
- qh->tail = skb;
- qh->head = skb;
- }
- qh->qlen++;
-}
-
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
@@ -603,7 +587,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (cl == HTB_DIRECT) {
/* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen) {
- htb_enqueue_tail(skb, sch, &q->direct_queue);
+ __qdisc_enqueue_tail(skb, &q->direct_queue);
q->direct_pkts++;
} else {
return qdisc_drop(skb, sch, to_free);
@@ -615,7 +599,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
__qdisc_drop(skb, to_free);
return ret;
#endif
- } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
+ } else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
to_free)) != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) {
qdisc_qstats_drop(sch);
@@ -823,7 +807,7 @@ static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
if (!cl->level)
return cl;
- clp = &cl->un.inner.clprio[prio];
+ clp = &cl->inner.clprio[prio];
(++sp)->root = clp->feed.rb_node;
sp->pptr = &clp->ptr;
sp->pid = &clp->last_ptr_id;
@@ -857,7 +841,7 @@ next:
* graft operation on the leaf since last dequeue;
* simply deactivate and skip such class
*/
- if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
+ if (unlikely(cl->leaf.q->q.qlen == 0)) {
struct htb_class *next;
htb_deactivate(q, cl);
@@ -873,12 +857,12 @@ next:
goto next;
}
- skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
+ skb = cl->leaf.q->dequeue(cl->leaf.q);
if (likely(skb != NULL))
break;
- qdisc_warn_nonwc("htb", cl->un.leaf.q);
- htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr:
+ qdisc_warn_nonwc("htb", cl->leaf.q);
+ htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
&q->hlevel[0].hprio[prio].ptr);
cl = htb_lookup_leaf(hprio, prio);
@@ -886,16 +870,16 @@ next:
if (likely(skb != NULL)) {
bstats_update(&cl->bstats, skb);
- cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
- if (cl->un.leaf.deficit[level] < 0) {
- cl->un.leaf.deficit[level] += cl->quantum;
- htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
+ cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
+ if (cl->leaf.deficit[level] < 0) {
+ cl->leaf.deficit[level] += cl->quantum;
+ htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
&q->hlevel[0].hprio[prio].ptr);
}
/* this used to be after charge_class but this constellation
* gives us slightly better performance
*/
- if (!cl->un.leaf.q->q.qlen)
+ if (!cl->leaf.q->q.qlen)
htb_deactivate(q, cl);
htb_charge_class(q, cl, level, skb);
}
@@ -972,10 +956,10 @@ static void htb_reset(struct Qdisc *sch)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (cl->level)
- memset(&cl->un.inner, 0, sizeof(cl->un.inner));
+ memset(&cl->inner, 0, sizeof(cl->inner));
else {
- if (cl->un.leaf.q)
- qdisc_reset(cl->un.leaf.q);
+ if (cl->leaf.q)
+ qdisc_reset(cl->leaf.q);
}
cl->prio_activity = 0;
cl->cmode = HTB_CAN_SEND;
@@ -1098,8 +1082,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
*/
tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
tcm->tcm_handle = cl->common.classid;
- if (!cl->level && cl->un.leaf.q)
- tcm->tcm_info = cl->un.leaf.q->handle;
+ if (!cl->level && cl->leaf.q)
+ tcm->tcm_info = cl->leaf.q->handle;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
@@ -1142,9 +1126,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
};
__u32 qlen = 0;
- if (!cl->level && cl->un.leaf.q) {
- qlen = cl->un.leaf.q->q.qlen;
- qs.backlog = cl->un.leaf.q->qstats.backlog;
+ if (!cl->level && cl->leaf.q) {
+ qlen = cl->leaf.q->q.qlen;
+ qs.backlog = cl->leaf.q->qstats.backlog;
}
cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
INT_MIN, INT_MAX);
@@ -1172,14 +1156,14 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
cl->common.classid, extack)) == NULL)
return -ENOBUFS;
- *old = qdisc_replace(sch, new, &cl->un.leaf.q);
+ *old = qdisc_replace(sch, new, &cl->leaf.q);
return 0;
}
static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
- return !cl->level ? cl->un.leaf.q : NULL;
+ return !cl->level ? cl->leaf.q : NULL;
}
static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
@@ -1205,15 +1189,15 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
{
struct htb_class *parent = cl->parent;
- WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
+ WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
if (parent->cmode != HTB_CAN_SEND)
htb_safe_rb_erase(&parent->pq_node,
&q->hlevel[parent->level].wait_pq);
parent->level = 0;
- memset(&parent->un.inner, 0, sizeof(parent->un.inner));
- parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
+ memset(&parent->inner, 0, sizeof(parent->inner));
+ parent->leaf.q = new_q ? new_q : &noop_qdisc;
parent->tokens = parent->buffer;
parent->ctokens = parent->cbuffer;
parent->t_c = ktime_get_ns();
@@ -1223,8 +1207,8 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
if (!cl->level) {
- WARN_ON(!cl->un.leaf.q);
- qdisc_destroy(cl->un.leaf.q);
+ WARN_ON(!cl->leaf.q);
+ qdisc_put(cl->leaf.q);
}
gen_kill_estimator(&cl->rate_est);
tcf_block_put(cl->block);
@@ -1286,11 +1270,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
sch_tree_lock(sch);
if (!cl->level) {
- unsigned int qlen = cl->un.leaf.q->q.qlen;
- unsigned int backlog = cl->un.leaf.q->qstats.backlog;
+ unsigned int qlen = cl->leaf.q->q.qlen;
+ unsigned int backlog = cl->leaf.q->qstats.backlog;
- qdisc_reset(cl->un.leaf.q);
- qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
+ qdisc_reset(cl->leaf.q);
+ qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
}
/* delete from hash and active; remainder in destroy_class */
@@ -1419,13 +1403,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
classid, NULL);
sch_tree_lock(sch);
if (parent && !parent->level) {
- unsigned int qlen = parent->un.leaf.q->q.qlen;
- unsigned int backlog = parent->un.leaf.q->qstats.backlog;
+ unsigned int qlen = parent->leaf.q->q.qlen;
+ unsigned int backlog = parent->leaf.q->qstats.backlog;
/* turn parent into inner node */
- qdisc_reset(parent->un.leaf.q);
- qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
- qdisc_destroy(parent->un.leaf.q);
+ qdisc_reset(parent->leaf.q);
+ qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+ qdisc_put(parent->leaf.q);
if (parent->prio_activity)
htb_deactivate(q, parent);
@@ -1436,10 +1420,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
}
parent->level = (parent->parent ? parent->parent->level
: TC_HTB_MAXDEPTH) - 1;
- memset(&parent->un.inner, 0, sizeof(parent->un.inner));
+ memset(&parent->inner, 0, sizeof(parent->inner));
}
/* leaf (we) needs elementary qdisc */
- cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
+ cl->leaf.q = new_q ? new_q : &noop_qdisc;
cl->common.classid = classid;
cl->parent = parent;
@@ -1455,8 +1439,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
qdisc_class_hash_insert(&q->clhash, &cl->common);
if (parent)
parent->children++;
- if (cl->un.leaf.q != &noop_qdisc)
- qdisc_hash_add(cl->un.leaf.q, true);
+ if (cl->leaf.q != &noop_qdisc)
+ qdisc_hash_add(cl->leaf.q, true);
} else {
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL,
@@ -1478,7 +1462,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
/* it used to be a nasty bug here, we have to check that node
- * is really leaf before changing cl->un.leaf !
+ * is really leaf before changing cl->leaf !
*/
if (!cl->level) {
u64 quantum = cl->rate.rate_bytes_ps;
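The `un.` removals throughout this file all follow from the struct change in the first hunk: the named union `un` becomes anonymous. In miniature:

	struct htb_class {
		/* ... */
		union {
			struct htb_class_leaf  { /* deficit[], q, ... */ } leaf;
			struct htb_class_inner { /* clprio[] */ } inner;
		};	/* anonymous: accessed as cl->leaf / cl->inner */
		s64 pq_key;
		/* ... */
	};

A class is still either a leaf or an inner node, so the union stays; only the extra `un.` hop in every accessor goes away.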
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index d6b8ae4ed7a3..f20f3a0f8424 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -65,7 +65,7 @@ static void mq_destroy(struct Qdisc *sch)
if (!priv->qdiscs)
return;
for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
- qdisc_destroy(priv->qdiscs[ntx]);
+ qdisc_put(priv->qdiscs[ntx]);
kfree(priv->qdiscs);
}
@@ -119,7 +119,7 @@ static void mq_attach(struct Qdisc *sch)
qdisc = priv->qdiscs[ntx];
old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
if (old)
- qdisc_destroy(old);
+ qdisc_put(old);
#ifdef CONFIG_NET_SCHED
if (ntx < dev->real_num_tx_queues)
qdisc_hash_add(qdisc, false);
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 0e9d761cdd80..d364e63c396d 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -40,7 +40,7 @@ static void mqprio_destroy(struct Qdisc *sch)
for (ntx = 0;
ntx < dev->num_tx_queues && priv->qdiscs[ntx];
ntx++)
- qdisc_destroy(priv->qdiscs[ntx]);
+ qdisc_put(priv->qdiscs[ntx]);
kfree(priv->qdiscs);
}
@@ -300,7 +300,7 @@ static void mqprio_attach(struct Qdisc *sch)
qdisc = priv->qdiscs[ntx];
old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
if (old)
- qdisc_destroy(old);
+ qdisc_put(old);
if (ntx < dev->real_num_tx_queues)
qdisc_hash_add(qdisc, false);
}
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 1da7ea8de0ad..7410ce4d0321 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -175,7 +175,7 @@ multiq_destroy(struct Qdisc *sch)
tcf_block_put(q->block);
for (band = 0; band < q->bands; band++)
- qdisc_destroy(q->queues[band]);
+ qdisc_put(q->queues[band]);
kfree(q->queues);
}
@@ -204,7 +204,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
q->queues[i] = &noop_qdisc;
qdisc_tree_reduce_backlog(child, child->q.qlen,
child->qstats.backlog);
- qdisc_destroy(child);
+ qdisc_put(child);
}
}
@@ -228,7 +228,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
qdisc_tree_reduce_backlog(old,
old->q.qlen,
old->qstats.backlog);
- qdisc_destroy(old);
+ qdisc_put(old);
}
sch_tree_unlock(sch);
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index ad18a2052416..57b3ad9394ad 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -412,16 +412,6 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
return segs;
}
-static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
-{
- skb->next = qh->head;
-
- if (!qh->head)
- qh->tail = skb;
- qh->head = skb;
- qh->qlen++;
-}
-
/*
* Insert one skb into qdisc.
* Note: parent depends on return value to account for queue length.
@@ -570,7 +560,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
cb->time_to_send = ktime_get_ns();
q->counter = 0;
- netem_enqueue_skb_head(&sch->q, skb);
+ __qdisc_enqueue_head(skb, &sch->q);
sch->qstats.requeues++;
}
@@ -578,7 +568,7 @@ finish_segs:
if (segs) {
while (segs) {
skb2 = segs->next;
- segs->next = NULL;
+ skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
last_len = segs->len;
rc = qdisc_enqueue(segs, sch, to_free);
@@ -1032,7 +1022,7 @@ static void netem_destroy(struct Qdisc *sch)
qdisc_watchdog_cancel(&q->watchdog);
if (q->qdisc)
- qdisc_destroy(q->qdisc);
+ qdisc_put(q->qdisc);
dist_free(q->delay_dist);
dist_free(q->slot_dist);
}
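Both netem_enqueue_skb_head() above and htb_enqueue_tail() earlier are folded into generic helpers in include/net/sch_generic.h whose bodies match the deleted code. A sketch of the head variant, mirroring the removed function:

	static inline void __qdisc_enqueue_head(struct sk_buff *skb,
						struct qdisc_skb_head *qh)
	{
		skb->next = qh->head;
		if (!qh->head)
			qh->tail = skb;
		qh->head = skb;
		qh->qlen++;
	}

__qdisc_enqueue_tail() is the same idea at the other end of the list, matching the htb helper removed above.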
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 18d30bb86881..d1429371592f 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -110,8 +110,8 @@ static bool drop_early(struct Qdisc *sch, u32 packet_size)
/* If current delay is less than half of target, and
* if drop prob is low already, disable early_drop
*/
- if ((q->vars.qdelay < q->params.target / 2)
- && (q->vars.prob < MAX_PROB / 5))
+ if ((q->vars.qdelay < q->params.target / 2) &&
+ (q->vars.prob < MAX_PROB / 5))
return false;
/* If we have fewer than 2 mtu-sized packets, disable drop_early,
@@ -209,7 +209,8 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
/* tupdate is in jiffies */
if (tb[TCA_PIE_TUPDATE])
- q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));
+ q->params.tupdate =
+ usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));
if (tb[TCA_PIE_LIMIT]) {
u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);
@@ -247,7 +248,6 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
{
-
struct pie_sched_data *q = qdisc_priv(sch);
int qlen = sch->qstats.backlog; /* current queue size in bytes */
@@ -294,9 +294,9 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
* dq_count to 0 to re-enter the if block when the next
* packet is dequeued
*/
- if (qlen < QUEUE_THRESHOLD)
+ if (qlen < QUEUE_THRESHOLD) {
q->vars.dq_count = DQCOUNT_INVALID;
- else {
+ } else {
q->vars.dq_count = 0;
q->vars.dq_tstamp = psched_get_time();
}
@@ -370,7 +370,7 @@ static void calculate_probability(struct Qdisc *sch)
oldprob = q->vars.prob;
/* to ensure we increase probability in steps of no more than 2% */
- if (delta > (s32) (MAX_PROB / (100 / 2)) &&
+ if (delta > (s32)(MAX_PROB / (100 / 2)) &&
q->vars.prob >= MAX_PROB / 10)
delta = (MAX_PROB / 100) * 2;
@@ -405,7 +405,7 @@ static void calculate_probability(struct Qdisc *sch)
* delay is 0 for 2 consecutive Tupdate periods.
*/
- if ((qdelay == 0) && (qdelay_old == 0) && update_prob)
+ if (qdelay == 0 && qdelay_old == 0 && update_prob)
q->vars.prob = (q->vars.prob * 98) / 100;
q->vars.qdelay = qdelay;
@@ -419,8 +419,8 @@ static void calculate_probability(struct Qdisc *sch)
*/
if ((q->vars.qdelay < q->params.target / 2) &&
(q->vars.qdelay_old < q->params.target / 2) &&
- (q->vars.prob == 0) &&
- (q->vars.avg_dq_rate > 0))
+ q->vars.prob == 0 &&
+ q->vars.avg_dq_rate > 0)
pie_vars_init(&q->vars);
}
@@ -437,7 +437,6 @@ static void pie_timer(struct timer_list *t)
if (q->params.tupdate)
mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
spin_unlock(root_lock);
-
}
static int pie_init(struct Qdisc *sch, struct nlattr *opt,
@@ -469,15 +468,16 @@ static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
struct nlattr *opts;
opts = nla_nest_start(skb, TCA_OPTIONS);
- if (opts == NULL)
+ if (!opts)
goto nla_put_failure;
/* convert target from pschedtime to us */
if (nla_put_u32(skb, TCA_PIE_TARGET,
- ((u32) PSCHED_TICKS2NS(q->params.target)) /
+ ((u32)PSCHED_TICKS2NS(q->params.target)) /
NSEC_PER_USEC) ||
nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
- nla_put_u32(skb, TCA_PIE_TUPDATE, jiffies_to_usecs(q->params.tupdate)) ||
+ nla_put_u32(skb, TCA_PIE_TUPDATE,
+ jiffies_to_usecs(q->params.tupdate)) ||
nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
@@ -489,7 +489,6 @@ static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_failure:
nla_nest_cancel(skb, opts);
return -1;
-
}
static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
@@ -497,7 +496,7 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
struct pie_sched_data *q = qdisc_priv(sch);
struct tc_pie_xstats st = {
.prob = q->vars.prob,
- .delay = ((u32) PSCHED_TICKS2NS(q->vars.qdelay)) /
+ .delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
NSEC_PER_USEC,
/* unscale and return dq_rate in bytes per sec */
.avg_dq_rate = q->vars.avg_dq_rate *
@@ -514,8 +513,7 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
- struct sk_buff *skb;
- skb = qdisc_dequeue_head(sch);
+ struct sk_buff *skb = qdisc_dequeue_head(sch);
if (!skb)
return NULL;
@@ -527,6 +525,7 @@ static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
static void pie_reset(struct Qdisc *sch)
{
struct pie_sched_data *q = qdisc_priv(sch);
+
qdisc_reset_queue(sch);
pie_vars_init(&q->vars);
}
@@ -534,6 +533,7 @@ static void pie_reset(struct Qdisc *sch)
static void pie_destroy(struct Qdisc *sch)
{
struct pie_sched_data *q = qdisc_priv(sch);
+
q->params.tupdate = 0;
del_timer_sync(&q->adapt_timer);
}
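The 2% clamp in calculate_probability() is easier to read with the numbers plugged in: MAX_PROB / (100 / 2) is MAX_PROB / 50, i.e. 2% of the probability scale. Once the drop probability has reached MAX_PROB / 10 (10%), any larger computed increase is clamped to (MAX_PROB / 100) * 2, so the probability ramps in steps of at most two percentage points per update.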
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 222e53d3d27a..f8af98621179 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -175,7 +175,7 @@ prio_destroy(struct Qdisc *sch)
tcf_block_put(q->block);
prio_offload(sch, NULL);
for (prio = 0; prio < q->bands; prio++)
- qdisc_destroy(q->queues[prio]);
+ qdisc_put(q->queues[prio]);
}
static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
@@ -205,7 +205,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
extack);
if (!queues[i]) {
while (i > oldbands)
- qdisc_destroy(queues[--i]);
+ qdisc_put(queues[--i]);
return -ENOMEM;
}
}
@@ -220,7 +220,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
qdisc_tree_reduce_backlog(child, child->q.qlen,
child->qstats.backlog);
- qdisc_destroy(child);
+ qdisc_put(child);
}
for (i = oldbands; i < q->bands; i++) {
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index bb1a9c11fc54..dc37c4ead439 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -526,7 +526,7 @@ set_change_agg:
return 0;
destroy_class:
- qdisc_destroy(cl->qdisc);
+ qdisc_put(cl->qdisc);
kfree(cl);
return err;
}
@@ -537,7 +537,7 @@ static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
qfq_rm_from_agg(q, cl);
gen_kill_estimator(&cl->rate_est);
- qdisc_destroy(cl->qdisc);
+ qdisc_put(cl->qdisc);
kfree(cl);
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 56c181c3feeb..3ce6c0a2c493 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -181,7 +181,7 @@ static void red_destroy(struct Qdisc *sch)
del_timer_sync(&q->adapt_timer);
red_offload(sch, false);
- qdisc_destroy(q->qdisc);
+ qdisc_put(q->qdisc);
}
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
@@ -233,7 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
if (child) {
qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
q->qdisc->qstats.backlog);
- qdisc_destroy(q->qdisc);
+ qdisc_put(q->qdisc);
q->qdisc = child;
}
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 7cbdad8419b7..bab506b01a32 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -469,7 +469,7 @@ static void sfb_destroy(struct Qdisc *sch)
struct sfb_sched_data *q = qdisc_priv(sch);
tcf_block_put(q->block);
- qdisc_destroy(q->qdisc);
+ qdisc_put(q->qdisc);
}
static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
@@ -523,7 +523,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
q->qdisc->qstats.backlog);
- qdisc_destroy(q->qdisc);
+ qdisc_put(q->qdisc);
q->qdisc = child;
q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
new file mode 100644
index 000000000000..206e4dbed12f
--- /dev/null
+++ b/net/sched/sch_taprio.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* net/sched/sch_taprio.c Time Aware Priority Scheduler
+ *
+ * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+#include <net/sch_generic.h>
+
+#define TAPRIO_ALL_GATES_OPEN -1
+
+struct sched_entry {
+ struct list_head list;
+
+ /* The instant at which this entry "closes" and the next one
+ * should open. The qdisc makes some effort so that no
+ * packet leaves after this time.
+ */
+ ktime_t close_time;
+ atomic_t budget;
+ int index;
+ u32 gate_mask;
+ u32 interval;
+ u8 command;
+};
+
+struct taprio_sched {
+ struct Qdisc **qdiscs;
+ struct Qdisc *root;
+ s64 base_time;
+ int clockid;
+ int picos_per_byte; /* Using picoseconds because for 10Gbps+
+ * speeds it's sub-nanoseconds per byte
+ */
+ size_t num_entries;
+
+ /* Protects the update side of the RCU protected current_entry */
+ spinlock_t current_entry_lock;
+ struct sched_entry __rcu *current_entry;
+ struct list_head entries;
+ ktime_t (*get_time)(void);
+ struct hrtimer advance_timer;
+};
+
+static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct Qdisc *child;
+ int queue;
+
+ queue = skb_get_queue_mapping(skb);
+
+ child = q->qdiscs[queue];
+ if (unlikely(!child))
+ return qdisc_drop(skb, sch, to_free);
+
+ qdisc_qstats_backlog_inc(sch, skb);
+ sch->q.qlen++;
+
+ return qdisc_enqueue(skb, child, to_free);
+}
+
+static struct sk_buff *taprio_peek(struct Qdisc *sch)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ struct sched_entry *entry;
+ struct sk_buff *skb;
+ u32 gate_mask;
+ int i;
+
+ rcu_read_lock();
+ entry = rcu_dereference(q->current_entry);
+ gate_mask = entry ? entry->gate_mask : -1;
+ rcu_read_unlock();
+
+ if (!gate_mask)
+ return NULL;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct Qdisc *child = q->qdiscs[i];
+ int prio;
+ u8 tc;
+
+ if (unlikely(!child))
+ continue;
+
+ skb = child->ops->peek(child);
+ if (!skb)
+ continue;
+
+ prio = skb->priority;
+ tc = netdev_get_prio_tc_map(dev, prio);
+
+ if (!(gate_mask & BIT(tc)))
+ return NULL;
+
+ return skb;
+ }
+
+ return NULL;
+}
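gate_mask carries one bit per traffic class. Worked example: with gate_mask = 0x3, classes 0 and 1 are open; if the first non-empty queue's head skb maps to class 2, BIT(tc) is not set and taprio_peek() returns NULL, so a closed gate blocks at the head of its queue rather than letting later queues' packets be reported past it.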
+
+static inline int length_to_duration(struct taprio_sched *q, int len)
+{
+ return (len * q->picos_per_byte) / 1000;
+}
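picos_per_byte is derived later in taprio_change() as 8 * 10^12 divided by the link rate in bits per second. Worked example at 1 Gb/s: 8e12 / 1e9 = 8000 ps per byte, so a 1500-byte frame costs length_to_duration() = 1500 * 8000 / 1000 = 12000 ns (12 us) of gate time.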
+
+static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ struct sched_entry *entry;
+ struct sk_buff *skb;
+ u32 gate_mask;
+ int i;
+
+ rcu_read_lock();
+ entry = rcu_dereference(q->current_entry);
+ /* if there's no entry, it means that the schedule didn't
+ * start yet, so force all gates to be open; this is in
+ * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
+ * "AdminGateStates"
+ */
+ gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
+ rcu_read_unlock();
+
+ if (!gate_mask)
+ return NULL;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct Qdisc *child = q->qdiscs[i];
+ ktime_t guard;
+ int prio;
+ int len;
+ u8 tc;
+
+ if (unlikely(!child))
+ continue;
+
+ skb = child->ops->peek(child);
+ if (!skb)
+ continue;
+
+ prio = skb->priority;
+ tc = netdev_get_prio_tc_map(dev, prio);
+
+ if (!(gate_mask & BIT(tc)))
+ continue;
+
+ len = qdisc_pkt_len(skb);
+ guard = ktime_add_ns(q->get_time(),
+ length_to_duration(q, len));
+
+ /* If there's no gate entry, there's no
+ * guard band ...
+ */
+ if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
+ ktime_after(guard, entry->close_time))
+ return NULL;
+
+ /* ... and no budget. */
+ if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
+ atomic_sub_return(len, &entry->budget) < 0)
+ return NULL;
+
+ skb = child->ops->dequeue(child);
+ if (unlikely(!skb))
+ return NULL;
+
+ qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
+ sch->q.qlen--;
+
+ return skb;
+ }
+
+ return NULL;
+}
+
+static bool should_restart_cycle(const struct taprio_sched *q,
+ const struct sched_entry *entry)
+{
+ WARN_ON(!entry);
+
+ return list_is_last(&entry->list, &q->entries);
+}
+
+static enum hrtimer_restart advance_sched(struct hrtimer *timer)
+{
+ struct taprio_sched *q = container_of(timer, struct taprio_sched,
+ advance_timer);
+ struct sched_entry *entry, *next;
+ struct Qdisc *sch = q->root;
+ ktime_t close_time;
+
+ spin_lock(&q->current_entry_lock);
+ entry = rcu_dereference_protected(q->current_entry,
+ lockdep_is_held(&q->current_entry_lock));
+
+ /* This is the first time the schedule runs, so it only
+ * happens once per schedule. The first entry
+ * is pre-calculated during the schedule initialization.
+ */
+ if (unlikely(!entry)) {
+ next = list_first_entry(&q->entries, struct sched_entry,
+ list);
+ close_time = next->close_time;
+ goto first_run;
+ }
+
+ if (should_restart_cycle(q, entry))
+ next = list_first_entry(&q->entries, struct sched_entry,
+ list);
+ else
+ next = list_next_entry(entry, list);
+
+ close_time = ktime_add_ns(entry->close_time, next->interval);
+
+ next->close_time = close_time;
+ atomic_set(&next->budget,
+ (next->interval * 1000) / q->picos_per_byte);
+
+first_run:
+ rcu_assign_pointer(q->current_entry, next);
+ spin_unlock(&q->current_entry_lock);
+
+ hrtimer_set_expires(&q->advance_timer, close_time);
+
+ rcu_read_lock();
+ __netif_schedule(sch);
+ rcu_read_unlock();
+
+ return HRTIMER_RESTART;
+}
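The budget set above converts an entry's interval into bytes: interval is in nanoseconds, * 1000 yields picoseconds, and dividing by picos_per_byte yields bytes. At 1 Gb/s (8000 ps/byte), a 100 us entry gets 100000 * 1000 / 8000 = 12500 bytes, which taprio_dequeue() debits per packet via atomic_sub_return().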
+
+static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
+ [TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 },
+ [TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 },
+ [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
+ [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = {
+ [TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
+ [TCA_TAPRIO_ATTR_PRIOMAP] = {
+ .len = sizeof(struct tc_mqprio_qopt)
+ },
+ [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED },
+ [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 },
+ [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
+ [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
+};
+
+static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
+ struct netlink_ext_ack *extack)
+{
+ u32 interval = 0;
+
+ if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
+ entry->command = nla_get_u8(
+ tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);
+
+ if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
+ entry->gate_mask = nla_get_u32(
+ tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);
+
+ if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
+ interval = nla_get_u32(
+ tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
+
+ if (interval == 0) {
+ NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
+ return -EINVAL;
+ }
+
+ entry->interval = interval;
+
+ return 0;
+}
+
+static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
+ int index, struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
+ int err;
+
+ err = nla_parse_nested(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
+ entry_policy, NULL);
+ if (err < 0) {
+ NL_SET_ERR_MSG(extack, "Could not parse nested entry");
+ return -EINVAL;
+ }
+
+ entry->index = index;
+
+ return fill_sched_entry(tb, entry, extack);
+}
+
+/* Returns the number of entries in case of success */
+static int parse_sched_single_entry(struct nlattr *n,
+ struct taprio_sched *q,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb_entry[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
+ struct nlattr *tb_list[TCA_TAPRIO_SCHED_MAX + 1] = { };
+ struct sched_entry *entry;
+ bool found = false;
+ u32 index;
+ int err;
+
+ err = nla_parse_nested(tb_list, TCA_TAPRIO_SCHED_MAX,
+ n, entry_list_policy, NULL);
+ if (err < 0) {
+ NL_SET_ERR_MSG(extack, "Could not parse nested entry");
+ return -EINVAL;
+ }
+
+ if (!tb_list[TCA_TAPRIO_SCHED_ENTRY]) {
+ NL_SET_ERR_MSG(extack, "Single-entry must include an entry");
+ return -EINVAL;
+ }
+
+ err = nla_parse_nested(tb_entry, TCA_TAPRIO_SCHED_ENTRY_MAX,
+ tb_list[TCA_TAPRIO_SCHED_ENTRY],
+ entry_policy, NULL);
+ if (err < 0) {
+ NL_SET_ERR_MSG(extack, "Could not parse nested entry");
+ return -EINVAL;
+ }
+
+ if (!tb_entry[TCA_TAPRIO_SCHED_ENTRY_INDEX]) {
+ NL_SET_ERR_MSG(extack, "Entry must specify an index\n");
+ return -EINVAL;
+ }
+
+ index = nla_get_u32(tb_entry[TCA_TAPRIO_SCHED_ENTRY_INDEX]);
+ if (index >= q->num_entries) {
+ NL_SET_ERR_MSG(extack, "Index for single entry exceeds number of entries in schedule");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(entry, &q->entries, list) {
+ if (entry->index == index) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ NL_SET_ERR_MSG(extack, "Could not find entry");
+ return -ENOENT;
+ }
+
+ err = fill_sched_entry(tb_entry, entry, extack);
+ if (err < 0)
+ return err;
+
+ return q->num_entries;
+}
+
+static int parse_sched_list(struct nlattr *list,
+ struct taprio_sched *q,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *n;
+ int err, rem;
+ int i = 0;
+
+ if (!list)
+ return -EINVAL;
+
+ nla_for_each_nested(n, list, rem) {
+ struct sched_entry *entry;
+
+ if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
+ NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
+ continue;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ NL_SET_ERR_MSG(extack, "Not enough memory for entry");
+ return -ENOMEM;
+ }
+
+ err = parse_sched_entry(n, entry, i, extack);
+ if (err < 0) {
+ kfree(entry);
+ return err;
+ }
+
+ list_add_tail(&entry->list, &q->entries);
+ i++;
+ }
+
+ q->num_entries = i;
+
+ return i;
+}
+
+/* Returns the number of entries in case of success */
+static int parse_taprio_opt(struct nlattr **tb, struct taprio_sched *q,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+ int clockid;
+
+ if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] &&
+ tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY])
+ return -EINVAL;
+
+ if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] && q->num_entries == 0)
+ return -EINVAL;
+
+ if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID])
+ return -EINVAL;
+
+ if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
+ q->base_time = nla_get_s64(
+ tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
+
+ if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
+ clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
+
+ /* We only support static clockids, and we don't allow
+ * the clockid to be modified after the first init.
+ */
+ if (clockid < 0 || (q->clockid != -1 && q->clockid != clockid))
+ return -EINVAL;
+
+ q->clockid = clockid;
+ }
+
+ if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
+ err = parse_sched_list(
+ tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], q, extack);
+ else if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY])
+ err = parse_sched_single_entry(
+ tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY], q, extack);
+
+ /* parse_sched_* return the number of entries in the schedule,
+ * a schedule with zero entries is an error.
+ */
+ if (err == 0) {
+ NL_SET_ERR_MSG(extack, "The schedule should contain at least one entry");
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int taprio_parse_mqprio_opt(struct net_device *dev,
+ struct tc_mqprio_qopt *qopt,
+ struct netlink_ext_ack *extack)
+{
+ int i, j;
+
+ if (!qopt) {
+ NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+ return -EINVAL;
+ }
+
+ /* Verify num_tc is not out of max range */
+ if (qopt->num_tc > TC_MAX_QUEUE) {
+ NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
+ return -EINVAL;
+ }
+
+ /* taprio imposes that traffic classes map 1:n to tx queues */
+ if (qopt->num_tc > dev->num_tx_queues) {
+ NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
+ return -EINVAL;
+ }
+
+ /* Verify priority mapping uses valid tcs */
+ for (i = 0; i < TC_BITMASK + 1; i++) {
+ if (qopt->prio_tc_map[i] >= qopt->num_tc) {
+ NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < qopt->num_tc; i++) {
+ unsigned int last = qopt->offset[i] + qopt->count[i];
+
+ /* Verify the queue count is within the tx range; a count
+ * ending at real_num_tx_queues indicates the last queue is in use.
+ */
+ if (qopt->offset[i] >= dev->num_tx_queues ||
+ !qopt->count[i] ||
+ last > dev->real_num_tx_queues) {
+ NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
+ return -EINVAL;
+ }
+
+ /* Verify that the offset and counts do not overlap */
+ for (j = i + 1; j < qopt->num_tc; j++) {
+ if (last > qopt->offset[j]) {
+ NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static ktime_t taprio_get_start_time(struct Qdisc *sch)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct sched_entry *entry;
+ ktime_t now, base, cycle;
+ s64 n;
+
+ base = ns_to_ktime(q->base_time);
+ cycle = 0;
+
+ /* Calculate the cycle_time by summing all the intervals.
+ */
+ list_for_each_entry(entry, &q->entries, list)
+ cycle = ktime_add_ns(cycle, entry->interval);
+
+ if (!cycle)
+ return base;
+
+ now = q->get_time();
+
+ if (ktime_after(base, now))
+ return base;
+
+ /* Schedule the start time for the beginning of the next
+ * cycle.
+ */
+ n = div64_s64(ktime_sub_ns(now, base), cycle);
+
+ return ktime_add_ns(base, (n + 1) * cycle);
+}
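Worked example of the alignment above: with base = 0, a 1 ms cycle and now = 2.3 ms, n = div64_s64(2.3 ms - 0, 1 ms) = 2 and the start time is base + 3 * cycle = 3 ms, i.e. the beginning of the next full cycle rather than somewhere mid-cycle.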
+
+static void taprio_start_sched(struct Qdisc *sch, ktime_t start)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct sched_entry *first;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->current_entry_lock, flags);
+
+ first = list_first_entry(&q->entries, struct sched_entry,
+ list);
+
+ first->close_time = ktime_add_ns(start, first->interval);
+ atomic_set(&first->budget,
+ (first->interval * 1000) / q->picos_per_byte);
+ rcu_assign_pointer(q->current_entry, NULL);
+
+ spin_unlock_irqrestore(&q->current_entry_lock, flags);
+
+ hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
+}
+
+static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ struct tc_mqprio_qopt *mqprio = NULL;
+ struct ethtool_link_ksettings ecmd;
+ int i, err, size;
+ s64 link_speed;
+ ktime_t start;
+
+ err = nla_parse_nested(tb, TCA_TAPRIO_ATTR_MAX, opt,
+ taprio_policy, extack);
+ if (err < 0)
+ return err;
+
+ err = -EINVAL;
+ if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
+ mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
+
+ err = taprio_parse_mqprio_opt(dev, mqprio, extack);
+ if (err < 0)
+ return err;
+
+ /* A schedule with zero entries is an error */
+ size = parse_taprio_opt(tb, q, extack);
+ if (size < 0)
+ return size;
+
+ hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
+ q->advance_timer.function = advance_sched;
+
+ switch (q->clockid) {
+ case CLOCK_REALTIME:
+ q->get_time = ktime_get_real;
+ break;
+ case CLOCK_MONOTONIC:
+ q->get_time = ktime_get;
+ break;
+ case CLOCK_BOOTTIME:
+ q->get_time = ktime_get_boottime;
+ break;
+ case CLOCK_TAI:
+ q->get_time = ktime_get_clocktai;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *dev_queue;
+ struct Qdisc *qdisc;
+
+ dev_queue = netdev_get_tx_queue(dev, i);
+ qdisc = qdisc_create_dflt(dev_queue,
+ &pfifo_qdisc_ops,
+ TC_H_MAKE(TC_H_MAJ(sch->handle),
+ TC_H_MIN(i + 1)),
+ extack);
+ if (!qdisc)
+ return -ENOMEM;
+
+ if (i < dev->real_num_tx_queues)
+ qdisc_hash_add(qdisc, false);
+
+ q->qdiscs[i] = qdisc;
+ }
+
+ if (mqprio) {
+ netdev_set_num_tc(dev, mqprio->num_tc);
+ for (i = 0; i < mqprio->num_tc; i++)
+ netdev_set_tc_queue(dev, i,
+ mqprio->count[i],
+ mqprio->offset[i]);
+
+ /* Always use supplied priority mappings */
+ for (i = 0; i < TC_BITMASK + 1; i++)
+ netdev_set_prio_tc_map(dev, i,
+ mqprio->prio_tc_map[i]);
+ }
+
+ if (!__ethtool_get_link_ksettings(dev, &ecmd))
+ link_speed = ecmd.base.speed;
+ else
+ link_speed = SPEED_1000;
+
+ q->picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
+ link_speed * 1000 * 1000);
+
+ start = taprio_get_start_time(sch);
+ if (!start)
+ return 0;
+
+ taprio_start_sched(sch, start);
+
+ return 0;
+}
+
+static void taprio_destroy(struct Qdisc *sch)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ struct sched_entry *entry, *n;
+ unsigned int i;
+
+ hrtimer_cancel(&q->advance_timer);
+
+ if (q->qdiscs) {
+ for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
+ qdisc_put(q->qdiscs[i]);
+
+ kfree(q->qdiscs);
+ }
+ q->qdiscs = NULL;
+
+ netdev_set_num_tc(dev, 0);
+
+ list_for_each_entry_safe(entry, n, &q->entries, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+
+ INIT_LIST_HEAD(&q->entries);
+ spin_lock_init(&q->current_entry_lock);
+
+ /* We may overwrite the configuration later */
+ hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
+
+ q->root = sch;
+
+ /* We only support static clockids. Use an invalid value as default
+ * and get the valid one on taprio_change().
+ */
+ q->clockid = -1;
+
+ if (sch->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ if (!netif_is_multiqueue(dev))
+ return -EOPNOTSUPP;
+
+ /* pre-allocate qdisc, attachment can't fail */
+ q->qdiscs = kcalloc(dev->num_tx_queues,
+ sizeof(q->qdiscs[0]),
+ GFP_KERNEL);
+
+ if (!q->qdiscs)
+ return -ENOMEM;
+
+ if (!opt)
+ return -EINVAL;
+
+ return taprio_change(sch, opt, extack);
+}
+
+static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
+ unsigned long cl)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned long ntx = cl - 1;
+
+ if (ntx >= dev->num_tx_queues)
+ return NULL;
+
+ return netdev_get_tx_queue(dev, ntx);
+}
+
+static int taprio_graft(struct Qdisc *sch, unsigned long cl,
+ struct Qdisc *new, struct Qdisc **old,
+ struct netlink_ext_ack *extack)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+
+ if (!dev_queue)
+ return -EINVAL;
+
+ if (dev->flags & IFF_UP)
+ dev_deactivate(dev);
+
+ *old = q->qdiscs[cl - 1];
+ q->qdiscs[cl - 1] = new;
+
+ if (new)
+ new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+
+ if (dev->flags & IFF_UP)
+ dev_activate(dev);
+
+ return 0;
+}
+
+static int dump_entry(struct sk_buff *msg,
+ const struct sched_entry *entry)
+{
+ struct nlattr *item;
+
+ item = nla_nest_start(msg, TCA_TAPRIO_SCHED_ENTRY);
+ if (!item)
+ return -ENOSPC;
+
+ if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
+ entry->gate_mask))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
+ entry->interval))
+ goto nla_put_failure;
+
+ return nla_nest_end(msg, item);
+
+nla_put_failure:
+ nla_nest_cancel(msg, item);
+ return -1;
+}
+
+static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct taprio_sched *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ struct tc_mqprio_qopt opt = { 0 };
+ struct nlattr *nest, *entry_list;
+ struct sched_entry *entry;
+ unsigned int i;
+
+ opt.num_tc = netdev_get_num_tc(dev);
+ memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
+
+ for (i = 0; i < netdev_get_num_tc(dev); i++) {
+ opt.count[i] = dev->tc_to_txq[i].count;
+ opt.offset[i] = dev->tc_to_txq[i].offset;
+ }
+
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (!nest)
+ return -ENOSPC;
+
+ if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
+ goto options_error;
+
+ if (nla_put_s64(skb, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
+ q->base_time, TCA_TAPRIO_PAD))
+ goto options_error;
+
+ if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
+ goto options_error;
+
+ entry_list = nla_nest_start(skb, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
+ if (!entry_list)
+ goto options_error;
+
+ list_for_each_entry(entry, &q->entries, list) {
+ if (dump_entry(skb, entry) < 0)
+ goto options_error;
+ }
+
+ nla_nest_end(skb, entry_list);
+
+ return nla_nest_end(skb, nest);
+
+options_error:
+ nla_nest_cancel(skb, nest);
+ return -1;
+}
+
+static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
+{
+ struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+
+ if (!dev_queue)
+ return NULL;
+
+ return dev_queue->qdisc_sleeping;
+}
+
+static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
+{
+ unsigned int ntx = TC_H_MIN(classid);
+
+ if (!taprio_queue_get(sch, ntx))
+ return 0;
+ return ntx;
+}
+
+static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+
+ tcm->tcm_parent = TC_H_ROOT;
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+
+ return 0;
+}
+
+static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d)
+ __releases(d->lock)
+ __acquires(d->lock)
+{
+ struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
+
+ sch = dev_queue->qdisc_sleeping;
+ if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
+ gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+ return -1;
+ return 0;
+}
+
+static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned long ntx;
+
+ if (arg->stop)
+ return;
+
+ arg->count = arg->skip;
+ for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
+ if (arg->fn(sch, ntx + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+}
+
+static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
+ struct tcmsg *tcm)
+{
+ return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
+}
+
+static const struct Qdisc_class_ops taprio_class_ops = {
+ .graft = taprio_graft,
+ .leaf = taprio_leaf,
+ .find = taprio_find,
+ .walk = taprio_walk,
+ .dump = taprio_dump_class,
+ .dump_stats = taprio_dump_class_stats,
+ .select_queue = taprio_select_queue,
+};
+
+static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
+ .cl_ops = &taprio_class_ops,
+ .id = "taprio",
+ .priv_size = sizeof(struct taprio_sched),
+ .init = taprio_init,
+ .destroy = taprio_destroy,
+ .peek = taprio_peek,
+ .dequeue = taprio_dequeue,
+ .enqueue = taprio_enqueue,
+ .dump = taprio_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __init taprio_module_init(void)
+{
+ return register_qdisc(&taprio_qdisc_ops);
+}
+
+static void __exit taprio_module_exit(void)
+{
+ unregister_qdisc(&taprio_qdisc_ops);
+}
+
+module_init(taprio_module_init);
+module_exit(taprio_module_exit);
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 6f74a426f159..942dcca09cf2 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -162,7 +162,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
nb = 0;
while (segs) {
nskb = segs->next;
- segs->next = NULL;
+ skb_mark_not_on_list(segs);
qdisc_skb_cb(segs)->pkt_len = segs->len;
len += segs->len;
ret = qdisc_enqueue(segs, q->qdisc, to_free);
@@ -392,7 +392,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
if (child) {
qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
q->qdisc->qstats.backlog);
- qdisc_destroy(q->qdisc);
+ qdisc_put(q->qdisc);
q->qdisc = child;
}
q->limit = qopt->limit;
@@ -438,7 +438,7 @@ static void tbf_destroy(struct Qdisc *sch)
struct tbf_sched_data *q = qdisc_priv(sch);
qdisc_watchdog_cancel(&q->watchdog);
- qdisc_destroy(q->qdisc);
+ qdisc_put(q->qdisc);
}
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 297d9cf960b9..a827a1f562bf 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1450,7 +1450,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
/* Get the lowest pmtu of all the transports. */
list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
if (t->pmtu_pending && t->dst) {
- sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst));
+ sctp_transport_update_pmtu(t,
+ atomic_read(&t->mtu_info));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 9bbc5f92c941..5c36a99882ed 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -395,6 +395,7 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
return;
if (sock_owned_by_user(sk)) {
+ atomic_set(&t->mtu_info, pmtu);
asoc->pmtu_pending = 1;
t->pmtu_pending = 1;
return;
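With the socket owned by user context, the ICMP handler cannot apply the new path MTU directly; it parks the value in t->mtu_info (atomic, so the write needs no socket lock) and raises pmtu_pending. The sctp_assoc_sync_pmtu() hunk above then consumes the stashed value via atomic_read() once the lock is released.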
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 7f849b01ec8e..67939ad99c01 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -120,6 +120,12 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
sctp_assoc_sync_pmtu(asoc);
}
+ if (asoc->pmtu_pending) {
+ if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
+ asoc->pmtu_pending = 0;
+ }
+
/* If there is a prepend chunk, stick it on the list before
* any other chunks get appended.
*/
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index d74d00b29942..9cb854b05342 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -385,9 +385,7 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
asoc->outqueue.outstanding_bytes -= sctp_data_size(chk);
}
- msg_len -= SCTP_DATA_SNDSIZE(chk) +
- sizeof(struct sk_buff) +
- sizeof(struct sctp_chunk);
+ msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
if (msg_len <= 0)
break;
}
@@ -421,9 +419,7 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
}
- msg_len -= SCTP_DATA_SNDSIZE(chk) +
- sizeof(struct sk_buff) +
- sizeof(struct sctp_chunk);
+ msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
sctp_chunk_free(chk);
if (msg_len <= 0)
break;
@@ -1048,7 +1044,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
if (!ctx->packet || !ctx->packet->has_cookie_echo)
return;
- /* fallthru */
+ /* fall through */
case SCTP_STATE_ESTABLISHED:
case SCTP_STATE_SHUTDOWN_PENDING:
case SCTP_STATE_SHUTDOWN_RECEIVED:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f73e9d38d5ba..fc0386e8ff23 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -83,7 +83,7 @@
#include <net/sctp/stream_sched.h>
/* Forward declarations for internal helper functions. */
-static int sctp_writeable(struct sock *sk);
+static bool sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len);
@@ -119,25 +119,10 @@ static void sctp_enter_memory_pressure(struct sock *sk)
/* Get the sndbuf space available at the time on the association. */
static inline int sctp_wspace(struct sctp_association *asoc)
{
- int amt;
+ struct sock *sk = asoc->base.sk;
- if (asoc->ep->sndbuf_policy)
- amt = asoc->sndbuf_used;
- else
- amt = sk_wmem_alloc_get(asoc->base.sk);
-
- if (amt >= asoc->base.sk->sk_sndbuf) {
- if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
- amt = 0;
- else {
- amt = sk_stream_wspace(asoc->base.sk);
- if (amt < 0)
- amt = 0;
- }
- } else {
- amt = asoc->base.sk->sk_sndbuf - amt;
- }
- return amt;
+ return asoc->ep->sndbuf_policy ? sk->sk_sndbuf - asoc->sndbuf_used
+ : sk_stream_wspace(sk);
}
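The rewritten sctp_wspace() can now return a negative value. A minimal sketch, assuming sk_stream_wspace() keeps its usual include/net/sock.h definition:

	static inline int sk_stream_wspace(const struct sock *sk)
	{
		return sk->sk_sndbuf - sk->sk_wmem_queued;
	}

A negative result means the queued data already exceeds the send buffer, which is why the callers below compare against (int)msg_len and test for <= 0 instead of !sctp_wspace(asoc).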
/* Increment the used sndbuf space count of the corresponding association by
@@ -166,12 +151,9 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
/* Save the chunk pointer in skb for sctp_wfree to use later. */
skb_shinfo(chunk->skb)->destructor_arg = chunk;
- asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
- sizeof(struct sk_buff) +
- sizeof(struct sctp_chunk);
-
refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
- sk->sk_wmem_queued += chunk->skb->truesize;
+ asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
+ sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
sk_mem_charge(sk, chunk->skb->truesize);
}
@@ -271,11 +253,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
spin_lock_bh(&sctp_assocs_id_lock);
asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
+ if (asoc && (asoc->base.sk != sk || asoc->base.dead))
+ asoc = NULL;
spin_unlock_bh(&sctp_assocs_id_lock);
- if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
- return NULL;
-
return asoc;
}
@@ -1928,10 +1909,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
asoc->pmtu_pending = 0;
}
- if (sctp_wspace(asoc) < msg_len)
+ if (sctp_wspace(asoc) < (int)msg_len)
sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
- if (!sctp_wspace(asoc)) {
+ if (sctp_wspace(asoc) <= 0) {
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
if (err)
@@ -1946,8 +1927,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
if (sp->strm_interleave) {
timeo = sock_sndtimeo(sk, 0);
err = sctp_wait_for_connect(asoc, &timeo);
- if (err)
+ if (err) {
+ err = -ESRCH;
goto err;
+ }
} else {
wait_connect = true;
}
@@ -7100,14 +7083,14 @@ static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
}
policy = params.sprstat_policy;
- if (policy & ~SCTP_PR_SCTP_MASK)
+ if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
if (!asoc)
goto out;
- if (policy == SCTP_PR_SCTP_NONE) {
+ if (policy & SCTP_PR_SCTP_ALL) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
@@ -7159,7 +7142,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
}
policy = params.sprstat_policy;
- if (policy & ~SCTP_PR_SCTP_MASK)
+ if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
@@ -7175,7 +7158,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
goto out;
}
- if (policy == SCTP_PR_SCTP_NONE) {
+ if (policy == SCTP_PR_SCTP_ALL) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
@@ -8460,17 +8443,11 @@ static void sctp_wfree(struct sk_buff *skb)
struct sctp_association *asoc = chunk->asoc;
struct sock *sk = asoc->base.sk;
- asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
- sizeof(struct sk_buff) +
- sizeof(struct sctp_chunk);
-
- WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));
-
- /*
- * This undoes what is done via sctp_set_owner_w and sk_mem_charge
- */
- sk->sk_wmem_queued -= skb->truesize;
sk_mem_uncharge(sk, skb->truesize);
+ sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
+ asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
+ WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
+ &sk->sk_wmem_alloc));
if (chunk->shkey) {
struct sctp_shared_key *shkey = chunk->shkey;
@@ -8544,7 +8521,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
goto do_error;
if (signal_pending(current))
goto do_interrupted;
- if (msg_len <= sctp_wspace(asoc))
+ if ((int)msg_len <= sctp_wspace(asoc))
break;
/* Let another process have a go. Since we are going
@@ -8619,14 +8596,9 @@ void sctp_write_space(struct sock *sk)
* UDP-style sockets or TCP-style sockets, this code should work.
* - Daisy
*/
-static int sctp_writeable(struct sock *sk)
+static bool sctp_writeable(struct sock *sk)
{
- int amt = 0;
-
- amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
- if (amt < 0)
- amt = 0;
- return amt;
+ return sk->sk_sndbuf > sk->sk_wmem_queued;
}
/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 12cac85da994..033696e6f74f 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -260,6 +260,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
struct dst_entry *dst = sctp_transport_dst_check(t);
+ struct sock *sk = t->asoc->base.sk;
bool change = true;
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
@@ -271,12 +272,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
pmtu = SCTP_TRUNC4(pmtu);
if (dst) {
- dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
+ struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
+ union sctp_addr addr;
+
+ pf->af->from_sk(&addr, sk);
+ pf->to_sk_daddr(&t->ipaddr, sk);
+ dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+ pf->to_sk_daddr(&addr, sk);
+
dst = sctp_transport_dst_check(t);
}
if (!dst) {
- t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
+ t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
dst = t->dst;
}
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 0b427100b0d4..331cc734e3db 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -459,7 +459,7 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ul
* element in the queue, then count it towards
* possible PD.
*/
- if (pos == ulpq->reasm.next) {
+ if (skb_queue_is_first(&ulpq->reasm, pos)) {
pd_first = pos;
pd_last = pos;
pd_len = pos->len;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 2d8a1e15e4f9..80e2119f1c70 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -742,7 +742,10 @@ static void smc_connect_work(struct work_struct *work)
smc->sk.sk_err = -rc;
out:
- smc->sk.sk_state_change(&smc->sk);
+ if (smc->sk.sk_err)
+ smc->sk.sk_state_change(&smc->sk);
+ else
+ smc->sk.sk_write_space(&smc->sk);
kfree(smc->connect_info);
smc->connect_info = NULL;
release_sock(&smc->sk);
@@ -1150,9 +1153,9 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
}
/* listen worker: finish RDMA setup */
-static void smc_listen_rdma_finish(struct smc_sock *new_smc,
- struct smc_clc_msg_accept_confirm *cclc,
- int local_contact)
+static int smc_listen_rdma_finish(struct smc_sock *new_smc,
+ struct smc_clc_msg_accept_confirm *cclc,
+ int local_contact)
{
struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
int reason_code = 0;
@@ -1175,11 +1178,12 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc,
if (reason_code)
goto decline;
}
- return;
+ return 0;
decline:
mutex_unlock(&smc_create_lgr_pending);
smc_listen_decline(new_smc, reason_code, local_contact);
+ return reason_code;
}
/* setup for RDMA connection of server */
@@ -1276,8 +1280,10 @@ static void smc_listen_work(struct work_struct *work)
}
/* finish worker */
- if (!ism_supported)
- smc_listen_rdma_finish(new_smc, &cclc, local_contact);
+ if (!ism_supported) {
+ if (smc_listen_rdma_finish(new_smc, &cclc, local_contact))
+ return;
+ }
smc_conn_save_peer_info(new_smc, &cclc);
mutex_unlock(&smc_create_lgr_pending);
smc_listen_out_connected(new_smc);
@@ -1529,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
return EPOLLNVAL;
smc = smc_sk(sock->sk);
- if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
+ if (smc->use_fallback) {
/* delegate to CLC child sock */
mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
sk->sk_err = smc->clcsock->sk->sk_err;
@@ -1537,7 +1543,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
mask |= EPOLLERR;
} else {
if (sk->sk_state != SMC_CLOSED)
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
if (sk->sk_err)
mask |= EPOLLERR;
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
@@ -1560,9 +1566,9 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
if (sk->sk_state == SMC_APPCLOSEWAIT1)
mask |= EPOLLIN;
+ if (smc->conn.urg_state == SMC_URG_VALID)
+ mask |= EPOLLPRI;
}
- if (smc->conn.urg_state == SMC_URG_VALID)
- mask |= EPOLLPRI;
}
return mask;
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 83aba9ade060..52241d679cc9 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -446,14 +446,12 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
vec[i++].iov_len = sizeof(trl);
/* due to the few bytes needed for clc-handshake this cannot block */
len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
- if (len < sizeof(pclc)) {
- if (len >= 0) {
- reason_code = -ENETUNREACH;
- smc->sk.sk_err = -reason_code;
- } else {
- smc->sk.sk_err = smc->clcsock->sk->sk_err;
- reason_code = -smc->sk.sk_err;
- }
+ if (len < 0) {
+ smc->sk.sk_err = smc->clcsock->sk->sk_err;
+ reason_code = -smc->sk.sk_err;
+ } else if (len < (int)sizeof(pclc)) {
+ reason_code = -ENETUNREACH;
+ smc->sk.sk_err = -reason_code;
}
return reason_code;
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index ac961dfb1ea1..ea2b87f29469 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -100,15 +100,14 @@ static void smc_close_active_abort(struct smc_sock *smc)
struct smc_cdc_conn_state_flags *txflags =
&smc->conn.local_tx_ctrl.conn_state_flags;
- sk->sk_err = ECONNABORTED;
- if (smc->clcsock && smc->clcsock->sk) {
- smc->clcsock->sk->sk_err = ECONNABORTED;
- smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+ if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
+ sk->sk_err = ECONNABORTED;
+ if (smc->clcsock && smc->clcsock->sk) {
+ smc->clcsock->sk->sk_err = ECONNABORTED;
+ smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+ }
}
switch (sk->sk_state) {
- case SMC_INIT:
- sk->sk_state = SMC_PEERABORTWAIT;
- break;
case SMC_ACTIVE:
sk->sk_state = SMC_PEERABORTWAIT;
release_sock(sk);
@@ -143,6 +142,7 @@ static void smc_close_active_abort(struct smc_sock *smc)
case SMC_PEERFINCLOSEWAIT:
sock_put(sk); /* passive closing */
break;
+ case SMC_INIT:
case SMC_PEERABORTWAIT:
case SMC_CLOSED:
break;
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 01c6ce042a1c..7cb3e4f07c10 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -461,7 +461,7 @@ static const struct genl_ops smc_pnet_ops[] = {
};
/* SMC_PNETID family definition */
-static struct genl_family smc_pnet_nl_family = {
+static struct genl_family smc_pnet_nl_family __ro_after_init = {
.hdrsize = 0,
.name = SMCR_GENL_FAMILY_NAME,
.version = SMCR_GENL_FAMILY_VERSION,
diff --git a/net/socket.c b/net/socket.c
index e6945e318f02..99c96851469f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -941,7 +941,8 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
EXPORT_SYMBOL(dlci_ioctl_set);
static long sock_do_ioctl(struct net *net, struct socket *sock,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg,
+ unsigned int ifreq_size)
{
int err;
void __user *argp = (void __user *)arg;
@@ -967,11 +968,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
} else {
struct ifreq ifr;
bool need_copyout;
- if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
+ if (copy_from_user(&ifr, argp, ifreq_size))
return -EFAULT;
err = dev_ioctl(net, cmd, &ifr, &need_copyout);
if (!err && need_copyout)
- if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
+ if (copy_to_user(argp, &ifr, ifreq_size))
return -EFAULT;
}
return err;
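
The new ifreq_size parameter exists because 32-bit compat callers pass a struct compat_ifreq, which is smaller than the native struct ifreq (32 versus 40 bytes on a typical 64-bit kernel), so copying sizeof(struct ifreq) unconditionally would touch bytes past the user-supplied object. A reduced sketch of the hazard the parameter closes; argp and ifreq_size are as in the function above:

	struct ifreq ifr;

	/* wrong for compat callers: reaches past a struct compat_ifreq */
	if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
		return -EFAULT;

	/* correct: bounded by the layout the caller actually used */
	if (copy_from_user(&ifr, argp, ifreq_size))
		return -EFAULT;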
@@ -1070,7 +1071,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
err = open_related_ns(&net->ns, get_net_ns);
break;
default:
- err = sock_do_ioctl(net, sock, cmd, arg);
+ err = sock_do_ioctl(net, sock, cmd, arg,
+ sizeof(struct ifreq));
break;
}
return err;
@@ -1473,7 +1475,7 @@ int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen)
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock) {
err = move_addr_to_kernel(umyaddr, addrlen, &address);
- if (err >= 0) {
+ if (!err) {
err = security_socket_bind(sock,
(struct sockaddr *)&address,
addrlen);
@@ -2340,7 +2342,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct user_msghdr __user *, msg,
*/
int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
- unsigned int flags, struct timespec *timeout)
+ unsigned int flags, struct timespec64 *timeout)
{
int fput_needed, err, datagrams;
struct socket *sock;
@@ -2405,8 +2407,7 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
if (timeout) {
ktime_get_ts64(&timeout64);
- *timeout = timespec64_to_timespec(
- timespec64_sub(end_time, timeout64));
+ *timeout = timespec64_sub(end_time, timeout64);
if (timeout->tv_sec < 0) {
timeout->tv_sec = timeout->tv_nsec = 0;
break;
@@ -2452,10 +2453,10 @@ out_put:
static int do_sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
- struct timespec __user *timeout)
+ struct __kernel_timespec __user *timeout)
{
int datagrams;
- struct timespec timeout_sys;
+ struct timespec64 timeout_sys;
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
@@ -2463,13 +2464,12 @@ static int do_sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
if (!timeout)
return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
- if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys)))
+ if (get_timespec64(&timeout_sys, timeout))
return -EFAULT;
datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys);
- if (datagrams > 0 &&
- copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys)))
+ if (datagrams > 0 && put_timespec64(&timeout_sys, timeout))
datagrams = -EFAULT;
return datagrams;
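
This is part of the y2038 conversion: the remaining-time round trip stays in struct timespec64 inside the kernel and crosses the user boundary through get_timespec64()/put_timespec64() against a struct __kernel_timespec. A minimal sketch of that boundary pattern, with uptr as an assumed struct __kernel_timespec __user pointer:

	struct timespec64 ts;

	if (get_timespec64(&ts, uptr))	/* copy in, 64-bit safe seconds */
		return -EFAULT;
	/* ... operate on ts.tv_sec / ts.tv_nsec ... */
	if (put_timespec64(&ts, uptr))	/* copy remaining time back out */
		return -EFAULT;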
@@ -2477,7 +2477,7 @@ static int do_sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags,
- struct timespec __user *, timeout)
+ struct __kernel_timespec __user *, timeout)
{
return do_sys_recvmmsg(fd, mmsg, vlen, flags, timeout);
}
@@ -2601,7 +2601,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
break;
case SYS_RECVMMSG:
err = do_sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2],
- a[3], (struct timespec __user *)a[4]);
+ a[3], (struct __kernel_timespec __user *)a[4]);
break;
case SYS_ACCEPT4:
err = __sys_accept4(a0, (struct sockaddr __user *)a1,
@@ -2750,7 +2750,8 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
int err;
set_fs(KERNEL_DS);
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
+ sizeof(struct compat_ifreq));
set_fs(old_fs);
if (!err)
err = compat_put_timeval(&ktv, up);
@@ -2766,7 +2767,8 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
int err;
set_fs(KERNEL_DS);
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
+ sizeof(struct compat_ifreq));
set_fs(old_fs);
if (!err)
err = compat_put_timespec(&kts, up);
@@ -2871,9 +2873,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
copy_in_user(&rxnfc->fs.ring_cookie,
&compat_rxnfc->fs.ring_cookie,
(void __user *)(&rxnfc->fs.location + 1) -
- (void __user *)&rxnfc->fs.ring_cookie) ||
- copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
- sizeof(rxnfc->rule_cnt)))
+ (void __user *)&rxnfc->fs.ring_cookie))
+ return -EFAULT;
+ if (ethcmd == ETHTOOL_GRXCLSRLALL) {
+ if (put_user(rule_cnt, &rxnfc->rule_cnt))
+ return -EFAULT;
+ } else if (copy_in_user(&rxnfc->rule_cnt,
+ &compat_rxnfc->rule_cnt,
+ sizeof(rxnfc->rule_cnt)))
return -EFAULT;
}
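
For ETHTOOL_GRXCLSRLALL the hunk writes the kernel's already-validated rule_cnt back with put_user() rather than copying the field from the compat buffer a second time; a second fetch would let a racing thread substitute a larger count after the bounds check. A generic sketch of that double-fetch hazard, all names hypothetical:

struct demo_req {
	u32 cnt;
	u64 data[];		/* flexible array in user memory */
};

static int demo_read(struct demo_req __user *uptr, u64 *buf)
{
	u32 cnt;

	if (get_user(cnt, &uptr->cnt))	/* fetch the count exactly once */
		return -EFAULT;
	if (cnt > 64)
		return -EINVAL;
	/* re-reading uptr->cnt here would reopen the race (double fetch) */
	if (copy_from_user(buf, uptr->data, cnt * sizeof(*buf)))
		return -EFAULT;
	return cnt;
}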
@@ -3072,7 +3079,8 @@ static int routing_ioctl(struct net *net, struct socket *sock,
}
set_fs(KERNEL_DS);
- ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
+ ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
+ sizeof(struct compat_ifreq));
set_fs(old_fs);
out:
@@ -3185,7 +3193,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCBONDSETHWADDR:
case SIOCBONDCHANGEACTIVE:
case SIOCGIFNAME:
- return sock_do_ioctl(net, sock, cmd, arg);
+ return sock_do_ioctl(net, sock, cmd, arg,
+ sizeof(struct compat_ifreq));
}
return -ENOIOCTLCMD;
diff --git a/net/strparser/Kconfig b/net/strparser/Kconfig
index 6cff3f6d0c3a..94da19a2a220 100644
--- a/net/strparser/Kconfig
+++ b/net/strparser/Kconfig
@@ -1,4 +1,2 @@
-
config STREAM_PARSER
- tristate
- default n
+ def_bool n
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 0220e1ca5280..4f43383971ba 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -53,7 +53,7 @@
u32
krb5_encrypt(
- struct crypto_skcipher *tfm,
+ struct crypto_sync_skcipher *tfm,
void * iv,
void * in,
void * out,
@@ -62,24 +62,24 @@ krb5_encrypt(
u32 ret = -EINVAL;
struct scatterlist sg[1];
u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- if (length % crypto_skcipher_blocksize(tfm) != 0)
+ if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
goto out;
- if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+ if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
- crypto_skcipher_ivsize(tfm));
+ crypto_sync_skcipher_ivsize(tfm));
goto out;
}
if (iv)
- memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));
+ memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
memcpy(out, in, length);
sg_init_one(sg, out, length);
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, length, local_iv);
@@ -92,7 +92,7 @@ out:
u32
krb5_decrypt(
- struct crypto_skcipher *tfm,
+ struct crypto_sync_skcipher *tfm,
void * iv,
void * in,
void * out,
@@ -101,23 +101,23 @@ krb5_decrypt(
u32 ret = -EINVAL;
struct scatterlist sg[1];
u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- if (length % crypto_skcipher_blocksize(tfm) != 0)
+ if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
goto out;
- if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+ if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
- crypto_skcipher_ivsize(tfm));
+ crypto_sync_skcipher_ivsize(tfm));
goto out;
}
if (iv)
- memcpy(local_iv,iv, crypto_skcipher_ivsize(tfm));
+ memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
memcpy(out, in, length);
sg_init_one(sg, out, length);
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, length, local_iv);
@@ -466,7 +466,8 @@ encryptor(struct scatterlist *sg, void *data)
{
struct encryptor_desc *desc = data;
struct xdr_buf *outbuf = desc->outbuf;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
+ struct crypto_sync_skcipher *tfm =
+ crypto_sync_skcipher_reqtfm(desc->req);
struct page *in_page;
int thislen = desc->fraglen + sg->length;
int fraglen, ret;
@@ -492,7 +493,7 @@ encryptor(struct scatterlist *sg, void *data)
desc->fraglen += sg->length;
desc->pos += sg->length;
- fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
+ fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
@@ -526,16 +527,16 @@ encryptor(struct scatterlist *sg, void *data)
}
int
-gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
+gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
int offset, struct page **pages)
{
int ret;
struct encryptor_desc desc;
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
+ BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
memset(desc.iv, 0, sizeof(desc.iv));
@@ -567,7 +568,8 @@ decryptor(struct scatterlist *sg, void *data)
{
struct decryptor_desc *desc = data;
int thislen = desc->fraglen + sg->length;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
+ struct crypto_sync_skcipher *tfm =
+ crypto_sync_skcipher_reqtfm(desc->req);
int fraglen, ret;
/* Worst case is 4 fragments: head, end of page 1, start
@@ -578,7 +580,7 @@ decryptor(struct scatterlist *sg, void *data)
desc->fragno++;
desc->fraglen += sg->length;
- fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
+ fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
@@ -608,17 +610,17 @@ decryptor(struct scatterlist *sg, void *data)
}
int
-gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
+gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
int offset)
{
int ret;
struct decryptor_desc desc;
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
/* XXXJBF: */
- BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
+ BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
memset(desc.iv, 0, sizeof(desc.iv));
@@ -672,12 +674,12 @@ xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
}
static u32
-gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
+gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
u32 offset, u8 *iv, struct page **pages, int encrypt)
{
u32 ret;
struct scatterlist sg[1];
- SKCIPHER_REQUEST_ON_STACK(req, cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
u8 *data;
struct page **save_pages;
u32 len = buf->len - offset;
@@ -706,7 +708,7 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
sg_init_one(sg, data, len);
- skcipher_request_set_tfm(req, cipher);
+ skcipher_request_set_sync_tfm(req, cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, len, iv);
@@ -735,7 +737,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
struct xdr_netobj hmac;
u8 *cksumkey;
u8 *ecptr;
- struct crypto_skcipher *cipher, *aux_cipher;
+ struct crypto_sync_skcipher *cipher, *aux_cipher;
int blocksize;
struct page **save_pages;
int nblocks, nbytes;
@@ -754,7 +756,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
cksumkey = kctx->acceptor_integ;
usage = KG_USAGE_ACCEPTOR_SEAL;
}
- blocksize = crypto_skcipher_blocksize(cipher);
+ blocksize = crypto_sync_skcipher_blocksize(cipher);
/* hide the gss token header and insert the confounder */
offset += GSS_KRB5_TOK_HDR_LEN;
@@ -807,7 +809,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
memset(desc.iv, 0, sizeof(desc.iv));
if (cbcbytes) {
- SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
desc.fragno = 0;
@@ -816,7 +818,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
desc.outbuf = buf;
desc.req = req;
- skcipher_request_set_tfm(req, aux_cipher);
+ skcipher_request_set_sync_tfm(req, aux_cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
sg_init_table(desc.infrags, 4);
@@ -855,7 +857,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
struct xdr_buf subbuf;
u32 ret = 0;
u8 *cksum_key;
- struct crypto_skcipher *cipher, *aux_cipher;
+ struct crypto_sync_skcipher *cipher, *aux_cipher;
struct xdr_netobj our_hmac_obj;
u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
@@ -874,7 +876,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
cksum_key = kctx->initiator_integ;
usage = KG_USAGE_INITIATOR_SEAL;
}
- blocksize = crypto_skcipher_blocksize(cipher);
+ blocksize = crypto_sync_skcipher_blocksize(cipher);
/* create a segment skipping the header and leaving out the checksum */
@@ -891,13 +893,13 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
memset(desc.iv, 0, sizeof(desc.iv));
if (cbcbytes) {
- SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
desc.fragno = 0;
desc.fraglen = 0;
desc.req = req;
- skcipher_request_set_tfm(req, aux_cipher);
+ skcipher_request_set_sync_tfm(req, aux_cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
sg_init_table(desc.frags, 4);
@@ -946,7 +948,8 @@ out_err:
* Set the key of the given cipher.
*/
int
-krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
+krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
+ struct crypto_sync_skcipher *cipher,
unsigned char *cksum)
{
struct crypto_shash *hmac;
@@ -994,7 +997,7 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
if (err)
goto out_err;
- err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
+ err = crypto_sync_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
if (err)
goto out_err;
@@ -1012,7 +1015,8 @@ out_err:
* Set the key of cipher kctx->enc.
*/
int
-krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
+krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
+ struct crypto_sync_skcipher *cipher,
s32 seqnum)
{
struct crypto_shash *hmac;
@@ -1069,7 +1073,8 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
if (err)
goto out_err;
- err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
+ err = crypto_sync_skcipher_setkey(cipher, Kcrypt,
+ kctx->gk5e->keylength);
if (err)
goto out_err;
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
index f7fe2d2b851f..550fdf18d3b3 100644
--- a/net/sunrpc/auth_gss/gss_krb5_keys.c
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -147,7 +147,7 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
size_t blocksize, keybytes, keylength, n;
unsigned char *inblockdata, *outblockdata, *rawkey;
struct xdr_netobj inblock, outblock;
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
u32 ret = EINVAL;
blocksize = gk5e->blocksize;
@@ -157,11 +157,10 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
if ((inkey->len != keylength) || (outkey->len != keylength))
goto err_return;
- cipher = crypto_alloc_skcipher(gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ cipher = crypto_alloc_sync_skcipher(gk5e->encrypt_name, 0, 0);
if (IS_ERR(cipher))
goto err_return;
- if (crypto_skcipher_setkey(cipher, inkey->data, inkey->len))
+ if (crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len))
goto err_return;
/* allocate and set up buffers */
@@ -238,7 +237,7 @@ err_free_in:
memset(inblockdata, 0, blocksize);
kfree(inblockdata);
err_free_cipher:
- crypto_free_skcipher(cipher);
+ crypto_free_sync_skcipher(cipher);
err_return:
return ret;
}
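
Every gss_krb5 conversion in this series follows the same recipe: allocate a crypto_sync_skcipher, keep the request on the stack with SYNC_SKCIPHER_REQUEST_ON_STACK(), and bind it with skcipher_request_set_sync_tfm(). A condensed sketch of one synchronous CBC encryption under that API; demo_encrypt is illustrative, not code from the patch:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int demo_encrypt(const u8 *key, unsigned int keylen,
			u8 *buf, unsigned int len, u8 *iv)
{
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		sg_init_one(&sg, buf, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_skcipher_encrypt(req);	/* runs synchronously */
		skcipher_request_zero(req);
	}
	crypto_free_sync_skcipher(tfm);
	return err;
}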
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 7bb2514aadd9..7f0424dfa8f6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -218,7 +218,7 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
static inline const void *
get_key(const void *p, const void *end,
- struct krb5_ctx *ctx, struct crypto_skcipher **res)
+ struct krb5_ctx *ctx, struct crypto_sync_skcipher **res)
{
struct xdr_netobj key;
int alg;
@@ -246,15 +246,14 @@ get_key(const void *p, const void *end,
if (IS_ERR(p))
goto out_err;
- *res = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ *res = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
if (IS_ERR(*res)) {
printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
*res = NULL;
goto out_err_free_key;
}
- if (crypto_skcipher_setkey(*res, key.data, key.len)) {
+ if (crypto_sync_skcipher_setkey(*res, key.data, key.len)) {
printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
goto out_err_free_tfm;
@@ -264,7 +263,7 @@ get_key(const void *p, const void *end,
return p;
out_err_free_tfm:
- crypto_free_skcipher(*res);
+ crypto_free_sync_skcipher(*res);
out_err_free_key:
kfree(key.data);
p = ERR_PTR(-EINVAL);
@@ -336,30 +335,30 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
return 0;
out_err_free_key2:
- crypto_free_skcipher(ctx->seq);
+ crypto_free_sync_skcipher(ctx->seq);
out_err_free_key1:
- crypto_free_skcipher(ctx->enc);
+ crypto_free_sync_skcipher(ctx->enc);
out_err_free_mech:
kfree(ctx->mech_used.data);
out_err:
return PTR_ERR(p);
}
-static struct crypto_skcipher *
+static struct crypto_sync_skcipher *
context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
{
- struct crypto_skcipher *cp;
+ struct crypto_sync_skcipher *cp;
- cp = crypto_alloc_skcipher(cname, 0, CRYPTO_ALG_ASYNC);
+ cp = crypto_alloc_sync_skcipher(cname, 0, 0);
if (IS_ERR(cp)) {
dprintk("gss_kerberos_mech: unable to initialize "
"crypto algorithm %s\n", cname);
return NULL;
}
- if (crypto_skcipher_setkey(cp, key, ctx->gk5e->keylength)) {
+ if (crypto_sync_skcipher_setkey(cp, key, ctx->gk5e->keylength)) {
dprintk("gss_kerberos_mech: error setting key for "
"crypto algorithm %s\n", cname);
- crypto_free_skcipher(cp);
+ crypto_free_sync_skcipher(cp);
return NULL;
}
return cp;
@@ -413,9 +412,9 @@ context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
return 0;
out_free_enc:
- crypto_free_skcipher(ctx->enc);
+ crypto_free_sync_skcipher(ctx->enc);
out_free_seq:
- crypto_free_skcipher(ctx->seq);
+ crypto_free_sync_skcipher(ctx->seq);
out_err:
return -EINVAL;
}
@@ -469,17 +468,15 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
/*
* allocate hash, and skciphers for data and seqnum encryption
*/
- ctx->enc = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ ctx->enc = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
if (IS_ERR(ctx->enc)) {
err = PTR_ERR(ctx->enc);
goto out_err_free_hmac;
}
- ctx->seq = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ ctx->seq = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
if (IS_ERR(ctx->seq)) {
- crypto_free_skcipher(ctx->enc);
+ crypto_free_sync_skcipher(ctx->enc);
err = PTR_ERR(ctx->seq);
goto out_err_free_hmac;
}
@@ -591,7 +588,7 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
context_v2_alloc_cipher(ctx, "cbc(aes)",
ctx->acceptor_seal);
if (ctx->acceptor_enc_aux == NULL) {
- crypto_free_skcipher(ctx->initiator_enc_aux);
+ crypto_free_sync_skcipher(ctx->initiator_enc_aux);
goto out_free_acceptor_enc;
}
}
@@ -599,9 +596,9 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
return 0;
out_free_acceptor_enc:
- crypto_free_skcipher(ctx->acceptor_enc);
+ crypto_free_sync_skcipher(ctx->acceptor_enc);
out_free_initiator_enc:
- crypto_free_skcipher(ctx->initiator_enc);
+ crypto_free_sync_skcipher(ctx->initiator_enc);
out_err:
return -EINVAL;
}
@@ -713,12 +710,12 @@ static void
gss_delete_sec_context_kerberos(void *internal_ctx) {
struct krb5_ctx *kctx = internal_ctx;
- crypto_free_skcipher(kctx->seq);
- crypto_free_skcipher(kctx->enc);
- crypto_free_skcipher(kctx->acceptor_enc);
- crypto_free_skcipher(kctx->initiator_enc);
- crypto_free_skcipher(kctx->acceptor_enc_aux);
- crypto_free_skcipher(kctx->initiator_enc_aux);
+ crypto_free_sync_skcipher(kctx->seq);
+ crypto_free_sync_skcipher(kctx->enc);
+ crypto_free_sync_skcipher(kctx->acceptor_enc);
+ crypto_free_sync_skcipher(kctx->initiator_enc);
+ crypto_free_sync_skcipher(kctx->acceptor_enc_aux);
+ crypto_free_sync_skcipher(kctx->initiator_enc_aux);
kfree(kctx->mech_used.data);
kfree(kctx);
}
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index c8b9082f4a9d..fb6656295204 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -43,13 +43,12 @@ static s32
krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
unsigned char *cksum, unsigned char *buf)
{
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
unsigned char plain[8];
s32 code;
dprintk("RPC: %s:\n", __func__);
- cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, 0, 0);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
@@ -68,12 +67,12 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
code = krb5_encrypt(cipher, cksum, plain, buf, 8);
out:
- crypto_free_skcipher(cipher);
+ crypto_free_sync_skcipher(cipher);
return code;
}
s32
krb5_make_seq_num(struct krb5_ctx *kctx,
- struct crypto_skcipher *key,
+ struct crypto_sync_skcipher *key,
int direction,
u32 seqnum,
unsigned char *cksum, unsigned char *buf)
@@ -101,13 +100,12 @@ static s32
krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
unsigned char *buf, int *direction, s32 *seqnum)
{
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
unsigned char plain[8];
s32 code;
dprintk("RPC: %s:\n", __func__);
- cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, 0, 0);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
@@ -130,7 +128,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
*seqnum = ((plain[0] << 24) | (plain[1] << 16) |
(plain[2] << 8) | (plain[3]));
out:
- crypto_free_skcipher(cipher);
+ crypto_free_sync_skcipher(cipher);
return code;
}
@@ -142,7 +140,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
{
s32 code;
unsigned char plain[8];
- struct crypto_skcipher *key = kctx->seq;
+ struct crypto_sync_skcipher *key = kctx->seq;
dprintk("RPC: krb5_get_seq_num:\n");
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 39a2e672900b..3d975a4013d2 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -174,7 +174,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
now = get_seconds();
- blocksize = crypto_skcipher_blocksize(kctx->enc);
+ blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
gss_krb5_add_padding(buf, offset, blocksize);
BUG_ON((buf->len - offset) % blocksize);
plainlen = conflen + buf->len - offset;
@@ -239,10 +239,10 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
return GSS_S_FAILURE;
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
int err;
- cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
+ 0, 0);
if (IS_ERR(cipher))
return GSS_S_FAILURE;
@@ -250,7 +250,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
err = gss_encrypt_xdr_buf(cipher, buf,
offset + headlen - conflen, pages);
- crypto_free_skcipher(cipher);
+ crypto_free_sync_skcipher(cipher);
if (err)
return GSS_S_FAILURE;
} else {
@@ -327,18 +327,18 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
return GSS_S_BAD_SIG;
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
int err;
- cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
+ 0, 0);
if (IS_ERR(cipher))
return GSS_S_FAILURE;
krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
- crypto_free_skcipher(cipher);
+ crypto_free_sync_skcipher(cipher);
if (err)
return GSS_S_DEFECTIVE_TOKEN;
} else {
@@ -371,7 +371,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
/* Copy the data back to the right position. XXX: Would probably be
* better to copy and encrypt at the same time. */
- blocksize = crypto_skcipher_blocksize(kctx->enc);
+ blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
conflen;
orig_start = buf->head[0].iov_base + offset;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 418f03d0be90..e65c3a8551e4 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -577,7 +577,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
rcu_dereference_rtnl(orig_dev->tipc_ptr);
if (likely(b && test_bit(0, &b->up) &&
(skb->pkt_type <= PACKET_MULTICAST))) {
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
tipc_rcv(dev_net(b->pt.dev), skb, b);
rcu_read_unlock();
return NET_RX_SUCCESS;
@@ -609,16 +609,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
switch (evt) {
case NETDEV_CHANGE:
- if (netif_carrier_ok(dev))
+ if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
+ test_and_set_bit_lock(0, &b->up);
break;
- /* else: fall through */
- case NETDEV_UP:
- test_and_set_bit_lock(0, &b->up);
- break;
+ }
+ /* fall through */
case NETDEV_GOING_DOWN:
clear_bit_unlock(0, &b->up);
tipc_reset_bearer(net, b);
break;
+ case NETDEV_UP:
+ test_and_set_bit_lock(0, &b->up);
+ break;
case NETDEV_CHANGEMTU:
if (tipc_mtu_bad(dev, 0)) {
bearer_disable(net, b);
diff --git a/net/tipc/group.c b/net/tipc/group.c
index e82f13cb2dc5..06fee142f09f 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -666,6 +666,7 @@ static void tipc_group_create_event(struct tipc_group *grp,
struct sk_buff *skb;
struct tipc_msg *hdr;
+ memset(&evt, 0, sizeof(evt));
evt.event = event;
evt.found_lower = m->instance;
evt.found_upper = m->instance;
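
The added memset() is an infoleak fix: the event struct contains bytes the function never writes (padding and unset members), and without zeroing first those stack bytes would be copied out verbatim with the rest of the struct. The general rule, with a hypothetical struct:

struct demo_evt {
	u8  type;	/* typically followed by 3 bytes of padding */
	u32 value;
};

static void demo_fill(struct demo_evt *evt)
{
	memset(evt, 0, sizeof(*evt));	/* clear padding and unset fields */
	evt->type = 1;
	evt->value = 42;
	/* copying sizeof(*evt) bytes out now cannot leak stack data */
}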
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b1f0bee54eac..201c3b5bc96b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -410,6 +410,11 @@ char *tipc_link_name(struct tipc_link *l)
return l->name;
}
+u32 tipc_link_state(struct tipc_link *l)
+{
+ return l->state;
+}
+
/**
* tipc_link_create - create a new link
* @n: pointer to associated node
@@ -472,6 +477,8 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
l->in_session = false;
l->bearer_id = bearer_id;
l->tolerance = tolerance;
+ if (bc_rcvlink)
+ bc_rcvlink->tolerance = tolerance;
l->net_plane = net_plane;
l->advertised_mtu = mtu;
l->mtu = mtu;
@@ -838,12 +845,24 @@ static void link_prepare_wakeup(struct tipc_link *l)
void tipc_link_reset(struct tipc_link *l)
{
+ struct sk_buff_head list;
+
+ __skb_queue_head_init(&list);
+
l->in_session = false;
l->session++;
l->mtu = l->advertised_mtu;
+
+ spin_lock_bh(&l->wakeupq.lock);
+ skb_queue_splice_init(&l->wakeupq, &list);
+ spin_unlock_bh(&l->wakeupq.lock);
+
+ spin_lock_bh(&l->inputq->lock);
+ skb_queue_splice_init(&list, l->inputq);
+ spin_unlock_bh(&l->inputq->lock);
+
__skb_queue_purge(&l->transmq);
__skb_queue_purge(&l->deferdq);
- skb_queue_splice_init(&l->wakeupq, l->inputq);
__skb_queue_purge(&l->backlogq);
l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
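
tipc_link_reset() previously spliced wakeupq into inputq with neither queue lock held; the rewrite drains the source into a local list and fills the destination in a second step, so each queue lock is taken on its own and never nested. The pattern in isolation, names hypothetical:

#include <linux/skbuff.h>

static void demo_move_queue(struct sk_buff_head *src, struct sk_buff_head *dst)
{
	struct sk_buff_head tmp;

	__skb_queue_head_init(&tmp);

	spin_lock_bh(&src->lock);
	skb_queue_splice_init(src, &tmp);	/* drain src under its lock */
	spin_unlock_bh(&src->lock);

	spin_lock_bh(&dst->lock);
	skb_queue_splice_init(&tmp, dst);	/* refill dst under its lock */
	spin_unlock_bh(&dst->lock);
}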
@@ -1021,7 +1040,8 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
/* Detect repeated retransmit failures on same packet */
if (r->last_retransm != buf_seqno(skb)) {
r->last_retransm = buf_seqno(skb);
- r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
+ r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
+ r->stale_cnt = 0;
} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
link_retransmit_failure(l, skb);
if (link_is_bc_sndlink(l))
@@ -1380,6 +1400,36 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
__skb_queue_tail(xmitq, skb);
}
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
+{
+ u32 onode = tipc_own_addr(l->net);
+ struct tipc_msg *hdr, *ihdr;
+ struct sk_buff_head tnlq;
+ struct sk_buff *skb;
+ u32 dnode = l->addr;
+
+ skb_queue_head_init(&tnlq);
+ skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
+ INT_H_SIZE, BASIC_H_SIZE,
+ dnode, onode, 0, 0, 0);
+ if (!skb) {
+ pr_warn("%sunable to create tunnel packet\n", link_co_err);
+ return;
+ }
+
+ hdr = buf_msg(skb);
+ msg_set_msgcnt(hdr, 1);
+ msg_set_bearer_id(hdr, l->peer_bearer_id);
+
+ ihdr = (struct tipc_msg *)msg_data(hdr);
+ tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+ BASIC_H_SIZE, dnode);
+ msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
+ __skb_queue_tail(&tnlq, skb);
+ tipc_link_xmit(l, &tnlq, xmitq);
+}
+
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
* with contents of the link's transmit and backlog queues.
*/
@@ -1476,6 +1526,9 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
return false;
if (session != curr_session)
return false;
+ /* Extra sanity check */
+ if (!link_is_up(l) && msg_ack(hdr))
+ return false;
if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
return true;
/* Accept only STATE with new sequence number */
@@ -1533,9 +1586,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
strncpy(if_name, data, TIPC_MAX_IF_NAME);
/* Update own tolerance if peer indicates a non-zero value */
- if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
-
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
/* Update own priority if peer's priority is higher */
if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
l->priority = peers_prio;
@@ -1561,9 +1615,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
l->rcv_nxt_state = msg_seqno(hdr) + 1;
/* Update own tolerance if peer indicates a non-zero value */
- if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
-
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
/* Update own prio if peer indicates a different value */
if ((peers_prio != l->priority) &&
in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
@@ -2180,6 +2235,8 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
struct sk_buff_head *xmitq)
{
l->tolerance = tol;
+ if (l->bc_rcvlink)
+ l->bc_rcvlink->tolerance = tol;
if (link_is_up(l))
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 7bc494a33fdf..90488c538a4e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -88,6 +88,8 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
struct tipc_link **link);
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
+ struct sk_buff_head *xmitq);
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
int tipc_link_fsm_evt(struct tipc_link *l, int evt);
bool tipc_link_is_up(struct tipc_link *l);
@@ -107,6 +109,7 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
u16 tipc_link_acked(struct tipc_link *l);
u32 tipc_link_id(struct tipc_link *l);
char *tipc_link_name(struct tipc_link *l);
+u32 tipc_link_state(struct tipc_link *l);
char tipc_link_plane(struct tipc_link *l);
int tipc_link_prio(struct tipc_link *l);
int tipc_link_window(struct tipc_link *l);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b61891054709..f48e5857210f 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -499,54 +499,56 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
/**
* tipc_msg_reverse(): swap source and destination addresses and add error code
* @own_node: originating node id for reversed message
- * @skb: buffer containing message to be reversed; may be replaced.
+ * @skb: buffer containing message to be reversed; will be consumed
* @err: error code to be set in message, if any
- * Consumes buffer at failure
+ * Replaces consumed buffer with new one when successful
* Returns true if success, otherwise false
*/
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
struct sk_buff *_skb = *skb;
- struct tipc_msg *hdr;
- struct tipc_msg ohdr;
- int dlen;
+ struct tipc_msg *_hdr, *hdr;
+ int hlen, dlen;
if (skb_linearize(_skb))
goto exit;
- hdr = buf_msg(_skb);
- dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
- if (msg_dest_droppable(hdr))
+ _hdr = buf_msg(_skb);
+ dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
+ hlen = msg_hdr_sz(_hdr);
+
+ if (msg_dest_droppable(_hdr))
goto exit;
- if (msg_errcode(hdr))
+ if (msg_errcode(_hdr))
goto exit;
- /* Take a copy of original header before altering message */
- memcpy(&ohdr, hdr, msg_hdr_sz(hdr));
-
- /* Never return SHORT header; expand by replacing buffer if necessary */
- if (msg_short(hdr)) {
- *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
- if (!*skb)
- goto exit;
- memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
- kfree_skb(_skb);
- _skb = *skb;
- hdr = buf_msg(_skb);
- memcpy(hdr, &ohdr, BASIC_H_SIZE);
- msg_set_hdr_sz(hdr, BASIC_H_SIZE);
- }
+ /* Never return SHORT header */
+ if (hlen == SHORT_H_SIZE)
+ hlen = BASIC_H_SIZE;
+
+ /* Don't return data along with SYN+, - sender has a clone */
+ if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
+ dlen = 0;
+
+ /* Allocate new buffer to return */
+ *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
+ if (!*skb)
+ goto exit;
+ memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
+ memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
- /* Now reverse the concerned fields */
+ /* Build reverse header in new buffer */
+ hdr = buf_msg(*skb);
+ msg_set_hdr_sz(hdr, hlen);
msg_set_errcode(hdr, err);
msg_set_non_seq(hdr, 0);
- msg_set_origport(hdr, msg_destport(&ohdr));
- msg_set_destport(hdr, msg_origport(&ohdr));
- msg_set_destnode(hdr, msg_prevnode(&ohdr));
+ msg_set_origport(hdr, msg_destport(_hdr));
+ msg_set_destport(hdr, msg_origport(_hdr));
+ msg_set_destnode(hdr, msg_prevnode(_hdr));
msg_set_prevnode(hdr, own_node);
msg_set_orignode(hdr, own_node);
- msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
- skb_trim(_skb, msg_size(hdr));
+ msg_set_size(hdr, hlen + dlen);
skb_orphan(_skb);
+ kfree_skb(_skb);
return true;
exit:
kfree_skb(_skb);
@@ -554,6 +556,22 @@ exit:
return false;
}
+bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
+{
+ struct sk_buff *skb, *_skb;
+
+ skb_queue_walk(msg, skb) {
+ _skb = skb_clone(skb, GFP_ATOMIC);
+ if (!_skb) {
+ __skb_queue_purge(cpy);
+ pr_err_ratelimited("Failed to clone buffer chain\n");
+ return false;
+ }
+ __skb_queue_tail(cpy, _skb);
+ }
+ return true;
+}
+
/**
* tipc_msg_lookup_dest(): try to find new destination for named message
* @skb: the buffer containing the message.
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index a4e944d59394..a2879e6ec5b6 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -216,6 +216,16 @@ static inline void msg_set_non_seq(struct tipc_msg *m, u32 n)
msg_set_bits(m, 0, 20, 1, n);
}
+static inline int msg_is_syn(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 17, 1);
+}
+
+static inline void msg_set_syn(struct tipc_msg *m, u32 d)
+{
+ msg_set_bits(m, 0, 17, 1, d);
+}
+
static inline int msg_dest_droppable(struct tipc_msg *m)
{
return msg_bits(m, 0, 19, 1);
@@ -970,6 +980,7 @@ bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
struct sk_buff_head *cpy);
void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
struct sk_buff *skb);
+bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy);
static inline u16 buf_seqno(struct sk_buff *skb)
{
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 51b4b96f89db..61219f0b9677 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -94,8 +94,9 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
list_add_tail_rcu(&publ->binding_node, &nt->node_scope);
return NULL;
}
- list_add_tail_rcu(&publ->binding_node, &nt->cluster_scope);
-
+ write_lock_bh(&nt->cluster_scope_lock);
+ list_add_tail(&publ->binding_node, &nt->cluster_scope);
+ write_unlock_bh(&nt->cluster_scope_lock);
skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
if (!skb) {
pr_warn("Publication distribution failure\n");
@@ -112,11 +113,13 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
*/
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
{
+ struct name_table *nt = tipc_name_table(net);
struct sk_buff *buf;
struct distr_item *item;
+ write_lock_bh(&nt->cluster_scope_lock);
list_del(&publ->binding_node);
-
+ write_unlock_bh(&nt->cluster_scope_lock);
if (publ->scope == TIPC_NODE_SCOPE)
return NULL;
@@ -189,11 +192,10 @@ void tipc_named_node_up(struct net *net, u32 dnode)
__skb_queue_head_init(&head);
- rcu_read_lock();
+ read_lock_bh(&nt->cluster_scope_lock);
named_distribute(net, &head, dnode, &nt->cluster_scope);
- rcu_read_unlock();
-
tipc_node_xmit(net, &head, dnode, 0);
+ read_unlock_bh(&nt->cluster_scope_lock);
}
/**
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 66d5b2c5987a..bff241f03525 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -744,6 +744,7 @@ int tipc_nametbl_init(struct net *net)
INIT_LIST_HEAD(&nt->node_scope);
INIT_LIST_HEAD(&nt->cluster_scope);
+ rwlock_init(&nt->cluster_scope_lock);
tn->nametbl = nt;
spin_lock_init(&tn->nametbl_lock);
return 0;
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 892bd750b85f..f79066334cc8 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -100,6 +100,7 @@ struct name_table {
struct hlist_head services[TIPC_NAMETBL_SIZE];
struct list_head node_scope;
struct list_head cluster_scope;
+ rwlock_t cluster_scope_lock;
u32 local_publ_count;
};
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 68014f1b6976..2afc4f8c37a7 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -111,6 +111,7 @@ struct tipc_node {
int action_flags;
struct list_head list;
int state;
+ bool failover_sent;
u16 sync_point;
int link_cnt;
u16 working_links;
@@ -680,6 +681,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
*slot0 = bearer_id;
*slot1 = bearer_id;
tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+ n->failover_sent = false;
n->action_flags |= TIPC_NOTIFY_NODE_UP;
tipc_link_set_active(nl, true);
tipc_bcast_add_peer(n->net, nl, xmitq);
@@ -911,6 +913,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
bool reset = true;
char *if_name;
unsigned long intv;
+ u16 session;
*dupl_addr = false;
*respond = false;
@@ -997,9 +1000,10 @@ void tipc_node_check_dest(struct net *net, u32 addr,
goto exit;
if_name = strchr(b->name, ':') + 1;
+ get_random_bytes(&session, sizeof(u16));
if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
b->net_plane, b->mtu, b->priority,
- b->window, mod(tipc_net(net)->random),
+ b->window, session,
tipc_own_addr(net), addr, peer_id,
n->capabilities,
tipc_bc_sndlink(n->net), n->bc_entry.link,
@@ -1615,6 +1619,14 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
tipc_link_inputq(l));
}
+ /* If parallel link was already down, and this happened before
+ * the tunnel link came up, FAILOVER was never sent. Ensure that
+ * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
+ */
+ if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
+ tipc_link_create_dummy_tnl_msg(l, xmitq);
+ n->failover_sent = true;
+ }
/* If pkts arrive out of order, use lowest calculated syncpt */
if (less(syncpt, n->sync_point))
n->sync_point = syncpt;
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 48b3298a248d..03f5efb62cfb 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -45,6 +45,7 @@
/* Optional capabilities supported by this code version
*/
enum {
+ TIPC_SYN_BIT = (1),
TIPC_BCAST_SYNCH = (1 << 1),
TIPC_BCAST_STATE_NACK = (1 << 2),
TIPC_BLOCK_FLOWCTL = (1 << 3),
@@ -53,11 +54,12 @@ enum {
TIPC_LINK_PROTO_SEQNO = (1 << 6)
};
-#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \
- TIPC_BCAST_STATE_NACK | \
- TIPC_BCAST_RCAST | \
- TIPC_BLOCK_FLOWCTL | \
- TIPC_NODE_ID128 | \
+#define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \
+ TIPC_BCAST_SYNCH | \
+ TIPC_BCAST_STATE_NACK | \
+ TIPC_BCAST_RCAST | \
+ TIPC_BLOCK_FLOWCTL | \
+ TIPC_NODE_ID128 | \
TIPC_LINK_PROTO_SEQNO)
#define INVALID_BEARER_ID -1
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3f03ddd0e35b..636e6131769d 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -47,7 +47,7 @@
#include "netlink.h"
#include "group.h"
-#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
+#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
#define TIPC_FWD_MSG 1
#define TIPC_MAX_PORT 0xffffffff
@@ -80,7 +80,6 @@ struct sockaddr_pair {
* @publications: list of publications for port
* @blocking_link: address of the congested link we are currently sleeping on
* @pub_count: total # of publications port has made during its lifetime
- * @probing_state:
* @conn_timeout: the time we can wait for an unresponded setup request
* @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
* @cong_link_cnt: number of congested links
@@ -102,8 +101,8 @@ struct tipc_sock {
struct list_head cong_links;
struct list_head publications;
u32 pub_count;
- uint conn_timeout;
atomic_t dupl_rcvcnt;
+ u16 conn_timeout;
bool probe_unacked;
u16 cong_link_cnt;
u16 snt_unacked;
@@ -507,6 +506,9 @@ static void __tipc_shutdown(struct socket *sock, int error)
tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
!tsk_conn_cong(tsk)));
+ /* Remove any pending SYN message */
+ __skb_queue_purge(&sk->sk_write_queue);
+
/* Reject all unreceived messages, except on an active connection
* (which disconnects locally & sends a 'FIN+' to peer).
*/
@@ -715,7 +717,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
struct tipc_sock *tsk = tipc_sk(sk);
__poll_t revents = 0;
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
if (sk->sk_shutdown & RCV_SHUTDOWN)
revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
@@ -1196,6 +1198,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
* @skb: pointer to message buffer.
*/
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *inputq,
struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr = buf_msg(skb);
@@ -1213,7 +1216,16 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
tsk_peer_port(tsk));
sk->sk_state_change(sk);
- goto exit;
+
+ /* State change is ignored if socket already awake,
+ * - convert msg to abort msg and add to inqueue
+ */
+ msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
+ msg_set_type(hdr, TIPC_CONN_MSG);
+ msg_set_size(hdr, BASIC_H_SIZE);
+ msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+ __skb_queue_tail(inputq, skb);
+ return;
}
tsk->probe_unacked = false;
@@ -1319,6 +1331,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
tsk->conn_type = dest->addr.name.name.type;
tsk->conn_instance = dest->addr.name.name.instance;
}
+ msg_set_syn(hdr, 1);
}
seq = &dest->addr.nameseq;
@@ -1361,6 +1374,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
+ if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
+ return -ENOMEM;
rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
if (unlikely(rc == -ELINKCONG)) {
@@ -1419,8 +1434,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
/* Handle implicit connection setup */
if (unlikely(dest)) {
rc = __tipc_sendmsg(sock, m, dlen);
- if (dlen && (dlen == rc))
+ if (dlen && dlen == rc) {
+ tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
+ }
return rc;
}
@@ -1478,6 +1495,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
struct net *net = sock_net(sk);
struct tipc_msg *msg = &tsk->phdr;
+ msg_set_syn(msg, 0);
msg_set_destnode(msg, peer_node);
msg_set_destport(msg, peer_port);
msg_set_type(msg, TIPC_CONN_MSG);
@@ -1489,6 +1507,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
+ __skb_queue_purge(&sk->sk_write_queue);
if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
return;
@@ -1934,7 +1953,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
switch (msg_user(hdr)) {
case CONN_MANAGER:
- tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
+ tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
return;
case SOCK_WAKEUP:
tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
@@ -1959,91 +1978,90 @@ static void tipc_sk_proto_rcv(struct sock *sk,
}
/**
- * tipc_filter_connect - Handle incoming message for a connection-based socket
+ * tipc_sk_filter_connect - check incoming message for a connection-based socket
* @tsk: TIPC socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed
- *
- * Returns true if everything ok, false otherwise
+ * @skb: pointer to message buffer.
+ * Returns true if message should be added to receive queue, false otherwise
*/
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
struct tipc_msg *hdr = buf_msg(skb);
- u32 pport = msg_origport(hdr);
- u32 pnode = msg_orignode(hdr);
+ bool con_msg = msg_connected(hdr);
+ u32 pport = tsk_peer_port(tsk);
+ u32 pnode = tsk_peer_node(tsk);
+ u32 oport = msg_origport(hdr);
+ u32 onode = msg_orignode(hdr);
+ int err = msg_errcode(hdr);
+ unsigned long delay;
if (unlikely(msg_mcast(hdr)))
return false;
switch (sk->sk_state) {
case TIPC_CONNECTING:
- /* Accept only ACK or NACK message */
- if (unlikely(!msg_connected(hdr))) {
- if (pport != tsk_peer_port(tsk) ||
- pnode != tsk_peer_node(tsk))
- return false;
-
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- sk->sk_err = ECONNREFUSED;
- sk->sk_state_change(sk);
- return true;
- }
-
- if (unlikely(msg_errcode(hdr))) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- sk->sk_err = ECONNREFUSED;
- sk->sk_state_change(sk);
- return true;
- }
-
- if (unlikely(!msg_isdata(hdr))) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- sk->sk_err = EINVAL;
- sk->sk_state_change(sk);
- return true;
+ /* Setup ACK */
+ if (likely(con_msg)) {
+ if (err)
+ break;
+ tipc_sk_finish_conn(tsk, oport, onode);
+ msg_set_importance(&tsk->phdr, msg_importance(hdr));
+ /* ACK+ message with data is added to receive queue */
+ if (msg_data_sz(hdr))
+ return true;
+ /* Empty ACK-, - wake up sleeping connect() and drop */
+ sk->sk_data_ready(sk);
+ msg_set_dest_droppable(hdr, 1);
+ return false;
}
+ /* Ignore connectionless message if not from listening socket */
+ if (oport != pport || onode != pnode)
+ return false;
- tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
- msg_set_importance(&tsk->phdr, msg_importance(hdr));
-
- /* If 'ACK+' message, add to socket receive queue */
- if (msg_data_sz(hdr))
- return true;
-
- /* If empty 'ACK-' message, wake up sleeping connect() */
- sk->sk_data_ready(sk);
+ /* Rejected SYN */
+ if (err != TIPC_ERR_OVERLOAD)
+ break;
- /* 'ACK-' message is neither accepted nor rejected: */
- msg_set_dest_droppable(hdr, 1);
+ /* Prepare for new setup attempt if we have a SYN clone */
+ if (skb_queue_empty(&sk->sk_write_queue))
+ break;
+ get_random_bytes(&delay, 2);
+ delay %= (tsk->conn_timeout / 4);
+ delay = msecs_to_jiffies(delay + 100);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
return false;
-
case TIPC_OPEN:
case TIPC_DISCONNECTING:
- break;
+ return false;
case TIPC_LISTEN:
/* Accept only SYN message */
- if (!msg_connected(hdr) && !(msg_errcode(hdr)))
+ if (!msg_is_syn(hdr) &&
+ tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
+ return false;
+ if (!con_msg && !err)
return true;
- break;
+ return false;
case TIPC_ESTABLISHED:
/* Accept only connection-based messages sent by peer */
- if (unlikely(!tsk_peer_msg(tsk, hdr)))
+ if (likely(con_msg && !err && pport == oport && pnode == onode))
+ return true;
+ if (!tsk_peer_msg(tsk, hdr))
return false;
-
- if (unlikely(msg_errcode(hdr))) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- /* Let timer expire on it's own */
- tipc_node_remove_conn(net, tsk_peer_node(tsk),
- tsk->portid);
- sk->sk_state_change(sk);
- }
+ if (!err)
+ return true;
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ tipc_node_remove_conn(net, pnode, tsk->portid);
+ sk->sk_state_change(sk);
return true;
default:
pr_err("Unknown sk_state %u\n", sk->sk_state);
}
-
- return false;
+ /* Abort connection setup attempt */
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ sk->sk_err = ECONNREFUSED;
+ sk->sk_state_change(sk);
+ return true;
}
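
The retry branch above arms the socket timer with a randomized backoff: two random bytes, reduced modulo a quarter of the connect timeout, plus a 100 ms floor. Worked through with the CONN_TIMEOUT_DEFAULT of 8000 ms defined earlier in this file (illustrative numbers only):

	unsigned long delay;

	get_random_bytes(&delay, 2);	/* randomizes only the low 16 bits */
	delay %= (8000 / 4);		/* modulo bounds it to [0, 2000) ms */
	delay = msecs_to_jiffies(delay + 100);	/* final range [100, 2100) ms */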
/**
@@ -2545,43 +2563,78 @@ static int tipc_shutdown(struct socket *sock, int how)
return res;
}
+static void tipc_sk_check_probing_state(struct sock *sk,
+ struct sk_buff_head *list)
+{
+ struct tipc_sock *tsk = tipc_sk(sk);
+ u32 pnode = tsk_peer_node(tsk);
+ u32 pport = tsk_peer_port(tsk);
+ u32 self = tsk_own_node(tsk);
+ u32 oport = tsk->portid;
+ struct sk_buff *skb;
+
+ if (tsk->probe_unacked) {
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ sk->sk_err = ECONNABORTED;
+ tipc_node_remove_conn(sock_net(sk), pnode, pport);
+ sk->sk_state_change(sk);
+ return;
+ }
+ /* Prepare new probe */
+ skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
+ pnode, self, pport, oport, TIPC_OK);
+ if (skb)
+ __skb_queue_tail(list, skb);
+ tsk->probe_unacked = true;
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
+}
+
+static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
+{
+ struct tipc_sock *tsk = tipc_sk(sk);
+
+ /* Try again later if dest link is congested */
+ if (tsk->cong_link_cnt) {
+ sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
+ return;
+ }
+ /* Prepare SYN for retransmit */
+ tipc_msg_skb_clone(&sk->sk_write_queue, list);
+}
+
static void tipc_sk_timeout(struct timer_list *t)
{
struct sock *sk = from_timer(sk, t, sk_timer);
struct tipc_sock *tsk = tipc_sk(sk);
- u32 peer_port = tsk_peer_port(tsk);
- u32 peer_node = tsk_peer_node(tsk);
- u32 own_node = tsk_own_node(tsk);
- u32 own_port = tsk->portid;
- struct net *net = sock_net(sk);
- struct sk_buff *skb = NULL;
+ u32 pnode = tsk_peer_node(tsk);
+ struct sk_buff_head list;
+ int rc = 0;
+ skb_queue_head_init(&list);
bh_lock_sock(sk);
- if (!tipc_sk_connected(sk))
- goto exit;
/* Try again later if socket is busy */
if (sock_owned_by_user(sk)) {
sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
- goto exit;
+ bh_unlock_sock(sk);
+ return;
}
- if (tsk->probe_unacked) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- tipc_node_remove_conn(net, peer_node, peer_port);
- sk->sk_state_change(sk);
- goto exit;
- }
- /* Send new probe */
- skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
- peer_node, own_node, peer_port, own_port,
- TIPC_OK);
- tsk->probe_unacked = true;
- sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
-exit:
+ if (sk->sk_state == TIPC_ESTABLISHED)
+ tipc_sk_check_probing_state(sk, &list);
+ else if (sk->sk_state == TIPC_CONNECTING)
+ tipc_sk_retry_connect(sk, &list);
+
bh_unlock_sock(sk);
- if (skb)
- tipc_node_xmit_skb(net, skb, peer_node, own_port);
+
+ if (!skb_queue_empty(&list))
+ rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
+
+ /* SYN messages may cause link congestion */
+ if (rc == -ELINKCONG) {
+ tipc_dest_push(&tsk->cong_links, pnode, 0);
+ tsk->cong_link_cnt = 1;
+ }
sock_put(sk);
}
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 2627b5d812e9..4bdea0057171 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -57,16 +57,12 @@
* @idr_lock: protect the connection identifier set
* @idr_in_use: number of allocated identifier entries
* @net: network namespace instance
- * @rcvbuf_cache: memory cache of server receive buffer
+ * @awork: accept work item
* @rcv_wq: receive workqueue
* @send_wq: send workqueue
* @max_rcvbuf_size: maximum permitted receive message length
- * @tipc_conn_new: callback will be called when new connection is incoming
- * @tipc_conn_release: callback will be called before releasing the connection
- * @tipc_conn_recvmsg: callback will be called when message arrives
+ * @listener: topsrv listener socket
* @name: server name
- * @imp: message importance
- * @type: socket type
*/
struct tipc_topsrv {
struct idr conn_idr;
@@ -90,9 +86,7 @@ struct tipc_topsrv {
* @server: pointer to connected server
* @sub_list: list of all pertaining subscriptions
* @sub_lock: lock protecting the subscription list
- * @outqueue_lock: control access to the outqueue
* @rwork: receive work item
- * @rx_action: what to do when connection socket is active
* @outqueue: pointer to first outbound message in queue
* @outqueue_lock: control access to the outqueue
* @swork: send work item
@@ -657,7 +651,7 @@ int tipc_topsrv_start(struct net *net)
srv->max_rcvbuf_size = sizeof(struct tipc_subscr);
INIT_WORK(&srv->awork, tipc_topsrv_accept);
- strncpy(srv->name, name, strlen(name) + 1);
+ strscpy(srv->name, name, sizeof(srv->name));
tn->topsrv = srv;
atomic_set(&tn->subscription_count, 0);
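
The strscpy() conversion replaces a bound that tracked the source (strlen(name) + 1, which can overrun srv->name for a long name) with one that tracks the destination and guarantees NUL termination. Minimal contrast, demo buffer only:

	char dst[16];

	/* bound follows the source: overruns dst when name is long */
	strncpy(dst, name, strlen(name) + 1);

	/* bound follows the destination and always NUL-terminates */
	strscpy(dst, name, sizeof(dst));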
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 9783101bc4a9..10dc59ce9c82 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -650,6 +650,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
struct udp_tunnel_sock_cfg tuncfg = {NULL};
struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
u8 node_id[NODE_ID_LEN] = {0,};
+ int rmcast = 0;
ub = kzalloc(sizeof(*ub), GFP_ATOMIC);
if (!ub)
@@ -680,6 +681,9 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
if (err)
goto err;
+ /* Check whether the remote ip address is multicast */
+ rmcast = tipc_udp_is_mcast_addr(&remote);
+
/* Autoconfigure own node identity if needed */
if (!tipc_own_id(net)) {
memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16);
@@ -705,7 +709,12 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
goto err;
}
udp_conf.family = AF_INET;
- udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+
+ /* Switch to use ANY to receive packets from group */
+ if (rmcast)
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ else
+ udp_conf.local_ip.s_addr = local.ipv4.s_addr;
udp_conf.use_udp_checksums = false;
ub->ifindex = dev->ifindex;
if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
@@ -719,7 +728,10 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
udp_conf.family = AF_INET6;
udp_conf.use_udp6_tx_checksums = true;
udp_conf.use_udp6_rx_checksums = true;
- udp_conf.local_ip6 = in6addr_any;
+ if (rmcast)
+ udp_conf.local_ip6 = in6addr_any;
+ else
+ udp_conf.local_ip6 = local.ipv6;
b->mtu = 1280;
#endif
} else {
@@ -741,7 +753,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
* is used if it's a multicast address.
*/
memcpy(&b->bcast_addr.value, &remote, sizeof(remote));
- if (tipc_udp_is_mcast_addr(&remote))
+ if (rmcast)
err = enable_mcast(ub, &remote);
else
err = tipc_udp_rcast_add(b, &remote);
diff --git a/net/tls/Kconfig b/net/tls/Kconfig
index 73f05ece53d0..99c1a19c17b1 100644
--- a/net/tls/Kconfig
+++ b/net/tls/Kconfig
@@ -8,6 +8,7 @@ config TLS
select CRYPTO_AES
select CRYPTO_GCM
select STREAM_PARSER
+ select NET_SOCK_MSG
default n
---help---
Enable kernel support for TLS protocol. This allows symmetric
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 292742e50bfa..276edbc04f38 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -421,7 +421,7 @@ last_record:
tls_push_record_flags = flags;
if (more) {
tls_ctx->pending_open_record_frags =
- record->num_frags;
+ !!record->num_frags;
break;
}
@@ -686,7 +686,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
goto free_marker_record;
}
- crypto_info = &ctx->crypto_send;
+ crypto_info = &ctx->crypto_send.info;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
@@ -780,7 +780,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
ctx->priv_ctx_tx = offload_ctx;
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
- &ctx->crypto_send,
+ &ctx->crypto_send.info,
tcp_sk(sk)->write_seq);
if (rc)
goto release_netdev;
@@ -862,7 +862,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
goto release_ctx;
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
- &ctx->crypto_recv,
+ &ctx->crypto_recv.info,
tcp_sk(sk)->copied_seq);
if (rc) {
pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
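
Note: the !!record->num_frags change is a boolean normalization.
pending_open_record_frags is consumed as a flag, and !! collapses any
non-zero fragment count to exactly 0 or 1, which matters wherever the value
is stored in a single-bit field or compared against true. In miniature:

    int num_frags = 3;
    bool pending = !!num_frags;     /* exactly 1 */

    /* Without the normalization, (num_frags == true) is (3 == 1), i.e.
     * false, even though three fragments are clearly pending.
     */
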
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 6102169239d1..450a6dbc5a88 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -320,7 +320,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
goto free_req;
iv = buf;
- memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt,
+ memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
TLS_CIPHER_AES_GCM_128_SALT_SIZE);
aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
TLS_CIPHER_AES_GCM_128_IV_SIZE;
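
Note: the crypto_send / crypto_recv accessor changes in this file and the two
preceding ones all stem from one type change: the per-direction crypto state
becomes a union whose first member is the generic header. A sketch of the
shape implied by the accessors above (the real definition lives in
include/net/tls.h):

    union tls_crypto_context {
            struct tls_crypto_info info;                      /* generic header */
            struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; /* per-cipher */
    };

    /* Read the header first, then the cipher-specific view:
     *
     *     if (crypto_send.info.cipher_type == TLS_CIPHER_AES_GCM_128)
     *             salt = crypto_send.aes_gcm_128.salt;
     */
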
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 180b6640e531..311cec8e533d 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -141,7 +141,6 @@ retry:
size = sg->length;
}
- clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
ctx->in_tcp_sendpages = false;
ctx->sk_write_space(sk);
@@ -193,15 +192,12 @@ int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
return rc;
}
-int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
- int flags, long *timeo)
+int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
+ int flags)
{
struct scatterlist *sg;
u16 offset;
- if (!tls_is_partially_sent_record(ctx))
- return ctx->push_pending_record(sk, flags);
-
sg = ctx->partially_sent_record;
offset = ctx->partially_sent_offset;
@@ -209,9 +205,23 @@ int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
return tls_push_sg(sk, ctx, sg, offset, flags);
}
+int tls_push_pending_closed_record(struct sock *sk,
+ struct tls_context *tls_ctx,
+ int flags, long *timeo)
+{
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+
+ if (tls_is_partially_sent_record(tls_ctx) ||
+ !list_empty(&ctx->tx_list))
+ return tls_tx_records(sk, flags);
+ else
+ return tls_ctx->push_pending_record(sk, flags);
+}
+
static void tls_write_space(struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
/* If in_tcp_sendpages call lower protocol write space handler
* to ensure we wake up any waiting operations there. For example
@@ -222,25 +232,26 @@ static void tls_write_space(struct sock *sk)
return;
}
- if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
- gfp_t sk_allocation = sk->sk_allocation;
- int rc;
- long timeo = 0;
-
- sk->sk_allocation = GFP_ATOMIC;
- rc = tls_push_pending_closed_record(sk, ctx,
- MSG_DONTWAIT |
- MSG_NOSIGNAL,
- &timeo);
- sk->sk_allocation = sk_allocation;
-
- if (rc < 0)
- return;
+ /* Schedule the transmission if tx list is ready */
+ if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
+ /* BIT_TX_SCHEDULED guards against scheduling the work twice */
+ if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
+ schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}
ctx->sk_write_space(sk);
}
+static void tls_ctx_free(struct tls_context *ctx)
+{
+ if (!ctx)
+ return;
+
+ memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
+ memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
+ kfree(ctx);
+}
+
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
struct tls_context *ctx = tls_get_ctx(sk);
@@ -260,19 +271,6 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
tls_handle_open_record(sk, 0);
- if (ctx->partially_sent_record) {
- struct scatterlist *sg = ctx->partially_sent_record;
-
- while (1) {
- put_page(sg_page(sg));
- sk_mem_uncharge(sk, sg->length);
-
- if (sg_is_last(sg))
- break;
- sg++;
- }
- }
-
/* We need these for tls_sw_fallback handling of other packets */
if (ctx->tx_conf == TLS_SW) {
kfree(ctx->tx.rec_seq);
@@ -294,7 +292,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
#else
{
#endif
- kfree(ctx);
+ tls_ctx_free(ctx);
ctx = NULL;
}
@@ -305,7 +303,7 @@ skip_tx_cleanup:
* for sk->sk_prot->unhash [tls_hw_unhash]
*/
if (free_ctx)
- kfree(ctx);
+ tls_ctx_free(ctx);
}
static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
@@ -330,7 +328,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
}
/* get user crypto info */
- crypto_info = &ctx->crypto_send;
+ crypto_info = &ctx->crypto_send.info;
if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
rc = -EBUSY;
@@ -417,9 +415,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
}
if (tx)
- crypto_info = &ctx->crypto_send;
+ crypto_info = &ctx->crypto_send.info;
else
- crypto_info = &ctx->crypto_recv;
+ crypto_info = &ctx->crypto_recv.info;
/* Currently we don't support setting crypto info more than once */
if (TLS_CRYPTO_INFO_READY(crypto_info)) {
@@ -499,7 +497,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
goto out;
err_crypto_info:
- memset(crypto_info, 0, sizeof(*crypto_info));
+ memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
return rc;
}
@@ -622,12 +620,14 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;
prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
- prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
- prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
+ prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
+ prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
+ prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
- prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
- prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
+ prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
+ prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
+ prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
#ifdef CONFIG_TLS_DEVICE
prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
@@ -715,8 +715,6 @@ EXPORT_SYMBOL(tls_unregister_device);
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
.name = "tls",
- .uid = TCP_ULP_TLS,
- .user_visible = true,
.owner = THIS_MODULE,
.init = tls_init,
};
@@ -726,7 +724,6 @@ static int __init tls_register(void)
build_protos(tls_prots[TLSV4], &tcp_prot);
tls_sw_proto_ops = inet_stream_ops;
- tls_sw_proto_ops.poll = tls_sw_poll;
tls_sw_proto_ops.splice_read = tls_sw_splice_read;
#ifdef CONFIG_TLS_DEVICE
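
Note: tls_ctx_free() wipes the key material with memzero_explicit() rather
than memset() because a plain memset() of memory that is immediately freed is
a dead store the compiler may legally elide; memzero_explicit() contains a
barrier that keeps the wipe in place. The same idea in a minimal sketch, with
a hypothetical type:

    struct secret { u8 key[32]; };          /* hypothetical */

    static void secret_free(struct secret *s)
    {
            /* memset(s->key, 0, sizeof(s->key)) could be optimized away
             * here as a store to soon-to-be-freed memory; the explicit
             * variant cannot be.
             */
            memzero_explicit(s->key, sizeof(s->key));
            kfree(s);
    }
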
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index e28a6ff25d96..5cd88ba8acd1 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -4,6 +4,7 @@
* Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
* Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
* Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
+ * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -43,12 +44,133 @@
#define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE
+static int __skb_nsg(struct sk_buff *skb, int offset, int len,
+ unsigned int recursion_level)
+{
+ int start = skb_headlen(skb);
+ int i, chunk = start - offset;
+ struct sk_buff *frag_iter;
+ int elt = 0;
+
+ if (unlikely(recursion_level >= 24))
+ return -EMSGSIZE;
+
+ if (chunk > 0) {
+ if (chunk > len)
+ chunk = len;
+ elt++;
+ len -= chunk;
+ if (len == 0)
+ return elt;
+ offset += chunk;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ chunk = end - offset;
+ if (chunk > 0) {
+ if (chunk > len)
+ chunk = len;
+ elt++;
+ len -= chunk;
+ if (len == 0)
+ return elt;
+ offset += chunk;
+ }
+ start = end;
+ }
+
+ if (unlikely(skb_has_frag_list(skb))) {
+ skb_walk_frags(skb, frag_iter) {
+ int end, ret;
+
+ WARN_ON(start > offset + len);
+
+ end = start + frag_iter->len;
+ chunk = end - offset;
+ if (chunk > 0) {
+ if (chunk > len)
+ chunk = len;
+ ret = __skb_nsg(frag_iter, offset - start, chunk,
+ recursion_level + 1);
+ if (unlikely(ret < 0))
+ return ret;
+ elt += ret;
+ len -= chunk;
+ if (len == 0)
+ return elt;
+ offset += chunk;
+ }
+ start = end;
+ }
+ }
+ BUG_ON(len);
+ return elt;
+}
+
+/* Return the number of scatterlist elements required to completely map the
+ * skb, or -EMSGSIZE if the recursion depth is exceeded.
+ */
+static int skb_nsg(struct sk_buff *skb, int offset, int len)
+{
+ return __skb_nsg(skb, offset, len, 0);
+}
+
+static void tls_decrypt_done(struct crypto_async_request *req, int err)
+{
+ struct aead_request *aead_req = (struct aead_request *)req;
+ struct scatterlist *sgout = aead_req->dst;
+ struct tls_sw_context_rx *ctx;
+ struct tls_context *tls_ctx;
+ struct scatterlist *sg;
+ struct sk_buff *skb;
+ unsigned int pages;
+ int pending;
+
+ skb = (struct sk_buff *)req->data;
+ tls_ctx = tls_get_ctx(skb->sk);
+ ctx = tls_sw_ctx_rx(tls_ctx);
+ pending = atomic_dec_return(&ctx->decrypt_pending);
+
+ /* Propagate the error if there was one */
+ if (err) {
+ ctx->async_wait.err = err;
+ tls_err_abort(skb->sk, err);
+ }
+
+ /* After using skb->sk to propagate sk through crypto async callback
+ * we need to NULL it again.
+ */
+ skb->sk = NULL;
+
+ /* Release the skb, pages and memory allocated for crypto req */
+ kfree_skb(skb);
+
+ /* Skip the first S/G entry as it points to AAD */
+ for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
+ if (!sg)
+ break;
+ put_page(sg_page(sg));
+ }
+
+ kfree(aead_req);
+
+ if (!pending && READ_ONCE(ctx->async_notify))
+ complete(&ctx->async_wait.completion);
+}
+
static int tls_do_decryption(struct sock *sk,
+ struct sk_buff *skb,
struct scatterlist *sgin,
struct scatterlist *sgout,
char *iv_recv,
size_t data_len,
- struct aead_request *aead_req)
+ struct aead_request *aead_req,
+ bool async)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@ -59,324 +181,657 @@ static int tls_do_decryption(struct sock *sk,
aead_request_set_crypt(aead_req, sgin, sgout,
data_len + tls_ctx->rx.tag_size,
(u8 *)iv_recv);
- aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &ctx->async_wait);
- ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
- return ret;
-}
-
-static void trim_sg(struct sock *sk, struct scatterlist *sg,
- int *sg_num_elem, unsigned int *sg_size, int target_size)
-{
- int i = *sg_num_elem - 1;
- int trim = *sg_size - target_size;
-
- if (trim <= 0) {
- WARN_ON(trim < 0);
- return;
+ if (async) {
+ /* Using skb->sk to push sk through to crypto async callback
+ * handler. This allows propagating errors up to the socket
+ * if needed. It _must_ be cleared in the async handler
+ * before kfree_skb is called. We _know_ skb->sk is NULL
+ * because it is a clone from strparser.
+ */
+ skb->sk = sk;
+ aead_request_set_callback(aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tls_decrypt_done, skb);
+ atomic_inc(&ctx->decrypt_pending);
+ } else {
+ aead_request_set_callback(aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &ctx->async_wait);
}
- *sg_size = target_size;
- while (trim >= sg[i].length) {
- trim -= sg[i].length;
- sk_mem_uncharge(sk, sg[i].length);
- put_page(sg_page(&sg[i]));
- i--;
+ ret = crypto_aead_decrypt(aead_req);
+ if (ret == -EINPROGRESS) {
+ if (async)
+ return ret;
- if (i < 0)
- goto out;
+ ret = crypto_wait_req(ret, &ctx->async_wait);
}
- sg[i].length -= trim;
- sk_mem_uncharge(sk, trim);
+ if (async)
+ atomic_dec(&ctx->decrypt_pending);
-out:
- *sg_num_elem = i + 1;
+ return ret;
}
-static void trim_both_sgl(struct sock *sk, int target_size)
+static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec = ctx->open_rec;
- trim_sg(sk, ctx->sg_plaintext_data,
- &ctx->sg_plaintext_num_elem,
- &ctx->sg_plaintext_size,
- target_size);
-
+ sk_msg_trim(sk, &rec->msg_plaintext, target_size);
if (target_size > 0)
target_size += tls_ctx->tx.overhead_size;
-
- trim_sg(sk, ctx->sg_encrypted_data,
- &ctx->sg_encrypted_num_elem,
- &ctx->sg_encrypted_size,
- target_size);
+ sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}
-static int alloc_encrypted_sg(struct sock *sk, int len)
+static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
- int rc = 0;
+ struct tls_rec *rec = ctx->open_rec;
+ struct sk_msg *msg_en = &rec->msg_encrypted;
- rc = sk_alloc_sg(sk, len,
- ctx->sg_encrypted_data, 0,
- &ctx->sg_encrypted_num_elem,
- &ctx->sg_encrypted_size, 0);
+ return sk_msg_alloc(sk, msg_en, len, 0);
+}
- if (rc == -ENOSPC)
- ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);
+static int tls_clone_plaintext_msg(struct sock *sk, int required)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec = ctx->open_rec;
+ struct sk_msg *msg_pl = &rec->msg_plaintext;
+ struct sk_msg *msg_en = &rec->msg_encrypted;
+ int skip, len;
+
+ /* We add page references worth len bytes from encrypted sg
+ * at the end of plaintext sg. It is guaranteed that msg_en
+ * has enough required room (ensured by caller).
+ */
+ len = required - msg_pl->sg.size;
- return rc;
+ /* Skip initial bytes in msg_en's data so that the same offset
+ * can be used for both plain and encrypted data.
+ */
+ skip = tls_ctx->tx.prepend_size + msg_pl->sg.size;
+
+ return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}
-static int alloc_plaintext_sg(struct sock *sk, int len)
+static struct tls_rec *tls_get_rec(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
- int rc = 0;
+ struct sk_msg *msg_pl, *msg_en;
+ struct tls_rec *rec;
+ int mem_size;
- rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
- &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
- tls_ctx->pending_open_record_frags);
+ mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
- if (rc == -ENOSPC)
- ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);
+ rec = kzalloc(mem_size, sk->sk_allocation);
+ if (!rec)
+ return NULL;
- return rc;
+ msg_pl = &rec->msg_plaintext;
+ msg_en = &rec->msg_encrypted;
+
+ sk_msg_init(msg_pl);
+ sk_msg_init(msg_en);
+
+ sg_init_table(rec->sg_aead_in, 2);
+ sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
+ sizeof(rec->aad_space));
+ sg_unmark_end(&rec->sg_aead_in[1]);
+
+ sg_init_table(rec->sg_aead_out, 2);
+ sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
+ sizeof(rec->aad_space));
+ sg_unmark_end(&rec->sg_aead_out[1]);
+
+ return rec;
}
-static void free_sg(struct sock *sk, struct scatterlist *sg,
- int *sg_num_elem, unsigned int *sg_size)
+static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
- int i, n = *sg_num_elem;
+ sk_msg_free(sk, &rec->msg_encrypted);
+ sk_msg_free(sk, &rec->msg_plaintext);
+ kfree(rec);
+}
- for (i = 0; i < n; ++i) {
- sk_mem_uncharge(sk, sg[i].length);
- put_page(sg_page(&sg[i]));
+static void tls_free_open_rec(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec = ctx->open_rec;
+
+ if (rec) {
+ tls_free_rec(sk, rec);
+ ctx->open_rec = NULL;
}
- *sg_num_elem = 0;
- *sg_size = 0;
}
-static void tls_free_both_sg(struct sock *sk)
+int tls_tx_records(struct sock *sk, int flags)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec, *tmp;
+ struct sk_msg *msg_en;
+ int tx_flags, rc = 0;
+
+ if (tls_is_partially_sent_record(tls_ctx)) {
+ rec = list_first_entry(&ctx->tx_list,
+ struct tls_rec, list);
+
+ if (flags == -1)
+ tx_flags = rec->tx_flags;
+ else
+ tx_flags = flags;
+
+ rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
+ if (rc)
+ goto tx_err;
+
+ /* Full record has been transmitted.
+ * Remove the head of tx_list
+ */
+ list_del(&rec->list);
+ sk_msg_free(sk, &rec->msg_plaintext);
+ kfree(rec);
+ }
- free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
- &ctx->sg_encrypted_size);
+ /* Tx all ready records */
+ list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
+ if (READ_ONCE(rec->tx_ready)) {
+ if (flags == -1)
+ tx_flags = rec->tx_flags;
+ else
+ tx_flags = flags;
+
+ msg_en = &rec->msg_encrypted;
+ rc = tls_push_sg(sk, tls_ctx,
+ &msg_en->sg.data[msg_en->sg.curr],
+ 0, tx_flags);
+ if (rc)
+ goto tx_err;
+
+ list_del(&rec->list);
+ sk_msg_free(sk, &rec->msg_plaintext);
+ kfree(rec);
+ } else {
+ break;
+ }
+ }
+
+tx_err:
+ if (rc < 0 && rc != -EAGAIN)
+ tls_err_abort(sk, EBADMSG);
- free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
- &ctx->sg_plaintext_size);
+ return rc;
}
-static int tls_do_encryption(struct tls_context *tls_ctx,
+static void tls_encrypt_done(struct crypto_async_request *req, int err)
+{
+ struct aead_request *aead_req = (struct aead_request *)req;
+ struct sock *sk = req->data;
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct scatterlist *sge;
+ struct sk_msg *msg_en;
+ struct tls_rec *rec;
+ bool ready = false;
+ int pending;
+
+ rec = container_of(aead_req, struct tls_rec, aead_req);
+ msg_en = &rec->msg_encrypted;
+
+ sge = sk_msg_elem(msg_en, msg_en->sg.curr);
+ sge->offset -= tls_ctx->tx.prepend_size;
+ sge->length += tls_ctx->tx.prepend_size;
+
+ /* Check if an error was previously set on the socket */
+ if (err || sk->sk_err) {
+ rec = NULL;
+
+ /* If err is already set on socket, return the same code */
+ if (sk->sk_err) {
+ ctx->async_wait.err = sk->sk_err;
+ } else {
+ ctx->async_wait.err = err;
+ tls_err_abort(sk, err);
+ }
+ }
+
+ if (rec) {
+ struct tls_rec *first_rec;
+
+ /* Mark the record as ready for transmission */
+ smp_store_mb(rec->tx_ready, true);
+
+ /* If the just-encrypted record is at the head of tx_list, schedule tx */
+ first_rec = list_first_entry(&ctx->tx_list,
+ struct tls_rec, list);
+ if (rec == first_rec)
+ ready = true;
+ }
+
+ pending = atomic_dec_return(&ctx->encrypt_pending);
+
+ if (!pending && READ_ONCE(ctx->async_notify))
+ complete(&ctx->async_wait.completion);
+
+ if (!ready)
+ return;
+
+ /* Schedule the transmission */
+ if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+ schedule_delayed_work(&ctx->tx_work.work, 1);
+}
+
+static int tls_do_encryption(struct sock *sk,
+ struct tls_context *tls_ctx,
struct tls_sw_context_tx *ctx,
struct aead_request *aead_req,
- size_t data_len)
+ size_t data_len, u32 start)
{
+ struct tls_rec *rec = ctx->open_rec;
+ struct sk_msg *msg_en = &rec->msg_encrypted;
+ struct scatterlist *sge = sk_msg_elem(msg_en, start);
int rc;
- ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
- ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
+ sge->offset += tls_ctx->tx.prepend_size;
+ sge->length -= tls_ctx->tx.prepend_size;
+
+ msg_en->sg.curr = start;
aead_request_set_tfm(aead_req, ctx->aead_send);
aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
- aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
+ aead_request_set_crypt(aead_req, rec->sg_aead_in,
+ rec->sg_aead_out,
data_len, tls_ctx->tx.iv);
aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &ctx->async_wait);
+ tls_encrypt_done, sk);
+
+ /* Add the record in tx_list */
+ list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
+ atomic_inc(&ctx->encrypt_pending);
- rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);
+ rc = crypto_aead_encrypt(aead_req);
+ if (!rc || rc != -EINPROGRESS) {
+ atomic_dec(&ctx->encrypt_pending);
+ sge->offset -= tls_ctx->tx.prepend_size;
+ sge->length += tls_ctx->tx.prepend_size;
+ }
- ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
- ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
+ if (!rc) {
+ WRITE_ONCE(rec->tx_ready, true);
+ } else if (rc != -EINPROGRESS) {
+ list_del(&rec->list);
+ return rc;
+ }
+ /* Unhook the record from the context unless encryption failed */
+ ctx->open_rec = NULL;
+ tls_advance_record_sn(sk, &tls_ctx->tx);
return rc;
}
-static int tls_push_record(struct sock *sk, int flags,
- unsigned char record_type)
+static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
+ struct tls_rec **to, struct sk_msg *msg_opl,
+ struct sk_msg *msg_oen, u32 split_point,
+ u32 tx_overhead_size, u32 *orig_end)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
- struct aead_request *req;
- int rc;
+ u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
+ struct scatterlist *sge, *osge, *nsge;
+ u32 orig_size = msg_opl->sg.size;
+ struct scatterlist tmp = { };
+ struct sk_msg *msg_npl;
+ struct tls_rec *new;
+ int ret;
- req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
- if (!req)
+ new = tls_get_rec(sk);
+ if (!new)
return -ENOMEM;
+ ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
+ tx_overhead_size, 0);
+ if (ret < 0) {
+ tls_free_rec(sk, new);
+ return ret;
+ }
- sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
- sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
-
- tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
- tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
- record_type);
-
- tls_fill_prepend(tls_ctx,
- page_address(sg_page(&ctx->sg_encrypted_data[0])) +
- ctx->sg_encrypted_data[0].offset,
- ctx->sg_plaintext_size, record_type);
-
- tls_ctx->pending_open_record_frags = 0;
- set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
+ *orig_end = msg_opl->sg.end;
+ i = msg_opl->sg.start;
+ sge = sk_msg_elem(msg_opl, i);
+ while (apply && sge->length) {
+ if (sge->length > apply) {
+ u32 len = sge->length - apply;
+
+ get_page(sg_page(sge));
+ sg_set_page(&tmp, sg_page(sge), len,
+ sge->offset + apply);
+ sge->length = apply;
+ bytes += apply;
+ apply = 0;
+ } else {
+ apply -= sge->length;
+ bytes += sge->length;
+ }
- rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
- if (rc < 0) {
- /* If we are called from write_space and
- * we fail, we need to set this SOCK_NOSPACE
- * to trigger another write_space in the future.
- */
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- goto out_req;
+ sk_msg_iter_var_next(i);
+ if (i == msg_opl->sg.end)
+ break;
+ sge = sk_msg_elem(msg_opl, i);
}
- free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
- &ctx->sg_plaintext_size);
+ msg_opl->sg.end = i;
+ msg_opl->sg.curr = i;
+ msg_opl->sg.copybreak = 0;
+ msg_opl->apply_bytes = 0;
+ msg_opl->sg.size = bytes;
+
+ msg_npl = &new->msg_plaintext;
+ msg_npl->apply_bytes = apply;
+ msg_npl->sg.size = orig_size - bytes;
+
+ j = msg_npl->sg.start;
+ nsge = sk_msg_elem(msg_npl, j);
+ if (tmp.length) {
+ memcpy(nsge, &tmp, sizeof(*nsge));
+ sk_msg_iter_var_next(j);
+ nsge = sk_msg_elem(msg_npl, j);
+ }
- ctx->sg_encrypted_num_elem = 0;
- ctx->sg_encrypted_size = 0;
+ osge = sk_msg_elem(msg_opl, i);
+ while (osge->length) {
+ memcpy(nsge, osge, sizeof(*nsge));
+ sg_unmark_end(nsge);
+ sk_msg_iter_var_next(i);
+ sk_msg_iter_var_next(j);
+ if (i == *orig_end)
+ break;
+ osge = sk_msg_elem(msg_opl, i);
+ nsge = sk_msg_elem(msg_npl, j);
+ }
- /* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
- rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
- if (rc < 0 && rc != -EAGAIN)
- tls_err_abort(sk, EBADMSG);
+ msg_npl->sg.end = j;
+ msg_npl->sg.curr = j;
+ msg_npl->sg.copybreak = 0;
- tls_advance_record_sn(sk, &tls_ctx->tx);
-out_req:
- aead_request_free(req);
- return rc;
+ *to = new;
+ return 0;
}
-static int tls_sw_push_pending_record(struct sock *sk, int flags)
+static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
+ struct tls_rec *from, u32 orig_end)
{
- return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
+ struct sk_msg *msg_npl = &from->msg_plaintext;
+ struct sk_msg *msg_opl = &to->msg_plaintext;
+ struct scatterlist *osge, *nsge;
+ u32 i, j;
+
+ i = msg_opl->sg.end;
+ sk_msg_iter_var_prev(i);
+ j = msg_npl->sg.start;
+
+ osge = sk_msg_elem(msg_opl, i);
+ nsge = sk_msg_elem(msg_npl, j);
+
+ if (sg_page(osge) == sg_page(nsge) &&
+ osge->offset + osge->length == nsge->offset) {
+ osge->length += nsge->length;
+ put_page(sg_page(nsge));
+ }
+
+ msg_opl->sg.end = orig_end;
+ msg_opl->sg.curr = orig_end;
+ msg_opl->sg.copybreak = 0;
+ msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
+ msg_opl->sg.size += msg_npl->sg.size;
+
+ sk_msg_free(sk, &to->msg_encrypted);
+ sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
+
+ kfree(from);
}
-static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
- int length, int *pages_used,
- unsigned int *size_used,
- struct scatterlist *to, int to_max_pages,
- bool charge)
+static int tls_push_record(struct sock *sk, int flags,
+ unsigned char record_type)
{
- struct page *pages[MAX_SKB_FRAGS];
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
+ u32 i, split_point, uninitialized_var(orig_end);
+ struct sk_msg *msg_pl, *msg_en;
+ struct aead_request *req;
+ bool split;
+ int rc;
- size_t offset;
- ssize_t copied, use;
- int i = 0;
- unsigned int size = *size_used;
- int num_elem = *pages_used;
- int rc = 0;
- int maxpages;
+ if (!rec)
+ return 0;
- while (length > 0) {
- i = 0;
- maxpages = to_max_pages - num_elem;
- if (maxpages == 0) {
- rc = -EFAULT;
- goto out;
- }
- copied = iov_iter_get_pages(from, pages,
- length,
- maxpages, &offset);
- if (copied <= 0) {
- rc = -EFAULT;
- goto out;
- }
+ msg_pl = &rec->msg_plaintext;
+ msg_en = &rec->msg_encrypted;
+
+ split_point = msg_pl->apply_bytes;
+ split = split_point && split_point < msg_pl->sg.size;
+ if (split) {
+ rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
+ split_point, tls_ctx->tx.overhead_size,
+ &orig_end);
+ if (rc < 0)
+ return rc;
+ sk_msg_trim(sk, msg_en, msg_pl->sg.size +
+ tls_ctx->tx.overhead_size);
+ }
- iov_iter_advance(from, copied);
+ rec->tx_flags = flags;
+ req = &rec->aead_req;
- length -= copied;
- size += copied;
- while (copied) {
- use = min_t(int, copied, PAGE_SIZE - offset);
+ i = msg_pl->sg.end;
+ sk_msg_iter_var_prev(i);
+ sg_mark_end(sk_msg_elem(msg_pl, i));
- sg_set_page(&to[num_elem],
- pages[i], use, offset);
- sg_unmark_end(&to[num_elem]);
- if (charge)
- sk_mem_charge(sk, use);
+ i = msg_pl->sg.start;
+ sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
+ &msg_en->sg.data[i] : &msg_pl->sg.data[i]);
- offset = 0;
- copied -= use;
+ i = msg_en->sg.end;
+ sk_msg_iter_var_prev(i);
+ sg_mark_end(sk_msg_elem(msg_en, i));
- ++i;
- ++num_elem;
+ i = msg_en->sg.start;
+ sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
+
+ tls_make_aad(rec->aad_space, msg_pl->sg.size,
+ tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
+ record_type);
+
+ tls_fill_prepend(tls_ctx,
+ page_address(sg_page(&msg_en->sg.data[i])) +
+ msg_en->sg.data[i].offset, msg_pl->sg.size,
+ record_type);
+
+ tls_ctx->pending_open_record_frags = false;
+
+ rc = tls_do_encryption(sk, tls_ctx, ctx, req, msg_pl->sg.size, i);
+ if (rc < 0) {
+ if (rc != -EINPROGRESS) {
+ tls_err_abort(sk, EBADMSG);
+ if (split) {
+ tls_ctx->pending_open_record_frags = true;
+ tls_merge_open_record(sk, rec, tmp, orig_end);
+ }
}
+ return rc;
+ } else if (split) {
+ msg_pl = &tmp->msg_plaintext;
+ msg_en = &tmp->msg_encrypted;
+ sk_msg_trim(sk, msg_en, msg_pl->sg.size +
+ tls_ctx->tx.overhead_size);
+ tls_ctx->pending_open_record_frags = true;
+ ctx->open_rec = tmp;
}
- /* Mark the end in the last sg entry if newly added */
- if (num_elem > *pages_used)
- sg_mark_end(&to[num_elem - 1]);
-out:
- if (rc)
- iov_iter_revert(from, size - *size_used);
- *size_used = size;
- *pages_used = num_elem;
-
- return rc;
+ return tls_tx_records(sk, flags);
}
-static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
- int bytes)
+static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
+ bool full_record, u8 record_type,
+ size_t *copied, int flags)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
- struct scatterlist *sg = ctx->sg_plaintext_data;
- int copy, i, rc = 0;
-
- for (i = tls_ctx->pending_open_record_frags;
- i < ctx->sg_plaintext_num_elem; ++i) {
- copy = sg[i].length;
- if (copy_from_iter(
- page_address(sg_page(&sg[i])) + sg[i].offset,
- copy, from) != copy) {
- rc = -EFAULT;
- goto out;
+ struct sk_msg msg_redir = { };
+ struct sk_psock *psock;
+ struct sock *sk_redir;
+ struct tls_rec *rec;
+ int err = 0, send;
+ bool enospc;
+
+ psock = sk_psock_get(sk);
+ if (!psock)
+ return tls_push_record(sk, flags, record_type);
+more_data:
+ enospc = sk_msg_full(msg);
+ if (psock->eval == __SK_NONE)
+ psock->eval = sk_psock_msg_verdict(sk, psock, msg);
+ if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
+ !enospc && !full_record) {
+ err = -ENOSPC;
+ goto out_err;
+ }
+ msg->cork_bytes = 0;
+ send = msg->sg.size;
+ if (msg->apply_bytes && msg->apply_bytes < send)
+ send = msg->apply_bytes;
+
+ switch (psock->eval) {
+ case __SK_PASS:
+ err = tls_push_record(sk, flags, record_type);
+ if (err < 0) {
+ *copied -= sk_msg_free(sk, msg);
+ tls_free_open_rec(sk);
+ goto out_err;
}
- bytes -= copy;
+ break;
+ case __SK_REDIRECT:
+ sk_redir = psock->sk_redir;
+ memcpy(&msg_redir, msg, sizeof(*msg));
+ if (msg->apply_bytes < send)
+ msg->apply_bytes = 0;
+ else
+ msg->apply_bytes -= send;
+ sk_msg_return_zero(sk, msg, send);
+ msg->sg.size -= send;
+ release_sock(sk);
+ err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
+ lock_sock(sk);
+ if (err < 0) {
+ *copied -= sk_msg_free_nocharge(sk, &msg_redir);
+ msg->sg.size = 0;
+ }
+ if (msg->sg.size == 0)
+ tls_free_open_rec(sk);
+ break;
+ case __SK_DROP:
+ default:
+ sk_msg_free_partial(sk, msg, send);
+ if (msg->apply_bytes < send)
+ msg->apply_bytes = 0;
+ else
+ msg->apply_bytes -= send;
+ if (msg->sg.size == 0)
+ tls_free_open_rec(sk);
+ *copied -= send;
+ err = -EACCES;
+ }
- ++tls_ctx->pending_open_record_frags;
+ if (likely(!err)) {
+ bool reset_eval = !ctx->open_rec;
- if (!bytes)
- break;
+ rec = ctx->open_rec;
+ if (rec) {
+ msg = &rec->msg_plaintext;
+ if (!msg->apply_bytes)
+ reset_eval = true;
+ }
+ if (reset_eval) {
+ psock->eval = __SK_NONE;
+ if (psock->sk_redir) {
+ sock_put(psock->sk_redir);
+ psock->sk_redir = NULL;
+ }
+ }
+ if (rec)
+ goto more_data;
}
+ out_err:
+ sk_psock_put(sk, psock);
+ return err;
+}
-out:
- return rc;
+static int tls_sw_push_pending_record(struct sock *sk, int flags)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec = ctx->open_rec;
+ struct sk_msg *msg_pl;
+ size_t copied;
+
+ if (!rec)
+ return 0;
+
+ msg_pl = &rec->msg_plaintext;
+ copied = msg_pl->sg.size;
+ if (!copied)
+ return 0;
+
+ return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
+ &copied, flags);
}
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
+ long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
- int ret = 0;
- int required_size;
- long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
+ bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
+ unsigned char record_type = TLS_RECORD_TYPE_DATA;
+ bool is_kvec = msg->msg_iter.type & ITER_KVEC;
bool eor = !(msg->msg_flags & MSG_MORE);
size_t try_to_copy, copied = 0;
- unsigned char record_type = TLS_RECORD_TYPE_DATA;
- int record_room;
+ struct sk_msg *msg_pl, *msg_en;
+ struct tls_rec *rec;
+ int required_size;
+ int num_async = 0;
bool full_record;
+ int record_room;
+ int num_zc = 0;
int orig_size;
- bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+ int ret = 0;
if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
return -ENOTSUPP;
lock_sock(sk);
- if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
- goto send_end;
+ /* Wait for any pending writes on the socket to complete */
+ if (unlikely(sk->sk_write_pending)) {
+ ret = wait_on_pending_writer(sk, &timeo);
+ if (unlikely(ret))
+ goto send_end;
+ }
if (unlikely(msg->msg_controllen)) {
ret = tls_proccess_cmsg(sk, msg, &record_type);
- if (ret)
- goto send_end;
+ if (ret) {
+ if (ret == -EINPROGRESS)
+ num_async++;
+ else if (ret != -EAGAIN)
+ goto send_end;
+ }
}
while (msg_data_left(msg)) {
@@ -385,22 +840,35 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
goto send_end;
}
- orig_size = ctx->sg_plaintext_size;
+ if (ctx->open_rec)
+ rec = ctx->open_rec;
+ else
+ rec = ctx->open_rec = tls_get_rec(sk);
+ if (!rec) {
+ ret = -ENOMEM;
+ goto send_end;
+ }
+
+ msg_pl = &rec->msg_plaintext;
+ msg_en = &rec->msg_encrypted;
+
+ orig_size = msg_pl->sg.size;
full_record = false;
try_to_copy = msg_data_left(msg);
- record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
+ record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
if (try_to_copy >= record_room) {
try_to_copy = record_room;
full_record = true;
}
- required_size = ctx->sg_plaintext_size + try_to_copy +
+ required_size = msg_pl->sg.size + try_to_copy +
tls_ctx->tx.overhead_size;
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
+
alloc_encrypted:
- ret = alloc_encrypted_sg(sk, required_size);
+ ret = tls_alloc_encrypted_msg(sk, required_size);
if (ret) {
if (ret != -ENOSPC)
goto wait_for_memory;
@@ -409,66 +877,88 @@ alloc_encrypted:
* actually allocated. The difference is due
* to max sg elements limit
*/
- try_to_copy -= required_size - ctx->sg_encrypted_size;
+ try_to_copy -= required_size - msg_en->sg.size;
full_record = true;
}
- if (!is_kvec && (full_record || eor)) {
- ret = zerocopy_from_iter(sk, &msg->msg_iter,
- try_to_copy, &ctx->sg_plaintext_num_elem,
- &ctx->sg_plaintext_size,
- ctx->sg_plaintext_data,
- ARRAY_SIZE(ctx->sg_plaintext_data),
- true);
+
+ if (!is_kvec && (full_record || eor) && !async_capable) {
+ u32 first = msg_pl->sg.end;
+
+ ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
+ msg_pl, try_to_copy);
if (ret)
goto fallback_to_reg_send;
+ rec->inplace_crypto = 0;
+
+ num_zc++;
copied += try_to_copy;
- ret = tls_push_record(sk, msg->msg_flags, record_type);
- if (ret)
- goto send_end;
- continue;
+ sk_msg_sg_copy_set(msg_pl, first);
+ ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
+ record_type, &copied,
+ msg->msg_flags);
+ if (ret) {
+ if (ret == -EINPROGRESS)
+ num_async++;
+ else if (ret == -ENOMEM)
+ goto wait_for_memory;
+ else if (ret == -ENOSPC)
+ goto rollback_iter;
+ else if (ret != -EAGAIN)
+ goto send_end;
+ }
+ continue;
+rollback_iter:
+ copied -= try_to_copy;
+ sk_msg_sg_copy_clear(msg_pl, first);
+ iov_iter_revert(&msg->msg_iter,
+ msg_pl->sg.size - orig_size);
fallback_to_reg_send:
- trim_sg(sk, ctx->sg_plaintext_data,
- &ctx->sg_plaintext_num_elem,
- &ctx->sg_plaintext_size,
- orig_size);
+ sk_msg_trim(sk, msg_pl, orig_size);
}
- required_size = ctx->sg_plaintext_size + try_to_copy;
-alloc_plaintext:
- ret = alloc_plaintext_sg(sk, required_size);
+ required_size = msg_pl->sg.size + try_to_copy;
+
+ ret = tls_clone_plaintext_msg(sk, required_size);
if (ret) {
if (ret != -ENOSPC)
- goto wait_for_memory;
+ goto send_end;
/* Adjust try_to_copy according to the amount that was
* actually allocated. The difference is due
* to max sg elements limit
*/
- try_to_copy -= required_size - ctx->sg_plaintext_size;
+ try_to_copy -= required_size - msg_pl->sg.size;
full_record = true;
-
- trim_sg(sk, ctx->sg_encrypted_data,
- &ctx->sg_encrypted_num_elem,
- &ctx->sg_encrypted_size,
- ctx->sg_plaintext_size +
- tls_ctx->tx.overhead_size);
+ sk_msg_trim(sk, msg_en, msg_pl->sg.size +
+ tls_ctx->tx.overhead_size);
}
- ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
- if (ret)
+ ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_pl,
+ try_to_copy);
+ if (ret < 0)
goto trim_sgl;
+ /* Flag pending open-record frags only after a successful copy;
+ * otherwise we would trim the sg but leave the flag set.
+ */
+ tls_ctx->pending_open_record_frags = true;
copied += try_to_copy;
if (full_record || eor) {
-push_record:
- ret = tls_push_record(sk, msg->msg_flags, record_type);
+ ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
+ record_type, &copied,
+ msg->msg_flags);
if (ret) {
- if (ret == -ENOMEM)
+ if (ret == -EINPROGRESS)
+ num_async++;
+ else if (ret == -ENOMEM)
goto wait_for_memory;
-
- goto send_end;
+ else if (ret != -EAGAIN) {
+ if (ret == -ENOSPC)
+ ret = 0;
+ goto send_end;
+ }
}
}
@@ -480,17 +970,37 @@ wait_for_memory:
ret = sk_stream_wait_memory(sk, &timeo);
if (ret) {
trim_sgl:
- trim_both_sgl(sk, orig_size);
+ tls_trim_both_msgs(sk, orig_size);
goto send_end;
}
- if (tls_is_pending_closed_record(tls_ctx))
- goto push_record;
-
- if (ctx->sg_encrypted_size < required_size)
+ if (msg_en->sg.size < required_size)
goto alloc_encrypted;
+ }
+
+ if (!num_async) {
+ goto send_end;
+ } else if (num_zc) {
+ /* Wait for pending encryptions to get completed */
+ smp_store_mb(ctx->async_notify, true);
+
+ if (atomic_read(&ctx->encrypt_pending))
+ crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ else
+ reinit_completion(&ctx->async_wait.completion);
- goto alloc_plaintext;
+ WRITE_ONCE(ctx->async_notify, false);
+
+ if (ctx->async_wait.err) {
+ ret = ctx->async_wait.err;
+ copied = 0;
+ }
+ }
+
+ /* Transmit if any encryptions have completed */
+ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+ cancel_delayed_work(&ctx->tx_work.work);
+ tls_tx_records(sk, msg->msg_flags);
}
send_end:
@@ -503,16 +1013,18 @@ send_end:
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
+ long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
- int ret = 0;
- long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
- bool eor;
- size_t orig_size = size;
unsigned char record_type = TLS_RECORD_TYPE_DATA;
- struct scatterlist *sg;
+ struct sk_msg *msg_pl;
+ struct tls_rec *rec;
+ int num_async = 0;
+ size_t copied = 0;
bool full_record;
int record_room;
+ int ret = 0;
+ bool eor;
if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
MSG_SENDPAGE_NOTLAST))
@@ -525,8 +1037,12 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
- if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
- goto sendpage_end;
+ /* Wait for any pending writes on the socket to complete */
+ if (unlikely(sk->sk_write_pending)) {
+ ret = wait_on_pending_writer(sk, &timeo);
+ if (unlikely(ret))
+ goto sendpage_end;
+ }
/* Call the sk_stream functions to manage the sndbuf mem. */
while (size > 0) {
@@ -537,20 +1053,33 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
goto sendpage_end;
}
+ if (ctx->open_rec)
+ rec = ctx->open_rec;
+ else
+ rec = ctx->open_rec = tls_get_rec(sk);
+ if (!rec) {
+ ret = -ENOMEM;
+ goto sendpage_end;
+ }
+
+ msg_pl = &rec->msg_plaintext;
+
full_record = false;
- record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
+ record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
+ copied = 0;
copy = size;
if (copy >= record_room) {
copy = record_room;
full_record = true;
}
- required_size = ctx->sg_plaintext_size + copy +
- tls_ctx->tx.overhead_size;
+
+ required_size = msg_pl->sg.size + copy +
+ tls_ctx->tx.overhead_size;
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
alloc_payload:
- ret = alloc_encrypted_sg(sk, required_size);
+ ret = tls_alloc_encrypted_msg(sk, required_size);
if (ret) {
if (ret != -ENOSPC)
goto wait_for_memory;
@@ -559,33 +1088,32 @@ alloc_payload:
* actually allocated. The difference is due
* to max sg elements limit
*/
- copy -= required_size - ctx->sg_plaintext_size;
+ copy -= required_size - msg_pl->sg.size;
full_record = true;
}
- get_page(page);
- sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
- sg_set_page(sg, page, copy, offset);
- sg_unmark_end(sg);
-
- ctx->sg_plaintext_num_elem++;
-
+ sk_msg_page_add(msg_pl, page, copy, offset);
sk_mem_charge(sk, copy);
+
offset += copy;
size -= copy;
- ctx->sg_plaintext_size += copy;
- tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;
-
- if (full_record || eor ||
- ctx->sg_plaintext_num_elem ==
- ARRAY_SIZE(ctx->sg_plaintext_data)) {
-push_record:
- ret = tls_push_record(sk, flags, record_type);
+ copied += copy;
+
+ tls_ctx->pending_open_record_frags = true;
+ if (full_record || eor || sk_msg_full(msg_pl)) {
+ rec->inplace_crypto = 0;
+ ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
+ record_type, &copied, flags);
if (ret) {
- if (ret == -ENOMEM)
+ if (ret == -EINPROGRESS)
+ num_async++;
+ else if (ret == -ENOMEM)
goto wait_for_memory;
-
- goto sendpage_end;
+ else if (ret != -EAGAIN) {
+ if (ret == -ENOSPC)
+ ret = 0;
+ goto sendpage_end;
+ }
}
}
continue;
@@ -594,35 +1122,35 @@ wait_for_sndbuf:
wait_for_memory:
ret = sk_stream_wait_memory(sk, &timeo);
if (ret) {
- trim_both_sgl(sk, ctx->sg_plaintext_size);
+ tls_trim_both_msgs(sk, msg_pl->sg.size);
goto sendpage_end;
}
- if (tls_is_pending_closed_record(tls_ctx))
- goto push_record;
-
goto alloc_payload;
}
+ if (num_async) {
+ /* Transmit if any encryptions have completed */
+ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+ cancel_delayed_work(&ctx->tx_work.work);
+ tls_tx_records(sk, flags);
+ }
+ }
sendpage_end:
- if (orig_size > size)
- ret = orig_size - size;
- else
- ret = sk_stream_error(sk, flags, ret);
-
+ ret = sk_stream_error(sk, flags, ret);
release_sock(sk);
- return ret;
+ return copied ? copied : ret;
}
-static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
- long timeo, int *err)
+static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
+ int flags, long timeo, int *err)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct sk_buff *skb;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- while (!(skb = ctx->recv_pkt)) {
+ while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
if (sk->sk_err) {
*err = sock_error(sk);
return NULL;
@@ -641,7 +1169,10 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
add_wait_queue(sk_sleep(sk), &wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
+ sk_wait_event(sk, &timeo,
+ ctx->recv_pkt != skb ||
+ !sk_psock_queue_empty(psock),
+ &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
remove_wait_queue(sk_sleep(sk), &wait);
@@ -655,6 +1186,64 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
return skb;
}
+static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
+ int length, int *pages_used,
+ unsigned int *size_used,
+ struct scatterlist *to,
+ int to_max_pages)
+{
+ int rc = 0, i = 0, num_elem = *pages_used, maxpages;
+ struct page *pages[MAX_SKB_FRAGS];
+ unsigned int size = *size_used;
+ ssize_t copied, use;
+ size_t offset;
+
+ while (length > 0) {
+ i = 0;
+ maxpages = to_max_pages - num_elem;
+ if (maxpages == 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+ copied = iov_iter_get_pages(from, pages,
+ length,
+ maxpages, &offset);
+ if (copied <= 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ iov_iter_advance(from, copied);
+
+ length -= copied;
+ size += copied;
+ while (copied) {
+ use = min_t(int, copied, PAGE_SIZE - offset);
+
+ sg_set_page(&to[num_elem],
+ pages[i], use, offset);
+ sg_unmark_end(&to[num_elem]);
+ /* We do not uncharge memory from this API */
+
+ offset = 0;
+ copied -= use;
+
+ i++;
+ num_elem++;
+ }
+ }
+ /* Mark the end in the last sg entry if newly added */
+ if (num_elem > *pages_used)
+ sg_mark_end(&to[num_elem - 1]);
+out:
+ if (rc)
+ iov_iter_revert(from, size - *size_used);
+ *size_used = size;
+ *pages_used = num_elem;
+
+ return rc;
+}
+
/* This function decrypts the input skb into either out_iov or out_sg,
 * or into the skb buffers themselves. The input parameter 'zc' indicates
 * whether zero-copy mode should be tried. With zero-copy mode, either
@@ -684,12 +1273,14 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
else
n_sgout = sg_nents(out_sg);
+ n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
+ rxm->full_len - tls_ctx->rx.prepend_size);
} else {
n_sgout = 0;
*zc = false;
+ n_sgin = skb_cow_data(skb, 0, &unused);
}
- n_sgin = skb_cow_data(skb, 0, &unused);
if (n_sgin < 1)
return -EBADMSG;
@@ -750,9 +1341,9 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);
*chunk = 0;
- err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
- chunk, &sgout[1],
- (n_sgout - 1), false);
+ err = tls_setup_from_iter(sk, out_iov, data_len,
+ &pages, chunk, &sgout[1],
+ (n_sgout - 1));
if (err < 0)
goto fallback_to_reg_recv;
} else if (out_sg) {
@@ -769,7 +1360,10 @@ fallback_to_reg_recv:
}
/* Prepare and submit AEAD request */
- err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req);
+ err = tls_do_decryption(sk, skb, sgin, sgout, iv,
+ data_len, aead_req, *zc);
+ if (err == -EINPROGRESS)
+ return err;
/* Release the pages in case iov was mapped to pages */
for (; pages > 0; pages--)
@@ -794,8 +1388,12 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
#endif
if (!ctx->decrypted) {
err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
- if (err < 0)
+ if (err < 0) {
+ if (err == -EINPROGRESS)
+ tls_advance_record_sn(sk, &tls_ctx->rx);
+
return err;
+ }
} else {
*zc = false;
}
@@ -823,18 +1421,20 @@ static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- struct strp_msg *rxm = strp_msg(skb);
- if (len < rxm->full_len) {
- rxm->offset += len;
- rxm->full_len -= len;
+ if (skb) {
+ struct strp_msg *rxm = strp_msg(skb);
- return false;
+ if (len < rxm->full_len) {
+ rxm->offset += len;
+ rxm->full_len -= len;
+ return false;
+ }
+ kfree_skb(skb);
}
/* Finished with message */
ctx->recv_pkt = NULL;
- kfree_skb(skb);
__strp_unpause(&ctx->strp);
return true;
@@ -849,6 +1449,7 @@ int tls_sw_recvmsg(struct sock *sk,
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct sk_psock *psock;
unsigned char control;
struct strp_msg *rxm;
struct sk_buff *skb;
@@ -857,25 +1458,40 @@ int tls_sw_recvmsg(struct sock *sk,
int target, err = 0;
long timeo;
bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+ int num_async = 0;
flags |= nonblock;
if (unlikely(flags & MSG_ERRQUEUE))
return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
+ psock = sk_psock_get(sk);
lock_sock(sk);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
bool zc = false;
+ bool async = false;
int chunk = 0;
- skb = tls_wait_data(sk, flags, timeo, &err);
- if (!skb)
+ skb = tls_wait_data(sk, psock, flags, timeo, &err);
+ if (!skb) {
+ if (psock) {
+ int ret = __tcp_bpf_recvmsg(sk, psock,
+ msg, len, flags);
+
+ if (ret > 0) {
+ copied += ret;
+ len -= ret;
+ continue;
+ }
+ }
goto recv_end;
+ }
rxm = strp_msg(skb);
+
if (!cmsg) {
int cerr;
@@ -902,26 +1518,39 @@ int tls_sw_recvmsg(struct sock *sk,
err = decrypt_skb_update(sk, skb, &msg->msg_iter,
&chunk, &zc);
- if (err < 0) {
+ if (err < 0 && err != -EINPROGRESS) {
tls_err_abort(sk, EBADMSG);
goto recv_end;
}
+
+ if (err == -EINPROGRESS) {
+ async = true;
+ num_async++;
+ goto pick_next_record;
+ }
+
ctx->decrypted = true;
}
if (!zc) {
chunk = min_t(unsigned int, rxm->full_len, len);
+
err = skb_copy_datagram_msg(skb, rxm->offset, msg,
chunk);
if (err < 0)
goto recv_end;
}
+pick_next_record:
copied += chunk;
len -= chunk;
if (likely(!(flags & MSG_PEEK))) {
u8 control = ctx->control;
+ /* For async, drop current skb reference */
+ if (async)
+ skb = NULL;
+
if (tls_sw_advance_skb(sk, skb, chunk)) {
/* Return full control message to
* userspace before trying to parse
@@ -930,15 +1559,43 @@ int tls_sw_recvmsg(struct sock *sk,
msg->msg_flags |= MSG_EOR;
if (control != TLS_RECORD_TYPE_DATA)
goto recv_end;
+ } else {
+ break;
}
+ } else {
+ /* MSG_PEEK right now cannot look beyond the current skb
+ * from strparser, meaning we cannot advance the skb here
+ * and thus cannot unpause strparser, since we'd lose the
+ * original one.
+ */
+ break;
}
+
/* If we have a new message from strparser, continue now. */
if (copied >= target && !ctx->recv_pkt)
break;
} while (len);
recv_end:
+ if (num_async) {
+ /* Wait for all previously submitted records to be decrypted */
+ smp_store_mb(ctx->async_notify, true);
+ if (atomic_read(&ctx->decrypt_pending)) {
+ err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ if (err) {
+ /* one of the async decrypts failed */
+ tls_err_abort(sk, err);
+ copied = 0;
+ }
+ } else {
+ reinit_completion(&ctx->async_wait.completion);
+ }
+ WRITE_ONCE(ctx->async_notify, false);
+ }
+
release_sock(sk);
+ if (psock)
+ sk_psock_put(sk, psock);
return copied ? : err;
}
@@ -961,7 +1618,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
- skb = tls_wait_data(sk, flags, timeo, &err);
+ skb = tls_wait_data(sk, NULL, flags, timeo, &err);
if (!skb)
goto splice_read_end;
@@ -995,23 +1652,20 @@ splice_read_end:
return copied ? : err;
}
-unsigned int tls_sw_poll(struct file *file, struct socket *sock,
- struct poll_table_struct *wait)
+bool tls_sw_stream_read(const struct sock *sk)
{
- unsigned int ret;
- struct sock *sk = sock->sk;
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ bool ingress_empty = true;
+ struct sk_psock *psock;
- /* Grab POLLOUT and POLLHUP from the underlying socket */
- ret = ctx->sk_poll(file, sock, wait);
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (psock)
+ ingress_empty = list_empty(&psock->ingress_msg);
+ rcu_read_unlock();
- /* Clear POLLIN bits, and set based on recv_pkt */
- ret &= ~(POLLIN | POLLRDNORM);
- if (ctx->recv_pkt)
- ret |= POLLIN | POLLRDNORM;
-
- return ret;
+ return !ingress_empty || ctx->recv_pkt;
}
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
@@ -1055,8 +1709,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
goto read_failure;
}
- if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
- header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
+ if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
+ header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
ret = -EINVAL;
goto read_failure;
}
@@ -1090,17 +1744,66 @@ static void tls_data_ready(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ struct sk_psock *psock;
strp_data_ready(&ctx->strp);
+
+ psock = sk_psock_get(sk);
+ if (psock && !list_empty(&psock->ingress_msg)) {
+ ctx->saved_data_ready(sk);
+ sk_psock_put(sk, psock);
+ }
}
void tls_sw_free_resources_tx(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec, *tmp;
+
+ /* Wait for any pending async encryptions to complete */
+ smp_store_mb(ctx->async_notify, true);
+ if (atomic_read(&ctx->encrypt_pending))
+ crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+
+ cancel_delayed_work_sync(&ctx->tx_work.work);
+
+ /* Tx whatever records we can transmit and abandon the rest */
+ tls_tx_records(sk, -1);
+
+ /* Free up un-sent records in tx_list. First, free
+ * the partially sent record if any at head of tx_list.
+ */
+ if (tls_ctx->partially_sent_record) {
+ struct scatterlist *sg = tls_ctx->partially_sent_record;
+
+ while (1) {
+ put_page(sg_page(sg));
+ sk_mem_uncharge(sk, sg->length);
+
+ if (sg_is_last(sg))
+ break;
+ sg++;
+ }
+
+ tls_ctx->partially_sent_record = NULL;
+
+ rec = list_first_entry(&ctx->tx_list,
+ struct tls_rec, list);
+ list_del(&rec->list);
+ sk_msg_free(sk, &rec->msg_plaintext);
+ kfree(rec);
+ }
+
+ list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
+ list_del(&rec->list);
+ sk_msg_free(sk, &rec->msg_encrypted);
+ sk_msg_free(sk, &rec->msg_plaintext);
+ kfree(rec);
+ }
crypto_free_aead(ctx->aead_send);
- tls_free_both_sg(sk);
+ tls_free_open_rec(sk);
kfree(ctx);
}
@@ -1134,9 +1837,26 @@ void tls_sw_free_resources_rx(struct sock *sk)
kfree(ctx);
}
+/* The work handler to transmit the encrypted records in tx_list */
+static void tx_work_handler(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct tx_work *tx_work = container_of(delayed_work,
+ struct tx_work, work);
+ struct sock *sk = tx_work->sk;
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+
+ if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+ return;
+
+ lock_sock(sk);
+ tls_tx_records(sk, -1);
+ release_sock(sk);
+}
+
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
- char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
struct tls_crypto_info *crypto_info;
struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
struct tls_sw_context_tx *sw_ctx_tx = NULL;
@@ -1181,12 +1901,15 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
if (tx) {
crypto_init_wait(&sw_ctx_tx->async_wait);
- crypto_info = &ctx->crypto_send;
+ crypto_info = &ctx->crypto_send.info;
cctx = &ctx->tx;
aead = &sw_ctx_tx->aead_send;
+ INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
+ INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
+ sw_ctx_tx->tx_work.sk = sk;
} else {
crypto_init_wait(&sw_ctx_rx->async_wait);
- crypto_info = &ctx->crypto_recv;
+ crypto_info = &ctx->crypto_recv.info;
cctx = &ctx->rx;
aead = &sw_ctx_rx->aead_recv;
}
@@ -1234,26 +1957,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
goto free_iv;
}
- if (sw_ctx_tx) {
- sg_init_table(sw_ctx_tx->sg_encrypted_data,
- ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
- sg_init_table(sw_ctx_tx->sg_plaintext_data,
- ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));
-
- sg_init_table(sw_ctx_tx->sg_aead_in, 2);
- sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
- sizeof(sw_ctx_tx->aad_space));
- sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
- sg_chain(sw_ctx_tx->sg_aead_in, 2,
- sw_ctx_tx->sg_plaintext_data);
- sg_init_table(sw_ctx_tx->sg_aead_out, 2);
- sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
- sizeof(sw_ctx_tx->aad_space));
- sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
- sg_chain(sw_ctx_tx->sg_aead_out, 2,
- sw_ctx_tx->sg_encrypted_data);
- }
-
if (!*aead) {
*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
if (IS_ERR(*aead)) {
@@ -1265,9 +1968,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
ctx->push_pending_record = tls_sw_push_pending_record;
- memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
- rc = crypto_aead_setkey(*aead, keyval,
+ rc = crypto_aead_setkey(*aead, gcm_128_info->key,
TLS_CIPHER_AES_GCM_128_KEY_SIZE);
if (rc)
goto free_aead;
@@ -1289,8 +1990,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
sk->sk_data_ready = tls_data_ready;
write_unlock_bh(&sk->sk_callback_lock);
- sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
-
strp_check_rcv(&sw_ctx_rx->strp);
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d1edfa3cad61..74d1eed7cbd4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -225,6 +225,8 @@ static inline void unix_release_addr(struct unix_address *addr)
static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
+ *hashp = 0;
+
if (len <= sizeof(short) || len > sizeof(*sunaddr))
return -EINVAL;
if (!sunaddr || sunaddr->sun_family != AF_UNIX)
@@ -2640,7 +2642,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
struct sock *sk = sock->sk;
__poll_t mask;
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
mask = 0;
/* exceptional events? */
@@ -2677,7 +2679,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
unsigned int writable;
__poll_t mask;
- sock_poll_wait(file, wait);
+ sock_poll_wait(file, sock, wait);
mask = 0;
/* exceptional events? */
diff --git a/net/wireless/core.c b/net/wireless/core.c
index a88551f3bc43..5bd01058b9e6 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1019,36 +1019,49 @@ void cfg80211_cqm_config_free(struct wireless_dev *wdev)
wdev->cqm_config = NULL;
}
-void cfg80211_unregister_wdev(struct wireless_dev *wdev)
+static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
ASSERT_RTNL();
- if (WARN_ON(wdev->netdev))
- return;
-
nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
list_del_rcu(&wdev->list);
- synchronize_rcu();
+ if (sync)
+ synchronize_rcu();
rdev->devlist_generation++;
+ cfg80211_mlme_purge_registrations(wdev);
+
switch (wdev->iftype) {
case NL80211_IFTYPE_P2P_DEVICE:
- cfg80211_mlme_purge_registrations(wdev);
cfg80211_stop_p2p_device(rdev, wdev);
break;
case NL80211_IFTYPE_NAN:
cfg80211_stop_nan(rdev, wdev);
break;
default:
- WARN_ON_ONCE(1);
break;
}
+#ifdef CONFIG_CFG80211_WEXT
+ kzfree(wdev->wext.keys);
+#endif
+ /* only initialized if we have a netdev */
+ if (wdev->netdev)
+ flush_work(&wdev->disconnect_wk);
+
cfg80211_cqm_config_free(wdev);
}
+
+void cfg80211_unregister_wdev(struct wireless_dev *wdev)
+{
+ if (WARN_ON(wdev->netdev))
+ return;
+
+ __cfg80211_unregister_wdev(wdev, true);
+}
EXPORT_SYMBOL(cfg80211_unregister_wdev);
static const struct device_type wiphy_type = {
@@ -1153,6 +1166,30 @@ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
}
EXPORT_SYMBOL(cfg80211_stop_iface);
+void cfg80211_init_wdev(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev)
+{
+ mutex_init(&wdev->mtx);
+ INIT_LIST_HEAD(&wdev->event_list);
+ spin_lock_init(&wdev->event_lock);
+ INIT_LIST_HEAD(&wdev->mgmt_registrations);
+ spin_lock_init(&wdev->mgmt_registrations_lock);
+
+ /*
+ * We get here also when the interface changes network namespaces,
+ * as it's registered into the new one, but we don't want it to
+ * change ID in that case. Checking if the ID is already assigned
+ * works, because 0 isn't considered a valid ID and the memory is
+ * 0-initialized.
+ */
+ if (!wdev->identifier)
+ wdev->identifier = ++rdev->wdev_id;
+ list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
+ rdev->devlist_generation++;
+
+ nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
+}
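
cfg80211_init_wdev() becomes the single place that prepares a wireless_dev, whether it arrives through the netdev notifier or through nl80211_new_interface(). The assign-once identifier idiom in the comment works only because 0 is reserved as "unassigned" and the structure starts zeroed; a standalone sketch under those assumptions (my_dev and my_register are hypothetical):

#include <linux/types.h>
#include <linux/list.h>

struct my_dev {
	u32 identifier;		/* 0 == not yet assigned */
	struct list_head list;
};

static u32 next_id;		/* caller serializes, e.g. under RTNL */

static void my_register(struct my_dev *dev, struct list_head *devlist)
{
	/* re-registration (e.g. a netns move) keeps the old ID */
	if (!dev->identifier)
		dev->identifier = ++next_id;
	list_add_rcu(&dev->list, devlist);
}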
+
static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
unsigned long state, void *ptr)
{
@@ -1178,23 +1215,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
* called within code protected by it when interfaces
* are added with nl80211.
*/
- mutex_init(&wdev->mtx);
- INIT_LIST_HEAD(&wdev->event_list);
- spin_lock_init(&wdev->event_lock);
- INIT_LIST_HEAD(&wdev->mgmt_registrations);
- spin_lock_init(&wdev->mgmt_registrations_lock);
-
- /*
- * We get here also when the interface changes network namespaces,
- * as it's registered into the new one, but we don't want it to
- * change ID in that case. Checking if the ID is already assigned
- * works, because 0 isn't considered a valid ID and the memory is
- * 0-initialized.
- */
- if (!wdev->identifier)
- wdev->identifier = ++rdev->wdev_id;
- list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
- rdev->devlist_generation++;
/* can only change netns with wiphy */
dev->features |= NETIF_F_NETNS_LOCAL;
@@ -1223,7 +1243,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk);
- nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
+ cfg80211_init_wdev(rdev, wdev);
break;
case NETDEV_GOING_DOWN:
cfg80211_leave(rdev, wdev);
@@ -1238,7 +1258,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
list_for_each_entry_safe(pos, tmp,
&rdev->sched_scan_req_list, list) {
- if (WARN_ON(pos && pos->dev == wdev->netdev))
+ if (WARN_ON(pos->dev == wdev->netdev))
cfg80211_stop_sched_scan_req(rdev, pos, false);
}
@@ -1302,17 +1322,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
* remove and clean it up.
*/
if (!list_empty(&wdev->list)) {
- nl80211_notify_iface(rdev, wdev,
- NL80211_CMD_DEL_INTERFACE);
+ __cfg80211_unregister_wdev(wdev, false);
sysfs_remove_link(&dev->dev.kobj, "phy80211");
- list_del_rcu(&wdev->list);
- rdev->devlist_generation++;
- cfg80211_mlme_purge_registrations(wdev);
-#ifdef CONFIG_CFG80211_WEXT
- kzfree(wdev->wext.keys);
-#endif
- flush_work(&wdev->disconnect_wk);
- cfg80211_cqm_config_free(wdev);
}
/*
* synchronise (so that we won't find this netdev
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 7f52ef569320..c61dbba8bf47 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -66,6 +66,7 @@ struct cfg80211_registered_device {
/* protected by RTNL only */
int num_running_ifaces;
int num_running_monitor_ifaces;
+ u64 cookie_counter;
/* BSSes/scanning */
spinlock_t bss_lock;
@@ -133,6 +134,16 @@ cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev)
#endif
}
+static inline u64 cfg80211_assign_cookie(struct cfg80211_registered_device *rdev)
+{
+ u64 r = ++rdev->cookie_counter;
+
+ if (WARN_ON(r == 0))
+ r = ++rdev->cookie_counter;
+
+ return r;
+}
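
Since a cookie of 0 means "none" to nl80211 users, the helper warns and skips 0 if the u64 counter ever wraps. The idiom in isolation (assign_cookie and counter are illustrative stand-ins for the RTNL-protected rdev field):

#include <linux/types.h>

static u64 counter;	/* protected by the caller, like cookie_counter */

static u64 assign_cookie(void)
{
	u64 r = ++counter;

	if (r == 0)	/* wrapped: 0 is reserved for "no cookie" */
		r = ++counter;
	return r;
}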
+
extern struct workqueue_struct *cfg80211_wq;
extern struct list_head cfg80211_rdev_list;
extern int cfg80211_rdev_list_generation;
@@ -187,6 +198,9 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx);
int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
struct net *net);
+void cfg80211_init_wdev(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev);
+
static inline void wdev_lock(struct wireless_dev *wdev)
__acquires(wdev)
{
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index e6bce1f130c9..b5e235573c8a 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -30,7 +30,7 @@
#include <net/iw_handler.h>
#include <crypto/hash.h>
-#include <crypto/skcipher.h>
+#include <linux/crypto.h>
#include <linux/crc32.h>
#include <net/lib80211.h>
@@ -64,9 +64,9 @@ struct lib80211_tkip_data {
int key_idx;
- struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_cipher *rx_tfm_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_cipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
@@ -99,8 +99,7 @@ static void *lib80211_tkip_init(int key_idx)
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_arc4)) {
priv->tx_tfm_arc4 = NULL;
goto fail;
@@ -112,8 +111,7 @@ static void *lib80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_arc4)) {
priv->rx_tfm_arc4 = NULL;
goto fail;
@@ -130,9 +128,9 @@ static void *lib80211_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_cipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_skcipher(priv->rx_tfm_arc4);
+ crypto_free_cipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -144,9 +142,9 @@ static void lib80211_tkip_deinit(void *priv)
struct lib80211_tkip_data *_priv = priv;
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_cipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_skcipher(_priv->rx_tfm_arc4);
+ crypto_free_cipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -344,12 +342,10 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct lib80211_tkip_data *tkey = priv;
- SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
int len;
u8 rc4key[16], *pos, *icv;
u32 crc;
- struct scatterlist sg;
- int err;
+ int i;
if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -374,14 +370,10 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, len + 4);
- skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
- err = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- return err;
+ crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ for (i = 0; i < len + 4; i++)
+ crypto_cipher_encrypt_one(tkey->tx_tfm_arc4, pos + i, pos + i);
+ return 0;
}
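
This conversion relies on the kernel's "arc4" transform being a stateful stream cipher with a block size of one: each crypto_cipher_encrypt_one() call consumes the next keystream byte, so the per-byte loop is equivalent to the old scatterlist request over "ecb(arc4)". A hedged helper sketch of the pattern (arc4_apply is not part of the patch, and it checks the setkey result where the patch ignores it):

#include <linux/crypto.h>

static int arc4_apply(struct crypto_cipher *tfm, const u8 *key,
		      unsigned int klen, u8 *buf, unsigned int len)
{
	unsigned int i;
	int err;

	err = crypto_cipher_setkey(tfm, key, klen);
	if (err)
		return err;
	/* RC4 keystream advances across calls; in-place is fine */
	for (i = 0; i < len; i++)
		crypto_cipher_encrypt_one(tfm, buf + i, buf + i);
	return 0;
}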
/*
@@ -400,7 +392,6 @@ static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n,
static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct lib80211_tkip_data *tkey = priv;
- SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
u8 rc4key[16];
u8 keyidx, *pos;
u32 iv32;
@@ -408,9 +399,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
struct ieee80211_hdr *hdr;
u8 icv[4];
u32 crc;
- struct scatterlist sg;
int plen;
- int err;
+ int i;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -463,18 +453,9 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
- crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, plen + 4);
- skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
- err = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- if (err) {
- net_dbg_ratelimited("TKIP: failed to decrypt received packet from %pM\n",
- hdr->addr2);
- return -7;
- }
+ crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ for (i = 0; i < plen + 4; i++)
+ crypto_cipher_decrypt_one(tkey->rx_tfm_arc4, pos + i, pos + i);
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
@@ -660,9 +641,9 @@ static int lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
struct lib80211_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_cipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_cipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c
index d05f58b0fd04..6015f6b542a6 100644
--- a/net/wireless/lib80211_crypt_wep.c
+++ b/net/wireless/lib80211_crypt_wep.c
@@ -22,7 +22,7 @@
#include <net/lib80211.h>
-#include <crypto/skcipher.h>
+#include <linux/crypto.h>
#include <linux/crc32.h>
MODULE_AUTHOR("Jouni Malinen");
@@ -35,8 +35,8 @@ struct lib80211_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_skcipher *tx_tfm;
- struct crypto_skcipher *rx_tfm;
+ struct crypto_cipher *tx_tfm;
+ struct crypto_cipher *rx_tfm;
};
static void *lib80211_wep_init(int keyidx)
@@ -48,13 +48,13 @@ static void *lib80211_wep_init(int keyidx)
goto fail;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm)) {
priv->tx_tfm = NULL;
goto fail;
}
- priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm)) {
priv->rx_tfm = NULL;
goto fail;
@@ -66,8 +66,8 @@ static void *lib80211_wep_init(int keyidx)
fail:
if (priv) {
- crypto_free_skcipher(priv->tx_tfm);
- crypto_free_skcipher(priv->rx_tfm);
+ crypto_free_cipher(priv->tx_tfm);
+ crypto_free_cipher(priv->rx_tfm);
kfree(priv);
}
return NULL;
@@ -77,8 +77,8 @@ static void lib80211_wep_deinit(void *priv)
{
struct lib80211_wep_data *_priv = priv;
if (_priv) {
- crypto_free_skcipher(_priv->tx_tfm);
- crypto_free_skcipher(_priv->rx_tfm);
+ crypto_free_cipher(_priv->tx_tfm);
+ crypto_free_cipher(_priv->rx_tfm);
}
kfree(priv);
}
@@ -129,12 +129,10 @@ static int lib80211_wep_build_iv(struct sk_buff *skb, int hdr_len,
static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct lib80211_wep_data *wep = priv;
- SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
u32 crc, klen, len;
u8 *pos, *icv;
- struct scatterlist sg;
u8 key[WEP_KEY_LEN + 3];
- int err;
+ int i;
/* other checks are in lib80211_wep_build_iv */
if (skb_tailroom(skb) < 4)
@@ -162,14 +160,12 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(wep->tx_tfm, key, klen);
- sg_init_one(&sg, pos, len + 4);
- skcipher_request_set_tfm(req, wep->tx_tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
- err = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- return err;
+ crypto_cipher_setkey(wep->tx_tfm, key, klen);
+
+ for (i = 0; i < len + 4; i++)
+ crypto_cipher_encrypt_one(wep->tx_tfm, pos + i, pos + i);
+
+ return 0;
}
/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
@@ -182,12 +178,10 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct lib80211_wep_data *wep = priv;
- SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
u32 crc, klen, plen;
u8 key[WEP_KEY_LEN + 3];
u8 keyidx, *pos, icv[4];
- struct scatterlist sg;
- int err;
+ int i;
if (skb->len < hdr_len + 8)
return -1;
@@ -208,15 +202,9 @@ static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
/* Apply RC4 to data and compute CRC32 over decrypted data */
plen = skb->len - hdr_len - 8;
- crypto_skcipher_setkey(wep->rx_tfm, key, klen);
- sg_init_one(&sg, pos, plen + 4);
- skcipher_request_set_tfm(req, wep->rx_tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
- err = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- if (err)
- return -7;
+ crypto_cipher_setkey(wep->rx_tfm, key, klen);
+ for (i = 0; i < plen + 4; i++)
+ crypto_cipher_decrypt_one(wep->rx_tfm, pos + i, pos + i);
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4b8ec659e797..744b5851bbf9 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -200,7 +200,46 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
return __cfg80211_rdev_from_attrs(netns, info->attrs);
}
+static int validate_ie_attr(const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ const u8 *pos;
+ int len;
+
+ pos = nla_data(attr);
+ len = nla_len(attr);
+
+ while (len) {
+ u8 elemlen;
+
+ if (len < 2)
+ goto error;
+ len -= 2;
+
+ elemlen = pos[1];
+ if (elemlen > len)
+ goto error;
+
+ len -= elemlen;
+ pos += 2 + elemlen;
+ }
+
+ return 0;
+error:
+ NL_SET_ERR_MSG_ATTR(extack, attr, "malformed information elements");
+ return -EINVAL;
+}
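
validate_ie_attr() walks 802.11 information elements as (id, length, data) triples and rejects any element that overruns the attribute, reporting through extack rather than silently returning false as the old is_valid_ie_attr() did. The same walk on a raw buffer, as a userspace-style sketch:

#include <stdbool.h>
#include <stddef.h>

/* Each IE is a 2-byte header (id, len) followed by len data bytes. */
static bool ies_well_formed(const unsigned char *pos, size_t len)
{
	while (len) {
		if (len < 2)		/* truncated header */
			return false;
		if (pos[1] > len - 2)	/* element overruns the buffer */
			return false;
		len -= 2 + pos[1];
		pos += 2 + pos[1];
	}
	return true;
}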
+
/* policy for the attributes */
+static const struct nla_policy
+nl80211_ftm_responder_policy[NL80211_FTM_RESP_ATTR_MAX + 1] = {
+ [NL80211_FTM_RESP_ATTR_ENABLED] = { .type = NLA_FLAG, },
+ [NL80211_FTM_RESP_ATTR_LCI] = { .type = NLA_BINARY,
+ .len = U8_MAX },
+ [NL80211_FTM_RESP_ATTR_CIVICLOC] = { .type = NLA_BINARY,
+ .len = U8_MAX },
+};
+
static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
@@ -213,14 +252,14 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 },
[NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 },
- [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 },
- [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
+ [NL80211_ATTR_WIPHY_RETRY_SHORT] = NLA_POLICY_MIN(NLA_U8, 1),
+ [NL80211_ATTR_WIPHY_RETRY_LONG] = NLA_POLICY_MIN(NLA_U8, 1),
[NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 },
[NL80211_ATTR_WIPHY_DYN_ACK] = { .type = NLA_FLAG },
- [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 },
+ [NL80211_ATTR_IFTYPE] = NLA_POLICY_MAX(NLA_U32, NL80211_IFTYPE_MAX),
[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
[NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
@@ -230,24 +269,28 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
[NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
.len = WLAN_MAX_KEY_LEN },
- [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
+ [NL80211_ATTR_KEY_IDX] = NLA_POLICY_MAX(NLA_U8, 5),
[NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
[NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
[NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
- [NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 },
+ [NL80211_ATTR_KEY_TYPE] =
+ NLA_POLICY_MAX(NLA_U32, NUM_NL80211_KEYTYPES),
[NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
[NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 },
[NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
- [NL80211_ATTR_BEACON_TAIL] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
- [NL80211_ATTR_STA_AID] = { .type = NLA_U16 },
+ [NL80211_ATTR_BEACON_TAIL] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
+ [NL80211_ATTR_STA_AID] =
+ NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
[NL80211_ATTR_STA_FLAGS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 },
[NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY,
.len = NL80211_MAX_SUPP_RATES },
- [NL80211_ATTR_STA_PLINK_ACTION] = { .type = NLA_U8 },
+ [NL80211_ATTR_STA_PLINK_ACTION] =
+ NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_ACTIONS - 1),
[NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 },
[NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
[NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
@@ -270,8 +313,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN },
[NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 },
- [NL80211_ATTR_IE] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_IE] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
+ validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
[NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED },
[NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED },
@@ -281,7 +325,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 },
[NL80211_ATTR_FREQ_FIXED] = { .type = NLA_FLAG },
[NL80211_ATTR_TIMED_OUT] = { .type = NLA_FLAG },
- [NL80211_ATTR_USE_MFP] = { .type = NLA_U32 },
+ [NL80211_ATTR_USE_MFP] = NLA_POLICY_RANGE(NLA_U32,
+ NL80211_MFP_NO,
+ NL80211_MFP_OPTIONAL),
[NL80211_ATTR_STA_FLAGS2] = {
.len = sizeof(struct nl80211_sta_flag_update),
},
@@ -301,7 +347,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_FRAME] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
[NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, },
- [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 },
+ [NL80211_ATTR_PS_STATE] = NLA_POLICY_RANGE(NLA_U32,
+ NL80211_PS_DISABLED,
+ NL80211_PS_ENABLED),
[NL80211_ATTR_CQM] = { .type = NLA_NESTED, },
[NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG },
[NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 },
@@ -314,15 +362,23 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_OFFCHANNEL_TX_OK] = { .type = NLA_FLAG },
[NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED },
[NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
- [NL80211_ATTR_STA_PLINK_STATE] = { .type = NLA_U8 },
+ [NL80211_ATTR_STA_PLINK_STATE] =
+ NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1),
+ [NL80211_ATTR_MESH_PEER_AID] =
+ NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
[NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
[NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED },
[NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED },
- [NL80211_ATTR_HIDDEN_SSID] = { .type = NLA_U32 },
- [NL80211_ATTR_IE_PROBE_RESP] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
- [NL80211_ATTR_IE_ASSOC_RESP] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_HIDDEN_SSID] =
+ NLA_POLICY_RANGE(NLA_U32,
+ NL80211_HIDDEN_SSID_NOT_IN_USE,
+ NL80211_HIDDEN_SSID_ZERO_CONTENTS),
+ [NL80211_ATTR_IE_PROBE_RESP] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
+ [NL80211_ATTR_IE_ASSOC_RESP] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
[NL80211_ATTR_ROAM_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_SCHED_SCAN_MATCH] = { .type = NLA_NESTED },
[NL80211_ATTR_TX_NO_CCK_RATE] = { .type = NLA_FLAG },
@@ -348,9 +404,12 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, },
[NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN },
[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
- [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
- [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
- [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
+ [NL80211_ATTR_P2P_CTWINDOW] = NLA_POLICY_MAX(NLA_U8, 127),
+ [NL80211_ATTR_P2P_OPPPS] = NLA_POLICY_MAX(NLA_U8, 1),
+ [NL80211_ATTR_LOCAL_MESH_POWER_MODE] =
+ NLA_POLICY_RANGE(NLA_U32,
+ NL80211_MESH_POWER_UNKNOWN + 1,
+ NL80211_MESH_POWER_MAX),
[NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
[NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
@@ -363,7 +422,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MDID] = { .type = NLA_U16 },
[NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
- [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
+ [NL80211_ATTR_PEER_AID] =
+ NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
[NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
[NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
[NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED },
@@ -384,8 +444,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_SOCKET_OWNER] = { .type = NLA_FLAG },
[NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY },
[NL80211_ATTR_USE_RRM] = { .type = NLA_FLAG },
- [NL80211_ATTR_TSID] = { .type = NLA_U8 },
- [NL80211_ATTR_USER_PRIO] = { .type = NLA_U8 },
+ [NL80211_ATTR_TSID] = NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_TIDS - 1),
+ [NL80211_ATTR_USER_PRIO] =
+ NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1),
[NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
[NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
[NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
@@ -395,12 +456,13 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
[NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
[NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED },
- [NL80211_ATTR_STA_SUPPORT_P2P_PS] = { .type = NLA_U8 },
+ [NL80211_ATTR_STA_SUPPORT_P2P_PS] =
+ NLA_POLICY_MAX(NLA_U8, NUM_NL80211_P2P_PS_STATUS - 1),
[NL80211_ATTR_MU_MIMO_GROUP_DATA] = {
.len = VHT_MUMIMO_GROUPS_DATA_LEN
},
[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN },
- [NL80211_ATTR_NAN_MASTER_PREF] = { .type = NLA_U8 },
+ [NL80211_ATTR_NAN_MASTER_PREF] = NLA_POLICY_MIN(NLA_U8, 1),
[NL80211_ATTR_BANDS] = { .type = NLA_U32 },
[NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED },
[NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY,
@@ -430,6 +492,11 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
[NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY,
.len = NL80211_HE_MAX_CAPABILITY_LEN },
+
+ [NL80211_ATTR_FTM_RESPONDER] = {
+ .type = NLA_NESTED,
+ .validation_data = nl80211_ftm_responder_policy,
+ },
};
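
Most of this policy churn replaces open-coded bounds checks in the command handlers with declarative NLA_POLICY_MIN/MAX/RANGE/VALIDATE_FN entries, so malformed attributes are rejected by the netlink core before any handler runs. A minimal sketch with hypothetical attributes (validate_ie_attr is the callback defined above):

#include <net/netlink.h>

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_RETRY,		/* u8, must be 1..16 */
	MY_ATTR_IES,		/* binary, checked by callback */
	__MY_ATTR_MAX
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_RETRY] = NLA_POLICY_RANGE(NLA_U8, 1, 16),
	[MY_ATTR_IES]	= NLA_POLICY_VALIDATE_FN(NLA_BINARY,
						 validate_ie_attr, 256),
};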
/* policy for the key attributes */
@@ -440,7 +507,7 @@ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = {
[NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
[NL80211_KEY_DEFAULT] = { .type = NLA_FLAG },
[NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
- [NL80211_KEY_TYPE] = { .type = NLA_U32 },
+ [NL80211_KEY_TYPE] = NLA_POLICY_MAX(NLA_U32, NUM_NL80211_KEYTYPES - 1),
[NL80211_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED },
};
@@ -491,7 +558,10 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
static const struct nla_policy
nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = {
[NL80211_ATTR_COALESCE_RULE_DELAY] = { .type = NLA_U32 },
- [NL80211_ATTR_COALESCE_RULE_CONDITION] = { .type = NLA_U32 },
+ [NL80211_ATTR_COALESCE_RULE_CONDITION] =
+ NLA_POLICY_RANGE(NLA_U32,
+ NL80211_COALESCE_CONDITION_MATCH,
+ NL80211_COALESCE_CONDITION_NO_MATCH),
[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN] = { .type = NLA_NESTED },
};
@@ -567,8 +637,7 @@ nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
[NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
};
-static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
+static int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
struct cfg80211_registered_device **rdev,
struct wireless_dev **wdev)
{
@@ -582,7 +651,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
return err;
*wdev = __cfg80211_wdev_from_attrs(
- sock_net(skb->sk),
+ sock_net(cb->skb->sk),
genl_family_attrbuf(&nl80211_fam));
if (IS_ERR(*wdev))
return PTR_ERR(*wdev);
@@ -614,36 +683,6 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
return 0;
}
-/* IE validation */
-static bool is_valid_ie_attr(const struct nlattr *attr)
-{
- const u8 *pos;
- int len;
-
- if (!attr)
- return true;
-
- pos = nla_data(attr);
- len = nla_len(attr);
-
- while (len) {
- u8 elemlen;
-
- if (len < 2)
- return false;
- len -= 2;
-
- elemlen = pos[1];
- if (elemlen > len)
- return false;
-
- len -= elemlen;
- pos += 2 + elemlen;
- }
-
- return true;
-}
-
/* message building helper */
static inline void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
int flags, u8 cmd)
@@ -858,12 +897,8 @@ static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key,
if (tb[NL80211_KEY_CIPHER])
k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]);
- if (tb[NL80211_KEY_TYPE]) {
+ if (tb[NL80211_KEY_TYPE])
k->type = nla_get_u32(tb[NL80211_KEY_TYPE]);
- if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
- return genl_err_attr(info, -EINVAL,
- tb[NL80211_KEY_TYPE]);
- }
if (tb[NL80211_KEY_DEFAULT_TYPES]) {
struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
@@ -910,13 +945,8 @@ static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k)
if (k->defmgmt)
k->def_multi = true;
- if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
+ if (info->attrs[NL80211_ATTR_KEY_TYPE])
k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
- if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) {
- GENL_SET_ERR_MSG(info, "key type out of range");
- return -EINVAL;
- }
- }
if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) {
struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
@@ -2292,12 +2322,14 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
struct genl_info *info,
struct cfg80211_chan_def *chandef)
{
+ struct netlink_ext_ack *extack = info->extack;
+ struct nlattr **attrs = info->attrs;
u32 control_freq;
- if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
+ if (!attrs[NL80211_ATTR_WIPHY_FREQ])
return -EINVAL;
- control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ control_freq = nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ]);
chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
@@ -2305,14 +2337,16 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
chandef->center_freq2 = 0;
/* Primary channel not allowed */
- if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED)
+ if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED) {
+ NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ],
+ "Channel is disabled");
return -EINVAL;
+ }
- if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+ if (attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
enum nl80211_channel_type chantype;
- chantype = nla_get_u32(
- info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+ chantype = nla_get_u32(attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
switch (chantype) {
case NL80211_CHAN_NO_HT:
@@ -2322,42 +2356,56 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
cfg80211_chandef_create(chandef, chandef->chan,
chantype);
/* user input for center_freq is incorrect */
- if (info->attrs[NL80211_ATTR_CENTER_FREQ1] &&
- chandef->center_freq1 != nla_get_u32(
- info->attrs[NL80211_ATTR_CENTER_FREQ1]))
+ if (attrs[NL80211_ATTR_CENTER_FREQ1] &&
+ chandef->center_freq1 != nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1])) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ attrs[NL80211_ATTR_CENTER_FREQ1],
+ "bad center frequency 1");
return -EINVAL;
+ }
/* center_freq2 must be zero */
- if (info->attrs[NL80211_ATTR_CENTER_FREQ2] &&
- nla_get_u32(info->attrs[NL80211_ATTR_CENTER_FREQ2]))
+ if (attrs[NL80211_ATTR_CENTER_FREQ2] &&
+ nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2])) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ attrs[NL80211_ATTR_CENTER_FREQ2],
+ "center frequency 2 can't be used");
return -EINVAL;
+ }
break;
default:
+ NL_SET_ERR_MSG_ATTR(extack,
+ attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE],
+ "invalid channel type");
return -EINVAL;
}
- } else if (info->attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
+ } else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
chandef->width =
- nla_get_u32(info->attrs[NL80211_ATTR_CHANNEL_WIDTH]);
- if (info->attrs[NL80211_ATTR_CENTER_FREQ1])
+ nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]);
+ if (attrs[NL80211_ATTR_CENTER_FREQ1])
chandef->center_freq1 =
- nla_get_u32(
- info->attrs[NL80211_ATTR_CENTER_FREQ1]);
- if (info->attrs[NL80211_ATTR_CENTER_FREQ2])
+ nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]);
+ if (attrs[NL80211_ATTR_CENTER_FREQ2])
chandef->center_freq2 =
- nla_get_u32(
- info->attrs[NL80211_ATTR_CENTER_FREQ2]);
+ nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2]);
}
- if (!cfg80211_chandef_valid(chandef))
+ if (!cfg80211_chandef_valid(chandef)) {
+ NL_SET_ERR_MSG(extack, "invalid channel definition");
return -EINVAL;
+ }
if (!cfg80211_chandef_usable(&rdev->wiphy, chandef,
- IEEE80211_CHAN_DISABLED))
+ IEEE80211_CHAN_DISABLED)) {
+ NL_SET_ERR_MSG(extack, "(extension) channel is disabled");
return -EINVAL;
+ }
if ((chandef->width == NL80211_CHAN_WIDTH_5 ||
chandef->width == NL80211_CHAN_WIDTH_10) &&
- !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ))
+ !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ)) {
+ NL_SET_ERR_MSG(extack, "5/10 MHz not supported");
return -EINVAL;
+ }
return 0;
}
@@ -2617,8 +2665,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) {
retry_short = nla_get_u8(
info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]);
- if (retry_short == 0)
- return -EINVAL;
changed |= WIPHY_PARAM_RETRY_SHORT;
}
@@ -2626,8 +2672,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]) {
retry_long = nla_get_u8(
info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]);
- if (retry_long == 0)
- return -EINVAL;
changed |= WIPHY_PARAM_RETRY_LONG;
}
@@ -3119,8 +3163,6 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
if (otype != ntype)
change = true;
- if (ntype > NL80211_IFTYPE_MAX)
- return -EINVAL;
}
if (info->attrs[NL80211_ATTR_MESH_ID]) {
@@ -3185,11 +3227,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NL80211_ATTR_IFNAME])
return -EINVAL;
- if (info->attrs[NL80211_ATTR_IFTYPE]) {
+ if (info->attrs[NL80211_ATTR_IFTYPE])
type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
- if (type > NL80211_IFTYPE_MAX)
- return -EINVAL;
- }
if (!rdev->ops->add_virtual_intf ||
!(rdev->wiphy.interface_modes & (1 << type)))
@@ -3252,15 +3291,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
* P2P Device and NAN do not have a netdev, so don't go
* through the netdev notifier and must be added here
*/
- mutex_init(&wdev->mtx);
- INIT_LIST_HEAD(&wdev->event_list);
- spin_lock_init(&wdev->event_lock);
- INIT_LIST_HEAD(&wdev->mgmt_registrations);
- spin_lock_init(&wdev->mgmt_registrations_lock);
-
- wdev->identifier = ++rdev->wdev_id;
- list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
- rdev->devlist_generation++;
+ cfg80211_init_wdev(rdev, wdev);
break;
default:
break;
@@ -3272,15 +3303,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
return -ENOBUFS;
}
- /*
- * For wdevs which have no associated netdev object (e.g. of type
- * NL80211_IFTYPE_P2P_DEVICE), emit the NEW_INTERFACE event here.
- * For all other types, the event will be generated from the
- * netdev notifier
- */
- if (!wdev->netdev)
- nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
-
return genlmsg_reply(msg, info);
}
@@ -3359,7 +3381,7 @@ static void get_key_callback(void *c, struct key_params *params)
params->cipher)))
goto nla_put_failure;
- if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx))
+ if (nla_put_u8(cookie->msg, NL80211_KEY_IDX, cookie->idx))
goto nla_put_failure;
nla_nest_end(cookie->msg, key);
@@ -3386,9 +3408,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_KEY_IDX])
key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
- if (key_idx > 5)
- return -EINVAL;
-
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
@@ -3396,8 +3415,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
u32 kt = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
- if (kt >= NUM_NL80211_KEYTYPES)
- return -EINVAL;
if (kt != NL80211_KEYTYPE_GROUP &&
kt != NL80211_KEYTYPE_PAIRWISE)
return -EINVAL;
@@ -3756,6 +3773,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
return false;
/* check availability */
+ ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
mcs[ridx] |= rbit;
else
@@ -3997,16 +4015,12 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
return 0;
}
-static int nl80211_parse_beacon(struct nlattr *attrs[],
+static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev,
+ struct nlattr *attrs[],
struct cfg80211_beacon_data *bcn)
{
bool haveinfo = false;
-
- if (!is_valid_ie_attr(attrs[NL80211_ATTR_BEACON_TAIL]) ||
- !is_valid_ie_attr(attrs[NL80211_ATTR_IE]) ||
- !is_valid_ie_attr(attrs[NL80211_ATTR_IE_PROBE_RESP]) ||
- !is_valid_ie_attr(attrs[NL80211_ATTR_IE_ASSOC_RESP]))
- return -EINVAL;
+ int err;
memset(bcn, 0, sizeof(*bcn));
@@ -4051,6 +4065,35 @@ static int nl80211_parse_beacon(struct nlattr *attrs[],
bcn->probe_resp_len = nla_len(attrs[NL80211_ATTR_PROBE_RESP]);
}
+ if (attrs[NL80211_ATTR_FTM_RESPONDER]) {
+ struct nlattr *tb[NL80211_FTM_RESP_ATTR_MAX + 1];
+
+ err = nla_parse_nested(tb, NL80211_FTM_RESP_ATTR_MAX,
+ attrs[NL80211_ATTR_FTM_RESPONDER],
+ NULL, NULL);
+ if (err)
+ return err;
+
+ if (tb[NL80211_FTM_RESP_ATTR_ENABLED] &&
+ wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER))
+ bcn->ftm_responder = 1;
+ else
+ return -EOPNOTSUPP;
+
+ if (tb[NL80211_FTM_RESP_ATTR_LCI]) {
+ bcn->lci = nla_data(tb[NL80211_FTM_RESP_ATTR_LCI]);
+ bcn->lci_len = nla_len(tb[NL80211_FTM_RESP_ATTR_LCI]);
+ }
+
+ if (tb[NL80211_FTM_RESP_ATTR_CIVICLOC]) {
+ bcn->civicloc = nla_data(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]);
+ bcn->civicloc_len = nla_len(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]);
+ }
+ } else {
+ bcn->ftm_responder = -1;
+ }
+
return 0;
}
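
nla_parse_nested() unpacks the nested NL80211_ATTR_FTM_RESPONDER attribute into a per-attribute table, after which presence checks are plain tb[ATTR] tests. A generic sketch of that pattern, passing the responder policy explicitly so the inner attributes are validated during the parse (parse_ftm_example is illustrative, not from the patch):

static int parse_ftm_example(const struct nlattr *outer)
{
	struct nlattr *tb[NL80211_FTM_RESP_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, NL80211_FTM_RESP_ATTR_MAX, outer,
			       nl80211_ftm_responder_policy, NULL);
	if (err)
		return err;
	if (tb[NL80211_FTM_RESP_ATTR_LCI])
		pr_info("LCI: %d bytes\n",
			nla_len(tb[NL80211_FTM_RESP_ATTR_LCI]));
	return 0;
}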
@@ -4095,6 +4138,9 @@ static void nl80211_calculate_ap_params(struct cfg80211_ap_settings *params)
cap = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, ies, ies_len);
if (cap && cap[1] >= sizeof(*params->vht_cap))
params->vht_cap = (void *)(cap + 2);
+ cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ies, ies_len);
+ if (cap && cap[1] >= sizeof(*params->he_cap) + 1)
+ params->he_cap = (void *)(cap + 3);
}
static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
@@ -4194,7 +4240,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
!info->attrs[NL80211_ATTR_BEACON_HEAD])
return -EINVAL;
- err = nl80211_parse_beacon(info->attrs, &params.beacon);
+ err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon);
if (err)
return err;
@@ -4224,14 +4270,9 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
- if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) {
+ if (info->attrs[NL80211_ATTR_HIDDEN_SSID])
params.hidden_ssid = nla_get_u32(
info->attrs[NL80211_ATTR_HIDDEN_SSID]);
- if (params.hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE &&
- params.hidden_ssid != NL80211_HIDDEN_SSID_ZERO_LEN &&
- params.hidden_ssid != NL80211_HIDDEN_SSID_ZERO_CONTENTS)
- return -EINVAL;
- }
params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
@@ -4261,8 +4302,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
params.p2p_ctwindow =
nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]);
- if (params.p2p_ctwindow > 127)
- return -EINVAL;
if (params.p2p_ctwindow != 0 &&
!(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN))
return -EINVAL;
@@ -4274,8 +4313,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EINVAL;
tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]);
- if (tmp > 1)
- return -EINVAL;
params.p2p_opp_ps = tmp;
if (params.p2p_opp_ps != 0 &&
!(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS))
@@ -4378,7 +4415,7 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
if (!wdev->beacon_interval)
return -EINVAL;
- err = nl80211_parse_beacon(info->attrs, &params);
+ err = nl80211_parse_beacon(rdev, info->attrs, &params);
if (err)
return err;
@@ -4724,10 +4761,13 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
PUT_SINFO_U64(RX_DROP_MISC, rx_dropped_misc);
PUT_SINFO_U64(BEACON_RX, rx_beacon);
PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8);
- PUT_SINFO(ACK_SIGNAL, ack_signal, u8);
+ PUT_SINFO(RX_MPDUS, rx_mpdu_count, u32);
+ PUT_SINFO(FCS_ERROR_COUNT, fcs_err_count, u32);
if (wiphy_ext_feature_isset(&rdev->wiphy,
- NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT))
- PUT_SINFO(DATA_ACK_SIGNAL_AVG, avg_ack_signal, s8);
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT)) {
+ PUT_SINFO(ACK_SIGNAL, ack_signal, u8);
+ PUT_SINFO(ACK_SIGNAL_AVG, avg_ack_signal, s8);
+ }
#undef PUT_SINFO
#undef PUT_SINFO_U64
@@ -4806,7 +4846,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
int err;
rtnl_lock();
- err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+ err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
if (err)
goto out_err;
@@ -5211,17 +5251,11 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
else
params.listen_interval = -1;
- if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) {
- u8 tmp;
-
- tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
- if (tmp >= NUM_NL80211_P2P_PS_STATUS)
- return -EINVAL;
-
- params.support_p2p_ps = tmp;
- } else {
+ if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS])
+ params.support_p2p_ps =
+ nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
+ else
params.support_p2p_ps = -1;
- }
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
@@ -5251,38 +5285,23 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
return -EINVAL;
- if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) {
+ if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
params.plink_action =
nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
- if (params.plink_action >= NUM_NL80211_PLINK_ACTIONS)
- return -EINVAL;
- }
if (info->attrs[NL80211_ATTR_STA_PLINK_STATE]) {
params.plink_state =
nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
- if (params.plink_state >= NUM_NL80211_PLINK_STATES)
- return -EINVAL;
- if (info->attrs[NL80211_ATTR_MESH_PEER_AID]) {
+ if (info->attrs[NL80211_ATTR_MESH_PEER_AID])
params.peer_aid = nla_get_u16(
info->attrs[NL80211_ATTR_MESH_PEER_AID]);
- if (params.peer_aid > IEEE80211_MAX_AID)
- return -EINVAL;
- }
params.sta_modify_mask |= STATION_PARAM_APPLY_PLINK_STATE;
}
- if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]) {
- enum nl80211_mesh_power_mode pm = nla_get_u32(
+ if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE])
+ params.local_pm = nla_get_u32(
info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]);
- if (pm <= NL80211_MESH_POWER_UNKNOWN ||
- pm > NL80211_MESH_POWER_MAX)
- return -EINVAL;
-
- params.local_pm = pm;
- }
-
if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
params.opmode_notif_used = true;
params.opmode_notif =
@@ -5359,13 +5378,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) {
- u8 tmp;
-
- tmp = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
- if (tmp >= NUM_NL80211_P2P_PS_STATUS)
- return -EINVAL;
-
- params.support_p2p_ps = tmp;
+ params.support_p2p_ps =
+ nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]);
} else {
/*
* if not specified, assume it's supported for P2P GO interface,
@@ -5379,8 +5393,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]);
else
params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
- if (!params.aid || params.aid > IEEE80211_MAX_AID)
- return -EINVAL;
if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) {
params.capability =
@@ -5420,12 +5432,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]);
}
- if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) {
+ if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
params.plink_action =
nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
- if (params.plink_action >= NUM_NL80211_PLINK_ACTIONS)
- return -EINVAL;
- }
err = nl80211_parse_sta_channel_info(info, &params);
if (err)
@@ -5657,7 +5666,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
int err;
rtnl_lock();
- err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+ err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
if (err)
goto out_err;
@@ -5853,7 +5862,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
int err;
rtnl_lock();
- err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+ err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
if (err)
goto out_err;
@@ -5935,9 +5944,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EINVAL;
params.p2p_ctwindow =
- nla_get_s8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]);
- if (params.p2p_ctwindow < 0)
- return -EINVAL;
+ nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]);
if (params.p2p_ctwindow != 0 &&
!(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN))
return -EINVAL;
@@ -5949,8 +5956,6 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EINVAL;
tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]);
- if (tmp > 1)
- return -EINVAL;
params.p2p_opp_ps = tmp;
if (params.p2p_opp_ps &&
!(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS))
@@ -6129,33 +6134,49 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
return -ENOBUFS;
}
-static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
- [NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 },
- [NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 },
- [NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 },
- [NL80211_MESHCONF_MAX_PEER_LINKS] = { .type = NLA_U16 },
- [NL80211_MESHCONF_MAX_RETRIES] = { .type = NLA_U8 },
- [NL80211_MESHCONF_TTL] = { .type = NLA_U8 },
- [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
- [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
- [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 },
+static const struct nla_policy
+nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
+ [NL80211_MESHCONF_RETRY_TIMEOUT] =
+ NLA_POLICY_RANGE(NLA_U16, 1, 255),
+ [NL80211_MESHCONF_CONFIRM_TIMEOUT] =
+ NLA_POLICY_RANGE(NLA_U16, 1, 255),
+ [NL80211_MESHCONF_HOLDING_TIMEOUT] =
+ NLA_POLICY_RANGE(NLA_U16, 1, 255),
+ [NL80211_MESHCONF_MAX_PEER_LINKS] =
+ NLA_POLICY_RANGE(NLA_U16, 0, 255),
+ [NL80211_MESHCONF_MAX_RETRIES] = NLA_POLICY_MAX(NLA_U8, 16),
+ [NL80211_MESHCONF_TTL] = NLA_POLICY_MIN(NLA_U8, 1),
+ [NL80211_MESHCONF_ELEMENT_TTL] = NLA_POLICY_MIN(NLA_U8, 1),
+ [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = NLA_POLICY_MAX(NLA_U8, 1),
+ [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] =
+ NLA_POLICY_RANGE(NLA_U32, 1, 255),
[NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
[NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
- [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
+ [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = NLA_POLICY_MIN(NLA_U16, 1),
[NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
- [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
- [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] = { .type = NLA_U16 },
- [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
- [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
- [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
- [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
- [NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
- [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32 },
+ [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_HWMP_ROOTMODE] = NLA_POLICY_MAX(NLA_U8, 4),
+ [NL80211_MESHCONF_HWMP_RANN_INTERVAL] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = NLA_POLICY_MAX(NLA_U8, 1),
+ [NL80211_MESHCONF_FORWARDING] = NLA_POLICY_MAX(NLA_U8, 1),
+ [NL80211_MESHCONF_RSSI_THRESHOLD] =
+ NLA_POLICY_RANGE(NLA_S32, -255, 0),
[NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16 },
[NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT] = { .type = NLA_U32 },
- [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = { .type = NLA_U16 },
- [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 },
- [NL80211_MESHCONF_POWER_MODE] = { .type = NLA_U32 },
+ [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] =
+ NLA_POLICY_MIN(NLA_U16, 1),
+ [NL80211_MESHCONF_POWER_MODE] =
+ NLA_POLICY_RANGE(NLA_U32,
+ NL80211_MESH_POWER_ACTIVE,
+ NL80211_MESH_POWER_MAX),
[NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 },
[NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 },
};
@@ -6168,68 +6189,12 @@ static const struct nla_policy
[NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
[NL80211_MESH_SETUP_AUTH_PROTOCOL] = { .type = NLA_U8 },
[NL80211_MESH_SETUP_USERSPACE_MPM] = { .type = NLA_FLAG },
- [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_MESH_SETUP_IE] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
[NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG },
};
-static int nl80211_check_bool(const struct nlattr *nla, u8 min, u8 max, bool *out)
-{
- u8 val = nla_get_u8(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
-static int nl80211_check_u8(const struct nlattr *nla, u8 min, u8 max, u8 *out)
-{
- u8 val = nla_get_u8(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
-static int nl80211_check_u16(const struct nlattr *nla, u16 min, u16 max, u16 *out)
-{
- u16 val = nla_get_u16(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
-static int nl80211_check_u32(const struct nlattr *nla, u32 min, u32 max, u32 *out)
-{
- u32 val = nla_get_u32(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
-static int nl80211_check_s32(const struct nlattr *nla, s32 min, s32 max, s32 *out)
-{
- s32 val = nla_get_s32(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
-static int nl80211_check_power_mode(const struct nlattr *nla,
- enum nl80211_mesh_power_mode min,
- enum nl80211_mesh_power_mode max,
- enum nl80211_mesh_power_mode *out)
-{
- u32 val = nla_get_u32(nla);
- if (val < min || val > max)
- return -EINVAL;
- *out = val;
- return 0;
-}
-
static int nl80211_parse_mesh_config(struct genl_info *info,
struct mesh_config *cfg,
u32 *mask_out)
@@ -6238,13 +6203,12 @@ static int nl80211_parse_mesh_config(struct genl_info *info,
u32 mask = 0;
u16 ht_opmode;
-#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \
-do { \
- if (tb[attr]) { \
- if (fn(tb[attr], min, max, &cfg->param)) \
- return -EINVAL; \
- mask |= (1 << (attr - 1)); \
- } \
+#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, mask, attr, fn) \
+do { \
+ if (tb[attr]) { \
+ cfg->param = fn(tb[attr]); \
+ mask |= BIT((attr) - 1); \
+ } \
} while (0)
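
The reworked macro just copies the attribute with the given getter and records the bit; range enforcement has moved into the nla_policy table, with a few residual explicit checks below for the u32 parameters. The do { ... } while (0) wrapper keeps the expansion a single statement; a minimal illustration with a hypothetical SET_IF macro:

#define SET_IF(cond, dst, val)	\
do {				\
	if (cond)		\
		(dst) = (val);	\
} while (0)

/* expands to one statement, so it nests safely without braces: */
static void set_if_demo(int cond, int *x)
{
	if (cond)
		SET_IF(cond, *x, 1);
	else
		*x = 0;
}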
if (!info->attrs[NL80211_ATTR_MESH_CONFIG])
@@ -6259,75 +6223,73 @@ do { \
BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32);
/* Fill in the params struct */
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, 1, 255,
- mask, NL80211_MESHCONF_RETRY_TIMEOUT,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, 1, 255,
- mask, NL80211_MESHCONF_CONFIRM_TIMEOUT,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, 1, 255,
- mask, NL80211_MESHCONF_HOLDING_TIMEOUT,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, 0, 255,
- mask, NL80211_MESHCONF_MAX_PEER_LINKS,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, 0, 16,
- mask, NL80211_MESHCONF_MAX_RETRIES,
- nl80211_check_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, 1, 255,
- mask, NL80211_MESHCONF_TTL, nl80211_check_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, 1, 255,
- mask, NL80211_MESHCONF_ELEMENT_TTL,
- nl80211_check_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, 0, 1,
- mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
- nl80211_check_bool);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, mask,
+ NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, mask,
+ NL80211_MESHCONF_CONFIRM_TIMEOUT,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, mask,
+ NL80211_MESHCONF_HOLDING_TIMEOUT,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, mask,
+ NL80211_MESHCONF_MAX_PEER_LINKS,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, mask,
+ NL80211_MESHCONF_MAX_RETRIES, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, mask,
+ NL80211_MESHCONF_TTL, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, mask,
+ NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, mask,
+ NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
- 1, 255, mask,
+ mask,
NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
- nl80211_check_u32);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, 0, 255,
- mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
- nl80211_check_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, 1, 65535,
- mask, NL80211_MESHCONF_PATH_REFRESH_TIME,
- nl80211_check_u32);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, 1, 65535,
- mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
- nl80211_check_u16);
+ nla_get_u32);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, mask,
+ NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
+ nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, mask,
+ NL80211_MESHCONF_PATH_REFRESH_TIME,
+ nla_get_u32);
+ if (mask & BIT(NL80211_MESHCONF_PATH_REFRESH_TIME) &&
+ (cfg->path_refresh_time < 1 || cfg->path_refresh_time > 65535))
+ return -EINVAL;
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, mask,
+ NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout,
- 1, 65535, mask,
+ mask,
NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
- nl80211_check_u32);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
- 1, 65535, mask,
+ nla_get_u32);
+ if (mask & BIT(NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT) &&
+ (cfg->dot11MeshHWMPactivePathTimeout < 1 ||
+ cfg->dot11MeshHWMPactivePathTimeout > 65535))
+ return -EINVAL;
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, mask,
NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
- 1, 65535, mask,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, mask,
NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
- nl80211_check_u16);
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshHWMPnetDiameterTraversalTime,
- 1, 65535, mask,
+ dot11MeshHWMPnetDiameterTraversalTime, mask,
NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, 0, 4,
- mask, NL80211_MESHCONF_HWMP_ROOTMODE,
- nl80211_check_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, 1, 65535,
- mask, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshGateAnnouncementProtocol, 0, 1,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, mask,
+ NL80211_MESHCONF_HWMP_ROOTMODE, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, mask,
+ NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshGateAnnouncementProtocol,
mask, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
- nl80211_check_bool);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1,
- mask, NL80211_MESHCONF_FORWARDING,
- nl80211_check_bool);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0,
- mask, NL80211_MESHCONF_RSSI_THRESHOLD,
- nl80211_check_s32);
+ nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, mask,
+ NL80211_MESHCONF_FORWARDING, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, mask,
+ NL80211_MESHCONF_RSSI_THRESHOLD,
+ nla_get_s32);
/*
* Check HT operation mode based on
* IEEE 802.11-2016 9.4.2.57 HT Operation element.
@@ -6346,29 +6308,27 @@ do { \
cfg->ht_opmode = ht_opmode;
mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
}
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
- 1, 65535, mask,
- NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
- nl80211_check_u32);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, 1, 65535,
- mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
- nl80211_check_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshHWMPconfirmationInterval,
- 1, 65535, mask,
+ dot11MeshHWMPactivePathToRootTimeout, mask,
+ NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
+ nla_get_u32);
+ if (mask & BIT(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT) &&
+ (cfg->dot11MeshHWMPactivePathToRootTimeout < 1 ||
+ cfg->dot11MeshHWMPactivePathToRootTimeout > 65535))
+ return -EINVAL;
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, mask,
+ NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPconfirmationInterval,
+ mask,
NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
- nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode,
- NL80211_MESH_POWER_ACTIVE,
- NL80211_MESH_POWER_MAX,
- mask, NL80211_MESHCONF_POWER_MODE,
- nl80211_check_power_mode);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration,
- 0, 65535, mask,
- NL80211_MESHCONF_AWAKE_WINDOW, nl80211_check_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 0, 0xffffffff,
- mask, NL80211_MESHCONF_PLINK_TIMEOUT,
- nl80211_check_u32);
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode, mask,
+ NL80211_MESHCONF_POWER_MODE, nla_get_u32);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, mask,
+ NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, mask,
+ NL80211_MESHCONF_PLINK_TIMEOUT, nla_get_u32);
if (mask_out)
*mask_out = mask;
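
The mesh parameter hunks above drop the old nl80211_check_* callbacks, which smuggled min/max bounds into the macro arguments, in favor of plain nla_get_* accessors; only attributes whose valid range is narrower than their wire type keep an explicit range check after the fill. A minimal sketch of the trimmed macro, assuming the upstream shape and the mask-bit convention used elsewhere in this function:

#define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, mask, attr, fn)	\
do {									\
	if (tb[attr]) {							\
		cfg->param = fn(tb[attr]);	/* e.g. nla_get_u16 */	\
		mask |= BIT((attr) - 1);	/* remember it was set */\
	}								\
} while (0)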
@@ -6411,8 +6371,6 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
if (tb[NL80211_MESH_SETUP_IE]) {
struct nlattr *ieattr =
tb[NL80211_MESH_SETUP_IE];
- if (!is_valid_ie_attr(ieattr))
- return -EINVAL;
setup->ie = nla_data(ieattr);
setup->ie_len = nla_len(ieattr);
}
@@ -7045,9 +7003,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
int err, tmp, n_ssids = 0, n_channels, i;
size_t ie_len;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
wiphy = &rdev->wiphy;
if (wdev->iftype == NL80211_IFTYPE_NAN)
@@ -7401,9 +7356,6 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF;
- if (!is_valid_ie_attr(attrs[NL80211_ATTR_IE]))
- return ERR_PTR(-EINVAL);
-
if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
n_channels = validate_scan_freqs(
attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
@@ -7763,7 +7715,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
*/
if (want_multi && rdev->wiphy.max_sched_scan_reqs > 1) {
while (!sched_scan_req->reqid)
- sched_scan_req->reqid = rdev->wiphy.cookie_counter++;
+ sched_scan_req->reqid = cfg80211_assign_cookie(rdev);
}
err = rdev_sched_scan_start(rdev, dev, sched_scan_req);
@@ -7939,7 +7891,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
if (!need_new_beacon)
goto skip_beacons;
- err = nl80211_parse_beacon(info->attrs, &params.beacon_after);
+ err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon_after);
if (err)
return err;
@@ -7949,7 +7901,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
- err = nl80211_parse_beacon(csa_attrs, &params.beacon_csa);
+ err = nl80211_parse_beacon(rdev, csa_attrs, &params.beacon_csa);
if (err)
return err;
@@ -8186,7 +8138,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
int err;
rtnl_lock();
- err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+ err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
if (err) {
rtnl_unlock();
return err;
@@ -8307,7 +8259,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
bool radio_stats;
rtnl_lock();
- res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
+ res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev);
if (res)
goto out_err;
@@ -8371,9 +8323,6 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
struct key_parse key;
bool local_state_change;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
@@ -8612,9 +8561,6 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
return -EPERM;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_MAC] ||
!info->attrs[NL80211_ATTR_SSID] ||
!info->attrs[NL80211_ATTR_WIPHY_FREQ])
@@ -8738,9 +8684,6 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
return -EPERM;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
@@ -8789,9 +8732,6 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
return -EPERM;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
@@ -8866,9 +8806,6 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
memset(&ibss, 0, sizeof(ibss));
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_SSID] ||
!nla_len(info->attrs[NL80211_ATTR_SSID]))
return -EINVAL;
@@ -9306,9 +9243,6 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
memset(&connect, 0, sizeof(connect));
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_SSID] ||
!nla_len(info->attrs[NL80211_ATTR_SSID]))
return -EINVAL;
@@ -9367,11 +9301,6 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
!wiphy_ext_feature_isset(&rdev->wiphy,
NL80211_EXT_FEATURE_MFP_OPTIONAL))
return -EOPNOTSUPP;
-
- if (connect.mfp != NL80211_MFP_REQUIRED &&
- connect.mfp != NL80211_MFP_NO &&
- connect.mfp != NL80211_MFP_OPTIONAL)
- return -EINVAL;
} else {
connect.mfp = NL80211_MFP_NO;
}
@@ -9544,8 +9473,6 @@ static int nl80211_update_connect_params(struct sk_buff *skb,
return -EOPNOTSUPP;
if (info->attrs[NL80211_ATTR_IE]) {
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
changed |= UPDATE_ASSOC_IES;
@@ -10130,9 +10057,6 @@ static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
ps_state = nla_get_u32(info->attrs[NL80211_ATTR_PS_STATE]);
- if (ps_state != NL80211_PS_DISABLED && ps_state != NL80211_PS_ENABLED)
- return -EINVAL;
-
wdev = dev->ieee80211_ptr;
if (!rdev->ops->set_power_mgmt)
@@ -10230,7 +10154,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev = dev->ieee80211_ptr;
s32 last, low, high;
u32 hyst;
- int i, n;
+ int i, n, low_index;
int err;
/* RSSI reporting disabled? */
@@ -10267,10 +10191,19 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
if (last < wdev->cqm_config->rssi_thresholds[i])
break;
- low = i > 0 ?
- (wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN;
- high = i < n ?
- (wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX;
+ low_index = i - 1;
+ if (low_index >= 0) {
+ low_index = array_index_nospec(low_index, n);
+ low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
+ } else {
+ low = S32_MIN;
+ }
+ if (i < n) {
+ i = array_index_nospec(i, n);
+ high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
+ } else {
+ high = S32_MAX;
+ }
return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
}
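
The low/high rewrite above is Spectre-v1 hardening rather than a behavior change: i comes out of a loop over user-supplied thresholds, so it is clamped with array_index_nospec() before being used as an array index. The pattern in isolation, with hypothetical names:

#include <linux/nospec.h>

/* Hypothetical helper: bound a possibly attacker-influenced index so a
 * mispredicted bounds check cannot speculatively read out of range. */
static s32 threshold_at(const s32 *thresholds, int i, int n)
{
	if (i < 0 || i >= n)
		return S32_MIN;
	i = array_index_nospec(i, n);	/* architectural no-op */
	return thresholds[i];
}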
@@ -10686,8 +10619,7 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
if (!scan_plan)
return -ENOBUFS;
- if (!scan_plan ||
- nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL,
+ if (nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL,
req->scan_plans[i].interval) ||
(req->scan_plans[i].iterations &&
nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_ITERATIONS,
@@ -11285,9 +11217,6 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION])
new_rule->condition =
nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]);
- if (new_rule->condition != NL80211_COALESCE_CONDITION_MATCH &&
- new_rule->condition != NL80211_COALESCE_CONDITION_NO_MATCH)
- return -EINVAL;
if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN])
return -EINVAL;
@@ -11640,8 +11569,6 @@ static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info)
conf.master_pref =
nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]);
- if (!conf.master_pref)
- return -EINVAL;
if (info->attrs[NL80211_ATTR_BANDS]) {
u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]);
@@ -11759,7 +11686,7 @@ static int nl80211_nan_add_func(struct sk_buff *skb,
if (!func)
return -ENOMEM;
- func->cookie = wdev->wiphy->cookie_counter++;
+ func->cookie = cfg80211_assign_cookie(rdev);
if (!tb[NL80211_NAN_FUNC_TYPE] ||
nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]) > NL80211_NAN_FUNC_MAX_TYPE) {
@@ -12205,8 +12132,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
if (!info->attrs[NL80211_ATTR_MDID] ||
- !info->attrs[NL80211_ATTR_IE] ||
- !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
+ !info->attrs[NL80211_ATTR_IE])
return -EINVAL;
memset(&ft_params, 0, sizeof(ft_params));
@@ -12626,12 +12552,7 @@ static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
tsid = nla_get_u8(info->attrs[NL80211_ATTR_TSID]);
- if (tsid >= IEEE80211_NUM_TIDS)
- return -EINVAL;
-
up = nla_get_u8(info->attrs[NL80211_ATTR_USER_PRIO]);
- if (up >= IEEE80211_NUM_UPS)
- return -EINVAL;
/* WMM uses TIDs 0-7 even for TSPEC */
if (tsid >= IEEE80211_FIRST_TSPEC_TSID) {
@@ -12989,6 +12910,76 @@ static int nl80211_tx_control_port(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static int nl80211_get_ftm_responder_stats(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_ftm_responder_stats ftm_stats = {};
+ struct sk_buff *msg;
+ void *hdr;
+ struct nlattr *ftm_stats_attr;
+ int err;
+
+ if (wdev->iftype != NL80211_IFTYPE_AP || !wdev->beacon_interval)
+ return -EOPNOTSUPP;
+
+ err = rdev_get_ftm_responder_stats(rdev, dev, &ftm_stats);
+ if (err)
+ return err;
+
+ if (!ftm_stats.filled)
+ return -ENODATA;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
+ NL80211_CMD_GET_FTM_RESPONDER_STATS);
+	if (!hdr)
+		goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
+ goto nla_put_failure;
+
+ ftm_stats_attr = nla_nest_start(msg, NL80211_ATTR_FTM_RESPONDER_STATS);
+ if (!ftm_stats_attr)
+ goto nla_put_failure;
+
+#define SET_FTM(field, name, type) \
+ do { if ((ftm_stats.filled & BIT(NL80211_FTM_STATS_ ## name)) && \
+ nla_put_ ## type(msg, NL80211_FTM_STATS_ ## name, \
+ ftm_stats.field)) \
+ goto nla_put_failure; } while (0)
+#define SET_FTM_U64(field, name) \
+ do { if ((ftm_stats.filled & BIT(NL80211_FTM_STATS_ ## name)) && \
+ nla_put_u64_64bit(msg, NL80211_FTM_STATS_ ## name, \
+ ftm_stats.field, NL80211_FTM_STATS_PAD)) \
+ goto nla_put_failure; } while (0)
+
+ SET_FTM(success_num, SUCCESS_NUM, u32);
+ SET_FTM(partial_num, PARTIAL_NUM, u32);
+ SET_FTM(failed_num, FAILED_NUM, u32);
+ SET_FTM(asap_num, ASAP_NUM, u32);
+ SET_FTM(non_asap_num, NON_ASAP_NUM, u32);
+ SET_FTM_U64(total_duration_ms, TOTAL_DURATION_MSEC);
+ SET_FTM(unknown_triggers_num, UNKNOWN_TRIGGERS_NUM, u32);
+ SET_FTM(reschedule_requests_num, RESCHEDULE_REQUESTS_NUM, u32);
+ SET_FTM(out_of_window_triggers_num, OUT_OF_WINDOW_TRIGGERS_NUM, u32);
+#undef SET_FTM
+
+ nla_nest_end(msg, ftm_stats_attr);
+
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+
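One note on the SET_FTM()/SET_FTM_U64() helpers just defined: drivers declare which counters are valid through the ftm_stats.filled bitmap, so the netlink reply only carries attributes the hardware actually reported. A hedged driver-side sketch:

/* Hypothetical fill routine in a driver's get_ftm_responder_stats op:
 * set a counter, then mark it valid; counters whose bit stays clear
 * are silently skipped by SET_FTM() above. */
static void drv_fill_ftm_stats(struct cfg80211_ftm_responder_stats *s)
{
	s->success_num = 42;	/* example value */
	s->filled |= BIT(NL80211_FTM_STATS_SUCCESS_NUM);
}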
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -13900,6 +13891,13 @@ static const struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_GET_FTM_RESPONDER_STATS,
+ .doit = nl80211_get_ftm_responder_stats,
+ .policy = nl80211_policy,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_family nl80211_fam __ro_after_init = {
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 364f5d67f05b..51380b5c32f2 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1232,4 +1232,19 @@ rdev_external_auth(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int
+rdev_get_ftm_responder_stats(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+{
+ int ret = -EOPNOTSUPP;
+
+ trace_rdev_get_ftm_responder_stats(&rdev->wiphy, dev, ftm_stats);
+ if (rdev->ops->get_ftm_responder_stats)
+ ret = rdev->ops->get_ftm_responder_stats(&rdev->wiphy, dev,
+ ftm_stats);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
#endif /* __CFG80211_RDEV_OPS */
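
rdev_get_ftm_responder_stats() follows this header's standing convention: trace the call, default to -EOPNOTSUPP when the driver leaves the op NULL, invoke it otherwise, then trace the return value. The generic shape, sketched with a hypothetical op and tracepoint:

static inline int
rdev_frob_widget(struct cfg80211_registered_device *rdev,
		 struct net_device *dev)
{
	int ret = -EOPNOTSUPP;			/* op may be absent */

	trace_rdev_frob_widget(&rdev->wiphy, dev);	/* hypothetical */
	if (rdev->ops->frob_widget)			/* hypothetical */
		ret = rdev->ops->frob_widget(&rdev->wiphy, dev);
	trace_rdev_return_int(&rdev->wiphy, ret);
	return ret;
}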
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2f702adf2912..ecfb1a06dbb2 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -847,22 +847,36 @@ static bool valid_regdb(const u8 *data, unsigned int size)
return true;
}
-static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
- struct fwdb_wmm_rule *wmm)
-{
- struct ieee80211_wmm_rule *rule = &rrule->wmm_rule;
- unsigned int i;
+static void set_wmm_rule(const struct fwdb_header *db,
+ const struct fwdb_country *country,
+ const struct fwdb_rule *rule,
+ struct ieee80211_reg_rule *rrule)
+{
+ struct ieee80211_wmm_rule *wmm_rule = &rrule->wmm_rule;
+ struct fwdb_wmm_rule *wmm;
+ unsigned int i, wmm_ptr;
+
+ wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
+ wmm = (void *)((u8 *)db + wmm_ptr);
+
+ if (!valid_wmm(wmm)) {
+ pr_err("Invalid regulatory WMM rule %u-%u in domain %c%c\n",
+ be32_to_cpu(rule->start), be32_to_cpu(rule->end),
+ country->alpha2[0], country->alpha2[1]);
+ return;
+ }
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- rule->client[i].cw_min =
+ wmm_rule->client[i].cw_min =
ecw2cw((wmm->client[i].ecw & 0xf0) >> 4);
- rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f);
- rule->client[i].aifsn = wmm->client[i].aifsn;
- rule->client[i].cot = 1000 * be16_to_cpu(wmm->client[i].cot);
- rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4);
- rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f);
- rule->ap[i].aifsn = wmm->ap[i].aifsn;
- rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
+ wmm_rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f);
+ wmm_rule->client[i].aifsn = wmm->client[i].aifsn;
+ wmm_rule->client[i].cot =
+ 1000 * be16_to_cpu(wmm->client[i].cot);
+ wmm_rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4);
+ wmm_rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f);
+ wmm_rule->ap[i].aifsn = wmm->ap[i].aifsn;
+ wmm_rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
}
rrule->has_wmm = true;
@@ -870,7 +884,7 @@ static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
static int __regdb_query_wmm(const struct fwdb_header *db,
const struct fwdb_country *country, int freq,
- struct ieee80211_reg_rule *rule)
+ struct ieee80211_reg_rule *rrule)
{
unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
@@ -879,18 +893,14 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
for (i = 0; i < coll->n_rules; i++) {
__be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2));
unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2;
- struct fwdb_rule *rrule = (void *)((u8 *)db + rule_ptr);
- struct fwdb_wmm_rule *wmm;
- unsigned int wmm_ptr;
+ struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr);
- if (rrule->len < offsetofend(struct fwdb_rule, wmm_ptr))
+ if (rule->len < offsetofend(struct fwdb_rule, wmm_ptr))
continue;
- if (freq >= KHZ_TO_MHZ(be32_to_cpu(rrule->start)) &&
- freq <= KHZ_TO_MHZ(be32_to_cpu(rrule->end))) {
- wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2;
- wmm = (void *)((u8 *)db + wmm_ptr);
- set_wmm_rule(rule, wmm);
+ if (freq >= KHZ_TO_MHZ(be32_to_cpu(rule->start)) &&
+ freq <= KHZ_TO_MHZ(be32_to_cpu(rule->end))) {
+ set_wmm_rule(db, country, rule, rrule);
return 0;
}
}
@@ -972,12 +982,8 @@ static int regdb_query_country(const struct fwdb_header *db,
if (rule->len >= offsetofend(struct fwdb_rule, cac_timeout))
rrule->dfs_cac_ms =
1000 * be16_to_cpu(rule->cac_timeout);
- if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
- u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
- struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr);
-
- set_wmm_rule(rrule, wmm);
- }
+ if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr))
+ set_wmm_rule(db, country, rule, rrule);
}
return reg_schedule_apply(regdom);
@@ -2661,11 +2667,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
{
struct wiphy *wiphy = NULL;
enum reg_request_treatment treatment;
+ enum nl80211_reg_initiator initiator = reg_request->initiator;
if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
- switch (reg_request->initiator) {
+ switch (initiator) {
case NL80211_REGDOM_SET_BY_CORE:
treatment = reg_process_hint_core(reg_request);
break;
@@ -2683,7 +2690,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
treatment = reg_process_hint_country_ie(wiphy, reg_request);
break;
default:
- WARN(1, "invalid initiator %d\n", reg_request->initiator);
+ WARN(1, "invalid initiator %d\n", initiator);
goto out_free;
}
@@ -2698,7 +2705,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
*/
if (treatment == REG_REQ_ALREADY_SET && wiphy &&
wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
- wiphy_update_regulatory(wiphy, reg_request->initiator);
+ wiphy_update_regulatory(wiphy, initiator);
wiphy_all_share_dfs_chan_state(wiphy);
reg_check_channels();
}
@@ -2867,6 +2874,7 @@ static int regulatory_hint_core(const char *alpha2)
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_CORE;
+ request->wiphy_idx = WIPHY_IDX_INVALID;
queue_regulatory_request(request);
@@ -3184,13 +3192,59 @@ static void restore_regulatory_settings(bool reset_user)
schedule_work(&reg_work);
}
+static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+ wdev_lock(wdev);
+ if (!(wdev->wiphy->regulatory_flags & flag)) {
+ wdev_unlock(wdev);
+ return false;
+ }
+ wdev_unlock(wdev);
+ }
+ }
+
+ return true;
+}
+
void regulatory_hint_disconnect(void)
{
+	/* Restoring regulatory settings is not required when all wiphys
+	 * ignore the country IE from the connected access point, but the
+	 * beacon hints still need to be cleared when the wiphys support
+	 * beacon hints.
+	 */
+ if (is_wiphy_all_set_reg_flag(REGULATORY_COUNTRY_IE_IGNORE)) {
+ struct reg_beacon *reg_beacon, *btmp;
+
+ if (is_wiphy_all_set_reg_flag(REGULATORY_DISABLE_BEACON_HINTS))
+ return;
+
+ spin_lock_bh(&reg_pending_beacons_lock);
+ list_for_each_entry_safe(reg_beacon, btmp,
+ &reg_pending_beacons, list) {
+ list_del(&reg_beacon->list);
+ kfree(reg_beacon);
+ }
+ spin_unlock_bh(&reg_pending_beacons_lock);
+
+ list_for_each_entry_safe(reg_beacon, btmp,
+ &reg_beacon_list, list) {
+ list_del(&reg_beacon->list);
+ kfree(reg_beacon);
+ }
+
+ return;
+ }
+
pr_debug("All devices are disconnected, going to restore regulatory settings\n");
restore_regulatory_settings(false);
}
-static bool freq_is_chan_12_13_14(u16 freq)
+static bool freq_is_chan_12_13_14(u32 freq)
{
if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) ||
freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) ||
@@ -3777,6 +3831,15 @@ static int __init regulatory_init_db(void)
{
int err;
+ /*
+ * It's possible that - due to other bugs/issues - cfg80211
+ * never called regulatory_init() below, or that it failed;
+ * in that case, don't try to do any further work here as
+ * it's doomed to lead to crashes.
+ */
+ if (IS_ERR_OR_NULL(reg_pdev))
+ return -EINVAL;
+
err = load_builtin_regdb_keys();
if (err)
return err;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index d36c3eb7b931..d0e7472dd9fd 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1058,13 +1058,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
return NULL;
}
+/*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if the RX frequency does not match the actual
+ * operating channel of a BSS.
+ */
static struct ieee80211_channel *
cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
- struct ieee80211_channel *channel)
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width)
{
const u8 *tmp;
u32 freq;
int channel_number = -1;
+ struct ieee80211_channel *alt_channel;
tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
if (tmp && tmp[1] == 1) {
@@ -1078,16 +1088,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
}
}
- if (channel_number < 0)
+ if (channel_number < 0) {
+ /* No channel information in frame payload */
return channel;
+ }
freq = ieee80211_channel_to_frequency(channel_number, channel->band);
- channel = ieee80211_get_channel(wiphy, freq);
- if (!channel)
- return NULL;
- if (channel->flags & IEEE80211_CHAN_DISABLED)
+ alt_channel = ieee80211_get_channel(wiphy, freq);
+ if (!alt_channel) {
+ if (channel->band == NL80211_BAND_2GHZ) {
+ /*
+			 * Better not to allow unexpected channels when that
+			 * could go beyond the 1-11 range (e.g., discovering
+			 * a BSS on channel 12 when the radio is configured
+			 * for channel 11).
+ */
+ return NULL;
+ }
+
+ /* No match for the payload channel number - ignore it */
+ return channel;
+ }
+
+ if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
+ scan_width == NL80211_BSS_CHAN_WIDTH_5) {
+ /*
+ * Ignore channel number in 5 and 10 MHz channels where there
+ * may not be an n:1 or 1:n mapping between frequencies and
+ * channel numbers.
+ */
+ return channel;
+ }
+
+ /*
+ * Use the channel determined through the payload channel number
+ * instead of the RX channel reported by the driver.
+ */
+ if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
return NULL;
- return channel;
+ return alt_channel;
}
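
A worked example of the remapping above, assuming the standard 2.4 GHz channelization (2407 + 5 * ch MHz): a Beacon captured on channel 11 whose DSSS Parameter Set announces channel 12 is reported on 2467 MHz only if the wiphy actually has that channel enabled; on 2.4 GHz a missing channel now drops the BSS instead of inventing one the radio cannot use.

int freq = ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ);
/* freq == 2407 + 12 * 5 == 2467 MHz; if ieee80211_get_channel() finds
 * no such channel on a 2.4 GHz wiphy, cfg80211_get_bss_channel()
 * returns NULL and no BSS entry is created. */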
/* Returned bss is reference counted and must be cleaned up appropriately. */
@@ -1112,7 +1151,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
(data->signal < 0 || data->signal > 100)))
return NULL;
- channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
+ channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
+ data->scan_width);
if (!channel)
return NULL;
@@ -1210,7 +1250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
return NULL;
channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
- ielen, data->chan);
+ ielen, data->chan, data->scan_width);
if (!channel)
return NULL;
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 7c73510b161f..c6a9446b4e6b 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -112,7 +112,7 @@
} while (0)
#define CHAN_ENTRY __field(enum nl80211_band, band) \
- __field(u16, center_freq)
+ __field(u32, center_freq)
#define CHAN_ASSIGN(chan) \
do { \
if (chan) { \
@@ -2368,6 +2368,140 @@ TRACE_EVENT(rdev_external_auth,
__entry->bssid, __entry->ssid, __entry->status)
);
+TRACE_EVENT(rdev_start_radar_detection,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_chan_def *chandef,
+ u32 cac_time_ms),
+ TP_ARGS(wiphy, netdev, chandef, cac_time_ms),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ CHAN_DEF_ENTRY
+ __field(u32, cac_time_ms)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ CHAN_DEF_ASSIGN(chandef);
+ __entry->cac_time_ms = cac_time_ms;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
+ ", cac_time_ms=%u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
+ __entry->cac_time_ms)
+);
+
+TRACE_EVENT(rdev_set_mcast_rate,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ int *mcast_rate),
+ TP_ARGS(wiphy, netdev, mcast_rate),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __array(int, mcast_rate, NUM_NL80211_BANDS)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ memcpy(__entry->mcast_rate, mcast_rate,
+ sizeof(int) * NUM_NL80211_BANDS);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", "
+ "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]",
+ WIPHY_PR_ARG, NETDEV_PR_ARG,
+ __entry->mcast_rate[NL80211_BAND_2GHZ],
+ __entry->mcast_rate[NL80211_BAND_5GHZ],
+ __entry->mcast_rate[NL80211_BAND_60GHZ])
+);
+
+TRACE_EVENT(rdev_set_coalesce,
+ TP_PROTO(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce),
+ TP_ARGS(wiphy, coalesce),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(int, n_rules)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ __entry->n_rules = coalesce ? coalesce->n_rules : 0;
+ ),
+ TP_printk(WIPHY_PR_FMT ", n_rules=%d",
+ WIPHY_PR_ARG, __entry->n_rules)
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+ TP_ARGS(wiphy, wdev)
+);
+
+TRACE_EVENT(rdev_set_multicast_to_unicast,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ const bool enabled),
+ TP_ARGS(wiphy, netdev, enabled),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(bool, enabled)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->enabled = enabled;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", unicast: %s",
+ WIPHY_PR_ARG, NETDEV_PR_ARG,
+ BOOL_TO_STR(__entry->enabled))
+);
+
+DEFINE_EVENT(wiphy_wdev_evt, rdev_get_txq_stats,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+ TP_ARGS(wiphy, wdev)
+);
+
+TRACE_EVENT(rdev_get_ftm_responder_stats,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_ftm_responder_stats *ftm_stats),
+
+ TP_ARGS(wiphy, netdev, ftm_stats),
+
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(u64, timestamp)
+ __field(u32, success_num)
+ __field(u32, partial_num)
+ __field(u32, failed_num)
+ __field(u32, asap_num)
+ __field(u32, non_asap_num)
+ __field(u64, duration)
+ __field(u32, unknown_triggers)
+ __field(u32, reschedule)
+ __field(u32, out_of_window)
+ ),
+
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->success_num = ftm_stats->success_num;
+ __entry->partial_num = ftm_stats->partial_num;
+ __entry->failed_num = ftm_stats->failed_num;
+ __entry->asap_num = ftm_stats->asap_num;
+ __entry->non_asap_num = ftm_stats->non_asap_num;
+ __entry->duration = ftm_stats->total_duration_ms;
+ __entry->unknown_triggers = ftm_stats->unknown_triggers_num;
+ __entry->reschedule = ftm_stats->reschedule_requests_num;
+ __entry->out_of_window = ftm_stats->out_of_window_triggers_num;
+ ),
+
+	TP_printk(WIPHY_PR_FMT ", FTM responder stats: success %u, partial %u, "
+ "failed %u, asap %u, non asap %u, total duration %llu, unknown "
+ "triggers %u, rescheduled %u, out of window %u", WIPHY_PR_ARG,
+ __entry->success_num, __entry->partial_num, __entry->failed_num,
+ __entry->asap_num, __entry->non_asap_num, __entry->duration,
+ __entry->unknown_triggers, __entry->reschedule,
+ __entry->out_of_window)
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
@@ -3160,105 +3294,6 @@ TRACE_EVENT(cfg80211_stop_iface,
TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT,
WIPHY_PR_ARG, WDEV_PR_ARG)
);
-
-TRACE_EVENT(rdev_start_radar_detection,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_chan_def *chandef,
- u32 cac_time_ms),
- TP_ARGS(wiphy, netdev, chandef, cac_time_ms),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- CHAN_DEF_ENTRY
- __field(u32, cac_time_ms)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- CHAN_DEF_ASSIGN(chandef);
- __entry->cac_time_ms = cac_time_ms;
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
- ", cac_time_ms=%u",
- WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
- __entry->cac_time_ms)
-);
-
-TRACE_EVENT(rdev_set_mcast_rate,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- int *mcast_rate),
- TP_ARGS(wiphy, netdev, mcast_rate),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- __array(int, mcast_rate, NUM_NL80211_BANDS)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- memcpy(__entry->mcast_rate, mcast_rate,
- sizeof(int) * NUM_NL80211_BANDS);
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", "
- "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]",
- WIPHY_PR_ARG, NETDEV_PR_ARG,
- __entry->mcast_rate[NL80211_BAND_2GHZ],
- __entry->mcast_rate[NL80211_BAND_5GHZ],
- __entry->mcast_rate[NL80211_BAND_60GHZ])
-);
-
-TRACE_EVENT(rdev_set_coalesce,
- TP_PROTO(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce),
- TP_ARGS(wiphy, coalesce),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- __field(int, n_rules)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- __entry->n_rules = coalesce ? coalesce->n_rules : 0;
- ),
- TP_printk(WIPHY_PR_FMT ", n_rules=%d",
- WIPHY_PR_ARG, __entry->n_rules)
-);
-
-DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan,
- TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
- TP_ARGS(wiphy, wdev)
-);
-
-TRACE_EVENT(rdev_set_multicast_to_unicast,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
- const bool enabled),
- TP_ARGS(wiphy, netdev, enabled),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- NETDEV_ENTRY
- __field(bool, enabled)
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- NETDEV_ASSIGN;
- __entry->enabled = enabled;
- ),
- TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", unicast: %s",
- WIPHY_PR_ARG, NETDEV_PR_ARG,
- BOOL_TO_STR(__entry->enabled))
-);
-
-TRACE_EVENT(rdev_get_txq_stats,
- TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
- TP_ARGS(wiphy, wdev),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- WDEV_ENTRY
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- WDEV_ASSIGN;
- ),
- TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
-);
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 959ed3acd240..ef14d80ca03e 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -5,17 +5,20 @@
* Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*/
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
+#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/ip.h>
#include <net/dsfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/gcd.h>
+#include <linux/bitfield.h>
#include "core.h"
#include "rdev-ops.h"
@@ -88,7 +91,7 @@ int ieee80211_channel_to_frequency(int chan, enum nl80211_band band)
return 5000 + chan * 5;
break;
case NL80211_BAND_60GHZ:
- if (chan < 5)
+ if (chan < 7)
return 56160 + chan * 2160;
break;
default:
@@ -109,7 +112,7 @@ int ieee80211_frequency_to_channel(int freq)
return (freq - 4000) / 5;
else if (freq <= 45000) /* DMG band lower limit */
return (freq - 5000) / 5;
- else if (freq >= 58320 && freq <= 64800)
+ else if (freq >= 58320 && freq <= 70200)
return (freq - 56160) / 2160;
else
return 0;
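
Both 60 GHz changes extend the DMG mapping from four channels to six while keeping the linear 56160 + ch * 2160 MHz formula, so the round trip still holds at the new upper edge:

/* DMG channels 1..6: 58320, 60480, 62640, 64800, 66960, 69120 MHz */
int f6 = ieee80211_channel_to_frequency(6, NL80211_BAND_60GHZ);	/* 69120 */
int c6 = ieee80211_frequency_to_channel(69120);			/* 6 */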
@@ -1568,7 +1571,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
}
/* 56.16 GHz, channel 1..4 */
- if (freq >= 56160 + 2160 * 1 && freq <= 56160 + 2160 * 4) {
+ if (freq >= 56160 + 2160 * 1 && freq <= 56160 + 2160 * 6) {
if (chandef->width >= NL80211_CHAN_WIDTH_40)
return false;
@@ -1893,3 +1896,154 @@ EXPORT_SYMBOL(rfc1042_header);
const unsigned char bridge_tunnel_header[] __aligned(2) =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
EXPORT_SYMBOL(bridge_tunnel_header);
+
+/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
+struct iapp_layer2_update {
+ u8 da[ETH_ALEN]; /* broadcast */
+ u8 sa[ETH_ALEN]; /* STA addr */
+ __be16 len; /* 6 */
+ u8 dsap; /* 0 */
+ u8 ssap; /* 0 */
+ u8 control;
+ u8 xid_info[3];
+} __packed;
+
+void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr)
+{
+ struct iapp_layer2_update *msg;
+ struct sk_buff *skb;
+
+	/* Send a Layer 2 Update frame to update forwarding tables in
+	 * layer 2 bridge devices */
+
+ skb = dev_alloc_skb(sizeof(*msg));
+ if (!skb)
+ return;
+ msg = skb_put(skb, sizeof(*msg));
+
+ /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
+ * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
+
+ eth_broadcast_addr(msg->da);
+ ether_addr_copy(msg->sa, addr);
+ msg->len = htons(6);
+ msg->dsap = 0;
+ msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */
+ msg->control = 0xaf; /* XID response lsb.1111F101.
+ * F=0 (no poll command; unsolicited frame) */
+ msg->xid_info[0] = 0x81; /* XID format identifier */
+ msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */
+ msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ memset(skb->cb, 0, sizeof(skb->cb));
+ netif_rx_ni(skb);
+}
+EXPORT_SYMBOL(cfg80211_send_layer2_update);
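+
A hedged usage note: the intended caller of this export is AP-side code right after a station (re)associates, so that 802.1D bridges upstream of the AP relearn the station's MAC address on the new port; sta_addr below is a hypothetical variable, not part of this patch.

/* after the STA completes (re)association on netdev "dev": */
cfg80211_send_layer2_update(dev, sta_addr);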
+
+int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
+ enum ieee80211_vht_chanwidth bw,
+ int mcs, bool ext_nss_bw_capable)
+{
+ u16 map = le16_to_cpu(cap->supp_mcs.rx_mcs_map);
+ int max_vht_nss = 0;
+ int ext_nss_bw;
+ int supp_width;
+ int i, mcs_encoding;
+
+ if (map == 0xffff)
+ return 0;
+
+ if (WARN_ON(mcs > 9))
+ return 0;
+ if (mcs <= 7)
+ mcs_encoding = 0;
+ else if (mcs == 8)
+ mcs_encoding = 1;
+ else
+ mcs_encoding = 2;
+
+ /* find max_vht_nss for the given MCS */
+ for (i = 7; i >= 0; i--) {
+ int supp = (map >> (2 * i)) & 3;
+
+ if (supp == 3)
+ continue;
+
+ if (supp >= mcs_encoding) {
+ max_vht_nss = i;
+ break;
+ }
+ }
+
+ if (!(cap->supp_mcs.tx_mcs_map &
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)))
+ return max_vht_nss;
+
+ ext_nss_bw = le32_get_bits(cap->vht_cap_info,
+ IEEE80211_VHT_CAP_EXT_NSS_BW_MASK);
+ supp_width = le32_get_bits(cap->vht_cap_info,
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK);
+
+ /* if not capable, treat ext_nss_bw as 0 */
+ if (!ext_nss_bw_capable)
+ ext_nss_bw = 0;
+
+ /* This is invalid */
+ if (supp_width == 3)
+ return 0;
+
+ /* This is an invalid combination so pretend nothing is supported */
+ if (supp_width == 2 && (ext_nss_bw == 1 || ext_nss_bw == 2))
+ return 0;
+
+ /*
+ * Cover all the special cases according to IEEE 802.11-2016
+	 * Table 9-250. All other cases are either a factor of 1 or not
+ * valid/supported.
+ */
+ switch (bw) {
+ case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ if ((supp_width == 1 || supp_width == 2) &&
+ ext_nss_bw == 3)
+ return 2 * max_vht_nss;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ if (supp_width == 0 &&
+ (ext_nss_bw == 1 || ext_nss_bw == 2))
+ return DIV_ROUND_UP(max_vht_nss, 2);
+ if (supp_width == 0 &&
+ ext_nss_bw == 3)
+ return DIV_ROUND_UP(3 * max_vht_nss, 4);
+ if (supp_width == 1 &&
+ ext_nss_bw == 3)
+ return 2 * max_vht_nss;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ if (supp_width == 0 &&
+ (ext_nss_bw == 1 || ext_nss_bw == 2))
+ return 0; /* not possible */
+ if (supp_width == 0 &&
+ ext_nss_bw == 2)
+ return DIV_ROUND_UP(max_vht_nss, 2);
+ if (supp_width == 0 &&
+ ext_nss_bw == 3)
+ return DIV_ROUND_UP(3 * max_vht_nss, 4);
+ if (supp_width == 1 &&
+ ext_nss_bw == 0)
+ return 0; /* not possible */
+ if (supp_width == 1 &&
+ ext_nss_bw == 1)
+ return DIV_ROUND_UP(max_vht_nss, 2);
+ if (supp_width == 1 &&
+ ext_nss_bw == 2)
+ return DIV_ROUND_UP(3 * max_vht_nss, 4);
+ break;
+ }
+
+ /* not covered or invalid combination received */
+ return max_vht_nss;
+}
+EXPORT_SYMBOL(ieee80211_get_vht_max_nss);
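
A worked instance of the special cases above, read off the switch as implemented: a peer that only supports 80 MHz (supp_width == 0) but advertises ext_nss_bw == 1 may still operate at 160 MHz with half its spatial streams; cap is a hypothetical, already-parsed VHT capability element and 4 the assumed NSS at 80 MHz.

int nss = ieee80211_get_vht_max_nss(&cap, IEEE80211_VHT_CHANWIDTH_160MHZ,
				    9 /* MCS */, true);
/* supp_width == 0 && ext_nss_bw == 1: DIV_ROUND_UP(4, 2) == 2 */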
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 167f7025ac98..06943d9c9835 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1278,12 +1278,16 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
if (err)
return err;
- if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)))
- return -EOPNOTSUPP;
+ if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
- return 0;
+free:
+ cfg80211_sinfo_release_content(&sinfo);
+ return err;
}
/* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */
@@ -1293,7 +1297,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
/* we are under RTNL - globally locked - so can use static structs */
static struct iw_statistics wstats;
- static struct station_info sinfo;
+ static struct station_info sinfo = {};
u8 bssid[ETH_ALEN];
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
@@ -1352,6 +1356,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))
wstats.discard.retries = sinfo.tx_failed;
+ cfg80211_sinfo_release_content(&sinfo);
+
return &wstats;
}
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index bfe2dbea480b..a264cf2accd0 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -32,37 +32,49 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
unsigned long flags;
- if (xs->dev) {
- spin_lock_irqsave(&umem->xsk_list_lock, flags);
- list_del_rcu(&xs->list);
- spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
-
- if (umem->zc)
- synchronize_net();
- }
+ spin_lock_irqsave(&umem->xsk_list_lock, flags);
+ list_del_rcu(&xs->list);
+ spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}
-int xdp_umem_query(struct net_device *dev, u16 queue_id)
+/* The umem is stored both in the _rx struct and the _tx struct as we do
+ * not know if the device has more tx queues than rx, or vice versa.
+ * This might also change at run time.
+ */
+static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
+ u16 queue_id)
{
- struct netdev_bpf bpf;
+ if (queue_id < dev->real_num_rx_queues)
+ dev->_rx[queue_id].umem = umem;
+ if (queue_id < dev->real_num_tx_queues)
+ dev->_tx[queue_id].umem = umem;
+}
- ASSERT_RTNL();
+struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
+ u16 queue_id)
+{
+ if (queue_id < dev->real_num_rx_queues)
+ return dev->_rx[queue_id].umem;
+ if (queue_id < dev->real_num_tx_queues)
+ return dev->_tx[queue_id].umem;
- memset(&bpf, 0, sizeof(bpf));
- bpf.command = XDP_QUERY_XSK_UMEM;
- bpf.xsk.queue_id = queue_id;
+ return NULL;
+}
- if (!dev->netdev_ops->ndo_bpf)
- return 0;
- return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
+static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
+{
+ if (queue_id < dev->real_num_rx_queues)
+ dev->_rx[queue_id].umem = NULL;
+ if (queue_id < dev->real_num_tx_queues)
+ dev->_tx[queue_id].umem = NULL;
}
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
- u32 queue_id, u16 flags)
+ u16 queue_id, u16 flags)
{
bool force_zc, force_copy;
struct netdev_bpf bpf;
- int err;
+ int err = 0;
force_zc = flags & XDP_ZEROCOPY;
force_copy = flags & XDP_COPY;
@@ -70,19 +82,23 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
if (force_zc && force_copy)
return -EINVAL;
- if (force_copy)
- return 0;
-
- if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
- return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */
+ rtnl_lock();
+ if (xdp_get_umem_from_qid(dev, queue_id)) {
+ err = -EBUSY;
+ goto out_rtnl_unlock;
+ }
- bpf.command = XDP_QUERY_XSK_UMEM;
+ xdp_reg_umem_at_qid(dev, umem, queue_id);
+ umem->dev = dev;
+ umem->queue_id = queue_id;
+ if (force_copy)
+ /* For copy-mode, we are done. */
+ goto out_rtnl_unlock;
- rtnl_lock();
- err = xdp_umem_query(dev, queue_id);
- if (err) {
- err = err < 0 ? -EOPNOTSUPP : -EBUSY;
- goto err_rtnl_unlock;
+ if (!dev->netdev_ops->ndo_bpf ||
+ !dev->netdev_ops->ndo_xsk_async_xmit) {
+ err = -EOPNOTSUPP;
+ goto err_unreg_umem;
}
bpf.command = XDP_SETUP_XSK_UMEM;
@@ -91,18 +107,20 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
err = dev->netdev_ops->ndo_bpf(dev, &bpf);
if (err)
- goto err_rtnl_unlock;
+ goto err_unreg_umem;
rtnl_unlock();
dev_hold(dev);
- umem->dev = dev;
- umem->queue_id = queue_id;
umem->zc = true;
return 0;
-err_rtnl_unlock:
+err_unreg_umem:
+ xdp_clear_umem_at_qid(dev, queue_id);
+ if (!force_zc)
+ err = 0; /* fallback to copy mode */
+out_rtnl_unlock:
rtnl_unlock();
- return force_zc ? err : 0; /* fail or fallback */
+ return err;
}
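
Seen from the socket side, the reworked xdp_umem_assign_dev() registers the umem on the queue even in copy mode, so a second socket binding the same (dev, queue) pair now gets -EBUSY instead of silently sharing. A userspace sketch under the standard AF_XDP uapi; ifindex and fd are assumed to exist:

struct sockaddr_xdp sxdp = {
	.sxdp_family = AF_XDP,
	.sxdp_ifindex = ifindex,	/* e.g. from if_nametoindex() */
	.sxdp_queue_id = 0,
	.sxdp_flags = XDP_ZEROCOPY,	/* or XDP_COPY; both at once is -EINVAL */
};
bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));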
static void xdp_umem_clear_dev(struct xdp_umem *umem)
@@ -110,7 +128,7 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem)
struct netdev_bpf bpf;
int err;
- if (umem->dev) {
+ if (umem->zc) {
bpf.command = XDP_SETUP_XSK_UMEM;
bpf.xsk.umem = NULL;
bpf.xsk.queue_id = umem->queue_id;
@@ -121,9 +139,17 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem)
if (err)
WARN(1, "failed to disable umem!\n");
+ }
+
+ if (umem->dev) {
+ rtnl_lock();
+ xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
+ rtnl_unlock();
+ }
+ if (umem->zc) {
dev_put(umem->dev);
- umem->dev = NULL;
+ umem->zc = false;
}
}
@@ -167,6 +193,8 @@ static void xdp_umem_release(struct xdp_umem *umem)
umem->cq = NULL;
}
+ xsk_reuseq_destroy(umem);
+
xdp_umem_unpin_pages(umem);
task = get_pid_task(umem->pid, PIDTYPE_PID);
@@ -314,8 +342,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
umem->pid = get_task_pid(current, PIDTYPE_PID);
umem->address = (unsigned long)addr;
- umem->props.chunk_mask = ~((u64)chunk_size - 1);
- umem->props.size = size;
+ umem->chunk_mask = ~((u64)chunk_size - 1);
+ umem->size = size;
umem->headroom = headroom;
umem->chunk_size_nohr = chunk_size - headroom;
umem->npgs = size / PAGE_SIZE;
diff --git a/net/xdp/xdp_umem.h b/net/xdp/xdp_umem.h
index f11560334f88..27603227601b 100644
--- a/net/xdp/xdp_umem.h
+++ b/net/xdp/xdp_umem.h
@@ -8,18 +8,8 @@
#include <net/xdp_sock.h>
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
- return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
- return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
-}
-
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
- u32 queue_id, u16 flags);
+ u16 queue_id, u16 flags);
bool xdp_umem_validate_queues(struct xdp_umem *umem);
void xdp_get_umem(struct xdp_umem *umem);
void xdp_put_umem(struct xdp_umem *umem);
diff --git a/net/xdp/xdp_umem_props.h b/net/xdp/xdp_umem_props.h
deleted file mode 100644
index 40eab10dfc49..000000000000
--- a/net/xdp/xdp_umem_props.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* XDP user-space packet buffer
- * Copyright(c) 2018 Intel Corporation.
- */
-
-#ifndef XDP_UMEM_PROPS_H_
-#define XDP_UMEM_PROPS_H_
-
-struct xdp_umem_props {
- u64 chunk_mask;
- u64 size;
-};
-
-#endif /* XDP_UMEM_PROPS_H_ */
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 4e937cd7c17d..07156f43d295 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -55,20 +55,30 @@ EXPORT_SYMBOL(xsk_umem_discard_addr);
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
- void *buffer;
+ void *to_buf, *from_buf;
+ u32 metalen;
u64 addr;
int err;
if (!xskq_peek_addr(xs->umem->fq, &addr) ||
- len > xs->umem->chunk_size_nohr) {
+ len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
xs->rx_dropped++;
return -ENOSPC;
}
addr += xs->umem->headroom;
- buffer = xdp_umem_get_data(xs->umem, addr);
- memcpy(buffer, xdp->data, len);
+ if (unlikely(xdp_data_meta_unsupported(xdp))) {
+ from_buf = xdp->data;
+ metalen = 0;
+ } else {
+ from_buf = xdp->data_meta;
+ metalen = xdp->data - xdp->data_meta;
+ }
+
+ to_buf = xdp_umem_get_data(xs->umem, addr);
+ memcpy(to_buf, from_buf, len + metalen);
+ addr += metalen;
err = xskq_produce_batch_desc(xs->rx, addr, len);
if (!err) {
xskq_discard_addr(xs->umem->fq);
@@ -111,6 +121,7 @@ void xsk_flush(struct xdp_sock *xs)
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
+ u32 metalen = xdp->data - xdp->data_meta;
u32 len = xdp->data_end - xdp->data;
void *buffer;
u64 addr;
@@ -120,7 +131,7 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
return -EINVAL;
if (!xskq_peek_addr(xs->umem->fq, &addr) ||
- len > xs->umem->chunk_size_nohr) {
+ len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
xs->rx_dropped++;
return -ENOSPC;
}
@@ -128,7 +139,8 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
addr += xs->umem->headroom;
buffer = xdp_umem_get_data(xs->umem, addr);
- memcpy(buffer, xdp->data, len);
+ memcpy(buffer, xdp->data_meta, len + metalen);
+ addr += metalen;
err = xskq_produce_batch_desc(xs->rx, addr, len);
if (!err) {
xskq_discard_addr(xs->umem->fq);
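
Both receive paths now copy the XDP metadata together with the frame and then advance addr by metalen, so the descriptor handed to userspace points at the packet while the metadata sits directly in front of it. A layout sketch inferred from the arithmetic above, not a separately documented uapi guarantee:

/*
 * umem chunk:  | headroom | metadata (metalen) | packet data (len) |
 *                                              ^
 *                                   desc->addr (after += metalen)
 *
 * a consumer that knows metalen can read the metadata at
 * desc->addr - metalen, mirroring xdp->data_meta in the kernel.
 */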
@@ -343,12 +355,18 @@ static int xsk_release(struct socket *sock)
local_bh_enable();
if (xs->dev) {
+ struct net_device *dev = xs->dev;
+
/* Wait for driver to stop using the xdp socket. */
- synchronize_net();
- dev_put(xs->dev);
+ xdp_del_sk_umem(xs->umem, xs);
xs->dev = NULL;
+ synchronize_net();
+ dev_put(dev);
}
+ xskq_destroy(xs->rx);
+ xskq_destroy(xs->tx);
+
sock_orphan(sk);
sock->sk = NULL;
@@ -407,13 +425,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
}
qid = sxdp->sxdp_queue_id;
-
- if ((xs->rx && qid >= dev->real_num_rx_queues) ||
- (xs->tx && qid >= dev->real_num_tx_queues)) {
- err = -EINVAL;
- goto out_unlock;
- }
-
flags = sxdp->sxdp_flags;
if (flags & XDP_SHARED_UMEM) {
@@ -458,8 +469,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
goto out_unlock;
} else {
/* This xsk has its own umem. */
- xskq_set_umem(xs->umem->fq, &xs->umem->props);
- xskq_set_umem(xs->umem->cq, &xs->umem->props);
+ xskq_set_umem(xs->umem->fq, xs->umem->size,
+ xs->umem->chunk_mask);
+ xskq_set_umem(xs->umem->cq, xs->umem->size,
+ xs->umem->chunk_mask);
err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
if (err)
@@ -469,8 +482,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
xs->dev = dev;
xs->zc = xs->umem->zc;
xs->queue_id = qid;
- xskq_set_umem(xs->rx, &xs->umem->props);
- xskq_set_umem(xs->tx, &xs->umem->props);
+ xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
+ xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
xdp_add_sk_umem(xs->umem, xs);
out_unlock:
@@ -707,9 +720,6 @@ static void xsk_destruct(struct sock *sk)
if (!sock_flag(sk, SOCK_DEAD))
return;
- xskq_destroy(xs->rx);
- xskq_destroy(xs->tx);
- xdp_del_sk_umem(xs->umem, xs);
xdp_put_umem(xs->umem);
sk_refcnt_debug_dec(sk);
@@ -744,6 +754,8 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
sk->sk_destruct = xsk_destruct;
sk_refcnt_debug_inc(sk);
+ sock_set_flag(sk, SOCK_RCU_FREE);
+
xs = xdp_sk(sk);
mutex_init(&xs->mutex);
spin_lock_init(&xs->tx_completion_lock);
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
index 6c32e92e98fc..b66504592d9b 100644
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -3,16 +3,19 @@
* Copyright(c) 2018 Intel Corporation.
*/
+#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/overflow.h>
#include "xsk_queue.h"
-void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props)
+void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
{
if (!q)
return;
- q->umem_props = *umem_props;
+ q->size = size;
+ q->chunk_mask = chunk_mask;
}
static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
@@ -61,3 +64,56 @@ void xskq_destroy(struct xsk_queue *q)
page_frag_free(q->ring);
kfree(q);
}
+
+struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+{
+ struct xdp_umem_fq_reuse *newq;
+
+ /* Check for overflow */
+ if (nentries > (u32)roundup_pow_of_two(nentries))
+ return NULL;
+ nentries = roundup_pow_of_two(nentries);
+
+ newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
+ if (!newq)
+ return NULL;
+ memset(newq, 0, offsetof(typeof(*newq), handles));
+
+ newq->nentries = nentries;
+ return newq;
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);
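
Why the overflow check in xsk_reuseq_prepare() works: for any nentries above 2^31, roundup_pow_of_two() would have to produce 2^32, which truncates to 0 in the u32 cast, so the comparison trips. A small illustration, assuming a 64-bit build where the intermediate result wraps to zero in u32:

u32 n = 0x80000001;
u32 r = (u32)roundup_pow_of_two(n);	/* 2^32 truncated to 0 */
/* n > r, so the request is rejected rather than allocating a
 * zero-entry reuse ring */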
+
+struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
+ struct xdp_umem_fq_reuse *newq)
+{
+ struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;
+
+ if (!oldq) {
+ umem->fq_reuse = newq;
+ return NULL;
+ }
+
+ if (newq->nentries < oldq->length)
+ return newq;
+
+ memcpy(newq->handles, oldq->handles,
+ array_size(oldq->length, sizeof(u64)));
+ newq->length = oldq->length;
+
+ umem->fq_reuse = newq;
+ return oldq;
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
+
+void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+{
+ kvfree(rq);
+}
+EXPORT_SYMBOL_GPL(xsk_reuseq_free);
+
+void xsk_reuseq_destroy(struct xdp_umem *umem)
+{
+ xsk_reuseq_free(umem->fq_reuse);
+ umem->fq_reuse = NULL;
+}
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 8a64b150be54..bcb5cbb40419 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -31,7 +31,8 @@ struct xdp_umem_ring {
};
struct xsk_queue {
- struct xdp_umem_props umem_props;
+ u64 chunk_mask;
+ u64 size;
u32 ring_mask;
u32 nentries;
u32 prod_head;
@@ -78,7 +79,7 @@ static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
- if (addr >= q->umem_props.size) {
+ if (addr >= q->size) {
q->invalid_descs++;
return false;
}
@@ -92,7 +93,7 @@ static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
unsigned int idx = q->cons_tail & q->ring_mask;
- *addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
+ *addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask;
if (xskq_is_valid_addr(q, *addr))
return addr;
@@ -173,8 +174,8 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
if (!xskq_is_valid_addr(q, d->addr))
return false;
- if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
- (d->addr & q->umem_props.chunk_mask)) {
+ if (((d->addr + d->len) & q->chunk_mask) !=
+ (d->addr & q->chunk_mask)) {
q->invalid_descs++;
return false;
}
@@ -253,8 +254,11 @@ static inline bool xskq_empty_desc(struct xsk_queue *q)
return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
}
-void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
+void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);
+/* Executed by the core when the entire UMEM gets freed */
+void xsk_reuseq_destroy(struct xdp_umem *umem);
+
#endif /* _LINUX_XSK_QUEUE_H */
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 5611b7521020..144c137886b1 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -99,7 +99,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
do {
struct sk_buff *nskb = skb2->next;
- skb2->next = NULL;
+ skb_mark_not_on_list(skb2);
xo = xfrm_offload(skb2);
xo->flags |= XFRM_DEV_RESUME;
@@ -192,9 +192,13 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
err = dev->xfrmdev_ops->xdo_dev_state_add(x);
if (err) {
+ xso->num_exthdrs = 0;
+ xso->flags = 0;
xso->dev = NULL;
dev_put(dev);
- return err;
+
+ if (err != -EOPNOTSUPP)
+ return err;
}
return 0;
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 61be810389d8..ce66323102f9 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -13,7 +13,7 @@ static inline unsigned int __xfrm4_addr_hash(const xfrm_address_t *addr)
static inline unsigned int __xfrm6_addr_hash(const xfrm_address_t *addr)
{
- return ntohl(addr->a6[2] ^ addr->a6[3]);
+ return jhash2((__force u32 *)addr->a6, 4, 0);
}
static inline unsigned int __xfrm4_daddr_saddr_hash(const xfrm_address_t *daddr,
@@ -26,8 +26,7 @@ static inline unsigned int __xfrm4_daddr_saddr_hash(const xfrm_address_t *daddr,
static inline unsigned int __xfrm6_daddr_saddr_hash(const xfrm_address_t *daddr,
const xfrm_address_t *saddr)
{
- return ntohl(daddr->a6[2] ^ daddr->a6[3] ^
- saddr->a6[2] ^ saddr->a6[3]);
+ return __xfrm6_addr_hash(daddr) ^ __xfrm6_addr_hash(saddr);
}
static inline u32 __bits2mask32(__u8 bits)
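
The hash change deserves a note: the previous scheme xor-ed only the low 64 bits of each IPv6 address, so addresses differing solely in their upper words all collided and the bucket mapping was trivially predictable by a remote peer; jhash2() mixes all 128 bits. The call in isolation:

#include <linux/jhash.h>

/* hash all four 32-bit words of the IPv6 address with seed 0 */
u32 h = jhash2((__force u32 *)addr->a6, 4, 0);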
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b89c9c7f8c5c..684c0bc01e2c 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -131,7 +131,7 @@ struct sec_path *secpath_dup(struct sec_path *src)
sp->len = 0;
sp->olen = 0;
- memset(sp->ovec, 0, sizeof(sp->ovec[XFRM_MAX_OFFLOAD_DEPTH]));
+ memset(sp->ovec, 0, sizeof(sp->ovec));
if (src) {
int i;
@@ -458,6 +458,7 @@ resume:
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
goto drop;
}
+ crypto_done = false;
} while (!err);
err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 31acc6f33d98..d679fa0f44b3 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -116,6 +116,9 @@ static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
static void xfrmi_dev_free(struct net_device *dev)
{
+ struct xfrm_if *xi = netdev_priv(dev);
+
+ gro_cells_destroy(&xi->gro_cells);
free_percpu(dev->tstats);
}
@@ -469,9 +472,9 @@ static int xfrmi4_err(struct sk_buff *skb, u32 info)
}
if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
- ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0);
+ ipv4_update_pmtu(skb, net, info, 0, protocol);
else
- ipv4_redirect(skb, net, 0, 0, protocol, 0);
+ ipv4_redirect(skb, net, 0, protocol);
xfrm_state_put(x);
return 0;
@@ -561,9 +564,6 @@ static void xfrmi_get_stats64(struct net_device *dev,
{
int cpu;
- if (!dev->tstats)
- return;
-
for_each_possible_cpu(cpu) {
struct pcpu_sw_netstats *stats;
struct pcpu_sw_netstats tmp;
@@ -742,7 +742,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-struct net *xfrmi_get_link_net(const struct net_device *dev)
+static struct net *xfrmi_get_link_net(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 45ba07ab3e4f..4ae87c5ce2e3 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -100,6 +100,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
spin_unlock_bh(&x->lock);
skb_dst_force(skb);
+ if (!skb_dst(skb)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+ goto error_nolock;
+ }
if (xfrm_offload(skb)) {
x->type_offload->encap(x, skb);
@@ -189,7 +193,7 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
struct sk_buff *nskb = segs->next;
int err;
- segs->next = NULL;
+ skb_mark_not_on_list(segs);
err = xfrm_output2(net, sk, segs);
if (unlikely(err)) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 3110c3fbee20..119a427d9b2b 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -632,9 +632,9 @@ static void xfrm_hash_rebuild(struct work_struct *work)
break;
}
if (newpos)
- hlist_add_behind(&policy->bydst, newpos);
+ hlist_add_behind_rcu(&policy->bydst, newpos);
else
- hlist_add_head(&policy->bydst, chain);
+ hlist_add_head_rcu(&policy->bydst, chain);
}
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
@@ -774,9 +774,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
break;
}
if (newpos)
- hlist_add_behind(&policy->bydst, newpos);
+ hlist_add_behind_rcu(&policy->bydst, newpos);
else
- hlist_add_head(&policy->bydst, chain);
+ hlist_add_head_rcu(&policy->bydst, chain);
__xfrm_policy_link(policy, dir);
/* After previous checking, family can either be AF_INET or AF_INET6 */
@@ -2491,6 +2491,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
}
skb_dst_force(skb);
+ if (!skb_dst(skb)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
+ return 0;
+ }
dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
if (IS_ERR(dst)) {
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 4791aa8b8185..ca7a207b81a9 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
err = -EINVAL;
switch (p->family) {
case AF_INET:
+ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+ goto out;
+
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
+ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+ goto out;
+
break;
#else
err = -EAFNOSUPPORT;
@@ -1001,7 +1007,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
int err;
err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX, xfrma_policy,
- NULL);
+ cb->extack);
if (err < 0)
return err;
@@ -1396,10 +1402,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
switch (p->sel.family) {
case AF_INET:
+ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+ return -EINVAL;
+
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
+ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+ return -EINVAL;
+
break;
#else
return -EAFNOSUPPORT;
@@ -1480,6 +1492,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
(ut[i].family != prev_family))
return -EINVAL;
+ if (ut[i].mode >= XFRM_MODE_MAX)
+ return -EINVAL;
+
prev_family = ut[i].family;
switch (ut[i].family) {
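The four prefix-length checks added above all enforce one invariant: a selector prefix cannot exceed the address width of its family, 32 bits for AF_INET and 128 for AF_INET6, since an oversized prefixlen would later drive mask construction past the end of the address. The same invariant as a compact sketch (helper name hypothetical):

	static bool prefixlen_in_range(unsigned short family,
				       unsigned char plen_s, unsigned char plen_d)
	{
		unsigned char max = (family == AF_INET) ? 32 : 128;

		return plen_s <= max && plen_d <= max;
	}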
diff --git a/samples/Kconfig b/samples/Kconfig
index bd133efc1a56..ad1ec7016d4c 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -1,5 +1,6 @@
menuconfig SAMPLES
bool "Sample kernel code"
+ depends on !UML
help
You can build and test sample kernel code here.
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 36f9f41d094b..be0a961450bc 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -153,6 +153,7 @@ always += tcp_cong_kern.o
always += tcp_iw_kern.o
always += tcp_clamp_kern.o
always += tcp_basertt_kern.o
+always += tcp_tos_reflect_kern.o
always += xdp_redirect_kern.o
always += xdp_redirect_map_kern.o
always += xdp_redirect_cpu_kern.o
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 904e775d1a44..e6d7e0fe155b 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -16,7 +16,6 @@
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
-#include <sys/types.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
diff --git a/samples/bpf/sampleip_user.c b/samples/bpf/sampleip_user.c
index 60c2b73d1b4d..216c7ecbbbe9 100644
--- a/samples/bpf/sampleip_user.c
+++ b/samples/bpf/sampleip_user.c
@@ -9,7 +9,6 @@
*/
#include <stdio.h>
#include <stdlib.h>
-#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c
index f58acfc92556..f2f9dbc021b0 100644
--- a/samples/bpf/sockex2_kern.c
+++ b/samples/bpf/sockex2_kern.c
@@ -14,7 +14,7 @@ struct vlan_hdr {
__be16 h_vlan_encapsulated_proto;
};
-struct bpf_flow_keys {
+struct flow_key_record {
__be32 src;
__be32 dst;
union {
@@ -59,7 +59,7 @@ static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
}
static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
- struct bpf_flow_keys *flow)
+ struct flow_key_record *flow)
{
__u64 verlen;
@@ -83,7 +83,7 @@ static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto
}
static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto,
- struct bpf_flow_keys *flow)
+ struct flow_key_record *flow)
{
*ip_proto = load_byte(skb,
nhoff + offsetof(struct ipv6hdr, nexthdr));
@@ -96,7 +96,8 @@ static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_pro
return nhoff;
}
-static inline bool flow_dissector(struct __sk_buff *skb, struct bpf_flow_keys *flow)
+static inline bool flow_dissector(struct __sk_buff *skb,
+ struct flow_key_record *flow)
{
__u64 nhoff = ETH_HLEN;
__u64 ip_proto;
@@ -198,7 +199,7 @@ struct bpf_map_def SEC("maps") hash_map = {
SEC("socket2")
int bpf_prog2(struct __sk_buff *skb)
{
- struct bpf_flow_keys flow = {};
+ struct flow_key_record flow = {};
struct pair *value;
u32 key;
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
index 95907f8d2b17..c527b57d3ec8 100644
--- a/samples/bpf/sockex3_kern.c
+++ b/samples/bpf/sockex3_kern.c
@@ -61,7 +61,7 @@ struct vlan_hdr {
__be16 h_vlan_encapsulated_proto;
};
-struct bpf_flow_keys {
+struct flow_key_record {
__be32 src;
__be32 dst;
union {
@@ -88,7 +88,7 @@ static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
}
struct globals {
- struct bpf_flow_keys flow;
+ struct flow_key_record flow;
};
struct bpf_map_def SEC("maps") percpu_map = {
@@ -114,14 +114,14 @@ struct pair {
struct bpf_map_def SEC("maps") hash_map = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(struct bpf_flow_keys),
+ .key_size = sizeof(struct flow_key_record),
.value_size = sizeof(struct pair),
.max_entries = 1024,
};
static void update_stats(struct __sk_buff *skb, struct globals *g)
{
- struct bpf_flow_keys key = g->flow;
+ struct flow_key_record key = g->flow;
struct pair *value;
value = bpf_map_lookup_elem(&hash_map, &key);
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index 5ba3ae9d180b..9d02e0404719 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -13,7 +13,7 @@
#define PARSE_IP_PROG_FD (prog_fd[0])
#define PROG_ARRAY_FD (map_fd[0])
-struct bpf_flow_keys {
+struct flow_key_record {
__be32 src;
__be32 dst;
union {
@@ -64,7 +64,7 @@ int main(int argc, char **argv)
(void) f;
for (i = 0; i < 5; i++) {
- struct bpf_flow_keys key = {}, next_key;
+ struct flow_key_record key = {}, next_key;
struct pair value;
sleep(1);
diff --git a/samples/bpf/tcp_tos_reflect_kern.c b/samples/bpf/tcp_tos_reflect_kern.c
new file mode 100644
index 000000000000..d51dab19eca6
--- /dev/null
+++ b/samples/bpf/tcp_tos_reflect_kern.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Facebook
+ *
+ * BPF program to automatically reflect the TOS option from a received SYN packet
+ *
+ * Use load_sock_ops to load this BPF program.
+ */
+
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/if_packet.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/in.h>
+#include <linux/socket.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+#define DEBUG 1
+
+#define bpf_printk(fmt, ...) \
+({ \
+ char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), \
+ ##__VA_ARGS__); \
+})
+
+SEC("sockops")
+int bpf_basertt(struct bpf_sock_ops *skops)
+{
+ char header[sizeof(struct ipv6hdr)];
+ struct ipv6hdr *hdr6;
+ struct iphdr *hdr;
+ int hdr_size = 0;
+ int save_syn = 1;
+ int tos = 0;
+ int rv = 0;
+ int op;
+
+ op = (int) skops->op;
+
+#ifdef DEBUG
+ bpf_printk("BPF command: %d\n", op);
+#endif
+ switch (op) {
+ case BPF_SOCK_OPS_TCP_LISTEN_CB:
+ rv = bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
+ &save_syn, sizeof(save_syn));
+ break;
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ if (skops->family == AF_INET)
+ hdr_size = sizeof(struct iphdr);
+ else
+ hdr_size = sizeof(struct ipv6hdr);
+ rv = bpf_getsockopt(skops, SOL_TCP, TCP_SAVED_SYN,
+ header, hdr_size);
+ if (!rv) {
+ if (skops->family == AF_INET) {
+ hdr = (struct iphdr *) header;
+ tos = hdr->tos;
+ if (tos != 0)
+ bpf_setsockopt(skops, SOL_IP, IP_TOS,
+ &tos, sizeof(tos));
+ } else {
+ hdr6 = (struct ipv6hdr *) header;
+ tos = ((hdr6->priority) << 4 |
+ (hdr6->flow_lbl[0]) >> 4);
+ if (tos)
+ bpf_setsockopt(skops, SOL_IPV6,
+ IPV6_TCLASS,
+ &tos, sizeof(tos));
+ }
+ rv = 0;
+ }
+ break;
+ default:
+ rv = -1;
+ }
+#ifdef DEBUG
+ bpf_printk("Returning %d\n", rv);
+#endif
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
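The shift-and-or in the IPv6 branch above is needed because struct ipv6hdr splits the 8-bit Traffic Class across two fields: the high nibble sits in 'priority' and the low nibble in the top half of flow_lbl[0]. The same computation isolated as a helper (a sketch of what the sample does inline):

	static inline int ipv6_tclass_of(const struct ipv6hdr *h6)
	{
		/* 4 bits from 'priority', 4 bits from flow_lbl[0] */
		return (h6->priority << 4) | (h6->flow_lbl[0] >> 4);
	}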
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c
index 180f9d813bca..d7b68ef5ba79 100644
--- a/samples/bpf/test_cgrp2_attach2.c
+++ b/samples/bpf/test_cgrp2_attach2.c
@@ -209,7 +209,7 @@ static int map_fd = -1;
static int prog_load_cnt(int verdict, int val)
{
- int cgroup_storage_fd;
+ int cgroup_storage_fd, percpu_cgroup_storage_fd;
if (map_fd < 0)
map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
@@ -225,6 +225,14 @@ static int prog_load_cnt(int verdict, int val)
return -1;
}
+ percpu_cgroup_storage_fd = bpf_create_map(
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ sizeof(struct bpf_cgroup_storage_key), 8, 0, 0);
+ if (percpu_cgroup_storage_fd < 0) {
+ printf("failed to create map '%s'\n", strerror(errno));
+ return -1;
+ }
+
struct bpf_insn prog[] = {
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
@@ -235,11 +243,20 @@ static int prog_load_cnt(int verdict, int val)
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+
BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_MOV64_IMM(BPF_REG_1, val),
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
+
+ BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+
BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
BPF_EXIT_INSN(),
};
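The six instructions appended above do for the new per-CPU map what the earlier block does for the shared cgroup storage, minus the atomic. Roughly, as C (the helper is BPF_FUNC_get_local_storage; the map symbol here is hypothetical):

	__u32 *cnt = bpf_get_local_storage(&percpu_cgroup_storage, 0);

	(*cnt)++;	/* per-CPU slot, so a plain load/add/store suffices;
			 * the shared-storage path above needs BPF_XADD */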
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c
index 4be4874ca2bc..2259f997a26c 100644
--- a/samples/bpf/test_current_task_under_cgroup_user.c
+++ b/samples/bpf/test_current_task_under_cgroup_user.c
@@ -11,7 +11,6 @@
#include <unistd.h>
#include <bpf/bpf.h>
#include "bpf_load.h"
-#include <linux/bpf.h>
#include "cgroup_helpers.h"
#define CGROUP_PATH "/my-cgroup"
diff --git a/samples/bpf/tracex3_user.c b/samples/bpf/tracex3_user.c
index 6c6b10f4c3ee..56466d010139 100644
--- a/samples/bpf/tracex3_user.c
+++ b/samples/bpf/tracex3_user.c
@@ -17,8 +17,6 @@
#include "bpf_load.h"
#include "bpf_util.h"
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
-
#define SLOTS 100
static void clear_stats(int fd)
diff --git a/samples/bpf/xdpsock_kern.c b/samples/bpf/xdpsock_kern.c
index d8806c41362e..b8ccd0802b3f 100644
--- a/samples/bpf/xdpsock_kern.c
+++ b/samples/bpf/xdpsock_kern.c
@@ -16,7 +16,7 @@ struct bpf_map_def SEC("maps") xsks_map = {
.type = BPF_MAP_TYPE_XSKMAP,
.key_size = sizeof(int),
.value_size = sizeof(int),
- .max_entries = 4,
+ .max_entries = MAX_SOCKS,
};
struct bpf_map_def SEC("maps") rr_map = {
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 4914788b6727..57ecadc58403 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -118,7 +118,6 @@ struct xdpsock {
unsigned long prev_tx_npkts;
};
-#define MAX_SOCKS 4
static int num_socks;
struct xdpsock *xsks[MAX_SOCKS];
@@ -596,7 +595,7 @@ static void dump_stats(void)
prev_time = now;
- for (i = 0; i < num_socks; i++) {
+ for (i = 0; i < num_socks && xsks[i]; i++) {
char *fmt = "%-15s %'-11.0f %'-11lu\n";
double rx_pps, tx_pps;
@@ -649,6 +648,8 @@ static struct option long_options[] = {
{"xdp-skb", no_argument, 0, 'S'},
{"xdp-native", no_argument, 0, 'N'},
{"interval", required_argument, 0, 'n'},
+ {"zero-copy", no_argument, 0, 'z'},
+ {"copy", no_argument, 0, 'c'},
{0, 0, 0, 0}
};
@@ -667,6 +668,8 @@ static void usage(const char *prog)
" -S, --xdp-skb=n Use XDP skb-mod\n"
" -N, --xdp-native=n Enfore XDP native mode\n"
" -n, --interval=n Specify statistics update interval (default 1 sec).\n"
+ " -z, --zero-copy Force zero-copy mode.\n"
+ " -c, --copy Force copy mode.\n"
"\n";
fprintf(stderr, str, prog);
exit(EXIT_FAILURE);
@@ -679,7 +682,7 @@ static void parse_command_line(int argc, char **argv)
opterr = 0;
for (;;) {
- c = getopt_long(argc, argv, "rtli:q:psSNn:", long_options,
+ c = getopt_long(argc, argv, "rtli:q:psSNn:cz", long_options,
&option_index);
if (c == -1)
break;
@@ -716,6 +719,12 @@ static void parse_command_line(int argc, char **argv)
case 'n':
opt_interval = atoi(optarg);
break;
+ case 'z':
+ opt_xdp_bind_flags |= XDP_ZEROCOPY;
+ break;
+ case 'c':
+ opt_xdp_bind_flags |= XDP_COPY;
+ break;
default:
usage(basename(argv[0]));
}
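The new -z and -c switches only set bits in opt_xdp_bind_flags; those bits reach the kernel when the sample bind()s its AF_XDP socket. XDP_ZEROCOPY fails on drivers without zero-copy support, while XDP_COPY always works. A sketch of where the flags end up (struct and flags from <linux/if_xdp.h>; the opt_* variables are assumed to mirror the sample's own option globals):

	struct sockaddr_xdp sxdp = {
		.sxdp_family   = AF_XDP,
		.sxdp_ifindex  = opt_ifindex,
		.sxdp_queue_id = opt_queue,
		.sxdp_flags    = opt_xdp_bind_flags,	/* XDP_COPY or XDP_ZEROCOPY */
	};

	bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp));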
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index ce53639a864a..8aeb60eb6ee3 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -115,7 +115,9 @@ __cc-option = $(call try-run,\
# Do not attempt to build with gcc plugins during cc-option tests.
# (And this uses delayed resolution so the flags will be up to date.)
-CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+# In addition, do not include the asm macros which are built later.
+CC_OPTION_FILTERED = $(GCC_PLUGINS_CFLAGS) $(ASM_MACRO_FLAGS)
+CC_OPTION_CFLAGS = $(filter-out $(CC_OPTION_FILTERED),$(KBUILD_CFLAGS))
# cc-option
# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 5a2d1c9578a0..54da4b070db3 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -219,7 +219,7 @@ else
sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
"$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
"$(if $(CONFIG_64BIT),64,32)" \
- "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \
+ "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)" \
"$(LD) $(KBUILD_LDFLAGS)" "$(NM)" "$(RM)" "$(MV)" \
"$(if $(part-of-module),1,0)" "$(@)";
recordmcount_source := $(srctree)/scripts/recordmcount.pl
diff --git a/scripts/check_00index.sh b/scripts/check_00index.sh
deleted file mode 100755
index aa47f5926c80..000000000000
--- a/scripts/check_00index.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-cd Documentation/
-
-# Check entries that should be removed
-
-obsolete=""
-for i in $(tail -n +12 00-INDEX |grep -E '^[a-zA-Z0-9]+'); do
- if [ ! -e $i ]; then
- obsolete="$obsolete $i"
- fi
-done
-
-# Check directory entries that should be added
-search=""
-dir=""
-for i in $(find . -maxdepth 1 -type d); do
- if [ "$i" != "." ]; then
- new=$(echo $i|perl -ne 's,./(.*),$1/,; print $_')
- search="$search $new"
- fi
-done
-
-for i in $search; do
- if [ "$(grep -P "^$i" 00-INDEX)" == "" ]; then
- dir="$dir $i"
- fi
-done
-
-# Check file entries that should be added
-search=""
-file=""
-for i in $(find . -maxdepth 1 -type f); do
- if [ "$i" != "./.gitignore" ]; then
- new=$(echo $i|perl -ne 's,./(.*),$1,; print $_')
- search="$search $new"
- fi
-done
-
-for i in $search; do
- if [ "$(grep -P "^$i\$" 00-INDEX)" == "" ]; then
- file="$file $i"
- fi
-done
-
-# Output its findings
-
-echo -e "Documentation/00-INDEX check results:\n"
-
-if [ "$obsolete" != "" ]; then
- echo -e "- Should remove those entries:\n\t$obsolete\n"
-else
- echo -e "- No obsolete entries\n"
-fi
-
-if [ "$dir" != "" ]; then
- echo -e "- Should document those directories:\n\t$dir\n"
-else
- echo -e "- No new directories to add\n"
-fi
-
-if [ "$file" != "" ]; then
- echo -e "- Should document those files:\n\t$file"
-else
- echo "- No new files to add"
-fi
diff --git a/scripts/extract-vmlinux b/scripts/extract-vmlinux
index e6239f39abad..85e1f32fb4a0 100755
--- a/scripts/extract-vmlinux
+++ b/scripts/extract-vmlinux
@@ -48,9 +48,6 @@ fi
tmp=$(mktemp /tmp/vmlinux-XXX)
trap "rm -f $tmp" 0
-# Initial attempt for uncompressed images or objects:
-check_vmlinux $img
-
# That didn't work, so retry after decompression.
try_decompress '\037\213\010' xy gunzip
try_decompress '\3757zXZ\000' abcde unxz
@@ -60,5 +57,8 @@ try_decompress '\211\114\132' xy 'lzop -d'
try_decompress '\002!L\030' xxx 'lz4 -d'
try_decompress '(\265/\375' xxx unzstd
+# Finally check for uncompressed images or objects:
+check_vmlinux $img
+
# Bail out:
echo "$me: Cannot find vmlinux." >&2
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 8f0f508a78e9..ffbe901a37b5 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1904,13 +1904,13 @@ sub process_name($$) {
++$warnings;
}
- if ($identifier =~ m/^struct/) {
+ if ($identifier =~ m/^struct\b/) {
$decl_type = 'struct';
- } elsif ($identifier =~ m/^union/) {
+ } elsif ($identifier =~ m/^union\b/) {
$decl_type = 'union';
- } elsif ($identifier =~ m/^enum/) {
+ } elsif ($identifier =~ m/^enum\b/) {
$decl_type = 'enum';
- } elsif ($identifier =~ m/^typedef/) {
+ } elsif ($identifier =~ m/^typedef\b/) {
$decl_type = 'typedef';
} else {
$decl_type = 'function';
diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile
index 42c5d50f2bcc..a5b4af47987a 100644
--- a/scripts/mod/Makefile
+++ b/scripts/mod/Makefile
@@ -4,6 +4,8 @@ OBJECT_FILES_NON_STANDARD := y
hostprogs-y := modpost mk_elfconfig
always := $(hostprogs-y) empty.o
+CFLAGS_REMOVE_empty.o := $(ASM_MACRO_FLAGS)
+
modpost-objs := modpost.o file2alias.o sumversion.o
devicetable-offsets-file := devicetable-offsets.h
diff --git a/scripts/subarch.include b/scripts/subarch.include
new file mode 100644
index 000000000000..650682821126
--- /dev/null
+++ b/scripts/subarch.include
@@ -0,0 +1,13 @@
+# SUBARCH tells the usermode build what the underlying arch is. That is set
+# first, and if a usermode build is happening, the "ARCH=um" on the command
+# line overrides the setting of ARCH below. If a native build is happening,
+# then ARCH is assigned, getting whatever value it gets normally, and
+# SUBARCH is subsequently ignored.
+
+SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
+ -e s/sun4u/sparc64/ \
+ -e s/arm.*/arm/ -e s/sa110/arm/ \
+ -e s/s390x/s390/ -e s/parisc64/parisc/ \
+ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
+ -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
+ -e s/riscv.*/riscv/)
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 8b8b70620bbe..aa35939443c4 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -732,7 +732,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
return error;
}
-static int apparmor_task_kill(struct task_struct *target, struct siginfo *info,
+static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo *info,
int sig, const struct cred *cred)
{
struct aa_label *cl, *tl;
@@ -1606,4 +1606,7 @@ alloc_out:
return error;
}
-security_initcall(apparmor_init);
+DEFINE_LSM(apparmor) = {
+ .name = "apparmor",
+ .init = apparmor_init,
+};
diff --git a/security/commoncap.c b/security/commoncap.c
index 2e489d6a3ac8..18a4fdf6f6eb 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -684,9 +684,6 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_f
}
rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_fcap);
- if (rc == -EINVAL)
- printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
- __func__, rc, bprm->filename);
out:
if (rc)
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 9bb0a7f2863e..5eacba858e4b 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -26,7 +26,7 @@
static struct key *keyring[INTEGRITY_KEYRING_MAX];
-static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
+static const char * const keyring_name[INTEGRITY_KEYRING_MAX] = {
#ifndef CONFIG_INTEGRITY_TRUSTED_KEYRING
"_evm",
"_ima",
@@ -37,12 +37,6 @@ static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
"_module",
};
-#ifdef CONFIG_INTEGRITY_TRUSTED_KEYRING
-static bool init_keyring __initdata = true;
-#else
-static bool init_keyring __initdata;
-#endif
-
#ifdef CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
#define restrict_link_to_ima restrict_link_by_builtin_and_secondary_trusted
#else
@@ -85,7 +79,7 @@ int __init integrity_init_keyring(const unsigned int id)
struct key_restriction *restriction;
int err = 0;
- if (!init_keyring)
+ if (!IS_ENABLED(CONFIG_INTEGRITY_TRUSTED_KEYRING))
return 0;
restriction = kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 8a3905bb02c7..8c25f949ebdb 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -27,7 +27,7 @@
#define EVMKEY "evm-key"
#define MAX_KEY_SIZE 128
static unsigned char evmkey[MAX_KEY_SIZE];
-static int evmkey_len = MAX_KEY_SIZE;
+static const int evmkey_len = MAX_KEY_SIZE;
struct crypto_shash *hmac_tfm;
static struct crypto_shash *evm_tfm[HASH_ALGO__LAST];
@@ -38,7 +38,7 @@ static DEFINE_MUTEX(mutex);
static unsigned long evm_set_key_flags;
-static char * const evm_hmac = "hmac(sha1)";
+static const char evm_hmac[] = "hmac(sha1)";
/**
* evm_set_key() - set EVM HMAC key from the kernel
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 5a6810041e5c..1ea05da2323d 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -22,6 +22,7 @@
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/security.h>
+#include <linux/lsm_hooks.h>
#include "integrity.h"
static struct rb_root integrity_iint_tree = RB_ROOT;
@@ -174,7 +175,10 @@ static int __init integrity_iintcache_init(void)
0, SLAB_PANIC, init_once);
return 0;
}
-security_initcall(integrity_iintcache_init);
+DEFINE_LSM(integrity) = {
+ .name = "integrity",
+ .init = integrity_iintcache_init,
+};
/*
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 67db9d9454ca..cc12f3449a72 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -88,7 +88,7 @@ struct ima_template_desc {
char *name;
char *fmt;
int num_fields;
- struct ima_template_field **fields;
+ const struct ima_template_field **fields;
};
struct ima_template_entry {
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index a02c5acfd403..99dd1d53fc35 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -51,7 +51,8 @@ int ima_alloc_init_template(struct ima_event_data *event_data,
(*entry)->template_desc = template_desc;
for (i = 0; i < template_desc->num_fields; i++) {
- struct ima_template_field *field = template_desc->fields[i];
+ const struct ima_template_field *field =
+ template_desc->fields[i];
u32 len;
result = field->field_init(event_data,
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 7e7e7e7c250a..d9e7728027c6 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -210,7 +210,7 @@ static int ima_calc_file_hash_atfm(struct file *file,
{
loff_t i_size, offset;
char *rbuf[2] = { NULL, };
- int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
+ int rc, rbuf_len, active = 0, ahash_rc = 0;
struct ahash_request *req;
struct scatterlist sg[1];
struct crypto_wait wait;
@@ -257,11 +257,6 @@ static int ima_calc_file_hash_atfm(struct file *file,
&rbuf_size[1], 0);
}
- if (!(file->f_mode & FMODE_READ)) {
- file->f_mode |= FMODE_READ;
- read = 1;
- }
-
for (offset = 0; offset < i_size; offset += rbuf_len) {
if (!rbuf[1] && offset) {
/* Not using two buffers, and it is not the first
@@ -300,8 +295,6 @@ static int ima_calc_file_hash_atfm(struct file *file,
/* wait for the last update request to complete */
rc = ahash_wait(ahash_rc, &wait);
out3:
- if (read)
- file->f_mode &= ~FMODE_READ;
ima_free_pages(rbuf[0], rbuf_size[0]);
ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
@@ -336,7 +329,7 @@ static int ima_calc_file_hash_tfm(struct file *file,
{
loff_t i_size, offset = 0;
char *rbuf;
- int rc, read = 0;
+ int rc;
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
@@ -357,11 +350,6 @@ static int ima_calc_file_hash_tfm(struct file *file,
if (!rbuf)
return -ENOMEM;
- if (!(file->f_mode & FMODE_READ)) {
- file->f_mode |= FMODE_READ;
- read = 1;
- }
-
while (offset < i_size) {
int rbuf_len;
@@ -378,8 +366,6 @@ static int ima_calc_file_hash_tfm(struct file *file,
if (rc)
break;
}
- if (read)
- file->f_mode &= ~FMODE_READ;
kfree(rbuf);
out:
if (!rc)
@@ -420,6 +406,8 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
loff_t i_size;
int rc;
+ struct file *f = file;
+ bool new_file_instance = false, modified_flags = false;
/*
* For consistency, fail file's opened with the O_DIRECT flag on
@@ -431,15 +419,41 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
return -EINVAL;
}
- i_size = i_size_read(file_inode(file));
+ /* Open a new file instance in O_RDONLY if we cannot read */
+ if (!(file->f_mode & FMODE_READ)) {
+ int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
+ O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
+ flags |= O_RDONLY;
+ f = dentry_open(&file->f_path, flags, file->f_cred);
+ if (IS_ERR(f)) {
+ /*
+ * Cannot open the file again; let's modify f_flags
+ * of the original and continue
+ */
+ pr_info_ratelimited("Unable to reopen file for reading.\n");
+ f = file;
+ f->f_flags |= FMODE_READ;
+ modified_flags = true;
+ } else {
+ new_file_instance = true;
+ }
+ }
+
+ i_size = i_size_read(file_inode(f));
if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
- rc = ima_calc_file_ahash(file, hash);
+ rc = ima_calc_file_ahash(f, hash);
if (!rc)
- return 0;
+ goto out;
}
- return ima_calc_file_shash(file, hash);
+ rc = ima_calc_file_shash(f, hash);
+out:
+ if (new_file_instance)
+ fput(f);
+ else if (modified_flags)
+ f->f_flags &= ~FMODE_READ;
+ return rc;
}
/*
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index ae9d5c766a3c..3183cc23d0f8 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -42,14 +42,14 @@ static int __init default_canonical_fmt_setup(char *str)
__setup("ima_canonical_fmt", default_canonical_fmt_setup);
static int valid_policy = 1;
-#define TMPBUFLEN 12
+
static ssize_t ima_show_htable_value(char __user *buf, size_t count,
loff_t *ppos, atomic_long_t *val)
{
- char tmpbuf[TMPBUFLEN];
+ char tmpbuf[32]; /* greater than largest 'long' string value */
ssize_t len;
- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
+ len = scnprintf(tmpbuf, sizeof(tmpbuf), "%li\n", atomic_long_read(val));
return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
}
@@ -179,7 +179,8 @@ int ima_measurements_show(struct seq_file *m, void *v)
/* 6th: template specific data */
for (i = 0; i < e->template_desc->num_fields; i++) {
enum ima_show_type show = IMA_SHOW_BINARY;
- struct ima_template_field *field = e->template_desc->fields[i];
+ const struct ima_template_field *field =
+ e->template_desc->fields[i];
if (is_ima_template && strcmp(field->field_id, "d") == 0)
show = IMA_SHOW_BINARY_NO_FIELD_LEN;
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index faac9ecaa0ae..59d834219cd6 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -25,7 +25,7 @@
#include "ima.h"
/* name for boot aggregate entry */
-static const char *boot_aggregate_name = "boot_aggregate";
+static const char boot_aggregate_name[] = "boot_aggregate";
struct tpm_chip *ima_tpm_chip;
/* Add the boot aggregate to the IMA measurement list and extend
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 2d31921fbda4..1b88d58e1325 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -440,7 +440,7 @@ int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
return 0;
}
-static int read_idmap[READING_MAX_ID] = {
+static const int read_idmap[READING_MAX_ID] = {
[READING_FIRMWARE] = FIRMWARE_CHECK,
[READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK,
[READING_MODULE] = MODULE_CHECK,
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index 30db39b23804..b631b8bc7624 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -32,7 +32,7 @@ static struct ima_template_desc builtin_templates[] = {
static LIST_HEAD(defined_templates);
static DEFINE_SPINLOCK(template_list);
-static struct ima_template_field supported_fields[] = {
+static const struct ima_template_field supported_fields[] = {
{.field_id = "d", .field_init = ima_eventdigest_init,
.field_show = ima_show_template_digest},
{.field_id = "n", .field_init = ima_eventname_init,
@@ -49,7 +49,7 @@ static struct ima_template_field supported_fields[] = {
static struct ima_template_desc *ima_template;
static struct ima_template_desc *lookup_template_desc(const char *name);
static int template_desc_init_fields(const char *template_fmt,
- struct ima_template_field ***fields,
+ const struct ima_template_field ***fields,
int *num_fields);
static int __init ima_template_setup(char *str)
@@ -125,7 +125,8 @@ static struct ima_template_desc *lookup_template_desc(const char *name)
return found ? template_desc : NULL;
}
-static struct ima_template_field *lookup_template_field(const char *field_id)
+static const struct ima_template_field *
+lookup_template_field(const char *field_id)
{
int i;
@@ -153,11 +154,11 @@ static int template_fmt_size(const char *template_fmt)
}
static int template_desc_init_fields(const char *template_fmt,
- struct ima_template_field ***fields,
+ const struct ima_template_field ***fields,
int *num_fields)
{
const char *template_fmt_ptr;
- struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
+ const struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
int template_num_fields;
int i, len;
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 3b602a1e27fa..711e89d8c415 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
}
dh_inputs.g_size = dlen;
- dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
+ dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
if (dlen < 0) {
ret = dlen;
goto out2;
diff --git a/security/loadpin/Kconfig b/security/loadpin/Kconfig
index dd01aa91e521..a0d70d82b98e 100644
--- a/security/loadpin/Kconfig
+++ b/security/loadpin/Kconfig
@@ -10,10 +10,10 @@ config SECURITY_LOADPIN
have a root filesystem backed by a read-only device such as
dm-verity or a CDROM.
-config SECURITY_LOADPIN_ENABLED
+config SECURITY_LOADPIN_ENFORCE
bool "Enforce LoadPin at boot"
depends on SECURITY_LOADPIN
help
If selected, LoadPin will enforce pinning at boot. If not
selected, it can be enabled at boot with the kernel parameter
- "loadpin.enabled=1".
+ "loadpin.enforce=1".
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index 0716af28808a..48f39631b370 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -44,7 +44,7 @@ static void report_load(const char *origin, struct file *file, char *operation)
kfree(pathname);
}
-static int enabled = IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENABLED);
+static int enforce = IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE);
static struct super_block *pinned_root;
static DEFINE_SPINLOCK(pinned_root_spinlock);
@@ -60,8 +60,8 @@ static struct ctl_path loadpin_sysctl_path[] = {
static struct ctl_table loadpin_sysctl_table[] = {
{
- .procname = "enabled",
- .data = &enabled,
+ .procname = "enforce",
+ .data = &enforce,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -84,8 +84,11 @@ static void check_pinning_enforcement(struct super_block *mnt_sb)
* device, allow sysctl to change modes for testing.
*/
if (mnt_sb->s_bdev) {
+ char bdev[BDEVNAME_SIZE];
+
ro = bdev_read_only(mnt_sb->s_bdev);
- pr_info("dev(%u,%u): %s\n",
+ bdevname(mnt_sb->s_bdev, bdev);
+ pr_info("%s (%u:%u): %s\n", bdev,
MAJOR(mnt_sb->s_bdev->bd_dev),
MINOR(mnt_sb->s_bdev->bd_dev),
ro ? "read-only" : "writable");
@@ -97,7 +100,7 @@ static void check_pinning_enforcement(struct super_block *mnt_sb)
loadpin_sysctl_table))
pr_notice("sysctl registration failed!\n");
else
- pr_info("load pinning can be disabled.\n");
+ pr_info("enforcement can be disabled.\n");
} else
pr_info("load pinning engaged.\n");
}
@@ -128,7 +131,7 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id)
/* This handles the older init_module API that has a NULL file. */
if (!file) {
- if (!enabled) {
+ if (!enforce) {
report_load(origin, NULL, "old-api-pinning-ignored");
return 0;
}
@@ -151,7 +154,7 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id)
* Unlock now since it's only pinned_root we care about.
* In the worst case, we will (correctly) report pinning
* failures before we have announced that pinning is
- * enabled. This would be purely cosmetic.
+ * enforcing. This would be purely cosmetic.
*/
spin_unlock(&pinned_root_spinlock);
check_pinning_enforcement(pinned_root);
@@ -161,7 +164,7 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id)
}
if (IS_ERR_OR_NULL(pinned_root) || load_root != pinned_root) {
- if (unlikely(!enabled)) {
+ if (unlikely(!enforce)) {
report_load(origin, file, "pinning-ignored");
return 0;
}
@@ -186,10 +189,11 @@ static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = {
void __init loadpin_add_hooks(void)
{
- pr_info("ready to pin (currently %sabled)", enabled ? "en" : "dis");
+ pr_info("ready to pin (currently %senforcing)\n",
+ enforce ? "" : "not ");
security_add_hooks(loadpin_hooks, ARRAY_SIZE(loadpin_hooks), "loadpin");
}
/* Should not be mutable after boot, so not listed in sysfs (perm == 0). */
-module_param(enabled, int, 0);
-MODULE_PARM_DESC(enabled, "Pin module/firmware loading (default: true)");
+module_param(enforce, int, 0);
+MODULE_PARM_DESC(enforce, "Enforce module/firmware pinning");
diff --git a/security/security.c b/security/security.c
index 736e78da1ab9..04d173eb93f6 100644
--- a/security/security.c
+++ b/security/security.c
@@ -12,6 +12,8 @@
* (at your option) any later version.
*/
+#define pr_fmt(fmt) "LSM: " fmt
+
#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/dcache.h>
@@ -30,8 +32,6 @@
#include <linux/string.h>
#include <net/flow.h>
-#include <trace/events/initcall.h>
-
#define MAX_LSM_EVM_XATTR 2
/* Maximum number of letters for an LSM name string */
@@ -45,20 +45,22 @@ char *lsm_names;
static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
CONFIG_DEFAULT_SECURITY;
-static void __init do_security_initcalls(void)
+static __initdata bool debug;
+#define init_debug(...) \
+ do { \
+ if (debug) \
+ pr_info(__VA_ARGS__); \
+ } while (0)
+
+static void __init major_lsm_init(void)
{
+ struct lsm_info *lsm;
int ret;
- initcall_t call;
- initcall_entry_t *ce;
-
- ce = __security_initcall_start;
- trace_initcall_level("security");
- while (ce < __security_initcall_end) {
- call = initcall_from_entry(ce);
- trace_initcall_start(call);
- ret = call();
- trace_initcall_finish(call, ret);
- ce++;
+
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ init_debug("initializing %s\n", lsm->name);
+ ret = lsm->init();
+ WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
}
}
@@ -72,10 +74,11 @@ int __init security_init(void)
int i;
struct hlist_head *list = (struct hlist_head *) &security_hook_heads;
+ pr_info("Security Framework initializing\n");
+
for (i = 0; i < sizeof(security_hook_heads) / sizeof(struct hlist_head);
i++)
INIT_HLIST_HEAD(&list[i]);
- pr_info("Security Framework initialized\n");
/*
* Load minor LSMs, with the capability module always first.
@@ -87,7 +90,7 @@ int __init security_init(void)
/*
* Load all the remaining security modules.
*/
- do_security_initcalls();
+ major_lsm_init();
return 0;
}
@@ -100,6 +103,14 @@ static int __init choose_lsm(char *str)
}
__setup("security=", choose_lsm);
+/* Enable LSM order debugging. */
+static int __init enable_debug(char *str)
+{
+ debug = true;
+ return 1;
+}
+__setup("lsm.debug", enable_debug);
+
static bool match_last_lsm(const char *list, const char *lsm)
{
const char *last;
@@ -1147,7 +1158,7 @@ int security_task_movememory(struct task_struct *p)
return call_int_hook(task_movememory, 0, p);
}
-int security_task_kill(struct task_struct *p, struct siginfo *info,
+int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int sig, const struct cred *cred)
{
return call_int_hook(task_kill, 0, p, info, sig, cred);
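major_lsm_init() above replaces the old security initcall walk with a linker-section table bounded by __start_lsm_info and __end_lsm_info; every DEFINE_LSM() block in this series (apparmor, integrity, selinux, smack, tomoyo) contributes one entry. A minimal sketch of the record being iterated, with the two fields the initializers above actually set (the real definition lives in include/linux/lsm_hooks.h):

	struct lsm_info {
		const char *name;	/* e.g. "selinux"; printed by init_debug() */
		int (*init)(void);	/* body of the old security_initcall */
	};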
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index ad9a9b8e9979..7ce683259357 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1508,6 +1508,11 @@ static int selinux_genfs_get_sid(struct dentry *dentry,
}
rc = security_genfs_sid(&selinux_state, sb->s_type->name,
path, tclass, sid);
+ if (rc == -ENOENT) {
+ /* No match in policy, mark as unlabeled. */
+ *sid = SECINITSID_UNLABELED;
+ rc = 0;
+ }
}
free_page((unsigned long)buffer);
return rc;
@@ -4186,7 +4191,7 @@ static int selinux_task_movememory(struct task_struct *p)
PROCESS__SETSCHED, NULL);
}
-static int selinux_task_kill(struct task_struct *p, struct siginfo *info,
+static int selinux_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int sig, const struct cred *cred)
{
u32 secid;
@@ -7202,7 +7207,10 @@ void selinux_complete_init(void)
/* SELinux requires early initialization in order to label
all processes and objects when they are created. */
-security_initcall(selinux_init);
+DEFINE_LSM(selinux) = {
+ .name = "selinux",
+ .init = selinux_init,
+};
#if defined(CONFIG_NETFILTER)
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 39475fb455bc..2fe459df3c85 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -218,9 +218,7 @@ int mls_context_isvalid(struct policydb *p, struct context *c)
/*
* Set the MLS fields in the security context structure
* `context' based on the string representation in
- * the string `*scontext'. Update `*scontext' to
- * point to the end of the string representation of
- * the MLS fields.
+ * the string `scontext'.
*
* This function modifies the string in place, inserting
* NULL characters to terminate the MLS fields.
@@ -235,22 +233,21 @@ int mls_context_isvalid(struct policydb *p, struct context *c)
*/
int mls_context_to_sid(struct policydb *pol,
char oldc,
- char **scontext,
+ char *scontext,
struct context *context,
struct sidtab *s,
u32 def_sid)
{
-
- char delim;
- char *scontextp, *p, *rngptr;
+ char *sensitivity, *cur_cat, *next_cat, *rngptr;
struct level_datum *levdatum;
struct cat_datum *catdatum, *rngdatum;
- int l, rc = -EINVAL;
+ int l, rc, i;
+ char *rangep[2];
if (!pol->mls_enabled) {
- if (def_sid != SECSID_NULL && oldc)
- *scontext += strlen(*scontext) + 1;
- return 0;
+ if ((def_sid != SECSID_NULL && oldc) || (*scontext) == '\0')
+ return 0;
+ return -EINVAL;
}
/*
@@ -261,113 +258,94 @@ int mls_context_to_sid(struct policydb *pol,
struct context *defcon;
if (def_sid == SECSID_NULL)
- goto out;
+ return -EINVAL;
defcon = sidtab_search(s, def_sid);
if (!defcon)
- goto out;
+ return -EINVAL;
- rc = mls_context_cpy(context, defcon);
- goto out;
+ return mls_context_cpy(context, defcon);
}
- /* Extract low sensitivity. */
- scontextp = p = *scontext;
- while (*p && *p != ':' && *p != '-')
- p++;
-
- delim = *p;
- if (delim != '\0')
- *p++ = '\0';
+ /*
+ * If we're dealing with a range, figure out where the two parts
+ * of the range begin.
+ */
+ rangep[0] = scontext;
+ rangep[1] = strchr(scontext, '-');
+ if (rangep[1]) {
+ rangep[1][0] = '\0';
+ rangep[1]++;
+ }
+ /* For each part of the range: */
for (l = 0; l < 2; l++) {
- levdatum = hashtab_search(pol->p_levels.table, scontextp);
- if (!levdatum) {
- rc = -EINVAL;
- goto out;
- }
+ /* Split sensitivity and category set. */
+ sensitivity = rangep[l];
+ if (sensitivity == NULL)
+ break;
+ next_cat = strchr(sensitivity, ':');
+ if (next_cat)
+ *(next_cat++) = '\0';
+ /* Parse sensitivity. */
+ levdatum = hashtab_search(pol->p_levels.table, sensitivity);
+ if (!levdatum)
+ return -EINVAL;
context->range.level[l].sens = levdatum->level->sens;
- if (delim == ':') {
- /* Extract category set. */
- while (1) {
- scontextp = p;
- while (*p && *p != ',' && *p != '-')
- p++;
- delim = *p;
- if (delim != '\0')
- *p++ = '\0';
-
- /* Separate into range if exists */
- rngptr = strchr(scontextp, '.');
- if (rngptr != NULL) {
- /* Remove '.' */
- *rngptr++ = '\0';
- }
+ /* Extract category set. */
+ while (next_cat != NULL) {
+ cur_cat = next_cat;
+ next_cat = strchr(next_cat, ',');
+ if (next_cat != NULL)
+ *(next_cat++) = '\0';
+
+ /* Separate into range if exists */
+ rngptr = strchr(cur_cat, '.');
+ if (rngptr != NULL) {
+ /* Remove '.' */
+ *rngptr++ = '\0';
+ }
- catdatum = hashtab_search(pol->p_cats.table,
- scontextp);
- if (!catdatum) {
- rc = -EINVAL;
- goto out;
- }
+ catdatum = hashtab_search(pol->p_cats.table, cur_cat);
+ if (!catdatum)
+ return -EINVAL;
- rc = ebitmap_set_bit(&context->range.level[l].cat,
- catdatum->value - 1, 1);
- if (rc)
- goto out;
-
- /* If range, set all categories in range */
- if (rngptr) {
- int i;
-
- rngdatum = hashtab_search(pol->p_cats.table, rngptr);
- if (!rngdatum) {
- rc = -EINVAL;
- goto out;
- }
-
- if (catdatum->value >= rngdatum->value) {
- rc = -EINVAL;
- goto out;
- }
-
- for (i = catdatum->value; i < rngdatum->value; i++) {
- rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1);
- if (rc)
- goto out;
- }
- }
+ rc = ebitmap_set_bit(&context->range.level[l].cat,
+ catdatum->value - 1, 1);
+ if (rc)
+ return rc;
+
+ /* If range, set all categories in range */
+ if (rngptr == NULL)
+ continue;
+
+ rngdatum = hashtab_search(pol->p_cats.table, rngptr);
+ if (!rngdatum)
+ return -EINVAL;
+
+ if (catdatum->value >= rngdatum->value)
+ return -EINVAL;
- if (delim != ',')
- break;
+ for (i = catdatum->value; i < rngdatum->value; i++) {
+ rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1);
+ if (rc)
+ return rc;
}
}
- if (delim == '-') {
- /* Extract high sensitivity. */
- scontextp = p;
- while (*p && *p != ':')
- p++;
-
- delim = *p;
- if (delim != '\0')
- *p++ = '\0';
- } else
- break;
}
- if (l == 0) {
+ /* If we didn't see a '-', the range start is also the range end. */
+ if (rangep[1] == NULL) {
context->range.level[1].sens = context->range.level[0].sens;
rc = ebitmap_cpy(&context->range.level[1].cat,
&context->range.level[0].cat);
if (rc)
- goto out;
+ return rc;
}
- *scontext = ++p;
- rc = 0;
-out:
- return rc;
+
+ return 0;
}
/*
@@ -379,21 +357,19 @@ out:
int mls_from_string(struct policydb *p, char *str, struct context *context,
gfp_t gfp_mask)
{
- char *tmpstr, *freestr;
+ char *tmpstr;
int rc;
if (!p->mls_enabled)
return -EINVAL;
- /* we need freestr because mls_context_to_sid will change
- the value of tmpstr */
- tmpstr = freestr = kstrdup(str, gfp_mask);
+ tmpstr = kstrdup(str, gfp_mask);
if (!tmpstr) {
rc = -ENOMEM;
} else {
- rc = mls_context_to_sid(p, ':', &tmpstr, context,
+ rc = mls_context_to_sid(p, ':', tmpstr, context,
NULL, SECSID_NULL);
- kfree(freestr);
+ kfree(tmpstr);
}
return rc;
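For orientation, the strings the rewritten parser consumes are the MLS suffix of an SELinux security context; the '-' split, the ':' split, the ',' walk and the '.' expansion above map one-to-one onto this grammar. Illustrative inputs (examples, not from the patch):

	/*
	 *   "s0"                    - one sensitivity, no categories
	 *   "s0:c0,c3"              - sensitivity plus a category set
	 *   "s0:c0.c5-s1:c0.c1023"  - low-high range; '.' spans categories
	 */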
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h
index 9a3ff7af70ad..67093647576d 100644
--- a/security/selinux/ss/mls.h
+++ b/security/selinux/ss/mls.h
@@ -34,7 +34,7 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l);
int mls_context_to_sid(struct policydb *p,
char oldc,
- char **scontext,
+ char *scontext,
struct context *context,
struct sidtab *s,
u32 def_sid);
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index e9394e7adc84..f4eadd3f7350 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -1101,7 +1101,7 @@ static int str_read(char **strp, gfp_t flags, void *fp, u32 len)
if ((len == 0) || (len == (u32)-1))
return -EINVAL;
- str = kmalloc(len + 1, flags);
+ str = kmalloc(len + 1, flags | __GFP_NOWARN);
if (!str)
return -ENOMEM;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index f3def298a90e..12e414394530 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1365,7 +1365,6 @@ int security_sid_to_context_force(struct selinux_state *state, u32 sid,
static int string_to_context_struct(struct policydb *pol,
struct sidtab *sidtabp,
char *scontext,
- u32 scontext_len,
struct context *ctx,
u32 def_sid)
{
@@ -1426,15 +1425,12 @@ static int string_to_context_struct(struct policydb *pol,
ctx->type = typdatum->value;
- rc = mls_context_to_sid(pol, oldc, &p, ctx, sidtabp, def_sid);
+ rc = mls_context_to_sid(pol, oldc, p, ctx, sidtabp, def_sid);
if (rc)
goto out;
- rc = -EINVAL;
- if ((p - scontext) < scontext_len)
- goto out;
-
/* Check the validity of the new context. */
+ rc = -EINVAL;
if (!policydb_context_isvalid(pol, ctx))
goto out;
rc = 0;
@@ -1489,7 +1485,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
policydb = &state->ss->policydb;
sidtab = &state->ss->sidtab;
rc = string_to_context_struct(policydb, sidtab, scontext2,
- scontext_len, &context, def_sid);
+ &context, def_sid);
if (rc == -EINVAL && force) {
context.str = str;
context.len = strlen(str) + 1;
@@ -1958,7 +1954,7 @@ static int convert_context(u32 key,
goto out;
rc = string_to_context_struct(args->newp, NULL, s,
- c->len, &ctx, SECSID_NULL);
+ &ctx, SECSID_NULL);
kfree(s);
if (!rc) {
pr_info("SELinux: Context %s became valid (mapped).\n",
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 340fc30ad85d..81fb4c1631e9 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -421,6 +421,7 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
struct smk_audit_info ad, *saip = NULL;
struct task_smack *tsp;
struct smack_known *tracer_known;
+ const struct cred *tracercred;
if ((mode & PTRACE_MODE_NOAUDIT) == 0) {
smk_ad_init(&ad, func, LSM_AUDIT_DATA_TASK);
@@ -429,7 +430,8 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
}
rcu_read_lock();
- tsp = __task_cred(tracer)->security;
+ tracercred = __task_cred(tracer);
+ tsp = tracercred->security;
tracer_known = smk_of_task(tsp);
if ((mode & PTRACE_MODE_ATTACH) &&
@@ -439,7 +441,7 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
rc = 0;
else if (smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)
rc = -EACCES;
- else if (capable(CAP_SYS_PTRACE))
+ else if (smack_privileged_cred(CAP_SYS_PTRACE, tracercred))
rc = 0;
else
rc = -EACCES;
@@ -1841,6 +1843,7 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
{
struct smack_known *skp;
struct smack_known *tkp = smk_of_task(tsk->cred->security);
+ const struct cred *tcred;
struct file *file;
int rc;
struct smk_audit_info ad;
@@ -1854,8 +1857,12 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
skp = file->f_security;
rc = smk_access(skp, tkp, MAY_DELIVER, NULL);
rc = smk_bu_note("sigiotask", skp, tkp, MAY_DELIVER, rc);
- if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE))
+
+ rcu_read_lock();
+ tcred = __task_cred(tsk);
+ if (rc != 0 && smack_privileged_cred(CAP_MAC_OVERRIDE, tcred))
rc = 0;
+ rcu_read_unlock();
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, tsk);
@@ -2251,7 +2258,7 @@ static int smack_task_movememory(struct task_struct *p)
* Return 0 if write access is permitted
*
*/
-static int smack_task_kill(struct task_struct *p, struct siginfo *info,
+static int smack_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int sig, const struct cred *cred)
{
struct smk_audit_info ad;
@@ -3467,7 +3474,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*/
final = &smack_known_star;
/*
- * No break.
+ * Fall through.
*
* If a smack value has been set we want to use it,
* but since tmpfs isn't giving us the opportunity
@@ -4882,4 +4889,7 @@ static __init int smack_init(void)
* Smack requires early initialization in order to label
* all processes and objects when they are created.
*/
-security_initcall(smack_init);
+DEFINE_LSM(smack) = {
+ .name = "smack",
+ .init = smack_init,
+};
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index f6482e53d55a..06b517075ec0 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -2853,7 +2853,6 @@ static const struct file_operations smk_ptrace_ops = {
static int smk_fill_super(struct super_block *sb, void *data, int silent)
{
int rc;
- struct inode *root_inode;
static const struct tree_descr smack_files[] = {
[SMK_LOAD] = {
@@ -2917,8 +2916,6 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
return rc;
}
- root_inode = d_inode(sb->s_root);
-
return 0;
}
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 03923a138ef5..9b38f94b5dd0 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -1660,7 +1660,8 @@ static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
head->r.eof = true;
if (tomoyo_str_starts(&buf, "global-pid "))
global_pid = true;
- pid = (unsigned int) simple_strtoul(buf, NULL, 10);
+ if (kstrtouint(buf, 10, &pid))
+ return;
rcu_read_lock();
if (global_pid)
p = find_task_by_pid_ns(pid, &init_pid_ns);
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 9f932e2d6852..1b5b5097efd7 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -550,4 +550,7 @@ static int __init tomoyo_init(void)
return 0;
}
-security_initcall(tomoyo_init);
+DEFINE_LSM(tomoyo) = {
+ .name = "tomoyo",
+ .init = tomoyo_init,
+};
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index 000b58522106..bd7c5029fc59 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -157,18 +157,19 @@ static int i2sbus_add_dev(struct macio_dev *macio,
struct device_node *child = NULL, *sound = NULL;
struct resource *r;
int i, layout = 0, rlen, ok = force;
- static const char *rnames[] = { "i2sbus: %s (control)",
- "i2sbus: %s (tx)",
- "i2sbus: %s (rx)" };
+ char node_name[6];
+ static const char *rnames[] = { "i2sbus: %pOFn (control)",
+ "i2sbus: %pOFn (tx)",
+ "i2sbus: %pOFn (rx)" };
static irq_handler_t ints[] = {
i2sbus_bus_intr,
i2sbus_tx_intr,
i2sbus_rx_intr
};
- if (strlen(np->name) != 5)
+ if (snprintf(node_name, sizeof(node_name), "%pOFn", np) != 5)
return 0;
- if (strncmp(np->name, "i2s-", 4))
+ if (strncmp(node_name, "i2s-", 4))
return 0;
dev = kzalloc(sizeof(struct i2sbus_dev), GFP_KERNEL);
@@ -228,13 +229,13 @@ static int i2sbus_add_dev(struct macio_dev *macio,
dev->sound.pcmid = -1;
dev->macio = macio;
dev->control = control;
- dev->bus_number = np->name[4] - 'a';
+ dev->bus_number = node_name[4] - 'a';
INIT_LIST_HEAD(&dev->sound.codec_list);
for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) {
dev->interrupts[i] = -1;
snprintf(dev->rnames[i], sizeof(dev->rnames[i]),
- rnames[i], np->name);
+ rnames[i], np);
}
for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) {
int irq = irq_of_parse_and_map(np, i);
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 5fbd47a9177e..28867732a318 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -31,7 +31,6 @@ endif # SND_ARM
config SND_PXA2XX_LIB
tristate
- select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
select SND_DMAENGINE_PCM
config SND_PXA2XX_LIB_AC97
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 753d5fc4b284..59a4adc286ed 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -25,6 +25,9 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#include <sound/memalloc.h>
/*
@@ -82,31 +85,32 @@ EXPORT_SYMBOL(snd_free_pages);
#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
-static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
+static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
{
- int pg;
gfp_t gfp_flags;
- if (WARN_ON(!dma))
- return NULL;
- pg = get_order(size);
gfp_flags = GFP_KERNEL
| __GFP_COMP /* compound page lets parts be mapped */
| __GFP_NORETRY /* don't trigger OOM-killer */
| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
- return dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
+ dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
+ gfp_flags);
+#ifdef CONFIG_X86
+ if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+ set_memory_wc((unsigned long)dmab->area,
+ PAGE_ALIGN(size) >> PAGE_SHIFT);
+#endif
}
/* free the coherent DMA pages */
-static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
- dma_addr_t dma)
+static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
{
- int pg;
-
- if (ptr == NULL)
- return;
- pg = get_order(size);
- dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
+#ifdef CONFIG_X86
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+ set_memory_wb((unsigned long)dmab->area,
+ PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
+#endif
+ dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}
#ifdef CONFIG_GENERIC_ALLOCATOR
@@ -199,12 +203,15 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
*/
dmab->dev.type = SNDRV_DMA_TYPE_DEV;
#endif /* CONFIG_GENERIC_ALLOCATOR */
+ /* fall through */
case SNDRV_DMA_TYPE_DEV:
- dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
+ case SNDRV_DMA_TYPE_DEV_UC:
+ snd_malloc_dev_pages(dmab, size);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
+ case SNDRV_DMA_TYPE_DEV_UC_SG:
snd_malloc_sgbuf_pages(device, size, dmab, NULL);
break;
#endif
@@ -275,11 +282,13 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
break;
#endif /* CONFIG_GENERIC_ALLOCATOR */
case SNDRV_DMA_TYPE_DEV:
- snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+ case SNDRV_DMA_TYPE_DEV_UC:
+ snd_free_dev_pages(dmab);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
+ case SNDRV_DMA_TYPE_DEV_UC_SG:
snd_free_sgbuf_pages(dmab);
break;
#endif
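On x86 the new SNDRV_DMA_TYPE_DEV_UC variants reduce to one extra step around the coherent allocation: flip the buffer's kernel mapping to write-combining after allocating, and back to write-back before freeing. Stripped to its essence (a sketch of the two hunks above, not a drop-in):

	buf = dma_alloc_coherent(dev, size, &addr, gfp_flags);
	if (buf && uncached)	/* i.e. SNDRV_DMA_TYPE_DEV_UC */
		set_memory_wc((unsigned long)buf, PAGE_ALIGN(size) >> PAGE_SHIFT);
	/* ... use the buffer ... */
	set_memory_wb((unsigned long)buf, PAGE_ALIGN(size) >> PAGE_SHIFT);
	dma_free_coherent(dev, size, buf, addr);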
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 0391cb1a4f19..141c5f3a9575 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -111,7 +111,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
while (plugin->next) {
if (plugin->dst_frames)
frames = plugin->dst_frames(plugin, frames);
- if (snd_BUG_ON(frames <= 0))
+ if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
return -ENXIO;
plugin = plugin->next;
err = snd_pcm_plugin_alloc(plugin, frames);
@@ -123,7 +123,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
while (plugin->prev) {
if (plugin->src_frames)
frames = plugin->src_frames(plugin, frames);
- if (snd_BUG_ON(frames <= 0))
+ if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
return -ENXIO;
plugin = plugin->prev;
err = snd_pcm_plugin_alloc(plugin, frames);
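The casts added above matter because snd_pcm_uframes_t is unsigned, so 'frames <= 0' can only ever catch zero; a negative error value propagated through dst_frames() or src_frames() wraps to a huge positive number and sails past the check. Illustration:

	snd_pcm_uframes_t frames = (snd_pcm_uframes_t)-EINVAL;	/* hypothetical bad value */

	if (frames <= 0)			/* never true: unsigned comparison */
		;
	if ((snd_pcm_sframes_t)frames <= 0)	/* true: seen as a negative count */
		;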
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 4e6110d778bd..40013b26f671 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2172,18 +2172,25 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
if (err < 0)
goto _end_unlock;
+ runtime->twake = runtime->control->avail_min ? : 1;
+ if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
+ snd_pcm_update_hw_ptr(substream);
+
if (!is_playback &&
- runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
- size >= runtime->start_threshold) {
- err = snd_pcm_start(substream);
- if (err < 0)
+ runtime->status->state == SNDRV_PCM_STATE_PREPARED) {
+ if (size >= runtime->start_threshold) {
+ err = snd_pcm_start(substream);
+ if (err < 0)
+ goto _end_unlock;
+ } else {
+ /* nothing to do */
+ err = 0;
goto _end_unlock;
+ }
}
- runtime->twake = runtime->control->avail_min ? : 1;
- if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
- snd_pcm_update_hw_ptr(substream);
avail = snd_pcm_avail(substream);
+
while (size > 0) {
snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
snd_pcm_uframes_t cont;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 08d5662039e3..ee601d7f0926 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1236,6 +1236,28 @@ int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
}
EXPORT_SYMBOL(snd_rawmidi_transmit);
+/**
+ * snd_rawmidi_proceed - Discard all pending bytes and proceed
+ * @substream: rawmidi substream
+ *
+ * Return: the number of discarded bytes
+ */
+int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream)
+{
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
+ unsigned long flags;
+ int count = 0;
+
+ spin_lock_irqsave(&runtime->lock, flags);
+ if (runtime->avail < runtime->buffer_size) {
+ count = runtime->buffer_size - runtime->avail;
+ __snd_rawmidi_transmit_ack(substream, count);
+ }
+ spin_unlock_irqrestore(&runtime->lock, flags);
+ return count;
+}
+EXPORT_SYMBOL(snd_rawmidi_proceed);
+
static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
const unsigned char __user *userbuf,
const unsigned char *kernelbuf,
diff --git a/sound/core/seq/oss/seq_oss_timer.c b/sound/core/seq/oss/seq_oss_timer.c
index ba127c22539a..0778d28421da 100644
--- a/sound/core/seq/oss/seq_oss_timer.c
+++ b/sound/core/seq/oss/seq_oss_timer.c
@@ -92,7 +92,7 @@ snd_seq_oss_process_timer_event(struct seq_oss_timer *rec, union evrec *ev)
case TMR_WAIT_REL:
parm += rec->cur_tick;
rec->realtime = 0;
- /* fall through and continue to next */
+ /* fall through */
case TMR_WAIT_ABS:
if (parm == 0) {
rec->realtime = 1;
diff --git a/sound/core/seq/seq_system.c b/sound/core/seq/seq_system.c
index 8ce1d0b40dce..0dc5d5a45ecc 100644
--- a/sound/core/seq/seq_system.c
+++ b/sound/core/seq/seq_system.c
@@ -123,6 +123,7 @@ int __init snd_seq_system_client_init(void)
{
struct snd_seq_port_callback pcallbacks;
struct snd_seq_port_info *port;
+ int err;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
@@ -134,6 +135,10 @@ int __init snd_seq_system_client_init(void)
/* register client */
sysclient = snd_seq_create_kernel_client(NULL, 0, "System");
+ if (sysclient < 0) {
+ kfree(port);
+ return sysclient;
+ }
/* register timer */
strcpy(port->name, "Timer");
@@ -144,7 +149,10 @@ int __init snd_seq_system_client_init(void)
port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT;
port->addr.client = sysclient;
port->addr.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
- snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, port);
+ err = snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT,
+ port);
+ if (err < 0)
+ goto error_port;
/* register announcement port */
strcpy(port->name, "Announce");
@@ -154,16 +162,24 @@ int __init snd_seq_system_client_init(void)
port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT;
port->addr.client = sysclient;
port->addr.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
- snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, port);
+ err = snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT,
+ port);
+ if (err < 0)
+ goto error_port;
announce_port = port->addr.port;
kfree(port);
return 0;
+
+ error_port:
+ snd_seq_system_client_done();
+ kfree(port);
+ return err;
}
/* unregister our internal client */
-void __exit snd_seq_system_client_done(void)
+void snd_seq_system_client_done(void)
{
int oldsysclient = sysclient;
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index cb988efd1ed0..e5a40795914a 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -149,9 +149,7 @@ static void snd_vmidi_output_work(struct work_struct *work)
/* discard the outputs in dispatch mode unless subscribed */
if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
!(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
- char buf[32];
- while (snd_rawmidi_transmit(substream, buf, sizeof(buf)) > 0)
- ; /* ignored */
+ snd_rawmidi_proceed(substream);
return;
}
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index 84fffabdd129..c1cfaa01a5cb 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
+#include <asm/pgtable.h>
#include <sound/memalloc.h>
@@ -43,6 +44,8 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
dmab->area = NULL;
tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
+ tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
tmpb.dev.dev = sgbuf->dev;
for (i = 0; i < sgbuf->pages; i++) {
if (!(sgbuf->table[i].addr & ~PAGE_MASK))
@@ -72,12 +75,20 @@ void *snd_malloc_sgbuf_pages(struct device *device,
struct snd_dma_buffer tmpb;
struct snd_sg_page *table;
struct page **pgtable;
+ int type = SNDRV_DMA_TYPE_DEV;
+ pgprot_t prot = PAGE_KERNEL;
dmab->area = NULL;
dmab->addr = 0;
dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
if (! sgbuf)
return NULL;
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
+ type = SNDRV_DMA_TYPE_DEV_UC;
+#ifdef pgprot_noncached
+ prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+ }
sgbuf->dev = device;
pages = snd_sgbuf_aligned_pages(size);
sgbuf->tblsize = sgbuf_align_table(pages);
@@ -98,7 +109,7 @@ void *snd_malloc_sgbuf_pages(struct device *device,
if (chunk > maxpages)
chunk = maxpages;
chunk <<= PAGE_SHIFT;
- if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
+ if (snd_dma_alloc_pages_fallback(type, device,
chunk, &tmpb) < 0) {
if (!sgbuf->pages)
goto _failed;
@@ -125,7 +136,7 @@ void *snd_malloc_sgbuf_pages(struct device *device,
}
sgbuf->size = size;
- dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
+ dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
if (! dmab->area)
goto _failed;
if (res_size)
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 529d9f405fa9..8a146b039276 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -147,7 +147,9 @@ config SND_FIREWIRE_MOTU
help
 Say Y here to enable support for FireWire devices produced by MOTU:
* 828mk2
+ * Traveler
* 828mk3
+ * Audio Express
To compile this driver as a module, choose M here: the module
will be called snd-firewire-motu.
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index cb9acfe60f6a..fcd965f1d69e 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -140,6 +140,59 @@ const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
};
EXPORT_SYMBOL(amdtp_rate_table);
+static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_interval *s = hw_param_interval(params, rule->var);
+ const struct snd_interval *r =
+ hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval t = {
+ .min = s->min, .max = s->max, .integer = 1,
+ };
+ int i;
+
+ for (i = 0; i < CIP_SFC_COUNT; ++i) {
+ unsigned int rate = amdtp_rate_table[i];
+ unsigned int step = amdtp_syt_intervals[i];
+
+ if (!snd_interval_test(r, rate))
+ continue;
+
+ t.min = roundup(t.min, step);
+ t.max = rounddown(t.max, step);
+ }
+
+ if (snd_interval_checkempty(&t))
+ return -EINVAL;
+
+ return snd_interval_refine(s, &t);
+}
+
+static int apply_constraint_to_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_interval *r =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ const struct snd_interval *s = hw_param_interval_c(params, rule->deps[0]);
+ struct snd_interval t = {
+ .min = UINT_MAX, .max = 0, .integer = 1,
+ };
+ int i;
+
+ for (i = 0; i < CIP_SFC_COUNT; ++i) {
+ unsigned int step = amdtp_syt_intervals[i];
+ unsigned int rate = amdtp_rate_table[i];
+
+ if (s->min % step || s->max % step)
+ continue;
+
+ t.min = min(t.min, rate);
+ t.max = max(t.max, rate);
+ }
+
+ return snd_interval_refine(r, &t);
+}
+
/**
* amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
* @s: the AMDTP stream, which must be initialized.
@@ -194,16 +247,27 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
 * number equals SYT_INTERVAL. So the number is 8, 16 or 32,
 * depending on its sampling rate. For accurate period interrupts, it's
 * preferable to align period/buffer sizes to current SYT_INTERVAL.
- *
- * TODO: These constraints can be improved with proper rules.
- * Currently apply LCM of SYT_INTERVALs.
*/
- err = snd_pcm_hw_constraint_step(runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ apply_constraint_to_size, NULL,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+ if (err < 0)
+ goto end;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ apply_constraint_to_rate, NULL,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+ if (err < 0)
+ goto end;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ apply_constraint_to_size, NULL,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+ if (err < 0)
+ goto end;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ apply_constraint_to_rate, NULL,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
if (err < 0)
goto end;
- err = snd_pcm_hw_constraint_step(runtime, 0,
- SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32);
end:
return err;
}
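
The rules registered above replace the fixed step-32 constraint with an exact dependency between rate and period/buffer size. The general shape of such an interval rule, shown here as a hypothetical rule forcing a multiple of 32 frames: refine a scratch snd_interval and return the result of snd_interval_refine() (negative on an empty interval, 0 if unchanged, 1 if narrowed):

	static int rule_step32(struct snd_pcm_hw_params *params,
			       struct snd_pcm_hw_rule *rule)
	{
		struct snd_interval *i = hw_param_interval(params, rule->var);
		struct snd_interval t = {
			.min = roundup(i->min, 32),
			.max = rounddown(i->max, 32),
			.integer = 1,
		};

		if (snd_interval_checkempty(&t))
			return -EINVAL;
		return snd_interval_refine(i, &t);
	}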
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 730ea91d9be8..672d13488454 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -126,23 +126,6 @@ end:
return err;
}
-static void bebob_free(struct snd_bebob *bebob)
-{
- snd_bebob_stream_destroy_duplex(bebob);
- fw_unit_put(bebob->unit);
-
- kfree(bebob->maudio_special_quirk);
-
- mutex_destroy(&bebob->mutex);
- kfree(bebob);
-}
-
-/*
- * This module releases the FireWire unit data after all ALSA character devices
- * are released by applications. This is for releasing stream data or finishing
- * transactions safely. Thus at returning from .remove(), this module still keep
- * references for the unit.
- */
static void
bebob_card_free(struct snd_card *card)
{
@@ -152,7 +135,7 @@ bebob_card_free(struct snd_card *card)
clear_bit(bebob->card_index, devices_used);
mutex_unlock(&devices_mutex);
- bebob_free(card->private_data);
+ snd_bebob_stream_destroy_duplex(bebob);
}
static const struct snd_bebob_spec *
@@ -192,7 +175,6 @@ do_registration(struct work_struct *work)
return;
mutex_lock(&devices_mutex);
-
for (card_index = 0; card_index < SNDRV_CARDS; card_index++) {
if (!test_bit(card_index, devices_used) && enable[card_index])
break;
@@ -208,6 +190,11 @@ do_registration(struct work_struct *work)
mutex_unlock(&devices_mutex);
return;
}
+ set_bit(card_index, devices_used);
+ mutex_unlock(&devices_mutex);
+
+ bebob->card->private_free = bebob_card_free;
+ bebob->card->private_data = bebob;
err = name_device(bebob);
if (err < 0)
@@ -248,21 +235,10 @@ do_registration(struct work_struct *work)
if (err < 0)
goto error;
- set_bit(card_index, devices_used);
- mutex_unlock(&devices_mutex);
-
- /*
- * After registered, bebob instance can be released corresponding to
- * releasing the sound card instance.
- */
- bebob->card->private_free = bebob_card_free;
- bebob->card->private_data = bebob;
bebob->registered = true;
return;
error:
- mutex_unlock(&devices_mutex);
- snd_bebob_stream_destroy_duplex(bebob);
snd_card_free(bebob->card);
dev_info(&bebob->unit->device,
"Sound card registration failed: %d\n", err);
@@ -293,15 +269,15 @@ bebob_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
}
/* Allocate this independent of sound card instance. */
- bebob = kzalloc(sizeof(struct snd_bebob), GFP_KERNEL);
- if (bebob == NULL)
+ bebob = devm_kzalloc(&unit->device, sizeof(struct snd_bebob),
+ GFP_KERNEL);
+ if (!bebob)
return -ENOMEM;
-
bebob->unit = fw_unit_get(unit);
- bebob->entry = entry;
- bebob->spec = spec;
dev_set_drvdata(&unit->device, bebob);
+ bebob->entry = entry;
+ bebob->spec = spec;
mutex_init(&bebob->mutex);
spin_lock_init(&bebob->lock);
init_waitqueue_head(&bebob->hwdep_wait);
@@ -377,12 +353,12 @@ static void bebob_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&bebob->dwork);
if (bebob->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(bebob->card);
- } else {
- /* Don't forget this case. */
- bebob_free(bebob);
+ // Block until all ALSA character devices are released.
+ snd_card_free(bebob->card);
}
+
+ mutex_destroy(&bebob->mutex);
+ fw_unit_put(bebob->unit);
}
static const struct snd_bebob_rate_spec normal_rate_spec = {
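
The lifetime pattern adopted here (and repeated for the other FireWire drivers below): the driver structure becomes devres-managed so it is freed together with the fw unit device, the sound card owns the heavyweight teardown through private_free, and .remove() blocks in snd_card_free() so that stream I/O cannot outlive the unit. In condensed outline, with the bodies trimmed to the essentials:

	static void card_free(struct snd_card *card)
	{
		struct snd_bebob *bebob = card->private_data;

		/* runs once the last ALSA character device is closed */
		snd_bebob_stream_destroy_duplex(bebob);
	}

	static void remove(struct fw_unit *unit)
	{
		struct snd_bebob *bebob = dev_get_drvdata(&unit->device);

		if (bebob->registered)
			snd_card_free(bebob->card);	/* waits for card_free */
		mutex_destroy(&bebob->mutex);
		fw_unit_put(bebob->unit);	/* devm frees bebob afterwards */
	}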
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index bd55620c6a47..51152ca4af57 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
struct fw_device *device = fw_parent_device(unit);
int err, rcode;
u64 date;
- __le32 cues[3] = {
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
- };
+ __le32 *cues;
/* check date of software used to build */
err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
&date, sizeof(u64));
if (err < 0)
- goto end;
+ return err;
/*
* firmware version 5058 or later has date later than "20070401", but
* 'date' is not null-terminated.
@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
if (date < 0x3230303730343031LL) {
dev_err(&unit->device,
"Use firmware version 5058 or later\n");
- err = -ENOSYS;
- goto end;
+ return -ENXIO;
}
+ cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
+ if (!cues)
+ return -ENOMEM;
+
+ cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
+ cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
+ cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
+
rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
device->node_id, device->generation,
device->max_speed, BEBOB_ADDR_REG_REQ,
- cues, sizeof(cues));
+ cues, 3 * sizeof(*cues));
+ kfree(cues);
if (rcode != RCODE_COMPLETE) {
dev_err(&unit->device,
"Failed to send a cue to load firmware\n");
err = -EIO;
}
-end:
+
return err;
}
@@ -257,8 +261,9 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814)
struct special_params *params;
int err;
- params = kzalloc(sizeof(struct special_params), GFP_KERNEL);
- if (params == NULL)
+ params = devm_kzalloc(&bebob->card->card_dev,
+ sizeof(struct special_params), GFP_KERNEL);
+ if (!params)
return -ENOMEM;
mutex_lock(&bebob->mutex);
@@ -290,10 +295,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814)
bebob->midi_output_ports = 2;
}
end:
- if (err < 0) {
- kfree(params);
- bebob->maudio_special_quirk = NULL;
- }
mutex_unlock(&bebob->mutex);
return err;
}
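
Moving the cue words from the stack to kmalloc_array() in the hunk above is not cosmetic: the buffer is handed to fw_run_transaction() as a request payload, and such payloads may be mapped for DMA, which is not permitted on stack memory (and breaks outright with CONFIG_VMAP_STACK). The minimal safe shape, with run_transaction() as a hypothetical stand-in:

	__le32 *buf;
	int err;

	buf = kmalloc_array(3, sizeof(*buf), GFP_KERNEL);	/* DMA-safe */
	if (!buf)
		return -ENOMEM;
	/* ... fill buf[] ... */
	err = run_transaction(buf);	/* hypothetical stand-in */
	kfree(buf);
	return err;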
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 774eb2205668..0f6dbcffe711 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -122,25 +122,12 @@ static void dice_card_strings(struct snd_dice *dice)
strcpy(card->mixername, "DICE");
}
-static void dice_free(struct snd_dice *dice)
+static void dice_card_free(struct snd_card *card)
{
+ struct snd_dice *dice = card->private_data;
+
snd_dice_stream_destroy_duplex(dice);
snd_dice_transaction_destroy(dice);
- fw_unit_put(dice->unit);
-
- mutex_destroy(&dice->mutex);
- kfree(dice);
-}
-
-/*
- * This module releases the FireWire unit data after all ALSA character devices
- * are released by applications. This is for releasing stream data or finishing
- * transactions safely. Thus at returning from .remove(), this module still keep
- * references for the unit.
- */
-static void dice_card_free(struct snd_card *card)
-{
- dice_free(card->private_data);
}
static void do_registration(struct work_struct *work)
@@ -155,6 +142,8 @@ static void do_registration(struct work_struct *work)
&dice->card);
if (err < 0)
return;
+ dice->card->private_free = dice_card_free;
+ dice->card->private_data = dice;
err = snd_dice_transaction_init(dice);
if (err < 0)
@@ -192,19 +181,10 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- /*
- * After registered, dice instance can be released corresponding to
- * releasing the sound card instance.
- */
- dice->card->private_free = dice_card_free;
- dice->card->private_data = dice;
dice->registered = true;
return;
error:
- snd_dice_stream_destroy_duplex(dice);
- snd_dice_transaction_destroy(dice);
- snd_dice_stream_destroy_duplex(dice);
snd_card_free(dice->card);
dev_info(&dice->unit->device,
"Sound card registration failed: %d\n", err);
@@ -223,10 +203,9 @@ static int dice_probe(struct fw_unit *unit,
}
/* Allocate this independent of sound card instance. */
- dice = kzalloc(sizeof(struct snd_dice), GFP_KERNEL);
- if (dice == NULL)
+ dice = devm_kzalloc(&unit->device, sizeof(struct snd_dice), GFP_KERNEL);
+ if (!dice)
return -ENOMEM;
-
dice->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, dice);
@@ -263,10 +242,10 @@ static void dice_remove(struct fw_unit *unit)
if (dice->registered) {
/* No need to wait for releasing card object in this context. */
snd_card_free_when_closed(dice->card);
- } else {
- /* Don't forget this case. */
- dice_free(dice);
}
+
+ mutex_destroy(&dice->mutex);
+ fw_unit_put(dice->unit);
}
static void dice_bus_reset(struct fw_unit *unit)
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index 1f5e1d23f31a..6c6ea149ef6b 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -41,19 +41,12 @@ static int name_card(struct snd_dg00x *dg00x)
return 0;
}
-static void dg00x_free(struct snd_dg00x *dg00x)
+static void dg00x_card_free(struct snd_card *card)
{
+ struct snd_dg00x *dg00x = card->private_data;
+
snd_dg00x_stream_destroy_duplex(dg00x);
snd_dg00x_transaction_unregister(dg00x);
-
- fw_unit_put(dg00x->unit);
-
- mutex_destroy(&dg00x->mutex);
-}
-
-static void dg00x_card_free(struct snd_card *card)
-{
- dg00x_free(card->private_data);
}
static void do_registration(struct work_struct *work)
@@ -69,6 +62,8 @@ static void do_registration(struct work_struct *work)
&dg00x->card);
if (err < 0)
return;
+ dg00x->card->private_free = dg00x_card_free;
+ dg00x->card->private_data = dg00x;
err = name_card(dg00x);
if (err < 0)
@@ -100,14 +95,10 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- dg00x->card->private_free = dg00x_card_free;
- dg00x->card->private_data = dg00x;
dg00x->registered = true;
return;
error:
- snd_dg00x_transaction_unregister(dg00x);
- snd_dg00x_stream_destroy_duplex(dg00x);
snd_card_free(dg00x->card);
dev_info(&dg00x->unit->device,
"Sound card registration failed: %d\n", err);
@@ -119,8 +110,9 @@ static int snd_dg00x_probe(struct fw_unit *unit,
struct snd_dg00x *dg00x;
/* Allocate this independent of sound card instance. */
- dg00x = kzalloc(sizeof(struct snd_dg00x), GFP_KERNEL);
- if (dg00x == NULL)
+ dg00x = devm_kzalloc(&unit->device, sizeof(struct snd_dg00x),
+ GFP_KERNEL);
+ if (!dg00x)
return -ENOMEM;
dg00x->unit = fw_unit_get(unit);
@@ -172,12 +164,12 @@ static void snd_dg00x_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&dg00x->dwork);
if (dg00x->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(dg00x->card);
- } else {
- /* Don't forget this case. */
- dg00x_free(dg00x);
+ // Block until all ALSA character devices are released.
+ snd_card_free(dg00x->card);
}
+
+ mutex_destroy(&dg00x->mutex);
+ fw_unit_put(dg00x->unit);
}
static const struct ieee1394_device_id snd_dg00x_id_table[] = {
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
index ad7a0a32557d..64c3cb0fb926 100644
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
@@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
{
__le32 *reg;
int i;
+ int err;
reg = kcalloc(18, sizeof(__le32), GFP_KERNEL);
if (reg == NULL)
@@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
reg[i] = cpu_to_le32(0x00000001);
}
- return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
- FF400_FETCH_PCM_FRAMES, reg,
- sizeof(__le32) * 18, 0);
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
+ FF400_FETCH_PCM_FRAMES, reg,
+ sizeof(__le32) * 18, 0);
+ kfree(reg);
+ return err;
}
static void ff400_dump_sync_status(struct snd_ff *ff,
diff --git a/sound/firewire/fireface/ff.c b/sound/firewire/fireface/ff.c
index 4974bc7980e9..3f61cfeace69 100644
--- a/sound/firewire/fireface/ff.c
+++ b/sound/firewire/fireface/ff.c
@@ -27,20 +27,12 @@ static void name_card(struct snd_ff *ff)
dev_name(&ff->unit->device), 100 << fw_dev->max_speed);
}
-static void ff_free(struct snd_ff *ff)
+static void ff_card_free(struct snd_card *card)
{
+ struct snd_ff *ff = card->private_data;
+
snd_ff_stream_destroy_duplex(ff);
snd_ff_transaction_unregister(ff);
-
- fw_unit_put(ff->unit);
-
- mutex_destroy(&ff->mutex);
- kfree(ff);
-}
-
-static void ff_card_free(struct snd_card *card)
-{
- ff_free(card->private_data);
}
static void do_registration(struct work_struct *work)
@@ -55,6 +47,8 @@ static void do_registration(struct work_struct *work)
&ff->card);
if (err < 0)
return;
+ ff->card->private_free = ff_card_free;
+ ff->card->private_data = ff;
err = snd_ff_transaction_register(ff);
if (err < 0)
@@ -84,14 +78,10 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- ff->card->private_free = ff_card_free;
- ff->card->private_data = ff;
ff->registered = true;
return;
error:
- snd_ff_transaction_unregister(ff);
- snd_ff_stream_destroy_duplex(ff);
snd_card_free(ff->card);
dev_info(&ff->unit->device,
"Sound card registration failed: %d\n", err);
@@ -102,11 +92,9 @@ static int snd_ff_probe(struct fw_unit *unit,
{
struct snd_ff *ff;
- ff = kzalloc(sizeof(struct snd_ff), GFP_KERNEL);
- if (ff == NULL)
+ ff = devm_kzalloc(&unit->device, sizeof(struct snd_ff), GFP_KERNEL);
+ if (!ff)
return -ENOMEM;
-
- /* initialize myself */
ff->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, ff);
@@ -149,12 +137,12 @@ static void snd_ff_remove(struct fw_unit *unit)
cancel_work_sync(&ff->dwork.work);
if (ff->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(ff->card);
- } else {
- /* Don't forget this case. */
- ff_free(ff);
+ // Block until all ALSA character devices are released.
+ snd_card_free(ff->card);
}
+
+ mutex_destroy(&ff->mutex);
+ fw_unit_put(ff->unit);
}
static const struct snd_ff_spec spec_ff400 = {
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 71a0613d3da0..faf0e001c4c5 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -184,36 +184,17 @@ end:
return err;
}
-static void efw_free(struct snd_efw *efw)
-{
- snd_efw_stream_destroy_duplex(efw);
- snd_efw_transaction_remove_instance(efw);
- fw_unit_put(efw->unit);
-
- kfree(efw->resp_buf);
-
- mutex_destroy(&efw->mutex);
- kfree(efw);
-}
-
-/*
- * This module releases the FireWire unit data after all ALSA character devices
- * are released by applications. This is for releasing stream data or finishing
- * transactions safely. Thus at returning from .remove(), this module still keep
- * references for the unit.
- */
static void
efw_card_free(struct snd_card *card)
{
struct snd_efw *efw = card->private_data;
- if (efw->card_index >= 0) {
- mutex_lock(&devices_mutex);
- clear_bit(efw->card_index, devices_used);
- mutex_unlock(&devices_mutex);
- }
+ mutex_lock(&devices_mutex);
+ clear_bit(efw->card_index, devices_used);
+ mutex_unlock(&devices_mutex);
- efw_free(card->private_data);
+ snd_efw_stream_destroy_duplex(efw);
+ snd_efw_transaction_remove_instance(efw);
}
static void
@@ -226,9 +207,8 @@ do_registration(struct work_struct *work)
if (efw->registered)
return;
- mutex_lock(&devices_mutex);
-
/* check registered cards */
+ mutex_lock(&devices_mutex);
for (card_index = 0; card_index < SNDRV_CARDS; ++card_index) {
if (!test_bit(card_index, devices_used) && enable[card_index])
break;
@@ -244,12 +224,18 @@ do_registration(struct work_struct *work)
mutex_unlock(&devices_mutex);
return;
}
+ set_bit(card_index, devices_used);
+ mutex_unlock(&devices_mutex);
+
+ efw->card->private_free = efw_card_free;
+ efw->card->private_data = efw;
/* prepare response buffer */
snd_efw_resp_buf_size = clamp(snd_efw_resp_buf_size,
SND_EFW_RESPONSE_MAXIMUM_BYTES, 4096U);
- efw->resp_buf = kzalloc(snd_efw_resp_buf_size, GFP_KERNEL);
- if (efw->resp_buf == NULL) {
+ efw->resp_buf = devm_kzalloc(&efw->card->card_dev,
+ snd_efw_resp_buf_size, GFP_KERNEL);
+ if (!efw->resp_buf) {
err = -ENOMEM;
goto error;
}
@@ -284,22 +270,10 @@ do_registration(struct work_struct *work)
if (err < 0)
goto error;
- set_bit(card_index, devices_used);
- mutex_unlock(&devices_mutex);
-
- /*
- * After registered, efw instance can be released corresponding to
- * releasing the sound card instance.
- */
- efw->card->private_free = efw_card_free;
- efw->card->private_data = efw;
efw->registered = true;
return;
error:
- mutex_unlock(&devices_mutex);
- snd_efw_transaction_remove_instance(efw);
- snd_efw_stream_destroy_duplex(efw);
snd_card_free(efw->card);
dev_info(&efw->unit->device,
"Sound card registration failed: %d\n", err);
@@ -310,10 +284,9 @@ efw_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
{
struct snd_efw *efw;
- efw = kzalloc(sizeof(struct snd_efw), GFP_KERNEL);
+ efw = devm_kzalloc(&unit->device, sizeof(struct snd_efw), GFP_KERNEL);
if (efw == NULL)
return -ENOMEM;
-
efw->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, efw);
@@ -361,12 +334,12 @@ static void efw_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&efw->dwork);
if (efw->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(efw->card);
- } else {
- /* Don't forget this case. */
- efw_free(efw);
+ // Block until all ALSA character devices are released.
+ snd_card_free(efw->card);
}
+
+ mutex_destroy(&efw->mutex);
+ fw_unit_put(efw->unit);
}
static const struct ieee1394_device_id efw_id_table[] = {
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 30957477e005..9ebe510ea26b 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -602,8 +602,6 @@ static void isight_card_free(struct snd_card *card)
struct isight *isight = card->private_data;
fw_iso_resources_destroy(&isight->resources);
- fw_unit_put(isight->unit);
- mutex_destroy(&isight->mutex);
}
static u64 get_unit_base(struct fw_unit *unit)
@@ -640,7 +638,7 @@ static int isight_probe(struct fw_unit *unit,
if (!isight->audio_base) {
dev_err(&unit->device, "audio unit base not found\n");
err = -ENXIO;
- goto err_unit;
+ goto error;
}
fw_iso_resources_init(&isight->resources, unit);
@@ -669,12 +667,12 @@ static int isight_probe(struct fw_unit *unit,
dev_set_drvdata(&unit->device, isight);
return 0;
-
-err_unit:
- fw_unit_put(isight->unit);
- mutex_destroy(&isight->mutex);
error:
snd_card_free(card);
+
+ mutex_destroy(&isight->mutex);
+ fw_unit_put(isight->unit);
+
return err;
}
@@ -703,7 +701,11 @@ static void isight_remove(struct fw_unit *unit)
isight_stop_streaming(isight);
mutex_unlock(&isight->mutex);
- snd_card_free_when_closed(isight->card);
+ // Block until all ALSA character devices are released.
+ snd_card_free(isight->card);
+
+ mutex_destroy(&isight->mutex);
+ fw_unit_put(isight->unit);
}
static const struct ieee1394_device_id isight_id_table[] = {
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index 300d31b6f191..220e61926ea4 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -52,26 +52,12 @@ static void name_card(struct snd_motu *motu)
dev_name(&motu->unit->device), 100 << fw_dev->max_speed);
}
-static void motu_free(struct snd_motu *motu)
+static void motu_card_free(struct snd_card *card)
{
- snd_motu_transaction_unregister(motu);
+ struct snd_motu *motu = card->private_data;
+ snd_motu_transaction_unregister(motu);
snd_motu_stream_destroy_duplex(motu);
- fw_unit_put(motu->unit);
-
- mutex_destroy(&motu->mutex);
- kfree(motu);
-}
-
-/*
- * This module releases the FireWire unit data after all ALSA character devices
- * are released by applications. This is for releasing stream data or finishing
- * transactions safely. Thus at returning from .remove(), this module still keep
- * references for the unit.
- */
-static void motu_card_free(struct snd_card *card)
-{
- motu_free(card->private_data);
}
static void do_registration(struct work_struct *work)
@@ -86,6 +72,8 @@ static void do_registration(struct work_struct *work)
&motu->card);
if (err < 0)
return;
+ motu->card->private_free = motu_card_free;
+ motu->card->private_data = motu;
name_card(motu);
@@ -120,18 +108,10 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- /*
- * After registered, motu instance can be released corresponding to
- * releasing the sound card instance.
- */
- motu->card->private_free = motu_card_free;
- motu->card->private_data = motu;
motu->registered = true;
return;
error:
- snd_motu_transaction_unregister(motu);
- snd_motu_stream_destroy_duplex(motu);
snd_card_free(motu->card);
dev_info(&motu->unit->device,
"Sound card registration failed: %d\n", err);
@@ -143,14 +123,13 @@ static int motu_probe(struct fw_unit *unit,
struct snd_motu *motu;
/* Allocate this independently of sound card instance. */
- motu = kzalloc(sizeof(struct snd_motu), GFP_KERNEL);
- if (motu == NULL)
+ motu = devm_kzalloc(&unit->device, sizeof(struct snd_motu), GFP_KERNEL);
+ if (!motu)
return -ENOMEM;
-
- motu->spec = (const struct snd_motu_spec *)entry->driver_data;
motu->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, motu);
+ motu->spec = (const struct snd_motu_spec *)entry->driver_data;
mutex_init(&motu->mutex);
spin_lock_init(&motu->lock);
init_waitqueue_head(&motu->hwdep_wait);
@@ -174,12 +153,12 @@ static void motu_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&motu->dwork);
if (motu->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(motu->card);
- } else {
- /* Don't forget this case. */
- motu_free(motu);
+ // Block until all ALSA character devices are released.
+ snd_card_free(motu->card);
}
+
+ mutex_destroy(&motu->mutex);
+ fw_unit_put(motu->unit);
}
static void motu_bus_update(struct fw_unit *unit)
diff --git a/sound/firewire/oxfw/oxfw-scs1x.c b/sound/firewire/oxfw/oxfw-scs1x.c
index f33497cdc706..9d9545880a28 100644
--- a/sound/firewire/oxfw/oxfw-scs1x.c
+++ b/sound/firewire/oxfw/oxfw-scs1x.c
@@ -372,8 +372,9 @@ int snd_oxfw_scs1x_add(struct snd_oxfw *oxfw)
struct fw_scs1x *scs;
int err;
- scs = kzalloc(sizeof(struct fw_scs1x), GFP_KERNEL);
- if (scs == NULL)
+ scs = devm_kzalloc(&oxfw->card->card_dev, sizeof(struct fw_scs1x),
+ GFP_KERNEL);
+ if (!scs)
return -ENOMEM;
scs->fw_dev = fw_parent_device(oxfw->unit);
oxfw->spec = scs;
diff --git a/sound/firewire/oxfw/oxfw-spkr.c b/sound/firewire/oxfw/oxfw-spkr.c
index cb905af0660d..66d4b1f73f0f 100644
--- a/sound/firewire/oxfw/oxfw-spkr.c
+++ b/sound/firewire/oxfw/oxfw-spkr.c
@@ -270,8 +270,9 @@ int snd_oxfw_add_spkr(struct snd_oxfw *oxfw, bool is_lacie)
unsigned int i, first_ch;
int err;
- spkr = kzalloc(sizeof(struct fw_spkr), GFP_KERNEL);
- if (spkr == NULL)
+ spkr = devm_kzalloc(&oxfw->card->card_dev, sizeof(struct fw_spkr),
+ GFP_KERNEL);
+ if (!spkr)
return -ENOMEM;
oxfw->spec = spkr;
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index d9361f352133..f230a9e44c3c 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -517,8 +517,9 @@ assume_stream_formats(struct snd_oxfw *oxfw, enum avc_general_plug_dir dir,
if (err < 0)
goto end;
- formats[eid] = kmemdup(buf, *len, GFP_KERNEL);
- if (formats[eid] == NULL) {
+ formats[eid] = devm_kmemdup(&oxfw->card->card_dev, buf, *len,
+ GFP_KERNEL);
+ if (!formats[eid]) {
err = -ENOMEM;
goto end;
}
@@ -535,7 +536,8 @@ assume_stream_formats(struct snd_oxfw *oxfw, enum avc_general_plug_dir dir,
continue;
eid++;
- formats[eid] = kmemdup(buf, *len, GFP_KERNEL);
+ formats[eid] = devm_kmemdup(&oxfw->card->card_dev, buf, *len,
+ GFP_KERNEL);
if (formats[eid] == NULL) {
err = -ENOMEM;
goto end;
@@ -597,8 +599,9 @@ static int fill_stream_formats(struct snd_oxfw *oxfw,
if (err < 0)
break;
- formats[eid] = kmemdup(buf, len, GFP_KERNEL);
- if (formats[eid] == NULL) {
+ formats[eid] = devm_kmemdup(&oxfw->card->card_dev, buf, len,
+ GFP_KERNEL);
+ if (!formats[eid]) {
err = -ENOMEM;
break;
}
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 1e5b2c802635..afb78d90384b 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -113,34 +113,13 @@ end:
return err;
}
-static void oxfw_free(struct snd_oxfw *oxfw)
+static void oxfw_card_free(struct snd_card *card)
{
- unsigned int i;
+ struct snd_oxfw *oxfw = card->private_data;
snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
if (oxfw->has_output)
snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
-
- fw_unit_put(oxfw->unit);
-
- for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
- kfree(oxfw->tx_stream_formats[i]);
- kfree(oxfw->rx_stream_formats[i]);
- }
-
- kfree(oxfw->spec);
- mutex_destroy(&oxfw->mutex);
-}
-
-/*
- * This module releases the FireWire unit data after all ALSA character devices
- * are released by applications. This is for releasing stream data or finishing
- * transactions safely. Thus at returning from .remove(), this module still keep
- * references for the unit.
- */
-static void oxfw_card_free(struct snd_card *card)
-{
- oxfw_free(card->private_data);
}
static int detect_quirks(struct snd_oxfw *oxfw)
@@ -216,6 +195,8 @@ static void do_registration(struct work_struct *work)
&oxfw->card);
if (err < 0)
return;
+ oxfw->card->private_free = oxfw_card_free;
+ oxfw->card->private_data = oxfw;
err = name_card(oxfw);
if (err < 0)
@@ -256,19 +237,10 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- /*
- * After registered, oxfw instance can be released corresponding to
- * releasing the sound card instance.
- */
- oxfw->card->private_free = oxfw_card_free;
- oxfw->card->private_data = oxfw;
oxfw->registered = true;
return;
error:
- snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
- if (oxfw->has_output)
- snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
snd_card_free(oxfw->card);
dev_info(&oxfw->unit->device,
"Sound card registration failed: %d\n", err);
@@ -283,14 +255,13 @@ static int oxfw_probe(struct fw_unit *unit,
return -ENODEV;
/* Allocate this independent of sound card instance. */
- oxfw = kzalloc(sizeof(struct snd_oxfw), GFP_KERNEL);
- if (oxfw == NULL)
+ oxfw = devm_kzalloc(&unit->device, sizeof(struct snd_oxfw), GFP_KERNEL);
+ if (!oxfw)
return -ENOMEM;
-
- oxfw->entry = entry;
oxfw->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, oxfw);
+ oxfw->entry = entry;
mutex_init(&oxfw->mutex);
spin_lock_init(&oxfw->lock);
init_waitqueue_head(&oxfw->hwdep_wait);
@@ -337,12 +308,12 @@ static void oxfw_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&oxfw->dwork);
if (oxfw->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(oxfw->card);
- } else {
- /* Don't forget this case. */
- oxfw_free(oxfw);
+ // Block until all ALSA character devices are released.
+ snd_card_free(oxfw->card);
}
+
+ mutex_destroy(&oxfw->mutex);
+ fw_unit_put(oxfw->unit);
}
static const struct compat_info griffin_firewave = {
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index 44ad41fb7374..ef57fa4db323 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -85,19 +85,12 @@ static int identify_model(struct snd_tscm *tscm)
return 0;
}
-static void tscm_free(struct snd_tscm *tscm)
+static void tscm_card_free(struct snd_card *card)
{
+ struct snd_tscm *tscm = card->private_data;
+
snd_tscm_transaction_unregister(tscm);
snd_tscm_stream_destroy_duplex(tscm);
-
- fw_unit_put(tscm->unit);
-
- mutex_destroy(&tscm->mutex);
-}
-
-static void tscm_card_free(struct snd_card *card)
-{
- tscm_free(card->private_data);
}
static void do_registration(struct work_struct *work)
@@ -109,6 +102,8 @@ static void do_registration(struct work_struct *work)
&tscm->card);
if (err < 0)
return;
+ tscm->card->private_free = tscm_card_free;
+ tscm->card->private_data = tscm;
err = identify_model(tscm);
if (err < 0)
@@ -140,18 +135,10 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- /*
- * After registered, tscm instance can be released corresponding to
- * releasing the sound card instance.
- */
- tscm->card->private_free = tscm_card_free;
- tscm->card->private_data = tscm;
tscm->registered = true;
return;
error:
- snd_tscm_transaction_unregister(tscm);
- snd_tscm_stream_destroy_duplex(tscm);
snd_card_free(tscm->card);
dev_info(&tscm->unit->device,
"Sound card registration failed: %d\n", err);
@@ -163,11 +150,9 @@ static int snd_tscm_probe(struct fw_unit *unit,
struct snd_tscm *tscm;
/* Allocate this independent of sound card instance. */
- tscm = kzalloc(sizeof(struct snd_tscm), GFP_KERNEL);
- if (tscm == NULL)
+ tscm = devm_kzalloc(&unit->device, sizeof(struct snd_tscm), GFP_KERNEL);
+ if (!tscm)
return -ENOMEM;
-
- /* initialize myself */
tscm->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, tscm);
@@ -215,12 +200,12 @@ static void snd_tscm_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&tscm->dwork);
if (tscm->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(tscm->card);
- } else {
- /* Don't forget this case. */
- tscm_free(tscm);
+ // Block until all ALSA character devices are released.
+ snd_card_free(tscm->card);
}
+
+ mutex_destroy(&tscm->mutex);
+ fw_unit_put(tscm->unit);
}
static const struct ieee1394_device_id snd_tscm_id_table[] = {
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index 5bc4a1d587d4..60cb00fd0c69 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -48,9 +48,11 @@ void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *bus, bool enable)
}
if (enable)
- snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, 0, AZX_PPCTL_GPROCEN);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
+ AZX_PPCTL_GPROCEN, AZX_PPCTL_GPROCEN);
else
- snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_GPROCEN, 0);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
+ AZX_PPCTL_GPROCEN, 0);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_enable);
@@ -68,9 +70,11 @@ void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *bus, bool enable)
}
if (enable)
- snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, 0, AZX_PPCTL_PIE);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
+ AZX_PPCTL_PIE, AZX_PPCTL_PIE);
else
- snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_PIE, 0);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
+ AZX_PPCTL_PIE, 0);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_int_enable);
@@ -194,7 +198,8 @@ static int check_hdac_link_power_active(struct hdac_ext_link *link, bool enable)
*/
int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link)
{
- snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL, 0, AZX_MLCTL_SPA);
+ snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL,
+ AZX_MLCTL_SPA, AZX_MLCTL_SPA);
return check_hdac_link_power_active(link, true);
}
@@ -222,8 +227,8 @@ int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus)
int ret;
list_for_each_entry(hlink, &bus->hlink_list, list) {
- snd_hdac_updatel(hlink->ml_addr,
- AZX_REG_ML_LCTL, 0, AZX_MLCTL_SPA);
+ snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL,
+ AZX_MLCTL_SPA, AZX_MLCTL_SPA);
ret = check_hdac_link_power_active(hlink, true);
if (ret < 0)
return ret;
@@ -243,7 +248,8 @@ int snd_hdac_ext_bus_link_power_down_all(struct hdac_bus *bus)
int ret;
list_for_each_entry(hlink, &bus->hlink_list, list) {
- snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL, AZX_MLCTL_SPA, 0);
+ snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL,
+ AZX_MLCTL_SPA, 0);
ret = check_hdac_link_power_active(hlink, false);
if (ret < 0)
return ret;
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 560ec0986e1a..74244d8e2909 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus)
*/
void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
{
+ WARN_ON_ONCE(!bus->rb.area);
+
spin_lock_irq(&bus->reg_lock);
/* CORB set up */
bus->corb.addr = bus->rb.addr;
@@ -383,7 +385,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus)
EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset);
/* reset codec link */
-static int azx_reset(struct hdac_bus *bus, bool full_reset)
+int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
{
if (!full_reset)
goto skip_reset;
@@ -408,7 +410,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
skip_reset:
/* check to see if controller is ready */
if (!snd_hdac_chip_readb(bus, GCTL)) {
- dev_dbg(bus->dev, "azx_reset: controller not ready!\n");
+ dev_dbg(bus->dev, "controller not ready!\n");
return -EBUSY;
}
@@ -423,6 +425,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
return 0;
}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link);
/* enable interrupts */
static void azx_int_enable(struct hdac_bus *bus)
@@ -477,15 +480,17 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
return false;
/* reset controller */
- azx_reset(bus, full_reset);
+ snd_hdac_bus_reset_link(bus, full_reset);
- /* initialize interrupts */
+ /* clear interrupts */
azx_int_clear(bus);
- azx_int_enable(bus);
/* initialize the codec command I/O */
snd_hdac_bus_init_cmd_io(bus);
+ /* enable interrupts after CORB/RIRB buffers are initialized above */
+ azx_int_enable(bus);
+
/* program the position buffer */
if (bus->use_posbuf && bus->posbuf.addr) {
snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
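
The reorder above is about interrupt safety during bring-up: unmasking interrupts before the CORB/RIRB rings exist would let the IRQ handler observe half-initialized command buffers. The resulting init order, in outline (helpers as named in this file):

	snd_hdac_bus_reset_link(bus, full_reset);	/* reset controller  */
	azx_int_clear(bus);				/* clear stale state */
	snd_hdac_bus_init_cmd_io(bus);			/* CORB/RIRB set up  */
	azx_int_enable(bus);				/* only now unmask   */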
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index b5282cbbe489..617ff1aa818f 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -145,9 +145,11 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
if (!acomp->ops) {
request_module("i915");
/* 10s timeout */
- wait_for_completion_timeout(&bind_complete, 10 * 1000);
+ wait_for_completion_timeout(&bind_complete,
+ msecs_to_jiffies(10 * 1000));
}
if (!acomp->ops) {
+ dev_info(bus->dev, "couldn't bind with audio component\n");
snd_hdac_acomp_exit(bus);
return -ENODEV;
}
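
wait_for_completion_timeout() takes its timeout in jiffies, not milliseconds, so the raw constant 10 * 1000 meant "10 seconds" only with HZ=1000 (and 100 seconds with HZ=100). A sketch of the corrected call, with done as a hypothetical completion:

	unsigned long left;

	left = wait_for_completion_timeout(&done, msecs_to_jiffies(10 * 1000));
	if (!left)
		pr_warn("i915 audio component did not bind in 10s\n");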
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index 419e285e0226..996dbc850224 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -359,7 +359,8 @@ static const struct regmap_config hda_regmap_cfg = {
.cache_type = REGCACHE_RBTREE,
.reg_read = hda_reg_read,
.reg_write = hda_reg_write,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
/**
diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c
index 2647309bc675..8afa2f888466 100644
--- a/sound/i2c/cs8427.c
+++ b/sound/i2c/cs8427.c
@@ -118,7 +118,7 @@ static int snd_cs8427_send_corudata(struct snd_i2c_device *device,
struct cs8427 *chip = device->private_data;
char *hw_data = udata ?
chip->playback.hw_udata : chip->playback.hw_status;
- char data[32];
+ unsigned char data[32];
int err, idx;
if (!memcmp(hw_data, ndata, count))
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index ac0ab6eb40f0..47e0b2820ace 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -389,7 +389,8 @@ static int snd_opti9xx_configure(struct snd_opti9xx *chip,
case OPTi9XX_HW_82C931:
/* disable 3D sound (set GPIO1 as output, low) */
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(20), 0x04, 0x0c);
- case OPTi9XX_HW_82C933: /* FALL THROUGH */
+ /* fall through */
+ case OPTi9XX_HW_82C933:
/*
* The BTC 1817DW has QS1000 wavetable which is connected
* to the serial digital input of the OPTI931.
@@ -400,7 +401,8 @@ static int snd_opti9xx_configure(struct snd_opti9xx *chip,
* or digital input signal.
*/
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(26), 0x01, 0x01);
- case OPTi9XX_HW_82C930: /* FALL THROUGH */
+ /* fall through */
+ case OPTi9XX_HW_82C930:
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(6), 0x02, 0x03);
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(3), 0x00, 0xff);
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(4), 0x10 |
diff --git a/sound/isa/sb/sb8_main.c b/sound/isa/sb/sb8_main.c
index 481797744b3c..8288fae90085 100644
--- a/sound/isa/sb/sb8_main.c
+++ b/sound/isa/sb/sb8_main.c
@@ -130,13 +130,13 @@ static int snd_sb8_playback_prepare(struct snd_pcm_substream *substream)
chip->playback_format = SB_DSP_HI_OUTPUT_AUTO;
break;
}
- /* fallthru */
+ /* fall through */
case SB_HW_201:
if (rate > 23000) {
chip->playback_format = SB_DSP_HI_OUTPUT_AUTO;
break;
}
- /* fallthru */
+ /* fall through */
case SB_HW_20:
chip->playback_format = SB_DSP_LO_OUTPUT_AUTO;
break;
@@ -287,7 +287,7 @@ static int snd_sb8_capture_prepare(struct snd_pcm_substream *substream)
chip->capture_format = SB_DSP_HI_INPUT_AUTO;
break;
}
- /* fallthru */
+ /* fall through */
case SB_HW_20:
chip->capture_format = SB_DSP_LO_INPUT_AUTO;
break;
@@ -387,7 +387,7 @@ irqreturn_t snd_sb8dsp_interrupt(struct snd_sb *chip)
case SB_MODE_PLAYBACK_16: /* ok.. playback is active */
if (chip->hardware != SB_HW_JAZZ16)
break;
- /* fallthru */
+ /* fall through */
case SB_MODE_PLAYBACK_8:
substream = chip->playback_substream;
if (chip->playback_format == SB_DSP_OUTPUT)
@@ -397,7 +397,7 @@ irqreturn_t snd_sb8dsp_interrupt(struct snd_sb *chip)
case SB_MODE_CAPTURE_16:
if (chip->hardware != SB_HW_JAZZ16)
break;
- /* fallthru */
+ /* fall through */
case SB_MODE_CAPTURE_8:
substream = chip->capture_substream;
if (chip->capture_format == SB_DSP_INPUT)
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index c8904e732aaa..a4ed54aeaf1d 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -500,7 +500,8 @@ static const struct snd_pcm_hardware hal2_pcm_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_BLOCK_TRANSFER),
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = SNDRV_PCM_FMTBIT_S16_BE,
.rates = SNDRV_PCM_RATE_8000_48000,
.rate_min = 8000,
@@ -563,6 +564,8 @@ static int hal2_playback_prepare(struct snd_pcm_substream *substream)
dac->sample_rate = hal2_compute_rate(dac, runtime->rate);
memset(&dac->pcm_indirect, 0, sizeof(dac->pcm_indirect));
dac->pcm_indirect.hw_buffer_size = H2_BUF_SIZE;
+ dac->pcm_indirect.hw_queue_size = H2_BUF_SIZE / 2;
+ dac->pcm_indirect.hw_io = dac->buffer_dma;
dac->pcm_indirect.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream);
dac->substream = substream;
hal2_setup_dac(hal2);
@@ -575,9 +578,6 @@ static int hal2_playback_trigger(struct snd_pcm_substream *substream, int cmd)
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- hal2->dac.pcm_indirect.hw_io = hal2->dac.buffer_dma;
- hal2->dac.pcm_indirect.hw_data = 0;
- substream->ops->ack(substream);
hal2_start_dac(hal2);
break;
case SNDRV_PCM_TRIGGER_STOP:
@@ -615,7 +615,6 @@ static int hal2_playback_ack(struct snd_pcm_substream *substream)
struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream);
struct hal2_codec *dac = &hal2->dac;
- dac->pcm_indirect.hw_queue_size = H2_BUF_SIZE / 2;
return snd_pcm_indirect_playback_transfer(substream,
&dac->pcm_indirect,
hal2_playback_transfer);
@@ -655,6 +654,7 @@ static int hal2_capture_prepare(struct snd_pcm_substream *substream)
memset(&adc->pcm_indirect, 0, sizeof(adc->pcm_indirect));
adc->pcm_indirect.hw_buffer_size = H2_BUF_SIZE;
adc->pcm_indirect.hw_queue_size = H2_BUF_SIZE / 2;
+ adc->pcm_indirect.hw_io = adc->buffer_dma;
adc->pcm_indirect.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream);
adc->substream = substream;
hal2_setup_adc(hal2);
@@ -667,9 +667,6 @@ static int hal2_capture_trigger(struct snd_pcm_substream *substream, int cmd)
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- hal2->adc.pcm_indirect.hw_io = hal2->adc.buffer_dma;
- hal2->adc.pcm_indirect.hw_data = 0;
- printk(KERN_DEBUG "buffer_dma %x\n", hal2->adc.buffer_dma);
hal2_start_adc(hal2);
break;
case SNDRV_PCM_TRIGGER_STOP:
diff --git a/sound/pci/asihpi/hpios.c b/sound/pci/asihpi/hpios.c
index 5ef4fe964366..7c91330af719 100644
--- a/sound/pci/asihpi/hpios.c
+++ b/sound/pci/asihpi/hpios.c
@@ -49,7 +49,7 @@ u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
/*?? any benefit in using managed dmam_alloc_coherent? */
p_mem_area->vaddr =
dma_alloc_coherent(&pdev->dev, size, &p_mem_area->dma_handle,
- GFP_DMA32 | GFP_KERNEL);
+ GFP_KERNEL);
if (p_mem_area->vaddr) {
HPI_DEBUG_LOG(DEBUG, "allocated %d bytes, dma 0x%x vma %p\n",
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index a1e4944dcfe8..1a41f8c80243 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -903,15 +903,15 @@ static int snd_atiixp_playback_prepare(struct snd_pcm_substream *substream)
case 8:
data |= ATI_REG_OUT_DMA_SLOT_BIT(10) |
ATI_REG_OUT_DMA_SLOT_BIT(11);
- /* fallthru */
+ /* fall through */
case 6:
data |= ATI_REG_OUT_DMA_SLOT_BIT(7) |
ATI_REG_OUT_DMA_SLOT_BIT(8);
- /* fallthru */
+ /* fall through */
case 4:
data |= ATI_REG_OUT_DMA_SLOT_BIT(6) |
ATI_REG_OUT_DMA_SLOT_BIT(9);
- /* fallthru */
+ /* fall through */
default:
data |= ATI_REG_OUT_DMA_SLOT_BIT(3) |
ATI_REG_OUT_DMA_SLOT_BIT(4);
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 2e5b460a847c..96ece1a71cf1 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1115,6 +1115,7 @@ vortex_adbdma_setbuffers(vortex_t * vortex, int adbdma,
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0xc,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 3));
+ /* fall through */
/* 3 pages */
case 3:
dma->cfg0 |= 0x12000000;
@@ -1122,12 +1123,14 @@ vortex_adbdma_setbuffers(vortex_t * vortex, int adbdma,
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0x8,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 2));
+ /* fall through */
/* 2 pages */
case 2:
dma->cfg0 |= 0x88000000 | 0x44000000 | 0x10000000 | (psize - 1);
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0x4,
snd_pcm_sgbuf_get_addr(dma->substream, psize));
+ /* fall through */
/* 1 page */
case 1:
dma->cfg0 |= 0x80000000 | 0x40000000 | ((psize - 1) << 0xc);
@@ -1390,17 +1393,20 @@ vortex_wtdma_setbuffers(vortex_t * vortex, int wtdma,
dma->cfg1 |= 0x88000000 | 0x44000000 | 0x30000000 | (psize-1);
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0xc,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 3));
+ /* fall through */
/* 3 pages */
case 3:
dma->cfg0 |= 0x12000000;
dma->cfg1 |= 0x80000000 | 0x40000000 | ((psize-1) << 0xc);
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0x8,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 2));
+ /* fall through */
/* 2 pages */
case 2:
dma->cfg0 |= 0x88000000 | 0x44000000 | 0x10000000 | (psize-1);
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0x4,
snd_pcm_sgbuf_get_addr(dma->substream, psize));
+ /* fall through */
/* 1 page */
case 1:
dma->cfg0 |= 0x80000000 | 0x40000000 | ((psize-1) << 0xc);
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 146e1a3498c7..750eec437a79 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -1443,7 +1443,8 @@ static const struct snd_pcm_hardware snd_cs46xx_playback =
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/
- /*SNDRV_PCM_INFO_RESUME*/),
+ /*SNDRV_PCM_INFO_RESUME*/ |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |
SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE),
@@ -1465,7 +1466,8 @@ static const struct snd_pcm_hardware snd_cs46xx_capture =
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/
- /*SNDRV_PCM_INFO_RESUME*/),
+ /*SNDRV_PCM_INFO_RESUME*/ |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
.rate_min = 5500,
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 90713741c2dc..6ebe817801ea 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2540,7 +2540,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
emu->support_tlv = 1;
return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
case SNDRV_EMU10K1_IOCTL_INFO:
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
snd_emu10k1_fx8010_info(emu, info);
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 9f2b6097f486..30b3472d0b75 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1753,7 +1753,8 @@ static const struct snd_pcm_hardware snd_emu10k1_fx8010_playback =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_RESUME |
- /* SNDRV_PCM_INFO_MMAP_VALID | */ SNDRV_PCM_INFO_PAUSE),
+ /* SNDRV_PCM_INFO_MMAP_VALID | */ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index b9a6b66aeb0e..df0d636145f8 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -13,7 +13,7 @@
#include <linux/export.h>
#include <linux/sort.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h
index d1a6a9c1329a..f1457c6b3969 100644
--- a/sound/pci/hda/hda_beep.h
+++ b/sound/pci/hda/hda_beep.h
@@ -9,7 +9,7 @@
#ifndef __SOUND_HDA_BEEP_H
#define __SOUND_HDA_BEEP_H
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#define HDA_BEEP_MODE_OFF 0
#define HDA_BEEP_MODE_ON 1
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index d361bb77ca00..9174f1b3a987 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -11,7 +11,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
/*
@@ -81,6 +81,12 @@ static int hda_codec_driver_probe(struct device *dev)
hda_codec_patch_t patch;
int err;
+ if (codec->bus->core.ext_ops) {
+ if (WARN_ON(!codec->bus->core.ext_ops->hdev_attach))
+ return -EINVAL;
+ return codec->bus->core.ext_ops->hdev_attach(&codec->core);
+ }
+
if (WARN_ON(!codec->preset))
return -EINVAL;
@@ -134,6 +140,12 @@ static int hda_codec_driver_remove(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
+ if (codec->bus->core.ext_ops) {
+ if (WARN_ON(!codec->bus->core.ext_ops->hdev_detach))
+ return -EINVAL;
+ return codec->bus->core.ext_ops->hdev_detach(&codec->core);
+ }
+
if (codec->patch_ops.free)
codec->patch_ops.free(codec);
snd_hda_codec_cleanup_for_unbind(codec);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 26d348b47867..0957813939e5 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -27,7 +27,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include <sound/asoundef.h>
#include <sound/tlv.h>
#include <sound/initval.h>
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index a12e594d4e3b..fe2506672a72 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -130,8 +130,9 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
azx_dev->core.bufsize = 0;
azx_dev->core.period_bytes = 0;
azx_dev->core.format_val = 0;
- ret = chip->ops->substream_alloc_pages(chip, substream,
- params_buffer_bytes(hw_params));
+ ret = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+
unlock:
dsp_unlock(azx_dev);
return ret;
@@ -141,7 +142,6 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx_dev *azx_dev = get_azx_dev(substream);
- struct azx *chip = apcm->chip;
struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
int err;
@@ -152,7 +152,7 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
- err = chip->ops->substream_free_pages(chip, substream);
+ err = snd_pcm_lib_free_pages(substream);
azx_stream(azx_dev)->prepared = 0;
dsp_unlock(azx_dev);
return err;
@@ -732,6 +732,7 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
int pcm_dev = cpcm->device;
unsigned int size;
int s, err;
+ int type = SNDRV_DMA_TYPE_DEV_SG;
list_for_each_entry(apcm, &chip->pcm_list, list) {
if (apcm->pcm->device == pcm_dev) {
@@ -770,7 +771,9 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
if (size > MAX_PREALLOC_SIZE)
size = MAX_PREALLOC_SIZE;
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
+ if (chip->uc_buffer)
+ type = SNDRV_DMA_TYPE_DEV_UC_SG;
+ snd_pcm_lib_preallocate_pages_for_all(pcm, type,
chip->card->dev,
size, MAX_PREALLOC_SIZE);
return 0;
@@ -1220,27 +1223,6 @@ void snd_hda_bus_reset(struct hda_bus *bus)
bus->in_reset = 0;
}
-static int get_jackpoll_interval(struct azx *chip)
-{
- int i;
- unsigned int j;
-
- if (!chip->jackpoll_ms)
- return 0;
-
- i = chip->jackpoll_ms[chip->dev_index];
- if (i == 0)
- return 0;
- if (i < 50 || i > 60000)
- j = 0;
- else
- j = msecs_to_jiffies(i);
- if (j == 0)
- dev_warn(chip->card->dev,
- "jackpoll_ms value out of range: %d\n", i);
- return j;
-}
-
/* HD-audio bus initialization */
int azx_bus_init(struct azx *chip, const char *model,
const struct hdac_io_ops *io_ops)
@@ -1323,7 +1305,7 @@ int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
if (err < 0)
continue;
- codec->jackpoll_interval = get_jackpoll_interval(chip);
+ codec->jackpoll_interval = chip->jackpoll_interval;
codec->beep_mode = chip->beep_mode;
codecs++;
}
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index a68e75b00ea3..c95097bb5a0c 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -20,7 +20,7 @@
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/initval.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include <sound/hda_register.h>
#define AZX_MAX_CODECS HDA_MAX_CODECS
@@ -76,7 +76,6 @@ struct azx_dev {
* when link position is not greater than FIFO size
*/
unsigned int insufficient:1;
- unsigned int wc_marked:1;
};
#define azx_stream(dev) (&(dev)->core)
@@ -88,11 +87,6 @@ struct azx;
struct hda_controller_ops {
/* Disable msi if supported, PCI only */
int (*disable_msi_reset_irq)(struct azx *);
- int (*substream_alloc_pages)(struct azx *chip,
- struct snd_pcm_substream *substream,
- size_t size);
- int (*substream_free_pages)(struct azx *chip,
- struct snd_pcm_substream *substream);
void (*pcm_mmap_prepare)(struct snd_pcm_substream *substream,
struct vm_area_struct *area);
/* Check if current position is acceptable */
@@ -127,7 +121,7 @@ struct azx {
int capture_streams;
int capture_index_offset;
int num_streams;
- const int *jackpoll_ms; /* per-card jack poll interval */
+ int jackpoll_interval; /* jack poll interval in jiffies */
/* Register interaction. */
const struct hda_controller_ops *ops;
@@ -160,6 +154,7 @@ struct azx {
unsigned int msi:1;
unsigned int probing:1; /* codec probing phase */
unsigned int snoop:1;
+ unsigned int uc_buffer:1; /* non-cached pages for stream buffers */
unsigned int align_buffer_size:1;
unsigned int region_requested:1;
unsigned int disabled:1; /* disabled by vga_switcheroo */
@@ -175,11 +170,10 @@ struct azx {
#define azx_bus(chip) (&(chip)->bus.core)
#define bus_to_azx(_bus) container_of(_bus, struct azx, bus.core)
-#ifdef CONFIG_X86
-#define azx_snoop(chip) ((chip)->snoop)
-#else
-#define azx_snoop(chip) true
-#endif
+static inline bool azx_snoop(struct azx *chip)
+{
+ return !IS_ENABLED(CONFIG_X86) || chip->snoop;
+}
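Turning the azx_snoop() macro pair into one inline means chip->snoop is parsed and type-checked on every architecture, while the optimizer still folds the test to a constant. A standalone approximation of the IS_ENABLED() trick, simplified from the kernel's include/linux/kconfig.h:

#include <stdbool.h>
#include <stdio.h>

/* CONFIG_FOO expands to 1 when enabled; the placeholder trick turns
 * "defined to 1" into a plain 0/1 token either way */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_X86 1    /* remove this line to emulate a non-x86 build */

struct azx { bool snoop; };

static inline bool azx_snoop(const struct azx *chip)
{
    /* same shape as the new helper: constant-folds per config, but the
     * chip->snoop reference is compiled everywhere */
    return !IS_ENABLED(CONFIG_X86) || chip->snoop;
}

int main(void)
{
    struct azx chip = { .snoop = false };

    printf("IS_ENABLED(CONFIG_X86)=%d azx_snoop=%d\n",
           IS_ENABLED(CONFIG_X86), azx_snoop(&chip));
    return 0;
}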
/*
* macros for easy use
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index ba7fe9b6655c..806b12ed44a2 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -27,7 +27,7 @@
#include <sound/core.h>
#include <asm/unaligned.h>
#include <sound/hda_chmap.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
enum eld_versions {
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 579984ecdec3..276150f29cda 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -32,7 +32,7 @@
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/tlv.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index cc009a4a3d1d..268bba6ec985 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -23,7 +23,7 @@
#include <linux/compat.h>
#include <linux/nospec.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include <sound/hda_hwdep.h>
#include <sound/minors.h>
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1b2ce304152a..d8eb2b5f51ae 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -63,7 +63,7 @@
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/firmware.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_controller.h"
#include "hda_intel.h"
@@ -365,8 +365,10 @@ enum {
*/
#ifdef SUPPORT_VGA_SWITCHEROO
#define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo)
+#define needs_eld_notify_link(chip) ((chip)->need_eld_notify_link)
#else
#define use_vga_switcheroo(chip) 0
+#define needs_eld_notify_link(chip) false
#endif
#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
@@ -397,62 +399,8 @@ static char *driver_short_names[] = {
[AZX_DRIVER_GENERIC] = "HD-Audio Generic",
};
-#ifdef CONFIG_X86
-static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
-{
- int pages;
-
- if (azx_snoop(chip))
- return;
- if (!dmab || !dmab->area || !dmab->bytes)
- return;
-
-#ifdef CONFIG_SND_DMA_SGBUF
- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
- struct snd_sg_buf *sgbuf = dmab->private_data;
- if (chip->driver_type == AZX_DRIVER_CMEDIA)
- return; /* deal with only CORB/RIRB buffers */
- if (on)
- set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
- else
- set_pages_array_wb(sgbuf->page_table, sgbuf->pages);
- return;
- }
-#endif
-
- pages = (dmab->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (on)
- set_memory_wc((unsigned long)dmab->area, pages);
- else
- set_memory_wb((unsigned long)dmab->area, pages);
-}
-
-static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
- bool on)
-{
- __mark_pages_wc(chip, buf, on);
-}
-static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
- struct snd_pcm_substream *substream, bool on)
-{
- if (azx_dev->wc_marked != on) {
- __mark_pages_wc(chip, snd_pcm_get_dma_buf(substream), on);
- azx_dev->wc_marked = on;
- }
-}
-#else
-/* NOP for other archs */
-static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
- bool on)
-{
-}
-static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
- struct snd_pcm_substream *substream, bool on)
-{
-}
-#endif
-
static int azx_acquire_irq(struct azx *chip, int do_disconnect);
+static void set_default_power_save(struct azx *chip);
/*
* initialize the PCI registers
@@ -1201,6 +1149,10 @@ static int azx_runtime_idle(struct device *dev)
azx_bus(chip)->codec_powered || !chip->running)
return -EBUSY;
+ /* ELD notification gets broken when HD-audio bus is off */
+ if (needs_eld_notify_link(hda))
+ return -EBUSY;
+
return 0;
}
@@ -1298,6 +1250,36 @@ static bool azx_vs_can_switch(struct pci_dev *pci)
return true;
}
+/*
+ * The discrete GPU cannot power down unless the HDA controller runtime
+ * suspends, so activate runtime PM on codecs even if power_save == 0.
+ */
+static void setup_vga_switcheroo_runtime_pm(struct azx *chip)
+{
+ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+ struct hda_codec *codec;
+
+ if (hda->use_vga_switcheroo && !hda->need_eld_notify_link) {
+ list_for_each_codec(codec, &chip->bus)
+ codec->auto_runtime_pm = 1;
+ /* reset the power save setup */
+ if (chip->running)
+ set_default_power_save(chip);
+ }
+}
+
+static void azx_vs_gpu_bound(struct pci_dev *pci,
+ enum vga_switcheroo_client_id client_id)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct azx *chip = card->private_data;
+ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+
+ if (client_id == VGA_SWITCHEROO_DIS)
+ hda->need_eld_notify_link = 0;
+ setup_vga_switcheroo_runtime_pm(chip);
+}
+
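Together with init_vga_switcheroo() below, this forms a small state machine: the audio client starts with need_eld_notify_link set (and azx_runtime_idle() above vetoes suspend so ELD notification keeps working over the bus), then once vga_switcheroo reports which GPU the controller is bound to, the flag is dropped for the discrete case and codec runtime PM is enabled. A condensed standalone sketch, with stand-in types and error codes:

#include <stdio.h>

enum client_id { VGA_SWITCHEROO_IGD, VGA_SWITCHEROO_DIS };

struct hda {
    int use_vga_switcheroo;
    int need_eld_notify_link;
    int auto_runtime_pm;    /* stands in for the per-codec flag */
};

static void setup_runtime_pm(struct hda *hda)
{
    if (hda->use_vga_switcheroo && !hda->need_eld_notify_link)
        hda->auto_runtime_pm = 1;
}

static void gpu_bound(struct hda *hda, enum client_id id)
{
    if (id == VGA_SWITCHEROO_DIS)    /* bound to the discrete GPU */
        hda->need_eld_notify_link = 0;
    setup_runtime_pm(hda);
}

static int runtime_idle(const struct hda *hda)
{
    /* mirrors the veto added to azx_runtime_idle() */
    return hda->need_eld_notify_link ? -16 /* -EBUSY */ : 0;
}

int main(void)
{
    struct hda hda = { .use_vga_switcheroo = 1, .need_eld_notify_link = 1 };

    printf("before bind: idle=%d\n", runtime_idle(&hda));
    gpu_bound(&hda, VGA_SWITCHEROO_DIS);
    printf("after bind:  idle=%d auto_pm=%d\n",
           runtime_idle(&hda), hda.auto_runtime_pm);
    return 0;
}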
static void init_vga_switcheroo(struct azx *chip)
{
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -1306,6 +1288,7 @@ static void init_vga_switcheroo(struct azx *chip)
dev_info(chip->card->dev,
"Handle vga_switcheroo audio client\n");
hda->use_vga_switcheroo = 1;
+ hda->need_eld_notify_link = 1; /* cleared in gpu_bound op */
chip->driver_caps |= AZX_DCAPS_PM_RUNTIME;
pci_dev_put(p);
}
@@ -1314,6 +1297,7 @@ static void init_vga_switcheroo(struct azx *chip)
static const struct vga_switcheroo_client_ops azx_vs_ops = {
.set_gpu_state = azx_vs_set_state,
.can_switch = azx_vs_can_switch,
+ .gpu_bound = azx_vs_gpu_bound,
};
static int register_vga_switcheroo(struct azx *chip)
@@ -1339,6 +1323,7 @@ static int register_vga_switcheroo(struct azx *chip)
#define init_vga_switcheroo(chip) /* NOP */
#define register_vga_switcheroo(chip) 0
#define check_hdmi_disabled(pci) false
+#define setup_vga_switcheroo_runtime_pm(chip) /* NOP */
#endif /* SUPPORT_VGA_SWITCHEROO */
/*
@@ -1352,6 +1337,7 @@ static int azx_free(struct azx *chip)
if (azx_has_pm_runtime(chip) && chip->running)
pm_runtime_get_noresume(&pci->dev);
+ chip->running = 0;
azx_del_card_list(chip);
@@ -1637,6 +1623,7 @@ static void azx_check_snoop_available(struct azx *chip)
dev_info(chip->card->dev, "Force to %s mode by module option\n",
snoop ? "snoop" : "non-snoop");
chip->snoop = snoop;
+ chip->uc_buffer = !snoop;
return;
}
@@ -1657,8 +1644,12 @@ static void azx_check_snoop_available(struct azx *chip)
snoop = false;
chip->snoop = snoop;
- if (!snoop)
+ if (!snoop) {
dev_info(chip->card->dev, "Force to non-snoop mode\n");
+ /* C-Media requires non-cached pages only for CORB/RIRB */
+ if (chip->driver_type != AZX_DRIVER_CMEDIA)
+ chip->uc_buffer = true;
+ }
}
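In the auto-detect path above, uc_buffer is derived from the snoop decision with one carve-out: C-Media parts only need the CORB/RIRB (allocated through dma_alloc_pages()) uncached, not the stream buffers. A tiny standalone sketch of that decision; note the module-option path earlier in the same function sets uc_buffer = !snoop unconditionally:

#include <stdio.h>

enum { DRIVER_GENERIC, DRIVER_CMEDIA };

/* mirrors the auto-detect branch of azx_check_snoop_available() */
static int want_uc_buffer(int snoop, int driver_type)
{
    return !snoop && driver_type != DRIVER_CMEDIA;
}

int main(void)
{
    printf("snoop/generic     -> uc=%d\n", want_uc_buffer(1, DRIVER_GENERIC));
    printf("non-snoop/generic -> uc=%d\n", want_uc_buffer(0, DRIVER_GENERIC));
    printf("non-snoop/cmedia  -> uc=%d\n", want_uc_buffer(0, DRIVER_CMEDIA));
    return 0;
}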
static void azx_probe_work(struct work_struct *work)
@@ -1726,7 +1717,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
chip->driver_type = driver_caps & 0xff;
check_msi(chip);
chip->dev_index = dev;
- chip->jackpoll_ms = jackpoll_ms;
+ if (jackpoll_ms[dev] >= 50 && jackpoll_ms[dev] <= 60000)
+ chip->jackpoll_interval = msecs_to_jiffies(jackpoll_ms[dev]);
INIT_LIST_HEAD(&chip->pcm_list);
INIT_WORK(&hda->irq_pending_work, azx_irq_pending_work);
INIT_LIST_HEAD(&hda->list);
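The jack-poll interval is now validated and converted once at probe time instead of per-codec through the removed get_jackpoll_interval() helper; note that out-of-range values now silently disable polling where the old code also printed a warning. A standalone sketch of the clamp-and-convert step (the HZ value and rounding below are illustrative; the kernel's msecs_to_jiffies() is HZ-aware):

#include <stdio.h>

#define HZ 250    /* illustrative tick rate; real kernels vary */

/* rough stand-in for msecs_to_jiffies(): round up to whole ticks */
static unsigned long msecs_to_jiffies_demo(unsigned int ms)
{
    return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
    const int vals[] = { 0, 10, 50, 5000, 60000, 90000 };

    for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
        unsigned long interval = 0;    /* 0 = polling disabled */

        /* same 50ms..60s window the probe code enforces */
        if (vals[i] >= 50 && vals[i] <= 60000)
            interval = msecs_to_jiffies_demo(vals[i]);
        printf("%6d ms -> %5lu jiffies\n", vals[i], interval);
    }
    return 0;
}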
@@ -2049,55 +2041,24 @@ static int dma_alloc_pages(struct hdac_bus *bus,
struct snd_dma_buffer *buf)
{
struct azx *chip = bus_to_azx(bus);
- int err;
- err = snd_dma_alloc_pages(type,
- bus->dev,
- size, buf);
- if (err < 0)
- return err;
- mark_pages_wc(chip, buf, true);
- return 0;
+ if (!azx_snoop(chip) && type == SNDRV_DMA_TYPE_DEV)
+ type = SNDRV_DMA_TYPE_DEV_UC;
+ return snd_dma_alloc_pages(type, bus->dev, size, buf);
}
static void dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf)
{
- struct azx *chip = bus_to_azx(bus);
-
- mark_pages_wc(chip, buf, false);
snd_dma_free_pages(buf);
}
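dma_alloc_pages() now expresses "non-snoop needs uncached memory" through the allocator type itself rather than allocating cached pages and remapping them afterwards, which is what the deleted mark_pages_wc() machinery did. A small sketch of the type upgrade, with placeholder constants for the sound/memalloc.h values:

#include <stdio.h>

enum { DMA_TYPE_DEV = 2, DMA_TYPE_DEV_UC = 5 };    /* placeholders */

static int alloc_type(int snooping, int requested)
{
    /* only plain device buffers get upgraded; S/G stream buffers are
     * handled at preallocation time via the uc_buffer flag instead */
    if (!snooping && requested == DMA_TYPE_DEV)
        return DMA_TYPE_DEV_UC;
    return requested;
}

int main(void)
{
    printf("snoop/DEV     -> %d\n", alloc_type(1, DMA_TYPE_DEV));
    printf("non-snoop/DEV -> %d\n", alloc_type(0, DMA_TYPE_DEV));
    return 0;
}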
-static int substream_alloc_pages(struct azx *chip,
- struct snd_pcm_substream *substream,
- size_t size)
-{
- struct azx_dev *azx_dev = get_azx_dev(substream);
- int ret;
-
- mark_runtime_wc(chip, azx_dev, substream, false);
- ret = snd_pcm_lib_malloc_pages(substream, size);
- if (ret < 0)
- return ret;
- mark_runtime_wc(chip, azx_dev, substream, true);
- return 0;
-}
-
-static int substream_free_pages(struct azx *chip,
- struct snd_pcm_substream *substream)
-{
- struct azx_dev *azx_dev = get_azx_dev(substream);
- mark_runtime_wc(chip, azx_dev, substream, false);
- return snd_pcm_lib_free_pages(substream);
-}
-
static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
#ifdef CONFIG_X86
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
- if (!azx_snoop(chip) && chip->driver_type != AZX_DRIVER_CMEDIA)
+ if (chip->uc_buffer)
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
#endif
}
@@ -2115,8 +2076,6 @@ static const struct hdac_io_ops pci_hda_io_ops = {
static const struct hda_controller_ops pci_hda_ops = {
.disable_msi_reset_irq = disable_msi_reset_irq,
- .substream_alloc_pages = substream_alloc_pages,
- .substream_free_pages = substream_free_pages,
.pcm_mmap_prepare = pcm_mmap_prepare,
.position_check = azx_position_check,
.link_power = azx_intel_link_power,
@@ -2216,8 +2175,12 @@ static struct snd_pci_quirk power_save_blacklist[] = {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */
SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1028, 0x0497, "Dell Precision T3600", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
/* Note the P55A-UD3 and Z87-D3HP share the subsys id for the HDA dev */
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P55A-UD3 / Z87-D3HP", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
/* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
@@ -2230,6 +2193,25 @@ static struct snd_pci_quirk power_save_blacklist[] = {
};
#endif /* CONFIG_PM */
+static void set_default_power_save(struct azx *chip)
+{
+ int val = power_save;
+
+#ifdef CONFIG_PM
+ if (pm_blacklist) {
+ const struct snd_pci_quirk *q;
+
+ q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+ if (q && val) {
+ dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+ q->subvendor, q->subdevice);
+ val = 0;
+ }
+ }
+#endif /* CONFIG_PM */
+ snd_hda_set_power_save(&chip->bus, val * 1000);
+}
+
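set_default_power_save() centralizes logic that previously lived inline in azx_probe_continue() (removed further down), so the vga_switcheroo gpu_bound path can re-run it after enabling codec runtime PM. A standalone sketch of the blacklist lookup and the seconds-to-milliseconds hand-off; the table walk stands in for snd_pci_quirk_lookup(), and the entries are copied from the quirk list above:

#include <stdio.h>

struct quirk { unsigned short subvendor, subdevice; const char *name; };

/* two entries copied from power_save_blacklist above */
static const struct quirk blacklist[] = {
    { 0x1028, 0x0497, "Dell Precision T3600" },
    { 0x8086, 0x2040, "Intel DZ77BH-55K" },
    { 0, 0, NULL }
};

static const struct quirk *lookup(unsigned short sv, unsigned short sd)
{
    for (const struct quirk *q = blacklist; q->name; q++)
        if (q->subvendor == sv && q->subdevice == sd)
            return q;
    return NULL;
}

int main(void)
{
    int power_save = 10;    /* module parameter, in seconds */
    const struct quirk *q = lookup(0x8086, 0x2040);

    if (q && power_save) {
        printf("%04x:%04x is blacklisted, forcing power_save to 0\n",
               q->subvendor, q->subdevice);
        power_save = 0;
    }
    /* snd_hda_set_power_save() takes milliseconds, hence val * 1000 */
    printf("effective timeout: %d ms\n", power_save * 1000);
    return 0;
}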
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
[AZX_DRIVER_NVIDIA] = 8,
@@ -2241,9 +2223,7 @@ static int azx_probe_continue(struct azx *chip)
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
struct hdac_bus *bus = azx_bus(chip);
struct pci_dev *pci = chip->pci;
- struct hda_codec *codec;
int dev = chip->dev_index;
- int val;
int err;
hda->probe_continued = 1;
@@ -2322,31 +2302,13 @@ static int azx_probe_continue(struct azx *chip)
if (err < 0)
goto out_free;
+ setup_vga_switcheroo_runtime_pm(chip);
+
chip->running = 1;
azx_add_card_list(chip);
- val = power_save;
-#ifdef CONFIG_PM
- if (pm_blacklist) {
- const struct snd_pci_quirk *q;
+ set_default_power_save(chip);
- q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
- if (q && val) {
- dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
- q->subvendor, q->subdevice);
- val = 0;
- }
- }
-#endif /* CONFIG_PM */
- /*
- * The discrete GPU cannot power down unless the HDA controller runtime
- * suspends, so activate runtime PM on codecs even if power_save == 0.
- */
- if (use_vga_switcheroo(hda))
- list_for_each_codec(codec, &chip->bus)
- codec->auto_runtime_pm = 1;
-
- snd_hda_set_power_save(&chip->bus, val * 1000);
if (azx_has_pm_runtime(chip))
pm_runtime_put_autosuspend(&pci->dev);
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
index e3a3d318d2e5..f59719e06b91 100644
--- a/sound/pci/hda/hda_intel.h
+++ b/sound/pci/hda/hda_intel.h
@@ -37,6 +37,7 @@ struct hda_intel {
/* vga_switcheroo setup */
unsigned int use_vga_switcheroo:1;
+ unsigned int need_eld_notify_link:1;
unsigned int vga_switcheroo_registered:1;
unsigned int init_failed:1; /* delayed init failed */
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index a33234e04d4f..c499727920e6 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -15,7 +15,7 @@
#include <sound/core.h>
#include <sound/control.h>
#include <sound/jack.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index c6b778b2580c..a65740419650 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <sound/core.h>
#include <linux/module.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
static int dump_coef = -1;
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 6ec79c58d48d..c154b19a0c45 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -14,7 +14,7 @@
#include <linux/string.h>
#include <linux/export.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include <sound/hda_hwdep.h>
#include <sound/minors.h>
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 0621920f7617..dd7d4242d6d2 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -35,7 +35,7 @@
#include <sound/core.h>
#include <sound/initval.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_controller.h"
/* Defines for Nvidia Tegra HDA support */
@@ -99,19 +99,6 @@ static void dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf)
snd_dma_free_pages(buf);
}
-static int substream_alloc_pages(struct azx *chip,
- struct snd_pcm_substream *substream,
- size_t size)
-{
- return snd_pcm_lib_malloc_pages(substream, size);
-}
-
-static int substream_free_pages(struct azx *chip,
- struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
/*
* Register access ops. Tegra HDA register access is DWORD only.
*/
@@ -180,10 +167,7 @@ static const struct hdac_io_ops hda_tegra_io_ops = {
.dma_free_pages = dma_free_pages,
};
-static const struct hda_controller_ops hda_tegra_ops = {
- .substream_alloc_pages = substream_alloc_pages,
- .substream_free_pages = substream_free_pages,
-};
+static const struct hda_controller_ops hda_tegra_ops; /* nothing special */
static void hda_tegra_init(struct hda_tegra *hda)
{
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index fd476fb40e1b..ebfd0be885b3 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -24,7 +24,7 @@
#include <linux/module.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_beep.h"
diff --git a/sound/pci/hda/patch_ca0110.c b/sound/pci/hda/patch_ca0110.c
index c2d9ee9cfdc0..21d0f0610913 100644
--- a/sound/pci/hda/patch_ca0110.c
+++ b/sound/pci/hda/patch_ca0110.c
@@ -22,7 +22,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0166a3d7cd55..0a24037184c3 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -31,8 +31,9 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/pci.h>
+#include <asm/io.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
@@ -81,12 +82,12 @@
#define SCP_GET 1
#define EFX_FILE "ctefx.bin"
-#define SBZ_EFX_FILE "ctefx-sbz.bin"
+#define DESKTOP_EFX_FILE "ctefx-desktop.bin"
#define R3DI_EFX_FILE "ctefx-r3di.bin"
#ifdef CONFIG_SND_HDA_CODEC_CA0132_DSP
MODULE_FIRMWARE(EFX_FILE);
-MODULE_FIRMWARE(SBZ_EFX_FILE);
+MODULE_FIRMWARE(DESKTOP_EFX_FILE);
MODULE_FIRMWARE(R3DI_EFX_FILE);
#endif
@@ -152,7 +153,10 @@ enum {
XBASS_XOVER,
EQ_PRESET_ENUM,
SMART_VOLUME_ENUM,
- MIC_BOOST_ENUM
+ MIC_BOOST_ENUM,
+ AE5_HEADPHONE_GAIN_ENUM,
+ AE5_SOUND_FILTER_ENUM,
+ ZXR_HEADPHONE_GAIN
#define EFFECTS_COUNT (EFFECT_END_NID - EFFECT_START_NID)
};
@@ -666,6 +670,65 @@ static const struct ct_dsp_volume_ctl ca0132_alt_vol_ctls[] = {
}
};
+/* Values for ca0113_mmio_command_set for selecting output. */
+#define AE5_CA0113_OUT_SET_COMMANDS 6
+struct ae5_ca0113_output_set {
+ unsigned int group[AE5_CA0113_OUT_SET_COMMANDS];
+ unsigned int target[AE5_CA0113_OUT_SET_COMMANDS];
+ unsigned int vals[AE5_CA0113_OUT_SET_COMMANDS];
+};
+
+static const struct ae5_ca0113_output_set ae5_ca0113_output_presets[] = {
+ { .group = { 0x30, 0x30, 0x48, 0x48, 0x48, 0x30 },
+ .target = { 0x2e, 0x30, 0x0d, 0x17, 0x19, 0x32 },
+ .vals = { 0x00, 0x00, 0x40, 0x00, 0x00, 0x3f }
+ },
+ { .group = { 0x30, 0x30, 0x48, 0x48, 0x48, 0x30 },
+ .target = { 0x2e, 0x30, 0x0d, 0x17, 0x19, 0x32 },
+ .vals = { 0x3f, 0x3f, 0x00, 0x00, 0x00, 0x00 }
+ },
+ { .group = { 0x30, 0x30, 0x48, 0x48, 0x48, 0x30 },
+ .target = { 0x2e, 0x30, 0x0d, 0x17, 0x19, 0x32 },
+ .vals = { 0x00, 0x00, 0x40, 0x00, 0x00, 0x3f }
+ }
+};
+
+/* ae5 ca0113 command sequences to set headphone gain levels. */
+#define AE5_HEADPHONE_GAIN_PRESET_MAX_COMMANDS 4
+struct ae5_headphone_gain_set {
+ char *name;
+ unsigned int vals[AE5_HEADPHONE_GAIN_PRESET_MAX_COMMANDS];
+};
+
+static const struct ae5_headphone_gain_set ae5_headphone_gain_presets[] = {
+ { .name = "Low (16-31",
+ .vals = { 0xff, 0x2c, 0xf5, 0x32 }
+ },
+ { .name = "Medium (32-149",
+ .vals = { 0x38, 0xa8, 0x3e, 0x4c }
+ },
+ { .name = "High (150-600",
+ .vals = { 0xff, 0xff, 0xff, 0x7f }
+ }
+};
+
+struct ae5_filter_set {
+ char *name;
+ unsigned int val;
+};
+
+static const struct ae5_filter_set ae5_filter_presets[] = {
+ { .name = "Slow Roll Off",
+ .val = 0xa0
+ },
+ { .name = "Minimum Phase",
+ .val = 0xc0
+ },
+ { .name = "Fast Roll Off",
+ .val = 0x80
+ }
+};
+
enum hda_cmd_vendor_io {
/* for DspIO node */
VENDOR_DSPIO_SCP_WRITE_DATA_LOW = 0x000,
@@ -685,6 +748,9 @@ enum hda_cmd_vendor_io {
VENDOR_CHIPIO_DATA_LOW = 0x300,
VENDOR_CHIPIO_DATA_HIGH = 0x400,
+ VENDOR_CHIPIO_8051_WRITE_DIRECT = 0x500,
+ VENDOR_CHIPIO_8051_READ_DIRECT = 0xD00,
+
VENDOR_CHIPIO_GET_PARAMETER = 0xF00,
VENDOR_CHIPIO_STATUS = 0xF01,
VENDOR_CHIPIO_HIC_POST_READ = 0x702,
@@ -692,6 +758,9 @@ enum hda_cmd_vendor_io {
VENDOR_CHIPIO_8051_DATA_WRITE = 0x707,
VENDOR_CHIPIO_8051_DATA_READ = 0xF07,
+ VENDOR_CHIPIO_8051_PMEM_READ = 0xF08,
+ VENDOR_CHIPIO_8051_IRAM_WRITE = 0x709,
+ VENDOR_CHIPIO_8051_IRAM_READ = 0xF09,
VENDOR_CHIPIO_CT_EXTENSIONS_ENABLE = 0x70A,
VENDOR_CHIPIO_CT_EXTENSIONS_GET = 0xF0A,
@@ -798,6 +867,12 @@ enum control_param_id {
* impedance is selected */
CONTROL_PARAM_PORTD_160OHM_GAIN = 10,
+ /*
+ * This control param name was found in the 8051 memory, and makes
+ * sense given that the AE-5 uses it and has the ASI flag set.
+ */
+ CONTROL_PARAM_ASI = 23,
+
/* Stream Control */
/* Select stream with the given ID */
@@ -955,7 +1030,11 @@ struct ca0132_spec {
long eq_preset_val;
unsigned int tlv[4];
struct hda_vmaster_mute_hook vmaster_mute;
-
+ /* AE-5 Control values */
+ unsigned char ae5_headphone_gain_val;
+ unsigned char ae5_filter_val;
+ /* ZxR Control Values */
+ unsigned char zxr_gain_set;
struct hda_codec *codec;
struct delayed_work unsol_hp_work;
@@ -995,8 +1074,11 @@ enum {
QUIRK_ALIENWARE,
QUIRK_ALIENWARE_M17XR4,
QUIRK_SBZ,
+ QUIRK_ZXR,
+ QUIRK_ZXR_DBPRO,
QUIRK_R3DI,
QUIRK_R3D,
+ QUIRK_AE5,
};
static const struct hda_pintbl alienware_pincfgs[] = {
@@ -1028,6 +1110,21 @@ static const struct hda_pintbl sbz_pincfgs[] = {
{}
};
+/* Sound Blaster ZxR pin configs taken from Windows Driver */
+static const struct hda_pintbl zxr_pincfgs[] = {
+ { 0x0b, 0x01047110 }, /* Port G -- Lineout FRONT L/R */
+ { 0x0c, 0x414510f0 }, /* SPDIF Out 1 - Disabled */
+ { 0x0d, 0x014510f0 }, /* Digital Out */
+ { 0x0e, 0x41c520f0 }, /* SPDIF In - Disabled */
+ { 0x0f, 0x0122711f }, /* Port A -- BackPanel HP */
+ { 0x10, 0x01017111 }, /* Port D -- Center/LFE */
+ { 0x11, 0x01017114 }, /* Port B -- LineMicIn2 / Rear L/R */
+ { 0x12, 0x01a271f0 }, /* Port C -- LineIn1 */
+ { 0x13, 0x908700f0 }, /* What U Hear In */
+ { 0x18, 0x50d000f0 }, /* N/A */
+ {}
+};
+
/* Recon3D pin configs taken from Windows Driver */
static const struct hda_pintbl r3d_pincfgs[] = {
{ 0x0b, 0x01014110 }, /* Port G -- Lineout FRONT L/R */
@@ -1043,6 +1140,21 @@ static const struct hda_pintbl r3d_pincfgs[] = {
{}
};
+/* Sound Blaster AE-5 pin configs taken from Windows Driver */
+static const struct hda_pintbl ae5_pincfgs[] = {
+ { 0x0b, 0x01017010 }, /* Port G -- Lineout FRONT L/R */
+ { 0x0c, 0x014510f0 }, /* SPDIF Out 1 */
+ { 0x0d, 0x014510f0 }, /* Digital Out */
+ { 0x0e, 0x01c510f0 }, /* SPDIF In */
+ { 0x0f, 0x01017114 }, /* Port A -- Rear L/R. */
+ { 0x10, 0x01017012 }, /* Port D -- Center/LFE or FP Hp */
+ { 0x11, 0x01a170ff }, /* Port B -- LineMicIn2 / Rear Headphone */
+ { 0x12, 0x01a170f0 }, /* Port C -- LineIn1 */
+ { 0x13, 0x908700f0 }, /* What U Hear In */
+ { 0x18, 0x50d000f0 }, /* N/A */
+ {}
+};
+
/* Recon3D integrated pin configs taken from Windows Driver */
static const struct hda_pintbl r3di_pincfgs[] = {
{ 0x0b, 0x01014110 }, /* Port G -- Lineout FRONT L/R */
@@ -1069,6 +1181,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
+ SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
{}
};
@@ -1454,6 +1567,20 @@ static void chipio_set_conn_rate(struct hda_codec *codec,
}
/*
+ * Writes to the 8051's internal address space directly instead of indirectly,
+ * giving access to the special function registers located at addresses
+ * 0x80-0xFF.
+ */
+static void chipio_8051_write_direct(struct hda_codec *codec,
+ unsigned int addr, unsigned int data)
+{
+ unsigned int verb;
+
+ verb = VENDOR_CHIPIO_8051_WRITE_DIRECT | data;
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, verb, addr);
+}
+
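For reference, the verb built here travels down the normal codec command path: with a 12-bit vendor verb, the data byte lands in the verb's low 8 bits and the 8051 address rides as the payload byte. A standalone sketch of the packing; the (caddr << 28) | (nid << 20) | (verb << 8) | payload layout follows the HD-audio command format, and the 0x15 control-widget NID is an assumption made for illustration:

#include <stdio.h>

#define VENDOR_CHIPIO_8051_WRITE_DIRECT 0x500

/* HD-audio command word for a 12-bit verb */
static unsigned int make_cmd(unsigned int caddr, unsigned int nid,
                             unsigned int verb, unsigned int payload)
{
    return (caddr << 28) | (nid << 20) | (verb << 8) | payload;
}

int main(void)
{
    unsigned int addr = 0x93, data = 0x10;   /* values the AE-5 setup uses */
    unsigned int verb = VENDOR_CHIPIO_8051_WRITE_DIRECT | data;

    printf("cmd = 0x%08x\n", make_cmd(0, 0x15, verb, addr));
    return 0;
}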
+/*
* Enable clocks.
*/
static void chipio_enable_clocks(struct hda_codec *codec)
@@ -3088,7 +3215,9 @@ static bool dspload_wait_loaded(struct hda_codec *codec)
}
/*
- * Setup GPIO for the other variants of Core3D.
+ * ca0113-related functions. The ca0113 acts as the HDA bus for the PCI-E
+ * based cards, and has a second mmio region, region2, that's used for special
+ * commands.
*/
/*
@@ -3096,8 +3225,11 @@ static bool dspload_wait_loaded(struct hda_codec *codec)
* the mmio address 0x320 is used to set GPIO pins. The format for the data:
* the first eight bits are just the number of the pin. So far, I've only seen
* this number go to 7.
+ * AE-5 note: The AE-5 seems to use pins 2 and 3 to somehow set the color value
+ * of the on-card LED. It seems to use pin 2 for data, then toggles 3 to on and
+ * then off to send that bit.
*/
-static void ca0132_mmio_gpio_set(struct hda_codec *codec, unsigned int gpio_pin,
+static void ca0113_mmio_gpio_set(struct hda_codec *codec, unsigned int gpio_pin,
bool enable)
{
struct ca0132_spec *spec = codec->spec;
@@ -3110,6 +3242,89 @@ static void ca0132_mmio_gpio_set(struct hda_codec *codec, unsigned int gpio_pin,
}
/*
+ * Special pci region2 commands that are only used by the AE-5. They follow
+ * a set format, and require reads at certain points to seemingly 'clear'
+ * the response data. My first tests didn't do these reads, and would cause
+ * the card to get locked up until the memory was read. These commands
+ * seem to work with three distinct values that I've taken to calling group,
+ * target-id, and value.
+ */
+static void ca0113_mmio_command_set(struct hda_codec *codec, unsigned int group,
+ unsigned int target, unsigned int value)
+{
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int write_val;
+
+ writel(0x0000007e, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ writel(0x0000005a, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+
+ writel(0x00800005, spec->mem_base + 0x20c);
+ writel(group, spec->mem_base + 0x804);
+
+ writel(0x00800005, spec->mem_base + 0x20c);
+ write_val = (target & 0xff);
+ write_val |= (value << 8);
+
+ writel(write_val, spec->mem_base + 0x204);
+ /*
+ * Need delay here or else it goes too fast and works inconsistently.
+ */
+ msleep(20);
+
+ readl(spec->mem_base + 0x860);
+ readl(spec->mem_base + 0x854);
+ readl(spec->mem_base + 0x840);
+
+ writel(0x00800004, spec->mem_base + 0x20c);
+ writel(0x00000000, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+}
+
+/*
+ * This second type of command is used for setting the sound filter type.
+ */
+static void ca0113_mmio_command_set_type2(struct hda_codec *codec,
+ unsigned int group, unsigned int target, unsigned int value)
+{
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int write_val;
+
+ writel(0x0000007e, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ writel(0x0000005a, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+
+ writel(0x00800003, spec->mem_base + 0x20c);
+ writel(group, spec->mem_base + 0x804);
+
+ writel(0x00800005, spec->mem_base + 0x20c);
+ write_val = (target & 0xff);
+ write_val |= (value << 8);
+
+ writel(write_val, spec->mem_base + 0x204);
+ msleep(20);
+ readl(spec->mem_base + 0x860);
+ readl(spec->mem_base + 0x854);
+ readl(spec->mem_base + 0x840);
+
+ writel(0x00800004, spec->mem_base + 0x20c);
+ writel(0x00000000, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+}
+
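The two sequences above are identical except for the opcode written to 0x20c before the group write (0x00800005 versus 0x00800003). A hypothetical consolidation, not part of the driver, that carries the difference as a parameter:

/* hypothetical helper: both sequences share the unlock preamble, the
 * group/value writes, the settle delay, the response-clearing reads,
 * and the closing writes */
static void ca0113_mmio_command_common(struct hda_codec *codec,
				       unsigned int group_op,
				       unsigned int group,
				       unsigned int target,
				       unsigned int value)
{
	struct ca0132_spec *spec = codec->spec;

	writel(0x0000007e, spec->mem_base + 0x210);
	readl(spec->mem_base + 0x210);
	writel(0x0000005a, spec->mem_base + 0x210);
	readl(spec->mem_base + 0x210);
	readl(spec->mem_base + 0x210);

	writel(group_op, spec->mem_base + 0x20c);	/* the only difference */
	writel(group, spec->mem_base + 0x804);

	writel(0x00800005, spec->mem_base + 0x20c);
	writel((target & 0xff) | (value << 8), spec->mem_base + 0x204);
	msleep(20);	/* both callers need the settle time */

	readl(spec->mem_base + 0x860);
	readl(spec->mem_base + 0x854);
	readl(spec->mem_base + 0x840);

	writel(0x00800004, spec->mem_base + 0x20c);
	writel(0x00000000, spec->mem_base + 0x210);
	readl(spec->mem_base + 0x210);
	readl(spec->mem_base + 0x210);
}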
+/*
+ * Setup GPIO for the other variants of Core3D.
+ */
+
+/*
* Sets up the GPIO pins so that they are discoverable. If this isn't done,
* the card shows as having no GPIO pins.
*/
@@ -3119,6 +3334,7 @@ static void ca0132_gpio_init(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_SBZ:
+ case QUIRK_AE5:
snd_hda_codec_write(codec, 0x01, 0, 0x793, 0x00);
snd_hda_codec_write(codec, 0x01, 0, 0x794, 0x53);
snd_hda_codec_write(codec, 0x01, 0, 0x790, 0x23);
@@ -3928,6 +4144,138 @@ exit:
return err < 0 ? err : 0;
}
+static int ae5_headphone_gain_set(struct hda_codec *codec, long val);
+static int zxr_headphone_gain_set(struct hda_codec *codec, long val);
+static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val);
+
+static void ae5_mmio_select_out(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int i;
+
+ for (i = 0; i < AE5_CA0113_OUT_SET_COMMANDS; i++)
+ ca0113_mmio_command_set(codec,
+ ae5_ca0113_output_presets[spec->cur_out_type].group[i],
+ ae5_ca0113_output_presets[spec->cur_out_type].target[i],
+ ae5_ca0113_output_presets[spec->cur_out_type].vals[i]);
+}
+
+/*
+ * These are the commands needed to set up the output on each of the
+ * different card types.
+ */
+static void ca0132_alt_select_out_quirk_handler(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int tmp;
+
+ switch (spec->cur_out_type) {
+ case SPEAKER_OUT:
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ ca0113_mmio_gpio_set(codec, 7, false);
+ ca0113_mmio_gpio_set(codec, 4, true);
+ ca0113_mmio_gpio_set(codec, 1, true);
+ chipio_set_control_param(codec, 0x0d, 0x18);
+ break;
+ case QUIRK_ZXR:
+ ca0113_mmio_gpio_set(codec, 2, true);
+ ca0113_mmio_gpio_set(codec, 3, true);
+ ca0113_mmio_gpio_set(codec, 5, false);
+ zxr_headphone_gain_set(codec, 0);
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ break;
+ case QUIRK_R3DI:
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ r3di_gpio_out_set(codec, R3DI_LINE_OUT);
+ break;
+ case QUIRK_R3D:
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ ca0113_mmio_gpio_set(codec, 1, true);
+ break;
+ case QUIRK_AE5:
+ ae5_mmio_select_out(codec);
+ ae5_headphone_gain_set(codec, 2);
+ tmp = FLOAT_ZERO;
+ dspio_set_uint_param(codec, 0x96, 0x29, tmp);
+ dspio_set_uint_param(codec, 0x96, 0x2a, tmp);
+ chipio_set_control_param(codec, 0x0d, 0xa4);
+ chipio_write(codec, 0x18b03c, 0x00000012);
+ break;
+ }
+ break;
+ case HEADPHONE_OUT:
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ ca0113_mmio_gpio_set(codec, 7, true);
+ ca0113_mmio_gpio_set(codec, 4, true);
+ ca0113_mmio_gpio_set(codec, 1, false);
+ chipio_set_control_param(codec, 0x0d, 0x12);
+ break;
+ case QUIRK_ZXR:
+ ca0113_mmio_gpio_set(codec, 2, false);
+ ca0113_mmio_gpio_set(codec, 3, false);
+ ca0113_mmio_gpio_set(codec, 5, true);
+ zxr_headphone_gain_set(codec, spec->zxr_gain_set);
+ chipio_set_control_param(codec, 0x0d, 0x21);
+ break;
+ case QUIRK_R3DI:
+ chipio_set_control_param(codec, 0x0d, 0x21);
+ r3di_gpio_out_set(codec, R3DI_HEADPHONE_OUT);
+ break;
+ case QUIRK_R3D:
+ chipio_set_control_param(codec, 0x0d, 0x21);
+ ca0113_mmio_gpio_set(codec, 0x1, false);
+ break;
+ case QUIRK_AE5:
+ ae5_mmio_select_out(codec);
+ ae5_headphone_gain_set(codec,
+ spec->ae5_headphone_gain_val);
+ tmp = FLOAT_ONE;
+ dspio_set_uint_param(codec, 0x96, 0x29, tmp);
+ dspio_set_uint_param(codec, 0x96, 0x2a, tmp);
+ chipio_set_control_param(codec, 0x0d, 0xa1);
+ chipio_write(codec, 0x18b03c, 0x00000012);
+ break;
+ }
+ break;
+ case SURROUND_OUT:
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ ca0113_mmio_gpio_set(codec, 7, false);
+ ca0113_mmio_gpio_set(codec, 4, true);
+ ca0113_mmio_gpio_set(codec, 1, true);
+ chipio_set_control_param(codec, 0x0d, 0x18);
+ break;
+ case QUIRK_ZXR:
+ ca0113_mmio_gpio_set(codec, 2, true);
+ ca0113_mmio_gpio_set(codec, 3, true);
+ ca0113_mmio_gpio_set(codec, 5, false);
+ zxr_headphone_gain_set(codec, 0);
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ break;
+ case QUIRK_R3DI:
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ r3di_gpio_out_set(codec, R3DI_LINE_OUT);
+ break;
+ case QUIRK_R3D:
+ ca0113_mmio_gpio_set(codec, 1, true);
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ break;
+ case QUIRK_AE5:
+ ae5_mmio_select_out(codec);
+ ae5_headphone_gain_set(codec, 2);
+ tmp = FLOAT_ZERO;
+ dspio_set_uint_param(codec, 0x96, 0x29, tmp);
+ dspio_set_uint_param(codec, 0x96, 0x2a, tmp);
+ chipio_set_control_param(codec, 0x0d, 0xa4);
+ chipio_write(codec, 0x18b03c, 0x00000012);
+ break;
+ }
+ break;
+ }
+}
+
/*
* This function behaves similarly to the ca0132_select_out function above,
* except with a few differences. It adds the ability to select the current
@@ -3978,26 +4326,11 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
if (err < 0)
goto exit;
+ ca0132_alt_select_out_quirk_handler(codec);
+
switch (spec->cur_out_type) {
case SPEAKER_OUT:
codec_dbg(codec, "%s speaker\n", __func__);
- /*speaker out config*/
- switch (spec->quirk) {
- case QUIRK_SBZ:
- ca0132_mmio_gpio_set(codec, 7, false);
- ca0132_mmio_gpio_set(codec, 4, true);
- ca0132_mmio_gpio_set(codec, 1, true);
- chipio_set_control_param(codec, 0x0D, 0x18);
- break;
- case QUIRK_R3DI:
- chipio_set_control_param(codec, 0x0D, 0x24);
- r3di_gpio_out_set(codec, R3DI_LINE_OUT);
- break;
- case QUIRK_R3D:
- chipio_set_control_param(codec, 0x0D, 0x24);
- ca0132_mmio_gpio_set(codec, 1, true);
- break;
- }
/* disable headphone node */
pin_ctl = snd_hda_codec_read(codec, spec->out_pins[1], 0,
@@ -4021,23 +4354,6 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
break;
case HEADPHONE_OUT:
codec_dbg(codec, "%s hp\n", __func__);
- /* Headphone out config*/
- switch (spec->quirk) {
- case QUIRK_SBZ:
- ca0132_mmio_gpio_set(codec, 7, true);
- ca0132_mmio_gpio_set(codec, 4, true);
- ca0132_mmio_gpio_set(codec, 1, false);
- chipio_set_control_param(codec, 0x0D, 0x12);
- break;
- case QUIRK_R3DI:
- chipio_set_control_param(codec, 0x0D, 0x21);
- r3di_gpio_out_set(codec, R3DI_HEADPHONE_OUT);
- break;
- case QUIRK_R3D:
- chipio_set_control_param(codec, 0x0D, 0x21);
- ca0132_mmio_gpio_set(codec, 0x1, false);
- break;
- }
snd_hda_codec_write(codec, spec->out_pins[0], 0,
AC_VERB_SET_EAPD_BTLENABLE, 0x00);
@@ -4067,23 +4383,7 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
break;
case SURROUND_OUT:
codec_dbg(codec, "%s surround\n", __func__);
- /* Surround out config*/
- switch (spec->quirk) {
- case QUIRK_SBZ:
- ca0132_mmio_gpio_set(codec, 7, false);
- ca0132_mmio_gpio_set(codec, 4, true);
- ca0132_mmio_gpio_set(codec, 1, true);
- chipio_set_control_param(codec, 0x0D, 0x18);
- break;
- case QUIRK_R3DI:
- chipio_set_control_param(codec, 0x0D, 0x24);
- r3di_gpio_out_set(codec, R3DI_LINE_OUT);
- break;
- case QUIRK_R3D:
- ca0132_mmio_gpio_set(codec, 1, true);
- chipio_set_control_param(codec, 0x0D, 0x24);
- break;
- }
+
/* enable line out node */
pin_ctl = snd_hda_codec_read(codec, spec->out_pins[0], 0,
AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
@@ -4108,14 +4408,21 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
snd_hda_set_pin_ctl(codec, spec->out_pins[3],
pin_ctl | PIN_OUT);
- if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
- dspio_set_uint_param(codec, 0x80, 0x04, FLOAT_ONE);
- else
- dspio_set_uint_param(codec, 0x80, 0x04, FLOAT_EIGHT);
+ dspio_set_uint_param(codec, 0x80, 0x04, FLOAT_EIGHT);
break;
}
+ /*
+ * Surround always sets its SCP command req 0x04 to FLOAT_EIGHT.
+ * With this set though, X_BASS cannot be enabled. So, if we have OutFX
+ * enabled, we need to make sure X_BASS is off, otherwise everything
+ * sounds all muffled. Running ca0132_effects_set with X_BASS as the
+ * effect should sort this out.
+ */
+ if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
+ ca0132_effects_set(codec, X_BASS,
+ spec->effects_switch[X_BASS - EFFECT_START_NID]);
- /* run through the output dsp commands for line-out */
+ /* run through the output dsp commands for the selected output. */
for (i = 0; i < alt_out_presets[spec->cur_out_type].commands; i++) {
err = dspio_set_uint_param(codec,
alt_out_presets[spec->cur_out_type].mids[i],
@@ -4152,7 +4459,6 @@ static void ca0132_unsol_hp_delayed(struct work_struct *work)
static void ca0132_set_dmic(struct hda_codec *codec, int enable);
static int ca0132_mic_boost_set(struct hda_codec *codec, long val);
-static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val);
static void resume_mic1(struct hda_codec *codec, unsigned int oldval);
static int stop_mic1(struct hda_codec *codec);
static int ca0132_cvoice_switch_set(struct hda_codec *codec);
@@ -4341,13 +4647,20 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_SBZ:
case QUIRK_R3D:
- ca0132_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_gpio_set(codec, 0, false);
+ tmp = FLOAT_THREE;
+ break;
+ case QUIRK_ZXR:
tmp = FLOAT_THREE;
break;
case QUIRK_R3DI:
r3di_gpio_mic_set(codec, R3DI_REAR_MIC);
tmp = FLOAT_ONE;
break;
+ case QUIRK_AE5:
+ ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00);
+ tmp = FLOAT_THREE;
+ break;
default:
tmp = FLOAT_ONE;
break;
@@ -4362,10 +4675,19 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
-
- if (spec->quirk == QUIRK_SBZ) {
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
chipio_write(codec, 0x18B098, 0x0000000C);
chipio_write(codec, 0x18B09C, 0x0000000C);
+ break;
+ case QUIRK_ZXR:
+ chipio_write(codec, 0x18B098, 0x0000000C);
+ chipio_write(codec, 0x18B09C, 0x000000CC);
+ break;
+ case QUIRK_AE5:
+ chipio_write(codec, 0x18B098, 0x0000000C);
+ chipio_write(codec, 0x18B09C, 0x0000004C);
+ break;
}
ca0132_alt_mic_boost_set(codec, spec->mic_boost_enum_val);
break;
@@ -4374,11 +4696,14 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_SBZ:
case QUIRK_R3D:
- ca0132_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_gpio_set(codec, 0, false);
break;
case QUIRK_R3DI:
r3di_gpio_mic_set(codec, R3DI_REAR_MIC);
break;
+ case QUIRK_AE5:
+ ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00);
+ break;
}
chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
@@ -4389,11 +4714,13 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
tmp = FLOAT_ZERO;
dspio_set_uint_param(codec, 0x80, 0x00, tmp);
- if (spec->quirk == QUIRK_SBZ) {
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ case QUIRK_AE5:
chipio_write(codec, 0x18B098, 0x00000000);
chipio_write(codec, 0x18B09C, 0x00000000);
+ break;
}
-
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
break;
@@ -4401,14 +4728,18 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_SBZ:
case QUIRK_R3D:
- ca0132_mmio_gpio_set(codec, 0, true);
- ca0132_mmio_gpio_set(codec, 5, false);
+ ca0113_mmio_gpio_set(codec, 0, true);
+ ca0113_mmio_gpio_set(codec, 5, false);
tmp = FLOAT_THREE;
break;
case QUIRK_R3DI:
r3di_gpio_mic_set(codec, R3DI_FRONT_MIC);
tmp = FLOAT_ONE;
break;
+ case QUIRK_AE5:
+ ca0113_mmio_command_set(codec, 0x48, 0x28, 0x3f);
+ tmp = FLOAT_THREE;
+ break;
default:
tmp = FLOAT_ONE;
break;
@@ -4424,9 +4755,15 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
- if (spec->quirk == QUIRK_SBZ) {
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
chipio_write(codec, 0x18B098, 0x0000000C);
chipio_write(codec, 0x18B09C, 0x000000CC);
+ break;
+ case QUIRK_AE5:
+ chipio_write(codec, 0x18B098, 0x0000000C);
+ chipio_write(codec, 0x18B09C, 0x0000004C);
+ break;
}
ca0132_alt_mic_boost_set(codec, spec->mic_boost_enum_val);
break;
@@ -4435,7 +4772,6 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
snd_hda_power_down_pm(codec);
return 0;
-
}
/*
@@ -4507,6 +4843,8 @@ static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
/* if PE if off, turn off out effects. */
if (!spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
val = 0;
+ if (spec->cur_out_type == SURROUND_OUT && nid == X_BASS)
+ val = 0;
}
/* for in effect, qualify with CrystalVoice */
@@ -4520,7 +4858,7 @@ static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
val = 0;
/* If Voice Focus on SBZ, set to two channel. */
- if ((nid == VOICE_FOCUS) && (spec->quirk == QUIRK_SBZ)
+ if ((nid == VOICE_FOCUS) && (spec->use_pci_mmio)
&& (spec->cur_mic_type != REAR_LINE_IN)) {
if (spec->effects_switch[CRYSTAL_VOICE -
EFFECT_START_NID]) {
@@ -4539,7 +4877,7 @@ static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
* For SBZ noise reduction, there's an extra command
* to module ID 0x47. No clue why.
*/
- if ((nid == NOISE_REDUCTION) && (spec->quirk == QUIRK_SBZ)
+ if ((nid == NOISE_REDUCTION) && (spec->use_pci_mmio)
&& (spec->cur_mic_type != REAR_LINE_IN)) {
if (spec->effects_switch[CRYSTAL_VOICE -
EFFECT_START_NID]) {
@@ -4678,6 +5016,27 @@ static int ca0132_alt_mic_boost_set(struct hda_codec *codec, long val)
return ret;
}
+static int ae5_headphone_gain_set(struct hda_codec *codec, long val)
+{
+ unsigned int i;
+
+ for (i = 0; i < 4; i++)
+ ca0113_mmio_command_set(codec, 0x48, 0x11 + i,
+ ae5_headphone_gain_presets[val].vals[i]);
+ return 0;
+}
+
+/*
+ * gpio pin 1 is a relay that switches on/off, apparently setting the headphone
+ * amplifier to handle a 600 ohm load.
+ */
+static int zxr_headphone_gain_set(struct hda_codec *codec, long val)
+{
+ ca0113_mmio_gpio_set(codec, 1, val);
+
+ return 0;
+}
+
static int ca0132_vnode_switch_set(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -4942,6 +5301,112 @@ static int ca0132_alt_mic_boost_put(struct snd_kcontrol *kcontrol,
return 1;
}
+/*
+ * Sound BlasterX AE-5 Headphone Gain Controls.
+ */
+#define AE5_HEADPHONE_GAIN_MAX 3
+static int ae5_headphone_gain_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ char *sfx = "Ohms)"; /* the "%s %s" below already adds the separating space */
+ char namestr[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = AE5_HEADPHONE_GAIN_MAX;
+ if (uinfo->value.enumerated.item >= AE5_HEADPHONE_GAIN_MAX)
+ uinfo->value.enumerated.item = AE5_HEADPHONE_GAIN_MAX - 1;
+ sprintf(namestr, "%s %s",
+ ae5_headphone_gain_presets[uinfo->value.enumerated.item].name,
+ sfx);
+ strcpy(uinfo->value.enumerated.name, namestr);
+ return 0;
+}
+
+static int ae5_headphone_gain_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ca0132_spec *spec = codec->spec;
+
+ ucontrol->value.enumerated.item[0] = spec->ae5_headphone_gain_val;
+ return 0;
+}
+
+static int ae5_headphone_gain_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ca0132_spec *spec = codec->spec;
+ int sel = ucontrol->value.enumerated.item[0];
+ unsigned int items = AE5_HEADPHONE_GAIN_MAX;
+
+ if (sel >= items)
+ return 0;
+
+ codec_dbg(codec, "ae5_headphone_gain: boost=%d\n",
+ sel);
+
+ spec->ae5_headphone_gain_val = sel;
+
+ if (spec->out_enum_val == HEADPHONE_OUT)
+ ae5_headphone_gain_set(codec, spec->ae5_headphone_gain_val);
+
+ return 1;
+}
+
+/*
+ * Sound BlasterX AE-5 sound filter enumerated control.
+ */
+#define AE5_SOUND_FILTER_MAX 3
+
+static int ae5_sound_filter_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ char namestr[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = AE5_SOUND_FILTER_MAX;
+ if (uinfo->value.enumerated.item >= AE5_SOUND_FILTER_MAX)
+ uinfo->value.enumerated.item = AE5_SOUND_FILTER_MAX - 1;
+ sprintf(namestr, "%s",
+ ae5_filter_presets[uinfo->value.enumerated.item].name);
+ strcpy(uinfo->value.enumerated.name, namestr);
+ return 0;
+}
+
+static int ae5_sound_filter_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ca0132_spec *spec = codec->spec;
+
+ ucontrol->value.enumerated.item[0] = spec->ae5_filter_val;
+ return 0;
+}
+
+static int ae5_sound_filter_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ca0132_spec *spec = codec->spec;
+ int sel = ucontrol->value.enumerated.item[0];
+ unsigned int items = AE5_SOUND_FILTER_MAX;
+
+ if (sel >= items)
+ return 0;
+
+ codec_dbg(codec, "ae5_sound_filter: %s\n",
+ ae5_filter_presets[sel].name);
+
+ spec->ae5_filter_val = sel;
+
+ ca0113_mmio_command_set_type2(codec, 0x48, 0x07,
+ ae5_filter_presets[sel].val);
+
+ return 1;
+}
/*
* Input Select Control for alternative ca0132 codecs. This exists because
@@ -5330,6 +5795,16 @@ static int ca0132_switch_put(struct snd_kcontrol *kcontrol,
goto exit;
}
+ if (nid == ZXR_HEADPHONE_GAIN) {
+ spec->zxr_gain_set = *valp;
+ if (spec->cur_out_type == HEADPHONE_OUT)
+ changed = zxr_headphone_gain_set(codec, *valp);
+ else
+ changed = 0;
+
+ goto exit;
+ }
+
exit:
snd_hda_power_down(codec);
return changed;
@@ -5705,6 +6180,50 @@ static int ca0132_alt_add_mic_boost_enum(struct hda_codec *codec)
}
/*
+ * Add headphone gain enumerated control for the AE-5. This switches between
+ * three modes: low, medium, and high. When non-headphone outputs are selected,
+ * it is automatically set to high. This is the same behavior as Windows.
+ */
+static int ae5_add_headphone_gain_enum(struct hda_codec *codec)
+{
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_MUTE_MONO("AE-5: Headphone Gain",
+ AE5_HEADPHONE_GAIN_ENUM, 1, 0, HDA_OUTPUT);
+ knew.info = ae5_headphone_gain_info;
+ knew.get = ae5_headphone_gain_get;
+ knew.put = ae5_headphone_gain_put;
+ return snd_hda_ctl_add(codec, AE5_HEADPHONE_GAIN_ENUM,
+ snd_ctl_new1(&knew, codec));
+}
+
+/*
+ * Add sound filter enumerated control for the AE-5. This adds three different
+ * settings: Slow Roll Off, Minimum Phase, and Fast Roll Off. From what I've
+ * read, it changes the DAC's interpolation filter.
+ */
+static int ae5_add_sound_filter_enum(struct hda_codec *codec)
+{
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_MUTE_MONO("AE-5: Sound Filter",
+ AE5_SOUND_FILTER_ENUM, 1, 0, HDA_OUTPUT);
+ knew.info = ae5_sound_filter_info;
+ knew.get = ae5_sound_filter_get;
+ knew.put = ae5_sound_filter_put;
+ return snd_hda_ctl_add(codec, AE5_SOUND_FILTER_ENUM,
+ snd_ctl_new1(&knew, codec));
+}
+
+static int zxr_add_headphone_gain_switch(struct hda_codec *codec)
+{
+ struct snd_kcontrol_new knew =
+ CA0132_CODEC_MUTE_MONO("ZxR: 600 Ohm Gain",
+ ZXR_HEADPHONE_GAIN, 1, HDA_OUTPUT);
+
+ return snd_hda_ctl_add(codec, ZXR_HEADPHONE_GAIN,
+ snd_ctl_new1(&knew, codec));
+}
+
+/*
* Need to create slave controls for the alternate codecs that have surround
* capabilities.
*/
@@ -5847,7 +6366,8 @@ static int ca0132_build_controls(struct hda_codec *codec)
NULL, ca0132_alt_slave_pfxs,
"Playback Switch",
true, &spec->vmaster_mute.sw_kctl);
-
+ if (err < 0)
+ return err;
}
/* Add in and out effects controls.
@@ -5855,8 +6375,8 @@ static int ca0132_build_controls(struct hda_codec *codec)
*/
num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT;
for (i = 0; i < num_fx; i++) {
- /* SBZ and R3D break if Echo Cancellation is used. */
- if (spec->quirk == QUIRK_SBZ || spec->quirk == QUIRK_R3D) {
+ /* Desktop cards break if Echo Cancellation is used. */
+ if (spec->use_pci_mmio) {
if (i == (ECHO_CANCELLATION - IN_EFFECT_START_NID +
OUT_EFFECTS_COUNT))
continue;
@@ -5874,8 +6394,14 @@ static int ca0132_build_controls(struct hda_codec *codec)
* prefix, and change PlayEnhancement and CrystalVoice to match.
*/
if (spec->use_alt_controls) {
- ca0132_alt_add_svm_enum(codec);
- add_ca0132_alt_eq_presets(codec);
+ err = ca0132_alt_add_svm_enum(codec);
+ if (err < 0)
+ return err;
+
+ err = add_ca0132_alt_eq_presets(codec);
+ if (err < 0)
+ return err;
+
err = add_fx_switch(codec, PLAY_ENHANCEMENT,
"Enable OutFX", 0);
if (err < 0)
@@ -5912,7 +6438,9 @@ static int ca0132_build_controls(struct hda_codec *codec)
if (err < 0)
return err;
}
- add_voicefx(codec);
+ err = add_voicefx(codec);
+ if (err < 0)
+ return err;
/*
* If the codec uses alt_functions, you need the enumerated controls
@@ -5920,9 +6448,36 @@ static int ca0132_build_controls(struct hda_codec *codec)
* setting control.
*/
if (spec->use_alt_functions) {
- ca0132_alt_add_output_enum(codec);
- ca0132_alt_add_input_enum(codec);
- ca0132_alt_add_mic_boost_enum(codec);
+ err = ca0132_alt_add_output_enum(codec);
+ if (err < 0)
+ return err;
+ err = ca0132_alt_add_mic_boost_enum(codec);
+ if (err < 0)
+ return err;
+ /*
+ * ZxR only has microphone input, there is no front panel
+ * header on the card, and aux-in is handled by the DBPro board.
+ */
+ if (spec->quirk != QUIRK_ZXR) {
+ err = ca0132_alt_add_input_enum(codec);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ if (spec->quirk == QUIRK_AE5) {
+ err = ae5_add_headphone_gain_enum(codec);
+ if (err < 0)
+ return err;
+ err = ae5_add_sound_filter_enum(codec);
+ if (err < 0)
+ return err;
+ }
+
+ if (spec->quirk == QUIRK_ZXR) {
+ err = zxr_add_headphone_gain_switch(codec);
+ if (err < 0)
+ return err;
}
#ifdef ENABLE_TUNING_CONTROLS
add_tuning_ctls(codec);
@@ -5955,6 +6510,27 @@ static int ca0132_build_controls(struct hda_codec *codec)
return 0;
}
+static int dbpro_build_controls(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ int err = 0;
+
+ if (spec->dig_out) {
+ err = snd_hda_create_spdif_out_ctls(codec, spec->dig_out,
+ spec->dig_out);
+ if (err < 0)
+ return err;
+ }
+
+ if (spec->dig_in) {
+ err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
/*
* PCM
*/
@@ -6058,6 +6634,40 @@ static int ca0132_build_pcms(struct hda_codec *codec)
return 0;
}
+static int dbpro_build_pcms(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ struct hda_pcm *info;
+
+ info = snd_hda_codec_pcm_new(codec, "CA0132 Alt Analog");
+ if (!info)
+ return -ENOMEM;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0132_pcm_analog_capture;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = 1;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];
+
+
+ if (!spec->dig_out && !spec->dig_in)
+ return 0;
+
+ info = snd_hda_codec_pcm_new(codec, "CA0132 Digital");
+ if (!info)
+ return -ENOMEM;
+ info->pcm_type = HDA_PCM_TYPE_SPDIF;
+ if (spec->dig_out) {
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
+ ca0132_pcm_digital_playback;
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dig_out;
+ }
+ if (spec->dig_in) {
+ info->stream[SNDRV_PCM_STREAM_CAPTURE] =
+ ca0132_pcm_digital_capture;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in;
+ }
+
+ return 0;
+}
+
static void init_output(struct hda_codec *codec, hda_nid_t pin, hda_nid_t dac)
{
if (pin) {
@@ -6238,69 +6848,48 @@ static void ca0132_refresh_widget_caps(struct hda_codec *codec)
}
/*
- * Recon3D r3d_setup_defaults sub functions.
+ * Creates a dummy stream to bind the output to. This apparently has to be
+ * done after changing the main output's source and destination streams.
*/
-
-static void r3d_dsp_scp_startup(struct hda_codec *codec)
+static void ca0132_alt_create_dummy_stream(struct hda_codec *codec)
{
- unsigned int tmp;
-
- tmp = 0x00000000;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0A, tmp);
-
- tmp = 0x00000001;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0B, tmp);
-
- tmp = 0x00000004;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
- tmp = 0x00000005;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
- tmp = 0x00000000;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
-}
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int stream_format;
-static void r3d_dsp_initial_mic_setup(struct hda_codec *codec)
-{
- unsigned int tmp;
+ stream_format = snd_hdac_calc_stream_format(48000, 2,
+ SNDRV_PCM_FORMAT_S32_LE, 32, 0);
- /* Mic 1 Setup */
- chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
- chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);
- /* This ConnPointID is unique to Recon3Di. Haven't seen it elsewhere */
- chipio_set_conn_rate(codec, 0x0F, SR_96_000);
- tmp = FLOAT_ONE;
- dspio_set_uint_param(codec, 0x80, 0x00, tmp);
+ snd_hda_codec_setup_stream(codec, spec->dacs[0], spec->dsp_stream_id,
+ 0, stream_format);
- /* Mic 2 Setup, even though it isn't connected on SBZ */
- chipio_set_conn_rate(codec, MEM_CONNID_MICIN2, SR_96_000);
- chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2, SR_96_000);
- chipio_set_conn_rate(codec, 0x0F, SR_96_000);
- tmp = FLOAT_ZERO;
- dspio_set_uint_param(codec, 0x80, 0x01, tmp);
+ snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
}
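The dummy stream binds spec->dacs[0] to dsp_stream_id with a fixed 48 kHz, stereo, 32-bit format word from snd_hdac_calc_stream_format(). For reference, a standalone sketch of how that 16-bit SDxFMT value is packed; the field layout follows the HD-audio spec, and the helper below is an illustrative subset, not the real API:

#include <stdio.h>

/* SDxFMT: bit 14 = base rate (0 = 48 kHz), 13:11 = multiple - 1,
 * 10:8 = divisor - 1, 6:4 = bits code (4 = 32-bit), 3:0 = channels - 1 */
static unsigned short hda_format(int base44k, int mult, int div,
                                 int bits_code, int channels)
{
    return (base44k << 14) | ((mult - 1) << 11) | ((div - 1) << 8) |
           (bits_code << 4) | (channels - 1);
}

int main(void)
{
    /* 48 kHz (base 48k, x1, /1), 32-bit (code 4), stereo */
    printf("0x%04x\n", hda_format(0, 1, 1, 4, 2));    /* -> 0x0041 */
    return 0;
}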
/*
- * Initialize Sound Blaster Z analog microphones.
+ * Initialize the mics for non-Chromebook ca0132 implementations.
*/
-static void sbz_init_analog_mics(struct hda_codec *codec)
+static void ca0132_alt_init_analog_mics(struct hda_codec *codec)
{
+ struct ca0132_spec *spec = codec->spec;
unsigned int tmp;
/* Mic 1 Setup */
chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);
- tmp = FLOAT_THREE;
+ if (spec->quirk == QUIRK_R3DI) {
+ chipio_set_conn_rate(codec, 0x0F, SR_96_000);
+ tmp = FLOAT_ONE;
+ } else
+ tmp = FLOAT_THREE;
dspio_set_uint_param(codec, 0x80, 0x00, tmp);
- /* Mic 2 Setup, even though it isn't connected on SBZ */
+ /* Mic 2 setup (not present on desktop cards) */
chipio_set_conn_rate(codec, MEM_CONNID_MICIN2, SR_96_000);
chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2, SR_96_000);
+ if (spec->quirk == QUIRK_R3DI)
+ chipio_set_conn_rate(codec, 0x0F, SR_96_000);
tmp = FLOAT_ZERO;
dspio_set_uint_param(codec, 0x80, 0x01, tmp);
-
}
/*
@@ -6333,7 +6922,6 @@ static void sbz_connect_streams(struct hda_codec *codec)
codec_dbg(codec, "Connect Streams exited, mutex released.\n");
mutex_unlock(&spec->chipio_mutex);
-
}
/*
@@ -6360,19 +6948,29 @@ static void sbz_chipio_startup_data(struct hda_codec *codec)
chipio_set_stream_channels(codec, 0x0C, 6);
chipio_set_stream_control(codec, 0x0C, 1);
/* No clue what these control */
- chipio_write_no_mutex(codec, 0x190030, 0x0001e0c0);
- chipio_write_no_mutex(codec, 0x190034, 0x0001e1c1);
- chipio_write_no_mutex(codec, 0x190038, 0x0001e4c2);
- chipio_write_no_mutex(codec, 0x19003c, 0x0001e5c3);
- chipio_write_no_mutex(codec, 0x190040, 0x0001e2c4);
- chipio_write_no_mutex(codec, 0x190044, 0x0001e3c5);
- chipio_write_no_mutex(codec, 0x190048, 0x0001e8c6);
- chipio_write_no_mutex(codec, 0x19004c, 0x0001e9c7);
- chipio_write_no_mutex(codec, 0x190050, 0x0001ecc8);
- chipio_write_no_mutex(codec, 0x190054, 0x0001edc9);
- chipio_write_no_mutex(codec, 0x190058, 0x0001eaca);
- chipio_write_no_mutex(codec, 0x19005c, 0x0001ebcb);
-
+ if (spec->quirk == QUIRK_SBZ) {
+ chipio_write_no_mutex(codec, 0x190030, 0x0001e0c0);
+ chipio_write_no_mutex(codec, 0x190034, 0x0001e1c1);
+ chipio_write_no_mutex(codec, 0x190038, 0x0001e4c2);
+ chipio_write_no_mutex(codec, 0x19003c, 0x0001e5c3);
+ chipio_write_no_mutex(codec, 0x190040, 0x0001e2c4);
+ chipio_write_no_mutex(codec, 0x190044, 0x0001e3c5);
+ chipio_write_no_mutex(codec, 0x190048, 0x0001e8c6);
+ chipio_write_no_mutex(codec, 0x19004c, 0x0001e9c7);
+ chipio_write_no_mutex(codec, 0x190050, 0x0001ecc8);
+ chipio_write_no_mutex(codec, 0x190054, 0x0001edc9);
+ chipio_write_no_mutex(codec, 0x190058, 0x0001eaca);
+ chipio_write_no_mutex(codec, 0x19005c, 0x0001ebcb);
+ } else if (spec->quirk == QUIRK_ZXR) {
+ chipio_write_no_mutex(codec, 0x190038, 0x000140c2);
+ chipio_write_no_mutex(codec, 0x19003c, 0x000141c3);
+ chipio_write_no_mutex(codec, 0x190040, 0x000150c4);
+ chipio_write_no_mutex(codec, 0x190044, 0x000151c5);
+ chipio_write_no_mutex(codec, 0x190050, 0x000142c8);
+ chipio_write_no_mutex(codec, 0x190054, 0x000143c9);
+ chipio_write_no_mutex(codec, 0x190058, 0x000152ca);
+ chipio_write_no_mutex(codec, 0x19005c, 0x000153cb);
+ }
chipio_write_no_mutex(codec, 0x19042c, 0x00000001);
codec_dbg(codec, "Startup Data exited, mutex released.\n");
@@ -6380,35 +6978,56 @@ static void sbz_chipio_startup_data(struct hda_codec *codec)
}
/*
- * Sound Blaster Z uses these after DSP is loaded. Weird SCP commands
- * without a 0x20 source like normal.
+ * Custom DSP SCP commands where the src value is 0x00 instead of 0x20. This is
+ * done after the DSP is loaded.
*/
-static void sbz_dsp_scp_startup(struct hda_codec *codec)
+static void ca0132_alt_dsp_scp_startup(struct hda_codec *codec)
{
- unsigned int tmp;
-
- tmp = 0x00000003;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
- tmp = 0x00000000;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0A, tmp);
-
- tmp = 0x00000001;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0B, tmp);
-
- tmp = 0x00000004;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
- tmp = 0x00000005;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
- tmp = 0x00000000;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int tmp, i;
+ /*
+	 * These have to be run twice, or else the mic works inconsistently.
+	 * It is not clear why, but multiple tests have confirmed it.
+ */
+ for (i = 0; i < 2; i++) {
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ case QUIRK_AE5:
+ tmp = 0x00000003;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ tmp = 0x00000000;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0A, tmp);
+ tmp = 0x00000001;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0B, tmp);
+ tmp = 0x00000004;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ tmp = 0x00000005;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ tmp = 0x00000000;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ break;
+ case QUIRK_R3D:
+ case QUIRK_R3DI:
+ tmp = 0x00000000;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0A, tmp);
+ tmp = 0x00000001;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0B, tmp);
+ tmp = 0x00000004;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ tmp = 0x00000005;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ tmp = 0x00000000;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ break;
+ }
+ msleep(100);
+ }
}
-static void sbz_dsp_initial_mic_setup(struct hda_codec *codec)
+static void ca0132_alt_dsp_initial_mic_setup(struct hda_codec *codec)
{
+ struct ca0132_spec *spec = codec->spec;
unsigned int tmp;
chipio_set_stream_control(codec, 0x03, 0);
@@ -6423,8 +7042,161 @@ static void sbz_dsp_initial_mic_setup(struct hda_codec *codec)
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
- chipio_write(codec, 0x18b098, 0x0000000c);
- chipio_write(codec, 0x18b09C, 0x0000000c);
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ chipio_write(codec, 0x18b098, 0x0000000c);
+ chipio_write(codec, 0x18b09C, 0x0000000c);
+ break;
+ case QUIRK_AE5:
+ chipio_write(codec, 0x18b098, 0x0000000c);
+ chipio_write(codec, 0x18b09c, 0x0000004c);
+ break;
+ }
+}
+
+static void ae5_post_dsp_register_set(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ chipio_8051_write_direct(codec, 0x93, 0x10);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x44);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xc2);
+
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0x00, spec->mem_base + 0x100);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0x00, spec->mem_base + 0x100);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0x00, spec->mem_base + 0x100);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0x00, spec->mem_base + 0x100);
+ writeb(0xff, spec->mem_base + 0x304);
+
+ ca0113_mmio_command_set(codec, 0x30, 0x2b, 0x3f);
+ ca0113_mmio_command_set(codec, 0x30, 0x2d, 0x3f);
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);
+}
+
+static void ae5_post_dsp_param_setup(struct hda_codec *codec)
+{
+ /*
+	 * Param3 in the 8051's memory is represented by the ASCII string 'mch',
+ * which seems to be 'multichannel'. This is also mentioned in the
+ * AE-5's registry values in Windows.
+ */
+ chipio_set_control_param(codec, 3, 0);
+ /*
+ * I believe ASI is 'audio serial interface' and that it's used to
+ * change colors on the external LED strip connected to the AE-5.
+ */
+ chipio_set_control_flag(codec, CONTROL_FLAG_ASI_96KHZ, 1);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, 0x724, 0x83);
+ chipio_set_control_param(codec, CONTROL_PARAM_ASI, 0);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x92);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_HIGH, 0xfa);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_DATA_WRITE, 0x22);
+}
+
+static void ae5_post_dsp_pll_setup(struct hda_codec *codec)
+{
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x41);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xc8);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x45);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xcc);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x40);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xcb);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x43);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xc7);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x51);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0x8d);
+}
+
+static void ae5_post_dsp_stream_setup(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ mutex_lock(&spec->chipio_mutex);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, 0x725, 0x81);
+
+ chipio_set_conn_rate_no_mutex(codec, 0x70, SR_96_000);
+
+ chipio_set_stream_channels(codec, 0x0C, 6);
+ chipio_set_stream_control(codec, 0x0C, 1);
+
+ chipio_set_stream_source_dest(codec, 0x5, 0x43, 0x0);
+
+ chipio_set_stream_source_dest(codec, 0x18, 0x9, 0xd0);
+ chipio_set_conn_rate_no_mutex(codec, 0xd0, SR_96_000);
+ chipio_set_stream_channels(codec, 0x18, 6);
+ chipio_set_stream_control(codec, 0x18, 1);
+
+ chipio_set_control_param_no_mutex(codec, CONTROL_PARAM_ASI, 4);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x43);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xc7);
+
+ ca0113_mmio_command_set(codec, 0x48, 0x01, 0x80);
+
+ mutex_unlock(&spec->chipio_mutex);
+}
+
+static void ae5_post_dsp_startup_data(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ mutex_lock(&spec->chipio_mutex);
+
+ chipio_write_no_mutex(codec, 0x189000, 0x0001f101);
+ chipio_write_no_mutex(codec, 0x189004, 0x0001f101);
+ chipio_write_no_mutex(codec, 0x189024, 0x00014004);
+ chipio_write_no_mutex(codec, 0x189028, 0x0002000f);
+
+ ca0113_mmio_command_set(codec, 0x48, 0x0a, 0x05);
+ chipio_set_control_param_no_mutex(codec, CONTROL_PARAM_ASI, 7);
+ ca0113_mmio_command_set(codec, 0x48, 0x0b, 0x12);
+ ca0113_mmio_command_set(codec, 0x48, 0x04, 0x00);
+ ca0113_mmio_command_set(codec, 0x48, 0x06, 0x48);
+ ca0113_mmio_command_set(codec, 0x48, 0x0a, 0x05);
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);
+ ca0113_mmio_command_set(codec, 0x48, 0x0f, 0x00);
+ ca0113_mmio_command_set(codec, 0x48, 0x10, 0x00);
+ ca0113_mmio_gpio_set(codec, 0, true);
+ ca0113_mmio_gpio_set(codec, 1, true);
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x80);
+
+ chipio_write_no_mutex(codec, 0x18b03c, 0x00000012);
+
+ ca0113_mmio_command_set(codec, 0x48, 0x0f, 0x00);
+ ca0113_mmio_command_set(codec, 0x48, 0x10, 0x00);
+
+ mutex_unlock(&spec->chipio_mutex);
}
/*
@@ -6485,9 +7257,8 @@ static void r3d_setup_defaults(struct hda_codec *codec)
if (spec->dsp_state != DSP_DOWNLOADED)
return;
- r3d_dsp_scp_startup(codec);
-
- r3d_dsp_initial_mic_setup(codec);
+ ca0132_alt_dsp_scp_startup(codec);
+ ca0132_alt_init_analog_mics(codec);
	/* Remove DSP headroom */
tmp = FLOAT_ZERO;
@@ -6523,19 +7294,16 @@ static void r3d_setup_defaults(struct hda_codec *codec)
static void sbz_setup_defaults(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- unsigned int tmp, stream_format;
+ unsigned int tmp;
int num_fx;
int idx, i;
if (spec->dsp_state != DSP_DOWNLOADED)
return;
- sbz_dsp_scp_startup(codec);
-
- sbz_init_analog_mics(codec);
-
+ ca0132_alt_dsp_scp_startup(codec);
+ ca0132_alt_init_analog_mics(codec);
sbz_connect_streams(codec);
-
sbz_chipio_startup_data(codec);
chipio_set_stream_control(codec, 0x03, 1);
@@ -6561,8 +7329,7 @@ static void sbz_setup_defaults(struct hda_codec *codec)
/* Set speaker source? */
dspio_set_uint_param(codec, 0x32, 0x00, tmp);
- sbz_dsp_initial_mic_setup(codec);
-
+ ca0132_alt_dsp_initial_mic_setup(codec);
/* out, in effects + voicefx */
num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT + 1;
@@ -6575,23 +7342,74 @@ static void sbz_setup_defaults(struct hda_codec *codec)
}
}
- /*
- * Have to make a stream to bind the sound output to, otherwise
- * you'll get dead audio. Before I did this, it would bind to an
- * audio input, and would never work
- */
- stream_format = snd_hdac_calc_stream_format(48000, 2,
- SNDRV_PCM_FORMAT_S32_LE, 32, 0);
+ ca0132_alt_create_dummy_stream(codec);
+}
- snd_hda_codec_setup_stream(codec, spec->dacs[0], spec->dsp_stream_id,
- 0, stream_format);
+/*
+ * Setup default parameters for the Sound BlasterX AE-5 DSP.
+ */
+static void ae5_setup_defaults(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int tmp;
+ int num_fx;
+ int idx, i;
- snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
+ if (spec->dsp_state != DSP_DOWNLOADED)
+ return;
- snd_hda_codec_setup_stream(codec, spec->dacs[0], spec->dsp_stream_id,
- 0, stream_format);
+ ca0132_alt_dsp_scp_startup(codec);
+ ca0132_alt_init_analog_mics(codec);
+ chipio_set_stream_control(codec, 0x03, 1);
+ chipio_set_stream_control(codec, 0x04, 1);
- snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
+	/* New, unknown SCP requests */
+ tmp = FLOAT_ZERO;
+ dspio_set_uint_param(codec, 0x96, 0x29, tmp);
+ dspio_set_uint_param(codec, 0x96, 0x2a, tmp);
+ dspio_set_uint_param(codec, 0x80, 0x0d, tmp);
+ dspio_set_uint_param(codec, 0x80, 0x0e, tmp);
+
+ ca0113_mmio_command_set(codec, 0x30, 0x2e, 0x3f);
+ ca0113_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
+
+ /* Internal loopback off */
+ tmp = FLOAT_ONE;
+ dspio_set_uint_param(codec, 0x37, 0x08, tmp);
+ dspio_set_uint_param(codec, 0x37, 0x10, tmp);
+
+	/* Remove DSP headroom */
+ tmp = FLOAT_ZERO;
+ dspio_set_uint_param(codec, 0x96, 0x3C, tmp);
+
+ /* set WUH source */
+ tmp = FLOAT_TWO;
+ dspio_set_uint_param(codec, 0x31, 0x00, tmp);
+ chipio_set_conn_rate(codec, MEM_CONNID_WUH, SR_48_000);
+
+ /* Set speaker source? */
+ dspio_set_uint_param(codec, 0x32, 0x00, tmp);
+
+ ca0132_alt_dsp_initial_mic_setup(codec);
+ ae5_post_dsp_register_set(codec);
+ ae5_post_dsp_param_setup(codec);
+ ae5_post_dsp_pll_setup(codec);
+ ae5_post_dsp_stream_setup(codec);
+ ae5_post_dsp_startup_data(codec);
+
+ /* out, in effects + voicefx */
+ num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT + 1;
+ for (idx = 0; idx < num_fx; idx++) {
+ for (i = 0; i <= ca0132_effects[idx].params; i++) {
+ dspio_set_uint_param(codec,
+ ca0132_effects[idx].mid,
+ ca0132_effects[idx].reqs[i],
+ ca0132_effects[idx].def_vals[i]);
+ }
+ }
+
+ ca0132_alt_create_dummy_stream(codec);
}
/*
@@ -6673,12 +7491,14 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
*/
switch (spec->quirk) {
case QUIRK_SBZ:
- if (request_firmware(&fw_entry, SBZ_EFX_FILE,
+ case QUIRK_R3D:
+ case QUIRK_AE5:
+ if (request_firmware(&fw_entry, DESKTOP_EFX_FILE,
codec->card->dev) != 0) {
- codec_dbg(codec, "SBZ alt firmware not detected. ");
+ codec_dbg(codec, "Desktop firmware not found.");
spec->alt_firmware_present = false;
} else {
- codec_dbg(codec, "Sound Blaster Z firmware selected.");
+ codec_dbg(codec, "Desktop firmware selected.");
spec->alt_firmware_present = true;
}
break;
@@ -6921,6 +7741,14 @@ static void ca0132_init_chip(struct hda_codec *codec)
spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID] = 1;
spec->effects_switch[CRYSTAL_VOICE - EFFECT_START_NID] = 0;
+ /*
+	 * The ZxR doesn't have a front-panel header, and its line-in is on
+ * the daughter board. So, there is no input enum control, and we need
+ * to make sure that spec->in_enum_val is set properly.
+ */
+ if (spec->quirk == QUIRK_ZXR)
+ spec->in_enum_val = REAR_MIC;
+
#ifdef ENABLE_TUNING_CONTROLS
ca0132_init_tuning_defaults(codec);
#endif
@@ -6948,11 +7776,11 @@ static void sbz_region2_exit(struct hda_codec *codec)
for (i = 0; i < 8; i++)
writeb(0xb3, spec->mem_base + 0x304);
- ca0132_mmio_gpio_set(codec, 0, false);
- ca0132_mmio_gpio_set(codec, 1, false);
- ca0132_mmio_gpio_set(codec, 4, true);
- ca0132_mmio_gpio_set(codec, 5, false);
- ca0132_mmio_gpio_set(codec, 7, false);
+ ca0113_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_gpio_set(codec, 1, false);
+ ca0113_mmio_gpio_set(codec, 4, true);
+ ca0113_mmio_gpio_set(codec, 5, false);
+ ca0113_mmio_gpio_set(codec, 7, false);
}
static void sbz_set_pin_ctl_default(struct hda_codec *codec)
@@ -6995,6 +7823,16 @@ static void sbz_gpio_shutdown_commands(struct hda_codec *codec, int dir,
AC_VERB_SET_GPIO_DATA, data);
}
+static void zxr_dbpro_power_state_shutdown(struct hda_codec *codec)
+{
+ hda_nid_t pins[7] = {0x05, 0x0c, 0x09, 0x0e, 0x08, 0x11, 0x01};
+ unsigned int i;
+
+ for (i = 0; i < 7; i++)
+ snd_hda_codec_write(codec, pins[i], 0,
+ AC_VERB_SET_POWER_STATE, 0x03);
+}
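/*
 * Reader's note, not part of this patch: the 0x03 written above is the HDA
 * D3 power state (AC_PWRST_D3), so this loop parks the DBPro daughter
 * board's widgets, apparently including the function group at NID 0x01, in
 * their lowest power state before the codec is released.
 */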
+
static void sbz_exit_chip(struct hda_codec *codec)
{
chipio_set_stream_control(codec, 0x03, 0);
@@ -7037,6 +7875,61 @@ static void r3d_exit_chip(struct hda_codec *codec)
snd_hda_codec_write(codec, 0x01, 0, 0x794, 0x5b);
}
+static void ae5_exit_chip(struct hda_codec *codec)
+{
+ chipio_set_stream_control(codec, 0x03, 0);
+ chipio_set_stream_control(codec, 0x04, 0);
+
+ ca0113_mmio_command_set(codec, 0x30, 0x32, 0x3f);
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);
+ ca0113_mmio_command_set(codec, 0x30, 0x30, 0x00);
+ ca0113_mmio_command_set(codec, 0x30, 0x2b, 0x00);
+ ca0113_mmio_command_set(codec, 0x30, 0x2d, 0x00);
+ ca0113_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_gpio_set(codec, 1, false);
+
+ snd_hda_codec_write(codec, 0x01, 0, 0x793, 0x00);
+ snd_hda_codec_write(codec, 0x01, 0, 0x794, 0x53);
+
+ chipio_set_control_param(codec, CONTROL_PARAM_ASI, 0);
+
+ chipio_set_stream_control(codec, 0x18, 0);
+ chipio_set_stream_control(codec, 0x0c, 0);
+
+ snd_hda_codec_write(codec, 0x01, 0, 0x724, 0x83);
+}
+
+static void zxr_exit_chip(struct hda_codec *codec)
+{
+ chipio_set_stream_control(codec, 0x03, 0);
+ chipio_set_stream_control(codec, 0x04, 0);
+ chipio_set_stream_control(codec, 0x14, 0);
+ chipio_set_stream_control(codec, 0x0C, 0);
+
+ chipio_set_conn_rate(codec, 0x41, SR_192_000);
+ chipio_set_conn_rate(codec, 0x91, SR_192_000);
+
+ chipio_write(codec, 0x18a020, 0x00000083);
+
+ snd_hda_codec_write(codec, 0x01, 0, 0x793, 0x00);
+ snd_hda_codec_write(codec, 0x01, 0, 0x794, 0x53);
+
+ ca0132_clear_unsolicited(codec);
+ sbz_set_pin_ctl_default(codec);
+ snd_hda_codec_write(codec, 0x0B, 0, AC_VERB_SET_EAPD_BTLENABLE, 0x00);
+
+ ca0113_mmio_gpio_set(codec, 5, false);
+ ca0113_mmio_gpio_set(codec, 2, false);
+ ca0113_mmio_gpio_set(codec, 3, false);
+ ca0113_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_gpio_set(codec, 4, true);
+ ca0113_mmio_gpio_set(codec, 0, true);
+ ca0113_mmio_gpio_set(codec, 5, true);
+ ca0113_mmio_gpio_set(codec, 2, false);
+ ca0113_mmio_gpio_set(codec, 3, false);
+}
+
static void ca0132_exit_chip(struct hda_codec *codec)
{
	/* put any chip cleanup stuff here. */
@@ -7140,11 +8033,6 @@ static void sbz_pre_dsp_setup(struct hda_codec *codec)
writel(0x00820680, spec->mem_base + 0x01C);
writel(0x00820680, spec->mem_base + 0x01C);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfc);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfd);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfe);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xff);
-
chipio_write(codec, 0x18b0a4, 0x000000c2);
snd_hda_codec_write(codec, 0x11, 0,
@@ -7153,12 +8041,6 @@ static void sbz_pre_dsp_setup(struct hda_codec *codec)
static void r3d_pre_dsp_setup(struct hda_codec *codec)
{
-
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfc);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfd);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfe);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xff);
-
chipio_write(codec, 0x18b0a4, 0x000000c2);
snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
@@ -7205,23 +8087,116 @@ static void ca0132_mmio_init(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- writel(0x00000000, spec->mem_base + 0x400);
- writel(0x00000000, spec->mem_base + 0x408);
- writel(0x00000000, spec->mem_base + 0x40C);
- writel(0x00880680, spec->mem_base + 0x01C);
- writel(0x00000083, spec->mem_base + 0xC0C);
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000001, spec->mem_base + 0x400);
+ else
+ writel(0x00000000, spec->mem_base + 0x400);
+
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000001, spec->mem_base + 0x408);
+ else
+ writel(0x00000000, spec->mem_base + 0x408);
+
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000001, spec->mem_base + 0x40c);
+ else
+ writel(0x00000000, spec->mem_base + 0x40C);
+
+ if (spec->quirk == QUIRK_ZXR)
+ writel(0x00880640, spec->mem_base + 0x01C);
+ else
+ writel(0x00880680, spec->mem_base + 0x01C);
+
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000080, spec->mem_base + 0xC0C);
+ else
+ writel(0x00000083, spec->mem_base + 0xC0C);
+
writel(0x00000030, spec->mem_base + 0xC00);
writel(0x00000000, spec->mem_base + 0xC04);
+
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000000, spec->mem_base + 0xC0C);
+ else
+ writel(0x00000003, spec->mem_base + 0xC0C);
+
writel(0x00000003, spec->mem_base + 0xC0C);
writel(0x00000003, spec->mem_base + 0xC0C);
writel(0x00000003, spec->mem_base + 0xC0C);
- writel(0x00000003, spec->mem_base + 0xC0C);
- writel(0x000000C1, spec->mem_base + 0xC08);
+
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000001, spec->mem_base + 0xC08);
+ else
+ writel(0x000000C1, spec->mem_base + 0xC08);
+
writel(0x000000F1, spec->mem_base + 0xC08);
writel(0x00000001, spec->mem_base + 0xC08);
writel(0x000000C7, spec->mem_base + 0xC08);
writel(0x000000C1, spec->mem_base + 0xC08);
writel(0x00000080, spec->mem_base + 0xC04);
+
+ if (spec->quirk == QUIRK_AE5) {
+ writel(0x00000000, spec->mem_base + 0x42c);
+ writel(0x00000000, spec->mem_base + 0x46c);
+ writel(0x00000000, spec->mem_base + 0x4ac);
+ writel(0x00000000, spec->mem_base + 0x4ec);
+ writel(0x00000000, spec->mem_base + 0x43c);
+ writel(0x00000000, spec->mem_base + 0x47c);
+ writel(0x00000000, spec->mem_base + 0x4bc);
+ writel(0x00000000, spec->mem_base + 0x4fc);
+ writel(0x00000600, spec->mem_base + 0x100);
+ writel(0x00000014, spec->mem_base + 0x410);
+ writel(0x0000060f, spec->mem_base + 0x100);
+ writel(0x0000070f, spec->mem_base + 0x100);
+ writel(0x00000aff, spec->mem_base + 0x830);
+ writel(0x00000000, spec->mem_base + 0x86c);
+ writel(0x0000006b, spec->mem_base + 0x800);
+ writel(0x00000001, spec->mem_base + 0x86c);
+ writel(0x0000006b, spec->mem_base + 0x800);
+ writel(0x00000057, spec->mem_base + 0x804);
+ writel(0x00800000, spec->mem_base + 0x20c);
+ }
+}
+
+/*
+ * This function writes to some SFRs, does some region2 writes, and then
+ * eventually resets the codec with the 0x7ff verb. It is not clear why it
+ * does what it does.
+ */
+static void ae5_register_set(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ chipio_8051_write_direct(codec, 0x93, 0x10);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x44);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xc2);
+
+ writeb(0x0f, spec->mem_base + 0x304);
+ writeb(0x0f, spec->mem_base + 0x304);
+ writeb(0x0f, spec->mem_base + 0x304);
+ writeb(0x0f, spec->mem_base + 0x304);
+ writeb(0x0e, spec->mem_base + 0x100);
+ writeb(0x1f, spec->mem_base + 0x304);
+ writeb(0x0c, spec->mem_base + 0x100);
+ writeb(0x3f, spec->mem_base + 0x304);
+ writeb(0x08, spec->mem_base + 0x100);
+ writeb(0x7f, spec->mem_base + 0x304);
+ writeb(0x00, spec->mem_base + 0x100);
+ writeb(0xff, spec->mem_base + 0x304);
+
+ ca0113_mmio_command_set(codec, 0x30, 0x2d, 0x3f);
+
+ chipio_8051_write_direct(codec, 0x90, 0x00);
+ chipio_8051_write_direct(codec, 0x90, 0x10);
+
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);
+
+ chipio_write(codec, 0x18b0a4, 0x000000c2);
+
+ snd_hda_codec_write(codec, 0x01, 0, 0x7ff, 0x00);
+ snd_hda_codec_write(codec, 0x01, 0, 0x7ff, 0x00);
}
/*
@@ -7257,6 +8232,21 @@ static void ca0132_alt_init(struct hda_codec *codec)
snd_hda_sequence_write(codec, spec->chip_init_verbs);
snd_hda_sequence_write(codec, spec->desktop_init_verbs);
break;
+ case QUIRK_AE5:
+ ca0132_gpio_init(codec);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x49);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0x88);
+ chipio_write(codec, 0x18b030, 0x00000020);
+ snd_hda_sequence_write(codec, spec->chip_init_verbs);
+ snd_hda_sequence_write(codec, spec->desktop_init_verbs);
+ ca0113_mmio_command_set(codec, 0x30, 0x32, 0x3f);
+ break;
+ case QUIRK_ZXR:
+ snd_hda_sequence_write(codec, spec->chip_init_verbs);
+ snd_hda_sequence_write(codec, spec->desktop_init_verbs);
+ break;
}
}
@@ -7298,6 +8288,9 @@ static int ca0132_init(struct hda_codec *codec)
snd_hda_power_up_pm(codec);
+ if (spec->quirk == QUIRK_AE5)
+ ae5_register_set(codec);
+
ca0132_init_unsol(codec);
ca0132_init_params(codec);
ca0132_init_flags(codec);
@@ -7317,8 +8310,12 @@ static int ca0132_init(struct hda_codec *codec)
r3d_setup_defaults(codec);
break;
case QUIRK_SBZ:
+ case QUIRK_ZXR:
sbz_setup_defaults(codec);
break;
+ case QUIRK_AE5:
+ ae5_setup_defaults(codec);
+ break;
default:
ca0132_setup_defaults(codec);
ca0132_init_analog_mic2(codec);
@@ -7372,6 +8369,21 @@ static int ca0132_init(struct hda_codec *codec)
return 0;
}
+static int dbpro_init(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ unsigned int i;
+
+ init_output(codec, cfg->dig_out_pins[0], spec->dig_out);
+ init_input(codec, cfg->dig_in_pin, spec->dig_in);
+
+ for (i = 0; i < spec->num_inputs; i++)
+ init_input(codec, spec->input_pins[i], spec->adcs[i]);
+
+ return 0;
+}
+
static void ca0132_free(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
@@ -7382,9 +8394,15 @@ static void ca0132_free(struct hda_codec *codec)
case QUIRK_SBZ:
sbz_exit_chip(codec);
break;
+ case QUIRK_ZXR:
+ zxr_exit_chip(codec);
+ break;
case QUIRK_R3D:
r3d_exit_chip(codec);
break;
+ case QUIRK_AE5:
+ ae5_exit_chip(codec);
+ break;
case QUIRK_R3DI:
r3di_gpio_shutdown(codec);
break;
@@ -7400,6 +8418,16 @@ static void ca0132_free(struct hda_codec *codec)
kfree(codec->spec);
}
+static void dbpro_free(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ zxr_dbpro_power_state_shutdown(codec);
+
+ kfree(spec->spec_init_verbs);
+ kfree(codec->spec);
+}
+
static void ca0132_reboot_notify(struct hda_codec *codec)
{
codec->patch_ops.free(codec);
@@ -7414,6 +8442,13 @@ static const struct hda_codec_ops ca0132_patch_ops = {
.reboot_notify = ca0132_reboot_notify,
};
+static const struct hda_codec_ops dbpro_patch_ops = {
+ .build_controls = dbpro_build_controls,
+ .build_pcms = dbpro_build_pcms,
+ .init = dbpro_init,
+ .free = dbpro_free,
+};
+
static void ca0132_config(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
@@ -7432,9 +8467,33 @@ static void ca0132_config(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_ALIENWARE:
- codec_dbg(codec, "ca0132_config: QUIRK_ALIENWARE applied.\n");
+ codec_dbg(codec, "%s: QUIRK_ALIENWARE applied.\n", __func__);
snd_hda_apply_pincfgs(codec, alienware_pincfgs);
+ break;
+ case QUIRK_SBZ:
+ codec_dbg(codec, "%s: QUIRK_SBZ applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, sbz_pincfgs);
+ break;
+ case QUIRK_ZXR:
+ codec_dbg(codec, "%s: QUIRK_ZXR applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, zxr_pincfgs);
+ break;
+ case QUIRK_R3D:
+ codec_dbg(codec, "%s: QUIRK_R3D applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, r3d_pincfgs);
+ break;
+ case QUIRK_R3DI:
+ codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, r3di_pincfgs);
+ break;
+ case QUIRK_AE5:
+ codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, r3di_pincfgs);
+ break;
+ }
+ switch (spec->quirk) {
+ case QUIRK_ALIENWARE:
spec->num_outputs = 2;
spec->out_pins[0] = 0x0b; /* speaker out */
spec->out_pins[1] = 0x0f;
@@ -7454,15 +8513,6 @@ static void ca0132_config(struct hda_codec *codec)
break;
case QUIRK_SBZ:
case QUIRK_R3D:
- if (spec->quirk == QUIRK_SBZ) {
- codec_dbg(codec, "%s: QUIRK_SBZ applied.\n", __func__);
- snd_hda_apply_pincfgs(codec, sbz_pincfgs);
- }
- if (spec->quirk == QUIRK_R3D) {
- codec_dbg(codec, "%s: QUIRK_R3D applied.\n", __func__);
- snd_hda_apply_pincfgs(codec, r3d_pincfgs);
- }
-
spec->num_outputs = 2;
spec->out_pins[0] = 0x0B; /* Line out */
spec->out_pins[1] = 0x0F; /* Rear headphone out */
@@ -7487,10 +8537,62 @@ static void ca0132_config(struct hda_codec *codec)
spec->multiout.dig_out_nid = spec->dig_out;
spec->dig_in = 0x09;
break;
- case QUIRK_R3DI:
- codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__);
- snd_hda_apply_pincfgs(codec, r3di_pincfgs);
+ case QUIRK_ZXR:
+ spec->num_outputs = 2;
+ spec->out_pins[0] = 0x0B; /* Line out */
+ spec->out_pins[1] = 0x0F; /* Rear headphone out */
+ spec->out_pins[2] = 0x10; /* Center/LFE */
+ spec->out_pins[3] = 0x11; /* Rear surround */
+ spec->shared_out_nid = 0x2;
+ spec->unsol_tag_hp = spec->out_pins[1];
+ spec->unsol_tag_front_hp = spec->out_pins[2];
+ spec->adcs[0] = 0x7; /* Rear Mic / Line-in */
+ spec->adcs[1] = 0x8; /* Not connected, no front mic */
+	spec->adcs[2] = 0xa; /* What U Hear */
+
+ spec->num_inputs = 2;
+ spec->input_pins[0] = 0x12; /* Rear Mic / Line-in */
+ spec->input_pins[1] = 0x13; /* What U Hear */
+ spec->shared_mic_nid = 0x7;
+ spec->unsol_tag_amic1 = spec->input_pins[0];
+ break;
+ case QUIRK_ZXR_DBPRO:
+ spec->adcs[0] = 0x8; /* ZxR DBPro Aux In */
+
+ spec->num_inputs = 1;
+ spec->input_pins[0] = 0x11; /* RCA Line-in */
+
+ spec->dig_out = 0x05;
+ spec->multiout.dig_out_nid = spec->dig_out;
+
+ spec->dig_in = 0x09;
+ break;
+ case QUIRK_AE5:
+ spec->num_outputs = 2;
+ spec->out_pins[0] = 0x0B; /* Line out */
+ spec->out_pins[1] = 0x11; /* Rear headphone out */
+	spec->out_pins[2] = 0x10; /* Front Headphone / Center/LFE */
+ spec->out_pins[3] = 0x0F; /* Rear surround */
+ spec->shared_out_nid = 0x2;
+ spec->unsol_tag_hp = spec->out_pins[1];
+ spec->unsol_tag_front_hp = spec->out_pins[2];
+
+ spec->adcs[0] = 0x7; /* Rear Mic / Line-in */
+ spec->adcs[1] = 0x8; /* Front Mic, but only if no DSP */
+	spec->adcs[2] = 0xa; /* What U Hear */
+
+ spec->num_inputs = 2;
+ spec->input_pins[0] = 0x12; /* Rear Mic / Line-in */
+ spec->input_pins[1] = 0x13; /* What U Hear */
+ spec->shared_mic_nid = 0x7;
+ spec->unsol_tag_amic1 = spec->input_pins[0];
+
+ /* SPDIF I/O */
+ spec->dig_out = 0x05;
+ spec->multiout.dig_out_nid = spec->dig_out;
+ break;
+ case QUIRK_R3DI:
spec->num_outputs = 2;
spec->out_pins[0] = 0x0B; /* Line out */
spec->out_pins[1] = 0x0F; /* Rear headphone out */
@@ -7547,7 +8649,11 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
struct ca0132_spec *spec = codec->spec;
spec->chip_init_verbs = ca0132_init_verbs0;
- if (spec->quirk == QUIRK_SBZ || spec->quirk == QUIRK_R3D)
+ /*
+	 * Since desktop cards use pci_mmio, spec->use_pci_mmio can be used to
+	 * determine whether to use these verbs, instead of a separate bool.
+ */
+ if (spec->use_pci_mmio)
spec->desktop_init_verbs = ca0132_init_verbs1;
spec->spec_init_verbs = kcalloc(NUM_SPEC_VERBS,
sizeof(struct hda_verb),
@@ -7579,6 +8685,29 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
return 0;
}
+/*
+ * The Sound Blaster ZxR shares the same PCI subsystem ID as some regular
+ * Sound Blaster Z cards. However, they have different HDA codec subsystem
+ * IDs. So, we check for the ZxR's subsystem ID, as well as the DBPro
+ * daughter board's ID.
+ */
+static void sbz_detect_quirk(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ switch (codec->core.subsystem_id) {
+ case 0x11020033:
+ spec->quirk = QUIRK_ZXR;
+ break;
+ case 0x1102003f:
+ spec->quirk = QUIRK_ZXR_DBPRO;
+ break;
+ default:
+ spec->quirk = QUIRK_SBZ;
+ break;
+ }
+}
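/*
 * Illustrative sketch, not part of this patch: codec->core.subsystem_id
 * packs the subsystem vendor in the upper 16 bits and the subsystem device
 * in the lower 16 bits, so 0x11020033 above is vendor 0x1102 (Creative)
 * with device 0x0033. Hypothetical helpers to split the value:
 */
static inline u16 subsys_vendor(u32 subsystem_id)
{
	return subsystem_id >> 16;
}

static inline u16 subsys_device(u32 subsystem_id)
{
	return subsystem_id & 0xffff;
}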
+
static int patch_ca0132(struct hda_codec *codec)
{
struct ca0132_spec *spec;
@@ -7593,10 +8722,6 @@ static int patch_ca0132(struct hda_codec *codec)
codec->spec = spec;
spec->codec = codec;
- codec->patch_ops = ca0132_patch_ops;
- codec->pcm_format_first = 1;
- codec->no_sticky_stream = 1;
-
/* Detect codec quirk */
quirk = snd_pci_quirk_lookup(codec->bus->pci, ca0132_quirks);
if (quirk)
@@ -7604,6 +8729,18 @@ static int patch_ca0132(struct hda_codec *codec)
else
spec->quirk = QUIRK_NONE;
+ if (spec->quirk == QUIRK_SBZ)
+ sbz_detect_quirk(codec);
+
+ if (spec->quirk == QUIRK_ZXR_DBPRO)
+ codec->patch_ops = dbpro_patch_ops;
+ else
+ codec->patch_ops = ca0132_patch_ops;
+
+ codec->pcm_format_first = 1;
+ codec->no_sticky_stream = 1;
+
spec->dsp_state = DSP_DOWNLOAD_INIT;
spec->num_mixers = 1;
@@ -7613,6 +8750,12 @@ static int patch_ca0132(struct hda_codec *codec)
spec->mixers[0] = desktop_mixer;
snd_hda_codec_set_name(codec, "Sound Blaster Z");
break;
+ case QUIRK_ZXR:
+ spec->mixers[0] = desktop_mixer;
+ snd_hda_codec_set_name(codec, "Sound Blaster ZxR");
+ break;
+ case QUIRK_ZXR_DBPRO:
+ break;
case QUIRK_R3D:
spec->mixers[0] = desktop_mixer;
snd_hda_codec_set_name(codec, "Recon3D");
@@ -7621,6 +8764,10 @@ static int patch_ca0132(struct hda_codec *codec)
spec->mixers[0] = r3di_mixer;
snd_hda_codec_set_name(codec, "Recon3Di");
break;
+ case QUIRK_AE5:
+ spec->mixers[0] = desktop_mixer;
+ snd_hda_codec_set_name(codec, "Sound BlasterX AE-5");
+ break;
default:
spec->mixers[0] = ca0132_mixer;
break;
@@ -7630,6 +8777,8 @@ static int patch_ca0132(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_SBZ:
case QUIRK_R3D:
+ case QUIRK_AE5:
+ case QUIRK_ZXR:
spec->use_alt_controls = true;
spec->use_alt_functions = true;
spec->use_pci_mmio = true;
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index a7f91be45194..64fa5a82bb9f 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <sound/core.h>
#include <sound/tlv.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c
index 1b2195dd2b26..52642ba3e2c0 100644
--- a/sound/pci/hda/patch_cmedia.c
+++ b/sound/pci/hda/patch_cmedia.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index cfd4e4f97f8f..950e02e71766 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -27,7 +27,7 @@
#include <sound/core.h>
#include <sound/jack.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_beep.h"
@@ -943,6 +943,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
+ SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index cb587dce67a9..67099cbb6be2 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -41,7 +41,7 @@
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
#include <sound/hda_chmap.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 1d117f00d04d..fa61674a5605 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -32,7 +32,7 @@
#include <linux/input.h>
#include <sound/core.h>
#include <sound/jack.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
@@ -6409,6 +6409,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
@@ -6842,6 +6843,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x21, 0x0221101f}),
SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
{0x14, 0x90170110},
+ {0x19, 0x02a11030},
+ {0x1a, 0x02a11040},
+ {0x1b, 0x01011020},
+ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+ {0x14, 0x90170110},
{0x19, 0x02a11020},
{0x1a, 0x02a11030},
{0x21, 0x0221101f}),
@@ -7737,6 +7744,8 @@ enum {
ALC662_FIXUP_ASUS_Nx50,
ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
ALC668_FIXUP_ASUS_Nx51,
+ ALC668_FIXUP_MIC_COEF,
+ ALC668_FIXUP_ASUS_G751,
ALC891_FIXUP_HEADSET_MODE,
ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
ALC662_FIXUP_ACER_VERITON,
@@ -8006,6 +8015,23 @@ static const struct hda_fixup alc662_fixups[] = {
.chained = true,
.chain_id = ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
},
+ [ALC668_FIXUP_MIC_COEF] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0xc3 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x4000 },
+ {}
+ },
+ },
+ [ALC668_FIXUP_ASUS_G751] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x16, 0x0421101f }, /* HP */
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC668_FIXUP_MIC_COEF
+ },
[ALC891_FIXUP_HEADSET_MODE] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_mode,
@@ -8079,6 +8105,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
+ SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751),
SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
@@ -8183,6 +8210,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
{.id = ALC668_FIXUP_DELL_XPS13, .name = "dell-xps13"},
{.id = ALC662_FIXUP_ASUS_Nx50, .name = "asus-nx50"},
{.id = ALC668_FIXUP_ASUS_Nx51, .name = "asus-nx51"},
+ {.id = ALC668_FIXUP_ASUS_G751, .name = "asus-g751"},
{.id = ALC891_FIXUP_HEADSET_MODE, .name = "alc891-headset"},
{.id = ALC891_FIXUP_DELL_MIC_NO_PRESENCE, .name = "alc891-headset-multi"},
{.id = ALC662_FIXUP_ACER_VERITON, .name = "acer-veriton"},
diff --git a/sound/pci/hda/patch_si3054.c b/sound/pci/hda/patch_si3054.c
index f63acb1b965c..c49d25bcd7f2 100644
--- a/sound/pci/hda/patch_si3054.c
+++ b/sound/pci/hda/patch_si3054.c
@@ -27,7 +27,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
/* si3054 verbs */
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 046705b4691a..1b6ecfb01759 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -32,7 +32,7 @@
#include <linux/module.h>
#include <sound/core.h>
#include <sound/jack.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_beep.h"
@@ -77,6 +77,7 @@ enum {
STAC_DELL_M6_BOTH,
STAC_DELL_EQ,
STAC_ALIENWARE_M17X,
+ STAC_ELO_VUPOINT_15MX,
STAC_92HD89XX_HP_FRONT_JACK,
STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
STAC_92HD73XX_ASUS_MOBO,
@@ -1879,6 +1880,18 @@ static void stac92hd73xx_fixup_no_jd(struct hda_codec *codec,
codec->no_jack_detect = 1;
}
+
+static void stac92hd73xx_disable_automute(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct sigmatel_spec *spec = codec->spec;
+
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ spec->gen.suppress_auto_mute = 1;
+}
+
static const struct hda_fixup stac92hd73xx_fixups[] = {
[STAC_92HD73XX_REF] = {
.type = HDA_FIXUP_FUNC,
@@ -1904,6 +1917,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = stac92hd73xx_fixup_alienware_m17x,
},
+ [STAC_ELO_VUPOINT_15MX] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = stac92hd73xx_disable_automute,
+ },
[STAC_92HD73XX_INTEL] = {
.type = HDA_FIXUP_PINS,
.v.pins = intel_dg45id_pin_configs,
@@ -1942,6 +1959,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
{ .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
{ .id = STAC_DELL_EQ, .name = "dell-eq" },
{ .id = STAC_ALIENWARE_M17X, .name = "alienware" },
+ { .id = STAC_ELO_VUPOINT_15MX, .name = "elo-vupoint-15mx" },
{ .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
{}
};
@@ -1991,6 +2009,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
"Alienware M17x", STAC_ALIENWARE_M17X),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
"Alienware M17x R3", STAC_DELL_EQ),
+ SND_PCI_QUIRK(0x1059, 0x1011,
+ "ELO VuPoint 15MX", STAC_ELO_VUPOINT_15MX),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1927,
"HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 6b9617aee0e6..9f6f13e25145 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -52,7 +52,7 @@
#include <linux/module.h>
#include <sound/core.h>
#include <sound/asoundef.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 5ee468d1aefe..ffddcdfe0c66 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -38,11 +38,6 @@
#include <sound/ac97_codec.h>
#include <sound/info.h>
#include <sound/initval.h>
-/* for 440MX workaround */
-#include <asm/pgtable.h>
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455");
@@ -374,7 +369,6 @@ struct ichdev {
unsigned int ali_slot; /* ALI DMA slot */
struct ac97_pcm *pcm;
int pcm_open_flag;
- unsigned int page_attr_changed: 1;
unsigned int suspended: 1;
};
@@ -724,25 +718,6 @@ static void snd_intel8x0_setup_periods(struct intel8x0 *chip, struct ichdev *ich
iputbyte(chip, port + ichdev->roff_sr, ICH_FIFOE | ICH_BCIS | ICH_LVBCI);
}
-#ifdef __i386__
-/*
- * Intel 82443MX running a 100MHz processor system bus has a hardware bug,
- * which aborts PCI busmaster for audio transfer. A workaround is to set
- * the pages as non-cached. For details, see the errata in
- * http://download.intel.com/design/chipsets/specupdt/24505108.pdf
- */
-static void fill_nocache(void *buf, int size, int nocache)
-{
- size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (nocache)
- set_pages_uc(virt_to_page(buf), size);
- else
- set_pages_wb(virt_to_page(buf), size);
-}
-#else
-#define fill_nocache(buf, size, nocache) do { ; } while (0)
-#endif
-
/*
* Interrupt handler
*/
@@ -850,7 +825,7 @@ static int snd_intel8x0_pcm_trigger(struct snd_pcm_substream *substream, int cmd
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
ichdev->suspended = 0;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
val = ICH_IOCE | ICH_STARTBM;
@@ -858,7 +833,7 @@ static int snd_intel8x0_pcm_trigger(struct snd_pcm_substream *substream, int cmd
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
ichdev->suspended = 1;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
val = 0;
break;
@@ -892,7 +867,7 @@ static int snd_intel8x0_ali_trigger(struct snd_pcm_substream *substream, int cmd
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
ichdev->suspended = 0;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -909,7 +884,7 @@ static int snd_intel8x0_ali_trigger(struct snd_pcm_substream *substream, int cmd
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
ichdev->suspended = 1;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
/* pause */
@@ -938,23 +913,12 @@ static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
struct ichdev *ichdev = get_ichdev(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
int dbl = params_rate(hw_params) > 48000;
int err;
- if (chip->fix_nocache && ichdev->page_attr_changed) {
- fill_nocache(runtime->dma_area, runtime->dma_bytes, 0); /* clear */
- ichdev->page_attr_changed = 0;
- }
err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
- if (chip->fix_nocache) {
- if (runtime->dma_area && ! ichdev->page_attr_changed) {
- fill_nocache(runtime->dma_area, runtime->dma_bytes, 1);
- ichdev->page_attr_changed = 1;
- }
- }
if (ichdev->pcm_open_flag) {
snd_ac97_pcm_close(ichdev->pcm);
ichdev->pcm_open_flag = 0;
@@ -974,17 +938,12 @@ static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
static int snd_intel8x0_hw_free(struct snd_pcm_substream *substream)
{
- struct intel8x0 *chip = snd_pcm_substream_chip(substream);
struct ichdev *ichdev = get_ichdev(substream);
if (ichdev->pcm_open_flag) {
snd_ac97_pcm_close(ichdev->pcm);
ichdev->pcm_open_flag = 0;
}
- if (chip->fix_nocache && ichdev->page_attr_changed) {
- fill_nocache(substream->runtime->dma_area, substream->runtime->dma_bytes, 0);
- ichdev->page_attr_changed = 0;
- }
return snd_pcm_lib_free_pages(substream);
}
@@ -1510,6 +1469,9 @@ struct ich_pcm_table {
int ac97_idx;
};
+#define intel8x0_dma_type(chip) \
+ ((chip)->fix_nocache ? SNDRV_DMA_TYPE_DEV_UC : SNDRV_DMA_TYPE_DEV)
+
static int snd_intel8x0_pcm1(struct intel8x0 *chip, int device,
struct ich_pcm_table *rec)
{
@@ -1540,7 +1502,7 @@ static int snd_intel8x0_pcm1(struct intel8x0 *chip, int device,
strcpy(pcm->name, chip->card->shortname);
chip->pcm[device] = pcm;
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_pcm_lib_preallocate_pages_for_all(pcm, intel8x0_dma_type(chip),
snd_dma_pci_data(chip->pci),
rec->prealloc_size, rec->prealloc_max_size);
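/*
 * Illustrative sketch, not part of this patch: SNDRV_DMA_TYPE_DEV_UC makes
 * the ALSA allocator hand out uncached pages itself, which is what replaces
 * the removed fill_nocache()/set_pages_uc() workaround for the 440MX
 * erratum. A minimal standalone allocation along the same lines:
 */
static int intel8x0_alloc_uc_example(struct intel8x0 *chip,
				     struct snd_dma_buffer *dmab)
{
	/* Free later with snd_dma_free_pages(dmab). */
	return snd_dma_alloc_pages(intel8x0_dma_type(chip),
				   snd_dma_pci_data(chip->pci),
				   PAGE_SIZE, dmab);
}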
@@ -2629,11 +2591,8 @@ static int snd_intel8x0_free(struct intel8x0 *chip)
__hw_end:
if (chip->irq >= 0)
free_irq(chip->irq, chip);
- if (chip->bdbars.area) {
- if (chip->fix_nocache)
- fill_nocache(chip->bdbars.area, chip->bdbars.bytes, 0);
+ if (chip->bdbars.area)
snd_dma_free_pages(&chip->bdbars);
- }
if (chip->addr)
pci_iounmap(chip->pci, chip->addr);
if (chip->bmaddr)
@@ -2657,17 +2616,6 @@ static int intel8x0_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
for (i = 0; i < chip->pcm_devs; i++)
snd_pcm_suspend_all(chip->pcm[i]);
- /* clear nocache */
- if (chip->fix_nocache) {
- for (i = 0; i < chip->bdbars_count; i++) {
- struct ichdev *ichdev = &chip->ichd[i];
- if (ichdev->substream && ichdev->page_attr_changed) {
- struct snd_pcm_runtime *runtime = ichdev->substream->runtime;
- if (runtime->dma_area)
- fill_nocache(runtime->dma_area, runtime->dma_bytes, 0);
- }
- }
- }
for (i = 0; i < chip->ncodecs; i++)
snd_ac97_suspend(chip->ac97[i]);
if (chip->device_type == DEVICE_INTEL_ICH4)
@@ -2708,25 +2656,9 @@ static int intel8x0_resume(struct device *dev)
ICH_PCM_SPDIF_1011);
}
- /* refill nocache */
- if (chip->fix_nocache)
- fill_nocache(chip->bdbars.area, chip->bdbars.bytes, 1);
-
for (i = 0; i < chip->ncodecs; i++)
snd_ac97_resume(chip->ac97[i]);
- /* refill nocache */
- if (chip->fix_nocache) {
- for (i = 0; i < chip->bdbars_count; i++) {
- struct ichdev *ichdev = &chip->ichd[i];
- if (ichdev->substream && ichdev->page_attr_changed) {
- struct snd_pcm_runtime *runtime = ichdev->substream->runtime;
- if (runtime->dma_area)
- fill_nocache(runtime->dma_area, runtime->dma_bytes, 1);
- }
- }
- }
-
/* resume status */
for (i = 0; i < chip->bdbars_count; i++) {
struct ichdev *ichdev = &chip->ichd[i];
@@ -3057,6 +2989,12 @@ static int snd_intel8x0_create(struct snd_card *card,
chip->inside_vm = snd_intel8x0_inside_vm(pci);
+ /*
+ * Intel 82443MX running a 100MHz processor system bus has a hardware
+ * bug, which aborts PCI busmaster for audio transfer. A workaround
+ * is to set the pages as non-cached. For details, see the errata in
+ * http://download.intel.com/design/chipsets/specupdt/24505108.pdf
+ */
if (pci->vendor == PCI_VENDOR_ID_INTEL &&
pci->device == PCI_DEVICE_ID_INTEL_440MX)
chip->fix_nocache = 1; /* enable workaround */
@@ -3128,7 +3066,7 @@ static int snd_intel8x0_create(struct snd_card *card,
/* allocate buffer descriptor lists */
/* the start of each lists must be aligned to 8 bytes */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(intel8x0_dma_type(chip), snd_dma_pci_data(pci),
chip->bdbars_count * sizeof(u32) * ICH_MAX_FRAGS * 2,
&chip->bdbars) < 0) {
snd_intel8x0_free(chip);
@@ -3137,9 +3075,6 @@ static int snd_intel8x0_create(struct snd_card *card,
}
/* tables must be aligned to 8 bytes here, but the kernel pages
are much bigger, so we don't care (on i386) */
- /* workaround for 440MX */
- if (chip->fix_nocache)
- fill_nocache(chip->bdbars.area, chip->bdbars.bytes, 1);
int_sta_masks = 0;
for (i = 0; i < chip->bdbars_count; i++) {
ichdev = &chip->ichd[i];
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 943a726b1c1b..c84629190cba 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -1171,16 +1171,6 @@ static int snd_intel8x0m_create(struct snd_card *card,
}
port_inited:
- if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED,
- KBUILD_MODNAME, chip)) {
- dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
- snd_intel8x0m_free(chip);
- return -EBUSY;
- }
- chip->irq = pci->irq;
- pci_set_master(pci);
- synchronize_irq(chip->irq);
-
/* initialize offsets */
chip->bdbars_count = 2;
tbl = intel_regs;
@@ -1224,11 +1214,21 @@ static int snd_intel8x0m_create(struct snd_card *card,
chip->int_sta_reg = ICH_REG_GLOB_STA;
chip->int_sta_mask = int_sta_masks;
+ pci_set_master(pci);
+
if ((err = snd_intel8x0m_chip_init(chip, 1)) < 0) {
snd_intel8x0m_free(chip);
return err;
}
+ if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, chip)) {
+ dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
+ snd_intel8x0m_free(chip);
+ return -EBUSY;
+ }
+ chip->irq = pci->irq;
+
if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
snd_intel8x0m_free(chip);
return err;
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index f0906ba416d4..3ac8c71d567c 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -319,7 +319,8 @@ static const struct snd_pcm_hardware snd_rme32_spdif_info = {
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_SYNC_START),
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S32_LE),
.rates = (SNDRV_PCM_RATE_32000 |
@@ -346,7 +347,8 @@ static const struct snd_pcm_hardware snd_rme32_adat_info =
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_SYNC_START),
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats= SNDRV_PCM_FMTBIT_S16_LE,
.rates = (SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000),
@@ -370,7 +372,8 @@ static const struct snd_pcm_hardware snd_rme32_spdif_fd_info = {
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_SYNC_START),
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S32_LE),
.rates = (SNDRV_PCM_RATE_32000 |
@@ -397,7 +400,8 @@ static const struct snd_pcm_hardware snd_rme32_adat_fd_info =
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_SYNC_START),
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats= SNDRV_PCM_FMTBIT_S16_LE,
.rates = (SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000),
@@ -1104,16 +1108,6 @@ snd_rme32_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
snd_pcm_trigger_done(s, substream);
}
- /* prefill playback buffer */
- if (cmd == SNDRV_PCM_TRIGGER_START && rme32->fullduplex_mode) {
- snd_pcm_group_for_each_entry(s, substream) {
- if (s == rme32->playback_substream) {
- s->ops->ack(s);
- break;
- }
- }
- }
-
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if (rme32->running && ! RME32_ISWORKING(rme32))
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 11b5b5e0e058..679ad0415e3b 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6534,7 +6534,7 @@ static int snd_hdspm_create_alsa_devices(struct snd_card *card,
dev_dbg(card->dev, "Update mixer controls...\n");
hdspm_update_simple_mixer_controls(hdspm);
- dev_dbg(card->dev, "Initializeing complete ???\n");
+ dev_dbg(card->dev, "Initializing complete?\n");
err = snd_card_register(card);
if (err < 0) {
diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c
index 8e3275a96a82..3f813ea5210a 100644
--- a/sound/soc/amd/acp-da7219-max98357a.c
+++ b/sound/soc/amd/acp-da7219-max98357a.c
@@ -42,7 +42,7 @@
#include "../codecs/da7219.h"
#include "../codecs/da7219-aad.h"
-#define CZ_PLAT_CLK 25000000
+#define CZ_PLAT_CLK 48000000
#define DUAL_CHANNEL 2
static struct snd_soc_jack cz_jack;
@@ -75,7 +75,7 @@ static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd)
da7219_dai_clk = clk_get(component->dev, "da7219-dai-clks");
ret = snd_soc_card_jack_new(card, "Headset Jack",
- SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+ SND_JACK_HEADSET | SND_JACK_LINEOUT |
SND_JACK_BTN_0 | SND_JACK_BTN_1 |
SND_JACK_BTN_2 | SND_JACK_BTN_3,
&cz_jack, NULL, 0);
@@ -133,7 +133,7 @@ static const struct snd_pcm_hw_constraint_list constraints_channels = {
.mask = 0,
};
-static int cz_da7219_startup(struct snd_pcm_substream *substream)
+static int cz_da7219_play_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -150,7 +150,28 @@ static int cz_da7219_startup(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&constraints_rates);
- machine->i2s_instance = I2S_SP_INSTANCE;
+ machine->play_i2s_instance = I2S_SP_INSTANCE;
+ return da7219_clk_enable(substream);
+}
+
+static int cz_da7219_cap_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct acp_platform_info *machine = snd_soc_card_get_drvdata(card);
+
+ /*
+	 * On this platform, the PCM device supports stereo
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+
+ machine->cap_i2s_instance = I2S_SP_INSTANCE;
machine->capture_channel = CAP_CHANNEL1;
return da7219_clk_enable(substream);
}
@@ -162,11 +183,22 @@ static void cz_da7219_shutdown(struct snd_pcm_substream *substream)
static int cz_max_startup(struct snd_pcm_substream *substream)
{
+ struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
struct acp_platform_info *machine = snd_soc_card_get_drvdata(card);
- machine->i2s_instance = I2S_BT_INSTANCE;
+ /*
+	 * On this platform, the PCM device supports stereo
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+
+ machine->play_i2s_instance = I2S_BT_INSTANCE;
return da7219_clk_enable(substream);
}
@@ -177,21 +209,43 @@ static void cz_max_shutdown(struct snd_pcm_substream *substream)
static int cz_dmic0_startup(struct snd_pcm_substream *substream)
{
+ struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
struct acp_platform_info *machine = snd_soc_card_get_drvdata(card);
- machine->i2s_instance = I2S_BT_INSTANCE;
+ /*
+	 * On this platform, the PCM device supports stereo
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+
+ machine->cap_i2s_instance = I2S_BT_INSTANCE;
return da7219_clk_enable(substream);
}
static int cz_dmic1_startup(struct snd_pcm_substream *substream)
{
+ struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
struct acp_platform_info *machine = snd_soc_card_get_drvdata(card);
- machine->i2s_instance = I2S_SP_INSTANCE;
+ /*
+	 * On this platform, the PCM device supports stereo
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+
+ machine->cap_i2s_instance = I2S_SP_INSTANCE;
machine->capture_channel = CAP_CHANNEL0;
return da7219_clk_enable(substream);
}
@@ -201,8 +255,13 @@ static void cz_dmic_shutdown(struct snd_pcm_substream *substream)
da7219_clk_disable();
}
+static const struct snd_soc_ops cz_da7219_play_ops = {
+ .startup = cz_da7219_play_startup,
+ .shutdown = cz_da7219_shutdown,
+};
+
static const struct snd_soc_ops cz_da7219_cap_ops = {
- .startup = cz_da7219_startup,
+ .startup = cz_da7219_cap_startup,
.shutdown = cz_da7219_shutdown,
};
@@ -233,7 +292,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
| SND_SOC_DAIFMT_CBM_CFM,
.init = cz_da7219_init,
.dpcm_playback = 1,
- .ops = &cz_da7219_cap_ops,
+ .ops = &cz_da7219_play_ops,
},
{
.name = "amd-da7219-cap",
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index e359938e3d7e..cdebab2f8ce5 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/sizes.h>
#include <linux/pm_runtime.h>
@@ -184,6 +185,24 @@ static void config_dma_descriptor_in_sram(void __iomem *acp_mmio,
acp_reg_write(descr_info->xfer_val, acp_mmio, mmACP_SRBM_Targ_Idx_Data);
}
+static void pre_config_reset(void __iomem *acp_mmio, u16 ch_num)
+{
+ u32 dma_ctrl;
+ int ret;
+
+ /* clear the reset bit */
+ dma_ctrl = acp_reg_read(acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
+ dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRst_MASK;
+ acp_reg_write(dma_ctrl, acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
+ /* check the reset bit before programming configuration registers */
+ ret = readl_poll_timeout(acp_mmio + ((mmACP_DMA_CNTL_0 + ch_num) * 4),
+ dma_ctrl,
+ !(dma_ctrl & ACP_DMA_CNTL_0__DMAChRst_MASK),
+ 100, ACP_DMA_RESET_TIME);
+ if (ret < 0)
+		pr_err("Failed to clear reset of channel: %d\n", ch_num);
+}
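/*
 * Illustrative sketch, not part of this patch: readl_poll_timeout() above
 * re-reads the register until the condition holds, sleeping roughly 100us
 * between reads and giving up after ACP_DMA_RESET_TIME microseconds. An
 * open-coded equivalent would look like:
 */
static int wait_for_dma_ch_reset_clear(void __iomem *acp_mmio, u16 ch_num)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(ACP_DMA_RESET_TIME);
	u32 dma_ctrl;

	for (;;) {
		dma_ctrl = acp_reg_read(acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
		if (!(dma_ctrl & ACP_DMA_CNTL_0__DMAChRst_MASK))
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(100, 200);
	}
}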
+
/*
* Initialize the DMA descriptor information for transfer between
* system memory <-> ACP SRAM
@@ -236,6 +255,7 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio,
config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
&dmadscr[i]);
}
+ pre_config_reset(acp_mmio, ch);
config_acp_dma_channel(acp_mmio, ch,
dma_dscr_idx - 1,
NUM_DSCRS_PER_CHANNEL,
@@ -275,6 +295,7 @@ static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio, u32 size,
config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
&dmadscr[i]);
}
+ pre_config_reset(acp_mmio, ch);
+ /* Configure the DMA channel with the above descriptors */
config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1,
NUM_DSCRS_PER_CHANNEL,
@@ -846,8 +867,12 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
if (pinfo) {
- rtd->i2s_instance = pinfo->i2s_instance;
- rtd->capture_channel = pinfo->capture_channel;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ rtd->i2s_instance = pinfo->play_i2s_instance;
+ } else {
+ rtd->i2s_instance = pinfo->cap_i2s_instance;
+ rtd->capture_channel = pinfo->capture_channel;
+ }
}
if (adata->asic_type == CHIP_STONEY) {
val = acp_reg_read(adata->acp_mmio,
@@ -1015,16 +1040,22 @@ static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
period_bytes = frames_to_bytes(runtime, runtime->period_size);
- dscr = acp_reg_read(rtd->acp_mmio, rtd->dma_curr_dscr);
- if (dscr == rtd->dma_dscr_idx_1)
- pos = period_bytes;
- else
- pos = 0;
bytescount = acp_get_byte_count(rtd);
- if (bytescount > rtd->bytescount)
+ if (bytescount >= rtd->bytescount)
bytescount -= rtd->bytescount;
- delay = do_div(bytescount, period_bytes);
- runtime->delay = bytes_to_frames(runtime, delay);
+ if (bytescount < period_bytes) {
+ pos = 0;
+ } else {
+ dscr = acp_reg_read(rtd->acp_mmio, rtd->dma_curr_dscr);
+ if (dscr == rtd->dma_dscr_idx_1)
+ pos = period_bytes;
+ else
+ pos = 0;
+ }
+ if (bytescount > 0) {
+ delay = do_div(bytescount, period_bytes);
+ runtime->delay = bytes_to_frames(runtime, delay);
+ }
} else {
buffersize = frames_to_bytes(runtime, runtime->buffer_size);
bytescount = acp_get_byte_count(rtd);
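The reworked pointer arithmetic above leans on do_div(), which divides a u64 in place and returns the remainder. A small worked sketch, with hypothetical byte counts, of what the quotient and remainder mean here:

#include <asm/div64.h>

static void example_pointer_math(void)
{
	u64 bytescount = 48128;		/* hypothetical DMA byte counter */
	u32 period_bytes = 16384;	/* hypothetical period size */
	u32 delay;

	/* do_div() stores the quotient back into bytescount and
	 * returns the remainder */
	delay = do_div(bytescount, period_bytes);
	/* bytescount == 2 whole periods, delay == 15360 bytes, which
	 * the driver converts to frames for runtime->delay */
}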
diff --git a/sound/soc/amd/acp.h b/sound/soc/amd/acp.h
index be3963e8f4fa..dbbb1a85638d 100644
--- a/sound/soc/amd/acp.h
+++ b/sound/soc/amd/acp.h
@@ -158,7 +158,8 @@ struct audio_drv_data {
* and dma driver
*/
struct acp_platform_info {
- u16 i2s_instance;
+ u16 play_i2s_instance;
+ u16 cap_i2s_instance;
u16 capture_channel;
};
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 64b784e96f84..64f86f0b87e5 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -97,4 +97,16 @@ config SND_ATMEL_SOC_I2S
help
Say Y or M if you want to add support for the Atmel ASoC driver for boards
using I2S.
+
+config SND_SOC_MIKROE_PROTO
+ tristate "Support for Mikroe-PROTO board"
+ depends on OF
+ depends on SND_SOC_I2C_AND_SPI
+ select SND_SOC_WM8731
+ help
+ Say Y or M if you want to add support for MikroElektronika PROTO Audio
+ Board. This board contains the WM8731 codec, which can be configured
+ using I2C over SDA (MPU Data Input) and SCL (MPU Clock Input) pins.
+ Both playback and capture are supported.
+
endif
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index cd87cb4bcff5..9f41bfa0fea3 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -17,6 +17,7 @@ snd-soc-sam9x5-wm8731-objs := sam9x5_wm8731.o
snd-atmel-soc-classd-objs := atmel-classd.o
snd-atmel-soc-pdmic-objs := atmel-pdmic.o
snd-atmel-soc-tse850-pcm5142-objs := tse850-pcm5142.o
+snd-soc-mikroe-proto-objs := mikroe-proto.o
obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
obj-$(CONFIG_SND_ATMEL_SOC_WM8904) += snd-atmel-soc-wm8904.o
@@ -24,3 +25,4 @@ obj-$(CONFIG_SND_AT91_SOC_SAM9X5_WM8731) += snd-soc-sam9x5-wm8731.o
obj-$(CONFIG_SND_ATMEL_SOC_CLASSD) += snd-atmel-soc-classd.o
obj-$(CONFIG_SND_ATMEL_SOC_PDMIC) += snd-atmel-soc-pdmic.o
obj-$(CONFIG_SND_ATMEL_SOC_TSE850_PCM5142) += snd-atmel-soc-tse850-pcm5142.o
+obj-$(CONFIG_SND_SOC_MIKROE_PROTO) += snd-soc-mikroe-proto.o
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index d3b69682d9c2..6291ec7f9dd6 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -1005,11 +1005,11 @@ static int asoc_ssc_init(struct device *dev)
struct ssc_device *ssc = dev_get_drvdata(dev);
int ret;
- ret = snd_soc_register_component(dev, &atmel_ssc_component,
+ ret = devm_snd_soc_register_component(dev, &atmel_ssc_component,
&atmel_ssc_dai, 1);
if (ret) {
dev_err(dev, "Could not register DAI: %d\n", ret);
- goto err;
+ return ret;
}
if (ssc->pdata->use_dma)
@@ -1019,15 +1019,10 @@ static int asoc_ssc_init(struct device *dev)
if (ret) {
dev_err(dev, "Could not register PCM: %d\n", ret);
- goto err_unregister_dai;
+ return ret;
}
return 0;
-
-err_unregister_dai:
- snd_soc_unregister_component(dev);
-err:
- return ret;
}
static void asoc_ssc_exit(struct device *dev)
@@ -1038,8 +1033,6 @@ static void asoc_ssc_exit(struct device *dev)
atmel_pcm_dma_platform_unregister(dev);
else
atmel_pcm_pdc_platform_unregister(dev);
-
- snd_soc_unregister_component(dev);
}
/**
diff --git a/sound/soc/atmel/mikroe-proto.c b/sound/soc/atmel/mikroe-proto.c
new file mode 100644
index 000000000000..d47aaa5bf75a
--- /dev/null
+++ b/sound/soc/atmel/mikroe-proto.c
@@ -0,0 +1,165 @@
+/*
+ * ASoC driver for PROTO AudioCODEC (with a WM8731)
+ *
+ * Author: Florian Meier <koalo@koalo.de>
+ * Copyright 2013
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+#include "../codecs/wm8731.h"
+
+#define XTAL_RATE 12288000 /* This is fixed on this board */
+
+static int snd_proto_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+ /* Set proto sysclk */
+ int ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
+ XTAL_RATE, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(card->dev, "Failed to set WM8731 SYSCLK: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget snd_proto_widget[] = {
+ SND_SOC_DAPM_MIC("Microphone Jack", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route snd_proto_route[] = {
+ /* speaker connected to LHPOUT/RHPOUT */
+ {"Headphone Jack", NULL, "LHPOUT"},
+ {"Headphone Jack", NULL, "RHPOUT"},
+
+ /* mic is connected to Mic Jack, with WM8731 Mic Bias */
+ {"MICIN", NULL, "Mic Bias"},
+ {"Mic Bias", NULL, "Microphone Jack"},
+};
+
+/* audio machine driver */
+static struct snd_soc_card snd_proto = {
+ .name = "snd_mikroe_proto",
+ .owner = THIS_MODULE,
+ .dapm_widgets = snd_proto_widget,
+ .num_dapm_widgets = ARRAY_SIZE(snd_proto_widget),
+ .dapm_routes = snd_proto_route,
+ .num_dapm_routes = ARRAY_SIZE(snd_proto_route),
+};
+
+static int snd_proto_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dai_link *dai;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *codec_np, *cpu_np;
+ struct device_node *bitclkmaster = NULL;
+ struct device_node *framemaster = NULL;
+ unsigned int dai_fmt;
+ int ret = 0;
+
+ if (!np) {
+ dev_err(&pdev->dev, "No device node supplied\n");
+ return -EINVAL;
+ }
+
+ snd_proto.dev = &pdev->dev;
+ ret = snd_soc_of_parse_card_name(&snd_proto, "model");
+ if (ret)
+ return ret;
+
+ dai = devm_kzalloc(&pdev->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ snd_proto.dai_link = dai;
+ snd_proto.num_links = 1;
+
+ dai->name = "WM8731";
+ dai->stream_name = "WM8731 HiFi";
+ dai->codec_dai_name = "wm8731-hifi";
+ dai->init = &snd_proto_init;
+
+ codec_np = of_parse_phandle(np, "audio-codec", 0);
+ if (!codec_np) {
+ dev_err(&pdev->dev, "audio-codec node missing\n");
+ return -EINVAL;
+ }
+ dai->codec_of_node = codec_np;
+
+ cpu_np = of_parse_phandle(np, "i2s-controller", 0);
+ if (!cpu_np) {
+ dev_err(&pdev->dev, "i2s-controller missing\n");
+ return -EINVAL;
+ }
+ dai->cpu_of_node = cpu_np;
+ dai->platform_of_node = cpu_np;
+
+ dai_fmt = snd_soc_of_parse_daifmt(np, NULL,
+ &bitclkmaster, &framemaster);
+ if (bitclkmaster != framemaster) {
+ dev_err(&pdev->dev, "Bit clock and frame master must be the same\n");
+ return -EINVAL;
+ }
+ if (bitclkmaster) {
+ dai_fmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
+ if (codec_np == bitclkmaster)
+ dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
+ else
+ dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
+ }
+ of_node_put(bitclkmaster);
+ of_node_put(framemaster);
+ dai->dai_fmt = dai_fmt;
+
+ of_node_put(codec_np);
+ of_node_put(cpu_np);
+
+ ret = snd_soc_register_card(&snd_proto);
+ if (ret && ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "snd_soc_register_card() failed: %d\n", ret);
+
+ return ret;
+}
+
+static int snd_proto_remove(struct platform_device *pdev)
+{
+ return snd_soc_unregister_card(&snd_proto);
+}
+
+static const struct of_device_id snd_proto_of_match[] = {
+ { .compatible = "mikroe,mikroe-proto", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, snd_proto_of_match);
+
+static struct platform_driver snd_proto_driver = {
+ .driver = {
+ .name = "snd-mikroe-proto",
+ .of_match_table = snd_proto_of_match,
+ },
+ .probe = snd_proto_probe,
+ .remove = snd_proto_remove,
+};
+
+module_platform_driver(snd_proto_driver);
+
+MODULE_AUTHOR("Florian Meier");
+MODULE_DESCRIPTION("ASoC Driver for PROTO board (WM8731)");
+MODULE_LICENSE("GPL");
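The probe above takes node references with of_parse_phandle(), and each reference must be dropped with of_node_put() on every exit path. A hedged sketch of the usual goto-unwind shape (the label names are illustrative, not from this driver):

static int example_probe_refs(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device_node *codec_np, *cpu_np;
	int ret = 0;

	codec_np = of_parse_phandle(np, "audio-codec", 0);
	if (!codec_np)
		return -EINVAL;

	cpu_np = of_parse_phandle(np, "i2s-controller", 0);
	if (!cpu_np) {
		ret = -EINVAL;
		goto put_codec;
	}

	/* ... use both nodes while the references are held ... */

	of_node_put(cpu_np);
put_codec:
	of_node_put(codec_np);
	return ret;
}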
diff --git a/sound/soc/atmel/tse850-pcm5142.c b/sound/soc/atmel/tse850-pcm5142.c
index 3a1393283156..214adcad5419 100644
--- a/sound/soc/atmel/tse850-pcm5142.c
+++ b/sound/soc/atmel/tse850-pcm5142.c
@@ -1,44 +1,38 @@
-/*
- * TSE-850 audio - ASoC driver for the Axentia TSE-850 with a PCM5142 codec
- *
- * Copyright (C) 2016 Axentia Technologies AB
- *
- * Author: Peter Rosin <peda@axentia.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * loop1 relays
- * IN1 +---o +------------+ o---+ OUT1
- * \ /
- * + +
- * | / |
- * +--o +--. |
- * | add | |
- * | V |
- * | .---. |
- * DAC +----------->|Sum|---+
- * | '---' |
- * | |
- * + +
- *
- * IN2 +---o--+------------+--o---+ OUT2
- * loop2 relays
- *
- * The 'loop1' gpio pin controlls two relays, which are either in loop
- * position, meaning that input and output are directly connected, or
- * they are in mixer position, meaning that the signal is passed through
- * the 'Sum' mixer. Similarly for 'loop2'.
- *
- * In the above, the 'loop1' relays are inactive, thus feeding IN1 to the
- * mixer (if 'add' is active) and feeding the mixer output to OUT1. The
- * 'loop2' relays are active, short-cutting the TSE-850 from channel 2.
- * IN1, IN2, OUT1 and OUT2 are TSE-850 connectors and DAC is the PCB name
- * of the (filtered) output from the PCM5142 codec.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// TSE-850 audio - ASoC driver for the Axentia TSE-850 with a PCM5142 codec
+//
+// Copyright (C) 2016 Axentia Technologies AB
+//
+// Author: Peter Rosin <peda@axentia.se>
+//
+// loop1 relays
+// IN1 +---o +------------+ o---+ OUT1
+// \ /
+// + +
+// | / |
+// +--o +--. |
+// | add | |
+// | V |
+// | .---. |
+// DAC +----------->|Sum|---+
+// | '---' |
+// | |
+// + +
+//
+// IN2 +---o--+------------+--o---+ OUT2
+// loop2 relays
+//
+// The 'loop1' GPIO pin controls two relays, which are either in loop
+// position, meaning that input and output are directly connected, or
+// they are in mixer position, meaning that the signal is passed through
+// the 'Sum' mixer. Similarly for 'loop2'.
+//
+// In the above, the 'loop1' relays are inactive, thus feeding IN1 to the
+// mixer (if 'add' is active) and feeding the mixer output to OUT1. The
+// 'loop2' relays are active, short-cutting the TSE-850 from channel 2.
+// IN1, IN2, OUT1 and OUT2 are TSE-850 connectors and DAC is the PCB name
+// of the (filtered) output from the PCM5142 codec.
#include <linux/clk.h>
#include <linux/gpio.h>
@@ -452,4 +446,4 @@ module_platform_driver(tse850_driver);
/* Module information */
MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
MODULE_DESCRIPTION("ALSA SoC driver for TSE-850 with PCM5142 codec");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/bcm/cygnus-ssp.c b/sound/soc/bcm/cygnus-ssp.c
index b733f1446353..b7c358b48d8d 100644
--- a/sound/soc/bcm/cygnus-ssp.c
+++ b/sound/soc/bcm/cygnus-ssp.c
@@ -1334,7 +1334,7 @@ static int cygnus_ssp_probe(struct platform_device *pdev)
cygaud->active_ports = 0;
dev_dbg(dev, "Registering %d DAIs\n", active_port_count);
- err = snd_soc_register_component(dev, &cygnus_ssp_component,
+ err = devm_snd_soc_register_component(dev, &cygnus_ssp_component,
cygnus_ssp_dai, active_port_count);
if (err) {
dev_err(dev, "snd_soc_register_dai failed\n");
@@ -1345,32 +1345,27 @@ static int cygnus_ssp_probe(struct platform_device *pdev)
if (cygaud->irq_num <= 0) {
dev_err(dev, "platform_get_irq failed\n");
err = cygaud->irq_num;
- goto err_irq;
+ return err;
}
err = audio_clk_init(pdev, cygaud);
if (err) {
dev_err(dev, "audio clock initialization failed\n");
- goto err_irq;
+ return err;
}
err = cygnus_soc_platform_register(dev, cygaud);
if (err) {
dev_err(dev, "platform reg error %d\n", err);
- goto err_irq;
+ return err;
}
return 0;
-
-err_irq:
- snd_soc_unregister_component(dev);
- return err;
}
static int cygnus_ssp_remove(struct platform_device *pdev)
{
cygnus_soc_platform_unregister(&pdev->dev);
- snd_soc_unregister_component(&pdev->dev);
return 0;
}
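The conversions in this file and in atmel_ssc_dai.c rely on devm_snd_soc_register_component(), whose cleanup runs automatically when the device is unbound, so error paths shrink to plain returns and remove() loses its unregister call. A minimal sketch, where the component and DAI driver structs are hypothetical placeholders:

static int example_platform_probe(struct platform_device *pdev)
{
	int ret;

	/* unregistered automatically on driver detach; nothing to
	 * undo by hand in remove() or on a later probe failure */
	ret = devm_snd_soc_register_component(&pdev->dev,
					      &example_component_drv,
					      &example_dai_drv, 1);
	if (ret)
		dev_err(&pdev->dev, "component register failed: %d\n", ret);

	return ret;
}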
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index efb095dbcd71..9cc4f1848c9b 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -82,6 +82,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_ES7241
select SND_SOC_GTM601
select SND_SOC_HDAC_HDMI
+ select SND_SOC_HDAC_HDA
select SND_SOC_ICS43432
select SND_SOC_INNO_RK3036
select SND_SOC_ISABELLE if I2C
@@ -109,6 +110,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MT6351 if MTK_PMIC_WRAP
select SND_SOC_NAU8540 if I2C
select SND_SOC_NAU8810 if I2C
+ select SND_SOC_NAU8822 if I2C
select SND_SOC_NAU8824 if I2C
select SND_SOC_NAU8825 if I2C
select SND_SOC_HDMI_CODEC
@@ -119,6 +121,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_PCM186X_I2C if I2C
select SND_SOC_PCM186X_SPI if SPI_MASTER
select SND_SOC_PCM3008
+ select SND_SOC_PCM3060_I2C if I2C
+ select SND_SOC_PCM3060_SPI if SPI_MASTER
select SND_SOC_PCM3168A_I2C if I2C
select SND_SOC_PCM3168A_SPI if SPI_MASTER
select SND_SOC_PCM5102A
@@ -575,7 +579,11 @@ config SND_SOC_DA9055
tristate
config SND_SOC_DMIC
- tristate
+ tristate "Generic Digital Microphone CODEC"
+ depends on GPIOLIB
+ help
+ Enable support for the Generic Digital Microphone CODEC.
+ Select this if your sound card has DMICs.
config SND_SOC_HDMI_CODEC
tristate
@@ -615,6 +623,10 @@ config SND_SOC_HDAC_HDMI
select SND_PCM_ELD
select HDMI
+config SND_SOC_HDAC_HDA
+ tristate
+ select SND_HDA
+
config SND_SOC_ICS43432
tristate
@@ -629,7 +641,8 @@ config SND_SOC_LM49453
tristate
config SND_SOC_MAX98088
- tristate
+ tristate "Maxim MAX98088/9 Low-Power, Stereo Audio Codec"
+ depends on I2C
config SND_SOC_MAX98090
tristate
@@ -732,6 +745,21 @@ config SND_SOC_PCM186X_SPI
config SND_SOC_PCM3008
tristate
+config SND_SOC_PCM3060
+ tristate
+
+config SND_SOC_PCM3060_I2C
+ tristate "Texas Instruments PCM3060 CODEC - I2C"
+ depends on I2C
+ select SND_SOC_PCM3060
+ select REGMAP_I2C
+
+config SND_SOC_PCM3060_SPI
+ tristate "Texas Instruments PCM3060 CODEC - SPI"
+ depends on SPI_MASTER
+ select SND_SOC_PCM3060
+ select REGMAP_SPI
+
config SND_SOC_PCM3168A
tristate
@@ -1299,6 +1327,10 @@ config SND_SOC_NAU8810
tristate "Nuvoton Technology Corporation NAU88C10 CODEC"
depends on I2C
+config SND_SOC_NAU8822
+ tristate "Nuvoton Technology Corporation NAU88C22 CODEC"
+ depends on I2C
+
config SND_SOC_NAU8824
tristate "Nuvoton Technology Corporation NAU88L24 CODEC"
depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 7ae7c85e8219..8ffab8c8dbfa 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -78,6 +78,7 @@ snd-soc-es8328-i2c-objs := es8328-i2c.o
snd-soc-es8328-spi-objs := es8328-spi.o
snd-soc-gtm601-objs := gtm601.o
snd-soc-hdac-hdmi-objs := hdac_hdmi.o
+snd-soc-hdac-hda-objs := hdac_hda.o
snd-soc-ics43432-objs := ics43432.o
snd-soc-inno-rk3036-objs := inno_rk3036.o
snd-soc-isabelle-objs := isabelle.o
@@ -106,6 +107,7 @@ snd-soc-msm8916-digital-objs := msm8916-wcd-digital.o
snd-soc-mt6351-objs := mt6351.o
snd-soc-nau8540-objs := nau8540.o
snd-soc-nau8810-objs := nau8810.o
+snd-soc-nau8822-objs := nau8822.o
snd-soc-nau8824-objs := nau8824.o
snd-soc-nau8825-objs := nau8825.o
snd-soc-hdmi-codec-objs := hdmi-codec.o
@@ -119,6 +121,9 @@ snd-soc-pcm186x-objs := pcm186x.o
snd-soc-pcm186x-i2c-objs := pcm186x-i2c.o
snd-soc-pcm186x-spi-objs := pcm186x-spi.o
snd-soc-pcm3008-objs := pcm3008.o
+snd-soc-pcm3060-objs := pcm3060.o
+snd-soc-pcm3060-i2c-objs := pcm3060-i2c.o
+snd-soc-pcm3060-spi-objs := pcm3060-spi.o
snd-soc-pcm3168a-objs := pcm3168a.o
snd-soc-pcm3168a-i2c-objs := pcm3168a-i2c.o
snd-soc-pcm3168a-spi-objs := pcm3168a-spi.o
@@ -338,6 +343,7 @@ obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o
obj-$(CONFIG_SND_SOC_GTM601) += snd-soc-gtm601.o
obj-$(CONFIG_SND_SOC_HDAC_HDMI) += snd-soc-hdac-hdmi.o
+obj-$(CONFIG_SND_SOC_HDAC_HDA) += snd-soc-hdac-hda.o
obj-$(CONFIG_SND_SOC_ICS43432) += snd-soc-ics43432.o
obj-$(CONFIG_SND_SOC_INNO_RK3036) += snd-soc-inno-rk3036.o
obj-$(CONFIG_SND_SOC_ISABELLE) += snd-soc-isabelle.o
@@ -366,6 +372,7 @@ obj-$(CONFIG_SND_SOC_MSM8916_WCD_DIGITAL) +=snd-soc-msm8916-digital.o
obj-$(CONFIG_SND_SOC_MT6351) += snd-soc-mt6351.o
obj-$(CONFIG_SND_SOC_NAU8540) += snd-soc-nau8540.o
obj-$(CONFIG_SND_SOC_NAU8810) += snd-soc-nau8810.o
+obj-$(CONFIG_SND_SOC_NAU8822) += snd-soc-nau8822.o
obj-$(CONFIG_SND_SOC_NAU8824) += snd-soc-nau8824.o
obj-$(CONFIG_SND_SOC_NAU8825) += snd-soc-nau8825.o
obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o
@@ -379,6 +386,9 @@ obj-$(CONFIG_SND_SOC_PCM186X) += snd-soc-pcm186x.o
obj-$(CONFIG_SND_SOC_PCM186X_I2C) += snd-soc-pcm186x-i2c.o
obj-$(CONFIG_SND_SOC_PCM186X_SPI) += snd-soc-pcm186x-spi.o
obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o
+obj-$(CONFIG_SND_SOC_PCM3060) += snd-soc-pcm3060.o
+obj-$(CONFIG_SND_SOC_PCM3060_I2C) += snd-soc-pcm3060-i2c.o
+obj-$(CONFIG_SND_SOC_PCM3060_SPI) += snd-soc-pcm3060-spi.o
obj-$(CONFIG_SND_SOC_PCM3168A) += snd-soc-pcm3168a.o
obj-$(CONFIG_SND_SOC_PCM3168A_I2C) += snd-soc-pcm3168a-i2c.o
obj-$(CONFIG_SND_SOC_PCM3168A_SPI) += snd-soc-pcm3168a-spi.o
diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c
index be136e981653..bef3e9e74c26 100644
--- a/sound/soc/codecs/adau1761.c
+++ b/sound/soc/codecs/adau1761.c
@@ -518,7 +518,8 @@ static int adau1761_setup_digmic_jackdetect(struct snd_soc_component *component)
ARRAY_SIZE(adau1761_jack_detect_controls));
if (ret)
return ret;
- case ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE: /* fallthrough */
+ /* fall through */
+ case ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE:
ret = snd_soc_dapm_add_routes(dapm, adau1761_no_dmic_routes,
ARRAY_SIZE(adau1761_no_dmic_routes));
if (ret)
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 57169b8ff14e..3959e6ad113d 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -21,11 +21,18 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
+#include <asm/unaligned.h>
#include "sigmadsp.h"
#include "adau17x1.h"
#include "adau-utils.h"
+#define ADAU17X1_SAFELOAD_TARGET_ADDRESS 0x0006
+#define ADAU17X1_SAFELOAD_TRIGGER 0x0007
+#define ADAU17X1_SAFELOAD_DATA 0x0001
+#define ADAU17X1_SAFELOAD_DATA_SIZE 20
+#define ADAU17X1_WORD_SIZE 4
+
static const char * const adau17x1_capture_mixer_boost_text[] = {
"Normal operation", "Boost Level 1", "Boost Level 2", "Boost Level 3",
};
@@ -60,6 +67,9 @@ static const struct snd_kcontrol_new adau17x1_controls[] = {
SOC_ENUM("Mic Bias Mode", adau17x1_mic_bias_mode_enum),
};
+static int adau17x1_setup_firmware(struct snd_soc_component *component,
+ unsigned int rate);
+
static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -313,7 +323,7 @@ static const struct snd_soc_dapm_route adau17x1_no_dsp_dapm_routes[] = {
{ "Capture", NULL, "Right Decimator" },
};
-bool adau17x1_has_dsp(struct adau *adau)
+static bool adau17x1_has_dsp(struct adau *adau)
{
switch (adau->type) {
case ADAU1761:
@@ -324,7 +334,17 @@ bool adau17x1_has_dsp(struct adau *adau)
return false;
}
}
-EXPORT_SYMBOL_GPL(adau17x1_has_dsp);
+
+static bool adau17x1_has_safeload(struct adau *adau)
+{
+ switch (adau->type) {
+ case ADAU1761:
+ case ADAU1781:
+ return true;
+ default:
+ return false;
+ }
+}
static int adau17x1_set_dai_pll(struct snd_soc_dai *dai, int pll_id,
int source, unsigned int freq_in, unsigned int freq_out)
@@ -836,7 +856,7 @@ bool adau17x1_volatile_register(struct device *dev, unsigned int reg)
}
EXPORT_SYMBOL_GPL(adau17x1_volatile_register);
-int adau17x1_setup_firmware(struct snd_soc_component *component,
+static int adau17x1_setup_firmware(struct snd_soc_component *component,
unsigned int rate)
{
int ret;
@@ -880,7 +900,6 @@ err:
return ret;
}
-EXPORT_SYMBOL_GPL(adau17x1_setup_firmware);
int adau17x1_add_widgets(struct snd_soc_component *component)
{
@@ -957,6 +976,56 @@ int adau17x1_resume(struct snd_soc_component *component)
}
EXPORT_SYMBOL_GPL(adau17x1_resume);
+static int adau17x1_safeload(struct sigmadsp *sigmadsp, unsigned int addr,
+ const uint8_t bytes[], size_t len)
+{
+ uint8_t buf[ADAU17X1_WORD_SIZE];
+ uint8_t data[ADAU17X1_SAFELOAD_DATA_SIZE];
+ unsigned int addr_offset;
+ unsigned int nbr_words;
+ int ret;
+
+ /* Write the data to the safeload registers. If len is not a
+ * multiple of 4 bytes, zero-pad the buffer up to the next full
+ * word before writing.
+ */
+ nbr_words = len / ADAU17X1_WORD_SIZE;
+ if ((len - nbr_words * ADAU17X1_WORD_SIZE) == 0) {
+ ret = regmap_raw_write(sigmadsp->control_data,
+ ADAU17X1_SAFELOAD_DATA, bytes, len);
+ } else {
+ nbr_words++;
+ memset(data, 0, ADAU17X1_SAFELOAD_DATA_SIZE);
+ memcpy(data, bytes, len);
+ ret = regmap_raw_write(sigmadsp->control_data,
+ ADAU17X1_SAFELOAD_DATA, data,
+ nbr_words * ADAU17X1_WORD_SIZE);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* Write target address, target address is offset by 1 */
+ addr_offset = addr - 1;
+ put_unaligned_be32(addr_offset, buf);
+ ret = regmap_raw_write(sigmadsp->control_data,
+ ADAU17X1_SAFELOAD_TARGET_ADDRESS, buf, ADAU17X1_WORD_SIZE);
+ if (ret < 0)
+ return ret;
+
+ /* write nbr of words to trigger address */
+ put_unaligned_be32(nbr_words, buf);
+ ret = regmap_raw_write(sigmadsp->control_data,
+ ADAU17X1_SAFELOAD_TRIGGER, buf, ADAU17X1_WORD_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
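A worked sketch of the padding arithmetic in adau17x1_safeload() above, using a hypothetical 6-byte parameter block: one full word fits, and the remainder forces a bounce through a zeroed buffer rounded up to two words:

static void example_safeload_pad(void)
{
	uint8_t data[ADAU17X1_SAFELOAD_DATA_SIZE];
	const uint8_t bytes[6] = { 1, 2, 3, 4, 5, 6 };	/* hypothetical */
	size_t len = sizeof(bytes);
	unsigned int nbr_words = len / ADAU17X1_WORD_SIZE;	/* 1 */

	if (len - nbr_words * ADAU17X1_WORD_SIZE) {
		nbr_words++;					/* 2 */
		memset(data, 0, ADAU17X1_SAFELOAD_DATA_SIZE);
		memcpy(data, bytes, len);
		/* 8 bytes (2 words) are then written to
		 * ADAU17X1_SAFELOAD_DATA, the last 2 zero-padded */
	}
}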
+
+static const struct sigmadsp_ops adau17x1_sigmadsp_ops = {
+ .safeload = adau17x1_safeload,
+};
+
int adau17x1_probe(struct device *dev, struct regmap *regmap,
enum adau17x1_type type, void (*switch_mode)(struct device *dev),
const char *firmware_name)
@@ -1002,8 +1071,13 @@ int adau17x1_probe(struct device *dev, struct regmap *regmap,
dev_set_drvdata(dev, adau);
if (firmware_name) {
- adau->sigmadsp = devm_sigmadsp_init_regmap(dev, regmap, NULL,
- firmware_name);
+ if (adau17x1_has_safeload(adau)) {
+ adau->sigmadsp = devm_sigmadsp_init_regmap(dev, regmap,
+ &adau17x1_sigmadsp_ops, firmware_name);
+ } else {
+ adau->sigmadsp = devm_sigmadsp_init_regmap(dev, regmap,
+ NULL, firmware_name);
+ }
if (IS_ERR(adau->sigmadsp)) {
dev_warn(dev, "Could not find firmware file: %ld\n",
PTR_ERR(adau->sigmadsp));
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index e6fe87beec07..98a3b6f5bc96 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -68,10 +68,6 @@ int adau17x1_resume(struct snd_soc_component *component);
extern const struct snd_soc_dai_ops adau17x1_dai_ops;
-int adau17x1_setup_firmware(struct snd_soc_component *component,
- unsigned int rate);
-bool adau17x1_has_dsp(struct adau *adau);
-
#define ADAU17X1_CLOCK_CONTROL 0x4000
#define ADAU17X1_PLL_CONTROL 0x4002
#define ADAU17X1_REC_POWER_MGMT 0x4009
diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
index 668cd3754209..e9b7f72d880b 100644
--- a/sound/soc/codecs/cs35l33.c
+++ b/sound/soc/codecs/cs35l33.c
@@ -857,7 +857,8 @@ static const struct regmap_config cs35l33_regmap = {
.readable_reg = cs35l33_readable_register,
.writeable_reg = cs35l33_writeable_register,
.cache_type = REGCACHE_RBTREE,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
static int __maybe_unused cs35l33_runtime_resume(struct device *dev)
diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c
index bd6226bde45f..9f4a59871cee 100644
--- a/sound/soc/codecs/cs35l35.c
+++ b/sound/soc/codecs/cs35l35.c
@@ -1105,7 +1105,8 @@ static struct regmap_config cs35l35_regmap = {
.readable_reg = cs35l35_readable_register,
.precious_reg = cs35l35_precious_register,
.cache_type = REGCACHE_RBTREE,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
static irqreturn_t cs35l35_irq(int irq, void *data)
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 275677de669f..ab27d2b94d02 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -154,11 +154,11 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
SOC_SINGLE("E to F Buffer Disable Switch", CS4265_SPDIF_CTL1,
6, 1, 0),
SOC_ENUM("C Data Access", cam_mode_enum),
+ SOC_SINGLE("SPDIF Switch", CS4265_SPDIF_CTL2, 5, 1, 1),
SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
3, 1, 0),
SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
- SOC_SINGLE("MMTLR Data Switch", 0,
- 1, 1, 0),
+ SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2, 0, 1, 0),
SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
};
@@ -221,10 +221,11 @@ static const struct snd_soc_dapm_route cs4265_audio_map[] = {
{"LINEOUTR", NULL, "DAC"},
{"SPDIFOUT", NULL, "SPDIF"},
+ {"Pre-amp MIC", NULL, "MICL"},
+ {"Pre-amp MIC", NULL, "MICR"},
+ {"ADC Mux", "MIC", "Pre-amp MIC"},
{"ADC Mux", "LINEIN", "LINEINL"},
{"ADC Mux", "LINEIN", "LINEINR"},
- {"ADC Mux", "MIC", "MICL"},
- {"ADC Mux", "MIC", "MICR"},
{"ADC", NULL, "ADC Mux"},
{"DOUT", NULL, "ADC"},
{"DAI1 Capture", NULL, "DOUT"},
@@ -496,7 +497,8 @@ static int cs4265_set_bias_level(struct snd_soc_component *component,
SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
#define CS4265_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE | \
- SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE)
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_U32_LE)
static const struct snd_soc_dai_ops cs4265_ops = {
.hw_params = cs4265_pcm_hw_params,
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index 5080d7a3c279..fd2bd74024c1 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -21,6 +21,7 @@
* - master mode *NOT* supported
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/core.h>
@@ -41,6 +42,7 @@ enum master_slave_mode {
struct cs42l51_private {
unsigned int mclk;
+ struct clk *mclk_handle;
unsigned int audio_mode; /* The mode (I2S or left-justified) */
enum master_slave_mode func;
};
@@ -237,6 +239,10 @@ static const struct snd_soc_dapm_widget cs42l51_dapm_widgets[] = {
&cs42l51_adcr_mux_controls),
};
+static const struct snd_soc_dapm_widget cs42l51_dapm_mclk_widgets[] = {
+ SND_SOC_DAPM_CLOCK_SUPPLY("MCLK")
+};
+
static const struct snd_soc_dapm_route cs42l51_routes[] = {
{"HPL", NULL, "Left DAC"},
{"HPR", NULL, "Right DAC"},
@@ -487,6 +493,14 @@ static struct snd_soc_dai_driver cs42l51_dai = {
static int cs42l51_component_probe(struct snd_soc_component *component)
{
int ret, reg;
+ struct snd_soc_dapm_context *dapm;
+ struct cs42l51_private *cs42l51;
+
+ cs42l51 = snd_soc_component_get_drvdata(component);
+ dapm = snd_soc_component_get_dapm(component);
+
+ if (cs42l51->mclk_handle)
+ snd_soc_dapm_new_controls(dapm, cs42l51_dapm_mclk_widgets, 1);
/*
* DAC configuration
@@ -540,6 +554,13 @@ int cs42l51_probe(struct device *dev, struct regmap *regmap)
dev_set_drvdata(dev, cs42l51);
+ cs42l51->mclk_handle = devm_clk_get(dev, "MCLK");
+ if (IS_ERR(cs42l51->mclk_handle)) {
+ if (PTR_ERR(cs42l51->mclk_handle) != -ENOENT)
+ return PTR_ERR(cs42l51->mclk_handle);
+ cs42l51->mclk_handle = NULL;
+ }
+
/* Verify that we have a CS42L51 */
ret = regmap_read(regmap, CS42L51_CHIP_REV_ID, &val);
if (ret < 0) {
diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
index 80dc42197154..3f7b255587e6 100644
--- a/sound/soc/codecs/cs43130.c
+++ b/sound/soc/codecs/cs43130.c
@@ -2362,7 +2362,9 @@ static const struct regmap_config cs43130_regmap = {
.precious_reg = cs43130_precious_register,
.volatile_reg = cs43130_volatile_register,
.cache_type = REGCACHE_RBTREE,
- .use_single_rw = true, /* needed for regcache_sync */
+ /* needed for regcache_sync */
+ .use_single_read = true,
+ .use_single_write = true,
};
static u16 const cs43130_dc_threshold[CS43130_DC_THRESHOLD] = {
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index 8c4926df9286..71322e0410ee 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -148,6 +148,7 @@ static const struct of_device_id dmic_dev_match[] = {
{.compatible = "dmic-codec"},
{}
};
+MODULE_DEVICE_TABLE(of, dmic_dev_match);
static struct platform_driver dmic_driver = {
.driver = {
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index e9fc2fd97d2f..04a3aa770722 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -566,14 +566,14 @@ static int es8328_set_sysclk(struct snd_soc_dai *codec_dai,
break;
case 22579200:
mclkdiv2 = 1;
- /* fallthru */
+ /* fall through */
case 11289600:
es8328->sysclk_constraints = &constraints_11289;
es8328->mclk_ratios = ratios_11289;
break;
case 24576000:
mclkdiv2 = 1;
- /* fallthru */
+ /* fall through */
case 12288000:
es8328->sysclk_constraints = &constraints_12288;
es8328->mclk_ratios = ratios_12288;
@@ -824,7 +824,8 @@ const struct regmap_config es8328_regmap_config = {
.val_bits = 8,
.max_register = ES8328_REG_MAX,
.cache_type = REGCACHE_RBTREE,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
EXPORT_SYMBOL_GPL(es8328_regmap_config);
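The use_single_rw conversions in this series all follow the same pattern: regmap replaced the single flag with independent read and write variants. A sketch of a config using them, with a hypothetical register map:

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0xff,		/* hypothetical */
	.cache_type = REGCACHE_RBTREE,
	/* one register per bus transaction, in both directions */
	.use_single_read = true,
	.use_single_write = true,
};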
diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
new file mode 100644
index 000000000000..2aaa83028e55
--- /dev/null
+++ b/sound/soc/codecs/hdac_hda.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-18 Intel Corporation.
+
+/*
+ * hdac_hda.c - ASoC extensions to reuse the legacy HDA codec drivers
+ * with ASoC platform drivers. These APIs are called by the legacy HDA
+ * codec drivers using hdac_ext_bus_ops ops.
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_register.h>
+#include "hdac_hda.h"
+
+#define HDAC_ANALOG_DAI_ID 0
+#define HDAC_DIGITAL_DAI_ID 1
+#define HDAC_ALT_ANALOG_DAI_ID 2
+
+#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
+ SNDRV_PCM_FMTBIT_U8 | \
+ SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_U16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_U24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE | \
+ SNDRV_PCM_FMTBIT_U32_LE | \
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+
+static int hdac_hda_dai_open(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+static void hdac_hda_dai_close(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width);
+static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt,
+ struct snd_soc_dai *dai);
+
+static struct snd_soc_dai_ops hdac_hda_dai_ops = {
+ .startup = hdac_hda_dai_open,
+ .shutdown = hdac_hda_dai_close,
+ .prepare = hdac_hda_dai_prepare,
+ .hw_free = hdac_hda_dai_hw_free,
+ .set_tdm_slot = hdac_hda_dai_set_tdm_slot,
+};
+
+static struct snd_soc_dai_driver hdac_hda_dais[] = {
+{
+ .id = HDAC_ANALOG_DAI_ID,
+ .name = "Analog Codec DAI",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "Analog Codec Playback",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+ .capture = {
+ .stream_name = "Analog Codec Capture",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+},
+{
+ .id = HDAC_DIGITAL_DAI_ID,
+ .name = "Digital Codec DAI",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "Digital Codec Playback",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+ .capture = {
+ .stream_name = "Digital Codec Capture",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+},
+{
+ .id = HDAC_ALT_ANALOG_DAI_ID,
+ .name = "Alt Analog Codec DAI",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "Alt Analog Codec Playback",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+ .capture = {
+ .stream_name = "Alt Analog Codec Capture",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+}
+
+};
+
+static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_component *component = dai->component;
+ struct hdac_hda_priv *hda_pvt;
+ struct hdac_hda_pcm *pcm;
+
+ hda_pvt = snd_soc_component_get_drvdata(component);
+ pcm = &hda_pvt->pcm[dai->id];
+ if (tx_mask)
+ pcm->stream_tag[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
+ else
+ pcm->stream_tag[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
+
+ return 0;
+}
+
+static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct hdac_hda_priv *hda_pvt;
+ struct hda_pcm_stream *hda_stream;
+ struct hda_pcm *pcm;
+
+ hda_pvt = snd_soc_component_get_drvdata(component);
+ pcm = snd_soc_find_pcm_from_dai(hda_pvt, dai);
+ if (!pcm)
+ return -EINVAL;
+
+ hda_stream = &pcm->stream[substream->stream];
+ snd_hda_codec_cleanup(&hda_pvt->codec, hda_stream, substream);
+
+ return 0;
+}
+
+static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct hdac_hda_priv *hda_pvt;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct hdac_device *hdev;
+ struct hda_pcm_stream *hda_stream;
+ unsigned int format_val;
+ struct hda_pcm *pcm;
+ unsigned int stream;
+ int ret = 0;
+
+ hda_pvt = snd_soc_component_get_drvdata(component);
+ hdev = &hda_pvt->codec.core;
+ pcm = snd_soc_find_pcm_from_dai(hda_pvt, dai);
+ if (!pcm)
+ return -EINVAL;
+
+ hda_stream = &pcm->stream[substream->stream];
+
+ format_val = snd_hdac_calc_stream_format(runtime->rate,
+ runtime->channels,
+ runtime->format,
+ hda_stream->maxbps,
+ 0);
+ if (!format_val) {
+ dev_err(&hdev->dev,
+ "invalid format_val, rate=%d, ch=%d, format=%d\n",
+ runtime->rate, runtime->channels, runtime->format);
+ return -EINVAL;
+ }
+
+ stream = hda_pvt->pcm[dai->id].stream_tag[substream->stream];
+
+ ret = snd_hda_codec_prepare(&hda_pvt->codec, hda_stream,
+ stream, format_val, substream);
+ if (ret < 0)
+ dev_err(&hdev->dev, "codec prepare failed %d\n", ret);
+
+ return ret;
+}
+
+static int hdac_hda_dai_open(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct hdac_hda_priv *hda_pvt;
+ struct hda_pcm_stream *hda_stream;
+ struct hda_pcm *pcm;
+ int ret;
+
+ hda_pvt = snd_soc_component_get_drvdata(component);
+ pcm = snd_soc_find_pcm_from_dai(hda_pvt, dai);
+ if (!pcm)
+ return -EINVAL;
+
+ snd_hda_codec_pcm_get(pcm);
+
+ hda_stream = &pcm->stream[substream->stream];
+
+ ret = hda_stream->ops.open(hda_stream, &hda_pvt->codec, substream);
+ if (ret < 0)
+ snd_hda_codec_pcm_put(pcm);
+
+ return ret;
+}
+
+static void hdac_hda_dai_close(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct hdac_hda_priv *hda_pvt;
+ struct hda_pcm_stream *hda_stream;
+ struct hda_pcm *pcm;
+
+ hda_pvt = snd_soc_component_get_drvdata(component);
+ pcm = snd_soc_find_pcm_from_dai(hda_pvt, dai);
+ if (!pcm)
+ return;
+
+ hda_stream = &pcm->stream[substream->stream];
+
+ hda_stream->ops.close(hda_stream, &hda_pvt->codec, substream);
+
+ snd_hda_codec_pcm_put(pcm);
+}
+
+static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt,
+ struct snd_soc_dai *dai)
+{
+ struct hda_codec *hcodec = &hda_pvt->codec;
+ struct hda_pcm *cpcm;
+ const char *pcm_name;
+
+ switch (dai->id) {
+ case HDAC_ANALOG_DAI_ID:
+ pcm_name = "Analog";
+ break;
+ case HDAC_DIGITAL_DAI_ID:
+ pcm_name = "Digital";
+ break;
+ case HDAC_ALT_ANALOG_DAI_ID:
+ pcm_name = "Alt Analog";
+ break;
+ default:
+ dev_err(&hcodec->core.dev, "invalid dai id %d\n", dai->id);
+ return NULL;
+ }
+
+ list_for_each_entry(cpcm, &hcodec->pcm_list_head, list) {
+ if (strstr(cpcm->name, pcm_name))
+ return cpcm;
+ }
+
+ dev_err(&hcodec->core.dev, "didn't find PCM for DAI %s\n", dai->name);
+ return NULL;
+}
+
+static int hdac_hda_codec_probe(struct snd_soc_component *component)
+{
+ struct hdac_hda_priv *hda_pvt =
+ snd_soc_component_get_drvdata(component);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ struct hdac_device *hdev = &hda_pvt->codec.core;
+ struct hda_codec *hcodec = &hda_pvt->codec;
+ struct hdac_ext_link *hlink;
+ hda_codec_patch_t patch;
+ int ret;
+
+ hlink = snd_hdac_ext_bus_get_link(hdev->bus, dev_name(&hdev->dev));
+ if (!hlink) {
+ dev_err(&hdev->dev, "hdac link not found\n");
+ return -EIO;
+ }
+
+ snd_hdac_ext_bus_link_get(hdev->bus, hlink);
+
+ ret = snd_hda_codec_device_new(hcodec->bus, component->card->snd_card,
+ hdev->addr, hcodec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to create hda codec %d\n", ret);
+ goto error_no_pm;
+ }
+
+ /*
+ * snd_hda_codec_device_new() decrements the runtime PM usage
+ * count, so take a reference here or the device would be
+ * powered off.
+ */
+ pm_runtime_get_noresume(&hdev->dev);
+
+ hcodec->bus->card = dapm->card->snd_card;
+
+ ret = snd_hda_codec_set_name(hcodec, hcodec->preset->name);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "name failed %s\n", hcodec->preset->name);
+ goto error;
+ }
+
+ ret = snd_hdac_regmap_init(&hcodec->core);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "regmap init failed\n");
+ goto error;
+ }
+
+ patch = (hda_codec_patch_t)hcodec->preset->driver_data;
+ if (patch) {
+ ret = patch(hcodec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "patch failed %d\n", ret);
+ goto error;
+ }
+ } else {
+ dev_dbg(&hdev->dev, "no patch file found\n");
+ }
+
+ ret = snd_hda_codec_parse_pcms(hcodec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "unable to map pcms to dai %d\n", ret);
+ goto error;
+ }
+
+ ret = snd_hda_codec_build_controls(hcodec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "unable to create controls %d\n", ret);
+ goto error;
+ }
+
+ hcodec->core.lazy_cache = true;
+
+ /*
+ * hdac_device core already sets the state to active and calls
+ * get_noresume. So enable runtime and set the device to suspend.
+ * pm_runtime_enable is also called during codec registration.
+ */
+ pm_runtime_put(&hdev->dev);
+ pm_runtime_suspend(&hdev->dev);
+
+ return 0;
+
+error:
+ pm_runtime_put(&hdev->dev);
+error_no_pm:
+ snd_hdac_ext_bus_link_put(hdev->bus, hlink);
+ return ret;
+}
+
+static void hdac_hda_codec_remove(struct snd_soc_component *component)
+{
+ struct hdac_hda_priv *hda_pvt =
+ snd_soc_component_get_drvdata(component);
+ struct hdac_device *hdev = &hda_pvt->codec.core;
+ struct hdac_ext_link *hlink = NULL;
+
+ hlink = snd_hdac_ext_bus_get_link(hdev->bus, dev_name(&hdev->dev));
+ if (!hlink) {
+ dev_err(&hdev->dev, "hdac link not found\n");
+ return;
+ }
+
+ snd_hdac_ext_bus_link_put(hdev->bus, hlink);
+ pm_runtime_disable(&hdev->dev);
+}
+
+static const struct snd_soc_dapm_route hdac_hda_dapm_routes[] = {
+ {"AIF1TX", NULL, "Codec Input Pin1"},
+ {"AIF2TX", NULL, "Codec Input Pin2"},
+ {"AIF3TX", NULL, "Codec Input Pin3"},
+
+ {"Codec Output Pin1", NULL, "AIF1RX"},
+ {"Codec Output Pin2", NULL, "AIF2RX"},
+ {"Codec Output Pin3", NULL, "AIF3RX"},
+};
+
+static const struct snd_soc_dapm_widget hdac_hda_dapm_widgets[] = {
+ /* Audio Interface */
+ SND_SOC_DAPM_AIF_IN("AIF1RX", "Analog Codec Playback", 0,
+ SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF2RX", "Digital Codec Playback", 0,
+ SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF3RX", "Alt Analog Codec Playback", 0,
+ SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX", "Analog Codec Capture", 0,
+ SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF2TX", "Digital Codec Capture", 0,
+ SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF3TX", "Alt Analog Codec Capture", 0,
+ SND_SOC_NOPM, 0, 0),
+
+ /* Input Pins */
+ SND_SOC_DAPM_INPUT("Codec Input Pin1"),
+ SND_SOC_DAPM_INPUT("Codec Input Pin2"),
+ SND_SOC_DAPM_INPUT("Codec Input Pin3"),
+
+ /* Output Pins */
+ SND_SOC_DAPM_OUTPUT("Codec Output Pin1"),
+ SND_SOC_DAPM_OUTPUT("Codec Output Pin2"),
+ SND_SOC_DAPM_OUTPUT("Codec Output Pin3"),
+};
+
+static const struct snd_soc_component_driver hdac_hda_codec = {
+ .probe = hdac_hda_codec_probe,
+ .remove = hdac_hda_codec_remove,
+ .idle_bias_on = false,
+ .dapm_widgets = hdac_hda_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(hdac_hda_dapm_widgets),
+ .dapm_routes = hdac_hda_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(hdac_hda_dapm_routes),
+};
+
+static int hdac_hda_dev_probe(struct hdac_device *hdev)
+{
+ struct hdac_ext_link *hlink;
+ struct hdac_hda_priv *hda_pvt;
+ int ret;
+
+ /* hold the ref while we probe */
+ hlink = snd_hdac_ext_bus_get_link(hdev->bus, dev_name(&hdev->dev));
+ if (!hlink) {
+ dev_err(&hdev->dev, "hdac link not found\n");
+ return -EIO;
+ }
+ snd_hdac_ext_bus_link_get(hdev->bus, hlink);
+
+ hda_pvt = hdac_to_hda_priv(hdev);
+ if (!hda_pvt)
+ return -ENOMEM;
+
+ /* ASoC specific initialization */
+ ret = devm_snd_soc_register_component(&hdev->dev,
+ &hdac_hda_codec, hdac_hda_dais,
+ ARRAY_SIZE(hdac_hda_dais));
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to register HDA codec %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&hdev->dev, hda_pvt);
+ snd_hdac_ext_bus_link_put(hdev->bus, hlink);
+
+ return ret;
+}
+
+static int hdac_hda_dev_remove(struct hdac_device *hdev)
+{
+ return 0;
+}
+
+static struct hdac_ext_bus_ops hdac_ops = {
+ .hdev_attach = hdac_hda_dev_probe,
+ .hdev_detach = hdac_hda_dev_remove,
+};
+
+struct hdac_ext_bus_ops *snd_soc_hdac_hda_get_ops(void)
+{
+ return &hdac_ops;
+}
+EXPORT_SYMBOL_GPL(snd_soc_hdac_hda_get_ops);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ASoC Extensions for legacy HDA Drivers");
+MODULE_AUTHOR("Rakesh Ughreja <rakesh.a.ughreja@intel.com>");
diff --git a/sound/soc/codecs/hdac_hda.h b/sound/soc/codecs/hdac_hda.h
new file mode 100644
index 000000000000..e444ef593360
--- /dev/null
+++ b/sound/soc/codecs/hdac_hda.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2015-18 Intel Corporation.
+ */
+
+#ifndef __HDAC_HDA_H__
+#define __HDAC_HDA_H__
+
+struct hdac_hda_pcm {
+ int stream_tag[2];
+};
+
+struct hdac_hda_priv {
+ struct hda_codec codec;
+ struct hdac_hda_pcm pcm[2];
+};
+
+#define hdac_to_hda_priv(_hdac) \
+ container_of(_hdac, struct hdac_hda_priv, codec.core)
+#define hdac_to_hda_codec(_hdac) container_of(_hdac, struct hda_codec, core)
+
+struct hdac_ext_bus_ops *snd_soc_hdac_hda_get_ops(void);
+
+#endif /* __HDAC_HDA_H__ */
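The hdac_to_hda_priv() helper above is a container_of() wrapper: given the embedded hdac_device it recovers the enclosing private struct. A small usage sketch:

static struct hda_codec *example_hdev_to_codec(struct hdac_device *hdev)
{
	/* hdev points at the codec.core member, so container_of()
	 * steps back out to the wrapper struct */
	struct hdac_hda_priv *hda_pvt = hdac_to_hda_priv(hdev);

	return &hda_pvt->codec;
}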
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 7b8533abf637..4e9854889a95 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1410,6 +1410,12 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdev,
if (ret)
return ret;
+ /* Filter out 44.1, 88.2 and 176.4 kHz */
+ rates &= ~(SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_176400);
+ if (!rates)
+ return -EINVAL;
+
sprintf(dai_name, "intel-hdmi-hifi%d", i+1);
hdmi_dais[i].name = devm_kstrdup(&hdev->dev,
dai_name, GFP_KERNEL);
@@ -1598,7 +1604,7 @@ static struct snd_pcm *hdac_hdmi_get_pcm_from_id(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->pcm && (rtd->pcm->device == device))
return rtd->pcm;
}
@@ -1961,9 +1967,6 @@ static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdev, int pcm_idx)
port = list_first_entry(&pcm->port_list, struct hdac_hdmi_port, head);
- if (!port)
- return 0;
-
if (!port || !port->eld.eld_valid)
return 0;
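The rate filtering added in hdac_hdmi_create_dais() is plain bitmap masking over the SNDRV_PCM_RATE_* flags. A worked sketch with a hypothetical input set:

static u32 example_filter_rates(u32 rates)
{
	/* drop the 44.1 kHz family, keeping the 48 kHz-derived rates */
	rates &= ~(SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_88200 |
		   SNDRV_PCM_RATE_176400);

	/* e.g. (RATE_44100 | RATE_48000) in -> RATE_48000 out */
	return rates;
}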
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index fb515aaa54fc..ca172a4b6849 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -16,6 +16,7 @@
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
+#include <linux/clk.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -42,6 +43,7 @@ struct max98088_priv {
struct regmap *regmap;
enum max98088_type devtype;
struct max98088_pdata *pdata;
+ struct clk *mclk;
unsigned int sysclk;
struct max98088_cdata dai[2];
int eq_textcnt;
@@ -1103,6 +1105,11 @@ static int max98088_dai_set_sysclk(struct snd_soc_dai *dai,
if (freq == max98088->sysclk)
return 0;
+ if (!IS_ERR(max98088->mclk)) {
+ freq = clk_round_rate(max98088->mclk, freq);
+ clk_set_rate(max98088->mclk, freq);
+ }
+
/* Setup clocks for slave mode, and using the PLL
* PSCLK = 0x01 (when master clk is 10MHz to 20MHz)
* 0x02 (when master clk is 20MHz to 30MHz)..
@@ -1310,6 +1317,20 @@ static int max98088_set_bias_level(struct snd_soc_component *component,
break;
case SND_SOC_BIAS_PREPARE:
+ /*
+ * SND_SOC_BIAS_PREPARE is called while preparing for a
+ * transition to ON or away from ON. If current bias_level
+ * is SND_SOC_BIAS_ON, then it is preparing for a transition
+ * away from ON. Disable the clock in that case, otherwise
+ * enable it.
+ */
+ if (!IS_ERR(max98088->mclk)) {
+ if (snd_soc_component_get_bias_level(component) ==
+ SND_SOC_BIAS_ON)
+ clk_disable_unprepare(max98088->mclk);
+ else
+ clk_prepare_enable(max98088->mclk);
+ }
break;
case SND_SOC_BIAS_STANDBY:
@@ -1725,6 +1746,11 @@ static int max98088_i2c_probe(struct i2c_client *i2c,
if (IS_ERR(max98088->regmap))
return PTR_ERR(max98088->regmap);
+ max98088->mclk = devm_clk_get(&i2c->dev, "mclk");
+ if (IS_ERR(max98088->mclk))
+ if (PTR_ERR(max98088->mclk) == -EPROBE_DEFER)
+ return PTR_ERR(max98088->mclk);
+
max98088->devtype = id->driver_data;
i2c_set_clientdata(i2c, max98088);
@@ -1742,9 +1768,19 @@ static const struct i2c_device_id max98088_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max98088_i2c_id);
+#if defined(CONFIG_OF)
+static const struct of_device_id max98088_of_match[] = {
+ { .compatible = "maxim,max98088" },
+ { .compatible = "maxim,max98089" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max98088_of_match);
+#endif
+
static struct i2c_driver max98088_i2c_driver = {
.driver = {
.name = "max98088",
+ .of_match_table = of_match_ptr(max98088_of_match),
},
.probe = max98088_i2c_probe,
.id_table = max98088_i2c_id,
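The mclk handling here, like the cs42l51 change earlier, treats the clock as optional: only -EPROBE_DEFER is fatal at probe time, and the later IS_ERR() checks simply skip clock operations when no clock was wired up. A hedged sketch of that idiom:

static struct clk *example_optional_mclk(struct device *dev)
{
	struct clk *mclk = devm_clk_get(dev, "mclk");

	if (IS_ERR(mclk)) {
		/* provider declared but not ready yet: defer the probe */
		if (PTR_ERR(mclk) == -EPROBE_DEFER)
			return mclk;
		mclk = NULL;	/* no clock wired up: run without it */
	}
	return mclk;
}

A caller would propagate IS_ERR(mclk) as a probe deferral and otherwise store the (possibly NULL) handle for later clk_prepare_enable()/clk_disable_unprepare() calls.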
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index 92b7125ea169..a09d01318f79 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -2,6 +2,7 @@
// Copyright (c) 2017, Maxim Integrated
#include <linux/acpi.h>
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -454,7 +455,7 @@ SND_SOC_DAPM_SIGGEN("IMON"),
SND_SOC_DAPM_SIGGEN("FBMON"),
};
-static DECLARE_TLV_DB_SCALE(max98373_digital_tlv, 0, -50, 0);
+static DECLARE_TLV_DB_SCALE(max98373_digital_tlv, -6350, 50, 1);
static const DECLARE_TLV_DB_RANGE(max98373_spk_tlv,
0, 8, TLV_DB_SCALE_ITEM(0, 50, 0),
9, 10, TLV_DB_SCALE_ITEM(500, 100, 0),
@@ -470,19 +471,19 @@ static const DECLARE_TLV_DB_RANGE(max98373_dht_spkgain_min_tlv,
0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
);
static const DECLARE_TLV_DB_RANGE(max98373_dht_rotation_point_tlv,
- 0, 1, TLV_DB_SCALE_ITEM(-50, -50, 0),
- 2, 7, TLV_DB_SCALE_ITEM(-200, -100, 0),
- 8, 9, TLV_DB_SCALE_ITEM(-1000, -200, 0),
- 10, 11, TLV_DB_SCALE_ITEM(-1500, -300, 0),
- 12, 13, TLV_DB_SCALE_ITEM(-2000, -200, 0),
- 14, 15, TLV_DB_SCALE_ITEM(-2500, -500, 0),
+ 0, 1, TLV_DB_SCALE_ITEM(-3000, 500, 0),
+ 2, 4, TLV_DB_SCALE_ITEM(-2200, 200, 0),
+ 5, 6, TLV_DB_SCALE_ITEM(-1500, 300, 0),
+ 7, 9, TLV_DB_SCALE_ITEM(-1000, 200, 0),
+ 10, 13, TLV_DB_SCALE_ITEM(-500, 100, 0),
+ 14, 15, TLV_DB_SCALE_ITEM(-100, 50, 0),
);
static const DECLARE_TLV_DB_RANGE(max98373_limiter_thresh_tlv,
- 0, 15, TLV_DB_SCALE_ITEM(0, -100, 0),
+ 0, 15, TLV_DB_SCALE_ITEM(-1500, 100, 0),
);
static const DECLARE_TLV_DB_RANGE(max98373_bde_gain_tlv,
- 0, 60, TLV_DB_SCALE_ITEM(0, -25, 0),
+ 0, 60, TLV_DB_SCALE_ITEM(-1500, 25, 0),
);
static bool max98373_readable_register(struct device *dev, unsigned int reg)
@@ -520,6 +521,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
+ case MAX98373_R203E_AMP_PATH_GAIN:
case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
@@ -603,7 +605,7 @@ SOC_SINGLE("Dither Switch", MAX98373_R203F_AMP_DSP_CFG,
SOC_SINGLE("DC Blocker Switch", MAX98373_R203F_AMP_DSP_CFG,
MAX98373_AMP_DSP_CFG_DCBLK_SHIFT, 1, 0),
SOC_SINGLE_TLV("Digital Volume", MAX98373_R203D_AMP_DIG_VOL_CTRL,
- 0, 0x7F, 0, max98373_digital_tlv),
+ 0, 0x7F, 1, max98373_digital_tlv),
SOC_SINGLE_TLV("Speaker Volume", MAX98373_R203E_AMP_PATH_GAIN,
MAX98373_SPK_DIGI_GAIN_SHIFT, 10, 0, max98373_spk_tlv),
SOC_SINGLE_TLV("FS Max Volume", MAX98373_R203E_AMP_PATH_GAIN,
@@ -615,7 +617,7 @@ SOC_SINGLE("DHT Switch", MAX98373_R20D4_DHT_EN,
SOC_SINGLE_TLV("DHT Min Volume", MAX98373_R20D1_DHT_CFG,
MAX98373_DHT_SPK_GAIN_MIN_SHIFT, 9, 0, max98373_dht_spkgain_min_tlv),
SOC_SINGLE_TLV("DHT Rot Pnt Volume", MAX98373_R20D1_DHT_CFG,
- MAX98373_DHT_ROT_PNT_SHIFT, 15, 0, max98373_dht_rotation_point_tlv),
+ MAX98373_DHT_ROT_PNT_SHIFT, 15, 1, max98373_dht_rotation_point_tlv),
SOC_SINGLE_TLV("DHT Attack Step Volume", MAX98373_R20D2_DHT_ATTACK_CFG,
MAX98373_DHT_ATTACK_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
SOC_SINGLE_TLV("DHT Release Step Volume", MAX98373_R20D3_DHT_RELEASE_CFG,
@@ -652,29 +654,29 @@ SOC_SINGLE("BDE Hold Time", MAX98373_R2090_BDE_LVL_HOLD, 0, 0xFF, 0),
SOC_SINGLE("BDE Attack Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 4, 0xF, 0),
SOC_SINGLE("BDE Release Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0, 0xF, 0),
SOC_SINGLE_TLV("BDE LVL1 Clip Thresh Volume", MAX98373_R20A9_BDE_L1_CFG_2,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL2 Clip Thresh Volume", MAX98373_R20AC_BDE_L2_CFG_2,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL3 Clip Thresh Volume", MAX98373_R20AF_BDE_L3_CFG_2,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL4 Clip Thresh Volume", MAX98373_R20B2_BDE_L4_CFG_2,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL1 Clip Reduction Volume", MAX98373_R20AA_BDE_L1_CFG_3,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL2 Clip Reduction Volume", MAX98373_R20AD_BDE_L2_CFG_3,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL3 Clip Reduction Volume", MAX98373_R20B0_BDE_L3_CFG_3,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL4 Clip Reduction Volume", MAX98373_R20B3_BDE_L4_CFG_3,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL1 Limiter Thresh Volume", MAX98373_R20A8_BDE_L1_CFG_1,
- 0, 0xF, 0, max98373_limiter_thresh_tlv),
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
SOC_SINGLE_TLV("BDE LVL2 Limiter Thresh Volume", MAX98373_R20AB_BDE_L2_CFG_1,
- 0, 0xF, 0, max98373_limiter_thresh_tlv),
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
SOC_SINGLE_TLV("BDE LVL3 Limiter Thresh Volume", MAX98373_R20AE_BDE_L3_CFG_1,
- 0, 0xF, 0, max98373_limiter_thresh_tlv),
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
SOC_SINGLE_TLV("BDE LVL4 Limiter Thresh Volume", MAX98373_R20B1_BDE_L4_CFG_1,
- 0, 0xF, 0, max98373_limiter_thresh_tlv),
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
/* Limiter */
SOC_SINGLE("Limiter Switch", MAX98373_R20E2_LIMITER_EN,
MAX98373_LIMITER_EN_SHIFT, 1, 0),
@@ -729,6 +731,7 @@ static int max98373_probe(struct snd_soc_component *component)
/* Software Reset */
regmap_write(max98373->regmap,
MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+ usleep_range(10000, 11000);
/* IV default slot configuration */
regmap_write(max98373->regmap,
@@ -817,6 +820,7 @@ static int max98373_resume(struct device *dev)
regmap_write(max98373->regmap,
MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+ usleep_range(10000, 11000);
regcache_cache_only(max98373->regmap, false);
regcache_sync(max98373->regmap);
return 0;
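The TLV rewrites above all correct the same mistake: DECLARE_TLV_DB_SCALE() and TLV_DB_SCALE_ITEM() take a step in 0.01 dB units that must be non-negative, so an attenuation register is expressed as a negative minimum, a positive step, and the control's invert flag. A worked sketch of the digital volume case:

/* With max = 0x7F and invert = 1 the userspace value is
 * (0x7F - raw), so:
 *
 *   dB(raw) = (-6350 + (0x7F - raw) * 50) / 100
 *   raw 0x00 ->   0.00 dB (no attenuation)
 *   raw 0x7F -> -63.50 dB (reported as mute, per the last argument)
 */
static const DECLARE_TLV_DB_SCALE(example_digital_tlv, -6350, 50, 1);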
diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c
new file mode 100644
index 000000000000..622ce947f134
--- /dev/null
+++ b/sound/soc/codecs/nau8822.c
@@ -0,0 +1,1136 @@
+/*
+ * nau8822.c -- NAU8822 ALSA SoC Audio Codec driver
+ *
+ * Copyright 2017 Nuvoton Technology Corp.
+ *
+ * Author: David Lin <ctlin0@nuvoton.com>
+ * Co-author: John Hsu <kchsu0@nuvoton.com>
+ * Co-author: Seven Li <wtli@nuvoton.com>
+ *
+ * Based on WM8974.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <asm/div64.h>
+#include "nau8822.h"
+
+#define NAU_PLL_FREQ_MAX 100000000
+#define NAU_PLL_FREQ_MIN 90000000
+#define NAU_PLL_REF_MAX 33000000
+#define NAU_PLL_REF_MIN 8000000
+#define NAU_PLL_OPTOP_MIN 6
+
+static const int nau8822_mclk_scaler[] = { 10, 15, 20, 30, 40, 60, 80, 120 };
+
+static const struct reg_default nau8822_reg_defaults[] = {
+ { NAU8822_REG_POWER_MANAGEMENT_1, 0x0000 },
+ { NAU8822_REG_POWER_MANAGEMENT_2, 0x0000 },
+ { NAU8822_REG_POWER_MANAGEMENT_3, 0x0000 },
+ { NAU8822_REG_AUDIO_INTERFACE, 0x0050 },
+ { NAU8822_REG_COMPANDING_CONTROL, 0x0000 },
+ { NAU8822_REG_CLOCKING, 0x0140 },
+ { NAU8822_REG_ADDITIONAL_CONTROL, 0x0000 },
+ { NAU8822_REG_GPIO_CONTROL, 0x0000 },
+ { NAU8822_REG_JACK_DETECT_CONTROL_1, 0x0000 },
+ { NAU8822_REG_DAC_CONTROL, 0x0000 },
+ { NAU8822_REG_LEFT_DAC_DIGITAL_VOLUME, 0x00ff },
+ { NAU8822_REG_RIGHT_DAC_DIGITAL_VOLUME, 0x00ff },
+ { NAU8822_REG_JACK_DETECT_CONTROL_2, 0x0000 },
+ { NAU8822_REG_ADC_CONTROL, 0x0100 },
+ { NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME, 0x00ff },
+ { NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME, 0x00ff },
+ { NAU8822_REG_EQ1, 0x012c },
+ { NAU8822_REG_EQ2, 0x002c },
+ { NAU8822_REG_EQ3, 0x002c },
+ { NAU8822_REG_EQ4, 0x002c },
+ { NAU8822_REG_EQ5, 0x002c },
+ { NAU8822_REG_DAC_LIMITER_1, 0x0032 },
+ { NAU8822_REG_DAC_LIMITER_2, 0x0000 },
+ { NAU8822_REG_NOTCH_FILTER_1, 0x0000 },
+ { NAU8822_REG_NOTCH_FILTER_2, 0x0000 },
+ { NAU8822_REG_NOTCH_FILTER_3, 0x0000 },
+ { NAU8822_REG_NOTCH_FILTER_4, 0x0000 },
+ { NAU8822_REG_ALC_CONTROL_1, 0x0038 },
+ { NAU8822_REG_ALC_CONTROL_2, 0x000b },
+ { NAU8822_REG_ALC_CONTROL_3, 0x0032 },
+ { NAU8822_REG_NOISE_GATE, 0x0010 },
+ { NAU8822_REG_PLL_N, 0x0008 },
+ { NAU8822_REG_PLL_K1, 0x000c },
+ { NAU8822_REG_PLL_K2, 0x0093 },
+ { NAU8822_REG_PLL_K3, 0x00e9 },
+ { NAU8822_REG_3D_CONTROL, 0x0000 },
+ { NAU8822_REG_RIGHT_SPEAKER_CONTROL, 0x0000 },
+ { NAU8822_REG_INPUT_CONTROL, 0x0033 },
+ { NAU8822_REG_LEFT_INP_PGA_CONTROL, 0x0010 },
+ { NAU8822_REG_RIGHT_INP_PGA_CONTROL, 0x0010 },
+ { NAU8822_REG_LEFT_ADC_BOOST_CONTROL, 0x0100 },
+ { NAU8822_REG_RIGHT_ADC_BOOST_CONTROL, 0x0100 },
+ { NAU8822_REG_OUTPUT_CONTROL, 0x0002 },
+ { NAU8822_REG_LEFT_MIXER_CONTROL, 0x0001 },
+ { NAU8822_REG_RIGHT_MIXER_CONTROL, 0x0001 },
+ { NAU8822_REG_LHP_VOLUME, 0x0039 },
+ { NAU8822_REG_RHP_VOLUME, 0x0039 },
+ { NAU8822_REG_LSPKOUT_VOLUME, 0x0039 },
+ { NAU8822_REG_RSPKOUT_VOLUME, 0x0039 },
+ { NAU8822_REG_AUX2_MIXER, 0x0001 },
+ { NAU8822_REG_AUX1_MIXER, 0x0001 },
+ { NAU8822_REG_POWER_MANAGEMENT_4, 0x0000 },
+ { NAU8822_REG_LEFT_TIME_SLOT, 0x0000 },
+ { NAU8822_REG_MISC, 0x0020 },
+ { NAU8822_REG_RIGHT_TIME_SLOT, 0x0000 },
+ { NAU8822_REG_DEVICE_REVISION, 0x007f },
+ { NAU8822_REG_DEVICE_ID, 0x001a },
+ { NAU8822_REG_DAC_DITHER, 0x0114 },
+ { NAU8822_REG_ALC_ENHANCE_1, 0x0000 },
+ { NAU8822_REG_ALC_ENHANCE_2, 0x0000 },
+ { NAU8822_REG_192KHZ_SAMPLING, 0x0008 },
+ { NAU8822_REG_MISC_CONTROL, 0x0000 },
+ { NAU8822_REG_INPUT_TIEOFF, 0x0000 },
+ { NAU8822_REG_POWER_REDUCTION, 0x0000 },
+ { NAU8822_REG_AGC_PEAK2PEAK, 0x0000 },
+ { NAU8822_REG_AGC_PEAK_DETECT, 0x0000 },
+ { NAU8822_REG_AUTOMUTE_CONTROL, 0x0000 },
+ { NAU8822_REG_OUTPUT_TIEOFF, 0x0000 },
+};
+
+static bool nau8822_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NAU8822_REG_RESET ... NAU8822_REG_JACK_DETECT_CONTROL_1:
+ case NAU8822_REG_DAC_CONTROL ... NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME:
+ case NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME:
+ case NAU8822_REG_EQ1 ... NAU8822_REG_EQ5:
+ case NAU8822_REG_DAC_LIMITER_1 ... NAU8822_REG_DAC_LIMITER_2:
+ case NAU8822_REG_NOTCH_FILTER_1 ... NAU8822_REG_NOTCH_FILTER_4:
+ case NAU8822_REG_ALC_CONTROL_1 ... NAU8822_REG_PLL_K3:
+ case NAU8822_REG_3D_CONTROL:
+ case NAU8822_REG_RIGHT_SPEAKER_CONTROL:
+ case NAU8822_REG_INPUT_CONTROL ... NAU8822_REG_LEFT_ADC_BOOST_CONTROL:
+ case NAU8822_REG_RIGHT_ADC_BOOST_CONTROL ... NAU8822_REG_AUX1_MIXER:
+ case NAU8822_REG_POWER_MANAGEMENT_4 ... NAU8822_REG_DEVICE_ID:
+ case NAU8822_REG_DAC_DITHER:
+ case NAU8822_REG_ALC_ENHANCE_1 ... NAU8822_REG_MISC_CONTROL:
+ case NAU8822_REG_INPUT_TIEOFF ... NAU8822_REG_OUTPUT_TIEOFF:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool nau8822_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NAU8822_REG_RESET ... NAU8822_REG_JACK_DETECT_CONTROL_1:
+ case NAU8822_REG_DAC_CONTROL ... NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME:
+ case NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME:
+ case NAU8822_REG_EQ1 ... NAU8822_REG_EQ5:
+ case NAU8822_REG_DAC_LIMITER_1 ... NAU8822_REG_DAC_LIMITER_2:
+ case NAU8822_REG_NOTCH_FILTER_1 ... NAU8822_REG_NOTCH_FILTER_4:
+ case NAU8822_REG_ALC_CONTROL_1 ... NAU8822_REG_PLL_K3:
+ case NAU8822_REG_3D_CONTROL:
+ case NAU8822_REG_RIGHT_SPEAKER_CONTROL:
+ case NAU8822_REG_INPUT_CONTROL ... NAU8822_REG_LEFT_ADC_BOOST_CONTROL:
+ case NAU8822_REG_RIGHT_ADC_BOOST_CONTROL ... NAU8822_REG_AUX1_MIXER:
+ case NAU8822_REG_POWER_MANAGEMENT_4 ... NAU8822_REG_DEVICE_ID:
+ case NAU8822_REG_DAC_DITHER:
+ case NAU8822_REG_ALC_ENHANCE_1 ... NAU8822_REG_MISC_CONTROL:
+ case NAU8822_REG_INPUT_TIEOFF ... NAU8822_REG_OUTPUT_TIEOFF:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool nau8822_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NAU8822_REG_RESET:
+ case NAU8822_REG_DEVICE_REVISION:
+ case NAU8822_REG_DEVICE_ID:
+ case NAU8822_REG_AGC_PEAK2PEAK:
+ case NAU8822_REG_AGC_PEAK_DETECT:
+ case NAU8822_REG_AUTOMUTE_CONTROL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* The EQ parameters "get" callback returns the 5-band equalizer
+ * configuration. A regmap raw read can't be used here because regmap
+ * provides no value format for 9-bit-wide values. The driver therefore
+ * reads the data from the cache and formats it according to the
+ * endianness of the bytes-type control element.
+ */
+static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ int i, reg;
+ u16 reg_val, *val;
+
+ val = (u16 *)ucontrol->value.bytes.data;
+ reg = NAU8822_REG_EQ1;
+ for (i = 0; i < params->max / sizeof(u16); i++) {
+ reg_val = snd_soc_component_read32(component, reg + i);
+ /* convert the 16-bit value from native CPU byte order to
+ * big-endian byte order
+ */
+ reg_val = cpu_to_be16(reg_val);
+ memcpy(val + i, &reg_val, sizeof(reg_val));
+ }
+
+ return 0;
+}
+
+/* The EQ parameters "put" callback configures the 5-band equalizer.
+ * The configuration includes center frequency, equalizer gain, cut-off
+ * frequency, bandwidth control and the equalizer path. A regmap raw
+ * write can't be used here because regmap provides no register/value
+ * format for 7-bit register addresses with 9-bit values. The driver
+ * therefore formats the data according to the endianness of the
+ * bytes-type control element and writes it to the codec.
+ */
+static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ void *data;
+ u16 *val, value;
+ int i, reg, ret;
+
+ data = kmemdup(ucontrol->value.bytes.data,
+ params->max, GFP_KERNEL | GFP_DMA);
+ if (!data)
+ return -ENOMEM;
+
+ val = (u16 *)data;
+ reg = NAU8822_REG_EQ1;
+ for (i = 0; i < params->max / sizeof(u16); i++) {
+ /* convert the 16-bit value from big-endian byte order to
+ * native CPU byte order
+ */
+ value = be16_to_cpu(*(val + i));
+ ret = snd_soc_component_write(component, reg + i, value);
+ if (ret) {
+ dev_err(component->dev,
+ "EQ configuration fail, register: %x ret: %d\n",
+ reg + i, ret);
+ kfree(data);
+ return ret;
+ }
+ }
+ kfree(data);
+
+ return 0;
+}
+
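+/*
+ * A sketch of the "EQ Parameters" bytes control layout implied by the
+ * get/put callbacks above: the 10-byte buffer carries the five band
+ * registers EQ1..EQ5 as 16-bit big-endian words. The values below are
+ * simply the register defaults, shown for illustration only.
+ */
+#if 0	/* example only, not built */
+static const u8 nau8822_eq_words[10] = {
+	0x01, 0x2c,	/* EQ1 default 0x012c */
+	0x00, 0x2c,	/* EQ2 default 0x002c */
+	0x00, 0x2c,	/* EQ3 default 0x002c */
+	0x00, 0x2c,	/* EQ4 default 0x002c */
+	0x00, 0x2c,	/* EQ5 default 0x002c */
+};
+#endif
+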
+static const char * const nau8822_companding[] = {
+ "Off", "NC", "u-law", "A-law"};
+
+static const struct soc_enum nau8822_companding_adc_enum =
+ SOC_ENUM_SINGLE(NAU8822_REG_COMPANDING_CONTROL, NAU8822_ADCCM_SFT,
+ ARRAY_SIZE(nau8822_companding), nau8822_companding);
+
+static const struct soc_enum nau8822_companding_dac_enum =
+ SOC_ENUM_SINGLE(NAU8822_REG_COMPANDING_CONTROL, NAU8822_DACCM_SFT,
+ ARRAY_SIZE(nau8822_companding), nau8822_companding);
+
+static const char * const nau8822_eqmode[] = {"Capture", "Playback"};
+
+static const struct soc_enum nau8822_eqmode_enum =
+ SOC_ENUM_SINGLE(NAU8822_REG_EQ1, NAU8822_EQM_SFT,
+ ARRAY_SIZE(nau8822_eqmode), nau8822_eqmode);
+
+static const char * const nau8822_alc1[] = {"Off", "Right", "Left", "Both"};
+static const char * const nau8822_alc3[] = {"Normal", "Limiter"};
+
+static const struct soc_enum nau8822_alc_enable_enum =
+ SOC_ENUM_SINGLE(NAU8822_REG_ALC_CONTROL_1, NAU8822_ALCEN_SFT,
+ ARRAY_SIZE(nau8822_alc1), nau8822_alc1);
+
+static const struct soc_enum nau8822_alc_mode_enum =
+ SOC_ENUM_SINGLE(NAU8822_REG_ALC_CONTROL_3, NAU8822_ALCM_SFT,
+ ARRAY_SIZE(nau8822_alc3), nau8822_alc3);
+
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1200, 75, 0);
+static const DECLARE_TLV_DB_SCALE(spk_tlv, -5700, 100, 0);
+static const DECLARE_TLV_DB_SCALE(pga_boost_tlv, 0, 2000, 0);
+static const DECLARE_TLV_DB_SCALE(boost_tlv, -1500, 300, 1);
+static const DECLARE_TLV_DB_SCALE(limiter_tlv, 0, 100, 0);
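+
+/*
+ * A worked reading of the scales above (DECLARE_TLV_DB_SCALE takes the
+ * minimum and step in 0.01 dB units plus a mute flag): digital_tlv spans
+ * -127.50 dB to 0 dB in 0.5 dB steps with step 0 muted, so the 0..255
+ * "PCM Volume" range reaches -12750 + 50 * 255 = 0 dB at full scale;
+ * inpga_tlv likewise spans -12 dB to +35.25 dB across its 0..63 range.
+ */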
+
+static const struct snd_kcontrol_new nau8822_snd_controls[] = {
+ SOC_ENUM("ADC Companding", nau8822_companding_adc_enum),
+ SOC_ENUM("DAC Companding", nau8822_companding_dac_enum),
+
+ SOC_ENUM("EQ Function", nau8822_eqmode_enum),
+ SND_SOC_BYTES_EXT("EQ Parameters", 10,
+ nau8822_eq_get, nau8822_eq_put),
+
+ SOC_DOUBLE("DAC Inversion Switch",
+ NAU8822_REG_DAC_CONTROL, 0, 1, 1, 0),
+ SOC_DOUBLE_R_TLV("PCM Volume",
+ NAU8822_REG_LEFT_DAC_DIGITAL_VOLUME,
+ NAU8822_REG_RIGHT_DAC_DIGITAL_VOLUME, 0, 255, 0, digital_tlv),
+
+ SOC_SINGLE("High Pass Filter Switch",
+ NAU8822_REG_ADC_CONTROL, 8, 1, 0),
+ SOC_SINGLE("High Pass Cut Off",
+ NAU8822_REG_ADC_CONTROL, 4, 7, 0),
+
+ SOC_DOUBLE("ADC Inversion Switch",
+ NAU8822_REG_ADC_CONTROL, 0, 1, 1, 0),
+ SOC_DOUBLE_R_TLV("ADC Volume",
+ NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME,
+ NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME, 0, 255, 0, digital_tlv),
+
+ SOC_SINGLE("DAC Limiter Switch",
+ NAU8822_REG_DAC_LIMITER_1, 8, 1, 0),
+ SOC_SINGLE("DAC Limiter Decay",
+ NAU8822_REG_DAC_LIMITER_1, 4, 15, 0),
+ SOC_SINGLE("DAC Limiter Attack",
+ NAU8822_REG_DAC_LIMITER_1, 0, 15, 0),
+ SOC_SINGLE("DAC Limiter Threshold",
+ NAU8822_REG_DAC_LIMITER_2, 4, 7, 0),
+ SOC_SINGLE_TLV("DAC Limiter Volume",
+ NAU8822_REG_DAC_LIMITER_2, 0, 12, 0, limiter_tlv),
+
+ SOC_ENUM("ALC Mode", nau8822_alc_mode_enum),
+ SOC_ENUM("ALC Enable Switch", nau8822_alc_enable_enum),
+ SOC_SINGLE("ALC Min Gain",
+ NAU8822_REG_ALC_CONTROL_1, 0, 7, 0),
+ SOC_SINGLE("ALC Max Gain",
+ NAU8822_REG_ALC_CONTROL_1, 3, 7, 0),
+ SOC_SINGLE("ALC Hold",
+ NAU8822_REG_ALC_CONTROL_2, 4, 10, 0),
+ SOC_SINGLE("ALC Target",
+ NAU8822_REG_ALC_CONTROL_2, 0, 15, 0),
+ SOC_SINGLE("ALC Decay",
+ NAU8822_REG_ALC_CONTROL_3, 4, 10, 0),
+ SOC_SINGLE("ALC Attack",
+ NAU8822_REG_ALC_CONTROL_3, 0, 10, 0),
+ SOC_SINGLE("ALC Noise Gate Switch",
+ NAU8822_REG_NOISE_GATE, 3, 1, 0),
+ SOC_SINGLE("ALC Noise Gate Threshold",
+ NAU8822_REG_NOISE_GATE, 0, 7, 0),
+
+ SOC_DOUBLE_R("PGA ZC Switch",
+ NAU8822_REG_LEFT_INP_PGA_CONTROL,
+ NAU8822_REG_RIGHT_INP_PGA_CONTROL,
+ 7, 1, 0),
+ SOC_DOUBLE_R_TLV("PGA Volume",
+ NAU8822_REG_LEFT_INP_PGA_CONTROL,
+ NAU8822_REG_RIGHT_INP_PGA_CONTROL, 0, 63, 0, inpga_tlv),
+
+ SOC_DOUBLE_R("Headphone ZC Switch",
+ NAU8822_REG_LHP_VOLUME,
+ NAU8822_REG_RHP_VOLUME, 7, 1, 0),
+ SOC_DOUBLE_R("Headphone Playback Switch",
+ NAU8822_REG_LHP_VOLUME,
+ NAU8822_REG_RHP_VOLUME, 6, 1, 1),
+ SOC_DOUBLE_R_TLV("Headphone Volume",
+ NAU8822_REG_LHP_VOLUME,
+ NAU8822_REG_RHP_VOLUME, 0, 63, 0, spk_tlv),
+
+ SOC_DOUBLE_R("Speaker ZC Switch",
+ NAU8822_REG_LSPKOUT_VOLUME,
+ NAU8822_REG_RSPKOUT_VOLUME, 7, 1, 0),
+ SOC_DOUBLE_R("Speaker Playback Switch",
+ NAU8822_REG_LSPKOUT_VOLUME,
+ NAU8822_REG_RSPKOUT_VOLUME, 6, 1, 1),
+ SOC_DOUBLE_R_TLV("Speaker Volume",
+ NAU8822_REG_LSPKOUT_VOLUME,
+ NAU8822_REG_RSPKOUT_VOLUME, 0, 63, 0, spk_tlv),
+
+ SOC_DOUBLE_R("AUXOUT Playback Switch",
+ NAU8822_REG_AUX2_MIXER,
+ NAU8822_REG_AUX1_MIXER, 6, 1, 1),
+
+ SOC_DOUBLE_R_TLV("PGA Boost Volume",
+ NAU8822_REG_LEFT_ADC_BOOST_CONTROL,
+ NAU8822_REG_RIGHT_ADC_BOOST_CONTROL, 8, 1, 0, pga_boost_tlv),
+ SOC_DOUBLE_R_TLV("L2/R2 Boost Volume",
+ NAU8822_REG_LEFT_ADC_BOOST_CONTROL,
+ NAU8822_REG_RIGHT_ADC_BOOST_CONTROL, 4, 7, 0, boost_tlv),
+ SOC_DOUBLE_R_TLV("Aux Boost Volume",
+ NAU8822_REG_LEFT_ADC_BOOST_CONTROL,
+ NAU8822_REG_RIGHT_ADC_BOOST_CONTROL, 0, 7, 0, boost_tlv),
+
+ SOC_SINGLE("DAC 128x Oversampling Switch",
+ NAU8822_REG_DAC_CONTROL, 5, 1, 0),
+ SOC_SINGLE("ADC 128x Oversampling Switch",
+ NAU8822_REG_ADC_CONTROL, 5, 1, 0),
+};
+
+/* LMAIN and RMAIN Mixer */
+static const struct snd_kcontrol_new nau8822_left_out_mixer[] = {
+ SOC_DAPM_SINGLE("LINMIX Switch",
+ NAU8822_REG_LEFT_MIXER_CONTROL, 1, 1, 0),
+ SOC_DAPM_SINGLE("LAUX Switch",
+ NAU8822_REG_LEFT_MIXER_CONTROL, 5, 1, 0),
+ SOC_DAPM_SINGLE("LDAC Switch",
+ NAU8822_REG_LEFT_MIXER_CONTROL, 0, 1, 0),
+ SOC_DAPM_SINGLE("RDAC Switch",
+ NAU8822_REG_OUTPUT_CONTROL, 5, 1, 0),
+};
+
+static const struct snd_kcontrol_new nau8822_right_out_mixer[] = {
+ SOC_DAPM_SINGLE("RINMIX Switch",
+ NAU8822_REG_RIGHT_MIXER_CONTROL, 1, 1, 0),
+ SOC_DAPM_SINGLE("RAUX Switch",
+ NAU8822_REG_RIGHT_MIXER_CONTROL, 5, 1, 0),
+ SOC_DAPM_SINGLE("RDAC Switch",
+ NAU8822_REG_RIGHT_MIXER_CONTROL, 0, 1, 0),
+ SOC_DAPM_SINGLE("LDAC Switch",
+ NAU8822_REG_OUTPUT_CONTROL, 6, 1, 0),
+};
+
+/* AUX1 and AUX2 Mixer */
+static const struct snd_kcontrol_new nau8822_auxout1_mixer[] = {
+ SOC_DAPM_SINGLE("RDAC Switch", NAU8822_REG_AUX1_MIXER, 0, 1, 0),
+ SOC_DAPM_SINGLE("RMIX Switch", NAU8822_REG_AUX1_MIXER, 1, 1, 0),
+ SOC_DAPM_SINGLE("RINMIX Switch", NAU8822_REG_AUX1_MIXER, 2, 1, 0),
+ SOC_DAPM_SINGLE("LDAC Switch", NAU8822_REG_AUX1_MIXER, 3, 1, 0),
+ SOC_DAPM_SINGLE("LMIX Switch", NAU8822_REG_AUX1_MIXER, 4, 1, 0),
+};
+
+static const struct snd_kcontrol_new nau8822_auxout2_mixer[] = {
+ SOC_DAPM_SINGLE("LDAC Switch", NAU8822_REG_AUX2_MIXER, 0, 1, 0),
+ SOC_DAPM_SINGLE("LMIX Switch", NAU8822_REG_AUX2_MIXER, 1, 1, 0),
+ SOC_DAPM_SINGLE("LINMIX Switch", NAU8822_REG_AUX2_MIXER, 2, 1, 0),
+ SOC_DAPM_SINGLE("AUX1MIX Output Switch",
+ NAU8822_REG_AUX2_MIXER, 3, 1, 0),
+};
+
+/* Input PGA */
+static const struct snd_kcontrol_new nau8822_left_input_mixer[] = {
+ SOC_DAPM_SINGLE("L2 Switch", NAU8822_REG_INPUT_CONTROL, 2, 1, 0),
+ SOC_DAPM_SINGLE("MicN Switch", NAU8822_REG_INPUT_CONTROL, 1, 1, 0),
+ SOC_DAPM_SINGLE("MicP Switch", NAU8822_REG_INPUT_CONTROL, 0, 1, 0),
+};
+static const struct snd_kcontrol_new nau8822_right_input_mixer[] = {
+ SOC_DAPM_SINGLE("R2 Switch", NAU8822_REG_INPUT_CONTROL, 6, 1, 0),
+ SOC_DAPM_SINGLE("MicN Switch", NAU8822_REG_INPUT_CONTROL, 5, 1, 0),
+ SOC_DAPM_SINGLE("MicP Switch", NAU8822_REG_INPUT_CONTROL, 4, 1, 0),
+};
+
+/* Loopback Switch */
+static const struct snd_kcontrol_new nau8822_loopback =
+ SOC_DAPM_SINGLE("Switch", NAU8822_REG_COMPANDING_CONTROL,
+ NAU8822_ADDAP_SFT, 1, 0);
+
+static int check_mclk_select_pll(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(source->dapm);
+ unsigned int value;
+
+ value = snd_soc_component_read32(component, NAU8822_REG_CLOCKING);
+
+ return (value & NAU8822_CLKM_MASK);
+}
+
+static const struct snd_soc_dapm_widget nau8822_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback",
+ NAU8822_REG_POWER_MANAGEMENT_3, 0, 0),
+ SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback",
+ NAU8822_REG_POWER_MANAGEMENT_3, 1, 0),
+ SND_SOC_DAPM_ADC("Left ADC", "Left HiFi Capture",
+ NAU8822_REG_POWER_MANAGEMENT_2, 0, 0),
+ SND_SOC_DAPM_ADC("Right ADC", "Right HiFi Capture",
+ NAU8822_REG_POWER_MANAGEMENT_2, 1, 0),
+
+ SOC_MIXER_ARRAY("Left Output Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_3, 2, 0, nau8822_left_out_mixer),
+ SOC_MIXER_ARRAY("Right Output Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_3, 3, 0, nau8822_right_out_mixer),
+ SOC_MIXER_ARRAY("AUX1 Output Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_1, 7, 0, nau8822_auxout1_mixer),
+ SOC_MIXER_ARRAY("AUX2 Output Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_1, 6, 0, nau8822_auxout2_mixer),
+
+ SOC_MIXER_ARRAY("Left Input Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_2,
+ 2, 0, nau8822_left_input_mixer),
+ SOC_MIXER_ARRAY("Right Input Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_2,
+ 3, 0, nau8822_right_input_mixer),
+
+ SND_SOC_DAPM_PGA("Left Boost Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_2, 4, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Boost Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_2, 5, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("Left Capture PGA",
+ NAU8822_REG_LEFT_INP_PGA_CONTROL, 6, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Capture PGA",
+ NAU8822_REG_RIGHT_INP_PGA_CONTROL, 6, 1, NULL, 0),
+
+ SND_SOC_DAPM_PGA("Left Headphone Out",
+ NAU8822_REG_POWER_MANAGEMENT_2, 7, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Headphone Out",
+ NAU8822_REG_POWER_MANAGEMENT_2, 8, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("Left Speaker Out",
+ NAU8822_REG_POWER_MANAGEMENT_3, 6, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Speaker Out",
+ NAU8822_REG_POWER_MANAGEMENT_3, 5, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("AUX1 Out",
+ NAU8822_REG_POWER_MANAGEMENT_3, 8, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("AUX2 Out",
+ NAU8822_REG_POWER_MANAGEMENT_3, 7, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("Mic Bias",
+ NAU8822_REG_POWER_MANAGEMENT_1, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL",
+ NAU8822_REG_POWER_MANAGEMENT_1, 5, 0, NULL, 0),
+
+ SND_SOC_DAPM_SWITCH("Digital Loopback", SND_SOC_NOPM, 0, 0,
+ &nau8822_loopback),
+
+ SND_SOC_DAPM_INPUT("LMICN"),
+ SND_SOC_DAPM_INPUT("LMICP"),
+ SND_SOC_DAPM_INPUT("RMICN"),
+ SND_SOC_DAPM_INPUT("RMICP"),
+ SND_SOC_DAPM_INPUT("LAUX"),
+ SND_SOC_DAPM_INPUT("RAUX"),
+ SND_SOC_DAPM_INPUT("L2"),
+ SND_SOC_DAPM_INPUT("R2"),
+ SND_SOC_DAPM_OUTPUT("LHP"),
+ SND_SOC_DAPM_OUTPUT("RHP"),
+ SND_SOC_DAPM_OUTPUT("LSPK"),
+ SND_SOC_DAPM_OUTPUT("RSPK"),
+ SND_SOC_DAPM_OUTPUT("AUXOUT1"),
+ SND_SOC_DAPM_OUTPUT("AUXOUT2"),
+};
+
+static const struct snd_soc_dapm_route nau8822_dapm_routes[] = {
+ {"Right DAC", NULL, "PLL", check_mclk_select_pll},
+ {"Left DAC", NULL, "PLL", check_mclk_select_pll},
+
+ /* LMAIN and RMAIN Mixer */
+ {"Right Output Mixer", "LDAC Switch", "Left DAC"},
+ {"Right Output Mixer", "RDAC Switch", "Right DAC"},
+ {"Right Output Mixer", "RAUX Switch", "RAUX"},
+ {"Right Output Mixer", "RINMIX Switch", "Right Boost Mixer"},
+
+ {"Left Output Mixer", "LDAC Switch", "Left DAC"},
+ {"Left Output Mixer", "RDAC Switch", "Right DAC"},
+ {"Left Output Mixer", "LAUX Switch", "LAUX"},
+ {"Left Output Mixer", "LINMIX Switch", "Left Boost Mixer"},
+
+ /* AUX1 and AUX2 Mixer */
+ {"AUX1 Output Mixer", "RDAC Switch", "Right DAC"},
+ {"AUX1 Output Mixer", "RMIX Switch", "Right Output Mixer"},
+ {"AUX1 Output Mixer", "RINMIX Switch", "Right Boost Mixer"},
+ {"AUX1 Output Mixer", "LDAC Switch", "Left DAC"},
+ {"AUX1 Output Mixer", "LMIX Switch", "Left Output Mixer"},
+
+ {"AUX2 Output Mixer", "LDAC Switch", "Left DAC"},
+ {"AUX2 Output Mixer", "LMIX Switch", "Left Output Mixer"},
+ {"AUX2 Output Mixer", "LINMIX Switch", "Left Boost Mixer"},
+ {"AUX2 Output Mixer", "AUX1MIX Output Switch", "AUX1 Output Mixer"},
+
+ /* Outputs */
+ {"Right Headphone Out", NULL, "Right Output Mixer"},
+ {"RHP", NULL, "Right Headphone Out"},
+
+ {"Left Headphone Out", NULL, "Left Output Mixer"},
+ {"LHP", NULL, "Left Headphone Out"},
+
+ {"Right Speaker Out", NULL, "Right Output Mixer"},
+ {"RSPK", NULL, "Right Speaker Out"},
+
+ {"Left Speaker Out", NULL, "Left Output Mixer"},
+ {"LSPK", NULL, "Left Speaker Out"},
+
+ {"AUX1 Out", NULL, "AUX1 Output Mixer"},
+ {"AUX2 Out", NULL, "AUX2 Output Mixer"},
+ {"AUXOUT1", NULL, "AUX1 Out"},
+ {"AUXOUT2", NULL, "AUX2 Out"},
+
+ /* Boost Mixer */
+ {"Right ADC", NULL, "PLL", check_mclk_select_pll},
+ {"Left ADC", NULL, "PLL", check_mclk_select_pll},
+
+ {"Right ADC", NULL, "Right Boost Mixer"},
+
+ {"Right Boost Mixer", NULL, "RAUX"},
+ {"Right Boost Mixer", NULL, "Right Capture PGA"},
+ {"Right Boost Mixer", NULL, "R2"},
+
+ {"Left ADC", NULL, "Left Boost Mixer"},
+
+ {"Left Boost Mixer", NULL, "LAUX"},
+ {"Left Boost Mixer", NULL, "Left Capture PGA"},
+ {"Left Boost Mixer", NULL, "L2"},
+
+ /* Input PGA */
+ {"Right Capture PGA", NULL, "Right Input Mixer"},
+ {"Left Capture PGA", NULL, "Left Input Mixer"},
+
+ /* Enable Microphone Power */
+ {"Right Capture PGA", NULL, "Mic Bias"},
+ {"Left Capture PGA", NULL, "Mic Bias"},
+
+ {"Right Input Mixer", "R2 Switch", "R2"},
+ {"Right Input Mixer", "MicN Switch", "RMICN"},
+ {"Right Input Mixer", "MicP Switch", "RMICP"},
+
+ {"Left Input Mixer", "L2 Switch", "L2"},
+ {"Left Input Mixer", "MicN Switch", "LMICN"},
+ {"Left Input Mixer", "MicP Switch", "LMICP"},
+
+ /* Digital Loopback */
+ {"Digital Loopback", "Switch", "Left ADC"},
+ {"Digital Loopback", "Switch", "Right ADC"},
+ {"Left DAC", NULL, "Digital Loopback"},
+ {"Right DAC", NULL, "Digital Loopback"},
+};
+
+static int nau8822_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+
+ nau8822->div_id = clk_id;
+ nau8822->sysclk = freq;
+ dev_dbg(component->dev, "master sysclk %dHz, source %s\n", freq,
+ clk_id == NAU8822_CLK_PLL ? "PLL" : "MCLK");
+
+ return 0;
+}
+
+static int nau8822_calc_pll(unsigned int pll_in, unsigned int fs,
+ struct nau8822_pll *pll_param)
+{
+ u64 f2, f2_max, pll_ratio;
+ int i, scal_sel;
+
+ if (pll_in > NAU_PLL_REF_MAX || pll_in < NAU_PLL_REF_MIN)
+ return -EINVAL;
+ f2_max = 0;
+ scal_sel = ARRAY_SIZE(nau8822_mclk_scaler);
+
+ for (i = 0; i < scal_sel; i++) {
+ f2 = 256 * fs * 4 * nau8822_mclk_scaler[i] / 10;
+ if (f2 > NAU_PLL_FREQ_MIN && f2 < NAU_PLL_FREQ_MAX &&
+ f2_max < f2) {
+ f2_max = f2;
+ scal_sel = i;
+ }
+ }
+
+ if (ARRAY_SIZE(nau8822_mclk_scaler) == scal_sel)
+ return -EINVAL;
+ pll_param->mclk_scaler = scal_sel;
+ f2 = f2_max;
+
+ /* Calculate the PLL input as a 4-bit integer part plus a 24-bit
+ * fractional part, computed with 4 extra low-order bits (a 28-bit
+ * fraction) that are dropped when the result is stored.
+ */
+ pll_ratio = div_u64(f2 << 28, pll_in);
+ pll_param->pre_factor = 0;
+ if (((pll_ratio >> 28) & 0xF) < NAU_PLL_OPTOP_MIN) {
+ pll_ratio <<= 1;
+ pll_param->pre_factor = 1;
+ }
+ pll_param->pll_int = (pll_ratio >> 28) & 0xF;
+ pll_param->pll_frac = ((pll_ratio & 0xFFFFFFF) >> 4);
+
+ return 0;
+}
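+
+/*
+ * A worked example of the search above, assuming a 12 MHz reference and
+ * fs = 48 kHz: the only scaler giving 256 * fs * 4 * scaler/10 inside
+ * the 90..100 MHz window is 2.0, i.e. f2 = 98.304 MHz. Then
+ * pll_ratio = f2 * 2^28 / 12 MHz has an integer part of 8 (already >=
+ * NAU_PLL_OPTOP_MIN, so no pre-divider) and a 24-bit fraction of
+ * 0x3126E9, giving pll_int = 8 and pll_frac = 0x3126E9, which matches
+ * the PLL_N and PLL_K1/K2/K3 register defaults (0x0008 and
+ * 0x0c/0x093/0x0e9 after the 6/9/9-bit split).
+ */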
+
+static int nau8822_config_clkdiv(struct snd_soc_dai *dai, int div, int rate)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+ struct nau8822_pll *pll = &nau8822->pll;
+ int i, sclk, imclk;
+
+ switch (nau8822->div_id) {
+ case NAU8822_CLK_MCLK:
+ /* Configure the master clock prescaler divider so that the
+ * system clock approximates the internal master clock
+ * (IMCLK) while staying greater than or equal to IMCLK.
+ */
+ div = 0;
+ imclk = rate * 256;
+ for (i = 1; i < ARRAY_SIZE(nau8822_mclk_scaler); i++) {
+ sclk = (nau8822->sysclk * 10) / nau8822_mclk_scaler[i];
+ if (sclk < imclk)
+ break;
+ div = i;
+ }
+ dev_dbg(component->dev, "master clock prescaler %x for fs %d\n",
+ div, rate);
+
+ /* master clock from MCLK and disable PLL */
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_MCLKSEL_MASK,
+ (div << NAU8822_MCLKSEL_SFT));
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_CLKM_MASK,
+ NAU8822_CLKM_MCLK);
+ break;
+
+ case NAU8822_CLK_PLL:
+ /* master clock from PLL and enable PLL */
+ if (pll->mclk_scaler != div) {
+ dev_err(component->dev,
+ "master clock prescaler not meet PLL parameters\n");
+ return -EINVAL;
+ }
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_MCLKSEL_MASK,
+ (div << NAU8822_MCLKSEL_SFT));
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_CLKM_MASK,
+ NAU8822_CLKM_PLL);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
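+
+/*
+ * Prescaler selection in the MCLK case above, sketched with assumed
+ * numbers: for sysclk = 24.576 MHz and fs = 48 kHz, IMCLK = 256 * 48000
+ * = 12.288 MHz. The scan keeps div = 1 (24.576M / 1.5 = 16.384 MHz >=
+ * IMCLK), then div = 2 (24.576M / 2.0 = 12.288 MHz >= IMCLK), and stops
+ * at 24.576M / 3.0 = 8.192 MHz < IMCLK, so the divide-by-2.0 prescaler
+ * is programmed and IMCLK is met exactly.
+ */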
+
+static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
+ unsigned int freq_in, unsigned int freq_out)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+ struct nau8822_pll *pll_param = &nau8822->pll;
+ int ret, fs;
+
+ fs = freq_out / 256;
+
+ ret = nau8822_calc_pll(freq_in, fs, pll_param);
+ if (ret < 0) {
+ dev_err(component->dev, "Unsupported input clock %d\n",
+ freq_in);
+ return ret;
+ }
+
+ dev_info(component->dev,
+ "pll_int=%x pll_frac=%x mclk_scaler=%x pre_factor=%x\n",
+ pll_param->pll_int, pll_param->pll_frac,
+ pll_param->mclk_scaler, pll_param->pre_factor);
+
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_PLL_N, NAU8822_PLLMCLK_DIV2 | NAU8822_PLLN_MASK,
+ (pll_param->pre_factor ? NAU8822_PLLMCLK_DIV2 : 0) |
+ pll_param->pll_int);
+ snd_soc_component_write(component,
+ NAU8822_REG_PLL_K1, (pll_param->pll_frac >> NAU8822_PLLK1_SFT) &
+ NAU8822_PLLK1_MASK);
+ snd_soc_component_write(component,
+ NAU8822_REG_PLL_K2, (pll_param->pll_frac >> NAU8822_PLLK2_SFT) &
+ NAU8822_PLLK2_MASK);
+ snd_soc_component_write(component,
+ NAU8822_REG_PLL_K3, pll_param->pll_frac & NAU8822_PLLK3_MASK);
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_MCLKSEL_MASK,
+ pll_param->mclk_scaler << NAU8822_MCLKSEL_SFT);
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_CLKM_MASK, NAU8822_CLKM_PLL);
+
+ return 0;
+}
+
+static int nau8822_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ u16 ctrl1_val = 0, ctrl2_val = 0;
+
+ dev_dbg(component->dev, "%s\n", __func__);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ ctrl2_val |= 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ ctrl2_val &= ~1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ ctrl1_val |= 0x10;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ ctrl1_val |= 0x8;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ ctrl1_val |= 0x18;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ ctrl1_val |= 0x180;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ ctrl1_val |= 0x100;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ ctrl1_val |= 0x80;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_AUDIO_INTERFACE,
+ NAU8822_AIFMT_MASK | NAU8822_LRP_MASK | NAU8822_BCLKP_MASK,
+ ctrl1_val);
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_CLKIOEN_MASK, ctrl2_val);
+
+ return 0;
+}
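+
+/*
+ * A minimal machine-driver sketch of how this callback is reached; the
+ * link name and fields are illustrative, not from a real board. Setting
+ * .dai_fmt on the DAI link makes the core invoke nau8822_set_dai_fmt()
+ * with the same flags.
+ */
+#if 0	/* example only, not built */
+static struct snd_soc_dai_link board_dai_link = {
+	.name = "NAU8822 HiFi",
+	.codec_dai_name = "nau8822-hifi",
+	.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+		   SND_SOC_DAIFMT_CBM_CFM,
+};
+#endif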
+
+static int nau8822_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+ int val_len = 0, val_rate = 0;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ val_len |= NAU8822_WLEN_20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ val_len |= NAU8822_WLEN_24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ val_len |= NAU8822_WLEN_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params_rate(params)) {
+ case 8000:
+ val_rate |= NAU8822_SMPLR_8K;
+ break;
+ case 11025:
+ val_rate |= NAU8822_SMPLR_12K;
+ break;
+ case 16000:
+ val_rate |= NAU8822_SMPLR_16K;
+ break;
+ case 22050:
+ val_rate |= NAU8822_SMPLR_24K;
+ break;
+ case 32000:
+ val_rate |= NAU8822_SMPLR_32K;
+ break;
+ case 44100:
+ case 48000:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_AUDIO_INTERFACE, NAU8822_WLEN_MASK, val_len);
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_ADDITIONAL_CONTROL, NAU8822_SMPLR_MASK, val_rate);
+
+ /* If the master clock comes from MCLK, pass the runtime FS so the
+ * driver can derive the master clock prescaler configuration.
+ */
+ if (nau8822->div_id == NAU8822_CLK_MCLK)
+ nau8822_config_clkdiv(dai, 0, params_rate(params));
+
+ return 0;
+}
+
+static int nau8822_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_component *component = dai->component;
+
+ dev_dbg(component->dev, "%s: %d\n", __func__, mute);
+
+ if (mute)
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_DAC_CONTROL, 0x40, 0x40);
+ else
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_DAC_CONTROL, 0x40, 0);
+
+ return 0;
+}
+
+static int nau8822_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_POWER_MANAGEMENT_1,
+ NAU8822_REFIMP_MASK, NAU8822_REFIMP_80K);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_POWER_MANAGEMENT_1,
+ NAU8822_IOBUF_EN | NAU8822_ABIAS_EN,
+ NAU8822_IOBUF_EN | NAU8822_ABIAS_EN);
+
+ if (snd_soc_component_get_bias_level(component) ==
+ SND_SOC_BIAS_OFF) {
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_POWER_MANAGEMENT_1,
+ NAU8822_REFIMP_MASK, NAU8822_REFIMP_3K);
+ mdelay(100);
+ }
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_POWER_MANAGEMENT_1,
+ NAU8822_REFIMP_MASK, NAU8822_REFIMP_300K);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ snd_soc_component_write(component,
+ NAU8822_REG_POWER_MANAGEMENT_1, 0);
+ snd_soc_component_write(component,
+ NAU8822_REG_POWER_MANAGEMENT_2, 0);
+ snd_soc_component_write(component,
+ NAU8822_REG_POWER_MANAGEMENT_3, 0);
+ break;
+ }
+
+ dev_dbg(component->dev, "%s: %d\n", __func__, level);
+
+ return 0;
+}
+
+#define NAU8822_RATES (SNDRV_PCM_RATE_8000_48000)
+
+#define NAU8822_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops nau8822_dai_ops = {
+ .hw_params = nau8822_hw_params,
+ .digital_mute = nau8822_mute,
+ .set_fmt = nau8822_set_dai_fmt,
+ .set_sysclk = nau8822_set_dai_sysclk,
+ .set_pll = nau8822_set_pll,
+};
+
+static struct snd_soc_dai_driver nau8822_dai = {
+ .name = "nau8822-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = NAU8822_RATES,
+ .formats = NAU8822_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = NAU8822_RATES,
+ .formats = NAU8822_FORMATS,
+ },
+ .ops = &nau8822_dai_ops,
+ .symmetric_rates = 1,
+};
+
+static int nau8822_suspend(struct snd_soc_component *component)
+{
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+
+ snd_soc_component_force_bias_level(component, SND_SOC_BIAS_OFF);
+
+ regcache_mark_dirty(nau8822->regmap);
+
+ return 0;
+}
+
+static int nau8822_resume(struct snd_soc_component *component)
+{
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+
+ regcache_sync(nau8822->regmap);
+
+ snd_soc_component_force_bias_level(component, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+
+/*
+ * These registers contain an "update" bit (bit 8). One can, for example,
+ * write a new DAC digital volume for both channels, but the volume is
+ * only applied, simultaneously for both channels, once the update bit
+ * is set.
+ */
+static const int update_reg[] = {
+ NAU8822_REG_LEFT_DAC_DIGITAL_VOLUME,
+ NAU8822_REG_RIGHT_DAC_DIGITAL_VOLUME,
+ NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME,
+ NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME,
+ NAU8822_REG_LEFT_INP_PGA_CONTROL,
+ NAU8822_REG_RIGHT_INP_PGA_CONTROL,
+ NAU8822_REG_LHP_VOLUME,
+ NAU8822_REG_RHP_VOLUME,
+ NAU8822_REG_LSPKOUT_VOLUME,
+ NAU8822_REG_RSPKOUT_VOLUME,
+};
+
+static int nau8822_probe(struct snd_soc_component *component)
+{
+ int i;
+
+ /*
+ * Set the update bit in all registers that have one, so that every
+ * write to those registers also writes the update bit.
+ */
+ for (i = 0; i < ARRAY_SIZE(update_reg); i++)
+ snd_soc_component_update_bits(component,
+ update_reg[i], 0x100, 0x100);
+
+ return 0;
+}
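+
+/*
+ * A sketch of the update-bit behaviour this enables (register values
+ * are illustrative): a volume pair written through regmap takes effect
+ * atomically on the second write.
+ */
+#if 0	/* example only, not built */
+	/* bit 8 clear: the new left volume is latched but not applied */
+	snd_soc_component_write(component,
+		NAU8822_REG_LEFT_DAC_DIGITAL_VOLUME, 0x0ff);
+	/* bit 8 set: right volume and the latched left value apply together */
+	snd_soc_component_write(component,
+		NAU8822_REG_RIGHT_DAC_DIGITAL_VOLUME, 0x1ff);
+#endif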
+
+static const struct snd_soc_component_driver soc_component_dev_nau8822 = {
+ .probe = nau8822_probe,
+ .suspend = nau8822_suspend,
+ .resume = nau8822_resume,
+ .set_bias_level = nau8822_set_bias_level,
+ .controls = nau8822_snd_controls,
+ .num_controls = ARRAY_SIZE(nau8822_snd_controls),
+ .dapm_widgets = nau8822_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(nau8822_dapm_widgets),
+ .dapm_routes = nau8822_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(nau8822_dapm_routes),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct regmap_config nau8822_regmap_config = {
+ .reg_bits = 7,
+ .val_bits = 9,
+
+ .max_register = NAU8822_REG_MAX_REGISTER,
+ .volatile_reg = nau8822_volatile,
+
+ .readable_reg = nau8822_readable_reg,
+ .writeable_reg = nau8822_writeable_reg,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = nau8822_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(nau8822_reg_defaults),
+};
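+
+/*
+ * With .reg_bits = 7 and .val_bits = 9, regmap packs each write into two
+ * bytes on the wire: the 7-bit register address plus the value's MSB in
+ * the first byte, then the low 8 value bits. A sketch of the equivalent
+ * manual packing:
+ */
+#if 0	/* example only, not built */
+	u8 buf[2];
+
+	buf[0] = (reg << 1) | ((val >> 8) & 0x1);
+	buf[1] = val & 0xff;
+#endif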
+
+static int nau8822_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct nau8822 *nau8822 = dev_get_platdata(dev);
+ int ret;
+
+ if (!nau8822) {
+ nau8822 = devm_kzalloc(dev, sizeof(*nau8822), GFP_KERNEL);
+ if (nau8822 == NULL)
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(i2c, nau8822);
+
+ nau8822->regmap = devm_regmap_init_i2c(i2c, &nau8822_regmap_config);
+ if (IS_ERR(nau8822->regmap)) {
+ ret = PTR_ERR(nau8822->regmap);
+ dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+ nau8822->dev = dev;
+
+ /* Reset the codec */
+ ret = regmap_write(nau8822->regmap, NAU8822_REG_RESET, 0x00);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to issue reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_component(dev, &soc_component_dev_nau8822,
+ &nau8822_dai, 1);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id nau8822_i2c_id[] = {
+ { "nau8822", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nau8822_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id nau8822_of_match[] = {
+ { .compatible = "nuvoton,nau8822", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nau8822_of_match);
+#endif
+
+static struct i2c_driver nau8822_i2c_driver = {
+ .driver = {
+ .name = "nau8822",
+ .of_match_table = of_match_ptr(nau8822_of_match),
+ },
+ .probe = nau8822_i2c_probe,
+ .id_table = nau8822_i2c_id,
+};
+module_i2c_driver(nau8822_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC NAU8822 codec driver");
+MODULE_AUTHOR("David Lin <ctlin0@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/nau8822.h b/sound/soc/codecs/nau8822.h
new file mode 100644
index 000000000000..aa79c969cd44
--- /dev/null
+++ b/sound/soc/codecs/nau8822.h
@@ -0,0 +1,204 @@
+/*
+ * nau8822.h -- NAU8822 SoC Audio Codec driver
+ *
+ * Author: David Lin <ctlin0@nuvoton.com>
+ * Co-author: John Hsu <kchsu0@nuvoton.com>
+ * Co-author: Seven Li <wtli@nuvoton.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __NAU8822_H__
+#define __NAU8822_H__
+
+#define NAU8822_REG_RESET 0x00
+#define NAU8822_REG_POWER_MANAGEMENT_1 0x01
+#define NAU8822_REG_POWER_MANAGEMENT_2 0x02
+#define NAU8822_REG_POWER_MANAGEMENT_3 0x03
+#define NAU8822_REG_AUDIO_INTERFACE 0x04
+#define NAU8822_REG_COMPANDING_CONTROL 0x05
+#define NAU8822_REG_CLOCKING 0x06
+#define NAU8822_REG_ADDITIONAL_CONTROL 0x07
+#define NAU8822_REG_GPIO_CONTROL 0x08
+#define NAU8822_REG_JACK_DETECT_CONTROL_1 0x09
+#define NAU8822_REG_DAC_CONTROL 0x0A
+#define NAU8822_REG_LEFT_DAC_DIGITAL_VOLUME 0x0B
+#define NAU8822_REG_RIGHT_DAC_DIGITAL_VOLUME 0x0C
+#define NAU8822_REG_JACK_DETECT_CONTROL_2 0x0D
+#define NAU8822_REG_ADC_CONTROL 0x0E
+#define NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME 0x0F
+#define NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME 0x10
+#define NAU8822_REG_EQ1 0x12
+#define NAU8822_REG_EQ2 0x13
+#define NAU8822_REG_EQ3 0x14
+#define NAU8822_REG_EQ4 0x15
+#define NAU8822_REG_EQ5 0x16
+#define NAU8822_REG_DAC_LIMITER_1 0x18
+#define NAU8822_REG_DAC_LIMITER_2 0x19
+#define NAU8822_REG_NOTCH_FILTER_1 0x1B
+#define NAU8822_REG_NOTCH_FILTER_2 0x1C
+#define NAU8822_REG_NOTCH_FILTER_3 0x1D
+#define NAU8822_REG_NOTCH_FILTER_4 0x1E
+#define NAU8822_REG_ALC_CONTROL_1 0x20
+#define NAU8822_REG_ALC_CONTROL_2 0x21
+#define NAU8822_REG_ALC_CONTROL_3 0x22
+#define NAU8822_REG_NOISE_GATE 0x23
+#define NAU8822_REG_PLL_N 0x24
+#define NAU8822_REG_PLL_K1 0x25
+#define NAU8822_REG_PLL_K2 0x26
+#define NAU8822_REG_PLL_K3 0x27
+#define NAU8822_REG_3D_CONTROL 0x29
+#define NAU8822_REG_RIGHT_SPEAKER_CONTROL 0x2B
+#define NAU8822_REG_INPUT_CONTROL 0x2C
+#define NAU8822_REG_LEFT_INP_PGA_CONTROL 0x2D
+#define NAU8822_REG_RIGHT_INP_PGA_CONTROL 0x2E
+#define NAU8822_REG_LEFT_ADC_BOOST_CONTROL 0x2F
+#define NAU8822_REG_RIGHT_ADC_BOOST_CONTROL 0x30
+#define NAU8822_REG_OUTPUT_CONTROL 0x31
+#define NAU8822_REG_LEFT_MIXER_CONTROL 0x32
+#define NAU8822_REG_RIGHT_MIXER_CONTROL 0x33
+#define NAU8822_REG_LHP_VOLUME 0x34
+#define NAU8822_REG_RHP_VOLUME 0x35
+#define NAU8822_REG_LSPKOUT_VOLUME 0x36
+#define NAU8822_REG_RSPKOUT_VOLUME 0x37
+#define NAU8822_REG_AUX2_MIXER 0x38
+#define NAU8822_REG_AUX1_MIXER 0x39
+#define NAU8822_REG_POWER_MANAGEMENT_4 0x3A
+#define NAU8822_REG_LEFT_TIME_SLOT 0x3B
+#define NAU8822_REG_MISC 0x3C
+#define NAU8822_REG_RIGHT_TIME_SLOT 0x3D
+#define NAU8822_REG_DEVICE_REVISION 0x3E
+#define NAU8822_REG_DEVICE_ID 0x3F
+#define NAU8822_REG_DAC_DITHER 0x41
+#define NAU8822_REG_ALC_ENHANCE_1 0x46
+#define NAU8822_REG_ALC_ENHANCE_2 0x47
+#define NAU8822_REG_192KHZ_SAMPLING 0x48
+#define NAU8822_REG_MISC_CONTROL 0x49
+#define NAU8822_REG_INPUT_TIEOFF 0x4A
+#define NAU8822_REG_POWER_REDUCTION 0x4B
+#define NAU8822_REG_AGC_PEAK2PEAK 0x4C
+#define NAU8822_REG_AGC_PEAK_DETECT 0x4D
+#define NAU8822_REG_AUTOMUTE_CONTROL 0x4E
+#define NAU8822_REG_OUTPUT_TIEOFF 0x4F
+#define NAU8822_REG_MAX_REGISTER NAU8822_REG_OUTPUT_TIEOFF
+
+/* NAU8822_REG_POWER_MANAGEMENT_1 (0x1) */
+#define NAU8822_REFIMP_MASK 0x3
+#define NAU8822_REFIMP_80K 0x1
+#define NAU8822_REFIMP_300K 0x2
+#define NAU8822_REFIMP_3K 0x3
+#define NAU8822_IOBUF_EN (0x1 << 2)
+#define NAU8822_ABIAS_EN (0x1 << 3)
+
+/* NAU8822_REG_AUDIO_INTERFACE (0x4) */
+#define NAU8822_AIFMT_MASK (0x3 << 3)
+#define NAU8822_WLEN_MASK (0x3 << 5)
+#define NAU8822_WLEN_20 (0x1 << 5)
+#define NAU8822_WLEN_24 (0x2 << 5)
+#define NAU8822_WLEN_32 (0x3 << 5)
+#define NAU8822_LRP_MASK (0x1 << 7)
+#define NAU8822_BCLKP_MASK (0x1 << 8)
+
+/* NAU8822_REG_COMPANDING_CONTROL (0x5) */
+#define NAU8822_ADDAP_SFT 0
+#define NAU8822_ADCCM_SFT 1
+#define NAU8822_DACCM_SFT 3
+
+/* NAU8822_REG_CLOCKING (0x6) */
+#define NAU8822_CLKIOEN_MASK 0x1
+#define NAU8822_MCLKSEL_SFT 5
+#define NAU8822_MCLKSEL_MASK (0x7 << 5)
+#define NAU8822_BCLKSEL_SFT 2
+#define NAU8822_BCLKSEL_MASK (0x7 << 2)
+#define NAU8822_CLKM_MASK (0x1 << 8)
+#define NAU8822_CLKM_MCLK (0x0 << 8)
+#define NAU8822_CLKM_PLL (0x1 << 8)
+
+/* NAU8822_REG_ADDITIONAL_CONTROL (0x07) */
+#define NAU8822_SMPLR_SFT 1
+#define NAU8822_SMPLR_MASK (0x7 << 1)
+#define NAU8822_SMPLR_48K (0x0 << 1)
+#define NAU8822_SMPLR_32K (0x1 << 1)
+#define NAU8822_SMPLR_24K (0x2 << 1)
+#define NAU8822_SMPLR_16K (0x3 << 1)
+#define NAU8822_SMPLR_12K (0x4 << 1)
+#define NAU8822_SMPLR_8K (0x5 << 1)
+
+/* NAU8822_REG_EQ1 (0x12) */
+#define NAU8822_EQ1GC_SFT 0
+#define NAU8822_EQ1CF_SFT 5
+#define NAU8822_EQM_SFT 8
+
+/* NAU8822_REG_EQ2 (0x13) */
+#define NAU8822_EQ2GC_SFT 0
+#define NAU8822_EQ2CF_SFT 5
+#define NAU8822_EQ2BW_SFT 8
+
+/* NAU8822_REG_EQ3 (0x14) */
+#define NAU8822_EQ3GC_SFT 0
+#define NAU8822_EQ3CF_SFT 5
+#define NAU8822_EQ3BW_SFT 8
+
+/* NAU8822_REG_EQ4 (0x15) */
+#define NAU8822_EQ4GC_SFT 0
+#define NAU8822_EQ4CF_SFT 5
+#define NAU8822_EQ4BW_SFT 8
+
+/* NAU8822_REG_EQ5 (0x16) */
+#define NAU8822_EQ5GC_SFT 0
+#define NAU8822_EQ5CF_SFT 5
+
+/* NAU8822_REG_ALC_CONTROL_1 (0x20) */
+#define NAU8822_ALCMINGAIN_SFT 0
+#define NAU8822_ALCMXGAIN_SFT 3
+#define NAU8822_ALCEN_SFT 7
+
+/* NAU8822_REG_ALC_CONTROL_2 (0x21) */
+#define NAU8822_ALCSL_SFT 0
+#define NAU8822_ALCHT_SFT 4
+
+/* NAU8822_REG_ALC_CONTROL_3 (0x22) */
+#define NAU8822_ALCATK_SFT 0
+#define NAU8822_ALCDCY_SFT 4
+#define NAU8822_ALCM_SFT 8
+
+/* NAU8822_REG_PLL_N (0x24) */
+#define NAU8822_PLLMCLK_DIV2 (0x1 << 4)
+#define NAU8822_PLLN_MASK 0xF
+
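+/* NAU8822_REG_PLL_K1 (0x25) */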
+#define NAU8822_PLLK1_SFT 18
+#define NAU8822_PLLK1_MASK 0x3F
+
+/* NAU8822_REG_PLL_K2 (0x26) */
+#define NAU8822_PLLK2_SFT 9
+#define NAU8822_PLLK2_MASK 0x1FF
+
+/* NAU8822_REG_PLL_K3 (0x27) */
+#define NAU8822_PLLK3_MASK 0x1FF
+
+/* System Clock Source */
+enum {
+ NAU8822_CLK_MCLK,
+ NAU8822_CLK_PLL,
+};
+
+struct nau8822_pll {
+ int pre_factor;
+ int mclk_scaler;
+ int pll_frac;
+ int pll_int;
+};
+
+/* Codec Private Data */
+struct nau8822 {
+ struct device *dev;
+ struct regmap *regmap;
+ int mclk_idx;
+ struct nau8822_pll pll;
+ int sysclk;
+ int div_id;
+};
+
+#endif /* __NAU8822_H__ */
diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
index 690c26e7389e..809b7e9f03ca 100644
--- a/sound/soc/codecs/pcm186x.c
+++ b/sound/soc/codecs/pcm186x.c
@@ -401,7 +401,8 @@ static int pcm186x_set_fmt(struct snd_soc_dai *dai, unsigned int format)
break;
case SND_SOC_DAIFMT_DSP_A:
priv->tdm_offset += 1;
- /* Fall through... DSP_A uses the same basic config as DSP_B
+ /* fall through */
+ /* DSP_A uses the same basic config as DSP_B
* except we need to shift the TDM output by one BCK cycle
*/
case SND_SOC_DAIFMT_DSP_B:
diff --git a/sound/soc/codecs/pcm3060-i2c.c b/sound/soc/codecs/pcm3060-i2c.c
new file mode 100644
index 000000000000..cdc8314882bc
--- /dev/null
+++ b/sound/soc/codecs/pcm3060-i2c.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// PCM3060 I2C driver
+//
+// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech>
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+
+#include "pcm3060.h"
+
+static int pcm3060_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct pcm3060_priv *priv;
+
+ priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, priv);
+
+ priv->regmap = devm_regmap_init_i2c(i2c, &pcm3060_regmap);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ return pcm3060_probe(&i2c->dev);
+}
+
+static const struct i2c_device_id pcm3060_i2c_id[] = {
+ { .name = "pcm3060" },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, pcm3060_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pcm3060_of_match[] = {
+ { .compatible = "ti,pcm3060" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pcm3060_of_match);
+#endif /* CONFIG_OF */
+
+static struct i2c_driver pcm3060_i2c_driver = {
+ .driver = {
+ .name = "pcm3060",
+#ifdef CONFIG_OF
+ .of_match_table = pcm3060_of_match,
+#endif /* CONFIG_OF */
+ },
+ .id_table = pcm3060_i2c_id,
+ .probe = pcm3060_i2c_probe,
+};
+
+module_i2c_driver(pcm3060_i2c_driver);
+
+MODULE_DESCRIPTION("PCM3060 I2C driver");
+MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm3060-spi.c b/sound/soc/codecs/pcm3060-spi.c
new file mode 100644
index 000000000000..f6f19fa80932
--- /dev/null
+++ b/sound/soc/codecs/pcm3060-spi.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// PCM3060 SPI driver
+//
+// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech>
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <sound/soc.h>
+
+#include "pcm3060.h"
+
+static int pcm3060_spi_probe(struct spi_device *spi)
+{
+ struct pcm3060_priv *priv;
+
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, priv);
+
+ priv->regmap = devm_regmap_init_spi(spi, &pcm3060_regmap);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ return pcm3060_probe(&spi->dev);
+}
+
+static const struct spi_device_id pcm3060_spi_id[] = {
+ { .name = "pcm3060" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, pcm3060_spi_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pcm3060_of_match[] = {
+ { .compatible = "ti,pcm3060" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pcm3060_of_match);
+#endif /* CONFIG_OF */
+
+static struct spi_driver pcm3060_spi_driver = {
+ .driver = {
+ .name = "pcm3060",
+#ifdef CONFIG_OF
+ .of_match_table = pcm3060_of_match,
+#endif /* CONFIG_OF */
+ },
+ .id_table = pcm3060_spi_id,
+ .probe = pcm3060_spi_probe,
+};
+
+module_spi_driver(pcm3060_spi_driver);
+
+MODULE_DESCRIPTION("PCM3060 SPI driver");
+MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm3060.c b/sound/soc/codecs/pcm3060.c
new file mode 100644
index 000000000000..494d9d662be8
--- /dev/null
+++ b/sound/soc/codecs/pcm3060.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// PCM3060 codec driver
+//
+// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech>
+
+#include <linux/module.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "pcm3060.h"
+
+/* dai */
+
+static int pcm3060_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_component *comp = dai->component;
+ struct pcm3060_priv *priv = snd_soc_component_get_drvdata(comp);
+
+ if (dir != SND_SOC_CLOCK_IN) {
+ dev_err(comp->dev, "unsupported sysclock dir: %d\n", dir);
+ return -EINVAL;
+ }
+
+ priv->dai[dai->id].sclk_freq = freq;
+
+ return 0;
+}
+
+static int pcm3060_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *comp = dai->component;
+ struct pcm3060_priv *priv = snd_soc_component_get_drvdata(comp);
+ unsigned int reg;
+ unsigned int val;
+
+ if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF) {
+ dev_err(comp->dev, "unsupported DAI polarity: 0x%x\n", fmt);
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ priv->dai[dai->id].is_master = true;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ priv->dai[dai->id].is_master = false;
+ break;
+ default:
+ dev_err(comp->dev, "unsupported DAI master mode: 0x%x\n", fmt);
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ val = PCM3060_REG_FMT_I2S;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ val = PCM3060_REG_FMT_RJ;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ val = PCM3060_REG_FMT_LJ;
+ break;
+ default:
+ dev_err(comp->dev, "unsupported DAI format: 0x%x\n", fmt);
+ return -EINVAL;
+ }
+
+ if (dai->id == PCM3060_DAI_ID_DAC)
+ reg = PCM3060_REG67;
+ else
+ reg = PCM3060_REG72;
+
+ regmap_update_bits(priv->regmap, reg, PCM3060_REG_MASK_FMT, val);
+
+ return 0;
+}
+
+static int pcm3060_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *comp = dai->component;
+ struct pcm3060_priv *priv = snd_soc_component_get_drvdata(comp);
+ unsigned int rate;
+ unsigned int ratio;
+ unsigned int reg;
+ unsigned int val;
+
+ if (!priv->dai[dai->id].is_master) {
+ val = PCM3060_REG_MS_S;
+ goto val_ready;
+ }
+
+ rate = params_rate(params);
+ if (!rate) {
+ dev_err(comp->dev, "rate is not configured\n");
+ return -EINVAL;
+ }
+
+ ratio = priv->dai[dai->id].sclk_freq / rate;
+
+ switch (ratio) {
+ case 768:
+ val = PCM3060_REG_MS_M768;
+ break;
+ case 512:
+ val = PCM3060_REG_MS_M512;
+ break;
+ case 384:
+ val = PCM3060_REG_MS_M384;
+ break;
+ case 256:
+ val = PCM3060_REG_MS_M256;
+ break;
+ case 192:
+ val = PCM3060_REG_MS_M192;
+ break;
+ case 128:
+ val = PCM3060_REG_MS_M128;
+ break;
+ default:
+ dev_err(comp->dev, "unsupported ratio: %d\n", ratio);
+ return -EINVAL;
+ }
+
+val_ready:
+ if (dai->id == PCM3060_DAI_ID_DAC)
+ reg = PCM3060_REG67;
+ else
+ reg = PCM3060_REG72;
+
+ regmap_update_bits(priv->regmap, reg, PCM3060_REG_MASK_MS, val);
+
+ return 0;
+}
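+
+/*
+ * A worked example of the master-mode ratio selection above, with
+ * assumed clocks: SCKI = 24.576 MHz and fs = 48 kHz give ratio 512, so
+ * PCM3060_REG_MS_M512 is programmed; the same SCKI at fs = 96 kHz gives
+ * ratio 256 (PCM3060_REG_MS_M256).
+ */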
+
+static const struct snd_soc_dai_ops pcm3060_dai_ops = {
+ .set_sysclk = pcm3060_set_sysclk,
+ .set_fmt = pcm3060_set_fmt,
+ .hw_params = pcm3060_hw_params,
+};
+
+#define PCM3060_DAI_RATES_ADC (SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+
+#define PCM3060_DAI_RATES_DAC (PCM3060_DAI_RATES_ADC | \
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
+
+static struct snd_soc_dai_driver pcm3060_dai[] = {
+ {
+ .name = "pcm3060-dac",
+ .id = PCM3060_DAI_ID_DAC,
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = PCM3060_DAI_RATES_DAC,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &pcm3060_dai_ops,
+ },
+ {
+ .name = "pcm3060-adc",
+ .id = PCM3060_DAI_ID_ADC,
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = PCM3060_DAI_RATES_ADC,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &pcm3060_dai_ops,
+ },
+};
+
+/* dapm */
+
+static DECLARE_TLV_DB_SCALE(pcm3060_dapm_tlv, -10050, 50, 1);
+
+static const struct snd_kcontrol_new pcm3060_dapm_controls[] = {
+ SOC_DOUBLE_R_RANGE_TLV("Master Playback Volume",
+ PCM3060_REG65, PCM3060_REG66, 0,
+ PCM3060_REG_AT2_MIN, PCM3060_REG_AT2_MAX,
+ 0, pcm3060_dapm_tlv),
+ SOC_DOUBLE("Master Playback Switch", PCM3060_REG68,
+ PCM3060_REG_SHIFT_MUT21, PCM3060_REG_SHIFT_MUT22, 1, 1),
+
+ SOC_DOUBLE_R_RANGE_TLV("Master Capture Volume",
+ PCM3060_REG70, PCM3060_REG71, 0,
+ PCM3060_REG_AT1_MIN, PCM3060_REG_AT1_MAX,
+ 0, pcm3060_dapm_tlv),
+ SOC_DOUBLE("Master Capture Switch", PCM3060_REG73,
+ PCM3060_REG_SHIFT_MUT11, PCM3060_REG_SHIFT_MUT12, 1, 1),
+};
+
+static const struct snd_soc_dapm_widget pcm3060_dapm_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("OUTL+"),
+ SND_SOC_DAPM_OUTPUT("OUTR+"),
+ SND_SOC_DAPM_OUTPUT("OUTL-"),
+ SND_SOC_DAPM_OUTPUT("OUTR-"),
+
+ SND_SOC_DAPM_INPUT("INL"),
+ SND_SOC_DAPM_INPUT("INR"),
+};
+
+static const struct snd_soc_dapm_route pcm3060_dapm_map[] = {
+ { "OUTL+", NULL, "Playback" },
+ { "OUTR+", NULL, "Playback" },
+ { "OUTL-", NULL, "Playback" },
+ { "OUTR-", NULL, "Playback" },
+
+ { "Capture", NULL, "INL" },
+ { "Capture", NULL, "INR" },
+};
+
+/* soc component */
+
+static const struct snd_soc_component_driver pcm3060_soc_comp_driver = {
+ .controls = pcm3060_dapm_controls,
+ .num_controls = ARRAY_SIZE(pcm3060_dapm_controls),
+ .dapm_widgets = pcm3060_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm3060_dapm_widgets),
+ .dapm_routes = pcm3060_dapm_map,
+ .num_dapm_routes = ARRAY_SIZE(pcm3060_dapm_map),
+};
+
+/* regmap */
+
+static bool pcm3060_reg_writeable(struct device *dev, unsigned int reg)
+{
+ return (reg >= PCM3060_REG64);
+}
+
+static bool pcm3060_reg_readable(struct device *dev, unsigned int reg)
+{
+ return (reg >= PCM3060_REG64);
+}
+
+static bool pcm3060_reg_volatile(struct device *dev, unsigned int reg)
+{
+ /* PCM3060_REG64 is volatile */
+ return (reg == PCM3060_REG64);
+}
+
+static const struct reg_default pcm3060_reg_defaults[] = {
+ { PCM3060_REG64, 0xF0 },
+ { PCM3060_REG65, 0xFF },
+ { PCM3060_REG66, 0xFF },
+ { PCM3060_REG67, 0x00 },
+ { PCM3060_REG68, 0x00 },
+ { PCM3060_REG69, 0x00 },
+ { PCM3060_REG70, 0xD7 },
+ { PCM3060_REG71, 0xD7 },
+ { PCM3060_REG72, 0x00 },
+ { PCM3060_REG73, 0x00 },
+};
+
+const struct regmap_config pcm3060_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = pcm3060_reg_writeable,
+ .readable_reg = pcm3060_reg_readable,
+ .volatile_reg = pcm3060_reg_volatile,
+ .max_register = PCM3060_REG73,
+ .reg_defaults = pcm3060_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(pcm3060_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL(pcm3060_regmap);
+
+/* device */
+
+int pcm3060_probe(struct device *dev)
+{
+ int rc;
+
+ rc = devm_snd_soc_register_component(dev, &pcm3060_soc_comp_driver,
+ pcm3060_dai,
+ ARRAY_SIZE(pcm3060_dai));
+ if (rc) {
+ dev_err(dev, "failed to register component, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pcm3060_probe);
+
+MODULE_DESCRIPTION("PCM3060 codec driver");
+MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm3060.h b/sound/soc/codecs/pcm3060.h
new file mode 100644
index 000000000000..fd89a68aa8a7
--- /dev/null
+++ b/sound/soc/codecs/pcm3060.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCM3060 codec driver
+ *
+ * Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech>
+ */
+
+#ifndef _SND_SOC_PCM3060_H
+#define _SND_SOC_PCM3060_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+extern const struct regmap_config pcm3060_regmap;
+
+#define PCM3060_DAI_ID_DAC 0
+#define PCM3060_DAI_ID_ADC 1
+#define PCM3060_DAI_IDS_NUM 2
+
+struct pcm3060_priv_dai {
+ bool is_master;
+ unsigned int sclk_freq;
+};
+
+struct pcm3060_priv {
+ struct regmap *regmap;
+ struct pcm3060_priv_dai dai[PCM3060_DAI_IDS_NUM];
+};
+
+int pcm3060_probe(struct device *dev);
+int pcm3060_remove(struct device *dev);
+
+/* registers */
+
+#define PCM3060_REG64 0x40
+#define PCM3060_REG_MRST 0x80
+#define PCM3060_REG_SRST 0x40
+#define PCM3060_REG_ADPSV 0x20
+#define PCM3060_REG_DAPSV 0x10
+#define PCM3060_REG_SE 0x01
+
+#define PCM3060_REG65 0x41
+#define PCM3060_REG66 0x42
+#define PCM3060_REG_AT2_MIN 0x36
+#define PCM3060_REG_AT2_MAX 0xFF
+
+#define PCM3060_REG67 0x43
+#define PCM3060_REG72 0x48
+#define PCM3060_REG_CSEL 0x80
+#define PCM3060_REG_MASK_MS 0x70
+#define PCM3060_REG_MS_S 0x00
+#define PCM3060_REG_MS_M768 (0x01 << 4)
+#define PCM3060_REG_MS_M512 (0x02 << 4)
+#define PCM3060_REG_MS_M384 (0x03 << 4)
+#define PCM3060_REG_MS_M256 (0x04 << 4)
+#define PCM3060_REG_MS_M192 (0x05 << 4)
+#define PCM3060_REG_MS_M128 (0x06 << 4)
+#define PCM3060_REG_MASK_FMT 0x03
+#define PCM3060_REG_FMT_I2S 0x00
+#define PCM3060_REG_FMT_LJ 0x01
+#define PCM3060_REG_FMT_RJ 0x02
+
+#define PCM3060_REG68 0x44
+#define PCM3060_REG_OVER 0x40
+#define PCM3060_REG_DREV2 0x04
+#define PCM3060_REG_SHIFT_MUT21 0x00
+#define PCM3060_REG_SHIFT_MUT22 0x01
+
+#define PCM3060_REG69 0x45
+#define PCM3060_REG_FLT 0x80
+#define PCM3060_REG_MASK_DMF 0x60
+#define PCM3060_REG_DMC 0x10
+#define PCM3060_REG_ZREV 0x02
+#define PCM3060_REG_AZRO 0x01
+
+#define PCM3060_REG70 0x46
+#define PCM3060_REG71 0x47
+#define PCM3060_REG_AT1_MIN 0x0E
+#define PCM3060_REG_AT1_MAX 0xFF
+
+#define PCM3060_REG73 0x49
+#define PCM3060_REG_ZCDD 0x10
+#define PCM3060_REG_BYP 0x08
+#define PCM3060_REG_DREV1 0x04
+#define PCM3060_REG_SHIFT_MUT11 0x00
+#define PCM3060_REG_SHIFT_MUT12 0x01
+
+#endif /* _SND_SOC_PCM3060_H */
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index 3356c91f55b0..52cc950c9fd1 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -33,6 +33,8 @@
#define PCM3168A_FMT_RIGHT_J_16 0x3
#define PCM3168A_FMT_DSP_A 0x4
#define PCM3168A_FMT_DSP_B 0x5
+#define PCM3168A_FMT_I2S_TDM 0x6
+#define PCM3168A_FMT_LEFT_J_TDM 0x7
#define PCM3168A_FMT_DSP_MASK 0x4
#define PCM3168A_NUM_SUPPLIES 6
@@ -401,9 +403,11 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream,
bool tx, master_mode;
u32 val, mask, shift, reg;
unsigned int rate, fmt, ratio, max_ratio;
+ unsigned int chan;
int i, min_frame_size;
rate = params_rate(params);
+ chan = params_channels(params);
ratio = pcm3168a->sysclk / rate;
@@ -456,6 +460,21 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ /* for TDM */
+ if (chan > 2) {
+ switch (fmt) {
+ case PCM3168A_FMT_I2S:
+ fmt = PCM3168A_FMT_I2S_TDM;
+ break;
+ case PCM3168A_FMT_LEFT_J:
+ fmt = PCM3168A_FMT_LEFT_J_TDM;
+ break;
+ default:
+ dev_err(component->dev, "TDM is only supported in I2S or LEFT_J mode\n");
+ return -EINVAL;
+ }
+ }
+
if (master_mode)
val = ((i + 1) << shift);
else
@@ -476,7 +495,69 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream,
return 0;
}
+static int pcm3168a_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct pcm3168a_priv *pcm3168a = snd_soc_component_get_drvdata(component);
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ unsigned int fmt;
+ unsigned int sample_min;
+ unsigned int channel_max;
+
+ if (tx)
+ fmt = pcm3168a->dac_fmt;
+ else
+ fmt = pcm3168a->adc_fmt;
+
+ /*
+ * Available Data Bits
+ *
+ * RIGHT_J : 24 / 16
+ * LEFT_J : 24
+ * I2S : 24
+ *
+ * TDM available
+ *
+ * I2S
+ * LEFT_J
+ */
+ switch (fmt) {
+ case PCM3168A_FMT_RIGHT_J:
+ sample_min = 16;
+ channel_max = 2;
+ break;
+ case PCM3168A_FMT_LEFT_J:
+ sample_min = 24;
+ if (tx)
+ channel_max = 8;
+ else
+ channel_max = 6;
+ break;
+ case PCM3168A_FMT_I2S:
+ sample_min = 24;
+ if (tx)
+ channel_max = 8;
+ else
+ channel_max = 6;
+ break;
+ default:
+ sample_min = 24;
+ channel_max = 2;
+ }
+
+ snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
+ sample_min, 32);
+
+ snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ 2, channel_max);
+
+ return 0;
+}
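+
+/*
+ * The resulting runtime constraints, summarised: RIGHT_J limits streams
+ * to 16..32 sample bits with 2 channels; I2S and LEFT_J require at least
+ * 24 bits and allow up to 8 playback / 6 capture channels; the DSP modes
+ * fall back to 24 bits and 2 channels.
+ */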
static const struct snd_soc_dai_ops pcm3168a_dac_dai_ops = {
+ .startup = pcm3168a_startup,
.set_fmt = pcm3168a_set_dai_fmt_dac,
.set_sysclk = pcm3168a_set_dai_sysclk,
.hw_params = pcm3168a_hw_params,
@@ -484,6 +565,7 @@ static const struct snd_soc_dai_ops pcm3168a_dac_dai_ops = {
};
static const struct snd_soc_dai_ops pcm3168a_adc_dai_ops = {
+ .startup = pcm3168a_startup,
.set_fmt = pcm3168a_set_dai_fmt_adc,
.set_sysclk = pcm3168a_set_dai_sysclk,
.hw_params = pcm3168a_hw_params
diff --git a/sound/soc/codecs/rt1305.c b/sound/soc/codecs/rt1305.c
index c4452efc7970..c2c8a68cec97 100644
--- a/sound/soc/codecs/rt1305.c
+++ b/sound/soc/codecs/rt1305.c
@@ -963,7 +963,8 @@ static const struct regmap_config rt1305_regmap = {
.num_reg_defaults = ARRAY_SIZE(rt1305_reg),
.ranges = rt1305_ranges,
.num_ranges = ARRAY_SIZE(rt1305_ranges),
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
#if defined(CONFIG_OF)
diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c
index d88e67341083..0ef966d56bac 100644
--- a/sound/soc/codecs/rt274.c
+++ b/sound/soc/codecs/rt274.c
@@ -755,6 +755,7 @@ static int rt274_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
break;
default:
dev_warn(component->dev, "invalid pll source, use BCLK\n");
+ /* fall through */
case RT274_PLL2_S_BCLK:
snd_soc_component_update_bits(component, RT274_PLL2_CTRL,
RT274_PLL2_SRC_MASK, RT274_PLL2_SRC_BCLK);
@@ -782,6 +783,7 @@ static int rt274_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
break;
default:
dev_warn(component->dev, "invalid freq_in, assume 4.8M\n");
+ /* fall through */
case 100:
snd_soc_component_write(component, 0x7a, 0xaab6);
snd_soc_component_write(component, 0x7b, 0x0301);
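The two rt274 hunks only add comments: a /* fall through */ annotation
matching the pattern GCC's -Wimplicit-fallthrough checks for, marking the
fall from the default case into the next label as deliberate. The shape
of the idiom, in an illustrative function:

#include <linux/printk.h>

enum foo_pll_src { FOO_PLL_S_MCLK, FOO_PLL_S_BCLK };

static void foo_select_pll_src(int source)
{
        switch (source) {
        default:
                pr_warn("invalid pll source, using BCLK\n");
                /* fall through */
        case FOO_PLL_S_BCLK:
                /* program the PLL mux for BCLK here */
                break;
        case FOO_PLL_S_MCLK:
                /* program the PLL mux for MCLK here */
                break;
        }
}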
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 6478d10c4f4a..4d46f4567c3a 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -91,6 +91,14 @@ static void rt5514_spi_copy_work(struct work_struct *work)
runtime = rt5514_dsp->substream->runtime;
period_bytes = snd_pcm_lib_period_bytes(rt5514_dsp->substream);
+ if (!period_bytes) {
+ schedule_delayed_work(&rt5514_dsp->copy_work, 5);
+ goto done;
+ }
+
+ if (rt5514_dsp->buf_size % period_bytes)
+ rt5514_dsp->buf_size = (rt5514_dsp->buf_size / period_bytes) *
+ period_bytes;
if (rt5514_dsp->get_size >= rt5514_dsp->buf_size) {
rt5514_spi_burst_read(RT5514_BUFFER_VOICE_WP, (u8 *)&buf,
@@ -149,13 +157,11 @@ done:
static void rt5514_schedule_copy(struct rt5514_dsp *rt5514_dsp)
{
- size_t period_bytes;
u8 buf[8];
if (!rt5514_dsp->substream)
return;
- period_bytes = snd_pcm_lib_period_bytes(rt5514_dsp->substream);
rt5514_dsp->get_size = 0;
/**
@@ -183,10 +189,6 @@ static void rt5514_schedule_copy(struct rt5514_dsp *rt5514_dsp)
rt5514_dsp->buf_size = rt5514_dsp->buf_limit - rt5514_dsp->buf_base;
- if (rt5514_dsp->buf_size % period_bytes)
- rt5514_dsp->buf_size = (rt5514_dsp->buf_size / period_bytes) *
- period_bytes;
-
if (rt5514_dsp->buf_base && rt5514_dsp->buf_limit &&
rt5514_dsp->buf_rp && rt5514_dsp->buf_size)
schedule_delayed_work(&rt5514_dsp->copy_work, 0);
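The rt5514-spi change moves the period alignment from the one-shot
schedule path into the copy worker, where the period size is re-read on
each pass: at schedule time hw_params may not have run yet, so
period_bytes can still be zero. The computation itself, isolated
(helper name is illustrative):

#include <linux/types.h>

/* Round a DSP buffer down to a whole number of ALSA periods. */
static size_t foo_align_to_period(size_t buf_size, size_t period_bytes)
{
        if (!period_bytes)
                return 0;       /* not configured yet: caller reschedules */

        return buf_size - (buf_size % period_bytes);
}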
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index dca82dd6e3bf..a67de68b6da6 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = {
{RT5514_ANA_CTRL_LDO10, 0x00028604},
{RT5514_ANA_CTRL_ADCFED, 0x00000800},
{RT5514_ASRC_IN_CTRL1, 0x00000003},
- {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
- {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
+ {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
+ {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
};
static const struct reg_default rt5514_reg[] = {
@@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = {
{RT5514_ASRC_IN_CTRL1, 0x00000003},
{RT5514_DOWNFILTER0_CTRL1, 0x00020c2f},
{RT5514_DOWNFILTER0_CTRL2, 0x00020c2f},
- {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
+ {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
{RT5514_DOWNFILTER1_CTRL1, 0x00020c2f},
{RT5514_DOWNFILTER1_CTRL2, 0x00020c2f},
- {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
+ {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
{RT5514_ANA_CTRL_LDO10, 0x00028604},
{RT5514_ANA_CTRL_LDO18_16, 0x02000345},
{RT5514_ANA_CTRL_ADC12, 0x0000a2a8},
@@ -1201,7 +1201,8 @@ static const struct regmap_config rt5514_regmap = {
.cache_type = REGCACHE_RBTREE,
.reg_defaults = rt5514_reg,
.num_reg_defaults = ARRAY_SIZE(rt5514_reg),
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
static const struct i2c_device_id rt5514_i2c_id[] = {
diff --git a/sound/soc/codecs/rt5616.c b/sound/soc/codecs/rt5616.c
index 3dc795f444ce..36a9f1c56c8d 100644
--- a/sound/soc/codecs/rt5616.c
+++ b/sound/soc/codecs/rt5616.c
@@ -1313,7 +1313,8 @@ static const struct snd_soc_component_driver soc_component_dev_rt5616 = {
static const struct regmap_config rt5616_regmap = {
.reg_bits = 8,
.val_bits = 16,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.max_register = RT5616_DEVICE_ID + 1 + (ARRAY_SIZE(rt5616_ranges) *
RT5616_PR_SPACING),
.volatile_reg = rt5616_volatile_register,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 27770143ae8f..fc530481a6e4 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -2704,7 +2704,8 @@ static const struct snd_soc_component_driver soc_component_dev_rt5640 = {
static const struct regmap_config rt5640_regmap = {
.reg_bits = 8,
.val_bits = 16,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) *
RT5640_PR_SPACING),
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 1dc70f452c1b..be674688dc40 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3559,7 +3559,8 @@ static const struct snd_soc_component_driver soc_component_dev_rt5645 = {
static const struct regmap_config rt5645_regmap = {
.reg_bits = 8,
.val_bits = 16,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.max_register = RT5645_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5645_ranges) *
RT5645_PR_SPACING),
.volatile_reg = rt5645_volatile_register,
@@ -3575,7 +3576,8 @@ static const struct regmap_config rt5645_regmap = {
static const struct regmap_config rt5650_regmap = {
.reg_bits = 8,
.val_bits = 16,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.max_register = RT5645_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5645_ranges) *
RT5645_PR_SPACING),
.volatile_reg = rt5645_volatile_register,
@@ -3592,7 +3594,8 @@ static const struct regmap_config temp_regmap = {
.name="nocache",
.reg_bits = 8,
.val_bits = 16,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.max_register = RT5645_VENDOR_ID2 + 1,
.cache_type = REGCACHE_NONE,
};
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index 985852fd9723..b7ba64350a07 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
@@ -2124,7 +2123,8 @@ static const struct regmap_config rt5651_regmap = {
.num_reg_defaults = ARRAY_SIZE(rt5651_reg),
.ranges = rt5651_ranges,
.num_ranges = ARRAY_SIZE(rt5651_ranges),
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
#if defined(CONFIG_OF)
diff --git a/sound/soc/codecs/rt5660.c b/sound/soc/codecs/rt5660.c
index 20a755137e63..27f7445b2432 100644
--- a/sound/soc/codecs/rt5660.c
+++ b/sound/soc/codecs/rt5660.c
@@ -1217,7 +1217,8 @@ static const struct snd_soc_component_driver soc_component_dev_rt5660 = {
static const struct regmap_config rt5660_regmap = {
.reg_bits = 8,
.val_bits = 16,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.max_register = RT5660_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5660_ranges) *
RT5660_PR_SPACING),
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index 9bd24ad42240..7eb2cbd39d6e 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -72,6 +72,7 @@ struct rt5663_priv {
static const struct reg_sequence rt5663_patch_list[] = {
{ 0x002a, 0x8020 },
{ 0x0086, 0x0028 },
+ { 0x0100, 0xa020 },
{ 0x0117, 0x0f28 },
{ 0x02fb, 0x8089 },
};
@@ -580,7 +581,7 @@ static const struct reg_default rt5663_reg[] = {
{ 0x00fd, 0x0001 },
{ 0x00fe, 0x10ec },
{ 0x00ff, 0x6406 },
- { 0x0100, 0xa0a0 },
+ { 0x0100, 0xa020 },
{ 0x0108, 0x4444 },
{ 0x0109, 0x4444 },
{ 0x010a, 0xaaaa },
@@ -2337,6 +2338,8 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
0x8000);
snd_soc_component_update_bits(component, RT5663_DEPOP_1, 0x3000,
0x3000);
+ snd_soc_component_update_bits(component,
+ RT5663_DIG_VOL_ZCD, 0x00c0, 0x0080);
}
break;
@@ -2351,6 +2354,8 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
RT5663_OVCD_HP_MASK, RT5663_OVCD_HP_EN);
snd_soc_component_update_bits(component,
RT5663_DACREF_LDO, 0x3e0e, 0);
+ snd_soc_component_update_bits(component,
+ RT5663_DIG_VOL_ZCD, 0x00c0, 0);
}
break;
@@ -3252,7 +3257,8 @@ static const struct snd_soc_component_driver soc_component_dev_rt5663 = {
static const struct regmap_config rt5663_v2_regmap = {
.reg_bits = 16,
.val_bits = 16,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.max_register = 0x07fa,
.volatile_reg = rt5663_v2_volatile_register,
.readable_reg = rt5663_v2_readable_register,
@@ -3264,7 +3270,8 @@ static const struct regmap_config rt5663_v2_regmap = {
static const struct regmap_config rt5663_regmap = {
.reg_bits = 16,
.val_bits = 16,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.max_register = 0x03f3,
.volatile_reg = rt5663_volatile_register,
.readable_reg = rt5663_readable_register,
@@ -3277,7 +3284,8 @@ static const struct regmap_config temp_regmap = {
.name = "nocache",
.reg_bits = 16,
.val_bits = 16,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.max_register = 0x03f3,
.cache_type = REGCACHE_NONE,
};
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
index 6ba99f5ed3f4..f2ad3a4c3b7f 100644
--- a/sound/soc/codecs/rt5665.c
+++ b/sound/soc/codecs/rt5665.c
@@ -4633,7 +4633,8 @@ static const struct regmap_config rt5665_regmap = {
.cache_type = REGCACHE_RBTREE,
.reg_defaults = rt5665_reg,
.num_reg_defaults = ARRAY_SIZE(rt5665_reg),
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
static const struct i2c_device_id rt5665_i2c_id[] = {
diff --git a/sound/soc/codecs/rt5668.c b/sound/soc/codecs/rt5668.c
index 3c19d03f2446..230a21c93b6b 100644
--- a/sound/soc/codecs/rt5668.c
+++ b/sound/soc/codecs/rt5668.c
@@ -2375,7 +2375,8 @@ static const struct regmap_config rt5668_regmap = {
.cache_type = REGCACHE_RBTREE,
.reg_defaults = rt5668_reg,
.num_reg_defaults = ARRAY_SIZE(rt5668_reg),
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
static const struct i2c_device_id rt5668_i2c_id[] = {
@@ -2587,17 +2588,10 @@ static int rt5668_i2c_probe(struct i2c_client *i2c,
}
- return snd_soc_register_component(&i2c->dev, &soc_component_dev_rt5668,
+ return devm_snd_soc_register_component(&i2c->dev, &soc_component_dev_rt5668,
rt5668_dai, ARRAY_SIZE(rt5668_dai));
}
-static int rt5668_i2c_remove(struct i2c_client *i2c)
-{
- snd_soc_unregister_component(&i2c->dev);
-
- return 0;
-}
-
static void rt5668_i2c_shutdown(struct i2c_client *client)
{
struct rt5668_priv *rt5668 = i2c_get_clientdata(client);
@@ -2628,7 +2622,6 @@ static struct i2c_driver rt5668_i2c_driver = {
.acpi_match_table = ACPI_PTR(rt5668_acpi_match),
},
.probe = rt5668_i2c_probe,
- .remove = rt5668_i2c_remove,
.shutdown = rt5668_i2c_shutdown,
.id_table = rt5668_i2c_id,
};
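The rt5668 conversion swaps the manual register/unregister pair for the
device-managed variant, which lets the whole .remove callback go away.
The resulting probe shape, sketched with hypothetical driver objects:

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <sound/soc.h>

static const struct snd_soc_component_driver foo_component_driver;
static struct snd_soc_dai_driver foo_dais[1];

static int foo_i2c_probe(struct i2c_client *i2c,
                         const struct i2c_device_id *id)
{
        /* unregistered automatically when the device is unbound */
        return devm_snd_soc_register_component(&i2c->dev,
                                               &foo_component_driver,
                                               foo_dais,
                                               ARRAY_SIZE(foo_dais));
}

Note that rt5682 below goes the other way, from devm back to explicit
registration with a .remove callback, presumably to control the
unregistration ordering; the two idioms are interchangeable only when no
such ordering matters.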
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 732ef928b25d..453328c988c0 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -2814,7 +2814,8 @@ static const struct snd_soc_component_driver soc_component_dev_rt5670 = {
static const struct regmap_config rt5670_regmap = {
.reg_bits = 8,
.val_bits = 16,
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
.max_register = RT5670_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5670_ranges) *
RT5670_PR_SPACING),
.volatile_reg = rt5670_volatile_register,
@@ -2877,6 +2878,18 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
},
{
.callback = rt5670_quirk_cb,
+ .ident = "Lenovo Thinkpad Tablet 8",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ .driver_data = (unsigned long *)(RT5670_DMIC_EN |
+ RT5670_DMIC2_INR |
+ RT5670_DEV_GPIO |
+ RT5670_JD_MODE1),
+ },
+ {
+ .callback = rt5670_quirk_cb,
.ident = "Lenovo Thinkpad Tablet 10",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index bd51f3655ee3..84501c2020c7 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -18,7 +18,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/regulator/consumer.h>
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 640d400ca013..34cfaf8f6f34 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -67,7 +67,8 @@ struct rt5682_priv {
};
static const struct reg_sequence patch_list[] = {
- {0x01c1, 0x1000},
+ {RT5682_HP_IMP_SENS_CTRL_19, 0x1000},
+ {RT5682_DAC_ADC_DIG_VOL1, 0xa020},
};
static const struct reg_default rt5682_reg[] = {
@@ -749,9 +750,8 @@ static bool rt5682_readable_register(struct device *dev, unsigned int reg)
}
}
-static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
@@ -1108,13 +1108,9 @@ static void rt5682_jack_detect_handler(struct work_struct *work)
}
static const struct snd_kcontrol_new rt5682_snd_controls[] = {
- /* Headphone Output Volume */
- SOC_DOUBLE_R_TLV("Headphone Playback Volume", RT5682_HPL_GAIN,
- RT5682_HPR_GAIN, RT5682_G_HP_SFT, 15, 1, hp_vol_tlv),
-
/* DAC Digital Volume */
SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL,
- RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 175, 0, dac_vol_tlv),
+ RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 86, 0, dac_vol_tlv),
/* IN Boost Volume */
SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL,
@@ -1124,7 +1120,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL,
RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1),
SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL,
- RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 127, 0, adc_vol_tlv),
+ RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 63, 0, adc_vol_tlv),
/* ADC Boost Volume Control */
SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST,
@@ -1437,6 +1433,28 @@ static const struct snd_kcontrol_new hpor_switch =
SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5682_HP_CTRL_1,
RT5682_R_MUTE_SFT, 1, 1);
+static int rt5682_charge_pump_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_update_bits(component,
+ RT5682_HP_CHARGE_PUMP_1, RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_update_bits(component,
+ RT5682_HP_CHARGE_PUMP_1, RT5682_PM_HP_MASK, RT5682_PM_HP_LV);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -1449,10 +1467,10 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
RT5682_HP_LOGIC_CTRL_2, 0x0012);
snd_soc_component_write(component,
RT5682_HP_CTRL_2, 0x6000);
- snd_soc_component_update_bits(component, RT5682_STO_NG2_CTRL_1,
- RT5682_NG2_EN_MASK, RT5682_NG2_EN);
snd_soc_component_update_bits(component,
RT5682_DEPOP_1, 0x60, 0x60);
+ snd_soc_component_update_bits(component,
+ RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0080);
break;
case SND_SOC_DAPM_POST_PMD:
@@ -1460,6 +1478,8 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
RT5682_DEPOP_1, 0x60, 0x0);
snd_soc_component_write(component,
RT5682_HP_CTRL_2, 0x0000);
+ snd_soc_component_update_bits(component,
+ RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0000);
break;
default:
@@ -1723,7 +1743,8 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("HP Amp R", RT5682_PWR_ANLG_1,
RT5682_PWR_HA_R_BIT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("Charge Pump", 1, RT5682_DEPOP_1,
- RT5682_PUMP_EN_SFT, 0, NULL, 0),
+ RT5682_PUMP_EN_SFT, 0, rt5682_charge_pump_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY_S("Capless", 2, RT5682_DEPOP_1,
RT5682_CAPLESS_EN_SFT, 0, NULL, 0),
@@ -1884,6 +1905,7 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
{"HP Amp", NULL, "Charge Pump"},
{"HP Amp", NULL, "CLKDET SYS"},
{"HP Amp", NULL, "CBJ Power"},
+ {"HP Amp", NULL, "Vref1"},
{"HP Amp", NULL, "Vref2"},
{"HPOL Playback", "Switch", "HP Amp"},
{"HPOR Playback", "Switch", "HP Amp"},
@@ -2419,7 +2441,8 @@ static const struct regmap_config rt5682_regmap = {
.cache_type = REGCACHE_RBTREE,
.reg_defaults = rt5682_reg,
.num_reg_defaults = ARRAY_SIZE(rt5682_reg),
- .use_single_rw = true,
+ .use_single_read = true,
+ .use_single_write = true,
};
static const struct i2c_device_id rt5682_i2c_id[] = {
@@ -2451,30 +2474,23 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
mutex_lock(&rt5682->calibrate_mutex);
rt5682_reset(rt5682->regmap);
- regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xa2bf);
+ regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xa2af);
usleep_range(15000, 20000);
- regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xf2bf);
- regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0380);
- regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x8001);
- regmap_write(rt5682->regmap, RT5682_TEST_MODE_CTRL_1, 0x0000);
- regmap_write(rt5682->regmap, RT5682_STO1_DAC_MIXER, 0x2080);
- regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0x4040);
- regmap_write(rt5682->regmap, RT5682_DEPOP_1, 0x0069);
+ regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xf2af);
+ regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0300);
+ regmap_write(rt5682->regmap, RT5682_GLB_CLK, 0x8000);
+ regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0100);
+ regmap_write(rt5682->regmap, RT5682_HP_IMP_SENS_CTRL_19, 0x3800);
regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x3000);
- regmap_write(rt5682->regmap, RT5682_HP_CTRL_2, 0x6000);
- regmap_write(rt5682->regmap, RT5682_HP_CHARGE_PUMP_1, 0x0f26);
- regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x7f05);
+ regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x7005);
regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0x686c);
regmap_write(rt5682->regmap, RT5682_CAL_REC, 0x0d0d);
- regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_9, 0x000f);
- regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x8d01);
regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_2, 0x0321);
regmap_write(rt5682->regmap, RT5682_HP_LOGIC_CTRL_2, 0x0004);
regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_1, 0x7c00);
regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_3, 0x06a1);
regmap_write(rt5682->regmap, RT5682_A_DAC1_MUX, 0x0311);
- regmap_write(rt5682->regmap, RT5682_RESET_HPF_CTRL, 0x0000);
- regmap_write(rt5682->regmap, RT5682_ADC_STO1_HP_CTRL_1, 0x3320);
+ regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_1, 0x7c00);
regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_1, 0xfc00);
@@ -2490,8 +2506,12 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
pr_err("HP Calibration Failure\n");
/* restore settings */
- regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4);
+ regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0x02af);
+ regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0080);
+ regmap_write(rt5682->regmap, RT5682_GLB_CLK, 0x0000);
regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000);
+ regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000);
+ regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005);
mutex_unlock(&rt5682->calibrate_mutex);
@@ -2565,7 +2585,7 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
rt5682_calibrate(rt5682);
- ret = regmap_register_patch(rt5682->regmap, patch_list,
+ ret = regmap_multi_reg_write(rt5682->regmap, patch_list,
ARRAY_SIZE(patch_list));
if (ret != 0)
dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
@@ -2619,6 +2639,10 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
RT5682_GP4_PIN_MASK | RT5682_GP5_PIN_MASK,
RT5682_GP4_PIN_ADCDAT1 | RT5682_GP5_PIN_DACDAT1);
regmap_write(rt5682->regmap, RT5682_TEST_MODE_CTRL_1, 0x0000);
+ regmap_update_bits(rt5682->regmap, RT5682_BIAS_CUR_CTRL_8,
+ RT5682_HPA_CP_BIAS_CTRL_MASK, RT5682_HPA_CP_BIAS_3UA);
+ regmap_update_bits(rt5682->regmap, RT5682_CHARGE_PUMP_1,
+ RT5682_CP_CLK_HP_MASK, RT5682_CP_CLK_HP_300KHZ);
INIT_DELAYED_WORK(&rt5682->jack_detect_work,
rt5682_jack_detect_handler);
@@ -2636,11 +2660,17 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
}
- return devm_snd_soc_register_component(&i2c->dev,
- &soc_component_dev_rt5682,
+ return snd_soc_register_component(&i2c->dev, &soc_component_dev_rt5682,
rt5682_dai, ARRAY_SIZE(rt5682_dai));
}
+static int rt5682_i2c_remove(struct i2c_client *i2c)
+{
+ snd_soc_unregister_component(&i2c->dev);
+
+ return 0;
+}
+
static void rt5682_i2c_shutdown(struct i2c_client *client)
{
struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
@@ -2671,6 +2701,7 @@ static struct i2c_driver rt5682_i2c_driver = {
.acpi_match_table = ACPI_PTR(rt5682_acpi_match),
},
.probe = rt5682_i2c_probe,
+ .remove = rt5682_i2c_remove,
.shutdown = rt5682_i2c_shutdown,
.id_table = rt5682_i2c_id,
};
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index 8068140ebe3f..d82a8301fd74 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -1214,6 +1214,20 @@
#define RT5682_JDH_NO_PLUG (0x1 << 4)
#define RT5682_JDH_PLUG (0x0 << 4)
+/* Bias current control 8 (0x0111) */
+#define RT5682_HPA_CP_BIAS_CTRL_MASK (0x3 << 2)
+#define RT5682_HPA_CP_BIAS_2UA (0x0 << 2)
+#define RT5682_HPA_CP_BIAS_3UA (0x1 << 2)
+#define RT5682_HPA_CP_BIAS_4UA (0x2 << 2)
+#define RT5682_HPA_CP_BIAS_6UA (0x3 << 2)
+
+/* Charge Pump Internal Register1 (0x0125) */
+#define RT5682_CP_CLK_HP_MASK (0x3 << 4)
+#define RT5682_CP_CLK_HP_100KHZ (0x0 << 4)
+#define RT5682_CP_CLK_HP_200KHZ (0x1 << 4)
+#define RT5682_CP_CLK_HP_300KHZ (0x2 << 4)
+#define RT5682_CP_CLK_HP_600KHZ (0x3 << 4)
+
/* Chopper and Clock control for DAC (0x013a)*/
#define RT5682_CKXEN_DAC1_MASK (0x1 << 13)
#define RT5682_CKXEN_DAC1_SFT 13
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 60764f6201b1..add18d6d77da 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1218,7 +1218,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_component *component)
* Searching for a suitable index solving this formula:
* idx = 40 * log10(vag_val / lo_cagcntrl) + 15
*/
- vol_quot = (vag * 100) / lo_vag;
+ vol_quot = lo_vag ? (vag * 100) / lo_vag : 0;
lo_vol = 0;
for (i = 0; i < ARRAY_SIZE(vol_quot_table); i++) {
if (vol_quot >= vol_quot_table[i])
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index d53680ac78e4..6df158669420 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
struct sigmadsp_control *ctrl, void *data)
{
/* safeload loads up to 20 bytes in an atomic operation */
- if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
- sigmadsp->ops->safeload)
+ if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
ctrl->num_bytes);
else
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index d5035f2f2b2b..f753d2db0a5a 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
@@ -142,6 +143,7 @@ static const char *sta32x_supply_names[] = {
/* codec private data */
struct sta32x_priv {
struct regmap *regmap;
+ struct clk *xti_clk;
struct regulator_bulk_data supplies[ARRAY_SIZE(sta32x_supply_names)];
struct snd_soc_component *component;
struct sta32x_platform_data *pdata;
@@ -879,6 +881,18 @@ static int sta32x_probe(struct snd_soc_component *component)
struct sta32x_priv *sta32x = snd_soc_component_get_drvdata(component);
struct sta32x_platform_data *pdata = sta32x->pdata;
int i, ret = 0, thermal = 0;
+
+ sta32x->component = component;
+
+ if (sta32x->xti_clk) {
+ ret = clk_prepare_enable(sta32x->xti_clk);
+ if (ret != 0) {
+ dev_err(component->dev,
+ "Failed to enable clock: %d\n", ret);
+ return ret;
+ }
+ }
+
ret = regulator_bulk_enable(ARRAY_SIZE(sta32x->supplies),
sta32x->supplies);
if (ret != 0) {
@@ -981,6 +995,9 @@ static void sta32x_remove(struct snd_soc_component *component)
sta32x_watchdog_stop(sta32x);
regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
+
+ if (sta32x->xti_clk)
+ clk_disable_unprepare(sta32x->xti_clk);
}
static const struct snd_soc_component_driver sta32x_component = {
@@ -1038,6 +1055,8 @@ static int sta32x_probe_dt(struct device *dev, struct sta32x_priv *sta32x)
of_property_read_u8(np, "st,ch3-output-mapping",
&pdata->ch3_output_mapping);
+ if (of_get_property(np, "st,fault-detect-recovery", NULL))
+ pdata->fault_detect_recovery = 1;
if (of_get_property(np, "st,thermal-warning-recovery", NULL))
pdata->thermal_warning_recovery = 1;
if (of_get_property(np, "st,thermal-warning-adjustment", NULL))
@@ -1095,6 +1114,17 @@ static int sta32x_i2c_probe(struct i2c_client *i2c,
}
#endif
+ /* Clock */
+ sta32x->xti_clk = devm_clk_get(dev, "xti");
+ if (IS_ERR(sta32x->xti_clk)) {
+ ret = PTR_ERR(sta32x->xti_clk);
+
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ sta32x->xti_clk = NULL;
+ }
+
/* GPIOs */
sta32x->gpiod_nreset = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_LOW);
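The sta32x clock handling treats "xti" as optional: a missing clock is
tolerated, but -EPROBE_DEFER must still be propagated so probing retries
once the clock provider shows up. The idiom in isolation (function name
is illustrative):

#include <linux/clk.h>
#include <linux/device.h>

static struct clk *foo_get_optional_xti(struct device *dev)
{
        struct clk *clk = devm_clk_get(dev, "xti");

        if (IS_ERR(clk)) {
                /* caller must return this so probe is retried later */
                if (PTR_ERR(clk) == -EPROBE_DEFER)
                        return clk;

                clk = NULL;     /* genuinely absent: run without it */
        }

        return clk;
}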
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
index ae3d032ac35a..6bd0e5d5347f 100644
--- a/sound/soc/codecs/tas5720.c
+++ b/sound/soc/codecs/tas5720.c
@@ -152,6 +152,7 @@ static int tas5720_set_dai_tdm_slot(struct snd_soc_dai *dai,
int slots, int slot_width)
{
struct snd_soc_component *component = dai->component;
+ struct tas5720_data *tas5720 = snd_soc_component_get_drvdata(component);
unsigned int first_slot;
int ret;
@@ -185,6 +186,20 @@ static int tas5720_set_dai_tdm_slot(struct snd_soc_dai *dai,
if (ret < 0)
goto error_snd_soc_component_update_bits;
+ /* Configure TDM slot width. This is only applicable to TAS5722. */
+ switch (tas5720->devtype) {
+ case TAS5722:
+ ret = snd_soc_component_update_bits(component, TAS5722_DIGITAL_CTRL2_REG,
+ TAS5722_TDM_SLOT_16B,
+ slot_width == 16 ?
+ TAS5722_TDM_SLOT_16B : 0);
+ if (ret < 0)
+ goto error_snd_soc_component_update_bits;
+ break;
+ default:
+ break;
+ }
+
return 0;
error_snd_soc_component_update_bits:
@@ -485,15 +500,56 @@ static const DECLARE_TLV_DB_RANGE(dac_analog_tlv,
);
/*
- * DAC digital volumes. From -103.5 to 24 dB in 0.5 dB steps. Note that
- * setting the gain below -100 dB (register value <0x7) is effectively a MUTE
- * as per device datasheet.
+ * DAC digital volumes. From -103.5 to 24 dB in 0.5 dB or 0.25 dB steps
+ * depending on the device. Note that setting the gain below -100 dB
+ * (register value <0x7) is effectively a MUTE as per device datasheet.
+ *
+ * Note that for the TAS5722 the digital volume controls are actually split
+ * over two registers, so we need custom getters/setters for access.
*/
-static DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0);
+static DECLARE_TLV_DB_SCALE(tas5720_dac_tlv, -10350, 50, 0);
+static DECLARE_TLV_DB_SCALE(tas5722_dac_tlv, -10350, 25, 0);
+
+static int tas5722_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ unsigned int val;
+
+ snd_soc_component_read(component, TAS5720_VOLUME_CTRL_REG, &val);
+ ucontrol->value.integer.value[0] = val << 1;
+
+ snd_soc_component_read(component, TAS5722_DIGITAL_CTRL2_REG, &val);
+ ucontrol->value.integer.value[0] |= val & TAS5722_VOL_CONTROL_LSB;
+
+ return 0;
+}
+
+static int tas5722_volume_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ unsigned int sel = ucontrol->value.integer.value[0];
+
+ snd_soc_component_write(component, TAS5720_VOLUME_CTRL_REG, sel >> 1);
+ snd_soc_component_update_bits(component, TAS5722_DIGITAL_CTRL2_REG,
+ TAS5722_VOL_CONTROL_LSB, sel);
+
+ return 0;
+}
static const struct snd_kcontrol_new tas5720_snd_controls[] = {
SOC_SINGLE_TLV("Speaker Driver Playback Volume",
- TAS5720_VOLUME_CTRL_REG, 0, 0xff, 0, dac_tlv),
+ TAS5720_VOLUME_CTRL_REG, 0, 0xff, 0, tas5720_dac_tlv),
+ SOC_SINGLE_TLV("Speaker Driver Analog Gain", TAS5720_ANALOG_CTRL_REG,
+ TAS5720_ANALOG_GAIN_SHIFT, 3, 0, dac_analog_tlv),
+};
+
+static const struct snd_kcontrol_new tas5722_snd_controls[] = {
+ SOC_SINGLE_EXT_TLV("Speaker Driver Playback Volume",
+ 0, 0, 511, 0,
+ tas5722_volume_get, tas5722_volume_set,
+ tas5722_dac_tlv),
SOC_SINGLE_TLV("Speaker Driver Analog Gain", TAS5720_ANALOG_CTRL_REG,
TAS5720_ANALOG_GAIN_SHIFT, 3, 0, dac_analog_tlv),
};
@@ -527,6 +583,23 @@ static const struct snd_soc_component_driver soc_component_dev_tas5720 = {
.non_legacy_dai_naming = 1,
};
+static const struct snd_soc_component_driver soc_component_dev_tas5722 = {
+ .probe = tas5720_codec_probe,
+ .remove = tas5720_codec_remove,
+ .suspend = tas5720_suspend,
+ .resume = tas5720_resume,
+ .controls = tas5722_snd_controls,
+ .num_controls = ARRAY_SIZE(tas5722_snd_controls),
+ .dapm_widgets = tas5720_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas5720_dapm_widgets),
+ .dapm_routes = tas5720_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas5720_audio_map),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
/* PCM rates supported by the TAS5720 driver */
#define TAS5720_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
@@ -613,9 +686,23 @@ static int tas5720_probe(struct i2c_client *client,
dev_set_drvdata(dev, data);
- ret = devm_snd_soc_register_component(&client->dev,
- &soc_component_dev_tas5720,
- tas5720_dai, ARRAY_SIZE(tas5720_dai));
+ switch (id->driver_data) {
+ case TAS5720:
+ ret = devm_snd_soc_register_component(&client->dev,
+ &soc_component_dev_tas5720,
+ tas5720_dai,
+ ARRAY_SIZE(tas5720_dai));
+ break;
+ case TAS5722:
+ ret = devm_snd_soc_register_component(&client->dev,
+ &soc_component_dev_tas5722,
+ tas5720_dai,
+ ARRAY_SIZE(tas5720_dai));
+ break;
+ default:
+ dev_err(dev, "unexpected private driver data\n");
+ return -EINVAL;
+ }
if (ret < 0) {
dev_err(dev, "failed to register component: %d\n", ret);
return ret;
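The TAS5722 control needs custom get/put handlers because its 9-bit
digital volume is split across two registers: the eight MSBs sit in the
volume register and the LSB in a control register. The bit packing on
its own, using regmap directly and made-up register addresses:

#include <linux/regmap.h>

#define FOO_VOL_MSB_REG         0x04    /* hypothetical */
#define FOO_CTRL2_REG           0x13    /* hypothetical, bit 0 = vol LSB */

static unsigned int foo_vol_get(struct regmap *map)
{
        unsigned int msb, ctrl2;

        regmap_read(map, FOO_VOL_MSB_REG, &msb);
        regmap_read(map, FOO_CTRL2_REG, &ctrl2);

        return (msb << 1) | (ctrl2 & 0x1);      /* 0..511 */
}

static void foo_vol_set(struct regmap *map, unsigned int sel)
{
        regmap_write(map, FOO_VOL_MSB_REG, sel >> 1);
        regmap_update_bits(map, FOO_CTRL2_REG, 0x1, sel & 0x1);
}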
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
index 14999b999fd3..36aebdb8f55c 100644
--- a/sound/soc/codecs/tas6424.c
+++ b/sound/soc/codecs/tas6424.c
@@ -41,6 +41,7 @@ struct tas6424_data {
struct regmap *regmap;
struct regulator_bulk_data supplies[TAS6424_NUM_SUPPLIES];
struct delayed_work fault_check_work;
+ unsigned int last_cfault;
unsigned int last_fault1;
unsigned int last_fault2;
unsigned int last_warn;
@@ -406,9 +407,54 @@ static void tas6424_fault_check_work(struct work_struct *work)
unsigned int reg;
int ret;
+ ret = regmap_read(tas6424->regmap, TAS6424_CHANNEL_FAULT, &reg);
+ if (ret < 0) {
+ dev_err(dev, "failed to read CHANNEL_FAULT register: %d\n", ret);
+ goto out;
+ }
+
+ if (!reg) {
+ tas6424->last_cfault = reg;
+ goto check_global_fault1_reg;
+ }
+
+ /*
+ * Only flag errors once for a given occurrence. This is needed as
+ * the TAS6424 will take time clearing the fault condition internally
+ * during which we don't want to bombard the system with the same
+ * error message over and over.
+ */
+ if ((reg & TAS6424_FAULT_OC_CH1) && !(tas6424->last_cfault & TAS6424_FAULT_OC_CH1))
+ dev_crit(dev, "experienced a channel 1 overcurrent fault\n");
+
+ if ((reg & TAS6424_FAULT_OC_CH2) && !(tas6424->last_cfault & TAS6424_FAULT_OC_CH2))
+ dev_crit(dev, "experienced a channel 2 overcurrent fault\n");
+
+ if ((reg & TAS6424_FAULT_OC_CH3) && !(tas6424->last_cfault & TAS6424_FAULT_OC_CH3))
+ dev_crit(dev, "experienced a channel 3 overcurrent fault\n");
+
+ if ((reg & TAS6424_FAULT_OC_CH4) && !(tas6424->last_cfault & TAS6424_FAULT_OC_CH4))
+ dev_crit(dev, "experienced a channel 4 overcurrent fault\n");
+
+ if ((reg & TAS6424_FAULT_DC_CH1) && !(tas6424->last_cfault & TAS6424_FAULT_DC_CH1))
+ dev_crit(dev, "experienced a channel 1 DC fault\n");
+
+ if ((reg & TAS6424_FAULT_DC_CH2) && !(tas6424->last_cfault & TAS6424_FAULT_DC_CH2))
+ dev_crit(dev, "experienced a channel 2 DC fault\n");
+
+ if ((reg & TAS6424_FAULT_DC_CH3) && !(tas6424->last_cfault & TAS6424_FAULT_DC_CH3))
+ dev_crit(dev, "experienced a channel 3 DC fault\n");
+
+ if ((reg & TAS6424_FAULT_DC_CH4) && !(tas6424->last_cfault & TAS6424_FAULT_DC_CH4))
+ dev_crit(dev, "experienced a channel 4 DC fault\n");
+
+ /* Store current channel-fault value so we can detect any changes next time */
+ tas6424->last_cfault = reg;
+
+check_global_fault1_reg:
ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT1, &reg);
if (ret < 0) {
- dev_err(dev, "failed to read FAULT1 register: %d\n", ret);
+ dev_err(dev, "failed to read GLOB_FAULT1 register: %d\n", ret);
goto out;
}
@@ -424,15 +470,11 @@ static void tas6424_fault_check_work(struct work_struct *work)
TAS6424_FAULT_PVDD_UV |
TAS6424_FAULT_VBAT_UV;
- if (reg)
+ if (!reg) {
+ tas6424->last_fault1 = reg;
goto check_global_fault2_reg;
+ }
- /*
- * Only flag errors once for a given occurrence. This is needed as
- * the TAS6424 will take time clearing the fault condition internally
- * during which we don't want to bombard the system with the same
- * error message over and over.
- */
if ((reg & TAS6424_FAULT_PVDD_OV) && !(tas6424->last_fault1 & TAS6424_FAULT_PVDD_OV))
dev_crit(dev, "experienced a PVDD overvoltage fault\n");
@@ -451,7 +493,7 @@ static void tas6424_fault_check_work(struct work_struct *work)
check_global_fault2_reg:
ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT2, &reg);
if (ret < 0) {
- dev_err(dev, "failed to read FAULT2 register: %d\n", ret);
+ dev_err(dev, "failed to read GLOB_FAULT2 register: %d\n", ret);
goto out;
}
@@ -461,8 +503,10 @@ check_global_fault2_reg:
TAS6424_FAULT_OTSD_CH3 |
TAS6424_FAULT_OTSD_CH4;
- if (!reg)
+ if (!reg) {
+ tas6424->last_fault2 = reg;
goto check_warn_reg;
+ }
if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
dev_crit(dev, "experienced a global overtemp shutdown\n");
@@ -497,8 +541,10 @@ check_warn_reg:
TAS6424_WARN_VDD_OTW_CH3 |
TAS6424_WARN_VDD_OTW_CH4;
- if (!reg)
+ if (!reg) {
+ tas6424->last_warn = reg;
goto out;
+ }
if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
dev_warn(dev, "experienced a VDD under voltage condition\n");
@@ -524,7 +570,7 @@ check_warn_reg:
/* Store current warn value so we can detect any changes next time */
tas6424->last_warn = reg;
- /* Clear any faults by toggling the CLEAR_FAULT control bit */
+ /* Clear any warnings by toggling the CLEAR_FAULT control bit */
ret = regmap_write_bits(tas6424->regmap, TAS6424_MISC_CTRL3,
TAS6424_CLEAR_FAULT, TAS6424_CLEAR_FAULT);
if (ret < 0)
diff --git a/sound/soc/codecs/tas6424.h b/sound/soc/codecs/tas6424.h
index b5958c45ed0e..c67a7835ca66 100644
--- a/sound/soc/codecs/tas6424.h
+++ b/sound/soc/codecs/tas6424.h
@@ -116,6 +116,16 @@
#define TAS6424_LDGBYPASS_MASK BIT(TAS6424_LDGBYPASS_SHIFT)
-/* TAS6424_GLOB_FAULT1_REG */
+/* TAS6424_CHANNEL_FAULT_REG */
+#define TAS6424_FAULT_OC_CH1 BIT(7)
+#define TAS6424_FAULT_OC_CH2 BIT(6)
+#define TAS6424_FAULT_OC_CH3 BIT(5)
+#define TAS6424_FAULT_OC_CH4 BIT(4)
+#define TAS6424_FAULT_DC_CH1 BIT(3)
+#define TAS6424_FAULT_DC_CH2 BIT(2)
+#define TAS6424_FAULT_DC_CH3 BIT(1)
+#define TAS6424_FAULT_DC_CH4 BIT(0)
+
+/* TAS6424_GLOB_FAULT1_REG */
#define TAS6424_FAULT_CLOCK BIT(4)
#define TAS6424_FAULT_PVDD_OV BIT(3)
#define TAS6424_FAULT_VBAT_OV BIT(2)
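The fault-check rework applies the existing report-once policy to the
new CHANNEL_FAULT register: a fault is logged only on its 0->1
transition, and the raw register value is cached so the next poll can
detect edges. The pattern reduced to one helper (names illustrative):

#include <linux/bits.h>
#include <linux/device.h>

static void foo_report_faults(struct device *dev, unsigned int reg,
                              unsigned int *last)
{
        unsigned int newly_set = reg & ~*last;

        if (newly_set & BIT(7))
                dev_crit(dev, "channel 1 overcurrent fault\n");
        /* ... one test per fault bit ... */

        *last = reg;    /* remember for edge detection on the next poll */
}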
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index bf92d36b8f8a..608ad49ad978 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -167,6 +167,7 @@ struct aic31xx_priv {
u8 p_div;
int rate_div_line;
bool master_dapm_route_applied;
+ int irq;
};
struct aic31xx_rate_divs {
@@ -1391,6 +1392,69 @@ static const struct acpi_device_id aic31xx_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
#endif
+static irqreturn_t aic31xx_irq(int irq, void *data)
+{
+ struct aic31xx_priv *aic31xx = data;
+ struct device *dev = aic31xx->dev;
+ unsigned int value;
+ bool handled = false;
+ int ret;
+
+ ret = regmap_read(aic31xx->regmap, AIC31XX_INTRDACFLAG, &value);
+ if (ret) {
+ dev_err(dev, "Failed to read interrupt mask: %d\n", ret);
+ goto exit;
+ }
+
+ if (value)
+ handled = true;
+ else
+ goto read_overflow;
+
+ if (value & AIC31XX_HPLSCDETECT)
+ dev_err(dev, "Short circuit on Left output is detected\n");
+ if (value & AIC31XX_HPRSCDETECT)
+ dev_err(dev, "Short circuit on Right output is detected\n");
+ if (value & ~(AIC31XX_HPLSCDETECT |
+ AIC31XX_HPRSCDETECT))
+ dev_err(dev, "Unknown DAC interrupt flags: 0x%08x\n", value);
+
+read_overflow:
+ ret = regmap_read(aic31xx->regmap, AIC31XX_OFFLAG, &value);
+ if (ret) {
+ dev_err(dev, "Failed to read overflow flag: %d\n", ret);
+ goto exit;
+ }
+
+ if (value)
+ handled = true;
+ else
+ goto exit;
+
+ if (value & AIC31XX_DAC_OF_LEFT)
+ dev_warn(dev, "Left-channel DAC overflow has occurred\n");
+ if (value & AIC31XX_DAC_OF_RIGHT)
+ dev_warn(dev, "Right-channel DAC overflow has occurred\n");
+ if (value & AIC31XX_DAC_OF_SHIFTER)
+ dev_warn(dev, "DAC barrel shifter overflow has occurred\n");
+ if (value & AIC31XX_ADC_OF)
+ dev_warn(dev, "ADC overflow has occurred\n");
+ if (value & AIC31XX_ADC_OF_SHIFTER)
+ dev_warn(dev, "ADC barrel shifter overflow has occurred\n");
+ if (value & ~(AIC31XX_DAC_OF_LEFT |
+ AIC31XX_DAC_OF_RIGHT |
+ AIC31XX_DAC_OF_SHIFTER |
+ AIC31XX_ADC_OF |
+ AIC31XX_ADC_OF_SHIFTER))
+ dev_warn(dev, "Unknown overflow interrupt flags: 0x%08x\n", value);
+
+exit:
+ if (handled)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
static int aic31xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -1413,6 +1477,7 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
return ret;
}
aic31xx->dev = &i2c->dev;
+ aic31xx->irq = i2c->irq;
aic31xx->codec_type = id->driver_data;
@@ -1456,6 +1521,26 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ if (aic31xx->irq > 0) {
+ regmap_update_bits(aic31xx->regmap, AIC31XX_GPIO1,
+ AIC31XX_GPIO1_FUNC_MASK,
+ AIC31XX_GPIO1_INT1 <<
+ AIC31XX_GPIO1_FUNC_SHIFT);
+
+ regmap_write(aic31xx->regmap, AIC31XX_INT1CTRL,
+ AIC31XX_SC |
+ AIC31XX_ENGINE);
+
+ ret = devm_request_threaded_irq(aic31xx->dev, aic31xx->irq,
+ NULL, aic31xx_irq,
+ IRQF_ONESHOT, "aic31xx-irq",
+ aic31xx);
+ if (ret) {
+ dev_err(aic31xx->dev, "Unable to request IRQ\n");
+ return ret;
+ }
+ }
+
if (aic31xx->codec_type & DAC31XX_BIT)
return devm_snd_soc_register_component(&i2c->dev,
&soc_codec_driver_aic31xx,
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 0b587585b38b..2636f2c6bc79 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -173,6 +173,13 @@ struct aic31xx_pdata {
#define AIC31XX_HPRDRVPWRSTATUS_MASK BIT(1)
#define AIC31XX_SPRDRVPWRSTATUS_MASK BIT(0)
+/* AIC31XX_OFFLAG */
+#define AIC31XX_DAC_OF_LEFT BIT(7)
+#define AIC31XX_DAC_OF_RIGHT BIT(6)
+#define AIC31XX_DAC_OF_SHIFTER BIT(5)
+#define AIC31XX_ADC_OF BIT(3)
+#define AIC31XX_ADC_OF_SHIFTER BIT(1)
+
/* AIC31XX_INTRDACFLAG */
#define AIC31XX_HPLSCDETECT BIT(7)
#define AIC31XX_HPRSCDETECT BIT(6)
@@ -191,6 +198,22 @@ struct aic31xx_pdata {
#define AIC31XX_SC BIT(3)
#define AIC31XX_ENGINE BIT(2)
+/* AIC31XX_GPIO1 */
+#define AIC31XX_GPIO1_FUNC_MASK GENMASK(5, 2)
+#define AIC31XX_GPIO1_FUNC_SHIFT 2
+#define AIC31XX_GPIO1_DISABLED 0x00
+#define AIC31XX_GPIO1_INPUT 0x01
+#define AIC31XX_GPIO1_GPI 0x02
+#define AIC31XX_GPIO1_GPO 0x03
+#define AIC31XX_GPIO1_CLKOUT 0x04
+#define AIC31XX_GPIO1_INT1 0x05
+#define AIC31XX_GPIO1_INT2 0x06
+#define AIC31XX_GPIO1_ADC_WCLK 0x07
+#define AIC31XX_GPIO1_SBCLK 0x08
+#define AIC31XX_GPIO1_SWCLK 0x09
+#define AIC31XX_GPIO1_ADC_MOD_CLK 0x10
+#define AIC31XX_GPIO1_SDOUT 0x11
+
/* AIC31XX_DACSETUP */
#define AIC31XX_SOFTSTEP_MASK GENMASK(1, 0)
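The aic31xx interrupt support uses a threaded handler because the status
registers live behind I2C and cannot be read from hard-IRQ context;
returning IRQ_NONE when nothing was pending keeps the kernel's
spurious-interrupt accounting honest. The skeleton, with hypothetical
names:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t foo_irq(int irq, void *data)
{
        bool handled = false;

        /*
         * Read the status registers over I2C here; set handled if any
         * interrupt bit was found asserted.
         */

        return handled ? IRQ_HANDLED : IRQ_NONE;
}

static int foo_setup_irq(struct device *dev, int irq, void *priv)
{
        /* NULL hard-IRQ handler: everything runs in the thread */
        return devm_request_threaded_irq(dev, irq, NULL, foo_irq,
                                         IRQF_ONESHOT, "foo-irq", priv);
}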
diff --git a/sound/soc/codecs/tscs454.c b/sound/soc/codecs/tscs454.c
index ff85a0bf6170..93d84e5ae2d5 100644
--- a/sound/soc/codecs/tscs454.c
+++ b/sound/soc/codecs/tscs454.c
@@ -3459,7 +3459,7 @@ static int tscs454_i2c_probe(struct i2c_client *i2c,
/* Sync pg sel reg with cache */
regmap_write(tscs454->regmap, R_PAGESEL, 0x00);
- ret = snd_soc_register_component(&i2c->dev, &soc_component_dev_tscs454,
+ ret = devm_snd_soc_register_component(&i2c->dev, &soc_component_dev_tscs454,
tscs454_dais, ARRAY_SIZE(tscs454_dais));
if (ret) {
dev_err(&i2c->dev, "Failed to register component (%d)\n", ret);
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index c5ae07234a00..bba330e30162 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -88,19 +88,6 @@ static int wm2000_write(struct i2c_client *i2c, unsigned int reg,
return regmap_write(wm2000->regmap, reg, value);
}
-static unsigned int wm2000_read(struct i2c_client *i2c, unsigned int r)
-{
- struct wm2000_priv *wm2000 = i2c_get_clientdata(i2c);
- unsigned int val;
- int ret;
-
- ret = regmap_read(wm2000->regmap, r, &val);
- if (ret < 0)
- return -1;
-
- return val;
-}
-
static void wm2000_reset(struct wm2000_priv *wm2000)
{
struct i2c_client *i2c = wm2000->i2c;
@@ -115,14 +102,15 @@ static void wm2000_reset(struct wm2000_priv *wm2000)
static int wm2000_poll_bit(struct i2c_client *i2c,
unsigned int reg, u8 mask)
{
+ struct wm2000_priv *wm2000 = i2c_get_clientdata(i2c);
int timeout = 4000;
- int val;
+ unsigned int val;
- val = wm2000_read(i2c, reg);
+ regmap_read(wm2000->regmap, reg, &val);
while (!(val & mask) && --timeout) {
msleep(1);
- val = wm2000_read(i2c, reg);
+ regmap_read(wm2000->regmap, reg, &val);
}
if (timeout == 0)
@@ -135,6 +123,7 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
{
struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
unsigned long rate;
+ unsigned int val;
int ret;
if (WARN_ON(wm2000->anc_mode != ANC_OFF))
@@ -213,12 +202,17 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
WM2000_MODE_THERMAL_ENABLE);
}
- ret = wm2000_read(i2c, WM2000_REG_SPEECH_CLARITY);
+ ret = regmap_read(wm2000->regmap, WM2000_REG_SPEECH_CLARITY, &val);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Unable to read Speech Clarity: %d\n", ret);
+ regulator_bulk_disable(WM2000_NUM_SUPPLIES, wm2000->supplies);
+ return ret;
+ }
if (wm2000->speech_clarity)
- ret |= WM2000_SPEECH_CLARITY;
+ val |= WM2000_SPEECH_CLARITY;
else
- ret &= ~WM2000_SPEECH_CLARITY;
- wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret);
+ val &= ~WM2000_SPEECH_CLARITY;
+ wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, val);
wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33);
wm2000_write(i2c, WM2000_REG_SYS_START1, 0x02);
@@ -824,7 +818,7 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
const char *filename;
const struct firmware *fw = NULL;
int ret, i;
- int reg;
+ unsigned int reg;
u16 id;
wm2000 = devm_kzalloc(&i2c->dev, sizeof(*wm2000), GFP_KERNEL);
@@ -860,9 +854,17 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
}
/* Verify that this is a WM2000 */
- reg = wm2000_read(i2c, WM2000_REG_ID1);
+ ret = regmap_read(wm2000->regmap, WM2000_REG_ID1, &reg);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Unable to read ID1: %d\n", ret);
+ return ret;
+ }
id = reg << 8;
- reg = wm2000_read(i2c, WM2000_REG_ID2);
+ ret = regmap_read(wm2000->regmap, WM2000_REG_ID2, &reg);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Unable to read ID2: %d\n", ret);
+ return ret;
+ }
id |= reg & 0xff;
if (id != 0x2000) {
@@ -871,7 +873,11 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
goto err_supplies;
}
- reg = wm2000_read(i2c, WM2000_REG_REVISON);
+ ret = regmap_read(wm2000->regmap, WM2000_REG_REVISON, &reg);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Unable to read Revision: %d\n", ret);
+ return ret;
+ }
dev_info(&i2c->dev, "revision %c\n", reg + 'A');
wm2000->mclk = devm_clk_get(&i2c->dev, "MCLK");
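The deleted wm2000_read() helper folded errors into its return value,
which becomes indistinguishable from a legitimate register reading;
calling regmap_read() directly keeps the status code and the value
separate. The resulting contract, sketched:

#include <linux/regmap.h>

/*
 * Old style returned the value, with -1 doubling as the error marker.
 * New style: status and value travel separately.
 */
static int foo_read_id(struct regmap *map, unsigned int reg,
                       unsigned int *val)
{
        int ret = regmap_read(map, reg, val);

        if (ret != 0)
                return ret;     /* *val is not valid on this path */

        return 0;
}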
diff --git a/sound/soc/codecs/wm8782.c b/sound/soc/codecs/wm8782.c
index 317db9a149a7..cf2cdbece122 100644
--- a/sound/soc/codecs/wm8782.c
+++ b/sound/soc/codecs/wm8782.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/regulator/consumer.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
@@ -50,7 +51,51 @@ static struct snd_soc_dai_driver wm8782_dai = {
},
};
+/* regulator power supply names */
+static const char *supply_names[] = {
+ "Vdda", /* analog supply, 2.7V - 3.6V */
+ "Vdd", /* digital supply, 2.7V - 5.5V */
+};
+
+struct wm8782_priv {
+ struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
+};
+
+static int wm8782_soc_probe(struct snd_soc_component *component)
+{
+ struct wm8782_priv *priv = snd_soc_component_get_drvdata(component);
+ return regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
+}
+
+static void wm8782_soc_remove(struct snd_soc_component *component)
+{
+ struct wm8782_priv *priv = snd_soc_component_get_drvdata(component);
+ regulator_bulk_disable(ARRAY_SIZE(priv->supplies), priv->supplies);
+}
+
+#ifdef CONFIG_PM
+static int wm8782_soc_suspend(struct snd_soc_component *component)
+{
+ struct wm8782_priv *priv = snd_soc_component_get_drvdata(component);
+ regulator_bulk_disable(ARRAY_SIZE(priv->supplies), priv->supplies);
+ return 0;
+}
+
+static int wm8782_soc_resume(struct snd_soc_component *component)
+{
+ struct wm8782_priv *priv = snd_soc_component_get_drvdata(component);
+ return regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
+}
+#else
+#define wm8782_soc_suspend NULL
+#define wm8782_soc_resume NULL
+#endif /* CONFIG_PM */
+
static const struct snd_soc_component_driver soc_component_dev_wm8782 = {
+ .probe = wm8782_soc_probe,
+ .remove = wm8782_soc_remove,
+ .suspend = wm8782_soc_suspend,
+ .resume = wm8782_soc_resume,
.dapm_widgets = wm8782_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8782_dapm_widgets),
.dapm_routes = wm8782_dapm_routes,
@@ -63,6 +108,24 @@ static const struct snd_soc_component_driver soc_component_dev_wm8782 = {
static int wm8782_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct wm8782_priv *priv;
+ int ret, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ for (i = 0; i < ARRAY_SIZE(supply_names); i++)
+ priv->supplies[i].supply = supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret < 0)
+ return ret;
+
return devm_snd_soc_register_component(&pdev->dev,
&soc_component_dev_wm8782, &wm8782_dai, 1);
}
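The wm8782 regulator support follows the standard bulk-supply pattern:
name the supplies once, request them as a group with a devm bulk get in
probe, then enable and disable the whole set from the component
callbacks. The probe-side half, with a hypothetical two-supply device:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const char * const foo_supply_names[] = { "Vdda", "Vdd" };

struct foo_priv {
        struct regulator_bulk_data supplies[ARRAY_SIZE(foo_supply_names)];
};

static int foo_get_supplies(struct device *dev, struct foo_priv *priv)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(foo_supply_names); i++)
                priv->supplies[i].supply = foo_supply_names[i];

        /* devm: released automatically on driver detach */
        return devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies),
                                       priv->supplies);
}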
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
index f27464c2c5ba..79541960f45d 100644
--- a/sound/soc/codecs/wm8804-i2c.c
+++ b/sound/soc/codecs/wm8804-i2c.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/acpi.h>
#include "wm8804.h"
@@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
+#if defined(CONFIG_OF)
static const struct of_device_id wm8804_of_match[] = {
{ .compatible = "wlf,wm8804", },
{ }
};
MODULE_DEVICE_TABLE(of, wm8804_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id wm8804_acpi_match[] = {
+ { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
+ { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
+#endif
static struct i2c_driver wm8804_i2c_driver = {
.driver = {
.name = "wm8804",
.pm = &wm8804_pm,
- .of_match_table = wm8804_of_match,
+ .of_match_table = of_match_ptr(wm8804_of_match),
+ .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
},
.probe = wm8804_i2c_probe,
.remove = wm8804_i2c_remove,
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 1965635ec07c..2a3e5fbd04e4 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 43edaf8cd276..593a11960888 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -11,7 +11,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 953d94d50586..e873baa9e778 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -638,13 +638,14 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
{
struct wm9712_priv *wm9712 = snd_soc_component_get_drvdata(component);
struct regmap *regmap;
- int ret;
if (wm9712->mfd_pdata) {
wm9712->ac97 = wm9712->mfd_pdata->ac97;
regmap = wm9712->mfd_pdata->regmap;
} else {
#ifdef CONFIG_SND_SOC_AC97_BUS
+ int ret;
+
wm9712->ac97 = snd_soc_new_ac97_component(component, WM9712_VENDOR_ID,
WM9712_VENDOR_ID_MASK);
if (IS_ERR(wm9712->ac97)) {
@@ -719,7 +720,7 @@ static int wm9712_probe(struct platform_device *pdev)
static struct platform_driver wm9712_component_driver = {
.driver = {
- .name = "wm9712-component",
+ .name = "wm9712-codec",
},
.probe = wm9712_probe,
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index f61656070225..a53dc174bbf0 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -311,12 +311,12 @@ struct wm_adsp_alg_xm_struct {
};
struct wm_adsp_buffer {
- __be32 X_buf_base; /* XM base addr of first X area */
- __be32 X_buf_size; /* Size of 1st X area in words */
- __be32 X_buf_base2; /* XM base addr of 2nd X area */
- __be32 X_buf_brk; /* Total X size in words */
- __be32 Y_buf_base; /* YM base addr of Y area */
- __be32 wrap; /* Total size X and Y in words */
+ __be32 buf1_base; /* Base addr of first buffer area */
+ __be32 buf1_size; /* Size of buf1 area in DSP words */
+ __be32 buf2_base; /* Base addr of 2nd buffer area */
+ __be32 buf1_buf2_size; /* Size of buf1+buf2 in DSP words */
+ __be32 buf3_base; /* Base addr of buf3 area */
+ __be32 buf_total_size; /* Size of buf1+buf2+buf3 in DSP words */
__be32 high_water_mark; /* Point at which IRQ is asserted */
__be32 irq_count; /* bits 1-31 count IRQ assertions */
__be32 irq_ack; /* acked IRQ count, bit 0 enables IRQ */
@@ -393,18 +393,18 @@ struct wm_adsp_buffer_region_def {
static const struct wm_adsp_buffer_region_def default_regions[] = {
{
.mem_type = WMFW_ADSP2_XM,
- .base_offset = HOST_BUFFER_FIELD(X_buf_base),
- .size_offset = HOST_BUFFER_FIELD(X_buf_size),
+ .base_offset = HOST_BUFFER_FIELD(buf1_base),
+ .size_offset = HOST_BUFFER_FIELD(buf1_size),
},
{
.mem_type = WMFW_ADSP2_XM,
- .base_offset = HOST_BUFFER_FIELD(X_buf_base2),
- .size_offset = HOST_BUFFER_FIELD(X_buf_brk),
+ .base_offset = HOST_BUFFER_FIELD(buf2_base),
+ .size_offset = HOST_BUFFER_FIELD(buf1_buf2_size),
},
{
.mem_type = WMFW_ADSP2_YM,
- .base_offset = HOST_BUFFER_FIELD(Y_buf_base),
- .size_offset = HOST_BUFFER_FIELD(wrap),
+ .base_offset = HOST_BUFFER_FIELD(buf3_base),
+ .size_offset = HOST_BUFFER_FIELD(buf_total_size),
},
};
@@ -3345,7 +3345,7 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
region->cumulative_size = offset;
adsp_dbg(buf->dsp,
- "region=%d type=%d base=%04x off=%04x size=%04x\n",
+ "region=%d type=%d base=%08x off=%08x size=%08x\n",
i, region->mem_type, region->base_addr,
region->offset, region->cumulative_size);
}
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index f70db8412c7c..267aee776b2d 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1041,6 +1041,42 @@ static int davinci_mcasp_calc_clk_div(struct davinci_mcasp *mcasp,
return error_ppm;
}
+static inline u32 davinci_mcasp_tx_delay(struct davinci_mcasp *mcasp)
+{
+ if (!mcasp->txnumevt)
+ return 0;
+
+ return mcasp_get_reg(mcasp, mcasp->fifo_base + MCASP_WFIFOSTS_OFFSET);
+}
+
+static inline u32 davinci_mcasp_rx_delay(struct davinci_mcasp *mcasp)
+{
+ if (!mcasp->rxnumevt)
+ return 0;
+
+ return mcasp_get_reg(mcasp, mcasp->fifo_base + MCASP_RFIFOSTS_OFFSET);
+}
+
+static snd_pcm_sframes_t davinci_mcasp_delay(
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
+ u32 fifo_use;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ fifo_use = davinci_mcasp_tx_delay(mcasp);
+ else
+ fifo_use = davinci_mcasp_rx_delay(mcasp);
+
+ /*
+ * Divide the number of used FIFO locations by the channel count
+ * to get the FIFO usage in samples (partial samples in the
+ * buffer are ignored).
+ */
+ return fifo_use / substream->runtime->channels;
+}
+
static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
@@ -1365,6 +1401,7 @@ static const struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
.startup = davinci_mcasp_startup,
.shutdown = davinci_mcasp_shutdown,
.trigger = davinci_mcasp_trigger,
+ .delay = davinci_mcasp_delay,
.hw_params = davinci_mcasp_hw_params,
.set_fmt = davinci_mcasp_set_dai_fmt,
.set_clkdiv = davinci_mcasp_set_clkdiv,
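The new .delay callback converts a FIFO fill level, counted in FIFO word
locations, into whole audio frames by dividing by the channel count, so
the latency ALSA reports includes samples still queued in hardware. The
arithmetic on its own (illustrative helper):

static unsigned int foo_fifo_delay_frames(unsigned int fifo_locations,
                                          unsigned int channels)
{
        /* partial frames left in the FIFO are intentionally ignored */
        return channels ? fifo_locations / channels : 0;
}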
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index 1033ac6631b0..01052a0808b0 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -151,7 +151,7 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream,
int ret;
/* Fetch the Back-End dma_data from DPCM */
- list_for_each_entry(dpcm, &rtd->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(rtd, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *substream_be;
struct snd_soc_dai *dai = be->cpu_dai;
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index c1d1d06783e5..57b484768a58 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -807,7 +807,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
return -ENOMEM;
esai_priv->pdev = pdev;
- strncpy(esai_priv->name, np->name, sizeof(esai_priv->name) - 1);
+ snprintf(esai_priv->name, sizeof(esai_priv->name), "%pOFn", np);
/* Get the addresses and IRQ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
index 7f0fa4b52223..9981668ab590 100644
--- a/sound/soc/fsl/fsl_utils.c
+++ b/sound/soc/fsl/fsl_utils.c
@@ -57,8 +57,8 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np,
of_node_put(dma_channel_np);
return ret;
}
- snprintf((char *)dai->platform_name, DAI_NAME_SIZE, "%llx.%s",
- (unsigned long long) res.start, dma_channel_np->name);
+ snprintf((char *)dai->platform_name, DAI_NAME_SIZE, "%llx.%pOFn",
+ (unsigned long long) res.start, dma_channel_np);
iprop = of_get_property(dma_channel_np, "cell-index", NULL);
if (!iprop) {
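Both fsl changes replace direct device_node name access with the %pOFn
printk extension, which prints the node name without dereferencing
np->name directly. Illustrative use, assuming a valid np:

#include <linux/of.h>
#include <linux/printk.h>

static void foo_log_node(const struct device_node *np)
{
        /* prints the node's name, e.g. "esai" */
        pr_info("probing %pOFn\n", np);
}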
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index ec731223cab3..e339f36cea95 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -57,6 +57,7 @@ static int pcm030_fabric_probe(struct platform_device *op)
struct device_node *platform_np;
struct snd_soc_card *card = &pcm030_card;
struct pcm030_audio_data *pdata;
+ struct snd_soc_dai_link *dai_link;
int ret;
int i;
@@ -78,8 +79,8 @@ static int pcm030_fabric_probe(struct platform_device *op)
return -ENODEV;
}
- for (i = 0; i < card->num_links; i++)
- card->dai_link[i].platform_of_node = platform_np;
+ for_each_card_prelinks(card, i, dai_link)
+ dai_link->platform_of_node = platform_np;
ret = request_module("snd-soc-wm9712");
if (ret)
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 2094d2c8919f..25c819e402e1 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -25,6 +25,8 @@ struct graph_card_data {
struct graph_dai_props {
struct asoc_simple_dai cpu_dai;
struct asoc_simple_dai codec_dai;
+ struct snd_soc_dai_link_component codecs; /* single codec */
+ struct snd_soc_dai_link_component platform;
unsigned int mclk_fs;
} *dai_props;
unsigned int mclk_fs;
@@ -180,7 +182,8 @@ static int asoc_graph_card_dai_link_of(struct device_node *cpu_port,
if (ret < 0)
goto dai_link_of_err;
- of_property_read_u32(rcpu_ep, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(cpu_ep, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(codec_ep, "mclk-fs", &dai_props->mclk_fs);
ret = asoc_simple_card_parse_graph_cpu(cpu_ep, dai_link);
if (ret < 0)
@@ -213,7 +216,7 @@ static int asoc_graph_card_dai_link_of(struct device_node *cpu_port,
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"%s-%s",
dai_link->cpu_dai_name,
- dai_link->codec_dai_name);
+ dai_link->codecs->dai_name);
if (ret < 0)
goto dai_link_of_err;
@@ -299,7 +302,7 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
struct graph_dai_props *dai_props;
struct device *dev = &pdev->dev;
struct snd_soc_card *card;
- int num, ret;
+ int num, ret, i;
/* Allocate the private data and the DAI link array */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -315,6 +318,18 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
if (!dai_props || !dai_link)
return -ENOMEM;
+ /*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * It is codec only for now, but cpu/platform will be supported in the future.
+ * see
+ * soc-core.c :: snd_soc_init_multicodec()
+ */
+ for (i = 0; i < num; i++) {
+ dai_link[i].codecs = &dai_props[i].codecs;
+ dai_link[i].num_codecs = 1;
+ dai_link[i].platform = &dai_props[i].platform;
+ }
+
priv->pa_gpio = devm_gpiod_get_optional(dev, "pa", GPIOD_OUT_LOW);
if (IS_ERR(priv->pa_gpio)) {
ret = PTR_ERR(priv->pa_gpio);
diff --git a/sound/soc/generic/audio-graph-scu-card.c b/sound/soc/generic/audio-graph-scu-card.c
index 92882e392d6c..b83bb31021a9 100644
--- a/sound/soc/generic/audio-graph-scu-card.c
+++ b/sound/soc/generic/audio-graph-scu-card.c
@@ -25,7 +25,11 @@
struct graph_card_data {
struct snd_soc_card snd_card;
struct snd_soc_codec_conf codec_conf;
- struct asoc_simple_dai *dai_props;
+ struct graph_dai_props {
+ struct asoc_simple_dai dai;
+ struct snd_soc_dai_link_component codecs;
+ struct snd_soc_dai_link_component platform;
+ } *dai_props;
struct snd_soc_dai_link *dai_link;
struct asoc_simple_card_data adata;
};
@@ -39,18 +43,18 @@ static int asoc_graph_card_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct asoc_simple_dai *dai_props = graph_priv_to_props(priv, rtd->num);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
- return asoc_simple_card_clk_enable(dai_props);
+ return asoc_simple_card_clk_enable(&dai_props->dai);
}
static void asoc_graph_card_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct asoc_simple_dai *dai_props = graph_priv_to_props(priv, rtd->num);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
- asoc_simple_card_clk_disable(dai_props);
+ asoc_simple_card_clk_disable(&dai_props->dai);
}
static const struct snd_soc_ops asoc_graph_card_ops = {
@@ -63,7 +67,7 @@ static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_dai *dai;
struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dai_props;
+ struct graph_dai_props *dai_props;
int num = rtd->num;
dai_link = graph_priv_to_link(priv, num);
@@ -72,7 +76,7 @@ static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
rtd->cpu_dai :
rtd->codec_dai;
- return asoc_simple_card_init_dai(dai, dai_props);
+ return asoc_simple_card_init_dai(dai, &dai_props->dai);
}
static int asoc_graph_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
@@ -92,15 +96,18 @@ static int asoc_graph_card_dai_link_of(struct device_node *ep,
{
struct device *dev = graph_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, idx);
- struct asoc_simple_dai *dai_props = graph_priv_to_props(priv, idx);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, idx);
struct snd_soc_card *card = graph_priv_to_card(priv);
int ret;
if (is_fe) {
+ struct snd_soc_dai_link_component *codecs;
+
/* BE is dummy */
- dai_link->codec_of_node = NULL;
- dai_link->codec_dai_name = "snd-soc-dummy-dai";
- dai_link->codec_name = "snd-soc-dummy";
+ codecs = dai_link->codecs;
+ codecs->of_node = NULL;
+ codecs->dai_name = "snd-soc-dummy-dai";
+ codecs->name = "snd-soc-dummy";
/* FE settings */
dai_link->dynamic = 1;
@@ -110,7 +117,7 @@ static int asoc_graph_card_dai_link_of(struct device_node *ep,
if (ret)
return ret;
- ret = asoc_simple_card_parse_clk_cpu(dev, ep, dai_link, dai_props);
+ ret = asoc_simple_card_parse_clk_cpu(dev, ep, dai_link, &dai_props->dai);
if (ret < 0)
return ret;
@@ -137,23 +144,23 @@ static int asoc_graph_card_dai_link_of(struct device_node *ep,
if (ret < 0)
return ret;
- ret = asoc_simple_card_parse_clk_codec(dev, ep, dai_link, dai_props);
+ ret = asoc_simple_card_parse_clk_codec(dev, ep, dai_link, &dai_props->dai);
if (ret < 0)
return ret;
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"be.%s",
- dai_link->codec_dai_name);
+ dai_link->codecs->dai_name);
if (ret < 0)
return ret;
snd_soc_of_parse_audio_prefix(card,
&priv->codec_conf,
- dai_link->codec_of_node,
+ dai_link->codecs->of_node,
"prefix");
}
- ret = asoc_simple_card_of_parse_tdm(ep, dai_props);
+ ret = asoc_simple_card_of_parse_tdm(ep, &dai_props->dai);
if (ret)
return ret;
@@ -331,10 +338,10 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
{
struct graph_card_data *priv;
struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dai_props;
+ struct graph_dai_props *dai_props;
struct device *dev = &pdev->dev;
struct snd_soc_card *card;
- int num, ret;
+ int num, ret, i;
/* Allocate the private data and the DAI link array */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -350,6 +357,18 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
if (!dai_props || !dai_link)
return -ENOMEM;
+ /*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * It is codec only for now, but cpu/platform will be supported in the future.
+ * see
+ * soc-core.c :: snd_soc_init_multicodec()
+ */
+ for (i = 0; i < num; i++) {
+ dai_link[i].codecs = &dai_props[i].codecs;
+ dai_link[i].num_codecs = 1;
+ dai_link[i].platform = &dai_props[i].platform;
+ }
+
priv->dai_props = dai_props;
priv->dai_link = dai_link;
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index d3f3f0fec74c..f34cc6cddfa2 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -173,12 +173,24 @@ int asoc_simple_card_parse_clk(struct device *dev,
struct device_node *node,
struct device_node *dai_of_node,
struct asoc_simple_dai *simple_dai,
- const char *name)
+ const char *dai_name,
+ struct snd_soc_dai_link_component *dlc)
{
struct clk *clk;
u32 val;
/*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * It is codec only for now, but cpu will be supported in the future.
+ * see
+ * soc-core.c :: snd_soc_init_multicodec()
+ */
+ if (dlc) {
+ dai_of_node = dlc->of_node;
+ dai_name = dlc->dai_name;
+ }
+
+ /*
* Parse dai->sysclk, which comes from "clocks = <&xxx>"
* (if system has common clock)
* or "system-clock-frequency = <xxx>"
@@ -200,7 +212,7 @@ int asoc_simple_card_parse_clk(struct device *dev,
if (of_property_read_bool(node, "system-clock-direction-out"))
simple_dai->clk_direction = SND_SOC_CLOCK_OUT;
- dev_dbg(dev, "%s : sysclk = %d, direction %d\n", name,
+ dev_dbg(dev, "%s : sysclk = %d, direction %d\n", dai_name,
simple_dai->sysclk, simple_dai->clk_direction);
return 0;
@@ -208,6 +220,7 @@ int asoc_simple_card_parse_clk(struct device *dev,
EXPORT_SYMBOL_GPL(asoc_simple_card_parse_clk);
int asoc_simple_card_parse_dai(struct device_node *node,
+ struct snd_soc_dai_link_component *dlc,
struct device_node **dai_of_node,
const char **dai_name,
const char *list_name,
@@ -221,6 +234,17 @@ int asoc_simple_card_parse_dai(struct device_node *node,
return 0;
/*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * It is codec only for now, but cpu will be supported in the future.
+ * see
+ * soc-core.c :: snd_soc_init_multicodec()
+ */
+ if (dlc) {
+ dai_name = &dlc->dai_name;
+ dai_of_node = &dlc->of_node;
+ }
+
+ /*
* Get node via "sound-dai = <&phandle port>"
* it will be used as xxx_of_node on soc_bind_dai_link()
*/
@@ -278,6 +302,7 @@ static int asoc_simple_card_get_dai_id(struct device_node *ep)
}
int asoc_simple_card_parse_graph_dai(struct device_node *ep,
+ struct snd_soc_dai_link_component *dlc,
struct device_node **dai_of_node,
const char **dai_name)
{
@@ -285,6 +310,17 @@ int asoc_simple_card_parse_graph_dai(struct device_node *ep,
struct of_phandle_args args;
int ret;
+ /*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * It is codec only for now, but cpu will be supported in the future.
+ * see
+ * soc-core.c :: snd_soc_init_multicodec()
+ */
+ if (dlc) {
+ dai_name = &dlc->dai_name;
+ dai_of_node = &dlc->of_node;
+ }
+
if (!ep)
return 0;
if (!dai_name)
@@ -340,10 +376,10 @@ EXPORT_SYMBOL_GPL(asoc_simple_card_init_dai);
int asoc_simple_card_canonicalize_dailink(struct snd_soc_dai_link *dai_link)
{
/* Assumes platform == cpu */
- if (!dai_link->platform_of_node)
- dai_link->platform_of_node = dai_link->cpu_of_node;
+ if (!dai_link->platform->of_node)
+ dai_link->platform->of_node = dai_link->cpu_of_node;
return 0;
}
EXPORT_SYMBOL_GPL(asoc_simple_card_canonicalize_dailink);
@@ -367,13 +404,11 @@ EXPORT_SYMBOL_GPL(asoc_simple_card_canonicalize_cpu);
int asoc_simple_card_clean_reference(struct snd_soc_card *card)
{
struct snd_soc_dai_link *dai_link;
- int num_links;
+ int i;
- for (num_links = 0, dai_link = card->dai_link;
- num_links < card->num_links;
- num_links++, dai_link++) {
+ for_each_card_prelinks(card, i, dai_link) {
of_node_put(dai_link->cpu_of_node);
- of_node_put(dai_link->codec_of_node);
+ of_node_put(dai_link->codecs->of_node);
}
return 0;
}
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 64bf3560c1d1..5a3f59aa4ba5 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -20,6 +20,8 @@ struct simple_card_data {
struct simple_dai_props {
struct asoc_simple_dai cpu_dai;
struct asoc_simple_dai codec_dai;
+ struct snd_soc_dai_link_component codecs; /* single codec */
+ struct snd_soc_dai_link_component platform;
unsigned int mclk_fs;
} *dai_props;
unsigned int mclk_fs;
@@ -234,7 +236,7 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"%s-%s",
dai_link->cpu_dai_name,
- dai_link->codec_dai_name);
+ dai_link->codecs->dai_name);
if (ret < 0)
goto dai_link_of_err;
@@ -363,7 +365,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct snd_soc_card *card;
- int num, ret;
+ int num, ret, i;
/* Get the number of DAI links */
if (np && of_get_child_by_name(np, PREFIX "dai-link"))
@@ -381,6 +383,18 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
if (!dai_props || !dai_link)
return -ENOMEM;
+ /*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * It is codec only for now, but cpu/platform will be supported in the future.
+ * see
+ * soc-core.c :: snd_soc_init_multicodec()
+ */
+ for (i = 0; i < num; i++) {
+ dai_link[i].codecs = &dai_props[i].codecs;
+ dai_link[i].num_codecs = 1;
+ dai_link[i].platform = &dai_props[i].platform;
+ }
+
priv->dai_props = dai_props;
priv->dai_link = dai_link;
@@ -403,6 +417,8 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
} else {
struct asoc_simple_card_info *cinfo;
+ struct snd_soc_dai_link_component *codecs;
+ struct snd_soc_dai_link_component *platform;
cinfo = dev->platform_data;
if (!cinfo) {
@@ -419,13 +435,17 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
return -EINVAL;
}
+ codecs = dai_link->codecs;
+ codecs->name = cinfo->codec;
+ codecs->dai_name = cinfo->codec_dai.name;
+
+ platform = dai_link->platform;
+ platform->name = cinfo->platform;
+
card->name = (cinfo->card) ? cinfo->card : cinfo->name;
dai_link->name = cinfo->name;
dai_link->stream_name = cinfo->name;
- dai_link->platform_name = cinfo->platform;
- dai_link->codec_name = cinfo->codec;
dai_link->cpu_dai_name = cinfo->cpu_dai.name;
- dai_link->codec_dai_name = cinfo->codec_dai.name;
dai_link->dai_fmt = cinfo->daifmt;
dai_link->init = asoc_simple_card_dai_init;
memcpy(&priv->dai_props->cpu_dai, &cinfo->cpu_dai,
diff --git a/sound/soc/generic/simple-scu-card.c b/sound/soc/generic/simple-scu-card.c
index 16a83bc51e0e..85b46f0eae0f 100644
--- a/sound/soc/generic/simple-scu-card.c
+++ b/sound/soc/generic/simple-scu-card.c
@@ -22,7 +22,11 @@
struct simple_card_data {
struct snd_soc_card snd_card;
struct snd_soc_codec_conf codec_conf;
- struct asoc_simple_dai *dai_props;
+ struct simple_dai_props {
+ struct asoc_simple_dai dai;
+ struct snd_soc_dai_link_component codecs;
+ struct snd_soc_dai_link_component platform;
+ } *dai_props;
struct snd_soc_dai_link *dai_link;
struct asoc_simple_card_data adata;
};
@@ -40,20 +44,20 @@ static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct asoc_simple_dai *dai_props =
+ struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
- return asoc_simple_card_clk_enable(dai_props);
+ return asoc_simple_card_clk_enable(&dai_props->dai);
}
static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct asoc_simple_dai *dai_props =
+ struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
- asoc_simple_card_clk_disable(dai_props);
+ asoc_simple_card_clk_disable(&dai_props->dai);
}
static const struct snd_soc_ops asoc_simple_card_ops = {
@@ -66,7 +70,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_dai *dai;
struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dai_props;
+ struct simple_dai_props *dai_props;
int num = rtd->num;
dai_link = simple_priv_to_link(priv, num);
@@ -75,7 +79,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
rtd->cpu_dai :
rtd->codec_dai;
- return asoc_simple_card_init_dai(dai, dai_props);
+ return asoc_simple_card_init_dai(dai, &dai_props->dai);
}
static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
@@ -95,17 +99,19 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
{
struct device *dev = simple_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
- struct asoc_simple_dai *dai_props = simple_priv_to_props(priv, idx);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, idx);
struct snd_soc_card *card = simple_priv_to_card(priv);
int ret;
if (is_fe) {
int is_single_links = 0;
+ struct snd_soc_dai_link_component *codecs;
/* BE is dummy */
- dai_link->codec_of_node = NULL;
- dai_link->codec_dai_name = "snd-soc-dummy-dai";
- dai_link->codec_name = "snd-soc-dummy";
+ codecs = dai_link->codecs;
+ codecs->of_node = NULL;
+ codecs->dai_name = "snd-soc-dummy-dai";
+ codecs->name = "snd-soc-dummy";
/* FE settings */
dai_link->dynamic = 1;
@@ -116,7 +122,7 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
if (ret)
return ret;
- ret = asoc_simple_card_parse_clk_cpu(dev, np, dai_link, dai_props);
+ ret = asoc_simple_card_parse_clk_cpu(dev, np, dai_link, &dai_props->dai);
if (ret < 0)
return ret;
@@ -141,23 +147,23 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
if (ret < 0)
return ret;
- ret = asoc_simple_card_parse_clk_codec(dev, np, dai_link, dai_props);
+ ret = asoc_simple_card_parse_clk_codec(dev, np, dai_link, &dai_props->dai);
if (ret < 0)
return ret;
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"be.%s",
- dai_link->codec_dai_name);
+ dai_link->codecs->dai_name);
if (ret < 0)
return ret;
snd_soc_of_parse_audio_prefix(card,
&priv->codec_conf,
- dai_link->codec_of_node,
+ dai_link->codecs->of_node,
PREFIX "prefix");
}
- ret = asoc_simple_card_of_parse_tdm(np, dai_props);
+ ret = asoc_simple_card_of_parse_tdm(np, &dai_props->dai);
if (ret)
return ret;
@@ -230,11 +236,11 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
{
struct simple_card_data *priv;
struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dai_props;
+ struct simple_dai_props *dai_props;
struct snd_soc_card *card;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- int num, ret;
+ int num, ret, i;
/* Allocate the private data */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -248,6 +254,18 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
if (!dai_props || !dai_link)
return -ENOMEM;
+ /*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * It is codec only for now, but cpu/platform will be supported in the future.
+ * see
+ * soc-core.c :: snd_soc_init_multicodec()
+ */
+ for (i = 0; i < num; i++) {
+ dai_link[i].codecs = &dai_props[i].codecs;
+ dai_link[i].num_codecs = 1;
+ dai_link[i].platform = &dai_props[i].platform;
+ }
+
priv->dai_props = dai_props;
priv->dai_link = dai_link;
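
With the probe loops above, each generic card now owns snd_soc_dai_link_component storage per link, and the legacy codec_* fields give way to the component fields. A minimal sketch of the field mapping, using the dummy-codec case from the FE branches above (set_dummy_codec() is a hypothetical helper):

#include <sound/soc.h>

/* Sketch: legacy codec fields expressed via snd_soc_dai_link_component.
 * Assumes dai_link->codecs already points at backing storage with
 * num_codecs == 1, as wired up in the probe loops above.
 */
static void set_dummy_codec(struct snd_soc_dai_link *dai_link)
{
	struct snd_soc_dai_link_component *codecs = dai_link->codecs;

	codecs->of_node  = NULL;                /* was codec_of_node */
	codecs->dai_name = "snd-soc-dummy-dai"; /* was codec_dai_name */
	codecs->name     = "snd-soc-dummy";     /* was codec_name */
}
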
diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
index 53344a3b7a60..a69e5b11b3da 100644
--- a/sound/soc/hisilicon/hi6210-i2s.c
+++ b/sound/soc/hisilicon/hi6210-i2s.c
@@ -269,13 +269,13 @@ static int hi6210_i2s_hw_params(struct snd_pcm_substream *substream,
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_U16_LE:
signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_FORMAT_S16_LE:
bits = HII2S_BITS_16;
break;
case SNDRV_PCM_FORMAT_U24_LE:
signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_FORMAT_S24_LE:
bits = HII2S_BITS_24;
break;
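
The hi6210 hunk only normalizes the fall-through annotation to the kernel's preferred spelling for -Wimplicit-fallthrough builds. A minimal sketch of the pattern, with hypothetical case values standing in for the PCM formats:

#include <linux/kernel.h>

/* Sketch: deliberate case fall-through with the recognized comment. */
static int bits_for_format(int fmt, bool *is_unsigned)
{
	int bits = 0;

	switch (fmt) {
	case 0:		/* unsigned 16-bit, for example */
		*is_unsigned = true;
		/* fall through */
	case 1:		/* signed 16-bit */
		bits = 16;
		break;
	}

	return bits;
}
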
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 6c36da560877..afc559866095 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -765,7 +765,7 @@ static int sst_soc_prepare(struct device *dev)
snd_soc_poweroff(drv->soc_card->dev);
/* set the SSPs to idle */
- list_for_each_entry(rtd, &drv->soc_card->rtd_list, list) {
+ for_each_card_rtds(drv->soc_card, rtd) {
struct snd_soc_dai *dai = rtd->cpu_dai;
if (dai->active) {
@@ -786,7 +786,7 @@ static void sst_soc_complete(struct device *dev)
return;
/* restart SSPs */
- list_for_each_entry(rtd, &drv->soc_card->rtd_list, list) {
+ for_each_card_rtds(drv->soc_card, rtd) {
struct snd_soc_dai *dai = rtd->cpu_dai;
if (dai->active) {
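
The Intel board changes below are the same mechanical substitution: open-coded walks over card->rtd_list and card->component_dev_list become for_each_card_rtds() and for_each_card_components(). A minimal sketch of both, with the codec name as an assumed parameter and scan_card() as a hypothetical helper:

#include <linux/string.h>
#include <sound/soc.h>

/* Sketch: visit every runtime and component hanging off a card. */
static void scan_card(struct snd_soc_card *card, const char *codec_name)
{
	struct snd_soc_pcm_runtime *rtd;
	struct snd_soc_component *component;

	for_each_card_rtds(card, rtd)
		if (rtd->cpu_dai->active)
			dev_dbg(card->dev, "%s active\n", rtd->cpu_dai->name);

	for_each_card_components(card, component)
		if (!strcmp(component->name, codec_name))
			dev_dbg(component->dev, "matched codec\n");
}
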
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index cccda87f4b34..73ca1350aa31 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -279,6 +279,28 @@ config SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for DA7219 + MAX98357A I2S audio codec.
Say Y if you have such a device.
+
+config SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH
+ tristate "KBL with DA7219 and MAX98927 in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_DA7219
+ select SND_SOC_MAX98927
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for ASoC Onboard Codec I2S machine driver. This will
+ create an ALSA sound card for DA7219 + MAX98927 I2S audio codecs.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
+ tristate "SKL/KBL/BXT/APL with HDA Codecs"
+ select SND_SOC_HDAC_HDMI
+ select SND_SOC_HDAC_HDA
+ help
+ This adds support for the ASoC machine driver for Intel
+ SKL/KBL/BXT/APL platforms with iDisp and HDA audio codecs.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 87ef8b4058e5..5381e27df9cc 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -17,9 +17,11 @@ snd-soc-sst-byt-cht-da7213-objs := bytcht_da7213.o
snd-soc-sst-byt-cht-es8316-objs := bytcht_es8316.o
snd-soc-sst-byt-cht-nocodec-objs := bytcht_nocodec.o
snd-soc-kbl_da7219_max98357a-objs := kbl_da7219_max98357a.o
+snd-soc-kbl_da7219_max98927-objs := kbl_da7219_max98927.o
snd-soc-kbl_rt5663_max98927-objs := kbl_rt5663_max98927.o
snd-soc-kbl_rt5663_rt5514_max98927-objs := kbl_rt5663_rt5514_max98927.o
snd-soc-skl_rt286-objs := skl_rt286.o
+snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o
snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o
snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o
@@ -41,8 +43,10 @@ obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH) += snd-soc-sst-byt-cht-da7213.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH) += snd-soc-sst-byt-cht-es8316.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH) += snd-soc-sst-byt-cht-nocodec.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH) += snd-soc-kbl_da7219_max98357a.o
+obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH) += snd-soc-kbl_da7219_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH) += snd-soc-kbl_rt5663_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH) += snd-soc-kbl_rt5663_rt5514_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_RT286_MACH) += snd-soc-skl_rt286.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH) += snd-skl_nau88l25_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH) += snd-soc-skl_nau88l25_ssm4567.o
+obj-$(CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH) += snd-soc-skl_hda_dsp.o
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 7b0ee67b4fc8..68e6543e6cb0 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -223,7 +223,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
static int broadwell_suspend(struct snd_soc_card *card){
struct snd_soc_component *component;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, "i2c-INT343A:00")) {
dev_dbg(component->dev, "disabling jack detect before going to suspend.\n");
@@ -237,7 +237,7 @@ static int broadwell_suspend(struct snd_soc_card *card){
static int broadwell_resume(struct snd_soc_card *card){
struct snd_soc_component *component;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, "i2c-INT343A:00")) {
dev_dbg(component->dev, "enabling jack detect for resume.\n");
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index d32844f94d74..8587bd3d1cc1 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -575,6 +575,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_MONO_SPEAKER |
BYT_RT5640_MCLK_EN),
},
+ { /* Linx Linx7 tablet */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LINX7"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_JD_NOT_INV |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* MSI S100 tablet */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
@@ -602,6 +613,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* Onda v975w */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* The above are too generic, also match BIOS info */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "5.6.5"),
+ DMI_EXACT_MATCH(DMI_BIOS_DATE, "07/25/2014"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Pipo W4 */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -1022,7 +1048,7 @@ static int byt_rt5640_suspend(struct snd_soc_card *card)
if (!BYT_RT5640_JDSRC(byt_rt5640_quirk))
return 0;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, byt_rt5640_codec_name)) {
dev_dbg(component->dev, "disabling jack detect before suspend\n");
snd_soc_component_set_jack(component, NULL, NULL);
@@ -1041,7 +1067,7 @@ static int byt_rt5640_resume(struct snd_soc_card *card)
if (!BYT_RT5640_JDSRC(byt_rt5640_quirk))
return 0;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, byt_rt5640_codec_name)) {
dev_dbg(component->dev, "re-enabling jack detect after resume\n");
snd_soc_component_set_jack(component, &priv->jack, NULL);
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index f8a68bdb3885..c44298130720 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -742,7 +742,7 @@ static int byt_rt5651_suspend(struct snd_soc_card *card)
if (!BYT_RT5651_JDSRC(byt_rt5651_quirk))
return 0;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, byt_rt5651_codec_name)) {
dev_dbg(component->dev, "disabling jack detect before suspend\n");
snd_soc_component_set_jack(component, NULL, NULL);
@@ -761,7 +761,7 @@ static int byt_rt5651_resume(struct snd_soc_card *card)
if (!BYT_RT5651_JDSRC(byt_rt5651_quirk))
return 0;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, byt_rt5651_codec_name)) {
dev_dbg(component->dev, "re-enabling jack detect after resume\n");
snd_soc_component_set_jack(component, &priv->jack, NULL);
@@ -787,7 +787,7 @@ static struct snd_soc_card byt_rt5651_card = {
};
static const struct x86_cpu_id baytrail_cpu_ids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, /* Valleyview */
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, /* Valleyview */
{}
};
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index e5aa13058dd7..51f0d45d6f8f 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -16,6 +16,7 @@
* General Public License for more details.
*/
+#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -212,6 +213,10 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
if (ret)
return ret;
+ snd_jack_set_key(ctx->headset.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(ctx->headset.jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(ctx->headset.jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+
rt5670_set_jack_detect(component, &ctx->headset);
if (ctx->mclk) {
/*
@@ -342,7 +347,7 @@ static int cht_suspend_pre(struct snd_soc_card *card)
struct snd_soc_component *component;
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strncmp(component->name,
ctx->codec_name, sizeof(ctx->codec_name))) {
@@ -359,7 +364,7 @@ static int cht_resume_post(struct snd_soc_card *card)
struct snd_soc_component *component;
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strncmp(component->name,
ctx->codec_name, sizeof(ctx->codec_name))) {
diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
new file mode 100644
index 000000000000..3fa1c3ca6d37
--- /dev/null
+++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
@@ -0,0 +1,976 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018 Intel Corporation.
+
+/*
+ * Intel Kabylake I2S Machine Driver with MAX98927 & DA7219 Codecs
+ *
+ * Modified from:
+ * Intel Kabylake I2S Machine driver supporting MAX98927 and
+ * RT5663 codecs
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/da7219.h"
+#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
+#include "../../codecs/da7219-aad.h"
+
+#define KBL_DIALOG_CODEC_DAI "da7219-hifi"
+#define MAX98927_CODEC_DAI "max98927-aif1"
+#define MAXIM_DEV0_NAME "i2c-MX98927:00"
+#define MAXIM_DEV1_NAME "i2c-MX98927:01"
+#define DUAL_CHANNEL 2
+#define QUAD_CHANNEL 4
+#define NAME_SIZE 32
+
+static struct snd_soc_card *kabylake_audio_card;
+static struct snd_soc_jack kabylake_hdmi[3];
+
+struct kbl_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+struct kbl_codec_private {
+ struct snd_soc_jack kabylake_headset;
+ struct list_head hdmi_pcm_list;
+};
+
+enum {
+ KBL_DPCM_AUDIO_PB = 0,
+ KBL_DPCM_AUDIO_CP,
+ KBL_DPCM_AUDIO_ECHO_REF_CP,
+ KBL_DPCM_AUDIO_REF_CP,
+ KBL_DPCM_AUDIO_DMIC_CP,
+ KBL_DPCM_AUDIO_HDMI1_PB,
+ KBL_DPCM_AUDIO_HDMI2_PB,
+ KBL_DPCM_AUDIO_HDMI3_PB,
+ KBL_DPCM_AUDIO_HS_PB,
+};
+
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ int ret = 0;
+
+ codec_dai = snd_soc_card_get_codec_dai(card, KBL_DIALOG_CODEC_DAI);
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n");
+ return -EIO;
+ }
+
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, DA7219_CLKSRC_MCLK, 24576000,
+ SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(card->dev, "can't set codec sysclk configuration\n");
+ return ret;
+ }
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ DA7219_SYSCLK_MCLK, 0, 0);
+ if (ret)
+ dev_err(card->dev, "failed to stop PLL: %d\n", ret);
+ } else if (SND_SOC_DAPM_EVENT_ON(event)) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_PLL_SRM,
+ 0, DA7219_PLL_FREQ_OUT_98304);
+ if (ret)
+ dev_err(card->dev, "failed to start PLL: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static const struct snd_kcontrol_new kabylake_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Left Spk"),
+ SOC_DAPM_PIN_SWITCH("Right Spk"),
+};
+
+static const struct snd_soc_dapm_widget kabylake_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Left Spk", NULL),
+ SND_SOC_DAPM_SPK("Right Spk", NULL),
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+ SND_SOC_DAPM_SPK("DP", NULL),
+ SND_SOC_DAPM_SPK("HDMI", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route kabylake_map[] = {
+ /* speaker */
+ { "Left Spk", NULL, "Left BE_OUT" },
+ { "Right Spk", NULL, "Right BE_OUT" },
+
+ /* other jacks */
+ { "DMic", NULL, "SoC DMIC" },
+
+ { "HDMI", NULL, "hif5 Output" },
+ { "DP", NULL, "hif6 Output" },
+
+ /* CODEC BE connections */
+ { "Left HiFi Playback", NULL, "ssp0 Tx" },
+ { "Right HiFi Playback", NULL, "ssp0 Tx" },
+ { "ssp0 Tx", NULL, "spk_out" },
+
+ /* IV feedback path */
+ { "codec0_fb_in", NULL, "ssp0 Rx"},
+ { "ssp0 Rx", NULL, "Left HiFi Capture" },
+ { "ssp0 Rx", NULL, "Right HiFi Capture" },
+
+ /* AEC capture path */
+ { "echo_ref_out", NULL, "ssp0 Rx" },
+
+ /* DMIC */
+ { "dmic01_hifi", NULL, "DMIC01 Rx" },
+ { "DMIC01 Rx", NULL, "DMIC AIF" },
+
+ { "hifi1", NULL, "iDisp1 Tx" },
+ { "iDisp1 Tx", NULL, "iDisp1_out" },
+ { "hifi2", NULL, "iDisp2 Tx" },
+ { "iDisp2 Tx", NULL, "iDisp2_out" },
+ { "hifi3", NULL, "iDisp3 Tx"},
+ { "iDisp3 Tx", NULL, "iDisp3_out"},
+};
+
+static const struct snd_soc_dapm_route kabylake_ssp1_map[] = {
+ { "Headphone Jack", NULL, "HPL" },
+ { "Headphone Jack", NULL, "HPR" },
+
+ /* other jacks */
+ { "MIC", NULL, "Headset Mic" },
+
+ /* CODEC BE connections */
+ { "Playback", NULL, "ssp1 Tx" },
+ { "ssp1 Tx", NULL, "codec1_out" },
+
+ { "hs_in", NULL, "ssp1 Rx" },
+ { "ssp1 Rx", NULL, "Capture" },
+
+ { "Headphone Jack", NULL, "Platform Clock" },
+ { "Headset Mic", NULL, "Platform Clock" },
+};
+
+static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *runtime = substream->private_data;
+ int ret = 0, j;
+
+ for (j = 0; j < runtime->num_codecs; j++) {
+ struct snd_soc_dai *codec_dai = runtime->codec_dais[j];
+
+ if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(runtime->dev, "DEV0 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ if (!strcmp(codec_dai->component->name, MAXIM_DEV1_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(runtime->dev, "DEV1 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops kabylake_ssp0_ops = {
+ .hw_params = kabylake_ssp0_hw_params,
+};
+
+static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_soc_dpcm *dpcm = container_of(
+ params, struct snd_soc_dpcm, hw_params);
+ struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
+ struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
+
+ /*
+ * The ADSP will convert the FE rate to 48k, stereo, 24 bit
+ */
+ if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+ snd_mask_none(fmt);
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ }
+
+ /*
+ * The speakers on SSP0 support S16_LE and not S24_LE,
+ * thus change the mask here.
+ */
+ if (!strcmp(be_dai_link->name, "SSP0-Codec"))
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S16_LE);
+
+ return 0;
+}
+
+static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_jack *jack;
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+
+ ret = snd_soc_dapm_add_routes(&card->dapm,
+ kabylake_ssp1_map,
+ ARRAY_SIZE(kabylake_ssp1_map));
+
+ /*
+ * Headset buttons map to the Google Reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(kabylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &ctx->kabylake_headset, NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ jack = &ctx->kabylake_headset;
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+
+ da7219_aad_jack_det(component, &ctx->kabylake_headset);
+
+ ret = snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
+ if (ret)
+ dev_err(rtd->dev, "SoC DMIC - Ignore suspend failed %d\n", ret);
+
+ return ret;
+}
+
+static int kabylake_hdmi_init(struct snd_soc_pcm_runtime *rtd, int device)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct kbl_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = device;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int kabylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI1_PB);
+}
+
+static int kabylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI2_PB);
+}
+
+static int kabylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI3_PB);
+}
+
+static int kabylake_da7219_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dapm_context *dapm;
+ struct snd_soc_component *component = rtd->cpu_dai->component;
+
+ dapm = snd_soc_component_get_dapm(component);
+ snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
+
+ return 0;
+}
+
+static const unsigned int rates[] = {
+ 48000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static const unsigned int channels[] = {
+ DUAL_CHANNEL,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static unsigned int channels_quad[] = {
+ QUAD_CHANNEL,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
+ .count = ARRAY_SIZE(channels_quad),
+ .list = channels_quad,
+ .mask = 0,
+};
+
+static int kbl_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /*
+ * On this platform the FE PCM device supports:
+ * 48 kHz,
+ * stereo,
+ * 16-bit audio
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+
+ return 0;
+}
+
+static const struct snd_soc_ops kabylake_da7219_fe_ops = {
+ .startup = kbl_fe_startup,
+};
+
+static int kabylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ /*
+ * Set the BE channel constraint to match the FE channel count chosen by userspace.
+ */
+
+ if (params_channels(params) == 2)
+ channels->min = channels->max = 2;
+ else
+ channels->min = channels->max = 4;
+
+ return 0;
+}
+
+static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels_quad);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+}
+
+static struct snd_soc_ops kabylake_dmic_ops = {
+ .startup = kabylake_dmic_startup,
+};
+
+static const unsigned int rates_16000[] = {
+ 16000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_16000 = {
+ .count = ARRAY_SIZE(rates_16000),
+ .list = rates_16000,
+};
+
+static const unsigned int ch_mono[] = {
+ 1,
+};
+static const struct snd_pcm_hw_constraint_list constraints_refcap = {
+ .count = ARRAY_SIZE(ch_mono),
+ .list = ch_mono,
+};
+
+static int kabylake_refcap_startup(struct snd_pcm_substream *substream)
+{
+ substream->runtime->hw.channels_max = 1;
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_refcap);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_16000);
+}
+
+static struct snd_soc_ops kabylake_refcap_ops = {
+ .startup = kabylake_refcap_startup,
+};
+
+static struct snd_soc_codec_conf max98927_codec_conf[] = {
+ {
+ .dev_name = MAXIM_DEV0_NAME,
+ .name_prefix = "Right",
+ },
+
+ {
+ .dev_name = MAXIM_DEV1_NAME,
+ .name_prefix = "Left",
+ },
+};
+
+static struct snd_soc_dai_link_component ssp0_codec_components[] = {
+ { /* Right */
+ .name = MAXIM_DEV0_NAME,
+ .dai_name = MAX98927_CODEC_DAI,
+ },
+
+ { /* Left */
+ .name = MAXIM_DEV1_NAME,
+ .dai_name = MAX98927_CODEC_DAI,
+ },
+};
+
+/* kabylake digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link kabylake_dais[] = {
+ /* Front End DAI links */
+ [KBL_DPCM_AUDIO_PB] = {
+ .name = "Kbl Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_CP] = {
+ .name = "Kbl Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_ECHO_REF_CP] = {
+ .name = "Kbl Audio Echo Reference cap",
+ .stream_name = "Echoreference Capture",
+ .cpu_dai_name = "Echoref Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .capture_only = 1,
+ .nonatomic = 1,
+ },
+ [KBL_DPCM_AUDIO_REF_CP] = {
+ .name = "Kbl Audio Reference cap",
+ .stream_name = "Wake on Voice",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_refcap_ops,
+ },
+ [KBL_DPCM_AUDIO_DMIC_CP] = {
+ .name = "Kbl Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_dmic_ops,
+ },
+ [KBL_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Kbl HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Kbl HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI3_PB] = {
+ .name = "Kbl HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HS_PB] = {
+ .name = "Kbl Audio Headset Playback",
+ .stream_name = "Headset Audio",
+ .cpu_dai_name = "System Pin2",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .ops = &kabylake_da7219_fe_ops,
+ },
+
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP0 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codecs = ssp0_codec_components,
+ .num_codecs = ARRAY_SIZE(ssp0_codec_components),
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .ops = &kabylake_ssp0_ops,
+ },
+ {
+ /* SSP1 - Codec */
+ .name = "SSP1-Codec",
+ .id = 1,
+ .cpu_dai_name = "SSP1 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codec_name = "i2c-DLGS7219:00",
+ .codec_dai_name = KBL_DIALOG_CODEC_DAI,
+ .init = kabylake_da7219_codec_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ {
+ .name = "dmic01",
+ .id = 2,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .platform_name = "0000:00:1f.3",
+ .be_hw_params_fixup = kabylake_dmic_fixup,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 3,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = kabylake_hdmi1_init,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 4,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi2_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 5,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi3_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+/* kabylake digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link kabylake_max98927_dais[] = {
+ /* Front End DAI links */
+ [KBL_DPCM_AUDIO_PB] = {
+ .name = "Kbl Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_CP] = {
+ .name = "Kbl Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_ECHO_REF_CP] = {
+ .name = "Kbl Audio Echo Reference cap",
+ .stream_name = "Echoreference Capture",
+ .cpu_dai_name = "Echoref Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .capture_only = 1,
+ .nonatomic = 1,
+ },
+ [KBL_DPCM_AUDIO_REF_CP] = {
+ .name = "Kbl Audio Reference cap",
+ .stream_name = "Wake on Voice",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_refcap_ops,
+ },
+ [KBL_DPCM_AUDIO_DMIC_CP] = {
+ .name = "Kbl Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_dmic_ops,
+ },
+ [KBL_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Kbl HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Kbl HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI3_PB] = {
+ .name = "Kbl HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP0 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codecs = ssp0_codec_components,
+ .num_codecs = ARRAY_SIZE(ssp0_codec_components),
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .ops = &kabylake_ssp0_ops,
+ },
+ {
+ .name = "dmic01",
+ .id = 1,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .platform_name = "0000:00:1f.3",
+ .be_hw_params_fixup = kabylake_dmic_fixup,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 2,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = kabylake_hdmi1_init,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 3,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi2_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 4,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi3_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+static int kabylake_card_late_probe(struct snd_soc_card *card)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(card);
+ struct kbl_hdmi_pcm *pcm;
+ struct snd_soc_component *component = NULL;
+ int err, i = 0;
+ char jack_name[NAME_SIZE];
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ component = pcm->codec_dai->component;
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ err = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &kabylake_hdmi[i],
+ NULL, 0);
+
+ if (err)
+ return err;
+
+ err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &kabylake_hdmi[i]);
+ if (err < 0)
+ return err;
+
+ i++;
+ }
+
+ if (!component)
+ return -EINVAL;
+
+ return hdac_hdmi_jack_port_init(component, &card->dapm);
+}
+
+/* kabylake audio machine driver for KBL with DA7219 and MAX98927 */
+static struct snd_soc_card kbl_audio_card_da7219_m98927 = {
+ .name = "kblda7219m98927",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_dais,
+ .num_links = ARRAY_SIZE(kabylake_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+/* kabylake audio machine driver for Maxim98927 */
+static struct snd_soc_card kbl_audio_card_max98927 = {
+ .name = "kblmax98927",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_max98927_dais,
+ .num_links = ARRAY_SIZE(kabylake_max98927_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+static int kabylake_audio_probe(struct platform_device *pdev)
+{
+ struct kbl_codec_private *ctx;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ kabylake_audio_card =
+ (struct snd_soc_card *)pdev->id_entry->driver_data;
+
+ kabylake_audio_card->dev = &pdev->dev;
+ snd_soc_card_set_drvdata(kabylake_audio_card, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev, kabylake_audio_card);
+}
+
+static const struct platform_device_id kbl_board_ids[] = {
+ {
+ .name = "kbl_da7219_max98927",
+ .driver_data =
+ (kernel_ulong_t)&kbl_audio_card_da7219_m98927,
+ },
+ {
+ .name = "kbl_max98927",
+ .driver_data =
+ (kernel_ulong_t)&kbl_audio_card_max98927,
+ },
+ { }
+};
+
+static struct platform_driver kabylake_audio = {
+ .probe = kabylake_audio_probe,
+ .driver = {
+ .name = "kbl_da7219_max98927",
+ .pm = &snd_soc_pm_ops,
+ },
+ .id_table = kbl_board_ids,
+};
+
+module_platform_driver(kabylake_audio)
+
+/* Module information */
+MODULE_DESCRIPTION("Audio KabyLake Machine driver for MAX98927 & DA7219");
+MODULE_AUTHOR("Mac Chiang <mac.chiang@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kbl_da7219_max98927");
+MODULE_ALIAS("platform:kbl_max98927");
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 21a6490746a6..99e1320c485f 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -488,11 +488,10 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
int ret = 0, j;
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
-
+ for_each_rtd_codec_dai(rtd, j, codec_dai) {
if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME)) {
/*
* Use channel 4 and 5 for the first amp
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index a892b37eab7c..a737c915d46a 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -353,11 +353,10 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
int ret = 0, j;
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
-
+ for_each_rtd_codec_dai(rtd, j, codec_dai) {
if (!strcmp(codec_dai->component->name, RT5514_DEV_NAME)) {
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0, 8, 16);
if (ret < 0) {
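
Both kbl_rt5663 hunks hoist the codec_dai declaration so the loop body can use for_each_rtd_codec_dai(), which hides the rtd->codec_dais indexing. A minimal sketch; the TDM mask and slot values here are placeholders, and set_all_tdm() is a hypothetical helper:

#include <sound/soc.h>

/* Sketch: apply a TDM slot setup to every codec DAI on the runtime. */
static int set_all_tdm(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dai *codec_dai;
	int i, ret;

	for_each_rtd_codec_dai(rtd, i, codec_dai) {
		ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 2, 16);
		if (ret < 0)
			return ret;
	}

	return 0;
}
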
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.c b/sound/soc/intel/boards/skl_hda_dsp_common.c
new file mode 100644
index 000000000000..3fdbf239da74
--- /dev/null
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-18 Intel Corporation.
+
+/*
+ * Common functions used in different Intel machine drivers
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
+#include "skl_hda_dsp_common.h"
+
+#define NAME_SIZE 32
+
+int skl_hda_hdmi_add_pcm(struct snd_soc_card *card, int device)
+{
+ struct skl_hda_private *ctx = snd_soc_card_get_drvdata(card);
+ struct skl_hda_hdmi_pcm *pcm;
+ char dai_name[NAME_SIZE];
+
+ pcm = devm_kzalloc(card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ snprintf(dai_name, sizeof(dai_name), "intel-hdmi-hifi%d",
+ ctx->dai_index);
+ pcm->codec_dai = snd_soc_card_get_codec_dai(card, dai_name);
+ if (!pcm->codec_dai)
+ return -EINVAL;
+
+ pcm->device = device;
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+/* skl_hda_digital audio interface glue - connects codec <--> CPU */
+struct snd_soc_dai_link skl_hda_be_dai_links[HDA_DSP_MAX_BE_DAI_LINKS] = {
+ /* Back End DAI links */
+ {
+ .name = "iDisp1",
+ .id = 1,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 2,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 3,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "Analog Playback and Capture",
+ .id = 4,
+ .cpu_dai_name = "Analog CPU DAI",
+ .codec_name = "ehdaudio0D0",
+ .codec_dai_name = "Analog Codec DAI",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .init = NULL,
+ .no_pcm = 1,
+ },
+ {
+ .name = "Digital Playback and Capture",
+ .id = 5,
+ .cpu_dai_name = "Digital CPU DAI",
+ .codec_name = "ehdaudio0D0",
+ .codec_dai_name = "Digital Codec DAI",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .init = NULL,
+ .no_pcm = 1,
+ },
+};
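+
+/*
+ * All HDA_DSP_MAX_BE_DAI_LINKS (5) slots are used: three iDisp links
+ * for HDMI/DP plus the Analog and Digital links for the HDA codec at
+ * bus address 0. skl_hda_fill_card_info() keeps only the first three
+ * when the iDisp codec is the sole codec on the bus.
+ */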
+
+int skl_hda_hdmi_jack_init(struct snd_soc_card *card)
+{
+ struct skl_hda_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component = NULL;
+ struct skl_hda_hdmi_pcm *pcm;
+ char jack_name[NAME_SIZE];
+ int err;
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ component = pcm->codec_dai->component;
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ err = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &pcm->hdmi_jack,
+ NULL, 0);
+
+ if (err)
+ return err;
+
+ err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &pcm->hdmi_jack);
+ if (err < 0)
+ return err;
+ }
+
+ if (!component)
+ return -EINVAL;
+
+ return hdac_hdmi_jack_port_init(component, &card->dapm);
+}
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.h b/sound/soc/intel/boards/skl_hda_dsp_common.h
new file mode 100644
index 000000000000..87c50aff56cd
--- /dev/null
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2015-18 Intel Corporation.
+ */
+
+/*
+ * This file defines data structures used in Machine Driver for Intel
+ * platforms with HDA Codecs.
+ */
+
+#ifndef __SOUND_SOC_HDA_DSP_COMMON_H
+#define __SOUND_SOC_HDA_DSP_COMMON_H
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+
+#define HDA_DSP_MAX_BE_DAI_LINKS 5
+
+struct skl_hda_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ struct snd_soc_jack hdmi_jack;
+ int device;
+};
+
+struct skl_hda_private {
+ struct list_head hdmi_pcm_list;
+ int pcm_count;
+ int dai_index;
+ const char *platform_name;
+};
+
+extern struct snd_soc_dai_link skl_hda_be_dai_links[HDA_DSP_MAX_BE_DAI_LINKS];
+int skl_hda_hdmi_jack_init(struct snd_soc_card *card);
+int skl_hda_hdmi_add_pcm(struct snd_soc_card *card, int device);
+
+#endif /* __SOUND_SOC_HDA_DSP_COMMON_H */
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
new file mode 100644
index 000000000000..b415dd4c85f5
--- /dev/null
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-18 Intel Corporation.
+
+/*
+ * Machine Driver for SKL+ platforms with DSP and iDisp, HDA Codecs
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
+#include "skl_hda_dsp_common.h"
+
+static const struct snd_soc_dapm_widget skl_hda_widgets[] = {
+ SND_SOC_DAPM_HP("Analog Out", NULL),
+ SND_SOC_DAPM_MIC("Analog In", NULL),
+ SND_SOC_DAPM_HP("Alt Analog Out", NULL),
+ SND_SOC_DAPM_MIC("Alt Analog In", NULL),
+ SND_SOC_DAPM_SPK("Digital Out", NULL),
+ SND_SOC_DAPM_MIC("Digital In", NULL),
+};
+
+static const struct snd_soc_dapm_route skl_hda_map[] = {
+ { "hifi3", NULL, "iDisp3 Tx"},
+ { "iDisp3 Tx", NULL, "iDisp3_out"},
+ { "hifi2", NULL, "iDisp2 Tx"},
+ { "iDisp2 Tx", NULL, "iDisp2_out"},
+ { "hifi1", NULL, "iDisp1 Tx"},
+ { "iDisp1 Tx", NULL, "iDisp1_out"},
+
+ { "Analog Out", NULL, "Codec Output Pin1" },
+ { "Digital Out", NULL, "Codec Output Pin2" },
+ { "Alt Analog Out", NULL, "Codec Output Pin3" },
+
+ { "Codec Input Pin1", NULL, "Analog In" },
+ { "Codec Input Pin2", NULL, "Digital In" },
+ { "Codec Input Pin3", NULL, "Alt Analog In" },
+
+ /* CODEC BE connections */
+ { "Analog Codec Playback", NULL, "Analog CPU Playback" },
+ { "Analog CPU Playback", NULL, "codec0_out" },
+ { "Digital Codec Playback", NULL, "Digital CPU Playback" },
+ { "Digital CPU Playback", NULL, "codec1_out" },
+ { "Alt Analog Codec Playback", NULL, "Alt Analog CPU Playback" },
+ { "Alt Analog CPU Playback", NULL, "codec2_out" },
+
+ { "codec0_in", NULL, "Analog CPU Capture" },
+ { "Analog CPU Capture", NULL, "Analog Codec Capture" },
+ { "codec1_in", NULL, "Digital CPU Capture" },
+ { "Digital CPU Capture", NULL, "Digital Codec Capture" },
+ { "codec2_in", NULL, "Alt Analog CPU Capture" },
+ { "Alt Analog CPU Capture", NULL, "Alt Analog Codec Capture" },
+};
+
+static int skl_hda_card_late_probe(struct snd_soc_card *card)
+{
+ return skl_hda_hdmi_jack_init(card);
+}
+
+static int
+skl_hda_add_dai_link(struct snd_soc_card *card, struct snd_soc_dai_link *link)
+{
+ struct skl_hda_private *ctx = snd_soc_card_get_drvdata(card);
+ int ret = 0;
+
+ dev_dbg(card->dev, "%s: dai link name - %s\n", __func__, link->name);
+ link->platform_name = ctx->platform_name;
+ link->nonatomic = 1;
+
+ if (strstr(link->name, "HDMI")) {
+ ret = skl_hda_hdmi_add_pcm(card, ctx->pcm_count);
+
+ if (ret < 0)
+ return ret;
+
+ ctx->dai_index++;
+ }
+
+ ctx->pcm_count++;
+ return ret;
+}
+
+static struct snd_soc_card hda_soc_card = {
+ .name = "skl_hda_card",
+ .owner = THIS_MODULE,
+ .dai_link = skl_hda_be_dai_links,
+ .dapm_widgets = skl_hda_widgets,
+ .dapm_routes = skl_hda_map,
+ .add_dai_link = skl_hda_add_dai_link,
+ .fully_routed = true,
+ .late_probe = skl_hda_card_late_probe,
+};
+
+#define IDISP_DAI_COUNT 3
+/* there are two routes per iDisp output */
+#define IDISP_ROUTE_COUNT (IDISP_DAI_COUNT * 2)
+#define IDISP_CODEC_MASK 0x4
+
+static int skl_hda_fill_card_info(struct skl_machine_pdata *pdata)
+{
+ struct snd_soc_card *card = &hda_soc_card;
+ struct snd_soc_dai_link *dai_link;
+ u32 codec_count, codec_mask;
+ int i, num_links, num_route;
+
+ codec_mask = pdata->codec_mask;
+ codec_count = hweight_long(codec_mask);
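+ /*
+ * e.g. iDisp alone at bus address 2 gives codec_mask = 0x4 and
+ * codec_count = 1; iDisp plus one HDA codec gives codec_mask = 0x5
+ * and codec_count = 2. Anything else is rejected below.
+ */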
+
+ if (codec_count == 1 && pdata->codec_mask & IDISP_CODEC_MASK) {
+ num_links = IDISP_DAI_COUNT;
+ num_route = IDISP_ROUTE_COUNT;
+ } else if (codec_count == 2 && codec_mask & IDISP_CODEC_MASK) {
+ num_links = ARRAY_SIZE(skl_hda_be_dai_links);
+ num_route = ARRAY_SIZE(skl_hda_map);
+ card->dapm_widgets = skl_hda_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(skl_hda_widgets);
+ } else {
+ return -EINVAL;
+ }
+
+ card->num_links = num_links;
+ card->num_dapm_routes = num_route;
+
+ for_each_card_prelinks(card, i, dai_link)
+ dai_link->platform_name = pdata->platform;
+
+ return 0;
+}
+
+static int skl_hda_audio_probe(struct platform_device *pdev)
+{
+ struct skl_machine_pdata *pdata;
+ struct skl_hda_private *ctx;
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s: entry\n", __func__);
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ pdata = dev_get_drvdata(&pdev->dev);
+ if (!pdata)
+ return -EINVAL;
+
+ ret = skl_hda_fill_card_info(pdata);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unsupported HDAudio/iDisp configuration found\n");
+ return ret;
+ }
+
+ ctx->pcm_count = hda_soc_card.num_links;
+ ctx->dai_index = 1; /* hdmi codec dai name starts from index 1 */
+ ctx->platform_name = pdata->platform;
+
+ hda_soc_card.dev = &pdev->dev;
+ snd_soc_card_set_drvdata(&hda_soc_card, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev, &hda_soc_card);
+}
+
+static struct platform_driver skl_hda_audio = {
+ .probe = skl_hda_audio_probe,
+ .driver = {
+ .name = "skl_hda_dsp_generic",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(skl_hda_audio);
+
+/* Module information */
+MODULE_DESCRIPTION("SKL/KBL/BXT/APL HDA Generic Machine driver");
+MODULE_AUTHOR("Rakesh Ughreja <rakesh.a.ughreja@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:skl_hda_dsp_generic");
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index 915a34cdc8ac..c1f50a079d34 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -7,7 +7,8 @@ snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-m
soc-acpi-intel-hsw-bdw-match.o \
soc-acpi-intel-skl-match.o soc-acpi-intel-kbl-match.o \
soc-acpi-intel-bxt-match.o soc-acpi-intel-glk-match.o \
- soc-acpi-intel-cnl-match.o
+ soc-acpi-intel-cnl-match.o \
+ soc-acpi-intel-hda-match.o
obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
index 4daa8a4f0c0c..097dc06377ba 100644
--- a/sound/soc/intel/common/soc-acpi-intel-byt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
@@ -34,6 +34,13 @@ static const struct dmi_system_id byt_table[] = {
.callback = byt_thinkpad10_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ },
+ {
+ .callback = byt_thinkpad10_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 10"),
},
},
diff --git a/sound/soc/intel/common/soc-acpi-intel-hda-match.c b/sound/soc/intel/common/soc-acpi-intel-hda-match.c
new file mode 100644
index 000000000000..533c1064f84b
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-hda-match.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, Intel Corporation.
+
+/*
+ * soc-acpi-intel-hda-match.c - tables and support for HDA+ACPI enumeration.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include "../skylake/skl.h"
+
+static struct skl_machine_pdata hda_pdata = {
+ .use_tplg_pcm = true,
+};
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_hda_machines[] = {
+ {
+ /* .id is not used in this file */
+ .drv_name = "skl_hda_dsp_generic",
+
+ /* .fw_filename is dynamically set in skylake driver */
+
+ /* .sof_fw_filename is dynamically set in sof/intel driver */
+
+ .sof_tplg_filename = "intel/sof-hda-generic.tplg",
+
+ /*
+ * .machine_quirk and .quirk_data are not used here but
+ * can be used if we need a more complicated machine driver
+ * combining HDA+other device (e.g. DMIC).
+ */
+ .pdata = &hda_pdata,
+ },
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_hda_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
index 0ee173ca437d..a317b7790fce 100644
--- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
@@ -32,6 +32,11 @@ static struct snd_soc_acpi_codecs kbl_7219_98357_codecs = {
.codecs = {"MX98357A"}
};
+static struct snd_soc_acpi_codecs kbl_7219_98927_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98927"}
+};
+
struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
{
.id = "INT343A",
@@ -83,6 +88,14 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
.quirk_data = &kbl_7219_98357_codecs,
.pdata = &skl_dmic_data,
},
+ {
+ .id = "DLGS7219",
+ .drv_name = "kbl_da7219_max98927",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &kbl_7219_98927_codecs,
+ .pdata = &skl_dmic_data
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_kbl_machines);
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index 11041aedea31..1e067504b604 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -355,7 +355,7 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
/* allocate DMA buffer to store FW data */
sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
- &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
+ &sst_fw->dmable_fw_paddr, GFP_KERNEL);
if (!sst_fw->dma_buf) {
dev_err(dsp->dev, "error: DMA alloc failed\n");
kfree(sst_fw);
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 823e39103edd..557f80c0bfe5 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -32,6 +32,7 @@
#define HDA_MONO 1
#define HDA_STEREO 2
#define HDA_QUAD 4
+#define HDA_MAX 8
static const struct snd_pcm_hardware azx_pcm_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
@@ -494,6 +495,7 @@ static int skl_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
stream->lpib);
snd_hdac_ext_stream_set_lpib(stream, stream->lpib);
}
+ /* fall through */
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
@@ -569,7 +571,10 @@ static int skl_link_hw_params(struct snd_pcm_substream *substream,
stream_tag = hdac_stream(link_dev)->stream_tag;
/* set the stream tag in the codec dai dma params */
- snd_soc_dai_set_tdm_slot(codec_dai, stream_tag, 0, 0, 0);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ snd_soc_dai_set_tdm_slot(codec_dai, stream_tag, 0, 0, 0);
+ else
+ snd_soc_dai_set_tdm_slot(codec_dai, 0, stream_tag, 0, 0);
p_params.s_fmt = snd_pcm_format_width(params_format(params));
p_params.ch = params_channels(params);
@@ -995,21 +1000,63 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
},
},
{
- .name = "HD-Codec Pin",
+ .name = "Analog CPU DAI",
.ops = &skl_link_dai_ops,
.playback = {
- .stream_name = "HD-Codec Tx",
- .channels_min = HDA_STEREO,
- .channels_max = HDA_STEREO,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .stream_name = "Analog CPU Playback",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
},
.capture = {
- .stream_name = "HD-Codec Rx",
- .channels_min = HDA_STEREO,
- .channels_max = HDA_STEREO,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .stream_name = "Analog CPU Capture",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+},
+{
+ .name = "Alt Analog CPU DAI",
+ .ops = &skl_link_dai_ops,
+ .playback = {
+ .stream_name = "Alt Analog CPU Playback",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .capture = {
+ .stream_name = "Alt Analog CPU Capture",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+},
+{
+ .name = "Digital CPU DAI",
+ .ops = &skl_link_dai_ops,
+ .playback = {
+ .stream_name = "Digital CPU Playback",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .capture = {
+ .stream_name = "Digital CPU Capture",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
},
},
};
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 2620d77729c5..cf8848b779dc 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -898,11 +898,10 @@ static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
bc = (struct skl_algo_data *)sb->dobj.private;
if (bc->set_params == SKL_PARAM_BIND) {
- params = kzalloc(bc->max, GFP_KERNEL);
+ params = kmemdup(bc->params, bc->max, GFP_KERNEL);
if (!params)
return -ENOMEM;
- memcpy(params, bc->params, bc->max);
skl_fill_sink_instance_id(ctx, params, bc->max,
mconfig);
@@ -2461,6 +2460,7 @@ static int skl_tplg_get_token(struct device *dev,
case SKL_TKN_U8_CORE_ID:
mconfig->core_id = tkn_elem->value;
+ break;
case SKL_TKN_U8_MOD_TYPE:
mconfig->m_type = tkn_elem->value;
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index dce649485649..29225623b4b4 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -33,9 +33,11 @@
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
+#include <sound/hda_codec.h>
#include "skl.h"
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
+#include "../../../soc/codecs/hdac_hda.h"
/*
* initialize the PCI registers
@@ -472,6 +474,25 @@ static struct skl_ssp_clk skl_ssp_clks[] = {
{.name = "ssp5_sclkfs"},
};
+static struct snd_soc_acpi_mach *skl_find_hda_machine(struct skl *skl,
+ struct snd_soc_acpi_mach *machines)
+{
+ struct hdac_bus *bus = skl_to_bus(skl);
+ struct snd_soc_acpi_mach *mach;
+
+ /* check if we have any codecs detected on bus */
+ if (bus->codec_mask == 0)
+ return NULL;
+
+ /* point to common table */
+ mach = snd_soc_acpi_intel_hda_machines;
+
+ /* all entries in the machine table use the same firmware */
+ mach->fw_filename = machines->fw_filename;
+
+ return mach;
+}
+
static int skl_find_machine(struct skl *skl, void *driver_data)
{
struct hdac_bus *bus = skl_to_bus(skl);
@@ -479,9 +500,13 @@ static int skl_find_machine(struct skl *skl, void *driver_data)
struct skl_machine_pdata *pdata;
mach = snd_soc_acpi_find_machine(mach);
- if (mach == NULL) {
- dev_err(bus->dev, "No matching machine driver found\n");
- return -ENODEV;
+ if (!mach) {
+ dev_dbg(bus->dev, "No matching I2S machine driver found\n");
+ mach = skl_find_hda_machine(skl, driver_data);
+ if (!mach) {
+ dev_err(bus->dev, "No matching machine driver found\n");
+ return -ENODEV;
+ }
}
skl->mach = mach;
@@ -498,8 +523,9 @@ static int skl_find_machine(struct skl *skl, void *driver_data)
static int skl_machine_device_register(struct skl *skl)
{
- struct hdac_bus *bus = skl_to_bus(skl);
struct snd_soc_acpi_mach *mach = skl->mach;
+ struct hdac_bus *bus = skl_to_bus(skl);
+ struct skl_machine_pdata *pdata;
struct platform_device *pdev;
int ret;
@@ -516,8 +542,12 @@ static int skl_machine_device_register(struct skl *skl)
return -EIO;
}
- if (mach->pdata)
+ if (mach->pdata) {
+ pdata = (struct skl_machine_pdata *)mach->pdata;
+ pdata->platform = dev_name(bus->dev);
+ pdata->codec_mask = bus->codec_mask;
dev_set_drvdata(&pdev->dev, mach->pdata);
+ }
skl->i2s_dev = pdev;
@@ -628,6 +658,24 @@ static void skl_clock_device_unregister(struct skl *skl)
platform_device_unregister(skl->clk_dev);
}
+#define IDISP_INTEL_VENDOR_ID 0x80860000
+
+/*
+ * load the legacy codec driver
+ */
+static void load_codec_module(struct hda_codec *codec)
+{
+#ifdef MODULE
+ char modalias[MODULE_NAME_LEN];
+
+ snd_hdac_codec_modalias(&codec->core, modalias, sizeof(modalias));
+ dev_dbg(&codec->core.dev, "loading %s codec module\n", modalias);
+ request_module(modalias);
+#endif
+}
+
/*
* Probe the given codec address
*/
@@ -637,7 +685,9 @@ static int probe_codec(struct hdac_bus *bus, int addr)
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
unsigned int res = -1;
struct skl *skl = bus_to_skl(bus);
+ struct hdac_hda_priv *hda_codec;
struct hdac_device *hdev;
+ int err;
mutex_lock(&bus->cmd_mutex);
snd_hdac_bus_send_cmd(bus, cmd);
@@ -645,13 +695,26 @@ static int probe_codec(struct hdac_bus *bus, int addr)
mutex_unlock(&bus->cmd_mutex);
if (res == -1)
return -EIO;
- dev_dbg(bus->dev, "codec #%d probed OK\n", addr);
+ dev_dbg(bus->dev, "codec #%d probed OK: %x\n", addr, res);
- hdev = devm_kzalloc(&skl->pci->dev, sizeof(*hdev), GFP_KERNEL);
- if (!hdev)
+ hda_codec = devm_kzalloc(&skl->pci->dev, sizeof(*hda_codec),
+ GFP_KERNEL);
+ if (!hda_codec)
return -ENOMEM;
- return snd_hdac_ext_bus_device_init(bus, addr, hdev);
+ hda_codec->codec.bus = skl_to_hbus(skl);
+ hdev = &hda_codec->codec.core;
+
+ err = snd_hdac_ext_bus_device_init(bus, addr, hdev);
+ if (err < 0)
+ return err;
+
+ /* use legacy bus only for HDA codecs, idisp uses ext bus */
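+ /* (an iDisp codec answers with vendor id 0x8086 in res[31:16]) */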
+ if ((res & 0xFFFF0000) != IDISP_INTEL_VENDOR_ID) {
+ hdev->type = HDA_DEV_LEGACY;
+ load_codec_module(&hda_codec->codec);
+ }
+ return 0;
}
/* Codec initialization */
@@ -786,9 +849,10 @@ static int skl_create(struct pci_dev *pci,
const struct hdac_io_ops *io_ops,
struct skl **rskl)
{
+ struct hdac_ext_bus_ops *ext_ops = NULL;
struct skl *skl;
struct hdac_bus *bus;
-
+ struct hda_bus *hbus;
int err;
*rskl = NULL;
@@ -803,13 +867,23 @@ static int skl_create(struct pci_dev *pci,
return -ENOMEM;
}
+ hbus = skl_to_hbus(skl);
bus = skl_to_bus(skl);
- snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, NULL);
+
+#if IS_ENABLED(CONFIG_SND_SOC_HDAC_HDA)
+ ext_ops = snd_soc_hdac_hda_get_ops();
+#endif
+ snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, ext_ops);
bus->use_posbuf = 1;
skl->pci = pci;
INIT_WORK(&skl->probe_work, skl_probe_work);
bus->bdl_pos_adj = 0;
+ mutex_init(&hbus->prepare_mutex);
+ hbus->pci = pci;
+ hbus->mixer_assigned = -1;
+ hbus->modelname = "sklbus";
+
*rskl = skl;
return 0;
@@ -834,7 +908,7 @@ static int skl_first_init(struct hdac_bus *bus)
return -ENXIO;
}
- skl_init_chip(bus, true);
+ snd_hdac_bus_reset_link(bus, true);
snd_hdac_bus_parse_capabilities(bus);
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 78aa8bdcb619..8d48cd7c56c8 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -23,6 +23,7 @@
#include <sound/hda_register.h>
#include <sound/hdaudio_ext.h>
+#include <sound/hda_codec.h>
#include <sound/soc.h>
#include "skl-nhlt.h"
#include "skl-ssp-clk.h"
@@ -71,7 +72,7 @@ struct skl_fw_config {
};
struct skl {
- struct hdac_bus hbus;
+ struct hda_bus hbus;
struct pci_dev *pci;
unsigned int init_done:1; /* delayed init status */
@@ -105,8 +106,11 @@ struct skl {
struct snd_soc_acpi_mach *mach;
};
-#define skl_to_bus(s) (&(s)->hbus)
-#define bus_to_skl(bus) container_of(bus, struct skl, hbus)
+#define skl_to_bus(s) (&(s)->hbus.core)
+#define bus_to_skl(bus) container_of(bus, struct skl, hbus.core)
+
+#define skl_to_hbus(s) (&(s)->hbus)
+#define hbus_to_skl(h) container_of((h), struct skl, hbus)
/* to pass dai dma data */
struct skl_dma_params {
@@ -117,6 +121,8 @@ struct skl_dma_params {
struct skl_machine_pdata {
u32 dmic_num;
bool use_tplg_pcm; /* use dais and dai links from topology */
+ const char *platform;
+ u32 codec_mask;
};
struct skl_dsp_ops {
diff --git a/sound/soc/mediatek/mt2701/mt2701-cs42448.c b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
index 666282b865a8..97f9f38ce6b3 100644
--- a/sound/soc/mediatek/mt2701/mt2701-cs42448.c
+++ b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
@@ -299,6 +299,7 @@ static int mt2701_cs42448_machine_probe(struct platform_device *pdev)
devm_kzalloc(&pdev->dev, sizeof(struct mt2701_cs42448_private),
GFP_KERNEL);
struct device *dev = &pdev->dev;
+ struct snd_soc_dai_link *dai_link;
if (!priv)
return -ENOMEM;
@@ -309,10 +310,10 @@ static int mt2701_cs42448_machine_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt2701_cs42448_dai_links[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt2701_cs42448_dai_links[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
card->dev = dev;
@@ -324,10 +325,10 @@ static int mt2701_cs42448_machine_probe(struct platform_device *pdev)
"Property 'audio-codec' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt2701_cs42448_dai_links[i].codec_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->codec_name)
continue;
- mt2701_cs42448_dai_links[i].codec_of_node = codec_node;
+ dai_link->codec_of_node = codec_node;
}
codec_node_bt_mrg = of_parse_phandle(pdev->dev.of_node,
diff --git a/sound/soc/mediatek/mt2701/mt2701-wm8960.c b/sound/soc/mediatek/mt2701/mt2701-wm8960.c
index 89f34efd9747..6bc1d3d58e64 100644
--- a/sound/soc/mediatek/mt2701/mt2701-wm8960.c
+++ b/sound/soc/mediatek/mt2701/mt2701-wm8960.c
@@ -97,6 +97,7 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt2701_wm8960_card;
struct device_node *platform_node, *codec_node;
+ struct snd_soc_dai_link *dai_link;
int ret, i;
platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -105,10 +106,10 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt2701_wm8960_dai_links[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt2701_wm8960_dai_links[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
card->dev = &pdev->dev;
@@ -120,10 +121,10 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev)
"Property 'audio-codec' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt2701_wm8960_dai_links[i].codec_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->codec_name)
continue;
- mt2701_wm8960_dai_links[i].codec_of_node = codec_node;
+ dai_link->codec_of_node = codec_node;
}
ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
@@ -150,7 +151,6 @@ static const struct of_device_id mt2701_wm8960_machine_dt_match[] = {
static struct platform_driver mt2701_wm8960_machine = {
.driver = {
.name = "mt2701-wm8960",
- .owner = THIS_MODULE,
#ifdef CONFIG_OF
.of_match_table = mt2701_wm8960_machine_dt_match,
#endif
diff --git a/sound/soc/mediatek/mt6797/mt6797-mt6351.c b/sound/soc/mediatek/mt6797/mt6797-mt6351.c
index b1558c57b9ca..cc41eb531653 100644
--- a/sound/soc/mediatek/mt6797/mt6797-mt6351.c
+++ b/sound/soc/mediatek/mt6797/mt6797-mt6351.c
@@ -158,6 +158,7 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt6797_mt6351_card;
struct device_node *platform_node, *codec_node;
+ struct snd_soc_dai_link *dai_link;
int ret, i;
card->dev = &pdev->dev;
@@ -168,10 +169,10 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt6797_mt6351_dai_links[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt6797_mt6351_dai_links[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
codec_node = of_parse_phandle(pdev->dev.of_node,
@@ -181,10 +182,10 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev)
"Property 'audio-codec' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt6797_mt6351_dai_links[i].codec_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->codec_name)
continue;
- mt6797_mt6351_dai_links[i].codec_of_node = codec_node;
+ dai_link->codec_of_node = codec_node;
}
ret = devm_snd_soc_register_card(&pdev->dev, card);
@@ -205,7 +206,6 @@ static const struct of_device_id mt6797_mt6351_dt_match[] = {
static struct platform_driver mt6797_mt6351_driver = {
.driver = {
.name = "mt6797-mt6351",
- .owner = THIS_MODULE,
#ifdef CONFIG_OF
.of_match_table = mt6797_mt6351_dt_match,
#endif
diff --git a/sound/soc/mediatek/mt8173/mt8173-max98090.c b/sound/soc/mediatek/mt8173/mt8173-max98090.c
index 902d111016d6..4d6596d5cb07 100644
--- a/sound/soc/mediatek/mt8173/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173/mt8173-max98090.c
@@ -137,6 +137,7 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8173_max98090_card;
struct device_node *codec_node, *platform_node;
+ struct snd_soc_dai_link *dai_link;
int ret, i;
platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -145,10 +146,10 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_max98090_dais[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt8173_max98090_dais[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
codec_node = of_parse_phandle(pdev->dev.of_node,
@@ -158,10 +159,10 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev)
"Property 'audio-codec' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_max98090_dais[i].codec_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->codec_name)
continue;
- mt8173_max98090_dais[i].codec_of_node = codec_node;
+ dai_link->codec_of_node = codec_node;
}
card->dev = &pdev->dev;
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
index 582174d98c6c..da5b58ce791b 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
@@ -44,11 +44,10 @@ static int mt8173_rt5650_rt5514_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
int i, ret;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
-
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/* pll from mclk 12.288M */
ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
params_rate(params) * 512);
@@ -179,6 +178,7 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8173_rt5650_rt5514_card;
struct device_node *platform_node;
+ struct snd_soc_dai_link *dai_link;
int i, ret;
platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -188,10 +188,10 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_rt5650_rt5514_dais[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt8173_rt5650_rt5514_dais[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
mt8173_rt5650_rt5514_codecs[0].of_node =
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
index b3670c8a5b8d..d83cd039b413 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
@@ -48,11 +48,10 @@ static int mt8173_rt5650_rt5676_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
int i, ret;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
-
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/* pll from mclk 12.288M */
ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
params_rate(params) * 512);
@@ -225,6 +224,7 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
struct device_node *platform_node;
+ struct snd_soc_dai_link *dai_link;
int i, ret;
platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -234,10 +234,10 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_rt5650_rt5676_dais[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
mt8173_rt5650_rt5676_codecs[0].of_node =
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
index 7a89b4aad182..7edf250c8fb1 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
@@ -59,6 +59,7 @@ static int mt8173_rt5650_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
unsigned int mclk_clock;
+ struct snd_soc_dai *codec_dai;
int i, ret;
switch (mt8173_rt5650_priv.pll_from) {
@@ -76,9 +77,7 @@ static int mt8173_rt5650_hw_params(struct snd_pcm_substream *substream,
break;
}
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
-
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/* pll from mclk */
ret = snd_soc_dai_set_pll(codec_dai, 0, 0, mclk_clock,
params_rate(params) * 512);
@@ -240,6 +239,7 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
struct device_node *platform_node;
struct device_node *np;
const char *codec_capture_dai;
+ struct snd_soc_dai_link *dai_link;
int i, ret;
platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -249,10 +249,10 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_rt5650_dais[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt8173_rt5650_dais[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
mt8173_rt5650_codecs[0].of_node =
diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
index 8af8bc358a90..8b8426ed2363 100644
--- a/sound/soc/meson/Kconfig
+++ b/sound/soc/meson/Kconfig
@@ -4,6 +4,8 @@ menu "ASoC support for Amlogic platforms"
config SND_MESON_AXG_FIFO
tristate
select REGMAP_MMIO
+ imply COMMON_CLK_AXG_AUDIO
+ imply RESET_MESON_AUDIO_ARB
config SND_MESON_AXG_FRDDR
tristate "Amlogic AXG Playback FIFO support"
@@ -22,6 +24,7 @@ config SND_MESON_AXG_TODDR
config SND_MESON_AXG_TDM_FORMATTER
tristate
select REGMAP_MMIO
+ imply COMMON_CLK_AXG_AUDIO
config SND_MESON_AXG_TDM_INTERFACE
tristate
@@ -51,6 +54,7 @@ config SND_MESON_AXG_SOUND_CARD
imply SND_MESON_AXG_TDMIN
imply SND_MESON_AXG_TDMOUT
imply SND_MESON_AXG_SPDIFOUT
+ imply SND_MESON_AXG_PDM
help
Select Y or M to add support for the AXG SoC sound card
@@ -58,8 +62,17 @@ config SND_MESON_AXG_SPDIFOUT
tristate "Amlogic AXG SPDIF Output Support"
select SND_PCM_IEC958
imply SND_SOC_SPDIF
+ imply COMMON_CLK_AXG_AUDIO
help
Select Y or M to add support for SPDIF output serializer embedded
in the Amlogic AXG SoC family
+config SND_MESON_AXG_PDM
+ tristate "Amlogic AXG PDM Input Support"
+ imply SND_SOC_DMIC
+ imply COMMON_CLK_AXG_AUDIO
+ help
+ Select Y or M to add support for PDM input embedded
+ in the Amlogic AXG SoC family
+
endmenu
diff --git a/sound/soc/meson/Makefile b/sound/soc/meson/Makefile
index c5e003b093db..4cd25104029d 100644
--- a/sound/soc/meson/Makefile
+++ b/sound/soc/meson/Makefile
@@ -9,6 +9,7 @@ snd-soc-meson-axg-tdmin-objs := axg-tdmin.o
snd-soc-meson-axg-tdmout-objs := axg-tdmout.o
snd-soc-meson-axg-sound-card-objs := axg-card.o
snd-soc-meson-axg-spdifout-objs := axg-spdifout.o
+snd-soc-meson-axg-pdm-objs := axg-pdm.o
obj-$(CONFIG_SND_MESON_AXG_FIFO) += snd-soc-meson-axg-fifo.o
obj-$(CONFIG_SND_MESON_AXG_FRDDR) += snd-soc-meson-axg-frddr.o
@@ -19,3 +20,4 @@ obj-$(CONFIG_SND_MESON_AXG_TDMIN) += snd-soc-meson-axg-tdmin.o
obj-$(CONFIG_SND_MESON_AXG_TDMOUT) += snd-soc-meson-axg-tdmout.o
obj-$(CONFIG_SND_MESON_AXG_SOUND_CARD) += snd-soc-meson-axg-sound-card.o
obj-$(CONFIG_SND_MESON_AXG_SPDIFOUT) += snd-soc-meson-axg-spdifout.o
+obj-$(CONFIG_SND_MESON_AXG_PDM) += snd-soc-meson-axg-pdm.o
diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
index 2914ba0d965b..aa54d2c612c9 100644
--- a/sound/soc/meson/axg-card.c
+++ b/sound/soc/meson/axg-card.c
@@ -97,14 +97,14 @@ static void axg_card_clean_references(struct axg_card *priv)
{
struct snd_soc_card *card = &priv->card;
struct snd_soc_dai_link *link;
+ struct snd_soc_dai_link_component *codec;
int i, j;
if (card->dai_link) {
- for (i = 0; i < card->num_links; i++) {
- link = &card->dai_link[i];
+ for_each_card_prelinks(card, i, link) {
of_node_put(link->cpu_of_node);
- for (j = 0; j < link->num_codecs; j++)
- of_node_put(link->codecs[j].of_node);
+ for_each_link_codecs(link, j, codec)
+ of_node_put(codec->of_node);
}
}
@@ -167,8 +167,7 @@ static int axg_card_tdm_be_hw_params(struct snd_pcm_substream *substream,
if (be->mclk_fs) {
mclk = params_rate(params) * be->mclk_fs;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
SND_SOC_CLOCK_IN);
if (ret && ret != -ENOTSUPP)
@@ -196,8 +195,7 @@ static int axg_card_tdm_dai_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_dai *codec_dai;
int ret, i;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
ret = snd_soc_dai_set_tdm_slot(codec_dai,
be->codec_masks[i].tx,
be->codec_masks[i].rx,
@@ -478,7 +476,7 @@ static int axg_card_set_be_link(struct snd_soc_card *card,
ret = axg_card_set_link_name(card, link, "be");
if (ret)
- dev_err(card->dev, "error setting %s link name\n", np->name);
+ dev_err(card->dev, "error setting %pOFn link name\n", np);
return ret;
}
diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
index 30262550e37b..0e4f65e654c4 100644
--- a/sound/soc/meson/axg-fifo.c
+++ b/sound/soc/meson/axg-fifo.c
@@ -203,6 +203,8 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
dev_name(dev), ss);
+ if (ret)
+ return ret;
/* Enable pclk to access registers and clock the fifo ip */
ret = clk_prepare_enable(fifo->pclk);
diff --git a/sound/soc/meson/axg-pdm.c b/sound/soc/meson/axg-pdm.c
new file mode 100644
index 000000000000..9d5684493ffc
--- /dev/null
+++ b/sound/soc/meson/axg-pdm.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/pcm_params.h>
+
+#define PDM_CTRL 0x00
+#define PDM_CTRL_EN BIT(31)
+#define PDM_CTRL_OUT_MODE BIT(29)
+#define PDM_CTRL_BYPASS_MODE BIT(28)
+#define PDM_CTRL_RST_FIFO BIT(16)
+#define PDM_CTRL_CHAN_RSTN_MASK GENMASK(15, 8)
+#define PDM_CTRL_CHAN_RSTN(x) ((x) << 8)
+#define PDM_CTRL_CHAN_EN_MASK GENMASK(7, 0)
+#define PDM_CTRL_CHAN_EN(x) ((x) << 0)
+#define PDM_HCIC_CTRL1 0x04
+#define PDM_FILTER_EN BIT(31)
+#define PDM_HCIC_CTRL1_GAIN_SFT_MASK GENMASK(29, 24)
+#define PDM_HCIC_CTRL1_GAIN_SFT(x) ((x) << 24)
+#define PDM_HCIC_CTRL1_GAIN_MULT_MASK GENMASK(23, 16)
+#define PDM_HCIC_CTRL1_GAIN_MULT(x) ((x) << 16)
+#define PDM_HCIC_CTRL1_DSR_MASK GENMASK(8, 4)
+#define PDM_HCIC_CTRL1_DSR(x) ((x) << 4)
+#define PDM_HCIC_CTRL1_STAGE_NUM_MASK GENMASK(3, 0)
+#define PDM_HCIC_CTRL1_STAGE_NUM(x) ((x) << 0)
+#define PDM_HCIC_CTRL2 0x08
+#define PDM_F1_CTRL 0x0c
+#define PDM_LPF_ROUND_MODE_MASK GENMASK(17, 16)
+#define PDM_LPF_ROUND_MODE(x) ((x) << 16)
+#define PDM_LPF_DSR_MASK GENMASK(15, 12)
+#define PDM_LPF_DSR(x) ((x) << 12)
+#define PDM_LPF_STAGE_NUM_MASK GENMASK(8, 0)
+#define PDM_LPF_STAGE_NUM(x) ((x) << 0)
+#define PDM_LPF_MAX_STAGE 336
+#define PDM_LPF_NUM 3
+#define PDM_F2_CTRL 0x10
+#define PDM_F3_CTRL 0x14
+#define PDM_HPF_CTRL 0x18
+#define PDM_HPF_SFT_STEPS_MASK GENMASK(20, 16)
+#define PDM_HPF_SFT_STEPS(x) ((x) << 16)
+#define PDM_HPF_OUT_FACTOR_MASK GENMASK(15, 0)
+#define PDM_HPF_OUT_FACTOR(x) ((x) << 0)
+#define PDM_CHAN_CTRL 0x1c
+#define PDM_CHAN_CTRL_POINTER_WIDTH 8
+#define PDM_CHAN_CTRL_POINTER_MAX ((1 << PDM_CHAN_CTRL_POINTER_WIDTH) - 1)
+#define PDM_CHAN_CTRL_NUM 4
+#define PDM_CHAN_CTRL1 0x20
+#define PDM_COEFF_ADDR 0x24
+#define PDM_COEFF_DATA 0x28
+#define PDM_CLKG_CTRL 0x2c
+#define PDM_STS 0x30
+
+struct axg_pdm_lpf {
+ unsigned int ds;
+ unsigned int round_mode;
+ const unsigned int *tap;
+ unsigned int tap_num;
+};
+
+struct axg_pdm_hcic {
+ unsigned int shift;
+ unsigned int mult;
+ unsigned int steps;
+ unsigned int ds;
+};
+
+struct axg_pdm_hpf {
+ unsigned int out_factor;
+ unsigned int steps;
+};
+
+struct axg_pdm_filters {
+ struct axg_pdm_hcic hcic;
+ struct axg_pdm_hpf hpf;
+ struct axg_pdm_lpf lpf[PDM_LPF_NUM];
+};
+
+struct axg_pdm_cfg {
+ const struct axg_pdm_filters *filters;
+ unsigned int sys_rate;
+};
+
+struct axg_pdm {
+ const struct axg_pdm_cfg *cfg;
+ struct regmap *map;
+ struct clk *dclk;
+ struct clk *sysclk;
+ struct clk *pclk;
+};
+
+static void axg_pdm_enable(struct regmap *map)
+{
+ /* Reset AFIFO */
+ regmap_update_bits(map, PDM_CTRL, PDM_CTRL_RST_FIFO, PDM_CTRL_RST_FIFO);
+ regmap_update_bits(map, PDM_CTRL, PDM_CTRL_RST_FIFO, 0);
+
+ /* Enable PDM */
+ regmap_update_bits(map, PDM_CTRL, PDM_CTRL_EN, PDM_CTRL_EN);
+}
+
+static void axg_pdm_disable(struct regmap *map)
+{
+ regmap_update_bits(map, PDM_CTRL, PDM_CTRL_EN, 0);
+}
+
+static void axg_pdm_filters_enable(struct regmap *map, bool enable)
+{
+ unsigned int val = enable ? PDM_FILTER_EN : 0;
+
+ regmap_update_bits(map, PDM_HCIC_CTRL1, PDM_FILTER_EN, val);
+ regmap_update_bits(map, PDM_F1_CTRL, PDM_FILTER_EN, val);
+ regmap_update_bits(map, PDM_F2_CTRL, PDM_FILTER_EN, val);
+ regmap_update_bits(map, PDM_F3_CTRL, PDM_FILTER_EN, val);
+ regmap_update_bits(map, PDM_HPF_CTRL, PDM_FILTER_EN, val);
+}
+
+static int axg_pdm_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ axg_pdm_enable(priv->map);
+ return 0;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ axg_pdm_disable(priv->map);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned int axg_pdm_get_os(struct axg_pdm *priv)
+{
+ const struct axg_pdm_filters *filters = priv->cfg->filters;
+ unsigned int os = filters->hcic.ds;
+ int i;
+
+ /*
+ * The global oversampling factor is defined by the downsampling
+ * factor applied by each filter (HCIC and LPFs).
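+ * (with the default axg filters: 8 * 2 * 2 * 2 = 64)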
+ */
+
+ for (i = 0; i < PDM_LPF_NUM; i++)
+ os *= filters->lpf[i].ds;
+
+ return os;
+}
+
+static int axg_pdm_set_sysclk(struct axg_pdm *priv, unsigned int os,
+ unsigned int rate)
+{
+ unsigned int sys_rate = os * 2 * rate * PDM_CHAN_CTRL_POINTER_MAX;
+
+ /*
+ * Set the default system clock rate unless it is too fast for
+ * the requested sample rate. In this case, the sample pointer
+ * counter could overflow so set a lower system clock rate.
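+ * (e.g. at 48 kHz with os = 64 the cap is 64 * 2 * 48000 * 255,
+ * about 1.57 GHz, so the 250 MHz default is kept).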
+ */
+ if (sys_rate < priv->cfg->sys_rate)
+ return clk_set_rate(priv->sysclk, sys_rate);
+
+ return clk_set_rate(priv->sysclk, priv->cfg->sys_rate);
+}
+
+static int axg_pdm_set_sample_pointer(struct axg_pdm *priv)
+{
+ unsigned int spmax, sp, val;
+ int i;
+
+ /* Max sample counter value per half period of dclk */
+ spmax = DIV_ROUND_UP_ULL((u64)clk_get_rate(priv->sysclk),
+ clk_get_rate(priv->dclk) * 2);
+
+ /* Check if sysclk is not too fast - should not happen */
+ if (WARN_ON(spmax > PDM_CHAN_CTRL_POINTER_MAX))
+ return -EINVAL;
+
+ /* Capture the data when we are at 75% of the half period */
+ sp = spmax * 3 / 4;
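+ /* e.g. sysclk = 250 MHz, dclk = 3.072 MHz: spmax = 41, sp = 30 */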
+
+ for (i = 0, val = 0; i < PDM_CHAN_CTRL_NUM; i++)
+ val |= sp << (PDM_CHAN_CTRL_POINTER_WIDTH * i);
+
+ regmap_write(priv->map, PDM_CHAN_CTRL, val);
+ regmap_write(priv->map, PDM_CHAN_CTRL1, val);
+
+ return 0;
+}
+
+static void axg_pdm_set_channel_mask(struct axg_pdm *priv,
+ unsigned int channels)
+{
+ unsigned int mask = GENMASK(channels - 1, 0);
+
+ /* Put all channel in reset */
+ regmap_update_bits(priv->map, PDM_CTRL,
+ PDM_CTRL_CHAN_RSTN_MASK, 0);
+
+ /* Take the necessary channels out of reset and enable them */
+ regmap_update_bits(priv->map, PDM_CTRL,
+ PDM_CTRL_CHAN_RSTN_MASK |
+ PDM_CTRL_CHAN_EN_MASK,
+ PDM_CTRL_CHAN_RSTN(mask) |
+ PDM_CTRL_CHAN_EN(mask));
+}
+
+static int axg_pdm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+ unsigned int os = axg_pdm_get_os(priv);
+ unsigned int rate = params_rate(params);
+ unsigned int val;
+ int ret;
+
+ switch (params_width(params)) {
+ case 24:
+ val = PDM_CTRL_OUT_MODE;
+ break;
+ case 32:
+ val = 0;
+ break;
+ default:
+ dev_err(dai->dev, "unsupported sample width\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(priv->map, PDM_CTRL, PDM_CTRL_OUT_MODE, val);
+
+ ret = axg_pdm_set_sysclk(priv, os, rate);
+ if (ret) {
+ dev_err(dai->dev, "failed to set system clock\n");
+ return ret;
+ }
+
+ ret = clk_set_rate(priv->dclk, rate * os);
+ if (ret) {
+ dev_err(dai->dev, "failed to set dclk\n");
+ return ret;
+ }
+
+ ret = axg_pdm_set_sample_pointer(priv);
+ if (ret) {
+ dev_err(dai->dev, "invalid clock setting\n");
+ return ret;
+ }
+
+ axg_pdm_set_channel_mask(priv, params_channels(params));
+
+ return 0;
+}
+
+static int axg_pdm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = clk_prepare_enable(priv->dclk);
+ if (ret) {
+ dev_err(dai->dev, "enabling dclk failed\n");
+ return ret;
+ }
+
+ /* Enable the filters */
+ axg_pdm_filters_enable(priv->map, true);
+
+ return ret;
+}
+
+static void axg_pdm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+
+ axg_pdm_filters_enable(priv->map, false);
+ clk_disable_unprepare(priv->dclk);
+}
+
+static const struct snd_soc_dai_ops axg_pdm_dai_ops = {
+ .trigger = axg_pdm_trigger,
+ .hw_params = axg_pdm_hw_params,
+ .startup = axg_pdm_startup,
+ .shutdown = axg_pdm_shutdown,
+};
+
+static void axg_pdm_set_hcic_ctrl(struct axg_pdm *priv)
+{
+ const struct axg_pdm_hcic *hcic = &priv->cfg->filters->hcic;
+ unsigned int val;
+
+ val = PDM_HCIC_CTRL1_STAGE_NUM(hcic->steps);
+ val |= PDM_HCIC_CTRL1_DSR(hcic->ds);
+ val |= PDM_HCIC_CTRL1_GAIN_MULT(hcic->mult);
+ val |= PDM_HCIC_CTRL1_GAIN_SFT(hcic->shift);
+
+ regmap_update_bits(priv->map, PDM_HCIC_CTRL1,
+ PDM_HCIC_CTRL1_STAGE_NUM_MASK |
+ PDM_HCIC_CTRL1_DSR_MASK |
+ PDM_HCIC_CTRL1_GAIN_MULT_MASK |
+ PDM_HCIC_CTRL1_GAIN_SFT_MASK,
+ val);
+}
+
+static void axg_pdm_set_lpf_ctrl(struct axg_pdm *priv, unsigned int index)
+{
+ const struct axg_pdm_lpf *lpf = &priv->cfg->filters->lpf[index];
+ unsigned int offset = index * regmap_get_reg_stride(priv->map)
+ + PDM_F1_CTRL;
+ unsigned int val;
+
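+ /* with reg_stride = 4, index 0/1/2 selects PDM_F1/F2/F3_CTRL */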
+ val = PDM_LPF_STAGE_NUM(lpf->tap_num);
+ val |= PDM_LPF_DSR(lpf->ds);
+ val |= PDM_LPF_ROUND_MODE(lpf->round_mode);
+
+ regmap_update_bits(priv->map, offset,
+ PDM_LPF_STAGE_NUM_MASK |
+ PDM_LPF_DSR_MASK |
+ PDM_LPF_ROUND_MODE_MASK,
+ val);
+}
+
+static void axg_pdm_set_hpf_ctrl(struct axg_pdm *priv)
+{
+ const struct axg_pdm_hpf *hpf = &priv->cfg->filters->hpf;
+ unsigned int val;
+
+ val = PDM_HPF_OUT_FACTOR(hpf->out_factor);
+ val |= PDM_HPF_SFT_STEPS(hpf->steps);
+
+ regmap_update_bits(priv->map, PDM_HPF_CTRL,
+ PDM_HPF_OUT_FACTOR_MASK |
+ PDM_HPF_SFT_STEPS_MASK,
+ val);
+}
+
+static int axg_pdm_set_lpf_filters(struct axg_pdm *priv)
+{
+ const struct axg_pdm_lpf *lpf = priv->cfg->filters->lpf;
+ unsigned int count = 0;
+ int i, j;
+
+ for (i = 0; i < PDM_LPF_NUM; i++)
+ count += lpf[i].tap_num;
+
+ /* Make sure the coeffs fit in the memory */
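+ /* (the default taps below total 87 + 33 + 117 = 237, under 336) */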
+ if (count >= PDM_LPF_MAX_STAGE)
+ return -EINVAL;
+
+ /* Set the initial APB bus register address */
+ regmap_write(priv->map, PDM_COEFF_ADDR, 0);
+
+ /* Set the tap filter values of all 3 filters */
+ for (i = 0; i < PDM_LPF_NUM; i++) {
+ axg_pdm_set_lpf_ctrl(priv, i);
+
+ for (j = 0; j < lpf[i].tap_num; j++)
+ regmap_write(priv->map, PDM_COEFF_DATA, lpf[i].tap[j]);
+ }
+
+ return 0;
+}
+
+static int axg_pdm_dai_probe(struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret) {
+ dev_err(dai->dev, "enabling pclk failed\n");
+ return ret;
+ }
+
+ /*
+ * sysclk must be set and enabled as well to access the pdm registers.
+ * Accessing the registers without it will give a bus error.
+ */
+ ret = clk_set_rate(priv->sysclk, priv->cfg->sys_rate);
+ if (ret) {
+ dev_err(dai->dev, "setting sysclk failed\n");
+ goto err_pclk;
+ }
+
+ ret = clk_prepare_enable(priv->sysclk);
+ if (ret) {
+ dev_err(dai->dev, "enabling sysclk failed\n");
+ goto err_pclk;
+ }
+
+ /* Make sure the device is initially disabled */
+ axg_pdm_disable(priv->map);
+
+ /* Make sure filter bypass is disabled */
+ regmap_update_bits(priv->map, PDM_CTRL, PDM_CTRL_BYPASS_MODE, 0);
+
+ /* Load filter settings */
+ axg_pdm_set_hcic_ctrl(priv);
+ axg_pdm_set_hpf_ctrl(priv);
+
+ ret = axg_pdm_set_lpf_filters(priv);
+ if (ret) {
+ dev_err(dai->dev, "invalid filter configuration\n");
+ goto err_sysclk;
+ }
+
+ return 0;
+
+err_sysclk:
+ clk_disable_unprepare(priv->sysclk);
+err_pclk:
+ clk_disable_unprepare(priv->pclk);
+ return ret;
+}
+
+static int axg_pdm_dai_remove(struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+
+ clk_disable_unprepare(priv->sysclk);
+ clk_disable_unprepare(priv->pclk);
+
+ return 0;
+}
+
+static struct snd_soc_dai_driver axg_pdm_dai_drv = {
+ .name = "PDM",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 48000,
+ .formats = (SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ },
+ .ops = &axg_pdm_dai_ops,
+ .probe = axg_pdm_dai_probe,
+ .remove = axg_pdm_dai_remove,
+};
+
+static const struct snd_soc_component_driver axg_pdm_component_drv = {};
+
+static const struct regmap_config axg_pdm_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = PDM_STS,
+};
+
+static const unsigned int lpf1_default_tap[] = {
+ 0x000014, 0xffffb2, 0xfffed9, 0xfffdce, 0xfffd45,
+ 0xfffe32, 0x000147, 0x000645, 0x000b86, 0x000e21,
+ 0x000ae3, 0x000000, 0xffeece, 0xffdca8, 0xffd212,
+ 0xffd7d1, 0xfff2a7, 0x001f4c, 0x0050c2, 0x0072aa,
+ 0x006ff1, 0x003c32, 0xffdc4e, 0xff6a18, 0xff0fef,
+ 0xfefbaf, 0xff4c40, 0x000000, 0x00ebc8, 0x01c077,
+ 0x02209e, 0x01c1a4, 0x008e60, 0xfebe52, 0xfcd690,
+ 0xfb8fa5, 0xfba498, 0xfd9812, 0x0181ce, 0x06f5f3,
+ 0x0d112f, 0x12a958, 0x169686, 0x18000e, 0x169686,
+ 0x12a958, 0x0d112f, 0x06f5f3, 0x0181ce, 0xfd9812,
+ 0xfba498, 0xfb8fa5, 0xfcd690, 0xfebe52, 0x008e60,
+ 0x01c1a4, 0x02209e, 0x01c077, 0x00ebc8, 0x000000,
+ 0xff4c40, 0xfefbaf, 0xff0fef, 0xff6a18, 0xffdc4e,
+ 0x003c32, 0x006ff1, 0x0072aa, 0x0050c2, 0x001f4c,
+ 0xfff2a7, 0xffd7d1, 0xffd212, 0xffdca8, 0xffeece,
+ 0x000000, 0x000ae3, 0x000e21, 0x000b86, 0x000645,
+ 0x000147, 0xfffe32, 0xfffd45, 0xfffdce, 0xfffed9,
+ 0xffffb2, 0x000014,
+};
+
+static const unsigned int lpf2_default_tap[] = {
+ 0x00050a, 0xfff004, 0x0002c1, 0x003c12, 0xffa818,
+ 0xffc87d, 0x010aef, 0xff5223, 0xfebd93, 0x028f41,
+ 0xff5c0e, 0xfc63f8, 0x055f81, 0x000000, 0xf478a0,
+ 0x11c5e3, 0x2ea74d, 0x11c5e3, 0xf478a0, 0x000000,
+ 0x055f81, 0xfc63f8, 0xff5c0e, 0x028f41, 0xfebd93,
+ 0xff5223, 0x010aef, 0xffc87d, 0xffa818, 0x003c12,
+ 0x0002c1, 0xfff004, 0x00050a,
+};
+
+static const unsigned int lpf3_default_tap[] = {
+ 0x000000, 0x000081, 0x000000, 0xfffedb, 0x000000,
+ 0x00022d, 0x000000, 0xfffc46, 0x000000, 0x0005f7,
+ 0x000000, 0xfff6eb, 0x000000, 0x000d4e, 0x000000,
+ 0xffed1e, 0x000000, 0x001a1c, 0x000000, 0xffdcb0,
+ 0x000000, 0x002ede, 0x000000, 0xffc2d1, 0x000000,
+ 0x004ebe, 0x000000, 0xff9beb, 0x000000, 0x007dd7,
+ 0x000000, 0xff633a, 0x000000, 0x00c1d2, 0x000000,
+ 0xff11d5, 0x000000, 0x012368, 0x000000, 0xfe9c45,
+ 0x000000, 0x01b252, 0x000000, 0xfdebf6, 0x000000,
+ 0x0290b8, 0x000000, 0xfcca0d, 0x000000, 0x041d7c,
+ 0x000000, 0xfa8152, 0x000000, 0x07e9c6, 0x000000,
+ 0xf28fb5, 0x000000, 0x28b216, 0x3fffde, 0x28b216,
+ 0x000000, 0xf28fb5, 0x000000, 0x07e9c6, 0x000000,
+ 0xfa8152, 0x000000, 0x041d7c, 0x000000, 0xfcca0d,
+ 0x000000, 0x0290b8, 0x000000, 0xfdebf6, 0x000000,
+ 0x01b252, 0x000000, 0xfe9c45, 0x000000, 0x012368,
+ 0x000000, 0xff11d5, 0x000000, 0x00c1d2, 0x000000,
+ 0xff633a, 0x000000, 0x007dd7, 0x000000, 0xff9beb,
+ 0x000000, 0x004ebe, 0x000000, 0xffc2d1, 0x000000,
+ 0x002ede, 0x000000, 0xffdcb0, 0x000000, 0x001a1c,
+ 0x000000, 0xffed1e, 0x000000, 0x000d4e, 0x000000,
+ 0xfff6eb, 0x000000, 0x0005f7, 0x000000, 0xfffc46,
+ 0x000000, 0x00022d, 0x000000, 0xfffedb, 0x000000,
+ 0x000081, 0x000000,
+};
+
+/*
+ * These values are sane defaults for the axg platform:
+ * - OS = 64
+ * - Latency = 38700 (?)
+ *
+ * TODO: Many different HCIC, LPF and HPF configurations are possible.
+ * The configuration may depend on the dmic used by the platform, the
+ * expected tradeoff between latency and quality, etc. If/when other
+ * settings are required, we should add a fw interface to this driver to
+ * load new filter settings.
+ */
+static const struct axg_pdm_filters axg_default_filters = {
+ .hcic = {
+ .shift = 0x15,
+ .mult = 0x80,
+ .steps = 7,
+ .ds = 8,
+ },
+ .hpf = {
+ .out_factor = 0x8000,
+ .steps = 13,
+ },
+ .lpf = {
+ [0] = {
+ .ds = 2,
+ .round_mode = 1,
+ .tap = lpf1_default_tap,
+ .tap_num = ARRAY_SIZE(lpf1_default_tap),
+ },
+ [1] = {
+ .ds = 2,
+ .round_mode = 0,
+ .tap = lpf2_default_tap,
+ .tap_num = ARRAY_SIZE(lpf2_default_tap),
+ },
+ [2] = {
+ .ds = 2,
+ .round_mode = 1,
+ .tap = lpf3_default_tap,
+ .tap_num = ARRAY_SIZE(lpf3_default_tap)
+ },
+ },
+};
+
+static const struct axg_pdm_cfg axg_pdm_config = {
+ .filters = &axg_default_filters,
+ .sys_rate = 250000000,
+};
+
+static const struct of_device_id axg_pdm_of_match[] = {
+ {
+ .compatible = "amlogic,axg-pdm",
+ .data = &axg_pdm_config,
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, axg_pdm_of_match);
+
+static int axg_pdm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct axg_pdm *priv;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->cfg = of_device_get_match_data(dev);
+ if (!priv->cfg) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ priv->map = devm_regmap_init_mmio(dev, regs, &axg_pdm_regmap_cfg);
+ if (IS_ERR(priv->map)) {
+ dev_err(dev, "failed to init regmap: %ld\n",
+ PTR_ERR(priv->map));
+ return PTR_ERR(priv->map);
+ }
+
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ ret = PTR_ERR(priv->pclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get pclk: %d\n", ret);
+ return ret;
+ }
+
+ priv->dclk = devm_clk_get(dev, "dclk");
+ if (IS_ERR(priv->dclk)) {
+ ret = PTR_ERR(priv->dclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get dclk: %d\n", ret);
+ return ret;
+ }
+
+ priv->sysclk = devm_clk_get(dev, "sysclk");
+ if (IS_ERR(priv->sysclk)) {
+ ret = PTR_ERR(priv->sysclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get dclk: %d\n", ret);
+ return ret;
+ }
+
+ return devm_snd_soc_register_component(dev, &axg_pdm_component_drv,
+ &axg_pdm_dai_drv, 1);
+}
+
+static struct platform_driver axg_pdm_pdrv = {
+ .probe = axg_pdm_probe,
+ .driver = {
+ .name = "axg-pdm",
+ .of_match_table = axg_pdm_of_match,
+ },
+};
+module_platform_driver(axg_pdm_pdrv);
+
+MODULE_DESCRIPTION("Amlogic AXG PDM Input driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
index 7b8baf46d968..585ce030b79b 100644
--- a/sound/soc/meson/axg-tdm-interface.c
+++ b/sound/soc/meson/axg-tdm-interface.c
@@ -42,6 +42,7 @@ int axg_tdm_set_tdm_slots(struct snd_soc_dai *dai, u32 *tx_mask,
struct axg_tdm_stream *rx = (struct axg_tdm_stream *)
dai->capture_dma_data;
unsigned int tx_slots, rx_slots;
+ unsigned int fmt = 0;
tx_slots = axg_tdm_slots_total(tx_mask);
rx_slots = axg_tdm_slots_total(rx_mask);
@@ -52,38 +53,45 @@ int axg_tdm_set_tdm_slots(struct snd_soc_dai *dai, u32 *tx_mask,
return -EINVAL;
}
- /*
- * Amend the dai driver channel number and let dpcm channel merge do
- * its job
- */
- if (tx) {
- tx->mask = tx_mask;
- dai->driver->playback.channels_max = tx_slots;
- }
-
- if (rx) {
- rx->mask = rx_mask;
- dai->driver->capture.channels_max = rx_slots;
- }
-
iface->slots = slots;
switch (slot_width) {
case 0:
- /* defaults width to 32 if not provided */
- iface->slot_width = 32;
- break;
- case 8:
- case 16:
- case 24:
+ slot_width = 32;
+ /* Fall-through */
case 32:
- iface->slot_width = slot_width;
+ fmt |= SNDRV_PCM_FMTBIT_S32_LE;
+ /* Fall-through */
+ case 24:
+ fmt |= SNDRV_PCM_FMTBIT_S24_LE;
+ fmt |= SNDRV_PCM_FMTBIT_S20_LE;
+ /* Fall-through */
+ case 16:
+ fmt |= SNDRV_PCM_FMTBIT_S16_LE;
+ /* Fall-through */
+ case 8:
+ fmt |= SNDRV_PCM_FMTBIT_S8;
break;
default:
dev_err(dai->dev, "unsupported slot width: %d\n", slot_width);
return -EINVAL;
}
+ iface->slot_width = slot_width;
+
+ /* Amend the dai driver and let dpcm merge do its job */
+ if (tx) {
+ tx->mask = tx_mask;
+ dai->driver->playback.channels_max = tx_slots;
+ dai->driver->playback.formats = fmt;
+ }
+
+ if (rx) {
+ rx->mask = rx_mask;
+ dai->driver->capture.channels_max = rx_slots;
+ dai->driver->capture.formats = fmt;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(axg_tdm_set_tdm_slots);
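The rewritten switch builds the format mask cumulatively through the
fall-throughs: a given slot width also admits every narrower sample format.
A standalone sketch of the idiom, with a hypothetical helper name and the
SNDRV_PCM_FMTBIT_* constants from <sound/pcm.h>:

static u64 tdm_slot_width_to_formats(unsigned int slot_width)
{
	u64 fmt = 0;

	switch (slot_width) {
	case 32:
		fmt |= SNDRV_PCM_FMTBIT_S32_LE;
		/* Fall-through */
	case 24:
		fmt |= SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S20_LE;
		/* Fall-through */
	case 16:
		fmt |= SNDRV_PCM_FMTBIT_S16_LE;
		/* Fall-through */
	case 8:
		fmt |= SNDRV_PCM_FMTBIT_S8;
		break;
	}

	return fmt;	/* e.g. slot_width 24 -> S24|S20|S16|S8 */
}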
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index 81b09d740ed9..6384bb6dacfd 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -356,7 +356,7 @@ static int nuc900_ac97_drvprobe(struct platform_device *pdev)
if (ret)
goto out;
- ret = snd_soc_register_component(&pdev->dev, &nuc900_ac97_component,
+ ret = devm_snd_soc_register_component(&pdev->dev, &nuc900_ac97_component,
&nuc900_ac97_dai, 1);
if (ret)
goto out;
@@ -373,8 +373,6 @@ out:
static int nuc900_ac97_drvremove(struct platform_device *pdev)
{
- snd_soc_unregister_component(&pdev->dev);
-
nuc900_ac97_data = NULL;
snd_soc_set_ac97_ops(NULL);
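This change, like the omap-hdmi-audio and sh/hac changes below, follows one
pattern: switching to devm_snd_soc_register_component() ties the component's
lifetime to the struct device, so the explicit unregister call in the remove
and error paths can simply be dropped. A minimal sketch with hypothetical
foo_* names:

static int foo_probe(struct platform_device *pdev)
{
	/* devres-managed: unregistered automatically on driver unbind */
	return devm_snd_soc_register_component(&pdev->dev, &foo_component,
					       &foo_dai, 1);
}

static int foo_remove(struct platform_device *pdev)
{
	/* no snd_soc_unregister_component() needed here anymore */
	return 0;
}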
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index 8a99a8837dc9..673a9eb153b2 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -348,7 +348,7 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
default:
return -EINVAL;
}
- ret = snd_soc_register_component(ad->dssdev, &omap_hdmi_component,
+ ret = devm_snd_soc_register_component(ad->dssdev, &omap_hdmi_component,
dai_drv, 1);
if (ret)
return ret;
@@ -383,7 +383,6 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
ret = snd_soc_register_card(card);
if (ret) {
dev_err(dev, "snd_soc_register_card failed (%d)\n", ret);
- snd_soc_unregister_component(ad->dssdev);
return ret;
}
@@ -400,7 +399,6 @@ static int omap_hdmi_audio_remove(struct platform_device *pdev)
struct hdmi_audio_data *ad = platform_get_drvdata(pdev);
snd_soc_unregister_card(ad->card);
- snd_soc_unregister_component(ad->dssdev);
return 0;
}
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 776e148b0aa2..943b44de1464 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -19,14 +19,13 @@ config SND_MMP_SOC
config SND_PXA2XX_AC97
tristate
- select SND_AC97_CODEC
config SND_PXA2XX_SOC_AC97
tristate
- select AC97_BUS
+ select AC97_BUS_NEW
select SND_PXA2XX_LIB
select SND_PXA2XX_LIB_AC97
- select SND_SOC_AC97_BUS
+ select SND_SOC_AC97_BUS_NEW
config SND_PXA2XX_SOC_I2S
select SND_PXA2XX_LIB
@@ -80,6 +79,7 @@ config SND_PXA2XX_SOC_TOSA
tristate "SoC AC97 Audio support for Tosa"
depends on SND_PXA2XX_SOC && MACH_TOSA
depends on MFD_TC6393XB
+ depends on !AC97_BUS
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -89,6 +89,7 @@ config SND_PXA2XX_SOC_TOSA
config SND_PXA2XX_SOC_E740
tristate "SoC AC97 Audio support for e740"
depends on SND_PXA2XX_SOC && MACH_E740
+ depends on !AC97_BUS
select SND_SOC_WM9705
select SND_PXA2XX_SOC_AC97
help
@@ -98,6 +99,7 @@ config SND_PXA2XX_SOC_E740
config SND_PXA2XX_SOC_E750
tristate "SoC AC97 Audio support for e750"
depends on SND_PXA2XX_SOC && MACH_E750
+ depends on !AC97_BUS
select SND_SOC_WM9705
select SND_PXA2XX_SOC_AC97
help
@@ -107,6 +109,7 @@ config SND_PXA2XX_SOC_E750
config SND_PXA2XX_SOC_E800
tristate "SoC AC97 Audio support for e800"
depends on SND_PXA2XX_SOC && MACH_E800
+ depends on !AC97_BUS
select SND_SOC_WM9712
select SND_PXA2XX_SOC_AC97
help
@@ -117,6 +120,7 @@ config SND_PXA2XX_SOC_EM_X270
tristate "SoC Audio support for CompuLab EM-x270, eXeda and CM-X300"
depends on SND_PXA2XX_SOC && (MACH_EM_X270 || MACH_EXEDA || \
MACH_CM_X300)
+ depends on !AC97_BUS
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -127,6 +131,7 @@ config SND_PXA2XX_SOC_PALM27X
bool "SoC Audio support for Palm T|X, T5, E2 and LifeDrive"
depends on SND_PXA2XX_SOC && (MACH_PALMLD || MACH_PALMTX || \
MACH_PALMT5 || MACH_PALMTE2)
+ depends on !AC97_BUS
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -156,6 +161,7 @@ config SND_SOC_TTC_DKB
config SND_SOC_ZYLONITE
tristate "SoC Audio support for Marvell Zylonite"
depends on SND_PXA2XX_SOC && MACH_ZYLONITE
+ depends on !AC97_BUS
select SND_PXA2XX_SOC_AC97
select SND_PXA_SOC_SSP
select SND_SOC_WM9713
@@ -195,6 +201,7 @@ config SND_PXA2XX_SOC_MAGICIAN
config SND_PXA2XX_SOC_MIOA701
tristate "SoC Audio support for MIO A701"
depends on SND_PXA2XX_SOC && MACH_MIOA701
+ depends on !AC97_BUS
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9713
help
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 69033e1a84e6..adcf8ba9d287 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -103,6 +103,9 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream,
pxa_ssp_disable(ssp);
}
+ if (priv->extclk)
+ clk_prepare_enable(priv->extclk);
+
dma = kzalloc(sizeof(struct snd_dmaengine_dai_dma_data), GFP_KERNEL);
if (!dma)
return -ENOMEM;
@@ -125,6 +128,9 @@ static void pxa_ssp_shutdown(struct snd_pcm_substream *substream,
clk_disable_unprepare(ssp->clk);
}
+ if (priv->extclk)
+ clk_disable_unprepare(priv->extclk);
+
kfree(snd_soc_dai_get_dma_data(cpu_dai, substream));
snd_soc_dai_set_dma_data(cpu_dai, substream, NULL);
}
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 9f779657bc86..f8a3aa6c6d4e 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -17,6 +17,7 @@
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
+#include <sound/ac97/controller.h>
#include <sound/core.h>
#include <sound/ac97_codec.h>
#include <sound/soc.h>
@@ -27,43 +28,35 @@
#include <mach/regs-ac97.h>
#include <mach/audio.h>
-static void pxa2xx_ac97_warm_reset(struct snd_ac97 *ac97)
+static void pxa2xx_ac97_warm_reset(struct ac97_controller *adrv)
{
pxa2xx_ac97_try_warm_reset();
pxa2xx_ac97_finish_reset();
}
-static void pxa2xx_ac97_cold_reset(struct snd_ac97 *ac97)
+static void pxa2xx_ac97_cold_reset(struct ac97_controller *adrv)
{
pxa2xx_ac97_try_cold_reset();
pxa2xx_ac97_finish_reset();
}
-static unsigned short pxa2xx_ac97_legacy_read(struct snd_ac97 *ac97,
- unsigned short reg)
+static int pxa2xx_ac97_read_actrl(struct ac97_controller *adrv, int slot,
+ unsigned short reg)
{
- int ret;
-
- ret = pxa2xx_ac97_read(ac97->num, reg);
- if (ret < 0)
- return 0;
- else
- return (unsigned short)(ret & 0xffff);
+ return pxa2xx_ac97_read(slot, reg);
}
-static void pxa2xx_ac97_legacy_write(struct snd_ac97 *ac97,
- unsigned short reg, unsigned short val)
+static int pxa2xx_ac97_write_actrl(struct ac97_controller *adrv, int slot,
+ unsigned short reg, unsigned short val)
{
- int ret;
-
- ret = pxa2xx_ac97_write(ac97->num, reg, val);
+ return pxa2xx_ac97_write(slot, reg, val);
}
-static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
- .read = pxa2xx_ac97_legacy_read,
- .write = pxa2xx_ac97_legacy_write,
+static struct ac97_controller_ops pxa2xx_ac97_ops = {
+ .read = pxa2xx_ac97_read_actrl,
+ .write = pxa2xx_ac97_write_actrl,
.warm_reset = pxa2xx_ac97_warm_reset,
.reset = pxa2xx_ac97_cold_reset,
};
@@ -233,6 +226,9 @@ MODULE_DEVICE_TABLE(of, pxa2xx_ac97_dt_ids);
static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
{
int ret;
+ struct ac97_controller *ctrl;
+ pxa2xx_audio_ops_t *pdata = pdev->dev.platform_data;
+ void **codecs_pdata;
if (pdev->id != -1) {
dev_err(&pdev->dev, "PXA2xx has only one AC97 port.\n");
@@ -245,10 +241,14 @@ static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
return ret;
}
- ret = snd_soc_set_ac97_ops(&pxa2xx_ac97_ops);
- if (ret != 0)
- return ret;
+ codecs_pdata = pdata ? pdata->codec_pdata : NULL;
+ ctrl = snd_ac97_controller_register(&pxa2xx_ac97_ops, &pdev->dev,
+ AC97_SLOTS_AVAILABLE_ALL,
+ codecs_pdata);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+ platform_set_drvdata(pdev, ctrl);
/* Punt most of the init to the SoC probe; we may need the machine
* driver to do interesting things with the clocking to get us up
* and running.
@@ -259,8 +259,10 @@ static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
static int pxa2xx_ac97_dev_remove(struct platform_device *pdev)
{
+ struct ac97_controller *ctrl = platform_get_drvdata(pdev);
+
snd_soc_unregister_component(&pdev->dev);
- snd_soc_set_ac97_ops(NULL);
+ snd_ac97_controller_unregister(ctrl);
pxa2xx_ac97_hw_remove(pdev);
return 0;
}
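The pxa2xx conversion replaces the global snd_soc_set_ac97_ops() hook with
the newer AC97 bus, where each controller registers its ops, device and
available slot mask, and the slot number is passed to read/write directly
instead of being derived from the codec. A minimal registration sketch
mirroring the calls used above (the foo_* names are hypothetical):

static int foo_ac97_probe(struct platform_device *pdev)
{
	struct ac97_controller *ctrl;

	ctrl = snd_ac97_controller_register(&foo_ac97_ops, &pdev->dev,
					    AC97_SLOTS_AVAILABLE_ALL, NULL);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);
	platform_set_drvdata(pdev, ctrl);

	return 0;
}

static int foo_ac97_remove(struct platform_device *pdev)
{
	snd_ac97_controller_unregister(platform_get_drvdata(pdev));

	return 0;
}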
diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c
index 1543e85629f8..fb45f396ab4a 100644
--- a/sound/soc/qcom/apq8096.c
+++ b/sound/soc/qcom/apq8096.c
@@ -25,13 +25,12 @@ static int apq8096_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
static void apq8096_add_be_ops(struct snd_soc_card *card)
{
- struct snd_soc_dai_link *link = card->dai_link;
- int i, num_links = card->num_links;
+ struct snd_soc_dai_link *link;
+ int i;
- for (i = 0; i < num_links; i++) {
+ for_each_card_prelinks(card, i, link) {
if (link->no_pcm == 1)
link->be_hw_params_fixup = apq8096_be_hw_params_fixup;
- link++;
}
}
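for_each_card_prelinks() iterates the DAI links declared statically with the
card, replacing the manual pointer walk here and in the sdm845 and
tm2_wm5110 changes below. A hedged sketch of the open-coded loop it replaces
(not the exact macro body):

/* roughly what the old code did by hand */
for (i = 0; i < card->num_links; i++) {
	link = &card->dai_link[i];
	/* ... per-link fixups ... */
}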
diff --git a/sound/soc/qcom/qdsp6/q6adm.c b/sound/soc/qcom/qdsp6/q6adm.c
index 932c3ebfd252..da242515e146 100644
--- a/sound/soc/qcom/qdsp6/q6adm.c
+++ b/sound/soc/qcom/qdsp6/q6adm.c
@@ -2,25 +2,24 @@
// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited
-#include <linux/slab.h>
-#include <linux/wait.h>
-#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/sched.h>
#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/kref.h>
-#include <linux/wait.h>
-#include <linux/soc/qcom/apr.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/wait.h>
#include <sound/asound.h>
#include "q6adm.h"
#include "q6afe.h"
#include "q6core.h"
-#include "q6dsp-errno.h"
#include "q6dsp-common.h"
+#include "q6dsp-errno.h"
#define ADM_CMD_DEVICE_OPEN_V5 0x00010326
#define ADM_CMDRSP_DEVICE_OPEN_V5 0x00010329
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 9db9a2944ef2..a16c71c03058 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -8,7 +8,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/soc.h>
-#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/pcm.h>
#include <asm/dma.h>
@@ -319,10 +318,11 @@ static int q6asm_dai_open(struct snd_pcm_substream *substream)
prtd->audio_client = q6asm_audio_client_alloc(dev,
(q6asm_cb)event_handler, prtd, stream_id,
LEGACY_PCM_MODE);
- if (!prtd->audio_client) {
+ if (IS_ERR(prtd->audio_client)) {
pr_info("%s: Could not allocate memory\n", __func__);
+ ret = PTR_ERR(prtd->audio_client);
kfree(prtd);
- return -ENOMEM;
+ return ret;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -493,7 +493,7 @@ static int q6asm_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
}
}
- return ret;
+ return 0;
}
static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
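q6asm_audio_client_alloc() now reports failures through the ERR_PTR
convention, so the caller checks IS_ERR() and propagates the encoded errno
with PTR_ERR() instead of assuming -ENOMEM. A minimal sketch of the
convention, assuming <linux/err.h> (foo_alloc() is hypothetical):

struct foo *foo_alloc(void)
{
	if (failed)
		return ERR_PTR(-EPROBE_DEFER);	/* errno encoded in pointer */
	/* ... */
}

/* caller side: */
obj = foo_alloc();
if (IS_ERR(obj))
	return PTR_ERR(obj);	/* propagate the real error code */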
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
index 2b2c7233bb5f..e1cfa846a1dc 100644
--- a/sound/soc/qcom/qdsp6/q6asm.c
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -11,7 +11,6 @@
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <uapi/sound/asound.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/sound/soc/qcom/qdsp6/q6core.c b/sound/soc/qcom/qdsp6/q6core.c
index 06f03a5fe9bd..cdfc8ab6cfc0 100644
--- a/sound/soc/qcom/qdsp6/q6core.c
+++ b/sound/soc/qcom/qdsp6/q6core.c
@@ -10,7 +10,6 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/jiffies.h>
-#include <linux/wait.h>
#include <linux/soc/qcom/apr.h>
#include "q6core.h"
#include "q6dsp-errno.h"
@@ -105,12 +104,10 @@ static int q6core_callback(struct apr_device *adev, struct apr_resp_pkt *data)
bytes = sizeof(*fwk) + fwk->num_services *
sizeof(fwk->svc_api_info[0]);
- core->fwk_version = kzalloc(bytes, GFP_ATOMIC);
+ core->fwk_version = kmemdup(data->payload, bytes, GFP_ATOMIC);
if (!core->fwk_version)
return -ENOMEM;
- memcpy(core->fwk_version, data->payload, bytes);
-
core->fwk_version_supported = true;
core->resp_received = true;
@@ -124,12 +121,10 @@ static int q6core_callback(struct apr_device *adev, struct apr_resp_pkt *data)
len = sizeof(*v) + v->num_services * sizeof(v->svc_api_info[0]);
- core->svc_version = kzalloc(len, GFP_ATOMIC);
+ core->svc_version = kmemdup(data->payload, len, GFP_ATOMIC);
if (!core->svc_version)
return -ENOMEM;
- memcpy(core->svc_version, data->payload, len);
-
core->get_version_supported = true;
core->resp_received = true;
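Both hunks above collapse a kzalloc() + memcpy() pair into kmemdup(), which
also drops the pointless zeroing since every byte is overwritten anyway. The
shape of the conversion:

/* before */
buf = kzalloc(len, GFP_ATOMIC);
if (!buf)
	return -ENOMEM;
memcpy(buf, payload, len);

/* after */
buf = kmemdup(payload, len, GFP_ATOMIC);
if (!buf)
	return -ENOMEM;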
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index dc94c5c53788..c6b51571be94 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -960,8 +960,10 @@ static int msm_routing_probe(struct snd_soc_component *c)
{
int i;
- for (i = 0; i < MAX_SESSIONS; i++)
+ for (i = 0; i < MAX_SESSIONS; i++) {
routing_data->sessions[i].port_id = -1;
+ routing_data->sessions[i].fedai_id = -1;
+ }
return 0;
}
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 2a781d87ee65..9effbecc571f 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -195,15 +195,14 @@ static int sdm845_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
static void sdm845_add_be_ops(struct snd_soc_card *card)
{
- struct snd_soc_dai_link *link = card->dai_link;
- int i, num_links = card->num_links;
+ struct snd_soc_dai_link *link;
+ int i;
- for (i = 0; i < num_links; i++) {
+ for_each_card_prelinks(card, i, link) {
if (link->no_pcm == 1) {
link->ops = &sdm845_be_ops;
link->be_hw_params_fixup = sdm845_be_hw_params_fixup;
}
- link++;
}
}
diff --git a/sound/soc/rockchip/rk3288_hdmi_analog.c b/sound/soc/rockchip/rk3288_hdmi_analog.c
index 929b3fe289b0..a472d5eb2950 100644
--- a/sound/soc/rockchip/rk3288_hdmi_analog.c
+++ b/sound/soc/rockchip/rk3288_hdmi_analog.c
@@ -286,7 +286,6 @@ static struct platform_driver rockchip_sound_driver = {
.probe = snd_rk_mc_probe,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = rockchip_sound_of_match,
},
diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
index f77538319221..9e7b5fa4cf59 100644
--- a/sound/soc/rockchip/rockchip_pcm.c
+++ b/sound/soc/rockchip/rockchip_pcm.c
@@ -21,7 +21,8 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_RESUME,
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_INTERLEAVED,
.period_bytes_min = 32,
.period_bytes_max = 8192,
.periods_min = 1,
diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
index 43332c32d7e9..dc93941e01c3 100644
--- a/sound/soc/samsung/tm2_wm5110.c
+++ b/sound/soc/samsung/tm2_wm5110.c
@@ -491,6 +491,7 @@ static int tm2_probe(struct platform_device *pdev)
struct snd_soc_card *card = &tm2_card;
struct tm2_machine_priv *priv;
struct of_phandle_args args;
+ struct snd_soc_dai_link *dai_link;
int num_codecs, ret, i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -558,18 +559,18 @@ static int tm2_probe(struct platform_device *pdev)
}
/* Initialize WM5110 - I2S and HDMI - I2S1 DAI links */
- for (i = 0; i < card->num_links; i++) {
+ for_each_card_prelinks(card, i, dai_link) {
unsigned int dai_index = 0; /* WM5110 */
- card->dai_link[i].cpu_name = NULL;
- card->dai_link[i].platform_name = NULL;
+ dai_link->cpu_name = NULL;
+ dai_link->platform_name = NULL;
if (num_codecs > 1 && i == card->num_links - 1)
dai_index = 1; /* HDMI */
- card->dai_link[i].codec_of_node = codec_dai_node[dai_index];
- card->dai_link[i].cpu_of_node = cpu_dai_node[dai_index];
- card->dai_link[i].platform_of_node = cpu_dai_node[dai_index];
+ dai_link->codec_of_node = codec_dai_node[dai_index];
+ dai_link->cpu_of_node = cpu_dai_node[dai_index];
+ dai_link->platform_of_node = cpu_dai_node[dai_index];
}
if (num_codecs > 1) {
diff --git a/sound/soc/sh/hac.c b/sound/soc/sh/hac.c
index c2b496398e6b..17622ceb98c0 100644
--- a/sound/soc/sh/hac.c
+++ b/sound/soc/sh/hac.c
@@ -319,13 +319,12 @@ static int hac_soc_platform_probe(struct platform_device *pdev)
if (ret != 0)
return ret;
- return snd_soc_register_component(&pdev->dev, &sh4_hac_component,
+ return devm_snd_soc_register_component(&pdev->dev, &sh4_hac_component,
sh4_hac_dai, ARRAY_SIZE(sh4_hac_dai));
}
static int hac_soc_platform_remove(struct platform_device *pdev)
{
- snd_soc_unregister_component(&pdev->dev);
snd_soc_set_ac97_ops(NULL);
return 0;
}
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 3a3064dda57f..28327dd2c6cb 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -462,6 +462,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
goto rsnd_adg_get_clkout_end;
req_size = prop->length / sizeof(u32);
+ if (req_size > REQ_SIZE) {
+ dev_err(dev,
+ "too many clock-frequency, use top %d\n", REQ_SIZE);
+ req_size = REQ_SIZE;
+ }
of_property_read_u32_array(np, "clock-frequency", req_rate, req_size);
req_48kHz_rate = 0;
@@ -577,7 +582,7 @@ static void rsnd_adg_clk_dbg_info(struct rsnd_priv *priv, struct rsnd_adg *adg)
int i;
for_each_rsnd_clk(clk, adg, i)
- dev_dbg(dev, "%s : %p : %ld\n",
+ dev_dbg(dev, "%s : %pa : %ld\n",
clk_name[i], clk, clk_get_rate(clk));
dev_dbg(dev, "BRGCKR = 0x%08x, BRRA/BRRB = 0x%x/0x%x\n",
@@ -590,7 +595,7 @@ static void rsnd_adg_clk_dbg_info(struct rsnd_priv *priv, struct rsnd_adg *adg)
* by BRGCKR::BRGCKR_31
*/
for_each_rsnd_clkout(clk, adg, i)
- dev_dbg(dev, "clkout %d : %p : %ld\n", i,
+ dev_dbg(dev, "clkout %d : %pa : %ld\n", i,
clk, clk_get_rate(clk));
}
#else
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f8425d8b44d2..f930f51b686f 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -102,7 +102,9 @@
#include "rsnd.h"
#define RSND_RATES SNDRV_PCM_RATE_8000_192000
-#define RSND_FMTS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
+#define RSND_FMTS (SNDRV_PCM_FMTBIT_S8 |\
+ SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE)
static const struct of_device_id rsnd_of_match[] = {
{ .compatible = "renesas,rcar_sound-gen1", .data = (void *)RSND_GEN1 },
@@ -280,6 +282,8 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
struct device *dev = rsnd_priv_to_dev(priv);
switch (snd_pcm_format_width(runtime->format)) {
+ case 8:
+ return 16 << 16;
case 16:
return 8 << 16;
case 24:
@@ -331,7 +335,7 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
target = cmd ? cmd : ssiu;
}
- /* Non target mod or 24bit data needs normal DALIGN */
+ /* A non-target mod or non-16bit data needs the normal DALIGN */
if ((snd_pcm_format_width(runtime->format) != 16) ||
(mod != target))
return 0x76543210;
@@ -367,7 +371,7 @@ u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
* HW 24bit data is located as 0x******00
*
*/
- if (snd_pcm_format_width(runtime->format) == 16)
+ if (snd_pcm_format_width(runtime->format) != 24)
return 0;
for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
@@ -478,7 +482,7 @@ static int rsnd_status_update(u32 *status,
(func_call && (mod)->ops->fn) ? #fn : ""); \
if (func_call && (mod)->ops->fn) \
tmp = (mod)->ops->fn(mod, io, param); \
- if (tmp) \
+ if (tmp && (tmp != -EPROBE_DEFER)) \
dev_err(dev, "%s[%d] : %s error %d\n", \
rsnd_mod_name(mod), rsnd_mod_id(mod), \
#fn, tmp); \
@@ -540,6 +544,14 @@ int rsnd_rdai_ssi_lane_ctrl(struct rsnd_dai *rdai,
return rdai->ssi_lane;
}
+int rsnd_rdai_width_ctrl(struct rsnd_dai *rdai, int width)
+{
+ if (width > 0)
+ rdai->chan_width = width;
+
+ return rdai->chan_width;
+}
+
struct rsnd_dai *rsnd_rdai_get(struct rsnd_priv *priv, int id)
{
if ((id < 0) || (id >= rsnd_rdai_nr(priv)))
@@ -681,6 +693,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
rdai->frm_clk_inv = 0;
break;
case SND_SOC_DAIFMT_LEFT_J:
+ case SND_SOC_DAIFMT_DSP_B:
rdai->sys_delay = 1;
rdai->data_alignment = 0;
rdai->frm_clk_inv = 1;
@@ -690,6 +703,11 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
rdai->data_alignment = 1;
rdai->frm_clk_inv = 1;
break;
+ case SND_SOC_DAIFMT_DSP_A:
+ rdai->sys_delay = 0;
+ rdai->data_alignment = 0;
+ rdai->frm_clk_inv = 1;
+ break;
}
/* set clock inversion */
@@ -720,6 +738,16 @@ static int rsnd_soc_set_dai_tdm_slot(struct snd_soc_dai *dai,
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
struct device *dev = rsnd_priv_to_dev(priv);
+ switch (slot_width) {
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ /* use default */
+ slot_width = 32;
+ }
+
switch (slots) {
case 2:
case 6:
@@ -727,6 +755,7 @@ static int rsnd_soc_set_dai_tdm_slot(struct snd_soc_dai *dai,
/* TDM Extend Mode */
rsnd_rdai_channels_set(rdai, slots);
rsnd_rdai_ssi_lane_set(rdai, 1);
+ rsnd_rdai_width_set(rdai, slot_width);
break;
default:
dev_err(dev, "unsupported TDM slots (%d)\n", slots);
@@ -755,7 +784,7 @@ static unsigned int rsnd_soc_hw_rate_list[] = {
192000,
};
-static int rsnd_soc_hw_rule(struct rsnd_priv *priv,
+static int rsnd_soc_hw_rule(struct rsnd_dai *rdai,
unsigned int *list, int list_num,
struct snd_interval *baseline, struct snd_interval *iv)
{
@@ -772,14 +801,14 @@ static int rsnd_soc_hw_rule(struct rsnd_priv *priv,
if (!snd_interval_test(iv, list[i]))
continue;
- rate = rsnd_ssi_clk_query(priv,
+ rate = rsnd_ssi_clk_query(rdai,
baseline->min, list[i], NULL);
if (rate > 0) {
p.min = min(p.min, list[i]);
p.max = max(p.max, list[i]);
}
- rate = rsnd_ssi_clk_query(priv,
+ rate = rsnd_ssi_clk_query(rdai,
baseline->max, list[i], NULL);
if (rate > 0) {
p.min = min(p.min, list[i]);
@@ -790,17 +819,14 @@ static int rsnd_soc_hw_rule(struct rsnd_priv *priv,
return snd_interval_refine(iv, &p);
}
-static int __rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule,
- int is_play)
+static int rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
{
struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval ic;
- struct snd_soc_dai *dai = rule->private;
- struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
- struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
- struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
+ struct rsnd_dai_stream *io = rule->private;
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
/*
* possible sampling rate limitation is same as
@@ -811,34 +837,19 @@ static int __rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
ic.min =
ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
- return rsnd_soc_hw_rule(priv, rsnd_soc_hw_rate_list,
+ return rsnd_soc_hw_rule(rdai, rsnd_soc_hw_rate_list,
ARRAY_SIZE(rsnd_soc_hw_rate_list),
&ic, ir);
}
-static int rsnd_soc_hw_rule_rate_playback(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- return __rsnd_soc_hw_rule_rate(params, rule, 1);
-}
-
-static int rsnd_soc_hw_rule_rate_capture(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- return __rsnd_soc_hw_rule_rate(params, rule, 0);
-}
-
-static int __rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule,
- int is_play)
+static int rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
{
struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval ic;
- struct snd_soc_dai *dai = rule->private;
- struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
- struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
- struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
+ struct rsnd_dai_stream *io = rule->private;
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
/*
* possible sampling rate limitation is same as
@@ -849,23 +860,11 @@ static int __rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
ic.min =
ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
- return rsnd_soc_hw_rule(priv, rsnd_soc_hw_channels_list,
+ return rsnd_soc_hw_rule(rdai, rsnd_soc_hw_channels_list,
ARRAY_SIZE(rsnd_soc_hw_channels_list),
ir, &ic);
}
-static int rsnd_soc_hw_rule_channels_playback(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- return __rsnd_soc_hw_rule_channels(params, rule, 1);
-}
-
-static int rsnd_soc_hw_rule_channels_capture(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- return __rsnd_soc_hw_rule_channels(params, rule, 0);
-}
-
static const struct snd_pcm_hardware rsnd_pcm_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP |
@@ -882,12 +881,10 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
- struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
struct snd_pcm_hw_constraint_list *constraint = &rdai->constraint;
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int max_channels = rsnd_rdai_channels_get(rdai);
- int ret;
int i;
rsnd_dai_stream_init(io, substream);
@@ -922,25 +919,16 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- is_play ? rsnd_soc_hw_rule_rate_playback :
- rsnd_soc_hw_rule_rate_capture,
- dai,
+ rsnd_soc_hw_rule_rate,
+ is_play ? &rdai->playback : &rdai->capture,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- is_play ? rsnd_soc_hw_rule_channels_playback :
- rsnd_soc_hw_rule_channels_capture,
- dai,
+ rsnd_soc_hw_rule_channels,
+ is_play ? &rdai->playback : &rdai->capture,
SNDRV_PCM_HW_PARAM_RATE, -1);
}
- /*
- * call rsnd_dai_call without spinlock
- */
- ret = rsnd_dai_call(nolock_start, io, priv);
- if (ret < 0)
- rsnd_dai_call(nolock_stop, io, priv);
-
- return ret;
+ return 0;
}
static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
@@ -953,17 +941,28 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
/*
* call rsnd_dai_call without spinlock
*/
- rsnd_dai_call(nolock_stop, io, priv);
+ rsnd_dai_call(cleanup, io, priv);
rsnd_dai_stream_quit(io);
}
+static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct rsnd_priv *priv = rsnd_dai_to_priv(dai);
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+
+ return rsnd_dai_call(prepare, io, priv);
+}
+
static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
.startup = rsnd_soc_dai_startup,
.shutdown = rsnd_soc_dai_shutdown,
.trigger = rsnd_soc_dai_trigger,
.set_fmt = rsnd_soc_dai_set_fmt,
.set_tdm_slot = rsnd_soc_set_dai_tdm_slot,
+ .prepare = rsnd_soc_dai_prepare,
};
void rsnd_parse_connect_common(struct rsnd_dai *rdai,
@@ -1072,6 +1071,7 @@ static void __rsnd_dai_probe(struct rsnd_priv *priv,
rdai->capture.rdai = rdai;
rsnd_rdai_channels_set(rdai, 2); /* default 2ch */
rsnd_rdai_ssi_lane_set(rdai, 1); /* default 1lane */
+ rsnd_rdai_width_set(rdai, 32); /* default 32bit width */
for (io_i = 0;; io_i++) {
playback = of_parse_phandle(dai_np, "playback", io_i);
@@ -1263,8 +1263,15 @@ int rsnd_kctrl_accept_anytime(struct rsnd_dai_stream *io)
int rsnd_kctrl_accept_runtime(struct rsnd_dai_stream *io)
{
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ struct rsnd_priv *priv = rsnd_io_to_priv(io);
+ struct device *dev = rsnd_priv_to_dev(priv);
- return !!runtime;
+ if (!runtime) {
+ dev_warn(dev, "Can't update kctrl when idle\n");
+ return 0;
+ }
+
+ return 1;
}
struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg)
@@ -1550,6 +1557,14 @@ exit_snd_probe:
rsnd_dai_call(remove, &rdai->capture, priv);
}
+ /*
+ * adg is a very special mod which can't use rsnd_dai_call(remove),
+ * and it registers the ADG clock on probe.
+ * It should be unregistered if probe failed.
+ * Mainly this is assuming the -EPROBE_DEFER case.
+ */
+ rsnd_adg_remove(priv);
+
return ret;
}
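Taken together, the core.c changes replace the nolock_start/nolock_stop pair
with prepare/cleanup callbacks hooked into the standard ALSA call chain. A
sketch of the resulting stream lifecycle, assuming the usual ALSA ordering
(prepare can sleep, trigger runs under spinlock):

/*
 * startup()        constraints only, no mod calls anymore
 * prepare()        rsnd_dai_call(prepare): request DMA channel,
 *                  start SSI master clock (can sleep)
 * trigger(START)   under spinlock
 * trigger(STOP)    under spinlock
 * shutdown()       rsnd_dai_call(cleanup): release DMA channel
 */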
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
index 6a55aa753003..ad702377a6c3 100644
--- a/sound/soc/sh/rcar/ctu.c
+++ b/sound/soc/sh/rcar/ctu.c
@@ -258,7 +258,7 @@ static int rsnd_ctu_hw_params(struct rsnd_mod *mod,
struct snd_pcm_hw_params *be_params;
int stream = substream->stream;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
be_params = &dpcm->hw_params;
if (params_channels(fe_params) != params_channels(be_params))
ctu->channels = params_channels(be_params);
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index fe63ef8600d0..6d1947515dc8 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -106,9 +106,9 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- struct rsnd_priv *priv)
+static int rsnd_dmaen_cleanup(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
{
struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
@@ -116,7 +116,7 @@ static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
/*
* DMAEngine release uses mutex lock.
* Thus, it shouldn't be called under spinlock.
- * Let's call it under nolock_start
+ * Let's call it under prepare
*/
if (dmaen->chan)
dma_release_channel(dmaen->chan);
@@ -126,23 +126,22 @@ static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- struct rsnd_priv *priv)
+static int rsnd_dmaen_prepare(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
{
struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
struct device *dev = rsnd_priv_to_dev(priv);
- if (dmaen->chan) {
- dev_err(dev, "it already has dma channel\n");
- return -EIO;
- }
+ /* maybe suspended */
+ if (dmaen->chan)
+ return 0;
/*
* DMAEngine request uses mutex lock.
* Thus, it shouldn't be called under spinlock.
- * Let's call it under nolock_start
+ * Let's call it under prepare
*/
dmaen->chan = rsnd_dmaen_request_channel(io,
dma->mod_from,
@@ -241,6 +240,10 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
/* try to get DMAEngine channel */
chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
if (IS_ERR_OR_NULL(chan)) {
+ /* Propagate -EPROBE_DEFER so probing can be retried later */
+ if (PTR_ERR(chan) == -EPROBE_DEFER)
+ return PTR_ERR(chan);
+
/*
* DMA failed. try to PIO mode
* see
@@ -287,8 +290,8 @@ static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
static struct rsnd_mod_ops rsnd_dmaen_ops = {
.name = "audmac",
- .nolock_start = rsnd_dmaen_nolock_start,
- .nolock_stop = rsnd_dmaen_nolock_stop,
+ .prepare = rsnd_dmaen_prepare,
+ .cleanup = rsnd_dmaen_cleanup,
.start = rsnd_dmaen_start,
.stop = rsnd_dmaen_stop,
.pointer= rsnd_dmaen_pointer,
@@ -298,16 +301,26 @@ static struct rsnd_mod_ops rsnd_dmaen_ops = {
* Audio DMAC peri peri
*/
static const u8 gen2_id_table_ssiu[] = {
- 0x00, /* SSI00 */
- 0x04, /* SSI10 */
- 0x08, /* SSI20 */
- 0x0c, /* SSI3 */
- 0x0d, /* SSI4 */
- 0x0e, /* SSI5 */
- 0x0f, /* SSI6 */
- 0x10, /* SSI7 */
- 0x11, /* SSI8 */
- 0x12, /* SSI90 */
+ /* SSI00 ~ SSI07 */
+ 0x00, 0x01, 0x02, 0x03, 0x39, 0x3a, 0x3b, 0x3c,
+ /* SSI10 ~ SSI17 */
+ 0x04, 0x05, 0x06, 0x07, 0x3d, 0x3e, 0x3f, 0x40,
+ /* SSI20 ~ SSI27 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x41, 0x42, 0x43, 0x44,
+ /* SSI30 ~ SSI37 */
+ 0x0c, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
+ /* SSI40 ~ SSI47 */
+ 0x0d, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52,
+ /* SSI5 */
+ 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* SSI6 */
+ 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* SSI7 */
+ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* SSI8 */
+ 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* SSI90 ~ SSI97 */
+ 0x12, 0x13, 0x14, 0x15, 0x53, 0x54, 0x55, 0x56,
};
static const u8 gen2_id_table_scu[] = {
0x2d, /* SCU_SRCI0 */
@@ -333,18 +346,23 @@ static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
struct rsnd_mod *src = rsnd_io_to_mod_src(io);
struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
const u8 *entry = NULL;
- int id = rsnd_mod_id(mod);
+ int id = 255;
int size = 0;
if (mod == ssi) {
+ int busif = rsnd_ssi_get_busif(io);
+
entry = gen2_id_table_ssiu;
size = ARRAY_SIZE(gen2_id_table_ssiu);
+ id = (rsnd_mod_id(mod) * 8) + busif;
} else if (mod == src) {
entry = gen2_id_table_scu;
size = ARRAY_SIZE(gen2_id_table_scu);
+ id = rsnd_mod_id(mod);
} else if (mod == dvc) {
entry = gen2_id_table_cmd;
size = ARRAY_SIZE(gen2_id_table_cmd);
+ id = rsnd_mod_id(mod);
}
if ((!entry) || (size <= id)) {
@@ -378,7 +396,7 @@ static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
struct device *dev = rsnd_priv_to_dev(priv);
- dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);
+ dev_dbg(dev, "w 0x%px : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);
iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}
@@ -487,11 +505,11 @@ static struct rsnd_mod_ops rsnd_dmapp_ops = {
#define RDMA_SSI_I_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)
-#define RDMA_SSIU_I_N(addr, i) (addr ##_reg - 0x00441000 + (0x1000 * i))
-#define RDMA_SSIU_O_N(addr, i) (addr ##_reg - 0x00441000 + (0x1000 * i))
+#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400))
+#define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)
-#define RDMA_SSIU_I_P(addr, i) (addr ##_reg - 0x00141000 + (0x1000 * i))
-#define RDMA_SSIU_O_P(addr, i) (addr ##_reg - 0x00141000 + (0x1000 * i))
+#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400))
+#define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)
#define RDMA_SRC_I_N(addr, i) (addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i) (addr ##_reg - 0x004fc000 + (0x400 * i))
@@ -517,6 +535,7 @@ rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
!!rsnd_io_to_mod_mix(io) ||
!!rsnd_io_to_mod_ctu(io);
int id = rsnd_mod_id(mod);
+ int busif = rsnd_ssi_get_busif(io);
struct dma_addr {
dma_addr_t out_addr;
dma_addr_t in_addr;
@@ -533,25 +552,35 @@ rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
},
/* SSI */
/* Capture */
- {{{ RDMA_SSI_O_N(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 } },
+ {{{ RDMA_SSI_O_N(ssi, id), 0 },
+ { RDMA_SSIU_O_P(ssi, id, busif), 0 },
+ { RDMA_SSIU_O_P(ssi, id, busif), 0 } },
/* Playback */
- {{ 0, RDMA_SSI_I_N(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) } }
+ {{ 0, RDMA_SSI_I_N(ssi, id) },
+ { 0, RDMA_SSIU_I_P(ssi, id, busif) },
+ { 0, RDMA_SSIU_I_P(ssi, id, busif) } }
},
/* SSIU */
/* Capture */
- {{{ RDMA_SSIU_O_N(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 } },
+ {{{ RDMA_SSIU_O_N(ssi, id, busif), 0 },
+ { RDMA_SSIU_O_P(ssi, id, busif), 0 },
+ { RDMA_SSIU_O_P(ssi, id, busif), 0 } },
/* Playback */
- {{ 0, RDMA_SSIU_I_N(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) } } },
+ {{ 0, RDMA_SSIU_I_N(ssi, id, busif) },
+ { 0, RDMA_SSIU_I_P(ssi, id, busif) },
+ { 0, RDMA_SSIU_I_P(ssi, id, busif) } } },
};
+ /*
+ * FIXME
+ *
+ * We can't support SSI9-4/5/6/7, because their addresses fall
+ * outside the calculation rule
+ */
+ if ((id == 9) && (busif >= 4))
+ dev_err(dev, "this driver doesn't support SSI%d-%d so far\n",
+ id, busif);
+
/* it shouldn't happen */
if (use_cmd && !use_src)
dev_err(dev, "DVC is selected without SRC\n");
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 0230301fe078..1f7881cc16b2 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -219,12 +219,33 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
RSND_GEN_S_REG(HDMI1_SEL, 0x9e4),
/* FIXME: it needs SSI_MODE2/3 in the future */
- RSND_GEN_M_REG(SSI_BUSIF_MODE, 0x0, 0x80),
- RSND_GEN_M_REG(SSI_BUSIF_ADINR, 0x4, 0x80),
- RSND_GEN_M_REG(SSI_BUSIF_DALIGN,0x8, 0x80),
- RSND_GEN_M_REG(SSI_MODE, 0xc, 0x80),
- RSND_GEN_M_REG(SSI_CTRL, 0x10, 0x80),
- RSND_GEN_M_REG(SSI_INT_ENABLE, 0x18, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF0_MODE, 0x0, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF0_ADINR, 0x4, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF0_DALIGN, 0x8, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF1_MODE, 0x20, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF1_ADINR, 0x24, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF1_DALIGN, 0x28, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF2_MODE, 0x40, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF2_ADINR, 0x44, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF2_DALIGN, 0x48, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF3_MODE, 0x60, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF3_ADINR, 0x64, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF3_DALIGN, 0x68, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF4_MODE, 0x500, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF4_ADINR, 0x504, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF4_DALIGN, 0x508, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF5_MODE, 0x520, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF5_ADINR, 0x524, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF5_DALIGN, 0x528, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF6_MODE, 0x540, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF6_ADINR, 0x544, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF6_DALIGN, 0x548, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF7_MODE, 0x560, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF7_ADINR, 0x564, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF7_DALIGN, 0x568, 0x80),
+ RSND_GEN_M_REG(SSI_MODE, 0xc, 0x80),
+ RSND_GEN_M_REG(SSI_CTRL, 0x10, 0x80),
+ RSND_GEN_M_REG(SSI_INT_ENABLE, 0x18, 0x80),
};
static const struct rsnd_regmap_field_conf conf_scu[] = {
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 96d93330b1e1..4464d1d0a042 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -156,9 +156,30 @@ enum rsnd_reg {
RSND_REG_SSI_MODE2,
RSND_REG_SSI_CONTROL,
RSND_REG_SSI_CTRL,
- RSND_REG_SSI_BUSIF_MODE,
- RSND_REG_SSI_BUSIF_ADINR,
- RSND_REG_SSI_BUSIF_DALIGN,
+ RSND_REG_SSI_BUSIF0_MODE,
+ RSND_REG_SSI_BUSIF0_ADINR,
+ RSND_REG_SSI_BUSIF0_DALIGN,
+ RSND_REG_SSI_BUSIF1_MODE,
+ RSND_REG_SSI_BUSIF1_ADINR,
+ RSND_REG_SSI_BUSIF1_DALIGN,
+ RSND_REG_SSI_BUSIF2_MODE,
+ RSND_REG_SSI_BUSIF2_ADINR,
+ RSND_REG_SSI_BUSIF2_DALIGN,
+ RSND_REG_SSI_BUSIF3_MODE,
+ RSND_REG_SSI_BUSIF3_ADINR,
+ RSND_REG_SSI_BUSIF3_DALIGN,
+ RSND_REG_SSI_BUSIF4_MODE,
+ RSND_REG_SSI_BUSIF4_ADINR,
+ RSND_REG_SSI_BUSIF4_DALIGN,
+ RSND_REG_SSI_BUSIF5_MODE,
+ RSND_REG_SSI_BUSIF5_ADINR,
+ RSND_REG_SSI_BUSIF5_DALIGN,
+ RSND_REG_SSI_BUSIF6_MODE,
+ RSND_REG_SSI_BUSIF6_ADINR,
+ RSND_REG_SSI_BUSIF6_DALIGN,
+ RSND_REG_SSI_BUSIF7_MODE,
+ RSND_REG_SSI_BUSIF7_ADINR,
+ RSND_REG_SSI_BUSIF7_DALIGN,
RSND_REG_SSI_INT_ENABLE,
RSND_REG_SSI_SYS_STATUS0,
RSND_REG_SSI_SYS_STATUS1,
@@ -274,12 +295,12 @@ struct rsnd_mod_ops {
int (*fallback)(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv);
- int (*nolock_start)(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- struct rsnd_priv *priv);
- int (*nolock_stop)(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- struct rsnd_priv *priv);
+ int (*prepare)(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv);
+ int (*cleanup)(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv);
};
struct rsnd_dai_stream;
@@ -297,9 +318,8 @@ struct rsnd_mod {
/*
* status
*
- * 0xH0000CBA
+ * 0xH0000CB0
*
- * A 0: nolock_start 1: nolock_stop
* B 0: init 1: quit
* C 0: start 1: stop
*
@@ -309,9 +329,9 @@ struct rsnd_mod {
* H 0: fallback
* H 0: hw_params
* H 0: pointer
+ * H 0: prepare
+ * H 0: cleanup
*/
-#define __rsnd_mod_shift_nolock_start 0
-#define __rsnd_mod_shift_nolock_stop 0
#define __rsnd_mod_shift_init 4
#define __rsnd_mod_shift_quit 4
#define __rsnd_mod_shift_start 8
@@ -323,11 +343,13 @@ struct rsnd_mod {
#define __rsnd_mod_shift_fallback 28 /* always called */
#define __rsnd_mod_shift_hw_params 28 /* always called */
#define __rsnd_mod_shift_pointer 28 /* always called */
+#define __rsnd_mod_shift_prepare 28 /* always called */
+#define __rsnd_mod_shift_cleanup 28 /* always called */
#define __rsnd_mod_add_probe 0
#define __rsnd_mod_add_remove 0
-#define __rsnd_mod_add_nolock_start 1
-#define __rsnd_mod_add_nolock_stop -1
+#define __rsnd_mod_add_prepare 0
+#define __rsnd_mod_add_cleanup 0
#define __rsnd_mod_add_init 1
#define __rsnd_mod_add_quit -1
#define __rsnd_mod_add_start 1
@@ -340,6 +362,8 @@ struct rsnd_mod {
#define __rsnd_mod_call_probe 0
#define __rsnd_mod_call_remove 0
+#define __rsnd_mod_call_prepare 0
+#define __rsnd_mod_call_cleanup 0
#define __rsnd_mod_call_init 0
#define __rsnd_mod_call_quit 1
#define __rsnd_mod_call_start 0
@@ -349,8 +373,6 @@ struct rsnd_mod {
#define __rsnd_mod_call_fallback 0
#define __rsnd_mod_call_hw_params 0
#define __rsnd_mod_call_pointer 0
-#define __rsnd_mod_call_nolock_start 0
-#define __rsnd_mod_call_nolock_stop 1
#define rsnd_mod_to_priv(mod) ((mod)->priv)
#define rsnd_mod_name(mod) ((mod)->ops->name)
@@ -431,6 +453,7 @@ struct rsnd_dai_stream {
char name[RSND_DAI_NAME_SIZE];
struct snd_pcm_substream *substream;
struct rsnd_mod *mod[RSND_MOD_MAX];
+ struct rsnd_mod *dma;
struct rsnd_dai *rdai;
struct device *dmac_dev; /* for IPMMU */
u32 parent_ssi_status;
@@ -460,6 +483,7 @@ struct rsnd_dai {
int max_channels; /* 2ch - 16ch */
int ssi_lane; /* 1lane - 4lane */
+ int chan_width; /* 16/24/32 bit width */
unsigned int clk_master:1;
unsigned int bit_clk_inv:1;
@@ -493,6 +517,11 @@ int rsnd_rdai_channels_ctrl(struct rsnd_dai *rdai,
int rsnd_rdai_ssi_lane_ctrl(struct rsnd_dai *rdai,
int ssi_lane);
+#define rsnd_rdai_width_set(rdai, width) \
+ rsnd_rdai_width_ctrl(rdai, width)
+#define rsnd_rdai_width_get(rdai) \
+ rsnd_rdai_width_ctrl(rdai, 0)
+int rsnd_rdai_width_ctrl(struct rsnd_dai *rdai, int width);
void rsnd_dai_period_elapsed(struct rsnd_dai_stream *io);
int rsnd_dai_connect(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
@@ -685,6 +714,7 @@ void rsnd_ssi_remove(struct rsnd_priv *priv);
struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
int rsnd_ssi_use_busif(struct rsnd_dai_stream *io);
+int rsnd_ssi_get_busif(struct rsnd_dai_stream *io);
u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io);
#define RSND_SSI_HDMI_PORT0 0xf0
@@ -702,7 +732,7 @@ int __rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
void rsnd_parse_connect_ssi(struct rsnd_dai *rdai,
struct device_node *playback,
struct device_node *capture);
-unsigned int rsnd_ssi_clk_query(struct rsnd_priv *priv,
+unsigned int rsnd_ssi_clk_query(struct rsnd_dai *rdai,
int param1, int param2, int *idx);
/*
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index beccfbac7581..cd38a43b976f 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -158,7 +158,7 @@ static int rsnd_src_hw_params(struct rsnd_mod *mod,
struct snd_soc_dpcm *dpcm;
struct snd_pcm_hw_params *be_params;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
be_params = &dpcm->hw_params;
if (params_rate(fe_params) != params_rate(be_params))
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 8304e4ec9242..fcb4df23248c 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -42,7 +42,13 @@
#define DWL_24 (5 << 19) /* Data Word Length */
#define DWL_32 (6 << 19) /* Data Word Length */
+/*
+ * System word length
+ */
+#define SWL_16 (1 << 16) /* R/W System Word Length */
+#define SWL_24 (2 << 16) /* R/W System Word Length */
#define SWL_32 (3 << 16) /* R/W System Word Length */
+
#define SCKD (1 << 15) /* Serial Bit Clock Direction */
#define SWSD (1 << 14) /* Serial WS Direction */
#define SCKP (1 << 13) /* Serial Bit Clock Polarity */
@@ -72,7 +78,6 @@
struct rsnd_ssi {
struct rsnd_mod mod;
- struct rsnd_mod *dma;
u32 flags;
u32 cr_own;
@@ -145,6 +150,11 @@ int rsnd_ssi_use_busif(struct rsnd_dai_stream *io)
return use_busif;
}
+int rsnd_ssi_get_busif(struct rsnd_dai_stream *io)
+{
+ return 0; /* BUSIF0 only for now */
+}
+
static void rsnd_ssi_status_clear(struct rsnd_mod *mod)
{
rsnd_mod_write(mod, SSISR, 0);
@@ -220,14 +230,32 @@ u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io)
return 0;
}
-unsigned int rsnd_ssi_clk_query(struct rsnd_priv *priv,
+static u32 rsnd_rdai_width_to_swl(struct rsnd_dai *rdai)
+{
+ struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ int width = rsnd_rdai_width_get(rdai);
+
+ switch (width) {
+ case 32: return SWL_32;
+ case 24: return SWL_24;
+ case 16: return SWL_16;
+ }
+
+ dev_err(dev, "unsupported slot width value: %d\n", width);
+ return 0;
+}
+
+unsigned int rsnd_ssi_clk_query(struct rsnd_dai *rdai,
int param1, int param2, int *idx)
{
+ struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
int ssi_clk_mul_table[] = {
1, 2, 4, 8, 16, 6, 12,
};
int j, ret;
unsigned int main_rate;
+ int width = rsnd_rdai_width_get(rdai);
for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) {
@@ -240,12 +268,7 @@ unsigned int rsnd_ssi_clk_query(struct rsnd_priv *priv,
if (j == 0)
continue;
- /*
- * this driver is assuming that
- * system word is 32bit x chan
- * see rsnd_ssi_init()
- */
- main_rate = 32 * param1 * param2 * ssi_clk_mul_table[j];
+ main_rate = width * param1 * param2 * ssi_clk_mul_table[j];
ret = rsnd_adg_clk_query(priv, main_rate);
if (ret < 0)
@@ -283,16 +306,21 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
if (rsnd_ssi_is_multi_slave(mod, io))
return 0;
- if (ssi->usrcnt > 1) {
+ if (ssi->rate) {
if (ssi->rate != rate) {
dev_err(dev, "SSI parent/child should use same rate\n");
return -EINVAL;
}
+ if (ssi->chan != chan) {
+ dev_err(dev, "SSI parent/child should use same chan\n");
+ return -EINVAL;
+ }
+
return 0;
}
- main_rate = rsnd_ssi_clk_query(priv, rate, chan, &idx);
+ main_rate = rsnd_ssi_clk_query(rdai, rate, chan, &idx);
if (!main_rate) {
dev_err(dev, "unsupported clock rate\n");
return -EIO;
@@ -312,9 +340,11 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
* SSICR : FORCE, SCKD, SWSD
* SSIWSR : CONT
*/
- ssi->cr_clk = FORCE | SWL_32 | SCKD | SWSD | CKDV(idx);
+ ssi->cr_clk = FORCE | rsnd_rdai_width_to_swl(rdai) |
+ SCKD | SWSD | CKDV(idx);
ssi->wsr = CONT;
ssi->rate = rate;
+ ssi->chan = chan;
dev_dbg(dev, "%s[%d] outputs %u Hz\n",
rsnd_mod_name(mod),
@@ -340,6 +370,7 @@ static void rsnd_ssi_master_clk_stop(struct rsnd_mod *mod,
ssi->cr_clk = 0;
ssi->rate = 0;
+ ssi->chan = 0;
rsnd_adg_ssi_clk_stop(mod);
}
@@ -357,15 +388,11 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
is_tdm = rsnd_runtime_is_ssi_tdm(io);
- /*
- * always use 32bit system word.
- * see also rsnd_ssi_master_clk_enable()
- */
- cr_own |= FORCE | SWL_32;
+ cr_own |= FORCE | rsnd_rdai_width_to_swl(rdai);
if (rdai->bit_clk_inv)
cr_own |= SCKP;
- if (rdai->frm_clk_inv ^ is_tdm)
+ if (rdai->frm_clk_inv && !is_tdm)
cr_own |= SWSP;
if (rdai->data_alignment)
cr_own |= SDTA;
@@ -373,6 +400,17 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
cr_own |= DEL;
/*
+ * TDM Mode
+ * see
+ * rsnd_ssiu_init_gen2()
+ */
+ wsr = ssi->wsr;
+ if (is_tdm) {
+ wsr |= WS_MODE;
+ cr_own |= CHNL_8;
+ }
+
+ /*
* We shouldn't exchange SWSP after running.
* This means, parent needs to care it.
*/
@@ -384,6 +422,9 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
cr_own &= ~DWL_MASK;
switch (snd_pcm_format_width(runtime->format)) {
+ case 8:
+ cr_own |= DWL_8;
+ break;
case 16:
cr_own |= DWL_16;
break;
@@ -399,16 +440,6 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
cr_mode = DIEN; /* PIO : enable Data interrupt */
}
- /*
- * TDM Extend Mode
- * see
- * rsnd_ssiu_init_gen2()
- */
- wsr = ssi->wsr;
- if (is_tdm) {
- wsr |= WS_MODE;
- cr_own |= CHNL_8;
- }
init_end:
ssi->cr_own = cr_own;
ssi->cr_mode = cr_mode;
@@ -434,7 +465,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- int ret;
if (!rsnd_ssi_is_run_mods(mod, io))
return 0;
@@ -443,10 +473,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
rsnd_mod_power_on(mod);
- ret = rsnd_ssi_master_clk_start(mod, io);
- if (ret < 0)
- return ret;
-
rsnd_ssi_config_init(mod, io);
rsnd_ssi_register_setup(mod);
@@ -493,26 +519,16 @@ static int rsnd_ssi_hw_params(struct rsnd_mod *mod,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- int chan = params_channels(params);
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
+ unsigned int fmt_width = snd_pcm_format_width(params_format(params));
- /*
- * snd_pcm_ops::hw_params will be called *before*
- * snd_soc_dai_ops::trigger. Thus, ssi->usrcnt is 0
- * in 1st call.
- */
- if (ssi->usrcnt) {
- /*
- * Already working.
- * It will happen if SSI has parent/child connection.
- * it is error if child <-> parent SSI uses
- * different channels.
- */
- if (ssi->chan != chan)
- return -EIO;
- }
+ if (fmt_width > rdai->chan_width) {
+ struct rsnd_priv *priv = rsnd_io_to_priv(io);
+ struct device *dev = rsnd_priv_to_dev(priv);
- ssi->chan = chan;
+ dev_err(dev, "invalid combination of slot-width and format-data-width\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -852,6 +868,13 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
return 0;
}
+static int rsnd_ssi_prepare(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
+{
+ return rsnd_ssi_master_clk_start(mod, io);
+}
+
static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
.name = SSI_NAME,
.probe = rsnd_ssi_common_probe,
@@ -864,13 +887,13 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
.pointer = rsnd_ssi_pio_pointer,
.pcm_new = rsnd_ssi_pcm_new,
.hw_params = rsnd_ssi_hw_params,
+ .prepare = rsnd_ssi_prepare,
};
static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
int ret;
/*
@@ -885,7 +908,7 @@ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
return ret;
/* SSI probe might be called many times in MUX multi path */
- ret = rsnd_dma_attach(io, mod, &ssi->dma);
+ ret = rsnd_dma_attach(io, mod, &io->dma);
return ret;
}
@@ -940,6 +963,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
.pcm_new = rsnd_ssi_pcm_new,
.fallback = rsnd_ssi_fallback,
.hw_params = rsnd_ssi_hw_params,
+ .prepare = rsnd_ssi_prepare,
};
int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
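With the system word width made configurable, rsnd_ssi_clk_query() computes
the master clock as width * rate * channels * divider-multiple instead of
hard-coding the 32-bit system word. A worked example with the formula above:

/*
 * main_rate = width * param1 * param2 * ssi_clk_mul_table[j]
 *
 * width = 32, rate = 48000, chan = 2, mult = 1:
 *   32 * 48000 * 2 * 1 = 3072000 Hz
 * the same stream with a 16-bit system word needs half of that:
 *   16 * 48000 * 2 * 1 = 1536000 Hz
 */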
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 016fbf5ac242..39b67643b5dc 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -10,9 +10,12 @@
struct rsnd_ssiu {
struct rsnd_mod mod;
+ u32 busif_status[8]; /* for BUSIF0 - BUSIF7 */
+ unsigned int usrcnt;
};
#define rsnd_ssiu_nr(priv) ((priv)->ssiu_nr)
+#define rsnd_mod_to_ssiu(_mod) container_of((_mod), struct rsnd_ssiu, mod)
#define for_each_rsnd_ssiu(pos, priv, i) \
for (i = 0; \
(i < rsnd_ssiu_nr(priv)) && \
@@ -120,6 +123,7 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
+ struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
int hdmi = rsnd_ssi_hdmi_port(io);
int ret;
u32 mode = 0;
@@ -128,6 +132,8 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
if (ret < 0)
return ret;
+ ssiu->usrcnt++;
+
if (rsnd_runtime_is_ssi_tdm(io)) {
/*
* TDM Extend Mode
@@ -140,15 +146,59 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
rsnd_mod_write(mod, SSI_MODE, mode);
if (rsnd_ssi_use_busif(io)) {
- rsnd_mod_write(mod, SSI_BUSIF_ADINR,
- rsnd_get_adinr_bit(mod, io) |
- (rsnd_io_is_play(io) ?
- rsnd_runtime_channel_after_ctu(io) :
- rsnd_runtime_channel_original(io)));
- rsnd_mod_write(mod, SSI_BUSIF_MODE,
- rsnd_get_busif_shift(io, mod) | 1);
- rsnd_mod_write(mod, SSI_BUSIF_DALIGN,
- rsnd_get_dalign(mod, io));
+ int id = rsnd_mod_id(mod);
+ int busif = rsnd_ssi_get_busif(io);
+
+ /*
+ * FIXME
+ *
+ * We can't support SSI9-4/5/6/7, because their addresses fall
+ * outside the calculation rule
+ */
+ if ((id == 9) && (busif >= 4)) {
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ dev_err(dev, "This driver doesn't support SSI%d-%d, so far",
+ id, busif);
+ }
+
+#define RSND_WRITE_BUSIF(i) \
+ rsnd_mod_write(mod, SSI_BUSIF##i##_ADINR, \
+ rsnd_get_adinr_bit(mod, io) | \
+ (rsnd_io_is_play(io) ? \
+ rsnd_runtime_channel_after_ctu(io) : \
+ rsnd_runtime_channel_original(io))); \
+ rsnd_mod_write(mod, SSI_BUSIF##i##_MODE, \
+ rsnd_get_busif_shift(io, mod) | 1); \
+ rsnd_mod_write(mod, SSI_BUSIF##i##_DALIGN, \
+ rsnd_get_dalign(mod, io))
+
+ switch (busif) {
+ case 0:
+ RSND_WRITE_BUSIF(0);
+ break;
+ case 1:
+ RSND_WRITE_BUSIF(1);
+ break;
+ case 2:
+ RSND_WRITE_BUSIF(2);
+ break;
+ case 3:
+ RSND_WRITE_BUSIF(3);
+ break;
+ case 4:
+ RSND_WRITE_BUSIF(4);
+ break;
+ case 5:
+ RSND_WRITE_BUSIF(5);
+ break;
+ case 6:
+ RSND_WRITE_BUSIF(6);
+ break;
+ case 7:
+ RSND_WRITE_BUSIF(7);
+ break;
+ }
}
if (hdmi) {
@@ -194,10 +244,12 @@ static int rsnd_ssiu_start_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
+ int busif = rsnd_ssi_get_busif(io);
+
if (!rsnd_ssi_use_busif(io))
return 0;
- rsnd_mod_write(mod, SSI_CTRL, 0x1);
+ rsnd_mod_bset(mod, SSI_CTRL, 1 << (busif * 4), 1 << (busif * 4));
if (rsnd_ssi_multi_slaves_runtime(io))
rsnd_mod_write(mod, SSI_CONTROL, 0x1);
@@ -209,10 +261,16 @@ static int rsnd_ssiu_stop_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
+ struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
+ int busif = rsnd_ssi_get_busif(io);
+
if (!rsnd_ssi_use_busif(io))
return 0;
- rsnd_mod_write(mod, SSI_CTRL, 0);
+ rsnd_mod_bset(mod, SSI_CTRL, 1 << (busif * 4), 0);
+
+ if (--ssiu->usrcnt)
+ return 0;
if (rsnd_ssi_multi_slaves_runtime(io))
rsnd_mod_write(mod, SSI_CONTROL, 0);
@@ -246,6 +304,16 @@ int rsnd_ssiu_attach(struct rsnd_dai_stream *io,
return rsnd_dai_connect(mod, io, mod->type);
}
+static u32 *rsnd_ssiu_get_status(struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod,
+ enum rsnd_mod_type type)
+{
+ struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
+ int busif = rsnd_ssi_get_busif(io);
+
+ return &ssiu->busif_status[busif];
+}
+
int rsnd_ssiu_probe(struct rsnd_priv *priv)
{
struct device *dev = rsnd_priv_to_dev(priv);
@@ -269,7 +337,7 @@ int rsnd_ssiu_probe(struct rsnd_priv *priv)
for_each_rsnd_ssiu(ssiu, priv, i) {
ret = rsnd_mod_init(priv, rsnd_mod_get(ssiu),
- ops, NULL, rsnd_mod_get_status,
+ ops, NULL, rsnd_ssiu_get_status,
RSND_MOD_SSIU, i);
if (ret)
return ret;
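SSI_CTRL now carries one enable bit per BUSIF at 4-bit strides, which is why
start/stop switched from a blanket rsnd_mod_write() to rsnd_mod_bset() on
just the lane in question. The bit math, as used above:

/*
 * mask = 1 << (busif * 4)
 *   busif 0 -> 0x001
 *   busif 1 -> 0x010
 *   busif 2 -> 0x100
 *
 * rsnd_mod_bset(mod, SSI_CTRL, mask, mask);	start BUSIFn
 * rsnd_mod_bset(mod, SSI_CTRL, mask, 0);	stop BUSIFn
 */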
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 409d082e80d1..699397a09167 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -157,7 +157,7 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
ret = dpcm_be_dai_startup(fe, stream);
if (ret < 0) {
/* clean up all links */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ for_each_dpcm_be(fe, stream, dpcm)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
dpcm_be_disconnect(fe, stream);
@@ -321,7 +321,7 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
ret = dpcm_be_dai_shutdown(fe, stream);
/* mark FE's links ready to prune */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ for_each_dpcm_be(fe, stream, dpcm)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 9cfe10d8040c..6ddcf12bc030 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -52,6 +52,10 @@ EXPORT_SYMBOL_GPL(snd_soc_debugfs_root);
static DEFINE_MUTEX(client_mutex);
static LIST_HEAD(component_list);
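+/* cards whose components went away park here until they can rebind */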
+static LIST_HEAD(unbind_card_list);
+
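+/* walk the global component list; callers hold client_mutex */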
+#define for_each_component(component) \
+ list_for_each_entry(component, &component_list, list)
/*
* This is a timeout to do a DAPM powerdown after a stream is closed().
@@ -62,8 +66,9 @@ static int pmdown_time = 5000;
module_param(pmdown_time, int, 0);
MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
-/* If a DMI filed contain strings in this blacklist (e.g.
- * "Type2 - Board Manufacturer" or "Type1 - TBD by OEM"), it will be taken
+/*
+ * If a DMI field contains strings in this blacklist (e.g.
+ * "Type2 - Board Manufacturer" or "Type1 - TBD by OEM"), it will be taken
* as invalid and dropped when setting the card long name from DMI info.
*/
static const char * const dmi_blacklist[] = {
@@ -175,8 +180,8 @@ static int dai_list_show(struct seq_file *m, void *v)
mutex_lock(&client_mutex);
- list_for_each_entry(component, &component_list, list)
- list_for_each_entry(dai, &component->dai_list, list)
+ for_each_component(component)
+ for_each_component_dais(component, dai)
seq_printf(m, "%s\n", dai->name);
mutex_unlock(&client_mutex);
@@ -191,7 +196,7 @@ static int component_list_show(struct seq_file *m, void *v)
mutex_lock(&client_mutex);
- list_for_each_entry(component, &component_list, list)
+ for_each_component(component)
seq_printf(m, "%s\n", component->name);
mutex_unlock(&client_mutex);
@@ -218,7 +223,7 @@ static void soc_init_card_debugfs(struct snd_soc_card *card)
&card->pop_time);
if (!card->debugfs_pop_time)
dev_warn(card->dev,
- "ASoC: Failed to create pop time debugfs file\n");
+ "ASoC: Failed to create pop time debugfs file\n");
}
static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
@@ -341,7 +346,7 @@ struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->dai_link->no_pcm &&
!strcmp(rtd->dai_link->name, dai_link))
return rtd->pcm->streams[stream].substream;
@@ -398,7 +403,7 @@ static void soc_remove_pcm_runtimes(struct snd_soc_card *card)
{
struct snd_soc_pcm_runtime *rtd, *_rtd;
- list_for_each_entry_safe(rtd, _rtd, &card->rtd_list, list) {
+ for_each_card_rtds_safe(card, rtd, _rtd) {
list_del(&rtd->list);
soc_free_pcm_runtime(rtd);
}
@@ -411,7 +416,7 @@ struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (!strcmp(rtd->dai_link->name, dai_link))
return rtd;
}
@@ -422,7 +427,8 @@ EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);
static void codec2codec_close_delayed_work(struct work_struct *work)
{
- /* Currently nothing to do for c2c links
+ /*
+ * Currently nothing to do for c2c links
* Since c2c links are internal nodes in the DAPM graph and
* don't interface with the outside world or application layer
* we don't have to do any special handling on close.
@@ -442,8 +448,9 @@ int snd_soc_suspend(struct device *dev)
if (!card->instantiated)
return 0;
- /* Due to the resume being scheduled into a workqueue we could
- * suspend before that's finished - wait for it to complete.
+ /*
+ * Due to the resume being scheduled into a workqueue we could
+ * suspend before that's finished - wait for it to complete.
*/
snd_power_wait(card->snd_card, SNDRV_CTL_POWER_D0);
@@ -451,13 +458,13 @@ int snd_soc_suspend(struct device *dev)
snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D3hot);
/* mute any active DACs */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
+ struct snd_soc_dai *dai;
if (rtd->dai_link->ignore_suspend)
continue;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, dai) {
struct snd_soc_dai_driver *drv = dai->driver;
if (drv->ops->digital_mute && dai->playback_active)
@@ -466,7 +473,7 @@ int snd_soc_suspend(struct device *dev)
}
/* suspend all pcms */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->dai_link->ignore_suspend)
continue;
@@ -476,7 +483,7 @@ int snd_soc_suspend(struct device *dev)
if (card->suspend_pre)
card->suspend_pre(card);
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
if (rtd->dai_link->ignore_suspend)
@@ -487,10 +494,10 @@ int snd_soc_suspend(struct device *dev)
}
/* close any waiting streams */
- list_for_each_entry(rtd, &card->rtd_list, list)
+ for_each_card_rtds(card, rtd)
flush_delayed_work(&rtd->delayed_work);
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->dai_link->ignore_suspend)
continue;
@@ -509,11 +516,14 @@ int snd_soc_suspend(struct device *dev)
snd_soc_dapm_sync(&card->dapm);
/* suspend all COMPONENTs */
- list_for_each_entry(component, &card->component_dev_list, card_list) {
- struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+ for_each_card_components(card, component) {
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
- /* If there are paths active then the COMPONENT will be held with
- * bias _ON and should not be suspended. */
+ /*
+ * If there are paths active then the COMPONENT will be held
+ * with bias _ON and should not be suspended.
+ */
if (!component->suspended) {
switch (snd_soc_dapm_get_bias_level(dapm)) {
case SND_SOC_BIAS_STANDBY:
@@ -547,7 +557,7 @@ int snd_soc_suspend(struct device *dev)
}
}
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
if (rtd->dai_link->ignore_suspend)
@@ -567,18 +577,21 @@ int snd_soc_suspend(struct device *dev)
}
EXPORT_SYMBOL_GPL(snd_soc_suspend);
-/* deferred resume work, so resume can complete before we finished
+/*
+ * deferred resume work, so resume can complete before we finish
* setting our codec back up, which can be very slow on I2C
*/
static void soc_resume_deferred(struct work_struct *work)
{
struct snd_soc_card *card =
- container_of(work, struct snd_soc_card, deferred_resume_work);
+ container_of(work, struct snd_soc_card,
+ deferred_resume_work);
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_component *component;
int i;
- /* our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
+ /*
+ * our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
* so userspace apps are blocked from touching us
*/
@@ -591,7 +604,7 @@ static void soc_resume_deferred(struct work_struct *work)
card->resume_pre(card);
/* resume control bus DAIs */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
if (rtd->dai_link->ignore_suspend)
@@ -601,7 +614,7 @@ static void soc_resume_deferred(struct work_struct *work)
cpu_dai->driver->resume(cpu_dai);
}
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (component->suspended) {
if (component->driver->resume)
component->driver->resume(component);
@@ -609,7 +622,7 @@ static void soc_resume_deferred(struct work_struct *work)
}
}
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->dai_link->ignore_suspend)
continue;
@@ -624,13 +637,13 @@ static void soc_resume_deferred(struct work_struct *work)
}
/* unmute any active DACs */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
+ struct snd_soc_dai *dai;
if (rtd->dai_link->ignore_suspend)
continue;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, dai) {
struct snd_soc_dai_driver *drv = dai->driver;
if (drv->ops->digital_mute && dai->playback_active)
@@ -638,7 +651,7 @@ static void soc_resume_deferred(struct work_struct *work)
}
}
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
if (rtd->dai_link->ignore_suspend)
@@ -673,16 +686,15 @@ int snd_soc_resume(struct device *dev)
return 0;
/* activate pins from sleep state */
- list_for_each_entry(rtd, &card->rtd_list, list) {
- struct snd_soc_dai **codec_dais = rtd->codec_dais;
+ for_each_card_rtds(card, rtd) {
+ struct snd_soc_dai *codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int j;
if (cpu_dai->active)
pinctrl_pm_select_default_state(cpu_dai->dev);
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = codec_dais[j];
+ for_each_rtd_codec_dai(rtd, j, codec_dai) {
if (codec_dai->active)
pinctrl_pm_select_default_state(codec_dai->dev);
}
@@ -694,8 +706,9 @@ int snd_soc_resume(struct device *dev)
* have that problem and may take a substantial amount of time to resume
* due to I/O costs and anti-pop so handle them out of line.
*/
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+
bus_control |= cpu_dai->driver->bus_control;
}
if (bus_control) {
@@ -725,7 +738,7 @@ static struct snd_soc_component *soc_find_component(
lockdep_assert_held(&client_mutex);
- list_for_each_entry(component, &component_list, list) {
+ for_each_component(component) {
if (of_node) {
if (component->dev->of_node == of_node)
return component;
@@ -737,6 +750,24 @@ static struct snd_soc_component *soc_find_component(
return NULL;
}
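+/*
+ * a dai_link component entry matches a registered component when both
+ * its OF node (if set) and its name (if set) agree with the component
+ */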
+static int snd_soc_is_matching_component(
+ const struct snd_soc_dai_link_component *dlc,
+ struct snd_soc_component *component)
+{
+ struct device_node *component_of_node;
+
+ component_of_node = component->dev->of_node;
+ if (!component_of_node && component->dev->parent)
+ component_of_node = component->dev->parent->of_node;
+
+ if (dlc->of_node && component_of_node != dlc->of_node)
+ return 0;
+ if (dlc->name && strcmp(component->name, dlc->name))
+ return 0;
+
+ return 1;
+}
+
/**
* snd_soc_find_dai - Find a registered DAI
*
@@ -753,21 +784,14 @@ struct snd_soc_dai *snd_soc_find_dai(
{
struct snd_soc_component *component;
struct snd_soc_dai *dai;
- struct device_node *component_of_node;
lockdep_assert_held(&client_mutex);
- /* Find CPU DAI from registered DAIs*/
- list_for_each_entry(component, &component_list, list) {
- component_of_node = component->dev->of_node;
- if (!component_of_node && component->dev->parent)
- component_of_node = component->dev->parent->of_node;
-
- if (dlc->of_node && component_of_node != dlc->of_node)
- continue;
- if (dlc->name && strcmp(component->name, dlc->name))
+	/* Find DAI from registered DAIs */
+ for_each_component(component) {
+ if (!snd_soc_is_matching_component(dlc, component))
continue;
- list_for_each_entry(dai, &component->dai_list, list) {
+ for_each_component_dais(component, dai) {
if (dlc->dai_name && strcmp(dai->name, dlc->dai_name)
&& (!dai->driver->name
|| strcmp(dai->driver->name, dlc->dai_name)))
@@ -781,7 +805,6 @@ struct snd_soc_dai *snd_soc_find_dai(
}
EXPORT_SYMBOL_GPL(snd_soc_find_dai);
-
/**
* snd_soc_find_dai_link - Find a DAI link
*
@@ -805,7 +828,7 @@ struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
lockdep_assert_held(&client_mutex);
- list_for_each_entry_safe(link, _link, &card->dai_link_list, list) {
+ for_each_card_links_safe(card, link, _link) {
if (link->id != id)
continue;
@@ -828,7 +851,7 @@ static bool soc_is_dai_link_bound(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->dai_link == dai_link)
return true;
}
@@ -844,8 +867,6 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
struct snd_soc_dai_link_component cpu_dai_component;
struct snd_soc_component *component;
struct snd_soc_dai **codec_dais;
- struct device_node *platform_of_node;
- const char *platform_name;
int i;
if (dai_link->ignore)
@@ -877,6 +898,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
rtd->num_codecs = dai_link->num_codecs;
/* Find CODEC from registered CODECs */
+ /* we can use for_each_rtd_codec_dai() after this */
codec_dais = rtd->codec_dais;
for (i = 0; i < rtd->num_codecs; i++) {
codec_dais[i] = snd_soc_find_dai(&codecs[i]);
@@ -891,24 +913,11 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
/* Single codec links expect codec and codec_dai in runtime data */
rtd->codec_dai = codec_dais[0];
- /* if there's no platform we match on the empty platform */
- platform_name = dai_link->platform_name;
- if (!platform_name && !dai_link->platform_of_node)
- platform_name = "snd-soc-dummy";
-
/* find one from the set of registered platforms */
- list_for_each_entry(component, &component_list, list) {
- platform_of_node = component->dev->of_node;
- if (!platform_of_node && component->dev->parent->of_node)
- platform_of_node = component->dev->parent->of_node;
-
- if (dai_link->platform_of_node) {
- if (platform_of_node != dai_link->platform_of_node)
- continue;
- } else {
- if (strcmp(component->name, platform_name))
- continue;
- }
+ for_each_component(component) {
+ if (!snd_soc_is_matching_component(dai_link->platform,
+ component))
+ continue;
snd_soc_rtdcom_add(rtd, component);
}
@@ -918,7 +927,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
_err_defer:
soc_free_pcm_runtime(rtd);
- return -EPROBE_DEFER;
+ return -EPROBE_DEFER;
}
static void soc_remove_component(struct snd_soc_component *component)
@@ -942,23 +951,25 @@ static void soc_remove_dai(struct snd_soc_dai *dai, int order)
{
int err;
- if (dai && dai->probed &&
- dai->driver->remove_order == order) {
- if (dai->driver->remove) {
- err = dai->driver->remove(dai);
- if (err < 0)
- dev_err(dai->dev,
- "ASoC: failed to remove %s: %d\n",
- dai->name, err);
- }
- dai->probed = 0;
+ if (!dai || !dai->probed ||
+ dai->driver->remove_order != order)
+ return;
+
+ if (dai->driver->remove) {
+ err = dai->driver->remove(dai);
+ if (err < 0)
+ dev_err(dai->dev,
+ "ASoC: failed to remove %s: %d\n",
+ dai->name, err);
}
+ dai->probed = 0;
}
static void soc_remove_link_dais(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd, int order)
{
int i;
+ struct snd_soc_dai *codec_dai;
/* unregister the rtd device */
if (rtd->dev_registered) {
@@ -967,8 +978,8 @@ static void soc_remove_link_dais(struct snd_soc_card *card,
}
/* remove the CODEC DAI */
- for (i = 0; i < rtd->num_codecs; i++)
- soc_remove_dai(rtd->codec_dais[i], order);
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ soc_remove_dai(codec_dai, order);
soc_remove_dai(rtd->cpu_dai, order);
}
@@ -993,28 +1004,57 @@ static void soc_remove_dai_links(struct snd_soc_card *card)
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_dai_link *link, *_link;
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
- list_for_each_entry(rtd, &card->rtd_list, list)
+ for_each_comp_order(order) {
+ for_each_card_rtds(card, rtd)
soc_remove_link_dais(card, rtd, order);
}
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
- list_for_each_entry(rtd, &card->rtd_list, list)
+ for_each_comp_order(order) {
+ for_each_card_rtds(card, rtd)
soc_remove_link_components(card, rtd, order);
}
- list_for_each_entry_safe(link, _link, &card->dai_link_list, list) {
+ for_each_card_links_safe(card, link, _link) {
if (link->dobj.type == SND_SOC_DOBJ_DAI_LINK)
dev_warn(card->dev, "Topology forgot to remove link %s?\n",
link->name);
list_del(&link->list);
- card->num_dai_links--;
}
}
+static int snd_soc_init_platform(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link)
+{
+ struct snd_soc_dai_link_component *platform = dai_link->platform;
+
+ /*
+ * FIXME
+ *
+ * this function should be removed in the future
+ */
+ /* convert Legacy platform link */
+ if (!platform) {
+ platform = devm_kzalloc(card->dev,
+ sizeof(struct snd_soc_dai_link_component),
+ GFP_KERNEL);
+ if (!platform)
+ return -ENOMEM;
+
+ dai_link->platform = platform;
+ platform->name = dai_link->platform_name;
+ platform->of_node = dai_link->platform_of_node;
+ platform->dai_name = NULL;
+ }
+
+ /* if there's no platform we match on the empty platform */
+ if (!platform->name &&
+ !platform->of_node)
+ platform->name = "snd-soc-dummy";
+
+ return 0;
+}
+
static int snd_soc_init_multicodec(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link)
{
@@ -1043,9 +1083,16 @@ static int snd_soc_init_multicodec(struct snd_soc_card *card,
}
static int soc_init_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *link)
+ struct snd_soc_dai_link *link)
{
int i, ret;
+ struct snd_soc_dai_link_component *codec;
+
+ ret = snd_soc_init_platform(card, link);
+ if (ret) {
+ dev_err(card->dev, "ASoC: failed to init multiplatform\n");
+ return ret;
+ }
ret = snd_soc_init_multicodec(card, link);
if (ret) {
@@ -1053,19 +1100,19 @@ static int soc_init_dai_link(struct snd_soc_card *card,
return ret;
}
- for (i = 0; i < link->num_codecs; i++) {
+ for_each_link_codecs(link, i, codec) {
/*
* Codec must be specified by 1 of name or OF node,
* not both or neither.
*/
- if (!!link->codecs[i].name ==
- !!link->codecs[i].of_node) {
+ if (!!codec->name ==
+ !!codec->of_node) {
dev_err(card->dev, "ASoC: Neither/both codec name/of_node are set for %s\n",
link->name);
return -EINVAL;
}
/* Codec DAI name must be specified */
- if (!link->codecs[i].dai_name) {
+ if (!codec->dai_name) {
dev_err(card->dev, "ASoC: codec_dai_name not set for %s\n",
link->name);
return -EINVAL;
@@ -1076,13 +1123,12 @@ static int soc_init_dai_link(struct snd_soc_card *card,
* Platform may be specified by either name or OF node, but
* can be left unspecified, and a dummy platform will be used.
*/
- if (link->platform_name && link->platform_of_node) {
+ if (link->platform->name && link->platform->of_node) {
dev_err(card->dev,
"ASoC: Both platform name/of_node are set for %s\n",
link->name);
return -EINVAL;
}
-
/*
* CPU device may be specified by either name or OF node, but
* can be left unspecified, and will be matched based on DAI
@@ -1111,7 +1157,8 @@ static int soc_init_dai_link(struct snd_soc_card *card,
void snd_soc_disconnect_sync(struct device *dev)
{
- struct snd_soc_component *component = snd_soc_lookup_component(dev, NULL);
+ struct snd_soc_component *component =
+ snd_soc_lookup_component(dev, NULL);
if (!component || !component->card)
return;
@@ -1142,14 +1189,14 @@ int snd_soc_add_dai_link(struct snd_soc_card *card,
}
lockdep_assert_held(&client_mutex);
- /* Notify the machine driver for extra initialization
+ /*
+ * Notify the machine driver for extra initialization
* on the link created by topology.
*/
if (dai_link->dobj.type && card->add_dai_link)
card->add_dai_link(card, dai_link);
list_add_tail(&dai_link->list, &card->dai_link_list);
- card->num_dai_links++;
return 0;
}
@@ -1178,16 +1225,16 @@ void snd_soc_remove_dai_link(struct snd_soc_card *card,
}
lockdep_assert_held(&client_mutex);
- /* Notify the machine driver for extra destruction
+ /*
+ * Notify the machine driver for extra destruction
* on the link created by topology.
*/
if (dai_link->dobj.type && card->remove_dai_link)
card->remove_dai_link(card, dai_link);
- list_for_each_entry_safe(link, _link, &card->dai_link_list, list) {
+ for_each_card_links_safe(card, link, _link) {
if (link == dai_link) {
list_del(&link->list);
- card->num_dai_links--;
return;
}
}
@@ -1239,7 +1286,8 @@ static void soc_set_name_prefix(struct snd_soc_card *card,
static int soc_probe_component(struct snd_soc_card *card,
struct snd_soc_component *component)
{
- struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
struct snd_soc_dai *dai;
int ret;
@@ -1277,7 +1325,7 @@ static int soc_probe_component(struct snd_soc_card *card,
}
}
- list_for_each_entry(dai, &component->dai_list, list) {
+ for_each_component_dais(component, dai) {
ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
if (ret != 0) {
dev_err(component->dev,
@@ -1320,6 +1368,7 @@ static int soc_probe_component(struct snd_soc_card *card,
component->driver->num_dapm_routes);
list_add(&dapm->list, &card->dapm_list);
+ /* see for_each_card_components */
list_add(&component->card_list, &card->component_dev_list);
return 0;
@@ -1370,8 +1419,7 @@ static int soc_post_component_init(struct snd_soc_pcm_runtime *rtd,
}
static int soc_probe_link_components(struct snd_soc_card *card,
- struct snd_soc_pcm_runtime *rtd,
- int order)
+ struct snd_soc_pcm_runtime *rtd, int order)
{
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
@@ -1398,6 +1446,7 @@ static int soc_probe_dai(struct snd_soc_dai *dai, int order)
if (dai->driver->probe) {
int ret = dai->driver->probe(dai);
+
if (ret < 0) {
dev_err(dai->dev, "ASoC: failed to probe DAI %s: %d\n",
dai->name, ret);
@@ -1431,48 +1480,6 @@ static int soc_link_dai_pcm_new(struct snd_soc_dai **dais, int num_dais,
return 0;
}
-static int soc_link_dai_widgets(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link,
- struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dapm_widget *sink, *source;
- int ret;
-
- if (rtd->num_codecs > 1)
- dev_warn(card->dev, "ASoC: Multiple codecs not supported yet\n");
-
- /* link the DAI widgets */
- sink = codec_dai->playback_widget;
- source = cpu_dai->capture_widget;
- if (sink && source) {
- ret = snd_soc_dapm_new_pcm(card, dai_link->params,
- dai_link->num_params,
- source, sink);
- if (ret != 0) {
- dev_err(card->dev, "ASoC: Can't link %s to %s: %d\n",
- sink->name, source->name, ret);
- return ret;
- }
- }
-
- sink = cpu_dai->playback_widget;
- source = codec_dai->capture_widget;
- if (sink && source) {
- ret = snd_soc_dapm_new_pcm(card, dai_link->params,
- dai_link->num_params,
- source, sink);
- if (ret != 0) {
- dev_err(card->dev, "ASoC: Can't link %s to %s: %d\n",
- sink->name, source->name, ret);
- return ret;
- }
- }
-
- return 0;
-}
-
static int soc_probe_link_dais(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd, int order)
{
@@ -1480,6 +1487,7 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;
+ struct snd_soc_dai *codec_dai;
int i, ret, num;
dev_dbg(card->dev, "ASoC: probe %s dai link %d late %d\n",
@@ -1493,8 +1501,8 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
return ret;
/* probe the CODEC DAI */
- for (i = 0; i < rtd->num_codecs; i++) {
- ret = soc_probe_dai(rtd->codec_dais[i], order);
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ ret = soc_probe_dai(codec_dai, order);
if (ret)
return ret;
}
@@ -1546,7 +1554,7 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
}
if (cpu_dai->driver->compress_new) {
- /*create compress_device"*/
+		/* create compress_device */
ret = cpu_dai->driver->compress_new(rtd, num);
if (ret < 0) {
dev_err(card->dev, "ASoC: can't create compress %s\n",
@@ -1560,7 +1568,7 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
ret = soc_new_pcm(rtd, num);
if (ret < 0) {
dev_err(card->dev, "ASoC: can't create pcm %s :%d\n",
- dai_link->stream_name, ret);
+ dai_link->stream_name, ret);
return ret;
}
ret = soc_link_dai_pcm_new(&cpu_dai, 1, rtd);
@@ -1573,11 +1581,6 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
} else {
INIT_DELAYED_WORK(&rtd->delayed_work,
codec2codec_close_delayed_work);
-
- /* link the DAI widgets */
- ret = soc_link_dai_widgets(card, dai_link, rtd);
- if (ret)
- return ret;
}
}
@@ -1628,8 +1631,7 @@ static int soc_probe_aux_devices(struct snd_soc_card *card)
int order;
int ret;
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
+ for_each_comp_order(order) {
list_for_each_entry(comp, &card->aux_comp_list, card_aux_list) {
if (comp->driver->probe_order == order) {
ret = soc_probe_component(card, comp);
@@ -1651,8 +1653,7 @@ static void soc_remove_aux_devices(struct snd_soc_card *card)
struct snd_soc_component *comp, *_comp;
int order;
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
+ for_each_comp_order(order) {
list_for_each_entry_safe(comp, _comp,
&card->aux_comp_list, card_aux_list) {
@@ -1681,14 +1682,12 @@ static void soc_remove_aux_devices(struct snd_soc_card *card)
int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
unsigned int dai_fmt)
{
- struct snd_soc_dai **codec_dais = rtd->codec_dais;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
unsigned int i;
int ret;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = codec_dais[i];
-
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt);
if (ret != 0 && ret != -ENOTSUPP) {
dev_warn(codec_dai->dev,
@@ -1697,8 +1696,10 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
}
}
- /* Flip the polarity for the "CPU" end of a CODEC<->CODEC link */
- /* the component which has non_legacy_dai_naming is Codec */
+ /*
+	 * Flip the polarity for the "CPU" end of a CODEC<->CODEC link;
+	 * the component which has non_legacy_dai_naming is the CODEC
+ */
if (cpu_dai->component->driver->non_legacy_dai_naming) {
unsigned int inv_dai_fmt;
@@ -1732,9 +1733,9 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
}
EXPORT_SYMBOL_GPL(snd_soc_runtime_set_dai_fmt);
-
#ifdef CONFIG_DMI
-/* Trim special characters, and replace '-' with '_' since '-' is used to
+/*
+ * Trim special characters, and replace '-' with '_' since '-' is used to
* separate different DMI fields in the card long name. Only number and
* alphabet characters and a few separator characters are kept.
*/
@@ -1753,7 +1754,8 @@ static void cleanup_dmi_name(char *name)
name[j] = '\0';
}
-/* Check if a DMI field is valid, i.e. not containing any string
+/*
+ * Check if a DMI field is valid, i.e. not containing any string
* in the black list.
*/
static int is_dmi_valid(const char *field)
@@ -1816,7 +1818,6 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
return 0;
}
-
snprintf(card->dmi_longname, sizeof(card->snd_card->longname),
"%s", vendor);
cleanup_dmi_name(card->dmi_longname);
@@ -1832,7 +1833,8 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
if (len < longname_buf_size)
cleanup_dmi_name(card->dmi_longname + len);
- /* some vendors like Lenovo may only put a self-explanatory
+ /*
+ * some vendors like Lenovo may only put a self-explanatory
* name in the product version field
*/
product_version = dmi_get_system_info(DMI_PRODUCT_VERSION);
@@ -1891,7 +1893,7 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
struct snd_soc_dai_link *dai_link;
int i;
- list_for_each_entry(component, &component_list, list) {
+ for_each_component(component) {
/* does this component override FEs ? */
if (!component->driver->ignore_machine)
@@ -1903,9 +1905,7 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
continue;
/* machine matches, so override the rtd data */
- for (i = 0; i < card->num_links; i++) {
-
- dai_link = &card->dai_link[i];
+ for_each_card_prelinks(card, i, dai_link) {
/* ignore this FE */
if (dai_link->dynamic) {
@@ -1917,7 +1917,11 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
card->dai_link[i].name);
/* override platform component */
- dai_link->platform_name = component->name;
+ if (snd_soc_init_platform(card, dai_link) < 0) {
+				dev_err(card->dev, "ASoC: failed to init platform\n");
+ continue;
+ }
+ dai_link->platform->name = component->name;
/* convert non BE into BE */
dai_link->no_pcm = 1;
@@ -1926,7 +1930,8 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
dai_link->be_hw_params_fixup =
component->driver->be_hw_params_fixup;
- /* most BE links don't set stream name, so set it to
+ /*
+ * most BE links don't set stream name, so set it to
* dai link name if it's NULL to help bind widgets.
*/
if (!dai_link->stream_name)
@@ -1936,7 +1941,7 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
/* Inform userspace we are using alternate topology */
if (component->driver->topology_name_prefix) {
- /* topology shortname created ? */
+ /* topology shortname created? */
if (!card->topology_shortname_created) {
comp_drv = component->driver;
@@ -1965,8 +1970,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
soc_check_tplg_fes(card);
/* bind DAIs */
- for (i = 0; i < card->num_links; i++) {
- ret = soc_bind_dai_link(card, &card->dai_link[i]);
+ for_each_card_prelinks(card, i, dai_link) {
+ ret = soc_bind_dai_link(card, dai_link);
if (ret != 0)
goto base_error;
}
@@ -1979,8 +1984,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
}
/* add predefined DAI links to the list */
- for (i = 0; i < card->num_links; i++)
- snd_soc_add_dai_link(card, card->dai_link+i);
+ for_each_card_prelinks(card, i, dai_link)
+ snd_soc_add_dai_link(card, dai_link);
/* card bind complete so register a sound card */
ret = snd_card_new(card->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
@@ -2024,9 +2029,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
}
/* probe all components used by DAI links on this card */
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_comp_order(order) {
+ for_each_card_rtds(card, rtd) {
ret = soc_probe_link_components(card, rtd, order);
if (ret < 0) {
dev_err(card->dev,
@@ -2042,10 +2046,11 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
if (ret < 0)
goto probe_dai_err;
- /* Find new DAI links added during probing components and bind them.
+ /*
+ * Find new DAI links added during probing components and bind them.
* Components with topology may bring new DAIs and DAI links.
*/
- list_for_each_entry(dai_link, &card->dai_link_list, list) {
+ for_each_card_links(card, dai_link) {
if (soc_is_dai_link_bound(card, dai_link))
continue;
@@ -2058,9 +2063,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
}
/* probe all DAI links on this card */
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_comp_order(order) {
+ for_each_card_rtds(card, rtd) {
ret = soc_probe_link_dais(card, rtd, order);
if (ret < 0) {
dev_err(card->dev,
@@ -2075,7 +2079,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
snd_soc_dapm_connect_dai_link_widgets(card);
if (card->controls)
- snd_soc_add_card_controls(card, card->controls, card->num_controls);
+ snd_soc_add_card_controls(card, card->controls,
+ card->num_controls);
if (card->dapm_routes)
snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
@@ -2181,7 +2186,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
struct snd_soc_pcm_runtime *rtd;
/* make sure any delayed work runs */
- list_for_each_entry(rtd, &card->rtd_list, list)
+ for_each_card_rtds(card, rtd)
flush_delayed_work(&rtd->delayed_work);
/* free the ALSA card at first; this syncs with pending operations */
@@ -2221,21 +2226,23 @@ int snd_soc_poweroff(struct device *dev)
if (!card->instantiated)
return 0;
- /* Flush out pmdown_time work - we actually do want to run it
- * now, we're shutting down so no imminent restart. */
- list_for_each_entry(rtd, &card->rtd_list, list)
+ /*
+ * Flush out pmdown_time work - we actually do want to run it
+	 * now; we're shutting down, so there is no imminent restart.
+ */
+ for_each_card_rtds(card, rtd)
flush_delayed_work(&rtd->delayed_work);
snd_soc_dapm_shutdown(card);
/* deactivate pins to sleep state */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
int i;
pinctrl_pm_select_sleep_state(cpu_dai->dev);
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
pinctrl_pm_select_sleep_state(codec_dai->dev);
}
}
@@ -2315,6 +2322,7 @@ static int snd_soc_add_controls(struct snd_card *card, struct device *dev,
for (i = 0; i < num_controls; i++) {
const struct snd_kcontrol_new *control = &controls[i];
+
err = snd_ctl_add(card, snd_soc_cnew(control, data,
control->name, prefix));
if (err < 0) {
@@ -2432,8 +2440,9 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_sysclk);
*
* Configures the CODEC master (MCLK) or system (SYSCLK) clocking.
*/
-int snd_soc_component_set_sysclk(struct snd_soc_component *component, int clk_id,
- int source, unsigned int freq, int dir)
+int snd_soc_component_set_sysclk(struct snd_soc_component *component,
+ int clk_id, int source, unsigned int freq,
+ int dir)
{
if (component->driver->set_sysclk)
return component->driver->set_sysclk(component, clk_id, source,
@@ -2501,7 +2510,7 @@ int snd_soc_component_set_pll(struct snd_soc_component *component, int pll_id,
{
if (component->driver->set_pll)
return component->driver->set_pll(component, pll_id, source,
- freq_in, freq_out);
+ freq_in, freq_out);
return -EINVAL;
}
@@ -2532,8 +2541,6 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_bclk_ratio);
*/
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
- if (dai->driver == NULL)
- return -EINVAL;
if (dai->driver->ops->set_fmt == NULL)
return -ENOTSUPP;
return dai->driver->ops->set_fmt(dai, fmt);
@@ -2549,8 +2556,8 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_fmt);
* Generates the TDM tx and rx slot default masks for DAI.
*/
static int snd_soc_xlate_tdm_slot_mask(unsigned int slots,
- unsigned int *tx_mask,
- unsigned int *rx_mask)
+ unsigned int *tx_mask,
+ unsigned int *rx_mask)
{
if (*tx_mask || *rx_mask)
return 0;
@@ -2680,9 +2687,6 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate);
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
int direction)
{
- if (!dai->driver)
- return -ENOTSUPP;
-
if (dai->driver->ops->mute_stream)
return dai->driver->ops->mute_stream(dai, mute, direction);
else if (direction == SNDRV_PCM_STREAM_PLAYBACK &&
@@ -2693,6 +2697,33 @@ int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
}
EXPORT_SYMBOL_GPL(snd_soc_dai_digital_mute);
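+/*
+ * bind the card, then put the pins of every still-idle DAI back into
+ * their sleep state
+ */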
+static int snd_soc_bind_card(struct snd_soc_card *card)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ int ret;
+
+ ret = snd_soc_instantiate_card(card);
+ if (ret != 0)
+ return ret;
+
+ /* deactivate pins to sleep state */
+ for_each_card_rtds(card, rtd) {
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
+ int j;
+
+ for_each_rtd_codec_dai(rtd, j, codec_dai) {
+ if (!codec_dai->active)
+ pinctrl_pm_select_sleep_state(codec_dai->dev);
+ }
+
+ if (!cpu_dai->active)
+ pinctrl_pm_select_sleep_state(cpu_dai->dev);
+ }
+
+ return ret;
+}
+
/**
* snd_soc_register_card - Register a card with the ASoC core
*
@@ -2702,13 +2733,12 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_digital_mute);
int snd_soc_register_card(struct snd_soc_card *card)
{
int i, ret;
- struct snd_soc_pcm_runtime *rtd;
+ struct snd_soc_dai_link *link;
if (!card->name || !card->dev)
return -EINVAL;
- for (i = 0; i < card->num_links; i++) {
- struct snd_soc_dai_link *link = &card->dai_link[i];
+ for_each_card_prelinks(card, i, link) {
ret = soc_init_dai_link(card, link);
if (ret) {
@@ -2723,7 +2753,6 @@ int snd_soc_register_card(struct snd_soc_card *card)
snd_soc_initialize_card_lists(card);
INIT_LIST_HEAD(&card->dai_link_list);
- card->num_dai_links = 0;
INIT_LIST_HEAD(&card->rtd_list);
card->num_rtd = 0;
@@ -2734,28 +2763,23 @@ int snd_soc_register_card(struct snd_soc_card *card)
mutex_init(&card->mutex);
mutex_init(&card->dapm_mutex);
- ret = snd_soc_instantiate_card(card);
- if (ret != 0)
- return ret;
-
- /* deactivate pins to sleep state */
- list_for_each_entry(rtd, &card->rtd_list, list) {
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int j;
-
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
- if (!codec_dai->active)
- pinctrl_pm_select_sleep_state(codec_dai->dev);
- }
+ return snd_soc_bind_card(card);
+}
+EXPORT_SYMBOL_GPL(snd_soc_register_card);
- if (!cpu_dai->active)
- pinctrl_pm_select_sleep_state(cpu_dai->dev);
+static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
+{
+ if (card->instantiated) {
+ card->instantiated = false;
+ snd_soc_dapm_shutdown(card);
+ soc_cleanup_card_resources(card);
+ if (!unregister)
+ list_add(&card->list, &unbind_card_list);
+ } else {
+ if (unregister)
+ list_del(&card->list);
}
-
- return ret;
}
-EXPORT_SYMBOL_GPL(snd_soc_register_card);
/**
* snd_soc_unregister_card - Unregister a card with the ASoC core
@@ -2765,12 +2789,8 @@ EXPORT_SYMBOL_GPL(snd_soc_register_card);
*/
int snd_soc_unregister_card(struct snd_soc_card *card)
{
- if (card->instantiated) {
- card->instantiated = false;
- snd_soc_dapm_shutdown(card);
- soc_cleanup_card_resources(card);
- dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);
- }
+ snd_soc_unbind_card(card, true);
+ dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);
return 0;
}
@@ -2802,7 +2822,7 @@ static char *fmt_single_name(struct device *dev, int *id)
}
} else {
- /* I2C component devices are named "bus-addr" */
+ /* I2C component devices are named "bus-addr" */
if (sscanf(name, "%x-%x", &id1, &id2) == 2) {
char tmp[NAME_SIZE];
@@ -2810,7 +2830,8 @@ static char *fmt_single_name(struct device *dev, int *id)
*id = ((id1 & 0xffff) << 16) + id2;
/* sanitize component name for DAI link creation */
- snprintf(tmp, NAME_SIZE, "%s.%s", dev->driver->name, name);
+ snprintf(tmp, NAME_SIZE, "%s.%s", dev->driver->name,
+ name);
strlcpy(name, tmp, NAME_SIZE);
} else
*id = 0;
@@ -2845,7 +2866,7 @@ static void snd_soc_unregister_dais(struct snd_soc_component *component)
{
struct snd_soc_dai *dai, *_dai;
- list_for_each_entry_safe(dai, _dai, &component->dai_list, list) {
+ for_each_component_dais_safe(component, dai, _dai) {
dev_dbg(component->dev, "ASoC: Unregistered DAI '%s'\n",
dai->name);
list_del(&dai->list);
@@ -2877,7 +2898,7 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
* component-less anymore.
*/
if (legacy_dai_naming &&
- (dai_drv->id == 0 || dai_drv->name == NULL)) {
+ (dai_drv->id == 0 || dai_drv->name == NULL)) {
dai->name = fmt_single_name(dev, &dai->id);
} else {
dai->name = fmt_multiple_name(dev, dai_drv);
@@ -2897,6 +2918,7 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
if (!dai->driver->ops)
dai->driver->ops = &null_dai_ops;
+ /* see for_each_component_dais */
list_add_tail(&dai->list, &component->dai_list);
component->num_dai++;
@@ -2910,11 +2932,10 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
* @component: The component the DAIs are registered for
* @dai_drv: DAI driver to use for the DAIs
* @count: Number of DAIs
- * @legacy_dai_naming: Use the legacy naming scheme and let the DAI inherit the
- * parent's name.
*/
static int snd_soc_register_dais(struct snd_soc_component *component,
- struct snd_soc_dai_driver *dai_drv, size_t count)
+ struct snd_soc_dai_driver *dai_drv,
+ size_t count)
{
struct device *dev = component->dev;
struct snd_soc_dai *dai;
@@ -2925,8 +2946,8 @@ static int snd_soc_register_dais(struct snd_soc_component *component,
for (i = 0; i < count; i++) {
- dai = soc_add_dai(component, dai_drv + i,
- count == 1 && !component->driver->non_legacy_dai_naming);
+ dai = soc_add_dai(component, dai_drv + i, count == 1 &&
+ !component->driver->non_legacy_dai_naming);
if (dai == NULL) {
ret = -ENOMEM;
goto err;
@@ -2970,7 +2991,8 @@ int snd_soc_register_dai(struct snd_soc_component *component,
if (!dai)
return -ENOMEM;
- /* Create the DAI widgets here. After adding DAIs, topology may
+ /*
+ * Create the DAI widgets here. After adding DAIs, topology may
* also add routes that need these widgets as source or sink.
*/
ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
@@ -3052,7 +3074,8 @@ static void snd_soc_component_setup_regmap(struct snd_soc_component *component)
#ifdef CONFIG_REGMAP
/**
- * snd_soc_component_init_regmap() - Initialize regmap instance for the component
+ * snd_soc_component_init_regmap() - Initialize regmap instance for the
+ * component
* @component: The component for which to initialize the regmap instance
* @regmap: The regmap instance that should be used by the component
*
@@ -3070,7 +3093,8 @@ void snd_soc_component_init_regmap(struct snd_soc_component *component,
EXPORT_SYMBOL_GPL(snd_soc_component_init_regmap);
/**
- * snd_soc_component_exit_regmap() - De-initialize regmap instance for the component
+ * snd_soc_component_exit_regmap() - De-initialize regmap instance for the
+ * component
* @component: The component for which to de-initialize the regmap instance
*
* Calls regmap_exit() on the regmap instance associated to the component and
@@ -3094,11 +3118,13 @@ static void snd_soc_component_add(struct snd_soc_component *component)
if (!component->driver->write && !component->driver->read) {
if (!component->regmap)
- component->regmap = dev_get_regmap(component->dev, NULL);
+ component->regmap = dev_get_regmap(component->dev,
+ NULL);
if (component->regmap)
snd_soc_component_setup_regmap(component);
}
+ /* see for_each_component */
list_add(&component->list, &component_list);
INIT_LIST_HEAD(&component->dobj_list);
@@ -3116,7 +3142,7 @@ static void snd_soc_component_del_unlocked(struct snd_soc_component *component)
struct snd_soc_card *card = component->card;
if (card)
- snd_soc_unregister_card(card);
+ snd_soc_unbind_card(card, false);
list_del(&component->list);
}
@@ -3156,6 +3182,18 @@ static void convert_endianness_formats(struct snd_soc_pcm_stream *stream)
stream->formats |= endianness_format_map[i];
}
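+/* retry any card parked on unbind_card_list; drop those that bind */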
+static void snd_soc_try_rebind_card(void)
+{
+ struct snd_soc_card *card, *c;
+
+ if (!list_empty(&unbind_card_list)) {
+ list_for_each_entry_safe(card, c, &unbind_card_list, list) {
+ if (!snd_soc_bind_card(card))
+ list_del(&card->list);
+ }
+ }
+}
+
int snd_soc_add_component(struct device *dev,
struct snd_soc_component *component,
const struct snd_soc_component_driver *component_driver,
@@ -3183,6 +3221,7 @@ int snd_soc_add_component(struct device *dev,
}
snd_soc_component_add(component);
+ snd_soc_try_rebind_card();
return 0;
@@ -3221,27 +3260,28 @@ static int __snd_soc_unregister_component(struct device *dev)
int found = 0;
mutex_lock(&client_mutex);
- list_for_each_entry(component, &component_list, list) {
+ for_each_component(component) {
if (dev != component->dev)
continue;
- snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL);
+ snd_soc_tplg_component_remove(component,
+ SND_SOC_TPLG_INDEX_ALL);
snd_soc_component_del_unlocked(component);
found = 1;
break;
}
mutex_unlock(&client_mutex);
- if (found) {
+ if (found)
snd_soc_component_cleanup(component);
- }
return found;
}
void snd_soc_unregister_component(struct device *dev)
{
- while (__snd_soc_unregister_component(dev));
+ while (__snd_soc_unregister_component(dev))
+ ;
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
@@ -3253,7 +3293,7 @@ struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
ret = NULL;
mutex_lock(&client_mutex);
- list_for_each_entry(component, &component_list, list) {
+ for_each_component(component) {
if (dev != component->dev)
continue;
@@ -3653,7 +3693,7 @@ int snd_soc_get_dai_id(struct device_node *ep)
*/
ret = -ENOTSUPP;
mutex_lock(&client_mutex);
- list_for_each_entry(pos, &component_list, list) {
+ for_each_component(pos) {
struct device_node *component_of_node = pos->dev->of_node;
if (!component_of_node && pos->dev->parent)
@@ -3683,7 +3723,7 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
int ret = -EPROBE_DEFER;
mutex_lock(&client_mutex);
- list_for_each_entry(pos, &component_list, list) {
+ for_each_component(pos) {
component_of_node = pos->dev->of_node;
if (!component_of_node && pos->dev->parent)
component_of_node = pos->dev->parent->of_node;
@@ -3719,7 +3759,7 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
ret = 0;
/* find target DAI */
- list_for_each_entry(dai, &pos->dai_list, list) {
+ for_each_component_dais(pos, dai) {
if (id == 0)
break;
id--;
@@ -3764,10 +3804,10 @@ EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_name);
*/
void snd_soc_of_put_dai_link_codecs(struct snd_soc_dai_link *dai_link)
{
- struct snd_soc_dai_link_component *component = dai_link->codecs;
+ struct snd_soc_dai_link_component *component;
int index;
- for (index = 0; index < dai_link->num_codecs; index++, component++) {
+ for_each_link_codecs(dai_link, index, component) {
if (!component->of_node)
break;
of_node_put(component->of_node);
@@ -3819,12 +3859,10 @@ int snd_soc_of_get_dai_link_codecs(struct device *dev,
dai_link->num_codecs = num_codecs;
/* Parse the list */
- for (index = 0, component = dai_link->codecs;
- index < dai_link->num_codecs;
- index++, component++) {
+ for_each_link_codecs(dai_link, index, component) {
ret = of_parse_phandle_with_args(of_node, name,
"#sound-dai-cells",
- index, &args);
+ index, &args);
if (ret)
goto err;
component->of_node = args.np;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7e96793050c9..a5178845065b 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -18,7 +18,6 @@
// device reopen.
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/async.h>
#include <linux/delay.h>
@@ -364,10 +363,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
ret = PTR_ERR(data->widget);
goto err_data;
}
- if (!data->widget) {
- ret = -ENOMEM;
- goto err_data;
- }
}
break;
case snd_soc_dapm_demux:
@@ -402,10 +397,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
ret = PTR_ERR(data->widget);
goto err_data;
}
- if (!data->widget) {
- ret = -ENOMEM;
- goto err_data;
- }
snd_soc_dapm_add_path(widget->dapm, data->widget,
widget, NULL, NULL);
@@ -1026,9 +1017,10 @@ static int dapm_new_dai_link(struct snd_soc_dapm_widget *w)
struct snd_kcontrol *kcontrol;
struct snd_soc_dapm_context *dapm = w->dapm;
struct snd_card *card = dapm->card->snd_card;
+ struct snd_soc_pcm_runtime *rtd = w->priv;
/* create control for links with > 1 config */
- if (w->num_params <= 1)
+ if (rtd->dai_link->num_params <= 1)
return 0;
/* add kcontrol */
@@ -1320,14 +1312,13 @@ int dapm_clock_event(struct snd_soc_dapm_widget *w,
soc_dapm_async_complete(w->dapm);
-#ifdef CONFIG_HAVE_CLK
if (SND_SOC_DAPM_EVENT_ON(event)) {
return clk_prepare_enable(w->clk);
} else {
clk_disable_unprepare(w->clk);
return 0;
}
-#endif
+
return 0;
}
EXPORT_SYMBOL_GPL(dapm_clock_event);
@@ -1953,7 +1944,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
dapm_pre_sequence_async(&card->dapm, 0);
/* Run other bias changes in parallel */
list_for_each_entry(d, &card->dapm_list, list) {
- if (d != &card->dapm)
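+		/* skip contexts that already sit at their target bias level */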
+ if (d != &card->dapm && d->bias_level != d->target_bias_level)
async_schedule_domain(dapm_pre_sequence_async, d,
&async_domain);
}
@@ -1977,7 +1968,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
/* Run all the bias changes in parallel */
list_for_each_entry(d, &card->dapm_list, list) {
- if (d != &card->dapm)
+ if (d != &card->dapm && d->bias_level != d->target_bias_level)
async_schedule_domain(dapm_post_sequence_async, d,
&async_domain);
}
@@ -2371,12 +2362,13 @@ static ssize_t dapm_widget_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
+ struct snd_soc_dai *codec_dai;
int i, count = 0;
mutex_lock(&rtd->card->dapm_mutex);
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_component *cmpnt = rtd->codec_dais[i]->component;
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ struct snd_soc_component *cmpnt = codec_dai->component;
count += dapm_widget_show_component(cmpnt, buf + count);
}
@@ -3426,35 +3418,6 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch);
struct snd_soc_dapm_widget *
-snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
- const struct snd_soc_dapm_widget *widget)
-{
- struct snd_soc_dapm_widget *w;
-
- mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- w = snd_soc_dapm_new_control_unlocked(dapm, widget);
- /* Do not nag about probe deferrals */
- if (IS_ERR(w)) {
- int ret = PTR_ERR(w);
-
- if (ret != -EPROBE_DEFER)
- dev_err(dapm->dev,
- "ASoC: Failed to create DAPM control %s (%d)\n",
- widget->name, ret);
- goto out_unlock;
- }
- if (!w)
- dev_err(dapm->dev,
- "ASoC: Failed to create DAPM control %s\n",
- widget->name);
-
-out_unlock:
- mutex_unlock(&dapm->card->dapm_mutex);
- return w;
-}
-EXPORT_SYMBOL_GPL(snd_soc_dapm_new_control);
-
-struct snd_soc_dapm_widget *
snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_widget *widget)
{
@@ -3464,53 +3427,37 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
int ret;
if ((w = dapm_cnew_widget(widget)) == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
switch (w->id) {
case snd_soc_dapm_regulator_supply:
w->regulator = devm_regulator_get(dapm->dev, w->name);
if (IS_ERR(w->regulator)) {
ret = PTR_ERR(w->regulator);
- if (ret == -EPROBE_DEFER)
- return ERR_PTR(ret);
- dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
- w->name, ret);
- return NULL;
+ goto request_failed;
}
if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
ret = regulator_allow_bypass(w->regulator, true);
if (ret != 0)
- dev_warn(w->dapm->dev,
+ dev_warn(dapm->dev,
"ASoC: Failed to bypass %s: %d\n",
w->name, ret);
}
break;
case snd_soc_dapm_pinctrl:
w->pinctrl = devm_pinctrl_get(dapm->dev);
- if (IS_ERR_OR_NULL(w->pinctrl)) {
+ if (IS_ERR(w->pinctrl)) {
ret = PTR_ERR(w->pinctrl);
- if (ret == -EPROBE_DEFER)
- return ERR_PTR(ret);
- dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
- w->name, ret);
- return NULL;
+ goto request_failed;
}
break;
case snd_soc_dapm_clock_supply:
-#ifdef CONFIG_CLKDEV_LOOKUP
w->clk = devm_clk_get(dapm->dev, w->name);
if (IS_ERR(w->clk)) {
ret = PTR_ERR(w->clk);
- if (ret == -EPROBE_DEFER)
- return ERR_PTR(ret);
- dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
- w->name, ret);
- return NULL;
+ goto request_failed;
}
-#else
- return NULL;
-#endif
break;
default:
break;
@@ -3523,7 +3470,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
w->name = kstrdup_const(widget->name, GFP_KERNEL);
if (w->name == NULL) {
kfree(w);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
switch (w->id) {
@@ -3600,9 +3547,39 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
/* machine layer sets up unconnected pins and insertions */
w->connected = 1;
return w;
+
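+	/* common failure exit: complain unless we are just deferring probe */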
+request_failed:
+ if (ret != -EPROBE_DEFER)
+ dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
+ w->name, ret);
+
+ return ERR_PTR(ret);
}
/**
+ * snd_soc_dapm_new_control - create new dapm control
+ * @dapm: DAPM context
+ * @widget: widget template
+ *
+ * Creates new DAPM control based upon a template.
+ *
+ * Returns a widget pointer on success or an error pointer on failure
+ */
+struct snd_soc_dapm_widget *
+snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_widget *widget)
+{
+ struct snd_soc_dapm_widget *w;
+
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+ w = snd_soc_dapm_new_control_unlocked(dapm, widget);
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ return w;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_new_control);
+
+/**
* snd_soc_dapm_new_controls - create new dapm controls
* @dapm: DAPM context
* @widget: widget array
@@ -3625,19 +3602,6 @@ int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
w = snd_soc_dapm_new_control_unlocked(dapm, widget);
if (IS_ERR(w)) {
ret = PTR_ERR(w);
- /* Do not nag about probe deferrals */
- if (ret == -EPROBE_DEFER)
- break;
- dev_err(dapm->dev,
- "ASoC: Failed to create DAPM control %s (%d)\n",
- widget->name, ret);
- break;
- }
- if (!w) {
- dev_err(dapm->dev,
- "ASoC: Failed to create DAPM control %s\n",
- widget->name);
- ret = -ENOMEM;
break;
}
widget++;
@@ -3650,31 +3614,23 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_new_controls);
static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_dapm_path *source_p, *sink_p;
+ struct snd_soc_dapm_path *path;
struct snd_soc_dai *source, *sink;
- const struct snd_soc_pcm_stream *config = w->params + w->params_select;
+ struct snd_soc_pcm_runtime *rtd = w->priv;
+ const struct snd_soc_pcm_stream *config;
struct snd_pcm_substream substream;
struct snd_pcm_hw_params *params = NULL;
struct snd_pcm_runtime *runtime = NULL;
unsigned int fmt;
- int ret;
+ int ret = 0;
+
+ config = rtd->dai_link->params + rtd->params_select;
if (WARN_ON(!config) ||
WARN_ON(list_empty(&w->edges[SND_SOC_DAPM_DIR_OUT]) ||
list_empty(&w->edges[SND_SOC_DAPM_DIR_IN])))
return -EINVAL;
- /* We only support a single source and sink, pick the first */
- source_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_OUT],
- struct snd_soc_dapm_path,
- list_node[SND_SOC_DAPM_DIR_OUT]);
- sink_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_IN],
- struct snd_soc_dapm_path,
- list_node[SND_SOC_DAPM_DIR_IN]);
-
- source = source_p->source->priv;
- sink = sink_p->sink->priv;
-
/* Be a little careful as we don't want to overflow the mask array */
if (config->formats) {
fmt = ffs(config->formats) - 1;
@@ -3711,63 +3667,100 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
goto out;
}
substream.runtime = runtime;
+ substream.private_data = rtd;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
substream.stream = SNDRV_PCM_STREAM_CAPTURE;
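+		/* a DAI link widget can now fan out to several sources and sinks */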
- if (source->driver->ops->startup) {
- ret = source->driver->ops->startup(&substream, source);
- if (ret < 0) {
- dev_err(source->dev,
- "ASoC: startup() failed: %d\n", ret);
- goto out;
+ snd_soc_dapm_widget_for_each_source_path(w, path) {
+ source = path->source->priv;
+
+ if (source->driver->ops->startup) {
+ ret = source->driver->ops->startup(&substream,
+ source);
+ if (ret < 0) {
+ dev_err(source->dev,
+ "ASoC: startup() failed: %d\n",
+ ret);
+ goto out;
+ }
+ source->active++;
}
- source->active++;
+ ret = soc_dai_hw_params(&substream, params, source);
+ if (ret < 0)
+ goto out;
}
- ret = soc_dai_hw_params(&substream, params, source);
- if (ret < 0)
- goto out;
substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
- if (sink->driver->ops->startup) {
- ret = sink->driver->ops->startup(&substream, sink);
- if (ret < 0) {
- dev_err(sink->dev,
- "ASoC: startup() failed: %d\n", ret);
- goto out;
+ snd_soc_dapm_widget_for_each_sink_path(w, path) {
+ sink = path->sink->priv;
+
+ if (sink->driver->ops->startup) {
+ ret = sink->driver->ops->startup(&substream,
+ sink);
+ if (ret < 0) {
+ dev_err(sink->dev,
+ "ASoC: startup() failed: %d\n",
+ ret);
+ goto out;
+ }
+ sink->active++;
}
- sink->active++;
+ ret = soc_dai_hw_params(&substream, params, sink);
+ if (ret < 0)
+ goto out;
}
- ret = soc_dai_hw_params(&substream, params, sink);
- if (ret < 0)
- goto out;
break;
case SND_SOC_DAPM_POST_PMU:
- ret = snd_soc_dai_digital_mute(sink, 0,
- SNDRV_PCM_STREAM_PLAYBACK);
- if (ret != 0 && ret != -ENOTSUPP)
- dev_warn(sink->dev, "ASoC: Failed to unmute: %d\n", ret);
- ret = 0;
+ snd_soc_dapm_widget_for_each_sink_path(w, path) {
+ sink = path->sink->priv;
+
+ ret = snd_soc_dai_digital_mute(sink, 0,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret != 0 && ret != -ENOTSUPP)
+ dev_warn(sink->dev,
+ "ASoC: Failed to unmute: %d\n", ret);
+ ret = 0;
+ }
break;
case SND_SOC_DAPM_PRE_PMD:
- ret = snd_soc_dai_digital_mute(sink, 1,
- SNDRV_PCM_STREAM_PLAYBACK);
- if (ret != 0 && ret != -ENOTSUPP)
- dev_warn(sink->dev, "ASoC: Failed to mute: %d\n", ret);
- ret = 0;
+ snd_soc_dapm_widget_for_each_sink_path(w, path) {
+ sink = path->sink->priv;
+
+ ret = snd_soc_dai_digital_mute(sink, 1,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret != 0 && ret != -ENOTSUPP)
+ dev_warn(sink->dev,
+ "ASoC: Failed to mute: %d\n", ret);
+ ret = 0;
+ }
+
+ substream.stream = SNDRV_PCM_STREAM_CAPTURE;
+ snd_soc_dapm_widget_for_each_source_path(w, path) {
+ source = path->source->priv;
- source->active--;
- if (source->driver->ops->shutdown) {
- substream.stream = SNDRV_PCM_STREAM_CAPTURE;
- source->driver->ops->shutdown(&substream, source);
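+			/* release hardware resources before the DAI shuts down */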
+ if (source->driver->ops->hw_free)
+ source->driver->ops->hw_free(&substream,
+ source);
+
+ source->active--;
+ if (source->driver->ops->shutdown)
+ source->driver->ops->shutdown(&substream,
+ source);
}
- sink->active--;
- if (sink->driver->ops->shutdown) {
- substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
- sink->driver->ops->shutdown(&substream, sink);
+ substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
+ snd_soc_dapm_widget_for_each_sink_path(w, path) {
+ sink = path->sink->priv;
+
+ if (sink->driver->ops->hw_free)
+ sink->driver->ops->hw_free(&substream, sink);
+
+ sink->active--;
+ if (sink->driver->ops->shutdown)
+ sink->driver->ops->shutdown(&substream, sink);
}
break;
@@ -3786,8 +3779,9 @@ static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_pcm_runtime *rtd = w->priv;
- ucontrol->value.enumerated.item[0] = w->params_select;
+ ucontrol->value.enumerated.item[0] = rtd->params_select;
return 0;
}
@@ -3796,18 +3790,19 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_pcm_runtime *rtd = w->priv;
/* Can't change the config when widget is already powered */
if (w->power)
return -EBUSY;
- if (ucontrol->value.enumerated.item[0] == w->params_select)
+ if (ucontrol->value.enumerated.item[0] == rtd->params_select)
return 0;
- if (ucontrol->value.enumerated.item[0] >= w->num_params)
+ if (ucontrol->value.enumerated.item[0] >= rtd->dai_link->num_params)
return -EINVAL;
- w->params_select = ucontrol->value.enumerated.item[0];
+ rtd->params_select = ucontrol->value.enumerated.item[0];
return 0;
}
@@ -3894,11 +3889,10 @@ outfree_w_param:
return NULL;
}
-int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
- const struct snd_soc_pcm_stream *params,
- unsigned int num_params,
- struct snd_soc_dapm_widget *source,
- struct snd_soc_dapm_widget *sink)
+static struct snd_soc_dapm_widget *
+snd_soc_dapm_new_dai(struct snd_soc_card *card, struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
{
struct snd_soc_dapm_widget template;
struct snd_soc_dapm_widget *w;
@@ -3910,7 +3904,7 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
link_name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-%s",
source->name, sink->name);
if (!link_name)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
memset(&template, 0, sizeof(template));
template.reg = SND_SOC_NOPM;
@@ -3922,9 +3916,10 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
template.kcontrol_news = NULL;
/* allocate memory for control, only in case of multiple configs */
- if (num_params > 1) {
- w_param_text = devm_kcalloc(card->dev, num_params,
- sizeof(char *), GFP_KERNEL);
+ if (rtd->dai_link->num_params > 1) {
+ w_param_text = devm_kcalloc(card->dev,
+ rtd->dai_link->num_params,
+ sizeof(char *), GFP_KERNEL);
if (!w_param_text) {
ret = -ENOMEM;
goto param_fail;
@@ -3933,7 +3928,9 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
template.num_kcontrols = 1;
template.kcontrol_news =
snd_soc_dapm_alloc_kcontrol(card,
- link_name, params, num_params,
+ link_name,
+ rtd->dai_link->params,
+ rtd->dai_link->num_params,
w_param_text, &private_value);
if (!template.kcontrol_news) {
ret = -ENOMEM;
@@ -3947,36 +3944,20 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
w = snd_soc_dapm_new_control_unlocked(&card->dapm, &template);
if (IS_ERR(w)) {
ret = PTR_ERR(w);
- /* Do not nag about probe deferrals */
- if (ret != -EPROBE_DEFER)
- dev_err(card->dev,
- "ASoC: Failed to create %s widget (%d)\n",
- link_name, ret);
- goto outfree_kcontrol_news;
- }
- if (!w) {
- dev_err(card->dev, "ASoC: Failed to create %s widget\n",
- link_name);
- ret = -ENOMEM;
goto outfree_kcontrol_news;
}
- w->params = params;
- w->num_params = num_params;
+ w->priv = rtd;
- ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL);
- if (ret)
- goto outfree_w;
- return snd_soc_dapm_add_path(&card->dapm, w, sink, NULL, NULL);
+ return w;
-outfree_w:
- devm_kfree(card->dev, w);
outfree_kcontrol_news:
devm_kfree(card->dev, (void *)template.kcontrol_news);
- snd_soc_dapm_free_kcontrol(card, &private_value, num_params, w_param_text);
+ snd_soc_dapm_free_kcontrol(card, &private_value,
+ rtd->dai_link->num_params, w_param_text);
param_fail:
devm_kfree(card->dev, link_name);
- return ret;
+ return ERR_PTR(ret);
}
int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
@@ -3999,21 +3980,8 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
template.name);
w = snd_soc_dapm_new_control_unlocked(dapm, &template);
- if (IS_ERR(w)) {
- int ret = PTR_ERR(w);
-
- /* Do not nag about probe deferrals */
- if (ret != -EPROBE_DEFER)
- dev_err(dapm->dev,
- "ASoC: Failed to create %s widget (%d)\n",
- dai->driver->playback.stream_name, ret);
- return ret;
- }
- if (!w) {
- dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
- dai->driver->playback.stream_name);
- return -ENOMEM;
- }
+ if (IS_ERR(w))
+ return PTR_ERR(w);
w->priv = dai;
dai->playback_widget = w;
@@ -4028,21 +3996,8 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
template.name);
w = snd_soc_dapm_new_control_unlocked(dapm, &template);
- if (IS_ERR(w)) {
- int ret = PTR_ERR(w);
-
- /* Do not nag about probe deferrals */
- if (ret != -EPROBE_DEFER)
- dev_err(dapm->dev,
- "ASoC: Failed to create %s widget (%d)\n",
- dai->driver->playback.stream_name, ret);
- return ret;
- }
- if (!w) {
- dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
- dai->driver->capture.stream_name);
- return -ENOMEM;
- }
+ if (IS_ERR(w))
+ return PTR_ERR(w);
w->priv = dai;
dai->capture_widget = w;
@@ -4111,34 +4066,79 @@ static void dapm_connect_dai_link_widgets(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dapm_widget *sink, *source;
+ struct snd_soc_dai *codec_dai;
+ struct snd_soc_dapm_widget *playback = NULL, *capture = NULL;
+ struct snd_soc_dapm_widget *codec, *playback_cpu, *capture_cpu;
int i;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
+ if (rtd->dai_link->params) {
+ playback_cpu = cpu_dai->capture_widget;
+ capture_cpu = cpu_dai->playback_widget;
+ } else {
+ playback = cpu_dai->playback_widget;
+ capture = cpu_dai->capture_widget;
+ playback_cpu = playback;
+ capture_cpu = capture;
+ }
+
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/* connect BE DAI playback if widgets are valid */
- if (codec_dai->playback_widget && cpu_dai->playback_widget) {
- source = cpu_dai->playback_widget;
- sink = codec_dai->playback_widget;
+ codec = codec_dai->playback_widget;
+
+ if (playback_cpu && codec) {
+ if (!playback) {
+ playback = snd_soc_dapm_new_dai(card, rtd,
+ playback_cpu,
+ codec);
+ if (IS_ERR(playback)) {
+ dev_err(rtd->dev,
+ "ASoC: Failed to create DAI %s: %ld\n",
+ codec_dai->name,
+ PTR_ERR(playback));
+ continue;
+ }
+
+ snd_soc_dapm_add_path(&card->dapm, playback_cpu,
+ playback, NULL, NULL);
+ }
+
dev_dbg(rtd->dev, "connected DAI link %s:%s -> %s:%s\n",
- cpu_dai->component->name, source->name,
- codec_dai->component->name, sink->name);
+ cpu_dai->component->name, playback_cpu->name,
+ codec_dai->component->name, codec->name);
- snd_soc_dapm_add_path(&card->dapm, source, sink,
- NULL, NULL);
+ snd_soc_dapm_add_path(&card->dapm, playback, codec,
+ NULL, NULL);
}
+ }
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/* connect BE DAI capture if widgets are valid */
- if (codec_dai->capture_widget && cpu_dai->capture_widget) {
- source = codec_dai->capture_widget;
- sink = cpu_dai->capture_widget;
+ codec = codec_dai->capture_widget;
+
+ if (codec && capture_cpu) {
+ if (!capture) {
+ capture = snd_soc_dapm_new_dai(card, rtd,
+ codec,
+ capture_cpu);
+ if (IS_ERR(capture)) {
+ dev_err(rtd->dev,
+ "ASoC: Failed to create DAI %s: %ld\n",
+ codec_dai->name,
+ PTR_ERR(capture));
+ continue;
+ }
+
+ snd_soc_dapm_add_path(&card->dapm, capture,
+ capture_cpu, NULL, NULL);
+ }
+
dev_dbg(rtd->dev, "connected DAI link %s:%s -> %s:%s\n",
- codec_dai->component->name, source->name,
- cpu_dai->component->name, sink->name);
+ codec_dai->component->name, codec->name,
+ cpu_dai->component->name, capture_cpu->name);
- snd_soc_dapm_add_path(&card->dapm, source, sink,
- NULL, NULL);
+ snd_soc_dapm_add_path(&card->dapm, codec, capture,
+ NULL, NULL);
}
}
}
@@ -4188,12 +4188,12 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card)
struct snd_soc_pcm_runtime *rtd;
/* for each BE DAI link... */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
/*
* dynamic FE links have no fixed DAI mapping.
* CODEC<->CODEC links have no direct connection.
*/
- if (rtd->dai_link->dynamic || rtd->dai_link->params)
+ if (rtd->dai_link->dynamic)
continue;
dapm_connect_dai_link_widgets(card, rtd);
@@ -4203,11 +4203,12 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card)
static void soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
int event)
{
+ struct snd_soc_dai *codec_dai;
int i;
soc_dapm_dai_stream_event(rtd->cpu_dai, stream, event);
- for (i = 0; i < rtd->num_codecs; i++)
- soc_dapm_dai_stream_event(rtd->codec_dais[i], stream, event);
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ soc_dapm_dai_stream_event(codec_dai, stream, event);
dapm_power_widgets(rtd->card, event);
}
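
The soc-dapm.c hunks above remove the old "single source and sink, pick the first" limitation of snd_soc_dai_link_event(): the handler now walks every path attached to the DAI-link widget, runs startup()/hw_params() per source and sink DAI, and on power-down calls hw_free() before shutdown(). A minimal sketch of the iteration shape, reconstructed from the open-coded list walk the deleted lines used (the real snd_soc_dapm_widget_for_each_source_path()/..._sink_path() macros live in include/sound/soc-dapm.h and fix the direction argument; the names below are illustrative):

/* Illustrative only: visit each path of widget w in direction dir. */
#define widget_for_each_path(w, dir, p) \
	list_for_each_entry(p, &(w)->edges[dir], list_node[dir])

/* Usage shape inside the event handler (sketch): */
struct snd_soc_dapm_path *path;

widget_for_each_path(w, SND_SOC_DAPM_DIR_OUT, path) {
	/* path->source->priv / path->sink->priv yield the peer DAIs */
}
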
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 592efb370c44..f4dc3d445aae 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -373,7 +373,7 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
unsigned int rshift = mc->rshift;
int max = mc->max;
int min = mc->min;
- unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
+ unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
unsigned int val;
int ret;
@@ -418,7 +418,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
unsigned int rshift = mc->rshift;
int max = mc->max;
int min = mc->min;
- unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
+ unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
int err = 0;
unsigned int val, val_mask, val2 = 0;
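
Both soc-ops.c hunks are the same one-character hardening: when min + max has bit 31 set, fls() returns 32 and the old expression shifts a signed 1 into the sign bit, which is undefined behaviour; the 1U constant keeps the whole computation unsigned. A standalone demonstration (userspace sketch: fls_approx() stands in for the kernel's fls(), and the value of sum is hypothetical):

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): 1-based index of the
 * most significant set bit, or 0 if no bit is set. */
static int fls_approx(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int sum = 0x80000000u;	/* hypothetical min + max */

	/* (1 << 31) on a signed int is UB; 1U keeps the shift defined */
	unsigned int mask = (1U << (fls_approx(sum) - 1)) - 1;

	printf("mask = 0x%08x\n", mask);	/* prints 0x7fffffff */
	return 0;
}
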
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index e8b98bfd4cf1..03f36e534050 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -59,25 +59,26 @@ static bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream)
void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream)
{
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
int i;
lockdep_assert_held(&rtd->pcm_mutex);
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
cpu_dai->playback_active++;
- for (i = 0; i < rtd->num_codecs; i++)
- rtd->codec_dais[i]->playback_active++;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ codec_dai->playback_active++;
} else {
cpu_dai->capture_active++;
- for (i = 0; i < rtd->num_codecs; i++)
- rtd->codec_dais[i]->capture_active++;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ codec_dai->capture_active++;
}
cpu_dai->active++;
cpu_dai->component->active++;
- for (i = 0; i < rtd->num_codecs; i++) {
- rtd->codec_dais[i]->active++;
- rtd->codec_dais[i]->component->active++;
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ codec_dai->active++;
+ codec_dai->component->active++;
}
}
@@ -94,25 +95,26 @@ void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream)
void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream)
{
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
int i;
lockdep_assert_held(&rtd->pcm_mutex);
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
cpu_dai->playback_active--;
- for (i = 0; i < rtd->num_codecs; i++)
- rtd->codec_dais[i]->playback_active--;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ codec_dai->playback_active--;
} else {
cpu_dai->capture_active--;
- for (i = 0; i < rtd->num_codecs; i++)
- rtd->codec_dais[i]->capture_active--;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ codec_dai->capture_active--;
}
cpu_dai->active--;
cpu_dai->component->active--;
- for (i = 0; i < rtd->num_codecs; i++) {
- rtd->codec_dais[i]->component->active--;
- rtd->codec_dais[i]->active--;
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ codec_dai->component->active--;
+ codec_dai->active--;
}
}
@@ -172,7 +174,7 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
{
struct snd_soc_dpcm *dpcm;
- list_for_each_entry(dpcm, &fe->dpcm[dir].be_clients, list_be) {
+ for_each_dpcm_be(fe, dir, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
@@ -253,6 +255,7 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
unsigned int rate, channels, sample_bits, symmetry, i;
rate = params_rate(params);
@@ -263,8 +266,8 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
symmetry = cpu_dai->driver->symmetric_rates ||
rtd->dai_link->symmetric_rates;
- for (i = 0; i < rtd->num_codecs; i++)
- symmetry |= rtd->codec_dais[i]->driver->symmetric_rates;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ symmetry |= codec_dai->driver->symmetric_rates;
if (symmetry && cpu_dai->rate && cpu_dai->rate != rate) {
dev_err(rtd->dev, "ASoC: unmatched rate symmetry: %d - %d\n",
@@ -275,8 +278,8 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
symmetry = cpu_dai->driver->symmetric_channels ||
rtd->dai_link->symmetric_channels;
- for (i = 0; i < rtd->num_codecs; i++)
- symmetry |= rtd->codec_dais[i]->driver->symmetric_channels;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ symmetry |= codec_dai->driver->symmetric_channels;
if (symmetry && cpu_dai->channels && cpu_dai->channels != channels) {
dev_err(rtd->dev, "ASoC: unmatched channel symmetry: %d - %d\n",
@@ -287,8 +290,8 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
symmetry = cpu_dai->driver->symmetric_samplebits ||
rtd->dai_link->symmetric_samplebits;
- for (i = 0; i < rtd->num_codecs; i++)
- symmetry |= rtd->codec_dais[i]->driver->symmetric_samplebits;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ symmetry |= codec_dai->driver->symmetric_samplebits;
if (symmetry && cpu_dai->sample_bits && cpu_dai->sample_bits != sample_bits) {
dev_err(rtd->dev, "ASoC: unmatched sample bits symmetry: %d - %d\n",
@@ -304,17 +307,18 @@ static bool soc_pcm_has_symmetry(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai_driver *cpu_driver = rtd->cpu_dai->driver;
struct snd_soc_dai_link *link = rtd->dai_link;
+ struct snd_soc_dai *codec_dai;
unsigned int symmetry, i;
symmetry = cpu_driver->symmetric_rates || link->symmetric_rates ||
cpu_driver->symmetric_channels || link->symmetric_channels ||
cpu_driver->symmetric_samplebits || link->symmetric_samplebits;
- for (i = 0; i < rtd->num_codecs; i++)
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
symmetry = symmetry ||
- rtd->codec_dais[i]->driver->symmetric_rates ||
- rtd->codec_dais[i]->driver->symmetric_channels ||
- rtd->codec_dais[i]->driver->symmetric_samplebits;
+ codec_dai->driver->symmetric_rates ||
+ codec_dai->driver->symmetric_channels ||
+ codec_dai->driver->symmetric_samplebits;
return symmetry;
}
@@ -342,8 +346,7 @@ static void soc_pcm_apply_msb(struct snd_pcm_substream *substream)
unsigned int bits = 0, cpu_bits;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->playback.sig_bits == 0) {
bits = 0;
break;
@@ -352,8 +355,7 @@ static void soc_pcm_apply_msb(struct snd_pcm_substream *substream)
}
cpu_bits = cpu_dai->driver->playback.sig_bits;
} else {
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->capture.sig_bits == 0) {
bits = 0;
break;
@@ -372,6 +374,7 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_pcm_hardware *hw = &runtime->hw;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
struct snd_soc_dai_driver *cpu_dai_drv = rtd->cpu_dai->driver;
struct snd_soc_dai_driver *codec_dai_drv;
struct snd_soc_pcm_stream *codec_stream;
@@ -388,7 +391,7 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream)
cpu_stream = &cpu_dai_drv->capture;
/* first calculate min/max only for CODECs in the DAI link */
- for (i = 0; i < rtd->num_codecs; i++) {
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/*
* Skip CODECs which don't support the current stream type.
@@ -399,11 +402,11 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream)
* bailed out on a higher level, since there would be no
* CODEC to support the transfer direction in that case.
*/
- if (!snd_soc_dai_stream_valid(rtd->codec_dais[i],
+ if (!snd_soc_dai_stream_valid(codec_dai,
substream->stream))
continue;
- codec_dai_drv = rtd->codec_dais[i]->driver;
+ codec_dai_drv = codec_dai->driver;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
codec_stream = &codec_dai_drv->playback;
else
@@ -482,8 +485,8 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
int i, ret = 0;
pinctrl_pm_select_default_state(cpu_dai->dev);
- for (i = 0; i < rtd->num_codecs; i++)
- pinctrl_pm_select_default_state(rtd->codec_dais[i]->dev);
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ pinctrl_pm_select_default_state(codec_dai->dev);
for_each_rtdcom(rtd, rtdcom) {
component = rtdcom->component;
@@ -520,8 +523,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
}
component = NULL;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->startup) {
ret = codec_dai->driver->ops->startup(substream,
codec_dai);
@@ -588,10 +590,9 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
goto config_err;
}
- for (i = 0; i < rtd->num_codecs; i++) {
- if (rtd->codec_dais[i]->active) {
- ret = soc_pcm_apply_symmetry(substream,
- rtd->codec_dais[i]);
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ if (codec_dai->active) {
+ ret = soc_pcm_apply_symmetry(substream, codec_dai);
if (ret != 0)
goto config_err;
}
@@ -620,8 +621,7 @@ machine_err:
i = rtd->num_codecs;
codec_dai_err:
- while (--i >= 0) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai_rollback(rtd, i, codec_dai) {
if (codec_dai->driver->ops->shutdown)
codec_dai->driver->ops->shutdown(substream, codec_dai);
}
@@ -641,9 +641,9 @@ out:
pm_runtime_put_autosuspend(component->dev);
}
- for (i = 0; i < rtd->num_codecs; i++) {
- if (!rtd->codec_dais[i]->active)
- pinctrl_pm_select_sleep_state(rtd->codec_dais[i]->dev);
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ if (!codec_dai->active)
+ pinctrl_pm_select_sleep_state(codec_dai->dev);
}
if (!cpu_dai->active)
pinctrl_pm_select_sleep_state(cpu_dai->dev);
@@ -701,8 +701,7 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
if (!cpu_dai->active)
cpu_dai->rate = 0;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (!codec_dai->active)
codec_dai->rate = 0;
}
@@ -712,8 +711,7 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
if (cpu_dai->driver->ops->shutdown)
cpu_dai->driver->ops->shutdown(substream, cpu_dai);
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->shutdown)
codec_dai->driver->ops->shutdown(substream, codec_dai);
}
@@ -751,9 +749,9 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
pm_runtime_put_autosuspend(component->dev);
}
- for (i = 0; i < rtd->num_codecs; i++) {
- if (!rtd->codec_dais[i]->active)
- pinctrl_pm_select_sleep_state(rtd->codec_dais[i]->dev);
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ if (!codec_dai->active)
+ pinctrl_pm_select_sleep_state(codec_dai->dev);
}
if (!cpu_dai->active)
pinctrl_pm_select_sleep_state(cpu_dai->dev);
@@ -801,8 +799,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
}
}
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->prepare) {
ret = codec_dai->driver->ops->prepare(substream,
codec_dai);
@@ -834,8 +831,8 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
snd_soc_dapm_stream_event(rtd, substream->stream,
SND_SOC_DAPM_STREAM_START);
- for (i = 0; i < rtd->num_codecs; i++)
- snd_soc_dai_digital_mute(rtd->codec_dais[i], 0,
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ snd_soc_dai_digital_mute(codec_dai, 0,
substream->stream);
snd_soc_dai_digital_mute(cpu_dai, 0, substream->stream);
@@ -920,6 +917,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
int i, ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -932,8 +930,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
}
}
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
struct snd_pcm_hw_params codec_params;
/*
@@ -1018,8 +1015,7 @@ interface_err:
i = rtd->num_codecs;
codec_err:
- while (--i >= 0) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai_rollback(rtd, i, codec_dai) {
if (codec_dai->driver->ops->hw_free)
codec_dai->driver->ops->hw_free(substream, codec_dai);
codec_dai->rate = 0;
@@ -1052,8 +1048,7 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
cpu_dai->sample_bits = 0;
}
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->active == 1) {
codec_dai->rate = 0;
codec_dai->channels = 0;
@@ -1062,10 +1057,10 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
}
/* apply codec digital mute */
- for (i = 0; i < rtd->num_codecs; i++) {
- if ((playback && rtd->codec_dais[i]->playback_active == 1) ||
- (!playback && rtd->codec_dais[i]->capture_active == 1))
- snd_soc_dai_digital_mute(rtd->codec_dais[i], 1,
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ if ((playback && codec_dai->playback_active == 1) ||
+ (!playback && codec_dai->capture_active == 1))
+ snd_soc_dai_digital_mute(codec_dai, 1,
substream->stream);
}
@@ -1077,8 +1072,7 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
soc_pcm_components_hw_free(substream, NULL);
/* now free hw params for the DAIs */
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->hw_free)
codec_dai->driver->ops->hw_free(substream, codec_dai);
}
@@ -1099,8 +1093,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
struct snd_soc_dai *codec_dai;
int i, ret;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->trigger) {
ret = codec_dai->driver->ops->trigger(substream,
cmd, codec_dai);
@@ -1144,8 +1137,7 @@ static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai;
int i, ret;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->bespoke_trigger) {
ret = codec_dai->driver->ops->bespoke_trigger(substream,
cmd, codec_dai);
@@ -1199,8 +1191,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
if (cpu_dai->driver->ops->delay)
delay += cpu_dai->driver->ops->delay(substream, cpu_dai);
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->delay)
codec_delay = max(codec_delay,
codec_dai->driver->ops->delay(substream,
@@ -1220,7 +1211,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
struct snd_soc_dpcm *dpcm;
/* only add new dpcms */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
if (dpcm->be == be && dpcm->fe == fe)
return 0;
}
@@ -1261,7 +1252,7 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
be_substream = snd_soc_dpcm_get_substream(be, stream);
- list_for_each_entry(dpcm, &be->dpcm[stream].fe_clients, list_fe) {
+ for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
continue;
@@ -1281,7 +1272,7 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm, *d;
- list_for_each_entry_safe(dpcm, d, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be_safe(fe, stream, dpcm, d) {
dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n",
stream ? "capture" : "playback",
dpcm->be->dai_link->name);
@@ -1310,12 +1301,13 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
struct snd_soc_dapm_widget *widget, int stream)
{
struct snd_soc_pcm_runtime *be;
+ struct snd_soc_dai *dai;
int i;
dev_dbg(card->dev, "ASoC: find BE for widget %s\n", widget->name);
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- list_for_each_entry(be, &card->rtd_list, list) {
+ for_each_card_rtds(card, be) {
if (!be->dai_link->no_pcm)
continue;
@@ -1327,15 +1319,14 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
if (be->cpu_dai->playback_widget == widget)
return be;
- for (i = 0; i < be->num_codecs; i++) {
- struct snd_soc_dai *dai = be->codec_dais[i];
+ for_each_rtd_codec_dai(be, i, dai) {
if (dai->playback_widget == widget)
return be;
}
}
} else {
- list_for_each_entry(be, &card->rtd_list, list) {
+ for_each_card_rtds(card, be) {
if (!be->dai_link->no_pcm)
continue;
@@ -1347,8 +1338,7 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
if (be->cpu_dai->capture_widget == widget)
return be;
- for (i = 0; i < be->num_codecs; i++) {
- struct snd_soc_dai *dai = be->codec_dais[i];
+ for_each_rtd_codec_dai(be, i, dai) {
if (dai->capture_widget == widget)
return be;
}
@@ -1388,32 +1378,31 @@ static bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget,
{
struct snd_soc_card *card = widget->dapm->card;
struct snd_soc_pcm_runtime *rtd;
+ struct snd_soc_dai *dai;
int i;
if (dir == SND_SOC_DAPM_DIR_OUT) {
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (!rtd->dai_link->no_pcm)
continue;
if (rtd->cpu_dai->playback_widget == widget)
return true;
- for (i = 0; i < rtd->num_codecs; ++i) {
- struct snd_soc_dai *dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, dai) {
if (dai->playback_widget == widget)
return true;
}
}
} else { /* SND_SOC_DAPM_DIR_IN */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (!rtd->dai_link->no_pcm)
continue;
if (rtd->cpu_dai->capture_widget == widget)
return true;
- for (i = 0; i < rtd->num_codecs; ++i) {
- struct snd_soc_dai *dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, dai) {
if (dai->capture_widget == widget)
return true;
}
@@ -1445,10 +1434,11 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
struct snd_soc_dpcm *dpcm;
struct snd_soc_dapm_widget_list *list = *list_;
struct snd_soc_dapm_widget *widget;
+ struct snd_soc_dai *dai;
int prune = 0;
/* Destroy any old FE <--> BE connections */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
unsigned int i;
/* is there a valid CPU DAI widget for this BE */
@@ -1459,8 +1449,7 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
continue;
/* is there a valid CODEC DAI widget for this BE */
- for (i = 0; i < dpcm->be->num_codecs; i++) {
- struct snd_soc_dai *dai = dpcm->be->codec_dais[i];
+ for_each_rtd_codec_dai(dpcm->be, i, dai) {
widget = dai_get_widget(dai, stream);
/* prune the BE if it's no longer in our active list */
@@ -1555,7 +1544,7 @@ void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ for_each_dpcm_be(fe, stream, dpcm)
dpcm->be->dpcm[stream].runtime_update =
SND_SOC_DPCM_UPDATE_NO;
}
@@ -1566,7 +1555,7 @@ static void dpcm_be_dai_startup_unwind(struct snd_soc_pcm_runtime *fe,
struct snd_soc_dpcm *dpcm;
/* disable any enabled and non active backends */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -1595,7 +1584,7 @@ int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
int err, count = 0;
/* only startup BE DAIs that are either sinks or sources to this FE DAI */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -1649,7 +1638,7 @@ int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
unwind:
/* disable any enabled and non active backends */
- list_for_each_entry_continue_reverse(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be_rollback(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
snd_soc_dpcm_get_substream(be, stream);
@@ -1680,7 +1669,7 @@ static void dpcm_init_runtime_hw(struct snd_pcm_runtime *runtime,
struct snd_soc_pcm_stream *stream)
{
runtime->hw.rate_min = stream->rate_min;
- runtime->hw.rate_max = stream->rate_max;
+ runtime->hw.rate_max = min_not_zero(stream->rate_max, UINT_MAX);
runtime->hw.channels_min = stream->channels_min;
runtime->hw.channels_max = stream->channels_max;
if (runtime->hw.formats)
@@ -1695,6 +1684,7 @@ static void dpcm_runtime_merge_format(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *fe = substream->private_data;
struct snd_soc_dpcm *dpcm;
+ struct snd_soc_dai *dai;
int stream = substream->stream;
if (!fe->dai_link->dpcm_merged_format)
@@ -1705,22 +1695,21 @@ static void dpcm_runtime_merge_format(struct snd_pcm_substream *substream,
* if the FE wants to use it (= dpcm_merged_format)
*/
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_soc_dai_driver *codec_dai_drv;
struct snd_soc_pcm_stream *codec_stream;
int i;
- for (i = 0; i < be->num_codecs; i++) {
+ for_each_rtd_codec_dai(be, i, dai) {
/*
* Skip CODECs which don't support the current stream
* type. See soc_pcm_init_runtime_hw() for more details
*/
- if (!snd_soc_dai_stream_valid(be->codec_dais[i],
- stream))
+ if (!snd_soc_dai_stream_valid(dai, stream))
continue;
- codec_dai_drv = be->codec_dais[i]->driver;
+ codec_dai_drv = dai->driver;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
codec_stream = &codec_dai_drv->playback;
else
@@ -1747,7 +1736,7 @@ static void dpcm_runtime_merge_chan(struct snd_pcm_substream *substream,
* if the FE wants to use it (= dpcm_merged_chan)
*/
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_soc_dai_driver *cpu_dai_drv = be->cpu_dai->driver;
struct snd_soc_dai_driver *codec_dai_drv;
@@ -1799,12 +1788,13 @@ static void dpcm_runtime_merge_rate(struct snd_pcm_substream *substream,
* if the FE wants to use it (= dpcm_merged_rate)
*/
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_soc_dai_driver *cpu_dai_drv = be->cpu_dai->driver;
struct snd_soc_dai_driver *codec_dai_drv;
struct snd_soc_pcm_stream *codec_stream;
struct snd_soc_pcm_stream *cpu_stream;
+ struct snd_soc_dai *dai;
int i;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -1816,16 +1806,15 @@ static void dpcm_runtime_merge_rate(struct snd_pcm_substream *substream,
*rate_max = min_not_zero(*rate_max, cpu_stream->rate_max);
*rates = snd_pcm_rate_mask_intersect(*rates, cpu_stream->rates);
- for (i = 0; i < be->num_codecs; i++) {
+ for_each_rtd_codec_dai(be, i, dai) {
/*
* Skip CODECs which don't support the current stream
* type. See soc_pcm_init_runtime_hw() for more details
*/
- if (!snd_soc_dai_stream_valid(be->codec_dais[i],
- stream))
+ if (!snd_soc_dai_stream_valid(dai, stream))
continue;
- codec_dai_drv = be->codec_dais[i]->driver;
+ codec_dai_drv = dai->driver;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
codec_stream = &codec_dai_drv->playback;
else
@@ -1902,11 +1891,12 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
}
/* apply symmetry for BE */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
snd_soc_dpcm_get_substream(be, stream);
struct snd_soc_pcm_runtime *rtd = be_substream->private_data;
+ struct snd_soc_dai *codec_dai;
int i;
if (rtd->dai_link->be_hw_params_fixup)
@@ -1923,10 +1913,10 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
return err;
}
- for (i = 0; i < rtd->num_codecs; i++) {
- if (rtd->codec_dais[i]->active) {
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ if (codec_dai->active) {
err = soc_pcm_apply_symmetry(fe_substream,
- rtd->codec_dais[i]);
+ codec_dai);
if (err < 0)
return err;
}
@@ -1986,7 +1976,7 @@ int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
struct snd_soc_dpcm *dpcm;
/* only shutdown BEs that are either sinks or sources to this FE DAI */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -2050,7 +2040,7 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
/* only hw_params backends that are either sinks or sources
* to this frontend DAI */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -2119,7 +2109,7 @@ int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
struct snd_soc_dpcm *dpcm;
int ret;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -2170,7 +2160,7 @@ int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
unwind:
/* disable any enabled and non active backends */
- list_for_each_entry_continue_reverse(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be_rollback(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
snd_soc_dpcm_get_substream(be, stream);
@@ -2250,7 +2240,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
struct snd_soc_dpcm *dpcm;
int ret = 0;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -2436,7 +2426,7 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
struct snd_soc_dpcm *dpcm;
int ret = 0;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -2646,7 +2636,7 @@ close:
dpcm_be_dai_shutdown(fe, stream);
disconnect:
/* disconnect any non started BEs */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
@@ -2771,14 +2761,14 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
/* shutdown all old paths first */
- list_for_each_entry(fe, &card->rtd_list, list) {
+ for_each_card_rtds(card, fe) {
ret = soc_dpcm_fe_runtime_update(fe, 0);
if (ret)
goto out;
}
/* bring new paths up */
- list_for_each_entry(fe, &card->rtd_list, list) {
+ for_each_card_rtds(card, fe) {
ret = soc_dpcm_fe_runtime_update(fe, 1);
if (ret)
goto out;
@@ -2791,10 +2781,9 @@ out:
int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute)
{
struct snd_soc_dpcm *dpcm;
- struct list_head *clients =
- &fe->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients;
+ struct snd_soc_dai *dai;
- list_for_each_entry(dpcm, clients, list_be) {
+ for_each_dpcm_be(fe, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
int i;
@@ -2802,8 +2791,7 @@ int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute)
if (be->dai_link->ignore_suspend)
continue;
- for (i = 0; i < be->num_codecs; i++) {
- struct snd_soc_dai *dai = be->codec_dais[i];
+ for_each_rtd_codec_dai(be, i, dai) {
struct snd_soc_dai_driver *drv = dai->driver;
dev_dbg(be->dev, "ASoC: BE digital mute %s\n",
@@ -2844,7 +2832,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
ret = dpcm_fe_dai_startup(fe_substream);
if (ret < 0) {
/* clean up all links */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ for_each_dpcm_be(fe, stream, dpcm)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
dpcm_be_disconnect(fe, stream);
@@ -2867,7 +2855,7 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
ret = dpcm_fe_dai_shutdown(fe_substream);
/* mark FE's links ready to prune */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ for_each_dpcm_be(fe, stream, dpcm)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
dpcm_be_disconnect(fe, stream);
@@ -3041,8 +3029,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
playback = rtd->dai_link->dpcm_playback;
capture = rtd->dai_link->dpcm_capture;
} else {
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->playback.channels_min)
playback = 1;
if (codec_dai->driver->capture.channels_min)
@@ -3230,7 +3217,7 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
struct snd_soc_dpcm *dpcm;
int state;
- list_for_each_entry(dpcm, &be->dpcm[stream].fe_clients, list_fe) {
+ for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
continue;
@@ -3257,7 +3244,7 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
struct snd_soc_dpcm *dpcm;
int state;
- list_for_each_entry(dpcm, &be->dpcm[stream].fe_clients, list_fe) {
+ for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
continue;
@@ -3337,7 +3324,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
goto out;
}
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
params = &dpcm->hw_params;
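
Most of the soc-pcm.c churn is mechanical: open-coded loops over rtd->codec_dais[], card->rtd_list and the DPCM client lists become named iterators. Their bodies follow directly from the deleted lines, roughly as below (sketch; the canonical definitions live in include/sound/soc.h and include/sound/soc-dpcm.h):

/* Walk the CODEC DAIs of a runtime, plus rollback for error paths. */
#define for_each_rtd_codec_dai(rtd, i, dai)				\
	for ((i) = 0;							\
	     ((i) < (rtd)->num_codecs) && ((dai) = (rtd)->codec_dais[i]); \
	     (i)++)
#define for_each_rtd_codec_dai_rollback(rtd, i, dai)			\
	for (; (--(i) >= 0) && ((dai) = (rtd)->codec_dais[i]);)

/* Walk a card's runtimes and a DPCM FE/BE's client lists. */
#define for_each_card_rtds(card, rtd)					\
	list_for_each_entry(rtd, &(card)->rtd_list, list)
#define for_each_dpcm_be(fe, stream, dpcm)				\
	list_for_each_entry(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
#define for_each_dpcm_fe(be, stream, dpcm)				\
	list_for_each_entry(dpcm, &(be)->dpcm[stream].fe_clients, list_fe)
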
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 66e77e020745..045ef136903d 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -993,7 +993,7 @@ static int soc_tplg_denum_create(struct soc_tplg *tplg, unsigned int count,
kfree(se);
continue;
}
- /* fall through and create texts */
+ /* fall through */
case SND_SOC_TPLG_CTL_ENUM:
case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
@@ -1310,7 +1310,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
ec->hdr.name);
goto err_se;
}
- /* fall through to create texts */
+ /* fall through */
case SND_SOC_TPLG_CTL_ENUM:
case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
@@ -1565,17 +1565,6 @@ widget:
widget = snd_soc_dapm_new_control_unlocked(dapm, &template);
if (IS_ERR(widget)) {
ret = PTR_ERR(widget);
- /* Do not nag about probe deferrals */
- if (ret != -EPROBE_DEFER)
- dev_err(tplg->dev,
- "ASoC: failed to create widget %s controls (%d)\n",
- w->name, ret);
- goto hdr_err;
- }
- if (widget == NULL) {
- dev_err(tplg->dev, "ASoC: failed to create widget %s controls\n",
- w->name);
- ret = -ENOMEM;
goto hdr_err;
}
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index e0c93496c0cd..e3b9dd634c6d 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -273,13 +273,13 @@ static int dummy_dma_open(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops dummy_dma_ops = {
+static const struct snd_pcm_ops snd_dummy_dma_ops = {
.open = dummy_dma_open,
.ioctl = snd_pcm_lib_ioctl,
};
static const struct snd_soc_component_driver dummy_platform = {
- .ops = &dummy_dma_ops,
+ .ops = &snd_dummy_dma_ops,
};
static const struct snd_soc_component_driver dummy_codec = {
diff --git a/sound/soc/stm/Kconfig b/sound/soc/stm/Kconfig
index 9b2681397dba..c66ffa72057e 100644
--- a/sound/soc/stm/Kconfig
+++ b/sound/soc/stm/Kconfig
@@ -3,6 +3,7 @@ menu "STMicroelectronics STM32 SOC audio support"
config SND_SOC_STM32_SAI
tristate "STM32 SAI interface (Serial Audio Interface) support"
depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ depends on COMMON_CLK
depends on SND_SOC
select SND_SOC_GENERIC_DMAENGINE_PCM
select REGMAP_MMIO
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index f22654253c43..d597eba61992 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -104,7 +104,7 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
if (!pdev) {
dev_err(&sai_client->pdev->dev,
- "Device not found for node %s\n", np_provider->name);
+ "Device not found for node %pOFn\n", np_provider);
return -ENODEV;
}
diff --git a/sound/soc/stm/stm32_sai.h b/sound/soc/stm/stm32_sai.h
index f25422174909..08de899c766b 100644
--- a/sound/soc/stm/stm32_sai.h
+++ b/sound/soc/stm/stm32_sai.h
@@ -91,6 +91,9 @@
#define SAI_XCR1_OSR_SHIFT 26
#define SAI_XCR1_OSR BIT(SAI_XCR1_OSR_SHIFT)
+#define SAI_XCR1_MCKEN_SHIFT 27
+#define SAI_XCR1_MCKEN BIT(SAI_XCR1_MCKEN_SHIFT)
+
/******************* Bit definition for SAI_XCR2 register *******************/
#define SAI_XCR2_FTH_SHIFT 0
#define SAI_XCR2_FTH_MASK GENMASK(2, SAI_XCR2_FTH_SHIFT)
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index 06fba9650ac4..ea05cc91aa05 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_irq.h>
@@ -68,6 +69,8 @@
#define SAI_IEC60958_BLOCK_FRAMES 192
#define SAI_IEC60958_STATUS_BYTES 24
+#define SAI_MCLK_NAME_LEN 32
+
/**
* struct stm32_sai_sub_data - private data of SAI sub block (block A or B)
* @pdev: device data pointer
@@ -80,6 +83,7 @@
* @pdata: SAI block parent data pointer
* @np_sync_provider: synchronization provider node
* @sai_ck: kernel clock feeding the SAI clock generator
+ * @sai_mclk: master clock from SAI mclk provider
* @phys_addr: SAI registers physical base address
* @mclk_rate: SAI block master clock frequency (Hz). set at init
* @id: SAI sub block id corresponding to sub-block A or B
@@ -110,6 +114,7 @@ struct stm32_sai_sub_data {
struct stm32_sai_data *pdata;
struct device_node *np_sync_provider;
struct clk *sai_ck;
+ struct clk *sai_mclk;
dma_addr_t phys_addr;
unsigned int mclk_rate;
unsigned int id;
@@ -251,6 +256,176 @@ static const struct snd_kcontrol_new iec958_ctls = {
.put = snd_pcm_iec958_put,
};
+struct stm32_sai_mclk_data {
+ struct clk_hw hw;
+ unsigned long freq;
+ struct stm32_sai_sub_data *sai_data;
+};
+
+#define to_mclk_data(_hw) container_of(_hw, struct stm32_sai_mclk_data, hw)
+#define STM32_SAI_MAX_CLKS 1
+
+static int stm32_sai_get_clk_div(struct stm32_sai_sub_data *sai,
+ unsigned long input_rate,
+ unsigned long output_rate)
+{
+ int version = sai->pdata->conf->version;
+ int div;
+
+ div = DIV_ROUND_CLOSEST(input_rate, output_rate);
+ /* guard against div == 0, which would fault in the modulo check below */
+ if (!div || div > SAI_XCR1_MCKDIV_MAX(version)) {
+ dev_err(&sai->pdev->dev, "Divider %d out of range\n", div);
+ return -EINVAL;
+ }
+ dev_dbg(&sai->pdev->dev, "SAI divider %d\n", div);
+
+ if (input_rate % div)
+ dev_dbg(&sai->pdev->dev,
+ "Rate not accurate. requested (%ld), actual (%ld)\n",
+ output_rate, input_rate / div);
+
+ return div;
+}
+
+static int stm32_sai_set_clk_div(struct stm32_sai_sub_data *sai,
+ unsigned int div)
+{
+ int version = sai->pdata->conf->version;
+ int ret, cr1, mask;
+
+ if (div > SAI_XCR1_MCKDIV_MAX(version)) {
+ dev_err(&sai->pdev->dev, "Divider %d out of range\n", div);
+ return -EINVAL;
+ }
+
+ mask = SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(version));
+ cr1 = SAI_XCR1_MCKDIV_SET(div);
+ ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, mask, cr1);
+ if (ret < 0)
+ dev_err(&sai->pdev->dev, "Failed to update CR1 register\n");
+
+ return ret;
+}
+
+static long stm32_sai_mclk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct stm32_sai_mclk_data *mclk = to_mclk_data(hw);
+ struct stm32_sai_sub_data *sai = mclk->sai_data;
+ int div;
+
+ div = stm32_sai_get_clk_div(sai, *prate, rate);
+ if (div < 0)
+ return div;
+
+ mclk->freq = *prate / div;
+
+ return mclk->freq;
+}
+
+static unsigned long stm32_sai_mclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct stm32_sai_mclk_data *mclk = to_mclk_data(hw);
+
+ return mclk->freq;
+}
+
+static int stm32_sai_mclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct stm32_sai_mclk_data *mclk = to_mclk_data(hw);
+ struct stm32_sai_sub_data *sai = mclk->sai_data;
+ int div;
+ int ret;
+
+ div = stm32_sai_get_clk_div(sai, parent_rate, rate);
+ if (div < 0)
+ return div;
+
+ ret = stm32_sai_set_clk_div(sai, div);
+ if (ret)
+ return ret;
+
+ mclk->freq = rate;
+
+ return 0;
+}
+
+static int stm32_sai_mclk_enable(struct clk_hw *hw)
+{
+ struct stm32_sai_mclk_data *mclk = to_mclk_data(hw);
+ struct stm32_sai_sub_data *sai = mclk->sai_data;
+
+ dev_dbg(&sai->pdev->dev, "Enable master clock\n");
+
+ return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
+ SAI_XCR1_MCKEN, SAI_XCR1_MCKEN);
+}
+
+static void stm32_sai_mclk_disable(struct clk_hw *hw)
+{
+ struct stm32_sai_mclk_data *mclk = to_mclk_data(hw);
+ struct stm32_sai_sub_data *sai = mclk->sai_data;
+
+ dev_dbg(&sai->pdev->dev, "Disable master clock\n");
+
+ regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0);
+}
+
+static const struct clk_ops mclk_ops = {
+ .enable = stm32_sai_mclk_enable,
+ .disable = stm32_sai_mclk_disable,
+ .recalc_rate = stm32_sai_mclk_recalc_rate,
+ .round_rate = stm32_sai_mclk_round_rate,
+ .set_rate = stm32_sai_mclk_set_rate,
+};
+
+static int stm32_sai_add_mclk_provider(struct stm32_sai_sub_data *sai)
+{
+ struct clk_hw *hw;
+ struct stm32_sai_mclk_data *mclk;
+ struct device *dev = &sai->pdev->dev;
+ const char *pname = __clk_get_name(sai->sai_ck);
+ char *mclk_name, *p, *s = (char *)pname;
+ int ret, i = 0;
+
+ mclk = devm_kzalloc(dev, sizeof(*mclk), GFP_KERNEL);
+ if (!mclk)
+ return -ENOMEM;
+
+ mclk_name = devm_kcalloc(dev, SAI_MCLK_NAME_LEN,
+ sizeof(char), GFP_KERNEL);
+ if (!mclk_name)
+ return -ENOMEM;
+
+ /*
+ * Forge the mclk clock name from the parent clock name plus an
+ * "a_mclk"/"b_mclk" suffix; anything after a '_' in the parent
+ * name is stripped first.
+ */
+ p = mclk_name;
+ while (*s && *s != '_' && (i < (SAI_MCLK_NAME_LEN - 7))) {
+ *p++ = *s++;
+ i++;
+ }
+ strcat(p, STM_SAI_IS_SUB_A(sai) ? "a_mclk" : "b_mclk");
+
+ mclk->hw.init = CLK_HW_INIT(mclk_name, pname, &mclk_ops, 0);
+ mclk->sai_data = sai;
+ hw = &mclk->hw;
+
+ dev_dbg(dev, "Register master clock %s\n", mclk_name);
+ ret = devm_clk_hw_register(&sai->pdev->dev, hw);
+ if (ret) {
+ dev_err(dev, "mclk register returned %d\n", ret);
+ return ret;
+ }
+ sai->sai_mclk = hw->clk;
+
+ /* register mclk provider */
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
static irqreturn_t stm32_sai_isr(int irq, void *devid)
{
struct stm32_sai_sub_data *sai = (struct stm32_sai_sub_data *)devid;
@@ -312,15 +487,25 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
int ret;
- if ((dir == SND_SOC_CLOCK_OUT) && sai->master) {
+ if (dir == SND_SOC_CLOCK_OUT) {
ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
SAI_XCR1_NODIV,
(unsigned int)~SAI_XCR1_NODIV);
if (ret < 0)
return ret;
- sai->mclk_rate = freq;
dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
+ sai->mclk_rate = freq;
+
+ if (sai->sai_mclk) {
+ ret = clk_set_rate_exclusive(sai->sai_mclk,
+ sai->mclk_rate);
+ if (ret) {
+ dev_err(cpu_dai->dev,
+ "Could not set mclk rate\n");
+ return ret;
+ }
+ }
}
return 0;
@@ -715,15 +900,9 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
{
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
int cr1, mask, div = 0;
- int sai_clk_rate, mclk_ratio, den, ret;
- int version = sai->pdata->conf->version;
+ int sai_clk_rate, mclk_ratio, den;
unsigned int rate = params_rate(params);
- if (!sai->mclk_rate) {
- dev_err(cpu_dai->dev, "Mclk rate is null\n");
- return -EINVAL;
- }
-
if (!(rate % 11025))
clk_set_parent(sai->sai_ck, sai->pdata->clk_x11k);
else
@@ -731,14 +910,22 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
sai_clk_rate = clk_get_rate(sai->sai_ck);
if (STM_SAI_IS_F4(sai->pdata)) {
- /*
- * mclk_rate = 256 * fs
- * MCKDIV = 0 if sai_ck < 3/2 * mclk_rate
- * MCKDIV = sai_ck / (2 * mclk_rate) otherwise
+ /* mclk on (NODIV=0)
+ * mclk_rate = 256 * fs
+ * MCKDIV = 0 if sai_ck < 3/2 * mclk_rate
+ * MCKDIV = sai_ck / (2 * mclk_rate) otherwise
+ * mclk off (NODIV=1)
+ * MCKDIV ignored. sck = sai_ck
*/
- if (2 * sai_clk_rate >= 3 * sai->mclk_rate)
- div = DIV_ROUND_CLOSEST(sai_clk_rate,
- 2 * sai->mclk_rate);
+ if (!sai->mclk_rate)
+ return 0;
+
+ if (2 * sai_clk_rate >= 3 * sai->mclk_rate) {
+ div = stm32_sai_get_clk_div(sai, sai_clk_rate,
+ 2 * sai->mclk_rate);
+ if (div < 0)
+ return div;
+ }
} else {
/*
* TDM mode :
@@ -750,8 +937,10 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
* Note: NOMCK/NODIV correspond to same bit.
*/
if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
- div = DIV_ROUND_CLOSEST(sai_clk_rate,
- (params_rate(params) * 128));
+ div = stm32_sai_get_clk_div(sai, sai_clk_rate,
+ rate * 128);
+ if (div < 0)
+ return div;
} else {
if (sai->mclk_rate) {
mclk_ratio = sai->mclk_rate / rate;
@@ -764,31 +953,22 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
mclk_ratio);
return -EINVAL;
}
- div = DIV_ROUND_CLOSEST(sai_clk_rate,
- sai->mclk_rate);
+ div = stm32_sai_get_clk_div(sai, sai_clk_rate,
+ sai->mclk_rate);
+ if (div < 0)
+ return div;
} else {
/* mclk-fs not set, master clock not active */
den = sai->fs_length * params_rate(params);
- div = DIV_ROUND_CLOSEST(sai_clk_rate, den);
+ div = stm32_sai_get_clk_div(sai, sai_clk_rate,
+ den);
+ if (div < 0)
+ return div;
}
}
}
- if (div > SAI_XCR1_MCKDIV_MAX(version)) {
- dev_err(cpu_dai->dev, "Divider %d out of range\n", div);
- return -EINVAL;
- }
- dev_dbg(cpu_dai->dev, "SAI clock %d, divider %d\n", sai_clk_rate, div);
-
- mask = SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(version));
- cr1 = SAI_XCR1_MCKDIV_SET(div);
- ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, mask, cr1);
- if (ret < 0) {
- dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
- return ret;
- }
-
- return 0;
+ return stm32_sai_set_clk_div(sai, div);
}
static int stm32_sai_hw_params(struct snd_pcm_substream *substream,
@@ -881,6 +1061,9 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
SAI_XCR1_NODIV);
clk_disable_unprepare(sai->sai_ck);
+
+ clk_rate_exclusive_put(sai->sai_mclk);
+
sai->substream = NULL;
}
@@ -903,6 +1086,8 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
int cr1 = 0, cr1_mask;
+ sai->cpu_dai = cpu_dai;
+
sai->dma_params.addr = (dma_addr_t)(sai->phys_addr + STM_SAI_DR_REGX);
/*
* DMA supports 4, 8 or 16 burst sizes. Burst size 4 is the best choice,
@@ -1124,16 +1309,15 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
sai->sync = SAI_SYNC_NONE;
if (args.np) {
if (args.np == np) {
- dev_err(&pdev->dev, "%s sync own reference\n",
- np->name);
+ dev_err(&pdev->dev, "%pOFn sync own reference\n", np);
of_node_put(args.np);
return -EINVAL;
}
sai->np_sync_provider = of_get_parent(args.np);
if (!sai->np_sync_provider) {
- dev_err(&pdev->dev, "%s parent node not found\n",
- np->name);
+ dev_err(&pdev->dev, "%pOFn parent node not found\n",
+ np);
of_node_put(args.np);
return -ENODEV;
}
@@ -1182,6 +1366,23 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
return PTR_ERR(sai->sai_ck);
}
+ if (STM_SAI_IS_F4(sai->pdata))
+ return 0;
+
+ /* Register mclk provider if requested */
+ if (of_find_property(np, "#clock-cells", NULL)) {
+ ret = stm32_sai_add_mclk_provider(sai);
+ if (ret < 0)
+ return ret;
+ } else {
+ sai->sai_mclk = devm_clk_get(&pdev->dev, "MCLK");
+ if (IS_ERR(sai->sai_mclk)) {
+ if (PTR_ERR(sai->sai_mclk) != -ENOENT)
+ return PTR_ERR(sai->sai_mclk);
+ sai->sai_mclk = NULL;
+ }
+ }
+
return 0;
}
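
With a #clock-cells property on its node, the SAI sub-block now registers itself as a master-clock provider, so the rate and gating of MCLK are managed through the common clock framework. A hypothetical consumer sketch (the device, clock name and rate below are illustrative, not part of this patch):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_codec_probe(struct platform_device *pdev)
{
	struct clk *mclk;
	int ret;

	/* "mclk" would be wired in DT to the SAI's a_mclk/b_mclk output */
	mclk = devm_clk_get(&pdev->dev, "mclk");
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	ret = clk_set_rate(mclk, 12288000);	/* 256 * 48 kHz, hypothetical */
	if (ret)
		return ret;

	/* enabling the clock ends up setting the new SAI_XCR1_MCKEN bit */
	return clk_prepare_enable(mclk);
}
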
diff --git a/sound/soc/sunxi/Kconfig b/sound/soc/sunxi/Kconfig
index 22408bc2d6ec..66aad0d3f9c7 100644
--- a/sound/soc/sunxi/Kconfig
+++ b/sound/soc/sunxi/Kconfig
@@ -12,7 +12,7 @@ config SND_SUN4I_CODEC
config SND_SUN8I_CODEC
tristate "Allwinner SUN8I audio codec"
depends on OF
- depends on MACH_SUN8I || COMPILE_TEST
+ depends on MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
select REGMAP_MMIO
help
This option enables the digital part of the internal audio codec for
@@ -23,11 +23,19 @@ config SND_SUN8I_CODEC
config SND_SUN8I_CODEC_ANALOG
tristate "Allwinner sun8i Codec Analog Controls Support"
depends on MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
- select REGMAP
+ select SND_SUN8I_ADDA_PR_REGMAP
help
Say Y or M if you want to add support for the analog controls for
the codec embedded in newer Allwinner SoCs.
+config SND_SUN50I_CODEC_ANALOG
+ tristate "Allwinner sun50i Codec Analog Controls Support"
+ depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+ select SND_SUN8I_ADDA_PR_REGMAP
+ help
+ Say Y or M if you want to add support for the analog controls for
+ the codec embedded in Allwinner A64 SoC.
+
config SND_SUN4I_I2S
tristate "Allwinner A10 I2S Support"
select SND_SOC_GENERIC_DMAENGINE_PCM
@@ -45,4 +53,9 @@ config SND_SUN4I_SPDIF
help
Say Y or M to add support for the S/PDIF audio block in the Allwinner
A10 and affiliated SoCs.
+
+config SND_SUN8I_ADDA_PR_REGMAP
+ tristate
+ select REGMAP
+
endmenu
diff --git a/sound/soc/sunxi/Makefile b/sound/soc/sunxi/Makefile
index 4a9ef67386ca..a86be340a076 100644
--- a/sound/soc/sunxi/Makefile
+++ b/sound/soc/sunxi/Makefile
@@ -3,4 +3,6 @@ obj-$(CONFIG_SND_SUN4I_CODEC) += sun4i-codec.o
obj-$(CONFIG_SND_SUN4I_I2S) += sun4i-i2s.o
obj-$(CONFIG_SND_SUN4I_SPDIF) += sun4i-spdif.o
obj-$(CONFIG_SND_SUN8I_CODEC_ANALOG) += sun8i-codec-analog.o
+obj-$(CONFIG_SND_SUN50I_CODEC_ANALOG) += sun50i-codec-analog.o
obj-$(CONFIG_SND_SUN8I_CODEC) += sun8i-codec.o
+obj-$(CONFIG_SND_SUN8I_ADDA_PR_REGMAP) += sun8i-adda-pr-regmap.o
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index a4aa931ebfae..d5ec1a20499d 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -644,40 +644,6 @@ static int sun4i_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
return 0;
}
-static int sun4i_i2s_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-
- /* Enable the whole hardware block */
- regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
- SUN4I_I2S_CTRL_GL_EN, SUN4I_I2S_CTRL_GL_EN);
-
- /* Enable the first output line */
- regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
- SUN4I_I2S_CTRL_SDO_EN_MASK,
- SUN4I_I2S_CTRL_SDO_EN(0));
-
-
- return clk_prepare_enable(i2s->mod_clk);
-}
-
-static void sun4i_i2s_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-
- clk_disable_unprepare(i2s->mod_clk);
-
- /* Disable our output lines */
- regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
- SUN4I_I2S_CTRL_SDO_EN_MASK, 0);
-
- /* Disable the whole hardware block */
- regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
- SUN4I_I2S_CTRL_GL_EN, 0);
-}
-
static int sun4i_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
@@ -695,8 +661,6 @@ static const struct snd_soc_dai_ops sun4i_i2s_dai_ops = {
.hw_params = sun4i_i2s_hw_params,
.set_fmt = sun4i_i2s_set_fmt,
.set_sysclk = sun4i_i2s_set_sysclk,
- .shutdown = sun4i_i2s_shutdown,
- .startup = sun4i_i2s_startup,
.trigger = sun4i_i2s_trigger,
};
@@ -869,6 +833,21 @@ static int sun4i_i2s_runtime_resume(struct device *dev)
goto err_disable_clk;
}
+ /* Enable the whole hardware block */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_GL_EN, SUN4I_I2S_CTRL_GL_EN);
+
+ /* Enable the first output line */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_SDO_EN_MASK,
+ SUN4I_I2S_CTRL_SDO_EN(0));
+
+ ret = clk_prepare_enable(i2s->mod_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable module clock\n");
+ goto err_disable_clk;
+ }
+
return 0;
err_disable_clk:
@@ -880,6 +859,16 @@ static int sun4i_i2s_runtime_suspend(struct device *dev)
{
struct sun4i_i2s *i2s = dev_get_drvdata(dev);
+ clk_disable_unprepare(i2s->mod_clk);
+
+ /* Disable our output lines */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_SDO_EN_MASK, 0);
+
+ /* Disable the whole hardware block */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_GL_EN, 0);
+
regcache_cache_only(i2s->regmap, true);
clk_disable_unprepare(i2s->bus_clk);
@@ -961,6 +950,23 @@ static const struct sun4i_i2s_quirks sun8i_h3_i2s_quirks = {
.field_rxchansel = REG_FIELD(SUN8I_I2S_RX_CHAN_SEL_REG, 0, 2),
};
+static const struct sun4i_i2s_quirks sun50i_a64_codec_i2s_quirks = {
+ .has_reset = true,
+ .reg_offset_txdata = SUN8I_I2S_FIFO_TX_REG,
+ .sun4i_i2s_regmap = &sun4i_i2s_regmap_config,
+ .has_slave_select_bit = true,
+ .field_clkdiv_mclk_en = REG_FIELD(SUN4I_I2S_CLK_DIV_REG, 7, 7),
+ .field_fmt_wss = REG_FIELD(SUN4I_I2S_FMT0_REG, 2, 3),
+ .field_fmt_sr = REG_FIELD(SUN4I_I2S_FMT0_REG, 4, 5),
+ .field_fmt_bclk = REG_FIELD(SUN4I_I2S_FMT0_REG, 6, 6),
+ .field_fmt_lrclk = REG_FIELD(SUN4I_I2S_FMT0_REG, 7, 7),
+ .field_fmt_mode = REG_FIELD(SUN4I_I2S_FMT0_REG, 0, 1),
+ .field_txchanmap = REG_FIELD(SUN4I_I2S_TX_CHAN_MAP_REG, 0, 31),
+ .field_rxchanmap = REG_FIELD(SUN4I_I2S_RX_CHAN_MAP_REG, 0, 31),
+ .field_txchansel = REG_FIELD(SUN4I_I2S_TX_CHAN_SEL_REG, 0, 2),
+ .field_rxchansel = REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2),
+};
+
static int sun4i_i2s_init_regmap_fields(struct device *dev,
struct sun4i_i2s *i2s)
{
@@ -1169,6 +1175,10 @@ static const struct of_device_id sun4i_i2s_match[] = {
.compatible = "allwinner,sun8i-h3-i2s",
.data = &sun8i_h3_i2s_quirks,
},
+ {
+ .compatible = "allwinner,sun50i-a64-codec-i2s",
+ .data = &sun50i_a64_codec_i2s_quirks,
+ },
{}
};
MODULE_DEVICE_TABLE(of, sun4i_i2s_match);
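
The deleted startup()/shutdown() callbacks above now live in the
runtime_resume()/runtime_suspend() paths, so the hardware block is only
powered while the device is runtime-active. A minimal sketch of how such
callbacks are typically wired up (the dev_pm_ops definition is not part of
this hunk, so treat the exact form as an assumption):

	/* sketch: runtime PM wiring for the callbacks shown above */
	static const struct dev_pm_ops sun4i_i2s_pm_ops = {
		SET_RUNTIME_PM_OPS(sun4i_i2s_runtime_suspend,
				   sun4i_i2s_runtime_resume, NULL)
	};
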
diff --git a/sound/soc/sunxi/sun50i-codec-analog.c b/sound/soc/sunxi/sun50i-codec-analog.c
new file mode 100644
index 000000000000..8f5f999df631
--- /dev/null
+++ b/sound/soc/sunxi/sun50i-codec-analog.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This driver supports the analog controls for the internal codec
+ * found in Allwinner's A64 SoC.
+ *
+ * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org>
+ * Copyright (C) 2017 Marcus Cooper <codekipper@gmail.com>
+ * Copyright (C) 2018 Vasily Khoruzhick <anarsoul@gmail.com>
+ *
+ * Based on sun8i-codec-analog.c
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "sun8i-adda-pr-regmap.h"
+
+/* Codec analog control register offsets and bit fields */
+#define SUN50I_ADDA_HP_CTRL 0x00
+#define SUN50I_ADDA_HP_CTRL_PA_CLK_GATE 7
+#define SUN50I_ADDA_HP_CTRL_HPPA_EN 6
+#define SUN50I_ADDA_HP_CTRL_HPVOL 0
+
+#define SUN50I_ADDA_OL_MIX_CTRL 0x01
+#define SUN50I_ADDA_OL_MIX_CTRL_MIC1 6
+#define SUN50I_ADDA_OL_MIX_CTRL_MIC2 5
+#define SUN50I_ADDA_OL_MIX_CTRL_PHONE 4
+#define SUN50I_ADDA_OL_MIX_CTRL_PHONEN 3
+#define SUN50I_ADDA_OL_MIX_CTRL_LINEINL 2
+#define SUN50I_ADDA_OL_MIX_CTRL_DACL 1
+#define SUN50I_ADDA_OL_MIX_CTRL_DACR 0
+
+#define SUN50I_ADDA_OR_MIX_CTRL 0x02
+#define SUN50I_ADDA_OR_MIX_CTRL_MIC1 6
+#define SUN50I_ADDA_OR_MIX_CTRL_MIC2 5
+#define SUN50I_ADDA_OR_MIX_CTRL_PHONE 4
+#define SUN50I_ADDA_OR_MIX_CTRL_PHONEP 3
+#define SUN50I_ADDA_OR_MIX_CTRL_LINEINR 2
+#define SUN50I_ADDA_OR_MIX_CTRL_DACR 1
+#define SUN50I_ADDA_OR_MIX_CTRL_DACL 0
+
+#define SUN50I_ADDA_LINEOUT_CTRL0 0x05
+#define SUN50I_ADDA_LINEOUT_CTRL0_LEN 7
+#define SUN50I_ADDA_LINEOUT_CTRL0_REN 6
+#define SUN50I_ADDA_LINEOUT_CTRL0_LSRC_SEL 5
+#define SUN50I_ADDA_LINEOUT_CTRL0_RSRC_SEL 4
+
+#define SUN50I_ADDA_LINEOUT_CTRL1 0x06
+#define SUN50I_ADDA_LINEOUT_CTRL1_VOL 0
+
+#define SUN50I_ADDA_MIC1_CTRL 0x07
+#define SUN50I_ADDA_MIC1_CTRL_MIC1G 4
+#define SUN50I_ADDA_MIC1_CTRL_MIC1AMPEN 3
+#define SUN50I_ADDA_MIC1_CTRL_MIC1BOOST 0
+
+#define SUN50I_ADDA_MIC2_CTRL 0x08
+#define SUN50I_ADDA_MIC2_CTRL_MIC2G 4
+#define SUN50I_ADDA_MIC2_CTRL_MIC2AMPEN 3
+#define SUN50I_ADDA_MIC2_CTRL_MIC2BOOST 0
+
+#define SUN50I_ADDA_LINEIN_CTRL 0x09
+#define SUN50I_ADDA_LINEIN_CTRL_LINEING 0
+
+#define SUN50I_ADDA_MIX_DAC_CTRL 0x0a
+#define SUN50I_ADDA_MIX_DAC_CTRL_DACAREN 7
+#define SUN50I_ADDA_MIX_DAC_CTRL_DACALEN 6
+#define SUN50I_ADDA_MIX_DAC_CTRL_RMIXEN 5
+#define SUN50I_ADDA_MIX_DAC_CTRL_LMIXEN 4
+#define SUN50I_ADDA_MIX_DAC_CTRL_RHPPAMUTE 3
+#define SUN50I_ADDA_MIX_DAC_CTRL_LHPPAMUTE 2
+#define SUN50I_ADDA_MIX_DAC_CTRL_RHPIS 1
+#define SUN50I_ADDA_MIX_DAC_CTRL_LHPIS 0
+
+#define SUN50I_ADDA_L_ADCMIX_SRC 0x0b
+#define SUN50I_ADDA_L_ADCMIX_SRC_MIC1 6
+#define SUN50I_ADDA_L_ADCMIX_SRC_MIC2 5
+#define SUN50I_ADDA_L_ADCMIX_SRC_PHONE 4
+#define SUN50I_ADDA_L_ADCMIX_SRC_PHONEN 3
+#define SUN50I_ADDA_L_ADCMIX_SRC_LINEINL 2
+#define SUN50I_ADDA_L_ADCMIX_SRC_OMIXRL 1
+#define SUN50I_ADDA_L_ADCMIX_SRC_OMIXRR 0
+
+#define SUN50I_ADDA_R_ADCMIX_SRC 0x0c
+#define SUN50I_ADDA_R_ADCMIX_SRC_MIC1 6
+#define SUN50I_ADDA_R_ADCMIX_SRC_MIC2 5
+#define SUN50I_ADDA_R_ADCMIX_SRC_PHONE 4
+#define SUN50I_ADDA_R_ADCMIX_SRC_PHONEP 3
+#define SUN50I_ADDA_R_ADCMIX_SRC_LINEINR 2
+#define SUN50I_ADDA_R_ADCMIX_SRC_OMIXR 1
+#define SUN50I_ADDA_R_ADCMIX_SRC_OMIXL 0
+
+#define SUN50I_ADDA_ADC_CTRL 0x0d
+#define SUN50I_ADDA_ADC_CTRL_ADCREN 7
+#define SUN50I_ADDA_ADC_CTRL_ADCLEN 6
+#define SUN50I_ADDA_ADC_CTRL_ADCG 0
+
+#define SUN50I_ADDA_HS_MBIAS_CTRL 0x0e
+#define SUN50I_ADDA_HS_MBIAS_CTRL_MMICBIASEN 7
+
+#define SUN50I_ADDA_JACK_MIC_CTRL 0x1d
+#define SUN50I_ADDA_JACK_MIC_CTRL_HMICBIASEN 5
+
+/* mixer controls */
+static const struct snd_kcontrol_new sun50i_a64_codec_mixer_controls[] = {
+ SOC_DAPM_DOUBLE_R("DAC Playback Switch",
+ SUN50I_ADDA_OL_MIX_CTRL,
+ SUN50I_ADDA_OR_MIX_CTRL,
+ SUN50I_ADDA_OL_MIX_CTRL_DACL, 1, 0),
+ SOC_DAPM_DOUBLE_R("DAC Reversed Playback Switch",
+ SUN50I_ADDA_OL_MIX_CTRL,
+ SUN50I_ADDA_OR_MIX_CTRL,
+ SUN50I_ADDA_OL_MIX_CTRL_DACR, 1, 0),
+ SOC_DAPM_DOUBLE_R("Line In Playback Switch",
+ SUN50I_ADDA_OL_MIX_CTRL,
+ SUN50I_ADDA_OR_MIX_CTRL,
+ SUN50I_ADDA_OL_MIX_CTRL_LINEINL, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic1 Playback Switch",
+ SUN50I_ADDA_OL_MIX_CTRL,
+ SUN50I_ADDA_OR_MIX_CTRL,
+ SUN50I_ADDA_OL_MIX_CTRL_MIC1, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic2 Playback Switch",
+ SUN50I_ADDA_OL_MIX_CTRL,
+ SUN50I_ADDA_OR_MIX_CTRL,
+ SUN50I_ADDA_OL_MIX_CTRL_MIC2, 1, 0),
+};
+
+/* ADC mixer controls */
+static const struct snd_kcontrol_new sun50i_codec_adc_mixer_controls[] = {
+ SOC_DAPM_DOUBLE_R("Mixer Capture Switch",
+ SUN50I_ADDA_L_ADCMIX_SRC,
+ SUN50I_ADDA_R_ADCMIX_SRC,
+ SUN50I_ADDA_L_ADCMIX_SRC_OMIXRL, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mixer Reversed Capture Switch",
+ SUN50I_ADDA_L_ADCMIX_SRC,
+ SUN50I_ADDA_R_ADCMIX_SRC,
+ SUN50I_ADDA_L_ADCMIX_SRC_OMIXRR, 1, 0),
+ SOC_DAPM_DOUBLE_R("Line In Capture Switch",
+ SUN50I_ADDA_L_ADCMIX_SRC,
+ SUN50I_ADDA_R_ADCMIX_SRC,
+ SUN50I_ADDA_L_ADCMIX_SRC_LINEINL, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic1 Capture Switch",
+ SUN50I_ADDA_L_ADCMIX_SRC,
+ SUN50I_ADDA_R_ADCMIX_SRC,
+ SUN50I_ADDA_L_ADCMIX_SRC_MIC1, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic2 Capture Switch",
+ SUN50I_ADDA_L_ADCMIX_SRC,
+ SUN50I_ADDA_R_ADCMIX_SRC,
+ SUN50I_ADDA_L_ADCMIX_SRC_MIC2, 1, 0),
+};
+
+static const DECLARE_TLV_DB_SCALE(sun50i_codec_out_mixer_pregain_scale,
+ -450, 150, 0);
+static const DECLARE_TLV_DB_RANGE(sun50i_codec_mic_gain_scale,
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 7, TLV_DB_SCALE_ITEM(2400, 300, 0),
+);
+
+static const DECLARE_TLV_DB_SCALE(sun50i_codec_hp_vol_scale, -6300, 100, 1);
+
+static const DECLARE_TLV_DB_RANGE(sun50i_codec_lineout_vol_scale,
+ 0, 1, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
+ 2, 31, TLV_DB_SCALE_ITEM(-4350, 150, 0),
+);
+
+
+/* volume / mute controls */
+static const struct snd_kcontrol_new sun50i_a64_codec_controls[] = {
+ SOC_SINGLE_TLV("Headphone Playback Volume",
+ SUN50I_ADDA_HP_CTRL,
+ SUN50I_ADDA_HP_CTRL_HPVOL, 0x3f, 0,
+ sun50i_codec_hp_vol_scale),
+
+ SOC_DOUBLE("Headphone Playback Switch",
+ SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_LHPPAMUTE,
+ SUN50I_ADDA_MIX_DAC_CTRL_RHPPAMUTE, 1, 0),
+
+ /* Mixer pre-gain */
+ SOC_SINGLE_TLV("Mic1 Playback Volume", SUN50I_ADDA_MIC1_CTRL,
+ SUN50I_ADDA_MIC1_CTRL_MIC1G,
+ 0x7, 0, sun50i_codec_out_mixer_pregain_scale),
+
+ /* Microphone Amp boost gain */
+ SOC_SINGLE_TLV("Mic1 Boost Volume", SUN50I_ADDA_MIC1_CTRL,
+ SUN50I_ADDA_MIC1_CTRL_MIC1BOOST, 0x7, 0,
+ sun50i_codec_mic_gain_scale),
+
+ /* Mixer pre-gain */
+ SOC_SINGLE_TLV("Mic2 Playback Volume",
+ SUN50I_ADDA_MIC2_CTRL, SUN50I_ADDA_MIC2_CTRL_MIC2G,
+ 0x7, 0, sun50i_codec_out_mixer_pregain_scale),
+
+ /* Microphone Amp boost gain */
+ SOC_SINGLE_TLV("Mic2 Boost Volume", SUN50I_ADDA_MIC2_CTRL,
+ SUN50I_ADDA_MIC2_CTRL_MIC2BOOST, 0x7, 0,
+ sun50i_codec_mic_gain_scale),
+
+ /* ADC */
+ SOC_SINGLE_TLV("ADC Gain Capture Volume", SUN50I_ADDA_ADC_CTRL,
+ SUN50I_ADDA_ADC_CTRL_ADCG, 0x7, 0,
+ sun50i_codec_out_mixer_pregain_scale),
+
+ /* Mixer pre-gain */
+ SOC_SINGLE_TLV("Line In Playback Volume", SUN50I_ADDA_LINEIN_CTRL,
+ SUN50I_ADDA_LINEIN_CTRL_LINEING,
+ 0x7, 0, sun50i_codec_out_mixer_pregain_scale),
+
+ SOC_SINGLE_TLV("Line Out Playback Volume",
+ SUN50I_ADDA_LINEOUT_CTRL1,
+ SUN50I_ADDA_LINEOUT_CTRL1_VOL, 0x1f, 0,
+ sun50i_codec_lineout_vol_scale),
+
+ SOC_DOUBLE("Line Out Playback Switch",
+ SUN50I_ADDA_LINEOUT_CTRL0,
+ SUN50I_ADDA_LINEOUT_CTRL0_LEN,
+ SUN50I_ADDA_LINEOUT_CTRL0_REN, 1, 0),
+
+};
+
+static const char * const sun50i_codec_hp_src_enum_text[] = {
+ "DAC", "Mixer",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun50i_codec_hp_src_enum,
+ SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_LHPIS,
+ SUN50I_ADDA_MIX_DAC_CTRL_RHPIS,
+ sun50i_codec_hp_src_enum_text);
+
+static const struct snd_kcontrol_new sun50i_codec_hp_src[] = {
+ SOC_DAPM_ENUM("Headphone Source Playback Route",
+ sun50i_codec_hp_src_enum),
+};
+
+static const char * const sun50i_codec_lineout_src_enum_text[] = {
+ "Stereo", "Mono Differential",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun50i_codec_lineout_src_enum,
+ SUN50I_ADDA_LINEOUT_CTRL0,
+ SUN50I_ADDA_LINEOUT_CTRL0_LSRC_SEL,
+ SUN50I_ADDA_LINEOUT_CTRL0_RSRC_SEL,
+ sun50i_codec_lineout_src_enum_text);
+
+static const struct snd_kcontrol_new sun50i_codec_lineout_src[] = {
+ SOC_DAPM_ENUM("Line Out Source Playback Route",
+ sun50i_codec_lineout_src_enum),
+};
+
+static const struct snd_soc_dapm_widget sun50i_a64_codec_widgets[] = {
+ /* DAC */
+ SND_SOC_DAPM_DAC("Left DAC", NULL, SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_DACALEN, 0),
+ SND_SOC_DAPM_DAC("Right DAC", NULL, SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_DACAREN, 0),
+ /* ADC */
+ SND_SOC_DAPM_ADC("Left ADC", NULL, SUN50I_ADDA_ADC_CTRL,
+ SUN50I_ADDA_ADC_CTRL_ADCLEN, 0),
+ SND_SOC_DAPM_ADC("Right ADC", NULL, SUN50I_ADDA_ADC_CTRL,
+ SUN50I_ADDA_ADC_CTRL_ADCREN, 0),
+ /*
+ * Due to this component and the codec belonging to separate DAPM
+ * contexts, we need to manually link the above widgets to their
+ * stream widgets at the card level.
+ */
+
+ SND_SOC_DAPM_MUX("Headphone Source Playback Route",
+ SND_SOC_NOPM, 0, 0, sun50i_codec_hp_src),
+ SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUN50I_ADDA_HP_CTRL,
+ SUN50I_ADDA_HP_CTRL_HPPA_EN, 0, NULL, 0),
+ SND_SOC_DAPM_OUTPUT("HP"),
+
+ SND_SOC_DAPM_MUX("Line Out Source Playback Route",
+ SND_SOC_NOPM, 0, 0, sun50i_codec_lineout_src),
+ SND_SOC_DAPM_OUTPUT("LINEOUT"),
+
+ /* Microphone inputs */
+ SND_SOC_DAPM_INPUT("MIC1"),
+
+ /* Microphone Bias */
+ SND_SOC_DAPM_SUPPLY("MBIAS", SUN50I_ADDA_HS_MBIAS_CTRL,
+ SUN50I_ADDA_HS_MBIAS_CTRL_MMICBIASEN,
+ 0, NULL, 0),
+
+ /* Mic input path */
+ SND_SOC_DAPM_PGA("Mic1 Amplifier", SUN50I_ADDA_MIC1_CTRL,
+ SUN50I_ADDA_MIC1_CTRL_MIC1AMPEN, 0, NULL, 0),
+
+ /* Microphone input */
+ SND_SOC_DAPM_INPUT("MIC2"),
+
+ /* Microphone Bias */
+ SND_SOC_DAPM_SUPPLY("HBIAS", SUN50I_ADDA_JACK_MIC_CTRL,
+ SUN50I_ADDA_JACK_MIC_CTRL_HMICBIASEN,
+ 0, NULL, 0),
+
+ /* Mic input path */
+ SND_SOC_DAPM_PGA("Mic2 Amplifier", SUN50I_ADDA_MIC2_CTRL,
+ SUN50I_ADDA_MIC2_CTRL_MIC2AMPEN, 0, NULL, 0),
+
+ /* Line input */
+ SND_SOC_DAPM_INPUT("LINEIN"),
+
+ /* Mixers */
+ SND_SOC_DAPM_MIXER("Left Mixer", SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_LMIXEN, 0,
+ sun50i_a64_codec_mixer_controls,
+ ARRAY_SIZE(sun50i_a64_codec_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Right Mixer", SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_RMIXEN, 0,
+ sun50i_a64_codec_mixer_controls,
+ ARRAY_SIZE(sun50i_a64_codec_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Left ADC Mixer", SUN50I_ADDA_ADC_CTRL,
+ SUN50I_ADDA_ADC_CTRL_ADCLEN, 0,
+ sun50i_codec_adc_mixer_controls,
+ ARRAY_SIZE(sun50i_codec_adc_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Right ADC Mixer", SUN50I_ADDA_ADC_CTRL,
+ SUN50I_ADDA_ADC_CTRL_ADCREN, 0,
+ sun50i_codec_adc_mixer_controls,
+ ARRAY_SIZE(sun50i_codec_adc_mixer_controls)),
+};
+
+static const struct snd_soc_dapm_route sun50i_a64_codec_routes[] = {
+ /* Left Mixer Routes */
+ { "Left Mixer", "DAC Playback Switch", "Left DAC" },
+ { "Left Mixer", "DAC Reversed Playback Switch", "Right DAC" },
+ { "Left Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+
+ /* Right Mixer Routes */
+ { "Right Mixer", "DAC Playback Switch", "Right DAC" },
+ { "Right Mixer", "DAC Reversed Playback Switch", "Left DAC" },
+ { "Right Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+
+ /* Left ADC Mixer Routes */
+ { "Left ADC Mixer", "Mixer Capture Switch", "Left Mixer" },
+ { "Left ADC Mixer", "Mixer Reversed Capture Switch", "Right Mixer" },
+ { "Left ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+
+ /* Right ADC Mixer Routes */
+ { "Right ADC Mixer", "Mixer Capture Switch", "Right Mixer" },
+ { "Right ADC Mixer", "Mixer Reversed Capture Switch", "Left Mixer" },
+ { "Right ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+
+ /* ADC Routes */
+ { "Left ADC", NULL, "Left ADC Mixer" },
+ { "Right ADC", NULL, "Right ADC Mixer" },
+
+ /* Headphone Routes */
+ { "Headphone Source Playback Route", "DAC", "Left DAC" },
+ { "Headphone Source Playback Route", "DAC", "Right DAC" },
+ { "Headphone Source Playback Route", "Mixer", "Left Mixer" },
+ { "Headphone Source Playback Route", "Mixer", "Right Mixer" },
+ { "Headphone Amp", NULL, "Headphone Source Playback Route" },
+ { "HP", NULL, "Headphone Amp" },
+
+ /* Microphone Routes */
+ { "Mic1 Amplifier", NULL, "MIC1"},
+
+ /* Microphone Routes */
+ { "Mic2 Amplifier", NULL, "MIC2"},
+ { "Left Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+ { "Right Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+ { "Left ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+ { "Right ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+
+ /* Line-in Routes */
+ { "Left Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Right Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Left ADC Mixer", "Line In Capture Switch", "LINEIN" },
+ { "Right ADC Mixer", "Line In Capture Switch", "LINEIN" },
+
+ /* Line-out Routes */
+ { "Line Out Source Playback Route", "Stereo", "Left Mixer" },
+ { "Line Out Source Playback Route", "Stereo", "Right Mixer" },
+ { "Line Out Source Playback Route", "Mono Differential", "Left Mixer" },
+ { "Line Out Source Playback Route", "Mono Differential",
+ "Right Mixer" },
+ { "LINEOUT", NULL, "Line Out Source Playback Route" },
+};
+
+static const struct snd_soc_component_driver sun50i_codec_analog_cmpnt_drv = {
+ .controls = sun50i_a64_codec_controls,
+ .num_controls = ARRAY_SIZE(sun50i_a64_codec_controls),
+ .dapm_widgets = sun50i_a64_codec_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sun50i_a64_codec_widgets),
+ .dapm_routes = sun50i_a64_codec_routes,
+ .num_dapm_routes = ARRAY_SIZE(sun50i_a64_codec_routes),
+};
+
+static const struct of_device_id sun50i_codec_analog_of_match[] = {
+ {
+ .compatible = "allwinner,sun50i-a64-codec-analog",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun50i_codec_analog_of_match);
+
+static int sun50i_codec_analog_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct regmap *regmap;
+ void __iomem *base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ dev_err(&pdev->dev, "Failed to map the registers\n");
+ return PTR_ERR(base);
+ }
+
+ regmap = sun8i_adda_pr_regmap_init(&pdev->dev, base);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to create regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ return devm_snd_soc_register_component(&pdev->dev,
+ &sun50i_codec_analog_cmpnt_drv,
+ NULL, 0);
+}
+
+static struct platform_driver sun50i_codec_analog_driver = {
+ .driver = {
+ .name = "sun50i-codec-analog",
+ .of_match_table = sun50i_codec_analog_of_match,
+ },
+ .probe = sun50i_codec_analog_probe,
+};
+module_platform_driver(sun50i_codec_analog_driver);
+
+MODULE_DESCRIPTION("Allwinner internal codec analog controls driver for A64");
+MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sun50i-codec-analog");
diff --git a/sound/soc/sunxi/sun8i-adda-pr-regmap.c b/sound/soc/sunxi/sun8i-adda-pr-regmap.c
new file mode 100644
index 000000000000..e68ce9d2884d
--- /dev/null
+++ b/sound/soc/sunxi/sun8i-adda-pr-regmap.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This driver provides a regmap to access the analog part of the audio
+ * codec found on Allwinner A23, A31s, A33, H3 and A64 SoCs.
+ *
+ * Copyright 2016 Chen-Yu Tsai <wens@csie.org>
+ * Copyright (C) 2018 Vasily Khoruzhick <anarsoul@gmail.com>
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "sun8i-adda-pr-regmap.h"
+
+/* Analog control register access bits */
+#define ADDA_PR 0x0 /* PRCM base + 0x1c0 */
+#define ADDA_PR_RESET BIT(28)
+#define ADDA_PR_WRITE BIT(24)
+#define ADDA_PR_ADDR_SHIFT 16
+#define ADDA_PR_ADDR_MASK GENMASK(4, 0)
+#define ADDA_PR_DATA_IN_SHIFT 8
+#define ADDA_PR_DATA_IN_MASK GENMASK(7, 0)
+#define ADDA_PR_DATA_OUT_SHIFT 0
+#define ADDA_PR_DATA_OUT_MASK GENMASK(7, 0)
+
+/* regmap access bits */
+static int adda_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ void __iomem *base = (void __iomem *)context;
+ u32 tmp;
+
+ /* De-assert reset */
+ writel(readl(base) | ADDA_PR_RESET, base);
+
+ /* Clear write bit */
+ writel(readl(base) & ~ADDA_PR_WRITE, base);
+
+ /* Set register address */
+ tmp = readl(base);
+ tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
+ tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
+ writel(tmp, base);
+
+ /* Read back value */
+ *val = readl(base) & ADDA_PR_DATA_OUT_MASK;
+
+ return 0;
+}
+
+static int adda_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ void __iomem *base = (void __iomem *)context;
+ u32 tmp;
+
+ /* De-assert reset */
+ writel(readl(base) | ADDA_PR_RESET, base);
+
+ /* Set register address */
+ tmp = readl(base);
+ tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
+ tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
+ writel(tmp, base);
+
+ /* Set data to write */
+ tmp = readl(base);
+ tmp &= ~(ADDA_PR_DATA_IN_MASK << ADDA_PR_DATA_IN_SHIFT);
+ tmp |= (val & ADDA_PR_DATA_IN_MASK) << ADDA_PR_DATA_IN_SHIFT;
+ writel(tmp, base);
+
+ /* Set write bit to signal a write */
+ writel(readl(base) | ADDA_PR_WRITE, base);
+
+ /* Clear write bit */
+ writel(readl(base) & ~ADDA_PR_WRITE, base);
+
+ return 0;
+}
+
+static const struct regmap_config adda_pr_regmap_cfg = {
+ .name = "adda-pr",
+ .reg_bits = 5,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .reg_read = adda_reg_read,
+ .reg_write = adda_reg_write,
+ .fast_io = true,
+ .max_register = 31,
+};
+
+struct regmap *sun8i_adda_pr_regmap_init(struct device *dev,
+ void __iomem *base)
+{
+ return devm_regmap_init(dev, NULL, base, &adda_pr_regmap_cfg);
+}
+EXPORT_SYMBOL_GPL(sun8i_adda_pr_regmap_init);
+
+MODULE_DESCRIPTION("Allwinner analog audio codec regmap driver");
+MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sunxi-adda-pr");
diff --git a/sound/soc/sunxi/sun8i-adda-pr-regmap.h b/sound/soc/sunxi/sun8i-adda-pr-regmap.h
new file mode 100644
index 000000000000..a5ae95dfebc1
--- /dev/null
+++ b/sound/soc/sunxi/sun8i-adda-pr-regmap.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Vasily Khoruzhick <anarsoul@gmail.com>
+ */
+
+struct regmap *sun8i_adda_pr_regmap_init(struct device *dev,
+ void __iomem *base);
diff --git a/sound/soc/sunxi/sun8i-codec-analog.c b/sound/soc/sunxi/sun8i-codec-analog.c
index 485e79f292c4..916a46bbc1c8 100644
--- a/sound/soc/sunxi/sun8i-codec-analog.c
+++ b/sound/soc/sunxi/sun8i-codec-analog.c
@@ -27,6 +27,8 @@
#include <sound/soc-dapm.h>
#include <sound/tlv.h>
+#include "sun8i-adda-pr-regmap.h"
+
/* Codec analog control register offsets and bit fields */
#define SUN8I_ADDA_HP_VOLC 0x00
#define SUN8I_ADDA_HP_VOLC_PA_CLK_GATE 7
@@ -120,81 +122,6 @@
#define SUN8I_ADDA_ADC_AP_EN_ADCLEN 6
#define SUN8I_ADDA_ADC_AP_EN_ADCG 0

-/* Analog control register access bits */
-#define ADDA_PR 0x0 /* PRCM base + 0x1c0 */
-#define ADDA_PR_RESET BIT(28)
-#define ADDA_PR_WRITE BIT(24)
-#define ADDA_PR_ADDR_SHIFT 16
-#define ADDA_PR_ADDR_MASK GENMASK(4, 0)
-#define ADDA_PR_DATA_IN_SHIFT 8
-#define ADDA_PR_DATA_IN_MASK GENMASK(7, 0)
-#define ADDA_PR_DATA_OUT_SHIFT 0
-#define ADDA_PR_DATA_OUT_MASK GENMASK(7, 0)
-
-/* regmap access bits */
-static int adda_reg_read(void *context, unsigned int reg, unsigned int *val)
-{
- void __iomem *base = (void __iomem *)context;
- u32 tmp;
-
- /* De-assert reset */
- writel(readl(base) | ADDA_PR_RESET, base);
-
- /* Clear write bit */
- writel(readl(base) & ~ADDA_PR_WRITE, base);
-
- /* Set register address */
- tmp = readl(base);
- tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
- tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
- writel(tmp, base);
-
- /* Read back value */
- *val = readl(base) & ADDA_PR_DATA_OUT_MASK;
-
- return 0;
-}
-
-static int adda_reg_write(void *context, unsigned int reg, unsigned int val)
-{
- void __iomem *base = (void __iomem *)context;
- u32 tmp;
-
- /* De-assert reset */
- writel(readl(base) | ADDA_PR_RESET, base);
-
- /* Set register address */
- tmp = readl(base);
- tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
- tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
- writel(tmp, base);
-
- /* Set data to write */
- tmp = readl(base);
- tmp &= ~(ADDA_PR_DATA_IN_MASK << ADDA_PR_DATA_IN_SHIFT);
- tmp |= (val & ADDA_PR_DATA_IN_MASK) << ADDA_PR_DATA_IN_SHIFT;
- writel(tmp, base);
-
- /* Set write bit to signal a write */
- writel(readl(base) | ADDA_PR_WRITE, base);
-
- /* Clear write bit */
- writel(readl(base) & ~ADDA_PR_WRITE, base);
-
- return 0;
-}
-
-static const struct regmap_config adda_pr_regmap_cfg = {
- .name = "adda-pr",
- .reg_bits = 5,
- .reg_stride = 1,
- .val_bits = 8,
- .reg_read = adda_reg_read,
- .reg_write = adda_reg_write,
- .fast_io = true,
- .max_register = 24,
-};
-
/* mixer controls */
static const struct snd_kcontrol_new sun8i_codec_mixer_controls[] = {
SOC_DAPM_DOUBLE_R("DAC Playback Switch",
@@ -912,7 +839,7 @@ static int sun8i_codec_analog_probe(struct platform_device *pdev)
return PTR_ERR(base);
}
- regmap = devm_regmap_init(&pdev->dev, NULL, base, &adda_pr_regmap_cfg);
+ regmap = sun8i_adda_pr_regmap_init(&pdev->dev, base);
if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "Failed to create regmap\n");
return PTR_ERR(regmap);
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index fb37dd927e33..522a72fde78d 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/log2.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -52,7 +53,6 @@
#define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV 13
#define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV 9
#define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV 6
-#define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_16 (1 << 6)
#define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ 4
#define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_16 (1 << 4)
#define SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT 2
@@ -300,12 +300,23 @@ static u8 sun8i_codec_get_bclk_div(struct sun8i_codec *scodec,
return best_val;
}
+static int sun8i_codec_get_lrck_div(unsigned int channels,
+ unsigned int word_size)
+{
+ unsigned int div = word_size * channels;
+
+ if (div < 16 || div > 256)
+ return -EINVAL;
+
+ return ilog2(div) - 4;
+}
+
static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct sun8i_codec *scodec = snd_soc_component_get_drvdata(dai->component);
- int sample_rate;
+ int sample_rate, lrck_div;
u8 bclk_div;
/*
@@ -321,9 +332,14 @@ static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK,
bclk_div << SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV);
+ lrck_div = sun8i_codec_get_lrck_div(params_channels(params),
+ params_physical_width(params));
+ if (lrck_div < 0)
+ return lrck_div;
+
regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK,
- SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_16);
+ lrck_div << SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV);
sample_rate = sun8i_codec_get_hw_rate(params);
if (sample_rate < 0)
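
A few worked values for the divider encoding above; the product of channel
count and physical slot width selects the LRCK divider:

	/* worked examples for sun8i_codec_get_lrck_div() */
	/*   (2 ch, 32-bit slots) -> div = 64, ilog2(64) - 4 = 2  (LRCK = BCLK/64) */
	/*   (2 ch, 16-bit slots) -> div = 32, ilog2(32) - 4 = 1  (LRCK = BCLK/32) */
	/*   (1 ch,  8-bit slots) -> div = 8, below 16, rejected with -EINVAL     */
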
diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
index 45a4aa9d2a47..901457da25ec 100644
--- a/sound/soc/tegra/tegra_sgtl5000.c
+++ b/sound/soc/tegra/tegra_sgtl5000.c
@@ -149,14 +149,14 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing/invalid\n");
ret = -EINVAL;
- goto err;
+ goto err_put_codec_of_node;
}
tegra_sgtl5000_dai.platform_of_node = tegra_sgtl5000_dai.cpu_of_node;
ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
if (ret)
- goto err;
+ goto err_put_cpu_of_node;
ret = snd_soc_register_card(card);
if (ret) {
@@ -169,6 +169,13 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
err_fini_utils:
tegra_asoc_utils_fini(&machine->util_data);
+err_put_cpu_of_node:
+ of_node_put(tegra_sgtl5000_dai.cpu_of_node);
+ tegra_sgtl5000_dai.cpu_of_node = NULL;
+ tegra_sgtl5000_dai.platform_of_node = NULL;
+err_put_codec_of_node:
+ of_node_put(tegra_sgtl5000_dai.codec_of_node);
+ tegra_sgtl5000_dai.codec_of_node = NULL;
err:
return ret;
}
@@ -183,6 +190,12 @@ static int tegra_sgtl5000_driver_remove(struct platform_device *pdev)
tegra_asoc_utils_fini(&machine->util_data);
+ of_node_put(tegra_sgtl5000_dai.cpu_of_node);
+ tegra_sgtl5000_dai.cpu_of_node = NULL;
+ tegra_sgtl5000_dai.platform_of_node = NULL;
+ of_node_put(tegra_sgtl5000_dai.codec_of_node);
+ tegra_sgtl5000_dai.codec_of_node = NULL;
+
return ret;
}
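
The new error labels implement the usual of_parse_phandle() ownership rule:
every node reference that was successfully obtained must be dropped on every
exit path. A minimal sketch of the rule, using one property name this driver
actually parses:

	/* sketch: of_parse_phandle() returns a refcounted node */
	static int lookup_codec(struct platform_device *pdev)
	{
		struct device_node *np;

		np = of_parse_phandle(pdev->dev.of_node, "nvidia,audio-codec", 0);
		if (!np)
			return -EINVAL;
		/* ... use np ... */
		of_node_put(np);	/* drop the reference on all paths */
		return 0;
	}
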
diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
index e2ad00e3cae1..1cfca698ae4b 100644
--- a/sound/soc/txx9/txx9aclc-ac97.c
+++ b/sound/soc/txx9/txx9aclc-ac97.c
@@ -208,13 +208,12 @@ static int txx9aclc_ac97_dev_probe(struct platform_device *pdev)
if (err < 0)
return err;
- return snd_soc_register_component(&pdev->dev, &txx9aclc_ac97_component,
+ return devm_snd_soc_register_component(&pdev->dev, &txx9aclc_ac97_component,
&txx9aclc_ac97_dai, 1);
}
static int txx9aclc_ac97_dev_remove(struct platform_device *pdev)
{
- snd_soc_unregister_component(&pdev->dev);
snd_soc_set_ac97_ops(NULL);
return 0;
}
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index d55ca48de3ea..f4a72e39ffa9 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -200,6 +200,7 @@ static void usb_ep1_command_reply_dispatch (struct urb* urb)
break;
}
#ifdef CONFIG_SND_USB_CAIAQ_INPUT
+ /* fall through */
case EP1_CMD_READ_ERP:
case EP1_CMD_READ_ANALOG:
snd_usb_caiaq_input_dispatch(cdev, buf, urb->actual_length);
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index dcfc546d81b9..b737f0ec77d0 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1175,8 +1175,7 @@ static void snd_usbmidi_output_trigger(struct snd_rawmidi_substream *substream,
if (port->ep->umidi->disconnected) {
/* gobble up remaining bytes to prevent wait in
* snd_rawmidi_drain_output */
- while (!snd_rawmidi_transmit_empty(substream))
- snd_rawmidi_transmit_ack(substream, 1);
+ snd_rawmidi_proceed(substream);
return;
}
tasklet_schedule(&port->ep->tasklet);
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index cbfb48bdea51..85ae0ff2382a 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -29,6 +29,7 @@
#include <linux/hid.h>
#include <linux/init.h>
+#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
@@ -1817,6 +1818,380 @@ static int dell_dock_mixer_init(struct usb_mixer_interface *mixer)
return 0;
}
+/* RME Class Compliant device quirks */
+
+#define SND_RME_GET_STATUS1 23
+#define SND_RME_GET_CURRENT_FREQ 17
+#define SND_RME_CLK_SYSTEM_SHIFT 16
+#define SND_RME_CLK_SYSTEM_MASK 0x1f
+#define SND_RME_CLK_AES_SHIFT 8
+#define SND_RME_CLK_SPDIF_SHIFT 12
+#define SND_RME_CLK_AES_SPDIF_MASK 0xf
+#define SND_RME_CLK_SYNC_SHIFT 6
+#define SND_RME_CLK_SYNC_MASK 0x3
+#define SND_RME_CLK_FREQMUL_SHIFT 18
+#define SND_RME_CLK_FREQMUL_MASK 0x7
+#define SND_RME_CLK_SYSTEM(x) \
+ ((x >> SND_RME_CLK_SYSTEM_SHIFT) & SND_RME_CLK_SYSTEM_MASK)
+#define SND_RME_CLK_AES(x) \
+ ((x >> SND_RME_CLK_AES_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
+#define SND_RME_CLK_SPDIF(x) \
+ ((x >> SND_RME_CLK_SPDIF_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
+#define SND_RME_CLK_SYNC(x) \
+ ((x >> SND_RME_CLK_SYNC_SHIFT) & SND_RME_CLK_SYNC_MASK)
+#define SND_RME_CLK_FREQMUL(x) \
+ ((x >> SND_RME_CLK_FREQMUL_SHIFT) & SND_RME_CLK_FREQMUL_MASK)
+#define SND_RME_CLK_AES_LOCK 0x1
+#define SND_RME_CLK_AES_SYNC 0x4
+#define SND_RME_CLK_SPDIF_LOCK 0x2
+#define SND_RME_CLK_SPDIF_SYNC 0x8
+#define SND_RME_SPDIF_IF_SHIFT 4
+#define SND_RME_SPDIF_FORMAT_SHIFT 5
+#define SND_RME_BINARY_MASK 0x1
+#define SND_RME_SPDIF_IF(x) \
+ ((x >> SND_RME_SPDIF_IF_SHIFT) & SND_RME_BINARY_MASK)
+#define SND_RME_SPDIF_FORMAT(x) \
+ ((x >> SND_RME_SPDIF_FORMAT_SHIFT) & SND_RME_BINARY_MASK)
+
+static const u32 snd_rme_rate_table[] = {
+ 32000, 44100, 48000, 50000,
+ 64000, 88200, 96000, 100000,
+ 128000, 176400, 192000, 200000,
+ 256000, 352800, 384000, 400000,
+ 512000, 705600, 768000, 800000
+};
+/* number of entries in the above table valid for the AES and S/PDIF rate fields */
+#define SND_RME_RATE_IDX_AES_SPDIF_NUM 12
+
+enum snd_rme_domain {
+ SND_RME_DOMAIN_SYSTEM,
+ SND_RME_DOMAIN_AES,
+ SND_RME_DOMAIN_SPDIF
+};
+
+enum snd_rme_clock_status {
+ SND_RME_CLOCK_NOLOCK,
+ SND_RME_CLOCK_LOCK,
+ SND_RME_CLOCK_SYNC
+};
+
+static int snd_rme_read_value(struct snd_usb_audio *chip,
+ unsigned int item,
+ u32 *value)
+{
+ struct usb_device *dev = chip->dev;
+ int err;
+
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+ item,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, 0,
+ value, sizeof(*value));
+ if (err < 0)
+ dev_err(&dev->dev,
+ "unable to issue vendor read request %d (ret = %d)",
+ item, err);
+ return err;
+}
+
+static int snd_rme_get_status1(struct snd_kcontrol *kcontrol,
+ u32 *status1)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ struct snd_usb_audio *chip = list->mixer->chip;
+ int err;
+
+ err = snd_usb_lock_shutdown(chip);
+ if (err < 0)
+ return err;
+ err = snd_rme_read_value(chip, SND_RME_GET_STATUS1, status1);
+ snd_usb_unlock_shutdown(chip);
+ return err;
+}
+
+static int snd_rme_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 status1;
+ u32 rate = 0;
+ int idx;
+ int err;
+
+ err = snd_rme_get_status1(kcontrol, &status1);
+ if (err < 0)
+ return err;
+ switch (kcontrol->private_value) {
+ case SND_RME_DOMAIN_SYSTEM:
+ idx = SND_RME_CLK_SYSTEM(status1);
+ if (idx < ARRAY_SIZE(snd_rme_rate_table))
+ rate = snd_rme_rate_table[idx];
+ break;
+ case SND_RME_DOMAIN_AES:
+ idx = SND_RME_CLK_AES(status1);
+ if (idx < SND_RME_RATE_IDX_AES_SPDIF_NUM)
+ rate = snd_rme_rate_table[idx];
+ break;
+ case SND_RME_DOMAIN_SPDIF:
+ idx = SND_RME_CLK_SPDIF(status1);
+ if (idx < SND_RME_RATE_IDX_AES_SPDIF_NUM)
+ rate = snd_rme_rate_table[idx];
+ break;
+ default:
+ return -EINVAL;
+ }
+ ucontrol->value.integer.value[0] = rate;
+ return 0;
+}
+
+static int snd_rme_sync_state_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 status1;
+ int idx = SND_RME_CLOCK_NOLOCK;
+ int err;
+
+ err = snd_rme_get_status1(kcontrol, &status1);
+ if (err < 0)
+ return err;
+ switch (kcontrol->private_value) {
+ case SND_RME_DOMAIN_AES: /* AES */
+ if (status1 & SND_RME_CLK_AES_SYNC)
+ idx = SND_RME_CLOCK_SYNC;
+ else if (status1 & SND_RME_CLK_AES_LOCK)
+ idx = SND_RME_CLOCK_LOCK;
+ break;
+ case SND_RME_DOMAIN_SPDIF: /* SPDIF */
+ if (status1 & SND_RME_CLK_SPDIF_SYNC)
+ idx = SND_RME_CLOCK_SYNC;
+ else if (status1 & SND_RME_CLK_SPDIF_LOCK)
+ idx = SND_RME_CLOCK_LOCK;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ucontrol->value.enumerated.item[0] = idx;
+ return 0;
+}
+
+static int snd_rme_spdif_if_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 status1;
+ int err;
+
+ err = snd_rme_get_status1(kcontrol, &status1);
+ if (err < 0)
+ return err;
+ ucontrol->value.enumerated.item[0] = SND_RME_SPDIF_IF(status1);
+ return 0;
+}
+
+static int snd_rme_spdif_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 status1;
+ int err;
+
+ err = snd_rme_get_status1(kcontrol, &status1);
+ if (err < 0)
+ return err;
+ ucontrol->value.enumerated.item[0] = SND_RME_SPDIF_FORMAT(status1);
+ return 0;
+}
+
+static int snd_rme_sync_source_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 status1;
+ int err;
+
+ err = snd_rme_get_status1(kcontrol, &status1);
+ if (err < 0)
+ return err;
+ ucontrol->value.enumerated.item[0] = SND_RME_CLK_SYNC(status1);
+ return 0;
+}
+
+static int snd_rme_current_freq_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ struct snd_usb_audio *chip = list->mixer->chip;
+ u32 status1;
+ const u64 num = 104857600000000ULL;
+ u32 den;
+ unsigned int freq;
+ int err;
+
+ err = snd_usb_lock_shutdown(chip);
+ if (err < 0)
+ return err;
+ err = snd_rme_read_value(chip, SND_RME_GET_STATUS1, &status1);
+ if (err < 0)
+ goto end;
+ err = snd_rme_read_value(chip, SND_RME_GET_CURRENT_FREQ, &den);
+ if (err < 0)
+ goto end;
+ freq = (den == 0) ? 0 : div64_u64(num, den);
+ freq <<= SND_RME_CLK_FREQMUL(status1);
+ ucontrol->value.integer.value[0] = freq;
+
+end:
+ snd_usb_unlock_shutdown(chip);
+ return err;
+}
+
+static int snd_rme_rate_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ switch (kcontrol->private_value) {
+ case SND_RME_DOMAIN_SYSTEM:
+ uinfo->value.integer.min = 32000;
+ uinfo->value.integer.max = 800000;
+ break;
+ case SND_RME_DOMAIN_AES:
+ case SND_RME_DOMAIN_SPDIF:
+ default:
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 200000;
+ }
+ uinfo->value.integer.step = 0;
+ return 0;
+}
+
+static int snd_rme_sync_state_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const sync_states[] = {
+ "No Lock", "Lock", "Sync"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1,
+ ARRAY_SIZE(sync_states), sync_states);
+}
+
+static int snd_rme_spdif_if_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const spdif_if[] = {
+ "Coaxial", "Optical"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1,
+ ARRAY_SIZE(spdif_if), spdif_if);
+}
+
+static int snd_rme_spdif_format_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const optical_type[] = {
+ "Consumer", "Professional"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1,
+ ARRAY_SIZE(optical_type), optical_type);
+}
+
+static int snd_rme_sync_source_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const sync_sources[] = {
+ "Internal", "AES", "SPDIF", "Internal"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1,
+ ARRAY_SIZE(sync_sources), sync_sources);
+}
+
+static struct snd_kcontrol_new snd_rme_controls[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "AES Rate",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_rate_info,
+ .get = snd_rme_rate_get,
+ .private_value = SND_RME_DOMAIN_AES
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "AES Sync",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_sync_state_info,
+ .get = snd_rme_sync_state_get,
+ .private_value = SND_RME_DOMAIN_AES
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "SPDIF Rate",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_rate_info,
+ .get = snd_rme_rate_get,
+ .private_value = SND_RME_DOMAIN_SPDIF
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "SPDIF Sync",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_sync_state_info,
+ .get = snd_rme_sync_state_get,
+ .private_value = SND_RME_DOMAIN_SPDIF
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "SPDIF Interface",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_spdif_if_info,
+ .get = snd_rme_spdif_if_get,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "SPDIF Format",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_spdif_format_info,
+ .get = snd_rme_spdif_format_get,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Sync Source",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_sync_source_info,
+ .get = snd_rme_sync_source_get
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "System Rate",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_rate_info,
+ .get = snd_rme_rate_get,
+ .private_value = SND_RME_DOMAIN_SYSTEM
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Current Frequency",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_rate_info,
+ .get = snd_rme_current_freq_get
+ }
+};
+
+static int snd_rme_controls_create(struct usb_mixer_interface *mixer)
+{
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(snd_rme_controls); ++i) {
+ err = add_single_ctl_with_resume(mixer, 0,
+ NULL,
+ &snd_rme_controls[i],
+ NULL);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
{
int err = 0;
@@ -1904,6 +2279,12 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x0bda, 0x4014): /* Dell WD15 dock */
err = dell_dock_mixer_init(mixer);
break;
+
+ case USB_ID(0x2a39, 0x3fd2): /* RME ADI-2 Pro */
+ case USB_ID(0x2a39, 0x3fd3): /* RME ADI-2 DAC */
+ case USB_ID(0x2a39, 0x3fd4): /* RME */
+ err = snd_rme_controls_create(mixer);
+ break;
}
return err;
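
The current-frequency readback above divides a fixed constant by a measured
period word and then scales the result by the multiplier field. A worked
example with illustrative values, not taken from real hardware:

	/* worked example for snd_rme_current_freq_get():
	 *   num = 104857600000000 (2^20 * 10^8), device reports den = 2184533333
	 *   div64_u64(num, den) = 48000           -> 48 kHz base rate
	 *   SND_RME_CLK_FREQMUL(status1) == 2     -> 48000 << 2 = 192000 Hz
	 */
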
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 08aa78007020..849953e5775c 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3346,19 +3346,14 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.ifnum = 0,
.type = QUIRK_AUDIO_STANDARD_MIXER,
},
- /* Capture */
- {
- .ifnum = 1,
- .type = QUIRK_IGNORE_INTERFACE,
- },
/* Playback */
{
- .ifnum = 2,
+ .ifnum = 1,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
.data = &(const struct audioformat) {
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels = 2,
- .iface = 2,
+ .iface = 1,
.altsetting = 1,
.altset_idx = 1,
.attributes = UAC_EP_CS_ATTR_FILL_MAX |
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index fa7dca5a68c8..83d76c345940 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -30,7 +30,6 @@
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
-#include <asm/set_memory.h>
#include <sound/core.h>
#include <sound/asoundef.h>
#include <sound/pcm.h>
@@ -1141,8 +1140,7 @@ static int had_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_intelhad *intelhaddata;
- unsigned long addr;
- int pages, buf_size, retval;
+ int buf_size, retval;
intelhaddata = snd_pcm_substream_chip(substream);
buf_size = params_buffer_bytes(hw_params);
@@ -1151,17 +1149,6 @@ static int had_pcm_hw_params(struct snd_pcm_substream *substream,
return retval;
dev_dbg(intelhaddata->dev, "%s:allocated memory = %d\n",
__func__, buf_size);
- /* mark the pages as uncached region */
- addr = (unsigned long) substream->runtime->dma_area;
- pages = (substream->runtime->dma_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
- retval = set_memory_uc(addr, pages);
- if (retval) {
- dev_err(intelhaddata->dev, "set_memory_uc failed.Error:%d\n",
- retval);
- return retval;
- }
- memset(substream->runtime->dma_area, 0, buf_size);
-
return retval;
}
@@ -1171,21 +1158,11 @@ static int had_pcm_hw_params(struct snd_pcm_substream *substream,
static int had_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_intelhad *intelhaddata;
- unsigned long addr;
- u32 pages;
intelhaddata = snd_pcm_substream_chip(substream);
had_do_reset(intelhaddata);
- /* mark back the pages as cached/writeback region before the free */
- if (substream->runtime->dma_area != NULL) {
- addr = (unsigned long) substream->runtime->dma_area;
- pages = (substream->runtime->dma_bytes + PAGE_SIZE - 1) /
- PAGE_SIZE;
- set_memory_wb(addr, pages);
- return snd_pcm_lib_free_pages(substream);
- }
- return 0;
+ return snd_pcm_lib_free_pages(substream);
}
/*
@@ -1860,7 +1837,7 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
* try to allocate 600k buffer as default which is large enough
*/
snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_DEV, NULL,
+ SNDRV_DMA_TYPE_DEV_UC, NULL,
HAD_DEFAULT_BUFFER, HAD_MAX_BUFFER);
/* create controls */
diff --git a/sound/xen/xen_snd_front_alsa.c b/sound/xen/xen_snd_front_alsa.c
index 129180e17db1..2cbd9679aca1 100644
--- a/sound/xen/xen_snd_front_alsa.c
+++ b/sound/xen/xen_snd_front_alsa.c
@@ -637,31 +637,31 @@ static int alsa_pb_fill_silence(struct snd_pcm_substream *substream,
* to know when the buffer can be transferred to the backend.
*/
-static struct snd_pcm_ops snd_drv_alsa_playback_ops = {
- .open = alsa_open,
- .close = alsa_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = alsa_hw_params,
- .hw_free = alsa_hw_free,
- .prepare = alsa_prepare,
- .trigger = alsa_trigger,
- .pointer = alsa_pointer,
- .copy_user = alsa_pb_copy_user,
- .copy_kernel = alsa_pb_copy_kernel,
- .fill_silence = alsa_pb_fill_silence,
+static const struct snd_pcm_ops snd_drv_alsa_playback_ops = {
+ .open = alsa_open,
+ .close = alsa_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = alsa_hw_params,
+ .hw_free = alsa_hw_free,
+ .prepare = alsa_prepare,
+ .trigger = alsa_trigger,
+ .pointer = alsa_pointer,
+ .copy_user = alsa_pb_copy_user,
+ .copy_kernel = alsa_pb_copy_kernel,
+ .fill_silence = alsa_pb_fill_silence,
};
-static struct snd_pcm_ops snd_drv_alsa_capture_ops = {
- .open = alsa_open,
- .close = alsa_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = alsa_hw_params,
- .hw_free = alsa_hw_free,
- .prepare = alsa_prepare,
- .trigger = alsa_trigger,
- .pointer = alsa_pointer,
- .copy_user = alsa_cap_copy_user,
- .copy_kernel = alsa_cap_copy_kernel,
+static const struct snd_pcm_ops snd_drv_alsa_capture_ops = {
+ .open = alsa_open,
+ .close = alsa_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = alsa_hw_params,
+ .hw_free = alsa_hw_free,
+ .prepare = alsa_prepare,
+ .trigger = alsa_trigger,
+ .pointer = alsa_pointer,
+ .copy_user = alsa_cap_copy_user,
+ .copy_kernel = alsa_cap_copy_kernel,
};
static int new_pcm_instance(struct xen_snd_front_card_info *card_info,
diff --git a/tools/Makefile b/tools/Makefile
index be02c8b904db..abb358a70ad0 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -21,6 +21,7 @@ help:
@echo ' leds - LEDs tools'
@echo ' liblockdep - user-space wrapper for kernel locking-validator'
@echo ' bpf - misc BPF tools'
+ @echo ' pci - PCI tools'
@echo ' perf - Linux performance measurement and analysis tool'
@echo ' selftests - various kernel selftests'
@echo ' spi - spi tools'
@@ -59,7 +60,7 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
-cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi: FORCE
+cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci: FORCE
$(call descend,$@)
liblockdep: FORCE
@@ -94,7 +95,7 @@ kvm_stat: FORCE
all: acpi cgroup cpupower gpio hv firewire liblockdep \
perf selftests spi turbostat usb \
virtio vm bpf x86_energy_perf_policy \
- tmon freefall iio objtool kvm_stat wmi
+ tmon freefall iio objtool kvm_stat wmi pci
acpi_install:
$(call descend,power/$(@:_install=),install)
@@ -102,7 +103,7 @@ acpi_install:
cpupower_install:
$(call descend,power/$(@:_install=),install)
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install:
+cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install:
$(call descend,$(@:_install=),install)
liblockdep_install:
@@ -128,7 +129,7 @@ install: acpi_install cgroup_install cpupower_install gpio_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install vm_install bpf_install x86_energy_perf_policy_install \
tmon_install freefall_install objtool_install kvm_stat_install \
- wmi_install
+ wmi_install pci_install
acpi_clean:
$(call descend,power/acpi,clean)
@@ -136,7 +137,7 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
-cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean:
+cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean:
$(call descend,$(@:_clean=),clean)
liblockdep_clean:
@@ -174,6 +175,6 @@ clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
- gpio_clean objtool_clean leds_clean wmi_clean
+ gpio_clean objtool_clean leds_clean wmi_clean pci_clean
.PHONY: FORCE
diff --git a/tools/arch/arm64/include/asm/barrier.h b/tools/arch/arm64/include/asm/barrier.h
index 40bde6b23501..12835ea0e417 100644
--- a/tools/arch/arm64/include/asm/barrier.h
+++ b/tools/arch/arm64/include/asm/barrier.h
@@ -14,4 +14,74 @@
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
+#define smp_store_release(p, v) \
+do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
+ \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("stlrb %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u8 *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("stlrh %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u16 *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile ("stlr %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u32 *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("stlr %1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u64 *)__u.__c) \
+ : "memory"); \
+ break; \
+ default: \
+ /* Only to shut up gcc ... */ \
+ mb(); \
+ break; \
+ } \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ union { typeof(*p) __val; char __c[1]; } __u; \
+ \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("ldarb %w0, %1" \
+ : "=r" (*(__u8 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("ldarh %w0, %1" \
+ : "=r" (*(__u16 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 4: \
+ asm volatile ("ldar %w0, %1" \
+ : "=r" (*(__u32 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("ldar %0, %1" \
+ : "=r" (*(__u64 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ default: \
+ /* Only to shut up gcc ... */ \
+ mb(); \
+ break; \
+ } \
+ __u.__val; \
+})
+
#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
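
These additions give tools/ the classic one-way-barrier pairing: a producer
publishes data with smp_store_release() and the consumer picks it up with
smp_load_acquire(). A minimal sketch of the consumer side of a kernel/user
ring buffer; field names follow struct perf_event_mmap_page, and
process_record() is a hypothetical helper:

	/* sketch: consume records the kernel producer has published */
	static void drain_ring(struct perf_event_mmap_page *pc, char *base,
			       u64 tail)
	{
		u64 head = smp_load_acquire(&pc->data_head); /* data below head is visible */

		while (tail != head)
			tail = process_record(base, tail);   /* hypothetical helper */
		smp_store_release(&pc->data_tail, tail);     /* hand space back */
	}
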
diff --git a/tools/arch/ia64/include/asm/barrier.h b/tools/arch/ia64/include/asm/barrier.h
index d808ee0e77b5..4d471d9511a5 100644
--- a/tools/arch/ia64/include/asm/barrier.h
+++ b/tools/arch/ia64/include/asm/barrier.h
@@ -46,4 +46,17 @@
#define rmb() mb()
#define wmb() mb()
+#define smp_store_release(p, v) \
+do { \
+ barrier(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ barrier(); \
+ ___p1; \
+})
+
#endif /* _TOOLS_LINUX_ASM_IA64_BARRIER_H */
diff --git a/tools/arch/powerpc/include/asm/barrier.h b/tools/arch/powerpc/include/asm/barrier.h
index a634da05bc97..905a2c66d96d 100644
--- a/tools/arch/powerpc/include/asm/barrier.h
+++ b/tools/arch/powerpc/include/asm/barrier.h
@@ -27,4 +27,20 @@
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
+#if defined(__powerpc64__)
+#define smp_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory")
+
+#define smp_store_release(p, v) \
+do { \
+ smp_lwsync(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ smp_lwsync(); \
+ ___p1; \
+})
+#endif /* defined(__powerpc64__) */
#endif /* _TOOLS_LINUX_ASM_POWERPC_BARRIER_H */
diff --git a/tools/arch/s390/include/asm/barrier.h b/tools/arch/s390/include/asm/barrier.h
index 5030c99f47d2..de362fa664d4 100644
--- a/tools/arch/s390/include/asm/barrier.h
+++ b/tools/arch/s390/include/asm/barrier.h
@@ -28,4 +28,17 @@
#define rmb() mb()
#define wmb() mb()
+#define smp_store_release(p, v) \
+do { \
+ barrier(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ barrier(); \
+ ___p1; \
+})
+
#endif /* __TOOLS_LIB_ASM_BARRIER_H */
diff --git a/tools/arch/sparc/include/asm/barrier_64.h b/tools/arch/sparc/include/asm/barrier_64.h
index ba61344287d5..cfb0fdc8ccf0 100644
--- a/tools/arch/sparc/include/asm/barrier_64.h
+++ b/tools/arch/sparc/include/asm/barrier_64.h
@@ -40,4 +40,17 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define rmb() __asm__ __volatile__("":::"memory")
#define wmb() __asm__ __volatile__("":::"memory")
+#define smp_store_release(p, v) \
+do { \
+ barrier(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ barrier(); \
+ ___p1; \
+})
+
#endif /* !(__TOOLS_LINUX_SPARC64_BARRIER_H) */
diff --git a/tools/arch/x86/include/asm/barrier.h b/tools/arch/x86/include/asm/barrier.h
index 8774dee27471..58919868473c 100644
--- a/tools/arch/x86/include/asm/barrier.h
+++ b/tools/arch/x86/include/asm/barrier.h
@@ -26,4 +26,18 @@
#define wmb() asm volatile("sfence" ::: "memory")
#endif
+#if defined(__x86_64__)
+#define smp_store_release(p, v) \
+do { \
+ barrier(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ barrier(); \
+ ___p1; \
+})
+#endif /* defined(__x86_64__) */
#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 86299efa804a..8a6eff9c27f3 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -288,6 +288,7 @@ struct kvm_reinject_control {
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
#define KVM_VCPUEVENT_VALID_SMM 0x00000008
+#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010
/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
@@ -299,7 +300,10 @@ struct kvm_vcpu_events {
__u8 injected;
__u8 nr;
__u8 has_error_code;
- __u8 pad;
+ union {
+ __u8 pad;
+ __u8 pending;
+ };
__u32 error_code;
} exception;
struct {
@@ -322,7 +326,9 @@ struct kvm_vcpu_events {
__u8 smm_inside_nmi;
__u8 latched_init;
} smi;
- __u32 reserved[9];
+ __u8 reserved[27];
+ __u8 exception_has_payload;
+ __u64 exception_payload;
};
/* for KVM_GET/SET_DEBUGREGS */
@@ -377,6 +383,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
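
Userspace can only rely on the new payload fields when the
KVM_VCPUEVENT_VALID_PAYLOAD flag is set. A hedged sketch of a consumer;
handle_payload() is a hypothetical function:

	/* sketch: reading the new exception payload from userspace */
	void check_payload(int vcpu_fd)
	{
		struct kvm_vcpu_events ev;

		ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev);
		if ((ev.flags & KVM_VCPUEVENT_VALID_PAYLOAD) &&
		    ev.exception_has_payload)
			handle_payload(ev.exception.nr, ev.exception_payload);
	}
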
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index a6258bc8ec4f..f55a2daed59b 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -15,13 +15,15 @@ SYNOPSIS
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
*COMMANDS* :=
- { **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
- | **pin** | **help** }
+ { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext**
+ | **delete** | **pin** | **help** }
MAP COMMANDS
=============
| **bpftool** **map { show | list }** [*MAP*]
+| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \
+| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
| **bpftool** **map dump** *MAP*
| **bpftool** **map update** *MAP* **key** *DATA* **value** *VALUE* [*UPDATE_FLAGS*]
| **bpftool** **map lookup** *MAP* **key** *DATA*
@@ -36,6 +38,11 @@ MAP COMMANDS
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
| *VALUE* := { *DATA* | *MAP* | *PROG* }
| *UPDATE_FLAGS* := { **any** | **exist** | **noexist** }
+| *TYPE* := { **hash** | **array** | **prog_array** | **perf_event_array** | **percpu_hash**
+| | **percpu_array** | **stack_trace** | **cgroup_array** | **lru_hash**
+| | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps**
+| | **devmap** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
+| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage** }
DESCRIPTION
===========
@@ -47,6 +54,10 @@ DESCRIPTION
Output will start with map ID followed by map type and
zero or more named attributes (depending on kernel version).
+ **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
+ Create a new map with given parameters and pin it to *bpffs*
+ as *FILE*.
+
**bpftool map dump** *MAP*
Dump all entries in a given *MAP*.
@@ -75,7 +86,9 @@ DESCRIPTION
**bpftool map pin** *MAP* *FILE*
Pin map *MAP* as *FILE*.
- Note: *FILE* must be located in *bpffs* mount.
+ Note: *FILE* must be located in *bpffs* mount. It must not
+ contain a dot character ('.'), which is reserved for future
+ extensions of *bpffs*.
**bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*]
Read events from a BPF_MAP_TYPE_PERF_EVENT_ARRAY map.
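
For the new **map create** command, the underlying operation is an ordinary
BPF_MAP_CREATE followed by a pin. A hedged C sketch of the libbpf-level
equivalent; the attribute and helper names follow tools/lib/bpf of this era
and should be treated as assumptions:

	/* sketch: roughly what "bpftool map create" does under the hood */
	int create_and_pin(void)
	{
		struct bpf_create_map_attr attr = {
			.name		= "example_map",
			.map_type	= BPF_MAP_TYPE_HASH,
			.key_size	= 4,
			.value_size	= 8,
			.max_entries	= 1024,
		};
		int fd = bpf_create_map_xattr(&attr);

		if (fd < 0)
			return fd;
		return bpf_obj_pin(fd, "/sys/fs/bpf/example_map");
	}
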
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
new file mode 100644
index 000000000000..408ec30d8872
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -0,0 +1,139 @@
+================
+bpftool-net
+================
+-------------------------------------------------------------------------------
+tool for inspection of netdev/tc-related bpf program attachments
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **net** *COMMAND*
+
+ *OPTIONS* := { [{ **-j** | **--json** }] [{ **-p** | **--pretty** }] }
+
+ *COMMANDS* :=
+ { **show** | **list** } [ **dev** name ] | **help**
+
+NET COMMANDS
+============
+
+| **bpftool** **net { show | list } [ dev name ]**
+| **bpftool** **net help**
+
+DESCRIPTION
+===========
+ **bpftool net { show | list } [ dev name ]**
+ List bpf program attachments in the kernel networking subsystem.
+
+ Currently, only device driver xdp attachments and tc filter
+ classification/action attachments are implemented, i.e., for
+ program types **BPF_PROG_TYPE_SCHED_CLS**,
+ **BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**.
+ For programs attached to a particular cgroup, e.g.,
+ **BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**,
+ **BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ users can use **bpftool cgroup** to dump cgroup attachments.
+ For sk_{filter, skb, msg, reuseport} and lwt/seg6
+ bpf programs, users should consult other tools, e.g., iproute2.
+
+ The current output will start with all xdp program attachments, followed by
+ all tc class/qdisc bpf program attachments. Both xdp programs and
+ tc programs are ordered based on ifindex number. If multiple bpf
+ programs are attached to the same networking device through **tc filter**,
+ the order will be: first all bpf programs attached to tc classes, then
+ all bpf programs attached to non-clsact qdiscs, and finally all
+ bpf programs attached to root and clsact qdiscs.
+
+ **bpftool net help**
+ Print short help message.
+
+OPTIONS
+=======
+ -h, --help
+ Print short generic help message (similar to **bpftool help**).
+
+ -v, --version
+ Print version number (similar to **bpftool version**).
+
+ -j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+ -p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+EXAMPLES
+========
+
+| **# bpftool net**
+
+::
+
+ xdp:
+ eth0(2) driver id 198
+
+ tc:
+ eth0(2) htb name prefix_matcher.o:[cls_prefix_matcher_htb] id 111727 act []
+ eth0(2) clsact/ingress fbflow_icmp id 130246 act []
+ eth0(2) clsact/egress prefix_matcher.o:[cls_prefix_matcher_clsact] id 111726
+ eth0(2) clsact/egress cls_fg_dscp id 108619 act []
+ eth0(2) clsact/egress fbflow_egress id 130245
+
+|
+| **# bpftool -jp net**
+
+::
+
+ [{
+ "xdp": [{
+ "devname": "eth0",
+ "ifindex": 2,
+ "mode": "driver",
+ "id": 198
+ }
+ ],
+ "tc": [{
+ "devname": "eth0",
+ "ifindex": 2,
+ "kind": "htb",
+ "name": "prefix_matcher.o:[cls_prefix_matcher_htb]",
+ "id": 111727,
+ "act": []
+ },{
+ "devname": "eth0",
+ "ifindex": 2,
+ "kind": "clsact/ingress",
+ "name": "fbflow_icmp",
+ "id": 130246,
+ "act": []
+ },{
+ "devname": "eth0",
+ "ifindex": 2,
+ "kind": "clsact/egress",
+ "name": "prefix_matcher.o:[cls_prefix_matcher_clsact]",
+ "id": 111726,
+ },{
+ "devname": "eth0",
+ "ifindex": 2,
+ "kind": "clsact/egress",
+ "name": "cls_fg_dscp",
+ "id": 108619,
+ "act": []
+ },{
+ "devname": "eth0",
+ "ifindex": 2,
+ "kind": "clsact/egress",
+ "name": "fbflow_egress",
+ "id": 130245,
+ }
+ ]
+ }
+ ]
+
+
+SEE ALSO
+========
+ **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 64156a16d530..ac4e904b10fb 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -25,6 +25,8 @@ MAP COMMANDS
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}]
| **bpftool** **prog pin** *PROG* *FILE*
| **bpftool** **prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
+| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* *MAP*
+| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* *MAP*
| **bpftool** **prog help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
@@ -37,6 +39,7 @@ MAP COMMANDS
| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6**
| }
+| *ATTACH_TYPE* := { **msg_verdict** | **skb_verdict** | **skb_parse** }
DESCRIPTION
@@ -72,7 +75,9 @@ DESCRIPTION
**bpftool prog pin** *PROG* *FILE*
Pin program *PROG* as *FILE*.
- Note: *FILE* must be located in *bpffs* mount.
+ Note: *FILE* must be located in *bpffs* mount. It must not
+ contain a dot character ('.'), which is reserved for future
+ extensions of *bpffs*.
**bpftool prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
Load bpf program from binary *OBJ* and pin as *FILE*.
@@ -88,7 +93,17 @@ DESCRIPTION
If **dev** *NAME* is specified, the program will be loaded onto the
given networking device (offload).
- Note: *FILE* must be located in *bpffs* mount.
+ Note: *FILE* must be located in *bpffs* mount. It must not
+ contain a dot character ('.'), which is reserved for future
+ extensions of *bpffs*.
+
+ **bpftool prog attach** *PROG* *ATTACH_TYPE* *MAP*
+ Attach bpf program *PROG* (with type specified by *ATTACH_TYPE*)
+ to the map *MAP*.
+
+ **bpftool prog detach** *PROG* *ATTACH_TYPE* *MAP*
+ Detach bpf program *PROG* (with type specified by *ATTACH_TYPE*)
+ from the map *MAP*.
**bpftool prog help**
Print short help message.
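The attach/detach commands above wrap a single libbpf call each; a
sketch, assuming prog_fd and map_fd were already resolved from an id
or a pinned path:

    #include <bpf.h>        /* tools/lib/bpf */

    /* Sketch: what "bpftool prog attach PROG msg_verdict MAP" reduces
     * to; prog_fd and map_fd are hypothetical descriptors. Detach is
     * the symmetric bpf_prog_detach2() call. */
    static int attach_msg_verdict(int prog_fd, int map_fd)
    {
            return bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0);
    }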
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index b6f5d560460d..04cd4f92ab89 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -16,22 +16,24 @@ SYNOPSIS
**bpftool** **version**
- *OBJECT* := { **map** | **program** | **cgroup** | **perf** }
+ *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** }
*OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** }
| { **-j** | **--json** } [{ **-p** | **--pretty** }] }
*MAP-COMMANDS* :=
- { **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
- | **pin** | **event_pipe** | **help** }
+ { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext**
+ | **delete** | **pin** | **event_pipe** | **help** }
*PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin**
- | **load** | **help** }
+ | **load** | **attach** | **detach** | **help** }
*CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }
*PERF-COMMANDS* := { **show** | **list** | **help** }
+ *NET-COMMANDS* := { **show** | **list** | **help** }
+
DESCRIPTION
===========
*bpftool* allows for inspection and simple modification of BPF objects
@@ -55,7 +57,11 @@ OPTIONS
-p, --pretty
Generate human-readable JSON output. Implies **-j**.
+ -m, --mapcompat
+ Allow loading maps with unknown map definitions.
+
+
SEE ALSO
========
**bpftool-map**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
- **bpftool-perf**\ (8)
+ **bpftool-perf**\ (8), **bpftool-net**\ (8)
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 74288a2197ab..dac7eff4c7e5 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -46,6 +46,13 @@ CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
-I$(srctree)/tools/lib/bpf \
-I$(srctree)/tools/perf
CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
+ifneq ($(EXTRA_CFLAGS),)
+CFLAGS += $(EXTRA_CFLAGS)
+endif
+ifneq ($(EXTRA_LDFLAGS),)
+LDFLAGS += $(EXTRA_LDFLAGS)
+endif
+
LIBS = -lelf -lbfd -lopcodes $(LIBBPF)
INSTALL ?= install
@@ -90,7 +97,7 @@ $(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
- $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^ $(LIBS)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $^ $(LIBS)
$(OUTPUT)%.o: %.c
$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 598066c40191..3f78e6404589 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -143,7 +143,7 @@ _bpftool_map_update_map_type()
local type
type=$(bpftool -jp map show $keyword $ref | \
command sed -n 's/.*"type": "\(.*\)",$/\1/p')
- printf $type
+ [[ -n $type ]] && printf $type
}
_bpftool_map_update_get_id()
@@ -184,7 +184,7 @@ _bpftool()
# Deal with options
if [[ ${words[cword]} == -* ]]; then
- local c='--version --json --pretty --bpffs'
+ local c='--version --json --pretty --bpffs --mapcompat'
COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
return 0
fi
@@ -292,6 +292,23 @@ _bpftool()
fi
return 0
;;
+ attach|detach)
+ if [[ ${#words[@]} == 7 ]]; then
+ COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
+ return 0
+ fi
+
+ if [[ ${#words[@]} == 6 ]]; then
+ COMPREPLY=( $( compgen -W "msg_verdict skb_verdict skb_parse" -- "$cur" ) )
+ return 0
+ fi
+
+ if [[ $prev == "$command" ]]; then
+ COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
+ return 0
+ fi
+ return 0
+ ;;
load)
local obj
@@ -347,7 +364,7 @@ _bpftool()
;;
*)
[[ $prev == $object ]] && \
- COMPREPLY=( $( compgen -W 'dump help pin load \
+ COMPREPLY=( $( compgen -W 'dump help pin attach detach load \
show list' -- "$cur" ) )
;;
esac
@@ -370,6 +387,42 @@ _bpftool()
;;
esac
;;
+ create)
+ case $prev in
+ $command)
+ _filedir
+ return 0
+ ;;
+ type)
+ COMPREPLY=( $( compgen -W 'hash array prog_array \
+ perf_event_array percpu_hash percpu_array \
+ stack_trace cgroup_array lru_hash \
+ lru_percpu_hash lpm_trie array_of_maps \
+ hash_of_maps devmap sockmap cpumap xskmap \
+ sockhash cgroup_storage reuseport_sockarray \
+ percpu_cgroup_storage' -- \
+ "$cur" ) )
+ return 0
+ ;;
+ key|value|flags|name|entries)
+ return 0
+ ;;
+ dev)
+ _sysfs_get_netdevs
+ return 0
+ ;;
+ *)
+ _bpftool_once_attr 'type'
+ _bpftool_once_attr 'key'
+ _bpftool_once_attr 'value'
+ _bpftool_once_attr 'entries'
+ _bpftool_once_attr 'name'
+ _bpftool_once_attr 'flags'
+ _bpftool_once_attr 'dev'
+ return 0
+ ;;
+ esac
+ ;;
lookup|getnext|delete)
case $prev in
$command)
@@ -483,7 +536,7 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'delete dump getnext help \
- lookup pin event_pipe show list update' -- \
+ lookup pin event_pipe show list update create' -- \
"$cur" ) )
;;
esac
@@ -494,10 +547,10 @@ _bpftool()
_filedir
return 0
;;
- tree)
- _filedir
- return 0
- ;;
+ tree)
+ _filedir
+ return 0
+ ;;
attach|detach)
local ATTACH_TYPES='ingress egress sock_create sock_ops \
device bind4 bind6 post_bind4 post_bind6 connect4 \
@@ -552,6 +605,15 @@ _bpftool()
;;
esac
;;
+ net)
+ case $command in
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help \
+ show list' -- "$cur" ) )
+ ;;
+ esac
+ ;;
esac
} &&
complete -F _bpftool bpftool
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index b3a0709ea7ed..25af85304ebe 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -554,7 +554,9 @@ static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
return read_sysfs_hex_int(full_path);
}
-const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino)
+const char *
+ifindex_to_bfd_params(__u32 ifindex, __u64 ns_dev, __u64 ns_ino,
+ const char **opt)
{
char devname[IF_NAMESIZE];
int vendor_id;
@@ -579,6 +581,7 @@ const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino)
device_id != 0x6000 &&
device_id != 0x6003)
p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
+ *opt = "ctx4";
return "NFP-6xxx";
default:
p_err("Can't get bfd arch name for device vendor id 0x%04x",
@@ -618,3 +621,24 @@ void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
jsonw_string_field(json_wtr, "ifname", name);
jsonw_end_object(json_wtr);
}
+
+int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
+{
+ char *endptr;
+
+ NEXT_ARGP();
+
+ if (*val) {
+ p_err("%s already specified", what);
+ return -1;
+ }
+
+ *val = strtoul(**argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as %s", **argv, what);
+ return -1;
+ }
+ NEXT_ARGP();
+
+ return 0;
+}
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index 87439320ef70..c75ffd9ce2bb 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -77,7 +77,7 @@ static int fprintf_json(void *out, const char *fmt, ...)
}
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
- const char *arch)
+ const char *arch, const char *disassembler_options)
{
disassembler_ftype disassemble;
struct disassemble_info info;
@@ -116,6 +116,8 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
info.arch = bfd_get_arch(bfdf);
info.mach = bfd_get_mach(bfdf);
+ if (disassembler_options)
+ info.disassembler_options = disassembler_options;
info.buffer = image;
info.buffer_length = len;
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index d15a62be6cf0..75a3296dc0bc 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -55,6 +55,7 @@ json_writer_t *json_wtr;
bool pretty_output;
bool json_output;
bool show_pinned;
+int bpf_flags;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
@@ -85,7 +86,7 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
- " OBJECT := { prog | map | cgroup | perf }\n"
+ " OBJECT := { prog | map | cgroup | perf | net }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, bin_name, bin_name);
@@ -215,6 +216,7 @@ static const struct cmd cmds[] = {
{ "map", do_map },
{ "cgroup", do_cgroup },
{ "perf", do_perf },
+ { "net", do_net },
{ "version", do_version },
{ 0 }
};
@@ -319,7 +321,8 @@ static int do_batch(int argc, char **argv)
p_err("reading batch file failed: %s", strerror(errno));
err = -1;
} else {
- p_info("processed %d commands", lines);
+ if (!json_output)
+ printf("processed %d commands\n", lines);
err = 0;
}
err_close:
@@ -340,6 +343,7 @@ int main(int argc, char **argv)
{ "pretty", no_argument, NULL, 'p' },
{ "version", no_argument, NULL, 'V' },
{ "bpffs", no_argument, NULL, 'f' },
+ { "mapcompat", no_argument, NULL, 'm' },
{ 0 }
};
int opt, ret;
@@ -354,7 +358,7 @@ int main(int argc, char **argv)
hash_init(map_table.table);
opterr = 0;
- while ((opt = getopt_long(argc, argv, "Vhpjf",
+ while ((opt = getopt_long(argc, argv, "Vhpjfm",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
@@ -378,6 +382,9 @@ int main(int argc, char **argv)
case 'f':
show_pinned = true;
break;
+ case 'm':
+ bpf_flags = MAPS_RELAX_COMPAT;
+ break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 238e734d75b3..28322ace2856 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -74,7 +74,7 @@
#define HELP_SPEC_PROGRAM \
"PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }"
#define HELP_SPEC_OPTIONS \
- "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} }"
+ "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} | {-m|--mapcompat}"
#define HELP_SPEC_MAP \
"MAP := { id MAP_ID | pinned FILE }"
@@ -89,6 +89,7 @@ extern const char *bin_name;
extern json_writer_t *json_wtr;
extern bool json_output;
extern bool show_pinned;
+extern int bpf_flags;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;
@@ -136,19 +137,23 @@ int do_map(int argc, char **arg);
int do_event_pipe(int argc, char **argv);
int do_cgroup(int argc, char **arg);
int do_perf(int argc, char **arg);
+int do_net(int argc, char **arg);
+int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
int map_parse_fd(int *argc, char ***argv);
int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
- const char *arch);
+ const char *arch, const char *disassembler_options);
void print_data_json(uint8_t *data, size_t len);
void print_hex_data_json(uint8_t *data, size_t len);
unsigned int get_page_size(void);
unsigned int get_possible_cpus(void);
-const char *ifindex_to_bfd_name_ns(__u32 ifindex, __u64 ns_dev, __u64 ns_ino);
+const char *
+ifindex_to_bfd_params(__u32 ifindex, __u64 ns_dev, __u64 ns_ino,
+ const char **opt);
struct btf_dumper {
const struct btf *btf;
@@ -165,4 +170,11 @@ struct btf_dumper {
*/
int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
const void *data);
+
+struct nlattr;
+struct ifinfomsg;
+struct tcmsg;
+int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb);
+int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,
+ const char *devname, int ifindex);
#endif
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index b455930a3eaf..7bf38f0e152e 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -36,6 +36,7 @@
#include <fcntl.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <net/if.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
@@ -71,13 +72,16 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_XSKMAP] = "xskmap",
[BPF_MAP_TYPE_SOCKHASH] = "sockhash",
[BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
+ [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
+ [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
};
static bool map_is_per_cpu(__u32 type)
{
return type == BPF_MAP_TYPE_PERCPU_HASH ||
type == BPF_MAP_TYPE_PERCPU_ARRAY ||
- type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
+ type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+ type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
}
static bool map_is_map_of_maps(__u32 type)
@@ -91,6 +95,17 @@ static bool map_is_map_of_progs(__u32 type)
return type == BPF_MAP_TYPE_PROG_ARRAY;
}
+static int map_type_from_str(const char *type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(map_type_name); i++)
+ /* Don't allow prefixing in case of possible future shadowing */
+ if (map_type_name[i] && !strcmp(map_type_name[i], type))
+ return i;
+ return -1;
+}
+
static void *alloc_value(struct bpf_map_info *info)
{
if (map_is_per_cpu(info->type))
@@ -170,9 +185,28 @@ static int do_dump_btf(const struct btf_dumper *d,
if (ret)
goto err_end_obj;
- jsonw_name(d->jw, "value");
+ if (!map_is_per_cpu(map_info->type)) {
+ jsonw_name(d->jw, "value");
+ ret = btf_dumper_type(d, map_info->btf_value_type_id, value);
+ } else {
+ unsigned int i, n, step;
- ret = btf_dumper_type(d, map_info->btf_value_type_id, value);
+ jsonw_name(d->jw, "values");
+ jsonw_start_array(d->jw);
+ n = get_possible_cpus();
+ step = round_up(map_info->value_size, 8);
+ for (i = 0; i < n; i++) {
+ jsonw_start_object(d->jw);
+ jsonw_int_field(d->jw, "cpu", i);
+ jsonw_name(d->jw, "value");
+ ret = btf_dumper_type(d, map_info->btf_value_type_id,
+ value + i * step);
+ jsonw_end_object(d->jw);
+ if (ret)
+ break;
+ }
+ jsonw_end_array(d->jw);
+ }
err_end_obj:
/* end of key-value pair */
@@ -299,11 +333,40 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
jsonw_end_object(json_wtr);
}
jsonw_end_array(json_wtr);
+ if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = json_wtr,
+ .is_plain_text = false,
+ };
+
+ jsonw_name(json_wtr, "formatted");
+ do_dump_btf(&d, info, key, value);
+ }
}
jsonw_end_object(json_wtr);
}
+static void print_entry_error(struct bpf_map_info *info, unsigned char *key,
+ const char *value)
+{
+ int value_size = strlen(value);
+ bool single_line, break_names;
+
+ break_names = info->key_size > 16 || value_size > 16;
+ single_line = info->key_size + value_size <= 24 && !break_names;
+
+ printf("key:%c", break_names ? '\n' : ' ');
+ fprint_hex(stdout, key, info->key_size, " ");
+
+ printf(single_line ? " " : "\n");
+
+ printf("value:%c%s", break_names ? '\n' : ' ', value);
+
+ printf("\n");
+}
+
static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
unsigned char *value)
{
@@ -626,6 +689,54 @@ static int do_show(int argc, char **argv)
return errno == ENOENT ? 0 : -1;
}
+static int dump_map_elem(int fd, void *key, void *value,
+ struct bpf_map_info *map_info, struct btf *btf,
+ json_writer_t *btf_wtr)
+{
+ int num_elems = 0;
+ int lookup_errno;
+
+ if (!bpf_map_lookup_elem(fd, key, value)) {
+ if (json_output) {
+ print_entry_json(map_info, key, value, btf);
+ } else {
+ if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ do_dump_btf(&d, map_info, key, value);
+ } else {
+ print_entry_plain(map_info, key, value);
+ }
+ num_elems++;
+ }
+ return num_elems;
+ }
+
+ /* lookup error handling */
+ lookup_errno = errno;
+
+ if (map_is_map_of_maps(map_info->type) ||
+ map_is_map_of_progs(map_info->type))
+ return 0;
+
+ if (json_output) {
+ jsonw_name(json_wtr, "key");
+ print_hex_data_json(key, map_info->key_size);
+ jsonw_name(json_wtr, "value");
+ jsonw_start_object(json_wtr);
+ jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
+ jsonw_end_object(json_wtr);
+ } else {
+ print_entry_error(map_info, key, strerror(lookup_errno));
+ }
+
+ return 0;
+}
+
static int do_dump(int argc, char **argv)
{
struct bpf_map_info info = {};
@@ -644,12 +755,6 @@ static int do_dump(int argc, char **argv)
if (fd < 0)
return -1;
- if (map_is_map_of_maps(info.type) || map_is_map_of_progs(info.type)) {
- p_err("Dumping maps of maps and program maps not supported");
- close(fd);
- return -1;
- }
-
key = malloc(info.key_size);
value = alloc_value(&info);
if (!key || !value) {
@@ -687,40 +792,8 @@ static int do_dump(int argc, char **argv)
err = 0;
break;
}
-
- if (!bpf_map_lookup_elem(fd, key, value)) {
- if (json_output)
- print_entry_json(&info, key, value, btf);
- else
- if (btf) {
- struct btf_dumper d = {
- .btf = btf,
- .jw = btf_wtr,
- .is_plain_text = true,
- };
-
- do_dump_btf(&d, &info, key, value);
- } else {
- print_entry_plain(&info, key, value);
- }
- } else {
- if (json_output) {
- jsonw_name(json_wtr, "key");
- print_hex_data_json(key, info.key_size);
- jsonw_name(json_wtr, "value");
- jsonw_start_object(json_wtr);
- jsonw_string_field(json_wtr, "error",
- "can't lookup element");
- jsonw_end_object(json_wtr);
- } else {
- p_info("can't lookup element with key: ");
- fprint_hex(stderr, key, info.key_size, " ");
- fprintf(stderr, "\n");
- }
- }
-
+ num_elems += dump_map_elem(fd, key, value, &info, btf, btf_wtr);
prev_key = key;
- num_elems++;
}
if (json_output)
@@ -997,6 +1070,92 @@ static int do_pin(int argc, char **argv)
return err;
}
+static int do_create(int argc, char **argv)
+{
+ struct bpf_create_map_attr attr = { NULL, };
+ const char *pinfile;
+ int err, fd;
+
+ if (!REQ_ARGS(7))
+ return -1;
+ pinfile = GET_ARG();
+
+ while (argc) {
+ if (!REQ_ARGS(2))
+ return -1;
+
+ if (is_prefix(*argv, "type")) {
+ NEXT_ARG();
+
+ if (attr.map_type) {
+ p_err("map type already specified");
+ return -1;
+ }
+
+ attr.map_type = map_type_from_str(*argv);
+ if ((int)attr.map_type < 0) {
+ p_err("unrecognized map type: %s", *argv);
+ return -1;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "name")) {
+ NEXT_ARG();
+ attr.name = GET_ARG();
+ } else if (is_prefix(*argv, "key")) {
+ if (parse_u32_arg(&argc, &argv, &attr.key_size,
+ "key size"))
+ return -1;
+ } else if (is_prefix(*argv, "value")) {
+ if (parse_u32_arg(&argc, &argv, &attr.value_size,
+ "value size"))
+ return -1;
+ } else if (is_prefix(*argv, "entries")) {
+ if (parse_u32_arg(&argc, &argv, &attr.max_entries,
+ "max entries"))
+ return -1;
+ } else if (is_prefix(*argv, "flags")) {
+ if (parse_u32_arg(&argc, &argv, &attr.map_flags,
+ "flags"))
+ return -1;
+ } else if (is_prefix(*argv, "dev")) {
+ NEXT_ARG();
+
+ if (attr.map_ifindex) {
+ p_err("offload device already specified");
+ return -1;
+ }
+
+ attr.map_ifindex = if_nametoindex(*argv);
+ if (!attr.map_ifindex) {
+ p_err("unrecognized netdevice '%s': %s",
+ *argv, strerror(errno));
+ return -1;
+ }
+ NEXT_ARG();
+ }
+ }
+
+ if (!attr.name) {
+ p_err("map name not specified");
+ return -1;
+ }
+
+ fd = bpf_create_map_xattr(&attr);
+ if (fd < 0) {
+ p_err("map create failed: %s", strerror(errno));
+ return -1;
+ }
+
+ err = do_pin_fd(fd, pinfile);
+ close(fd);
+ if (err)
+ return err;
+
+ if (json_output)
+ jsonw_null(json_wtr);
+ return 0;
+}
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -1006,6 +1165,9 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %s %s { show | list } [MAP]\n"
+ " %s %s create FILE type TYPE key KEY_SIZE value VALUE_SIZE \\\n"
+ " entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
+ " [dev NAME]\n"
" %s %s dump MAP\n"
" %s %s update MAP key DATA value VALUE [UPDATE_FLAGS]\n"
" %s %s lookup MAP key DATA\n"
@@ -1020,11 +1182,17 @@ static int do_help(int argc, char **argv)
" " HELP_SPEC_PROGRAM "\n"
" VALUE := { DATA | MAP | PROG }\n"
" UPDATE_FLAGS := { any | exist | noexist }\n"
+ " TYPE := { hash | array | prog_array | perf_event_array | percpu_hash |\n"
+ " percpu_array | stack_trace | cgroup_array | lru_hash |\n"
+ " lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n"
+ " devmap | sockmap | cpumap | xskmap | sockhash |\n"
+ " cgroup_storage | reuseport_sockarray | percpu_cgroup_storage }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2]);
return 0;
}
@@ -1040,6 +1208,7 @@ static const struct cmd cmds[] = {
{ "delete", do_delete },
{ "pin", do_pin },
{ "event_pipe", do_event_pipe },
+ { "create", do_create },
{ 0 }
};
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index 6d41323be291..bdaf4062e26e 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -50,15 +50,17 @@ static void int_exit(int signo)
stop = true;
}
-static enum bpf_perf_event_ret print_bpf_output(void *event, void *priv)
+static enum bpf_perf_event_ret
+print_bpf_output(struct perf_event_header *event, void *private_data)
{
- struct event_ring_info *ring = priv;
- struct perf_event_sample *e = event;
+ struct perf_event_sample *e = container_of(event, struct perf_event_sample,
+ header);
+ struct event_ring_info *ring = private_data;
struct {
struct perf_event_header header;
__u64 id;
__u64 lost;
- } *lost = event;
+ } *lost = (typeof(lost))event;
if (json_output) {
jsonw_start_object(json_wtr);
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
new file mode 100644
index 000000000000..d441bb7035ca
--- /dev/null
+++ b/tools/bpf/bpftool/net.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (C) 2018 Facebook
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <libbpf.h>
+#include <net/if.h>
+#include <linux/if.h>
+#include <linux/rtnetlink.h>
+#include <linux/tc_act/tc_bpf.h>
+#include <sys/socket.h>
+
+#include <bpf.h>
+#include <nlattr.h>
+#include "main.h"
+#include "netlink_dumper.h"
+
+struct ip_devname_ifindex {
+ char devname[64];
+ int ifindex;
+};
+
+struct bpf_netdev_t {
+ struct ip_devname_ifindex *devices;
+ int used_len;
+ int array_len;
+ int filter_idx;
+};
+
+struct tc_kind_handle {
+ char kind[64];
+ int handle;
+};
+
+struct bpf_tcinfo_t {
+ struct tc_kind_handle *handle_array;
+ int used_len;
+ int array_len;
+ bool is_qdisc;
+};
+
+struct bpf_filter_t {
+ const char *kind;
+ const char *devname;
+ int ifindex;
+};
+
+static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
+{
+ struct bpf_netdev_t *netinfo = cookie;
+ struct ifinfomsg *ifinfo = msg;
+
+ if (netinfo->filter_idx > 0 && netinfo->filter_idx != ifinfo->ifi_index)
+ return 0;
+
+ if (netinfo->used_len == netinfo->array_len) {
+ netinfo->devices = realloc(netinfo->devices,
+ (netinfo->array_len + 16) *
+ sizeof(struct ip_devname_ifindex));
+ if (!netinfo->devices)
+ return -ENOMEM;
+
+ netinfo->array_len += 16;
+ }
+ netinfo->devices[netinfo->used_len].ifindex = ifinfo->ifi_index;
+ snprintf(netinfo->devices[netinfo->used_len].devname,
+ sizeof(netinfo->devices[netinfo->used_len].devname),
+ "%s",
+ tb[IFLA_IFNAME]
+ ? libbpf_nla_getattr_str(tb[IFLA_IFNAME])
+ : "");
+ netinfo->used_len++;
+
+ return do_xdp_dump(ifinfo, tb);
+}
+
+static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
+{
+ struct bpf_tcinfo_t *tcinfo = cookie;
+ struct tcmsg *info = msg;
+
+ if (tcinfo->is_qdisc) {
+ /* skip clsact qdisc */
+ if (tb[TCA_KIND] &&
+ strcmp(libbpf_nla_data(tb[TCA_KIND]), "clsact") == 0)
+ return 0;
+ if (info->tcm_handle == 0)
+ return 0;
+ }
+
+ if (tcinfo->used_len == tcinfo->array_len) {
+ tcinfo->handle_array = realloc(tcinfo->handle_array,
+ (tcinfo->array_len + 16) * sizeof(struct tc_kind_handle));
+ if (!tcinfo->handle_array)
+ return -ENOMEM;
+
+ tcinfo->array_len += 16;
+ }
+ tcinfo->handle_array[tcinfo->used_len].handle = info->tcm_handle;
+ snprintf(tcinfo->handle_array[tcinfo->used_len].kind,
+ sizeof(tcinfo->handle_array[tcinfo->used_len].kind),
+ "%s",
+ tb[TCA_KIND]
+ ? libbpf_nla_getattr_str(tb[TCA_KIND])
+ : "unknown");
+ tcinfo->used_len++;
+
+ return 0;
+}
+
+static int dump_filter_nlmsg(void *cookie, void *msg, struct nlattr **tb)
+{
+ const struct bpf_filter_t *filter_info = cookie;
+
+ return do_filter_dump((struct tcmsg *)msg, tb, filter_info->kind,
+ filter_info->devname, filter_info->ifindex);
+}
+
+static int show_dev_tc_bpf(int sock, unsigned int nl_pid,
+ struct ip_devname_ifindex *dev)
+{
+ struct bpf_filter_t filter_info;
+ struct bpf_tcinfo_t tcinfo;
+ int i, handle, ret = 0;
+
+ tcinfo.handle_array = NULL;
+ tcinfo.used_len = 0;
+ tcinfo.array_len = 0;
+
+ tcinfo.is_qdisc = false;
+ ret = libbpf_nl_get_class(sock, nl_pid, dev->ifindex,
+ dump_class_qdisc_nlmsg, &tcinfo);
+ if (ret)
+ goto out;
+
+ tcinfo.is_qdisc = true;
+ ret = libbpf_nl_get_qdisc(sock, nl_pid, dev->ifindex,
+ dump_class_qdisc_nlmsg, &tcinfo);
+ if (ret)
+ goto out;
+
+ filter_info.devname = dev->devname;
+ filter_info.ifindex = dev->ifindex;
+ for (i = 0; i < tcinfo.used_len; i++) {
+ filter_info.kind = tcinfo.handle_array[i].kind;
+ ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex,
+ tcinfo.handle_array[i].handle,
+ dump_filter_nlmsg, &filter_info);
+ if (ret)
+ goto out;
+ }
+
+ /* root, ingress and egress handle */
+ handle = TC_H_ROOT;
+ filter_info.kind = "root";
+ ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
+ if (ret)
+ goto out;
+
+ handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ filter_info.kind = "clsact/ingress";
+ ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
+ if (ret)
+ goto out;
+
+ handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS);
+ filter_info.kind = "clsact/egress";
+ ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
+ if (ret)
+ goto out;
+
+out:
+ free(tcinfo.handle_array);
+	return ret;
+}
+
+static int do_show(int argc, char **argv)
+{
+ int i, sock, ret, filter_idx = -1;
+ struct bpf_netdev_t dev_array;
+ unsigned int nl_pid;
+ char err_buf[256];
+
+ if (argc == 2) {
+ if (strcmp(argv[0], "dev") != 0)
+ usage();
+ filter_idx = if_nametoindex(argv[1]);
+ if (filter_idx == 0) {
+ fprintf(stderr, "invalid dev name %s\n", argv[1]);
+ return -1;
+ }
+ } else if (argc != 0) {
+ usage();
+ }
+
+ sock = libbpf_netlink_open(&nl_pid);
+ if (sock < 0) {
+ fprintf(stderr, "failed to open netlink sock\n");
+ return -1;
+ }
+
+ dev_array.devices = NULL;
+ dev_array.used_len = 0;
+ dev_array.array_len = 0;
+ dev_array.filter_idx = filter_idx;
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ NET_START_OBJECT;
+ NET_START_ARRAY("xdp", "%s:\n");
+ ret = libbpf_nl_get_link(sock, nl_pid, dump_link_nlmsg, &dev_array);
+ NET_END_ARRAY("\n");
+
+ if (!ret) {
+ NET_START_ARRAY("tc", "%s:\n");
+ for (i = 0; i < dev_array.used_len; i++) {
+ ret = show_dev_tc_bpf(sock, nl_pid,
+ &dev_array.devices[i]);
+ if (ret)
+ break;
+ }
+ NET_END_ARRAY("\n");
+ }
+ NET_END_OBJECT;
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ if (ret) {
+ if (json_output)
+ jsonw_null(json_wtr);
+ libbpf_strerror(ret, err_buf, sizeof(err_buf));
+ fprintf(stderr, "Error: %s\n", err_buf);
+ }
+ free(dev_array.devices);
+ close(sock);
+ return ret;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s %s { show | list } [dev <devname>]\n"
+ " %s %s help\n"
+ "Note: Only xdp and tc attachments are supported now.\n"
+ " For progs attached to cgroups, use \"bpftool cgroup\"\n"
+ " to dump program attachments. For program types\n"
+ " sk_{filter,skb,msg,reuseport} and lwt/seg6, please\n"
+ " consult iproute2.\n",
+ bin_name, argv[-2], bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_net(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/netlink_dumper.c b/tools/bpf/bpftool/netlink_dumper.c
new file mode 100644
index 000000000000..4e9f4531269f
--- /dev/null
+++ b/tools/bpf/bpftool/netlink_dumper.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (C) 2018 Facebook
+
+#include <stdlib.h>
+#include <string.h>
+#include <libbpf.h>
+#include <linux/rtnetlink.h>
+#include <linux/tc_act/tc_bpf.h>
+
+#include <nlattr.h>
+#include "main.h"
+#include "netlink_dumper.h"
+
+static void xdp_dump_prog_id(struct nlattr **tb, int attr,
+ const char *mode,
+ bool new_json_object)
+{
+ if (!tb[attr])
+ return;
+
+ if (new_json_object)
+ NET_START_OBJECT
+ NET_DUMP_STR("mode", " %s", mode);
+ NET_DUMP_UINT("id", " id %u", libbpf_nla_getattr_u32(tb[attr]))
+ if (new_json_object)
+ NET_END_OBJECT
+}
+
+static int do_xdp_dump_one(struct nlattr *attr, unsigned int ifindex,
+ const char *name)
+{
+ struct nlattr *tb[IFLA_XDP_MAX + 1];
+ unsigned char mode;
+
+ if (libbpf_nla_parse_nested(tb, IFLA_XDP_MAX, attr, NULL) < 0)
+ return -1;
+
+ if (!tb[IFLA_XDP_ATTACHED])
+ return 0;
+
+ mode = libbpf_nla_getattr_u8(tb[IFLA_XDP_ATTACHED]);
+ if (mode == XDP_ATTACHED_NONE)
+ return 0;
+
+ NET_START_OBJECT;
+ if (name)
+ NET_DUMP_STR("devname", "%s", name);
+ NET_DUMP_UINT("ifindex", "(%d)", ifindex);
+
+ if (mode == XDP_ATTACHED_MULTI) {
+ if (json_output) {
+ jsonw_name(json_wtr, "multi_attachments");
+ jsonw_start_array(json_wtr);
+ }
+ xdp_dump_prog_id(tb, IFLA_XDP_SKB_PROG_ID, "generic", true);
+ xdp_dump_prog_id(tb, IFLA_XDP_DRV_PROG_ID, "driver", true);
+ xdp_dump_prog_id(tb, IFLA_XDP_HW_PROG_ID, "offload", true);
+ if (json_output)
+ jsonw_end_array(json_wtr);
+ } else if (mode == XDP_ATTACHED_DRV) {
+ xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "driver", false);
+ } else if (mode == XDP_ATTACHED_SKB) {
+ xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "generic", false);
+ } else if (mode == XDP_ATTACHED_HW) {
+ xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "offload", false);
+ }
+
+ NET_END_OBJECT_FINAL;
+ return 0;
+}
+
+int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb)
+{
+ if (!tb[IFLA_XDP])
+ return 0;
+
+ return do_xdp_dump_one(tb[IFLA_XDP], ifinfo->ifi_index,
+ libbpf_nla_getattr_str(tb[IFLA_IFNAME]));
+}
+
+static int do_bpf_dump_one_act(struct nlattr *attr)
+{
+ struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+
+ if (libbpf_nla_parse_nested(tb, TCA_ACT_BPF_MAX, attr, NULL) < 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ if (!tb[TCA_ACT_BPF_PARMS])
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ NET_START_OBJECT_NESTED2;
+ if (tb[TCA_ACT_BPF_NAME])
+ NET_DUMP_STR("name", "%s",
+ libbpf_nla_getattr_str(tb[TCA_ACT_BPF_NAME]));
+ if (tb[TCA_ACT_BPF_ID])
+ NET_DUMP_UINT("id", " id %u",
+ libbpf_nla_getattr_u32(tb[TCA_ACT_BPF_ID]));
+ NET_END_OBJECT_NESTED;
+ return 0;
+}
+
+static int do_dump_one_act(struct nlattr *attr)
+{
+ struct nlattr *tb[TCA_ACT_MAX + 1];
+
+ if (!attr)
+ return 0;
+
+ if (libbpf_nla_parse_nested(tb, TCA_ACT_MAX, attr, NULL) < 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ if (tb[TCA_ACT_KIND] &&
+ strcmp(libbpf_nla_data(tb[TCA_ACT_KIND]), "bpf") == 0)
+ return do_bpf_dump_one_act(tb[TCA_ACT_OPTIONS]);
+
+ return 0;
+}
+
+static int do_bpf_act_dump(struct nlattr *attr)
+{
+ struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
+ int act, ret;
+
+ if (libbpf_nla_parse_nested(tb, TCA_ACT_MAX_PRIO, attr, NULL) < 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ NET_START_ARRAY("act", " %s [");
+ for (act = 0; act <= TCA_ACT_MAX_PRIO; act++) {
+ ret = do_dump_one_act(tb[act]);
+ if (ret)
+ break;
+ }
+ NET_END_ARRAY("] ");
+
+ return ret;
+}
+
+static int do_bpf_filter_dump(struct nlattr *attr)
+{
+ struct nlattr *tb[TCA_BPF_MAX + 1];
+ int ret;
+
+ if (libbpf_nla_parse_nested(tb, TCA_BPF_MAX, attr, NULL) < 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ if (tb[TCA_BPF_NAME])
+ NET_DUMP_STR("name", " %s",
+ libbpf_nla_getattr_str(tb[TCA_BPF_NAME]));
+ if (tb[TCA_BPF_ID])
+ NET_DUMP_UINT("id", " id %u",
+ libbpf_nla_getattr_u32(tb[TCA_BPF_ID]));
+ if (tb[TCA_BPF_ACT]) {
+ ret = do_bpf_act_dump(tb[TCA_BPF_ACT]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int do_filter_dump(struct tcmsg *info, struct nlattr **tb, const char *kind,
+ const char *devname, int ifindex)
+{
+ int ret = 0;
+
+ if (tb[TCA_OPTIONS] &&
+ strcmp(libbpf_nla_data(tb[TCA_KIND]), "bpf") == 0) {
+ NET_START_OBJECT;
+ if (devname[0] != '\0')
+ NET_DUMP_STR("devname", "%s", devname);
+ NET_DUMP_UINT("ifindex", "(%u)", ifindex);
+ NET_DUMP_STR("kind", " %s", kind);
+ ret = do_bpf_filter_dump(tb[TCA_OPTIONS]);
+ NET_END_OBJECT_FINAL;
+ }
+
+ return ret;
+}
diff --git a/tools/bpf/bpftool/netlink_dumper.h b/tools/bpf/bpftool/netlink_dumper.h
new file mode 100644
index 000000000000..e3516b586a34
--- /dev/null
+++ b/tools/bpf/bpftool/netlink_dumper.h
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (C) 2018 Facebook
+
+#ifndef _NETLINK_DUMPER_H_
+#define _NETLINK_DUMPER_H_
+
+#define NET_START_OBJECT \
+{ \
+ if (json_output) \
+ jsonw_start_object(json_wtr); \
+}
+
+#define NET_START_OBJECT_NESTED(name) \
+{ \
+ if (json_output) { \
+ jsonw_name(json_wtr, name); \
+ jsonw_start_object(json_wtr); \
+ } else { \
+ fprintf(stdout, "%s {", name); \
+ } \
+}
+
+#define NET_START_OBJECT_NESTED2 \
+{ \
+ if (json_output) \
+ jsonw_start_object(json_wtr); \
+ else \
+ fprintf(stdout, "{"); \
+}
+
+#define NET_END_OBJECT_NESTED \
+{ \
+ if (json_output) \
+ jsonw_end_object(json_wtr); \
+ else \
+ fprintf(stdout, "}"); \
+}
+
+#define NET_END_OBJECT \
+{ \
+ if (json_output) \
+ jsonw_end_object(json_wtr); \
+}
+
+#define NET_END_OBJECT_FINAL \
+{ \
+ if (json_output) \
+ jsonw_end_object(json_wtr); \
+ else \
+ fprintf(stdout, "\n"); \
+}
+
+#define NET_START_ARRAY(name, fmt_str) \
+{ \
+ if (json_output) { \
+ jsonw_name(json_wtr, name); \
+ jsonw_start_array(json_wtr); \
+ } else { \
+ fprintf(stdout, fmt_str, name); \
+ } \
+}
+
+#define NET_END_ARRAY(endstr) \
+{ \
+ if (json_output) \
+ jsonw_end_array(json_wtr); \
+ else \
+ fprintf(stdout, "%s", endstr); \
+}
+
+#define NET_DUMP_UINT(name, fmt_str, val) \
+{ \
+ if (json_output) \
+ jsonw_uint_field(json_wtr, name, val); \
+ else \
+ fprintf(stdout, fmt_str, val); \
+}
+
+#define NET_DUMP_STR(name, fmt_str, str) \
+{ \
+ if (json_output) \
+ jsonw_string_field(json_wtr, name, str);\
+ else \
+ fprintf(stdout, fmt_str, str); \
+}
+
+#define NET_DUMP_STR_ONLY(str) \
+{ \
+ if (json_output) \
+ jsonw_string(json_wtr, str); \
+ else \
+ fprintf(stdout, "%s ", str); \
+}
+
+#endif
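Each macro above switches between the shared JSON writer and plain
fprintf() output depending on json_output. A short sketch of how a
dumper drives them; with JSON enabled this produces roughly
{"devname":"eth0","ifindex":2}, otherwise the plain string "eth0(2)"
followed by a newline:

    /* Sketch: typical macro usage from a dumper function. */
    static void dump_one_dev_example(void)
    {
            NET_START_OBJECT;
            NET_DUMP_STR("devname", "%s", "eth0");
            NET_DUMP_UINT("ifindex", "(%u)", 2);
            NET_END_OBJECT_FINAL;
    }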
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index dce960d22106..5302ee282409 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -74,8 +74,29 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
[BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
[BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
+ [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
};
+static const char * const attach_type_strings[] = {
+ [BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
+ [BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
+ [BPF_SK_MSG_VERDICT] = "msg_verdict",
+ [__MAX_BPF_ATTACH_TYPE] = NULL,
+};
+
+enum bpf_attach_type parse_attach_type(const char *str)
+{
+ enum bpf_attach_type type;
+
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ if (attach_type_strings[type] &&
+ is_prefix(str, attach_type_strings[type]))
+ return type;
+ }
+
+ return __MAX_BPF_ATTACH_TYPE;
+}
+
static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
{
struct timespec real_time_ts, boot_time_ts;
@@ -428,6 +449,7 @@ static int do_dump(int argc, char **argv)
unsigned long *func_ksyms = NULL;
struct bpf_prog_info info = {};
unsigned int *func_lens = NULL;
+ const char *disasm_opt = NULL;
unsigned int nr_func_ksyms;
unsigned int nr_func_lens;
struct dump_data dd = {};
@@ -586,9 +608,10 @@ static int do_dump(int argc, char **argv)
const char *name = NULL;
if (info.ifindex) {
- name = ifindex_to_bfd_name_ns(info.ifindex,
- info.netns_dev,
- info.netns_ino);
+ name = ifindex_to_bfd_params(info.ifindex,
+ info.netns_dev,
+ info.netns_ino,
+ &disasm_opt);
if (!name)
goto err_free;
}
@@ -630,7 +653,8 @@ static int do_dump(int argc, char **argv)
printf("%s:\n", sym_name);
}
- disasm_print_insn(img, lens[i], opcodes, name);
+ disasm_print_insn(img, lens[i], opcodes, name,
+ disasm_opt);
img += lens[i];
if (json_output)
@@ -642,7 +666,8 @@ static int do_dump(int argc, char **argv)
if (json_output)
jsonw_end_array(json_wtr);
} else {
- disasm_print_insn(buf, *member_len, opcodes, name);
+ disasm_print_insn(buf, *member_len, opcodes, name,
+ disasm_opt);
}
} else if (visual) {
if (json_output)
@@ -696,6 +721,77 @@ int map_replace_compar(const void *p1, const void *p2)
return a->idx - b->idx;
}
+static int do_attach(int argc, char **argv)
+{
+ enum bpf_attach_type attach_type;
+ int err, mapfd, progfd;
+
+ if (!REQ_ARGS(5)) {
+ p_err("too few parameters for map attach");
+ return -EINVAL;
+ }
+
+ progfd = prog_parse_fd(&argc, &argv);
+ if (progfd < 0)
+ return progfd;
+
+ attach_type = parse_attach_type(*argv);
+ if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+ p_err("invalid attach type");
+ return -EINVAL;
+ }
+ NEXT_ARG();
+
+ mapfd = map_parse_fd(&argc, &argv);
+ if (mapfd < 0)
+ return mapfd;
+
+ err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
+ if (err) {
+ p_err("failed prog attach to map");
+ return -EINVAL;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+ return 0;
+}
+
+static int do_detach(int argc, char **argv)
+{
+ enum bpf_attach_type attach_type;
+ int err, mapfd, progfd;
+
+ if (!REQ_ARGS(5)) {
+ p_err("too few parameters for map detach");
+ return -EINVAL;
+ }
+
+ progfd = prog_parse_fd(&argc, &argv);
+ if (progfd < 0)
+ return progfd;
+
+ attach_type = parse_attach_type(*argv);
+ if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+ p_err("invalid attach type");
+ return -EINVAL;
+ }
+ NEXT_ARG();
+
+ mapfd = map_parse_fd(&argc, &argv);
+ if (mapfd < 0)
+ return mapfd;
+
+ err = bpf_prog_detach2(progfd, mapfd, attach_type);
+ if (err) {
+ p_err("failed prog detach from map");
+ return -EINVAL;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+ return 0;
+}
static int do_load(int argc, char **argv)
{
enum bpf_attach_type expected_attach_type;
@@ -816,7 +912,7 @@ static int do_load(int argc, char **argv)
}
}
- obj = bpf_object__open_xattr(&attr);
+ obj = __bpf_object__open_xattr(&attr, bpf_flags);
if (IS_ERR_OR_NULL(obj)) {
p_err("failed to open object file");
goto err_free_reuse_maps;
@@ -941,6 +1037,8 @@ static int do_help(int argc, char **argv)
" %s %s pin PROG FILE\n"
" %s %s load OBJ FILE [type TYPE] [dev NAME] \\\n"
" [map { idx IDX | name NAME } MAP]\n"
+ " %s %s attach PROG ATTACH_TYPE MAP\n"
+ " %s %s detach PROG ATTACH_TYPE MAP\n"
" %s %s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
@@ -952,10 +1050,12 @@ static int do_help(int argc, char **argv)
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
" cgroup/sendmsg4 | cgroup/sendmsg6 }\n"
+ " ATTACH_TYPE := { msg_verdict | skb_verdict | skb_parse }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
@@ -967,6 +1067,8 @@ static const struct cmd cmds[] = {
{ "dump", do_dump },
{ "pin", do_pin },
{ "load", do_load },
+ { "attach", do_attach },
+ { "detach", do_detach },
{ 0 }
};
diff --git a/tools/crypto/getstat.c b/tools/crypto/getstat.c
new file mode 100644
index 000000000000..24115173a483
--- /dev/null
+++ b/tools/crypto/getstat.c
@@ -0,0 +1,294 @@
+/* Heavily copied from libkcapi 2015 - 2017, Stephan Mueller <smueller@chronox.de> */
+#include <errno.h>
+#include <linux/cryptouser.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#define CR_RTA(x) ((struct rtattr *)(((char *)(x)) + NLMSG_ALIGN(sizeof(struct crypto_user_alg))))
+
+static int get_stat(const char *drivername)
+{
+ struct {
+ struct nlmsghdr n;
+ struct crypto_user_alg cru;
+ } req;
+ struct sockaddr_nl nl;
+ int sd = 0, ret;
+ socklen_t addr_len;
+ struct iovec iov;
+ struct msghdr msg;
+ char buf[4096];
+ struct nlmsghdr *res_n = (struct nlmsghdr *)buf;
+ struct crypto_user_alg *cru_res = NULL;
+ int res_len = 0;
+ struct rtattr *tb[CRYPTOCFGA_MAX + 1];
+ struct rtattr *rta;
+ struct nlmsgerr *errmsg;
+
+ memset(&req, 0, sizeof(req));
+ memset(&buf, 0, sizeof(buf));
+ memset(&msg, 0, sizeof(msg));
+
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.cru));
+ req.n.nlmsg_flags = NLM_F_REQUEST;
+ req.n.nlmsg_type = CRYPTO_MSG_GETSTAT;
+ req.n.nlmsg_seq = time(NULL);
+
+	strncpy(req.cru.cru_driver_name, drivername,
+		sizeof(req.cru.cru_driver_name) - 1);
+
+ sd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
+ if (sd < 0) {
+ fprintf(stderr, "Netlink error: cannot open netlink socket");
+ return -errno;
+ }
+ memset(&nl, 0, sizeof(nl));
+ nl.nl_family = AF_NETLINK;
+ if (bind(sd, (struct sockaddr *)&nl, sizeof(nl)) < 0) {
+ ret = -errno;
+ fprintf(stderr, "Netlink error: cannot bind netlink socket");
+ goto out;
+ }
+
+ /* sanity check that netlink socket was successfully opened */
+ addr_len = sizeof(nl);
+ if (getsockname(sd, (struct sockaddr *)&nl, &addr_len) < 0) {
+ ret = -errno;
+ printf("Netlink error: cannot getsockname");
+ goto out;
+ }
+ if (addr_len != sizeof(nl)) {
+ ret = -errno;
+ printf("Netlink error: wrong address length %d", addr_len);
+ goto out;
+ }
+ if (nl.nl_family != AF_NETLINK) {
+ ret = -errno;
+ printf("Netlink error: wrong address family %d",
+ nl.nl_family);
+ goto out;
+ }
+
+ memset(&nl, 0, sizeof(nl));
+ nl.nl_family = AF_NETLINK;
+ iov.iov_base = (void *)&req.n;
+ iov.iov_len = req.n.nlmsg_len;
+ msg.msg_name = &nl;
+ msg.msg_namelen = sizeof(nl);
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ if (sendmsg(sd, &msg, 0) < 0) {
+ ret = -errno;
+ printf("Netlink error: sendmsg failed");
+ goto out;
+ }
+ memset(buf, 0, sizeof(buf));
+ iov.iov_base = buf;
+ while (1) {
+ iov.iov_len = sizeof(buf);
+ ret = recvmsg(sd, &msg, 0);
+ if (ret < 0) {
+ if (errno == EINTR || errno == EAGAIN)
+ continue;
+ ret = -errno;
+ printf("Netlink error: netlink receive error");
+ goto out;
+ }
+ if (ret == 0) {
+ ret = -errno;
+ printf("Netlink error: no data");
+ goto out;
+ }
+ if (ret > sizeof(buf)) {
+ ret = -errno;
+ printf("Netlink error: received too much data");
+ goto out;
+ }
+ break;
+ }
+
+ ret = -EFAULT;
+ res_len = res_n->nlmsg_len;
+ if (res_n->nlmsg_type == NLMSG_ERROR) {
+ errmsg = NLMSG_DATA(res_n);
+ fprintf(stderr, "Fail with %d\n", errmsg->error);
+ ret = errmsg->error;
+ goto out;
+ }
+
+ if (res_n->nlmsg_type == CRYPTO_MSG_GETSTAT) {
+ cru_res = NLMSG_DATA(res_n);
+ res_len -= NLMSG_SPACE(sizeof(*cru_res));
+ }
+ if (res_len < 0) {
+ printf("Netlink error: nlmsg len %d\n", res_len);
+ goto out;
+ }
+
+ if (!cru_res) {
+ ret = -EFAULT;
+ printf("Netlink error: no cru_res\n");
+ goto out;
+ }
+
+ rta = CR_RTA(cru_res);
+ memset(tb, 0, sizeof(struct rtattr *) * (CRYPTOCFGA_MAX + 1));
+ while (RTA_OK(rta, res_len)) {
+ if ((rta->rta_type <= CRYPTOCFGA_MAX) && (!tb[rta->rta_type]))
+ tb[rta->rta_type] = rta;
+ rta = RTA_NEXT(rta, res_len);
+ }
+ if (res_len) {
+ printf("Netlink error: unprocessed data %d",
+ res_len);
+ goto out;
+ }
+
+ if (tb[CRYPTOCFGA_STAT_HASH]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_HASH];
+ struct crypto_stat *rhash =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tHash\n\tHash: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rhash->stat_hash_cnt, rhash->stat_hash_tlen,
+ rhash->stat_hash_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_COMPRESS]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_COMPRESS];
+ struct crypto_stat *rblk =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tCompress\n\tCompress: %u bytes: %llu\n\tDecompress: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rblk->stat_compress_cnt, rblk->stat_compress_tlen,
+ rblk->stat_decompress_cnt, rblk->stat_decompress_tlen,
+ rblk->stat_compress_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_ACOMP]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_ACOMP];
+ struct crypto_stat *rcomp =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tACompress\n\tCompress: %u bytes: %llu\n\tDecompress: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rcomp->stat_compress_cnt, rcomp->stat_compress_tlen,
+ rcomp->stat_decompress_cnt, rcomp->stat_decompress_tlen,
+ rcomp->stat_compress_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_AEAD]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_AEAD];
+ struct crypto_stat *raead =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tAEAD\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ raead->stat_encrypt_cnt, raead->stat_encrypt_tlen,
+ raead->stat_decrypt_cnt, raead->stat_decrypt_tlen,
+ raead->stat_aead_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_BLKCIPHER]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_BLKCIPHER];
+ struct crypto_stat *rblk =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tCipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
+ rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
+ rblk->stat_cipher_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_AKCIPHER]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_AKCIPHER];
+ struct crypto_stat *rblk =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tAkcipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tSign: %u\n\tVerify: %u\n\tErrors: %u\n",
+ drivername,
+ rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
+ rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
+ rblk->stat_sign_cnt, rblk->stat_verify_cnt,
+ rblk->stat_akcipher_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_CIPHER]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_CIPHER];
+ struct crypto_stat *rblk =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tcipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
+ rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
+ rblk->stat_cipher_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_RNG]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_RNG];
+ struct crypto_stat *rrng =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tRNG\n\tSeed: %u\n\tGenerate: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rrng->stat_seed_cnt,
+ rrng->stat_generate_cnt, rrng->stat_generate_tlen,
+ rrng->stat_rng_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_KPP]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_KPP];
+ struct crypto_stat *rkpp =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tKPP\n\tSetsecret: %u\n\tGenerate public key: %u\n\tCompute_shared_secret: %u\n\tErrors: %u\n",
+ drivername,
+ rkpp->stat_setsecret_cnt,
+ rkpp->stat_generate_public_key_cnt,
+ rkpp->stat_compute_shared_secret_cnt,
+ rkpp->stat_kpp_err_cnt);
+ } else {
+ fprintf(stderr, "%s is of an unknown algorithm\n", drivername);
+ }
+ ret = 0;
+out:
+ close(sd);
+ return ret;
+}
+
+int main(int argc, const char *argv[])
+{
+ char buf[4096];
+ FILE *procfd;
+ int i, lastspace;
+	int ret = 0;
+
+ procfd = fopen("/proc/crypto", "r");
+ if (!procfd) {
+ ret = errno;
+ fprintf(stderr, "Cannot open /proc/crypto %s\n", strerror(errno));
+ return ret;
+ }
+ if (argc > 1) {
+ if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
+ printf("Usage: %s [-h|--help] display this help\n", argv[0]);
+ printf("Usage: %s display all crypto statistics\n", argv[0]);
+ printf("Usage: %s drivername1 drivername2 ... = display crypto statistics about drivername1 ...\n", argv[0]);
+ return 0;
+ }
+ for (i = 1; i < argc; i++) {
+ ret = get_stat(argv[i]);
+ if (ret) {
+ fprintf(stderr, "Failed with %s\n", strerror(-ret));
+ return ret;
+ }
+ }
+ return 0;
+ }
+
+ while (fgets(buf, sizeof(buf), procfd)) {
+ if (!strncmp(buf, "driver", 6)) {
+ lastspace = 0;
+ i = 0;
+ while (i < strlen(buf)) {
+ i++;
+ if (buf[i] == ' ')
+ lastspace = i;
+ }
+ buf[strlen(buf) - 1] = '\0';
+ ret = get_stat(buf + lastspace + 1);
+ if (ret) {
+ fprintf(stderr, "Failed with %s\n", strerror(-ret));
+ goto out;
+ }
+ }
+ }
+out:
+ fclose(procfd);
+ return ret;
+}
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index d78aed86af09..8ff8cb1a11f4 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -234,6 +234,7 @@ int main(int argc, char *argv[])
break;
default:
+ error = HV_E_FAIL;
syslog(LOG_ERR, "Unknown operation: %d",
buffer.hdr.operation);
diff --git a/tools/include/asm/barrier.h b/tools/include/asm/barrier.h
index 391d942536e5..8d378c57cb01 100644
--- a/tools/include/asm/barrier.h
+++ b/tools/include/asm/barrier.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/compiler.h>
#if defined(__i386__) || defined(__x86_64__)
#include "../../arch/x86/include/asm/barrier.h"
#elif defined(__arm__)
@@ -26,3 +27,37 @@
#else
#include <asm-generic/barrier.h>
#endif
+
+/*
+ * Generic fallback smp_*() definitions for archs that haven't
+ * been updated yet.
+ */
+
+#ifndef smp_rmb
+# define smp_rmb() rmb()
+#endif
+
+#ifndef smp_wmb
+# define smp_wmb() wmb()
+#endif
+
+#ifndef smp_mb
+# define smp_mb() mb()
+#endif
+
+#ifndef smp_store_release
+# define smp_store_release(p, v) \
+do { \
+ smp_mb(); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+#endif
+
+#ifndef smp_load_acquire
+# define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+#endif
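The store-release/load-acquire fallbacks above are exactly what the
classic message-passing pattern needs; a sketch, assuming writer and
reader run on different CPUs:

    /* Sketch: the writer publishes data before the flag; a reader
     * that observes flag == 1 via smp_load_acquire() is then
     * guaranteed to observe data == 42 as well. */
    static int data, flag;

    static void writer(void)
    {
            data = 42;
            smp_store_release(&flag, 1);
    }

    static int reader(void)
    {
            if (smp_load_acquire(&flag))
                    return data;    /* sees 42 */
            return -1;              /* flag not yet set */
    }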
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index acc704bd3998..0b0ef3abc966 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -3,8 +3,6 @@
#define _TOOLS_LINUX_BITOPS_H_
#include <asm/types.h>
-#include <linux/compiler.h>
-
#ifndef __WORDSIZE
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
#endif
@@ -12,10 +10,9 @@
#ifndef BITS_PER_LONG
# define BITS_PER_LONG __WORDSIZE
#endif
+#include <linux/bits.h>
+#include <linux/compiler.h>
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/tools/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* __LINUX_BITS_H */
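Worked example for the GENMASK() construction, assuming a 64-bit long:

    /* GENMASK(7, 4) expands as:
     *   (~0UL - (1UL << 4) + 1)  == 0xfffffffffffffff0   clears bits below l
     *   (~0UL >> (64 - 1 - 7))   == 0x00000000000000ff   clears bits above h
     *   ANDed together           == 0x00000000000000f0   bits 7..4 set
     */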
diff --git a/tools/include/linux/err.h b/tools/include/linux/err.h
index 7a8b61ad44cb..094649667bae 100644
--- a/tools/include/linux/err.h
+++ b/tools/include/linux/err.h
@@ -52,4 +52,11 @@ static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
}
+static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
+{
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+ else
+ return 0;
+}
#endif /* _LINUX_ERR_H */
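
PTR_ERR_OR_ZERO() folds the standard IS_ERR()/PTR_ERR() dance into one expression for callers that only need an int result. A hedged sketch; open_thing() is a hypothetical constructor returning a valid pointer or ERR_PTR(-errno):

static int thing_init(void)
{
	struct thing *t = open_thing();

	/* Replaces: if (IS_ERR(t)) return PTR_ERR(t); return 0; */
	return PTR_ERR_OR_ZERO(t);
}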
diff --git a/tools/include/linux/ring_buffer.h b/tools/include/linux/ring_buffer.h
new file mode 100644
index 000000000000..9a083ae60473
--- /dev/null
+++ b/tools/include/linux/ring_buffer.h
@@ -0,0 +1,73 @@
+#ifndef _TOOLS_LINUX_RING_BUFFER_H_
+#define _TOOLS_LINUX_RING_BUFFER_H_
+
+#include <asm/barrier.h>
+
+/*
+ * Contract with kernel for walking the perf ring buffer from
+ * user space requires the following barrier pairing (quote
+ * from kernel/events/ring_buffer.c):
+ *
+ * Since the mmap() consumer (userspace) can run on a
+ * different CPU:
+ *
+ * kernel user
+ *
+ * if (LOAD ->data_tail) { LOAD ->data_head
+ * (A) smp_rmb() (C)
+ * STORE $data LOAD $data
+ * smp_wmb() (B) smp_mb() (D)
+ * STORE ->data_head STORE ->data_tail
+ * }
+ *
+ * Where A pairs with D, and B pairs with C.
+ *
+ * In our case A is a control dependency that separates the
+ * load of the ->data_tail and the stores of $data. In case
+ * ->data_tail indicates there is no room in the buffer to
+ * store $data, we do not store it.
+ *
+ * D needs to be a full barrier since it separates the data
+ * READ from the tail WRITE.
+ *
+ * For B a WMB is sufficient since it separates two WRITEs,
+ * and for C an RMB is sufficient since it separates two READs.
+ *
+ * Note, instead of B, C, D we could also use smp_store_release()
+ * in B and D as well as smp_load_acquire() in C.
+ *
+ * However, this optimization does not make sense for all kernel
+ * supported architectures since for a fair number it would
+ * resolve into READ_ONCE() + smp_mb() pair for smp_load_acquire(),
+ * and smp_mb() + WRITE_ONCE() pair for smp_store_release().
+ *
+ * Thus for those smp_wmb() in B and smp_rmb() in C would still
+ * be less expensive. For the case of D this has either the same
+ * cost or is less expensive; for example, due to TSO, x86 can
+ * avoid the CPU barrier entirely.
+ */
+
+static inline u64 ring_buffer_read_head(struct perf_event_mmap_page *base)
+{
+/*
+ * Architectures where smp_load_acquire() does not fall back to a
+ * READ_ONCE() + smp_mb() pair.
+ */
+#if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) || \
+ defined(__ia64__) || defined(__sparc__) && defined(__arch64__)
+ return smp_load_acquire(&base->data_head);
+#else
+ u64 head = READ_ONCE(base->data_head);
+
+ smp_rmb();
+ return head;
+#endif
+}
+
+static inline void ring_buffer_write_tail(struct perf_event_mmap_page *base,
+ u64 tail)
+{
+ smp_store_release(&base->data_tail, tail);
+}
+
+#endif /* _TOOLS_LINUX_RING_BUFFER_H_ */
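
The two helpers implement the C and D sides of the barrier diagram above. A minimal consumer-loop sketch built on them, assuming <linux/perf_event.h> for the record layout; drain() and process_event() are illustrative names, not part of this header:

#include <linux/perf_event.h>
#include <linux/ring_buffer.h>

/* Drain all records currently in the ring; mask is mmap_size - 1. */
static void drain(struct perf_event_mmap_page *page, void *data, u64 mask,
		  void (*process_event)(struct perf_event_header *ev))
{
	u64 head = ring_buffer_read_head(page);	/* barrier C */
	u64 tail = page->data_tail;

	while (tail != head) {
		struct perf_event_header *ev = data + (tail & mask);

		process_event(ev);	/* records wrapping the ring edge
					 * need the copy handling done in
					 * bpf_perf_event_read_simple() */
		tail += ev->size;
	}
	ring_buffer_write_tail(page, tail);	/* barrier D */
}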
diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h
index 664ced8cb1b0..e907ba6f15e5 100644
--- a/tools/include/tools/libc_compat.h
+++ b/tools/include/tools/libc_compat.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (LGPL-2.0+ OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __TOOLS_LIBC_COMPAT_H
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 66917a4eba27..852dc17ab47a 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -103,6 +103,7 @@ enum bpf_cmd {
BPF_BTF_LOAD,
BPF_BTF_GET_FD_BY_ID,
BPF_TASK_FD_QUERY,
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM,
};
enum bpf_map_type {
@@ -127,6 +128,9 @@ enum bpf_map_type {
BPF_MAP_TYPE_SOCKHASH,
BPF_MAP_TYPE_CGROUP_STORAGE,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ BPF_MAP_TYPE_QUEUE,
+ BPF_MAP_TYPE_STACK,
};
enum bpf_prog_type {
@@ -152,6 +156,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LWT_SEG6LOCAL,
BPF_PROG_TYPE_LIRC_MODE2,
BPF_PROG_TYPE_SK_REUSEPORT,
+ BPF_PROG_TYPE_FLOW_DISSECTOR,
};
enum bpf_attach_type {
@@ -172,6 +177,7 @@ enum bpf_attach_type {
BPF_CGROUP_UDP4_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
BPF_LIRC_MODE2,
+ BPF_FLOW_DISSECTOR,
__MAX_BPF_ATTACH_TYPE
};
@@ -459,6 +465,28 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * Description
+ * Push an element *value* in *map*. *flags* is one of:
+ *
+ * **BPF_EXIST**
+ * If the queue/stack is full, the oldest element is removed to
+ * make room for the new element.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+ * Description
+ * Pop an element from *map*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+ * Description
+ * Get an element from *map* without removing it.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
@@ -1430,7 +1458,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_adjust_room(struct sk_buff *skb, u32 len_diff, u32 mode, u64 flags)
+ * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
* Description
* Grow or shrink the room for data in the packet associated to
* *skb* by *len_diff*, and according to the selected *mode*.
@@ -2141,6 +2169,94 @@ union bpf_attr {
* request in the skb.
* Return
* 0 on success, or a negative error in case of failure.
+ *
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * Description
+ * Look for a TCP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-NULL, released via **bpf_sk_release**\ ().
+ *
+ * The *ctx* should point to the context of the program, such as
+ * the skb or socket (depending on the hook in use). This is used
+ * to determine the base network namespace for the lookup.
+ *
+ * *tuple_size* must be one of:
+ *
+ * **sizeof**\ (*tuple*\ **->ipv4**)
+ * Look for an IPv4 socket.
+ * **sizeof**\ (*tuple*\ **->ipv6**)
+ * Look for an IPv6 socket.
+ *
+ * If the *netns* is zero, then the socket lookup table in the
+ * netns associated with the *ctx* will be used. For the TC hooks,
+ * this is the netns of the device in the skb. For socket hooks,
+ * this is the netns of the socket. If *netns* is non-zero, then
+ * it specifies the ID of the netns relative to the netns
+ * associated with the *ctx*.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ * Return
+ * Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * Description
+ * Look for a UDP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-NULL, released via **bpf_sk_release**\ ().
+ *
+ * The *ctx* should point to the context of the program, such as
+ * the skb or socket (depending on the hook in use). This is used
+ * to determine the base network namespace for the lookup.
+ *
+ * *tuple_size* must be one of:
+ *
+ * **sizeof**\ (*tuple*\ **->ipv4**)
+ * Look for an IPv4 socket.
+ * **sizeof**\ (*tuple*\ **->ipv6**)
+ * Look for an IPv6 socket.
+ *
+ * If the *netns* is zero, then the socket lookup table in the
+ * netns associated with the *ctx* will be used. For the TC hooks,
+ * this is the netns of the device in the skb. For socket hooks,
+ * this is the netns of the socket. If *netns* is non-zero, then
+ * it specifies the ID of the netns relative to the netns
+ * associated with the *ctx*.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ * Return
+ * Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *
+ * int bpf_sk_release(struct bpf_sock *sk)
+ * Description
+ * Release the reference held by *sk*. *sk* must be a non-NULL
+ * pointer that was returned from bpf_sk_lookup_xxx\ ().
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * Description
+ * For socket policies, insert *len* bytes into *msg* at offset
+ * *start*.
+ *
+ * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
+ * *msg* it may want to insert metadata or options into the msg.
+ * This can later be read and used by any of the lower layer BPF
+ * hooks.
+ *
+ * This helper may fail if it is under memory pressure (a malloc
+ * fails); in these cases BPF programs will get an appropriate
+ * error and will need to handle it.
+ *
+ * Return
+ * 0 on success, or a negative error in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2226,7 +2342,14 @@ union bpf_attr {
FN(get_current_cgroup_id), \
FN(get_local_storage), \
FN(sk_select_reuseport), \
- FN(skb_ancestor_cgroup_id),
+ FN(skb_ancestor_cgroup_id), \
+ FN(sk_lookup_tcp), \
+ FN(sk_lookup_udp), \
+ FN(sk_release), \
+ FN(map_push_elem), \
+ FN(map_pop_elem), \
+ FN(map_peek_elem), \
+ FN(msg_push_data),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2333,6 +2456,7 @@ struct __sk_buff {
/* ... here. */
__u32 data_meta;
+ struct bpf_flow_keys *flow_keys;
};
struct bpf_tunnel_key {
@@ -2395,6 +2519,23 @@ struct bpf_sock {
*/
};
+struct bpf_sock_tuple {
+ union {
+ struct {
+ __be32 saddr;
+ __be32 daddr;
+ __be16 sport;
+ __be16 dport;
+ } ipv4;
+ struct {
+ __be32 saddr[4];
+ __be32 daddr[4];
+ __be16 sport;
+ __be16 dport;
+ } ipv6;
+ };
+};
+
#define XDP_PACKET_HEADROOM 256
/* User return codes for XDP prog type.
@@ -2778,4 +2919,27 @@ enum bpf_task_fd_type {
BPF_FD_TYPE_URETPROBE, /* filename + offset */
};
+struct bpf_flow_keys {
+ __u16 nhoff;
+ __u16 thoff;
+ __u16 addr_proto; /* ETH_P_* of valid addrs */
+ __u8 is_frag;
+ __u8 is_first_frag;
+ __u8 is_encap;
+ __u8 ip_proto;
+ __be16 n_proto;
+ __be16 sport;
+ __be16 dport;
+ union {
+ struct {
+ __be32 ipv4_src;
+ __be32 ipv4_dst;
+ };
+ struct {
+ __u32 ipv6_src[4]; /* in6_addr; network order */
+ __u32 ipv6_dst[4]; /* in6_addr; network order */
+ };
+ };
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
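
The new queue/stack map types pair with the push/pop/peek helpers documented above. A hedged BPF-side sketch, assuming the usual SEC()/bpf_map_def conventions from the selftests and a helper header that declares bpf_map_push_elem():

struct bpf_map_def SEC("maps") events = {
	.type		= BPF_MAP_TYPE_QUEUE,
	.key_size	= 0,			/* queue maps are keyless */
	.value_size	= sizeof(__u32),
	.max_entries	= 128,
};

SEC("xdp")
int enqueue_ifindex(struct xdp_md *ctx)
{
	__u32 v = ctx->ingress_ifindex;

	/* BPF_EXIST: evict the oldest entry when the queue is full. */
	bpf_map_push_elem(&events, &v, BPF_EXIST);
	return XDP_PASS;
}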
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 43391e2d1153..58faab897201 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -161,6 +161,7 @@ enum {
IFLA_EVENT,
IFLA_NEW_NETNSID,
IFLA_IF_NETNSID,
+ IFLA_TARGET_NETNSID = IFLA_IF_NETNSID, /* new alias */
IFLA_CARRIER_UP_COUNT,
IFLA_CARRIER_DOWN_COUNT,
IFLA_NEW_IFINDEX,
@@ -554,6 +555,7 @@ enum {
IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
IFLA_GENEVE_LABEL,
+ IFLA_GENEVE_TTL_INHERIT,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 07548de5c988..2875ce85b322 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -719,6 +719,7 @@ struct kvm_ppc_one_seg_page_size {
#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
#define KVM_PPC_1T_SEGMENTS 0x00000002
+#define KVM_PPC_NO_HASH 0x00000004
struct kvm_ppc_smmu_info {
__u64 flags;
@@ -952,6 +953,11 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_HPAGE_1M 156
#define KVM_CAP_NESTED_STATE 157
#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
+#define KVM_CAP_MSR_PLATFORM_INFO 159
+#define KVM_CAP_PPC_NESTED_HV 160
+#define KVM_CAP_HYPERV_SEND_IPI 161
+#define KVM_CAP_COALESCED_PIO 162
+#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/tools/include/uapi/linux/tls.h b/tools/include/uapi/linux/tls.h
new file mode 100644
index 000000000000..ff02287495ac
--- /dev/null
+++ b/tools/include/uapi/linux/tls.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _UAPI_LINUX_TLS_H
+#define _UAPI_LINUX_TLS_H
+
+#include <linux/types.h>
+
+/* TLS socket options */
+#define TLS_TX 1 /* Set transmit parameters */
+#define TLS_RX 2 /* Set receive parameters */
+
+/* Supported versions */
+#define TLS_VERSION_MINOR(ver) ((ver) & 0xFF)
+#define TLS_VERSION_MAJOR(ver) (((ver) >> 8) & 0xFF)
+
+#define TLS_VERSION_NUMBER(id) ((((id##_VERSION_MAJOR) & 0xFF) << 8) | \
+ ((id##_VERSION_MINOR) & 0xFF))
+
+#define TLS_1_2_VERSION_MAJOR 0x3
+#define TLS_1_2_VERSION_MINOR 0x3
+#define TLS_1_2_VERSION TLS_VERSION_NUMBER(TLS_1_2)
+
+/* Supported ciphers */
+#define TLS_CIPHER_AES_GCM_128 51
+#define TLS_CIPHER_AES_GCM_128_IV_SIZE 8
+#define TLS_CIPHER_AES_GCM_128_KEY_SIZE 16
+#define TLS_CIPHER_AES_GCM_128_SALT_SIZE 4
+#define TLS_CIPHER_AES_GCM_128_TAG_SIZE 16
+#define TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE 8
+
+#define TLS_SET_RECORD_TYPE 1
+#define TLS_GET_RECORD_TYPE 2
+
+struct tls_crypto_info {
+ __u16 version;
+ __u16 cipher_type;
+};
+
+struct tls12_crypto_info_aes_gcm_128 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_AES_GCM_128_IV_SIZE];
+ unsigned char key[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_AES_GCM_128_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE];
+};
+
+#endif /* _UAPI_LINUX_TLS_H */
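
User space turns these definitions into kernel TLS by installing the "tls" ULP on an established TCP socket and then handing the negotiated key material to TLS_TX (and/or TLS_RX). A minimal sketch; it assumes a libc recent enough to define TCP_ULP, the key/iv/salt/rec_seq buffers are placeholders that would come from a real handshake, and SOL_TLS (282) is defined locally in case libc headers lack it:

#include <linux/tls.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int enable_ktls_tx(int sock, const unsigned char *key,
			  const unsigned char *iv, const unsigned char *salt,
			  const unsigned char *rec_seq)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	if (setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls")) < 0)
		return -1;

	memset(&ci, 0, sizeof(ci));
	ci.info.version		= TLS_1_2_VERSION;
	ci.info.cipher_type	= TLS_CIPHER_AES_GCM_128;
	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	/* After this, plain send() data is encrypted by the kernel. */
	return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}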
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 439b8a27488d..195ba486640f 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -1325,7 +1325,7 @@ class Tui(object):
msg = ''
while True:
self.screen.erase()
- self.screen.addstr(0, 0, 'Set update interval (defaults to %fs).' %
+ self.screen.addstr(0, 0, 'Set update interval (defaults to %.1fs).' %
DELAY_DEFAULT, curses.A_BOLD)
self.screen.addstr(4, 0, msg)
self.screen.addstr(2, 0, 'Change delay from %.1fs to ' %
diff --git a/tools/lib/api/fs/tracing_path.c b/tools/lib/api/fs/tracing_path.c
index 120037496f77..5afb11b30fca 100644
--- a/tools/lib/api/fs/tracing_path.c
+++ b/tools/lib/api/fs/tracing_path.c
@@ -36,7 +36,7 @@ static const char *tracing_path_tracefs_mount(void)
__tracing_path_set("", mnt);
- return mnt;
+ return tracing_path;
}
static const char *tracing_path_debugfs_mount(void)
@@ -49,7 +49,7 @@ static const char *tracing_path_debugfs_mount(void)
__tracing_path_set("tracing/", mnt);
- return mnt;
+ return tracing_path;
}
const char *tracing_path_mount(void)
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 13a861135127..7bc31c905018 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index d49902e818b5..425b480bda75 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
# Most of this file is copied from tools/lib/traceevent/Makefile
BPF_VERSION = 0
@@ -69,7 +69,7 @@ FEATURE_USER = .libbpf
FEATURE_TESTS = libelf libelf-mmap bpf reallocarray
FEATURE_DISPLAY = libelf bpf
-INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi -I$(srctree)/tools/perf
+INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
check_feat := 1
@@ -125,6 +125,7 @@ override CFLAGS += $(EXTRA_WARNINGS)
override CFLAGS += -Werror -Wall
override CFLAGS += -fPIC
override CFLAGS += $(INCLUDES)
+override CFLAGS += -fvisibility=hidden
ifeq ($(VERBOSE),1)
Q =
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 60aa4ca8b2c5..03f9bcc4ef50 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: LGPL-2.1
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* common eBPF ELF operations.
@@ -28,16 +28,8 @@
#include <linux/bpf.h>
#include "bpf.h"
#include "libbpf.h"
-#include "nlattr.h"
-#include <linux/rtnetlink.h>
-#include <linux/if_link.h>
-#include <sys/socket.h>
#include <errno.h>
-#ifndef SOL_NETLINK
-#define SOL_NETLINK 270
-#endif
-
/*
* When building perf, unistd.h is overridden. __NR_bpf is
* required to be defined explicitly.
@@ -286,6 +278,18 @@ int bpf_map_lookup_elem(int fd, const void *key, void *value)
return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
+int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
+{
+ union bpf_attr attr;
+
+ bzero(&attr, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+
+ return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
+}
+
int bpf_map_delete_elem(int fd, const void *key)
{
union bpf_attr attr;
@@ -499,127 +503,6 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
-int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
-{
- struct sockaddr_nl sa;
- int sock, seq = 0, len, ret = -1;
- char buf[4096];
- struct nlattr *nla, *nla_xdp;
- struct {
- struct nlmsghdr nh;
- struct ifinfomsg ifinfo;
- char attrbuf[64];
- } req;
- struct nlmsghdr *nh;
- struct nlmsgerr *err;
- socklen_t addrlen;
- int one = 1;
-
- memset(&sa, 0, sizeof(sa));
- sa.nl_family = AF_NETLINK;
-
- sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
- if (sock < 0) {
- return -errno;
- }
-
- if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
- &one, sizeof(one)) < 0) {
- fprintf(stderr, "Netlink error reporting not supported\n");
- }
-
- if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
- ret = -errno;
- goto cleanup;
- }
-
- addrlen = sizeof(sa);
- if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
- ret = -errno;
- goto cleanup;
- }
-
- if (addrlen != sizeof(sa)) {
- ret = -LIBBPF_ERRNO__INTERNAL;
- goto cleanup;
- }
-
- memset(&req, 0, sizeof(req));
- req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
- req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
- req.nh.nlmsg_type = RTM_SETLINK;
- req.nh.nlmsg_pid = 0;
- req.nh.nlmsg_seq = ++seq;
- req.ifinfo.ifi_family = AF_UNSPEC;
- req.ifinfo.ifi_index = ifindex;
-
- /* started nested attribute for XDP */
- nla = (struct nlattr *)(((char *)&req)
- + NLMSG_ALIGN(req.nh.nlmsg_len));
- nla->nla_type = NLA_F_NESTED | IFLA_XDP;
- nla->nla_len = NLA_HDRLEN;
-
- /* add XDP fd */
- nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
- nla_xdp->nla_type = IFLA_XDP_FD;
- nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
- memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
- nla->nla_len += nla_xdp->nla_len;
-
- /* if user passed in any flags, add those too */
- if (flags) {
- nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
- nla_xdp->nla_type = IFLA_XDP_FLAGS;
- nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
- memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
- nla->nla_len += nla_xdp->nla_len;
- }
-
- req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
-
- if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
- ret = -errno;
- goto cleanup;
- }
-
- len = recv(sock, buf, sizeof(buf), 0);
- if (len < 0) {
- ret = -errno;
- goto cleanup;
- }
-
- for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
- nh = NLMSG_NEXT(nh, len)) {
- if (nh->nlmsg_pid != sa.nl_pid) {
- ret = -LIBBPF_ERRNO__WRNGPID;
- goto cleanup;
- }
- if (nh->nlmsg_seq != seq) {
- ret = -LIBBPF_ERRNO__INVSEQ;
- goto cleanup;
- }
- switch (nh->nlmsg_type) {
- case NLMSG_ERROR:
- err = (struct nlmsgerr *)NLMSG_DATA(nh);
- if (!err->error)
- continue;
- ret = err->error;
- nla_dump_errormsg(nh);
- goto cleanup;
- case NLMSG_DONE:
- break;
- default:
- break;
- }
- }
-
- ret = 0;
-
-cleanup:
- close(sock);
- return ret;
-}
-
int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
bool do_log)
{
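
For queue and stack maps the new wrapper gives user space pop semantics. A hedged sketch draining a BPF_MAP_TYPE_QUEUE of __u32 values; map_fd is assumed to come from bpf_obj_get() or object loading:

__u32 value;

/* Queue/stack maps are keyless, so the key argument is NULL. */
while (bpf_map_lookup_and_delete_elem(map_fd, NULL, &value) == 0)
	printf("popped %u\n", value);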
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 6f38164b2618..26a51538213c 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: LGPL-2.1 */
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/*
* common eBPF ELF operations.
@@ -20,13 +20,17 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses>
*/
-#ifndef __BPF_BPF_H
-#define __BPF_BPF_H
+#ifndef __LIBBPF_BPF_H
+#define __LIBBPF_BPF_H
#include <linux/bpf.h>
#include <stdbool.h>
#include <stddef.h>
+#ifndef LIBBPF_API
+#define LIBBPF_API __attribute__((visibility("default")))
+#endif
+
struct bpf_create_map_attr {
const char *name;
enum bpf_map_type map_type;
@@ -42,21 +46,24 @@ struct bpf_create_map_attr {
__u32 inner_map_fd;
};
-int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr);
-int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size, int max_entries,
- __u32 map_flags, int node);
-int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size, int max_entries,
- __u32 map_flags);
-int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
- int max_entries, __u32 map_flags);
-int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
- int key_size, int inner_map_fd, int max_entries,
- __u32 map_flags, int node);
-int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
- int key_size, int inner_map_fd, int max_entries,
- __u32 map_flags);
+LIBBPF_API int
+bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr);
+LIBBPF_API int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
+ int key_size, int value_size,
+ int max_entries, __u32 map_flags, int node);
+LIBBPF_API int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
+ int key_size, int value_size,
+ int max_entries, __u32 map_flags);
+LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size,
+ int value_size, int max_entries, __u32 map_flags);
+LIBBPF_API int bpf_create_map_in_map_node(enum bpf_map_type map_type,
+ const char *name, int key_size,
+ int inner_map_fd, int max_entries,
+ __u32 map_flags, int node);
+LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type,
+ const char *name, int key_size,
+ int inner_map_fd, int max_entries,
+ __u32 map_flags);
struct bpf_load_program_attr {
enum bpf_prog_type prog_type;
@@ -69,46 +76,56 @@ struct bpf_load_program_attr {
__u32 prog_ifindex;
};
+/* Flags to direct loading requirements */
+#define MAPS_RELAX_COMPAT 0x01
+
/* Recommend log buffer size */
#define BPF_LOG_BUF_SIZE (256 * 1024)
-int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
- char *log_buf, size_t log_buf_sz);
-int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
- size_t insns_cnt, const char *license,
- __u32 kern_version, char *log_buf,
- size_t log_buf_sz);
-int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
- size_t insns_cnt, int strict_alignment,
- const char *license, __u32 kern_version,
- char *log_buf, size_t log_buf_sz, int log_level);
+LIBBPF_API int
+bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
+ char *log_buf, size_t log_buf_sz);
+LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
+ const struct bpf_insn *insns, size_t insns_cnt,
+ const char *license, __u32 kern_version,
+ char *log_buf, size_t log_buf_sz);
+LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
+ const struct bpf_insn *insns,
+ size_t insns_cnt, int strict_alignment,
+ const char *license, __u32 kern_version,
+ char *log_buf, size_t log_buf_sz,
+ int log_level);
-int bpf_map_update_elem(int fd, const void *key, const void *value,
- __u64 flags);
+LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
+ __u64 flags);
-int bpf_map_lookup_elem(int fd, const void *key, void *value);
-int bpf_map_delete_elem(int fd, const void *key);
-int bpf_map_get_next_key(int fd, const void *key, void *next_key);
-int bpf_obj_pin(int fd, const char *pathname);
-int bpf_obj_get(const char *pathname);
-int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
- unsigned int flags);
-int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
-int bpf_prog_detach2(int prog_fd, int attachable_fd, enum bpf_attach_type type);
-int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
- void *data_out, __u32 *size_out, __u32 *retval,
- __u32 *duration);
-int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
-int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
-int bpf_prog_get_fd_by_id(__u32 id);
-int bpf_map_get_fd_by_id(__u32 id);
-int bpf_btf_get_fd_by_id(__u32 id);
-int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len);
-int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
- __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt);
-int bpf_raw_tracepoint_open(const char *name, int prog_fd);
-int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
- bool do_log);
-int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
- __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
- __u64 *probe_addr);
-#endif
+LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value);
+LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key,
+ void *value);
+LIBBPF_API int bpf_map_delete_elem(int fd, const void *key);
+LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key);
+LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
+LIBBPF_API int bpf_obj_get(const char *pathname);
+LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
+ enum bpf_attach_type type, unsigned int flags);
+LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
+LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
+ enum bpf_attach_type type);
+LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data,
+ __u32 size, void *data_out, __u32 *size_out,
+ __u32 *retval, __u32 *duration);
+LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
+LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
+LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
+LIBBPF_API int bpf_map_get_fd_by_id(__u32 id);
+LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
+LIBBPF_API int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len);
+LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
+ __u32 query_flags, __u32 *attach_flags,
+ __u32 *prog_ids, __u32 *prog_cnt);
+LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
+LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf,
+ __u32 log_buf_size, bool do_log);
+LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
+ __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
+ __u64 *probe_offset, __u64 *probe_addr);
+#endif /* __LIBBPF_BPF_H */
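
The LIBBPF_API annotations work together with the -fvisibility=hidden flag added to the Makefile: once the default visibility is hidden, only symbols explicitly marked default end up in the shared object's dynamic symbol table. In effect:

/* built with -fvisibility=hidden */
int internal_helper(void);		/* hidden: private to libbpf.so */
LIBBPF_API int public_entry(void);	/* default: exported to consumers */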
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index cf94b0770522..449591aa9900 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: LGPL-2.1
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
#include <stdlib.h>
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 4897e0724d4e..b77e7080f7e7 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -1,11 +1,15 @@
-/* SPDX-License-Identifier: LGPL-2.1 */
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2018 Facebook */
-#ifndef __BPF_BTF_H
-#define __BPF_BTF_H
+#ifndef __LIBBPF_BTF_H
+#define __LIBBPF_BTF_H
#include <linux/types.h>
+#ifndef LIBBPF_API
+#define LIBBPF_API __attribute__((visibility("default")))
+#endif
+
#define BTF_ELF_SEC ".BTF"
struct btf;
@@ -14,13 +18,15 @@ struct btf_type;
typedef int (*btf_print_fn_t)(const char *, ...)
__attribute__((format(printf, 1, 2)));
-void btf__free(struct btf *btf);
-struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
-__s32 btf__find_by_name(const struct btf *btf, const char *type_name);
-const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id);
-__s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
-int btf__resolve_type(const struct btf *btf, __u32 type_id);
-int btf__fd(const struct btf *btf);
-const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
+LIBBPF_API void btf__free(struct btf *btf);
+LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
+LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
+ const char *type_name);
+LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
+ __u32 id);
+LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
+LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
+LIBBPF_API int btf__fd(const struct btf *btf);
+LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
-#endif
+#endif /* __LIBBPF_BTF_H */
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2abd0f112627..b607be7236d3 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: LGPL-2.1
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* Common eBPF ELF object loading operations.
@@ -7,19 +7,6 @@
* Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
* Copyright (C) 2015 Huawei Inc.
* Copyright (C) 2017 Nicira, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses>
*/
#define _GNU_SOURCE
@@ -32,7 +19,6 @@
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
-#include <perf-sys.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -40,6 +26,8 @@
#include <linux/btf.h>
#include <linux/list.h>
#include <linux/limits.h>
+#include <linux/perf_event.h>
+#include <linux/ring_buffer.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
@@ -50,6 +38,7 @@
#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
+#include "str_error.h"
#ifndef EM_BPF
#define EM_BPF 247
@@ -181,7 +170,7 @@ static LIST_HEAD(bpf_objects_list);
struct bpf_object {
char license[64];
- u32 kern_version;
+ __u32 kern_version;
struct bpf_program *programs;
size_t nr_programs;
@@ -227,7 +216,7 @@ struct bpf_object {
};
#define obj_elf_valid(o) ((o)->efile.elf)
-static void bpf_program__unload(struct bpf_program *prog)
+void bpf_program__unload(struct bpf_program *prog)
{
int i;
@@ -469,7 +458,8 @@ static int bpf_object__elf_init(struct bpf_object *obj)
obj->efile.fd = open(obj->path, O_RDONLY);
if (obj->efile.fd < 0) {
char errmsg[STRERR_BUFSIZE];
- char *cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ char *cp = libbpf_strerror_r(errno, errmsg,
+ sizeof(errmsg));
pr_warning("failed to open %s: %s\n", obj->path, cp);
return -errno;
@@ -551,7 +541,7 @@ static int
bpf_object__init_kversion(struct bpf_object *obj,
void *data, size_t size)
{
- u32 kver;
+ __u32 kver;
if (size != sizeof(kver)) {
pr_warning("invalid kver section in %s\n", obj->path);
@@ -573,8 +563,9 @@ static int compare_bpf_map(const void *_a, const void *_b)
}
static int
-bpf_object__init_maps(struct bpf_object *obj)
+bpf_object__init_maps(struct bpf_object *obj, int flags)
{
+ bool strict = !(flags & MAPS_RELAX_COMPAT);
int i, map_idx, map_def_sz, nr_maps = 0;
Elf_Scn *scn;
Elf_Data *data;
@@ -696,7 +687,8 @@ bpf_object__init_maps(struct bpf_object *obj)
"has unrecognized, non-zero "
"options\n",
obj->path, map_name);
- return -EINVAL;
+ if (strict)
+ return -EINVAL;
}
}
memcpy(&obj->maps[map_idx].def, def,
@@ -727,7 +719,7 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
return false;
}
-static int bpf_object__elf_collect(struct bpf_object *obj)
+static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
{
Elf *elf = obj->efile.elf;
GElf_Ehdr *ep = &obj->efile.ehdr;
@@ -810,8 +802,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
data->d_size, name, idx);
if (err) {
char errmsg[STRERR_BUFSIZE];
- char *cp = strerror_r(-err, errmsg,
- sizeof(errmsg));
+ char *cp = libbpf_strerror_r(-err, errmsg,
+ sizeof(errmsg));
pr_warning("failed to alloc program %s (%s): %s",
name, obj->path, cp);
@@ -854,7 +846,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
return LIBBPF_ERRNO__FORMAT;
}
if (obj->efile.maps_shndx >= 0) {
- err = bpf_object__init_maps(obj);
+ err = bpf_object__init_maps(obj, flags);
if (err)
goto out;
}
@@ -1140,7 +1132,7 @@ bpf_object__create_maps(struct bpf_object *obj)
*pfd = bpf_create_map_xattr(&create_attr);
if (*pfd < 0 && create_attr.btf_key_type_id) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
map->name, cp, errno);
create_attr.btf_fd = 0;
@@ -1155,7 +1147,7 @@ bpf_object__create_maps(struct bpf_object *obj)
size_t j;
err = *pfd;
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warning("failed to create map (name: '%s'): %s\n",
map->name, cp);
for (j = 0; j < i; j++)
@@ -1306,7 +1298,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
const char *name, struct bpf_insn *insns, int insns_cnt,
- char *license, u32 kern_version, int *pfd, int prog_ifindex)
+ char *license, __u32 kern_version, int *pfd, int prog_ifindex)
{
struct bpf_load_program_attr load_attr;
char *cp, errmsg[STRERR_BUFSIZE];
@@ -1339,7 +1331,7 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
}
ret = -LIBBPF_ERRNO__LOAD;
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warning("load bpf program failed: %s\n", cp);
if (log_buf && log_buf[0] != '\0') {
@@ -1375,9 +1367,9 @@ out:
return ret;
}
-static int
+int
bpf_program__load(struct bpf_program *prog,
- char *license, u32 kern_version)
+ char *license, __u32 kern_version)
{
int err = 0, fd, i;
@@ -1502,6 +1494,7 @@ static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
case BPF_PROG_TYPE_LIRC_MODE2:
case BPF_PROG_TYPE_SK_REUSEPORT:
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
return false;
case BPF_PROG_TYPE_UNSPEC:
case BPF_PROG_TYPE_KPROBE:
@@ -1525,7 +1518,7 @@ static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
- bool needs_kver)
+ bool needs_kver, int flags)
{
struct bpf_object *obj;
int err;
@@ -1541,7 +1534,7 @@ __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
CHECK_ERR(bpf_object__elf_init(obj), err, out);
CHECK_ERR(bpf_object__check_endianness(obj), err, out);
- CHECK_ERR(bpf_object__elf_collect(obj), err, out);
+ CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
@@ -1552,7 +1545,8 @@ out:
return ERR_PTR(err);
}
-struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
+struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
+ int flags)
{
/* param validation */
if (!attr->file)
@@ -1561,7 +1555,13 @@ struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
pr_debug("loading %s\n", attr->file);
return __bpf_object__open(attr->file, NULL, 0,
- bpf_prog_type__needs_kver(attr->prog_type));
+ bpf_prog_type__needs_kver(attr->prog_type),
+ flags);
+}
+
+struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
+{
+ return __bpf_object__open_xattr(attr, 0);
}
struct bpf_object *bpf_object__open(const char *path)
@@ -1594,7 +1594,7 @@ struct bpf_object *bpf_object__open_buffer(void *obj_buf,
pr_debug("loading object '%s' from buffer\n",
name);
- return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
+ return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
int bpf_object__unload(struct bpf_object *obj)
@@ -1654,7 +1654,7 @@ static int check_path(const char *path)
dir = dirname(dname);
if (statfs(dir, &st_fs)) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warning("failed to statfs %s: %s\n", dir, cp);
err = -errno;
}
@@ -1690,7 +1690,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
}
if (bpf_obj_pin(prog->instances.fds[instance], path)) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warning("failed to pin program: %s\n", cp);
return -errno;
}
@@ -1708,7 +1708,7 @@ static int make_dir(const char *path)
err = -errno;
if (err) {
- cp = strerror_r(-err, errmsg, sizeof(errmsg));
+ cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
pr_warning("failed to mkdir %s: %s\n", path, cp);
}
return err;
@@ -1770,7 +1770,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
}
if (bpf_obj_pin(map->fd, path)) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warning("failed to pin map: %s\n", cp);
return -errno;
}
@@ -2084,57 +2084,90 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
prog->expected_attach_type = type;
}
-#define BPF_PROG_SEC_FULL(string, ptype, atype) \
- { string, sizeof(string) - 1, ptype, atype }
+#define BPF_PROG_SEC_IMPL(string, ptype, eatype, atype) \
+ { string, sizeof(string) - 1, ptype, eatype, atype }
-#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_FULL(string, ptype, 0)
+/* Programs that can NOT be attached. */
+#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, -EINVAL)
-#define BPF_S_PROG_SEC(string, ptype) \
- BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK, ptype)
+/* Programs that can be attached. */
+#define BPF_APROG_SEC(string, ptype, atype) \
+ BPF_PROG_SEC_IMPL(string, ptype, 0, atype)
-#define BPF_SA_PROG_SEC(string, ptype) \
- BPF_PROG_SEC_FULL(string, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ptype)
+/* Programs that must specify expected attach type at load time. */
+#define BPF_EAPROG_SEC(string, ptype, eatype) \
+ BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype)
+
+/* Programs that can be attached but attach type can't be identified by section
+ * name. Kept for backward compatibility.
+ */
+#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
static const struct {
const char *sec;
size_t len;
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
+ enum bpf_attach_type attach_type;
} section_names[] = {
- BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
- BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
- BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
- BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
- BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
- BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
- BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
- BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
- BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
- BPF_PROG_SEC("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
- BPF_PROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK),
- BPF_PROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE),
- BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
- BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
- BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
- BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
- BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
- BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
- BPF_PROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG),
- BPF_PROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2),
- BPF_SA_PROG_SEC("cgroup/bind4", BPF_CGROUP_INET4_BIND),
- BPF_SA_PROG_SEC("cgroup/bind6", BPF_CGROUP_INET6_BIND),
- BPF_SA_PROG_SEC("cgroup/connect4", BPF_CGROUP_INET4_CONNECT),
- BPF_SA_PROG_SEC("cgroup/connect6", BPF_CGROUP_INET6_CONNECT),
- BPF_SA_PROG_SEC("cgroup/sendmsg4", BPF_CGROUP_UDP4_SENDMSG),
- BPF_SA_PROG_SEC("cgroup/sendmsg6", BPF_CGROUP_UDP6_SENDMSG),
- BPF_S_PROG_SEC("cgroup/post_bind4", BPF_CGROUP_INET4_POST_BIND),
- BPF_S_PROG_SEC("cgroup/post_bind6", BPF_CGROUP_INET6_POST_BIND),
+ BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
+ BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
+ BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
+ BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
+ BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
+ BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
+ BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
+ BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
+ BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
+ BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
+ BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
+ BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
+ BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
+ BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
+ BPF_CGROUP_INET_INGRESS),
+ BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
+ BPF_CGROUP_INET_EGRESS),
+ BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
+ BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
+ BPF_CGROUP_INET_SOCK_CREATE),
+ BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
+ BPF_CGROUP_INET4_POST_BIND),
+ BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
+ BPF_CGROUP_INET6_POST_BIND),
+ BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
+ BPF_CGROUP_DEVICE),
+ BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
+ BPF_CGROUP_SOCK_OPS),
+ BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
+ BPF_SK_SKB_STREAM_PARSER),
+ BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
+ BPF_SK_SKB_STREAM_VERDICT),
+ BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
+ BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
+ BPF_SK_MSG_VERDICT),
+ BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
+ BPF_LIRC_MODE2),
+ BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
+ BPF_FLOW_DISSECTOR),
+ BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ BPF_CGROUP_INET4_BIND),
+ BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ BPF_CGROUP_INET6_BIND),
+ BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ BPF_CGROUP_INET4_CONNECT),
+ BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ BPF_CGROUP_INET6_CONNECT),
+ BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ BPF_CGROUP_UDP4_SENDMSG),
+ BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ BPF_CGROUP_UDP6_SENDMSG),
};
+#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
-#undef BPF_PROG_SEC_FULL
-#undef BPF_S_PROG_SEC
-#undef BPF_SA_PROG_SEC
+#undef BPF_APROG_SEC
+#undef BPF_EAPROG_SEC
+#undef BPF_APROG_COMPAT
int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
enum bpf_attach_type *expected_attach_type)
@@ -2154,6 +2187,25 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
return -EINVAL;
}
+int libbpf_attach_type_by_name(const char *name,
+ enum bpf_attach_type *attach_type)
+{
+ int i;
+
+ if (!name)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+ if (strncmp(name, section_names[i].sec, section_names[i].len))
+ continue;
+ if (section_names[i].attach_type == -EINVAL)
+ return -EINVAL;
+ *attach_type = section_names[i].attach_type;
+ return 0;
+ }
+ return -EINVAL;
+}
+
static int
bpf_program__identify_section(struct bpf_program *prog,
enum bpf_prog_type *prog_type,
@@ -2336,7 +2388,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
bpf_program__set_expected_attach_type(prog,
expected_attach_type);
- if (!bpf_program__is_function_storage(prog, obj) && !first_prog)
+ if (!first_prog)
first_prog = prog;
}
@@ -2363,61 +2415,49 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
}
enum bpf_perf_event_ret
-bpf_perf_event_read_simple(void *mem, unsigned long size,
- unsigned long page_size, void **buf, size_t *buf_len,
- bpf_perf_event_print_t fn, void *priv)
+bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
+ void **copy_mem, size_t *copy_size,
+ bpf_perf_event_print_t fn, void *private_data)
{
- volatile struct perf_event_mmap_page *header = mem;
+ struct perf_event_mmap_page *header = mmap_mem;
+ __u64 data_head = ring_buffer_read_head(header);
__u64 data_tail = header->data_tail;
- __u64 data_head = header->data_head;
- int ret = LIBBPF_PERF_EVENT_ERROR;
- void *base, *begin, *end;
-
- asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
- if (data_head == data_tail)
- return LIBBPF_PERF_EVENT_CONT;
-
- base = ((char *)header) + page_size;
-
- begin = base + data_tail % size;
- end = base + data_head % size;
-
- while (begin != end) {
- struct perf_event_header *ehdr;
-
- ehdr = begin;
- if (begin + ehdr->size > base + size) {
- long len = base + size - begin;
-
- if (*buf_len < ehdr->size) {
- free(*buf);
- *buf = malloc(ehdr->size);
- if (!*buf) {
+ void *base = ((__u8 *)header) + page_size;
+ int ret = LIBBPF_PERF_EVENT_CONT;
+ struct perf_event_header *ehdr;
+ size_t ehdr_size;
+
+ while (data_head != data_tail) {
+ ehdr = base + (data_tail & (mmap_size - 1));
+ ehdr_size = ehdr->size;
+
+ if (((void *)ehdr) + ehdr_size > base + mmap_size) {
+ void *copy_start = ehdr;
+ size_t len_first = base + mmap_size - copy_start;
+ size_t len_secnd = ehdr_size - len_first;
+
+ if (*copy_size < ehdr_size) {
+ free(*copy_mem);
+ *copy_mem = malloc(ehdr_size);
+ if (!*copy_mem) {
+ *copy_size = 0;
ret = LIBBPF_PERF_EVENT_ERROR;
break;
}
- *buf_len = ehdr->size;
+ *copy_size = ehdr_size;
}
- memcpy(*buf, begin, len);
- memcpy(*buf + len, base, ehdr->size - len);
- ehdr = (void *)*buf;
- begin = base + ehdr->size - len;
- } else if (begin + ehdr->size == base + size) {
- begin = base;
- } else {
- begin += ehdr->size;
+ memcpy(*copy_mem, copy_start, len_first);
+ memcpy(*copy_mem + len_first, base, len_secnd);
+ ehdr = *copy_mem;
}
- ret = fn(ehdr, priv);
+ ret = fn(ehdr, private_data);
+ data_tail += ehdr_size;
if (ret != LIBBPF_PERF_EVENT_CONT)
break;
-
- data_tail += ehdr->size;
}
- __sync_synchronize(); /* smp_mb() */
- header->data_tail = data_tail;
-
+ ring_buffer_write_tail(header, data_tail);
return ret;
}
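
With the attach_type column added to section_names, a loader can go from an ELF section name straight to bpf_prog_attach(). A hedged user-space sketch; prog_fd and cgroup_fd are assumed to already exist:

enum bpf_attach_type atype;

if (libbpf_attach_type_by_name("cgroup_skb/ingress", &atype))
	return -EINVAL;		/* section is not attachable */

/* atype is now BPF_CGROUP_INET_INGRESS */
return bpf_prog_attach(prog_fd, cgroup_fd, atype, 0);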
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 96c55fac54c3..1f3468dad8b2 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: LGPL-2.1 */
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/*
* Common eBPF ELF object loading operations.
@@ -6,22 +6,9 @@
* Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
* Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
* Copyright (C) 2015 Huawei Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses>
*/
-#ifndef __BPF_LIBBPF_H
-#define __BPF_LIBBPF_H
+#ifndef __LIBBPF_LIBBPF_H
+#define __LIBBPF_LIBBPF_H
#include <stdio.h>
#include <stdint.h>
@@ -29,6 +16,10 @@
#include <sys/types.h> // for size_t
#include <linux/bpf.h>
+#ifndef LIBBPF_API
+#define LIBBPF_API __attribute__((visibility("default")))
+#endif
+
enum libbpf_errno {
__LIBBPF_ERRNO__START = 4000,
@@ -46,10 +37,11 @@ enum libbpf_errno {
LIBBPF_ERRNO__PROGTYPE, /* Kernel doesn't support this program type */
LIBBPF_ERRNO__WRNGPID, /* Wrong pid in netlink message */
LIBBPF_ERRNO__INVSEQ, /* Invalid netlink sequence */
+ LIBBPF_ERRNO__NLPARSE, /* netlink parsing error */
__LIBBPF_ERRNO__END,
};
-int libbpf_strerror(int err, char *buf, size_t size);
+LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
/*
* __printf is defined in include/linux/compiler-gcc.h. However,
@@ -59,9 +51,9 @@ int libbpf_strerror(int err, char *buf, size_t size);
typedef int (*libbpf_print_fn_t)(const char *, ...)
__attribute__((format(printf, 1, 2)));
-void libbpf_set_print(libbpf_print_fn_t warn,
- libbpf_print_fn_t info,
- libbpf_print_fn_t debug);
+LIBBPF_API void libbpf_set_print(libbpf_print_fn_t warn,
+ libbpf_print_fn_t info,
+ libbpf_print_fn_t debug);
/* Hide internal to user */
struct bpf_object;
@@ -71,25 +63,28 @@ struct bpf_object_open_attr {
enum bpf_prog_type prog_type;
};
-struct bpf_object *bpf_object__open(const char *path);
-struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr);
-struct bpf_object *bpf_object__open_buffer(void *obj_buf,
- size_t obj_buf_sz,
- const char *name);
-int bpf_object__pin(struct bpf_object *object, const char *path);
-void bpf_object__close(struct bpf_object *object);
+LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
+LIBBPF_API struct bpf_object *
+bpf_object__open_xattr(struct bpf_object_open_attr *attr);
+struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
+ int flags);
+LIBBPF_API struct bpf_object *bpf_object__open_buffer(void *obj_buf,
+ size_t obj_buf_sz,
+ const char *name);
+LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
+LIBBPF_API void bpf_object__close(struct bpf_object *object);
/* Load/unload object into/from kernel */
-int bpf_object__load(struct bpf_object *obj);
-int bpf_object__unload(struct bpf_object *obj);
-const char *bpf_object__name(struct bpf_object *obj);
-unsigned int bpf_object__kversion(struct bpf_object *obj);
-int bpf_object__btf_fd(const struct bpf_object *obj);
+LIBBPF_API int bpf_object__load(struct bpf_object *obj);
+LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
+LIBBPF_API const char *bpf_object__name(struct bpf_object *obj);
+LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj);
+LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
-struct bpf_program *
+LIBBPF_API struct bpf_program *
bpf_object__find_program_by_title(struct bpf_object *obj, const char *title);
-struct bpf_object *bpf_object__next(struct bpf_object *prev);
+LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev);
#define bpf_object__for_each_safe(pos, tmp) \
for ((pos) = bpf_object__next(NULL), \
(tmp) = bpf_object__next(pos); \
@@ -97,17 +92,20 @@ struct bpf_object *bpf_object__next(struct bpf_object *prev);
(pos) = (tmp), (tmp) = bpf_object__next(tmp))
typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *);
-int bpf_object__set_priv(struct bpf_object *obj, void *priv,
- bpf_object_clear_priv_t clear_priv);
-void *bpf_object__priv(struct bpf_object *prog);
+LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv,
+ bpf_object_clear_priv_t clear_priv);
+LIBBPF_API void *bpf_object__priv(struct bpf_object *prog);
-int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
- enum bpf_attach_type *expected_attach_type);
+LIBBPF_API int
+libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
+ enum bpf_attach_type *expected_attach_type);
+LIBBPF_API int libbpf_attach_type_by_name(const char *name,
+ enum bpf_attach_type *attach_type);
/* Accessors of bpf_program */
struct bpf_program;
-struct bpf_program *bpf_program__next(struct bpf_program *prog,
- struct bpf_object *obj);
+LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog,
+ struct bpf_object *obj);
#define bpf_object__for_each_program(pos, obj) \
for ((pos) = bpf_program__next(NULL, (obj)); \
@@ -117,18 +115,24 @@ struct bpf_program *bpf_program__next(struct bpf_program *prog,
typedef void (*bpf_program_clear_priv_t)(struct bpf_program *,
void *);
-int bpf_program__set_priv(struct bpf_program *prog, void *priv,
- bpf_program_clear_priv_t clear_priv);
+LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv,
+ bpf_program_clear_priv_t clear_priv);
-void *bpf_program__priv(struct bpf_program *prog);
-void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex);
+LIBBPF_API void *bpf_program__priv(struct bpf_program *prog);
+LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
+ __u32 ifindex);
-const char *bpf_program__title(struct bpf_program *prog, bool needs_copy);
+LIBBPF_API const char *bpf_program__title(struct bpf_program *prog,
+ bool needs_copy);
-int bpf_program__fd(struct bpf_program *prog);
-int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
- int instance);
-int bpf_program__pin(struct bpf_program *prog, const char *path);
+LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
+ __u32 kern_version);
+LIBBPF_API int bpf_program__fd(struct bpf_program *prog);
+LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
+ const char *path,
+ int instance);
+LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);
+LIBBPF_API void bpf_program__unload(struct bpf_program *prog);
struct bpf_insn;
@@ -189,34 +193,36 @@ typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
struct bpf_insn *insns, int insns_cnt,
struct bpf_prog_prep_result *res);
-int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
- bpf_program_prep_t prep);
+LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
+ bpf_program_prep_t prep);
-int bpf_program__nth_fd(struct bpf_program *prog, int n);
+LIBBPF_API int bpf_program__nth_fd(struct bpf_program *prog, int n);
/*
* Adjust type of BPF program. Default is kprobe.
*/
-int bpf_program__set_socket_filter(struct bpf_program *prog);
-int bpf_program__set_tracepoint(struct bpf_program *prog);
-int bpf_program__set_raw_tracepoint(struct bpf_program *prog);
-int bpf_program__set_kprobe(struct bpf_program *prog);
-int bpf_program__set_sched_cls(struct bpf_program *prog);
-int bpf_program__set_sched_act(struct bpf_program *prog);
-int bpf_program__set_xdp(struct bpf_program *prog);
-int bpf_program__set_perf_event(struct bpf_program *prog);
-void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type);
-void bpf_program__set_expected_attach_type(struct bpf_program *prog,
- enum bpf_attach_type type);
-
-bool bpf_program__is_socket_filter(struct bpf_program *prog);
-bool bpf_program__is_tracepoint(struct bpf_program *prog);
-bool bpf_program__is_raw_tracepoint(struct bpf_program *prog);
-bool bpf_program__is_kprobe(struct bpf_program *prog);
-bool bpf_program__is_sched_cls(struct bpf_program *prog);
-bool bpf_program__is_sched_act(struct bpf_program *prog);
-bool bpf_program__is_xdp(struct bpf_program *prog);
-bool bpf_program__is_perf_event(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
+LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
+ enum bpf_prog_type type);
+LIBBPF_API void
+bpf_program__set_expected_attach_type(struct bpf_program *prog,
+ enum bpf_attach_type type);
+
+LIBBPF_API bool bpf_program__is_socket_filter(struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_tracepoint(struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_raw_tracepoint(struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_kprobe(struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_sched_cls(struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_sched_act(struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_xdp(struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_perf_event(struct bpf_program *prog);
/*
* No need for __attribute__((packed)), all members of 'bpf_map_def'
@@ -237,39 +243,39 @@ struct bpf_map_def {
* so no need to worry about a name clash.
*/
struct bpf_map;
-struct bpf_map *
+LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name);
/*
* Get bpf_map through the offset of corresponding struct bpf_map_def
* in the BPF object file.
*/
-struct bpf_map *
+LIBBPF_API struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);
-struct bpf_map *
+LIBBPF_API struct bpf_map *
bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
#define bpf_map__for_each(pos, obj) \
for ((pos) = bpf_map__next(NULL, (obj)); \
(pos) != NULL; \
(pos) = bpf_map__next((pos), (obj)))
-int bpf_map__fd(struct bpf_map *map);
-const struct bpf_map_def *bpf_map__def(struct bpf_map *map);
-const char *bpf_map__name(struct bpf_map *map);
-__u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
-__u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
+LIBBPF_API int bpf_map__fd(struct bpf_map *map);
+LIBBPF_API const struct bpf_map_def *bpf_map__def(struct bpf_map *map);
+LIBBPF_API const char *bpf_map__name(struct bpf_map *map);
+LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
+LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
-int bpf_map__set_priv(struct bpf_map *map, void *priv,
- bpf_map_clear_priv_t clear_priv);
-void *bpf_map__priv(struct bpf_map *map);
-int bpf_map__reuse_fd(struct bpf_map *map, int fd);
-bool bpf_map__is_offload_neutral(struct bpf_map *map);
-void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
-int bpf_map__pin(struct bpf_map *map, const char *path);
+LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
+ bpf_map_clear_priv_t clear_priv);
+LIBBPF_API void *bpf_map__priv(struct bpf_map *map);
+LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
+LIBBPF_API bool bpf_map__is_offload_neutral(struct bpf_map *map);
+LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
+LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
-long libbpf_get_error(const void *ptr);
+LIBBPF_API long libbpf_get_error(const void *ptr);
struct bpf_prog_load_attr {
const char *file;
@@ -278,12 +284,12 @@ struct bpf_prog_load_attr {
int ifindex;
};
-int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
- struct bpf_object **pobj, int *prog_fd);
-int bpf_prog_load(const char *file, enum bpf_prog_type type,
- struct bpf_object **pobj, int *prog_fd);
+LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
+ struct bpf_object **pobj, int *prog_fd);
+LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
+ struct bpf_object **pobj, int *prog_fd);
-int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
+LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
enum bpf_perf_event_ret {
LIBBPF_PERF_EVENT_DONE = 0,
@@ -291,10 +297,24 @@ enum bpf_perf_event_ret {
LIBBPF_PERF_EVENT_CONT = -2,
};
-typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(void *event,
- void *priv);
-int bpf_perf_event_read_simple(void *mem, unsigned long size,
- unsigned long page_size,
- void **buf, size_t *buf_len,
- bpf_perf_event_print_t fn, void *priv);
-#endif
+struct perf_event_header;
+typedef enum bpf_perf_event_ret
+ (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
+ void *private_data);
+LIBBPF_API enum bpf_perf_event_ret
+bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
+ void **copy_mem, size_t *copy_size,
+ bpf_perf_event_print_t fn, void *private_data);
+
+struct nlattr;
+typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
+int libbpf_netlink_open(unsigned int *nl_pid);
+int libbpf_nl_get_link(int sock, unsigned int nl_pid,
+ libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie);
+int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex,
+ libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie);
+int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
+ libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie);
+int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
+ libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie);
+#endif /* __LIBBPF_LIBBPF_H */
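
Taken together, the declarations exported above are enough for a small standalone loader. A hedged sketch, assuming libbpf is linked in; "prog.o" and the ifindex are placeholders, not part of this patch:

#include "libbpf.h"	/* pulls in <linux/bpf.h> for the prog type enums */

static int load_and_attach_xdp(int ifindex)
{
	struct bpf_object *obj;
	int prog_fd, err;

	err = bpf_prog_load("prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err)
		return err;

	/* flags == 0: unconditionally replace any XDP program on the device */
	return bpf_set_link_xdp_fd(ifindex, prog_fd, 0);
}
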
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
index d9ba851bd7f9..d83b17f8435c 100644
--- a/tools/lib/bpf/libbpf_errno.c
+++ b/tools/lib/bpf/libbpf_errno.c
@@ -1,23 +1,10 @@
-// SPDX-License-Identifier: LGPL-2.1
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
* Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
* Copyright (C) 2015 Huawei Inc.
* Copyright (C) 2017 Nicira, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses>
*/
#include <stdio.h>
@@ -42,6 +29,7 @@ static const char *libbpf_strerror_table[NR_ERRNO] = {
[ERRCODE_OFFSET(PROGTYPE)] = "Kernel doesn't support this program type",
[ERRCODE_OFFSET(WRNGPID)] = "Wrong pid in netlink message",
[ERRCODE_OFFSET(INVSEQ)] = "Invalid netlink sequence",
+ [ERRCODE_OFFSET(NLPARSE)] = "Incorrect netlink message parsing",
};
int libbpf_strerror(int err, char *buf, size_t size)
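
libbpf_strerror() resolves the table above for libbpf's private error codes and falls back to strerror_r() for plain errno values, so callers can pass either kind. A hedged sketch using the newly added NLPARSE code:

char msg[128];

libbpf_strerror(-LIBBPF_ERRNO__NLPARSE, msg, sizeof(msg));
fprintf(stderr, "libbpf: %s\n", msg);	/* "Incorrect netlink message parsing" */
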
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
new file mode 100644
index 000000000000..0ce67aea8f3b
--- /dev/null
+++ b/tools/lib/bpf/netlink.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2018 Facebook */
+
+#include <stdlib.h>
+#include <memory.h>
+#include <unistd.h>
+#include <linux/bpf.h>
+#include <linux/rtnetlink.h>
+#include <sys/socket.h>
+#include <errno.h>
+#include <time.h>
+
+#include "bpf.h"
+#include "libbpf.h"
+#include "nlattr.h"
+
+#ifndef SOL_NETLINK
+#define SOL_NETLINK 270
+#endif
+
+typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
+ void *cookie);
+
+int libbpf_netlink_open(__u32 *nl_pid)
+{
+ struct sockaddr_nl sa;
+ socklen_t addrlen;
+ int one = 1, ret;
+ int sock;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.nl_family = AF_NETLINK;
+
+ sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock < 0)
+ return -errno;
+
+ if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
+ &one, sizeof(one)) < 0) {
+ fprintf(stderr, "Netlink error reporting not supported\n");
+ }
+
+ if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ addrlen = sizeof(sa);
+ if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ if (addrlen != sizeof(sa)) {
+ ret = -LIBBPF_ERRNO__INTERNAL;
+ goto cleanup;
+ }
+
+ *nl_pid = sa.nl_pid;
+ return sock;
+
+cleanup:
+ close(sock);
+ return ret;
+}
+
+static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
+ __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn,
+ void *cookie)
+{
+ bool multipart = true;
+ struct nlmsgerr *err;
+ struct nlmsghdr *nh;
+ char buf[4096];
+ int len, ret;
+
+ while (multipart) {
+ multipart = false;
+ len = recv(sock, buf, sizeof(buf), 0);
+ if (len < 0) {
+ ret = -errno;
+ goto done;
+ }
+
+ if (len == 0)
+ break;
+
+ for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
+ nh = NLMSG_NEXT(nh, len)) {
+ if (nh->nlmsg_pid != nl_pid) {
+ ret = -LIBBPF_ERRNO__WRNGPID;
+ goto done;
+ }
+ if (nh->nlmsg_seq != seq) {
+ ret = -LIBBPF_ERRNO__INVSEQ;
+ goto done;
+ }
+ if (nh->nlmsg_flags & NLM_F_MULTI)
+ multipart = true;
+ switch (nh->nlmsg_type) {
+ case NLMSG_ERROR:
+ err = (struct nlmsgerr *)NLMSG_DATA(nh);
+ if (!err->error)
+ continue;
+ ret = err->error;
+ libbpf_nla_dump_errormsg(nh);
+ goto done;
+ case NLMSG_DONE:
+ return 0;
+ default:
+ break;
+ }
+ if (_fn) {
+ ret = _fn(nh, fn, cookie);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ ret = 0;
+done:
+ return ret;
+}
+
+int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
+{
+ int sock, seq = 0, ret;
+ struct nlattr *nla, *nla_xdp;
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg ifinfo;
+ char attrbuf[64];
+ } req;
+ __u32 nl_pid;
+
+ sock = libbpf_netlink_open(&nl_pid);
+ if (sock < 0)
+ return sock;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_type = RTM_SETLINK;
+ req.nh.nlmsg_pid = 0;
+ req.nh.nlmsg_seq = ++seq;
+ req.ifinfo.ifi_family = AF_UNSPEC;
+ req.ifinfo.ifi_index = ifindex;
+
+ /* start nested attribute for XDP */
+ nla = (struct nlattr *)(((char *)&req)
+ + NLMSG_ALIGN(req.nh.nlmsg_len));
+ nla->nla_type = NLA_F_NESTED | IFLA_XDP;
+ nla->nla_len = NLA_HDRLEN;
+
+ /* add XDP fd */
+ nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
+ nla_xdp->nla_type = IFLA_XDP_FD;
+ nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
+ memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
+ nla->nla_len += nla_xdp->nla_len;
+
+ /* if user passed in any flags, add those too */
+ if (flags) {
+ nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
+ nla_xdp->nla_type = IFLA_XDP_FLAGS;
+ nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
+ memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
+ nla->nla_len += nla_xdp->nla_len;
+ }
+
+ req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
+
+ if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+ ret = bpf_netlink_recv(sock, nl_pid, seq, NULL, NULL, NULL);
+
+cleanup:
+ close(sock);
+ return ret;
+}
+
+static int __dump_link_nlmsg(struct nlmsghdr *nlh,
+ libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
+{
+ struct nlattr *tb[IFLA_MAX + 1], *attr;
+ struct ifinfomsg *ifi = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi));
+ attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi)));
+ if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_link_nlmsg(cookie, ifi, tb);
+}
+
+int libbpf_nl_get_link(int sock, unsigned int nl_pid,
+ libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct ifinfomsg ifm;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlh.nlmsg_type = RTM_GETLINK,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .ifm.ifi_family = AF_PACKET,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return bpf_netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg,
+ dump_link_nlmsg, cookie);
+}
+
+static int __dump_class_nlmsg(struct nlmsghdr *nlh,
+ libbpf_dump_nlmsg_t dump_class_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_class_nlmsg(cookie, t, tb);
+}
+
+int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex,
+ libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETTCLASS,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return bpf_netlink_recv(sock, nl_pid, seq, __dump_class_nlmsg,
+ dump_class_nlmsg, cookie);
+}
+
+static int __dump_qdisc_nlmsg(struct nlmsghdr *nlh,
+ libbpf_dump_nlmsg_t dump_qdisc_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_qdisc_nlmsg(cookie, t, tb);
+}
+
+int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
+ libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETQDISC,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return bpf_netlink_recv(sock, nl_pid, seq, __dump_qdisc_nlmsg,
+ dump_qdisc_nlmsg, cookie);
+}
+
+static int __dump_filter_nlmsg(struct nlmsghdr *nlh,
+ libbpf_dump_nlmsg_t dump_filter_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_filter_nlmsg(cookie, t, tb);
+}
+
+int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
+ libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETTFILTER,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ .t.tcm_parent = handle,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return bpf_netlink_recv(sock, nl_pid, seq, __dump_filter_nlmsg,
+ dump_filter_nlmsg, cookie);
+}
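
All four dump helpers above share one shape: send an RTM_GET* dump request, then hand each message plus its parsed attribute table to the caller's callback. A hedged sketch that lists interface names; it assumes <stdio.h>, <unistd.h>, <linux/rtnetlink.h> and the library-internal "nlattr.h" for libbpf_nla_getattr_str():

static int show_link(void *cookie, void *msg, struct nlattr **tb)
{
	struct ifinfomsg *ifi = msg;

	if (tb[IFLA_IFNAME])
		printf("%d: %s\n", ifi->ifi_index,
		       libbpf_nla_getattr_str(tb[IFLA_IFNAME]));
	return 0;
}

int dump_links(void)
{
	__u32 nl_pid;
	int sock, err;

	sock = libbpf_netlink_open(&nl_pid);
	if (sock < 0)
		return sock;
	err = libbpf_nl_get_link(sock, nl_pid, show_link, NULL);
	close(sock);
	return err;
}
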
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 4719434278b2..1e69c0c8d413 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -1,13 +1,8 @@
-// SPDX-License-Identifier: LGPL-2.1
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* NETLINK Netlink attributes
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation version 2.1
- * of the License.
- *
* Copyright (c) 2003-2013 Thomas Graf <tgraf@suug.ch>
*/
@@ -17,20 +12,15 @@
#include <string.h>
#include <stdio.h>
-static uint16_t nla_attr_minlen[NLA_TYPE_MAX+1] = {
- [NLA_U8] = sizeof(uint8_t),
- [NLA_U16] = sizeof(uint16_t),
- [NLA_U32] = sizeof(uint32_t),
- [NLA_U64] = sizeof(uint64_t),
- [NLA_STRING] = 1,
- [NLA_FLAG] = 0,
+static uint16_t nla_attr_minlen[LIBBPF_NLA_TYPE_MAX+1] = {
+ [LIBBPF_NLA_U8] = sizeof(uint8_t),
+ [LIBBPF_NLA_U16] = sizeof(uint16_t),
+ [LIBBPF_NLA_U32] = sizeof(uint32_t),
+ [LIBBPF_NLA_U64] = sizeof(uint64_t),
+ [LIBBPF_NLA_STRING] = 1,
+ [LIBBPF_NLA_FLAG] = 0,
};
-static int nla_len(const struct nlattr *nla)
-{
- return nla->nla_len - NLA_HDRLEN;
-}
-
static struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
{
int totlen = NLA_ALIGN(nla->nla_len);
@@ -46,20 +36,15 @@ static int nla_ok(const struct nlattr *nla, int remaining)
nla->nla_len <= remaining;
}
-static void *nla_data(const struct nlattr *nla)
-{
- return (char *) nla + NLA_HDRLEN;
-}
-
static int nla_type(const struct nlattr *nla)
{
return nla->nla_type & NLA_TYPE_MASK;
}
static int validate_nla(struct nlattr *nla, int maxtype,
- struct nla_policy *policy)
+ struct libbpf_nla_policy *policy)
{
- struct nla_policy *pt;
+ struct libbpf_nla_policy *pt;
unsigned int minlen = 0;
int type = nla_type(nla);
@@ -68,23 +53,24 @@ static int validate_nla(struct nlattr *nla, int maxtype,
pt = &policy[type];
- if (pt->type > NLA_TYPE_MAX)
+ if (pt->type > LIBBPF_NLA_TYPE_MAX)
return 0;
if (pt->minlen)
minlen = pt->minlen;
- else if (pt->type != NLA_UNSPEC)
+ else if (pt->type != LIBBPF_NLA_UNSPEC)
minlen = nla_attr_minlen[pt->type];
- if (nla_len(nla) < minlen)
+ if (libbpf_nla_len(nla) < minlen)
return -1;
- if (pt->maxlen && nla_len(nla) > pt->maxlen)
+ if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen)
return -1;
- if (pt->type == NLA_STRING) {
- char *data = nla_data(nla);
- if (data[nla_len(nla) - 1] != '\0')
+ if (pt->type == LIBBPF_NLA_STRING) {
+ char *data = libbpf_nla_data(nla);
+
+ if (data[libbpf_nla_len(nla) - 1] != '\0')
return -1;
}
@@ -114,15 +100,15 @@ static inline int nlmsg_len(const struct nlmsghdr *nlh)
* @see nla_validate
* @return 0 on success or a negative error code.
*/
-static int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
- struct nla_policy *policy)
+int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
+ int len, struct libbpf_nla_policy *policy)
{
struct nlattr *nla;
int rem, err;
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
- nla_for_each_attr(nla, head, len, rem) {
+ libbpf_nla_for_each_attr(nla, head, len, rem) {
int type = nla_type(nla);
if (type > maxtype)
@@ -146,12 +132,33 @@ errout:
return err;
}
+/**
+ * Create attribute index based on nested attribute
+ * @arg tb Index array to be filled (maxtype+1 elements).
+ * @arg maxtype Maximum attribute type expected and accepted.
+ * @arg nla Nested Attribute.
+ * @arg policy Attribute validation policy.
+ *
+ * Feeds the stream of attributes nested into the specified attribute
+ * to libbpf_nla_parse().
+ *
+ * @see libbpf_nla_parse
+ * @return 0 on success or a negative error code.
+ */
+int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype,
+ struct nlattr *nla,
+ struct libbpf_nla_policy *policy)
+{
+ return libbpf_nla_parse(tb, maxtype, libbpf_nla_data(nla),
+ libbpf_nla_len(nla), policy);
+}
+
/* dump netlink extended ack error message */
-int nla_dump_errormsg(struct nlmsghdr *nlh)
+int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh)
{
- struct nla_policy extack_policy[NLMSGERR_ATTR_MAX + 1] = {
- [NLMSGERR_ATTR_MSG] = { .type = NLA_STRING },
- [NLMSGERR_ATTR_OFFS] = { .type = NLA_U32 },
+ struct libbpf_nla_policy extack_policy[NLMSGERR_ATTR_MAX + 1] = {
+ [NLMSGERR_ATTR_MSG] = { .type = LIBBPF_NLA_STRING },
+ [NLMSGERR_ATTR_OFFS] = { .type = LIBBPF_NLA_U32 },
};
struct nlattr *tb[NLMSGERR_ATTR_MAX + 1], *attr;
struct nlmsgerr *err;
@@ -172,14 +179,15 @@ int nla_dump_errormsg(struct nlmsghdr *nlh)
attr = (struct nlattr *) ((void *) err + hlen);
alen = nlh->nlmsg_len - hlen;
- if (nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen, extack_policy) != 0) {
+ if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen,
+ extack_policy) != 0) {
fprintf(stderr,
"Failed to parse extended error attributes\n");
return 0;
}
if (tb[NLMSGERR_ATTR_MSG])
- errmsg = (char *) nla_data(tb[NLMSGERR_ATTR_MSG]);
+ errmsg = (char *) libbpf_nla_data(tb[NLMSGERR_ATTR_MSG]);
fprintf(stderr, "Kernel error message: %s\n", errmsg);
diff --git a/tools/lib/bpf/nlattr.h b/tools/lib/bpf/nlattr.h
index 931a71f68f93..6cc3ac91690f 100644
--- a/tools/lib/bpf/nlattr.h
+++ b/tools/lib/bpf/nlattr.h
@@ -1,18 +1,13 @@
-/* SPDX-License-Identifier: LGPL-2.1 */
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/*
* NETLINK Netlink attributes
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation version 2.1
- * of the License.
- *
* Copyright (c) 2003-2013 Thomas Graf <tgraf@suug.ch>
*/
-#ifndef __NLATTR_H
-#define __NLATTR_H
+#ifndef __LIBBPF_NLATTR_H
+#define __LIBBPF_NLATTR_H
#include <stdint.h>
#include <linux/netlink.h>
@@ -23,19 +18,19 @@
* Standard attribute types to specify validation policy
*/
enum {
- NLA_UNSPEC, /**< Unspecified type, binary data chunk */
- NLA_U8, /**< 8 bit integer */
- NLA_U16, /**< 16 bit integer */
- NLA_U32, /**< 32 bit integer */
- NLA_U64, /**< 64 bit integer */
- NLA_STRING, /**< NUL terminated character string */
- NLA_FLAG, /**< Flag */
- NLA_MSECS, /**< Micro seconds (64bit) */
- NLA_NESTED, /**< Nested attributes */
- __NLA_TYPE_MAX,
+ LIBBPF_NLA_UNSPEC, /**< Unspecified type, binary data chunk */
+ LIBBPF_NLA_U8, /**< 8 bit integer */
+ LIBBPF_NLA_U16, /**< 16 bit integer */
+ LIBBPF_NLA_U32, /**< 32 bit integer */
+ LIBBPF_NLA_U64, /**< 64 bit integer */
+ LIBBPF_NLA_STRING, /**< NUL terminated character string */
+ LIBBPF_NLA_FLAG, /**< Flag */
+ LIBBPF_NLA_MSECS, /**< Micro seconds (64bit) */
+ LIBBPF_NLA_NESTED, /**< Nested attributes */
+ __LIBBPF_NLA_TYPE_MAX,
};
-#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
+#define LIBBPF_NLA_TYPE_MAX (__LIBBPF_NLA_TYPE_MAX - 1)
/**
* @ingroup attr
@@ -43,8 +38,8 @@ enum {
*
* See section @core_doc{core_attr_parse,Attribute Parsing} for more details.
*/
-struct nla_policy {
- /** Type of attribute or NLA_UNSPEC */
+struct libbpf_nla_policy {
+ /** Type of attribute or LIBBPF_NLA_UNSPEC */
uint16_t type;
/** Minimal length of payload required */
@@ -62,11 +57,50 @@ struct nla_policy {
* @arg len length of attribute stream
* @arg rem initialized to len, holds bytes currently remaining in stream
*/
-#define nla_for_each_attr(pos, head, len, rem) \
+#define libbpf_nla_for_each_attr(pos, head, len, rem) \
for (pos = head, rem = len; \
nla_ok(pos, rem); \
pos = nla_next(pos, &(rem)))
-int nla_dump_errormsg(struct nlmsghdr *nlh);
+/**
+ * libbpf_nla_data - head of payload
+ * @nla: netlink attribute
+ */
+static inline void *libbpf_nla_data(const struct nlattr *nla)
+{
+ return (char *) nla + NLA_HDRLEN;
+}
+
+static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla)
+{
+ return *(uint8_t *)libbpf_nla_data(nla);
+}
+
+static inline uint32_t libbpf_nla_getattr_u32(const struct nlattr *nla)
+{
+ return *(uint32_t *)libbpf_nla_data(nla);
+}
+
+static inline const char *libbpf_nla_getattr_str(const struct nlattr *nla)
+{
+ return (const char *)libbpf_nla_data(nla);
+}
+
+/**
+ * libbpf_nla_len - length of payload
+ * @nla: netlink attribute
+ */
+static inline int libbpf_nla_len(const struct nlattr *nla)
+{
+ return nla->nla_len - NLA_HDRLEN;
+}
+
+int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
+ int len, struct libbpf_nla_policy *policy);
+int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype,
+ struct nlattr *nla,
+ struct libbpf_nla_policy *policy);
+
+int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh);
-#endif /* __NLATTR_H */
+#endif /* __LIBBPF_NLATTR_H */
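
The renamed machinery mirrors the kernel's nla_parse(): a policy array indexed by attribute type drives validation, and the inline accessors read the payload. A hedged sketch with a made-up attribute layout (the MY_ATTR_* names are illustrative, and LIBBPF_ERRNO__NLPARSE is assumed from libbpf.h):

enum { MY_ATTR_UNSPEC, MY_ATTR_VALUE, MY_ATTR_NAME, __MY_ATTR_MAX };
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static int parse_my_attrs(struct nlattr *head, int len, uint32_t *value)
{
	struct libbpf_nla_policy policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_VALUE] = { .type = LIBBPF_NLA_U32 },
		[MY_ATTR_NAME]  = { .type = LIBBPF_NLA_STRING, .maxlen = 16 },
	};
	struct nlattr *tb[MY_ATTR_MAX + 1];

	if (libbpf_nla_parse(tb, MY_ATTR_MAX, head, len, policy) != 0)
		return -LIBBPF_ERRNO__NLPARSE;
	if (tb[MY_ATTR_VALUE])
		*value = libbpf_nla_getattr_u32(tb[MY_ATTR_VALUE]);
	return 0;
}
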
diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c
new file mode 100644
index 000000000000..00e48ac5b806
--- /dev/null
+++ b/tools/lib/bpf/str_error.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#undef _GNU_SOURCE
+#include <string.h>
+#include <stdio.h>
+#include "str_error.h"
+
+/*
+ * Wrapper to allow building on non-GNU systems such as Alpine Linux's musl
+ * libc, while checking the strerror_r() return value to avoid having to
+ * check it at every call site.
+ */
+char *libbpf_strerror_r(int err, char *dst, int len)
+{
+ int ret = strerror_r(err, dst, len);
+ if (ret)
+ snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
+ return dst;
+}
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h
new file mode 100644
index 000000000000..a139334d57b6
--- /dev/null
+++ b/tools/lib/bpf/str_error.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __LIBBPF_STR_ERROR_H
+#define __LIBBPF_STR_ERROR_H
+
+char *libbpf_strerror_r(int err, char *dst, int len);
+#endif /* __LIBBPF_STR_ERROR_H */
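
The #undef _GNU_SOURCE in str_error.c is the point of the wrapper: it selects the XSI, int-returning strerror_r() on glibc, matching what musl provides, so one code path serves both. A hedged usage sketch:

char buf[64];

/* always yields a printable string, even if strerror_r() itself fails */
fprintf(stderr, "failed: %s\n", libbpf_strerror_r(errno, buf, sizeof(buf)));
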
diff --git a/tools/lib/subcmd/pager.c b/tools/lib/subcmd/pager.c
index 9997a8805a82..e3d47b59b14d 100644
--- a/tools/lib/subcmd/pager.c
+++ b/tools/lib/subcmd/pager.c
@@ -23,6 +23,13 @@ void pager_init(const char *pager_env)
subcmd_config.pager_env = pager_env;
}
+static const char *forced_pager;
+
+void force_pager(const char *pager)
+{
+ forced_pager = pager;
+}
+
static void pager_preexec(void)
{
/*
@@ -66,7 +73,9 @@ void setup_pager(void)
const char *pager = getenv(subcmd_config.pager_env);
struct winsize sz;
- if (!isatty(1))
+ if (forced_pager)
+ pager = forced_pager;
+ if (!isatty(1) && !forced_pager)
return;
if (ioctl(1, TIOCGWINSZ, &sz) == 0)
pager_columns = sz.ws_col;
diff --git a/tools/lib/subcmd/pager.h b/tools/lib/subcmd/pager.h
index f1a53cf29880..a818964693ab 100644
--- a/tools/lib/subcmd/pager.h
+++ b/tools/lib/subcmd/pager.h
@@ -7,5 +7,6 @@ extern void pager_init(const char *pager_env);
extern void setup_pager(void);
extern int pager_in_use(void);
extern int pager_get_columns(void);
+extern void force_pager(const char *);
#endif /* __SUBCMD_PAGER_H */
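
With force_pager(), a tool can route output through a specific pager even when stdout is not a tty; the setup_pager() change above honors this by skipping the isatty() bail-out. A hedged sketch ("less" is just an example command):

force_pager("less");	/* overrides the pager environment variable */
setup_pager();		/* now pages even when fd 1 is a pipe */
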
diff --git a/tools/lib/traceevent/Build b/tools/lib/traceevent/Build
index c681d0575d16..ba54bfce0b0b 100644
--- a/tools/lib/traceevent/Build
+++ b/tools/lib/traceevent/Build
@@ -4,6 +4,8 @@ libtraceevent-y += trace-seq.o
libtraceevent-y += parse-filter.o
libtraceevent-y += parse-utils.o
libtraceevent-y += kbuffer-parse.o
+libtraceevent-y += tep_strerror.o
+libtraceevent-y += event-parse-api.o
plugin_jbd2-y += plugin_jbd2.o
plugin_hrtimer-y += plugin_hrtimer.o
diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c
new file mode 100644
index 000000000000..61f7149085ee
--- /dev/null
+++ b/tools/lib/traceevent/event-parse-api.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+
+#include "event-parse.h"
+#include "event-parse-local.h"
+#include "event-utils.h"
+
+/**
+ * tep_get_first_event - returns the first event in the events array
+ * @tep: a handle to the tep_handle
+ *
+ * This returns a pointer to the first element of the events array
+ * If @tep is NULL, NULL is returned.
+ */
+struct tep_event_format *tep_get_first_event(struct tep_handle *tep)
+{
+ if (tep && tep->events)
+ return tep->events[0];
+
+ return NULL;
+}
+
+/**
+ * tep_get_events_count - get the number of defined events
+ * @tep: a handle to the tep_handle
+ *
+ * This returns the number of elements in the events array
+ * If @tep is NULL, 0 is returned.
+ */
+int tep_get_events_count(struct tep_handle *tep)
+{
+ if (tep)
+ return tep->nr_events;
+ return 0;
+}
+
+/**
+ * tep_set_flag - set event parser flag
+ * @tep: a handle to the tep_handle
+ * @flag: flag, or combination of flags, to be set;
+ * can be any combination from enum tep_flag
+ *
+ * This sets a flag or combination of flags from enum tep_flag
+ */
+void tep_set_flag(struct tep_handle *tep, int flag)
+{
+ if (tep)
+ tep->flags |= flag;
+}
+
+unsigned short __tep_data2host2(struct tep_handle *pevent, unsigned short data)
+{
+ unsigned short swap;
+
+ if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 8) |
+ ((data & (0xffULL << 8)) >> 8);
+
+ return swap;
+}
+
+unsigned int __tep_data2host4(struct tep_handle *pevent, unsigned int data)
+{
+ unsigned int swap;
+
+ if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 24) |
+ ((data & (0xffULL << 8)) << 8) |
+ ((data & (0xffULL << 16)) >> 8) |
+ ((data & (0xffULL << 24)) >> 24);
+
+ return swap;
+}
+
+unsigned long long
+__tep_data2host8(struct tep_handle *pevent, unsigned long long data)
+{
+ unsigned long long swap;
+
+ if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 56) |
+ ((data & (0xffULL << 8)) << 40) |
+ ((data & (0xffULL << 16)) << 24) |
+ ((data & (0xffULL << 24)) << 8) |
+ ((data & (0xffULL << 32)) >> 8) |
+ ((data & (0xffULL << 40)) >> 24) |
+ ((data & (0xffULL << 48)) >> 40) |
+ ((data & (0xffULL << 56)) >> 56);
+
+ return swap;
+}
+
+/**
+ * tep_get_header_page_size - get size of the header page
+ * @pevent: a handle to the tep_handle
+ *
+ * This returns the size of the header page
+ * If @pevent is NULL, 0 is returned.
+ */
+int tep_get_header_page_size(struct tep_handle *pevent)
+{
+ if (pevent)
+ return pevent->header_page_size_size;
+ return 0;
+}
+
+/**
+ * tep_get_cpus - get the number of CPUs
+ * @pevent: a handle to the tep_handle
+ *
+ * This returns the number of CPUs
+ * If @pevent is NULL, 0 is returned.
+ */
+int tep_get_cpus(struct tep_handle *pevent)
+{
+ if (pevent)
+ return pevent->cpus;
+ return 0;
+}
+
+/**
+ * tep_set_cpus - set the number of CPUs
+ * @pevent: a handle to the tep_handle
+ * @cpus: the number of CPUs to set
+ *
+ * This sets the number of CPUs
+ */
+void tep_set_cpus(struct tep_handle *pevent, int cpus)
+{
+ if (pevent)
+ pevent->cpus = cpus;
+}
+
+/**
+ * tep_get_long_size - get the size of a long integer on the current machine
+ * @pevent: a handle to the tep_handle
+ *
+ * This returns the size of a long integer on the current machine
+ * If @pevent is NULL, 0 is returned.
+ */
+int tep_get_long_size(struct tep_handle *pevent)
+{
+ if (pevent)
+ return pevent->long_size;
+ return 0;
+}
+
+/**
+ * tep_set_long_size - set the size of a long integer on the current machine
+ * @pevent: a handle to the tep_handle
+ * @long_size: size, in bytes, of a long integer
+ *
+ * This sets the size of a long integer on the current machine
+ */
+void tep_set_long_size(struct tep_handle *pevent, int long_size)
+{
+ if (pevent)
+ pevent->long_size = long_size;
+}
+
+/**
+ * tep_get_page_size - get the size of a memory page on the current machine
+ * @pevent: a handle to the tep_handle
+ *
+ * This returns the size of a memory page on the current machine
+ * If @pevent is NULL, 0 is returned.
+ */
+int tep_get_page_size(struct tep_handle *pevent)
+{
+ if (pevent)
+ return pevent->page_size;
+ return 0;
+}
+
+/**
+ * tep_set_page_size - set the size of a memory page on the current machine
+ * @pevent: a handle to the tep_handle
+ * @_page_size: size of a memory page, in bytes
+ *
+ * This sets the size of a memory page on the current machine
+ */
+void tep_set_page_size(struct tep_handle *pevent, int _page_size)
+{
+ if (pevent)
+ pevent->page_size = _page_size;
+}
+
+/**
+ * tep_is_file_bigendian - return whether the file is in big endian order
+ * @pevent: a handle to the tep_handle
+ *
+ * This returns non-zero if the file is in big endian order
+ * If @pevent is NULL, 0 is returned.
+ */
+int tep_is_file_bigendian(struct tep_handle *pevent)
+{
+ if (pevent)
+ return pevent->file_bigendian;
+ return 0;
+}
+
+/**
+ * tep_set_file_bigendian - set if the file is in big endian order
+ * @pevent: a handle to the tep_handle
+ * @endian: non-zero if the file is in big endian order
+ *
+ * This sets if the file is in big endian order
+ */
+void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian)
+{
+ if (pevent)
+ pevent->file_bigendian = endian;
+}
+
+/**
+ * tep_is_host_bigendian - return whether the current host is big endian
+ * @pevent: a handle to the tep_handle
+ *
+ * This returns non-zero if the current host is big endian
+ * If @pevent is NULL, 0 is returned.
+ */
+int tep_is_host_bigendian(struct tep_handle *pevent)
+{
+ if (pevent)
+ return pevent->host_bigendian;
+ return 0;
+}
+
+/**
+ * tep_set_host_bigendian - set the order of the local host
+ * @pevent: a handle to the tep_handle
+ * @endian: non-zero if the local host has big endian order
+ *
+ * This sets the order of the local host
+ */
+void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian)
+{
+ if (pevent)
+ pevent->host_bigendian = endian;
+}
+
+/**
+ * tep_is_latency_format - return whether the latency output format is configured
+ * @pevent: a handle to the tep_handle
+ *
+ * This returns non-zero if the latency output format is configured
+ * If @pevent is NULL, 0 is returned.
+ */
+int tep_is_latency_format(struct tep_handle *pevent)
+{
+ if (pevent)
+ return pevent->latency_format;
+ return 0;
+}
+
+/**
+ * tep_set_latency_format - set the latency output format
+ * @pevent: a handle to the tep_handle
+ * @lat: non-zero to enable the latency output format
+ *
+ * This sets the latency output format
+ */
+void tep_set_latency_format(struct tep_handle *pevent, int lat)
+{
+ if (pevent)
+ pevent->latency_format = lat;
+}
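
Because every accessor above tolerates a NULL handle, configuration calls can be chained without per-call checks. A hedged sketch; tep_alloc() and the tep_endian enumerator names are assumed to come from event-parse.h:

struct tep_handle *tep = tep_alloc();	/* assumed allocator */

tep_set_cpus(tep, 4);
tep_set_long_size(tep, 8);
tep_set_page_size(tep, 4096);
tep_set_file_bigendian(tep, TEP_LITTLE_ENDIAN);	/* assumed enumerator */
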
diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h
new file mode 100644
index 000000000000..b9bddde577f8
--- /dev/null
+++ b/tools/lib/traceevent/event-parse-local.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
+/*
+ * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+
+#ifndef _PARSE_EVENTS_INT_H
+#define _PARSE_EVENTS_INT_H
+
+struct cmdline;
+struct cmdline_list;
+struct func_map;
+struct func_list;
+struct event_handler;
+struct func_resolver;
+
+struct tep_handle {
+ int ref_count;
+
+ int header_page_ts_offset;
+ int header_page_ts_size;
+ int header_page_size_offset;
+ int header_page_size_size;
+ int header_page_data_offset;
+ int header_page_data_size;
+ int header_page_overwrite;
+
+ enum tep_endian file_bigendian;
+ enum tep_endian host_bigendian;
+
+ int latency_format;
+
+ int old_format;
+
+ int cpus;
+ int long_size;
+ int page_size;
+
+ struct cmdline *cmdlines;
+ struct cmdline_list *cmdlist;
+ int cmdline_count;
+
+ struct func_map *func_map;
+ struct func_resolver *func_resolver;
+ struct func_list *funclist;
+ unsigned int func_count;
+
+ struct printk_map *printk_map;
+ struct printk_list *printklist;
+ unsigned int printk_count;
+
+
+ struct tep_event_format **events;
+ int nr_events;
+ struct tep_event_format **sort_events;
+ enum tep_event_sort_type last_type;
+
+ int type_offset;
+ int type_size;
+
+ int pid_offset;
+ int pid_size;
+
+ int pc_offset;
+ int pc_size;
+
+ int flags_offset;
+ int flags_size;
+
+ int ld_offset;
+ int ld_size;
+
+ int print_raw;
+
+ int test_filters;
+
+ int flags;
+
+ struct tep_format_field *bprint_ip_field;
+ struct tep_format_field *bprint_fmt_field;
+ struct tep_format_field *bprint_buf_field;
+
+ struct event_handler *handlers;
+ struct tep_function_handler *func_handlers;
+
+ /* cache */
+ struct tep_event_format *last_event;
+
+ char *trace_clock;
+};
+
+#endif /* _PARSE_EVENTS_INT_H */
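
With struct tep_handle moved into this local header, the type is opaque outside tools/lib/traceevent: out-of-library code that used to dereference members has to go through the accessors added above, e.g.:

/* pevent->latency_format = 1;	-- no longer compiles outside the library */
tep_set_latency_format(pevent, 1);
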
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index ce1e20227c64..3692f29fee46 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -18,12 +18,14 @@
#include <errno.h>
#include <stdint.h>
#include <limits.h>
-#include <linux/string.h>
#include <linux/time64.h>
#include <netinet/in.h>
#include "event-parse.h"
+
+#include "event-parse-local.h"
#include "event-utils.h"
+#include "trace-seq.h"
static const char *input_buf;
static unsigned long long input_buf_ptr;
@@ -94,7 +96,7 @@ struct tep_function_handler {
static unsigned long long
process_defined_func(struct trace_seq *s, void *data, int size,
- struct event_format *event, struct print_arg *arg);
+ struct tep_event_format *event, struct tep_print_arg *arg);
static void free_func_handle(struct tep_function_handler *func);
@@ -117,9 +119,9 @@ void breakpoint(void)
x++;
}
-struct print_arg *alloc_arg(void)
+struct tep_print_arg *alloc_arg(void)
{
- return calloc(1, sizeof(struct print_arg));
+ return calloc(1, sizeof(struct tep_print_arg));
}
struct cmdline {
@@ -737,16 +739,16 @@ void tep_print_printk(struct tep_handle *pevent)
}
}
-static struct event_format *alloc_event(void)
+static struct tep_event_format *alloc_event(void)
{
- return calloc(1, sizeof(struct event_format));
+ return calloc(1, sizeof(struct tep_event_format));
}
-static int add_event(struct tep_handle *pevent, struct event_format *event)
+static int add_event(struct tep_handle *pevent, struct tep_event_format *event)
{
int i;
- struct event_format **events = realloc(pevent->events, sizeof(event) *
- (pevent->nr_events + 1));
+ struct tep_event_format **events = realloc(pevent->events, sizeof(event) *
+ (pevent->nr_events + 1));
if (!events)
return -1;
@@ -769,20 +771,20 @@ static int add_event(struct tep_handle *pevent, struct event_format *event)
return 0;
}
-static int event_item_type(enum event_type type)
+static int event_item_type(enum tep_event_type type)
{
switch (type) {
- case EVENT_ITEM ... EVENT_SQUOTE:
+ case TEP_EVENT_ITEM ... TEP_EVENT_SQUOTE:
return 1;
- case EVENT_ERROR ... EVENT_DELIM:
+ case TEP_EVENT_ERROR ... TEP_EVENT_DELIM:
default:
return 0;
}
}
-static void free_flag_sym(struct print_flag_sym *fsym)
+static void free_flag_sym(struct tep_print_flag_sym *fsym)
{
- struct print_flag_sym *next;
+ struct tep_print_flag_sym *next;
while (fsym) {
next = fsym->next;
@@ -793,60 +795,60 @@ static void free_flag_sym(struct print_flag_sym *fsym)
}
}
-static void free_arg(struct print_arg *arg)
+static void free_arg(struct tep_print_arg *arg)
{
- struct print_arg *farg;
+ struct tep_print_arg *farg;
if (!arg)
return;
switch (arg->type) {
- case PRINT_ATOM:
+ case TEP_PRINT_ATOM:
free(arg->atom.atom);
break;
- case PRINT_FIELD:
+ case TEP_PRINT_FIELD:
free(arg->field.name);
break;
- case PRINT_FLAGS:
+ case TEP_PRINT_FLAGS:
free_arg(arg->flags.field);
free(arg->flags.delim);
free_flag_sym(arg->flags.flags);
break;
- case PRINT_SYMBOL:
+ case TEP_PRINT_SYMBOL:
free_arg(arg->symbol.field);
free_flag_sym(arg->symbol.symbols);
break;
- case PRINT_HEX:
- case PRINT_HEX_STR:
+ case TEP_PRINT_HEX:
+ case TEP_PRINT_HEX_STR:
free_arg(arg->hex.field);
free_arg(arg->hex.size);
break;
- case PRINT_INT_ARRAY:
+ case TEP_PRINT_INT_ARRAY:
free_arg(arg->int_array.field);
free_arg(arg->int_array.count);
free_arg(arg->int_array.el_size);
break;
- case PRINT_TYPE:
+ case TEP_PRINT_TYPE:
free(arg->typecast.type);
free_arg(arg->typecast.item);
break;
- case PRINT_STRING:
- case PRINT_BSTRING:
+ case TEP_PRINT_STRING:
+ case TEP_PRINT_BSTRING:
free(arg->string.string);
break;
- case PRINT_BITMASK:
+ case TEP_PRINT_BITMASK:
free(arg->bitmask.bitmask);
break;
- case PRINT_DYNAMIC_ARRAY:
- case PRINT_DYNAMIC_ARRAY_LEN:
+ case TEP_PRINT_DYNAMIC_ARRAY:
+ case TEP_PRINT_DYNAMIC_ARRAY_LEN:
free(arg->dynarray.index);
break;
- case PRINT_OP:
+ case TEP_PRINT_OP:
free(arg->op.op);
free_arg(arg->op.left);
free_arg(arg->op.right);
break;
- case PRINT_FUNC:
+ case TEP_PRINT_FUNC:
while (arg->func.args) {
farg = arg->func.args;
arg->func.args = farg->next;
@@ -854,7 +856,7 @@ static void free_arg(struct print_arg *arg)
}
break;
- case PRINT_NULL:
+ case TEP_PRINT_NULL:
default:
break;
}
@@ -862,24 +864,24 @@ static void free_arg(struct print_arg *arg)
free(arg);
}
-static enum event_type get_type(int ch)
+static enum tep_event_type get_type(int ch)
{
if (ch == '\n')
- return EVENT_NEWLINE;
+ return TEP_EVENT_NEWLINE;
if (isspace(ch))
- return EVENT_SPACE;
+ return TEP_EVENT_SPACE;
if (isalnum(ch) || ch == '_')
- return EVENT_ITEM;
+ return TEP_EVENT_ITEM;
if (ch == '\'')
- return EVENT_SQUOTE;
+ return TEP_EVENT_SQUOTE;
if (ch == '"')
- return EVENT_DQUOTE;
+ return TEP_EVENT_DQUOTE;
if (!isprint(ch))
- return EVENT_NONE;
+ return TEP_EVENT_NONE;
if (ch == '(' || ch == ')' || ch == ',')
- return EVENT_DELIM;
+ return TEP_EVENT_DELIM;
- return EVENT_OP;
+ return TEP_EVENT_OP;
}
static int __read_char(void)
@@ -927,38 +929,38 @@ static int extend_token(char **tok, char *buf, int size)
return 0;
}
-static enum event_type force_token(const char *str, char **tok);
+static enum tep_event_type force_token(const char *str, char **tok);
-static enum event_type __read_token(char **tok)
+static enum tep_event_type __read_token(char **tok)
{
char buf[BUFSIZ];
int ch, last_ch, quote_ch, next_ch;
int i = 0;
int tok_size = 0;
- enum event_type type;
+ enum tep_event_type type;
*tok = NULL;
ch = __read_char();
if (ch < 0)
- return EVENT_NONE;
+ return TEP_EVENT_NONE;
type = get_type(ch);
- if (type == EVENT_NONE)
+ if (type == TEP_EVENT_NONE)
return type;
buf[i++] = ch;
switch (type) {
- case EVENT_NEWLINE:
- case EVENT_DELIM:
+ case TEP_EVENT_NEWLINE:
+ case TEP_EVENT_DELIM:
if (asprintf(tok, "%c", ch) < 0)
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
return type;
- case EVENT_OP:
+ case TEP_EVENT_OP:
switch (ch) {
case '-':
next_ch = __peek_char();
@@ -1001,8 +1003,8 @@ static enum event_type __read_token(char **tok)
buf[i++] = __read_char();
goto out;
- case EVENT_DQUOTE:
- case EVENT_SQUOTE:
+ case TEP_EVENT_DQUOTE:
+ case TEP_EVENT_SQUOTE:
/* don't keep quotes */
i--;
quote_ch = ch;
@@ -1014,7 +1016,7 @@ static enum event_type __read_token(char **tok)
tok_size += BUFSIZ;
if (extend_token(tok, buf, tok_size) < 0)
- return EVENT_NONE;
+ return TEP_EVENT_NONE;
i = 0;
}
last_ch = ch;
@@ -1031,7 +1033,7 @@ static enum event_type __read_token(char **tok)
* For strings (double quotes) check the next token.
 * If it is another string, concatenate the two.
*/
- if (type == EVENT_DQUOTE) {
+ if (type == TEP_EVENT_DQUOTE) {
unsigned long long save_input_buf_ptr = input_buf_ptr;
do {
@@ -1044,8 +1046,8 @@ static enum event_type __read_token(char **tok)
goto out;
- case EVENT_ERROR ... EVENT_SPACE:
- case EVENT_ITEM:
+ case TEP_EVENT_ERROR ... TEP_EVENT_SPACE:
+ case TEP_EVENT_ITEM:
default:
break;
}
@@ -1056,7 +1058,7 @@ static enum event_type __read_token(char **tok)
tok_size += BUFSIZ;
if (extend_token(tok, buf, tok_size) < 0)
- return EVENT_NONE;
+ return TEP_EVENT_NONE;
i = 0;
}
ch = __read_char();
@@ -1066,9 +1068,9 @@ static enum event_type __read_token(char **tok)
out:
buf[i] = 0;
if (extend_token(tok, buf, tok_size + i + 1) < 0)
- return EVENT_NONE;
+ return TEP_EVENT_NONE;
- if (type == EVENT_ITEM) {
+ if (type == TEP_EVENT_ITEM) {
/*
 * Older versions of the kernel have a bug that
* creates invalid symbols and will break the mac80211
@@ -1095,12 +1097,12 @@ static enum event_type __read_token(char **tok)
return type;
}
-static enum event_type force_token(const char *str, char **tok)
+static enum tep_event_type force_token(const char *str, char **tok)
{
const char *save_input_buf;
unsigned long long save_input_buf_ptr;
unsigned long long save_input_buf_siz;
- enum event_type type;
+ enum tep_event_type type;
/* save off the current input pointers */
save_input_buf = input_buf;
@@ -1125,13 +1127,13 @@ static void free_token(char *tok)
free(tok);
}
-static enum event_type read_token(char **tok)
+static enum tep_event_type read_token(char **tok)
{
- enum event_type type;
+ enum tep_event_type type;
for (;;) {
type = __read_token(tok);
- if (type != EVENT_SPACE)
+ if (type != TEP_EVENT_SPACE)
return type;
free_token(*tok);
@@ -1139,7 +1141,7 @@ static enum event_type read_token(char **tok)
/* not reached */
*tok = NULL;
- return EVENT_NONE;
+ return TEP_EVENT_NONE;
}
/**
@@ -1151,7 +1153,7 @@ static enum event_type read_token(char **tok)
*
* Returns the token type.
*/
-enum event_type tep_read_token(char **tok)
+enum tep_event_type tep_read_token(char **tok)
{
return read_token(tok);
}
@@ -1166,13 +1168,13 @@ void tep_free_token(char *token)
}
/* no newline */
-static enum event_type read_token_item(char **tok)
+static enum tep_event_type read_token_item(char **tok)
{
- enum event_type type;
+ enum tep_event_type type;
for (;;) {
type = __read_token(tok);
- if (type != EVENT_SPACE && type != EVENT_NEWLINE)
+ if (type != TEP_EVENT_SPACE && type != TEP_EVENT_NEWLINE)
return type;
free_token(*tok);
*tok = NULL;
@@ -1180,10 +1182,10 @@ static enum event_type read_token_item(char **tok)
/* not reached */
*tok = NULL;
- return EVENT_NONE;
+ return TEP_EVENT_NONE;
}
-static int test_type(enum event_type type, enum event_type expect)
+static int test_type(enum tep_event_type type, enum tep_event_type expect)
{
if (type != expect) {
do_warning("Error: expected type %d but read %d",
@@ -1193,8 +1195,8 @@ static int test_type(enum event_type type, enum event_type expect)
return 0;
}
-static int test_type_token(enum event_type type, const char *token,
- enum event_type expect, const char *expect_tok)
+static int test_type_token(enum tep_event_type type, const char *token,
+ enum tep_event_type expect, const char *expect_tok)
{
if (type != expect) {
do_warning("Error: expected type %d but read %d",
@@ -1210,9 +1212,9 @@ static int test_type_token(enum event_type type, const char *token,
return 0;
}
-static int __read_expect_type(enum event_type expect, char **tok, int newline_ok)
+static int __read_expect_type(enum tep_event_type expect, char **tok, int newline_ok)
{
- enum event_type type;
+ enum tep_event_type type;
if (newline_ok)
type = read_token(tok);
@@ -1221,15 +1223,15 @@ static int __read_expect_type(enum event_type expect, char **tok, int newline_ok
return test_type(type, expect);
}
-static int read_expect_type(enum event_type expect, char **tok)
+static int read_expect_type(enum tep_event_type expect, char **tok)
{
return __read_expect_type(expect, tok, 1);
}
-static int __read_expected(enum event_type expect, const char *str,
+static int __read_expected(enum tep_event_type expect, const char *str,
int newline_ok)
{
- enum event_type type;
+ enum tep_event_type type;
char *token;
int ret;
@@ -1245,12 +1247,12 @@ static int __read_expected(enum event_type expect, const char *str,
return ret;
}
-static int read_expected(enum event_type expect, const char *str)
+static int read_expected(enum tep_event_type expect, const char *str)
{
return __read_expected(expect, str, 1);
}
-static int read_expected_item(enum event_type expect, const char *str)
+static int read_expected_item(enum tep_event_type expect, const char *str)
{
return __read_expected(expect, str, 0);
}
@@ -1259,13 +1261,13 @@ static char *event_read_name(void)
{
char *token;
- if (read_expected(EVENT_ITEM, "name") < 0)
+ if (read_expected(TEP_EVENT_ITEM, "name") < 0)
return NULL;
- if (read_expected(EVENT_OP, ":") < 0)
+ if (read_expected(TEP_EVENT_OP, ":") < 0)
return NULL;
- if (read_expect_type(EVENT_ITEM, &token) < 0)
+ if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
goto fail;
return token;
@@ -1280,13 +1282,13 @@ static int event_read_id(void)
char *token;
int id;
- if (read_expected_item(EVENT_ITEM, "ID") < 0)
+ if (read_expected_item(TEP_EVENT_ITEM, "ID") < 0)
return -1;
- if (read_expected(EVENT_OP, ":") < 0)
+ if (read_expected(TEP_EVENT_OP, ":") < 0)
return -1;
- if (read_expect_type(EVENT_ITEM, &token) < 0)
+ if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
goto fail;
id = strtoul(token, NULL, 0);
@@ -1298,9 +1300,9 @@ static int event_read_id(void)
return -1;
}
-static int field_is_string(struct format_field *field)
+static int field_is_string(struct tep_format_field *field)
{
- if ((field->flags & FIELD_IS_ARRAY) &&
+ if ((field->flags & TEP_FIELD_IS_ARRAY) &&
(strstr(field->type, "char") || strstr(field->type, "u8") ||
strstr(field->type, "s8")))
return 1;
@@ -1308,7 +1310,7 @@ static int field_is_string(struct format_field *field)
return 0;
}
-static int field_is_dynamic(struct format_field *field)
+static int field_is_dynamic(struct tep_format_field *field)
{
if (strncmp(field->type, "__data_loc", 10) == 0)
return 1;
@@ -1316,7 +1318,7 @@ static int field_is_dynamic(struct format_field *field)
return 0;
}
-static int field_is_long(struct format_field *field)
+static int field_is_long(struct tep_format_field *field)
{
/* includes long long */
if (strstr(field->type, "long"))
@@ -1327,7 +1329,7 @@ static int field_is_long(struct format_field *field)
static unsigned int type_size(const char *name)
{
- /* This covers all FIELD_IS_STRING types. */
+ /* This covers all TEP_FIELD_IS_STRING types. */
static struct {
const char *type;
unsigned int size;
@@ -1353,10 +1355,10 @@ static unsigned int type_size(const char *name)
return 0;
}
-static int event_read_fields(struct event_format *event, struct format_field **fields)
+static int event_read_fields(struct tep_event_format *event, struct tep_format_field **fields)
{
- struct format_field *field = NULL;
- enum event_type type;
+ struct tep_format_field *field = NULL;
+ enum tep_event_type type;
char *token;
char *last_token;
int count = 0;
@@ -1365,14 +1367,14 @@ static int event_read_fields(struct event_format *event, struct format_field **f
unsigned int size_dynamic = 0;
type = read_token(&token);
- if (type == EVENT_NEWLINE) {
+ if (type == TEP_EVENT_NEWLINE) {
free_token(token);
return count;
}
count++;
- if (test_type_token(type, token, EVENT_ITEM, "field"))
+ if (test_type_token(type, token, TEP_EVENT_ITEM, "field"))
goto fail;
free_token(token);
@@ -1381,17 +1383,17 @@ static int event_read_fields(struct event_format *event, struct format_field **f
* The ftrace fields may still use the "special" name.
* Just ignore it.
*/
- if (event->flags & EVENT_FL_ISFTRACE &&
- type == EVENT_ITEM && strcmp(token, "special") == 0) {
+ if (event->flags & TEP_EVENT_FL_ISFTRACE &&
+ type == TEP_EVENT_ITEM && strcmp(token, "special") == 0) {
free_token(token);
type = read_token(&token);
}
- if (test_type_token(type, token, EVENT_OP, ":") < 0)
+ if (test_type_token(type, token, TEP_EVENT_OP, ":") < 0)
goto fail;
free_token(token);
- if (read_expect_type(EVENT_ITEM, &token) < 0)
+ if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
goto fail;
last_token = token;
@@ -1405,17 +1407,17 @@ static int event_read_fields(struct event_format *event, struct format_field **f
/* read the rest of the type */
for (;;) {
type = read_token(&token);
- if (type == EVENT_ITEM ||
- (type == EVENT_OP && strcmp(token, "*") == 0) ||
+ if (type == TEP_EVENT_ITEM ||
+ (type == TEP_EVENT_OP && strcmp(token, "*") == 0) ||
/*
* Some of the ftrace fields are broken and have
* an illegal "." in them.
*/
- (event->flags & EVENT_FL_ISFTRACE &&
- type == EVENT_OP && strcmp(token, ".") == 0)) {
+ (event->flags & TEP_EVENT_FL_ISFTRACE &&
+ type == TEP_EVENT_OP && strcmp(token, ".") == 0)) {
if (strcmp(token, "*") == 0)
- field->flags |= FIELD_IS_POINTER;
+ field->flags |= TEP_FIELD_IS_POINTER;
if (field->type) {
char *new_type;
@@ -1445,27 +1447,27 @@ static int event_read_fields(struct event_format *event, struct format_field **f
}
field->name = field->alias = last_token;
- if (test_type(type, EVENT_OP))
+ if (test_type(type, TEP_EVENT_OP))
goto fail;
if (strcmp(token, "[") == 0) {
- enum event_type last_type = type;
+ enum tep_event_type last_type = type;
char *brackets = token;
char *new_brackets;
int len;
- field->flags |= FIELD_IS_ARRAY;
+ field->flags |= TEP_FIELD_IS_ARRAY;
type = read_token(&token);
- if (type == EVENT_ITEM)
+ if (type == TEP_EVENT_ITEM)
field->arraylen = strtoul(token, NULL, 0);
else
field->arraylen = 0;
while (strcmp(token, "]") != 0) {
- if (last_type == EVENT_ITEM &&
- type == EVENT_ITEM)
+ if (last_type == TEP_EVENT_ITEM &&
+ type == TEP_EVENT_ITEM)
len = 2;
else
len = 1;
@@ -1486,7 +1488,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
field->arraylen = strtoul(token, NULL, 0);
free_token(token);
type = read_token(&token);
- if (type == EVENT_NONE) {
+ if (type == TEP_EVENT_NONE) {
do_warning_event(event, "failed to find token");
goto fail;
}
@@ -1509,7 +1511,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
* If the next token is not an OP, then it is of
* the format: type [] item;
*/
- if (type == EVENT_ITEM) {
+ if (type == TEP_EVENT_ITEM) {
char *new_type;
new_type = realloc(field->type,
strlen(field->type) +
@@ -1543,79 +1545,79 @@ static int event_read_fields(struct event_format *event, struct format_field **f
}
if (field_is_string(field))
- field->flags |= FIELD_IS_STRING;
+ field->flags |= TEP_FIELD_IS_STRING;
if (field_is_dynamic(field))
- field->flags |= FIELD_IS_DYNAMIC;
+ field->flags |= TEP_FIELD_IS_DYNAMIC;
if (field_is_long(field))
- field->flags |= FIELD_IS_LONG;
+ field->flags |= TEP_FIELD_IS_LONG;
- if (test_type_token(type, token, EVENT_OP, ";"))
+ if (test_type_token(type, token, TEP_EVENT_OP, ";"))
goto fail;
free_token(token);
- if (read_expected(EVENT_ITEM, "offset") < 0)
+ if (read_expected(TEP_EVENT_ITEM, "offset") < 0)
goto fail_expect;
- if (read_expected(EVENT_OP, ":") < 0)
+ if (read_expected(TEP_EVENT_OP, ":") < 0)
goto fail_expect;
- if (read_expect_type(EVENT_ITEM, &token))
+ if (read_expect_type(TEP_EVENT_ITEM, &token))
goto fail;
field->offset = strtoul(token, NULL, 0);
free_token(token);
- if (read_expected(EVENT_OP, ";") < 0)
+ if (read_expected(TEP_EVENT_OP, ";") < 0)
goto fail_expect;
- if (read_expected(EVENT_ITEM, "size") < 0)
+ if (read_expected(TEP_EVENT_ITEM, "size") < 0)
goto fail_expect;
- if (read_expected(EVENT_OP, ":") < 0)
+ if (read_expected(TEP_EVENT_OP, ":") < 0)
goto fail_expect;
- if (read_expect_type(EVENT_ITEM, &token))
+ if (read_expect_type(TEP_EVENT_ITEM, &token))
goto fail;
field->size = strtoul(token, NULL, 0);
free_token(token);
- if (read_expected(EVENT_OP, ";") < 0)
+ if (read_expected(TEP_EVENT_OP, ";") < 0)
goto fail_expect;
type = read_token(&token);
- if (type != EVENT_NEWLINE) {
+ if (type != TEP_EVENT_NEWLINE) {
/* newer versions of the kernel have a "signed" type */
- if (test_type_token(type, token, EVENT_ITEM, "signed"))
+ if (test_type_token(type, token, TEP_EVENT_ITEM, "signed"))
goto fail;
free_token(token);
- if (read_expected(EVENT_OP, ":") < 0)
+ if (read_expected(TEP_EVENT_OP, ":") < 0)
goto fail_expect;
- if (read_expect_type(EVENT_ITEM, &token))
+ if (read_expect_type(TEP_EVENT_ITEM, &token))
goto fail;
if (strtoul(token, NULL, 0))
- field->flags |= FIELD_IS_SIGNED;
+ field->flags |= TEP_FIELD_IS_SIGNED;
free_token(token);
- if (read_expected(EVENT_OP, ";") < 0)
+ if (read_expected(TEP_EVENT_OP, ";") < 0)
goto fail_expect;
- if (read_expect_type(EVENT_NEWLINE, &token))
+ if (read_expect_type(TEP_EVENT_NEWLINE, &token))
goto fail;
}
free_token(token);
- if (field->flags & FIELD_IS_ARRAY) {
+ if (field->flags & TEP_FIELD_IS_ARRAY) {
if (field->arraylen)
field->elementsize = field->size / field->arraylen;
- else if (field->flags & FIELD_IS_DYNAMIC)
+ else if (field->flags & TEP_FIELD_IS_DYNAMIC)
field->elementsize = size_dynamic;
- else if (field->flags & FIELD_IS_STRING)
+ else if (field->flags & TEP_FIELD_IS_STRING)
field->elementsize = 1;
- else if (field->flags & FIELD_IS_LONG)
+ else if (field->flags & TEP_FIELD_IS_LONG)
field->elementsize = event->pevent ?
event->pevent->long_size :
sizeof(long);
@@ -1640,18 +1642,18 @@ fail_expect:
return -1;
}
-static int event_read_format(struct event_format *event)
+static int event_read_format(struct tep_event_format *event)
{
char *token;
int ret;
- if (read_expected_item(EVENT_ITEM, "format") < 0)
+ if (read_expected_item(TEP_EVENT_ITEM, "format") < 0)
return -1;
- if (read_expected(EVENT_OP, ":") < 0)
+ if (read_expected(TEP_EVENT_OP, ":") < 0)
return -1;
- if (read_expect_type(EVENT_NEWLINE, &token))
+ if (read_expect_type(TEP_EVENT_NEWLINE, &token))
goto fail;
free_token(token);
@@ -1672,14 +1674,14 @@ static int event_read_format(struct event_format *event)
return -1;
}
-static enum event_type
-process_arg_token(struct event_format *event, struct print_arg *arg,
- char **tok, enum event_type type);
+static enum tep_event_type
+process_arg_token(struct tep_event_format *event, struct tep_print_arg *arg,
+ char **tok, enum tep_event_type type);
-static enum event_type
-process_arg(struct event_format *event, struct print_arg *arg, char **tok)
+static enum tep_event_type
+process_arg(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
{
- enum event_type type;
+ enum tep_event_type type;
char *token;
type = read_token(&token);
@@ -1688,32 +1690,32 @@ process_arg(struct event_format *event, struct print_arg *arg, char **tok)
return process_arg_token(event, arg, tok, type);
}
-static enum event_type
-process_op(struct event_format *event, struct print_arg *arg, char **tok);
+static enum tep_event_type
+process_op(struct tep_event_format *event, struct tep_print_arg *arg, char **tok);
/*
* For __print_symbolic() and __print_flags, we need to completely
* evaluate the first argument, which defines what to print next.
*/
-static enum event_type
-process_field_arg(struct event_format *event, struct print_arg *arg, char **tok)
+static enum tep_event_type
+process_field_arg(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
{
- enum event_type type;
+ enum tep_event_type type;
type = process_arg(event, arg, tok);
- while (type == EVENT_OP) {
+ while (type == TEP_EVENT_OP) {
type = process_op(event, arg, tok);
}
return type;
}
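A hedged sketch of why process_field_arg() loops on ops (field and flag names invented): in a fragment such as

	__print_flags(REC->flags & 0xff, "|", {1, "RD"}, {2, "WR"})

the first argument may carry trailing operations ("& 0xff"), and it must be fully reduced before the { value, name } pairs that follow can be parsed and matched against it; hence the loop that keeps calling process_op() while TEP_EVENT_OP tokens remain.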
-static enum event_type
-process_cond(struct event_format *event, struct print_arg *top, char **tok)
+static enum tep_event_type
+process_cond(struct tep_event_format *event, struct tep_print_arg *top, char **tok)
{
- struct print_arg *arg, *left, *right;
- enum event_type type;
+ struct tep_print_arg *arg, *left, *right;
+ enum tep_event_type type;
char *token = NULL;
arg = alloc_arg();
@@ -1728,7 +1730,7 @@ process_cond(struct event_format *event, struct print_arg *top, char **tok)
goto out_free;
}
- arg->type = PRINT_OP;
+ arg->type = TEP_PRINT_OP;
arg->op.left = left;
arg->op.right = right;
@@ -1736,16 +1738,16 @@ process_cond(struct event_format *event, struct print_arg *top, char **tok)
type = process_arg(event, left, &token);
again:
- if (type == EVENT_ERROR)
+ if (type == TEP_EVENT_ERROR)
goto out_free;
/* Handle other operations in the arguments */
- if (type == EVENT_OP && strcmp(token, ":") != 0) {
+ if (type == TEP_EVENT_OP && strcmp(token, ":") != 0) {
type = process_op(event, left, &token);
goto again;
}
- if (test_type_token(type, token, EVENT_OP, ":"))
+ if (test_type_token(type, token, TEP_EVENT_OP, ":"))
goto out_free;
arg->op.op = token;
@@ -1762,14 +1764,14 @@ out_free:
top->op.right = NULL;
free_token(token);
free_arg(arg);
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
-static enum event_type
-process_array(struct event_format *event, struct print_arg *top, char **tok)
+static enum tep_event_type
+process_array(struct tep_event_format *event, struct tep_print_arg *top, char **tok)
{
- struct print_arg *arg;
- enum event_type type;
+ struct tep_print_arg *arg;
+ enum tep_event_type type;
char *token = NULL;
arg = alloc_arg();
@@ -1777,12 +1779,12 @@ process_array(struct event_format *event, struct print_arg *top, char **tok)
do_warning_event(event, "%s: not enough memory!", __func__);
/* '*tok' is set to top->op.op. No need to free. */
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
*tok = NULL;
type = process_arg(event, arg, &token);
- if (test_type_token(type, token, EVENT_OP, "]"))
+ if (test_type_token(type, token, TEP_EVENT_OP, "]"))
goto out_free;
top->op.right = arg;
@@ -1796,7 +1798,7 @@ process_array(struct event_format *event, struct print_arg *top, char **tok)
out_free:
free_token(token);
free_arg(arg);
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
static int get_op_prio(char *op)
@@ -1854,11 +1856,11 @@ static int get_op_prio(char *op)
}
}
-static int set_op_prio(struct print_arg *arg)
+static int set_op_prio(struct tep_print_arg *arg)
{
/* single ops are the greatest */
- if (!arg->op.left || arg->op.left->type == PRINT_NULL)
+ if (!arg->op.left || arg->op.left->type == TEP_PRINT_NULL)
arg->op.prio = 0;
else
arg->op.prio = get_op_prio(arg->op.op);
@@ -1867,17 +1869,17 @@ static int set_op_prio(struct print_arg *arg)
}
/* Note, *tok does not get freed, but will most likely be saved */
-static enum event_type
-process_op(struct event_format *event, struct print_arg *arg, char **tok)
+static enum tep_event_type
+process_op(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
{
- struct print_arg *left, *right = NULL;
- enum event_type type;
+ struct tep_print_arg *left, *right = NULL;
+ enum tep_event_type type;
char *token;
/* the op is passed in via tok */
token = *tok;
- if (arg->type == PRINT_OP && !arg->op.left) {
+ if (arg->type == TEP_PRINT_OP && !arg->op.left) {
/* handle single op */
if (token[1]) {
do_warning_event(event, "bad op token %s", token);
@@ -1900,7 +1902,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
if (!left)
goto out_warn_free;
- left->type = PRINT_NULL;
+ left->type = TEP_PRINT_NULL;
arg->op.left = left;
right = alloc_arg();
@@ -1922,7 +1924,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
/* copy the top arg to the left */
*left = *arg;
- arg->type = PRINT_OP;
+ arg->type = TEP_PRINT_OP;
arg->op.op = token;
arg->op.left = left;
arg->op.prio = 0;
@@ -1956,13 +1958,13 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
/* copy the top arg to the left */
*left = *arg;
- arg->type = PRINT_OP;
+ arg->type = TEP_PRINT_OP;
arg->op.op = token;
arg->op.left = left;
arg->op.right = NULL;
if (set_op_prio(arg) == -1) {
- event->flags |= EVENT_FL_FAILED;
+ event->flags |= TEP_EVENT_FL_FAILED;
/* arg->op.op (= token) will be freed at out_free */
arg->op.op = NULL;
goto out_free;
@@ -1973,10 +1975,10 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
/* could just be a type pointer */
if ((strcmp(arg->op.op, "*") == 0) &&
- type == EVENT_DELIM && (strcmp(token, ")") == 0)) {
+ type == TEP_EVENT_DELIM && (strcmp(token, ")") == 0)) {
char *new_atom;
- if (left->type != PRINT_ATOM) {
+ if (left->type != TEP_PRINT_ATOM) {
do_warning_event(event, "bad pointer type");
goto out_free;
}
@@ -1999,16 +2001,16 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
goto out_warn_free;
type = process_arg_token(event, right, tok, type);
- if (type == EVENT_ERROR) {
+ if (type == TEP_EVENT_ERROR) {
free_arg(right);
/* token was freed in process_arg_token() via *tok */
token = NULL;
goto out_free;
}
- if (right->type == PRINT_OP &&
+ if (right->type == TEP_PRINT_OP &&
get_op_prio(arg->op.op) < get_op_prio(right->op.op)) {
- struct print_arg tmp;
+ struct tep_print_arg tmp;
/* rotate ops according to the priority */
arg->op.right = right->op.left;
@@ -2030,7 +2032,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
*left = *arg;
- arg->type = PRINT_OP;
+ arg->type = TEP_PRINT_OP;
arg->op.op = token;
arg->op.left = left;
@@ -2041,12 +2043,12 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
} else {
do_warning_event(event, "unknown op '%s'", token);
- event->flags |= EVENT_FL_FAILED;
+ event->flags |= TEP_EVENT_FL_FAILED;
/* the arg is now the left side */
goto out_free;
}
- if (type == EVENT_OP && strcmp(*tok, ":") != 0) {
+ if (type == TEP_EVENT_OP && strcmp(*tok, ":") != 0) {
int prio;
/* higher prios need to be closer to the root */
@@ -2065,34 +2067,34 @@ out_warn_free:
out_free:
free_token(token);
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
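A conceptual sketch of the "rotate ops" branch above (fields invented): single ops sit at priority 0 and tighter-binding ops get smaller values from get_op_prio(), so when the right-hand side has already been parsed into a looser-binding subtree, as can happen conceptually for "REC->a * REC->b + 1", the tree is rebuilt so the tighter op ends up deeper:

	/* as first linked:        after the rotation:
	 *        *                        +
	 *       / \                      / \
	 *      a   +          =>        *   1
	 *         / \                  / \
	 *        b   1                a   b
	 */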
-static enum event_type
-process_entry(struct event_format *event __maybe_unused, struct print_arg *arg,
+static enum tep_event_type
+process_entry(struct tep_event_format *event __maybe_unused, struct tep_print_arg *arg,
char **tok)
{
- enum event_type type;
+ enum tep_event_type type;
char *field;
char *token;
- if (read_expected(EVENT_OP, "->") < 0)
+ if (read_expected(TEP_EVENT_OP, "->") < 0)
goto out_err;
- if (read_expect_type(EVENT_ITEM, &token) < 0)
+ if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
goto out_free;
field = token;
- arg->type = PRINT_FIELD;
+ arg->type = TEP_PRINT_FIELD;
arg->field.name = field;
if (is_flag_field) {
arg->field.field = tep_find_any_field(event, arg->field.name);
- arg->field.field->flags |= FIELD_IS_FLAG;
+ arg->field.field->flags |= TEP_FIELD_IS_FLAG;
is_flag_field = 0;
} else if (is_symbolic_field) {
arg->field.field = tep_find_any_field(event, arg->field.name);
- arg->field.field->flags |= FIELD_IS_SYMBOLIC;
+ arg->field.field->flags |= TEP_FIELD_IS_SYMBOLIC;
is_symbolic_field = 0;
}
@@ -2105,14 +2107,14 @@ process_entry(struct event_format *event __maybe_unused, struct print_arg *arg,
free_token(token);
out_err:
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
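Illustratively, process_entry() is entered after the caller has already consumed a "REC" item (see the TEP_EVENT_ITEM case of process_arg_token() later in this file), so for a fragment like

	REC->prev_pid

it only needs to match the "->" op and the field-name item, yielding a TEP_PRINT_FIELD arg whose field pointer is resolved via tep_find_any_field(); the flag/symbolic markers set just before arrival are folded into the field's TEP_FIELD_IS_* flags.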
-static int alloc_and_process_delim(struct event_format *event, char *next_token,
- struct print_arg **print_arg)
+static int alloc_and_process_delim(struct tep_event_format *event, char *next_token,
+ struct tep_print_arg **print_arg)
{
- struct print_arg *field;
- enum event_type type;
+ struct tep_print_arg *field;
+ enum tep_event_type type;
char *token;
int ret = 0;
@@ -2125,7 +2127,7 @@ static int alloc_and_process_delim(struct event_format *event, char *next_token,
type = process_arg(event, field, &token);
- if (test_type_token(type, token, EVENT_DELIM, next_token)) {
+ if (test_type_token(type, token, TEP_EVENT_DELIM, next_token)) {
errno = EINVAL;
ret = -1;
free_arg(field);
@@ -2140,7 +2142,7 @@ out_free_token:
return ret;
}
-static char *arg_eval (struct print_arg *arg);
+static char *arg_eval (struct tep_print_arg *arg);
static unsigned long long
eval_type_str(unsigned long long val, const char *type, int pointer)
@@ -2237,9 +2239,9 @@ eval_type_str(unsigned long long val, const char *type, int pointer)
* Try to figure out the type.
*/
static unsigned long long
-eval_type(unsigned long long val, struct print_arg *arg, int pointer)
+eval_type(unsigned long long val, struct tep_print_arg *arg, int pointer)
{
- if (arg->type != PRINT_TYPE) {
+ if (arg->type != TEP_PRINT_TYPE) {
do_warning("expected type argument");
return 0;
}
@@ -2247,22 +2249,22 @@ eval_type(unsigned long long val, struct print_arg *arg, int pointer)
return eval_type_str(val, arg->typecast.type, pointer);
}
-static int arg_num_eval(struct print_arg *arg, long long *val)
+static int arg_num_eval(struct tep_print_arg *arg, long long *val)
{
long long left, right;
int ret = 1;
switch (arg->type) {
- case PRINT_ATOM:
+ case TEP_PRINT_ATOM:
*val = strtoll(arg->atom.atom, NULL, 0);
break;
- case PRINT_TYPE:
+ case TEP_PRINT_TYPE:
ret = arg_num_eval(arg->typecast.item, val);
if (!ret)
break;
*val = eval_type(*val, arg, 0);
break;
- case PRINT_OP:
+ case TEP_PRINT_OP:
switch (arg->op.op[0]) {
case '|':
ret = arg_num_eval(arg->op.left, &left);
@@ -2365,7 +2367,7 @@ static int arg_num_eval(struct print_arg *arg, long long *val)
break;
case '-':
/* check for negative */
- if (arg->op.left->type == PRINT_NULL)
+ if (arg->op.left->type == TEP_PRINT_NULL)
left = 0;
else
ret = arg_num_eval(arg->op.left, &left);
@@ -2377,7 +2379,7 @@ static int arg_num_eval(struct print_arg *arg, long long *val)
*val = left - right;
break;
case '+':
- if (arg->op.left->type == PRINT_NULL)
+ if (arg->op.left->type == TEP_PRINT_NULL)
left = 0;
else
ret = arg_num_eval(arg->op.left, &left);
@@ -2400,11 +2402,11 @@ static int arg_num_eval(struct print_arg *arg, long long *val)
}
break;
- case PRINT_NULL:
- case PRINT_FIELD ... PRINT_SYMBOL:
- case PRINT_STRING:
- case PRINT_BSTRING:
- case PRINT_BITMASK:
+ case TEP_PRINT_NULL:
+ case TEP_PRINT_FIELD ... TEP_PRINT_SYMBOL:
+ case TEP_PRINT_STRING:
+ case TEP_PRINT_BSTRING:
+ case TEP_PRINT_BITMASK:
default:
do_warning("invalid eval type %d", arg->type);
ret = 0;
@@ -2413,27 +2415,27 @@ static int arg_num_eval(struct print_arg *arg, long long *val)
return ret;
}
-static char *arg_eval (struct print_arg *arg)
+static char *arg_eval (struct tep_print_arg *arg)
{
long long val;
static char buf[20];
switch (arg->type) {
- case PRINT_ATOM:
+ case TEP_PRINT_ATOM:
return arg->atom.atom;
- case PRINT_TYPE:
+ case TEP_PRINT_TYPE:
return arg_eval(arg->typecast.item);
- case PRINT_OP:
+ case TEP_PRINT_OP:
if (!arg_num_eval(arg, &val))
break;
sprintf(buf, "%lld", val);
return buf;
- case PRINT_NULL:
- case PRINT_FIELD ... PRINT_SYMBOL:
- case PRINT_STRING:
- case PRINT_BSTRING:
- case PRINT_BITMASK:
+ case TEP_PRINT_NULL:
+ case TEP_PRINT_FIELD ... TEP_PRINT_SYMBOL:
+ case TEP_PRINT_STRING:
+ case TEP_PRINT_BSTRING:
+ case TEP_PRINT_BITMASK:
default:
do_warning("invalid eval type %d", arg->type);
break;
@@ -2442,19 +2444,19 @@ static char *arg_eval (struct print_arg *arg)
return NULL;
}
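A hedged example of what arg_num_eval()/arg_eval() are for: flag and symbol values written as constant expressions still end up as plain numbers, so something like

	{ (1 << 2), "FOO" }

is reduced through the TEP_PRINT_OP case to the string "4" before being stored by process_fields() below. Only atoms, typecasts and ops are legal here, which is why every other print type falls into the "invalid eval type" warning.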
-static enum event_type
-process_fields(struct event_format *event, struct print_flag_sym **list, char **tok)
+static enum tep_event_type
+process_fields(struct tep_event_format *event, struct tep_print_flag_sym **list, char **tok)
{
- enum event_type type;
- struct print_arg *arg = NULL;
- struct print_flag_sym *field;
+ enum tep_event_type type;
+ struct tep_print_arg *arg = NULL;
+ struct tep_print_flag_sym *field;
char *token = *tok;
char *value;
do {
free_token(token);
type = read_token_item(&token);
- if (test_type_token(type, token, EVENT_OP, "{"))
+ if (test_type_token(type, token, TEP_EVENT_OP, "{"))
break;
arg = alloc_arg();
@@ -2464,13 +2466,13 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
free_token(token);
type = process_arg(event, arg, &token);
- if (type == EVENT_OP)
+ if (type == TEP_EVENT_OP)
type = process_op(event, arg, &token);
- if (type == EVENT_ERROR)
+ if (type == TEP_EVENT_ERROR)
goto out_free;
- if (test_type_token(type, token, EVENT_DELIM, ","))
+ if (test_type_token(type, token, TEP_EVENT_DELIM, ","))
goto out_free;
field = calloc(1, sizeof(*field));
@@ -2491,7 +2493,7 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
free_token(token);
type = process_arg(event, arg, &token);
- if (test_type_token(type, token, EVENT_OP, "}"))
+ if (test_type_token(type, token, TEP_EVENT_OP, "}"))
goto out_free_field;
value = arg_eval(arg);
@@ -2508,7 +2510,7 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
free_token(token);
type = read_token_item(&token);
- } while (type == EVENT_DELIM && strcmp(token, ",") == 0);
+ } while (type == TEP_EVENT_DELIM && strcmp(token, ",") == 0);
*tok = token;
return type;
@@ -2520,18 +2522,18 @@ out_free:
free_token(token);
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
-static enum event_type
-process_flags(struct event_format *event, struct print_arg *arg, char **tok)
+static enum tep_event_type
+process_flags(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
{
- struct print_arg *field;
- enum event_type type;
+ struct tep_print_arg *field;
+ enum tep_event_type type;
char *token = NULL;
memset(arg, 0, sizeof(*arg));
- arg->type = PRINT_FLAGS;
+ arg->type = TEP_PRINT_FLAGS;
field = alloc_arg();
if (!field) {
@@ -2542,10 +2544,10 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok)
type = process_field_arg(event, field, &token);
/* Handle operations in the first argument */
- while (type == EVENT_OP)
+ while (type == TEP_EVENT_OP)
type = process_op(event, field, &token);
- if (test_type_token(type, token, EVENT_DELIM, ","))
+ if (test_type_token(type, token, TEP_EVENT_DELIM, ","))
goto out_free_field;
free_token(token);
@@ -2557,11 +2559,11 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok)
type = read_token_item(&token);
}
- if (test_type_token(type, token, EVENT_DELIM, ","))
+ if (test_type_token(type, token, TEP_EVENT_DELIM, ","))
goto out_free;
type = process_fields(event, &arg->flags.flags, &token);
- if (test_type_token(type, token, EVENT_DELIM, ")"))
+ if (test_type_token(type, token, TEP_EVENT_DELIM, ")"))
goto out_free;
free_token(token);
@@ -2573,18 +2575,18 @@ out_free_field:
out_free:
free_token(token);
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
-static enum event_type
-process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
+static enum tep_event_type
+process_symbols(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
{
- struct print_arg *field;
- enum event_type type;
+ struct tep_print_arg *field;
+ enum tep_event_type type;
char *token = NULL;
memset(arg, 0, sizeof(*arg));
- arg->type = PRINT_SYMBOL;
+ arg->type = TEP_PRINT_SYMBOL;
field = alloc_arg();
if (!field) {
@@ -2594,13 +2596,13 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
type = process_field_arg(event, field, &token);
- if (test_type_token(type, token, EVENT_DELIM, ","))
+ if (test_type_token(type, token, TEP_EVENT_DELIM, ","))
goto out_free_field;
arg->symbol.field = field;
type = process_fields(event, &arg->symbol.symbols, &token);
- if (test_type_token(type, token, EVENT_DELIM, ")"))
+ if (test_type_token(type, token, TEP_EVENT_DELIM, ")"))
goto out_free;
free_token(token);
@@ -2612,12 +2614,12 @@ out_free_field:
out_free:
free_token(token);
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
-static enum event_type
-process_hex_common(struct event_format *event, struct print_arg *arg,
- char **tok, enum print_arg_type type)
+static enum tep_event_type
+process_hex_common(struct tep_event_format *event, struct tep_print_arg *arg,
+ char **tok, enum tep_print_arg_type type)
{
memset(arg, 0, sizeof(*arg));
arg->type = type;
@@ -2635,27 +2637,27 @@ free_field:
arg->hex.field = NULL;
out:
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
-static enum event_type
-process_hex(struct event_format *event, struct print_arg *arg, char **tok)
+static enum tep_event_type
+process_hex(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
{
- return process_hex_common(event, arg, tok, PRINT_HEX);
+ return process_hex_common(event, arg, tok, TEP_PRINT_HEX);
}
-static enum event_type
-process_hex_str(struct event_format *event, struct print_arg *arg,
+static enum tep_event_type
+process_hex_str(struct tep_event_format *event, struct tep_print_arg *arg,
char **tok)
{
- return process_hex_common(event, arg, tok, PRINT_HEX_STR);
+ return process_hex_common(event, arg, tok, TEP_PRINT_HEX_STR);
}
-static enum event_type
-process_int_array(struct event_format *event, struct print_arg *arg, char **tok)
+static enum tep_event_type
+process_int_array(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
{
memset(arg, 0, sizeof(*arg));
- arg->type = PRINT_INT_ARRAY;
+ arg->type = TEP_PRINT_INT_ARRAY;
if (alloc_and_process_delim(event, ",", &arg->int_array.field))
goto out;
@@ -2676,18 +2678,18 @@ free_field:
arg->int_array.field = NULL;
out:
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
-static enum event_type
-process_dynamic_array(struct event_format *event, struct print_arg *arg, char **tok)
+static enum tep_event_type
+process_dynamic_array(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
{
- struct format_field *field;
- enum event_type type;
+ struct tep_format_field *field;
+ enum tep_event_type type;
char *token;
memset(arg, 0, sizeof(*arg));
- arg->type = PRINT_DYNAMIC_ARRAY;
+ arg->type = TEP_PRINT_DYNAMIC_ARRAY;
/*
* The item within the parenthesis is another field that holds
@@ -2695,7 +2697,7 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
*/
type = read_token(&token);
*tok = token;
- if (type != EVENT_ITEM)
+ if (type != TEP_EVENT_ITEM)
goto out_free;
/* Find the field */
@@ -2707,13 +2709,13 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
arg->dynarray.field = field;
arg->dynarray.index = 0;
- if (read_expected(EVENT_DELIM, ")") < 0)
+ if (read_expected(TEP_EVENT_DELIM, ")") < 0)
goto out_free;
free_token(token);
type = read_token_item(&token);
*tok = token;
- if (type != EVENT_OP || strcmp(token, "[") != 0)
+ if (type != TEP_EVENT_OP || strcmp(token, "[") != 0)
return type;
free_token(token);
@@ -2721,14 +2723,14 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
if (!arg) {
do_warning_event(event, "%s: not enough memory!", __func__);
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
type = process_arg(event, arg, &token);
- if (type == EVENT_ERROR)
+ if (type == TEP_EVENT_ERROR)
goto out_free_arg;
- if (!test_type_token(type, token, EVENT_OP, "]"))
+ if (!test_type_token(type, token, TEP_EVENT_OP, "]"))
goto out_free_arg;
free_token(token);
@@ -2740,21 +2742,21 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
out_free:
free_token(token);
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
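A minimal sketch of the descriptor that process_dynamic_array() records (variable names are illustrative): in the ftrace format, a dynamic-array field holds a 32-bit value packing the payload offset in the low 16 bits and the length in the high 16 bits, which is what the recurring ">> 16" / "& 0xffff" pairs throughout this file unpack:

	unsigned int desc = tep_read_number(pevent, data + field->offset, field->size);
	unsigned int off  = desc & 0xffff;   /* start of array data within the record */
	unsigned int len  = desc >> 16;      /* total allocated length of the array */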
-static enum event_type
-process_dynamic_array_len(struct event_format *event, struct print_arg *arg,
+static enum tep_event_type
+process_dynamic_array_len(struct tep_event_format *event, struct tep_print_arg *arg,
char **tok)
{
- struct format_field *field;
- enum event_type type;
+ struct tep_format_field *field;
+ enum tep_event_type type;
char *token;
- if (read_expect_type(EVENT_ITEM, &token) < 0)
+ if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
goto out_free;
- arg->type = PRINT_DYNAMIC_ARRAY_LEN;
+ arg->type = TEP_PRINT_DYNAMIC_ARRAY_LEN;
/* Find the field */
field = tep_find_field(event, token);
@@ -2764,7 +2766,7 @@ process_dynamic_array_len(struct event_format *event, struct print_arg *arg,
arg->dynarray.field = field;
arg->dynarray.index = 0;
- if (read_expected(EVENT_DELIM, ")") < 0)
+ if (read_expected(TEP_EVENT_DELIM, ")") < 0)
goto out_err;
type = read_token(&token);
@@ -2776,28 +2778,28 @@ process_dynamic_array_len(struct event_format *event, struct print_arg *arg,
free_token(token);
out_err:
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
-static enum event_type
-process_paren(struct event_format *event, struct print_arg *arg, char **tok)
+static enum tep_event_type
+process_paren(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
{
- struct print_arg *item_arg;
- enum event_type type;
+ struct tep_print_arg *item_arg;
+ enum tep_event_type type;
char *token;
type = process_arg(event, arg, &token);
- if (type == EVENT_ERROR)
+ if (type == TEP_EVENT_ERROR)
goto out_free;
- if (type == EVENT_OP)
+ if (type == TEP_EVENT_OP)
type = process_op(event, arg, &token);
- if (type == EVENT_ERROR)
+ if (type == TEP_EVENT_ERROR)
goto out_free;
- if (test_type_token(type, token, EVENT_DELIM, ")"))
+ if (test_type_token(type, token, TEP_EVENT_DELIM, ")"))
goto out_free;
free_token(token);
@@ -2808,13 +2810,13 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok)
* this was a typecast.
*/
if (event_item_type(type) ||
- (type == EVENT_DELIM && strcmp(token, "(") == 0)) {
+ (type == TEP_EVENT_DELIM && strcmp(token, "(") == 0)) {
/* make this a typecast and continue */
/* previous must be an atom */
- if (arg->type != PRINT_ATOM) {
- do_warning_event(event, "previous needed to be PRINT_ATOM");
+ if (arg->type != TEP_PRINT_ATOM) {
+ do_warning_event(event, "previous needed to be TEP_PRINT_ATOM");
goto out_free;
}
@@ -2825,7 +2827,7 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok)
goto out_free;
}
- arg->type = PRINT_TYPE;
+ arg->type = TEP_PRINT_TYPE;
arg->typecast.type = arg->atom.atom;
arg->typecast.item = item_arg;
type = process_arg_token(event, item_arg, &token, type);
@@ -2838,25 +2840,25 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok)
out_free:
free_token(token);
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
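Illustratively (field invented), the typecast branch of process_paren() covers fragments such as

	(unsigned long)REC->ip

"unsigned long" is first accumulated as a multi-token TEP_PRINT_ATOM; only when the closing ")" is followed by another item (or a "(") is the atom reinterpreted as typecast.type, with the following argument parsed into typecast.item of a TEP_PRINT_TYPE node.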
-static enum event_type
-process_str(struct event_format *event __maybe_unused, struct print_arg *arg,
+static enum tep_event_type
+process_str(struct tep_event_format *event __maybe_unused, struct tep_print_arg *arg,
char **tok)
{
- enum event_type type;
+ enum tep_event_type type;
char *token;
- if (read_expect_type(EVENT_ITEM, &token) < 0)
+ if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
goto out_free;
- arg->type = PRINT_STRING;
+ arg->type = TEP_PRINT_STRING;
arg->string.string = token;
arg->string.offset = -1;
- if (read_expected(EVENT_DELIM, ")") < 0)
+ if (read_expected(TEP_EVENT_DELIM, ")") < 0)
goto out_err;
type = read_token(&token);
@@ -2868,24 +2870,24 @@ process_str(struct event_format *event __maybe_unused, struct print_arg *arg,
free_token(token);
out_err:
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
-static enum event_type
-process_bitmask(struct event_format *event __maybe_unused, struct print_arg *arg,
- char **tok)
+static enum tep_event_type
+process_bitmask(struct tep_event_format *event __maybe_unused, struct tep_print_arg *arg,
+ char **tok)
{
- enum event_type type;
+ enum tep_event_type type;
char *token;
- if (read_expect_type(EVENT_ITEM, &token) < 0)
+ if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
goto out_free;
- arg->type = PRINT_BITMASK;
+ arg->type = TEP_PRINT_BITMASK;
arg->bitmask.bitmask = token;
arg->bitmask.offset = -1;
- if (read_expected(EVENT_DELIM, ")") < 0)
+ if (read_expected(TEP_EVENT_DELIM, ")") < 0)
goto out_err;
type = read_token(&token);
@@ -2897,7 +2899,7 @@ process_bitmask(struct event_format *event __maybe_unused, struct print_arg *arg
free_token(token);
out_err:
*tok = NULL;
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
static struct tep_function_handler *
@@ -2932,17 +2934,17 @@ static void remove_func_handler(struct tep_handle *pevent, char *func_name)
}
}
-static enum event_type
-process_func_handler(struct event_format *event, struct tep_function_handler *func,
- struct print_arg *arg, char **tok)
+static enum tep_event_type
+process_func_handler(struct tep_event_format *event, struct tep_function_handler *func,
+ struct tep_print_arg *arg, char **tok)
{
- struct print_arg **next_arg;
- struct print_arg *farg;
- enum event_type type;
+ struct tep_print_arg **next_arg;
+ struct tep_print_arg *farg;
+ enum tep_event_type type;
char *token;
int i;
- arg->type = PRINT_FUNC;
+ arg->type = TEP_PRINT_FUNC;
arg->func.func = func;
*tok = NULL;
@@ -2953,12 +2955,12 @@ process_func_handler(struct event_format *event, struct tep_function_handler *fu
if (!farg) {
do_warning_event(event, "%s: not enough memory!",
__func__);
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
type = process_arg(event, farg, &token);
if (i < (func->nr_args - 1)) {
- if (type != EVENT_DELIM || strcmp(token, ",") != 0) {
+ if (type != TEP_EVENT_DELIM || strcmp(token, ",") != 0) {
do_warning_event(event,
"Error: function '%s()' expects %d arguments but event %s only uses %d",
func->name, func->nr_args,
@@ -2966,7 +2968,7 @@ process_func_handler(struct event_format *event, struct tep_function_handler *fu
goto err;
}
} else {
- if (type != EVENT_DELIM || strcmp(token, ")") != 0) {
+ if (type != TEP_EVENT_DELIM || strcmp(token, ")") != 0) {
do_warning_event(event,
"Error: function '%s()' only expects %d arguments but event %s has more",
func->name, func->nr_args, event->name);
@@ -2987,11 +2989,11 @@ process_func_handler(struct event_format *event, struct tep_function_handler *fu
err:
free_arg(farg);
free_token(token);
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
-static enum event_type
-process_function(struct event_format *event, struct print_arg *arg,
+static enum tep_event_type
+process_function(struct tep_event_format *event, struct tep_print_arg *arg,
char *token, char **tok)
{
struct tep_function_handler *func;
@@ -3043,12 +3045,12 @@ process_function(struct event_format *event, struct print_arg *arg,
do_warning_event(event, "function %s not defined", token);
free_token(token);
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
-static enum event_type
-process_arg_token(struct event_format *event, struct print_arg *arg,
- char **tok, enum event_type type)
+static enum tep_event_type
+process_arg_token(struct tep_event_format *event, struct tep_print_arg *arg,
+ char **tok, enum tep_event_type type)
{
char *token;
char *atom;
@@ -3056,7 +3058,7 @@ process_arg_token(struct event_format *event, struct print_arg *arg,
token = *tok;
switch (type) {
- case EVENT_ITEM:
+ case TEP_EVENT_ITEM:
if (strcmp(token, "REC") == 0) {
free_token(token);
type = process_entry(event, arg, &token);
@@ -3070,7 +3072,7 @@ process_arg_token(struct event_format *event, struct print_arg *arg,
* If the next token is a parenthesis, then this
* is a function.
*/
- if (type == EVENT_DELIM && strcmp(token, "(") == 0) {
+ if (type == TEP_EVENT_DELIM && strcmp(token, "(") == 0) {
free_token(token);
token = NULL;
/* this will free atom. */
@@ -3078,7 +3080,7 @@ process_arg_token(struct event_format *event, struct print_arg *arg,
break;
}
/* atoms can be more than one token long */
- while (type == EVENT_ITEM) {
+ while (type == TEP_EVENT_ITEM) {
char *new_atom;
new_atom = realloc(atom,
strlen(atom) + strlen(token) + 2);
@@ -3086,7 +3088,7 @@ process_arg_token(struct event_format *event, struct print_arg *arg,
free(atom);
*tok = NULL;
free_token(token);
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
atom = new_atom;
strcat(atom, " ");
@@ -3095,55 +3097,55 @@ process_arg_token(struct event_format *event, struct print_arg *arg,
type = read_token_item(&token);
}
- arg->type = PRINT_ATOM;
+ arg->type = TEP_PRINT_ATOM;
arg->atom.atom = atom;
break;
- case EVENT_DQUOTE:
- case EVENT_SQUOTE:
- arg->type = PRINT_ATOM;
+ case TEP_EVENT_DQUOTE:
+ case TEP_EVENT_SQUOTE:
+ arg->type = TEP_PRINT_ATOM;
arg->atom.atom = token;
type = read_token_item(&token);
break;
- case EVENT_DELIM:
+ case TEP_EVENT_DELIM:
if (strcmp(token, "(") == 0) {
free_token(token);
type = process_paren(event, arg, &token);
break;
}
- case EVENT_OP:
+ case TEP_EVENT_OP:
/* handle single ops */
- arg->type = PRINT_OP;
+ arg->type = TEP_PRINT_OP;
arg->op.op = token;
arg->op.left = NULL;
type = process_op(event, arg, &token);
/* On error, the op is freed */
- if (type == EVENT_ERROR)
+ if (type == TEP_EVENT_ERROR)
arg->op.op = NULL;
/* return error type if errored */
break;
- case EVENT_ERROR ... EVENT_NEWLINE:
+ case TEP_EVENT_ERROR ... TEP_EVENT_NEWLINE:
default:
do_warning_event(event, "unexpected type %d", type);
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
*tok = token;
return type;
}
-static int event_read_print_args(struct event_format *event, struct print_arg **list)
+static int event_read_print_args(struct tep_event_format *event, struct tep_print_arg **list)
{
- enum event_type type = EVENT_ERROR;
- struct print_arg *arg;
+ enum tep_event_type type = TEP_EVENT_ERROR;
+ struct tep_print_arg *arg;
char *token;
int args = 0;
do {
- if (type == EVENT_NEWLINE) {
+ if (type == TEP_EVENT_NEWLINE) {
type = read_token_item(&token);
continue;
}
@@ -3157,7 +3159,7 @@ static int event_read_print_args(struct event_format *event, struct print_arg **
type = process_arg(event, arg, &token);
- if (type == EVENT_ERROR) {
+ if (type == TEP_EVENT_ERROR) {
free_token(token);
free_arg(arg);
return -1;
@@ -3166,10 +3168,10 @@ static int event_read_print_args(struct event_format *event, struct print_arg **
*list = arg;
args++;
- if (type == EVENT_OP) {
+ if (type == TEP_EVENT_OP) {
type = process_op(event, arg, &token);
free_token(token);
- if (type == EVENT_ERROR) {
+ if (type == TEP_EVENT_ERROR) {
*list = NULL;
free_arg(arg);
return -1;
@@ -3178,37 +3180,37 @@ static int event_read_print_args(struct event_format *event, struct print_arg **
continue;
}
- if (type == EVENT_DELIM && strcmp(token, ",") == 0) {
+ if (type == TEP_EVENT_DELIM && strcmp(token, ",") == 0) {
free_token(token);
*list = arg;
list = &arg->next;
continue;
}
break;
- } while (type != EVENT_NONE);
+ } while (type != TEP_EVENT_NONE);
- if (type != EVENT_NONE && type != EVENT_ERROR)
+ if (type != TEP_EVENT_NONE && type != TEP_EVENT_ERROR)
free_token(token);
return args;
}
-static int event_read_print(struct event_format *event)
+static int event_read_print(struct tep_event_format *event)
{
- enum event_type type;
+ enum tep_event_type type;
char *token;
int ret;
- if (read_expected_item(EVENT_ITEM, "print") < 0)
+ if (read_expected_item(TEP_EVENT_ITEM, "print") < 0)
return -1;
- if (read_expected(EVENT_ITEM, "fmt") < 0)
+ if (read_expected(TEP_EVENT_ITEM, "fmt") < 0)
return -1;
- if (read_expected(EVENT_OP, ":") < 0)
+ if (read_expected(TEP_EVENT_OP, ":") < 0)
return -1;
- if (read_expect_type(EVENT_DQUOTE, &token) < 0)
+ if (read_expect_type(TEP_EVENT_DQUOTE, &token) < 0)
goto fail;
concat:
@@ -3218,11 +3220,11 @@ static int event_read_print(struct event_format *event)
/* ok to have no arg */
type = read_token_item(&token);
- if (type == EVENT_NONE)
+ if (type == TEP_EVENT_NONE)
return 0;
/* Handle concatenation of print lines */
- if (type == EVENT_DQUOTE) {
+ if (type == TEP_EVENT_DQUOTE) {
char *cat;
if (asprintf(&cat, "%s%s", event->print_fmt.format, token) < 0)
@@ -3234,7 +3236,7 @@ static int event_read_print(struct event_format *event)
goto concat;
}
- if (test_type_token(type, token, EVENT_DELIM, ","))
+ if (test_type_token(type, token, TEP_EVENT_DELIM, ","))
goto fail;
free_token(token);
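Schematically (format invented), event_read_print() consumes the final section of the format file, e.g.:

	print fmt: "comm=%s pid=%d", REC->comm, REC->pid

The "goto concat" path above mirrors C string-literal pasting: when the kernel splits a long TP_printk() format into adjacent double-quoted strings, consecutive TEP_EVENT_DQUOTE tokens are asprintf()-joined into one format before the comma-separated arguments are parsed.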
@@ -3258,10 +3260,10 @@ static int event_read_print(struct event_format *event)
* Returns a common field from the event by the given @name.
* This only searches the common fields and not all fields.
*/
-struct format_field *
-tep_find_common_field(struct event_format *event, const char *name)
+struct tep_format_field *
+tep_find_common_field(struct tep_event_format *event, const char *name)
{
- struct format_field *format;
+ struct tep_format_field *format;
for (format = event->format.common_fields;
format; format = format->next) {
@@ -3280,10 +3282,10 @@ tep_find_common_field(struct event_format *event, const char *name)
* Returns a non-common field by the given @name.
* This does not search common fields.
*/
-struct format_field *
-tep_find_field(struct event_format *event, const char *name)
+struct tep_format_field *
+tep_find_field(struct tep_event_format *event, const char *name)
{
- struct format_field *format;
+ struct tep_format_field *format;
for (format = event->format.fields;
format; format = format->next) {
@@ -3303,10 +3305,10 @@ tep_find_field(struct event_format *event, const char *name)
* This searches the common field names first, then
* the non-common ones if a common one was not found.
*/
-struct format_field *
-tep_find_any_field(struct event_format *event, const char *name)
+struct tep_format_field *
+tep_find_any_field(struct tep_event_format *event, const char *name)
{
- struct format_field *format;
+ struct tep_format_field *format;
format = tep_find_common_field(event, name);
if (format)
@@ -3330,11 +3332,11 @@ unsigned long long tep_read_number(struct tep_handle *pevent,
case 1:
return *(unsigned char *)ptr;
case 2:
- return data2host2(pevent, ptr);
+ return tep_data2host2(pevent, ptr);
case 4:
- return data2host4(pevent, ptr);
+ return tep_data2host4(pevent, ptr);
case 8:
- return data2host8(pevent, ptr);
+ return tep_data2host8(pevent, ptr);
default:
/* BUG! */
return 0;
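The tep_data2host{2,4,8}() calls above convert raw buffer values recorded in the trace file's byte order into host order. Conceptually, for the 4-byte case (a sketch only; the helper below is hypothetical, not the library's implementation):

	static unsigned int swap4_if_needed(unsigned int raw,
					    int file_big_endian, int host_big_endian)
	{
		/* identical byte orders: nothing to do; otherwise byte-swap */
		return file_big_endian == host_big_endian
			? raw : __builtin_bswap32(raw);
	}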
@@ -3352,7 +3354,7 @@ unsigned long long tep_read_number(struct tep_handle *pevent,
*
* Returns 0 on success, -1 otherwise.
*/
-int tep_read_number_field(struct format_field *field, const void *data,
+int tep_read_number_field(struct tep_format_field *field, const void *data,
unsigned long long *value)
{
if (!field)
@@ -3373,8 +3375,8 @@ int tep_read_number_field(struct format_field *field, const void *data,
static int get_common_info(struct tep_handle *pevent,
const char *type, int *offset, int *size)
{
- struct event_format *event;
- struct format_field *field;
+ struct tep_event_format *event;
+ struct tep_format_field *field;
/*
* All events should have the same common elements.
@@ -3460,11 +3462,11 @@ static int events_id_cmp(const void *a, const void *b);
*
* Returns an event that has a given @id.
*/
-struct event_format *tep_find_event(struct tep_handle *pevent, int id)
+struct tep_event_format *tep_find_event(struct tep_handle *pevent, int id)
{
- struct event_format **eventptr;
- struct event_format key;
- struct event_format *pkey = &key;
+ struct tep_event_format **eventptr;
+ struct tep_event_format key;
+ struct tep_event_format *pkey = &key;
/* Check cache first */
if (pevent->last_event && pevent->last_event->id == id)
@@ -3492,11 +3494,11 @@ struct event_format *tep_find_event(struct tep_handle *pevent, int id)
* This returns an event with a given @name and under the system
* @sys. If @sys is NULL the first event with @name is returned.
*/
-struct event_format *
+struct tep_event_format *
tep_find_event_by_name(struct tep_handle *pevent,
const char *sys, const char *name)
{
- struct event_format *event;
+ struct tep_event_format *event;
int i;
if (pevent->last_event &&
@@ -3521,23 +3523,23 @@ tep_find_event_by_name(struct tep_handle *pevent,
}
static unsigned long long
-eval_num_arg(void *data, int size, struct event_format *event, struct print_arg *arg)
+eval_num_arg(void *data, int size, struct tep_event_format *event, struct tep_print_arg *arg)
{
struct tep_handle *pevent = event->pevent;
unsigned long long val = 0;
unsigned long long left, right;
- struct print_arg *typearg = NULL;
- struct print_arg *larg;
+ struct tep_print_arg *typearg = NULL;
+ struct tep_print_arg *larg;
unsigned long offset;
unsigned int field_size;
switch (arg->type) {
- case PRINT_NULL:
+ case TEP_PRINT_NULL:
/* ?? */
return 0;
- case PRINT_ATOM:
+ case TEP_PRINT_ATOM:
return strtoull(arg->atom.atom, NULL, 0);
- case PRINT_FIELD:
+ case TEP_PRINT_FIELD:
if (!arg->field.field) {
arg->field.field = tep_find_any_field(event, arg->field.name);
if (!arg->field.field)
@@ -3548,27 +3550,27 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
val = tep_read_number(pevent, data + arg->field.field->offset,
arg->field.field->size);
break;
- case PRINT_FLAGS:
- case PRINT_SYMBOL:
- case PRINT_INT_ARRAY:
- case PRINT_HEX:
- case PRINT_HEX_STR:
+ case TEP_PRINT_FLAGS:
+ case TEP_PRINT_SYMBOL:
+ case TEP_PRINT_INT_ARRAY:
+ case TEP_PRINT_HEX:
+ case TEP_PRINT_HEX_STR:
break;
- case PRINT_TYPE:
+ case TEP_PRINT_TYPE:
val = eval_num_arg(data, size, event, arg->typecast.item);
return eval_type(val, arg, 0);
- case PRINT_STRING:
- case PRINT_BSTRING:
- case PRINT_BITMASK:
+ case TEP_PRINT_STRING:
+ case TEP_PRINT_BSTRING:
+ case TEP_PRINT_BITMASK:
return 0;
- case PRINT_FUNC: {
+ case TEP_PRINT_FUNC: {
struct trace_seq s;
trace_seq_init(&s);
val = process_defined_func(&s, data, size, event, arg);
trace_seq_destroy(&s);
return val;
}
- case PRINT_OP:
+ case TEP_PRINT_OP:
if (strcmp(arg->op.op, "[") == 0) {
/*
* Arrays are special, since we don't want
@@ -3578,7 +3580,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
/* handle typecasts */
larg = arg->op.left;
- while (larg->type == PRINT_TYPE) {
+ while (larg->type == TEP_PRINT_TYPE) {
if (!typearg)
typearg = larg;
larg = larg->typecast.item;
@@ -3588,7 +3590,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
field_size = pevent->long_size;
switch (larg->type) {
- case PRINT_DYNAMIC_ARRAY:
+ case TEP_PRINT_DYNAMIC_ARRAY:
offset = tep_read_number(pevent,
data + larg->dynarray.field->offset,
larg->dynarray.field->size);
@@ -3602,7 +3604,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
offset &= 0xffff;
offset += right;
break;
- case PRINT_FIELD:
+ case TEP_PRINT_FIELD:
if (!larg->field.field) {
larg->field.field =
tep_find_any_field(event, larg->field.name);
@@ -3718,7 +3720,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
goto out_warning_op;
}
break;
- case PRINT_DYNAMIC_ARRAY_LEN:
+ case TEP_PRINT_DYNAMIC_ARRAY_LEN:
offset = tep_read_number(pevent,
data + arg->dynarray.field->offset,
arg->dynarray.field->size);
@@ -3729,7 +3731,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
*/
val = (unsigned long long)(offset >> 16);
break;
- case PRINT_DYNAMIC_ARRAY:
+ case TEP_PRINT_DYNAMIC_ARRAY:
/* Without [], we pass the address to the dynamic data */
offset = tep_read_number(pevent,
data + arg->dynarray.field->offset,
@@ -3861,12 +3863,12 @@ static void print_bitmask_to_seq(struct tep_handle *pevent,
}
static void print_str_arg(struct trace_seq *s, void *data, int size,
- struct event_format *event, const char *format,
- int len_arg, struct print_arg *arg)
+ struct tep_event_format *event, const char *format,
+ int len_arg, struct tep_print_arg *arg)
{
struct tep_handle *pevent = event->pevent;
- struct print_flag_sym *flag;
- struct format_field *field;
+ struct tep_print_flag_sym *flag;
+ struct tep_format_field *field;
struct printk_map *printk;
long long val, fval;
unsigned long long addr;
@@ -3876,13 +3878,13 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
int i, len;
switch (arg->type) {
- case PRINT_NULL:
+ case TEP_PRINT_NULL:
/* ?? */
return;
- case PRINT_ATOM:
+ case TEP_PRINT_ATOM:
print_str_to_seq(s, format, len_arg, arg->atom.atom);
return;
- case PRINT_FIELD:
+ case TEP_PRINT_FIELD:
field = arg->field.field;
if (!field) {
field = tep_find_any_field(event, arg->field.name);
@@ -3900,7 +3902,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
* and the size is the same as long_size, assume that it
* is a pointer.
*/
- if (!(field->flags & FIELD_IS_ARRAY) &&
+ if (!(field->flags & TEP_FIELD_IS_ARRAY) &&
field->size == pevent->long_size) {
/* Handle heterogeneous recording and processing
@@ -3939,7 +3941,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
print_str_to_seq(s, format, len_arg, str);
free(str);
break;
- case PRINT_FLAGS:
+ case TEP_PRINT_FLAGS:
val = eval_num_arg(data, size, event, arg->flags.field);
print = 0;
for (flag = arg->flags.flags; flag; flag = flag->next) {
@@ -3962,7 +3964,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
trace_seq_printf(s, "0x%llx", val);
}
break;
- case PRINT_SYMBOL:
+ case TEP_PRINT_SYMBOL:
val = eval_num_arg(data, size, event, arg->symbol.field);
for (flag = arg->symbol.symbols; flag; flag = flag->next) {
fval = eval_flag(flag->value);
@@ -3974,9 +3976,9 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
if (!flag)
trace_seq_printf(s, "0x%llx", val);
break;
- case PRINT_HEX:
- case PRINT_HEX_STR:
- if (arg->hex.field->type == PRINT_DYNAMIC_ARRAY) {
+ case TEP_PRINT_HEX:
+ case TEP_PRINT_HEX_STR:
+ if (arg->hex.field->type == TEP_PRINT_DYNAMIC_ARRAY) {
unsigned long offset;
offset = tep_read_number(pevent,
data + arg->hex.field->dynarray.field->offset,
@@ -3995,19 +3997,19 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
}
len = eval_num_arg(data, size, event, arg->hex.size);
for (i = 0; i < len; i++) {
- if (i && arg->type == PRINT_HEX)
+ if (i && arg->type == TEP_PRINT_HEX)
trace_seq_putc(s, ' ');
trace_seq_printf(s, "%02x", hex[i]);
}
break;
- case PRINT_INT_ARRAY: {
+ case TEP_PRINT_INT_ARRAY: {
void *num;
int el_size;
- if (arg->int_array.field->type == PRINT_DYNAMIC_ARRAY) {
+ if (arg->int_array.field->type == TEP_PRINT_DYNAMIC_ARRAY) {
unsigned long offset;
- struct format_field *field =
+ struct tep_format_field *field =
arg->int_array.field->dynarray.field;
offset = tep_read_number(pevent,
data + field->offset,
@@ -4049,43 +4051,43 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
}
break;
}
- case PRINT_TYPE:
+ case TEP_PRINT_TYPE:
break;
- case PRINT_STRING: {
+ case TEP_PRINT_STRING: {
int str_offset;
if (arg->string.offset == -1) {
- struct format_field *f;
+ struct tep_format_field *f;
f = tep_find_any_field(event, arg->string.string);
arg->string.offset = f->offset;
}
- str_offset = data2host4(pevent, data + arg->string.offset);
+ str_offset = tep_data2host4(pevent, data + arg->string.offset);
str_offset &= 0xffff;
print_str_to_seq(s, format, len_arg, ((char *)data) + str_offset);
break;
}
- case PRINT_BSTRING:
+ case TEP_PRINT_BSTRING:
print_str_to_seq(s, format, len_arg, arg->string.string);
break;
- case PRINT_BITMASK: {
+ case TEP_PRINT_BITMASK: {
int bitmask_offset;
int bitmask_size;
if (arg->bitmask.offset == -1) {
- struct format_field *f;
+ struct tep_format_field *f;
f = tep_find_any_field(event, arg->bitmask.bitmask);
arg->bitmask.offset = f->offset;
}
- bitmask_offset = data2host4(pevent, data + arg->bitmask.offset);
+ bitmask_offset = tep_data2host4(pevent, data + arg->bitmask.offset);
bitmask_size = bitmask_offset >> 16;
bitmask_offset &= 0xffff;
print_bitmask_to_seq(pevent, s, format, len_arg,
data + bitmask_offset, bitmask_size);
break;
}
- case PRINT_OP:
+ case TEP_PRINT_OP:
/*
* The only op for string should be ? :
*/
@@ -4099,7 +4101,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
print_str_arg(s, data, size, event,
format, len_arg, arg->op.right->op.right);
break;
- case PRINT_FUNC:
+ case TEP_PRINT_FUNC:
process_defined_func(s, data, size, event, arg);
break;
default:
@@ -4116,13 +4118,13 @@ out_warning_field:
static unsigned long long
process_defined_func(struct trace_seq *s, void *data, int size,
- struct event_format *event, struct print_arg *arg)
+ struct tep_event_format *event, struct tep_print_arg *arg)
{
struct tep_function_handler *func_handle = arg->func.func;
struct func_params *param;
unsigned long long *args;
unsigned long long ret;
- struct print_arg *farg;
+ struct tep_print_arg *farg;
struct trace_seq str;
struct save_str {
struct save_str *next;
@@ -4199,9 +4201,9 @@ out_free:
return ret;
}
-static void free_args(struct print_arg *args)
+static void free_args(struct tep_print_arg *args)
{
- struct print_arg *next;
+ struct tep_print_arg *next;
while (args) {
next = args->next;
@@ -4211,11 +4213,11 @@ static void free_args(struct print_arg *args)
}
}
-static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event_format *event)
+static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, struct tep_event_format *event)
{
struct tep_handle *pevent = event->pevent;
- struct format_field *field, *ip_field;
- struct print_arg *args, *arg, **next;
+ struct tep_format_field *field, *ip_field;
+ struct tep_print_arg *args, *arg, **next;
unsigned long long ip, val;
char *ptr;
void *bptr;
@@ -4254,7 +4256,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
arg->next = NULL;
next = &arg->next;
- arg->type = PRINT_ATOM;
+ arg->type = TEP_PRINT_ATOM;
if (asprintf(&arg->atom.atom, "%lld", ip) < 0)
goto out_free;
@@ -4342,7 +4344,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
goto out_free;
}
arg->next = NULL;
- arg->type = PRINT_ATOM;
+ arg->type = TEP_PRINT_ATOM;
if (asprintf(&arg->atom.atom, "%lld", val) < 0) {
free(arg);
goto out_free;
@@ -4366,7 +4368,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
goto out_free;
}
arg->next = NULL;
- arg->type = PRINT_BSTRING;
+ arg->type = TEP_PRINT_BSTRING;
arg->string.string = strdup(bptr);
if (!arg->string.string)
goto out_free;
@@ -4388,11 +4390,11 @@ out_free:
static char *
get_bprint_format(void *data, int size __maybe_unused,
- struct event_format *event)
+ struct tep_event_format *event)
{
struct tep_handle *pevent = event->pevent;
unsigned long long addr;
- struct format_field *field;
+ struct tep_format_field *field;
struct printk_map *printk;
char *format;
@@ -4423,17 +4425,17 @@ get_bprint_format(void *data, int size __maybe_unused,
}
static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
- struct event_format *event, struct print_arg *arg)
+ struct tep_event_format *event, struct tep_print_arg *arg)
{
unsigned char *buf;
const char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x";
- if (arg->type == PRINT_FUNC) {
+ if (arg->type == TEP_PRINT_FUNC) {
process_defined_func(s, data, size, event, arg);
return;
}
- if (arg->type != PRINT_FIELD) {
+ if (arg->type != TEP_PRINT_FIELD) {
trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d",
arg->type);
return;
@@ -4576,17 +4578,17 @@ static void print_ip6_addr(struct trace_seq *s, char i, unsigned char *buf)
* %pISpc print an IP address based on sockaddr; p adds port.
*/
static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
- void *data, int size, struct event_format *event,
- struct print_arg *arg)
+ void *data, int size, struct tep_event_format *event,
+ struct tep_print_arg *arg)
{
unsigned char *buf;
- if (arg->type == PRINT_FUNC) {
+ if (arg->type == TEP_PRINT_FUNC) {
process_defined_func(s, data, size, event, arg);
return 0;
}
- if (arg->type != PRINT_FIELD) {
+ if (arg->type != TEP_PRINT_FIELD) {
trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
return 0;
}
@@ -4613,8 +4615,8 @@ static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
}
static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
- void *data, int size, struct event_format *event,
- struct print_arg *arg)
+ void *data, int size, struct tep_event_format *event,
+ struct tep_print_arg *arg)
{
char have_c = 0;
unsigned char *buf;
@@ -4627,12 +4629,12 @@ static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
rc++;
}
- if (arg->type == PRINT_FUNC) {
+ if (arg->type == TEP_PRINT_FUNC) {
process_defined_func(s, data, size, event, arg);
return rc;
}
- if (arg->type != PRINT_FIELD) {
+ if (arg->type != TEP_PRINT_FIELD) {
trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
return rc;
}
@@ -4663,8 +4665,8 @@ static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
}
static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
- void *data, int size, struct event_format *event,
- struct print_arg *arg)
+ void *data, int size, struct tep_event_format *event,
+ struct tep_print_arg *arg)
{
char have_c = 0, have_p = 0;
unsigned char *buf;
@@ -4685,12 +4687,12 @@ static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
}
}
- if (arg->type == PRINT_FUNC) {
+ if (arg->type == TEP_PRINT_FUNC) {
process_defined_func(s, data, size, event, arg);
return rc;
}
- if (arg->type != PRINT_FIELD) {
+ if (arg->type != TEP_PRINT_FIELD) {
trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
return rc;
}
@@ -4745,8 +4747,8 @@ static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
}
static int print_ip_arg(struct trace_seq *s, const char *ptr,
- void *data, int size, struct event_format *event,
- struct print_arg *arg)
+ void *data, int size, struct tep_event_format *event,
+ struct tep_print_arg *arg)
{
char i = *ptr; /* 'i' or 'I' */
char ver;
@@ -4787,22 +4789,22 @@ static int is_printable_array(char *p, unsigned int len)
}
void tep_print_field(struct trace_seq *s, void *data,
- struct format_field *field)
+ struct tep_format_field *field)
{
unsigned long long val;
unsigned int offset, len, i;
struct tep_handle *pevent = field->event->pevent;
- if (field->flags & FIELD_IS_ARRAY) {
+ if (field->flags & TEP_FIELD_IS_ARRAY) {
offset = field->offset;
len = field->size;
- if (field->flags & FIELD_IS_DYNAMIC) {
+ if (field->flags & TEP_FIELD_IS_DYNAMIC) {
val = tep_read_number(pevent, data + offset, len);
offset = val;
len = offset >> 16;
offset &= 0xffff;
}
- if (field->flags & FIELD_IS_STRING &&
+ if (field->flags & TEP_FIELD_IS_STRING &&
is_printable_array(data + offset, len)) {
trace_seq_printf(s, "%s", (char *)data + offset);
} else {
@@ -4814,21 +4816,21 @@ void tep_print_field(struct trace_seq *s, void *data,
*((unsigned char *)data + offset + i));
}
trace_seq_putc(s, ']');
- field->flags &= ~FIELD_IS_STRING;
+ field->flags &= ~TEP_FIELD_IS_STRING;
}
} else {
val = tep_read_number(pevent, data + field->offset,
field->size);
- if (field->flags & FIELD_IS_POINTER) {
+ if (field->flags & TEP_FIELD_IS_POINTER) {
trace_seq_printf(s, "0x%llx", val);
- } else if (field->flags & FIELD_IS_SIGNED) {
+ } else if (field->flags & TEP_FIELD_IS_SIGNED) {
switch (field->size) {
case 4:
/*
* If field is long then print it in hex.
* A long usually stores pointers.
*/
- if (field->flags & FIELD_IS_LONG)
+ if (field->flags & TEP_FIELD_IS_LONG)
trace_seq_printf(s, "0x%x", (int)val);
else
trace_seq_printf(s, "%d", (int)val);
@@ -4843,7 +4845,7 @@ void tep_print_field(struct trace_seq *s, void *data,
trace_seq_printf(s, "%lld", val);
}
} else {
- if (field->flags & FIELD_IS_LONG)
+ if (field->flags & TEP_FIELD_IS_LONG)
trace_seq_printf(s, "0x%llx", val);
else
trace_seq_printf(s, "%llu", val);
@@ -4852,9 +4854,9 @@ void tep_print_field(struct trace_seq *s, void *data,
}
void tep_print_fields(struct trace_seq *s, void *data,
- int size __maybe_unused, struct event_format *event)
+ int size __maybe_unused, struct tep_event_format *event)
{
- struct format_field *field;
+ struct tep_format_field *field;
field = event->format.fields;
while (field) {
@@ -4864,12 +4866,12 @@ void tep_print_fields(struct trace_seq *s, void *data,
}
}
-static void pretty_print(struct trace_seq *s, void *data, int size, struct event_format *event)
+static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event_format *event)
{
struct tep_handle *pevent = event->pevent;
- struct print_fmt *print_fmt = &event->print_fmt;
- struct print_arg *arg = print_fmt->args;
- struct print_arg *args = NULL;
+ struct tep_print_fmt *print_fmt = &event->print_fmt;
+ struct tep_print_arg *arg = print_fmt->args;
+ struct tep_print_arg *args = NULL;
const char *ptr = print_fmt->format;
unsigned long long val;
struct func_map *func;
@@ -4883,13 +4885,13 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
int len;
int ls;
- if (event->flags & EVENT_FL_FAILED) {
+ if (event->flags & TEP_EVENT_FL_FAILED) {
trace_seq_printf(s, "[FAILED TO PARSE]");
tep_print_fields(s, data, size, event);
return;
}
- if (event->flags & EVENT_FL_ISBPRINT) {
+ if (event->flags & TEP_EVENT_FL_ISBPRINT) {
bprint_fmt = get_bprint_format(data, size, event);
args = make_bprint_args(bprint_fmt, data, size, event);
arg = args;
@@ -4944,7 +4946,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
/* The argument is the length. */
if (!arg) {
do_warning_event(event, "no argument match");
- event->flags |= EVENT_FL_FAILED;
+ event->flags |= TEP_EVENT_FL_FAILED;
goto out_failed;
}
len_arg = eval_num_arg(data, size, event, arg);
@@ -4966,7 +4968,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
if (isalnum(ptr[1]))
ptr++;
- if (arg->type == PRINT_BSTRING) {
+ if (arg->type == TEP_PRINT_BSTRING) {
trace_seq_puts(s, arg->string.string);
break;
}
@@ -4997,7 +4999,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
case 'u':
if (!arg) {
do_warning_event(event, "no argument match");
- event->flags |= EVENT_FL_FAILED;
+ event->flags |= TEP_EVENT_FL_FAILED;
goto out_failed;
}
@@ -5007,7 +5009,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
/* should never happen */
if (len > 31) {
do_warning_event(event, "bad format!");
- event->flags |= EVENT_FL_FAILED;
+ event->flags |= TEP_EVENT_FL_FAILED;
len = 31;
}
@@ -5073,13 +5075,13 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
break;
default:
do_warning_event(event, "bad count (%d)", ls);
- event->flags |= EVENT_FL_FAILED;
+ event->flags |= TEP_EVENT_FL_FAILED;
}
break;
case 's':
if (!arg) {
do_warning_event(event, "no matching argument");
- event->flags |= EVENT_FL_FAILED;
+ event->flags |= TEP_EVENT_FL_FAILED;
goto out_failed;
}
@@ -5089,7 +5091,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
/* should never happen */
if (len > 31) {
do_warning_event(event, "bad format!");
- event->flags |= EVENT_FL_FAILED;
+ event->flags |= TEP_EVENT_FL_FAILED;
len = 31;
}
@@ -5114,7 +5116,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
trace_seq_putc(s, *ptr);
}
- if (event->flags & EVENT_FL_FAILED) {
+ if (event->flags & TEP_EVENT_FL_FAILED) {
out_failed:
trace_seq_printf(s, "[FAILED TO PARSE]");
}
@@ -5227,7 +5229,7 @@ int tep_data_type(struct tep_handle *pevent, struct tep_record *rec)
*
* This returns the event from a given @type.
*/
-struct event_format *tep_data_event_from_type(struct tep_handle *pevent, int type)
+struct tep_event_format *tep_data_event_from_type(struct tep_handle *pevent, int type)
{
return tep_find_event(pevent, type);
}
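After this rename pass, a minimal usage sketch of the lookup-and-print flow, using only signatures visible in this patch (and assuming an initialized pevent handle and a fetched record):

	struct trace_seq seq;
	struct tep_event_format *ev;

	trace_seq_init(&seq);
	ev = tep_find_event_by_record(pevent, record);	/* declared below */
	if (ev)
		tep_event_info(&seq, ev, record);	/* renders the record */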
@@ -5385,16 +5387,16 @@ int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline)
* This parses the raw @data using the given @event information and
* writes the print format into the trace_seq.
*/
-void tep_event_info(struct trace_seq *s, struct event_format *event,
+void tep_event_info(struct trace_seq *s, struct tep_event_format *event,
struct tep_record *record)
{
int print_pretty = 1;
- if (event->pevent->print_raw || (event->flags & EVENT_FL_PRINTRAW))
+ if (event->pevent->print_raw || (event->flags & TEP_EVENT_FL_PRINTRAW))
tep_print_fields(s, record->data, record->size, event);
else {
- if (event->handler && !(event->flags & EVENT_FL_NOHANDLE))
+ if (event->handler && !(event->flags & TEP_EVENT_FL_NOHANDLE))
print_pretty = event->handler(s, record, event,
event->context);
@@ -5426,7 +5428,7 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
* Returns the associated event for a given record, or NULL if none
* is found.
*/
-struct event_format *
+struct tep_event_format *
tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
{
int type;
@@ -5451,7 +5453,7 @@ tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
* Writes the task's comm, PID and CPU to @s.
*/
void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
- struct event_format *event,
+ struct tep_event_format *event,
struct tep_record *record)
{
void *data = record->data;
@@ -5479,7 +5481,7 @@ void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
* Writes the timestamp of the record into @s.
*/
void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
- struct event_format *event,
+ struct tep_event_format *event,
struct tep_record *record,
bool use_trace_clock)
{
@@ -5529,7 +5531,7 @@ void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
* Writes the parsing of the record's data to @s.
*/
void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
- struct event_format *event,
+ struct tep_event_format *event,
struct tep_record *record)
{
static const char *spaces = " "; /* 20 spaces */
@@ -5548,7 +5550,7 @@ void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
struct tep_record *record, bool use_trace_clock)
{
- struct event_format *event;
+ struct tep_event_format *event;
event = tep_find_event_by_record(pevent, record);
if (!event) {
@@ -5570,8 +5572,8 @@ void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
static int events_id_cmp(const void *a, const void *b)
{
- struct event_format * const * ea = a;
- struct event_format * const * eb = b;
+ struct tep_event_format * const * ea = a;
+ struct tep_event_format * const * eb = b;
if ((*ea)->id < (*eb)->id)
return -1;
@@ -5584,8 +5586,8 @@ static int events_id_cmp(const void *a, const void *b)
static int events_name_cmp(const void *a, const void *b)
{
- struct event_format * const * ea = a;
- struct event_format * const * eb = b;
+ struct tep_event_format * const * ea = a;
+ struct tep_event_format * const * eb = b;
int res;
res = strcmp((*ea)->name, (*eb)->name);
@@ -5601,8 +5603,8 @@ static int events_name_cmp(const void *a, const void *b)
static int events_system_cmp(const void *a, const void *b)
{
- struct event_format * const * ea = a;
- struct event_format * const * eb = b;
+ struct tep_event_format * const * ea = a;
+ struct tep_event_format * const * eb = b;
int res;
res = strcmp((*ea)->system, (*eb)->system);
@@ -5616,9 +5618,9 @@ static int events_system_cmp(const void *a, const void *b)
return events_id_cmp(a, b);
}
-struct event_format **tep_list_events(struct tep_handle *pevent, enum event_sort_type sort_type)
+struct tep_event_format **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type sort_type)
{
- struct event_format **events;
+ struct tep_event_format **events;
int (*sort)(const void *a, const void *b);
events = pevent->sort_events;
@@ -5637,20 +5639,20 @@ struct event_format **tep_list_events(struct tep_handle *pevent, enum event_sort
pevent->sort_events = events;
/* the internal events are sorted by id */
- if (sort_type == EVENT_SORT_ID) {
+ if (sort_type == TEP_EVENT_SORT_ID) {
pevent->last_type = sort_type;
return events;
}
}
switch (sort_type) {
- case EVENT_SORT_ID:
+ case TEP_EVENT_SORT_ID:
sort = events_id_cmp;
break;
- case EVENT_SORT_NAME:
+ case TEP_EVENT_SORT_NAME:
sort = events_name_cmp;
break;
- case EVENT_SORT_SYSTEM:
+ case TEP_EVENT_SORT_SYSTEM:
sort = events_system_cmp;
break;
default:
@@ -5663,12 +5665,12 @@ struct event_format **tep_list_events(struct tep_handle *pevent, enum event_sort
return events;
}
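A sketch of a caller once the rename lands; per the body above, the returned array is cached in the handle, NULL-terminated, and should not be freed by the caller (dump_all_events() is hypothetical):

#include <stdio.h>
#include "event-parse.h"

/* Print every known event, sorted by name. */
static void dump_all_events(struct tep_handle *pevent)
{
	struct tep_event_format **events;
	int i;

	events = tep_list_events(pevent, TEP_EVENT_SORT_NAME);
	for (i = 0; events && events[i]; i++)
		printf("%s:%s\n", events[i]->system, events[i]->name);
}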
-static struct format_field **
+static struct tep_format_field **
get_event_fields(const char *type, const char *name,
- int count, struct format_field *list)
+ int count, struct tep_format_field *list)
{
- struct format_field **fields;
- struct format_field *field;
+ struct tep_format_field **fields;
+ struct tep_format_field *field;
int i = 0;
fields = malloc(sizeof(*fields) * (count + 1));
@@ -5701,7 +5703,7 @@ get_event_fields(const char *type, const char *name,
* Returns an allocated array of fields. The last item in the array is NULL.
* The array must be freed with free().
*/
-struct format_field **tep_event_common_fields(struct event_format *event)
+struct tep_format_field **tep_event_common_fields(struct tep_event_format *event)
{
return get_event_fields("common", event->name,
event->format.nr_common,
@@ -5715,14 +5717,14 @@ struct format_field **tep_event_common_fields(struct event_format *event)
* Returns an allocated array of fields. The last item in the array is NULL.
* The array must be freed with free().
*/
-struct format_field **tep_event_fields(struct event_format *event)
+struct tep_format_field **tep_event_fields(struct tep_event_format *event)
{
return get_event_fields("event", event->name,
event->format.nr_fields,
event->format.fields);
}
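Per the kernel-doc above, these two return a freshly allocated, NULL-terminated array; the array is freed with free() but the fields themselves stay owned by the event. A minimal sketch (dump_fields() is hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include "event-parse.h"

/* Walk the non-common fields of one parsed event. */
static void dump_fields(struct tep_event_format *event)
{
	struct tep_format_field **fields = tep_event_fields(event);
	int i;

	if (!fields)
		return;
	for (i = 0; fields[i]; i++)
		printf("  %s %s; /* offset %d, size %d */\n",
		       fields[i]->type, fields[i]->name,
		       fields[i]->offset, fields[i]->size);
	free(fields);	/* the array only, not the fields */
}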
-static void print_fields(struct trace_seq *s, struct print_flag_sym *field)
+static void print_fields(struct trace_seq *s, struct tep_print_flag_sym *field)
{
trace_seq_printf(s, "{ %s, %s }", field->value, field->str);
if (field->next) {
@@ -5732,22 +5734,22 @@ static void print_fields(struct trace_seq *s, struct print_flag_sym *field)
}
/* for debugging */
-static void print_args(struct print_arg *args)
+static void print_args(struct tep_print_arg *args)
{
int print_paren = 1;
struct trace_seq s;
switch (args->type) {
- case PRINT_NULL:
+ case TEP_PRINT_NULL:
printf("null");
break;
- case PRINT_ATOM:
+ case TEP_PRINT_ATOM:
printf("%s", args->atom.atom);
break;
- case PRINT_FIELD:
+ case TEP_PRINT_FIELD:
printf("REC->%s", args->field.name);
break;
- case PRINT_FLAGS:
+ case TEP_PRINT_FLAGS:
printf("__print_flags(");
print_args(args->flags.field);
printf(", %s, ", args->flags.delim);
@@ -5757,7 +5759,7 @@ static void print_args(struct print_arg *args)
trace_seq_destroy(&s);
printf(")");
break;
- case PRINT_SYMBOL:
+ case TEP_PRINT_SYMBOL:
printf("__print_symbolic(");
print_args(args->symbol.field);
printf(", ");
@@ -5767,21 +5769,21 @@ static void print_args(struct print_arg *args)
trace_seq_destroy(&s);
printf(")");
break;
- case PRINT_HEX:
+ case TEP_PRINT_HEX:
printf("__print_hex(");
print_args(args->hex.field);
printf(", ");
print_args(args->hex.size);
printf(")");
break;
- case PRINT_HEX_STR:
+ case TEP_PRINT_HEX_STR:
printf("__print_hex_str(");
print_args(args->hex.field);
printf(", ");
print_args(args->hex.size);
printf(")");
break;
- case PRINT_INT_ARRAY:
+ case TEP_PRINT_INT_ARRAY:
printf("__print_array(");
print_args(args->int_array.field);
printf(", ");
@@ -5790,18 +5792,18 @@ static void print_args(struct print_arg *args)
print_args(args->int_array.el_size);
printf(")");
break;
- case PRINT_STRING:
- case PRINT_BSTRING:
+ case TEP_PRINT_STRING:
+ case TEP_PRINT_BSTRING:
printf("__get_str(%s)", args->string.string);
break;
- case PRINT_BITMASK:
+ case TEP_PRINT_BITMASK:
printf("__get_bitmask(%s)", args->bitmask.bitmask);
break;
- case PRINT_TYPE:
+ case TEP_PRINT_TYPE:
printf("(%s)", args->typecast.type);
print_args(args->typecast.item);
break;
- case PRINT_OP:
+ case TEP_PRINT_OP:
if (strcmp(args->op.op, ":") == 0)
print_paren = 0;
if (print_paren)
@@ -5833,13 +5835,13 @@ static void parse_header_field(const char *field,
save_input_buf_ptr = input_buf_ptr;
save_input_buf_siz = input_buf_siz;
- if (read_expected(EVENT_ITEM, "field") < 0)
+ if (read_expected(TEP_EVENT_ITEM, "field") < 0)
return;
- if (read_expected(EVENT_OP, ":") < 0)
+ if (read_expected(TEP_EVENT_OP, ":") < 0)
return;
/* type */
- if (read_expect_type(EVENT_ITEM, &token) < 0)
+ if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
goto fail;
free_token(token);
@@ -5847,42 +5849,42 @@ static void parse_header_field(const char *field,
* If this is not a mandatory field, then test it first.
*/
if (mandatory) {
- if (read_expected(EVENT_ITEM, field) < 0)
+ if (read_expected(TEP_EVENT_ITEM, field) < 0)
return;
} else {
- if (read_expect_type(EVENT_ITEM, &token) < 0)
+ if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
goto fail;
if (strcmp(token, field) != 0)
goto discard;
free_token(token);
}
- if (read_expected(EVENT_OP, ";") < 0)
+ if (read_expected(TEP_EVENT_OP, ";") < 0)
return;
- if (read_expected(EVENT_ITEM, "offset") < 0)
+ if (read_expected(TEP_EVENT_ITEM, "offset") < 0)
return;
- if (read_expected(EVENT_OP, ":") < 0)
+ if (read_expected(TEP_EVENT_OP, ":") < 0)
return;
- if (read_expect_type(EVENT_ITEM, &token) < 0)
+ if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
goto fail;
*offset = atoi(token);
free_token(token);
- if (read_expected(EVENT_OP, ";") < 0)
+ if (read_expected(TEP_EVENT_OP, ";") < 0)
return;
- if (read_expected(EVENT_ITEM, "size") < 0)
+ if (read_expected(TEP_EVENT_ITEM, "size") < 0)
return;
- if (read_expected(EVENT_OP, ":") < 0)
+ if (read_expected(TEP_EVENT_OP, ":") < 0)
return;
- if (read_expect_type(EVENT_ITEM, &token) < 0)
+ if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
goto fail;
*size = atoi(token);
free_token(token);
- if (read_expected(EVENT_OP, ";") < 0)
+ if (read_expected(TEP_EVENT_OP, ";") < 0)
return;
type = read_token(&token);
- if (type != EVENT_NEWLINE) {
+ if (type != TEP_EVENT_NEWLINE) {
/* newer versions of the kernel have a "signed" type */
- if (type != EVENT_ITEM)
+ if (type != TEP_EVENT_ITEM)
goto fail;
if (strcmp(token, "signed") != 0)
@@ -5890,17 +5892,17 @@ static void parse_header_field(const char *field,
free_token(token);
- if (read_expected(EVENT_OP, ":") < 0)
+ if (read_expected(TEP_EVENT_OP, ":") < 0)
return;
- if (read_expect_type(EVENT_ITEM, &token))
+ if (read_expect_type(TEP_EVENT_ITEM, &token))
goto fail;
free_token(token);
- if (read_expected(EVENT_OP, ";") < 0)
+ if (read_expected(TEP_EVENT_OP, ";") < 0)
return;
- if (read_expect_type(EVENT_NEWLINE, &token))
+ if (read_expect_type(TEP_EVENT_NEWLINE, &token))
goto fail;
}
fail:
@@ -5957,7 +5959,7 @@ int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long si
return 0;
}
-static int event_matches(struct event_format *event,
+static int event_matches(struct tep_event_format *event,
int id, const char *sys_name,
const char *event_name)
{
@@ -5980,7 +5982,7 @@ static void free_handler(struct event_handler *handle)
free(handle);
}
-static int find_event_handle(struct tep_handle *pevent, struct event_format *event)
+static int find_event_handle(struct tep_handle *pevent, struct tep_event_format *event)
{
struct event_handler *handle, **next;
@@ -6021,11 +6023,11 @@ static int find_event_handle(struct tep_handle *pevent, struct event_format *eve
*
* /sys/kernel/debug/tracing/events/.../.../format
*/
-enum tep_errno __tep_parse_format(struct event_format **eventp,
+enum tep_errno __tep_parse_format(struct tep_event_format **eventp,
struct tep_handle *pevent, const char *buf,
unsigned long size, const char *sys)
{
- struct event_format *event;
+ struct tep_event_format *event;
int ret;
init_input_buf(buf, size);
@@ -6042,10 +6044,10 @@ enum tep_errno __tep_parse_format(struct event_format **eventp,
}
if (strcmp(sys, "ftrace") == 0) {
- event->flags |= EVENT_FL_ISFTRACE;
+ event->flags |= TEP_EVENT_FL_ISFTRACE;
if (strcmp(event->name, "bprint") == 0)
- event->flags |= EVENT_FL_ISBPRINT;
+ event->flags |= TEP_EVENT_FL_ISBPRINT;
}
event->id = event_read_id();
@@ -6088,22 +6090,22 @@ enum tep_errno __tep_parse_format(struct event_format **eventp,
goto event_parse_failed;
}
- if (!ret && (event->flags & EVENT_FL_ISFTRACE)) {
- struct format_field *field;
- struct print_arg *arg, **list;
+ if (!ret && (event->flags & TEP_EVENT_FL_ISFTRACE)) {
+ struct tep_format_field *field;
+ struct tep_print_arg *arg, **list;
/* old ftrace had no args */
list = &event->print_fmt.args;
for (field = event->format.fields; field; field = field->next) {
arg = alloc_arg();
if (!arg) {
- event->flags |= EVENT_FL_FAILED;
+ event->flags |= TEP_EVENT_FL_FAILED;
return TEP_ERRNO__OLD_FTRACE_ARG_FAILED;
}
- arg->type = PRINT_FIELD;
+ arg->type = TEP_PRINT_FIELD;
arg->field.name = strdup(field->name);
if (!arg->field.name) {
- event->flags |= EVENT_FL_FAILED;
+ event->flags |= TEP_EVENT_FL_FAILED;
free_arg(arg);
return TEP_ERRNO__OLD_FTRACE_ARG_FAILED;
}
@@ -6117,7 +6119,7 @@ enum tep_errno __tep_parse_format(struct event_format **eventp,
return 0;
event_parse_failed:
- event->flags |= EVENT_FL_FAILED;
+ event->flags |= TEP_EVENT_FL_FAILED;
return ret;
event_alloc_failed:
@@ -6130,12 +6132,12 @@ enum tep_errno __tep_parse_format(struct event_format **eventp,
static enum tep_errno
__parse_event(struct tep_handle *pevent,
- struct event_format **eventp,
+ struct tep_event_format **eventp,
const char *buf, unsigned long size,
const char *sys)
{
int ret = __tep_parse_format(eventp, pevent, buf, size, sys);
- struct event_format *event = *eventp;
+ struct tep_event_format *event = *eventp;
if (event == NULL)
return ret;
@@ -6172,7 +6174,7 @@ event_add_failed:
* /sys/kernel/debug/tracing/events/.../.../format
*/
enum tep_errno tep_parse_format(struct tep_handle *pevent,
- struct event_format **eventp,
+ struct tep_event_format **eventp,
const char *buf,
unsigned long size, const char *sys)
{
@@ -6196,40 +6198,11 @@ enum tep_errno tep_parse_format(struct tep_handle *pevent,
enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
unsigned long size, const char *sys)
{
- struct event_format *event = NULL;
+ struct tep_event_format *event = NULL;
return __parse_event(pevent, &event, buf, size, sys);
}
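A sketch of feeding one .../events/<sys>/<event>/format file to the parser after this rename; reading the file into buf (and computing size) is assumed to happen elsewhere, and "sched" is an arbitrary example system:

#include <stdio.h>
#include "event-parse.h"

static int load_format(struct tep_handle *pevent,
		       const char *buf, unsigned long size)
{
	char error[256];
	enum tep_errno err;

	err = tep_parse_event(pevent, buf, size, "sched");
	if (err) {
		/* turn the tep_errno into a human-readable string */
		tep_strerror(pevent, err, error, sizeof(error));
		fprintf(stderr, "parse failed: %s\n", error);
		return -1;
	}
	return 0;
}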
-#undef _PE
-#define _PE(code, str) str
-static const char * const tep_error_str[] = {
- TEP_ERRORS
-};
-#undef _PE
-
-int tep_strerror(struct tep_handle *pevent __maybe_unused,
- enum tep_errno errnum, char *buf, size_t buflen)
-{
- int idx;
- const char *msg;
-
- if (errnum >= 0) {
- str_error_r(errnum, buf, buflen);
- return 0;
- }
-
- if (errnum <= __TEP_ERRNO__START ||
- errnum >= __TEP_ERRNO__END)
- return -1;
-
- idx = errnum - __TEP_ERRNO__START - 1;
- msg = tep_error_str[idx];
- snprintf(buf, buflen, "%s", msg);
-
- return 0;
-}
-
-int get_field_val(struct trace_seq *s, struct format_field *field,
+int get_field_val(struct trace_seq *s, struct tep_format_field *field,
const char *name, struct tep_record *record,
unsigned long long *val, int err)
{
@@ -6262,11 +6235,11 @@ int get_field_val(struct trace_seq *s, struct format_field *field,
*
* On failure, it returns NULL.
*/
-void *tep_get_field_raw(struct trace_seq *s, struct event_format *event,
+void *tep_get_field_raw(struct trace_seq *s, struct tep_event_format *event,
const char *name, struct tep_record *record,
int *len, int err)
{
- struct format_field *field;
+ struct tep_format_field *field;
void *data = record->data;
unsigned offset;
int dummy;
@@ -6287,7 +6260,7 @@ void *tep_get_field_raw(struct trace_seq *s, struct event_format *event,
len = &dummy;
offset = field->offset;
- if (field->flags & FIELD_IS_DYNAMIC) {
+ if (field->flags & TEP_FIELD_IS_DYNAMIC) {
offset = tep_read_number(event->pevent,
data + offset, field->size);
*len = offset >> 16;
@@ -6309,11 +6282,11 @@ void *tep_get_field_raw(struct trace_seq *s, struct event_format *event,
*
* Returns 0 on success, -1 if the field is not found.
*/
-int tep_get_field_val(struct trace_seq *s, struct event_format *event,
+int tep_get_field_val(struct trace_seq *s, struct tep_event_format *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err)
{
- struct format_field *field;
+ struct tep_format_field *field;
if (!event)
return -1;
@@ -6334,11 +6307,11 @@ int tep_get_field_val(struct trace_seq *s, struct event_format *event,
*
* Returns 0 on success, -1 if the field is not found.
*/
-int tep_get_common_field_val(struct trace_seq *s, struct event_format *event,
+int tep_get_common_field_val(struct trace_seq *s, struct tep_event_format *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err)
{
- struct format_field *field;
+ struct tep_format_field *field;
if (!event)
return -1;
@@ -6359,11 +6332,11 @@ int tep_get_common_field_val(struct trace_seq *s, struct event_format *event,
*
* Returns 0 on success, -1 if the field is not found.
*/
-int tep_get_any_field_val(struct trace_seq *s, struct event_format *event,
+int tep_get_any_field_val(struct trace_seq *s, struct tep_event_format *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err)
{
- struct format_field *field;
+ struct tep_format_field *field;
if (!event)
return -1;
@@ -6385,10 +6358,10 @@ int tep_get_any_field_val(struct trace_seq *s, struct event_format *event,
* Returns: 0 on success, -1 if the field is not found, or 1 if the buffer is full.
*/
int tep_print_num_field(struct trace_seq *s, const char *fmt,
- struct event_format *event, const char *name,
+ struct tep_event_format *event, const char *name,
struct tep_record *record, int err)
{
- struct format_field *field = tep_find_field(event, name);
+ struct tep_format_field *field = tep_find_field(event, name);
unsigned long long val;
if (!field)
@@ -6417,10 +6390,10 @@ int tep_print_num_field(struct trace_seq *s, const char *fmt,
* Returns: 0 on success, -1 if the field is not found, or 1 if the buffer is full.
*/
int tep_print_func_field(struct trace_seq *s, const char *fmt,
- struct event_format *event, const char *name,
+ struct tep_event_format *event, const char *name,
struct tep_record *record, int err)
{
- struct format_field *field = tep_find_field(event, name);
+ struct tep_format_field *field = tep_find_field(event, name);
struct tep_handle *pevent = event->pevent;
unsigned long long val;
struct func_map *func;
@@ -6577,11 +6550,11 @@ int tep_unregister_print_function(struct tep_handle *pevent,
return -1;
}
-static struct event_format *search_event(struct tep_handle *pevent, int id,
+static struct tep_event_format *search_event(struct tep_handle *pevent, int id,
const char *sys_name,
const char *event_name)
{
- struct event_format *event;
+ struct tep_event_format *event;
if (id >= 0) {
/* search by id */
@@ -6621,7 +6594,7 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context)
{
- struct event_format *event;
+ struct tep_event_format *event;
struct event_handler *handle;
event = search_event(pevent, id, sys_name, event_name);
@@ -6705,7 +6678,7 @@ int tep_unregister_event_handler(struct tep_handle *pevent, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context)
{
- struct event_format *event;
+ struct tep_event_format *event;
struct event_handler *handle;
struct event_handler **next;
@@ -6757,7 +6730,7 @@ void tep_ref(struct tep_handle *pevent)
pevent->ref_count++;
}
-void tep_free_format_field(struct format_field *field)
+void tep_free_format_field(struct tep_format_field *field)
{
free(field->type);
if (field->alias != field->name)
@@ -6766,9 +6739,9 @@ void tep_free_format_field(struct format_field *field)
free(field);
}
-static void free_format_fields(struct format_field *field)
+static void free_format_fields(struct tep_format_field *field)
{
- struct format_field *next;
+ struct tep_format_field *next;
while (field) {
next = field->next;
@@ -6777,13 +6750,13 @@ static void free_format_fields(struct format_field *field)
}
}
-static void free_formats(struct format *format)
+static void free_formats(struct tep_format *format)
{
free_format_fields(format->common_fields);
free_format_fields(format->fields);
}
-void tep_free_format(struct event_format *event)
+void tep_free_format(struct tep_event_format *event)
{
free(event->name);
free(event->system);
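For out-of-tree users the rename is mechanical: no call sites change, only the type names. A hypothetical caller's own update would look like:

-	struct event_format *event;
-	struct format_field *field;
+	struct tep_event_format *event;
+	struct tep_format_field *field;

 	event = tep_find_event_by_name(pevent, "sched", "sched_switch");
 	field = tep_find_field(event, "next_pid");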
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 44b7c2d41f9f..16bf4c890b6f 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -26,17 +26,12 @@
#include <regex.h>
#include <string.h>
+#include "trace-seq.h"
+
#ifndef __maybe_unused
#define __maybe_unused __attribute__((unused))
#endif
-/* ----------------------- trace_seq ----------------------- */
-
-
-#ifndef TRACE_SEQ_BUF_SIZE
-#define TRACE_SEQ_BUF_SIZE 4096
-#endif
-
#ifndef DEBUG_RECORD
#define DEBUG_RECORD 0
#endif
@@ -59,51 +54,14 @@ struct tep_record {
#endif
};
-enum trace_seq_fail {
- TRACE_SEQ__GOOD,
- TRACE_SEQ__BUFFER_POISONED,
- TRACE_SEQ__MEM_ALLOC_FAILED,
-};
-
-/*
- * Trace sequences are used to allow a function to call several other functions
- * to create a string of data to use (up to a max of PAGE_SIZE).
- */
-
-struct trace_seq {
- char *buffer;
- unsigned int buffer_size;
- unsigned int len;
- unsigned int readpos;
- enum trace_seq_fail state;
-};
-
-void trace_seq_init(struct trace_seq *s);
-void trace_seq_reset(struct trace_seq *s);
-void trace_seq_destroy(struct trace_seq *s);
-
-extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
- __attribute__ ((format (printf, 2, 3)));
-extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
- __attribute__ ((format (printf, 2, 0)));
-
-extern int trace_seq_puts(struct trace_seq *s, const char *str);
-extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
-
-extern void trace_seq_terminate(struct trace_seq *s);
-
-extern int trace_seq_do_fprintf(struct trace_seq *s, FILE *fp);
-extern int trace_seq_do_printf(struct trace_seq *s);
-
-
-/* ----------------------- pevent ----------------------- */
+/* ----------------------- tep ----------------------- */
struct tep_handle;
-struct event_format;
+struct tep_event_format;
typedef int (*tep_event_handler_func)(struct trace_seq *s,
struct tep_record *record,
- struct event_format *event,
+ struct tep_event_format *event,
void *context);
typedef int (*tep_plugin_load_func)(struct tep_handle *pevent);
@@ -172,20 +130,20 @@ struct tep_plugin_option {
#define TEP_PLUGIN_OPTIONS_NAME MAKE_STR(TEP_PLUGIN_OPTIONS)
#define TEP_PLUGIN_ALIAS_NAME MAKE_STR(TEP_PLUGIN_ALIAS)
-enum format_flags {
- FIELD_IS_ARRAY = 1,
- FIELD_IS_POINTER = 2,
- FIELD_IS_SIGNED = 4,
- FIELD_IS_STRING = 8,
- FIELD_IS_DYNAMIC = 16,
- FIELD_IS_LONG = 32,
- FIELD_IS_FLAG = 64,
- FIELD_IS_SYMBOLIC = 128,
+enum tep_format_flags {
+ TEP_FIELD_IS_ARRAY = 1,
+ TEP_FIELD_IS_POINTER = 2,
+ TEP_FIELD_IS_SIGNED = 4,
+ TEP_FIELD_IS_STRING = 8,
+ TEP_FIELD_IS_DYNAMIC = 16,
+ TEP_FIELD_IS_LONG = 32,
+ TEP_FIELD_IS_FLAG = 64,
+ TEP_FIELD_IS_SYMBOLIC = 128,
};
-struct format_field {
- struct format_field *next;
- struct event_format *event;
+struct tep_format_field {
+ struct tep_format_field *next;
+ struct tep_event_format *event;
char *type;
char *name;
char *alias;
@@ -196,169 +154,169 @@ struct format_field {
unsigned long flags;
};
-struct format {
+struct tep_format {
int nr_common;
int nr_fields;
- struct format_field *common_fields;
- struct format_field *fields;
+ struct tep_format_field *common_fields;
+ struct tep_format_field *fields;
};
-struct print_arg_atom {
+struct tep_print_arg_atom {
char *atom;
};
-struct print_arg_string {
+struct tep_print_arg_string {
char *string;
int offset;
};
-struct print_arg_bitmask {
+struct tep_print_arg_bitmask {
char *bitmask;
int offset;
};
-struct print_arg_field {
+struct tep_print_arg_field {
char *name;
- struct format_field *field;
+ struct tep_format_field *field;
};
-struct print_flag_sym {
- struct print_flag_sym *next;
- char *value;
- char *str;
+struct tep_print_flag_sym {
+ struct tep_print_flag_sym *next;
+ char *value;
+ char *str;
};
-struct print_arg_typecast {
+struct tep_print_arg_typecast {
char *type;
- struct print_arg *item;
+ struct tep_print_arg *item;
};
-struct print_arg_flags {
- struct print_arg *field;
- char *delim;
- struct print_flag_sym *flags;
+struct tep_print_arg_flags {
+ struct tep_print_arg *field;
+ char *delim;
+ struct tep_print_flag_sym *flags;
};
-struct print_arg_symbol {
- struct print_arg *field;
- struct print_flag_sym *symbols;
+struct tep_print_arg_symbol {
+ struct tep_print_arg *field;
+ struct tep_print_flag_sym *symbols;
};
-struct print_arg_hex {
- struct print_arg *field;
- struct print_arg *size;
+struct tep_print_arg_hex {
+ struct tep_print_arg *field;
+ struct tep_print_arg *size;
};
-struct print_arg_int_array {
- struct print_arg *field;
- struct print_arg *count;
- struct print_arg *el_size;
+struct tep_print_arg_int_array {
+ struct tep_print_arg *field;
+ struct tep_print_arg *count;
+ struct tep_print_arg *el_size;
};
-struct print_arg_dynarray {
- struct format_field *field;
- struct print_arg *index;
+struct tep_print_arg_dynarray {
+ struct tep_format_field *field;
+ struct tep_print_arg *index;
};
-struct print_arg;
+struct tep_print_arg;
-struct print_arg_op {
+struct tep_print_arg_op {
char *op;
int prio;
- struct print_arg *left;
- struct print_arg *right;
+ struct tep_print_arg *left;
+ struct tep_print_arg *right;
};
struct tep_function_handler;
-struct print_arg_func {
+struct tep_print_arg_func {
struct tep_function_handler *func;
- struct print_arg *args;
-};
-
-enum print_arg_type {
- PRINT_NULL,
- PRINT_ATOM,
- PRINT_FIELD,
- PRINT_FLAGS,
- PRINT_SYMBOL,
- PRINT_HEX,
- PRINT_INT_ARRAY,
- PRINT_TYPE,
- PRINT_STRING,
- PRINT_BSTRING,
- PRINT_DYNAMIC_ARRAY,
- PRINT_OP,
- PRINT_FUNC,
- PRINT_BITMASK,
- PRINT_DYNAMIC_ARRAY_LEN,
- PRINT_HEX_STR,
-};
-
-struct print_arg {
- struct print_arg *next;
- enum print_arg_type type;
+ struct tep_print_arg *args;
+};
+
+enum tep_print_arg_type {
+ TEP_PRINT_NULL,
+ TEP_PRINT_ATOM,
+ TEP_PRINT_FIELD,
+ TEP_PRINT_FLAGS,
+ TEP_PRINT_SYMBOL,
+ TEP_PRINT_HEX,
+ TEP_PRINT_INT_ARRAY,
+ TEP_PRINT_TYPE,
+ TEP_PRINT_STRING,
+ TEP_PRINT_BSTRING,
+ TEP_PRINT_DYNAMIC_ARRAY,
+ TEP_PRINT_OP,
+ TEP_PRINT_FUNC,
+ TEP_PRINT_BITMASK,
+ TEP_PRINT_DYNAMIC_ARRAY_LEN,
+ TEP_PRINT_HEX_STR,
+};
+
+struct tep_print_arg {
+ struct tep_print_arg *next;
+ enum tep_print_arg_type type;
union {
- struct print_arg_atom atom;
- struct print_arg_field field;
- struct print_arg_typecast typecast;
- struct print_arg_flags flags;
- struct print_arg_symbol symbol;
- struct print_arg_hex hex;
- struct print_arg_int_array int_array;
- struct print_arg_func func;
- struct print_arg_string string;
- struct print_arg_bitmask bitmask;
- struct print_arg_op op;
- struct print_arg_dynarray dynarray;
+ struct tep_print_arg_atom atom;
+ struct tep_print_arg_field field;
+ struct tep_print_arg_typecast typecast;
+ struct tep_print_arg_flags flags;
+ struct tep_print_arg_symbol symbol;
+ struct tep_print_arg_hex hex;
+ struct tep_print_arg_int_array int_array;
+ struct tep_print_arg_func func;
+ struct tep_print_arg_string string;
+ struct tep_print_arg_bitmask bitmask;
+ struct tep_print_arg_op op;
+ struct tep_print_arg_dynarray dynarray;
};
};
-struct print_fmt {
+struct tep_print_fmt {
char *format;
- struct print_arg *args;
+ struct tep_print_arg *args;
};
-struct event_format {
+struct tep_event_format {
struct tep_handle *pevent;
char *name;
int id;
int flags;
- struct format format;
- struct print_fmt print_fmt;
+ struct tep_format format;
+ struct tep_print_fmt print_fmt;
char *system;
tep_event_handler_func handler;
void *context;
};
enum {
- EVENT_FL_ISFTRACE = 0x01,
- EVENT_FL_ISPRINT = 0x02,
- EVENT_FL_ISBPRINT = 0x04,
- EVENT_FL_ISFUNCENT = 0x10,
- EVENT_FL_ISFUNCRET = 0x20,
- EVENT_FL_NOHANDLE = 0x40,
- EVENT_FL_PRINTRAW = 0x80,
+ TEP_EVENT_FL_ISFTRACE = 0x01,
+ TEP_EVENT_FL_ISPRINT = 0x02,
+ TEP_EVENT_FL_ISBPRINT = 0x04,
+ TEP_EVENT_FL_ISFUNCENT = 0x10,
+ TEP_EVENT_FL_ISFUNCRET = 0x20,
+ TEP_EVENT_FL_NOHANDLE = 0x40,
+ TEP_EVENT_FL_PRINTRAW = 0x80,
- EVENT_FL_FAILED = 0x80000000
+ TEP_EVENT_FL_FAILED = 0x80000000
};
-enum event_sort_type {
- EVENT_SORT_ID,
- EVENT_SORT_NAME,
- EVENT_SORT_SYSTEM,
+enum tep_event_sort_type {
+ TEP_EVENT_SORT_ID,
+ TEP_EVENT_SORT_NAME,
+ TEP_EVENT_SORT_SYSTEM,
};
-enum event_type {
- EVENT_ERROR,
- EVENT_NONE,
- EVENT_SPACE,
- EVENT_NEWLINE,
- EVENT_OP,
- EVENT_DELIM,
- EVENT_ITEM,
- EVENT_DQUOTE,
- EVENT_SQUOTE,
+enum tep_event_type {
+ TEP_EVENT_ERROR,
+ TEP_EVENT_NONE,
+ TEP_EVENT_SPACE,
+ TEP_EVENT_NEWLINE,
+ TEP_EVENT_OP,
+ TEP_EVENT_DELIM,
+ TEP_EVENT_ITEM,
+ TEP_EVENT_DQUOTE,
+ TEP_EVENT_SQUOTE,
};
typedef unsigned long long (*tep_func_handler)(struct trace_seq *s,
@@ -431,12 +389,12 @@ enum tep_errno {
};
#undef _PE
-struct plugin_list;
+struct tep_plugin_list;
#define INVALID_PLUGIN_LIST_OPTION ((char **)((unsigned long)-1))
-struct plugin_list *tep_load_plugins(struct tep_handle *pevent);
-void tep_unload_plugins(struct plugin_list *plugin_list,
+struct tep_plugin_list *tep_load_plugins(struct tep_handle *pevent);
+void tep_unload_plugins(struct tep_plugin_list *plugin_list,
struct tep_handle *pevent);
char **tep_plugin_list_options(void);
void tep_plugin_free_options_list(char **list);
@@ -445,156 +403,25 @@ int tep_plugin_add_options(const char *name,
void tep_plugin_remove_options(struct tep_plugin_option *options);
void tep_print_plugins(struct trace_seq *s,
const char *prefix, const char *suffix,
- const struct plugin_list *list);
-
-struct cmdline;
-struct cmdline_list;
-struct func_map;
-struct func_list;
-struct event_handler;
-struct func_resolver;
+ const struct tep_plugin_list *list);
+/* tep_handle */
typedef char *(tep_func_resolver_t)(void *priv,
unsigned long long *addrp, char **modp);
+void tep_set_flag(struct tep_handle *tep, int flag);
+unsigned short __tep_data2host2(struct tep_handle *pevent, unsigned short data);
+unsigned int __tep_data2host4(struct tep_handle *pevent, unsigned int data);
+unsigned long long
+__tep_data2host8(struct tep_handle *pevent, unsigned long long data);
-struct tep_handle {
- int ref_count;
-
- int header_page_ts_offset;
- int header_page_ts_size;
- int header_page_size_offset;
- int header_page_size_size;
- int header_page_data_offset;
- int header_page_data_size;
- int header_page_overwrite;
-
- int file_bigendian;
- int host_bigendian;
-
- int latency_format;
-
- int old_format;
-
- int cpus;
- int long_size;
- int page_size;
-
- struct cmdline *cmdlines;
- struct cmdline_list *cmdlist;
- int cmdline_count;
-
- struct func_map *func_map;
- struct func_resolver *func_resolver;
- struct func_list *funclist;
- unsigned int func_count;
-
- struct printk_map *printk_map;
- struct printk_list *printklist;
- unsigned int printk_count;
-
-
- struct event_format **events;
- int nr_events;
- struct event_format **sort_events;
- enum event_sort_type last_type;
-
- int type_offset;
- int type_size;
-
- int pid_offset;
- int pid_size;
-
- int pc_offset;
- int pc_size;
-
- int flags_offset;
- int flags_size;
-
- int ld_offset;
- int ld_size;
-
- int print_raw;
-
- int test_filters;
-
- int flags;
-
- struct format_field *bprint_ip_field;
- struct format_field *bprint_fmt_field;
- struct format_field *bprint_buf_field;
-
- struct event_handler *handlers;
- struct tep_function_handler *func_handlers;
-
- /* cache */
- struct event_format *last_event;
-
- char *trace_clock;
-};
-
-static inline void tep_set_flag(struct tep_handle *pevent, int flag)
-{
- pevent->flags |= flag;
-}
-
-static inline unsigned short
-__data2host2(struct tep_handle *pevent, unsigned short data)
-{
- unsigned short swap;
-
- if (pevent->host_bigendian == pevent->file_bigendian)
- return data;
-
- swap = ((data & 0xffULL) << 8) |
- ((data & (0xffULL << 8)) >> 8);
-
- return swap;
-}
-
-static inline unsigned int
-__data2host4(struct tep_handle *pevent, unsigned int data)
-{
- unsigned int swap;
-
- if (pevent->host_bigendian == pevent->file_bigendian)
- return data;
-
- swap = ((data & 0xffULL) << 24) |
- ((data & (0xffULL << 8)) << 8) |
- ((data & (0xffULL << 16)) >> 8) |
- ((data & (0xffULL << 24)) >> 24);
-
- return swap;
-}
-
-static inline unsigned long long
-__data2host8(struct tep_handle *pevent, unsigned long long data)
-{
- unsigned long long swap;
-
- if (pevent->host_bigendian == pevent->file_bigendian)
- return data;
-
- swap = ((data & 0xffULL) << 56) |
- ((data & (0xffULL << 8)) << 40) |
- ((data & (0xffULL << 16)) << 24) |
- ((data & (0xffULL << 24)) << 8) |
- ((data & (0xffULL << 32)) >> 8) |
- ((data & (0xffULL << 40)) >> 24) |
- ((data & (0xffULL << 48)) >> 40) |
- ((data & (0xffULL << 56)) >> 56);
-
- return swap;
-}
-
-#define data2host2(pevent, ptr) __data2host2(pevent, *(unsigned short *)(ptr))
-#define data2host4(pevent, ptr) __data2host4(pevent, *(unsigned int *)(ptr))
-#define data2host8(pevent, ptr) \
+#define tep_data2host2(pevent, ptr) __tep_data2host2(pevent, *(unsigned short *)(ptr))
+#define tep_data2host4(pevent, ptr) __tep_data2host4(pevent, *(unsigned int *)(ptr))
+#define tep_data2host8(pevent, ptr) \
({ \
unsigned long long __val; \
\
memcpy(&__val, (ptr), sizeof(unsigned long long)); \
- __data2host8(pevent, __val); \
+ __tep_data2host8(pevent, __val); \
})
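A sketch of reading fixed-size numbers out of a raw record with the renamed macros; the two offsets are hypothetical (real code takes them from a parsed struct tep_format_field), and a byte swap happens only when the file and host endianness recorded in the handle differ:

	/* via the macro, for a known 4-byte field: */
	unsigned int pid = tep_data2host4(pevent,
					  (char *)record->data + pid_offset);
	/* or size-generic, via the public helper: */
	unsigned long long ts = tep_read_number(pevent,
					(char *)record->data + ts_offset, 8);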
static inline int tep_host_bigendian(void)
@@ -627,14 +454,14 @@ int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
int tep_pid_is_registered(struct tep_handle *pevent, int pid);
void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
- struct event_format *event,
+ struct tep_event_format *event,
struct tep_record *record);
void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
- struct event_format *event,
+ struct tep_event_format *event,
struct tep_record *record,
bool use_trace_clock);
void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
- struct event_format *event,
+ struct tep_event_format *event,
struct tep_record *record);
void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
struct tep_record *record, bool use_trace_clock);
@@ -645,32 +472,32 @@ int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long si
enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
unsigned long size, const char *sys);
enum tep_errno tep_parse_format(struct tep_handle *pevent,
- struct event_format **eventp,
+ struct tep_event_format **eventp,
const char *buf,
unsigned long size, const char *sys);
-void tep_free_format(struct event_format *event);
-void tep_free_format_field(struct format_field *field);
+void tep_free_format(struct tep_event_format *event);
+void tep_free_format_field(struct tep_format_field *field);
-void *tep_get_field_raw(struct trace_seq *s, struct event_format *event,
+void *tep_get_field_raw(struct trace_seq *s, struct tep_event_format *event,
const char *name, struct tep_record *record,
int *len, int err);
-int tep_get_field_val(struct trace_seq *s, struct event_format *event,
+int tep_get_field_val(struct trace_seq *s, struct tep_event_format *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err);
-int tep_get_common_field_val(struct trace_seq *s, struct event_format *event,
+int tep_get_common_field_val(struct trace_seq *s, struct tep_event_format *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err);
-int tep_get_any_field_val(struct trace_seq *s, struct event_format *event,
+int tep_get_any_field_val(struct trace_seq *s, struct tep_event_format *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err);
int tep_print_num_field(struct trace_seq *s, const char *fmt,
- struct event_format *event, const char *name,
- struct tep_record *record, int err);
+ struct tep_event_format *event, const char *name,
+ struct tep_record *record, int err);
int tep_print_func_field(struct trace_seq *s, const char *fmt,
- struct event_format *event, const char *name,
+ struct tep_event_format *event, const char *name,
struct tep_record *record, int err);
int tep_register_event_handler(struct tep_handle *pevent, int id,
@@ -686,29 +513,30 @@ int tep_register_print_function(struct tep_handle *pevent,
int tep_unregister_print_function(struct tep_handle *pevent,
tep_func_handler func, char *name);
-struct format_field *tep_find_common_field(struct event_format *event, const char *name);
-struct format_field *tep_find_field(struct event_format *event, const char *name);
-struct format_field *tep_find_any_field(struct event_format *event, const char *name);
+struct tep_format_field *tep_find_common_field(struct tep_event_format *event, const char *name);
+struct tep_format_field *tep_find_field(struct tep_event_format *event, const char *name);
+struct tep_format_field *tep_find_any_field(struct tep_event_format *event, const char *name);
const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr);
unsigned long long
tep_find_function_address(struct tep_handle *pevent, unsigned long long addr);
unsigned long long tep_read_number(struct tep_handle *pevent, const void *ptr, int size);
-int tep_read_number_field(struct format_field *field, const void *data,
+int tep_read_number_field(struct tep_format_field *field, const void *data,
unsigned long long *value);
-struct event_format *tep_find_event(struct tep_handle *pevent, int id);
+struct tep_event_format *tep_get_first_event(struct tep_handle *tep);
+int tep_get_events_count(struct tep_handle *tep);
+struct tep_event_format *tep_find_event(struct tep_handle *pevent, int id);
-struct event_format *
+struct tep_event_format *
tep_find_event_by_name(struct tep_handle *pevent, const char *sys, const char *name);
-
-struct event_format *
+struct tep_event_format *
tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record);
void tep_data_lat_fmt(struct tep_handle *pevent,
struct trace_seq *s, struct tep_record *record);
int tep_data_type(struct tep_handle *pevent, struct tep_record *rec);
-struct event_format *tep_data_event_from_type(struct tep_handle *pevent, int type);
+struct tep_event_format *tep_data_event_from_type(struct tep_handle *pevent, int type);
int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec);
int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec);
int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec);
@@ -719,77 +547,35 @@ struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *co
int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline);
void tep_print_field(struct trace_seq *s, void *data,
- struct format_field *field);
+ struct tep_format_field *field);
void tep_print_fields(struct trace_seq *s, void *data,
- int size __maybe_unused, struct event_format *event);
-void tep_event_info(struct trace_seq *s, struct event_format *event,
+ int size __maybe_unused, struct tep_event_format *event);
+void tep_event_info(struct trace_seq *s, struct tep_event_format *event,
struct tep_record *record);
int tep_strerror(struct tep_handle *pevent, enum tep_errno errnum,
char *buf, size_t buflen);
-struct event_format **tep_list_events(struct tep_handle *pevent, enum event_sort_type);
-struct format_field **tep_event_common_fields(struct event_format *event);
-struct format_field **tep_event_fields(struct event_format *event);
-
-static inline int tep_get_cpus(struct tep_handle *pevent)
-{
- return pevent->cpus;
-}
-
-static inline void tep_set_cpus(struct tep_handle *pevent, int cpus)
-{
- pevent->cpus = cpus;
-}
-
-static inline int tep_get_long_size(struct tep_handle *pevent)
-{
- return pevent->long_size;
-}
-
-static inline void tep_set_long_size(struct tep_handle *pevent, int long_size)
-{
- pevent->long_size = long_size;
-}
-
-static inline int tep_get_page_size(struct tep_handle *pevent)
-{
- return pevent->page_size;
-}
-
-static inline void tep_set_page_size(struct tep_handle *pevent, int _page_size)
-{
- pevent->page_size = _page_size;
-}
-
-static inline int tep_is_file_bigendian(struct tep_handle *pevent)
-{
- return pevent->file_bigendian;
-}
-
-static inline void tep_set_file_bigendian(struct tep_handle *pevent, int endian)
-{
- pevent->file_bigendian = endian;
-}
-
-static inline int tep_is_host_bigendian(struct tep_handle *pevent)
-{
- return pevent->host_bigendian;
-}
-
-static inline void tep_set_host_bigendian(struct tep_handle *pevent, int endian)
-{
- pevent->host_bigendian = endian;
-}
-
-static inline int tep_is_latency_format(struct tep_handle *pevent)
-{
- return pevent->latency_format;
-}
-
-static inline void tep_set_latency_format(struct tep_handle *pevent, int lat)
-{
- pevent->latency_format = lat;
-}
+struct tep_event_format **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type);
+struct tep_format_field **tep_event_common_fields(struct tep_event_format *event);
+struct tep_format_field **tep_event_fields(struct tep_event_format *event);
+
+enum tep_endian {
+ TEP_LITTLE_ENDIAN = 0,
+ TEP_BIG_ENDIAN
+};
+int tep_get_cpus(struct tep_handle *pevent);
+void tep_set_cpus(struct tep_handle *pevent, int cpus);
+int tep_get_long_size(struct tep_handle *pevent);
+void tep_set_long_size(struct tep_handle *pevent, int long_size);
+int tep_get_page_size(struct tep_handle *pevent);
+void tep_set_page_size(struct tep_handle *pevent, int _page_size);
+int tep_is_file_bigendian(struct tep_handle *pevent);
+void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian);
+int tep_is_host_bigendian(struct tep_handle *pevent);
+void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian);
+int tep_is_latency_format(struct tep_handle *pevent);
+void tep_set_latency_format(struct tep_handle *pevent, int lat);
+int tep_get_header_page_size(struct tep_handle *pevent);
struct tep_handle *tep_alloc(void);
void tep_free(struct tep_handle *pevent);
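With the inline accessors gone, a handle is now set up purely through the exported setters. A minimal life-cycle sketch, assuming a little-endian trace file (make_handle() is hypothetical):

#include "event-parse.h"

static struct tep_handle *make_handle(void)
{
	struct tep_handle *pevent = tep_alloc();

	if (!pevent)
		return NULL;
	tep_set_host_bigendian(pevent, tep_host_bigendian());
	tep_set_file_bigendian(pevent, TEP_LITTLE_ENDIAN);
	return pevent;	/* drop with tep_free() when done */
}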
@@ -798,7 +584,7 @@ void tep_unref(struct tep_handle *pevent);
/* access to the internal parser */
void tep_buffer_init(const char *buf, unsigned long long size);
-enum event_type tep_read_token(char **tok);
+enum tep_event_type tep_read_token(char **tok);
void tep_free_token(char *token);
int tep_peek_char(void);
const char *tep_get_input_buf(void);
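A sketch of driving the internal parser directly with the renamed token types; TEP_EVENT_NONE is assumed to mark the end of the buffer (dump_tokens() is hypothetical):

#include <stdio.h>
#include <string.h>
#include "event-parse.h"

static void dump_tokens(const char *str)
{
	enum tep_event_type type;
	char *token = NULL;

	tep_buffer_init(str, strlen(str));
	while ((type = tep_read_token(&token)) != TEP_EVENT_NONE &&
	       type != TEP_EVENT_ERROR) {
		printf("token type %d: '%s'\n", type, token);
		tep_free_token(token);
	}
}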
@@ -810,136 +596,136 @@ void tep_print_printk(struct tep_handle *pevent);
/* ----------------------- filtering ----------------------- */
-enum filter_boolean_type {
- FILTER_FALSE,
- FILTER_TRUE,
+enum tep_filter_boolean_type {
+ TEP_FILTER_FALSE,
+ TEP_FILTER_TRUE,
};
-enum filter_op_type {
- FILTER_OP_AND = 1,
- FILTER_OP_OR,
- FILTER_OP_NOT,
+enum tep_filter_op_type {
+ TEP_FILTER_OP_AND = 1,
+ TEP_FILTER_OP_OR,
+ TEP_FILTER_OP_NOT,
};
-enum filter_cmp_type {
- FILTER_CMP_NONE,
- FILTER_CMP_EQ,
- FILTER_CMP_NE,
- FILTER_CMP_GT,
- FILTER_CMP_LT,
- FILTER_CMP_GE,
- FILTER_CMP_LE,
- FILTER_CMP_MATCH,
- FILTER_CMP_NOT_MATCH,
- FILTER_CMP_REGEX,
- FILTER_CMP_NOT_REGEX,
+enum tep_filter_cmp_type {
+ TEP_FILTER_CMP_NONE,
+ TEP_FILTER_CMP_EQ,
+ TEP_FILTER_CMP_NE,
+ TEP_FILTER_CMP_GT,
+ TEP_FILTER_CMP_LT,
+ TEP_FILTER_CMP_GE,
+ TEP_FILTER_CMP_LE,
+ TEP_FILTER_CMP_MATCH,
+ TEP_FILTER_CMP_NOT_MATCH,
+ TEP_FILTER_CMP_REGEX,
+ TEP_FILTER_CMP_NOT_REGEX,
};
-enum filter_exp_type {
- FILTER_EXP_NONE,
- FILTER_EXP_ADD,
- FILTER_EXP_SUB,
- FILTER_EXP_MUL,
- FILTER_EXP_DIV,
- FILTER_EXP_MOD,
- FILTER_EXP_RSHIFT,
- FILTER_EXP_LSHIFT,
- FILTER_EXP_AND,
- FILTER_EXP_OR,
- FILTER_EXP_XOR,
- FILTER_EXP_NOT,
+enum tep_filter_exp_type {
+ TEP_FILTER_EXP_NONE,
+ TEP_FILTER_EXP_ADD,
+ TEP_FILTER_EXP_SUB,
+ TEP_FILTER_EXP_MUL,
+ TEP_FILTER_EXP_DIV,
+ TEP_FILTER_EXP_MOD,
+ TEP_FILTER_EXP_RSHIFT,
+ TEP_FILTER_EXP_LSHIFT,
+ TEP_FILTER_EXP_AND,
+ TEP_FILTER_EXP_OR,
+ TEP_FILTER_EXP_XOR,
+ TEP_FILTER_EXP_NOT,
};
-enum filter_arg_type {
- FILTER_ARG_NONE,
- FILTER_ARG_BOOLEAN,
- FILTER_ARG_VALUE,
- FILTER_ARG_FIELD,
- FILTER_ARG_EXP,
- FILTER_ARG_OP,
- FILTER_ARG_NUM,
- FILTER_ARG_STR,
+enum tep_filter_arg_type {
+ TEP_FILTER_ARG_NONE,
+ TEP_FILTER_ARG_BOOLEAN,
+ TEP_FILTER_ARG_VALUE,
+ TEP_FILTER_ARG_FIELD,
+ TEP_FILTER_ARG_EXP,
+ TEP_FILTER_ARG_OP,
+ TEP_FILTER_ARG_NUM,
+ TEP_FILTER_ARG_STR,
};
-enum filter_value_type {
- FILTER_NUMBER,
- FILTER_STRING,
- FILTER_CHAR
+enum tep_filter_value_type {
+ TEP_FILTER_NUMBER,
+ TEP_FILTER_STRING,
+ TEP_FILTER_CHAR
};
-struct fliter_arg;
+struct tep_filter_arg;
-struct filter_arg_boolean {
- enum filter_boolean_type value;
+struct tep_filter_arg_boolean {
+ enum tep_filter_boolean_type value;
};
-struct filter_arg_field {
- struct format_field *field;
+struct tep_filter_arg_field {
+ struct tep_format_field *field;
};
-struct filter_arg_value {
- enum filter_value_type type;
+struct tep_filter_arg_value {
+ enum tep_filter_value_type type;
union {
char *str;
unsigned long long val;
};
};
-struct filter_arg_op {
- enum filter_op_type type;
- struct filter_arg *left;
- struct filter_arg *right;
+struct tep_filter_arg_op {
+ enum tep_filter_op_type type;
+ struct tep_filter_arg *left;
+ struct tep_filter_arg *right;
};
-struct filter_arg_exp {
- enum filter_exp_type type;
- struct filter_arg *left;
- struct filter_arg *right;
+struct tep_filter_arg_exp {
+ enum tep_filter_exp_type type;
+ struct tep_filter_arg *left;
+ struct tep_filter_arg *right;
};
-struct filter_arg_num {
- enum filter_cmp_type type;
- struct filter_arg *left;
- struct filter_arg *right;
+struct tep_filter_arg_num {
+ enum tep_filter_cmp_type type;
+ struct tep_filter_arg *left;
+ struct tep_filter_arg *right;
};
-struct filter_arg_str {
- enum filter_cmp_type type;
- struct format_field *field;
- char *val;
- char *buffer;
- regex_t reg;
+struct tep_filter_arg_str {
+ enum tep_filter_cmp_type type;
+ struct tep_format_field *field;
+ char *val;
+ char *buffer;
+ regex_t reg;
};
-struct filter_arg {
- enum filter_arg_type type;
+struct tep_filter_arg {
+ enum tep_filter_arg_type type;
union {
- struct filter_arg_boolean boolean;
- struct filter_arg_field field;
- struct filter_arg_value value;
- struct filter_arg_op op;
- struct filter_arg_exp exp;
- struct filter_arg_num num;
- struct filter_arg_str str;
+ struct tep_filter_arg_boolean boolean;
+ struct tep_filter_arg_field field;
+ struct tep_filter_arg_value value;
+ struct tep_filter_arg_op op;
+ struct tep_filter_arg_exp exp;
+ struct tep_filter_arg_num num;
+ struct tep_filter_arg_str str;
};
};
-struct filter_type {
+struct tep_filter_type {
int event_id;
- struct event_format *event;
- struct filter_arg *filter;
+ struct tep_event_format *event;
+ struct tep_filter_arg *filter;
};
#define TEP_FILTER_ERROR_BUFSZ 1024
-struct event_filter {
+struct tep_event_filter {
struct tep_handle *pevent;
int filters;
- struct filter_type *event_filters;
+ struct tep_filter_type *event_filters;
char error_buffer[TEP_FILTER_ERROR_BUFSZ];
};
-struct event_filter *tep_filter_alloc(struct tep_handle *pevent);
+struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent);
/* for backward compatibility */
#define FILTER_NONE TEP_ERRNO__NO_FILTER
@@ -947,45 +733,45 @@ struct event_filter *tep_filter_alloc(struct tep_handle *pevent);
#define FILTER_MISS TEP_ERRNO__FILTER_MISS
#define FILTER_MATCH TEP_ERRNO__FILTER_MATCH
-enum filter_trivial_type {
- FILTER_TRIVIAL_FALSE,
- FILTER_TRIVIAL_TRUE,
- FILTER_TRIVIAL_BOTH,
+enum tep_filter_trivial_type {
+ TEP_FILTER_TRIVIAL_FALSE,
+ TEP_FILTER_TRIVIAL_TRUE,
+ TEP_FILTER_TRIVIAL_BOTH,
};
-enum tep_errno tep_filter_add_filter_str(struct event_filter *filter,
+enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
const char *filter_str);
-enum tep_errno tep_filter_match(struct event_filter *filter,
+enum tep_errno tep_filter_match(struct tep_event_filter *filter,
struct tep_record *record);
-int tep_filter_strerror(struct event_filter *filter, enum tep_errno err,
+int tep_filter_strerror(struct tep_event_filter *filter, enum tep_errno err,
char *buf, size_t buflen);
-int tep_event_filtered(struct event_filter *filter,
+int tep_event_filtered(struct tep_event_filter *filter,
int event_id);
-void tep_filter_reset(struct event_filter *filter);
+void tep_filter_reset(struct tep_event_filter *filter);
-int tep_filter_clear_trivial(struct event_filter *filter,
- enum filter_trivial_type type);
+int tep_filter_clear_trivial(struct tep_event_filter *filter,
+ enum tep_filter_trivial_type type);
-void tep_filter_free(struct event_filter *filter);
+void tep_filter_free(struct tep_event_filter *filter);
-char *tep_filter_make_string(struct event_filter *filter, int event_id);
+char *tep_filter_make_string(struct tep_event_filter *filter, int event_id);
-int tep_filter_remove_event(struct event_filter *filter,
+int tep_filter_remove_event(struct tep_event_filter *filter,
int event_id);
-int tep_filter_event_has_trivial(struct event_filter *filter,
+int tep_filter_event_has_trivial(struct tep_event_filter *filter,
int event_id,
- enum filter_trivial_type type);
+ enum tep_filter_trivial_type type);
-int tep_filter_copy(struct event_filter *dest, struct event_filter *source);
+int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *source);
-int tep_update_trivial(struct event_filter *dest, struct event_filter *source,
- enum filter_trivial_type type);
+int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *source,
+ enum tep_filter_trivial_type type);
-int tep_filter_compare(struct event_filter *filter1, struct event_filter *filter2);
+int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter *filter2);
#endif /* _PARSE_EVENTS_H */
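A sketch of the renamed filter API end to end; the "<sys>/<event>:<expr>" filter string form is assumed, and want_record() (including its leaked static filter) is hypothetical:

#include "event-parse.h"

static int want_record(struct tep_handle *pevent, struct tep_record *record)
{
	static struct tep_event_filter *filter;
	char error[TEP_FILTER_ERROR_BUFSZ];
	enum tep_errno err;

	if (!filter) {
		filter = tep_filter_alloc(pevent);
		err = tep_filter_add_filter_str(filter,
				"sched/sched_switch:next_pid > 1000");
		if (err) {
			tep_filter_strerror(filter, err, error, sizeof(error));
			return 0;
		}
	}
	return tep_filter_match(filter, record) == TEP_ERRNO__FILTER_MATCH;
}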
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index f17e25097e1e..e74f16c88398 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -14,7 +14,9 @@
#include <unistd.h>
#include <dirent.h>
#include "event-parse.h"
+#include "event-parse-local.h"
#include "event-utils.h"
+#include "trace-seq.h"
#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
@@ -30,8 +32,8 @@ static struct trace_plugin_options {
char *value;
} *trace_plugin_options;
-struct plugin_list {
- struct plugin_list *next;
+struct tep_plugin_list {
+ struct tep_plugin_list *next;
char *name;
void *handle;
};
@@ -258,7 +260,7 @@ void tep_plugin_remove_options(struct tep_plugin_option *options)
*/
void tep_print_plugins(struct trace_seq *s,
const char *prefix, const char *suffix,
- const struct plugin_list *list)
+ const struct tep_plugin_list *list)
{
while (list) {
trace_seq_printf(s, "%s%s%s", prefix, list->name, suffix);
@@ -270,9 +272,9 @@ static void
load_plugin(struct tep_handle *pevent, const char *path,
const char *file, void *data)
{
- struct plugin_list **plugin_list = data;
+ struct tep_plugin_list **plugin_list = data;
tep_plugin_load_func func;
- struct plugin_list *list;
+ struct tep_plugin_list *list;
const char *alias;
char *plugin;
void *handle;
@@ -416,20 +418,20 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
free(path);
}
-struct plugin_list*
+struct tep_plugin_list*
tep_load_plugins(struct tep_handle *pevent)
{
- struct plugin_list *list = NULL;
+ struct tep_plugin_list *list = NULL;
load_plugins(pevent, ".so", load_plugin, &list);
return list;
}
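A sketch of the plugin round trip with the renamed list type: load the .so plugins, report what was found, and drop them again before the handle goes away (with_plugins() is hypothetical):

#include "event-parse.h"
#include "trace-seq.h"

static void with_plugins(struct tep_handle *pevent)
{
	struct tep_plugin_list *plugins = tep_load_plugins(pevent);
	struct trace_seq s;

	trace_seq_init(&s);
	tep_print_plugins(&s, "  ", "\n", plugins);
	trace_seq_do_printf(&s);
	trace_seq_destroy(&s);

	/* ... use the handle ... */
	tep_unload_plugins(plugins, pevent);
}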
void
-tep_unload_plugins(struct plugin_list *plugin_list, struct tep_handle *pevent)
+tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *pevent)
{
tep_plugin_unload_func func;
- struct plugin_list *list;
+ struct tep_plugin_list *list;
while (plugin_list) {
list = plugin_list;
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index e76154c02ee7..ed87cb56713d 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -11,22 +11,23 @@
#include <sys/types.h>
#include "event-parse.h"
+#include "event-parse-local.h"
#include "event-utils.h"
#define COMM "COMM"
#define CPU "CPU"
-static struct format_field comm = {
+static struct tep_format_field comm = {
.name = "COMM",
};
-static struct format_field cpu = {
+static struct tep_format_field cpu = {
.name = "CPU",
};
struct event_list {
struct event_list *next;
- struct event_format *event;
+ struct tep_event_format *event;
};
static void show_error(char *error_buf, const char *fmt, ...)
@@ -61,15 +62,15 @@ static void free_token(char *token)
tep_free_token(token);
}
-static enum event_type read_token(char **tok)
+static enum tep_event_type read_token(char **tok)
{
- enum event_type type;
+ enum tep_event_type type;
char *token = NULL;
do {
free_token(token);
type = tep_read_token(&token);
- } while (type == EVENT_NEWLINE || type == EVENT_SPACE);
+ } while (type == TEP_EVENT_NEWLINE || type == TEP_EVENT_SPACE);
/* If token is = or ! check to see if the next char is ~ */
if (token &&
@@ -79,7 +80,7 @@ static enum event_type read_token(char **tok)
*tok = malloc(3);
if (*tok == NULL) {
free_token(token);
- return EVENT_ERROR;
+ return TEP_EVENT_ERROR;
}
sprintf(*tok, "%c%c", *token, '~');
free_token(token);
@@ -94,8 +95,8 @@ static enum event_type read_token(char **tok)
static int filter_cmp(const void *a, const void *b)
{
- const struct filter_type *ea = a;
- const struct filter_type *eb = b;
+ const struct tep_filter_type *ea = a;
+ const struct tep_filter_type *eb = b;
if (ea->event_id < eb->event_id)
return -1;
@@ -106,11 +107,11 @@ static int filter_cmp(const void *a, const void *b)
return 0;
}
-static struct filter_type *
-find_filter_type(struct event_filter *filter, int id)
+static struct tep_filter_type *
+find_filter_type(struct tep_event_filter *filter, int id)
{
- struct filter_type *filter_type;
- struct filter_type key;
+ struct tep_filter_type *filter_type;
+ struct tep_filter_type key;
key.event_id = id;
@@ -122,10 +123,10 @@ find_filter_type(struct event_filter *filter, int id)
return filter_type;
}
-static struct filter_type *
-add_filter_type(struct event_filter *filter, int id)
+static struct tep_filter_type *
+add_filter_type(struct tep_event_filter *filter, int id)
{
- struct filter_type *filter_type;
+ struct tep_filter_type *filter_type;
int i;
filter_type = find_filter_type(filter, id);
@@ -165,9 +166,9 @@ add_filter_type(struct event_filter *filter, int id)
* tep_filter_alloc - create a new event filter
* @pevent: The pevent that this filter is associated with
*/
-struct event_filter *tep_filter_alloc(struct tep_handle *pevent)
+struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent)
{
- struct event_filter *filter;
+ struct tep_event_filter *filter;
filter = malloc(sizeof(*filter));
if (filter == NULL)
@@ -180,44 +181,44 @@ struct event_filter *tep_filter_alloc(struct tep_handle *pevent)
return filter;
}
-static struct filter_arg *allocate_arg(void)
+static struct tep_filter_arg *allocate_arg(void)
{
- return calloc(1, sizeof(struct filter_arg));
+ return calloc(1, sizeof(struct tep_filter_arg));
}
-static void free_arg(struct filter_arg *arg)
+static void free_arg(struct tep_filter_arg *arg)
{
if (!arg)
return;
switch (arg->type) {
- case FILTER_ARG_NONE:
- case FILTER_ARG_BOOLEAN:
+ case TEP_FILTER_ARG_NONE:
+ case TEP_FILTER_ARG_BOOLEAN:
break;
- case FILTER_ARG_NUM:
+ case TEP_FILTER_ARG_NUM:
free_arg(arg->num.left);
free_arg(arg->num.right);
break;
- case FILTER_ARG_EXP:
+ case TEP_FILTER_ARG_EXP:
free_arg(arg->exp.left);
free_arg(arg->exp.right);
break;
- case FILTER_ARG_STR:
+ case TEP_FILTER_ARG_STR:
free(arg->str.val);
regfree(&arg->str.reg);
free(arg->str.buffer);
break;
- case FILTER_ARG_VALUE:
- if (arg->value.type == FILTER_STRING ||
- arg->value.type == FILTER_CHAR)
+ case TEP_FILTER_ARG_VALUE:
+ if (arg->value.type == TEP_FILTER_STRING ||
+ arg->value.type == TEP_FILTER_CHAR)
free(arg->value.str);
break;
- case FILTER_ARG_OP:
+ case TEP_FILTER_ARG_OP:
free_arg(arg->op.left);
free_arg(arg->op.right);
default:
@@ -228,7 +229,7 @@ static void free_arg(struct filter_arg *arg)
}
static int add_event(struct event_list **events,
- struct event_format *event)
+ struct tep_event_format *event)
{
struct event_list *list;
@@ -242,7 +243,7 @@ static int add_event(struct event_list **events,
return 0;
}
-static int event_match(struct event_format *event,
+static int event_match(struct tep_event_format *event,
regex_t *sreg, regex_t *ereg)
{
if (sreg) {
@@ -258,7 +259,7 @@ static enum tep_errno
find_event(struct tep_handle *pevent, struct event_list **events,
char *sys_name, char *event_name)
{
- struct event_format *event;
+ struct tep_event_format *event;
regex_t ereg;
regex_t sreg;
int match = 0;
@@ -333,11 +334,11 @@ static void free_events(struct event_list *events)
}
static enum tep_errno
-create_arg_item(struct event_format *event, const char *token,
- enum event_type type, struct filter_arg **parg, char *error_str)
+create_arg_item(struct tep_event_format *event, const char *token,
+ enum tep_event_type type, struct tep_filter_arg **parg, char *error_str)
{
- struct format_field *field;
- struct filter_arg *arg;
+ struct tep_format_field *field;
+ struct tep_filter_arg *arg;
arg = allocate_arg();
if (arg == NULL) {
@@ -347,11 +348,11 @@ create_arg_item(struct event_format *event, const char *token,
switch (type) {
- case EVENT_SQUOTE:
- case EVENT_DQUOTE:
- arg->type = FILTER_ARG_VALUE;
+ case TEP_EVENT_SQUOTE:
+ case TEP_EVENT_DQUOTE:
+ arg->type = TEP_FILTER_ARG_VALUE;
arg->value.type =
- type == EVENT_DQUOTE ? FILTER_STRING : FILTER_CHAR;
+ type == TEP_EVENT_DQUOTE ? TEP_FILTER_STRING : TEP_FILTER_CHAR;
arg->value.str = strdup(token);
if (!arg->value.str) {
free_arg(arg);
@@ -359,11 +360,11 @@ create_arg_item(struct event_format *event, const char *token,
return TEP_ERRNO__MEM_ALLOC_FAILED;
}
break;
- case EVENT_ITEM:
+ case TEP_EVENT_ITEM:
/* if it is a number, then convert it */
if (isdigit(token[0])) {
- arg->type = FILTER_ARG_VALUE;
- arg->value.type = FILTER_NUMBER;
+ arg->type = TEP_FILTER_ARG_VALUE;
+ arg->value.type = TEP_FILTER_NUMBER;
arg->value.val = strtoull(token, NULL, 0);
break;
}
@@ -377,12 +378,12 @@ create_arg_item(struct event_format *event, const char *token,
field = &cpu;
} else {
/* not a field, make it false */
- arg->type = FILTER_ARG_BOOLEAN;
- arg->boolean.value = FILTER_FALSE;
+ arg->type = TEP_FILTER_ARG_BOOLEAN;
+ arg->boolean.value = TEP_FILTER_FALSE;
break;
}
}
- arg->type = FILTER_ARG_FIELD;
+ arg->type = TEP_FILTER_ARG_FIELD;
arg->field.field = field;
break;
default:
@@ -394,82 +395,82 @@ create_arg_item(struct event_format *event, const char *token,
return 0;
}
-static struct filter_arg *
-create_arg_op(enum filter_op_type btype)
+static struct tep_filter_arg *
+create_arg_op(enum tep_filter_op_type btype)
{
- struct filter_arg *arg;
+ struct tep_filter_arg *arg;
arg = allocate_arg();
if (!arg)
return NULL;
- arg->type = FILTER_ARG_OP;
+ arg->type = TEP_FILTER_ARG_OP;
arg->op.type = btype;
return arg;
}
-static struct filter_arg *
-create_arg_exp(enum filter_exp_type etype)
+static struct tep_filter_arg *
+create_arg_exp(enum tep_filter_exp_type etype)
{
- struct filter_arg *arg;
+ struct tep_filter_arg *arg;
arg = allocate_arg();
if (!arg)
return NULL;
- arg->type = FILTER_ARG_EXP;
+ arg->type = TEP_FILTER_ARG_EXP;
arg->exp.type = etype;
return arg;
}
-static struct filter_arg *
-create_arg_cmp(enum filter_cmp_type ctype)
+static struct tep_filter_arg *
+create_arg_cmp(enum tep_filter_cmp_type ctype)
{
- struct filter_arg *arg;
+ struct tep_filter_arg *arg;
arg = allocate_arg();
if (!arg)
return NULL;
/* Use NUM and change if necessary */
- arg->type = FILTER_ARG_NUM;
+ arg->type = TEP_FILTER_ARG_NUM;
arg->num.type = ctype;
return arg;
}
static enum tep_errno
-add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
+add_right(struct tep_filter_arg *op, struct tep_filter_arg *arg, char *error_str)
{
- struct filter_arg *left;
+ struct tep_filter_arg *left;
char *str;
int op_type;
int ret;
switch (op->type) {
- case FILTER_ARG_EXP:
+ case TEP_FILTER_ARG_EXP:
if (op->exp.right)
goto out_fail;
op->exp.right = arg;
break;
- case FILTER_ARG_OP:
+ case TEP_FILTER_ARG_OP:
if (op->op.right)
goto out_fail;
op->op.right = arg;
break;
- case FILTER_ARG_NUM:
+ case TEP_FILTER_ARG_NUM:
if (op->op.right)
goto out_fail;
/*
* The arg must be num, str, or field
*/
switch (arg->type) {
- case FILTER_ARG_VALUE:
- case FILTER_ARG_FIELD:
+ case TEP_FILTER_ARG_VALUE:
+ case TEP_FILTER_ARG_FIELD:
break;
default:
show_error(error_str, "Illegal rvalue");
@@ -481,20 +482,20 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
* convert this to a string or regex.
*/
switch (arg->value.type) {
- case FILTER_CHAR:
+ case TEP_FILTER_CHAR:
/*
* A char should be converted to number if
* the string is 1 byte, and the compare
* is not a REGEX.
*/
if (strlen(arg->value.str) == 1 &&
- op->num.type != FILTER_CMP_REGEX &&
- op->num.type != FILTER_CMP_NOT_REGEX) {
- arg->value.type = FILTER_NUMBER;
+ op->num.type != TEP_FILTER_CMP_REGEX &&
+ op->num.type != TEP_FILTER_CMP_NOT_REGEX) {
+ arg->value.type = TEP_FILTER_NUMBER;
goto do_int;
}
/* fall through */
- case FILTER_STRING:
+ case TEP_FILTER_STRING:
/* convert op to a string arg */
op_type = op->num.type;
@@ -508,16 +509,16 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
* If left arg was a field not found then
* NULL the entire op.
*/
- if (left->type == FILTER_ARG_BOOLEAN) {
+ if (left->type == TEP_FILTER_ARG_BOOLEAN) {
free_arg(left);
free_arg(arg);
- op->type = FILTER_ARG_BOOLEAN;
- op->boolean.value = FILTER_FALSE;
+ op->type = TEP_FILTER_ARG_BOOLEAN;
+ op->boolean.value = TEP_FILTER_FALSE;
break;
}
/* Left arg must be a field */
- if (left->type != FILTER_ARG_FIELD) {
+ if (left->type != TEP_FILTER_ARG_FIELD) {
show_error(error_str,
"Illegal lvalue for string comparison");
return TEP_ERRNO__ILLEGAL_LVALUE;
@@ -525,15 +526,15 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
/* Make sure this is a valid string compare */
switch (op_type) {
- case FILTER_CMP_EQ:
- op_type = FILTER_CMP_MATCH;
+ case TEP_FILTER_CMP_EQ:
+ op_type = TEP_FILTER_CMP_MATCH;
break;
- case FILTER_CMP_NE:
- op_type = FILTER_CMP_NOT_MATCH;
+ case TEP_FILTER_CMP_NE:
+ op_type = TEP_FILTER_CMP_NOT_MATCH;
break;
- case FILTER_CMP_REGEX:
- case FILTER_CMP_NOT_REGEX:
+ case TEP_FILTER_CMP_REGEX:
+ case TEP_FILTER_CMP_NOT_REGEX:
ret = regcomp(&op->str.reg, str, REG_ICASE|REG_NOSUB);
if (ret) {
show_error(error_str,
@@ -548,7 +549,7 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
return TEP_ERRNO__ILLEGAL_STRING_CMP;
}
- op->type = FILTER_ARG_STR;
+ op->type = TEP_FILTER_ARG_STR;
op->str.type = op_type;
op->str.field = left->field.field;
op->str.val = strdup(str);
@@ -573,12 +574,12 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
break;
- case FILTER_NUMBER:
+ case TEP_FILTER_NUMBER:
do_int:
switch (op->num.type) {
- case FILTER_CMP_REGEX:
- case FILTER_CMP_NOT_REGEX:
+ case TEP_FILTER_CMP_REGEX:
+ case TEP_FILTER_CMP_NOT_REGEX:
show_error(error_str,
"Op not allowed with integers");
return TEP_ERRNO__ILLEGAL_INTEGER_CMP;
@@ -605,35 +606,35 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
return TEP_ERRNO__SYNTAX_ERROR;
}
-static struct filter_arg *
-rotate_op_right(struct filter_arg *a, struct filter_arg *b)
+static struct tep_filter_arg *
+rotate_op_right(struct tep_filter_arg *a, struct tep_filter_arg *b)
{
- struct filter_arg *arg;
+ struct tep_filter_arg *arg;
arg = a->op.right;
a->op.right = b;
return arg;
}
-static enum tep_errno add_left(struct filter_arg *op, struct filter_arg *arg)
+static enum tep_errno add_left(struct tep_filter_arg *op, struct tep_filter_arg *arg)
{
switch (op->type) {
- case FILTER_ARG_EXP:
- if (arg->type == FILTER_ARG_OP)
+ case TEP_FILTER_ARG_EXP:
+ if (arg->type == TEP_FILTER_ARG_OP)
arg = rotate_op_right(arg, op);
op->exp.left = arg;
break;
- case FILTER_ARG_OP:
+ case TEP_FILTER_ARG_OP:
op->op.left = arg;
break;
- case FILTER_ARG_NUM:
- if (arg->type == FILTER_ARG_OP)
+ case TEP_FILTER_ARG_NUM:
+ if (arg->type == TEP_FILTER_ARG_OP)
arg = rotate_op_right(arg, op);
/* left arg of compares must be a field */
- if (arg->type != FILTER_ARG_FIELD &&
- arg->type != FILTER_ARG_BOOLEAN)
+ if (arg->type != TEP_FILTER_ARG_FIELD &&
+ arg->type != TEP_FILTER_ARG_BOOLEAN)
return TEP_ERRNO__INVALID_ARG_TYPE;
op->num.left = arg;
break;
@@ -652,91 +653,91 @@ enum op_type {
};
static enum op_type process_op(const char *token,
- enum filter_op_type *btype,
- enum filter_cmp_type *ctype,
- enum filter_exp_type *etype)
+ enum tep_filter_op_type *btype,
+ enum tep_filter_cmp_type *ctype,
+ enum tep_filter_exp_type *etype)
{
- *btype = FILTER_OP_NOT;
- *etype = FILTER_EXP_NONE;
- *ctype = FILTER_CMP_NONE;
+ *btype = TEP_FILTER_OP_NOT;
+ *etype = TEP_FILTER_EXP_NONE;
+ *ctype = TEP_FILTER_CMP_NONE;
if (strcmp(token, "&&") == 0)
- *btype = FILTER_OP_AND;
+ *btype = TEP_FILTER_OP_AND;
else if (strcmp(token, "||") == 0)
- *btype = FILTER_OP_OR;
+ *btype = TEP_FILTER_OP_OR;
else if (strcmp(token, "!") == 0)
return OP_NOT;
- if (*btype != FILTER_OP_NOT)
+ if (*btype != TEP_FILTER_OP_NOT)
return OP_BOOL;
/* Check for value expressions */
if (strcmp(token, "+") == 0) {
- *etype = FILTER_EXP_ADD;
+ *etype = TEP_FILTER_EXP_ADD;
} else if (strcmp(token, "-") == 0) {
- *etype = FILTER_EXP_SUB;
+ *etype = TEP_FILTER_EXP_SUB;
} else if (strcmp(token, "*") == 0) {
- *etype = FILTER_EXP_MUL;
+ *etype = TEP_FILTER_EXP_MUL;
} else if (strcmp(token, "/") == 0) {
- *etype = FILTER_EXP_DIV;
+ *etype = TEP_FILTER_EXP_DIV;
} else if (strcmp(token, "%") == 0) {
- *etype = FILTER_EXP_MOD;
+ *etype = TEP_FILTER_EXP_MOD;
} else if (strcmp(token, ">>") == 0) {
- *etype = FILTER_EXP_RSHIFT;
+ *etype = TEP_FILTER_EXP_RSHIFT;
} else if (strcmp(token, "<<") == 0) {
- *etype = FILTER_EXP_LSHIFT;
+ *etype = TEP_FILTER_EXP_LSHIFT;
} else if (strcmp(token, "&") == 0) {
- *etype = FILTER_EXP_AND;
+ *etype = TEP_FILTER_EXP_AND;
} else if (strcmp(token, "|") == 0) {
- *etype = FILTER_EXP_OR;
+ *etype = TEP_FILTER_EXP_OR;
} else if (strcmp(token, "^") == 0) {
- *etype = FILTER_EXP_XOR;
+ *etype = TEP_FILTER_EXP_XOR;
} else if (strcmp(token, "~") == 0)
- *etype = FILTER_EXP_NOT;
+ *etype = TEP_FILTER_EXP_NOT;
- if (*etype != FILTER_EXP_NONE)
+ if (*etype != TEP_FILTER_EXP_NONE)
return OP_EXP;
/* Check for compares */
if (strcmp(token, "==") == 0)
- *ctype = FILTER_CMP_EQ;
+ *ctype = TEP_FILTER_CMP_EQ;
else if (strcmp(token, "!=") == 0)
- *ctype = FILTER_CMP_NE;
+ *ctype = TEP_FILTER_CMP_NE;
else if (strcmp(token, "<") == 0)
- *ctype = FILTER_CMP_LT;
+ *ctype = TEP_FILTER_CMP_LT;
else if (strcmp(token, ">") == 0)
- *ctype = FILTER_CMP_GT;
+ *ctype = TEP_FILTER_CMP_GT;
else if (strcmp(token, "<=") == 0)
- *ctype = FILTER_CMP_LE;
+ *ctype = TEP_FILTER_CMP_LE;
else if (strcmp(token, ">=") == 0)
- *ctype = FILTER_CMP_GE;
+ *ctype = TEP_FILTER_CMP_GE;
else if (strcmp(token, "=~") == 0)
- *ctype = FILTER_CMP_REGEX;
+ *ctype = TEP_FILTER_CMP_REGEX;
else if (strcmp(token, "!~") == 0)
- *ctype = FILTER_CMP_NOT_REGEX;
+ *ctype = TEP_FILTER_CMP_NOT_REGEX;
else
return OP_NONE;
return OP_CMP;
}
-static int check_op_done(struct filter_arg *arg)
+static int check_op_done(struct tep_filter_arg *arg)
{
switch (arg->type) {
- case FILTER_ARG_EXP:
+ case TEP_FILTER_ARG_EXP:
return arg->exp.right != NULL;
- case FILTER_ARG_OP:
+ case TEP_FILTER_ARG_OP:
return arg->op.right != NULL;
- case FILTER_ARG_NUM:
+ case TEP_FILTER_ARG_NUM:
return arg->num.right != NULL;
- case FILTER_ARG_STR:
+ case TEP_FILTER_ARG_STR:
/* A string conversion is always done */
return 1;
- case FILTER_ARG_BOOLEAN:
+ case TEP_FILTER_ARG_BOOLEAN:
/* field not found, that is ok */
return 1;
@@ -752,14 +753,14 @@ enum filter_vals {
};
static enum tep_errno
-reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
- struct filter_arg *arg, char *error_str)
+reparent_op_arg(struct tep_filter_arg *parent, struct tep_filter_arg *old_child,
+ struct tep_filter_arg *arg, char *error_str)
{
- struct filter_arg *other_child;
- struct filter_arg **ptr;
+ struct tep_filter_arg *other_child;
+ struct tep_filter_arg **ptr;
- if (parent->type != FILTER_ARG_OP &&
- arg->type != FILTER_ARG_OP) {
+ if (parent->type != TEP_FILTER_ARG_OP &&
+ arg->type != TEP_FILTER_ARG_OP) {
show_error(error_str, "can not reparent other than OP");
return TEP_ERRNO__REPARENT_NOT_OP;
}
@@ -804,7 +805,7 @@ reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
}
/* Returns either filter_vals (success) or tep_errno (failure) */
-static int test_arg(struct filter_arg *parent, struct filter_arg *arg,
+static int test_arg(struct tep_filter_arg *parent, struct tep_filter_arg *arg,
char *error_str)
{
int lval, rval;
@@ -812,16 +813,16 @@ static int test_arg(struct filter_arg *parent, struct filter_arg *arg,
switch (arg->type) {
/* bad case */
- case FILTER_ARG_BOOLEAN:
+ case TEP_FILTER_ARG_BOOLEAN:
return FILTER_VAL_FALSE + arg->boolean.value;
/* good cases: */
- case FILTER_ARG_STR:
- case FILTER_ARG_VALUE:
- case FILTER_ARG_FIELD:
+ case TEP_FILTER_ARG_STR:
+ case TEP_FILTER_ARG_VALUE:
+ case TEP_FILTER_ARG_FIELD:
return FILTER_VAL_NORM;
- case FILTER_ARG_EXP:
+ case TEP_FILTER_ARG_EXP:
lval = test_arg(arg, arg->exp.left, error_str);
if (lval != FILTER_VAL_NORM)
return lval;
@@ -830,7 +831,7 @@ static int test_arg(struct filter_arg *parent, struct filter_arg *arg,
return rval;
return FILTER_VAL_NORM;
- case FILTER_ARG_NUM:
+ case TEP_FILTER_ARG_NUM:
lval = test_arg(arg, arg->num.left, error_str);
if (lval != FILTER_VAL_NORM)
return lval;
@@ -839,14 +840,14 @@ static int test_arg(struct filter_arg *parent, struct filter_arg *arg,
return rval;
return FILTER_VAL_NORM;
- case FILTER_ARG_OP:
- if (arg->op.type != FILTER_OP_NOT) {
+ case TEP_FILTER_ARG_OP:
+ if (arg->op.type != TEP_FILTER_OP_NOT) {
lval = test_arg(arg, arg->op.left, error_str);
switch (lval) {
case FILTER_VAL_NORM:
break;
case FILTER_VAL_TRUE:
- if (arg->op.type == FILTER_OP_OR)
+ if (arg->op.type == TEP_FILTER_OP_OR)
return FILTER_VAL_TRUE;
rval = test_arg(arg, arg->op.right, error_str);
if (rval != FILTER_VAL_NORM)
@@ -856,7 +857,7 @@ static int test_arg(struct filter_arg *parent, struct filter_arg *arg,
error_str);
case FILTER_VAL_FALSE:
- if (arg->op.type == FILTER_OP_AND)
+ if (arg->op.type == TEP_FILTER_OP_AND)
return FILTER_VAL_FALSE;
rval = test_arg(arg, arg->op.right, error_str);
if (rval != FILTER_VAL_NORM)
@@ -877,18 +878,18 @@ static int test_arg(struct filter_arg *parent, struct filter_arg *arg,
break;
case FILTER_VAL_TRUE:
- if (arg->op.type == FILTER_OP_OR)
+ if (arg->op.type == TEP_FILTER_OP_OR)
return FILTER_VAL_TRUE;
- if (arg->op.type == FILTER_OP_NOT)
+ if (arg->op.type == TEP_FILTER_OP_NOT)
return FILTER_VAL_FALSE;
return reparent_op_arg(parent, arg, arg->op.left,
error_str);
case FILTER_VAL_FALSE:
- if (arg->op.type == FILTER_OP_AND)
+ if (arg->op.type == TEP_FILTER_OP_AND)
return FILTER_VAL_FALSE;
- if (arg->op.type == FILTER_OP_NOT)
+ if (arg->op.type == TEP_FILTER_OP_NOT)
return FILTER_VAL_TRUE;
return reparent_op_arg(parent, arg, arg->op.left,
@@ -904,8 +905,8 @@ static int test_arg(struct filter_arg *parent, struct filter_arg *arg,
}
/* Remove any unknown event fields */
-static int collapse_tree(struct filter_arg *arg,
- struct filter_arg **arg_collapsed, char *error_str)
+static int collapse_tree(struct tep_filter_arg *arg,
+ struct tep_filter_arg **arg_collapsed, char *error_str)
{
int ret;
@@ -919,7 +920,7 @@ static int collapse_tree(struct filter_arg *arg,
free_arg(arg);
arg = allocate_arg();
if (arg) {
- arg->type = FILTER_ARG_BOOLEAN;
+ arg->type = TEP_FILTER_ARG_BOOLEAN;
arg->boolean.value = ret == FILTER_VAL_TRUE;
} else {
show_error(error_str, "Failed to allocate filter arg");
@@ -939,19 +940,19 @@ static int collapse_tree(struct filter_arg *arg,
}
static enum tep_errno
-process_filter(struct event_format *event, struct filter_arg **parg,
+process_filter(struct tep_event_format *event, struct tep_filter_arg **parg,
char *error_str, int not)
{
- enum event_type type;
+ enum tep_event_type type;
char *token = NULL;
- struct filter_arg *current_op = NULL;
- struct filter_arg *current_exp = NULL;
- struct filter_arg *left_item = NULL;
- struct filter_arg *arg = NULL;
+ struct tep_filter_arg *current_op = NULL;
+ struct tep_filter_arg *current_exp = NULL;
+ struct tep_filter_arg *left_item = NULL;
+ struct tep_filter_arg *arg = NULL;
enum op_type op_type;
- enum filter_op_type btype;
- enum filter_exp_type etype;
- enum filter_cmp_type ctype;
+ enum tep_filter_op_type btype;
+ enum tep_filter_exp_type etype;
+ enum tep_filter_cmp_type ctype;
enum tep_errno ret;
*parg = NULL;
@@ -960,9 +961,9 @@ process_filter(struct event_format *event, struct filter_arg **parg,
free(token);
type = read_token(&token);
switch (type) {
- case EVENT_SQUOTE:
- case EVENT_DQUOTE:
- case EVENT_ITEM:
+ case TEP_EVENT_SQUOTE:
+ case TEP_EVENT_DQUOTE:
+ case TEP_EVENT_ITEM:
ret = create_arg_item(event, token, type, &arg, error_str);
if (ret < 0)
goto fail;
@@ -987,7 +988,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
arg = NULL;
break;
- case EVENT_DELIM:
+ case TEP_EVENT_DELIM:
if (*token == ',') {
show_error(error_str, "Illegal token ','");
ret = TEP_ERRNO__ILLEGAL_TOKEN;
@@ -1054,7 +1055,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
}
break;
- case EVENT_OP:
+ case TEP_EVENT_OP:
op_type = process_op(token, &btype, &ctype, &etype);
/* All expect a left arg except for NOT */
@@ -1139,14 +1140,14 @@ process_filter(struct event_format *event, struct filter_arg **parg,
if (ret < 0)
goto fail_syntax;
break;
- case EVENT_NONE:
+ case TEP_EVENT_NONE:
break;
- case EVENT_ERROR:
+ case TEP_EVENT_ERROR:
goto fail_alloc;
default:
goto fail_syntax;
}
- } while (type != EVENT_NONE);
+ } while (type != TEP_EVENT_NONE);
if (!current_op && !current_exp)
goto fail_syntax;
@@ -1179,8 +1180,8 @@ process_filter(struct event_format *event, struct filter_arg **parg,
}
static enum tep_errno
-process_event(struct event_format *event, const char *filter_str,
- struct filter_arg **parg, char *error_str)
+process_event(struct tep_event_format *event, const char *filter_str,
+ struct tep_filter_arg **parg, char *error_str)
{
int ret;
@@ -1196,19 +1197,19 @@ process_event(struct event_format *event, const char *filter_str,
if (*parg == NULL)
return TEP_ERRNO__MEM_ALLOC_FAILED;
- (*parg)->type = FILTER_ARG_BOOLEAN;
- (*parg)->boolean.value = FILTER_FALSE;
+ (*parg)->type = TEP_FILTER_ARG_BOOLEAN;
+ (*parg)->boolean.value = TEP_FILTER_FALSE;
}
return 0;
}
static enum tep_errno
-filter_event(struct event_filter *filter, struct event_format *event,
+filter_event(struct tep_event_filter *filter, struct tep_event_format *event,
const char *filter_str, char *error_str)
{
- struct filter_type *filter_type;
- struct filter_arg *arg;
+ struct tep_filter_type *filter_type;
+ struct tep_filter_arg *arg;
enum tep_errno ret;
if (filter_str) {
@@ -1222,8 +1223,8 @@ filter_event(struct event_filter *filter, struct event_format *event,
if (arg == NULL)
return TEP_ERRNO__MEM_ALLOC_FAILED;
- arg->type = FILTER_ARG_BOOLEAN;
- arg->boolean.value = FILTER_TRUE;
+ arg->type = TEP_FILTER_ARG_BOOLEAN;
+ arg->boolean.value = TEP_FILTER_TRUE;
}
filter_type = add_filter_type(filter, event->id);
@@ -1237,7 +1238,7 @@ filter_event(struct event_filter *filter, struct event_format *event,
return 0;
}
-static void filter_init_error_buf(struct event_filter *filter)
+static void filter_init_error_buf(struct tep_event_filter *filter)
{
/* clear buffer to reset show error */
tep_buffer_init("", 0);
@@ -1253,7 +1254,7 @@ static void filter_init_error_buf(struct event_filter *filter)
* negative error code. Use tep_filter_strerror() to see
* actual error message in case of error.
*/
-enum tep_errno tep_filter_add_filter_str(struct event_filter *filter,
+enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
const char *filter_str)
{
struct tep_handle *pevent = filter->pevent;
@@ -1351,7 +1352,7 @@ enum tep_errno tep_filter_add_filter_str(struct event_filter *filter,
return rtn;
}
-static void free_filter_type(struct filter_type *filter_type)
+static void free_filter_type(struct tep_filter_type *filter_type)
{
free_arg(filter_type->filter);
}
@@ -1365,7 +1366,7 @@ static void free_filter_type(struct filter_type *filter_type)
*
* Returns 0 if message was filled successfully, -1 if error
*/
-int tep_filter_strerror(struct event_filter *filter, enum tep_errno err,
+int tep_filter_strerror(struct tep_event_filter *filter, enum tep_errno err,
char *buf, size_t buflen)
{
if (err <= __TEP_ERRNO__START || err >= __TEP_ERRNO__END)
@@ -1393,10 +1394,10 @@ int tep_filter_strerror(struct event_filter *filter, enum tep_errno err,
* Returns 1: if an event was removed
* 0: if the event was not found
*/
-int tep_filter_remove_event(struct event_filter *filter,
+int tep_filter_remove_event(struct tep_event_filter *filter,
int event_id)
{
- struct filter_type *filter_type;
+ struct tep_filter_type *filter_type;
unsigned long len;
if (!filter->filters)
@@ -1428,7 +1429,7 @@ int tep_filter_remove_event(struct event_filter *filter,
*
* Removes all filters from a filter and resets it.
*/
-void tep_filter_reset(struct event_filter *filter)
+void tep_filter_reset(struct tep_event_filter *filter)
{
int i;
@@ -1440,7 +1441,7 @@ void tep_filter_reset(struct event_filter *filter)
filter->event_filters = NULL;
}
-void tep_filter_free(struct event_filter *filter)
+void tep_filter_free(struct tep_event_filter *filter)
{
tep_unref(filter->pevent);
@@ -1449,14 +1450,14 @@ void tep_filter_free(struct event_filter *filter)
free(filter);
}
-static char *arg_to_str(struct event_filter *filter, struct filter_arg *arg);
+static char *arg_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg);
-static int copy_filter_type(struct event_filter *filter,
- struct event_filter *source,
- struct filter_type *filter_type)
+static int copy_filter_type(struct tep_event_filter *filter,
+ struct tep_event_filter *source,
+ struct tep_filter_type *filter_type)
{
- struct filter_arg *arg;
- struct event_format *event;
+ struct tep_filter_arg *arg;
+ struct tep_event_format *event;
const char *sys;
const char *name;
char *str;
@@ -1478,7 +1479,7 @@ static int copy_filter_type(struct event_filter *filter,
if (arg == NULL)
return -1;
- arg->type = FILTER_ARG_BOOLEAN;
+ arg->type = TEP_FILTER_ARG_BOOLEAN;
if (strcmp(str, "TRUE") == 0)
arg->boolean.value = 1;
else
@@ -1507,7 +1508,7 @@ static int copy_filter_type(struct event_filter *filter,
*
* Returns 0 on success and -1 if not all filters were copied
*/
-int tep_filter_copy(struct event_filter *dest, struct event_filter *source)
+int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *source)
{
int ret = 0;
int i;
@@ -1533,14 +1534,14 @@ int tep_filter_copy(struct event_filter *dest, struct event_filter *source)
* Returns 0 on success and -1 if there was a problem updating, but
* events may have still been updated on error.
*/
-int tep_update_trivial(struct event_filter *dest, struct event_filter *source,
- enum filter_trivial_type type)
+int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *source,
+ enum tep_filter_trivial_type type)
{
struct tep_handle *src_pevent;
struct tep_handle *dest_pevent;
- struct event_format *event;
- struct filter_type *filter_type;
- struct filter_arg *arg;
+ struct tep_event_format *event;
+ struct tep_filter_type *filter_type;
+ struct tep_filter_arg *arg;
char *str;
int i;
@@ -1554,10 +1555,10 @@ int tep_update_trivial(struct event_filter *dest, struct event_filter *source,
for (i = 0; i < dest->filters; i++) {
filter_type = &dest->event_filters[i];
arg = filter_type->filter;
- if (arg->type != FILTER_ARG_BOOLEAN)
+ if (arg->type != TEP_FILTER_ARG_BOOLEAN)
continue;
- if ((arg->boolean.value && type == FILTER_TRIVIAL_FALSE) ||
- (!arg->boolean.value && type == FILTER_TRIVIAL_TRUE))
+ if ((arg->boolean.value && type == TEP_FILTER_TRIVIAL_FALSE) ||
+ (!arg->boolean.value && type == TEP_FILTER_TRIVIAL_TRUE))
continue;
event = filter_type->event;
@@ -1592,10 +1593,10 @@ int tep_update_trivial(struct event_filter *dest, struct event_filter *source,
*
* Returns 0 on success and -1 if there was a problem.
*/
-int tep_filter_clear_trivial(struct event_filter *filter,
- enum filter_trivial_type type)
+int tep_filter_clear_trivial(struct tep_event_filter *filter,
+ enum tep_filter_trivial_type type)
{
- struct filter_type *filter_type;
+ struct tep_filter_type *filter_type;
int count = 0;
int *ids = NULL;
int i;
@@ -1611,14 +1612,14 @@ int tep_filter_clear_trivial(struct event_filter *filter,
int *new_ids;
filter_type = &filter->event_filters[i];
- if (filter_type->filter->type != FILTER_ARG_BOOLEAN)
+ if (filter_type->filter->type != TEP_FILTER_ARG_BOOLEAN)
continue;
switch (type) {
- case FILTER_TRIVIAL_FALSE:
+ case TEP_FILTER_TRIVIAL_FALSE:
if (filter_type->filter->boolean.value)
continue;
break;
- case FILTER_TRIVIAL_TRUE:
+ case TEP_FILTER_TRIVIAL_TRUE:
if (!filter_type->filter->boolean.value)
continue;
default:
@@ -1654,11 +1655,11 @@ int tep_filter_clear_trivial(struct event_filter *filter,
* Returns 1 if the event contains a matching trivial type
* otherwise 0.
*/
-int tep_filter_event_has_trivial(struct event_filter *filter,
+int tep_filter_event_has_trivial(struct tep_event_filter *filter,
int event_id,
- enum filter_trivial_type type)
+ enum tep_filter_trivial_type type)
{
- struct filter_type *filter_type;
+ struct tep_filter_type *filter_type;
if (!filter->filters)
return 0;
@@ -1668,25 +1669,25 @@ int tep_filter_event_has_trivial(struct event_filter *filter,
if (!filter_type)
return 0;
- if (filter_type->filter->type != FILTER_ARG_BOOLEAN)
+ if (filter_type->filter->type != TEP_FILTER_ARG_BOOLEAN)
return 0;
switch (type) {
- case FILTER_TRIVIAL_FALSE:
+ case TEP_FILTER_TRIVIAL_FALSE:
return !filter_type->filter->boolean.value;
- case FILTER_TRIVIAL_TRUE:
+ case TEP_FILTER_TRIVIAL_TRUE:
return filter_type->filter->boolean.value;
default:
return 1;
}
}
-static int test_filter(struct event_format *event, struct filter_arg *arg,
+static int test_filter(struct tep_event_format *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err);
static const char *
-get_comm(struct event_format *event, struct tep_record *record)
+get_comm(struct tep_event_format *event, struct tep_record *record)
{
const char *comm;
int pid;
@@ -1697,8 +1698,8 @@ get_comm(struct event_format *event, struct tep_record *record)
}
static unsigned long long
-get_value(struct event_format *event,
- struct format_field *field, struct tep_record *record)
+get_value(struct tep_event_format *event,
+ struct tep_format_field *field, struct tep_record *record)
{
unsigned long long val;
@@ -1716,7 +1717,7 @@ get_value(struct event_format *event,
tep_read_number_field(field, record->data, &val);
- if (!(field->flags & FIELD_IS_SIGNED))
+ if (!(field->flags & TEP_FIELD_IS_SIGNED))
return val;
switch (field->size) {
@@ -1733,11 +1734,11 @@ get_value(struct event_format *event,
}
static unsigned long long
-get_arg_value(struct event_format *event, struct filter_arg *arg,
+get_arg_value(struct tep_event_format *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err);
static unsigned long long
-get_exp_value(struct event_format *event, struct filter_arg *arg,
+get_exp_value(struct tep_event_format *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
unsigned long long lval, rval;
@@ -1753,37 +1754,37 @@ get_exp_value(struct event_format *event, struct filter_arg *arg,
}
switch (arg->exp.type) {
- case FILTER_EXP_ADD:
+ case TEP_FILTER_EXP_ADD:
return lval + rval;
- case FILTER_EXP_SUB:
+ case TEP_FILTER_EXP_SUB:
return lval - rval;
- case FILTER_EXP_MUL:
+ case TEP_FILTER_EXP_MUL:
return lval * rval;
- case FILTER_EXP_DIV:
+ case TEP_FILTER_EXP_DIV:
return lval / rval;
- case FILTER_EXP_MOD:
+ case TEP_FILTER_EXP_MOD:
return lval % rval;
- case FILTER_EXP_RSHIFT:
+ case TEP_FILTER_EXP_RSHIFT:
return lval >> rval;
- case FILTER_EXP_LSHIFT:
+ case TEP_FILTER_EXP_LSHIFT:
return lval << rval;
- case FILTER_EXP_AND:
+ case TEP_FILTER_EXP_AND:
return lval & rval;
- case FILTER_EXP_OR:
+ case TEP_FILTER_EXP_OR:
return lval | rval;
- case FILTER_EXP_XOR:
+ case TEP_FILTER_EXP_XOR:
return lval ^ rval;
- case FILTER_EXP_NOT:
+ case TEP_FILTER_EXP_NOT:
default:
if (!*err)
*err = TEP_ERRNO__INVALID_EXP_TYPE;
@@ -1792,21 +1793,21 @@ get_exp_value(struct event_format *event, struct filter_arg *arg,
}
static unsigned long long
-get_arg_value(struct event_format *event, struct filter_arg *arg,
+get_arg_value(struct tep_event_format *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
switch (arg->type) {
- case FILTER_ARG_FIELD:
+ case TEP_FILTER_ARG_FIELD:
return get_value(event, arg->field.field, record);
- case FILTER_ARG_VALUE:
- if (arg->value.type != FILTER_NUMBER) {
+ case TEP_FILTER_ARG_VALUE:
+ if (arg->value.type != TEP_FILTER_NUMBER) {
if (!*err)
*err = TEP_ERRNO__NOT_A_NUMBER;
}
return arg->value.val;
- case FILTER_ARG_EXP:
+ case TEP_FILTER_ARG_EXP:
return get_exp_value(event, arg, record, err);
default:
@@ -1816,7 +1817,7 @@ get_arg_value(struct event_format *event, struct filter_arg *arg,
return 0;
}
-static int test_num(struct event_format *event, struct filter_arg *arg,
+static int test_num(struct tep_event_format *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
unsigned long long lval, rval;
@@ -1832,22 +1833,22 @@ static int test_num(struct event_format *event, struct filter_arg *arg,
}
switch (arg->num.type) {
- case FILTER_CMP_EQ:
+ case TEP_FILTER_CMP_EQ:
return lval == rval;
- case FILTER_CMP_NE:
+ case TEP_FILTER_CMP_NE:
return lval != rval;
- case FILTER_CMP_GT:
+ case TEP_FILTER_CMP_GT:
return lval > rval;
- case FILTER_CMP_LT:
+ case TEP_FILTER_CMP_LT:
return lval < rval;
- case FILTER_CMP_GE:
+ case TEP_FILTER_CMP_GE:
return lval >= rval;
- case FILTER_CMP_LE:
+ case TEP_FILTER_CMP_LE:
return lval <= rval;
default:
@@ -1857,9 +1858,9 @@ static int test_num(struct event_format *event, struct filter_arg *arg,
}
}
-static const char *get_field_str(struct filter_arg *arg, struct tep_record *record)
+static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *record)
{
- struct event_format *event;
+ struct tep_event_format *event;
struct tep_handle *pevent;
unsigned long long addr;
const char *val = NULL;
@@ -1867,11 +1868,11 @@ static const char *get_field_str(struct filter_arg *arg, struct tep_record *reco
char hex[64];
/* If the field is not a string convert it */
- if (arg->str.field->flags & FIELD_IS_STRING) {
+ if (arg->str.field->flags & TEP_FIELD_IS_STRING) {
val = record->data + arg->str.field->offset;
size = arg->str.field->size;
- if (arg->str.field->flags & FIELD_IS_DYNAMIC) {
+ if (arg->str.field->flags & TEP_FIELD_IS_DYNAMIC) {
addr = *(unsigned int *)val;
val = record->data + (addr & 0xffff);
size = addr >> 16;
@@ -1893,7 +1894,7 @@ static const char *get_field_str(struct filter_arg *arg, struct tep_record *reco
pevent = event->pevent;
addr = get_value(event, arg->str.field, record);
- if (arg->str.field->flags & (FIELD_IS_POINTER | FIELD_IS_LONG))
+ if (arg->str.field->flags & (TEP_FIELD_IS_POINTER | TEP_FIELD_IS_LONG))
/* convert to a kernel symbol */
val = tep_find_function(pevent, addr);
@@ -1907,7 +1908,7 @@ static const char *get_field_str(struct filter_arg *arg, struct tep_record *reco
return val;
}
-static int test_str(struct event_format *event, struct filter_arg *arg,
+static int test_str(struct tep_event_format *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
const char *val;
@@ -1918,17 +1919,17 @@ static int test_str(struct event_format *event, struct filter_arg *arg,
val = get_field_str(arg, record);
switch (arg->str.type) {
- case FILTER_CMP_MATCH:
+ case TEP_FILTER_CMP_MATCH:
return strcmp(val, arg->str.val) == 0;
- case FILTER_CMP_NOT_MATCH:
+ case TEP_FILTER_CMP_NOT_MATCH:
return strcmp(val, arg->str.val) != 0;
- case FILTER_CMP_REGEX:
+ case TEP_FILTER_CMP_REGEX:
/* Returns zero on match */
return !regexec(&arg->str.reg, val, 0, NULL, 0);
- case FILTER_CMP_NOT_REGEX:
+ case TEP_FILTER_CMP_NOT_REGEX:
return regexec(&arg->str.reg, val, 0, NULL, 0);
default:
@@ -1938,19 +1939,19 @@ static int test_str(struct event_format *event, struct filter_arg *arg,
}
}
-static int test_op(struct event_format *event, struct filter_arg *arg,
+static int test_op(struct tep_event_format *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
switch (arg->op.type) {
- case FILTER_OP_AND:
+ case TEP_FILTER_OP_AND:
return test_filter(event, arg->op.left, record, err) &&
test_filter(event, arg->op.right, record, err);
- case FILTER_OP_OR:
+ case TEP_FILTER_OP_OR:
return test_filter(event, arg->op.left, record, err) ||
test_filter(event, arg->op.right, record, err);
- case FILTER_OP_NOT:
+ case TEP_FILTER_OP_NOT:
return !test_filter(event, arg->op.right, record, err);
default:
@@ -1960,7 +1961,7 @@ static int test_op(struct event_format *event, struct filter_arg *arg,
}
}
-static int test_filter(struct event_format *event, struct filter_arg *arg,
+static int test_filter(struct tep_event_format *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
if (*err) {
@@ -1971,22 +1972,22 @@ static int test_filter(struct event_format *event, struct filter_arg *arg,
}
switch (arg->type) {
- case FILTER_ARG_BOOLEAN:
+ case TEP_FILTER_ARG_BOOLEAN:
/* easy case */
return arg->boolean.value;
- case FILTER_ARG_OP:
+ case TEP_FILTER_ARG_OP:
return test_op(event, arg, record, err);
- case FILTER_ARG_NUM:
+ case TEP_FILTER_ARG_NUM:
return test_num(event, arg, record, err);
- case FILTER_ARG_STR:
+ case TEP_FILTER_ARG_STR:
return test_str(event, arg, record, err);
- case FILTER_ARG_EXP:
- case FILTER_ARG_VALUE:
- case FILTER_ARG_FIELD:
+ case TEP_FILTER_ARG_EXP:
+ case TEP_FILTER_ARG_VALUE:
+ case TEP_FILTER_ARG_FIELD:
/*
* Expressions, fields and values evaluate
* to true if they return non zero
@@ -2008,9 +2009,9 @@ static int test_filter(struct event_format *event, struct filter_arg *arg,
* Returns 1 if filter found for @event_id
 * otherwise 0.
*/
-int tep_event_filtered(struct event_filter *filter, int event_id)
+int tep_event_filtered(struct tep_event_filter *filter, int event_id)
{
- struct filter_type *filter_type;
+ struct tep_filter_type *filter_type;
if (!filter->filters)
return 0;
@@ -2032,11 +2033,11 @@ int tep_event_filtered(struct event_filter *filter, int event_id)
* NO_FILTER - if no filters exist
* otherwise - error occurred during test
*/
-enum tep_errno tep_filter_match(struct event_filter *filter,
+enum tep_errno tep_filter_match(struct tep_event_filter *filter,
struct tep_record *record)
{
struct tep_handle *pevent = filter->pevent;
- struct filter_type *filter_type;
+ struct tep_filter_type *filter_type;
int event_id;
int ret;
enum tep_errno err = 0;
@@ -2059,7 +2060,7 @@ enum tep_errno tep_filter_match(struct event_filter *filter,
return ret ? TEP_ERRNO__FILTER_MATCH : TEP_ERRNO__FILTER_MISS;
}
-static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
+static char *op_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
{
char *str = NULL;
char *left = NULL;
@@ -2070,10 +2071,10 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
int val;
switch (arg->op.type) {
- case FILTER_OP_AND:
+ case TEP_FILTER_OP_AND:
op = "&&";
/* fall through */
- case FILTER_OP_OR:
+ case TEP_FILTER_OP_OR:
if (!op)
op = "||";
@@ -2094,8 +2095,8 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
right_val = 0;
if (left_val >= 0) {
- if ((arg->op.type == FILTER_OP_AND && !left_val) ||
- (arg->op.type == FILTER_OP_OR && left_val)) {
+ if ((arg->op.type == TEP_FILTER_OP_AND && !left_val) ||
+ (arg->op.type == TEP_FILTER_OP_OR && left_val)) {
/* Just return left value */
str = left;
left = NULL;
@@ -2105,10 +2106,10 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
/* just evaluate this. */
val = 0;
switch (arg->op.type) {
- case FILTER_OP_AND:
+ case TEP_FILTER_OP_AND:
val = left_val && right_val;
break;
- case FILTER_OP_OR:
+ case TEP_FILTER_OP_OR:
val = left_val || right_val;
break;
default:
@@ -2119,8 +2120,8 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
}
}
if (right_val >= 0) {
- if ((arg->op.type == FILTER_OP_AND && !right_val) ||
- (arg->op.type == FILTER_OP_OR && right_val)) {
+ if ((arg->op.type == TEP_FILTER_OP_AND && !right_val) ||
+ (arg->op.type == TEP_FILTER_OP_OR && right_val)) {
/* Just return right value */
str = right;
right = NULL;
@@ -2135,7 +2136,7 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
asprintf(&str, "(%s) %s (%s)", left, op, right);
break;
- case FILTER_OP_NOT:
+ case TEP_FILTER_OP_NOT:
op = "!";
right = arg_to_str(filter, arg->op.right);
if (!right)
@@ -2163,7 +2164,7 @@ static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
return str;
}
-static char *val_to_str(struct event_filter *filter, struct filter_arg *arg)
+static char *val_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
{
char *str = NULL;
@@ -2172,12 +2173,12 @@ static char *val_to_str(struct event_filter *filter, struct filter_arg *arg)
return str;
}
-static char *field_to_str(struct event_filter *filter, struct filter_arg *arg)
+static char *field_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
{
return strdup(arg->field.field->name);
}
-static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg)
+static char *exp_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
{
char *lstr;
char *rstr;
@@ -2190,34 +2191,34 @@ static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg)
goto out;
switch (arg->exp.type) {
- case FILTER_EXP_ADD:
+ case TEP_FILTER_EXP_ADD:
op = "+";
break;
- case FILTER_EXP_SUB:
+ case TEP_FILTER_EXP_SUB:
op = "-";
break;
- case FILTER_EXP_MUL:
+ case TEP_FILTER_EXP_MUL:
op = "*";
break;
- case FILTER_EXP_DIV:
+ case TEP_FILTER_EXP_DIV:
op = "/";
break;
- case FILTER_EXP_MOD:
+ case TEP_FILTER_EXP_MOD:
op = "%";
break;
- case FILTER_EXP_RSHIFT:
+ case TEP_FILTER_EXP_RSHIFT:
op = ">>";
break;
- case FILTER_EXP_LSHIFT:
+ case TEP_FILTER_EXP_LSHIFT:
op = "<<";
break;
- case FILTER_EXP_AND:
+ case TEP_FILTER_EXP_AND:
op = "&";
break;
- case FILTER_EXP_OR:
+ case TEP_FILTER_EXP_OR:
op = "|";
break;
- case FILTER_EXP_XOR:
+ case TEP_FILTER_EXP_XOR:
op = "^";
break;
default:
@@ -2233,7 +2234,7 @@ out:
return str;
}
-static char *num_to_str(struct event_filter *filter, struct filter_arg *arg)
+static char *num_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
{
char *lstr;
char *rstr;
@@ -2246,26 +2247,26 @@ static char *num_to_str(struct event_filter *filter, struct filter_arg *arg)
goto out;
switch (arg->num.type) {
- case FILTER_CMP_EQ:
+ case TEP_FILTER_CMP_EQ:
op = "==";
/* fall through */
- case FILTER_CMP_NE:
+ case TEP_FILTER_CMP_NE:
if (!op)
op = "!=";
/* fall through */
- case FILTER_CMP_GT:
+ case TEP_FILTER_CMP_GT:
if (!op)
op = ">";
/* fall through */
- case FILTER_CMP_LT:
+ case TEP_FILTER_CMP_LT:
if (!op)
op = "<";
/* fall through */
- case FILTER_CMP_GE:
+ case TEP_FILTER_CMP_GE:
if (!op)
op = ">=";
/* fall through */
- case FILTER_CMP_LE:
+ case TEP_FILTER_CMP_LE:
if (!op)
op = "<=";
@@ -2283,24 +2284,24 @@ out:
return str;
}
-static char *str_to_str(struct event_filter *filter, struct filter_arg *arg)
+static char *str_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
{
char *str = NULL;
char *op = NULL;
switch (arg->str.type) {
- case FILTER_CMP_MATCH:
+ case TEP_FILTER_CMP_MATCH:
op = "==";
/* fall through */
- case FILTER_CMP_NOT_MATCH:
+ case TEP_FILTER_CMP_NOT_MATCH:
if (!op)
op = "!=";
/* fall through */
- case FILTER_CMP_REGEX:
+ case TEP_FILTER_CMP_REGEX:
if (!op)
op = "=~";
/* fall through */
- case FILTER_CMP_NOT_REGEX:
+ case TEP_FILTER_CMP_NOT_REGEX:
if (!op)
op = "!~";
@@ -2315,31 +2316,31 @@ static char *str_to_str(struct event_filter *filter, struct filter_arg *arg)
return str;
}
-static char *arg_to_str(struct event_filter *filter, struct filter_arg *arg)
+static char *arg_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
{
char *str = NULL;
switch (arg->type) {
- case FILTER_ARG_BOOLEAN:
+ case TEP_FILTER_ARG_BOOLEAN:
asprintf(&str, arg->boolean.value ? "TRUE" : "FALSE");
return str;
- case FILTER_ARG_OP:
+ case TEP_FILTER_ARG_OP:
return op_to_str(filter, arg);
- case FILTER_ARG_NUM:
+ case TEP_FILTER_ARG_NUM:
return num_to_str(filter, arg);
- case FILTER_ARG_STR:
+ case TEP_FILTER_ARG_STR:
return str_to_str(filter, arg);
- case FILTER_ARG_VALUE:
+ case TEP_FILTER_ARG_VALUE:
return val_to_str(filter, arg);
- case FILTER_ARG_FIELD:
+ case TEP_FILTER_ARG_FIELD:
return field_to_str(filter, arg);
- case FILTER_ARG_EXP:
+ case TEP_FILTER_ARG_EXP:
return exp_to_str(filter, arg);
default:
@@ -2359,9 +2360,9 @@ static char *arg_to_str(struct event_filter *filter, struct filter_arg *arg)
* NULL is returned if no filter is found or allocation failed.
*/
char *
-tep_filter_make_string(struct event_filter *filter, int event_id)
+tep_filter_make_string(struct tep_event_filter *filter, int event_id)
{
- struct filter_type *filter_type;
+ struct tep_filter_type *filter_type;
if (!filter->filters)
return NULL;
@@ -2383,10 +2384,10 @@ tep_filter_make_string(struct event_filter *filter, int event_id)
* 1 if the two filters hold the same content.
* 0 if they do not.
*/
-int tep_filter_compare(struct event_filter *filter1, struct event_filter *filter2)
+int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter *filter2)
{
- struct filter_type *filter_type1;
- struct filter_type *filter_type2;
+ struct tep_filter_type *filter_type1;
+ struct tep_filter_type *filter_type2;
char *str1, *str2;
int result;
int i;
@@ -2409,8 +2410,8 @@ int tep_filter_compare(struct event_filter *filter1, struct event_filter *filter
if (filter_type1->filter->type != filter_type2->filter->type)
break;
switch (filter_type1->filter->type) {
- case FILTER_TRIVIAL_FALSE:
- case FILTER_TRIVIAL_TRUE:
+ case TEP_FILTER_TRIVIAL_FALSE:
+ case TEP_FILTER_TRIVIAL_TRUE:
/* trivial types just need the type compared */
continue;
default:
diff --git a/tools/lib/traceevent/plugin_function.c b/tools/lib/traceevent/plugin_function.c
index 424747475d37..528acc75d81a 100644
--- a/tools/lib/traceevent/plugin_function.c
+++ b/tools/lib/traceevent/plugin_function.c
@@ -23,6 +23,7 @@
#include "event-parse.h"
#include "event-utils.h"
+#include "trace-seq.h"
static struct func_stack {
int size;
@@ -123,7 +124,7 @@ static int add_and_get_index(const char *parent, const char *child, int cpu)
}
static int function_handler(struct trace_seq *s, struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
struct tep_handle *pevent = event->pevent;
unsigned long long function;
diff --git a/tools/lib/traceevent/plugin_hrtimer.c b/tools/lib/traceevent/plugin_hrtimer.c
index b43bfec565d8..9aa05b4ca811 100644
--- a/tools/lib/traceevent/plugin_hrtimer.c
+++ b/tools/lib/traceevent/plugin_hrtimer.c
@@ -23,10 +23,11 @@
#include <string.h>
#include "event-parse.h"
+#include "trace-seq.h"
static int timer_expire_handler(struct trace_seq *s,
struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
trace_seq_printf(s, "hrtimer=");
@@ -46,7 +47,7 @@ static int timer_expire_handler(struct trace_seq *s,
static int timer_start_handler(struct trace_seq *s,
struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
trace_seq_printf(s, "hrtimer=");
diff --git a/tools/lib/traceevent/plugin_jbd2.c b/tools/lib/traceevent/plugin_jbd2.c
index 45a9acd19640..a5e34135dd6a 100644
--- a/tools/lib/traceevent/plugin_jbd2.c
+++ b/tools/lib/traceevent/plugin_jbd2.c
@@ -22,6 +22,7 @@
#include <string.h>
#include "event-parse.h"
+#include "trace-seq.h"
#define MINORBITS 20
#define MINORMASK ((1U << MINORBITS) - 1)
diff --git a/tools/lib/traceevent/plugin_kmem.c b/tools/lib/traceevent/plugin_kmem.c
index 73966b05abce..1beb4eaddfdf 100644
--- a/tools/lib/traceevent/plugin_kmem.c
+++ b/tools/lib/traceevent/plugin_kmem.c
@@ -22,11 +22,12 @@
#include <string.h>
#include "event-parse.h"
+#include "trace-seq.h"
static int call_site_handler(struct trace_seq *s, struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
- struct format_field *field;
+ struct tep_format_field *field;
unsigned long long val, addr;
void *data = record->data;
const char *func;
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c
index 1d0d15906225..d13c22846fa9 100644
--- a/tools/lib/traceevent/plugin_kvm.c
+++ b/tools/lib/traceevent/plugin_kvm.c
@@ -23,6 +23,7 @@
#include <stdint.h>
#include "event-parse.h"
+#include "trace-seq.h"
#ifdef HAVE_UDIS86
@@ -248,7 +249,7 @@ static const char *find_exit_reason(unsigned isa, int val)
}
static int print_exit_reason(struct trace_seq *s, struct tep_record *record,
- struct event_format *event, const char *field)
+ struct tep_event_format *event, const char *field)
{
unsigned long long isa;
unsigned long long val;
@@ -269,7 +270,7 @@ static int print_exit_reason(struct trace_seq *s, struct tep_record *record,
}
static int kvm_exit_handler(struct trace_seq *s, struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
unsigned long long info1 = 0, info2 = 0;
@@ -292,7 +293,7 @@ static int kvm_exit_handler(struct trace_seq *s, struct tep_record *record,
static int kvm_emulate_insn_handler(struct trace_seq *s,
struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
unsigned long long rip, csbase, len, flags, failed;
int llen;
@@ -331,7 +332,7 @@ static int kvm_emulate_insn_handler(struct trace_seq *s,
static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
if (print_exit_reason(s, record, event, "exit_code") < 0)
return -1;
@@ -345,7 +346,7 @@ static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct tep_reco
}
static int kvm_nested_vmexit_handler(struct trace_seq *s, struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
tep_print_num_field(s, "rip %llx ", event, "rip", record, 1);
@@ -371,7 +372,7 @@ union kvm_mmu_page_role {
};
static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
unsigned long long val;
static const char *access_str[] = {
@@ -418,7 +419,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
static int kvm_mmu_get_page_handler(struct trace_seq *s,
struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
unsigned long long val;
diff --git a/tools/lib/traceevent/plugin_mac80211.c b/tools/lib/traceevent/plugin_mac80211.c
index de50a5316203..da3855e7b86f 100644
--- a/tools/lib/traceevent/plugin_mac80211.c
+++ b/tools/lib/traceevent/plugin_mac80211.c
@@ -22,13 +22,14 @@
#include <string.h>
#include "event-parse.h"
+#include "trace-seq.h"
#define INDENT 65
-static void print_string(struct trace_seq *s, struct event_format *event,
+static void print_string(struct trace_seq *s, struct tep_event_format *event,
const char *name, const void *data)
{
- struct format_field *f = tep_find_field(event, name);
+ struct tep_format_field *f = tep_find_field(event, name);
int offset;
int length;
@@ -59,7 +60,7 @@ static void print_string(struct trace_seq *s, struct event_format *event,
static int drv_bss_info_changed(struct trace_seq *s,
struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
void *data = record->data;
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
index eecb4bd95c11..77882272672f 100644
--- a/tools/lib/traceevent/plugin_sched_switch.c
+++ b/tools/lib/traceevent/plugin_sched_switch.c
@@ -22,6 +22,7 @@
#include <string.h>
#include "event-parse.h"
+#include "trace-seq.h"
static void write_state(struct trace_seq *s, int val)
{
@@ -44,7 +45,7 @@ static void write_state(struct trace_seq *s, int val)
trace_seq_putc(s, 'R');
}
-static void write_and_save_comm(struct format_field *field,
+static void write_and_save_comm(struct tep_format_field *field,
struct tep_record *record,
struct trace_seq *s, int pid)
{
@@ -66,9 +67,9 @@ static void write_and_save_comm(struct format_field *field,
static int sched_wakeup_handler(struct trace_seq *s,
struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
- struct format_field *field;
+ struct tep_format_field *field;
unsigned long long val;
if (tep_get_field_val(s, event, "pid", record, &val, 1))
@@ -95,9 +96,9 @@ static int sched_wakeup_handler(struct trace_seq *s,
static int sched_switch_handler(struct trace_seq *s,
struct tep_record *record,
- struct event_format *event, void *context)
+ struct tep_event_format *event, void *context)
{
- struct format_field *field;
+ struct tep_format_field *field;
unsigned long long val;
if (tep_get_field_val(s, event, "prev_pid", record, &val, 1))
diff --git a/tools/lib/traceevent/plugin_scsi.c b/tools/lib/traceevent/plugin_scsi.c
index 5ec346f6b842..4eba25cc1431 100644
--- a/tools/lib/traceevent/plugin_scsi.c
+++ b/tools/lib/traceevent/plugin_scsi.c
@@ -3,6 +3,7 @@
#include <string.h>
#include <inttypes.h>
#include "event-parse.h"
+#include "trace-seq.h"
typedef unsigned long sector_t;
typedef uint64_t u64;
diff --git a/tools/lib/traceevent/plugin_xen.c b/tools/lib/traceevent/plugin_xen.c
index b2acbd6e9c86..bc0496e4c296 100644
--- a/tools/lib/traceevent/plugin_xen.c
+++ b/tools/lib/traceevent/plugin_xen.c
@@ -3,6 +3,7 @@
#include <stdlib.h>
#include <string.h>
#include "event-parse.h"
+#include "trace-seq.h"
#define __HYPERVISOR_set_trap_table 0
#define __HYPERVISOR_mmu_update 1
diff --git a/tools/lib/traceevent/tep_strerror.c b/tools/lib/traceevent/tep_strerror.c
new file mode 100644
index 000000000000..4ac26445b2f6
--- /dev/null
+++ b/tools/lib/traceevent/tep_strerror.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: LGPL-2.1
+#undef _GNU_SOURCE
+#include <string.h>
+#include <stdio.h>
+
+#include "event-parse.h"
+
+#undef _PE
+#define _PE(code, str) str
+static const char * const tep_error_str[] = {
+ TEP_ERRORS
+};
+#undef _PE
+
+/*
+ * The tools so far have been using the strerror_r() GNU variant, which
+ * returns a string, be it the buffer passed or something else.
+ *
+ * But that, besides being tricky in cases where we expect that the function
+ * using strerror_r() returns the error formatted in a provided buffer (we have
+ * to check if it returned something else and copy that instead), breaks the
+ * build on systems not using glibc, like Alpine Linux, where musl libc is
+ * used.
+ *
+ * So, introduce yet another wrapper, str_error_r(), that has the GNU
+ * interface, but uses the portable XSI variant of strerror_r(), so that
+ * users can rest assured that the provided buffer is used and is what is
+ * returned.
+ */
+int tep_strerror(struct tep_handle *tep __maybe_unused,
+ enum tep_errno errnum, char *buf, size_t buflen)
+{
+ const char *msg;
+ int idx;
+
+ if (!buflen)
+ return 0;
+
+ if (errnum >= 0) {
+ int err = strerror_r(errnum, buf, buflen);
+ buf[buflen - 1] = 0;
+ return err;
+ }
+
+ if (errnum <= __TEP_ERRNO__START ||
+ errnum >= __TEP_ERRNO__END)
+ return -1;
+
+ idx = errnum - __TEP_ERRNO__START - 1;
+ msg = tep_error_str[idx];
+ snprintf(buf, buflen, "%s", msg);
+
+ return 0;
+}
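
For completeness, a hypothetical caller of the new helper, assuming only the
tep_strerror() signature shown above:

	#include <stdio.h>
	#include "event-parse.h"

	/* Sketch: render either a plain errno or a TEP_ERRNO__* code. */
	static void report_tep_error(struct tep_handle *tep,
				     enum tep_errno errnum)
	{
		char buf[200];

		if (tep_strerror(tep, errnum, buf, sizeof(buf)) == 0)
			fprintf(stderr, "libtraceevent: %s\n", buf);
		else
			fprintf(stderr, "libtraceevent: unknown error %d\n",
				(int)errnum);
	}
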
diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c
index e3bac4543d3b..8ff1d55954d1 100644
--- a/tools/lib/traceevent/trace-seq.c
+++ b/tools/lib/traceevent/trace-seq.c
@@ -3,6 +3,8 @@
* Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
*
*/
+#include "trace-seq.h"
+
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
diff --git a/tools/lib/traceevent/trace-seq.h b/tools/lib/traceevent/trace-seq.h
new file mode 100644
index 000000000000..d68ec69f8d1a
--- /dev/null
+++ b/tools/lib/traceevent/trace-seq.h
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+
+#ifndef _TRACE_SEQ_H
+#define _TRACE_SEQ_H
+
+#include <stdarg.h>
+#include <stdio.h>
+
+/* ----------------------- trace_seq ----------------------- */
+
+#ifndef TRACE_SEQ_BUF_SIZE
+#define TRACE_SEQ_BUF_SIZE 4096
+#endif
+
+enum trace_seq_fail {
+ TRACE_SEQ__GOOD,
+ TRACE_SEQ__BUFFER_POISONED,
+ TRACE_SEQ__MEM_ALLOC_FAILED,
+};
+
+/*
+ * Trace sequences are used to allow a function to call several other functions
+ * to create a string of data to use (up to a max of PAGE_SIZE).
+ */
+
+struct trace_seq {
+ char *buffer;
+ unsigned int buffer_size;
+ unsigned int len;
+ unsigned int readpos;
+ enum trace_seq_fail state;
+};
+
+void trace_seq_init(struct trace_seq *s);
+void trace_seq_reset(struct trace_seq *s);
+void trace_seq_destroy(struct trace_seq *s);
+
+extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
+ __attribute__ ((format (printf, 2, 0)));
+
+extern int trace_seq_puts(struct trace_seq *s, const char *str);
+extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
+
+extern void trace_seq_terminate(struct trace_seq *s);
+
+extern int trace_seq_do_fprintf(struct trace_seq *s, FILE *fp);
+extern int trace_seq_do_printf(struct trace_seq *s);
+
+#endif /* _TRACE_SEQ_H */
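
A minimal usage sketch for the trace_seq declarations above (the buffer grows
internally; the exact growth policy lives in trace-seq.c and is not shown
here):

	#include "trace-seq.h"

	/* Sketch: build a line piecewise, emit it to stdout, clean up. */
	static void demo_trace_seq(void)
	{
		struct trace_seq s;

		trace_seq_init(&s);
		trace_seq_printf(&s, "cpu=%d ", 0);
		trace_seq_puts(&s, "state=");
		trace_seq_putc(&s, 'R');
		trace_seq_terminate(&s);	/* ensure NUL termination */
		trace_seq_do_printf(&s);	/* write the buffer to stdout */
		trace_seq_destroy(&s);
	}
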
diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt
index 0cbd1ef8f86d..35bff92cc773 100644
--- a/tools/memory-model/Documentation/explanation.txt
+++ b/tools/memory-model/Documentation/explanation.txt
@@ -28,7 +28,8 @@ Explanation of the Linux-Kernel Memory Consistency Model
20. THE HAPPENS-BEFORE RELATION: hb
21. THE PROPAGATES-BEFORE RELATION: pb
22. RCU RELATIONS: rcu-link, gp, rscs, rcu-fence, and rb
- 23. ODDS AND ENDS
+ 23. LOCKING
+ 24. ODDS AND ENDS
@@ -1067,28 +1068,6 @@ allowing out-of-order writes like this to occur. The model avoided
violating the write-write coherence rule by requiring the CPU not to
send the W write to the memory subsystem at all!)
-There is one last example of preserved program order in the LKMM: when
-a load-acquire reads from an earlier store-release. For example:
-
- smp_store_release(&x, 123);
- r1 = smp_load_acquire(&x);
-
-If the smp_load_acquire() ends up obtaining the 123 value that was
-stored by the smp_store_release(), the LKMM says that the load must be
-executed after the store; the store cannot be forwarded to the load.
-This requirement does not arise from the operational model, but it
-yields correct predictions on all architectures supported by the Linux
-kernel, although for differing reasons.
-
-On some architectures, including x86 and ARMv8, it is true that the
-store cannot be forwarded to the load. On others, including PowerPC
-and ARMv7, smp_store_release() generates object code that starts with
-a fence and smp_load_acquire() generates object code that ends with a
-fence. The upshot is that even though the store may be forwarded to
-the load, it is still true that any instruction preceding the store
-will be executed before the load or any following instructions, and
-the store will be executed before any instruction following the load.
-
AND THEN THERE WAS ALPHA
------------------------
@@ -1766,6 +1745,147 @@ before it does, and the critical section in P2 both starts after P1's
grace period does and ends after it does.
+LOCKING
+-------
+
+The LKMM includes locking. In fact, there is special code for locking
+in the formal model, added in order to make tools run faster.
+However, this special code is intended to be more or less equivalent
+to concepts we have already covered. A spinlock_t variable is treated
+the same as an int, and spin_lock(&s) is treated almost the same as:
+
+ while (cmpxchg_acquire(&s, 0, 1) != 0)
+ cpu_relax();
+
+This waits until s is equal to 0 and then atomically sets it to 1,
+and the read part of the cmpxchg operation acts as an acquire fence.
+An alternate way to express the same thing would be:
+
+ r = xchg_acquire(&s, 1);
+
+along with a requirement that at the end, r = 0. Similarly,
+spin_trylock(&s) is treated almost the same as:
+
+ return !cmpxchg_acquire(&s, 0, 1);
+
+which atomically sets s to 1 if it is currently equal to 0 and returns
+true if it succeeds (the read part of the cmpxchg operation acts as an
+acquire fence only if the operation is successful). spin_unlock(&s)
+is treated almost the same as:
+
+ smp_store_release(&s, 0);
+
+The "almost" qualifiers above need some explanation. In the LKMM, the
+store-release in a spin_unlock() and the load-acquire which forms the
+first half of the atomic rmw update in a spin_lock() or a successful
+spin_trylock() -- we can call these things lock-releases and
+lock-acquires -- have two properties beyond those of ordinary releases
+and acquires.
+
+First, when a lock-acquire reads from a lock-release, the LKMM
+requires that every instruction po-before the lock-release must
+execute before any instruction po-after the lock-acquire. This would
+naturally hold if the release and acquire operations were on different
+CPUs, but the LKMM says it holds even when they are on the same CPU.
+For example:
+
+ int x, y;
+ spinlock_t s;
+
+ P0()
+ {
+ int r1, r2;
+
+ spin_lock(&s);
+ r1 = READ_ONCE(x);
+ spin_unlock(&s);
+ spin_lock(&s);
+ r2 = READ_ONCE(y);
+ spin_unlock(&s);
+ }
+
+ P1()
+ {
+ WRITE_ONCE(y, 1);
+ smp_wmb();
+ WRITE_ONCE(x, 1);
+ }
+
+Here the second spin_lock() reads from the first spin_unlock(), and
+therefore the load of x must execute before the load of y. Thus we
+cannot have r1 = 1 and r2 = 0 at the end (this is an instance of the
+MP pattern).
+
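As an aside, the example above can be written directly as a herd7
litmus test. The following sketch is our construction (the name is
hypothetical; this is not one of the shipped tests), and per the text
above its expected result is "Never":

	C MP+locktwice+wmbonce

	(* Sketch of the example above; hypothetical name. *)

	{}

	P0(spinlock_t *s, int *x, int *y)
	{
		int r1;
		int r2;

		spin_lock(s);
		r1 = READ_ONCE(*x);
		spin_unlock(s);
		spin_lock(s);
		r2 = READ_ONCE(*y);
		spin_unlock(s);
	}

	P1(int *x, int *y)
	{
		WRITE_ONCE(*y, 1);
		smp_wmb();
		WRITE_ONCE(*x, 1);
	}

	exists (0:r1=1 /\ 0:r2=0)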
+This requirement does not apply to ordinary release and acquire
+fences, only to lock-related operations. For instance, suppose P0()
+in the example had been written as:
+
+ P0()
+ {
+ int r1, r2, r3;
+
+ r1 = READ_ONCE(x);
+ smp_store_release(&s, 1);
+ r3 = smp_load_acquire(&s);
+ r2 = READ_ONCE(y);
+ }
+
+Then the CPU would be allowed to forward the s = 1 value from the
+smp_store_release() to the smp_load_acquire(), executing the
+instructions in the following order:
+
+ r3 = smp_load_acquire(&s); // Obtains r3 = 1
+ r2 = READ_ONCE(y);
+ r1 = READ_ONCE(x);
+ smp_store_release(&s, 1); // Value is forwarded
+
+and thus it could load y before x, obtaining r2 = 0 and r1 = 1.
+
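In litmus-test terms, the analogous test built from this P0() and the
P1() above would therefore be expected to yield "Sometimes" (with r3
unconstrained) for the clause:

	exists (0:r1=1 /\ 0:r2=0)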
+Second, when a lock-acquire reads from a lock-release, and some other
+stores W and W' occur po-before the lock-release and po-after the
+lock-acquire respectively, the LKMM requires that W must propagate to
+each CPU before W' does. For example, consider:
+
+ int x, y;
+ spinlock_t s;
+
+ P0()
+ {
+ spin_lock(&s);
+ WRITE_ONCE(x, 1);
+ spin_unlock(&s);
+ }
+
+ P1()
+ {
+ int r1;
+
+ spin_lock(&s);
+ r1 = READ_ONCE(x);
+ WRITE_ONCE(y, 1);
+ spin_unlock(&s);
+ }
+
+ P2()
+ {
+ int r2, r3;
+
+ r2 = READ_ONCE(y);
+ smp_rmb();
+ r3 = READ_ONCE(x);
+ }
+
+If r1 = 1 at the end then the spin_lock() in P1 must have read from
+the spin_unlock() in P0. Hence the store to x must propagate to P2
+before the store to y does, so we cannot have r2 = 1 and r3 = 0.
+
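In litmus-test terms, this is the pattern checked by
ISA2+pooncelock+pooncelock+pombonce.litmus (whose expected result is
updated to "Never" later in this series); the forbidden outcome
corresponds to the clause:

	exists (1:r1=1 /\ 2:r2=1 /\ 2:r3=0)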
+These two special requirements for lock-release and lock-acquire do
+not arise from the operational model. Nevertheless, kernel developers
+have come to expect and rely on them because they do hold on all
+architectures supported by the Linux kernel, albeit for various
+differing reasons.
+
+
ODDS AND ENDS
-------------
@@ -1831,26 +1951,6 @@ they behave as follows:
events and the events preceding them against all po-later
events.
-The LKMM includes locking. In fact, there is special code for locking
-in the formal model, added in order to make tools run faster.
-However, this special code is intended to be exactly equivalent to
-concepts we have already covered. A spinlock_t variable is treated
-the same as an int, and spin_lock(&s) is treated the same as:
-
- while (cmpxchg_acquire(&s, 0, 1) != 0)
- cpu_relax();
-
-which waits until s is equal to 0 and then atomically sets it to 1,
-and where the read part of the atomic update is also an acquire fence.
-An alternate way to express the same thing would be:
-
- r = xchg_acquire(&s, 1);
-
-along with a requirement that at the end, r = 0. spin_unlock(&s) is
-treated the same as:
-
- smp_store_release(&s, 0);
-
Interestingly, RCU and locking each introduce the possibility of
deadlock. When faced with code sequences such as:
diff --git a/tools/memory-model/Documentation/recipes.txt b/tools/memory-model/Documentation/recipes.txt
index af72700cc20a..7fe8d7aa3029 100644
--- a/tools/memory-model/Documentation/recipes.txt
+++ b/tools/memory-model/Documentation/recipes.txt
@@ -311,7 +311,7 @@ The smp_wmb() macro orders prior stores against later stores, and the
smp_rmb() macro orders prior loads against later loads. Therefore, if
the final value of r0 is 1, the final value of r1 must also be 1.
-The the xlog_state_switch_iclogs() function in fs/xfs/xfs_log.c contains
+The xlog_state_switch_iclogs() function in fs/xfs/xfs_log.c contains
the following write-side code fragment:
log->l_curr_block -= log->l_logBBsize;
diff --git a/tools/memory-model/README b/tools/memory-model/README
index ee987ce20aae..acf9077cffaa 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -171,6 +171,12 @@ The Linux-kernel memory model has the following limitations:
particular, the "THE PROGRAM ORDER RELATION: po AND po-loc"
and "A WARNING" sections).
+ Note that this limitation in turn limits LKMM's ability to
+ accurately model address, control, and data dependencies.
+ For example, if the compiler can deduce the value of some variable
+ carrying a dependency, then the compiler can break that dependency
+ by substituting a constant of that value (see the sketch below).
+
2. Multiple access sizes for a single variable are not supported,
and neither are misaligned or partially overlapping accesses.
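To illustrate the dependency-breaking limitation noted in item 1
(a hypothetical sketch, not taken from the model itself): suppose the
compiler can prove that gp only ever holds the address of a single
variable a.

	int a;
	int *gp = &a;	/* the compiler can see gp always equals &a */
	int *p;
	int r1;

	p = READ_ONCE(gp);
	r1 = *p;	/* intended to carry an address dependency */

Because p must equal &a, the compiler may rewrite the second load as
"r1 = a;", which no longer depends on the READ_ONCE(), so the hardware
is free to execute the two loads in either order.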
@@ -190,6 +196,36 @@ The Linux-kernel memory model has the following limitations:
However, a substantial amount of support is provided for these
operations, as shown in the linux-kernel.def file.
+ a. When rcu_assign_pointer() is passed NULL, the Linux
+ kernel provides no ordering, but LKMM models this
+ case as a store release.
+
+ b. The "unless" RMW operations are not currently modeled:
+ atomic_long_add_unless(), atomic_add_unless(),
+ atomic_inc_unless_negative(), and
+ atomic_dec_unless_positive(). These can be emulated
+ in litmus tests, for example, by using atomic_cmpxchg()
+ (see the sketch following this list).
+
+ c. The call_rcu() function is not modeled. It can be
+ emulated in litmus tests by adding another process that
+ invokes synchronize_rcu() and the body of the callback
+ function, with (for example) a release-acquire from
+ the site of the emulated call_rcu() to the beginning
+ of the additional process.
+
+ d. The rcu_barrier() function is not modeled. It can be
+ emulated in litmus tests that emulate call_rcu() by adding
+ (for example) a release-acquire from the end of each
+ additional call_rcu() process to the site of the
+ emulated rcu_barrier().
+
+ e. Sleepable RCU (SRCU) is not modeled. It can be
+ emulated, but perhaps not simply.
+
+ f. Reader-writer locking is not modeled. It can be
+ emulated in litmus tests using atomic read-modify-write
+ operations.
+
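As a concrete illustration of item (b), the idea behind emulating an
"unless" operation with atomic_cmpxchg() looks like this in ordinary
kernel-style C (our sketch; in an actual litmus test the loop would be
reduced to a single cmpxchg attempt, since herd7 litmus tests cannot
express unbounded loops):

	/* Emulate atomic_add_unless(v, a, u): add a to *v unless it
	 * currently equals u.  Returns nonzero if the add happened. */
	static int atomic_add_unless_emulated(atomic_t *v, int a, int u)
	{
		int c = atomic_read(v);

		while (c != u) {
			int old = atomic_cmpxchg(v, c, c + a);

			if (old == c)
				return 1;	/* the add took effect */
			c = old;		/* lost a race; retry */
		}
		return 0;			/* *v was u; nothing added */
	}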
The "herd7" tool has some additional limitations of its own, apart from
the memory model:
@@ -204,3 +240,6 @@ the memory model:
Some of these limitations may be overcome in the future, but others are
more likely to be addressed by incorporating the Linux-kernel memory model
into other tools.
+
+Finally, please note that LKMM is subject to change as hardware, use cases,
+and compilers evolve.
diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index 59b5cbe6b624..882fc33274ac 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -38,7 +38,7 @@ let strong-fence = mb | gp
(* Release Acquire *)
let acq-po = [Acquire] ; po ; [M]
let po-rel = [M] ; po ; [Release]
-let rfi-rel-acq = [Release] ; rfi ; [Acquire]
+let po-unlock-rf-lock-po = po ; [UL] ; rf ; [LKR] ; po
(**********************************)
(* Fundamental coherence ordering *)
@@ -60,13 +60,13 @@ let dep = addr | data
let rwdep = (dep | ctrl) ; [W]
let overwrite = co | fr
let to-w = rwdep | (overwrite & int)
-let to-r = addr | (dep ; rfi) | rfi-rel-acq
+let to-r = addr | (dep ; rfi)
let fence = strong-fence | wmb | po-rel | rmb | acq-po
-let ppo = to-r | to-w | fence
+let ppo = to-r | to-w | fence | (po-unlock-rf-lock-po & int)
(* Propagation: Ordering from release operations and strong fences. *)
let A-cumul(r) = rfe? ; r
-let cumul-fence = A-cumul(strong-fence | po-rel) | wmb
+let cumul-fence = A-cumul(strong-fence | po-rel) | wmb | po-unlock-rf-lock-po
let prop = (overwrite & ext)? ; cumul-fence* ; rfe?
(*
diff --git a/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus b/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
index 0f749e419b34..094d58df7789 100644
--- a/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
+++ b/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
@@ -1,11 +1,10 @@
C ISA2+pooncelock+pooncelock+pombonce
(*
- * Result: Sometimes
+ * Result: Never
*
- * This test shows that the ordering provided by a lock-protected S
- * litmus test (P0() and P1()) are not visible to external process P2().
- * This is likely to change soon.
+ * This test shows that write-write ordering provided by locks
+ * (in P0() and P1()) is visible to external process P2().
*)
{}
diff --git a/tools/memory-model/litmus-tests/README b/tools/memory-model/litmus-tests/README
index 4581ec2d3c57..5ee08f129094 100644
--- a/tools/memory-model/litmus-tests/README
+++ b/tools/memory-model/litmus-tests/README
@@ -1,4 +1,6 @@
-This directory contains the following litmus tests:
+============
+LITMUS TESTS
+============
CoRR+poonceonce+Once.litmus
Test of read-read coherence, that is, whether or not two
@@ -36,7 +38,7 @@ IRIW+poonceonces+OnceOnce.litmus
ISA2+pooncelock+pooncelock+pombonce.litmus
Tests whether the ordering provided by a lock-protected S
litmus test is visible to an external process whose accesses are
- separated by smp_mb(). This addition of an external process to
+ separated by smp_mb(). This addition of an external process to
S is otherwise known as ISA2.
ISA2+poonceonces.litmus
@@ -151,3 +153,101 @@ Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
A great many more litmus tests are available here:
https://github.com/paulmckrcu/litmus
+
+==================
+LITMUS TEST NAMING
+==================
+
+Litmus tests are usually named based on their contents, which means that
+looking at the name tells you what the litmus test does. The naming
+scheme covers litmus tests having a single cycle that passes through
+each process exactly once, so litmus tests not fitting this description
+are named on an ad-hoc basis.
+
+The structure of a litmus-test name is the litmus-test class, a plus
+sign ("+"), and one string for each process, separated by plus signs.
+The end of the name is ".litmus".
+
+The litmus-test classes may be found in the infamous test6.pdf:
+https://www.cl.cam.ac.uk/~pes20/ppc-supplemental/test6.pdf
+Each class defines the pattern of accesses and of the variables accessed.
+For example, if the one process writes to a pair of variables, and
+the other process reads from these same variables, the corresponding
+litmus-test class is "MP" (message passing), which may be found on the
+left-hand end of the second row of tests on page one of test6.pdf.
+
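For instance, the accesses of an MP-class test look schematically like
this (a sketch of the access pattern only, not a complete litmus test):

	P0: WRITE_ONCE(*x, 1); WRITE_ONCE(*y, 1);
	P1: r1 = READ_ONCE(*y); r2 = READ_ONCE(*x);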
+The strings used to identify the actions carried out by each process are
+complex due to a desire to have short(er) names. Thus, there is a tool to
+generate these strings from a given litmus test's actions. For example,
+consider the processes from SB+rfionceonce-poonceonces.litmus:
+
+ P0(int *x, int *y)
+ {
+ int r1;
+ int r2;
+
+ WRITE_ONCE(*x, 1);
+ r1 = READ_ONCE(*x);
+ r2 = READ_ONCE(*y);
+ }
+
+ P1(int *x, int *y)
+ {
+ int r3;
+ int r4;
+
+ WRITE_ONCE(*y, 1);
+ r3 = READ_ONCE(*y);
+ r4 = READ_ONCE(*x);
+ }
+
+The next step is to construct a space-separated list of descriptors,
+interleaving descriptions of the relation between a pair of consecutive
+accesses with descriptions of the second access in the pair.
+
+P0()'s WRITE_ONCE() is read by its first READ_ONCE(), which is a
+reads-from link (rf) and internal to the P0() process. This is
+"rfi", which is an abbreviation for "reads-from internal". Because
+some of the tools string these abbreviations together with space
+characters separating processes, the first character is capitalized,
+resulting in "Rfi".
+
+P0()'s second access is a READ_ONCE(), as opposed to (for example)
+smp_load_acquire(), so next is "Once". Thus far, we have "Rfi Once".
+
+P0()'s third access is also a READ_ONCE(), but to y rather than x.
+This is related to P0()'s second access by program order ("po"),
+to a different variable ("d"), and both accesses are reads ("RR").
+The resulting descriptor is "PodRR". Because P0()'s third access is
+READ_ONCE(), we add another "Once" descriptor.
+
+A from-read ("fre") relation links P0()'s third access to P1()'s first
+access, and the resulting descriptor is "Fre". P1()'s first access is
+WRITE_ONCE(), which as before gives the descriptor "Once". The string
+thus far is therefore "Rfi Once PodRR Once Fre Once".
+
+The remainder of P1() is similar to P0(), which means we add
+"Rfi Once PodRR Once". Another fre links P1()'s last access to
+P0()'s first access, which is WRITE_ONCE(), so we add "Fre Once".
+The full string is thus:
+
+ Rfi Once PodRR Once Fre Once Rfi Once PodRR Once Fre Once
+
+This string can be given to the "norm7" and "classify7" tools to
+produce the name:
+
+ $ norm7 -bell linux-kernel.bell \
+ Rfi Once PodRR Once Fre Once Rfi Once PodRR Once Fre Once | \
+ sed -e 's/:.*//g'
+ SB+rfionceonce-poonceonces
+
+Adding the ".litmus" suffix: SB+rfionceonce-poonceonces.litmus
+
+The descriptors that describe connections between consecutive accesses
+within the cycle through a given litmus test can be provided by the herd
+tool (Rfi, Po, Fre, and so on) or by the linux-kernel.bell file (Once,
+Release, Acquire, and so on).
+
+To see the full list of descriptors, execute the following command:
+
+ $ diyone7 -bell linux-kernel.bell -show edges
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 84f001d52322..50af4e1274b3 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -30,9 +30,9 @@
#define EX_ORIG_OFFSET 0
#define EX_NEW_OFFSET 4
-#define JUMP_ENTRY_SIZE 24
+#define JUMP_ENTRY_SIZE 16
#define JUMP_ORIG_OFFSET 0
-#define JUMP_NEW_OFFSET 8
+#define JUMP_NEW_OFFSET 4
#define ALT_ENTRY_SIZE 13
#define ALT_ORIG_OFFSET 0
diff --git a/tools/pci/Build b/tools/pci/Build
new file mode 100644
index 000000000000..c375aea21790
--- /dev/null
+++ b/tools/pci/Build
@@ -0,0 +1 @@
+pcitest-y += pcitest.o
diff --git a/tools/pci/Makefile b/tools/pci/Makefile
new file mode 100644
index 000000000000..46e4c2f318c9
--- /dev/null
+++ b/tools/pci/Makefile
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0
+include ../scripts/Makefile.include
+
+bindir ?= /usr/bin
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+# Do not use make's built-in rules
+# (this improves performance and avoids hard-to-debug behaviour);
+MAKEFLAGS += -r
+
+CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+
+ALL_TARGETS := pcitest pcitest.sh
+ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
+
+all: $(ALL_PROGRAMS)
+
+export srctree OUTPUT CC LD CFLAGS
+include $(srctree)/tools/build/Makefile.include
+
+#
+# We need the following to be outside of kernel tree
+#
+$(OUTPUT)include/linux/: ../../include/uapi/linux/
+ mkdir -p $(OUTPUT)include/linux/ 2>&1 || true
+ ln -sf $(CURDIR)/../../include/uapi/linux/pcitest.h $@
+
+prepare: $(OUTPUT)include/linux/
+
+PCITEST_IN := $(OUTPUT)pcitest-in.o
+$(PCITEST_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=pcitest
+$(OUTPUT)pcitest: $(PCITEST_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+
+clean:
+ rm -f $(ALL_PROGRAMS)
+ rm -rf $(OUTPUT)include/
+ find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+
+install: $(ALL_PROGRAMS)
+ install -d -m 755 $(DESTDIR)$(bindir); \
+ for program in $(ALL_PROGRAMS); do \
+ install $$program $(DESTDIR)$(bindir); \
+ done
+
+FORCE:
+
+.PHONY: all install clean FORCE prepare
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
index af146bb03b4d..ec4d51f3308b 100644
--- a/tools/pci/pcitest.c
+++ b/tools/pci/pcitest.c
@@ -23,7 +23,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
-#include <time.h>
#include <unistd.h>
#include <linux/pcitest.h>
@@ -48,17 +47,15 @@ struct pci_test {
unsigned long size;
};
-static int run_test(struct pci_test *test)
+static void run_test(struct pci_test *test)
{
long ret;
int fd;
- struct timespec start, end;
- double time;
fd = open(test->device, O_RDWR);
if (fd < 0) {
perror("can't open PCI Endpoint Test device");
- return fd;
+ return;
}
if (test->barnum >= 0 && test->barnum <= 5) {
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index 42261a9b280e..ac841bc5c35b 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -280,7 +280,7 @@ $(MAN_HTML): $(OUTPUT)%.html : %.txt
mv $@+ $@
ifdef USE_ASCIIDOCTOR
-$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.txt
+$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : %.txt
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
$(ASCIIDOC) -b manpage -d manpage \
$(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index f6d1a03c7523..e30d20fb482d 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -833,7 +833,7 @@ ifndef NO_JVMTI
JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
else
ifneq (,$(wildcard /usr/sbin/alternatives))
- JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
+ JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
endif
endif
ifndef JDIR
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 5224ade3d5af..2f3bf025e305 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -635,7 +635,7 @@ $(LIBPERF_IN): prepare FORCE
$(LIB_FILE): $(LIBPERF_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) $(LIB_OBJS)
-LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
+LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
$(LIBTRACEEVENT): FORCE
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
@@ -779,7 +779,9 @@ endif
ifndef NO_LIBBPF
$(call QUIET_INSTALL, bpf-headers) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
- $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'; \
+ $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
+ $(INSTALL) include/bpf/linux/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'
$(call QUIET_INSTALL, bpf-examples) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \
$(INSTALL) examples/bpf/*.c -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index 6688977e4ac7..76c6345a57d5 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -8,6 +8,63 @@ struct arm64_annotate {
jump_insn;
};
+static int arm64_mov__parse(struct arch *arch __maybe_unused,
+ struct ins_operands *ops,
+ struct map_symbol *ms __maybe_unused)
+{
+ char *s = strchr(ops->raw, ','), *target, *endptr;
+
+ if (s == NULL)
+ return -1;
+
+ *s = '\0';
+ ops->source.raw = strdup(ops->raw);
+ *s = ',';
+
+ if (ops->source.raw == NULL)
+ return -1;
+
+ target = ++s;
+ ops->target.raw = strdup(target);
+ if (ops->target.raw == NULL)
+ goto out_free_source;
+
+ ops->target.addr = strtoull(target, &endptr, 16);
+ if (endptr == target)
+ goto out_free_target;
+
+ s = strchr(endptr, '<');
+ if (s == NULL)
+ goto out_free_target;
+ endptr = strchr(s + 1, '>');
+ if (endptr == NULL)
+ goto out_free_target;
+
+ *endptr = '\0';
+ *s = ' ';
+ ops->target.name = strdup(s);
+ *s = '<';
+ *endptr = '>';
+ if (ops->target.name == NULL)
+ goto out_free_target;
+
+ return 0;
+
+out_free_target:
+ zfree(&ops->target.raw);
+out_free_source:
+ zfree(&ops->source.raw);
+ return -1;
+}
+
+static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops);
+
+static struct ins_ops arm64_mov_ops = {
+ .parse = arm64_mov__parse,
+ .scnprintf = mov__scnprintf,
+};
+
static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const char *name)
{
struct arm64_annotate *arm = arch->priv;
@@ -21,7 +78,7 @@ static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const
else if (!strcmp(name, "ret"))
ops = &ret_ops;
else
- return NULL;
+ ops = &arm64_mov_ops;
arch__associate_ins_ops(arch, name, ops);
return ops;
diff --git a/tools/perf/arch/powerpc/util/book3s_hv_exits.h b/tools/perf/arch/powerpc/util/book3s_hv_exits.h
index 853b95d1e139..2011376c7ab5 100644
--- a/tools/perf/arch/powerpc/util/book3s_hv_exits.h
+++ b/tools/perf/arch/powerpc/util/book3s_hv_exits.h
@@ -15,7 +15,6 @@
{0x400, "INST_STORAGE"}, \
{0x480, "INST_SEGMENT"}, \
{0x500, "EXTERNAL"}, \
- {0x501, "EXTERNAL_LEVEL"}, \
{0x502, "EXTERNAL_HV"}, \
{0x600, "ALIGNMENT"}, \
{0x700, "PROGRAM"}, \
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index cee4e2f7c057..de0dd66dbb48 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -100,8 +100,6 @@ out_free_source:
return -1;
}
-static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
static struct ins_ops s390_mov_ops = {
.parse = s390_mov__parse,
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 830481b8db26..93d679eaf1f4 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -283,12 +283,11 @@ out_put:
return ret;
}
-static int process_feature_event(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session)
+static int process_feature_event(struct perf_session *session,
+ union perf_event *event)
{
if (event->feat.feat_id < HEADER_LAST_FEATURE)
- return perf_event__process_feature(tool, event, session);
+ return perf_event__process_feature(session, event);
return 0;
}
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index a3b346359ba0..eda41673c4f3 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -86,12 +86,10 @@ static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
}
#endif
-static int perf_event__repipe_op2_synth(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session
- __maybe_unused)
+static int perf_event__repipe_op2_synth(struct perf_session *session,
+ union perf_event *event)
{
- return perf_event__repipe_synth(tool, event);
+ return perf_event__repipe_synth(session->tool, event);
}
static int perf_event__repipe_attr(struct perf_tool *tool,
@@ -133,10 +131,10 @@ static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
return 0;
}
-static s64 perf_event__repipe_auxtrace(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session)
+static s64 perf_event__repipe_auxtrace(struct perf_session *session,
+ union perf_event *event)
{
+ struct perf_tool *tool = session->tool;
struct perf_inject *inject = container_of(tool, struct perf_inject,
tool);
int ret;
@@ -174,9 +172,8 @@ static s64 perf_event__repipe_auxtrace(struct perf_tool *tool,
#else
static s64
-perf_event__repipe_auxtrace(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *session __maybe_unused)
+perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
{
pr_err("AUX area tracing not supported\n");
return -EINVAL;
@@ -362,26 +359,24 @@ static int perf_event__repipe_exit(struct perf_tool *tool,
return err;
}
-static int perf_event__repipe_tracing_data(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session)
+static int perf_event__repipe_tracing_data(struct perf_session *session,
+ union perf_event *event)
{
int err;
- perf_event__repipe_synth(tool, event);
- err = perf_event__process_tracing_data(tool, event, session);
+ perf_event__repipe_synth(session->tool, event);
+ err = perf_event__process_tracing_data(session, event);
return err;
}
-static int perf_event__repipe_id_index(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session)
+static int perf_event__repipe_id_index(struct perf_session *session,
+ union perf_event *event)
{
int err;
- perf_event__repipe_synth(tool, event);
- err = perf_event__process_id_index(tool, event, session);
+ perf_event__repipe_synth(session->tool, event);
+ err = perf_event__process_id_index(session, event);
return err;
}
@@ -803,7 +798,8 @@ int cmd_inject(int argc, const char **argv)
"kallsyms pathname"),
OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
- NULL, "opts", "Instruction Tracing options",
+ NULL, "opts", "Instruction Tracing options\n"
+ ITRACE_HELP,
itrace_parse_synth_opts),
OPT_BOOLEAN(0, "strip", &inject.strip,
"strip non-synthesized events (use with --itrace)"),
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 22ebeb92ac51..0980dfe3396b 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -106,9 +106,12 @@ static bool switch_output_time(struct record *rec)
trigger_is_ready(&switch_output_trigger);
}
-static int record__write(struct record *rec, void *bf, size_t size)
+static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
+ void *bf, size_t size)
{
- if (perf_data__write(rec->session->data, bf, size) < 0) {
+ struct perf_data_file *file = &rec->session->data->file;
+
+ if (perf_data_file__write(file, bf, size) < 0) {
pr_err("failed to write perf data, error: %m\n");
return -1;
}
@@ -127,15 +130,15 @@ static int process_synthesized_event(struct perf_tool *tool,
struct machine *machine __maybe_unused)
{
struct record *rec = container_of(tool, struct record, tool);
- return record__write(rec, event, event->header.size);
+ return record__write(rec, NULL, event, event->header.size);
}
-static int record__pushfn(void *to, void *bf, size_t size)
+static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
{
struct record *rec = to;
rec->samples++;
- return record__write(rec, bf, size);
+ return record__write(rec, map, bf, size);
}
static volatile int done;
@@ -170,6 +173,7 @@ static void record__sig_exit(void)
#ifdef HAVE_AUXTRACE_SUPPORT
static int record__process_auxtrace(struct perf_tool *tool,
+ struct perf_mmap *map,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2)
{
@@ -197,21 +201,21 @@ static int record__process_auxtrace(struct perf_tool *tool,
if (padding)
padding = 8 - padding;
- record__write(rec, event, event->header.size);
- record__write(rec, data1, len1);
+ record__write(rec, map, event, event->header.size);
+ record__write(rec, map, data1, len1);
if (len2)
- record__write(rec, data2, len2);
- record__write(rec, &pad, padding);
+ record__write(rec, map, data2, len2);
+ record__write(rec, map, &pad, padding);
return 0;
}
static int record__auxtrace_mmap_read(struct record *rec,
- struct auxtrace_mmap *mm)
+ struct perf_mmap *map)
{
int ret;
- ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
+ ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
record__process_auxtrace);
if (ret < 0)
return ret;
@@ -223,11 +227,11 @@ static int record__auxtrace_mmap_read(struct record *rec,
}
static int record__auxtrace_mmap_read_snapshot(struct record *rec,
- struct auxtrace_mmap *mm)
+ struct perf_mmap *map)
{
int ret;
- ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
+ ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
record__process_auxtrace,
rec->opts.auxtrace_snapshot_size);
if (ret < 0)
@@ -245,13 +249,12 @@ static int record__auxtrace_read_snapshot_all(struct record *rec)
int rc = 0;
for (i = 0; i < rec->evlist->nr_mmaps; i++) {
- struct auxtrace_mmap *mm =
- &rec->evlist->mmap[i].auxtrace_mmap;
+ struct perf_mmap *map = &rec->evlist->mmap[i];
- if (!mm->base)
+ if (!map->auxtrace_mmap.base)
continue;
- if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
+ if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
rc = -1;
goto out;
}
@@ -295,7 +298,7 @@ static int record__auxtrace_init(struct record *rec)
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
- struct auxtrace_mmap *mm __maybe_unused)
+ struct perf_mmap *map __maybe_unused)
{
return 0;
}
@@ -529,17 +532,17 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
return 0;
for (i = 0; i < evlist->nr_mmaps; i++) {
- struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
+ struct perf_mmap *map = &maps[i];
- if (maps[i].base) {
- if (perf_mmap__push(&maps[i], rec, record__pushfn) != 0) {
+ if (map->base) {
+ if (perf_mmap__push(map, rec, record__pushfn) != 0) {
rc = -1;
goto out;
}
}
- if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
- record__auxtrace_mmap_read(rec, mm) != 0) {
+ if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
+ record__auxtrace_mmap_read(rec, map) != 0) {
rc = -1;
goto out;
}
@@ -550,7 +553,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
* at least one event.
*/
if (bytes_written != rec->bytes_written)
- rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
+ rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
if (overwrite)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
@@ -758,7 +761,7 @@ static int record__synthesize(struct record *rec, bool tail)
* We need to synthesize events first, because some
* features works on top of them (on report side).
*/
- err = perf_event__synthesize_attrs(tool, session,
+ err = perf_event__synthesize_attrs(tool, rec->evlist,
process_synthesized_event);
if (err < 0) {
pr_err("Couldn't synthesize attrs.\n");
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 76e12bcd1765..257c9c18cb7e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -201,14 +201,13 @@ static void setup_forced_leader(struct report *report,
perf_evlist__force_leader(evlist);
}
-static int process_feature_event(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session __maybe_unused)
+static int process_feature_event(struct perf_session *session,
+ union perf_event *event)
{
- struct report *rep = container_of(tool, struct report, tool);
+ struct report *rep = container_of(session->tool, struct report, tool);
if (event->feat.feat_id < HEADER_LAST_FEATURE)
- return perf_event__process_feature(tool, event, session);
+ return perf_event__process_feature(session, event);
if (event->feat.feat_id != HEADER_LAST_FEATURE) {
pr_err("failed: wrong feature ID: %" PRIu64 "\n",
@@ -981,6 +980,7 @@ int cmd_report(int argc, const char **argv)
.id_index = perf_event__process_id_index,
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
+ .event_update = perf_event__process_event_update,
.feature = process_feature_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
@@ -1105,7 +1105,7 @@ int cmd_report(int argc, const char **argv)
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"how to display percentage of filtered entries", parse_filter_percentage),
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
- "Instruction Tracing options",
+ "Instruction Tracing options\n" ITRACE_HELP,
itrace_parse_synth_opts),
OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
"Show full source file name path for source lines"),
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index ba481d73f910..4da5e32b9e03 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -406,9 +406,10 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
PERF_OUTPUT_WEIGHT))
return -EINVAL;
- if (PRINT_FIELD(SYM) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) {
+ if (PRINT_FIELD(SYM) &&
+ !(evsel->attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
pr_err("Display of symbols requested but neither sample IP nor "
- "sample address\nis selected. Hence, no addresses to convert "
+ "sample address\navailable. Hence, no addresses to convert "
"to symbols.\n");
return -EINVAL;
}
@@ -417,10 +418,9 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
"selected.\n");
return -EINVAL;
}
- if (PRINT_FIELD(DSO) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR) &&
- !PRINT_FIELD(BRSTACK) && !PRINT_FIELD(BRSTACKSYM) && !PRINT_FIELD(BRSTACKOFF)) {
- pr_err("Display of DSO requested but no address to convert. Select\n"
- "sample IP, sample address, brstack, brstacksym, or brstackoff.\n");
+ if (PRINT_FIELD(DSO) &&
+ !(evsel->attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
+ pr_err("Display of DSO requested but no address to convert.\n");
return -EINVAL;
}
if (PRINT_FIELD(SRCLINE) && !PRINT_FIELD(IP)) {
@@ -1115,6 +1115,7 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
const char *name = NULL;
static int spacing;
int len = 0;
+ int dlen = 0;
u64 ip = 0;
/*
@@ -1141,6 +1142,12 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
ip = sample->ip;
}
+ if (PRINT_FIELD(DSO) && !(PRINT_FIELD(IP) || PRINT_FIELD(ADDR))) {
+ dlen += fprintf(fp, "(");
+ dlen += map__fprintf_dsoname(al->map, fp);
+ dlen += fprintf(fp, ")\t");
+ }
+
if (name)
len = fprintf(fp, "%*s%s", (int)depth * 4, "", name);
else if (ip)
@@ -1159,7 +1166,7 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
if (len < spacing)
len += fprintf(fp, "%*s", spacing - len, "");
- return len;
+ return len + dlen;
}
static int perf_sample__fprintf_insn(struct perf_sample *sample,
@@ -1255,6 +1262,18 @@ static struct {
{0, NULL}
};
+static const char *sample_flags_to_name(u32 flags)
+{
+ int i;
+
+ for (i = 0; sample_flags[i].name ; i++) {
+ if (sample_flags[i].flags == flags)
+ return sample_flags[i].name;
+ }
+
+ return NULL;
+}
+
static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
{
const char *chars = PERF_IP_FLAG_CHARS;
@@ -1264,11 +1283,20 @@ static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
char str[33];
int i, pos = 0;
- for (i = 0; sample_flags[i].name ; i++) {
- if (sample_flags[i].flags == (flags & ~PERF_IP_FLAG_IN_TX)) {
- name = sample_flags[i].name;
- break;
- }
+ name = sample_flags_to_name(flags & ~PERF_IP_FLAG_IN_TX);
+ if (name)
+ return fprintf(fp, " %-15s%4s ", name, in_tx ? "(x)" : "");
+
+ if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
+ name = sample_flags_to_name(flags & ~(PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_TRACE_BEGIN));
+ if (name)
+ return fprintf(fp, " tr strt %-7s%4s ", name, in_tx ? "(x)" : "");
+ }
+
+ if (flags & PERF_IP_FLAG_TRACE_END) {
+ name = sample_flags_to_name(flags & ~(PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_TRACE_END));
+ if (name)
+ return fprintf(fp, " tr end %-7s%4s ", name, in_tx ? "(x)" : "");
}
for (i = 0; i < n; i++, flags >>= 1) {
@@ -1281,10 +1309,7 @@ static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
}
str[pos] = 0;
- if (name)
- return fprintf(fp, " %-7s%4s ", name, in_tx ? "(x)" : "");
-
- return fprintf(fp, " %-11s ", str);
+ return fprintf(fp, " %-19s ", str);
}
struct printer_data {
@@ -1544,7 +1569,8 @@ struct metric_ctx {
FILE *fp;
};
-static void script_print_metric(void *ctx, const char *color,
+static void script_print_metric(struct perf_stat_config *config __maybe_unused,
+ void *ctx, const char *color,
const char *fmt,
const char *unit, double val)
{
@@ -1562,7 +1588,8 @@ static void script_print_metric(void *ctx, const char *color,
fprintf(mctx->fp, " %s\n", unit);
}
-static void script_new_line(void *ctx)
+static void script_new_line(struct perf_stat_config *config __maybe_unused,
+ void *ctx)
{
struct metric_ctx *mctx = ctx;
@@ -1608,7 +1635,7 @@ static void perf_sample__fprint_metric(struct perf_script *script,
evsel_script(evsel)->val = val;
if (evsel_script(evsel->leader)->gnum == evsel->leader->nr_members) {
for_each_group_member (ev2, evsel->leader) {
- perf_stat__print_shadow_stats(ev2,
+ perf_stat__print_shadow_stats(&stat_config, ev2,
evsel_script(ev2)->val,
sample->cpu,
&ctx,
@@ -2489,6 +2516,8 @@ parse:
output[j].fields &= ~all_output_options[i].field;
else
output[j].fields |= all_output_options[i].field;
+ output[j].user_set = true;
+ output[j].wildcard_set = true;
}
}
} else {
@@ -2499,7 +2528,8 @@ parse:
rc = -EINVAL;
goto out;
}
- output[type].fields |= all_output_options[i].field;
+ output[type].user_set = true;
+ output[type].wildcard_set = true;
}
}
@@ -2963,9 +2993,8 @@ static void script__setup_sample_type(struct perf_script *script)
}
}
-static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_session *session)
+static int process_stat_round_event(struct perf_session *session,
+ union perf_event *event)
{
struct stat_round_event *round = &event->stat_round;
struct perf_evsel *counter;
@@ -2979,9 +3008,8 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_stat_config_event(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_session *session __maybe_unused)
+static int process_stat_config_event(struct perf_session *session __maybe_unused,
+ union perf_event *event)
{
perf_event__read_stat_config(&stat_config, &event->stat_config);
return 0;
@@ -3007,10 +3035,10 @@ static int set_maps(struct perf_script *script)
}
static
-int process_thread_map_event(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session __maybe_unused)
+int process_thread_map_event(struct perf_session *session,
+ union perf_event *event)
{
+ struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
if (script->threads) {
@@ -3026,10 +3054,10 @@ int process_thread_map_event(struct perf_tool *tool,
}
static
-int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_session *session __maybe_unused)
+int process_cpu_map_event(struct perf_session *session,
+ union perf_event *event)
{
+ struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
if (script->cpus) {
@@ -3044,21 +3072,21 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
return set_maps(script);
}
-static int process_feature_event(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session)
+static int process_feature_event(struct perf_session *session,
+ union perf_event *event)
{
if (event->feat.feat_id < HEADER_LAST_FEATURE)
- return perf_event__process_feature(tool, event, session);
+ return perf_event__process_feature(session, event);
return 0;
}
#ifdef HAVE_AUXTRACE_SUPPORT
-static int perf_script__process_auxtrace_info(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session)
+static int perf_script__process_auxtrace_info(struct perf_session *session,
+ union perf_event *event)
{
- int ret = perf_event__process_auxtrace_info(tool, event, session);
+ struct perf_tool *tool = session->tool;
+
+ int ret = perf_event__process_auxtrace_info(session, event);
if (ret == 0) {
struct perf_script *script = container_of(tool, struct perf_script, tool);
@@ -3193,7 +3221,7 @@ int cmd_script(int argc, const char **argv)
OPT_BOOLEAN(0, "ns", &nanosecs,
"Use 9 decimal places when displaying time"),
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
- "Instruction Tracing options",
+ "Instruction Tracing options\n" ITRACE_HELP,
itrace_parse_synth_opts),
OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
"Show full source file name path for source lines"),
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index d097b5b47eb8..b86aba1c8028 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -88,8 +88,6 @@
#include "sane_ctype.h"
#define DEFAULT_SEPARATOR " "
-#define CNTR_NOT_SUPPORTED "<not supported>"
-#define CNTR_NOT_COUNTED "<not counted>"
#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
static void print_counters(struct timespec *ts, int argc, const char **argv);
@@ -137,54 +135,30 @@ static const char *smi_cost_attrs = {
static struct perf_evlist *evsel_list;
-static struct rblist metric_events;
-
static struct target target = {
.uid = UINT_MAX,
};
-typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
-
#define METRIC_ONLY_LEN 20
-static int run_count = 1;
-static bool no_inherit = false;
static volatile pid_t child_pid = -1;
-static bool null_run = false;
static int detailed_run = 0;
static bool transaction_run;
static bool topdown_run = false;
static bool smi_cost = false;
static bool smi_reset = false;
-static bool big_num = true;
static int big_num_opt = -1;
-static const char *csv_sep = NULL;
-static bool csv_output = false;
static bool group = false;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
-static unsigned int initial_delay = 0;
-static unsigned int unit_width = 4; /* strlen("unit") */
static bool forever = false;
-static bool metric_only = false;
static bool force_metric_only = false;
-static bool no_merge = false;
-static bool walltime_run_table = false;
static struct timespec ref_time;
-static struct cpu_map *aggr_map;
-static aggr_get_id_t aggr_get_id;
static bool append_file;
static bool interval_count;
-static bool interval_clear;
static const char *output_name;
static int output_fd;
-static int print_free_counters_hint;
-static int print_mixed_hw_group_error;
-static u64 *walltime_run;
-static bool ru_display = false;
-static struct rusage ru_data;
-static unsigned int metric_only_len = METRIC_ONLY_LEN;
struct perf_stat {
bool record;
@@ -204,15 +178,15 @@ static struct perf_stat perf_stat;
static volatile int done = 0;
static struct perf_stat_config stat_config = {
- .aggr_mode = AGGR_GLOBAL,
- .scale = true,
+ .aggr_mode = AGGR_GLOBAL,
+ .scale = true,
+ .unit_width = 4, /* strlen("unit") */
+ .run_count = 1,
+ .metric_only_len = METRIC_ONLY_LEN,
+ .walltime_nsecs_stats = &walltime_nsecs_stats,
+ .big_num = true,
};
-static bool is_duration_time(struct perf_evsel *evsel)
-{
- return !strcmp(evsel->name, "duration_time");
-}
-
static inline void diff_timespec(struct timespec *r, struct timespec *a,
struct timespec *b)
{
@@ -236,66 +210,6 @@ static void perf_stat__reset_stats(void)
perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
}
-static int create_perf_stat_counter(struct perf_evsel *evsel)
-{
- struct perf_event_attr *attr = &evsel->attr;
- struct perf_evsel *leader = evsel->leader;
-
- if (stat_config.scale) {
- attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
- PERF_FORMAT_TOTAL_TIME_RUNNING;
- }
-
- /*
- * The event is part of non trivial group, let's enable
- * the group read (for leader) and ID retrieval for all
- * members.
- */
- if (leader->nr_members > 1)
- attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
-
- attr->inherit = !no_inherit;
-
- /*
- * Some events get initialized with sample_(period/type) set,
- * like tracepoints. Clear it up for counting.
- */
- attr->sample_period = 0;
-
- /*
- * But set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
- * while avoiding that older tools show confusing messages.
- *
- * However for pipe sessions we need to keep it zero,
- * because script's perf_evsel__check_attr is triggered
- * by attr->sample_type != 0, and we can't run it on
- * stat sessions.
- */
- if (!(STAT_RECORD && perf_stat.data.is_pipe))
- attr->sample_type = PERF_SAMPLE_IDENTIFIER;
-
- /*
- * Disabling all counters initially, they will be enabled
- * either manually by us or by kernel via enable_on_exec
- * set later.
- */
- if (perf_evsel__is_group_leader(evsel)) {
- attr->disabled = 1;
-
- /*
- * In case of initial_delay we enable tracee
- * events manually.
- */
- if (target__none(&target) && !initial_delay)
- attr->enable_on_exec = 1;
- }
-
- if (target__has_cpu(&target) && !target__has_per_thread(&target))
- return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
-
- return perf_evsel__open_per_thread(evsel, evsel_list->threads);
-}
-
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -428,15 +342,15 @@ static void process_interval(void)
static void enable_counters(void)
{
- if (initial_delay)
- usleep(initial_delay * USEC_PER_MSEC);
+ if (stat_config.initial_delay)
+ usleep(stat_config.initial_delay * USEC_PER_MSEC);
/*
* We need to enable counters only if:
* - we don't have tracee (attaching to task or cpu)
* - we have initial delay configured
*/
- if (!target__none(&target) || initial_delay)
+ if (!target__none(&target) || stat_config.initial_delay)
perf_evlist__enable(evsel_list);
}
@@ -464,80 +378,6 @@ static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *inf
workload_exec_errno = info->si_value.sival_int;
}
-static int perf_stat_synthesize_config(bool is_pipe)
-{
- int err;
-
- if (is_pipe) {
- err = perf_event__synthesize_attrs(NULL, perf_stat.session,
- process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize attrs.\n");
- return err;
- }
- }
-
- err = perf_event__synthesize_extra_attr(NULL,
- evsel_list,
- process_synthesized_event,
- is_pipe);
-
- err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
- process_synthesized_event,
- NULL);
- if (err < 0) {
- pr_err("Couldn't synthesize thread map.\n");
- return err;
- }
-
- err = perf_event__synthesize_cpu_map(NULL, evsel_list->cpus,
- process_synthesized_event, NULL);
- if (err < 0) {
- pr_err("Couldn't synthesize thread map.\n");
- return err;
- }
-
- err = perf_event__synthesize_stat_config(NULL, &stat_config,
- process_synthesized_event, NULL);
- if (err < 0) {
- pr_err("Couldn't synthesize config.\n");
- return err;
- }
-
- return 0;
-}
-
-#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-
-static int __store_counter_ids(struct perf_evsel *counter)
-{
- int cpu, thread;
-
- for (cpu = 0; cpu < xyarray__max_x(counter->fd); cpu++) {
- for (thread = 0; thread < xyarray__max_y(counter->fd);
- thread++) {
- int fd = FD(counter, cpu, thread);
-
- if (perf_evlist__id_add_fd(evsel_list, counter,
- cpu, thread, fd) < 0)
- return -1;
- }
- }
-
- return 0;
-}
-
-static int store_counter_ids(struct perf_evsel *counter)
-{
- struct cpu_map *cpus = counter->cpus;
- struct thread_map *threads = counter->threads;
-
- if (perf_evsel__alloc_id(counter, cpus->nr, threads->nr))
- return -ENOMEM;
-
- return __store_counter_ids(counter);
-}
-
static bool perf_evsel__should_store_id(struct perf_evsel *counter)
{
return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
@@ -609,7 +449,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
evlist__for_each_entry(evsel_list, counter) {
try_again:
- if (create_perf_stat_counter(counter) < 0) {
+ if (create_perf_stat_counter(counter, &stat_config, &target) < 0) {
/* Weak group failed. Reset the group. */
if ((errno == EINVAL || errno == EBADF) &&
@@ -664,11 +504,11 @@ try_again:
counter->supported = true;
l = strlen(counter->unit);
- if (l > unit_width)
- unit_width = l;
+ if (l > stat_config.unit_width)
+ stat_config.unit_width = l;
if (perf_evsel__should_store_id(counter) &&
- store_counter_ids(counter))
+ perf_evsel__store_ids(counter, evsel_list))
return -1;
}
@@ -699,7 +539,8 @@ try_again:
if (err < 0)
return err;
- err = perf_stat_synthesize_config(is_pipe);
+ err = perf_stat_synthesize_config(&stat_config, NULL, evsel_list,
+ process_synthesized_event, is_pipe);
if (err < 0)
return err;
}
@@ -724,7 +565,7 @@ try_again:
break;
}
}
- wait4(child_pid, &status, 0, &ru_data);
+ wait4(child_pid, &status, 0, &stat_config.ru_data);
if (workload_exec_errno) {
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
@@ -752,8 +593,8 @@ try_again:
t1 = rdclock();
- if (walltime_run_table)
- walltime_run[run_idx] = t1 - t0;
+ if (stat_config.walltime_run_table)
+ stat_config.walltime_run[run_idx] = t1 - t0;
update_stats(&walltime_nsecs_stats, t1 - t0);
@@ -795,1105 +636,14 @@ static int run_perf_stat(int argc, const char **argv, int run_idx)
return ret;
}
-static void print_running(u64 run, u64 ena)
-{
- if (csv_output) {
- fprintf(stat_config.output, "%s%" PRIu64 "%s%.2f",
- csv_sep,
- run,
- csv_sep,
- ena ? 100.0 * run / ena : 100.0);
- } else if (run != ena) {
- fprintf(stat_config.output, " (%.2f%%)", 100.0 * run / ena);
- }
-}
-
-static void print_noise_pct(double total, double avg)
-{
- double pct = rel_stddev_stats(total, avg);
-
- if (csv_output)
- fprintf(stat_config.output, "%s%.2f%%", csv_sep, pct);
- else if (pct)
- fprintf(stat_config.output, " ( +-%6.2f%% )", pct);
-}
-
-static void print_noise(struct perf_evsel *evsel, double avg)
-{
- struct perf_stat_evsel *ps;
-
- if (run_count == 1)
- return;
-
- ps = evsel->stats;
- print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
-}
-
-static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
-{
- switch (stat_config.aggr_mode) {
- case AGGR_CORE:
- fprintf(stat_config.output, "S%d-C%*d%s%*d%s",
- cpu_map__id_to_socket(id),
- csv_output ? 0 : -8,
- cpu_map__id_to_cpu(id),
- csv_sep,
- csv_output ? 0 : 4,
- nr,
- csv_sep);
- break;
- case AGGR_SOCKET:
- fprintf(stat_config.output, "S%*d%s%*d%s",
- csv_output ? 0 : -5,
- id,
- csv_sep,
- csv_output ? 0 : 4,
- nr,
- csv_sep);
- break;
- case AGGR_NONE:
- fprintf(stat_config.output, "CPU%*d%s",
- csv_output ? 0 : -4,
- perf_evsel__cpus(evsel)->map[id], csv_sep);
- break;
- case AGGR_THREAD:
- fprintf(stat_config.output, "%*s-%*d%s",
- csv_output ? 0 : 16,
- thread_map__comm(evsel->threads, id),
- csv_output ? 0 : -8,
- thread_map__pid(evsel->threads, id),
- csv_sep);
- break;
- case AGGR_GLOBAL:
- case AGGR_UNSET:
- default:
- break;
- }
-}
-
-struct outstate {
- FILE *fh;
- bool newline;
- const char *prefix;
- int nfields;
- int id, nr;
- struct perf_evsel *evsel;
-};
-
-#define METRIC_LEN 35
-
-static void new_line_std(void *ctx)
-{
- struct outstate *os = ctx;
-
- os->newline = true;
-}
-
-static void do_new_line_std(struct outstate *os)
-{
- fputc('\n', os->fh);
- fputs(os->prefix, os->fh);
- aggr_printout(os->evsel, os->id, os->nr);
- if (stat_config.aggr_mode == AGGR_NONE)
- fprintf(os->fh, " ");
- fprintf(os->fh, " ");
-}
-
-static void print_metric_std(void *ctx, const char *color, const char *fmt,
- const char *unit, double val)
-{
- struct outstate *os = ctx;
- FILE *out = os->fh;
- int n;
- bool newline = os->newline;
-
- os->newline = false;
-
- if (unit == NULL || fmt == NULL) {
- fprintf(out, "%-*s", METRIC_LEN, "");
- return;
- }
-
- if (newline)
- do_new_line_std(os);
-
- n = fprintf(out, " # ");
- if (color)
- n += color_fprintf(out, color, fmt, val);
- else
- n += fprintf(out, fmt, val);
- fprintf(out, " %-*s", METRIC_LEN - n - 1, unit);
-}
-
-static void new_line_csv(void *ctx)
-{
- struct outstate *os = ctx;
- int i;
-
- fputc('\n', os->fh);
- if (os->prefix)
- fprintf(os->fh, "%s%s", os->prefix, csv_sep);
- aggr_printout(os->evsel, os->id, os->nr);
- for (i = 0; i < os->nfields; i++)
- fputs(csv_sep, os->fh);
-}
-
-static void print_metric_csv(void *ctx,
- const char *color __maybe_unused,
- const char *fmt, const char *unit, double val)
-{
- struct outstate *os = ctx;
- FILE *out = os->fh;
- char buf[64], *vals, *ends;
-
- if (unit == NULL || fmt == NULL) {
- fprintf(out, "%s%s", csv_sep, csv_sep);
- return;
- }
- snprintf(buf, sizeof(buf), fmt, val);
- ends = vals = ltrim(buf);
- while (isdigit(*ends) || *ends == '.')
- ends++;
- *ends = 0;
- while (isspace(*unit))
- unit++;
- fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit);
-}
-
-/* Filter out some columns that don't work well in metrics only mode */
-
-static bool valid_only_metric(const char *unit)
-{
- if (!unit)
- return false;
- if (strstr(unit, "/sec") ||
- strstr(unit, "hz") ||
- strstr(unit, "Hz") ||
- strstr(unit, "CPUs utilized"))
- return false;
- return true;
-}
-
-static const char *fixunit(char *buf, struct perf_evsel *evsel,
- const char *unit)
-{
- if (!strncmp(unit, "of all", 6)) {
- snprintf(buf, 1024, "%s %s", perf_evsel__name(evsel),
- unit);
- return buf;
- }
- return unit;
-}
-
-static void print_metric_only(void *ctx, const char *color, const char *fmt,
- const char *unit, double val)
-{
- struct outstate *os = ctx;
- FILE *out = os->fh;
- char buf[1024], str[1024];
- unsigned mlen = metric_only_len;
-
- if (!valid_only_metric(unit))
- return;
- unit = fixunit(buf, os->evsel, unit);
- if (mlen < strlen(unit))
- mlen = strlen(unit) + 1;
-
- if (color)
- mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
-
- color_snprintf(str, sizeof(str), color ?: "", fmt, val);
- fprintf(out, "%*s ", mlen, str);
-}
-
-static void print_metric_only_csv(void *ctx, const char *color __maybe_unused,
- const char *fmt,
- const char *unit, double val)
-{
- struct outstate *os = ctx;
- FILE *out = os->fh;
- char buf[64], *vals, *ends;
- char tbuf[1024];
-
- if (!valid_only_metric(unit))
- return;
- unit = fixunit(tbuf, os->evsel, unit);
- snprintf(buf, sizeof buf, fmt, val);
- ends = vals = ltrim(buf);
- while (isdigit(*ends) || *ends == '.')
- ends++;
- *ends = 0;
- fprintf(out, "%s%s", vals, csv_sep);
-}
-
-static void new_line_metric(void *ctx __maybe_unused)
-{
-}
-
-static void print_metric_header(void *ctx, const char *color __maybe_unused,
- const char *fmt __maybe_unused,
- const char *unit, double val __maybe_unused)
-{
- struct outstate *os = ctx;
- char tbuf[1024];
-
- if (!valid_only_metric(unit))
- return;
- unit = fixunit(tbuf, os->evsel, unit);
- if (csv_output)
- fprintf(os->fh, "%s%s", unit, csv_sep);
- else
- fprintf(os->fh, "%*s ", metric_only_len, unit);
-}
-
-static int first_shadow_cpu(struct perf_evsel *evsel, int id)
-{
- int i;
-
- if (!aggr_get_id)
- return 0;
-
- if (stat_config.aggr_mode == AGGR_NONE)
- return id;
-
- if (stat_config.aggr_mode == AGGR_GLOBAL)
- return 0;
-
- for (i = 0; i < perf_evsel__nr_cpus(evsel); i++) {
- int cpu2 = perf_evsel__cpus(evsel)->map[i];
-
- if (aggr_get_id(evsel_list->cpus, cpu2) == id)
- return cpu2;
- }
- return 0;
-}
-
-static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
-{
- FILE *output = stat_config.output;
- double sc = evsel->scale;
- const char *fmt;
-
- if (csv_output) {
- fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s";
- } else {
- if (big_num)
- fmt = floor(sc) != sc ? "%'18.2f%s" : "%'18.0f%s";
- else
- fmt = floor(sc) != sc ? "%18.2f%s" : "%18.0f%s";
- }
-
- aggr_printout(evsel, id, nr);
-
- fprintf(output, fmt, avg, csv_sep);
-
- if (evsel->unit)
- fprintf(output, "%-*s%s",
- csv_output ? 0 : unit_width,
- evsel->unit, csv_sep);
-
- fprintf(output, "%-*s", csv_output ? 0 : 25, perf_evsel__name(evsel));
-
- if (evsel->cgrp)
- fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
-}
-
-static bool is_mixed_hw_group(struct perf_evsel *counter)
-{
- struct perf_evlist *evlist = counter->evlist;
- u32 pmu_type = counter->attr.type;
- struct perf_evsel *pos;
-
- if (counter->nr_members < 2)
- return false;
-
- evlist__for_each_entry(evlist, pos) {
- /* software events can be part of any hardware group */
- if (pos->attr.type == PERF_TYPE_SOFTWARE)
- continue;
- if (pmu_type == PERF_TYPE_SOFTWARE) {
- pmu_type = pos->attr.type;
- continue;
- }
- if (pmu_type != pos->attr.type)
- return true;
- }
-
- return false;
-}
-
-static void printout(int id, int nr, struct perf_evsel *counter, double uval,
- char *prefix, u64 run, u64 ena, double noise,
- struct runtime_stat *st)
-{
- struct perf_stat_output_ctx out;
- struct outstate os = {
- .fh = stat_config.output,
- .prefix = prefix ? prefix : "",
- .id = id,
- .nr = nr,
- .evsel = counter,
- };
- print_metric_t pm = print_metric_std;
- void (*nl)(void *);
-
- if (metric_only) {
- nl = new_line_metric;
- if (csv_output)
- pm = print_metric_only_csv;
- else
- pm = print_metric_only;
- } else
- nl = new_line_std;
-
- if (csv_output && !metric_only) {
- static int aggr_fields[] = {
- [AGGR_GLOBAL] = 0,
- [AGGR_THREAD] = 1,
- [AGGR_NONE] = 1,
- [AGGR_SOCKET] = 2,
- [AGGR_CORE] = 2,
- };
-
- pm = print_metric_csv;
- nl = new_line_csv;
- os.nfields = 3;
- os.nfields += aggr_fields[stat_config.aggr_mode];
- if (counter->cgrp)
- os.nfields++;
- }
- if (run == 0 || ena == 0 || counter->counts->scaled == -1) {
- if (metric_only) {
- pm(&os, NULL, "", "", 0);
- return;
- }
- aggr_printout(counter, id, nr);
-
- fprintf(stat_config.output, "%*s%s",
- csv_output ? 0 : 18,
- counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
- csv_sep);
-
- if (counter->supported) {
- print_free_counters_hint = 1;
- if (is_mixed_hw_group(counter))
- print_mixed_hw_group_error = 1;
- }
-
- fprintf(stat_config.output, "%-*s%s",
- csv_output ? 0 : unit_width,
- counter->unit, csv_sep);
-
- fprintf(stat_config.output, "%*s",
- csv_output ? 0 : -25,
- perf_evsel__name(counter));
-
- if (counter->cgrp)
- fprintf(stat_config.output, "%s%s",
- csv_sep, counter->cgrp->name);
-
- if (!csv_output)
- pm(&os, NULL, NULL, "", 0);
- print_noise(counter, noise);
- print_running(run, ena);
- if (csv_output)
- pm(&os, NULL, NULL, "", 0);
- return;
- }
-
- if (!metric_only)
- abs_printout(id, nr, counter, uval);
-
- out.print_metric = pm;
- out.new_line = nl;
- out.ctx = &os;
- out.force_header = false;
-
- if (csv_output && !metric_only) {
- print_noise(counter, noise);
- print_running(run, ena);
- }
-
- perf_stat__print_shadow_stats(counter, uval,
- first_shadow_cpu(counter, id),
- &out, &metric_events, st);
- if (!csv_output && !metric_only) {
- print_noise(counter, noise);
- print_running(run, ena);
- }
-}
-
-static void aggr_update_shadow(void)
-{
- int cpu, s2, id, s;
- u64 val;
- struct perf_evsel *counter;
-
- for (s = 0; s < aggr_map->nr; s++) {
- id = aggr_map->map[s];
- evlist__for_each_entry(evsel_list, counter) {
- val = 0;
- for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
- s2 = aggr_get_id(evsel_list->cpus, cpu);
- if (s2 != id)
- continue;
- val += perf_counts(counter->counts, cpu, 0)->val;
- }
- perf_stat__update_shadow_stats(counter, val,
- first_shadow_cpu(counter, id),
- &rt_stat);
- }
- }
-}
-
-static void uniquify_event_name(struct perf_evsel *counter)
-{
- char *new_name;
- char *config;
-
- if (counter->uniquified_name ||
- !counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
- strlen(counter->pmu_name)))
- return;
-
- config = strchr(counter->name, '/');
- if (config) {
- if (asprintf(&new_name,
- "%s%s", counter->pmu_name, config) > 0) {
- free(counter->name);
- counter->name = new_name;
- }
- } else {
- if (asprintf(&new_name,
- "%s [%s]", counter->name, counter->pmu_name) > 0) {
- free(counter->name);
- counter->name = new_name;
- }
- }
-
- counter->uniquified_name = true;
-}
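
For reference, the renaming above produces names like these; a standalone sketch (event and PMU names are made up, error handling elided for brevity):

#define _GNU_SOURCE	/* for asprintf() */
#include <stdio.h>
#include <string.h>

static char *uniquify(const char *name, const char *pmu_name)
{
	char *new_name = NULL;
	const char *config = strchr(name, '/');

	if (config)	/* keep the /config/ part, swap in the PMU name */
		asprintf(&new_name, "%s%s", pmu_name, config);
	else		/* plain names get the PMU name appended in brackets */
		asprintf(&new_name, "%s [%s]", name, pmu_name);
	return new_name;
}

int main(void)
{
	puts(uniquify("cpu/event=0x3c/", "uncore_imc_0")); /* uncore_imc_0/event=0x3c/ */
	puts(uniquify("cycles", "uncore_imc_0"));          /* cycles [uncore_imc_0] */
	return 0;
}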
-
-static void collect_all_aliases(struct perf_evsel *counter,
- void (*cb)(struct perf_evsel *counter, void *data,
- bool first),
- void *data)
-{
- struct perf_evsel *alias;
-
- alias = list_prepare_entry(counter, &(evsel_list->entries), node);
- list_for_each_entry_continue (alias, &evsel_list->entries, node) {
- if (strcmp(perf_evsel__name(alias), perf_evsel__name(counter)) ||
- alias->scale != counter->scale ||
- alias->cgrp != counter->cgrp ||
- strcmp(alias->unit, counter->unit) ||
- perf_evsel__is_clock(alias) != perf_evsel__is_clock(counter))
- break;
- alias->merged_stat = true;
- cb(alias, data, false);
- }
-}
-
-static bool collect_data(struct perf_evsel *counter,
- void (*cb)(struct perf_evsel *counter, void *data,
- bool first),
- void *data)
-{
- if (counter->merged_stat)
- return false;
- cb(counter, data, true);
- if (no_merge)
- uniquify_event_name(counter);
- else if (counter->auto_merge_stats)
- collect_all_aliases(counter, cb, data);
- return true;
-}
-
-struct aggr_data {
- u64 ena, run, val;
- int id;
- int nr;
- int cpu;
-};
-
-static void aggr_cb(struct perf_evsel *counter, void *data, bool first)
-{
- struct aggr_data *ad = data;
- int cpu, s2;
-
- for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
- struct perf_counts_values *counts;
-
- s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
- if (s2 != ad->id)
- continue;
- if (first)
- ad->nr++;
- counts = perf_counts(counter->counts, cpu, 0);
- /*
- * When any result is bad, make them all bad to keep
- * the output consistent in interval mode.
- */
- if (counts->ena == 0 || counts->run == 0 ||
- counter->counts->scaled == -1) {
- ad->ena = 0;
- ad->run = 0;
- break;
- }
- ad->val += counts->val;
- ad->ena += counts->ena;
- ad->run += counts->run;
- }
-}
-
-static void print_aggr(char *prefix)
-{
- FILE *output = stat_config.output;
- struct perf_evsel *counter;
- int s, id, nr;
- double uval;
- u64 ena, run, val;
- bool first;
-
- if (!(aggr_map || aggr_get_id))
- return;
-
- aggr_update_shadow();
-
- /*
- * With metric_only everything is on a single line.
- * Without it, each counter has its own line.
- */
- for (s = 0; s < aggr_map->nr; s++) {
- struct aggr_data ad;
- if (prefix && metric_only)
- fprintf(output, "%s", prefix);
-
- ad.id = id = aggr_map->map[s];
- first = true;
- evlist__for_each_entry(evsel_list, counter) {
- if (is_duration_time(counter))
- continue;
-
- ad.val = ad.ena = ad.run = 0;
- ad.nr = 0;
- if (!collect_data(counter, aggr_cb, &ad))
- continue;
- nr = ad.nr;
- ena = ad.ena;
- run = ad.run;
- val = ad.val;
- if (first && metric_only) {
- first = false;
- aggr_printout(counter, id, nr);
- }
- if (prefix && !metric_only)
- fprintf(output, "%s", prefix);
-
- uval = val * counter->scale;
- printout(id, nr, counter, uval, prefix, run, ena, 1.0,
- &rt_stat);
- if (!metric_only)
- fputc('\n', output);
- }
- if (metric_only)
- fputc('\n', output);
- }
-}
-
-static int cmp_val(const void *a, const void *b)
-{
- return ((struct perf_aggr_thread_value *)b)->val -
- ((struct perf_aggr_thread_value *)a)->val;
-}
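
Note that cmp_val() narrows the difference of two u64 counts to int, which can wrap for very large values; a truncation-safe descending comparator would look like this (sketch, not part of the patch):

static int cmp_val_safe(const void *a, const void *b)
{
	u64 va = ((const struct perf_aggr_thread_value *)a)->val;
	u64 vb = ((const struct perf_aggr_thread_value *)b)->val;

	/* descending order without narrowing u64 arithmetic to int */
	if (vb > va)
		return 1;
	if (vb < va)
		return -1;
	return 0;
}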
-
-static struct perf_aggr_thread_value *sort_aggr_thread(
- struct perf_evsel *counter,
- int nthreads, int ncpus,
- int *ret)
-{
- int cpu, thread, i = 0;
- double uval;
- struct perf_aggr_thread_value *buf;
-
- buf = calloc(nthreads, sizeof(struct perf_aggr_thread_value));
- if (!buf)
- return NULL;
-
- for (thread = 0; thread < nthreads; thread++) {
- u64 ena = 0, run = 0, val = 0;
-
- for (cpu = 0; cpu < ncpus; cpu++) {
- val += perf_counts(counter->counts, cpu, thread)->val;
- ena += perf_counts(counter->counts, cpu, thread)->ena;
- run += perf_counts(counter->counts, cpu, thread)->run;
- }
-
- uval = val * counter->scale;
-
- /*
- * Skip value 0 when enabling --per-thread globally;
- * otherwise there would be too many zero values in the output.
- */
- if (uval == 0.0 && target__has_per_thread(&target))
- continue;
-
- buf[i].counter = counter;
- buf[i].id = thread;
- buf[i].uval = uval;
- buf[i].val = val;
- buf[i].run = run;
- buf[i].ena = ena;
- i++;
- }
-
- qsort(buf, i, sizeof(struct perf_aggr_thread_value), cmp_val);
-
- if (ret)
- *ret = i;
-
- return buf;
-}
-
-static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
-{
- FILE *output = stat_config.output;
- int nthreads = thread_map__nr(counter->threads);
- int ncpus = cpu_map__nr(counter->cpus);
- int thread, sorted_threads, id;
- struct perf_aggr_thread_value *buf;
-
- buf = sort_aggr_thread(counter, nthreads, ncpus, &sorted_threads);
- if (!buf) {
- perror("cannot sort aggr thread");
- return;
- }
-
- for (thread = 0; thread < sorted_threads; thread++) {
- if (prefix)
- fprintf(output, "%s", prefix);
-
- id = buf[thread].id;
- if (stat_config.stats)
- printout(id, 0, buf[thread].counter, buf[thread].uval,
- prefix, buf[thread].run, buf[thread].ena, 1.0,
- &stat_config.stats[id]);
- else
- printout(id, 0, buf[thread].counter, buf[thread].uval,
- prefix, buf[thread].run, buf[thread].ena, 1.0,
- &rt_stat);
- fputc('\n', output);
- }
-
- free(buf);
-}
-
-struct caggr_data {
- double avg, avg_enabled, avg_running;
-};
-
-static void counter_aggr_cb(struct perf_evsel *counter, void *data,
- bool first __maybe_unused)
-{
- struct caggr_data *cd = data;
- struct perf_stat_evsel *ps = counter->stats;
-
- cd->avg += avg_stats(&ps->res_stats[0]);
- cd->avg_enabled += avg_stats(&ps->res_stats[1]);
- cd->avg_running += avg_stats(&ps->res_stats[2]);
-}
-
-/*
- * Print out the results of a single counter:
- * aggregated counts in system-wide mode
- */
-static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
-{
- FILE *output = stat_config.output;
- double uval;
- struct caggr_data cd = { .avg = 0.0 };
-
- if (!collect_data(counter, counter_aggr_cb, &cd))
- return;
-
- if (prefix && !metric_only)
- fprintf(output, "%s", prefix);
-
- uval = cd.avg * counter->scale;
- printout(-1, 0, counter, uval, prefix, cd.avg_running, cd.avg_enabled,
- cd.avg, &rt_stat);
- if (!metric_only)
- fprintf(output, "\n");
-}
-
-static void counter_cb(struct perf_evsel *counter, void *data,
- bool first __maybe_unused)
-{
- struct aggr_data *ad = data;
-
- ad->val += perf_counts(counter->counts, ad->cpu, 0)->val;
- ad->ena += perf_counts(counter->counts, ad->cpu, 0)->ena;
- ad->run += perf_counts(counter->counts, ad->cpu, 0)->run;
-}
-
-/*
- * Print out the results of a single counter:
- * does not use aggregated counts in system-wide mode
- */
-static void print_counter(struct perf_evsel *counter, char *prefix)
-{
- FILE *output = stat_config.output;
- u64 ena, run, val;
- double uval;
- int cpu;
-
- for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
- struct aggr_data ad = { .cpu = cpu };
-
- if (!collect_data(counter, counter_cb, &ad))
- return;
- val = ad.val;
- ena = ad.ena;
- run = ad.run;
-
- if (prefix)
- fprintf(output, "%s", prefix);
-
- uval = val * counter->scale;
- printout(cpu, 0, counter, uval, prefix, run, ena, 1.0,
- &rt_stat);
-
- fputc('\n', output);
- }
-}
-
-static void print_no_aggr_metric(char *prefix)
-{
- int cpu;
- int nrcpus = 0;
- struct perf_evsel *counter;
- u64 ena, run, val;
- double uval;
-
- nrcpus = evsel_list->cpus->nr;
- for (cpu = 0; cpu < nrcpus; cpu++) {
- bool first = true;
-
- if (prefix)
- fputs(prefix, stat_config.output);
- evlist__for_each_entry(evsel_list, counter) {
- if (is_duration_time(counter))
- continue;
- if (first) {
- aggr_printout(counter, cpu, 0);
- first = false;
- }
- val = perf_counts(counter->counts, cpu, 0)->val;
- ena = perf_counts(counter->counts, cpu, 0)->ena;
- run = perf_counts(counter->counts, cpu, 0)->run;
-
- uval = val * counter->scale;
- printout(cpu, 0, counter, uval, prefix, run, ena, 1.0,
- &rt_stat);
- }
- fputc('\n', stat_config.output);
- }
-}
-
-static int aggr_header_lens[] = {
- [AGGR_CORE] = 18,
- [AGGR_SOCKET] = 12,
- [AGGR_NONE] = 6,
- [AGGR_THREAD] = 24,
- [AGGR_GLOBAL] = 0,
-};
-
-static const char *aggr_header_csv[] = {
- [AGGR_CORE] = "core,cpus,",
- [AGGR_SOCKET] = "socket,cpus,",
- [AGGR_NONE] = "cpu,",
- [AGGR_THREAD] = "comm-pid,",
- [AGGR_GLOBAL] = ""
-};
-
-static void print_metric_headers(const char *prefix, bool no_indent)
-{
- struct perf_stat_output_ctx out;
- struct perf_evsel *counter;
- struct outstate os = {
- .fh = stat_config.output
- };
-
- if (prefix)
- fprintf(stat_config.output, "%s", prefix);
-
- if (!csv_output && !no_indent)
- fprintf(stat_config.output, "%*s",
- aggr_header_lens[stat_config.aggr_mode], "");
- if (csv_output) {
- if (stat_config.interval)
- fputs("time,", stat_config.output);
- fputs(aggr_header_csv[stat_config.aggr_mode],
- stat_config.output);
- }
-
- /* Print metrics headers only */
- evlist__for_each_entry(evsel_list, counter) {
- if (is_duration_time(counter))
- continue;
- os.evsel = counter;
- out.ctx = &os;
- out.print_metric = print_metric_header;
- out.new_line = new_line_metric;
- out.force_header = true;
- os.evsel = counter;
- perf_stat__print_shadow_stats(counter, 0,
- 0,
- &out,
- &metric_events,
- &rt_stat);
- }
- fputc('\n', stat_config.output);
-}
-
-static void print_interval(char *prefix, struct timespec *ts)
-{
- FILE *output = stat_config.output;
- static int num_print_interval;
-
- if (interval_clear)
- puts(CONSOLE_CLEAR);
-
- sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
-
- if ((num_print_interval == 0 && !csv_output) || interval_clear) {
- switch (stat_config.aggr_mode) {
- case AGGR_SOCKET:
- fprintf(output, "# time socket cpus");
- if (!metric_only)
- fprintf(output, " counts %*s events\n", unit_width, "unit");
- break;
- case AGGR_CORE:
- fprintf(output, "# time core cpus");
- if (!metric_only)
- fprintf(output, " counts %*s events\n", unit_width, "unit");
- break;
- case AGGR_NONE:
- fprintf(output, "# time CPU ");
- if (!metric_only)
- fprintf(output, " counts %*s events\n", unit_width, "unit");
- break;
- case AGGR_THREAD:
- fprintf(output, "# time comm-pid");
- if (!metric_only)
- fprintf(output, " counts %*s events\n", unit_width, "unit");
- break;
- case AGGR_GLOBAL:
- default:
- fprintf(output, "# time");
- if (!metric_only)
- fprintf(output, " counts %*s events\n", unit_width, "unit");
- case AGGR_UNSET:
- break;
- }
- }
-
- if ((num_print_interval == 0 || interval_clear) && metric_only)
- print_metric_headers(" ", true);
- if (++num_print_interval == 25)
- num_print_interval = 0;
-}
-
-static void print_header(int argc, const char **argv)
-{
- FILE *output = stat_config.output;
- int i;
-
- fflush(stdout);
-
- if (!csv_output) {
- fprintf(output, "\n");
- fprintf(output, " Performance counter stats for ");
- if (target.system_wide)
- fprintf(output, "\'system wide");
- else if (target.cpu_list)
- fprintf(output, "\'CPU(s) %s", target.cpu_list);
- else if (!target__has_task(&target)) {
- fprintf(output, "\'%s", argv ? argv[0] : "pipe");
- for (i = 1; argv && (i < argc); i++)
- fprintf(output, " %s", argv[i]);
- } else if (target.pid)
- fprintf(output, "process id \'%s", target.pid);
- else
- fprintf(output, "thread id \'%s", target.tid);
-
- fprintf(output, "\'");
- if (run_count > 1)
- fprintf(output, " (%d runs)", run_count);
- fprintf(output, ":\n\n");
- }
-}
-
-static int get_precision(double num)
-{
- if (num > 1)
- return 0;
-
- return lround(ceil(-log10(num)));
-}
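
Worked through with numbers, get_precision() returns enough decimal places to resolve a standard deviation below 1 (the values here are made up):

/* get_precision(0.004) -> ceil(-log10(0.004)) = ceil(2.39...) = 3
 * get_precision(0.25)  -> ceil(-log10(0.25))  = ceil(0.60...) = 1
 * get_precision(2.0)   -> 0  (anything above 1 needs no decimals)
 *
 * print_footer() below adds 2, so sd = 0.004 prints with 5 decimals.
 */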
-
-static void print_table(FILE *output, int precision, double avg)
-{
- char tmp[64];
- int idx, indent = 0;
-
- scnprintf(tmp, 64, " %17.*f", precision, avg);
- while (tmp[indent] == ' ')
- indent++;
-
- fprintf(output, "%*s# Table of individual measurements:\n", indent, "");
-
- for (idx = 0; idx < run_count; idx++) {
- double run = (double) walltime_run[idx] / NSEC_PER_SEC;
- int h, n = 1 + abs((int) (100.0 * (run - avg)/run) / 5);
-
- fprintf(output, " %17.*f (%+.*f) ",
- precision, run, precision, run - avg);
-
- for (h = 0; h < n; h++)
- fprintf(output, "#");
-
- fprintf(output, "\n");
- }
-
- fprintf(output, "\n%*s# Final result:\n", indent, "");
-}
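
Each table row draws one '#' per started 5% of deviation from the mean, so the trailing marks form a crude histogram; for example (hypothetical run):

/* run = 1.12 s, avg = 1.00 s:
 *   100.0 * (run - avg) / run = 10.7 -> (int)10.7 / 5 = 2 -> n = 1 + 2 = 3
 * which prints a row like: "  1.12 (+0.12) ###"
 */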
-
-static double timeval2double(struct timeval *t)
-{
- return t->tv_sec + (double) t->tv_usec/USEC_PER_SEC;
-}
-
-static void print_footer(void)
-{
- double avg = avg_stats(&walltime_nsecs_stats) / NSEC_PER_SEC;
- FILE *output = stat_config.output;
- int n;
-
- if (!null_run)
- fprintf(output, "\n");
-
- if (run_count == 1) {
- fprintf(output, " %17.9f seconds time elapsed", avg);
-
- if (ru_display) {
- double ru_utime = timeval2double(&ru_data.ru_utime);
- double ru_stime = timeval2double(&ru_data.ru_stime);
-
- fprintf(output, "\n\n");
- fprintf(output, " %17.9f seconds user\n", ru_utime);
- fprintf(output, " %17.9f seconds sys\n", ru_stime);
- }
- } else {
- double sd = stddev_stats(&walltime_nsecs_stats) / NSEC_PER_SEC;
- /*
- * Display at most 2 more significant
- * digits than the stddev inaccuracy.
- */
- int precision = get_precision(sd) + 2;
-
- if (walltime_run_table)
- print_table(output, precision, avg);
-
- fprintf(output, " %17.*f +- %.*f seconds time elapsed",
- precision, avg, precision, sd);
-
- print_noise_pct(sd, avg);
- }
- fprintf(output, "\n\n");
-
- if (print_free_counters_hint &&
- sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 &&
- n > 0)
- fprintf(output,
-"Some events weren't counted. Try disabling the NMI watchdog:\n"
-" echo 0 > /proc/sys/kernel/nmi_watchdog\n"
-" perf stat ...\n"
-" echo 1 > /proc/sys/kernel/nmi_watchdog\n");
-
- if (print_mixed_hw_group_error)
- fprintf(output,
- "The events in group usually have to be from "
- "the same PMU. Try reorganizing the group.\n");
-}
-
static void print_counters(struct timespec *ts, int argc, const char **argv)
{
- int interval = stat_config.interval;
- struct perf_evsel *counter;
- char buf[64], *prefix = NULL;
-
/* Do not print anything if we record to the pipe. */
if (STAT_RECORD && perf_stat.data.is_pipe)
return;
- if (interval)
- print_interval(prefix = buf, ts);
- else
- print_header(argc, argv);
-
- if (metric_only) {
- static int num_print_iv;
-
- if (num_print_iv == 0 && !interval)
- print_metric_headers(prefix, false);
- if (num_print_iv++ == 25)
- num_print_iv = 0;
- if (stat_config.aggr_mode == AGGR_GLOBAL && prefix)
- fprintf(stat_config.output, "%s", prefix);
- }
-
- switch (stat_config.aggr_mode) {
- case AGGR_CORE:
- case AGGR_SOCKET:
- print_aggr(prefix);
- break;
- case AGGR_THREAD:
- evlist__for_each_entry(evsel_list, counter) {
- if (is_duration_time(counter))
- continue;
- print_aggr_thread(counter, prefix);
- }
- break;
- case AGGR_GLOBAL:
- evlist__for_each_entry(evsel_list, counter) {
- if (is_duration_time(counter))
- continue;
- print_counter_aggr(counter, prefix);
- }
- if (metric_only)
- fputc('\n', stat_config.output);
- break;
- case AGGR_NONE:
- if (metric_only)
- print_no_aggr_metric(prefix);
- else {
- evlist__for_each_entry(evsel_list, counter) {
- if (is_duration_time(counter))
- continue;
- print_counter(counter, prefix);
- }
- }
- break;
- case AGGR_UNSET:
- default:
- break;
- }
-
- if (!interval && !csv_output)
- print_footer();
-
- fflush(stat_config.output);
+ perf_evlist__print_counters(evsel_list, &stat_config, &target,
+ ts, argc, argv);
}
static volatile int signr = -1;
@@ -1950,7 +700,7 @@ static int enable_metric_only(const struct option *opt __maybe_unused,
const char *s __maybe_unused, int unset)
{
force_metric_only = true;
- metric_only = !unset;
+ stat_config.metric_only = !unset;
return 0;
}
@@ -1958,7 +708,7 @@ static int parse_metric_groups(const struct option *opt,
const char *str,
int unset __maybe_unused)
{
- return metricgroup__parse_groups(opt, str, &metric_events);
+ return metricgroup__parse_groups(opt, str, &stat_config.metric_events);
}
static const struct option stat_options[] = {
@@ -1969,7 +719,7 @@ static const struct option stat_options[] = {
parse_events_option),
OPT_CALLBACK(0, "filter", &evsel_list, "filter",
"event filter", parse_filter),
- OPT_BOOLEAN('i', "no-inherit", &no_inherit,
+ OPT_BOOLEAN('i', "no-inherit", &stat_config.no_inherit,
"child tasks do not inherit counters"),
OPT_STRING('p', "pid", &target.pid, "pid",
"stat events on existing process id"),
@@ -1982,11 +732,11 @@ static const struct option stat_options[] = {
OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
- OPT_INTEGER('r', "repeat", &run_count,
+ OPT_INTEGER('r', "repeat", &stat_config.run_count,
"repeat command and print average + stddev (max: 100, forever: 0)"),
- OPT_BOOLEAN(0, "table", &walltime_run_table,
+ OPT_BOOLEAN(0, "table", &stat_config.walltime_run_table,
"display details about each run (only with -r option)"),
- OPT_BOOLEAN('n', "null", &null_run,
+ OPT_BOOLEAN('n', "null", &stat_config.null_run,
"null run - dont start any counters"),
OPT_INCR('d', "detailed", &detailed_run,
"detailed run - start a lot of events"),
@@ -1999,8 +749,8 @@ static const struct option stat_options[] = {
"list of cpus to monitor in system-wide"),
OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
"disable CPU count aggregation", AGGR_NONE),
- OPT_BOOLEAN(0, "no-merge", &no_merge, "Do not merge identical named events"),
- OPT_STRING('x', "field-separator", &csv_sep, "separator",
+ OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
+ OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
"print counts with custom separator"),
OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
"monitor event in cgroup name only", parse_cgroups),
@@ -2017,7 +767,7 @@ static const struct option stat_options[] = {
"(overhead is possible for values <= 100ms)"),
OPT_INTEGER(0, "interval-count", &stat_config.times,
"print counts for fixed number of times"),
- OPT_BOOLEAN(0, "interval-clear", &interval_clear,
+ OPT_BOOLEAN(0, "interval-clear", &stat_config.interval_clear,
"clear screen in between new interval"),
OPT_UINTEGER(0, "timeout", &stat_config.timeout,
"stop workload and print counts after a timeout period in ms (>= 10ms)"),
@@ -2027,9 +777,9 @@ static const struct option stat_options[] = {
"aggregate counts per physical processor core", AGGR_CORE),
OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
"aggregate counts per thread", AGGR_THREAD),
- OPT_UINTEGER('D', "delay", &initial_delay,
+ OPT_UINTEGER('D', "delay", &stat_config.initial_delay,
"ms to wait before starting measurement after program start"),
- OPT_CALLBACK_NOOPT(0, "metric-only", &metric_only, NULL,
+ OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
"Only print computed metrics. No raw values", enable_metric_only),
OPT_BOOLEAN(0, "topdown", &topdown_run,
"measure topdown level 1 statistics"),
@@ -2041,12 +791,14 @@ static const struct option stat_options[] = {
OPT_END()
};
-static int perf_stat__get_socket(struct cpu_map *map, int cpu)
+static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
+ struct cpu_map *map, int cpu)
{
return cpu_map__get_socket(map, cpu, NULL);
}
-static int perf_stat__get_core(struct cpu_map *map, int cpu)
+static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
+ struct cpu_map *map, int cpu)
{
return cpu_map__get_core(map, cpu, NULL);
}
@@ -2063,9 +815,8 @@ static int cpu_map__get_max(struct cpu_map *map)
return max;
}
-static struct cpu_map *cpus_aggr_map;
-
-static int perf_stat__get_aggr(aggr_get_id_t get_id, struct cpu_map *map, int idx)
+static int perf_stat__get_aggr(struct perf_stat_config *config,
+ aggr_get_id_t get_id, struct cpu_map *map, int idx)
{
int cpu;
@@ -2074,20 +825,22 @@ static int perf_stat__get_aggr(aggr_get_id_t get_id, struct cpu_map *map, int id
cpu = map->map[idx];
- if (cpus_aggr_map->map[cpu] == -1)
- cpus_aggr_map->map[cpu] = get_id(map, idx);
+ if (config->cpus_aggr_map->map[cpu] == -1)
+ config->cpus_aggr_map->map[cpu] = get_id(config, map, idx);
- return cpus_aggr_map->map[cpu];
+ return config->cpus_aggr_map->map[cpu];
}
-static int perf_stat__get_socket_cached(struct cpu_map *map, int idx)
+static int perf_stat__get_socket_cached(struct perf_stat_config *config,
+ struct cpu_map *map, int idx)
{
- return perf_stat__get_aggr(perf_stat__get_socket, map, idx);
+ return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
}
-static int perf_stat__get_core_cached(struct cpu_map *map, int idx)
+static int perf_stat__get_core_cached(struct perf_stat_config *config,
+ struct cpu_map *map, int idx)
{
- return perf_stat__get_aggr(perf_stat__get_core, map, idx);
+ return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
}
static int perf_stat_init_aggr_mode(void)
@@ -2096,18 +849,18 @@ static int perf_stat_init_aggr_mode(void)
switch (stat_config.aggr_mode) {
case AGGR_SOCKET:
- if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
+ if (cpu_map__build_socket_map(evsel_list->cpus, &stat_config.aggr_map)) {
perror("cannot build socket map");
return -1;
}
- aggr_get_id = perf_stat__get_socket_cached;
+ stat_config.aggr_get_id = perf_stat__get_socket_cached;
break;
case AGGR_CORE:
- if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) {
+ if (cpu_map__build_core_map(evsel_list->cpus, &stat_config.aggr_map)) {
perror("cannot build core map");
return -1;
}
- aggr_get_id = perf_stat__get_core_cached;
+ stat_config.aggr_get_id = perf_stat__get_core_cached;
break;
case AGGR_NONE:
case AGGR_GLOBAL:
@@ -2123,16 +876,16 @@ static int perf_stat_init_aggr_mode(void)
* the aggregation translate cpumap.
*/
nr = cpu_map__get_max(evsel_list->cpus);
- cpus_aggr_map = cpu_map__empty_new(nr + 1);
- return cpus_aggr_map ? 0 : -ENOMEM;
+ stat_config.cpus_aggr_map = cpu_map__empty_new(nr + 1);
+ return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}
static void perf_stat__exit_aggr_mode(void)
{
- cpu_map__put(aggr_map);
- cpu_map__put(cpus_aggr_map);
- aggr_map = NULL;
- cpus_aggr_map = NULL;
+ cpu_map__put(stat_config.aggr_map);
+ cpu_map__put(stat_config.cpus_aggr_map);
+ stat_config.aggr_map = NULL;
+ stat_config.cpus_aggr_map = NULL;
}
static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, int idx)
@@ -2190,12 +943,14 @@ static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
}
-static int perf_stat__get_socket_file(struct cpu_map *map, int idx)
+static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
+ struct cpu_map *map, int idx)
{
return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
}
-static int perf_stat__get_core_file(struct cpu_map *map, int idx)
+static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
+ struct cpu_map *map, int idx)
{
return perf_env__get_core(map, idx, &perf_stat.session->header.env);
}
@@ -2206,18 +961,18 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
switch (stat_config.aggr_mode) {
case AGGR_SOCKET:
- if (perf_env__build_socket_map(env, evsel_list->cpus, &aggr_map)) {
+ if (perf_env__build_socket_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
perror("cannot build socket map");
return -1;
}
- aggr_get_id = perf_stat__get_socket_file;
+ stat_config.aggr_get_id = perf_stat__get_socket_file;
break;
case AGGR_CORE:
- if (perf_env__build_core_map(env, evsel_list->cpus, &aggr_map)) {
+ if (perf_env__build_core_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
perror("cannot build core map");
return -1;
}
- aggr_get_id = perf_stat__get_core_file;
+ stat_config.aggr_get_id = perf_stat__get_core_file;
break;
case AGGR_NONE:
case AGGR_GLOBAL:
@@ -2401,7 +1156,7 @@ static int add_default_attributes(void)
struct parse_events_error errinfo;
/* Set attrs if no event is selected and !null_run: */
- if (null_run)
+ if (stat_config.null_run)
return 0;
if (transaction_run) {
@@ -2414,7 +1169,7 @@ static int add_default_attributes(void)
struct option opt = { .value = &evsel_list };
return metricgroup__parse_groups(&opt, "transaction",
- &metric_events);
+ &stat_config.metric_events);
}
if (pmu_have_event("cpu", "cycles-ct") &&
@@ -2452,7 +1207,7 @@ static int add_default_attributes(void)
if (pmu_have_event("msr", "aperf") &&
pmu_have_event("msr", "smi")) {
if (!force_metric_only)
- metric_only = true;
+ stat_config.metric_only = true;
err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
} else {
fprintf(stderr, "To measure SMI cost, it needs "
@@ -2483,7 +1238,7 @@ static int add_default_attributes(void)
}
if (!force_metric_only)
- metric_only = true;
+ stat_config.metric_only = true;
if (topdown_filter_events(topdown_attrs, &str,
arch_topdown_check_group(&warn)) < 0) {
pr_err("Out of memory\n");
@@ -2580,7 +1335,7 @@ static int __cmd_record(int argc, const char **argv)
if (output_name)
data->file.path = output_name;
- if (run_count != 1 || forever) {
+ if (stat_config.run_count != 1 || forever) {
pr_err("Cannot use -r option with perf stat record.\n");
return -1;
}
@@ -2599,9 +1354,8 @@ static int __cmd_record(int argc, const char **argv)
return argc;
}
-static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_session *session)
+static int process_stat_round_event(struct perf_session *session,
+ union perf_event *event)
{
struct stat_round_event *stat_round = &event->stat_round;
struct perf_evsel *counter;
@@ -2626,10 +1380,10 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
}
static
-int process_stat_config_event(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session __maybe_unused)
+int process_stat_config_event(struct perf_session *session,
+ union perf_event *event)
{
+ struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
perf_event__read_stat_config(&stat_config, &event->stat_config);
@@ -2669,10 +1423,10 @@ static int set_maps(struct perf_stat *st)
}
static
-int process_thread_map_event(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session __maybe_unused)
+int process_thread_map_event(struct perf_session *session,
+ union perf_event *event)
{
+ struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
if (st->threads) {
@@ -2688,10 +1442,10 @@ int process_thread_map_event(struct perf_tool *tool,
}
static
-int process_cpu_map_event(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session __maybe_unused)
+int process_cpu_map_event(struct perf_session *session,
+ union perf_event *event)
{
+ struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
struct cpu_map *cpus;
@@ -2853,12 +1607,12 @@ int cmd_stat(int argc, const char **argv)
perf_stat__collect_metric_expr(evsel_list);
perf_stat__init_shadow_stats();
- if (csv_sep) {
- csv_output = true;
- if (!strcmp(csv_sep, "\\t"))
- csv_sep = "\t";
+ if (stat_config.csv_sep) {
+ stat_config.csv_output = true;
+ if (!strcmp(stat_config.csv_sep, "\\t"))
+ stat_config.csv_sep = "\t";
} else
- csv_sep = DEFAULT_SEPARATOR;
+ stat_config.csv_sep = DEFAULT_SEPARATOR;
if (argc && !strncmp(argv[0], "rec", 3)) {
argc = __cmd_record(argc, argv);
@@ -2883,17 +1637,17 @@ int cmd_stat(int argc, const char **argv)
goto out;
}
- if (metric_only && stat_config.aggr_mode == AGGR_THREAD) {
+ if (stat_config.metric_only && stat_config.aggr_mode == AGGR_THREAD) {
fprintf(stderr, "--metric-only is not supported with --per-thread\n");
goto out;
}
- if (metric_only && run_count > 1) {
+ if (stat_config.metric_only && stat_config.run_count > 1) {
fprintf(stderr, "--metric-only is not supported with -r\n");
goto out;
}
- if (walltime_run_table && run_count <= 1) {
+ if (stat_config.walltime_run_table && stat_config.run_count <= 1) {
fprintf(stderr, "--table is only supported with -r\n");
parse_options_usage(stat_usage, stat_options, "r", 1);
parse_options_usage(NULL, stat_options, "table", 0);
@@ -2931,7 +1685,7 @@ int cmd_stat(int argc, const char **argv)
/*
* let the spreadsheet do the pretty-printing
*/
- if (csv_output) {
+ if (stat_config.csv_output) {
/* User explicitly passed -B? */
if (big_num_opt == 1) {
fprintf(stderr, "-B option not supported with -x\n");
@@ -2939,9 +1693,9 @@ int cmd_stat(int argc, const char **argv)
parse_options_usage(NULL, stat_options, "x", 1);
goto out;
} else /* Nope, so disable big number formatting */
- big_num = false;
+ stat_config.big_num = false;
} else if (big_num_opt == 0) /* User passed --no-big-num */
- big_num = false;
+ stat_config.big_num = false;
setup_system_wide(argc);
@@ -2949,21 +1703,21 @@ int cmd_stat(int argc, const char **argv)
* Display user/system times only for single
* run and when there's specified tracee.
*/
- if ((run_count == 1) && target__none(&target))
- ru_display = true;
+ if ((stat_config.run_count == 1) && target__none(&target))
+ stat_config.ru_display = true;
- if (run_count < 0) {
+ if (stat_config.run_count < 0) {
pr_err("Run count must be a positive number\n");
parse_options_usage(stat_usage, stat_options, "r", 1);
goto out;
- } else if (run_count == 0) {
+ } else if (stat_config.run_count == 0) {
forever = true;
- run_count = 1;
+ stat_config.run_count = 1;
}
- if (walltime_run_table) {
- walltime_run = zalloc(run_count * sizeof(walltime_run[0]));
- if (!walltime_run) {
+ if (stat_config.walltime_run_table) {
+ stat_config.walltime_run = zalloc(stat_config.run_count * sizeof(stat_config.walltime_run[0]));
+ if (!stat_config.walltime_run) {
pr_err("failed to setup -r option");
goto out;
}
@@ -3066,6 +1820,17 @@ int cmd_stat(int argc, const char **argv)
goto out;
/*
+ * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
+ * and avoids older tools showing confusing messages.
+ *
+ * However, for pipe sessions we need to keep it zero, because script's
+ * perf_evsel__check_attr() is triggered by attr->sample_type != 0, and
+ * we can't run it on stat sessions.
+ */
+ stat_config.identifier = !(STAT_RECORD && perf_stat.data.is_pipe);
+
+ /*
* We don't want to block the signals - that would cause
* child tasks to inherit that and Ctrl-C would not work.
* What we want is for Ctrl-C to work in the exec()-ed
@@ -3079,8 +1844,8 @@ int cmd_stat(int argc, const char **argv)
signal(SIGABRT, skip_signal);
status = 0;
- for (run_idx = 0; forever || run_idx < run_count; run_idx++) {
- if (run_count != 1 && verbose > 0)
+ for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
+ if (stat_config.run_count != 1 && verbose > 0)
fprintf(output, "[ perf stat: executing run #%d ... ]\n",
run_idx + 1);
@@ -3132,7 +1897,7 @@ int cmd_stat(int argc, const char **argv)
perf_stat__exit_aggr_mode();
perf_evlist__free_stats(evsel_list);
out:
- free(walltime_run);
+ free(stat_config.walltime_run);
if (smi_cost && smi_reset)
sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 22ab8e67c760..90289f31dd87 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -181,7 +181,7 @@ static int __tp_field__init_uint(struct tp_field *field, int size, int offset, b
return 0;
}
-static int tp_field__init_uint(struct tp_field *field, struct format_field *format_field, bool needs_swap)
+static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}
@@ -198,7 +198,7 @@ static int __tp_field__init_ptr(struct tp_field *field, int offset)
return 0;
}
-static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
+static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
return __tp_field__init_ptr(field, format_field->offset);
}
@@ -214,7 +214,7 @@ static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
struct tp_field *field,
const char *name)
{
- struct format_field *format_field = perf_evsel__field(evsel, name);
+ struct tep_format_field *format_field = perf_evsel__field(evsel, name);
if (format_field == NULL)
return -1;
@@ -230,7 +230,7 @@ static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
struct tp_field *field,
const char *name)
{
- struct format_field *format_field = perf_evsel__field(evsel, name);
+ struct tep_format_field *format_field = perf_evsel__field(evsel, name);
if (format_field == NULL)
return -1;
@@ -288,6 +288,13 @@ static int perf_evsel__init_augmented_syscall_tp_args(struct perf_evsel *evsel)
return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}
+static int perf_evsel__init_augmented_syscall_tp_ret(struct perf_evsel *evsel)
+{
+ struct syscall_tp *sc = evsel->priv;
+
+ return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
+}
+
static int perf_evsel__init_raw_syscall_tp(struct perf_evsel *evsel, void *handler)
{
evsel->priv = malloc(sizeof(struct syscall_tp));
@@ -498,16 +505,6 @@ static const char *clockid[] = {
};
static DEFINE_STRARRAY(clockid);
-static const char *socket_families[] = {
- "UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
- "BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
- "SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
- "RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
- "BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
- "ALG", "NFC", "VSOCK",
-};
-static DEFINE_STRARRAY(socket_families);
-
static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
struct syscall_arg *arg)
{
@@ -631,6 +628,8 @@ static struct syscall_fmt {
} syscall_fmts[] = {
{ .name = "access",
.arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
+ { .name = "bind",
+ .arg = { [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ }, }, },
{ .name = "bpf",
.arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
{ .name = "brk", .hexret = true,
@@ -645,6 +644,8 @@ static struct syscall_fmt {
[4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, },
{ .name = "close",
.arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
+ { .name = "connect",
+ .arg = { [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ }, }, },
{ .name = "epoll_ctl",
.arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
{ .name = "eventfd2",
@@ -801,7 +802,8 @@ static struct syscall_fmt {
{ .name = "sendmsg",
.arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
{ .name = "sendto",
- .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
+ .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
+ [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
{ .name = "set_tid_address", .errpid = true, },
{ .name = "setitimer",
.arg = { [0] = STRARRAY(which, itimers), }, },
@@ -830,6 +832,7 @@ static struct syscall_fmt {
.arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
{ .name = "tkill",
.arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
+ { .name = "umount2", .alias = "umount", },
{ .name = "uname", .alias = "newuname", },
{ .name = "unlinkat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
@@ -856,13 +859,15 @@ static struct syscall_fmt *syscall_fmt__find(const char *name)
/*
* is_exit: is this "exit" or "exit_group"?
* is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
+ * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
*/
struct syscall {
- struct event_format *tp_format;
+ struct tep_event_format *tp_format;
int nr_args;
+ int args_size;
bool is_exit;
bool is_open;
- struct format_field *args;
+ struct tep_format_field *args;
const char *name;
struct syscall_fmt *fmt;
struct syscall_arg_fmt *arg_fmt;
@@ -1095,11 +1100,21 @@ static void thread__set_filename_pos(struct thread *thread, const char *bf,
ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}
+static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
+{
+ struct augmented_arg *augmented_arg = arg->augmented.args;
+
+ return scnprintf(bf, size, "%.*s", augmented_arg->size, augmented_arg->value);
+}
+
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
struct syscall_arg *arg)
{
unsigned long ptr = arg->val;
+ if (arg->augmented.args)
+ return syscall_arg__scnprintf_augmented_string(arg, bf, size);
+
if (!arg->trace->vfs_getname)
return scnprintf(bf, size, "%#x", ptr);
@@ -1142,11 +1157,9 @@ static void sig_handler(int sig)
interrupted = sig == SIGINT;
}
-static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
- u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
+static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
- size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
- printed += fprintf_duration(duration, duration_calculated, fp);
+ size_t printed = 0;
if (trace->multiple_threads) {
if (trace->show_comm)
@@ -1157,6 +1170,14 @@ static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thre
return printed;
}
+static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
+ u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
+{
+ size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
+ printed += fprintf_duration(duration, duration_calculated, fp);
+ return printed + trace__fprintf_comm_tid(trace, thread, fp);
+}
+
static int trace__process_event(struct trace *trace, struct machine *machine,
union perf_event *event, struct perf_sample *sample)
{
@@ -1258,10 +1279,12 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
static int syscall__set_arg_fmts(struct syscall *sc)
{
- struct format_field *field;
+ struct tep_format_field *field, *last_field = NULL;
int idx = 0, len;
for (field = sc->args; field; field = field->next, ++idx) {
+ last_field = field;
+
if (sc->fmt && sc->fmt->arg[idx].scnprintf)
continue;
@@ -1270,7 +1293,7 @@ static int syscall__set_arg_fmts(struct syscall *sc)
strcmp(field->name, "path") == 0 ||
strcmp(field->name, "pathname") == 0))
sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
- else if (field->flags & FIELD_IS_POINTER)
+ else if (field->flags & TEP_FIELD_IS_POINTER)
sc->arg_fmt[idx].scnprintf = syscall_arg__scnprintf_hex;
else if (strcmp(field->type, "pid_t") == 0)
sc->arg_fmt[idx].scnprintf = SCA_PID;
@@ -1292,6 +1315,9 @@ static int syscall__set_arg_fmts(struct syscall *sc)
}
}
+ if (last_field)
+ sc->args_size = last_field->offset + last_field->size;
+
return 0;
}
@@ -1472,14 +1498,18 @@ static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
}
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
- unsigned char *args, struct trace *trace,
- struct thread *thread)
+ unsigned char *args, void *augmented_args, int augmented_args_size,
+ struct trace *trace, struct thread *thread)
{
size_t printed = 0;
unsigned long val;
u8 bit = 1;
struct syscall_arg arg = {
.args = args,
+ .augmented = {
+ .size = augmented_args_size,
+ .args = augmented_args,
+ },
.idx = 0,
.mask = 0,
.trace = trace,
@@ -1495,7 +1525,7 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
ttrace->ret_scnprintf = NULL;
if (sc->args != NULL) {
- struct format_field *field;
+ struct tep_format_field *field;
for (field = sc->args; field;
field = field->next, ++arg.idx, bit <<= 1) {
@@ -1654,6 +1684,17 @@ static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
return printed;
}
+static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size)
+{
+ void *augmented_args = NULL;
+
+ *augmented_args_size = sample->raw_size - sc->args_size;
+ if (*augmented_args_size > 0)
+ augmented_args = sample->raw_data + sc->args_size;
+
+ return augmented_args;
+}
+
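
The convention assumed here: the tracepoint's fixed-size fields fill the first sc->args_size bytes of sample->raw_data, and whatever follows is augmentation appended by the BPF program. A sketch of consuming the appended filename (the struct mirrors the BPF side's augmented_filename; names are hypothetical):

struct augmented_filename_payload {
	unsigned int size;	/* length reported by probe_read_str() */
	int reserved;
	char value[];		/* NUL-terminated pathname bytes */
};

static const char *augmented_args__filename(void *augmented_args,
					    int augmented_args_size)
{
	struct augmented_filename_payload *p = augmented_args;

	if (p == NULL || augmented_args_size < (int)sizeof(*p))
		return NULL;	/* nothing appended for this syscall */
	return p->value;
}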
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -1663,6 +1704,8 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
size_t printed = 0;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
+ int augmented_args_size = 0;
+ void *augmented_args = NULL;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
@@ -1686,13 +1729,24 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
trace__printf_interrupted_entry(trace);
-
+ /*
+ * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
+ * arguments, even if the syscall being handled, say "openat", uses only 4.
+ * That breaks the syscall__augmented_args() check for augmented args, because
+ * we calculate syscall->args_size using each syscalls:sys_enter_NAME tracefs
+ * format file; when handling, say, the openat syscall via the
+ * raw_syscalls:sys_enter event we get 6 args when we expected just 4, and
+ * would mistakenly treat the extra 2 u64 args as the augmented filename. So
+ * check here and avoid using augmented syscalls when the evsel is the
+ * raw_syscalls one.
+ */
+ if (evsel != trace->syscalls.events.sys_enter)
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
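
To put numbers on the comment above (sizes assume the usual 64-bit tracefs layout; illustrative only):

/* syscalls:sys_enter_openat: 16 bytes of header + 4 * 8 -> args_size = 48
 * raw_syscalls:sys_enter:    16 bytes of header + 6 * 8 -> raw_size  = 64
 * 64 - 48 = 16 bytes that would be misread as an augmented filename,
 * hence the evsel check above.
 */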
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
- args, trace, thread);
+ args, augmented_args, augmented_args_size, trace, thread);
if (sc->is_exit) {
if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
@@ -1723,7 +1777,8 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evse
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
char msg[1024];
- void *args;
+ void *args, *augmented_args = NULL;
+ int augmented_args_size;
if (sc == NULL)
return -1;
@@ -1738,7 +1793,8 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evse
goto out_put;
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
- syscall__scnprintf_args(sc, msg, sizeof(msg), args, trace, thread);
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
+ syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
fprintf(trace->output, "%s", msg);
err = 0;
out_put:
@@ -2022,6 +2078,7 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
+ struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
int callchain_ret = 0;
if (sample->callchain) {
@@ -2039,13 +2096,31 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
if (trace->trace_syscalls)
fprintf(trace->output, "( ): ");
+ if (thread)
+ trace__fprintf_comm_tid(trace, thread, trace->output);
+
+ if (evsel == trace->syscalls.events.augmented) {
+ int id = perf_evsel__sc_tp_uint(evsel, id, sample);
+ struct syscall *sc = trace__syscall_info(trace, evsel, id);
+
+ if (sc) {
+ fprintf(trace->output, "%s(", sc->name);
+ trace__fprintf_sys_enter(trace, evsel, sample);
+ fputc(')', trace->output);
+ goto newline;
+ }
+
+ /*
+ * XXX: Not having the associated syscall info or not finding/adding
+ * the thread should never happen, but if it does...
+ * fall thru and print it as a bpf_output event.
+ */
+ }
+
fprintf(trace->output, "%s:", evsel->name);
if (perf_evsel__is_bpf_output(evsel)) {
- if (evsel == trace->syscalls.events.augmented)
- trace__fprintf_sys_enter(trace, evsel, sample);
- else
- bpf_output__fprintf(trace, sample);
+ bpf_output__fprintf(trace, sample);
} else if (evsel->tp_format) {
if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
trace__fprintf_sys_enter(trace, evsel, sample)) {
@@ -2055,12 +2130,14 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
}
}
+newline:
fprintf(trace->output, "\n");
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
+ thread__put(thread);
out:
return 0;
}
@@ -3276,12 +3353,8 @@ int cmd_trace(int argc, const char **argv)
goto out;
}
- if (evsel) {
- if (perf_evsel__init_augmented_syscall_tp(evsel) ||
- perf_evsel__init_augmented_syscall_tp_args(evsel))
- goto out;
+ if (evsel)
trace.syscalls.events.augmented = evsel;
- }
err = bpf__setup_stdout(trace.evlist);
if (err) {
@@ -3326,6 +3399,34 @@ int cmd_trace(int argc, const char **argv)
}
}
+ /*
+ * If we are augmenting syscalls, then combine what we put in the
+ * __augmented_syscalls__ BPF map with what is in the
+ * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
+ * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
+ *
+ * We'll switch to look at two BPF maps, one for sys_enter and the
+ * other for sys_exit when we start augmenting the sys_exit paths with
+ * buffers that are being copied from kernel to userspace, think 'read'
+ * syscall.
+ */
+ if (trace.syscalls.events.augmented) {
+ evsel = trace.syscalls.events.augmented;
+
+ if (perf_evsel__init_augmented_syscall_tp(evsel) ||
+ perf_evsel__init_augmented_syscall_tp_args(evsel))
+ goto out;
+ evsel->handler = trace__sys_enter;
+
+ evlist__for_each_entry(trace.evlist, evsel) {
+ if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
+ perf_evsel__init_augmented_syscall_tp(evsel);
+ perf_evsel__init_augmented_syscall_tp_ret(evsel);
+ evsel->handler = trace__sys_exit;
+ }
+ }
+ }
+
if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
return trace__record(&trace, argc-1, &argv[1]);
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 466540ee8ea7..c72cc73a6b09 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -14,6 +14,7 @@ include/uapi/linux/sched.h
include/uapi/linux/stat.h
include/uapi/linux/vhost.h
include/uapi/sound/asound.h
+include/linux/bits.h
include/linux/hash.h
include/uapi/linux/hw_breakpoint.h
arch/x86/include/asm/disabled-features.h
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index 2d0caf20ff3a..bc6c585f74fc 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -30,3 +30,4 @@ perf-test mainporcelain common
perf-timechart mainporcelain common
perf-top mainporcelain common
perf-trace mainporcelain audit
+perf-version mainporcelain common
diff --git a/tools/perf/examples/bpf/augmented_syscalls.c b/tools/perf/examples/bpf/augmented_syscalls.c
index 69a31386d8cd..2ae44813ef2d 100644
--- a/tools/perf/examples/bpf/augmented_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_syscalls.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Augment the openat syscall with the contents of the filename pointer argument.
+ * Augment syscalls with the contents of the pointer arguments.
*
* Test it with:
*
@@ -10,15 +10,14 @@
* the last one should be the one for '/etc/passwd'.
*
* This matches what is marshalled into the raw_syscall:sys_enter payload
- * expected by the 'perf trace' beautifiers, and can be used by them unmodified,
- * which will be done as that feature is implemented in the next csets, for now
- * it will appear in a dump done by the default tracepoint handler in 'perf trace',
- * that uses bpf_output__fprintf() to just dump those contents, as done with
- * the bpf-output event associated with the __bpf_output__ map declared in
- * tools/perf/include/bpf/stdio.h.
+ * expected by the 'perf trace' beautifiers, and can be used by them: they
+ * check whether perf_sample->raw_data holds more than what is expected for
+ * each syscalls:sys_{enter,exit}_SYSCALL tracepoint, using the extra data
+ * as the contents of pointer arguments.
*/
#include <stdio.h>
+#include <linux/socket.h>
struct bpf_map SEC("maps") __augmented_syscalls__ = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
@@ -27,6 +26,44 @@ struct bpf_map SEC("maps") __augmented_syscalls__ = {
.max_entries = __NR_CPUS__,
};
+struct syscall_exit_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ long ret;
+};
+
+struct augmented_filename {
+ unsigned int size;
+ int reserved;
+ char value[256];
+};
+
+#define augmented_filename_syscall(syscall) \
+struct augmented_enter_##syscall##_args { \
+ struct syscall_enter_##syscall##_args args; \
+ struct augmented_filename filename; \
+}; \
+int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
+{ \
+ struct augmented_enter_##syscall##_args augmented_args = { .filename.reserved = 0, }; \
+ unsigned int len = sizeof(augmented_args); \
+ probe_read(&augmented_args.args, sizeof(augmented_args.args), args); \
+ augmented_args.filename.size = probe_read_str(&augmented_args.filename.value, \
+ sizeof(augmented_args.filename.value), \
+ args->filename_ptr); \
+ if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) { \
+ len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size; \
+ len &= sizeof(augmented_args.filename.value) - 1; \
+ } \
+ perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, len); \
+ return 0; \
+} \
+int syscall_exit(syscall)(struct syscall_exit_args *args) \
+{ \
+ return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */ \
+}
+
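
The length trimming in the macro is easiest to see with numbers (the fixed-part size is an example figure; it varies per syscall):

/* value[256], fixed part (tracepoint args + size/reserved) = 56 bytes:
 *   len      = 56 + 256 = 312     (sizeof(augmented_args))
 *   str_size = 12                 ("/etc/passwd" + NUL)
 *   len -= 256 - 12  -> 68        (fixed part + string only)
 *   len &= 256 - 1   -> 68, unchanged; the mask just convinces the BPF
 *                       verifier that len stays bounded
 * perf_event_output() then emits 68 bytes instead of 312.
 */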
struct syscall_enter_openat_args {
unsigned long long common_tp_fields;
long syscall_nr;
@@ -36,20 +73,101 @@ struct syscall_enter_openat_args {
long mode;
};
-struct augmented_enter_openat_args {
- struct syscall_enter_openat_args args;
- char filename[64];
+augmented_filename_syscall(openat);
+
+struct syscall_enter_open_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ char *filename_ptr;
+ long flags;
+ long mode;
+};
+
+augmented_filename_syscall(open);
+
+struct syscall_enter_inotify_add_watch_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ long fd;
+ char *filename_ptr;
+ long mask;
+};
+
+augmented_filename_syscall(inotify_add_watch);
+
+struct statbuf;
+
+struct syscall_enter_newstat_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ char *filename_ptr;
+ struct stat *statbuf;
};
-int syscall_enter(openat)(struct syscall_enter_openat_args *args)
-{
- struct augmented_enter_openat_args augmented_args;
+augmented_filename_syscall(newstat);
+
+#ifndef _K_SS_MAXSIZE
+#define _K_SS_MAXSIZE 128
+#endif
- probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
- probe_read_str(&augmented_args.filename, sizeof(augmented_args.filename), args->filename_ptr);
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU,
- &augmented_args, sizeof(augmented_args));
- return 1;
+#define augmented_sockaddr_syscall(syscall) \
+struct augmented_enter_##syscall##_args { \
+ struct syscall_enter_##syscall##_args args; \
+ struct sockaddr_storage addr; \
+}; \
+int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
+{ \
+ struct augmented_enter_##syscall##_args augmented_args; \
+ unsigned long addrlen = sizeof(augmented_args.addr); \
+ probe_read(&augmented_args.args, sizeof(augmented_args.args), args); \
+/* FIXME_CLANG_OPTIMIZATION_THAT_ACCESSES_USER_CONTROLLED_ADDRLEN_DESPITE_THIS_CHECK */ \
+/* if (addrlen > augmented_args.args.addrlen) */ \
+/* addrlen = augmented_args.args.addrlen; */ \
+/* */ \
+ probe_read(&augmented_args.addr, addrlen, args->addr_ptr); \
+ perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, \
+ sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen); \
+ return 0; \
+} \
+int syscall_exit(syscall)(struct syscall_exit_args *args) \
+{ \
+ return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */ \
}
+struct sockaddr;
+
+struct syscall_enter_bind_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ long fd;
+ struct sockaddr *addr_ptr;
+ unsigned long addrlen;
+};
+
+augmented_sockaddr_syscall(bind);
+
+struct syscall_enter_connect_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ long fd;
+ struct sockaddr *addr_ptr;
+ unsigned long addrlen;
+};
+
+augmented_sockaddr_syscall(connect);
+
+struct syscall_enter_sendto_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ long fd;
+ void *buff;
+ long len;
+ unsigned long flags;
+ struct sockaddr *addr_ptr;
+ long addr_len;
+};
+
+augmented_sockaddr_syscall(sendto);
+
license(GPL);
diff --git a/tools/perf/examples/bpf/etcsnoop.c b/tools/perf/examples/bpf/etcsnoop.c
new file mode 100644
index 000000000000..b59e8812ee8c
--- /dev/null
+++ b/tools/perf/examples/bpf/etcsnoop.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Augment the filename syscalls with the contents of the filename pointer argument,
+ * keeping only those that start with "/etc/".
+ *
+ * Test it with:
+ *
+ * perf trace -e tools/perf/examples/bpf/etcsnoop.c cat /etc/passwd > /dev/null
+ *
+ * It'll filter out the openat syscalls related to the dynamic linker,
+ * leaving just the one for '/etc/passwd'.
+ *
+ * This matches what is marshalled into the raw_syscall:sys_enter payload
+ * expected by the 'perf trace' beautifiers, and can be used by them: they
+ * check whether perf_sample->raw_data holds more than what is expected for
+ * each syscalls:sys_{enter,exit}_SYSCALL tracepoint, using the extra data
+ * as the contents of pointer arguments.
+ */
+
+#include <stdio.h>
+
+struct bpf_map SEC("maps") __augmented_syscalls__ = {
+ .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(u32),
+ .max_entries = __NR_CPUS__,
+};
+
+struct augmented_filename {
+ int size;
+ int reserved;
+ char value[64];
+};
+
+#define augmented_filename_syscall_enter(syscall) \
+struct augmented_enter_##syscall##_args { \
+ struct syscall_enter_##syscall##_args args; \
+ struct augmented_filename filename; \
+}; \
+int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
+{ \
+ char etc[6] = "/etc/"; \
+ struct augmented_enter_##syscall##_args augmented_args = { .filename.reserved = 0, }; \
+ probe_read(&augmented_args.args, sizeof(augmented_args.args), args); \
+ augmented_args.filename.size = probe_read_str(&augmented_args.filename.value, \
+ sizeof(augmented_args.filename.value), \
+ args->filename_ptr); \
+ if (__builtin_memcmp(augmented_args.filename.value, etc, 4) != 0) \
+ return 0; \
+ perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, \
+ (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
+ augmented_args.filename.size)); \
+ return 0; \
+}
+
+struct syscall_enter_openat_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ long dfd;
+ char *filename_ptr;
+ long flags;
+ long mode;
+};
+
+augmented_filename_syscall_enter(openat);
+
+struct syscall_enter_open_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ char *filename_ptr;
+ long flags;
+ long mode;
+};
+
+augmented_filename_syscall_enter(open);
+
+license(GPL);
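
Worth noting: the perf_event_output() size computed above trims the payload to
just the filename bytes probe_read_str() actually copied, instead of always
sending the full 64-byte value[] array. A standalone sketch of that arithmetic,
with a stand-in struct for the sys_enter fields (hypothetical, for illustration):

#include <stdio.h>

struct augmented_filename {
	int  size;
	int  reserved;
	char value[64];
};

struct payload {
	long tp_fields[4];		/* stand-in for the sys_enter fields */
	struct augmented_filename filename;
};

int main(void)
{
	struct payload p = { .filename = { .size = 12 } }; /* "/etc/passwd" + NUL */
	size_t out = sizeof(p) - sizeof(p.filename.value) + p.filename.size;

	/* Emits 52 bytes instead of the full 104: header + 12 filename bytes. */
	printf("emitting %zu of %zu bytes\n", out, sizeof(p));
	return 0;
}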
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
index 47897d65e799..52b6d87fe822 100644
--- a/tools/perf/include/bpf/bpf.h
+++ b/tools/perf/include/bpf/bpf.h
@@ -26,6 +26,9 @@ struct bpf_map {
#define syscall_enter(name) \
SEC("syscalls:sys_enter_" #name) syscall_enter_ ## name
+#define syscall_exit(name) \
+ SEC("syscalls:sys_exit_" #name) syscall_exit_ ## name
+
#define license(name) \
char _license[] SEC("license") = #name; \
int _version SEC("version") = LINUX_VERSION_CODE;
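
For clarity, these helpers only paste a tracepoint section name onto a uniquely
named function; assuming the 'openat' arg structs from the examples above, a use
such as syscall_enter(openat) expands roughly to:

int SEC("syscalls:sys_enter_openat") syscall_enter_openat(struct syscall_enter_openat_args *args);
int SEC("syscalls:sys_exit_openat")  syscall_exit_openat(struct syscall_exit_args *args);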
diff --git a/tools/perf/include/bpf/linux/socket.h b/tools/perf/include/bpf/linux/socket.h
new file mode 100644
index 000000000000..7f844568dab8
--- /dev/null
+++ b/tools/perf/include/bpf/linux/socket.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_SOCKET_H
+#define _UAPI_LINUX_SOCKET_H
+
+/*
+ * Desired design of maximum size and alignment (see RFC2553)
+ */
+#define _K_SS_MAXSIZE 128 /* Implementation specific max size */
+#define _K_SS_ALIGNSIZE (__alignof__ (struct sockaddr *))
+ /* Implementation specific desired alignment */
+
+typedef unsigned short __kernel_sa_family_t;
+
+struct __kernel_sockaddr_storage {
+ __kernel_sa_family_t ss_family; /* address family */
+ /* Following field(s) are implementation specific */
+ char __data[_K_SS_MAXSIZE - sizeof(unsigned short)];
+ /* space to achieve desired size, */
+ /* _SS_MAXSIZE value minus size of ss_family */
+} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */
+
+#define sockaddr_storage __kernel_sockaddr_storage
+
+#endif /* _UAPI_LINUX_SOCKET_H */
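
A quick compile-time sanity check of the layout above, as a sketch (C11
_Static_assert; these properties hold for the real uapi header):

_Static_assert(sizeof(struct __kernel_sockaddr_storage) == _K_SS_MAXSIZE,
	       "sockaddr_storage must be exactly 128 bytes");
_Static_assert(__alignof__(struct __kernel_sockaddr_storage) == _K_SS_ALIGNSIZE,
	       "sockaddr_storage must be pointer aligned");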
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json
new file mode 100644
index 000000000000..abc98b018446
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC",
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC",
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC",
+ },
+ {
+ "PublicDescription": "Mispredicted or not predicted branch speculatively executed",
+ "EventCode": "0x10",
+ "EventName": "BR_MIS_PRED",
+ "BriefDescription": "Branch mispredicted"
+ },
+ {
+ "PublicDescription": "Predictable branch speculatively executed",
+ "EventCode": "0x12",
+ "EventName": "BR_PRED",
+ "BriefDescription": "Predictable branch"
+ },
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json
new file mode 100644
index 000000000000..687b2629e1d1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json
@@ -0,0 +1,26 @@
+[
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD",
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR",
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_SHARED",
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NOT_SHARED",
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NORMAL",
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_PERIPH",
+ },
+ {
+ "PublicDescription": "Bus access",
+ "EventCode": "0x19",
+ "EventName": "BUS_ACCESS",
+ "BriefDescription": "Bus access"
+ },
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
new file mode 100644
index 000000000000..df9201434cb6
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
@@ -0,0 +1,191 @@
+[
+ {
+ "ArchStdEvent": "L1D_CACHE_RD",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL",
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD",
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL",
+ },
+ {
+ "PublicDescription": "Level 1 instruction cache refill",
+ "EventCode": "0x01",
+ "EventName": "L1I_CACHE_REFILL",
+ "BriefDescription": "L1I cache refill"
+ },
+ {
+ "PublicDescription": "Level 1 instruction TLB refill",
+ "EventCode": "0x02",
+ "EventName": "L1I_TLB_REFILL",
+ "BriefDescription": "L1I TLB refill"
+ },
+ {
+ "PublicDescription": "Level 1 data cache refill",
+ "EventCode": "0x03",
+ "EventName": "L1D_CACHE_REFILL",
+ "BriefDescription": "L1D cache refill"
+ },
+ {
+ "PublicDescription": "Level 1 data cache access",
+ "EventCode": "0x04",
+ "EventName": "L1D_CACHE_ACCESS",
+ "BriefDescription": "L1D cache access"
+ },
+ {
+ "PublicDescription": "Level 1 data TLB refill",
+ "EventCode": "0x05",
+ "EventName": "L1D_TLB_REFILL",
+ "BriefDescription": "L1D TLB refill"
+ },
+ {
+ "PublicDescription": "Level 1 instruction cache access",
+ "EventCode": "0x14",
+ "EventName": "L1I_CACHE_ACCESS",
+ "BriefDescription": "L1I cache access"
+ },
+ {
+ "PublicDescription": "Level 2 data cache access",
+ "EventCode": "0x16",
+ "EventName": "L2D_CACHE_ACCESS",
+ "BriefDescription": "L2D cache access"
+ },
+ {
+ "PublicDescription": "Level 2 data refill",
+ "EventCode": "0x17",
+ "EventName": "L2D_CACHE_REFILL",
+ "BriefDescription": "L2D cache refill"
+ },
+ {
+ "PublicDescription": "Level 2 data cache, Write-Back",
+ "EventCode": "0x18",
+ "EventName": "L2D_CACHE_WB",
+ "BriefDescription": "L2D cache Write-Back"
+ },
+ {
+ "PublicDescription": "Level 1 data TLB access. This event counts any load or store operation which accesses the data L1 TLB",
+ "EventCode": "0x25",
+ "EventName": "L1D_TLB_ACCESS",
+ "BriefDescription": "L1D TLB access"
+ },
+ {
+ "PublicDescription": "Level 1 instruction TLB access. This event counts any instruction fetch which accesses the instruction L1 TLB",
+ "EventCode": "0x26",
+ "EventName": "L1I_TLB_ACCESS",
+ "BriefDescription": "L1I TLB access"
+ },
+ {
+ "PublicDescription": "Level 2 access to data TLB that caused a page table walk. This event counts on any data access which causes L2D_TLB_REFILL to count",
+ "EventCode": "0x34",
+ "EventName": "L2D_TLB_ACCESS",
+ "BriefDescription": "L2D TLB access"
+ },
+ {
+ "PublicDescription": "Level 2 access to instruciton TLB that caused a page table walk. This event counts on any instruciton access which causes L2I_TLB_REFILL to count",
+ "EventCode": "0x35",
+ "EventName": "L2I_TLB_ACCESS",
+ "BriefDescription": "L2D TLB access"
+ },
+ {
+ "PublicDescription": "Branch target buffer misprediction",
+ "EventCode": "0x102",
+ "EventName": "BTB_MIS_PRED",
+ "BriefDescription": "BTB misprediction"
+ },
+ {
+ "PublicDescription": "ITB miss",
+ "EventCode": "0x103",
+ "EventName": "ITB_MISS",
+ "BriefDescription": "ITB miss"
+ },
+ {
+ "PublicDescription": "DTB miss",
+ "EventCode": "0x104",
+ "EventName": "DTB_MISS",
+ "BriefDescription": "DTB miss"
+ },
+ {
+ "PublicDescription": "Level 1 data cache late miss",
+ "EventCode": "0x105",
+ "EventName": "L1D_CACHE_LATE_MISS",
+ "BriefDescription": "L1D cache late miss"
+ },
+ {
+ "PublicDescription": "Level 1 data cache prefetch request",
+ "EventCode": "0x106",
+ "EventName": "L1D_CACHE_PREFETCH",
+ "BriefDescription": "L1D cache prefetch"
+ },
+ {
+ "PublicDescription": "Level 2 data cache prefetch request",
+ "EventCode": "0x107",
+ "EventName": "L2D_CACHE_PREFETCH",
+ "BriefDescription": "L2D cache prefetch"
+ },
+ {
+ "PublicDescription": "Level 1 stage 2 TLB refill",
+ "EventCode": "0x111",
+ "EventName": "L1_STAGE2_TLB_REFILL",
+ "BriefDescription": "L1 stage 2 TLB refill"
+ },
+ {
+ "PublicDescription": "Page walk cache level-0 stage-1 hit",
+ "EventCode": "0x112",
+ "EventName": "PAGE_WALK_L0_STAGE1_HIT",
+ "BriefDescription": "Page walk, L0 stage-1 hit"
+ },
+ {
+ "PublicDescription": "Page walk cache level-1 stage-1 hit",
+ "EventCode": "0x113",
+ "EventName": "PAGE_WALK_L1_STAGE1_HIT",
+ "BriefDescription": "Page walk, L1 stage-1 hit"
+ },
+ {
+ "PublicDescription": "Page walk cache level-2 stage-1 hit",
+ "EventCode": "0x114",
+ "EventName": "PAGE_WALK_L2_STAGE1_HIT",
+ "BriefDescription": "Page walk, L2 stage-1 hit"
+ },
+ {
+ "PublicDescription": "Page walk cache level-1 stage-2 hit",
+ "EventCode": "0x115",
+ "EventName": "PAGE_WALK_L1_STAGE2_HIT",
+ "BriefDescription": "Page walk, L1 stage-2 hit"
+ },
+ {
+ "PublicDescription": "Page walk cache level-2 stage-2 hit",
+ "EventCode": "0x116",
+ "EventName": "PAGE_WALK_L2_STAGE2_HIT",
+ "BriefDescription": "Page walk, L2 stage-2 hit"
+ },
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json
new file mode 100644
index 000000000000..38cd1f1a70dc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json
@@ -0,0 +1,20 @@
+[
+ {
+ "PublicDescription": "The number of core clock cycles",
+ "EventCode": "0x11",
+ "EventName": "CPU_CYCLES",
+ "BriefDescription": "Clock cycles"
+ },
+ {
+ "PublicDescription": "FSU clocking gated off cycle",
+ "EventCode": "0x101",
+ "EventName": "FSU_CLOCK_OFF_CYCLES",
+ "BriefDescription": "FSU clocking gated off cycle"
+ },
+ {
+ "PublicDescription": "Wait state cycle",
+ "EventCode": "0x110",
+ "EventName": "Wait_CYCLES",
+ "BriefDescription": "Wait state cycle"
+ },
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/core-imp-def.json
deleted file mode 100644
index bc03c06c3918..000000000000
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/core-imp-def.json
+++ /dev/null
@@ -1,32 +0,0 @@
-[
- {
- "ArchStdEvent": "L1D_CACHE_RD",
- },
- {
- "ArchStdEvent": "L1D_CACHE_WR",
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_RD",
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_WR",
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_RD",
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_WR",
- },
- {
- "ArchStdEvent": "L1D_TLB_RD",
- },
- {
- "ArchStdEvent": "L1D_TLB_WR",
- },
- {
- "ArchStdEvent": "BUS_ACCESS_RD",
- },
- {
- "ArchStdEvent": "BUS_ACCESS_WR",
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json
new file mode 100644
index 000000000000..3720dc28a15f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json
@@ -0,0 +1,50 @@
+[
+ {
+ "ArchStdEvent": "EXC_UNDEF",
+ },
+ {
+ "ArchStdEvent": "EXC_SVC",
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ",
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ",
+ },
+ {
+ "ArchStdEvent": "EXC_HVC",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ",
+ },
+ {
+ "PublicDescription": "Exception taken",
+ "EventCode": "0x09",
+ "EventName": "EXC_TAKEN",
+ "BriefDescription": "Exception taken"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition check pass, exception return",
+ "EventCode": "0x0a",
+ "EventName": "EXC_RETURN",
+ "BriefDescription": "Exception return"
+ },
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json
new file mode 100644
index 000000000000..82cf753e6472
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json
@@ -0,0 +1,89 @@
+[
+ {
+ "ArchStdEvent": "LD_SPEC",
+ },
+ {
+ "ArchStdEvent": "ST_SPEC",
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC",
+ },
+ {
+ "ArchStdEvent": "DP_SPEC",
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC",
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC",
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC",
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC",
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC",
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC",
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC",
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC",
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC",
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, software increment",
+ "EventCode": "0x00",
+ "EventName": "SW_INCR",
+ "BriefDescription": "Software increment"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed",
+ "EventCode": "0x08",
+ "EventName": "INST_RETIRED",
+ "BriefDescription": "Instruction retired"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR",
+ "EventCode": "0x0b",
+ "EventName": "CID_WRITE_RETIRED",
+ "BriefDescription": "Write to CONTEXTIDR"
+ },
+ {
+ "PublicDescription": "Operation speculatively executed",
+ "EventCode": "0x1b",
+ "EventName": "INST_SPEC",
+ "BriefDescription": "Speculatively executed"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed (condition check pass), write to TTBR",
+ "EventCode": "0x1c",
+ "EventName": "TTBR_WRITE_RETIRED",
+ "BriefDescription": "Instruction executed, TTBR write"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, branch. This event counts all branches, taken or not. This excludes exception entries, debug entries and CCFAIL branches",
+ "EventCode": "0x21",
+ "EventName": "BR_RETIRED",
+ "BriefDescription": "Branch retired"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, mispredicted branch. This event counts any branch counted by BR_RETIRED which is not correctly predicted and causes a pipeline flush",
+ "EventCode": "0x22",
+ "EventName": "BR_MISPRED_RETIRED",
+ "BriefDescription": "Mispredicted branch retired"
+ },
+ {
+ "PublicDescription": "Operation speculatively executed, NOP",
+ "EventCode": "0x100",
+ "EventName": "NOP_SPEC",
+ "BriefDescription": "Speculatively executed, NOP"
+ },
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/intrinsic.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/intrinsic.json
new file mode 100644
index 000000000000..2aecc5c2347d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/intrinsic.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "LDREX_SPEC",
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC",
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC",
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC",
+ },
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json
new file mode 100644
index 000000000000..08508697b318
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD",
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+ },
+ {
+ "PublicDescription": "Data memory access",
+ "EventCode": "0x13",
+ "EventName": "MEM_ACCESS",
+ "BriefDescription": "Memory access"
+ },
+ {
+ "PublicDescription": "Local memory error. This event counts any correctable or uncorrectable memory error (ECC or parity) in the protected core RAMs",
+ "EventCode": "0x1a",
+ "EventName": "MEM_ERROR",
+ "BriefDescription": "Memory error"
+ },
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/pipeline.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/pipeline.json
new file mode 100644
index 000000000000..e2087de586bf
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/pipeline.json
@@ -0,0 +1,50 @@
+[
+ {
+ "PublicDescription": "Decode starved for instruction cycle",
+ "EventCode": "0x108",
+ "EventName": "DECODE_STALL",
+ "BriefDescription": "Decode starved"
+ },
+ {
+ "PublicDescription": "Op dispatch stalled cycle",
+ "EventCode": "0x109",
+ "EventName": "DISPATCH_STALL",
+ "BriefDescription": "Dispatch stalled"
+ },
+ {
+ "PublicDescription": "IXA Op non-issue",
+ "EventCode": "0x10a",
+ "EventName": "IXA_STALL",
+ "BriefDescription": "IXA stalled"
+ },
+ {
+ "PublicDescription": "IXB Op non-issue",
+ "EventCode": "0x10b",
+ "EventName": "IXB_STALL",
+ "BriefDescription": "IXB stalled"
+ },
+ {
+ "PublicDescription": "BX Op non-issue",
+ "EventCode": "0x10c",
+ "EventName": "BX_STALL",
+ "BriefDescription": "BX stalled"
+ },
+ {
+ "PublicDescription": "LX Op non-issue",
+ "EventCode": "0x10d",
+ "EventName": "LX_STALL",
+ "BriefDescription": "LX stalled"
+ },
+ {
+ "PublicDescription": "SX Op non-issue",
+ "EventCode": "0x10e",
+ "EventName": "SX_STALL",
+ "BriefDescription": "SX stalled"
+ },
+ {
+ "PublicDescription": "FX Op non-issue",
+ "EventCode": "0x10f",
+ "EventName": "FX_STALL",
+ "BriefDescription": "FX stalled"
+ },
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
index d40498f2cb1e..635c09fda1d9 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
@@ -188,7 +188,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
- "Filter": "filter_band0=1200",
+ "Filter": "filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -199,7 +199,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
- "Filter": "filter_band1=2000",
+ "Filter": "filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -210,7 +210,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
- "Filter": "filter_band2=3000",
+ "Filter": "filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -221,7 +221,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
- "Filter": "filter_band3=4000",
+ "Filter": "filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",
@@ -232,7 +232,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band0=1200",
+ "Filter": "edge=1,filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -243,7 +243,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band1=2000",
+ "Filter": "edge=1,filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -254,7 +254,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band2=4000",
+ "Filter": "edge=1,filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -265,7 +265,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band3=4000",
+ "Filter": "edge=1,filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
index 16034bfd06dd..8755693d86c6 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
@@ -187,7 +187,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
- "Filter": "filter_band0=1200",
+ "Filter": "filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -198,7 +198,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
- "Filter": "filter_band1=2000",
+ "Filter": "filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -209,7 +209,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
- "Filter": "filter_band2=3000",
+ "Filter": "filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -220,7 +220,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
- "Filter": "filter_band3=4000",
+ "Filter": "filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",
@@ -231,7 +231,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band0=1200",
+ "Filter": "edge=1,filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -242,7 +242,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band1=2000",
+ "Filter": "edge=1,filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -253,7 +253,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band2=4000",
+ "Filter": "edge=1,filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -264,7 +264,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band3=4000",
+ "Filter": "edge=1,filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index efcaf6cac2eb..e46f51b17513 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -204,14 +204,23 @@ from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
+PQconnectdb.argtypes = [ c_char_p ]
PQfinish = libpq.PQfinish
+PQfinish.argtypes = [ c_void_p ]
PQstatus = libpq.PQstatus
+PQstatus.restype = c_int
+PQstatus.argtypes = [ c_void_p ]
PQexec = libpq.PQexec
PQexec.restype = c_void_p
+PQexec.argtypes = [ c_void_p, c_char_p ]
PQresultStatus = libpq.PQresultStatus
+PQresultStatus.restype = c_int
+PQresultStatus.argtypes = [ c_void_p ]
PQputCopyData = libpq.PQputCopyData
+PQputCopyData.restype = c_int
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
+PQputCopyEnd.restype = c_int
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index f827bf77e9d2..e4bb82c8aba9 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -440,7 +440,11 @@ def branch_type_table(*x):
def sample_table(*x):
if branches:
- bind_exec(sample_query, 18, x)
+ for xx in x[0:15]:
+ sample_query.addBindValue(str(xx))
+ for xx in x[19:22]:
+ sample_query.addBindValue(str(xx))
+ do_query_(sample_query)
else:
bind_exec(sample_query, 22, x)
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 6c108fa79ae3..0b2b8305c965 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -21,6 +21,7 @@ perf-y += python-use.o
perf-y += bp_signal.o
perf-y += bp_signal_overflow.o
perf-y += bp_account.o
+perf-y += wp.o
perf-y += task-exit.o
perf-y += sw-clock.o
perf-y += mmap-thread-lookup.o
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index d7a5e1b9aa6f..12c09e0ece71 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -121,6 +121,16 @@ static struct test generic_tests[] = {
.is_supported = test__bp_signal_is_supported,
},
{
+ .desc = "Watchpoint",
+ .func = test__wp,
+ .is_supported = test__wp_is_supported,
+ .subtest = {
+ .skip_if_fail = false,
+ .get_nr = test__wp_subtest_get_nr,
+ .get_desc = test__wp_subtest_get_desc,
+ },
+ },
+ {
.desc = "Number of exit events of a simple workload",
.func = test__task_exit,
},
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 699561fa512c..5f8501c68da4 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -8,7 +8,7 @@
static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
int size, bool should_be_signed)
{
- struct format_field *field = perf_evsel__field(evsel, name);
+ struct tep_format_field *field = perf_evsel__field(evsel, name);
int is_signed;
int ret = 0;
@@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
return -1;
}
- is_signed = !!(field->flags | FIELD_IS_SIGNED);
+ is_signed = !!(field->flags | TEP_FIELD_IS_SIGNED);
if (should_be_signed && !is_signed) {
pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
evsel->name, name, is_signed, should_be_signed);
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 3013ac8f83d0..cab7b0aea6ea 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -48,7 +48,7 @@ trace_libc_inet_pton_backtrace() {
*)
eventattr='max-stack=3'
echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
- echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
esac
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index a9760e790563..b82f55fcc294 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -59,6 +59,9 @@ int test__python_use(struct test *test, int subtest);
int test__bp_signal(struct test *test, int subtest);
int test__bp_signal_overflow(struct test *test, int subtest);
int test__bp_accounting(struct test *test, int subtest);
+int test__wp(struct test *test, int subtest);
+const char *test__wp_subtest_get_desc(int subtest);
+int test__wp_subtest_get_nr(void);
int test__task_exit(struct test *test, int subtest);
int test__mem(struct test *test, int subtest);
int test__sw_clock_freq(struct test *test, int subtest);
@@ -106,6 +109,7 @@ int test__unit_number__scnprint(struct test *test, int subtest);
int test__mem2node(struct test *t, int subtest);
bool test__bp_signal_is_supported(void);
+bool test__wp_is_supported(void);
#if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
diff --git a/tools/perf/tests/wp.c b/tools/perf/tests/wp.c
new file mode 100644
index 000000000000..f89e6806557b
--- /dev/null
+++ b/tools/perf/tests/wp.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <linux/hw_breakpoint.h>
+#include "tests.h"
+#include "debug.h"
+#include "cloexec.h"
+
+#define WP_TEST_ASSERT_VAL(fd, text, val) \
+do { \
+ long long count; \
+ wp_read(fd, &count, sizeof(long long)); \
+ TEST_ASSERT_VAL(text, count == val); \
+} while (0)
+
+volatile u64 data1;
+volatile u8 data2[3];
+
+static int wp_read(int fd, long long *count, int size)
+{
+ int ret = read(fd, count, size);
+
+ if (ret != size) {
+ pr_debug("failed to read: %d\n", ret);
+ return -1;
+ }
+ return 0;
+}
+
+static void get__perf_event_attr(struct perf_event_attr *attr, int wp_type,
+ void *wp_addr, unsigned long wp_len)
+{
+ memset(attr, 0, sizeof(struct perf_event_attr));
+ attr->type = PERF_TYPE_BREAKPOINT;
+ attr->size = sizeof(struct perf_event_attr);
+ attr->config = 0;
+ attr->bp_type = wp_type;
+ attr->bp_addr = (unsigned long)wp_addr;
+ attr->bp_len = wp_len;
+ attr->sample_period = 1;
+ attr->sample_type = PERF_SAMPLE_IP;
+ attr->exclude_kernel = 1;
+ attr->exclude_hv = 1;
+}
+
+static int __event(int wp_type, void *wp_addr, unsigned long wp_len)
+{
+ int fd;
+ struct perf_event_attr attr;
+
+ get__perf_event_attr(&attr, wp_type, wp_addr, wp_len);
+ fd = sys_perf_event_open(&attr, 0, -1, -1,
+ perf_event_open_cloexec_flag());
+ if (fd < 0)
+ pr_debug("failed opening event %x\n", attr.bp_type);
+
+ return fd;
+}
+
+static int wp_ro_test(void)
+{
+ int fd;
+ unsigned long tmp, tmp1 = rand();
+
+ fd = __event(HW_BREAKPOINT_R, (void *)&data1, sizeof(data1));
+ if (fd < 0)
+ return -1;
+
+ tmp = data1;
+ WP_TEST_ASSERT_VAL(fd, "RO watchpoint", 1);
+
+ data1 = tmp1 + tmp;
+ WP_TEST_ASSERT_VAL(fd, "RO watchpoint", 1);
+
+ close(fd);
+ return 0;
+}
+
+static int wp_wo_test(void)
+{
+ int fd;
+ unsigned long tmp, tmp1 = rand();
+
+ fd = __event(HW_BREAKPOINT_W, (void *)&data1, sizeof(data1));
+ if (fd < 0)
+ return -1;
+
+ tmp = data1;
+ WP_TEST_ASSERT_VAL(fd, "WO watchpoint", 0);
+
+ data1 = tmp1 + tmp;
+ WP_TEST_ASSERT_VAL(fd, "WO watchpoint", 1);
+
+ close(fd);
+ return 0;
+}
+
+static int wp_rw_test(void)
+{
+ int fd;
+ unsigned long tmp, tmp1 = rand();
+
+ fd = __event(HW_BREAKPOINT_R | HW_BREAKPOINT_W, (void *)&data1,
+ sizeof(data1));
+ if (fd < 0)
+ return -1;
+
+ tmp = data1;
+ WP_TEST_ASSERT_VAL(fd, "RW watchpoint", 1);
+
+ data1 = tmp1 + tmp;
+ WP_TEST_ASSERT_VAL(fd, "RW watchpoint", 2);
+
+ close(fd);
+ return 0;
+}
+
+static int wp_modify_test(void)
+{
+ int fd, ret;
+ unsigned long tmp = rand();
+ struct perf_event_attr new_attr;
+
+ fd = __event(HW_BREAKPOINT_W, (void *)&data1, sizeof(data1));
+ if (fd < 0)
+ return -1;
+
+ data1 = tmp;
+ WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 1);
+
+ /* Modify watchpoint with disabled = 1 */
+ get__perf_event_attr(&new_attr, HW_BREAKPOINT_W, (void *)&data2[0],
+ sizeof(u8) * 2);
+ new_attr.disabled = 1;
+ ret = ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &new_attr);
+ if (ret < 0) {
+ pr_debug("ioctl(PERF_EVENT_IOC_MODIFY_ATTRIBUTES) failed\n");
+ close(fd);
+ return ret;
+ }
+
+ data2[1] = tmp; /* Not Counted */
+ WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 1);
+
+ /* Enable the event */
+ ret = ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (ret < 0) {
+ pr_debug("Failed to enable event\n");
+ close(fd);
+ return ret;
+ }
+
+ data2[1] = tmp; /* Counted */
+ WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 2);
+
+ data2[2] = tmp; /* Not Counted */
+ WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 2);
+
+ close(fd);
+ return 0;
+}
+
+static bool wp_ro_supported(void)
+{
+#if defined (__x86_64__) || defined (__i386__)
+ return false;
+#else
+ return true;
+#endif
+}
+
+static void wp_ro_skip_msg(void)
+{
+#if defined (__x86_64__) || defined (__i386__)
+ pr_debug("Hardware does not support read only watchpoints.\n");
+#endif
+}
+
+static struct {
+ const char *desc;
+ int (*target_func)(void);
+ bool (*is_supported)(void);
+ void (*skip_msg)(void);
+} wp_testcase_table[] = {
+ {
+ .desc = "Read Only Watchpoint",
+ .target_func = &wp_ro_test,
+ .is_supported = &wp_ro_supported,
+ .skip_msg = &wp_ro_skip_msg,
+ },
+ {
+ .desc = "Write Only Watchpoint",
+ .target_func = &wp_wo_test,
+ },
+ {
+ .desc = "Read / Write Watchpoint",
+ .target_func = &wp_rw_test,
+ },
+ {
+ .desc = "Modify Watchpoint",
+ .target_func = &wp_modify_test,
+ },
+};
+
+int test__wp_subtest_get_nr(void)
+{
+ return (int)ARRAY_SIZE(wp_testcase_table);
+}
+
+const char *test__wp_subtest_get_desc(int i)
+{
+ if (i < 0 || i >= (int)ARRAY_SIZE(wp_testcase_table))
+ return NULL;
+ return wp_testcase_table[i].desc;
+}
+
+int test__wp(struct test *test __maybe_unused, int i)
+{
+ if (i < 0 || i >= (int)ARRAY_SIZE(wp_testcase_table))
+ return TEST_FAIL;
+
+ if (wp_testcase_table[i].is_supported &&
+ !wp_testcase_table[i].is_supported()) {
+ wp_testcase_table[i].skip_msg();
+ return TEST_SKIP;
+ }
+
+ return !wp_testcase_table[i].target_func() ? TEST_OK : TEST_FAIL;
+}
+
+/* s390 does not yet have support for
+ * instruction breakpoints using the perf_event_open() system call.
+ */
+bool test__wp_is_supported(void)
+{
+#if defined(__s390x__)
+ return false;
+#else
+ return true;
+#endif
+}
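
For reuse outside the test harness, a minimal standalone sketch of the same attr
setup (error handling trimmed; it mirrors get__perf_event_attr() above):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/* Arm a write watchpoint on 'addr' for the calling thread. */
static long wp_open(void *addr, unsigned long len)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type	    = PERF_TYPE_BREAKPOINT;
	attr.size	    = sizeof(attr);
	attr.bp_type	    = HW_BREAKPOINT_W;
	attr.bp_addr	    = (unsigned long)addr;
	attr.bp_len	    = len;
	attr.sample_period  = 1;
	attr.exclude_kernel = 1;
	attr.exclude_hv	    = 1;

	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}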
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index f528ba35e140..c3b0afd67760 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -7,5 +7,6 @@ endif
libperf-y += kcmp.o
libperf-y += pkey_alloc.o
libperf-y += prctl.o
+libperf-y += sockaddr.o
libperf-y += socket.o
libperf-y += statx.o
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index 9615af5d412b..2570152d3909 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -30,9 +30,36 @@ struct thread;
size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size);
+extern struct strarray strarray__socket_families;
+
+/**
+ * augmented_arg: extra payload for syscall pointer arguments
+ *
+ * If perf_sample->raw_size is more than what a syscall sys_enter_FOO puts
+ * there, the extra bytes are the argument's contents, so that we can show
+ * more than just a pointer. This is done initially with eBPF; the start of
+ * that is the tools/perf/examples/bpf/augmented_syscalls.c example for
+ * openat, but it will eventually be done automagically, caching the running
+ * kernel's tracefs events data into an eBPF C script that then gets compiled,
+ * with its .o file cached for subsequent use. For char pointers, like the
+ * ones for 'open'-like syscalls, this is easy; for the rest we should use
+ * DWARF or, better, BTF, which is much more compact.
+ *
+ * @size: 8 if all we need is an integer, otherwise all of the augmented arg.
+ * @int_arg: will be used for integer-like pointer contents, like 'accept's 'upeer_addrlen'
+ * @value: u64 aligned, for structs, pathnames
+ */
+struct augmented_arg {
+ int size;
+ int int_arg;
+ u64 value[];
+};
+
/**
* @val: value of syscall argument being formatted
* @args: All the args, use syscall_args__val(arg, nth) to access one
+ * @augmented_args: Extra data that can be collected, for instance, with eBPF for expanding the pathname for open, etc
+ * @augmented_args_size: augmented_args total payload size
* @thread: tid state (maps, pid, tid, etc)
* @trace: 'perf trace' internals: all threads, etc
* @parm: private area, may be an strarray, for instance
@@ -43,6 +70,10 @@ size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_
struct syscall_arg {
unsigned long val;
unsigned char *args;
+ struct {
+ struct augmented_arg *args;
+ int size;
+ } augmented;
struct thread *thread;
struct trace *trace;
void *parm;
@@ -106,6 +137,9 @@ size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_a
size_t syscall_arg__scnprintf_prctl_arg3(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_PRCTL_ARG3 syscall_arg__scnprintf_prctl_arg3
+size_t syscall_arg__scnprintf_sockaddr(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_SOCKADDR syscall_arg__scnprintf_sockaddr
+
size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_SK_PROTO syscall_arg__scnprintf_socket_protocol
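
To make the augmented_arg convention concrete, a hedged consumer-side sketch with
a hypothetical accessor (not part of this patch); when augmented bytes are
present, value[] carries the pointed-to struct or pathname, u64 aligned as
documented above:

/* Hypothetical accessor: the augmented pathname bytes, if any were collected. */
static inline const char *augmented_arg__string(const struct augmented_arg *aa)
{
	return (aa && aa->size) ? (const char *)aa->value : NULL;
}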
diff --git a/tools/perf/trace/beauty/sockaddr.c b/tools/perf/trace/beauty/sockaddr.c
new file mode 100644
index 000000000000..71a79f72d9d9
--- /dev/null
+++ b/tools/perf/trace/beauty/sockaddr.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+
+#include "trace/beauty/beauty.h"
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <arpa/inet.h>
+
+static const char *socket_families[] = {
+ "UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
+ "BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
+ "SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
+ "RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
+ "BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
+ "ALG", "NFC", "VSOCK",
+};
+DEFINE_STRARRAY(socket_families);
+
+static size_t af_inet__scnprintf(struct sockaddr *sa, char *bf, size_t size)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+ char tmp[16];
+ return scnprintf(bf, size, ", port: %d, addr: %s", ntohs(sin->sin_port),
+ inet_ntop(sin->sin_family, &sin->sin_addr, tmp, sizeof(tmp)));
+}
+
+static size_t af_inet6__scnprintf(struct sockaddr *sa, char *bf, size_t size)
+{
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+ u32 flowinfo = ntohl(sin6->sin6_flowinfo);
+ char tmp[512];
+ size_t printed = scnprintf(bf, size, ", port: %d, addr: %s", ntohs(sin6->sin6_port),
+ inet_ntop(sin6->sin6_family, &sin6->sin6_addr, tmp, sizeof(tmp)));
+ if (flowinfo != 0)
+ printed += scnprintf(bf + printed, size - printed, ", flowinfo: %lu", flowinfo);
+ if (sin6->sin6_scope_id != 0)
+ printed += scnprintf(bf + printed, size - printed, ", scope_id: %lu", sin6->sin6_scope_id);
+
+ return printed;
+}
+
+static size_t af_local__scnprintf(struct sockaddr *sa, char *bf, size_t size)
+{
+ struct sockaddr_un *sun = (struct sockaddr_un *)sa;
+ return scnprintf(bf, size, ", path: %s", sun->sun_path);
+}
+
+static size_t (*af_scnprintfs[])(struct sockaddr *sa, char *bf, size_t size) = {
+ [AF_LOCAL] = af_local__scnprintf,
+ [AF_INET] = af_inet__scnprintf,
+ [AF_INET6] = af_inet6__scnprintf,
+};
+
+static size_t syscall_arg__scnprintf_augmented_sockaddr(struct syscall_arg *arg, char *bf, size_t size)
+{
+ struct sockaddr *sa = (struct sockaddr *)arg->augmented.args;
+ char family[32];
+ size_t printed;
+
+ strarray__scnprintf(&strarray__socket_families, family, sizeof(family), "%d", sa->sa_family);
+ printed = scnprintf(bf, size, "{ .family: %s", family);
+
+ if (sa->sa_family < ARRAY_SIZE(af_scnprintfs) && af_scnprintfs[sa->sa_family])
+ printed += af_scnprintfs[sa->sa_family](sa, bf + printed, size - printed);
+
+ return printed + scnprintf(bf + printed, size - printed, " }");
+}
+
+size_t syscall_arg__scnprintf_sockaddr(char *bf, size_t size, struct syscall_arg *arg)
+{
+ if (arg->augmented.args)
+ return syscall_arg__scnprintf_augmented_sockaddr(arg, bf, size);
+
+ return scnprintf(bf, size, "%#x", arg->val);
+}
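
A standalone illustration of the AF_INET branch above, outside perf, showing the
rendering these beautifiers produce:

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int main(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET, .sin_port = htons(80) };
	char tmp[INET_ADDRSTRLEN];

	inet_pton(AF_INET, "127.0.0.1", &sin.sin_addr);
	/* Prints: { .family: INET, port: 80, addr: 127.0.0.1 } */
	printf("{ .family: INET, port: %d, addr: %s }\n",
	       ntohs(sin.sin_port),
	       inet_ntop(sin.sin_family, &sin.sin_addr, tmp, sizeof(tmp)));
	return 0;
}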
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 7efe15b9618d..ecd9f9ceda77 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -73,6 +73,7 @@ libperf-y += vdso.o
libperf-y += counts.o
libperf-y += stat.o
libperf-y += stat-shadow.o
+libperf-y += stat-display.o
libperf-y += record.o
libperf-y += srcline.o
libperf-y += data.o
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index db1511359c5e..c4617bcfd521 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -906,9 +906,8 @@ out_free:
return err;
}
-int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_session *session)
+int perf_event__process_auxtrace_info(struct perf_session *session,
+ union perf_event *event)
{
enum auxtrace_type type = event->auxtrace_info.type;
@@ -932,9 +931,8 @@ int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
}
}
-s64 perf_event__process_auxtrace(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session)
+s64 perf_event__process_auxtrace(struct perf_session *session,
+ union perf_event *event)
{
s64 err;
@@ -950,7 +948,7 @@ s64 perf_event__process_auxtrace(struct perf_tool *tool,
if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
return -EINVAL;
- err = session->auxtrace->process_auxtrace_event(session, event, tool);
+ err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
if (err < 0)
return err;
@@ -1185,9 +1183,8 @@ void events_stats__auxtrace_error_warn(const struct events_stats *stats)
}
}
-int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_session *session)
+int perf_event__process_auxtrace_error(struct perf_session *session,
+ union perf_event *event)
{
if (auxtrace__dont_decode(session))
return 0;
@@ -1196,11 +1193,12 @@ int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
+static int __auxtrace_mmap__read(struct perf_mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
bool snapshot, size_t snapshot_size)
{
+ struct auxtrace_mmap *mm = &map->auxtrace_mmap;
u64 head, old = mm->prev, offset, ref;
unsigned char *data = mm->base;
size_t size, head_off, old_off, len1, len2, padding;
@@ -1287,7 +1285,7 @@ static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
ev.auxtrace.tid = mm->tid;
ev.auxtrace.cpu = mm->cpu;
- if (fn(tool, &ev, data1, len1, data2, len2))
+ if (fn(tool, map, &ev, data1, len1, data2, len2))
return -1;
mm->prev = head;
@@ -1306,18 +1304,18 @@ static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
return 1;
}
-int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
+int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn)
{
- return __auxtrace_mmap__read(mm, itr, tool, fn, false, 0);
+ return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}
-int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
+int auxtrace_mmap__read_snapshot(struct perf_mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size)
{
- return __auxtrace_mmap__read(mm, itr, tool, fn, true, snapshot_size);
+ return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
}
/**
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 71fc3bd74299..d88f6e9eb461 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
+#include <asm/bitsperlong.h>
#include "../perf.h"
#include "event.h"
@@ -33,6 +34,7 @@ union perf_event;
struct perf_session;
struct perf_evlist;
struct perf_tool;
+struct perf_mmap;
struct option;
struct record_opts;
struct auxtrace_info_event;
@@ -434,13 +436,14 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
bool per_cpu);
typedef int (*process_auxtrace_t)(struct perf_tool *tool,
+ struct perf_mmap *map,
union perf_event *event, void *data1,
size_t len1, void *data2, size_t len2);
-int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
+int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn);
-int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
+int auxtrace_mmap__read_snapshot(struct perf_mmap *map,
struct auxtrace_record *itr,
struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size);
@@ -517,15 +520,12 @@ int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
struct perf_tool *tool,
struct perf_session *session,
perf_event__handler_t process);
-int perf_event__process_auxtrace_info(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session);
-s64 perf_event__process_auxtrace(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session);
-int perf_event__process_auxtrace_error(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session);
+int perf_event__process_auxtrace_info(struct perf_session *session,
+ union perf_event *event);
+s64 perf_event__process_auxtrace(struct perf_session *session,
+ union perf_event *event);
+int perf_event__process_auxtrace_error(struct perf_session *session,
+ union perf_event *event);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts);
@@ -577,6 +577,23 @@ static inline void auxtrace__free(struct perf_session *session)
return session->auxtrace->free(session);
}
+#define ITRACE_HELP \
+" i: synthesize instructions events\n" \
+" b: synthesize branches events\n" \
+" c: synthesize branches events (calls only)\n" \
+" r: synthesize branches events (returns only)\n" \
+" x: synthesize transactions events\n" \
+" w: synthesize ptwrite events\n" \
+" p: synthesize power events\n" \
+" e: synthesize error events\n" \
+" d: create a debug log\n" \
+" g[len]: synthesize a call chain (use with i or x)\n" \
+" l[len]: synthesize last branch entries (use with i or x)\n" \
+" sNUMBER: skip initial number of events\n" \
+" PERIOD[ns|us|ms|i|t]: specify period to sample stream\n" \
+" concatenate multiple options. Default is ibxwpe or cewp\n"
+
+
#else
static inline struct auxtrace_record *
@@ -717,6 +734,8 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
struct perf_evlist *evlist, int idx,
bool per_cpu);
+#define ITRACE_HELP ""
+
#endif
#endif
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 47aac41349a2..f9ae1a993806 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -1615,7 +1615,7 @@ struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const cha
int bpf__setup_stdout(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
- return IS_ERR(evsel) ? PTR_ERR(evsel) : 0;
+ return PTR_ERR_OR_ZERO(evsel);
}
#define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START)
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index abd38abf1d91..2a36fab76994 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -182,20 +182,20 @@ err_put_field:
}
static struct bt_ctf_field_type*
-get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
+get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
{
unsigned long flags = field->flags;
- if (flags & FIELD_IS_STRING)
+ if (flags & TEP_FIELD_IS_STRING)
return cw->data.string;
- if (!(flags & FIELD_IS_SIGNED)) {
+ if (!(flags & TEP_FIELD_IS_SIGNED)) {
/* unsigned long are mostly pointers */
- if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
+ if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
return cw->data.u64_hex;
}
- if (flags & FIELD_IS_SIGNED) {
+ if (flags & TEP_FIELD_IS_SIGNED) {
if (field->size == 8)
return cw->data.s64;
else
@@ -287,7 +287,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
struct bt_ctf_event_class *event_class,
struct bt_ctf_event *event,
struct perf_sample *sample,
- struct format_field *fmtf)
+ struct tep_format_field *fmtf)
{
struct bt_ctf_field_type *type;
struct bt_ctf_field *array_field;
@@ -304,10 +304,10 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
name = fmtf->alias;
offset = fmtf->offset;
len = fmtf->size;
- if (flags & FIELD_IS_STRING)
- flags &= ~FIELD_IS_ARRAY;
+ if (flags & TEP_FIELD_IS_STRING)
+ flags &= ~TEP_FIELD_IS_ARRAY;
- if (flags & FIELD_IS_DYNAMIC) {
+ if (flags & TEP_FIELD_IS_DYNAMIC) {
unsigned long long tmp_val;
tmp_val = tep_read_number(fmtf->event->pevent,
@@ -317,7 +317,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
offset &= 0xffff;
}
- if (flags & FIELD_IS_ARRAY) {
+ if (flags & TEP_FIELD_IS_ARRAY) {
type = bt_ctf_event_class_get_field_by_name(
event_class, name);
@@ -338,7 +338,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
type = get_tracepoint_field_type(cw, fmtf);
for (i = 0; i < n_items; i++) {
- if (flags & FIELD_IS_ARRAY)
+ if (flags & TEP_FIELD_IS_ARRAY)
field = bt_ctf_field_array_get_field(array_field, i);
else
field = bt_ctf_field_create(type);
@@ -348,7 +348,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
return -1;
}
- if (flags & FIELD_IS_STRING)
+ if (flags & TEP_FIELD_IS_STRING)
ret = string_set_value(field, data + offset + i * len);
else {
unsigned long long value_int;
@@ -357,7 +357,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
fmtf->event->pevent,
data + offset + i * len, len);
- if (!(flags & FIELD_IS_SIGNED))
+ if (!(flags & TEP_FIELD_IS_SIGNED))
ret = bt_ctf_field_unsigned_integer_set_value(
field, value_int);
else
@@ -369,7 +369,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
pr_err("failed to set file value %s\n", name);
goto err_put_field;
}
- if (!(flags & FIELD_IS_ARRAY)) {
+ if (!(flags & TEP_FIELD_IS_ARRAY)) {
ret = bt_ctf_event_set_payload(event, name, field);
if (ret) {
pr_err("failed to set payload %s\n", name);
@@ -378,7 +378,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
}
bt_ctf_field_put(field);
}
- if (flags & FIELD_IS_ARRAY) {
+ if (flags & TEP_FIELD_IS_ARRAY) {
ret = bt_ctf_event_set_payload(event, name, array_field);
if (ret) {
pr_err("Failed add payload array %s\n", name);
@@ -396,10 +396,10 @@ err_put_field:
static int add_tracepoint_fields_values(struct ctf_writer *cw,
struct bt_ctf_event_class *event_class,
struct bt_ctf_event *event,
- struct format_field *fields,
+ struct tep_format_field *fields,
struct perf_sample *sample)
{
- struct format_field *field;
+ struct tep_format_field *field;
int ret;
for (field = fields; field; field = field->next) {
@@ -417,8 +417,8 @@ static int add_tracepoint_values(struct ctf_writer *cw,
struct perf_evsel *evsel,
struct perf_sample *sample)
{
- struct format_field *common_fields = evsel->tp_format->format.common_fields;
- struct format_field *fields = evsel->tp_format->format.fields;
+ struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
+ struct tep_format_field *fields = evsel->tp_format->format.fields;
int ret;
ret = add_tracepoint_fields_values(cw, event_class, event,
@@ -970,7 +970,7 @@ out:
static int event_class_add_field(struct bt_ctf_event_class *event_class,
struct bt_ctf_field_type *type,
- struct format_field *field)
+ struct tep_format_field *field)
{
struct bt_ctf_field_type *t = NULL;
char *name;
@@ -1009,10 +1009,10 @@ static int event_class_add_field(struct bt_ctf_event_class *event_class,
}
static int add_tracepoint_fields_types(struct ctf_writer *cw,
- struct format_field *fields,
+ struct tep_format_field *fields,
struct bt_ctf_event_class *event_class)
{
- struct format_field *field;
+ struct tep_format_field *field;
int ret;
for (field = fields; field; field = field->next) {
@@ -1030,15 +1030,15 @@ static int add_tracepoint_fields_types(struct ctf_writer *cw,
* type and don't care that it is an array. What we don't
* support is an array of strings.
*/
- if (flags & FIELD_IS_STRING)
- flags &= ~FIELD_IS_ARRAY;
+ if (flags & TEP_FIELD_IS_STRING)
+ flags &= ~TEP_FIELD_IS_ARRAY;
- if (flags & FIELD_IS_ARRAY)
+ if (flags & TEP_FIELD_IS_ARRAY)
type = bt_ctf_field_type_array_create(type, field->arraylen);
ret = event_class_add_field(event_class, type, field);
- if (flags & FIELD_IS_ARRAY)
+ if (flags & TEP_FIELD_IS_ARRAY)
bt_ctf_field_type_put(type);
if (ret) {
@@ -1055,8 +1055,8 @@ static int add_tracepoint_types(struct ctf_writer *cw,
struct perf_evsel *evsel,
struct bt_ctf_event_class *class)
{
- struct format_field *common_fields = evsel->tp_format->format.common_fields;
- struct format_field *fields = evsel->tp_format->format.fields;
+ struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
+ struct tep_format_field *fields = evsel->tp_format->format.fields;
int ret;
ret = add_tracepoint_fields_types(cw, common_fields, class);
@@ -1578,7 +1578,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
{
struct perf_session *session;
struct perf_data data = {
- .file.path = input,
+ .file = { .path = input, .fd = -1 },
.mode = PERF_DATA_MODE_READ,
.force = opts->force,
};
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 7123746edcf4..69fbb0a72d0c 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -463,6 +463,28 @@ int db_export__branch_types(struct db_export *dbe)
if (err)
break;
}
+
+ /* Add trace begin / end variants */
+ for (i = 0; branch_types[i].name ; i++) {
+ const char *name = branch_types[i].name;
+ u32 type = branch_types[i].branch_type;
+ char buf[64];
+
+ if (type == PERF_IP_FLAG_BRANCH ||
+ (type & (PERF_IP_FLAG_TRACE_BEGIN | PERF_IP_FLAG_TRACE_END)))
+ continue;
+
+ snprintf(buf, sizeof(buf), "trace begin / %s", name);
+ err = db_export__branch_type(dbe, type | PERF_IP_FLAG_TRACE_BEGIN, buf);
+ if (err)
+ break;
+
+ snprintf(buf, sizeof(buf), "%s / trace end", name);
+ err = db_export__branch_type(dbe, type | PERF_IP_FLAG_TRACE_END, buf);
+ if (err)
+ break;
+ }
+
return err;
}
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 0cd42150f712..bc646185f8d9 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1081,6 +1081,7 @@ void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max
}
*size += sizeof(struct cpu_map_data);
+ *size = PERF_ALIGN(*size, sizeof(u64));
return zalloc(*size);
}
@@ -1560,26 +1561,9 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
return NULL;
}
-try_again:
+
al->map = map_groups__find(mg, al->addr);
- if (al->map == NULL) {
- /*
- * If this is outside of all known maps, and is a negative
- * address, try to look it up in the kernel dso, as it might be
- * a vsyscall or vdso (which executes in user-mode).
- *
- * XXX This is nasty, we should have a symbol list in the
- * "[vdso]" dso, but for now lets use the old trick of looking
- * in the whole kernel symbol list.
- */
- if (cpumode == PERF_RECORD_MISC_USER && machine &&
- mg != &machine->kmaps &&
- machine__kernel_ip(machine, al->addr)) {
- mg = &machine->kmaps;
- load_map = true;
- goto try_again;
- }
- } else {
+ if (al->map != NULL) {
/*
* Kernel maps might be changed when loading symbols so loading
* must be done prior to using kernel maps.
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1a61628a1c12..29d7b97f66fb 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1089,6 +1089,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->exclude_user = 1;
}
+ if (evsel->own_cpus)
+ evsel->attr.read_format |= PERF_FORMAT_ID;
+
/*
* Apply event specific term settings,
* it overloads any global configuration.
@@ -2682,7 +2685,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
return 0;
}
-struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
+struct tep_format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
return tep_find_field(evsel->tp_format, name);
}
@@ -2690,7 +2693,7 @@ struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *nam
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
const char *name)
{
- struct format_field *field = perf_evsel__field(evsel, name);
+ struct tep_format_field *field = perf_evsel__field(evsel, name);
int offset;
if (!field)
@@ -2698,7 +2701,7 @@ void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
offset = field->offset;
- if (field->flags & FIELD_IS_DYNAMIC) {
+ if (field->flags & TEP_FIELD_IS_DYNAMIC) {
offset = *(int *)(sample->raw_data + field->offset);
offset &= 0xffff;
}
@@ -2706,7 +2709,7 @@ void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
return sample->raw_data + offset;
}
-u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
+u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
bool needs_swap)
{
u64 value;
@@ -2748,7 +2751,7 @@ u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
const char *name)
{
- struct format_field *field = perf_evsel__field(evsel, name);
+ struct tep_format_field *field = perf_evsel__field(evsel, name);
if (!field)
return 0;
@@ -2940,3 +2943,32 @@ struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
return evsel->evlist->env;
return NULL;
}
+
+static int store_evsel_ids(struct perf_evsel *evsel, struct perf_evlist *evlist)
+{
+ int cpu, thread;
+
+ for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
+ for (thread = 0; thread < xyarray__max_y(evsel->fd);
+ thread++) {
+ int fd = FD(evsel, cpu, thread);
+
+ if (perf_evlist__id_add_fd(evlist, evsel,
+ cpu, thread, fd) < 0)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int perf_evsel__store_ids(struct perf_evsel *evsel, struct perf_evlist *evlist)
+{
+ struct cpu_map *cpus = evsel->cpus;
+ struct thread_map *threads = evsel->threads;
+
+ if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr))
+ return -ENOMEM;
+
+ return store_evsel_ids(evsel, evlist);
+}
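
Note: perf_evsel__store_ids() sizes the evsel's ID arrays from its own cpu/thread maps, then registers every open fd's sample ID with the evlist via perf_evlist__id_add_fd(). A hedged caller sketch (the surrounding error handling is hypothetical):

    /* after perf_evsel__open() succeeded for an event carrying its own
     * cpu map (which had PERF_FORMAT_ID forced on, per the hunk above): */
    if (evsel->own_cpus && perf_evsel__store_ids(evsel, evlist) < 0)
        return -1;
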
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 163c960614d3..4107c39f4a54 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -102,7 +102,7 @@ struct perf_evsel {
char *name;
double scale;
const char *unit;
- struct event_format *tp_format;
+ struct tep_event_format *tp_format;
off_t id_offset;
struct perf_stat_evsel *stats;
void *priv;
@@ -211,7 +211,7 @@ static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *
struct perf_evsel *perf_evsel__new_cycles(bool precise);
-struct event_format *event_format__new(const char *sys, const char *name);
+struct tep_event_format *event_format__new(const char *sys, const char *name);
void perf_evsel__init(struct perf_evsel *evsel,
struct perf_event_attr *attr, int idx);
@@ -296,11 +296,11 @@ static inline char *perf_evsel__strval(struct perf_evsel *evsel,
return perf_evsel__rawptr(evsel, sample, name);
}
-struct format_field;
+struct tep_format_field;
-u64 format_field__intval(struct format_field *field, struct perf_sample *sample, bool needs_swap);
+u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, bool needs_swap);
-struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
+struct tep_format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
#define perf_evsel__match(evsel, t, c) \
(evsel->attr.type == PERF_TYPE_##t && \
@@ -481,4 +481,5 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
struct perf_env *perf_evsel__env(struct perf_evsel *evsel);
+int perf_evsel__store_ids(struct perf_evsel *evsel, struct perf_evlist *evlist);
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 06dfb027879d..0d0a4c6f368b 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -73,7 +73,7 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
}
if (details->trace_fields) {
- struct format_field *field;
+ struct tep_format_field *field;
if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
printed += comma_fprintf(fp, &first, " (not a tracepoint)");
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 3cadc252dd89..1ec1d9bc2d63 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3206,7 +3206,7 @@ static int read_attr(int fd, struct perf_header *ph,
static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
struct tep_handle *pevent)
{
- struct event_format *event;
+ struct tep_event_format *event;
char bf[128];
/* already prepared */
@@ -3448,10 +3448,10 @@ int perf_event__synthesize_features(struct perf_tool *tool,
return ret;
}
-int perf_event__process_feature(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session __maybe_unused)
+int perf_event__process_feature(struct perf_session *session,
+ union perf_event *event)
{
+ struct perf_tool *tool = session->tool;
struct feat_fd ff = { .fd = 0 };
struct feature_event *fe = (struct feature_event *)event;
int type = fe->header.type;
@@ -3637,13 +3637,13 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
}
int perf_event__synthesize_attrs(struct perf_tool *tool,
- struct perf_session *session,
- perf_event__handler_t process)
+ struct perf_evlist *evlist,
+ perf_event__handler_t process)
{
struct perf_evsel *evsel;
int err = 0;
- evlist__for_each_entry(session->evlist, evsel) {
+ evlist__for_each_entry(evlist, evsel) {
err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
evsel->id, process);
if (err) {
@@ -3856,9 +3856,8 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
return aligned_size;
}
-int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_session *session)
+int perf_event__process_tracing_data(struct perf_session *session,
+ union perf_event *event)
{
ssize_t size_read, padding, size = event->tracing_data.size;
int fd = perf_data__fd(session->data);
@@ -3924,9 +3923,8 @@ int perf_event__synthesize_build_id(struct perf_tool *tool,
return err;
}
-int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_session *session)
+int perf_event__process_build_id(struct perf_session *session,
+ union perf_event *event)
{
__event_process_build_id(&event->build_id,
event->build_id.filename,
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 6d7fe44aadc0..e17903caa71d 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -116,15 +116,14 @@ int perf_event__synthesize_extra_attr(struct perf_tool *tool,
perf_event__handler_t process,
bool is_pipe);
-int perf_event__process_feature(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session);
+int perf_event__process_feature(struct perf_session *session,
+ union perf_event *event);
int perf_event__synthesize_attr(struct perf_tool *tool,
struct perf_event_attr *attr, u32 ids, u64 *id,
perf_event__handler_t process);
int perf_event__synthesize_attrs(struct perf_tool *tool,
- struct perf_session *session,
+ struct perf_evlist *evlist,
perf_event__handler_t process);
int perf_event__synthesize_event_update_unit(struct perf_tool *tool,
struct perf_evsel *evsel,
@@ -148,17 +147,15 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp);
int perf_event__synthesize_tracing_data(struct perf_tool *tool,
int fd, struct perf_evlist *evlist,
perf_event__handler_t process);
-int perf_event__process_tracing_data(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session);
+int perf_event__process_tracing_data(struct perf_session *session,
+ union perf_event *event);
int perf_event__synthesize_build_id(struct perf_tool *tool,
struct dso *pos, u16 misc,
perf_event__handler_t process,
struct machine *machine);
-int perf_event__process_build_id(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session);
+int perf_event__process_build_id(struct perf_session *session,
+ union perf_event *event);
bool is_perf_magic(u64 magic);
#define NAME_ALIGN 64
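
Note: every op2-style callback in this header moves from a (tool, event, session) signature to (session, event); a handler that still needs the tool fetches it from the session, as perf_event__process_feature() now does. A hypothetical stub written against the new signature:

    static int my_op2_stub(struct perf_session *session __maybe_unused,
                           union perf_event *event __maybe_unused)
    {
        dump_printf(": unhandled!\n");
        return 0;
    }
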
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index d404bed7003a..58f6a9ceb590 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1165,7 +1165,7 @@ static int intel_pt_walk_tip(struct intel_pt_decoder *decoder)
decoder->pge = false;
decoder->continuous_period = false;
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
- decoder->state.to_ip = 0;
+ decoder->state.type |= INTEL_PT_TRACE_END;
return 0;
}
if (err == INTEL_PT_RETURN)
@@ -1179,9 +1179,13 @@ static int intel_pt_walk_tip(struct intel_pt_decoder *decoder)
decoder->continuous_period = false;
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
- if (decoder->packet.count != 0)
+ if (decoder->packet.count == 0) {
+ decoder->state.to_ip = 0;
+ } else {
+ decoder->state.to_ip = decoder->last_ip;
decoder->ip = decoder->last_ip;
+ }
+ decoder->state.type |= INTEL_PT_TRACE_END;
} else {
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->state.from_ip = decoder->ip;
@@ -1208,7 +1212,8 @@ static int intel_pt_walk_tip(struct intel_pt_decoder *decoder)
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->ip = to_ip;
decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
+ decoder->state.to_ip = to_ip;
+ decoder->state.type |= INTEL_PT_TRACE_END;
return 0;
}
intel_pt_log_at("ERROR: Conditional branch when expecting indirect branch",
@@ -1640,14 +1645,15 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
case INTEL_PT_TIP_PGD:
decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
- if (decoder->packet.count != 0) {
+ if (decoder->packet.count == 0) {
+ decoder->state.to_ip = 0;
+ } else {
intel_pt_set_ip(decoder);
- intel_pt_log("Omitting PGD ip " x64_fmt "\n",
- decoder->ip);
+ decoder->state.to_ip = decoder->ip;
}
decoder->pge = false;
decoder->continuous_period = false;
+ decoder->state.type |= INTEL_PT_TRACE_END;
return 0;
case INTEL_PT_TIP_PGE:
@@ -1661,6 +1667,7 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
intel_pt_set_ip(decoder);
decoder->state.to_ip = decoder->ip;
}
+ decoder->state.type |= INTEL_PT_TRACE_BEGIN;
return 0;
case INTEL_PT_TIP:
@@ -1739,6 +1746,7 @@ next:
intel_pt_set_ip(decoder);
decoder->state.from_ip = 0;
decoder->state.to_ip = decoder->ip;
+ decoder->state.type |= INTEL_PT_TRACE_BEGIN;
return 0;
}
@@ -2077,9 +2085,13 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
- if (decoder->ip)
- return 0;
- break;
+ if (!decoder->ip)
+ break;
+ if (decoder->packet.type == INTEL_PT_TIP_PGE)
+ decoder->state.type |= INTEL_PT_TRACE_BEGIN;
+ if (decoder->packet.type == INTEL_PT_TIP_PGD)
+ decoder->state.type |= INTEL_PT_TRACE_END;
+ return 0;
case INTEL_PT_FUP:
if (intel_pt_have_ip(decoder))
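
Note: with the TIP.PGD changes above, a tracing-disable branch whose packet payload carries an IP now reports its destination instead of always zeroing to_ip, and the state is flagged as a trace end. Illustrative resulting state (hypothetical values):

    /* TIP.PGD with a non-empty IP payload: */
    state.from_ip = decoder->ip;        /* branch source */
    state.to_ip   = decoder->last_ip;   /* previously forced to 0 */
    state.type   |= INTEL_PT_TRACE_END;
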
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index 51c18d67f4ca..ed088d4726ba 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -37,6 +37,8 @@ enum intel_pt_sample_type {
INTEL_PT_EX_STOP = 1 << 6,
INTEL_PT_PWR_EXIT = 1 << 7,
INTEL_PT_CBR_CHG = 1 << 8,
+ INTEL_PT_TRACE_BEGIN = 1 << 9,
+ INTEL_PT_TRACE_END = 1 << 10,
};
enum intel_pt_period_type {
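
Note: INTEL_PT_TRACE_BEGIN/END extend the intel_pt_sample_type bitmask, so several bits may be set in one decoder state. A minimal consumer sketch:

    const struct intel_pt_state *state = intel_pt_decode(decoder);

    if (state->type & INTEL_PT_TRACE_BEGIN)
        ; /* tracing (re)started, e.g. after a TIP.PGE packet */
    if (state->type & INTEL_PT_TRACE_END)
        ; /* tracing stopped, e.g. on a TIP.PGD packet */
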
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index aec68908d604..48c1d415c6b0 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -908,6 +908,11 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
ptq->insn_len = ptq->state->insn_len;
memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
}
+
+ if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
+ ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
+ if (ptq->state->type & INTEL_PT_TRACE_END)
+ ptq->flags |= PERF_IP_FLAG_TRACE_END;
}
static int intel_pt_setup_queue(struct intel_pt *pt,
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 19262f98cd4e..5b0b60f00275 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -19,7 +19,7 @@
#define CLANG_BPF_CMD_DEFAULT_TEMPLATE \
"$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\
"-DLINUX_VERSION_CODE=$LINUX_VERSION_CODE " \
- "$CLANG_OPTIONS $KERNEL_INC_OPTIONS $PERF_BPF_INC_OPTIONS " \
+ "$CLANG_OPTIONS $PERF_BPF_INC_OPTIONS $KERNEL_INC_OPTIONS " \
"-Wno-unused-value -Wno-pointer-sign " \
"-working-directory $WORKING_DIR " \
"-c \"$CLANG_SOURCE\" -target bpf $CLANG_EMIT_LLVM -O2 -o - $LLVM_OPTIONS_PIPE"
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index c4acd2001db0..111ae858cbcb 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2286,7 +2286,8 @@ static int append_inlines(struct callchain_cursor *cursor,
if (!symbol_conf.inline_name || !map || !sym)
return ret;
- addr = map__rip_2objdump(map, ip);
+ addr = map__map_ip(map, ip);
+ addr = map__rip_2objdump(map, addr);
inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
if (!inline_node) {
@@ -2312,7 +2313,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
{
struct callchain_cursor *cursor = arg;
const char *srcline = NULL;
- u64 addr;
+ u64 addr = entry->ip;
if (symbol_conf.hide_unresolved && entry->sym == NULL)
return 0;
@@ -2324,7 +2325,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
* Convert entry->ip from a virtual address to an offset in
* its corresponding binary.
*/
- addr = map__map_ip(entry->map, entry->ip);
+ if (entry->map)
+ addr = map__map_ip(entry->map, entry->ip);
srcline = callchain_srcline(entry->map, entry->sym, addr);
return callchain_cursor_append(cursor, entry->ip,
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 6a6929f208b4..354e54550d2b 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -320,12 +320,11 @@ int map__load(struct map *map)
build_id__sprintf(map->dso->build_id,
sizeof(map->dso->build_id),
sbuild_id);
- pr_warning("%s with build id %s not found",
- name, sbuild_id);
+ pr_debug("%s with build id %s not found", name, sbuild_id);
} else
- pr_warning("Failed to open %s", name);
+ pr_debug("Failed to open %s", name);
- pr_warning(", continuing without symbols\n");
+ pr_debug(", continuing without symbols\n");
return -1;
} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
@@ -334,12 +333,11 @@ int map__load(struct map *map)
if (len > sizeof(DSO__DELETED) &&
strcmp(name + real_len + 1, DSO__DELETED) == 0) {
- pr_warning("%.*s was updated (is prelink enabled?). "
+ pr_debug("%.*s was updated (is prelink enabled?). "
"Restart the long running apps that use it!\n",
(int)real_len, name);
} else {
- pr_warning("no symbols found in %s, maybe install "
- "a debug package?\n", name);
+ pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
}
#endif
return -1;
@@ -712,8 +710,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
if (verbose >= 2) {
if (use_browser) {
- pr_warning("overlapping maps in %s "
- "(disable tui for more info)\n",
+ pr_debug("overlapping maps in %s (disable tui for more info)\n",
map->dso->name);
} else {
fputs("overlapping maps:\n", fp);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 215f69f41672..cdb95b3a1213 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -281,7 +281,7 @@ int perf_mmap__read_init(struct perf_mmap *map)
}
int perf_mmap__push(struct perf_mmap *md, void *to,
- int push(void *to, void *buf, size_t size))
+ int push(struct perf_mmap *map, void *to, void *buf, size_t size))
{
u64 head = perf_mmap__read_head(md);
unsigned char *data = md->base + page_size;
@@ -300,7 +300,7 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
size = md->mask + 1 - (md->start & md->mask);
md->start += size;
- if (push(to, buf, size) < 0) {
+ if (push(md, to, buf, size) < 0) {
rc = -1;
goto out;
}
@@ -310,7 +310,7 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
size = md->end - md->start;
md->start += size;
- if (push(to, buf, size) < 0) {
+ if (push(md, to, buf, size) < 0) {
rc = -1;
goto out;
}
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index 05a6d47c7956..cc5e2d6d17a9 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -4,7 +4,7 @@
#include <linux/compiler.h>
#include <linux/refcount.h>
#include <linux/types.h>
-#include <asm/barrier.h>
+#include <linux/ring_buffer.h>
#include <stdbool.h>
#include "auxtrace.h"
#include "event.h"
@@ -71,21 +71,12 @@ void perf_mmap__consume(struct perf_mmap *map);
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
- struct perf_event_mmap_page *pc = mm->base;
- u64 head = READ_ONCE(pc->data_head);
- rmb();
- return head;
+ return ring_buffer_read_head(mm->base);
}
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
- struct perf_event_mmap_page *pc = md->base;
-
- /*
- * ensure all reads are done before we write the tail out.
- */
- mb();
- pc->data_tail = tail;
+ ring_buffer_write_tail(md->base, tail);
}
union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
@@ -93,7 +84,7 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
union perf_event *perf_mmap__read_event(struct perf_mmap *map);
int perf_mmap__push(struct perf_mmap *md, void *to,
- int push(void *to, void *buf, size_t size));
+ int push(struct perf_mmap *map, void *to, void *buf, size_t size));
size_t perf_mmap__mmap_len(struct perf_mmap *map);
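
Note: the open-coded barriers are replaced by the shared ring-buffer helpers; on architectures with cheap acquire/release semantics those map to smp_load_acquire()/smp_store_release(), and the portable fallback is equivalent to the code removed above. A sketch of the assumed semantics:

    /* consumer side, fallback form: */
    static inline u64 read_head_sketch(struct perf_event_mmap_page *pc)
    {
        u64 head = READ_ONCE(pc->data_head);
        rmb();          /* order all ring-data reads after the head load */
        return head;
    }

    static inline void write_tail_sketch(struct perf_event_mmap_page *pc, u64 tail)
    {
        mb();           /* all ring-data reads complete before freeing space */
        pc->data_tail = tail;
    }
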
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index bad9e0296e9a..1904e7f6ec84 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -80,14 +80,20 @@ static union perf_event *dup_event(struct ordered_events *oe,
return oe->copy_on_queue ? __dup_event(oe, event) : event;
}
-static void free_dup_event(struct ordered_events *oe, union perf_event *event)
+static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
- if (event && oe->copy_on_queue) {
+ if (event) {
oe->cur_alloc_size -= event->header.size;
free(event);
}
}
+static void free_dup_event(struct ordered_events *oe, union perf_event *event)
+{
+ if (oe->copy_on_queue)
+ __free_dup_event(oe, event);
+}
+
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
union perf_event *event)
@@ -95,21 +101,49 @@ static struct ordered_event *alloc_event(struct ordered_events *oe,
struct list_head *cache = &oe->cache;
struct ordered_event *new = NULL;
union perf_event *new_event;
+ size_t size;
new_event = dup_event(oe, event);
if (!new_event)
return NULL;
+ /*
+ * We maintain the following scheme of buffers for ordered
+ * event allocation:
+ *
+ * to_free list -> buffer1 (64K)
+ * buffer2 (64K)
+ * ...
+ *
+ * Each buffer keeps an array of ordered events objects:
+ * buffer -> event[0]
+ * event[1]
+ * ...
+ *
+ * Each allocated ordered event is linked to one of
+ * following lists:
+ * - time ordered list 'events'
+ * - list of currently removed events 'cache'
+ *
+ * Allocation of the ordered event uses the following order
+ * to get the memory:
+ * - use recently removed object from 'cache' list
+ * - use available object in current allocation buffer
+ * - allocate new buffer if the current buffer is full
+ *
+ * Removal of ordered event object moves it from events to
+ * the cache list.
+ */
+ size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);
+
if (!list_empty(cache)) {
new = list_entry(cache->next, struct ordered_event, list);
list_del(&new->list);
} else if (oe->buffer) {
- new = oe->buffer + oe->buffer_idx;
+ new = &oe->buffer->event[oe->buffer_idx];
if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
oe->buffer = NULL;
- } else if (oe->cur_alloc_size < oe->max_alloc_size) {
- size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
-
+ } else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
oe->buffer = malloc(size);
if (!oe->buffer) {
free_dup_event(oe, new_event);
@@ -122,11 +156,11 @@ static struct ordered_event *alloc_event(struct ordered_events *oe,
oe->cur_alloc_size += size;
list_add(&oe->buffer->list, &oe->to_free);
- /* First entry is abused to maintain the to_free list. */
- oe->buffer_idx = 2;
- new = oe->buffer + 1;
+ oe->buffer_idx = 1;
+ new = &oe->buffer->event[0];
} else {
pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
+ return NULL;
}
new->event = new_event;
@@ -300,15 +334,38 @@ void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t d
oe->deliver = deliver;
}
+static void
+ordered_events_buffer__free(struct ordered_events_buffer *buffer,
+ unsigned int max, struct ordered_events *oe)
+{
+ if (oe->copy_on_queue) {
+ unsigned int i;
+
+ for (i = 0; i < max; i++)
+ __free_dup_event(oe, buffer->event[i].event);
+ }
+
+ free(buffer);
+}
+
void ordered_events__free(struct ordered_events *oe)
{
- while (!list_empty(&oe->to_free)) {
- struct ordered_event *event;
+ struct ordered_events_buffer *buffer, *tmp;
- event = list_entry(oe->to_free.next, struct ordered_event, list);
- list_del(&event->list);
- free_dup_event(oe, event->event);
- free(event);
+ if (list_empty(&oe->to_free))
+ return;
+
+ /*
+ * The current buffer might not have all of its events allocated
+ * yet; free only the allocated ones ...
+ */
+ if (oe->buffer) { /* can be NULL once the last buffer filled up */
+ list_del(&oe->buffer->list);
+ ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
+ }
+
+ /* ... and continue with the rest */
+ list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
+ list_del(&buffer->list);
+ ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
}
}
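
Note: the to_free list now links struct ordered_events_buffer headers whose flexible event[] array holds the entries, replacing the old trick of abusing event[0] as the list node. A hedged allocation sketch matching the sizing math above:

    struct ordered_events_buffer *buf;
    size_t size = sizeof(*buf) + MAX_SAMPLE_BUFFER * sizeof(struct ordered_event);

    buf = malloc(size);                 /* list_head header + event[] payload */
    if (buf) {
        list_add(&buf->list, &oe->to_free);
        oe->buffer     = buf;
        oe->buffer_idx = 1;             /* event[0] is handed out right away */
    }
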
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index 8c7a2948593e..1338d5c345dc 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -25,23 +25,28 @@ struct ordered_events;
typedef int (*ordered_events__deliver_t)(struct ordered_events *oe,
struct ordered_event *event);
+struct ordered_events_buffer {
+ struct list_head list;
+ struct ordered_event event[0];
+};
+
struct ordered_events {
- u64 last_flush;
- u64 next_flush;
- u64 max_timestamp;
- u64 max_alloc_size;
- u64 cur_alloc_size;
- struct list_head events;
- struct list_head cache;
- struct list_head to_free;
- struct ordered_event *buffer;
- struct ordered_event *last;
- ordered_events__deliver_t deliver;
- int buffer_idx;
- unsigned int nr_events;
- enum oe_flush last_flush_type;
- u32 nr_unordered_events;
- bool copy_on_queue;
+ u64 last_flush;
+ u64 next_flush;
+ u64 max_timestamp;
+ u64 max_alloc_size;
+ u64 cur_alloc_size;
+ struct list_head events;
+ struct list_head cache;
+ struct list_head to_free;
+ struct ordered_events_buffer *buffer;
+ struct ordered_event *last;
+ ordered_events__deliver_t deliver;
+ int buffer_idx;
+ unsigned int nr_events;
+ enum oe_flush last_flush_type;
+ u32 nr_unordered_events;
+ bool copy_on_queue;
};
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index afd68524ffa9..7799788f662f 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -930,13 +930,14 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
static __u64 pmu_format_max_value(const unsigned long *format)
{
- __u64 w = 0;
- int fbit;
-
- for_each_set_bit(fbit, format, PERF_PMU_FORMAT_BITS)
- w |= (1ULL << fbit);
+ int w;
- return w;
+ w = bitmap_weight(format, PERF_PMU_FORMAT_BITS);
+ if (!w)
+ return 0;
+ if (w < 64)
+ return (1ULL << w) - 1;
+ return -1;
}
/*
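
Note: the maximum is now derived from the field's width rather than by OR-ing the bit positions together. A worked example, assuming a hypothetical format spec "config:0-4,8" (six bits set):

    int w = bitmap_weight(format, PERF_PMU_FORMAT_BITS);  /* w == 6 */
    __u64 max = (1ULL << w) - 1;                          /* 0x3f */

    /* the old loop returned 0x11f (bits 0-4 and 8 OR-ed together), which
     * overstates what a six-bit field can encode, since pmu_format_value()
     * scatters only the low w bits of the user value into the field */
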
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index ce501ba14b08..50150dfc0cdf 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -340,7 +340,7 @@ static bool is_tracepoint(struct pyrf_event *pevent)
}
static PyObject*
-tracepoint_field(struct pyrf_event *pe, struct format_field *field)
+tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
struct tep_handle *pevent = field->event->pevent;
void *data = pe->sample.raw_data;
@@ -348,28 +348,28 @@ tracepoint_field(struct pyrf_event *pe, struct format_field *field)
unsigned long long val;
unsigned int offset, len;
- if (field->flags & FIELD_IS_ARRAY) {
+ if (field->flags & TEP_FIELD_IS_ARRAY) {
offset = field->offset;
len = field->size;
- if (field->flags & FIELD_IS_DYNAMIC) {
+ if (field->flags & TEP_FIELD_IS_DYNAMIC) {
val = tep_read_number(pevent, data + offset, len);
offset = val;
len = offset >> 16;
offset &= 0xffff;
}
- if (field->flags & FIELD_IS_STRING &&
+ if (field->flags & TEP_FIELD_IS_STRING &&
is_printable_array(data + offset, len)) {
ret = _PyUnicode_FromString((char *)data + offset);
} else {
ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
- field->flags &= ~FIELD_IS_STRING;
+ field->flags &= ~TEP_FIELD_IS_STRING;
}
} else {
val = tep_read_number(pevent, data + field->offset,
field->size);
- if (field->flags & FIELD_IS_POINTER)
+ if (field->flags & TEP_FIELD_IS_POINTER)
ret = PyLong_FromUnsignedLong((unsigned long) val);
- else if (field->flags & FIELD_IS_SIGNED)
+ else if (field->flags & TEP_FIELD_IS_SIGNED)
ret = PyLong_FromLong((long) val);
else
ret = PyLong_FromUnsignedLong((unsigned long) val);
@@ -383,10 +383,10 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
{
const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
struct perf_evsel *evsel = pevent->evsel;
- struct format_field *field;
+ struct tep_format_field *field;
if (!evsel->tp_format) {
- struct event_format *tp_format;
+ struct tep_event_format *tp_format;
tp_format = trace_event__tp_format_id(evsel->attr.config);
if (!tp_format)
@@ -1240,7 +1240,7 @@ static struct {
static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
PyObject *args, PyObject *kwargs)
{
- struct event_format *tp_format;
+ struct tep_event_format *tp_format;
static char *kwlist[] = { "sys", "name", NULL };
char *sys = NULL;
char *name = NULL;
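
Note: the dynamic-array decode above packs the payload location into one value, the length in the high half and the offset in the low half. A worked sketch with hypothetical numbers:

    val    = tep_read_number(pevent, data + field->offset, field->size);
    /* e.g. val == 0x001a0040: */
    len    = val >> 16;       /* 0x1a -> 26 bytes */
    offset = val & 0xffff;    /* 0x40 -> payload at data + 0x40 */
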
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index d2c78ffd9fee..a2eeebbfb25f 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -147,6 +147,9 @@
#include <linux/bitops.h>
#include <linux/log2.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
#include "cpumap.h"
#include "color.h"
#include "evsel.h"
@@ -159,6 +162,7 @@
#include "auxtrace.h"
#include "s390-cpumsf.h"
#include "s390-cpumsf-kernel.h"
+#include "config.h"
struct s390_cpumsf {
struct auxtrace auxtrace;
@@ -170,6 +174,8 @@ struct s390_cpumsf {
u32 pmu_type;
u16 machine_type;
bool data_queued;
+ bool use_logfile;
+ char *logdir;
};
struct s390_cpumsf_queue {
@@ -177,6 +183,7 @@ struct s390_cpumsf_queue {
unsigned int queue_nr;
struct auxtrace_buffer *buffer;
int cpu;
+ FILE *logfile;
};
/* Display s390 CPU measurement facility basic-sampling data entry */
@@ -595,6 +602,12 @@ static int s390_cpumsf_run_decoder(struct s390_cpumsf_queue *sfq,
buffer->use_size = buffer->size;
buffer->use_data = buffer->data;
}
+ if (sfq->logfile) { /* Write into log file */
+ size_t rc = fwrite(buffer->data, buffer->size, 1,
+ sfq->logfile);
+ if (rc != 1)
+ pr_err("Failed to write auxiliary data\n");
+ }
} else
buffer = sfq->buffer;
@@ -606,6 +619,13 @@ static int s390_cpumsf_run_decoder(struct s390_cpumsf_queue *sfq,
return -ENOMEM;
buffer->use_size = buffer->size;
buffer->use_data = buffer->data;
+
+ if (sfq->logfile) { /* Write into log file */
+ size_t rc = fwrite(buffer->data, buffer->size, 1,
+ sfq->logfile);
+ if (rc != 1)
+ pr_err("Failed to write auxiliary data\n");
+ }
}
pr_debug4("%s queue_nr:%d buffer:%" PRId64 " offset:%#" PRIx64 " size:%#zx rest:%#zx\n",
__func__, sfq->queue_nr, buffer->buffer_nr, buffer->offset,
@@ -640,6 +660,23 @@ s390_cpumsf_alloc_queue(struct s390_cpumsf *sf, unsigned int queue_nr)
sfq->sf = sf;
sfq->queue_nr = queue_nr;
sfq->cpu = -1;
+ if (sf->use_logfile) {
+ char *name;
+ int rc;
+
+ rc = (sf->logdir)
+ ? asprintf(&name, "%s/aux.smp.%02x",
+ sf->logdir, queue_nr)
+ : asprintf(&name, "aux.smp.%02x", queue_nr);
+ if (rc > 0)
+ sfq->logfile = fopen(name, "w");
+ if (sfq->logfile == NULL) {
+ pr_err("Failed to open auxiliary log file %s,"
+ "continue...\n", name);
+ sf->use_logfile = false;
+ }
+ free(name);
+ }
return sfq;
}
@@ -850,8 +887,16 @@ static void s390_cpumsf_free_queues(struct perf_session *session)
struct auxtrace_queues *queues = &sf->queues;
unsigned int i;
- for (i = 0; i < queues->nr_queues; i++)
+ for (i = 0; i < queues->nr_queues; i++) {
+ struct s390_cpumsf_queue *sfq = (struct s390_cpumsf_queue *)
+ queues->queue_array[i].priv;
+
+ if (sfq != NULL && sfq->logfile) {
+ fclose(sfq->logfile);
+ sfq->logfile = NULL;
+ }
zfree(&queues->queue_array[i].priv);
+ }
auxtrace_queues__free(queues);
}
@@ -864,6 +909,7 @@ static void s390_cpumsf_free(struct perf_session *session)
auxtrace_heap__free(&sf->heap);
s390_cpumsf_free_queues(session);
session->auxtrace = NULL;
+ free(sf->logdir);
free(sf);
}
@@ -877,17 +923,55 @@ static int s390_cpumsf_get_type(const char *cpuid)
/* Check itrace options set on perf report command.
* Return true, if none are set or all options specified can be
- * handled on s390.
+ * handled on s390 (currently only option 'd' for logging).
* Return false otherwise.
*/
static bool check_auxtrace_itrace(struct itrace_synth_opts *itops)
{
+ bool ison = false;
+
if (!itops || !itops->set)
return true;
- pr_err("No --itrace options supported\n");
+ ison = itops->inject || itops->instructions || itops->branches ||
+ itops->transactions || itops->ptwrites ||
+ itops->pwr_events || itops->errors ||
+ itops->dont_decode || itops->calls || itops->returns ||
+ itops->callchain || itops->thread_stack ||
+ itops->last_branch;
+ if (!ison)
+ return true;
+ pr_err("Unsupported --itrace options specified\n");
return false;
}
+/* Check for the AUXTRACE dump directory if one is needed.
+ * On failure print an error message but continue.
+ * Return 0 when the config file keyword does not match and 1 otherwise.
+ */
+static int s390_cpumsf__config(const char *var, const char *value, void *cb)
+{
+ struct s390_cpumsf *sf = cb;
+ struct stat stbuf;
+ int rc;
+
+ if (strcmp(var, "auxtrace.dumpdir"))
+ return 0;
+ sf->logdir = strdup(value);
+ if (sf->logdir == NULL) {
+ pr_err("Failed to find auxtrace log directory %s,"
+ " continue with current directory...\n", value);
+ return 1;
+ }
+ rc = stat(sf->logdir, &stbuf);
+ if (rc == -1 || !S_ISDIR(stbuf.st_mode)) {
+ pr_err("Missing auxtrace log directory %s,"
+ " continue with current directory...\n", value);
+ free(sf->logdir);
+ sf->logdir = NULL;
+ }
+ return 1;
+}
+
int s390_cpumsf_process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
@@ -906,6 +990,9 @@ int s390_cpumsf_process_auxtrace_info(union perf_event *event,
err = -EINVAL;
goto err_free;
}
+ sf->use_logfile = session->itrace_synth_opts->log;
+ if (sf->use_logfile)
+ perf_config(s390_cpumsf__config, sf);
err = auxtrace_queues__init(&sf->queues);
if (err)
@@ -940,6 +1027,7 @@ err_free_queues:
auxtrace_queues__free(&sf->queues);
session->auxtrace = NULL;
err_free:
+ free(sf->logdir);
free(sf);
return err;
}
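
Note: logging is switched on by the itrace 'd' option and the dump directory is read through perf_config(). A hedged sketch of the matching ~/.perfconfig entry and invocation (the path is illustrative):

    [auxtrace]
        dumpdir = /tmp

Running e.g. 'perf report --itrace=d' on s390 sample data then writes one /tmp/aux.smp.NN file per queue, as implemented above.
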
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 45484f0f7292..89cb887648f9 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -99,7 +99,7 @@ static void define_symbolic_value(const char *ev_name,
LEAVE;
}
-static void define_symbolic_values(struct print_flag_sym *field,
+static void define_symbolic_values(struct tep_print_flag_sym *field,
const char *ev_name,
const char *field_name)
{
@@ -157,7 +157,7 @@ static void define_flag_value(const char *ev_name,
LEAVE;
}
-static void define_flag_values(struct print_flag_sym *field,
+static void define_flag_values(struct tep_print_flag_sym *field,
const char *ev_name,
const char *field_name)
{
@@ -189,62 +189,62 @@ static void define_flag_field(const char *ev_name,
LEAVE;
}
-static void define_event_symbols(struct event_format *event,
+static void define_event_symbols(struct tep_event_format *event,
const char *ev_name,
- struct print_arg *args)
+ struct tep_print_arg *args)
{
if (args == NULL)
return;
switch (args->type) {
- case PRINT_NULL:
+ case TEP_PRINT_NULL:
break;
- case PRINT_ATOM:
+ case TEP_PRINT_ATOM:
define_flag_value(ev_name, cur_field_name, "0",
args->atom.atom);
zero_flag_atom = 0;
break;
- case PRINT_FIELD:
+ case TEP_PRINT_FIELD:
free(cur_field_name);
cur_field_name = strdup(args->field.name);
break;
- case PRINT_FLAGS:
+ case TEP_PRINT_FLAGS:
define_event_symbols(event, ev_name, args->flags.field);
define_flag_field(ev_name, cur_field_name, args->flags.delim);
define_flag_values(args->flags.flags, ev_name, cur_field_name);
break;
- case PRINT_SYMBOL:
+ case TEP_PRINT_SYMBOL:
define_event_symbols(event, ev_name, args->symbol.field);
define_symbolic_field(ev_name, cur_field_name);
define_symbolic_values(args->symbol.symbols, ev_name,
cur_field_name);
break;
- case PRINT_HEX:
- case PRINT_HEX_STR:
+ case TEP_PRINT_HEX:
+ case TEP_PRINT_HEX_STR:
define_event_symbols(event, ev_name, args->hex.field);
define_event_symbols(event, ev_name, args->hex.size);
break;
- case PRINT_INT_ARRAY:
+ case TEP_PRINT_INT_ARRAY:
define_event_symbols(event, ev_name, args->int_array.field);
define_event_symbols(event, ev_name, args->int_array.count);
define_event_symbols(event, ev_name, args->int_array.el_size);
break;
- case PRINT_BSTRING:
- case PRINT_DYNAMIC_ARRAY:
- case PRINT_DYNAMIC_ARRAY_LEN:
- case PRINT_STRING:
- case PRINT_BITMASK:
+ case TEP_PRINT_BSTRING:
+ case TEP_PRINT_DYNAMIC_ARRAY:
+ case TEP_PRINT_DYNAMIC_ARRAY_LEN:
+ case TEP_PRINT_STRING:
+ case TEP_PRINT_BITMASK:
break;
- case PRINT_TYPE:
+ case TEP_PRINT_TYPE:
define_event_symbols(event, ev_name, args->typecast.item);
break;
- case PRINT_OP:
+ case TEP_PRINT_OP:
if (strcmp(args->op.op, ":") == 0)
zero_flag_atom = 1;
define_event_symbols(event, ev_name, args->op.left);
define_event_symbols(event, ev_name, args->op.right);
break;
- case PRINT_FUNC:
+ case TEP_PRINT_FUNC:
default:
pr_err("Unsupported print arg type\n");
/* we should warn... */
@@ -338,8 +338,8 @@ static void perl_process_tracepoint(struct perf_sample *sample,
struct addr_location *al)
{
struct thread *thread = al->thread;
- struct event_format *event = evsel->tp_format;
- struct format_field *field;
+ struct tep_event_format *event = evsel->tp_format;
+ struct tep_format_field *field;
static char handler[256];
unsigned long long val;
unsigned long s, ns;
@@ -388,9 +388,9 @@ static void perl_process_tracepoint(struct perf_sample *sample,
/* common fields other than pid can be accessed via xsub fns */
for (field = event->format.fields; field; field = field->next) {
- if (field->flags & FIELD_IS_STRING) {
+ if (field->flags & TEP_FIELD_IS_STRING) {
int offset;
- if (field->flags & FIELD_IS_DYNAMIC) {
+ if (field->flags & TEP_FIELD_IS_DYNAMIC) {
offset = *(int *)(data + field->offset);
offset &= 0xffff;
} else
@@ -399,7 +399,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
} else { /* FIELD_IS_NUMERIC */
val = read_size(event, data + field->offset,
field->size);
- if (field->flags & FIELD_IS_SIGNED) {
+ if (field->flags & TEP_FIELD_IS_SIGNED) {
XPUSHs(sv_2mortal(newSViv(val)));
} else {
XPUSHs(sv_2mortal(newSVuv(val)));
@@ -537,8 +537,8 @@ static int perl_stop_script(void)
static int perl_generate_script(struct tep_handle *pevent, const char *outfile)
{
- struct event_format *event = NULL;
- struct format_field *f;
+ struct tep_event_format *event = NULL;
+ struct tep_format_field *f;
char fname[PATH_MAX];
int not_first, count;
FILE *ofp;
@@ -646,11 +646,11 @@ sub print_backtrace\n\
count++;
fprintf(ofp, "%s=", f->name);
- if (f->flags & FIELD_IS_STRING ||
- f->flags & FIELD_IS_FLAG ||
- f->flags & FIELD_IS_SYMBOLIC)
+ if (f->flags & TEP_FIELD_IS_STRING ||
+ f->flags & TEP_FIELD_IS_FLAG ||
+ f->flags & TEP_FIELD_IS_SYMBOLIC)
fprintf(ofp, "%%s");
- else if (f->flags & FIELD_IS_SIGNED)
+ else if (f->flags & TEP_FIELD_IS_SIGNED)
fprintf(ofp, "%%d");
else
fprintf(ofp, "%%u");
@@ -668,7 +668,7 @@ sub print_backtrace\n\
if (++count % 5 == 0)
fprintf(ofp, "\n\t ");
- if (f->flags & FIELD_IS_FLAG) {
+ if (f->flags & TEP_FIELD_IS_FLAG) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t ");
count = 4;
@@ -678,7 +678,7 @@ sub print_backtrace\n\
event->name);
fprintf(ofp, "\"%s\", $%s)", f->name,
f->name);
- } else if (f->flags & FIELD_IS_SYMBOLIC) {
+ } else if (f->flags & TEP_FIELD_IS_SYMBOLIC) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t ");
count = 4;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index dfc6093f118c..69aa93d4ee99 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -193,7 +193,7 @@ static void try_call_object(const char *handler_name, PyObject *args)
call_object(handler, args, handler_name);
}
-static void define_value(enum print_arg_type field_type,
+static void define_value(enum tep_print_arg_type field_type,
const char *ev_name,
const char *field_name,
const char *field_value,
@@ -204,7 +204,7 @@ static void define_value(enum print_arg_type field_type,
unsigned long long value;
unsigned n = 0;
- if (field_type == PRINT_SYMBOL)
+ if (field_type == TEP_PRINT_SYMBOL)
handler_name = "define_symbolic_value";
t = PyTuple_New(4);
@@ -223,8 +223,8 @@ static void define_value(enum print_arg_type field_type,
Py_DECREF(t);
}
-static void define_values(enum print_arg_type field_type,
- struct print_flag_sym *field,
+static void define_values(enum tep_print_arg_type field_type,
+ struct tep_print_flag_sym *field,
const char *ev_name,
const char *field_name)
{
@@ -235,7 +235,7 @@ static void define_values(enum print_arg_type field_type,
define_values(field_type, field->next, ev_name, field_name);
}
-static void define_field(enum print_arg_type field_type,
+static void define_field(enum tep_print_arg_type field_type,
const char *ev_name,
const char *field_name,
const char *delim)
@@ -244,10 +244,10 @@ static void define_field(enum print_arg_type field_type,
PyObject *t;
unsigned n = 0;
- if (field_type == PRINT_SYMBOL)
+ if (field_type == TEP_PRINT_SYMBOL)
handler_name = "define_symbolic_field";
- if (field_type == PRINT_FLAGS)
+ if (field_type == TEP_PRINT_FLAGS)
t = PyTuple_New(3);
else
t = PyTuple_New(2);
@@ -256,7 +256,7 @@ static void define_field(enum print_arg_type field_type,
PyTuple_SetItem(t, n++, _PyUnicode_FromString(ev_name));
PyTuple_SetItem(t, n++, _PyUnicode_FromString(field_name));
- if (field_type == PRINT_FLAGS)
+ if (field_type == TEP_PRINT_FLAGS)
PyTuple_SetItem(t, n++, _PyUnicode_FromString(delim));
try_call_object(handler_name, t);
@@ -264,54 +264,54 @@ static void define_field(enum print_arg_type field_type,
Py_DECREF(t);
}
-static void define_event_symbols(struct event_format *event,
+static void define_event_symbols(struct tep_event_format *event,
const char *ev_name,
- struct print_arg *args)
+ struct tep_print_arg *args)
{
if (args == NULL)
return;
switch (args->type) {
- case PRINT_NULL:
+ case TEP_PRINT_NULL:
break;
- case PRINT_ATOM:
- define_value(PRINT_FLAGS, ev_name, cur_field_name, "0",
+ case TEP_PRINT_ATOM:
+ define_value(TEP_PRINT_FLAGS, ev_name, cur_field_name, "0",
args->atom.atom);
zero_flag_atom = 0;
break;
- case PRINT_FIELD:
+ case TEP_PRINT_FIELD:
free(cur_field_name);
cur_field_name = strdup(args->field.name);
break;
- case PRINT_FLAGS:
+ case TEP_PRINT_FLAGS:
define_event_symbols(event, ev_name, args->flags.field);
- define_field(PRINT_FLAGS, ev_name, cur_field_name,
+ define_field(TEP_PRINT_FLAGS, ev_name, cur_field_name,
args->flags.delim);
- define_values(PRINT_FLAGS, args->flags.flags, ev_name,
+ define_values(TEP_PRINT_FLAGS, args->flags.flags, ev_name,
cur_field_name);
break;
- case PRINT_SYMBOL:
+ case TEP_PRINT_SYMBOL:
define_event_symbols(event, ev_name, args->symbol.field);
- define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL);
- define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
+ define_field(TEP_PRINT_SYMBOL, ev_name, cur_field_name, NULL);
+ define_values(TEP_PRINT_SYMBOL, args->symbol.symbols, ev_name,
cur_field_name);
break;
- case PRINT_HEX:
- case PRINT_HEX_STR:
+ case TEP_PRINT_HEX:
+ case TEP_PRINT_HEX_STR:
define_event_symbols(event, ev_name, args->hex.field);
define_event_symbols(event, ev_name, args->hex.size);
break;
- case PRINT_INT_ARRAY:
+ case TEP_PRINT_INT_ARRAY:
define_event_symbols(event, ev_name, args->int_array.field);
define_event_symbols(event, ev_name, args->int_array.count);
define_event_symbols(event, ev_name, args->int_array.el_size);
break;
- case PRINT_STRING:
+ case TEP_PRINT_STRING:
break;
- case PRINT_TYPE:
+ case TEP_PRINT_TYPE:
define_event_symbols(event, ev_name, args->typecast.item);
break;
- case PRINT_OP:
+ case TEP_PRINT_OP:
if (strcmp(args->op.op, ":") == 0)
zero_flag_atom = 1;
define_event_symbols(event, ev_name, args->op.left);
@@ -319,11 +319,11 @@ static void define_event_symbols(struct event_format *event,
break;
default:
/* gcc warns for these? */
- case PRINT_BSTRING:
- case PRINT_DYNAMIC_ARRAY:
- case PRINT_DYNAMIC_ARRAY_LEN:
- case PRINT_FUNC:
- case PRINT_BITMASK:
+ case TEP_PRINT_BSTRING:
+ case TEP_PRINT_DYNAMIC_ARRAY:
+ case TEP_PRINT_DYNAMIC_ARRAY_LEN:
+ case TEP_PRINT_FUNC:
+ case TEP_PRINT_BITMASK:
/* we should warn... */
return;
}
@@ -332,10 +332,10 @@ static void define_event_symbols(struct event_format *event,
define_event_symbols(event, ev_name, args->next);
}
-static PyObject *get_field_numeric_entry(struct event_format *event,
- struct format_field *field, void *data)
+static PyObject *get_field_numeric_entry(struct tep_event_format *event,
+ struct tep_format_field *field, void *data)
{
- bool is_array = field->flags & FIELD_IS_ARRAY;
+ bool is_array = field->flags & TEP_FIELD_IS_ARRAY;
PyObject *obj = NULL, *list = NULL;
unsigned long long val;
unsigned int item_size, n_items, i;
@@ -353,7 +353,7 @@ static PyObject *get_field_numeric_entry(struct event_format *event,
val = read_size(event, data + field->offset + i * item_size,
item_size);
- if (field->flags & FIELD_IS_SIGNED) {
+ if (field->flags & TEP_FIELD_IS_SIGNED) {
if ((long long)val >= LONG_MIN &&
(long long)val <= LONG_MAX)
obj = _PyLong_FromLong(val);
@@ -790,11 +790,11 @@ static void python_process_tracepoint(struct perf_sample *sample,
struct perf_evsel *evsel,
struct addr_location *al)
{
- struct event_format *event = evsel->tp_format;
+ struct tep_event_format *event = evsel->tp_format;
PyObject *handler, *context, *t, *obj = NULL, *callchain;
PyObject *dict = NULL, *all_entries_dict = NULL;
static char handler_name[256];
- struct format_field *field;
+ struct tep_format_field *field;
unsigned long s, ns;
unsigned n = 0;
int pid;
@@ -867,22 +867,22 @@ static void python_process_tracepoint(struct perf_sample *sample,
unsigned int offset, len;
unsigned long long val;
- if (field->flags & FIELD_IS_ARRAY) {
+ if (field->flags & TEP_FIELD_IS_ARRAY) {
offset = field->offset;
len = field->size;
- if (field->flags & FIELD_IS_DYNAMIC) {
+ if (field->flags & TEP_FIELD_IS_DYNAMIC) {
val = tep_read_number(scripting_context->pevent,
data + offset, len);
offset = val;
len = offset >> 16;
offset &= 0xffff;
}
- if (field->flags & FIELD_IS_STRING &&
+ if (field->flags & TEP_FIELD_IS_STRING &&
is_printable_array(data + offset, len)) {
obj = _PyUnicode_FromString((char *) data + offset);
} else {
obj = PyByteArray_FromStringAndSize((const char *) data + offset, len);
- field->flags &= ~FIELD_IS_STRING;
+ field->flags &= ~TEP_FIELD_IS_STRING;
}
} else { /* FIELD_IS_NUMERIC */
obj = get_field_numeric_entry(event, field, data);
@@ -1590,8 +1590,8 @@ static int python_stop_script(void)
static int python_generate_script(struct tep_handle *pevent, const char *outfile)
{
- struct event_format *event = NULL;
- struct format_field *f;
+ struct tep_event_format *event = NULL;
+ struct tep_format_field *f;
char fname[PATH_MAX];
int not_first, count;
FILE *ofp;
@@ -1686,12 +1686,12 @@ static int python_generate_script(struct tep_handle *pevent, const char *outfile
count++;
fprintf(ofp, "%s=", f->name);
- if (f->flags & FIELD_IS_STRING ||
- f->flags & FIELD_IS_FLAG ||
- f->flags & FIELD_IS_ARRAY ||
- f->flags & FIELD_IS_SYMBOLIC)
+ if (f->flags & TEP_FIELD_IS_STRING ||
+ f->flags & TEP_FIELD_IS_FLAG ||
+ f->flags & TEP_FIELD_IS_ARRAY ||
+ f->flags & TEP_FIELD_IS_SYMBOLIC)
fprintf(ofp, "%%s");
- else if (f->flags & FIELD_IS_SIGNED)
+ else if (f->flags & TEP_FIELD_IS_SIGNED)
fprintf(ofp, "%%d");
else
fprintf(ofp, "%%u");
@@ -1709,7 +1709,7 @@ static int python_generate_script(struct tep_handle *pevent, const char *outfile
if (++count % 5 == 0)
fprintf(ofp, "\n\t\t");
- if (f->flags & FIELD_IS_FLAG) {
+ if (f->flags & TEP_FIELD_IS_FLAG) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t\t");
count = 4;
@@ -1719,7 +1719,7 @@ static int python_generate_script(struct tep_handle *pevent, const char *outfile
event->name);
fprintf(ofp, "\"%s\", %s)", f->name,
f->name);
- } else if (f->flags & FIELD_IS_SYMBOLIC) {
+ } else if (f->flags & TEP_FIELD_IS_SYMBOLIC) {
if ((count - 1) % 5 != 0) {
fprintf(ofp, "\n\t\t");
count = 4;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 8b9369303561..7d2c8ce6cfad 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -199,12 +199,10 @@ void perf_session__delete(struct perf_session *session)
free(session);
}
-static int process_event_synth_tracing_data_stub(struct perf_tool *tool
+static int process_event_synth_tracing_data_stub(struct perf_session *session
__maybe_unused,
union perf_event *event
- __maybe_unused,
- struct perf_session *session
- __maybe_unused)
+ __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
@@ -277,10 +275,8 @@ static int skipn(int fd, off_t n)
return 0;
}
-static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_session *session
- __maybe_unused)
+static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event)
{
dump_printf(": unhandled!\n");
if (perf_data__is_pipe(session->data))
@@ -288,9 +284,8 @@ static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
return event->auxtrace.size;
}
-static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *session __maybe_unused)
+static int process_event_op2_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
@@ -298,9 +293,8 @@ static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
static
-int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *session __maybe_unused)
+int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_thread_map(event, stdout);
@@ -310,9 +304,8 @@ int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
}
static
-int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *session __maybe_unused)
+int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_cpu_map(event, stdout);
@@ -322,9 +315,8 @@ int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
}
static
-int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *session __maybe_unused)
+int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
{
if (dump_trace)
perf_event__fprintf_stat_config(event, stdout);
@@ -333,10 +325,8 @@ int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_stat_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *perf_session
- __maybe_unused)
+static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
+ union perf_event *event)
{
if (dump_trace)
perf_event__fprintf_stat(event, stdout);
@@ -345,10 +335,8 @@ static int process_stat_stub(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *perf_session
- __maybe_unused)
+static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
+ union perf_event *event)
{
if (dump_trace)
perf_event__fprintf_stat_round(event, stdout);
@@ -1374,37 +1362,37 @@ static s64 perf_session__process_user_event(struct perf_session *session,
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
lseek(fd, file_offset, SEEK_SET);
- return tool->tracing_data(tool, event, session);
+ return tool->tracing_data(session, event);
case PERF_RECORD_HEADER_BUILD_ID:
- return tool->build_id(tool, event, session);
+ return tool->build_id(session, event);
case PERF_RECORD_FINISHED_ROUND:
return tool->finished_round(tool, event, oe);
case PERF_RECORD_ID_INDEX:
- return tool->id_index(tool, event, session);
+ return tool->id_index(session, event);
case PERF_RECORD_AUXTRACE_INFO:
- return tool->auxtrace_info(tool, event, session);
+ return tool->auxtrace_info(session, event);
case PERF_RECORD_AUXTRACE:
/* setup for reading amidst mmap */
lseek(fd, file_offset + event->header.size, SEEK_SET);
- return tool->auxtrace(tool, event, session);
+ return tool->auxtrace(session, event);
case PERF_RECORD_AUXTRACE_ERROR:
perf_session__auxtrace_error_inc(session, event);
- return tool->auxtrace_error(tool, event, session);
+ return tool->auxtrace_error(session, event);
case PERF_RECORD_THREAD_MAP:
- return tool->thread_map(tool, event, session);
+ return tool->thread_map(session, event);
case PERF_RECORD_CPU_MAP:
- return tool->cpu_map(tool, event, session);
+ return tool->cpu_map(session, event);
case PERF_RECORD_STAT_CONFIG:
- return tool->stat_config(tool, event, session);
+ return tool->stat_config(session, event);
case PERF_RECORD_STAT:
- return tool->stat(tool, event, session);
+ return tool->stat(session, event);
case PERF_RECORD_STAT_ROUND:
- return tool->stat_round(tool, event, session);
+ return tool->stat_round(session, event);
case PERF_RECORD_TIME_CONV:
session->time_conv = event->time_conv;
- return tool->time_conv(tool, event, session);
+ return tool->time_conv(session, event);
case PERF_RECORD_HEADER_FEATURE:
- return tool->feature(tool, event, session);
+ return tool->feature(session, event);
default:
return -EINVAL;
}
@@ -2133,9 +2121,8 @@ out:
return err;
}
-int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_session *session)
+int perf_event__process_id_index(struct perf_session *session,
+ union perf_event *event)
{
struct perf_evlist *evlist = session->evlist;
struct id_index_event *ie = &event->id_index;
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index da40b4b380ca..d96eccd7d27f 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -120,9 +120,8 @@ int perf_session__deliver_synth_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample);
-int perf_event__process_id_index(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session);
+int perf_event__process_id_index(struct perf_session *session,
+ union perf_event *event);
int perf_event__synthesize_id_index(struct perf_tool *tool,
perf_event__handler_t process,
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 97efbcad076e..63f758c655d5 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -5,16 +5,18 @@ from subprocess import Popen, PIPE
from re import sub
def clang_has_option(option):
- return [o for o in Popen(['clang', option], stderr=PIPE).stderr.readlines() if "unknown argument" in o] == [ ]
+ return [o for o in Popen(['clang', option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
cc = getenv("CC")
if cc == "clang":
- from _sysconfigdata import build_time_vars
- build_time_vars["CFLAGS"] = sub("-specs=[^ ]+", "", build_time_vars["CFLAGS"])
- if not clang_has_option("-mcet"):
- build_time_vars["CFLAGS"] = sub("-mcet", "", build_time_vars["CFLAGS"])
- if not clang_has_option("-fcf-protection"):
- build_time_vars["CFLAGS"] = sub("-fcf-protection", "", build_time_vars["CFLAGS"])
+ from distutils.sysconfig import get_config_vars
+ vars = get_config_vars()
+ for var in ('CFLAGS', 'OPT'):
+ vars[var] = sub("-specs=[^ ]+", "", vars[var])
+ if not clang_has_option("-mcet"):
+ vars[var] = sub("-mcet", "", vars[var])
+ if not clang_has_option("-fcf-protection"):
+ vars[var] = sub("-fcf-protection", "", vars[var])
from distutils.core import setup, Extension
@@ -35,7 +37,7 @@ class install_lib(_install_lib):
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
-cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
if cc != "clang":
cflags += ['-Wno-cast-function-type' ]
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index b284276ec963..f96c005b3c41 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1884,7 +1884,7 @@ static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
struct hpp_dynamic_entry {
struct perf_hpp_fmt hpp;
struct perf_evsel *evsel;
- struct format_field *field;
+ struct tep_format_field *field;
unsigned dynamic_len;
bool raw_trace;
};
@@ -1899,7 +1899,7 @@ static int hde_width(struct hpp_dynamic_entry *hde)
if (namelen > len)
len = namelen;
- if (!(hde->field->flags & FIELD_IS_STRING)) {
+ if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
/* length for print hex numbers */
fieldlen = hde->field->size * 2 + 2;
}
@@ -1915,7 +1915,7 @@ static void update_dynamic_len(struct hpp_dynamic_entry *hde,
struct hist_entry *he)
{
char *str, *pos;
- struct format_field *field = hde->field;
+ struct tep_format_field *field = hde->field;
size_t namelen;
bool last = false;
@@ -2000,7 +2000,7 @@ static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hpp_dynamic_entry *hde;
size_t len = fmt->user_len;
char *str, *pos;
- struct format_field *field;
+ struct tep_format_field *field;
size_t namelen;
bool last = false;
int ret;
@@ -2060,7 +2060,7 @@ static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b)
{
struct hpp_dynamic_entry *hde;
- struct format_field *field;
+ struct tep_format_field *field;
unsigned offset, size;
hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
@@ -2071,7 +2071,7 @@ static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
}
field = hde->field;
- if (field->flags & FIELD_IS_DYNAMIC) {
+ if (field->flags & TEP_FIELD_IS_DYNAMIC) {
unsigned long long dyn;
tep_read_number_field(field, a->raw_data, &dyn);
@@ -2117,7 +2117,7 @@ static void hde_free(struct perf_hpp_fmt *fmt)
}
static struct hpp_dynamic_entry *
-__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
+__alloc_dynamic_entry(struct perf_evsel *evsel, struct tep_format_field *field,
int level)
{
struct hpp_dynamic_entry *hde;
@@ -2252,7 +2252,7 @@ static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_nam
}
static int __dynamic_dimension__add(struct perf_evsel *evsel,
- struct format_field *field,
+ struct tep_format_field *field,
bool raw_trace, int level)
{
struct hpp_dynamic_entry *hde;
@@ -2270,7 +2270,7 @@ static int __dynamic_dimension__add(struct perf_evsel *evsel,
static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
int ret;
- struct format_field *field;
+ struct tep_format_field *field;
field = evsel->tp_format->format.fields;
while (field) {
@@ -2305,7 +2305,7 @@ static int add_all_matching_fields(struct perf_evlist *evlist,
{
int ret = -ESRCH;
struct perf_evsel *evsel;
- struct format_field *field;
+ struct tep_format_field *field;
evlist__for_each_entry(evlist, evsel) {
if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
@@ -2327,7 +2327,7 @@ static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
{
char *str, *event_name, *field_name, *opt_name;
struct perf_evsel *evsel;
- struct format_field *field;
+ struct tep_format_field *field;
bool raw_trace = symbol_conf.raw_trace;
int ret = 0;
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 09d6746e6ec8..e767c4a9d4d2 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -85,6 +85,9 @@ static struct symbol *new_inline_sym(struct dso *dso,
struct symbol *inline_sym;
char *demangled = NULL;
+ if (!funcname)
+ funcname = "??";
+
if (dso) {
demangled = dso__demangle_sym(dso, 0, funcname);
if (demangled)
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
new file mode 100644
index 000000000000..e7b4c44ebb62
--- /dev/null
+++ b/tools/perf/util/stat-display.c
@@ -0,0 +1,1166 @@
+#include <stdio.h>
+#include <inttypes.h>
+#include <linux/time64.h>
+#include <math.h>
+#include "evlist.h"
+#include "evsel.h"
+#include "stat.h"
+#include "top.h"
+#include "thread_map.h"
+#include "cpumap.h"
+#include "string2.h"
+#include "sane_ctype.h"
+#include "cgroup.h"
+#include <api/fs/fs.h>
+
+#define CNTR_NOT_SUPPORTED "<not supported>"
+#define CNTR_NOT_COUNTED "<not counted>"
+
+static bool is_duration_time(struct perf_evsel *evsel)
+{
+ return !strcmp(evsel->name, "duration_time");
+}
+
+static void print_running(struct perf_stat_config *config,
+ u64 run, u64 ena)
+{
+ if (config->csv_output) {
+ fprintf(config->output, "%s%" PRIu64 "%s%.2f",
+ config->csv_sep,
+ run,
+ config->csv_sep,
+ ena ? 100.0 * run / ena : 100.0);
+ } else if (run != ena) {
+ fprintf(config->output, " (%.2f%%)", 100.0 * run / ena);
+ }
+}
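+
+/*
+ * Example with hypothetical numbers: if a multiplexed counter ran for
+ * run=750000 out of ena=1000000 enabled time units, the default mode
+ * appends " (75.00%)", while CSV mode emits "<sep>750000<sep>75.00".
+ */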
+
+static void print_noise_pct(struct perf_stat_config *config,
+ double total, double avg)
+{
+ double pct = rel_stddev_stats(total, avg);
+
+ if (config->csv_output)
+ fprintf(config->output, "%s%.2f%%", config->csv_sep, pct);
+ else if (pct)
+ fprintf(config->output, " ( +-%6.2f%% )", pct);
+}
+
+static void print_noise(struct perf_stat_config *config,
+ struct perf_evsel *evsel, double avg)
+{
+ struct perf_stat_evsel *ps;
+
+ if (config->run_count == 1)
+ return;
+
+ ps = evsel->stats;
+ print_noise_pct(config, stddev_stats(&ps->res_stats[0]), avg);
+}
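+
+/*
+ * Sketch, assuming five runs (-r 5) with a relative stddev of 1.23%:
+ * the default mode appends " ( +-  1.23% )". A single run
+ * (run_count == 1) prints nothing.
+ */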
+
+static void aggr_printout(struct perf_stat_config *config,
+ struct perf_evsel *evsel, int id, int nr)
+{
+ switch (config->aggr_mode) {
+ case AGGR_CORE:
+ fprintf(config->output, "S%d-C%*d%s%*d%s",
+ cpu_map__id_to_socket(id),
+ config->csv_output ? 0 : -8,
+ cpu_map__id_to_cpu(id),
+ config->csv_sep,
+ config->csv_output ? 0 : 4,
+ nr,
+ config->csv_sep);
+ break;
+ case AGGR_SOCKET:
+ fprintf(config->output, "S%*d%s%*d%s",
+ config->csv_output ? 0 : -5,
+ id,
+ config->csv_sep,
+ config->csv_output ? 0 : 4,
+ nr,
+ config->csv_sep);
+ break;
+ case AGGR_NONE:
+ fprintf(config->output, "CPU%*d%s",
+ config->csv_output ? 0 : -4,
+ perf_evsel__cpus(evsel)->map[id], config->csv_sep);
+ break;
+ case AGGR_THREAD:
+ fprintf(config->output, "%*s-%*d%s",
+ config->csv_output ? 0 : 16,
+ thread_map__comm(evsel->threads, id),
+ config->csv_output ? 0 : -8,
+ thread_map__pid(evsel->threads, id),
+ config->csv_sep);
+ break;
+ case AGGR_GLOBAL:
+ case AGGR_UNSET:
+ default:
+ break;
+ }
+}
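+
+/*
+ * Illustrative line prefixes (hypothetical ids): AGGR_SOCKET prints
+ * "S0" plus the number of aggregated CPUs, AGGR_CORE "S0-C1" plus the
+ * CPU count, AGGR_NONE "CPU0", and AGGR_THREAD the "comm-pid" of the
+ * measured thread.
+ */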
+
+struct outstate {
+ FILE *fh;
+ bool newline;
+ const char *prefix;
+ int nfields;
+ int id, nr;
+ struct perf_evsel *evsel;
+};
+
+#define METRIC_LEN 35
+
+static void new_line_std(struct perf_stat_config *config __maybe_unused,
+ void *ctx)
+{
+ struct outstate *os = ctx;
+
+ os->newline = true;
+}
+
+static void do_new_line_std(struct perf_stat_config *config,
+ struct outstate *os)
+{
+ fputc('\n', os->fh);
+ fputs(os->prefix, os->fh);
+ aggr_printout(config, os->evsel, os->id, os->nr);
+ if (config->aggr_mode == AGGR_NONE)
+ fprintf(os->fh, " ");
+ fprintf(os->fh, " ");
+}
+
+static void print_metric_std(struct perf_stat_config *config,
+ void *ctx, const char *color, const char *fmt,
+ const char *unit, double val)
+{
+ struct outstate *os = ctx;
+ FILE *out = os->fh;
+ int n;
+ bool newline = os->newline;
+
+ os->newline = false;
+
+ if (unit == NULL || fmt == NULL) {
+ fprintf(out, "%-*s", METRIC_LEN, "");
+ return;
+ }
+
+ if (newline)
+ do_new_line_std(config, os);
+
+ n = fprintf(out, " # ");
+ if (color)
+ n += color_fprintf(out, color, fmt, val);
+ else
+ n += fprintf(out, fmt, val);
+ fprintf(out, " %-*s", METRIC_LEN - n - 1, unit);
+}
+
+static void new_line_csv(struct perf_stat_config *config, void *ctx)
+{
+ struct outstate *os = ctx;
+ int i;
+
+ fputc('\n', os->fh);
+ if (os->prefix)
+ fprintf(os->fh, "%s%s", os->prefix, config->csv_sep);
+ aggr_printout(config, os->evsel, os->id, os->nr);
+ for (i = 0; i < os->nfields; i++)
+ fputs(config->csv_sep, os->fh);
+}
+
+static void print_metric_csv(struct perf_stat_config *config __maybe_unused,
+ void *ctx,
+ const char *color __maybe_unused,
+ const char *fmt, const char *unit, double val)
+{
+ struct outstate *os = ctx;
+ FILE *out = os->fh;
+ char buf[64], *vals, *ends;
+
+ if (unit == NULL || fmt == NULL) {
+ fprintf(out, "%s%s", config->csv_sep, config->csv_sep);
+ return;
+ }
+ snprintf(buf, sizeof(buf), fmt, val);
+ ends = vals = ltrim(buf);
+ while (isdigit(*ends) || *ends == '.')
+ ends++;
+ *ends = 0;
+ while (isspace(*unit))
+ unit++;
+ fprintf(out, "%s%s%s%s", config->csv_sep, vals, config->csv_sep, unit);
+}
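+
+/*
+ * For example, a metric formatted as "  0.50 " with unit
+ * "insn per cycle" is trimmed to its numeric token and printed as
+ * "<sep>0.50<sep>insn per cycle".
+ */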
+
+/* Filter out some columns that don't work well in metric-only mode */
+
+static bool valid_only_metric(const char *unit)
+{
+ if (!unit)
+ return false;
+ if (strstr(unit, "/sec") ||
+ strstr(unit, "hz") ||
+ strstr(unit, "Hz") ||
+ strstr(unit, "CPUs utilized"))
+ return false;
+ return true;
+}
+
+static const char *fixunit(char *buf, struct perf_evsel *evsel,
+ const char *unit)
+{
+ if (!strncmp(unit, "of all", 6)) {
+ snprintf(buf, 1024, "%s %s", perf_evsel__name(evsel),
+ unit);
+ return buf;
+ }
+ return unit;
+}
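+
+/*
+ * E.g. the shadow-metric unit "of all branches" is ambiguous once
+ * several metrics share one line, so metric-only output expands it to
+ * "branch-misses of all branches".
+ */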
+
+static void print_metric_only(struct perf_stat_config *config,
+ void *ctx, const char *color, const char *fmt,
+ const char *unit, double val)
+{
+ struct outstate *os = ctx;
+ FILE *out = os->fh;
+ char buf[1024], str[1024];
+ unsigned mlen = config->metric_only_len;
+
+ if (!valid_only_metric(unit))
+ return;
+ unit = fixunit(buf, os->evsel, unit);
+ if (mlen < strlen(unit))
+ mlen = strlen(unit) + 1;
+
+ if (color)
+ mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
+
+ color_snprintf(str, sizeof(str), color ?: "", fmt, val);
+ fprintf(out, "%*s ", mlen, str);
+}
+
+static void print_metric_only_csv(struct perf_stat_config *config __maybe_unused,
+ void *ctx, const char *color __maybe_unused,
+ const char *fmt,
+ const char *unit, double val)
+{
+ struct outstate *os = ctx;
+ FILE *out = os->fh;
+ char buf[64], *vals, *ends;
+ char tbuf[1024];
+
+ if (!valid_only_metric(unit))
+ return;
+ unit = fixunit(tbuf, os->evsel, unit);
+ snprintf(buf, sizeof buf, fmt, val);
+ ends = vals = ltrim(buf);
+ while (isdigit(*ends) || *ends == '.')
+ ends++;
+ *ends = 0;
+ fprintf(out, "%s%s", vals, config->csv_sep);
+}
+
+static void new_line_metric(struct perf_stat_config *config __maybe_unused,
+ void *ctx __maybe_unused)
+{
+}
+
+static void print_metric_header(struct perf_stat_config *config,
+ void *ctx, const char *color __maybe_unused,
+ const char *fmt __maybe_unused,
+ const char *unit, double val __maybe_unused)
+{
+ struct outstate *os = ctx;
+ char tbuf[1024];
+
+ if (!valid_only_metric(unit))
+ return;
+ unit = fixunit(tbuf, os->evsel, unit);
+ if (config->csv_output)
+ fprintf(os->fh, "%s%s", unit, config->csv_sep);
+ else
+ fprintf(os->fh, "%*s ", config->metric_only_len, unit);
+}
+
+static int first_shadow_cpu(struct perf_stat_config *config,
+ struct perf_evsel *evsel, int id)
+{
+ struct perf_evlist *evlist = evsel->evlist;
+ int i;
+
+ if (!config->aggr_get_id)
+ return 0;
+
+ if (config->aggr_mode == AGGR_NONE)
+ return id;
+
+ if (config->aggr_mode == AGGR_GLOBAL)
+ return 0;
+
+ for (i = 0; i < perf_evsel__nr_cpus(evsel); i++) {
+ int cpu2 = perf_evsel__cpus(evsel)->map[i];
+
+ if (config->aggr_get_id(config, evlist->cpus, cpu2) == id)
+ return cpu2;
+ }
+ return 0;
+}
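+
+/*
+ * Sketch: for AGGR_SOCKET id 1 on a machine whose second socket
+ * starts at CPU 8, this would return 8, the CPU whose slot in the
+ * runtime (shadow) stats holds the aggregated values.
+ */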
+
+static void abs_printout(struct perf_stat_config *config,
+ int id, int nr, struct perf_evsel *evsel, double avg)
+{
+ FILE *output = config->output;
+ double sc = evsel->scale;
+ const char *fmt;
+
+ if (config->csv_output) {
+ fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s";
+ } else {
+ if (config->big_num)
+ fmt = floor(sc) != sc ? "%'18.2f%s" : "%'18.0f%s";
+ else
+ fmt = floor(sc) != sc ? "%18.2f%s" : "%18.0f%s";
+ }
+
+ aggr_printout(config, evsel, id, nr);
+
+ fprintf(output, fmt, avg, config->csv_sep);
+
+ if (evsel->unit)
+ fprintf(output, "%-*s%s",
+ config->csv_output ? 0 : config->unit_width,
+ evsel->unit, config->csv_sep);
+
+ fprintf(output, "%-*s", config->csv_output ? 0 : 25, perf_evsel__name(evsel));
+
+ if (evsel->cgrp)
+ fprintf(output, "%s%s", config->csv_sep, evsel->cgrp->name);
+}
+
+static bool is_mixed_hw_group(struct perf_evsel *counter)
+{
+ struct perf_evlist *evlist = counter->evlist;
+ u32 pmu_type = counter->attr.type;
+ struct perf_evsel *pos;
+
+ if (counter->nr_members < 2)
+ return false;
+
+ evlist__for_each_entry(evlist, pos) {
+ /* software events can be part of any hardware group */
+ if (pos->attr.type == PERF_TYPE_SOFTWARE)
+ continue;
+ if (pmu_type == PERF_TYPE_SOFTWARE) {
+ pmu_type = pos->attr.type;
+ continue;
+ }
+ if (pmu_type != pos->attr.type)
+ return true;
+ }
+
+ return false;
+}
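+
+/*
+ * A group such as "{cycles,uncore_imc_0/cas_count_read/}" would mix
+ * core and uncore PMUs and return true here; software events like
+ * context-switches may sit in any hardware group and are skipped.
+ */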
+
+static void printout(struct perf_stat_config *config, int id, int nr,
+ struct perf_evsel *counter, double uval,
+ char *prefix, u64 run, u64 ena, double noise,
+ struct runtime_stat *st)
+{
+ struct perf_stat_output_ctx out;
+ struct outstate os = {
+ .fh = config->output,
+ .prefix = prefix ? prefix : "",
+ .id = id,
+ .nr = nr,
+ .evsel = counter,
+ };
+ print_metric_t pm = print_metric_std;
+ new_line_t nl;
+
+ if (config->metric_only) {
+ nl = new_line_metric;
+ if (config->csv_output)
+ pm = print_metric_only_csv;
+ else
+ pm = print_metric_only;
+ } else
+ nl = new_line_std;
+
+ if (config->csv_output && !config->metric_only) {
+ static int aggr_fields[] = {
+ [AGGR_GLOBAL] = 0,
+ [AGGR_THREAD] = 1,
+ [AGGR_NONE] = 1,
+ [AGGR_SOCKET] = 2,
+ [AGGR_CORE] = 2,
+ };
+
+ pm = print_metric_csv;
+ nl = new_line_csv;
+ os.nfields = 3;
+ os.nfields += aggr_fields[config->aggr_mode];
+ if (counter->cgrp)
+ os.nfields++;
+ }
+ if (run == 0 || ena == 0 || counter->counts->scaled == -1) {
+ if (config->metric_only) {
+ pm(config, &os, NULL, "", "", 0);
+ return;
+ }
+ aggr_printout(config, counter, id, nr);
+
+ fprintf(config->output, "%*s%s",
+ config->csv_output ? 0 : 18,
+ counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
+ config->csv_sep);
+
+ if (counter->supported) {
+ config->print_free_counters_hint = 1;
+ if (is_mixed_hw_group(counter))
+ config->print_mixed_hw_group_error = 1;
+ }
+
+ fprintf(config->output, "%-*s%s",
+ config->csv_output ? 0 : config->unit_width,
+ counter->unit, config->csv_sep);
+
+ fprintf(config->output, "%*s",
+ config->csv_output ? 0 : -25,
+ perf_evsel__name(counter));
+
+ if (counter->cgrp)
+ fprintf(config->output, "%s%s",
+ config->csv_sep, counter->cgrp->name);
+
+ if (!config->csv_output)
+ pm(config, &os, NULL, NULL, "", 0);
+ print_noise(config, counter, noise);
+ print_running(config, run, ena);
+ if (config->csv_output)
+ pm(config, &os, NULL, NULL, "", 0);
+ return;
+ }
+
+ if (!config->metric_only)
+ abs_printout(config, id, nr, counter, uval);
+
+ out.print_metric = pm;
+ out.new_line = nl;
+ out.ctx = &os;
+ out.force_header = false;
+
+ if (config->csv_output && !config->metric_only) {
+ print_noise(config, counter, noise);
+ print_running(config, run, ena);
+ }
+
+ perf_stat__print_shadow_stats(config, counter, uval,
+ first_shadow_cpu(config, counter, id),
+ &out, &config->metric_events, st);
+ if (!config->csv_output && !config->metric_only) {
+ print_noise(config, counter, noise);
+ print_running(config, run, ena);
+ }
+}
+
+static void aggr_update_shadow(struct perf_stat_config *config,
+ struct perf_evlist *evlist)
+{
+ int cpu, s2, id, s;
+ u64 val;
+ struct perf_evsel *counter;
+
+ for (s = 0; s < config->aggr_map->nr; s++) {
+ id = config->aggr_map->map[s];
+ evlist__for_each_entry(evlist, counter) {
+ val = 0;
+ for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
+ s2 = config->aggr_get_id(config, evlist->cpus, cpu);
+ if (s2 != id)
+ continue;
+ val += perf_counts(counter->counts, cpu, 0)->val;
+ }
+ perf_stat__update_shadow_stats(counter, val,
+ first_shadow_cpu(config, counter, id),
+ &rt_stat);
+ }
+ }
+}
+
+static void uniquify_event_name(struct perf_evsel *counter)
+{
+ char *new_name;
+ char *config;
+
+ if (counter->uniquified_name ||
+ !counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
+ strlen(counter->pmu_name)))
+ return;
+
+ config = strchr(counter->name, '/');
+ if (config) {
+ if (asprintf(&new_name,
+ "%s%s", counter->pmu_name, config) > 0) {
+ free(counter->name);
+ counter->name = new_name;
+ }
+ } else {
+ if (asprintf(&new_name,
+ "%s [%s]", counter->name, counter->pmu_name) > 0) {
+ free(counter->name);
+ counter->name = new_name;
+ }
+ }
+
+ counter->uniquified_name = true;
+}
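+
+/*
+ * Example with a hypothetical uncore PMU: under --no-merge,
+ * "cas_count_read" on "uncore_imc_0" becomes
+ * "cas_count_read [uncore_imc_0]", while a "pmu/.../" style name is
+ * rebuilt as the real PMU name plus its original config string.
+ */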
+
+static void collect_all_aliases(struct perf_stat_config *config, struct perf_evsel *counter,
+ void (*cb)(struct perf_stat_config *config, struct perf_evsel *counter, void *data,
+ bool first),
+ void *data)
+{
+ struct perf_evlist *evlist = counter->evlist;
+ struct perf_evsel *alias;
+
+ alias = list_prepare_entry(counter, &(evlist->entries), node);
+ list_for_each_entry_continue (alias, &evlist->entries, node) {
+ if (strcmp(perf_evsel__name(alias), perf_evsel__name(counter)) ||
+ alias->scale != counter->scale ||
+ alias->cgrp != counter->cgrp ||
+ strcmp(alias->unit, counter->unit) ||
+ perf_evsel__is_clock(alias) != perf_evsel__is_clock(counter))
+ break;
+ alias->merged_stat = true;
+ cb(config, alias, data, false);
+ }
+}
+
+static bool collect_data(struct perf_stat_config *config, struct perf_evsel *counter,
+ void (*cb)(struct perf_stat_config *config, struct perf_evsel *counter, void *data,
+ bool first),
+ void *data)
+{
+ if (counter->merged_stat)
+ return false;
+ cb(config, counter, data, true);
+ if (config->no_merge)
+ uniquify_event_name(counter);
+ else if (counter->auto_merge_stats)
+ collect_all_aliases(config, counter, cb, data);
+ return true;
+}
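+
+/*
+ * Usage sketch: callers seed an accumulator (e.g. struct aggr_data
+ * below), pass a callback that adds one counter's counts to it, and
+ * the counts of any auto-merged PMU aliases are folded in as well.
+ */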
+
+struct aggr_data {
+ u64 ena, run, val;
+ int id;
+ int nr;
+ int cpu;
+};
+
+static void aggr_cb(struct perf_stat_config *config,
+ struct perf_evsel *counter, void *data, bool first)
+{
+ struct aggr_data *ad = data;
+ int cpu, s2;
+
+ for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
+ struct perf_counts_values *counts;
+
+ s2 = config->aggr_get_id(config, perf_evsel__cpus(counter), cpu);
+ if (s2 != ad->id)
+ continue;
+ if (first)
+ ad->nr++;
+ counts = perf_counts(counter->counts, cpu, 0);
+ /*
+ * When any result is bad, invalidate them all to give
+ * consistent output in interval mode.
+ */
+ if (counts->ena == 0 || counts->run == 0 ||
+ counter->counts->scaled == -1) {
+ ad->ena = 0;
+ ad->run = 0;
+ break;
+ }
+ ad->val += counts->val;
+ ad->ena += counts->ena;
+ ad->run += counts->run;
+ }
+}
+
+static void print_aggr(struct perf_stat_config *config,
+ struct perf_evlist *evlist,
+ char *prefix)
+{
+ bool metric_only = config->metric_only;
+ FILE *output = config->output;
+ struct perf_evsel *counter;
+ int s, id, nr;
+ double uval;
+ u64 ena, run, val;
+ bool first;
+
+ if (!(config->aggr_map || config->aggr_get_id))
+ return;
+
+ aggr_update_shadow(config, evlist);
+
+ /*
+ * With metric_only everything is on a single line.
+ * Without it, each counter has its own line.
+ */
+ for (s = 0; s < config->aggr_map->nr; s++) {
+ struct aggr_data ad;
+ if (prefix && metric_only)
+ fprintf(output, "%s", prefix);
+
+ ad.id = id = config->aggr_map->map[s];
+ first = true;
+ evlist__for_each_entry(evlist, counter) {
+ if (is_duration_time(counter))
+ continue;
+
+ ad.val = ad.ena = ad.run = 0;
+ ad.nr = 0;
+ if (!collect_data(config, counter, aggr_cb, &ad))
+ continue;
+ nr = ad.nr;
+ ena = ad.ena;
+ run = ad.run;
+ val = ad.val;
+ if (first && metric_only) {
+ first = false;
+ aggr_printout(config, counter, id, nr);
+ }
+ if (prefix && !metric_only)
+ fprintf(output, "%s", prefix);
+
+ uval = val * counter->scale;
+ printout(config, id, nr, counter, uval, prefix,
+ run, ena, 1.0, &rt_stat);
+ if (!metric_only)
+ fputc('\n', output);
+ }
+ if (metric_only)
+ fputc('\n', output);
+ }
+}
+
+static int cmp_val(const void *a, const void *b)
+{
+ return ((struct perf_aggr_thread_value *)b)->val -
+ ((struct perf_aggr_thread_value *)a)->val;
+}
+
+static struct perf_aggr_thread_value *sort_aggr_thread(
+ struct perf_evsel *counter,
+ int nthreads, int ncpus,
+ int *ret,
+ struct target *_target)
+{
+ int cpu, thread, i = 0;
+ double uval;
+ struct perf_aggr_thread_value *buf;
+
+ buf = calloc(nthreads, sizeof(struct perf_aggr_thread_value));
+ if (!buf)
+ return NULL;
+
+ for (thread = 0; thread < nthreads; thread++) {
+ u64 ena = 0, run = 0, val = 0;
+
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ val += perf_counts(counter->counts, cpu, thread)->val;
+ ena += perf_counts(counter->counts, cpu, thread)->ena;
+ run += perf_counts(counter->counts, cpu, thread)->run;
+ }
+
+ uval = val * counter->scale;
+
+ /*
+ * Skip zero values when --per-thread is enabled globally,
+ * otherwise the output is flooded with zeroes.
+ */
+ if (uval == 0.0 && target__has_per_thread(_target))
+ continue;
+
+ buf[i].counter = counter;
+ buf[i].id = thread;
+ buf[i].uval = uval;
+ buf[i].val = val;
+ buf[i].run = run;
+ buf[i].ena = ena;
+ i++;
+ }
+
+ qsort(buf, i, sizeof(struct perf_aggr_thread_value), cmp_val);
+
+ if (ret)
+ *ret = i;
+
+ return buf;
+}
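+
+/*
+ * cmp_val() above sorts descending by raw count, so the busiest
+ * threads print first; with a global --per-thread target, idle
+ * threads (value 0) were already dropped while filling the buffer.
+ */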
+
+static void print_aggr_thread(struct perf_stat_config *config,
+ struct target *_target,
+ struct perf_evsel *counter, char *prefix)
+{
+ FILE *output = config->output;
+ int nthreads = thread_map__nr(counter->threads);
+ int ncpus = cpu_map__nr(counter->cpus);
+ int thread, sorted_threads, id;
+ struct perf_aggr_thread_value *buf;
+
+ buf = sort_aggr_thread(counter, nthreads, ncpus, &sorted_threads, _target);
+ if (!buf) {
+ perror("cannot sort aggr thread");
+ return;
+ }
+
+ for (thread = 0; thread < sorted_threads; thread++) {
+ if (prefix)
+ fprintf(output, "%s", prefix);
+
+ id = buf[thread].id;
+ if (config->stats)
+ printout(config, id, 0, buf[thread].counter, buf[thread].uval,
+ prefix, buf[thread].run, buf[thread].ena, 1.0,
+ &config->stats[id]);
+ else
+ printout(config, id, 0, buf[thread].counter, buf[thread].uval,
+ prefix, buf[thread].run, buf[thread].ena, 1.0,
+ &rt_stat);
+ fputc('\n', output);
+ }
+
+ free(buf);
+}
+
+struct caggr_data {
+ double avg, avg_enabled, avg_running;
+};
+
+static void counter_aggr_cb(struct perf_stat_config *config __maybe_unused,
+ struct perf_evsel *counter, void *data,
+ bool first __maybe_unused)
+{
+ struct caggr_data *cd = data;
+ struct perf_stat_evsel *ps = counter->stats;
+
+ cd->avg += avg_stats(&ps->res_stats[0]);
+ cd->avg_enabled += avg_stats(&ps->res_stats[1]);
+ cd->avg_running += avg_stats(&ps->res_stats[2]);
+}
+
+/*
+ * Print out the results of a single counter:
+ * aggregated counts in system-wide mode
+ */
+static void print_counter_aggr(struct perf_stat_config *config,
+ struct perf_evsel *counter, char *prefix)
+{
+ bool metric_only = config->metric_only;
+ FILE *output = config->output;
+ double uval;
+ struct caggr_data cd = { .avg = 0.0 };
+
+ if (!collect_data(config, counter, counter_aggr_cb, &cd))
+ return;
+
+ if (prefix && !metric_only)
+ fprintf(output, "%s", prefix);
+
+ uval = cd.avg * counter->scale;
+ printout(config, -1, 0, counter, uval, prefix, cd.avg_running, cd.avg_enabled,
+ cd.avg, &rt_stat);
+ if (!metric_only)
+ fprintf(output, "\n");
+}
+
+static void counter_cb(struct perf_stat_config *config __maybe_unused,
+ struct perf_evsel *counter, void *data,
+ bool first __maybe_unused)
+{
+ struct aggr_data *ad = data;
+
+ ad->val += perf_counts(counter->counts, ad->cpu, 0)->val;
+ ad->ena += perf_counts(counter->counts, ad->cpu, 0)->ena;
+ ad->run += perf_counts(counter->counts, ad->cpu, 0)->run;
+}
+
+/*
+ * Print out the results of a single counter:
+ * does not use aggregated counts in system-wide mode
+ */
+static void print_counter(struct perf_stat_config *config,
+ struct perf_evsel *counter, char *prefix)
+{
+ FILE *output = config->output;
+ u64 ena, run, val;
+ double uval;
+ int cpu;
+
+ for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
+ struct aggr_data ad = { .cpu = cpu };
+
+ if (!collect_data(config, counter, counter_cb, &ad))
+ return;
+ val = ad.val;
+ ena = ad.ena;
+ run = ad.run;
+
+ if (prefix)
+ fprintf(output, "%s", prefix);
+
+ uval = val * counter->scale;
+ printout(config, cpu, 0, counter, uval, prefix, run, ena, 1.0,
+ &rt_stat);
+
+ fputc('\n', output);
+ }
+}
+
+static void print_no_aggr_metric(struct perf_stat_config *config,
+ struct perf_evlist *evlist,
+ char *prefix)
+{
+ int cpu;
+ int nrcpus = 0;
+ struct perf_evsel *counter;
+ u64 ena, run, val;
+ double uval;
+
+ nrcpus = evlist->cpus->nr;
+ for (cpu = 0; cpu < nrcpus; cpu++) {
+ bool first = true;
+
+ if (prefix)
+ fputs(prefix, config->output);
+ evlist__for_each_entry(evlist, counter) {
+ if (is_duration_time(counter))
+ continue;
+ if (first) {
+ aggr_printout(config, counter, cpu, 0);
+ first = false;
+ }
+ val = perf_counts(counter->counts, cpu, 0)->val;
+ ena = perf_counts(counter->counts, cpu, 0)->ena;
+ run = perf_counts(counter->counts, cpu, 0)->run;
+
+ uval = val * counter->scale;
+ printout(config, cpu, 0, counter, uval, prefix, run, ena, 1.0,
+ &rt_stat);
+ }
+ fputc('\n', config->output);
+ }
+}
+
+static int aggr_header_lens[] = {
+ [AGGR_CORE] = 18,
+ [AGGR_SOCKET] = 12,
+ [AGGR_NONE] = 6,
+ [AGGR_THREAD] = 24,
+ [AGGR_GLOBAL] = 0,
+};
+
+static const char *aggr_header_csv[] = {
+ [AGGR_CORE] = "core,cpus,",
+ [AGGR_SOCKET] = "socket,cpus,",
+ [AGGR_NONE] = "cpu,",
+ [AGGR_THREAD] = "comm-pid,",
+ [AGGR_GLOBAL] = ""
+};
+
+static void print_metric_headers(struct perf_stat_config *config,
+ struct perf_evlist *evlist,
+ const char *prefix, bool no_indent)
+{
+ struct perf_stat_output_ctx out;
+ struct perf_evsel *counter;
+ struct outstate os = {
+ .fh = config->output
+ };
+
+ if (prefix)
+ fprintf(config->output, "%s", prefix);
+
+ if (!config->csv_output && !no_indent)
+ fprintf(config->output, "%*s",
+ aggr_header_lens[config->aggr_mode], "");
+ if (config->csv_output) {
+ if (config->interval)
+ fputs("time,", config->output);
+ fputs(aggr_header_csv[config->aggr_mode], config->output);
+ }
+
+ /* Print metrics headers only */
+ evlist__for_each_entry(evlist, counter) {
+ if (is_duration_time(counter))
+ continue;
+ os.evsel = counter;
+ out.ctx = &os;
+ out.print_metric = print_metric_header;
+ out.new_line = new_line_metric;
+ out.force_header = true;
+ perf_stat__print_shadow_stats(config, counter, 0,
+ 0,
+ &out,
+ &config->metric_events,
+ &rt_stat);
+ }
+ fputc('\n', config->output);
+}
+
+static void print_interval(struct perf_stat_config *config,
+ struct perf_evlist *evlist,
+ char *prefix, struct timespec *ts)
+{
+ bool metric_only = config->metric_only;
+ unsigned int unit_width = config->unit_width;
+ FILE *output = config->output;
+ static int num_print_interval;
+
+ if (config->interval_clear)
+ puts(CONSOLE_CLEAR);
+
+ sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, config->csv_sep);
+
+ if ((num_print_interval == 0 && !config->csv_output) || config->interval_clear) {
+ switch (config->aggr_mode) {
+ case AGGR_SOCKET:
+ fprintf(output, "# time socket cpus");
+ if (!metric_only)
+ fprintf(output, " counts %*s events\n", unit_width, "unit");
+ break;
+ case AGGR_CORE:
+ fprintf(output, "# time core cpus");
+ if (!metric_only)
+ fprintf(output, " counts %*s events\n", unit_width, "unit");
+ break;
+ case AGGR_NONE:
+ fprintf(output, "# time CPU ");
+ if (!metric_only)
+ fprintf(output, " counts %*s events\n", unit_width, "unit");
+ break;
+ case AGGR_THREAD:
+ fprintf(output, "# time comm-pid");
+ if (!metric_only)
+ fprintf(output, " counts %*s events\n", unit_width, "unit");
+ break;
+ case AGGR_GLOBAL:
+ default:
+ fprintf(output, "# time");
+ if (!metric_only)
+ fprintf(output, " counts %*s events\n", unit_width, "unit");
+ case AGGR_UNSET:
+ break;
+ }
+ }
+
+ if ((num_print_interval == 0 || config->interval_clear) && metric_only)
+ print_metric_headers(config, evlist, " ", true);
+ if (++num_print_interval == 25)
+ num_print_interval = 0;
+}
+
+static void print_header(struct perf_stat_config *config,
+ struct target *_target,
+ int argc, const char **argv)
+{
+ FILE *output = config->output;
+ int i;
+
+ fflush(stdout);
+
+ if (!config->csv_output) {
+ fprintf(output, "\n");
+ fprintf(output, " Performance counter stats for ");
+ if (_target->system_wide)
+ fprintf(output, "\'system wide");
+ else if (_target->cpu_list)
+ fprintf(output, "\'CPU(s) %s", _target->cpu_list);
+ else if (!target__has_task(_target)) {
+ fprintf(output, "\'%s", argv ? argv[0] : "pipe");
+ for (i = 1; argv && (i < argc); i++)
+ fprintf(output, " %s", argv[i]);
+ } else if (_target->pid)
+ fprintf(output, "process id \'%s", _target->pid);
+ else
+ fprintf(output, "thread id \'%s", _target->tid);
+
+ fprintf(output, "\'");
+ if (config->run_count > 1)
+ fprintf(output, " (%d runs)", config->run_count);
+ fprintf(output, ":\n\n");
+ }
+}
+
+static int get_precision(double num)
+{
+ if (num > 1)
+ return 0;
+
+ return lround(ceil(-log10(num)));
+}
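+
+/*
+ * Worked example: for a stddev of 0.004 seconds, -log10(0.004) is
+ * about 2.4, so this returns 3; print_footer() below adds 2 and the
+ * elapsed time is shown with 5 decimal places.
+ */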
+
+static void print_table(struct perf_stat_config *config,
+ FILE *output, int precision, double avg)
+{
+ char tmp[64];
+ int idx, indent = 0;
+
+ scnprintf(tmp, 64, " %17.*f", precision, avg);
+ while (tmp[indent] == ' ')
+ indent++;
+
+ fprintf(output, "%*s# Table of individual measurements:\n", indent, "");
+
+ for (idx = 0; idx < config->run_count; idx++) {
+ double run = (double) config->walltime_run[idx] / NSEC_PER_SEC;
+ int h, n = 1 + abs((int) (100.0 * (run - avg)/run) / 5);
+
+ fprintf(output, " %17.*f (%+.*f) ",
+ precision, run, precision, run - avg);
+
+ for (h = 0; h < n; h++)
+ fprintf(output, "#");
+
+ fprintf(output, "\n");
+ }
+
+ fprintf(output, "\n%*s# Final result:\n", indent, "");
+}
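+
+/*
+ * Rough sketch of the output for "perf stat -r 3 --table" with
+ * hypothetical runs of 5.2s, 5.4s and 5.9s (average 5.5s):
+ *
+ *   # Table of individual measurements:
+ *   5.200 (-0.300) ##
+ *   5.400 (-0.100) #
+ *   5.900 (+0.400) ##
+ *
+ *   # Final result:
+ */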
+
+static double timeval2double(struct timeval *t)
+{
+ return t->tv_sec + (double) t->tv_usec/USEC_PER_SEC;
+}
+
+static void print_footer(struct perf_stat_config *config)
+{
+ double avg = avg_stats(config->walltime_nsecs_stats) / NSEC_PER_SEC;
+ FILE *output = config->output;
+ int n;
+
+ if (!config->null_run)
+ fprintf(output, "\n");
+
+ if (config->run_count == 1) {
+ fprintf(output, " %17.9f seconds time elapsed", avg);
+
+ if (config->ru_display) {
+ double ru_utime = timeval2double(&config->ru_data.ru_utime);
+ double ru_stime = timeval2double(&config->ru_data.ru_stime);
+
+ fprintf(output, "\n\n");
+ fprintf(output, " %17.9f seconds user\n", ru_utime);
+ fprintf(output, " %17.9f seconds sys\n", ru_stime);
+ }
+ } else {
+ double sd = stddev_stats(config->walltime_nsecs_stats) / NSEC_PER_SEC;
+ /*
+ * Display at most 2 more significant
+ * digits than the stddev inaccuracy.
+ */
+ int precision = get_precision(sd) + 2;
+
+ if (config->walltime_run_table)
+ print_table(config, output, precision, avg);
+
+ fprintf(output, " %17.*f +- %.*f seconds time elapsed",
+ precision, avg, precision, sd);
+
+ print_noise_pct(config, sd, avg);
+ }
+ fprintf(output, "\n\n");
+
+ if (config->print_free_counters_hint &&
+ sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 &&
+ n > 0)
+ fprintf(output,
+"Some events weren't counted. Try disabling the NMI watchdog:\n"
+" echo 0 > /proc/sys/kernel/nmi_watchdog\n"
+" perf stat ...\n"
+" echo 1 > /proc/sys/kernel/nmi_watchdog\n");
+
+ if (config->print_mixed_hw_group_error)
+ fprintf(output,
+ "The events in group usually have to be from "
+ "the same PMU. Try reorganizing the group.\n");
+}
+
+void
+perf_evlist__print_counters(struct perf_evlist *evlist,
+ struct perf_stat_config *config,
+ struct target *_target,
+ struct timespec *ts,
+ int argc, const char **argv)
+{
+ bool metric_only = config->metric_only;
+ int interval = config->interval;
+ struct perf_evsel *counter;
+ char buf[64], *prefix = NULL;
+
+ if (interval)
+ print_interval(config, evlist, prefix = buf, ts);
+ else
+ print_header(config, _target, argc, argv);
+
+ if (metric_only) {
+ static int num_print_iv;
+
+ if (num_print_iv == 0 && !interval)
+ print_metric_headers(config, evlist, prefix, false);
+ if (num_print_iv++ == 25)
+ num_print_iv = 0;
+ if (config->aggr_mode == AGGR_GLOBAL && prefix)
+ fprintf(config->output, "%s", prefix);
+ }
+
+ switch (config->aggr_mode) {
+ case AGGR_CORE:
+ case AGGR_SOCKET:
+ print_aggr(config, evlist, prefix);
+ break;
+ case AGGR_THREAD:
+ evlist__for_each_entry(evlist, counter) {
+ if (is_duration_time(counter))
+ continue;
+ print_aggr_thread(config, _target, counter, prefix);
+ }
+ break;
+ case AGGR_GLOBAL:
+ evlist__for_each_entry(evlist, counter) {
+ if (is_duration_time(counter))
+ continue;
+ print_counter_aggr(config, counter, prefix);
+ }
+ if (metric_only)
+ fputc('\n', config->output);
+ break;
+ case AGGR_NONE:
+ if (metric_only)
+ print_no_aggr_metric(config, evlist, prefix);
+ else {
+ evlist__for_each_entry(evlist, counter) {
+ if (is_duration_time(counter))
+ continue;
+ print_counter(config, counter, prefix);
+ }
+ }
+ break;
+ case AGGR_UNSET:
+ default:
+ break;
+ }
+
+ if (!interval && !config->csv_output)
+ print_footer(config);
+
+ fflush(config->output);
+}
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 99990f5f2512..8ad32763cfff 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -410,7 +410,8 @@ static double runtime_stat_n(struct runtime_stat *st,
return v->stats.n;
}
-static void print_stalled_cycles_frontend(int cpu,
+static void print_stalled_cycles_frontend(struct perf_stat_config *config,
+ int cpu,
struct perf_evsel *evsel, double avg,
struct perf_stat_output_ctx *out,
struct runtime_stat *st)
@@ -427,13 +428,14 @@ static void print_stalled_cycles_frontend(int cpu,
color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);
if (ratio)
- out->print_metric(out->ctx, color, "%7.2f%%", "frontend cycles idle",
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "frontend cycles idle",
ratio);
else
- out->print_metric(out->ctx, NULL, NULL, "frontend cycles idle", 0);
+ out->print_metric(config, out->ctx, NULL, NULL, "frontend cycles idle", 0);
}
-static void print_stalled_cycles_backend(int cpu,
+static void print_stalled_cycles_backend(struct perf_stat_config *config,
+ int cpu,
struct perf_evsel *evsel, double avg,
struct perf_stat_output_ctx *out,
struct runtime_stat *st)
@@ -449,10 +451,11 @@ static void print_stalled_cycles_backend(int cpu,
color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
- out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}
-static void print_branch_misses(int cpu,
+static void print_branch_misses(struct perf_stat_config *config,
+ int cpu,
struct perf_evsel *evsel,
double avg,
struct perf_stat_output_ctx *out,
@@ -469,10 +472,11 @@ static void print_branch_misses(int cpu,
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- out->print_metric(out->ctx, color, "%7.2f%%", "of all branches", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "of all branches", ratio);
}
-static void print_l1_dcache_misses(int cpu,
+static void print_l1_dcache_misses(struct perf_stat_config *config,
+ int cpu,
struct perf_evsel *evsel,
double avg,
struct perf_stat_output_ctx *out,
@@ -490,10 +494,11 @@ static void print_l1_dcache_misses(int cpu,
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
}
-static void print_l1_icache_misses(int cpu,
+static void print_l1_icache_misses(struct perf_stat_config *config,
+ int cpu,
struct perf_evsel *evsel,
double avg,
struct perf_stat_output_ctx *out,
@@ -510,10 +515,11 @@ static void print_l1_icache_misses(int cpu,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- out->print_metric(out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
}
-static void print_dtlb_cache_misses(int cpu,
+static void print_dtlb_cache_misses(struct perf_stat_config *config,
+ int cpu,
struct perf_evsel *evsel,
double avg,
struct perf_stat_output_ctx *out,
@@ -529,10 +535,11 @@ static void print_dtlb_cache_misses(int cpu,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- out->print_metric(out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
}
-static void print_itlb_cache_misses(int cpu,
+static void print_itlb_cache_misses(struct perf_stat_config *config,
+ int cpu,
struct perf_evsel *evsel,
double avg,
struct perf_stat_output_ctx *out,
@@ -548,10 +555,11 @@ static void print_itlb_cache_misses(int cpu,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- out->print_metric(out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
}
-static void print_ll_cache_misses(int cpu,
+static void print_ll_cache_misses(struct perf_stat_config *config,
+ int cpu,
struct perf_evsel *evsel,
double avg,
struct perf_stat_output_ctx *out,
@@ -567,7 +575,7 @@ static void print_ll_cache_misses(int cpu,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- out->print_metric(out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
}
/*
@@ -674,7 +682,8 @@ static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
return sanitize_val(1.0 - sum);
}
-static void print_smi_cost(int cpu, struct perf_evsel *evsel,
+static void print_smi_cost(struct perf_stat_config *config,
+ int cpu, struct perf_evsel *evsel,
struct perf_stat_output_ctx *out,
struct runtime_stat *st)
{
@@ -694,11 +703,12 @@ static void print_smi_cost(int cpu, struct perf_evsel *evsel,
if (cost > 10)
color = PERF_COLOR_RED;
- out->print_metric(out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
- out->print_metric(out->ctx, NULL, "%4.0f", "SMI#", smi_num);
+ out->print_metric(config, out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
+ out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}
-static void generic_metric(const char *metric_expr,
+static void generic_metric(struct perf_stat_config *config,
+ const char *metric_expr,
struct perf_evsel **metric_events,
char *name,
const char *metric_name,
@@ -737,20 +747,21 @@ static void generic_metric(const char *metric_expr,
const char *p = metric_expr;
if (expr__parse(&ratio, &pctx, &p) == 0)
- print_metric(ctxp, NULL, "%8.1f",
+ print_metric(config, ctxp, NULL, "%8.1f",
metric_name ?
metric_name :
out->force_header ? name : "",
ratio);
else
- print_metric(ctxp, NULL, NULL,
+ print_metric(config, ctxp, NULL, NULL,
out->force_header ?
(metric_name ? metric_name : name) : "", 0);
} else
- print_metric(ctxp, NULL, NULL, "", 0);
+ print_metric(config, ctxp, NULL, NULL, "", 0);
}
-void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
+void perf_stat__print_shadow_stats(struct perf_stat_config *config,
+ struct perf_evsel *evsel,
double avg, int cpu,
struct perf_stat_output_ctx *out,
struct rblist *metric_events,
@@ -769,10 +780,10 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
if (total) {
ratio = avg / total;
- print_metric(ctxp, NULL, "%7.2f ",
+ print_metric(config, ctxp, NULL, "%7.2f ",
"insn per cycle", ratio);
} else {
- print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
+ print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
}
total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT,
@@ -783,20 +794,20 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
ctx, cpu));
if (total && avg) {
- out->new_line(ctxp);
+ out->new_line(config, ctxp);
ratio = total / avg;
- print_metric(ctxp, NULL, "%7.2f ",
+ print_metric(config, ctxp, NULL, "%7.2f ",
"stalled cycles per insn",
ratio);
} else if (have_frontend_stalled) {
- print_metric(ctxp, NULL, NULL,
+ print_metric(config, ctxp, NULL, NULL,
"stalled cycles per insn", 0);
}
} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
- print_branch_misses(cpu, evsel, avg, out, st);
+ print_branch_misses(config, cpu, evsel, avg, out, st);
else
- print_metric(ctxp, NULL, NULL, "of all branches", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
@@ -804,9 +815,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
- print_l1_dcache_misses(cpu, evsel, avg, out, st);
+ print_l1_dcache_misses(config, cpu, evsel, avg, out, st);
else
- print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all L1-dcache hits", 0);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
@@ -814,9 +825,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
- print_l1_icache_misses(cpu, evsel, avg, out, st);
+ print_l1_icache_misses(config, cpu, evsel, avg, out, st);
else
- print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all L1-icache hits", 0);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
@@ -824,9 +835,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
- print_dtlb_cache_misses(cpu, evsel, avg, out, st);
+ print_dtlb_cache_misses(config, cpu, evsel, avg, out, st);
else
- print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all dTLB cache hits", 0);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
@@ -834,9 +845,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
- print_itlb_cache_misses(cpu, evsel, avg, out, st);
+ print_itlb_cache_misses(config, cpu, evsel, avg, out, st);
else
- print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all iTLB cache hits", 0);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
@@ -844,9 +855,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
- print_ll_cache_misses(cpu, evsel, avg, out, st);
+ print_ll_cache_misses(config, cpu, evsel, avg, out, st);
else
- print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all LL-cache hits", 0);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
@@ -854,32 +865,32 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
ratio = avg * 100 / total;
if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
- print_metric(ctxp, NULL, "%8.3f %%",
+ print_metric(config, ctxp, NULL, "%8.3f %%",
"of all cache refs", ratio);
else
- print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
- print_stalled_cycles_frontend(cpu, evsel, avg, out, st);
+ print_stalled_cycles_frontend(config, cpu, evsel, avg, out, st);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
- print_stalled_cycles_backend(cpu, evsel, avg, out, st);
+ print_stalled_cycles_backend(config, cpu, evsel, avg, out, st);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
if (total) {
ratio = avg / total;
- print_metric(ctxp, NULL, "%8.3f", "GHz", ratio);
+ print_metric(config, ctxp, NULL, "%8.3f", "GHz", ratio);
} else {
- print_metric(ctxp, NULL, NULL, "Ghz", 0);
+ print_metric(config, ctxp, NULL, NULL, "GHz", 0);
}
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
if (total)
- print_metric(ctxp, NULL,
+ print_metric(config, ctxp, NULL,
"%7.2f%%", "transactional cycles",
100.0 * (avg / total));
else
- print_metric(ctxp, NULL, NULL, "transactional cycles",
+ print_metric(config, ctxp, NULL, NULL, "transactional cycles",
0);
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
@@ -888,10 +899,10 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
if (total2 < avg)
total2 = avg;
if (total)
- print_metric(ctxp, NULL, "%7.2f%%", "aborted cycles",
+ print_metric(config, ctxp, NULL, "%7.2f%%", "aborted cycles",
100.0 * ((total2-avg) / total));
else
- print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
+ print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
ctx, cpu);
@@ -900,10 +911,10 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
ratio = total / avg;
if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
- print_metric(ctxp, NULL, "%8.0f",
+ print_metric(config, ctxp, NULL, "%8.0f",
"cycles / transaction", ratio);
else
- print_metric(ctxp, NULL, NULL, "cycles / transaction",
+ print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
0);
} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
@@ -912,33 +923,33 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
if (avg)
ratio = total / avg;
- print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
+ print_metric(config, ctxp, NULL, "%8.0f", "cycles / elision", ratio);
} else if (perf_evsel__is_clock(evsel)) {
if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
- print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
+ print_metric(config, ctxp, NULL, "%8.3f", "CPUs utilized",
avg / (ratio * evsel->scale));
else
- print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
+ print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
double fe_bound = td_fe_bound(ctx, cpu, st);
if (fe_bound > 0.2)
color = PERF_COLOR_RED;
- print_metric(ctxp, color, "%8.1f%%", "frontend bound",
+ print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
fe_bound * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
double retiring = td_retiring(ctx, cpu, st);
if (retiring > 0.7)
color = PERF_COLOR_GREEN;
- print_metric(ctxp, color, "%8.1f%%", "retiring",
+ print_metric(config, ctxp, color, "%8.1f%%", "retiring",
retiring * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
double bad_spec = td_bad_spec(ctx, cpu, st);
if (bad_spec > 0.1)
color = PERF_COLOR_RED;
- print_metric(ctxp, color, "%8.1f%%", "bad speculation",
+ print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
bad_spec * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
double be_bound = td_be_bound(ctx, cpu, st);
@@ -955,12 +966,12 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
if (be_bound > 0.2)
color = PERF_COLOR_RED;
if (td_total_slots(ctx, cpu, st) > 0)
- print_metric(ctxp, color, "%8.1f%%", name,
+ print_metric(config, ctxp, color, "%8.1f%%", name,
be_bound * 100.);
else
- print_metric(ctxp, NULL, NULL, name, 0);
+ print_metric(config, ctxp, NULL, NULL, name, 0);
} else if (evsel->metric_expr) {
- generic_metric(evsel->metric_expr, evsel->metric_events, evsel->name,
+ generic_metric(config, evsel->metric_expr, evsel->metric_events, evsel->name,
evsel->metric_name, avg, cpu, out, st);
} else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
char unit = 'M';
@@ -975,9 +986,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
unit = 'K';
}
snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
- print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
+ print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
- print_smi_cost(cpu, evsel, out, st);
+ print_smi_cost(config, cpu, evsel, out, st);
} else {
num = 0;
}
@@ -987,12 +998,12 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
list_for_each_entry (mexp, &me->head, nd) {
if (num++ > 0)
- out->new_line(ctxp);
- generic_metric(mexp->metric_expr, mexp->metric_events,
+ out->new_line(config, ctxp);
+ generic_metric(config, mexp->metric_expr, mexp->metric_events,
evsel->name, mexp->metric_name,
avg, cpu, out, st);
}
}
if (num == 0)
- print_metric(ctxp, NULL, NULL, NULL, 0);
+ print_metric(config, ctxp, NULL, NULL, NULL, 0);
}
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index a0061e0b0fad..4d40515307b8 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -374,9 +374,8 @@ int perf_stat_process_counter(struct perf_stat_config *config,
return 0;
}
-int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
- union perf_event *event,
- struct perf_session *session)
+int perf_event__process_stat_event(struct perf_session *session,
+ union perf_event *event)
{
struct perf_counts_values count;
struct stat_event *st = &event->stat;
@@ -435,3 +434,98 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
return ret;
}
+
+int create_perf_stat_counter(struct perf_evsel *evsel,
+ struct perf_stat_config *config,
+ struct target *target)
+{
+ struct perf_event_attr *attr = &evsel->attr;
+ struct perf_evsel *leader = evsel->leader;
+
+ if (config->scale) {
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
+ }
+
+ /*
+ * The event is part of a non-trivial group; enable
+ * group read (for the leader) and ID retrieval for all
+ * members.
+ */
+ if (leader->nr_members > 1)
+ attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
+
+ attr->inherit = !config->no_inherit;
+
+ /*
+ * Some events are initialized with sample_(period/type) set,
+ * like tracepoints. Clear them for counting.
+ */
+ attr->sample_period = 0;
+
+ if (config->identifier)
+ attr->sample_type = PERF_SAMPLE_IDENTIFIER;
+
+ /*
+ * Disable all counters initially; they will be enabled
+ * either manually by us or by the kernel via enable_on_exec
+ * set later.
+ */
+ if (perf_evsel__is_group_leader(evsel)) {
+ attr->disabled = 1;
+
+ /*
+ * When initial_delay is set, we enable the tracee's
+ * events manually.
+ */
+ if (target__none(target) && !config->initial_delay)
+ attr->enable_on_exec = 1;
+ }
+
+ if (target__has_cpu(target) && !target__has_per_thread(target))
+ return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
+
+ return perf_evsel__open_per_thread(evsel, evsel->threads);
+}
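+
+/*
+ * Usage sketch (simplified from how builtin-stat uses this): the
+ * counter is opened per CPU for cpu/system-wide targets and per
+ * thread otherwise; group leaders start disabled and are enabled
+ * later, either explicitly or by the kernel via enable_on_exec.
+ */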
+
+int perf_stat_synthesize_config(struct perf_stat_config *config,
+ struct perf_tool *tool,
+ struct perf_evlist *evlist,
+ perf_event__handler_t process,
+ bool attrs)
+{
+ int err;
+
+ if (attrs) {
+ err = perf_event__synthesize_attrs(tool, evlist, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize attrs.\n");
+ return err;
+ }
+ }
+
+ err = perf_event__synthesize_extra_attr(tool, evlist, process,
+ attrs);
+ if (err < 0) {
+ pr_err("Couldn't synthesize extra attrs.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_thread_map2(tool, evlist->threads,
+ process, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize thread map.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_cpu_map(tool, evlist->cpus,
+ process, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize thread map.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_stat_config(tool, config, process, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize config.\n");
+ return err;
+ }
+
+ return 0;
+}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 36efb986f7fc..2f9c9159a364 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -4,8 +4,14 @@
#include <linux/types.h>
#include <stdio.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
#include "xyarray.h"
#include "rblist.h"
+#include "perf.h"
+#include "event.h"
struct stats {
double n, mean, M2;
@@ -84,15 +90,42 @@ struct runtime_stat {
struct rblist value_list;
};
+typedef int (*aggr_get_id_t)(struct perf_stat_config *config,
+ struct cpu_map *m, int cpu);
+
struct perf_stat_config {
- enum aggr_mode aggr_mode;
- bool scale;
- FILE *output;
- unsigned int interval;
- unsigned int timeout;
- int times;
- struct runtime_stat *stats;
- int stats_num;
+ enum aggr_mode aggr_mode;
+ bool scale;
+ bool no_inherit;
+ bool identifier;
+ bool csv_output;
+ bool interval_clear;
+ bool metric_only;
+ bool null_run;
+ bool ru_display;
+ bool big_num;
+ bool no_merge;
+ bool walltime_run_table;
+ FILE *output;
+ unsigned int interval;
+ unsigned int timeout;
+ unsigned int initial_delay;
+ unsigned int unit_width;
+ unsigned int metric_only_len;
+ int times;
+ int run_count;
+ int print_free_counters_hint;
+ int print_mixed_hw_group_error;
+ struct runtime_stat *stats;
+ int stats_num;
+ const char *csv_sep;
+ struct stats *walltime_nsecs_stats;
+ struct rusage ru_data;
+ struct cpu_map *aggr_map;
+ aggr_get_id_t aggr_get_id;
+ struct cpu_map *cpus_aggr_map;
+ u64 *walltime_run;
+ struct rblist metric_events;
};
void update_stats(struct stats *stats, u64 val);
@@ -130,9 +163,10 @@ bool __perf_evsel_stat__is(struct perf_evsel *evsel,
extern struct runtime_stat rt_stat;
extern struct stats walltime_nsecs_stats;
-typedef void (*print_metric_t)(void *ctx, const char *color, const char *unit,
+typedef void (*print_metric_t)(struct perf_stat_config *config,
+ void *ctx, const char *color, const char *unit,
const char *fmt, double val);
-typedef void (*new_line_t )(void *ctx);
+typedef void (*new_line_t)(struct perf_stat_config *config, void *ctx);
void runtime_stat__init(struct runtime_stat *st);
void runtime_stat__exit(struct runtime_stat *st);
@@ -148,7 +182,8 @@ struct perf_stat_output_ctx {
bool force_header;
};
-void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
+void perf_stat__print_shadow_stats(struct perf_stat_config *config,
+ struct perf_evsel *evsel,
double avg, int cpu,
struct perf_stat_output_ctx *out,
struct rblist *metric_events,
@@ -164,11 +199,25 @@ int perf_stat_process_counter(struct perf_stat_config *config,
struct perf_tool;
union perf_event;
struct perf_session;
-int perf_event__process_stat_event(struct perf_tool *tool,
- union perf_event *event,
- struct perf_session *session);
+int perf_event__process_stat_event(struct perf_session *session,
+ union perf_event *event);
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
+
+int create_perf_stat_counter(struct perf_evsel *evsel,
+ struct perf_stat_config *config,
+ struct target *target);
+int perf_stat_synthesize_config(struct perf_stat_config *config,
+ struct perf_tool *tool,
+ struct perf_evlist *evlist,
+ perf_event__handler_t process,
+ bool attrs);
+void
+perf_evlist__print_counters(struct perf_evlist *evlist,
+ struct perf_stat_config *config,
+ struct target *_target,
+ struct timespec *ts,
+ int argc, const char **argv);
#endif
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 3d1cf5bf7f18..9005fbe0780e 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -98,19 +98,25 @@ static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
va_copy(ap_saved, ap);
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
- if (len < 0)
+ if (len < 0) {
+ va_end(ap_saved);
return len;
+ }
if (len > strbuf_avail(sb)) {
ret = strbuf_grow(sb, len);
- if (ret)
+ if (ret) {
+ va_end(ap_saved);
return ret;
+ }
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
- va_end(ap_saved);
if (len > strbuf_avail(sb)) {
pr_debug("this should not happen, your vsnprintf is broken");
+ va_end(ap_saved);
return -EINVAL;
}
}
+ va_end(ap_saved);
return strbuf_setlen(sb, sb->len + len);
}
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index dd17d6a38d3a..c091635bf7dc 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -36,6 +36,7 @@
* @branch_count: the branch count when the entry was created
* @cp: call path
* @no_call: a 'call' was not seen
+ * @trace_end: a 'call' recorded because the trace ended
*/
struct thread_stack_entry {
u64 ret_addr;
@@ -44,6 +45,7 @@ struct thread_stack_entry {
u64 branch_count;
struct call_path *cp;
bool no_call;
+ bool trace_end;
};
/**
@@ -112,7 +114,8 @@ static struct thread_stack *thread_stack__new(struct thread *thread,
return ts;
}
-static int thread_stack__push(struct thread_stack *ts, u64 ret_addr)
+static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
+ bool trace_end)
{
int err = 0;
@@ -124,6 +127,7 @@ static int thread_stack__push(struct thread_stack *ts, u64 ret_addr)
}
}
+ ts->stack[ts->cnt].trace_end = trace_end;
ts->stack[ts->cnt++].ret_addr = ret_addr;
return err;
@@ -150,6 +154,18 @@ static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
}
}
+static void thread_stack__pop_trace_end(struct thread_stack *ts)
+{
+ size_t i;
+
+ for (i = ts->cnt; i; ) {
+ if (ts->stack[--i].trace_end)
+ ts->cnt = i;
+ else
+ return;
+ }
+}
+
static bool thread_stack__in_kernel(struct thread_stack *ts)
{
if (!ts->cnt)
@@ -254,10 +270,19 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
ret_addr = from_ip + insn_len;
if (ret_addr == to_ip)
return 0; /* Zero-length calls are excluded */
- return thread_stack__push(thread->ts, ret_addr);
- } else if (flags & PERF_IP_FLAG_RETURN) {
- if (!from_ip)
- return 0;
+ return thread_stack__push(thread->ts, ret_addr,
+ flags & PERF_IP_FLAG_TRACE_END);
+ } else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
+ /*
+ * If the caller did not change the trace number (which would
+ * have flushed the stack) then try to make sense of the stack.
+ * Possibly, tracing began after returning to the current
+ * address, so try to pop that. Also, do not expect a call made
+ * when the trace ended, to return, so pop that.
+ */
+ thread_stack__pop(thread->ts, to_ip);
+ thread_stack__pop_trace_end(thread->ts);
+ } else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
thread_stack__pop(thread->ts, to_ip);
}
@@ -332,7 +357,7 @@ void call_return_processor__free(struct call_return_processor *crp)
static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
u64 timestamp, u64 ref, struct call_path *cp,
- bool no_call)
+ bool no_call, bool trace_end)
{
struct thread_stack_entry *tse;
int err;
@@ -350,6 +375,7 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
tse->branch_count = ts->branch_count;
tse->cp = cp;
tse->no_call = no_call;
+ tse->trace_end = trace_end;
return 0;
}
@@ -423,7 +449,7 @@ static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts,
return -ENOMEM;
return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp,
- true);
+ true, false);
}
static int thread_stack__no_call_return(struct thread *thread,
@@ -455,7 +481,7 @@ static int thread_stack__no_call_return(struct thread *thread,
if (!cp)
return -ENOMEM;
return thread_stack__push_cp(ts, 0, sample->time, ref,
- cp, true);
+ cp, true, false);
}
} else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
/* Return to userspace, so pop all kernel addresses */
@@ -480,7 +506,7 @@ static int thread_stack__no_call_return(struct thread *thread,
return -ENOMEM;
err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
- true);
+ true, false);
if (err)
return err;
@@ -500,7 +526,7 @@ static int thread_stack__trace_begin(struct thread *thread,
/* Pop trace end */
tse = &ts->stack[ts->cnt - 1];
- if (tse->cp->sym == NULL && tse->cp->ip == 0) {
+ if (tse->trace_end) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
timestamp, ref, false);
if (err)
@@ -529,7 +555,7 @@ static int thread_stack__trace_end(struct thread_stack *ts,
ret_addr = sample->ip + sample->insn_len;
return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
- false);
+ false, true);
}
int thread_stack__process(struct thread *thread, struct comm *comm,
@@ -579,6 +605,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
ts->last_time = sample->time;
if (sample->flags & PERF_IP_FLAG_CALL) {
+ bool trace_end = sample->flags & PERF_IP_FLAG_TRACE_END;
struct call_path_root *cpr = ts->crp->cpr;
struct call_path *cp;
u64 ret_addr;
@@ -596,7 +623,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
if (!cp)
return -ENOMEM;
err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
- cp, false);
+ cp, false, trace_end);
} else if (sample->flags & PERF_IP_FLAG_RETURN) {
if (!sample->ip || !sample->addr)
return 0;
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 183c91453522..56e4ca54020a 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -26,15 +26,12 @@ typedef int (*event_attr_op)(struct perf_tool *tool,
union perf_event *event,
struct perf_evlist **pevlist);
-typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
- struct perf_session *session);
+typedef int (*event_op2)(struct perf_session *session, union perf_event *event);
+typedef s64 (*event_op3)(struct perf_session *session, union perf_event *event);
typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event,
struct ordered_events *oe);
-typedef s64 (*event_op3)(struct perf_tool *tool, union perf_event *event,
- struct perf_session *session);
-
enum show_feature_header {
SHOW_FEAT_NO_HEADER = 0,
SHOW_FEAT_HEADER,
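
The hunk flips event_op2 and event_op3 from (tool, event, session) to (session, event); handlers are expected to reach the tool through the session. A hedged sketch of the new shape (the session layout below is an assumption for illustration, not the real struct):

    #include <stddef.h>

    union perf_event;                /* opaque in this sketch */
    struct perf_tool;

    struct perf_session {
        struct perf_tool *tool;      /* handlers get the tool from here */
    };

    typedef int  (*event_op2)(struct perf_session *session, union perf_event *event);
    typedef long (*event_op3)(struct perf_session *session, union perf_event *event); /* s64 upstream */

    static int handle_tracing_data(struct perf_session *session,
                                   union perf_event *event)
    {
        (void)session; (void)event;  /* would dispatch via session->tool */
        return 0;
    }

    int main(void)
    {
        struct perf_session s = { 0 };
        event_op2 op = handle_tracing_data;

        return op(&s, NULL);
    }
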
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 7b0ca7cbb7de..8ad8e755127b 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -531,12 +531,14 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
"/tmp/perf-XXXXXX");
if (!mkstemp(tdata->temp_file)) {
pr_debug("Can't make temp file");
+ free(tdata);
return NULL;
}
temp_fd = open(tdata->temp_file, O_RDWR);
if (temp_fd < 0) {
pr_debug("Can't read '%s'", tdata->temp_file);
+ free(tdata);
return NULL;
}
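
Both error paths now release tdata before returning, since on failure the caller never sees the pointer and cannot free it. The same pattern in a freestanding form (names are generic stand-ins, not the perf ones):

    #include <stdlib.h>

    struct tracing_data { int temp_fd; };

    static struct tracing_data *tracing_data_get(int make_temp_ok, int open_ok)
    {
        struct tracing_data *tdata = calloc(1, sizeof(*tdata));

        if (!tdata)
            return NULL;
        if (!make_temp_ok) {         /* e.g. mkstemp() failed */
            free(tdata);             /* was leaked before the fix */
            return NULL;
        }
        if (!open_ok) {              /* e.g. open() failed */
            free(tdata);
            return NULL;
        }
        return tdata;
    }

    int main(void)
    {
        free(tracing_data_get(1, 1));
        return 0;
    }
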
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index e76214f8d596..32e558a65af3 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -33,14 +33,15 @@ static int get_common_field(struct scripting_context *context,
int *offset, int *size, const char *type)
{
struct tep_handle *pevent = context->pevent;
- struct event_format *event;
- struct format_field *field;
+ struct tep_event_format *event;
+ struct tep_format_field *field;
if (!*size) {
- if (!pevent->events)
+
+ event = tep_get_first_event(pevent);
+ if (!event)
return 0;
- event = pevent->events[0];
field = tep_find_common_field(event, type);
if (!field)
return 0;
@@ -94,9 +95,9 @@ int common_pc(struct scripting_context *context)
}
unsigned long long
-raw_field_value(struct event_format *event, const char *name, void *data)
+raw_field_value(struct tep_event_format *event, const char *name, void *data)
{
- struct format_field *field;
+ struct tep_format_field *field;
unsigned long long val;
field = tep_find_any_field(event, name);
@@ -108,12 +109,12 @@ raw_field_value(struct event_format *event, const char *name, void *data)
return val;
}
-unsigned long long read_size(struct event_format *event, void *ptr, int size)
+unsigned long long read_size(struct tep_event_format *event, void *ptr, int size)
{
return tep_read_number(event->pevent, ptr, size);
}
-void event_format__fprintf(struct event_format *event,
+void event_format__fprintf(struct tep_event_format *event,
int cpu, void *data, int size, FILE *fp)
{
struct tep_record record;
@@ -130,7 +131,7 @@ void event_format__fprintf(struct event_format *event,
trace_seq_destroy(&s);
}
-void event_format__print(struct event_format *event,
+void event_format__print(struct tep_event_format *event,
int cpu, void *data, int size)
{
return event_format__fprintf(event, cpu, data, size, stdout);
@@ -158,6 +159,7 @@ void parse_ftrace_printk(struct tep_handle *pevent,
printk = strdup(fmt+1);
line = strtok_r(NULL, "\n", &next);
tep_register_print_string(pevent, printk, addr);
+ free(printk);
}
}
@@ -188,29 +190,33 @@ int parse_event_file(struct tep_handle *pevent,
return tep_parse_event(pevent, buf, size, sys);
}
-struct event_format *trace_find_next_event(struct tep_handle *pevent,
- struct event_format *event)
+struct tep_event_format *trace_find_next_event(struct tep_handle *pevent,
+ struct tep_event_format *event)
{
static int idx;
+ int events_count;
+ struct tep_event_format *all_events;
- if (!pevent || !pevent->events)
+ all_events = tep_get_first_event(pevent);
+ events_count = tep_get_events_count(pevent);
+ if (!pevent || !all_events || events_count < 1)
return NULL;
if (!event) {
idx = 0;
- return pevent->events[0];
+ return all_events;
}
- if (idx < pevent->nr_events && event == pevent->events[idx]) {
+ if (idx < events_count && event == (all_events + idx)) {
idx++;
- if (idx == pevent->nr_events)
+ if (idx == events_count)
return NULL;
- return pevent->events[idx];
+ return (all_events + idx);
}
- for (idx = 1; idx < pevent->nr_events; idx++) {
- if (event == pevent->events[idx - 1])
- return pevent->events[idx];
+ for (idx = 1; idx < events_count; idx++) {
+ if (event == (all_events + (idx - 1)))
+ return (all_events + idx);
}
return NULL;
}
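
trace_find_next_event() no longer dereferences pevent->events directly; it walks via tep_get_first_event() and tep_get_events_count(). Note the pointer arithmetic in the hunk assumes the events sit in one contiguous array of structs. A freestanding model of the same walk, with the tep accessors stubbed by a local table: pass NULL to start, the previous event to continue, and a NULL return marks the end.

    #include <stdio.h>

    struct ev { const char *name; };

    static struct ev table[] = { { "sched_switch" }, { "sched_wakeup" } };

    static struct ev *first_event(void)  { return table; }
    static int events_count(void)        { return 2; }

    static struct ev *find_next(struct ev *event)
    {
        static int idx;
        struct ev *all = first_event();
        int count = events_count();

        if (!all || count < 1)
            return NULL;
        if (!event) {
            idx = 0;
            return all;
        }
        if (idx < count && event == all + idx) {
            idx++;
            return idx == count ? NULL : all + idx;
        }
        for (idx = 1; idx < count; idx++) {      /* resync if the caller skipped */
            if (event == all + (idx - 1))
                return all + idx;
        }
        return NULL;
    }

    int main(void)
    {
        for (struct ev *e = find_next(NULL); e; e = find_next(e))
            printf("%s\n", e->name);
        return 0;
    }
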
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 3dfc1db6b25b..76f12c705ef9 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -102,7 +102,7 @@ static unsigned int read4(struct tep_handle *pevent)
if (do_read(&data, 4) < 0)
return 0;
- return __data2host4(pevent, data);
+ return __tep_data2host4(pevent, data);
}
static unsigned long long read8(struct tep_handle *pevent)
@@ -111,7 +111,7 @@ static unsigned long long read8(struct tep_handle *pevent)
if (do_read(&data, 8) < 0)
return 0;
- return __data2host8(pevent, data);
+ return __tep_data2host8(pevent, data);
}
static char *read_string(void)
@@ -241,7 +241,7 @@ static int read_header_files(struct tep_handle *pevent)
* The commit field in the page is of type long,
* use that instead, since it represents the kernel.
*/
- tep_set_long_size(pevent, pevent->header_page_size_size);
+ tep_set_long_size(pevent, tep_get_header_page_size(pevent));
}
free(header_page);
@@ -297,10 +297,8 @@ static int read_event_file(struct tep_handle *pevent, char *sys,
}
ret = do_read(buf, size);
- if (ret < 0) {
- free(buf);
+ if (ret < 0)
goto out;
- }
ret = parse_event_file(pevent, buf, size, sys);
if (ret < 0)
@@ -349,9 +347,12 @@ static int read_event_files(struct tep_handle *pevent)
for (x=0; x < count; x++) {
size = read8(pevent);
ret = read_event_file(pevent, sys, size);
- if (ret)
+ if (ret) {
+ free(sys);
return ret;
+ }
}
+ free(sys);
}
return 0;
}
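
Both exits of the per-event loop now free sys, which read_string() allocates on each pass of the outer loop. An equivalent single-exit shape, the more common kernel style for guaranteeing the free (helpers stubbed so the sketch compiles on its own):

    #include <stdlib.h>

    static char *read_string(void)            { return calloc(1, 8); }
    static int read_event_file(const char *s) { (void)s; return 0; }

    static int read_event_files(int count)
    {
        char *sys = read_string();
        int x, ret = 0;

        if (!sys)
            return -1;
        for (x = 0; x < count; x++) {
            ret = read_event_file(sys);
            if (ret)
                goto out;            /* one exit path, one free */
        }
    out:
        free(sys);
        return ret;
    }

    int main(void)
    {
        return read_event_files(3);
    }
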
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index 58bb72f266f3..95664b2f771e 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -72,12 +72,12 @@ void trace_event__cleanup(struct trace_event *t)
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
-static struct event_format*
+static struct tep_event_format*
tp_format(const char *sys, const char *name)
{
char *tp_dir = get_events_file(sys);
struct tep_handle *pevent = tevent.pevent;
- struct event_format *event = NULL;
+ struct tep_event_format *event = NULL;
char path[PATH_MAX];
size_t size;
char *data;
@@ -102,7 +102,7 @@ tp_format(const char *sys, const char *name)
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
-struct event_format*
+struct tep_event_format*
trace_event__tp_format(const char *sys, const char *name)
{
if (!tevent_initialized && trace_event__init2())
@@ -111,7 +111,7 @@ trace_event__tp_format(const char *sys, const char *name)
return tp_format(sys, name);
}
-struct event_format *trace_event__tp_format_id(int id)
+struct tep_event_format *trace_event__tp_format_id(int id)
{
if (!tevent_initialized && trace_event__init2())
return ERR_PTR(-ENOMEM);
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 40204ec3a7a2..f024d73bfc40 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -3,6 +3,7 @@
#define _PERF_UTIL_TRACE_EVENT_H
#include <traceevent/event-parse.h>
+#include <traceevent/trace-seq.h>
#include "parse-events.h"
struct machine;
@@ -10,28 +11,28 @@ struct perf_sample;
union perf_event;
struct perf_tool;
struct thread;
-struct plugin_list;
+struct tep_plugin_list;
struct trace_event {
struct tep_handle *pevent;
- struct plugin_list *plugin_list;
+ struct tep_plugin_list *plugin_list;
};
int trace_event__init(struct trace_event *t);
void trace_event__cleanup(struct trace_event *t);
int trace_event__register_resolver(struct machine *machine,
tep_func_resolver_t *func);
-struct event_format*
+struct tep_event_format*
trace_event__tp_format(const char *sys, const char *name);
-struct event_format *trace_event__tp_format_id(int id);
+struct tep_event_format *trace_event__tp_format_id(int id);
int bigendian(void);
-void event_format__fprintf(struct event_format *event,
+void event_format__fprintf(struct tep_event_format *event,
int cpu, void *data, int size, FILE *fp);
-void event_format__print(struct event_format *event,
+void event_format__print(struct tep_event_format *event,
int cpu, void *data, int size);
int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size);
@@ -39,7 +40,7 @@ int parse_event_file(struct tep_handle *pevent,
char *buf, unsigned long size, char *sys);
unsigned long long
-raw_field_value(struct event_format *event, const char *name, void *data);
+raw_field_value(struct tep_event_format *event, const char *name, void *data);
void parse_proc_kallsyms(struct tep_handle *pevent, char *file, unsigned int size);
void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size);
@@ -47,9 +48,9 @@ void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int siz
ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe);
-struct event_format *trace_find_next_event(struct tep_handle *pevent,
- struct event_format *event);
-unsigned long long read_size(struct event_format *event, void *ptr, int size);
+struct tep_event_format *trace_find_next_event(struct tep_handle *pevent,
+ struct tep_event_format *event);
+unsigned long long read_size(struct tep_event_format *event, void *ptr, int size);
unsigned long long eval_flag(const char *flag);
int read_tracing_data(int fd, struct list_head *pattrs);
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index eac5b858a371..093352e93d50 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -221,7 +221,7 @@ out:
return err;
}
-static int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
+int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
{
void *ptr;
loff_t pgoff;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index dc58254a2b69..14508ee7707a 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -6,6 +6,7 @@
/* glibc 2.20 deprecates _BSD_SOURCE in favour of _DEFAULT_SOURCE */
#define _DEFAULT_SOURCE 1
+#include <fcntl.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
@@ -35,6 +36,7 @@ bool lsdir_no_dot_filter(const char *name, struct dirent *d);
int copyfile(const char *from, const char *to);
int copyfile_mode(const char *from, const char *to, mode_t mode);
int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi);
+int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size);
ssize_t readn(int fd, void *buf, size_t n);
ssize_t writen(int fd, const void *buf, size_t n);
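
copyfile_offset() loses its static here and gains a prototype so other objects can copy a byte range between descriptors. A freestanding stand-in with the same signature, built on pread()/pwrite() (the perf version works on mapped pages instead; the u64 typedef is filled in for the sketch):

    #define _GNU_SOURCE              /* for loff_t on glibc */
    #include <stdint.h>
    #include <sys/types.h>
    #include <unistd.h>

    typedef uint64_t u64;            /* as in tools/perf */

    /* copy size bytes from ifd@off_in to ofd@off_out,
     * without moving either file offset */
    int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
    {
        char buf[4096];

        while (size) {
            size_t chunk = size < sizeof(buf) ? size : sizeof(buf);
            ssize_t n = pread(ifd, buf, chunk, off_in);

            if (n <= 0)
                return -1;
            if (pwrite(ofd, buf, n, off_out) != n)
                return -1;
            off_in  += n;
            off_out += n;
            size    -= n;
        }
        return 0;
    }

    /* usage: copyfile_offset(in_fd, 0, out_fd, 0, st.st_size); */
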
diff --git a/tools/power/cpupower/bench/parse.c b/tools/power/cpupower/bench/parse.c
index 9ba8a44ad2a7..84caee38418f 100644
--- a/tools/power/cpupower/bench/parse.c
+++ b/tools/power/cpupower/bench/parse.c
@@ -145,7 +145,7 @@ struct config *prepare_default_config()
config->cpu = 0;
config->prio = SCHED_HIGH;
config->verbose = 0;
- strncpy(config->governor, "ondemand", 8);
+ strncpy(config->governor, "ondemand", sizeof(config->governor));
config->output = stdout;
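
Bounding strncpy() by sizeof(config->governor) instead of the source length protects the destination if the literal ever changes; note that strncpy() still leaves the buffer unterminated when the source fills it exactly, so careful callers also pin the last byte. A minimal sketch (the 16-byte field size is illustrative, not cpupower's actual one):

    #include <stdio.h>
    #include <string.h>

    struct config { char governor[16]; };

    int main(void)
    {
        struct config c;

        strncpy(c.governor, "ondemand", sizeof(c.governor));
        c.governor[sizeof(c.governor) - 1] = '\0';  /* strncpy won't guarantee this */
        printf("%s\n", c.governor);
        return 0;
    }
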
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
index df43cd45d810..c3f39d5128ee 100644
--- a/tools/power/cpupower/utils/cpufreq-info.c
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -170,6 +170,7 @@ static int get_boost_mode(unsigned int cpu)
unsigned long pstates[MAX_HW_PSTATES] = {0,};
if (cpupower_cpu_info.vendor != X86_VENDOR_AMD &&
+ cpupower_cpu_info.vendor != X86_VENDOR_HYGON &&
cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
return 0;
@@ -190,8 +191,9 @@ static int get_boost_mode(unsigned int cpu)
printf(_(" Supported: %s\n"), support ? _("yes") : _("no"));
printf(_(" Active: %s\n"), active ? _("yes") : _("no"));
- if (cpupower_cpu_info.vendor == X86_VENDOR_AMD &&
- cpupower_cpu_info.family >= 0x10) {
+ if ((cpupower_cpu_info.vendor == X86_VENDOR_AMD &&
+ cpupower_cpu_info.family >= 0x10) ||
+ cpupower_cpu_info.vendor == X86_VENDOR_HYGON) {
ret = decode_pstates(cpu, cpupower_cpu_info.family, b_states,
pstates, &pstate_no);
if (ret)
@@ -200,6 +202,8 @@ static int get_boost_mode(unsigned int cpu)
printf(_(" Boost States: %d\n"), b_states);
printf(_(" Total States: %d\n"), pstate_no);
for (i = 0; i < pstate_no; i++) {
+ if (!pstates[i])
+ continue;
if (i < b_states)
printf(_(" Pstate-Pb%d: %luMHz (boost state)"
"\n"), i, pstates[i]);
diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
index bb41cdd0df6b..7c4f83a8c973 100644
--- a/tools/power/cpupower/utils/helpers/amd.c
+++ b/tools/power/cpupower/utils/helpers/amd.c
@@ -33,7 +33,7 @@ union msr_pstate {
unsigned vid:8;
unsigned iddval:8;
unsigned idddiv:2;
- unsigned res1:30;
+ unsigned res1:31;
unsigned en:1;
} fam17h_bits;
unsigned long long val;
@@ -45,7 +45,7 @@ static int get_did(int family, union msr_pstate pstate)
if (family == 0x12)
t = pstate.val & 0xf;
- else if (family == 0x17)
+ else if (family == 0x17 || family == 0x18)
t = pstate.fam17h_bits.did;
else
t = pstate.bits.did;
@@ -59,7 +59,7 @@ static int get_cof(int family, union msr_pstate pstate)
int fid, did, cof;
did = get_did(family, pstate);
- if (family == 0x17) {
+ if (family == 0x17 || family == 0x18) {
fid = pstate.fam17h_bits.fid;
cof = 200 * fid / did;
} else {
@@ -119,6 +119,11 @@ int decode_pstates(unsigned int cpu, unsigned int cpu_family,
}
if (read_msr(cpu, MSR_AMD_PSTATE + i, &pstate.val))
return -1;
+ if ((cpu_family == 0x17) && (!pstate.fam17h_bits.en))
+ continue;
+ else if (!pstate.bits.en)
+ continue;
+
pstates[i] = get_cof(cpu_family, pstate);
}
*no = i;
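
decode_pstates() now skips pstate MSRs whose enable bit is clear, reading it from the family-specific layout. A compact model of that test — the bit layout is abbreviated to just the enable bit (bit 63, once the res1 width fix above is applied), and note this hunk checks only family 0x17, unlike the 0x17/0x18 pairs elsewhere in the patch:

    #include <stdio.h>
    #include <stdbool.h>

    union msr_pstate {
        struct { unsigned long long pad:63, en:1; } bits;        /* pre-17h layout */
        struct { unsigned long long pad:63, en:1; } fam17h_bits; /* 17h layout */
        unsigned long long val;
    };

    static bool pstate_enabled(unsigned family, union msr_pstate p)
    {
        if (family == 0x17)
            return p.fam17h_bits.en;
        return p.bits.en;
    }

    int main(void)
    {
        union msr_pstate p = { .val = 1ULL << 63 };

        printf("%d\n", pstate_enabled(0x17, p));  /* 1: enable bit set */
        return 0;
    }
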
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 732b0b41ba26..5cc39d4e23ed 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -8,7 +8,7 @@
#include "helpers/helpers.h"
static const char *cpu_vendor_table[X86_VENDOR_MAX] = {
- "Unknown", "GenuineIntel", "AuthenticAMD",
+ "Unknown", "GenuineIntel", "AuthenticAMD", "HygonGenuine",
};
#if defined(__i386__) || defined(__x86_64__)
@@ -109,6 +109,7 @@ out:
fclose(fp);
/* Get some useful CPU capabilities from cpuid */
if (cpu_info->vendor != X86_VENDOR_AMD &&
+ cpu_info->vendor != X86_VENDOR_HYGON &&
cpu_info->vendor != X86_VENDOR_INTEL)
return ret;
@@ -124,8 +125,9 @@ out:
if (cpuid_level >= 6 && (cpuid_ecx(6) & 0x1))
cpu_info->caps |= CPUPOWER_CAP_APERF;
- /* AMD Boost state enable/disable register */
- if (cpu_info->vendor == X86_VENDOR_AMD) {
+ /* AMD or Hygon Boost state enable/disable register */
+ if (cpu_info->vendor == X86_VENDOR_AMD ||
+ cpu_info->vendor == X86_VENDOR_HYGON) {
if (ext_cpuid_level >= 0x80000007 &&
(cpuid_edx(0x80000007) & (1 << 9)))
cpu_info->caps |= CPUPOWER_CAP_AMD_CBP;
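
The string table and the vendor enum are parallel: cpu_vendor_table[] is indexed by enum cpupower_cpu_vendor, so adding "HygonGenuine" here requires the matching X86_VENDOR_HYGON slot in helpers.h below. A self-contained illustration of the lookup that relies on that pairing:

    #include <stdio.h>
    #include <string.h>

    enum cpu_vendor { VENDOR_UNKNOWN = 0, VENDOR_INTEL, VENDOR_AMD,
                      VENDOR_HYGON, VENDOR_MAX };

    static const char *cpu_vendor_table[VENDOR_MAX] = {
        "Unknown", "GenuineIntel", "AuthenticAMD", "HygonGenuine",
    };

    static enum cpu_vendor vendor_from_cpuid(const char *id)
    {
        for (int v = 0; v < VENDOR_MAX; v++)
            if (!strcmp(cpu_vendor_table[v], id))
                return (enum cpu_vendor)v;
        return VENDOR_UNKNOWN;
    }

    int main(void)
    {
        printf("%d\n", vendor_from_cpuid("HygonGenuine"));  /* 3 */
        return 0;
    }
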
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index 41da392be448..902139689315 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -61,7 +61,7 @@ extern int be_verbose;
/* cpuid and cpuinfo helpers **************************/
enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
- X86_VENDOR_AMD, X86_VENDOR_MAX};
+ X86_VENDOR_AMD, X86_VENDOR_HYGON, X86_VENDOR_MAX};
#define CPUPOWER_CAP_INV_TSC 0x00000001
#define CPUPOWER_CAP_APERF 0x00000002
diff --git a/tools/power/cpupower/utils/helpers/misc.c b/tools/power/cpupower/utils/helpers/misc.c
index 80fdf55f414d..f406adc40bad 100644
--- a/tools/power/cpupower/utils/helpers/misc.c
+++ b/tools/power/cpupower/utils/helpers/misc.c
@@ -26,7 +26,7 @@ int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active,
* has Hardware determined variable increments instead.
*/
- if (cpu_info.family == 0x17) {
+ if (cpu_info.family == 0x17 || cpu_info.family == 0x18) {
if (!read_msr(cpu, MSR_AMD_HWCR, &val)) {
if (!(val & CPUPOWER_AMD_CPBDIS))
*active = 1;
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index d7c2a6d13dea..f2a7e9cfd577 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -241,7 +241,8 @@ static int init_maxfreq_mode(void)
if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
goto use_sysfs;
- if (cpupower_cpu_info.vendor == X86_VENDOR_AMD) {
+ if (cpupower_cpu_info.vendor == X86_VENDOR_AMD ||
+ cpupower_cpu_info.vendor == X86_VENDOR_HYGON) {
/* MSR_AMD_HWCR tells us whether TSC runs at P0/mperf
* freq.
* A test whether hwcr is accessible/available would be:
diff --git a/tools/power/pm-graph/Makefile b/tools/power/pm-graph/Makefile
index c1899cd72c80..845541544570 100644
--- a/tools/power/pm-graph/Makefile
+++ b/tools/power/pm-graph/Makefile
@@ -23,8 +23,8 @@ install : uninstall
install -m 644 config/suspend-x2-proc.cfg $(DESTDIR)$(PREFIX)/lib/pm-graph/config
install -d $(DESTDIR)$(PREFIX)/bin
- ln -s $(DESTDIR)$(PREFIX)/lib/pm-graph/bootgraph.py $(DESTDIR)$(PREFIX)/bin/bootgraph
- ln -s $(DESTDIR)$(PREFIX)/lib/pm-graph/sleepgraph.py $(DESTDIR)$(PREFIX)/bin/sleepgraph
+ ln -s ../lib/pm-graph/bootgraph.py $(DESTDIR)$(PREFIX)/bin/bootgraph
+ ln -s ../lib/pm-graph/sleepgraph.py $(DESTDIR)$(PREFIX)/bin/sleepgraph
install -d $(DESTDIR)$(PREFIX)/share/man/man8
install bootgraph.8 $(DESTDIR)$(PREFIX)/share/man/man8
diff --git a/tools/power/pm-graph/bootgraph.py b/tools/power/pm-graph/bootgraph.py
index 8ee626c0f6a5..6dae57041537 100755
--- a/tools/power/pm-graph/bootgraph.py
+++ b/tools/power/pm-graph/bootgraph.py
@@ -34,6 +34,10 @@ from datetime import datetime, timedelta
from subprocess import call, Popen, PIPE
import sleepgraph as aslib
+def pprint(msg):
+ print(msg)
+ sys.stdout.flush()
+
# ----------------- CLASSES --------------------
# Class: SystemValues
@@ -157,11 +161,11 @@ class SystemValues(aslib.SystemValues):
return cmdline
def manualRebootRequired(self):
cmdline = self.kernelParams()
- print 'To generate a new timeline manually, follow these steps:\n'
- print '1. Add the CMDLINE string to your kernel command line.'
- print '2. Reboot the system.'
- print '3. After reboot, re-run this tool with the same arguments but no command (w/o -reboot or -manual).\n'
- print 'CMDLINE="%s"' % cmdline
+ pprint('To generate a new timeline manually, follow these steps:\n\n'\
+ '1. Add the CMDLINE string to your kernel command line.\n'\
+ '2. Reboot the system.\n'\
+ '3. After reboot, re-run this tool with the same arguments but no command (w/o -reboot or -manual).\n\n'\
+ 'CMDLINE="%s"' % cmdline)
sys.exit()
def blGrub(self):
blcmd = ''
@@ -431,7 +435,7 @@ def parseTraceLog(data):
if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
continue
if(not cg.postProcess()):
- print('Sanity check failed for %s-%d' % (proc, pid))
+ pprint('Sanity check failed for %s-%d' % (proc, pid))
continue
# match cg data to devices
devname = data.deviceMatch(pid, cg)
@@ -442,8 +446,8 @@ def parseTraceLog(data):
sysvals.vprint('%s callgraph found for %s %s-%d [%f - %f]' %\
(kind, cg.name, proc, pid, cg.start, cg.end))
elif len(cg.list) > 1000000:
- print 'WARNING: the callgraph found for %s is massive! (%d lines)' %\
- (devname, len(cg.list))
+ pprint('WARNING: the callgraph found for %s is massive! (%d lines)' %\
+ (devname, len(cg.list)))
# Function: retrieveLogs
# Description:
@@ -528,7 +532,7 @@ def createBootGraph(data):
tMax = data.end
tTotal = tMax - t0
if(tTotal == 0):
- print('ERROR: No timeline data')
+ pprint('ERROR: No timeline data')
return False
user_mode = '%.0f'%(data.tUserMode*1000)
last_init = '%.0f'%(tTotal*1000)
@@ -734,7 +738,7 @@ def updateCron(restore=False):
op.close()
res = call([cmd, cronfile])
except Exception, e:
- print 'Exception: %s' % str(e)
+ pprint('Exception: %s' % str(e))
shutil.move(backfile, cronfile)
res = -1
if res != 0:
@@ -750,7 +754,7 @@ def updateGrub(restore=False):
call(sysvals.blexec, stderr=PIPE, stdout=PIPE,
env={'PATH': '.:/sbin:/usr/sbin:/usr/bin:/sbin:/bin'})
except Exception, e:
- print 'Exception: %s\n' % str(e)
+ pprint('Exception: %s\n' % str(e))
return
# extract the option and create a grub config without it
sysvals.rootUser(True)
@@ -797,7 +801,7 @@ def updateGrub(restore=False):
res = call(sysvals.blexec)
os.remove(grubfile)
except Exception, e:
- print 'Exception: %s' % str(e)
+ pprint('Exception: %s' % str(e))
res = -1
# cleanup
shutil.move(tempfile, grubfile)
@@ -821,7 +825,7 @@ def updateKernelParams(restore=False):
def doError(msg, help=False):
if help == True:
printHelp()
- print 'ERROR: %s\n' % msg
+ pprint('ERROR: %s\n' % msg)
sysvals.outputResult({'error':msg})
sys.exit()
@@ -829,52 +833,51 @@ def doError(msg, help=False):
# Description:
# print out the help text
def printHelp():
- print('')
- print('%s v%s' % (sysvals.title, sysvals.version))
- print('Usage: bootgraph <options> <command>')
- print('')
- print('Description:')
- print(' This tool reads in a dmesg log of linux kernel boot and')
- print(' creates an html representation of the boot timeline up to')
- print(' the start of the init process.')
- print('')
- print(' If no specific command is given the tool reads the current dmesg')
- print(' and/or ftrace log and creates a timeline')
- print('')
- print(' Generates output files in subdirectory: boot-yymmdd-HHMMSS')
- print(' HTML output: <hostname>_boot.html')
- print(' raw dmesg output: <hostname>_boot_dmesg.txt')
- print(' raw ftrace output: <hostname>_boot_ftrace.txt')
- print('')
- print('Options:')
- print(' -h Print this help text')
- print(' -v Print the current tool version')
- print(' -verbose Print extra information during execution and analysis')
- print(' -addlogs Add the dmesg log to the html output')
- print(' -result fn Export a results table to a text file for parsing.')
- print(' -o name Overrides the output subdirectory name when running a new test')
- print(' default: boot-{date}-{time}')
- print(' [advanced]')
- print(' -fstat Use ftrace to add function detail and statistics (default: disabled)')
- print(' -f/-callgraph Add callgraph detail, can be very large (default: disabled)')
- print(' -maxdepth N limit the callgraph data to N call levels (default: 2)')
- print(' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)')
- print(' -timeprec N Number of significant digits in timestamps (0:S, 3:ms, [6:us])')
- print(' -expandcg pre-expand the callgraph data in the html output (default: disabled)')
- print(' -func list Limit ftrace to comma-delimited list of functions (default: do_one_initcall)')
- print(' -cgfilter S Filter the callgraph output in the timeline')
- print(' -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)')
- print(' -bl name Use the following boot loader for kernel params (default: grub)')
- print(' -reboot Reboot the machine automatically and generate a new timeline')
- print(' -manual Show the steps to generate a new timeline manually (used with -reboot)')
- print('')
- print('Other commands:')
- print(' -flistall Print all functions capable of being captured in ftrace')
- print(' -sysinfo Print out system info extracted from BIOS')
- print(' [redo]')
- print(' -dmesg file Create HTML output using dmesg input (used with -ftrace)')
- print(' -ftrace file Create HTML output using ftrace input (used with -dmesg)')
- print('')
+ pprint('\n%s v%s\n'\
+ 'Usage: bootgraph <options> <command>\n'\
+ '\n'\
+ 'Description:\n'\
+ ' This tool reads in a dmesg log of linux kernel boot and\n'\
+ ' creates an html representation of the boot timeline up to\n'\
+ ' the start of the init process.\n'\
+ '\n'\
+ ' If no specific command is given the tool reads the current dmesg\n'\
+ ' and/or ftrace log and creates a timeline\n'\
+ '\n'\
+ ' Generates output files in subdirectory: boot-yymmdd-HHMMSS\n'\
+ ' HTML output: <hostname>_boot.html\n'\
+ ' raw dmesg output: <hostname>_boot_dmesg.txt\n'\
+ ' raw ftrace output: <hostname>_boot_ftrace.txt\n'\
+ '\n'\
+ 'Options:\n'\
+ ' -h Print this help text\n'\
+ ' -v Print the current tool version\n'\
+ ' -verbose Print extra information during execution and analysis\n'\
+ ' -addlogs Add the dmesg log to the html output\n'\
+ ' -result fn Export a results table to a text file for parsing.\n'\
+ ' -o name Overrides the output subdirectory name when running a new test\n'\
+ ' default: boot-{date}-{time}\n'\
+ ' [advanced]\n'\
+ ' -fstat Use ftrace to add function detail and statistics (default: disabled)\n'\
+ ' -f/-callgraph Add callgraph detail, can be very large (default: disabled)\n'\
+ ' -maxdepth N limit the callgraph data to N call levels (default: 2)\n'\
+ ' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)\n'\
+ ' -timeprec N Number of significant digits in timestamps (0:S, 3:ms, [6:us])\n'\
+ ' -expandcg pre-expand the callgraph data in the html output (default: disabled)\n'\
+ ' -func list Limit ftrace to comma-delimited list of functions (default: do_one_initcall)\n'\
+ ' -cgfilter S Filter the callgraph output in the timeline\n'\
+ ' -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)\n'\
+ ' -bl name Use the following boot loader for kernel params (default: grub)\n'\
+ ' -reboot Reboot the machine automatically and generate a new timeline\n'\
+ ' -manual Show the steps to generate a new timeline manually (used with -reboot)\n'\
+ '\n'\
+ 'Other commands:\n'\
+ ' -flistall Print all functions capable of being captured in ftrace\n'\
+ ' -sysinfo Print out system info extracted from BIOS\n'\
+ ' [redo]\n'\
+ ' -dmesg file Create HTML output using dmesg input (used with -ftrace)\n'\
+ ' -ftrace file Create HTML output using ftrace input (used with -dmesg)\n'\
+ '' % (sysvals.title, sysvals.version))
return True
# ----------------- MAIN --------------------
@@ -895,7 +898,7 @@ if __name__ == '__main__':
printHelp()
sys.exit()
elif(arg == '-v'):
- print("Version %s" % sysvals.version)
+ pprint("Version %s" % sysvals.version)
sys.exit()
elif(arg == '-verbose'):
sysvals.verbose = True
@@ -1016,7 +1019,7 @@ if __name__ == '__main__':
print f
elif cmd == 'checkbl':
sysvals.getBootLoader()
- print 'Boot Loader: %s\n%s' % (sysvals.bootloader, sysvals.blexec)
+ pprint('Boot Loader: %s\n%s' % (sysvals.bootloader, sysvals.blexec))
elif(cmd == 'sysinfo'):
sysvals.printSystemInfo(True)
sys.exit()
diff --git a/tools/power/pm-graph/config/cgskip.txt b/tools/power/pm-graph/config/cgskip.txt
index e48d588fbfb4..9ff88e7e2300 100644
--- a/tools/power/pm-graph/config/cgskip.txt
+++ b/tools/power/pm-graph/config/cgskip.txt
@@ -27,6 +27,7 @@ ktime_get
# console calls
printk
dev_printk
+__dev_printk
console_unlock
# memory handling
diff --git a/tools/power/pm-graph/config/custom-timeline-functions.cfg b/tools/power/pm-graph/config/custom-timeline-functions.cfg
index f8fcb06fd68b..4f80ad7d7275 100644
--- a/tools/power/pm-graph/config/custom-timeline-functions.cfg
+++ b/tools/power/pm-graph/config/custom-timeline-functions.cfg
@@ -105,7 +105,7 @@ override-dev-timeline-functions: true
# example: [color=#CC00CC]
#
# arglist: A list of arguments from registers/stack addresses. See URL:
-# https://www.kernel.org/doc/Documentation/trace/kprobetrace.rst
+# https://www.kernel.org/doc/Documentation/trace/kprobetrace.txt
#
# example: cpu=%di:s32
#
@@ -170,7 +170,7 @@ pm_restore_console:
# example: [color=#CC00CC]
#
# arglist: A list of arguments from registers/stack addresses. See URL:
-# https://www.kernel.org/doc/Documentation/trace/kprobetrace.rst
+# https://www.kernel.org/doc/Documentation/trace/kprobetrace.txt
#
# example: port=+36(%di):s32
#
diff --git a/tools/power/pm-graph/sleepgraph.8 b/tools/power/pm-graph/sleepgraph.8
index 070be2cf7f74..24a2e7d0ae63 100644
--- a/tools/power/pm-graph/sleepgraph.8
+++ b/tools/power/pm-graph/sleepgraph.8
@@ -65,9 +65,9 @@ During test, enable/disable runtime suspend for all devices. The test is delayed
by 5 seconds to allow runtime suspend changes to occur. The settings are restored
after the test is complete.
.TP
-\fB-display \fIon/off\fR
-Turn the display on or off for the test using the xset command. This helps
-maintain the consistecy of test data for better comparison.
+\fB-display \fIon/off/standby/suspend\fR
+Switch the display to the requested mode for the test using the xset command.
+This helps maintain the consistency of test data for better comparison.
.TP
\fB-skiphtml\fR
Run the test and capture the trace logs, but skip the timeline generation.
@@ -183,6 +183,13 @@ Print out the contents of the ACPI Firmware Performance Data Table.
\fB-battery\fR
Print out battery status and current charge.
.TP
+\fB-xon/-xoff/-xstandby/-xsuspend\fR
+Test xset by attempting to switch the display to the given mode. This
+is the same command which will be issued by \fB-display \fImode\fR.
+.TP
+\fB-xstat\fR
+Get the current DPMS display mode.
+.TP
\fB-sysinfo\fR
Print out system info extracted from BIOS. Reads /dev/mem directly instead of going through dmidecode.
.TP
diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
index 0c760478f7d7..52618f3444d4 100755
--- a/tools/power/pm-graph/sleepgraph.py
+++ b/tools/power/pm-graph/sleepgraph.py
@@ -54,6 +54,7 @@ import os
import string
import re
import platform
+import signal
from datetime import datetime
import struct
import ConfigParser
@@ -61,6 +62,10 @@ import gzip
from threading import Thread
from subprocess import call, Popen, PIPE
+def pprint(msg):
+ print(msg)
+ sys.stdout.flush()
+
# ----------------- CLASSES --------------------
# Class: SystemValues
@@ -69,10 +74,10 @@ from subprocess import call, Popen, PIPE
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
- version = '5.1'
+ version = '5.2'
ansi = False
rs = 0
- display = 0
+ display = ''
gzip = False
sync = False
verbose = False
@@ -99,6 +104,7 @@ class SystemValues:
tpath = '/sys/kernel/debug/tracing/'
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
+ pmdpath = '/sys/power/pm_debug_messages'
traceevents = [
'suspend_resume',
'device_pm_callback_end',
@@ -109,8 +115,10 @@ class SystemValues:
mempath = '/dev/mem'
powerfile = '/sys/power/state'
mempowerfile = '/sys/power/mem_sleep'
+ diskpowerfile = '/sys/power/disk'
suspendmode = 'mem'
memmode = ''
+ diskmode = ''
hostname = 'localhost'
prefix = 'test'
teststamp = ''
@@ -137,16 +145,15 @@ class SystemValues:
useprocmon = False
notestrun = False
cgdump = False
+ devdump = False
mixedphaseheight = True
devprops = dict()
predelay = 0
postdelay = 0
- procexecfmt = 'ps - (?P<ps>.*)$'
- devpropfmt = '# Device Properties: .*'
- tracertypefmt = '# tracer: (?P<t>.*)'
- firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
+ pmdebug = ''
tracefuncs = {
'sys_sync': {},
+ 'ksys_sync': {},
'__pm_notifier_call_chain': {},
'pm_prepare_console': {},
'pm_notifier_call_chain': {},
@@ -187,7 +194,6 @@ class SystemValues:
dev_tracefuncs = {
# general wait/delay/sleep
'msleep': { 'args_x86_64': {'time':'%di:s32'}, 'ub': 1 },
- 'schedule_timeout_uninterruptible': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'schedule_timeout': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'udelay': { 'func':'__const_udelay', 'args_x86_64': {'loops':'%di:s32'}, 'ub': 1 },
'usleep_range': { 'args_x86_64': {'min':'%di:s32', 'max':'%si:s32'}, 'ub': 1 },
@@ -199,6 +205,9 @@ class SystemValues:
# filesystem
'ext4_sync_fs': {},
# 80211
+ 'ath10k_bmi_read_memory': { 'args_x86_64': {'length':'%cx:s32'} },
+ 'ath10k_bmi_write_memory': { 'args_x86_64': {'length':'%cx:s32'} },
+ 'ath10k_bmi_fast_download': { 'args_x86_64': {'length':'%cx:s32'} },
'iwlagn_mac_start': {},
'iwlagn_alloc_bcast_station': {},
'iwl_trans_pcie_start_hw': {},
@@ -241,6 +250,7 @@ class SystemValues:
timeformat = '%.3f'
cmdline = '%s %s' % \
(os.path.basename(sys.argv[0]), ' '.join(sys.argv[1:]))
+ sudouser = ''
def __init__(self):
self.archargs = 'args_'+platform.machine()
self.hostname = platform.node()
@@ -256,27 +266,49 @@ class SystemValues:
if (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
self.ansi = True
self.testdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S')
+ if os.getuid() == 0 and 'SUDO_USER' in os.environ and \
+ os.environ['SUDO_USER']:
+ self.sudouser = os.environ['SUDO_USER']
def vprint(self, msg):
self.logmsg += msg+'\n'
- if(self.verbose):
- print(msg)
+ if self.verbose or msg.startswith('WARNING:'):
+ pprint(msg)
+ def signalHandler(self, signum, frame):
+ if not self.result:
+ return
+ signame = self.signames[signum] if signum in self.signames else 'UNKNOWN'
+ msg = 'Signal %s caused a tool exit, line %d' % (signame, frame.f_lineno)
+ sysvals.outputResult({'error':msg})
+ sys.exit(3)
+ def signalHandlerInit(self):
+ capture = ['BUS', 'SYS', 'XCPU', 'XFSZ', 'PWR', 'HUP', 'INT', 'QUIT',
+ 'ILL', 'ABRT', 'FPE', 'SEGV', 'TERM', 'TSTP']
+ self.signames = dict()
+ for i in capture:
+ s = 'SIG'+i
+ try:
+ signum = getattr(signal, s)
+ signal.signal(signum, self.signalHandler)
+ except:
+ continue
+ self.signames[signum] = s
def rootCheck(self, fatal=True):
if(os.access(self.powerfile, os.W_OK)):
return True
if fatal:
msg = 'This command requires sysfs mount and root access'
- print('ERROR: %s\n') % msg
+ pprint('ERROR: %s\n' % msg)
self.outputResult({'error':msg})
- sys.exit()
+ sys.exit(1)
return False
def rootUser(self, fatal=False):
if 'USER' in os.environ and os.environ['USER'] == 'root':
return True
if fatal:
msg = 'This command must be run as root'
- print('ERROR: %s\n') % msg
+ pprint('ERROR: %s\n' % msg)
self.outputResult({'error':msg})
- sys.exit()
+ sys.exit(1)
return False
def getExec(self, cmd):
dirlist = ['/sbin', '/bin', '/usr/sbin', '/usr/bin',
@@ -406,8 +438,8 @@ class SystemValues:
ktime = m.group('ktime')
fp.close()
self.dmesgstart = float(ktime)
- def getdmesg(self, fwdata=[]):
- op = self.writeDatafileHeader(sysvals.dmesgfile, fwdata)
+ def getdmesg(self, testdata):
+ op = self.writeDatafileHeader(sysvals.dmesgfile, testdata)
# store all new dmesg lines since initdmesg was called
fp = Popen('dmesg', stdout=PIPE).stdout
for line in fp:
@@ -535,7 +567,7 @@ class SystemValues:
if len(self.kprobes) < 1:
return
if output:
- print(' kprobe functions in this kernel:')
+ pprint(' kprobe functions in this kernel:')
# first test each kprobe
rejects = []
# sort kprobes: trace, ub-dev, custom, dev
@@ -557,7 +589,7 @@ class SystemValues:
else:
kpl[2].append(name)
if output:
- print(' %s: %s' % (name, res))
+ pprint(' %s: %s' % (name, res))
kplist = kpl[0] + kpl[1] + kpl[2] + kpl[3]
# remove all failed ones from the list
for name in rejects:
@@ -571,7 +603,7 @@ class SystemValues:
if output:
check = self.fgetVal('kprobe_events')
linesack = (len(check.split('\n')) - 1) / 2
- print(' kprobe functions enabled: %d/%d' % (linesack, linesout))
+ pprint(' kprobe functions enabled: %d/%d' % (linesack, linesout))
self.fsetVal('1', 'events/kprobes/enable')
def testKprobe(self, kname, kprobe):
self.fsetVal('0', 'events/kprobes/enable')
@@ -619,6 +651,8 @@ class SystemValues:
self.fsetVal('0', 'events/kprobes/enable')
self.fsetVal('', 'kprobe_events')
self.fsetVal('1024', 'buffer_size_kb')
+ if self.pmdebug:
+ self.setVal(self.pmdebug, self.pmdpath)
def setupAllKprobes(self):
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
@@ -637,10 +671,15 @@ class SystemValues:
return False
def initFtrace(self):
self.printSystemInfo(False)
- print('INITIALIZING FTRACE...')
+ pprint('INITIALIZING FTRACE...')
# turn trace off
self.fsetVal('0', 'tracing_on')
self.cleanupFtrace()
+ # pm debug messages
+ pv = self.getVal(self.pmdpath)
+ if pv != '1':
+ self.setVal('1', self.pmdpath)
+ self.pmdebug = pv
# set the trace clock to global
self.fsetVal('global', 'trace_clock')
self.fsetVal('nop', 'current_tracer')
@@ -649,7 +688,8 @@ class SystemValues:
if self.bufsize > 0:
tgtsize = self.bufsize
elif self.usecallgraph or self.usedevsrc:
- tgtsize = min(self.memfree, 3*1024*1024)
+ bmax = (1*1024*1024) if self.suspendmode == 'disk' else (3*1024*1024)
+ tgtsize = min(self.memfree, bmax)
else:
tgtsize = 65536
while not self.fsetVal('%d' % (tgtsize / cpus), 'buffer_size_kb'):
@@ -658,7 +698,7 @@ class SystemValues:
if tgtsize < 65536:
tgtsize = int(self.fgetVal('buffer_size_kb')) * cpus
break
- print 'Setting trace buffers to %d kB (%d kB per cpu)' % (tgtsize, tgtsize/cpus)
+ pprint('Setting trace buffers to %d kB (%d kB per cpu)' % (tgtsize, tgtsize/cpus))
# initialize the callgraph trace
if(self.usecallgraph):
# set trace type
@@ -691,7 +731,7 @@ class SystemValues:
if self.usedevsrc:
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
- print('INITIALIZING KPROBES...')
+ pprint('INITIALIZING KPROBES...')
self.addKprobes(self.verbose)
if(self.usetraceevents):
# turn trace events on
@@ -728,19 +768,24 @@ class SystemValues:
if not self.ansi:
return str
return '\x1B[%d;40m%s\x1B[m' % (color, str)
- def writeDatafileHeader(self, filename, fwdata=[]):
+ def writeDatafileHeader(self, filename, testdata):
fp = self.openlog(filename, 'w')
fp.write('%s\n%s\n# command | %s\n' % (self.teststamp, self.sysstamp, self.cmdline))
- if(self.suspendmode == 'mem' or self.suspendmode == 'command'):
- for fw in fwdata:
+ for test in testdata:
+ if 'fw' in test:
+ fw = test['fw']
if(fw):
fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
+ if 'bat' in test:
+ (a1, c1), (a2, c2) = test['bat']
+ fp.write('# battery %s %d %s %d\n' % (a1, c1, a2, c2))
+ if test['error'] or len(testdata) > 1:
+ fp.write('# enter_sleep_error %s\n' % test['error'])
return fp
- def sudouser(self, dir):
- if os.path.exists(dir) and os.getuid() == 0 and \
- 'SUDO_USER' in os.environ:
+ def sudoUserchown(self, dir):
+ if os.path.exists(dir) and self.sudouser:
cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
- call(cmd.format(os.environ['SUDO_USER'], dir), shell=True)
+ call(cmd.format(self.sudouser, dir), shell=True)
def outputResult(self, testdata, num=0):
if not self.result:
return
@@ -762,7 +807,7 @@ class SystemValues:
if 'bugurl' in testdata:
fp.write('url%s: %s\n' % (n, testdata['bugurl']))
fp.close()
- self.sudouser(self.result)
+ self.sudoUserchown(self.result)
def configFile(self, file):
dir = os.path.dirname(os.path.realpath(__file__))
if os.path.exists(file):
@@ -800,15 +845,16 @@ suspendmodename = {
# Simple class which holds property values collected
# for all the devices used in the timeline.
class DevProps:
- syspath = ''
- altname = ''
- async = True
- xtraclass = ''
- xtrainfo = ''
+ def __init__(self):
+ self.syspath = ''
+ self.altname = ''
+ self.async = True
+ self.xtraclass = ''
+ self.xtrainfo = ''
def out(self, dev):
return '%s,%s,%d;' % (dev, self.altname, self.async)
def debug(self, dev):
- print '%s:\n\taltname = %s\n\t async = %s' % (dev, self.altname, self.async)
+ pprint('%s:\n\taltname = %s\n\t async = %s' % (dev, self.altname, self.async))
def altName(self, dev):
if not self.altname or self.altname == dev:
return dev
@@ -831,9 +877,6 @@ class DevProps:
# A container used to create a device hierarchy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
- name = ''
- children = 0
- depth = 0
def __init__(self, nodename, nodedepth):
self.name = nodename
self.children = []
@@ -861,71 +904,78 @@ class DeviceNode:
# }
#
class Data:
- dmesg = {} # root data structure
- phases = [] # ordered list of phases
- start = 0.0 # test start
- end = 0.0 # test end
- tSuspended = 0.0 # low-level suspend start
- tResumed = 0.0 # low-level resume start
- tKernSus = 0.0 # kernel level suspend start
- tKernRes = 0.0 # kernel level resume end
- tLow = 0.0 # time spent in low-level suspend (standby/freeze)
- fwValid = False # is firmware data available
- fwSuspend = 0 # time spent in firmware suspend
- fwResume = 0 # time spent in firmware resume
- dmesgtext = [] # dmesg text file in memory
- pstl = 0 # process timeline
- testnumber = 0
- idstr = ''
- html_device_id = 0
- stamp = 0
- outfile = ''
- devpids = []
- kerror = False
+ phasedef = {
+ 'suspend_prepare': {'order': 0, 'color': '#CCFFCC'},
+ 'suspend': {'order': 1, 'color': '#88FF88'},
+ 'suspend_late': {'order': 2, 'color': '#00AA00'},
+ 'suspend_noirq': {'order': 3, 'color': '#008888'},
+ 'suspend_machine': {'order': 4, 'color': '#0000FF'},
+ 'resume_machine': {'order': 5, 'color': '#FF0000'},
+ 'resume_noirq': {'order': 6, 'color': '#FF9900'},
+ 'resume_early': {'order': 7, 'color': '#FFCC00'},
+ 'resume': {'order': 8, 'color': '#FFFF88'},
+ 'resume_complete': {'order': 9, 'color': '#FFFFCC'},
+ }
+ errlist = {
+ 'HWERROR' : '.*\[ *Hardware Error *\].*',
+ 'FWBUG' : '.*\[ *Firmware Bug *\].*',
+ 'BUG' : '.*BUG.*',
+ 'ERROR' : '.*ERROR.*',
+ 'WARNING' : '.*WARNING.*',
+ 'IRQ' : '.*genirq: .*',
+ 'TASKFAIL': '.*Freezing of tasks failed.*',
+ }
def __init__(self, num):
idchar = 'abcdefghij'
- self.pstl = dict()
+ self.start = 0.0 # test start
+ self.end = 0.0 # test end
+ self.tSuspended = 0.0 # low-level suspend start
+ self.tResumed = 0.0 # low-level resume start
+ self.tKernSus = 0.0 # kernel level suspend start
+ self.tKernRes = 0.0 # kernel level resume end
+ self.fwValid = False # is firmware data available
+ self.fwSuspend = 0 # time spent in firmware suspend
+ self.fwResume = 0 # time spent in firmware resume
+ self.html_device_id = 0
+ self.stamp = 0
+ self.outfile = ''
+ self.kerror = False
+ self.battery = 0
+ self.enterfail = ''
+ self.currphase = ''
+ self.pstl = dict() # process timeline
self.testnumber = num
self.idstr = idchar[num]
- self.dmesgtext = []
- self.phases = []
- self.dmesg = { # fixed list of 10 phases
- 'suspend_prepare': {'list': dict(), 'start': -1.0, 'end': -1.0,
- 'row': 0, 'color': '#CCFFCC', 'order': 0},
- 'suspend': {'list': dict(), 'start': -1.0, 'end': -1.0,
- 'row': 0, 'color': '#88FF88', 'order': 1},
- 'suspend_late': {'list': dict(), 'start': -1.0, 'end': -1.0,
- 'row': 0, 'color': '#00AA00', 'order': 2},
- 'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
- 'row': 0, 'color': '#008888', 'order': 3},
- 'suspend_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
- 'row': 0, 'color': '#0000FF', 'order': 4},
- 'resume_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
- 'row': 0, 'color': '#FF0000', 'order': 5},
- 'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
- 'row': 0, 'color': '#FF9900', 'order': 6},
- 'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
- 'row': 0, 'color': '#FFCC00', 'order': 7},
- 'resume': {'list': dict(), 'start': -1.0, 'end': -1.0,
- 'row': 0, 'color': '#FFFF88', 'order': 8},
- 'resume_complete': {'list': dict(), 'start': -1.0, 'end': -1.0,
- 'row': 0, 'color': '#FFFFCC', 'order': 9}
- }
- self.phases = self.sortedPhases()
+ self.dmesgtext = [] # dmesg text file in memory
+ self.dmesg = dict() # root data structure
+ self.errorinfo = {'suspend':[],'resume':[]}
+ self.tLow = [] # time spent in low-level suspends (standby/freeze)
+ self.devpids = []
+ self.devicegroups = 0
+ def sortedPhases(self):
+ return sorted(self.dmesg, key=lambda k:self.dmesg[k]['order'])
+ def initDevicegroups(self):
+ # called when phases are all finished being added
+ for phase in self.dmesg.keys():
+ if '*' in phase:
+ p = phase.split('*')
+ pnew = '%s%d' % (p[0], len(p))
+ self.dmesg[pnew] = self.dmesg.pop(phase)
self.devicegroups = []
- for phase in self.phases:
+ for phase in self.sortedPhases():
self.devicegroups.append([phase])
- self.errorinfo = {'suspend':[],'resume':[]}
+ def nextPhase(self, phase, offset):
+ order = self.dmesg[phase]['order'] + offset
+ for p in self.dmesg:
+ if self.dmesg[p]['order'] == order:
+ return p
+ return ''
+ def lastPhase(self):
+ plist = self.sortedPhases()
+ if len(plist) < 1:
+ return ''
+ return plist[-1]
def extractErrorInfo(self):
- elist = {
- 'HWERROR' : '.*\[ *Hardware Error *\].*',
- 'FWBUG' : '.*\[ *Firmware Bug *\].*',
- 'BUG' : '.*BUG.*',
- 'ERROR' : '.*ERROR.*',
- 'WARNING' : '.*WARNING.*',
- 'IRQ' : '.*genirq: .*',
- 'TASKFAIL': '.*Freezing of tasks failed.*',
- }
lf = sysvals.openlog(sysvals.dmesgfile, 'r')
i = 0
list = []
@@ -939,8 +989,8 @@ class Data:
continue
dir = 'suspend' if t < self.tSuspended else 'resume'
msg = m.group('msg')
- for err in elist:
- if re.match(elist[err], msg):
+ for err in self.errlist:
+ if re.match(self.errlist[err], msg):
list.append((err, dir, t, i, i))
self.kerror = True
break
@@ -956,7 +1006,7 @@ class Data:
def setEnd(self, time):
self.end = time
def isTraceEventOutsideDeviceCalls(self, pid, time):
- for phase in self.phases:
+ for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
@@ -964,16 +1014,10 @@ class Data:
time < d['end']):
return False
return True
- def phaseCollision(self, phase, isbegin, line):
- key = 'end'
- if isbegin:
- key = 'start'
- if self.dmesg[phase][key] >= 0:
- sysvals.vprint('IGNORE: %s' % line.strip())
- return True
- return False
def sourcePhase(self, start):
- for phase in self.phases:
+ for phase in self.sortedPhases():
+ if 'machine' in phase:
+ continue
pend = self.dmesg[phase]['end']
if start <= pend:
return phase
@@ -1004,14 +1048,15 @@ class Data:
return tgtdev
def addDeviceFunctionCall(self, displayname, kprobename, proc, pid, start, end, cdata, rdata):
# try to place the call in a device
- tgtdev = self.sourceDevice(self.phases, start, end, pid, 'device')
+ phases = self.sortedPhases()
+ tgtdev = self.sourceDevice(phases, start, end, pid, 'device')
# calls with device pids that occur outside device bounds are dropped
# TODO: include these somehow
if not tgtdev and pid in self.devpids:
return False
# try to place the call in a thread
if not tgtdev:
- tgtdev = self.sourceDevice(self.phases, start, end, pid, 'thread')
+ tgtdev = self.sourceDevice(phases, start, end, pid, 'thread')
# create new thread blocks, expand as new calls are found
if not tgtdev:
if proc == '<...>':
@@ -1053,7 +1098,7 @@ class Data:
def overflowDevices(self):
# get a list of devices that extend beyond the end of this test run
devlist = []
- for phase in self.phases:
+ for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
@@ -1064,7 +1109,7 @@ class Data:
# merge any devices that overlap devlist
for dev in devlist:
devname = dev['name']
- for phase in self.phases:
+ for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
if devname not in list:
continue
@@ -1079,7 +1124,7 @@ class Data:
del list[devname]
def usurpTouchingThread(self, name, dev):
# the caller test has priority of this thread, give it to him
- for phase in self.phases:
+ for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
if name in list:
tdev = list[name]
@@ -1093,7 +1138,7 @@ class Data:
break
def stitchTouchingThreads(self, testlist):
# merge any threads between tests that touch
- for phase in self.phases:
+ for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
@@ -1103,7 +1148,7 @@ class Data:
data.usurpTouchingThread(devname, dev)
def optimizeDevSrc(self):
# merge any src call loops to reduce timeline size
- for phase in self.phases:
+ for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in list:
if 'src' not in list[dev]:
@@ -1141,7 +1186,7 @@ class Data:
self.tKernSus = self.trimTimeVal(self.tKernSus, t0, dT, left)
self.tKernRes = self.trimTimeVal(self.tKernRes, t0, dT, left)
self.end = self.trimTimeVal(self.end, t0, dT, left)
- for phase in self.phases:
+ for phase in self.sortedPhases():
p = self.dmesg[phase]
p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
@@ -1150,6 +1195,7 @@ class Data:
d = list[name]
d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
+ d['length'] = d['end'] - d['start']
if('ftrace' in d):
cg = d['ftrace']
cg.start = self.trimTimeVal(cg.start, t0, dT, left)
@@ -1166,30 +1212,51 @@ class Data:
tm = self.trimTimeVal(tm, t0, dT, left)
list.append((type, tm, idx1, idx2))
self.errorinfo[dir] = list
- def normalizeTime(self, tZero):
+ def trimFreezeTime(self, tZero):
# trim out any standby or freeze clock time
- if(self.tSuspended != self.tResumed):
- if(self.tResumed > tZero):
- self.trimTime(self.tSuspended, \
- self.tResumed-self.tSuspended, True)
- else:
- self.trimTime(self.tSuspended, \
- self.tResumed-self.tSuspended, False)
+ lp = ''
+ for phase in self.sortedPhases():
+ if 'resume_machine' in phase and 'suspend_machine' in lp:
+ tS, tR = self.dmesg[lp]['end'], self.dmesg[phase]['start']
+ tL = tR - tS
+ if tL > 0:
+ left = True if tR > tZero else False
+ self.trimTime(tS, tL, left)
+ self.tLow.append('%.0f'%(tL*1000))
+ lp = phase
def getTimeValues(self):
- sktime = (self.dmesg['suspend_machine']['end'] - \
- self.tKernSus) * 1000
- rktime = (self.dmesg['resume_complete']['end'] - \
- self.dmesg['resume_machine']['start']) * 1000
+ sktime = (self.tSuspended - self.tKernSus) * 1000
+ rktime = (self.tKernRes - self.tResumed) * 1000
return (sktime, rktime)
- def setPhase(self, phase, ktime, isbegin):
+ def setPhase(self, phase, ktime, isbegin, order=-1):
if(isbegin):
+ # phase start over current phase
+ if self.currphase:
+ if 'resume_machine' not in self.currphase:
+ sysvals.vprint('WARNING: phase %s failed to end' % self.currphase)
+ self.dmesg[self.currphase]['end'] = ktime
+ phases = self.dmesg.keys()
+ color = self.phasedef[phase]['color']
+ count = len(phases) if order < 0 else order
+ # create unique name for every new phase
+ while phase in phases:
+ phase += '*'
+ self.dmesg[phase] = {'list': dict(), 'start': -1.0, 'end': -1.0,
+ 'row': 0, 'color': color, 'order': count}
self.dmesg[phase]['start'] = ktime
+ self.currphase = phase
else:
+ # phase end without a start
+ if phase not in self.currphase:
+ if self.currphase:
+ sysvals.vprint('WARNING: %s ended instead of %s, ftrace corruption?' % (phase, self.currphase))
+ else:
+ sysvals.vprint('WARNING: %s ended without a start, ftrace corruption?' % phase)
+ return phase
+ phase = self.currphase
self.dmesg[phase]['end'] = ktime
- def dmesgSortVal(self, phase):
- return self.dmesg[phase]['order']
- def sortedPhases(self):
- return sorted(self.dmesg, key=self.dmesgSortVal)
+ self.currphase = ''
+ return phase
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
slist = []
@@ -1208,13 +1275,13 @@ class Data:
for devname in phaselist:
dev = phaselist[devname]
if(dev['end'] < 0):
- for p in self.phases:
+ for p in self.sortedPhases():
if self.dmesg[p]['end'] > dev['start']:
dev['end'] = self.dmesg[p]['end']
break
sysvals.vprint('%s (%s): callback didnt return' % (devname, phase))
def deviceFilter(self, devicefilter):
- for phase in self.phases:
+ for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
rmlist = []
for name in list:
@@ -1229,7 +1296,7 @@ class Data:
del list[name]
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
- for phase in self.phases:
+ for phase in self.sortedPhases():
self.fixupInitcalls(phase)
def phaseOverlap(self, phases):
rmgroups = []
@@ -1248,17 +1315,18 @@ class Data:
self.devicegroups.append(newgroup)
def newActionGlobal(self, name, start, end, pid=-1, color=''):
# which phase is this device callback or action in
+ phases = self.sortedPhases()
targetphase = 'none'
htmlclass = ''
overlap = 0.0
- phases = []
- for phase in self.phases:
+ myphases = []
+ for phase in phases:
pstart = self.dmesg[phase]['start']
pend = self.dmesg[phase]['end']
# see if the action overlaps this phase
o = max(0, min(end, pend) - max(start, pstart))
if o > 0:
- phases.append(phase)
+ myphases.append(phase)
# set the target phase to the one that overlaps most
if o > overlap:
if overlap > 0 and phase == 'post_resume':
@@ -1267,19 +1335,19 @@ class Data:
overlap = o
# if no target phase was found, pin it to the edge
if targetphase == 'none':
- p0start = self.dmesg[self.phases[0]]['start']
+ p0start = self.dmesg[phases[0]]['start']
if start <= p0start:
- targetphase = self.phases[0]
+ targetphase = phases[0]
else:
- targetphase = self.phases[-1]
+ targetphase = phases[-1]
if pid == -2:
htmlclass = ' bg'
elif pid == -3:
htmlclass = ' ps'
- if len(phases) > 1:
+ if len(myphases) > 1:
htmlclass = ' bg'
- self.phaseOverlap(phases)
- if targetphase in self.phases:
+ self.phaseOverlap(myphases)
+ if targetphase in phases:
newname = self.newAction(targetphase, name, pid, '', start, end, '', htmlclass, color)
return (targetphase, newname)
return False
@@ -1311,19 +1379,43 @@ class Data:
if(list[child]['par'] == devname):
devlist.append(child)
return devlist
+ def maxDeviceNameSize(self, phase):
+ size = 0
+ for name in self.dmesg[phase]['list']:
+ if len(name) > size:
+ size = len(name)
+ return size
def printDetails(self):
sysvals.vprint('Timeline Details:')
sysvals.vprint(' test start: %f' % self.start)
sysvals.vprint('kernel suspend start: %f' % self.tKernSus)
- for phase in self.phases:
- dc = len(self.dmesg[phase]['list'])
- sysvals.vprint(' %16s: %f - %f (%d devices)' % (phase, \
- self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc))
+ tS = tR = False
+ for phase in self.sortedPhases():
+ devlist = self.dmesg[phase]['list']
+ dc, ps, pe = len(devlist), self.dmesg[phase]['start'], self.dmesg[phase]['end']
+ if not tS and ps >= self.tSuspended:
+ sysvals.vprint(' machine suspended: %f' % self.tSuspended)
+ tS = True
+ if not tR and ps >= self.tResumed:
+ sysvals.vprint(' machine resumed: %f' % self.tResumed)
+ tR = True
+ sysvals.vprint('%20s: %f - %f (%d devices)' % (phase, ps, pe, dc))
+ if sysvals.devdump:
+ sysvals.vprint(''.join('-' for i in range(80)))
+ maxname = '%d' % self.maxDeviceNameSize(phase)
+ fmt = '%3d) %'+maxname+'s - %f - %f'
+ c = 1
+ for name in devlist:
+ s = devlist[name]['start']
+ e = devlist[name]['end']
+ sysvals.vprint(fmt % (c, name, s, e))
+ c += 1
+ sysvals.vprint(''.join('-' for i in range(80)))
sysvals.vprint(' kernel resume end: %f' % self.tKernRes)
sysvals.vprint(' test end: %f' % self.end)
def deviceChildrenAllPhases(self, devname):
devlist = []
- for phase in self.phases:
+ for phase in self.sortedPhases():
list = self.deviceChildren(devname, phase)
for dev in list:
if dev not in devlist:
@@ -1344,7 +1436,7 @@ class Data:
if node.name:
info = ''
drv = ''
- for phase in self.phases:
+ for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
if node.name in list:
s = list[node.name]['start']
@@ -1478,8 +1570,29 @@ class Data:
c = self.addProcessUsageEvent(ps, tres)
if c > 0:
sysvals.vprint('%25s (res): %d' % (ps, c))
+ def handleEndMarker(self, time):
+ dm = self.dmesg
+ self.setEnd(time)
+ self.initDevicegroups()
+ # give suspend_prepare an end if needed
+ if 'suspend_prepare' in dm and dm['suspend_prepare']['end'] < 0:
+ dm['suspend_prepare']['end'] = time
+ # assume resume machine ends at next phase start
+ if 'resume_machine' in dm and dm['resume_machine']['end'] < 0:
+ np = self.nextPhase('resume_machine', 1)
+ if np:
+ dm['resume_machine']['end'] = dm[np]['start']
+ # if kernel resume end not found, assume its the end marker
+ if self.tKernRes == 0.0:
+ self.tKernRes = time
+ # if kernel suspend start not found, assume its the end marker
+ if self.tKernSus == 0.0:
+ self.tKernSus = time
+ # set resume complete to end at end marker
+ if 'resume_complete' in dm:
+ dm['resume_complete']['end'] = time
def debugPrint(self):
- for p in self.phases:
+ for p in self.sortedPhases():
list = self.dmesg[p]['list']
for devname in list:
dev = list[devname]
@@ -1490,9 +1603,9 @@ class Data:
# Description:
# A container for kprobe function data we want in the dev timeline
class DevFunction:
- row = 0
- count = 1
def __init__(self, name, args, caller, ret, start, end, u, proc, pid, color):
+ self.row = 0
+ self.count = 1
self.name = name
self.args = args
self.caller = caller
@@ -1546,16 +1659,15 @@ class DevFunction:
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
class FTraceLine:
- time = 0.0
- length = 0.0
- fcall = False
- freturn = False
- fevent = False
- fkprobe = False
- depth = 0
- name = ''
- type = ''
def __init__(self, t, m='', d=''):
+ self.length = 0.0
+ self.fcall = False
+ self.freturn = False
+ self.fevent = False
+ self.fkprobe = False
+ self.depth = 0
+ self.name = ''
+ self.type = ''
self.time = float(t)
if not m and not d:
return
@@ -1633,13 +1745,13 @@ class FTraceLine:
return len(str)/2
def debugPrint(self, info=''):
if self.isLeaf():
- print(' -- %12.6f (depth=%02d): %s(); (%.3f us) %s' % (self.time, \
+ pprint(' -- %12.6f (depth=%02d): %s(); (%.3f us) %s' % (self.time, \
self.depth, self.name, self.length*1000000, info))
elif self.freturn:
- print(' -- %12.6f (depth=%02d): %s} (%.3f us) %s' % (self.time, \
+ pprint(' -- %12.6f (depth=%02d): %s} (%.3f us) %s' % (self.time, \
self.depth, self.name, self.length*1000000, info))
else:
- print(' -- %12.6f (depth=%02d): %s() { (%.3f us) %s' % (self.time, \
+ pprint(' -- %12.6f (depth=%02d): %s() { (%.3f us) %s' % (self.time, \
self.depth, self.name, self.length*1000000, info))
def startMarker(self):
# Is this the starting line of a suspend?
@@ -1675,19 +1787,13 @@ class FTraceLine:
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
class FTraceCallGraph:
- id = ''
- start = -1.0
- end = -1.0
- list = []
- invalid = False
- depth = 0
- pid = 0
- name = ''
- partial = False
vfname = 'missing_function_name'
- ignore = False
- sv = 0
def __init__(self, pid, sv):
+ self.id = ''
+ self.invalid = False
+ self.name = ''
+ self.partial = False
+ self.ignore = False
self.start = -1.0
self.end = -1.0
self.list = []
@@ -1786,7 +1892,7 @@ class FTraceCallGraph:
if warning and ('[make leaf]', line) not in info:
info.append(('', line))
if warning:
- print 'WARNING: ftrace data missing, corrections made:'
+ pprint('WARNING: ftrace data missing, corrections made:')
for i in info:
t, obj = i
if obj:
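The bare Python 2 print statements become calls to a pprint() helper. Its definition is outside the lines shown here; based on how it is used (one-argument message printing whose output should appear promptly even when piped), it is presumably something like:

	import sys

	def pprint(msg):
		print(msg)
		sys.stdout.flush()   # flush so redirected/piped output appears immediately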
@@ -1846,10 +1952,10 @@ class FTraceCallGraph:
id = 'task %s' % (self.pid)
window = '(%f - %f)' % (self.start, line.time)
if(self.depth < 0):
- print('Data misalignment for '+id+\
+ pprint('Data misalignment for '+id+\
' (buffer overflow), ignoring this callback')
else:
- print('Too much data for '+id+\
+ pprint('Too much data for '+id+\
' '+window+', ignoring this callback')
def slice(self, dev):
minicg = FTraceCallGraph(dev['pid'], self.sv)
@@ -1902,7 +2008,7 @@ class FTraceCallGraph:
elif l.isReturn():
if(l.depth not in stack):
if self.sv.verbose:
- print 'Post Process Error: Depth missing'
+ pprint('Post Process Error: Depth missing')
l.debugPrint()
return False
# calculate call length from call/return lines
@@ -1919,7 +2025,7 @@ class FTraceCallGraph:
return True
elif(cnt < 0):
if self.sv.verbose:
- print 'Post Process Error: Depth is less than 0'
+ pprint('Post Process Error: Depth is less than 0')
return False
# trace ended before call tree finished
return self.repair(cnt)
@@ -1943,7 +2049,7 @@ class FTraceCallGraph:
dev['ftrace'] = cg
found = devname
return found
- for p in data.phases:
+ for p in data.sortedPhases():
if(data.dmesg[p]['start'] <= self.start and
self.start <= data.dmesg[p]['end']):
list = data.dmesg[p]['list']
@@ -1966,7 +2072,7 @@ class FTraceCallGraph:
if fs < data.start or fe > data.end:
return
phase = ''
- for p in data.phases:
+ for p in data.sortedPhases():
if(data.dmesg[p]['start'] <= self.start and
self.start < data.dmesg[p]['end']):
phase = p
@@ -1978,20 +2084,20 @@ class FTraceCallGraph:
phase, myname = out
data.dmesg[phase]['list'][myname]['ftrace'] = self
def debugPrint(self, info=''):
- print('%s pid=%d [%f - %f] %.3f us') % \
+ pprint('%s pid=%d [%f - %f] %.3f us' % \
(self.name, self.pid, self.start, self.end,
- (self.end - self.start)*1000000)
+ (self.end - self.start)*1000000))
for l in self.list:
if l.isLeaf():
- print('%f (%02d): %s(); (%.3f us)%s' % (l.time, \
+ pprint('%f (%02d): %s(); (%.3f us)%s' % (l.time, \
l.depth, l.name, l.length*1000000, info))
elif l.freturn:
- print('%f (%02d): %s} (%.3f us)%s' % (l.time, \
+ pprint('%f (%02d): %s} (%.3f us)%s' % (l.time, \
l.depth, l.name, l.length*1000000, info))
else:
- print('%f (%02d): %s() { (%.3f us)%s' % (l.time, \
+ pprint('%f (%02d): %s() { (%.3f us)%s' % (l.time, \
l.depth, l.name, l.length*1000000, info))
- print(' ')
+ pprint(' ')
class DevItem:
def __init__(self, test, phase, dev):
@@ -2008,23 +2114,20 @@ class DevItem:
# A container for a device timeline which calculates
# all the html properties to display it correctly
class Timeline:
- html = ''
- height = 0 # total timeline height
- scaleH = 20 # timescale (top) row height
- rowH = 30 # device row height
- bodyH = 0 # body height
- rows = 0 # total timeline rows
- rowlines = dict()
- rowheight = dict()
html_tblock = '<div id="block{0}" class="tblock" style="left:{1}%;width:{2}%;"><div class="tback" style="height:{3}px"></div>\n'
html_device = '<div id="{0}" title="{1}" class="thread{7}" style="left:{2}%;top:{3}px;height:{4}px;width:{5}%;{8}">{6}</div>\n'
html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}px;height:{3}px;background:{4}">{5}</div>\n'
html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background:{3}"></div>\n'
html_legend = '<div id="p{3}" class="square" style="left:{0}%;background:{1}">&nbsp;{2}</div>\n'
def __init__(self, rowheight, scaleheight):
- self.rowH = rowheight
- self.scaleH = scaleheight
self.html = ''
+ self.height = 0 # total timeline height
+ self.scaleH = scaleheight # timescale (top) row height
+ self.rowH = rowheight # device row height
+ self.bodyH = 0 # body height
+ self.rows = 0 # total timeline rows
+ self.rowlines = dict()
+ self.rowheight = dict()
def createHeader(self, sv, stamp):
if(not stamp['time']):
return
@@ -2251,18 +2354,18 @@ class Timeline:
# Description:
# A list of values describing the properties of these test runs
class TestProps:
- stamp = ''
- sysinfo = ''
- cmdline = ''
- kparams = ''
- S0i3 = False
- fwdata = []
stampfmt = '# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
+ batteryfmt = '^# battery (?P<a1>\w*) (?P<c1>\d*) (?P<a2>\w*) (?P<c2>\d*)'
+ testerrfmt = '^# enter_sleep_error (?P<e>.*)'
sysinfofmt = '^# sysinfo .*'
cmdlinefmt = '^# command \| (?P<cmd>.*)'
kparamsfmt = '^# kparams \| (?P<kp>.*)'
+ devpropfmt = '# Device Properties: .*'
+ tracertypefmt = '# tracer: (?P<t>.*)'
+ firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
+ procexecfmt = 'ps - (?P<ps>.*)$'
ftrace_line_fmt_fg = \
'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
@@ -2271,11 +2374,17 @@ class TestProps:
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
'(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\
'(?P<msg>.*)'
- ftrace_line_fmt = ftrace_line_fmt_nop
- cgformat = False
- data = 0
- ktemp = dict()
def __init__(self):
+ self.stamp = ''
+ self.sysinfo = ''
+ self.cmdline = ''
+ self.kparams = ''
+ self.testerror = []
+ self.battery = []
+ self.fwdata = []
+ self.ftrace_line_fmt = self.ftrace_line_fmt_nop
+ self.cgformat = False
+ self.data = 0
self.ktemp = dict()
def setTracerType(self, tracer):
if(tracer == 'function_graph'):
@@ -2286,6 +2395,7 @@ class TestProps:
else:
doError('Invalid tracer format: [%s]' % tracer)
def parseStamp(self, data, sv):
+ # global test data
m = re.match(self.stampfmt, self.stamp)
data.stamp = {'time': '', 'host': '', 'mode': ''}
dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
@@ -2324,23 +2434,36 @@ class TestProps:
sv.kparams = m.group('kp')
if not sv.stamp:
sv.stamp = data.stamp
+ # firmware data
+ if sv.suspendmode == 'mem' and len(self.fwdata) > data.testnumber:
+ data.fwSuspend, data.fwResume = self.fwdata[data.testnumber]
+ if(data.fwSuspend > 0 or data.fwResume > 0):
+ data.fwValid = True
+ # battery data
+ if len(self.battery) > data.testnumber:
+ m = re.match(self.batteryfmt, self.battery[data.testnumber])
+ if m:
+ data.battery = m.groups()
+ # sleep mode enter errors
+ if len(self.testerror) > data.testnumber:
+ m = re.match(self.testerrfmt, self.testerror[data.testnumber])
+ if m:
+ data.enterfail = m.group('e')
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
- ftemp = dict()
- ttemp = dict()
- data = 0
def __init__(self, dataobj):
self.data = dataobj
self.ftemp = dict()
self.ttemp = dict()
class ProcessMonitor:
- proclist = dict()
- running = False
+ def __init__(self):
+ self.proclist = dict()
+ self.running = False
def procstat(self):
c = ['cat /proc/[1-9]*/stat 2>/dev/null']
process = Popen(c, shell=True, stdout=PIPE)
@@ -2391,8 +2514,8 @@ class ProcessMonitor:
# markers, and/or kprobes required for primary parsing.
def doesTraceLogHaveTraceEvents():
kpcheck = ['_cal: (', '_cpu_down()']
- techeck = ['suspend_resume']
- tmcheck = ['SUSPEND START', 'RESUME COMPLETE']
+ techeck = ['suspend_resume', 'device_pm_callback']
+ tmcheck = ['tracing_mark_write']
sysvals.usekprobes = False
fp = sysvals.openlog(sysvals.ftracefile, 'r')
for line in fp:
@@ -2414,23 +2537,14 @@ def doesTraceLogHaveTraceEvents():
check.remove(i)
tmcheck = check
fp.close()
- if len(techeck) == 0:
- sysvals.usetraceevents = True
- else:
- sysvals.usetraceevents = False
- if len(tmcheck) == 0:
- sysvals.usetracemarkers = True
- else:
- sysvals.usetracemarkers = False
+ sysvals.usetraceevents = True if len(techeck) < 2 else False
+ sysvals.usetracemarkers = True if len(tmcheck) == 0 else False
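The two if/else blocks collapse into one-liners; since both conditions are already comparisons, the ternaries could equally be written as the bare expressions, e.g.:

	techeck = ['suspend_resume', 'device_pm_callback']   # entries removed as found
	tmcheck = []                                         # all marker strings found
	usetraceevents = len(techeck) < 2    # at least one of the two events was seen
	usetracemarkers = len(tmcheck) == 0  # every marker string was seen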
# Function: appendIncompleteTraceLog
# Description:
# [deprecated for kernel 3.15 or newer]
-# Legacy support of ftrace outputs that lack the device_pm_callback
-# and/or suspend_resume trace events. The primary data should be
-# taken from dmesg, and this ftrace is used only for callgraph data
-# or custom actions in the timeline. The data is appended to the Data
-# objects provided.
+# Adds callgraph data to timelines whose ftrace logs lack trace event
+# data. This is only for timelines generated from kernel 3.15 or older
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
@@ -2460,13 +2574,19 @@ def appendIncompleteTraceLog(testruns):
elif re.match(tp.cmdlinefmt, line):
tp.cmdline = line
continue
+ elif re.match(tp.batteryfmt, line):
+ tp.battery.append(line)
+ continue
+ elif re.match(tp.testerrfmt, line):
+ tp.testerror.append(line)
+ continue
# determine the trace data type (required for further parsing)
- m = re.match(sysvals.tracertypefmt, line)
+ m = re.match(tp.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
# device properties line
- if(re.match(sysvals.devpropfmt, line)):
+ if(re.match(tp.devpropfmt, line)):
devProps(line)
continue
# parse only valid lines, if this is not one move on
@@ -2506,87 +2626,7 @@ def appendIncompleteTraceLog(testruns):
continue
# trace event processing
if(t.fevent):
- # general trace events have two types, begin and end
- if(re.match('(?P<name>.*) begin$', t.name)):
- isbegin = True
- elif(re.match('(?P<name>.*) end$', t.name)):
- isbegin = False
- else:
- continue
- m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
- if(m):
- val = m.group('val')
- if val == '0':
- name = m.group('name')
- else:
- name = m.group('name')+'['+val+']'
- else:
- m = re.match('(?P<name>.*) .*', t.name)
- name = m.group('name')
- # special processing for trace events
- if re.match('dpm_prepare\[.*', name):
- continue
- elif re.match('machine_suspend.*', name):
- continue
- elif re.match('suspend_enter\[.*', name):
- if(not isbegin):
- data.dmesg['suspend_prepare']['end'] = t.time
- continue
- elif re.match('dpm_suspend\[.*', name):
- if(not isbegin):
- data.dmesg['suspend']['end'] = t.time
- continue
- elif re.match('dpm_suspend_late\[.*', name):
- if(isbegin):
- data.dmesg['suspend_late']['start'] = t.time
- else:
- data.dmesg['suspend_late']['end'] = t.time
- continue
- elif re.match('dpm_suspend_noirq\[.*', name):
- if(isbegin):
- data.dmesg['suspend_noirq']['start'] = t.time
- else:
- data.dmesg['suspend_noirq']['end'] = t.time
- continue
- elif re.match('dpm_resume_noirq\[.*', name):
- if(isbegin):
- data.dmesg['resume_machine']['end'] = t.time
- data.dmesg['resume_noirq']['start'] = t.time
- else:
- data.dmesg['resume_noirq']['end'] = t.time
- continue
- elif re.match('dpm_resume_early\[.*', name):
- if(isbegin):
- data.dmesg['resume_early']['start'] = t.time
- else:
- data.dmesg['resume_early']['end'] = t.time
- continue
- elif re.match('dpm_resume\[.*', name):
- if(isbegin):
- data.dmesg['resume']['start'] = t.time
- else:
- data.dmesg['resume']['end'] = t.time
- continue
- elif re.match('dpm_complete\[.*', name):
- if(isbegin):
- data.dmesg['resume_complete']['start'] = t.time
- else:
- data.dmesg['resume_complete']['end'] = t.time
- continue
- # skip trace events inside devices calls
- if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
- continue
- # global events (outside device calls) are simply graphed
- if(isbegin):
- # store each trace event in ttemp
- if(name not in testrun[testidx].ttemp):
- testrun[testidx].ttemp[name] = []
- testrun[testidx].ttemp[name].append(\
- {'begin': t.time, 'end': t.time})
- else:
- # finish off matching trace event in ttemp
- if(name in testrun[testidx].ttemp):
- testrun[testidx].ttemp[name][-1]['end'] = t.time
+ continue
# call/return processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
@@ -2603,12 +2643,6 @@ def appendIncompleteTraceLog(testruns):
tf.close()
for test in testrun:
- # add the traceevent data to the device hierarchy
- if(sysvals.usetraceevents):
- for name in test.ttemp:
- for event in test.ttemp[name]:
- test.data.newActionGlobal(name, event['begin'], event['end'])
-
# add the callgraph data to the device hierarchy
for pid in test.ftemp:
for cg in test.ftemp[pid]:
@@ -2621,7 +2655,7 @@ def appendIncompleteTraceLog(testruns):
continue
callstart = cg.start
callend = cg.end
- for p in test.data.phases:
+ for p in test.data.sortedPhases():
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
@@ -2648,10 +2682,13 @@ def parseTraceLog(live=False):
doError('%s does not exist' % sysvals.ftracefile)
if not live:
sysvals.setupAllKprobes()
+ ksuscalls = ['pm_prepare_console']
+ krescalls = ['pm_restore_console']
tracewatch = []
if sysvals.usekprobes:
tracewatch += ['sync_filesystems', 'freeze_processes', 'syscore_suspend',
- 'syscore_resume', 'resume_console', 'thaw_processes', 'CPU_ON', 'CPU_OFF']
+ 'syscore_resume', 'resume_console', 'thaw_processes', 'CPU_ON',
+ 'CPU_OFF', 'timekeeping_freeze', 'acpi_suspend']
# extract the callgraph and traceevent data
tp = TestProps()
@@ -2674,18 +2711,24 @@ def parseTraceLog(live=False):
elif re.match(tp.cmdlinefmt, line):
tp.cmdline = line
continue
+ elif re.match(tp.batteryfmt, line):
+ tp.battery.append(line)
+ continue
+ elif re.match(tp.testerrfmt, line):
+ tp.testerror.append(line)
+ continue
# firmware line: pull out any firmware data
- m = re.match(sysvals.firmwarefmt, line)
+ m = re.match(tp.firmwarefmt, line)
if(m):
tp.fwdata.append((int(m.group('s')), int(m.group('r'))))
continue
# tracer type line: determine the trace data type
- m = re.match(sysvals.tracertypefmt, line)
+ m = re.match(tp.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
# device properties line
- if(re.match(sysvals.devpropfmt, line)):
+ if(re.match(tp.devpropfmt, line)):
devProps(line)
continue
# ignore all other commented lines
@@ -2714,20 +2757,20 @@ def parseTraceLog(live=False):
continue
# find the start of suspend
if(t.startMarker()):
- phase = 'suspend_prepare'
data = Data(len(testdata))
testdata.append(data)
testrun = TestRun(data)
testruns.append(testrun)
tp.parseStamp(data, sysvals)
data.setStart(t.time)
- data.tKernSus = t.time
+ data.first_suspend_prepare = True
+ phase = data.setPhase('suspend_prepare', t.time, True)
continue
if(not data):
continue
# process cpu exec line
if t.type == 'tracing_mark_write':
- m = re.match(sysvals.procexecfmt, t.name)
+ m = re.match(tp.procexecfmt, t.name)
if(m):
proclist = dict()
for ps in m.group('ps').split(','):
@@ -2740,28 +2783,17 @@ def parseTraceLog(live=False):
continue
# find the end of resume
if(t.endMarker()):
- data.setEnd(t.time)
- if data.tKernRes == 0.0:
- data.tKernRes = t.time
- if data.dmesg['resume_complete']['end'] < 0:
- data.dmesg['resume_complete']['end'] = t.time
- if sysvals.suspendmode == 'mem' and len(tp.fwdata) > data.testnumber:
- data.fwSuspend, data.fwResume = tp.fwdata[data.testnumber]
- if(data.tSuspended != 0 and data.tResumed != 0 and \
- (data.fwSuspend > 0 or data.fwResume > 0)):
- data.fwValid = True
+ data.handleEndMarker(t.time)
if(not sysvals.usetracemarkers):
# no trace markers? then quit and be sure to finish recording
# the event we used to trigger resume end
- if(len(testrun.ttemp['thaw_processes']) > 0):
+ if('thaw_processes' in testrun.ttemp and len(testrun.ttemp['thaw_processes']) > 0):
# if an entry exists, assume this is its end
testrun.ttemp['thaw_processes'][-1]['end'] = t.time
break
continue
# trace event processing
if(t.fevent):
- if(phase == 'post_resume'):
- data.setEnd(t.time)
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
@@ -2786,86 +2818,62 @@ def parseTraceLog(live=False):
# -- phase changes --
# start of kernel suspend
if(re.match('suspend_enter\[.*', t.name)):
- if(isbegin and data.start == data.tKernSus):
- data.dmesg[phase]['start'] = t.time
+ if(isbegin):
data.tKernSus = t.time
continue
# suspend_prepare start
elif(re.match('dpm_prepare\[.*', t.name)):
- phase = 'suspend_prepare'
- if(not isbegin):
- data.dmesg[phase]['end'] = t.time
- if data.dmesg[phase]['start'] < 0:
- data.dmesg[phase]['start'] = data.start
+ if isbegin and data.first_suspend_prepare:
+ data.first_suspend_prepare = False
+ if data.tKernSus == 0:
+ data.tKernSus = t.time
+ continue
+ phase = data.setPhase('suspend_prepare', t.time, isbegin)
continue
# suspend start
elif(re.match('dpm_suspend\[.*', t.name)):
- phase = 'suspend'
- data.setPhase(phase, t.time, isbegin)
+ phase = data.setPhase('suspend', t.time, isbegin)
continue
# suspend_late start
elif(re.match('dpm_suspend_late\[.*', t.name)):
- phase = 'suspend_late'
- data.setPhase(phase, t.time, isbegin)
+ phase = data.setPhase('suspend_late', t.time, isbegin)
continue
# suspend_noirq start
elif(re.match('dpm_suspend_noirq\[.*', t.name)):
- if data.phaseCollision('suspend_noirq', isbegin, line):
- continue
- phase = 'suspend_noirq'
- data.setPhase(phase, t.time, isbegin)
- if(not isbegin):
- phase = 'suspend_machine'
- data.dmesg[phase]['start'] = t.time
+ phase = data.setPhase('suspend_noirq', t.time, isbegin)
continue
# suspend_machine/resume_machine
elif(re.match('machine_suspend\[.*', t.name)):
if(isbegin):
- phase = 'suspend_machine'
- data.dmesg[phase]['end'] = t.time
- data.tSuspended = t.time
+ lp = data.lastPhase()
+ phase = data.setPhase('suspend_machine', data.dmesg[lp]['end'], True)
+ data.setPhase(phase, t.time, False)
+ if data.tSuspended == 0:
+ data.tSuspended = t.time
else:
- if(sysvals.suspendmode in ['mem', 'disk'] and not tp.S0i3):
- data.dmesg['suspend_machine']['end'] = t.time
+ phase = data.setPhase('resume_machine', t.time, True)
+ if(sysvals.suspendmode in ['mem', 'disk']):
+ susp = phase.replace('resume', 'suspend')
+ if susp in data.dmesg:
+ data.dmesg[susp]['end'] = t.time
data.tSuspended = t.time
- phase = 'resume_machine'
- data.dmesg[phase]['start'] = t.time
data.tResumed = t.time
- data.tLow = data.tResumed - data.tSuspended
- continue
- # acpi_suspend
- elif(re.match('acpi_suspend\[.*', t.name)):
- # acpi_suspend[0] S0i3
- if(re.match('acpi_suspend\[0\] begin', t.name)):
- if(sysvals.suspendmode == 'mem'):
- tp.S0i3 = True
- data.dmesg['suspend_machine']['end'] = t.time
- data.tSuspended = t.time
continue
# resume_noirq start
elif(re.match('dpm_resume_noirq\[.*', t.name)):
- if data.phaseCollision('resume_noirq', isbegin, line):
- continue
- phase = 'resume_noirq'
- data.setPhase(phase, t.time, isbegin)
- if(isbegin):
- data.dmesg['resume_machine']['end'] = t.time
+ phase = data.setPhase('resume_noirq', t.time, isbegin)
continue
# resume_early start
elif(re.match('dpm_resume_early\[.*', t.name)):
- phase = 'resume_early'
- data.setPhase(phase, t.time, isbegin)
+ phase = data.setPhase('resume_early', t.time, isbegin)
continue
# resume start
elif(re.match('dpm_resume\[.*', t.name)):
- phase = 'resume'
- data.setPhase(phase, t.time, isbegin)
+ phase = data.setPhase('resume', t.time, isbegin)
continue
# resume complete start
elif(re.match('dpm_complete\[.*', t.name)):
- phase = 'resume_complete'
- if(isbegin):
- data.dmesg[phase]['start'] = t.time
+ phase = data.setPhase('resume_complete', t.time, isbegin)
continue
# skip trace events inside devices calls
if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
@@ -2881,13 +2889,10 @@ def parseTraceLog(live=False):
if(len(testrun.ttemp[name]) > 0):
# if an entry exists, assume this is its end
testrun.ttemp[name][-1]['end'] = t.time
- elif(phase == 'post_resume'):
- # post resume events can just have ends
- testrun.ttemp[name].append({
- 'begin': data.dmesg[phase]['start'],
- 'end': t.time})
# device callback start
elif(t.type == 'device_pm_callback_start'):
+ if phase not in data.dmesg:
+ continue
m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name);
if(not m):
@@ -2901,6 +2906,8 @@ def parseTraceLog(live=False):
data.devpids.append(pid)
# device callback finish
elif(t.type == 'device_pm_callback_end'):
+ if phase not in data.dmesg:
+ continue
m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
if(not m):
continue
@@ -2931,6 +2938,9 @@ def parseTraceLog(live=False):
'cdata': kprobedata,
'proc': m_proc,
})
+					# start of kernel suspend
+ if(phase == 'suspend_prepare' and kprobename in ksuscalls):
+ data.tKernSus = t.time
elif(t.freturn):
if(key not in tp.ktemp) or len(tp.ktemp[key]) < 1:
continue
@@ -2941,9 +2951,9 @@ def parseTraceLog(live=False):
e['end'] = t.time
e['rdata'] = kprobedata
# end of kernel resume
- if(kprobename == 'pm_notifier_call_chain' or \
- kprobename == 'pm_restore_console'):
- data.dmesg[phase]['end'] = t.time
+ if(phase != 'suspend_prepare' and kprobename in krescalls):
+ if phase in data.dmesg:
+ data.dmesg[phase]['end'] = t.time
data.tKernRes = t.time
# callgraph processing
@@ -2961,10 +2971,13 @@ def parseTraceLog(live=False):
if(res == -1):
testrun.ftemp[key][-1].addLine(t)
tf.close()
+ if data and not data.devicegroups:
+ sysvals.vprint('WARNING: end marker is missing')
+ data.handleEndMarker(t.time)
if sysvals.suspendmode == 'command':
for test in testruns:
- for p in test.data.phases:
+ for p in test.data.sortedPhases():
if p == 'suspend_prepare':
test.data.dmesg[p]['start'] = test.data.start
test.data.dmesg[p]['end'] = test.data.end
@@ -2973,13 +2986,20 @@ def parseTraceLog(live=False):
test.data.dmesg[p]['end'] = test.data.end
test.data.tSuspended = test.data.end
test.data.tResumed = test.data.end
- test.data.tLow = 0
test.data.fwValid = False
# dev source and procmon events can be unreadable with mixed phase height
if sysvals.usedevsrc or sysvals.useprocmon:
sysvals.mixedphaseheight = False
+ # expand phase boundaries so there are no gaps
+ for data in testdata:
+ lp = data.sortedPhases()[0]
+ for p in data.sortedPhases():
+ if(p != lp and not ('machine' in p and 'machine' in lp)):
+ data.dmesg[lp]['end'] = data.dmesg[p]['start']
+ lp = p
+
for i in range(len(testruns)):
test = testruns[i]
data = test.data
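The new pass closes any gap between consecutive phases by stretching each phase's end to the next phase's start, except across the machine suspend/resume boundary, where the gap is the time the machine was actually off. The same logic on a toy phase table:

	dmesg = {
		'suspend':      {'start': 0.0, 'end': 1.0},
		'suspend_late': {'start': 1.2, 'end': 2.0},   # 0.2s gap after 'suspend'
	}
	phases = sorted(dmesg, key=lambda k: dmesg[k]['start'])
	lp = phases[0]
	for p in phases:
		if p != lp and not ('machine' in p and 'machine' in lp):
			dmesg[lp]['end'] = dmesg[p]['start']      # stretch to close the gap
		lp = p
	print(dmesg['suspend']['end'])                    # 1.2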
@@ -3040,8 +3060,8 @@ def parseTraceLog(live=False):
sortkey = '%f%f%d' % (cg.start, cg.end, pid)
sortlist[sortkey] = cg
elif len(cg.list) > 1000000:
- print 'WARNING: the callgraph for %s is massive (%d lines)' %\
- (devname, len(cg.list))
+ sysvals.vprint('WARNING: the callgraph for %s is massive (%d lines)' %\
+ (devname, len(cg.list)))
# create blocks for orphan cg data
for sortkey in sorted(sortlist):
cg = sortlist[sortkey]
@@ -3057,25 +3077,29 @@ def parseTraceLog(live=False):
for data in testdata:
tn = '' if len(testdata) == 1 else ('%d' % (data.testnumber + 1))
terr = ''
- lp = data.phases[0]
- for p in data.phases:
- if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
+ phasedef = data.phasedef
+ lp = 'suspend_prepare'
+ for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
+ if p not in data.dmesg:
if not terr:
- print 'TEST%s FAILED: %s failed in %s phase' % (tn, sysvals.suspendmode, lp)
+ pprint('TEST%s FAILED: %s failed in %s phase' % (tn, sysvals.suspendmode, lp))
terr = '%s%s failed in %s phase' % (sysvals.suspendmode, tn, lp)
error.append(terr)
+ if data.tSuspended == 0:
+ data.tSuspended = data.dmesg[lp]['end']
+ if data.tResumed == 0:
+ data.tResumed = data.dmesg[lp]['end']
+ data.fwValid = False
sysvals.vprint('WARNING: phase "%s" is missing!' % p)
- if(data.dmesg[p]['start'] < 0):
- data.dmesg[p]['start'] = data.dmesg[lp]['end']
- if(p == 'resume_machine'):
- data.tSuspended = data.dmesg[lp]['end']
- data.tResumed = data.dmesg[lp]['end']
- data.tLow = 0
- if(data.dmesg[p]['end'] < 0):
- data.dmesg[p]['end'] = data.dmesg[p]['start']
- if(p != lp and not ('machine' in p and 'machine' in lp)):
- data.dmesg[lp]['end'] = data.dmesg[p]['start']
lp = p
+ if not terr and data.enterfail:
+ pprint('test%s FAILED: enter %s failed with %s' % (tn, sysvals.suspendmode, data.enterfail))
+ terr = 'test%s failed to enter %s mode' % (tn, sysvals.suspendmode)
+ error.append(terr)
+ if data.tSuspended == 0:
+ data.tSuspended = data.tKernRes
+ if data.tResumed == 0:
+ data.tResumed = data.tSuspended
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
@@ -3127,7 +3151,13 @@ def loadKernelLog():
elif re.match(tp.cmdlinefmt, line):
tp.cmdline = line
continue
- m = re.match(sysvals.firmwarefmt, line)
+ elif re.match(tp.batteryfmt, line):
+ tp.battery.append(line)
+ continue
+ elif re.match(tp.testerrfmt, line):
+ tp.testerror.append(line)
+ continue
+ m = re.match(tp.firmwarefmt, line)
if(m):
tp.fwdata.append((int(m.group('s')), int(m.group('r'))))
continue
@@ -3140,10 +3170,6 @@ def loadKernelLog():
testruns.append(data)
data = Data(len(testruns))
tp.parseStamp(data, sysvals)
- if len(tp.fwdata) > data.testnumber:
- data.fwSuspend, data.fwResume = tp.fwdata[data.testnumber]
- if(data.fwSuspend > 0 or data.fwResume > 0):
- data.fwValid = True
if(not data):
continue
m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
@@ -3158,7 +3184,7 @@ def loadKernelLog():
if data:
testruns.append(data)
if len(testruns) < 1:
- print('ERROR: dmesg log has no suspend/resume data: %s' \
+ pprint('ERROR: dmesg log has no suspend/resume data: %s' \
% sysvals.dmesgfile)
# fix lines with same timestamp/function with the call and return swapped
@@ -3199,30 +3225,30 @@ def parseKernelLog(data):
# dmesg phase match table
dm = {
- 'suspend_prepare': 'PM: Syncing filesystems.*',
- 'suspend': 'PM: Entering [a-z]* sleep.*',
- 'suspend_late': 'PM: suspend of devices complete after.*',
- 'suspend_noirq': 'PM: late suspend of devices complete after.*',
- 'suspend_machine': 'PM: noirq suspend of devices complete after.*',
- 'resume_machine': 'ACPI: Low-level resume complete.*',
- 'resume_noirq': 'ACPI: Waking up from system sleep state.*',
- 'resume_early': 'PM: noirq resume of devices complete after.*',
- 'resume': 'PM: early resume of devices complete after.*',
- 'resume_complete': 'PM: resume of devices complete after.*',
- 'post_resume': '.*Restarting tasks \.\.\..*',
+ 'suspend_prepare': ['PM: Syncing filesystems.*'],
+ 'suspend': ['PM: Entering [a-z]* sleep.*', 'Suspending console.*'],
+ 'suspend_late': ['PM: suspend of devices complete after.*'],
+ 'suspend_noirq': ['PM: late suspend of devices complete after.*'],
+ 'suspend_machine': ['PM: noirq suspend of devices complete after.*'],
+ 'resume_machine': ['ACPI: Low-level resume complete.*'],
+ 'resume_noirq': ['ACPI: Waking up from system sleep state.*'],
+ 'resume_early': ['PM: noirq resume of devices complete after.*'],
+ 'resume': ['PM: early resume of devices complete after.*'],
+ 'resume_complete': ['PM: resume of devices complete after.*'],
+ 'post_resume': ['.*Restarting tasks \.\.\..*'],
}
if(sysvals.suspendmode == 'standby'):
- dm['resume_machine'] = 'PM: Restoring platform NVS memory'
+ dm['resume_machine'] = ['PM: Restoring platform NVS memory']
elif(sysvals.suspendmode == 'disk'):
- dm['suspend_late'] = 'PM: freeze of devices complete after.*'
- dm['suspend_noirq'] = 'PM: late freeze of devices complete after.*'
- dm['suspend_machine'] = 'PM: noirq freeze of devices complete after.*'
- dm['resume_machine'] = 'PM: Restoring platform NVS memory'
- dm['resume_early'] = 'PM: noirq restore of devices complete after.*'
- dm['resume'] = 'PM: early restore of devices complete after.*'
- dm['resume_complete'] = 'PM: restore of devices complete after.*'
+ dm['suspend_late'] = ['PM: freeze of devices complete after.*']
+ dm['suspend_noirq'] = ['PM: late freeze of devices complete after.*']
+ dm['suspend_machine'] = ['PM: noirq freeze of devices complete after.*']
+ dm['resume_machine'] = ['PM: Restoring platform NVS memory']
+ dm['resume_early'] = ['PM: noirq restore of devices complete after.*']
+ dm['resume'] = ['PM: early restore of devices complete after.*']
+ dm['resume_complete'] = ['PM: restore of devices complete after.*']
elif(sysvals.suspendmode == 'freeze'):
- dm['resume_machine'] = 'ACPI: resume from mwait'
+ dm['resume_machine'] = ['ACPI: resume from mwait']
# action table (expected events that occur and show up in dmesg)
at = {
@@ -3264,81 +3290,89 @@ def parseKernelLog(data):
else:
continue
+ # check for a phase change line
+ phasechange = False
+ for p in dm:
+ for s in dm[p]:
+ if(re.match(s, msg)):
+ phasechange, phase = True, p
+ break
+
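With dm now mapping each phase to a list of patterns, a single dmesg line is classified by scanning every pattern of every phase; a toy run of the same loop (the message is a real kernel log line, the table is trimmed to one entry):

	import re
	dm = {'suspend': ['PM: Entering [a-z]* sleep.*', 'Suspending console.*']}
	msg = 'Suspending console(s) (use no_console_suspend to debug)'
	phasechange, phase = False, ''
	for p in dm:
		for s in dm[p]:
			if re.match(s, msg):
				phasechange, phase = True, p
				break
	print('%s %s' % (phasechange, phase))   # True suspend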
# hack for determining resume_machine end for freeze
if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
and phase == 'resume_machine' and \
re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
- data.dmesg['resume_machine']['end'] = ktime
- phase = 'resume_noirq'
- data.dmesg[phase]['start'] = ktime
-
- # suspend start
- if(re.match(dm['suspend_prepare'], msg)):
- phase = 'suspend_prepare'
- data.dmesg[phase]['start'] = ktime
- data.setStart(ktime)
- data.tKernSus = ktime
- # suspend start
- elif(re.match(dm['suspend'], msg)):
- data.dmesg['suspend_prepare']['end'] = ktime
- phase = 'suspend'
- data.dmesg[phase]['start'] = ktime
- # suspend_late start
- elif(re.match(dm['suspend_late'], msg)):
- data.dmesg['suspend']['end'] = ktime
- phase = 'suspend_late'
- data.dmesg[phase]['start'] = ktime
- # suspend_noirq start
- elif(re.match(dm['suspend_noirq'], msg)):
- data.dmesg['suspend_late']['end'] = ktime
- phase = 'suspend_noirq'
- data.dmesg[phase]['start'] = ktime
- # suspend_machine start
- elif(re.match(dm['suspend_machine'], msg)):
- data.dmesg['suspend_noirq']['end'] = ktime
- phase = 'suspend_machine'
- data.dmesg[phase]['start'] = ktime
- # resume_machine start
- elif(re.match(dm['resume_machine'], msg)):
- if(sysvals.suspendmode in ['freeze', 'standby']):
- data.tSuspended = prevktime
- data.dmesg['suspend_machine']['end'] = prevktime
- else:
- data.tSuspended = ktime
- data.dmesg['suspend_machine']['end'] = ktime
- phase = 'resume_machine'
- data.tResumed = ktime
- data.tLow = data.tResumed - data.tSuspended
- data.dmesg[phase]['start'] = ktime
- # resume_noirq start
- elif(re.match(dm['resume_noirq'], msg)):
- data.dmesg['resume_machine']['end'] = ktime
+ data.setPhase(phase, ktime, False)
phase = 'resume_noirq'
- data.dmesg[phase]['start'] = ktime
- # resume_early start
- elif(re.match(dm['resume_early'], msg)):
- data.dmesg['resume_noirq']['end'] = ktime
- phase = 'resume_early'
- data.dmesg[phase]['start'] = ktime
- # resume start
- elif(re.match(dm['resume'], msg)):
- data.dmesg['resume_early']['end'] = ktime
- phase = 'resume'
- data.dmesg[phase]['start'] = ktime
- # resume complete start
- elif(re.match(dm['resume_complete'], msg)):
- data.dmesg['resume']['end'] = ktime
- phase = 'resume_complete'
- data.dmesg[phase]['start'] = ktime
- # post resume start
- elif(re.match(dm['post_resume'], msg)):
- data.dmesg['resume_complete']['end'] = ktime
- data.setEnd(ktime)
- data.tKernRes = ktime
- break
+ data.setPhase(phase, ktime, True)
+
+ if phasechange:
+ if phase == 'suspend_prepare':
+ data.setPhase(phase, ktime, True)
+ data.setStart(ktime)
+ data.tKernSus = ktime
+ elif phase == 'suspend':
+ lp = data.lastPhase()
+ if lp:
+ data.setPhase(lp, ktime, False)
+ data.setPhase(phase, ktime, True)
+ elif phase == 'suspend_late':
+ lp = data.lastPhase()
+ if lp:
+ data.setPhase(lp, ktime, False)
+ data.setPhase(phase, ktime, True)
+ elif phase == 'suspend_noirq':
+ lp = data.lastPhase()
+ if lp:
+ data.setPhase(lp, ktime, False)
+ data.setPhase(phase, ktime, True)
+ elif phase == 'suspend_machine':
+ lp = data.lastPhase()
+ if lp:
+ data.setPhase(lp, ktime, False)
+ data.setPhase(phase, ktime, True)
+ elif phase == 'resume_machine':
+ lp = data.lastPhase()
+ if(sysvals.suspendmode in ['freeze', 'standby']):
+ data.tSuspended = prevktime
+ if lp:
+ data.setPhase(lp, prevktime, False)
+ else:
+ data.tSuspended = ktime
+ if lp:
+ data.setPhase(lp, prevktime, False)
+ data.tResumed = ktime
+ data.setPhase(phase, ktime, True)
+ elif phase == 'resume_noirq':
+ lp = data.lastPhase()
+ if lp:
+ data.setPhase(lp, ktime, False)
+ data.setPhase(phase, ktime, True)
+ elif phase == 'resume_early':
+ lp = data.lastPhase()
+ if lp:
+ data.setPhase(lp, ktime, False)
+ data.setPhase(phase, ktime, True)
+ elif phase == 'resume':
+ lp = data.lastPhase()
+ if lp:
+ data.setPhase(lp, ktime, False)
+ data.setPhase(phase, ktime, True)
+ elif phase == 'resume_complete':
+ lp = data.lastPhase()
+ if lp:
+ data.setPhase(lp, ktime, False)
+ data.setPhase(phase, ktime, True)
+ elif phase == 'post_resume':
+ lp = data.lastPhase()
+ if lp:
+ data.setPhase(lp, ktime, False)
+ data.setEnd(ktime)
+ data.tKernRes = ktime
+ break
# -- device callbacks --
- if(phase in data.phases):
+ if(phase in data.sortedPhases()):
# device init call
if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
sm = re.match('calling (?P<f>.*)\+ @ '+\
@@ -3396,24 +3430,31 @@ def parseKernelLog(data):
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
prevktime = ktime
+ data.initDevicegroups()
# fill in any missing phases
- lp = data.phases[0]
- for p in data.phases:
- if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
- print('WARNING: phase "%s" is missing, something went wrong!' % p)
- print(' In %s, this dmesg line denotes the start of %s:' % \
- (sysvals.suspendmode, p))
- print(' "%s"' % dm[p])
- if(data.dmesg[p]['start'] < 0):
- data.dmesg[p]['start'] = data.dmesg[lp]['end']
- if(p == 'resume_machine'):
- data.tSuspended = data.dmesg[lp]['end']
- data.tResumed = data.dmesg[lp]['end']
- data.tLow = 0
- if(data.dmesg[p]['end'] < 0):
- data.dmesg[p]['end'] = data.dmesg[p]['start']
+ phasedef = data.phasedef
+ terr, lp = '', 'suspend_prepare'
+ for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
+ if p not in data.dmesg:
+ if not terr:
+ pprint('TEST FAILED: %s failed in %s phase' % (sysvals.suspendmode, lp))
+ terr = '%s failed in %s phase' % (sysvals.suspendmode, lp)
+ if data.tSuspended == 0:
+ data.tSuspended = data.dmesg[lp]['end']
+ if data.tResumed == 0:
+ data.tResumed = data.dmesg[lp]['end']
+ sysvals.vprint('WARNING: phase "%s" is missing!' % p)
lp = p
+ lp = data.sortedPhases()[0]
+ for p in data.sortedPhases():
+ if(p != lp and not ('machine' in p and 'machine' in lp)):
+ data.dmesg[lp]['end'] = data.dmesg[p]['start']
+ lp = p
+ if data.tSuspended == 0:
+ data.tSuspended = data.tKernRes
+ if data.tResumed == 0:
+ data.tResumed = data.tSuspended
# fill in any actions we've found
for name in actions:
@@ -3462,7 +3503,7 @@ def addCallgraphs(sv, hf, data):
hf.write('<section id="callgraphs" class="callgraph">\n')
# write out the ftrace data converted to html
num = 0
- for p in data.phases:
+ for p in data.sortedPhases():
if sv.cgphase and p != sv.cgphase:
continue
list = data.dmesg[p]['list']
@@ -3495,7 +3536,7 @@ def addCallgraphs(sv, hf, data):
# Create summary html file for a series of tests
# Arguments:
# testruns: array of Data objects from parseTraceLog
-def createHTMLSummarySimple(testruns, htmlfile, folder):
+def createHTMLSummarySimple(testruns, htmlfile, title):
# write the html header first (html head, css code, up to body start)
html = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
@@ -3505,7 +3546,7 @@ def createHTMLSummarySimple(testruns, htmlfile, folder):
table {width:100%;border-collapse: collapse;}\n\
.summary {border:1px solid;}\n\
th {border: 1px solid black;background:#222;color:white;}\n\
- td {font: 16px "Times New Roman";text-align: center;}\n\
+ td {font: 14px "Times New Roman";text-align: center;}\n\
tr.head td {border: 1px solid black;background:#aaa;}\n\
tr.alt {background-color:#ddd;}\n\
tr.notice {color:red;}\n\
@@ -3521,7 +3562,7 @@ def createHTMLSummarySimple(testruns, htmlfile, folder):
iMin, iMed, iMax = [0, 0], [0, 0], [0, 0]
num = 0
lastmode = ''
- cnt = {'pass':0, 'fail':0, 'hang':0}
+ cnt = dict()
for data in sorted(testruns, key=lambda v:(v['mode'], v['host'], v['kernel'], v['time'])):
mode = data['mode']
if mode not in list:
@@ -3541,10 +3582,14 @@ def createHTMLSummarySimple(testruns, htmlfile, folder):
tVal = [float(data['suspend']), float(data['resume'])]
list[mode]['data'].append([data['host'], data['kernel'],
data['time'], tVal[0], tVal[1], data['url'], data['result'],
- data['issues']])
+ data['issues'], data['sus_worst'], data['sus_worsttime'],
+ data['res_worst'], data['res_worsttime']])
idx = len(list[mode]['data']) - 1
+ if data['result'] not in cnt:
+ cnt[data['result']] = 1
+ else:
+ cnt[data['result']] += 1
if data['result'] == 'pass':
- cnt['pass'] += 1
for i in range(2):
tMed[i].append(tVal[i])
tAvg[i] += tVal[i]
@@ -3555,10 +3600,6 @@ def createHTMLSummarySimple(testruns, htmlfile, folder):
iMax[i] = idx
tMax[i] = tVal[i]
num += 1
- elif data['result'] == 'hang':
- cnt['hang'] += 1
- elif data['result'] == 'fail':
- cnt['fail'] += 1
lastmode = mode
if lastmode and num > 0:
for i in range(2):
@@ -3575,7 +3616,7 @@ def createHTMLSummarySimple(testruns, htmlfile, folder):
for ilk in sorted(cnt, reverse=True):
if cnt[ilk] > 0:
desc.append('%d %s' % (cnt[ilk], ilk))
- html += '<div class="stamp">%s (%d tests: %s)</div>\n' % (folder, len(testruns), ', '.join(desc))
+ html += '<div class="stamp">%s (%d tests: %s)</div>\n' % (title, len(testruns), ', '.join(desc))
th = '\t<th>{0}</th>\n'
td = '\t<td>{0}</td>\n'
tdh = '\t<td{1}>{0}</td>\n'
@@ -3585,11 +3626,14 @@ def createHTMLSummarySimple(testruns, htmlfile, folder):
html += '<table class="summary">\n<tr>\n' + th.format('#') +\
th.format('Mode') + th.format('Host') + th.format('Kernel') +\
th.format('Test Time') + th.format('Result') + th.format('Issues') +\
- th.format('Suspend') + th.format('Resume') + th.format('Detail') + '</tr>\n'
+ th.format('Suspend') + th.format('Resume') +\
+ th.format('Worst Suspend Device') + th.format('SD Time') +\
+ th.format('Worst Resume Device') + th.format('RD Time') +\
+ th.format('Detail') + '</tr>\n'
# export list into html
head = '<tr class="head"><td>{0}</td><td>{1}</td>'+\
- '<td colspan=8 class="sus">Suspend Avg={2} '+\
+ '<td colspan=12 class="sus">Suspend Avg={2} '+\
'<span class=minval><a href="#s{10}min">Min={3}</a></span> '+\
'<span class=medval><a href="#s{10}med">Med={4}</a></span> '+\
'<span class=maxval><a href="#s{10}max">Max={5}</a></span> '+\
@@ -3598,7 +3642,7 @@ def createHTMLSummarySimple(testruns, htmlfile, folder):
'<span class=medval><a href="#r{10}med">Med={8}</a></span> '+\
'<span class=maxval><a href="#r{10}max">Max={9}</a></span></td>'+\
'</tr>\n'
- headnone = '<tr class="head"><td>{0}</td><td>{1}</td><td colspan=8></td></tr>\n'
+ headnone = '<tr class="head"><td>{0}</td><td>{1}</td><td colspan=12></td></tr>\n'
for mode in list:
# header line for each suspend mode
num = 0
@@ -3641,6 +3685,10 @@ def createHTMLSummarySimple(testruns, htmlfile, folder):
html += td.format(d[7]) # issues
html += tdh.format('%.3f ms' % d[3], tHigh[0]) if d[3] else td.format('') # suspend
html += tdh.format('%.3f ms' % d[4], tHigh[1]) if d[4] else td.format('') # resume
+ html += td.format(d[8]) # sus_worst
+ html += td.format('%.3f ms' % d[9]) if d[9] else td.format('') # sus_worst time
+ html += td.format(d[10]) # res_worst
+ html += td.format('%.3f ms' % d[11]) if d[11] else td.format('') # res_worst time
html += tdlink.format(d[5]) if d[5] else td.format('') # url
html += '</tr>\n'
num += 1
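Each row in list[mode]['data'] is a positional list, and this change appends four fields, so the indices used in the rendering loop above are: 0-2 host/kernel/time, 3-4 suspend/resume times, 5 url, 6 result, 7 issues, and the new 8-11 worst suspend device/time and worst resume device/time. With illustrative values:

	d = ['myhost', '4.19.0', '2018-10-30', 1832.1, 506.7, 'test1.html',
	     'pass', '', 'usb1', 211.0, 'nvme0', 94.5]
	sus_worst, sus_worsttime = d[8], d[9]    # worst suspend device and its time
	res_worst, res_worsttime = d[10], d[11]  # worst resume device and its time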
@@ -3670,14 +3718,15 @@ def ordinal(value):
# True if the html file was created, false if it failed
def createHTML(testruns, testfail):
if len(testruns) < 1:
- print('ERROR: Not enough test data to build a timeline')
+ pprint('ERROR: Not enough test data to build a timeline')
return
kerror = False
for data in testruns:
if data.kerror:
kerror = True
- data.normalizeTime(testruns[-1].tSuspended)
+ if(sysvals.suspendmode in ['freeze', 'standby']):
+ data.trimFreezeTime(testruns[-1].tSuspended)
# html function templates
html_error = '<div id="{1}" title="kernel error/warning" class="err" style="right:{0}%">{2}&rarr;</div>\n'
@@ -3721,8 +3770,8 @@ def createHTML(testruns, testfail):
sktime, rktime = data.getTimeValues()
if(tTotal == 0):
doError('No timeline data')
- if(data.tLow > 0):
- low_time = '%.0f'%(data.tLow*1000)
+ if(len(data.tLow) > 0):
+ low_time = '|'.join(data.tLow)
if sysvals.suspendmode == 'command':
run_time = '%.0f'%((data.end-data.start)*1000)
if sysvals.testcommand:
@@ -3743,7 +3792,7 @@ def createHTML(testruns, testfail):
if(len(testruns) > 1):
testdesc1 = testdesc2 = ordinal(data.testnumber+1)
testdesc2 += ' '
- if(data.tLow == 0):
+ if(len(data.tLow) == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc1, stitle, rtitle)
else:
@@ -3762,7 +3811,7 @@ def createHTML(testruns, testfail):
rtitle = 'time from firmware mode to return from kernel enter_state(%s) [kernel time only]' % sysvals.suspendmode
if(len(testruns) > 1):
testdesc = ordinal(data.testnumber+1)+' '+testdesc
- if(data.tLow == 0):
+ if(len(data.tLow) == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc, stitle, rtitle)
else:
@@ -3820,15 +3869,14 @@ def createHTML(testruns, testfail):
# draw the full timeline
devtl.createZoomBox(sysvals.suspendmode, len(testruns))
- phases = {'suspend':[],'resume':[]}
- for phase in data.dmesg:
- if 'resume' in phase:
- phases['resume'].append(phase)
- else:
- phases['suspend'].append(phase)
-
- # draw each test run chronologically
for data in testruns:
+ # draw each test run and block chronologically
+ phases = {'suspend':[],'resume':[]}
+ for phase in data.sortedPhases():
+ if data.dmesg[phase]['start'] >= data.tSuspended:
+ phases['resume'].append(phase)
+ else:
+ phases['suspend'].append(phase)
# now draw the actual timeline blocks
for dir in phases:
# draw suspend and resume blocks separately
@@ -3850,7 +3898,7 @@ def createHTML(testruns, testfail):
continue
width = '%f' % (((mTotal*100.0)-sysvals.srgap/2)/tTotal)
devtl.html += devtl.html_tblock.format(bname, left, width, devtl.scaleH)
- for b in sorted(phases[dir]):
+ for b in phases[dir]:
# draw the phase color background
phase = data.dmesg[b]
length = phase['end']-phase['start']
@@ -3865,7 +3913,7 @@ def createHTML(testruns, testfail):
id = '%d_%d' % (idx1, idx2)
right = '%f' % (((mMax-t)*100.0)/mTotal)
devtl.html += html_error.format(right, id, type)
- for b in sorted(phases[dir]):
+ for b in phases[dir]:
# draw the devices for this phase
phaselist = data.dmesg[b]['list']
for d in data.tdevlist[b]:
@@ -3942,19 +3990,17 @@ def createHTML(testruns, testfail):
# draw a legend which describes the phases by color
if sysvals.suspendmode != 'command':
- data = testruns[-1]
+ phasedef = testruns[-1].phasedef
devtl.html += '<div class="legend">\n'
- pdelta = 100.0/len(data.phases)
+ pdelta = 100.0/len(phasedef.keys())
pmargin = pdelta / 4.0
- for phase in data.phases:
- tmp = phase.split('_')
- id = tmp[0][0]
- if(len(tmp) > 1):
- id += tmp[1][0]
- order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
+ for phase in sorted(phasedef, key=lambda k:phasedef[k]['order']):
+ id, p = '', phasedef[phase]
+ for word in phase.split('_'):
+ id += word[0]
+ order = '%.2f' % ((p['order'] * pdelta) + pmargin)
name = string.replace(phase, '_', ' &nbsp;')
- devtl.html += devtl.html_legend.format(order, \
- data.dmesg[phase]['color'], name, id)
+ devtl.html += devtl.html_legend.format(order, p['color'], name, id)
devtl.html += '</div>\n'
hf = open(sysvals.htmlfile, 'w')
@@ -3970,7 +4016,7 @@ def createHTML(testruns, testfail):
pscolor = 'linear-gradient(to top left, #ccc, #eee)'
hf.write(devtl.html_phaselet.format('pre_suspend_process', \
'0', '0', pscolor))
- for b in data.phases:
+ for b in data.sortedPhases():
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
@@ -4522,18 +4568,18 @@ def setRuntimeSuspend(before=True):
sv.rstgt, sv.rsval, sv.rsdir = 'on', 'auto', 'enabled'
else:
sv.rstgt, sv.rsval, sv.rsdir = 'auto', 'on', 'disabled'
- print('CONFIGURING RUNTIME SUSPEND...')
+ pprint('CONFIGURING RUNTIME SUSPEND...')
sv.rslist = deviceInfo(sv.rstgt)
for i in sv.rslist:
sv.setVal(sv.rsval, i)
- print('runtime suspend %s on all devices (%d changed)' % (sv.rsdir, len(sv.rslist)))
- print('waiting 5 seconds...')
+ pprint('runtime suspend %s on all devices (%d changed)' % (sv.rsdir, len(sv.rslist)))
+ pprint('waiting 5 seconds...')
time.sleep(5)
else:
# runtime suspend re-enable or re-disable
for i in sv.rslist:
sv.setVal(sv.rstgt, i)
- print('runtime suspend settings restored on %d devices' % len(sv.rslist))
+ pprint('runtime suspend settings restored on %d devices' % len(sv.rslist))
# Function: executeSuspend
# Description:
@@ -4542,25 +4588,21 @@ def setRuntimeSuspend(before=True):
def executeSuspend():
pm = ProcessMonitor()
tp = sysvals.tpath
- fwdata = []
+ testdata = []
+ battery = True if getBattery() else False
# run these commands to prepare the system for suspend
if sysvals.display:
- if sysvals.display > 0:
- print('TURN DISPLAY ON')
- call('xset -d :0.0 dpms force suspend', shell=True)
- call('xset -d :0.0 dpms force on', shell=True)
- else:
- print('TURN DISPLAY OFF')
- call('xset -d :0.0 dpms force suspend', shell=True)
+ pprint('SET DISPLAY TO %s' % sysvals.display.upper())
+ displayControl(sysvals.display)
time.sleep(1)
if sysvals.sync:
- print('SYNCING FILESYSTEMS')
+ pprint('SYNCING FILESYSTEMS')
call('sync', shell=True)
# mark the start point in the kernel ring buffer just as we start
sysvals.initdmesg()
# start ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
- print('START TRACING')
+ pprint('START TRACING')
sysvals.fsetVal('1', 'tracing_on')
if sysvals.useprocmon:
pm.start()
@@ -4573,15 +4615,16 @@ def executeSuspend():
sysvals.fsetVal('WAIT END', 'trace_marker')
# start message
if sysvals.testcommand != '':
- print('COMMAND START')
+ pprint('COMMAND START')
else:
if(sysvals.rtcwake):
- print('SUSPEND START')
+ pprint('SUSPEND START')
else:
- print('SUSPEND START (press a key to resume)')
+ pprint('SUSPEND START (press a key to resume)')
+ bat1 = getBattery() if battery else False
# set rtcwake
if(sysvals.rtcwake):
- print('will issue an rtcwake in %d seconds' % sysvals.rtcwaketime)
+ pprint('will issue an rtcwake in %d seconds' % sysvals.rtcwaketime)
sysvals.rtcWakeAlarmOn()
# start of suspend trace marker
if(sysvals.usecallgraph or sysvals.usetraceevents):
@@ -4592,8 +4635,11 @@ def executeSuspend():
time.sleep(sysvals.predelay/1000.0)
sysvals.fsetVal('WAIT END', 'trace_marker')
# initiate suspend or command
+ tdata = {'error': ''}
if sysvals.testcommand != '':
- call(sysvals.testcommand+' 2>&1', shell=True);
+ res = call(sysvals.testcommand+' 2>&1', shell=True);
+ if res != 0:
+ tdata['error'] = 'cmd returned %d' % res
else:
mode = sysvals.suspendmode
if sysvals.memmode and os.path.exists(sysvals.mempowerfile):
@@ -4601,13 +4647,18 @@ def executeSuspend():
pf = open(sysvals.mempowerfile, 'w')
pf.write(sysvals.memmode)
pf.close()
+ if sysvals.diskmode and os.path.exists(sysvals.diskpowerfile):
+ mode = 'disk'
+ pf = open(sysvals.diskpowerfile, 'w')
+ pf.write(sysvals.diskmode)
+ pf.close()
pf = open(sysvals.powerfile, 'w')
pf.write(mode)
# execution will pause here
try:
pf.close()
- except:
- pass
+ except Exception as e:
+ tdata['error'] = str(e)
if(sysvals.rtcwake):
sysvals.rtcWakeAlarmOff()
# postdelay delay
@@ -4616,27 +4667,33 @@ def executeSuspend():
time.sleep(sysvals.postdelay/1000.0)
sysvals.fsetVal('WAIT END', 'trace_marker')
# return from suspend
- print('RESUME COMPLETE')
+ pprint('RESUME COMPLETE')
if(sysvals.usecallgraph or sysvals.usetraceevents):
sysvals.fsetVal('RESUME COMPLETE', 'trace_marker')
if(sysvals.suspendmode == 'mem' or sysvals.suspendmode == 'command'):
- fwdata.append(getFPDT(False))
+ tdata['fw'] = getFPDT(False)
+ bat2 = getBattery() if battery else False
+ if battery and bat1 and bat2:
+ tdata['bat'] = (bat1, bat2)
+ testdata.append(tdata)
# stop ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
if sysvals.useprocmon:
pm.stop()
sysvals.fsetVal('0', 'tracing_on')
- print('CAPTURING TRACE')
- op = sysvals.writeDatafileHeader(sysvals.ftracefile, fwdata)
+ # grab a copy of the dmesg output
+ pprint('CAPTURING DMESG')
+ sysvals.getdmesg(testdata)
+ # grab a copy of the ftrace output
+ if(sysvals.usecallgraph or sysvals.usetraceevents):
+ pprint('CAPTURING TRACE')
+ op = sysvals.writeDatafileHeader(sysvals.ftracefile, testdata)
fp = open(tp+'trace', 'r')
for line in fp:
op.write(line)
op.close()
sysvals.fsetVal('', 'trace')
devProps()
- # grab a copy of the dmesg output
- print('CAPTURING DMESG')
- sysvals.getdmesg(fwdata)
def readFile(file):
if os.path.islink(file):
@@ -4673,15 +4730,15 @@ def yesno(val):
# a list of USB device names to sysvals for better timeline readability
def deviceInfo(output=''):
if not output:
- print('LEGEND')
- print('---------------------------------------------------------------------------------------------')
- print(' A = async/sync PM queue (A/S) C = runtime active children')
- print(' R = runtime suspend enabled/disabled (E/D) rACTIVE = runtime active (min/sec)')
- print(' S = runtime status active/suspended (A/S) rSUSPEND = runtime suspend (min/sec)')
- print(' U = runtime usage count')
- print('---------------------------------------------------------------------------------------------')
- print('DEVICE NAME A R S U C rACTIVE rSUSPEND')
- print('---------------------------------------------------------------------------------------------')
+ pprint('LEGEND\n'\
+ '---------------------------------------------------------------------------------------------\n'\
+ ' A = async/sync PM queue (A/S) C = runtime active children\n'\
+ ' R = runtime suspend enabled/disabled (E/D) rACTIVE = runtime active (min/sec)\n'\
+ ' S = runtime status active/suspended (A/S) rSUSPEND = runtime suspend (min/sec)\n'\
+ ' U = runtime usage count\n'\
+ '---------------------------------------------------------------------------------------------\n'\
+ 'DEVICE NAME A R S U C rACTIVE rSUSPEND\n'\
+ '---------------------------------------------------------------------------------------------')
res = []
tgtval = 'runtime_status'
@@ -4766,7 +4823,7 @@ def devProps(data=0):
alreadystamped = True
continue
# determine the trace data type (required for further parsing)
- m = re.match(sysvals.tracertypefmt, line)
+ m = re.match(tp.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
@@ -4870,6 +4927,11 @@ def getModes():
fp.close()
if 'mem' in modes and not deep:
modes.remove('mem')
+ if('disk' in modes and os.path.exists(sysvals.diskpowerfile)):
+ fp = open(sysvals.diskpowerfile, 'r')
+ for m in string.split(fp.read()):
+ modes.append('disk-%s' % m.strip('[]'))
+ fp.close()
return modes
# Function: dmidecode
@@ -4994,8 +5056,9 @@ def dmidecode(mempath, fatal=False):
return out
def getBattery():
- p = '/sys/class/power_supply'
- bat = dict()
+ p, charge, bat = '/sys/class/power_supply', 0, {}
+ if not os.path.exists(p):
+ return False
for d in os.listdir(p):
type = sysvals.getVal(os.path.join(p, d, 'type')).strip().lower()
if type != 'battery':
@@ -5003,15 +5066,47 @@ def getBattery():
for v in ['status', 'energy_now', 'capacity_now']:
bat[v] = sysvals.getVal(os.path.join(p, d, v)).strip().lower()
break
- ac = True
- if 'status' in bat and 'discharging' in bat['status']:
- ac = False
- charge = 0
+ if 'status' not in bat:
+ return False
+ ac = False if 'discharging' in bat['status'] else True
for v in ['energy_now', 'capacity_now']:
if v in bat and bat[v]:
charge = int(bat[v])
return (ac, charge)
+def displayControl(cmd):
+ xset, ret = 'xset -d :0.0 {0}', 0
+ if sysvals.sudouser:
+ xset = 'sudo -u %s %s' % (sysvals.sudouser, xset)
+ if cmd == 'init':
+ ret = call(xset.format('dpms 0 0 0'), shell=True)
+ if not ret:
+ ret = call(xset.format('s off'), shell=True)
+ elif cmd == 'reset':
+ ret = call(xset.format('s reset'), shell=True)
+ elif cmd in ['on', 'off', 'standby', 'suspend']:
+ b4 = displayControl('stat')
+ ret = call(xset.format('dpms force %s' % cmd), shell=True)
+ if not ret:
+ curr = displayControl('stat')
+ sysvals.vprint('Display Switched: %s -> %s' % (b4, curr))
+ if curr != cmd:
+ sysvals.vprint('WARNING: Display failed to change to %s' % cmd)
+ if ret:
+ sysvals.vprint('WARNING: Display failed to change to %s with xset' % cmd)
+ return ret
+ elif cmd == 'stat':
+ fp = Popen(xset.format('q').split(' '), stdout=PIPE).stdout
+ ret = 'unknown'
+ for line in fp:
+ m = re.match('[\s]*Monitor is (?P<m>.*)', line)
+ if(m and len(m.group('m')) >= 2):
+ out = m.group('m').lower()
+ ret = out[3:] if out[0:2] == 'in' else out
+ break
+ fp.close()
+ return ret
+
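getBattery() now returns False when there is no power-supply sysfs node or no readable status, so callers can gate on it, and displayControl() centralizes the xset handling (returning 0 on success for the set commands and a state string for 'stat'). A hedged usage sketch:

	bat = getBattery()
	if bat:
		ac, charge = bat
		print('%s, charge %d' % ('on AC' if ac else 'on battery', charge))
	if displayControl('off') == 0:   # force the display off for the test...
		displayControl('on')         # ...then restore it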
# Function: getFPDT
# Description:
# Read the acpi bios tables and pull out FPDT, the firmware data
@@ -5055,18 +5150,19 @@ def getFPDT(output):
table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
if(output):
- print('')
- print('Firmware Performance Data Table (%s)' % table[0])
- print(' Signature : %s' % table[0])
- print(' Table Length : %u' % table[1])
- print(' Revision : %u' % table[2])
- print(' Checksum : 0x%x' % table[3])
- print(' OEM ID : %s' % table[4])
- print(' OEM Table ID : %s' % table[5])
- print(' OEM Revision : %u' % table[6])
- print(' Creator ID : %s' % table[7])
- print(' Creator Revision : 0x%x' % table[8])
- print('')
+ pprint('\n'\
+ 'Firmware Performance Data Table (%s)\n'\
+ ' Signature : %s\n'\
+ ' Table Length : %u\n'\
+ ' Revision : %u\n'\
+ ' Checksum : 0x%x\n'\
+ ' OEM ID : %s\n'\
+ ' OEM Table ID : %s\n'\
+ ' OEM Revision : %u\n'\
+ ' Creator ID : %s\n'\
+ ' Creator Revision : 0x%x\n'\
+ '' % (table[0], table[0], table[1], table[2], table[3],
+ table[4], table[5], table[6], table[7], table[8]))
if(table[0] != 'FPDT'):
if(output):
@@ -5092,22 +5188,24 @@ def getFPDT(output):
first = fp.read(8)
except:
if(output):
- print('Bad address 0x%x in %s' % (addr, sysvals.mempath))
+ pprint('Bad address 0x%x in %s' % (addr, sysvals.mempath))
return [0, 0]
rechead = struct.unpack('4sI', first)
recdata = fp.read(rechead[1]-8)
if(rechead[0] == 'FBPT'):
record = struct.unpack('HBBIQQQQQ', recdata)
if(output):
- print('%s (%s)' % (rectype[header[0]], rechead[0]))
- print(' Reset END : %u ns' % record[4])
- print(' OS Loader LoadImage Start : %u ns' % record[5])
- print(' OS Loader StartImage Start : %u ns' % record[6])
- print(' ExitBootServices Entry : %u ns' % record[7])
- print(' ExitBootServices Exit : %u ns' % record[8])
+ pprint('%s (%s)\n'\
+ ' Reset END : %u ns\n'\
+ ' OS Loader LoadImage Start : %u ns\n'\
+ ' OS Loader StartImage Start : %u ns\n'\
+ ' ExitBootServices Entry : %u ns\n'\
+ ' ExitBootServices Exit : %u ns'\
+ '' % (rectype[header[0]], rechead[0], record[4], record[5],
+ record[6], record[7], record[8]))
elif(rechead[0] == 'S3PT'):
if(output):
- print('%s (%s)' % (rectype[header[0]], rechead[0]))
+ pprint('%s (%s)' % (rectype[header[0]], rechead[0]))
j = 0
while(j < len(recdata)):
prechead = struct.unpack('HBB', recdata[j:j+4])
@@ -5117,27 +5215,26 @@ def getFPDT(output):
record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
fwData[1] = record[2]
if(output):
- print(' %s' % prectype[prechead[0]])
- print(' Resume Count : %u' % \
- record[1])
- print(' FullResume : %u ns' % \
- record[2])
- print(' AverageResume : %u ns' % \
- record[3])
+ pprint(' %s\n'\
+ ' Resume Count : %u\n'\
+ ' FullResume : %u ns\n'\
+ ' AverageResume : %u ns'\
+ '' % (prectype[prechead[0]], record[1],
+ record[2], record[3]))
elif(prechead[0] == 1):
record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
fwData[0] = record[1] - record[0]
if(output):
- print(' %s' % prectype[prechead[0]])
- print(' SuspendStart : %u ns' % \
- record[0])
- print(' SuspendEnd : %u ns' % \
- record[1])
- print(' SuspendTime : %u ns' % \
- fwData[0])
+ pprint(' %s\n'\
+ ' SuspendStart : %u ns\n'\
+ ' SuspendEnd : %u ns\n'\
+ ' SuspendTime : %u ns'\
+ '' % (prectype[prechead[0]], record[0],
+ record[1], fwData[0]))
+
j += prechead[1]
if(output):
- print('')
+ pprint('')
i += header[1]
fp.close()
return fwData
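# Shape of the return value, as filled in by the S3PT records above:
# fwData[0] is the firmware suspend time (SuspendEnd - SuspendStart) and
# fwData[1] is the firmware FullResume time, both in ns; for example
# fwData = [1823000, 201584000] would be ~1.8 ms suspend and ~201 ms resume
# (values hypothetical)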
@@ -5149,26 +5246,26 @@ def getFPDT(output):
# Output:
# An empty string if the test will work, or an error string if not
def statusCheck(probecheck=False):
- status = True
+ status = ''
- print('Checking this system (%s)...' % platform.node())
+ pprint('Checking this system (%s)...' % platform.node())
# check we have root access
res = sysvals.colorText('NO (No features of this tool will work!)')
if(sysvals.rootCheck(False)):
res = 'YES'
- print(' have root access: %s' % res)
+ pprint(' have root access: %s' % res)
if(res != 'YES'):
- print(' Try running this script with sudo')
- return False
+ pprint(' Try running this script with sudo')
+ return 'missing root access'
# check sysfs is mounted
res = sysvals.colorText('NO (No features of this tool will work!)')
if(os.path.exists(sysvals.powerfile)):
res = 'YES'
- print(' is sysfs mounted: %s' % res)
+ pprint(' is sysfs mounted: %s' % res)
if(res != 'YES'):
- return False
+ return 'sysfs is missing'
# check target mode is a valid mode
if sysvals.suspendmode != 'command':
@@ -5177,11 +5274,11 @@ def statusCheck(probecheck=False):
if(sysvals.suspendmode in modes):
res = 'YES'
else:
- status = False
- print(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
+ status = '%s mode is not supported' % sysvals.suspendmode
+ pprint(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
if(res == 'NO'):
- print(' valid power modes are: %s' % modes)
- print(' please choose one with -m')
+ pprint(' valid power modes are: %s' % modes)
+ pprint(' please choose one with -m')
# check if ftrace is available
res = sysvals.colorText('NO')
@@ -5189,8 +5286,8 @@ def statusCheck(probecheck=False):
if(ftgood):
res = 'YES'
elif(sysvals.usecallgraph):
- status = False
- print(' is ftrace supported: %s' % res)
+ status = 'ftrace is not properly supported'
+ pprint(' is ftrace supported: %s' % res)
# check if kprobes are available
res = sysvals.colorText('NO')
@@ -5199,7 +5296,7 @@ def statusCheck(probecheck=False):
res = 'YES'
else:
sysvals.usedevsrc = False
- print(' are kprobes supported: %s' % res)
+ pprint(' are kprobes supported: %s' % res)
# what data source are we using
res = 'DMESG'
@@ -5210,15 +5307,15 @@ def statusCheck(probecheck=False):
sysvals.usetraceevents = False
if(sysvals.usetraceevents):
res = 'FTRACE (all trace events found)'
- print(' timeline data source: %s' % res)
+ pprint(' timeline data source: %s' % res)
# check if rtcwake
res = sysvals.colorText('NO')
if(sysvals.rtcpath != ''):
res = 'YES'
elif(sysvals.rtcwake):
- status = False
- print(' is rtcwake supported: %s' % res)
+ status = 'rtcwake is not properly supported'
+ pprint(' is rtcwake supported: %s' % res)
if not probecheck:
return status
@@ -5243,9 +5340,9 @@ def statusCheck(probecheck=False):
def doError(msg, help=False):
if(help == True):
printHelp()
- print('ERROR: %s\n') % msg
+ pprint('ERROR: %s\n' % msg)
sysvals.outputResult({'error':msg})
- sys.exit()
+ sys.exit(1)
# Function: getArgInt
# Description:
@@ -5286,7 +5383,7 @@ def getArgFloat(name, args, min, max, main=True):
return val
def processData(live=False):
- print('PROCESSING DATA')
+ pprint('PROCESSING DATA')
error = ''
if(sysvals.usetraceevents):
testruns, error = parseTraceLog(live)
@@ -5301,16 +5398,22 @@ def processData(live=False):
appendIncompleteTraceLog(testruns)
sysvals.vprint('Command:\n %s' % sysvals.cmdline)
for data in testruns:
+ if data.battery:
+ a1, c1, a2, c2 = data.battery
+ s = 'Battery:\n Before - AC: %s, Charge: %d\n After - AC: %s, Charge: %d' % \
+ (a1, int(c1), a2, int(c2))
+ sysvals.vprint(s)
data.printDetails()
if sysvals.cgdump:
for data in testruns:
data.debugPrint()
- sys.exit()
+ sys.exit(0)
if len(testruns) < 1:
+ pprint('ERROR: Not enough test data to build a timeline')
return (testruns, {'error': 'timeline generation failed'})
sysvals.vprint('Creating the html timeline (%s)...' % sysvals.htmlfile)
createHTML(testruns, error)
- print('DONE')
+ pprint('DONE')
data = testruns[0]
stamp = data.stamp
stamp['suspend'], stamp['resume'] = data.getTimeValues()
@@ -5335,6 +5438,7 @@ def rerunTest():
elif not os.access(sysvals.htmlfile, os.W_OK):
doError('missing permission to write to %s' % sysvals.htmlfile)
testruns, stamp = processData(False)
+ sysvals.logmsg = ''
return stamp
# Function: runTest
@@ -5349,13 +5453,16 @@ def runTest(n=0):
executeSuspend()
sysvals.cleanupFtrace()
if sysvals.skiphtml:
- sysvals.sudouser(sysvals.testdir)
+ sysvals.sudoUserchown(sysvals.testdir)
return
testruns, stamp = processData(True)
for data in testruns:
del data
- sysvals.sudouser(sysvals.testdir)
+ sysvals.sudoUserchown(sysvals.testdir)
sysvals.outputResult(stamp, n)
+ if 'error' in stamp:
+ return 2
+ return 0
def find_in_html(html, start, end, firstonly=True):
n, out = 0, []
@@ -5380,15 +5487,87 @@ def find_in_html(html, start, end, firstonly=True):
return ''
return out
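# Usage sketch for find_in_html (return values are hypothetical):
#   find_in_html(html, 'Kernel Suspend', 'ms') -> '1045.2'
#   find_in_html(html, '<div class="stamp">', '</div>')
#     -> 'myhost 4.19.0 mem October 23 2018, 10:15:00 AM'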
+def data_from_html(file, outpath, devlist=False):
+ html = open(file, 'r').read()
+ suspend = find_in_html(html, 'Kernel Suspend', 'ms')
+ resume = find_in_html(html, 'Kernel Resume', 'ms')
+ line = find_in_html(html, '<div class="stamp">', '</div>')
+ stmp = line.split()
+ if not suspend or not resume or len(stmp) != 8:
+ return False
+ try:
+ dt = datetime.strptime(' '.join(stmp[3:]), '%B %d %Y, %I:%M:%S %p')
+ except:
+ return False
+ tstr = dt.strftime('%Y/%m/%d %H:%M:%S')
+ error = find_in_html(html, '<table class="testfail"><tr><td>', '</td>')
+ if error:
+ m = re.match('[a-z]* failed in (?P<p>[a-z0-9_]*) phase', error)
+ if m:
+ result = 'fail in %s' % m.group('p')
+ else:
+ result = 'fail'
+ else:
+ result = 'pass'
+ ilist = []
+ e = find_in_html(html, 'class="err"[\w=":;\.%\- ]*>', '&rarr;</div>', False)
+ for i in list(set(e)):
+ ilist.append('%sx%d' % (i, e.count(i)) if e.count(i) > 1 else i)
+ low = find_in_html(html, 'freeze time: <b>', ' ms</b>')
+ if low and '|' in low:
+ ilist.append('FREEZEx%d' % len(low.split('|')))
+ devices = dict()
+ for line in html.split('\n'):
+ m = re.match(' *<div id=\"[a,0-9]*\" *title=\"(?P<title>.*)\" class=\"thread.*', line)
+ if not m or 'thread kth' in line or 'thread sec' in line:
+ continue
+ m = re.match('(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title'))
+ if not m:
+ continue
+ name, time, phase = m.group('n'), m.group('t'), m.group('p')
+ if ' async' in name or ' sync' in name:
+ name = ' '.join(name.split(' ')[:-1])
+ d = phase.split('_')[0]
+ if d not in devices:
+ devices[d] = dict()
+ if name not in devices[d]:
+ devices[d][name] = 0.0
+ devices[d][name] += float(time)
+ worst = {'suspend': {'name':'', 'time': 0.0},
+ 'resume': {'name':'', 'time': 0.0}}
+ for d in devices:
+ if d not in worst:
+ worst[d] = dict()
+ dev = devices[d]
+ if len(dev.keys()) > 0:
+ n = sorted(dev, key=dev.get, reverse=True)[0]
+ worst[d]['name'], worst[d]['time'] = n, dev[n]
+ data = {
+ 'mode': stmp[2],
+ 'host': stmp[0],
+ 'kernel': stmp[1],
+ 'time': tstr,
+ 'result': result,
+ 'issues': ' '.join(ilist),
+ 'suspend': suspend,
+ 'resume': resume,
+ 'sus_worst': worst['suspend']['name'],
+ 'sus_worsttime': worst['suspend']['time'],
+ 'res_worst': worst['resume']['name'],
+ 'res_worsttime': worst['resume']['time'],
+ 'url': os.path.relpath(file, outpath),
+ }
+ if devlist:
+ data['devlist'] = devices
+ return data
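+# Example of the dict data_from_html returns (values hypothetical):
+#   {'mode': 'mem', 'host': 'myhost', 'kernel': '4.19.0',
+#    'time': '2018/10/23 10:15:00', 'result': 'pass', 'issues': '',
+#    'suspend': '1045.2', 'resume': '1002.5',
+#    'sus_worst': 'i915', 'sus_worsttime': 990.1,
+#    'res_worst': 'nvme', 'res_worsttime': 800.3, 'url': 'host_mem.html'}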
+
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, local=True, genhtml=False):
inpath = os.path.abspath(subdir)
- outpath = inpath
- if local:
- outpath = os.path.abspath('.')
- print('Generating a summary of folder "%s"' % inpath)
+ outpath = os.path.abspath('.') if local else inpath
+ pprint('Generating a summary of folder "%s"' % inpath)
if genhtml:
for dirname, dirnames, filenames in os.walk(subdir):
sysvals.dmesgfile = sysvals.ftracefile = sysvals.htmlfile = ''
@@ -5400,49 +5579,30 @@ def runSummary(subdir, local=True, genhtml=False):
sysvals.setOutputFile()
if sysvals.ftracefile and sysvals.htmlfile and \
not os.path.exists(sysvals.htmlfile):
- print('FTRACE: %s' % sysvals.ftracefile)
+ pprint('FTRACE: %s' % sysvals.ftracefile)
if sysvals.dmesgfile:
- print('DMESG : %s' % sysvals.dmesgfile)
+ pprint('DMESG : %s' % sysvals.dmesgfile)
rerunTest()
testruns = []
+ desc = {'host':[],'mode':[],'kernel':[]}
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
if(not re.match('.*.html', filename)):
continue
- file = os.path.join(dirname, filename)
- html = open(file, 'r').read()
- suspend = find_in_html(html, 'Kernel Suspend', 'ms')
- resume = find_in_html(html, 'Kernel Resume', 'ms')
- line = find_in_html(html, '<div class="stamp">', '</div>')
- stmp = line.split()
- if not suspend or not resume or len(stmp) != 8:
+ data = data_from_html(os.path.join(dirname, filename), outpath)
+ if(not data):
continue
- try:
- dt = datetime.strptime(' '.join(stmp[3:]), '%B %d %Y, %I:%M:%S %p')
- except:
- continue
- tstr = dt.strftime('%Y/%m/%d %H:%M:%S')
- error = find_in_html(html, '<table class="testfail"><tr><td>', '</td>')
- result = 'fail' if error else 'pass'
- ilist = []
- e = find_in_html(html, 'class="err"[\w=":;\.%\- ]*>', '&rarr;</div>', False)
- for i in list(set(e)):
- ilist.append('%sx%d' % (i, e.count(i)) if e.count(i) > 1 else i)
- data = {
- 'mode': stmp[2],
- 'host': stmp[0],
- 'kernel': stmp[1],
- 'time': tstr,
- 'result': result,
- 'issues': ','.join(ilist),
- 'suspend': suspend,
- 'resume': resume,
- 'url': os.path.relpath(file, outpath),
- }
testruns.append(data)
+ for key in desc:
+ if data[key] not in desc[key]:
+ desc[key].append(data[key])
outfile = os.path.join(outpath, 'summary.html')
- print('Summary file: %s' % outfile)
- createHTMLSummarySimple(testruns, outfile, inpath)
+ pprint('Summary file: %s' % outfile)
+ if len(desc['host']) == len(desc['mode']) == len(desc['kernel']) == 1:
+ title = '%s %s %s' % (desc['host'][0], desc['kernel'][0], desc['mode'][0])
+ else:
+ title = inpath
+ createHTMLSummarySimple(testruns, outfile, title)
# Function: checkArgBool
# Description:
@@ -5499,13 +5659,10 @@ def configFromFile(file):
else:
doError('invalid value --> (%s: %s), use "enable/disable"' % (option, value), True)
elif(option == 'display'):
- if value in switchvalues:
- if value in switchoff:
- sysvals.display = -1
- else:
- sysvals.display = 1
- else:
- doError('invalid value --> (%s: %s), use "on/off"' % (option, value), True)
+ disopt = ['on', 'off', 'standby', 'suspend']
+ if value not in disopt:
+ doError('invalid value --> (%s: %s), use %s' % (option, value, disopt), True)
+ sysvals.display = value
elif(option == 'gzip'):
sysvals.gzip = checkArgBool(option, value)
elif(option == 'cgfilter'):
@@ -5521,9 +5678,9 @@ def configFromFile(file):
sysvals.cgtest = getArgInt('cgtest', value, 0, 1, False)
elif(option == 'cgphase'):
d = Data(0)
- if value not in d.phases:
+ if value not in d.sortedPhases():
doError('invalid phase --> (%s: %s), valid phases are %s'\
- % (option, value, d.phases), True)
+ % (option, value, d.sortedPhases()), True)
sysvals.cgphase = value
elif(option == 'fadd'):
file = sysvals.configFile(value)
@@ -5660,84 +5817,86 @@ def configFromFile(file):
# Description:
# print out the help text
def printHelp():
- print('')
- print('%s v%s' % (sysvals.title, sysvals.version))
- print('Usage: sudo sleepgraph <options> <commands>')
- print('')
- print('Description:')
- print(' This tool is designed to assist kernel and OS developers in optimizing')
- print(' their linux stack\'s suspend/resume time. Using a kernel image built')
- print(' with a few extra options enabled, the tool will execute a suspend and')
- print(' capture dmesg and ftrace data until resume is complete. This data is')
- print(' transformed into a device timeline and an optional callgraph to give')
- print(' a detailed view of which devices/subsystems are taking the most')
- print(' time in suspend/resume.')
- print('')
- print(' If no specific command is given, the default behavior is to initiate')
- print(' a suspend/resume and capture the dmesg/ftrace output as an html timeline.')
- print('')
- print(' Generates output files in subdirectory: suspend-yymmdd-HHMMSS')
- print(' HTML output: <hostname>_<mode>.html')
- print(' raw dmesg output: <hostname>_<mode>_dmesg.txt')
- print(' raw ftrace output: <hostname>_<mode>_ftrace.txt')
- print('')
- print('Options:')
- print(' -h Print this help text')
- print(' -v Print the current tool version')
- print(' -config fn Pull arguments and config options from file fn')
- print(' -verbose Print extra information during execution and analysis')
- print(' -m mode Mode to initiate for suspend (default: %s)') % (sysvals.suspendmode)
- print(' -o name Overrides the output subdirectory name when running a new test')
- print(' default: suspend-{date}-{time}')
- print(' -rtcwake t Wakeup t seconds after suspend, set t to "off" to disable (default: 15)')
- print(' -addlogs Add the dmesg and ftrace logs to the html output')
- print(' -srgap Add a visible gap in the timeline between sus/res (default: disabled)')
- print(' -skiphtml Run the test and capture the trace logs, but skip the timeline (default: disabled)')
- print(' -result fn Export a results table to a text file for parsing.')
- print(' [testprep]')
- print(' -sync Sync the filesystems before starting the test')
- print(' -rs on/off Enable/disable runtime suspend for all devices, restore all after test')
- print(' -display on/off Turn the display on or off for the test')
- print(' [advanced]')
- print(' -gzip Gzip the trace and dmesg logs to save space')
- print(' -cmd {s} Run the timeline over a custom command, e.g. "sync -d"')
- print(' -proc Add usermode process info into the timeline (default: disabled)')
- print(' -dev Add kernel function calls and threads to the timeline (default: disabled)')
- print(' -x2 Run two suspend/resumes back to back (default: disabled)')
- print(' -x2delay t Include t ms delay between multiple test runs (default: 0 ms)')
- print(' -predelay t Include t ms delay before 1st suspend (default: 0 ms)')
- print(' -postdelay t Include t ms delay after last resume (default: 0 ms)')
- print(' -mindev ms Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)')
- print(' -multi n d Execute <n> consecutive tests at <d> seconds intervals. The outputs will')
- print(' be created in a new subdirectory with a summary page.')
- print(' [debug]')
- print(' -f Use ftrace to create device callgraphs (default: disabled)')
- print(' -maxdepth N limit the callgraph data to N call levels (default: 0=all)')
- print(' -expandcg pre-expand the callgraph data in the html output (default: disabled)')
- print(' -fadd file Add functions to be graphed in the timeline from a list in a text file')
- print(' -filter "d1,d2,..." Filter out all but this comma-delimited list of device names')
- print(' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)')
- print(' -cgphase P Only show callgraph data for phase P (e.g. suspend_late)')
- print(' -cgtest N Only show callgraph data for test N (e.g. 0 or 1 in an x2 run)')
- print(' -timeprec N Number of significant digits in timestamps (0:S, [3:ms], 6:us)')
- print(' -cgfilter S Filter the callgraph output in the timeline')
- print(' -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)')
- print(' -bufsize N Set trace buffer size to N kilo-bytes (default: all of free memory)')
- print('')
- print('Other commands:')
- print(' -modes List available suspend modes')
- print(' -status Test to see if the system is enabled to run this tool')
- print(' -fpdt Print out the contents of the ACPI Firmware Performance Data Table')
- print(' -battery Print out battery info (if available)')
- print(' -sysinfo Print out system info extracted from BIOS')
- print(' -devinfo Print out the pm settings of all devices which support runtime suspend')
- print(' -flist Print the list of functions currently being captured in ftrace')
- print(' -flistall Print all functions capable of being captured in ftrace')
- print(' -summary dir Create a summary of tests in this dir [-genhtml builds missing html]')
- print(' [redo]')
- print(' -ftrace ftracefile Create HTML output using ftrace input (used with -dmesg)')
- print(' -dmesg dmesgfile Create HTML output using dmesg (used with -ftrace)')
- print('')
+ pprint('\n%s v%s\n'\
+ 'Usage: sudo sleepgraph <options> <commands>\n'\
+ '\n'\
+ 'Description:\n'\
+ ' This tool is designed to assist kernel and OS developers in optimizing\n'\
+ ' their linux stack\'s suspend/resume time. Using a kernel image built\n'\
+ ' with a few extra options enabled, the tool will execute a suspend and\n'\
+ ' capture dmesg and ftrace data until resume is complete. This data is\n'\
+ ' transformed into a device timeline and an optional callgraph to give\n'\
+ ' a detailed view of which devices/subsystems are taking the most\n'\
+ ' time in suspend/resume.\n'\
+ '\n'\
+ ' If no specific command is given, the default behavior is to initiate\n'\
+ ' a suspend/resume and capture the dmesg/ftrace output as an html timeline.\n'\
+ '\n'\
+ ' Generates output files in subdirectory: suspend-yymmdd-HHMMSS\n'\
+ ' HTML output: <hostname>_<mode>.html\n'\
+ ' raw dmesg output: <hostname>_<mode>_dmesg.txt\n'\
+ ' raw ftrace output: <hostname>_<mode>_ftrace.txt\n'\
+ '\n'\
+ 'Options:\n'\
+ ' -h Print this help text\n'\
+ ' -v Print the current tool version\n'\
+ ' -config fn Pull arguments and config options from file fn\n'\
+ ' -verbose Print extra information during execution and analysis\n'\
+ ' -m mode Mode to initiate for suspend (default: %s)\n'\
+ ' -o name Overrides the output subdirectory name when running a new test\n'\
+ ' default: suspend-{date}-{time}\n'\
+ ' -rtcwake t Wakeup t seconds after suspend, set t to "off" to disable (default: 15)\n'\
+ ' -addlogs Add the dmesg and ftrace logs to the html output\n'\
+ ' -srgap Add a visible gap in the timeline between sus/res (default: disabled)\n'\
+ ' -skiphtml Run the test and capture the trace logs, but skip the timeline (default: disabled)\n'\
+ ' -result fn Export a results table to a text file for parsing.\n'\
+ ' [testprep]\n'\
+ ' -sync Sync the filesystems before starting the test\n'\
+ ' -rs on/off Enable/disable runtime suspend for all devices, restore all after test\n'\
+ ' -display m Change the display mode to m for the test (on/off/standby/suspend)\n'\
+ ' [advanced]\n'\
+ ' -gzip Gzip the trace and dmesg logs to save space\n'\
+ ' -cmd {s} Run the timeline over a custom command, e.g. "sync -d"\n'\
+ ' -proc Add usermode process info into the timeline (default: disabled)\n'\
+ ' -dev Add kernel function calls and threads to the timeline (default: disabled)\n'\
+ ' -x2 Run two suspend/resumes back to back (default: disabled)\n'\
+ ' -x2delay t Include t ms delay between multiple test runs (default: 0 ms)\n'\
+ ' -predelay t Include t ms delay before 1st suspend (default: 0 ms)\n'\
+ ' -postdelay t Include t ms delay after last resume (default: 0 ms)\n'\
+ ' -mindev ms Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)\n'\
+ ' -multi n d Execute <n> consecutive tests at <d> seconds intervals. The outputs will\n'\
+ ' be created in a new subdirectory with a summary page.\n'\
+ ' [debug]\n'\
+ ' -f Use ftrace to create device callgraphs (default: disabled)\n'\
+ ' -maxdepth N limit the callgraph data to N call levels (default: 0=all)\n'\
+ ' -expandcg pre-expand the callgraph data in the html output (default: disabled)\n'\
+ ' -fadd file Add functions to be graphed in the timeline from a list in a text file\n'\
+ ' -filter "d1,d2,..." Filter out all but this comma-delimited list of device names\n'\
+ ' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)\n'\
+ ' -cgphase P Only show callgraph data for phase P (e.g. suspend_late)\n'\
+ ' -cgtest N Only show callgraph data for test N (e.g. 0 or 1 in an x2 run)\n'\
+ ' -timeprec N Number of significant digits in timestamps (0:S, [3:ms], 6:us)\n'\
+ ' -cgfilter S Filter the callgraph output in the timeline\n'\
+ ' -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)\n'\
+ ' -bufsize N Set trace buffer size to N kilo-bytes (default: all of free memory)\n'\
+ ' -devdump Print out all the raw device data for each phase\n'\
+ ' -cgdump Print out all the raw callgraph data\n'\
+ '\n'\
+ 'Other commands:\n'\
+ ' -modes List available suspend modes\n'\
+ ' -status Test to see if the system is enabled to run this tool\n'\
+ ' -fpdt Print out the contents of the ACPI Firmware Performance Data Table\n'\
+ ' -battery Print out battery info (if available)\n'\
+ ' -x<mode> Test xset by toggling the given mode (on/off/standby/suspend)\n'\
+ ' -sysinfo Print out system info extracted from BIOS\n'\
+ ' -devinfo Print out the pm settings of all devices which support runtime suspend\n'\
+ ' -flist Print the list of functions currently being captured in ftrace\n'\
+ ' -flistall Print all functions capable of being captured in ftrace\n'\
+ ' -summary dir Create a summary of tests in this dir [-genhtml builds missing html]\n'\
+ ' [redo]\n'\
+ ' -ftrace ftracefile Create HTML output using ftrace input (used with -dmesg)\n'\
+ ' -dmesg dmesgfile Create HTML output using dmesg (used with -ftrace)\n'\
+ '' % (sysvals.title, sysvals.version, sysvals.suspendmode))
return True
# ----------------- MAIN --------------------
@@ -5745,7 +5904,9 @@ def printHelp():
if __name__ == '__main__':
genhtml = False
cmd = ''
- simplecmds = ['-sysinfo', '-modes', '-fpdt', '-flist', '-flistall', '-devinfo', '-status', '-battery']
+ simplecmds = ['-sysinfo', '-modes', '-fpdt', '-flist', '-flistall',
+ '-devinfo', '-status', '-battery', '-xon', '-xoff', '-xstandby',
+ '-xsuspend', '-xinit', '-xreset', '-xstat']
if '-f' in sys.argv:
sysvals.cgskip = sysvals.configFile('cgskip.txt')
# loop through the command line arguments
@@ -5763,10 +5924,10 @@ if __name__ == '__main__':
cmd = arg[1:]
elif(arg == '-h'):
printHelp()
- sys.exit()
+ sys.exit(0)
elif(arg == '-v'):
- print("Version %s" % sysvals.version)
- sys.exit()
+ pprint("Version %s" % sysvals.version)
+ sys.exit(0)
elif(arg == '-x2'):
sysvals.execcount = 2
elif(arg == '-x2delay'):
@@ -5781,10 +5942,16 @@ if __name__ == '__main__':
sysvals.skiphtml = True
elif(arg == '-cgdump'):
sysvals.cgdump = True
+ elif(arg == '-devdump'):
+ sysvals.devdump = True
elif(arg == '-genhtml'):
genhtml = True
elif(arg == '-addlogs'):
sysvals.dmesglog = sysvals.ftracelog = True
+ elif(arg == '-addlogdmesg'):
+ sysvals.dmesglog = True
+ elif(arg == '-addlogftrace'):
+ sysvals.ftracelog = True
elif(arg == '-verbose'):
sysvals.verbose = True
elif(arg == '-proc'):
@@ -5811,14 +5978,11 @@ if __name__ == '__main__':
try:
val = args.next()
except:
- doError('-display requires "on" or "off"', True)
- if val.lower() in switchvalues:
- if val.lower() in switchoff:
- sysvals.display = -1
- else:
- sysvals.display = 1
- else:
- doError('invalid option: %s, use "on/off"' % val, True)
+ doError('-display requires a mode value', True)
+ disopt = ['on', 'off', 'standby', 'suspend']
+ if val.lower() not in disopt:
+ doError('valid display mode values are %s' % disopt, True)
+ sysvals.display = val.lower()
elif(arg == '-maxdepth'):
sysvals.max_graph_depth = getArgInt('-maxdepth', args, 0, 1000)
elif(arg == '-rtcwake'):
@@ -5847,9 +6011,9 @@ if __name__ == '__main__':
except:
doError('No phase name supplied', True)
d = Data(0)
- if val not in d.phases:
+ if val not in d.phasedef:
doError('invalid phase --> (%s: %s), valid phases are %s'\
- % (arg, val, d.phases), True)
+ % (arg, val, d.phasedef.keys()), True)
sysvals.cgphase = val
elif(arg == '-cgfilter'):
try:
@@ -5951,6 +6115,7 @@ if __name__ == '__main__':
except:
doError('No result file supplied', True)
sysvals.result = val
+ sysvals.signalHandlerInit()
else:
doError('Invalid argument: '+arg, True)
@@ -5975,12 +6140,20 @@ if __name__ == '__main__':
# just run a utility command and exit
if(cmd != ''):
+ ret = 0
if(cmd == 'status'):
- statusCheck(True)
+ if statusCheck(True):
+ ret = 1
elif(cmd == 'fpdt'):
- getFPDT(True)
+ if not getFPDT(True):
+ ret = 1
elif(cmd == 'battery'):
- print 'AC Connect: %s\nCharge: %d' % getBattery()
+ out = getBattery()
+ if out:
+ pprint('AC Connect : %s\nBattery Charge: %d' % out)
+ else:
+ pprint('no battery found')
+ ret = 1
elif(cmd == 'sysinfo'):
sysvals.printSystemInfo(True)
elif(cmd == 'devinfo'):
@@ -5993,25 +6166,28 @@ if __name__ == '__main__':
sysvals.getFtraceFilterFunctions(False)
elif(cmd == 'summary'):
runSummary(sysvals.outdir, True, genhtml)
- sys.exit()
+ elif(cmd in ['xon', 'xoff', 'xstandby', 'xsuspend', 'xinit', 'xreset']):
+ sysvals.verbose = True
+ ret = displayControl(cmd[1:])
+ elif(cmd == 'xstat'):
+ pprint('Display Status: %s' % displayControl('stat').upper())
+ sys.exit(ret)
# if instructed, re-analyze existing data files
if(sysvals.notestrun):
stamp = rerunTest()
sysvals.outputResult(stamp)
- sys.exit()
+ sys.exit(0)
# verify that we can run a test
- if(not statusCheck()):
- doError('Check FAILED, aborting the test run!')
+ error = statusCheck()
+ if(error):
+ doError(error)
- # extract mem modes and convert
+ # extract mem/disk extra modes and convert
mode = sysvals.suspendmode
- if 'mem' == mode[:3]:
- if '-' in mode:
- memmode = mode.split('-')[-1]
- else:
- memmode = 'deep'
+ if mode.startswith('mem'):
+ memmode = mode.split('-', 1)[-1] if '-' in mode else 'deep'
if memmode == 'shallow':
mode = 'standby'
elif memmode == 's2idle':
@@ -6020,13 +6196,16 @@ if __name__ == '__main__':
mode = 'mem'
sysvals.memmode = memmode
sysvals.suspendmode = mode
+ if mode.startswith('disk-'):
+ sysvals.diskmode = mode.split('-', 1)[-1]
+ sysvals.suspendmode = 'disk'
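+ # How the conversion above plays out (illustrative; 'platform' is just
+ # an example disk mode):
+ #   'mem'           -> suspendmode 'mem',     memmode 'deep'
+ #   'mem-shallow'   -> suspendmode 'standby', memmode 'shallow'
+ #   'disk-platform' -> suspendmode 'disk',    diskmode 'platform'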
sysvals.systemInfo(dmidecode(sysvals.mempath))
setRuntimeSuspend(True)
if sysvals.display:
- call('xset -d :0.0 dpms 0 0 0', shell=True)
- call('xset -d :0.0 s off', shell=True)
+ displayControl('init')
+ ret = 0
if sysvals.multitest['run']:
# run multiple tests in a separate subdirectory
if not sysvals.outdir:
@@ -6036,22 +6215,23 @@ if __name__ == '__main__':
os.mkdir(sysvals.outdir)
for i in range(sysvals.multitest['count']):
if(i != 0):
- print('Waiting %d seconds...' % (sysvals.multitest['delay']))
+ pprint('Waiting %d seconds...' % (sysvals.multitest['delay']))
time.sleep(sysvals.multitest['delay'])
- print('TEST (%d/%d) START' % (i+1, sysvals.multitest['count']))
+ pprint('TEST (%d/%d) START' % (i+1, sysvals.multitest['count']))
fmt = 'suspend-%y%m%d-%H%M%S'
sysvals.testdir = os.path.join(sysvals.outdir, datetime.now().strftime(fmt))
- runTest(i+1)
- print('TEST (%d/%d) COMPLETE' % (i+1, sysvals.multitest['count']))
+ ret = runTest(i+1)
+ pprint('TEST (%d/%d) COMPLETE' % (i+1, sysvals.multitest['count']))
sysvals.logmsg = ''
if not sysvals.skiphtml:
runSummary(sysvals.outdir, False, False)
- sysvals.sudouser(sysvals.outdir)
+ sysvals.sudoUserchown(sysvals.outdir)
else:
if sysvals.outdir:
sysvals.testdir = sysvals.outdir
# run the test in the current directory
- runTest()
+ ret = runTest()
if sysvals.display:
- call('xset -d :0.0 s reset', shell=True)
+ displayControl('reset')
setRuntimeSuspend(False)
+ sys.exit(ret)
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 980bd9d20646..328f62e6ea02 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2082,7 +2082,7 @@ int has_turbo_ratio_group_limits(int family, int model)
switch (model) {
case INTEL_FAM6_ATOM_GOLDMONT:
case INTEL_FAM6_SKYLAKE_X:
- case INTEL_FAM6_ATOM_DENVERTON:
+ case INTEL_FAM6_ATOM_GOLDMONT_X:
return 1;
}
return 0;
@@ -3149,9 +3149,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
pkg_cstate_limits = skx_pkg_cstate_limits;
has_misc_feature_control = 1;
break;
- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
+ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
no_MSR_MISC_PWR_MGMT = 1;
- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
+ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */
pkg_cstate_limits = slv_pkg_cstate_limits;
break;
case INTEL_FAM6_ATOM_AIRMONT: /* AMT */
@@ -3163,8 +3163,8 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
pkg_cstate_limits = phi_pkg_cstate_limits;
break;
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
pkg_cstate_limits = bxt_pkg_cstate_limits;
break;
default:
@@ -3193,9 +3193,9 @@ int has_slv_msrs(unsigned int family, unsigned int model)
return 0;
switch (model) {
- case INTEL_FAM6_ATOM_SILVERMONT1:
- case INTEL_FAM6_ATOM_MERRIFIELD:
- case INTEL_FAM6_ATOM_MOOREFIELD:
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_MID:
+ case INTEL_FAM6_ATOM_AIRMONT_MID:
return 1;
}
return 0;
@@ -3207,7 +3207,7 @@ int is_dnv(unsigned int family, unsigned int model)
return 0;
switch (model) {
- case INTEL_FAM6_ATOM_DENVERTON:
+ case INTEL_FAM6_ATOM_GOLDMONT_X:
return 1;
}
return 0;
@@ -3724,8 +3724,8 @@ double get_tdp(unsigned int model)
return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
switch (model) {
- case INTEL_FAM6_ATOM_SILVERMONT1:
- case INTEL_FAM6_ATOM_SILVERMONT2:
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_X:
return 30.0;
default:
return 135.0;
@@ -3791,7 +3791,7 @@ void rapl_probe(unsigned int family, unsigned int model)
}
break;
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
if (rapl_joules)
BIC_PRESENT(BIC_Pkg_J);
@@ -3850,8 +3850,8 @@ void rapl_probe(unsigned int family, unsigned int model)
BIC_PRESENT(BIC_RAMWatt);
}
break;
- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
+ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
+ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */
do_rapl = RAPL_PKG | RAPL_CORES;
if (rapl_joules) {
BIC_PRESENT(BIC_Pkg_J);
@@ -3861,7 +3861,7 @@ void rapl_probe(unsigned int family, unsigned int model)
BIC_PRESENT(BIC_CorWatt);
}
break;
- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
+ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS;
BIC_PRESENT(BIC_PKG__);
BIC_PRESENT(BIC_RAM__);
@@ -3884,7 +3884,7 @@ void rapl_probe(unsigned int family, unsigned int model)
return;
rapl_power_units = 1.0 / (1 << (msr & 0xF));
- if (model == INTEL_FAM6_ATOM_SILVERMONT1)
+ if (model == INTEL_FAM6_ATOM_SILVERMONT)
rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
else
rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
@@ -4141,8 +4141,8 @@ int has_snb_msrs(unsigned int family, unsigned int model)
case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
case INTEL_FAM6_SKYLAKE_X: /* SKX */
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
return 1;
}
return 0;
@@ -4174,7 +4174,7 @@ int has_hsw_msrs(unsigned int family, unsigned int model)
case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
return 1;
}
return 0;
@@ -4209,8 +4209,8 @@ int is_slm(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
switch (model) {
- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
+ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
+ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */
return 1;
}
return 0;
@@ -4581,11 +4581,11 @@ void process_cpuid()
case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
crystal_hz = 24000000; /* 24.0 MHz */
break;
- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
+ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
crystal_hz = 25000000; /* 25.0 MHz */
break;
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
crystal_hz = 19200000; /* 19.2 MHz */
break;
default:
diff --git a/tools/spi/spidev_test.c b/tools/spi/spidev_test.c
index 8c590cd1171a..4c12e6aea5d5 100644
--- a/tools/spi/spidev_test.c
+++ b/tools/spi/spidev_test.c
@@ -73,12 +73,12 @@ static void hex_dump(const void *src, size_t length, size_t line_size,
while (i++ % line_size)
printf("__ ");
}
- printf(" | "); /* right close */
+ printf(" |");
while (line < address) {
c = *line++;
- printf("%c", (c < 33 || c == 255) ? 0x2E : c);
+ printf("%c", (c < 32 || c > 126) ? '.' : c);
}
- printf("\n");
+ printf("|\n");
if (length > 0)
printf("%s | ", prefix);
}
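/*
 * Illustrative output after this change (bytes hypothetical): the ASCII
 * column is now wrapped hexdump -C style in |...|, and any byte outside
 * printable ASCII (32..126) is shown as '.':
 *   TX | 48 65 6C 6C 6F 00 FF __ __ __ __ __ __ __ __ __  |Hello..|
 */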
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 0392153a0009..778ceb651000 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -22,6 +22,7 @@ NVDIMM_SRC := $(DRIVERS)/nvdimm
ACPI_SRC := $(DRIVERS)/acpi/nfit
DAX_SRC := $(DRIVERS)/dax
ccflags-y := -I$(src)/$(NVDIMM_SRC)/
+ccflags-y += -I$(src)/$(ACPI_SRC)/
obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
diff --git a/tools/testing/nvdimm/acpi_nfit_test.c b/tools/testing/nvdimm/acpi_nfit_test.c
index 43521512e577..fec8fb1b7715 100644
--- a/tools/testing/nvdimm/acpi_nfit_test.c
+++ b/tools/testing/nvdimm/acpi_nfit_test.c
@@ -4,5 +4,13 @@
#include <linux/module.h>
#include <linux/printk.h>
#include "watermark.h"
+#include <nfit.h>
nfit_test_watermark(acpi_nfit);
+
+/* strong / override definition of nfit_intel_shutdown_status */
+void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
+{
+ set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
+ nfit_mem->dirty_shutdown = 42;
+}
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index cffc2c5a778d..9527d47a1070 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -24,6 +24,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <nd-core.h>
+#include <intel.h>
#include <nfit.h>
#include <nd.h>
#include "nfit_test.h"
@@ -148,6 +149,7 @@ static const struct nd_intel_smart smart_def = {
| ND_INTEL_SMART_ALARM_VALID
| ND_INTEL_SMART_USED_VALID
| ND_INTEL_SMART_SHUTDOWN_VALID
+ | ND_INTEL_SMART_SHUTDOWN_COUNT_VALID
| ND_INTEL_SMART_MTEMP_VALID
| ND_INTEL_SMART_CTEMP_VALID,
.health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
@@ -160,8 +162,8 @@ static const struct nd_intel_smart smart_def = {
.ait_status = 1,
.life_used = 5,
.shutdown_state = 0,
+ .shutdown_count = 42,
.vendor_size = 0,
- .shutdown_count = 100,
};
struct nfit_test_fw {
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index 33752e06ff8d..ade14fe3837e 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -117,30 +117,6 @@ struct nd_cmd_ars_err_inj_stat {
#define ND_INTEL_SMART_INJECT_FATAL (1 << 2)
#define ND_INTEL_SMART_INJECT_SHUTDOWN (1 << 3)
-struct nd_intel_smart {
- __u32 status;
- union {
- struct {
- __u32 flags;
- __u8 reserved0[4];
- __u8 health;
- __u8 spares;
- __u8 life_used;
- __u8 alarm_flags;
- __u16 media_temperature;
- __u16 ctrl_temperature;
- __u32 shutdown_count;
- __u8 ait_status;
- __u16 pmic_temperature;
- __u8 reserved1[8];
- __u8 shutdown_state;
- __u32 vendor_size;
- __u8 vendor_data[92];
- } __packed;
- __u8 data[128];
- };
-} __packed;
-
struct nd_intel_smart_threshold {
__u32 status;
union {
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
index 72c25a3cb658..d9a725478375 100644
--- a/tools/testing/selftests/android/Makefile
+++ b/tools/testing/selftests/android/Makefile
@@ -6,7 +6,7 @@ TEST_PROGS := run.sh
include ../lib.mk
-all:
+all: khdr
@for DIR in $(SUBDIRS); do \
BUILD_TARGET=$(OUTPUT)/$$DIR; \
mkdir $$BUILD_TARGET -p; \
diff --git a/tools/testing/selftests/android/ion/config b/tools/testing/selftests/android/config
index b4ad748a9dd9..b4ad748a9dd9 100644
--- a/tools/testing/selftests/android/ion/config
+++ b/tools/testing/selftests/android/config
diff --git a/tools/testing/selftests/android/ion/Makefile b/tools/testing/selftests/android/ion/Makefile
index e03695287f76..88cfe88e466f 100644
--- a/tools/testing/selftests/android/ion/Makefile
+++ b/tools/testing/selftests/android/ion/Makefile
@@ -10,6 +10,8 @@ $(TEST_GEN_FILES): ipcsocket.c ionutils.c
TEST_PROGS := ion_test.sh
+KSFT_KHDR_INSTALL := 1
+top_srcdir = ../../../../..
include ../../lib.mk
$(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 49938d72cf63..1b799e30c06d 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -19,3 +19,11 @@ test_btf
test_sockmap
test_lirc_mode2_user
get_cgroup_id_user
+test_skb_cgroup_id_user
+test_socket_cookie
+test_cgroup_storage
+test_select_reuseport
+test_flow_dissector
+flow_dissector_load
+test_netcnt
+test_section_names
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index fff7fb1285fc..e39dfb4e7970 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -23,7 +23,8 @@ $(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
- test_socket_cookie test_cgroup_storage test_select_reuseport
+ test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
+ test_netcnt
TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
@@ -35,7 +36,8 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \
test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
- test_skb_cgroup_id_kern.o
+ test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o \
+ test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -47,10 +49,15 @@ TEST_PROGS := test_kmod.sh \
test_tunnel.sh \
test_lwt_seg6local.sh \
test_lirc_mode2.sh \
- test_skb_cgroup_id.sh
+ test_skb_cgroup_id.sh \
+ test_flow_dissector.sh \
+ test_xdp_vlan.sh
+
+TEST_PROGS_EXTENDED := with_addr.sh
# Compile but not part of 'make run_tests'
-TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user
+TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
+ flow_dissector_load test_flow_dissector
include ../lib.mk
@@ -70,6 +77,7 @@ $(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c
$(OUTPUT)/test_progs: trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
+$(OUTPUT)/test_netcnt: cgroup_helpers.c
.PHONY: force
@@ -110,6 +118,9 @@ CLANG_FLAGS = -I. -I./include/uapi -I../../../include/uapi \
$(OUTPUT)/test_l4lb_noinline.o: CLANG_FLAGS += -fno-inline
$(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
+$(OUTPUT)/test_queue_map.o: test_queue_stack_map.h
+$(OUTPUT)/test_stack_map.o: test_queue_stack_map.h
+
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
diff --git a/tools/testing/selftests/bpf/bpf_flow.c b/tools/testing/selftests/bpf/bpf_flow.c
new file mode 100644
index 000000000000..107350a7821d
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_flow.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <limits.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/icmp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_packet.h>
+#include <sys/socket.h>
+#include <linux/if_tunnel.h>
+#include <linux/mpls.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+int _version SEC("version") = 1;
+#define PROG(F) SEC(#F) int bpf_func_##F
+
+/* These are the identifiers of the BPF programs that will be used in tail
+ * calls. Name is limited to 16 characters; with the terminating character and
+ * the bpf_func_ prefix above, we have only 6 to work with. Anything after
+ * that will be cropped.
+ */
+enum {
+ IP,
+ IPV6,
+ IPV6OP, /* Destination/Hop-by-Hop Options IPv6 Extension header */
+ IPV6FR, /* Fragmentation IPv6 Extension Header */
+ MPLS,
+ VLAN,
+};
+
+#define IP_MF 0x2000
+#define IP_OFFSET 0x1FFF
+#define IP6_MF 0x0001
+#define IP6_OFFSET 0xFFF8
+
+struct vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+
+struct gre_hdr {
+ __be16 flags;
+ __be16 proto;
+};
+
+struct frag_hdr {
+ __u8 nexthdr;
+ __u8 reserved;
+ __be16 frag_off;
+ __be32 identification;
+};
+
+struct bpf_map_def SEC("maps") jmp_table = {
+ .type = BPF_MAP_TYPE_PROG_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 8
+};
+
+static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
+ __u16 hdr_size,
+ void *buffer)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ __u16 nhoff = skb->flow_keys->nhoff;
+ __u8 *hdr;
+
+ /* Verify that this variable offset does not overflow */
+ if (nhoff > (USHRT_MAX - hdr_size))
+ return NULL;
+
+ hdr = data + nhoff;
+ if (hdr + hdr_size <= data_end)
+ return hdr;
+
+ if (bpf_skb_load_bytes(skb, nhoff, buffer, hdr_size))
+ return NULL;
+
+ return buffer;
+}
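+/* Note on the pattern above (illustrative): the fast path returns a
+ * pointer straight into the linear packet data when the full header is
+ * already there; otherwise bpf_skb_load_bytes() copies the header into
+ * the caller-supplied buffer (the _iph/_tcp style shadow structs used
+ * below), so headers that sit in non-linear data can be parsed too.
+ */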
+
+/* Dispatches on ETHERTYPE */
+static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
+{
+ struct bpf_flow_keys *keys = skb->flow_keys;
+
+ keys->n_proto = proto;
+ switch (proto) {
+ case bpf_htons(ETH_P_IP):
+ bpf_tail_call(skb, &jmp_table, IP);
+ break;
+ case bpf_htons(ETH_P_IPV6):
+ bpf_tail_call(skb, &jmp_table, IPV6);
+ break;
+ case bpf_htons(ETH_P_MPLS_MC):
+ case bpf_htons(ETH_P_MPLS_UC):
+ bpf_tail_call(skb, &jmp_table, MPLS);
+ break;
+ case bpf_htons(ETH_P_8021Q):
+ case bpf_htons(ETH_P_8021AD):
+ bpf_tail_call(skb, &jmp_table, VLAN);
+ break;
+ default:
+ /* Protocol not supported */
+ return BPF_DROP;
+ }
+
+ return BPF_DROP;
+}
+
+SEC("dissect")
+int _dissect(struct __sk_buff *skb)
+{
+ if (!skb->vlan_present)
+ return parse_eth_proto(skb, skb->protocol);
+ else
+ return parse_eth_proto(skb, skb->vlan_proto);
+}
+
+/* Parses on IPPROTO_* */
+static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
+{
+ struct bpf_flow_keys *keys = skb->flow_keys;
+ void *data_end = (void *)(long)skb->data_end;
+ struct icmphdr *icmp, _icmp;
+ struct gre_hdr *gre, _gre;
+ struct ethhdr *eth, _eth;
+ struct tcphdr *tcp, _tcp;
+ struct udphdr *udp, _udp;
+
+ keys->ip_proto = proto;
+ switch (proto) {
+ case IPPROTO_ICMP:
+ icmp = bpf_flow_dissect_get_header(skb, sizeof(*icmp), &_icmp);
+ if (!icmp)
+ return BPF_DROP;
+ return BPF_OK;
+ case IPPROTO_IPIP:
+ keys->is_encap = true;
+ return parse_eth_proto(skb, bpf_htons(ETH_P_IP));
+ case IPPROTO_IPV6:
+ keys->is_encap = true;
+ return parse_eth_proto(skb, bpf_htons(ETH_P_IPV6));
+ case IPPROTO_GRE:
+ gre = bpf_flow_dissect_get_header(skb, sizeof(*gre), &_gre);
+ if (!gre)
+ return BPF_DROP;
+
+ if (bpf_htons(gre->flags & GRE_VERSION))
+ /* Only inspect standard GRE packets with version 0 */
+ return BPF_OK;
+
+ keys->nhoff += sizeof(*gre); /* Step over GRE Flags and Proto */
+ if (GRE_IS_CSUM(gre->flags))
+ keys->nhoff += 4; /* Step over checksum and padding */
+ if (GRE_IS_KEY(gre->flags))
+ keys->nhoff += 4; /* Step over key */
+ if (GRE_IS_SEQ(gre->flags))
+ keys->nhoff += 4; /* Step over sequence number */
+
+ keys->is_encap = true;
+
+ if (gre->proto == bpf_htons(ETH_P_TEB)) {
+ eth = bpf_flow_dissect_get_header(skb, sizeof(*eth),
+ &_eth);
+ if (!eth)
+ return BPF_DROP;
+
+ keys->nhoff += sizeof(*eth);
+
+ return parse_eth_proto(skb, eth->h_proto);
+ } else {
+ return parse_eth_proto(skb, gre->proto);
+ }
+ case IPPROTO_TCP:
+ tcp = bpf_flow_dissect_get_header(skb, sizeof(*tcp), &_tcp);
+ if (!tcp)
+ return BPF_DROP;
+
+ if (tcp->doff < 5)
+ return BPF_DROP;
+
+ if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
+ return BPF_DROP;
+
+ keys->thoff = keys->nhoff;
+ keys->sport = tcp->source;
+ keys->dport = tcp->dest;
+ return BPF_OK;
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ udp = bpf_flow_dissect_get_header(skb, sizeof(*udp), &_udp);
+ if (!udp)
+ return BPF_DROP;
+
+ keys->thoff = keys->nhoff;
+ keys->sport = udp->source;
+ keys->dport = udp->dest;
+ return BPF_OK;
+ default:
+ return BPF_DROP;
+ }
+
+ return BPF_DROP;
+}
+
+static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
+{
+ struct bpf_flow_keys *keys = skb->flow_keys;
+
+ keys->ip_proto = nexthdr;
+ switch (nexthdr) {
+ case IPPROTO_HOPOPTS:
+ case IPPROTO_DSTOPTS:
+ bpf_tail_call(skb, &jmp_table, IPV6OP);
+ break;
+ case IPPROTO_FRAGMENT:
+ bpf_tail_call(skb, &jmp_table, IPV6FR);
+ break;
+ default:
+ return parse_ip_proto(skb, nexthdr);
+ }
+
+ return BPF_DROP;
+}
+
+PROG(IP)(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ struct bpf_flow_keys *keys = skb->flow_keys;
+ void *data = (void *)(long)skb->data;
+ struct iphdr *iph, _iph;
+ bool done = false;
+
+ iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph);
+ if (!iph)
+ return BPF_DROP;
+
+ /* IP header cannot be smaller than 20 bytes */
+ if (iph->ihl < 5)
+ return BPF_DROP;
+
+ keys->addr_proto = ETH_P_IP;
+ keys->ipv4_src = iph->saddr;
+ keys->ipv4_dst = iph->daddr;
+
+ keys->nhoff += iph->ihl << 2;
+ if (data + keys->nhoff > data_end)
+ return BPF_DROP;
+
+ if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
+ keys->is_frag = true;
+ if (iph->frag_off & bpf_htons(IP_OFFSET))
+ /* From second fragment on, packets do not have headers
+ * we can parse.
+ */
+ done = true;
+ else
+ keys->is_first_frag = true;
+ }
+
+ if (done)
+ return BPF_OK;
+
+ return parse_ip_proto(skb, iph->protocol);
+}
+
+PROG(IPV6)(struct __sk_buff *skb)
+{
+ struct bpf_flow_keys *keys = skb->flow_keys;
+ struct ipv6hdr *ip6h, _ip6h;
+
+ ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
+ if (!ip6h)
+ return BPF_DROP;
+
+ keys->addr_proto = ETH_P_IPV6;
+ memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
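+ /* saddr and daddr are adjacent in the header, so one memcpy of
+ * 2*sizeof(saddr) copies both the source and destination address
+ */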
+
+ keys->nhoff += sizeof(struct ipv6hdr);
+
+ return parse_ipv6_proto(skb, ip6h->nexthdr);
+}
+
+PROG(IPV6OP)(struct __sk_buff *skb)
+{
+ struct ipv6_opt_hdr *ip6h, _ip6h;
+
+ ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
+ if (!ip6h)
+ return BPF_DROP;
+
+ /* hdrlen is in units of 8 octets and does not include the first
+ * 8 bytes of the header
+ */
+ skb->flow_keys->nhoff += (1 + ip6h->hdrlen) << 3;
+
+ return parse_ipv6_proto(skb, ip6h->nexthdr);
+}
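+/* Worked example for the length math above: ip6h->hdrlen == 1 gives
+ * (1 + 1) << 3 = 16 bytes, i.e. the mandatory first 8 bytes plus one
+ * extra 8-octet unit.
+ */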
+
+PROG(IPV6FR)(struct __sk_buff *skb)
+{
+ struct bpf_flow_keys *keys = skb->flow_keys;
+ struct frag_hdr *fragh, _fragh;
+
+ fragh = bpf_flow_dissect_get_header(skb, sizeof(*fragh), &_fragh);
+ if (!fragh)
+ return BPF_DROP;
+
+ keys->nhoff += sizeof(*fragh);
+ keys->is_frag = true;
+ if (!(fragh->frag_off & bpf_htons(IP6_OFFSET)))
+ keys->is_first_frag = true;
+
+ return parse_ipv6_proto(skb, fragh->nexthdr);
+}
+
+PROG(MPLS)(struct __sk_buff *skb)
+{
+ struct mpls_label *mpls, _mpls;
+
+ mpls = bpf_flow_dissect_get_header(skb, sizeof(*mpls), &_mpls);
+ if (!mpls)
+ return BPF_DROP;
+
+ return BPF_OK;
+}
+
+PROG(VLAN)(struct __sk_buff *skb)
+{
+ struct bpf_flow_keys *keys = skb->flow_keys;
+ struct vlan_hdr *vlan, _vlan;
+ __be16 proto;
+
+ /* Peek back to see if the frame is single- or double-tagged */
+ if (bpf_skb_load_bytes(skb, keys->nhoff - sizeof(proto), &proto,
+ sizeof(proto)))
+ return BPF_DROP;
+
+ /* Account for double-tagging */
+ if (proto == bpf_htons(ETH_P_8021AD)) {
+ vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
+ if (!vlan)
+ return BPF_DROP;
+
+ if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
+ return BPF_DROP;
+
+ keys->nhoff += sizeof(*vlan);
+ }
+
+ vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
+ if (!vlan)
+ return BPF_DROP;
+
+ keys->nhoff += sizeof(*vlan);
+ /* Only allow 8021AD + 8021Q double tagging and no triple tagging. */
+ if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
+ vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
+ return BPF_DROP;
+
+ return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
+}
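+/* Tag handling above (illustrative): the peek-back reads the EtherType
+ * that got us here; if it was 8021AD the outer tag is stepped over first,
+ * then the only (or inner) 8021Q tag is parsed:
+ *   single: | ETH | 8021Q | IP |
+ *   double: | ETH | 8021AD | 8021Q | IP |
+ * A third stacked tag leaves 8021Q/8021AD as the encapsulated proto,
+ * which is rejected as triple tagging.
+ */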
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index e4be7730222d..686e57ce40f4 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -16,6 +16,13 @@ static int (*bpf_map_update_elem)(void *map, void *key, void *value,
(void *) BPF_FUNC_map_update_elem;
static int (*bpf_map_delete_elem)(void *map, void *key) =
(void *) BPF_FUNC_map_delete_elem;
+static int (*bpf_map_push_elem)(void *map, void *value,
+ unsigned long long flags) =
+ (void *) BPF_FUNC_map_push_elem;
+static int (*bpf_map_pop_elem)(void *map, void *value) =
+ (void *) BPF_FUNC_map_pop_elem;
+static int (*bpf_map_peek_elem)(void *map, void *value) =
+ (void *) BPF_FUNC_map_peek_elem;
static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) =
(void *) BPF_FUNC_probe_read;
static unsigned long long (*bpf_ktime_get_ns)(void) =
@@ -104,6 +111,8 @@ static int (*bpf_msg_cork_bytes)(void *ctx, int len) =
(void *) BPF_FUNC_msg_cork_bytes;
static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) =
(void *) BPF_FUNC_msg_pull_data;
+static int (*bpf_msg_push_data)(void *ctx, int start, int end, int flags) =
+ (void *) BPF_FUNC_msg_push_data;
static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
(void *) BPF_FUNC_bind;
static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
@@ -143,6 +152,22 @@ static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) =
(void *) BPF_FUNC_skb_cgroup_id;
static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
(void *) BPF_FUNC_skb_ancestor_cgroup_id;
+static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
+ struct bpf_sock_tuple *tuple,
+ int size, unsigned int netns_id,
+ unsigned long long flags) =
+ (void *) BPF_FUNC_sk_lookup_tcp;
+static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
+ struct bpf_sock_tuple *tuple,
+ int size, unsigned int netns_id,
+ unsigned long long flags) =
+ (void *) BPF_FUNC_sk_lookup_udp;
+static int (*bpf_sk_release)(struct bpf_sock *sk) =
+ (void *) BPF_FUNC_sk_release;
+static int (*bpf_skb_vlan_push)(void *ctx, __be16 vlan_proto, __u16 vlan_tci) =
+ (void *) BPF_FUNC_skb_vlan_push;
+static int (*bpf_skb_vlan_pop)(void *ctx) =
+ (void *) BPF_FUNC_skb_vlan_pop;
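+/* Usage sketch for the queue/stack helpers declared above (assumes a
+ * BPF_MAP_TYPE_QUEUE map named queue is defined elsewhere):
+ *   __u32 v = 1;
+ *   bpf_map_push_elem(&queue, &v, 0);
+ *   bpf_map_peek_elem(&queue, &v);
+ *   bpf_map_pop_elem(&queue, &v);
+ */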
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index b4994a94968b..dd49df5e2df4 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -18,3 +18,5 @@ CONFIG_CRYPTO_HMAC=m
CONFIG_CRYPTO_SHA256=m
CONFIG_VXLAN=y
CONFIG_GENEVE=y
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_LWTUNNEL=y
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
new file mode 100644
index 000000000000..d3273b5b3173
--- /dev/null
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <error.h>
+#include <errno.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
+const char *cfg_map_name = "jmp_table";
+bool cfg_attach = true;
+char *cfg_section_name;
+char *cfg_path_name;
+
+static void load_and_attach_program(void)
+{
+ struct bpf_program *prog, *main_prog;
+ struct bpf_map *prog_array;
+ int i, fd, prog_fd, ret;
+ struct bpf_object *obj;
+ int prog_array_fd;
+
+ ret = bpf_prog_load(cfg_path_name, BPF_PROG_TYPE_FLOW_DISSECTOR, &obj,
+ &prog_fd);
+ if (ret)
+ error(1, 0, "bpf_prog_load %s", cfg_path_name);
+
+ main_prog = bpf_object__find_program_by_title(obj, cfg_section_name);
+ if (!main_prog)
+ error(1, 0, "bpf_object__find_program_by_title %s",
+ cfg_section_name);
+
+ prog_fd = bpf_program__fd(main_prog);
+ if (prog_fd < 0)
+ error(1, 0, "bpf_program__fd");
+
+ prog_array = bpf_object__find_map_by_name(obj, cfg_map_name);
+ if (!prog_array)
+ error(1, 0, "bpf_object__find_map_by_name %s", cfg_map_name);
+
+ prog_array_fd = bpf_map__fd(prog_array);
+ if (prog_array_fd < 0)
+ error(1, 0, "bpf_map__fd %s", cfg_map_name);
+
+ i = 0;
+ bpf_object__for_each_program(prog, obj) {
+ fd = bpf_program__fd(prog);
+ if (fd < 0)
+ error(1, 0, "bpf_program__fd");
+
+ if (fd != prog_fd) {
+ printf("%d: %s\n", i, bpf_program__title(prog, false));
+ bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
+ ++i;
+ }
+ }
+
+ ret = bpf_prog_attach(prog_fd, 0 /* Ignore */, BPF_FLOW_DISSECTOR, 0);
+ if (ret)
+ error(1, 0, "bpf_prog_attach %s", cfg_path_name);
+
+ ret = bpf_object__pin(obj, cfg_pin_path);
+ if (ret)
+ error(1, 0, "bpf_object__pin %s", cfg_pin_path);
+
+}
+
+static void detach_program(void)
+{
+ char command[64];
+ int ret;
+
+ ret = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
+ if (ret)
+ error(1, 0, "bpf_prog_detach");
+
+ /* To unpin, it is necessary and sufficient to just remove this dir */
+ sprintf(command, "rm -r %s", cfg_pin_path);
+ ret = system(command);
+ if (ret)
+ error(1, errno, command);
+}
+
+static void parse_opts(int argc, char **argv)
+{
+ bool attach = false;
+ bool detach = false;
+ int c;
+
+ while ((c = getopt(argc, argv, "adp:s:")) != -1) {
+ switch (c) {
+ case 'a':
+ if (detach)
+ error(1, 0, "attach/detach are exclusive");
+ attach = true;
+ break;
+ case 'd':
+ if (attach)
+ error(1, 0, "attach/detach are exclusive");
+ detach = true;
+ break;
+ case 'p':
+ if (cfg_path_name)
+ error(1, 0, "only one prog name can be given");
+
+ cfg_path_name = optarg;
+ break;
+ case 's':
+ if (cfg_section_name)
+ error(1, 0, "only one section can be given");
+
+ cfg_section_name = optarg;
+ break;
+ }
+ }
+
+ if (detach)
+ cfg_attach = false;
+
+ if (cfg_attach && !cfg_path_name)
+ error(1, 0, "must provide a path to the BPF program");
+
+ if (cfg_attach && !cfg_section_name)
+ error(1, 0, "must provide a section name");
+}
+
+int main(int argc, char **argv)
+{
+ parse_opts(argc, argv);
+ if (cfg_attach)
+ load_and_attach_program();
+ else
+ detach_program();
+ return 0;
+}
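+/* Typical invocations, per the options parsed above:
+ *   flow_dissector_load -a -p bpf_flow.o -s dissect   (load, attach, pin)
+ *   flow_dissector_load -d                            (detach, unpin)
+ */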
diff --git a/tools/testing/selftests/bpf/netcnt_common.h b/tools/testing/selftests/bpf/netcnt_common.h
new file mode 100644
index 000000000000..81084c1c2c23
--- /dev/null
+++ b/tools/testing/selftests/bpf/netcnt_common.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __NETCNT_COMMON_H
+#define __NETCNT_COMMON_H
+
+#include <linux/types.h>
+
+#define MAX_PERCPU_PACKETS 32
+
+struct percpu_net_cnt {
+ __u64 packets;
+ __u64 bytes;
+
+ __u64 prev_ts;
+
+ __u64 prev_packets;
+ __u64 prev_bytes;
+};
+
+struct net_cnt {
+ __u64 packets;
+ __u64 bytes;
+};
+
+#endif
diff --git a/tools/testing/selftests/bpf/netcnt_prog.c b/tools/testing/selftests/bpf/netcnt_prog.c
new file mode 100644
index 000000000000..1198abca1360
--- /dev/null
+++ b/tools/testing/selftests/bpf/netcnt_prog.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <linux/version.h>
+
+#include "bpf_helpers.h"
+#include "netcnt_common.h"
+
+#define MAX_BPS (3 * 1024 * 1024)
+
+#define REFRESH_TIME_NS 100000000
+#define NS_PER_SEC 1000000000
+
+struct bpf_map_def SEC("maps") percpu_netcnt = {
+ .type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ .key_size = sizeof(struct bpf_cgroup_storage_key),
+ .value_size = sizeof(struct percpu_net_cnt),
+};
+
+struct bpf_map_def SEC("maps") netcnt = {
+ .type = BPF_MAP_TYPE_CGROUP_STORAGE,
+ .key_size = sizeof(struct bpf_cgroup_storage_key),
+ .value_size = sizeof(struct net_cnt),
+};
+
+SEC("cgroup/skb")
+int bpf_netcnt(struct __sk_buff *skb)
+{
+ struct percpu_net_cnt *percpu_cnt;
+ char fmt[] = "%d %llu %llu\n";
+ struct net_cnt *cnt;
+ __u64 ts, dt;
+ int ret;
+
+ cnt = bpf_get_local_storage(&netcnt, 0);
+ percpu_cnt = bpf_get_local_storage(&percpu_netcnt, 0);
+
+ percpu_cnt->packets++;
+ percpu_cnt->bytes += skb->len;
+
+ if (percpu_cnt->packets > MAX_PERCPU_PACKETS) {
+ __sync_fetch_and_add(&cnt->packets,
+ percpu_cnt->packets);
+ percpu_cnt->packets = 0;
+
+ __sync_fetch_and_add(&cnt->bytes,
+ percpu_cnt->bytes);
+ percpu_cnt->bytes = 0;
+ }
+
+ ts = bpf_ktime_get_ns();
+ dt = ts - percpu_cnt->prev_ts;
+
+ dt *= MAX_BPS;
+ dt /= NS_PER_SEC;
+
+ if (cnt->bytes + percpu_cnt->bytes - percpu_cnt->prev_bytes < dt)
+ ret = 1;
+ else
+ ret = 0;
+
+ if (dt > REFRESH_TIME_NS) {
+ percpu_cnt->prev_ts = ts;
+ percpu_cnt->prev_packets = cnt->packets;
+ percpu_cnt->prev_bytes = cnt->bytes;
+ }
+
+ return !!ret;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = LINUX_VERSION_CODE;
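
The verdict above boils down to a byte budget: bytes sent since the last refresh are compared against dt scaled by MAX_BPS. A worked example with the constants from this program:

    __u64 dt = 100000000ULL;                   /* 100 ms since prev_ts */
    __u64 budget = dt * MAX_BPS / NS_PER_SEC;  /* 3145728 * 0.1 = 314572 bytes */
    /* traffic below 'budget' since the last refresh yields ret == 1
     * (accept); anything above yields ret == 0 (drop). */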
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 6b5cfeb7a9cc..f42b3396d622 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
+#include <linux/kernel.h>
#include <bpf/bpf.h>
#include <sys/resource.h>
#include <libelf.h>
@@ -45,7 +46,6 @@ static int count_result(int err)
return err;
}
-#define min(a, b) ((a) < (b) ? (a) : (b))
#define __printf(a, b) __attribute__((format(printf, a, b)))
__printf(1, 2)
@@ -130,6 +130,7 @@ struct btf_raw_test {
bool map_create_err;
bool ordered_map;
bool lossless_map;
+ bool percpu_map;
int hdr_len_delta;
int type_off_delta;
int str_off_delta;
@@ -2157,6 +2158,7 @@ static struct btf_pprint_test_meta {
const char *map_name;
bool ordered_map;
bool lossless_map;
+ bool percpu_map;
} pprint_tests_meta[] = {
{
.descr = "BTF pretty print array",
@@ -2164,6 +2166,7 @@ static struct btf_pprint_test_meta {
.map_name = "pprint_test_array",
.ordered_map = true,
.lossless_map = true,
+ .percpu_map = false,
},
{
@@ -2172,6 +2175,7 @@ static struct btf_pprint_test_meta {
.map_name = "pprint_test_hash",
.ordered_map = false,
.lossless_map = true,
+ .percpu_map = false,
},
{
@@ -2180,30 +2184,83 @@ static struct btf_pprint_test_meta {
.map_name = "pprint_test_lru_hash",
.ordered_map = false,
.lossless_map = false,
+ .percpu_map = false,
+},
+
+{
+ .descr = "BTF pretty print percpu array",
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .map_name = "pprint_test_percpu_array",
+ .ordered_map = true,
+ .lossless_map = true,
+ .percpu_map = true,
+},
+
+{
+ .descr = "BTF pretty print percpu hash",
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH,
+ .map_name = "pprint_test_percpu_hash",
+ .ordered_map = false,
+ .lossless_map = true,
+ .percpu_map = true,
+},
+
+{
+ .descr = "BTF pretty print lru percpu hash",
+ .map_type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
+ .map_name = "pprint_test_lru_percpu_hash",
+ .ordered_map = false,
+ .lossless_map = false,
+ .percpu_map = true,
},
};
-static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i)
+static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i,
+ int num_cpus, int rounded_value_size)
{
- v->ui32 = i;
- v->si32 = -i;
- v->unused_bits2a = 3;
- v->bits28 = i;
- v->unused_bits2b = 3;
- v->ui64 = i;
- v->aenum = i & 0x03;
+ int cpu;
+
+ for (cpu = 0; cpu < num_cpus; cpu++) {
+ v->ui32 = i + cpu;
+ v->si32 = -i;
+ v->unused_bits2a = 3;
+ v->bits28 = i;
+ v->unused_bits2b = 3;
+ v->ui64 = i;
+ v->aenum = i & 0x03;
+ v = (void *)v + rounded_value_size;
+ }
}
+static int check_line(const char *expected_line, int nexpected_line,
+ int expected_line_len, const char *line)
+{
+ if (CHECK(nexpected_line == expected_line_len,
+ "expected_line is too long"))
+ return -1;
+
+ if (strcmp(expected_line, line)) {
+ fprintf(stderr, "unexpected pprint output\n");
+ fprintf(stderr, "expected: %s", expected_line);
+ fprintf(stderr, " read: %s", line);
+ return -1;
+ }
+
+ return 0;
+}
+
+
static int do_test_pprint(void)
{
const struct btf_raw_test *test = &pprint_test_template;
struct bpf_create_map_attr create_attr = {};
+ bool ordered_map, lossless_map, percpu_map;
+ int err, ret, num_cpus, rounded_value_size;
+ struct pprint_mapv *mapv = NULL;
unsigned int key, nr_read_elems;
- bool ordered_map, lossless_map;
int map_fd = -1, btf_fd = -1;
- struct pprint_mapv mapv = {};
unsigned int raw_btf_size;
char expected_line[255];
FILE *pin_file = NULL;
@@ -2212,7 +2269,6 @@ static int do_test_pprint(void)
char *line = NULL;
uint8_t *raw_btf;
ssize_t nread;
- int err, ret;
fprintf(stderr, "%s......", test->descr);
raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
@@ -2261,9 +2317,18 @@ static int do_test_pprint(void)
if (CHECK(err, "bpf_obj_pin(%s): errno:%d.", pin_path, errno))
goto done;
+ percpu_map = test->percpu_map;
+ num_cpus = percpu_map ? bpf_num_possible_cpus() : 1;
+ rounded_value_size = round_up(sizeof(struct pprint_mapv), 8);
+ mapv = calloc(num_cpus, rounded_value_size);
+ if (CHECK(!mapv, "mapv allocation failure")) {
+ err = -1;
+ goto done;
+ }
+
for (key = 0; key < test->max_entries; key++) {
- set_pprint_mapv(&mapv, key);
- bpf_map_update_elem(map_fd, &key, &mapv, 0);
+ set_pprint_mapv(mapv, key, num_cpus, rounded_value_size);
+ bpf_map_update_elem(map_fd, &key, mapv, 0);
}
pin_file = fopen(pin_path, "r");
@@ -2286,33 +2351,74 @@ static int do_test_pprint(void)
ordered_map = test->ordered_map;
lossless_map = test->lossless_map;
do {
+ struct pprint_mapv *cmapv;
ssize_t nexpected_line;
unsigned int next_key;
+ int cpu;
next_key = ordered_map ? nr_read_elems : atoi(line);
- set_pprint_mapv(&mapv, next_key);
- nexpected_line = snprintf(expected_line, sizeof(expected_line),
- "%u: {%u,0,%d,0x%x,0x%x,0x%x,{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n",
- next_key,
- mapv.ui32, mapv.si32,
- mapv.unused_bits2a, mapv.bits28, mapv.unused_bits2b,
- mapv.ui64,
- mapv.ui8a[0], mapv.ui8a[1], mapv.ui8a[2], mapv.ui8a[3],
- mapv.ui8a[4], mapv.ui8a[5], mapv.ui8a[6], mapv.ui8a[7],
- pprint_enum_str[mapv.aenum]);
-
- if (CHECK(nexpected_line == sizeof(expected_line),
- "expected_line is too long")) {
- err = -1;
- goto done;
+ set_pprint_mapv(mapv, next_key, num_cpus, rounded_value_size);
+ cmapv = mapv;
+
+ for (cpu = 0; cpu < num_cpus; cpu++) {
+ if (percpu_map) {
+ /* for percpu map, the format looks like:
+ * <key>: {
+ * cpu0: <value_on_cpu0>
+ * cpu1: <value_on_cpu1>
+ * ...
+ * cpun: <value_on_cpun>
+ * }
+ *
+ * let us verify the line containing the key here.
+ */
+ if (cpu == 0) {
+ nexpected_line = snprintf(expected_line,
+ sizeof(expected_line),
+ "%u: {\n",
+ next_key);
+
+ err = check_line(expected_line, nexpected_line,
+ sizeof(expected_line), line);
+ if (err == -1)
+ goto done;
+ }
+
+ /* read value@cpu */
+ nread = getline(&line, &line_len, pin_file);
+ if (nread < 0)
+ break;
+ }
+
+ nexpected_line = snprintf(expected_line, sizeof(expected_line),
+ "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
+ "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n",
+ percpu_map ? "\tcpu" : "",
+ percpu_map ? cpu : next_key,
+ cmapv->ui32, cmapv->si32,
+ cmapv->unused_bits2a,
+ cmapv->bits28,
+ cmapv->unused_bits2b,
+ cmapv->ui64,
+ cmapv->ui8a[0], cmapv->ui8a[1],
+ cmapv->ui8a[2], cmapv->ui8a[3],
+ cmapv->ui8a[4], cmapv->ui8a[5],
+ cmapv->ui8a[6], cmapv->ui8a[7],
+ pprint_enum_str[cmapv->aenum]);
+
+ err = check_line(expected_line, nexpected_line,
+ sizeof(expected_line), line);
+ if (err == -1)
+ goto done;
+
+ cmapv = (void *)cmapv + rounded_value_size;
}
- if (strcmp(expected_line, line)) {
- err = -1;
- fprintf(stderr, "unexpected pprint output\n");
- fprintf(stderr, "expected: %s", expected_line);
- fprintf(stderr, " read: %s", line);
- goto done;
+ if (percpu_map) {
+ /* skip the last bracket for the percpu map */
+ nread = getline(&line, &line_len, pin_file);
+ if (nread < 0)
+ break;
}
nread = getline(&line, &line_len, pin_file);
@@ -2334,6 +2440,8 @@ static int do_test_pprint(void)
err = 0;
done:
+ if (mapv)
+ free(mapv);
if (!err)
fprintf(stderr, "OK");
if (*btf_log_buf && (err || args.always_log))
@@ -2361,6 +2469,7 @@ static int test_pprint(void)
pprint_test_template.map_name = pprint_tests_meta[i].map_name;
pprint_test_template.ordered_map = pprint_tests_meta[i].ordered_map;
pprint_test_template.lossless_map = pprint_tests_meta[i].lossless_map;
+ pprint_test_template.percpu_map = pprint_tests_meta[i].percpu_map;
err |= count_result(do_test_pprint());
}
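
A note on the stride used in the per-cpu cases above: the kernel pads each per-CPU value slot to 8 bytes, which is why the test walks the lookup buffer in round_up(sizeof(struct pprint_mapv), 8) steps instead of plain sizeof() steps. A minimal sketch of the same walk:

    size_t stride = (sizeof(struct pprint_mapv) + 7) & ~(size_t)7;  /* round_up(x, 8) */
    void *slot = mapv;
    int cpu;

    for (cpu = 0; cpu < num_cpus; cpu++) {
        struct pprint_mapv *v = slot;
        /* ... fill or verify *v for this CPU ... */
        slot += stride;  /* void * arithmetic, as in the test itself */
    }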
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index 4e196e3bfecf..f44834155f25 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -4,6 +4,7 @@
#include <linux/filter.h>
#include <stdio.h>
#include <stdlib.h>
+#include <sys/sysinfo.h>
#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
@@ -15,6 +16,14 @@ char bpf_log_buf[BPF_LOG_BUF_SIZE];
int main(int argc, char **argv)
{
struct bpf_insn prog[] = {
+ BPF_LD_MAP_FD(BPF_REG_1, 0), /* percpu map fd */
+ BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+
BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
@@ -28,9 +37,18 @@ int main(int argc, char **argv)
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
int error = EXIT_FAILURE;
- int map_fd, prog_fd, cgroup_fd;
+ int map_fd, percpu_map_fd, prog_fd, cgroup_fd;
struct bpf_cgroup_storage_key key;
unsigned long long value;
+ unsigned long long *percpu_value;
+ int cpu, nproc;
+
+ nproc = get_nprocs_conf();
+ percpu_value = malloc(sizeof(*percpu_value) * nproc);
+ if (!percpu_value) {
+ printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
+ goto err;
+ }
map_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, sizeof(key),
sizeof(value), 0, 0);
@@ -39,7 +57,15 @@ int main(int argc, char **argv)
goto out;
}
- prog[0].imm = map_fd;
+ percpu_map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ sizeof(key), sizeof(value), 0, 0);
+ if (percpu_map_fd < 0) {
+ printf("Failed to create map: %s\n", strerror(errno));
+ goto out;
+ }
+
+ prog[0].imm = percpu_map_fd;
+ prog[7].imm = map_fd;
prog_fd = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
@@ -77,7 +103,15 @@ int main(int argc, char **argv)
}
if (bpf_map_lookup_elem(map_fd, &key, &value)) {
- printf("Failed to lookup cgroup storage\n");
+ printf("Failed to lookup cgroup storage 0\n");
+ goto err;
+ }
+
+ for (cpu = 0; cpu < nproc; cpu++)
+ percpu_value[cpu] = 1000;
+
+ if (bpf_map_update_elem(percpu_map_fd, &key, percpu_value, 0)) {
+ printf("Failed to update the data in the cgroup storage\n");
goto err;
}
@@ -120,11 +154,31 @@ int main(int argc, char **argv)
goto err;
}
+ /* Check the final value of the counter in the percpu local storage */
+
+ for (cpu = 0; cpu < nproc; cpu++)
+ percpu_value[cpu] = 0;
+
+ if (bpf_map_lookup_elem(percpu_map_fd, &key, percpu_value)) {
+ printf("Failed to lookup the per-cpu cgroup storage\n");
+ goto err;
+ }
+
+ value = 0;
+ for (cpu = 0; cpu < nproc; cpu++)
+ value += percpu_value[cpu];
+
+ if (value != nproc * 1000 + 6) {
+ printf("Unexpected data in the per-cpu cgroup storage\n");
+ goto err;
+ }
+
error = 0;
printf("test_cgroup_storage:PASS\n");
err:
cleanup_cgroup_environment();
+ free(percpu_value);
out:
return error;
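
For readers not fluent in raw instructions, the six insns added at the top of the program are roughly this restricted C; the map names are stand-ins for the two fds patched into prog[0].imm and prog[7].imm, and the pre-existing shared-storage part is elided:

    /* Sketch of the added per-cpu increment only. */
    __u32 *p = bpf_get_local_storage(&percpu_cnt, 0);
    *p += 1;  /* per-cpu slot: plain add, no atomics needed */
    /* ... existing code: shared storage update and verdict ... */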
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.c b/tools/testing/selftests/bpf/test_flow_dissector.c
new file mode 100644
index 000000000000..12b784afba31
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_flow_dissector.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Inject packets with all sorts of encapsulation into the kernel.
+ *
+ * IPv4/IPv6 outer layer 3
+ * GRE/GUE/BARE outer layer 4, where bare is IPIP/SIT/IPv4-in-IPv6/..
+ * IPv4/IPv6 inner layer 3
+ */
+
+#define _GNU_SOURCE
+
+#include <stddef.h>
+#include <arpa/inet.h>
+#include <asm/byteorder.h>
+#include <error.h>
+#include <errno.h>
+#include <linux/if_packet.h>
+#include <linux/if_ether.h>
+#include <linux/ipv6.h>
+#include <netinet/ip.h>
+#include <netinet/in.h>
+#include <netinet/udp.h>
+#include <poll.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#define CFG_PORT_INNER 8000
+
+/* Add some protocol definitions that do not exist in userspace */
+
+struct grehdr {
+ uint16_t unused;
+ uint16_t protocol;
+} __attribute__((packed));
+
+struct guehdr {
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 hlen:5,
+ control:1,
+ version:2;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u8 version:2,
+ control:1,
+ hlen:5;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 proto_ctype;
+ __be16 flags;
+ };
+ __be32 word;
+ };
+};
+
+static uint8_t cfg_dsfield_inner;
+static uint8_t cfg_dsfield_outer;
+static uint8_t cfg_encap_proto;
+static bool cfg_expect_failure = false;
+static int cfg_l3_extra = AF_UNSPEC; /* optional SIT prefix */
+static int cfg_l3_inner = AF_UNSPEC;
+static int cfg_l3_outer = AF_UNSPEC;
+static int cfg_num_pkt = 10;
+static int cfg_num_secs = 0;
+static char cfg_payload_char = 'a';
+static int cfg_payload_len = 100;
+static int cfg_port_gue = 6080;
+static bool cfg_only_rx;
+static bool cfg_only_tx;
+static int cfg_src_port = 9;
+
+static char buf[ETH_DATA_LEN];
+
+#define INIT_ADDR4(name, addr4, port) \
+ static struct sockaddr_in name = { \
+ .sin_family = AF_INET, \
+ .sin_port = __constant_htons(port), \
+ .sin_addr.s_addr = __constant_htonl(addr4), \
+ };
+
+#define INIT_ADDR6(name, addr6, port) \
+ static struct sockaddr_in6 name = { \
+ .sin6_family = AF_INET6, \
+ .sin6_port = __constant_htons(port), \
+ .sin6_addr = addr6, \
+ };
+
+INIT_ADDR4(in_daddr4, INADDR_LOOPBACK, CFG_PORT_INNER)
+INIT_ADDR4(in_saddr4, INADDR_LOOPBACK + 2, 0)
+INIT_ADDR4(out_daddr4, INADDR_LOOPBACK, 0)
+INIT_ADDR4(out_saddr4, INADDR_LOOPBACK + 1, 0)
+INIT_ADDR4(extra_daddr4, INADDR_LOOPBACK, 0)
+INIT_ADDR4(extra_saddr4, INADDR_LOOPBACK + 1, 0)
+
+INIT_ADDR6(in_daddr6, IN6ADDR_LOOPBACK_INIT, CFG_PORT_INNER)
+INIT_ADDR6(in_saddr6, IN6ADDR_LOOPBACK_INIT, 0)
+INIT_ADDR6(out_daddr6, IN6ADDR_LOOPBACK_INIT, 0)
+INIT_ADDR6(out_saddr6, IN6ADDR_LOOPBACK_INIT, 0)
+INIT_ADDR6(extra_daddr6, IN6ADDR_LOOPBACK_INIT, 0)
+INIT_ADDR6(extra_saddr6, IN6ADDR_LOOPBACK_INIT, 0)
+
+static unsigned long util_gettime(void)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
+}
+
+static void util_printaddr(const char *msg, struct sockaddr *addr)
+{
+ unsigned long off = 0;
+ char nbuf[INET6_ADDRSTRLEN];
+
+ switch (addr->sa_family) {
+ case PF_INET:
+ off = __builtin_offsetof(struct sockaddr_in, sin_addr);
+ break;
+ case PF_INET6:
+ off = __builtin_offsetof(struct sockaddr_in6, sin6_addr);
+ break;
+ default:
+ error(1, 0, "printaddr: unsupported family %u\n",
+ addr->sa_family);
+ }
+
+ if (!inet_ntop(addr->sa_family, ((void *) addr) + off, nbuf,
+ sizeof(nbuf)))
+ error(1, errno, "inet_ntop");
+
+ fprintf(stderr, "%s: %s\n", msg, nbuf);
+}
+
+static unsigned long add_csum_hword(const uint16_t *start, int num_u16)
+{
+ unsigned long sum = 0;
+ int i;
+
+ for (i = 0; i < num_u16; i++)
+ sum += start[i];
+
+ return sum;
+}
+
+static uint16_t build_ip_csum(const uint16_t *start, int num_u16,
+ unsigned long sum)
+{
+ sum += add_csum_hword(start, num_u16);
+
+ while (sum >> 16)
+ sum = (sum & 0xffff) + (sum >> 16);
+
+ return ~sum;
+}
+
+static void build_ipv4_header(void *header, uint8_t proto,
+ uint32_t src, uint32_t dst,
+ int payload_len, uint8_t tos)
+{
+ struct iphdr *iph = header;
+
+ iph->ihl = 5;
+ iph->version = 4;
+ iph->tos = tos;
+ iph->ttl = 8;
+ iph->tot_len = htons(sizeof(*iph) + payload_len);
+ iph->id = htons(1337);
+ iph->protocol = proto;
+ iph->saddr = src;
+ iph->daddr = dst;
+ iph->check = build_ip_csum((void *) iph, iph->ihl << 1, 0);
+}
+
+static void ipv6_set_dsfield(struct ipv6hdr *ip6h, uint8_t dsfield)
+{
+ uint16_t val, *ptr = (uint16_t *)ip6h;
+
+ val = ntohs(*ptr);
+ val &= 0xF00F;
+ val |= ((uint16_t) dsfield) << 4;
+ *ptr = htons(val);
+}
+
+static void build_ipv6_header(void *header, uint8_t proto,
+ struct sockaddr_in6 *src,
+ struct sockaddr_in6 *dst,
+ int payload_len, uint8_t dsfield)
+{
+ struct ipv6hdr *ip6h = header;
+
+ ip6h->version = 6;
+ ip6h->payload_len = htons(payload_len);
+ ip6h->nexthdr = proto;
+ ip6h->hop_limit = 8;
+ ipv6_set_dsfield(ip6h, dsfield);
+
+ memcpy(&ip6h->saddr, &src->sin6_addr, sizeof(ip6h->saddr));
+ memcpy(&ip6h->daddr, &dst->sin6_addr, sizeof(ip6h->daddr));
+}
+
+static uint16_t build_udp_v4_csum(const struct iphdr *iph,
+ const struct udphdr *udph,
+ int num_words)
+{
+ unsigned long pseudo_sum;
+ int num_u16 = sizeof(iph->saddr); /* halfwords: twice byte len */
+
+ pseudo_sum = add_csum_hword((void *) &iph->saddr, num_u16);
+ pseudo_sum += htons(IPPROTO_UDP);
+ pseudo_sum += udph->len;
+ return build_ip_csum((void *) udph, num_words, pseudo_sum);
+}
+
+static uint16_t build_udp_v6_csum(const struct ipv6hdr *ip6h,
+ const struct udphdr *udph,
+ int num_words)
+{
+ unsigned long pseudo_sum;
+ int num_u16 = sizeof(ip6h->saddr); /* halfwords: twice byte len */
+
+ pseudo_sum = add_csum_hword((void *) &ip6h->saddr, num_u16);
+ pseudo_sum += htons(ip6h->nexthdr);
+ pseudo_sum += ip6h->payload_len;
+ return build_ip_csum((void *) udph, num_words, pseudo_sum);
+}
+
+static void build_udp_header(void *header, int payload_len,
+ uint16_t dport, int family)
+{
+ struct udphdr *udph = header;
+ int len = sizeof(*udph) + payload_len;
+
+ udph->source = htons(cfg_src_port);
+ udph->dest = htons(dport);
+ udph->len = htons(len);
+ udph->check = 0;
+ if (family == AF_INET)
+ udph->check = build_udp_v4_csum(header - sizeof(struct iphdr),
+ udph, len >> 1);
+ else
+ udph->check = build_udp_v6_csum(header - sizeof(struct ipv6hdr),
+ udph, len >> 1);
+}
+
+static void build_gue_header(void *header, uint8_t proto)
+{
+ struct guehdr *gueh = header;
+
+ gueh->proto_ctype = proto;
+}
+
+static void build_gre_header(void *header, uint16_t proto)
+{
+ struct grehdr *greh = header;
+
+ greh->protocol = htons(proto);
+}
+
+static int l3_length(int family)
+{
+ if (family == AF_INET)
+ return sizeof(struct iphdr);
+ else
+ return sizeof(struct ipv6hdr);
+}
+
+static int build_packet(void)
+{
+ int ol3_len = 0, ol4_len = 0, il3_len = 0, il4_len = 0;
+ int el3_len = 0;
+
+ if (cfg_l3_extra)
+ el3_len = l3_length(cfg_l3_extra);
+
+ /* calculate header offsets */
+ if (cfg_encap_proto) {
+ ol3_len = l3_length(cfg_l3_outer);
+
+ if (cfg_encap_proto == IPPROTO_GRE)
+ ol4_len = sizeof(struct grehdr);
+ else if (cfg_encap_proto == IPPROTO_UDP)
+ ol4_len = sizeof(struct udphdr) + sizeof(struct guehdr);
+ }
+
+ il3_len = l3_length(cfg_l3_inner);
+ il4_len = sizeof(struct udphdr);
+
+ if (el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len >=
+ sizeof(buf))
+ error(1, 0, "packet too large\n");
+
+ /*
+ * Fill the packet from the inside out to calculate correct checksums,
+ * but build the IP header before the UDP header, as UDP uses the IP
+ * header for its pseudo-header checksum.
+ */
+ memset(buf + el3_len + ol3_len + ol4_len + il3_len + il4_len,
+ cfg_payload_char, cfg_payload_len);
+
+ /* add zero byte for udp csum padding */
+ buf[el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len] = 0;
+
+ switch (cfg_l3_inner) {
+ case PF_INET:
+ build_ipv4_header(buf + el3_len + ol3_len + ol4_len,
+ IPPROTO_UDP,
+ in_saddr4.sin_addr.s_addr,
+ in_daddr4.sin_addr.s_addr,
+ il4_len + cfg_payload_len,
+ cfg_dsfield_inner);
+ break;
+ case PF_INET6:
+ build_ipv6_header(buf + el3_len + ol3_len + ol4_len,
+ IPPROTO_UDP,
+ &in_saddr6, &in_daddr6,
+ il4_len + cfg_payload_len,
+ cfg_dsfield_inner);
+ break;
+ }
+
+ build_udp_header(buf + el3_len + ol3_len + ol4_len + il3_len,
+ cfg_payload_len, CFG_PORT_INNER, cfg_l3_inner);
+
+ if (!cfg_encap_proto)
+ return il3_len + il4_len + cfg_payload_len;
+
+ switch (cfg_l3_outer) {
+ case PF_INET:
+ build_ipv4_header(buf + el3_len, cfg_encap_proto,
+ out_saddr4.sin_addr.s_addr,
+ out_daddr4.sin_addr.s_addr,
+ ol4_len + il3_len + il4_len + cfg_payload_len,
+ cfg_dsfield_outer);
+ break;
+ case PF_INET6:
+ build_ipv6_header(buf + el3_len, cfg_encap_proto,
+ &out_saddr6, &out_daddr6,
+ ol4_len + il3_len + il4_len + cfg_payload_len,
+ cfg_dsfield_outer);
+ break;
+ }
+
+ switch (cfg_encap_proto) {
+ case IPPROTO_UDP:
+ build_gue_header(buf + el3_len + ol3_len + ol4_len -
+ sizeof(struct guehdr),
+ cfg_l3_inner == PF_INET ? IPPROTO_IPIP
+ : IPPROTO_IPV6);
+ build_udp_header(buf + el3_len + ol3_len,
+ sizeof(struct guehdr) + il3_len + il4_len +
+ cfg_payload_len,
+ cfg_port_gue, cfg_l3_outer);
+ break;
+ case IPPROTO_GRE:
+ build_gre_header(buf + el3_len + ol3_len,
+ cfg_l3_inner == PF_INET ? ETH_P_IP
+ : ETH_P_IPV6);
+ break;
+ }
+
+ switch (cfg_l3_extra) {
+ case PF_INET:
+ build_ipv4_header(buf,
+ cfg_l3_outer == PF_INET ? IPPROTO_IPIP
+ : IPPROTO_IPV6,
+ extra_saddr4.sin_addr.s_addr,
+ extra_daddr4.sin_addr.s_addr,
+ ol3_len + ol4_len + il3_len + il4_len +
+ cfg_payload_len, 0);
+ break;
+ case PF_INET6:
+ build_ipv6_header(buf,
+ cfg_l3_outer == PF_INET ? IPPROTO_IPIP
+ : IPPROTO_IPV6,
+ &extra_saddr6, &extra_daddr6,
+ ol3_len + ol4_len + il3_len + il4_len +
+ cfg_payload_len, 0);
+ break;
+ }
+
+ return el3_len + ol3_len + ol4_len + il3_len + il4_len +
+ cfg_payload_len;
+}
+
+/* sender transmits encapsulated packets or plain UDP over a raw socket */
+static int setup_tx(void)
+{
+ int family, fd, ret;
+
+ if (cfg_l3_extra)
+ family = cfg_l3_extra;
+ else if (cfg_l3_outer)
+ family = cfg_l3_outer;
+ else
+ family = cfg_l3_inner;
+
+ fd = socket(family, SOCK_RAW, IPPROTO_RAW);
+ if (fd == -1)
+ error(1, errno, "socket tx");
+
+ if (cfg_l3_extra) {
+ if (cfg_l3_extra == PF_INET)
+ ret = connect(fd, (void *) &extra_daddr4,
+ sizeof(extra_daddr4));
+ else
+ ret = connect(fd, (void *) &extra_daddr6,
+ sizeof(extra_daddr6));
+ if (ret)
+ error(1, errno, "connect tx");
+ } else if (cfg_l3_outer) {
+ /* connect to destination if not encapsulated */
+ if (cfg_l3_outer == PF_INET)
+ ret = connect(fd, (void *) &out_daddr4,
+ sizeof(out_daddr4));
+ else
+ ret = connect(fd, (void *) &out_daddr6,
+ sizeof(out_daddr6));
+ if (ret)
+ error(1, errno, "connect tx");
+ } else {
+ /* otherwise connect to the inner (loopback) destination */
+ if (cfg_l3_inner == PF_INET)
+ ret = connect(fd, (void *) &in_daddr4,
+ sizeof(in_daddr4));
+ else
+ ret = connect(fd, (void *) &in_daddr6,
+ sizeof(in_daddr6));
+ if (ret)
+ error(1, errno, "connect tx");
+ }
+
+ return fd;
+}
+
+/* receiver reads unencapsulated UDP */
+static int setup_rx(void)
+{
+ int fd, ret;
+
+ fd = socket(cfg_l3_inner, SOCK_DGRAM, 0);
+ if (fd == -1)
+ error(1, errno, "socket rx");
+
+ if (cfg_l3_inner == PF_INET)
+ ret = bind(fd, (void *) &in_daddr4, sizeof(in_daddr4));
+ else
+ ret = bind(fd, (void *) &in_daddr6, sizeof(in_daddr6));
+ if (ret)
+ error(1, errno, "bind rx");
+
+ return fd;
+}
+
+static int do_tx(int fd, const char *pkt, int len)
+{
+ int ret;
+
+ ret = write(fd, pkt, len);
+ if (ret == -1)
+ error(1, errno, "send");
+ if (ret != len)
+ error(1, errno, "send: len (%d < %d)\n", ret, len);
+
+ return 1;
+}
+
+static int do_poll(int fd, short events, int timeout)
+{
+ struct pollfd pfd;
+ int ret;
+
+ pfd.fd = fd;
+ pfd.events = events;
+
+ ret = poll(&pfd, 1, timeout);
+ if (ret == -1)
+ error(1, errno, "poll");
+ if (ret && !(pfd.revents & POLLIN))
+ error(1, errno, "poll: unexpected event 0x%x\n", pfd.revents);
+
+ return ret;
+}
+
+static int do_rx(int fd)
+{
+ char rbuf;
+ int ret, num = 0;
+
+ while (1) {
+ ret = recv(fd, &rbuf, 1, MSG_DONTWAIT);
+ if (ret == -1 && errno == EAGAIN)
+ break;
+ if (ret == -1)
+ error(1, errno, "recv");
+ if (rbuf != cfg_payload_char)
+ error(1, 0, "recv: payload mismatch");
+ num++;
+ }
+
+ return num;
+}
+
+static int do_main(void)
+{
+ unsigned long tstop, treport, tcur;
+ int fdt = -1, fdr = -1, len, tx = 0, rx = 0;
+
+ if (!cfg_only_tx)
+ fdr = setup_rx();
+ if (!cfg_only_rx)
+ fdt = setup_tx();
+
+ len = build_packet();
+
+ tcur = util_gettime();
+ treport = tcur + 1000;
+ tstop = tcur + (cfg_num_secs * 1000);
+
+ while (1) {
+ if (!cfg_only_rx)
+ tx += do_tx(fdt, buf, len);
+
+ if (!cfg_only_tx)
+ rx += do_rx(fdr);
+
+ if (cfg_num_secs) {
+ tcur = util_gettime();
+ if (tcur >= tstop)
+ break;
+ if (tcur >= treport) {
+ fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx);
+ tx = 0;
+ rx = 0;
+ treport = tcur + 1000;
+ }
+ } else {
+ if (tx == cfg_num_pkt)
+ break;
+ }
+ }
+
+ /* read straggler packets, if any */
+ if (rx < tx) {
+ tstop = util_gettime() + 100;
+ while (rx < tx) {
+ tcur = util_gettime();
+ if (tcur >= tstop)
+ break;
+
+ do_poll(fdr, POLLIN, tstop - tcur);
+ rx += do_rx(fdr);
+ }
+ }
+
+ fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx);
+
+ if (fdr != -1 && close(fdr))
+ error(1, errno, "close rx");
+ if (fdt != -1 && close(fdt))
+ error(1, errno, "close tx");
+
+ /*
+ * success (== 0) only if all packets were received, unless failure
+ * is expected, in which case none must arrive.
+ */
+ if (cfg_expect_failure)
+ return rx != 0;
+ else
+ return rx != tx;
+}
+
+
+static void __attribute__((noreturn)) usage(const char *filepath)
+{
+ fprintf(stderr, "Usage: %s [-e gre|gue|bare|none] [-i 4|6] [-l len] "
+ "[-O 4|6] [-o 4|6] [-n num] [-t secs] [-R] [-T] "
+ "[-s <osrc> [-d <odst>] [-S <isrc>] [-D <idst>] "
+ "[-x <otos>] [-X <itos>] [-f <isport>] [-F]\n",
+ filepath);
+ exit(1);
+}
+
+static void parse_addr(int family, void *addr, const char *optarg)
+{
+ int ret;
+
+ ret = inet_pton(family, optarg, addr);
+ if (ret == -1)
+ error(1, errno, "inet_pton");
+ if (ret == 0)
+ error(1, 0, "inet_pton: bad string");
+}
+
+static void parse_addr4(struct sockaddr_in *addr, const char *optarg)
+{
+ parse_addr(AF_INET, &addr->sin_addr, optarg);
+}
+
+static void parse_addr6(struct sockaddr_in6 *addr, const char *optarg)
+{
+ parse_addr(AF_INET6, &addr->sin6_addr, optarg);
+}
+
+static int parse_protocol_family(const char *filepath, const char *optarg)
+{
+ if (!strcmp(optarg, "4"))
+ return PF_INET;
+ if (!strcmp(optarg, "6"))
+ return PF_INET6;
+
+ usage(filepath);
+}
+
+static void parse_opts(int argc, char **argv)
+{
+ int c;
+
+ while ((c = getopt(argc, argv, "d:D:e:f:Fhi:l:n:o:O:Rs:S:t:Tx:X:")) != -1) {
+ switch (c) {
+ case 'd':
+ if (cfg_l3_outer == AF_UNSPEC)
+ error(1, 0, "-d must be preceded by -o");
+ if (cfg_l3_outer == AF_INET)
+ parse_addr4(&out_daddr4, optarg);
+ else
+ parse_addr6(&out_daddr6, optarg);
+ break;
+ case 'D':
+ if (cfg_l3_inner == AF_UNSPEC)
+ error(1, 0, "-D must be preceded by -i");
+ if (cfg_l3_inner == AF_INET)
+ parse_addr4(&in_daddr4, optarg);
+ else
+ parse_addr6(&in_daddr6, optarg);
+ break;
+ case 'e':
+ if (!strcmp(optarg, "gre"))
+ cfg_encap_proto = IPPROTO_GRE;
+ else if (!strcmp(optarg, "gue"))
+ cfg_encap_proto = IPPROTO_UDP;
+ else if (!strcmp(optarg, "bare"))
+ cfg_encap_proto = IPPROTO_IPIP;
+ else if (!strcmp(optarg, "none"))
+ cfg_encap_proto = IPPROTO_IP; /* == 0 */
+ else
+ usage(argv[0]);
+ break;
+ case 'f':
+ cfg_src_port = strtol(optarg, NULL, 0);
+ break;
+ case 'F':
+ cfg_expect_failure = true;
+ break;
+ case 'h':
+ usage(argv[0]);
+ break;
+ case 'i':
+ if (!strcmp(optarg, "4"))
+ cfg_l3_inner = PF_INET;
+ else if (!strcmp(optarg, "6"))
+ cfg_l3_inner = PF_INET6;
+ else
+ usage(argv[0]);
+ break;
+ case 'l':
+ cfg_payload_len = strtol(optarg, NULL, 0);
+ break;
+ case 'n':
+ cfg_num_pkt = strtol(optarg, NULL, 0);
+ break;
+ case 'o':
+ cfg_l3_outer = parse_protocol_family(argv[0], optarg);
+ break;
+ case 'O':
+ cfg_l3_extra = parse_protocol_family(argv[0], optarg);
+ break;
+ case 'R':
+ cfg_only_rx = true;
+ break;
+ case 's':
+ if (cfg_l3_outer == AF_INET)
+ parse_addr4(&out_saddr4, optarg);
+ else
+ parse_addr6(&out_saddr6, optarg);
+ break;
+ case 'S':
+ if (cfg_l3_inner == AF_INET)
+ parse_addr4(&in_saddr4, optarg);
+ else
+ parse_addr6(&in_saddr6, optarg);
+ break;
+ case 't':
+ cfg_num_secs = strtol(optarg, NULL, 0);
+ break;
+ case 'T':
+ cfg_only_tx = true;
+ break;
+ case 'x':
+ cfg_dsfield_outer = strtol(optarg, NULL, 0);
+ break;
+ case 'X':
+ cfg_dsfield_inner = strtol(optarg, NULL, 0);
+ break;
+ }
+ }
+
+ if (cfg_only_rx && cfg_only_tx)
+ error(1, 0, "options: cannot combine rx-only and tx-only");
+
+ if (cfg_encap_proto && cfg_l3_outer == AF_UNSPEC)
+ error(1, 0, "options: must specify outer with encap");
+ else if ((!cfg_encap_proto) && cfg_l3_outer != AF_UNSPEC)
+ error(1, 0, "options: cannot combine no-encap and outer");
+ else if ((!cfg_encap_proto) && cfg_l3_extra != AF_UNSPEC)
+ error(1, 0, "options: cannot combine no-encap and extra");
+
+ if (cfg_l3_inner == AF_UNSPEC)
+ cfg_l3_inner = AF_INET6;
+ if (cfg_l3_inner == AF_INET6 && cfg_encap_proto == IPPROTO_IPIP)
+ cfg_encap_proto = IPPROTO_IPV6;
+
+ /* RFC 6040 4.2:
+ * on decap, if outer encountered congestion (CE == 0x3),
+ * but inner cannot encode ECN (NoECT == 0x0), then drop packet.
+ */
+ if (((cfg_dsfield_outer & 0x3) == 0x3) &&
+ ((cfg_dsfield_inner & 0x3) == 0x0))
+ cfg_expect_failure = true;
+}
+
+static void print_opts(void)
+{
+ if (cfg_l3_inner == PF_INET6) {
+ util_printaddr("inner.dest6", (void *) &in_daddr6);
+ util_printaddr("inner.source6", (void *) &in_saddr6);
+ } else {
+ util_printaddr("inner.dest4", (void *) &in_daddr4);
+ util_printaddr("inner.source4", (void *) &in_saddr4);
+ }
+
+ if (!cfg_l3_outer)
+ return;
+
+ fprintf(stderr, "encap proto: %u\n", cfg_encap_proto);
+
+ if (cfg_l3_outer == PF_INET6) {
+ util_printaddr("outer.dest6", (void *) &out_daddr6);
+ util_printaddr("outer.source6", (void *) &out_saddr6);
+ } else {
+ util_printaddr("outer.dest4", (void *) &out_daddr4);
+ util_printaddr("outer.source4", (void *) &out_saddr4);
+ }
+
+ if (!cfg_l3_extra)
+ return;
+
+ if (cfg_l3_extra == PF_INET6) {
+ util_printaddr("extra.dest6", (void *) &extra_daddr6);
+ util_printaddr("extra.source6", (void *) &extra_saddr6);
+ } else {
+ util_printaddr("extra.dest4", (void *) &extra_daddr4);
+ util_printaddr("extra.source4", (void *) &extra_saddr4);
+ }
+
+}
+
+int main(int argc, char **argv)
+{
+ parse_opts(argc, argv);
+ print_opts();
+ return do_main();
+}
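
The checksum helpers above implement the standard Internet checksum: accumulate 16-bit words in a wide register, fold the carries back in, then invert. A worked example of the fold loop in build_ip_csum():

    unsigned long sum = 0x2f0fa;  /* example running sum with carry bits set */

    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);  /* 0xf0fa + 0x2 = 0xf0fc */
    /* ~sum, truncated to 16 bits, is 0x0f03: the value stored in the header */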
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh
new file mode 100755
index 000000000000..c0fb073b5eab
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_flow_dissector.sh
@@ -0,0 +1,115 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Load BPF flow dissector and verify it correctly dissects traffic
+export TESTNAME=test_flow_dissector
+unmount=0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+ echo $msg please run this as root >&2
+ exit $ksft_skip
+fi
+
+# This test must run inside a network namespace. If it was invoked from the
+# root namespace, re-exec it under in_netns.sh.
+if [[ -z $(ip netns identify $$) ]]; then
+ ../net/in_netns.sh "$0" "$@"
+ exit $?
+fi
+
+# Determine selftest success via shell exit code
+exit_handler()
+{
+ if (( $? == 0 )); then
+ echo "selftests: $TESTNAME [PASS]";
+ else
+ echo "selftests: $TESTNAME [FAILED]";
+ fi
+
+ set +e
+
+ # Cleanup
+ tc filter del dev lo ingress pref 1337 2> /dev/null
+ tc qdisc del dev lo ingress 2> /dev/null
+ ./flow_dissector_load -d 2> /dev/null
+ if [ $unmount -ne 0 ]; then
+ umount bpffs 2> /dev/null
+ fi
+}
+
+# Exit the script immediately (caught by the trap handler) if any
+# command exits with a non-zero status.
+set -e
+
+# (Use 'trap -l' to list meaning of numbers)
+trap exit_handler 0 2 3 6 9
+
+# Mount BPF file system
+if /bin/mount | grep /sys/fs/bpf > /dev/null; then
+ echo "bpffs already mounted"
+else
+ echo "bpffs not mounted. Mounting..."
+ unmount=1
+ /bin/mount bpffs /sys/fs/bpf -t bpf
+fi
+
+# Attach BPF program
+./flow_dissector_load -p bpf_flow.o -s dissect
+
+# Setup
+tc qdisc add dev lo ingress
+
+echo "Testing IPv4..."
+# Drops all IP/UDP packets coming from port 9
+tc filter add dev lo parent ffff: protocol ip pref 1337 flower ip_proto \
+ udp src_port 9 action drop
+
+# Send 10 IPv4/UDP packets from port 8. Filter should not drop any.
+./test_flow_dissector -i 4 -f 8
+# Send 10 IPv4/UDP packets from port 9. Filter should drop all.
+./test_flow_dissector -i 4 -f 9 -F
+# Send 10 IPv4/UDP packets from port 10. Filter should not drop any.
+./test_flow_dissector -i 4 -f 10
+
+echo "Testing IPIP..."
+# Send 10 IPv4/IPv4/UDP packets from port 8. Filter should not drop any.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \
+ -D 192.168.0.1 -S 1.1.1.1 -f 8
+# Send 10 IPv4/IPv4/UDP packets from port 9. Filter should drop all.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \
+ -D 192.168.0.1 -S 1.1.1.1 -f 9 -F
+# Send 10 IPv4/IPv4/UDP packets from port 10. Filter should not drop any.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \
+ -D 192.168.0.1 -S 1.1.1.1 -f 10
+
+echo "Testing IPv4 + GRE..."
+# Send 10 IPv4/GRE/IPv4/UDP packets from port 8. Filter should not drop any.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \
+ -D 192.168.0.1 -S 1.1.1.1 -f 8
+# Send 10 IPv4/GRE/IPv4/UDP packets from port 9. Filter should drop all.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \
+ -D 192.168.0.1 -S 1.1.1.1 -f 9 -F
+# Send 10 IPv4/GRE/IPv4/UDP packets from port 10. Filter should not drop any.
+./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \
+ -D 192.168.0.1 -S 1.1.1.1 -f 10
+
+tc filter del dev lo ingress pref 1337
+
+echo "Testing IPv6..."
+# Drops all IPv6/UDP packets coming from port 9
+tc filter add dev lo parent ffff: protocol ipv6 pref 1337 flower ip_proto \
+ udp src_port 9 action drop
+
+# Send 10 IPv6/UDP packets from port 8. Filter should not drop any.
+./test_flow_dissector -i 6 -f 8
+# Send 10 IPv6/UDP packets from port 9. Filter should drop all.
+./test_flow_dissector -i 6 -f 9 -F
+# Send 10 IPv6/UDP packets from port 10. Filter should not drop any.
+./test_flow_dissector -i 6 -f 10
+
+exit 0
diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh
index d97dc914cd49..156d89f1edcc 100755
--- a/tools/testing/selftests/bpf/test_libbpf.sh
+++ b/tools/testing/selftests/bpf/test_libbpf.sh
@@ -6,7 +6,7 @@ export TESTNAME=test_libbpf
# Determine selftest success via shell exit code
exit_handler()
{
- if (( $? == 0 )); then
+ if [ $? -eq 0 ]; then
echo "selftests: $TESTNAME [PASS]";
else
echo "$TESTNAME: failed at file $LAST_LOADED" 1>&2
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 6f54f84144a0..4db2116e52be 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -15,6 +15,7 @@
#include <string.h>
#include <assert.h>
#include <stdlib.h>
+#include <time.h>
#include <sys/wait.h>
#include <sys/socket.h>
@@ -471,6 +472,122 @@ static void test_devmap(int task, void *data)
close(fd);
}
+static void test_queuemap(int task, void *data)
+{
+ const int MAP_SIZE = 32;
+ __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
+ int fd, i;
+
+ /* Fill test values to be used */
+ for (i = 0; i < MAP_SIZE + MAP_SIZE/2; i++)
+ vals[i] = rand();
+
+ /* Invalid key size */
+ fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 4, sizeof(val), MAP_SIZE,
+ map_flags);
+ assert(fd < 0 && errno == EINVAL);
+
+ fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 0, sizeof(val), MAP_SIZE,
+ map_flags);
+ /* Queue map does not support BPF_F_NO_PREALLOC */
+ if (map_flags & BPF_F_NO_PREALLOC) {
+ assert(fd < 0 && errno == EINVAL);
+ return;
+ }
+ if (fd < 0) {
+ printf("Failed to create queuemap '%s'!\n", strerror(errno));
+ exit(1);
+ }
+
+ /* Push MAP_SIZE elements */
+ for (i = 0; i < MAP_SIZE; i++)
+ assert(bpf_map_update_elem(fd, NULL, &vals[i], 0) == 0);
+
+ /* Check that an element cannot be pushed due to the max_entries limit */
+ assert(bpf_map_update_elem(fd, NULL, &val, 0) == -1 &&
+ errno == E2BIG);
+
+ /* Peek element */
+ assert(bpf_map_lookup_elem(fd, NULL, &val) == 0 && val == vals[0]);
+
+ /* Replace half elements */
+ for (i = MAP_SIZE; i < MAP_SIZE + MAP_SIZE/2; i++)
+ assert(bpf_map_update_elem(fd, NULL, &vals[i], BPF_EXIST) == 0);
+
+ /* Pop all elements */
+ for (i = MAP_SIZE/2; i < MAP_SIZE + MAP_SIZE/2; i++)
+ assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) == 0 &&
+ val == vals[i]);
+
+ /* Check that no elements are left */
+ assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) == -1 &&
+ errno == ENOENT);
+
+ /* Check that unsupported operations set errno to EINVAL */
+ assert(bpf_map_delete_elem(fd, NULL) == -1 && errno == EINVAL);
+ assert(bpf_map_get_next_key(fd, NULL, NULL) == -1 && errno == EINVAL);
+
+ close(fd);
+}
+
+static void test_stackmap(int task, void *data)
+{
+ const int MAP_SIZE = 32;
+ __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
+ int fd, i;
+
+ /* Fill test values to be used */
+ for (i = 0; i < MAP_SIZE + MAP_SIZE/2; i++)
+ vals[i] = rand();
+
+ /* Invalid key size */
+ fd = bpf_create_map(BPF_MAP_TYPE_STACK, 4, sizeof(val), MAP_SIZE,
+ map_flags);
+ assert(fd < 0 && errno == EINVAL);
+
+ fd = bpf_create_map(BPF_MAP_TYPE_STACK, 0, sizeof(val), MAP_SIZE,
+ map_flags);
+ /* Stack map does not support BPF_F_NO_PREALLOC */
+ if (map_flags & BPF_F_NO_PREALLOC) {
+ assert(fd < 0 && errno == EINVAL);
+ return;
+ }
+ if (fd < 0) {
+ printf("Failed to create stackmap '%s'!\n", strerror(errno));
+ exit(1);
+ }
+
+ /* Push MAP_SIZE elements */
+ for (i = 0; i < MAP_SIZE; i++)
+ assert(bpf_map_update_elem(fd, NULL, &vals[i], 0) == 0);
+
+ /* Check that an element cannot be pushed due to the max_entries limit */
+ assert(bpf_map_update_elem(fd, NULL, &val, 0) == -1 &&
+ errno == E2BIG);
+
+ /* Peek element */
+ assert(bpf_map_lookup_elem(fd, NULL, &val) == 0 && val == vals[i - 1]);
+
+ /* Replace half elements */
+ for (i = MAP_SIZE; i < MAP_SIZE + MAP_SIZE/2; i++)
+ assert(bpf_map_update_elem(fd, NULL, &vals[i], BPF_EXIST) == 0);
+
+ /* Pop all elements */
+ for (i = MAP_SIZE + MAP_SIZE/2 - 1; i >= MAP_SIZE/2; i--)
+ assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) == 0 &&
+ val == vals[i]);
+
+ /* Check that no elements are left */
+ assert(bpf_map_lookup_and_delete_elem(fd, NULL, &val) == -1 &&
+ errno == ENOENT);
+
+ /* Check that unsupported operations set errno to EINVAL */
+ assert(bpf_map_delete_elem(fd, NULL) == -1 && errno == EINVAL);
+ assert(bpf_map_get_next_key(fd, NULL, NULL) == -1 && errno == EINVAL);
+
+ close(fd);
+}
+
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
@@ -580,7 +697,11 @@ static void test_sockmap(int tasks, void *data)
/* Test update without programs */
for (i = 0; i < 6; i++) {
err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
- if (err) {
+ if (i < 2 && !err) {
+ printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n",
+ i, sfd[i]);
+ goto out_sockmap;
+ } else if (i >= 2 && err) {
printf("Failed noprog update sockmap '%i:%i'\n",
i, sfd[i]);
goto out_sockmap;
@@ -741,7 +862,7 @@ static void test_sockmap(int tasks, void *data)
}
/* Test map update elem afterwards fd lives in fd and map_fd */
- for (i = 0; i < 6; i++) {
+ for (i = 2; i < 6; i++) {
err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY);
if (err) {
printf("Failed map_fd_rx update sockmap %i '%i:%i'\n",
@@ -845,7 +966,7 @@ static void test_sockmap(int tasks, void *data)
}
/* Delete the elems without programs */
- for (i = 0; i < 6; i++) {
+ for (i = 2; i < 6; i++) {
err = bpf_map_delete_elem(fd, &i);
if (err) {
printf("Failed delete sockmap %i '%i:%i'\n",
@@ -1430,10 +1551,15 @@ static void run_all_tests(void)
test_map_wronly();
test_reuseport_array();
+
+ test_queuemap(0, NULL);
+ test_stackmap(0, NULL);
}
int main(void)
{
+ srand(time(NULL));
+
map_flags = 0;
run_all_tests();
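
Both new tests drive the same four map operations; only the ordering differs, since BPF_MAP_TYPE_QUEUE pops in FIFO order and BPF_MAP_TYPE_STACK in LIFO order. A compact sketch, assuming queue_fd and stack_fd were created as above:

    __u32 v, in[] = {1, 2, 3};
    int i;

    for (i = 0; i < 3; i++)
        bpf_map_update_elem(queue_fd, NULL, &in[i], 0);   /* push */
    bpf_map_lookup_and_delete_elem(queue_fd, NULL, &v);   /* pop: v == 1 (FIFO) */

    for (i = 0; i < 3; i++)
        bpf_map_update_elem(stack_fd, NULL, &in[i], 0);   /* push */
    bpf_map_lookup_and_delete_elem(stack_fd, NULL, &v);   /* pop: v == 3 (LIFO) */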
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c
new file mode 100644
index 000000000000..7887df693399
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_netcnt.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/sysinfo.h>
+#include <sys/time.h>
+
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
+#include "netcnt_common.h"
+
+#define BPF_PROG "./netcnt_prog.o"
+#define TEST_CGROUP "/test-network-counters/"
+
+static int bpf_find_map(const char *test, struct bpf_object *obj,
+ const char *name)
+{
+ struct bpf_map *map;
+
+ map = bpf_object__find_map_by_name(obj, name);
+ if (!map) {
+ printf("%s:FAIL:map '%s' not found\n", test, name);
+ return -1;
+ }
+ return bpf_map__fd(map);
+}
+
+int main(int argc, char **argv)
+{
+ struct percpu_net_cnt *percpu_netcnt;
+ struct bpf_cgroup_storage_key key;
+ int map_fd, percpu_map_fd;
+ int error = EXIT_FAILURE;
+ struct net_cnt netcnt;
+ struct bpf_object *obj;
+ int prog_fd, cgroup_fd;
+ unsigned long packets;
+ unsigned long bytes;
+ int cpu, nproc;
+ __u32 prog_cnt;
+
+ nproc = get_nprocs_conf();
+ percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc);
+ if (!percpu_netcnt) {
+ printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
+ goto err;
+ }
+
+ if (bpf_prog_load(BPF_PROG, BPF_PROG_TYPE_CGROUP_SKB,
+ &obj, &prog_fd)) {
+ printf("Failed to load bpf program\n");
+ goto out;
+ }
+
+ if (setup_cgroup_environment()) {
+ printf("Failed to load bpf program\n");
+ goto err;
+ }
+
+ /* Create a cgroup, get fd, and join it */
+ cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
+ if (!cgroup_fd) {
+ printf("Failed to create test cgroup\n");
+ goto err;
+ }
+
+ if (join_cgroup(TEST_CGROUP)) {
+ printf("Failed to join cgroup\n");
+ goto err;
+ }
+
+ /* Attach bpf program */
+ if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) {
+ printf("Failed to attach bpf program");
+ goto err;
+ }
+
+ assert(system("ping localhost -6 -c 10000 -f -q > /dev/null") == 0);
+
+ if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL,
+ &prog_cnt)) {
+ printf("Failed to query attached programs");
+ goto err;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "netcnt");
+ if (map_fd < 0) {
+ printf("Failed to find bpf map with net counters");
+ goto err;
+ }
+
+ percpu_map_fd = bpf_find_map(__func__, obj, "percpu_netcnt");
+ if (percpu_map_fd < 0) {
+ printf("Failed to find bpf map with percpu net counters");
+ goto err;
+ }
+
+ if (bpf_map_get_next_key(map_fd, NULL, &key)) {
+ printf("Failed to get key in cgroup storage\n");
+ goto err;
+ }
+
+ if (bpf_map_lookup_elem(map_fd, &key, &netcnt)) {
+ printf("Failed to lookup cgroup storage\n");
+ goto err;
+ }
+
+ if (bpf_map_lookup_elem(percpu_map_fd, &key, &percpu_netcnt[0])) {
+ printf("Failed to lookup percpu cgroup storage\n");
+ goto err;
+ }
+
+ /* Some packets may still be in the per-cpu cache, but no more than
+ * MAX_PERCPU_PACKETS per CPU.
+ */
+ packets = netcnt.packets;
+ bytes = netcnt.bytes;
+ for (cpu = 0; cpu < nproc; cpu++) {
+ if (percpu_netcnt[cpu].packets > MAX_PERCPU_PACKETS) {
+ printf("Unexpected percpu value: %llu\n",
+ percpu_netcnt[cpu].packets);
+ goto err;
+ }
+
+ packets += percpu_netcnt[cpu].packets;
+ bytes += percpu_netcnt[cpu].bytes;
+ }
+
+ /* No packets should be lost */
+ if (packets != 10000) {
+ printf("Unexpected packet count: %lu\n", packets);
+ goto err;
+ }
+
+ /* Check that the bytes counter matches the number of packets
+ * multiplied by the size of an IPv6 ICMP echo packet (104 bytes).
+ */
+ if (bytes != packets * 104) {
+ printf("Unexpected bytes count: %lu\n", bytes);
+ goto err;
+ }
+
+ error = 0;
+ printf("test_netcnt:PASS\n");
+
+err:
+ cleanup_cgroup_environment();
+ free(percpu_netcnt);
+
+out:
+ return error;
+}
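
The magic numbers in the final checks come from the flood ping: 10000 echo requests with ping's default 56-byte payload, so every egress packet is 40 (IPv6 header) + 8 (ICMPv6 header) + 56 (payload) = 104 bytes:

    unsigned long expected_bytes = 10000UL * (40 + 8 + 56);  /* == 1040000 */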
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 0ef68204c84b..2d3c04f45530 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -112,13 +112,13 @@ static void test_pkt_access(void)
err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
NULL, NULL, &retval, &duration);
- CHECK(err || errno || retval, "ipv4",
+ CHECK(err || retval, "ipv4",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
NULL, NULL, &retval, &duration);
- CHECK(err || errno || retval, "ipv6",
+ CHECK(err || retval, "ipv6",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
bpf_object__close(obj);
@@ -153,14 +153,14 @@ static void test_xdp(void)
err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
buf, &size, &retval, &duration);
- CHECK(err || errno || retval != XDP_TX || size != 74 ||
+ CHECK(err || retval != XDP_TX || size != 74 ||
iph->protocol != IPPROTO_IPIP, "ipv4",
"err %d errno %d retval %d size %d\n",
err, errno, retval, size);
err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
buf, &size, &retval, &duration);
- CHECK(err || errno || retval != XDP_TX || size != 114 ||
+ CHECK(err || retval != XDP_TX || size != 114 ||
iph6->nexthdr != IPPROTO_IPV6, "ipv6",
"err %d errno %d retval %d size %d\n",
err, errno, retval, size);
@@ -185,13 +185,13 @@ static void test_xdp_adjust_tail(void)
err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
buf, &size, &retval, &duration);
- CHECK(err || errno || retval != XDP_DROP,
+ CHECK(err || retval != XDP_DROP,
"ipv4", "err %d errno %d retval %d size %d\n",
err, errno, retval, size);
err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
buf, &size, &retval, &duration);
- CHECK(err || errno || retval != XDP_TX || size != 54,
+ CHECK(err || retval != XDP_TX || size != 54,
"ipv6", "err %d errno %d retval %d size %d\n",
err, errno, retval, size);
bpf_object__close(obj);
@@ -254,14 +254,14 @@ static void test_l4lb(const char *file)
err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
buf, &size, &retval, &duration);
- CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
+ CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
*magic != MAGIC_VAL, "ipv4",
"err %d errno %d retval %d size %d magic %x\n",
err, errno, retval, size, *magic);
err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
buf, &size, &retval, &duration);
- CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
+ CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
*magic != MAGIC_VAL, "ipv6",
"err %d errno %d retval %d size %d magic %x\n",
err, errno, retval, size, *magic);
@@ -343,14 +343,14 @@ static void test_xdp_noinline(void)
err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
buf, &size, &retval, &duration);
- CHECK(err || errno || retval != 1 || size != 54 ||
+ CHECK(err || retval != 1 || size != 54 ||
*magic != MAGIC_VAL, "ipv4",
"err %d errno %d retval %d size %d magic %x\n",
err, errno, retval, size, *magic);
err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
buf, &size, &retval, &duration);
- CHECK(err || errno || retval != 1 || size != 74 ||
+ CHECK(err || retval != 1 || size != 74 ||
*magic != MAGIC_VAL, "ipv6",
"err %d errno %d retval %d size %d magic %x\n",
err, errno, retval, size, *magic);
@@ -1698,8 +1698,142 @@ static void test_task_fd_query_tp(void)
"sys_enter_read");
}
+static void test_reference_tracking(void)
+{
+ const char *file = "./test_sk_lookup_kern.o";
+ struct bpf_object *obj;
+ struct bpf_program *prog;
+ __u32 duration;
+ int err = 0;
+
+ obj = bpf_object__open(file);
+ if (IS_ERR(obj)) {
+ error_cnt++;
+ return;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ const char *title;
+
+ /* Ignore .text sections */
+ title = bpf_program__title(prog, false);
+ if (strstr(title, ".text") != NULL)
+ continue;
+
+ bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
+
+ /* Expect verifier failure if test name has 'fail' */
+ if (strstr(title, "fail") != NULL) {
+ libbpf_set_print(NULL, NULL, NULL);
+ err = !bpf_program__load(prog, "GPL", 0);
+ libbpf_set_print(printf, printf, NULL);
+ } else {
+ err = bpf_program__load(prog, "GPL", 0);
+ }
+ CHECK(err, title, "\n");
+ }
+ bpf_object__close(obj);
+}
+
+enum {
+ QUEUE,
+ STACK,
+};
+
+static void test_queue_stack_map(int type)
+{
+ const int MAP_SIZE = 32;
+ __u32 vals[MAP_SIZE], duration, retval, size, val;
+ int i, err, prog_fd, map_in_fd, map_out_fd;
+ char file[32], buf[128];
+ struct bpf_object *obj;
+ struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
+
+ /* Fill test values to be used */
+ for (i = 0; i < MAP_SIZE; i++)
+ vals[i] = rand();
+
+ if (type == QUEUE)
+ strncpy(file, "./test_queue_map.o", sizeof(file));
+ else if (type == STACK)
+ strncpy(file, "./test_stack_map.o", sizeof(file));
+ else
+ return;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_in_fd = bpf_find_map(__func__, obj, "map_in");
+ if (map_in_fd < 0)
+ goto out;
+
+ map_out_fd = bpf_find_map(__func__, obj, "map_out");
+ if (map_out_fd < 0)
+ goto out;
+
+ /* Push 32 elements to the input map */
+ for (i = 0; i < MAP_SIZE; i++) {
+ err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
+ if (err) {
+ error_cnt++;
+ goto out;
+ }
+ }
+
+ /* The eBPF program pushes iph.saddr into the output map,
+ * pops the input map, and stores the popped value in iph.daddr.
+ */
+ for (i = 0; i < MAP_SIZE; i++) {
+ if (type == QUEUE) {
+ val = vals[i];
+ pkt_v4.iph.saddr = vals[i] * 5;
+ } else if (type == STACK) {
+ val = vals[MAP_SIZE - 1 - i];
+ pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ if (err || retval || size != sizeof(pkt_v4) ||
+ iph->daddr != val)
+ break;
+ }
+
+ CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val,
+ "bpf_map_pop_elem",
+ "err %d errno %d retval %d size %d iph->daddr %u\n",
+ err, errno, retval, size, iph->daddr);
+
+ /* Queue is empty, program should return TC_ACT_SHOT */
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 2 /* TC_ACT_SHOT */ || size != sizeof(pkt_v4),
+ "check-queue-stack-map-empty",
+ "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+
+ /* Check that the program pushed elements correctly */
+ for (i = 0; i < MAP_SIZE; i++) {
+ err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
+ if (err || val != vals[i] * 5)
+ break;
+ }
+
+ CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
+ "bpf_map_push_elem", "err %d value %u\n", err, val);
+
+out:
+ pkt_v4.iph.saddr = 0;
+ bpf_object__close(obj);
+}
+
int main(void)
{
+ srand(time(NULL));
+
jit_enabled = is_jit_enabled();
test_pkt_access();
@@ -1719,6 +1853,9 @@ int main(void)
test_get_stack_raw_tp();
test_task_fd_query_rawtp();
test_task_fd_query_tp();
+ test_reference_tracking();
+ test_queue_stack_map(QUEUE);
+ test_queue_stack_map(STACK);
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
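
test_reference_tracking() relies on a naming convention rather than explicit flags: any section whose title contains "fail" must be rejected by the verifier, typically because it leaks a socket reference. A minimal sketch of the pattern under test, with illustrative helper arguments:

    struct bpf_sock_tuple tuple = {};
    struct bpf_sock *sk;

    sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
    if (sk)
        bpf_sk_release(sk);  /* a "fail" variant would omit this release */
    return TC_ACT_OK;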
diff --git a/tools/testing/selftests/bpf/test_queue_map.c b/tools/testing/selftests/bpf/test_queue_map.c
new file mode 100644
index 000000000000..87db1f9da33d
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_queue_map.c
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Politecnico di Torino
+#define MAP_TYPE BPF_MAP_TYPE_QUEUE
+#include "test_queue_stack_map.h"
diff --git a/tools/testing/selftests/bpf/test_queue_stack_map.h b/tools/testing/selftests/bpf/test_queue_stack_map.h
new file mode 100644
index 000000000000..295b9b3bc5c7
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_queue_stack_map.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2018 Politecnico di Torino
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/pkt_cls.h>
+#include "bpf_helpers.h"
+
+int _version SEC("version") = 1;
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) map_in = {
+ .type = MAP_TYPE,
+ .key_size = 0,
+ .value_size = sizeof(__u32),
+ .max_entries = 32,
+ .map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) map_out = {
+ .type = MAP_TYPE,
+ .key_size = 0,
+ .value_size = sizeof(__u32),
+ .max_entries = 32,
+ .map_flags = 0,
+};
+
+SEC("test")
+int _test(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ struct ethhdr *eth = (struct ethhdr *)(data);
+ __u32 value;
+ int err;
+
+ if (eth + 1 > data_end)
+ return TC_ACT_SHOT;
+
+ struct iphdr *iph = (struct iphdr *)(eth + 1);
+
+ if (iph + 1 > data_end)
+ return TC_ACT_SHOT;
+
+ err = bpf_map_pop_elem(&map_in, &value);
+ if (err)
+ return TC_ACT_SHOT;
+
+ iph->daddr = value;
+
+ err = bpf_map_push_elem(&map_out, &iph->saddr, 0);
+ if (err)
+ return TC_ACT_SHOT;
+
+ return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_section_names.c b/tools/testing/selftests/bpf/test_section_names.c
new file mode 100644
index 000000000000..7c4f41572b1c
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_section_names.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <err.h>
+#include <bpf/libbpf.h>
+
+#include "bpf_util.h"
+
+struct sec_name_test {
+ const char sec_name[32];
+ struct {
+ int rc;
+ enum bpf_prog_type prog_type;
+ enum bpf_attach_type expected_attach_type;
+ } expected_load;
+ struct {
+ int rc;
+ enum bpf_attach_type attach_type;
+ } expected_attach;
+};
+
+static struct sec_name_test tests[] = {
+ {"InvAliD", {-EINVAL, 0, 0}, {-EINVAL, 0} },
+ {"cgroup", {-EINVAL, 0, 0}, {-EINVAL, 0} },
+ {"socket", {0, BPF_PROG_TYPE_SOCKET_FILTER, 0}, {-EINVAL, 0} },
+ {"kprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
+ {"kretprobe/", {0, BPF_PROG_TYPE_KPROBE, 0}, {-EINVAL, 0} },
+ {"classifier", {0, BPF_PROG_TYPE_SCHED_CLS, 0}, {-EINVAL, 0} },
+ {"action", {0, BPF_PROG_TYPE_SCHED_ACT, 0}, {-EINVAL, 0} },
+ {"tracepoint/", {0, BPF_PROG_TYPE_TRACEPOINT, 0}, {-EINVAL, 0} },
+ {
+ "raw_tracepoint/",
+ {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0},
+ {-EINVAL, 0},
+ },
+ {"xdp", {0, BPF_PROG_TYPE_XDP, 0}, {-EINVAL, 0} },
+ {"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} },
+ {"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} },
+ {"lwt_out", {0, BPF_PROG_TYPE_LWT_OUT, 0}, {-EINVAL, 0} },
+ {"lwt_xmit", {0, BPF_PROG_TYPE_LWT_XMIT, 0}, {-EINVAL, 0} },
+ {"lwt_seg6local", {0, BPF_PROG_TYPE_LWT_SEG6LOCAL, 0}, {-EINVAL, 0} },
+ {
+ "cgroup_skb/ingress",
+ {0, BPF_PROG_TYPE_CGROUP_SKB, 0},
+ {0, BPF_CGROUP_INET_INGRESS},
+ },
+ {
+ "cgroup_skb/egress",
+ {0, BPF_PROG_TYPE_CGROUP_SKB, 0},
+ {0, BPF_CGROUP_INET_EGRESS},
+ },
+ {"cgroup/skb", {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, {-EINVAL, 0} },
+ {
+ "cgroup/sock",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK, 0},
+ {0, BPF_CGROUP_INET_SOCK_CREATE},
+ },
+ {
+ "cgroup/post_bind4",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND},
+ {0, BPF_CGROUP_INET4_POST_BIND},
+ },
+ {
+ "cgroup/post_bind6",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND},
+ {0, BPF_CGROUP_INET6_POST_BIND},
+ },
+ {
+ "cgroup/dev",
+ {0, BPF_PROG_TYPE_CGROUP_DEVICE, 0},
+ {0, BPF_CGROUP_DEVICE},
+ },
+ {"sockops", {0, BPF_PROG_TYPE_SOCK_OPS, 0}, {0, BPF_CGROUP_SOCK_OPS} },
+ {
+ "sk_skb/stream_parser",
+ {0, BPF_PROG_TYPE_SK_SKB, 0},
+ {0, BPF_SK_SKB_STREAM_PARSER},
+ },
+ {
+ "sk_skb/stream_verdict",
+ {0, BPF_PROG_TYPE_SK_SKB, 0},
+ {0, BPF_SK_SKB_STREAM_VERDICT},
+ },
+ {"sk_skb", {0, BPF_PROG_TYPE_SK_SKB, 0}, {-EINVAL, 0} },
+ {"sk_msg", {0, BPF_PROG_TYPE_SK_MSG, 0}, {0, BPF_SK_MSG_VERDICT} },
+ {"lirc_mode2", {0, BPF_PROG_TYPE_LIRC_MODE2, 0}, {0, BPF_LIRC_MODE2} },
+ {
+ "flow_dissector",
+ {0, BPF_PROG_TYPE_FLOW_DISSECTOR, 0},
+ {0, BPF_FLOW_DISSECTOR},
+ },
+ {
+ "cgroup/bind4",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND},
+ {0, BPF_CGROUP_INET4_BIND},
+ },
+ {
+ "cgroup/bind6",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND},
+ {0, BPF_CGROUP_INET6_BIND},
+ },
+ {
+ "cgroup/connect4",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT},
+ {0, BPF_CGROUP_INET4_CONNECT},
+ },
+ {
+ "cgroup/connect6",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT},
+ {0, BPF_CGROUP_INET6_CONNECT},
+ },
+ {
+ "cgroup/sendmsg4",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG},
+ {0, BPF_CGROUP_UDP4_SENDMSG},
+ },
+ {
+ "cgroup/sendmsg6",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG},
+ {0, BPF_CGROUP_UDP6_SENDMSG},
+ },
+};
+
+static int test_prog_type_by_name(const struct sec_name_test *test)
+{
+ enum bpf_attach_type expected_attach_type;
+ enum bpf_prog_type prog_type;
+ int rc;
+
+ rc = libbpf_prog_type_by_name(test->sec_name, &prog_type,
+ &expected_attach_type);
+
+ if (rc != test->expected_load.rc) {
+ warnx("prog: unexpected rc=%d for %s", rc, test->sec_name);
+ return -1;
+ }
+
+ if (rc)
+ return 0;
+
+ if (prog_type != test->expected_load.prog_type) {
+ warnx("prog: unexpected prog_type=%d for %s", prog_type,
+ test->sec_name);
+ return -1;
+ }
+
+ if (expected_attach_type != test->expected_load.expected_attach_type) {
+ warnx("prog: unexpected expected_attach_type=%d for %s",
+ expected_attach_type, test->sec_name);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int test_attach_type_by_name(const struct sec_name_test *test)
+{
+ enum bpf_attach_type attach_type;
+ int rc;
+
+ rc = libbpf_attach_type_by_name(test->sec_name, &attach_type);
+
+ if (rc != test->expected_attach.rc) {
+ warnx("attach: unexpected rc=%d for %s", rc, test->sec_name);
+ return -1;
+ }
+
+ if (rc)
+ return 0;
+
+ if (attach_type != test->expected_attach.attach_type) {
+ warnx("attach: unexpected attach_type=%d for %s", attach_type,
+ test->sec_name);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int run_test_case(const struct sec_name_test *test)
+{
+ if (test_prog_type_by_name(test))
+ return -1;
+ if (test_attach_type_by_name(test))
+ return -1;
+ return 0;
+}
+
+static int run_tests(void)
+{
+ int passes = 0;
+ int fails = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+ if (run_test_case(&tests[i]))
+ ++fails;
+ else
+ ++passes;
+ }
+ printf("Summary: %d PASSED, %d FAILED\n", passes, fails);
+ return fails ? -1 : 0;
+}
+
+int main(int argc, char **argv)
+{
+ return run_tests();
+}
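The table above doubles as a quick reference for libbpf's section-name resolution. A minimal sketch of calling the two APIs directly (error handling elided):

    enum bpf_attach_type exp_attach, attach;
    enum bpf_prog_type ptype;

    /* "cgroup/connect4" -> BPF_PROG_TYPE_CGROUP_SOCK_ADDR +
     * BPF_CGROUP_INET4_CONNECT, per the table above
     */
    if (!libbpf_prog_type_by_name("cgroup/connect4", &ptype, &exp_attach) &&
        !libbpf_attach_type_by_name("cgroup/connect4", &attach))
        printf("prog_type=%d attach_type=%d\n", ptype, attach);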
diff --git a/tools/testing/selftests/bpf/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/test_sk_lookup_kern.c
new file mode 100644
index 000000000000..b745bdc08c2b
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_sk_lookup_kern.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
+
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/pkt_cls.h>
+#include <linux/tcp.h>
+#include <sys/socket.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+int _version SEC("version") = 1;
+char _license[] SEC("license") = "GPL";
+
+/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
+static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
+ void *data_end, __u16 eth_proto,
+ bool *ipv4)
+{
+ struct bpf_sock_tuple *result = NULL;
+ __u8 proto = 0;
+ __u64 ihl_len = 0;
+
+ if (eth_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)(data + nh_off);
+
+ if (iph + 1 > data_end)
+ return NULL;
+ ihl_len = iph->ihl * 4;
+ proto = iph->protocol;
+ *ipv4 = true;
+ result = (struct bpf_sock_tuple *)&iph->saddr;
+ } else if (eth_proto == bpf_htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + nh_off);
+
+ if (ip6h + 1 > data_end)
+ return NULL;
+ ihl_len = sizeof(*ip6h);
+ proto = ip6h->nexthdr;
+ *ipv4 = false;
+ result = (struct bpf_sock_tuple *)&ip6h->saddr;
+ }
+
+ if (data + nh_off + ihl_len > data_end || proto != IPPROTO_TCP)
+ return NULL;
+
+ return result;
+}
+
+SEC("sk_lookup_success")
+int bpf_sk_lookup_test0(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ struct ethhdr *eth = (struct ethhdr *)(data);
+ struct bpf_sock_tuple *tuple;
+ struct bpf_sock *sk;
+ size_t tuple_len;
+ bool ipv4;
+
+ if (eth + 1 > data_end)
+ return TC_ACT_SHOT;
+
+ tuple = get_tuple(data, sizeof(*eth), data_end, eth->h_proto, &ipv4);
+ if (!tuple || (void *)tuple + sizeof(*tuple) > data_end)
+ return TC_ACT_SHOT;
+
+ tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
+ sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, 0, 0);
+ if (sk)
+ bpf_sk_release(sk);
+ return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
+}
+
+SEC("sk_lookup_success_simple")
+int bpf_sk_lookup_test1(struct __sk_buff *skb)
+{
+ struct bpf_sock_tuple tuple = {};
+ struct bpf_sock *sk;
+
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ if (sk)
+ bpf_sk_release(sk);
+ return 0;
+}
+
+SEC("fail_use_after_free")
+int bpf_sk_lookup_uaf(struct __sk_buff *skb)
+{
+ struct bpf_sock_tuple tuple = {};
+ struct bpf_sock *sk;
+ __u32 family = 0;
+
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ if (sk) {
+ bpf_sk_release(sk);
+ family = sk->family;
+ }
+ return family;
+}
+
+SEC("fail_modify_sk_pointer")
+int bpf_sk_lookup_modptr(struct __sk_buff *skb)
+{
+ struct bpf_sock_tuple tuple = {};
+ struct bpf_sock *sk;
+ __u32 family;
+
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ if (sk) {
+ sk += 1;
+ bpf_sk_release(sk);
+ }
+ return 0;
+}
+
+SEC("fail_modify_sk_or_null_pointer")
+int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
+{
+ struct bpf_sock_tuple tuple = {};
+ struct bpf_sock *sk;
+ __u32 family;
+
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ sk += 1;
+ if (sk)
+ bpf_sk_release(sk);
+ return 0;
+}
+
+SEC("fail_no_release")
+int bpf_sk_lookup_test2(struct __sk_buff *skb)
+{
+ struct bpf_sock_tuple tuple = {};
+
+ bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ return 0;
+}
+
+SEC("fail_release_twice")
+int bpf_sk_lookup_test3(struct __sk_buff *skb)
+{
+ struct bpf_sock_tuple tuple = {};
+ struct bpf_sock *sk;
+
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ bpf_sk_release(sk);
+ bpf_sk_release(sk);
+ return 0;
+}
+
+SEC("fail_release_unchecked")
+int bpf_sk_lookup_test4(struct __sk_buff *skb)
+{
+ struct bpf_sock_tuple tuple = {};
+ struct bpf_sock *sk;
+
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ bpf_sk_release(sk);
+ return 0;
+}
+
+void lookup_no_release(struct __sk_buff *skb)
+{
+ struct bpf_sock_tuple tuple = {};
+ bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+}
+
+SEC("fail_no_release_subcall")
+int bpf_sk_lookup_test5(struct __sk_buff *skb)
+{
+ lookup_no_release(skb);
+ return 0;
+}
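The failure cases above all probe the verifier's reference tracking: a socket returned by bpf_sk_lookup_tcp() is an acquired reference that must reach bpf_sk_release() unmodified on every path, with any use of the socket happening before the release. The canonical accepted shape, as a sketch:

    struct bpf_sock_tuple tuple = {};
    struct bpf_sock *sk;

    sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
    if (!sk)
        return TC_ACT_UNSPEC; /* nothing acquired, nothing to release */
    /* read-only use of sk goes here, before the release */
    bpf_sk_release(sk);
    return TC_ACT_OK;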
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index 68e108e4687a..b6c2c605d8c0 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -158,11 +158,7 @@ static int run_test(int cgfd)
bpf_object__for_each_program(prog, pobj) {
prog_name = bpf_program__title(prog, /*needs_copy*/ false);
- if (strcmp(prog_name, "cgroup/connect6") == 0) {
- attach_type = BPF_CGROUP_INET6_CONNECT;
- } else if (strcmp(prog_name, "sockops") == 0) {
- attach_type = BPF_CGROUP_SOCK_OPS;
- } else {
+ if (libbpf_attach_type_by_name(prog_name, &attach_type)) {
log_err("Unexpected prog: %s", prog_name);
goto err;
}
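With this change the test derives the attach type from the section name instead of hardcoding it; a condensed sketch of the resulting flow (prog_fd and cg_fd as already set up in this test):

    enum bpf_attach_type attach_type;

    if (libbpf_attach_type_by_name("cgroup/connect6", &attach_type))
        return -1; /* section name has no attach type */
    /* attach_type == BPF_CGROUP_INET6_CONNECT */
    if (bpf_prog_attach(prog_fd, cg_fd, attach_type, 0))
        return -1;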
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 0c7d9e556b47..622ade0a0957 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -28,6 +28,7 @@
#include <linux/sock_diag.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
+#include <linux/tls.h>
#include <assert.h>
#include <libgen.h>
@@ -43,6 +44,13 @@
int running;
static void running_handler(int a);
+#ifndef TCP_ULP
+# define TCP_ULP 31
+#endif
+#ifndef SOL_TLS
+# define SOL_TLS 282
+#endif
+
/* randomly selected ports for testing on lo */
#define S1_PORT 10000
#define S2_PORT 10001
@@ -69,8 +77,12 @@ int txmsg_apply;
int txmsg_cork;
int txmsg_start;
int txmsg_end;
+int txmsg_start_push;
+int txmsg_end_push;
int txmsg_ingress;
int txmsg_skb;
+int ktls;
+int peek_flag;
static const struct option long_options[] = {
{"help", no_argument, NULL, 'h' },
@@ -90,8 +102,12 @@ static const struct option long_options[] = {
{"txmsg_cork", required_argument, NULL, 'k'},
{"txmsg_start", required_argument, NULL, 's'},
{"txmsg_end", required_argument, NULL, 'e'},
+ {"txmsg_start_push", required_argument, NULL, 'p'},
+ {"txmsg_end_push", required_argument, NULL, 'q'},
{"txmsg_ingress", no_argument, &txmsg_ingress, 1 },
{"txmsg_skb", no_argument, &txmsg_skb, 1 },
+ {"ktls", no_argument, &ktls, 1 },
+ {"peek", no_argument, &peek_flag, 1 },
{0, 0, NULL, 0 }
};
@@ -112,6 +128,71 @@ static void usage(char *argv[])
printf("\n");
}
+char *sock_to_string(int s)
+{
+ if (s == c1)
+ return "client1";
+ else if (s == c2)
+ return "client2";
+ else if (s == s1)
+ return "server1";
+ else if (s == s2)
+ return "server2";
+ else if (s == p1)
+ return "peer1";
+ else if (s == p2)
+ return "peer2";
+ else
+ return "unknown";
+}
+
+static int sockmap_init_ktls(int verbose, int s)
+{
+ struct tls12_crypto_info_aes_gcm_128 tls_tx = {
+ .info = {
+ .version = TLS_1_2_VERSION,
+ .cipher_type = TLS_CIPHER_AES_GCM_128,
+ },
+ };
+ struct tls12_crypto_info_aes_gcm_128 tls_rx = {
+ .info = {
+ .version = TLS_1_2_VERSION,
+ .cipher_type = TLS_CIPHER_AES_GCM_128,
+ },
+ };
+ int so_buf = 6553500;
+ int err;
+
+ err = setsockopt(s, 6 /* IPPROTO_TCP */, TCP_ULP, "tls", sizeof("tls"));
+ if (err) {
+ fprintf(stderr, "setsockopt: TCP_ULP(%s) failed with error %i\n", sock_to_string(s), err);
+ return -EINVAL;
+ }
+ err = setsockopt(s, SOL_TLS, TLS_TX, (void *)&tls_tx, sizeof(tls_tx));
+ if (err) {
+ fprintf(stderr, "setsockopt: TLS_TX(%s) failed with error %i\n", sock_to_string(s), err);
+ return -EINVAL;
+ }
+ err = setsockopt(s, SOL_TLS, TLS_RX, (void *)&tls_rx, sizeof(tls_rx));
+ if (err) {
+ fprintf(stderr, "setsockopt: TLS_RX(%s) failed with error %i\n", sock_to_string(s), err);
+ return -EINVAL;
+ }
+ err = setsockopt(s, SOL_SOCKET, SO_SNDBUF, &so_buf, sizeof(so_buf));
+ if (err) {
+ fprintf(stderr, "setsockopt: (%s) failed sndbuf with error %i\n", sock_to_string(s), err);
+ return -EINVAL;
+ }
+ err = setsockopt(s, SOL_SOCKET, SO_RCVBUF, &so_buf, sizeof(so_buf));
+ if (err) {
+ fprintf(stderr, "setsockopt: (%s) failed rcvbuf with error %i\n", sock_to_string(s), err);
+ return -EINVAL;
+ }
+
+ if (verbose)
+ fprintf(stdout, "socket(%s) kTLS enabled\n", sock_to_string(s));
+ return 0;
+}
static int sockmap_init_sockets(int verbose)
{
int i, err, one = 1;
@@ -277,33 +358,40 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt,
return 0;
}
-static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
- struct msg_stats *s, bool tx,
- struct sockmap_options *opt)
+static void msg_free_iov(struct msghdr *msg)
{
- struct msghdr msg = {0};
- int err, i, flags = MSG_NOSIGNAL;
+ int i;
+
+ for (i = 0; i < msg->msg_iovlen; i++)
+ free(msg->msg_iov[i].iov_base);
+ free(msg->msg_iov);
+ msg->msg_iov = NULL;
+ msg->msg_iovlen = 0;
+}
+
+static int msg_alloc_iov(struct msghdr *msg,
+ int iov_count, int iov_length,
+ bool data, bool xmit)
+{
+ unsigned char k = 0;
struct iovec *iov;
- unsigned char k;
- bool data_test = opt->data_test;
- bool drop = opt->drop_expected;
+ int i;
iov = calloc(iov_count, sizeof(struct iovec));
if (!iov)
return errno;
- k = 0;
for (i = 0; i < iov_count; i++) {
unsigned char *d = calloc(iov_length, sizeof(char));
if (!d) {
fprintf(stderr, "iov_count %i/%i OOM\n", i, iov_count);
- goto out_errno;
+ goto unwind_iov;
}
iov[i].iov_base = d;
iov[i].iov_len = iov_length;
- if (data_test && tx) {
+ if (data && xmit) {
int j;
for (j = 0; j < iov_length; j++)
@@ -311,9 +399,60 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
}
}
- msg.msg_iov = iov;
- msg.msg_iovlen = iov_count;
- k = 0;
+ msg->msg_iov = iov;
+ msg->msg_iovlen = iov_count;
+
+ return 0;
+unwind_iov:
+ for (i--; i >= 0; i--)
+ free(iov[i].iov_base);
+ return -ENOMEM;
+}
+
+static int msg_verify_data(struct msghdr *msg, int size, int chunk_sz)
+{
+ int i, j, bytes_cnt = 0;
+ unsigned char k = 0;
+
+ for (i = 0; i < msg->msg_iovlen; i++) {
+ unsigned char *d = msg->msg_iov[i].iov_base;
+
+ for (j = 0;
+ j < msg->msg_iov[i].iov_len && size; j++) {
+ if (d[j] != k++) {
+ fprintf(stderr,
+ "detected data corruption @iov[%i]:%i %02x != %02x, %02x ?= %02x\n",
+ i, j, d[j], k - 1, d[j+1], k);
+ return -EIO;
+ }
+ bytes_cnt++;
+ if (bytes_cnt == chunk_sz) {
+ k = 0;
+ bytes_cnt = 0;
+ }
+ size--;
+ }
+ }
+ return 0;
+}
+
+static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
+ struct msg_stats *s, bool tx,
+ struct sockmap_options *opt)
+{
+ struct msghdr msg = {0}, msg_peek = {0};
+ int err, i, flags = MSG_NOSIGNAL;
+ bool drop = opt->drop_expected;
+ bool data = opt->data_test;
+
+ err = msg_alloc_iov(&msg, iov_count, iov_length, data, tx);
+ if (err)
+ goto out_errno;
+ if (peek_flag) {
+ err = msg_alloc_iov(&msg_peek, iov_count, iov_length, data, tx);
+ if (err)
+ goto out_errno;
+ }
if (tx) {
clock_gettime(CLOCK_MONOTONIC, &s->start);
@@ -333,19 +472,12 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
}
clock_gettime(CLOCK_MONOTONIC, &s->end);
} else {
- int slct, recv, max_fd = fd;
+ int slct, recvp = 0, recv, max_fd = fd;
int fd_flags = O_NONBLOCK;
struct timeval timeout;
float total_bytes;
- int bytes_cnt = 0;
- int chunk_sz;
fd_set w;
- if (opt->sendpage)
- chunk_sz = iov_length * cnt;
- else
- chunk_sz = iov_length * iov_count;
-
fcntl(fd, fd_flags);
total_bytes = (float)iov_count * (float)iov_length * (float)cnt;
err = clock_gettime(CLOCK_MONOTONIC, &s->start);
@@ -377,6 +509,19 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
goto out_errno;
}
+ errno = 0;
+ if (peek_flag) {
+ flags |= MSG_PEEK;
+ recvp = recvmsg(fd, &msg_peek, flags);
+ if (recvp < 0) {
+ if (errno != EWOULDBLOCK) {
+ clock_gettime(CLOCK_MONOTONIC, &s->end);
+ goto out_errno;
+ }
+ }
+ flags = 0;
+ }
+
recv = recvmsg(fd, &msg, flags);
if (recv < 0) {
if (errno != EWOULDBLOCK) {
@@ -388,27 +533,23 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
s->bytes_recvd += recv;
- if (data_test) {
- int j;
-
- for (i = 0; i < msg.msg_iovlen; i++) {
- unsigned char *d = iov[i].iov_base;
-
- for (j = 0;
- j < iov[i].iov_len && recv; j++) {
- if (d[j] != k++) {
- errno = -EIO;
- fprintf(stderr,
- "detected data corruption @iov[%i]:%i %02x != %02x, %02x ?= %02x\n",
- i, j, d[j], k - 1, d[j+1], k);
- goto out_errno;
- }
- bytes_cnt++;
- if (bytes_cnt == chunk_sz) {
- k = 0;
- bytes_cnt = 0;
- }
- recv--;
+ if (data) {
+ int chunk_sz = opt->sendpage ?
+ iov_length * cnt :
+ iov_length * iov_count;
+
+ errno = msg_verify_data(&msg, recv, chunk_sz);
+ if (errno) {
+ perror("data verify msg failed\n");
+ goto out_errno;
+ }
+ if (recvp) {
+ errno = msg_verify_data(&msg_peek,
+ recvp,
+ chunk_sz);
+ if (errno) {
+ perror("data verify msg_peek failed\n");
+ goto out_errno;
}
}
}
@@ -416,14 +557,12 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
clock_gettime(CLOCK_MONOTONIC, &s->end);
}
- for (i = 0; i < iov_count; i++)
- free(iov[i].iov_base);
- free(iov);
- return 0;
+ msg_free_iov(&msg);
+ msg_free_iov(&msg_peek);
+ return err;
out_errno:
- for (i = 0; i < iov_count; i++)
- free(iov[i].iov_base);
- free(iov);
+ msg_free_iov(&msg);
+ msg_free_iov(&msg_peek);
return errno;
}
@@ -456,6 +595,21 @@ static int sendmsg_test(struct sockmap_options *opt)
else
rx_fd = p2;
+ if (ktls) {
+ /* Redirecting into a non-TLS socket which sends into a TLS
+ * socket is not a valid test. So in this case let's not
+ * enable kTLS but still run the test.
+ */
+ if (!txmsg_redir || (txmsg_redir && txmsg_ingress)) {
+ err = sockmap_init_ktls(opt->verbose, rx_fd);
+ if (err)
+ return err;
+ }
+ err = sockmap_init_ktls(opt->verbose, c1);
+ if (err)
+ return err;
+ }
+
rxpid = fork();
if (rxpid == 0) {
if (opt->drop_expected)
@@ -469,17 +623,16 @@ static int sendmsg_test(struct sockmap_options *opt)
fprintf(stderr,
"msg_loop_rx: iov_count %i iov_buf %i cnt %i err %i\n",
iov_count, iov_buf, cnt, err);
- shutdown(p2, SHUT_RDWR);
- shutdown(p1, SHUT_RDWR);
if (s.end.tv_sec - s.start.tv_sec) {
sent_Bps = sentBps(s);
recvd_Bps = recvdBps(s);
}
if (opt->verbose)
fprintf(stdout,
- "rx_sendmsg: TX: %zuB %fB/s %fGB/s RX: %zuB %fB/s %fGB/s\n",
+ "rx_sendmsg: TX: %zuB %fB/s %fGB/s RX: %zuB %fB/s %fGB/s %s\n",
s.bytes_sent, sent_Bps, sent_Bps/giga,
- s.bytes_recvd, recvd_Bps, recvd_Bps/giga);
+ s.bytes_recvd, recvd_Bps, recvd_Bps/giga,
+ peek_flag ? "(peek_msg)" : "");
if (err && txmsg_cork)
err = 0;
exit(err ? 1 : 0);
@@ -500,7 +653,6 @@ static int sendmsg_test(struct sockmap_options *opt)
fprintf(stderr,
"msg_loop_tx: iov_count %i iov_buf %i cnt %i err %i\n",
iov_count, iov_buf, cnt, err);
- shutdown(c1, SHUT_RDWR);
if (s.end.tv_sec - s.start.tv_sec) {
sent_Bps = sentBps(s);
recvd_Bps = recvdBps(s);
@@ -755,6 +907,30 @@ run:
}
}
+ if (txmsg_start_push) {
+ i = 2;
+ err = bpf_map_update_elem(map_fd[5],
+ &i, &txmsg_start_push, BPF_ANY);
+ if (err) {
+ fprintf(stderr,
+ "ERROR: bpf_map_update_elem (txmsg_start_push): %d (%s)\n",
+ err, strerror(errno));
+ goto out;
+ }
+ }
+
+ if (txmsg_end_push) {
+ i = 3;
+ err = bpf_map_update_elem(map_fd[5],
+ &i, &txmsg_end_push, BPF_ANY);
+ if (err) {
+ fprintf(stderr,
+ "ERROR: bpf_map_update_elem %i@%i (txmsg_end_push): %d (%s)\n",
+ txmsg_end_push, i, err, strerror(errno));
+ goto out;
+ }
+ }
+
if (txmsg_ingress) {
int in = BPF_F_INGRESS;
@@ -910,6 +1086,10 @@ static void test_options(char *options)
strncat(options, "ingress,", OPTSTRING);
if (txmsg_skb)
strncat(options, "skb,", OPTSTRING);
+ if (ktls)
+ strncat(options, "ktls,", OPTSTRING);
+ if (peek_flag)
+ strncat(options, "peek,", OPTSTRING);
}
static int __test_exec(int cgrp, int test, struct sockmap_options *opt)
@@ -1083,6 +1263,8 @@ static int test_mixed(int cgrp)
txmsg_pass = txmsg_noisy = txmsg_redir_noisy = txmsg_drop = 0;
txmsg_apply = txmsg_cork = 0;
txmsg_start = txmsg_end = 0;
+ txmsg_start_push = txmsg_end_push = 0;
+
/* Test small and large iov_count values with pass/redir/apply/cork */
txmsg_pass = 1;
txmsg_redir = 0;
@@ -1199,6 +1381,8 @@ static int test_start_end(int cgrp)
/* Test basic start/end with lots of iov_count and iov_lengths */
txmsg_start = 1;
txmsg_end = 2;
+ txmsg_start_push = 1;
+ txmsg_end_push = 2;
err = test_txmsg(cgrp);
if (err)
goto out;
@@ -1212,6 +1396,8 @@ static int test_start_end(int cgrp)
for (i = 99; i <= 1600; i += 500) {
txmsg_start = 0;
txmsg_end = i;
+ txmsg_start_push = 0;
+ txmsg_end_push = i;
err = test_exec(cgrp, &opt);
if (err)
goto out;
@@ -1221,6 +1407,8 @@ static int test_start_end(int cgrp)
for (i = 199; i <= 1600; i += 500) {
txmsg_start = 100;
txmsg_end = i;
+ txmsg_start_push = 100;
+ txmsg_end_push = i;
err = test_exec(cgrp, &opt);
if (err)
goto out;
@@ -1229,6 +1417,8 @@ static int test_start_end(int cgrp)
/* Test start/end with cork pulling last sg entry */
txmsg_start = 1500;
txmsg_end = 1600;
+ txmsg_start_push = 1500;
+ txmsg_end_push = 1600;
err = test_exec(cgrp, &opt);
if (err)
goto out;
@@ -1236,6 +1426,8 @@ static int test_start_end(int cgrp)
/* Test start/end pull of single byte in last page */
txmsg_start = 1111;
txmsg_end = 1112;
+ txmsg_start_push = 1111;
+ txmsg_end_push = 1112;
err = test_exec(cgrp, &opt);
if (err)
goto out;
@@ -1243,6 +1435,8 @@ static int test_start_end(int cgrp)
/* Test start/end with end < start */
txmsg_start = 1111;
txmsg_end = 0;
+ txmsg_start_push = 1111;
+ txmsg_end_push = 0;
err = test_exec(cgrp, &opt);
if (err)
goto out;
@@ -1250,6 +1444,8 @@ static int test_start_end(int cgrp)
/* Test start/end with end > data */
txmsg_start = 0;
txmsg_end = 1601;
+ txmsg_start_push = 0;
+ txmsg_end_push = 1601;
err = test_exec(cgrp, &opt);
if (err)
goto out;
@@ -1257,6 +1453,8 @@ static int test_start_end(int cgrp)
/* Test start/end with start > data */
txmsg_start = 1601;
txmsg_end = 1600;
+ txmsg_start_push = 1601;
+ txmsg_end_push = 1600;
err = test_exec(cgrp, &opt);
out:
@@ -1272,7 +1470,7 @@ char *map_names[] = {
"sock_map_redir",
"sock_apply_bytes",
"sock_cork_bytes",
- "sock_pull_bytes",
+ "sock_bytes",
"sock_redir_flags",
"sock_skb_opts",
};
@@ -1348,9 +1546,9 @@ static int populate_progs(char *bpf_file)
return 0;
}
-static int __test_suite(char *bpf_file)
+static int __test_suite(int cg_fd, char *bpf_file)
{
- int cg_fd, err;
+ int err, cleanup = cg_fd;
err = populate_progs(bpf_file);
if (err < 0) {
@@ -1358,26 +1556,28 @@ static int __test_suite(char *bpf_file)
return err;
}
- if (setup_cgroup_environment()) {
- fprintf(stderr, "ERROR: cgroup env failed\n");
- return -EINVAL;
- }
-
- cg_fd = create_and_get_cgroup(CG_PATH);
if (cg_fd < 0) {
- fprintf(stderr,
- "ERROR: (%i) open cg path failed: %s\n",
- cg_fd, optarg);
- return cg_fd;
- }
+ if (setup_cgroup_environment()) {
+ fprintf(stderr, "ERROR: cgroup env failed\n");
+ return -EINVAL;
+ }
- if (join_cgroup(CG_PATH)) {
- fprintf(stderr, "ERROR: failed to join cgroup\n");
- return -EINVAL;
+ cg_fd = create_and_get_cgroup(CG_PATH);
+ if (cg_fd < 0) {
+ fprintf(stderr,
+ "ERROR: (%i) open cg path failed: %s\n",
+ cg_fd, optarg);
+ return cg_fd;
+ }
+
+ if (join_cgroup(CG_PATH)) {
+ fprintf(stderr, "ERROR: failed to join cgroup\n");
+ return -EINVAL;
+ }
}
/* Tests basic commands and APIs with range of iov values */
- txmsg_start = txmsg_end = 0;
+ txmsg_start = txmsg_end = txmsg_start_push = txmsg_end_push = 0;
err = test_txmsg(cg_fd);
if (err)
goto out;
@@ -1394,20 +1594,24 @@ static int __test_suite(char *bpf_file)
out:
printf("Summary: %i PASSED %i FAILED\n", passed, failed);
- cleanup_cgroup_environment();
- close(cg_fd);
+ if (cleanup < 0) {
+ cleanup_cgroup_environment();
+ close(cg_fd);
+ }
return err;
}
-static int test_suite(void)
+static int test_suite(int cg_fd)
{
int err;
- err = __test_suite(BPF_SOCKMAP_FILENAME);
+ err = __test_suite(cg_fd, BPF_SOCKMAP_FILENAME);
if (err)
goto out;
- err = __test_suite(BPF_SOCKHASH_FILENAME);
+ err = __test_suite(cg_fd, BPF_SOCKHASH_FILENAME);
out:
+ if (cg_fd > -1)
+ close(cg_fd);
return err;
}
@@ -1420,9 +1624,9 @@ int main(int argc, char **argv)
int test = PING_PONG;
if (argc < 2)
- return test_suite();
+ return test_suite(-1);
- while ((opt = getopt_long(argc, argv, ":dhvc:r:i:l:t:",
+ while ((opt = getopt_long(argc, argv, ":dhvc:r:i:l:t:p:q:",
long_options, &longindex)) != -1) {
switch (opt) {
case 's':
@@ -1431,6 +1635,12 @@ int main(int argc, char **argv)
case 'e':
txmsg_end = atoi(optarg);
break;
+ case 'p':
+ txmsg_start_push = atoi(optarg);
+ break;
+ case 'q':
+ txmsg_end_push = atoi(optarg);
+ break;
case 'a':
txmsg_apply = atoi(optarg);
break;
@@ -1486,6 +1696,9 @@ int main(int argc, char **argv)
}
}
+ if (argc <= 3 && cg_fd)
+ return test_suite(cg_fd);
+
if (!cg_fd) {
fprintf(stderr, "%s requires cgroup option: --cgroup <path>\n",
argv[0]);
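The new --txmsg_start_push/--txmsg_end_push options land in slots 2 and 3 of the shared array map (map_fd[5], sock_bytes on the BPF side), next to the existing pull parameters in slots 0 and 1. The update pattern above, condensed into one sketch:

    int i, vals[4] = { txmsg_start, txmsg_end,
                       txmsg_start_push, txmsg_end_push };

    for (i = 0; i < 4; i++)
        if (bpf_map_update_elem(map_fd[5], &i, &vals[i], BPF_ANY))
            return -1; /* slot i of sock_bytes */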
diff --git a/tools/testing/selftests/bpf/test_sockmap_kern.h b/tools/testing/selftests/bpf/test_sockmap_kern.h
index 8e8e41780bb9..14b8bbac004f 100644
--- a/tools/testing/selftests/bpf/test_sockmap_kern.h
+++ b/tools/testing/selftests/bpf/test_sockmap_kern.h
@@ -70,11 +70,11 @@ struct bpf_map_def SEC("maps") sock_cork_bytes = {
.max_entries = 1
};
-struct bpf_map_def SEC("maps") sock_pull_bytes = {
+struct bpf_map_def SEC("maps") sock_bytes = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
- .max_entries = 2
+ .max_entries = 4
};
struct bpf_map_def SEC("maps") sock_redir_flags = {
@@ -181,8 +181,8 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
SEC("sk_msg1")
int bpf_prog4(struct sk_msg_md *msg)
{
- int *bytes, zero = 0, one = 1;
- int *start, *end;
+ int *bytes, zero = 0, one = 1, two = 2, three = 3;
+ int *start, *end, *start_push, *end_push;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -190,18 +190,24 @@ int bpf_prog4(struct sk_msg_md *msg)
bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
if (bytes)
bpf_msg_cork_bytes(msg, *bytes);
- start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
- end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
+ start = bpf_map_lookup_elem(&sock_bytes, &zero);
+ end = bpf_map_lookup_elem(&sock_bytes, &one);
if (start && end)
bpf_msg_pull_data(msg, *start, *end, 0);
+ start_push = bpf_map_lookup_elem(&sock_bytes, &two);
+ end_push = bpf_map_lookup_elem(&sock_bytes, &three);
+ if (start_push && end_push)
+ bpf_msg_push_data(msg, *start_push, *end_push, 0);
return SK_PASS;
}
SEC("sk_msg2")
int bpf_prog5(struct sk_msg_md *msg)
{
- int err1 = -1, err2 = -1, zero = 0, one = 1;
- int *bytes, *start, *end, len1, len2;
+ int zero = 0, one = 1, two = 2, three = 3;
+ int *start, *end, *start_push, *end_push;
+ int *bytes, len1, len2 = 0, len3;
+ int err1 = -1, err2 = -1;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -210,8 +216,8 @@ int bpf_prog5(struct sk_msg_md *msg)
if (bytes)
err2 = bpf_msg_cork_bytes(msg, *bytes);
len1 = (__u64)msg->data_end - (__u64)msg->data;
- start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
- end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
+ start = bpf_map_lookup_elem(&sock_bytes, &zero);
+ end = bpf_map_lookup_elem(&sock_bytes, &one);
if (start && end) {
int err;
@@ -225,6 +231,23 @@ int bpf_prog5(struct sk_msg_md *msg)
bpf_printk("sk_msg2: length update %i->%i\n",
len1, len2);
}
+
+ start_push = bpf_map_lookup_elem(&sock_bytes, &two);
+ end_push = bpf_map_lookup_elem(&sock_bytes, &three);
+ if (start_push && end_push) {
+ int err;
+
+ bpf_printk("sk_msg2: push(%i:%i)\n",
+ start_push ? *start_push : 0,
+ end_push ? *end_push : 0);
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ bpf_printk("sk_msg2: push_data err %i\n", err);
+ len3 = (__u64)msg->data_end - (__u64)msg->data;
+ bpf_printk("sk_msg2: length push_update %i->%i\n",
+ len2 ? len2 : len1, len3);
+ }
+
bpf_printk("sk_msg2: data length %i err1 %i err2 %i\n",
len1, err1, err2);
return SK_PASS;
@@ -233,8 +256,8 @@ int bpf_prog5(struct sk_msg_md *msg)
SEC("sk_msg3")
int bpf_prog6(struct sk_msg_md *msg)
{
- int *bytes, zero = 0, one = 1, key = 0;
- int *start, *end, *f;
+ int *bytes, *start, *end, *start_push, *end_push, *f;
+ int zero = 0, one = 1, two = 2, three = 3, key = 0;
__u64 flags = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
@@ -243,10 +266,17 @@ int bpf_prog6(struct sk_msg_md *msg)
bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
if (bytes)
bpf_msg_cork_bytes(msg, *bytes);
- start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
- end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
+
+ start = bpf_map_lookup_elem(&sock_bytes, &zero);
+ end = bpf_map_lookup_elem(&sock_bytes, &one);
if (start && end)
bpf_msg_pull_data(msg, *start, *end, 0);
+
+ start_push = bpf_map_lookup_elem(&sock_bytes, &two);
+ end_push = bpf_map_lookup_elem(&sock_bytes, &three);
+ if (start_push && end_push)
+ bpf_msg_push_data(msg, *start_push, *end_push, 0);
+
f = bpf_map_lookup_elem(&sock_redir_flags, &zero);
if (f && *f) {
key = 2;
@@ -262,8 +292,9 @@ int bpf_prog6(struct sk_msg_md *msg)
SEC("sk_msg4")
int bpf_prog7(struct sk_msg_md *msg)
{
- int err1 = 0, err2 = 0, zero = 0, one = 1, key = 0;
- int *f, *bytes, *start, *end, len1, len2;
+ int zero = 0, one = 1, two = 2, three = 3, len1, len2 = 0, len3;
+ int *bytes, *start, *end, *start_push, *end_push, *f;
+ int err1 = 0, err2 = 0, key = 0;
__u64 flags = 0;
int err;
@@ -274,10 +305,10 @@ int bpf_prog7(struct sk_msg_md *msg)
if (bytes)
err2 = bpf_msg_cork_bytes(msg, *bytes);
len1 = (__u64)msg->data_end - (__u64)msg->data;
- start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
- end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
- if (start && end) {
+ start = bpf_map_lookup_elem(&sock_bytes, &zero);
+ end = bpf_map_lookup_elem(&sock_bytes, &one);
+ if (start && end) {
bpf_printk("sk_msg2: pull(%i:%i)\n",
start ? *start : 0, end ? *end : 0);
err = bpf_msg_pull_data(msg, *start, *end, 0);
@@ -288,6 +319,22 @@ int bpf_prog7(struct sk_msg_md *msg)
bpf_printk("sk_msg2: length update %i->%i\n",
len1, len2);
}
+
+ start_push = bpf_map_lookup_elem(&sock_bytes, &two);
+ end_push = bpf_map_lookup_elem(&sock_bytes, &three);
+ if (start_push && end_push) {
+ bpf_printk("sk_msg4: push(%i:%i)\n",
+ start_push ? *start_push : 0,
+ end_push ? *end_push : 0);
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ bpf_printk("sk_msg4: push_data err %i\n",
+ err);
+ len3 = (__u64)msg->data_end - (__u64)msg->data;
+ bpf_printk("sk_msg4: length push_update %i->%i\n",
+ len2 ? len2 : len1, len3);
+ }
+
f = bpf_map_lookup_elem(&sock_redir_flags, &zero);
if (f && *f) {
key = 2;
@@ -342,8 +389,8 @@ int bpf_prog9(struct sk_msg_md *msg)
SEC("sk_msg7")
int bpf_prog10(struct sk_msg_md *msg)
{
- int *bytes, zero = 0, one = 1;
- int *start, *end;
+ int *bytes, *start, *end, *start_push, *end_push;
+ int zero = 0, one = 1, two = 2, three = 3;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -351,10 +398,14 @@ int bpf_prog10(struct sk_msg_md *msg)
bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
if (bytes)
bpf_msg_cork_bytes(msg, *bytes);
- start = bpf_map_lookup_elem(&sock_pull_bytes, &zero);
- end = bpf_map_lookup_elem(&sock_pull_bytes, &one);
+ start = bpf_map_lookup_elem(&sock_bytes, &zero);
+ end = bpf_map_lookup_elem(&sock_bytes, &one);
if (start && end)
bpf_msg_pull_data(msg, *start, *end, 0);
+ start_push = bpf_map_lookup_elem(&sock_bytes, &two);
+ end_push = bpf_map_lookup_elem(&sock_bytes, &three);
+ if (start_push && end_push)
+ bpf_msg_push_data(msg, *start_push, *end_push, 0);
return SK_DROP;
}
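One subtlety in the programs above: despite the end_push naming, the helper documentation describes the third argument of bpf_msg_push_data() as a length — it inserts that many bytes of writable space at offset start and grows the message. A minimal sketch:

    int err;

    /* grow msg by 2 bytes starting at byte offset 1 */
    err = bpf_msg_push_data(msg, 1, 2, 0);
    if (err)
        bpf_printk("push_data err %i\n", err);
    /* msg->data/msg->data_end now describe the grown region */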
diff --git a/tools/testing/selftests/bpf/test_stack_map.c b/tools/testing/selftests/bpf/test_stack_map.c
new file mode 100644
index 000000000000..31c3880e6da0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_stack_map.c
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Politecnico di Torino
+#define MAP_TYPE BPF_MAP_TYPE_STACK
+#include "test_queue_stack_map.h"
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
index 4b7fd540cea9..74f73b33a7b0 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_kern.c
@@ -5,6 +5,7 @@
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/tcp.h>
@@ -17,6 +18,13 @@ struct bpf_map_def SEC("maps") global_map = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct tcpbpf_globals),
+ .max_entries = 4,
+};
+
+struct bpf_map_def SEC("maps") sockopt_results = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(int),
.max_entries = 2,
};
@@ -45,11 +53,14 @@ int _version SEC("version") = 1;
SEC("sockops")
int bpf_testcb(struct bpf_sock_ops *skops)
{
- int rv = -1;
- int bad_call_rv = 0;
+ char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)];
+ struct tcphdr *thdr;
int good_call_rv = 0;
- int op;
+ int bad_call_rv = 0;
+ int save_syn = 1;
+ int rv = -1;
int v = 0;
+ int op;
op = (int) skops->op;
@@ -82,6 +93,21 @@ int bpf_testcb(struct bpf_sock_ops *skops)
v = 0xff;
rv = bpf_setsockopt(skops, SOL_IPV6, IPV6_TCLASS, &v,
sizeof(v));
+ if (skops->family == AF_INET6) {
+ v = bpf_getsockopt(skops, IPPROTO_TCP, TCP_SAVED_SYN,
+ header, (sizeof(struct ipv6hdr) +
+ sizeof(struct tcphdr)));
+ if (!v) {
+ int offset = sizeof(struct ipv6hdr);
+
+ thdr = (struct tcphdr *)(header + offset);
+ v = thdr->syn;
+ __u32 key = 1;
+
+ bpf_map_update_elem(&sockopt_results, &key, &v,
+ BPF_ANY);
+ }
+ }
break;
case BPF_SOCK_OPS_RTO_CB:
break;
@@ -111,6 +137,12 @@ int bpf_testcb(struct bpf_sock_ops *skops)
break;
case BPF_SOCK_OPS_TCP_LISTEN_CB:
bpf_sock_ops_cb_flags_set(skops, BPF_SOCK_OPS_STATE_CB_FLAG);
+ v = bpf_setsockopt(skops, IPPROTO_TCP, TCP_SAVE_SYN,
+ &save_syn, sizeof(save_syn));
+ /* Update the sockopt_results map with the setsockopt result */
+ __u32 key = 0;
+
+ bpf_map_update_elem(&sockopt_results, &key, &v, BPF_ANY);
break;
default:
rv = -1;
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c
index a275c2971376..e6eebda7d112 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_user.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c
@@ -54,6 +54,26 @@ err:
return -1;
}
+int verify_sockopt_result(int sock_map_fd)
+{
+ __u32 key = 0;
+ int res;
+ int rv;
+
+ /* check setsockopt for SAVE_SYN */
+ rv = bpf_map_lookup_elem(sock_map_fd, &key, &res);
+ EXPECT_EQ(0, rv, "d");
+ EXPECT_EQ(0, res, "d");
+ key = 1;
+ /* check getsockopt for SAVED_SYN */
+ rv = bpf_map_lookup_elem(sock_map_fd, &key, &res);
+ EXPECT_EQ(0, rv, "d");
+ EXPECT_EQ(1, res, "d");
+ return 0;
+err:
+ return -1;
+}
+
static int bpf_find_map(const char *test, struct bpf_object *obj,
const char *name)
{
@@ -70,11 +90,11 @@ static int bpf_find_map(const char *test, struct bpf_object *obj,
int main(int argc, char **argv)
{
const char *file = "test_tcpbpf_kern.o";
+ int prog_fd, map_fd, sock_map_fd;
struct tcpbpf_globals g = {0};
const char *cg_path = "/foo";
int error = EXIT_FAILURE;
struct bpf_object *obj;
- int prog_fd, map_fd;
int cg_fd = -1;
__u32 key = 0;
int rv;
@@ -110,6 +130,10 @@ int main(int argc, char **argv)
if (map_fd < 0)
goto err;
+ sock_map_fd = bpf_find_map(__func__, obj, "sockopt_results");
+ if (sock_map_fd < 0)
+ goto err;
+
rv = bpf_map_lookup_elem(map_fd, &key, &g);
if (rv != 0) {
printf("FAILED: bpf_map_lookup_elem returns %d\n", rv);
@@ -121,6 +145,11 @@ int main(int argc, char **argv)
goto err;
}
+ if (verify_sockopt_result(sock_map_fd)) {
+ printf("FAILED: Wrong sockopt stats\n");
+ goto err;
+ }
+
printf("PASSED!\n");
error = 0;
err:
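The SAVE_SYN/SAVED_SYN pair this test verifies mirrors the plain socket options: TCP_SAVE_SYN is set on the listener, and TCP_SAVED_SYN is read back on the accepted socket. A sketch without BPF (lfd and cfd are an assumed listener and accepted fd):

    char syn[128]; /* enough for IPv6 + TCP headers */
    socklen_t len = sizeof(syn);
    int one = 1;

    setsockopt(lfd, IPPROTO_TCP, TCP_SAVE_SYN, &one, sizeof(one));
    /* ... accept() a connection on lfd into cfd ... */
    if (!getsockopt(cfd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len))
        printf("saved SYN: %u bytes\n", (unsigned)len);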
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 67c412d19c09..769d68a48f30 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -3,6 +3,7 @@
*
* Copyright (c) 2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2017 Facebook
+ * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -47,7 +48,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 8
+#define MAX_NR_MAPS 13
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -60,14 +61,19 @@ static bool unpriv_disabled = false;
struct bpf_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
- int fixup_map1[MAX_FIXUPS];
- int fixup_map2[MAX_FIXUPS];
- int fixup_map3[MAX_FIXUPS];
- int fixup_map4[MAX_FIXUPS];
+ int fixup_map_hash_8b[MAX_FIXUPS];
+ int fixup_map_hash_48b[MAX_FIXUPS];
+ int fixup_map_hash_16b[MAX_FIXUPS];
+ int fixup_map_array_48b[MAX_FIXUPS];
+ int fixup_map_sockmap[MAX_FIXUPS];
+ int fixup_map_sockhash[MAX_FIXUPS];
+ int fixup_map_xskmap[MAX_FIXUPS];
+ int fixup_map_stacktrace[MAX_FIXUPS];
int fixup_prog1[MAX_FIXUPS];
int fixup_prog2[MAX_FIXUPS];
int fixup_map_in_map[MAX_FIXUPS];
int fixup_cgroup_storage[MAX_FIXUPS];
+ int fixup_percpu_cgroup_storage[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
uint32_t retval;
@@ -177,6 +183,24 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
self->retval = (uint32_t)res;
}
+/* BPF_SK_LOOKUP expands to 13 instructions; account for that offset in map fixups */
+#define BPF_SK_LOOKUP \
+ /* struct bpf_sock_tuple tuple = {} */ \
+ BPF_MOV64_IMM(BPF_REG_2, 0), \
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
+ /* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */ \
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
+ BPF_MOV64_IMM(BPF_REG_4, 0), \
+ BPF_MOV64_IMM(BPF_REG_5, 0), \
+ BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
+
static struct bpf_test tests[] = {
{
"add+sub+mul",
@@ -856,7 +880,7 @@ static struct bpf_test tests[] = {
BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 2 },
+ .fixup_map_hash_8b = { 2 },
.errstr = "invalid indirect read from stack",
.result = REJECT,
},
@@ -1090,7 +1114,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "R0 invalid mem access 'map_value_or_null'",
.result = REJECT,
},
@@ -1107,7 +1131,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "misaligned value access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
@@ -1127,7 +1151,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "R0 invalid mem access",
.errstr_unpriv = "R0 leaks addr",
.result = REJECT,
@@ -1217,7 +1241,7 @@ static struct bpf_test tests[] = {
BPF_FUNC_map_delete_elem),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 24 },
+ .fixup_map_hash_8b = { 24 },
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
@@ -1371,7 +1395,7 @@ static struct bpf_test tests[] = {
offsetof(struct __sk_buff, pkt_type)),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 4 },
+ .fixup_map_hash_8b = { 4 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
@@ -1394,7 +1418,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -12),
},
- .fixup_map1 = { 6 },
+ .fixup_map_hash_8b = { 6 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
@@ -1418,7 +1442,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -13),
},
- .fixup_map1 = { 7 },
+ .fixup_map_hash_8b = { 7 },
.errstr = "different pointers",
.errstr_unpriv = "R1 pointer comparison",
.result = REJECT,
@@ -2555,7 +2579,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr_unpriv = "R4 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
@@ -2572,7 +2596,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "invalid indirect read from stack off -8+0 size 8",
.result = REJECT,
},
@@ -2707,6 +2731,137 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "unpriv: spill/fill of different pointers stx - ctx and sock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb == NULL) *target = sock; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* else *target = skb; */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* struct __sk_buff *skb = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* skb->mark = 42; */
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ /* if (sk) bpf_sk_release(sk) */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "type=ctx expected=sock",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "unpriv: spill/fill of different pointers stx - leak sock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb == NULL) *target = sock; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* else *target = skb; */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* struct __sk_buff *skb = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* skb->mark = 42; */
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ //.errstr = "same insn cannot be used with different pointers",
+ .errstr = "Unreleased reference",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb) *target = skb */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* else *target = sock */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* struct bpf_sock *sk = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct bpf_sock, mark)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb) *target = skb */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* else *target = sock */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* struct bpf_sock *sk = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, mark)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ //.errstr = "same insn cannot be used with different pointers",
+ .errstr = "cannot write into socket",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"unpriv: spill/fill of different pointers ldx",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
@@ -2743,7 +2898,7 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
@@ -2783,7 +2938,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 1 },
+ .fixup_map_hash_8b = { 1 },
.errstr_unpriv = "R1 pointer comparison",
.result_unpriv = REJECT,
.result = ACCEPT,
@@ -3275,7 +3430,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
BPF_EXIT_INSN(),
},
- .errstr = "BPF_ST stores into R1 context is not allowed",
+ .errstr = "BPF_ST stores into R1 ctx is not allowed",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -3287,7 +3442,7 @@ static struct bpf_test tests[] = {
BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
BPF_EXIT_INSN(),
},
- .errstr = "BPF_XADD stores into R1 context is not allowed",
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -3637,7 +3792,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
+ .errstr = "R3 pointer arithmetic on pkt_end",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -3922,7 +4077,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 5 },
+ .fixup_map_hash_8b = { 5 },
.result_unpriv = ACCEPT,
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
@@ -3938,7 +4093,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 1 },
+ .fixup_map_hash_8b = { 1 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_XDP,
@@ -3966,7 +4121,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 11 },
+ .fixup_map_hash_8b = { 11 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
},
@@ -3988,7 +4143,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 7 },
+ .fixup_map_hash_8b = { 7 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_XDP,
@@ -4010,7 +4165,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 6 },
+ .fixup_map_hash_8b = { 6 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_XDP,
@@ -4033,7 +4188,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 5 },
+ .fixup_map_hash_8b = { 5 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -4048,7 +4203,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 1 },
+ .fixup_map_hash_8b = { 1 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
@@ -4076,7 +4231,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 11 },
+ .fixup_map_hash_8b = { 11 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -4098,7 +4253,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 7 },
+ .fixup_map_hash_8b = { 7 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
@@ -4120,7 +4275,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 6 },
+ .fixup_map_hash_8b = { 6 },
.result = REJECT,
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
@@ -4391,6 +4546,85 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "prevent map lookup in sockmap",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_sockmap = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+ },
+ {
+ "prevent map lookup in sockhash",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_sockhash = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+ },
+ {
+ "prevent map lookup in xskmap",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_xskmap = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "prevent map lookup in stack trace",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_stacktrace = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+ },
+ {
+ "prevent map lookup in prog array",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog2 = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
+ },
+ {
"valid map access into an array with a constant",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
@@ -4404,7 +4638,7 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
@@ -4426,7 +4660,7 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
@@ -4450,7 +4684,7 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
@@ -4478,7 +4712,7 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
@@ -4498,7 +4732,7 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "invalid access to map value, value_size=48 off=48 size=8",
.result = REJECT,
},
@@ -4519,7 +4753,7 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R0 min value is outside of the array range",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -4541,7 +4775,7 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -4566,7 +4800,7 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.errstr = "R0 unbounded memory access",
.result_unpriv = REJECT,
@@ -4593,7 +4827,7 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.errstr = "invalid access to map value, value_size=48 off=44 size=8",
.result_unpriv = REJECT,
@@ -4623,12 +4857,183 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3, 11 },
+ .fixup_map_hash_48b = { 3, 11 },
.errstr = "R0 pointer += pointer",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "direct packet read test#1 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, queue_mapping)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, protocol)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_present)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "direct packet read test#2 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_tci)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_proto)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, priority)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, priority)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff,
+ ingress_ifindex)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "direct packet read test#3 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "direct packet read test#4 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid access of tc_classid for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid access of data_meta for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data_meta)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid access of flow_keys for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, flow_keys)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid write access to napi_id for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
"valid cgroup storage access",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -4656,7 +5061,7 @@ static struct bpf_test tests[] = {
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 1 },
+ .fixup_map_hash_8b = { 1 },
.result = REJECT,
.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
@@ -4676,7 +5081,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
- "invalid per-cgroup storage access 3",
+ "invalid cgroup storage access 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
@@ -4744,6 +5149,121 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
+ "valid per-cpu cgroup storage access",
+ .insns = {
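+ /* Pattern: BPF_LD_MAP_FD is patched via .fixup_percpu_cgroup_storage
+  * to reference a BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE map, and
+  * bpf_get_local_storage() returns a pointer into its value.
+  */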
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid per-cpu cgroup storage access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid per-cpu cgroup storage access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "fd 1 is not pointing to valid bpf_map",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid per-cpu cgroup storage access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=256 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid per-cpu cgroup storage access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid per-cpu cgroup storage access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 7),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid per-cpu cgroup storage access 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
"multiple registers share map_lookup_elem result",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
@@ -4758,7 +5278,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 4 },
+ .fixup_map_hash_8b = { 4 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
@@ -4779,8 +5299,8 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 4 },
- .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
@@ -4800,8 +5320,8 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 4 },
- .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
@@ -4821,8 +5341,8 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 4 },
- .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
@@ -4847,7 +5367,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 4 },
+ .fixup_map_hash_8b = { 4 },
.result = REJECT,
.errstr = "R4 !read_ok",
.prog_type = BPF_PROG_TYPE_SCHED_CLS
@@ -4875,7 +5395,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 4 },
+ .fixup_map_hash_8b = { 4 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS
},
@@ -4896,7 +5416,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R0 unbounded memory access",
.result = REJECT,
.errstr_unpriv = "R0 leaks addr",
@@ -5146,11 +5666,11 @@ static struct bpf_test tests[] = {
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 2 },
+ .fixup_map_hash_8b = { 2 },
.errstr_unpriv = "R2 leaks addr into mem",
.result_unpriv = REJECT,
.result = REJECT,
- .errstr = "BPF_XADD stores into R1 context is not allowed",
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
},
{
"leak pointer into ctx 2",
@@ -5165,7 +5685,7 @@ static struct bpf_test tests[] = {
.errstr_unpriv = "R10 leaks addr into mem",
.result_unpriv = REJECT,
.result = REJECT,
- .errstr = "BPF_XADD stores into R1 context is not allowed",
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
},
{
"leak pointer into ctx 3",
@@ -5176,7 +5696,7 @@ static struct bpf_test tests[] = {
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 1 },
+ .fixup_map_hash_8b = { 1 },
.errstr_unpriv = "R2 leaks addr into ctx",
.result_unpriv = REJECT,
.result = ACCEPT,
@@ -5198,7 +5718,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 4 },
+ .fixup_map_hash_8b = { 4 },
.errstr_unpriv = "R6 leaks addr into mem",
.result_unpriv = REJECT,
.result = ACCEPT,
@@ -5218,7 +5738,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5237,7 +5757,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5255,7 +5775,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "invalid access to map value, value_size=48 off=0 size=0",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5275,7 +5795,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "invalid access to map value, value_size=48 off=0 size=56",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5295,7 +5815,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R2 min value is negative",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5319,7 +5839,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5340,7 +5860,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5360,7 +5880,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=0",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5384,7 +5904,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=52",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5406,7 +5926,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R2 min value is negative",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5428,7 +5948,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R2 min value is negative",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5453,7 +5973,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5475,7 +5995,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5495,7 +6015,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5520,7 +6040,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=52",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5543,7 +6063,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R2 min value is negative",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5566,7 +6086,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R2 min value is negative",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5592,7 +6112,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5615,7 +6135,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5637,7 +6157,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5659,7 +6179,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R1 unbounded memory access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5685,7 +6205,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "invalid access to map value, value_size=48 off=4 size=45",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5709,7 +6229,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5732,7 +6252,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = REJECT,
.errstr = "R1 unbounded memory access",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5756,7 +6276,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5779,7 +6299,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = REJECT,
.errstr = "R1 unbounded memory access",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5804,7 +6324,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5828,7 +6348,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5852,7 +6372,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = REJECT,
.errstr = "R1 min value is negative",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5877,7 +6397,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5901,7 +6421,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5925,7 +6445,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = REJECT,
.errstr = "R1 min value is negative",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -5944,7 +6464,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map3 = { 3, 8 },
+ .fixup_map_hash_16b = { 3, 8 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5964,7 +6484,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
BPF_EXIT_INSN(),
},
- .fixup_map3 = { 3, 10 },
+ .fixup_map_hash_16b = { 3, 10 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5984,8 +6504,8 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
- .fixup_map3 = { 10 },
+ .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_16b = { 10 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=8 off=0 size=16",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -6006,7 +6526,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map3 = { 3, 9 },
+ .fixup_map_hash_16b = { 3, 9 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -6026,7 +6546,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map3 = { 3, 9 },
+ .fixup_map_hash_16b = { 3, 9 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=16 off=12 size=8",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -6046,7 +6566,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map3 = { 3, 9 },
+ .fixup_map_hash_16b = { 3, 9 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -6068,7 +6588,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map3 = { 3, 10 },
+ .fixup_map_hash_16b = { 3, 10 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -6089,7 +6609,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map3 = { 3, 10 },
+ .fixup_map_hash_16b = { 3, 10 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=16 off=12 size=8",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -6110,7 +6630,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map3 = { 3, 10 },
+ .fixup_map_hash_16b = { 3, 10 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -6133,7 +6653,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map3 = { 3, 11 },
+ .fixup_map_hash_16b = { 3, 11 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -6153,7 +6673,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map3 = { 3, 10 },
+ .fixup_map_hash_16b = { 3, 10 },
.result = REJECT,
.errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -6176,7 +6696,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map3 = { 3, 11 },
+ .fixup_map_hash_16b = { 3, 11 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=16 off=9 size=8",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -6198,7 +6718,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
@@ -6219,7 +6739,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
@@ -6236,7 +6756,7 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R1 !read_ok",
.errstr = "R1 !read_ok",
.result = REJECT,
@@ -6270,7 +6790,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
@@ -6298,7 +6818,7 @@ static struct bpf_test tests[] = {
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
@@ -6317,7 +6837,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R0 bitwise operator &= on pointer",
.result = REJECT,
},
@@ -6334,7 +6854,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R0 32-bit pointer arithmetic prohibited",
.result = REJECT,
},
@@ -6351,7 +6871,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R0 pointer arithmetic with /= operator",
.result = REJECT,
},
@@ -6368,7 +6888,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "invalid mem access 'inv'",
.result = REJECT,
@@ -6392,7 +6912,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R0 invalid mem access 'inv'",
.result = REJECT,
},
@@ -6415,7 +6935,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result = ACCEPT,
.result_unpriv = REJECT,
@@ -6661,7 +7181,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -6687,7 +7207,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "invalid access to map value, value_size=48 off=0 size=49",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -6715,7 +7235,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -6742,7 +7262,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -6814,7 +7334,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_csum_diff),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -6839,7 +7359,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_csum_diff),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -6862,7 +7382,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_csum_diff),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -6943,7 +7463,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -6964,7 +7484,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -6984,7 +7504,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -7059,7 +7579,7 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R0 max value is outside of the array range",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -7089,7 +7609,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr = "R0 max value is outside of the array range",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@@ -7137,7 +7657,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map_in_map = { 3 },
- .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
+ .errstr = "R1 pointer arithmetic on map_ptr prohibited",
.result = REJECT,
},
{
@@ -7442,7 +7962,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.result = REJECT,
},
@@ -7466,7 +7986,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.result = REJECT,
},
@@ -7492,7 +8012,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.result = REJECT,
},
@@ -7517,7 +8037,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.result = REJECT,
},
@@ -7541,7 +8061,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
{
@@ -7565,7 +8085,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.result = REJECT,
},
@@ -7611,7 +8131,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
{
@@ -7636,7 +8156,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.result = REJECT,
},
@@ -7662,7 +8182,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT,
},
{
@@ -7687,7 +8207,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.result = REJECT,
},
@@ -7714,7 +8234,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.result = REJECT,
},
@@ -7740,7 +8260,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.result = REJECT,
},
@@ -7769,7 +8289,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.result = REJECT,
},
@@ -7799,7 +8319,7 @@ static struct bpf_test tests[] = {
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
BPF_JMP_IMM(BPF_JA, 0, 0, -7),
},
- .fixup_map1 = { 4 },
+ .fixup_map_hash_8b = { 4 },
.errstr = "R0 invalid mem access 'inv'",
.result = REJECT,
},
@@ -7827,7 +8347,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.result = REJECT,
.result_unpriv = REJECT,
@@ -7854,7 +8374,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "R0 max value is outside of the array range",
.result = REJECT,
},
@@ -7879,7 +8399,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
},
@@ -7905,7 +8425,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT
},
{
@@ -7930,7 +8450,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "map_value pointer and 4294967295",
.result = REJECT
},
@@ -7956,7 +8476,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "R0 min value is outside of the array range",
.result = REJECT
},
@@ -7980,7 +8500,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 4 },
+ .fixup_map_hash_8b = { 4 },
.errstr = "value_size=8 off=1073741825",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
@@ -8005,7 +8525,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 4 },
+ .fixup_map_hash_8b = { 4 },
.errstr = "value 1073741823",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
@@ -8041,7 +8561,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT
},
{
@@ -8080,7 +8600,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
/* not actually fully unbounded, but the bound is very high */
.errstr = "R0 unbounded memory access",
.result = REJECT
@@ -8123,7 +8643,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
/* not actually fully unbounded, but the bound is very high */
.errstr = "R0 unbounded memory access",
.result = REJECT
@@ -8152,7 +8672,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT
},
{
@@ -8179,7 +8699,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "R0 max value is outside of the array range",
.result = REJECT
},
@@ -8209,7 +8729,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "R0 unbounded memory access",
.result = REJECT
},
@@ -8229,7 +8749,7 @@ static struct bpf_test tests[] = {
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "map_value pointer and 2147483646",
.result = REJECT
},
@@ -8251,7 +8771,7 @@ static struct bpf_test tests[] = {
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "pointer offset 1073741822",
.result = REJECT
},
@@ -8272,7 +8792,7 @@ static struct bpf_test tests[] = {
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "pointer offset -1073741822",
.result = REJECT
},
@@ -8294,7 +8814,7 @@ static struct bpf_test tests[] = {
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "map_value pointer and 1000000000000",
.result = REJECT
},
@@ -8314,7 +8834,7 @@ static struct bpf_test tests[] = {
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT,
.retval = POINTER_VALUE,
.result_unpriv = REJECT,
@@ -8335,7 +8855,7 @@ static struct bpf_test tests[] = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = ACCEPT,
.retval = POINTER_VALUE,
.result_unpriv = REJECT,
@@ -8403,7 +8923,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 5 },
+ .fixup_map_hash_8b = { 5 },
.errstr = "variable stack read R2",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
@@ -8484,7 +9004,7 @@ static struct bpf_test tests[] = {
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 3 },
+ .fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.errstr = "R0 unbounded memory access",
.result_unpriv = REJECT,
@@ -8811,7 +9331,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
+ .errstr = "R3 pointer arithmetic on pkt_end",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
},
@@ -8830,7 +9350,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
+ .errstr = "R3 pointer arithmetic on pkt_end",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
},
@@ -10018,7 +10538,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map1 = { 16 },
+ .fixup_map_hash_8b = { 16 },
.result = REJECT,
.errstr = "R0 min value is outside of the array range",
},
@@ -10969,7 +11489,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(), /* return 0 */
},
.prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map1 = { 23 },
+ .fixup_map_hash_8b = { 23 },
.result = ACCEPT,
},
{
@@ -11024,7 +11544,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(), /* return 1 */
},
.prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map1 = { 23 },
+ .fixup_map_hash_8b = { 23 },
.result = ACCEPT,
},
{
@@ -11079,7 +11599,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(), /* return 1 */
},
.prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map1 = { 23 },
+ .fixup_map_hash_8b = { 23 },
.result = REJECT,
.errstr = "invalid read from stack off -16+0 size 8",
},
@@ -11151,7 +11671,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map1 = { 12, 22 },
+ .fixup_map_hash_8b = { 12, 22 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=8 off=2 size=8",
},
@@ -11223,7 +11743,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map1 = { 12, 22 },
+ .fixup_map_hash_8b = { 12, 22 },
.result = ACCEPT,
},
{
@@ -11294,7 +11814,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JA, 0, 0, -8),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map1 = { 12, 22 },
+ .fixup_map_hash_8b = { 12, 22 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=8 off=2 size=8",
},
@@ -11366,7 +11886,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map1 = { 12, 22 },
+ .fixup_map_hash_8b = { 12, 22 },
.result = ACCEPT,
},
{
@@ -11437,7 +11957,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map1 = { 12, 22 },
+ .fixup_map_hash_8b = { 12, 22 },
.result = REJECT,
.errstr = "R0 invalid mem access 'inv'",
},
@@ -11782,7 +12302,7 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 13 },
+ .fixup_map_hash_8b = { 13 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
},
@@ -11809,7 +12329,7 @@ static struct bpf_test tests[] = {
BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 6 },
+ .fixup_map_hash_48b = { 6 },
.errstr = "invalid indirect read from stack off -8+0 size 8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
@@ -11841,8 +12361,8 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map2 = { 13 },
- .fixup_map4 = { 16 },
+ .fixup_map_hash_48b = { 13 },
+ .fixup_map_array_48b = { 16 },
.result = ACCEPT,
.retval = 1,
},
@@ -11874,7 +12394,7 @@ static struct bpf_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_in_map = { 16 },
- .fixup_map4 = { 13 },
+ .fixup_map_array_48b = { 13 },
.result = REJECT,
.errstr = "R0 invalid mem access 'map_ptr'",
},
@@ -11942,7 +12462,7 @@ static struct bpf_test tests[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "R6 invalid mem access 'inv'",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -11966,7 +12486,7 @@ static struct bpf_test tests[] = {
BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.errstr = "invalid read from stack off -16+0 size 8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -12088,7 +12608,7 @@ static struct bpf_test tests[] = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
- .fixup_map1 = { 3 },
+ .fixup_map_hash_8b = { 3 },
.result = REJECT,
.errstr = "misaligned value access off",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
@@ -12114,7 +12634,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.result = REJECT,
- .errstr = "BPF_XADD stores into R2 packet",
+ .errstr = "BPF_XADD stores into R2 pkt is not allowed",
.prog_type = BPF_PROG_TYPE_XDP,
},
{
@@ -12198,7 +12718,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_get_stack),
BPF_EXIT_INSN(),
},
- .fixup_map2 = { 4 },
+ .fixup_map_hash_48b = { 4 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -12442,6 +12962,214 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
},
{
+ "reference tracking: leak potential reference",
+ .insns = {
+ BPF_SK_LOOKUP,
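+ /* BPF_SK_LOOKUP is a macro, presumably defined earlier in this
+  * file, that emits a bpf_sk_lookup_tcp() call; R0 then holds an
+  * acquired socket pointer (or NULL) that must be released with
+  * bpf_sk_release().
+  */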
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: leak potential reference on stack",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: leak potential reference on stack 2",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: zero potential reference",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: copy and zero potential references",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: release reference without check",
+ .insns = {
+ BPF_SK_LOOKUP,
+ /* reference in r0 may be NULL */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=sock_or_null expected=sock",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: release reference",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "reference tracking: release reference 2",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "reference tracking: release reference twice",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: release reference twice inside branch",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: alloc, check, free in one subbranch",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
+ /* if (data + 16 > data_end) exit; */
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
+ offsetof(struct __sk_buff, mark)),
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
+ /* Leak reference in R0 */
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: alloc, check, free in both subbranches",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
+ /* if (data + 16 > data_end) exit; */
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
+ offsetof(struct __sk_buff, mark)),
+ BPF_SK_LOOKUP,
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "reference tracking in call: free reference in subprog",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
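+ /* src_reg == BPF_PSEUDO_CALL: a call into BPF code, not a helper.
+  * The target is the instruction at pc + imm + 1, so imm = 2 skips
+  * the MOV and EXIT below and lands on subprog 1.
+  */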
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
"pass modified ctx pointer to helper, 1",
.insns = {
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
@@ -12511,6 +13239,407 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
+ {
+ "reference tracking in call: free reference in subprog and outside",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+ },
+ {
+ "reference tracking in call: alloc & leak reference in subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
+ BPF_SK_LOOKUP,
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ },
+ {
+ "reference tracking in call: alloc in subprog, release outside",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(), /* return sk */
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = POINTER_VALUE,
+ .result = ACCEPT,
+ },
+ {
+ "reference tracking in call: sk_ptr leak into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ },
+ {
+ "reference tracking in call: sk_ptr spill into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* now the sk_ptr is verified, free the reference */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_SK_LOOKUP,
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "reference tracking: allow LD_ABS",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "reference tracking: forbid LD_ABS while holding reference",
+ .insns = {
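+ /* Classic LD_ABS/LD_IND can terminate the program directly on
+  * failure, which would leak the socket reference still held here,
+  * so the verifier forbids them until the reference is released.
+  */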
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: allow LD_IND",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ },
+ {
+ "reference tracking: forbid LD_IND while holding reference",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: check reference or tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 17 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "reference tracking: release reference then tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_SK_LOOKUP,
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 18 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "reference tracking: leak possible reference over tail call",
+ .insns = {
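+ /* A successful tail call never returns to this program, so entering
+  * one while a (possibly non-NULL) reference is still held would
+  * leak it; the verifier rejects this.
+  */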
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ /* Look up socket and store in REG_6 */
+ BPF_SK_LOOKUP,
+ /* bpf_tail_call() */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 16 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "tail_call would lead to reference leak",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: leak checked reference over tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ /* Look up socket and store in REG_6 */
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ /* if (!sk) goto end */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 17 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "tail_call would lead to reference leak",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: mangle and release sock_or_null",
+ .insns = {
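+ /* sk_release() must receive the exact pointer returned by the
+  * lookup; arithmetic on the not-yet-NULL-checked pointer is
+  * rejected.
+  */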
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: mangle and release sock",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R1 pointer arithmetic on sock prohibited",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: access member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "reference tracking: write to member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LD_IMM64(BPF_REG_2, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
+ offsetof(struct bpf_sock, mark)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "cannot write into socket",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: invalid 64-bit access of member",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid bpf_sock access off=0 size=8",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: access after release",
+ .insns = {
+ BPF_SK_LOOKUP,
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "!read_ok",
+ .result = REJECT,
+ },
+ {
+ "reference tracking: direct access for lookup",
+ .insns = {
+ /* Check that the packet is at least 64B long */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
+ /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)
@@ -12536,18 +13665,18 @@ static int create_map(uint32_t type, uint32_t size_key,
return fd;
}
-static int create_prog_dummy1(void)
+static int create_prog_dummy1(enum bpf_prog_type prog_type)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, 42),
BPF_EXIT_INSN(),
};
- return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
+ return bpf_load_program(prog_type, prog,
ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
-static int create_prog_dummy2(int mfd, int idx)
+static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_3, idx),
@@ -12558,11 +13687,12 @@ static int create_prog_dummy2(int mfd, int idx)
BPF_EXIT_INSN(),
};
- return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
+ return bpf_load_program(prog_type, prog,
ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
-static int create_prog_array(uint32_t max_elem, int p1key)
+static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
+ int p1key)
{
int p2key = 1;
int mfd, p1fd, p2fd;
@@ -12574,8 +13704,8 @@ static int create_prog_array(uint32_t max_elem, int p1key)
return -1;
}
- p1fd = create_prog_dummy1();
- p2fd = create_prog_dummy2(mfd, p2key);
+ p1fd = create_prog_dummy1(prog_type);
+ p2fd = create_prog_dummy2(prog_type, mfd, p2key);
if (p1fd < 0 || p2fd < 0)
goto out;
if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
@@ -12615,32 +13745,39 @@ static int create_map_in_map(void)
return outer_map_fd;
}
-static int create_cgroup_storage(void)
+static int create_cgroup_storage(bool percpu)
{
+ enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
+ BPF_MAP_TYPE_CGROUP_STORAGE;
int fd;
- fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE,
- sizeof(struct bpf_cgroup_storage_key),
+ fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
TEST_DATA_LEN, 0, 0);
if (fd < 0)
- printf("Failed to create array '%s'!\n", strerror(errno));
+ printf("Failed to create cgroup storage '%s'!\n",
+ strerror(errno));
return fd;
}
static char bpf_vlog[UINT_MAX >> 8];
-static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
- int *map_fds)
+static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
+ struct bpf_insn *prog, int *map_fds)
{
- int *fixup_map1 = test->fixup_map1;
- int *fixup_map2 = test->fixup_map2;
- int *fixup_map3 = test->fixup_map3;
- int *fixup_map4 = test->fixup_map4;
+ int *fixup_map_hash_8b = test->fixup_map_hash_8b;
+ int *fixup_map_hash_48b = test->fixup_map_hash_48b;
+ int *fixup_map_hash_16b = test->fixup_map_hash_16b;
+ int *fixup_map_array_48b = test->fixup_map_array_48b;
+ int *fixup_map_sockmap = test->fixup_map_sockmap;
+ int *fixup_map_sockhash = test->fixup_map_sockhash;
+ int *fixup_map_xskmap = test->fixup_map_xskmap;
+ int *fixup_map_stacktrace = test->fixup_map_stacktrace;
int *fixup_prog1 = test->fixup_prog1;
int *fixup_prog2 = test->fixup_prog2;
int *fixup_map_in_map = test->fixup_map_in_map;
int *fixup_cgroup_storage = test->fixup_cgroup_storage;
+ int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
if (test->fill_helper)
test->fill_helper(test);
@@ -12649,44 +13786,44 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
* for verifier and not do a runtime lookup, so the only thing
* that really matters is value size in this case.
*/
- if (*fixup_map1) {
+ if (*fixup_map_hash_8b) {
map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
sizeof(long long), 1);
do {
- prog[*fixup_map1].imm = map_fds[0];
- fixup_map1++;
- } while (*fixup_map1);
+ prog[*fixup_map_hash_8b].imm = map_fds[0];
+ fixup_map_hash_8b++;
+ } while (*fixup_map_hash_8b);
}
- if (*fixup_map2) {
+ if (*fixup_map_hash_48b) {
map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
sizeof(struct test_val), 1);
do {
- prog[*fixup_map2].imm = map_fds[1];
- fixup_map2++;
- } while (*fixup_map2);
+ prog[*fixup_map_hash_48b].imm = map_fds[1];
+ fixup_map_hash_48b++;
+ } while (*fixup_map_hash_48b);
}
- if (*fixup_map3) {
+ if (*fixup_map_hash_16b) {
map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
sizeof(struct other_val), 1);
do {
- prog[*fixup_map3].imm = map_fds[2];
- fixup_map3++;
- } while (*fixup_map3);
+ prog[*fixup_map_hash_16b].imm = map_fds[2];
+ fixup_map_hash_16b++;
+ } while (*fixup_map_hash_16b);
}
- if (*fixup_map4) {
+ if (*fixup_map_array_48b) {
map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
sizeof(struct test_val), 1);
do {
- prog[*fixup_map4].imm = map_fds[3];
- fixup_map4++;
- } while (*fixup_map4);
+ prog[*fixup_map_array_48b].imm = map_fds[3];
+ fixup_map_array_48b++;
+ } while (*fixup_map_array_48b);
}
if (*fixup_prog1) {
- map_fds[4] = create_prog_array(4, 0);
+ map_fds[4] = create_prog_array(prog_type, 4, 0);
do {
prog[*fixup_prog1].imm = map_fds[4];
fixup_prog1++;
@@ -12694,7 +13831,7 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
}
if (*fixup_prog2) {
- map_fds[5] = create_prog_array(8, 7);
+ map_fds[5] = create_prog_array(prog_type, 8, 7);
do {
prog[*fixup_prog2].imm = map_fds[5];
fixup_prog2++;
@@ -12710,12 +13847,52 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
}
if (*fixup_cgroup_storage) {
- map_fds[7] = create_cgroup_storage();
+ map_fds[7] = create_cgroup_storage(false);
do {
prog[*fixup_cgroup_storage].imm = map_fds[7];
fixup_cgroup_storage++;
} while (*fixup_cgroup_storage);
}
+
+ if (*fixup_percpu_cgroup_storage) {
+ map_fds[8] = create_cgroup_storage(true);
+ do {
+ prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
+ fixup_percpu_cgroup_storage++;
+ } while (*fixup_percpu_cgroup_storage);
+ }
+ if (*fixup_map_sockmap) {
+ map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
+ sizeof(int), 1);
+ do {
+ prog[*fixup_map_sockmap].imm = map_fds[9];
+ fixup_map_sockmap++;
+ } while (*fixup_map_sockmap);
+ }
+ if (*fixup_map_sockhash) {
+ map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
+ sizeof(int), 1);
+ do {
+ prog[*fixup_map_sockhash].imm = map_fds[10];
+ fixup_map_sockhash++;
+ } while (*fixup_map_sockhash);
+ }
+ if (*fixup_map_xskmap) {
+ map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
+ sizeof(int), 1);
+ do {
+ prog[*fixup_map_xskmap].imm = map_fds[11];
+ fixup_map_xskmap++;
+ } while (*fixup_map_xskmap);
+ }
+ if (*fixup_map_stacktrace) {
+ map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
+ sizeof(u64), 1);
+ do {
+ prog[*fixup_map_stacktrace].imm = map_fds[12];
+ fixup_map_stacktrace++;
+ } while (*fixup_map_stacktrace);
+ }
}
static void do_test_single(struct bpf_test *test, bool unpriv,
@@ -12732,11 +13909,13 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
for (i = 0; i < MAX_NR_MAPS; i++)
map_fds[i] = -1;
- do_test_fixup(test, prog, map_fds);
+ if (!prog_type)
+ prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ do_test_fixup(test, prog_type, prog, map_fds);
prog_len = probe_filter_length(prog);
- fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
- prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
+ fd_prog = bpf_verify_program(prog_type, prog, prog_len,
+ test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
"GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
expected_ret = unpriv && test->result_unpriv != UNDEF ?
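The fixup_* arrays used by the tests above hold zero-terminated lists of instruction indices; do_test_fixup() rewrites the imm field of each listed instruction with the fd of a map created at setup time. A minimal sketch of that patching loop follows (the helper name is hypothetical, not part of the patch):

/* Hypothetical helper mirroring the do/while loops in do_test_fixup():
 * each index in the zero-terminated fixup array names an instruction
 * whose imm field receives the freshly created map fd.
 */
static void patch_map_fd(struct bpf_insn *prog, int *fixup, int map_fd)
{
        do {
                prog[*fixup].imm = map_fd;
                fixup++;
        } while (*fixup);
}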
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.c b/tools/testing/selftests/bpf/test_xdp_vlan.c
new file mode 100644
index 000000000000..365a7d2d9f5c
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_vlan.c
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright(c) 2018 Jesper Dangaard Brouer.
+ *
+ * XDP/TC VLAN manipulation example
+ *
+ * GOTCHA: Remember to disable NIC hardware offloading of VLANs,
+ * else the VLAN tags are NOT inlined in the packet payload:
+ *
+ * # ethtool -K ixgbe2 rxvlan off
+ *
+ * Verify setting:
+ * # ethtool -k ixgbe2 | grep rx-vlan-offload
+ * rx-vlan-offload: off
+ *
+ */
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/pkt_cls.h>
+
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+/* linux/if_vlan.h has not exposed this as UAPI, thus mirror some of it here
+ *
+ * struct vlan_hdr - vlan header
+ * @h_vlan_TCI: priority and VLAN ID
+ * @h_vlan_encapsulated_proto: packet type ID or len
+ */
+struct _vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
+#define VLAN_PRIO_SHIFT 13
+#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
+#define VLAN_TAG_PRESENT VLAN_CFI_MASK
+#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
+#define VLAN_N_VID 4096
+
+struct parse_pkt {
+ __u16 l3_proto;
+ __u16 l3_offset;
+ __u16 vlan_outer;
+ __u16 vlan_inner;
+ __u8 vlan_outer_offset;
+ __u8 vlan_inner_offset;
+};
+
+char _license[] SEC("license") = "GPL";
+
+static __always_inline
+bool parse_eth_frame(struct ethhdr *eth, void *data_end, struct parse_pkt *pkt)
+{
+ __u16 eth_type;
+ __u8 offset;
+
+ offset = sizeof(*eth);
+ /* Make sure packet is large enough for parsing eth + 2 VLAN headers */
+ if ((void *)eth + offset + (2*sizeof(struct _vlan_hdr)) > data_end)
+ return false;
+
+ eth_type = eth->h_proto;
+
+ /* Handle outer VLAN tag */
+ if (eth_type == bpf_htons(ETH_P_8021Q)
+ || eth_type == bpf_htons(ETH_P_8021AD)) {
+ struct _vlan_hdr *vlan_hdr;
+
+ vlan_hdr = (void *)eth + offset;
+ pkt->vlan_outer_offset = offset;
+ pkt->vlan_outer = bpf_ntohs(vlan_hdr->h_vlan_TCI)
+ & VLAN_VID_MASK;
+ eth_type = vlan_hdr->h_vlan_encapsulated_proto;
+ offset += sizeof(*vlan_hdr);
+ }
+
+ /* Handle inner (double) VLAN tag */
+ if (eth_type == bpf_htons(ETH_P_8021Q)
+ || eth_type == bpf_htons(ETH_P_8021AD)) {
+ struct _vlan_hdr *vlan_hdr;
+
+ vlan_hdr = (void *)eth + offset;
+ pkt->vlan_inner_offset = offset;
+ pkt->vlan_inner = bpf_ntohs(vlan_hdr->h_vlan_TCI)
+ & VLAN_VID_MASK;
+ eth_type = vlan_hdr->h_vlan_encapsulated_proto;
+ offset += sizeof(*vlan_hdr);
+ }
+
+ pkt->l3_proto = bpf_ntohs(eth_type); /* Convert to host-byte-order */
+ pkt->l3_offset = offset;
+
+ return true;
+}
+
+/* Hint: VLANs are chosen to hit network-byte-order issues */
+#define TESTVLAN 4011 /* 0xFAB */
+// #define TO_VLAN 4000 /* 0xFA0 (hint 0x0A0 = 160) */
+
+SEC("xdp_drop_vlan_4011")
+int xdp_prognum0(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct parse_pkt pkt = { 0 };
+
+ if (!parse_eth_frame(data, data_end, &pkt))
+ return XDP_ABORTED;
+
+ /* Drop specific VLAN ID example */
+ if (pkt.vlan_outer == TESTVLAN)
+ return XDP_ABORTED;
+ /*
+ * Using XDP_ABORTED makes it possible to record this event,
+ * via tracepoint xdp:xdp_exception like:
+ * # perf record -a -e xdp:xdp_exception
+ * # perf script
+ */
+ return XDP_PASS;
+}
+/*
+Commands to set up a VLAN on Linux and test that packets get dropped:
+
+ export ROOTDEV=ixgbe2
+ export VLANID=4011
+ ip link add link $ROOTDEV name $ROOTDEV.$VLANID type vlan id $VLANID
+ ip link set dev $ROOTDEV.$VLANID up
+
+ ip link set dev $ROOTDEV mtu 1508
+ ip addr add 100.64.40.11/24 dev $ROOTDEV.$VLANID
+
+Load prog with ip tool:
+
+ ip link set $ROOTDEV xdp off
+ ip link set $ROOTDEV xdp object xdp_vlan01_kern.o section xdp_drop_vlan_4011
+
+*/
+
+/* Changing the VLAN ID to zero has the same practical effect as removing the VLAN. */
+#define TO_VLAN 0
+
+SEC("xdp_vlan_change")
+int xdp_prognum1(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct parse_pkt pkt = { 0 };
+
+ if (!parse_eth_frame(data, data_end, &pkt))
+ return XDP_ABORTED;
+
+ /* Change specific VLAN ID */
+ if (pkt.vlan_outer == TESTVLAN) {
+ struct _vlan_hdr *vlan_hdr = data + pkt.vlan_outer_offset;
+
+ /* Modifying VLAN, preserve top 4 bits */
+ vlan_hdr->h_vlan_TCI =
+ bpf_htons((bpf_ntohs(vlan_hdr->h_vlan_TCI) & 0xf000)
+ | TO_VLAN);
+ }
+
+ return XDP_PASS;
+}
+
+/*
+ * Show that XDP and TC can cooperate to create a VLAN rewriter.
+ * 1. Create an XDP prog that can "pop"/remove a VLAN header.
+ * 2. Create a TC-bpf prog that can add a VLAN header on egress.
+ */
+
+#ifndef ETH_ALEN /* Ethernet MAC address length */
+#define ETH_ALEN 6 /* bytes */
+#endif
+#define VLAN_HDR_SZ 4 /* bytes */
+
+SEC("xdp_vlan_remove_outer")
+int xdp_prognum2(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct parse_pkt pkt = { 0 };
+ char *dest;
+
+ if (!parse_eth_frame(data, data_end, &pkt))
+ return XDP_ABORTED;
+
+ /* Skip packet if no outer VLAN was detected */
+ if (pkt.vlan_outer_offset == 0)
+ return XDP_PASS;
+
+ /* Moving the Ethernet header; dest overlaps with src, which memmove handles */
+ dest = data;
+ dest += VLAN_HDR_SZ;
+ /*
+ * Notice: vlan_hdr->h_vlan_encapsulated_proto takes over as h_proto,
+ * because only the two MAC addrs (12 bytes) are moved and the last
+ * 2 bytes are not overwritten
+ */
+ __builtin_memmove(dest, data, ETH_ALEN * 2);
+ /* Note: LLVM's built-in memmove inlining requires the size to be constant */
+
+ /* Move start of packet header seen by Linux kernel stack */
+ bpf_xdp_adjust_head(ctx, VLAN_HDR_SZ);
+
+ return XDP_PASS;
+}
+
+static __always_inline
+void shift_mac_4bytes_16bit(void *data)
+{
+ __u16 *p = data;
+
+ p[7] = p[5]; /* overwrites p[7], which held vlan_hdr->h_vlan_TCI */
+ p[6] = p[4]; /* overwrites p[6], which held ethhdr->h_proto */
+ p[5] = p[3];
+ p[4] = p[2];
+ p[3] = p[1];
+ p[2] = p[0];
+}
+
+static __always_inline
+void shift_mac_4bytes_32bit(void *data)
+{
+ __u32 *p = data;
+
+ /* Assumes a VLAN hdr is present. The 4 bytes in p[3] that get
+ * overwritten are ethhdr->h_proto and vlan_hdr->h_vlan_TCI.
+ * vlan_hdr->h_vlan_encapsulated_proto takes over the role of
+ * ethhdr->h_proto.
+ */
+ p[3] = p[2];
+ p[2] = p[1];
+ p[1] = p[0];
+}
+
+SEC("xdp_vlan_remove_outer2")
+int xdp_prognum3(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct ethhdr *orig_eth = data;
+ struct parse_pkt pkt = { 0 };
+
+ if (!parse_eth_frame(orig_eth, data_end, &pkt))
+ return XDP_ABORTED;
+
+ /* Skip packet if no outer VLAN was detected */
+ if (pkt.vlan_outer_offset == 0)
+ return XDP_PASS;
+
+ /* Simply shift down MAC addrs 4 bytes, overwrite h_proto + TCI */
+ shift_mac_4bytes_32bit(data);
+
+ /* Move start of packet header seen by Linux kernel stack */
+ bpf_xdp_adjust_head(ctx, VLAN_HDR_SZ);
+
+ return XDP_PASS;
+}
+
+/*=====================================
+ * BELOW: TC-hook based ebpf programs
+ * ====================================
+ * The TC-clsact eBPF programs (currently) need to be attached via TC commands
+ */
+
+SEC("tc_vlan_push")
+int _tc_progA(struct __sk_buff *ctx)
+{
+ bpf_skb_vlan_push(ctx, bpf_htons(ETH_P_8021Q), TESTVLAN);
+
+ return TC_ACT_OK;
+}
+/*
+Commands to set up TC to use the above bpf prog:
+
+export ROOTDEV=ixgbe2
+export FILE=xdp_vlan01_kern.o
+
+# Re-attach clsact to clear/flush existing role
+tc qdisc del dev $ROOTDEV clsact 2> /dev/null ;\
+tc qdisc add dev $ROOTDEV clsact
+
+# Attach BPF prog EGRESS
+tc filter add dev $ROOTDEV egress \
+ prio 1 handle 1 bpf da obj $FILE sec tc_vlan_push
+
+tc filter show dev $ROOTDEV egress
+*/
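The parse helper above masks the byte-swapped TCI with VLAN_VID_MASK; for completeness, a small sketch (not part of the patch) of decoding both the VLAN ID and the priority bits from a host-order TCI, using the masks defined in this file:

/* Sketch: split a host-byte-order TCI into VLAN ID and priority,
 * using the VLAN_*_MASK/SHIFT definitions from this file.
 */
static inline __u16 tci_to_vid(__u16 tci)
{
        return tci & VLAN_VID_MASK;                       /* low 12 bits */
}

static inline __u8 tci_to_prio(__u16 tci)
{
        return (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; /* top 3 bits */
}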
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
new file mode 100755
index 000000000000..51a3a31d1aac
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
@@ -0,0 +1,195 @@
+#!/bin/bash
+
+TESTNAME=xdp_vlan
+
+usage() {
+ echo "Testing XDP + TC eBPF VLAN manipulations: $TESTNAME"
+ echo ""
+ echo "Usage: $0 [-vfh]"
+ echo " -v | --verbose : Verbose"
+ echo " --flush : Flush before starting (e.g. after --interactive)"
+ echo " --interactive : Keep netns setup running after test-run"
+ echo ""
+}
+
+cleanup()
+{
+ local status=$?
+
+ if [ "$status" = "0" ]; then
+ echo "selftests: $TESTNAME [PASS]";
+ else
+ echo "selftests: $TESTNAME [FAILED]";
+ fi
+
+ if [ -n "$INTERACTIVE" ]; then
+ echo "Namespace setup still active explore with:"
+ echo " ip netns exec ns1 bash"
+ echo " ip netns exec ns2 bash"
+ exit $status
+ fi
+
+ set +e
+ ip link del veth1 2> /dev/null
+ ip netns del ns1 2> /dev/null
+ ip netns del ns2 2> /dev/null
+}
+
+# Using external program "getopt" to get --long-options
+OPTIONS=$(getopt -o hvfi \
+ --long verbose,flush,help,interactive,debug -- "$@")
+if (( $? != 0 )); then
+ usage
+ echo "selftests: $TESTNAME [FAILED] Error calling getopt, unknown option?"
+ exit 2
+fi
+eval set -- "$OPTIONS"
+
+## --- Parse command line arguments / parameters ---
+while true; do
+ case "$1" in
+ -v | --verbose)
+ export VERBOSE=yes
+ shift
+ ;;
+ -i | --interactive | --debug )
+ INTERACTIVE=yes
+ shift
+ ;;
+ -f | --flush )
+ cleanup
+ shift
+ ;;
+ -- )
+ shift
+ break
+ ;;
+ -h | --help )
+ usage;
+ echo "selftests: $TESTNAME [SKIP] usage help info requested"
+ exit 0
+ ;;
+ * )
+ shift
+ break
+ ;;
+ esac
+done
+
+if [ "$EUID" -ne 0 ]; then
+ echo "selftests: $TESTNAME [FAILED] need root privileges"
+ exit 1
+fi
+
+ip link set dev lo xdp off 2>/dev/null > /dev/null
+if [ $? -ne 0 ]; then
+ echo "selftests: $TESTNAME [SKIP] need ip xdp support"
+ exit 0
+fi
+
+# Interactive mode likely requires us to clean up the netns setup
+if [ -n "$INTERACTIVE" ]; then
+ ip link del veth1 2> /dev/null
+ ip netns del ns1 2> /dev/null
+ ip netns del ns2 2> /dev/null
+fi
+
+# Exit on failure
+set -e
+
+# Some shell-tools dependencies
+which ip > /dev/null
+which tc > /dev/null
+which ethtool > /dev/null
+
+# Make rest of shell verbose, showing comments as doc/info
+if [ -n "$VERBOSE" ]; then
+ set -v
+fi
+
+# Create two namespaces
+ip netns add ns1
+ip netns add ns2
+
+# Run cleanup if failing or on kill
+trap cleanup 0 2 3 6 9
+
+# Create veth pair
+ip link add veth1 type veth peer name veth2
+
+# Move veth1 and veth2 into the respective namespaces
+ip link set veth1 netns ns1
+ip link set veth2 netns ns2
+
+# NOTICE: XDP requires the VLAN header inside the packet payload
+# - Thus, disable the VLAN offloading driver features
+# - For veth, REMEMBER to disable VLAN-offload on the TX side too
+#
+# Disable rx-vlan-offload (mostly needed on ns1)
+ip netns exec ns1 ethtool -K veth1 rxvlan off
+ip netns exec ns2 ethtool -K veth2 rxvlan off
+#
+# Disable tx-vlan-offload (mostly needed on ns2)
+ip netns exec ns2 ethtool -K veth2 txvlan off
+ip netns exec ns1 ethtool -K veth1 txvlan off
+
+export IPADDR1=100.64.41.1
+export IPADDR2=100.64.41.2
+
+# In ns1/veth1 add IP-addr on plain net_device
+ip netns exec ns1 ip addr add ${IPADDR1}/24 dev veth1
+ip netns exec ns1 ip link set veth1 up
+
+# In ns2/veth2 create VLAN device
+export VLAN=4011
+export DEVNS2=veth2
+ip netns exec ns2 ip link add link $DEVNS2 name $DEVNS2.$VLAN type vlan id $VLAN
+ip netns exec ns2 ip addr add ${IPADDR2}/24 dev $DEVNS2.$VLAN
+ip netns exec ns2 ip link set $DEVNS2 up
+ip netns exec ns2 ip link set $DEVNS2.$VLAN up
+
+# Bring up lo in each netns (to avoid confusing people using --interactive)
+ip netns exec ns1 ip link set lo up
+ip netns exec ns2 ip link set lo up
+
+# At this point, the hosts cannot reach each other,
+# because ns2 is using VLAN tags on the packets.
+
+ip netns exec ns2 sh -c 'ping -W 1 -c 1 100.64.41.1 || echo "Okay ping fails"'
+
+
+# Now we can use the test_xdp_vlan.c program to pop/push these VLAN tags
+# ----------------------------------------------------------------------
+# In ns1: ingress use XDP to remove VLAN tags
+export DEVNS1=veth1
+export FILE=test_xdp_vlan.o
+
+# First test: Remove VLAN by setting VLAN ID 0, using "xdp_vlan_change"
+export XDP_PROG=xdp_vlan_change
+ip netns exec ns1 ip link set $DEVNS1 xdp object $FILE section $XDP_PROG
+
+# In ns1: egress use TC to add back VLAN tag 4011
+# (del cmd)
+# tc qdisc del dev $DEVNS1 clsact 2> /dev/null
+#
+ip netns exec ns1 tc qdisc add dev $DEVNS1 clsact
+ip netns exec ns1 tc filter add dev $DEVNS1 egress \
+ prio 1 handle 1 bpf da obj $FILE sec tc_vlan_push
+
+# Now the namespaces can reach each other; test with ping:
+ip netns exec ns2 ping -W 2 -c 3 $IPADDR1
+ip netns exec ns1 ping -W 2 -c 3 $IPADDR2
+
+# Second test: Replace the XDP prog with one that fully removes the VLAN header
+#
+# This catches a kernel bug in generic-XDP that didn't allow us to
+# remove a VLAN header, because skb->protocol still contained the VLAN
+# ETH_P_8021Q indication, and this caused our changes to be overwritten.
+#
+export XDP_PROG=xdp_vlan_remove_outer2
+ip netns exec ns1 ip link set $DEVNS1 xdp off
+ip netns exec ns1 ip link set $DEVNS1 xdp object $FILE section $XDP_PROG
+
+# Now the namespaces should still be able to reach each other; test with ping:
+ip netns exec ns2 ping -W 2 -c 3 $IPADDR1
+ip netns exec ns1 ping -W 2 -c 3 $IPADDR2
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index cabe2a3a3b30..4cdb63bf0521 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -41,6 +41,7 @@ int load_kallsyms(void)
syms[i].name = strdup(func);
i++;
}
+ fclose(f);
sym_cnt = i;
qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
return 0;
@@ -124,10 +125,11 @@ struct perf_event_sample {
char data[];
};
-static enum bpf_perf_event_ret bpf_perf_event_print(void *event, void *priv)
+static enum bpf_perf_event_ret
+bpf_perf_event_print(struct perf_event_header *hdr, void *private_data)
{
- struct perf_event_sample *e = event;
- perf_event_print_fn fn = priv;
+ struct perf_event_sample *e = (struct perf_event_sample *)hdr;
+ perf_event_print_fn fn = private_data;
int ret;
if (e->header.type == PERF_RECORD_SAMPLE) {
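The new callback signature hands over a struct perf_event_header pointer; the cast back to perf_event_sample is valid because the sample layout (shown above in trace_helpers.c) embeds the header as its first member. A sketch of that relationship, assuming the same layout:

/* Sketch (layout assumed from trace_helpers.c): the header is the
 * first member, so a header pointer also addresses the sample.
 */
static inline struct perf_event_sample *
to_sample(struct perf_event_header *hdr)
{
        return (struct perf_event_sample *)hdr;
}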
diff --git a/tools/testing/selftests/bpf/with_addr.sh b/tools/testing/selftests/bpf/with_addr.sh
new file mode 100755
index 000000000000..ffcd3953f94c
--- /dev/null
+++ b/tools/testing/selftests/bpf/with_addr.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# add private ipv4 and ipv6 addresses to loopback
+
+readonly V6_INNER='100::a/128'
+readonly V4_INNER='192.168.0.1/32'
+
+if getopts ":s" opt; then
+ readonly SIT_DEV_NAME='sixtofourtest0'
+ readonly V6_SIT='2::/64'
+ readonly V4_SIT='172.17.0.1/32'
+ shift
+fi
+
+fail() {
+ echo "error: $*" 1>&2
+ exit 1
+}
+
+setup() {
+ ip -6 addr add "${V6_INNER}" dev lo || fail 'failed to setup v6 address'
+ ip -4 addr add "${V4_INNER}" dev lo || fail 'failed to setup v4 address'
+
+ if [[ -n "${V6_SIT}" ]]; then
+ ip link add "${SIT_DEV_NAME}" type sit remote any local any \
+ || fail 'failed to add sit'
+ ip link set dev "${SIT_DEV_NAME}" up \
+ || fail 'failed to bring sit device up'
+ ip -6 addr add "${V6_SIT}" dev "${SIT_DEV_NAME}" \
+ || fail 'failed to setup v6 SIT address'
+ ip -4 addr add "${V4_SIT}" dev "${SIT_DEV_NAME}" \
+ || fail 'failed to setup v4 SIT address'
+ fi
+
+ sleep 2 # avoid race causing bind to fail
+}
+
+cleanup() {
+ if [[ -n "${V6_SIT}" ]]; then
+ ip -4 addr del "${V4_SIT}" dev "${SIT_DEV_NAME}"
+ ip -6 addr del "${V6_SIT}" dev "${SIT_DEV_NAME}"
+ ip link del "${SIT_DEV_NAME}"
+ fi
+
+ ip -4 addr del "${V4_INNER}" dev lo
+ ip -6 addr del "${V6_INNER}" dev lo
+}
+
+trap cleanup EXIT
+
+setup
+"$@"
+exit "$?"
diff --git a/tools/testing/selftests/bpf/with_tunnels.sh b/tools/testing/selftests/bpf/with_tunnels.sh
new file mode 100755
index 000000000000..e24949ed3a20
--- /dev/null
+++ b/tools/testing/selftests/bpf/with_tunnels.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# setup tunnels for flow dissection test
+
+readonly SUFFIX="test_$(mktemp -u XXXX)"
+CONFIG="remote 127.0.0.2 local 127.0.0.1 dev lo"
+
+setup() {
+ ip link add "ipip_${SUFFIX}" type ipip ${CONFIG}
+ ip link add "gre_${SUFFIX}" type gre ${CONFIG}
+ ip link add "sit_${SUFFIX}" type sit ${CONFIG}
+
+ echo "tunnels before test:"
+ ip tunnel show
+
+ ip link set "ipip_${SUFFIX}" up
+ ip link set "gre_${SUFFIX}" up
+ ip link set "sit_${SUFFIX}" up
+}
+
+
+cleanup() {
+ ip tunnel del "ipip_${SUFFIX}"
+ ip tunnel del "gre_${SUFFIX}"
+ ip tunnel del "sit_${SUFFIX}"
+
+ echo "tunnels after test:"
+ ip tunnel show
+}
+
+trap cleanup EXIT
+
+setup
+"$@"
+exit "$?"
diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore
index 95eb3a53c381..adacda50a4b2 100644
--- a/tools/testing/selftests/cgroup/.gitignore
+++ b/tools/testing/selftests/cgroup/.gitignore
@@ -1 +1,2 @@
test_memcontrol
+test_core
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 1c5d2b2a583b..14c9fe284806 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -89,17 +89,28 @@ int cg_read(const char *cgroup, const char *control, char *buf, size_t len)
int cg_read_strcmp(const char *cgroup, const char *control,
const char *expected)
{
- size_t size = strlen(expected) + 1;
+ size_t size;
char *buf;
+ int ret;
+
+ /* Handle the case of comparing against empty string */
+ if (!expected)
+ size = 32;
+ else
+ size = strlen(expected) + 1;
buf = malloc(size);
if (!buf)
return -1;
- if (cg_read(cgroup, control, buf, size))
+ if (cg_read(cgroup, control, buf, size)) {
+ free(buf);
return -1;
+ }
- return strcmp(expected, buf);
+ ret = strcmp(expected, buf);
+ free(buf);
+ return ret;
}
int cg_read_strstr(const char *cgroup, const char *control, const char *needle)
@@ -337,3 +348,24 @@ int is_swap_enabled(void)
return cnt > 1;
}
+
+int set_oom_adj_score(int pid, int score)
+{
+ char path[PATH_MAX];
+ int fd, len;
+
+ sprintf(path, "/proc/%d/oom_score_adj", pid);
+
+ fd = open(path, O_WRONLY | O_APPEND);
+ if (fd < 0)
+ return fd;
+
+ len = dprintf(fd, "%d", score);
+ if (len < 0) {
+ close(fd);
+ return len;
+ }
+
+ close(fd);
+ return 0;
+}
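The empty-string handling added to cg_read_strcmp() is what lets callers poll for a cgroup with no member processes, as the memcontrol tests below do. A hedged usage sketch (the wrapper name is hypothetical):

/* Hypothetical wrapper: true when the cgroup has no member processes,
 * relying on the empty-"expected" path added to cg_read_strcmp().
 */
static int cgroup_is_empty(const char *cgroup)
{
        return cg_read_strcmp(cgroup, "cgroup.procs", "") == 0;
}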
diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
index 1ff6f9f1abdc..9ac8b7958f83 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/cgroup_util.h
@@ -40,3 +40,4 @@ extern int get_temp_fd(void);
extern int alloc_pagecache(int fd, size_t size);
extern int alloc_anon(const char *cgroup, void *arg);
extern int is_swap_enabled(void);
+extern int set_oom_adj_score(int pid, int score);
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index cf0bddc9d271..28d321ba311b 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -2,6 +2,7 @@
#define _GNU_SOURCE
#include <linux/limits.h>
+#include <linux/oom.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
@@ -202,6 +203,36 @@ static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg)
return 0;
}
+static int alloc_anon_noexit(const char *cgroup, void *arg)
+{
+ int ppid = getppid();
+
+ if (alloc_anon(cgroup, arg))
+ return -1;
+
+ while (getppid() == ppid)
+ sleep(1);
+
+ return 0;
+}
+
+/*
+ * Wait until processes are killed asynchronously by the OOM killer.
+ * If we exceed the timeout, fail.
+ */
+static int cg_test_proc_killed(const char *cgroup)
+{
+ int limit;
+
+ for (limit = 10; limit > 0; limit--) {
+ if (cg_read_strcmp(cgroup, "cgroup.procs", "") == 0)
+ return 0;
+
+ usleep(100000);
+ }
+ return -1;
+}
+
/*
* First, this test creates the following hierarchy:
* A memory.min = 50M, memory.max = 200M
@@ -964,6 +995,177 @@ cleanup:
return ret;
}
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.oom.group set. Then it checks that all
+ * processes in the leaf (but not the parent) were killed.
+ */
+static int test_memcg_oom_group_leaf_events(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *parent, *child;
+
+ parent = cg_name(root, "memcg_test_0");
+ child = cg_name(root, "memcg_test_0/memcg_test_1");
+
+ if (!parent || !child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+
+ if (cg_write(parent, "cgroup.subtree_control", "+memory"))
+ goto cleanup;
+
+ if (cg_write(child, "memory.max", "50M"))
+ goto cleanup;
+
+ if (cg_write(child, "memory.swap.max", "0"))
+ goto cleanup;
+
+ if (cg_write(child, "memory.oom.group", "1"))
+ goto cleanup;
+
+ cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
+ cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+ cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+ if (!cg_run(child, alloc_anon, (void *)MB(100)))
+ goto cleanup;
+
+ if (cg_test_proc_killed(child))
+ goto cleanup;
+
+ if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0)
+ goto cleanup;
+
+ if (cg_read_key_long(parent, "memory.events", "oom_kill ") != 0)
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (child)
+ cg_destroy(child);
+ if (parent)
+ cg_destroy(parent);
+ free(child);
+ free(parent);
+
+ return ret;
+}
+
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.oom.group set. Then it checks that all
+ * processes in the parent and leaf were killed.
+ */
+static int test_memcg_oom_group_parent_events(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *parent, *child;
+
+ parent = cg_name(root, "memcg_test_0");
+ child = cg_name(root, "memcg_test_0/memcg_test_1");
+
+ if (!parent || !child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+
+ if (cg_write(parent, "memory.max", "80M"))
+ goto cleanup;
+
+ if (cg_write(parent, "memory.swap.max", "0"))
+ goto cleanup;
+
+ if (cg_write(parent, "memory.oom.group", "1"))
+ goto cleanup;
+
+ cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
+ cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+ cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+
+ if (!cg_run(child, alloc_anon, (void *)MB(100)))
+ goto cleanup;
+
+ if (cg_test_proc_killed(child))
+ goto cleanup;
+ if (cg_test_proc_killed(parent))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (child)
+ cg_destroy(child);
+ if (parent)
+ cg_destroy(parent);
+ free(child);
+ free(parent);
+
+ return ret;
+}
+
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.oom.group set. Then it checks that all
+ * processes were killed except those set with OOM_SCORE_ADJ_MIN
+ */
+static int test_memcg_oom_group_score_events(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *memcg;
+ int safe_pid;
+
+ memcg = cg_name(root, "memcg_test_0");
+
+ if (!memcg)
+ goto cleanup;
+
+ if (cg_create(memcg))
+ goto cleanup;
+
+ if (cg_write(memcg, "memory.max", "50M"))
+ goto cleanup;
+
+ if (cg_write(memcg, "memory.swap.max", "0"))
+ goto cleanup;
+
+ if (cg_write(memcg, "memory.oom.group", "1"))
+ goto cleanup;
+
+ safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
+ if (set_oom_adj_score(safe_pid, OOM_SCORE_ADJ_MIN))
+ goto cleanup;
+
+ cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
+ if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
+ goto cleanup;
+
+ if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3)
+ goto cleanup;
+
+ if (kill(safe_pid, SIGKILL))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (memcg)
+ cg_destroy(memcg);
+ free(memcg);
+
+ return ret;
+}
+
+
#define T(x) { x, #x }
struct memcg_test {
int (*fn)(const char *root);
@@ -978,6 +1180,9 @@ struct memcg_test {
T(test_memcg_oom_events),
T(test_memcg_swap_max),
T(test_memcg_sock),
+ T(test_memcg_oom_group_leaf_events),
+ T(test_memcg_oom_group_parent_events),
+ T(test_memcg_oom_group_score_events),
};
#undef T
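The three new tests toggle the memory.oom.group knob through cg_write(); outside the harness the same write looks roughly like the following sketch (assumption: a cgroup v2 directory path is passed in):

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch: enable group OOM kill for a cgroup v2 directory, mirroring
 * the cg_write(memcg, "memory.oom.group", "1") calls above.
 */
static int enable_oom_group(const char *cg_path)
{
        char path[PATH_MAX];
        int fd;

        snprintf(path, sizeof(path), "%s/memory.oom.group", cg_path);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        if (write(fd, "1", 1) != 1) {
                close(fd);
                return -1;
        }
        return close(fd);
}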
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
new file mode 100755
index 000000000000..0150bb2741eb
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
@@ -0,0 +1,347 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# A test for switch behavior under MC overload. An issue in Spectrum chips
+# causes throughput of UC traffic to drop severely when a switch is under heavy
+# MC load. This issue can be overcome by putting the switch to MC-aware mode.
+# This test verifies that UC performance stays intact even as the switch is
+# under MC flood, and therefore that the MC-aware mode is enabled and correctly
+# configured.
+#
+# Because mlxsw throttles CPU port, the traffic can't actually reach userspace
+# at full speed. That makes it impossible to use iperf3 to simply measure the
+# throughput, because many packets (that reach $h3) don't get to the kernel at
+# all even in UDP mode (the situation is even worse in TCP mode, where one can't
+# hope to see more than a couple Mbps).
+#
+# So instead we send traffic with mausezahn and use RX ethtool counters at $h3.
+# Multicast traffic is untagged, unicast traffic is tagged with PCP 1. Therefore
+# each gets a different priority and we can use per-prio ethtool counters to
+# measure the throughput. In order to avoid prioritizing unicast traffic, prio
+# qdisc is installed on $swp3 and maps all priorities to the same band #7 (and
+# thus TC 0).
+#
+# Mausezahn can't actually saturate the links unless it's using large frames.
+# Thus we set MTU to 10K on all involved interfaces. Then both unicast and
+# multicast traffic uses 8K frames.
+#
+# +-----------------------+ +----------------------------------+
+# | H1 | | H2 |
+# | | | unicast --> + $h2.111 |
+# | | | traffic | 192.0.2.129/28 |
+# | multicast | | | e-qos-map 0:1 |
+# | traffic | | | |
+# | $h1 + <----- | | + $h2 |
+# +-----|-----------------+ +--------------|-------------------+
+# | |
+# +-----|-------------------------------------------------|-------------------+
+# | + $swp1 + $swp2 |
+# | | >1Gbps | >1Gbps |
+# | +---|----------------+ +----------|----------------+ |
+# | | + $swp1.1 | | + $swp2.111 | |
+# | | BR1 | SW | BR111 | |
+# | | + $swp3.1 | | + $swp3.111 | |
+# | +---|----------------+ +----------|----------------+ |
+# | \_________________________________________________/ |
+# | | |
+# | + $swp3 |
+# | | 1Gbps bottleneck |
+# | | prio qdisc: {0..7} -> 7 |
+# +------------------------------------|--------------------------------------+
+# |
+# +--|-----------------+
+# | + $h3 H3 |
+# | | |
+# | + $h3.111 |
+# | 192.0.2.130/28 |
+# +--------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ test_mc_aware
+"
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=6
+source $lib_dir/lib.sh
+
+h1_create()
+{
+ simple_if_init $h1
+ mtu_set $h1 10000
+}
+
+h1_destroy()
+{
+ mtu_restore $h1
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2
+ mtu_set $h2 10000
+
+ vlan_create $h2 111 v$h2 192.0.2.129/28
+ ip link set dev $h2.111 type vlan egress-qos-map 0:1
+}
+
+h2_destroy()
+{
+ vlan_destroy $h2 111
+
+ mtu_restore $h2
+ simple_if_fini $h2
+}
+
+h3_create()
+{
+ simple_if_init $h3
+ mtu_set $h3 10000
+
+ vlan_create $h3 111 v$h3 192.0.2.130/28
+}
+
+h3_destroy()
+{
+ vlan_destroy $h3 111
+
+ mtu_restore $h3
+ simple_if_fini $h3
+}
+
+switch_create()
+{
+ ip link set dev $swp1 up
+ mtu_set $swp1 10000
+
+ ip link set dev $swp2 up
+ mtu_set $swp2 10000
+
+ ip link set dev $swp3 up
+ mtu_set $swp3 10000
+
+ vlan_create $swp2 111
+ vlan_create $swp3 111
+
+ ethtool -s $swp3 speed 1000 autoneg off
+ tc qdisc replace dev $swp3 root handle 3: \
+ prio bands 8 priomap 7 7 7 7 7 7 7 7
+
+ ip link add name br1 type bridge vlan_filtering 0
+ ip link set dev br1 up
+ ip link set dev $swp1 master br1
+ ip link set dev $swp3 master br1
+
+ ip link add name br111 type bridge vlan_filtering 0
+ ip link set dev br111 up
+ ip link set dev $swp2.111 master br111
+ ip link set dev $swp3.111 master br111
+}
+
+switch_destroy()
+{
+ ip link del dev br111
+ ip link del dev br1
+
+ tc qdisc del dev $swp3 root handle 3:
+ ethtool -s $swp3 autoneg on
+
+ vlan_destroy $swp3 111
+ vlan_destroy $swp2 111
+
+ mtu_restore $swp3
+ ip link set dev $swp3 down
+
+ mtu_restore $swp2
+ ip link set dev $swp2 down
+
+ mtu_restore $swp1
+ ip link set dev $swp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ h3mac=$(mac_get $h3)
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ h3_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h3_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h2 192.0.2.130
+}
+
+humanize()
+{
+ local speed=$1; shift
+
+ for unit in bps Kbps Mbps Gbps; do
+ if (($(echo "$speed < 1024" | bc))); then
+ break
+ fi
+
+ speed=$(echo "scale=1; $speed / 1024" | bc)
+ done
+
+ echo "$speed${unit}"
+}
+
+rate()
+{
+ local t0=$1; shift
+ local t1=$1; shift
+ local interval=$1; shift
+
+ echo $((8 * (t1 - t0) / interval))
+}
+
+check_rate()
+{
+ local rate=$1; shift
+ local min=$1; shift
+ local what=$1; shift
+
+ if ((rate > min)); then
+ return 0
+ fi
+
+ echo "$what $(humanize $ir) < $(humanize $min_ingress)" > /dev/stderr
+ return 1
+}
+
+measure_uc_rate()
+{
+ local what=$1; shift
+
+ local interval=10
+ local i
+ local ret=0
+
+ # Dips in performance might cause momentary ingress rate to drop below
+ # 1Gbps. That wouldn't saturate egress and MC would thus get through,
+ # seemingly winning bandwidth at the expense of UC. Demand at least 2Gbps
+ # average ingress rate to somewhat mitigate this.
+ local min_ingress=2147483648
+
+ mausezahn $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \
+ -a own -b $h3mac -t udp -q &
+ sleep 1
+
+ for i in {5..0}; do
+ local t0=$(ethtool_stats_get $h3 rx_octets_prio_1)
+ local u0=$(ethtool_stats_get $swp2 rx_octets_prio_1)
+ sleep $interval
+ local t1=$(ethtool_stats_get $h3 rx_octets_prio_1)
+ local u1=$(ethtool_stats_get $swp2 rx_octets_prio_1)
+
+ local ir=$(rate $u0 $u1 $interval)
+ local er=$(rate $t0 $t1 $interval)
+
+ if check_rate $ir $min_ingress "$what ingress rate"; then
+ break
+ fi
+
+ # Fail the test if we can't get the throughput.
+ if ((i == 0)); then
+ ret=1
+ fi
+ done
+
+ # Suppress noise from killing mausezahn.
+ { kill %% && wait; } 2>/dev/null
+
+ echo $ir $er
+ exit $ret
+}
+
+test_mc_aware()
+{
+ RET=0
+
+ local -a uc_rate
+ uc_rate=($(measure_uc_rate "UC-only"))
+ check_err $? "Could not get high enough UC-only ingress rate"
+ local ucth1=${uc_rate[1]}
+
+ mausezahn $h1 -p 8000 -c 0 -a own -b bc -t udp -q &
+
+ local d0=$(date +%s)
+ local t0=$(ethtool_stats_get $h3 rx_octets_prio_0)
+ local u0=$(ethtool_stats_get $swp1 rx_octets_prio_0)
+
+ local -a uc_rate_2
+ uc_rate_2=($(measure_uc_rate "UC+MC"))
+ check_err $? "Could not get high enough UC+MC ingress rate"
+ local ucth2=${uc_rate_2[1]}
+
+ local d1=$(date +%s)
+ local t1=$(ethtool_stats_get $h3 rx_octets_prio_0)
+ local u1=$(ethtool_stats_get $swp1 rx_octets_prio_0)
+
+ local deg=$(bc <<< "
+ scale=2
+ ret = 100 * ($ucth1 - $ucth2) / $ucth1
+ if (ret > 0) { ret } else { 0 }
+ ")
+ check_err $(bc <<< "$deg > 10")
+
+ local interval=$((d1 - d0))
+ local mc_ir=$(rate $u0 $u1 $interval)
+ local mc_er=$(rate $t0 $t1 $interval)
+
+ # Suppress noise from killing mausezahn.
+ { kill %% && wait; } 2>/dev/null
+
+ log_test "UC performace under MC overload"
+
+ echo "UC-only throughput $(humanize $ucth1)"
+ echo "UC+MC throughput $(humanize $ucth2)"
+ echo "Degradation $deg %"
+ echo
+ echo "Full report:"
+ echo " UC only:"
+ echo " ingress UC throughput $(humanize ${uc_rate[0]})"
+ echo " egress UC throughput $(humanize ${uc_rate[1]})"
+ echo " UC+MC:"
+ echo " ingress UC throughput $(humanize ${uc_rate_2[0]})"
+ echo " egress UC throughput $(humanize ${uc_rate_2[1]})"
+ echo " ingress MC throughput $(humanize $mc_ir)"
+ echo " egress MC throughput $(humanize $mc_er)"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
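The throughput numbers above come from simple counter deltas; a sketch of the same arithmetic in C (assumption: octet counters sampled interval_s seconds apart):

/* Sketch of rate() and the degradation check: 8 bits per octet
 * averaged over the interval, and relative UC loss clamped at zero.
 */
static unsigned long long rate_bps(unsigned long long octets0,
                                   unsigned long long octets1,
                                   unsigned int interval_s)
{
        return 8ULL * (octets1 - octets0) / interval_s;
}

static double degradation_pct(double ucth1, double ucth2)
{
        double deg = 100.0 * (ucth1 - ucth2) / ucth1;
        return deg > 0 ? deg : 0;
}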
diff --git a/tools/testing/selftests/drivers/usb/usbip/usbip_test.sh b/tools/testing/selftests/drivers/usb/usbip/usbip_test.sh
index a72df93cf1f8..128f0ab24307 100755
--- a/tools/testing/selftests/drivers/usb/usbip/usbip_test.sh
+++ b/tools/testing/selftests/drivers/usb/usbip/usbip_test.sh
@@ -141,6 +141,10 @@ echo "Import devices from localhost - should work"
src/usbip attach -r localhost -b $busid;
echo "=============================================================="
+# Wait for sysfs file to be updated. Without this sleep, usbip port
+# shows no imported devices.
+sleep 3;
+
echo "List imported devices - expect to see imported devices";
src/usbip port;
echo "=============================================================="
diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
new file mode 100644
index 000000000000..4e151f1005b2
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/config
@@ -0,0 +1 @@
+CONFIG_EFIVAR_FS=y
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
new file mode 100644
index 000000000000..88e6c3f43006
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
@@ -0,0 +1,80 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test synthetic_events syntax parser
+
+do_reset() {
+ reset_trigger
+ echo > set_event
+ clear_trace
+}
+
+fail() { #msg
+ do_reset
+ echo $1
+ exit_fail
+}
+
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f synthetic_events ]; then
+ echo "synthetic event is not supported"
+ exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+echo "Test synthetic_events syntax parser"
+
+echo > synthetic_events
+
+# synthetic event must have a field
+! echo "myevent" >> synthetic_events
+echo "myevent u64 var1" >> synthetic_events
+
+# synthetic event must be found in synthetic_events
+grep "myevent[[:space:]]u64 var1" synthetic_events
+
+# it is not possible to add same name event
+! echo "myevent u64 var2" >> synthetic_events
+
+# Non-append open will cleanup all events and add new one
+echo "myevent u64 var2" > synthetic_events
+
+# multiple fields with different spaces
+echo "myevent u64 var1; u64 var2;" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+echo "myevent u64 var1 ; u64 var2 ;" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+echo "myevent u64 var1 ;u64 var2" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+
+# test field types
+echo "myevent u32 var" > synthetic_events
+echo "myevent u16 var" > synthetic_events
+echo "myevent u8 var" > synthetic_events
+echo "myevent s64 var" > synthetic_events
+echo "myevent s32 var" > synthetic_events
+echo "myevent s16 var" > synthetic_events
+echo "myevent s8 var" > synthetic_events
+
+echo "myevent char var" > synthetic_events
+echo "myevent int var" > synthetic_events
+echo "myevent long var" > synthetic_events
+echo "myevent pid_t var" > synthetic_events
+
+echo "myevent unsigned char var" > synthetic_events
+echo "myevent unsigned int var" > synthetic_events
+echo "myevent unsigned long var" > synthetic_events
+grep "myevent[[:space:]]unsigned long var" synthetic_events
+
+# test string type
+echo "myevent char var[10]" > synthetic_events
+grep "myevent[[:space:]]char\[10\] var" synthetic_events
+
+do_reset
+
+exit 0
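The parser exercised above is driven entirely by writes to the synthetic_events file; the same operation from C looks roughly like this sketch (assumption: tracefs mounted at /sys/kernel/tracing):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Sketch: define a synthetic event by appending to synthetic_events,
 * equivalent to the `echo "myevent u64 var1" >> synthetic_events`
 * commands above. The path is an assumption about the tracefs mount.
 */
static int define_synth_event(const char *spec)
{
        int fd, ret;

        fd = open("/sys/kernel/tracing/synthetic_events",
                  O_WRONLY | O_APPEND);
        if (fd < 0)
                return -1;
        ret = write(fd, spec, strlen(spec)) < 0 ? -1 : 0;
        close(fd);
        return ret;
}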
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index ff8feca49746..ad1eeb14fda7 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -18,6 +18,7 @@ TEST_GEN_FILES := \
TEST_PROGS := run.sh
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_FILES): $(HEADERS)
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 1bbb47565c55..4665cdbf1a8d 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -21,11 +21,8 @@ endef
CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/
LDLIBS += -lmount -I/usr/include/libmount
-$(BINARIES): ../../../gpio/gpio-utils.o ../../../../usr/include/linux/gpio.h
+$(BINARIES):| khdr
+$(BINARIES): ../../../gpio/gpio-utils.o
../../../gpio/gpio-utils.o:
make ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) -C ../../../gpio
-
-../../../../usr/include/linux/gpio.h:
- make -C ../../../.. headers_install INSTALL_HDR_PATH=$(shell pwd)/../../../../usr/
-
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index 15e6b75fc3a5..a3edb2c8e43d 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -19,7 +19,6 @@
#define KSFT_FAIL 1
#define KSFT_XFAIL 2
#define KSFT_XPASS 3
-/* Treat skip as pass */
#define KSFT_SKIP 4
/* counters */
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 4202139d81d9..6210ba41c29e 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,5 +1,8 @@
-cr4_cpuid_sync_test
-set_sregs_test
-sync_regs_test
-vmx_tsc_adjust_test
-state_test
+/x86_64/cr4_cpuid_sync_test
+/x86_64/evmcs_test
+/x86_64/platform_info_test
+/x86_64/set_sregs_test
+/x86_64/sync_regs_test
+/x86_64/vmx_tsc_adjust_test
+/x86_64/state_test
+/dirty_log_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 03b0f551bedf..01a219229238 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -1,26 +1,31 @@
all:
-top_srcdir = ../../../../
+top_srcdir = ../../../..
UNAME_M := $(shell uname -m)
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
-LIBKVM_x86_64 = lib/x86.c lib/vmx.c
-
-TEST_GEN_PROGS_x86_64 = set_sregs_test
-TEST_GEN_PROGS_x86_64 += sync_regs_test
-TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test
-TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test
-TEST_GEN_PROGS_x86_64 += state_test
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
+LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c
+LIBKVM_aarch64 = lib/aarch64/processor.c
+
+TEST_GEN_PROGS_x86_64 = x86_64/platform_info_test
+TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
+TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
+TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
+TEST_GEN_PROGS_x86_64 += x86_64/state_test
+TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
+TEST_GEN_PROGS_aarch64 += dirty_log_test
+
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))
INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
-LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include
-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
-LDFLAGS += -lpthread
+LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
+CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
+LDFLAGS += -pthread
# After inclusion, $(OUTPUT) is defined and
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
@@ -28,7 +33,7 @@ include ../lib.mk
STATIC_LIBS := $(OUTPUT)/libkvm.a
LIBKVM_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM))
-EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS)
+EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS) cscope.*
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_OBJ))))
$(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
@@ -37,9 +42,15 @@ $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
$(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
$(AR) crs $@ $^
-$(LINUX_HDR_PATH):
- make -C $(top_srcdir) headers_install
-
-all: $(STATIC_LIBS) $(LINUX_HDR_PATH)
+all: $(STATIC_LIBS)
$(TEST_GEN_PROGS): $(STATIC_LIBS)
-$(TEST_GEN_PROGS) $(LIBKVM_OBJ): | $(LINUX_HDR_PATH)
+$(STATIC_LIBS):| khdr
+
+cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
+cscope:
+ $(RM) cscope.*
+ (find $(include_paths) -name '*.h' \
+ -exec realpath --relative-base=$(PWD) {} \;; \
+ find . -name '*.c' \
+ -exec realpath --relative-base=$(PWD) {} \;) | sort -u > cscope.files
+ cscope -b
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 0c2cdc105f96..d59820cc2d39 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -5,6 +5,8 @@
* Copyright (C) 2018, Red Hat, Inc.
*/
+#define _GNU_SOURCE /* for program_invocation_name */
+
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
@@ -15,76 +17,78 @@
#include "test_util.h"
#include "kvm_util.h"
+#include "processor.h"
+
+#define DEBUG printf
-#define DEBUG printf
+#define VCPU_ID 1
-#define VCPU_ID 1
/* The memory slot index to track dirty pages */
-#define TEST_MEM_SLOT_INDEX 1
-/*
- * GPA offset of the testing memory slot. Must be bigger than the
- * default vm mem slot, which is DEFAULT_GUEST_PHY_PAGES.
- */
-#define TEST_MEM_OFFSET (1ULL << 30) /* 1G */
-/* Size of the testing memory slot */
-#define TEST_MEM_PAGES (1ULL << 18) /* 1G for 4K pages */
+#define TEST_MEM_SLOT_INDEX 1
+
+/* Default guest test memory offset, 1G */
+#define DEFAULT_GUEST_TEST_MEM 0x40000000
+
/* How many pages to dirty for each guest loop */
-#define TEST_PAGES_PER_LOOP 1024
+#define TEST_PAGES_PER_LOOP 1024
+
/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
-#define TEST_HOST_LOOP_N 32
+#define TEST_HOST_LOOP_N 32
+
/* Interval for each host loop (ms) */
-#define TEST_HOST_LOOP_INTERVAL 10
+#define TEST_HOST_LOOP_INTERVAL 10
+
+/*
+ * Guest/Host shared variables. Ensure addr_gva2hva() and/or
+ * sync_global_to/from_guest() are used when accessing from
+ * the host. READ/WRITE_ONCE() should also be used with anything
+ * that may change.
+ */
+static uint64_t host_page_size;
+static uint64_t guest_page_size;
+static uint64_t guest_num_pages;
+static uint64_t random_array[TEST_PAGES_PER_LOOP];
+static uint64_t iteration;
/*
- * Guest variables. We use these variables to share data between host
- * and guest. There are two copies of the variables, one in host memory
- * (which is unused) and one in guest memory. When the host wants to
- * access these variables, it needs to call addr_gva2hva() to access the
- * guest copy.
+ * GPA offset of the testing memory slot. Must be bigger than
+ * DEFAULT_GUEST_PHY_PAGES.
*/
-uint64_t guest_random_array[TEST_PAGES_PER_LOOP];
-uint64_t guest_iteration;
-uint64_t guest_page_size;
+static uint64_t guest_test_mem = DEFAULT_GUEST_TEST_MEM;
/*
- * Writes to the first byte of a random page within the testing memory
- * region continuously.
+ * Continuously write to the first 8 bytes of random pages within
+ * the testing memory region.
*/
-void guest_code(void)
+static void guest_code(void)
{
- int i = 0;
- uint64_t volatile *array = guest_random_array;
- uint64_t volatile *guest_addr;
+ int i;
while (true) {
for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
- /*
- * Write to the first 8 bytes of a random page
- * on the testing memory region.
- */
- guest_addr = (uint64_t *)
- (TEST_MEM_OFFSET +
- (array[i] % TEST_MEM_PAGES) * guest_page_size);
- *guest_addr = guest_iteration;
+ uint64_t addr = guest_test_mem;
+ addr += (READ_ONCE(random_array[i]) % guest_num_pages)
+ * guest_page_size;
+ addr &= ~(host_page_size - 1);
+ *(uint64_t *)addr = READ_ONCE(iteration);
}
+
/* Tell the host that we need more random numbers */
GUEST_SYNC(1);
}
}
-/*
- * Host variables. These variables should only be used by the host
- * rather than the guest.
- */
-bool host_quit;
+/* Host variables */
+static bool host_quit;
/* Points to the test VM memory region on which we track dirty logs */
-void *host_test_mem;
+static void *host_test_mem;
+static uint64_t host_num_pages;
/* For statistics only */
-uint64_t host_dirty_count;
-uint64_t host_clear_count;
-uint64_t host_track_next_count;
+static uint64_t host_dirty_count;
+static uint64_t host_clear_count;
+static uint64_t host_track_next_count;
/*
* We use this bitmap to track some pages that should have its dirty
@@ -93,40 +97,34 @@ uint64_t host_track_next_count;
* page bit is cleared in the latest bitmap, then the system must
* report that write in the next get dirty log call.
*/
-unsigned long *host_bmap_track;
+static unsigned long *host_bmap_track;
-void generate_random_array(uint64_t *guest_array, uint64_t size)
+static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
uint64_t i;
- for (i = 0; i < size; i++) {
+ for (i = 0; i < size; i++)
guest_array[i] = random();
- }
}
-void *vcpu_worker(void *data)
+static void *vcpu_worker(void *data)
{
int ret;
- uint64_t loops, *guest_array, pages_count = 0;
struct kvm_vm *vm = data;
+ uint64_t *guest_array;
+ uint64_t pages_count = 0;
struct kvm_run *run;
- struct guest_args args;
+ struct ucall uc;
run = vcpu_state(vm, VCPU_ID);
- /* Retrieve the guest random array pointer and cache it */
- guest_array = addr_gva2hva(vm, (vm_vaddr_t)guest_random_array);
-
- DEBUG("VCPU starts\n");
-
+ guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);
generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
while (!READ_ONCE(host_quit)) {
- /* Let the guest to dirty these random pages */
+ /* Let the guest dirty the random pages */
ret = _vcpu_run(vm, VCPU_ID);
- guest_args_read(vm, VCPU_ID, &args);
- if (run->exit_reason == KVM_EXIT_IO &&
- args.port == GUEST_PORT_SYNC) {
+ if (get_ucall(vm, VCPU_ID, &uc) == UCALL_SYNC) {
pages_count += TEST_PAGES_PER_LOOP;
generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
} else {
@@ -137,18 +135,20 @@ void *vcpu_worker(void *data)
}
}
- DEBUG("VCPU exits, dirtied %"PRIu64" pages\n", pages_count);
+ DEBUG("Dirtied %"PRIu64" pages\n", pages_count);
return NULL;
}
-void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration)
+static void vm_dirty_log_verify(unsigned long *bmap)
{
uint64_t page;
- uint64_t volatile *value_ptr;
+ uint64_t *value_ptr;
+ uint64_t step = host_page_size >= guest_page_size ? 1 :
+ guest_page_size / host_page_size;
- for (page = 0; page < TEST_MEM_PAGES; page++) {
- value_ptr = host_test_mem + page * getpagesize();
+ for (page = 0; page < host_num_pages; page += step) {
+ value_ptr = host_test_mem + page * host_page_size;
/* If this is a special page that we were tracking... */
if (test_and_clear_bit(page, host_bmap_track)) {
@@ -208,88 +208,117 @@ void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration)
}
}
-void help(char *name)
+static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
+ uint64_t extra_mem_pages, void *guest_code)
{
- puts("");
- printf("usage: %s [-i iterations] [-I interval] [-h]\n", name);
- puts("");
- printf(" -i: specify iteration counts (default: %"PRIu64")\n",
- TEST_HOST_LOOP_N);
- printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
- TEST_HOST_LOOP_INTERVAL);
- puts("");
- exit(0);
+ struct kvm_vm *vm;
+ uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
+
+ vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+ kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+#ifdef __x86_64__
+ vm_create_irqchip(vm);
+#endif
+ vm_vcpu_add_default(vm, vcpuid, guest_code);
+ return vm;
}
-int main(int argc, char *argv[])
+static void run_test(enum vm_guest_mode mode, unsigned long iterations,
+ unsigned long interval, bool top_offset)
{
+ unsigned int guest_pa_bits, guest_page_shift;
pthread_t vcpu_thread;
struct kvm_vm *vm;
- uint64_t volatile *psize, *iteration;
- unsigned long *bmap, iterations = TEST_HOST_LOOP_N,
- interval = TEST_HOST_LOOP_INTERVAL;
- int opt;
-
- while ((opt = getopt(argc, argv, "hi:I:")) != -1) {
- switch (opt) {
- case 'i':
- iterations = strtol(optarg, NULL, 10);
- break;
- case 'I':
- interval = strtol(optarg, NULL, 10);
- break;
- case 'h':
- default:
- help(argv[0]);
- break;
- }
+ uint64_t max_gfn;
+ unsigned long *bmap;
+
+ switch (mode) {
+ case VM_MODE_P52V48_4K:
+ guest_pa_bits = 52;
+ guest_page_shift = 12;
+ break;
+ case VM_MODE_P52V48_64K:
+ guest_pa_bits = 52;
+ guest_page_shift = 16;
+ break;
+ case VM_MODE_P40V48_4K:
+ guest_pa_bits = 40;
+ guest_page_shift = 12;
+ break;
+ case VM_MODE_P40V48_64K:
+ guest_pa_bits = 40;
+ guest_page_shift = 16;
+ break;
+ default:
+ TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
}
- TEST_ASSERT(iterations > 2, "Iteration must be bigger than zero\n");
- TEST_ASSERT(interval > 0, "Interval must be bigger than zero");
+ DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode));
- DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
- iterations, interval);
+ max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
+ guest_page_size = (1ul << guest_page_shift);
+ /* 1G of guest page sized pages */
+ guest_num_pages = (1ul << (30 - guest_page_shift));
+ host_page_size = getpagesize();
+ host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
+ !!((guest_num_pages * guest_page_size) % host_page_size);
- srandom(time(0));
+ if (top_offset) {
+ guest_test_mem = (max_gfn - guest_num_pages) * guest_page_size;
+ guest_test_mem &= ~(host_page_size - 1);
+ }
- bmap = bitmap_alloc(TEST_MEM_PAGES);
- host_bmap_track = bitmap_alloc(TEST_MEM_PAGES);
+ DEBUG("guest test mem offset: 0x%lx\n", guest_test_mem);
- vm = vm_create_default(VCPU_ID, TEST_MEM_PAGES, guest_code);
+ bmap = bitmap_alloc(host_num_pages);
+ host_bmap_track = bitmap_alloc(host_num_pages);
+
+ vm = create_vm(mode, VCPU_ID, guest_num_pages, guest_code);
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- TEST_MEM_OFFSET,
+ guest_test_mem,
TEST_MEM_SLOT_INDEX,
- TEST_MEM_PAGES,
+ guest_num_pages,
KVM_MEM_LOG_DIRTY_PAGES);
- /* Cache the HVA pointer of the region */
- host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)TEST_MEM_OFFSET);
/* Do 1:1 mapping for the dirty track memory slot */
- virt_map(vm, TEST_MEM_OFFSET, TEST_MEM_OFFSET,
- TEST_MEM_PAGES * getpagesize(), 0);
+ virt_map(vm, guest_test_mem, guest_test_mem,
+ guest_num_pages * guest_page_size, 0);
+
+ /* Cache the HVA pointer of the region */
+ host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_mem);
+#ifdef __x86_64__
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+#endif
+#ifdef __aarch64__
+ ucall_init(vm, UCALL_MMIO, NULL);
+#endif
- /* Tell the guest about the page size on the system */
- psize = addr_gva2hva(vm, (vm_vaddr_t)&guest_page_size);
- *psize = getpagesize();
+ /* Export the shared variables to the guest */
+ sync_global_to_guest(vm, host_page_size);
+ sync_global_to_guest(vm, guest_page_size);
+ sync_global_to_guest(vm, guest_test_mem);
+ sync_global_to_guest(vm, guest_num_pages);
/* Start the iterations */
- iteration = addr_gva2hva(vm, (vm_vaddr_t)&guest_iteration);
- *iteration = 1;
+ iteration = 1;
+ sync_global_to_guest(vm, iteration);
+ host_quit = false;
+ host_dirty_count = 0;
+ host_clear_count = 0;
+ host_track_next_count = 0;
- /* Start dirtying pages */
pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
- while (*iteration < iterations) {
+ while (iteration < iterations) {
/* Give the vcpu thread some time to dirty some pages */
usleep(interval * 1000);
kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
- vm_dirty_log_verify(bmap, *iteration);
- (*iteration)++;
+ vm_dirty_log_verify(bmap);
+ iteration++;
+ sync_global_to_guest(vm, iteration);
}
/* Tell the vcpu thread to quit */
@@ -302,7 +331,118 @@ int main(int argc, char *argv[])
free(bmap);
free(host_bmap_track);
+ ucall_uninit(vm);
kvm_vm_free(vm);
+}
+
+static struct vm_guest_modes {
+ enum vm_guest_mode mode;
+ bool supported;
+ bool enabled;
+} vm_guest_modes[NUM_VM_MODES] = {
+#if defined(__x86_64__)
+ { VM_MODE_P52V48_4K, 1, 1, },
+ { VM_MODE_P52V48_64K, 0, 0, },
+ { VM_MODE_P40V48_4K, 0, 0, },
+ { VM_MODE_P40V48_64K, 0, 0, },
+#elif defined(__aarch64__)
+ { VM_MODE_P52V48_4K, 0, 0, },
+ { VM_MODE_P52V48_64K, 0, 0, },
+ { VM_MODE_P40V48_4K, 1, 1, },
+ { VM_MODE_P40V48_64K, 1, 1, },
+#endif
+};
+
+static void help(char *name)
+{
+ int i;
+
+ puts("");
+ printf("usage: %s [-h] [-i iterations] [-I interval] "
+ "[-o offset] [-t] [-m mode]\n", name);
+ puts("");
+ printf(" -i: specify iteration counts (default: %"PRIu64")\n",
+ TEST_HOST_LOOP_N);
+ printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
+ TEST_HOST_LOOP_INTERVAL);
+ printf(" -o: guest test memory offset (default: 0x%lx)\n",
+ DEFAULT_GUEST_TEST_MEM);
+ printf(" -t: map guest test memory at the top of the allowed "
+ "physical address range\n");
+ printf(" -m: specify the guest mode ID to test "
+ "(default: test all supported modes)\n"
+ " This option may be used multiple times.\n"
+ " Guest mode IDs:\n");
+ for (i = 0; i < NUM_VM_MODES; ++i) {
+ printf(" %d: %s%s\n",
+ vm_guest_modes[i].mode,
+ vm_guest_mode_string(vm_guest_modes[i].mode),
+ vm_guest_modes[i].supported ? " (supported)" : "");
+ }
+ puts("");
+ exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+ unsigned long iterations = TEST_HOST_LOOP_N;
+ unsigned long interval = TEST_HOST_LOOP_INTERVAL;
+ bool mode_selected = false;
+ bool top_offset = false;
+ unsigned int mode;
+ int opt, i;
+
+ while ((opt = getopt(argc, argv, "hi:I:o:tm:")) != -1) {
+ switch (opt) {
+ case 'i':
+ iterations = strtol(optarg, NULL, 10);
+ break;
+ case 'I':
+ interval = strtol(optarg, NULL, 10);
+ break;
+ case 'o':
+ guest_test_mem = strtoull(optarg, NULL, 0);
+ break;
+ case 't':
+ top_offset = true;
+ break;
+ case 'm':
+ if (!mode_selected) {
+ for (i = 0; i < NUM_VM_MODES; ++i)
+ vm_guest_modes[i].enabled = 0;
+ mode_selected = true;
+ }
+ mode = strtoul(optarg, NULL, 10);
+ TEST_ASSERT(mode < NUM_VM_MODES,
+ "Guest mode ID %d too big", mode);
+ vm_guest_modes[mode].enabled = 1;
+ break;
+ case 'h':
+ default:
+ help(argv[0]);
+ break;
+ }
+ }
+
+ TEST_ASSERT(iterations > 2, "Iterations must be greater than two");
+ TEST_ASSERT(interval > 0, "Interval must be greater than zero");
+ TEST_ASSERT(!top_offset || guest_test_mem == DEFAULT_GUEST_TEST_MEM,
+ "Cannot use both -o [offset] and -t at the same time");
+
+ DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
+ iterations, interval);
+
+ srandom(time(0));
+
+ for (i = 0; i < NUM_VM_MODES; ++i) {
+ if (!vm_guest_modes[i].enabled)
+ continue;
+ TEST_ASSERT(vm_guest_modes[i].supported,
+ "Guest mode ID %d (%s) not supported.",
+ vm_guest_modes[i].mode,
+ vm_guest_mode_string(vm_guest_modes[i].mode));
+ run_test(vm_guest_modes[i].mode, iterations, interval, top_offset);
+ }
return 0;
}
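
A usage sketch (the dirty_log_test binary name comes from the selftests
build; flags as defined above): run 32 iterations at a 10 ms interval,
testing only guest mode 0:

	./dirty_log_test -i 32 -I 10 -m 0
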
diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
new file mode 100644
index 000000000000..9ef2ab1a0c08
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AArch64 processor specific defines
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ */
+#ifndef SELFTEST_KVM_PROCESSOR_H
+#define SELFTEST_KVM_PROCESSOR_H
+
+#include "kvm_util.h"
+
+#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+#define CPACR_EL1 3, 0, 1, 0, 2
+#define TCR_EL1 3, 0, 2, 0, 2
+#define MAIR_EL1 3, 0, 10, 2, 0
+#define TTBR0_EL1 3, 0, 2, 0, 0
+#define SCTLR_EL1 3, 0, 1, 0, 0
+
+/*
+ * Default MAIR
+ * index attribute
+ * DEVICE_nGnRnE 0 0000:0000
+ * DEVICE_nGnRE 1 0000:0100
+ * DEVICE_GRE 2 0000:1100
+ * NORMAL_NC 3 0100:0100
+ * NORMAL 4 1111:1111
+ * NORMAL_WT 5 1011:1011
+ */
+#define DEFAULT_MAIR_EL1 ((0x00ul << (0 * 8)) | \
+ (0x04ul << (1 * 8)) | \
+ (0x0cul << (2 * 8)) | \
+ (0x44ul << (3 * 8)) | \
+ (0xfful << (4 * 8)) | \
+ (0xbbul << (5 * 8)))
+
+static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t *addr)
+{
+ struct kvm_one_reg reg;
+ reg.id = id;
+ reg.addr = (uint64_t)addr;
+ vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);
+}
+
+static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t val)
+{
+ struct kvm_one_reg reg;
+ reg.id = id;
+ reg.addr = (uint64_t)&val;
+ vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg);
+}
+
+#endif /* SELFTEST_KVM_PROCESSOR_H */
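
A minimal usage sketch for the accessors above (assumes an existing vm
and vcpu 0; failures are caught by vcpu_ioctl()'s internal asserts):

	uint64_t pc;

	get_reg(vm, 0, ARM64_CORE_REG(regs.pc), &pc);
	set_reg(vm, 0, ARM64_CORE_REG(regs.pc), pc + 4);	/* skip one insn */
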
diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h
new file mode 100644
index 000000000000..4059014d93ea
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/evmcs.h
@@ -0,0 +1,1098 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * tools/testing/selftests/kvm/include/evmcs.h
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ */
+
+#ifndef SELFTEST_KVM_EVMCS_H
+#define SELFTEST_KVM_EVMCS_H
+
+#include <stdint.h>
+#include "vmx.h"
+
+#define u16 uint16_t
+#define u32 uint32_t
+#define u64 uint64_t
+
+extern bool enable_evmcs;
+
+struct hv_vp_assist_page {
+ __u32 apic_assist;
+ __u32 reserved;
+ __u64 vtl_control[2];
+ __u64 nested_enlightenments_control[2];
+ __u32 enlighten_vmentry;
+ __u64 current_nested_vmcs;
+};
+
+struct hv_enlightened_vmcs {
+ u32 revision_id;
+ u32 abort;
+
+ u16 host_es_selector;
+ u16 host_cs_selector;
+ u16 host_ss_selector;
+ u16 host_ds_selector;
+ u16 host_fs_selector;
+ u16 host_gs_selector;
+ u16 host_tr_selector;
+
+ u64 host_ia32_pat;
+ u64 host_ia32_efer;
+
+ u64 host_cr0;
+ u64 host_cr3;
+ u64 host_cr4;
+
+ u64 host_ia32_sysenter_esp;
+ u64 host_ia32_sysenter_eip;
+ u64 host_rip;
+ u32 host_ia32_sysenter_cs;
+
+ u32 pin_based_vm_exec_control;
+ u32 vm_exit_controls;
+ u32 secondary_vm_exec_control;
+
+ u64 io_bitmap_a;
+ u64 io_bitmap_b;
+ u64 msr_bitmap;
+
+ u16 guest_es_selector;
+ u16 guest_cs_selector;
+ u16 guest_ss_selector;
+ u16 guest_ds_selector;
+ u16 guest_fs_selector;
+ u16 guest_gs_selector;
+ u16 guest_ldtr_selector;
+ u16 guest_tr_selector;
+
+ u32 guest_es_limit;
+ u32 guest_cs_limit;
+ u32 guest_ss_limit;
+ u32 guest_ds_limit;
+ u32 guest_fs_limit;
+ u32 guest_gs_limit;
+ u32 guest_ldtr_limit;
+ u32 guest_tr_limit;
+ u32 guest_gdtr_limit;
+ u32 guest_idtr_limit;
+
+ u32 guest_es_ar_bytes;
+ u32 guest_cs_ar_bytes;
+ u32 guest_ss_ar_bytes;
+ u32 guest_ds_ar_bytes;
+ u32 guest_fs_ar_bytes;
+ u32 guest_gs_ar_bytes;
+ u32 guest_ldtr_ar_bytes;
+ u32 guest_tr_ar_bytes;
+
+ u64 guest_es_base;
+ u64 guest_cs_base;
+ u64 guest_ss_base;
+ u64 guest_ds_base;
+ u64 guest_fs_base;
+ u64 guest_gs_base;
+ u64 guest_ldtr_base;
+ u64 guest_tr_base;
+ u64 guest_gdtr_base;
+ u64 guest_idtr_base;
+
+ u64 padding64_1[3];
+
+ u64 vm_exit_msr_store_addr;
+ u64 vm_exit_msr_load_addr;
+ u64 vm_entry_msr_load_addr;
+
+ u64 cr3_target_value0;
+ u64 cr3_target_value1;
+ u64 cr3_target_value2;
+ u64 cr3_target_value3;
+
+ u32 page_fault_error_code_mask;
+ u32 page_fault_error_code_match;
+
+ u32 cr3_target_count;
+ u32 vm_exit_msr_store_count;
+ u32 vm_exit_msr_load_count;
+ u32 vm_entry_msr_load_count;
+
+ u64 tsc_offset;
+ u64 virtual_apic_page_addr;
+ u64 vmcs_link_pointer;
+
+ u64 guest_ia32_debugctl;
+ u64 guest_ia32_pat;
+ u64 guest_ia32_efer;
+
+ u64 guest_pdptr0;
+ u64 guest_pdptr1;
+ u64 guest_pdptr2;
+ u64 guest_pdptr3;
+
+ u64 guest_pending_dbg_exceptions;
+ u64 guest_sysenter_esp;
+ u64 guest_sysenter_eip;
+
+ u32 guest_activity_state;
+ u32 guest_sysenter_cs;
+
+ u64 cr0_guest_host_mask;
+ u64 cr4_guest_host_mask;
+ u64 cr0_read_shadow;
+ u64 cr4_read_shadow;
+ u64 guest_cr0;
+ u64 guest_cr3;
+ u64 guest_cr4;
+ u64 guest_dr7;
+
+ u64 host_fs_base;
+ u64 host_gs_base;
+ u64 host_tr_base;
+ u64 host_gdtr_base;
+ u64 host_idtr_base;
+ u64 host_rsp;
+
+ u64 ept_pointer;
+
+ u16 virtual_processor_id;
+ u16 padding16[3];
+
+ u64 padding64_2[5];
+ u64 guest_physical_address;
+
+ u32 vm_instruction_error;
+ u32 vm_exit_reason;
+ u32 vm_exit_intr_info;
+ u32 vm_exit_intr_error_code;
+ u32 idt_vectoring_info_field;
+ u32 idt_vectoring_error_code;
+ u32 vm_exit_instruction_len;
+ u32 vmx_instruction_info;
+
+ u64 exit_qualification;
+ u64 exit_io_instruction_ecx;
+ u64 exit_io_instruction_esi;
+ u64 exit_io_instruction_edi;
+ u64 exit_io_instruction_eip;
+
+ u64 guest_linear_address;
+ u64 guest_rsp;
+ u64 guest_rflags;
+
+ u32 guest_interruptibility_info;
+ u32 cpu_based_vm_exec_control;
+ u32 exception_bitmap;
+ u32 vm_entry_controls;
+ u32 vm_entry_intr_info_field;
+ u32 vm_entry_exception_error_code;
+ u32 vm_entry_instruction_len;
+ u32 tpr_threshold;
+
+ u64 guest_rip;
+
+ u32 hv_clean_fields;
+ u32 hv_padding_32;
+ u32 hv_synthetic_controls;
+ struct {
+ u32 nested_flush_hypercall:1;
+ u32 msr_bitmap:1;
+ u32 reserved:30;
+ } hv_enlightenments_control;
+ u32 hv_vp_id;
+
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 padding64_4[4];
+ u64 guest_bndcfgs;
+ u64 padding64_5[7];
+ u64 xss_exit_bitmap;
+ u64 padding64_6[7];
+};
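+
+/*
+ * Note: evmcs_vmlaunch() and evmcs_vmresume() below zero hv_clean_fields
+ * before entry, so the hypervisor reloads every field on that pass.
+ */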
+
+#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
+#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \
+ (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+
+struct hv_enlightened_vmcs *current_evmcs;
+struct hv_vp_assist_page *current_vp_assist;
+
+static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
+{
+ u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
+ HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
+
+ wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val);
+
+ current_vp_assist = vp_assist;
+
+ enable_evmcs = true;
+
+ return 0;
+}
+
+static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs)
+{
+ current_vp_assist->current_nested_vmcs = vmcs_pa;
+ current_vp_assist->enlighten_vmentry = 1;
+
+ current_evmcs = vmcs;
+
+ return 0;
+}
+
+static inline int evmcs_vmptrst(uint64_t *value)
+{
+ *value = current_vp_assist->current_nested_vmcs &
+ ~HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
+
+ return 0;
+}
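+
+/*
+ * Hedged usage sketch (the vmx-> field names come from struct vmx_pages
+ * later in this series; this is not a verbatim excerpt):
+ *
+ *	enable_vp_assist(vmx->vp_assist_gpa, vmx->vp_assist);
+ *	evmcs_vmptrld(vmx->enlightened_vmcs_gpa, vmx->enlightened_vmcs);
+ *
+ * After this, vmread()/vmwrite()/vmlaunch()/vmresume() in vmx.h route
+ * to the evmcs_* helpers below.
+ */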
+
+static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
+{
+ switch (encoding) {
+ case GUEST_RIP:
+ *value = current_evmcs->guest_rip;
+ break;
+ case GUEST_RSP:
+ *value = current_evmcs->guest_rsp;
+ break;
+ case GUEST_RFLAGS:
+ *value = current_evmcs->guest_rflags;
+ break;
+ case HOST_IA32_PAT:
+ *value = current_evmcs->host_ia32_pat;
+ break;
+ case HOST_IA32_EFER:
+ *value = current_evmcs->host_ia32_efer;
+ break;
+ case HOST_CR0:
+ *value = current_evmcs->host_cr0;
+ break;
+ case HOST_CR3:
+ *value = current_evmcs->host_cr3;
+ break;
+ case HOST_CR4:
+ *value = current_evmcs->host_cr4;
+ break;
+ case HOST_IA32_SYSENTER_ESP:
+ *value = current_evmcs->host_ia32_sysenter_esp;
+ break;
+ case HOST_IA32_SYSENTER_EIP:
+ *value = current_evmcs->host_ia32_sysenter_eip;
+ break;
+ case HOST_RIP:
+ *value = current_evmcs->host_rip;
+ break;
+ case IO_BITMAP_A:
+ *value = current_evmcs->io_bitmap_a;
+ break;
+ case IO_BITMAP_B:
+ *value = current_evmcs->io_bitmap_b;
+ break;
+ case MSR_BITMAP:
+ *value = current_evmcs->msr_bitmap;
+ break;
+ case GUEST_ES_BASE:
+ *value = current_evmcs->guest_es_base;
+ break;
+ case GUEST_CS_BASE:
+ *value = current_evmcs->guest_cs_base;
+ break;
+ case GUEST_SS_BASE:
+ *value = current_evmcs->guest_ss_base;
+ break;
+ case GUEST_DS_BASE:
+ *value = current_evmcs->guest_ds_base;
+ break;
+ case GUEST_FS_BASE:
+ *value = current_evmcs->guest_fs_base;
+ break;
+ case GUEST_GS_BASE:
+ *value = current_evmcs->guest_gs_base;
+ break;
+ case GUEST_LDTR_BASE:
+ *value = current_evmcs->guest_ldtr_base;
+ break;
+ case GUEST_TR_BASE:
+ *value = current_evmcs->guest_tr_base;
+ break;
+ case GUEST_GDTR_BASE:
+ *value = current_evmcs->guest_gdtr_base;
+ break;
+ case GUEST_IDTR_BASE:
+ *value = current_evmcs->guest_idtr_base;
+ break;
+ case TSC_OFFSET:
+ *value = current_evmcs->tsc_offset;
+ break;
+ case VIRTUAL_APIC_PAGE_ADDR:
+ *value = current_evmcs->virtual_apic_page_addr;
+ break;
+ case VMCS_LINK_POINTER:
+ *value = current_evmcs->vmcs_link_pointer;
+ break;
+ case GUEST_IA32_DEBUGCTL:
+ *value = current_evmcs->guest_ia32_debugctl;
+ break;
+ case GUEST_IA32_PAT:
+ *value = current_evmcs->guest_ia32_pat;
+ break;
+ case GUEST_IA32_EFER:
+ *value = current_evmcs->guest_ia32_efer;
+ break;
+ case GUEST_PDPTR0:
+ *value = current_evmcs->guest_pdptr0;
+ break;
+ case GUEST_PDPTR1:
+ *value = current_evmcs->guest_pdptr1;
+ break;
+ case GUEST_PDPTR2:
+ *value = current_evmcs->guest_pdptr2;
+ break;
+ case GUEST_PDPTR3:
+ *value = current_evmcs->guest_pdptr3;
+ break;
+ case GUEST_PENDING_DBG_EXCEPTIONS:
+ *value = current_evmcs->guest_pending_dbg_exceptions;
+ break;
+ case GUEST_SYSENTER_ESP:
+ *value = current_evmcs->guest_sysenter_esp;
+ break;
+ case GUEST_SYSENTER_EIP:
+ *value = current_evmcs->guest_sysenter_eip;
+ break;
+ case CR0_GUEST_HOST_MASK:
+ *value = current_evmcs->cr0_guest_host_mask;
+ break;
+ case CR4_GUEST_HOST_MASK:
+ *value = current_evmcs->cr4_guest_host_mask;
+ break;
+ case CR0_READ_SHADOW:
+ *value = current_evmcs->cr0_read_shadow;
+ break;
+ case CR4_READ_SHADOW:
+ *value = current_evmcs->cr4_read_shadow;
+ break;
+ case GUEST_CR0:
+ *value = current_evmcs->guest_cr0;
+ break;
+ case GUEST_CR3:
+ *value = current_evmcs->guest_cr3;
+ break;
+ case GUEST_CR4:
+ *value = current_evmcs->guest_cr4;
+ break;
+ case GUEST_DR7:
+ *value = current_evmcs->guest_dr7;
+ break;
+ case HOST_FS_BASE:
+ *value = current_evmcs->host_fs_base;
+ break;
+ case HOST_GS_BASE:
+ *value = current_evmcs->host_gs_base;
+ break;
+ case HOST_TR_BASE:
+ *value = current_evmcs->host_tr_base;
+ break;
+ case HOST_GDTR_BASE:
+ *value = current_evmcs->host_gdtr_base;
+ break;
+ case HOST_IDTR_BASE:
+ *value = current_evmcs->host_idtr_base;
+ break;
+ case HOST_RSP:
+ *value = current_evmcs->host_rsp;
+ break;
+ case EPT_POINTER:
+ *value = current_evmcs->ept_pointer;
+ break;
+ case GUEST_BNDCFGS:
+ *value = current_evmcs->guest_bndcfgs;
+ break;
+ case XSS_EXIT_BITMAP:
+ *value = current_evmcs->xss_exit_bitmap;
+ break;
+ case GUEST_PHYSICAL_ADDRESS:
+ *value = current_evmcs->guest_physical_address;
+ break;
+ case EXIT_QUALIFICATION:
+ *value = current_evmcs->exit_qualification;
+ break;
+ case GUEST_LINEAR_ADDRESS:
+ *value = current_evmcs->guest_linear_address;
+ break;
+ case VM_EXIT_MSR_STORE_ADDR:
+ *value = current_evmcs->vm_exit_msr_store_addr;
+ break;
+ case VM_EXIT_MSR_LOAD_ADDR:
+ *value = current_evmcs->vm_exit_msr_load_addr;
+ break;
+ case VM_ENTRY_MSR_LOAD_ADDR:
+ *value = current_evmcs->vm_entry_msr_load_addr;
+ break;
+ case CR3_TARGET_VALUE0:
+ *value = current_evmcs->cr3_target_value0;
+ break;
+ case CR3_TARGET_VALUE1:
+ *value = current_evmcs->cr3_target_value1;
+ break;
+ case CR3_TARGET_VALUE2:
+ *value = current_evmcs->cr3_target_value2;
+ break;
+ case CR3_TARGET_VALUE3:
+ *value = current_evmcs->cr3_target_value3;
+ break;
+ case TPR_THRESHOLD:
+ *value = current_evmcs->tpr_threshold;
+ break;
+ case GUEST_INTERRUPTIBILITY_INFO:
+ *value = current_evmcs->guest_interruptibility_info;
+ break;
+ case CPU_BASED_VM_EXEC_CONTROL:
+ *value = current_evmcs->cpu_based_vm_exec_control;
+ break;
+ case EXCEPTION_BITMAP:
+ *value = current_evmcs->exception_bitmap;
+ break;
+ case VM_ENTRY_CONTROLS:
+ *value = current_evmcs->vm_entry_controls;
+ break;
+ case VM_ENTRY_INTR_INFO_FIELD:
+ *value = current_evmcs->vm_entry_intr_info_field;
+ break;
+ case VM_ENTRY_EXCEPTION_ERROR_CODE:
+ *value = current_evmcs->vm_entry_exception_error_code;
+ break;
+ case VM_ENTRY_INSTRUCTION_LEN:
+ *value = current_evmcs->vm_entry_instruction_len;
+ break;
+ case HOST_IA32_SYSENTER_CS:
+ *value = current_evmcs->host_ia32_sysenter_cs;
+ break;
+ case PIN_BASED_VM_EXEC_CONTROL:
+ *value = current_evmcs->pin_based_vm_exec_control;
+ break;
+ case VM_EXIT_CONTROLS:
+ *value = current_evmcs->vm_exit_controls;
+ break;
+ case SECONDARY_VM_EXEC_CONTROL:
+ *value = current_evmcs->secondary_vm_exec_control;
+ break;
+ case GUEST_ES_LIMIT:
+ *value = current_evmcs->guest_es_limit;
+ break;
+ case GUEST_CS_LIMIT:
+ *value = current_evmcs->guest_cs_limit;
+ break;
+ case GUEST_SS_LIMIT:
+ *value = current_evmcs->guest_ss_limit;
+ break;
+ case GUEST_DS_LIMIT:
+ *value = current_evmcs->guest_ds_limit;
+ break;
+ case GUEST_FS_LIMIT:
+ *value = current_evmcs->guest_fs_limit;
+ break;
+ case GUEST_GS_LIMIT:
+ *value = current_evmcs->guest_gs_limit;
+ break;
+ case GUEST_LDTR_LIMIT:
+ *value = current_evmcs->guest_ldtr_limit;
+ break;
+ case GUEST_TR_LIMIT:
+ *value = current_evmcs->guest_tr_limit;
+ break;
+ case GUEST_GDTR_LIMIT:
+ *value = current_evmcs->guest_gdtr_limit;
+ break;
+ case GUEST_IDTR_LIMIT:
+ *value = current_evmcs->guest_idtr_limit;
+ break;
+ case GUEST_ES_AR_BYTES:
+ *value = current_evmcs->guest_es_ar_bytes;
+ break;
+ case GUEST_CS_AR_BYTES:
+ *value = current_evmcs->guest_cs_ar_bytes;
+ break;
+ case GUEST_SS_AR_BYTES:
+ *value = current_evmcs->guest_ss_ar_bytes;
+ break;
+ case GUEST_DS_AR_BYTES:
+ *value = current_evmcs->guest_ds_ar_bytes;
+ break;
+ case GUEST_FS_AR_BYTES:
+ *value = current_evmcs->guest_fs_ar_bytes;
+ break;
+ case GUEST_GS_AR_BYTES:
+ *value = current_evmcs->guest_gs_ar_bytes;
+ break;
+ case GUEST_LDTR_AR_BYTES:
+ *value = current_evmcs->guest_ldtr_ar_bytes;
+ break;
+ case GUEST_TR_AR_BYTES:
+ *value = current_evmcs->guest_tr_ar_bytes;
+ break;
+ case GUEST_ACTIVITY_STATE:
+ *value = current_evmcs->guest_activity_state;
+ break;
+ case GUEST_SYSENTER_CS:
+ *value = current_evmcs->guest_sysenter_cs;
+ break;
+ case VM_INSTRUCTION_ERROR:
+ *value = current_evmcs->vm_instruction_error;
+ break;
+ case VM_EXIT_REASON:
+ *value = current_evmcs->vm_exit_reason;
+ break;
+ case VM_EXIT_INTR_INFO:
+ *value = current_evmcs->vm_exit_intr_info;
+ break;
+ case VM_EXIT_INTR_ERROR_CODE:
+ *value = current_evmcs->vm_exit_intr_error_code;
+ break;
+ case IDT_VECTORING_INFO_FIELD:
+ *value = current_evmcs->idt_vectoring_info_field;
+ break;
+ case IDT_VECTORING_ERROR_CODE:
+ *value = current_evmcs->idt_vectoring_error_code;
+ break;
+ case VM_EXIT_INSTRUCTION_LEN:
+ *value = current_evmcs->vm_exit_instruction_len;
+ break;
+ case VMX_INSTRUCTION_INFO:
+ *value = current_evmcs->vmx_instruction_info;
+ break;
+ case PAGE_FAULT_ERROR_CODE_MASK:
+ *value = current_evmcs->page_fault_error_code_mask;
+ break;
+ case PAGE_FAULT_ERROR_CODE_MATCH:
+ *value = current_evmcs->page_fault_error_code_match;
+ break;
+ case CR3_TARGET_COUNT:
+ *value = current_evmcs->cr3_target_count;
+ break;
+ case VM_EXIT_MSR_STORE_COUNT:
+ *value = current_evmcs->vm_exit_msr_store_count;
+ break;
+ case VM_EXIT_MSR_LOAD_COUNT:
+ *value = current_evmcs->vm_exit_msr_load_count;
+ break;
+ case VM_ENTRY_MSR_LOAD_COUNT:
+ *value = current_evmcs->vm_entry_msr_load_count;
+ break;
+ case HOST_ES_SELECTOR:
+ *value = current_evmcs->host_es_selector;
+ break;
+ case HOST_CS_SELECTOR:
+ *value = current_evmcs->host_cs_selector;
+ break;
+ case HOST_SS_SELECTOR:
+ *value = current_evmcs->host_ss_selector;
+ break;
+ case HOST_DS_SELECTOR:
+ *value = current_evmcs->host_ds_selector;
+ break;
+ case HOST_FS_SELECTOR:
+ *value = current_evmcs->host_fs_selector;
+ break;
+ case HOST_GS_SELECTOR:
+ *value = current_evmcs->host_gs_selector;
+ break;
+ case HOST_TR_SELECTOR:
+ *value = current_evmcs->host_tr_selector;
+ break;
+ case GUEST_ES_SELECTOR:
+ *value = current_evmcs->guest_es_selector;
+ break;
+ case GUEST_CS_SELECTOR:
+ *value = current_evmcs->guest_cs_selector;
+ break;
+ case GUEST_SS_SELECTOR:
+ *value = current_evmcs->guest_ss_selector;
+ break;
+ case GUEST_DS_SELECTOR:
+ *value = current_evmcs->guest_ds_selector;
+ break;
+ case GUEST_FS_SELECTOR:
+ *value = current_evmcs->guest_fs_selector;
+ break;
+ case GUEST_GS_SELECTOR:
+ *value = current_evmcs->guest_gs_selector;
+ break;
+ case GUEST_LDTR_SELECTOR:
+ *value = current_evmcs->guest_ldtr_selector;
+ break;
+ case GUEST_TR_SELECTOR:
+ *value = current_evmcs->guest_tr_selector;
+ break;
+ case VIRTUAL_PROCESSOR_ID:
+ *value = current_evmcs->virtual_processor_id;
+ break;
+ default: return 1;
+ }
+
+ return 0;
+}
+
+static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value)
+{
+ switch (encoding) {
+ case GUEST_RIP:
+ current_evmcs->guest_rip = value;
+ break;
+ case GUEST_RSP:
+ current_evmcs->guest_rsp = value;
+ break;
+ case GUEST_RFLAGS:
+ current_evmcs->guest_rflags = value;
+ break;
+ case HOST_IA32_PAT:
+ current_evmcs->host_ia32_pat = value;
+ break;
+ case HOST_IA32_EFER:
+ current_evmcs->host_ia32_efer = value;
+ break;
+ case HOST_CR0:
+ current_evmcs->host_cr0 = value;
+ break;
+ case HOST_CR3:
+ current_evmcs->host_cr3 = value;
+ break;
+ case HOST_CR4:
+ current_evmcs->host_cr4 = value;
+ break;
+ case HOST_IA32_SYSENTER_ESP:
+ current_evmcs->host_ia32_sysenter_esp = value;
+ break;
+ case HOST_IA32_SYSENTER_EIP:
+ current_evmcs->host_ia32_sysenter_eip = value;
+ break;
+ case HOST_RIP:
+ current_evmcs->host_rip = value;
+ break;
+ case IO_BITMAP_A:
+ current_evmcs->io_bitmap_a = value;
+ break;
+ case IO_BITMAP_B:
+ current_evmcs->io_bitmap_b = value;
+ break;
+ case MSR_BITMAP:
+ current_evmcs->msr_bitmap = value;
+ break;
+ case GUEST_ES_BASE:
+ current_evmcs->guest_es_base = value;
+ break;
+ case GUEST_CS_BASE:
+ current_evmcs->guest_cs_base = value;
+ break;
+ case GUEST_SS_BASE:
+ current_evmcs->guest_ss_base = value;
+ break;
+ case GUEST_DS_BASE:
+ current_evmcs->guest_ds_base = value;
+ break;
+ case GUEST_FS_BASE:
+ current_evmcs->guest_fs_base = value;
+ break;
+ case GUEST_GS_BASE:
+ current_evmcs->guest_gs_base = value;
+ break;
+ case GUEST_LDTR_BASE:
+ current_evmcs->guest_ldtr_base = value;
+ break;
+ case GUEST_TR_BASE:
+ current_evmcs->guest_tr_base = value;
+ break;
+ case GUEST_GDTR_BASE:
+ current_evmcs->guest_gdtr_base = value;
+ break;
+ case GUEST_IDTR_BASE:
+ current_evmcs->guest_idtr_base = value;
+ break;
+ case TSC_OFFSET:
+ current_evmcs->tsc_offset = value;
+ break;
+ case VIRTUAL_APIC_PAGE_ADDR:
+ current_evmcs->virtual_apic_page_addr = value;
+ break;
+ case VMCS_LINK_POINTER:
+ current_evmcs->vmcs_link_pointer = value;
+ break;
+ case GUEST_IA32_DEBUGCTL:
+ current_evmcs->guest_ia32_debugctl = value;
+ break;
+ case GUEST_IA32_PAT:
+ current_evmcs->guest_ia32_pat = value;
+ break;
+ case GUEST_IA32_EFER:
+ current_evmcs->guest_ia32_efer = value;
+ break;
+ case GUEST_PDPTR0:
+ current_evmcs->guest_pdptr0 = value;
+ break;
+ case GUEST_PDPTR1:
+ current_evmcs->guest_pdptr1 = value;
+ break;
+ case GUEST_PDPTR2:
+ current_evmcs->guest_pdptr2 = value;
+ break;
+ case GUEST_PDPTR3:
+ current_evmcs->guest_pdptr3 = value;
+ break;
+ case GUEST_PENDING_DBG_EXCEPTIONS:
+ current_evmcs->guest_pending_dbg_exceptions = value;
+ break;
+ case GUEST_SYSENTER_ESP:
+ current_evmcs->guest_sysenter_esp = value;
+ break;
+ case GUEST_SYSENTER_EIP:
+ current_evmcs->guest_sysenter_eip = value;
+ break;
+ case CR0_GUEST_HOST_MASK:
+ current_evmcs->cr0_guest_host_mask = value;
+ break;
+ case CR4_GUEST_HOST_MASK:
+ current_evmcs->cr4_guest_host_mask = value;
+ break;
+ case CR0_READ_SHADOW:
+ current_evmcs->cr0_read_shadow = value;
+ break;
+ case CR4_READ_SHADOW:
+ current_evmcs->cr4_read_shadow = value;
+ break;
+ case GUEST_CR0:
+ current_evmcs->guest_cr0 = value;
+ break;
+ case GUEST_CR3:
+ current_evmcs->guest_cr3 = value;
+ break;
+ case GUEST_CR4:
+ current_evmcs->guest_cr4 = value;
+ break;
+ case GUEST_DR7:
+ current_evmcs->guest_dr7 = value;
+ break;
+ case HOST_FS_BASE:
+ current_evmcs->host_fs_base = value;
+ break;
+ case HOST_GS_BASE:
+ current_evmcs->host_gs_base = value;
+ break;
+ case HOST_TR_BASE:
+ current_evmcs->host_tr_base = value;
+ break;
+ case HOST_GDTR_BASE:
+ current_evmcs->host_gdtr_base = value;
+ break;
+ case HOST_IDTR_BASE:
+ current_evmcs->host_idtr_base = value;
+ break;
+ case HOST_RSP:
+ current_evmcs->host_rsp = value;
+ break;
+ case EPT_POINTER:
+ current_evmcs->ept_pointer = value;
+ break;
+ case GUEST_BNDCFGS:
+ current_evmcs->guest_bndcfgs = value;
+ break;
+ case XSS_EXIT_BITMAP:
+ current_evmcs->xss_exit_bitmap = value;
+ break;
+ case GUEST_PHYSICAL_ADDRESS:
+ current_evmcs->guest_physical_address = value;
+ break;
+ case EXIT_QUALIFICATION:
+ current_evmcs->exit_qualification = value;
+ break;
+ case GUEST_LINEAR_ADDRESS:
+ current_evmcs->guest_linear_address = value;
+ break;
+ case VM_EXIT_MSR_STORE_ADDR:
+ current_evmcs->vm_exit_msr_store_addr = value;
+ break;
+ case VM_EXIT_MSR_LOAD_ADDR:
+ current_evmcs->vm_exit_msr_load_addr = value;
+ break;
+ case VM_ENTRY_MSR_LOAD_ADDR:
+ current_evmcs->vm_entry_msr_load_addr = value;
+ break;
+ case CR3_TARGET_VALUE0:
+ current_evmcs->cr3_target_value0 = value;
+ break;
+ case CR3_TARGET_VALUE1:
+ current_evmcs->cr3_target_value1 = value;
+ break;
+ case CR3_TARGET_VALUE2:
+ current_evmcs->cr3_target_value2 = value;
+ break;
+ case CR3_TARGET_VALUE3:
+ current_evmcs->cr3_target_value3 = value;
+ break;
+ case TPR_THRESHOLD:
+ current_evmcs->tpr_threshold = value;
+ break;
+ case GUEST_INTERRUPTIBILITY_INFO:
+ current_evmcs->guest_interruptibility_info = value;
+ break;
+ case CPU_BASED_VM_EXEC_CONTROL:
+ current_evmcs->cpu_based_vm_exec_control = value;
+ break;
+ case EXCEPTION_BITMAP:
+ current_evmcs->exception_bitmap = value;
+ break;
+ case VM_ENTRY_CONTROLS:
+ current_evmcs->vm_entry_controls = value;
+ break;
+ case VM_ENTRY_INTR_INFO_FIELD:
+ current_evmcs->vm_entry_intr_info_field = value;
+ break;
+ case VM_ENTRY_EXCEPTION_ERROR_CODE:
+ current_evmcs->vm_entry_exception_error_code = value;
+ break;
+ case VM_ENTRY_INSTRUCTION_LEN:
+ current_evmcs->vm_entry_instruction_len = value;
+ break;
+ case HOST_IA32_SYSENTER_CS:
+ current_evmcs->host_ia32_sysenter_cs = value;
+ break;
+ case PIN_BASED_VM_EXEC_CONTROL:
+ current_evmcs->pin_based_vm_exec_control = value;
+ break;
+ case VM_EXIT_CONTROLS:
+ current_evmcs->vm_exit_controls = value;
+ break;
+ case SECONDARY_VM_EXEC_CONTROL:
+ current_evmcs->secondary_vm_exec_control = value;
+ break;
+ case GUEST_ES_LIMIT:
+ current_evmcs->guest_es_limit = value;
+ break;
+ case GUEST_CS_LIMIT:
+ current_evmcs->guest_cs_limit = value;
+ break;
+ case GUEST_SS_LIMIT:
+ current_evmcs->guest_ss_limit = value;
+ break;
+ case GUEST_DS_LIMIT:
+ current_evmcs->guest_ds_limit = value;
+ break;
+ case GUEST_FS_LIMIT:
+ current_evmcs->guest_fs_limit = value;
+ break;
+ case GUEST_GS_LIMIT:
+ current_evmcs->guest_gs_limit = value;
+ break;
+ case GUEST_LDTR_LIMIT:
+ current_evmcs->guest_ldtr_limit = value;
+ break;
+ case GUEST_TR_LIMIT:
+ current_evmcs->guest_tr_limit = value;
+ break;
+ case GUEST_GDTR_LIMIT:
+ current_evmcs->guest_gdtr_limit = value;
+ break;
+ case GUEST_IDTR_LIMIT:
+ current_evmcs->guest_idtr_limit = value;
+ break;
+ case GUEST_ES_AR_BYTES:
+ current_evmcs->guest_es_ar_bytes = value;
+ break;
+ case GUEST_CS_AR_BYTES:
+ current_evmcs->guest_cs_ar_bytes = value;
+ break;
+ case GUEST_SS_AR_BYTES:
+ current_evmcs->guest_ss_ar_bytes = value;
+ break;
+ case GUEST_DS_AR_BYTES:
+ current_evmcs->guest_ds_ar_bytes = value;
+ break;
+ case GUEST_FS_AR_BYTES:
+ current_evmcs->guest_fs_ar_bytes = value;
+ break;
+ case GUEST_GS_AR_BYTES:
+ current_evmcs->guest_gs_ar_bytes = value;
+ break;
+ case GUEST_LDTR_AR_BYTES:
+ current_evmcs->guest_ldtr_ar_bytes = value;
+ break;
+ case GUEST_TR_AR_BYTES:
+ current_evmcs->guest_tr_ar_bytes = value;
+ break;
+ case GUEST_ACTIVITY_STATE:
+ current_evmcs->guest_activity_state = value;
+ break;
+ case GUEST_SYSENTER_CS:
+ current_evmcs->guest_sysenter_cs = value;
+ break;
+ case VM_INSTRUCTION_ERROR:
+ current_evmcs->vm_instruction_error = value;
+ break;
+ case VM_EXIT_REASON:
+ current_evmcs->vm_exit_reason = value;
+ break;
+ case VM_EXIT_INTR_INFO:
+ current_evmcs->vm_exit_intr_info = value;
+ break;
+ case VM_EXIT_INTR_ERROR_CODE:
+ current_evmcs->vm_exit_intr_error_code = value;
+ break;
+ case IDT_VECTORING_INFO_FIELD:
+ current_evmcs->idt_vectoring_info_field = value;
+ break;
+ case IDT_VECTORING_ERROR_CODE:
+ current_evmcs->idt_vectoring_error_code = value;
+ break;
+ case VM_EXIT_INSTRUCTION_LEN:
+ current_evmcs->vm_exit_instruction_len = value;
+ break;
+ case VMX_INSTRUCTION_INFO:
+ current_evmcs->vmx_instruction_info = value;
+ break;
+ case PAGE_FAULT_ERROR_CODE_MASK:
+ current_evmcs->page_fault_error_code_mask = value;
+ break;
+ case PAGE_FAULT_ERROR_CODE_MATCH:
+ current_evmcs->page_fault_error_code_match = value;
+ break;
+ case CR3_TARGET_COUNT:
+ current_evmcs->cr3_target_count = value;
+ break;
+ case VM_EXIT_MSR_STORE_COUNT:
+ current_evmcs->vm_exit_msr_store_count = value;
+ break;
+ case VM_EXIT_MSR_LOAD_COUNT:
+ current_evmcs->vm_exit_msr_load_count = value;
+ break;
+ case VM_ENTRY_MSR_LOAD_COUNT:
+ current_evmcs->vm_entry_msr_load_count = value;
+ break;
+ case HOST_ES_SELECTOR:
+ current_evmcs->host_es_selector = value;
+ break;
+ case HOST_CS_SELECTOR:
+ current_evmcs->host_cs_selector = value;
+ break;
+ case HOST_SS_SELECTOR:
+ current_evmcs->host_ss_selector = value;
+ break;
+ case HOST_DS_SELECTOR:
+ current_evmcs->host_ds_selector = value;
+ break;
+ case HOST_FS_SELECTOR:
+ current_evmcs->host_fs_selector = value;
+ break;
+ case HOST_GS_SELECTOR:
+ current_evmcs->host_gs_selector = value;
+ break;
+ case HOST_TR_SELECTOR:
+ current_evmcs->host_tr_selector = value;
+ break;
+ case GUEST_ES_SELECTOR:
+ current_evmcs->guest_es_selector = value;
+ break;
+ case GUEST_CS_SELECTOR:
+ current_evmcs->guest_cs_selector = value;
+ break;
+ case GUEST_SS_SELECTOR:
+ current_evmcs->guest_ss_selector = value;
+ break;
+ case GUEST_DS_SELECTOR:
+ current_evmcs->guest_ds_selector = value;
+ break;
+ case GUEST_FS_SELECTOR:
+ current_evmcs->guest_fs_selector = value;
+ break;
+ case GUEST_GS_SELECTOR:
+ current_evmcs->guest_gs_selector = value;
+ break;
+ case GUEST_LDTR_SELECTOR:
+ current_evmcs->guest_ldtr_selector = value;
+ break;
+ case GUEST_TR_SELECTOR:
+ current_evmcs->guest_tr_selector = value;
+ break;
+ case VIRTUAL_PROCESSOR_ID:
+ current_evmcs->virtual_processor_id = value;
+ break;
+ default: return 1;
+ }
+
+ return 0;
+}
+
+static inline int evmcs_vmlaunch(void)
+{
+ int ret;
+
+ current_evmcs->hv_clean_fields = 0;
+
+ __asm__ __volatile__("push %%rbp;"
+ "push %%rcx;"
+ "push %%rdx;"
+ "push %%rsi;"
+ "push %%rdi;"
+ "push $0;"
+ "mov %%rsp, (%[host_rsp]);"
+ "lea 1f(%%rip), %%rax;"
+ "mov %%rax, (%[host_rip]);"
+ "vmlaunch;"
+ "incq (%%rsp);"
+ "1: pop %%rax;"
+ "pop %%rdi;"
+ "pop %%rsi;"
+ "pop %%rdx;"
+ "pop %%rcx;"
+ "pop %%rbp;"
+ : [ret]"=&a"(ret)
+ : [host_rsp]"r"
+ ((uint64_t)&current_evmcs->host_rsp),
+ [host_rip]"r"
+ ((uint64_t)&current_evmcs->host_rip)
+ : "memory", "cc", "rbx", "r8", "r9", "r10",
+ "r11", "r12", "r13", "r14", "r15");
+ return ret;
+}
+
+/*
+ * No guest state (e.g. GPRs) is established by this vmresume.
+ */
+static inline int evmcs_vmresume(void)
+{
+ int ret;
+
+ current_evmcs->hv_clean_fields = 0;
+
+ __asm__ __volatile__("push %%rbp;"
+ "push %%rcx;"
+ "push %%rdx;"
+ "push %%rsi;"
+ "push %%rdi;"
+ "push $0;"
+ "mov %%rsp, (%[host_rsp]);"
+ "lea 1f(%%rip), %%rax;"
+ "mov %%rax, (%[host_rip]);"
+ "vmresume;"
+ "incq (%%rsp);"
+ "1: pop %%rax;"
+ "pop %%rdi;"
+ "pop %%rsi;"
+ "pop %%rdx;"
+ "pop %%rcx;"
+ "pop %%rbp;"
+ : [ret]"=&a"(ret)
+ : [host_rsp]"r"
+ ((uint64_t)&current_evmcs->host_rsp),
+ [host_rip]"r"
+ ((uint64_t)&current_evmcs->host_rip)
+ : "memory", "cc", "rbx", "r8", "r9", "r10",
+ "r11", "r12", "r13", "r14", "r15");
+ return ret;
+}
+
+#endif /* SELFTEST_KVM_EVMCS_H */
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index bb5a25fb82c6..a4e59e3b4826 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -7,7 +7,7 @@
*
*/
#ifndef SELFTEST_KVM_UTIL_H
-#define SELFTEST_KVM_UTIL_H 1
+#define SELFTEST_KVM_UTIL_H
#include "test_util.h"
@@ -17,12 +17,6 @@
#include "sparsebit.h"
-/*
- * Memslots can't cover the gfn starting at this gpa otherwise vCPUs can't be
- * created. Only applies to VMs using EPT.
- */
-#define KVM_DEFAULT_IDENTITY_MAP_ADDRESS 0xfffbc000ul
-
/* Callers of kvm_util only have an incomplete/opaque description of the
* structure kvm_util is using to maintain the state of a VM.
@@ -33,16 +27,23 @@ typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
/* Minimum allocated guest virtual and physical addresses */
-#define KVM_UTIL_MIN_VADDR 0x2000
+#define KVM_UTIL_MIN_VADDR 0x2000
#define DEFAULT_GUEST_PHY_PAGES 512
#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000
-#define DEFAULT_STACK_PGS 5
+#define DEFAULT_STACK_PGS 5
enum vm_guest_mode {
- VM_MODE_FLAT48PG,
+ VM_MODE_P52V48_4K,
+ VM_MODE_P52V48_64K,
+ VM_MODE_P40V48_4K,
+ VM_MODE_P40V48_64K,
+ NUM_VM_MODES,
};
+#define vm_guest_mode_string(m) vm_guest_mode_string[m]
+extern const char * const vm_guest_mode_string[];
+
enum vm_mem_backing_src_type {
VM_MEM_SRC_ANONYMOUS,
VM_MEM_SRC_ANONYMOUS_THP,
@@ -50,6 +51,7 @@ enum vm_mem_backing_src_type {
};
int kvm_check_cap(long cap);
+int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
void kvm_vm_free(struct kvm_vm *vmp);
@@ -57,15 +59,15 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
-int kvm_memcmp_hva_gva(void *hva,
- struct kvm_vm *vm, const vm_vaddr_t gva, size_t len);
+int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
+ size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
- uint32_t data_memslot, uint32_t pgd_memslot);
+ uint32_t data_memslot, uint32_t pgd_memslot);
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
-void vcpu_dump(FILE *stream, struct kvm_vm *vm,
- uint32_t vcpuid, uint8_t indent);
+void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
+ uint8_t indent);
void vm_create_irqchip(struct kvm_vm *vm);
@@ -74,13 +76,14 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
uint64_t guest_paddr, uint32_t slot, uint64_t npages,
uint32_t flags);
-void vcpu_ioctl(struct kvm_vm *vm,
- uint32_t vcpuid, unsigned long ioctl, void *arg);
+void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
+ void *arg);
void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
-void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot);
+void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot,
+ int gdt_memslot);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
- uint32_t data_memslot, uint32_t pgd_memslot);
+ uint32_t data_memslot, uint32_t pgd_memslot);
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
size_t size, uint32_t pgd_memslot);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
@@ -92,53 +95,35 @@ struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_mp_state *mp_state);
-void vcpu_regs_get(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_regs *regs);
-void vcpu_regs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_regs *regs);
+ struct kvm_mp_state *mp_state);
+void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
+void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);
-void vcpu_sregs_get(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs);
-void vcpu_sregs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs);
-int _vcpu_sregs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs);
+void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_sregs *sregs);
+void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_sregs *sregs);
+int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_sregs *sregs);
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events);
+ struct kvm_vcpu_events *events);
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events);
+ struct kvm_vcpu_events *events);
const char *exit_reason_str(unsigned int exit_reason);
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint32_t pgd_memslot);
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm,
- vm_paddr_t paddr_min, uint32_t memslot);
-
-struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
-void vcpu_set_cpuid(
- struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid);
-
-struct kvm_cpuid_entry2 *
-kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);
-
-static inline struct kvm_cpuid_entry2 *
-kvm_get_supported_cpuid_entry(uint32_t function)
-{
- return kvm_get_supported_cpuid_index(function, 0);
-}
+ uint32_t pgd_memslot);
+vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
+ uint32_t memslot);
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ vm_paddr_t paddr_min, uint32_t memslot);
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
void *guest_code);
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
-typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr,
- vm_paddr_t vmxon_paddr,
- vm_vaddr_t vmcs_vaddr,
- vm_paddr_t vmcs_paddr);
-
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
uint64_t end);
@@ -148,43 +133,49 @@ allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region);
int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
-#define GUEST_PORT_SYNC 0x1000
-#define GUEST_PORT_ABORT 0x1001
-#define GUEST_PORT_DONE 0x1002
-
-static inline void __exit_to_l0(uint16_t port, uint64_t arg0, uint64_t arg1)
-{
- __asm__ __volatile__("in %[port], %%al"
- :
- : [port]"d"(port), "D"(arg0), "S"(arg1)
- : "rax");
-}
-
-/*
- * Allows to pass three arguments to the host: port is 16bit wide,
- * arg0 & arg1 are 64bit wide
- */
-#define GUEST_SYNC_ARGS(_port, _arg0, _arg1) \
- __exit_to_l0(_port, (uint64_t) (_arg0), (uint64_t) (_arg1))
-
-#define GUEST_ASSERT(_condition) do { \
- if (!(_condition)) \
- GUEST_SYNC_ARGS(GUEST_PORT_ABORT, \
- "Failed guest assert: " \
- #_condition, __LINE__); \
- } while (0)
-
-#define GUEST_SYNC(stage) GUEST_SYNC_ARGS(GUEST_PORT_SYNC, "hello", stage)
+#define sync_global_to_guest(vm, g) ({ \
+ typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
+ memcpy(_p, &(g), sizeof(g)); \
+})
+
+#define sync_global_from_guest(vm, g) ({ \
+ typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
+ memcpy(&(g), _p, sizeof(g)); \
+})
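+
+/*
+ * Typical host-side use, as a minimal sketch mirroring the dirty log
+ * test: update the host copy, then push it into the guest:
+ *
+ *	iteration++;
+ *	sync_global_to_guest(vm, iteration);
+ */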
+
+/* ucall implementation types */
+typedef enum {
+ UCALL_PIO,
+ UCALL_MMIO,
+} ucall_type_t;
+
+/* Common ucalls */
+enum {
+ UCALL_NONE,
+ UCALL_SYNC,
+ UCALL_ABORT,
+ UCALL_DONE,
+};
-#define GUEST_DONE() GUEST_SYNC_ARGS(GUEST_PORT_DONE, 0, 0)
+#define UCALL_MAX_ARGS 6
-struct guest_args {
- uint64_t arg0;
- uint64_t arg1;
- uint16_t port;
-} __attribute__ ((packed));
+struct ucall {
+ uint64_t cmd;
+ uint64_t args[UCALL_MAX_ARGS];
+};
-void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id,
- struct guest_args *args);
+void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg);
+void ucall_uninit(struct kvm_vm *vm);
+void ucall(uint64_t cmd, int nargs, ...);
+uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
+
+#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage)
+#define GUEST_DONE() ucall(UCALL_DONE, 0)
+#define GUEST_ASSERT(_condition) do { \
+ if (!(_condition)) \
+ ucall(UCALL_ABORT, 2, \
+ "Failed guest assert: " \
+ #_condition, __LINE__); \
+} while (0)
#endif /* SELFTEST_KVM_UTIL_H */
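
A round-trip sketch of the ucall interface above (VCPU_ID is assumed to
be defined by the test, as dirty_log_test.c does):

	/* guest side */
	GUEST_SYNC(1);
	GUEST_DONE();

	/* host side */
	uint64_t stage;
	struct ucall uc;

	if (get_ucall(vm, VCPU_ID, &uc) == UCALL_SYNC)
		stage = uc.args[1];	/* the stage passed to GUEST_SYNC() */
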
diff --git a/tools/testing/selftests/kvm/include/sparsebit.h b/tools/testing/selftests/kvm/include/sparsebit.h
index 54cfeb6568d3..31e030915c1f 100644
--- a/tools/testing/selftests/kvm/include/sparsebit.h
+++ b/tools/testing/selftests/kvm/include/sparsebit.h
@@ -15,8 +15,8 @@
* even in the case where most bits are set.
*/
-#ifndef _TEST_SPARSEBIT_H_
-#define _TEST_SPARSEBIT_H_
+#ifndef SELFTEST_KVM_SPARSEBIT_H
+#define SELFTEST_KVM_SPARSEBIT_H
#include <stdbool.h>
#include <stdint.h>
@@ -72,4 +72,4 @@ void sparsebit_validate_internal(struct sparsebit *sbit);
}
#endif
-#endif /* _TEST_SPARSEBIT_H_ */
+#endif /* SELFTEST_KVM_SPARSEBIT_H */
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index 73c3933436ec..c7dafe8bd02c 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -7,8 +7,8 @@
*
*/
-#ifndef TEST_UTIL_H
-#define TEST_UTIL_H 1
+#ifndef SELFTEST_KVM_TEST_UTIL_H
+#define SELFTEST_KVM_TEST_UTIL_H
#include <stdlib.h>
#include <stdarg.h>
@@ -41,4 +41,4 @@ void test_assert(bool exp, const char *exp_str,
#a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \
} while (0)
-#endif /* TEST_UTIL_H */
+#endif /* SELFTEST_KVM_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/x86.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 42c3596815b8..e2884c2b81ff 100644
--- a/tools/testing/selftests/kvm/include/x86.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -1,5 +1,5 @@
/*
- * tools/testing/selftests/kvm/include/x86.h
+ * tools/testing/selftests/kvm/include/x86_64/processor.h
*
* Copyright (C) 2018, Google LLC.
*
@@ -7,8 +7,8 @@
*
*/
-#ifndef SELFTEST_KVM_X86_H
-#define SELFTEST_KVM_X86_H
+#ifndef SELFTEST_KVM_PROCESSOR_H
+#define SELFTEST_KVM_PROCESSOR_H
#include <assert.h>
#include <stdint.h>
@@ -305,7 +305,25 @@ static inline unsigned long get_xmm(int n)
struct kvm_x86_state;
struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
-void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state);
+void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_x86_state *state);
+
+struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
+void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_cpuid2 *cpuid);
+
+struct kvm_cpuid_entry2 *
+kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);
+
+static inline struct kvm_cpuid_entry2 *
+kvm_get_supported_cpuid_entry(uint32_t function)
+{
+ return kvm_get_supported_cpuid_index(function, 0);
+}
+
+uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+ uint64_t msr_value);
/*
* Basic CPU control in CR0
@@ -1044,4 +1062,4 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
#define MSR_VM_IGNNE 0xc0010115
#define MSR_VM_HSAVE_PA 0xc0010117
-#endif /* !SELFTEST_KVM_X86_H */
+#endif /* SELFTEST_KVM_PROCESSOR_H */
diff --git a/tools/testing/selftests/kvm/include/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index b9ffe1024d3a..c9bd935b939c 100644
--- a/tools/testing/selftests/kvm/include/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -1,5 +1,5 @@
/*
- * tools/testing/selftests/kvm/include/vmx.h
+ * tools/testing/selftests/kvm/include/x86_64/vmx.h
*
* Copyright (C) 2018, Google LLC.
*
@@ -11,7 +11,7 @@
#define SELFTEST_KVM_VMX_H
#include <stdint.h>
-#include "x86.h"
+#include "processor.h"
#define CPUID_VMX_BIT 5
@@ -339,6 +339,8 @@ struct vmx_msr_entry {
uint64_t value;
} __attribute__ ((aligned(16)));
+#include "evmcs.h"
+
static inline int vmxon(uint64_t phys)
{
uint8_t ret;
@@ -372,6 +374,9 @@ static inline int vmptrld(uint64_t vmcs_pa)
{
uint8_t ret;
+ if (enable_evmcs)
+ return -1;
+
__asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]"
: [ret]"=rm"(ret)
: [pa]"m"(vmcs_pa)
@@ -385,6 +390,9 @@ static inline int vmptrst(uint64_t *value)
uint64_t tmp;
uint8_t ret;
+ if (enable_evmcs)
+ return evmcs_vmptrst(value);
+
__asm__ __volatile__("vmptrst %[value]; setna %[ret]"
: [value]"=m"(tmp), [ret]"=rm"(ret)
: : "cc", "memory");
@@ -411,6 +419,9 @@ static inline int vmlaunch(void)
{
int ret;
+ if (enable_evmcs)
+ return evmcs_vmlaunch();
+
__asm__ __volatile__("push %%rbp;"
"push %%rcx;"
"push %%rdx;"
@@ -443,6 +454,9 @@ static inline int vmresume(void)
{
int ret;
+ if (enable_evmcs)
+ return evmcs_vmresume();
+
__asm__ __volatile__("push %%rbp;"
"push %%rcx;"
"push %%rdx;"
@@ -482,6 +496,9 @@ static inline int vmread(uint64_t encoding, uint64_t *value)
uint64_t tmp;
uint8_t ret;
+ if (enable_evmcs)
+ return evmcs_vmread(encoding, value);
+
__asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]"
: [value]"=rm"(tmp), [ret]"=rm"(ret)
: [encoding]"r"(encoding)
@@ -506,6 +523,9 @@ static inline int vmwrite(uint64_t encoding, uint64_t value)
{
uint8_t ret;
+ if (enable_evmcs)
+ return evmcs_vmwrite(encoding, value);
+
__asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]"
: [ret]"=rm"(ret)
: [value]"rm"(value), [encoding]"r"(encoding)
@@ -543,10 +563,19 @@ struct vmx_pages {
void *vmwrite_hva;
uint64_t vmwrite_gpa;
void *vmwrite;
+
+ void *vp_assist_hva;
+ uint64_t vp_assist_gpa;
+ void *vp_assist;
+
+ void *enlightened_vmcs_hva;
+ uint64_t enlightened_vmcs_gpa;
+ void *enlightened_vmcs;
};
struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
bool prepare_for_vmx_operation(struct vmx_pages *vmx);
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
+bool load_vmcs(struct vmx_pages *vmx);
-#endif /* !SELFTEST_KVM_VMX_H */
+#endif /* SELFTEST_KVM_VMX_H */
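
Once enable_vp_assist() has set enable_evmcs, the generic wrappers above
interpose transparently; for example (a sketch, value assumed):

	vmwrite(GUEST_RIP, (uint64_t)guest_code);	/* lands in evmcs_vmwrite() */
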
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
new file mode 100644
index 000000000000..b6022e2f116e
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AArch64 code
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_name */
+
+#include "kvm_util.h"
+#include "../kvm_util_internal.h"
+#include "processor.h"
+
+#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
+#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000
+
+static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
+{
+	/* Round v up to the next page boundary. */
+	return (v + vm->page_size - 1) & ~(vm->page_size - 1);
+}
+
+static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
+ uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
+
+ return (gva >> shift) & mask;
+}
+
+static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
+ uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+
+ TEST_ASSERT(vm->pgtable_levels == 4,
+ "Mode %d does not have 4 page table levels", vm->mode);
+
+ return (gva >> shift) & mask;
+}
+
+static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
+ uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+
+ TEST_ASSERT(vm->pgtable_levels >= 3,
+ "Mode %d does not have >= 3 page table levels", vm->mode);
+
+ return (gva >> shift) & mask;
+}
+
+static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+ return (gva >> vm->page_shift) & mask;
+}
+
+static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+{
+ uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
+ return entry & mask;
+}
+
+static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
+{
+ unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
+ return 1 << (vm->va_bits - shift);
+}
+
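+/* One page of PTEs holds page_size / 8 entries: 512 for 4K pages, 8192 for 64K. */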
+static uint64_t ptrs_per_pte(struct kvm_vm *vm)
+{
+ return 1 << (vm->page_shift - 3);
+}
+
+void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
+{
+ int rc;
+
+ if (!vm->pgd_created) {
+ vm_paddr_t paddr = vm_phy_pages_alloc(vm,
+ page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
+ KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+ vm->pgd = paddr;
+ vm->pgd_created = true;
+ }
+}
+
+void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+ uint32_t pgd_memslot, uint64_t flags)
+{
+ uint8_t attr_idx = flags & 7;
+ uint64_t *ptep;
+
+ TEST_ASSERT((vaddr % vm->page_size) == 0,
+ "Virtual address not on page boundary,\n"
+ " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
+ (vaddr >> vm->page_shift)),
+ "Invalid virtual address, vaddr: 0x%lx", vaddr);
+ TEST_ASSERT((paddr % vm->page_size) == 0,
+ "Physical address not on page boundary,\n"
+ " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
+ TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ "Physical address beyond beyond maximum supported,\n"
+ " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ paddr, vm->max_gfn, vm->page_size);
+
+ ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
+ if (!*ptep) {
+ *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+ *ptep |= 3;
+ }
+
+ switch (vm->pgtable_levels) {
+ case 4:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
+ if (!*ptep) {
+ *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+ *ptep |= 3;
+ }
+ /* fall through */
+ case 3:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
+ if (!*ptep) {
+ *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+ *ptep |= 3;
+ }
+ /* fall through */
+ case 2:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
+ break;
+ default:
+ TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
+ }
+
+ *ptep = paddr | 3;
+ *ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
+}
+
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+ uint32_t pgd_memslot)
+{
+ uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */
+
+ _virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
+}
+
+vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ uint64_t *ptep;
+
+ if (!vm->pgd_created)
+ goto unmapped_gva;
+
+ ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
+ if (!ptep)
+ goto unmapped_gva;
+
+ switch (vm->pgtable_levels) {
+ case 4:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
+ if (!ptep)
+ goto unmapped_gva;
+ /* fall through */
+ case 3:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
+ if (!ptep)
+ goto unmapped_gva;
+ /* fall through */
+ case 2:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
+ if (!ptep)
+ goto unmapped_gva;
+ break;
+ default:
+ TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
+ }
+
+ return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
+
+unmapped_gva:
+ TEST_ASSERT(false, "No mapping for vm virtual address, "
+ "gva: 0x%lx", gva);
+}
+
+static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
+{
+#ifdef DEBUG_VM
+ static const char * const type[] = { "", "pud", "pmd", "pte" };
+ uint64_t pte, *ptep;
+
+ if (level == 4)
+ return;
+
+ for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
+ ptep = addr_gpa2hva(vm, pte);
+ if (!*ptep)
+ continue;
+ printf("%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
+ pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
+ }
+#endif
+}
+
+void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+{
+ int level = 4 - (vm->pgtable_levels - 1);
+ uint64_t pgd, *ptep;
+
+ if (!vm->pgd_created)
+ return;
+
+ for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
+ ptep = addr_gpa2hva(vm, pgd);
+ if (!*ptep)
+ continue;
+ printf("%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
+ pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
+ }
+}
+
+struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
+ void *guest_code)
+{
+ uint64_t ptrs_per_4k_pte = 512;
+ uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2;
+ struct kvm_vm *vm;
+
+ vm = vm_create(VM_MODE_P52V48_4K, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+
+ kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+ vm_vcpu_add_default(vm, vcpuid, guest_code);
+
+ return vm;
+}
+
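+/* Stack sizing: five 4K pages (20K) by default; a single 64K page already
+ * exceeds that, so one page suffices when 64K pages are in use. */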
+void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+{
+ size_t stack_size = vm->page_size == 4096 ?
+ DEFAULT_STACK_PGS * vm->page_size :
+ vm->page_size;
+ uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
+ DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 0, 0);
+
+ vm_vcpu_add(vm, vcpuid, 0, 0);
+
+ set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
+ set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+}
+
+void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
+{
+ struct kvm_vcpu_init init;
+ uint64_t sctlr_el1, tcr_el1;
+
+ memset(&init, 0, sizeof(init));
+ init.target = KVM_ARM_TARGET_GENERIC_V8;
+ vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, &init);
+
+ /*
+ * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
+ * registers, which the variable argument list macros do.
+ */
+ set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20);
+
+ get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1);
+ get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1);
+
+ switch (vm->mode) {
+ case VM_MODE_P52V48_4K:
+ tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
+ tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
+ break;
+ case VM_MODE_P52V48_64K:
+ tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
+ tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
+ break;
+ case VM_MODE_P40V48_4K:
+ tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
+ tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
+ break;
+ case VM_MODE_P40V48_64K:
+ tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
+ tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
+ break;
+ default:
+ TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode);
+ }
+
+ sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
+ /* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */
+ tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
+ tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
+
+ set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1);
+ set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1);
+ set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1);
+ set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
+}
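
Taken together, the TCR_EL1 programming above composes like this for the P52V48_4K case; a worked sketch assuming the architectural field offsets used in the switch (TG0 at bit 14, IPS at bit 32, T0SZ in bits 5:0):

	uint64_t tcr = 0;

	tcr |= 0ul << 14;            /* TG0  = 0 -> 4K granule */
	tcr |= 6ul << 32;            /* IPS  = 6 -> 52-bit physical addresses */
	tcr |= (1 << 8) | (1 << 10); /* IRGN0/ORGN0 = write-back write-allocate */
	tcr |= 3 << 12;              /* SH0 = inner shareable */
	tcr |= 64 - 48;              /* T0SZ = 16 -> a 2^48-byte TTBR0 region */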
+
+void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+{
+ uint64_t pstate, pc;
+
+ get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
+ get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
+
+ fprintf(stream, "%*spstate: 0x%.16llx pc: 0x%.16llx\n",
+ indent, "", pstate, pc);
+
+}
diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c
index cd01144d27c8..6398efe67885 100644
--- a/tools/testing/selftests/kvm/lib/assert.c
+++ b/tools/testing/selftests/kvm/lib/assert.c
@@ -13,7 +13,7 @@
#include <execinfo.h>
#include <sys/syscall.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
/* Dumps the current stack trace to stderr. */
static void __attribute__((noinline)) test_dump_stack(void);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index e9ba389c48db..8c06da4f03db 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -16,10 +16,8 @@
#include <sys/stat.h>
#include <linux/kernel.h>
-#define KVM_DEV_PATH "/dev/kvm"
-
#define KVM_UTIL_PGS_PER_HUGEPG 512
-#define KVM_UTIL_MIN_PADDR 0x2000
+#define KVM_UTIL_MIN_PFN 2
/* Aligns x up to the next multiple of size. Size must be a power of 2. */
static void *align(void *x, size_t size)
@@ -30,7 +28,8 @@ static void *align(void *x, size_t size)
return (void *) (((size_t) x + mask) & ~mask);
}
-/* Capability
+/*
+ * Capability
*
* Input Args:
* cap - Capability
@@ -63,22 +62,52 @@ int kvm_check_cap(long cap)
return ret;
}
+/* VM Enable Capability
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * cap - Capability
+ *
+ * Output Args: None
+ *
+ * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
+ *
+ * Enables a capability (KVM_CAP_*) on the VM.
+ */
+int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
+{
+ int ret;
+
+ ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
+ TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
+ " rc: %i errno: %i", ret, errno);
+
+ return ret;
+}
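
Callers fill a struct kvm_enable_cap and pass it through; a minimal usage sketch (the capability shown is just the one exercised later in this series):

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MSR_PLATFORM_INFO,
		.args[0] = 1, /* capability-specific argument */
	};

	vm_enable_cap(vm, &cap);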
+
static void vm_open(struct kvm_vm *vm, int perm)
{
vm->kvm_fd = open(KVM_DEV_PATH, perm);
if (vm->kvm_fd < 0)
exit(KSFT_SKIP);
- /* Create VM. */
vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, NULL);
TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
"rc: %i errno: %i", vm->fd, errno);
}
-/* VM Create
+const char * const vm_guest_mode_string[] = {
+ "PA-bits:52, VA-bits:48, 4K pages",
+ "PA-bits:52, VA-bits:48, 64K pages",
+ "PA-bits:40, VA-bits:48, 4K pages",
+ "PA-bits:40, VA-bits:48, 64K pages",
+};
+
+/*
+ * VM Create
*
* Input Args:
- * mode - VM Mode (e.g. VM_MODE_FLAT48PG)
+ * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
* phy_pages - Physical memory pages
* perm - permission
*
@@ -87,7 +116,7 @@ static void vm_open(struct kvm_vm *vm, int perm)
* Return:
* Pointer to opaque structure that describes the created VM.
*
- * Creates a VM with the mode specified by mode (e.g. VM_MODE_FLAT48PG).
+ * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
* When phy_pages is non-zero, a memory region of phy_pages physical pages
* is created and mapped starting at guest physical address 0. The file
* descriptor to control the created VM is created with the permissions
@@ -98,7 +127,6 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
struct kvm_vm *vm;
int kvm_fd;
- /* Allocate memory. */
vm = calloc(1, sizeof(*vm));
TEST_ASSERT(vm != NULL, "Insufficient Memory");
@@ -107,26 +135,48 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
/* Setup mode specific traits. */
switch (vm->mode) {
- case VM_MODE_FLAT48PG:
+ case VM_MODE_P52V48_4K:
+ vm->pgtable_levels = 4;
vm->page_size = 0x1000;
vm->page_shift = 12;
-
- /* Limit to 48-bit canonical virtual addresses. */
- vm->vpages_valid = sparsebit_alloc();
- sparsebit_set_num(vm->vpages_valid,
- 0, (1ULL << (48 - 1)) >> vm->page_shift);
- sparsebit_set_num(vm->vpages_valid,
- (~((1ULL << (48 - 1)) - 1)) >> vm->page_shift,
- (1ULL << (48 - 1)) >> vm->page_shift);
-
- /* Limit physical addresses to 52-bits. */
- vm->max_gfn = ((1ULL << 52) >> vm->page_shift) - 1;
+ vm->va_bits = 48;
+ break;
+ case VM_MODE_P52V48_64K:
+ vm->pgtable_levels = 3;
+ vm->pa_bits = 52;
+ vm->page_size = 0x10000;
+ vm->page_shift = 16;
+ vm->va_bits = 48;
+ break;
+ case VM_MODE_P40V48_4K:
+ vm->pgtable_levels = 4;
+ vm->pa_bits = 40;
+ vm->va_bits = 48;
+ vm->page_size = 0x1000;
+ vm->page_shift = 12;
+ break;
+ case VM_MODE_P40V48_64K:
+ vm->pgtable_levels = 3;
+ vm->pa_bits = 40;
+ vm->va_bits = 48;
+ vm->page_size = 0x10000;
+ vm->page_shift = 16;
break;
-
default:
TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
}
+ /* Limit to VA-bit canonical virtual addresses. */
+ vm->vpages_valid = sparsebit_alloc();
+ sparsebit_set_num(vm->vpages_valid,
+ 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
+ sparsebit_set_num(vm->vpages_valid,
+ (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
+ (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
+
+ /* Limit physical addresses to PA-bits. */
+ vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+
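
For the default va_bits = 48, page_shift = 12 case the two sparsebit ranges above mark exactly the canonical halves of the address space; checking the arithmetic:

	/* low  half: VAs 0x0000000000000000 .. 0x00007fffffffffff
	 * high half: VAs 0xffff800000000000 .. 0xffffffffffffffff
	 * i.e. 2^47 bytes (2^35 4K pages) in each half.
	 */
	uint64_t half_pages = (1ULL << (48 - 1)) >> 12;          /* 0x800000000 */
	uint64_t high_start = (~((1ULL << (48 - 1)) - 1)) >> 12; /* 0xffff800000000 */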
/* Allocate and setup memory for guest. */
vm->vpages_mapped = sparsebit_alloc();
if (phy_pages != 0)
@@ -136,7 +186,8 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
return vm;
}
-/* VM Restart
+/*
+ * VM Restart
*
* Input Args:
* vm - VM that has been released before
@@ -163,7 +214,8 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm)
" rc: %i errno: %i\n"
" slot: %u flags: 0x%x\n"
" guest_phys_addr: 0x%lx size: 0x%lx",
- ret, errno, region->region.slot, region->region.flags,
+ ret, errno, region->region.slot,
+ region->region.flags,
region->region.guest_phys_addr,
region->region.memory_size);
}
@@ -179,7 +231,8 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
strerror(-ret));
}
-/* Userspace Memory Region Find
+/*
+ * Userspace Memory Region Find
*
* Input Args:
* vm - Virtual Machine
@@ -197,8 +250,8 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
* of the regions is returned. Null is returned only when no overlapping
* region exists.
*/
-static struct userspace_mem_region *userspace_mem_region_find(
- struct kvm_vm *vm, uint64_t start, uint64_t end)
+static struct userspace_mem_region *
+userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
struct userspace_mem_region *region;
@@ -214,7 +267,8 @@ static struct userspace_mem_region *userspace_mem_region_find(
return NULL;
}
-/* KVM Userspace Memory Region Find
+/*
+ * KVM Userspace Memory Region Find
*
* Input Args:
* vm - Virtual Machine
@@ -242,7 +296,8 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
return &region->region;
}
-/* VCPU Find
+/*
+ * VCPU Find
*
* Input Args:
* vm - Virtual Machine
@@ -257,8 +312,7 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
* returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU
* for the specified vcpuid.
*/
-struct vcpu *vcpu_find(struct kvm_vm *vm,
- uint32_t vcpuid)
+struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
{
struct vcpu *vcpup;
@@ -270,7 +324,8 @@ struct vcpu *vcpu_find(struct kvm_vm *vm,
return NULL;
}
-/* VM VCPU Remove
+/*
+ * VM VCPU Remove
*
* Input Args:
* vm - Virtual Machine
@@ -307,11 +362,9 @@ void kvm_vm_release(struct kvm_vm *vmp)
{
int ret;
- /* Free VCPUs. */
while (vmp->vcpu_head)
vm_vcpu_rm(vmp, vmp->vcpu_head->id);
- /* Close file descriptor for the VM. */
ret = close(vmp->fd);
TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
" vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);
@@ -321,7 +374,8 @@ void kvm_vm_release(struct kvm_vm *vmp)
" vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
}
-/* Destroys and frees the VM pointed to by vmp.
+/*
+ * Destroys and frees the VM pointed to by vmp.
*/
void kvm_vm_free(struct kvm_vm *vmp)
{
@@ -360,7 +414,8 @@ void kvm_vm_free(struct kvm_vm *vmp)
free(vmp);
}
-/* Memory Compare, host virtual to guest virtual
+/*
+ * Memory Compare, host virtual to guest virtual
*
* Input Args:
* hva - Starting host virtual address
@@ -382,23 +437,25 @@ void kvm_vm_free(struct kvm_vm *vmp)
* a length of len, to the guest bytes starting at the guest virtual
* address given by gva.
*/
-int kvm_memcmp_hva_gva(void *hva,
- struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
+int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
size_t amt;
- /* Compare a batch of bytes until either a match is found
+ /*
+ * Compare a batch of bytes until either a match is found
* or all the bytes have been compared.
*/
for (uintptr_t offset = 0; offset < len; offset += amt) {
uintptr_t ptr1 = (uintptr_t)hva + offset;
- /* Determine host address for guest virtual address
+ /*
+ * Determine host address for guest virtual address
* at offset.
*/
uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
- /* Determine amount to compare on this pass.
+ /*
+ * Determine amount to compare on this pass.
 * Don't allow the comparison to cross a page boundary.
*/
amt = len - offset;
@@ -410,7 +467,8 @@ int kvm_memcmp_hva_gva(void *hva,
assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
- /* Perform the comparison. If there is a difference
+ /*
+ * Perform the comparison. If there is a difference
* return that result to the caller, otherwise need
* to continue on looking for a mismatch.
*/
@@ -419,109 +477,15 @@ int kvm_memcmp_hva_gva(void *hva,
return ret;
}
- /* No mismatch found. Let the caller know the two memory
+ /*
+ * No mismatch found. Let the caller know the two memory
* areas are equal.
*/
return 0;
}
-/* Allocate an instance of struct kvm_cpuid2
- *
- * Input Args: None
- *
- * Output Args: None
- *
- * Return: A pointer to the allocated struct. The caller is responsible
- * for freeing this struct.
- *
- * Since kvm_cpuid2 uses a 0-length array to allow a the size of the
- * array to be decided at allocation time, allocation is slightly
- * complicated. This function uses a reasonable default length for
- * the array and performs the appropriate allocation.
- */
-static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
-{
- struct kvm_cpuid2 *cpuid;
- int nent = 100;
- size_t size;
-
- size = sizeof(*cpuid);
- size += nent * sizeof(struct kvm_cpuid_entry2);
- cpuid = malloc(size);
- if (!cpuid) {
- perror("malloc");
- abort();
- }
-
- cpuid->nent = nent;
-
- return cpuid;
-}
-
-/* KVM Supported CPUID Get
- *
- * Input Args: None
- *
- * Output Args:
- *
- * Return: The supported KVM CPUID
- *
- * Get the guest CPUID supported by KVM.
- */
-struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
-{
- static struct kvm_cpuid2 *cpuid;
- int ret;
- int kvm_fd;
-
- if (cpuid)
- return cpuid;
-
- cpuid = allocate_kvm_cpuid2();
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
-
- ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
- TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
- ret, errno);
-
- close(kvm_fd);
- return cpuid;
-}
-
-/* Locate a cpuid entry.
- *
- * Input Args:
- * cpuid: The cpuid.
- * function: The function of the cpuid entry to find.
- *
- * Output Args: None
- *
- * Return: A pointer to the cpuid entry. Never returns NULL.
- */
-struct kvm_cpuid_entry2 *
-kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
-{
- struct kvm_cpuid2 *cpuid;
- struct kvm_cpuid_entry2 *entry = NULL;
- int i;
-
- cpuid = kvm_get_supported_cpuid();
- for (i = 0; i < cpuid->nent; i++) {
- if (cpuid->entries[i].function == function &&
- cpuid->entries[i].index == index) {
- entry = &cpuid->entries[i];
- break;
- }
- }
-
- TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
- function, index);
- return entry;
-}
-
-/* VM Userspace Memory Region Add
+/*
+ * VM Userspace Memory Region Add
*
* Input Args:
* vm - Virtual Machine
@@ -563,7 +527,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
" vm->max_gfn: 0x%lx vm->page_size: 0x%x",
guest_paddr, npages, vm->max_gfn, vm->page_size);
- /* Confirm a mem region with an overlapping address doesn't
+ /*
+ * Confirm a mem region with an overlapping address doesn't
* already exist.
*/
region = (struct userspace_mem_region *) userspace_mem_region_find(
@@ -654,7 +619,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
vm->userspace_mem_region_head = region;
}
-/* Memslot to region
+/*
+ * Memslot to region
*
* Input Args:
* vm - Virtual Machine
@@ -668,8 +634,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
* on error (e.g. currently no memory region using memslot as a KVM
* memory slot ID).
*/
-static struct userspace_mem_region *memslot2region(struct kvm_vm *vm,
- uint32_t memslot)
+static struct userspace_mem_region *
+memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
struct userspace_mem_region *region;
@@ -689,7 +655,8 @@ static struct userspace_mem_region *memslot2region(struct kvm_vm *vm,
return region;
}
-/* VM Memory Region Flags Set
+/*
+ * VM Memory Region Flags Set
*
* Input Args:
* vm - Virtual Machine
@@ -707,7 +674,6 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
int ret;
struct userspace_mem_region *region;
- /* Locate memory region. */
region = memslot2region(vm, slot);
region->region.flags = flags;
@@ -719,7 +685,8 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
ret, errno, slot, flags);
}
-/* VCPU mmap Size
+/*
+ * VCPU mmap Size
*
* Input Args: None
*
@@ -749,7 +716,8 @@ static int vcpu_mmap_sz(void)
return ret;
}
-/* VM VCPU Add
+/*
+ * VM VCPU Add
*
* Input Args:
* vm - Virtual Machine
@@ -762,7 +730,8 @@ static int vcpu_mmap_sz(void)
* Creates and adds to the VM specified by vm and virtual CPU with
* the ID given by vcpuid.
*/
-void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot)
+void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot,
+ int gdt_memslot)
{
struct vcpu *vcpu;
@@ -800,7 +769,8 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_me
vcpu_setup(vm, vcpuid, pgd_memslot, gdt_memslot);
}
-/* VM Virtual Address Unused Gap
+/*
+ * VM Virtual Address Unused Gap
*
* Input Args:
* vm - Virtual Machine
@@ -820,14 +790,14 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_me
* sz unallocated bytes >= vaddr_min is available.
*/
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
- vm_vaddr_t vaddr_min)
+ vm_vaddr_t vaddr_min)
{
uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
/* Determine lowest permitted virtual page index. */
uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
if ((pgidx_start * vm->page_size) < vaddr_min)
- goto no_va_found;
+ goto no_va_found;
/* Loop over section with enough valid virtual page indexes. */
if (!sparsebit_is_set_num(vm->vpages_valid,
@@ -886,7 +856,8 @@ va_found:
return pgidx_start * vm->page_size;
}
-/* VM Virtual Address Allocate
+/*
+ * VM Virtual Address Allocate
*
* Input Args:
* vm - Virtual Machine
@@ -907,13 +878,14 @@ va_found:
* a page.
*/
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
- uint32_t data_memslot, uint32_t pgd_memslot)
+ uint32_t data_memslot, uint32_t pgd_memslot)
{
uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
virt_pgd_alloc(vm, pgd_memslot);
- /* Find an unused range of virtual page addresses of at least
+ /*
+ * Find an unused range of virtual page addresses of at least
* pages in length.
*/
vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
@@ -923,7 +895,8 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
pages--, vaddr += vm->page_size) {
vm_paddr_t paddr;
- paddr = vm_phy_page_alloc(vm, KVM_UTIL_MIN_PADDR, data_memslot);
+ paddr = vm_phy_page_alloc(vm,
+ KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);
virt_pg_map(vm, vaddr, paddr, pgd_memslot);
@@ -967,7 +940,8 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
}
}
-/* Address VM Physical to Host Virtual
+/*
+ * Address VM Physical to Host Virtual
*
* Input Args:
* vm - Virtual Machine
@@ -999,7 +973,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
return NULL;
}
-/* Address Host Virtual to VM Physical
+/*
+ * Address Host Virtual to VM Physical
*
* Input Args:
* vm - Virtual Machine
@@ -1033,7 +1008,8 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
return -1;
}
-/* VM Create IRQ Chip
+/*
+ * VM Create IRQ Chip
*
* Input Args:
* vm - Virtual Machine
@@ -1055,7 +1031,8 @@ void vm_create_irqchip(struct kvm_vm *vm)
vm->has_irqchip = true;
}
-/* VM VCPU State
+/*
+ * VM VCPU State
*
* Input Args:
* vm - Virtual Machine
@@ -1077,7 +1054,8 @@ struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
return vcpu->state;
}
-/* VM VCPU Run
+/*
+ * VM VCPU Run
*
* Input Args:
* vm - Virtual Machine
@@ -1103,13 +1081,14 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
int rc;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- do {
+ do {
rc = ioctl(vcpu->fd, KVM_RUN, NULL);
} while (rc == -1 && errno == EINTR);
return rc;
}
-/* VM VCPU Set MP State
+/*
+ * VM VCPU Set MP State
*
* Input Args:
* vm - Virtual Machine
@@ -1124,7 +1103,7 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
* by mp_state.
*/
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_mp_state *mp_state)
+ struct kvm_mp_state *mp_state)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
@@ -1136,7 +1115,8 @@ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
"rc: %i errno: %i", ret, errno);
}
-/* VM VCPU Regs Get
+/*
+ * VM VCPU Regs Get
*
* Input Args:
* vm - Virtual Machine
@@ -1150,21 +1130,20 @@ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
* Obtains the current register state for the VCPU specified by vcpuid
* and stores it at the location given by regs.
*/
-void vcpu_regs_get(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_regs *regs)
+void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Get the regs. */
ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
ret, errno);
}
-/* VM VCPU Regs Set
+/*
+ * VM VCPU Regs Set
*
* Input Args:
* vm - Virtual Machine
@@ -1178,99 +1157,46 @@ void vcpu_regs_get(struct kvm_vm *vm,
* Sets the regs of the VCPU specified by vcpuid to the values
* given by regs.
*/
-void vcpu_regs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_regs *regs)
+void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Set the regs. */
ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
ret, errno);
}
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events)
+ struct kvm_vcpu_events *events)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Get the regs. */
ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i",
ret, errno);
}
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events)
+ struct kvm_vcpu_events *events)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Set the regs. */
ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i",
ret, errno);
}
-/* VM VCPU Args Set
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * num - number of arguments
- * ... - arguments, each of type uint64_t
- *
- * Output Args: None
- *
- * Return: None
- *
- * Sets the first num function input arguments to the values
- * given as variable args. Each of the variable args is expected to
- * be of type uint64_t.
- */
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
-{
- va_list ap;
- struct kvm_regs regs;
-
- TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
- " num: %u\n",
- num);
-
- va_start(ap, num);
- vcpu_regs_get(vm, vcpuid, &regs);
-
- if (num >= 1)
- regs.rdi = va_arg(ap, uint64_t);
-
- if (num >= 2)
- regs.rsi = va_arg(ap, uint64_t);
-
- if (num >= 3)
- regs.rdx = va_arg(ap, uint64_t);
-
- if (num >= 4)
- regs.rcx = va_arg(ap, uint64_t);
-
- if (num >= 5)
- regs.r8 = va_arg(ap, uint64_t);
-
- if (num >= 6)
- regs.r9 = va_arg(ap, uint64_t);
-
- vcpu_regs_set(vm, vcpuid, &regs);
- va_end(ap);
-}
-
-/* VM VCPU System Regs Get
+/*
+ * VM VCPU System Regs Get
*
* Input Args:
* vm - Virtual Machine
@@ -1284,22 +1210,20 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
* Obtains the current system register state for the VCPU specified by
* vcpuid and stores it at the location given by sregs.
*/
-void vcpu_sregs_get(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs)
+void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Get the regs. */
- /* Get the regs. */
ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
ret, errno);
}
-/* VM VCPU System Regs Set
+/*
+ * VM VCPU System Regs Set
*
* Input Args:
* vm - Virtual Machine
@@ -1313,27 +1237,25 @@ void vcpu_sregs_get(struct kvm_vm *vm,
* Sets the system regs of the VCPU specified by vcpuid to the values
* given by sregs.
*/
-void vcpu_sregs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs)
+void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
int ret = _vcpu_sregs_set(vm, vcpuid, sregs);
TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
"rc: %i errno: %i", ret, errno);
}
-int _vcpu_sregs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs)
+int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Get the regs. */
return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
}
-/* VCPU Ioctl
+/*
+ * VCPU Ioctl
*
* Input Args:
* vm - Virtual Machine
@@ -1345,8 +1267,8 @@ int _vcpu_sregs_set(struct kvm_vm *vm,
*
* Issues an arbitrary ioctl on a VCPU fd.
*/
-void vcpu_ioctl(struct kvm_vm *vm,
- uint32_t vcpuid, unsigned long cmd, void *arg)
+void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
+ unsigned long cmd, void *arg)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
@@ -1358,7 +1280,8 @@ void vcpu_ioctl(struct kvm_vm *vm,
cmd, ret, errno, strerror(errno));
}
-/* VM Ioctl
+/*
+ * VM Ioctl
*
* Input Args:
* vm - Virtual Machine
@@ -1378,7 +1301,8 @@ void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
cmd, ret, errno, strerror(errno));
}
-/* VM Dump
+/*
+ * VM Dump
*
* Input Args:
* vm - Virtual Machine
@@ -1425,38 +1349,6 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
vcpu_dump(stream, vm, vcpu->id, indent + 2);
}
-/* VM VCPU Dump
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * indent - Left margin indent amount
- *
- * Output Args:
- * stream - Output FILE stream
- *
- * Return: None
- *
- * Dumps the current state of the VCPU specified by vcpuid, within the VM
- * given by vm, to the FILE stream given by stream.
- */
-void vcpu_dump(FILE *stream, struct kvm_vm *vm,
- uint32_t vcpuid, uint8_t indent)
-{
- struct kvm_regs regs;
- struct kvm_sregs sregs;
-
- fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);
-
- fprintf(stream, "%*sregs:\n", indent + 2, "");
- vcpu_regs_get(vm, vcpuid, &regs);
- regs_dump(stream, &regs, indent + 4);
-
- fprintf(stream, "%*ssregs:\n", indent + 2, "");
- vcpu_sregs_get(vm, vcpuid, &sregs);
- sregs_dump(stream, &sregs, indent + 4);
-}
-
/* Known KVM exit reasons */
static struct exit_reason {
unsigned int reason;
@@ -1487,7 +1379,8 @@ static struct exit_reason {
#endif
};
-/* Exit Reason String
+/*
+ * Exit Reason String
*
* Input Args:
* exit_reason - Exit reason
@@ -1513,10 +1406,12 @@ const char *exit_reason_str(unsigned int exit_reason)
return "Unknown";
}
-/* Physical Page Allocate
+/*
+ * Physical Contiguous Page Allocator
*
* Input Args:
* vm - Virtual Machine
+ * num - number of pages
* paddr_min - Physical address minimum
* memslot - Memory region to allocate page from
*
@@ -1525,47 +1420,59 @@ const char *exit_reason_str(unsigned int exit_reason)
* Return:
* Starting physical address
*
- * Within the VM specified by vm, locates an available physical page
- * at or above paddr_min. If found, the page is marked as in use
- * and its address is returned. A TEST_ASSERT failure occurs if no
- * page is available at or above paddr_min.
+ * Within the VM specified by vm, locates a range of available physical
+ * pages at or above paddr_min. If found, the pages are marked as in use
+ * and their base address is returned. A TEST_ASSERT failure occurs if
+ * not enough pages are available at or above paddr_min.
*/
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm,
- vm_paddr_t paddr_min, uint32_t memslot)
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ vm_paddr_t paddr_min, uint32_t memslot)
{
struct userspace_mem_region *region;
- sparsebit_idx_t pg;
+ sparsebit_idx_t pg, base;
+
+ TEST_ASSERT(num > 0, "Must allocate at least one page");
TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
"not divisible by page size.\n"
" paddr_min: 0x%lx page_size: 0x%x",
paddr_min, vm->page_size);
- /* Locate memory region. */
region = memslot2region(vm, memslot);
+ base = pg = paddr_min >> vm->page_shift;
- /* Locate next available physical page at or above paddr_min. */
- pg = paddr_min >> vm->page_shift;
-
- if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
- pg = sparsebit_next_set(region->unused_phy_pages, pg);
- if (pg == 0) {
- fprintf(stderr, "No guest physical page available, "
- "paddr_min: 0x%lx page_size: 0x%x memslot: %u",
- paddr_min, vm->page_size, memslot);
- fputs("---- vm dump ----\n", stderr);
- vm_dump(stderr, vm, 2);
- abort();
+ do {
+ for (; pg < base + num; ++pg) {
+ if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
+ base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
+ break;
+ }
}
+ } while (pg && pg != base + num);
+
+ if (pg == 0) {
+ fprintf(stderr, "No guest physical page available, "
+ "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
+ paddr_min, vm->page_size, memslot);
+ fputs("---- vm dump ----\n", stderr);
+ vm_dump(stderr, vm, 2);
+ abort();
}
- /* Specify page as in use and return its address. */
- sparsebit_clear(region->unused_phy_pages, pg);
+ for (pg = base; pg < base + num; ++pg)
+ sparsebit_clear(region->unused_phy_pages, pg);
- return pg * vm->page_size;
+ return base * vm->page_size;
}
-/* Address Guest Virtual to Host Virtual
+vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
+ uint32_t memslot)
+{
+ return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
+}
+
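
The do/while scan above restarts the candidate window at the next set bit whenever it trips over an allocated page, and succeeds once num consecutive free bits survive the inner loop. The same idea over a plain byte-per-bit array, as a hedged standalone sketch (not part of the library):

	/* Find `num` consecutive 1s in `avail` at or after `start`; returns the
	 * base index, or 0 on failure (mirroring the sparsebit version's sentinel).
	 */
	static size_t find_contig(const unsigned char *avail, size_t nbits,
				  size_t start, size_t num)
	{
		size_t base = start, pg = start;

		while (pg < nbits && pg != base + num) {
			if (avail[pg])
				pg++;
			else
				base = ++pg; /* restart the window past the 0 bit */
		}
		return pg == base + num ? base : 0;
	}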
+/*
+ * Address Guest Virtual to Host Virtual
*
* Input Args:
* vm - Virtual Machine
@@ -1580,17 +1487,3 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
-
-void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id,
- struct guest_args *args)
-{
- struct kvm_run *run = vcpu_state(vm, vcpu_id);
- struct kvm_regs regs;
-
- memset(&regs, 0, sizeof(regs));
- vcpu_regs_get(vm, vcpu_id, &regs);
-
- args->port = run->io.port;
- args->arg0 = regs.rdi;
- args->arg1 = regs.rsi;
-}
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index 542ed606b338..52701db0f253 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -1,28 +1,29 @@
/*
- * tools/testing/selftests/kvm/lib/kvm_util.c
+ * tools/testing/selftests/kvm/lib/kvm_util_internal.h
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
-#ifndef KVM_UTIL_INTERNAL_H
-#define KVM_UTIL_INTERNAL_H 1
+#ifndef SELFTEST_KVM_UTIL_INTERNAL_H
+#define SELFTEST_KVM_UTIL_INTERNAL_H
#include "sparsebit.h"
+#define KVM_DEV_PATH "/dev/kvm"
+
#ifndef BITS_PER_BYTE
-#define BITS_PER_BYTE 8
+#define BITS_PER_BYTE 8
#endif
#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long))
+#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long))
#endif
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)
-/* Concrete definition of struct kvm_vm. */
struct userspace_mem_region {
struct userspace_mem_region *next, *prev;
struct kvm_userspace_memory_region region;
@@ -45,14 +46,16 @@ struct kvm_vm {
int mode;
int kvm_fd;
int fd;
+ unsigned int pgtable_levels;
unsigned int page_size;
unsigned int page_shift;
+ unsigned int pa_bits;
+ unsigned int va_bits;
uint64_t max_gfn;
struct vcpu *vcpu_head;
struct userspace_mem_region *userspace_mem_region_head;
struct sparsebit *vpages_valid;
struct sparsebit *vpages_mapped;
-
bool has_irqchip;
bool pgd_created;
vm_paddr_t pgd;
@@ -60,13 +63,11 @@ struct kvm_vm {
vm_vaddr_t tss;
};
-struct vcpu *vcpu_find(struct kvm_vm *vm,
- uint32_t vcpuid);
-void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot);
+struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot,
+ int gdt_memslot);
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
-void regs_dump(FILE *stream, struct kvm_regs *regs,
- uint8_t indent);
-void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
- uint8_t indent);
+void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent);
+void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent);
-#endif
+#endif /* SELFTEST_KVM_UTIL_INTERNAL_H */
diff --git a/tools/testing/selftests/kvm/lib/ucall.c b/tools/testing/selftests/kvm/lib/ucall.c
new file mode 100644
index 000000000000..4777f9bb5194
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/ucall.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ucall support. A ucall is a "hypercall to userspace".
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ */
+#include "kvm_util.h"
+#include "kvm_util_internal.h"
+
+#define UCALL_PIO_PORT ((uint16_t)0x1000)
+
+static ucall_type_t ucall_type;
+static vm_vaddr_t *ucall_exit_mmio_addr;
+
+static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
+{
+ if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
+ return false;
+
+ virt_pg_map(vm, gpa, gpa, 0);
+
+ ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
+ sync_global_to_guest(vm, ucall_exit_mmio_addr);
+
+ return true;
+}
+
+void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
+{
+ ucall_type = type;
+ sync_global_to_guest(vm, ucall_type);
+
+ if (type == UCALL_PIO)
+ return;
+
+ if (type == UCALL_MMIO) {
+ vm_paddr_t gpa, start, end, step;
+ bool ret;
+
+ if (arg) {
+ gpa = (vm_paddr_t)arg;
+ ret = ucall_mmio_init(vm, gpa);
+ TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);
+ return;
+ }
+
+ /*
+ * Find an address within the allowed virtual address space
+ * that does _not_ have a KVM memory region associated with it.
+ * Identity mapping an address like this allows the guest to
+ * access it, but as KVM doesn't know what to do with it, it
+ * will assume it's something userspace handles and exit with
+ * KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
+ * Here we start with a guess that the addresses around two
+ * thirds of the VA space are unmapped and then work both down
+ * and up from there in 1/6 VA space sized steps.
+ */
+ start = 1ul << (vm->va_bits * 2 / 3);
+ end = 1ul << vm->va_bits;
+ step = 1ul << (vm->va_bits / 6);
+ for (gpa = start; gpa >= 0; gpa -= step) {
+ if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
+ return;
+ }
+ for (gpa = start + step; gpa < end; gpa += step) {
+ if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
+ return;
+ }
+ TEST_ASSERT(false, "Can't find a ucall mmio address");
+ }
+}
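
Plugging the default 48 VA bits into the probe loop above gives concrete numbers (they follow directly from the shifts in the code):

	uint64_t start = 1ul << (48 * 2 / 3); /* 1ul << 32 = 0x100000000 */
	uint64_t end   = 1ul << 48;           /* 0x1000000000000         */
	uint64_t step  = 1ul << (48 / 6);     /* 1ul << 8  = 0x100       */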
+
+void ucall_uninit(struct kvm_vm *vm)
+{
+ ucall_type = 0;
+ sync_global_to_guest(vm, ucall_type);
+ ucall_exit_mmio_addr = 0;
+ sync_global_to_guest(vm, ucall_exit_mmio_addr);
+}
+
+static void ucall_pio_exit(struct ucall *uc)
+{
+#ifdef __x86_64__
+ asm volatile("in %[port], %%al"
+ : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax");
+#endif
+}
+
+static void ucall_mmio_exit(struct ucall *uc)
+{
+ *ucall_exit_mmio_addr = (vm_vaddr_t)uc;
+}
+
+void ucall(uint64_t cmd, int nargs, ...)
+{
+ struct ucall uc = {
+ .cmd = cmd,
+ };
+ va_list va;
+ int i;
+
+ nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
+
+ va_start(va, nargs);
+ for (i = 0; i < nargs; ++i)
+ uc.args[i] = va_arg(va, uint64_t);
+ va_end(va);
+
+ switch (ucall_type) {
+ case UCALL_PIO:
+ ucall_pio_exit(&uc);
+ break;
+ case UCALL_MMIO:
+ ucall_mmio_exit(&uc);
+ break;
+ }
+}
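
Guest code normally reaches ucall() through convenience macros rather than calling it directly. Assuming expansions consistent with how the tests below decode the arguments (the actual definitions live in a header outside this diff):

	#define GUEST_SYNC(stage)	ucall(UCALL_SYNC, 2, "hello", stage)
	#define GUEST_DONE()		ucall(UCALL_DONE, 0)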
+
+uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+{
+ struct kvm_run *run = vcpu_state(vm, vcpu_id);
+
+ memset(uc, 0, sizeof(*uc));
+
+#ifdef __x86_64__
+ if (ucall_type == UCALL_PIO && run->exit_reason == KVM_EXIT_IO &&
+ run->io.port == UCALL_PIO_PORT) {
+ struct kvm_regs regs;
+ vcpu_regs_get(vm, vcpu_id, &regs);
+ memcpy(uc, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), sizeof(*uc));
+ return uc->cmd;
+ }
+#endif
+ if (ucall_type == UCALL_MMIO && run->exit_reason == KVM_EXIT_MMIO &&
+ run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
+ vm_vaddr_t gva;
+ TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
+ "Unexpected ucall exit mmio address access");
+ gva = *(vm_vaddr_t *)run->mmio.data;
+ memcpy(uc, addr_gva2hva(vm, gva), sizeof(*uc));
+ }
+
+ return uc->cmd;
+}
diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index a3122f1949a8..f28127f4a3af 100644
--- a/tools/testing/selftests/kvm/lib/x86.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1,5 +1,5 @@
/*
- * tools/testing/selftests/kvm/lib/x86.c
+ * tools/testing/selftests/kvm/lib/x86_64/processor.c
*
* Copyright (C) 2018, Google LLC.
*
@@ -10,8 +10,8 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "kvm_util_internal.h"
-#include "x86.h"
+#include "../kvm_util_internal.h"
+#include "processor.h"
/* Minimum physical address used for virtual translation tables. */
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
@@ -231,7 +231,7 @@ void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
int rc;
- TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
+ TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
/* If needed, create page map l4 table. */
@@ -264,7 +264,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
uint16_t index[4];
struct pageMapL4Entry *pml4e;
- TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
+ TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
TEST_ASSERT((vaddr % vm->page_size) == 0,
@@ -551,7 +551,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
struct pageTableEntry *pte;
void *hva;
- TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
+ TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
index[0] = (gva >> 12) & 0x1ffu;
@@ -624,9 +624,9 @@ void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
kvm_setup_gdt(vm, &sregs.gdt, gdt_memslot, pgd_memslot);
switch (vm->mode) {
- case VM_MODE_FLAT48PG:
+ case VM_MODE_P52V48_4K:
sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
- sregs.cr4 |= X86_CR4_PAE;
+ sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
kvm_seg_set_unusable(&sregs.ldt);
@@ -672,6 +672,102 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
vcpu_set_mp_state(vm, vcpuid, &mp_state);
}
+/* Allocate an instance of struct kvm_cpuid2
+ *
+ * Input Args: None
+ *
+ * Output Args: None
+ *
+ * Return: A pointer to the allocated struct. The caller is responsible
+ * for freeing this struct.
+ *
+ * Since kvm_cpuid2 uses a 0-length array to allow the size of the
+ * array to be decided at allocation time, allocation is slightly
+ * complicated. This function uses a reasonable default length for
+ * the array and performs the appropriate allocation.
+ */
+static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
+{
+ struct kvm_cpuid2 *cpuid;
+ int nent = 100;
+ size_t size;
+
+ size = sizeof(*cpuid);
+ size += nent * sizeof(struct kvm_cpuid_entry2);
+ cpuid = malloc(size);
+ if (!cpuid) {
+ perror("malloc");
+ abort();
+ }
+
+ cpuid->nent = nent;
+
+ return cpuid;
+}
+
+/* KVM Supported CPUID Get
+ *
+ * Input Args: None
+ *
+ * Output Args:
+ *
+ * Return: The supported KVM CPUID
+ *
+ * Get the guest CPUID supported by KVM.
+ */
+struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
+{
+ static struct kvm_cpuid2 *cpuid;
+ int ret;
+ int kvm_fd;
+
+ if (cpuid)
+ return cpuid;
+
+ cpuid = allocate_kvm_cpuid2();
+ kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
+ if (kvm_fd < 0)
+ exit(KSFT_SKIP);
+
+ ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
+ TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
+ ret, errno);
+
+ close(kvm_fd);
+ return cpuid;
+}
+
+/* Locate a cpuid entry.
+ *
+ * Input Args:
+ * cpuid: The cpuid.
+ * function: The function of the cpuid entry to find.
+ *
+ * Output Args: None
+ *
+ * Return: A pointer to the cpuid entry. Never returns NULL.
+ */
+struct kvm_cpuid_entry2 *
+kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
+{
+ struct kvm_cpuid2 *cpuid;
+ struct kvm_cpuid_entry2 *entry = NULL;
+ int i;
+
+ cpuid = kvm_get_supported_cpuid();
+ for (i = 0; i < cpuid->nent; i++) {
+ if (cpuid->entries[i].function == function &&
+ cpuid->entries[i].index == index) {
+ entry = &cpuid->entries[i];
+ break;
+ }
+ }
+
+ TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
+ function, index);
+ return entry;
+}
+
/* VM VCPU CPUID Set
*
* Input Args:
@@ -698,6 +794,7 @@ void vcpu_set_cpuid(struct kvm_vm *vm,
rc, errno);
}
+
/* Create a VM with reasonable defaults
*
* Input Args:
@@ -726,7 +823,7 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
/* Create VM */
- vm = vm_create(VM_MODE_FLAT48PG,
+ vm = vm_create(VM_MODE_P52V48_4K,
DEFAULT_GUEST_PHY_PAGES + extra_pg_pages,
O_RDWR);
@@ -742,6 +839,154 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
return vm;
}
+/* VCPU Get MSR
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * msr_index - Index of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
+ *
+ * Get value of MSR for VCPU.
+ */
+uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ struct {
+ struct kvm_msrs header;
+ struct kvm_msr_entry entry;
+ } buffer = {};
+ int r;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+ buffer.header.nmsrs = 1;
+ buffer.entry.index = msr_index;
+ r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
+ TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
+ " rc: %i errno: %i", r, errno);
+
+ return buffer.entry.data;
+}
+
+/* VCPU Set MSR
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * msr_index - Index of MSR
+ * msr_value - New value of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, nothing. On failure a TEST_ASSERT is produced.
+ *
+ * Set value of MSR for VCPU.
+ */
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+ uint64_t msr_value)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ struct {
+ struct kvm_msrs header;
+ struct kvm_msr_entry entry;
+ } buffer = {};
+ int r;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+ memset(&buffer, 0, sizeof(buffer));
+ buffer.header.nmsrs = 1;
+ buffer.entry.index = msr_index;
+ buffer.entry.data = msr_value;
+ r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
+ TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
+ " rc: %i errno: %i", r, errno);
+}
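
Both helpers place a single kvm_msr_entry after the flexible-array tail of struct kvm_msrs; batching several MSRs per ioctl only needs a larger tail. A sketch (not part of this library; KVM_GET_MSRS returns the number of entries it processed):

	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entries[2];
	} buffer = {};
	int r;

	buffer.header.nmsrs = 2;
	buffer.entries[0].index = MSR_IA32_TSC_ADJUST;
	buffer.entries[1].index = MSR_PLATFORM_INFO;
	r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
	TEST_ASSERT(r == 2, "KVM_GET_MSRS IOCTL failed, rc: %i errno: %i", r, errno);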
+
+/* VM VCPU Args Set
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * num - number of arguments
+ * ... - arguments, each of type uint64_t
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Sets the first num function input arguments to the values
+ * given as variable args. Each of the variable args is expected to
+ * be of type uint64_t.
+ */
+void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+{
+ va_list ap;
+ struct kvm_regs regs;
+
+ TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
+ " num: %u\n",
+ num);
+
+ va_start(ap, num);
+ vcpu_regs_get(vm, vcpuid, &regs);
+
+ if (num >= 1)
+ regs.rdi = va_arg(ap, uint64_t);
+
+ if (num >= 2)
+ regs.rsi = va_arg(ap, uint64_t);
+
+ if (num >= 3)
+ regs.rdx = va_arg(ap, uint64_t);
+
+ if (num >= 4)
+ regs.rcx = va_arg(ap, uint64_t);
+
+ if (num >= 5)
+ regs.r8 = va_arg(ap, uint64_t);
+
+ if (num >= 6)
+ regs.r9 = va_arg(ap, uint64_t);
+
+ vcpu_regs_set(vm, vcpuid, &regs);
+ va_end(ap);
+}
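
The register order is the x86-64 SysV calling convention, so the guest entry point sees the values as ordinary C parameters; the evmcs test below relies on exactly this to hand the guest its vmx_pages pointer:

	/* Host side: the first argument lands in RDI ... */
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);

	/* ... guest side: RDI arrives as the first parameter. */
	void guest_code(struct vmx_pages *vmx_pages);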
+
+/*
+ * VM VCPU Dump
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * indent - Left margin indent amount
+ *
+ * Output Args:
+ * stream - Output FILE stream
+ *
+ * Return: None
+ *
+ * Dumps the current state of the VCPU specified by vcpuid, within the VM
+ * given by vm, to the FILE stream given by stream.
+ */
+void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+{
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+
+ fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);
+
+ fprintf(stream, "%*sregs:\n", indent + 2, "");
+ vcpu_regs_get(vm, vcpuid, &regs);
+ regs_dump(stream, &regs, indent + 4);
+
+ fprintf(stream, "%*ssregs:\n", indent + 2, "");
+ vcpu_sregs_get(vm, vcpuid, &sregs);
+ sregs_dump(stream, &sregs, indent + 4);
+}
+
struct kvm_x86_state {
struct kvm_vcpu_events events;
struct kvm_mp_state mp_state;
diff --git a/tools/testing/selftests/kvm/lib/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index b987c3c970eb..771ba6bf751c 100644
--- a/tools/testing/selftests/kvm/lib/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -1,5 +1,5 @@
/*
- * tools/testing/selftests/kvm/lib/x86.c
+ * tools/testing/selftests/kvm/lib/x86_64/vmx.c
*
* Copyright (C) 2018, Google LLC.
*
@@ -10,9 +10,11 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#include "vmx.h"
+bool enable_evmcs;
+
/* Allocate memory regions for nested VMX tests.
*
* Input Args:
@@ -62,6 +64,20 @@ vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
memset(vmx->vmwrite_hva, 0, getpagesize());
+ /* Setup of a region of guest memory for the VP Assist page. */
+ vmx->vp_assist = (void *)vm_vaddr_alloc(vm, getpagesize(),
+ 0x10000, 0, 0);
+ vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
+ vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);
+
+ /* Setup of a region of guest memory for the enlightened VMCS. */
+ vmx->enlightened_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(),
+ 0x10000, 0, 0);
+ vmx->enlightened_vmcs_hva =
+ addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
+ vmx->enlightened_vmcs_gpa =
+ addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);
+
*p_vmx_gva = vmx_gva;
return vmx;
}
@@ -107,18 +123,31 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
if (vmxon(vmx->vmxon_gpa))
return false;
- /* Load a VMCS. */
- *(uint32_t *)(vmx->vmcs) = vmcs_revision();
- if (vmclear(vmx->vmcs_gpa))
- return false;
-
- if (vmptrld(vmx->vmcs_gpa))
- return false;
+ return true;
+}
- /* Setup shadow VMCS, do not load it yet. */
- *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
- if (vmclear(vmx->shadow_vmcs_gpa))
- return false;
+bool load_vmcs(struct vmx_pages *vmx)
+{
+ if (!enable_evmcs) {
+ /* Load a VMCS. */
+ *(uint32_t *)(vmx->vmcs) = vmcs_revision();
+ if (vmclear(vmx->vmcs_gpa))
+ return false;
+
+ if (vmptrld(vmx->vmcs_gpa))
+ return false;
+
+ /* Setup shadow VMCS, do not load it yet. */
+ *(uint32_t *)(vmx->shadow_vmcs) =
+ vmcs_revision() | 0x80000000ul;
+ if (vmclear(vmx->shadow_vmcs_gpa))
+ return false;
+ } else {
+ if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
+ vmx->enlightened_vmcs))
+ return false;
+ current_evmcs->revision_id = vmcs_revision();
+ }
return true;
}
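
With the VMCS load split out of prepare_for_vmx_operation(), the guest-side setup order becomes VMXON first, then load_vmcs(), then the usual prepare_vmcs()/vmlaunch(), exactly as the updated tests below do:

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); /* VMXON only    */
	GUEST_ASSERT(load_vmcs(vmx_pages));                 /* VMCS or eVMCS */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	GUEST_ASSERT(!vmlaunch());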
diff --git a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index 11ec358bf969..d503a51fad30 100644
--- a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -17,7 +17,7 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#define X86_FEATURE_XSAVE (1<<26)
#define X86_FEATURE_OSXSAVE (1<<27)
@@ -67,6 +67,7 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
struct kvm_sregs sregs;
struct kvm_cpuid_entry2 *entry;
+ struct ucall uc;
int rc;
entry = kvm_get_supported_cpuid_entry(1);
@@ -87,21 +88,20 @@ int main(int argc, char *argv[])
rc = _vcpu_run(vm, VCPU_ID);
if (run->exit_reason == KVM_EXIT_IO) {
- switch (run->io.port) {
- case GUEST_PORT_SYNC:
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_SYNC:
/* emulate hypervisor clearing CR4.OSXSAVE */
vcpu_sregs_get(vm, VCPU_ID, &sregs);
sregs.cr4 &= ~X86_CR4_OSXSAVE;
vcpu_sregs_set(vm, VCPU_ID, &sregs);
break;
- case GUEST_PORT_ABORT:
+ case UCALL_ABORT:
TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
break;
- case GUEST_PORT_DONE:
+ case UCALL_DONE:
goto done;
default:
- TEST_ASSERT(false, "Unknown port 0x%x.",
- run->io.port);
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
}
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
new file mode 100644
index 000000000000..92c2cfd1b182
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ * Tests for Enlightened VMCS, including nested guest state.
+ */
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+
+#include "vmx.h"
+
+#define VCPU_ID 5
+
+static bool have_nested_state;
+
+void l2_guest_code(void)
+{
+ GUEST_SYNC(6);
+
+ GUEST_SYNC(7);
+
+ /* Done, exit to L1 and never come back. */
+ vmcall();
+}
+
+void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);
+
+ GUEST_ASSERT(vmx_pages->vmcs_gpa);
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_SYNC(3);
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+ GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+
+ GUEST_SYNC(4);
+ GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ GUEST_SYNC(5);
+ GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+ GUEST_ASSERT(!vmlaunch());
+ GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+ GUEST_SYNC(8);
+ GUEST_ASSERT(!vmresume());
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+ GUEST_SYNC(9);
+}
+
+void guest_code(struct vmx_pages *vmx_pages)
+{
+ GUEST_SYNC(1);
+ GUEST_SYNC(2);
+
+ if (vmx_pages)
+ l1_guest_code(vmx_pages);
+
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ struct vmx_pages *vmx_pages = NULL;
+ vm_vaddr_t vmx_pages_gva = 0;
+
+ struct kvm_regs regs1, regs2;
+ struct kvm_vm *vm;
+ struct kvm_run *run;
+ struct kvm_x86_state *state;
+ struct ucall uc;
+ int stage;
+ uint16_t evmcs_ver;
+ struct kvm_enable_cap enable_evmcs_cap = {
+ .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+ .args[0] = (unsigned long)&evmcs_ver
+ };
+
+ struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ if (!kvm_check_cap(KVM_CAP_NESTED_STATE) ||
+ !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
+ printf("capabilities not available, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
+ vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+
+ run = vcpu_state(vm, VCPU_ID);
+
+ vcpu_regs_get(vm, VCPU_ID, &regs1);
+
+ vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+
+ for (stage = 1;; stage++) {
+ _vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ memset(&regs1, 0, sizeof(regs1));
+ vcpu_regs_get(vm, VCPU_ID, &regs1);
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
+ __FILE__, uc.args[1]);
+ /* NOT REACHED */
+ case UCALL_SYNC:
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+ }
+
+ /* UCALL_SYNC is handled here. */
+ TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
+ uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
+ (ulong)stage, (ulong)uc.args[1]);
+
+ state = vcpu_save_state(vm, VCPU_ID);
+ kvm_vm_release(vm);
+
+ /* Restore state in a new VM. */
+ kvm_vm_restart(vm, O_RDWR);
+ vm_vcpu_add(vm, VCPU_ID, 0, 0);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+ vcpu_load_state(vm, VCPU_ID, state);
+ run = vcpu_state(vm, VCPU_ID);
+ free(state);
+
+ memset(&regs2, 0, sizeof(regs2));
+ vcpu_regs_get(vm, VCPU_ID, &regs2);
+ TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
+ "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
+ (ulong) regs2.rdi, (ulong) regs2.rsi);
+ }
+
+done:
+ kvm_vm_free(vm);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
new file mode 100644
index 000000000000..eb3e7a838cb4
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for x86 KVM_CAP_MSR_PLATFORM_INFO
+ *
+ * Copyright (C) 2018, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Verifies expected behavior of controlling guest access to
+ * MSR_PLATFORM_INFO.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID 0
+#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00
+
+static void guest_code(void)
+{
+ uint64_t msr_platform_info;
+
+ for (;;) {
+ msr_platform_info = rdmsr(MSR_PLATFORM_INFO);
+ GUEST_SYNC(msr_platform_info);
+ asm volatile ("inc %r11");
+ }
+}
+
+static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
+{
+ struct kvm_enable_cap cap = {};
+
+ cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
+ cap.flags = 0;
+ cap.args[0] = (int)enable;
+ vm_enable_cap(vm, &cap);
+}
+
+static void test_msr_platform_info_enabled(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ set_msr_platform_info_enabled(vm, true);
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+ get_ucall(vm, VCPU_ID, &uc);
+ TEST_ASSERT(uc.cmd == UCALL_SYNC,
+ "Received ucall other than UCALL_SYNC: %u\n",
+ ucall);
+ TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
+ MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
+ "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
+ MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
+}
+
+static void test_msr_platform_info_disabled(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+
+ set_msr_platform_info_enabled(vm, false);
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ struct kvm_run *state;
+ int rv;
+ uint64_t msr_platform_info;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO);
+ if (!rv) {
+ fprintf(stderr,
+ "KVM_CAP_MSR_PLATFORM_INFO not supported, skip test\n");
+ exit(KSFT_SKIP);
+ }
+
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+ msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
+ vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
+ msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
+ test_msr_platform_info_disabled(vm);
+ test_msr_platform_info_enabled(vm);
+ vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
+
+ kvm_vm_free(vm);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/set_sregs_test.c b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
index 881419d5746e..35640e8e95bc 100644
--- a/tools/testing/selftests/kvm/set_sregs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
@@ -22,7 +22,7 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#define VCPU_ID 5
diff --git a/tools/testing/selftests/kvm/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 900e3e9dfb9f..03da41f0f736 100644
--- a/tools/testing/selftests/kvm/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -17,7 +17,7 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#include "vmx.h"
#define VCPU_ID 5
@@ -26,20 +26,20 @@ static bool have_nested_state;
void l2_guest_code(void)
{
- GUEST_SYNC(5);
+ GUEST_SYNC(6);
/* Exit to L1 */
vmcall();
/* L1 has now set up a shadow VMCS for us. */
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
- GUEST_SYNC(9);
+ GUEST_SYNC(10);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
- GUEST_SYNC(10);
+ GUEST_SYNC(11);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
- GUEST_SYNC(11);
+ GUEST_SYNC(12);
/* Done, exit to L1 and never come back. */
vmcall();
@@ -52,15 +52,17 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_ASSERT(vmx_pages->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_SYNC(3);
+ GUEST_ASSERT(load_vmcs(vmx_pages));
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
- GUEST_SYNC(3);
+ GUEST_SYNC(4);
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
prepare_vmcs(vmx_pages, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
- GUEST_SYNC(4);
+ GUEST_SYNC(5);
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
@@ -72,7 +74,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
- GUEST_SYNC(6);
+ GUEST_SYNC(7);
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_ASSERT(!vmresume());
@@ -85,12 +87,12 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
GUEST_ASSERT(vmlaunch());
- GUEST_SYNC(7);
+ GUEST_SYNC(8);
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(vmresume());
vmwrite(GUEST_RIP, 0xc0ffee);
- GUEST_SYNC(8);
+ GUEST_SYNC(9);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
@@ -101,7 +103,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(vmresume());
- GUEST_SYNC(12);
+ GUEST_SYNC(13);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(vmresume());
@@ -127,6 +129,7 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_x86_state *state;
+ struct ucall uc;
int stage;
struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
@@ -155,23 +158,23 @@ int main(int argc, char *argv[])
memset(&regs1, 0, sizeof(regs1));
vcpu_regs_get(vm, VCPU_ID, &regs1);
- switch (run->io.port) {
- case GUEST_PORT_ABORT:
- TEST_ASSERT(false, "%s at %s:%d", (const char *) regs1.rdi,
- __FILE__, regs1.rsi);
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
+ __FILE__, uc.args[1]);
/* NOT REACHED */
- case GUEST_PORT_SYNC:
+ case UCALL_SYNC:
break;
- case GUEST_PORT_DONE:
+ case UCALL_DONE:
goto done;
default:
- TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
}
- /* PORT_SYNC is handled here. */
- TEST_ASSERT(!strcmp((const char *)regs1.rdi, "hello") &&
- regs1.rsi == stage, "Unexpected register values vmexit #%lx, got %lx",
- stage, (ulong) regs1.rsi);
+ /* UCALL_SYNC is handled here. */
+ TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
+ uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
+ stage, (ulong)uc.args[1]);
state = vcpu_save_state(vm, VCPU_ID);
kvm_vm_release(vm);
diff --git a/tools/testing/selftests/kvm/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
index 213343e5dff9..c8478ce9ea77 100644
--- a/tools/testing/selftests/kvm/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -19,7 +19,7 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#define VCPU_ID 5
diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 49bcc68b0235..18fa64db0d7a 100644
--- a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -1,5 +1,5 @@
/*
- * gtests/tests/vmx_tsc_adjust_test.c
+ * vmx_tsc_adjust_test
*
* Copyright (C) 2018, Google LLC.
*
@@ -22,13 +22,13 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#include "vmx.h"
#include <string.h>
#include <sys/ioctl.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
@@ -94,6 +94,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
/* Prepare the VMCS for L2 execution. */
prepare_vmcs(vmx_pages, l2_guest_code,
@@ -146,26 +147,25 @@ int main(int argc, char *argv[])
for (;;) {
volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
- struct guest_args args;
+ struct ucall uc;
vcpu_run(vm, VCPU_ID);
- guest_args_read(vm, VCPU_ID, &args);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (args.port) {
- case GUEST_PORT_ABORT:
- TEST_ASSERT(false, "%s", (const char *) args.arg0);
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
/* NOT REACHED */
- case GUEST_PORT_SYNC:
- report(args.arg1);
+ case UCALL_SYNC:
+ report(uc.args[1]);
break;
- case GUEST_PORT_DONE:
+ case UCALL_DONE:
goto done;
default:
- TEST_ASSERT(false, "Unknown port 0x%x.", args.port);
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
}
}
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 17ab36605a8e..0a8e75886224 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -16,8 +16,20 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
+top_srcdir ?= ../../../..
+include $(top_srcdir)/scripts/subarch.include
+ARCH ?= $(SUBARCH)
+
all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
+.PHONY: khdr
+khdr:
+ make ARCH=$(ARCH) -C $(top_srcdir) headers_install
+
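+# Tests that need freshly installed uapi headers opt in by setting
+# KSFT_KHDR_INSTALL := 1 before including lib.mk, as the net Makefile
+# further below does.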
+ifdef KSFT_KHDR_INSTALL
+$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
+endif
+
.ONESHELL:
define RUN_TEST_PRINT_RESULT
TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \
diff --git a/tools/testing/selftests/memory-hotplug/config b/tools/testing/selftests/memory-hotplug/config
index 2fde30191a47..a7e8cd5bb265 100644
--- a/tools/testing/selftests/memory-hotplug/config
+++ b/tools/testing/selftests/memory-hotplug/config
@@ -2,3 +2,4 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTPLUG_SPARSE=y
CONFIG_NOTIFIER_ERROR_INJECTION=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_MEMORY_HOTREMOVE=y
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 78b24cf76f40..8cf22b3c2563 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -14,3 +14,4 @@ udpgso_bench_rx
udpgso_bench_tx
tcp_inq
tls
+ip_defrag
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 9cca68e440a0..256d82d5fa87 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -5,16 +5,17 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g
CFLAGS += -I../../../../usr/include/
TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
-TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh
+TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh
TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
TEST_PROGS_EXTENDED := in_netns.sh
TEST_GEN_FILES = socket
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd
-TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx
+TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
+KSFT_KHDR_INSTALL := 1
include ../lib.mk
$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 0f45633bd634..802b4af18729 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -9,11 +9,11 @@ ret=0
ksft_skip=4
# all tests in this script. Can be overridden with -t option
-TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric"
+TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics"
VERBOSE=0
PAUSE_ON_FAIL=no
PAUSE=no
-IP="ip -netns testns"
+IP="ip -netns ns1"
log_test()
{
@@ -47,8 +47,10 @@ log_test()
setup()
{
set -e
- ip netns add testns
+ ip netns add ns1
$IP link set dev lo up
+ ip netns exec ns1 sysctl -qw net.ipv4.ip_forward=1
+ ip netns exec ns1 sysctl -qw net.ipv6.conf.all.forwarding=1
$IP link add dummy0 type dummy
$IP link set dev dummy0 up
@@ -61,7 +63,8 @@ setup()
cleanup()
{
$IP link del dev dummy0 &> /dev/null
- ip netns del testns
+ ip netns del ns1
+ ip netns del ns2 &> /dev/null
}
get_linklocal()
@@ -639,11 +642,14 @@ add_initial_route6()
check_route6()
{
- local pfx="2001:db8:104::/64"
+ local pfx
local expected="$1"
local out
local rc=0
+ set -- $expected
+ pfx=$1
+
out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//')
[ "${out}" = "${expected}" ] && return 0
@@ -690,28 +696,33 @@ route_setup()
[ "${VERBOSE}" = "1" ] && set -x
set -e
- $IP li add red up type vrf table 101
+ ip netns add ns2
+ ip -netns ns2 link set dev lo up
+ ip netns exec ns2 sysctl -qw net.ipv4.ip_forward=1
+ ip netns exec ns2 sysctl -qw net.ipv6.conf.all.forwarding=1
+
$IP li add veth1 type veth peer name veth2
$IP li add veth3 type veth peer name veth4
$IP li set veth1 up
$IP li set veth3 up
- $IP li set veth2 vrf red up
- $IP li set veth4 vrf red up
- $IP li add dummy1 type dummy
- $IP li set dummy1 vrf red up
-
- $IP -6 addr add 2001:db8:101::1/64 dev veth1
- $IP -6 addr add 2001:db8:101::2/64 dev veth2
- $IP -6 addr add 2001:db8:103::1/64 dev veth3
- $IP -6 addr add 2001:db8:103::2/64 dev veth4
- $IP -6 addr add 2001:db8:104::1/64 dev dummy1
+ $IP li set veth2 netns ns2 up
+ $IP li set veth4 netns ns2 up
+ ip -netns ns2 li add dummy1 type dummy
+ ip -netns ns2 li set dummy1 up
+ $IP -6 addr add 2001:db8:101::1/64 dev veth1 nodad
+ $IP -6 addr add 2001:db8:103::1/64 dev veth3 nodad
$IP addr add 172.16.101.1/24 dev veth1
- $IP addr add 172.16.101.2/24 dev veth2
$IP addr add 172.16.103.1/24 dev veth3
- $IP addr add 172.16.103.2/24 dev veth4
- $IP addr add 172.16.104.1/24 dev dummy1
+
+ ip -netns ns2 -6 addr add 2001:db8:101::2/64 dev veth2 nodad
+ ip -netns ns2 -6 addr add 2001:db8:103::2/64 dev veth4 nodad
+ ip -netns ns2 -6 addr add 2001:db8:104::1/64 dev dummy1 nodad
+
+ ip -netns ns2 addr add 172.16.101.2/24 dev veth2
+ ip -netns ns2 addr add 172.16.103.2/24 dev veth4
+ ip -netns ns2 addr add 172.16.104.1/24 dev dummy1
set +ex
}
@@ -944,7 +955,7 @@ ipv6_addr_metric_test()
log_test $rc 0 "Modify metric of address"
# verify prefix route removed on down
- run_cmd "ip netns exec testns sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1"
+ run_cmd "ip netns exec ns1 sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1"
run_cmd "$IP li set dev dummy2 down"
rc=$?
if [ $rc -eq 0 ]; then
@@ -967,6 +978,77 @@ ipv6_addr_metric_test()
cleanup
}
+ipv6_route_metrics_test()
+{
+ local rc
+
+ echo
+ echo "IPv6 routes with metrics"
+
+ route_setup
+
+ #
+ # single path with metrics
+ #
+ run_cmd "$IP -6 ro add 2001:db8:111::/64 via 2001:db8:101::2 mtu 1400"
+ rc=$?
+ if [ $rc -eq 0 ]; then
+ check_route6 "2001:db8:111::/64 via 2001:db8:101::2 dev veth1 metric 1024 mtu 1400"
+ rc=$?
+ fi
+ log_test $rc 0 "Single path route with mtu metric"
+
+
+ #
+ # multipath via separate routes with metrics
+ #
+ run_cmd "$IP -6 ro add 2001:db8:112::/64 via 2001:db8:101::2 mtu 1400"
+ run_cmd "$IP -6 ro append 2001:db8:112::/64 via 2001:db8:103::2"
+ rc=$?
+ if [ $rc -eq 0 ]; then
+ check_route6 "2001:db8:112::/64 metric 1024 mtu 1400 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
+ rc=$?
+ fi
+ log_test $rc 0 "Multipath route via 2 single routes with mtu metric on first"
+
+ # The second route is coalesced into the first to make a multipath route.
+ # The MTU of the second path is hidden from display!
+ run_cmd "$IP -6 ro add 2001:db8:113::/64 via 2001:db8:101::2"
+ run_cmd "$IP -6 ro append 2001:db8:113::/64 via 2001:db8:103::2 mtu 1400"
+ rc=$?
+ if [ $rc -eq 0 ]; then
+ check_route6 "2001:db8:113::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
+ rc=$?
+ fi
+ log_test $rc 0 "Multipath route via 2 single routes with mtu metric on 2nd"
+
+ run_cmd "$IP -6 ro del 2001:db8:113::/64 via 2001:db8:101::2"
+ if [ $? -eq 0 ]; then
+ check_route6 "2001:db8:113::/64 via 2001:db8:103::2 dev veth3 metric 1024 mtu 1400"
+ log_test $? 0 " MTU of second leg"
+ fi
+
+ #
+ # multipath with metrics
+ #
+ run_cmd "$IP -6 ro add 2001:db8:115::/64 mtu 1400 nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
+ rc=$?
+ if [ $rc -eq 0 ]; then
+ check_route6 "2001:db8:115::/64 metric 1024 mtu 1400 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
+ rc=$?
+ fi
+ log_test $rc 0 "Multipath route with mtu metric"
+
+ $IP -6 ro add 2001:db8:104::/64 via 2001:db8:101::2 mtu 1300
+ run_cmd "ip netns exec ns1 ping6 -w1 -c1 -s 1500 2001:db8:104::1"
+ log_test $? 0 "Using route with mtu metric"
+
+ run_cmd "$IP -6 ro add 2001:db8:114::/64 via 2001:db8:101::2 congctl lock foo"
+ log_test $? 2 "Invalid metric (fails metric_convert)"
+
+ route_cleanup
+}
+
# add route for a prefix, flushing any existing routes first
# expected to be the first step of a test
add_route()
@@ -1005,11 +1087,15 @@ add_initial_route()
check_route()
{
- local pfx="172.16.104.0/24"
+ local pfx
local expected="$1"
local out
local rc=0
+ set -- $expected
+ pfx=$1
+ [ "${pfx}" = "unreachable" ] && pfx=$2
+
out=$($IP ro ls match ${pfx})
[ "${out}" = "${expected}" ] && return 0
@@ -1319,6 +1405,43 @@ ipv4_addr_metric_test()
cleanup
}
+ipv4_route_metrics_test()
+{
+ local rc
+
+ echo
+ echo "IPv4 route add / append tests"
+
+ route_setup
+
+ run_cmd "$IP ro add 172.16.111.0/24 via 172.16.101.2 mtu 1400"
+ rc=$?
+ if [ $rc -eq 0 ]; then
+ check_route "172.16.111.0/24 via 172.16.101.2 dev veth1 mtu 1400"
+ rc=$?
+ fi
+ log_test $rc 0 "Single path route with mtu metric"
+
+
+ run_cmd "$IP ro add 172.16.112.0/24 mtu 1400 nexthop via 172.16.101.2 nexthop via 172.16.103.2"
+ rc=$?
+ if [ $rc -eq 0 ]; then
+ check_route "172.16.112.0/24 mtu 1400 nexthop via 172.16.101.2 dev veth1 weight 1 nexthop via 172.16.103.2 dev veth3 weight 1"
+ rc=$?
+ fi
+ log_test $rc 0 "Multipath route with mtu metric"
+
+ $IP ro add 172.16.104.0/24 via 172.16.101.2 mtu 1300
+ run_cmd "ip netns exec ns1 ping -w1 -c1 -s 1500 172.16.104.1"
+ log_test $? 0 "Using route with mtu metric"
+
+ run_cmd "$IP ro add 172.16.111.0/24 via 172.16.101.2 congctl lock foo"
+ log_test $? 2 "Invalid metric (fails metric_convert)"
+
+ route_cleanup
+}
+
+
################################################################################
# usage
@@ -1385,6 +1508,8 @@ do
ipv4_route_test|ipv4_rt) ipv4_route_test;;
ipv6_addr_metric) ipv6_addr_metric_test;;
ipv4_addr_metric) ipv4_addr_metric_test;;
+ ipv6_route_metrics) ipv6_route_metrics_test;;
+ ipv4_route_metrics) ipv4_route_metrics_test;;
help) echo "Test names: $TESTS"; exit 0;;
esac
diff --git a/tools/testing/selftests/net/forwarding/bridge_sticky_fdb.sh b/tools/testing/selftests/net/forwarding/bridge_sticky_fdb.sh
new file mode 100755
index 000000000000..1f8ef0eff862
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_sticky_fdb.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="sticky"
+NUM_NETIFS=4
+TEST_MAC=de:ad:be:ef:13:37
+source lib.sh
+
+switch_create()
+{
+ ip link add dev br0 type bridge
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 master br0
+
+ ip link set dev br0 up
+ ip link set dev $h1 up
+ ip link set dev $swp1 up
+ ip link set dev $h2 up
+ ip link set dev $swp2 up
+}
+
+switch_destroy()
+{
+ ip link set dev $swp2 down
+ ip link set dev $h2 down
+ ip link set dev $swp1 down
+ ip link set dev $h1 down
+
+ ip link del dev br0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+ h2=${NETIFS[p3]}
+ swp2=${NETIFS[p4]}
+
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+ switch_destroy
+}
+
+sticky()
+{
+ bridge fdb add $TEST_MAC dev $swp1 master static sticky
+ check_err $? "Could not add fdb entry"
+ bridge fdb del $TEST_MAC dev $swp1 vlan 1 master static sticky
+ $MZ $h2 -c 1 -a $TEST_MAC -t arp "request" -q
+ bridge -j fdb show br br0 brport $swp1 \
+ | jq -e ".[] | select(.mac == \"$TEST_MAC\")" &> /dev/null
+ check_err $? "Did not find FDB record when should"
+
+ log_test "Sticky fdb entry"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index ca53b539aa2d..85d253546684 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -251,7 +251,7 @@ lldpad_app_wait_set()
{
local dev=$1; shift
- while lldptool -t -i $dev -V APP -c app | grep -q pending; do
+ while lldptool -t -i $dev -V APP -c app | grep -Eq "pending|unknown"; do
echo "$dev: waiting for lldpad to push pending APP updates"
sleep 5
done
@@ -494,6 +494,14 @@ tc_rule_stats_get()
| jq '.[1].options.actions[].stats.packets'
}
+ethtool_stats_get()
+{
+ local dev=$1; shift
+ local stat=$1; shift
+
+ ethtool -S $dev | grep "^ *$stat:" | head -n 1 | cut -d: -f2
+}
+
mac_get()
{
local if_name=$1
@@ -541,6 +549,23 @@ forwarding_restore()
sysctl_restore net.ipv4.conf.all.forwarding
}
+declare -A MTU_ORIG
+mtu_set()
+{
+ local dev=$1; shift
+ local mtu=$1; shift
+
+ MTU_ORIG["$dev"]=$(ip -j link show dev $dev | jq -e '.[].mtu')
+ ip link set dev $dev mtu $mtu
+}
+
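+# mtu_restore undoes a prior mtu_set on the same device by reapplying
+# the original MTU recorded in MTU_ORIG.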
+mtu_restore()
+{
+ local dev=$1; shift
+
+ ip link set dev $dev mtu ${MTU_ORIG["$dev"]}
+}
+
tc_offload_check()
{
local num_netifs=${1:-$NUM_NETIFS}
diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c
new file mode 100644
index 000000000000..61ae2782388e
--- /dev/null
+++ b/tools/testing/selftests/net/ip_defrag.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <linux/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/udp.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+static bool cfg_do_ipv4;
+static bool cfg_do_ipv6;
+static bool cfg_verbose;
+static bool cfg_overlap;
+static unsigned short cfg_port = 9000;
+
+const struct in_addr addr4 = { .s_addr = __constant_htonl(INADDR_LOOPBACK + 2) };
+const struct in6_addr addr6 = IN6ADDR_LOOPBACK_INIT;
+
+#define IP4_HLEN (sizeof(struct iphdr))
+#define IP6_HLEN (sizeof(struct ip6_hdr))
+#define UDP_HLEN (sizeof(struct udphdr))
+
+/* IPv6 fragment header length. */
+#define FRAG_HLEN 8
+
+static int payload_len;
+static int max_frag_len;
+
+#define MSG_LEN_MAX 60000 /* Max UDP payload length. */
+
+#define IP4_MF (1u << 13) /* IPv4 MF flag. */
+#define IP6_MF (1) /* IPv6 MF flag. */
+
+#define CSUM_MANGLED_0 (0xffff)
+
+static uint8_t udp_payload[MSG_LEN_MAX];
+static uint8_t ip_frame[IP_MAXPACKET];
+static uint32_t ip_id = 0xabcd;
+static int msg_counter;
+static int frag_counter;
+static unsigned int seed;
+
+/* Receive a UDP packet. Validate it matches udp_payload. */
+static void recv_validate_udp(int fd_udp)
+{
+ ssize_t ret;
+ static uint8_t recv_buff[MSG_LEN_MAX];
+
+ ret = recv(fd_udp, recv_buff, payload_len, 0);
+ msg_counter++;
+
+ if (cfg_overlap) {
+ if (ret != -1)
+ error(1, 0, "recv: expected timeout; got %d",
+ (int)ret);
+ if (errno != ETIMEDOUT && errno != EAGAIN)
+ error(1, errno, "recv: expected timeout: %d",
+ errno);
+ return; /* OK */
+ }
+
+ if (ret == -1)
+ error(1, errno, "recv: payload_len = %d max_frag_len = %d",
+ payload_len, max_frag_len);
+ if (ret != payload_len)
+ error(1, 0, "recv: wrong size: %d vs %d", (int)ret, payload_len);
+ if (memcmp(udp_payload, recv_buff, payload_len))
+ error(1, 0, "recv: wrong data");
+}
+
+static uint32_t raw_checksum(uint8_t *buf, int len, uint32_t sum)
+{
+ int i;
+
+ for (i = 0; i < (len & ~1U); i += 2) {
+ sum += (u_int16_t)ntohs(*((u_int16_t *)(buf + i)));
+ if (sum > 0xffff)
+ sum -= 0xffff;
+ }
+
+ if (i < len) {
+ sum += buf[i] << 8;
+ if (sum > 0xffff)
+ sum -= 0xffff;
+ }
+
+ return sum;
+}
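+
+/* raw_checksum() folds carries as it goes: adding 0x0001 to a running
+ * sum of 0xffff gives 0x10000, which the "sum -= 0xffff" step reduces
+ * to 0x0001, the usual ones'-complement end-around carry. Callers seed
+ * the sum with the pseudo-header words, as the two helpers below do.
+ */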
+
+static uint16_t udp_checksum(struct ip *iphdr, struct udphdr *udphdr)
+{
+ uint32_t sum = 0;
+ uint16_t res;
+
+ sum = raw_checksum((uint8_t *)&iphdr->ip_src, 2 * sizeof(iphdr->ip_src),
+ IPPROTO_UDP + (uint32_t)(UDP_HLEN + payload_len));
+ sum = raw_checksum((uint8_t *)udphdr, UDP_HLEN, sum);
+ sum = raw_checksum((uint8_t *)udp_payload, payload_len, sum);
+ res = 0xffff & ~sum;
+ if (res)
+ return htons(res);
+ else
+ return CSUM_MANGLED_0;
+}
+
+static uint16_t udp6_checksum(struct ip6_hdr *iphdr, struct udphdr *udphdr)
+{
+ uint32_t sum = 0;
+ uint16_t res;
+
+ sum = raw_checksum((uint8_t *)&iphdr->ip6_src, 2 * sizeof(iphdr->ip6_src),
+ IPPROTO_UDP);
+ sum = raw_checksum((uint8_t *)&udphdr->len, sizeof(udphdr->len), sum);
+ sum = raw_checksum((uint8_t *)udphdr, UDP_HLEN, sum);
+ sum = raw_checksum((uint8_t *)udp_payload, payload_len, sum);
+ res = 0xffff & ~sum;
+ if (res)
+ return htons(res);
+ else
+ return CSUM_MANGLED_0;
+}
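+
+/* Note on CSUM_MANGLED_0: RFC 768 reserves an on-the-wire UDP checksum
+ * of zero to mean "no checksum computed", so a result that computes to
+ * 0x0000 is transmitted as 0xffff instead, as both helpers above do.
+ */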
+
+static void send_fragment(int fd_raw, struct sockaddr *addr, socklen_t alen,
+ int offset, bool ipv6)
+{
+ int frag_len;
+ int res;
+ int payload_offset = offset > 0 ? offset - UDP_HLEN : 0;
+ uint8_t *frag_start = ipv6 ? ip_frame + IP6_HLEN + FRAG_HLEN :
+ ip_frame + IP4_HLEN;
+
+ if (offset == 0) {
+ struct udphdr udphdr;
+ udphdr.source = htons(cfg_port + 1);
+ udphdr.dest = htons(cfg_port);
+ udphdr.len = htons(UDP_HLEN + payload_len);
+ udphdr.check = 0;
+ if (ipv6)
+ udphdr.check = udp6_checksum((struct ip6_hdr *)ip_frame, &udphdr);
+ else
+ udphdr.check = udp_checksum((struct ip *)ip_frame, &udphdr);
+ memcpy(frag_start, &udphdr, UDP_HLEN);
+ }
+
+ if (ipv6) {
+ struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
+ struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
+ if (payload_len - payload_offset <= max_frag_len && offset > 0) {
+ /* This is the last fragment. */
+ frag_len = FRAG_HLEN + payload_len - payload_offset;
+ fraghdr->ip6f_offlg = htons(offset);
+ } else {
+ frag_len = FRAG_HLEN + max_frag_len;
+ fraghdr->ip6f_offlg = htons(offset | IP6_MF);
+ }
+ ip6hdr->ip6_plen = htons(frag_len);
+ if (offset == 0)
+ memcpy(frag_start + UDP_HLEN, udp_payload,
+ frag_len - FRAG_HLEN - UDP_HLEN);
+ else
+ memcpy(frag_start, udp_payload + payload_offset,
+ frag_len - FRAG_HLEN);
+ frag_len += IP6_HLEN;
+ } else {
+ struct ip *iphdr = (struct ip *)ip_frame;
+ if (payload_len - payload_offset <= max_frag_len && offset > 0) {
+ /* This is the last fragment. */
+ frag_len = IP4_HLEN + payload_len - payload_offset;
+ iphdr->ip_off = htons(offset / 8);
+ } else {
+ frag_len = IP4_HLEN + max_frag_len;
+ iphdr->ip_off = htons(offset / 8 | IP4_MF);
+ }
+ iphdr->ip_len = htons(frag_len);
+ if (offset == 0)
+ memcpy(frag_start + UDP_HLEN, udp_payload,
+ frag_len - IP4_HLEN - UDP_HLEN);
+ else
+ memcpy(frag_start, udp_payload + payload_offset,
+ frag_len - IP4_HLEN);
+ }
+
+ res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen);
+ if (res < 0)
+ error(1, errno, "send_fragment");
+ if (res != frag_len)
+ error(1, 0, "send_fragment: %d vs %d", res, frag_len);
+
+ frag_counter++;
+}
+
+static void send_udp_frags(int fd_raw, struct sockaddr *addr,
+ socklen_t alen, bool ipv6)
+{
+ struct ip *iphdr = (struct ip *)ip_frame;
+ struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
+ int res;
+ int offset;
+ int frag_len;
+
+ /* Send the UDP datagram using raw IP fragments: the 0th fragment
+ * has the UDP header; the other fragments are pieces of udp_payload
+ * split into chunks of max_frag_len bytes.
+ *
+ * Odd fragments (1st, 3rd, 5th, etc.) are sent out first, then
+ * even fragments (0th, 2nd, etc.).
+ */
+ if (ipv6) {
+ struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
+ ((struct sockaddr_in6 *)addr)->sin6_port = 0;
+ memset(ip6hdr, 0, sizeof(*ip6hdr));
+ ip6hdr->ip6_flow = htonl(6<<28); /* Version. */
+ ip6hdr->ip6_nxt = IPPROTO_FRAGMENT;
+ ip6hdr->ip6_hops = 255;
+ ip6hdr->ip6_src = addr6;
+ ip6hdr->ip6_dst = addr6;
+ fraghdr->ip6f_nxt = IPPROTO_UDP;
+ fraghdr->ip6f_reserved = 0;
+ fraghdr->ip6f_ident = htonl(ip_id++);
+ } else {
+ memset(iphdr, 0, sizeof(*iphdr));
+ iphdr->ip_hl = 5;
+ iphdr->ip_v = 4;
+ iphdr->ip_tos = 0;
+ iphdr->ip_id = htons(ip_id++);
+ iphdr->ip_ttl = 0x40;
+ iphdr->ip_p = IPPROTO_UDP;
+ iphdr->ip_src.s_addr = htonl(INADDR_LOOPBACK);
+ iphdr->ip_dst = addr4;
+ iphdr->ip_sum = 0;
+ }
+
+ /* Odd fragments. */
+ offset = max_frag_len;
+ while (offset < (UDP_HLEN + payload_len)) {
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
+ offset += 2 * max_frag_len;
+ }
+
+ if (cfg_overlap) {
+ /* Send an extra random fragment. */
+ offset = rand() % (UDP_HLEN + payload_len - 1);
+ /* sendto() returns EINVAL if offset + frag_len is too small. */
+ if (ipv6) {
+ struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
+ frag_len = max_frag_len + rand() % 256;
+ /* In IPv6, a fragment whose length is not a multiple of 8 is dropped. */
+ frag_len &= ~0x7;
+ fraghdr->ip6f_offlg = htons(offset / 8 | IP6_MF);
+ ip6hdr->ip6_plen = htons(frag_len);
+ frag_len += IP6_HLEN;
+ } else {
+ frag_len = IP4_HLEN + UDP_HLEN + rand() % 256;
+ iphdr->ip_off = htons(offset / 8 | IP4_MF);
+ iphdr->ip_len = htons(frag_len);
+ }
+ res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen);
+ if (res < 0)
+ error(1, errno, "sendto overlap");
+ if (res != frag_len)
+ error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len);
+ frag_counter++;
+ }
+
+ /* Even fragments. */
+ offset = 0;
+ while (offset < (UDP_HLEN + payload_len)) {
+ send_fragment(fd_raw, addr, alen, offset, ipv6);
+ offset += 2 * max_frag_len;
+ }
+}
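+
+/* For example, with payload_len = 31 and max_frag_len = 8 the datagram
+ * spans UDP_HLEN + 31 = 39 bytes, so the odd pass sends fragments at
+ * offsets 8 and 24, and the even pass sends offsets 0 (which carries
+ * the UDP header), 16 and 32.
+ */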
+
+static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
+{
+ int fd_tx_raw, fd_rx_udp;
+ struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 };
+ int idx;
+ int min_frag_len = ipv6 ? 1280 : 8;
+
+ /* Initialize the payload. */
+ for (idx = 0; idx < MSG_LEN_MAX; ++idx)
+ udp_payload[idx] = idx % 256;
+
+ /* Open sockets. */
+ fd_tx_raw = socket(addr->sa_family, SOCK_RAW, IPPROTO_RAW);
+ if (fd_tx_raw == -1)
+ error(1, errno, "socket tx_raw");
+
+ fd_rx_udp = socket(addr->sa_family, SOCK_DGRAM, 0);
+ if (fd_rx_udp == -1)
+ error(1, errno, "socket rx_udp");
+ if (bind(fd_rx_udp, addr, alen))
+ error(1, errno, "bind");
+ /* Fail fast. */
+ if (setsockopt(fd_rx_udp, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))
+ error(1, errno, "setsockopt rcv timeout");
+
+ for (payload_len = min_frag_len; payload_len < MSG_LEN_MAX;
+ payload_len += (rand() % 4096)) {
+ if (cfg_verbose)
+ printf("payload_len: %d\n", payload_len);
+ max_frag_len = min_frag_len;
+ do {
+ send_udp_frags(fd_tx_raw, addr, alen, ipv6);
+ recv_validate_udp(fd_rx_udp);
+ max_frag_len += 8 * (rand() % 8);
+ } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len);
+ }
+
+ /* Cleanup. */
+ if (close(fd_tx_raw))
+ error(1, errno, "close tx_raw");
+ if (close(fd_rx_udp))
+ error(1, errno, "close rx_udp");
+
+ if (cfg_verbose)
+ printf("processed %d messages, %d fragments\n",
+ msg_counter, frag_counter);
+
+ fprintf(stderr, "PASS\n");
+}
+
+
+static void run_test_v4(void)
+{
+ struct sockaddr_in addr = {0};
+
+ addr.sin_family = AF_INET;
+ addr.sin_port = htons(cfg_port);
+ addr.sin_addr = addr4;
+
+ run_test((void *)&addr, sizeof(addr), false /* !ipv6 */);
+}
+
+static void run_test_v6(void)
+{
+ struct sockaddr_in6 addr = {0};
+
+ addr.sin6_family = AF_INET6;
+ addr.sin6_port = htons(cfg_port);
+ addr.sin6_addr = addr6;
+
+ run_test((void *)&addr, sizeof(addr), true /* ipv6 */);
+}
+
+static void parse_opts(int argc, char **argv)
+{
+ int c;
+
+ while ((c = getopt(argc, argv, "46ov")) != -1) {
+ switch (c) {
+ case '4':
+ cfg_do_ipv4 = true;
+ break;
+ case '6':
+ cfg_do_ipv6 = true;
+ break;
+ case 'o':
+ cfg_overlap = true;
+ break;
+ case 'v':
+ cfg_verbose = true;
+ break;
+ default:
+ error(1, 0, "%s: parse error", argv[0]);
+ }
+ }
+}
+
+int main(int argc, char **argv)
+{
+ parse_opts(argc, argv);
+ seed = time(NULL);
+ srand(seed);
+ /* Print the seed to track/reproduce potential failures. */
+ printf("seed = %d\n", seed);
+
+ if (cfg_do_ipv4)
+ run_test_v4();
+ if (cfg_do_ipv6)
+ run_test_v6();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
new file mode 100755
index 000000000000..f34672796044
--- /dev/null
+++ b/tools/testing/selftests/net/ip_defrag.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run a couple of IP defragmentation tests.
+
+set +x
+set -e
+
+readonly NETNS="ns-$(mktemp -u XXXXXX)"
+
+setup() {
+ ip netns add "${NETNS}"
+ ip -netns "${NETNS}" link set lo up
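+ # Raise the IPv4/IPv6 defrag queue thresholds so that large test
+ # payloads are not evicted before reassembly completes.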
+ ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
+}
+
+cleanup() {
+ ip netns del "${NETNS}"
+}
+
+trap cleanup EXIT
+setup
+
+echo "ipv4 defrag"
+ip netns exec "${NETNS}" ./ip_defrag -4
+
+
+ip netns exec "${NETNS}" ./ip_defrag -4o
+
+echo "ipv6 defrag"
+ip netns exec "${NETNS}" ./ip_defrag -6
+
+echo "ipv6 defrag with overlaps"
+ip netns exec "${NETNS}" ./ip_defrag -6o
+
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 32a194e3e07a..a369d616b390 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -6,6 +6,26 @@
#
# Tests currently implemented:
#
+# - pmtu_ipv4
+# Set up two namespaces, A and B, with two paths between them over routers
+# R1 and R2 (also implemented with namespaces), with different MTUs:
+#
+#    segment a_r1     segment b_r1    a_r1: 2000
+#  .--------------R1--------------.   b_r1: 1400
+#  A                               B  a_r2: 2000
+#  '--------------R2--------------'   b_r2: 1500
+#    segment a_r2     segment b_r2
+#
+# Check that PMTU exceptions with the correct PMTU are created. Then
+# decrease and increase the MTU of the local link for one of the paths,
+# A to R1, checking that route exception PMTU changes accordingly over
+# this path. Also check that locked exceptions are created when an ICMP
+# message advertising a PMTU smaller than net.ipv4.route.min_pmtu is
+# received.
+#
+# - pmtu_ipv6
+# Same as pmtu_ipv4, using IPv6, except that the locked-PMTU tests are skipped
+#
# - pmtu_vti4_exception
# Set up vti tunnel on top of veth, with xfrm states and policies, in two
# namespaces with matching endpoints. Check that route exception is not
@@ -50,6 +70,8 @@ ksft_skip=4
which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
tests="
+ pmtu_ipv4_exception ipv4: PMTU exceptions
+ pmtu_ipv6_exception ipv6: PMTU exceptions
pmtu_vti6_exception vti6: PMTU exceptions
pmtu_vti4_exception vti4: PMTU exceptions
pmtu_vti4_default_mtu vti4: default MTU assignment
@@ -60,8 +82,45 @@ tests="
NS_A="ns-$(mktemp -u XXXXXX)"
NS_B="ns-$(mktemp -u XXXXXX)"
+NS_R1="ns-$(mktemp -u XXXXXX)"
+NS_R2="ns-$(mktemp -u XXXXXX)"
ns_a="ip netns exec ${NS_A}"
ns_b="ip netns exec ${NS_B}"
+ns_r1="ip netns exec ${NS_R1}"
+ns_r2="ip netns exec ${NS_R2}"
+
+# Addressing and routing for tests with routers: four network segments, with
+# index SEGMENT between 1 and 4, a common prefix (PREFIX4 or PREFIX6) and an
+# identifier ID, which is 1 for hosts (A and B), 2 for routers (R1 and R2).
+# Addresses are:
+# - IPv4: PREFIX4.SEGMENT.ID (/24)
+# - IPv6: PREFIX6:SEGMENT::ID (/64)
+prefix4="192.168"
+prefix6="fd00"
+a_r1=1
+a_r2=2
+b_r1=3
+b_r2=4
+# ns peer segment
+routing_addrs="
+ A R1 ${a_r1}
+ A R2 ${a_r2}
+ B R1 ${b_r1}
+ B R2 ${b_r2}
+"
+# Traffic from A to B goes through R1 by default, and through R2 if
+# destined to B's address on the b_r2 segment.
+# Traffic from B to A goes through R1.
+# ns destination gateway
+routes="
+ A default ${prefix4}.${a_r1}.2
+ A ${prefix4}.${b_r2}.1 ${prefix4}.${a_r2}.2
+ B default ${prefix4}.${b_r1}.2
+
+ A default ${prefix6}:${a_r1}::2
+ A ${prefix6}:${b_r2}::1 ${prefix6}:${a_r2}::2
+ B default ${prefix6}:${b_r1}::2
+"
veth4_a_addr="192.168.1.1"
veth4_b_addr="192.168.1.2"
@@ -83,6 +142,7 @@ dummy6_mask="64"
cleanup_done=1
err_buf=
+tcpdump_pids=
err() {
err_buf="${err_buf}${1}
@@ -94,9 +154,15 @@ err_flush() {
err_buf=
}
+# Find the auto-generated name for this namespace
+nsname() {
+ eval echo \$NS_$1
+}
+
setup_namespaces() {
- ip netns add ${NS_A} || return 1
- ip netns add ${NS_B}
+ for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do
+ ip netns add ${n} || return 1
+ done
}
setup_veth() {
@@ -167,6 +233,49 @@ setup_xfrm6() {
setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr}
}
+setup_routing() {
+ for i in ${NS_R1} ${NS_R2}; do
+ ip netns exec ${i} sysctl -q net/ipv4/ip_forward=1
+ ip netns exec ${i} sysctl -q net/ipv6/conf/all/forwarding=1
+ done
+
+ for i in ${routing_addrs}; do
+ [ "${ns}" = "" ] && ns="${i}" && continue
+ [ "${peer}" = "" ] && peer="${i}" && continue
+ [ "${segment}" = "" ] && segment="${i}"
+
+ ns_name="$(nsname ${ns})"
+ peer_name="$(nsname ${peer})"
+ if="veth_${ns}-${peer}"
+ ifpeer="veth_${peer}-${ns}"
+
+ # Create veth links
+ ip link add ${if} up netns ${ns_name} type veth peer name ${ifpeer} netns ${peer_name} || return 1
+ ip -n ${peer_name} link set dev ${ifpeer} up
+
+ # Add addresses
+ ip -n ${ns_name} addr add ${prefix4}.${segment}.1/24 dev ${if}
+ ip -n ${ns_name} addr add ${prefix6}:${segment}::1/64 dev ${if}
+
+ ip -n ${peer_name} addr add ${prefix4}.${segment}.2/24 dev ${ifpeer}
+ ip -n ${peer_name} addr add ${prefix6}:${segment}::2/64 dev ${ifpeer}
+
+ ns=""; peer=""; segment=""
+ done
+
+ for i in ${routes}; do
+ [ "${ns}" = "" ] && ns="${i}" && continue
+ [ "${addr}" = "" ] && addr="${i}" && continue
+ [ "${gw}" = "" ] && gw="${i}"
+
+ ns_name="$(nsname ${ns})"
+
+ ip -n ${ns_name} route add ${addr} via ${gw}
+
+ ns=""; addr=""; gw=""
+ done
+}
+
setup() {
[ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip
@@ -176,10 +285,28 @@ setup() {
done
}
+trace() {
+ [ $tracing -eq 0 ] && return
+
+ for arg do
+ [ "${ns_cmd}" = "" ] && ns_cmd="${arg}" && continue
+ ${ns_cmd} tcpdump -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null &
+ tcpdump_pids="${tcpdump_pids} $!"
+ ns_cmd=
+ done
+ sleep 1
+}
+
cleanup() {
+ for pid in ${tcpdump_pids}; do
+ kill ${pid}
+ done
+ tcpdump_pids=
+
[ ${cleanup_done} -eq 1 ] && return
- ip netns del ${NS_A} 2 > /dev/null
- ip netns del ${NS_B} 2 > /dev/null
+ for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do
+ ip netns del ${n} 2> /dev/null
+ done
cleanup_done=1
}
@@ -196,7 +323,9 @@ mtu_parse() {
next=0
for i in ${input}; do
+ [ ${next} -eq 1 -a "${i}" = "lock" ] && next=2 && continue
[ ${next} -eq 1 ] && echo "${i}" && return
+ [ ${next} -eq 2 ] && echo "lock ${i}" && return
[ "${i}" = "mtu" ] && next=1
done
}
@@ -229,8 +358,117 @@ route_get_dst_pmtu_from_exception() {
mtu_parse "$(route_get_dst_exception "${ns_cmd}" ${dst})"
}
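+
+# Check a PMTU value against an expectation: "any" accepts any
+# non-empty PMTU, an empty expectation asserts that no exception
+# exists, and anything else must match exactly.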
+check_pmtu_value() {
+ expected="${1}"
+ value="${2}"
+ event="${3}"
+
+ [ "${expected}" = "any" ] && [ -n "${value}" ] && return 0
+ [ "${value}" = "${expected}" ] && return 0
+ [ -z "${value}" ] && err " PMTU exception wasn't created after ${event}" && return 1
+ [ -z "${expected}" ] && err " PMTU exception shouldn't exist after ${event}" && return 1
+ err " found PMTU exception with incorrect MTU ${value}, expected ${expected}, after ${event}"
+ return 1
+}
+
+test_pmtu_ipvX() {
+ family=${1}
+
+ setup namespaces routing || return 2
+ trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_r1}" veth_R1-B "${ns_b}" veth_B-R1 \
+ "${ns_a}" veth_A-R2 "${ns_r2}" veth_R2-A \
+ "${ns_r2}" veth_R2-B "${ns_b}" veth_B-R2
+
+ if [ ${family} -eq 4 ]; then
+ ping=ping
+ dst1="${prefix4}.${b_r1}.1"
+ dst2="${prefix4}.${b_r2}.1"
+ else
+ ping=${ping6}
+ dst1="${prefix6}:${b_r1}::1"
+ dst2="${prefix6}:${b_r2}::1"
+ fi
+
+ # Set up initial MTU values
+ mtu "${ns_a}" veth_A-R1 2000
+ mtu "${ns_r1}" veth_R1-A 2000
+ mtu "${ns_r1}" veth_R1-B 1400
+ mtu "${ns_b}" veth_B-R1 1400
+
+ mtu "${ns_a}" veth_A-R2 2000
+ mtu "${ns_r2}" veth_R2-A 2000
+ mtu "${ns_r2}" veth_R2-B 1500
+ mtu "${ns_b}" veth_B-R2 1500
+
+ # Create route exceptions
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst1} > /dev/null
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst2} > /dev/null
+
+ # Check that exceptions have been created with the correct PMTU
+ pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
+ check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1
+ pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+ check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1
+
+ # Decrease local MTU below PMTU, check for PMTU decrease in route exception
+ mtu "${ns_a}" veth_A-R1 1300
+ mtu "${ns_r1}" veth_R1-A 1300
+ pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
+ check_pmtu_value "1300" "${pmtu_1}" "decreasing local MTU" || return 1
+ # Second exception shouldn't be modified
+ pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+ check_pmtu_value "1500" "${pmtu_2}" "changing local MTU on a link not on this path" || return 1
+
+ # Increase MTU, check for PMTU increase in route exception
+ mtu "${ns_a}" veth_A-R1 1700
+ mtu "${ns_r1}" veth_R1-A 1700
+ pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
+ check_pmtu_value "1700" "${pmtu_1}" "increasing local MTU" || return 1
+ # Second exception shouldn't be modified
+ pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+ check_pmtu_value "1500" "${pmtu_2}" "changing local MTU on a link not on this path" || return 1
+
+ # Skip PMTU locking tests for IPv6
+ [ $family -eq 6 ] && return 0
+
+ # Decrease remote MTU on path via R2, get new exception
+ mtu "${ns_r2}" veth_R2-B 400
+ mtu "${ns_b}" veth_B-R2 400
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+ pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+ check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
+
+ # Decrease local MTU below PMTU
+ mtu "${ns_a}" veth_A-R2 500
+ mtu "${ns_r2}" veth_R2-A 500
+ pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+ check_pmtu_value "500" "${pmtu_2}" "decreasing local MTU" || return 1
+
+ # Increase local MTU
+ mtu "${ns_a}" veth_A-R2 1500
+ mtu "${ns_r2}" veth_R2-A 1500
+ pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+ check_pmtu_value "1500" "${pmtu_2}" "increasing local MTU" || return 1
+
+ # Get new exception
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+ pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+ check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
+}
+
+test_pmtu_ipv4_exception() {
+ test_pmtu_ipvX 4
+}
+
+test_pmtu_ipv6_exception() {
+ test_pmtu_ipvX 6
+}
+
test_pmtu_vti4_exception() {
setup namespaces veth vti4 xfrm4 || return 2
+ trace "${ns_a}" veth_a "${ns_b}" veth_b \
+ "${ns_a}" vti4_a "${ns_b}" vti4_b
veth_mtu=1500
vti_mtu=$((veth_mtu - 20))
@@ -248,28 +486,19 @@ test_pmtu_vti4_exception() {
# exception is created
${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${vti4_b_addr} > /dev/null
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})"
- if [ "${pmtu}" != "" ]; then
- err " unexpected exception created with PMTU ${pmtu} for IP payload length ${esp_payload_rfc4106}"
- return 1
- fi
+ check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
# Now exceed link layer MTU by one byte, check that exception is created
+ # with the right PMTU value
${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${vti4_b_addr} > /dev/null
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})"
- if [ "${pmtu}" = "" ]; then
- err " exception not created for IP payload length $((esp_payload_rfc4106 + 1))"
- return 1
- fi
-
- # ...with the right PMTU value
- if [ ${pmtu} -ne ${esp_payload_rfc4106} ]; then
- err " wrong PMTU ${pmtu} in exception, expected: ${esp_payload_rfc4106}"
- return 1
- fi
+ check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
}
test_pmtu_vti6_exception() {
setup namespaces veth vti6 xfrm6 || return 2
+ trace "${ns_a}" veth_a "${ns_b}" veth_b \
+ "${ns_a}" vti6_a "${ns_b}" vti6_b
fail=0
# Create route exception by exceeding link layer MTU
@@ -280,25 +509,18 @@ test_pmtu_vti6_exception() {
${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
# Check that exception was created
- if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then
- err " tunnel exceeding link layer MTU didn't create route exception"
- return 1
- fi
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+ check_pmtu_value any "${pmtu}" "creating tunnel exceeding link layer MTU" || return 1
# Decrease tunnel MTU, check for PMTU decrease in route exception
mtu "${ns_a}" vti6_a 3000
-
- if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" -ne 3000 ]; then
- err " decreasing tunnel MTU didn't decrease route exception PMTU"
- fail=1
- fi
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+ check_pmtu_value "3000" "${pmtu}" "decreasing tunnel MTU" || fail=1
# Increase tunnel MTU, check for PMTU increase in route exception
mtu "${ns_a}" vti6_a 9000
- if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" -ne 9000 ]; then
- err " increasing tunnel MTU didn't increase route exception PMTU"
- fail=1
- fi
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+ check_pmtu_value "9000" "${pmtu}" "increasing tunnel MTU" || fail=1
return ${fail}
}
@@ -445,15 +667,56 @@ test_pmtu_vti6_link_change_mtu() {
return ${fail}
}
-trap cleanup EXIT
+usage() {
+ echo
+ echo "$0 [OPTIONS] [TEST]..."
+ echo "If no TEST argument is given, all tests will be run."
+ echo
+ echo "Options"
+ echo " --trace: capture traffic to TEST_INTERFACE.pcap"
+ echo
+ echo "Available tests${tests}"
+ exit 1
+}
exitcode=0
desc=0
IFS="
"
+
+tracing=0
+for arg do
+ if [ "${arg}" != "${arg#--*}" ]; then
+ opt="${arg#--}"
+ if [ "${opt}" = "trace" ]; then
+ if which tcpdump > /dev/null 2>&1; then
+ tracing=1
+ else
+ echo "=== tcpdump not available, tracing disabled"
+ fi
+ else
+ usage
+ fi
+ else
+ # Check first that all requested tests are available before
+ # running any
+ command -v > /dev/null "test_${arg}" || { echo "=== Test ${arg} not found"; usage; }
+ fi
+done
+
+trap cleanup EXIT
+
for t in ${tests}; do
[ $desc -eq 0 ] && name="${t}" && desc=1 && continue || desc=0
+ run_this=1
+ for arg do
+ [ "${arg}" != "${arg#--*}" ] && continue
+ [ "${arg}" = "${name}" ] && run_this=1 && break
+ run_this=0
+ done
+ [ $run_this -eq 0 ] && continue
+
(
unset IFS
eval test_${name}
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index cad14cd0ea92..b5277106df1f 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -437,14 +437,19 @@ void enable_fastopen(void)
}
}
-static struct rlimit rlim_old, rlim_new;
+static struct rlimit rlim_old;
static __attribute__((constructor)) void main_ctor(void)
{
getrlimit(RLIMIT_MEMLOCK, &rlim_old);
- rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
- rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
- setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+
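+ /* If the limit is already RLIM_INFINITY, adding to it would wrap
+ * around and accidentally lower it, so leave it untouched. */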
+ if (rlim_old.rlim_cur != RLIM_INFINITY) {
+ struct rlimit rlim_new;
+
+ rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
+ rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
+ setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+ }
}
static __attribute__((destructor)) void main_dtor(void)
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 08c341b49760..e101af52d1d6 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
#
# This test checks rtnetlink callpaths, aiming for as much coverage as possible.
#
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index b3ebf2646e52..fac68d710f35 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -121,11 +121,11 @@ TEST_F(tls, send_then_sendfile)
buf = (char *)malloc(st.st_size);
EXPECT_EQ(send(self->fd, test_str, to_send, 0), to_send);
- EXPECT_EQ(recv(self->cfd, recv_buf, to_send, 0), to_send);
+ EXPECT_EQ(recv(self->cfd, recv_buf, to_send, MSG_WAITALL), to_send);
EXPECT_EQ(memcmp(test_str, recv_buf, to_send), 0);
EXPECT_GE(sendfile(self->fd, filefd, 0, st.st_size), 0);
- EXPECT_EQ(recv(self->cfd, buf, st.st_size, 0), st.st_size);
+ EXPECT_EQ(recv(self->cfd, buf, st.st_size, MSG_WAITALL), st.st_size);
}
TEST_F(tls, recv_max)
@@ -160,7 +160,7 @@ TEST_F(tls, msg_more)
EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len);
EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_DONTWAIT), -1);
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
- EXPECT_EQ(recv(self->cfd, buf, send_len * 2, MSG_DONTWAIT),
+ EXPECT_EQ(recv(self->cfd, buf, send_len * 2, MSG_WAITALL),
send_len * 2);
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
@@ -180,7 +180,7 @@ TEST_F(tls, sendmsg_single)
msg.msg_iov = &vec;
msg.msg_iovlen = 1;
EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
- EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_WAITALL), send_len);
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
@@ -288,7 +288,7 @@ TEST_F(tls, splice_from_pipe)
ASSERT_GE(pipe(p), 0);
EXPECT_GE(write(p[1], mem_send, send_len), 0);
EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), 0);
- EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+ EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
@@ -306,7 +306,7 @@ TEST_F(tls, splice_from_pipe2)
EXPECT_GE(splice(p[0], NULL, self->fd, NULL, 8000, 0), 0);
EXPECT_GE(write(p2[1], mem_send + 8000, 8000), 0);
EXPECT_GE(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 0);
- EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+ EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
@@ -322,13 +322,13 @@ TEST_F(tls, send_and_splice)
ASSERT_GE(pipe(p), 0);
EXPECT_EQ(send(self->fd, test_str, send_len2, 0), send_len2);
- EXPECT_NE(recv(self->cfd, buf, send_len2, 0), -1);
+ EXPECT_EQ(recv(self->cfd, buf, send_len2, MSG_WAITALL), send_len2);
EXPECT_EQ(memcmp(test_str, buf, send_len2), 0);
EXPECT_GE(write(p[1], mem_send, send_len), send_len);
EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), send_len);
- EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
+ EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
@@ -436,7 +436,7 @@ TEST_F(tls, multiple_send_single_recv)
EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
memset(recv_mem, 0, total_len);
- EXPECT_EQ(recv(self->cfd, recv_mem, total_len, 0), total_len);
+ EXPECT_EQ(recv(self->cfd, recv_mem, total_len, MSG_WAITALL), total_len);
EXPECT_EQ(memcmp(send_mem, recv_mem, send_len), 0);
EXPECT_EQ(memcmp(send_mem, recv_mem + send_len, send_len), 0);
@@ -502,6 +502,78 @@ TEST_F(tls, recv_peek_multiple)
EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
}
+TEST_F(tls, recv_peek_multiple_records)
+{
+ char const *test_str = "test_read_peek_mult_recs";
+ char const *test_str_first = "test_read_peek";
+ char const *test_str_second = "_mult_recs";
+ int len;
+ char buf[64];
+
+ len = strlen(test_str_first);
+ EXPECT_EQ(send(self->fd, test_str_first, len, 0), len);
+
+ len = strlen(test_str_second) + 1;
+ EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
+
+ len = strlen(test_str_first);
+ memset(buf, 0, len);
+ EXPECT_EQ(recv(self->cfd, buf, len, MSG_PEEK | MSG_WAITALL), len);
+
+ /* MSG_PEEK can only peek into the current record. */
+ len = strlen(test_str_first);
+ EXPECT_EQ(memcmp(test_str_first, buf, len), 0);
+
+ len = strlen(test_str) + 1;
+ memset(buf, 0, len);
+ EXPECT_EQ(recv(self->cfd, buf, len, MSG_WAITALL), len);
+
+ /* A read without MSG_PEEK, however, advances the strparser
+ * (and therefore the record).
+ */
+ len = strlen(test_str) + 1;
+ EXPECT_EQ(memcmp(test_str, buf, len), 0);
+
+ /* MSG_MORE holds the current record open, so a later MSG_PEEK
+ * will see everything.
+ */
+ len = strlen(test_str_first);
+ EXPECT_EQ(send(self->fd, test_str_first, len, MSG_MORE), len);
+
+ len = strlen(test_str_second) + 1;
+ EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
+
+ len = strlen(test_str) + 1;
+ memset(buf, 0, len);
+ EXPECT_EQ(recv(self->cfd, buf, len, MSG_PEEK | MSG_WAITALL), len);
+
+ len = strlen(test_str) + 1;
+ EXPECT_EQ(memcmp(test_str, buf, len), 0);
+}
+
+TEST_F(tls, recv_peek_large_buf_mult_recs)
+{
+ char const *test_str = "test_read_peek_mult_recs";
+ char const *test_str_first = "test_read_peek";
+ char const *test_str_second = "_mult_recs";
+ int len;
+ char buf[64];
+
+ len = strlen(test_str_first);
+ EXPECT_EQ(send(self->fd, test_str_first, len, 0), len);
+
+ len = strlen(test_str_second) + 1;
+ EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
+
+ len = sizeof(buf);
+ memset(buf, 0, len);
+ EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
+
+ len = strlen(test_str) + 1;
+ EXPECT_EQ(memcmp(test_str, buf, len), 0);
+}
+
TEST_F(tls, pollin)
{
char const *test_str = "test_poll";
@@ -515,7 +587,7 @@ TEST_F(tls, pollin)
EXPECT_EQ(poll(&fd, 1, 20), 1);
EXPECT_EQ(fd.revents & POLLIN, 1);
- EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_WAITALL), send_len);
/* Test timing out */
EXPECT_EQ(poll(&fd, 1, 20), 0);
}
@@ -533,7 +605,7 @@ TEST_F(tls, poll_wait)
/* Set timeout to inf. secs */
EXPECT_EQ(poll(&fd, 1, -1), 1);
EXPECT_EQ(fd.revents & POLLIN, 1);
- EXPECT_EQ(recv(self->cfd, recv_mem, send_len, 0), send_len);
+ EXPECT_EQ(recv(self->cfd, recv_mem, send_len, MSG_WAITALL), send_len);
}
TEST_F(tls, blocking)
@@ -679,7 +751,7 @@ TEST_F(tls, control_msg)
EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
vec.iov_base = buf;
- EXPECT_EQ(recvmsg(self->cfd, &msg, 0), send_len);
+ EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len);
cmsg = CMSG_FIRSTHDR(&msg);
EXPECT_NE(cmsg, NULL);
EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
index 850767befa47..99e537ab5ad9 100755
--- a/tools/testing/selftests/net/udpgso_bench.sh
+++ b/tools/testing/selftests/net/udpgso_bench.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Run a series of udpgso benchmarks
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
index a728040edbe1..14cfcf006936 100644
--- a/tools/testing/selftests/networking/timestamping/Makefile
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -5,6 +5,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
all: $(TEST_PROGS)
+top_srcdir = ../../../../..
include ../../lib.mk
clean:
diff --git a/tools/testing/selftests/powerpc/alignment/Makefile b/tools/testing/selftests/powerpc/alignment/Makefile
index 93baacab7693..d056486f49de 100644
--- a/tools/testing/selftests/powerpc/alignment/Makefile
+++ b/tools/testing/selftests/powerpc/alignment/Makefile
@@ -1,5 +1,6 @@
TEST_GEN_PROGS := copy_first_unaligned alignment_handler
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c ../utils.c
diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile
index b4d7432a0ecd..d40300a65b42 100644
--- a/tools/testing/selftests/powerpc/benchmarks/Makefile
+++ b/tools/testing/selftests/powerpc/benchmarks/Makefile
@@ -4,6 +4,7 @@ TEST_GEN_FILES := exec_target
CFLAGS += -O2
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/cache_shape/Makefile b/tools/testing/selftests/powerpc/cache_shape/Makefile
index 1be547434a49..ede4d3dae750 100644
--- a/tools/testing/selftests/powerpc/cache_shape/Makefile
+++ b/tools/testing/selftests/powerpc/cache_shape/Makefile
@@ -5,6 +5,7 @@ all: $(TEST_PROGS)
$(TEST_PROGS): ../harness.c ../utils.c
+top_srcdir = ../../../../..
include ../../lib.mk
clean:
diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile
index 1cf89a34d97c..44574f3818b3 100644
--- a/tools/testing/selftests/powerpc/copyloops/Makefile
+++ b/tools/testing/selftests/powerpc/copyloops/Makefile
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \
EXTRA_SOURCES := validate.c ../harness.c stubs.S
+top_srcdir = ../../../../..
include ../../lib.mk
$(OUTPUT)/copyuser_64_t%: copyuser_64.S $(EXTRA_SOURCES)
diff --git a/tools/testing/selftests/powerpc/dscr/Makefile b/tools/testing/selftests/powerpc/dscr/Makefile
index 55d7db7a616b..5df476364b4d 100644
--- a/tools/testing/selftests/powerpc/dscr/Makefile
+++ b/tools/testing/selftests/powerpc/dscr/Makefile
@@ -3,6 +3,7 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test \
dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \
dscr_sysfs_thread_test
+top_srcdir = ../../../../..
include ../../lib.mk
$(OUTPUT)/dscr_default_test: LDLIBS += -lpthread
diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile
index 0dd3a01fdab9..11a10d7a2bbd 100644
--- a/tools/testing/selftests/powerpc/math/Makefile
+++ b/tools/testing/selftests/powerpc/math/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 8ebbe96d80a8..33ced6e0ad25 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -5,6 +5,7 @@ noarg:
TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors
TEST_GEN_FILES := tempfile
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index 6e1629bf5b09..19046db995fe 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -5,6 +5,7 @@ noarg:
TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes
EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
+top_srcdir = ../../../../..
include ../../lib.mk
all: $(TEST_GEN_PROGS) ebb
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index c4e64bc2e265..bd5dfa509272 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \
lost_exception_test no_handler_test \
cycles_with_mmcr2_test
+top_srcdir = ../../../../../..
include ../../../lib.mk
$(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \
diff --git a/tools/testing/selftests/powerpc/primitives/Makefile b/tools/testing/selftests/powerpc/primitives/Makefile
index 175366db7be8..ea2b7bd09e36 100644
--- a/tools/testing/selftests/powerpc/primitives/Makefile
+++ b/tools/testing/selftests/powerpc/primitives/Makefile
@@ -2,6 +2,7 @@ CFLAGS += -I$(CURDIR)
TEST_GEN_PROGS := load_unaligned_zeropad
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 28f5b781a553..923d531265f8 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -4,6 +4,7 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
perf-hwbreak
+top_srcdir = ../../../../..
include ../../lib.mk
all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile
index a7cbd5082e27..1fca25c6ace0 100644
--- a/tools/testing/selftests/powerpc/signal/Makefile
+++ b/tools/testing/selftests/powerpc/signal/Makefile
@@ -8,6 +8,7 @@ $(TEST_PROGS): ../harness.c ../utils.c signal.S
CFLAGS += -maltivec
signal_tm: CFLAGS += -mhtm
+top_srcdir = ../../../../..
include ../../lib.mk
clean:
diff --git a/tools/testing/selftests/powerpc/stringloops/Makefile b/tools/testing/selftests/powerpc/stringloops/Makefile
index 10b35c87a4f4..7fc0623d85c3 100644
--- a/tools/testing/selftests/powerpc/stringloops/Makefile
+++ b/tools/testing/selftests/powerpc/stringloops/Makefile
@@ -29,6 +29,7 @@ endif
ASFLAGS = $(CFLAGS)
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): $(EXTRA_SOURCES)
diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile
index 30b8ff8fb82e..fcd2dcb8972b 100644
--- a/tools/testing/selftests/powerpc/switch_endian/Makefile
+++ b/tools/testing/selftests/powerpc/switch_endian/Makefile
@@ -5,6 +5,7 @@ ASFLAGS += -O2 -Wall -g -nostdlib -m64
EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S
+top_srcdir = ../../../../..
include ../../lib.mk
$(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S
diff --git a/tools/testing/selftests/powerpc/syscalls/Makefile b/tools/testing/selftests/powerpc/syscalls/Makefile
index da22ca7c38c1..161b8846336f 100644
--- a/tools/testing/selftests/powerpc/syscalls/Makefile
+++ b/tools/testing/selftests/powerpc/syscalls/Makefile
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := ipc_unmuxed
CFLAGS += -I../../../../../usr/include
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index c0e45d2dde25..9fc2cf6fbc92 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -6,6 +6,7 @@ TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack
tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
$(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c ../utils.c
diff --git a/tools/testing/selftests/powerpc/vphn/Makefile b/tools/testing/selftests/powerpc/vphn/Makefile
index f8ced26748f8..fb82068c9fda 100644
--- a/tools/testing/selftests/powerpc/vphn/Makefile
+++ b/tools/testing/selftests/powerpc/vphn/Makefile
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := test-vphn
CFLAGS += -m64
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index f7247ee00514..58ca758a5786 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -120,7 +120,6 @@ then
parse-build.sh $resdir/Make.out $title
else
# Build failed.
- cp $builddir/Make*.out $resdir
cp $builddir/.config $resdir || :
echo Build failed, not running KVM, see $resdir.
if test -f $builddir.wait
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/CFLIST b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
index 6a0b9f69faad..c3c1fb5a9e1f 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
+++ b/tools/testing/selftests/rcutorture/configs/rcu/CFLIST
@@ -3,9 +3,7 @@ TREE02
TREE03
TREE04
TREE05
-TREE06
TREE07
-TREE08
TREE09
SRCU-N
SRCU-P
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot
index 84a7d51b7481..ce48c7b82673 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot
@@ -1 +1,2 @@
rcutorture.torture_type=srcud
+rcupdate.rcu_self_test=1
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot
index 84a7d51b7481..ce48c7b82673 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot
@@ -1 +1,2 @@
rcutorture.torture_type=srcud
+rcupdate.rcu_self_test=1
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot b/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot
index 6c1a292a65fb..b39f1553a478 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot
@@ -1,3 +1 @@
rcupdate.rcu_self_test=1
-rcupdate.rcu_self_test_bh=1
-rcutorture.torture_type=rcu_bh
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
index 9f3a4d28e508..ea47da95374b 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot
@@ -1,4 +1,4 @@
-rcutorture.torture_type=rcu_bh maxcpus=8 nr_cpus=43
+maxcpus=8 nr_cpus=43
rcutree.gp_preinit_delay=3
rcutree.gp_init_delay=3
rcutree.gp_cleanup_delay=3
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
index e6071bb96c7d..5adc6756792a 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
@@ -1 +1 @@
-rcutorture.torture_type=rcu_bh rcutree.rcu_fanout_leaf=4 nohz_full=1-7
+rcutree.rcu_fanout_leaf=4 nohz_full=1-7
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
index c7fd050dfcd9..c419cac233ee 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
@@ -1,5 +1,4 @@
-rcutorture.torture_type=sched
-rcupdate.rcu_self_test_sched=1
rcutree.gp_preinit_delay=3
rcutree.gp_init_delay=3
rcutree.gp_cleanup_delay=3
+rcupdate.rcu_self_test=1
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot
index ad18b52a2cad..055f4aa79077 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot
@@ -1,6 +1,4 @@
rcupdate.rcu_self_test=1
-rcupdate.rcu_self_test_bh=1
-rcupdate.rcu_self_test_sched=1
rcutree.rcu_fanout_exact=1
rcutree.gp_preinit_delay=3
rcutree.gp_init_delay=3
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
index 1bd8efc4141e..22478fd3a865 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
@@ -1,5 +1,3 @@
-rcutorture.torture_type=sched
rcupdate.rcu_self_test=1
-rcupdate.rcu_self_test_sched=1
rcutree.rcu_fanout_exact=1
rcu_nocbs=0-7
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 642d4e12abea..eec2663261f2 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -56,15 +56,13 @@ unsigned int yield_mod_cnt, nr_abort;
printf(fmt, ## __VA_ARGS__); \
} while (0)
-#if defined(__x86_64__) || defined(__i386__)
+#ifdef __i386__
#define INJECT_ASM_REG "eax"
#define RSEQ_INJECT_CLOBBER \
, INJECT_ASM_REG
-#ifdef __i386__
-
#define RSEQ_INJECT_ASM(n) \
"mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \
"test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
@@ -76,9 +74,16 @@ unsigned int yield_mod_cnt, nr_abort;
#elif defined(__x86_64__)
+#define INJECT_ASM_REG_P "rax"
+#define INJECT_ASM_REG "eax"
+
+#define RSEQ_INJECT_CLOBBER \
+ , INJECT_ASM_REG_P \
+ , INJECT_ASM_REG
+
#define RSEQ_INJECT_ASM(n) \
- "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG "\n\t" \
- "mov (%%" INJECT_ASM_REG "), %%" INJECT_ASM_REG "\n\t" \
+ "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG_P "\n\t" \
+ "mov (%%" INJECT_ASM_REG_P "), %%" INJECT_ASM_REG "\n\t" \
"test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
"jz 333f\n\t" \
"222:\n\t" \
@@ -86,10 +91,6 @@ unsigned int yield_mod_cnt, nr_abort;
"jnz 222b\n\t" \
"333:\n\t"
-#else
-#error "Unsupported architecture"
-#endif
-
#elif defined(__s390__)
#define RSEQ_INJECT_INPUT \
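The x86_64 hunk above dedicates a 64-bit register (INJECT_ASM_REG_P, "rax") to the RIP-relative lea and keeps a 32-bit register ("eax") for the counter value, since a 64-bit address does not fit in a 32-bit register. A minimal standalone sketch of the same two-register pattern (x86_64 only; asm_loop_cnt here is an illustrative stand-in for the selftest's per-site counters, not the selftest's variable):

#include <stdio.h>

unsigned int asm_loop_cnt = 3;	/* stand-in for asm_loop_cnt_<n> in param_test.c */

int main(void)
{
	unsigned long p;	/* 64-bit register: target of the RIP-relative lea */
	unsigned int v;		/* 32-bit register: holds the loaded counter */

	__asm__ volatile("lea asm_loop_cnt(%%rip), %0\n\t"
			 "mov (%0), %1\n\t"
			 : "=r"(p), "=r"(v)
			 : "m"(asm_loop_cnt));
	printf("loop count = %u\n", v);
	return 0;
}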
diff --git a/tools/testing/selftests/tc-testing/README b/tools/testing/selftests/tc-testing/README
index 49a6f8c3fdae..f9281e8aa313 100644
--- a/tools/testing/selftests/tc-testing/README
+++ b/tools/testing/selftests/tc-testing/README
@@ -232,6 +232,8 @@ directory:
and the other is a test of whether the command leaked memory or not.
(This one is a preliminary version; it may not work quite right yet,
but the overall template is there and it should only need tweaks.)
+ - buildebpfPlugin.py:
+ builds all programs in $EBPFDIR.
ACKNOWLEDGEMENTS
diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/bpf/Makefile
new file mode 100644
index 000000000000..dc92eb271d9a
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/bpf/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+
+APIDIR := ../../../../include/uapi
+TEST_GEN_FILES = action.o
+
+top_srcdir = ../../../../..
+include ../../lib.mk
+
+CLANG ?= clang
+LLC ?= llc
+PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
+
+ifeq ($(PROBE),)
+ CPU ?= probe
+else
+ CPU ?= generic
+endif
+
+CLANG_SYS_INCLUDES := $(shell $(CLANG) -v -E - </dev/null 2>&1 \
+ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }')
+
+CLANG_FLAGS = -I. -I$(APIDIR) \
+ $(CLANG_SYS_INCLUDES) \
+ -Wno-compare-distinct-pointer-types
+
+$(OUTPUT)/%.o: %.c
+ $(CLANG) $(CLANG_FLAGS) \
+ -O2 -target bpf -emit-llvm -c $< -o - | \
+ $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
diff --git a/tools/testing/selftests/tc-testing/bpf/action.c b/tools/testing/selftests/tc-testing/bpf/action.c
new file mode 100644
index 000000000000..c32b99b80e19
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/bpf/action.c
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018 Davide Caratti, Red Hat inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+
+__attribute__((section("action-ok"),used)) int action_ok(struct __sk_buff *s)
+{
+ return TC_ACT_OK;
+}
+
+__attribute__((section("action-ko"),used)) int action_ko(struct __sk_buff *s)
+{
+ s->data = 0x0;
+ return TC_ACT_OK;
+}
+
+char _license[] __attribute__((section("license"),used)) = "GPL";
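For context on why the two sections above behave differently at load time: action-ok only returns a verdict, while action-ko stores to __sk_buff::data, which the verifier treats as read-only context, so the program is rejected and the tc command fails (matching the expExitCode 255 case in the updated bpf.json below). A hypothetical third section, not part of the patch, showing that plain reads of the same field would pass:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

__attribute__((section("action-read"),used)) int action_read(struct __sk_buff *s)
{
	/* reading s->data is valid context access; only the store is rejected */
	return s->data != 0 ? TC_ACT_OK : TC_ACT_SHOT;
}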
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
new file mode 100644
index 000000000000..9f0ba10c44b4
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/plugin-lib/buildebpfPlugin.py
@@ -0,0 +1,66 @@
+'''
+build eBPF programs
+'''
+
+import os
+import signal
+from string import Template
+import subprocess
+import time
+from TdcPlugin import TdcPlugin
+from tdc_config import *
+
+class SubPlugin(TdcPlugin):
+ def __init__(self):
+ self.sub_class = 'buildebpf/SubPlugin'
+ self.tap = ''
+ super().__init__()
+
+ def pre_suite(self, testcount, testidlist):
+ super().pre_suite(testcount, testidlist)
+
+ if self.args.buildebpf:
+ self._ebpf_makeall()
+
+ def post_suite(self, index):
+ super().post_suite(index)
+
+ self._ebpf_makeclean()
+
+ def add_args(self, parser):
+ super().add_args(parser)
+
+ self.argparser_group = self.argparser.add_argument_group(
+ 'buildebpf',
+ 'options for buildebpfPlugin')
+ self.argparser_group.add_argument(
+ '-B', '--buildebpf', action='store_true',
+ help='build eBPF programs')
+
+ return self.argparser
+
+ def _ebpf_makeall(self):
+ if self.args.buildebpf:
+ self._make('all')
+
+ def _ebpf_makeclean(self):
+ if self.args.buildebpf:
+ self._make('clean')
+
+ def _make(self, target):
+ command = 'make -C {} {}'.format(self.args.NAMES['EBPFDIR'], target)
+ proc = subprocess.Popen(command,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=ENVIR)
+ (rawout, serr) = proc.communicate()
+
+ if proc.returncode != 0 and len(serr) > 0:
+ foutput = serr.decode("utf-8")
+ else:
+ foutput = rawout.decode("utf-8")
+
+ proc.stdout.close()
+ proc.stderr.close()
+ return proc, foutput
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
index 6f289a49e5ec..5970cee6d05f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
@@ -55,7 +55,6 @@
"bpf"
],
"setup": [
- "printf '#include <linux/bpf.h>\nchar l[] __attribute__((section(\"license\"),used))=\"GPL\"; __attribute__((section(\"action\"),used)) int m(struct __sk_buff *s) { return 2; }' | clang -O2 -x c -c - -target bpf -o _b.o",
[
"$TC action flush action bpf",
0,
@@ -63,14 +62,13 @@
255
]
],
- "cmdUnderTest": "$TC action add action bpf object-file _b.o index 667",
+ "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ok index 667",
"expExitCode": "0",
"verifyCmd": "$TC action get action bpf index 667",
- "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9]* tag 3b185187f1855c4c( jited)? default-action pipe.*index 667 ref",
+ "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9]* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
"matchCount": "1",
"teardown": [
- "$TC action flush action bpf",
- "rm -f _b.o"
+ "$TC action flush action bpf"
]
},
{
@@ -81,7 +79,6 @@
"bpf"
],
"setup": [
- "printf '#include <linux/bpf.h>\nchar l[] __attribute__((section(\"license\"),used))=\"GPL\"; __attribute__((section(\"action\"),used)) int m(struct __sk_buff *s) { s->data = 0x0; return 2; }' | clang -O2 -x c -c - -target bpf -o _c.o",
[
"$TC action flush action bpf",
0,
@@ -89,10 +86,10 @@
255
]
],
- "cmdUnderTest": "$TC action add action bpf object-file _c.o index 667",
+ "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ko index 667",
"expExitCode": "255",
"verifyCmd": "$TC action get action bpf index 667",
- "matchPattern": "action order [0-9]*: bpf _c.o:\\[action\\] id [0-9].*index 667 ref",
+ "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ko\\] id [0-9].*index 667 ref",
"matchCount": "0",
"teardown": [
[
@@ -100,8 +97,7 @@
0,
1,
255
- ],
- "rm -f _c.o"
+ ]
]
},
{
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
index 68c91023cdb9..89189a03ce3d 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
@@ -536,5 +536,29 @@
"matchPattern": "^[ \t]+index [0-9]+ ref",
"matchCount": "0",
"teardown": []
+ },
+ {
+ "id": "8e47",
+ "name": "Add gact action with random determ goto chain control action",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pass random determ goto chain 1 2 index 90",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action pass random type determ goto chain 1 val 2.*index 90 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
index 30f9b54bd666..4086a50a670e 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -715,5 +715,29 @@
"teardown": [
"$TC actions flush action police"
]
+ },
+ {
+ "id": "b48b",
+ "name": "Add police action with exceed goto chain control action",
+ "category": [
+ "actions",
+ "police"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action police",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action police rate 1mbit burst 1k conform-exceed pass / goto chain 42",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions ls action police",
+ "matchPattern": "action order [0-9]*: police 0x1 rate 1Mbit burst 1Kb mtu 2Kb action pass/goto chain 42",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action police"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index a023d0d62b25..d651bc1501bd 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -16,7 +16,9 @@ NAMES = {
'DEV2': '',
'BATCH_FILE': './batch.txt',
# Name of the namespace to use
- 'NS': 'tcut'
+ 'NS': 'tcut',
+ # Directory containing eBPF test programs
+ 'EBPFDIR': './bpf'
}
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 9881876d2aa0..e94b7b14bcb2 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -26,10 +26,6 @@ TEST_PROGS := run_vmtests
include ../lib.mk
-$(OUTPUT)/userfaultfd: ../../../../usr/include/linux/kernel.h
$(OUTPUT)/userfaultfd: LDLIBS += -lpthread
$(OUTPUT)/mlock-random-test: LDLIBS += -lcap
-
-../../../../usr/include/linux/kernel.h:
- make -C ../../../.. headers_install
diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c
index 235259011704..35edd61d1663 100644
--- a/tools/testing/selftests/x86/test_vdso.c
+++ b/tools/testing/selftests/x86/test_vdso.c
@@ -17,6 +17,7 @@
#include <errno.h>
#include <sched.h>
#include <stdbool.h>
+#include <limits.h>
#ifndef SYS_getcpu
# ifdef __x86_64__
@@ -31,6 +32,14 @@
int nerrs = 0;
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+
+vgettime_t vdso_clock_gettime;
+
+typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz);
+
+vgtod_t vdso_gettimeofday;
+
typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
getcpu_t vgetcpu;
@@ -95,6 +104,15 @@ static void fill_function_pointers()
printf("Warning: failed to find getcpu in vDSO\n");
vgetcpu = (getcpu_t) vsyscall_getcpu();
+
+ vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+ if (!vdso_clock_gettime)
+ printf("Warning: failed to find clock_gettime in vDSO\n");
+
+ vdso_gettimeofday = (vgtod_t)dlsym(vdso, "__vdso_gettimeofday");
+ if (!vdso_gettimeofday)
+ printf("Warning: failed to find gettimeofday in vDSO\n");
+
}
static long sys_getcpu(unsigned * cpu, unsigned * node,
@@ -103,6 +121,16 @@ static long sys_getcpu(unsigned * cpu, unsigned * node,
return syscall(__NR_getcpu, cpu, node, cache);
}
+static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
+{
+ return syscall(__NR_clock_gettime, id, ts);
+}
+
+static inline int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ return syscall(__NR_gettimeofday, tv, tz);
+}
+
static void test_getcpu(void)
{
printf("[RUN]\tTesting getcpu...\n");
@@ -155,10 +183,154 @@ static void test_getcpu(void)
}
}
+static bool ts_leq(const struct timespec *a, const struct timespec *b)
+{
+ if (a->tv_sec != b->tv_sec)
+ return a->tv_sec < b->tv_sec;
+ else
+ return a->tv_nsec <= b->tv_nsec;
+}
+
+static bool tv_leq(const struct timeval *a, const struct timeval *b)
+{
+ if (a->tv_sec != b->tv_sec)
+ return a->tv_sec < b->tv_sec;
+ else
+ return a->tv_usec <= b->tv_usec;
+}
+
+static char const * const clocknames[] = {
+ [0] = "CLOCK_REALTIME",
+ [1] = "CLOCK_MONOTONIC",
+ [2] = "CLOCK_PROCESS_CPUTIME_ID",
+ [3] = "CLOCK_THREAD_CPUTIME_ID",
+ [4] = "CLOCK_MONOTONIC_RAW",
+ [5] = "CLOCK_REALTIME_COARSE",
+ [6] = "CLOCK_MONOTONIC_COARSE",
+ [7] = "CLOCK_BOOTTIME",
+ [8] = "CLOCK_REALTIME_ALARM",
+ [9] = "CLOCK_BOOTTIME_ALARM",
+ [10] = "CLOCK_SGI_CYCLE",
+ [11] = "CLOCK_TAI",
+};
+
+static void test_one_clock_gettime(int clock, const char *name)
+{
+ struct timespec start, vdso, end;
+ int vdso_ret, end_ret;
+
+ printf("[RUN]\tTesting clock_gettime for clock %s (%d)...\n", name, clock);
+
+ if (sys_clock_gettime(clock, &start) < 0) {
+ if (errno == EINVAL) {
+ vdso_ret = vdso_clock_gettime(clock, &vdso);
+ if (vdso_ret == -EINVAL) {
+ printf("[OK]\tNo such clock.\n");
+ } else {
+ printf("[FAIL]\tNo such clock, but __vdso_clock_gettime returned %d\n", vdso_ret);
+ nerrs++;
+ }
+ } else {
+			printf("[WARN]\tclock_gettime(%d) syscall returned error %d\n", clock, errno);
+ }
+ return;
+ }
+
+ vdso_ret = vdso_clock_gettime(clock, &vdso);
+ end_ret = sys_clock_gettime(clock, &end);
+
+ if (vdso_ret != 0 || end_ret != 0) {
+ printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
+ vdso_ret, errno);
+ nerrs++;
+ return;
+ }
+
+ printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
+ (unsigned long long)start.tv_sec, start.tv_nsec,
+ (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
+ (unsigned long long)end.tv_sec, end.tv_nsec);
+
+ if (!ts_leq(&start, &vdso) || !ts_leq(&vdso, &end)) {
+ printf("[FAIL]\tTimes are out of sequence\n");
+ nerrs++;
+ }
+}
+
+static void test_clock_gettime(void)
+{
+ for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
+ clock++) {
+ test_one_clock_gettime(clock, clocknames[clock]);
+ }
+
+ /* Also test some invalid clock ids */
+ test_one_clock_gettime(-1, "invalid");
+ test_one_clock_gettime(INT_MIN, "invalid");
+ test_one_clock_gettime(INT_MAX, "invalid");
+}
+
+static void test_gettimeofday(void)
+{
+ struct timeval start, vdso, end;
+ struct timezone sys_tz, vdso_tz;
+ int vdso_ret, end_ret;
+
+ if (!vdso_gettimeofday)
+ return;
+
+ printf("[RUN]\tTesting gettimeofday...\n");
+
+ if (sys_gettimeofday(&start, &sys_tz) < 0) {
+ printf("[FAIL]\tsys_gettimeofday failed (%d)\n", errno);
+ nerrs++;
+ return;
+ }
+
+ vdso_ret = vdso_gettimeofday(&vdso, &vdso_tz);
+ end_ret = sys_gettimeofday(&end, NULL);
+
+ if (vdso_ret != 0 || end_ret != 0) {
+ printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
+ vdso_ret, errno);
+ nerrs++;
+ return;
+ }
+
+ printf("\t%llu.%06ld %llu.%06ld %llu.%06ld\n",
+ (unsigned long long)start.tv_sec, start.tv_usec,
+ (unsigned long long)vdso.tv_sec, vdso.tv_usec,
+ (unsigned long long)end.tv_sec, end.tv_usec);
+
+ if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) {
+ printf("[FAIL]\tTimes are out of sequence\n");
+ nerrs++;
+ }
+
+ if (sys_tz.tz_minuteswest == vdso_tz.tz_minuteswest &&
+ sys_tz.tz_dsttime == vdso_tz.tz_dsttime) {
+ printf("[OK]\ttimezones match: minuteswest=%d, dsttime=%d\n",
+ sys_tz.tz_minuteswest, sys_tz.tz_dsttime);
+ } else {
+ printf("[FAIL]\ttimezones do not match\n");
+ nerrs++;
+ }
+
+ /* And make sure that passing NULL for tz doesn't crash. */
+ vdso_gettimeofday(&vdso, NULL);
+}
+
int main(int argc, char **argv)
{
fill_function_pointers();
+ test_clock_gettime();
+ test_gettimeofday();
+
+ /*
+ * Test getcpu() last so that, if something goes wrong setting affinity,
+ * we still run the other tests.
+ */
test_getcpu();
return nerrs ? 1 : 0;
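The new time tests depend on fill_function_pointers() resolving the vDSO entry points with dlsym(), as the hunk above shows. A minimal standalone sketch of that lookup, assuming glibc exposes the already-mapped vDSO under the x86_64 soname "linux-vdso.so.1" (link with -ldl):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <time.h>

typedef int (*vgettime_t)(clockid_t, struct timespec *);

int main(void)
{
	/* RTLD_NOLOAD: only succeed if the vDSO is already mapped */
	void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
	if (!vdso) {
		fprintf(stderr, "vDSO not found\n");
		return 1;
	}

	vgettime_t vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
	if (!vdso_clock_gettime) {
		fprintf(stderr, "__vdso_clock_gettime not found\n");
		return 1;
	}

	struct timespec ts;
	if (vdso_clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("monotonic: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}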
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
index dc93fadbee96..d79c7581b175 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.c
+++ b/tools/usb/usbip/libsrc/usbip_host_common.c
@@ -43,7 +43,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
int size;
int fd;
int length;
- char status;
+ char status[2] = { 0 };
int value = 0;
size = snprintf(status_attr_path, sizeof(status_attr_path),
@@ -61,14 +61,14 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
return -1;
}
- length = read(fd, &status, 1);
+ length = read(fd, status, 1);
if (length < 0) {
err("error reading attribute %s", status_attr_path);
close(fd);
return -1;
}
- value = atoi(&status);
+ value = atoi(status);
return value;
}
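The fix above closes an out-of-bounds read: atoi() requires a NUL-terminated string, but the old code read one byte into a lone char and handed atoi() its address, so the conversion scanned past the variable. A condensed sketch of the corrected pattern:

#include <stdlib.h>
#include <unistd.h>

static int read_status_digit(int fd)
{
	char status[2] = { 0 };		/* second byte stays '\0' as the terminator */

	if (read(fd, status, 1) != 1)	/* read exactly one ASCII digit */
		return -1;
	return atoi(status);		/* safe: the buffer is a valid C string */
}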
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
index 4204359c9fee..8159fd98680b 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.c
+++ b/tools/usb/usbip/libsrc/vhci_driver.c
@@ -150,7 +150,7 @@ static int get_nports(struct udev_device *hc_device)
static int vhci_hcd_filter(const struct dirent *dirent)
{
- return strcmp(dirent->d_name, "vhci_hcd") >= 0;
+ return !strncmp(dirent->d_name, "vhci_hcd.", 9);
}
static int get_ncontrollers(void)
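The old filter, strcmp(d_name, "vhci_hcd") >= 0, matched any directory entry sorting at or after "vhci_hcd", so unrelated sysfs entries could be miscounted as controllers; the strncmp() form matches only real instances such as "vhci_hcd.0". A small demonstration with illustrative entry names:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *names[] = { "vhci_hcd", "vhci_hcd.0", "vhci_hcd.1", "uevent", "widget" };

	for (unsigned i = 0; i < 5; i++)
		printf("%-12s old:%d new:%d\n", names[i],
		       strcmp(names[i], "vhci_hcd") >= 0,	/* old test: sort-order match */
		       !strncmp(names[i], "vhci_hcd.", 9));	/* new test: exact instance prefix */
	return 0;
}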
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index c92053bc3f96..23774970c9df 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -120,8 +120,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int ret, cpu;
- if (type)
- return -EINVAL;
+ ret = kvm_arm_setup_stage2(kvm, type);
+ if (ret)
+ return ret;
kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
if (!kvm->arch.last_vcpu_ran)
@@ -212,6 +213,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_READONLY_MEM:
case KVM_CAP_MP_STATE:
case KVM_CAP_IMMEDIATE_EXIT:
+ case KVM_CAP_VCPU_EVENTS:
r = 1;
break;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
@@ -240,7 +242,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
default:
- r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
+ r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
break;
}
return r;
@@ -496,7 +498,7 @@ static bool need_new_vmid_gen(struct kvm *kvm)
static void update_vttbr(struct kvm *kvm)
{
phys_addr_t pgd_phys;
- u64 vmid;
+ u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
bool new_gen;
read_lock(&kvm_vmid_lock);
@@ -544,9 +546,9 @@ static void update_vttbr(struct kvm *kvm)
/* update vttbr to be used with the new vmid */
pgd_phys = virt_to_phys(kvm->arch.pgd);
- BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
+ BUG_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm));
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
- kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
+ kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
write_unlock(&kvm_vmid_lock);
}
@@ -1295,8 +1297,6 @@ static void cpu_init_hyp_mode(void *dummy)
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
-
- kvm_arm_init_debug();
}
static void cpu_hyp_reset(void)
@@ -1309,16 +1309,12 @@ static void cpu_hyp_reinit(void)
{
cpu_hyp_reset();
- if (is_kernel_in_hyp_mode()) {
- /*
- * __cpu_init_stage2() is safe to call even if the PM
- * event was cancelled before the CPU was reset.
- */
- __cpu_init_stage2();
+ if (is_kernel_in_hyp_mode())
kvm_timer_init_vhe();
- } else {
+ else
cpu_init_hyp_mode(NULL);
- }
+
+ kvm_arm_init_debug();
if (vgic_present)
kvm_vgic_init_cpu_hardware();
@@ -1412,6 +1408,8 @@ static int init_common_resources(void)
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+ kvm_set_ipa_limit();
+
return 0;
}
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index ed162a6c57c5..5eca48bdb1a6 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -45,7 +45,6 @@ static phys_addr_t hyp_idmap_vector;
static unsigned long io_map_base;
-#define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
@@ -150,20 +149,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
- pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
- stage2_pgd_clear(pgd);
+ pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
+ stage2_pgd_clear(kvm, pgd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
- stage2_pud_free(pud_table);
+ stage2_pud_free(kvm, pud_table);
put_page(virt_to_page(pgd));
}
static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
- pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
- VM_BUG_ON(stage2_pud_huge(*pud));
- stage2_pud_clear(pud);
+ pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
+ VM_BUG_ON(stage2_pud_huge(kvm, *pud));
+ stage2_pud_clear(kvm, pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
- stage2_pmd_free(pmd_table);
+ stage2_pmd_free(kvm, pmd_table);
put_page(virt_to_page(pud));
}
@@ -252,7 +251,7 @@ static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
}
} while (pte++, addr += PAGE_SIZE, addr != end);
- if (stage2_pte_table_empty(start_pte))
+ if (stage2_pte_table_empty(kvm, start_pte))
clear_stage2_pmd_entry(kvm, pmd, start_addr);
}
@@ -262,9 +261,9 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
phys_addr_t next, start_addr = addr;
pmd_t *pmd, *start_pmd;
- start_pmd = pmd = stage2_pmd_offset(pud, addr);
+ start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
do {
- next = stage2_pmd_addr_end(addr, end);
+ next = stage2_pmd_addr_end(kvm, addr, end);
if (!pmd_none(*pmd)) {
if (pmd_thp_or_huge(*pmd)) {
pmd_t old_pmd = *pmd;
@@ -281,7 +280,7 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
}
} while (pmd++, addr = next, addr != end);
- if (stage2_pmd_table_empty(start_pmd))
+ if (stage2_pmd_table_empty(kvm, start_pmd))
clear_stage2_pud_entry(kvm, pud, start_addr);
}
@@ -291,14 +290,14 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
phys_addr_t next, start_addr = addr;
pud_t *pud, *start_pud;
- start_pud = pud = stage2_pud_offset(pgd, addr);
+ start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- if (stage2_pud_huge(*pud)) {
+ next = stage2_pud_addr_end(kvm, addr, end);
+ if (!stage2_pud_none(kvm, *pud)) {
+ if (stage2_pud_huge(kvm, *pud)) {
pud_t old_pud = *pud;
- stage2_pud_clear(pud);
+ stage2_pud_clear(kvm, pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm_flush_dcache_pud(old_pud);
put_page(virt_to_page(pud));
@@ -308,7 +307,7 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
}
} while (pud++, addr = next, addr != end);
- if (stage2_pud_table_empty(start_pud))
+ if (stage2_pud_table_empty(kvm, start_pud))
clear_stage2_pgd_entry(kvm, pgd, start_addr);
}
@@ -332,7 +331,7 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
assert_spin_locked(&kvm->mmu_lock);
WARN_ON(size & ~PAGE_MASK);
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
do {
/*
* Make sure the page table is still active, as another thread
@@ -341,8 +340,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
*/
if (!READ_ONCE(kvm->arch.pgd))
break;
- next = stage2_pgd_addr_end(addr, end);
- if (!stage2_pgd_none(*pgd))
+ next = stage2_pgd_addr_end(kvm, addr, end);
+ if (!stage2_pgd_none(kvm, *pgd))
unmap_stage2_puds(kvm, pgd, addr, next);
/*
* If the range is too large, release the kvm->mmu_lock
@@ -371,9 +370,9 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
pmd_t *pmd;
phys_addr_t next;
- pmd = stage2_pmd_offset(pud, addr);
+ pmd = stage2_pmd_offset(kvm, pud, addr);
do {
- next = stage2_pmd_addr_end(addr, end);
+ next = stage2_pmd_addr_end(kvm, addr, end);
if (!pmd_none(*pmd)) {
if (pmd_thp_or_huge(*pmd))
kvm_flush_dcache_pmd(*pmd);
@@ -389,11 +388,11 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
pud_t *pud;
phys_addr_t next;
- pud = stage2_pud_offset(pgd, addr);
+ pud = stage2_pud_offset(kvm, pgd, addr);
do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- if (stage2_pud_huge(*pud))
+ next = stage2_pud_addr_end(kvm, addr, end);
+ if (!stage2_pud_none(kvm, *pud)) {
+ if (stage2_pud_huge(kvm, *pud))
kvm_flush_dcache_pud(*pud);
else
stage2_flush_pmds(kvm, pud, addr, next);
@@ -409,10 +408,11 @@ static void stage2_flush_memslot(struct kvm *kvm,
phys_addr_t next;
pgd_t *pgd;
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
do {
- next = stage2_pgd_addr_end(addr, end);
- stage2_flush_puds(kvm, pgd, addr, next);
+ next = stage2_pgd_addr_end(kvm, addr, end);
+ if (!stage2_pgd_none(kvm, *pgd))
+ stage2_flush_puds(kvm, pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
@@ -897,7 +897,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
}
/* Allocate the HW PGD, making sure that each page gets its own refcount */
- pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
+ pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
if (!pgd)
return -ENOMEM;
@@ -986,7 +986,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
spin_lock(&kvm->mmu_lock);
if (kvm->arch.pgd) {
- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
pgd = READ_ONCE(kvm->arch.pgd);
kvm->arch.pgd = NULL;
}
@@ -994,7 +994,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
/* Free the HW pgd, one page at a time */
if (pgd)
- free_pages_exact(pgd, S2_PGD_SIZE);
+ free_pages_exact(pgd, stage2_pgd_size(kvm));
}
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1003,16 +1003,16 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
pgd_t *pgd;
pud_t *pud;
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
- if (WARN_ON(stage2_pgd_none(*pgd))) {
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
+ if (stage2_pgd_none(kvm, *pgd)) {
if (!cache)
return NULL;
pud = mmu_memory_cache_alloc(cache);
- stage2_pgd_populate(pgd, pud);
+ stage2_pgd_populate(kvm, pgd, pud);
get_page(virt_to_page(pgd));
}
- return stage2_pud_offset(pgd, addr);
+ return stage2_pud_offset(kvm, pgd, addr);
}
static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1025,15 +1025,15 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
if (!pud)
return NULL;
- if (stage2_pud_none(*pud)) {
+ if (stage2_pud_none(kvm, *pud)) {
if (!cache)
return NULL;
pmd = mmu_memory_cache_alloc(cache);
- stage2_pud_populate(pud, pmd);
+ stage2_pud_populate(kvm, pud, pmd);
get_page(virt_to_page(pud));
}
- return stage2_pmd_offset(pud, addr);
+ return stage2_pmd_offset(kvm, pud, addr);
}
static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
@@ -1207,8 +1207,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
if (writable)
pte = kvm_s2pte_mkwrite(pte);
- ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
+ ret = mmu_topup_memory_cache(&cache,
+ kvm_mmu_cache_min_pages(kvm),
+ KVM_NR_MEM_OBJS);
if (ret)
goto out;
spin_lock(&kvm->mmu_lock);
@@ -1230,8 +1231,14 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
{
kvm_pfn_t pfn = *pfnp;
gfn_t gfn = *ipap >> PAGE_SHIFT;
+ struct page *page = pfn_to_page(pfn);
- if (PageTransCompoundMap(pfn_to_page(pfn))) {
+ /*
+	 * PageTransCompoundMap() returns true for THP and
+	 * hugetlbfs pages. Make sure the adjustment is done
+	 * only for THP pages.
+ */
+ if (!PageHuge(page) && PageTransCompoundMap(page)) {
unsigned long mask;
/*
* The address we faulted on is backed by a transparent huge
@@ -1296,19 +1303,21 @@ static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
/**
* stage2_wp_pmds - write protect PUD range
+ * @kvm: kvm instance for the VM
* @pud: pointer to pud entry
* @addr: range start address
* @end: range end address
*/
-static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
+ phys_addr_t addr, phys_addr_t end)
{
pmd_t *pmd;
phys_addr_t next;
- pmd = stage2_pmd_offset(pud, addr);
+ pmd = stage2_pmd_offset(kvm, pud, addr);
do {
- next = stage2_pmd_addr_end(addr, end);
+ next = stage2_pmd_addr_end(kvm, addr, end);
if (!pmd_none(*pmd)) {
if (pmd_thp_or_huge(*pmd)) {
if (!kvm_s2pmd_readonly(pmd))
@@ -1328,18 +1337,19 @@ static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
*
* Process PUD entries, for a huge PUD we cause a panic.
*/
-static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
+ phys_addr_t addr, phys_addr_t end)
{
pud_t *pud;
phys_addr_t next;
- pud = stage2_pud_offset(pgd, addr);
+ pud = stage2_pud_offset(kvm, pgd, addr);
do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
+ next = stage2_pud_addr_end(kvm, addr, end);
+ if (!stage2_pud_none(kvm, *pud)) {
/* TODO:PUD not supported, revisit later if supported */
- BUG_ON(stage2_pud_huge(*pud));
- stage2_wp_pmds(pud, addr, next);
+ BUG_ON(stage2_pud_huge(kvm, *pud));
+ stage2_wp_pmds(kvm, pud, addr, next);
}
} while (pud++, addr = next, addr != end);
}
@@ -1355,7 +1365,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
pgd_t *pgd;
phys_addr_t next;
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
do {
/*
* Release kvm_mmu_lock periodically if the memory region is
@@ -1369,9 +1379,9 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
cond_resched_lock(&kvm->mmu_lock);
if (!READ_ONCE(kvm->arch.pgd))
break;
- next = stage2_pgd_addr_end(addr, end);
- if (stage2_pgd_present(*pgd))
- stage2_wp_puds(pgd, addr, next);
+ next = stage2_pgd_addr_end(kvm, addr, end);
+ if (stage2_pgd_present(kvm, *pgd))
+ stage2_wp_puds(kvm, pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
@@ -1450,20 +1460,14 @@ static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
static void kvm_send_hwpoison_signal(unsigned long address,
struct vm_area_struct *vma)
{
- siginfo_t info;
-
- clear_siginfo(&info);
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_MCEERR_AR;
- info.si_addr = (void __user *)address;
+ short lsb;
if (is_vm_hugetlb_page(vma))
- info.si_addr_lsb = huge_page_shift(hstate_vma(vma));
+ lsb = huge_page_shift(hstate_vma(vma));
else
- info.si_addr_lsb = PAGE_SHIFT;
+ lsb = PAGE_SHIFT;
- send_sig_info(SIGBUS, &info, current);
+ send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1520,7 +1524,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
up_read(&current->mm->mmap_sem);
/* We need minimum second+third level pages */
- ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
+ ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
KVM_NR_MEM_OBJS);
if (ret)
return ret;
@@ -1763,7 +1767,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
/* Userspace should not be able to register out-of-bounds IPAs */
- VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
+ VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
if (fault_status == FSC_ACCESS) {
handle_access_fault(vcpu, fault_ipa);
@@ -2062,7 +2066,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* space addressable by the KVM guest IPA space.
*/
if (memslot->base_gfn + memslot->npages >=
- (KVM_PHYS_SIZE >> PAGE_SHIFT))
+ (kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
down_read(&current->mm->mmap_sem);
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 12502251727e..eb2a390a6c86 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -241,13 +241,6 @@ static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
list_for_each_entry(dev, &(its)->device_list, dev_list) \
list_for_each_entry(ite, &(dev)->itt_head, ite_list)
-/*
- * We only implement 48 bits of PA at the moment, although the ITS
- * supports more. Let's be restrictive here.
- */
-#define BASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 16))
-#define CBASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 12))
-
#define GIC_LPI_OFFSET 8192
#define VITS_TYPER_IDBITS 16
@@ -759,6 +752,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
{
int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
+ phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
int esz = GITS_BASER_ENTRY_SIZE(baser);
int index;
gfn_t gfn;
@@ -783,7 +777,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
if (id >= (l1_tbl_size / esz))
return false;
- addr = BASER_ADDRESS(baser) + id * esz;
+ addr = base + id * esz;
gfn = addr >> PAGE_SHIFT;
if (eaddr)
@@ -798,7 +792,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
/* Each 1st level entry is represented by a 64-bit value. */
if (kvm_read_guest_lock(its->dev->kvm,
- BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
+ base + index * sizeof(indirect_ptr),
&indirect_ptr, sizeof(indirect_ptr)))
return false;
@@ -808,11 +802,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
if (!(indirect_ptr & BIT_ULL(63)))
return false;
- /*
- * Mask the guest physical address and calculate the frame number.
- * Any address beyond our supported 48 bits of PA will be caught
- * by the actual check in the final step.
- */
+ /* Mask the guest physical address and calculate the frame number. */
indirect_ptr &= GENMASK_ULL(51, 16);
/* Find the address of the actual entry */
@@ -1304,9 +1294,6 @@ static u64 vgic_sanitise_its_baser(u64 reg)
GITS_BASER_OUTER_CACHEABILITY_SHIFT,
vgic_sanitise_outer_cacheability);
- /* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
- reg &= ~GENMASK_ULL(15, 12);
-
/* We support only one (ITS) page size: 64K */
reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;
@@ -1325,11 +1312,8 @@ static u64 vgic_sanitise_its_cbaser(u64 reg)
GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
vgic_sanitise_outer_cacheability);
- /*
- * Sanitise the physical address to be 64k aligned.
- * Also limit the physical addresses to 48 bits.
- */
- reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));
+ /* Sanitise the physical address to be 64k aligned. */
+ reg &= ~GENMASK_ULL(15, 12);
return reg;
}
@@ -1375,7 +1359,7 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
if (!its->enabled)
return;
- cbaser = CBASER_ADDRESS(its->cbaser);
+ cbaser = GITS_CBASER_ADDRESS(its->cbaser);
while (its->cwriter != its->creadr) {
int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
@@ -2233,7 +2217,7 @@ static int vgic_its_restore_device_tables(struct vgic_its *its)
if (!(baser & GITS_BASER_VALID))
return 0;
- l1_gpa = BASER_ADDRESS(baser);
+ l1_gpa = GITS_BASER_ADDR_48_to_52(baser);
if (baser & GITS_BASER_INDIRECT) {
l1_esz = GITS_LVL1_ENTRY_SIZE;
@@ -2305,7 +2289,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
{
const struct vgic_its_abi *abi = vgic_its_get_abi(its);
u64 baser = its->baser_coll_table;
- gpa_t gpa = BASER_ADDRESS(baser);
+ gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
struct its_collection *collection;
u64 val;
size_t max_size, filled = 0;
@@ -2354,7 +2338,7 @@ static int vgic_its_restore_collection_table(struct vgic_its *its)
if (!(baser & GITS_BASER_VALID))
return 0;
- gpa = BASER_ADDRESS(baser);
+ gpa = GITS_BASER_ADDR_48_to_52(baser);
max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 6ada2432e37c..114dce9f4bf5 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -25,7 +25,7 @@
int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
phys_addr_t addr, phys_addr_t alignment)
{
- if (addr & ~KVM_PHYS_MASK)
+ if (addr & ~kvm_phys_mask(kvm))
return -E2BIG;
if (!IS_ALIGNED(addr, alignment))
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index a2a175b08b17..b3d1f0985117 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -364,7 +364,6 @@ static u64 vgic_sanitise_pendbaser(u64 reg)
vgic_sanitise_outer_cacheability);
reg &= ~PENDBASER_RES0_MASK;
- reg &= ~GENMASK_ULL(51, 48);
return reg;
}
@@ -382,7 +381,6 @@ static u64 vgic_sanitise_propbaser(u64 reg)
vgic_sanitise_outer_cacheability);
reg &= ~PROPBASER_RES0_MASK;
- reg &= ~GENMASK_ULL(51, 48);
return reg;
}
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 9e65feb6fa58..3710342cf6ad 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -83,6 +83,7 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
ring->coalesced_mmio[ring->last].phys_addr = addr;
ring->coalesced_mmio[ring->last].len = len;
memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+ ring->coalesced_mmio[ring->last].pio = dev->zone.pio;
smp_wmb();
ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
spin_unlock(&dev->kvm->ring_lock);
@@ -140,6 +141,9 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
int ret;
struct kvm_coalesced_mmio_dev *dev;
+ if (zone->pio != 1 && zone->pio != 0)
+ return -EINVAL;
+
dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -149,8 +153,9 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
dev->zone = *zone;
mutex_lock(&kvm->slots_lock);
- ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
- zone->size, &dev->dev);
+ ret = kvm_io_bus_register_dev(kvm,
+ zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
+ zone->addr, zone->size, &dev->dev);
if (ret < 0)
goto out_free_dev;
list_add_tail(&dev->list, &kvm->coalesced_zones);
@@ -174,7 +179,8 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
- kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+ kvm_io_bus_unregister_dev(kvm,
+ zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
kvm_iodevice_destructor(&dev->dev);
}
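On the userspace side, the new zone->pio flag selects KVM_PIO_BUS at registration time. A hypothetical caller might look like this sketch; it assumes a uapi linux/kvm.h new enough to carry the pio member that this series adds to struct kvm_coalesced_mmio_zone:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int register_coalesced_pio(int vm_fd, unsigned short port, unsigned int len)
{
	struct kvm_coalesced_mmio_zone zone;

	memset(&zone, 0, sizeof(zone));
	zone.addr = port;	/* for PIO zones, addr holds the port number */
	zone.size = len;
	zone.pio  = 1;		/* 0 = MMIO bus, 1 = PIO bus; anything else gets -EINVAL */

	return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}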
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f986e31fa68c..786ade1843a2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -219,7 +219,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
me = get_cpu();
kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!test_bit(i, vcpu_bitmap))
+ if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
continue;
kvm_make_request(req, vcpu);
@@ -243,12 +243,10 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
cpumask_var_t cpus;
bool called;
- static unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]
- = {[0 ... BITS_TO_LONGS(KVM_MAX_VCPUS)-1] = ULONG_MAX};
zalloc_cpumask_var(&cpus, GFP_ATOMIC);
- called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap, cpus);
+ called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);
free_cpumask_var(cpus);
return called;
@@ -807,20 +805,25 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
* sorted array and known changed memslot position.
*/
static void update_memslots(struct kvm_memslots *slots,
- struct kvm_memory_slot *new)
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
int id = new->id;
int i = slots->id_to_index[id];
struct kvm_memory_slot *mslots = slots->memslots;
WARN_ON(mslots[i].id != id);
- if (!new->npages) {
- WARN_ON(!mslots[i].npages);
- if (mslots[i].npages)
- slots->used_slots--;
- } else {
- if (!mslots[i].npages)
- slots->used_slots++;
+ switch (change) {
+ case KVM_MR_CREATE:
+ slots->used_slots++;
+ WARN_ON(mslots[i].npages || !new->npages);
+ break;
+ case KVM_MR_DELETE:
+ slots->used_slots--;
+ WARN_ON(new->npages || !mslots[i].npages);
+ break;
+ default:
+ break;
}
while (i < KVM_MEM_SLOTS_NUM - 1 &&
@@ -1056,7 +1059,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
memset(&new.arch, 0, sizeof(new.arch));
}
- update_memslots(slots, &new);
+ update_memslots(slots, &new, change);
old_memslots = install_new_memslots(kvm, as_id, slots);
kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);
@@ -1311,8 +1314,12 @@ unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
/*
- * If writable is set to false, the hva returned by this function is only
- * allowed to be read.
+ * Return the hva of a @gfn and the R/W attribute if possible.
+ *
+ * @slot: the kvm_memory_slot which contains @gfn
+ * @gfn: the gfn to be translated
+ * @writable: used to return the read/write attribute of the @slot if the hva
+ * is valid and @writable is not NULL
*/
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
gfn_t gfn, bool *writable)
@@ -2946,6 +2953,8 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#ifdef CONFIG_KVM_MMIO
case KVM_CAP_COALESCED_MMIO:
return KVM_COALESCED_MMIO_PAGE_OFFSET;
+ case KVM_CAP_COALESCED_PIO:
+ return 1;
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
case KVM_CAP_IRQ_ROUTING: